From c1abc95b157fe4574919942018af143203ecca8e Mon Sep 17 00:00:00 2001
From: Jeff Kirsher
Date: Tue, 29 Mar 2011 18:25:21 -0700
Subject: drivers/net/ethernet: Add ethernet dir and config option

This is the initial patch to organize the drivers/net directory
structure and networking device driver config options.

This patch does the following:
- add drivers/net/ethernet/Kconfig
- integrate the new files into the existing config

Signed-off-by: Jeff Kirsher

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8d0314d..5b95796 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -193,6 +193,8 @@ source "drivers/net/phy/Kconfig"
 #
 # Ethernet
 #
+source "drivers/net/ethernet/Kconfig"
+
 menuconfig NET_ETHERNET
 	bool "Ethernet (10 or 100Mbit)"
 	depends on !UML

diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e1eca2a..670b514 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -1,5 +1,5 @@
 #
-# Makefile for the Linux network (ethercard) device drivers.
+# Makefile for the Linux network device drivers.
 #

 obj-$(CONFIG_MII) += mii.o

diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
new file mode 100644
index 0000000..d59e4f2
--- /dev/null
+++ b/drivers/net/ethernet/Kconfig
@@ -0,0 +1,14 @@
+#
+# Ethernet LAN device configuration
+#
+
+menuconfig ETHERNET
+	bool "Ethernet driver support"
+	depends on NET
+	default y
+	---help---
+	  This section contains all the Ethernet device drivers.
+
+if ETHERNET
+
+endif # ETHERNET

diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
new file mode 100644
index 0000000..0d21dda
--- /dev/null
+++ b/drivers/net/ethernet/Makefile
@@ -0,0 +1,3 @@
+#
+# Makefile for the Linux network Ethernet device drivers.
+#
--
cgit v0.10.2

From ca7a8e85262e93065b2a49dfb96a24d4a534a049 Mon Sep 17 00:00:00 2001
From: Jeff Kirsher
Date: Wed, 30 Mar 2011 03:47:06 -0700
Subject: 3c*/acenic/typhoon: Move 3Com Ethernet drivers

Moves the 3Com drivers into drivers/net/ethernet/3com/ and makes the
necessary Kconfig and Makefile changes.

Did not move the following drivers because they use a non-3Com chipset:
3c503, 3c505, 3c507, 3c523 and 3c527

CC: Steffen Klassert
CC: David Dillow
CC: Jes Sorensen
CC: Alan Cox
CC: David Hinds
Signed-off-by: Jeff Kirsher
Acked-by: David Dillow

diff --git a/MAINTAINERS b/MAINTAINERS
index 51d42fb..112e226 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -124,13 +124,13 @@ M:	Steffen Klassert
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/networking/vortex.txt
-F:	drivers/net/3c59x.c
+F:	drivers/net/ethernet/3com/3c59x.c

 3CR990 NETWORK DRIVER
 M:	David Dillow
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	drivers/net/typhoon*
+F:	drivers/net/ethernet/3com/typhoon*

 3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
 M:	Adam Radford
@@ -214,7 +214,7 @@ ACENIC DRIVER
 M:	Jes Sorensen
 L:	linux-acenic@sunsite.dk
 S:	Maintained
-F:	drivers/net/acenic*
+F:	drivers/net/ethernet/3com/acenic*

 ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER
 M:	Peter Feuerer

diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
deleted file mode 100644
index 5420f6d..0000000
--- a/drivers/net/3c501.c
+++ /dev/null
@@ -1,896 +0,0 @@
-/* 3c501.c: A 3Com 3c501 Ethernet driver for Linux. */
-/*
-    Written 1992,1993,1994 Donald Becker
-
-    Copyright 1993 United States Government as represented by the
-    Director, National Security Agency.  This software may be used and
-    distributed according to the terms of the GNU General Public License,
-    incorporated herein by reference.
-
-    This is a device driver for the 3Com Etherlink 3c501.
-    Do not purchase this card, even as a joke.  Its performance is horrible,
-    and it breaks in many ways.
-
-    The original author may be reached as becker@scyld.com, or C/O
-	Scyld Computing Corporation
-	410 Severn Ave., Suite 210
-	Annapolis MD 21403
-
-    Fixed (again!) the missing interrupt locking on TX/RX shifting.
-		Alan Cox
-
-    Removed calls to init_etherdev since they are no longer needed, and
-    cleaned up modularization just a bit.  The driver still allows only
-    the default address for cards when loaded as a module, but that's
-    really less braindead than anyone using a 3c501 board. :)
-		19950208 (invid@msen.com)
-
-    Added traps for interrupts hitting the window as we clear and TX load
-    the board.  Now getting 150K/second FTP with a 3c501 card.  Still playing
-    with a TX-TX optimisation to see if we can touch 180-200K/second, which
-    seems to be the theoretical maximum.
-		19950402 Alan Cox
-
-    Cleaned up for 2.3.x because we broke SMP now.
-		20000208 Alan Cox
-
-    Check-up pass for 2.5.  Nothing significant changed.
-		20021009 Alan Cox
-
-    Fixed zero-fill corner case.
-		20030104 Alan Cox
-
-
-    For the avoidance of doubt, the "preferred form" of this code is one
-    which is in an open, non-patent-encumbered format.  Where cryptographic
-    key signing forms part of the process of creating an executable, the
-    information including keys needed to generate an equivalently functional
-    executable is deemed to be part of the source code.
-
-*/
-
-
-/**
- * DOC: 3c501 Card Notes
- *
- * Some notes on this thing if you have to hack it.  [Alan]
- *
- * Some documentation is available from 3Com.  Due to the board's age,
- * standard responses when you ask for this will range from 'be serious'
- * to 'give it to a museum'.  The documentation is incomplete and mostly
- * of historical interest anyway.
- *
- * The basic system is a single buffer which can be used to receive or
- * transmit a packet.  A third, command mode exists when you are setting
- * things up.
- *
- * If it's transmitting it's not receiving and vice versa.  In fact the
- * time to get the board back into a useful state after an operation is
- * quite long.
- *
- * The driver works by keeping the board in receive mode, waiting for a
- * packet to arrive.  When one arrives it is copied out of the buffer
- * and delivered to the kernel.  The card is reloaded and off we go.
- *
- * When transmitting, lp->txing is set and the card is reset (from
- * receive mode) [possibly losing a packet just received] to command
- * mode.  A packet is loaded and transmit mode is triggered.  The
- * interrupt handler runs different code for transmit interrupts and can
- * handle returning to receive mode or retransmissions (yes, you have to
- * help out with those too).
- *
- * DOC: Problems
- *
- * There are a wide variety of undocumented error returns from the card,
- * and you basically have to kick the board and pray if they turn up.  Most
- * only occur under extreme load or if you do something the board doesn't
- * like (e.g. touching a register at the wrong time).
- *
- * The driver is less efficient than it could be: it switches back to
- * receive mode even if more transmits are queued.  If this worries you,
- * buy a real Ethernet card.
- *
- * The combination of slow receive restart and no real multicast
- * filter makes the board unusable with a kernel compiled for IP
- * multicasting in a real multicast environment.
That's down to the board, - * but even with no multicast programs running a multicast IP kernel is - * in group 224.0.0.1 and you will therefore be listening to all multicasts. - * One nv conference running over that Ethernet and you can give up. - * - */ - -#define DRV_NAME "3c501" -#define DRV_VERSION "2002/10/09" - - -static const char version[] = - DRV_NAME ".c: " DRV_VERSION " Alan Cox (alan@lxorguk.ukuu.org.uk).\n"; - -/* - * Braindamage remaining: - * The 3c501 board. - */ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include - -#include "3c501.h" - -/* - * The boilerplate probe code. - */ - -static int io = 0x280; -static int irq = 5; -static int mem_start; - -/** - * el1_probe: - probe for a 3c501 - * @dev: The device structure passed in to probe. - * - * This can be called from two places. The network layer will probe using - * a device structure passed in with the probe information completed. For a - * modular driver we use #init_module to fill in our own structure and probe - * for it. - * - * Returns 0 on success. ENXIO if asked not to probe and ENODEV if asked to - * probe and failing to find anything. - */ - -struct net_device * __init el1_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); - static const unsigned ports[] = { 0x280, 0x300, 0}; - const unsigned *port; - int err = 0; - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - mem_start = dev->mem_start & 7; - } - - if (io > 0x1ff) { /* Check a single specified location. */ - err = el1_probe1(dev, io); - } else if (io != 0) { - err = -ENXIO; /* Don't probe at all. */ - } else { - for (port = ports; *port && el1_probe1(dev, *port); port++) - ; - if (!*port) - err = -ENODEV; - } - if (err) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - release_region(dev->base_addr, EL1_IO_EXTENT); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops el_netdev_ops = { - .ndo_open = el_open, - .ndo_stop = el1_close, - .ndo_start_xmit = el_start_xmit, - .ndo_tx_timeout = el_timeout, - .ndo_set_multicast_list = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/** - * el1_probe1: - * @dev: The device structure to use - * @ioaddr: An I/O address to probe at. - * - * The actual probe. This is iterated over by #el1_probe in order to - * check all the applicable device locations. - * - * Returns 0 for a success, in which case the device is activated, - * EAGAIN if the IRQ is in use by another driver, and ENODEV if the - * board cannot be found. - */ - -static int __init el1_probe1(struct net_device *dev, int ioaddr) -{ - struct net_local *lp; - const char *mname; /* Vendor name */ - unsigned char station_addr[6]; - int autoirq = 0; - int i; - - /* - * Reserve I/O resource for exclusive use by this driver - */ - - if (!request_region(ioaddr, EL1_IO_EXTENT, DRV_NAME)) - return -ENODEV; - - /* - * Read the station address PROM data from the special port. - */ - - for (i = 0; i < 6; i++) { - outw(i, ioaddr + EL1_DATAPTR); - station_addr[i] = inb(ioaddr + EL1_SAPROM); - } - /* - * Check the first three octets of the S.A. for 3Com's prefix, or - * for the Sager NP943 prefix. 
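 *
 * Editorial sketch, not part of the original file: the branch below is
 * a plain three-byte OUI compare.  Factored out, with a hypothetical
 * helper name, it would read:
 *
 *	static const char *el1_vendor_name(const unsigned char *sa)
 *	{
 *		if (sa[0] == 0x02 && sa[1] == 0x60 && sa[2] == 0x8c)
 *			return "3c501";
 *		if (sa[0] == 0x00 && sa[1] == 0x80 && sa[2] == 0xc8)
 *			return "NP943";
 *		return NULL;
 *	}
 *
 * 02:60:8c is the 3Com prefix and 00:80:c8 the Sager NP943 prefix,
 * exactly as tested by the original code that follows.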
- */ - - if (station_addr[0] == 0x02 && station_addr[1] == 0x60 && - station_addr[2] == 0x8c) - mname = "3c501"; - else if (station_addr[0] == 0x00 && station_addr[1] == 0x80 && - station_addr[2] == 0xC8) - mname = "NP943"; - else { - release_region(ioaddr, EL1_IO_EXTENT); - return -ENODEV; - } - - /* - * We auto-IRQ by shutting off the interrupt line and letting it - * float high. - */ - - dev->irq = irq; - - if (dev->irq < 2) { - unsigned long irq_mask; - - irq_mask = probe_irq_on(); - inb(RX_STATUS); /* Clear pending interrupts. */ - inb(TX_STATUS); - outb(AX_LOOP + 1, AX_CMD); - - outb(0x00, AX_CMD); - - mdelay(20); - autoirq = probe_irq_off(irq_mask); - - if (autoirq == 0) { - pr_warning("%s probe at %#x failed to detect IRQ line.\n", - mname, ioaddr); - release_region(ioaddr, EL1_IO_EXTENT); - return -EAGAIN; - } - } - - outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */ - dev->base_addr = ioaddr; - memcpy(dev->dev_addr, station_addr, ETH_ALEN); - - if (mem_start & 0xf) - el_debug = mem_start & 0x7; - if (autoirq) - dev->irq = autoirq; - - pr_info("%s: %s EtherLink at %#lx, using %sIRQ %d.\n", - dev->name, mname, dev->base_addr, - autoirq ? "auto":"assigned ", dev->irq); - -#ifdef CONFIG_IP_MULTICAST - pr_warning("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n"); -#endif - - if (el_debug) - pr_debug("%s", version); - - lp = netdev_priv(dev); - memset(lp, 0, sizeof(struct net_local)); - spin_lock_init(&lp->lock); - - /* - * The EL1-specific entries in the device structure. - */ - - dev->netdev_ops = &el_netdev_ops; - dev->watchdog_timeo = HZ; - dev->ethtool_ops = &netdev_ethtool_ops; - return 0; -} - -/** - * el1_open: - * @dev: device that is being opened - * - * When an ifconfig is issued which changes the device flags to include - * IFF_UP this function is called. It is only called when the change - * occurs, not when the interface remains up. #el1_close will be called - * when it goes down. - * - * Returns 0 for a successful open, or -EAGAIN if someone has run off - * with our interrupt line. - */ - -static int el_open(struct net_device *dev) -{ - int retval; - int ioaddr = dev->base_addr; - struct net_local *lp = netdev_priv(dev); - unsigned long flags; - - if (el_debug > 2) - pr_debug("%s: Doing el_open()...\n", dev->name); - - retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev); - if (retval) - return retval; - - spin_lock_irqsave(&lp->lock, flags); - el_reset(dev); - spin_unlock_irqrestore(&lp->lock, flags); - - lp->txing = 0; /* Board in RX mode */ - outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */ - netif_start_queue(dev); - return 0; -} - -/** - * el_timeout: - * @dev: The 3c501 card that has timed out - * - * Attempt to restart the board. This is basically a mixture of extreme - * violence and prayer - * - */ - -static void el_timeout(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - if (el_debug) - pr_debug("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n", - dev->name, inb(TX_STATUS), - inb(AX_STATUS), inb(RX_STATUS)); - dev->stats.tx_errors++; - outb(TX_NORM, TX_CMD); - outb(RX_NORM, RX_CMD); - outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. 
*/ - outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */ - lp->txing = 0; /* Ripped back in to RX */ - netif_wake_queue(dev); -} - - -/** - * el_start_xmit: - * @skb: The packet that is queued to be sent - * @dev: The 3c501 card we want to throw it down - * - * Attempt to send a packet to a 3c501 card. There are some interesting - * catches here because the 3c501 is an extremely old and therefore - * stupid piece of technology. - * - * If we are handling an interrupt on the other CPU we cannot load a packet - * as we may still be attempting to retrieve the last RX packet buffer. - * - * When a transmit times out we dump the card into control mode and just - * start again. It happens enough that it isn't worth logging. - * - * We avoid holding the spin locks when doing the packet load to the board. - * The device is very slow, and its DMA mode is even slower. If we held the - * lock while loading 1500 bytes onto the controller we would drop a lot of - * serial port characters. This requires we do extra locking, but we have - * no real choice. - */ - -static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long flags; - - /* - * Avoid incoming interrupts between us flipping txing and flipping - * mode as the driver assumes txing is a faithful indicator of card - * state - */ - - spin_lock_irqsave(&lp->lock, flags); - - /* - * Avoid timer-based retransmission conflicts. - */ - - netif_stop_queue(dev); - - do { - int len = skb->len; - int pad = 0; - int gp_start; - unsigned char *buf = skb->data; - - if (len < ETH_ZLEN) - pad = ETH_ZLEN - len; - - gp_start = 0x800 - (len + pad); - - lp->tx_pkt_start = gp_start; - lp->collisions = 0; - - dev->stats.tx_bytes += skb->len; - - /* - * Command mode with status cleared should [in theory] - * mean no more interrupts can be pending on the card. - */ - - outb_p(AX_SYS, AX_CMD); - inb_p(RX_STATUS); - inb_p(TX_STATUS); - - lp->loading = 1; - lp->txing = 1; - - /* - * Turn interrupts back on while we spend a pleasant - * afternoon loading bytes into the board - */ - - spin_unlock_irqrestore(&lp->lock, flags); - - /* Set rx packet area to 0. */ - outw(0x00, RX_BUF_CLR); - /* aim - packet will be loaded into buffer start */ - outw(gp_start, GP_LOW); - /* load buffer (usual thing each byte increments the pointer) */ - outsb(DATAPORT, buf, len); - if (pad) { - while (pad--) /* Zero fill buffer tail */ - outb(0, DATAPORT); - } - /* the board reuses the same register */ - outw(gp_start, GP_LOW); - - if (lp->loading != 2) { - /* fire ... Trigger xmit. */ - outb(AX_XMIT, AX_CMD); - lp->loading = 0; - if (el_debug > 2) - pr_debug(" queued xmit.\n"); - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - /* A receive upset our load, despite our best efforts */ - if (el_debug > 2) - pr_debug("%s: burped during tx load.\n", dev->name); - spin_lock_irqsave(&lp->lock, flags); - } while (1); -} - -/** - * el_interrupt: - * @irq: Interrupt number - * @dev_id: The 3c501 that burped - * - * Handle the ether interface interrupts. The 3c501 needs a lot more - * hand holding than most cards. In particular we get a transmit interrupt - * with a collision error because the board firmware isn't capable of rewinding - * its own transmit buffer pointers. It can however count to 16 for us. - * - * On the receive side the card is also very dumb. It has no buffering to - * speak of. 
We simply pull the packet out of its PIO buffer (which is slow) - * and queue it for the kernel. Then we reset the card for the next packet. - * - * We sometimes get surprise interrupts late both because the SMP IRQ delivery - * is message passing and because the card sometimes seems to deliver late. I - * think if it is part way through a receive and the mode is changed it carries - * on receiving and sends us an interrupt. We have to band aid all these cases - * to get a sensible 150kBytes/second performance. Even then you want a small - * TCP window. - */ - -static irqreturn_t el_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct net_local *lp; - int ioaddr; - int axsr; /* Aux. status reg. */ - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - - spin_lock(&lp->lock); - - /* - * What happened ? - */ - - axsr = inb(AX_STATUS); - - /* - * Log it - */ - - if (el_debug > 3) - pr_debug("%s: el_interrupt() aux=%#02x\n", dev->name, axsr); - - if (lp->loading == 1 && !lp->txing) - pr_warning("%s: Inconsistent state loading while not in tx\n", - dev->name); - - if (lp->txing) { - /* - * Board in transmit mode. May be loading. If we are - * loading we shouldn't have got this. - */ - int txsr = inb(TX_STATUS); - - if (lp->loading == 1) { - if (el_debug > 2) - pr_debug("%s: Interrupt while loading [txsr=%02x gp=%04x rp=%04x]\n", - dev->name, txsr, inw(GP_LOW), inw(RX_LOW)); - - /* Force a reload */ - lp->loading = 2; - spin_unlock(&lp->lock); - goto out; - } - if (el_debug > 6) - pr_debug("%s: txsr=%02x gp=%04x rp=%04x\n", dev->name, - txsr, inw(GP_LOW), inw(RX_LOW)); - - if ((axsr & 0x80) && (txsr & TX_READY) == 0) { - /* - * FIXME: is there a logic to whether to keep - * on trying or reset immediately ? - */ - if (el_debug > 1) - pr_debug("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n", - dev->name, txsr, axsr, - inw(ioaddr + EL1_DATAPTR), - inw(ioaddr + EL1_RXPTR)); - lp->txing = 0; - netif_wake_queue(dev); - } else if (txsr & TX_16COLLISIONS) { - /* - * Timed out - */ - if (el_debug) - pr_debug("%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name); - outb(AX_SYS, AX_CMD); - lp->txing = 0; - dev->stats.tx_aborted_errors++; - netif_wake_queue(dev); - } else if (txsr & TX_COLLISION) { - /* - * Retrigger xmit. - */ - - if (el_debug > 6) - pr_debug("%s: retransmitting after a collision.\n", dev->name); - /* - * Poor little chip can't reset its own start - * pointer - */ - - outb(AX_SYS, AX_CMD); - outw(lp->tx_pkt_start, GP_LOW); - outb(AX_XMIT, AX_CMD); - dev->stats.collisions++; - spin_unlock(&lp->lock); - goto out; - } else { - /* - * It worked.. we will now fall through and receive - */ - dev->stats.tx_packets++; - if (el_debug > 6) - pr_debug("%s: Tx succeeded %s\n", dev->name, - (txsr & TX_RDY) ? "." : "but tx is busy!"); - /* - * This is safe the interrupt is atomic WRT itself. - */ - lp->txing = 0; - /* In case more to transmit */ - netif_wake_queue(dev); - } - } else { - /* - * In receive mode. - */ - - int rxsr = inb(RX_STATUS); - if (el_debug > 5) - pr_debug("%s: rxsr=%02x txsr=%02x rp=%04x\n", - dev->name, rxsr, inb(TX_STATUS), inw(RX_LOW)); - /* - * Just reading rx_status fixes most errors. - */ - if (rxsr & RX_MISSED) - dev->stats.rx_missed_errors++; - else if (rxsr & RX_RUNT) { - /* Handled to avoid board lock-up. */ - dev->stats.rx_length_errors++; - if (el_debug > 5) - pr_debug("%s: runt.\n", dev->name); - } else if (rxsr & RX_GOOD) { - /* - * Receive worked. - */ - el_receive(dev); - } else { - /* - * Nothing? 
Something is broken! - */ - if (el_debug > 2) - pr_debug("%s: No packet seen, rxsr=%02x **resetting 3c501***\n", - dev->name, rxsr); - el_reset(dev); - } - } - - /* - * Move into receive mode - */ - - outb(AX_RX, AX_CMD); - outw(0x00, RX_BUF_CLR); - inb(RX_STATUS); /* Be certain that interrupts are cleared. */ - inb(TX_STATUS); - spin_unlock(&lp->lock); -out: - return IRQ_HANDLED; -} - - -/** - * el_receive: - * @dev: Device to pull the packets from - * - * We have a good packet. Well, not really "good", just mostly not broken. - * We must check everything to see if it is good. In particular we occasionally - * get wild packet sizes from the card. If the packet seems sane we PIO it - * off the card and queue it for the protocol layers. - */ - -static void el_receive(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - int pkt_len; - struct sk_buff *skb; - - pkt_len = inw(RX_LOW); - - if (el_debug > 4) - pr_debug(" el_receive %d.\n", pkt_len); - - if (pkt_len < 60 || pkt_len > 1536) { - if (el_debug) - pr_debug("%s: bogus packet, length=%d\n", - dev->name, pkt_len); - dev->stats.rx_over_errors++; - return; - } - - /* - * Command mode so we can empty the buffer - */ - - outb(AX_SYS, AX_CMD); - skb = dev_alloc_skb(pkt_len+2); - - /* - * Start of frame - */ - - outw(0x00, GP_LOW); - if (skb == NULL) { - pr_info("%s: Memory squeeze, dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - return; - } else { - skb_reserve(skb, 2); /* Force 16 byte alignment */ - /* - * The read increments through the bytes. The interrupt - * handler will fix the pointer when it returns to - * receive mode. - */ - insb(DATAPORT, skb_put(skb, pkt_len), pkt_len); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } -} - -/** - * el_reset: Reset a 3c501 card - * @dev: The 3c501 card about to get zapped - * - * Even resetting a 3c501 isn't simple. When you activate reset it loses all - * its configuration. You must hold the lock when doing this. The function - * cannot take the lock itself as it is callable from the irq handler. - */ - -static void el_reset(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - if (el_debug > 2) - pr_info("3c501 reset...\n"); - outb(AX_RESET, AX_CMD); /* Reset the chip */ - /* Aux control, irq and loopback enabled */ - outb(AX_LOOP, AX_CMD); - { - int i; - for (i = 0; i < 6; i++) /* Set the station address. */ - outb(dev->dev_addr[i], ioaddr + i); - } - - outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */ - outb(TX_NORM, TX_CMD); /* tx irq on done, collision */ - outb(RX_NORM, RX_CMD); /* Set Rx commands. */ - inb(RX_STATUS); /* Clear status. */ - inb(TX_STATUS); - lp->txing = 0; -} - -/** - * el1_close: - * @dev: 3c501 card to shut down - * - * Close a 3c501 card. The IFF_UP flag has been cleared by the user via - * the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued, - * and then disable the interrupts. Finally we reset the chip. The effects - * of the rest will be cleaned up by #el1_open. Always returns 0 indicating - * a success. - */ - -static int el1_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - if (el_debug > 2) - pr_info("%s: Shutting down Ethernet card at %#x.\n", - dev->name, ioaddr); - - netif_stop_queue(dev); - - /* - * Free and disable the IRQ. 
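 *
 * (Editorial note: the teardown order that follows, netif_stop_queue()
 * first, then free_irq(), then the AX_RESET write, is the usual
 * quiesce-software-first pattern; the reset comes last so the board is
 * left inert whatever state the interrupt handler last saw.)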
- */ - - free_irq(dev->irq, dev); - outb(AX_RESET, AX_CMD); /* Reset the chip */ - - return 0; -} - -/** - * set_multicast_list: - * @dev: The device to adjust - * - * Set or clear the multicast filter for this adaptor to use the best-effort - * filtering supported. The 3c501 supports only three modes of filtering. - * It always receives broadcasts and packets for itself. You can choose to - * optionally receive all packets, or all multicast packets on top of this. - */ - -static void set_multicast_list(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - if (dev->flags & IFF_PROMISC) { - outb(RX_PROM, RX_CMD); - inb(RX_STATUS); - } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { - /* Multicast or all multicast is the same */ - outb(RX_MULT, RX_CMD); - inb(RX_STATUS); /* Clear status. */ - } else { - outb(RX_NORM, RX_CMD); - inb(RX_STATUS); - } -} - - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); -} - -static u32 netdev_get_msglevel(struct net_device *dev) -{ - return debug; -} - -static void netdev_set_msglevel(struct net_device *dev, u32 level) -{ - debug = level; -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_msglevel = netdev_get_msglevel, - .set_msglevel = netdev_set_msglevel, -}; - -#ifdef MODULE - -static struct net_device *dev_3c501; - -module_param(io, int, 0); -module_param(irq, int, 0); -MODULE_PARM_DESC(io, "EtherLink I/O base address"); -MODULE_PARM_DESC(irq, "EtherLink IRQ number"); - -/** - * init_module: - * - * When the driver is loaded as a module this function is called. We fake up - * a device structure with the base I/O and interrupt set as if it were being - * called from Space.c. This minimises the extra code that would otherwise - * be required. - * - * Returns 0 for success or -EIO if a card is not found. Returning an error - * here also causes the module to be unloaded - */ - -int __init init_module(void) -{ - dev_3c501 = el1_probe(-1); - if (IS_ERR(dev_3c501)) - return PTR_ERR(dev_3c501); - return 0; -} - -/** - * cleanup_module: - * - * The module is being unloaded. We unhook our network device from the system - * and then free up the resources we took when the card was found. - */ - -void __exit cleanup_module(void) -{ - struct net_device *dev = dev_3c501; - unregister_netdev(dev); - release_region(dev->base_addr, EL1_IO_EXTENT); - free_netdev(dev); -} - -#endif /* MODULE */ - -MODULE_AUTHOR("Donald Becker, Alan Cox"); -MODULE_DESCRIPTION("Support for the ancient 3Com 3c501 ethernet card"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/net/3c501.h b/drivers/net/3c501.h deleted file mode 100644 index 183fd55..0000000 --- a/drivers/net/3c501.h +++ /dev/null @@ -1,91 +0,0 @@ - -/* - * Index to functions. 
- */ - -static int el1_probe1(struct net_device *dev, int ioaddr); -static int el_open(struct net_device *dev); -static void el_timeout(struct net_device *dev); -static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t el_interrupt(int irq, void *dev_id); -static void el_receive(struct net_device *dev); -static void el_reset(struct net_device *dev); -static int el1_close(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static const struct ethtool_ops netdev_ethtool_ops; - -#define EL1_IO_EXTENT 16 - -#ifndef EL_DEBUG -#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */ -#endif /* Anything above 5 is wordy death! */ -#define debug el_debug -static int el_debug = EL_DEBUG; - -/* - * Board-specific info in netdev_priv(dev). - */ - -struct net_local -{ - int tx_pkt_start; /* The length of the current Tx packet. */ - int collisions; /* Tx collisions this packet */ - int loading; /* Spot buffer load collisions */ - int txing; /* True if card is in TX mode */ - spinlock_t lock; /* Serializing lock */ -}; - - -#define RX_STATUS (ioaddr + 0x06) -#define RX_CMD RX_STATUS -#define TX_STATUS (ioaddr + 0x07) -#define TX_CMD TX_STATUS -#define GP_LOW (ioaddr + 0x08) -#define GP_HIGH (ioaddr + 0x09) -#define RX_BUF_CLR (ioaddr + 0x0A) -#define RX_LOW (ioaddr + 0x0A) -#define RX_HIGH (ioaddr + 0x0B) -#define SAPROM (ioaddr + 0x0C) -#define AX_STATUS (ioaddr + 0x0E) -#define AX_CMD AX_STATUS -#define DATAPORT (ioaddr + 0x0F) -#define TX_RDY 0x08 /* In TX_STATUS */ - -#define EL1_DATAPTR 0x08 -#define EL1_RXPTR 0x0A -#define EL1_SAPROM 0x0C -#define EL1_DATAPORT 0x0f - -/* - * Writes to the ax command register. - */ - -#define AX_OFF 0x00 /* Irq off, buffer access on */ -#define AX_SYS 0x40 /* Load the buffer */ -#define AX_XMIT 0x44 /* Transmit a packet */ -#define AX_RX 0x48 /* Receive a packet */ -#define AX_LOOP 0x0C /* Loopback mode */ -#define AX_RESET 0x80 - -/* - * Normal receive mode written to RX_STATUS. We must intr on short packets - * to avoid bogus rx lockups. - */ - -#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */ -#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */ -#define RX_MULT 0xE8 /* Accept multicast packets. */ -#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */ - -/* - * TX_STATUS register. - */ - -#define TX_COLLISION 0x02 -#define TX_16COLLISIONS 0x04 -#define TX_READY 0x08 - -#define RX_RUNT 0x08 -#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */ -#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */ - diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c deleted file mode 100644 index 44b28b2..0000000 --- a/drivers/net/3c509.c +++ /dev/null @@ -1,1594 +0,0 @@ -/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */ -/* - Written 1993-2000 by Donald Becker. - - Copyright 1994-2000 by Donald Becker. - Copyright 1993 United States Government as represented by the - Director, National Security Agency. This software may be used and - distributed according to the terms of the GNU General Public License, - incorporated herein by reference. - - This driver is for the 3Com EtherLinkIII series. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - Known limitations: - Because of the way 3c509 ISA detection works it's difficult to predict - a priori which of several ISA-mode cards will be detected first. 
- - This driver does not use predictive interrupt mode, resulting in higher - packet latency but lower overhead. If interrupts are disabled for an - unusually long time it could also result in missed packets, but in - practice this rarely happens. - - - FIXES: - Alan Cox: Removed the 'Unexpected interrupt' bug. - Michael Meskes: Upgraded to Donald Becker's version 1.07. - Alan Cox: Increased the eeprom delay. Regardless of - what the docs say some people definitely - get problems with lower (but in card spec) - delays - v1.10 4/21/97 Fixed module code so that multiple cards may be detected, - other cleanups. -djb - Andrea Arcangeli: Upgraded to Donald Becker's version 1.12. - Rick Payne: Fixed SMP race condition - v1.13 9/8/97 Made 'max_interrupt_work' an insmod-settable variable -djb - v1.14 10/15/97 Avoided waiting..discard message for fast machines -djb - v1.15 1/31/98 Faster recovery for Tx errors. -djb - v1.16 2/3/98 Different ID port handling to avoid sound cards. -djb - v1.18 12Mar2001 Andrew Morton - - Avoid bogus detect of 3c590's (Andrzej Krzysztofowicz) - - Reviewed against 1.18 from scyld.com - v1.18a 17Nov2001 Jeff Garzik - - ethtool support - v1.18b 1Mar2002 Zwane Mwaikambo - - Power Management support - v1.18c 1Mar2002 David Ruggiero - - Full duplex support - v1.19 16Oct2002 Zwane Mwaikambo - - Additional ethtool features - v1.19a 28Oct2002 Davud Ruggiero - - Increase *read_eeprom udelay to workaround oops with 2 cards. - v1.19b 08Nov2002 Marc Zyngier - - Introduce driver model for EISA cards. - v1.20 04Feb2008 Ondrej Zary - - convert to isa_driver and pnp_driver and some cleanups -*/ - -#define DRV_NAME "3c509" -#define DRV_VERSION "1.20" -#define DRV_RELDATE "04Feb2008" - -/* A few values that may be tweaked. */ - -/* Time in jiffies before concluding the transmitter is hung. */ -#define TX_TIMEOUT (400*HZ/1000) - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* for udelay() */ -#include -#include -#include -#include -#include - -#include -#include -#include - -static char version[] __devinitdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; - -#ifdef EL3_DEBUG -static int el3_debug = EL3_DEBUG; -#else -static int el3_debug = 2; -#endif - -/* Used to do a global count of all the cards in the system. Must be - * a global variable so that the mca/eisa probe routines can increment - * it */ -static int el3_cards = 0; -#define EL3_MAX_CARDS 8 - -/* To minimize the size of the driver source I only define operating - constants if they are used several times. You'll need the manual - anyway if you want to understand driver details. */ -/* Offsets from base I/O address. */ -#define EL3_DATA 0x00 -#define EL3_CMD 0x0e -#define EL3_STATUS 0x0e -#define EEPROM_READ 0x80 - -#define EL3_IO_EXTENT 16 - -#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) - - -/* The top five bits written to EL3_CMD are a command, the lower - 11 bits are the parameter, if applicable. 
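   An editorial illustration, not in the original source: because the
   enum values below are pre-shifted left by 11, issuing a command is a
   single 16-bit write that combines opcode and argument, as in the
   transmit path later in this file:

	outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);

   Here '+' and '|' are interchangeable, since SetTxThreshold is 18<<11
   and the 1536-byte threshold fits entirely in the low 11 bits.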
*/ -enum c509cmd { - TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, - RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11, - TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, - FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, - SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, - SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11, - StatsDisable = 22<<11, StopCoax = 23<<11, PowerUp = 27<<11, - PowerDown = 28<<11, PowerAuto = 29<<11}; - -enum c509status { - IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, - TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, - IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000, }; - -/* The SetRxFilter command accepts the following classes: */ -enum RxFilter { - RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; - -/* Register window 1 offsets, the window used in normal operation. */ -#define TX_FIFO 0x00 -#define RX_FIFO 0x00 -#define RX_STATUS 0x08 -#define TX_STATUS 0x0B -#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */ - -#define WN0_CONF_CTRL 0x04 /* Window 0: Configuration control register */ -#define WN0_ADDR_CONF 0x06 /* Window 0: Address configuration register */ -#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */ -#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */ -#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */ -#define WN4_NETDIAG 0x06 /* Window 4: Net diagnostic */ -#define FD_ENABLE 0x8000 /* Enable full-duplex ("external loopback") */ - -/* - * Must be a power of two (we use a binary and in the - * circular queue) - */ -#define SKB_QUEUE_SIZE 64 - -enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA }; - -struct el3_private { - spinlock_t lock; - /* skb send-queue */ - int head, size; - struct sk_buff *queue[SKB_QUEUE_SIZE]; - enum el3_cardtype type; -}; -static int id_port; -static int current_tag; -static struct net_device *el3_devs[EL3_MAX_CARDS]; - -/* Parameters that may be passed into the module. */ -static int debug = -1; -static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1}; -/* Maximum events (Rx packets, etc.) to handle at each interrupt. 
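   (Editorial note: el3_interrupt() below copies this value into a
   countdown; once the budget is exhausted it logs the condition, acks
   every pending source with AckIntr | 0xFF and breaks out, so a stuck
   status line cannot keep the CPU in the handler forever.)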
*/ -static int max_interrupt_work = 10; -#ifdef CONFIG_PNP -static int nopnp; -#endif - -static int __devinit el3_common_init(struct net_device *dev); -static void el3_common_remove(struct net_device *dev); -static ushort id_read_eeprom(int index); -static ushort read_eeprom(int ioaddr, int index); -static int el3_open(struct net_device *dev); -static netdev_tx_t el3_start_xmit(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t el3_interrupt(int irq, void *dev_id); -static void update_stats(struct net_device *dev); -static struct net_device_stats *el3_get_stats(struct net_device *dev); -static int el3_rx(struct net_device *dev); -static int el3_close(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static void el3_tx_timeout (struct net_device *dev); -static void el3_down(struct net_device *dev); -static void el3_up(struct net_device *dev); -static const struct ethtool_ops ethtool_ops; -#ifdef CONFIG_PM -static int el3_suspend(struct device *, pm_message_t); -static int el3_resume(struct device *); -#else -#define el3_suspend NULL -#define el3_resume NULL -#endif - - -/* generic device remove for all device types */ -static int el3_device_remove (struct device *device); -#ifdef CONFIG_NET_POLL_CONTROLLER -static void el3_poll_controller(struct net_device *dev); -#endif - -/* Return 0 on success, 1 on error, 2 when found already detected PnP card */ -static int el3_isa_id_sequence(__be16 *phys_addr) -{ - short lrs_state = 0xff; - int i; - - /* ISA boards are detected by sending the ID sequence to the - ID_PORT. We find cards past the first by setting the 'current_tag' - on cards as they are found. Cards with their tag set will not - respond to subsequent ID sequences. */ - - outb(0x00, id_port); - outb(0x00, id_port); - for (i = 0; i < 255; i++) { - outb(lrs_state, id_port); - lrs_state <<= 1; - lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state; - } - /* For the first probe, clear all board's tag registers. */ - if (current_tag == 0) - outb(0xd0, id_port); - else /* Otherwise kill off already-found boards. */ - outb(0xd8, id_port); - if (id_read_eeprom(7) != 0x6d50) - return 1; - /* Read in EEPROM data, which does contention-select. - Only the lowest address board will stay "on-line". - 3Com got the byte order backwards. */ - for (i = 0; i < 3; i++) - phys_addr[i] = htons(id_read_eeprom(i)); -#ifdef CONFIG_PNP - if (!nopnp) { - /* The ISA PnP 3c509 cards respond to the ID sequence too. - This check is needed in order not to register them twice. */ - for (i = 0; i < el3_cards; i++) { - struct el3_private *lp = netdev_priv(el3_devs[i]); - if (lp->type == EL3_PNP && - !memcmp(phys_addr, el3_devs[i]->dev_addr, - ETH_ALEN)) { - if (el3_debug > 3) - pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n", - phys_addr[0] & 0xff, phys_addr[0] >> 8, - phys_addr[1] & 0xff, phys_addr[1] >> 8, - phys_addr[2] & 0xff, phys_addr[2] >> 8); - /* Set the adaptor tag so that the next card can be found. 
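		   Editorial sketch, not part of the original file: the ID
		   sequence written to id_port earlier in this function is a
		   255-step, 8-bit LFSR with taps 0xcf.  As a standalone
		   helper (hypothetical name) it would be:

			static void el3_write_id_sequence(int port)
			{
				short state = 0xff;
				int i;

				outb(0x00, port);
				outb(0x00, port);
				for (i = 0; i < 255; i++) {
					outb(state, port);
					state <<= 1;
					if (state & 0x100)
						state ^= 0xcf;
				}
			}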
*/ - outb(0xd0 + ++current_tag, id_port); - return 2; - } - } - } -#endif /* CONFIG_PNP */ - return 0; - -} - -static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr, - int ioaddr, int irq, int if_port, - enum el3_cardtype type) -{ - struct el3_private *lp = netdev_priv(dev); - - memcpy(dev->dev_addr, phys_addr, ETH_ALEN); - dev->base_addr = ioaddr; - dev->irq = irq; - dev->if_port = if_port; - lp->type = type; -} - -static int __devinit el3_isa_match(struct device *pdev, - unsigned int ndev) -{ - struct net_device *dev; - int ioaddr, isa_irq, if_port, err; - unsigned int iobase; - __be16 phys_addr[3]; - - while ((err = el3_isa_id_sequence(phys_addr)) == 2) - ; /* Skip to next card when PnP card found */ - if (err == 1) - return 0; - - iobase = id_read_eeprom(8); - if_port = iobase >> 14; - ioaddr = 0x200 + ((iobase & 0x1f) << 4); - if (irq[el3_cards] > 1 && irq[el3_cards] < 16) - isa_irq = irq[el3_cards]; - else - isa_irq = id_read_eeprom(9) >> 12; - - dev = alloc_etherdev(sizeof(struct el3_private)); - if (!dev) - return -ENOMEM; - - netdev_boot_setup_check(dev); - - if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) { - free_netdev(dev); - return 0; - } - - /* Set the adaptor tag so that the next card can be found. */ - outb(0xd0 + ++current_tag, id_port); - - /* Activate the adaptor at the EEPROM location. */ - outb((ioaddr >> 4) | 0xe0, id_port); - - EL3WINDOW(0); - if (inw(ioaddr) != 0x6d50) { - free_netdev(dev); - return 0; - } - - /* Free the interrupt so that some other card can use it. */ - outw(0x0f00, ioaddr + WN0_IRQ); - - el3_dev_fill(dev, phys_addr, ioaddr, isa_irq, if_port, EL3_ISA); - dev_set_drvdata(pdev, dev); - if (el3_common_init(dev)) { - free_netdev(dev); - return 0; - } - - el3_devs[el3_cards++] = dev; - return 1; -} - -static int __devexit el3_isa_remove(struct device *pdev, - unsigned int ndev) -{ - el3_device_remove(pdev); - dev_set_drvdata(pdev, NULL); - return 0; -} - -#ifdef CONFIG_PM -static int el3_isa_suspend(struct device *dev, unsigned int n, - pm_message_t state) -{ - current_tag = 0; - return el3_suspend(dev, state); -} - -static int el3_isa_resume(struct device *dev, unsigned int n) -{ - struct net_device *ndev = dev_get_drvdata(dev); - int ioaddr = ndev->base_addr, err; - __be16 phys_addr[3]; - - while ((err = el3_isa_id_sequence(phys_addr)) == 2) - ; /* Skip to next card when PnP card found */ - if (err == 1) - return 0; - /* Set the adaptor tag so that the next card can be found. */ - outb(0xd0 + ++current_tag, id_port); - /* Enable the card */ - outb((ioaddr >> 4) | 0xe0, id_port); - EL3WINDOW(0); - if (inw(ioaddr) != 0x6d50) - return 1; - /* Free the interrupt so that some other card can use it. 
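	   (Editorial note: per the WN0_IRQ definition above, the IRQ
	   selection lives in bits 12-15, so the 0x0f00 write below leaves
	   that field at zero; that appears to be what actually releases
	   the line for other boards to claim.)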
*/ - outw(0x0f00, ioaddr + WN0_IRQ); - return el3_resume(dev); -} -#endif - -static struct isa_driver el3_isa_driver = { - .match = el3_isa_match, - .remove = __devexit_p(el3_isa_remove), -#ifdef CONFIG_PM - .suspend = el3_isa_suspend, - .resume = el3_isa_resume, -#endif - .driver = { - .name = "3c509" - }, -}; -static int isa_registered; - -#ifdef CONFIG_PNP -static struct pnp_device_id el3_pnp_ids[] = { - { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */ - { .id = "TCM5091" }, /* 3Com Etherlink III */ - { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */ - { .id = "TCM5095" }, /* 3Com Etherlink III (TPO) */ - { .id = "TCM5098" }, /* 3Com Etherlink III (TPC) */ - { .id = "PNP80f7" }, /* 3Com Etherlink III compatible */ - { .id = "PNP80f8" }, /* 3Com Etherlink III compatible */ - { .id = "" } -}; -MODULE_DEVICE_TABLE(pnp, el3_pnp_ids); - -static int __devinit el3_pnp_probe(struct pnp_dev *pdev, - const struct pnp_device_id *id) -{ - short i; - int ioaddr, irq, if_port; - __be16 phys_addr[3]; - struct net_device *dev = NULL; - int err; - - ioaddr = pnp_port_start(pdev, 0); - if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-pnp")) - return -EBUSY; - irq = pnp_irq(pdev, 0); - EL3WINDOW(0); - for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i)); - if_port = read_eeprom(ioaddr, 8) >> 14; - dev = alloc_etherdev(sizeof(struct el3_private)); - if (!dev) { - release_region(ioaddr, EL3_IO_EXTENT); - return -ENOMEM; - } - SET_NETDEV_DEV(dev, &pdev->dev); - netdev_boot_setup_check(dev); - - el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP); - pnp_set_drvdata(pdev, dev); - err = el3_common_init(dev); - - if (err) { - pnp_set_drvdata(pdev, NULL); - free_netdev(dev); - return err; - } - - el3_devs[el3_cards++] = dev; - return 0; -} - -static void __devexit el3_pnp_remove(struct pnp_dev *pdev) -{ - el3_common_remove(pnp_get_drvdata(pdev)); - pnp_set_drvdata(pdev, NULL); -} - -#ifdef CONFIG_PM -static int el3_pnp_suspend(struct pnp_dev *pdev, pm_message_t state) -{ - return el3_suspend(&pdev->dev, state); -} - -static int el3_pnp_resume(struct pnp_dev *pdev) -{ - return el3_resume(&pdev->dev); -} -#endif - -static struct pnp_driver el3_pnp_driver = { - .name = "3c509", - .id_table = el3_pnp_ids, - .probe = el3_pnp_probe, - .remove = __devexit_p(el3_pnp_remove), -#ifdef CONFIG_PM - .suspend = el3_pnp_suspend, - .resume = el3_pnp_resume, -#endif -}; -static int pnp_registered; -#endif /* CONFIG_PNP */ - -#ifdef CONFIG_EISA -static struct eisa_device_id el3_eisa_ids[] = { - { "TCM5090" }, - { "TCM5091" }, - { "TCM5092" }, - { "TCM5093" }, - { "TCM5094" }, - { "TCM5095" }, - { "TCM5098" }, - { "" } -}; -MODULE_DEVICE_TABLE(eisa, el3_eisa_ids); - -static int el3_eisa_probe (struct device *device); - -static struct eisa_driver el3_eisa_driver = { - .id_table = el3_eisa_ids, - .driver = { - .name = "3c579", - .probe = el3_eisa_probe, - .remove = __devexit_p (el3_device_remove), - .suspend = el3_suspend, - .resume = el3_resume, - } -}; -static int eisa_registered; -#endif - -#ifdef CONFIG_MCA -static int el3_mca_probe(struct device *dev); - -static short el3_mca_adapter_ids[] __initdata = { - 0x627c, - 0x627d, - 0x62db, - 0x62f6, - 0x62f7, - 0x0000 -}; - -static char *el3_mca_adapter_names[] __initdata = { - "3Com 3c529 EtherLink III (10base2)", - "3Com 3c529 EtherLink III (10baseT)", - "3Com 3c529 EtherLink III (test mode)", - "3Com 3c529 EtherLink III (TP or coax)", - "3Com 3c529 EtherLink III (TP)", - NULL -}; - -static struct mca_driver el3_mca_driver = { - .id_table = 
el3_mca_adapter_ids, - .driver = { - .name = "3c529", - .bus = &mca_bus_type, - .probe = el3_mca_probe, - .remove = __devexit_p(el3_device_remove), - .suspend = el3_suspend, - .resume = el3_resume, - }, -}; -static int mca_registered; -#endif /* CONFIG_MCA */ - -static const struct net_device_ops netdev_ops = { - .ndo_open = el3_open, - .ndo_stop = el3_close, - .ndo_start_xmit = el3_start_xmit, - .ndo_get_stats = el3_get_stats, - .ndo_set_multicast_list = set_multicast_list, - .ndo_tx_timeout = el3_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = el3_poll_controller, -#endif -}; - -static int __devinit el3_common_init(struct net_device *dev) -{ - struct el3_private *lp = netdev_priv(dev); - int err; - const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; - - spin_lock_init(&lp->lock); - - if (dev->mem_start & 0x05) { /* xcvr codes 1/3/4/12 */ - dev->if_port = (dev->mem_start & 0x0f); - } else { /* xcvr codes 0/8 */ - /* use eeprom value, but save user's full-duplex selection */ - dev->if_port |= (dev->mem_start & 0x08); - } - - /* The EL3-specific entries in the device structure. */ - dev->netdev_ops = &netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - SET_ETHTOOL_OPS(dev, ðtool_ops); - - err = register_netdev(dev); - if (err) { - pr_err("Failed to register 3c5x9 at %#3.3lx, IRQ %d.\n", - dev->base_addr, dev->irq); - release_region(dev->base_addr, EL3_IO_EXTENT); - return err; - } - - pr_info("%s: 3c5x9 found at %#3.3lx, %s port, address %pM, IRQ %d.\n", - dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)], - dev->dev_addr, dev->irq); - - if (el3_debug > 0) - pr_info("%s", version); - return 0; - -} - -static void el3_common_remove (struct net_device *dev) -{ - unregister_netdev (dev); - release_region(dev->base_addr, EL3_IO_EXTENT); - free_netdev (dev); -} - -#ifdef CONFIG_MCA -static int __init el3_mca_probe(struct device *device) -{ - /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, - * heavily modified by Chris Beauregard - * (cpbeaure@csclub.uwaterloo.ca) to support standard MCA - * probing. 
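 *
 * Editorial sketch, not from the original comment: the resources come
 * straight out of the stored POS registers,
 *
 *	pos4 = mca_device_read_stored_pos(mdev, 4);
 *	pos5 = mca_device_read_stored_pos(mdev, 5);
 *	ioaddr = ((short)((pos4 & 0xfc) | 0x02)) << 8;
 *	irq = pos5 & 0x0f;
 *
 * i.e. bits 2-7 of POS byte 4 become bits 10-15 of the I/O base (with
 * bit 9 forced on) and the low nibble of POS byte 5 is the IRQ, which
 * is exactly what the function body below does before applying the
 * mca_device_transform_*() fixups.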
- * - * redone for multi-card detection by ZP Gu (zpg@castle.net) - * now works as a module */ - - short i; - int ioaddr, irq, if_port; - __be16 phys_addr[3]; - struct net_device *dev = NULL; - u_char pos4, pos5; - struct mca_device *mdev = to_mca_device(device); - int slot = mdev->slot; - int err; - - pos4 = mca_device_read_stored_pos(mdev, 4); - pos5 = mca_device_read_stored_pos(mdev, 5); - - ioaddr = ((short)((pos4&0xfc)|0x02)) << 8; - irq = pos5 & 0x0f; - - - pr_info("3c529: found %s at slot %d\n", - el3_mca_adapter_names[mdev->index], slot + 1); - - /* claim the slot */ - strncpy(mdev->name, el3_mca_adapter_names[mdev->index], - sizeof(mdev->name)); - mca_device_set_claim(mdev, 1); - - if_port = pos4 & 0x03; - - irq = mca_device_transform_irq(mdev, irq); - ioaddr = mca_device_transform_ioport(mdev, ioaddr); - if (el3_debug > 2) { - pr_debug("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port); - } - EL3WINDOW(0); - for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i)); - - dev = alloc_etherdev(sizeof (struct el3_private)); - if (dev == NULL) { - release_region(ioaddr, EL3_IO_EXTENT); - return -ENOMEM; - } - - netdev_boot_setup_check(dev); - - el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA); - dev_set_drvdata(device, dev); - err = el3_common_init(dev); - - if (err) { - dev_set_drvdata(device, NULL); - free_netdev(dev); - return -ENOMEM; - } - - el3_devs[el3_cards++] = dev; - return 0; -} - -#endif /* CONFIG_MCA */ - -#ifdef CONFIG_EISA -static int __init el3_eisa_probe (struct device *device) -{ - short i; - int ioaddr, irq, if_port; - __be16 phys_addr[3]; - struct net_device *dev = NULL; - struct eisa_device *edev; - int err; - - /* Yeepee, The driver framework is calling us ! */ - edev = to_eisa_device (device); - ioaddr = edev->base_addr; - - if (!request_region(ioaddr, EL3_IO_EXTENT, "3c579-eisa")) - return -EBUSY; - - /* Change the register set to the configuration window 0. */ - outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD); - - irq = inw(ioaddr + WN0_IRQ) >> 12; - if_port = inw(ioaddr + 6)>>14; - for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i)); - - /* Restore the "Product ID" to the EEPROM read register. */ - read_eeprom(ioaddr, 3); - - dev = alloc_etherdev(sizeof (struct el3_private)); - if (dev == NULL) { - release_region(ioaddr, EL3_IO_EXTENT); - return -ENOMEM; - } - - netdev_boot_setup_check(dev); - - el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA); - eisa_set_drvdata (edev, dev); - err = el3_common_init(dev); - - if (err) { - eisa_set_drvdata (edev, NULL); - free_netdev(dev); - return err; - } - - el3_devs[el3_cards++] = dev; - return 0; -} -#endif - -/* This remove works for all device types. - * - * The net dev must be stored in the driver data field */ -static int __devexit el3_device_remove (struct device *device) -{ - struct net_device *dev; - - dev = dev_get_drvdata(device); - - el3_common_remove (dev); - return 0; -} - -/* Read a word from the EEPROM using the regular EEPROM access register. - Assume that we are in register window zero. - */ -static ushort read_eeprom(int ioaddr, int index) -{ - outw(EEPROM_READ + index, ioaddr + 10); - /* Pause for at least 162 us. for the read to take place. - Some chips seem to require much longer */ - mdelay(2); - return inw(ioaddr + 12); -} - -/* Read a word from the EEPROM when in the ISA ID probe state. */ -static ushort id_read_eeprom(int index) -{ - int bit, word = 0; - - /* Issue read command, and pause for at least 162 us. for it to complete. 
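	   (Editorial aside: after the delay, the 16-bit word is clocked
	   out of id_port one bit per inb(), most significant bit first,
	   which is why the loop below shifts 'word' left before adding in
	   each new bit.)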
- Assume extra-fast 16Mhz bus. */ - outb(EEPROM_READ + index, id_port); - - /* Pause for at least 162 us. for the read to take place. */ - /* Some chips seem to require much longer */ - mdelay(4); - - for (bit = 15; bit >= 0; bit--) - word = (word << 1) + (inb(id_port) & 0x01); - - if (el3_debug > 3) - pr_debug(" 3c509 EEPROM word %d %#4.4x.\n", index, word); - - return word; -} - - -static int -el3_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - int i; - - outw(TxReset, ioaddr + EL3_CMD); - outw(RxReset, ioaddr + EL3_CMD); - outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); - - i = request_irq(dev->irq, el3_interrupt, 0, dev->name, dev); - if (i) - return i; - - EL3WINDOW(0); - if (el3_debug > 3) - pr_debug("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name, - dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS)); - - el3_up(dev); - - if (el3_debug > 3) - pr_debug("%s: Opened 3c509 IRQ %d status %4.4x.\n", - dev->name, dev->irq, inw(ioaddr + EL3_STATUS)); - - return 0; -} - -static void -el3_tx_timeout (struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - /* Transmitter timeout, serious problems. */ - pr_warning("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d.\n", - dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), - inw(ioaddr + TX_FREE)); - dev->stats.tx_errors++; - dev->trans_start = jiffies; /* prevent tx timeout */ - /* Issue TX_RESET and TX_START commands. */ - outw(TxReset, ioaddr + EL3_CMD); - outw(TxEnable, ioaddr + EL3_CMD); - netif_wake_queue(dev); -} - - -static netdev_tx_t -el3_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct el3_private *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long flags; - - netif_stop_queue (dev); - - dev->stats.tx_bytes += skb->len; - - if (el3_debug > 4) { - pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n", - dev->name, skb->len, inw(ioaddr + EL3_STATUS)); - } -#if 0 -#ifndef final_version - { /* Error-checking code, delete someday. */ - ushort status = inw(ioaddr + EL3_STATUS); - if (status & 0x0001 && /* IRQ line active, missed one. */ - inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */ - pr_debug("%s: Missed interrupt, status then %04x now %04x" - " Tx %2.2x Rx %4.4x.\n", dev->name, status, - inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS), - inw(ioaddr + RX_STATUS)); - /* Fake interrupt trigger by masking, acknowledge interrupts. */ - outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); - outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, - ioaddr + EL3_CMD); - outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); - } - } -#endif -#endif - /* - * We lock the driver against other processors. Note - * we don't need to lock versus the IRQ as we suspended - * that. This means that we lose the ability to take - * an RX during a TX upload. That sucks a bit with SMP - * on an original 3c509 (2K buffer) - * - * Using disable_irq stops us crapping on other - * time sensitive devices. - */ - - spin_lock_irqsave(&lp->lock, flags); - - /* Put out the doubleword header... */ - outw(skb->len, ioaddr + TX_FIFO); - outw(0x00, ioaddr + TX_FIFO); - /* ... and the packet rounded to a doubleword. */ - outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); - - if (inw(ioaddr + TX_FREE) > 1536) - netif_start_queue(dev); - else - /* Interrupt us when the FIFO has room for max-sized packet. */ - outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); - - spin_unlock_irqrestore(&lp->lock, flags); - - dev_kfree_skb (skb); - - /* Clear the Tx status stack. 
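	   Editorial note, not in the original source: TX_STATUS behaves
	   as a small stack of per-packet completion codes; each inb()
	   reads the top entry and the outb(0x00, ioaddr + TX_STATUS)
	   pops it.  The loop below drains at most three entries, bumping
	   tx_aborted_errors and issuing TxReset/TxEnable as the status
	   bits dictate.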
*/ - { - short tx_status; - int i = 4; - - while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { - if (tx_status & 0x38) dev->stats.tx_aborted_errors++; - if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); - if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); - outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ - } - } - return NETDEV_TX_OK; -} - -/* The EL3 interrupt handler. */ -static irqreturn_t -el3_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct el3_private *lp; - int ioaddr, status; - int i = max_interrupt_work; - - lp = netdev_priv(dev); - spin_lock(&lp->lock); - - ioaddr = dev->base_addr; - - if (el3_debug > 4) { - status = inw(ioaddr + EL3_STATUS); - pr_debug("%s: interrupt, status %4.4x.\n", dev->name, status); - } - - while ((status = inw(ioaddr + EL3_STATUS)) & - (IntLatch | RxComplete | StatsFull)) { - - if (status & RxComplete) - el3_rx(dev); - - if (status & TxAvailable) { - if (el3_debug > 5) - pr_debug(" TX room bit was handled.\n"); - /* There's room in the FIFO for a full-sized packet. */ - outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); - netif_wake_queue (dev); - } - if (status & (AdapterFailure | RxEarly | StatsFull | TxComplete)) { - /* Handle all uncommon interrupts. */ - if (status & StatsFull) /* Empty statistics. */ - update_stats(dev); - if (status & RxEarly) { /* Rx early is unused. */ - el3_rx(dev); - outw(AckIntr | RxEarly, ioaddr + EL3_CMD); - } - if (status & TxComplete) { /* Really Tx error. */ - short tx_status; - int i = 4; - - while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { - if (tx_status & 0x38) dev->stats.tx_aborted_errors++; - if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); - if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); - outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ - } - } - if (status & AdapterFailure) { - /* Adapter failure requires Rx reset and reinit. */ - outw(RxReset, ioaddr + EL3_CMD); - /* Set the Rx filter to the current state. */ - outw(SetRxFilter | RxStation | RxBroadcast - | (dev->flags & IFF_ALLMULTI ? RxMulticast : 0) - | (dev->flags & IFF_PROMISC ? RxProm : 0), - ioaddr + EL3_CMD); - outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */ - outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); - } - } - - if (--i < 0) { - pr_err("%s: Infinite loop in interrupt, status %4.4x.\n", - dev->name, status); - /* Clear all interrupts. */ - outw(AckIntr | 0xFF, ioaddr + EL3_CMD); - break; - } - /* Acknowledge the IRQ. */ - outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); /* Ack IRQ */ - } - - if (el3_debug > 4) { - pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, - inw(ioaddr + EL3_STATUS)); - } - spin_unlock(&lp->lock); - return IRQ_HANDLED; -} - - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling receive - used by netconsole and other diagnostic tools - * to allow network i/o with interrupts disabled. - */ -static void el3_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - el3_interrupt(dev->irq, dev); - enable_irq(dev->irq); -} -#endif - -static struct net_device_stats * -el3_get_stats(struct net_device *dev) -{ - struct el3_private *lp = netdev_priv(dev); - unsigned long flags; - - /* - * This is fast enough not to bother with disable IRQ - * stuff. - */ - - spin_lock_irqsave(&lp->lock, flags); - update_stats(dev); - spin_unlock_irqrestore(&lp->lock, flags); - return &dev->stats; -} - -/* Update statistics. 
We change to register window 6, so this should be run - single-threaded if the device is active. This is expected to be a rare - operation, and it's simpler for the rest of the driver to assume that - window 1 is always valid rather than use a special window-state variable. - */ -static void update_stats(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - if (el3_debug > 5) - pr_debug(" Updating the statistics.\n"); - /* Turn off statistics updates while reading. */ - outw(StatsDisable, ioaddr + EL3_CMD); - /* Switch to the stats window, and read everything. */ - EL3WINDOW(6); - dev->stats.tx_carrier_errors += inb(ioaddr + 0); - dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); - /* Multiple collisions. */ inb(ioaddr + 2); - dev->stats.collisions += inb(ioaddr + 3); - dev->stats.tx_window_errors += inb(ioaddr + 4); - dev->stats.rx_fifo_errors += inb(ioaddr + 5); - dev->stats.tx_packets += inb(ioaddr + 6); - /* Rx packets */ inb(ioaddr + 7); - /* Tx deferrals */ inb(ioaddr + 8); - inw(ioaddr + 10); /* Total Rx and Tx octets. */ - inw(ioaddr + 12); - - /* Back to window 1, and turn statistics back on. */ - EL3WINDOW(1); - outw(StatsEnable, ioaddr + EL3_CMD); -} - -static int -el3_rx(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - short rx_status; - - if (el3_debug > 5) - pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n", - inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); - while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) { - if (rx_status & 0x4000) { /* Error, update stats. */ - short error = rx_status & 0x3800; - - outw(RxDiscard, ioaddr + EL3_CMD); - dev->stats.rx_errors++; - switch (error) { - case 0x0000: dev->stats.rx_over_errors++; break; - case 0x0800: dev->stats.rx_length_errors++; break; - case 0x1000: dev->stats.rx_frame_errors++; break; - case 0x1800: dev->stats.rx_length_errors++; break; - case 0x2000: dev->stats.rx_frame_errors++; break; - case 0x2800: dev->stats.rx_crc_errors++; break; - } - } else { - short pkt_len = rx_status & 0x7ff; - struct sk_buff *skb; - - skb = dev_alloc_skb(pkt_len+5); - if (el3_debug > 4) - pr_debug("Receiving packet size %d status %4.4x.\n", - pkt_len, rx_status); - if (skb != NULL) { - skb_reserve(skb, 2); /* Align IP on 16 byte */ - - /* 'skb->data' points to the start of sk_buff data area. */ - insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len), - (pkt_len + 3) >> 2); - - outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ - skb->protocol = eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_bytes += pkt_len; - dev->stats.rx_packets++; - continue; - } - outw(RxDiscard, ioaddr + EL3_CMD); - dev->stats.rx_dropped++; - if (el3_debug) - pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n", - dev->name, pkt_len); - } - inw(ioaddr + EL3_STATUS); /* Delay. */ - while (inw(ioaddr + EL3_STATUS) & 0x1000) - pr_debug(" Waiting for 3c509 to discard packet, status %x.\n", - inw(ioaddr + EL3_STATUS) ); - } - - return 0; -} - -/* - * Set or clear the multicast filter for this adaptor. 
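*/

The error path of el3_rx() above demultiplexes bits 11-13 of RX_STATUS into the standard net_device counters. The same mapping as a compilable sketch; the sample status word is invented:

#include <stdio.h>

static const char *rx_error_counter(unsigned short rx_status)
{
    switch (rx_status & 0x3800) {   /* the same cases as el3_rx() */
    case 0x0000: return "rx_over_errors";
    case 0x0800: return "rx_length_errors";
    case 0x1000: return "rx_frame_errors";
    case 0x1800: return "rx_length_errors";
    case 0x2000: return "rx_frame_errors";
    case 0x2800: return "rx_crc_errors";
    default:     return "rx_errors only"; /* codes the driver leaves unmapped */
    }
}

int main(void)
{
    unsigned short rx_status = 0x6820;  /* error bit 0x4000 set, code 0x2800 */

    if (rx_status & 0x4000)
        printf("status %#4.4x increments %s\n",
               rx_status, rx_error_counter(rx_status));
    return 0;
}

Every error path also increments rx_errors and pops the offending packet with RxDiscard.

/* Set or clear the multicast filter for this adaptor: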
- */ -static void -set_multicast_list(struct net_device *dev) -{ - unsigned long flags; - struct el3_private *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int mc_count = netdev_mc_count(dev); - - if (el3_debug > 1) { - static int old; - if (old != mc_count) { - old = mc_count; - pr_debug("%s: Setting Rx mode to %d addresses.\n", - dev->name, mc_count); - } - } - spin_lock_irqsave(&lp->lock, flags); - if (dev->flags&IFF_PROMISC) { - outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, - ioaddr + EL3_CMD); - } - else if (mc_count || (dev->flags&IFF_ALLMULTI)) { - outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD); - } - else - outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); - spin_unlock_irqrestore(&lp->lock, flags); -} - -static int -el3_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct el3_private *lp = netdev_priv(dev); - - if (el3_debug > 2) - pr_debug("%s: Shutting down ethercard.\n", dev->name); - - el3_down(dev); - - free_irq(dev->irq, dev); - /* Switching back to window 0 disables the IRQ. */ - EL3WINDOW(0); - if (lp->type != EL3_EISA) { - /* But we explicitly zero the IRQ line select anyway. Don't do - * it on EISA cards, it prevents the module from getting an - * IRQ after unload+reload... */ - outw(0x0f00, ioaddr + WN0_IRQ); - } - - return 0; -} - -static int -el3_link_ok(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - u16 tmp; - - EL3WINDOW(4); - tmp = inw(ioaddr + WN4_MEDIA); - EL3WINDOW(1); - return tmp & (1<<11); -} - -static int -el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - u16 tmp; - int ioaddr = dev->base_addr; - - EL3WINDOW(0); - /* obtain current transceiver via WN4_MEDIA? */ - tmp = inw(ioaddr + WN0_ADDR_CONF); - ecmd->transceiver = XCVR_INTERNAL; - switch (tmp >> 14) { - case 0: - ecmd->port = PORT_TP; - break; - case 1: - ecmd->port = PORT_AUI; - ecmd->transceiver = XCVR_EXTERNAL; - break; - case 3: - ecmd->port = PORT_BNC; - default: - break; - } - - ecmd->duplex = DUPLEX_HALF; - ecmd->supported = 0; - tmp = inw(ioaddr + WN0_CONF_CTRL); - if (tmp & (1<<13)) - ecmd->supported |= SUPPORTED_AUI; - if (tmp & (1<<12)) - ecmd->supported |= SUPPORTED_BNC; - if (tmp & (1<<9)) { - ecmd->supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full; /* hmm... 
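*/

set_multicast_list() above builds one command word from the interface flags. The sketch below reproduces that selection; the numeric encodings are taken from the corkscrew enums later in this patch (command in the top five bits of EL3_CMD, parameters in the low eleven), and the 3c509's own definitions, which live in a part of the file this patch does not show, are assumed to match. IFF_PROMISC and IFF_ALLMULTI carry their <linux/if.h> values.

#include <stdio.h>

enum { SetRxFilter = 16 << 11 };                            /* command bits */
enum { RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };

#define IFF_PROMISC  0x100
#define IFF_ALLMULTI 0x200

static unsigned short rx_filter_cmd(unsigned int flags, int mc_count)
{
    if (flags & IFF_PROMISC)
        return SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm;
    if (mc_count || (flags & IFF_ALLMULTI))
        return SetRxFilter | RxStation | RxMulticast | RxBroadcast;
    return SetRxFilter | RxStation | RxBroadcast;
}

int main(void)
{
    printf("promisc:   %#6.6x\n", rx_filter_cmd(IFF_PROMISC, 0));
    printf("multicast: %#6.6x\n", rx_filter_cmd(0, 3));
    printf("default:   %#6.6x\n", rx_filter_cmd(0, 0));
    return 0;
}

/* Window 4's net-diagnostics register reports the live duplex setting: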
*/ - EL3WINDOW(4); - tmp = inw(ioaddr + WN4_NETDIAG); - if (tmp & FD_ENABLE) - ecmd->duplex = DUPLEX_FULL; - } - - ethtool_cmd_speed_set(ecmd, SPEED_10); - EL3WINDOW(1); - return 0; -} - -static int -el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - u16 tmp; - int ioaddr = dev->base_addr; - - if (ecmd->speed != SPEED_10) - return -EINVAL; - if ((ecmd->duplex != DUPLEX_HALF) && (ecmd->duplex != DUPLEX_FULL)) - return -EINVAL; - if ((ecmd->transceiver != XCVR_INTERNAL) && (ecmd->transceiver != XCVR_EXTERNAL)) - return -EINVAL; - - /* change XCVR type */ - EL3WINDOW(0); - tmp = inw(ioaddr + WN0_ADDR_CONF); - switch (ecmd->port) { - case PORT_TP: - tmp &= ~(3<<14); - dev->if_port = 0; - break; - case PORT_AUI: - tmp |= (1<<14); - dev->if_port = 1; - break; - case PORT_BNC: - tmp |= (3<<14); - dev->if_port = 3; - break; - default: - return -EINVAL; - } - - outw(tmp, ioaddr + WN0_ADDR_CONF); - if (dev->if_port == 3) { - /* fire up the DC-DC convertor if BNC gets enabled */ - tmp = inw(ioaddr + WN0_ADDR_CONF); - if (tmp & (3 << 14)) { - outw(StartCoax, ioaddr + EL3_CMD); - udelay(800); - } else - return -EIO; - } - - EL3WINDOW(4); - tmp = inw(ioaddr + WN4_NETDIAG); - if (ecmd->duplex == DUPLEX_FULL) - tmp |= FD_ENABLE; - else - tmp &= ~FD_ENABLE; - outw(tmp, ioaddr + WN4_NETDIAG); - EL3WINDOW(1); - - return 0; -} - -static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); -} - -static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct el3_private *lp = netdev_priv(dev); - int ret; - - spin_lock_irq(&lp->lock); - ret = el3_netdev_get_ecmd(dev, ecmd); - spin_unlock_irq(&lp->lock); - return ret; -} - -static int el3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct el3_private *lp = netdev_priv(dev); - int ret; - - spin_lock_irq(&lp->lock); - ret = el3_netdev_set_ecmd(dev, ecmd); - spin_unlock_irq(&lp->lock); - return ret; -} - -static u32 el3_get_link(struct net_device *dev) -{ - struct el3_private *lp = netdev_priv(dev); - u32 ret; - - spin_lock_irq(&lp->lock); - ret = el3_link_ok(dev); - spin_unlock_irq(&lp->lock); - return ret; -} - -static u32 el3_get_msglevel(struct net_device *dev) -{ - return el3_debug; -} - -static void el3_set_msglevel(struct net_device *dev, u32 v) -{ - el3_debug = v; -} - -static const struct ethtool_ops ethtool_ops = { - .get_drvinfo = el3_get_drvinfo, - .get_settings = el3_get_settings, - .set_settings = el3_set_settings, - .get_link = el3_get_link, - .get_msglevel = el3_get_msglevel, - .set_msglevel = el3_set_msglevel, -}; - -static void -el3_down(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - netif_stop_queue(dev); - - /* Turn off statistics ASAP. We update lp->stats below. */ - outw(StatsDisable, ioaddr + EL3_CMD); - - /* Disable the receiver and transmitter. */ - outw(RxDisable, ioaddr + EL3_CMD); - outw(TxDisable, ioaddr + EL3_CMD); - - if (dev->if_port == 3) - /* Turn off thinnet power. Green! */ - outw(StopCoax, ioaddr + EL3_CMD); - else if (dev->if_port == 0) { - /* Disable link beat and jabber, if_port may change here next open(). 
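*/

el3_netdev_get_ecmd() and el3_netdev_set_ecmd() above treat the top two bits of the window 0 address-configuration register as the transceiver select (0 = TP, 1 = AUI, 3 = BNC). A round-trip sketch of that field follows; note that it always clears the field before setting it, whereas the set path above only does so for TP, so a BNC-to-AUI change there appears to leave both bits set.

#include <stdio.h>

enum port { PORT_TP = 0, PORT_AUI = 1, PORT_BNC = 3 };

static unsigned short set_xcvr(unsigned short addr_conf, enum port p)
{
    addr_conf &= ~(3 << 14);        /* clear the old selection first */
    return addr_conf | (p << 14);
}

int main(void)
{
    unsigned short reg = set_xcvr(0x0000, PORT_BNC);

    printf("addr_conf %#4.4x selects port %d\n", reg, reg >> 14);
    reg = set_xcvr(reg, PORT_AUI);
    printf("addr_conf %#4.4x selects port %d\n", reg, reg >> 14);
    return 0;
}

/* Disable link beat and jabber; if_port may change before the next open():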
*/ - EL3WINDOW(4); - outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA); - } - - outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); - - update_stats(dev); -} - -static void -el3_up(struct net_device *dev) -{ - int i, sw_info, net_diag; - int ioaddr = dev->base_addr; - - /* Activating the board required and does no harm otherwise */ - outw(0x0001, ioaddr + 4); - - /* Set the IRQ line. */ - outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ); - - /* Set the station address in window 2 each time opened. */ - EL3WINDOW(2); - - for (i = 0; i < 6; i++) - outb(dev->dev_addr[i], ioaddr + i); - - if ((dev->if_port & 0x03) == 3) /* BNC interface */ - /* Start the thinnet transceiver. We should really wait 50ms...*/ - outw(StartCoax, ioaddr + EL3_CMD); - else if ((dev->if_port & 0x03) == 0) { /* 10baseT interface */ - /* Combine secondary sw_info word (the adapter level) and primary - sw_info word (duplex setting plus other useless bits) */ - EL3WINDOW(0); - sw_info = (read_eeprom(ioaddr, 0x14) & 0x400f) | - (read_eeprom(ioaddr, 0x0d) & 0xBff0); - - EL3WINDOW(4); - net_diag = inw(ioaddr + WN4_NETDIAG); - net_diag = (net_diag | FD_ENABLE); /* temporarily assume full-duplex will be set */ - pr_info("%s: ", dev->name); - switch (dev->if_port & 0x0c) { - case 12: - /* force full-duplex mode if 3c5x9b */ - if (sw_info & 0x000f) { - pr_cont("Forcing 3c5x9b full-duplex mode"); - break; - } - case 8: - /* set full-duplex mode based on eeprom config setting */ - if ((sw_info & 0x000f) && (sw_info & 0x8000)) { - pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)"); - break; - } - default: - /* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */ - pr_cont("Setting 3c5x9/3c5x9B half-duplex mode"); - net_diag = (net_diag & ~FD_ENABLE); /* disable full duplex */ - } - - outw(net_diag, ioaddr + WN4_NETDIAG); - pr_cont(" if_port: %d, sw_info: %4.4x\n", dev->if_port, sw_info); - if (el3_debug > 3) - pr_debug("%s: 3c5x9 net diag word is now: %4.4x.\n", dev->name, net_diag); - /* Enable link beat and jabber check. */ - outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA); - } - - /* Switch to the stats window, and clear all stats by reading. */ - outw(StatsDisable, ioaddr + EL3_CMD); - EL3WINDOW(6); - for (i = 0; i < 9; i++) - inb(ioaddr + i); - inw(ioaddr + 10); - inw(ioaddr + 12); - - /* Switch to register set 1 for normal use. */ - EL3WINDOW(1); - - /* Accept b-case and phys addr only. */ - outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); - outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ - - outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ - outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ - /* Allow status bits to be seen. */ - outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); - /* Ack all pending events, and set active indicator mask. 
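*/

The duplex decision in el3_up() above leans on two deliberate switch fall-throughs, which are easy to misread. The same logic distilled into a pure function: bits 3:2 of if_port select the policy, the low nibble of sw_info marks a 3c5x9B, and bit 15 is the EEPROM full-duplex configuration bit. The sample inputs are invented.

#include <stdio.h>

static int full_duplex(int if_port, unsigned int sw_info)
{
    switch (if_port & 0x0c) {
    case 12:                        /* forced full-duplex, B revision only */
        if (sw_info & 0x000f)
            return 1;
        /* fall through */
    case 8:                         /* honour the EEPROM setting */
        if ((sw_info & 0x000f) && (sw_info & 0x8000))
            return 1;
        /* fall through */
    default:                        /* xcvr 0/4, or an old non-B 3c5x9 */
        return 0;
    }
}

int main(void)
{
    printf("forced, B-rev:  %d\n", full_duplex(12, 0x0001));
    printf("eeprom, B-rev:  %d\n", full_duplex(8, 0x8001));
    printf("plain 3c5x9:    %d\n", full_duplex(8, 0x0000));
    return 0;
}

/* Ack all pending events, and set the active indicator mask: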
*/ - outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, - ioaddr + EL3_CMD); - outw(SetIntrEnb | IntLatch|TxAvailable|TxComplete|RxComplete|StatsFull, - ioaddr + EL3_CMD); - - netif_start_queue(dev); -} - -/* Power Management support functions */ -#ifdef CONFIG_PM - -static int -el3_suspend(struct device *pdev, pm_message_t state) -{ - unsigned long flags; - struct net_device *dev; - struct el3_private *lp; - int ioaddr; - - dev = dev_get_drvdata(pdev); - lp = netdev_priv(dev); - ioaddr = dev->base_addr; - - spin_lock_irqsave(&lp->lock, flags); - - if (netif_running(dev)) - netif_device_detach(dev); - - el3_down(dev); - outw(PowerDown, ioaddr + EL3_CMD); - - spin_unlock_irqrestore(&lp->lock, flags); - return 0; -} - -static int -el3_resume(struct device *pdev) -{ - unsigned long flags; - struct net_device *dev; - struct el3_private *lp; - int ioaddr; - - dev = dev_get_drvdata(pdev); - lp = netdev_priv(dev); - ioaddr = dev->base_addr; - - spin_lock_irqsave(&lp->lock, flags); - - outw(PowerUp, ioaddr + EL3_CMD); - EL3WINDOW(0); - el3_up(dev); - - if (netif_running(dev)) - netif_device_attach(dev); - - spin_unlock_irqrestore(&lp->lock, flags); - return 0; -} - -#endif /* CONFIG_PM */ - -module_param(debug,int, 0); -module_param_array(irq, int, NULL, 0); -module_param(max_interrupt_work, int, 0); -MODULE_PARM_DESC(debug, "debug level (0-6)"); -MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); -MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt"); -#ifdef CONFIG_PNP -module_param(nopnp, int, 0); -MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)"); -#endif /* CONFIG_PNP */ -MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B, 3c529, 3c579) ethernet driver"); -MODULE_LICENSE("GPL"); - -static int __init el3_init_module(void) -{ - int ret = 0; - - if (debug >= 0) - el3_debug = debug; - -#ifdef CONFIG_PNP - if (!nopnp) { - ret = pnp_register_driver(&el3_pnp_driver); - if (!ret) - pnp_registered = 1; - } -#endif - /* Select an open I/O location at 0x1*0 to do ISA contention select. 
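*/

The loop just below walks the candidate ID ports exactly as described: every 16th port from 0x110 up to, but not including, 0x200. In this sketch probe() is a stand-in for the request_region()/outb(0x00)/outb(0xff)/inb() bus-float test, and it pretends that 0x130 responds:

#include <stdio.h>

static int probe(unsigned int port)
{
    return port == 0x130;           /* hypothetical responding port */
}

int main(void)
{
    unsigned int id_port;

    for (id_port = 0x110; id_port < 0x200; id_port += 0x10)
        if (probe(id_port))
            break;

    if (id_port >= 0x200)
        printf("no free ID port for 3c509 activation\n");
    else
        printf("using ID port %#x\n", id_port);
    return 0;
}

/* Select an open I/O location at 0x1*0 to do the ISA contention select.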
*/ - /* Start with 0x110 to avoid some sound cards.*/ - for (id_port = 0x110 ; id_port < 0x200; id_port += 0x10) { - if (!request_region(id_port, 1, "3c509-control")) - continue; - outb(0x00, id_port); - outb(0xff, id_port); - if (inb(id_port) & 0x01) - break; - else - release_region(id_port, 1); - } - if (id_port >= 0x200) { - id_port = 0; - pr_err("No I/O port available for 3c509 activation.\n"); - } else { - ret = isa_register_driver(&el3_isa_driver, EL3_MAX_CARDS); - if (!ret) - isa_registered = 1; - } -#ifdef CONFIG_EISA - ret = eisa_driver_register(&el3_eisa_driver); - if (!ret) - eisa_registered = 1; -#endif -#ifdef CONFIG_MCA - ret = mca_register_driver(&el3_mca_driver); - if (!ret) - mca_registered = 1; -#endif - -#ifdef CONFIG_PNP - if (pnp_registered) - ret = 0; -#endif - if (isa_registered) - ret = 0; -#ifdef CONFIG_EISA - if (eisa_registered) - ret = 0; -#endif -#ifdef CONFIG_MCA - if (mca_registered) - ret = 0; -#endif - return ret; -} - -static void __exit el3_cleanup_module(void) -{ -#ifdef CONFIG_PNP - if (pnp_registered) - pnp_unregister_driver(&el3_pnp_driver); -#endif - if (isa_registered) - isa_unregister_driver(&el3_isa_driver); - if (id_port) - release_region(id_port, 1); -#ifdef CONFIG_EISA - if (eisa_registered) - eisa_driver_unregister(&el3_eisa_driver); -#endif -#ifdef CONFIG_MCA - if (mca_registered) - mca_unregister_driver(&el3_mca_driver); -#endif -} - -module_init (el3_init_module); -module_exit (el3_cleanup_module); diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c deleted file mode 100644 index d2bb4b2..0000000 --- a/drivers/net/3c515.c +++ /dev/null @@ -1,1584 +0,0 @@ -/* - Written 1997-1998 by Donald Becker. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - This driver is for the 3Com ISA EtherLink XL "Corkscrew" 3c515 ethercard. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - - 2000/2/2- Added support for kernel-level ISAPnP - by Stephen Frost and Alessandro Zummo - Cleaned up for 2.3.x/softnet by Jeff Garzik and Alan Cox. - - 2001/11/17 - Added ethtool support (jgarzik) - - 2002/10/28 - Locking updates for 2.5 (alan@lxorguk.ukuu.org.uk) - -*/ - -#define DRV_NAME "3c515" -#define DRV_VERSION "0.99t-ac" -#define DRV_RELDATE "28-Oct-2002" - -static char *version = -DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " becker@scyld.com and others\n"; - -#define CORKSCREW 1 - -/* "Knobs" that adjust features and parameters. */ -/* Set the copy breakpoint for the copy-only-tiny-frames scheme. - Setting to > 1512 effectively disables this feature. */ -static int rx_copybreak = 200; - -/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ -static const int mtu = 1500; - -/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ -static int max_interrupt_work = 20; - -/* Enable the automatic media selection code -- usually set. */ -#define AUTOMEDIA 1 - -/* Allow the use of fragment bus master transfers instead of only - programmed-I/O for Vortex cards. Full-bus-master transfers are always - enabled by default on Boomerang cards. If VORTEX_BUS_MASTER is defined, - the feature may be turned on using 'options'. */ -#define VORTEX_BUS_MASTER - -/* A few values that may be tweaked. */ -/* Keep the ring sizes a power of two for efficiency. 
*/ -#define TX_RING_SIZE 16 -#define RX_RING_SIZE 16 -#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define NEW_MULTICAST -#include - -#define MAX_UNITS 8 - -MODULE_AUTHOR("Donald Becker "); -MODULE_DESCRIPTION("3Com 3c515 Corkscrew driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -/* "Knobs" for adjusting internal parameters. */ -/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */ -#define DRIVER_DEBUG 1 -/* Some values here only for performance evaluation and path-coverage - debugging. */ -static int rx_nocopy, rx_copy, queued_packet; - -/* Number of times to check to see if the Tx FIFO has space, used in some - limited cases. */ -#define WAIT_TX_AVAIL 200 - -/* Operational parameter that usually are not changed. */ -#define TX_TIMEOUT ((4*HZ)/10) /* Time in jiffies before concluding Tx hung */ - -/* The size here is somewhat misleading: the Corkscrew also uses the ISA - aliased registers at +0x400. - */ -#define CORKSCREW_TOTAL_SIZE 0x20 - -#ifdef DRIVER_DEBUG -static int corkscrew_debug = DRIVER_DEBUG; -#else -static int corkscrew_debug = 1; -#endif - -#define CORKSCREW_ID 10 - -/* - Theory of Operation - -I. Board Compatibility - -This device driver is designed for the 3Com 3c515 ISA Fast EtherLink XL, -3Com's ISA bus adapter for Fast Ethernet. Due to the unique I/O port layout, -it's not practical to integrate this driver with the other EtherLink drivers. - -II. Board-specific settings - -The Corkscrew has an EEPROM for configuration, but no special settings are -needed for Linux. - -III. Driver operation - -The 3c515 series use an interface that's very similar to the 3c900 "Boomerang" -PCI cards, with the bus master interface extensively modified to work with -the ISA bus. - -The card is capable of full-bus-master transfers with separate -lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, -DEC Tulip and Intel Speedo3. - -This driver uses a "RX_COPYBREAK" scheme rather than a fixed intermediate -receive buffer. This scheme allocates full-sized skbuffs as receive -buffers. The value RX_COPYBREAK is used as the copying breakpoint: it is -chosen to trade-off the memory wasted by passing the full-sized skbuff to -the queue layer for all frames vs. the copying cost of copying a frame to a -correctly-sized skbuff. - - -IIIC. Synchronization -The driver runs as two independent, single-threaded flows of control. One -is the send-packet routine, which enforces single-threaded use by the netif -layer. The other thread is the interrupt handler, which is single -threaded by the hardware and other software. - -IV. Notes - -Thanks to Terry Murphy of 3Com for providing documentation and a development -board. - -The names "Vortex", "Boomerang" and "Corkscrew" are the internal 3Com -project names. I use these names to eliminate confusion -- 3Com product -numbers and names are very similar and often confused. - -The new chips support both ethernet (1.5K) and FDDI (4.5K) frame sizes! -This driver only supports ethernet frames because of the recent MTU limit -of 1.5K, but the changes to support 4.5K are minimal. -*/ - -/* Operational definitions. - These are not used by other compilation units and thus are not - exported in a ".h" file. - - First the windows. 
There are eight register windows, with the command - and status registers available in each. - */ -#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) -#define EL3_CMD 0x0e -#define EL3_STATUS 0x0e - -/* The top five bits written to EL3_CMD are a command, the lower - 11 bits are the parameter, if applicable. - Note that 11 parameters bits was fine for ethernet, but the new chips - can handle FDDI length frames (~4500 octets) and now parameters count - 32-bit 'Dwords' rather than octets. */ - -enum corkscrew_cmd { - TotalReset = 0 << 11, SelectWindow = 1 << 11, StartCoax = 2 << 11, - RxDisable = 3 << 11, RxEnable = 4 << 11, RxReset = 5 << 11, - UpStall = 6 << 11, UpUnstall = (6 << 11) + 1, DownStall = (6 << 11) + 2, - DownUnstall = (6 << 11) + 3, RxDiscard = 8 << 11, TxEnable = 9 << 11, - TxDisable = 10 << 11, TxReset = 11 << 11, FakeIntr = 12 << 11, - AckIntr = 13 << 11, SetIntrEnb = 14 << 11, SetStatusEnb = 15 << 11, - SetRxFilter = 16 << 11, SetRxThreshold = 17 << 11, - SetTxThreshold = 18 << 11, SetTxStart = 19 << 11, StartDMAUp = 20 << 11, - StartDMADown = (20 << 11) + 1, StatsEnable = 21 << 11, - StatsDisable = 22 << 11, StopCoax = 23 << 11, -}; - -/* The SetRxFilter command accepts the following classes: */ -enum RxFilter { - RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 -}; - -/* Bits in the general status register. */ -enum corkscrew_status { - IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, - TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, - IntReq = 0x0040, StatsFull = 0x0080, - DMADone = 1 << 8, DownComplete = 1 << 9, UpComplete = 1 << 10, - DMAInProgress = 1 << 11, /* DMA controller is still busy. */ - CmdInProgress = 1 << 12, /* EL3_CMD is still busy. */ -}; - -/* Register window 1 offsets, the window used in normal operation. - On the Corkscrew this window is always mapped at offsets 0x10-0x1f. */ -enum Window1 { - TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, - RxStatus = 0x18, Timer = 0x1A, TxStatus = 0x1B, - TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */ -}; -enum Window0 { - Wn0IRQ = 0x08, -#if defined(CORKSCREW) - Wn0EepromCmd = 0x200A, /* Corkscrew EEPROM command register. */ - Wn0EepromData = 0x200C, /* Corkscrew EEPROM results register. */ -#else - Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */ - Wn0EepromData = 12, /* Window 0: EEPROM results register. */ -#endif -}; -enum Win0_EEPROM_bits { - EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0, - EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ - EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ -}; - -/* EEPROM locations. */ -enum eeprom_offset { - PhysAddr01 = 0, PhysAddr23 = 1, PhysAddr45 = 2, ModelID = 3, - EtherLink3ID = 7, -}; - -enum Window3 { /* Window 3: MAC/config bits. */ - Wn3_Config = 0, Wn3_MAC_Ctrl = 6, Wn3_Options = 8, -}; -enum wn3_config { - Ram_size = 7, - Ram_width = 8, - Ram_speed = 0x30, - Rom_size = 0xc0, - Ram_split_shift = 16, - Ram_split = 3 << Ram_split_shift, - Xcvr_shift = 20, - Xcvr = 7 << Xcvr_shift, - Autoselect = 0x1000000, -}; - -enum Window4 { - Wn4_NetDiag = 6, Wn4_Media = 10, /* Window 4: Xcvr/media bits. */ -}; -enum Win4_Media_bits { - Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */ - Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */ - Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */ - Media_LnkBeat = 0x0800, -}; -enum Window7 { /* Window 7: Bus Master control. 
*/ - Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12, -}; - -/* Boomerang-style bus master control registers. Note ISA aliases! */ -enum MasterCtrl { - PktStatus = 0x400, DownListPtr = 0x404, FragAddr = 0x408, FragLen = - 0x40c, - TxFreeThreshold = 0x40f, UpPktStatus = 0x410, UpListPtr = 0x418, -}; - -/* The Rx and Tx descriptor lists. - Caution Alpha hackers: these types are 32 bits! Note also the 8 byte - alignment contraint on tx_ring[] and rx_ring[]. */ -struct boom_rx_desc { - u32 next; - s32 status; - u32 addr; - s32 length; -}; - -/* Values for the Rx status entry. */ -enum rx_desc_status { - RxDComplete = 0x00008000, RxDError = 0x4000, - /* See boomerang_rx() for actual error bits */ -}; - -struct boom_tx_desc { - u32 next; - s32 status; - u32 addr; - s32 length; -}; - -struct corkscrew_private { - const char *product_name; - struct list_head list; - struct net_device *our_dev; - /* The Rx and Tx rings are here to keep them quad-word-aligned. */ - struct boom_rx_desc rx_ring[RX_RING_SIZE]; - struct boom_tx_desc tx_ring[TX_RING_SIZE]; - /* The addresses of transmit- and receive-in-place skbuffs. */ - struct sk_buff *rx_skbuff[RX_RING_SIZE]; - struct sk_buff *tx_skbuff[TX_RING_SIZE]; - unsigned int cur_rx, cur_tx; /* The next free ring entry */ - unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */ - struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ - struct timer_list timer; /* Media selection timer. */ - int capabilities ; /* Adapter capabilities word. */ - int options; /* User-settable misc. driver options. */ - int last_rx_packets; /* For media autoselection. */ - unsigned int available_media:8, /* From Wn3_Options */ - media_override:3, /* Passed-in media type. */ - default_media:3, /* Read from the EEPROM. */ - full_duplex:1, autoselect:1, bus_master:1, /* Vortex can only do a fragment bus-m. */ - full_bus_master_tx:1, full_bus_master_rx:1, /* Boomerang */ - tx_full:1; - spinlock_t lock; - struct device *dev; -}; - -/* The action to take with a media selection timer tick. - Note that we deviate from the 3Com order by checking 10base2 before AUI. - */ -enum xcvr_types { - XCVR_10baseT = 0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx, - XCVR_100baseFx, XCVR_MII = 6, XCVR_Default = 8, -}; - -static struct media_table { - char *name; - unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ - mask:8, /* The transceiver-present bit in Wn3_Config. */ - next:8; /* The media type to try next. */ - short wait; /* Time before we check media status. 
*/ -} media_tbl[] = { - { "10baseT", Media_10TP, 0x08, XCVR_10base2, (14 * HZ) / 10 }, - { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1 * HZ) / 10}, - { "undefined", 0, 0x80, XCVR_10baseT, 10000}, - { "10base2", 0, 0x10, XCVR_AUI, (1 * HZ) / 10}, - { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14 * HZ) / 10}, - { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14 * HZ) / 10}, - { "MII", 0, 0x40, XCVR_10baseT, 3 * HZ}, - { "undefined", 0, 0x01, XCVR_10baseT, 10000}, - { "Default", 0, 0xFF, XCVR_10baseT, 10000}, -}; - -#ifdef __ISAPNP__ -static struct isapnp_device_id corkscrew_isapnp_adapters[] = { - { ISAPNP_ANY_ID, ISAPNP_ANY_ID, - ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5051), - (long) "3Com Fast EtherLink ISA" }, - { } /* terminate list */ -}; - -MODULE_DEVICE_TABLE(isapnp, corkscrew_isapnp_adapters); - -static int nopnp; -#endif /* __ISAPNP__ */ - -static struct net_device *corkscrew_scan(int unit); -static int corkscrew_setup(struct net_device *dev, int ioaddr, - struct pnp_dev *idev, int card_number); -static int corkscrew_open(struct net_device *dev); -static void corkscrew_timer(unsigned long arg); -static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static int corkscrew_rx(struct net_device *dev); -static void corkscrew_timeout(struct net_device *dev); -static int boomerang_rx(struct net_device *dev); -static irqreturn_t corkscrew_interrupt(int irq, void *dev_id); -static int corkscrew_close(struct net_device *dev); -static void update_stats(int addr, struct net_device *dev); -static struct net_device_stats *corkscrew_get_stats(struct net_device *dev); -static void set_rx_mode(struct net_device *dev); -static const struct ethtool_ops netdev_ethtool_ops; - - -/* - Unfortunately maximizing the shared code between the integrated and - module version of the driver results in a complicated set of initialization - procedures. - init_module() -- modules / tc59x_init() -- built-in - The wrappers for corkscrew_scan() - corkscrew_scan() The common routine that scans for PCI and EISA cards - corkscrew_found_device() Allocate a device structure when we find a card. - Different versions exist for modules and built-in. - corkscrew_probe1() Fill in the device structure -- this is separated - so that the modules code can put it in dev->init. -*/ -/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ -/* Note: this is the only limit on the number of cards supported!! */ -static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1, }; - -#ifdef MODULE -static int debug = -1; - -module_param(debug, int, 0); -module_param_array(options, int, NULL, 0); -module_param(rx_copybreak, int, 0); -module_param(max_interrupt_work, int, 0); -MODULE_PARM_DESC(debug, "3c515 debug level (0-6)"); -MODULE_PARM_DESC(options, "3c515: Bits 0-2: media type, bit 3: full duplex, bit 4: bus mastering"); -MODULE_PARM_DESC(rx_copybreak, "3c515 copy breakpoint for copy-only-tiny-frames"); -MODULE_PARM_DESC(max_interrupt_work, "3c515 maximum events handled per interrupt"); - -/* A list of all installed Vortex devices, for removing the driver module. */ -/* we will need locking (and refcounting) if we ever use it for more */ -static LIST_HEAD(root_corkscrew_dev); - -int init_module(void) -{ - int found = 0; - if (debug >= 0) - corkscrew_debug = debug; - if (corkscrew_debug) - pr_debug("%s", version); - while (corkscrew_scan(-1)) - found++; - return found ? 
0 : -ENODEV; -} - -#else -struct net_device *tc515_probe(int unit) -{ - struct net_device *dev = corkscrew_scan(unit); - static int printed; - - if (!dev) - return ERR_PTR(-ENODEV); - - if (corkscrew_debug > 0 && !printed) { - printed = 1; - pr_debug("%s", version); - } - - return dev; -} -#endif /* not MODULE */ - -static int check_device(unsigned ioaddr) -{ - int timer; - - if (!request_region(ioaddr, CORKSCREW_TOTAL_SIZE, "3c515")) - return 0; - /* Check the resource configuration for a matching ioaddr. */ - if ((inw(ioaddr + 0x2002) & 0x1f0) != (ioaddr & 0x1f0)) { - release_region(ioaddr, CORKSCREW_TOTAL_SIZE); - return 0; - } - /* Verify by reading the device ID from the EEPROM. */ - outw(EEPROM_Read + 7, ioaddr + Wn0EepromCmd); - /* Pause for at least 162 us. for the read to take place. */ - for (timer = 4; timer >= 0; timer--) { - udelay(162); - if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0) - break; - } - if (inw(ioaddr + Wn0EepromData) != 0x6d50) { - release_region(ioaddr, CORKSCREW_TOTAL_SIZE); - return 0; - } - return 1; -} - -static void cleanup_card(struct net_device *dev) -{ - struct corkscrew_private *vp = netdev_priv(dev); - list_del_init(&vp->list); - if (dev->dma) - free_dma(dev->dma); - outw(TotalReset, dev->base_addr + EL3_CMD); - release_region(dev->base_addr, CORKSCREW_TOTAL_SIZE); - if (vp->dev) - pnp_device_detach(to_pnp_dev(vp->dev)); -} - -static struct net_device *corkscrew_scan(int unit) -{ - struct net_device *dev; - static int cards_found = 0; - static int ioaddr; - int err; -#ifdef __ISAPNP__ - short i; - static int pnp_cards; -#endif - - dev = alloc_etherdev(sizeof(struct corkscrew_private)); - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - -#ifdef __ISAPNP__ - if(nopnp == 1) - goto no_pnp; - for(i=0; corkscrew_isapnp_adapters[i].vendor != 0; i++) { - struct pnp_dev *idev = NULL; - int irq; - while((idev = pnp_find_dev(NULL, - corkscrew_isapnp_adapters[i].vendor, - corkscrew_isapnp_adapters[i].function, - idev))) { - - if (pnp_device_attach(idev) < 0) - continue; - if (pnp_activate_dev(idev) < 0) { - pr_warning("pnp activate failed (out of resources?)\n"); - pnp_device_detach(idev); - continue; - } - if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) { - pnp_device_detach(idev); - continue; - } - ioaddr = pnp_port_start(idev, 0); - irq = pnp_irq(idev, 0); - if (!check_device(ioaddr)) { - pnp_device_detach(idev); - continue; - } - if(corkscrew_debug) - pr_debug("ISAPNP reports %s at i/o 0x%x, irq %d\n", - (char*) corkscrew_isapnp_adapters[i].driver_data, ioaddr, irq); - pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n", - inl(ioaddr + 0x2002), inw(ioaddr + 0x2000)); - /* irq = inw(ioaddr + 0x2002) & 15; */ /* Use the irq from isapnp */ - SET_NETDEV_DEV(dev, &idev->dev); - pnp_cards++; - err = corkscrew_setup(dev, ioaddr, idev, cards_found++); - if (!err) - return dev; - cleanup_card(dev); - } - } -no_pnp: -#endif /* __ISAPNP__ */ - - /* Check all locations on the ISA bus -- evil! 
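*/

check_device() above shows the driver's bounded busy-wait: poll the EEPROM busy bit (0x0200) up to five times, 162 microseconds apart, then match the EtherLink ID word 0x6d50. A model of that pattern, with eeprom_busy() and eeprom_data() standing in for the inw() reads and answering on the second poll; the explicit timeout check after the loop is an addition, the driver simply goes on to read the data register.

#include <stdio.h>

static int polls;

static int eeprom_busy(void)
{
    return ++polls < 2;             /* pretend the read completes on poll 2 */
}

static unsigned short eeprom_data(void)
{
    return 0x6d50;                  /* the EtherLink III ID word */
}

int main(void)
{
    int timer;

    for (timer = 4; timer >= 0; timer--) {
        /* udelay(162) in the driver */
        if (!eeprom_busy())
            break;
    }
    if (timer < 0 || eeprom_data() != 0x6d50)
        printf("no 3c515 at this address\n");
    else
        printf("ID word matched after %d polls\n", polls);
    return 0;
}

/* The brute-force scan below probes every 0x20-aligned port from 0x100 to 0x3e0: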
*/ - for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x20) { - if (!check_device(ioaddr)) - continue; - - pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n", - inl(ioaddr + 0x2002), inw(ioaddr + 0x2000)); - err = corkscrew_setup(dev, ioaddr, NULL, cards_found++); - if (!err) - return dev; - cleanup_card(dev); - } - free_netdev(dev); - return NULL; -} - - -static const struct net_device_ops netdev_ops = { - .ndo_open = corkscrew_open, - .ndo_stop = corkscrew_close, - .ndo_start_xmit = corkscrew_start_xmit, - .ndo_tx_timeout = corkscrew_timeout, - .ndo_get_stats = corkscrew_get_stats, - .ndo_set_multicast_list = set_rx_mode, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - - -static int corkscrew_setup(struct net_device *dev, int ioaddr, - struct pnp_dev *idev, int card_number) -{ - struct corkscrew_private *vp = netdev_priv(dev); - unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ - int i; - int irq; - -#ifdef __ISAPNP__ - if (idev) { - irq = pnp_irq(idev, 0); - vp->dev = &idev->dev; - } else { - irq = inw(ioaddr + 0x2002) & 15; - } -#else - irq = inw(ioaddr + 0x2002) & 15; -#endif - - dev->base_addr = ioaddr; - dev->irq = irq; - dev->dma = inw(ioaddr + 0x2000) & 7; - vp->product_name = "3c515"; - vp->options = dev->mem_start; - vp->our_dev = dev; - - if (!vp->options) { - if (card_number >= MAX_UNITS) - vp->options = -1; - else - vp->options = options[card_number]; - } - - if (vp->options >= 0) { - vp->media_override = vp->options & 7; - if (vp->media_override == 2) - vp->media_override = 0; - vp->full_duplex = (vp->options & 8) ? 1 : 0; - vp->bus_master = (vp->options & 16) ? 1 : 0; - } else { - vp->media_override = 7; - vp->full_duplex = 0; - vp->bus_master = 0; - } -#ifdef MODULE - list_add(&vp->list, &root_corkscrew_dev); -#endif - - pr_info("%s: 3Com %s at %#3x,", dev->name, vp->product_name, ioaddr); - - spin_lock_init(&vp->lock); - - /* Read the station address from the EEPROM. */ - EL3WINDOW(0); - for (i = 0; i < 0x18; i++) { - __be16 *phys_addr = (__be16 *) dev->dev_addr; - int timer; - outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd); - /* Pause for at least 162 us. for the read to take place. */ - for (timer = 4; timer >= 0; timer--) { - udelay(162); - if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0) - break; - } - eeprom[i] = inw(ioaddr + Wn0EepromData); - checksum ^= eeprom[i]; - if (i < 3) - phys_addr[i] = htons(eeprom[i]); - } - checksum = (checksum ^ (checksum >> 8)) & 0xff; - if (checksum != 0x00) - pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); - pr_cont(" %pM", dev->dev_addr); - if (eeprom[16] == 0x11c7) { /* Corkscrew */ - if (request_dma(dev->dma, "3c515")) { - pr_cont(", DMA %d allocation failed", dev->dma); - dev->dma = 0; - } else - pr_cont(", DMA %d", dev->dma); - } - pr_cont(", IRQ %d\n", dev->irq); - /* Tell them about an invalid IRQ. */ - if (corkscrew_debug && (dev->irq <= 0 || dev->irq > 15)) - pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n"); - - { - static const char * const ram_split[] = { - "5:3", "3:1", "1:1", "3:5" - }; - __u32 config; - EL3WINDOW(3); - vp->available_media = inw(ioaddr + Wn3_Options); - config = inl(ioaddr + Wn3_Config); - if (corkscrew_debug > 1) - pr_info(" Internal config register is %4.4x, transceivers %#x.\n", - config, inw(ioaddr + Wn3_Options)); - pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", - 8 << config & Ram_size, - config & Ram_width ? 
"word" : "byte", - ram_split[(config & Ram_split) >> Ram_split_shift], - config & Autoselect ? "autoselect/" : "", - media_tbl[(config & Xcvr) >> Xcvr_shift].name); - vp->default_media = (config & Xcvr) >> Xcvr_shift; - vp->autoselect = config & Autoselect ? 1 : 0; - dev->if_port = vp->default_media; - } - if (vp->media_override != 7) { - pr_info(" Media override to transceiver type %d (%s).\n", - vp->media_override, - media_tbl[vp->media_override].name); - dev->if_port = vp->media_override; - } - - vp->capabilities = eeprom[16]; - vp->full_bus_master_tx = (vp->capabilities & 0x20) ? 1 : 0; - /* Rx is broken at 10mbps, so we always disable it. */ - /* vp->full_bus_master_rx = 0; */ - vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0; - - /* The 3c51x-specific entries in the device structure. */ - dev->netdev_ops = &netdev_ops; - dev->watchdog_timeo = (400 * HZ) / 1000; - dev->ethtool_ops = &netdev_ethtool_ops; - - return register_netdev(dev); -} - - -static int corkscrew_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct corkscrew_private *vp = netdev_priv(dev); - __u32 config; - int i; - - /* Before initializing select the active media port. */ - EL3WINDOW(3); - if (vp->full_duplex) - outb(0x20, ioaddr + Wn3_MAC_Ctrl); /* Set the full-duplex bit. */ - config = inl(ioaddr + Wn3_Config); - - if (vp->media_override != 7) { - if (corkscrew_debug > 1) - pr_info("%s: Media override to transceiver %d (%s).\n", - dev->name, vp->media_override, - media_tbl[vp->media_override].name); - dev->if_port = vp->media_override; - } else if (vp->autoselect) { - /* Find first available media type, starting with 100baseTx. */ - dev->if_port = 4; - while (!(vp->available_media & media_tbl[dev->if_port].mask)) - dev->if_port = media_tbl[dev->if_port].next; - - if (corkscrew_debug > 1) - pr_debug("%s: Initial media type %s.\n", - dev->name, media_tbl[dev->if_port].name); - - init_timer(&vp->timer); - vp->timer.expires = jiffies + media_tbl[dev->if_port].wait; - vp->timer.data = (unsigned long) dev; - vp->timer.function = corkscrew_timer; /* timer handler */ - add_timer(&vp->timer); - } else - dev->if_port = vp->default_media; - - config = (config & ~Xcvr) | (dev->if_port << Xcvr_shift); - outl(config, ioaddr + Wn3_Config); - - if (corkscrew_debug > 1) { - pr_debug("%s: corkscrew_open() InternalConfig %8.8x.\n", - dev->name, config); - } - - outw(TxReset, ioaddr + EL3_CMD); - for (i = 20; i >= 0; i--) - if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) - break; - - outw(RxReset, ioaddr + EL3_CMD); - /* Wait a few ticks for the RxReset command to complete. */ - for (i = 20; i >= 0; i--) - if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) - break; - - outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); - - /* Use the now-standard shared IRQ implementation. */ - if (vp->capabilities == 0x11c7) { - /* Corkscrew: Cannot share ISA resources. */ - if (dev->irq == 0 || - dev->dma == 0 || - request_irq(dev->irq, corkscrew_interrupt, 0, - vp->product_name, dev)) - return -EAGAIN; - enable_dma(dev->dma); - set_dma_mode(dev->dma, DMA_MODE_CASCADE); - } else if (request_irq(dev->irq, corkscrew_interrupt, IRQF_SHARED, - vp->product_name, dev)) { - return -EAGAIN; - } - - if (corkscrew_debug > 1) { - EL3WINDOW(4); - pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n", - dev->name, dev->irq, inw(ioaddr + Wn4_Media)); - } - - /* Set the station address and mask in window 2 each time opened. 
*/ - EL3WINDOW(2); - for (i = 0; i < 6; i++) - outb(dev->dev_addr[i], ioaddr + i); - for (; i < 12; i += 2) - outw(0, ioaddr + i); - - if (dev->if_port == 3) - /* Start the thinnet transceiver. We should really wait 50ms... */ - outw(StartCoax, ioaddr + EL3_CMD); - EL3WINDOW(4); - outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP | Media_SQE)) | - media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media); - - /* Switch to the stats window, and clear all stats by reading. */ - outw(StatsDisable, ioaddr + EL3_CMD); - EL3WINDOW(6); - for (i = 0; i < 10; i++) - inb(ioaddr + i); - inw(ioaddr + 10); - inw(ioaddr + 12); - /* New: On the Vortex we must also clear the BadSSD counter. */ - EL3WINDOW(4); - inb(ioaddr + 12); - /* ..and on the Boomerang we enable the extra statistics bits. */ - outw(0x0040, ioaddr + Wn4_NetDiag); - - /* Switch to register set 7 for normal use. */ - EL3WINDOW(7); - - if (vp->full_bus_master_rx) { /* Boomerang bus master. */ - vp->cur_rx = vp->dirty_rx = 0; - if (corkscrew_debug > 2) - pr_debug("%s: Filling in the Rx ring.\n", dev->name); - for (i = 0; i < RX_RING_SIZE; i++) { - struct sk_buff *skb; - if (i < (RX_RING_SIZE - 1)) - vp->rx_ring[i].next = - isa_virt_to_bus(&vp->rx_ring[i + 1]); - else - vp->rx_ring[i].next = 0; - vp->rx_ring[i].status = 0; /* Clear complete bit. */ - vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000; - skb = dev_alloc_skb(PKT_BUF_SZ); - vp->rx_skbuff[i] = skb; - if (skb == NULL) - break; /* Bad news! */ - skb->dev = dev; /* Mark as being used by this device. */ - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); - } - if (i != 0) - vp->rx_ring[i - 1].next = - isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ - outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); - } - if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ - vp->cur_tx = vp->dirty_tx = 0; - outb(PKT_BUF_SZ >> 8, ioaddr + TxFreeThreshold); /* Room for a packet. */ - /* Clear the Tx ring. */ - for (i = 0; i < TX_RING_SIZE; i++) - vp->tx_skbuff[i] = NULL; - outl(0, ioaddr + DownListPtr); - } - /* Set receiver mode: presumably accept b-case and phys addr only. */ - set_rx_mode(dev); - outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ - - netif_start_queue(dev); - - outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ - outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ - /* Allow status bits to be seen. */ - outw(SetStatusEnb | AdapterFailure | IntReq | StatsFull | - (vp->full_bus_master_tx ? DownComplete : TxAvailable) | - (vp->full_bus_master_rx ? UpComplete : RxComplete) | - (vp->bus_master ? DMADone : 0), ioaddr + EL3_CMD); - /* Ack all pending events, and set active indicator mask. */ - outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, - ioaddr + EL3_CMD); - outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull - | (vp->bus_master ? 
DMADone : 0) | UpComplete | DownComplete, - ioaddr + EL3_CMD); - - return 0; -} - -static void corkscrew_timer(unsigned long data) -{ -#ifdef AUTOMEDIA - struct net_device *dev = (struct net_device *) data; - struct corkscrew_private *vp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long flags; - int ok = 0; - - if (corkscrew_debug > 1) - pr_debug("%s: Media selection timer tick happened, %s.\n", - dev->name, media_tbl[dev->if_port].name); - - spin_lock_irqsave(&vp->lock, flags); - - { - int old_window = inw(ioaddr + EL3_CMD) >> 13; - int media_status; - EL3WINDOW(4); - media_status = inw(ioaddr + Wn4_Media); - switch (dev->if_port) { - case 0: - case 4: - case 5: /* 10baseT, 100baseTX, 100baseFX */ - if (media_status & Media_LnkBeat) { - ok = 1; - if (corkscrew_debug > 1) - pr_debug("%s: Media %s has link beat, %x.\n", - dev->name, - media_tbl[dev->if_port].name, - media_status); - } else if (corkscrew_debug > 1) - pr_debug("%s: Media %s is has no link beat, %x.\n", - dev->name, - media_tbl[dev->if_port].name, - media_status); - - break; - default: /* Other media types handled by Tx timeouts. */ - if (corkscrew_debug > 1) - pr_debug("%s: Media %s is has no indication, %x.\n", - dev->name, - media_tbl[dev->if_port].name, - media_status); - ok = 1; - } - if (!ok) { - __u32 config; - - do { - dev->if_port = - media_tbl[dev->if_port].next; - } - while (!(vp->available_media & media_tbl[dev->if_port].mask)); - - if (dev->if_port == 8) { /* Go back to default. */ - dev->if_port = vp->default_media; - if (corkscrew_debug > 1) - pr_debug("%s: Media selection failing, using default %s port.\n", - dev->name, - media_tbl[dev->if_port].name); - } else { - if (corkscrew_debug > 1) - pr_debug("%s: Media selection failed, now trying %s port.\n", - dev->name, - media_tbl[dev->if_port].name); - vp->timer.expires = jiffies + media_tbl[dev->if_port].wait; - add_timer(&vp->timer); - } - outw((media_status & ~(Media_10TP | Media_SQE)) | - media_tbl[dev->if_port].media_bits, - ioaddr + Wn4_Media); - - EL3WINDOW(3); - config = inl(ioaddr + Wn3_Config); - config = (config & ~Xcvr) | (dev->if_port << Xcvr_shift); - outl(config, ioaddr + Wn3_Config); - - outw(dev->if_port == 3 ? StartCoax : StopCoax, - ioaddr + EL3_CMD); - } - EL3WINDOW(old_window); - } - - spin_unlock_irqrestore(&vp->lock, flags); - if (corkscrew_debug > 1) - pr_debug("%s: Media selection timer finished, %s.\n", - dev->name, media_tbl[dev->if_port].name); - -#endif /* AUTOMEDIA */ -} - -static void corkscrew_timeout(struct net_device *dev) -{ - int i; - struct corkscrew_private *vp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - pr_warning("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", - dev->name, inb(ioaddr + TxStatus), - inw(ioaddr + EL3_STATUS)); - /* Slight code bloat to be user friendly. */ - if ((inb(ioaddr + TxStatus) & 0x88) == 0x88) - pr_warning("%s: Transmitter encountered 16 collisions --" - " network cable problem?\n", dev->name); -#ifndef final_version - pr_debug(" Flags; bus-master %d, full %d; dirty %d current %d.\n", - vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, - vp->cur_tx); - pr_debug(" Down list %8.8x vs. %p.\n", inl(ioaddr + DownListPtr), - &vp->tx_ring[0]); - for (i = 0; i < TX_RING_SIZE; i++) { - pr_debug(" %d: %p length %8.8x status %8.8x\n", i, - &vp->tx_ring[i], - vp->tx_ring[i].length, vp->tx_ring[i].status); - } -#endif - /* Issue TX_RESET and TX_START commands. 
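*/

Both corkscrew_open() and corkscrew_timer() above walk media_tbl through its next fields until they find a transceiver whose presence bit is set in the Wn3_Options mask. The walk in isolation; the rows keep the driver's name, mask and next values but drop the unused media_bits and wait fields, and the available-media mask is invented.

#include <stdio.h>

enum { XCVR_10baseT = 0, XCVR_AUI = 1, XCVR_10base2 = 3, XCVR_100baseTx = 4,
       XCVR_100baseFx = 5, XCVR_MII = 6, XCVR_Default = 8 };

static const struct { const char *name; unsigned char mask, next; } media_tbl[] = {
    [XCVR_10baseT]   = { "10baseT",   0x08, XCVR_10base2 },
    [XCVR_AUI]       = { "10Mbs AUI", 0x20, XCVR_Default },
    [2]              = { "undefined", 0x80, XCVR_10baseT },
    [XCVR_10base2]   = { "10base2",   0x10, XCVR_AUI },
    [XCVR_100baseTx] = { "100baseTX", 0x02, XCVR_100baseFx },
    [XCVR_100baseFx] = { "100baseFX", 0x04, XCVR_MII },
    [XCVR_MII]       = { "MII",       0x40, XCVR_10baseT },
    [7]              = { "undefined", 0x01, XCVR_10baseT },
    [XCVR_Default]   = { "Default",   0xFF, XCVR_10baseT },
};

int main(void)
{
    unsigned int available_media = 0x08 | 0x20; /* say: 10baseT and AUI */
    int if_port = XCVR_100baseTx;               /* autoselect starts here */

    while (!(available_media & media_tbl[if_port].mask))
        if_port = media_tbl[if_port].next;

    printf("autoselected %s (index %d)\n", media_tbl[if_port].name, if_port);
    return 0;
}

/* Issue the TX_RESET and TX_START commands: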
*/ - outw(TxReset, ioaddr + EL3_CMD); - for (i = 20; i >= 0; i--) - if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) - break; - outw(TxEnable, ioaddr + EL3_CMD); - dev->trans_start = jiffies; /* prevent tx timeout */ - dev->stats.tx_errors++; - dev->stats.tx_dropped++; - netif_wake_queue(dev); -} - -static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct corkscrew_private *vp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - /* Block a timer-based transmit from overlapping. */ - - netif_stop_queue(dev); - - if (vp->full_bus_master_tx) { /* BOOMERANG bus-master */ - /* Calculate the next Tx descriptor entry. */ - int entry = vp->cur_tx % TX_RING_SIZE; - struct boom_tx_desc *prev_entry; - unsigned long flags; - int i; - - if (vp->tx_full) /* No room to transmit with */ - return NETDEV_TX_BUSY; - if (vp->cur_tx != 0) - prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE]; - else - prev_entry = NULL; - if (corkscrew_debug > 3) - pr_debug("%s: Trying to send a packet, Tx index %d.\n", - dev->name, vp->cur_tx); - /* vp->tx_full = 1; */ - vp->tx_skbuff[entry] = skb; - vp->tx_ring[entry].next = 0; - vp->tx_ring[entry].addr = isa_virt_to_bus(skb->data); - vp->tx_ring[entry].length = skb->len | 0x80000000; - vp->tx_ring[entry].status = skb->len | 0x80000000; - - spin_lock_irqsave(&vp->lock, flags); - outw(DownStall, ioaddr + EL3_CMD); - /* Wait for the stall to complete. */ - for (i = 20; i >= 0; i--) - if ((inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0) - break; - if (prev_entry) - prev_entry->next = isa_virt_to_bus(&vp->tx_ring[entry]); - if (inl(ioaddr + DownListPtr) == 0) { - outl(isa_virt_to_bus(&vp->tx_ring[entry]), - ioaddr + DownListPtr); - queued_packet++; - } - outw(DownUnstall, ioaddr + EL3_CMD); - spin_unlock_irqrestore(&vp->lock, flags); - - vp->cur_tx++; - if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) - vp->tx_full = 1; - else { /* Clear previous interrupt enable. */ - if (prev_entry) - prev_entry->status &= ~0x80000000; - netif_wake_queue(dev); - } - return NETDEV_TX_OK; - } - /* Put out the doubleword header... */ - outl(skb->len, ioaddr + TX_FIFO); - dev->stats.tx_bytes += skb->len; -#ifdef VORTEX_BUS_MASTER - if (vp->bus_master) { - /* Set the bus-master controller to transfer the packet. */ - outl((int) (skb->data), ioaddr + Wn7_MasterAddr); - outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); - vp->tx_skb = skb; - outw(StartDMADown, ioaddr + EL3_CMD); - /* queue will be woken at the DMADone interrupt. */ - } else { - /* ... and the packet rounded to a doubleword. */ - outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); - dev_kfree_skb(skb); - if (inw(ioaddr + TxFree) > 1536) { - netif_wake_queue(dev); - } else - /* Interrupt us when the FIFO has room for max-sized packet. */ - outw(SetTxThreshold + (1536 >> 2), - ioaddr + EL3_CMD); - } -#else - /* ... and the packet rounded to a doubleword. */ - outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); - dev_kfree_skb(skb); - if (inw(ioaddr + TxFree) > 1536) { - netif_wake_queue(dev); - } else - /* Interrupt us when the FIFO has room for max-sized packet. */ - outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD); -#endif /* bus master */ - - - /* Clear the Tx status stack. */ - { - short tx_status; - int i = 4; - - while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) { - if (tx_status & 0x3C) { /* A Tx-disabling error occurred. 
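*/

Earlier in corkscrew_start_xmit(), the bus-master path tracks the ring with two free-running counters: cur_tx % TX_RING_SIZE picks the slot, and the ring is full once cur_tx - dirty_tx exceeds TX_RING_SIZE - 1. A small simulation of that arithmetic, with a fake reclaim so the counters keep moving:

#include <stdio.h>

#define TX_RING_SIZE 16

int main(void)
{
    unsigned int cur_tx = 0, dirty_tx = 0;
    int i;

    for (i = 0; i < 20; i++) {
        if (cur_tx - dirty_tx > TX_RING_SIZE - 1) {
            /* vp->tx_full in the driver; pretend one descriptor completed */
            dirty_tx++;
        }
        printf("queue slot %2u (in flight %2u)\n",
               cur_tx % TX_RING_SIZE, cur_tx - dirty_tx);
        cur_tx++;
    }
    return 0;
}

The free-running-counter idiom needs no separate occupancy field and keeps the empty (cur_tx == dirty_tx) and full cases unambiguous.

/* A Tx-disabling error occurred; log it, then reset and re-enable the transmitter: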
*/ - if (corkscrew_debug > 2) - pr_debug("%s: Tx error, status %2.2x.\n", - dev->name, tx_status); - if (tx_status & 0x04) - dev->stats.tx_fifo_errors++; - if (tx_status & 0x38) - dev->stats.tx_aborted_errors++; - if (tx_status & 0x30) { - int j; - outw(TxReset, ioaddr + EL3_CMD); - for (j = 20; j >= 0; j--) - if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) - break; - } - outw(TxEnable, ioaddr + EL3_CMD); - } - outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */ - } - } - return NETDEV_TX_OK; -} - -/* The interrupt handler does all of the Rx thread work and cleans up - after the Tx thread. */ - -static irqreturn_t corkscrew_interrupt(int irq, void *dev_id) -{ - /* Use the now-standard shared IRQ implementation. */ - struct net_device *dev = dev_id; - struct corkscrew_private *lp = netdev_priv(dev); - int ioaddr, status; - int latency; - int i = max_interrupt_work; - - ioaddr = dev->base_addr; - latency = inb(ioaddr + Timer); - - spin_lock(&lp->lock); - - status = inw(ioaddr + EL3_STATUS); - - if (corkscrew_debug > 4) - pr_debug("%s: interrupt, status %4.4x, timer %d.\n", - dev->name, status, latency); - if ((status & 0xE000) != 0xE000) { - static int donedidthis; - /* Some interrupt controllers store a bogus interrupt from boot-time. - Ignore a single early interrupt, but don't hang the machine for - other interrupt problems. */ - if (donedidthis++ > 100) { - pr_err("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n", - dev->name, status, netif_running(dev)); - free_irq(dev->irq, dev); - dev->irq = -1; - } - } - - do { - if (corkscrew_debug > 5) - pr_debug("%s: In interrupt loop, status %4.4x.\n", - dev->name, status); - if (status & RxComplete) - corkscrew_rx(dev); - - if (status & TxAvailable) { - if (corkscrew_debug > 5) - pr_debug(" TX room bit was handled.\n"); - /* There's room in the FIFO for a full-sized packet. */ - outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); - netif_wake_queue(dev); - } - if (status & DownComplete) { - unsigned int dirty_tx = lp->dirty_tx; - - while (lp->cur_tx - dirty_tx > 0) { - int entry = dirty_tx % TX_RING_SIZE; - if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry])) - break; /* It still hasn't been processed. */ - if (lp->tx_skbuff[entry]) { - dev_kfree_skb_irq(lp->tx_skbuff[entry]); - lp->tx_skbuff[entry] = NULL; - } - dirty_tx++; - } - lp->dirty_tx = dirty_tx; - outw(AckIntr | DownComplete, ioaddr + EL3_CMD); - if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) { - lp->tx_full = 0; - netif_wake_queue(dev); - } - } -#ifdef VORTEX_BUS_MASTER - if (status & DMADone) { - outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ - dev_kfree_skb_irq(lp->tx_skb); /* Release the transferred buffer */ - netif_wake_queue(dev); - } -#endif - if (status & UpComplete) { - boomerang_rx(dev); - outw(AckIntr | UpComplete, ioaddr + EL3_CMD); - } - if (status & (AdapterFailure | RxEarly | StatsFull)) { - /* Handle all uncommon interrupts at once. */ - if (status & RxEarly) { /* Rx early is unused. */ - corkscrew_rx(dev); - outw(AckIntr | RxEarly, ioaddr + EL3_CMD); - } - if (status & StatsFull) { /* Empty statistics. */ - static int DoneDidThat; - if (corkscrew_debug > 4) - pr_debug("%s: Updating stats.\n", dev->name); - update_stats(ioaddr, dev); - /* DEBUG HACK: Disable statistics as an interrupt source. */ - /* This occurs when we have the wrong media type! 
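*/

The DownComplete branch above reclaims finished descriptors by advancing dirty_tx toward cur_tx until it reaches the descriptor the NIC's DownListPtr still points at. The same walk, with hw_current standing in for inl(ioaddr + DownListPtr) and invented counter values:

#include <stdio.h>

#define TX_RING_SIZE 16

int main(void)
{
    unsigned int cur_tx = 7, dirty_tx = 2;
    unsigned int hw_current = 5;    /* pretend the NIC is busy with slot 5 */

    while (cur_tx - dirty_tx > 0) {
        unsigned int entry = dirty_tx % TX_RING_SIZE;

        if (entry == hw_current % TX_RING_SIZE)
            break;                  /* still being processed by the NIC */
        printf("freeing tx_skbuff[%u]\n", entry); /* dev_kfree_skb_irq() */
        dirty_tx++;
    }
    printf("dirty_tx now %u, %u still in flight\n", dirty_tx, cur_tx - dirty_tx);
    return 0;
}

/* A StatsFull that will not clear usually means the wrong media type: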
*/ - if (DoneDidThat == 0 && inw(ioaddr + EL3_STATUS) & StatsFull) { - int win, reg; - pr_notice("%s: Updating stats failed, disabling stats as an interrupt source.\n", - dev->name); - for (win = 0; win < 8; win++) { - EL3WINDOW(win); - pr_notice("Vortex window %d:", win); - for (reg = 0; reg < 16; reg++) - pr_cont(" %2.2x", inb(ioaddr + reg)); - pr_cont("\n"); - } - EL3WINDOW(7); - outw(SetIntrEnb | TxAvailable | - RxComplete | AdapterFailure | - UpComplete | DownComplete | - TxComplete, ioaddr + EL3_CMD); - DoneDidThat++; - } - } - if (status & AdapterFailure) { - /* Adapter failure requires Rx reset and reinit. */ - outw(RxReset, ioaddr + EL3_CMD); - /* Set the Rx filter to the current state. */ - set_rx_mode(dev); - outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */ - outw(AckIntr | AdapterFailure, - ioaddr + EL3_CMD); - } - } - - if (--i < 0) { - pr_err("%s: Too much work in interrupt, status %4.4x. Disabling functions (%4.4x).\n", - dev->name, status, SetStatusEnb | ((~status) & 0x7FE)); - /* Disable all pending interrupts. */ - outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD); - outw(AckIntr | 0x7FF, ioaddr + EL3_CMD); - break; - } - /* Acknowledge the IRQ. */ - outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); - - } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); - - spin_unlock(&lp->lock); - - if (corkscrew_debug > 4) - pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status); - return IRQ_HANDLED; -} - -static int corkscrew_rx(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - int i; - short rx_status; - - if (corkscrew_debug > 5) - pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n", - inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus)); - while ((rx_status = inw(ioaddr + RxStatus)) > 0) { - if (rx_status & 0x4000) { /* Error, update stats. */ - unsigned char rx_error = inb(ioaddr + RxErrors); - if (corkscrew_debug > 2) - pr_debug(" Rx error: status %2.2x.\n", - rx_error); - dev->stats.rx_errors++; - if (rx_error & 0x01) - dev->stats.rx_over_errors++; - if (rx_error & 0x02) - dev->stats.rx_length_errors++; - if (rx_error & 0x04) - dev->stats.rx_frame_errors++; - if (rx_error & 0x08) - dev->stats.rx_crc_errors++; - if (rx_error & 0x10) - dev->stats.rx_length_errors++; - } else { - /* The packet length: up to 4.5K!. */ - short pkt_len = rx_status & 0x1fff; - struct sk_buff *skb; - - skb = dev_alloc_skb(pkt_len + 5 + 2); - if (corkscrew_debug > 4) - pr_debug("Receiving packet size %d status %4.4x.\n", - pkt_len, rx_status); - if (skb != NULL) { - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - /* 'skb_put()' points to the start of sk_buff data area. */ - insl(ioaddr + RX_FIFO, - skb_put(skb, pkt_len), - (pkt_len + 3) >> 2); - outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - /* Wait a limited time to go to next packet. */ - for (i = 200; i >= 0; i--) - if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress)) - break; - continue; - } else if (corkscrew_debug) - pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len); - } - outw(RxDiscard, ioaddr + EL3_CMD); - dev->stats.rx_dropped++; - /* Wait a limited time to skip this packet. 
*/ - for (i = 200; i >= 0; i--) - if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) - break; - } - return 0; -} - -static int boomerang_rx(struct net_device *dev) -{ - struct corkscrew_private *vp = netdev_priv(dev); - int entry = vp->cur_rx % RX_RING_SIZE; - int ioaddr = dev->base_addr; - int rx_status; - - if (corkscrew_debug > 5) - pr_debug(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n", - inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus)); - while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) { - if (rx_status & RxDError) { /* Error, update stats. */ - unsigned char rx_error = rx_status >> 16; - if (corkscrew_debug > 2) - pr_debug(" Rx error: status %2.2x.\n", - rx_error); - dev->stats.rx_errors++; - if (rx_error & 0x01) - dev->stats.rx_over_errors++; - if (rx_error & 0x02) - dev->stats.rx_length_errors++; - if (rx_error & 0x04) - dev->stats.rx_frame_errors++; - if (rx_error & 0x08) - dev->stats.rx_crc_errors++; - if (rx_error & 0x10) - dev->stats.rx_length_errors++; - } else { - /* The packet length: up to 4.5K!. */ - short pkt_len = rx_status & 0x1fff; - struct sk_buff *skb; - - dev->stats.rx_bytes += pkt_len; - if (corkscrew_debug > 4) - pr_debug("Receiving packet size %d status %4.4x.\n", - pkt_len, rx_status); - - /* Check if the packet is long enough to just accept without - copying to a properly sized skbuff. */ - if (pkt_len < rx_copybreak && - (skb = dev_alloc_skb(pkt_len + 4)) != NULL) { - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - /* 'skb_put()' points to the start of sk_buff data area. */ - memcpy(skb_put(skb, pkt_len), - isa_bus_to_virt(vp->rx_ring[entry]. - addr), pkt_len); - rx_copy++; - } else { - void *temp; - /* Pass up the skbuff already on the Rx ring. */ - skb = vp->rx_skbuff[entry]; - vp->rx_skbuff[entry] = NULL; - temp = skb_put(skb, pkt_len); - /* Remove this checking code for final release. */ - if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp) - pr_warning("%s: Warning -- the skbuff addresses do not match" - " in boomerang_rx: %p vs. %p / %p.\n", - dev->name, - isa_bus_to_virt(vp-> - rx_ring[entry]. - addr), skb->head, - temp); - rx_nocopy++; - } - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - } - entry = (++vp->cur_rx) % RX_RING_SIZE; - } - /* Refill the Rx ring buffers. */ - for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { - struct sk_buff *skb; - entry = vp->dirty_rx % RX_RING_SIZE; - if (vp->rx_skbuff[entry] == NULL) { - skb = dev_alloc_skb(PKT_BUF_SZ); - if (skb == NULL) - break; /* Bad news! */ - skb->dev = dev; /* Mark as being used by this device. */ - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data); - vp->rx_skbuff[entry] = skb; - } - vp->rx_ring[entry].status = 0; /* Clear complete bit. */ - } - return 0; -} - -static int corkscrew_close(struct net_device *dev) -{ - struct corkscrew_private *vp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int i; - - netif_stop_queue(dev); - - if (corkscrew_debug > 1) { - pr_debug("%s: corkscrew_close() status %4.4x, Tx status %2.2x.\n", - dev->name, inw(ioaddr + EL3_STATUS), - inb(ioaddr + TxStatus)); - pr_debug("%s: corkscrew close stats: rx_nocopy %d rx_copy %d tx_queued %d.\n", - dev->name, rx_nocopy, rx_copy, queued_packet); - } - - del_timer(&vp->timer); - - /* Turn off statistics ASAP. We update lp->stats below. */ - outw(StatsDisable, ioaddr + EL3_CMD); - - /* Disable the receiver and transmitter. 
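*/

boomerang_rx() above implements the rx_copybreak trade-off described in the file header: frames shorter than the breakpoint are copied into a tightly sized buffer so the 1536-byte ring buffer can be reused immediately, while larger frames hand the ring buffer itself up the stack and leave a slot for the refill loop. A sketch of just that decision, with malloc() standing in for dev_alloc_skb():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int rx_copybreak = 200;      /* the driver's default breakpoint */

static void receive(size_t pkt_len)
{
    if (pkt_len < (size_t)rx_copybreak) {
        char *copy = malloc(pkt_len);
        if (copy) {
            /* memcpy(skb_put(skb, pkt_len), ring_buf, pkt_len) in the driver */
            printf("%4zu bytes: copied, ring buffer stays put\n", pkt_len);
            free(copy);
            return;
        }
    }
    printf("%4zu bytes: ring buffer passed up, slot needs a refill\n", pkt_len);
}

int main(void)
{
    receive(64);
    receive(1514);
    return 0;
}

Setting rx_copybreak above the maximum frame size disables the flip path entirely, which is exactly what the ARM note at the end of this patch does with 1513.

/* Disable the receiver and transmitter: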
*/ - outw(RxDisable, ioaddr + EL3_CMD); - outw(TxDisable, ioaddr + EL3_CMD); - - if (dev->if_port == XCVR_10base2) - /* Turn off thinnet power. Green! */ - outw(StopCoax, ioaddr + EL3_CMD); - - free_irq(dev->irq, dev); - - outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); - - update_stats(ioaddr, dev); - if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ - outl(0, ioaddr + UpListPtr); - for (i = 0; i < RX_RING_SIZE; i++) - if (vp->rx_skbuff[i]) { - dev_kfree_skb(vp->rx_skbuff[i]); - vp->rx_skbuff[i] = NULL; - } - } - if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */ - outl(0, ioaddr + DownListPtr); - for (i = 0; i < TX_RING_SIZE; i++) - if (vp->tx_skbuff[i]) { - dev_kfree_skb(vp->tx_skbuff[i]); - vp->tx_skbuff[i] = NULL; - } - } - - return 0; -} - -static struct net_device_stats *corkscrew_get_stats(struct net_device *dev) -{ - struct corkscrew_private *vp = netdev_priv(dev); - unsigned long flags; - - if (netif_running(dev)) { - spin_lock_irqsave(&vp->lock, flags); - update_stats(dev->base_addr, dev); - spin_unlock_irqrestore(&vp->lock, flags); - } - return &dev->stats; -} - -/* Update statistics. - Unlike with the EL3 we need not worry about interrupts changing - the window setting from underneath us, but we must still guard - against a race condition with a StatsUpdate interrupt updating the - table. This is done by checking that the ASM (!) code generated uses - atomic updates with '+='. - */ -static void update_stats(int ioaddr, struct net_device *dev) -{ - /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ - /* Switch to the stats window, and read everything. */ - EL3WINDOW(6); - dev->stats.tx_carrier_errors += inb(ioaddr + 0); - dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); - /* Multiple collisions. */ inb(ioaddr + 2); - dev->stats.collisions += inb(ioaddr + 3); - dev->stats.tx_window_errors += inb(ioaddr + 4); - dev->stats.rx_fifo_errors += inb(ioaddr + 5); - dev->stats.tx_packets += inb(ioaddr + 6); - dev->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4; - /* Rx packets */ inb(ioaddr + 7); - /* Must read to clear */ - /* Tx deferrals */ inb(ioaddr + 8); - /* Don't bother with register 9, an extension of registers 6&7. - If we do use the 6&7 values the atomic update assumption above - is invalid. */ - inw(ioaddr + 10); /* Total Rx and Tx octets. */ - inw(ioaddr + 12); - /* New: On the Vortex we must also clear the BadSSD counter. */ - EL3WINDOW(4); - inb(ioaddr + 12); - - /* We change back to window 7 (not 1) with the Vortex. */ - EL3WINDOW(7); -} - -/* This new version of set_rx_mode() supports v1.4 kernels. - The Vortex chip has no documented multicast filter, so the only - multicast setting is to receive all multicast frames. At least - the chip has a very clean way to set the mode, unlike many others. 
*/ -static void set_rx_mode(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - short new_mode; - - if (dev->flags & IFF_PROMISC) { - if (corkscrew_debug > 3) - pr_debug("%s: Setting promiscuous mode.\n", - dev->name); - new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm; - } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { - new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast; - } else - new_mode = SetRxFilter | RxStation | RxBroadcast; - - outw(new_mode, ioaddr + EL3_CMD); -} - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); -} - -static u32 netdev_get_msglevel(struct net_device *dev) -{ - return corkscrew_debug; -} - -static void netdev_set_msglevel(struct net_device *dev, u32 level) -{ - corkscrew_debug = level; -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_msglevel = netdev_get_msglevel, - .set_msglevel = netdev_set_msglevel, -}; - - -#ifdef MODULE -void cleanup_module(void) -{ - while (!list_empty(&root_corkscrew_dev)) { - struct net_device *dev; - struct corkscrew_private *vp; - - vp = list_entry(root_corkscrew_dev.next, - struct corkscrew_private, list); - dev = vp->our_dev; - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } -} -#endif /* MODULE */ diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c deleted file mode 100644 index 8cc2256..0000000 --- a/drivers/net/3c59x.c +++ /dev/null @@ -1,3326 +0,0 @@ -/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */ -/* - Written 1996-1999 by Donald Becker. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - This driver is for the 3Com "Vortex" and "Boomerang" series ethercards. - Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597 - and the EtherLink XL 3c900 and 3c905 cards. - - Problem reports and questions should be directed to - vortex@scyld.com - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - -*/ - -/* - * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation - * as well as other drivers - * - * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k - * due to dead code elimination. There will be some performance benefits from this due to - * elimination of all the tests and reduced cache footprint. - */ - - -#define DRV_NAME "3c59x" - - - -/* A few values that may be tweaked. */ -/* Keep the ring sizes a power of two for efficiency. */ -#define TX_RING_SIZE 16 -#define RX_RING_SIZE 32 -#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ - -/* "Knobs" that adjust features and parameters. */ -/* Set the copy breakpoint for the copy-only-tiny-frames scheme. - Setting to > 1512 effectively disables this feature. */ -#ifndef __arm__ -static int rx_copybreak = 200; -#else -/* ARM systems perform better by disregarding the bus-master - transfer capability of these cards. -- rmk */ -static int rx_copybreak = 1513; -#endif -/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ -static const int mtu = 1500; -/* Maximum events (Rx packets, etc.) to handle at each interrupt. 
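   This bound (max_interrupt_work, defined just below) gives the 3c59x ISR
   the same shape as the corkscrew handler earlier in this patch: a
   countdown loop that masks the offending interrupt sources when the
   budget runs out. Schematically (illustration only, mirroring the
   corkscrew loop above, not driver code):

        int work = max_interrupt_work;

        do {
                /* ... service one TxAvailable/RxComplete/etc. event ... */
                if (--work < 0) {
                        /* disable pending sources, warn, and bail out */
                        break;
                }
        } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));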
*/ -static int max_interrupt_work = 32; -/* Tx timeout interval (millisecs) */ -static int watchdog = 5000; - -/* Allow aggregation of Tx interrupts. Saves CPU load at the cost - * of possible Tx stalls if the system is blocking interrupts - * somewhere else. Undefine this to disable. - */ -#define tx_interrupt_mitigation 1 - -/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */ -#define vortex_debug debug -#ifdef VORTEX_DEBUG -static int vortex_debug = VORTEX_DEBUG; -#else -static int vortex_debug = 1; -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* For nr_irqs only. */ -#include -#include - -/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. - This is only in the support-all-kernels source code. */ - -#define RUN_AT(x) (jiffies + (x)) - -#include - - -static const char version[] __devinitconst = - DRV_NAME ": Donald Becker and others.\n"; - -MODULE_AUTHOR("Donald Becker "); -MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "); -MODULE_LICENSE("GPL"); - - -/* Operational parameter that usually are not changed. */ - -/* The Vortex size is twice that of the original EtherLinkIII series: the - runtime register window, window 1, is now always mapped in. - The Boomerang size is twice as large as the Vortex -- it has additional - bus master control registers. */ -#define VORTEX_TOTAL_SIZE 0x20 -#define BOOMERANG_TOTAL_SIZE 0x40 - -/* Set iff a MII transceiver on any interface requires mdio preamble. - This only set with the original DP83840 on older 3c905 boards, so the extra - code size of a per-interface flag is not worthwhile. */ -static char mii_preamble_required; - -#define PFX DRV_NAME ": " - - - -/* - Theory of Operation - -I. Board Compatibility - -This device driver is designed for the 3Com FastEtherLink and FastEtherLink -XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbs -versions of the FastEtherLink cards. The supported product IDs are - 3c590, 3c592, 3c595, 3c597, 3c900, 3c905 - -The related ISA 3c515 is supported with a separate driver, 3c515.c, included -with the kernel source or available from - cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html - -II. Board-specific settings - -PCI bus devices are configured by the system at boot time, so no jumpers -need to be set on the board. The system BIOS should be set to assign the -PCI INTA signal to an otherwise unused system IRQ line. - -The EEPROM settings for media type and forced-full-duplex are observed. -The EEPROM media type should be left at the default "autoselect" unless using -10base2 or AUI connections which cannot be reliably detected. - -III. Driver operation - -The 3c59x series use an interface that's very similar to the previous 3c5x9 -series. The primary interface is two programmed-I/O FIFOs, with an -alternate single-contiguous-region bus-master transfer (see next). - -The 3c900 "Boomerang" series uses a full-bus-master interface with separate -lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, -DEC Tulip and Intel Speedo3. The first chip version retains a compatible -programmed-I/O interface that has been removed in 'B' and subsequent board -revisions. - -One extension that is advertised in a very large font is that the adapters -are capable of being bus masters. 
On the Vortex chip this capability was
-limited to a single contiguous region, making it far less useful than full
-bus mastering. There is a significant performance cost in taking an extra
-interrupt or polling for the completion of each transfer, as well as
-difficulty in sharing the single transfer engine between the transmit and
-receive threads. Using DMA transfers is a win only with large blocks or
-with the flawed versions of the Intel Orion motherboard PCI controller.
-
-The Boomerang chip's full-bus-master interface is useful, and has two
-currently-unused advantages over similar chips: queued transmit packets
-may be reordered, and receive buffer groups are associated with a single
-frame.
-
-With full-bus-master support, this driver uses an "RX_COPYBREAK" scheme.
-Rather than a fixed intermediate receive buffer, this scheme allocates
-full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as
-the copying breakpoint: it is chosen to trade off the memory wasted by
-passing full-sized skbuffs to the queue layer for all frames against the
-cost of copying small frames into correctly-sized skbuffs.
-
-IIIC. Synchronization
-The driver runs as two independent, single-threaded flows of control. One
-is the send-packet routine, which enforces single-threaded use via the
-dev->tbusy flag. The other thread is the interrupt handler, which is
-single-threaded by the hardware and other software.
-
-IV. Notes
-
-Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
-3c590, 3c595, and 3c900 boards.
-The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
-the EISA version is called "Demon". According to Terry these names come
-from rides at the local amusement park.
-
-The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
-This driver only supports ethernet packets because of the skbuff allocation
-limit of 4K.
-*/
-
-/* This table drives the PCI probe routines. It's mostly boilerplate in all
-   of the drivers, and will likely be provided by some future kernel.
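   Per frame, the RX_COPYBREAK policy described above reduces to a single
   comparison. A minimal sketch (hypothetical helper; the real test is
   open-coded in the receive routines):

        static bool frame_should_be_copied(int pkt_len)
        {
                /* Short frame: memcpy() into a right-sized skb and keep
                 * the big ring buffer mapped.  Long frame: pass the ring
                 * skb itself up the stack and allocate a replacement
                 * during ring refill. */
                return pkt_len < rx_copybreak;
        }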
-*/ -enum pci_flags_bit { - PCI_USES_MASTER=4, -}; - -enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8, - EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */ - HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100, - INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800, - EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000, - EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, }; - -enum vortex_chips { - CH_3C590 = 0, - CH_3C592, - CH_3C597, - CH_3C595_1, - CH_3C595_2, - - CH_3C595_3, - CH_3C900_1, - CH_3C900_2, - CH_3C900_3, - CH_3C900_4, - - CH_3C900_5, - CH_3C900B_FL, - CH_3C905_1, - CH_3C905_2, - CH_3C905B_TX, - CH_3C905B_1, - - CH_3C905B_2, - CH_3C905B_FX, - CH_3C905C, - CH_3C9202, - CH_3C980, - CH_3C9805, - - CH_3CSOHO100_TX, - CH_3C555, - CH_3C556, - CH_3C556B, - CH_3C575, - - CH_3C575_1, - CH_3CCFE575, - CH_3CCFE575CT, - CH_3CCFE656, - CH_3CCFEM656, - - CH_3CCFEM656_1, - CH_3C450, - CH_3C920, - CH_3C982A, - CH_3C982B, - - CH_905BT4, - CH_920B_EMB_WNM, -}; - - -/* note: this array directly indexed by above enums, and MUST - * be kept in sync with both the enums above, and the PCI device - * table below - */ -static struct vortex_chip_info { - const char *name; - int flags; - int drv_flags; - int io_size; -} vortex_info_tbl[] __devinitdata = { - {"3c590 Vortex 10Mbps", - PCI_USES_MASTER, IS_VORTEX, 32, }, - {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ - PCI_USES_MASTER, IS_VORTEX, 32, }, - {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ - PCI_USES_MASTER, IS_VORTEX, 32, }, - {"3c595 Vortex 100baseTx", - PCI_USES_MASTER, IS_VORTEX, 32, }, - {"3c595 Vortex 100baseT4", - PCI_USES_MASTER, IS_VORTEX, 32, }, - - {"3c595 Vortex 100base-MII", - PCI_USES_MASTER, IS_VORTEX, 32, }, - {"3c900 Boomerang 10baseT", - PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, - {"3c900 Boomerang 10Mbps Combo", - PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, - {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */ - PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, - {"3c900 Cyclone 10Mbps Combo", - PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, - - {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */ - PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, - {"3c900B-FL Cyclone 10base-FL", - PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, - {"3c905 Boomerang 100baseTx", - PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, - {"3c905 Boomerang 100baseT4", - PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, - {"3C905B-TX Fast Etherlink XL PCI", - PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, - {"3c905B Cyclone 100baseTx", - PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, - - {"3c905B Cyclone 10/100/BNC", - PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, - {"3c905B-FX Cyclone 100baseFx", - PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, - {"3c905C Tornado", - PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, - {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", - PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, - {"3c980 Cyclone", - PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, - - {"3c980C Python-T", - PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, - {"3cSOHO100-TX Hurricane", - PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, - {"3c555 Laptop Hurricane", - PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, }, - {"3c556 Laptop 
Tornado", - PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR| - HAS_HWCKSM, 128, }, - {"3c556B Laptop Hurricane", - PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| - WNO_XCVR_PWR|HAS_HWCKSM, 128, }, - - {"3c575 [Megahertz] 10/100 LAN CardBus", - PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, - {"3c575 Boomerang CardBus", - PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, - {"3CCFE575BT Cyclone CardBus", - PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT| - INVERT_LED_PWR|HAS_HWCKSM, 128, }, - {"3CCFE575CT Tornado CardBus", - PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| - MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, - {"3CCFE656 Cyclone CardBus", - PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| - INVERT_LED_PWR|HAS_HWCKSM, 128, }, - - {"3CCFEM656B Cyclone+Winmodem CardBus", - PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| - INVERT_LED_PWR|HAS_HWCKSM, 128, }, - {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */ - PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| - MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, - {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */ - PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, - {"3c920 Tornado", - PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, - {"3c982 Hydra Dual Port A", - PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, - - {"3c982 Hydra Dual Port B", - PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, - {"3c905B-T4", - PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, - {"3c920B-EMB-WNM Tornado", - PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, - - {NULL,}, /* NULL terminated list. 
*/ -}; - - -static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = { - { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, - { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, - { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, - { 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 }, - { 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 }, - - { 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 }, - { 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 }, - { 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 }, - { 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 }, - { 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 }, - - { 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 }, - { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL }, - { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 }, - { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 }, - { 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX }, - { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 }, - - { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 }, - { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX }, - { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C }, - { 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 }, - { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 }, - { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 }, - - { 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX }, - { 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 }, - { 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 }, - { 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B }, - { 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 }, - - { 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 }, - { 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 }, - { 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT }, - { 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 }, - { 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 }, - - { 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 }, - { 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 }, - { 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 }, - { 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A }, - { 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B }, - - { 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 }, - { 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM }, - - {0,} /* 0 terminated list. */ -}; -MODULE_DEVICE_TABLE(pci, vortex_pci_tbl); - - -/* Operational definitions. - These are not used by other compilation units and thus are not - exported in a ".h" file. - - First the windows. There are eight register windows, with the command - and status registers available in each. - */ -#define EL3_CMD 0x0e -#define EL3_STATUS 0x0e - -/* The top five bits written to EL3_CMD are a command, the lower - 11 bits are the parameter, if applicable. - Note that 11 parameters bits was fine for ethernet, but the new chip - can handle FDDI length frames (~4500 octets) and now parameters count - 32-bit 'Dwords' rather than octets. 
*/ - -enum vortex_cmd { - TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, - RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, - UpStall = 6<<11, UpUnstall = (6<<11)+1, - DownStall = (6<<11)+2, DownUnstall = (6<<11)+3, - RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, - FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, - SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, - SetTxThreshold = 18<<11, SetTxStart = 19<<11, - StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11, - StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,}; - -/* The SetRxFilter command accepts the following classes: */ -enum RxFilter { - RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; - -/* Bits in the general status register. */ -enum vortex_status { - IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004, - TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, - IntReq = 0x0040, StatsFull = 0x0080, - DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10, - DMAInProgress = 1<<11, /* DMA controller is still busy.*/ - CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/ -}; - -/* Register window 1 offsets, the window used in normal operation. - On the Vortex this window is always mapped at offsets 0x10-0x1f. */ -enum Window1 { - TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, - RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B, - TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */ -}; -enum Window0 { - Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */ - Wn0EepromData = 12, /* Window 0: EEPROM results register. */ - IntrStatus=0x0E, /* Valid in all windows. */ -}; -enum Win0_EEPROM_bits { - EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0, - EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ - EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ -}; -/* EEPROM locations. */ -enum eeprom_offset { - PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3, - EtherLink3ID=7, IFXcvrIO=8, IRQLine=9, - NodeAddr01=10, NodeAddr23=11, NodeAddr45=12, - DriverTune=13, Checksum=15}; - -enum Window2 { /* Window 2. */ - Wn2_ResetOptions=12, -}; -enum Window3 { /* Window 3: MAC/config bits. */ - Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8, -}; - -#define BFEXT(value, offset, bitcount) \ - ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1)) - -#define BFINS(lhs, rhs, offset, bitcount) \ - (((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) | \ - (((rhs) & ((1 << (bitcount)) - 1)) << (offset))) - -#define RAM_SIZE(v) BFEXT(v, 0, 3) -#define RAM_WIDTH(v) BFEXT(v, 3, 1) -#define RAM_SPEED(v) BFEXT(v, 4, 2) -#define ROM_SIZE(v) BFEXT(v, 6, 2) -#define RAM_SPLIT(v) BFEXT(v, 16, 2) -#define XCVR(v) BFEXT(v, 20, 4) -#define AUTOSELECT(v) BFEXT(v, 24, 1) - -enum Window4 { /* Window 4: Xcvr/media bits. */ - Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10, -}; -enum Win4_Media_bits { - Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */ - Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */ - Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */ - Media_LnkBeat = 0x0800, -}; -enum Window7 { /* Window 7: Bus Master control. */ - Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6, - Wn7_MasterStatus = 12, -}; -/* Boomerang bus master control registers. 
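   Given the enums above, every EL3_CMD write is simply
   (opcode << 11) | parameter. Two examples, as they appear throughout
   this driver:

        /* Ack a latched interrupt: (13<<11) | 0x0001 */
        iowrite16(AckIntr | IntLatch, ioaddr + EL3_CMD);

        /* Select register window n: (1<<11) + n */
        iowrite16(SelectWindow + n, ioaddr + EL3_CMD);

   Status bits such as IntLatch double as the parameter when acknowledging,
   which is why the same values show up in both EL3_STATUS reads and
   EL3_CMD writes.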
*/ -enum MasterCtrl { - PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c, - TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38, -}; - -/* The Rx and Tx descriptor lists. - Caution Alpha hackers: these types are 32 bits! Note also the 8 byte - alignment contraint on tx_ring[] and rx_ring[]. */ -#define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */ -#define DN_COMPLETE 0x00010000 /* This packet has been downloaded */ -struct boom_rx_desc { - __le32 next; /* Last entry points to 0. */ - __le32 status; - __le32 addr; /* Up to 63 addr/len pairs possible. */ - __le32 length; /* Set LAST_FRAG to indicate last pair. */ -}; -/* Values for the Rx status entry. */ -enum rx_desc_status { - RxDComplete=0x00008000, RxDError=0x4000, - /* See boomerang_rx() for actual error bits */ - IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27, - IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31, -}; - -#ifdef MAX_SKB_FRAGS -#define DO_ZEROCOPY 1 -#else -#define DO_ZEROCOPY 0 -#endif - -struct boom_tx_desc { - __le32 next; /* Last entry points to 0. */ - __le32 status; /* bits 0:12 length, others see below. */ -#if DO_ZEROCOPY - struct { - __le32 addr; - __le32 length; - } frag[1+MAX_SKB_FRAGS]; -#else - __le32 addr; - __le32 length; -#endif -}; - -/* Values for the Tx status entry. */ -enum tx_desc_status { - CRCDisable=0x2000, TxDComplete=0x8000, - AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000, - TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */ -}; - -/* Chip features we care about in vp->capabilities, read from the EEPROM. */ -enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 }; - -struct vortex_extra_stats { - unsigned long tx_deferred; - unsigned long tx_max_collisions; - unsigned long tx_multiple_collisions; - unsigned long tx_single_collisions; - unsigned long rx_bad_ssd; -}; - -struct vortex_private { - /* The Rx and Tx rings should be quad-word-aligned. */ - struct boom_rx_desc* rx_ring; - struct boom_tx_desc* tx_ring; - dma_addr_t rx_ring_dma; - dma_addr_t tx_ring_dma; - /* The addresses of transmit- and receive-in-place skbuffs. */ - struct sk_buff* rx_skbuff[RX_RING_SIZE]; - struct sk_buff* tx_skbuff[TX_RING_SIZE]; - unsigned int cur_rx, cur_tx; /* The next free ring entry */ - unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ - struct vortex_extra_stats xstats; /* NIC-specific extra stats */ - struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ - dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ - - /* PCI configuration space information. */ - struct device *gendev; - void __iomem *ioaddr; /* IO address space */ - void __iomem *cb_fn_base; /* CardBus function status addr space. */ - - /* Some values here only for performance evaluation and path-coverage */ - int rx_nocopy, rx_copy, queued_packet, rx_csumhits; - int card_idx; - - /* The remainder are related to chip state, mostly media selection. */ - struct timer_list timer; /* Media selection timer. */ - struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */ - int options; /* User-settable misc. driver options. */ - unsigned int media_override:4, /* Passed-in media type. */ - default_media:4, /* Read from the EEPROM/Wn3_Config. */ - full_duplex:1, autoselect:1, - bus_master:1, /* Vortex can only do a fragment bus-m. 
*/ - full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */ - flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */ - partner_flow_ctrl:1, /* Partner supports flow control */ - has_nway:1, - enable_wol:1, /* Wake-on-LAN is enabled */ - pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ - open:1, - medialock:1, - must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ - large_frames:1, /* accept large frames */ - handling_irq:1; /* private in_irq indicator */ - /* {get|set}_wol operations are already serialized by rtnl. - * no additional locking is required for the enable_wol and acpi_set_WOL() - */ - int drv_flags; - u16 status_enable; - u16 intr_enable; - u16 available_media; /* From Wn3_Options. */ - u16 capabilities, info1, info2; /* Various, from EEPROM. */ - u16 advertising; /* NWay media advertisement */ - unsigned char phys[2]; /* MII device addresses. */ - u16 deferred; /* Resend these interrupts when we - * bale from the ISR */ - u16 io_size; /* Size of PCI region (for release_region) */ - - /* Serialises access to hardware other than MII and variables below. - * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */ - spinlock_t lock; - - spinlock_t mii_lock; /* Serialises access to MII */ - struct mii_if_info mii; /* MII lib hooks/info */ - spinlock_t window_lock; /* Serialises access to windowed regs */ - int window; /* Register window */ -}; - -static void window_set(struct vortex_private *vp, int window) -{ - if (window != vp->window) { - iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD); - vp->window = window; - } -} - -#define DEFINE_WINDOW_IO(size) \ -static u ## size \ -window_read ## size(struct vortex_private *vp, int window, int addr) \ -{ \ - unsigned long flags; \ - u ## size ret; \ - spin_lock_irqsave(&vp->window_lock, flags); \ - window_set(vp, window); \ - ret = ioread ## size(vp->ioaddr + addr); \ - spin_unlock_irqrestore(&vp->window_lock, flags); \ - return ret; \ -} \ -static void \ -window_write ## size(struct vortex_private *vp, u ## size value, \ - int window, int addr) \ -{ \ - unsigned long flags; \ - spin_lock_irqsave(&vp->window_lock, flags); \ - window_set(vp, window); \ - iowrite ## size(value, vp->ioaddr + addr); \ - spin_unlock_irqrestore(&vp->window_lock, flags); \ -} -DEFINE_WINDOW_IO(8) -DEFINE_WINDOW_IO(16) -DEFINE_WINDOW_IO(32) - -#ifdef CONFIG_PCI -#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL) -#else -#define DEVICE_PCI(dev) NULL -#endif - -#define VORTEX_PCI(vp) \ - ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)) - -#ifdef CONFIG_EISA -#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL) -#else -#define DEVICE_EISA(dev) NULL -#endif - -#define VORTEX_EISA(vp) \ - ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)) - -/* The action to take with a media selection timer tick. - Note that we deviate from the 3Com order by checking 10base2 before AUI. - */ -enum xcvr_types { - XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx, - XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, -}; - -static const struct media_table { - char *name; - unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ - mask:8, /* The transceiver-present bit in Wn3_Config.*/ - next:8; /* The media type to try next. */ - int wait; /* Time before we check media status. 
*/ -} media_tbl[] = { - { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10}, - { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10}, - { "undefined", 0, 0x80, XCVR_10baseT, 10000}, - { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10}, - { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10}, - { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10}, - { "MII", 0, 0x41, XCVR_10baseT, 3*HZ }, - { "undefined", 0, 0x01, XCVR_10baseT, 10000}, - { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ}, - { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ }, - { "Default", 0, 0xFF, XCVR_10baseT, 10000}, -}; - -static struct { - const char str[ETH_GSTRING_LEN]; -} ethtool_stats_keys[] = { - { "tx_deferred" }, - { "tx_max_collisions" }, - { "tx_multiple_collisions" }, - { "tx_single_collisions" }, - { "rx_bad_ssd" }, -}; - -/* number of ETHTOOL_GSTATS u64's */ -#define VORTEX_NUM_STATS 5 - -static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, - int chip_idx, int card_idx); -static int vortex_up(struct net_device *dev); -static void vortex_down(struct net_device *dev, int final); -static int vortex_open(struct net_device *dev); -static void mdio_sync(struct vortex_private *vp, int bits); -static int mdio_read(struct net_device *dev, int phy_id, int location); -static void mdio_write(struct net_device *vp, int phy_id, int location, int value); -static void vortex_timer(unsigned long arg); -static void rx_oom_timer(unsigned long arg); -static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static int vortex_rx(struct net_device *dev); -static int boomerang_rx(struct net_device *dev); -static irqreturn_t vortex_interrupt(int irq, void *dev_id); -static irqreturn_t boomerang_interrupt(int irq, void *dev_id); -static int vortex_close(struct net_device *dev); -static void dump_tx_ring(struct net_device *dev); -static void update_stats(void __iomem *ioaddr, struct net_device *dev); -static struct net_device_stats *vortex_get_stats(struct net_device *dev); -static void set_rx_mode(struct net_device *dev); -#ifdef CONFIG_PCI -static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -#endif -static void vortex_tx_timeout(struct net_device *dev); -static void acpi_set_WOL(struct net_device *dev); -static const struct ethtool_ops vortex_ethtool_ops; -static void set_8021q_mode(struct net_device *dev, int enable); - -/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ -/* Option count limit only -- unlimited interfaces are supported. */ -#define MAX_UNITS 8 -static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 }; -static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; -static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; -static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; -static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; -static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; -static int global_options = -1; -static int global_full_duplex = -1; -static int global_enable_wol = -1; -static int global_use_mmio = -1; - -/* Variables to work-around the Compaq PCI BIOS32 problem. 
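   The per-card option arrays above use GCC's ranged designated
   initializers: { [0 ... MAX_UNITS-1] = -1 } fills every slot with -1
   ("not set by the user") and keeps doing so if MAX_UNITS ever changes.
   With MAX_UNITS == 8 it is shorthand for:

        static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

   so a card with no explicit options[card_idx] falls back to
   global_options, which likewise defaults to -1.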
*/ -static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; -static struct net_device *compaq_net_device; - -static int vortex_cards_found; - -module_param(debug, int, 0); -module_param(global_options, int, 0); -module_param_array(options, int, NULL, 0); -module_param(global_full_duplex, int, 0); -module_param_array(full_duplex, int, NULL, 0); -module_param_array(hw_checksums, int, NULL, 0); -module_param_array(flow_ctrl, int, NULL, 0); -module_param(global_enable_wol, int, 0); -module_param_array(enable_wol, int, NULL, 0); -module_param(rx_copybreak, int, 0); -module_param(max_interrupt_work, int, 0); -module_param(compaq_ioaddr, int, 0); -module_param(compaq_irq, int, 0); -module_param(compaq_device_id, int, 0); -module_param(watchdog, int, 0); -module_param(global_use_mmio, int, 0); -module_param_array(use_mmio, int, NULL, 0); -MODULE_PARM_DESC(debug, "3c59x debug level (0-6)"); -MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex"); -MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset"); -MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)"); -MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset"); -MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)"); -MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)"); -MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)"); -MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset"); -MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames"); -MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt"); -MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)"); -MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)"); -MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)"); -MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds"); -MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset"); -MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)"); - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void poll_vortex(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - unsigned long flags; - local_irq_save(flags); - (vp->full_bus_master_rx ? 
boomerang_interrupt:vortex_interrupt)(dev->irq,dev); - local_irq_restore(flags); -} -#endif - -#ifdef CONFIG_PM - -static int vortex_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *ndev = pci_get_drvdata(pdev); - - if (!ndev || !netif_running(ndev)) - return 0; - - netif_device_detach(ndev); - vortex_down(ndev, 1); - - return 0; -} - -static int vortex_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *ndev = pci_get_drvdata(pdev); - int err; - - if (!ndev || !netif_running(ndev)) - return 0; - - err = vortex_up(ndev); - if (err) - return err; - - netif_device_attach(ndev); - - return 0; -} - -static const struct dev_pm_ops vortex_pm_ops = { - .suspend = vortex_suspend, - .resume = vortex_resume, - .freeze = vortex_suspend, - .thaw = vortex_resume, - .poweroff = vortex_suspend, - .restore = vortex_resume, -}; - -#define VORTEX_PM_OPS (&vortex_pm_ops) - -#else /* !CONFIG_PM */ - -#define VORTEX_PM_OPS NULL - -#endif /* !CONFIG_PM */ - -#ifdef CONFIG_EISA -static struct eisa_device_id vortex_eisa_ids[] = { - { "TCM5920", CH_3C592 }, - { "TCM5970", CH_3C597 }, - { "" } -}; -MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); - -static int __init vortex_eisa_probe(struct device *device) -{ - void __iomem *ioaddr; - struct eisa_device *edev; - - edev = to_eisa_device(device); - - if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME)) - return -EBUSY; - - ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE); - - if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12, - edev->id.driver_data, vortex_cards_found)) { - release_region(edev->base_addr, VORTEX_TOTAL_SIZE); - return -ENODEV; - } - - vortex_cards_found++; - - return 0; -} - -static int __devexit vortex_eisa_remove(struct device *device) -{ - struct eisa_device *edev; - struct net_device *dev; - struct vortex_private *vp; - void __iomem *ioaddr; - - edev = to_eisa_device(device); - dev = eisa_get_drvdata(edev); - - if (!dev) { - pr_err("vortex_eisa_remove called for Compaq device!\n"); - BUG(); - } - - vp = netdev_priv(dev); - ioaddr = vp->ioaddr; - - unregister_netdev(dev); - iowrite16(TotalReset|0x14, ioaddr + EL3_CMD); - release_region(dev->base_addr, VORTEX_TOTAL_SIZE); - - free_netdev(dev); - return 0; -} - -static struct eisa_driver vortex_eisa_driver = { - .id_table = vortex_eisa_ids, - .driver = { - .name = "3c59x", - .probe = vortex_eisa_probe, - .remove = __devexit_p(vortex_eisa_remove) - } -}; - -#endif /* CONFIG_EISA */ - -/* returns count found (>= 0), or negative on error */ -static int __init vortex_eisa_init(void) -{ - int eisa_found = 0; - int orig_cards_found = vortex_cards_found; - -#ifdef CONFIG_EISA - int err; - - err = eisa_driver_register (&vortex_eisa_driver); - if (!err) { - /* - * Because of the way EISA bus is probed, we cannot assume - * any device have been found when we exit from - * eisa_driver_register (the bus root driver may not be - * initialized yet). So we blindly assume something was - * found, and let the sysfs magic happened... - */ - eisa_found = 1; - } -#endif - - /* Special code to work-around the Compaq PCI BIOS32 problem. 
*/ - if (compaq_ioaddr) { - vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE), - compaq_irq, compaq_device_id, vortex_cards_found++); - } - - return vortex_cards_found - orig_cards_found + eisa_found; -} - -/* returns count (>= 0), or negative on error */ -static int __devinit vortex_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - int rc, unit, pci_bar; - struct vortex_chip_info *vci; - void __iomem *ioaddr; - - /* wake up and enable device */ - rc = pci_enable_device(pdev); - if (rc < 0) - goto out; - - unit = vortex_cards_found; - - if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { - /* Determine the default if the user didn't override us */ - vci = &vortex_info_tbl[ent->driver_data]; - pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0; - } else if (unit < MAX_UNITS && use_mmio[unit] >= 0) - pci_bar = use_mmio[unit] ? 1 : 0; - else - pci_bar = global_use_mmio ? 1 : 0; - - ioaddr = pci_iomap(pdev, pci_bar, 0); - if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ - ioaddr = pci_iomap(pdev, 0, 0); - if (!ioaddr) { - pci_disable_device(pdev); - rc = -ENOMEM; - goto out; - } - - rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, - ent->driver_data, unit); - if (rc < 0) { - pci_iounmap(pdev, ioaddr); - pci_disable_device(pdev); - goto out; - } - - vortex_cards_found++; - -out: - return rc; -} - -static const struct net_device_ops boomrang_netdev_ops = { - .ndo_open = vortex_open, - .ndo_stop = vortex_close, - .ndo_start_xmit = boomerang_start_xmit, - .ndo_tx_timeout = vortex_tx_timeout, - .ndo_get_stats = vortex_get_stats, -#ifdef CONFIG_PCI - .ndo_do_ioctl = vortex_ioctl, -#endif - .ndo_set_multicast_list = set_rx_mode, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = poll_vortex, -#endif -}; - -static const struct net_device_ops vortex_netdev_ops = { - .ndo_open = vortex_open, - .ndo_stop = vortex_close, - .ndo_start_xmit = vortex_start_xmit, - .ndo_tx_timeout = vortex_tx_timeout, - .ndo_get_stats = vortex_get_stats, -#ifdef CONFIG_PCI - .ndo_do_ioctl = vortex_ioctl, -#endif - .ndo_set_multicast_list = set_rx_mode, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = poll_vortex, -#endif -}; - -/* - * Start up the PCI/EISA device which is described by *gendev. - * Return 0 on success. 
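   An aside on the BAR choice in vortex_init_one() above: the precedence is
   per-card use_mmio[] first, then global_use_mmio, then a per-chip default
   (BAR 1, the memory-mapped window, for Cyclone/Tornado parts; BAR 0, I/O
   ports, otherwise). The same decision as a sketch, under the driver's
   variable names (the helper itself is hypothetical):

        static int vortex_pick_bar(const struct vortex_chip_info *vci, int unit)
        {
                if (unit < MAX_UNITS && use_mmio[unit] >= 0)
                        return use_mmio[unit] ? 1 : 0;  /* per-card override */
                if (global_use_mmio >= 0)
                        return global_use_mmio ? 1 : 0; /* global override */
                /* chip default: Cyclone/Tornado decode BAR 1 as MMIO */
                return (vci->drv_flags & (IS_CYCLONE | IS_TORNADO)) ? 1 : 0;
        }

   The driver open-codes this test and additionally falls back to BAR 0 if
   pci_iomap() on the chosen BAR fails.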
- * - * NOTE: pdev can be NULL, for the case of a Compaq device - */ -static int __devinit vortex_probe1(struct device *gendev, - void __iomem *ioaddr, int irq, - int chip_idx, int card_idx) -{ - struct vortex_private *vp; - int option; - unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ - int i, step; - struct net_device *dev; - static int printed_version; - int retval, print_info; - struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx]; - const char *print_name = "3c59x"; - struct pci_dev *pdev = NULL; - struct eisa_device *edev = NULL; - - if (!printed_version) { - pr_info("%s", version); - printed_version = 1; - } - - if (gendev) { - if ((pdev = DEVICE_PCI(gendev))) { - print_name = pci_name(pdev); - } - - if ((edev = DEVICE_EISA(gendev))) { - print_name = dev_name(&edev->dev); - } - } - - dev = alloc_etherdev(sizeof(*vp)); - retval = -ENOMEM; - if (!dev) { - pr_err(PFX "unable to allocate etherdev, aborting\n"); - goto out; - } - SET_NETDEV_DEV(dev, gendev); - vp = netdev_priv(dev); - - option = global_options; - - /* The lower four bits are the media type. */ - if (dev->mem_start) { - /* - * The 'options' param is passed in as the third arg to the - * LILO 'ether=' argument for non-modular use - */ - option = dev->mem_start; - } - else if (card_idx < MAX_UNITS) { - if (options[card_idx] >= 0) - option = options[card_idx]; - } - - if (option > 0) { - if (option & 0x8000) - vortex_debug = 7; - if (option & 0x4000) - vortex_debug = 2; - if (option & 0x0400) - vp->enable_wol = 1; - } - - print_info = (vortex_debug > 1); - if (print_info) - pr_info("See Documentation/networking/vortex.txt\n"); - - pr_info("%s: 3Com %s %s at %p.\n", - print_name, - pdev ? "PCI" : "EISA", - vci->name, - ioaddr); - - dev->base_addr = (unsigned long)ioaddr; - dev->irq = irq; - dev->mtu = mtu; - vp->ioaddr = ioaddr; - vp->large_frames = mtu > 1500; - vp->drv_flags = vci->drv_flags; - vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0; - vp->io_size = vci->io_size; - vp->card_idx = card_idx; - vp->window = -1; - - /* module list only for Compaq device */ - if (gendev == NULL) { - compaq_net_device = dev; - } - - /* PCI-only startup logic */ - if (pdev) { - /* EISA resources already marked, so only PCI needs to do this here */ - /* Ignore return value, because Cardbus drivers already allocate for us */ - if (request_region(dev->base_addr, vci->io_size, print_name) != NULL) - vp->must_free_region = 1; - - /* enable bus-mastering if necessary */ - if (vci->flags & PCI_USES_MASTER) - pci_set_master(pdev); - - if (vci->drv_flags & IS_VORTEX) { - u8 pci_latency; - u8 new_latency = 248; - - /* Check the PCI latency value. On the 3c590 series the latency timer - must be set to the maximum value to avoid data corruption that occurs - when the timer expires during a transfer. This bug exists the Vortex - chip only. */ - pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); - if (pci_latency < new_latency) { - pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n", - print_name, pci_latency, new_latency); - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency); - } - } - } - - spin_lock_init(&vp->lock); - spin_lock_init(&vp->mii_lock); - spin_lock_init(&vp->window_lock); - vp->gendev = gendev; - vp->mii.dev = dev; - vp->mii.mdio_read = mdio_read; - vp->mii.mdio_write = mdio_write; - vp->mii.phy_id_mask = 0x1f; - vp->mii.reg_num_mask = 0x1f; - - /* Makes sure rings are at least 16 byte aligned. 
*/ - vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE - + sizeof(struct boom_tx_desc) * TX_RING_SIZE, - &vp->rx_ring_dma); - retval = -ENOMEM; - if (!vp->rx_ring) - goto free_region; - - vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); - vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; - - /* if we are a PCI driver, we store info in pdev->driver_data - * instead of a module list */ - if (pdev) - pci_set_drvdata(pdev, dev); - if (edev) - eisa_set_drvdata(edev, dev); - - vp->media_override = 7; - if (option >= 0) { - vp->media_override = ((option & 7) == 2) ? 0 : option & 15; - if (vp->media_override != 7) - vp->medialock = 1; - vp->full_duplex = (option & 0x200) ? 1 : 0; - vp->bus_master = (option & 16) ? 1 : 0; - } - - if (global_full_duplex > 0) - vp->full_duplex = 1; - if (global_enable_wol > 0) - vp->enable_wol = 1; - - if (card_idx < MAX_UNITS) { - if (full_duplex[card_idx] > 0) - vp->full_duplex = 1; - if (flow_ctrl[card_idx] > 0) - vp->flow_ctrl = 1; - if (enable_wol[card_idx] > 0) - vp->enable_wol = 1; - } - - vp->mii.force_media = vp->full_duplex; - vp->options = option; - /* Read the station address from the EEPROM. */ - { - int base; - - if (vci->drv_flags & EEPROM_8BIT) - base = 0x230; - else if (vci->drv_flags & EEPROM_OFFSET) - base = EEPROM_Read + 0x30; - else - base = EEPROM_Read; - - for (i = 0; i < 0x40; i++) { - int timer; - window_write16(vp, base + i, 0, Wn0EepromCmd); - /* Pause for at least 162 us. for the read to take place. */ - for (timer = 10; timer >= 0; timer--) { - udelay(162); - if ((window_read16(vp, 0, Wn0EepromCmd) & - 0x8000) == 0) - break; - } - eeprom[i] = window_read16(vp, 0, Wn0EepromData); - } - } - for (i = 0; i < 0x18; i++) - checksum ^= eeprom[i]; - checksum = (checksum ^ (checksum >> 8)) & 0xff; - if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */ - while (i < 0x21) - checksum ^= eeprom[i++]; - checksum = (checksum ^ (checksum >> 8)) & 0xff; - } - if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) - pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); - for (i = 0; i < 3; i++) - ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - if (print_info) - pr_cont(" %pM", dev->dev_addr); - /* Unfortunately an all zero eeprom passes the checksum and this - gets found in the wild in failure cases. Crypto is hard 8) */ - if (!is_valid_ether_addr(dev->dev_addr)) { - retval = -EINVAL; - pr_err("*** EEPROM MAC address is invalid.\n"); - goto free_ring; /* With every pack */ - } - for (i = 0; i < 6; i++) - window_write8(vp, dev->dev_addr[i], 2, i); - - if (print_info) - pr_cont(", IRQ %d\n", dev->irq); - /* Tell them about an invalid IRQ. */ - if (dev->irq <= 0 || dev->irq >= nr_irqs) - pr_warning(" *** Warning: IRQ %d is unlikely to work! 
***\n", - dev->irq); - - step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1; - if (print_info) { - pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n", - eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14], - step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9); - } - - - if (pdev && vci->drv_flags & HAS_CB_FNS) { - unsigned short n; - - vp->cb_fn_base = pci_iomap(pdev, 2, 0); - if (!vp->cb_fn_base) { - retval = -ENOMEM; - goto free_ring; - } - - if (print_info) { - pr_info("%s: CardBus functions mapped %16.16llx->%p\n", - print_name, - (unsigned long long)pci_resource_start(pdev, 2), - vp->cb_fn_base); - } - - n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; - if (vp->drv_flags & INVERT_LED_PWR) - n |= 0x10; - if (vp->drv_flags & INVERT_MII_PWR) - n |= 0x4000; - window_write16(vp, n, 2, Wn2_ResetOptions); - if (vp->drv_flags & WNO_XCVR_PWR) { - window_write16(vp, 0x0800, 0, 0); - } - } - - /* Extract our information from the EEPROM data. */ - vp->info1 = eeprom[13]; - vp->info2 = eeprom[15]; - vp->capabilities = eeprom[16]; - - if (vp->info1 & 0x8000) { - vp->full_duplex = 1; - if (print_info) - pr_info("Full duplex capable\n"); - } - - { - static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; - unsigned int config; - vp->available_media = window_read16(vp, 3, Wn3_Options); - if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */ - vp->available_media = 0x40; - config = window_read32(vp, 3, Wn3_Config); - if (print_info) { - pr_debug(" Internal config register is %4.4x, transceivers %#x.\n", - config, window_read16(vp, 3, Wn3_Options)); - pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", - 8 << RAM_SIZE(config), - RAM_WIDTH(config) ? "word" : "byte", - ram_split[RAM_SPLIT(config)], - AUTOSELECT(config) ? "autoselect/" : "", - XCVR(config) > XCVR_ExtMII ? "" : - media_tbl[XCVR(config)].name); - } - vp->default_media = XCVR(config); - if (vp->default_media == XCVR_NWAY) - vp->has_nway = 1; - vp->autoselect = AUTOSELECT(config); - } - - if (vp->media_override != 7) { - pr_info("%s: Media override to transceiver type %d (%s).\n", - print_name, vp->media_override, - media_tbl[vp->media_override].name); - dev->if_port = vp->media_override; - } else - dev->if_port = vp->default_media; - - if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) || - dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { - int phy, phy_idx = 0; - mii_preamble_required++; - if (vp->drv_flags & EXTRA_PREAMBLE) - mii_preamble_required++; - mdio_sync(vp, 32); - mdio_read(dev, 24, MII_BMSR); - for (phy = 0; phy < 32 && phy_idx < 1; phy++) { - int mii_status, phyx; - - /* - * For the 3c905CX we look at index 24 first, because it bogusly - * reports an external PHY at all indices - */ - if (phy == 0) - phyx = 24; - else if (phy <= 24) - phyx = phy - 1; - else - phyx = phy; - mii_status = mdio_read(dev, phyx, MII_BMSR); - if (mii_status && mii_status != 0xffff) { - vp->phys[phy_idx++] = phyx; - if (print_info) { - pr_info(" MII transceiver found at address %d, status %4x.\n", - phyx, mii_status); - } - if ((mii_status & 0x0040) == 0) - mii_preamble_required++; - } - } - mii_preamble_required--; - if (phy_idx == 0) { - pr_warning(" ***WARNING*** No MII transceivers found!\n"); - vp->phys[0] = 24; - } else { - vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE); - if (vp->full_duplex) { - /* Only advertise the FD media types. 
*/ - vp->advertising &= ~0x02A0; - mdio_write(dev, vp->phys[0], 4, vp->advertising); - } - } - vp->mii.phy_id = vp->phys[0]; - } - - if (vp->capabilities & CapBusMaster) { - vp->full_bus_master_tx = 1; - if (print_info) { - pr_info(" Enabling bus-master transmits and %s receives.\n", - (vp->info2 & 1) ? "early" : "whole-frame" ); - } - vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2; - vp->bus_master = 0; /* AKPM: vortex only */ - } - - /* The 3c59x-specific entries in the device structure. */ - if (vp->full_bus_master_tx) { - dev->netdev_ops = &boomrang_netdev_ops; - /* Actually, it still should work with iommu. */ - if (card_idx < MAX_UNITS && - ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) || - hw_checksums[card_idx] == 1)) { - dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; - } - } else - dev->netdev_ops = &vortex_netdev_ops; - - if (print_info) { - pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n", - print_name, - (dev->features & NETIF_F_SG) ? "en":"dis", - (dev->features & NETIF_F_IP_CSUM) ? "en":"dis"); - } - - dev->ethtool_ops = &vortex_ethtool_ops; - dev->watchdog_timeo = (watchdog * HZ) / 1000; - - if (pdev) { - vp->pm_state_valid = 1; - pci_save_state(VORTEX_PCI(vp)); - acpi_set_WOL(dev); - } - retval = register_netdev(dev); - if (retval == 0) - return 0; - -free_ring: - pci_free_consistent(pdev, - sizeof(struct boom_rx_desc) * RX_RING_SIZE - + sizeof(struct boom_tx_desc) * TX_RING_SIZE, - vp->rx_ring, - vp->rx_ring_dma); -free_region: - if (vp->must_free_region) - release_region(dev->base_addr, vci->io_size); - free_netdev(dev); - pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); -out: - return retval; -} - -static void -issue_and_wait(struct net_device *dev, int cmd) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - int i; - - iowrite16(cmd, ioaddr + EL3_CMD); - for (i = 0; i < 2000; i++) { - if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) - return; - } - - /* OK, that didn't work. Do it the slow way. One second */ - for (i = 0; i < 100000; i++) { - if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) { - if (vortex_debug > 1) - pr_info("%s: command 0x%04x took %d usecs\n", - dev->name, cmd, i * 10); - return; - } - udelay(10); - } - pr_err("%s: command 0x%04x did not complete! Status=0x%x\n", - dev->name, cmd, ioread16(ioaddr + EL3_STATUS)); -} - -static void -vortex_set_duplex(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - - pr_info("%s: setting %s-duplex.\n", - dev->name, (vp->full_duplex) ? "full" : "half"); - - /* Set the full-duplex bit. */ - window_write16(vp, - ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | - (vp->large_frames ? 0x40 : 0) | - ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 
- 0x100 : 0), - 3, Wn3_MAC_Ctrl); -} - -static void vortex_check_media(struct net_device *dev, unsigned int init) -{ - struct vortex_private *vp = netdev_priv(dev); - unsigned int ok_to_print = 0; - - if (vortex_debug > 3) - ok_to_print = 1; - - if (mii_check_media(&vp->mii, ok_to_print, init)) { - vp->full_duplex = vp->mii.full_duplex; - vortex_set_duplex(dev); - } else if (init) { - vortex_set_duplex(dev); - } -} - -static int -vortex_up(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - unsigned int config; - int i, mii_reg1, mii_reg5, err = 0; - - if (VORTEX_PCI(vp)) { - pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ - if (vp->pm_state_valid) - pci_restore_state(VORTEX_PCI(vp)); - err = pci_enable_device(VORTEX_PCI(vp)); - if (err) { - pr_warning("%s: Could not enable device\n", - dev->name); - goto err_out; - } - } - - /* Before initializing select the active media port. */ - config = window_read32(vp, 3, Wn3_Config); - - if (vp->media_override != 7) { - pr_info("%s: Media override to transceiver %d (%s).\n", - dev->name, vp->media_override, - media_tbl[vp->media_override].name); - dev->if_port = vp->media_override; - } else if (vp->autoselect) { - if (vp->has_nway) { - if (vortex_debug > 1) - pr_info("%s: using NWAY device table, not %d\n", - dev->name, dev->if_port); - dev->if_port = XCVR_NWAY; - } else { - /* Find first available media type, starting with 100baseTx. */ - dev->if_port = XCVR_100baseTx; - while (! (vp->available_media & media_tbl[dev->if_port].mask)) - dev->if_port = media_tbl[dev->if_port].next; - if (vortex_debug > 1) - pr_info("%s: first available media type: %s\n", - dev->name, media_tbl[dev->if_port].name); - } - } else { - dev->if_port = vp->default_media; - if (vortex_debug > 1) - pr_info("%s: using default media %s\n", - dev->name, media_tbl[dev->if_port].name); - } - - init_timer(&vp->timer); - vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait); - vp->timer.data = (unsigned long)dev; - vp->timer.function = vortex_timer; /* timer handler */ - add_timer(&vp->timer); - - init_timer(&vp->rx_oom_timer); - vp->rx_oom_timer.data = (unsigned long)dev; - vp->rx_oom_timer.function = rx_oom_timer; - - if (vortex_debug > 1) - pr_debug("%s: Initial media type %s.\n", - dev->name, media_tbl[dev->if_port].name); - - vp->full_duplex = vp->mii.force_media; - config = BFINS(config, dev->if_port, 20, 4); - if (vortex_debug > 6) - pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config); - window_write32(vp, config, 3, Wn3_Config); - - if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { - mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); - mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); - vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); - vp->mii.full_duplex = vp->full_duplex; - - vortex_check_media(dev, 1); - } - else - vortex_set_duplex(dev); - - issue_and_wait(dev, TxReset); - /* - * Don't reset the PHY - that upsets autonegotiation during DHCP operations. - */ - issue_and_wait(dev, RxReset|0x04); - - - iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); - - if (vortex_debug > 1) { - pr_debug("%s: vortex_up() irq %d media status %4.4x.\n", - dev->name, dev->irq, window_read16(vp, 4, Wn4_Media)); - } - - /* Set the station address and mask in window 2 each time opened. 
*/ - for (i = 0; i < 6; i++) - window_write8(vp, dev->dev_addr[i], 2, i); - for (; i < 12; i+=2) - window_write16(vp, 0, 2, i); - - if (vp->cb_fn_base) { - unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; - if (vp->drv_flags & INVERT_LED_PWR) - n |= 0x10; - if (vp->drv_flags & INVERT_MII_PWR) - n |= 0x4000; - window_write16(vp, n, 2, Wn2_ResetOptions); - } - - if (dev->if_port == XCVR_10base2) - /* Start the thinnet transceiver. We should really wait 50ms...*/ - iowrite16(StartCoax, ioaddr + EL3_CMD); - if (dev->if_port != XCVR_NWAY) { - window_write16(vp, - (window_read16(vp, 4, Wn4_Media) & - ~(Media_10TP|Media_SQE)) | - media_tbl[dev->if_port].media_bits, - 4, Wn4_Media); - } - - /* Switch to the stats window, and clear all stats by reading. */ - iowrite16(StatsDisable, ioaddr + EL3_CMD); - for (i = 0; i < 10; i++) - window_read8(vp, 6, i); - window_read16(vp, 6, 10); - window_read16(vp, 6, 12); - /* New: On the Vortex we must also clear the BadSSD counter. */ - window_read8(vp, 4, 12); - /* ..and on the Boomerang we enable the extra statistics bits. */ - window_write16(vp, 0x0040, 4, Wn4_NetDiag); - - if (vp->full_bus_master_rx) { /* Boomerang bus master. */ - vp->cur_rx = vp->dirty_rx = 0; - /* Initialize the RxEarly register as recommended. */ - iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); - iowrite32(0x0020, ioaddr + PktStatus); - iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr); - } - if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ - vp->cur_tx = vp->dirty_tx = 0; - if (vp->drv_flags & IS_BOOMERANG) - iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */ - /* Clear the Rx, Tx rings. */ - for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */ - vp->rx_ring[i].status = 0; - for (i = 0; i < TX_RING_SIZE; i++) - vp->tx_skbuff[i] = NULL; - iowrite32(0, ioaddr + DownListPtr); - } - /* Set receiver mode: presumably accept b-case and phys addr only. */ - set_rx_mode(dev); - /* enable 802.1q tagged frames */ - set_8021q_mode(dev, 1); - iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ - - iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ - iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ - /* Allow status bits to be seen. */ - vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete| - (vp->full_bus_master_tx ? DownComplete : TxAvailable) | - (vp->full_bus_master_rx ? UpComplete : RxComplete) | - (vp->bus_master ? DMADone : 0); - vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable | - (vp->full_bus_master_rx ? 0 : RxComplete) | - StatsFull | HostError | TxComplete | IntReq - | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete; - iowrite16(vp->status_enable, ioaddr + EL3_CMD); - /* Ack all pending events, and set active indicator mask. */ - iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, - ioaddr + EL3_CMD); - iowrite16(vp->intr_enable, ioaddr + EL3_CMD); - if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ - iowrite32(0x8000, vp->cb_fn_base + 4); - netif_start_queue (dev); -err_out: - return err; -} - -static int -vortex_open(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - int i; - int retval; - - /* Use the now-standard shared IRQ implementation. */ - if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 
- boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) { - pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); - goto err; - } - - if (vp->full_bus_master_rx) { /* Boomerang bus master. */ - if (vortex_debug > 2) - pr_debug("%s: Filling in the Rx ring.\n", dev->name); - for (i = 0; i < RX_RING_SIZE; i++) { - struct sk_buff *skb; - vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); - vp->rx_ring[i].status = 0; /* Clear complete bit. */ - vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); - - skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN, - GFP_KERNEL); - vp->rx_skbuff[i] = skb; - if (skb == NULL) - break; /* Bad news! */ - - skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ - vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); - } - if (i != RX_RING_SIZE) { - int j; - pr_emerg("%s: no memory for rx ring\n", dev->name); - for (j = 0; j < i; j++) { - if (vp->rx_skbuff[j]) { - dev_kfree_skb(vp->rx_skbuff[j]); - vp->rx_skbuff[j] = NULL; - } - } - retval = -ENOMEM; - goto err_free_irq; - } - /* Wrap the ring. */ - vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); - } - - retval = vortex_up(dev); - if (!retval) - goto out; - -err_free_irq: - free_irq(dev->irq, dev); -err: - if (vortex_debug > 1) - pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval); -out: - return retval; -} - -static void -vortex_timer(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - int next_tick = 60*HZ; - int ok = 0; - int media_status; - - if (vortex_debug > 2) { - pr_debug("%s: Media selection timer tick happened, %s.\n", - dev->name, media_tbl[dev->if_port].name); - pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo); - } - - media_status = window_read16(vp, 4, Wn4_Media); - switch (dev->if_port) { - case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx: - if (media_status & Media_LnkBeat) { - netif_carrier_on(dev); - ok = 1; - if (vortex_debug > 1) - pr_debug("%s: Media %s has link beat, %x.\n", - dev->name, media_tbl[dev->if_port].name, media_status); - } else { - netif_carrier_off(dev); - if (vortex_debug > 1) { - pr_debug("%s: Media %s has no link beat, %x.\n", - dev->name, media_tbl[dev->if_port].name, media_status); - } - } - break; - case XCVR_MII: case XCVR_NWAY: - { - ok = 1; - vortex_check_media(dev, 0); - } - break; - default: /* Other media types handled by Tx timeouts. */ - if (vortex_debug > 1) - pr_debug("%s: Media %s has no indication, %x.\n", - dev->name, media_tbl[dev->if_port].name, media_status); - ok = 1; - } - - if (!netif_carrier_ok(dev)) - next_tick = 5*HZ; - - if (vp->medialock) - goto leave_media_alone; - - if (!ok) { - unsigned int config; - - spin_lock_irq(&vp->lock); - - do { - dev->if_port = media_tbl[dev->if_port].next; - } while ( ! (vp->available_media & media_tbl[dev->if_port].mask)); - if (dev->if_port == XCVR_Default) { /* Go back to default. 
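vortex_open() above builds the Boomerang Rx ring by hand: every descriptor's next field holds the bus address of the following descriptor, and after the fill loop the final entry is pointed back at the ring base. A simplified model of that layout follows, with a fake bus address standing in for the real pci_map_single() mapping and vp->rx_ring_dma.

#include <stdint.h>
#include <stdio.h>

#define RX_RING_SIZE 32
#define PKT_BUF_SZ 1536
#define LAST_FRAG 0x80000000u

struct rx_desc {
	uint32_t next;      /* bus address of the next descriptor */
	uint32_t status;    /* 0 = not yet filled in by the NIC */
	uint32_t addr;      /* bus address of the packet buffer */
	uint32_t length;
};

int main(void)
{
	struct rx_desc ring[RX_RING_SIZE];
	uint32_t dma_base = 0x10000000;     /* pretend bus address */

	for (int i = 0; i < RX_RING_SIZE; i++) {
		ring[i].next = dma_base + sizeof(struct rx_desc) * (i + 1);
		ring[i].status = 0;
		ring[i].length = PKT_BUF_SZ | LAST_FRAG;
		ring[i].addr = 0;           /* would be the skb's mapped address */
	}
	ring[RX_RING_SIZE - 1].next = dma_base;   /* wrap the ring */

	printf("last->next = 0x%08x (== base)\n",
	       (unsigned)ring[RX_RING_SIZE - 1].next);
	return 0;
}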
*/ - dev->if_port = vp->default_media; - if (vortex_debug > 1) - pr_debug("%s: Media selection failing, using default %s port.\n", - dev->name, media_tbl[dev->if_port].name); - } else { - if (vortex_debug > 1) - pr_debug("%s: Media selection failed, now trying %s port.\n", - dev->name, media_tbl[dev->if_port].name); - next_tick = media_tbl[dev->if_port].wait; - } - window_write16(vp, - (media_status & ~(Media_10TP|Media_SQE)) | - media_tbl[dev->if_port].media_bits, - 4, Wn4_Media); - - config = window_read32(vp, 3, Wn3_Config); - config = BFINS(config, dev->if_port, 20, 4); - window_write32(vp, config, 3, Wn3_Config); - - iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax, - ioaddr + EL3_CMD); - if (vortex_debug > 1) - pr_debug("wrote 0x%08x to Wn3_Config\n", config); - /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ - - spin_unlock_irq(&vp->lock); - } - -leave_media_alone: - if (vortex_debug > 2) - pr_debug("%s: Media selection timer finished, %s.\n", - dev->name, media_tbl[dev->if_port].name); - - mod_timer(&vp->timer, RUN_AT(next_tick)); - if (vp->deferred) - iowrite16(FakeIntr, ioaddr + EL3_CMD); -} - -static void vortex_tx_timeout(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - - pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", - dev->name, ioread8(ioaddr + TxStatus), - ioread16(ioaddr + EL3_STATUS)); - pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n", - window_read16(vp, 4, Wn4_NetDiag), - window_read16(vp, 4, Wn4_Media), - ioread32(ioaddr + PktStatus), - window_read16(vp, 4, Wn4_FIFODiag)); - /* Slight code bloat to be user friendly. */ - if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88) - pr_err("%s: Transmitter encountered 16 collisions --" - " network cable problem?\n", dev->name); - if (ioread16(ioaddr + EL3_STATUS) & IntLatch) { - pr_err("%s: Interrupt posted but not delivered --" - " IRQ blocked by another device?\n", dev->name); - /* Bad idea here.. but we might as well handle a few events. */ - { - /* - * Block interrupts because vortex_interrupt does a bare spin_lock() - */ - unsigned long flags; - local_irq_save(flags); - if (vp->full_bus_master_tx) - boomerang_interrupt(dev->irq, dev); - else - vortex_interrupt(dev->irq, dev); - local_irq_restore(flags); - } - } - - if (vortex_debug > 0) - dump_tx_ring(dev); - - issue_and_wait(dev, TxReset); - - dev->stats.tx_errors++; - if (vp->full_bus_master_tx) { - pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name); - if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0) - iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc), - ioaddr + DownListPtr); - if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) - netif_wake_queue (dev); - if (vp->drv_flags & IS_BOOMERANG) - iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); - iowrite16(DownUnstall, ioaddr + EL3_CMD); - } else { - dev->stats.tx_dropped++; - netif_wake_queue(dev); - } - - /* Issue Tx Enable */ - iowrite16(TxEnable, ioaddr + EL3_CMD); - dev->trans_start = jiffies; /* prevent tx timeout */ -} - -/* - * Handle uncommon interrupt sources. This is a separate routine to minimize - * the cache impact. 
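The media failover in vortex_timer() above is table-driven: media_tbl forms a circular list through its .next indices, and the do/while walk skips any transceiver the EEPROM's available-media mask does not advertise. Here is a toy version of the walk; the table entries and the mask are invented, and the real table also carries per-media wait times and Wn4_Media bits.

#include <stdio.h>

static const struct media {
	const char *name;
	int next;                    /* index of the next media to try */
} media_tbl[] = {
	[0] = { "10baseT",   1 },
	[1] = { "100baseTx", 2 },
	[2] = { "AUI",       3 },
	[3] = { "10base2",   0 },    /* wraps back around */
};

int main(void)
{
	unsigned int available = (1u << 0) | (1u << 3); /* EEPROM: TP + coax */
	int port = 0;                                   /* current, failing, media */

	do
		port = media_tbl[port].next;
	while (!(available & (1u << port)));

	printf("trying %s next\n", media_tbl[port].name);
	return 0;
}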
- */ -static void -vortex_error(struct net_device *dev, int status) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - int do_tx_reset = 0, reset_mask = 0; - unsigned char tx_status = 0; - - if (vortex_debug > 2) { - pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status); - } - - if (status & TxComplete) { /* Really "TxError" for us. */ - tx_status = ioread8(ioaddr + TxStatus); - /* Presumably a tx-timeout. We must merely re-enable. */ - if (vortex_debug > 2 || - (tx_status != 0x88 && vortex_debug > 0)) { - pr_err("%s: Transmit error, Tx status register %2.2x.\n", - dev->name, tx_status); - if (tx_status == 0x82) { - pr_err("Probably a duplex mismatch. See " - "Documentation/networking/vortex.txt\n"); - } - dump_tx_ring(dev); - } - if (tx_status & 0x14) dev->stats.tx_fifo_errors++; - if (tx_status & 0x38) dev->stats.tx_aborted_errors++; - if (tx_status & 0x08) vp->xstats.tx_max_collisions++; - iowrite8(0, ioaddr + TxStatus); - if (tx_status & 0x30) { /* txJabber or txUnderrun */ - do_tx_reset = 1; - } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */ - do_tx_reset = 1; - reset_mask = 0x0108; /* Reset interface logic, but not download logic */ - } else { /* Merely re-enable the transmitter. */ - iowrite16(TxEnable, ioaddr + EL3_CMD); - } - } - - if (status & RxEarly) /* Rx early is unused. */ - iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); - - if (status & StatsFull) { /* Empty statistics. */ - static int DoneDidThat; - if (vortex_debug > 4) - pr_debug("%s: Updating stats.\n", dev->name); - update_stats(ioaddr, dev); - /* HACK: Disable statistics as an interrupt source. */ - /* This occurs when we have the wrong media type! */ - if (DoneDidThat == 0 && - ioread16(ioaddr + EL3_STATUS) & StatsFull) { - pr_warning("%s: Updating statistics failed, disabling " - "stats as an interrupt source.\n", dev->name); - iowrite16(SetIntrEnb | - (window_read16(vp, 5, 10) & ~StatsFull), - ioaddr + EL3_CMD); - vp->intr_enable &= ~StatsFull; - DoneDidThat++; - } - } - if (status & IntReq) { /* Restore all interrupt sources. */ - iowrite16(vp->status_enable, ioaddr + EL3_CMD); - iowrite16(vp->intr_enable, ioaddr + EL3_CMD); - } - if (status & HostError) { - u16 fifo_diag; - fifo_diag = window_read16(vp, 4, Wn4_FIFODiag); - pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n", - dev->name, fifo_diag); - /* Adapter failure requires Tx/Rx reset and reinit. */ - if (vp->full_bus_master_tx) { - int bus_status = ioread32(ioaddr + PktStatus); - /* 0x80000000 PCI master abort. */ - /* 0x40000000 PCI target abort. */ - if (vortex_debug) - pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status); - - /* In this case, blow the card away */ - /* Must not enter D3 or we can't legally issue the reset! */ - vortex_down(dev, 0); - issue_and_wait(dev, TotalReset | 0xff); - vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */ - } else if (fifo_diag & 0x0400) - do_tx_reset = 1; - if (fifo_diag & 0x3000) { - /* Reset Rx fifo and upload logic */ - issue_and_wait(dev, RxReset|0x07); - /* Set the Rx filter to the current state. */ - set_rx_mode(dev); - /* enable 802.1q VLAN tagged frames */ - set_8021q_mode(dev, 1); - iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. 
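The TxStatus triage at the top of vortex_error() above has three outcomes: bump error counters, reset the transmitter outright (jabber or underrun, mask 0x30), or merely re-enable it. The following restatement uses the same bit masks as the function; the MAX_COLLISION_RESET special case is noted in a comment rather than modeled.

#include <stdio.h>

static void classify_tx_status(unsigned char s)
{
	printf("0x%02x:%s%s%s -> %s\n", s,
	       (s & 0x14) ? " fifo_error" : "",
	       (s & 0x38) ? " aborted"    : "",
	       (s & 0x08) ? " max_coll"   : "",
	       (s & 0x30) ? "TxReset" : "re-enable only");
	/* 0x30 = txJabber | txUnderrun; 0x08 alone forces a reset only on
	 * adapters with the MAX_COLLISION_RESET quirk. */
}

int main(void)
{
	classify_tx_status(0x82);	/* the duplex-mismatch signature */
	classify_tx_status(0x30);	/* jabber/underrun: reset */
	classify_tx_status(0x08);	/* 16 collisions */
	return 0;
}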
*/ - iowrite16(AckIntr | HostError, ioaddr + EL3_CMD); - } - } - - if (do_tx_reset) { - issue_and_wait(dev, TxReset|reset_mask); - iowrite16(TxEnable, ioaddr + EL3_CMD); - if (!vp->full_bus_master_tx) - netif_wake_queue(dev); - } -} - -static netdev_tx_t -vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - - /* Put out the doubleword header... */ - iowrite32(skb->len, ioaddr + TX_FIFO); - if (vp->bus_master) { - /* Set the bus-master controller to transfer the packet. */ - int len = (skb->len + 3) & ~3; - vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, - PCI_DMA_TODEVICE); - spin_lock_irq(&vp->window_lock); - window_set(vp, 7); - iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); - iowrite16(len, ioaddr + Wn7_MasterLen); - spin_unlock_irq(&vp->window_lock); - vp->tx_skb = skb; - iowrite16(StartDMADown, ioaddr + EL3_CMD); - /* netif_wake_queue() will be called at the DMADone interrupt. */ - } else { - /* ... and the packet rounded to a doubleword. */ - iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); - dev_kfree_skb (skb); - if (ioread16(ioaddr + TxFree) > 1536) { - netif_start_queue (dev); /* AKPM: redundant? */ - } else { - /* Interrupt us when the FIFO has room for max-sized packet. */ - netif_stop_queue(dev); - iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); - } - } - - - /* Clear the Tx status stack. */ - { - int tx_status; - int i = 32; - - while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) { - if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */ - if (vortex_debug > 2) - pr_debug("%s: Tx error, status %2.2x.\n", - dev->name, tx_status); - if (tx_status & 0x04) dev->stats.tx_fifo_errors++; - if (tx_status & 0x38) dev->stats.tx_aborted_errors++; - if (tx_status & 0x30) { - issue_and_wait(dev, TxReset); - } - iowrite16(TxEnable, ioaddr + EL3_CMD); - } - iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */ - } - } - return NETDEV_TX_OK; -} - -static netdev_tx_t -boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - /* Calculate the next Tx descriptor entry. */ - int entry = vp->cur_tx % TX_RING_SIZE; - struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; - unsigned long flags; - - if (vortex_debug > 6) { - pr_debug("boomerang_start_xmit()\n"); - pr_debug("%s: Trying to send a packet, Tx index %d.\n", - dev->name, vp->cur_tx); - } - - /* - * We can't allow a recursion from our interrupt handler back into the - * tx routine, as they take the same spin lock, and that causes - * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in - * a bit - */ - if (vp->handling_irq) - return NETDEV_TX_BUSY; - - if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { - if (vortex_debug > 0) - pr_warning("%s: BUG! 
Tx Ring full, refusing to send buffer.\n", - dev->name); - netif_stop_queue(dev); - return NETDEV_TX_BUSY; - } - - vp->tx_skbuff[entry] = skb; - - vp->tx_ring[entry].next = 0; -#if DO_ZEROCOPY - if (skb->ip_summed != CHECKSUM_PARTIAL) - vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); - else - vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); - - if (!skb_shinfo(skb)->nr_frags) { - vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, - skb->len, PCI_DMA_TODEVICE)); - vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); - } else { - int i; - - vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, - skb_headlen(skb), PCI_DMA_TODEVICE)); - vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - vp->tx_ring[entry].frag[i+1].addr = - cpu_to_le32(pci_map_single(VORTEX_PCI(vp), - (void*)page_address(frag->page) + frag->page_offset, - frag->size, PCI_DMA_TODEVICE)); - - if (i == skb_shinfo(skb)->nr_frags-1) - vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG); - else - vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size); - } - } -#else - vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); - vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); - vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); -#endif - - spin_lock_irqsave(&vp->lock, flags); - /* Wait for the stall to complete. */ - issue_and_wait(dev, DownStall); - prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)); - if (ioread32(ioaddr + DownListPtr) == 0) { - iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr); - vp->queued_packet++; - } - - vp->cur_tx++; - if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) { - netif_stop_queue (dev); - } else { /* Clear previous interrupt enable. */ -#if defined(tx_interrupt_mitigation) - /* Dubious. If in boomeang_interrupt "faster" cyclone ifdef - * were selected, this would corrupt DN_COMPLETE. No? - */ - prev_entry->status &= cpu_to_le32(~TxIntrUploaded); -#endif - } - iowrite16(DownUnstall, ioaddr + EL3_CMD); - spin_unlock_irqrestore(&vp->lock, flags); - return NETDEV_TX_OK; -} - -/* The interrupt handler does all of the Rx thread work and cleans up - after the Tx thread. */ - -/* - * This is the ISR for the vortex series chips. - * full_bus_master_tx == 0 && full_bus_master_rx == 0 - */ - -static irqreturn_t -vortex_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr; - int status; - int work_done = max_interrupt_work; - int handled = 0; - - ioaddr = vp->ioaddr; - spin_lock(&vp->lock); - - status = ioread16(ioaddr + EL3_STATUS); - - if (vortex_debug > 6) - pr_debug("vortex_interrupt(). status=0x%4x\n", status); - - if ((status & IntLatch) == 0) - goto handler_exit; /* No interrupt: shared IRQs cause this */ - handled = 1; - - if (status & IntReq) { - status |= vp->deferred; - vp->deferred = 0; - } - - if (status == 0xffff) /* h/w no longer present (hotplug)? 
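boomerang_start_xmit() above tracks the ring with two free-running counters: cur_tx only ever increments, dirty_tx chases it from the completion path, occupancy is their difference, and the slot index is cur_tx modulo TX_RING_SIZE. A toy demonstration of that bookkeeping; unsigned wraparound keeps the subtraction valid even after the counters overflow.

#include <stdio.h>

#define TX_RING_SIZE 16

static unsigned int cur_tx, dirty_tx;

static int ring_full(void) { return cur_tx - dirty_tx >= TX_RING_SIZE; }
static int ring_slot(void) { return cur_tx % TX_RING_SIZE; }

int main(void)
{
	for (int i = 0; i < 20; i++) {
		if (ring_full()) {
			printf("stop queue at %u\n", cur_tx);
			break;
		}
		printf("queue packet in slot %d\n", ring_slot());
		cur_tx++;
	}
	dirty_tx += 4;          /* pretend the NIC completed four packets */
	printf("full after reap? %d\n", ring_full());
	return 0;
}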
*/ - goto handler_exit; - - if (vortex_debug > 4) - pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n", - dev->name, status, ioread8(ioaddr + Timer)); - - spin_lock(&vp->window_lock); - window_set(vp, 7); - - do { - if (vortex_debug > 5) - pr_debug("%s: In interrupt loop, status %4.4x.\n", - dev->name, status); - if (status & RxComplete) - vortex_rx(dev); - - if (status & TxAvailable) { - if (vortex_debug > 5) - pr_debug(" TX room bit was handled.\n"); - /* There's room in the FIFO for a full-sized packet. */ - iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD); - netif_wake_queue (dev); - } - - if (status & DMADone) { - if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) { - iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ - pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ - if (ioread16(ioaddr + TxFree) > 1536) { - /* - * AKPM: FIXME: I don't think we need this. If the queue was stopped due to - * insufficient FIFO room, the TxAvailable test will succeed and call - * netif_wake_queue() - */ - netif_wake_queue(dev); - } else { /* Interrupt when FIFO has room for max-sized packet. */ - iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); - netif_stop_queue(dev); - } - } - } - /* Check for all uncommon interrupts at once. */ - if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { - if (status == 0xffff) - break; - if (status & RxEarly) - vortex_rx(dev); - spin_unlock(&vp->window_lock); - vortex_error(dev, status); - spin_lock(&vp->window_lock); - window_set(vp, 7); - } - - if (--work_done < 0) { - pr_warning("%s: Too much work in interrupt, status %4.4x.\n", - dev->name, status); - /* Disable all pending interrupts. */ - do { - vp->deferred |= status; - iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable), - ioaddr + EL3_CMD); - iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); - } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch); - /* The timer will reenable interrupts. */ - mod_timer(&vp->timer, jiffies + 1*HZ); - break; - } - /* Acknowledge the IRQ. */ - iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); - } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); - - spin_unlock(&vp->window_lock); - - if (vortex_debug > 4) - pr_debug("%s: exiting interrupt, status %4.4x.\n", - dev->name, status); -handler_exit: - spin_unlock(&vp->lock); - return IRQ_RETVAL(handled); -} - -/* - * This is the ISR for the boomerang series chips. - * full_bus_master_tx == 1 && full_bus_master_rx == 1 - */ - -static irqreturn_t -boomerang_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr; - int status; - int work_done = max_interrupt_work; - - ioaddr = vp->ioaddr; - - - /* - * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout - * and boomerang_start_xmit - */ - spin_lock(&vp->lock); - vp->handling_irq = 1; - - status = ioread16(ioaddr + EL3_STATUS); - - if (vortex_debug > 6) - pr_debug("boomerang_interrupt. status=0x%4x\n", status); - - if ((status & IntLatch) == 0) - goto handler_exit; /* No interrupt: shared IRQs can cause this */ - - if (status == 0xffff) { /* h/w no longer present (hotplug)? 
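When vortex_interrupt() above exhausts its work budget it does not spin forever: the still-asserted sources are folded into vp->deferred, masked out of the status-enable register, and the timer path later restores them (via a faked interrupt in the real driver). Below is a sketch of that mask dance with simulated register writes; the enable mask and the register model are invented for the demo.

#include <stdio.h>

static unsigned short deferred;
static const unsigned short status_enable = 0x06EE;	/* made-up enable mask */

static void too_much_work(unsigned short pending)
{
	deferred |= pending;
	/* what the driver writes via SetStatusEnb and AckIntr: everything
	 * still enabled except the sources that just flooded us */
	printf("SetStatusEnb 0x%04x, ack 0x%04x\n",
	       (unsigned)(~deferred & status_enable),
	       (unsigned)(deferred & 0x7ff));
}

static void timer_reenable(void)
{
	deferred = 0;
	printf("SetStatusEnb 0x%04x (restored)\n", (unsigned)status_enable);
}

int main(void)
{
	too_much_work(0x0041);	/* say IntLatch | RxComplete kept firing */
	timer_reenable();
	return 0;
}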
*/ - if (vortex_debug > 1) - pr_debug("boomerang_interrupt(1): status = 0xffff\n"); - goto handler_exit; - } - - if (status & IntReq) { - status |= vp->deferred; - vp->deferred = 0; - } - - if (vortex_debug > 4) - pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n", - dev->name, status, ioread8(ioaddr + Timer)); - do { - if (vortex_debug > 5) - pr_debug("%s: In interrupt loop, status %4.4x.\n", - dev->name, status); - if (status & UpComplete) { - iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD); - if (vortex_debug > 5) - pr_debug("boomerang_interrupt->boomerang_rx\n"); - boomerang_rx(dev); - } - - if (status & DownComplete) { - unsigned int dirty_tx = vp->dirty_tx; - - iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD); - while (vp->cur_tx - dirty_tx > 0) { - int entry = dirty_tx % TX_RING_SIZE; -#if 1 /* AKPM: the latter is faster, but cyclone-only */ - if (ioread32(ioaddr + DownListPtr) == - vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)) - break; /* It still hasn't been processed. */ -#else - if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0) - break; /* It still hasn't been processed. */ -#endif - - if (vp->tx_skbuff[entry]) { - struct sk_buff *skb = vp->tx_skbuff[entry]; -#if DO_ZEROCOPY - int i; - for (i=0; i<=skb_shinfo(skb)->nr_frags; i++) - pci_unmap_single(VORTEX_PCI(vp), - le32_to_cpu(vp->tx_ring[entry].frag[i].addr), - le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, - PCI_DMA_TODEVICE); -#else - pci_unmap_single(VORTEX_PCI(vp), - le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); -#endif - dev_kfree_skb_irq(skb); - vp->tx_skbuff[entry] = NULL; - } else { - pr_debug("boomerang_interrupt: no skb!\n"); - } - /* dev->stats.tx_packets++; Counted below. */ - dirty_tx++; - } - vp->dirty_tx = dirty_tx; - if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) { - if (vortex_debug > 6) - pr_debug("boomerang_interrupt: wake queue\n"); - netif_wake_queue (dev); - } - } - - /* Check for all uncommon interrupts at once. */ - if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) - vortex_error(dev, status); - - if (--work_done < 0) { - pr_warning("%s: Too much work in interrupt, status %4.4x.\n", - dev->name, status); - /* Disable all pending interrupts. */ - do { - vp->deferred |= status; - iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable), - ioaddr + EL3_CMD); - iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); - } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch); - /* The timer will reenable interrupts. */ - mod_timer(&vp->timer, jiffies + 1*HZ); - break; - } - /* Acknowledge the IRQ. */ - iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); - if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ - iowrite32(0x8000, vp->cb_fn_base + 4); - - } while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch); - - if (vortex_debug > 4) - pr_debug("%s: exiting interrupt, status %4.4x.\n", - dev->name, status); -handler_exit: - vp->handling_irq = 0; - spin_unlock(&vp->lock); - return IRQ_HANDLED; -} - -static int vortex_rx(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - int i; - short rx_status; - - if (vortex_debug > 5) - pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n", - ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus)); - while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) { - if (rx_status & 0x4000) { /* Error, update stats. 
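The DownComplete branch of boomerang_interrupt() above reaps finished transmits by walking dirty_tx toward cur_tx and stopping at the first descriptor the NIC still owns. The sketch below simulates ownership with a flag array; the driver instead compares DownListPtr against the descriptor's bus address (or, in the #else variant, tests the DN_COMPLETE status bit).

#include <stdio.h>

#define TX_RING_SIZE 16

static unsigned int cur_tx = 7, dirty_tx;
static int done[TX_RING_SIZE] = { 1, 1, 1, 1, 1, 0, 0 };

int main(void)
{
	while (cur_tx - dirty_tx > 0) {
		int entry = dirty_tx % TX_RING_SIZE;

		if (!done[entry])
			break;              /* still owned by the NIC */
		printf("free skb in slot %d\n", entry);
		dirty_tx++;
	}
	printf("outstanding: %u\n", cur_tx - dirty_tx);
	return 0;
}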
*/ - unsigned char rx_error = ioread8(ioaddr + RxErrors); - if (vortex_debug > 2) - pr_debug(" Rx error: status %2.2x.\n", rx_error); - dev->stats.rx_errors++; - if (rx_error & 0x01) dev->stats.rx_over_errors++; - if (rx_error & 0x02) dev->stats.rx_length_errors++; - if (rx_error & 0x04) dev->stats.rx_frame_errors++; - if (rx_error & 0x08) dev->stats.rx_crc_errors++; - if (rx_error & 0x10) dev->stats.rx_length_errors++; - } else { - /* The packet length: up to 4.5K!. */ - int pkt_len = rx_status & 0x1fff; - struct sk_buff *skb; - - skb = dev_alloc_skb(pkt_len + 5); - if (vortex_debug > 4) - pr_debug("Receiving packet size %d status %4.4x.\n", - pkt_len, rx_status); - if (skb != NULL) { - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - /* 'skb_put()' points to the start of sk_buff data area. */ - if (vp->bus_master && - ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) { - dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len), - pkt_len, PCI_DMA_FROMDEVICE); - iowrite32(dma, ioaddr + Wn7_MasterAddr); - iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); - iowrite16(StartDMAUp, ioaddr + EL3_CMD); - while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000) - ; - pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE); - } else { - ioread32_rep(ioaddr + RX_FIFO, - skb_put(skb, pkt_len), - (pkt_len + 3) >> 2); - } - iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - /* Wait a limited time to go to next packet. */ - for (i = 200; i >= 0; i--) - if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) - break; - continue; - } else if (vortex_debug > 0) - pr_notice("%s: No memory to allocate a sk_buff of size %d.\n", - dev->name, pkt_len); - dev->stats.rx_dropped++; - } - issue_and_wait(dev, RxDiscard); - } - - return 0; -} - -static int -boomerang_rx(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - int entry = vp->cur_rx % RX_RING_SIZE; - void __iomem *ioaddr = vp->ioaddr; - int rx_status; - int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; - - if (vortex_debug > 5) - pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS)); - - while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){ - if (--rx_work_limit < 0) - break; - if (rx_status & RxDError) { /* Error, update stats. */ - unsigned char rx_error = rx_status >> 16; - if (vortex_debug > 2) - pr_debug(" Rx error: status %2.2x.\n", rx_error); - dev->stats.rx_errors++; - if (rx_error & 0x01) dev->stats.rx_over_errors++; - if (rx_error & 0x02) dev->stats.rx_length_errors++; - if (rx_error & 0x04) dev->stats.rx_frame_errors++; - if (rx_error & 0x08) dev->stats.rx_crc_errors++; - if (rx_error & 0x10) dev->stats.rx_length_errors++; - } else { - /* The packet length: up to 4.5K!. */ - int pkt_len = rx_status & 0x1fff; - struct sk_buff *skb; - dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); - - if (vortex_debug > 4) - pr_debug("Receiving packet size %d status %4.4x.\n", - pkt_len, rx_status); - - /* Check if the packet is long enough to just accept without - copying to a properly sized skbuff. */ - if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - /* 'skb_put()' points to the start of sk_buff data area. 
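boomerang_rx() above applies the classic rx_copybreak heuristic: packets shorter than the threshold are copied into a right-sized skb so the big ring buffer stays mapped and in place, while larger packets are handed up whole and their ring slot refilled later. A tiny model of that decision follows; 200 appears to be the default this file sets for rx_copybreak, but treat the exact value as an assumption.

#include <stdbool.h>
#include <stdio.h>

static int rx_copybreak = 200;

static bool should_copy(int pkt_len) { return pkt_len < rx_copybreak; }

int main(void)
{
	int lens[] = { 64, 199, 200, 1514 };

	for (size_t i = 0; i < sizeof lens / sizeof lens[0]; i++)
		printf("len=%4d -> %s\n", lens[i],
		       should_copy(lens[i]) ? "copy (rx_copy++)"
					    : "flip (rx_nocopy++)");
	return 0;
}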
*/ - memcpy(skb_put(skb, pkt_len), - vp->rx_skbuff[entry]->data, - pkt_len); - pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - vp->rx_copy++; - } else { - /* Pass up the skbuff already on the Rx ring. */ - skb = vp->rx_skbuff[entry]; - vp->rx_skbuff[entry] = NULL; - skb_put(skb, pkt_len); - pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - vp->rx_nocopy++; - } - skb->protocol = eth_type_trans(skb, dev); - { /* Use hardware checksum info. */ - int csum_bits = rx_status & 0xee000000; - if (csum_bits && - (csum_bits == (IPChksumValid | TCPChksumValid) || - csum_bits == (IPChksumValid | UDPChksumValid))) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - vp->rx_csumhits++; - } - } - netif_rx(skb); - dev->stats.rx_packets++; - } - entry = (++vp->cur_rx) % RX_RING_SIZE; - } - /* Refill the Rx ring buffers. */ - for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { - struct sk_buff *skb; - entry = vp->dirty_rx % RX_RING_SIZE; - if (vp->rx_skbuff[entry] == NULL) { - skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); - if (skb == NULL) { - static unsigned long last_jif; - if (time_after(jiffies, last_jif + 10 * HZ)) { - pr_warning("%s: memory shortage\n", dev->name); - last_jif = jiffies; - } - if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) - mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); - break; /* Bad news! */ - } - - vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); - vp->rx_skbuff[entry] = skb; - } - vp->rx_ring[entry].status = 0; /* Clear complete bit. */ - iowrite16(UpUnstall, ioaddr + EL3_CMD); - } - return 0; -} - -/* - * If we've hit a total OOM refilling the Rx ring we poll once a second - * for some memory. Otherwise there is no way to restart the rx process. - */ -static void -rx_oom_timer(unsigned long arg) -{ - struct net_device *dev = (struct net_device *)arg; - struct vortex_private *vp = netdev_priv(dev); - - spin_lock_irq(&vp->lock); - if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */ - boomerang_rx(dev); - if (vortex_debug > 1) { - pr_debug("%s: rx_oom_timer %s\n", dev->name, - ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying"); - } - spin_unlock_irq(&vp->lock); -} - -static void -vortex_down(struct net_device *dev, int final_down) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - - netif_stop_queue (dev); - - del_timer_sync(&vp->rx_oom_timer); - del_timer_sync(&vp->timer); - - /* Turn off statistics ASAP. We update dev->stats below. */ - iowrite16(StatsDisable, ioaddr + EL3_CMD); - - /* Disable the receiver and transmitter. */ - iowrite16(RxDisable, ioaddr + EL3_CMD); - iowrite16(TxDisable, ioaddr + EL3_CMD); - - /* Disable receiving 802.1q tagged frames */ - set_8021q_mode(dev, 0); - - if (dev->if_port == XCVR_10base2) - /* Turn off thinnet power. Green! 
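The refill loop and rx_oom_timer above cover the nasty corner where every ring buffer is out on loan and allocation fails: with zero posted buffers there will never be another Rx interrupt to drive the refill, so a one-second timer must restart it. A toy version of that bookkeeping, with allocation success reduced to a flag:

#include <stdio.h>

#define RX_RING_SIZE 32

static unsigned int cur_rx, dirty_rx;
static int alloc_ok;

static void refill(void)
{
	for (; cur_rx - dirty_rx > 0; dirty_rx++) {
		if (!alloc_ok) {
			if (cur_rx - dirty_rx == RX_RING_SIZE)
				printf("ring empty: arm rx_oom_timer\n");
			break;	/* try again later */
		}
		printf("refill slot %u\n", dirty_rx % RX_RING_SIZE);
	}
}

int main(void)
{
	alloc_ok = 1;
	cur_rx = 3;		/* three packets consumed normally */
	refill();

	alloc_ok = 0;		/* simulate total memory exhaustion */
	cur_rx = dirty_rx + RX_RING_SIZE;
	refill();
	return 0;
}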
*/ - iowrite16(StopCoax, ioaddr + EL3_CMD); - - iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); - - update_stats(ioaddr, dev); - if (vp->full_bus_master_rx) - iowrite32(0, ioaddr + UpListPtr); - if (vp->full_bus_master_tx) - iowrite32(0, ioaddr + DownListPtr); - - if (final_down && VORTEX_PCI(vp)) { - vp->pm_state_valid = 1; - pci_save_state(VORTEX_PCI(vp)); - acpi_set_WOL(dev); - } -} - -static int -vortex_close(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - int i; - - if (netif_device_present(dev)) - vortex_down(dev, 1); - - if (vortex_debug > 1) { - pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n", - dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus)); - pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d" - " tx_queued %d Rx pre-checksummed %d.\n", - dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits); - } - -#if DO_ZEROCOPY - if (vp->rx_csumhits && - (vp->drv_flags & HAS_HWCKSM) == 0 && - (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) { - pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name); - } -#endif - - free_irq(dev->irq, dev); - - if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ - for (i = 0; i < RX_RING_SIZE; i++) - if (vp->rx_skbuff[i]) { - pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), - PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - dev_kfree_skb(vp->rx_skbuff[i]); - vp->rx_skbuff[i] = NULL; - } - } - if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */ - for (i = 0; i < TX_RING_SIZE; i++) { - if (vp->tx_skbuff[i]) { - struct sk_buff *skb = vp->tx_skbuff[i]; -#if DO_ZEROCOPY - int k; - - for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) - pci_unmap_single(VORTEX_PCI(vp), - le32_to_cpu(vp->tx_ring[i].frag[k].addr), - le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, - PCI_DMA_TODEVICE); -#else - pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); -#endif - dev_kfree_skb(skb); - vp->tx_skbuff[i] = NULL; - } - } - } - - return 0; -} - -static void -dump_tx_ring(struct net_device *dev) -{ - if (vortex_debug > 0) { - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - - if (vp->full_bus_master_tx) { - int i; - int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */ - - pr_err(" Flags; bus-master %d, dirty %d(%d) current %d(%d)\n", - vp->full_bus_master_tx, - vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE, - vp->cur_tx, vp->cur_tx % TX_RING_SIZE); - pr_err(" Transmit list %8.8x vs. 
%p.\n", - ioread32(ioaddr + DownListPtr), - &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); - issue_and_wait(dev, DownStall); - for (i = 0; i < TX_RING_SIZE; i++) { - unsigned int length; - -#if DO_ZEROCOPY - length = le32_to_cpu(vp->tx_ring[i].frag[0].length); -#else - length = le32_to_cpu(vp->tx_ring[i].length); -#endif - pr_err(" %d: @%p length %8.8x status %8.8x\n", - i, &vp->tx_ring[i], length, - le32_to_cpu(vp->tx_ring[i].status)); - } - if (!stalled) - iowrite16(DownUnstall, ioaddr + EL3_CMD); - } - } -} - -static struct net_device_stats *vortex_get_stats(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - unsigned long flags; - - if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */ - spin_lock_irqsave (&vp->lock, flags); - update_stats(ioaddr, dev); - spin_unlock_irqrestore (&vp->lock, flags); - } - return &dev->stats; -} - -/* Update statistics. - Unlike with the EL3 we need not worry about interrupts changing - the window setting from underneath us, but we must still guard - against a race condition with a StatsUpdate interrupt updating the - table. This is done by checking that the ASM (!) code generated uses - atomic updates with '+='. - */ -static void update_stats(void __iomem *ioaddr, struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - - /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ - /* Switch to the stats window, and read everything. */ - dev->stats.tx_carrier_errors += window_read8(vp, 6, 0); - dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1); - dev->stats.tx_window_errors += window_read8(vp, 6, 4); - dev->stats.rx_fifo_errors += window_read8(vp, 6, 5); - dev->stats.tx_packets += window_read8(vp, 6, 6); - dev->stats.tx_packets += (window_read8(vp, 6, 9) & - 0x30) << 4; - /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */ - /* Don't bother with register 9, an extension of registers 6&7. - If we do use the 6&7 values the atomic update assumption above - is invalid. 
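update_stats() above reads the 8- and 16-bit counters out of window 6; its tail (just below) also unpacks one extra register whose low nibble extends rx_bytes and whose high nibble extends tx_bytes, giving 20-bit byte counters overall. A worked example of that arithmetic, with made-up register readings:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t rx_lo = 0xBEEF, tx_lo = 0x1234;	/* window 6, offsets 10/12 */
	uint8_t up = 0x5A;				/* window 4, offset 13 */

	uint32_t rx = rx_lo + ((uint32_t)(up & 0x0f) << 16);
	uint32_t tx = tx_lo + ((uint32_t)(up & 0xf0) << 12);

	printf("rx=0x%05x tx=0x%05x\n", (unsigned)rx, (unsigned)tx);
	/* prints rx=0xabeef tx=0x51234 */
	return 0;
}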
 */
- dev->stats.rx_bytes += window_read16(vp, 6, 10);
- dev->stats.tx_bytes += window_read16(vp, 6, 12);
- /* Extra stats for get_ethtool_stats() */
- vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
- vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
- vp->xstats.tx_deferred += window_read8(vp, 6, 8);
- vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
-
- dev->stats.collisions = vp->xstats.tx_multiple_collisions
- + vp->xstats.tx_single_collisions
- + vp->xstats.tx_max_collisions;
-
- {
- u8 up = window_read8(vp, 4, 13);
- dev->stats.rx_bytes += (up & 0x0f) << 16;
- dev->stats.tx_bytes += (up & 0xf0) << 12;
- }
-}
-
-static int vortex_nway_reset(struct net_device *dev)
-{
- struct vortex_private *vp = netdev_priv(dev);
-
- return mii_nway_restart(&vp->mii);
-}
-
-static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct vortex_private *vp = netdev_priv(dev);
-
- return mii_ethtool_gset(&vp->mii, cmd);
-}
-
-static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct vortex_private *vp = netdev_priv(dev);
-
- return mii_ethtool_sset(&vp->mii, cmd);
-}
-
-static u32 vortex_get_msglevel(struct net_device *dev)
-{
- return vortex_debug;
-}
-
-static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
-{
- vortex_debug = dbg;
-}
-
-static int vortex_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return VORTEX_NUM_STATS;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void vortex_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
- unsigned long flags;
-
- spin_lock_irqsave(&vp->lock, flags);
- update_stats(ioaddr, dev);
- spin_unlock_irqrestore(&vp->lock, flags);
-
- data[0] = vp->xstats.tx_deferred;
- data[1] = vp->xstats.tx_max_collisions;
- data[2] = vp->xstats.tx_multiple_collisions;
- data[3] = vp->xstats.tx_single_collisions;
- data[4] = vp->xstats.rx_bad_ssd;
-}
-
-
-static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
- switch (stringset) {
- case ETH_SS_STATS:
- memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
- break;
- default:
- WARN_ON(1);
- break;
- }
-}
-
-static void vortex_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct vortex_private *vp = netdev_priv(dev);
-
- strcpy(info->driver, DRV_NAME);
- if (VORTEX_PCI(vp)) {
- strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
- } else {
- if (VORTEX_EISA(vp))
- strcpy(info->bus_info, dev_name(vp->gendev));
- else
- sprintf(info->bus_info, "EISA 0x%lx %d",
- dev->base_addr, dev->irq);
- }
-}
-
-static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- struct vortex_private *vp = netdev_priv(dev);
-
- if (!VORTEX_PCI(vp))
- return;
-
- wol->supported = WAKE_MAGIC;
-
- wol->wolopts = 0;
- if (vp->enable_wol)
- wol->wolopts |= WAKE_MAGIC;
-}
-
-static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- struct vortex_private *vp = netdev_priv(dev);
-
- if (!VORTEX_PCI(vp))
- return -EOPNOTSUPP;
-
- if (wol->wolopts & ~WAKE_MAGIC)
- return -EINVAL;
-
- if (wol->wolopts & WAKE_MAGIC)
- vp->enable_wol = 1;
- else
- vp->enable_wol = 0;
- acpi_set_WOL(dev);
-
- return 0;
-}
-
-static const struct ethtool_ops vortex_ethtool_ops = {
- .get_drvinfo = vortex_get_drvinfo,
- .get_strings = vortex_get_strings,
- .get_msglevel = vortex_get_msglevel,
- .set_msglevel = 
vortex_set_msglevel, - .get_ethtool_stats = vortex_get_ethtool_stats, - .get_sset_count = vortex_get_sset_count, - .get_settings = vortex_get_settings, - .set_settings = vortex_set_settings, - .get_link = ethtool_op_get_link, - .nway_reset = vortex_nway_reset, - .get_wol = vortex_get_wol, - .set_wol = vortex_set_wol, -}; - -#ifdef CONFIG_PCI -/* - * Must power the device up to do MDIO operations - */ -static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - int err; - struct vortex_private *vp = netdev_priv(dev); - pci_power_t state = 0; - - if(VORTEX_PCI(vp)) - state = VORTEX_PCI(vp)->current_state; - - /* The kernel core really should have pci_get_power_state() */ - - if(state != 0) - pci_set_power_state(VORTEX_PCI(vp), PCI_D0); - err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); - if(state != 0) - pci_set_power_state(VORTEX_PCI(vp), state); - - return err; -} -#endif - - -/* Pre-Cyclone chips have no documented multicast filter, so the only - multicast setting is to receive all multicast frames. At least - the chip has a very clean way to set the mode, unlike many others. */ -static void set_rx_mode(struct net_device *dev) -{ - struct vortex_private *vp = netdev_priv(dev); - void __iomem *ioaddr = vp->ioaddr; - int new_mode; - - if (dev->flags & IFF_PROMISC) { - if (vortex_debug > 3) - pr_notice("%s: Setting promiscuous mode.\n", dev->name); - new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm; - } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { - new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast; - } else - new_mode = SetRxFilter | RxStation | RxBroadcast; - - iowrite16(new_mode, ioaddr + EL3_CMD); -} - -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) -/* Setup the card so that it can receive frames with an 802.1q VLAN tag. - Note that this must be done after each RxReset due to some backwards - compatibility logic in the Cyclone and Tornado ASICs */ - -/* The Ethernet Type used for 802.1q tagged frames */ -#define VLAN_ETHER_TYPE 0x8100 - -static void set_8021q_mode(struct net_device *dev, int enable) -{ - struct vortex_private *vp = netdev_priv(dev); - int mac_ctrl; - - if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) { - /* cyclone and tornado chipsets can recognize 802.1q - * tagged frames and treat them correctly */ - - int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */ - if (enable) - max_pkt_size += 4; /* 802.1Q VLAN tag */ - - window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize); - - /* set VlanEtherType to let the hardware checksumming - treat tagged frames correctly */ - window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType); - } else { - /* on older cards we have to enable large frames */ - - vp->large_frames = dev->mtu > 1500 || enable; - - mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl); - if (vp->large_frames) - mac_ctrl |= 0x40; - else - mac_ctrl &= ~0x40; - window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl); - } -} -#else - -static void set_8021q_mode(struct net_device *dev, int enable) -{ -} - - -#endif - -/* MII transceiver control section. - Read and write the MII registers using software-generated serial - MDIO protocol. See the MII specifications or DP83840A data sheet - for details. */ - -/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually - met by back-to-back PCI I/O cycles, but we insert a delay to avoid - "overclocking" issues. 
 */
-static void mdio_delay(struct vortex_private *vp)
-{
- window_read32(vp, 4, Wn4_PhysicalMgmt);
-}
-
-#define MDIO_SHIFT_CLK 0x01
-#define MDIO_DIR_WRITE 0x04
-#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
-#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
-#define MDIO_DATA_READ 0x02
-#define MDIO_ENB_IN 0x00
-
-/* Generate the preamble required for initial synchronization and
- a few older transceivers. */
-static void mdio_sync(struct vortex_private *vp, int bits)
-{
- /* Establish sync by sending at least 32 logic ones. */
- while (-- bits >= 0) {
- window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
- mdio_delay(vp);
- window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
- 4, Wn4_PhysicalMgmt);
- mdio_delay(vp);
- }
-}
-
-static int mdio_read(struct net_device *dev, int phy_id, int location)
-{
- int i;
- struct vortex_private *vp = netdev_priv(dev);
- int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
- unsigned int retval = 0;
-
- spin_lock_bh(&vp->mii_lock);
-
- if (mii_preamble_required)
- mdio_sync(vp, 32);
-
- /* Shift the read command bits out. */
- for (i = 14; i >= 0; i--) {
- int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
- window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
- mdio_delay(vp);
- window_write16(vp, dataval | MDIO_SHIFT_CLK,
- 4, Wn4_PhysicalMgmt);
- mdio_delay(vp);
- }
- /* Read the two transition, 16 data, and wire-idle bits. */
- for (i = 19; i > 0; i--) {
- window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
- mdio_delay(vp);
- retval = (retval << 1) |
- ((window_read16(vp, 4, Wn4_PhysicalMgmt) &
- MDIO_DATA_READ) ? 1 : 0);
- window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
- 4, Wn4_PhysicalMgmt);
- mdio_delay(vp);
- }
-
- spin_unlock_bh(&vp->mii_lock);
-
- return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
-}
-
-static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
-{
- struct vortex_private *vp = netdev_priv(dev);
- int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
- int i;
-
- spin_lock_bh(&vp->mii_lock);
-
- if (mii_preamble_required)
- mdio_sync(vp, 32);
-
- /* Shift the command bits out. */
- for (i = 31; i >= 0; i--) {
- int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
- window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
- mdio_delay(vp);
- window_write16(vp, dataval | MDIO_SHIFT_CLK,
- 4, Wn4_PhysicalMgmt);
- mdio_delay(vp);
- }
- /* Leave the interface idle. */
- for (i = 1; i >= 0; i--) {
- window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
- mdio_delay(vp);
- window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
- 4, Wn4_PhysicalMgmt);
- mdio_delay(vp);
- }
-
- spin_unlock_bh(&vp->mii_lock);
-}
-
-/* ACPI: Advanced Configuration and Power Interface. */
-/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
-static void acpi_set_WOL(struct net_device *dev)
-{
- struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
-
- device_set_wakeup_enable(vp->gendev, vp->enable_wol);
-
- if (vp->enable_wol) {
- /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
- window_write16(vp, 2, 7, 0x0c);
- /* The RxFilter must accept the WOL frames. */
- iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
- iowrite16(RxEnable, ioaddr + EL3_CMD);
-
- if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
- pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));
-
- vp->enable_wol = 0;
- return;
- }
-
- if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
- return;
-
- /* Change the power state to D3; RxEnable doesn't take effect.
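mdio_read()/mdio_write() above bit-bang standard clause-22 MDIO management frames through Wn4_PhysicalMgmt. The constants encode the frame layout: for a read, (0xf6 << 10) supplies the start and read-opcode bits ahead of the 5-bit PHY and register addresses, and the low 15 bits are clocked out MSB first; a write clocks out the full 32-bit 0x5002xxxx pattern including turnaround and data. A quick look at the composed words; phy_id 24 is just an example value here.

#include <stdio.h>

int main(void)
{
	int phy_id = 24, location = 1;	/* e.g. MII_BMSR */
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	unsigned int write_cmd = 0x50020000 |
		((unsigned)phy_id << 23) | ((unsigned)location << 18) | 0x1234;

	printf("read_cmd  = 0x%05x (low 15 bits shifted out)\n", read_cmd);
	printf("write_cmd = 0x%08x\n", write_cmd);
	return 0;
}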
*/ - pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); - } -} - - -static void __devexit vortex_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct vortex_private *vp; - - if (!dev) { - pr_err("vortex_remove_one called for Compaq device!\n"); - BUG(); - } - - vp = netdev_priv(dev); - - if (vp->cb_fn_base) - pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base); - - unregister_netdev(dev); - - if (VORTEX_PCI(vp)) { - pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ - if (vp->pm_state_valid) - pci_restore_state(VORTEX_PCI(vp)); - pci_disable_device(VORTEX_PCI(vp)); - } - /* Should really use issue_and_wait() here */ - iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14), - vp->ioaddr + EL3_CMD); - - pci_iounmap(VORTEX_PCI(vp), vp->ioaddr); - - pci_free_consistent(pdev, - sizeof(struct boom_rx_desc) * RX_RING_SIZE - + sizeof(struct boom_tx_desc) * TX_RING_SIZE, - vp->rx_ring, - vp->rx_ring_dma); - if (vp->must_free_region) - release_region(dev->base_addr, vp->io_size); - free_netdev(dev); -} - - -static struct pci_driver vortex_driver = { - .name = "3c59x", - .probe = vortex_init_one, - .remove = __devexit_p(vortex_remove_one), - .id_table = vortex_pci_tbl, - .driver.pm = VORTEX_PM_OPS, -}; - - -static int vortex_have_pci; -static int vortex_have_eisa; - - -static int __init vortex_init(void) -{ - int pci_rc, eisa_rc; - - pci_rc = pci_register_driver(&vortex_driver); - eisa_rc = vortex_eisa_init(); - - if (pci_rc == 0) - vortex_have_pci = 1; - if (eisa_rc > 0) - vortex_have_eisa = 1; - - return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV; -} - - -static void __exit vortex_eisa_cleanup(void) -{ - struct vortex_private *vp; - void __iomem *ioaddr; - -#ifdef CONFIG_EISA - /* Take care of the EISA devices */ - eisa_driver_unregister(&vortex_eisa_driver); -#endif - - if (compaq_net_device) { - vp = netdev_priv(compaq_net_device); - ioaddr = ioport_map(compaq_net_device->base_addr, - VORTEX_TOTAL_SIZE); - - unregister_netdev(compaq_net_device); - iowrite16(TotalReset, ioaddr + EL3_CMD); - release_region(compaq_net_device->base_addr, - VORTEX_TOTAL_SIZE); - - free_netdev(compaq_net_device); - } -} - - -static void __exit vortex_cleanup(void) -{ - if (vortex_have_pci) - pci_unregister_driver(&vortex_driver); - if (vortex_have_eisa) - vortex_eisa_cleanup(); -} - - -module_init(vortex_init); -module_exit(vortex_cleanup); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 5b95796..0b3ea21 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -616,38 +616,11 @@ config SUNVNET help Support for virtual network devices under Sun Logical Domains. -config NET_VENDOR_3COM - bool "3COM cards" - depends on ISA || EISA || MCA || PCI - help - If you have a network (Ethernet) card belonging to this class, say Y - and read the Ethernet-HOWTO, available from - . - - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about 3COM cards. If you say Y, you will be asked for - your specific card in the following questions. - -config EL1 - tristate "3c501 \"EtherLink\" support" - depends on NET_VENDOR_3COM && ISA - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . Also, consider buying a - new card, since the 3c501 is slow, broken, and obsolete: you will - have problems. Some people suggest to ping ("man ping") a nearby - machine every minute ("man cron") when using this card. 
- - To compile this driver as a module, choose M here. The module - will be called 3c501. - config EL2 tristate "3c503 \"EtherLink II\" support" - depends on NET_VENDOR_3COM && ISA + depends on ISA select CRC32 - help + ---help--- If you have a network (Ethernet) card of this type, say Y and read the Ethernet-HOWTO, available from . @@ -657,7 +630,7 @@ config EL2 config ELPLUS tristate "3c505 \"EtherLink Plus\" support" - depends on NET_VENDOR_3COM && ISA && ISA_DMA_API + depends on ISA && ISA_DMA_API ---help--- Information about this network (Ethernet) card can be found in . If you have a card of @@ -669,8 +642,8 @@ config ELPLUS config EL16 tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)" - depends on NET_VENDOR_3COM && ISA && EXPERIMENTAL - help + depends on ISA && EXPERIMENTAL + ---help--- If you have a network (Ethernet) card of this type, say Y and read the Ethernet-HOWTO, available from . @@ -678,36 +651,10 @@ config EL16 To compile this driver as a module, choose M here. The module will be called 3c507. -config EL3 - tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support" - depends on NET_VENDOR_3COM && (ISA || EISA || MCA) - ---help--- - If you have a network (Ethernet) card belonging to the 3Com - EtherLinkIII series, say Y and read the Ethernet-HOWTO, available - from . - - If your card is not working you may need to use the DOS - setup disk to disable Plug & Play mode, and to select the default - media type. - - To compile this driver as a module, choose M here. The module - will be called 3c509. - -config 3C515 - tristate "3c515 ISA \"Fast EtherLink\"" - depends on NET_VENDOR_3COM && (ISA || EISA) && ISA_DMA_API - help - If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet - network card, say Y and read the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called 3c515. - config ELMC tristate "3c523 \"EtherLink/MC\" support" - depends on NET_VENDOR_3COM && MCA_LEGACY - help + depends on MCA_LEGACY + ---help--- If you have a network (Ethernet) card of this type, say Y and read the Ethernet-HOWTO, available from . @@ -717,54 +664,14 @@ config ELMC config ELMC_II tristate "3c527 \"EtherLink/MC 32\" support (EXPERIMENTAL)" - depends on NET_VENDOR_3COM && MCA && MCA_LEGACY - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called 3c527. - -config VORTEX - tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support" - depends on NET_VENDOR_3COM && (PCI || EISA) - select MII - ---help--- - This option enables driver support for a large number of 10Mbps and - 10/100Mbps EISA, PCI and PCMCIA 3Com network cards: - - "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI - "Boomerang" (EtherLink XL 3c900 or 3c905) PCI - "Cyclone" (3c540/3c900/3c905/3c980/3c575/3c656) PCI and Cardbus - "Tornado" (3c905) PCI - "Hurricane" (3c555/3cSOHO) PCI - - If you have such a card, say Y and read the Ethernet-HOWTO, - available from . More - specific information is in - and in the comments at - the beginning of . - - To compile this support as a module, choose M here. 
- -config TYPHOON - tristate "3cr990 series \"Typhoon\" support" - depends on NET_VENDOR_3COM && PCI - select CRC32 + depends on MCA && MCA_LEGACY ---help--- - This option enables driver support for the 3cr990 series of cards: - - 3C990-TX, 3CR990-TX-95, 3CR990-TX-97, 3CR990-FX-95, 3CR990-FX-97, - 3CR990SVR, 3CR990SVR95, 3CR990SVR97, 3CR990-FX-95 Server, - 3CR990-FX-97 Server, 3C990B-TX-M, 3C990BSVR - If you have a network (Ethernet) card of this type, say Y and read the Ethernet-HOWTO, available from . To compile this driver as a module, choose M here. The module - will be called typhoon. + will be called 3c527. config LANCE tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" @@ -2046,33 +1953,6 @@ menuconfig NETDEV_1000 if NETDEV_1000 -config ACENIC - tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support" - depends on PCI - ---help--- - Say Y here if you have an Alteon AceNIC, 3Com 3C985(B), NetGear - GA620, SGI Gigabit or Farallon PN9000-SX PCI Gigabit Ethernet - adapter. The driver allows for using the Jumbo Frame option (9000 - bytes/frame) however it requires that your switches can handle this - as well. To enable Jumbo Frames, add `mtu 9000' to your ifconfig - line. - - To compile this driver as a module, choose M here: the - module will be called acenic. - -config ACENIC_OMIT_TIGON_I - bool "Omit support for old Tigon I based AceNICs" - depends on ACENIC - help - Say Y here if you only have Tigon II based AceNICs and want to leave - out support for the older Tigon I based cards which are no longer - being sold (ie. the original Alteon AceNIC and 3Com 3C985 (non B - version)). This will reduce the size of the driver object by - app. 100KB. If you are not sure whether your card is a Tigon I or a - Tigon II, say N here. - - The safe and default value for this is N. 
- config DL2K tristate "DL2000/TC902x-based Gigabit Ethernet support" depends on PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 670b514..d4d6744 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -66,8 +66,6 @@ obj-$(CONFIG_SUNVNET) += sunvnet.o obj-$(CONFIG_MACE) += mace.o obj-$(CONFIG_BMAC) += bmac.o -obj-$(CONFIG_VORTEX) += 3c59x.o -obj-$(CONFIG_TYPHOON) += typhoon.o obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o obj-$(CONFIG_PCNET32) += pcnet32.o obj-$(CONFIG_E100) += e100.o @@ -78,7 +76,6 @@ obj-$(CONFIG_SIS190) += sis190.o obj-$(CONFIG_SIS900) += sis900.o obj-$(CONFIG_R6040) += r6040.o obj-$(CONFIG_YELLOWFIN) += yellowfin.o -obj-$(CONFIG_ACENIC) += acenic.o obj-$(CONFIG_ISERIES_VETH) += iseries_veth.o obj-$(CONFIG_NATSEMI) += natsemi.o obj-$(CONFIG_NS83820) += ns83820.o @@ -187,13 +184,10 @@ obj-$(CONFIG_DEFXX) += defxx.o obj-$(CONFIG_SGISEEQ) += sgiseeq.o obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o obj-$(CONFIG_AT1700) += at1700.o -obj-$(CONFIG_EL1) += 3c501.o obj-$(CONFIG_EL16) += 3c507.o obj-$(CONFIG_ELMC) += 3c523.o obj-$(CONFIG_IBMLANA) += ibmlana.o obj-$(CONFIG_ELMC_II) += 3c527.o -obj-$(CONFIG_EL3) += 3c509.o -obj-$(CONFIG_3C515) += 3c515.o obj-$(CONFIG_EEXPRESS) += eexpress.o obj-$(CONFIG_EEXPRESS_PRO) += eepro.o obj-$(CONFIG_8139CP) += 8139cp.o @@ -269,6 +263,7 @@ obj-$(CONFIG_S6GMAC) += s6gmac.o obj-$(CONFIG_ARM) += arm/ obj-$(CONFIG_DEV_APPLETALK) += appletalk/ +obj-$(CONFIG_ETHERNET) += ethernet/ obj-$(CONFIG_TR) += tokenring/ obj-$(CONFIG_WAN) += wan/ obj-$(CONFIG_ARCNET) += arcnet/ diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c deleted file mode 100644 index 31798f5..0000000 --- a/drivers/net/acenic.c +++ /dev/null @@ -1,3206 +0,0 @@ -/* - * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card - * and other Tigon based cards. - * - * Copyright 1998-2002 by Jes Sorensen, . - * - * Thanks to Alteon and 3Com for providing hardware and documentation - * enabling me to write this driver. - * - * A mailing list for discussing the use of this driver has been - * setup, please subscribe to the lists if you have any questions - * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to - * see how to subscribe. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * Additional credits: - * Pete Wyckoff : Initial Linux/Alpha and trace - * dump support. The trace dump support has not been - * integrated yet however. - * Troy Benjegerdes: Big Endian (PPC) patches. - * Nate Stahl: Better out of memory handling and stats support. - * Aman Singla: Nasty race between interrupt handler and tx code dealing - * with 'testing the tx_ret_csm and setting tx_full' - * David S. Miller : conversion to new PCI dma mapping - * infrastructure and Sparc support - * Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the - * driver under Linux/Sparc64 - * Matt Domsch : Detect Alteon 1000baseT cards - * ETHTOOL_GDRVINFO support - * Chip Salzenberg : Fix race condition between tx - * handler and close() cleanup. - * Ken Aaker : Correct check for whether - * memory mapped IO is enabled to - * make the driver work on RS/6000. - * Takayoshi Kouchi : Identifying problem - * where the driver would disable - * bus master mode if it had to disable - * write and invalidate. 
- * Stephen Hack : Fixed ace_set_mac_addr for little - * endian systems. - * Val Henson : Reset Jumbo skb producer and - * rx producer index when - * flushing the Jumbo ring. - * Hans Grobler : Memory leak fixes in the - * driver init path. - * Grant Grundler : PCI write posting fixes. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef SIOCETHTOOL -#include -#endif - -#include -#include - -#include -#include -#include -#include -#include - - -#define DRV_NAME "acenic" - -#undef INDEX_DEBUG - -#ifdef CONFIG_ACENIC_OMIT_TIGON_I -#define ACE_IS_TIGON_I(ap) 0 -#define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES -#else -#define ACE_IS_TIGON_I(ap) (ap->version == 1) -#define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries -#endif - -#ifndef PCI_VENDOR_ID_ALTEON -#define PCI_VENDOR_ID_ALTEON 0x12ae -#endif -#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE -#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001 -#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002 -#endif -#ifndef PCI_DEVICE_ID_3COM_3C985 -#define PCI_DEVICE_ID_3COM_3C985 0x0001 -#endif -#ifndef PCI_VENDOR_ID_NETGEAR -#define PCI_VENDOR_ID_NETGEAR 0x1385 -#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a -#endif -#ifndef PCI_DEVICE_ID_NETGEAR_GA620T -#define PCI_DEVICE_ID_NETGEAR_GA620T 0x630a -#endif - - -/* - * Farallon used the DEC vendor ID by mistake and they seem not - * to care - stinky! - */ -#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX -#define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a -#endif -#ifndef PCI_DEVICE_ID_FARALLON_PN9100T -#define PCI_DEVICE_ID_FARALLON_PN9100T 0xfa -#endif -#ifndef PCI_VENDOR_ID_SGI -#define PCI_VENDOR_ID_SGI 0x10a9 -#endif -#ifndef PCI_DEVICE_ID_SGI_ACENIC -#define PCI_DEVICE_ID_SGI_ACENIC 0x0009 -#endif - -static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = { - { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE, - PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, - { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER, - PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985, - PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, - { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620, - PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, - { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T, - PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, - /* - * Farallon used the DEC vendor ID on their cards incorrectly, - * then later Alteon's ID. - */ - { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX, - PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, - { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T, - PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, - { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC, - PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, - { } -}; -MODULE_DEVICE_TABLE(pci, acenic_pci_tbl); - -#define ace_sync_irq(irq) synchronize_irq(irq) - -#ifndef offset_in_page -#define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK) -#endif - -#define ACE_MAX_MOD_PARMS 8 -#define BOARD_IDX_STATIC 0 -#define BOARD_IDX_OVERFLOW -1 - -#include "acenic.h" - -/* - * These must be defined before the firmware is included. 
- */ -#define MAX_TEXT_LEN 96*1024 -#define MAX_RODATA_LEN 8*1024 -#define MAX_DATA_LEN 2*1024 - -#ifndef tigon2FwReleaseLocal -#define tigon2FwReleaseLocal 0 -#endif - -/* - * This driver currently supports Tigon I and Tigon II based cards - * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear - * GA620. The driver should also work on the SGI, DEC and Farallon - * versions of the card, however I have not been able to test that - * myself. - * - * This card is really neat, it supports receive hardware checksumming - * and jumbo frames (up to 9000 bytes) and does a lot of work in the - * firmware. Also the programming interface is quite neat, except for - * the parts dealing with the i2c eeprom on the card ;-) - * - * Using jumbo frames: - * - * To enable jumbo frames, simply specify an mtu between 1500 and 9000 - * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time - * by running `ifconfig eth mtu ' with being the Ethernet - * interface number and being the MTU value. - * - * Module parameters: - * - * When compiled as a loadable module, the driver allows for a number - * of module parameters to be specified. The driver supports the - * following module parameters: - * - * trace= - Firmware trace level. This requires special traced - * firmware to replace the firmware supplied with - * the driver - for debugging purposes only. - * - * link= - Link state. Normally you want to use the default link - * parameters set by the driver. This can be used to - * override these in case your switch doesn't negotiate - * the link properly. Valid values are: - * 0x0001 - Force half duplex link. - * 0x0002 - Do not negotiate line speed with the other end. - * 0x0010 - 10Mbit/sec link. - * 0x0020 - 100Mbit/sec link. - * 0x0040 - 1000Mbit/sec link. - * 0x0100 - Do not negotiate flow control. - * 0x0200 - Enable RX flow control Y - * 0x0400 - Enable TX flow control Y (Tigon II NICs only). - * Default value is 0x0270, ie. enable link+flow - * control negotiation. Negotiating the highest - * possible link speed with RX flow control enabled. - * - * When disabling link speed negotiation, only one link - * speed is allowed to be specified! - * - * tx_coal_tick= - number of coalescing clock ticks (us) allowed - * to wait for more packets to arive before - * interrupting the host, from the time the first - * packet arrives. - * - * rx_coal_tick= - number of coalescing clock ticks (us) allowed - * to wait for more packets to arive in the transmit ring, - * before interrupting the host, after transmitting the - * first packet in the ring. - * - * max_tx_desc= - maximum number of transmit descriptors - * (packets) transmitted before interrupting the host. - * - * max_rx_desc= - maximum number of receive descriptors - * (packets) received before interrupting the host. - * - * tx_ratio= - 7 bit value (0 - 63) specifying the split in 64th - * increments of the NIC's on board memory to be used for - * transmit and receive buffers. For the 1MB NIC app. 800KB - * is available, on the 1/2MB NIC app. 300KB is available. - * 68KB will always be available as a minimum for both - * directions. The default value is a 50/50 split. - * dis_pci_mem_inval= - disable PCI memory write and invalidate - * operations, default (1) is to always disable this as - * that is what Alteon does on NT. I have not been able - * to measure any real performance differences with - * this on my systems. Set =0 if you want to - * enable these operations. 
- * - * If you use more than one NIC, specify the parameters for the - * individual NICs with a comma, ie. trace=0,0x00001fff,0 you want to - * run tracing on NIC #2 but not on NIC #1 and #3. - * - * TODO: - * - * - Proper multicast support. - * - NIC dump support. - * - More tuning parameters. - * - * The mini ring is not used under Linux and I am not sure it makes sense - * to actually use it. - * - * New interrupt handler strategy: - * - * The old interrupt handler worked using the traditional method of - * replacing an skbuff with a new one when a packet arrives. However - * the rx rings do not need to contain a static number of buffer - * descriptors, thus it makes sense to move the memory allocation out - * of the main interrupt handler and do it in a bottom half handler - * and only allocate new buffers when the number of buffers in the - * ring is below a certain threshold. In order to avoid starving the - * NIC under heavy load it is however necessary to force allocation - * when hitting a minimum threshold. The strategy for alloction is as - * follows: - * - * RX_LOW_BUF_THRES - allocate buffers in the bottom half - * RX_PANIC_LOW_THRES - we are very low on buffers, allocate - * the buffers in the interrupt handler - * RX_RING_THRES - maximum number of buffers in the rx ring - * RX_MINI_THRES - maximum number of buffers in the mini ring - * RX_JUMBO_THRES - maximum number of buffers in the jumbo ring - * - * One advantagous side effect of this allocation approach is that the - * entire rx processing can be done without holding any spin lock - * since the rx rings and registers are totally independent of the tx - * ring and its registers. This of course includes the kmalloc's of - * new skb's. Thus start_xmit can run in parallel with rx processing - * and the memory allocation on SMP systems. - * - * Note that running the skb reallocation in a bottom half opens up - * another can of races which needs to be handled properly. In - * particular it can happen that the interrupt handler tries to run - * the reallocation while the bottom half is either running on another - * CPU or was interrupted on the same CPU. To get around this the - * driver uses bitops to prevent the reallocation routines from being - * reentered. - * - * TX handling can also be done without holding any spin lock, wheee - * this is fun! since tx_ret_csm is only written to by the interrupt - * handler. The case to be aware of is when shutting down the device - * and cleaning up where it is necessary to make sure that - * start_xmit() is not running while this is happening. Well DaveM - * informs me that this case is already protected against ... bye bye - * Mr. Spin Lock, it was nice to know you. - * - * TX interrupts are now partly disabled so the NIC will only generate - * TX interrupts for the number of coal ticks, not for the number of - * TX packets in the queue. This should reduce the number of TX only, - * ie. when no RX processing is done, interrupts seen. - */ - -/* - * Threshold values for RX buffer allocation - the low water marks for - * when to start refilling the rings are set to 75% of the ring - * sizes. It seems to make sense to refill the rings entirely from the - * intrrupt handler once it gets below the panic threshold, that way - * we don't risk that the refilling is moved to another CPU when the - * one running the interrupt handler just got the slab code hot in its - * cache. 
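- *
- * For illustration, that bitops guard reduces to the following
- * pattern, which is exactly what the refill call sites further down
- * in this file do:
- *
- *	if (!test_and_set_bit(0, &ap->std_refill_busy))
- *		ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
- *	else
- *		run_tasklet = 1;	/* a refill is already in flight */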
- */ -#define RX_RING_SIZE 72 -#define RX_MINI_SIZE 64 -#define RX_JUMBO_SIZE 48 - -#define RX_PANIC_STD_THRES 16 -#define RX_PANIC_STD_REFILL (3*RX_PANIC_STD_THRES)/2 -#define RX_LOW_STD_THRES (3*RX_RING_SIZE)/4 -#define RX_PANIC_MINI_THRES 12 -#define RX_PANIC_MINI_REFILL (3*RX_PANIC_MINI_THRES)/2 -#define RX_LOW_MINI_THRES (3*RX_MINI_SIZE)/4 -#define RX_PANIC_JUMBO_THRES 6 -#define RX_PANIC_JUMBO_REFILL (3*RX_PANIC_JUMBO_THRES)/2 -#define RX_LOW_JUMBO_THRES (3*RX_JUMBO_SIZE)/4 - - -/* - * Size of the mini ring entries, basically these just should be big - * enough to take TCP ACKs - */ -#define ACE_MINI_SIZE 100 - -#define ACE_MINI_BUFSIZE ACE_MINI_SIZE -#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4) -#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4) - -/* - * There seems to be a magic difference in the effect between 995 and 996 - * but little difference between 900 and 995 ... no idea why. - * - * There is now a default set of tuning parameters which is set, depending - * on whether or not the user enables Jumbo frames. It's assumed that if - * Jumbo frames are enabled, the user wants optimal tuning for that case. - */ -#define DEF_TX_COAL 400 /* 996 */ -#define DEF_TX_MAX_DESC 60 /* was 40 */ -#define DEF_RX_COAL 120 /* 1000 */ -#define DEF_RX_MAX_DESC 25 -#define DEF_TX_RATIO 21 /* 24 */ - -#define DEF_JUMBO_TX_COAL 20 -#define DEF_JUMBO_TX_MAX_DESC 60 -#define DEF_JUMBO_RX_COAL 30 -#define DEF_JUMBO_RX_MAX_DESC 6 -#define DEF_JUMBO_TX_RATIO 21 - -#if tigon2FwReleaseLocal < 20001118 -/* - * Standard firmware and early modifications duplicate - * IRQ load without this flag (coal timer is never reset). - * Note that with this flag tx_coal should be less than - * time to xmit full tx ring. - * 400usec is not so bad for tx ring size of 128. - */ -#define TX_COAL_INTS_ONLY 1 /* worth it */ -#else -/* - * With modified firmware, this is not necessary, but still useful. 
- */ -#define TX_COAL_INTS_ONLY 1 -#endif - -#define DEF_TRACE 0 -#define DEF_STAT (2 * TICKS_PER_SEC) - - -static int link_state[ACE_MAX_MOD_PARMS]; -static int trace[ACE_MAX_MOD_PARMS]; -static int tx_coal_tick[ACE_MAX_MOD_PARMS]; -static int rx_coal_tick[ACE_MAX_MOD_PARMS]; -static int max_tx_desc[ACE_MAX_MOD_PARMS]; -static int max_rx_desc[ACE_MAX_MOD_PARMS]; -static int tx_ratio[ACE_MAX_MOD_PARMS]; -static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1}; - -MODULE_AUTHOR("Jes Sorensen "); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver"); -#ifndef CONFIG_ACENIC_OMIT_TIGON_I -MODULE_FIRMWARE("acenic/tg1.bin"); -#endif -MODULE_FIRMWARE("acenic/tg2.bin"); - -module_param_array_named(link, link_state, int, NULL, 0); -module_param_array(trace, int, NULL, 0); -module_param_array(tx_coal_tick, int, NULL, 0); -module_param_array(max_tx_desc, int, NULL, 0); -module_param_array(rx_coal_tick, int, NULL, 0); -module_param_array(max_rx_desc, int, NULL, 0); -module_param_array(tx_ratio, int, NULL, 0); -MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state"); -MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level"); -MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives"); -MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait"); -MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives"); -MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait"); -MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)"); - - -static const char version[] __devinitconst = - "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n" - " http://home.cern.ch/~jes/gige/acenic.html\n"; - -static int ace_get_settings(struct net_device *, struct ethtool_cmd *); -static int ace_set_settings(struct net_device *, struct ethtool_cmd *); -static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); - -static const struct ethtool_ops ace_ethtool_ops = { - .get_settings = ace_get_settings, - .set_settings = ace_set_settings, - .get_drvinfo = ace_get_drvinfo, -}; - -static void ace_watchdog(struct net_device *dev); - -static const struct net_device_ops ace_netdev_ops = { - .ndo_open = ace_open, - .ndo_stop = ace_close, - .ndo_tx_timeout = ace_watchdog, - .ndo_get_stats = ace_get_stats, - .ndo_start_xmit = ace_start_xmit, - .ndo_set_multicast_list = ace_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = ace_set_mac_addr, - .ndo_change_mtu = ace_change_mtu, -}; - -static int __devinit acenic_probe_one(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - struct net_device *dev; - struct ace_private *ap; - static int boards_found; - - dev = alloc_etherdev(sizeof(struct ace_private)); - if (dev == NULL) { - printk(KERN_ERR "acenic: Unable to allocate " - "net_device structure!\n"); - return -ENOMEM; - } - - SET_NETDEV_DEV(dev, &pdev->dev); - - ap = netdev_priv(dev); - ap->pdev = pdev; - ap->name = pci_name(pdev); - - dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - - dev->watchdog_timeo = 5*HZ; - - dev->netdev_ops = &ace_netdev_ops; - SET_ETHTOOL_OPS(dev, &ace_ethtool_ops); - - /* we only display this string ONCE */ - if (!boards_found) - printk(version); - - if (pci_enable_device(pdev)) - goto 
fail_free_netdev; - - /* - * Enable master mode before we start playing with the - * pci_command word since pci_set_master() will modify - * it. - */ - pci_set_master(pdev); - - pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command); - - /* OpenFirmware on Mac's does not set this - DOH.. */ - if (!(ap->pci_command & PCI_COMMAND_MEMORY)) { - printk(KERN_INFO "%s: Enabling PCI Memory Mapped " - "access - was not enabled by BIOS/Firmware\n", - ap->name); - ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY; - pci_write_config_word(ap->pdev, PCI_COMMAND, - ap->pci_command); - wmb(); - } - - pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency); - if (ap->pci_latency <= 0x40) { - ap->pci_latency = 0x40; - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency); - } - - /* - * Remap the regs into kernel space - this is abuse of - * dev->base_addr since it was means for I/O port - * addresses but who gives a damn. - */ - dev->base_addr = pci_resource_start(pdev, 0); - ap->regs = ioremap(dev->base_addr, 0x4000); - if (!ap->regs) { - printk(KERN_ERR "%s: Unable to map I/O register, " - "AceNIC %i will be disabled.\n", - ap->name, boards_found); - goto fail_free_netdev; - } - - switch(pdev->vendor) { - case PCI_VENDOR_ID_ALTEON: - if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) { - printk(KERN_INFO "%s: Farallon PN9100-T ", - ap->name); - } else { - printk(KERN_INFO "%s: Alteon AceNIC ", - ap->name); - } - break; - case PCI_VENDOR_ID_3COM: - printk(KERN_INFO "%s: 3Com 3C985 ", ap->name); - break; - case PCI_VENDOR_ID_NETGEAR: - printk(KERN_INFO "%s: NetGear GA620 ", ap->name); - break; - case PCI_VENDOR_ID_DEC: - if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) { - printk(KERN_INFO "%s: Farallon PN9000-SX ", - ap->name); - break; - } - case PCI_VENDOR_ID_SGI: - printk(KERN_INFO "%s: SGI AceNIC ", ap->name); - break; - default: - printk(KERN_INFO "%s: Unknown AceNIC ", ap->name); - break; - } - - printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr); - printk("irq %d\n", pdev->irq); - -#ifdef CONFIG_ACENIC_OMIT_TIGON_I - if ((readl(&ap->regs->HostCtrl) >> 28) == 4) { - printk(KERN_ERR "%s: Driver compiled without Tigon I" - " support - NIC disabled\n", dev->name); - goto fail_uninit; - } -#endif - - if (ace_allocate_descriptors(dev)) - goto fail_free_netdev; - -#ifdef MODULE - if (boards_found >= ACE_MAX_MOD_PARMS) - ap->board_idx = BOARD_IDX_OVERFLOW; - else - ap->board_idx = boards_found; -#else - ap->board_idx = BOARD_IDX_STATIC; -#endif - - if (ace_init(dev)) - goto fail_free_netdev; - - if (register_netdev(dev)) { - printk(KERN_ERR "acenic: device registration failed\n"); - goto fail_uninit; - } - ap->name = dev->name; - - if (ap->pci_using_dac) - dev->features |= NETIF_F_HIGHDMA; - - pci_set_drvdata(pdev, dev); - - boards_found++; - return 0; - - fail_uninit: - ace_init_cleanup(dev); - fail_free_netdev: - free_netdev(dev); - return -ENODEV; -} - -static void __devexit acenic_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - short i; - - unregister_netdev(dev); - - writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl); - if (ap->version >= 2) - writel(readl(®s->CpuBCtrl) | CPU_HALT, ®s->CpuBCtrl); - - /* - * This clears any pending interrupts - */ - writel(1, ®s->Mb0Lo); - readl(®s->CpuCtrl); /* flush */ - - /* - * Make sure no other CPUs are processing interrupts - * on the card before the buffers are being released. 
- * Otherwise one might experience some `interesting' - * effects. - * - * Then release the RX buffers - jumbo buffers were - * already released in ace_close(). - */ - ace_sync_irq(dev->irq); - - for (i = 0; i < RX_STD_RING_ENTRIES; i++) { - struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb; - - if (skb) { - struct ring_info *ringp; - dma_addr_t mapping; - - ringp = &ap->skb->rx_std_skbuff[i]; - mapping = dma_unmap_addr(ringp, mapping); - pci_unmap_page(ap->pdev, mapping, - ACE_STD_BUFSIZE, - PCI_DMA_FROMDEVICE); - - ap->rx_std_ring[i].size = 0; - ap->skb->rx_std_skbuff[i].skb = NULL; - dev_kfree_skb(skb); - } - } - - if (ap->version >= 2) { - for (i = 0; i < RX_MINI_RING_ENTRIES; i++) { - struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb; - - if (skb) { - struct ring_info *ringp; - dma_addr_t mapping; - - ringp = &ap->skb->rx_mini_skbuff[i]; - mapping = dma_unmap_addr(ringp,mapping); - pci_unmap_page(ap->pdev, mapping, - ACE_MINI_BUFSIZE, - PCI_DMA_FROMDEVICE); - - ap->rx_mini_ring[i].size = 0; - ap->skb->rx_mini_skbuff[i].skb = NULL; - dev_kfree_skb(skb); - } - } - } - - for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) { - struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb; - if (skb) { - struct ring_info *ringp; - dma_addr_t mapping; - - ringp = &ap->skb->rx_jumbo_skbuff[i]; - mapping = dma_unmap_addr(ringp, mapping); - pci_unmap_page(ap->pdev, mapping, - ACE_JUMBO_BUFSIZE, - PCI_DMA_FROMDEVICE); - - ap->rx_jumbo_ring[i].size = 0; - ap->skb->rx_jumbo_skbuff[i].skb = NULL; - dev_kfree_skb(skb); - } - } - - ace_init_cleanup(dev); - free_netdev(dev); -} - -static struct pci_driver acenic_pci_driver = { - .name = "acenic", - .id_table = acenic_pci_tbl, - .probe = acenic_probe_one, - .remove = __devexit_p(acenic_remove_one), -}; - -static int __init acenic_init(void) -{ - return pci_register_driver(&acenic_pci_driver); -} - -static void __exit acenic_exit(void) -{ - pci_unregister_driver(&acenic_pci_driver); -} - -module_init(acenic_init); -module_exit(acenic_exit); - -static void ace_free_descriptors(struct net_device *dev) -{ - struct ace_private *ap = netdev_priv(dev); - int size; - - if (ap->rx_std_ring != NULL) { - size = (sizeof(struct rx_desc) * - (RX_STD_RING_ENTRIES + - RX_JUMBO_RING_ENTRIES + - RX_MINI_RING_ENTRIES + - RX_RETURN_RING_ENTRIES)); - pci_free_consistent(ap->pdev, size, ap->rx_std_ring, - ap->rx_ring_base_dma); - ap->rx_std_ring = NULL; - ap->rx_jumbo_ring = NULL; - ap->rx_mini_ring = NULL; - ap->rx_return_ring = NULL; - } - if (ap->evt_ring != NULL) { - size = (sizeof(struct event) * EVT_RING_ENTRIES); - pci_free_consistent(ap->pdev, size, ap->evt_ring, - ap->evt_ring_dma); - ap->evt_ring = NULL; - } - if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) { - size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES); - pci_free_consistent(ap->pdev, size, ap->tx_ring, - ap->tx_ring_dma); - } - ap->tx_ring = NULL; - - if (ap->evt_prd != NULL) { - pci_free_consistent(ap->pdev, sizeof(u32), - (void *)ap->evt_prd, ap->evt_prd_dma); - ap->evt_prd = NULL; - } - if (ap->rx_ret_prd != NULL) { - pci_free_consistent(ap->pdev, sizeof(u32), - (void *)ap->rx_ret_prd, - ap->rx_ret_prd_dma); - ap->rx_ret_prd = NULL; - } - if (ap->tx_csm != NULL) { - pci_free_consistent(ap->pdev, sizeof(u32), - (void *)ap->tx_csm, ap->tx_csm_dma); - ap->tx_csm = NULL; - } -} - - -static int ace_allocate_descriptors(struct net_device *dev) -{ - struct ace_private *ap = netdev_priv(dev); - int size; - - size = (sizeof(struct rx_desc) * - (RX_STD_RING_ENTRIES + - RX_JUMBO_RING_ENTRIES + - RX_MINI_RING_ENTRIES + 
- RX_RETURN_RING_ENTRIES)); - - ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size, - &ap->rx_ring_base_dma); - if (ap->rx_std_ring == NULL) - goto fail; - - ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES; - ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES; - ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES; - - size = (sizeof(struct event) * EVT_RING_ENTRIES); - - ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma); - - if (ap->evt_ring == NULL) - goto fail; - - /* - * Only allocate a host TX ring for the Tigon II, the Tigon I - * has to use PCI registers for this ;-( - */ - if (!ACE_IS_TIGON_I(ap)) { - size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES); - - ap->tx_ring = pci_alloc_consistent(ap->pdev, size, - &ap->tx_ring_dma); - - if (ap->tx_ring == NULL) - goto fail; - } - - ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), - &ap->evt_prd_dma); - if (ap->evt_prd == NULL) - goto fail; - - ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), - &ap->rx_ret_prd_dma); - if (ap->rx_ret_prd == NULL) - goto fail; - - ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32), - &ap->tx_csm_dma); - if (ap->tx_csm == NULL) - goto fail; - - return 0; - -fail: - /* Clean up. */ - ace_init_cleanup(dev); - return 1; -} - - -/* - * Generic cleanup handling data allocated during init. Used when the - * module is unloaded or if an error occurs during initialization - */ -static void ace_init_cleanup(struct net_device *dev) -{ - struct ace_private *ap; - - ap = netdev_priv(dev); - - ace_free_descriptors(dev); - - if (ap->info) - pci_free_consistent(ap->pdev, sizeof(struct ace_info), - ap->info, ap->info_dma); - kfree(ap->skb); - kfree(ap->trace_buf); - - if (dev->irq) - free_irq(dev->irq, dev); - - iounmap(ap->regs); -} - - -/* - * Commands are considered to be slow. - */ -static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd) -{ - u32 idx; - - idx = readl(®s->CmdPrd); - - writel(*(u32 *)(cmd), ®s->CmdRng[idx]); - idx = (idx + 1) % CMD_RING_ENTRIES; - - writel(idx, ®s->CmdPrd); -} - - -static int __devinit ace_init(struct net_device *dev) -{ - struct ace_private *ap; - struct ace_regs __iomem *regs; - struct ace_info *info = NULL; - struct pci_dev *pdev; - unsigned long myjif; - u64 tmp_ptr; - u32 tig_ver, mac1, mac2, tmp, pci_state; - int board_idx, ecode = 0; - short i; - unsigned char cache_size; - - ap = netdev_priv(dev); - regs = ap->regs; - - board_idx = ap->board_idx; - - /* - * aman@sgi.com - its useful to do a NIC reset here to - * address the `Firmware not running' problem subsequent - * to any crashes involving the NIC - */ - writel(HW_RESET | (HW_RESET << 24), ®s->HostCtrl); - readl(®s->HostCtrl); /* PCI write posting */ - udelay(5); - - /* - * Don't access any other registers before this point! - */ -#ifdef __BIG_ENDIAN - /* - * This will most likely need BYTE_SWAP once we switch - * to using __raw_writel() - */ - writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)), - ®s->HostCtrl); -#else - writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)), - ®s->HostCtrl); -#endif - readl(®s->HostCtrl); /* PCI write posting */ - - /* - * Stop the NIC CPU and clear pending interrupts - */ - writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl); - readl(®s->CpuCtrl); /* PCI write posting */ - writel(0, ®s->Mb0Lo); - - tig_ver = readl(®s->HostCtrl) >> 28; - - switch(tig_ver){ -#ifndef CONFIG_ACENIC_OMIT_TIGON_I - case 4: - case 5: - printk(KERN_INFO " Tigon I (Rev. 
%i), Firmware: %i.%i.%i, ", - tig_ver, ap->firmware_major, ap->firmware_minor, - ap->firmware_fix); - writel(0, ®s->LocalCtrl); - ap->version = 1; - ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES; - break; -#endif - case 6: - printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ", - tig_ver, ap->firmware_major, ap->firmware_minor, - ap->firmware_fix); - writel(readl(®s->CpuBCtrl) | CPU_HALT, ®s->CpuBCtrl); - readl(®s->CpuBCtrl); /* PCI write posting */ - /* - * The SRAM bank size does _not_ indicate the amount - * of memory on the card, it controls the _bank_ size! - * Ie. a 1MB AceNIC will have two banks of 512KB. - */ - writel(SRAM_BANK_512K, ®s->LocalCtrl); - writel(SYNC_SRAM_TIMING, ®s->MiscCfg); - ap->version = 2; - ap->tx_ring_entries = MAX_TX_RING_ENTRIES; - break; - default: - printk(KERN_WARNING " Unsupported Tigon version detected " - "(%i)\n", tig_ver); - ecode = -ENODEV; - goto init_error; - } - - /* - * ModeStat _must_ be set after the SRAM settings as this change - * seems to corrupt the ModeStat and possible other registers. - * The SRAM settings survive resets and setting it to the same - * value a second time works as well. This is what caused the - * `Firmware not running' problem on the Tigon II. - */ -#ifdef __BIG_ENDIAN - writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD | - ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, ®s->ModeStat); -#else - writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | - ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, ®s->ModeStat); -#endif - readl(®s->ModeStat); /* PCI write posting */ - - mac1 = 0; - for(i = 0; i < 4; i++) { - int t; - - mac1 = mac1 << 8; - t = read_eeprom_byte(dev, 0x8c+i); - if (t < 0) { - ecode = -EIO; - goto init_error; - } else - mac1 |= (t & 0xff); - } - mac2 = 0; - for(i = 4; i < 8; i++) { - int t; - - mac2 = mac2 << 8; - t = read_eeprom_byte(dev, 0x8c+i); - if (t < 0) { - ecode = -EIO; - goto init_error; - } else - mac2 |= (t & 0xff); - } - - writel(mac1, ®s->MacAddrHi); - writel(mac2, ®s->MacAddrLo); - - dev->dev_addr[0] = (mac1 >> 8) & 0xff; - dev->dev_addr[1] = mac1 & 0xff; - dev->dev_addr[2] = (mac2 >> 24) & 0xff; - dev->dev_addr[3] = (mac2 >> 16) & 0xff; - dev->dev_addr[4] = (mac2 >> 8) & 0xff; - dev->dev_addr[5] = mac2 & 0xff; - - printk("MAC: %pM\n", dev->dev_addr); - - /* - * Looks like this is necessary to deal with on all architectures, - * even this %$#%$# N440BX Intel based thing doesn't get it right. - * Ie. having two NICs in the machine, one will have the cache - * line set at boot time, the other will not. - */ - pdev = ap->pdev; - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size); - cache_size <<= 2; - if (cache_size != SMP_CACHE_BYTES) { - printk(KERN_INFO " PCI cache line size set incorrectly " - "(%i bytes) by BIOS/FW, ", cache_size); - if (cache_size > SMP_CACHE_BYTES) - printk("expecting %i\n", SMP_CACHE_BYTES); - else { - printk("correcting to %i\n", SMP_CACHE_BYTES); - pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, - SMP_CACHE_BYTES >> 2); - } - } - - pci_state = readl(®s->PciState); - printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, " - "latency: %i clks\n", - (pci_state & PCI_32BIT) ? 32 : 64, - (pci_state & PCI_66MHZ) ? 66 : 33, - ap->pci_latency); - - /* - * Set the max DMA transfer size. Seems that for most systems - * the performance is better when no MAX parameter is - * set. However for systems enabling PCI write and invalidate, - * DMA writes must be set to the L1 cache line size to get - * optimal performance. 
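- *
- * Worked example (for illustration): with SMP_CACHE_BYTES == 64, a
- * correctly programmed PCI_CACHE_LINE_SIZE register reads back as 16
- * dwords (16 << 2 == 64 bytes), and the matching burst limit chosen
- * below is DMA_WRITE_MAX_64.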
- * - * The default is now to turn the PCI write and invalidate off - * - that is what Alteon does for NT. - */ - tmp = READ_CMD_MEM | WRITE_CMD_MEM; - if (ap->version >= 2) { - tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ)); - /* - * Tuning parameters only supported for 8 cards - */ - if (board_idx == BOARD_IDX_OVERFLOW || - dis_pci_mem_inval[board_idx]) { - if (ap->pci_command & PCI_COMMAND_INVALIDATE) { - ap->pci_command &= ~PCI_COMMAND_INVALIDATE; - pci_write_config_word(pdev, PCI_COMMAND, - ap->pci_command); - printk(KERN_INFO " Disabling PCI memory " - "write and invalidate\n"); - } - } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) { - printk(KERN_INFO " PCI memory write & invalidate " - "enabled by BIOS, enabling counter measures\n"); - - switch(SMP_CACHE_BYTES) { - case 16: - tmp |= DMA_WRITE_MAX_16; - break; - case 32: - tmp |= DMA_WRITE_MAX_32; - break; - case 64: - tmp |= DMA_WRITE_MAX_64; - break; - case 128: - tmp |= DMA_WRITE_MAX_128; - break; - default: - printk(KERN_INFO " Cache line size %i not " - "supported, PCI write and invalidate " - "disabled\n", SMP_CACHE_BYTES); - ap->pci_command &= ~PCI_COMMAND_INVALIDATE; - pci_write_config_word(pdev, PCI_COMMAND, - ap->pci_command); - } - } - } - -#ifdef __sparc__ - /* - * On this platform, we know what the best dma settings - * are. We use 64-byte maximum bursts, because if we - * burst larger than the cache line size (or even cross - * a 64byte boundary in a single burst) the UltraSparc - * PCI controller will disconnect at 64-byte multiples. - * - * Read-multiple will be properly enabled above, and when - * set will give the PCI controller proper hints about - * prefetching. - */ - tmp &= ~DMA_READ_WRITE_MASK; - tmp |= DMA_READ_MAX_64; - tmp |= DMA_WRITE_MAX_64; -#endif -#ifdef __alpha__ - tmp &= ~DMA_READ_WRITE_MASK; - tmp |= DMA_READ_MAX_128; - /* - * All the docs say MUST NOT. Well, I did. - * Nothing terrible happens, if we load wrong size. - * Bit w&i still works better! - */ - tmp |= DMA_WRITE_MAX_128; -#endif - writel(tmp, ®s->PciState); - -#if 0 - /* - * The Host PCI bus controller driver has to set FBB. - * If all devices on that PCI bus support FBB, then the controller - * can enable FBB support in the Host PCI Bus controller (or on - * the PCI-PCI bridge if that applies). - * -ggg - */ - /* - * I have received reports from people having problems when this - * bit is enabled. - */ - if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) { - printk(KERN_INFO " Enabling PCI Fast Back to Back\n"); - ap->pci_command |= PCI_COMMAND_FAST_BACK; - pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command); - } -#endif - - /* - * Configure DMA attributes. - */ - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - ap->pci_using_dac = 1; - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { - ap->pci_using_dac = 0; - } else { - ecode = -ENODEV; - goto init_error; - } - - /* - * Initialize the generic info block and the command+event rings - * and the control blocks for the transmit and receive rings - * as they need to be setup once and for all. - */ - if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info), - &ap->info_dma))) { - ecode = -EAGAIN; - goto init_error; - } - ap->info = info; - - /* - * Get the memory for the skb rings. 
- */ - if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) { - ecode = -EAGAIN; - goto init_error; - } - - ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED, - DRV_NAME, dev); - if (ecode) { - printk(KERN_WARNING "%s: Requested IRQ %d is busy\n", - DRV_NAME, pdev->irq); - goto init_error; - } else - dev->irq = pdev->irq; - -#ifdef INDEX_DEBUG - spin_lock_init(&ap->debug_lock); - ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1; - ap->last_std_rx = 0; - ap->last_mini_rx = 0; -#endif - - memset(ap->info, 0, sizeof(struct ace_info)); - memset(ap->skb, 0, sizeof(struct ace_skb)); - - ecode = ace_load_firmware(dev); - if (ecode) - goto init_error; - - ap->fw_running = 0; - - tmp_ptr = ap->info_dma; - writel(tmp_ptr >> 32, ®s->InfoPtrHi); - writel(tmp_ptr & 0xffffffff, ®s->InfoPtrLo); - - memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event)); - - set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma); - info->evt_ctrl.flags = 0; - - *(ap->evt_prd) = 0; - wmb(); - set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma); - writel(0, ®s->EvtCsm); - - set_aceaddr(&info->cmd_ctrl.rngptr, 0x100); - info->cmd_ctrl.flags = 0; - info->cmd_ctrl.max_len = 0; - - for (i = 0; i < CMD_RING_ENTRIES; i++) - writel(0, ®s->CmdRng[i]); - - writel(0, ®s->CmdPrd); - writel(0, ®s->CmdCsm); - - tmp_ptr = ap->info_dma; - tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats); - set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr); - - set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma); - info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE; - info->rx_std_ctrl.flags = - RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST; - - memset(ap->rx_std_ring, 0, - RX_STD_RING_ENTRIES * sizeof(struct rx_desc)); - - for (i = 0; i < RX_STD_RING_ENTRIES; i++) - ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM; - - ap->rx_std_skbprd = 0; - atomic_set(&ap->cur_rx_bufs, 0); - - set_aceaddr(&info->rx_jumbo_ctrl.rngptr, - (ap->rx_ring_base_dma + - (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES))); - info->rx_jumbo_ctrl.max_len = 0; - info->rx_jumbo_ctrl.flags = - RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST; - - memset(ap->rx_jumbo_ring, 0, - RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc)); - - for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) - ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO; - - ap->rx_jumbo_skbprd = 0; - atomic_set(&ap->cur_jumbo_bufs, 0); - - memset(ap->rx_mini_ring, 0, - RX_MINI_RING_ENTRIES * sizeof(struct rx_desc)); - - if (ap->version >= 2) { - set_aceaddr(&info->rx_mini_ctrl.rngptr, - (ap->rx_ring_base_dma + - (sizeof(struct rx_desc) * - (RX_STD_RING_ENTRIES + - RX_JUMBO_RING_ENTRIES)))); - info->rx_mini_ctrl.max_len = ACE_MINI_SIZE; - info->rx_mini_ctrl.flags = - RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST; - - for (i = 0; i < RX_MINI_RING_ENTRIES; i++) - ap->rx_mini_ring[i].flags = - BD_FLG_TCP_UDP_SUM | BD_FLG_MINI; - } else { - set_aceaddr(&info->rx_mini_ctrl.rngptr, 0); - info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE; - info->rx_mini_ctrl.max_len = 0; - } - - ap->rx_mini_skbprd = 0; - atomic_set(&ap->cur_mini_bufs, 0); - - set_aceaddr(&info->rx_return_ctrl.rngptr, - (ap->rx_ring_base_dma + - (sizeof(struct rx_desc) * - (RX_STD_RING_ENTRIES + - RX_JUMBO_RING_ENTRIES + - RX_MINI_RING_ENTRIES)))); - info->rx_return_ctrl.flags = 0; - info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES; - - memset(ap->rx_return_ring, 0, - RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc)); - - set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma); 
- *(ap->rx_ret_prd) = 0; - - writel(TX_RING_BASE, ®s->WinBase); - - if (ACE_IS_TIGON_I(ap)) { - ap->tx_ring = (__force struct tx_desc *) regs->Window; - for (i = 0; i < (TIGON_I_TX_RING_ENTRIES - * sizeof(struct tx_desc)) / sizeof(u32); i++) - writel(0, (__force void __iomem *)ap->tx_ring + i * 4); - - set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE); - } else { - memset(ap->tx_ring, 0, - MAX_TX_RING_ENTRIES * sizeof(struct tx_desc)); - - set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma); - } - - info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap); - tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST; - - /* - * The Tigon I does not like having the TX ring in host memory ;-( - */ - if (!ACE_IS_TIGON_I(ap)) - tmp |= RCB_FLG_TX_HOST_RING; -#if TX_COAL_INTS_ONLY - tmp |= RCB_FLG_COAL_INT_ONLY; -#endif - info->tx_ctrl.flags = tmp; - - set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma); - - /* - * Potential item for tuning parameter - */ -#if 0 /* NO */ - writel(DMA_THRESH_16W, ®s->DmaReadCfg); - writel(DMA_THRESH_16W, ®s->DmaWriteCfg); -#else - writel(DMA_THRESH_8W, ®s->DmaReadCfg); - writel(DMA_THRESH_8W, ®s->DmaWriteCfg); -#endif - - writel(0, ®s->MaskInt); - writel(1, ®s->IfIdx); -#if 0 - /* - * McKinley boxes do not like us fiddling with AssistState - * this early - */ - writel(1, ®s->AssistState); -#endif - - writel(DEF_STAT, ®s->TuneStatTicks); - writel(DEF_TRACE, ®s->TuneTrace); - - ace_set_rxtx_parms(dev, 0); - - if (board_idx == BOARD_IDX_OVERFLOW) { - printk(KERN_WARNING "%s: more than %i NICs detected, " - "ignoring module parameters!\n", - ap->name, ACE_MAX_MOD_PARMS); - } else if (board_idx >= 0) { - if (tx_coal_tick[board_idx]) - writel(tx_coal_tick[board_idx], - ®s->TuneTxCoalTicks); - if (max_tx_desc[board_idx]) - writel(max_tx_desc[board_idx], ®s->TuneMaxTxDesc); - - if (rx_coal_tick[board_idx]) - writel(rx_coal_tick[board_idx], - ®s->TuneRxCoalTicks); - if (max_rx_desc[board_idx]) - writel(max_rx_desc[board_idx], ®s->TuneMaxRxDesc); - - if (trace[board_idx]) - writel(trace[board_idx], ®s->TuneTrace); - - if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64)) - writel(tx_ratio[board_idx], ®s->TxBufRat); - } - - /* - * Default link parameters - */ - tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB | - LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE; - if(ap->version >= 2) - tmp |= LNK_TX_FLOW_CTL_Y; - - /* - * Override link default parameters - */ - if ((board_idx >= 0) && link_state[board_idx]) { - int option = link_state[board_idx]; - - tmp = LNK_ENABLE; - - if (option & 0x01) { - printk(KERN_INFO "%s: Setting half duplex link\n", - ap->name); - tmp &= ~LNK_FULL_DUPLEX; - } - if (option & 0x02) - tmp &= ~LNK_NEGOTIATE; - if (option & 0x10) - tmp |= LNK_10MB; - if (option & 0x20) - tmp |= LNK_100MB; - if (option & 0x40) - tmp |= LNK_1000MB; - if ((option & 0x70) == 0) { - printk(KERN_WARNING "%s: No media speed specified, " - "forcing auto negotiation\n", ap->name); - tmp |= LNK_NEGOTIATE | LNK_1000MB | - LNK_100MB | LNK_10MB; - } - if ((option & 0x100) == 0) - tmp |= LNK_NEG_FCTL; - else - printk(KERN_INFO "%s: Disabling flow control " - "negotiation\n", ap->name); - if (option & 0x200) - tmp |= LNK_RX_FLOW_CTL_Y; - if ((option & 0x400) && (ap->version >= 2)) { - printk(KERN_INFO "%s: Enabling TX flow control\n", - ap->name); - tmp |= LNK_TX_FLOW_CTL_Y; - } - } - - ap->link = tmp; - writel(tmp, ®s->TuneLink); - if (ap->version >= 2) - writel(tmp, ®s->TuneFastLink); - - writel(ap->firmware_start, ®s->Pc); - - writel(0, ®s->Mb0Lo); - 
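-
-	/*
-	 * Worked example (for illustration): link=0x0270, the documented
-	 * default, walks the override branches above to LNK_ENABLE plus
-	 * LNK_10MB | LNK_100MB | LNK_1000MB (bits 0x10/0x20/0x40),
-	 * LNK_NEG_FCTL (bit 0x100 clear) and LNK_RX_FLOW_CTL_Y (bit
-	 * 0x200).
-	 */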
- /* - * Set tx_csm before we start receiving interrupts, otherwise - * the interrupt handler might think it is supposed to process - * tx ints before we are up and running, which may cause a null - * pointer access in the int handler. - */ - ap->cur_rx = 0; - ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0; - - wmb(); - ace_set_txprd(regs, ap, 0); - writel(0, ®s->RxRetCsm); - - /* - * Enable DMA engine now. - * If we do this sooner, Mckinley box pukes. - * I assume it's because Tigon II DMA engine wants to check - * *something* even before the CPU is started. - */ - writel(1, ®s->AssistState); /* enable DMA */ - - /* - * Start the NIC CPU - */ - writel(readl(®s->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), ®s->CpuCtrl); - readl(®s->CpuCtrl); - - /* - * Wait for the firmware to spin up - max 3 seconds. - */ - myjif = jiffies + 3 * HZ; - while (time_before(jiffies, myjif) && !ap->fw_running) - cpu_relax(); - - if (!ap->fw_running) { - printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name); - - ace_dump_trace(ap); - writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl); - readl(®s->CpuCtrl); - - /* aman@sgi.com - account for badly behaving firmware/NIC: - * - have observed that the NIC may continue to generate - * interrupts for some reason; attempt to stop it - halt - * second CPU for Tigon II cards, and also clear Mb0 - * - if we're a module, we'll fail to load if this was - * the only GbE card in the system => if the kernel does - * see an interrupt from the NIC, code to handle it is - * gone and OOps! - so free_irq also - */ - if (ap->version >= 2) - writel(readl(®s->CpuBCtrl) | CPU_HALT, - ®s->CpuBCtrl); - writel(0, ®s->Mb0Lo); - readl(®s->Mb0Lo); - - ecode = -EBUSY; - goto init_error; - } - - /* - * We load the ring here as there seem to be no way to tell the - * firmware to wipe the ring without re-initializing it. 
- */ - if (!test_and_set_bit(0, &ap->std_refill_busy)) - ace_load_std_rx_ring(dev, RX_RING_SIZE); - else - printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n", - ap->name); - if (ap->version >= 2) { - if (!test_and_set_bit(0, &ap->mini_refill_busy)) - ace_load_mini_rx_ring(dev, RX_MINI_SIZE); - else - printk(KERN_ERR "%s: Someone is busy refilling " - "the RX mini ring\n", ap->name); - } - return 0; - - init_error: - ace_init_cleanup(dev); - return ecode; -} - - -static void ace_set_rxtx_parms(struct net_device *dev, int jumbo) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - int board_idx = ap->board_idx; - - if (board_idx >= 0) { - if (!jumbo) { - if (!tx_coal_tick[board_idx]) - writel(DEF_TX_COAL, ®s->TuneTxCoalTicks); - if (!max_tx_desc[board_idx]) - writel(DEF_TX_MAX_DESC, ®s->TuneMaxTxDesc); - if (!rx_coal_tick[board_idx]) - writel(DEF_RX_COAL, ®s->TuneRxCoalTicks); - if (!max_rx_desc[board_idx]) - writel(DEF_RX_MAX_DESC, ®s->TuneMaxRxDesc); - if (!tx_ratio[board_idx]) - writel(DEF_TX_RATIO, ®s->TxBufRat); - } else { - if (!tx_coal_tick[board_idx]) - writel(DEF_JUMBO_TX_COAL, - ®s->TuneTxCoalTicks); - if (!max_tx_desc[board_idx]) - writel(DEF_JUMBO_TX_MAX_DESC, - ®s->TuneMaxTxDesc); - if (!rx_coal_tick[board_idx]) - writel(DEF_JUMBO_RX_COAL, - ®s->TuneRxCoalTicks); - if (!max_rx_desc[board_idx]) - writel(DEF_JUMBO_RX_MAX_DESC, - ®s->TuneMaxRxDesc); - if (!tx_ratio[board_idx]) - writel(DEF_JUMBO_TX_RATIO, ®s->TxBufRat); - } - } -} - - -static void ace_watchdog(struct net_device *data) -{ - struct net_device *dev = data; - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - - /* - * We haven't received a stats update event for more than 2.5 - * seconds and there is data in the transmit queue, thus we - * assume the card is stuck. - */ - if (*ap->tx_csm != ap->tx_ret_csm) { - printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n", - dev->name, (unsigned int)readl(®s->HostCtrl)); - /* This can happen due to ieee flow control. */ - } else { - printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n", - dev->name); -#if 0 - netif_wake_queue(dev); -#endif - } -} - - -static void ace_tasklet(unsigned long arg) -{ - struct net_device *dev = (struct net_device *) arg; - struct ace_private *ap = netdev_priv(dev); - int cur_size; - - cur_size = atomic_read(&ap->cur_rx_bufs); - if ((cur_size < RX_LOW_STD_THRES) && - !test_and_set_bit(0, &ap->std_refill_busy)) { -#ifdef DEBUG - printk("refilling buffers (current %i)\n", cur_size); -#endif - ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size); - } - - if (ap->version >= 2) { - cur_size = atomic_read(&ap->cur_mini_bufs); - if ((cur_size < RX_LOW_MINI_THRES) && - !test_and_set_bit(0, &ap->mini_refill_busy)) { -#ifdef DEBUG - printk("refilling mini buffers (current %i)\n", - cur_size); -#endif - ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size); - } - } - - cur_size = atomic_read(&ap->cur_jumbo_bufs); - if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) && - !test_and_set_bit(0, &ap->jumbo_refill_busy)) { -#ifdef DEBUG - printk("refilling jumbo buffers (current %i)\n", cur_size); -#endif - ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size); - } - ap->tasklet_pending = 0; -} - - -/* - * Copy the contents of the NIC's trace buffer to kernel memory. - */ -static void ace_dump_trace(struct ace_private *ap) -{ -#if 0 - if (!ap->trace_buf) - if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL))) - return; -#endif -} - - -/* - * Load the standard rx ring. 
- * - * Loading rings is safe without holding the spin lock since this is - * done only before the device is enabled, thus no interrupts are - * generated and by the interrupt handler/tasklet handler. - */ -static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - short i, idx; - - - prefetchw(&ap->cur_rx_bufs); - - idx = ap->rx_std_skbprd; - - for (i = 0; i < nr_bufs; i++) { - struct sk_buff *skb; - struct rx_desc *rd; - dma_addr_t mapping; - - skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE); - if (!skb) - break; - - mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), - offset_in_page(skb->data), - ACE_STD_BUFSIZE, - PCI_DMA_FROMDEVICE); - ap->skb->rx_std_skbuff[idx].skb = skb; - dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], - mapping, mapping); - - rd = &ap->rx_std_ring[idx]; - set_aceaddr(&rd->addr, mapping); - rd->size = ACE_STD_BUFSIZE; - rd->idx = idx; - idx = (idx + 1) % RX_STD_RING_ENTRIES; - } - - if (!i) - goto error_out; - - atomic_add(i, &ap->cur_rx_bufs); - ap->rx_std_skbprd = idx; - - if (ACE_IS_TIGON_I(ap)) { - struct cmd cmd; - cmd.evt = C_SET_RX_PRD_IDX; - cmd.code = 0; - cmd.idx = ap->rx_std_skbprd; - ace_issue_cmd(regs, &cmd); - } else { - writel(idx, ®s->RxStdPrd); - wmb(); - } - - out: - clear_bit(0, &ap->std_refill_busy); - return; - - error_out: - printk(KERN_INFO "Out of memory when allocating " - "standard receive buffers\n"); - goto out; -} - - -static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - short i, idx; - - prefetchw(&ap->cur_mini_bufs); - - idx = ap->rx_mini_skbprd; - for (i = 0; i < nr_bufs; i++) { - struct sk_buff *skb; - struct rx_desc *rd; - dma_addr_t mapping; - - skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE); - if (!skb) - break; - - mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), - offset_in_page(skb->data), - ACE_MINI_BUFSIZE, - PCI_DMA_FROMDEVICE); - ap->skb->rx_mini_skbuff[idx].skb = skb; - dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], - mapping, mapping); - - rd = &ap->rx_mini_ring[idx]; - set_aceaddr(&rd->addr, mapping); - rd->size = ACE_MINI_BUFSIZE; - rd->idx = idx; - idx = (idx + 1) % RX_MINI_RING_ENTRIES; - } - - if (!i) - goto error_out; - - atomic_add(i, &ap->cur_mini_bufs); - - ap->rx_mini_skbprd = idx; - - writel(idx, ®s->RxMiniPrd); - wmb(); - - out: - clear_bit(0, &ap->mini_refill_busy); - return; - error_out: - printk(KERN_INFO "Out of memory when allocating " - "mini receive buffers\n"); - goto out; -} - - -/* - * Load the jumbo rx ring, this may happen at any time if the MTU - * is changed to a value > 1500. 
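- *
- * All three ring loaders share the same shape (sketch): allocate an
- * skb, pci_map_page() it, stash skb and mapping in the ring_info
- * slot, point the descriptor at the mapping and advance the producer
- * index modulo the ring size; on allocation failure the loop simply
- * breaks out and only the buffers actually posted are accounted for
- * with atomic_add().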
- */ -static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - short i, idx; - - idx = ap->rx_jumbo_skbprd; - - for (i = 0; i < nr_bufs; i++) { - struct sk_buff *skb; - struct rx_desc *rd; - dma_addr_t mapping; - - skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE); - if (!skb) - break; - - mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), - offset_in_page(skb->data), - ACE_JUMBO_BUFSIZE, - PCI_DMA_FROMDEVICE); - ap->skb->rx_jumbo_skbuff[idx].skb = skb; - dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx], - mapping, mapping); - - rd = &ap->rx_jumbo_ring[idx]; - set_aceaddr(&rd->addr, mapping); - rd->size = ACE_JUMBO_BUFSIZE; - rd->idx = idx; - idx = (idx + 1) % RX_JUMBO_RING_ENTRIES; - } - - if (!i) - goto error_out; - - atomic_add(i, &ap->cur_jumbo_bufs); - ap->rx_jumbo_skbprd = idx; - - if (ACE_IS_TIGON_I(ap)) { - struct cmd cmd; - cmd.evt = C_SET_RX_JUMBO_PRD_IDX; - cmd.code = 0; - cmd.idx = ap->rx_jumbo_skbprd; - ace_issue_cmd(regs, &cmd); - } else { - writel(idx, ®s->RxJumboPrd); - wmb(); - } - - out: - clear_bit(0, &ap->jumbo_refill_busy); - return; - error_out: - if (net_ratelimit()) - printk(KERN_INFO "Out of memory when allocating " - "jumbo receive buffers\n"); - goto out; -} - - -/* - * All events are considered to be slow (RX/TX ints do not generate - * events) and are handled here, outside the main interrupt handler, - * to reduce the size of the handler. - */ -static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd) -{ - struct ace_private *ap; - - ap = netdev_priv(dev); - - while (evtcsm != evtprd) { - switch (ap->evt_ring[evtcsm].evt) { - case E_FW_RUNNING: - printk(KERN_INFO "%s: Firmware up and running\n", - ap->name); - ap->fw_running = 1; - wmb(); - break; - case E_STATS_UPDATED: - break; - case E_LNK_STATE: - { - u16 code = ap->evt_ring[evtcsm].code; - switch (code) { - case E_C_LINK_UP: - { - u32 state = readl(&ap->regs->GigLnkState); - printk(KERN_WARNING "%s: Optical link UP " - "(%s Duplex, Flow Control: %s%s)\n", - ap->name, - state & LNK_FULL_DUPLEX ? "Full":"Half", - state & LNK_TX_FLOW_CTL_Y ? "TX " : "", - state & LNK_RX_FLOW_CTL_Y ? 
"RX" : ""); - break; - } - case E_C_LINK_DOWN: - printk(KERN_WARNING "%s: Optical link DOWN\n", - ap->name); - break; - case E_C_LINK_10_100: - printk(KERN_WARNING "%s: 10/100BaseT link " - "UP\n", ap->name); - break; - default: - printk(KERN_ERR "%s: Unknown optical link " - "state %02x\n", ap->name, code); - } - break; - } - case E_ERROR: - switch(ap->evt_ring[evtcsm].code) { - case E_C_ERR_INVAL_CMD: - printk(KERN_ERR "%s: invalid command error\n", - ap->name); - break; - case E_C_ERR_UNIMP_CMD: - printk(KERN_ERR "%s: unimplemented command " - "error\n", ap->name); - break; - case E_C_ERR_BAD_CFG: - printk(KERN_ERR "%s: bad config error\n", - ap->name); - break; - default: - printk(KERN_ERR "%s: unknown error %02x\n", - ap->name, ap->evt_ring[evtcsm].code); - } - break; - case E_RESET_JUMBO_RNG: - { - int i; - for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) { - if (ap->skb->rx_jumbo_skbuff[i].skb) { - ap->rx_jumbo_ring[i].size = 0; - set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0); - dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb); - ap->skb->rx_jumbo_skbuff[i].skb = NULL; - } - } - - if (ACE_IS_TIGON_I(ap)) { - struct cmd cmd; - cmd.evt = C_SET_RX_JUMBO_PRD_IDX; - cmd.code = 0; - cmd.idx = 0; - ace_issue_cmd(ap->regs, &cmd); - } else { - writel(0, &((ap->regs)->RxJumboPrd)); - wmb(); - } - - ap->jumbo = 0; - ap->rx_jumbo_skbprd = 0; - printk(KERN_INFO "%s: Jumbo ring flushed\n", - ap->name); - clear_bit(0, &ap->jumbo_refill_busy); - break; - } - default: - printk(KERN_ERR "%s: Unhandled event 0x%02x\n", - ap->name, ap->evt_ring[evtcsm].evt); - } - evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES; - } - - return evtcsm; -} - - -static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) -{ - struct ace_private *ap = netdev_priv(dev); - u32 idx; - int mini_count = 0, std_count = 0; - - idx = rxretcsm; - - prefetchw(&ap->cur_rx_bufs); - prefetchw(&ap->cur_mini_bufs); - - while (idx != rxretprd) { - struct ring_info *rip; - struct sk_buff *skb; - struct rx_desc *rxdesc, *retdesc; - u32 skbidx; - int bd_flags, desc_type, mapsize; - u16 csum; - - - /* make sure the rx descriptor isn't read before rxretprd */ - if (idx == rxretcsm) - rmb(); - - retdesc = &ap->rx_return_ring[idx]; - skbidx = retdesc->idx; - bd_flags = retdesc->flags; - desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI); - - switch(desc_type) { - /* - * Normal frames do not have any flags set - * - * Mini and normal frames arrive frequently, - * so use a local counter to avoid doing - * atomic operations for each packet arriving. - */ - case 0: - rip = &ap->skb->rx_std_skbuff[skbidx]; - mapsize = ACE_STD_BUFSIZE; - rxdesc = &ap->rx_std_ring[skbidx]; - std_count++; - break; - case BD_FLG_JUMBO: - rip = &ap->skb->rx_jumbo_skbuff[skbidx]; - mapsize = ACE_JUMBO_BUFSIZE; - rxdesc = &ap->rx_jumbo_ring[skbidx]; - atomic_dec(&ap->cur_jumbo_bufs); - break; - case BD_FLG_MINI: - rip = &ap->skb->rx_mini_skbuff[skbidx]; - mapsize = ACE_MINI_BUFSIZE; - rxdesc = &ap->rx_mini_ring[skbidx]; - mini_count++; - break; - default: - printk(KERN_INFO "%s: unknown frame type (0x%02x) " - "returned by NIC\n", dev->name, - retdesc->flags); - goto error; - } - - skb = rip->skb; - rip->skb = NULL; - pci_unmap_page(ap->pdev, - dma_unmap_addr(rip, mapping), - mapsize, - PCI_DMA_FROMDEVICE); - skb_put(skb, retdesc->size); - - /* - * Fly baby, fly! - */ - csum = retdesc->tcp_udp_csum; - - skb->protocol = eth_type_trans(skb, dev); - - /* - * Instead of forcing the poor tigon mips cpu to calculate - * pseudo hdr checksum, we do this ourselves. 
- */ - if (bd_flags & BD_FLG_TCP_UDP_SUM) { - skb->csum = htons(csum); - skb->ip_summed = CHECKSUM_COMPLETE; - } else { - skb_checksum_none_assert(skb); - } - - /* send it up */ - if ((bd_flags & BD_FLG_VLAN_TAG)) - __vlan_hwaccel_put_tag(skb, retdesc->vlan); - netif_rx(skb); - - dev->stats.rx_packets++; - dev->stats.rx_bytes += retdesc->size; - - idx = (idx + 1) % RX_RETURN_RING_ENTRIES; - } - - atomic_sub(std_count, &ap->cur_rx_bufs); - if (!ACE_IS_TIGON_I(ap)) - atomic_sub(mini_count, &ap->cur_mini_bufs); - - out: - /* - * According to the documentation RxRetCsm is obsolete with - * the 12.3.x Firmware - my Tigon I NICs seem to disagree! - */ - if (ACE_IS_TIGON_I(ap)) { - writel(idx, &ap->regs->RxRetCsm); - } - ap->cur_rx = idx; - - return; - error: - idx = rxretprd; - goto out; -} - - -static inline void ace_tx_int(struct net_device *dev, - u32 txcsm, u32 idx) -{ - struct ace_private *ap = netdev_priv(dev); - - do { - struct sk_buff *skb; - struct tx_ring_info *info; - - info = ap->skb->tx_skbuff + idx; - skb = info->skb; - - if (dma_unmap_len(info, maplen)) { - pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), - dma_unmap_len(info, maplen), - PCI_DMA_TODEVICE); - dma_unmap_len_set(info, maplen, 0); - } - - if (skb) { - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - dev_kfree_skb_irq(skb); - info->skb = NULL; - } - - idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); - } while (idx != txcsm); - - if (netif_queue_stopped(dev)) - netif_wake_queue(dev); - - wmb(); - ap->tx_ret_csm = txcsm; - - /* So... tx_ret_csm is advanced _after_ check for device wakeup. - * - * We could try to make it before. In this case we would get - * the following race condition: hard_start_xmit on other cpu - * enters after we advanced tx_ret_csm and fills space, - * which we have just freed, so that we make illegal device wakeup. - * There is no good way to workaround this (at entry - * to ace_start_xmit detects this condition and prevents - * ring corruption, but it is not a good workaround.) - * - * When tx_ret_csm is advanced after, we wake up device _only_ - * if we really have some space in ring (though the core doing - * hard_start_xmit can see full ring for some period and has to - * synchronize.) Superb. - * BUT! We get another subtle race condition. hard_start_xmit - * may think that ring is full between wakeup and advancing - * tx_ret_csm and will stop device instantly! It is not so bad. - * We are guaranteed that there is something in ring, so that - * the next irq will resume transmission. To speedup this we could - * mark descriptor, which closes ring with BD_FLG_COAL_NOW - * (see ace_start_xmit). - * - * Well, this dilemma exists in all lock-free devices. - * We, following scheme used in drivers by Donald Becker, - * select the least dangerous. - * --ANK - */ -} - - -static irqreturn_t ace_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *)dev_id; - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - u32 idx; - u32 txcsm, rxretcsm, rxretprd; - u32 evtcsm, evtprd; - - /* - * In case of PCI shared interrupts or spurious interrupts, - * we want to make sure it is actually our interrupt before - * spending any time in here. - */ - if (!(readl(®s->HostCtrl) & IN_INT)) - return IRQ_NONE; - - /* - * ACK intr now. Otherwise we will lose updates to rx_ret_prd, - * which happened _after_ rxretprd = *ap->rx_ret_prd; but before - * writel(0, ®s->Mb0Lo). 
- * - * "IRQ avoidance" recommended in docs applies to IRQs served - * threads and it is wrong even for that case. - */ - writel(0, ®s->Mb0Lo); - readl(®s->Mb0Lo); - - /* - * There is no conflict between transmit handling in - * start_xmit and receive processing, thus there is no reason - * to take a spin lock for RX handling. Wait until we start - * working on the other stuff - hey we don't need a spin lock - * anymore. - */ - rxretprd = *ap->rx_ret_prd; - rxretcsm = ap->cur_rx; - - if (rxretprd != rxretcsm) - ace_rx_int(dev, rxretprd, rxretcsm); - - txcsm = *ap->tx_csm; - idx = ap->tx_ret_csm; - - if (txcsm != idx) { - /* - * If each skb takes only one descriptor this check degenerates - * to identity, because new space has just been opened. - * But if skbs are fragmented we must check that this index - * update releases enough of space, otherwise we just - * wait for device to make more work. - */ - if (!tx_ring_full(ap, txcsm, ap->tx_prd)) - ace_tx_int(dev, txcsm, idx); - } - - evtcsm = readl(®s->EvtCsm); - evtprd = *ap->evt_prd; - - if (evtcsm != evtprd) { - evtcsm = ace_handle_event(dev, evtcsm, evtprd); - writel(evtcsm, ®s->EvtCsm); - } - - /* - * This has to go last in the interrupt handler and run with - * the spin lock released ... what lock? - */ - if (netif_running(dev)) { - int cur_size; - int run_tasklet = 0; - - cur_size = atomic_read(&ap->cur_rx_bufs); - if (cur_size < RX_LOW_STD_THRES) { - if ((cur_size < RX_PANIC_STD_THRES) && - !test_and_set_bit(0, &ap->std_refill_busy)) { -#ifdef DEBUG - printk("low on std buffers %i\n", cur_size); -#endif - ace_load_std_rx_ring(dev, - RX_RING_SIZE - cur_size); - } else - run_tasklet = 1; - } - - if (!ACE_IS_TIGON_I(ap)) { - cur_size = atomic_read(&ap->cur_mini_bufs); - if (cur_size < RX_LOW_MINI_THRES) { - if ((cur_size < RX_PANIC_MINI_THRES) && - !test_and_set_bit(0, - &ap->mini_refill_busy)) { -#ifdef DEBUG - printk("low on mini buffers %i\n", - cur_size); -#endif - ace_load_mini_rx_ring(dev, - RX_MINI_SIZE - cur_size); - } else - run_tasklet = 1; - } - } - - if (ap->jumbo) { - cur_size = atomic_read(&ap->cur_jumbo_bufs); - if (cur_size < RX_LOW_JUMBO_THRES) { - if ((cur_size < RX_PANIC_JUMBO_THRES) && - !test_and_set_bit(0, - &ap->jumbo_refill_busy)){ -#ifdef DEBUG - printk("low on jumbo buffers %i\n", - cur_size); -#endif - ace_load_jumbo_rx_ring(dev, - RX_JUMBO_SIZE - cur_size); - } else - run_tasklet = 1; - } - } - if (run_tasklet && !ap->tasklet_pending) { - ap->tasklet_pending = 1; - tasklet_schedule(&ap->ace_tasklet); - } - } - - return IRQ_HANDLED; -} - -static int ace_open(struct net_device *dev) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - struct cmd cmd; - - if (!(ap->fw_running)) { - printk(KERN_WARNING "%s: Firmware not running!\n", dev->name); - return -EBUSY; - } - - writel(dev->mtu + ETH_HLEN + 4, ®s->IfMtu); - - cmd.evt = C_CLEAR_STATS; - cmd.code = 0; - cmd.idx = 0; - ace_issue_cmd(regs, &cmd); - - cmd.evt = C_HOST_STATE; - cmd.code = C_C_STACK_UP; - cmd.idx = 0; - ace_issue_cmd(regs, &cmd); - - if (ap->jumbo && - !test_and_set_bit(0, &ap->jumbo_refill_busy)) - ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE); - - if (dev->flags & IFF_PROMISC) { - cmd.evt = C_SET_PROMISC_MODE; - cmd.code = C_C_PROMISC_ENABLE; - cmd.idx = 0; - ace_issue_cmd(regs, &cmd); - - ap->promisc = 1; - }else - ap->promisc = 0; - ap->mcast_all = 0; - -#if 0 - cmd.evt = C_LNK_NEGOTIATION; - cmd.code = 0; - cmd.idx = 0; - ace_issue_cmd(regs, &cmd); -#endif - - netif_start_queue(dev); - - /* - * Setup the 
bottom half rx ring refill handler - */ - tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev); - return 0; -} - - -static int ace_close(struct net_device *dev) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - struct cmd cmd; - unsigned long flags; - short i; - - /* - * Without (or before) releasing irq and stopping hardware, this - * is an absolute non-sense, by the way. It will be reset instantly - * by the first irq. - */ - netif_stop_queue(dev); - - - if (ap->promisc) { - cmd.evt = C_SET_PROMISC_MODE; - cmd.code = C_C_PROMISC_DISABLE; - cmd.idx = 0; - ace_issue_cmd(regs, &cmd); - ap->promisc = 0; - } - - cmd.evt = C_HOST_STATE; - cmd.code = C_C_STACK_DOWN; - cmd.idx = 0; - ace_issue_cmd(regs, &cmd); - - tasklet_kill(&ap->ace_tasklet); - - /* - * Make sure one CPU is not processing packets while - * buffers are being released by another. - */ - - local_irq_save(flags); - ace_mask_irq(dev); - - for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) { - struct sk_buff *skb; - struct tx_ring_info *info; - - info = ap->skb->tx_skbuff + i; - skb = info->skb; - - if (dma_unmap_len(info, maplen)) { - if (ACE_IS_TIGON_I(ap)) { - /* NB: TIGON_1 is special, tx_ring is in io space */ - struct tx_desc __iomem *tx; - tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i]; - writel(0, &tx->addr.addrhi); - writel(0, &tx->addr.addrlo); - writel(0, &tx->flagsize); - } else - memset(ap->tx_ring + i, 0, - sizeof(struct tx_desc)); - pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), - dma_unmap_len(info, maplen), - PCI_DMA_TODEVICE); - dma_unmap_len_set(info, maplen, 0); - } - if (skb) { - dev_kfree_skb(skb); - info->skb = NULL; - } - } - - if (ap->jumbo) { - cmd.evt = C_RESET_JUMBO_RNG; - cmd.code = 0; - cmd.idx = 0; - ace_issue_cmd(regs, &cmd); - } - - ace_unmask_irq(dev); - local_irq_restore(flags); - - return 0; -} - - -static inline dma_addr_t -ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb, - struct sk_buff *tail, u32 idx) -{ - dma_addr_t mapping; - struct tx_ring_info *info; - - mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), - offset_in_page(skb->data), - skb->len, PCI_DMA_TODEVICE); - - info = ap->skb->tx_skbuff + idx; - info->skb = tail; - dma_unmap_addr_set(info, mapping, mapping); - dma_unmap_len_set(info, maplen, skb->len); - return mapping; -} - - -static inline void -ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr, - u32 flagsize, u32 vlan_tag) -{ -#if !USE_TX_COAL_NOW - flagsize &= ~BD_FLG_COAL_NOW; -#endif - - if (ACE_IS_TIGON_I(ap)) { - struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc; - writel(addr >> 32, &io->addr.addrhi); - writel(addr & 0xffffffff, &io->addr.addrlo); - writel(flagsize, &io->flagsize); - writel(vlan_tag, &io->vlanres); - } else { - desc->addr.addrhi = addr >> 32; - desc->addr.addrlo = addr; - desc->flagsize = flagsize; - desc->vlanres = vlan_tag; - } -} - - -static netdev_tx_t ace_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - struct tx_desc *desc; - u32 idx, flagsize; - unsigned long maxjiff = jiffies + 3*HZ; - -restart: - idx = ap->tx_prd; - - if (tx_ring_full(ap, ap->tx_ret_csm, idx)) - goto overflow; - - if (!skb_shinfo(skb)->nr_frags) { - dma_addr_t mapping; - u32 vlan_tag = 0; - - mapping = ace_map_tx_skb(ap, skb, skb, idx); - flagsize = (skb->len << 16) | (BD_FLG_END); - if (skb->ip_summed == CHECKSUM_PARTIAL) - flagsize |= BD_FLG_TCP_UDP_SUM; 
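
The flagsize word built above packs the buffer length into the upper 16
bits and the BD_FLG_* bits into the lower 16. A sketch of the layout
(helper names invented for illustration, not part of the driver):

	/*  31............16 15.............0
	 *  |    length     |     flags     |
	 */
	static inline u32 bd_pack(u16 len, u16 flags)
	{
		return ((u32)len << 16) | flags;
	}

	static inline u16 bd_len(u32 fs)	{ return fs >> 16; }
	static inline u16 bd_flags(u32 fs)	{ return fs & 0xffff; }
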
- if (vlan_tx_tag_present(skb)) { - flagsize |= BD_FLG_VLAN_TAG; - vlan_tag = vlan_tx_tag_get(skb); - } - desc = ap->tx_ring + idx; - idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); - - /* Look at ace_tx_int for explanations. */ - if (tx_ring_full(ap, ap->tx_ret_csm, idx)) - flagsize |= BD_FLG_COAL_NOW; - - ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); - } else { - dma_addr_t mapping; - u32 vlan_tag = 0; - int i, len = 0; - - mapping = ace_map_tx_skb(ap, skb, NULL, idx); - flagsize = (skb_headlen(skb) << 16); - if (skb->ip_summed == CHECKSUM_PARTIAL) - flagsize |= BD_FLG_TCP_UDP_SUM; - if (vlan_tx_tag_present(skb)) { - flagsize |= BD_FLG_VLAN_TAG; - vlan_tag = vlan_tx_tag_get(skb); - } - - ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag); - - idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - struct tx_ring_info *info; - - len += frag->size; - info = ap->skb->tx_skbuff + idx; - desc = ap->tx_ring + idx; - - mapping = pci_map_page(ap->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); - - flagsize = (frag->size << 16); - if (skb->ip_summed == CHECKSUM_PARTIAL) - flagsize |= BD_FLG_TCP_UDP_SUM; - idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); - - if (i == skb_shinfo(skb)->nr_frags - 1) { - flagsize |= BD_FLG_END; - if (tx_ring_full(ap, ap->tx_ret_csm, idx)) - flagsize |= BD_FLG_COAL_NOW; - - /* - * Only the last fragment frees - * the skb! - */ - info->skb = skb; - } else { - info->skb = NULL; - } - dma_unmap_addr_set(info, mapping, mapping); - dma_unmap_len_set(info, maplen, frag->size); - ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); - } - } - - wmb(); - ap->tx_prd = idx; - ace_set_txprd(regs, ap, idx); - - if (flagsize & BD_FLG_COAL_NOW) { - netif_stop_queue(dev); - - /* - * A TX-descriptor producer (an IRQ) might have gotten - * between, making the ring free again. Since xmit is - * serialized, this is the only situation we have to - * re-test. - */ - if (!tx_ring_full(ap, ap->tx_ret_csm, idx)) - netif_wake_queue(dev); - } - - return NETDEV_TX_OK; - -overflow: - /* - * This race condition is unavoidable with lock-free drivers. - * We wake up the queue _before_ tx_prd is advanced, so that we can - * enter hard_start_xmit too early, while tx ring still looks closed. - * This happens ~1-4 times per 100000 packets, so that we can allow - * to loop syncing to other CPU. Probably, we need an additional - * wmb() in ace_tx_intr as well. - * - * Note that this race is relieved by reserving one more entry - * in tx ring than it is necessary (see original non-SG driver). - * However, with SG we need to reserve 2*MAX_SKB_FRAGS+1, which - * is already overkill. - * - * Alternative is to return with 1 not throttling queue. In this - * case loop becomes longer, no more useful effects. - */ - if (time_before(jiffies, maxjiff)) { - barrier(); - cpu_relax(); - goto restart; - } - - /* The ring is stuck full. 
*/
-	printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
-	return NETDEV_TX_BUSY;
-}
-
-
-static int ace_change_mtu(struct net_device *dev, int new_mtu)
-{
-	struct ace_private *ap = netdev_priv(dev);
-	struct ace_regs __iomem *regs = ap->regs;
-
-	if (new_mtu > ACE_JUMBO_MTU)
-		return -EINVAL;
-
-	writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
-	dev->mtu = new_mtu;
-
-	if (new_mtu > ACE_STD_MTU) {
-		if (!(ap->jumbo)) {
-			printk(KERN_INFO "%s: Enabling Jumbo frame "
-			       "support\n", dev->name);
-			ap->jumbo = 1;
-			if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
-				ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
-			ace_set_rxtx_parms(dev, 1);
-		}
-	} else {
-		while (test_and_set_bit(0, &ap->jumbo_refill_busy));
-		ace_sync_irq(dev->irq);
-		ace_set_rxtx_parms(dev, 0);
-		if (ap->jumbo) {
-			struct cmd cmd;
-
-			cmd.evt = C_RESET_JUMBO_RNG;
-			cmd.code = 0;
-			cmd.idx = 0;
-			ace_issue_cmd(regs, &cmd);
-		}
-	}
-
-	return 0;
-}
-
-static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
-	struct ace_private *ap = netdev_priv(dev);
-	struct ace_regs __iomem *regs = ap->regs;
-	u32 link;
-
-	memset(ecmd, 0, sizeof(struct ethtool_cmd));
-	ecmd->supported =
-		(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
-		 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
-		 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
-		 SUPPORTED_Autoneg | SUPPORTED_FIBRE);
-
-	ecmd->port = PORT_FIBRE;
-	ecmd->transceiver = XCVR_INTERNAL;
-
-	link = readl(&regs->GigLnkState);
-	if (link & LNK_1000MB)
-		ethtool_cmd_speed_set(ecmd, SPEED_1000);
-	else {
-		link = readl(&regs->FastLnkState);
-		if (link & LNK_100MB)
-			ethtool_cmd_speed_set(ecmd, SPEED_100);
-		else if (link & LNK_10MB)
-			ethtool_cmd_speed_set(ecmd, SPEED_10);
-		else
-			ethtool_cmd_speed_set(ecmd, 0);
-	}
-	if (link & LNK_FULL_DUPLEX)
-		ecmd->duplex = DUPLEX_FULL;
-	else
-		ecmd->duplex = DUPLEX_HALF;
-
-	if (link & LNK_NEGOTIATE)
-		ecmd->autoneg = AUTONEG_ENABLE;
-	else
-		ecmd->autoneg = AUTONEG_DISABLE;
-
-#if 0
-	/*
-	 * Current struct ethtool_cmd is insufficient
-	 */
-	ecmd->trace = readl(&regs->TuneTrace);
-
-	ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
-	ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
-#endif
-	ecmd->maxtxpkt = readl(&regs->TuneMaxTxDesc);
-	ecmd->maxrxpkt = readl(&regs->TuneMaxRxDesc);
-
-	return 0;
-}
-
-static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
-	struct ace_private *ap = netdev_priv(dev);
-	struct ace_regs __iomem *regs = ap->regs;
-	u32 link, speed;
-
-	link = readl(&regs->GigLnkState);
-	if (link & LNK_1000MB)
-		speed = SPEED_1000;
-	else {
-		link = readl(&regs->FastLnkState);
-		if (link & LNK_100MB)
-			speed = SPEED_100;
-		else if (link & LNK_10MB)
-			speed = SPEED_10;
-		else
-			speed = SPEED_100;
-	}
-
-	link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
-		LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
-	if (!ACE_IS_TIGON_I(ap))
-		link |= LNK_TX_FLOW_CTL_Y;
-	if (ecmd->autoneg == AUTONEG_ENABLE)
-		link |= LNK_NEGOTIATE;
-	if (ethtool_cmd_speed(ecmd) != speed) {
-		link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
-		switch (ethtool_cmd_speed(ecmd)) {
-		case SPEED_1000:
-			link |= LNK_1000MB;
-			break;
-		case SPEED_100:
-			link |= LNK_100MB;
-			break;
-		case SPEED_10:
-			link |= LNK_10MB;
-			break;
-		}
-	}
-
-	if (ecmd->duplex == DUPLEX_FULL)
-		link |= LNK_FULL_DUPLEX;
-
-	if (link != ap->link) {
-		struct cmd cmd;
-		printk(KERN_INFO "%s: Renegotiating link state\n",
-		       dev->name);
-
-		ap->link = link;
-		writel(link, &regs->TuneLink);
-		if (!ACE_IS_TIGON_I(ap))
-			writel(link, &regs->TuneFastLink);
-		wmb();
-
-		cmd.evt = C_LNK_NEGOTIATION;
-		cmd.code = 0;
-		cmd.idx = 0;
-		ace_issue_cmd(regs, &cmd);
-	}
-	return 0;
-}
-
-static void ace_get_drvinfo(struct net_device *dev,
-			    struct ethtool_drvinfo *info)
-{
-	struct ace_private *ap = netdev_priv(dev);
-
-	strlcpy(info->driver, "acenic", sizeof(info->driver));
-	snprintf(info->version, sizeof(info->version), "%i.%i.%i",
-		 ap->firmware_major, ap->firmware_minor,
-		 ap->firmware_fix);
-
-	if (ap->pdev)
-		strlcpy(info->bus_info, pci_name(ap->pdev),
-			sizeof(info->bus_info));
-}
-
-/*
- * Set the hardware MAC address.
- */
-static int ace_set_mac_addr(struct net_device *dev, void *p)
-{
-	struct ace_private *ap = netdev_priv(dev);
-	struct ace_regs __iomem *regs = ap->regs;
-	struct sockaddr *addr = p;
-	u8 *da;
-	struct cmd cmd;
-
-	if (netif_running(dev))
-		return -EBUSY;
-
-	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-
-	da = (u8 *)dev->dev_addr;
-
-	writel(da[0] << 8 | da[1], &regs->MacAddrHi);
-	writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
-	       &regs->MacAddrLo);
-
-	cmd.evt = C_SET_MAC_ADDR;
-	cmd.code = 0;
-	cmd.idx = 0;
-	ace_issue_cmd(regs, &cmd);
-
-	return 0;
-}
-
-
-static void ace_set_multicast_list(struct net_device *dev)
-{
-	struct ace_private *ap = netdev_priv(dev);
-	struct ace_regs __iomem *regs = ap->regs;
-	struct cmd cmd;
-
-	if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
-		cmd.evt = C_SET_MULTICAST_MODE;
-		cmd.code = C_C_MCAST_ENABLE;
-		cmd.idx = 0;
-		ace_issue_cmd(regs, &cmd);
-		ap->mcast_all = 1;
-	} else if (ap->mcast_all) {
-		cmd.evt = C_SET_MULTICAST_MODE;
-		cmd.code = C_C_MCAST_DISABLE;
-		cmd.idx = 0;
-		ace_issue_cmd(regs, &cmd);
-		ap->mcast_all = 0;
-	}
-
-	if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
-		cmd.evt = C_SET_PROMISC_MODE;
-		cmd.code = C_C_PROMISC_ENABLE;
-		cmd.idx = 0;
-		ace_issue_cmd(regs, &cmd);
-		ap->promisc = 1;
-	} else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
-		cmd.evt = C_SET_PROMISC_MODE;
-		cmd.code = C_C_PROMISC_DISABLE;
-		cmd.idx = 0;
-		ace_issue_cmd(regs, &cmd);
-		ap->promisc = 0;
-	}
-
-	/*
-	 * For the time being multicast relies on the upper layers
-	 * filtering it properly. The Firmware does not allow one to
-	 * set the entire multicast list at a time and keeping track of
-	 * it here is going to be messy.
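
ace_set_mac_addr() above splits the six MAC octets across two 32-bit
registers: the first two bytes land in MacAddrHi, the last four in
MacAddrLo. The packing in isolation (an illustrative helper, not code
from the driver):

	static void mac_to_hilo(const u8 da[6], u32 *hi, u32 *lo)
	{
		*hi = (da[0] << 8) | da[1];
		*lo = ((u32)da[2] << 24) | (da[3] << 16) |
		      (da[4] << 8) | da[5];
	}
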
- */ - if (!netdev_mc_empty(dev) && !ap->mcast_all) { - cmd.evt = C_SET_MULTICAST_MODE; - cmd.code = C_C_MCAST_ENABLE; - cmd.idx = 0; - ace_issue_cmd(regs, &cmd); - }else if (!ap->mcast_all) { - cmd.evt = C_SET_MULTICAST_MODE; - cmd.code = C_C_MCAST_DISABLE; - cmd.idx = 0; - ace_issue_cmd(regs, &cmd); - } -} - - -static struct net_device_stats *ace_get_stats(struct net_device *dev) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_mac_stats __iomem *mac_stats = - (struct ace_mac_stats __iomem *)ap->regs->Stats; - - dev->stats.rx_missed_errors = readl(&mac_stats->drop_space); - dev->stats.multicast = readl(&mac_stats->kept_mc); - dev->stats.collisions = readl(&mac_stats->coll); - - return &dev->stats; -} - - -static void __devinit ace_copy(struct ace_regs __iomem *regs, const __be32 *src, - u32 dest, int size) -{ - void __iomem *tdest; - short tsize, i; - - if (size <= 0) - return; - - while (size > 0) { - tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1), - min_t(u32, size, ACE_WINDOW_SIZE)); - tdest = (void __iomem *) ®s->Window + - (dest & (ACE_WINDOW_SIZE - 1)); - writel(dest & ~(ACE_WINDOW_SIZE - 1), ®s->WinBase); - for (i = 0; i < (tsize / 4); i++) { - /* Firmware is big-endian */ - writel(be32_to_cpup(src), tdest); - src++; - tdest += 4; - dest += 4; - size -= 4; - } - } -} - - -static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int size) -{ - void __iomem *tdest; - short tsize = 0, i; - - if (size <= 0) - return; - - while (size > 0) { - tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1), - min_t(u32, size, ACE_WINDOW_SIZE)); - tdest = (void __iomem *) ®s->Window + - (dest & (ACE_WINDOW_SIZE - 1)); - writel(dest & ~(ACE_WINDOW_SIZE - 1), ®s->WinBase); - - for (i = 0; i < (tsize / 4); i++) { - writel(0, tdest + i*4); - } - - dest += tsize; - size -= tsize; - } -} - - -/* - * Download the firmware into the SRAM on the NIC - * - * This operation requires the NIC to be halted and is performed with - * interrupts disabled and with the spinlock hold. - */ -static int __devinit ace_load_firmware(struct net_device *dev) -{ - const struct firmware *fw; - const char *fw_name = "acenic/tg2.bin"; - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - const __be32 *fw_data; - u32 load_addr; - int ret; - - if (!(readl(®s->CpuCtrl) & CPU_HALTED)) { - printk(KERN_ERR "%s: trying to download firmware while the " - "CPU is running!\n", ap->name); - return -EFAULT; - } - - if (ACE_IS_TIGON_I(ap)) - fw_name = "acenic/tg1.bin"; - - ret = request_firmware(&fw, fw_name, &ap->pdev->dev); - if (ret) { - printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n", - ap->name, fw_name); - return ret; - } - - fw_data = (void *)fw->data; - - /* Firmware blob starts with version numbers, followed by - load and start address. Remainder is the blob to be loaded - contiguously from load address. We don't bother to represent - the BSS/SBSS sections any more, since we were clearing the - whole thing anyway. 
*/ - ap->firmware_major = fw->data[0]; - ap->firmware_minor = fw->data[1]; - ap->firmware_fix = fw->data[2]; - - ap->firmware_start = be32_to_cpu(fw_data[1]); - if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) { - printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n", - ap->name, ap->firmware_start, fw_name); - ret = -EINVAL; - goto out; - } - - load_addr = be32_to_cpu(fw_data[2]); - if (load_addr < 0x4000 || load_addr >= 0x80000) { - printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n", - ap->name, load_addr, fw_name); - ret = -EINVAL; - goto out; - } - - /* - * Do not try to clear more than 512KiB or we end up seeing - * funny things on NICs with only 512KiB SRAM - */ - ace_clear(regs, 0x2000, 0x80000-0x2000); - ace_copy(regs, &fw_data[3], load_addr, fw->size-12); - out: - release_firmware(fw); - return ret; -} - - -/* - * The eeprom on the AceNIC is an Atmel i2c EEPROM. - * - * Accessing the EEPROM is `interesting' to say the least - don't read - * this code right after dinner. - * - * This is all about black magic and bit-banging the device .... I - * wonder in what hospital they have put the guy who designed the i2c - * specs. - * - * Oh yes, this is only the beginning! - * - * Thanks to Stevarino Webinski for helping tracking down the bugs in the - * code i2c readout code by beta testing all my hacks. - */ -static void __devinit eeprom_start(struct ace_regs __iomem *regs) -{ - u32 local; - - readl(®s->LocalCtrl); - udelay(ACE_SHORT_DELAY); - local = readl(®s->LocalCtrl); - local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - local |= EEPROM_CLK_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - local &= ~EEPROM_DATA_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - local &= ~EEPROM_CLK_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); -} - - -static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic) -{ - short i; - u32 local; - - udelay(ACE_SHORT_DELAY); - local = readl(®s->LocalCtrl); - local &= ~EEPROM_DATA_OUT; - local |= EEPROM_WRITE_ENABLE; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - - for (i = 0; i < 8; i++, magic <<= 1) { - udelay(ACE_SHORT_DELAY); - if (magic & 0x80) - local |= EEPROM_DATA_OUT; - else - local &= ~EEPROM_DATA_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - - udelay(ACE_SHORT_DELAY); - local |= EEPROM_CLK_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT); - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - } -} - - -static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs) -{ - int state; - u32 local; - - local = readl(®s->LocalCtrl); - local &= ~EEPROM_WRITE_ENABLE; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_LONG_DELAY); - local |= EEPROM_CLK_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - /* sample data in middle of high clk */ - state = (readl(®s->LocalCtrl) & EEPROM_DATA_IN) != 0; - udelay(ACE_SHORT_DELAY); - mb(); - writel(readl(®s->LocalCtrl) & ~EEPROM_CLK_OUT, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - - return state; -} - - -static void __devinit eeprom_stop(struct ace_regs __iomem *regs) -{ - u32 local; - - udelay(ACE_SHORT_DELAY); - local = readl(®s->LocalCtrl); 
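
eeprom_prep() above clocks a byte out MSB-first: the byte is shifted left
once per iteration and bit 7 drives the data line before the clock is
pulsed. Stripped of the MMIO read-backs and udelay()s, the skeleton looks
like this (set_data_line() and pulse_clock() are invented for
illustration):

	for (i = 0; i < 8; i++, magic <<= 1) {
		set_data_line(magic & 0x80);	/* present the current MSB */
		pulse_clock();			/* device samples on clk high */
	}
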
- local |= EEPROM_WRITE_ENABLE; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - local &= ~EEPROM_DATA_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - local |= EEPROM_CLK_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - local |= EEPROM_DATA_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_LONG_DELAY); - local &= ~EEPROM_CLK_OUT; - writel(local, ®s->LocalCtrl); - mb(); -} - - -/* - * Read a whole byte from the EEPROM. - */ -static int __devinit read_eeprom_byte(struct net_device *dev, - unsigned long offset) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - unsigned long flags; - u32 local; - int result = 0; - short i; - - /* - * Don't take interrupts on this CPU will bit banging - * the %#%#@$ I2C device - */ - local_irq_save(flags); - - eeprom_start(regs); - - eeprom_prep(regs, EEPROM_WRITE_SELECT); - if (eeprom_check_ack(regs)) { - local_irq_restore(flags); - printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name); - result = -EIO; - goto eeprom_read_error; - } - - eeprom_prep(regs, (offset >> 8) & 0xff); - if (eeprom_check_ack(regs)) { - local_irq_restore(flags); - printk(KERN_ERR "%s: Unable to set address byte 0\n", - ap->name); - result = -EIO; - goto eeprom_read_error; - } - - eeprom_prep(regs, offset & 0xff); - if (eeprom_check_ack(regs)) { - local_irq_restore(flags); - printk(KERN_ERR "%s: Unable to set address byte 1\n", - ap->name); - result = -EIO; - goto eeprom_read_error; - } - - eeprom_start(regs); - eeprom_prep(regs, EEPROM_READ_SELECT); - if (eeprom_check_ack(regs)) { - local_irq_restore(flags); - printk(KERN_ERR "%s: Unable to set READ_SELECT\n", - ap->name); - result = -EIO; - goto eeprom_read_error; - } - - for (i = 0; i < 8; i++) { - local = readl(®s->LocalCtrl); - local &= ~EEPROM_WRITE_ENABLE; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - udelay(ACE_LONG_DELAY); - mb(); - local |= EEPROM_CLK_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - /* sample data mid high clk */ - result = (result << 1) | - ((readl(®s->LocalCtrl) & EEPROM_DATA_IN) != 0); - udelay(ACE_SHORT_DELAY); - mb(); - local = readl(®s->LocalCtrl); - local &= ~EEPROM_CLK_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - udelay(ACE_SHORT_DELAY); - mb(); - if (i == 7) { - local |= EEPROM_WRITE_ENABLE; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - } - } - - local |= EEPROM_DATA_OUT; - writel(local, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - writel(readl(®s->LocalCtrl) | EEPROM_CLK_OUT, ®s->LocalCtrl); - readl(®s->LocalCtrl); - udelay(ACE_LONG_DELAY); - writel(readl(®s->LocalCtrl) & ~EEPROM_CLK_OUT, ®s->LocalCtrl); - readl(®s->LocalCtrl); - mb(); - udelay(ACE_SHORT_DELAY); - eeprom_stop(regs); - - local_irq_restore(flags); - out: - return result; - - eeprom_read_error: - printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n", - ap->name, offset); - goto out; -} diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h deleted file mode 100644 index 51c486c..0000000 --- a/drivers/net/acenic.h +++ /dev/null @@ -1,790 +0,0 @@ -#ifndef _ACENIC_H_ -#define _ACENIC_H_ -#include - - -/* - * Generate TX index update each time, when TX ring is closed. 
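
read_eeprom_byte() above reassembles the byte the same way in reverse,
sampling the data line mid-way through the high clock phase and shifting
bits in MSB-first (a sketch; raise_clock(), sample_data_line() and
lower_clock() are invented names):

	int result = 0;

	for (i = 0; i < 8; i++) {
		raise_clock();
		result = (result << 1) | sample_data_line();
		lower_clock();
	}
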
- * Normally, this is not useful, because results in more dma (and irqs - * without TX_COAL_INTS_ONLY). - */ -#define USE_TX_COAL_NOW 0 - -/* - * Addressing: - * - * The Tigon uses 64-bit host addresses, regardless of their actual - * length, and it expects a big-endian format. For 32 bit systems the - * upper 32 bits of the address are simply ignored (zero), however for - * little endian 64 bit systems (Alpha) this looks strange with the - * two parts of the address word being swapped. - * - * The addresses are split in two 32 bit words for all architectures - * as some of them are in PCI shared memory and it is necessary to use - * readl/writel to access them. - * - * The addressing code is derived from Pete Wyckoff's work, but - * modified to deal properly with readl/writel usage. - */ - -struct ace_regs { - u32 pad0[16]; /* PCI control registers */ - - u32 HostCtrl; /* 0x40 */ - u32 LocalCtrl; - - u32 pad1[2]; - - u32 MiscCfg; /* 0x50 */ - - u32 pad2[2]; - - u32 PciState; - - u32 pad3[2]; /* 0x60 */ - - u32 WinBase; - u32 WinData; - - u32 pad4[12]; /* 0x70 */ - - u32 DmaWriteState; /* 0xa0 */ - u32 pad5[3]; - u32 DmaReadState; /* 0xb0 */ - - u32 pad6[26]; - - u32 AssistState; - - u32 pad7[8]; /* 0x120 */ - - u32 CpuCtrl; /* 0x140 */ - u32 Pc; - - u32 pad8[3]; - - u32 SramAddr; /* 0x154 */ - u32 SramData; - - u32 pad9[49]; - - u32 MacRxState; /* 0x220 */ - - u32 pad10[7]; - - u32 CpuBCtrl; /* 0x240 */ - u32 PcB; - - u32 pad11[3]; - - u32 SramBAddr; /* 0x254 */ - u32 SramBData; - - u32 pad12[105]; - - u32 pad13[32]; /* 0x400 */ - u32 Stats[32]; - - u32 Mb0Hi; /* 0x500 */ - u32 Mb0Lo; - u32 Mb1Hi; - u32 CmdPrd; - u32 Mb2Hi; - u32 TxPrd; - u32 Mb3Hi; - u32 RxStdPrd; - u32 Mb4Hi; - u32 RxJumboPrd; - u32 Mb5Hi; - u32 RxMiniPrd; - u32 Mb6Hi; - u32 Mb6Lo; - u32 Mb7Hi; - u32 Mb7Lo; - u32 Mb8Hi; - u32 Mb8Lo; - u32 Mb9Hi; - u32 Mb9Lo; - u32 MbAHi; - u32 MbALo; - u32 MbBHi; - u32 MbBLo; - u32 MbCHi; - u32 MbCLo; - u32 MbDHi; - u32 MbDLo; - u32 MbEHi; - u32 MbELo; - u32 MbFHi; - u32 MbFLo; - - u32 pad14[32]; - - u32 MacAddrHi; /* 0x600 */ - u32 MacAddrLo; - u32 InfoPtrHi; - u32 InfoPtrLo; - u32 MultiCastHi; /* 0x610 */ - u32 MultiCastLo; - u32 ModeStat; - u32 DmaReadCfg; - u32 DmaWriteCfg; /* 0x620 */ - u32 TxBufRat; - u32 EvtCsm; - u32 CmdCsm; - u32 TuneRxCoalTicks;/* 0x630 */ - u32 TuneTxCoalTicks; - u32 TuneStatTicks; - u32 TuneMaxTxDesc; - u32 TuneMaxRxDesc; /* 0x640 */ - u32 TuneTrace; - u32 TuneLink; - u32 TuneFastLink; - u32 TracePtr; /* 0x650 */ - u32 TraceStrt; - u32 TraceLen; - u32 IfIdx; - u32 IfMtu; /* 0x660 */ - u32 MaskInt; - u32 GigLnkState; - u32 FastLnkState; - u32 pad16[4]; /* 0x670 */ - u32 RxRetCsm; /* 0x680 */ - - u32 pad17[31]; - - u32 CmdRng[64]; /* 0x700 */ - u32 Window[0x200]; -}; - - -typedef struct { - u32 addrhi; - u32 addrlo; -} aceaddr; - - -#define ACE_WINDOW_SIZE 0x800 - -#define ACE_JUMBO_MTU 9000 -#define ACE_STD_MTU 1500 - -#define ACE_TRACE_SIZE 0x8000 - -/* - * Host control register bits. - */ - -#define IN_INT 0x01 -#define CLR_INT 0x02 -#define HW_RESET 0x08 -#define BYTE_SWAP 0x10 -#define WORD_SWAP 0x20 -#define MASK_INTS 0x40 - -/* - * Local control register bits. 
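
struct ace_regs above depends on its pad* arrays to keep each register at
the documented offset (recorded in the 0x40-style comments). A
compile-time check in the same spirit (a sketch only; inside the kernel
one would reach for BUILD_BUG_ON instead):

	#include <stddef.h>

	_Static_assert(offsetof(struct ace_regs, HostCtrl) == 0x40,
		       "HostCtrl must sit at offset 0x40");
	_Static_assert(offsetof(struct ace_regs, WinBase) == 0x68,
		       "WinBase must sit at offset 0x68");
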
- */ - -#define EEPROM_DATA_IN 0x800000 -#define EEPROM_DATA_OUT 0x400000 -#define EEPROM_WRITE_ENABLE 0x200000 -#define EEPROM_CLK_OUT 0x100000 - -#define EEPROM_BASE 0xa0000000 - -#define EEPROM_WRITE_SELECT 0xa0 -#define EEPROM_READ_SELECT 0xa1 - -#define SRAM_BANK_512K 0x200 - - -/* - * udelay() values for when clocking the eeprom - */ -#define ACE_SHORT_DELAY 2 -#define ACE_LONG_DELAY 4 - - -/* - * Misc Config bits - */ - -#define SYNC_SRAM_TIMING 0x100000 - - -/* - * CPU state bits. - */ - -#define CPU_RESET 0x01 -#define CPU_TRACE 0x02 -#define CPU_PROM_FAILED 0x10 -#define CPU_HALT 0x00010000 -#define CPU_HALTED 0xffff0000 - - -/* - * PCI State bits. - */ - -#define DMA_READ_MAX_4 0x04 -#define DMA_READ_MAX_16 0x08 -#define DMA_READ_MAX_32 0x0c -#define DMA_READ_MAX_64 0x10 -#define DMA_READ_MAX_128 0x14 -#define DMA_READ_MAX_256 0x18 -#define DMA_READ_MAX_1K 0x1c -#define DMA_WRITE_MAX_4 0x20 -#define DMA_WRITE_MAX_16 0x40 -#define DMA_WRITE_MAX_32 0x60 -#define DMA_WRITE_MAX_64 0x80 -#define DMA_WRITE_MAX_128 0xa0 -#define DMA_WRITE_MAX_256 0xc0 -#define DMA_WRITE_MAX_1K 0xe0 -#define DMA_READ_WRITE_MASK 0xfc -#define MEM_READ_MULTIPLE 0x00020000 -#define PCI_66MHZ 0x00080000 -#define PCI_32BIT 0x00100000 -#define DMA_WRITE_ALL_ALIGN 0x00800000 -#define READ_CMD_MEM 0x06000000 -#define WRITE_CMD_MEM 0x70000000 - - -/* - * Mode status - */ - -#define ACE_BYTE_SWAP_BD 0x02 -#define ACE_WORD_SWAP_BD 0x04 /* not actually used */ -#define ACE_WARN 0x08 -#define ACE_BYTE_SWAP_DMA 0x10 -#define ACE_NO_JUMBO_FRAG 0x200 -#define ACE_FATAL 0x40000000 - - -/* - * DMA config - */ - -#define DMA_THRESH_1W 0x10 -#define DMA_THRESH_2W 0x20 -#define DMA_THRESH_4W 0x40 -#define DMA_THRESH_8W 0x80 -#define DMA_THRESH_16W 0x100 -#define DMA_THRESH_32W 0x0 /* not described in doc, but exists. 
*/ - - -/* - * Tuning parameters - */ - -#define TICKS_PER_SEC 1000000 - - -/* - * Link bits - */ - -#define LNK_PREF 0x00008000 -#define LNK_10MB 0x00010000 -#define LNK_100MB 0x00020000 -#define LNK_1000MB 0x00040000 -#define LNK_FULL_DUPLEX 0x00080000 -#define LNK_HALF_DUPLEX 0x00100000 -#define LNK_TX_FLOW_CTL_Y 0x00200000 -#define LNK_NEG_ADVANCED 0x00400000 -#define LNK_RX_FLOW_CTL_Y 0x00800000 -#define LNK_NIC 0x01000000 -#define LNK_JAM 0x02000000 -#define LNK_JUMBO 0x04000000 -#define LNK_ALTEON 0x08000000 -#define LNK_NEG_FCTL 0x10000000 -#define LNK_NEGOTIATE 0x20000000 -#define LNK_ENABLE 0x40000000 -#define LNK_UP 0x80000000 - - -/* - * Event definitions - */ - -#define EVT_RING_ENTRIES 256 -#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event)) - -struct event { -#ifdef __LITTLE_ENDIAN_BITFIELD - u32 idx:12; - u32 code:12; - u32 evt:8; -#else - u32 evt:8; - u32 code:12; - u32 idx:12; -#endif - u32 pad; -}; - - -/* - * Events - */ - -#define E_FW_RUNNING 0x01 -#define E_STATS_UPDATED 0x04 - -#define E_STATS_UPDATE 0x04 - -#define E_LNK_STATE 0x06 -#define E_C_LINK_UP 0x01 -#define E_C_LINK_DOWN 0x02 -#define E_C_LINK_10_100 0x03 - -#define E_ERROR 0x07 -#define E_C_ERR_INVAL_CMD 0x01 -#define E_C_ERR_UNIMP_CMD 0x02 -#define E_C_ERR_BAD_CFG 0x03 - -#define E_MCAST_LIST 0x08 -#define E_C_MCAST_ADDR_ADD 0x01 -#define E_C_MCAST_ADDR_DEL 0x02 - -#define E_RESET_JUMBO_RNG 0x09 - - -/* - * Commands - */ - -#define CMD_RING_ENTRIES 64 - -struct cmd { -#ifdef __LITTLE_ENDIAN_BITFIELD - u32 idx:12; - u32 code:12; - u32 evt:8; -#else - u32 evt:8; - u32 code:12; - u32 idx:12; -#endif -}; - - -#define C_HOST_STATE 0x01 -#define C_C_STACK_UP 0x01 -#define C_C_STACK_DOWN 0x02 - -#define C_FDR_FILTERING 0x02 -#define C_C_FDR_FILT_ENABLE 0x01 -#define C_C_FDR_FILT_DISABLE 0x02 - -#define C_SET_RX_PRD_IDX 0x03 -#define C_UPDATE_STATS 0x04 -#define C_RESET_JUMBO_RNG 0x05 -#define C_ADD_MULTICAST_ADDR 0x08 -#define C_DEL_MULTICAST_ADDR 0x09 - -#define C_SET_PROMISC_MODE 0x0a -#define C_C_PROMISC_ENABLE 0x01 -#define C_C_PROMISC_DISABLE 0x02 - -#define C_LNK_NEGOTIATION 0x0b -#define C_C_NEGOTIATE_BOTH 0x00 -#define C_C_NEGOTIATE_GIG 0x01 -#define C_C_NEGOTIATE_10_100 0x02 - -#define C_SET_MAC_ADDR 0x0c -#define C_CLEAR_PROFILE 0x0d - -#define C_SET_MULTICAST_MODE 0x0e -#define C_C_MCAST_ENABLE 0x01 -#define C_C_MCAST_DISABLE 0x02 - -#define C_CLEAR_STATS 0x0f -#define C_SET_RX_JUMBO_PRD_IDX 0x10 -#define C_REFRESH_STATS 0x11 - - -/* - * Descriptor flags - */ -#define BD_FLG_TCP_UDP_SUM 0x01 -#define BD_FLG_IP_SUM 0x02 -#define BD_FLG_END 0x04 -#define BD_FLG_MORE 0x08 -#define BD_FLG_JUMBO 0x10 -#define BD_FLG_UCAST 0x20 -#define BD_FLG_MCAST 0x40 -#define BD_FLG_BCAST 0x60 -#define BD_FLG_TYP_MASK 0x60 -#define BD_FLG_IP_FRAG 0x80 -#define BD_FLG_IP_FRAG_END 0x100 -#define BD_FLG_VLAN_TAG 0x200 -#define BD_FLG_FRAME_ERROR 0x400 -#define BD_FLG_COAL_NOW 0x800 -#define BD_FLG_MINI 0x1000 - - -/* - * Ring Control block flags - */ -#define RCB_FLG_TCP_UDP_SUM 0x01 -#define RCB_FLG_IP_SUM 0x02 -#define RCB_FLG_NO_PSEUDO_HDR 0x08 -#define RCB_FLG_VLAN_ASSIST 0x10 -#define RCB_FLG_COAL_INT_ONLY 0x20 -#define RCB_FLG_TX_HOST_RING 0x40 -#define RCB_FLG_IEEE_SNAP_SUM 0x80 -#define RCB_FLG_EXT_RX_BD 0x100 -#define RCB_FLG_RNG_DISABLE 0x200 - - -/* - * TX ring - maximum TX ring entries for Tigon I's is 128 - */ -#define MAX_TX_RING_ENTRIES 256 -#define TIGON_I_TX_RING_ENTRIES 128 -#define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc)) -#define TX_RING_BASE 0x3800 - -struct tx_desc{ - 
aceaddr addr; - u32 flagsize; -#if 0 -/* - * This is in PCI shared mem and must be accessed with readl/writel - * real layout is: - */ -#if __LITTLE_ENDIAN - u16 flags; - u16 size; - u16 vlan; - u16 reserved; -#else - u16 size; - u16 flags; - u16 reserved; - u16 vlan; -#endif -#endif - u32 vlanres; -}; - - -#define RX_STD_RING_ENTRIES 512 -#define RX_STD_RING_SIZE (RX_STD_RING_ENTRIES * sizeof(struct rx_desc)) - -#define RX_JUMBO_RING_ENTRIES 256 -#define RX_JUMBO_RING_SIZE (RX_JUMBO_RING_ENTRIES *sizeof(struct rx_desc)) - -#define RX_MINI_RING_ENTRIES 1024 -#define RX_MINI_RING_SIZE (RX_MINI_RING_ENTRIES *sizeof(struct rx_desc)) - -#define RX_RETURN_RING_ENTRIES 2048 -#define RX_RETURN_RING_SIZE (RX_MAX_RETURN_RING_ENTRIES * \ - sizeof(struct rx_desc)) - -struct rx_desc{ - aceaddr addr; -#ifdef __LITTLE_ENDIAN - u16 size; - u16 idx; -#else - u16 idx; - u16 size; -#endif -#ifdef __LITTLE_ENDIAN - u16 flags; - u16 type; -#else - u16 type; - u16 flags; -#endif -#ifdef __LITTLE_ENDIAN - u16 tcp_udp_csum; - u16 ip_csum; -#else - u16 ip_csum; - u16 tcp_udp_csum; -#endif -#ifdef __LITTLE_ENDIAN - u16 vlan; - u16 err_flags; -#else - u16 err_flags; - u16 vlan; -#endif - u32 reserved; - u32 opague; -}; - - -/* - * This struct is shared with the NIC firmware. - */ -struct ring_ctrl { - aceaddr rngptr; -#ifdef __LITTLE_ENDIAN - u16 flags; - u16 max_len; -#else - u16 max_len; - u16 flags; -#endif - u32 pad; -}; - - -struct ace_mac_stats { - u32 excess_colls; - u32 coll_1; - u32 coll_2; - u32 coll_3; - u32 coll_4; - u32 coll_5; - u32 coll_6; - u32 coll_7; - u32 coll_8; - u32 coll_9; - u32 coll_10; - u32 coll_11; - u32 coll_12; - u32 coll_13; - u32 coll_14; - u32 coll_15; - u32 late_coll; - u32 defers; - u32 crc_err; - u32 underrun; - u32 crs_err; - u32 pad[3]; - u32 drop_ula; - u32 drop_mc; - u32 drop_fc; - u32 drop_space; - u32 coll; - u32 kept_bc; - u32 kept_mc; - u32 kept_uc; -}; - - -struct ace_info { - union { - u32 stats[256]; - } s; - struct ring_ctrl evt_ctrl; - struct ring_ctrl cmd_ctrl; - struct ring_ctrl tx_ctrl; - struct ring_ctrl rx_std_ctrl; - struct ring_ctrl rx_jumbo_ctrl; - struct ring_ctrl rx_mini_ctrl; - struct ring_ctrl rx_return_ctrl; - aceaddr evt_prd_ptr; - aceaddr rx_ret_prd_ptr; - aceaddr tx_csm_ptr; - aceaddr stats2_ptr; -}; - - -struct ring_info { - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(mapping); -}; - - -/* - * Funny... As soon as we add maplen on alpha, it starts to work - * much slower. Hmm... is it because struct does not fit to one cacheline? - * So, split tx_ring_info. - */ -struct tx_ring_info { - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(mapping); - DEFINE_DMA_UNMAP_LEN(maplen); -}; - - -/* - * struct ace_skb holding the rings of skb's. This is an awful lot of - * pointers, but I don't see any other smart mode to do this in an - * efficient manner ;-( - */ -struct ace_skb -{ - struct tx_ring_info tx_skbuff[MAX_TX_RING_ENTRIES]; - struct ring_info rx_std_skbuff[RX_STD_RING_ENTRIES]; - struct ring_info rx_mini_skbuff[RX_MINI_RING_ENTRIES]; - struct ring_info rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES]; -}; - - -/* - * Struct private for the AceNIC. - * - * Elements are grouped so variables used by the tx handling goes - * together, and will go into the same cache lines etc. in order to - * avoid cache line contention between the rx and tx handling on SMP. - * - * Frequently accessed variables are put at the beginning of the - * struct to help the compiler generate better/shorter code. 
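
The layout rule described here is visible right below: std_refill_busy
opens a fresh cache line so the RX refill state does not share a line
with the TX fields. The mechanism in isolation (sketch):

	struct hot_split {
		u32 tx_prd;		/* written by the TX path */

		/* next member starts a new cache line: no false sharing */
		unsigned long std_refill_busy
			__attribute__ ((aligned (SMP_CACHE_BYTES)));
	};
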
- */ -struct ace_private -{ - struct ace_info *info; - struct ace_regs __iomem *regs; /* register base */ - struct ace_skb *skb; - dma_addr_t info_dma; /* 32/64 bit */ - - int version, link; - int promisc, mcast_all; - - /* - * TX elements - */ - struct tx_desc *tx_ring; - u32 tx_prd; - volatile u32 tx_ret_csm; - int tx_ring_entries; - - /* - * RX elements - */ - unsigned long std_refill_busy - __attribute__ ((aligned (SMP_CACHE_BYTES))); - unsigned long mini_refill_busy, jumbo_refill_busy; - atomic_t cur_rx_bufs; - atomic_t cur_mini_bufs; - atomic_t cur_jumbo_bufs; - u32 rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd; - u32 cur_rx; - - struct rx_desc *rx_std_ring; - struct rx_desc *rx_jumbo_ring; - struct rx_desc *rx_mini_ring; - struct rx_desc *rx_return_ring; - - int tasklet_pending, jumbo; - struct tasklet_struct ace_tasklet; - - struct event *evt_ring; - - volatile u32 *evt_prd, *rx_ret_prd, *tx_csm; - - dma_addr_t tx_ring_dma; /* 32/64 bit */ - dma_addr_t rx_ring_base_dma; - dma_addr_t evt_ring_dma; - dma_addr_t evt_prd_dma, rx_ret_prd_dma, tx_csm_dma; - - unsigned char *trace_buf; - struct pci_dev *pdev; - struct net_device *next; - volatile int fw_running; - int board_idx; - u16 pci_command; - u8 pci_latency; - const char *name; -#ifdef INDEX_DEBUG - spinlock_t debug_lock - __attribute__ ((aligned (SMP_CACHE_BYTES))); - u32 last_tx, last_std_rx, last_mini_rx; -#endif - int pci_using_dac; - u8 firmware_major; - u8 firmware_minor; - u8 firmware_fix; - u32 firmware_start; -}; - - -#define TX_RESERVED MAX_SKB_FRAGS - -static inline int tx_space (struct ace_private *ap, u32 csm, u32 prd) -{ - return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1); -} - -#define tx_free(ap) tx_space((ap)->tx_ret_csm, (ap)->tx_prd, ap) -#define tx_ring_full(ap, csm, prd) (tx_space(ap, csm, prd) <= TX_RESERVED) - -static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr) -{ - u64 baddr = (u64) addr; - aa->addrlo = baddr & 0xffffffff; - aa->addrhi = baddr >> 32; - wmb(); -} - - -static inline void ace_set_txprd(struct ace_regs __iomem *regs, - struct ace_private *ap, u32 value) -{ -#ifdef INDEX_DEBUG - unsigned long flags; - spin_lock_irqsave(&ap->debug_lock, flags); - writel(value, ®s->TxPrd); - if (value == ap->last_tx) - printk(KERN_ERR "AceNIC RACE ALERT! 
writing identical value " - "to tx producer (%i)\n", value); - ap->last_tx = value; - spin_unlock_irqrestore(&ap->debug_lock, flags); -#else - writel(value, ®s->TxPrd); -#endif - wmb(); -} - - -static inline void ace_mask_irq(struct net_device *dev) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - - if (ACE_IS_TIGON_I(ap)) - writel(1, ®s->MaskInt); - else - writel(readl(®s->HostCtrl) | MASK_INTS, ®s->HostCtrl); - - ace_sync_irq(dev->irq); -} - - -static inline void ace_unmask_irq(struct net_device *dev) -{ - struct ace_private *ap = netdev_priv(dev); - struct ace_regs __iomem *regs = ap->regs; - - if (ACE_IS_TIGON_I(ap)) - writel(0, ®s->MaskInt); - else - writel(readl(®s->HostCtrl) & ~MASK_INTS, ®s->HostCtrl); -} - - -/* - * Prototypes - */ -static int ace_init(struct net_device *dev); -static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs); -static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs); -static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs); -static irqreturn_t ace_interrupt(int irq, void *dev_id); -static int ace_load_firmware(struct net_device *dev); -static int ace_open(struct net_device *dev); -static netdev_tx_t ace_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static int ace_close(struct net_device *dev); -static void ace_tasklet(unsigned long dev); -static void ace_dump_trace(struct ace_private *ap); -static void ace_set_multicast_list(struct net_device *dev); -static int ace_change_mtu(struct net_device *dev, int new_mtu); -static int ace_set_mac_addr(struct net_device *dev, void *p); -static void ace_set_rxtx_parms(struct net_device *dev, int jumbo); -static int ace_allocate_descriptors(struct net_device *dev); -static void ace_free_descriptors(struct net_device *dev); -static void ace_init_cleanup(struct net_device *dev); -static struct net_device_stats *ace_get_stats(struct net_device *dev); -static int read_eeprom_byte(struct net_device *dev, unsigned long offset); - -#endif /* _ACENIC_H_ */ diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c new file mode 100644 index 0000000..5420f6d --- /dev/null +++ b/drivers/net/ethernet/3com/3c501.c @@ -0,0 +1,896 @@ +/* 3c501.c: A 3Com 3c501 Ethernet driver for Linux. */ +/* + Written 1992,1993,1994 Donald Becker + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. This software may be used and + distributed according to the terms of the GNU General Public License, + incorporated herein by reference. + + This is a device driver for the 3Com Etherlink 3c501. + Do not purchase this card, even as a joke. It's performance is horrible, + and it breaks in many ways. + + The original author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Fixed (again!) the missing interrupt locking on TX/RX shifting. + Alan Cox + + Removed calls to init_etherdev since they are no longer needed, and + cleaned up modularization just a bit. The driver still allows only + the default address for cards when loaded as a module, but that's + really less braindead than anyone using a 3c501 board. :) + 19950208 (invid@msen.com) + + Added traps for interrupts hitting the window as we clear and TX load + the board. Now getting 150K/second FTP with a 3c501 card. Still playing + with a TX-TX optimisation to see if we can touch 180-200K/second as seems + theoretically maximum. 
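
Returning briefly to the acenic header above: tx_space() does branch-free
occupancy math on a power-of-two ring, masking (csm - prd - 1) with
(entries - 1) so the wrap-around falls out of the AND. Note that the
tx_free() macro passes its arguments in a different order than tx_space()
declares them; it appears to be unused. A worked example, assuming a
256-entry ring:

	/* consumer csm = 10, producer prd = 250, 256 entries */
	u32 space = (10u - 250u - 1u) & (256u - 1u);	/* (-241) & 255 = 15 */
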
+ 19950402 Alan Cox + + Cleaned up for 2.3.x because we broke SMP now. + 20000208 Alan Cox + + Check up pass for 2.5. Nothing significant changed + 20021009 Alan Cox + + Fixed zero fill corner case + 20030104 Alan Cox + + + For the avoidance of doubt the "preferred form" of this code is one which + is in an open non patent encumbered format. Where cryptographic key signing + forms part of the process of creating an executable the information + including keys needed to generate an equivalently functional executable + are deemed to be part of the source code. + +*/ + + +/** + * DOC: 3c501 Card Notes + * + * Some notes on this thing if you have to hack it. [Alan] + * + * Some documentation is available from 3Com. Due to the boards age + * standard responses when you ask for this will range from 'be serious' + * to 'give it to a museum'. The documentation is incomplete and mostly + * of historical interest anyway. + * + * The basic system is a single buffer which can be used to receive or + * transmit a packet. A third command mode exists when you are setting + * things up. + * + * If it's transmitting it's not receiving and vice versa. In fact the + * time to get the board back into useful state after an operation is + * quite large. + * + * The driver works by keeping the board in receive mode waiting for a + * packet to arrive. When one arrives it is copied out of the buffer + * and delivered to the kernel. The card is reloaded and off we go. + * + * When transmitting lp->txing is set and the card is reset (from + * receive mode) [possibly losing a packet just received] to command + * mode. A packet is loaded and transmit mode triggered. The interrupt + * handler runs different code for transmit interrupts and can handle + * returning to receive mode or retransmissions (yes you have to help + * out with those too). + * + * DOC: Problems + * + * There are a wide variety of undocumented error returns from the card + * and you basically have to kick the board and pray if they turn up. Most + * only occur under extreme load or if you do something the board doesn't + * like (eg touching a register at the wrong time). + * + * The driver is less efficient than it could be. It switches through + * receive mode even if more transmits are queued. If this worries you buy + * a real Ethernet card. + * + * The combination of slow receive restart and no real multicast + * filter makes the board unusable with a kernel compiled for IP + * multicasting in a real multicast environment. That's down to the board, + * but even with no multicast programs running a multicast IP kernel is + * in group 224.0.0.1 and you will therefore be listening to all multicasts. + * One nv conference running over that Ethernet and you can give up. + * + */ + +#define DRV_NAME "3c501" +#define DRV_VERSION "2002/10/09" + + +static const char version[] = + DRV_NAME ".c: " DRV_VERSION " Alan Cox (alan@lxorguk.ukuu.org.uk).\n"; + +/* + * Braindamage remaining: + * The 3c501 board. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include "3c501.h" + +/* + * The boilerplate probe code. + */ + +static int io = 0x280; +static int irq = 5; +static int mem_start; + +/** + * el1_probe: - probe for a 3c501 + * @dev: The device structure passed in to probe. + * + * This can be called from two places. The network layer will probe using + * a device structure passed in with the probe information completed. 
For a + * modular driver we use #init_module to fill in our own structure and probe + * for it. + * + * Returns 0 on success. ENXIO if asked not to probe and ENODEV if asked to + * probe and failing to find anything. + */ + +struct net_device * __init el1_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); + static const unsigned ports[] = { 0x280, 0x300, 0}; + const unsigned *port; + int err = 0; + + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + io = dev->base_addr; + irq = dev->irq; + mem_start = dev->mem_start & 7; + } + + if (io > 0x1ff) { /* Check a single specified location. */ + err = el1_probe1(dev, io); + } else if (io != 0) { + err = -ENXIO; /* Don't probe at all. */ + } else { + for (port = ports; *port && el1_probe1(dev, *port); port++) + ; + if (!*port) + err = -ENODEV; + } + if (err) + goto out; + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: + release_region(dev->base_addr, EL1_IO_EXTENT); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static const struct net_device_ops el_netdev_ops = { + .ndo_open = el_open, + .ndo_stop = el1_close, + .ndo_start_xmit = el_start_xmit, + .ndo_tx_timeout = el_timeout, + .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/** + * el1_probe1: + * @dev: The device structure to use + * @ioaddr: An I/O address to probe at. + * + * The actual probe. This is iterated over by #el1_probe in order to + * check all the applicable device locations. + * + * Returns 0 for a success, in which case the device is activated, + * EAGAIN if the IRQ is in use by another driver, and ENODEV if the + * board cannot be found. + */ + +static int __init el1_probe1(struct net_device *dev, int ioaddr) +{ + struct net_local *lp; + const char *mname; /* Vendor name */ + unsigned char station_addr[6]; + int autoirq = 0; + int i; + + /* + * Reserve I/O resource for exclusive use by this driver + */ + + if (!request_region(ioaddr, EL1_IO_EXTENT, DRV_NAME)) + return -ENODEV; + + /* + * Read the station address PROM data from the special port. + */ + + for (i = 0; i < 6; i++) { + outw(i, ioaddr + EL1_DATAPTR); + station_addr[i] = inb(ioaddr + EL1_SAPROM); + } + /* + * Check the first three octets of the S.A. for 3Com's prefix, or + * for the Sager NP943 prefix. + */ + + if (station_addr[0] == 0x02 && station_addr[1] == 0x60 && + station_addr[2] == 0x8c) + mname = "3c501"; + else if (station_addr[0] == 0x00 && station_addr[1] == 0x80 && + station_addr[2] == 0xC8) + mname = "NP943"; + else { + release_region(ioaddr, EL1_IO_EXTENT); + return -ENODEV; + } + + /* + * We auto-IRQ by shutting off the interrupt line and letting it + * float high. + */ + + dev->irq = irq; + + if (dev->irq < 2) { + unsigned long irq_mask; + + irq_mask = probe_irq_on(); + inb(RX_STATUS); /* Clear pending interrupts. */ + inb(TX_STATUS); + outb(AX_LOOP + 1, AX_CMD); + + outb(0x00, AX_CMD); + + mdelay(20); + autoirq = probe_irq_off(irq_mask); + + if (autoirq == 0) { + pr_warning("%s probe at %#x failed to detect IRQ line.\n", + mname, ioaddr); + release_region(ioaddr, EL1_IO_EXTENT); + return -EAGAIN; + } + } + + outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. 
*/ + dev->base_addr = ioaddr; + memcpy(dev->dev_addr, station_addr, ETH_ALEN); + + if (mem_start & 0xf) + el_debug = mem_start & 0x7; + if (autoirq) + dev->irq = autoirq; + + pr_info("%s: %s EtherLink at %#lx, using %sIRQ %d.\n", + dev->name, mname, dev->base_addr, + autoirq ? "auto":"assigned ", dev->irq); + +#ifdef CONFIG_IP_MULTICAST + pr_warning("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n"); +#endif + + if (el_debug) + pr_debug("%s", version); + + lp = netdev_priv(dev); + memset(lp, 0, sizeof(struct net_local)); + spin_lock_init(&lp->lock); + + /* + * The EL1-specific entries in the device structure. + */ + + dev->netdev_ops = &el_netdev_ops; + dev->watchdog_timeo = HZ; + dev->ethtool_ops = &netdev_ethtool_ops; + return 0; +} + +/** + * el1_open: + * @dev: device that is being opened + * + * When an ifconfig is issued which changes the device flags to include + * IFF_UP this function is called. It is only called when the change + * occurs, not when the interface remains up. #el1_close will be called + * when it goes down. + * + * Returns 0 for a successful open, or -EAGAIN if someone has run off + * with our interrupt line. + */ + +static int el_open(struct net_device *dev) +{ + int retval; + int ioaddr = dev->base_addr; + struct net_local *lp = netdev_priv(dev); + unsigned long flags; + + if (el_debug > 2) + pr_debug("%s: Doing el_open()...\n", dev->name); + + retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev); + if (retval) + return retval; + + spin_lock_irqsave(&lp->lock, flags); + el_reset(dev); + spin_unlock_irqrestore(&lp->lock, flags); + + lp->txing = 0; /* Board in RX mode */ + outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */ + netif_start_queue(dev); + return 0; +} + +/** + * el_timeout: + * @dev: The 3c501 card that has timed out + * + * Attempt to restart the board. This is basically a mixture of extreme + * violence and prayer + * + */ + +static void el_timeout(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + if (el_debug) + pr_debug("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n", + dev->name, inb(TX_STATUS), + inb(AX_STATUS), inb(RX_STATUS)); + dev->stats.tx_errors++; + outb(TX_NORM, TX_CMD); + outb(RX_NORM, RX_CMD); + outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */ + outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */ + lp->txing = 0; /* Ripped back in to RX */ + netif_wake_queue(dev); +} + + +/** + * el_start_xmit: + * @skb: The packet that is queued to be sent + * @dev: The 3c501 card we want to throw it down + * + * Attempt to send a packet to a 3c501 card. There are some interesting + * catches here because the 3c501 is an extremely old and therefore + * stupid piece of technology. + * + * If we are handling an interrupt on the other CPU we cannot load a packet + * as we may still be attempting to retrieve the last RX packet buffer. + * + * When a transmit times out we dump the card into control mode and just + * start again. It happens enough that it isn't worth logging. + * + * We avoid holding the spin locks when doing the packet load to the board. + * The device is very slow, and its DMA mode is even slower. If we held the + * lock while loading 1500 bytes onto the controller we would drop a lot of + * serial port characters. This requires we do extra locking, but we have + * no real choice. 
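
Reduced to its moving parts, the handshake this comment describes works
as follows (a sketch; fire_transmit() and retry_load() stand in for the
AX_XMIT path and the reload loop in the real code below):

	spin_lock_irqsave(&lp->lock, flags);
	lp->loading = 1;		/* a buffer load is in progress */
	lp->txing = 1;
	spin_unlock_irqrestore(&lp->lock, flags);

	outsb(DATAPORT, buf, len);	/* slow PIO with IRQs enabled */

	if (lp->loading != 2)		/* no receive got in the way */
		fire_transmit();
	else
		retry_load();		/* IRQ handler asked for a redo */
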
+ */ + +static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + unsigned long flags; + + /* + * Avoid incoming interrupts between us flipping txing and flipping + * mode as the driver assumes txing is a faithful indicator of card + * state + */ + + spin_lock_irqsave(&lp->lock, flags); + + /* + * Avoid timer-based retransmission conflicts. + */ + + netif_stop_queue(dev); + + do { + int len = skb->len; + int pad = 0; + int gp_start; + unsigned char *buf = skb->data; + + if (len < ETH_ZLEN) + pad = ETH_ZLEN - len; + + gp_start = 0x800 - (len + pad); + + lp->tx_pkt_start = gp_start; + lp->collisions = 0; + + dev->stats.tx_bytes += skb->len; + + /* + * Command mode with status cleared should [in theory] + * mean no more interrupts can be pending on the card. + */ + + outb_p(AX_SYS, AX_CMD); + inb_p(RX_STATUS); + inb_p(TX_STATUS); + + lp->loading = 1; + lp->txing = 1; + + /* + * Turn interrupts back on while we spend a pleasant + * afternoon loading bytes into the board + */ + + spin_unlock_irqrestore(&lp->lock, flags); + + /* Set rx packet area to 0. */ + outw(0x00, RX_BUF_CLR); + /* aim - packet will be loaded into buffer start */ + outw(gp_start, GP_LOW); + /* load buffer (usual thing each byte increments the pointer) */ + outsb(DATAPORT, buf, len); + if (pad) { + while (pad--) /* Zero fill buffer tail */ + outb(0, DATAPORT); + } + /* the board reuses the same register */ + outw(gp_start, GP_LOW); + + if (lp->loading != 2) { + /* fire ... Trigger xmit. */ + outb(AX_XMIT, AX_CMD); + lp->loading = 0; + if (el_debug > 2) + pr_debug(" queued xmit.\n"); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + /* A receive upset our load, despite our best efforts */ + if (el_debug > 2) + pr_debug("%s: burped during tx load.\n", dev->name); + spin_lock_irqsave(&lp->lock, flags); + } while (1); +} + +/** + * el_interrupt: + * @irq: Interrupt number + * @dev_id: The 3c501 that burped + * + * Handle the ether interface interrupts. The 3c501 needs a lot more + * hand holding than most cards. In particular we get a transmit interrupt + * with a collision error because the board firmware isn't capable of rewinding + * its own transmit buffer pointers. It can however count to 16 for us. + * + * On the receive side the card is also very dumb. It has no buffering to + * speak of. We simply pull the packet out of its PIO buffer (which is slow) + * and queue it for the kernel. Then we reset the card for the next packet. + * + * We sometimes get surprise interrupts late both because the SMP IRQ delivery + * is message passing and because the card sometimes seems to deliver late. I + * think if it is part way through a receive and the mode is changed it carries + * on receiving and sends us an interrupt. We have to band aid all these cases + * to get a sensible 150kBytes/second performance. Even then you want a small + * TCP window. + */ + +static irqreturn_t el_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct net_local *lp; + int ioaddr; + int axsr; /* Aux. status reg. */ + + ioaddr = dev->base_addr; + lp = netdev_priv(dev); + + spin_lock(&lp->lock); + + /* + * What happened ? + */ + + axsr = inb(AX_STATUS); + + /* + * Log it + */ + + if (el_debug > 3) + pr_debug("%s: el_interrupt() aux=%#02x\n", dev->name, axsr); + + if (lp->loading == 1 && !lp->txing) + pr_warning("%s: Inconsistent state loading while not in tx\n", + dev->name); + + if (lp->txing) { + /* + * Board in transmit mode. 
May be loading. If we are + * loading we shouldn't have got this. + */ + int txsr = inb(TX_STATUS); + + if (lp->loading == 1) { + if (el_debug > 2) + pr_debug("%s: Interrupt while loading [txsr=%02x gp=%04x rp=%04x]\n", + dev->name, txsr, inw(GP_LOW), inw(RX_LOW)); + + /* Force a reload */ + lp->loading = 2; + spin_unlock(&lp->lock); + goto out; + } + if (el_debug > 6) + pr_debug("%s: txsr=%02x gp=%04x rp=%04x\n", dev->name, + txsr, inw(GP_LOW), inw(RX_LOW)); + + if ((axsr & 0x80) && (txsr & TX_READY) == 0) { + /* + * FIXME: is there a logic to whether to keep + * on trying or reset immediately ? + */ + if (el_debug > 1) + pr_debug("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n", + dev->name, txsr, axsr, + inw(ioaddr + EL1_DATAPTR), + inw(ioaddr + EL1_RXPTR)); + lp->txing = 0; + netif_wake_queue(dev); + } else if (txsr & TX_16COLLISIONS) { + /* + * Timed out + */ + if (el_debug) + pr_debug("%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name); + outb(AX_SYS, AX_CMD); + lp->txing = 0; + dev->stats.tx_aborted_errors++; + netif_wake_queue(dev); + } else if (txsr & TX_COLLISION) { + /* + * Retrigger xmit. + */ + + if (el_debug > 6) + pr_debug("%s: retransmitting after a collision.\n", dev->name); + /* + * Poor little chip can't reset its own start + * pointer + */ + + outb(AX_SYS, AX_CMD); + outw(lp->tx_pkt_start, GP_LOW); + outb(AX_XMIT, AX_CMD); + dev->stats.collisions++; + spin_unlock(&lp->lock); + goto out; + } else { + /* + * It worked.. we will now fall through and receive + */ + dev->stats.tx_packets++; + if (el_debug > 6) + pr_debug("%s: Tx succeeded %s\n", dev->name, + (txsr & TX_RDY) ? "." : "but tx is busy!"); + /* + * This is safe the interrupt is atomic WRT itself. + */ + lp->txing = 0; + /* In case more to transmit */ + netif_wake_queue(dev); + } + } else { + /* + * In receive mode. + */ + + int rxsr = inb(RX_STATUS); + if (el_debug > 5) + pr_debug("%s: rxsr=%02x txsr=%02x rp=%04x\n", + dev->name, rxsr, inb(TX_STATUS), inw(RX_LOW)); + /* + * Just reading rx_status fixes most errors. + */ + if (rxsr & RX_MISSED) + dev->stats.rx_missed_errors++; + else if (rxsr & RX_RUNT) { + /* Handled to avoid board lock-up. */ + dev->stats.rx_length_errors++; + if (el_debug > 5) + pr_debug("%s: runt.\n", dev->name); + } else if (rxsr & RX_GOOD) { + /* + * Receive worked. + */ + el_receive(dev); + } else { + /* + * Nothing? Something is broken! + */ + if (el_debug > 2) + pr_debug("%s: No packet seen, rxsr=%02x **resetting 3c501***\n", + dev->name, rxsr); + el_reset(dev); + } + } + + /* + * Move into receive mode + */ + + outb(AX_RX, AX_CMD); + outw(0x00, RX_BUF_CLR); + inb(RX_STATUS); /* Be certain that interrupts are cleared. */ + inb(TX_STATUS); + spin_unlock(&lp->lock); +out: + return IRQ_HANDLED; +} + + +/** + * el_receive: + * @dev: Device to pull the packets from + * + * We have a good packet. Well, not really "good", just mostly not broken. + * We must check everything to see if it is good. In particular we occasionally + * get wild packet sizes from the card. If the packet seems sane we PIO it + * off the card and queue it for the protocol layers. 
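+ *
+ * In outline, a condensed sketch of the function below (the driver's
+ * own names; the drop and error paths elided):
+ *
+ *	pkt_len = inw(RX_LOW);
+ *	if (pkt_len < 60 || pkt_len > 1536)
+ *		(wild size from the card: count the error and bail out)
+ *	outb(AX_SYS, AX_CMD);		(command mode to empty the buffer)
+ *	skb = dev_alloc_skb(pkt_len + 2);
+ *	skb_reserve(skb, 2);		(force 16 byte alignment)
+ *	insb(DATAPORT, skb_put(skb, pkt_len), pkt_len);
+ *	skb->protocol = eth_type_trans(skb, dev);
+ *	netif_rx(skb);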
+ */ + +static void el_receive(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + int pkt_len; + struct sk_buff *skb; + + pkt_len = inw(RX_LOW); + + if (el_debug > 4) + pr_debug(" el_receive %d.\n", pkt_len); + + if (pkt_len < 60 || pkt_len > 1536) { + if (el_debug) + pr_debug("%s: bogus packet, length=%d\n", + dev->name, pkt_len); + dev->stats.rx_over_errors++; + return; + } + + /* + * Command mode so we can empty the buffer + */ + + outb(AX_SYS, AX_CMD); + skb = dev_alloc_skb(pkt_len+2); + + /* + * Start of frame + */ + + outw(0x00, GP_LOW); + if (skb == NULL) { + pr_info("%s: Memory squeeze, dropping packet.\n", dev->name); + dev->stats.rx_dropped++; + return; + } else { + skb_reserve(skb, 2); /* Force 16 byte alignment */ + /* + * The read increments through the bytes. The interrupt + * handler will fix the pointer when it returns to + * receive mode. + */ + insb(DATAPORT, skb_put(skb, pkt_len), pkt_len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } +} + +/** + * el_reset: Reset a 3c501 card + * @dev: The 3c501 card about to get zapped + * + * Even resetting a 3c501 isn't simple. When you activate reset it loses all + * its configuration. You must hold the lock when doing this. The function + * cannot take the lock itself as it is callable from the irq handler. + */ + +static void el_reset(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + if (el_debug > 2) + pr_info("3c501 reset...\n"); + outb(AX_RESET, AX_CMD); /* Reset the chip */ + /* Aux control, irq and loopback enabled */ + outb(AX_LOOP, AX_CMD); + { + int i; + for (i = 0; i < 6; i++) /* Set the station address. */ + outb(dev->dev_addr[i], ioaddr + i); + } + + outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */ + outb(TX_NORM, TX_CMD); /* tx irq on done, collision */ + outb(RX_NORM, RX_CMD); /* Set Rx commands. */ + inb(RX_STATUS); /* Clear status. */ + inb(TX_STATUS); + lp->txing = 0; +} + +/** + * el1_close: + * @dev: 3c501 card to shut down + * + * Close a 3c501 card. The IFF_UP flag has been cleared by the user via + * the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued, + * and then disable the interrupts. Finally we reset the chip. The effects + * of the rest will be cleaned up by #el1_open. Always returns 0 indicating + * a success. + */ + +static int el1_close(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + if (el_debug > 2) + pr_info("%s: Shutting down Ethernet card at %#x.\n", + dev->name, ioaddr); + + netif_stop_queue(dev); + + /* + * Free and disable the IRQ. + */ + + free_irq(dev->irq, dev); + outb(AX_RESET, AX_CMD); /* Reset the chip */ + + return 0; +} + +/** + * set_multicast_list: + * @dev: The device to adjust + * + * Set or clear the multicast filter for this adaptor to use the best-effort + * filtering supported. The 3c501 supports only three modes of filtering. + * It always receives broadcasts and packets for itself. You can choose to + * optionally receive all packets, or all multicast packets on top of this. + */ + +static void set_multicast_list(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + if (dev->flags & IFF_PROMISC) { + outb(RX_PROM, RX_CMD); + inb(RX_STATUS); + } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { + /* Multicast or all multicast is the same */ + outb(RX_MULT, RX_CMD); + inb(RX_STATUS); /* Clear status. 
*/ + } else { + outb(RX_NORM, RX_CMD); + inb(RX_STATUS); + } +} + + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + return debug; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 level) +{ + debug = level; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, +}; + +#ifdef MODULE + +static struct net_device *dev_3c501; + +module_param(io, int, 0); +module_param(irq, int, 0); +MODULE_PARM_DESC(io, "EtherLink I/O base address"); +MODULE_PARM_DESC(irq, "EtherLink IRQ number"); + +/** + * init_module: + * + * When the driver is loaded as a module this function is called. We fake up + * a device structure with the base I/O and interrupt set as if it were being + * called from Space.c. This minimises the extra code that would otherwise + * be required. + * + * Returns 0 for success or -EIO if a card is not found. Returning an error + * here also causes the module to be unloaded + */ + +int __init init_module(void) +{ + dev_3c501 = el1_probe(-1); + if (IS_ERR(dev_3c501)) + return PTR_ERR(dev_3c501); + return 0; +} + +/** + * cleanup_module: + * + * The module is being unloaded. We unhook our network device from the system + * and then free up the resources we took when the card was found. + */ + +void __exit cleanup_module(void) +{ + struct net_device *dev = dev_3c501; + unregister_netdev(dev); + release_region(dev->base_addr, EL1_IO_EXTENT); + free_netdev(dev); +} + +#endif /* MODULE */ + +MODULE_AUTHOR("Donald Becker, Alan Cox"); +MODULE_DESCRIPTION("Support for the ancient 3Com 3c501 ethernet card"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/net/ethernet/3com/3c501.h b/drivers/net/ethernet/3com/3c501.h new file mode 100644 index 0000000..183fd55 --- /dev/null +++ b/drivers/net/ethernet/3com/3c501.h @@ -0,0 +1,91 @@ + +/* + * Index to functions. + */ + +static int el1_probe1(struct net_device *dev, int ioaddr); +static int el_open(struct net_device *dev); +static void el_timeout(struct net_device *dev); +static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t el_interrupt(int irq, void *dev_id); +static void el_receive(struct net_device *dev); +static void el_reset(struct net_device *dev); +static int el1_close(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static const struct ethtool_ops netdev_ethtool_ops; + +#define EL1_IO_EXTENT 16 + +#ifndef EL_DEBUG +#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */ +#endif /* Anything above 5 is wordy death! */ +#define debug el_debug +static int el_debug = EL_DEBUG; + +/* + * Board-specific info in netdev_priv(dev). + */ + +struct net_local +{ + int tx_pkt_start; /* The length of the current Tx packet. 
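+ In practice this holds the GP buffer start offset of the packet (0x800 minus the padded length); the collision path rewinds GP_LOW to it before retriggering the transmit.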
*/ + int collisions; /* Tx collisions this packet */ + int loading; /* Spot buffer load collisions */ + int txing; /* True if card is in TX mode */ + spinlock_t lock; /* Serializing lock */ +}; + + +#define RX_STATUS (ioaddr + 0x06) +#define RX_CMD RX_STATUS +#define TX_STATUS (ioaddr + 0x07) +#define TX_CMD TX_STATUS +#define GP_LOW (ioaddr + 0x08) +#define GP_HIGH (ioaddr + 0x09) +#define RX_BUF_CLR (ioaddr + 0x0A) +#define RX_LOW (ioaddr + 0x0A) +#define RX_HIGH (ioaddr + 0x0B) +#define SAPROM (ioaddr + 0x0C) +#define AX_STATUS (ioaddr + 0x0E) +#define AX_CMD AX_STATUS +#define DATAPORT (ioaddr + 0x0F) +#define TX_RDY 0x08 /* In TX_STATUS */ + +#define EL1_DATAPTR 0x08 +#define EL1_RXPTR 0x0A +#define EL1_SAPROM 0x0C +#define EL1_DATAPORT 0x0f + +/* + * Writes to the ax command register. + */ + +#define AX_OFF 0x00 /* Irq off, buffer access on */ +#define AX_SYS 0x40 /* Load the buffer */ +#define AX_XMIT 0x44 /* Transmit a packet */ +#define AX_RX 0x48 /* Receive a packet */ +#define AX_LOOP 0x0C /* Loopback mode */ +#define AX_RESET 0x80 + +/* + * Normal receive mode written to RX_STATUS. We must intr on short packets + * to avoid bogus rx lockups. + */ + +#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */ +#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */ +#define RX_MULT 0xE8 /* Accept multicast packets. */ +#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */ + +/* + * TX_STATUS register. + */ + +#define TX_COLLISION 0x02 +#define TX_16COLLISIONS 0x04 +#define TX_READY 0x08 + +#define RX_RUNT 0x08 +#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */ +#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */ + diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c new file mode 100644 index 0000000..44b28b2 --- /dev/null +++ b/drivers/net/ethernet/3com/3c509.c @@ -0,0 +1,1594 @@ +/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */ +/* + Written 1993-2000 by Donald Becker. + + Copyright 1994-2000 by Donald Becker. + Copyright 1993 United States Government as represented by the + Director, National Security Agency. This software may be used and + distributed according to the terms of the GNU General Public License, + incorporated herein by reference. + + This driver is for the 3Com EtherLinkIII series. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Known limitations: + Because of the way 3c509 ISA detection works it's difficult to predict + a priori which of several ISA-mode cards will be detected first. + + This driver does not use predictive interrupt mode, resulting in higher + packet latency but lower overhead. If interrupts are disabled for an + unusually long time it could also result in missed packets, but in + practice this rarely happens. + + + FIXES: + Alan Cox: Removed the 'Unexpected interrupt' bug. + Michael Meskes: Upgraded to Donald Becker's version 1.07. + Alan Cox: Increased the eeprom delay. Regardless of + what the docs say some people definitely + get problems with lower (but in card spec) + delays + v1.10 4/21/97 Fixed module code so that multiple cards may be detected, + other cleanups. -djb + Andrea Arcangeli: Upgraded to Donald Becker's version 1.12. 
+ Rick Payne: Fixed SMP race condition + v1.13 9/8/97 Made 'max_interrupt_work' an insmod-settable variable -djb + v1.14 10/15/97 Avoided waiting..discard message for fast machines -djb + v1.15 1/31/98 Faster recovery for Tx errors. -djb + v1.16 2/3/98 Different ID port handling to avoid sound cards. -djb + v1.18 12Mar2001 Andrew Morton + - Avoid bogus detect of 3c590's (Andrzej Krzysztofowicz) + - Reviewed against 1.18 from scyld.com + v1.18a 17Nov2001 Jeff Garzik + - ethtool support + v1.18b 1Mar2002 Zwane Mwaikambo + - Power Management support + v1.18c 1Mar2002 David Ruggiero + - Full duplex support + v1.19 16Oct2002 Zwane Mwaikambo + - Additional ethtool features + v1.19a 28Oct2002 Davud Ruggiero + - Increase *read_eeprom udelay to workaround oops with 2 cards. + v1.19b 08Nov2002 Marc Zyngier + - Introduce driver model for EISA cards. + v1.20 04Feb2008 Ondrej Zary + - convert to isa_driver and pnp_driver and some cleanups +*/ + +#define DRV_NAME "3c509" +#define DRV_VERSION "1.20" +#define DRV_RELDATE "04Feb2008" + +/* A few values that may be tweaked. */ + +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (400*HZ/1000) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for udelay() */ +#include +#include +#include +#include +#include + +#include +#include +#include + +static char version[] __devinitdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; + +#ifdef EL3_DEBUG +static int el3_debug = EL3_DEBUG; +#else +static int el3_debug = 2; +#endif + +/* Used to do a global count of all the cards in the system. Must be + * a global variable so that the mca/eisa probe routines can increment + * it */ +static int el3_cards = 0; +#define EL3_MAX_CARDS 8 + +/* To minimize the size of the driver source I only define operating + constants if they are used several times. You'll need the manual + anyway if you want to understand driver details. */ +/* Offsets from base I/O address. */ +#define EL3_DATA 0x00 +#define EL3_CMD 0x0e +#define EL3_STATUS 0x0e +#define EEPROM_READ 0x80 + +#define EL3_IO_EXTENT 16 + +#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) + + +/* The top five bits written to EL3_CMD are a command, the lower + 11 bits are the parameter, if applicable. */ +enum c509cmd { + TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, + RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11, + TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, + FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, + SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, + SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11, + StatsDisable = 22<<11, StopCoax = 23<<11, PowerUp = 27<<11, + PowerDown = 28<<11, PowerAuto = 29<<11}; + +enum c509status { + IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, + TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, + IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000, }; + +/* The SetRxFilter command accepts the following classes: */ +enum RxFilter { + RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; + +/* Register window 1 offsets, the window used in normal operation. */ +#define TX_FIFO 0x00 +#define RX_FIFO 0x00 +#define RX_STATUS 0x08 +#define TX_STATUS 0x0B +#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. 
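+ el3_start_xmit() polls this after stuffing the FIFO: with more than 1536 bytes free the queue is restarted at once, otherwise a SetTxThreshold command asks for a TxAvailable interrupt once that much room opens up.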
*/ + +#define WN0_CONF_CTRL 0x04 /* Window 0: Configuration control register */ +#define WN0_ADDR_CONF 0x06 /* Window 0: Address configuration register */ +#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */ +#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */ +#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */ +#define WN4_NETDIAG 0x06 /* Window 4: Net diagnostic */ +#define FD_ENABLE 0x8000 /* Enable full-duplex ("external loopback") */ + +/* + * Must be a power of two (we use a binary and in the + * circular queue) + */ +#define SKB_QUEUE_SIZE 64 + +enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA }; + +struct el3_private { + spinlock_t lock; + /* skb send-queue */ + int head, size; + struct sk_buff *queue[SKB_QUEUE_SIZE]; + enum el3_cardtype type; +}; +static int id_port; +static int current_tag; +static struct net_device *el3_devs[EL3_MAX_CARDS]; + +/* Parameters that may be passed into the module. */ +static int debug = -1; +static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1}; +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 10; +#ifdef CONFIG_PNP +static int nopnp; +#endif + +static int __devinit el3_common_init(struct net_device *dev); +static void el3_common_remove(struct net_device *dev); +static ushort id_read_eeprom(int index); +static ushort read_eeprom(int ioaddr, int index); +static int el3_open(struct net_device *dev); +static netdev_tx_t el3_start_xmit(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t el3_interrupt(int irq, void *dev_id); +static void update_stats(struct net_device *dev); +static struct net_device_stats *el3_get_stats(struct net_device *dev); +static int el3_rx(struct net_device *dev); +static int el3_close(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void el3_tx_timeout (struct net_device *dev); +static void el3_down(struct net_device *dev); +static void el3_up(struct net_device *dev); +static const struct ethtool_ops ethtool_ops; +#ifdef CONFIG_PM +static int el3_suspend(struct device *, pm_message_t); +static int el3_resume(struct device *); +#else +#define el3_suspend NULL +#define el3_resume NULL +#endif + + +/* generic device remove for all device types */ +static int el3_device_remove (struct device *device); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void el3_poll_controller(struct net_device *dev); +#endif + +/* Return 0 on success, 1 on error, 2 when found already detected PnP card */ +static int el3_isa_id_sequence(__be16 *phys_addr) +{ + short lrs_state = 0xff; + int i; + + /* ISA boards are detected by sending the ID sequence to the + ID_PORT. We find cards past the first by setting the 'current_tag' + on cards as they are found. Cards with their tag set will not + respond to subsequent ID sequences. */ + + outb(0x00, id_port); + outb(0x00, id_port); + for (i = 0; i < 255; i++) { + outb(lrs_state, id_port); + lrs_state <<= 1; + lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state; + } + /* For the first probe, clear all board's tag registers. */ + if (current_tag == 0) + outb(0xd0, id_port); + else /* Otherwise kill off already-found boards. */ + outb(0xd8, id_port); + if (id_read_eeprom(7) != 0x6d50) + return 1; + /* Read in EEPROM data, which does contention-select. + Only the lowest address board will stay "on-line". + 3Com got the byte order backwards. 
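+ (hence the htons() on each station-address word read out below)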
*/ + for (i = 0; i < 3; i++) + phys_addr[i] = htons(id_read_eeprom(i)); +#ifdef CONFIG_PNP + if (!nopnp) { + /* The ISA PnP 3c509 cards respond to the ID sequence too. + This check is needed in order not to register them twice. */ + for (i = 0; i < el3_cards; i++) { + struct el3_private *lp = netdev_priv(el3_devs[i]); + if (lp->type == EL3_PNP && + !memcmp(phys_addr, el3_devs[i]->dev_addr, + ETH_ALEN)) { + if (el3_debug > 3) + pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n", + phys_addr[0] & 0xff, phys_addr[0] >> 8, + phys_addr[1] & 0xff, phys_addr[1] >> 8, + phys_addr[2] & 0xff, phys_addr[2] >> 8); + /* Set the adaptor tag so that the next card can be found. */ + outb(0xd0 + ++current_tag, id_port); + return 2; + } + } + } +#endif /* CONFIG_PNP */ + return 0; + +} + +static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr, + int ioaddr, int irq, int if_port, + enum el3_cardtype type) +{ + struct el3_private *lp = netdev_priv(dev); + + memcpy(dev->dev_addr, phys_addr, ETH_ALEN); + dev->base_addr = ioaddr; + dev->irq = irq; + dev->if_port = if_port; + lp->type = type; +} + +static int __devinit el3_isa_match(struct device *pdev, + unsigned int ndev) +{ + struct net_device *dev; + int ioaddr, isa_irq, if_port, err; + unsigned int iobase; + __be16 phys_addr[3]; + + while ((err = el3_isa_id_sequence(phys_addr)) == 2) + ; /* Skip to next card when PnP card found */ + if (err == 1) + return 0; + + iobase = id_read_eeprom(8); + if_port = iobase >> 14; + ioaddr = 0x200 + ((iobase & 0x1f) << 4); + if (irq[el3_cards] > 1 && irq[el3_cards] < 16) + isa_irq = irq[el3_cards]; + else + isa_irq = id_read_eeprom(9) >> 12; + + dev = alloc_etherdev(sizeof(struct el3_private)); + if (!dev) + return -ENOMEM; + + netdev_boot_setup_check(dev); + + if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) { + free_netdev(dev); + return 0; + } + + /* Set the adaptor tag so that the next card can be found. */ + outb(0xd0 + ++current_tag, id_port); + + /* Activate the adaptor at the EEPROM location. */ + outb((ioaddr >> 4) | 0xe0, id_port); + + EL3WINDOW(0); + if (inw(ioaddr) != 0x6d50) { + free_netdev(dev); + return 0; + } + + /* Free the interrupt so that some other card can use it. */ + outw(0x0f00, ioaddr + WN0_IRQ); + + el3_dev_fill(dev, phys_addr, ioaddr, isa_irq, if_port, EL3_ISA); + dev_set_drvdata(pdev, dev); + if (el3_common_init(dev)) { + free_netdev(dev); + return 0; + } + + el3_devs[el3_cards++] = dev; + return 1; +} + +static int __devexit el3_isa_remove(struct device *pdev, + unsigned int ndev) +{ + el3_device_remove(pdev); + dev_set_drvdata(pdev, NULL); + return 0; +} + +#ifdef CONFIG_PM +static int el3_isa_suspend(struct device *dev, unsigned int n, + pm_message_t state) +{ + current_tag = 0; + return el3_suspend(dev, state); +} + +static int el3_isa_resume(struct device *dev, unsigned int n) +{ + struct net_device *ndev = dev_get_drvdata(dev); + int ioaddr = ndev->base_addr, err; + __be16 phys_addr[3]; + + while ((err = el3_isa_id_sequence(phys_addr)) == 2) + ; /* Skip to next card when PnP card found */ + if (err == 1) + return 0; + /* Set the adaptor tag so that the next card can be found. */ + outb(0xd0 + ++current_tag, id_port); + /* Enable the card */ + outb((ioaddr >> 4) | 0xe0, id_port); + EL3WINDOW(0); + if (inw(ioaddr) != 0x6d50) + return 1; + /* Free the interrupt so that some other card can use it. 
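+ Writing 0x0f00 leaves the IRQ select field (bits 12-15 of WN0_IRQ) at zero; el3_up() later programs the real line back in with (dev->irq << 12) | 0x0f00.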
*/ + outw(0x0f00, ioaddr + WN0_IRQ); + return el3_resume(dev); +} +#endif + +static struct isa_driver el3_isa_driver = { + .match = el3_isa_match, + .remove = __devexit_p(el3_isa_remove), +#ifdef CONFIG_PM + .suspend = el3_isa_suspend, + .resume = el3_isa_resume, +#endif + .driver = { + .name = "3c509" + }, +}; +static int isa_registered; + +#ifdef CONFIG_PNP +static struct pnp_device_id el3_pnp_ids[] = { + { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */ + { .id = "TCM5091" }, /* 3Com Etherlink III */ + { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */ + { .id = "TCM5095" }, /* 3Com Etherlink III (TPO) */ + { .id = "TCM5098" }, /* 3Com Etherlink III (TPC) */ + { .id = "PNP80f7" }, /* 3Com Etherlink III compatible */ + { .id = "PNP80f8" }, /* 3Com Etherlink III compatible */ + { .id = "" } +}; +MODULE_DEVICE_TABLE(pnp, el3_pnp_ids); + +static int __devinit el3_pnp_probe(struct pnp_dev *pdev, + const struct pnp_device_id *id) +{ + short i; + int ioaddr, irq, if_port; + __be16 phys_addr[3]; + struct net_device *dev = NULL; + int err; + + ioaddr = pnp_port_start(pdev, 0); + if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-pnp")) + return -EBUSY; + irq = pnp_irq(pdev, 0); + EL3WINDOW(0); + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i)); + if_port = read_eeprom(ioaddr, 8) >> 14; + dev = alloc_etherdev(sizeof(struct el3_private)); + if (!dev) { + release_region(ioaddr, EL3_IO_EXTENT); + return -ENOMEM; + } + SET_NETDEV_DEV(dev, &pdev->dev); + netdev_boot_setup_check(dev); + + el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP); + pnp_set_drvdata(pdev, dev); + err = el3_common_init(dev); + + if (err) { + pnp_set_drvdata(pdev, NULL); + free_netdev(dev); + return err; + } + + el3_devs[el3_cards++] = dev; + return 0; +} + +static void __devexit el3_pnp_remove(struct pnp_dev *pdev) +{ + el3_common_remove(pnp_get_drvdata(pdev)); + pnp_set_drvdata(pdev, NULL); +} + +#ifdef CONFIG_PM +static int el3_pnp_suspend(struct pnp_dev *pdev, pm_message_t state) +{ + return el3_suspend(&pdev->dev, state); +} + +static int el3_pnp_resume(struct pnp_dev *pdev) +{ + return el3_resume(&pdev->dev); +} +#endif + +static struct pnp_driver el3_pnp_driver = { + .name = "3c509", + .id_table = el3_pnp_ids, + .probe = el3_pnp_probe, + .remove = __devexit_p(el3_pnp_remove), +#ifdef CONFIG_PM + .suspend = el3_pnp_suspend, + .resume = el3_pnp_resume, +#endif +}; +static int pnp_registered; +#endif /* CONFIG_PNP */ + +#ifdef CONFIG_EISA +static struct eisa_device_id el3_eisa_ids[] = { + { "TCM5090" }, + { "TCM5091" }, + { "TCM5092" }, + { "TCM5093" }, + { "TCM5094" }, + { "TCM5095" }, + { "TCM5098" }, + { "" } +}; +MODULE_DEVICE_TABLE(eisa, el3_eisa_ids); + +static int el3_eisa_probe (struct device *device); + +static struct eisa_driver el3_eisa_driver = { + .id_table = el3_eisa_ids, + .driver = { + .name = "3c579", + .probe = el3_eisa_probe, + .remove = __devexit_p (el3_device_remove), + .suspend = el3_suspend, + .resume = el3_resume, + } +}; +static int eisa_registered; +#endif + +#ifdef CONFIG_MCA +static int el3_mca_probe(struct device *dev); + +static short el3_mca_adapter_ids[] __initdata = { + 0x627c, + 0x627d, + 0x62db, + 0x62f6, + 0x62f7, + 0x0000 +}; + +static char *el3_mca_adapter_names[] __initdata = { + "3Com 3c529 EtherLink III (10base2)", + "3Com 3c529 EtherLink III (10baseT)", + "3Com 3c529 EtherLink III (test mode)", + "3Com 3c529 EtherLink III (TP or coax)", + "3Com 3c529 EtherLink III (TP)", + NULL +}; + +static struct mca_driver el3_mca_driver = { + .id_table = 
el3_mca_adapter_ids, + .driver = { + .name = "3c529", + .bus = &mca_bus_type, + .probe = el3_mca_probe, + .remove = __devexit_p(el3_device_remove), + .suspend = el3_suspend, + .resume = el3_resume, + }, +}; +static int mca_registered; +#endif /* CONFIG_MCA */ + +static const struct net_device_ops netdev_ops = { + .ndo_open = el3_open, + .ndo_stop = el3_close, + .ndo_start_xmit = el3_start_xmit, + .ndo_get_stats = el3_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = el3_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = el3_poll_controller, +#endif +}; + +static int __devinit el3_common_init(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + int err; + const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; + + spin_lock_init(&lp->lock); + + if (dev->mem_start & 0x05) { /* xcvr codes 1/3/4/12 */ + dev->if_port = (dev->mem_start & 0x0f); + } else { /* xcvr codes 0/8 */ + /* use eeprom value, but save user's full-duplex selection */ + dev->if_port |= (dev->mem_start & 0x08); + } + + /* The EL3-specific entries in the device structure. */ + dev->netdev_ops = &netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + SET_ETHTOOL_OPS(dev, ðtool_ops); + + err = register_netdev(dev); + if (err) { + pr_err("Failed to register 3c5x9 at %#3.3lx, IRQ %d.\n", + dev->base_addr, dev->irq); + release_region(dev->base_addr, EL3_IO_EXTENT); + return err; + } + + pr_info("%s: 3c5x9 found at %#3.3lx, %s port, address %pM, IRQ %d.\n", + dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)], + dev->dev_addr, dev->irq); + + if (el3_debug > 0) + pr_info("%s", version); + return 0; + +} + +static void el3_common_remove (struct net_device *dev) +{ + unregister_netdev (dev); + release_region(dev->base_addr, EL3_IO_EXTENT); + free_netdev (dev); +} + +#ifdef CONFIG_MCA +static int __init el3_mca_probe(struct device *device) +{ + /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, + * heavily modified by Chris Beauregard + * (cpbeaure@csclub.uwaterloo.ca) to support standard MCA + * probing. 
+ * + * redone for multi-card detection by ZP Gu (zpg@castle.net) + * now works as a module */ + + short i; + int ioaddr, irq, if_port; + __be16 phys_addr[3]; + struct net_device *dev = NULL; + u_char pos4, pos5; + struct mca_device *mdev = to_mca_device(device); + int slot = mdev->slot; + int err; + + pos4 = mca_device_read_stored_pos(mdev, 4); + pos5 = mca_device_read_stored_pos(mdev, 5); + + ioaddr = ((short)((pos4&0xfc)|0x02)) << 8; + irq = pos5 & 0x0f; + + + pr_info("3c529: found %s at slot %d\n", + el3_mca_adapter_names[mdev->index], slot + 1); + + /* claim the slot */ + strncpy(mdev->name, el3_mca_adapter_names[mdev->index], + sizeof(mdev->name)); + mca_device_set_claim(mdev, 1); + + if_port = pos4 & 0x03; + + irq = mca_device_transform_irq(mdev, irq); + ioaddr = mca_device_transform_ioport(mdev, ioaddr); + if (el3_debug > 2) { + pr_debug("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port); + } + EL3WINDOW(0); + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i)); + + dev = alloc_etherdev(sizeof (struct el3_private)); + if (dev == NULL) { + release_region(ioaddr, EL3_IO_EXTENT); + return -ENOMEM; + } + + netdev_boot_setup_check(dev); + + el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA); + dev_set_drvdata(device, dev); + err = el3_common_init(dev); + + if (err) { + dev_set_drvdata(device, NULL); + free_netdev(dev); + return -ENOMEM; + } + + el3_devs[el3_cards++] = dev; + return 0; +} + +#endif /* CONFIG_MCA */ + +#ifdef CONFIG_EISA +static int __init el3_eisa_probe (struct device *device) +{ + short i; + int ioaddr, irq, if_port; + __be16 phys_addr[3]; + struct net_device *dev = NULL; + struct eisa_device *edev; + int err; + + /* Yeepee, The driver framework is calling us ! */ + edev = to_eisa_device (device); + ioaddr = edev->base_addr; + + if (!request_region(ioaddr, EL3_IO_EXTENT, "3c579-eisa")) + return -EBUSY; + + /* Change the register set to the configuration window 0. */ + outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD); + + irq = inw(ioaddr + WN0_IRQ) >> 12; + if_port = inw(ioaddr + 6)>>14; + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i)); + + /* Restore the "Product ID" to the EEPROM read register. */ + read_eeprom(ioaddr, 3); + + dev = alloc_etherdev(sizeof (struct el3_private)); + if (dev == NULL) { + release_region(ioaddr, EL3_IO_EXTENT); + return -ENOMEM; + } + + netdev_boot_setup_check(dev); + + el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA); + eisa_set_drvdata (edev, dev); + err = el3_common_init(dev); + + if (err) { + eisa_set_drvdata (edev, NULL); + free_netdev(dev); + return err; + } + + el3_devs[el3_cards++] = dev; + return 0; +} +#endif + +/* This remove works for all device types. + * + * The net dev must be stored in the driver data field */ +static int __devexit el3_device_remove (struct device *device) +{ + struct net_device *dev; + + dev = dev_get_drvdata(device); + + el3_common_remove (dev); + return 0; +} + +/* Read a word from the EEPROM using the regular EEPROM access register. + Assume that we are in register window zero. + */ +static ushort read_eeprom(int ioaddr, int index) +{ + outw(EEPROM_READ + index, ioaddr + 10); + /* Pause for at least 162 us. for the read to take place. + Some chips seem to require much longer */ + mdelay(2); + return inw(ioaddr + 12); +} + +/* Read a word from the EEPROM when in the ISA ID probe state. */ +static ushort id_read_eeprom(int index) +{ + int bit, word = 0; + + /* Issue read command, and pause for at least 162 us. for it to complete. 
+ Assume extra-fast 16Mhz bus. */ + outb(EEPROM_READ + index, id_port); + + /* Pause for at least 162 us. for the read to take place. */ + /* Some chips seem to require much longer */ + mdelay(4); + + for (bit = 15; bit >= 0; bit--) + word = (word << 1) + (inb(id_port) & 0x01); + + if (el3_debug > 3) + pr_debug(" 3c509 EEPROM word %d %#4.4x.\n", index, word); + + return word; +} + + +static int +el3_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + int i; + + outw(TxReset, ioaddr + EL3_CMD); + outw(RxReset, ioaddr + EL3_CMD); + outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); + + i = request_irq(dev->irq, el3_interrupt, 0, dev->name, dev); + if (i) + return i; + + EL3WINDOW(0); + if (el3_debug > 3) + pr_debug("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name, + dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS)); + + el3_up(dev); + + if (el3_debug > 3) + pr_debug("%s: Opened 3c509 IRQ %d status %4.4x.\n", + dev->name, dev->irq, inw(ioaddr + EL3_STATUS)); + + return 0; +} + +static void +el3_tx_timeout (struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + /* Transmitter timeout, serious problems. */ + pr_warning("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d.\n", + dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), + inw(ioaddr + TX_FREE)); + dev->stats.tx_errors++; + dev->trans_start = jiffies; /* prevent tx timeout */ + /* Issue TX_RESET and TX_START commands. */ + outw(TxReset, ioaddr + EL3_CMD); + outw(TxEnable, ioaddr + EL3_CMD); + netif_wake_queue(dev); +} + + +static netdev_tx_t +el3_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + unsigned long flags; + + netif_stop_queue (dev); + + dev->stats.tx_bytes += skb->len; + + if (el3_debug > 4) { + pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n", + dev->name, skb->len, inw(ioaddr + EL3_STATUS)); + } +#if 0 +#ifndef final_version + { /* Error-checking code, delete someday. */ + ushort status = inw(ioaddr + EL3_STATUS); + if (status & 0x0001 && /* IRQ line active, missed one. */ + inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */ + pr_debug("%s: Missed interrupt, status then %04x now %04x" + " Tx %2.2x Rx %4.4x.\n", dev->name, status, + inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS), + inw(ioaddr + RX_STATUS)); + /* Fake interrupt trigger by masking, acknowledge interrupts. */ + outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); + } + } +#endif +#endif + /* + * We lock the driver against other processors. Note + * we don't need to lock versus the IRQ as we suspended + * that. This means that we lose the ability to take + * an RX during a TX upload. That sucks a bit with SMP + * on an original 3c509 (2K buffer) + * + * Using disable_irq stops us crapping on other + * time sensitive devices. + */ + + spin_lock_irqsave(&lp->lock, flags); + + /* Put out the doubleword header... */ + outw(skb->len, ioaddr + TX_FIFO); + outw(0x00, ioaddr + TX_FIFO); + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + + if (inw(ioaddr + TX_FREE) > 1536) + netif_start_queue(dev); + else + /* Interrupt us when the FIFO has room for max-sized packet. */ + outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); + + spin_unlock_irqrestore(&lp->lock, flags); + + dev_kfree_skb (skb); + + /* Clear the Tx status stack. 
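+ The chip keeps a small per-packet stack of Tx status bytes; each 0x00 written to TX_STATUS below pops one entry, and up to three are drained per call.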
*/ + { + short tx_status; + int i = 4; + + while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; + if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); + if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); + outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ + } + } + return NETDEV_TX_OK; +} + +/* The EL3 interrupt handler. */ +static irqreturn_t +el3_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct el3_private *lp; + int ioaddr, status; + int i = max_interrupt_work; + + lp = netdev_priv(dev); + spin_lock(&lp->lock); + + ioaddr = dev->base_addr; + + if (el3_debug > 4) { + status = inw(ioaddr + EL3_STATUS); + pr_debug("%s: interrupt, status %4.4x.\n", dev->name, status); + } + + while ((status = inw(ioaddr + EL3_STATUS)) & + (IntLatch | RxComplete | StatsFull)) { + + if (status & RxComplete) + el3_rx(dev); + + if (status & TxAvailable) { + if (el3_debug > 5) + pr_debug(" TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue (dev); + } + if (status & (AdapterFailure | RxEarly | StatsFull | TxComplete)) { + /* Handle all uncommon interrupts. */ + if (status & StatsFull) /* Empty statistics. */ + update_stats(dev); + if (status & RxEarly) { /* Rx early is unused. */ + el3_rx(dev); + outw(AckIntr | RxEarly, ioaddr + EL3_CMD); + } + if (status & TxComplete) { /* Really Tx error. */ + short tx_status; + int i = 4; + + while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; + if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); + if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); + outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ + } + } + if (status & AdapterFailure) { + /* Adapter failure requires Rx reset and reinit. */ + outw(RxReset, ioaddr + EL3_CMD); + /* Set the Rx filter to the current state. */ + outw(SetRxFilter | RxStation | RxBroadcast + | (dev->flags & IFF_ALLMULTI ? RxMulticast : 0) + | (dev->flags & IFF_PROMISC ? RxProm : 0), + ioaddr + EL3_CMD); + outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */ + outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); + } + } + + if (--i < 0) { + pr_err("%s: Infinite loop in interrupt, status %4.4x.\n", + dev->name, status); + /* Clear all interrupts. */ + outw(AckIntr | 0xFF, ioaddr + EL3_CMD); + break; + } + /* Acknowledge the IRQ. */ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); /* Ack IRQ */ + } + + if (el3_debug > 4) { + pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, + inw(ioaddr + EL3_STATUS)); + } + spin_unlock(&lp->lock); + return IRQ_HANDLED; +} + + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void el3_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + el3_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static struct net_device_stats * +el3_get_stats(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + + /* + * This is fast enough not to bother with disable IRQ + * stuff. + */ + + spin_lock_irqsave(&lp->lock, flags); + update_stats(dev); + spin_unlock_irqrestore(&lp->lock, flags); + return &dev->stats; +} + +/* Update statistics. 
We change to register window 6, so this should be run + single-threaded if the device is active. This is expected to be a rare + operation, and it's simpler for the rest of the driver to assume that + window 1 is always valid rather than use a special window-state variable. + */ +static void update_stats(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + if (el3_debug > 5) + pr_debug(" Updating the statistics.\n"); + /* Turn off statistics updates while reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + /* Switch to the stats window, and read everything. */ + EL3WINDOW(6); + dev->stats.tx_carrier_errors += inb(ioaddr + 0); + dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); + /* Multiple collisions. */ inb(ioaddr + 2); + dev->stats.collisions += inb(ioaddr + 3); + dev->stats.tx_window_errors += inb(ioaddr + 4); + dev->stats.rx_fifo_errors += inb(ioaddr + 5); + dev->stats.tx_packets += inb(ioaddr + 6); + /* Rx packets */ inb(ioaddr + 7); + /* Tx deferrals */ inb(ioaddr + 8); + inw(ioaddr + 10); /* Total Rx and Tx octets. */ + inw(ioaddr + 12); + + /* Back to window 1, and turn statistics back on. */ + EL3WINDOW(1); + outw(StatsEnable, ioaddr + EL3_CMD); +} + +static int +el3_rx(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + short rx_status; + + if (el3_debug > 5) + pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n", + inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); + while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) { + if (rx_status & 0x4000) { /* Error, update stats. */ + short error = rx_status & 0x3800; + + outw(RxDiscard, ioaddr + EL3_CMD); + dev->stats.rx_errors++; + switch (error) { + case 0x0000: dev->stats.rx_over_errors++; break; + case 0x0800: dev->stats.rx_length_errors++; break; + case 0x1000: dev->stats.rx_frame_errors++; break; + case 0x1800: dev->stats.rx_length_errors++; break; + case 0x2000: dev->stats.rx_frame_errors++; break; + case 0x2800: dev->stats.rx_crc_errors++; break; + } + } else { + short pkt_len = rx_status & 0x7ff; + struct sk_buff *skb; + + skb = dev_alloc_skb(pkt_len+5); + if (el3_debug > 4) + pr_debug("Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + skb_reserve(skb, 2); /* Align IP on 16 byte */ + + /* 'skb->data' points to the start of sk_buff data area. */ + insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len), + (pkt_len + 3) >> 2); + + outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ + skb->protocol = eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + continue; + } + outw(RxDiscard, ioaddr + EL3_CMD); + dev->stats.rx_dropped++; + if (el3_debug) + pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n", + dev->name, pkt_len); + } + inw(ioaddr + EL3_STATUS); /* Delay. */ + while (inw(ioaddr + EL3_STATUS) & 0x1000) + pr_debug(" Waiting for 3c509 to discard packet, status %x.\n", + inw(ioaddr + EL3_STATUS) ); + } + + return 0; +} + +/* + * Set or clear the multicast filter for this adaptor. 
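+ * IFF_PROMISC selects RxStation|RxMulticast|RxBroadcast|RxProm; any
+ * multicast addresses or IFF_ALLMULTI add RxMulticast (all multicast
+ * frames are taken, there is no per-address filter); otherwise only
+ * RxStation|RxBroadcast remain.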
+ */ +static void +set_multicast_list(struct net_device *dev) +{ + unsigned long flags; + struct el3_private *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + int mc_count = netdev_mc_count(dev); + + if (el3_debug > 1) { + static int old; + if (old != mc_count) { + old = mc_count; + pr_debug("%s: Setting Rx mode to %d addresses.\n", + dev->name, mc_count); + } + } + spin_lock_irqsave(&lp->lock, flags); + if (dev->flags&IFF_PROMISC) { + outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, + ioaddr + EL3_CMD); + } + else if (mc_count || (dev->flags&IFF_ALLMULTI)) { + outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD); + } + else + outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); + spin_unlock_irqrestore(&lp->lock, flags); +} + +static int +el3_close(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct el3_private *lp = netdev_priv(dev); + + if (el3_debug > 2) + pr_debug("%s: Shutting down ethercard.\n", dev->name); + + el3_down(dev); + + free_irq(dev->irq, dev); + /* Switching back to window 0 disables the IRQ. */ + EL3WINDOW(0); + if (lp->type != EL3_EISA) { + /* But we explicitly zero the IRQ line select anyway. Don't do + * it on EISA cards, it prevents the module from getting an + * IRQ after unload+reload... */ + outw(0x0f00, ioaddr + WN0_IRQ); + } + + return 0; +} + +static int +el3_link_ok(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + u16 tmp; + + EL3WINDOW(4); + tmp = inw(ioaddr + WN4_MEDIA); + EL3WINDOW(1); + return tmp & (1<<11); +} + +static int +el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u16 tmp; + int ioaddr = dev->base_addr; + + EL3WINDOW(0); + /* obtain current transceiver via WN4_MEDIA? */ + tmp = inw(ioaddr + WN0_ADDR_CONF); + ecmd->transceiver = XCVR_INTERNAL; + switch (tmp >> 14) { + case 0: + ecmd->port = PORT_TP; + break; + case 1: + ecmd->port = PORT_AUI; + ecmd->transceiver = XCVR_EXTERNAL; + break; + case 3: + ecmd->port = PORT_BNC; + default: + break; + } + + ecmd->duplex = DUPLEX_HALF; + ecmd->supported = 0; + tmp = inw(ioaddr + WN0_CONF_CTRL); + if (tmp & (1<<13)) + ecmd->supported |= SUPPORTED_AUI; + if (tmp & (1<<12)) + ecmd->supported |= SUPPORTED_BNC; + if (tmp & (1<<9)) { + ecmd->supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full; /* hmm... 
*/ + EL3WINDOW(4); + tmp = inw(ioaddr + WN4_NETDIAG); + if (tmp & FD_ENABLE) + ecmd->duplex = DUPLEX_FULL; + } + + ethtool_cmd_speed_set(ecmd, SPEED_10); + EL3WINDOW(1); + return 0; +} + +static int +el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u16 tmp; + int ioaddr = dev->base_addr; + + if (ecmd->speed != SPEED_10) + return -EINVAL; + if ((ecmd->duplex != DUPLEX_HALF) && (ecmd->duplex != DUPLEX_FULL)) + return -EINVAL; + if ((ecmd->transceiver != XCVR_INTERNAL) && (ecmd->transceiver != XCVR_EXTERNAL)) + return -EINVAL; + + /* change XCVR type */ + EL3WINDOW(0); + tmp = inw(ioaddr + WN0_ADDR_CONF); + switch (ecmd->port) { + case PORT_TP: + tmp &= ~(3<<14); + dev->if_port = 0; + break; + case PORT_AUI: + tmp |= (1<<14); + dev->if_port = 1; + break; + case PORT_BNC: + tmp |= (3<<14); + dev->if_port = 3; + break; + default: + return -EINVAL; + } + + outw(tmp, ioaddr + WN0_ADDR_CONF); + if (dev->if_port == 3) { + /* fire up the DC-DC convertor if BNC gets enabled */ + tmp = inw(ioaddr + WN0_ADDR_CONF); + if (tmp & (3 << 14)) { + outw(StartCoax, ioaddr + EL3_CMD); + udelay(800); + } else + return -EIO; + } + + EL3WINDOW(4); + tmp = inw(ioaddr + WN4_NETDIAG); + if (ecmd->duplex == DUPLEX_FULL) + tmp |= FD_ENABLE; + else + tmp &= ~FD_ENABLE; + outw(tmp, ioaddr + WN4_NETDIAG); + EL3WINDOW(1); + + return 0; +} + +static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); +} + +static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct el3_private *lp = netdev_priv(dev); + int ret; + + spin_lock_irq(&lp->lock); + ret = el3_netdev_get_ecmd(dev, ecmd); + spin_unlock_irq(&lp->lock); + return ret; +} + +static int el3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct el3_private *lp = netdev_priv(dev); + int ret; + + spin_lock_irq(&lp->lock); + ret = el3_netdev_set_ecmd(dev, ecmd); + spin_unlock_irq(&lp->lock); + return ret; +} + +static u32 el3_get_link(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + u32 ret; + + spin_lock_irq(&lp->lock); + ret = el3_link_ok(dev); + spin_unlock_irq(&lp->lock); + return ret; +} + +static u32 el3_get_msglevel(struct net_device *dev) +{ + return el3_debug; +} + +static void el3_set_msglevel(struct net_device *dev, u32 v) +{ + el3_debug = v; +} + +static const struct ethtool_ops ethtool_ops = { + .get_drvinfo = el3_get_drvinfo, + .get_settings = el3_get_settings, + .set_settings = el3_set_settings, + .get_link = el3_get_link, + .get_msglevel = el3_get_msglevel, + .set_msglevel = el3_set_msglevel, +}; + +static void +el3_down(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + netif_stop_queue(dev); + + /* Turn off statistics ASAP. We update lp->stats below. */ + outw(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. */ + outw(RxDisable, ioaddr + EL3_CMD); + outw(TxDisable, ioaddr + EL3_CMD); + + if (dev->if_port == 3) + /* Turn off thinnet power. Green! */ + outw(StopCoax, ioaddr + EL3_CMD); + else if (dev->if_port == 0) { + /* Disable link beat and jabber, if_port may change here next open(). 
*/ + EL3WINDOW(4); + outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA); + } + + outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); + + update_stats(dev); +} + +static void +el3_up(struct net_device *dev) +{ + int i, sw_info, net_diag; + int ioaddr = dev->base_addr; + + /* Activating the board required and does no harm otherwise */ + outw(0x0001, ioaddr + 4); + + /* Set the IRQ line. */ + outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ); + + /* Set the station address in window 2 each time opened. */ + EL3WINDOW(2); + + for (i = 0; i < 6; i++) + outb(dev->dev_addr[i], ioaddr + i); + + if ((dev->if_port & 0x03) == 3) /* BNC interface */ + /* Start the thinnet transceiver. We should really wait 50ms...*/ + outw(StartCoax, ioaddr + EL3_CMD); + else if ((dev->if_port & 0x03) == 0) { /* 10baseT interface */ + /* Combine secondary sw_info word (the adapter level) and primary + sw_info word (duplex setting plus other useless bits) */ + EL3WINDOW(0); + sw_info = (read_eeprom(ioaddr, 0x14) & 0x400f) | + (read_eeprom(ioaddr, 0x0d) & 0xBff0); + + EL3WINDOW(4); + net_diag = inw(ioaddr + WN4_NETDIAG); + net_diag = (net_diag | FD_ENABLE); /* temporarily assume full-duplex will be set */ + pr_info("%s: ", dev->name); + switch (dev->if_port & 0x0c) { + case 12: + /* force full-duplex mode if 3c5x9b */ + if (sw_info & 0x000f) { + pr_cont("Forcing 3c5x9b full-duplex mode"); + break; + } + case 8: + /* set full-duplex mode based on eeprom config setting */ + if ((sw_info & 0x000f) && (sw_info & 0x8000)) { + pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)"); + break; + } + default: + /* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */ + pr_cont("Setting 3c5x9/3c5x9B half-duplex mode"); + net_diag = (net_diag & ~FD_ENABLE); /* disable full duplex */ + } + + outw(net_diag, ioaddr + WN4_NETDIAG); + pr_cont(" if_port: %d, sw_info: %4.4x\n", dev->if_port, sw_info); + if (el3_debug > 3) + pr_debug("%s: 3c5x9 net diag word is now: %4.4x.\n", dev->name, net_diag); + /* Enable link beat and jabber check. */ + outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA); + } + + /* Switch to the stats window, and clear all stats by reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + EL3WINDOW(6); + for (i = 0; i < 9; i++) + inb(ioaddr + i); + inw(ioaddr + 10); + inw(ioaddr + 12); + + /* Switch to register set 1 for normal use. */ + EL3WINDOW(1); + + /* Accept b-case and phys addr only. */ + outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); + outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + + outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. 
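+ SetStatusEnb picks which events are visible in EL3_STATUS at all, while SetIntrEnb picks which of those also raise the interrupt line.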
*/ + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + outw(SetIntrEnb | IntLatch|TxAvailable|TxComplete|RxComplete|StatsFull, + ioaddr + EL3_CMD); + + netif_start_queue(dev); +} + +/* Power Management support functions */ +#ifdef CONFIG_PM + +static int +el3_suspend(struct device *pdev, pm_message_t state) +{ + unsigned long flags; + struct net_device *dev; + struct el3_private *lp; + int ioaddr; + + dev = dev_get_drvdata(pdev); + lp = netdev_priv(dev); + ioaddr = dev->base_addr; + + spin_lock_irqsave(&lp->lock, flags); + + if (netif_running(dev)) + netif_device_detach(dev); + + el3_down(dev); + outw(PowerDown, ioaddr + EL3_CMD); + + spin_unlock_irqrestore(&lp->lock, flags); + return 0; +} + +static int +el3_resume(struct device *pdev) +{ + unsigned long flags; + struct net_device *dev; + struct el3_private *lp; + int ioaddr; + + dev = dev_get_drvdata(pdev); + lp = netdev_priv(dev); + ioaddr = dev->base_addr; + + spin_lock_irqsave(&lp->lock, flags); + + outw(PowerUp, ioaddr + EL3_CMD); + EL3WINDOW(0); + el3_up(dev); + + if (netif_running(dev)) + netif_device_attach(dev); + + spin_unlock_irqrestore(&lp->lock, flags); + return 0; +} + +#endif /* CONFIG_PM */ + +module_param(debug,int, 0); +module_param_array(irq, int, NULL, 0); +module_param(max_interrupt_work, int, 0); +MODULE_PARM_DESC(debug, "debug level (0-6)"); +MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); +MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt"); +#ifdef CONFIG_PNP +module_param(nopnp, int, 0); +MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)"); +#endif /* CONFIG_PNP */ +MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B, 3c529, 3c579) ethernet driver"); +MODULE_LICENSE("GPL"); + +static int __init el3_init_module(void) +{ + int ret = 0; + + if (debug >= 0) + el3_debug = debug; + +#ifdef CONFIG_PNP + if (!nopnp) { + ret = pnp_register_driver(&el3_pnp_driver); + if (!ret) + pnp_registered = 1; + } +#endif + /* Select an open I/O location at 0x1*0 to do ISA contention select. 
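+ A location counts as free when the 0xff written below reads straight back (an undriven ISA port floats, so bit 0 comes back set); anything else is released and the next 0x1*0 port is tried.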
*/ + /* Start with 0x110 to avoid some sound cards.*/ + for (id_port = 0x110 ; id_port < 0x200; id_port += 0x10) { + if (!request_region(id_port, 1, "3c509-control")) + continue; + outb(0x00, id_port); + outb(0xff, id_port); + if (inb(id_port) & 0x01) + break; + else + release_region(id_port, 1); + } + if (id_port >= 0x200) { + id_port = 0; + pr_err("No I/O port available for 3c509 activation.\n"); + } else { + ret = isa_register_driver(&el3_isa_driver, EL3_MAX_CARDS); + if (!ret) + isa_registered = 1; + } +#ifdef CONFIG_EISA + ret = eisa_driver_register(&el3_eisa_driver); + if (!ret) + eisa_registered = 1; +#endif +#ifdef CONFIG_MCA + ret = mca_register_driver(&el3_mca_driver); + if (!ret) + mca_registered = 1; +#endif + +#ifdef CONFIG_PNP + if (pnp_registered) + ret = 0; +#endif + if (isa_registered) + ret = 0; +#ifdef CONFIG_EISA + if (eisa_registered) + ret = 0; +#endif +#ifdef CONFIG_MCA + if (mca_registered) + ret = 0; +#endif + return ret; +} + +static void __exit el3_cleanup_module(void) +{ +#ifdef CONFIG_PNP + if (pnp_registered) + pnp_unregister_driver(&el3_pnp_driver); +#endif + if (isa_registered) + isa_unregister_driver(&el3_isa_driver); + if (id_port) + release_region(id_port, 1); +#ifdef CONFIG_EISA + if (eisa_registered) + eisa_driver_unregister(&el3_eisa_driver); +#endif +#ifdef CONFIG_MCA + if (mca_registered) + mca_unregister_driver(&el3_mca_driver); +#endif +} + +module_init (el3_init_module); +module_exit (el3_cleanup_module); diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c new file mode 100644 index 0000000..d2bb4b2 --- /dev/null +++ b/drivers/net/ethernet/3com/3c515.c @@ -0,0 +1,1584 @@ +/* + Written 1997-1998 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + This driver is for the 3Com ISA EtherLink XL "Corkscrew" 3c515 ethercard. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + + 2000/2/2- Added support for kernel-level ISAPnP + by Stephen Frost and Alessandro Zummo + Cleaned up for 2.3.x/softnet by Jeff Garzik and Alan Cox. + + 2001/11/17 - Added ethtool support (jgarzik) + + 2002/10/28 - Locking updates for 2.5 (alan@lxorguk.ukuu.org.uk) + +*/ + +#define DRV_NAME "3c515" +#define DRV_VERSION "0.99t-ac" +#define DRV_RELDATE "28-Oct-2002" + +static char *version = +DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " becker@scyld.com and others\n"; + +#define CORKSCREW 1 + +/* "Knobs" that adjust features and parameters. */ +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1512 effectively disables this feature. */ +static int rx_copybreak = 200; + +/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ +static const int mtu = 1500; + +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 20; + +/* Enable the automatic media selection code -- usually set. */ +#define AUTOMEDIA 1 + +/* Allow the use of fragment bus master transfers instead of only + programmed-I/O for Vortex cards. Full-bus-master transfers are always + enabled by default on Boomerang cards. If VORTEX_BUS_MASTER is defined, + the feature may be turned on using 'options'. */ +#define VORTEX_BUS_MASTER + +/* A few values that may be tweaked. */ +/* Keep the ring sizes a power of two for efficiency. 
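+ (a power-of-two size lets the index wrap with a binary and, e.g. ring[cur & (SIZE - 1)], rather than a divide)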
*/ +#define TX_RING_SIZE 16 +#define RX_RING_SIZE 16 +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define NEW_MULTICAST +#include + +#define MAX_UNITS 8 + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("3Com 3c515 Corkscrew driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* "Knobs" for adjusting internal parameters. */ +/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */ +#define DRIVER_DEBUG 1 +/* Some values here only for performance evaluation and path-coverage + debugging. */ +static int rx_nocopy, rx_copy, queued_packet; + +/* Number of times to check to see if the Tx FIFO has space, used in some + limited cases. */ +#define WAIT_TX_AVAIL 200 + +/* Operational parameter that usually are not changed. */ +#define TX_TIMEOUT ((4*HZ)/10) /* Time in jiffies before concluding Tx hung */ + +/* The size here is somewhat misleading: the Corkscrew also uses the ISA + aliased registers at +0x400. + */ +#define CORKSCREW_TOTAL_SIZE 0x20 + +#ifdef DRIVER_DEBUG +static int corkscrew_debug = DRIVER_DEBUG; +#else +static int corkscrew_debug = 1; +#endif + +#define CORKSCREW_ID 10 + +/* + Theory of Operation + +I. Board Compatibility + +This device driver is designed for the 3Com 3c515 ISA Fast EtherLink XL, +3Com's ISA bus adapter for Fast Ethernet. Due to the unique I/O port layout, +it's not practical to integrate this driver with the other EtherLink drivers. + +II. Board-specific settings + +The Corkscrew has an EEPROM for configuration, but no special settings are +needed for Linux. + +III. Driver operation + +The 3c515 series use an interface that's very similar to the 3c900 "Boomerang" +PCI cards, with the bus master interface extensively modified to work with +the ISA bus. + +The card is capable of full-bus-master transfers with separate +lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, +DEC Tulip and Intel Speedo3. + +This driver uses a "RX_COPYBREAK" scheme rather than a fixed intermediate +receive buffer. This scheme allocates full-sized skbuffs as receive +buffers. The value RX_COPYBREAK is used as the copying breakpoint: it is +chosen to trade-off the memory wasted by passing the full-sized skbuff to +the queue layer for all frames vs. the copying cost of copying a frame to a +correctly-sized skbuff. + + +IIIC. Synchronization +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the netif +layer. The other thread is the interrupt handler, which is single +threaded by the hardware and other software. + +IV. Notes + +Thanks to Terry Murphy of 3Com for providing documentation and a development +board. + +The names "Vortex", "Boomerang" and "Corkscrew" are the internal 3Com +project names. I use these names to eliminate confusion -- 3Com product +numbers and names are very similar and often confused. + +The new chips support both ethernet (1.5K) and FDDI (4.5K) frame sizes! +This driver only supports ethernet frames because of the recent MTU limit +of 1.5K, but the changes to support 4.5K are minimal. +*/ + +/* Operational definitions. + These are not used by other compilation units and thus are not + exported in a ".h" file. + + First the windows. 
There are eight register windows, with the command + and status registers available in each. + */ +#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) +#define EL3_CMD 0x0e +#define EL3_STATUS 0x0e + +/* The top five bits written to EL3_CMD are a command, the lower + 11 bits are the parameter, if applicable. + Note that 11 parameter bits were fine for ethernet, but the new chips + can handle FDDI length frames (~4500 octets) and now parameters count + 32-bit 'Dwords' rather than octets. */ + +enum corkscrew_cmd { + TotalReset = 0 << 11, SelectWindow = 1 << 11, StartCoax = 2 << 11, + RxDisable = 3 << 11, RxEnable = 4 << 11, RxReset = 5 << 11, + UpStall = 6 << 11, UpUnstall = (6 << 11) + 1, DownStall = (6 << 11) + 2, + DownUnstall = (6 << 11) + 3, RxDiscard = 8 << 11, TxEnable = 9 << 11, + TxDisable = 10 << 11, TxReset = 11 << 11, FakeIntr = 12 << 11, + AckIntr = 13 << 11, SetIntrEnb = 14 << 11, SetStatusEnb = 15 << 11, + SetRxFilter = 16 << 11, SetRxThreshold = 17 << 11, + SetTxThreshold = 18 << 11, SetTxStart = 19 << 11, StartDMAUp = 20 << 11, + StartDMADown = (20 << 11) + 1, StatsEnable = 21 << 11, + StatsDisable = 22 << 11, StopCoax = 23 << 11, +}; + +/* The SetRxFilter command accepts the following classes: */ +enum RxFilter { + RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 +}; + +/* Bits in the general status register. */ +enum corkscrew_status { + IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, + TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, + IntReq = 0x0040, StatsFull = 0x0080, + DMADone = 1 << 8, DownComplete = 1 << 9, UpComplete = 1 << 10, + DMAInProgress = 1 << 11, /* DMA controller is still busy. */ + CmdInProgress = 1 << 12, /* EL3_CMD is still busy. */ +}; + +/* Register window 1 offsets, the window used in normal operation. + On the Corkscrew this window is always mapped at offsets 0x10-0x1f. */ +enum Window1 { + TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, + RxStatus = 0x18, Timer = 0x1A, TxStatus = 0x1B, + TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */ +}; +enum Window0 { + Wn0IRQ = 0x08, +#if defined(CORKSCREW) + Wn0EepromCmd = 0x200A, /* Corkscrew EEPROM command register. */ + Wn0EepromData = 0x200C, /* Corkscrew EEPROM results register. */ +#else + Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */ + Wn0EepromData = 12, /* Window 0: EEPROM results register. */ +#endif +}; +enum Win0_EEPROM_bits { + EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0, + EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ + EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ +}; + +/* EEPROM locations. */ +enum eeprom_offset { + PhysAddr01 = 0, PhysAddr23 = 1, PhysAddr45 = 2, ModelID = 3, + EtherLink3ID = 7, +}; + +enum Window3 { /* Window 3: MAC/config bits. */ + Wn3_Config = 0, Wn3_MAC_Ctrl = 6, Wn3_Options = 8, +}; +enum wn3_config { + Ram_size = 7, + Ram_width = 8, + Ram_speed = 0x30, + Rom_size = 0xc0, + Ram_split_shift = 16, + Ram_split = 3 << Ram_split_shift, + Xcvr_shift = 20, + Xcvr = 7 << Xcvr_shift, + Autoselect = 0x1000000, +}; + +enum Window4 { + Wn4_NetDiag = 6, Wn4_Media = 10, /* Window 4: Xcvr/media bits. */ +}; +enum Win4_Media_bits { + Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */ + Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */ + Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */ + Media_LnkBeat = 0x0800, +}; +enum Window7 { /* Window 7: Bus Master control.
*/ + Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12, +}; + +/* Boomerang-style bus master control registers. Note ISA aliases! */ +enum MasterCtrl { + PktStatus = 0x400, DownListPtr = 0x404, FragAddr = 0x408, FragLen = + 0x40c, + TxFreeThreshold = 0x40f, UpPktStatus = 0x410, UpListPtr = 0x418, +}; + +/* The Rx and Tx descriptor lists. + Caution Alpha hackers: these types are 32 bits! Note also the 8 byte + alignment constraint on tx_ring[] and rx_ring[]. */ +struct boom_rx_desc { + u32 next; + s32 status; + u32 addr; + s32 length; +}; + +/* Values for the Rx status entry. */ +enum rx_desc_status { + RxDComplete = 0x00008000, RxDError = 0x4000, + /* See boomerang_rx() for actual error bits */ +}; + +struct boom_tx_desc { + u32 next; + s32 status; + u32 addr; + s32 length; +}; + +struct corkscrew_private { + const char *product_name; + struct list_head list; + struct net_device *our_dev; + /* The Rx and Tx rings are here to keep them quad-word-aligned. */ + struct boom_rx_desc rx_ring[RX_RING_SIZE]; + struct boom_tx_desc tx_ring[TX_RING_SIZE]; + /* The addresses of transmit- and receive-in-place skbuffs. */ + struct sk_buff *rx_skbuff[RX_RING_SIZE]; + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */ + struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ + struct timer_list timer; /* Media selection timer. */ + int capabilities; /* Adapter capabilities word. */ + int options; /* User-settable misc. driver options. */ + int last_rx_packets; /* For media autoselection. */ + unsigned int available_media:8, /* From Wn3_Options */ + media_override:3, /* Passed-in media type. */ + default_media:3, /* Read from the EEPROM. */ + full_duplex:1, autoselect:1, bus_master:1, /* Vortex can only do a fragment bus-m. */ + full_bus_master_tx:1, full_bus_master_rx:1, /* Boomerang */ + tx_full:1; + spinlock_t lock; + struct device *dev; +}; + +/* The action to take with a media selection timer tick. + Note that we deviate from the 3Com order by checking 10base2 before AUI. + */ +enum xcvr_types { + XCVR_10baseT = 0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx, + XCVR_100baseFx, XCVR_MII = 6, XCVR_Default = 8, +}; + +static struct media_table { + char *name; + unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ + mask:8, /* The transceiver-present bit in Wn3_Config. */ + next:8; /* The media type to try next. */ + short wait; /* Time before we check media status.
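The wait member gives each transceiver time to show life before the check; the next member chains the table into the probe order used by corkscrew_open() and corkscrew_timer() below. Starting from 100baseTX the walk runs 100baseFX, MII, 10baseT, 10base2, AUI and finally Default, at which point the timer falls back to the EEPROM default media.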
*/ +} media_tbl[] = { + { "10baseT", Media_10TP, 0x08, XCVR_10base2, (14 * HZ) / 10 }, + { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1 * HZ) / 10}, + { "undefined", 0, 0x80, XCVR_10baseT, 10000}, + { "10base2", 0, 0x10, XCVR_AUI, (1 * HZ) / 10}, + { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14 * HZ) / 10}, + { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14 * HZ) / 10}, + { "MII", 0, 0x40, XCVR_10baseT, 3 * HZ}, + { "undefined", 0, 0x01, XCVR_10baseT, 10000}, + { "Default", 0, 0xFF, XCVR_10baseT, 10000}, +}; + +#ifdef __ISAPNP__ +static struct isapnp_device_id corkscrew_isapnp_adapters[] = { + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, + ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5051), + (long) "3Com Fast EtherLink ISA" }, + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(isapnp, corkscrew_isapnp_adapters); + +static int nopnp; +#endif /* __ISAPNP__ */ + +static struct net_device *corkscrew_scan(int unit); +static int corkscrew_setup(struct net_device *dev, int ioaddr, + struct pnp_dev *idev, int card_number); +static int corkscrew_open(struct net_device *dev); +static void corkscrew_timer(unsigned long arg); +static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int corkscrew_rx(struct net_device *dev); +static void corkscrew_timeout(struct net_device *dev); +static int boomerang_rx(struct net_device *dev); +static irqreturn_t corkscrew_interrupt(int irq, void *dev_id); +static int corkscrew_close(struct net_device *dev); +static void update_stats(int addr, struct net_device *dev); +static struct net_device_stats *corkscrew_get_stats(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static const struct ethtool_ops netdev_ethtool_ops; + + +/* + Unfortunately maximizing the shared code between the integrated and + module version of the driver results in a complicated set of initialization + procedures. + init_module() -- modules / tc59x_init() -- built-in + The wrappers for corkscrew_scan() + corkscrew_scan() The common routine that scans for PCI and EISA cards + corkscrew_found_device() Allocate a device structure when we find a card. + Different versions exist for modules and built-in. + corkscrew_probe1() Fill in the device structure -- this is separated + so that the modules code can put it in dev->init. +*/ +/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ +/* Note: this is the only limit on the number of cards supported!! */ +static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1, }; + +#ifdef MODULE +static int debug = -1; + +module_param(debug, int, 0); +module_param_array(options, int, NULL, 0); +module_param(rx_copybreak, int, 0); +module_param(max_interrupt_work, int, 0); +MODULE_PARM_DESC(debug, "3c515 debug level (0-6)"); +MODULE_PARM_DESC(options, "3c515: Bits 0-2: media type, bit 3: full duplex, bit 4: bus mastering"); +MODULE_PARM_DESC(rx_copybreak, "3c515 copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(max_interrupt_work, "3c515 maximum events handled per interrupt"); + +/* A list of all installed Vortex devices, for removing the driver module. */ +/* we will need locking (and refcounting) if we ever use it for more */ +static LIST_HEAD(root_corkscrew_dev); + +int init_module(void) +{ + int found = 0; + if (debug >= 0) + corkscrew_debug = debug; + if (corkscrew_debug) + pr_debug("%s", version); + while (corkscrew_scan(-1)) + found++; + return found ? 
0 : -ENODEV; +} + +#else +struct net_device *tc515_probe(int unit) +{ + struct net_device *dev = corkscrew_scan(unit); + static int printed; + + if (!dev) + return ERR_PTR(-ENODEV); + + if (corkscrew_debug > 0 && !printed) { + printed = 1; + pr_debug("%s", version); + } + + return dev; +} +#endif /* not MODULE */ + +static int check_device(unsigned ioaddr) +{ + int timer; + + if (!request_region(ioaddr, CORKSCREW_TOTAL_SIZE, "3c515")) + return 0; + /* Check the resource configuration for a matching ioaddr. */ + if ((inw(ioaddr + 0x2002) & 0x1f0) != (ioaddr & 0x1f0)) { + release_region(ioaddr, CORKSCREW_TOTAL_SIZE); + return 0; + } + /* Verify by reading the device ID from the EEPROM. */ + outw(EEPROM_Read + 7, ioaddr + Wn0EepromCmd); + /* Pause for at least 162 us. for the read to take place. */ + for (timer = 4; timer >= 0; timer--) { + udelay(162); + if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0) + break; + } + if (inw(ioaddr + Wn0EepromData) != 0x6d50) { + release_region(ioaddr, CORKSCREW_TOTAL_SIZE); + return 0; + } + return 1; +} + +static void cleanup_card(struct net_device *dev) +{ + struct corkscrew_private *vp = netdev_priv(dev); + list_del_init(&vp->list); + if (dev->dma) + free_dma(dev->dma); + outw(TotalReset, dev->base_addr + EL3_CMD); + release_region(dev->base_addr, CORKSCREW_TOTAL_SIZE); + if (vp->dev) + pnp_device_detach(to_pnp_dev(vp->dev)); +} + +static struct net_device *corkscrew_scan(int unit) +{ + struct net_device *dev; + static int cards_found = 0; + static int ioaddr; + int err; +#ifdef __ISAPNP__ + short i; + static int pnp_cards; +#endif + + dev = alloc_etherdev(sizeof(struct corkscrew_private)); + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + +#ifdef __ISAPNP__ + if(nopnp == 1) + goto no_pnp; + for(i=0; corkscrew_isapnp_adapters[i].vendor != 0; i++) { + struct pnp_dev *idev = NULL; + int irq; + while((idev = pnp_find_dev(NULL, + corkscrew_isapnp_adapters[i].vendor, + corkscrew_isapnp_adapters[i].function, + idev))) { + + if (pnp_device_attach(idev) < 0) + continue; + if (pnp_activate_dev(idev) < 0) { + pr_warning("pnp activate failed (out of resources?)\n"); + pnp_device_detach(idev); + continue; + } + if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) { + pnp_device_detach(idev); + continue; + } + ioaddr = pnp_port_start(idev, 0); + irq = pnp_irq(idev, 0); + if (!check_device(ioaddr)) { + pnp_device_detach(idev); + continue; + } + if(corkscrew_debug) + pr_debug("ISAPNP reports %s at i/o 0x%x, irq %d\n", + (char*) corkscrew_isapnp_adapters[i].driver_data, ioaddr, irq); + pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n", + inl(ioaddr + 0x2002), inw(ioaddr + 0x2000)); + /* irq = inw(ioaddr + 0x2002) & 15; */ /* Use the irq from isapnp */ + SET_NETDEV_DEV(dev, &idev->dev); + pnp_cards++; + err = corkscrew_setup(dev, ioaddr, idev, cards_found++); + if (!err) + return dev; + cleanup_card(dev); + } + } +no_pnp: +#endif /* __ISAPNP__ */ + + /* Check all locations on the ISA bus -- evil! 
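check_device() above accepts a candidate port only when the card echoes it back: the resource configuration register at ioaddr + 0x2002 must match the candidate address in bits 4-8, and EEPROM word 7 must read back the 3Com ID word 0x6d50. As a worked example, a card strapped to 0x240 passes the first test only if

	(inw(0x240 + 0x2002) & 0x1f0) == (0x240 & 0x1f0)	/* == 0x040 */

and only then is the slower EEPROM ID read attempted.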
*/ + for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x20) { + if (!check_device(ioaddr)) + continue; + + pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n", + inl(ioaddr + 0x2002), inw(ioaddr + 0x2000)); + err = corkscrew_setup(dev, ioaddr, NULL, cards_found++); + if (!err) + return dev; + cleanup_card(dev); + } + free_netdev(dev); + return NULL; +} + + +static const struct net_device_ops netdev_ops = { + .ndo_open = corkscrew_open, + .ndo_stop = corkscrew_close, + .ndo_start_xmit = corkscrew_start_xmit, + .ndo_tx_timeout = corkscrew_timeout, + .ndo_get_stats = corkscrew_get_stats, + .ndo_set_multicast_list = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + + +static int corkscrew_setup(struct net_device *dev, int ioaddr, + struct pnp_dev *idev, int card_number) +{ + struct corkscrew_private *vp = netdev_priv(dev); + unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ + int i; + int irq; + +#ifdef __ISAPNP__ + if (idev) { + irq = pnp_irq(idev, 0); + vp->dev = &idev->dev; + } else { + irq = inw(ioaddr + 0x2002) & 15; + } +#else + irq = inw(ioaddr + 0x2002) & 15; +#endif + + dev->base_addr = ioaddr; + dev->irq = irq; + dev->dma = inw(ioaddr + 0x2000) & 7; + vp->product_name = "3c515"; + vp->options = dev->mem_start; + vp->our_dev = dev; + + if (!vp->options) { + if (card_number >= MAX_UNITS) + vp->options = -1; + else + vp->options = options[card_number]; + } + + if (vp->options >= 0) { + vp->media_override = vp->options & 7; + if (vp->media_override == 2) + vp->media_override = 0; + vp->full_duplex = (vp->options & 8) ? 1 : 0; + vp->bus_master = (vp->options & 16) ? 1 : 0; + } else { + vp->media_override = 7; + vp->full_duplex = 0; + vp->bus_master = 0; + } +#ifdef MODULE + list_add(&vp->list, &root_corkscrew_dev); +#endif + + pr_info("%s: 3Com %s at %#3x,", dev->name, vp->product_name, ioaddr); + + spin_lock_init(&vp->lock); + + /* Read the station address from the EEPROM. */ + EL3WINDOW(0); + for (i = 0; i < 0x18; i++) { + __be16 *phys_addr = (__be16 *) dev->dev_addr; + int timer; + outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd); + /* Pause for at least 162 us. for the read to take place. */ + for (timer = 4; timer >= 0; timer--) { + udelay(162); + if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0) + break; + } + eeprom[i] = inw(ioaddr + Wn0EepromData); + checksum ^= eeprom[i]; + if (i < 3) + phys_addr[i] = htons(eeprom[i]); + } + checksum = (checksum ^ (checksum >> 8)) & 0xff; + if (checksum != 0x00) + pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); + pr_cont(" %pM", dev->dev_addr); + if (eeprom[16] == 0x11c7) { /* Corkscrew */ + if (request_dma(dev->dma, "3c515")) { + pr_cont(", DMA %d allocation failed", dev->dma); + dev->dma = 0; + } else + pr_cont(", DMA %d", dev->dma); + } + pr_cont(", IRQ %d\n", dev->irq); + /* Tell them about an invalid IRQ. */ + if (corkscrew_debug && (dev->irq <= 0 || dev->irq > 15)) + pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n"); + + { + static const char * const ram_split[] = { + "5:3", "3:1", "1:1", "3:5" + }; + __u32 config; + EL3WINDOW(3); + vp->available_media = inw(ioaddr + Wn3_Options); + config = inl(ioaddr + Wn3_Config); + if (corkscrew_debug > 1) + pr_info(" Internal config register is %4.4x, transceivers %#x.\n", + config, inw(ioaddr + Wn3_Options)); + pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", + 8 << config & Ram_size, + config & Ram_width ? 
"word" : "byte", + ram_split[(config & Ram_split) >> Ram_split_shift], + config & Autoselect ? "autoselect/" : "", + media_tbl[(config & Xcvr) >> Xcvr_shift].name); + vp->default_media = (config & Xcvr) >> Xcvr_shift; + vp->autoselect = config & Autoselect ? 1 : 0; + dev->if_port = vp->default_media; + } + if (vp->media_override != 7) { + pr_info(" Media override to transceiver type %d (%s).\n", + vp->media_override, + media_tbl[vp->media_override].name); + dev->if_port = vp->media_override; + } + + vp->capabilities = eeprom[16]; + vp->full_bus_master_tx = (vp->capabilities & 0x20) ? 1 : 0; + /* Rx is broken at 10mbps, so we always disable it. */ + /* vp->full_bus_master_rx = 0; */ + vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0; + + /* The 3c51x-specific entries in the device structure. */ + dev->netdev_ops = &netdev_ops; + dev->watchdog_timeo = (400 * HZ) / 1000; + dev->ethtool_ops = &netdev_ethtool_ops; + + return register_netdev(dev); +} + + +static int corkscrew_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct corkscrew_private *vp = netdev_priv(dev); + __u32 config; + int i; + + /* Before initializing select the active media port. */ + EL3WINDOW(3); + if (vp->full_duplex) + outb(0x20, ioaddr + Wn3_MAC_Ctrl); /* Set the full-duplex bit. */ + config = inl(ioaddr + Wn3_Config); + + if (vp->media_override != 7) { + if (corkscrew_debug > 1) + pr_info("%s: Media override to transceiver %d (%s).\n", + dev->name, vp->media_override, + media_tbl[vp->media_override].name); + dev->if_port = vp->media_override; + } else if (vp->autoselect) { + /* Find first available media type, starting with 100baseTx. */ + dev->if_port = 4; + while (!(vp->available_media & media_tbl[dev->if_port].mask)) + dev->if_port = media_tbl[dev->if_port].next; + + if (corkscrew_debug > 1) + pr_debug("%s: Initial media type %s.\n", + dev->name, media_tbl[dev->if_port].name); + + init_timer(&vp->timer); + vp->timer.expires = jiffies + media_tbl[dev->if_port].wait; + vp->timer.data = (unsigned long) dev; + vp->timer.function = corkscrew_timer; /* timer handler */ + add_timer(&vp->timer); + } else + dev->if_port = vp->default_media; + + config = (config & ~Xcvr) | (dev->if_port << Xcvr_shift); + outl(config, ioaddr + Wn3_Config); + + if (corkscrew_debug > 1) { + pr_debug("%s: corkscrew_open() InternalConfig %8.8x.\n", + dev->name, config); + } + + outw(TxReset, ioaddr + EL3_CMD); + for (i = 20; i >= 0; i--) + if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + + outw(RxReset, ioaddr + EL3_CMD); + /* Wait a few ticks for the RxReset command to complete. */ + for (i = 20; i >= 0; i--) + if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + + outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); + + /* Use the now-standard shared IRQ implementation. */ + if (vp->capabilities == 0x11c7) { + /* Corkscrew: Cannot share ISA resources. */ + if (dev->irq == 0 || + dev->dma == 0 || + request_irq(dev->irq, corkscrew_interrupt, 0, + vp->product_name, dev)) + return -EAGAIN; + enable_dma(dev->dma); + set_dma_mode(dev->dma, DMA_MODE_CASCADE); + } else if (request_irq(dev->irq, corkscrew_interrupt, IRQF_SHARED, + vp->product_name, dev)) { + return -EAGAIN; + } + + if (corkscrew_debug > 1) { + EL3WINDOW(4); + pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n", + dev->name, dev->irq, inw(ioaddr + Wn4_Media)); + } + + /* Set the station address and mask in window 2 each time opened. 
*/ + EL3WINDOW(2); + for (i = 0; i < 6; i++) + outb(dev->dev_addr[i], ioaddr + i); + for (; i < 12; i += 2) + outw(0, ioaddr + i); + + if (dev->if_port == 3) + /* Start the thinnet transceiver. We should really wait 50ms... */ + outw(StartCoax, ioaddr + EL3_CMD); + EL3WINDOW(4); + outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP | Media_SQE)) | + media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media); + + /* Switch to the stats window, and clear all stats by reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + EL3WINDOW(6); + for (i = 0; i < 10; i++) + inb(ioaddr + i); + inw(ioaddr + 10); + inw(ioaddr + 12); + /* New: On the Vortex we must also clear the BadSSD counter. */ + EL3WINDOW(4); + inb(ioaddr + 12); + /* ..and on the Boomerang we enable the extra statistics bits. */ + outw(0x0040, ioaddr + Wn4_NetDiag); + + /* Switch to register set 7 for normal use. */ + EL3WINDOW(7); + + if (vp->full_bus_master_rx) { /* Boomerang bus master. */ + vp->cur_rx = vp->dirty_rx = 0; + if (corkscrew_debug > 2) + pr_debug("%s: Filling in the Rx ring.\n", dev->name); + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb; + if (i < (RX_RING_SIZE - 1)) + vp->rx_ring[i].next = + isa_virt_to_bus(&vp->rx_ring[i + 1]); + else + vp->rx_ring[i].next = 0; + vp->rx_ring[i].status = 0; /* Clear complete bit. */ + vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000; + skb = dev_alloc_skb(PKT_BUF_SZ); + vp->rx_skbuff[i] = skb; + if (skb == NULL) + break; /* Bad news! */ + skb->dev = dev; /* Mark as being used by this device. */ + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); + } + if (i != 0) + vp->rx_ring[i - 1].next = + isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ + outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); + } + if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ + vp->cur_tx = vp->dirty_tx = 0; + outb(PKT_BUF_SZ >> 8, ioaddr + TxFreeThreshold); /* Room for a packet. */ + /* Clear the Tx ring. */ + for (i = 0; i < TX_RING_SIZE; i++) + vp->tx_skbuff[i] = NULL; + outl(0, ioaddr + DownListPtr); + } + /* Set receiver mode: presumably accept b-case and phys addr only. */ + set_rx_mode(dev); + outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + + netif_start_queue(dev); + + outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + outw(SetStatusEnb | AdapterFailure | IntReq | StatsFull | + (vp->full_bus_master_tx ? DownComplete : TxAvailable) | + (vp->full_bus_master_rx ? UpComplete : RxComplete) | + (vp->bus_master ? DMADone : 0), ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. */ + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull + | (vp->bus_master ? 
DMADone : 0) | UpComplete | DownComplete, + ioaddr + EL3_CMD); + + return 0; +} + +static void corkscrew_timer(unsigned long data) +{ +#ifdef AUTOMEDIA + struct net_device *dev = (struct net_device *) data; + struct corkscrew_private *vp = netdev_priv(dev); + int ioaddr = dev->base_addr; + unsigned long flags; + int ok = 0; + + if (corkscrew_debug > 1) + pr_debug("%s: Media selection timer tick happened, %s.\n", + dev->name, media_tbl[dev->if_port].name); + + spin_lock_irqsave(&vp->lock, flags); + + { + int old_window = inw(ioaddr + EL3_CMD) >> 13; + int media_status; + EL3WINDOW(4); + media_status = inw(ioaddr + Wn4_Media); + switch (dev->if_port) { + case 0: + case 4: + case 5: /* 10baseT, 100baseTX, 100baseFX */ + if (media_status & Media_LnkBeat) { + ok = 1; + if (corkscrew_debug > 1) + pr_debug("%s: Media %s has link beat, %x.\n", + dev->name, + media_tbl[dev->if_port].name, + media_status); + } else if (corkscrew_debug > 1) + pr_debug("%s: Media %s has no link beat, %x.\n", + dev->name, + media_tbl[dev->if_port].name, + media_status); + + break; + default: /* Other media types handled by Tx timeouts. */ + if (corkscrew_debug > 1) + pr_debug("%s: Media %s has no indication, %x.\n", + dev->name, + media_tbl[dev->if_port].name, + media_status); + ok = 1; + } + if (!ok) { + __u32 config; + + do { + dev->if_port = + media_tbl[dev->if_port].next; + } + while (!(vp->available_media & media_tbl[dev->if_port].mask)); + + if (dev->if_port == 8) { /* Go back to default. */ + dev->if_port = vp->default_media; + if (corkscrew_debug > 1) + pr_debug("%s: Media selection failing, using default %s port.\n", + dev->name, + media_tbl[dev->if_port].name); + } else { + if (corkscrew_debug > 1) + pr_debug("%s: Media selection failed, now trying %s port.\n", + dev->name, + media_tbl[dev->if_port].name); + vp->timer.expires = jiffies + media_tbl[dev->if_port].wait; + add_timer(&vp->timer); + } + outw((media_status & ~(Media_10TP | Media_SQE)) | + media_tbl[dev->if_port].media_bits, + ioaddr + Wn4_Media); + + EL3WINDOW(3); + config = inl(ioaddr + Wn3_Config); + config = (config & ~Xcvr) | (dev->if_port << Xcvr_shift); + outl(config, ioaddr + Wn3_Config); + + outw(dev->if_port == 3 ? StartCoax : StopCoax, + ioaddr + EL3_CMD); + } + EL3WINDOW(old_window); + } + + spin_unlock_irqrestore(&vp->lock, flags); + if (corkscrew_debug > 1) + pr_debug("%s: Media selection timer finished, %s.\n", + dev->name, media_tbl[dev->if_port].name); + +#endif /* AUTOMEDIA */ +} + +static void corkscrew_timeout(struct net_device *dev) +{ + int i; + struct corkscrew_private *vp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + pr_warning("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", + dev->name, inb(ioaddr + TxStatus), + inw(ioaddr + EL3_STATUS)); + /* Slight code bloat to be user friendly. */ + if ((inb(ioaddr + TxStatus) & 0x88) == 0x88) + pr_warning("%s: Transmitter encountered 16 collisions --" + " network cable problem?\n", dev->name); +#ifndef final_version + pr_debug(" Flags; bus-master %d, full %d; dirty %d current %d.\n", + vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, + vp->cur_tx); + pr_debug(" Down list %8.8x vs. %p.\n", inl(ioaddr + DownListPtr), + &vp->tx_ring[0]); + for (i = 0; i < TX_RING_SIZE; i++) { + pr_debug(" %d: %p length %8.8x status %8.8x\n", i, + &vp->tx_ring[i], + vp->tx_ring[i].length, vp->tx_ring[i].status); + } +#endif + /* Issue TX_RESET and TX_START commands.
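Each reset below is followed by the same bounded busy-poll on CmdInProgress that recurs throughout this file; as a sketch, the idiom factored into a hypothetical helper (not part of the patch):

	/* Bounded poll: spin until the previous EL3 command completes. */
	static void corkscrew_wait_cmd(int ioaddr)
	{
		int i;

		for (i = 20; i >= 0; i--)
			if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
				break;
	}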
*/ + outw(TxReset, ioaddr + EL3_CMD); + for (i = 20; i >= 0; i--) + if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + outw(TxEnable, ioaddr + EL3_CMD); + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + netif_wake_queue(dev); +} + +static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct corkscrew_private *vp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + /* Block a timer-based transmit from overlapping. */ + + netif_stop_queue(dev); + + if (vp->full_bus_master_tx) { /* BOOMERANG bus-master */ + /* Calculate the next Tx descriptor entry. */ + int entry = vp->cur_tx % TX_RING_SIZE; + struct boom_tx_desc *prev_entry; + unsigned long flags; + int i; + + if (vp->tx_full) /* No room to transmit with */ + return NETDEV_TX_BUSY; + if (vp->cur_tx != 0) + prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE]; + else + prev_entry = NULL; + if (corkscrew_debug > 3) + pr_debug("%s: Trying to send a packet, Tx index %d.\n", + dev->name, vp->cur_tx); + /* vp->tx_full = 1; */ + vp->tx_skbuff[entry] = skb; + vp->tx_ring[entry].next = 0; + vp->tx_ring[entry].addr = isa_virt_to_bus(skb->data); + vp->tx_ring[entry].length = skb->len | 0x80000000; + vp->tx_ring[entry].status = skb->len | 0x80000000; + + spin_lock_irqsave(&vp->lock, flags); + outw(DownStall, ioaddr + EL3_CMD); + /* Wait for the stall to complete. */ + for (i = 20; i >= 0; i--) + if ((inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0) + break; + if (prev_entry) + prev_entry->next = isa_virt_to_bus(&vp->tx_ring[entry]); + if (inl(ioaddr + DownListPtr) == 0) { + outl(isa_virt_to_bus(&vp->tx_ring[entry]), + ioaddr + DownListPtr); + queued_packet++; + } + outw(DownUnstall, ioaddr + EL3_CMD); + spin_unlock_irqrestore(&vp->lock, flags); + + vp->cur_tx++; + if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) + vp->tx_full = 1; + else { /* Clear previous interrupt enable. */ + if (prev_entry) + prev_entry->status &= ~0x80000000; + netif_wake_queue(dev); + } + return NETDEV_TX_OK; + } + /* Put out the doubleword header... */ + outl(skb->len, ioaddr + TX_FIFO); + dev->stats.tx_bytes += skb->len; +#ifdef VORTEX_BUS_MASTER + if (vp->bus_master) { + /* Set the bus-master controller to transfer the packet. */ + outl((int) (skb->data), ioaddr + Wn7_MasterAddr); + outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); + vp->tx_skb = skb; + outw(StartDMADown, ioaddr + EL3_CMD); + /* queue will be woken at the DMADone interrupt. */ + } else { + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + dev_kfree_skb(skb); + if (inw(ioaddr + TxFree) > 1536) { + netif_wake_queue(dev); + } else + /* Interrupt us when the FIFO has room for max-sized packet. */ + outw(SetTxThreshold + (1536 >> 2), + ioaddr + EL3_CMD); + } +#else + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + dev_kfree_skb(skb); + if (inw(ioaddr + TxFree) > 1536) { + netif_wake_queue(dev); + } else + /* Interrupt us when the FIFO has room for max-sized packet. */ + outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD); +#endif /* bus master */ + + + /* Clear the Tx status stack. */ + { + short tx_status; + int i = 4; + + while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) { + if (tx_status & 0x3C) { /* A Tx-disabling error occurred. 
*/ + if (corkscrew_debug > 2) + pr_debug("%s: Tx error, status %2.2x.\n", + dev->name, tx_status); + if (tx_status & 0x04) + dev->stats.tx_fifo_errors++; + if (tx_status & 0x38) + dev->stats.tx_aborted_errors++; + if (tx_status & 0x30) { + int j; + outw(TxReset, ioaddr + EL3_CMD); + for (j = 20; j >= 0; j--) + if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + } + outw(TxEnable, ioaddr + EL3_CMD); + } + outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */ + } + } + return NETDEV_TX_OK; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ + +static irqreturn_t corkscrew_interrupt(int irq, void *dev_id) +{ + /* Use the now-standard shared IRQ implementation. */ + struct net_device *dev = dev_id; + struct corkscrew_private *lp = netdev_priv(dev); + int ioaddr, status; + int latency; + int i = max_interrupt_work; + + ioaddr = dev->base_addr; + latency = inb(ioaddr + Timer); + + spin_lock(&lp->lock); + + status = inw(ioaddr + EL3_STATUS); + + if (corkscrew_debug > 4) + pr_debug("%s: interrupt, status %4.4x, timer %d.\n", + dev->name, status, latency); + if ((status & 0xE000) != 0xE000) { + static int donedidthis; + /* Some interrupt controllers store a bogus interrupt from boot-time. + Ignore a single early interrupt, but don't hang the machine for + other interrupt problems. */ + if (donedidthis++ > 100) { + pr_err("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n", + dev->name, status, netif_running(dev)); + free_irq(dev->irq, dev); + dev->irq = -1; + } + } + + do { + if (corkscrew_debug > 5) + pr_debug("%s: In interrupt loop, status %4.4x.\n", + dev->name, status); + if (status & RxComplete) + corkscrew_rx(dev); + + if (status & TxAvailable) { + if (corkscrew_debug > 5) + pr_debug(" TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue(dev); + } + if (status & DownComplete) { + unsigned int dirty_tx = lp->dirty_tx; + + while (lp->cur_tx - dirty_tx > 0) { + int entry = dirty_tx % TX_RING_SIZE; + if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry])) + break; /* It still hasn't been processed. */ + if (lp->tx_skbuff[entry]) { + dev_kfree_skb_irq(lp->tx_skbuff[entry]); + lp->tx_skbuff[entry] = NULL; + } + dirty_tx++; + } + lp->dirty_tx = dirty_tx; + outw(AckIntr | DownComplete, ioaddr + EL3_CMD); + if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) { + lp->tx_full = 0; + netif_wake_queue(dev); + } + } +#ifdef VORTEX_BUS_MASTER + if (status & DMADone) { + outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ + dev_kfree_skb_irq(lp->tx_skb); /* Release the transferred buffer */ + netif_wake_queue(dev); + } +#endif + if (status & UpComplete) { + boomerang_rx(dev); + outw(AckIntr | UpComplete, ioaddr + EL3_CMD); + } + if (status & (AdapterFailure | RxEarly | StatsFull)) { + /* Handle all uncommon interrupts at once. */ + if (status & RxEarly) { /* Rx early is unused. */ + corkscrew_rx(dev); + outw(AckIntr | RxEarly, ioaddr + EL3_CMD); + } + if (status & StatsFull) { /* Empty statistics. */ + static int DoneDidThat; + if (corkscrew_debug > 4) + pr_debug("%s: Updating stats.\n", dev->name); + update_stats(ioaddr, dev); + /* DEBUG HACK: Disable statistics as an interrupt source. */ + /* This occurs when we have the wrong media type! 
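The recovery path below walks all eight windows and dumps their sixteen registers; each EL3WINDOW(win) there is the command encoding from the top of this file, outw(SelectWindow + win, ioaddr + EL3_CMD) with SelectWindow = 1 << 11, so selecting window 2, for instance, writes 0x0802 to the command register.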
*/ + if (DoneDidThat == 0 && inw(ioaddr + EL3_STATUS) & StatsFull) { + int win, reg; + pr_notice("%s: Updating stats failed, disabling stats as an interrupt source.\n", + dev->name); + for (win = 0; win < 8; win++) { + EL3WINDOW(win); + pr_notice("Vortex window %d:", win); + for (reg = 0; reg < 16; reg++) + pr_cont(" %2.2x", inb(ioaddr + reg)); + pr_cont("\n"); + } + EL3WINDOW(7); + outw(SetIntrEnb | TxAvailable | + RxComplete | AdapterFailure | + UpComplete | DownComplete | + TxComplete, ioaddr + EL3_CMD); + DoneDidThat++; + } + } + if (status & AdapterFailure) { + /* Adapter failure requires Rx reset and reinit. */ + outw(RxReset, ioaddr + EL3_CMD); + /* Set the Rx filter to the current state. */ + set_rx_mode(dev); + outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */ + outw(AckIntr | AdapterFailure, + ioaddr + EL3_CMD); + } + } + + if (--i < 0) { + pr_err("%s: Too much work in interrupt, status %4.4x. Disabling functions (%4.4x).\n", + dev->name, status, SetStatusEnb | ((~status) & 0x7FE)); + /* Disable all pending interrupts. */ + outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD); + outw(AckIntr | 0x7FF, ioaddr + EL3_CMD); + break; + } + /* Acknowledge the IRQ. */ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + + } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); + + spin_unlock(&lp->lock); + + if (corkscrew_debug > 4) + pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status); + return IRQ_HANDLED; +} + +static int corkscrew_rx(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + int i; + short rx_status; + + if (corkscrew_debug > 5) + pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n", + inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus)); + while ((rx_status = inw(ioaddr + RxStatus)) > 0) { + if (rx_status & 0x4000) { /* Error, update stats. */ + unsigned char rx_error = inb(ioaddr + RxErrors); + if (corkscrew_debug > 2) + pr_debug(" Rx error: status %2.2x.\n", + rx_error); + dev->stats.rx_errors++; + if (rx_error & 0x01) + dev->stats.rx_over_errors++; + if (rx_error & 0x02) + dev->stats.rx_length_errors++; + if (rx_error & 0x04) + dev->stats.rx_frame_errors++; + if (rx_error & 0x08) + dev->stats.rx_crc_errors++; + if (rx_error & 0x10) + dev->stats.rx_length_errors++; + } else { + /* The packet length: up to 4.5K!. */ + short pkt_len = rx_status & 0x1fff; + struct sk_buff *skb; + + skb = dev_alloc_skb(pkt_len + 5 + 2); + if (corkscrew_debug > 4) + pr_debug("Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + /* 'skb_put()' points to the start of sk_buff data area. */ + insl(ioaddr + RX_FIFO, + skb_put(skb, pkt_len), + (pkt_len + 3) >> 2); + outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + /* Wait a limited time to go to next packet. */ + for (i = 200; i >= 0; i--) + if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + continue; + } else if (corkscrew_debug) + pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len); + } + outw(RxDiscard, ioaddr + EL3_CMD); + dev->stats.rx_dropped++; + /* Wait a limited time to skip this packet. 
*/ + for (i = 200; i >= 0; i--) + if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + } + return 0; +} + +static int boomerang_rx(struct net_device *dev) +{ + struct corkscrew_private *vp = netdev_priv(dev); + int entry = vp->cur_rx % RX_RING_SIZE; + int ioaddr = dev->base_addr; + int rx_status; + + if (corkscrew_debug > 5) + pr_debug(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n", + inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus)); + while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) { + if (rx_status & RxDError) { /* Error, update stats. */ + unsigned char rx_error = rx_status >> 16; + if (corkscrew_debug > 2) + pr_debug(" Rx error: status %2.2x.\n", + rx_error); + dev->stats.rx_errors++; + if (rx_error & 0x01) + dev->stats.rx_over_errors++; + if (rx_error & 0x02) + dev->stats.rx_length_errors++; + if (rx_error & 0x04) + dev->stats.rx_frame_errors++; + if (rx_error & 0x08) + dev->stats.rx_crc_errors++; + if (rx_error & 0x10) + dev->stats.rx_length_errors++; + } else { + /* The packet length: up to 4.5K!. */ + short pkt_len = rx_status & 0x1fff; + struct sk_buff *skb; + + dev->stats.rx_bytes += pkt_len; + if (corkscrew_debug > 4) + pr_debug("Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + + /* Check if the packet is long enough to just accept without + copying to a properly sized skbuff. */ + if (pkt_len < rx_copybreak && + (skb = dev_alloc_skb(pkt_len + 4)) != NULL) { + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + /* 'skb_put()' points to the start of sk_buff data area. */ + memcpy(skb_put(skb, pkt_len), + isa_bus_to_virt(vp->rx_ring[entry]. + addr), pkt_len); + rx_copy++; + } else { + void *temp; + /* Pass up the skbuff already on the Rx ring. */ + skb = vp->rx_skbuff[entry]; + vp->rx_skbuff[entry] = NULL; + temp = skb_put(skb, pkt_len); + /* Remove this checking code for final release. */ + if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp) + pr_warning("%s: Warning -- the skbuff addresses do not match" + " in boomerang_rx: %p vs. %p / %p.\n", + dev->name, + isa_bus_to_virt(vp-> + rx_ring[entry]. + addr), skb->head, + temp); + rx_nocopy++; + } + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + entry = (++vp->cur_rx) % RX_RING_SIZE; + } + /* Refill the Rx ring buffers. */ + for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { + struct sk_buff *skb; + entry = vp->dirty_rx % RX_RING_SIZE; + if (vp->rx_skbuff[entry] == NULL) { + skb = dev_alloc_skb(PKT_BUF_SZ); + if (skb == NULL) + break; /* Bad news! */ + skb->dev = dev; /* Mark as being used by this device. */ + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data); + vp->rx_skbuff[entry] = skb; + } + vp->rx_ring[entry].status = 0; /* Clear complete bit. */ + } + return 0; +} + +static int corkscrew_close(struct net_device *dev) +{ + struct corkscrew_private *vp = netdev_priv(dev); + int ioaddr = dev->base_addr; + int i; + + netif_stop_queue(dev); + + if (corkscrew_debug > 1) { + pr_debug("%s: corkscrew_close() status %4.4x, Tx status %2.2x.\n", + dev->name, inw(ioaddr + EL3_STATUS), + inb(ioaddr + TxStatus)); + pr_debug("%s: corkscrew close stats: rx_nocopy %d rx_copy %d tx_queued %d.\n", + dev->name, rx_nocopy, rx_copy, queued_packet); + } + + del_timer(&vp->timer); + + /* Turn off statistics ASAP. We update lp->stats below. */ + outw(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. 
*/ + outw(RxDisable, ioaddr + EL3_CMD); + outw(TxDisable, ioaddr + EL3_CMD); + + if (dev->if_port == XCVR_10base2) + /* Turn off thinnet power. Green! */ + outw(StopCoax, ioaddr + EL3_CMD); + + free_irq(dev->irq, dev); + + outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); + + update_stats(ioaddr, dev); + if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ + outl(0, ioaddr + UpListPtr); + for (i = 0; i < RX_RING_SIZE; i++) + if (vp->rx_skbuff[i]) { + dev_kfree_skb(vp->rx_skbuff[i]); + vp->rx_skbuff[i] = NULL; + } + } + if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */ + outl(0, ioaddr + DownListPtr); + for (i = 0; i < TX_RING_SIZE; i++) + if (vp->tx_skbuff[i]) { + dev_kfree_skb(vp->tx_skbuff[i]); + vp->tx_skbuff[i] = NULL; + } + } + + return 0; +} + +static struct net_device_stats *corkscrew_get_stats(struct net_device *dev) +{ + struct corkscrew_private *vp = netdev_priv(dev); + unsigned long flags; + + if (netif_running(dev)) { + spin_lock_irqsave(&vp->lock, flags); + update_stats(dev->base_addr, dev); + spin_unlock_irqrestore(&vp->lock, flags); + } + return &dev->stats; +} + +/* Update statistics. + Unlike with the EL3 we need not worry about interrupts changing + the window setting from underneath us, but we must still guard + against a race condition with a StatsUpdate interrupt updating the + table. This is done by checking that the ASM (!) code generated uses + atomic updates with '+='. + */ +static void update_stats(int ioaddr, struct net_device *dev) +{ + /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ + /* Switch to the stats window, and read everything. */ + EL3WINDOW(6); + dev->stats.tx_carrier_errors += inb(ioaddr + 0); + dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); + /* Multiple collisions. */ inb(ioaddr + 2); + dev->stats.collisions += inb(ioaddr + 3); + dev->stats.tx_window_errors += inb(ioaddr + 4); + dev->stats.rx_fifo_errors += inb(ioaddr + 5); + dev->stats.tx_packets += inb(ioaddr + 6); + dev->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4; + /* Rx packets */ inb(ioaddr + 7); + /* Must read to clear */ + /* Tx deferrals */ inb(ioaddr + 8); + /* Don't bother with register 9, an extension of registers 6&7. + If we do use the 6&7 values the atomic update assumption above + is invalid. */ + inw(ioaddr + 10); /* Total Rx and Tx octets. */ + inw(ioaddr + 12); + /* New: On the Vortex we must also clear the BadSSD counter. */ + EL3WINDOW(4); + inb(ioaddr + 12); + + /* We change back to window 7 (not 1) with the Vortex. */ + EL3WINDOW(7); +} + +/* This new version of set_rx_mode() supports v1.4 kernels. + The Vortex chip has no documented multicast filter, so the only + multicast setting is to receive all multicast frames. At least + the chip has a very clean way to set the mode, unlike many others. 
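The filter word written below is another instance of the EL3_CMD encoding defined at the top of this file: SetRxFilter is 16 << 11 = 0x8000 and the filter classes sit in the parameter bits, so full promiscuous mode is SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm = 0x8000 | 0xF = 0x800F, written straight to the command register.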
*/ +static void set_rx_mode(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + short new_mode; + + if (dev->flags & IFF_PROMISC) { + if (corkscrew_debug > 3) + pr_debug("%s: Setting promiscuous mode.\n", + dev->name); + new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm; + } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { + new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast; + } else + new_mode = SetRxFilter | RxStation | RxBroadcast; + + outw(new_mode, ioaddr + EL3_CMD); +} + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + return corkscrew_debug; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 level) +{ + corkscrew_debug = level; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, +}; + + +#ifdef MODULE +void cleanup_module(void) +{ + while (!list_empty(&root_corkscrew_dev)) { + struct net_device *dev; + struct corkscrew_private *vp; + + vp = list_entry(root_corkscrew_dev.next, + struct corkscrew_private, list); + dev = vp->our_dev; + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c new file mode 100644 index 0000000..34c5e1c --- /dev/null +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -0,0 +1,1181 @@ +/* 3c574.c: A PCMCIA ethernet driver for the 3com 3c574 "RoadRunner". + + Written 1993-1998 by + Donald Becker, becker@scyld.com, (driver core) and + David Hinds, dahinds@users.sourceforge.net (from his PC card code). + Locking fixes (C) Copyright 2003 Red Hat Inc + + This software may be used and distributed according to the terms of + the GNU General Public License, incorporated herein by reference. + + This driver derives from Donald Becker's 3c509 core, which has the + following copyright: + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + +*/ + +/* + Theory of Operation + +I. Board Compatibility + +This device driver is designed for the 3Com 3c574 PC card Fast Ethernet +Adapter. + +II. Board-specific settings + +None -- PC cards are autoconfigured. + +III. Driver operation + +The 3c574 uses a Boomerang-style interface, without the bus-master capability. +See the Boomerang driver and documentation for most details. + +IV. Notes and chip documentation. + +Two added registers are used to enhance PIO performance, RunnerRdCtrl and +RunnerWrCtrl. These are 11 bit down-counters that are preloaded with the +count of word (16 bits) reads or writes the driver is about to do to the Rx +or Tx FIFO. The chip is then able to hide the internal-PCI-bus to PC-card +translation latency by buffering the I/O operations with an 8 word FIFO. +Note: No other chip accesses are permitted when this buffer is used. + +A second enhancement is that both attribute and common memory space +0x0800-0x0fff can be translated to the PIO FIFO. Thus memory operations (faster +with *some* PCcard bridges) may be used instead of I/O operations. +This is enabled by setting the 0x10 bit in the PCMCIA LAN COR. + +Some slow PC card bridges work better if they never see a WAIT signal.
+This is configured by setting the 0x20 bit in the PCMCIA LAN COR. +Only do this after testing that it is reliable and improves performance. + +The upper five bits of RunnerRdCtrl are used to window into PCcard +configuration space registers. Window 0 is the regular Boomerang/Odie +register set, 1-5 are various PC card control registers, and 16-31 are +the (reversed!) CIS table. + +A final note: writing the InternalConfig register in window 3 with an +invalid ramWidth is Very Bad. + +V. References + +http://www.scyld.com/expert/NWay.html +http://www.national.com/opf/DP/DP83840A.html + +Thanks to Terry Murphy of 3Com for providing development information for +earlier 3Com products. + +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/in.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/if_arp.h> +#include <linux/ioport.h> +#include <linux/bitops.h> +#include <linux/mii.h> + +#include <pcmcia/cistpl.h> +#include <pcmcia/cisreg.h> +#include <pcmcia/ciscode.h> +#include <pcmcia/ds.h> + +#include <asm/uaccess.h> +#include <asm/io.h> +#include <asm/system.h> + +/*====================================================================*/ + +/* Module parameters */ + +MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); +MODULE_DESCRIPTION("3Com 3c574 series PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); + +#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) + +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +INT_MODULE_PARM(max_interrupt_work, 32); + +/* Force full duplex modes? */ +INT_MODULE_PARM(full_duplex, 0); + +/* Autodetect link polarity reversal? */ +INT_MODULE_PARM(auto_polarity, 1); + + +/*====================================================================*/ + +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT ((800*HZ)/1000) + +/* To minimize the size of the driver source and make the driver more + readable not all constants are symbolically defined. + You'll need the manual if you want to understand driver details anyway. */ +/* Offsets from base I/O address. */ +#define EL3_DATA 0x00 +#define EL3_CMD 0x0e +#define EL3_STATUS 0x0e + +#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) + +/* The top five bits written to EL3_CMD are a command, the lower + 11 bits are the parameter, if applicable. */ +enum el3_cmds { + TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, + RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11, + TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, + FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, + SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, + SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11, + StatsDisable = 22<<11, StopCoax = 23<<11, +}; + +enum elxl_status { + IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, + TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, + IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 }; + +/* The SetRxFilter command accepts the following classes: */ +enum RxFilter { + RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 +}; + +enum Window0 { + Wn0EepromCmd = 10, Wn0EepromData = 12, /* EEPROM command/address, data. */ + IntrStatus=0x0E, /* Valid in all windows. */ +}; +/* These assume the larger EEPROM. */ +enum Win0_EEPROM_cmds { + EEPROM_Read = 0x200, EEPROM_WRITE = 0x100, EEPROM_ERASE = 0x300, + EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ + EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ +}; + +/* Register window 1 offsets, the window used in normal operation.
+ On the "Odie" this window is always mapped at offsets 0x10-0x1f. + Except for TxFree, which is overlapped by RunnerWrCtrl. */ +enum Window1 { + TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, + RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B, + TxFree = 0x0C, /* Remaining free bytes in Tx buffer. */ + RunnerRdCtrl = 0x16, RunnerWrCtrl = 0x1c, +}; + +enum Window3 { /* Window 3: MAC/config bits. */ + Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8, +}; +enum wn3_config { + Ram_size = 7, + Ram_width = 8, + Ram_speed = 0x30, + Rom_size = 0xc0, + Ram_split_shift = 16, + Ram_split = 3 << Ram_split_shift, + Xcvr_shift = 20, + Xcvr = 7 << Xcvr_shift, + Autoselect = 0x1000000, +}; + +enum Window4 { /* Window 4: Xcvr/media bits. */ + Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10, +}; + +#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */ + +struct el3_private { + struct pcmcia_device *p_dev; + u16 advertising, partner; /* NWay media advertisement */ + unsigned char phys; /* MII device address */ + unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */ + /* for transceiver monitoring */ + struct timer_list media; + unsigned short media_status; + unsigned short fast_poll; + unsigned long last_irq; + spinlock_t window_lock; /* Guards the Window selection */ +}; + +/* Set iff a MII transceiver on any interface requires mdio preamble. + This is only set with the original DP83840 on older 3c905 boards, so the + extra code size of a per-interface flag is not worthwhile. */ +static char mii_preamble_required = 0; + +/* Index of functions. */ + +static int tc574_config(struct pcmcia_device *link); +static void tc574_release(struct pcmcia_device *link); + +static void mdio_sync(unsigned int ioaddr, int bits); +static int mdio_read(unsigned int ioaddr, int phy_id, int location); +static void mdio_write(unsigned int ioaddr, int phy_id, int location, + int value); +static unsigned short read_eeprom(unsigned int ioaddr, int index); +static void tc574_wait_for_completion(struct net_device *dev, int cmd); + +static void tc574_reset(struct net_device *dev); +static void media_check(unsigned long arg); +static int el3_open(struct net_device *dev); +static netdev_tx_t el3_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t el3_interrupt(int irq, void *dev_id); +static void update_stats(struct net_device *dev); +static struct net_device_stats *el3_get_stats(struct net_device *dev); +static int el3_rx(struct net_device *dev, int worklimit); +static int el3_close(struct net_device *dev); +static void el3_tx_timeout(struct net_device *dev); +static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static void set_rx_mode(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); + +static void tc574_detach(struct pcmcia_device *p_dev); + +/* + tc574_attach() creates an "instance" of the driver, allocating + local data structures for one device. The device is registered + with Card Services.
+*/ +static const struct net_device_ops el3_netdev_ops = { + .ndo_open = el3_open, + .ndo_stop = el3_close, + .ndo_start_xmit = el3_start_xmit, + .ndo_tx_timeout = el3_tx_timeout, + .ndo_get_stats = el3_get_stats, + .ndo_do_ioctl = el3_ioctl, + .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int tc574_probe(struct pcmcia_device *link) +{ + struct el3_private *lp; + struct net_device *dev; + + dev_dbg(&link->dev, "3c574_attach()\n"); + + /* Create the PC card device object. */ + dev = alloc_etherdev(sizeof(struct el3_private)); + if (!dev) + return -ENOMEM; + lp = netdev_priv(dev); + link->priv = dev; + lp->p_dev = link; + + spin_lock_init(&lp->window_lock); + link->resource[0]->end = 32; + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; + link->config_flags |= CONF_ENABLE_IRQ; + link->config_index = 1; + + dev->netdev_ops = &el3_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + return tc574_config(link); +} + +static void tc574_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "3c574_detach()\n"); + + unregister_netdev(dev); + + tc574_release(link); + + free_netdev(dev); +} /* tc574_detach */ + +static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; + +static int tc574_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct el3_private *lp = netdev_priv(dev); + int ret, i, j; + unsigned int ioaddr; + __be16 *phys_addr; + char *cardname; + __u32 config; + u8 *buf; + size_t len; + + phys_addr = (__be16 *)dev->dev_addr; + + dev_dbg(&link->dev, "3c574_config()\n"); + + link->io_lines = 16; + + for (i = j = 0; j < 0x400; j += 0x20) { + link->resource[0]->start = j ^ 0x300; + i = pcmcia_request_io(link); + if (i == 0) + break; + } + if (i != 0) + goto failed; + + ret = pcmcia_request_irq(link, el3_interrupt); + if (ret) + goto failed; + + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + + ioaddr = dev->base_addr; + + /* The 3c574 normally uses an EEPROM for configuration info, including + the hardware address. The future products may include a modem chip + and put the address in the CIS. */ + + len = pcmcia_get_tuple(link, 0x88, &buf); + if (buf && len >= 6) { + for (i = 0; i < 3; i++) + phys_addr[i] = htons(le16_to_cpu(buf[i * 2])); + kfree(buf); + } else { + kfree(buf); /* 0 < len < 6 */ + EL3WINDOW(0); + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); + if (phys_addr[0] == htons(0x6060)) { + pr_notice("IO port conflict at 0x%03lx-0x%03lx\n", + dev->base_addr, dev->base_addr+15); + goto failed; + } + } + if (link->prod_id[1]) + cardname = link->prod_id[1]; + else + cardname = "3Com 3c574"; + + { + u_char mcr; + outw(2<<11, ioaddr + RunnerRdCtrl); + mcr = inb(ioaddr + 2); + outw(0<<11, ioaddr + RunnerRdCtrl); + pr_info(" ASIC rev %d,", mcr>>3); + EL3WINDOW(3); + config = inl(ioaddr + Wn3_Config); + lp->default_media = (config & Xcvr) >> Xcvr_shift; + lp->autoselect = config & Autoselect ? 
1 : 0; + } + + init_timer(&lp->media); + + { + int phy; + + /* Roadrunner only: Turn on the MII transceiver */ + outw(0x8040, ioaddr + Wn3_Options); + mdelay(1); + outw(0xc040, ioaddr + Wn3_Options); + tc574_wait_for_completion(dev, TxReset); + tc574_wait_for_completion(dev, RxReset); + mdelay(1); + outw(0x8040, ioaddr + Wn3_Options); + + EL3WINDOW(4); + for (phy = 1; phy <= 32; phy++) { + int mii_status; + mdio_sync(ioaddr, 32); + mii_status = mdio_read(ioaddr, phy & 0x1f, 1); + if (mii_status != 0xffff) { + lp->phys = phy & 0x1f; + dev_dbg(&link->dev, " MII transceiver at " + "index %d, status %x.\n", + phy, mii_status); + if ((mii_status & 0x0040) == 0) + mii_preamble_required = 1; + break; + } + } + if (phy > 32) { + pr_notice(" No MII transceivers found!\n"); + goto failed; + } + i = mdio_read(ioaddr, lp->phys, 16) | 0x40; + mdio_write(ioaddr, lp->phys, 16, i); + lp->advertising = mdio_read(ioaddr, lp->phys, 4); + if (full_duplex) { + /* Only advertise the FD media types. */ + lp->advertising &= ~0x02a0; + mdio_write(ioaddr, lp->phys, 4, lp->advertising); + } + } + + SET_NETDEV_DEV(dev, &link->dev); + + if (register_netdev(dev) != 0) { + pr_notice("register_netdev() failed\n"); + goto failed; + } + + netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n", + cardname, dev->base_addr, dev->irq, dev->dev_addr); + netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n", + 8 << config & Ram_size, + ram_split[(config & Ram_split) >> Ram_split_shift], + config & Autoselect ? "autoselect " : ""); + + return 0; + +failed: + tc574_release(link); + return -ENODEV; + +} /* tc574_config */ + +static void tc574_release(struct pcmcia_device *link) +{ + pcmcia_disable_device(link); +} + +static int tc574_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int tc574_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + tc574_reset(dev); + netif_device_attach(dev); + } + + return 0; +} + +static void dump_status(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + EL3WINDOW(1); + netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x, tx free %04x\n", + inw(ioaddr+EL3_STATUS), + inw(ioaddr+RxStatus), inb(ioaddr+TxStatus), + inw(ioaddr+TxFree)); + EL3WINDOW(4); + netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n", + inw(ioaddr+0x04), inw(ioaddr+0x06), + inw(ioaddr+0x08), inw(ioaddr+0x0a)); + EL3WINDOW(1); +} + +/* + Use this for commands that may take time to finish +*/ +static void tc574_wait_for_completion(struct net_device *dev, int cmd) +{ + int i = 1500; + outw(cmd, dev->base_addr + EL3_CMD); + while (--i > 0) + if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; + if (i == 0) + netdev_notice(dev, "command 0x%04x did not complete!\n", cmd); +} + +/* Read a word from the EEPROM using the regular EEPROM access register. + Assume that we are in register window zero. + */ +static unsigned short read_eeprom(unsigned int ioaddr, int index) +{ + int timer; + outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd); + /* Pause for at least 162 usec for the read to take place. */ + for (timer = 1620; timer >= 0; timer--) { + if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0) + break; + } + return inw(ioaddr + Wn0EepromData); +} + +/* MII transceiver control section. + Read and write the MII registers using software-generated serial + MDIO protocol. 
See the MII specifications or DP83840A data sheet
+	for details.
+	The maximum data clock rate is 2.5 MHz.  The timing is easily met by the
+	slow PC card interface. */
+
+#define MDIO_SHIFT_CLK	0x01
+#define MDIO_DIR_WRITE	0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ	0x02
+#define MDIO_ENB_IN	0x00
+
+/* Generate the preamble required for initial synchronization and
+   a few older transceivers. */
+static void mdio_sync(unsigned int ioaddr, int bits)
+{
+	unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+	/* Establish sync by sending at least 32 logic ones. */
+	while (-- bits >= 0) {
+		outw(MDIO_DATA_WRITE1, mdio_addr);
+		outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+	}
+}
+
+static int mdio_read(unsigned int ioaddr, int phy_id, int location)
+{
+	int i;
+	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	unsigned int retval = 0;
+	unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+	if (mii_preamble_required)
+		mdio_sync(ioaddr, 32);
+
+	/* Shift the read command bits out. */
+	for (i = 14; i >= 0; i--) {
+		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		outw(dataval, mdio_addr);
+		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		outw(MDIO_ENB_IN, mdio_addr);
+		retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+	}
+	return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value)
+{
+	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+	unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+	int i;
+
+	if (mii_preamble_required)
+		mdio_sync(ioaddr, 32);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		outw(dataval, mdio_addr);
+		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+	}
+	/* Leave the interface idle. */
+	for (i = 1; i >= 0; i--) {
+		outw(MDIO_ENB_IN, mdio_addr);
+		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+	}
+}
+
+/* Reset and restore all of the 3c574 registers. */
+static void tc574_reset(struct net_device *dev)
+{
+	struct el3_private *lp = netdev_priv(dev);
+	int i;
+	unsigned int ioaddr = dev->base_addr;
+	unsigned long flags;
+
+	tc574_wait_for_completion(dev, TotalReset|0x10);
+
+	spin_lock_irqsave(&lp->window_lock, flags);
+	/* Clear any transactions in progress. */
+	outw(0, ioaddr + RunnerWrCtrl);
+	outw(0, ioaddr + RunnerRdCtrl);
+
+	/* Set the station address and mask. */
+	EL3WINDOW(2);
+	for (i = 0; i < 6; i++)
+		outb(dev->dev_addr[i], ioaddr + i);
+	for (; i < 12; i+=2)
+		outw(0, ioaddr + i);
+
+	/* Reset config options */
+	EL3WINDOW(3);
+	outb((dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+	outl((lp->autoselect ? 0x01000000 : 0) | 0x0062001b,
+	     ioaddr + Wn3_Config);
+	/* Roadrunner only: Turn on the MII transceiver. */
+	outw(0x8040, ioaddr + Wn3_Options);
+	mdelay(1);
+	outw(0xc040, ioaddr + Wn3_Options);
+	EL3WINDOW(1);
+	spin_unlock_irqrestore(&lp->window_lock, flags);
+
+	tc574_wait_for_completion(dev, TxReset);
+	tc574_wait_for_completion(dev, RxReset);
+	mdelay(1);
+	spin_lock_irqsave(&lp->window_lock, flags);
+	EL3WINDOW(3);
+	outw(0x8040, ioaddr + Wn3_Options);
+
+	/* Switch to the stats window, and clear all stats by reading. */
+	outw(StatsDisable, ioaddr + EL3_CMD);
+	EL3WINDOW(6);
+	for (i = 0; i < 10; i++)
+		inb(ioaddr + i);
+	inw(ioaddr + 10);
+	inw(ioaddr + 12);
+	EL3WINDOW(4);
+	inb(ioaddr + 12);
+	inb(ioaddr + 13);
+
+	/* .. enable any extra statistics bits.. */
+	outw(0x0040, ioaddr + Wn4_NetDiag);
+
+	EL3WINDOW(1);
+	spin_unlock_irqrestore(&lp->window_lock, flags);
+
+	/* .. re-sync MII and re-fill what NWay is advertising.
*/ + mdio_sync(ioaddr, 32); + mdio_write(ioaddr, lp->phys, 4, lp->advertising); + if (!auto_polarity) { + /* works for TDK 78Q2120 series MII's */ + i = mdio_read(ioaddr, lp->phys, 16) | 0x20; + mdio_write(ioaddr, lp->phys, 16, i); + } + + spin_lock_irqsave(&lp->window_lock, flags); + /* Switch to register set 1 for normal use, just for TxFree. */ + set_rx_mode(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); + outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. */ + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull + | AdapterFailure | RxEarly, ioaddr + EL3_CMD); +} + +static int el3_open(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + if (!pcmcia_dev_present(link)) + return -ENODEV; + + link->open++; + netif_start_queue(dev); + + tc574_reset(dev); + lp->media.function = media_check; + lp->media.data = (unsigned long) dev; + lp->media.expires = jiffies + HZ; + add_timer(&lp->media); + + dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", + dev->name, inw(dev->base_addr + EL3_STATUS)); + + return 0; +} + +static void el3_tx_timeout(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + + netdev_notice(dev, "Transmit timed out!\n"); + dump_status(dev); + dev->stats.tx_errors++; + dev->trans_start = jiffies; /* prevent tx timeout */ + /* Issue TX_RESET and TX_START commands. */ + tc574_wait_for_completion(dev, TxReset); + outw(TxEnable, ioaddr + EL3_CMD); + netif_wake_queue(dev); +} + +static void pop_tx_status(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int i; + + /* Clear the Tx status stack. */ + for (i = 32; i > 0; i--) { + u_char tx_status = inb(ioaddr + TxStatus); + if (!(tx_status & 0x84)) + break; + /* reset transmitter on jabber error or underrun */ + if (tx_status & 0x30) + tc574_wait_for_completion(dev, TxReset); + if (tx_status & 0x38) { + pr_debug("%s: transmit error: status 0x%02x\n", + dev->name, tx_status); + outw(TxEnable, ioaddr + EL3_CMD); + dev->stats.tx_aborted_errors++; + } + outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */ + } +} + +static netdev_tx_t el3_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + + pr_debug("%s: el3_start_xmit(length = %ld) called, " + "status %4.4x.\n", dev->name, (long)skb->len, + inw(ioaddr + EL3_STATUS)); + + spin_lock_irqsave(&lp->window_lock, flags); + + dev->stats.tx_bytes += skb->len; + + /* Put out the doubleword header... */ + outw(skb->len, ioaddr + TX_FIFO); + outw(0, ioaddr + TX_FIFO); + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2); + + /* TxFree appears only in Window 1, not offset 0x1c. */ + if (inw(ioaddr + TxFree) <= 1536) { + netif_stop_queue(dev); + /* Interrupt us when the FIFO has room for max-sized packet. + The threshold is in units of dwords. 
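+	   For example: 1536 bytes >> 2 = 384 dwords, so the command issued
+	   below is effectively SetTxThreshold + 384, and the NIC raises
+	   TxAvailable once that much FIFO space is free again.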
*/ + outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); + } + + pop_tx_status(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +/* The EL3 interrupt handler. */ +static irqreturn_t el3_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr; + unsigned status; + int work_budget = max_interrupt_work; + int handled = 0; + + if (!netif_device_present(dev)) + return IRQ_NONE; + ioaddr = dev->base_addr; + + pr_debug("%s: interrupt, status %4.4x.\n", + dev->name, inw(ioaddr + EL3_STATUS)); + + spin_lock(&lp->window_lock); + + while ((status = inw(ioaddr + EL3_STATUS)) & + (IntLatch | RxComplete | RxEarly | StatsFull)) { + if (!netif_device_present(dev) || + ((status & 0xe000) != 0x2000)) { + pr_debug("%s: Interrupt from dead card\n", dev->name); + break; + } + + handled = 1; + + if (status & RxComplete) + work_budget = el3_rx(dev, work_budget); + + if (status & TxAvailable) { + pr_debug(" TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue(dev); + } + + if (status & TxComplete) + pop_tx_status(dev); + + if (status & (AdapterFailure | RxEarly | StatsFull)) { + /* Handle all uncommon interrupts. */ + if (status & StatsFull) + update_stats(dev); + if (status & RxEarly) { + work_budget = el3_rx(dev, work_budget); + outw(AckIntr | RxEarly, ioaddr + EL3_CMD); + } + if (status & AdapterFailure) { + u16 fifo_diag; + EL3WINDOW(4); + fifo_diag = inw(ioaddr + Wn4_FIFODiag); + EL3WINDOW(1); + netdev_notice(dev, "adapter failure, FIFO diagnostic register %04x\n", + fifo_diag); + if (fifo_diag & 0x0400) { + /* Tx overrun */ + tc574_wait_for_completion(dev, TxReset); + outw(TxEnable, ioaddr + EL3_CMD); + } + if (fifo_diag & 0x2000) { + /* Rx underrun */ + tc574_wait_for_completion(dev, RxReset); + set_rx_mode(dev); + outw(RxEnable, ioaddr + EL3_CMD); + } + outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); + } + } + + if (--work_budget < 0) { + pr_debug("%s: Too much work in interrupt, " + "status %4.4x.\n", dev->name, status); + /* Clear all interrupts */ + outw(AckIntr | 0xFF, ioaddr + EL3_CMD); + break; + } + /* Acknowledge the IRQ. */ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + } + + pr_debug("%s: exiting interrupt, status %4.4x.\n", + dev->name, inw(ioaddr + EL3_STATUS)); + + spin_unlock(&lp->window_lock); + return IRQ_RETVAL(handled); +} + +/* + This timer serves two purposes: to check for missed interrupts + (and as a last resort, poll the NIC for events), and to monitor + the MII, reporting changes in cable status. 
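+	A sketch of the resulting cadence: when a dropped interrupt is
+	suspected, fast_poll is set to HZ and the timer refires every
+	2*HZ/100 jiffies (about 50 polls per second) until the countdown
+	drains; otherwise it settles back to a one-second MII poll.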
+*/ +static void media_check(unsigned long arg) +{ + struct net_device *dev = (struct net_device *) arg; + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + unsigned long flags; + unsigned short /* cable, */ media, partner; + + if (!netif_device_present(dev)) + goto reschedule; + + /* Check for pending interrupt with expired latency timer: with + this, we can limp along even if the interrupt is blocked */ + if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) { + if (!lp->fast_poll) + netdev_info(dev, "interrupt(s) dropped!\n"); + + local_irq_save(flags); + el3_interrupt(dev->irq, dev); + local_irq_restore(flags); + + lp->fast_poll = HZ; + } + if (lp->fast_poll) { + lp->fast_poll--; + lp->media.expires = jiffies + 2*HZ/100; + add_timer(&lp->media); + return; + } + + spin_lock_irqsave(&lp->window_lock, flags); + EL3WINDOW(4); + media = mdio_read(ioaddr, lp->phys, 1); + partner = mdio_read(ioaddr, lp->phys, 5); + EL3WINDOW(1); + + if (media != lp->media_status) { + if ((media ^ lp->media_status) & 0x0004) + netdev_info(dev, "%s link beat\n", + (lp->media_status & 0x0004) ? "lost" : "found"); + if ((media ^ lp->media_status) & 0x0020) { + lp->partner = 0; + if (lp->media_status & 0x0020) { + netdev_info(dev, "autonegotiation restarted\n"); + } else if (partner) { + partner &= lp->advertising; + lp->partner = partner; + netdev_info(dev, "autonegotiation complete: " + "%dbaseT-%cD selected\n", + (partner & 0x0180) ? 100 : 10, + (partner & 0x0140) ? 'F' : 'H'); + } else { + netdev_info(dev, "link partner did not autonegotiate\n"); + } + + EL3WINDOW(3); + outb((partner & 0x0140 ? 0x20 : 0) | + (dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl); + EL3WINDOW(1); + + } + if (media & 0x0010) + netdev_info(dev, "remote fault detected\n"); + if (media & 0x0002) + netdev_info(dev, "jabber detected\n"); + lp->media_status = media; + } + spin_unlock_irqrestore(&lp->window_lock, flags); + +reschedule: + lp->media.expires = jiffies + HZ; + add_timer(&lp->media); +} + +static struct net_device_stats *el3_get_stats(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + + if (netif_device_present(dev)) { + unsigned long flags; + spin_lock_irqsave(&lp->window_lock, flags); + update_stats(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); + } + return &dev->stats; +} + +/* Update statistics. + Surprisingly this need not be run single-threaded, but it effectively is. + The counters clear when read, so the adds must merely be atomic. + */ +static void update_stats(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + u8 rx, tx, up; + + pr_debug("%s: updating the statistics.\n", dev->name); + + if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */ + return; + + /* Unlike the 3c509 we need not turn off stats updates while reading. */ + /* Switch to the stats window, and read everything. */ + EL3WINDOW(6); + dev->stats.tx_carrier_errors += inb(ioaddr + 0); + dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); + /* Multiple collisions. 
*/ inb(ioaddr + 2); + dev->stats.collisions += inb(ioaddr + 3); + dev->stats.tx_window_errors += inb(ioaddr + 4); + dev->stats.rx_fifo_errors += inb(ioaddr + 5); + dev->stats.tx_packets += inb(ioaddr + 6); + up = inb(ioaddr + 9); + dev->stats.tx_packets += (up&0x30) << 4; + /* Rx packets */ inb(ioaddr + 7); + /* Tx deferrals */ inb(ioaddr + 8); + rx = inw(ioaddr + 10); + tx = inw(ioaddr + 12); + + EL3WINDOW(4); + /* BadSSD */ inb(ioaddr + 12); + up = inb(ioaddr + 13); + + EL3WINDOW(1); +} + +static int el3_rx(struct net_device *dev, int worklimit) +{ + unsigned int ioaddr = dev->base_addr; + short rx_status; + + pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", + dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); + while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) && + worklimit > 0) { + worklimit--; + if (rx_status & 0x4000) { /* Error, update stats. */ + short error = rx_status & 0x3800; + dev->stats.rx_errors++; + switch (error) { + case 0x0000: dev->stats.rx_over_errors++; break; + case 0x0800: dev->stats.rx_length_errors++; break; + case 0x1000: dev->stats.rx_frame_errors++; break; + case 0x1800: dev->stats.rx_length_errors++; break; + case 0x2000: dev->stats.rx_frame_errors++; break; + case 0x2800: dev->stats.rx_crc_errors++; break; + } + } else { + short pkt_len = rx_status & 0x7ff; + struct sk_buff *skb; + + skb = dev_alloc_skb(pkt_len+5); + + pr_debug(" Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + skb_reserve(skb, 2); + insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), + ((pkt_len+3)>>2)); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } else { + pr_debug("%s: couldn't allocate a sk_buff of" + " size %d.\n", dev->name, pkt_len); + dev->stats.rx_dropped++; + } + } + tc574_wait_for_completion(dev, RxDiscard); + } + + return worklimit; +} + +/* Provide ioctl() calls to examine the MII xcvr state. */ +static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + struct mii_ioctl_data *data = if_mii(rq); + int phy = lp->phys & 0x1f; + + pr_debug("%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n", + dev->name, rq->ifr_ifrn.ifrn_name, cmd, + data->phy_id, data->reg_num, data->val_in, data->val_out); + + switch(cmd) { + case SIOCGMIIPHY: /* Get the address of the PHY in use. */ + data->phy_id = phy; + case SIOCGMIIREG: /* Read the specified MII register. */ + { + int saved_window; + unsigned long flags; + + spin_lock_irqsave(&lp->window_lock, flags); + saved_window = inw(ioaddr + EL3_CMD) >> 13; + EL3WINDOW(4); + data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, + data->reg_num & 0x1f); + EL3WINDOW(saved_window); + spin_unlock_irqrestore(&lp->window_lock, flags); + return 0; + } + case SIOCSMIIREG: /* Write the specified MII register */ + { + int saved_window; + unsigned long flags; + + spin_lock_irqsave(&lp->window_lock, flags); + saved_window = inw(ioaddr + EL3_CMD) >> 13; + EL3WINDOW(4); + mdio_write(ioaddr, data->phy_id & 0x1f, + data->reg_num & 0x1f, data->val_in); + EL3WINDOW(saved_window); + spin_unlock_irqrestore(&lp->window_lock, flags); + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +/* The Odie chip has a 64 bin multicast filter, but the bit layout is not + documented. Until it is we revert to receiving all multicast frames when + any multicast reception is desired. 
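+	 The fallback in set_rx_mode() therefore reduces to three settings:
+	   promiscuous:            RxStation | RxMulticast | RxBroadcast | RxProm
+	   any multicast/allmulti: RxStation | RxMulticast | RxBroadcast
+	   otherwise:              RxStation | RxBroadcast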
+ Note: My other drivers emit a log message whenever promiscuous mode is + entered to help detect password sniffers. This is less desirable on + typical PC card machines, so we omit the message. + */ + +static void set_rx_mode(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + + if (dev->flags & IFF_PROMISC) + outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, + ioaddr + EL3_CMD); + else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) + outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); + else + outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); +} + +static void set_multicast_list(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&lp->window_lock, flags); + set_rx_mode(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); +} + +static int el3_close(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct el3_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); + + if (pcmcia_dev_present(link)) { + unsigned long flags; + + /* Turn off statistics ASAP. We update lp->stats below. */ + outw(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. */ + outw(RxDisable, ioaddr + EL3_CMD); + outw(TxDisable, ioaddr + EL3_CMD); + + /* Note: Switching to window 0 may disable the IRQ. */ + EL3WINDOW(0); + spin_lock_irqsave(&lp->window_lock, flags); + update_stats(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); + + /* force interrupts off */ + outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); + } + + link->open--; + netif_stop_queue(dev); + del_timer_sync(&lp->media); + + return 0; +} + +static const struct pcmcia_device_id tc574_ids[] = { + PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0574), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0556, "cis/3CCFEM556.cis"), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, tc574_ids); + +static struct pcmcia_driver tc574_driver = { + .owner = THIS_MODULE, + .name = "3c574_cs", + .probe = tc574_probe, + .remove = tc574_detach, + .id_table = tc574_ids, + .suspend = tc574_suspend, + .resume = tc574_resume, +}; + +static int __init init_tc574(void) +{ + return pcmcia_register_driver(&tc574_driver); +} + +static void __exit exit_tc574(void) +{ + pcmcia_unregister_driver(&tc574_driver); +} + +module_init(init_tc574); +module_exit(exit_tc574); diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c new file mode 100644 index 0000000..4a1a358 --- /dev/null +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -0,0 +1,943 @@ +/*====================================================================== + + A PCMCIA ethernet driver for the 3com 3c589 card. + + Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net + + 3c589_cs.c 1.162 2001/10/13 00:08:50 + + The network driver code is based on Donald Becker's 3c589 code: + + Written 1994 by Donald Becker. + Copyright 1993 United States Government as represented by the + Director, National Security Agency. This software may be used and + distributed according to the terms of the GNU General Public License, + incorporated herein by reference. 
+    Donald Becker may be reached at becker@scyld.com
+
+    Updated for 2.5.x by Alan Cox
+
+======================================================================*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME	"3c589_cs"
+#define DRV_VERSION	"1.162-ac"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+/* To minimize the size of the driver source I only define operating
+   constants if they are used several times.  You'll need the manual
+   if you want to understand driver details. */
+/* Offsets from base I/O address. */
+#define EL3_DATA	0x00
+#define EL3_TIMER	0x0a
+#define EL3_CMD		0x0e
+#define EL3_STATUS	0x0e
+
+#define EEPROM_READ	0x0080
+#define EEPROM_BUSY	0x8000
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+/* The top five bits written to EL3_CMD are a command, the lower
+   11 bits are the parameter, if applicable. */
+enum c509cmd {
+	TotalReset	= 0<<11,
+	SelectWindow	= 1<<11,
+	StartCoax	= 2<<11,
+	RxDisable	= 3<<11,
+	RxEnable	= 4<<11,
+	RxReset		= 5<<11,
+	RxDiscard	= 8<<11,
+	TxEnable	= 9<<11,
+	TxDisable	= 10<<11,
+	TxReset		= 11<<11,
+	FakeIntr	= 12<<11,
+	AckIntr		= 13<<11,
+	SetIntrEnb	= 14<<11,
+	SetStatusEnb	= 15<<11,
+	SetRxFilter	= 16<<11,
+	SetRxThreshold	= 17<<11,
+	SetTxThreshold	= 18<<11,
+	SetTxStart	= 19<<11,
+	StatsEnable	= 21<<11,
+	StatsDisable	= 22<<11,
+	StopCoax	= 23<<11
+};
+
+enum c509status {
+	IntLatch	= 0x0001,
+	AdapterFailure	= 0x0002,
+	TxComplete	= 0x0004,
+	TxAvailable	= 0x0008,
+	RxComplete	= 0x0010,
+	RxEarly		= 0x0020,
+	IntReq		= 0x0040,
+	StatsFull	= 0x0080,
+	CmdBusy		= 0x1000
+};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+	RxStation	= 1,
+	RxMulticast	= 2,
+	RxBroadcast	= 4,
+	RxProm		= 8
+};
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO		0x00
+#define RX_FIFO		0x00
+#define RX_STATUS	0x08
+#define TX_STATUS	0x0B
+#define TX_FREE		0x0C	/* Remaining free bytes in Tx buffer. */
+
+#define WN0_IRQ		0x08	/* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA	0x0A	/* Window 4: Various transceiver/media bits. */
+#define MEDIA_TP	0x00C0	/* Enable link beat and jabber for 10baseT. */
+#define MEDIA_LED	0x0001	/* Enable link light on 3C589E cards.
*/
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT	((400*HZ)/1000)
+
+struct el3_private {
+	struct pcmcia_device	*p_dev;
+	/* For transceiver monitoring */
+	struct timer_list	media;
+	u16			media_status;
+	u16			fast_poll;
+	unsigned long		last_irq;
+	spinlock_t		lock;
+};
+
+static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("3Com 3c589 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/* Special hook for setting if_port when module is loaded */
+INT_MODULE_PARM(if_port, 0);
+
+
+/*====================================================================*/
+
+static int tc589_config(struct pcmcia_device *link);
+static void tc589_release(struct pcmcia_device *link);
+
+static u16 read_eeprom(unsigned int ioaddr, int index);
+static void tc589_reset(struct net_device *dev);
+static void media_check(unsigned long arg);
+static int el3_config(struct net_device *dev, struct ifmap *map);
+static int el3_open(struct net_device *dev);
+static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
+					struct net_device *dev);
+static irqreturn_t el3_interrupt(int irq, void *dev_id);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev);
+static int el3_close(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static const struct ethtool_ops netdev_ethtool_ops;
+
+static void tc589_detach(struct pcmcia_device *p_dev);
+
+static const struct net_device_ops el3_netdev_ops = {
+	.ndo_open		= el3_open,
+	.ndo_stop		= el3_close,
+	.ndo_start_xmit		= el3_start_xmit,
+	.ndo_tx_timeout		= el3_tx_timeout,
+	.ndo_set_config		= el3_config,
+	.ndo_get_stats		= el3_get_stats,
+	.ndo_set_multicast_list	= set_multicast_list,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static int tc589_probe(struct pcmcia_device *link)
+{
+	struct el3_private *lp;
+	struct net_device *dev;
+
+	dev_dbg(&link->dev, "3c589_attach()\n");
+
+	/* Create new ethernet device */
+	dev = alloc_etherdev(sizeof(struct el3_private));
+	if (!dev)
+		return -ENOMEM;
+	lp = netdev_priv(dev);
+	link->priv = dev;
+	lp->p_dev = link;
+
+	spin_lock_init(&lp->lock);
+	link->resource[0]->end = 16;
+	link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+
+	link->config_flags |= CONF_ENABLE_IRQ;
+	link->config_index = 1;
+
+	dev->netdev_ops = &el3_netdev_ops;
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+
+	return tc589_config(link);
+}
+
+static void tc589_detach(struct pcmcia_device *link)
+{
+	struct net_device *dev = link->priv;
+
+	dev_dbg(&link->dev, "3c589_detach\n");
+
+	unregister_netdev(dev);
+
+	tc589_release(link);
+
+	free_netdev(dev);
+} /* tc589_detach */
+
+static int tc589_config(struct pcmcia_device *link)
+{
+	struct net_device *dev = link->priv;
+	__be16 *phys_addr;
+	int ret, i, j, multi = 0, fifo;
+	unsigned int ioaddr;
+	static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+	u8 *buf;
+	size_t len;
+
+	dev_dbg(&link->dev, "3c589_config\n");
+
+	phys_addr = (__be16 *)dev->dev_addr;
+	/* Is this a 3c562?
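+	   (The 3c562 is the multifunction LAN+modem sibling of this card; its
+	   LAN function must live at an I/O base of the form xx00-xx7f, which
+	   is why the probe loop below skips bases with bit 0x80 set when
+	   multi is true.)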
*/ + if (link->manf_id != MANFID_3COM) + dev_info(&link->dev, "hmmm, is this really a 3Com card??\n"); + multi = (link->card_id == PRODID_3COM_3C562); + + link->io_lines = 16; + + /* For the 3c562, the base address must be xx00-xx7f */ + for (i = j = 0; j < 0x400; j += 0x10) { + if (multi && (j & 0x80)) continue; + link->resource[0]->start = j ^ 0x300; + i = pcmcia_request_io(link); + if (i == 0) + break; + } + if (i != 0) + goto failed; + + ret = pcmcia_request_irq(link, el3_interrupt); + if (ret) + goto failed; + + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + ioaddr = dev->base_addr; + EL3WINDOW(0); + + /* The 3c589 has an extra EEPROM for configuration info, including + the hardware address. The 3c562 puts the address in the CIS. */ + len = pcmcia_get_tuple(link, 0x88, &buf); + if (buf && len >= 6) { + for (i = 0; i < 3; i++) + phys_addr[i] = htons(le16_to_cpu(buf[i*2])); + kfree(buf); + } else { + kfree(buf); /* 0 < len < 6 */ + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i)); + if (phys_addr[0] == htons(0x6060)) { + dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n", + dev->base_addr, dev->base_addr+15); + goto failed; + } + } + + /* The address and resource configuration register aren't loaded from + the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */ + outw(0x3f00, ioaddr + 8); + fifo = inl(ioaddr); + + /* The if_port symbol can be set when the module is loaded */ + if ((if_port >= 0) && (if_port <= 3)) + dev->if_port = if_port; + else + dev_err(&link->dev, "invalid if_port requested\n"); + + SET_NETDEV_DEV(dev, &link->dev); + + if (register_netdev(dev) != 0) { + dev_err(&link->dev, "register_netdev() failed\n"); + goto failed; + } + + netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n", + (multi ? "562" : "589"), dev->base_addr, dev->irq, + dev->dev_addr); + netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n", + (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], + if_names[dev->if_port]); + return 0; + +failed: + tc589_release(link); + return -ENODEV; +} /* tc589_config */ + +static void tc589_release(struct pcmcia_device *link) +{ + pcmcia_disable_device(link); +} + +static int tc589_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int tc589_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + tc589_reset(dev); + netif_device_attach(dev); + } + + return 0; +} + +/*====================================================================*/ + +/* + Use this for commands that may take time to finish +*/ +static void tc589_wait_for_completion(struct net_device *dev, int cmd) +{ + int i = 100; + outw(cmd, dev->base_addr + EL3_CMD); + while (--i > 0) + if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; + if (i == 0) + netdev_warn(dev, "command 0x%04x did not complete!\n", cmd); +} + +/* + Read a word from the EEPROM using the regular EEPROM access register. + Assume that we are in register window zero. +*/ +static u16 read_eeprom(unsigned int ioaddr, int index) +{ + int i; + outw(EEPROM_READ + index, ioaddr + 10); + /* Reading the eeprom takes 162 us */ + for (i = 1620; i >= 0; i--) + if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0) + break; + return inw(ioaddr + 12); +} + +/* + Set transceiver type, perhaps to something other than what the user + specified in dev->if_port. 
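+   Following if_names[] above: if_port 0 ("auto") and 1 ("10baseT") write 0
+   to the address-configuration register, 2 ("10base2") writes 3<<14 and
+   3 ("AUI") writes 1<<14; ports 0 and 1 additionally enable link beat and
+   jabber checking via MEDIA_TP.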
+*/ +static void tc589_set_xcvr(struct net_device *dev, int if_port) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + + EL3WINDOW(0); + switch (if_port) { + case 0: case 1: outw(0, ioaddr + 6); break; + case 2: outw(3<<14, ioaddr + 6); break; + case 3: outw(1<<14, ioaddr + 6); break; + } + /* On PCMCIA, this just turns on the LED */ + outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD); + /* 10baseT interface, enable link beat and jabber check. */ + EL3WINDOW(4); + outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA); + EL3WINDOW(1); + if (if_port == 2) + lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000); + else + lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800); +} + +static void dump_status(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + EL3WINDOW(1); + netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n", + inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS), + inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE)); + EL3WINDOW(4); + netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n", + inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08), + inw(ioaddr+0x0a)); + EL3WINDOW(1); +} + +/* Reset and restore all of the 3c589 registers. */ +static void tc589_reset(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int i; + + EL3WINDOW(0); + outw(0x0001, ioaddr + 4); /* Activate board. */ + outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */ + + /* Set the station address in window 2. */ + EL3WINDOW(2); + for (i = 0; i < 6; i++) + outb(dev->dev_addr[i], ioaddr + i); + + tc589_set_xcvr(dev, dev->if_port); + + /* Switch to the stats window, and clear all stats by reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + EL3WINDOW(6); + for (i = 0; i < 9; i++) + inb(ioaddr+i); + inw(ioaddr + 10); + inw(ioaddr + 12); + + /* Switch to register set 1 for normal use. */ + EL3WINDOW(1); + + set_rx_mode(dev); + outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. 
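+     (SetStatusEnb | 0xff just above makes every event visible in
+     EL3_STATUS; the SetIntrEnb mask below then selects which of those
+     events actually raise the interrupt line.)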
*/ + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull + | AdapterFailure, ioaddr + EL3_CMD); +} + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, +}; + +static int el3_config(struct net_device *dev, struct ifmap *map) +{ + if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { + if (map->port <= 3) { + dev->if_port = map->port; + netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); + tc589_set_xcvr(dev, dev->if_port); + } else + return -EINVAL; + } + return 0; +} + +static int el3_open(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + if (!pcmcia_dev_present(link)) + return -ENODEV; + + link->open++; + netif_start_queue(dev); + + tc589_reset(dev); + init_timer(&lp->media); + lp->media.function = media_check; + lp->media.data = (unsigned long) dev; + lp->media.expires = jiffies + HZ; + add_timer(&lp->media); + + dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", + dev->name, inw(dev->base_addr + EL3_STATUS)); + + return 0; +} + +static void el3_tx_timeout(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + + netdev_warn(dev, "Transmit timed out!\n"); + dump_status(dev); + dev->stats.tx_errors++; + dev->trans_start = jiffies; /* prevent tx timeout */ + /* Issue TX_RESET and TX_START commands. */ + tc589_wait_for_completion(dev, TxReset); + outw(TxEnable, ioaddr + EL3_CMD); + netif_wake_queue(dev); +} + +static void pop_tx_status(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int i; + + /* Clear the Tx status stack. */ + for (i = 32; i > 0; i--) { + u_char tx_status = inb(ioaddr + TX_STATUS); + if (!(tx_status & 0x84)) break; + /* reset transmitter on jabber error or underrun */ + if (tx_status & 0x30) + tc589_wait_for_completion(dev, TxReset); + if (tx_status & 0x38) { + netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status); + outw(TxEnable, ioaddr + EL3_CMD); + dev->stats.tx_aborted_errors++; + } + outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ + } +} + +static netdev_tx_t el3_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct el3_private *priv = netdev_priv(dev); + unsigned long flags; + + netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n", + (long)skb->len, inw(ioaddr + EL3_STATUS)); + + spin_lock_irqsave(&priv->lock, flags); + + dev->stats.tx_bytes += skb->len; + + /* Put out the doubleword header... */ + outw(skb->len, ioaddr + TX_FIFO); + outw(0x00, ioaddr + TX_FIFO); + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + + if (inw(ioaddr + TX_FREE) <= 1536) { + netif_stop_queue(dev); + /* Interrupt us when the FIFO has room for max-sized packet. */ + outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); + } + + pop_tx_status(dev); + spin_unlock_irqrestore(&priv->lock, flags); + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* The EL3 interrupt handler. 
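+   It loops while IntLatch, RxComplete or StatsFull stays asserted,
+   dispatches each event, and acks with AckIntr | IntReq | IntLatch;
+   as a safety valve it clears all interrupts and bails out after
+   roughly ten iterations.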
*/ +static irqreturn_t el3_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr; + __u16 status; + int i = 0, handled = 1; + + if (!netif_device_present(dev)) + return IRQ_NONE; + + ioaddr = dev->base_addr; + + netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS)); + + spin_lock(&lp->lock); + while ((status = inw(ioaddr + EL3_STATUS)) & + (IntLatch | RxComplete | StatsFull)) { + if ((status & 0xe000) != 0x2000) { + netdev_dbg(dev, "interrupt from dead card\n"); + handled = 0; + break; + } + if (status & RxComplete) + el3_rx(dev); + if (status & TxAvailable) { + netdev_dbg(dev, " TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue(dev); + } + if (status & TxComplete) + pop_tx_status(dev); + if (status & (AdapterFailure | RxEarly | StatsFull)) { + /* Handle all uncommon interrupts. */ + if (status & StatsFull) /* Empty statistics. */ + update_stats(dev); + if (status & RxEarly) { /* Rx early is unused. */ + el3_rx(dev); + outw(AckIntr | RxEarly, ioaddr + EL3_CMD); + } + if (status & AdapterFailure) { + u16 fifo_diag; + EL3WINDOW(4); + fifo_diag = inw(ioaddr + 4); + EL3WINDOW(1); + netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n", + fifo_diag); + if (fifo_diag & 0x0400) { + /* Tx overrun */ + tc589_wait_for_completion(dev, TxReset); + outw(TxEnable, ioaddr + EL3_CMD); + } + if (fifo_diag & 0x2000) { + /* Rx underrun */ + tc589_wait_for_completion(dev, RxReset); + set_rx_mode(dev); + outw(RxEnable, ioaddr + EL3_CMD); + } + outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); + } + } + if (++i > 10) { + netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n", + status); + /* Clear all interrupts */ + outw(AckIntr | 0xFF, ioaddr + EL3_CMD); + break; + } + /* Acknowledge the IRQ. */ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + } + lp->last_irq = jiffies; + spin_unlock(&lp->lock); + netdev_dbg(dev, "exiting interrupt, status %4.4x.\n", + inw(ioaddr + EL3_STATUS)); + return IRQ_RETVAL(handled); +} + +static void media_check(unsigned long arg) +{ + struct net_device *dev = (struct net_device *)(arg); + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u16 media, errs; + unsigned long flags; + + if (!netif_device_present(dev)) goto reschedule; + + /* Check for pending interrupt with expired latency timer: with + this, we can limp along even if the interrupt is blocked */ + if ((inw(ioaddr + EL3_STATUS) & IntLatch) && + (inb(ioaddr + EL3_TIMER) == 0xff)) { + if (!lp->fast_poll) + netdev_warn(dev, "interrupt(s) dropped!\n"); + + local_irq_save(flags); + el3_interrupt(dev->irq, dev); + local_irq_restore(flags); + + lp->fast_poll = HZ; + } + if (lp->fast_poll) { + lp->fast_poll--; + lp->media.expires = jiffies + HZ/100; + add_timer(&lp->media); + return; + } + + /* lp->lock guards the EL3 window. 
Window should always be 1 except + when the lock is held */ + spin_lock_irqsave(&lp->lock, flags); + EL3WINDOW(4); + media = inw(ioaddr+WN4_MEDIA) & 0xc810; + + /* Ignore collisions unless we've had no irq's recently */ + if (time_before(jiffies, lp->last_irq + HZ)) { + media &= ~0x0010; + } else { + /* Try harder to detect carrier errors */ + EL3WINDOW(6); + outw(StatsDisable, ioaddr + EL3_CMD); + errs = inb(ioaddr + 0); + outw(StatsEnable, ioaddr + EL3_CMD); + dev->stats.tx_carrier_errors += errs; + if (errs || (lp->media_status & 0x0010)) media |= 0x0010; + } + + if (media != lp->media_status) { + if ((media & lp->media_status & 0x8000) && + ((lp->media_status ^ media) & 0x0800)) + netdev_info(dev, "%s link beat\n", + (lp->media_status & 0x0800 ? "lost" : "found")); + else if ((media & lp->media_status & 0x4000) && + ((lp->media_status ^ media) & 0x0010)) + netdev_info(dev, "coax cable %s\n", + (lp->media_status & 0x0010 ? "ok" : "problem")); + if (dev->if_port == 0) { + if (media & 0x8000) { + if (media & 0x0800) + netdev_info(dev, "flipped to 10baseT\n"); + else + tc589_set_xcvr(dev, 2); + } else if (media & 0x4000) { + if (media & 0x0010) + tc589_set_xcvr(dev, 1); + else + netdev_info(dev, "flipped to 10base2\n"); + } + } + lp->media_status = media; + } + + EL3WINDOW(1); + spin_unlock_irqrestore(&lp->lock, flags); + +reschedule: + lp->media.expires = jiffies + HZ; + add_timer(&lp->media); +} + +static struct net_device_stats *el3_get_stats(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + struct pcmcia_device *link = lp->p_dev; + + if (pcmcia_dev_present(link)) { + spin_lock_irqsave(&lp->lock, flags); + update_stats(dev); + spin_unlock_irqrestore(&lp->lock, flags); + } + return &dev->stats; +} + +/* + Update statistics. We change to register window 6, so this should be run + single-threaded if the device is active. This is expected to be a rare + operation, and it's simpler for the rest of the driver to assume that + window 1 is always valid rather than use a special window-state variable. + + Caller must hold the lock for this +*/ +static void update_stats(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + + netdev_dbg(dev, "updating the statistics.\n"); + /* Turn off statistics updates while reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + /* Switch to the stats window, and read everything. */ + EL3WINDOW(6); + dev->stats.tx_carrier_errors += inb(ioaddr + 0); + dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); + /* Multiple collisions. */ inb(ioaddr + 2); + dev->stats.collisions += inb(ioaddr + 3); + dev->stats.tx_window_errors += inb(ioaddr + 4); + dev->stats.rx_fifo_errors += inb(ioaddr + 5); + dev->stats.tx_packets += inb(ioaddr + 6); + /* Rx packets */ inb(ioaddr + 7); + /* Tx deferrals */ inb(ioaddr + 8); + /* Rx octets */ inw(ioaddr + 10); + /* Tx octets */ inw(ioaddr + 12); + + /* Back to window 1, and turn statistics back on. */ + EL3WINDOW(1); + outw(StatsEnable, ioaddr + EL3_CMD); +} + +static int el3_rx(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int worklimit = 32; + short rx_status; + + netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n", + inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); + while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && + worklimit > 0) { + worklimit--; + if (rx_status & 0x4000) { /* Error, update stats. 
*/ + short error = rx_status & 0x3800; + dev->stats.rx_errors++; + switch (error) { + case 0x0000: dev->stats.rx_over_errors++; break; + case 0x0800: dev->stats.rx_length_errors++; break; + case 0x1000: dev->stats.rx_frame_errors++; break; + case 0x1800: dev->stats.rx_length_errors++; break; + case 0x2000: dev->stats.rx_frame_errors++; break; + case 0x2800: dev->stats.rx_crc_errors++; break; + } + } else { + short pkt_len = rx_status & 0x7ff; + struct sk_buff *skb; + + skb = dev_alloc_skb(pkt_len+5); + + netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + skb_reserve(skb, 2); + insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), + (pkt_len+3)>>2); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } else { + netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n", + pkt_len); + dev->stats.rx_dropped++; + } + } + /* Pop the top of the Rx FIFO */ + tc589_wait_for_completion(dev, RxDiscard); + } + if (worklimit == 0) + netdev_warn(dev, "too much work in el3_rx!\n"); + return 0; +} + +static void set_rx_mode(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + u16 opts = SetRxFilter | RxStation | RxBroadcast; + + if (dev->flags & IFF_PROMISC) + opts |= RxMulticast | RxProm; + else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) + opts |= RxMulticast; + outw(opts, ioaddr + EL3_CMD); +} + +static void set_multicast_list(struct net_device *dev) +{ + struct el3_private *priv = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + set_rx_mode(dev); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int el3_close(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + unsigned int ioaddr = dev->base_addr; + + dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); + + if (pcmcia_dev_present(link)) { + /* Turn off statistics ASAP. We update dev->stats below. */ + outw(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. */ + outw(RxDisable, ioaddr + EL3_CMD); + outw(TxDisable, ioaddr + EL3_CMD); + + if (dev->if_port == 2) + /* Turn off thinnet power. Green! */ + outw(StopCoax, ioaddr + EL3_CMD); + else if (dev->if_port == 1) { + /* Disable link beat and jabber */ + EL3WINDOW(4); + outw(0, ioaddr + WN4_MEDIA); + } + + /* Switching back to window 0 disables the IRQ. */ + EL3WINDOW(0); + /* But we explicitly zero the IRQ line select anyway. 
*/ + outw(0x0f00, ioaddr + WN0_IRQ); + + /* Check if the card still exists */ + if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000) + update_stats(dev); + } + + link->open--; + netif_stop_queue(dev); + del_timer_sync(&lp->media); + + return 0; +} + +static const struct pcmcia_device_id tc589_ids[] = { + PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0101, 0x0562), + PCMCIA_MFC_DEVICE_PROD_ID1(0, "Motorola MARQUIS", 0xf03e4e77), + PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0589), + PCMCIA_DEVICE_PROD_ID12("Farallon", "ENet", 0x58d93fc4, 0x992c2202), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0035, "cis/3CXEM556.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x003d, "cis/3CXEM556.cis"), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, tc589_ids); + +static struct pcmcia_driver tc589_driver = { + .owner = THIS_MODULE, + .name = "3c589_cs", + .probe = tc589_probe, + .remove = tc589_detach, + .id_table = tc589_ids, + .suspend = tc589_suspend, + .resume = tc589_resume, +}; + +static int __init init_tc589(void) +{ + return pcmcia_register_driver(&tc589_driver); +} + +static void __exit exit_tc589(void) +{ + pcmcia_unregister_driver(&tc589_driver); +} + +module_init(init_tc589); +module_exit(exit_tc589); diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c new file mode 100644 index 0000000..8cc2256 --- /dev/null +++ b/drivers/net/ethernet/3com/3c59x.c @@ -0,0 +1,3326 @@ +/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */ +/* + Written 1996-1999 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + This driver is for the 3Com "Vortex" and "Boomerang" series ethercards. + Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597 + and the EtherLink XL 3c900 and 3c905 cards. + + Problem reports and questions should be directed to + vortex@scyld.com + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + +*/ + +/* + * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation + * as well as other drivers + * + * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k + * due to dead code elimination. There will be some performance benefits from this due to + * elimination of all the tests and reduced cache footprint. + */ + + +#define DRV_NAME "3c59x" + + + +/* A few values that may be tweaked. */ +/* Keep the ring sizes a power of two for efficiency. */ +#define TX_RING_SIZE 16 +#define RX_RING_SIZE 32 +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ + +/* "Knobs" that adjust features and parameters. */ +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1512 effectively disables this feature. */ +#ifndef __arm__ +static int rx_copybreak = 200; +#else +/* ARM systems perform better by disregarding the bus-master + transfer capability of these cards. -- rmk */ +static int rx_copybreak = 1513; +#endif +/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ +static const int mtu = 1500; +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 32; +/* Tx timeout interval (millisecs) */ +static int watchdog = 5000; + +/* Allow aggregation of Tx interrupts. Saves CPU load at the cost + * of possible Tx stalls if the system is blocking interrupts + * somewhere else. 
Undefine this to disable.
+ */
+#define tx_interrupt_mitigation 1
+
+/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
+#define vortex_debug debug
+#ifdef VORTEX_DEBUG
+static int vortex_debug = VORTEX_DEBUG;
+#else
+static int vortex_debug = 1;
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/eisa.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <asm/irq.h>			/* For nr_irqs only. */
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
+   This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+#include <linux/delay.h>
+
+
+static const char version[] __devinitconst =
+	DRV_NAME ": Donald Becker and others.\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
+MODULE_LICENSE("GPL");
+
+
+/* Operational parameters that usually are not changed. */
+
+/* The Vortex size is twice that of the original EtherLinkIII series: the
+   runtime register window, window 1, is now always mapped in.
+   The Boomerang size is twice as large as the Vortex -- it has additional
+   bus master control registers. */
+#define VORTEX_TOTAL_SIZE 0x20
+#define BOOMERANG_TOTAL_SIZE 0x40
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with the original DP83840 on older 3c905 boards, so the
+   extra code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required;
+
+#define PFX DRV_NAME ": "
+
+
+
+/*
+		Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com FastEtherLink and FastEtherLink
+XL, 3Com's PCI to 10/100baseT adapters.  It also works with the 10Mbps
+versions of the FastEtherLink cards.  The supported product IDs are
+  3c590, 3c592, 3c595, 3c597, 3c900, 3c905
+
+The related ISA 3c515 is supported with a separate driver, 3c515.c, included
+with the kernel source or available from
+    cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board.  The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+
+The EEPROM settings for media type and forced-full-duplex are observed.
+The EEPROM media type should be left at the default "autoselect" unless using
+10base2 or AUI connections which cannot be reliably detected.
+
+III. Driver operation
+
+The 3c59x series use an interface that's very similar to the previous 3c5x9
+series.  The primary interface is two programmed-I/O FIFOs, with an
+alternate single-contiguous-region bus-master transfer (see next).
+
+The 3c900 "Boomerang" series uses a full-bus-master interface with separate
+lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+DEC Tulip and Intel Speedo3.  The first chip version retains a compatible
+programmed-I/O interface that has been removed in 'B' and subsequent board
+revisions.
+
+One extension that is advertised in a very large font is that the adapters
+are capable of being bus masters.  On the Vortex chip this capability was
+only for a single contiguous region making it far less useful than the full
+bus master capability.
There is a significant performance impact of taking +an extra interrupt or polling for the completion of each transfer, as well +as difficulty sharing the single transfer engine between the transmit and +receive threads. Using DMA transfers is a win only with large blocks or +with the flawed versions of the Intel Orion motherboard PCI controller. + +The Boomerang chip's full-bus-master interface is useful, and has the +currently-unused advantages over other similar chips that queued transmit +packets may be reordered and receive buffer groups are associated with a +single frame. + +With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme. +Rather than a fixed intermediate receive buffer, this scheme allocates +full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as +the copying breakpoint: it is chosen to trade-off the memory wasted by +passing the full-sized skbuff to the queue layer for all frames vs. the +copying cost of copying a frame to a correctly-sized skbuff. + +IIIC. Synchronization +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the +dev->tbusy flag. The other thread is the interrupt handler, which is single +threaded by the hardware and other software. + +IV. Notes + +Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development +3c590, 3c595, and 3c900 boards. +The name "Vortex" is the internal 3Com project name for the PCI ASIC, and +the EISA version is called "Demon". According to Terry these names come +from rides at the local amusement park. + +The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes! +This driver only supports ethernet packets because of the skbuff allocation +limit of 4K. +*/ + +/* This table drives the PCI probe routines. It's mostly boilerplate in all + of the drivers, and will likely be provided by some future kernel. 
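+   For example, the vortex_pci_tbl entry for device 0x10B7:0x9055 carries
+   CH_3C905B_1 as its driver_data, so the probe path can index
+   vortex_info_tbl[CH_3C905B_1] -- the "3c905B Cyclone 100baseTx" entry --
+   to pick up the device's name, feature flags and I/O size.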
+*/ +enum pci_flags_bit { + PCI_USES_MASTER=4, +}; + +enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8, + EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */ + HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100, + INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800, + EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000, + EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, }; + +enum vortex_chips { + CH_3C590 = 0, + CH_3C592, + CH_3C597, + CH_3C595_1, + CH_3C595_2, + + CH_3C595_3, + CH_3C900_1, + CH_3C900_2, + CH_3C900_3, + CH_3C900_4, + + CH_3C900_5, + CH_3C900B_FL, + CH_3C905_1, + CH_3C905_2, + CH_3C905B_TX, + CH_3C905B_1, + + CH_3C905B_2, + CH_3C905B_FX, + CH_3C905C, + CH_3C9202, + CH_3C980, + CH_3C9805, + + CH_3CSOHO100_TX, + CH_3C555, + CH_3C556, + CH_3C556B, + CH_3C575, + + CH_3C575_1, + CH_3CCFE575, + CH_3CCFE575CT, + CH_3CCFE656, + CH_3CCFEM656, + + CH_3CCFEM656_1, + CH_3C450, + CH_3C920, + CH_3C982A, + CH_3C982B, + + CH_905BT4, + CH_920B_EMB_WNM, +}; + + +/* note: this array directly indexed by above enums, and MUST + * be kept in sync with both the enums above, and the PCI device + * table below + */ +static struct vortex_chip_info { + const char *name; + int flags; + int drv_flags; + int io_size; +} vortex_info_tbl[] __devinitdata = { + {"3c590 Vortex 10Mbps", + PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ + PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ + PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c595 Vortex 100baseTx", + PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c595 Vortex 100baseT4", + PCI_USES_MASTER, IS_VORTEX, 32, }, + + {"3c595 Vortex 100base-MII", + PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c900 Boomerang 10baseT", + PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, + {"3c900 Boomerang 10Mbps Combo", + PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, + {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */ + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c900 Cyclone 10Mbps Combo", + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + + {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */ + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c900B-FL Cyclone 10base-FL", + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c905 Boomerang 100baseTx", + PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, + {"3c905 Boomerang 100baseT4", + PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, + {"3C905B-TX Fast Etherlink XL PCI", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + {"3c905B Cyclone 100baseTx", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + + {"3c905B Cyclone 10/100/BNC", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c905B-FX Cyclone 100baseFx", + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c905C Tornado", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", + PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, + {"3c980 Cyclone", + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + + {"3c980C Python-T", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3cSOHO100-TX Hurricane", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + {"3c555 Laptop Hurricane", + PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, }, + {"3c556 Laptop 
Tornado", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR| + HAS_HWCKSM, 128, }, + {"3c556B Laptop Hurricane", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| + WNO_XCVR_PWR|HAS_HWCKSM, 128, }, + + {"3c575 [Megahertz] 10/100 LAN CardBus", + PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, + {"3c575 Boomerang CardBus", + PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, + {"3CCFE575BT Cyclone CardBus", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT| + INVERT_LED_PWR|HAS_HWCKSM, 128, }, + {"3CCFE575CT Tornado CardBus", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, + {"3CCFE656 Cyclone CardBus", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + INVERT_LED_PWR|HAS_HWCKSM, 128, }, + + {"3CCFEM656B Cyclone+Winmodem CardBus", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + INVERT_LED_PWR|HAS_HWCKSM, 128, }, + {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */ + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, + {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */ + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c920 Tornado", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c982 Hydra Dual Port A", + PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, + + {"3c982 Hydra Dual Port B", + PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, + {"3c905B-T4", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + {"3c920B-EMB-WNM Tornado", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, + + {NULL,}, /* NULL terminated list. 
*/
+};
+
+
+static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
+	{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
+	{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
+	{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
+	{ 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
+	{ 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
+
+	{ 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
+	{ 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
+	{ 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
+	{ 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
+	{ 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
+
+	{ 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
+	{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
+	{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
+	{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
+	{ 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
+	{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
+
+	{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
+	{ 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
+	{ 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
+	{ 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 },
+	{ 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
+	{ 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
+
+	{ 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
+	{ 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
+	{ 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
+	{ 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
+	{ 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
+
+	{ 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
+	{ 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
+	{ 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
+	{ 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
+	{ 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
+
+	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
+	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
+	{ 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
+	{ 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A },
+	{ 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B },
+
+	{ 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 },
+	{ 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM },
+
+	{0,}					/* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
+
+
+/* Operational definitions.
+   These are not used by other compilation units and thus are not
+   exported in a ".h" file.
+
+   First the windows.  There are eight register windows, with the command
+   and status registers available in each.
+   */
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+   11 bits are the parameter, if applicable.
+   Note that 11 parameter bits were fine for Ethernet, but the new chip
+   can handle FDDI-length frames (~4500 octets), and parameters now count
+   32-bit 'Dwords' rather than octets. */
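+
+/* Purely illustrative aside -- el3_issue() is a hypothetical helper and
+ * not part of this driver: it only shows how an opcode and its 11-bit
+ * parameter combine into a single EL3_CMD write.
+ */
+static inline void el3_issue(void __iomem *ioaddr, unsigned int cmd,
+			     unsigned int param)
+{
+	/* opcode in bits 11-15; the parameter, when one is taken, in bits 0-10 */
+	iowrite16(cmd + (param & 0x7ff), ioaddr + EL3_CMD);
+}
+
+/* e.g. el3_issue(ioaddr, SetRxThreshold, 1536 >> 2) is equivalent to the
+   iowrite16(SetRxThreshold + (1536>>2), ...) writes used throughout the
+   code below.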
*/ + +enum vortex_cmd { + TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, + RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, + UpStall = 6<<11, UpUnstall = (6<<11)+1, + DownStall = (6<<11)+2, DownUnstall = (6<<11)+3, + RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, + FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, + SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, + SetTxThreshold = 18<<11, SetTxStart = 19<<11, + StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11, + StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,}; + +/* The SetRxFilter command accepts the following classes: */ +enum RxFilter { + RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; + +/* Bits in the general status register. */ +enum vortex_status { + IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004, + TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, + IntReq = 0x0040, StatsFull = 0x0080, + DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10, + DMAInProgress = 1<<11, /* DMA controller is still busy.*/ + CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/ +}; + +/* Register window 1 offsets, the window used in normal operation. + On the Vortex this window is always mapped at offsets 0x10-0x1f. */ +enum Window1 { + TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, + RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B, + TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */ +}; +enum Window0 { + Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */ + Wn0EepromData = 12, /* Window 0: EEPROM results register. */ + IntrStatus=0x0E, /* Valid in all windows. */ +}; +enum Win0_EEPROM_bits { + EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0, + EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ + EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ +}; +/* EEPROM locations. */ +enum eeprom_offset { + PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3, + EtherLink3ID=7, IFXcvrIO=8, IRQLine=9, + NodeAddr01=10, NodeAddr23=11, NodeAddr45=12, + DriverTune=13, Checksum=15}; + +enum Window2 { /* Window 2. */ + Wn2_ResetOptions=12, +}; +enum Window3 { /* Window 3: MAC/config bits. */ + Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8, +}; + +#define BFEXT(value, offset, bitcount) \ + ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1)) + +#define BFINS(lhs, rhs, offset, bitcount) \ + (((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) | \ + (((rhs) & ((1 << (bitcount)) - 1)) << (offset))) + +#define RAM_SIZE(v) BFEXT(v, 0, 3) +#define RAM_WIDTH(v) BFEXT(v, 3, 1) +#define RAM_SPEED(v) BFEXT(v, 4, 2) +#define ROM_SIZE(v) BFEXT(v, 6, 2) +#define RAM_SPLIT(v) BFEXT(v, 16, 2) +#define XCVR(v) BFEXT(v, 20, 4) +#define AUTOSELECT(v) BFEXT(v, 24, 1) + +enum Window4 { /* Window 4: Xcvr/media bits. */ + Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10, +}; +enum Win4_Media_bits { + Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */ + Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */ + Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */ + Media_LnkBeat = 0x0800, +}; +enum Window7 { /* Window 7: Bus Master control. */ + Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6, + Wn7_MasterStatus = 12, +}; +/* Boomerang bus master control registers. 
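+   (One purely illustrative aside before them: config_with_xcvr() is a
+   hypothetical helper, not part of the driver, showing how the
+   BFEXT()/BFINS() macros above are meant to be used.)
+ */
+static inline unsigned int config_with_xcvr(unsigned int config,
+					    unsigned int xcvr)
+{
+	/* rewrite bits 20-23 of Wn3_Config -- the field that XCVR() reads
+	   back; the media-selection code below does exactly this with
+	   BFINS(config, dev->if_port, 20, 4) */
+	return BFINS(config, xcvr, 20, 4);
+}
+
+/* Boomerang bus master control registers. */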
+enum MasterCtrl {
+	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
+	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
+};
+
+/* The Rx and Tx descriptor lists.
+   Caution Alpha hackers: these types are 32 bits!  Note also the 8-byte
+   alignment constraint on tx_ring[] and rx_ring[]. */
+#define LAST_FRAG	0x80000000	/* Last Addr/Len pair in descriptor. */
+#define DN_COMPLETE	0x00010000	/* This packet has been downloaded */
+struct boom_rx_desc {
+	__le32 next;	/* Last entry points to 0. */
+	__le32 status;
+	__le32 addr;	/* Up to 63 addr/len pairs possible. */
+	__le32 length;	/* Set LAST_FRAG to indicate last pair. */
+};
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+	RxDComplete=0x00008000, RxDError=0x4000,
+	/* See boomerang_rx() for actual error bits */
+	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
+	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
+};
+
+#ifdef MAX_SKB_FRAGS
+#define DO_ZEROCOPY 1
+#else
+#define DO_ZEROCOPY 0
+#endif
+
+struct boom_tx_desc {
+	__le32 next;	/* Last entry points to 0. */
+	__le32 status;	/* bits 0:12 length, others see below. */
+#if DO_ZEROCOPY
+	struct {
+		__le32 addr;
+		__le32 length;
+	} frag[1+MAX_SKB_FRAGS];
+#else
+	__le32 addr;
+	__le32 length;
+#endif
+};
+
+/* Values for the Tx status entry. */
+enum tx_desc_status {
+	CRCDisable=0x2000, TxDComplete=0x8000,
+	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
+	TxIntrUploaded=0x80000000,	/* IRQ when in FIFO, but maybe not sent. */
+};
+
+/* Chip features we care about in vp->capabilities, read from the EEPROM. */
+enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
+
+struct vortex_extra_stats {
+	unsigned long tx_deferred;
+	unsigned long tx_max_collisions;
+	unsigned long tx_multiple_collisions;
+	unsigned long tx_single_collisions;
+	unsigned long rx_bad_ssd;
+};
+
+struct vortex_private {
+	/* The Rx and Tx rings should be quad-word-aligned. */
+	struct boom_rx_desc* rx_ring;
+	struct boom_tx_desc* tx_ring;
+	dma_addr_t rx_ring_dma;
+	dma_addr_t tx_ring_dma;
+	/* The addresses of transmit- and receive-in-place skbuffs. */
+	struct sk_buff* rx_skbuff[RX_RING_SIZE];
+	struct sk_buff* tx_skbuff[TX_RING_SIZE];
+	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
+	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
+	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
+	struct sk_buff *tx_skb;			/* Packet being eaten by bus master ctrl. */
+	dma_addr_t tx_skb_dma;			/* Allocated DMA address for bus master ctrl DMA. */
+
+	/* PCI configuration space information. */
+	struct device *gendev;
+	void __iomem *ioaddr;			/* IO address space */
+	void __iomem *cb_fn_base;		/* CardBus function status addr space. */
+
+	/* Some values here only for performance evaluation and path-coverage */
+	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
+	int card_idx;
+
+	/* The remainder are related to chip state, mostly media selection. */
+	struct timer_list timer;		/* Media selection timer. */
+	struct timer_list rx_oom_timer;		/* Rx skb allocation retry timer */
+	int options;				/* User-settable misc. driver options. */
+	unsigned int media_override:4,		/* Passed-in media type. */
+		default_media:4,		/* Read from the EEPROM/Wn3_Config. */
+		full_duplex:1, autoselect:1,
+		bus_master:1,			/* Vortex can only do a fragment bus-master. 
*/ + full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */ + flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */ + partner_flow_ctrl:1, /* Partner supports flow control */ + has_nway:1, + enable_wol:1, /* Wake-on-LAN is enabled */ + pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ + open:1, + medialock:1, + must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ + large_frames:1, /* accept large frames */ + handling_irq:1; /* private in_irq indicator */ + /* {get|set}_wol operations are already serialized by rtnl. + * no additional locking is required for the enable_wol and acpi_set_WOL() + */ + int drv_flags; + u16 status_enable; + u16 intr_enable; + u16 available_media; /* From Wn3_Options. */ + u16 capabilities, info1, info2; /* Various, from EEPROM. */ + u16 advertising; /* NWay media advertisement */ + unsigned char phys[2]; /* MII device addresses. */ + u16 deferred; /* Resend these interrupts when we + * bale from the ISR */ + u16 io_size; /* Size of PCI region (for release_region) */ + + /* Serialises access to hardware other than MII and variables below. + * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */ + spinlock_t lock; + + spinlock_t mii_lock; /* Serialises access to MII */ + struct mii_if_info mii; /* MII lib hooks/info */ + spinlock_t window_lock; /* Serialises access to windowed regs */ + int window; /* Register window */ +}; + +static void window_set(struct vortex_private *vp, int window) +{ + if (window != vp->window) { + iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD); + vp->window = window; + } +} + +#define DEFINE_WINDOW_IO(size) \ +static u ## size \ +window_read ## size(struct vortex_private *vp, int window, int addr) \ +{ \ + unsigned long flags; \ + u ## size ret; \ + spin_lock_irqsave(&vp->window_lock, flags); \ + window_set(vp, window); \ + ret = ioread ## size(vp->ioaddr + addr); \ + spin_unlock_irqrestore(&vp->window_lock, flags); \ + return ret; \ +} \ +static void \ +window_write ## size(struct vortex_private *vp, u ## size value, \ + int window, int addr) \ +{ \ + unsigned long flags; \ + spin_lock_irqsave(&vp->window_lock, flags); \ + window_set(vp, window); \ + iowrite ## size(value, vp->ioaddr + addr); \ + spin_unlock_irqrestore(&vp->window_lock, flags); \ +} +DEFINE_WINDOW_IO(8) +DEFINE_WINDOW_IO(16) +DEFINE_WINDOW_IO(32) + +#ifdef CONFIG_PCI +#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL) +#else +#define DEVICE_PCI(dev) NULL +#endif + +#define VORTEX_PCI(vp) \ + ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)) + +#ifdef CONFIG_EISA +#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL) +#else +#define DEVICE_EISA(dev) NULL +#endif + +#define VORTEX_EISA(vp) \ + ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)) + +/* The action to take with a media selection timer tick. + Note that we deviate from the 3Com order by checking 10base2 before AUI. + */ +enum xcvr_types { + XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx, + XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, +}; + +static const struct media_table { + char *name; + unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ + mask:8, /* The transceiver-present bit in Wn3_Config.*/ + next:8; /* The media type to try next. */ + int wait; /* Time before we check media status. 
*/ +} media_tbl[] = { + { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10}, + { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10}, + { "undefined", 0, 0x80, XCVR_10baseT, 10000}, + { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10}, + { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10}, + { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10}, + { "MII", 0, 0x41, XCVR_10baseT, 3*HZ }, + { "undefined", 0, 0x01, XCVR_10baseT, 10000}, + { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ}, + { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ }, + { "Default", 0, 0xFF, XCVR_10baseT, 10000}, +}; + +static struct { + const char str[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "tx_deferred" }, + { "tx_max_collisions" }, + { "tx_multiple_collisions" }, + { "tx_single_collisions" }, + { "rx_bad_ssd" }, +}; + +/* number of ETHTOOL_GSTATS u64's */ +#define VORTEX_NUM_STATS 5 + +static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, + int chip_idx, int card_idx); +static int vortex_up(struct net_device *dev); +static void vortex_down(struct net_device *dev, int final); +static int vortex_open(struct net_device *dev); +static void mdio_sync(struct vortex_private *vp, int bits); +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *vp, int phy_id, int location, int value); +static void vortex_timer(unsigned long arg); +static void rx_oom_timer(unsigned long arg); +static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int vortex_rx(struct net_device *dev); +static int boomerang_rx(struct net_device *dev); +static irqreturn_t vortex_interrupt(int irq, void *dev_id); +static irqreturn_t boomerang_interrupt(int irq, void *dev_id); +static int vortex_close(struct net_device *dev); +static void dump_tx_ring(struct net_device *dev); +static void update_stats(void __iomem *ioaddr, struct net_device *dev); +static struct net_device_stats *vortex_get_stats(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +#ifdef CONFIG_PCI +static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +#endif +static void vortex_tx_timeout(struct net_device *dev); +static void acpi_set_WOL(struct net_device *dev); +static const struct ethtool_ops vortex_ethtool_ops; +static void set_8021q_mode(struct net_device *dev, int enable); + +/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ +/* Option count limit only -- unlimited interfaces are supported. */ +#define MAX_UNITS 8 +static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 }; +static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; +static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; +static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; +static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; +static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; +static int global_options = -1; +static int global_full_duplex = -1; +static int global_enable_wol = -1; +static int global_use_mmio = -1; + +/* Variables to work-around the Compaq PCI BIOS32 problem. 
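+   (An aside first: unit_option() is a hypothetical helper, not used by
+   the driver; it merely sketches the per-unit/global fallback that the
+   probe code applies to options[], full_duplex[], use_mmio[] and the
+   other per-NIC arrays above.)
+ */
+static inline int unit_option(int unit, const int *per_unit, int global)
+{
+	/* a per-unit value >= 0 wins; otherwise fall back to the global */
+	if (unit < MAX_UNITS && per_unit[unit] >= 0)
+		return per_unit[unit];
+	return global;
+}
+
+/* Variables to work around the Compaq PCI BIOS32 problem. 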
*/ +static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; +static struct net_device *compaq_net_device; + +static int vortex_cards_found; + +module_param(debug, int, 0); +module_param(global_options, int, 0); +module_param_array(options, int, NULL, 0); +module_param(global_full_duplex, int, 0); +module_param_array(full_duplex, int, NULL, 0); +module_param_array(hw_checksums, int, NULL, 0); +module_param_array(flow_ctrl, int, NULL, 0); +module_param(global_enable_wol, int, 0); +module_param_array(enable_wol, int, NULL, 0); +module_param(rx_copybreak, int, 0); +module_param(max_interrupt_work, int, 0); +module_param(compaq_ioaddr, int, 0); +module_param(compaq_irq, int, 0); +module_param(compaq_device_id, int, 0); +module_param(watchdog, int, 0); +module_param(global_use_mmio, int, 0); +module_param_array(use_mmio, int, NULL, 0); +MODULE_PARM_DESC(debug, "3c59x debug level (0-6)"); +MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex"); +MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset"); +MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)"); +MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset"); +MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)"); +MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)"); +MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)"); +MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset"); +MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt"); +MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)"); +MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)"); +MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)"); +MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds"); +MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset"); +MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)"); + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void poll_vortex(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + unsigned long flags; + local_irq_save(flags); + (vp->full_bus_master_rx ? 
boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
+	local_irq_restore(flags);
+}
+#endif
+
+#ifdef CONFIG_PM
+
+static int vortex_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *ndev = pci_get_drvdata(pdev);
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	netif_device_detach(ndev);
+	vortex_down(ndev, 1);
+
+	return 0;
+}
+
+static int vortex_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	int err;
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	err = vortex_up(ndev);
+	if (err)
+		return err;
+
+	netif_device_attach(ndev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops vortex_pm_ops = {
+	.suspend = vortex_suspend,
+	.resume = vortex_resume,
+	.freeze = vortex_suspend,
+	.thaw = vortex_resume,
+	.poweroff = vortex_suspend,
+	.restore = vortex_resume,
+};
+
+#define VORTEX_PM_OPS (&vortex_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define VORTEX_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id vortex_eisa_ids[] = {
+	{ "TCM5920", CH_3C592 },
+	{ "TCM5970", CH_3C597 },
+	{ "" }
+};
+MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
+
+static int __init vortex_eisa_probe(struct device *device)
+{
+	void __iomem *ioaddr;
+	struct eisa_device *edev;
+
+	edev = to_eisa_device(device);
+
+	if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
+		return -EBUSY;
+
+	ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE);
+
+	if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
+					  edev->id.driver_data, vortex_cards_found)) {
+		release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
+		return -ENODEV;
+	}
+
+	vortex_cards_found++;
+
+	return 0;
+}
+
+static int __devexit vortex_eisa_remove(struct device *device)
+{
+	struct eisa_device *edev;
+	struct net_device *dev;
+	struct vortex_private *vp;
+	void __iomem *ioaddr;
+
+	edev = to_eisa_device(device);
+	dev = eisa_get_drvdata(edev);
+
+	if (!dev) {
+		pr_err("vortex_eisa_remove called for Compaq device!\n");
+		BUG();
+	}
+
+	vp = netdev_priv(dev);
+	ioaddr = vp->ioaddr;
+
+	unregister_netdev(dev);
+	iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
+	release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
+
+	free_netdev(dev);
+	return 0;
+}
+
+static struct eisa_driver vortex_eisa_driver = {
+	.id_table = vortex_eisa_ids,
+	.driver   = {
+		.name    = "3c59x",
+		.probe   = vortex_eisa_probe,
+		.remove  = __devexit_p(vortex_eisa_remove)
+	}
+};
+
+#endif /* CONFIG_EISA */
+
+/* returns count found (>= 0), or negative on error */
+static int __init vortex_eisa_init(void)
+{
+	int eisa_found = 0;
+	int orig_cards_found = vortex_cards_found;
+
+#ifdef CONFIG_EISA
+	int err;
+
+	err = eisa_driver_register (&vortex_eisa_driver);
+	if (!err) {
+		/*
+		 * Because of the way the EISA bus is probed, we cannot assume
+		 * that any devices have been found when we exit from
+		 * eisa_driver_register (the bus root driver may not be
+		 * initialized yet). So we blindly assume something was
+		 * found, and let the sysfs magic happen...
+		 */
+		eisa_found = 1;
+	}
+#endif
+
+	/* Special code to work around the Compaq PCI BIOS32 problem. 
*/ + if (compaq_ioaddr) { + vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE), + compaq_irq, compaq_device_id, vortex_cards_found++); + } + + return vortex_cards_found - orig_cards_found + eisa_found; +} + +/* returns count (>= 0), or negative on error */ +static int __devinit vortex_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int rc, unit, pci_bar; + struct vortex_chip_info *vci; + void __iomem *ioaddr; + + /* wake up and enable device */ + rc = pci_enable_device(pdev); + if (rc < 0) + goto out; + + unit = vortex_cards_found; + + if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { + /* Determine the default if the user didn't override us */ + vci = &vortex_info_tbl[ent->driver_data]; + pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0; + } else if (unit < MAX_UNITS && use_mmio[unit] >= 0) + pci_bar = use_mmio[unit] ? 1 : 0; + else + pci_bar = global_use_mmio ? 1 : 0; + + ioaddr = pci_iomap(pdev, pci_bar, 0); + if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ + ioaddr = pci_iomap(pdev, 0, 0); + if (!ioaddr) { + pci_disable_device(pdev); + rc = -ENOMEM; + goto out; + } + + rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, + ent->driver_data, unit); + if (rc < 0) { + pci_iounmap(pdev, ioaddr); + pci_disable_device(pdev); + goto out; + } + + vortex_cards_found++; + +out: + return rc; +} + +static const struct net_device_ops boomrang_netdev_ops = { + .ndo_open = vortex_open, + .ndo_stop = vortex_close, + .ndo_start_xmit = boomerang_start_xmit, + .ndo_tx_timeout = vortex_tx_timeout, + .ndo_get_stats = vortex_get_stats, +#ifdef CONFIG_PCI + .ndo_do_ioctl = vortex_ioctl, +#endif + .ndo_set_multicast_list = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_vortex, +#endif +}; + +static const struct net_device_ops vortex_netdev_ops = { + .ndo_open = vortex_open, + .ndo_stop = vortex_close, + .ndo_start_xmit = vortex_start_xmit, + .ndo_tx_timeout = vortex_tx_timeout, + .ndo_get_stats = vortex_get_stats, +#ifdef CONFIG_PCI + .ndo_do_ioctl = vortex_ioctl, +#endif + .ndo_set_multicast_list = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_vortex, +#endif +}; + +/* + * Start up the PCI/EISA device which is described by *gendev. + * Return 0 on success. 
+ *
+ * NOTE: pdev can be NULL, for the case of a Compaq device
+ */
+static int __devinit vortex_probe1(struct device *gendev,
+				   void __iomem *ioaddr, int irq,
+				   int chip_idx, int card_idx)
+{
+	struct vortex_private *vp;
+	int option;
+	unsigned int eeprom[0x40], checksum = 0;	/* EEPROM contents */
+	int i, step;
+	struct net_device *dev;
+	static int printed_version;
+	int retval, print_info;
+	struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
+	const char *print_name = "3c59x";
+	struct pci_dev *pdev = NULL;
+	struct eisa_device *edev = NULL;
+
+	if (!printed_version) {
+		pr_info("%s", version);
+		printed_version = 1;
+	}
+
+	if (gendev) {
+		if ((pdev = DEVICE_PCI(gendev))) {
+			print_name = pci_name(pdev);
+		}
+
+		if ((edev = DEVICE_EISA(gendev))) {
+			print_name = dev_name(&edev->dev);
+		}
+	}
+
+	dev = alloc_etherdev(sizeof(*vp));
+	retval = -ENOMEM;
+	if (!dev) {
+		pr_err(PFX "unable to allocate etherdev, aborting\n");
+		goto out;
+	}
+	SET_NETDEV_DEV(dev, gendev);
+	vp = netdev_priv(dev);
+
+	option = global_options;
+
+	/* The lower four bits are the media type. */
+	if (dev->mem_start) {
+		/*
+		 * The 'options' param is passed in as the third arg to the
+		 * LILO 'ether=' argument for non-modular use
+		 */
+		option = dev->mem_start;
+	}
+	else if (card_idx < MAX_UNITS) {
+		if (options[card_idx] >= 0)
+			option = options[card_idx];
+	}
+
+	if (option > 0) {
+		if (option & 0x8000)
+			vortex_debug = 7;
+		if (option & 0x4000)
+			vortex_debug = 2;
+		if (option & 0x0400)
+			vp->enable_wol = 1;
+	}
+
+	print_info = (vortex_debug > 1);
+	if (print_info)
+		pr_info("See Documentation/networking/vortex.txt\n");
+
+	pr_info("%s: 3Com %s %s at %p.\n",
+	       print_name,
+	       pdev ? "PCI" : "EISA",
+	       vci->name,
+	       ioaddr);
+
+	dev->base_addr = (unsigned long)ioaddr;
+	dev->irq = irq;
+	dev->mtu = mtu;
+	vp->ioaddr = ioaddr;
+	vp->large_frames = mtu > 1500;
+	vp->drv_flags = vci->drv_flags;
+	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
+	vp->io_size = vci->io_size;
+	vp->card_idx = card_idx;
+	vp->window = -1;
+
+	/* module list only for Compaq device */
+	if (gendev == NULL) {
+		compaq_net_device = dev;
+	}
+
+	/* PCI-only startup logic */
+	if (pdev) {
+		/* EISA resources already marked, so only PCI needs to do this here */
+		/* Ignore return value, because Cardbus drivers already allocate for us */
+		if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
+			vp->must_free_region = 1;
+
+		/* enable bus-mastering if necessary */
+		if (vci->flags & PCI_USES_MASTER)
+			pci_set_master(pdev);
+
+		if (vci->drv_flags & IS_VORTEX) {
+			u8 pci_latency;
+			u8 new_latency = 248;
+
+			/* Check the PCI latency value.  On the 3c590 series the latency timer
+			   must be set to the maximum value to avoid data corruption that occurs
+			   when the timer expires during a transfer.  This bug exists in the
+			   Vortex chip only. */
+			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+			if (pci_latency < new_latency) {
+				pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n",
+					print_name, pci_latency, new_latency);
+				pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
+			}
+		}
+	}
+
+	spin_lock_init(&vp->lock);
+	spin_lock_init(&vp->mii_lock);
+	spin_lock_init(&vp->window_lock);
+	vp->gendev = gendev;
+	vp->mii.dev = dev;
+	vp->mii.mdio_read = mdio_read;
+	vp->mii.mdio_write = mdio_write;
+	vp->mii.phy_id_mask = 0x1f;
+	vp->mii.reg_num_mask = 0x1f;
+
+	/* Make sure the rings are at least 16-byte aligned. 
*/ + vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + &vp->rx_ring_dma); + retval = -ENOMEM; + if (!vp->rx_ring) + goto free_region; + + vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); + vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; + + /* if we are a PCI driver, we store info in pdev->driver_data + * instead of a module list */ + if (pdev) + pci_set_drvdata(pdev, dev); + if (edev) + eisa_set_drvdata(edev, dev); + + vp->media_override = 7; + if (option >= 0) { + vp->media_override = ((option & 7) == 2) ? 0 : option & 15; + if (vp->media_override != 7) + vp->medialock = 1; + vp->full_duplex = (option & 0x200) ? 1 : 0; + vp->bus_master = (option & 16) ? 1 : 0; + } + + if (global_full_duplex > 0) + vp->full_duplex = 1; + if (global_enable_wol > 0) + vp->enable_wol = 1; + + if (card_idx < MAX_UNITS) { + if (full_duplex[card_idx] > 0) + vp->full_duplex = 1; + if (flow_ctrl[card_idx] > 0) + vp->flow_ctrl = 1; + if (enable_wol[card_idx] > 0) + vp->enable_wol = 1; + } + + vp->mii.force_media = vp->full_duplex; + vp->options = option; + /* Read the station address from the EEPROM. */ + { + int base; + + if (vci->drv_flags & EEPROM_8BIT) + base = 0x230; + else if (vci->drv_flags & EEPROM_OFFSET) + base = EEPROM_Read + 0x30; + else + base = EEPROM_Read; + + for (i = 0; i < 0x40; i++) { + int timer; + window_write16(vp, base + i, 0, Wn0EepromCmd); + /* Pause for at least 162 us. for the read to take place. */ + for (timer = 10; timer >= 0; timer--) { + udelay(162); + if ((window_read16(vp, 0, Wn0EepromCmd) & + 0x8000) == 0) + break; + } + eeprom[i] = window_read16(vp, 0, Wn0EepromData); + } + } + for (i = 0; i < 0x18; i++) + checksum ^= eeprom[i]; + checksum = (checksum ^ (checksum >> 8)) & 0xff; + if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */ + while (i < 0x21) + checksum ^= eeprom[i++]; + checksum = (checksum ^ (checksum >> 8)) & 0xff; + } + if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) + pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); + for (i = 0; i < 3; i++) + ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + if (print_info) + pr_cont(" %pM", dev->dev_addr); + /* Unfortunately an all zero eeprom passes the checksum and this + gets found in the wild in failure cases. Crypto is hard 8) */ + if (!is_valid_ether_addr(dev->dev_addr)) { + retval = -EINVAL; + pr_err("*** EEPROM MAC address is invalid.\n"); + goto free_ring; /* With every pack */ + } + for (i = 0; i < 6; i++) + window_write8(vp, dev->dev_addr[i], 2, i); + + if (print_info) + pr_cont(", IRQ %d\n", dev->irq); + /* Tell them about an invalid IRQ. */ + if (dev->irq <= 0 || dev->irq >= nr_irqs) + pr_warning(" *** Warning: IRQ %d is unlikely to work! 
***\n", + dev->irq); + + step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1; + if (print_info) { + pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n", + eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14], + step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9); + } + + + if (pdev && vci->drv_flags & HAS_CB_FNS) { + unsigned short n; + + vp->cb_fn_base = pci_iomap(pdev, 2, 0); + if (!vp->cb_fn_base) { + retval = -ENOMEM; + goto free_ring; + } + + if (print_info) { + pr_info("%s: CardBus functions mapped %16.16llx->%p\n", + print_name, + (unsigned long long)pci_resource_start(pdev, 2), + vp->cb_fn_base); + } + + n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; + if (vp->drv_flags & INVERT_LED_PWR) + n |= 0x10; + if (vp->drv_flags & INVERT_MII_PWR) + n |= 0x4000; + window_write16(vp, n, 2, Wn2_ResetOptions); + if (vp->drv_flags & WNO_XCVR_PWR) { + window_write16(vp, 0x0800, 0, 0); + } + } + + /* Extract our information from the EEPROM data. */ + vp->info1 = eeprom[13]; + vp->info2 = eeprom[15]; + vp->capabilities = eeprom[16]; + + if (vp->info1 & 0x8000) { + vp->full_duplex = 1; + if (print_info) + pr_info("Full duplex capable\n"); + } + + { + static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; + unsigned int config; + vp->available_media = window_read16(vp, 3, Wn3_Options); + if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */ + vp->available_media = 0x40; + config = window_read32(vp, 3, Wn3_Config); + if (print_info) { + pr_debug(" Internal config register is %4.4x, transceivers %#x.\n", + config, window_read16(vp, 3, Wn3_Options)); + pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", + 8 << RAM_SIZE(config), + RAM_WIDTH(config) ? "word" : "byte", + ram_split[RAM_SPLIT(config)], + AUTOSELECT(config) ? "autoselect/" : "", + XCVR(config) > XCVR_ExtMII ? "" : + media_tbl[XCVR(config)].name); + } + vp->default_media = XCVR(config); + if (vp->default_media == XCVR_NWAY) + vp->has_nway = 1; + vp->autoselect = AUTOSELECT(config); + } + + if (vp->media_override != 7) { + pr_info("%s: Media override to transceiver type %d (%s).\n", + print_name, vp->media_override, + media_tbl[vp->media_override].name); + dev->if_port = vp->media_override; + } else + dev->if_port = vp->default_media; + + if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) || + dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { + int phy, phy_idx = 0; + mii_preamble_required++; + if (vp->drv_flags & EXTRA_PREAMBLE) + mii_preamble_required++; + mdio_sync(vp, 32); + mdio_read(dev, 24, MII_BMSR); + for (phy = 0; phy < 32 && phy_idx < 1; phy++) { + int mii_status, phyx; + + /* + * For the 3c905CX we look at index 24 first, because it bogusly + * reports an external PHY at all indices + */ + if (phy == 0) + phyx = 24; + else if (phy <= 24) + phyx = phy - 1; + else + phyx = phy; + mii_status = mdio_read(dev, phyx, MII_BMSR); + if (mii_status && mii_status != 0xffff) { + vp->phys[phy_idx++] = phyx; + if (print_info) { + pr_info(" MII transceiver found at address %d, status %4x.\n", + phyx, mii_status); + } + if ((mii_status & 0x0040) == 0) + mii_preamble_required++; + } + } + mii_preamble_required--; + if (phy_idx == 0) { + pr_warning(" ***WARNING*** No MII transceivers found!\n"); + vp->phys[0] = 24; + } else { + vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE); + if (vp->full_duplex) { + /* Only advertise the FD media types. 
*/ + vp->advertising &= ~0x02A0; + mdio_write(dev, vp->phys[0], 4, vp->advertising); + } + } + vp->mii.phy_id = vp->phys[0]; + } + + if (vp->capabilities & CapBusMaster) { + vp->full_bus_master_tx = 1; + if (print_info) { + pr_info(" Enabling bus-master transmits and %s receives.\n", + (vp->info2 & 1) ? "early" : "whole-frame" ); + } + vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2; + vp->bus_master = 0; /* AKPM: vortex only */ + } + + /* The 3c59x-specific entries in the device structure. */ + if (vp->full_bus_master_tx) { + dev->netdev_ops = &boomrang_netdev_ops; + /* Actually, it still should work with iommu. */ + if (card_idx < MAX_UNITS && + ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) || + hw_checksums[card_idx] == 1)) { + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; + } + } else + dev->netdev_ops = &vortex_netdev_ops; + + if (print_info) { + pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n", + print_name, + (dev->features & NETIF_F_SG) ? "en":"dis", + (dev->features & NETIF_F_IP_CSUM) ? "en":"dis"); + } + + dev->ethtool_ops = &vortex_ethtool_ops; + dev->watchdog_timeo = (watchdog * HZ) / 1000; + + if (pdev) { + vp->pm_state_valid = 1; + pci_save_state(VORTEX_PCI(vp)); + acpi_set_WOL(dev); + } + retval = register_netdev(dev); + if (retval == 0) + return 0; + +free_ring: + pci_free_consistent(pdev, + sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + vp->rx_ring, + vp->rx_ring_dma); +free_region: + if (vp->must_free_region) + release_region(dev->base_addr, vci->io_size); + free_netdev(dev); + pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); +out: + return retval; +} + +static void +issue_and_wait(struct net_device *dev, int cmd) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int i; + + iowrite16(cmd, ioaddr + EL3_CMD); + for (i = 0; i < 2000; i++) { + if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) + return; + } + + /* OK, that didn't work. Do it the slow way. One second */ + for (i = 0; i < 100000; i++) { + if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) { + if (vortex_debug > 1) + pr_info("%s: command 0x%04x took %d usecs\n", + dev->name, cmd, i * 10); + return; + } + udelay(10); + } + pr_err("%s: command 0x%04x did not complete! Status=0x%x\n", + dev->name, cmd, ioread16(ioaddr + EL3_STATUS)); +} + +static void +vortex_set_duplex(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + + pr_info("%s: setting %s-duplex.\n", + dev->name, (vp->full_duplex) ? "full" : "half"); + + /* Set the full-duplex bit. */ + window_write16(vp, + ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | + (vp->large_frames ? 0x40 : 0) | + ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 
+ 0x100 : 0), + 3, Wn3_MAC_Ctrl); +} + +static void vortex_check_media(struct net_device *dev, unsigned int init) +{ + struct vortex_private *vp = netdev_priv(dev); + unsigned int ok_to_print = 0; + + if (vortex_debug > 3) + ok_to_print = 1; + + if (mii_check_media(&vp->mii, ok_to_print, init)) { + vp->full_duplex = vp->mii.full_duplex; + vortex_set_duplex(dev); + } else if (init) { + vortex_set_duplex(dev); + } +} + +static int +vortex_up(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + unsigned int config; + int i, mii_reg1, mii_reg5, err = 0; + + if (VORTEX_PCI(vp)) { + pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ + if (vp->pm_state_valid) + pci_restore_state(VORTEX_PCI(vp)); + err = pci_enable_device(VORTEX_PCI(vp)); + if (err) { + pr_warning("%s: Could not enable device\n", + dev->name); + goto err_out; + } + } + + /* Before initializing select the active media port. */ + config = window_read32(vp, 3, Wn3_Config); + + if (vp->media_override != 7) { + pr_info("%s: Media override to transceiver %d (%s).\n", + dev->name, vp->media_override, + media_tbl[vp->media_override].name); + dev->if_port = vp->media_override; + } else if (vp->autoselect) { + if (vp->has_nway) { + if (vortex_debug > 1) + pr_info("%s: using NWAY device table, not %d\n", + dev->name, dev->if_port); + dev->if_port = XCVR_NWAY; + } else { + /* Find first available media type, starting with 100baseTx. */ + dev->if_port = XCVR_100baseTx; + while (! (vp->available_media & media_tbl[dev->if_port].mask)) + dev->if_port = media_tbl[dev->if_port].next; + if (vortex_debug > 1) + pr_info("%s: first available media type: %s\n", + dev->name, media_tbl[dev->if_port].name); + } + } else { + dev->if_port = vp->default_media; + if (vortex_debug > 1) + pr_info("%s: using default media %s\n", + dev->name, media_tbl[dev->if_port].name); + } + + init_timer(&vp->timer); + vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait); + vp->timer.data = (unsigned long)dev; + vp->timer.function = vortex_timer; /* timer handler */ + add_timer(&vp->timer); + + init_timer(&vp->rx_oom_timer); + vp->rx_oom_timer.data = (unsigned long)dev; + vp->rx_oom_timer.function = rx_oom_timer; + + if (vortex_debug > 1) + pr_debug("%s: Initial media type %s.\n", + dev->name, media_tbl[dev->if_port].name); + + vp->full_duplex = vp->mii.force_media; + config = BFINS(config, dev->if_port, 20, 4); + if (vortex_debug > 6) + pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config); + window_write32(vp, config, 3, Wn3_Config); + + if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { + mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); + mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); + vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); + vp->mii.full_duplex = vp->full_duplex; + + vortex_check_media(dev, 1); + } + else + vortex_set_duplex(dev); + + issue_and_wait(dev, TxReset); + /* + * Don't reset the PHY - that upsets autonegotiation during DHCP operations. + */ + issue_and_wait(dev, RxReset|0x04); + + + iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); + + if (vortex_debug > 1) { + pr_debug("%s: vortex_up() irq %d media status %4.4x.\n", + dev->name, dev->irq, window_read16(vp, 4, Wn4_Media)); + } + + /* Set the station address and mask in window 2 each time opened. 
*/ + for (i = 0; i < 6; i++) + window_write8(vp, dev->dev_addr[i], 2, i); + for (; i < 12; i+=2) + window_write16(vp, 0, 2, i); + + if (vp->cb_fn_base) { + unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; + if (vp->drv_flags & INVERT_LED_PWR) + n |= 0x10; + if (vp->drv_flags & INVERT_MII_PWR) + n |= 0x4000; + window_write16(vp, n, 2, Wn2_ResetOptions); + } + + if (dev->if_port == XCVR_10base2) + /* Start the thinnet transceiver. We should really wait 50ms...*/ + iowrite16(StartCoax, ioaddr + EL3_CMD); + if (dev->if_port != XCVR_NWAY) { + window_write16(vp, + (window_read16(vp, 4, Wn4_Media) & + ~(Media_10TP|Media_SQE)) | + media_tbl[dev->if_port].media_bits, + 4, Wn4_Media); + } + + /* Switch to the stats window, and clear all stats by reading. */ + iowrite16(StatsDisable, ioaddr + EL3_CMD); + for (i = 0; i < 10; i++) + window_read8(vp, 6, i); + window_read16(vp, 6, 10); + window_read16(vp, 6, 12); + /* New: On the Vortex we must also clear the BadSSD counter. */ + window_read8(vp, 4, 12); + /* ..and on the Boomerang we enable the extra statistics bits. */ + window_write16(vp, 0x0040, 4, Wn4_NetDiag); + + if (vp->full_bus_master_rx) { /* Boomerang bus master. */ + vp->cur_rx = vp->dirty_rx = 0; + /* Initialize the RxEarly register as recommended. */ + iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); + iowrite32(0x0020, ioaddr + PktStatus); + iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr); + } + if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ + vp->cur_tx = vp->dirty_tx = 0; + if (vp->drv_flags & IS_BOOMERANG) + iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */ + /* Clear the Rx, Tx rings. */ + for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */ + vp->rx_ring[i].status = 0; + for (i = 0; i < TX_RING_SIZE; i++) + vp->tx_skbuff[i] = NULL; + iowrite32(0, ioaddr + DownListPtr); + } + /* Set receiver mode: presumably accept b-case and phys addr only. */ + set_rx_mode(dev); + /* enable 802.1q tagged frames */ + set_8021q_mode(dev, 1); + iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + + iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete| + (vp->full_bus_master_tx ? DownComplete : TxAvailable) | + (vp->full_bus_master_rx ? UpComplete : RxComplete) | + (vp->bus_master ? DMADone : 0); + vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable | + (vp->full_bus_master_rx ? 0 : RxComplete) | + StatsFull | HostError | TxComplete | IntReq + | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete; + iowrite16(vp->status_enable, ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. */ + iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + iowrite16(vp->intr_enable, ioaddr + EL3_CMD); + if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ + iowrite32(0x8000, vp->cb_fn_base + 4); + netif_start_queue (dev); +err_out: + return err; +} + +static int +vortex_open(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + int i; + int retval; + + /* Use the now-standard shared IRQ implementation. */ + if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 
+ boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) { + pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); + goto err; + } + + if (vp->full_bus_master_rx) { /* Boomerang bus master. */ + if (vortex_debug > 2) + pr_debug("%s: Filling in the Rx ring.\n", dev->name); + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb; + vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); + vp->rx_ring[i].status = 0; /* Clear complete bit. */ + vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); + + skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN, + GFP_KERNEL); + vp->rx_skbuff[i] = skb; + if (skb == NULL) + break; /* Bad news! */ + + skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ + vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); + } + if (i != RX_RING_SIZE) { + int j; + pr_emerg("%s: no memory for rx ring\n", dev->name); + for (j = 0; j < i; j++) { + if (vp->rx_skbuff[j]) { + dev_kfree_skb(vp->rx_skbuff[j]); + vp->rx_skbuff[j] = NULL; + } + } + retval = -ENOMEM; + goto err_free_irq; + } + /* Wrap the ring. */ + vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); + } + + retval = vortex_up(dev); + if (!retval) + goto out; + +err_free_irq: + free_irq(dev->irq, dev); +err: + if (vortex_debug > 1) + pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval); +out: + return retval; +} + +static void +vortex_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int next_tick = 60*HZ; + int ok = 0; + int media_status; + + if (vortex_debug > 2) { + pr_debug("%s: Media selection timer tick happened, %s.\n", + dev->name, media_tbl[dev->if_port].name); + pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo); + } + + media_status = window_read16(vp, 4, Wn4_Media); + switch (dev->if_port) { + case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx: + if (media_status & Media_LnkBeat) { + netif_carrier_on(dev); + ok = 1; + if (vortex_debug > 1) + pr_debug("%s: Media %s has link beat, %x.\n", + dev->name, media_tbl[dev->if_port].name, media_status); + } else { + netif_carrier_off(dev); + if (vortex_debug > 1) { + pr_debug("%s: Media %s has no link beat, %x.\n", + dev->name, media_tbl[dev->if_port].name, media_status); + } + } + break; + case XCVR_MII: case XCVR_NWAY: + { + ok = 1; + vortex_check_media(dev, 0); + } + break; + default: /* Other media types handled by Tx timeouts. */ + if (vortex_debug > 1) + pr_debug("%s: Media %s has no indication, %x.\n", + dev->name, media_tbl[dev->if_port].name, media_status); + ok = 1; + } + + if (!netif_carrier_ok(dev)) + next_tick = 5*HZ; + + if (vp->medialock) + goto leave_media_alone; + + if (!ok) { + unsigned int config; + + spin_lock_irq(&vp->lock); + + do { + dev->if_port = media_tbl[dev->if_port].next; + } while ( ! (vp->available_media & media_tbl[dev->if_port].mask)); + if (dev->if_port == XCVR_Default) { /* Go back to default. 
*/ + dev->if_port = vp->default_media; + if (vortex_debug > 1) + pr_debug("%s: Media selection failing, using default %s port.\n", + dev->name, media_tbl[dev->if_port].name); + } else { + if (vortex_debug > 1) + pr_debug("%s: Media selection failed, now trying %s port.\n", + dev->name, media_tbl[dev->if_port].name); + next_tick = media_tbl[dev->if_port].wait; + } + window_write16(vp, + (media_status & ~(Media_10TP|Media_SQE)) | + media_tbl[dev->if_port].media_bits, + 4, Wn4_Media); + + config = window_read32(vp, 3, Wn3_Config); + config = BFINS(config, dev->if_port, 20, 4); + window_write32(vp, config, 3, Wn3_Config); + + iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax, + ioaddr + EL3_CMD); + if (vortex_debug > 1) + pr_debug("wrote 0x%08x to Wn3_Config\n", config); + /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ + + spin_unlock_irq(&vp->lock); + } + +leave_media_alone: + if (vortex_debug > 2) + pr_debug("%s: Media selection timer finished, %s.\n", + dev->name, media_tbl[dev->if_port].name); + + mod_timer(&vp->timer, RUN_AT(next_tick)); + if (vp->deferred) + iowrite16(FakeIntr, ioaddr + EL3_CMD); +} + +static void vortex_tx_timeout(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + + pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", + dev->name, ioread8(ioaddr + TxStatus), + ioread16(ioaddr + EL3_STATUS)); + pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n", + window_read16(vp, 4, Wn4_NetDiag), + window_read16(vp, 4, Wn4_Media), + ioread32(ioaddr + PktStatus), + window_read16(vp, 4, Wn4_FIFODiag)); + /* Slight code bloat to be user friendly. */ + if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88) + pr_err("%s: Transmitter encountered 16 collisions --" + " network cable problem?\n", dev->name); + if (ioread16(ioaddr + EL3_STATUS) & IntLatch) { + pr_err("%s: Interrupt posted but not delivered --" + " IRQ blocked by another device?\n", dev->name); + /* Bad idea here.. but we might as well handle a few events. */ + { + /* + * Block interrupts because vortex_interrupt does a bare spin_lock() + */ + unsigned long flags; + local_irq_save(flags); + if (vp->full_bus_master_tx) + boomerang_interrupt(dev->irq, dev); + else + vortex_interrupt(dev->irq, dev); + local_irq_restore(flags); + } + } + + if (vortex_debug > 0) + dump_tx_ring(dev); + + issue_and_wait(dev, TxReset); + + dev->stats.tx_errors++; + if (vp->full_bus_master_tx) { + pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name); + if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0) + iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc), + ioaddr + DownListPtr); + if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) + netif_wake_queue (dev); + if (vp->drv_flags & IS_BOOMERANG) + iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); + iowrite16(DownUnstall, ioaddr + EL3_CMD); + } else { + dev->stats.tx_dropped++; + netif_wake_queue(dev); + } + + /* Issue Tx Enable */ + iowrite16(TxEnable, ioaddr + EL3_CMD); + dev->trans_start = jiffies; /* prevent tx timeout */ +} + +/* + * Handle uncommon interrupt sources. This is a separate routine to minimize + * the cache impact. 
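+ *
+ * (A purely illustrative aside: el3_ack_status() is a hypothetical
+ * helper, not part of the driver; it only names the acknowledge
+ * convention the handlers here rely on.)
+ */
+static inline void el3_ack_status(void __iomem *ioaddr, u16 sources)
+{
+	/* a latched status bit is cleared by writing AckIntr plus that bit,
+	   e.g. el3_ack_status(ioaddr, RxEarly) mirrors the
+	   iowrite16(AckIntr | RxEarly, ...) call in vortex_error() below */
+	iowrite16(AckIntr | sources, ioaddr + EL3_CMD);
+}
+
+/*
+ * vortex_error() handles the uncommon interrupt sources described above.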
+ */ +static void +vortex_error(struct net_device *dev, int status) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int do_tx_reset = 0, reset_mask = 0; + unsigned char tx_status = 0; + + if (vortex_debug > 2) { + pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status); + } + + if (status & TxComplete) { /* Really "TxError" for us. */ + tx_status = ioread8(ioaddr + TxStatus); + /* Presumably a tx-timeout. We must merely re-enable. */ + if (vortex_debug > 2 || + (tx_status != 0x88 && vortex_debug > 0)) { + pr_err("%s: Transmit error, Tx status register %2.2x.\n", + dev->name, tx_status); + if (tx_status == 0x82) { + pr_err("Probably a duplex mismatch. See " + "Documentation/networking/vortex.txt\n"); + } + dump_tx_ring(dev); + } + if (tx_status & 0x14) dev->stats.tx_fifo_errors++; + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; + if (tx_status & 0x08) vp->xstats.tx_max_collisions++; + iowrite8(0, ioaddr + TxStatus); + if (tx_status & 0x30) { /* txJabber or txUnderrun */ + do_tx_reset = 1; + } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */ + do_tx_reset = 1; + reset_mask = 0x0108; /* Reset interface logic, but not download logic */ + } else { /* Merely re-enable the transmitter. */ + iowrite16(TxEnable, ioaddr + EL3_CMD); + } + } + + if (status & RxEarly) /* Rx early is unused. */ + iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); + + if (status & StatsFull) { /* Empty statistics. */ + static int DoneDidThat; + if (vortex_debug > 4) + pr_debug("%s: Updating stats.\n", dev->name); + update_stats(ioaddr, dev); + /* HACK: Disable statistics as an interrupt source. */ + /* This occurs when we have the wrong media type! */ + if (DoneDidThat == 0 && + ioread16(ioaddr + EL3_STATUS) & StatsFull) { + pr_warning("%s: Updating statistics failed, disabling " + "stats as an interrupt source.\n", dev->name); + iowrite16(SetIntrEnb | + (window_read16(vp, 5, 10) & ~StatsFull), + ioaddr + EL3_CMD); + vp->intr_enable &= ~StatsFull; + DoneDidThat++; + } + } + if (status & IntReq) { /* Restore all interrupt sources. */ + iowrite16(vp->status_enable, ioaddr + EL3_CMD); + iowrite16(vp->intr_enable, ioaddr + EL3_CMD); + } + if (status & HostError) { + u16 fifo_diag; + fifo_diag = window_read16(vp, 4, Wn4_FIFODiag); + pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n", + dev->name, fifo_diag); + /* Adapter failure requires Tx/Rx reset and reinit. */ + if (vp->full_bus_master_tx) { + int bus_status = ioread32(ioaddr + PktStatus); + /* 0x80000000 PCI master abort. */ + /* 0x40000000 PCI target abort. */ + if (vortex_debug) + pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status); + + /* In this case, blow the card away */ + /* Must not enter D3 or we can't legally issue the reset! */ + vortex_down(dev, 0); + issue_and_wait(dev, TotalReset | 0xff); + vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */ + } else if (fifo_diag & 0x0400) + do_tx_reset = 1; + if (fifo_diag & 0x3000) { + /* Reset Rx fifo and upload logic */ + issue_and_wait(dev, RxReset|0x07); + /* Set the Rx filter to the current state. */ + set_rx_mode(dev); + /* enable 802.1q VLAN tagged frames */ + set_8021q_mode(dev, 1); + iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. 
*/ + iowrite16(AckIntr | HostError, ioaddr + EL3_CMD); + } + } + + if (do_tx_reset) { + issue_and_wait(dev, TxReset|reset_mask); + iowrite16(TxEnable, ioaddr + EL3_CMD); + if (!vp->full_bus_master_tx) + netif_wake_queue(dev); + } +} + +static netdev_tx_t +vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + + /* Put out the doubleword header... */ + iowrite32(skb->len, ioaddr + TX_FIFO); + if (vp->bus_master) { + /* Set the bus-master controller to transfer the packet. */ + int len = (skb->len + 3) & ~3; + vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, + PCI_DMA_TODEVICE); + spin_lock_irq(&vp->window_lock); + window_set(vp, 7); + iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); + iowrite16(len, ioaddr + Wn7_MasterLen); + spin_unlock_irq(&vp->window_lock); + vp->tx_skb = skb; + iowrite16(StartDMADown, ioaddr + EL3_CMD); + /* netif_wake_queue() will be called at the DMADone interrupt. */ + } else { + /* ... and the packet rounded to a doubleword. */ + iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + dev_kfree_skb (skb); + if (ioread16(ioaddr + TxFree) > 1536) { + netif_start_queue (dev); /* AKPM: redundant? */ + } else { + /* Interrupt us when the FIFO has room for max-sized packet. */ + netif_stop_queue(dev); + iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); + } + } + + + /* Clear the Tx status stack. */ + { + int tx_status; + int i = 32; + + while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) { + if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */ + if (vortex_debug > 2) + pr_debug("%s: Tx error, status %2.2x.\n", + dev->name, tx_status); + if (tx_status & 0x04) dev->stats.tx_fifo_errors++; + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; + if (tx_status & 0x30) { + issue_and_wait(dev, TxReset); + } + iowrite16(TxEnable, ioaddr + EL3_CMD); + } + iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */ + } + } + return NETDEV_TX_OK; +} + +static netdev_tx_t +boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + /* Calculate the next Tx descriptor entry. */ + int entry = vp->cur_tx % TX_RING_SIZE; + struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; + unsigned long flags; + + if (vortex_debug > 6) { + pr_debug("boomerang_start_xmit()\n"); + pr_debug("%s: Trying to send a packet, Tx index %d.\n", + dev->name, vp->cur_tx); + } + + /* + * We can't allow a recursion from our interrupt handler back into the + * tx routine, as they take the same spin lock, and that causes + * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in + * a bit + */ + if (vp->handling_irq) + return NETDEV_TX_BUSY; + + if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { + if (vortex_debug > 0) + pr_warning("%s: BUG! 
Tx Ring full, refusing to send buffer.\n", + dev->name); + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + vp->tx_skbuff[entry] = skb; + + vp->tx_ring[entry].next = 0; +#if DO_ZEROCOPY + if (skb->ip_summed != CHECKSUM_PARTIAL) + vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); + else + vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); + + if (!skb_shinfo(skb)->nr_frags) { + vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, + skb->len, PCI_DMA_TODEVICE)); + vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); + } else { + int i; + + vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE)); + vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + vp->tx_ring[entry].frag[i+1].addr = + cpu_to_le32(pci_map_single(VORTEX_PCI(vp), + (void*)page_address(frag->page) + frag->page_offset, + frag->size, PCI_DMA_TODEVICE)); + + if (i == skb_shinfo(skb)->nr_frags-1) + vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG); + else + vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size); + } + } +#else + vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); + vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); + vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); +#endif + + spin_lock_irqsave(&vp->lock, flags); + /* Wait for the stall to complete. */ + issue_and_wait(dev, DownStall); + prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)); + if (ioread32(ioaddr + DownListPtr) == 0) { + iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr); + vp->queued_packet++; + } + + vp->cur_tx++; + if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) { + netif_stop_queue (dev); + } else { /* Clear previous interrupt enable. */ +#if defined(tx_interrupt_mitigation) + /* Dubious. If in boomeang_interrupt "faster" cyclone ifdef + * were selected, this would corrupt DN_COMPLETE. No? + */ + prev_entry->status &= cpu_to_le32(~TxIntrUploaded); +#endif + } + iowrite16(DownUnstall, ioaddr + EL3_CMD); + spin_unlock_irqrestore(&vp->lock, flags); + return NETDEV_TX_OK; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ + +/* + * This is the ISR for the vortex series chips. + * full_bus_master_tx == 0 && full_bus_master_rx == 0 + */ + +static irqreturn_t +vortex_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr; + int status; + int work_done = max_interrupt_work; + int handled = 0; + + ioaddr = vp->ioaddr; + spin_lock(&vp->lock); + + status = ioread16(ioaddr + EL3_STATUS); + + if (vortex_debug > 6) + pr_debug("vortex_interrupt(). status=0x%4x\n", status); + + if ((status & IntLatch) == 0) + goto handler_exit; /* No interrupt: shared IRQs cause this */ + handled = 1; + + if (status & IntReq) { + status |= vp->deferred; + vp->deferred = 0; + } + + if (status == 0xffff) /* h/w no longer present (hotplug)? 
*/ + goto handler_exit; + + if (vortex_debug > 4) + pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n", + dev->name, status, ioread8(ioaddr + Timer)); + + spin_lock(&vp->window_lock); + window_set(vp, 7); + + do { + if (vortex_debug > 5) + pr_debug("%s: In interrupt loop, status %4.4x.\n", + dev->name, status); + if (status & RxComplete) + vortex_rx(dev); + + if (status & TxAvailable) { + if (vortex_debug > 5) + pr_debug(" TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue (dev); + } + + if (status & DMADone) { + if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) { + iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ + pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ + if (ioread16(ioaddr + TxFree) > 1536) { + /* + * AKPM: FIXME: I don't think we need this. If the queue was stopped due to + * insufficient FIFO room, the TxAvailable test will succeed and call + * netif_wake_queue() + */ + netif_wake_queue(dev); + } else { /* Interrupt when FIFO has room for max-sized packet. */ + iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); + netif_stop_queue(dev); + } + } + } + /* Check for all uncommon interrupts at once. */ + if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { + if (status == 0xffff) + break; + if (status & RxEarly) + vortex_rx(dev); + spin_unlock(&vp->window_lock); + vortex_error(dev, status); + spin_lock(&vp->window_lock); + window_set(vp, 7); + } + + if (--work_done < 0) { + pr_warning("%s: Too much work in interrupt, status %4.4x.\n", + dev->name, status); + /* Disable all pending interrupts. */ + do { + vp->deferred |= status; + iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable), + ioaddr + EL3_CMD); + iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); + } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch); + /* The timer will reenable interrupts. */ + mod_timer(&vp->timer, jiffies + 1*HZ); + break; + } + /* Acknowledge the IRQ. */ + iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); + + spin_unlock(&vp->window_lock); + + if (vortex_debug > 4) + pr_debug("%s: exiting interrupt, status %4.4x.\n", + dev->name, status); +handler_exit: + spin_unlock(&vp->lock); + return IRQ_RETVAL(handled); +} + +/* + * This is the ISR for the boomerang series chips. + * full_bus_master_tx == 1 && full_bus_master_rx == 1 + */ + +static irqreturn_t +boomerang_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr; + int status; + int work_done = max_interrupt_work; + + ioaddr = vp->ioaddr; + + + /* + * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout + * and boomerang_start_xmit + */ + spin_lock(&vp->lock); + vp->handling_irq = 1; + + status = ioread16(ioaddr + EL3_STATUS); + + if (vortex_debug > 6) + pr_debug("boomerang_interrupt. status=0x%4x\n", status); + + if ((status & IntLatch) == 0) + goto handler_exit; /* No interrupt: shared IRQs can cause this */ + + if (status == 0xffff) { /* h/w no longer present (hotplug)? 
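Both interrupt handlers in this file guard against livelock the same way: after max_interrupt_work events in one invocation, every still-pending source is folded into vp->deferred, masked out of the status enable, and restored later from the watchdog timer. A condensed sketch of that idiom (the helper name is invented; the fields and commands are from the listing):

    /* Sketch of the overload deferral used by both ISRs. */
    static void vortex_defer_sources(struct vortex_private *vp,
                                     void __iomem *ioaddr, int status)
    {
            vp->deferred |= status;
            /* Keep only the sources we have not deferred enabled... */
            iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
                      ioaddr + EL3_CMD);
            /* ...and ack the deferred ones so the IRQ line deasserts. */
            iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
            /* The timer routine re-enables them about a second later. */
            mod_timer(&vp->timer, jiffies + 1*HZ);
    }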
*/ + if (vortex_debug > 1) + pr_debug("boomerang_interrupt(1): status = 0xffff\n"); + goto handler_exit; + } + + if (status & IntReq) { + status |= vp->deferred; + vp->deferred = 0; + } + + if (vortex_debug > 4) + pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n", + dev->name, status, ioread8(ioaddr + Timer)); + do { + if (vortex_debug > 5) + pr_debug("%s: In interrupt loop, status %4.4x.\n", + dev->name, status); + if (status & UpComplete) { + iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD); + if (vortex_debug > 5) + pr_debug("boomerang_interrupt->boomerang_rx\n"); + boomerang_rx(dev); + } + + if (status & DownComplete) { + unsigned int dirty_tx = vp->dirty_tx; + + iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD); + while (vp->cur_tx - dirty_tx > 0) { + int entry = dirty_tx % TX_RING_SIZE; +#if 1 /* AKPM: the latter is faster, but cyclone-only */ + if (ioread32(ioaddr + DownListPtr) == + vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)) + break; /* It still hasn't been processed. */ +#else + if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0) + break; /* It still hasn't been processed. */ +#endif + + if (vp->tx_skbuff[entry]) { + struct sk_buff *skb = vp->tx_skbuff[entry]; +#if DO_ZEROCOPY + int i; + for (i=0; i<=skb_shinfo(skb)->nr_frags; i++) + pci_unmap_single(VORTEX_PCI(vp), + le32_to_cpu(vp->tx_ring[entry].frag[i].addr), + le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, + PCI_DMA_TODEVICE); +#else + pci_unmap_single(VORTEX_PCI(vp), + le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); +#endif + dev_kfree_skb_irq(skb); + vp->tx_skbuff[entry] = NULL; + } else { + pr_debug("boomerang_interrupt: no skb!\n"); + } + /* dev->stats.tx_packets++; Counted below. */ + dirty_tx++; + } + vp->dirty_tx = dirty_tx; + if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) { + if (vortex_debug > 6) + pr_debug("boomerang_interrupt: wake queue\n"); + netif_wake_queue (dev); + } + } + + /* Check for all uncommon interrupts at once. */ + if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) + vortex_error(dev, status); + + if (--work_done < 0) { + pr_warning("%s: Too much work in interrupt, status %4.4x.\n", + dev->name, status); + /* Disable all pending interrupts. */ + do { + vp->deferred |= status; + iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable), + ioaddr + EL3_CMD); + iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); + } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch); + /* The timer will reenable interrupts. */ + mod_timer(&vp->timer, jiffies + 1*HZ); + break; + } + /* Acknowledge the IRQ. */ + iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ + iowrite32(0x8000, vp->cb_fn_base + 4); + + } while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch); + + if (vortex_debug > 4) + pr_debug("%s: exiting interrupt, status %4.4x.\n", + dev->name, status); +handler_exit: + vp->handling_irq = 0; + spin_unlock(&vp->lock); + return IRQ_HANDLED; +} + +static int vortex_rx(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int i; + short rx_status; + + if (vortex_debug > 5) + pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n", + ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus)); + while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) { + if (rx_status & 0x4000) { /* Error, update stats. 
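The Tx-completion walk above depends on cur_tx and dirty_tx being free-running counters: they are reduced modulo TX_RING_SIZE only when used as indices, so their difference is the ring occupancy, and a full ring is distinguishable from an empty one without an extra count field. In sketch form (helper names invented):

    /* Sketch: free-running ring counters as used by the Tx path above. */
    static inline unsigned int tx_ring_used(const struct vortex_private *vp)
    {
            return vp->cur_tx - vp->dirty_tx;        /* 0 .. TX_RING_SIZE */
    }

    static inline int tx_ring_full(const struct vortex_private *vp)
    {
            return tx_ring_used(vp) >= TX_RING_SIZE; /* stop the queue */
    }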
*/ + unsigned char rx_error = ioread8(ioaddr + RxErrors); + if (vortex_debug > 2) + pr_debug(" Rx error: status %2.2x.\n", rx_error); + dev->stats.rx_errors++; + if (rx_error & 0x01) dev->stats.rx_over_errors++; + if (rx_error & 0x02) dev->stats.rx_length_errors++; + if (rx_error & 0x04) dev->stats.rx_frame_errors++; + if (rx_error & 0x08) dev->stats.rx_crc_errors++; + if (rx_error & 0x10) dev->stats.rx_length_errors++; + } else { + /* The packet length: up to 4.5K!. */ + int pkt_len = rx_status & 0x1fff; + struct sk_buff *skb; + + skb = dev_alloc_skb(pkt_len + 5); + if (vortex_debug > 4) + pr_debug("Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + /* 'skb_put()' points to the start of sk_buff data area. */ + if (vp->bus_master && + ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) { + dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len), + pkt_len, PCI_DMA_FROMDEVICE); + iowrite32(dma, ioaddr + Wn7_MasterAddr); + iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); + iowrite16(StartDMAUp, ioaddr + EL3_CMD); + while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000) + ; + pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE); + } else { + ioread32_rep(ioaddr + RX_FIFO, + skb_put(skb, pkt_len), + (pkt_len + 3) >> 2); + } + iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + /* Wait a limited time to go to next packet. */ + for (i = 200; i >= 0; i--) + if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + continue; + } else if (vortex_debug > 0) + pr_notice("%s: No memory to allocate a sk_buff of size %d.\n", + dev->name, pkt_len); + dev->stats.rx_dropped++; + } + issue_and_wait(dev, RxDiscard); + } + + return 0; +} + +static int +boomerang_rx(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + int entry = vp->cur_rx % RX_RING_SIZE; + void __iomem *ioaddr = vp->ioaddr; + int rx_status; + int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; + + if (vortex_debug > 5) + pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS)); + + while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){ + if (--rx_work_limit < 0) + break; + if (rx_status & RxDError) { /* Error, update stats. */ + unsigned char rx_error = rx_status >> 16; + if (vortex_debug > 2) + pr_debug(" Rx error: status %2.2x.\n", rx_error); + dev->stats.rx_errors++; + if (rx_error & 0x01) dev->stats.rx_over_errors++; + if (rx_error & 0x02) dev->stats.rx_length_errors++; + if (rx_error & 0x04) dev->stats.rx_frame_errors++; + if (rx_error & 0x08) dev->stats.rx_crc_errors++; + if (rx_error & 0x10) dev->stats.rx_length_errors++; + } else { + /* The packet length: up to 4.5K!. */ + int pkt_len = rx_status & 0x1fff; + struct sk_buff *skb; + dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); + + if (vortex_debug > 4) + pr_debug("Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + + /* Check if the packet is long enough to just accept without + copying to a properly sized skbuff. */ + if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + /* 'skb_put()' points to the start of sk_buff data area. 
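boomerang_rx() is about to apply the classic copybreak optimisation: a frame shorter than rx_copybreak is copied into a freshly allocated, right-sized skb so the mapped ring buffer can be reused in place, while a longer frame is handed to the stack directly and its ring slot refilled later. The policy in isolation (illustrative helper; rx_copybreak is the driver's module parameter, 200 bytes by default in this era):

    /* Sketch: the copybreak trade-off used below.  Copying a small
     * frame is cheaper than donating a full PKT_BUF_SZ ring buffer
     * to the stack and allocating a replacement. */
    static inline int rx_should_copy(int pkt_len, int copybreak)
    {
            return pkt_len < copybreak;
    }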
*/ + memcpy(skb_put(skb, pkt_len), + vp->rx_skbuff[entry]->data, + pkt_len); + pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + vp->rx_copy++; + } else { + /* Pass up the skbuff already on the Rx ring. */ + skb = vp->rx_skbuff[entry]; + vp->rx_skbuff[entry] = NULL; + skb_put(skb, pkt_len); + pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + vp->rx_nocopy++; + } + skb->protocol = eth_type_trans(skb, dev); + { /* Use hardware checksum info. */ + int csum_bits = rx_status & 0xee000000; + if (csum_bits && + (csum_bits == (IPChksumValid | TCPChksumValid) || + csum_bits == (IPChksumValid | UDPChksumValid))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + vp->rx_csumhits++; + } + } + netif_rx(skb); + dev->stats.rx_packets++; + } + entry = (++vp->cur_rx) % RX_RING_SIZE; + } + /* Refill the Rx ring buffers. */ + for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { + struct sk_buff *skb; + entry = vp->dirty_rx % RX_RING_SIZE; + if (vp->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); + if (skb == NULL) { + static unsigned long last_jif; + if (time_after(jiffies, last_jif + 10 * HZ)) { + pr_warning("%s: memory shortage\n", dev->name); + last_jif = jiffies; + } + if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) + mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); + break; /* Bad news! */ + } + + vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); + vp->rx_skbuff[entry] = skb; + } + vp->rx_ring[entry].status = 0; /* Clear complete bit. */ + iowrite16(UpUnstall, ioaddr + EL3_CMD); + } + return 0; +} + +/* + * If we've hit a total OOM refilling the Rx ring we poll once a second + * for some memory. Otherwise there is no way to restart the rx process. + */ +static void +rx_oom_timer(unsigned long arg) +{ + struct net_device *dev = (struct net_device *)arg; + struct vortex_private *vp = netdev_priv(dev); + + spin_lock_irq(&vp->lock); + if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */ + boomerang_rx(dev); + if (vortex_debug > 1) { + pr_debug("%s: rx_oom_timer %s\n", dev->name, + ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying"); + } + spin_unlock_irq(&vp->lock); +} + +static void +vortex_down(struct net_device *dev, int final_down) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + + netif_stop_queue (dev); + + del_timer_sync(&vp->rx_oom_timer); + del_timer_sync(&vp->timer); + + /* Turn off statistics ASAP. We update dev->stats below. */ + iowrite16(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. */ + iowrite16(RxDisable, ioaddr + EL3_CMD); + iowrite16(TxDisable, ioaddr + EL3_CMD); + + /* Disable receiving 802.1q tagged frames */ + set_8021q_mode(dev, 0); + + if (dev->if_port == XCVR_10base2) + /* Turn off thinnet power. Green! 
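The refill loop above degrades gracefully under memory pressure: a partial refill can simply wait for the next Rx interrupt, but if the ring drains completely no further Rx interrupt will ever arrive, so rx_oom_timer is armed to poll for memory once a second. The recovery rule on its own (illustrative; RUN_AT(x) in this driver expands to jiffies + (x)):

    /* Sketch: why the timer is armed only when the ring is empty. */
    static void rx_refill_failed(struct vortex_private *vp)
    {
            int ring_empty = (vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE;

            if (ring_empty)          /* no Rx IRQ will come to help us */
                    mod_timer(&vp->rx_oom_timer, jiffies + HZ);
            /* otherwise the next Rx interrupt retries the refill */
    }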
*/ + iowrite16(StopCoax, ioaddr + EL3_CMD); + + iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); + + update_stats(ioaddr, dev); + if (vp->full_bus_master_rx) + iowrite32(0, ioaddr + UpListPtr); + if (vp->full_bus_master_tx) + iowrite32(0, ioaddr + DownListPtr); + + if (final_down && VORTEX_PCI(vp)) { + vp->pm_state_valid = 1; + pci_save_state(VORTEX_PCI(vp)); + acpi_set_WOL(dev); + } +} + +static int +vortex_close(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int i; + + if (netif_device_present(dev)) + vortex_down(dev, 1); + + if (vortex_debug > 1) { + pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n", + dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus)); + pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d" + " tx_queued %d Rx pre-checksummed %d.\n", + dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits); + } + +#if DO_ZEROCOPY + if (vp->rx_csumhits && + (vp->drv_flags & HAS_HWCKSM) == 0 && + (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) { + pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name); + } +#endif + + free_irq(dev->irq, dev); + + if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ + for (i = 0; i < RX_RING_SIZE; i++) + if (vp->rx_skbuff[i]) { + pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + dev_kfree_skb(vp->rx_skbuff[i]); + vp->rx_skbuff[i] = NULL; + } + } + if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */ + for (i = 0; i < TX_RING_SIZE; i++) { + if (vp->tx_skbuff[i]) { + struct sk_buff *skb = vp->tx_skbuff[i]; +#if DO_ZEROCOPY + int k; + + for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) + pci_unmap_single(VORTEX_PCI(vp), + le32_to_cpu(vp->tx_ring[i].frag[k].addr), + le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, + PCI_DMA_TODEVICE); +#else + pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); +#endif + dev_kfree_skb(skb); + vp->tx_skbuff[i] = NULL; + } + } + } + + return 0; +} + +static void +dump_tx_ring(struct net_device *dev) +{ + if (vortex_debug > 0) { + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + + if (vp->full_bus_master_tx) { + int i; + int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */ + + pr_err(" Flags; bus-master %d, dirty %d(%d) current %d(%d)\n", + vp->full_bus_master_tx, + vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE, + vp->cur_tx, vp->cur_tx % TX_RING_SIZE); + pr_err(" Transmit list %8.8x vs. 
%p.\n", + ioread32(ioaddr + DownListPtr), + &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); + issue_and_wait(dev, DownStall); + for (i = 0; i < TX_RING_SIZE; i++) { + unsigned int length; + +#if DO_ZEROCOPY + length = le32_to_cpu(vp->tx_ring[i].frag[0].length); +#else + length = le32_to_cpu(vp->tx_ring[i].length); +#endif + pr_err(" %d: @%p length %8.8x status %8.8x\n", + i, &vp->tx_ring[i], length, + le32_to_cpu(vp->tx_ring[i].status)); + } + if (!stalled) + iowrite16(DownUnstall, ioaddr + EL3_CMD); + } + } +} + +static struct net_device_stats *vortex_get_stats(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + unsigned long flags; + + if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */ + spin_lock_irqsave (&vp->lock, flags); + update_stats(ioaddr, dev); + spin_unlock_irqrestore (&vp->lock, flags); + } + return &dev->stats; +} + +/* Update statistics. + Unlike with the EL3 we need not worry about interrupts changing + the window setting from underneath us, but we must still guard + against a race condition with a StatsUpdate interrupt updating the + table. This is done by checking that the ASM (!) code generated uses + atomic updates with '+='. + */ +static void update_stats(void __iomem *ioaddr, struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + + /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ + /* Switch to the stats window, and read everything. */ + dev->stats.tx_carrier_errors += window_read8(vp, 6, 0); + dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1); + dev->stats.tx_window_errors += window_read8(vp, 6, 4); + dev->stats.rx_fifo_errors += window_read8(vp, 6, 5); + dev->stats.tx_packets += window_read8(vp, 6, 6); + dev->stats.tx_packets += (window_read8(vp, 6, 9) & + 0x30) << 4; + /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */ + /* Don't bother with register 9, an extension of registers 6&7. + If we do use the 6&7 values the atomic update assumption above + is invalid. 
 */
+	dev->stats.rx_bytes += window_read16(vp, 6, 10);
+	dev->stats.tx_bytes += window_read16(vp, 6, 12);
+	/* Extra stats for get_ethtool_stats() */
+	vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
+	vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
+	vp->xstats.tx_deferred += window_read8(vp, 6, 8);
+	vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
+
+	dev->stats.collisions = vp->xstats.tx_multiple_collisions
+		+ vp->xstats.tx_single_collisions
+		+ vp->xstats.tx_max_collisions;
+
+	{
+		u8 up = window_read8(vp, 4, 13);
+		dev->stats.rx_bytes += (up & 0x0f) << 16;
+		dev->stats.tx_bytes += (up & 0xf0) << 12;
+	}
+}
+
+static int vortex_nway_reset(struct net_device *dev)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	return mii_nway_restart(&vp->mii);
+}
+
+static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	return mii_ethtool_gset(&vp->mii, cmd);
+}
+
+static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	return mii_ethtool_sset(&vp->mii, cmd);
+}
+
+static u32 vortex_get_msglevel(struct net_device *dev)
+{
+	return vortex_debug;
+}
+
+static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
+{
+	vortex_debug = dbg;
+}
+
+static int vortex_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return VORTEX_NUM_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void vortex_get_ethtool_stats(struct net_device *dev,
+	struct ethtool_stats *stats, u64 *data)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	void __iomem *ioaddr = vp->ioaddr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vp->lock, flags);
+	update_stats(ioaddr, dev);
+	spin_unlock_irqrestore(&vp->lock, flags);
+
+	data[0] = vp->xstats.tx_deferred;
+	data[1] = vp->xstats.tx_max_collisions;
+	data[2] = vp->xstats.tx_multiple_collisions;
+	data[3] = vp->xstats.tx_single_collisions;
+	data[4] = vp->xstats.rx_bad_ssd;
+}
+
+
+static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+static void vortex_get_drvinfo(struct net_device *dev,
+	struct ethtool_drvinfo *info)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_NAME);
+	if (VORTEX_PCI(vp)) {
+		strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
+	} else {
+		if (VORTEX_EISA(vp))
+			strcpy(info->bus_info, dev_name(vp->gendev));
+		else
+			sprintf(info->bus_info, "EISA 0x%lx %d",
+				dev->base_addr, dev->irq);
+	}
+}
+
+static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	if (!VORTEX_PCI(vp))
+		return;
+
+	wol->supported = WAKE_MAGIC;
+
+	wol->wolopts = 0;
+	if (vp->enable_wol)
+		wol->wolopts |= WAKE_MAGIC;
+}
+
+static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	if (!VORTEX_PCI(vp))
+		return -EOPNOTSUPP;
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (wol->wolopts & WAKE_MAGIC)
+		vp->enable_wol = 1;
+	else
+		vp->enable_wol = 0;
+	acpi_set_WOL(dev);
+
+	return 0;
+}
+
+static const struct ethtool_ops vortex_ethtool_ops = {
+	.get_drvinfo = vortex_get_drvinfo,
+	.get_strings = vortex_get_strings,
+	.get_msglevel = vortex_get_msglevel,
+	.set_msglevel = 
vortex_set_msglevel, + .get_ethtool_stats = vortex_get_ethtool_stats, + .get_sset_count = vortex_get_sset_count, + .get_settings = vortex_get_settings, + .set_settings = vortex_set_settings, + .get_link = ethtool_op_get_link, + .nway_reset = vortex_nway_reset, + .get_wol = vortex_get_wol, + .set_wol = vortex_set_wol, +}; + +#ifdef CONFIG_PCI +/* + * Must power the device up to do MDIO operations + */ +static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + int err; + struct vortex_private *vp = netdev_priv(dev); + pci_power_t state = 0; + + if(VORTEX_PCI(vp)) + state = VORTEX_PCI(vp)->current_state; + + /* The kernel core really should have pci_get_power_state() */ + + if(state != 0) + pci_set_power_state(VORTEX_PCI(vp), PCI_D0); + err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); + if(state != 0) + pci_set_power_state(VORTEX_PCI(vp), state); + + return err; +} +#endif + + +/* Pre-Cyclone chips have no documented multicast filter, so the only + multicast setting is to receive all multicast frames. At least + the chip has a very clean way to set the mode, unlike many others. */ +static void set_rx_mode(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int new_mode; + + if (dev->flags & IFF_PROMISC) { + if (vortex_debug > 3) + pr_notice("%s: Setting promiscuous mode.\n", dev->name); + new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm; + } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { + new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast; + } else + new_mode = SetRxFilter | RxStation | RxBroadcast; + + iowrite16(new_mode, ioaddr + EL3_CMD); +} + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +/* Setup the card so that it can receive frames with an 802.1q VLAN tag. + Note that this must be done after each RxReset due to some backwards + compatibility logic in the Cyclone and Tornado ASICs */ + +/* The Ethernet Type used for 802.1q tagged frames */ +#define VLAN_ETHER_TYPE 0x8100 + +static void set_8021q_mode(struct net_device *dev, int enable) +{ + struct vortex_private *vp = netdev_priv(dev); + int mac_ctrl; + + if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) { + /* cyclone and tornado chipsets can recognize 802.1q + * tagged frames and treat them correctly */ + + int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */ + if (enable) + max_pkt_size += 4; /* 802.1Q VLAN tag */ + + window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize); + + /* set VlanEtherType to let the hardware checksumming + treat tagged frames correctly */ + window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType); + } else { + /* on older cards we have to enable large frames */ + + vp->large_frames = dev->mtu > 1500 || enable; + + mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl); + if (vp->large_frames) + mac_ctrl |= 0x40; + else + mac_ctrl &= ~0x40; + window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl); + } +} +#else + +static void set_8021q_mode(struct net_device *dev, int enable) +{ +} + + +#endif + +/* MII transceiver control section. + Read and write the MII registers using software-generated serial + MDIO protocol. See the MII specifications or DP83840A data sheet + for details. */ + +/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually + met by back-to-back PCI I/O cycles, but we insert a delay to avoid + "overclocking" issues. 
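The command words used by the bit-banged MDIO routines below are packed as plain integers and clocked out MSB-first. On one reading of the constants (an interpretation, not from the source comments): the read word carries the start (01) and read-opcode (10) patterns in its top bits, with a leading one riding on the preamble, and the 32-bit write word is start (01), write opcode (01), 5-bit PHY address, 5-bit register, a 10 turnaround, then 16 data bits:

    /* Sketch: how mdio_read()/mdio_write() below pack their commands. */
    static inline unsigned int mdio_read_cmd(int phy_id, int location)
    {
            /* 15 bits shifted out: sync(1) start(01) op(10) phy(5) reg(5) */
            return (0xf6 << 10) | (phy_id << 5) | location;
    }

    static inline unsigned int mdio_write_cmd(int phy_id, int location, int value)
    {
            /* 32 bits: start(01) op(01) phy(5) reg(5) ta(10) data(16) */
            return 0x50020000 | (phy_id << 23) | (location << 18) | value;
    }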
 */
+static void mdio_delay(struct vortex_private *vp)
+{
+	window_read32(vp, 4, Wn4_PhysicalMgmt);
+}
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DIR_WRITE 0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ 0x02
+#define MDIO_ENB_IN 0x00
+
+/* Generate the preamble required for initial synchronization and
+   a few older transceivers. */
+static void mdio_sync(struct vortex_private *vp, int bits)
+{
+	/* Establish sync by sending at least 32 logic ones. */
+	while (-- bits >= 0) {
+		window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+	}
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	int i;
+	struct vortex_private *vp = netdev_priv(dev);
+	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	unsigned int retval = 0;
+
+	spin_lock_bh(&vp->mii_lock);
+
+	if (mii_preamble_required)
+		mdio_sync(vp, 32);
+
+	/* Shift the read command bits out. */
+	for (i = 14; i >= 0; i--) {
+		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, dataval | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		retval = (retval << 1) |
+			((window_read16(vp, 4, Wn4_PhysicalMgmt) &
+			  MDIO_DATA_READ) ? 1 : 0);
+		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+	}
+
+	spin_unlock_bh(&vp->mii_lock);
+
+	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+	int i;
+
+	spin_lock_bh(&vp->mii_lock);
+
+	if (mii_preamble_required)
+		mdio_sync(vp, 32);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, dataval | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+	}
+	/* Leave the interface idle. */
+	for (i = 1; i >= 0; i--) {
+		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+	}
+
+	spin_unlock_bh(&vp->mii_lock);
+}
+
+/* ACPI: Advanced Configuration and Power Interface. */
+/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
+static void acpi_set_WOL(struct net_device *dev)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	void __iomem *ioaddr = vp->ioaddr;
+
+	device_set_wakeup_enable(vp->gendev, vp->enable_wol);
+
+	if (vp->enable_wol) {
+		/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
+		window_write16(vp, 2, 7, 0x0c);
+		/* The RxFilter must accept the WOL frames. */
+		iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+		iowrite16(RxEnable, ioaddr + EL3_CMD);
+
+		if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
+			pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));
+
+			vp->enable_wol = 0;
+			return;
+		}
+
+		if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
+			return;
+
+		/* Change the power state to D3; RxEnable doesn't take effect.
*/ + pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); + } +} + + +static void __devexit vortex_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct vortex_private *vp; + + if (!dev) { + pr_err("vortex_remove_one called for Compaq device!\n"); + BUG(); + } + + vp = netdev_priv(dev); + + if (vp->cb_fn_base) + pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base); + + unregister_netdev(dev); + + if (VORTEX_PCI(vp)) { + pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ + if (vp->pm_state_valid) + pci_restore_state(VORTEX_PCI(vp)); + pci_disable_device(VORTEX_PCI(vp)); + } + /* Should really use issue_and_wait() here */ + iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14), + vp->ioaddr + EL3_CMD); + + pci_iounmap(VORTEX_PCI(vp), vp->ioaddr); + + pci_free_consistent(pdev, + sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + vp->rx_ring, + vp->rx_ring_dma); + if (vp->must_free_region) + release_region(dev->base_addr, vp->io_size); + free_netdev(dev); +} + + +static struct pci_driver vortex_driver = { + .name = "3c59x", + .probe = vortex_init_one, + .remove = __devexit_p(vortex_remove_one), + .id_table = vortex_pci_tbl, + .driver.pm = VORTEX_PM_OPS, +}; + + +static int vortex_have_pci; +static int vortex_have_eisa; + + +static int __init vortex_init(void) +{ + int pci_rc, eisa_rc; + + pci_rc = pci_register_driver(&vortex_driver); + eisa_rc = vortex_eisa_init(); + + if (pci_rc == 0) + vortex_have_pci = 1; + if (eisa_rc > 0) + vortex_have_eisa = 1; + + return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV; +} + + +static void __exit vortex_eisa_cleanup(void) +{ + struct vortex_private *vp; + void __iomem *ioaddr; + +#ifdef CONFIG_EISA + /* Take care of the EISA devices */ + eisa_driver_unregister(&vortex_eisa_driver); +#endif + + if (compaq_net_device) { + vp = netdev_priv(compaq_net_device); + ioaddr = ioport_map(compaq_net_device->base_addr, + VORTEX_TOTAL_SIZE); + + unregister_netdev(compaq_net_device); + iowrite16(TotalReset, ioaddr + EL3_CMD); + release_region(compaq_net_device->base_addr, + VORTEX_TOTAL_SIZE); + + free_netdev(compaq_net_device); + } +} + + +static void __exit vortex_cleanup(void) +{ + if (vortex_have_pci) + pci_unregister_driver(&vortex_driver); + if (vortex_have_eisa) + vortex_eisa_cleanup(); +} + + +module_init(vortex_init); +module_exit(vortex_cleanup); diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig new file mode 100644 index 0000000..497f038 --- /dev/null +++ b/drivers/net/ethernet/3com/Kconfig @@ -0,0 +1,147 @@ +# +# 3Com Ethernet device configuration +# + +config NET_VENDOR_3COM + bool "3Com devices" + depends on ISA || EISA || MCA || PCI || PCMCIA + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about 3Com cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_3COM + +config EL1 + tristate "3c501 \"EtherLink\" support" + depends on ISA + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . Also, consider buying a + new card, since the 3c501 is slow, broken, and obsolete: you will + have problems. 
Some people suggest to ping ("man ping") a nearby + machine every minute ("man cron") when using this card. + + To compile this driver as a module, choose M here. The module + will be called 3c501. + +config EL3 + tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support" + depends on (ISA || EISA || MCA) + ---help--- + If you have a network (Ethernet) card belonging to the 3Com + EtherLinkIII series, say Y and read the Ethernet-HOWTO, available + from . + + If your card is not working you may need to use the DOS + setup disk to disable Plug & Play mode, and to select the default + media type. + + To compile this driver as a module, choose M here. The module + will be called 3c509. + +config 3C515 + tristate "3c515 ISA \"Fast EtherLink\"" + depends on (ISA || EISA) && ISA_DMA_API + ---help--- + If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet + network card, say Y and read the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called 3c515. + +config PCMCIA_3C574 + tristate "3Com 3c574 PCMCIA support" + depends on PCMCIA + ---help--- + Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA + (PC-card) Fast Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called 3c574_cs. If unsure, say N. + +config PCMCIA_3C589 + tristate "3Com 3c589 PCMCIA support" + depends on PCMCIA + ---help--- + Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA + (PC-card) Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called 3c589_cs. If unsure, say N. + +config VORTEX + tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support" + depends on (PCI || EISA) + select MII + ---help--- + This option enables driver support for a large number of 10Mbps and + 10/100Mbps EISA, PCI and PCMCIA 3Com network cards: + + "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI + "Boomerang" (EtherLink XL 3c900 or 3c905) PCI + "Cyclone" (3c540/3c900/3c905/3c980/3c575/3c656) PCI and Cardbus + "Tornado" (3c905) PCI + "Hurricane" (3c555/3cSOHO) PCI + + If you have such a card, say Y and read the Ethernet-HOWTO, + available from . More + specific information is in + and in the comments at + the beginning of . + + To compile this support as a module, choose M here. + +config TYPHOON + tristate "3cr990 series \"Typhoon\" support" + depends on PCI + select CRC32 + ---help--- + This option enables driver support for the 3cr990 series of cards: + + 3C990-TX, 3CR990-TX-95, 3CR990-TX-97, 3CR990-FX-95, 3CR990-FX-97, + 3CR990SVR, 3CR990SVR95, 3CR990SVR97, 3CR990-FX-95 Server, + 3CR990-FX-97 Server, 3C990B-TX-M, 3C990BSVR + + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called typhoon. + +config ACENIC + tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support" + depends on PCI + ---help--- + Say Y here if you have an Alteon AceNIC, 3Com 3C985(B), NetGear + GA620, SGI Gigabit or Farallon PN9000-SX PCI Gigabit Ethernet + adapter. The driver allows for using the Jumbo Frame option (9000 + bytes/frame) however it requires that your switches can handle this + as well. To enable Jumbo Frames, add `mtu 9000' to your ifconfig + line. + + To compile this driver as a module, choose M here: the + module will be called acenic. 
+ +config ACENIC_OMIT_TIGON_I + bool "Omit support for old Tigon I based AceNICs" + depends on ACENIC + ---help--- + Say Y here if you only have Tigon II based AceNICs and want to leave + out support for the older Tigon I based cards which are no longer + being sold (ie. the original Alteon AceNIC and 3Com 3C985 (non B + version)). This will reduce the size of the driver object by + app. 100KB. If you are not sure whether your card is a Tigon I or a + Tigon II, say N here. + + The safe and default value for this is N. + +endif # NET_VENDOR_3COM diff --git a/drivers/net/ethernet/3com/Makefile b/drivers/net/ethernet/3com/Makefile new file mode 100644 index 0000000..96d1d60 --- /dev/null +++ b/drivers/net/ethernet/3com/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for the 3Com Ethernet device drivers +# + +obj-$(CONFIG_EL1) += 3c501.o +obj-$(CONFIG_EL3) += 3c509.o +obj-$(CONFIG_3C515) += 3c515.o +obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o +obj-$(CONFIG_PCMCIA_3C574) += 3c574_cs.o +obj-$(CONFIG_VORTEX) += 3c59x.o +obj-$(CONFIG_ACENIC) += acenic.o +obj-$(CONFIG_TYPHOON) += typhoon.o diff --git a/drivers/net/ethernet/3com/acenic.c b/drivers/net/ethernet/3com/acenic.c new file mode 100644 index 0000000..31798f5 --- /dev/null +++ b/drivers/net/ethernet/3com/acenic.c @@ -0,0 +1,3206 @@ +/* + * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card + * and other Tigon based cards. + * + * Copyright 1998-2002 by Jes Sorensen, . + * + * Thanks to Alteon and 3Com for providing hardware and documentation + * enabling me to write this driver. + * + * A mailing list for discussing the use of this driver has been + * setup, please subscribe to the lists if you have any questions + * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to + * see how to subscribe. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Additional credits: + * Pete Wyckoff : Initial Linux/Alpha and trace + * dump support. The trace dump support has not been + * integrated yet however. + * Troy Benjegerdes: Big Endian (PPC) patches. + * Nate Stahl: Better out of memory handling and stats support. + * Aman Singla: Nasty race between interrupt handler and tx code dealing + * with 'testing the tx_ret_csm and setting tx_full' + * David S. Miller : conversion to new PCI dma mapping + * infrastructure and Sparc support + * Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the + * driver under Linux/Sparc64 + * Matt Domsch : Detect Alteon 1000baseT cards + * ETHTOOL_GDRVINFO support + * Chip Salzenberg : Fix race condition between tx + * handler and close() cleanup. + * Ken Aaker : Correct check for whether + * memory mapped IO is enabled to + * make the driver work on RS/6000. + * Takayoshi Kouchi : Identifying problem + * where the driver would disable + * bus master mode if it had to disable + * write and invalidate. + * Stephen Hack : Fixed ace_set_mac_addr for little + * endian systems. + * Val Henson : Reset Jumbo skb producer and + * rx producer index when + * flushing the Jumbo ring. + * Hans Grobler : Memory leak fixes in the + * driver init path. + * Grant Grundler : PCI write posting fixes. 
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/sockios.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <linux/if_vlan.h>
+
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+
+#include <net/sock.h>
+#include <net/ip.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+
+#define DRV_NAME "acenic"
+
+#undef INDEX_DEBUG
+
+#ifdef CONFIG_ACENIC_OMIT_TIGON_I
+#define ACE_IS_TIGON_I(ap) 0
+#define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
+#else
+#define ACE_IS_TIGON_I(ap) (ap->version == 1)
+#define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
+#endif
+
+#ifndef PCI_VENDOR_ID_ALTEON
+#define PCI_VENDOR_ID_ALTEON 0x12ae
+#endif
+#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
+#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001
+#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
+#endif
+#ifndef PCI_DEVICE_ID_3COM_3C985
+#define PCI_DEVICE_ID_3COM_3C985 0x0001
+#endif
+#ifndef PCI_VENDOR_ID_NETGEAR
+#define PCI_VENDOR_ID_NETGEAR 0x1385
+#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
+#endif
+#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
+#define PCI_DEVICE_ID_NETGEAR_GA620T 0x630a
+#endif
+
+
+/*
+ * Farallon used the DEC vendor ID by mistake and they seem not
+ * to care - stinky!
+ */
+#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
+#define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a
+#endif
+#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
+#define PCI_DEVICE_ID_FARALLON_PN9100T 0xfa
+#endif
+#ifndef PCI_VENDOR_ID_SGI
+#define PCI_VENDOR_ID_SGI 0x10a9
+#endif
+#ifndef PCI_DEVICE_ID_SGI_ACENIC
+#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
+	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+	/*
+	 * Farallon used the DEC vendor ID on their cards incorrectly,
+	 * then later Alteon's ID.
+	 */
+	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
+
+#define ace_sync_irq(irq) synchronize_irq(irq)
+
+#ifndef offset_in_page
+#define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK)
+#endif
+
+#define ACE_MAX_MOD_PARMS 8
+#define BOARD_IDX_STATIC 0
+#define BOARD_IDX_OVERFLOW -1
+
+#include "acenic.h"
+
+/*
+ * These must be defined before the firmware is included.
+ */
+#define MAX_TEXT_LEN 96*1024
+#define MAX_RODATA_LEN 8*1024
+#define MAX_DATA_LEN 2*1024
+
+#ifndef tigon2FwReleaseLocal
+#define tigon2FwReleaseLocal 0
+#endif
+
+/*
+ * This driver currently supports Tigon I and Tigon II based cards
+ * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
+ * GA620.
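Each entry in acenic_pci_tbl above matches on PCI class as well as vendor and device ID: the 16-bit PCI_CLASS_NETWORK_ETHERNET value (0x0200) is shifted into the top bytes of the 24-bit class code, and the 0xffff00 mask tells the PCI core to ignore the low programming-interface byte. Roughly the comparison being requested (a simplified sketch, not the core's actual code):

    /* Sketch: simplified class-code match for the table entries above. */
    static int acenic_class_match(unsigned int dev_class /* 24-bit class */)
    {
            unsigned int id_class = PCI_CLASS_NETWORK_ETHERNET << 8; /* 0x020000 */
            unsigned int id_mask  = 0xffff00;   /* don't-care: prog-if byte */

            return (dev_class & id_mask) == (id_class & id_mask);
    }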
The driver should also work on the SGI, DEC and Farallon + * versions of the card, however I have not been able to test that + * myself. + * + * This card is really neat, it supports receive hardware checksumming + * and jumbo frames (up to 9000 bytes) and does a lot of work in the + * firmware. Also the programming interface is quite neat, except for + * the parts dealing with the i2c eeprom on the card ;-) + * + * Using jumbo frames: + * + * To enable jumbo frames, simply specify an mtu between 1500 and 9000 + * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time + * by running `ifconfig eth mtu ' with being the Ethernet + * interface number and being the MTU value. + * + * Module parameters: + * + * When compiled as a loadable module, the driver allows for a number + * of module parameters to be specified. The driver supports the + * following module parameters: + * + * trace= - Firmware trace level. This requires special traced + * firmware to replace the firmware supplied with + * the driver - for debugging purposes only. + * + * link= - Link state. Normally you want to use the default link + * parameters set by the driver. This can be used to + * override these in case your switch doesn't negotiate + * the link properly. Valid values are: + * 0x0001 - Force half duplex link. + * 0x0002 - Do not negotiate line speed with the other end. + * 0x0010 - 10Mbit/sec link. + * 0x0020 - 100Mbit/sec link. + * 0x0040 - 1000Mbit/sec link. + * 0x0100 - Do not negotiate flow control. + * 0x0200 - Enable RX flow control Y + * 0x0400 - Enable TX flow control Y (Tigon II NICs only). + * Default value is 0x0270, ie. enable link+flow + * control negotiation. Negotiating the highest + * possible link speed with RX flow control enabled. + * + * When disabling link speed negotiation, only one link + * speed is allowed to be specified! + * + * tx_coal_tick= - number of coalescing clock ticks (us) allowed + * to wait for more packets to arive before + * interrupting the host, from the time the first + * packet arrives. + * + * rx_coal_tick= - number of coalescing clock ticks (us) allowed + * to wait for more packets to arive in the transmit ring, + * before interrupting the host, after transmitting the + * first packet in the ring. + * + * max_tx_desc= - maximum number of transmit descriptors + * (packets) transmitted before interrupting the host. + * + * max_rx_desc= - maximum number of receive descriptors + * (packets) received before interrupting the host. + * + * tx_ratio= - 7 bit value (0 - 63) specifying the split in 64th + * increments of the NIC's on board memory to be used for + * transmit and receive buffers. For the 1MB NIC app. 800KB + * is available, on the 1/2MB NIC app. 300KB is available. + * 68KB will always be available as a minimum for both + * directions. The default value is a 50/50 split. + * dis_pci_mem_inval= - disable PCI memory write and invalidate + * operations, default (1) is to always disable this as + * that is what Alteon does on NT. I have not been able + * to measure any real performance differences with + * this on my systems. Set =0 if you want to + * enable these operations. + * + * If you use more than one NIC, specify the parameters for the + * individual NICs with a comma, ie. trace=0,0x00001fff,0 you want to + * run tracing on NIC #2 but not on NIC #1 and #3. + * + * TODO: + * + * - Proper multicast support. + * - NIC dump support. + * - More tuning parameters. 
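The default link value mentioned above, 0x0270, is just the OR of four of the listed flag bits: negotiate all three speeds plus RX flow control. Spelled out (the macro names are invented for this sketch; the values are from the comment above):

    /* 0x0010 | 0x0020 | 0x0040 | 0x0200 == 0x0270, the documented default. */
    #define LNK_10MBPS    0x0010    /* negotiate 10 Mbit/sec   */
    #define LNK_100MBPS   0x0020    /* negotiate 100 Mbit/sec  */
    #define LNK_1000MBPS  0x0040    /* negotiate 1000 Mbit/sec */
    #define LNK_RX_FLOW   0x0200    /* enable RX flow control  */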
+ * + * The mini ring is not used under Linux and I am not sure it makes sense + * to actually use it. + * + * New interrupt handler strategy: + * + * The old interrupt handler worked using the traditional method of + * replacing an skbuff with a new one when a packet arrives. However + * the rx rings do not need to contain a static number of buffer + * descriptors, thus it makes sense to move the memory allocation out + * of the main interrupt handler and do it in a bottom half handler + * and only allocate new buffers when the number of buffers in the + * ring is below a certain threshold. In order to avoid starving the + * NIC under heavy load it is however necessary to force allocation + * when hitting a minimum threshold. The strategy for alloction is as + * follows: + * + * RX_LOW_BUF_THRES - allocate buffers in the bottom half + * RX_PANIC_LOW_THRES - we are very low on buffers, allocate + * the buffers in the interrupt handler + * RX_RING_THRES - maximum number of buffers in the rx ring + * RX_MINI_THRES - maximum number of buffers in the mini ring + * RX_JUMBO_THRES - maximum number of buffers in the jumbo ring + * + * One advantagous side effect of this allocation approach is that the + * entire rx processing can be done without holding any spin lock + * since the rx rings and registers are totally independent of the tx + * ring and its registers. This of course includes the kmalloc's of + * new skb's. Thus start_xmit can run in parallel with rx processing + * and the memory allocation on SMP systems. + * + * Note that running the skb reallocation in a bottom half opens up + * another can of races which needs to be handled properly. In + * particular it can happen that the interrupt handler tries to run + * the reallocation while the bottom half is either running on another + * CPU or was interrupted on the same CPU. To get around this the + * driver uses bitops to prevent the reallocation routines from being + * reentered. + * + * TX handling can also be done without holding any spin lock, wheee + * this is fun! since tx_ret_csm is only written to by the interrupt + * handler. The case to be aware of is when shutting down the device + * and cleaning up where it is necessary to make sure that + * start_xmit() is not running while this is happening. Well DaveM + * informs me that this case is already protected against ... bye bye + * Mr. Spin Lock, it was nice to know you. + * + * TX interrupts are now partly disabled so the NIC will only generate + * TX interrupts for the number of coal ticks, not for the number of + * TX packets in the queue. This should reduce the number of TX only, + * ie. when no RX processing is done, interrupts seen. + */ + +/* + * Threshold values for RX buffer allocation - the low water marks for + * when to start refilling the rings are set to 75% of the ring + * sizes. It seems to make sense to refill the rings entirely from the + * intrrupt handler once it gets below the panic threshold, that way + * we don't risk that the refilling is moved to another CPU when the + * one running the interrupt handler just got the slab code hot in its + * cache. 
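With the ring sizes defined just below, the 75% low-water rule works out to concrete numbers; for the standard ring, for example (integer arithmetic, values taken from the macros below):

    /* Worked thresholds for the 72-entry standard ring defined below. */
    enum {
            STD_LOW_WATER    = (3 * 72) / 4,  /* 54: refill in the bottom half */
            STD_PANIC        = 16,            /* below this, refill in the IRQ */
            STD_PANIC_REFILL = (3 * 16) / 2,  /* 24, per RX_PANIC_STD_REFILL   */
    };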
+ */ +#define RX_RING_SIZE 72 +#define RX_MINI_SIZE 64 +#define RX_JUMBO_SIZE 48 + +#define RX_PANIC_STD_THRES 16 +#define RX_PANIC_STD_REFILL (3*RX_PANIC_STD_THRES)/2 +#define RX_LOW_STD_THRES (3*RX_RING_SIZE)/4 +#define RX_PANIC_MINI_THRES 12 +#define RX_PANIC_MINI_REFILL (3*RX_PANIC_MINI_THRES)/2 +#define RX_LOW_MINI_THRES (3*RX_MINI_SIZE)/4 +#define RX_PANIC_JUMBO_THRES 6 +#define RX_PANIC_JUMBO_REFILL (3*RX_PANIC_JUMBO_THRES)/2 +#define RX_LOW_JUMBO_THRES (3*RX_JUMBO_SIZE)/4 + + +/* + * Size of the mini ring entries, basically these just should be big + * enough to take TCP ACKs + */ +#define ACE_MINI_SIZE 100 + +#define ACE_MINI_BUFSIZE ACE_MINI_SIZE +#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4) +#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4) + +/* + * There seems to be a magic difference in the effect between 995 and 996 + * but little difference between 900 and 995 ... no idea why. + * + * There is now a default set of tuning parameters which is set, depending + * on whether or not the user enables Jumbo frames. It's assumed that if + * Jumbo frames are enabled, the user wants optimal tuning for that case. + */ +#define DEF_TX_COAL 400 /* 996 */ +#define DEF_TX_MAX_DESC 60 /* was 40 */ +#define DEF_RX_COAL 120 /* 1000 */ +#define DEF_RX_MAX_DESC 25 +#define DEF_TX_RATIO 21 /* 24 */ + +#define DEF_JUMBO_TX_COAL 20 +#define DEF_JUMBO_TX_MAX_DESC 60 +#define DEF_JUMBO_RX_COAL 30 +#define DEF_JUMBO_RX_MAX_DESC 6 +#define DEF_JUMBO_TX_RATIO 21 + +#if tigon2FwReleaseLocal < 20001118 +/* + * Standard firmware and early modifications duplicate + * IRQ load without this flag (coal timer is never reset). + * Note that with this flag tx_coal should be less than + * time to xmit full tx ring. + * 400usec is not so bad for tx ring size of 128. + */ +#define TX_COAL_INTS_ONLY 1 /* worth it */ +#else +/* + * With modified firmware, this is not necessary, but still useful. 
+ */ +#define TX_COAL_INTS_ONLY 1 +#endif + +#define DEF_TRACE 0 +#define DEF_STAT (2 * TICKS_PER_SEC) + + +static int link_state[ACE_MAX_MOD_PARMS]; +static int trace[ACE_MAX_MOD_PARMS]; +static int tx_coal_tick[ACE_MAX_MOD_PARMS]; +static int rx_coal_tick[ACE_MAX_MOD_PARMS]; +static int max_tx_desc[ACE_MAX_MOD_PARMS]; +static int max_rx_desc[ACE_MAX_MOD_PARMS]; +static int tx_ratio[ACE_MAX_MOD_PARMS]; +static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1}; + +MODULE_AUTHOR("Jes Sorensen "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver"); +#ifndef CONFIG_ACENIC_OMIT_TIGON_I +MODULE_FIRMWARE("acenic/tg1.bin"); +#endif +MODULE_FIRMWARE("acenic/tg2.bin"); + +module_param_array_named(link, link_state, int, NULL, 0); +module_param_array(trace, int, NULL, 0); +module_param_array(tx_coal_tick, int, NULL, 0); +module_param_array(max_tx_desc, int, NULL, 0); +module_param_array(rx_coal_tick, int, NULL, 0); +module_param_array(max_rx_desc, int, NULL, 0); +module_param_array(tx_ratio, int, NULL, 0); +MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state"); +MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level"); +MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives"); +MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait"); +MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives"); +MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait"); +MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)"); + + +static const char version[] __devinitconst = + "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n" + " http://home.cern.ch/~jes/gige/acenic.html\n"; + +static int ace_get_settings(struct net_device *, struct ethtool_cmd *); +static int ace_set_settings(struct net_device *, struct ethtool_cmd *); +static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); + +static const struct ethtool_ops ace_ethtool_ops = { + .get_settings = ace_get_settings, + .set_settings = ace_set_settings, + .get_drvinfo = ace_get_drvinfo, +}; + +static void ace_watchdog(struct net_device *dev); + +static const struct net_device_ops ace_netdev_ops = { + .ndo_open = ace_open, + .ndo_stop = ace_close, + .ndo_tx_timeout = ace_watchdog, + .ndo_get_stats = ace_get_stats, + .ndo_start_xmit = ace_start_xmit, + .ndo_set_multicast_list = ace_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ace_set_mac_addr, + .ndo_change_mtu = ace_change_mtu, +}; + +static int __devinit acenic_probe_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct net_device *dev; + struct ace_private *ap; + static int boards_found; + + dev = alloc_etherdev(sizeof(struct ace_private)); + if (dev == NULL) { + printk(KERN_ERR "acenic: Unable to allocate " + "net_device structure!\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + ap = netdev_priv(dev); + ap->pdev = pdev; + ap->name = pci_name(pdev); + + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + + dev->watchdog_timeo = 5*HZ; + + dev->netdev_ops = &ace_netdev_ops; + SET_ETHTOOL_OPS(dev, &ace_ethtool_ops); + + /* we only display this string ONCE */ + if (!boards_found) + printk(version); + + if (pci_enable_device(pdev)) + goto 
fail_free_netdev; + + /* + * Enable master mode before we start playing with the + * pci_command word since pci_set_master() will modify + * it. + */ + pci_set_master(pdev); + + pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command); + + /* OpenFirmware on Mac's does not set this - DOH.. */ + if (!(ap->pci_command & PCI_COMMAND_MEMORY)) { + printk(KERN_INFO "%s: Enabling PCI Memory Mapped " + "access - was not enabled by BIOS/Firmware\n", + ap->name); + ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY; + pci_write_config_word(ap->pdev, PCI_COMMAND, + ap->pci_command); + wmb(); + } + + pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency); + if (ap->pci_latency <= 0x40) { + ap->pci_latency = 0x40; + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency); + } + + /* + * Remap the regs into kernel space - this is abuse of + * dev->base_addr since it was means for I/O port + * addresses but who gives a damn. + */ + dev->base_addr = pci_resource_start(pdev, 0); + ap->regs = ioremap(dev->base_addr, 0x4000); + if (!ap->regs) { + printk(KERN_ERR "%s: Unable to map I/O register, " + "AceNIC %i will be disabled.\n", + ap->name, boards_found); + goto fail_free_netdev; + } + + switch(pdev->vendor) { + case PCI_VENDOR_ID_ALTEON: + if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) { + printk(KERN_INFO "%s: Farallon PN9100-T ", + ap->name); + } else { + printk(KERN_INFO "%s: Alteon AceNIC ", + ap->name); + } + break; + case PCI_VENDOR_ID_3COM: + printk(KERN_INFO "%s: 3Com 3C985 ", ap->name); + break; + case PCI_VENDOR_ID_NETGEAR: + printk(KERN_INFO "%s: NetGear GA620 ", ap->name); + break; + case PCI_VENDOR_ID_DEC: + if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) { + printk(KERN_INFO "%s: Farallon PN9000-SX ", + ap->name); + break; + } + case PCI_VENDOR_ID_SGI: + printk(KERN_INFO "%s: SGI AceNIC ", ap->name); + break; + default: + printk(KERN_INFO "%s: Unknown AceNIC ", ap->name); + break; + } + + printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr); + printk("irq %d\n", pdev->irq); + +#ifdef CONFIG_ACENIC_OMIT_TIGON_I + if ((readl(&ap->regs->HostCtrl) >> 28) == 4) { + printk(KERN_ERR "%s: Driver compiled without Tigon I" + " support - NIC disabled\n", dev->name); + goto fail_uninit; + } +#endif + + if (ace_allocate_descriptors(dev)) + goto fail_free_netdev; + +#ifdef MODULE + if (boards_found >= ACE_MAX_MOD_PARMS) + ap->board_idx = BOARD_IDX_OVERFLOW; + else + ap->board_idx = boards_found; +#else + ap->board_idx = BOARD_IDX_STATIC; +#endif + + if (ace_init(dev)) + goto fail_free_netdev; + + if (register_netdev(dev)) { + printk(KERN_ERR "acenic: device registration failed\n"); + goto fail_uninit; + } + ap->name = dev->name; + + if (ap->pci_using_dac) + dev->features |= NETIF_F_HIGHDMA; + + pci_set_drvdata(pdev, dev); + + boards_found++; + return 0; + + fail_uninit: + ace_init_cleanup(dev); + fail_free_netdev: + free_netdev(dev); + return -ENODEV; +} + +static void __devexit acenic_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + short i; + + unregister_netdev(dev); + + writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl); + if (ap->version >= 2) + writel(readl(®s->CpuBCtrl) | CPU_HALT, ®s->CpuBCtrl); + + /* + * This clears any pending interrupts + */ + writel(1, ®s->Mb0Lo); + readl(®s->CpuCtrl); /* flush */ + + /* + * Make sure no other CPUs are processing interrupts + * on the card before the buffers are being released. 
+ * Otherwise one might experience some `interesting' + * effects. + * + * Then release the RX buffers - jumbo buffers were + * already released in ace_close(). + */ + ace_sync_irq(dev->irq); + + for (i = 0; i < RX_STD_RING_ENTRIES; i++) { + struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb; + + if (skb) { + struct ring_info *ringp; + dma_addr_t mapping; + + ringp = &ap->skb->rx_std_skbuff[i]; + mapping = dma_unmap_addr(ringp, mapping); + pci_unmap_page(ap->pdev, mapping, + ACE_STD_BUFSIZE, + PCI_DMA_FROMDEVICE); + + ap->rx_std_ring[i].size = 0; + ap->skb->rx_std_skbuff[i].skb = NULL; + dev_kfree_skb(skb); + } + } + + if (ap->version >= 2) { + for (i = 0; i < RX_MINI_RING_ENTRIES; i++) { + struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb; + + if (skb) { + struct ring_info *ringp; + dma_addr_t mapping; + + ringp = &ap->skb->rx_mini_skbuff[i]; + mapping = dma_unmap_addr(ringp,mapping); + pci_unmap_page(ap->pdev, mapping, + ACE_MINI_BUFSIZE, + PCI_DMA_FROMDEVICE); + + ap->rx_mini_ring[i].size = 0; + ap->skb->rx_mini_skbuff[i].skb = NULL; + dev_kfree_skb(skb); + } + } + } + + for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) { + struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb; + if (skb) { + struct ring_info *ringp; + dma_addr_t mapping; + + ringp = &ap->skb->rx_jumbo_skbuff[i]; + mapping = dma_unmap_addr(ringp, mapping); + pci_unmap_page(ap->pdev, mapping, + ACE_JUMBO_BUFSIZE, + PCI_DMA_FROMDEVICE); + + ap->rx_jumbo_ring[i].size = 0; + ap->skb->rx_jumbo_skbuff[i].skb = NULL; + dev_kfree_skb(skb); + } + } + + ace_init_cleanup(dev); + free_netdev(dev); +} + +static struct pci_driver acenic_pci_driver = { + .name = "acenic", + .id_table = acenic_pci_tbl, + .probe = acenic_probe_one, + .remove = __devexit_p(acenic_remove_one), +}; + +static int __init acenic_init(void) +{ + return pci_register_driver(&acenic_pci_driver); +} + +static void __exit acenic_exit(void) +{ + pci_unregister_driver(&acenic_pci_driver); +} + +module_init(acenic_init); +module_exit(acenic_exit); + +static void ace_free_descriptors(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + int size; + + if (ap->rx_std_ring != NULL) { + size = (sizeof(struct rx_desc) * + (RX_STD_RING_ENTRIES + + RX_JUMBO_RING_ENTRIES + + RX_MINI_RING_ENTRIES + + RX_RETURN_RING_ENTRIES)); + pci_free_consistent(ap->pdev, size, ap->rx_std_ring, + ap->rx_ring_base_dma); + ap->rx_std_ring = NULL; + ap->rx_jumbo_ring = NULL; + ap->rx_mini_ring = NULL; + ap->rx_return_ring = NULL; + } + if (ap->evt_ring != NULL) { + size = (sizeof(struct event) * EVT_RING_ENTRIES); + pci_free_consistent(ap->pdev, size, ap->evt_ring, + ap->evt_ring_dma); + ap->evt_ring = NULL; + } + if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) { + size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES); + pci_free_consistent(ap->pdev, size, ap->tx_ring, + ap->tx_ring_dma); + } + ap->tx_ring = NULL; + + if (ap->evt_prd != NULL) { + pci_free_consistent(ap->pdev, sizeof(u32), + (void *)ap->evt_prd, ap->evt_prd_dma); + ap->evt_prd = NULL; + } + if (ap->rx_ret_prd != NULL) { + pci_free_consistent(ap->pdev, sizeof(u32), + (void *)ap->rx_ret_prd, + ap->rx_ret_prd_dma); + ap->rx_ret_prd = NULL; + } + if (ap->tx_csm != NULL) { + pci_free_consistent(ap->pdev, sizeof(u32), + (void *)ap->tx_csm, ap->tx_csm_dma); + ap->tx_csm = NULL; + } +} + + +static int ace_allocate_descriptors(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + int size; + + size = (sizeof(struct rx_desc) * + (RX_STD_RING_ENTRIES + + RX_JUMBO_RING_ENTRIES + + RX_MINI_RING_ENTRIES + 
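+		 /*
+		  * The std, jumbo, mini and return rings share this one
+		  * DMA-coherent block; the ring pointers assigned below
+		  * are just offsets into it.
+		  */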
+ RX_RETURN_RING_ENTRIES)); + + ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size, + &ap->rx_ring_base_dma); + if (ap->rx_std_ring == NULL) + goto fail; + + ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES; + ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES; + ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES; + + size = (sizeof(struct event) * EVT_RING_ENTRIES); + + ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma); + + if (ap->evt_ring == NULL) + goto fail; + + /* + * Only allocate a host TX ring for the Tigon II, the Tigon I + * has to use PCI registers for this ;-( + */ + if (!ACE_IS_TIGON_I(ap)) { + size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES); + + ap->tx_ring = pci_alloc_consistent(ap->pdev, size, + &ap->tx_ring_dma); + + if (ap->tx_ring == NULL) + goto fail; + } + + ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), + &ap->evt_prd_dma); + if (ap->evt_prd == NULL) + goto fail; + + ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), + &ap->rx_ret_prd_dma); + if (ap->rx_ret_prd == NULL) + goto fail; + + ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32), + &ap->tx_csm_dma); + if (ap->tx_csm == NULL) + goto fail; + + return 0; + +fail: + /* Clean up. */ + ace_init_cleanup(dev); + return 1; +} + + +/* + * Generic cleanup handling data allocated during init. Used when the + * module is unloaded or if an error occurs during initialization + */ +static void ace_init_cleanup(struct net_device *dev) +{ + struct ace_private *ap; + + ap = netdev_priv(dev); + + ace_free_descriptors(dev); + + if (ap->info) + pci_free_consistent(ap->pdev, sizeof(struct ace_info), + ap->info, ap->info_dma); + kfree(ap->skb); + kfree(ap->trace_buf); + + if (dev->irq) + free_irq(dev->irq, dev); + + iounmap(ap->regs); +} + + +/* + * Commands are considered to be slow. + */ +static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd) +{ + u32 idx; + + idx = readl(®s->CmdPrd); + + writel(*(u32 *)(cmd), ®s->CmdRng[idx]); + idx = (idx + 1) % CMD_RING_ENTRIES; + + writel(idx, ®s->CmdPrd); +} + + +static int __devinit ace_init(struct net_device *dev) +{ + struct ace_private *ap; + struct ace_regs __iomem *regs; + struct ace_info *info = NULL; + struct pci_dev *pdev; + unsigned long myjif; + u64 tmp_ptr; + u32 tig_ver, mac1, mac2, tmp, pci_state; + int board_idx, ecode = 0; + short i; + unsigned char cache_size; + + ap = netdev_priv(dev); + regs = ap->regs; + + board_idx = ap->board_idx; + + /* + * aman@sgi.com - its useful to do a NIC reset here to + * address the `Firmware not running' problem subsequent + * to any crashes involving the NIC + */ + writel(HW_RESET | (HW_RESET << 24), ®s->HostCtrl); + readl(®s->HostCtrl); /* PCI write posting */ + udelay(5); + + /* + * Don't access any other registers before this point! + */ +#ifdef __BIG_ENDIAN + /* + * This will most likely need BYTE_SWAP once we switch + * to using __raw_writel() + */ + writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)), + ®s->HostCtrl); +#else + writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)), + ®s->HostCtrl); +#endif + readl(®s->HostCtrl); /* PCI write posting */ + + /* + * Stop the NIC CPU and clear pending interrupts + */ + writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl); + readl(®s->CpuCtrl); /* PCI write posting */ + writel(0, ®s->Mb0Lo); + + tig_ver = readl(®s->HostCtrl) >> 28; + + switch(tig_ver){ +#ifndef CONFIG_ACENIC_OMIT_TIGON_I + case 4: + case 5: + printk(KERN_INFO " Tigon I (Rev. 
%i), Firmware: %i.%i.%i, ", + tig_ver, ap->firmware_major, ap->firmware_minor, + ap->firmware_fix); + writel(0, ®s->LocalCtrl); + ap->version = 1; + ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES; + break; +#endif + case 6: + printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ", + tig_ver, ap->firmware_major, ap->firmware_minor, + ap->firmware_fix); + writel(readl(®s->CpuBCtrl) | CPU_HALT, ®s->CpuBCtrl); + readl(®s->CpuBCtrl); /* PCI write posting */ + /* + * The SRAM bank size does _not_ indicate the amount + * of memory on the card, it controls the _bank_ size! + * Ie. a 1MB AceNIC will have two banks of 512KB. + */ + writel(SRAM_BANK_512K, ®s->LocalCtrl); + writel(SYNC_SRAM_TIMING, ®s->MiscCfg); + ap->version = 2; + ap->tx_ring_entries = MAX_TX_RING_ENTRIES; + break; + default: + printk(KERN_WARNING " Unsupported Tigon version detected " + "(%i)\n", tig_ver); + ecode = -ENODEV; + goto init_error; + } + + /* + * ModeStat _must_ be set after the SRAM settings as this change + * seems to corrupt the ModeStat and possible other registers. + * The SRAM settings survive resets and setting it to the same + * value a second time works as well. This is what caused the + * `Firmware not running' problem on the Tigon II. + */ +#ifdef __BIG_ENDIAN + writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD | + ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, ®s->ModeStat); +#else + writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | + ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, ®s->ModeStat); +#endif + readl(®s->ModeStat); /* PCI write posting */ + + mac1 = 0; + for(i = 0; i < 4; i++) { + int t; + + mac1 = mac1 << 8; + t = read_eeprom_byte(dev, 0x8c+i); + if (t < 0) { + ecode = -EIO; + goto init_error; + } else + mac1 |= (t & 0xff); + } + mac2 = 0; + for(i = 4; i < 8; i++) { + int t; + + mac2 = mac2 << 8; + t = read_eeprom_byte(dev, 0x8c+i); + if (t < 0) { + ecode = -EIO; + goto init_error; + } else + mac2 |= (t & 0xff); + } + + writel(mac1, ®s->MacAddrHi); + writel(mac2, ®s->MacAddrLo); + + dev->dev_addr[0] = (mac1 >> 8) & 0xff; + dev->dev_addr[1] = mac1 & 0xff; + dev->dev_addr[2] = (mac2 >> 24) & 0xff; + dev->dev_addr[3] = (mac2 >> 16) & 0xff; + dev->dev_addr[4] = (mac2 >> 8) & 0xff; + dev->dev_addr[5] = mac2 & 0xff; + + printk("MAC: %pM\n", dev->dev_addr); + + /* + * Looks like this is necessary to deal with on all architectures, + * even this %$#%$# N440BX Intel based thing doesn't get it right. + * Ie. having two NICs in the machine, one will have the cache + * line set at boot time, the other will not. + */ + pdev = ap->pdev; + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size); + cache_size <<= 2; + if (cache_size != SMP_CACHE_BYTES) { + printk(KERN_INFO " PCI cache line size set incorrectly " + "(%i bytes) by BIOS/FW, ", cache_size); + if (cache_size > SMP_CACHE_BYTES) + printk("expecting %i\n", SMP_CACHE_BYTES); + else { + printk("correcting to %i\n", SMP_CACHE_BYTES); + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, + SMP_CACHE_BYTES >> 2); + } + } + + pci_state = readl(®s->PciState); + printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, " + "latency: %i clks\n", + (pci_state & PCI_32BIT) ? 32 : 64, + (pci_state & PCI_66MHZ) ? 66 : 33, + ap->pci_latency); + + /* + * Set the max DMA transfer size. Seems that for most systems + * the performance is better when no MAX parameter is + * set. However for systems enabling PCI write and invalidate, + * DMA writes must be set to the L1 cache line size to get + * optimal performance. 
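+	 * (In practice that means the DMA_WRITE_MAX_* setting should match
+	 * SMP_CACHE_BYTES, which is what the switch further down arranges.)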
+ * + * The default is now to turn the PCI write and invalidate off + * - that is what Alteon does for NT. + */ + tmp = READ_CMD_MEM | WRITE_CMD_MEM; + if (ap->version >= 2) { + tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ)); + /* + * Tuning parameters only supported for 8 cards + */ + if (board_idx == BOARD_IDX_OVERFLOW || + dis_pci_mem_inval[board_idx]) { + if (ap->pci_command & PCI_COMMAND_INVALIDATE) { + ap->pci_command &= ~PCI_COMMAND_INVALIDATE; + pci_write_config_word(pdev, PCI_COMMAND, + ap->pci_command); + printk(KERN_INFO " Disabling PCI memory " + "write and invalidate\n"); + } + } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) { + printk(KERN_INFO " PCI memory write & invalidate " + "enabled by BIOS, enabling counter measures\n"); + + switch(SMP_CACHE_BYTES) { + case 16: + tmp |= DMA_WRITE_MAX_16; + break; + case 32: + tmp |= DMA_WRITE_MAX_32; + break; + case 64: + tmp |= DMA_WRITE_MAX_64; + break; + case 128: + tmp |= DMA_WRITE_MAX_128; + break; + default: + printk(KERN_INFO " Cache line size %i not " + "supported, PCI write and invalidate " + "disabled\n", SMP_CACHE_BYTES); + ap->pci_command &= ~PCI_COMMAND_INVALIDATE; + pci_write_config_word(pdev, PCI_COMMAND, + ap->pci_command); + } + } + } + +#ifdef __sparc__ + /* + * On this platform, we know what the best dma settings + * are. We use 64-byte maximum bursts, because if we + * burst larger than the cache line size (or even cross + * a 64byte boundary in a single burst) the UltraSparc + * PCI controller will disconnect at 64-byte multiples. + * + * Read-multiple will be properly enabled above, and when + * set will give the PCI controller proper hints about + * prefetching. + */ + tmp &= ~DMA_READ_WRITE_MASK; + tmp |= DMA_READ_MAX_64; + tmp |= DMA_WRITE_MAX_64; +#endif +#ifdef __alpha__ + tmp &= ~DMA_READ_WRITE_MASK; + tmp |= DMA_READ_MAX_128; + /* + * All the docs say MUST NOT. Well, I did. + * Nothing terrible happens, if we load wrong size. + * Bit w&i still works better! + */ + tmp |= DMA_WRITE_MAX_128; +#endif + writel(tmp, ®s->PciState); + +#if 0 + /* + * The Host PCI bus controller driver has to set FBB. + * If all devices on that PCI bus support FBB, then the controller + * can enable FBB support in the Host PCI Bus controller (or on + * the PCI-PCI bridge if that applies). + * -ggg + */ + /* + * I have received reports from people having problems when this + * bit is enabled. + */ + if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) { + printk(KERN_INFO " Enabling PCI Fast Back to Back\n"); + ap->pci_command |= PCI_COMMAND_FAST_BACK; + pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command); + } +#endif + + /* + * Configure DMA attributes. + */ + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + ap->pci_using_dac = 1; + } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + ap->pci_using_dac = 0; + } else { + ecode = -ENODEV; + goto init_error; + } + + /* + * Initialize the generic info block and the command+event rings + * and the control blocks for the transmit and receive rings + * as they need to be setup once and for all. + */ + if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info), + &ap->info_dma))) { + ecode = -EAGAIN; + goto init_error; + } + ap->info = info; + + /* + * Get the memory for the skb rings. 
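+	 * (This is ordinary kmalloc memory; it only holds the driver's own
+	 * bookkeeping, so no DMA-coherent allocation is needed for it.)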
+ */ + if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) { + ecode = -EAGAIN; + goto init_error; + } + + ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED, + DRV_NAME, dev); + if (ecode) { + printk(KERN_WARNING "%s: Requested IRQ %d is busy\n", + DRV_NAME, pdev->irq); + goto init_error; + } else + dev->irq = pdev->irq; + +#ifdef INDEX_DEBUG + spin_lock_init(&ap->debug_lock); + ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1; + ap->last_std_rx = 0; + ap->last_mini_rx = 0; +#endif + + memset(ap->info, 0, sizeof(struct ace_info)); + memset(ap->skb, 0, sizeof(struct ace_skb)); + + ecode = ace_load_firmware(dev); + if (ecode) + goto init_error; + + ap->fw_running = 0; + + tmp_ptr = ap->info_dma; + writel(tmp_ptr >> 32, ®s->InfoPtrHi); + writel(tmp_ptr & 0xffffffff, ®s->InfoPtrLo); + + memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event)); + + set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma); + info->evt_ctrl.flags = 0; + + *(ap->evt_prd) = 0; + wmb(); + set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma); + writel(0, ®s->EvtCsm); + + set_aceaddr(&info->cmd_ctrl.rngptr, 0x100); + info->cmd_ctrl.flags = 0; + info->cmd_ctrl.max_len = 0; + + for (i = 0; i < CMD_RING_ENTRIES; i++) + writel(0, ®s->CmdRng[i]); + + writel(0, ®s->CmdPrd); + writel(0, ®s->CmdCsm); + + tmp_ptr = ap->info_dma; + tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats); + set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr); + + set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma); + info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE; + info->rx_std_ctrl.flags = + RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST; + + memset(ap->rx_std_ring, 0, + RX_STD_RING_ENTRIES * sizeof(struct rx_desc)); + + for (i = 0; i < RX_STD_RING_ENTRIES; i++) + ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM; + + ap->rx_std_skbprd = 0; + atomic_set(&ap->cur_rx_bufs, 0); + + set_aceaddr(&info->rx_jumbo_ctrl.rngptr, + (ap->rx_ring_base_dma + + (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES))); + info->rx_jumbo_ctrl.max_len = 0; + info->rx_jumbo_ctrl.flags = + RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST; + + memset(ap->rx_jumbo_ring, 0, + RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc)); + + for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) + ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO; + + ap->rx_jumbo_skbprd = 0; + atomic_set(&ap->cur_jumbo_bufs, 0); + + memset(ap->rx_mini_ring, 0, + RX_MINI_RING_ENTRIES * sizeof(struct rx_desc)); + + if (ap->version >= 2) { + set_aceaddr(&info->rx_mini_ctrl.rngptr, + (ap->rx_ring_base_dma + + (sizeof(struct rx_desc) * + (RX_STD_RING_ENTRIES + + RX_JUMBO_RING_ENTRIES)))); + info->rx_mini_ctrl.max_len = ACE_MINI_SIZE; + info->rx_mini_ctrl.flags = + RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST; + + for (i = 0; i < RX_MINI_RING_ENTRIES; i++) + ap->rx_mini_ring[i].flags = + BD_FLG_TCP_UDP_SUM | BD_FLG_MINI; + } else { + set_aceaddr(&info->rx_mini_ctrl.rngptr, 0); + info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE; + info->rx_mini_ctrl.max_len = 0; + } + + ap->rx_mini_skbprd = 0; + atomic_set(&ap->cur_mini_bufs, 0); + + set_aceaddr(&info->rx_return_ctrl.rngptr, + (ap->rx_ring_base_dma + + (sizeof(struct rx_desc) * + (RX_STD_RING_ENTRIES + + RX_JUMBO_RING_ENTRIES + + RX_MINI_RING_ENTRIES)))); + info->rx_return_ctrl.flags = 0; + info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES; + + memset(ap->rx_return_ring, 0, + RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc)); + + set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma); 
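+	/*
+	 * The RX return producer index lives in host memory (allocated
+	 * DMA-coherent above), so the NIC can advance it by DMA and the
+	 * interrupt handler can read it without a slow PCI register read.
+	 */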
+ *(ap->rx_ret_prd) = 0; + + writel(TX_RING_BASE, ®s->WinBase); + + if (ACE_IS_TIGON_I(ap)) { + ap->tx_ring = (__force struct tx_desc *) regs->Window; + for (i = 0; i < (TIGON_I_TX_RING_ENTRIES + * sizeof(struct tx_desc)) / sizeof(u32); i++) + writel(0, (__force void __iomem *)ap->tx_ring + i * 4); + + set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE); + } else { + memset(ap->tx_ring, 0, + MAX_TX_RING_ENTRIES * sizeof(struct tx_desc)); + + set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma); + } + + info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap); + tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST; + + /* + * The Tigon I does not like having the TX ring in host memory ;-( + */ + if (!ACE_IS_TIGON_I(ap)) + tmp |= RCB_FLG_TX_HOST_RING; +#if TX_COAL_INTS_ONLY + tmp |= RCB_FLG_COAL_INT_ONLY; +#endif + info->tx_ctrl.flags = tmp; + + set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma); + + /* + * Potential item for tuning parameter + */ +#if 0 /* NO */ + writel(DMA_THRESH_16W, ®s->DmaReadCfg); + writel(DMA_THRESH_16W, ®s->DmaWriteCfg); +#else + writel(DMA_THRESH_8W, ®s->DmaReadCfg); + writel(DMA_THRESH_8W, ®s->DmaWriteCfg); +#endif + + writel(0, ®s->MaskInt); + writel(1, ®s->IfIdx); +#if 0 + /* + * McKinley boxes do not like us fiddling with AssistState + * this early + */ + writel(1, ®s->AssistState); +#endif + + writel(DEF_STAT, ®s->TuneStatTicks); + writel(DEF_TRACE, ®s->TuneTrace); + + ace_set_rxtx_parms(dev, 0); + + if (board_idx == BOARD_IDX_OVERFLOW) { + printk(KERN_WARNING "%s: more than %i NICs detected, " + "ignoring module parameters!\n", + ap->name, ACE_MAX_MOD_PARMS); + } else if (board_idx >= 0) { + if (tx_coal_tick[board_idx]) + writel(tx_coal_tick[board_idx], + ®s->TuneTxCoalTicks); + if (max_tx_desc[board_idx]) + writel(max_tx_desc[board_idx], ®s->TuneMaxTxDesc); + + if (rx_coal_tick[board_idx]) + writel(rx_coal_tick[board_idx], + ®s->TuneRxCoalTicks); + if (max_rx_desc[board_idx]) + writel(max_rx_desc[board_idx], ®s->TuneMaxRxDesc); + + if (trace[board_idx]) + writel(trace[board_idx], ®s->TuneTrace); + + if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64)) + writel(tx_ratio[board_idx], ®s->TxBufRat); + } + + /* + * Default link parameters + */ + tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB | + LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE; + if(ap->version >= 2) + tmp |= LNK_TX_FLOW_CTL_Y; + + /* + * Override link default parameters + */ + if ((board_idx >= 0) && link_state[board_idx]) { + int option = link_state[board_idx]; + + tmp = LNK_ENABLE; + + if (option & 0x01) { + printk(KERN_INFO "%s: Setting half duplex link\n", + ap->name); + tmp &= ~LNK_FULL_DUPLEX; + } + if (option & 0x02) + tmp &= ~LNK_NEGOTIATE; + if (option & 0x10) + tmp |= LNK_10MB; + if (option & 0x20) + tmp |= LNK_100MB; + if (option & 0x40) + tmp |= LNK_1000MB; + if ((option & 0x70) == 0) { + printk(KERN_WARNING "%s: No media speed specified, " + "forcing auto negotiation\n", ap->name); + tmp |= LNK_NEGOTIATE | LNK_1000MB | + LNK_100MB | LNK_10MB; + } + if ((option & 0x100) == 0) + tmp |= LNK_NEG_FCTL; + else + printk(KERN_INFO "%s: Disabling flow control " + "negotiation\n", ap->name); + if (option & 0x200) + tmp |= LNK_RX_FLOW_CTL_Y; + if ((option & 0x400) && (ap->version >= 2)) { + printk(KERN_INFO "%s: Enabling TX flow control\n", + ap->name); + tmp |= LNK_TX_FLOW_CTL_Y; + } + } + + ap->link = tmp; + writel(tmp, ®s->TuneLink); + if (ap->version >= 2) + writel(tmp, ®s->TuneFastLink); + + writel(ap->firmware_start, ®s->Pc); + + writel(0, ®s->Mb0Lo); + 
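+	/*
+	 * (Mb0 doubles as the interrupt ACK mailbox - see ace_interrupt();
+	 * it is cleared before the NIC CPU is started.)
+	 */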
+	/*
+	 * Set tx_csm before we start receiving interrupts, otherwise
+	 * the interrupt handler might think it is supposed to process
+	 * tx ints before we are up and running, which may cause a null
+	 * pointer access in the int handler.
+	 */
+	ap->cur_rx = 0;
+	ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
+
+	wmb();
+	ace_set_txprd(regs, ap, 0);
+	writel(0, &regs->RxRetCsm);
+
+	/*
+	 * Enable DMA engine now.
+	 * If we do this sooner, McKinley boxes puke.
+	 * I assume it's because the Tigon II DMA engine wants to check
+	 * *something* even before the CPU is started.
+	 */
+	writel(1, &regs->AssistState);  /* enable DMA */
+
+	/*
+	 * Start the NIC CPU
+	 */
+	writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
+	readl(&regs->CpuCtrl);
+
+	/*
+	 * Wait for the firmware to spin up - max 3 seconds.
+	 */
+	myjif = jiffies + 3 * HZ;
+	while (time_before(jiffies, myjif) && !ap->fw_running)
+		cpu_relax();
+
+	if (!ap->fw_running) {
+		printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);
+
+		ace_dump_trace(ap);
+		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
+		readl(&regs->CpuCtrl);
+
+		/* aman@sgi.com - account for badly behaving firmware/NIC:
+		 * - have observed that the NIC may continue to generate
+		 *   interrupts for some reason; attempt to stop it - halt
+		 *   the second CPU for Tigon II cards, and also clear Mb0
+		 * - if we're a module, we'll fail to load if this was
+		 *   the only GbE card in the system => if the kernel does
+		 *   see an interrupt from the NIC, the code to handle it
+		 *   is gone and Oops! - so free_irq as well
+		 */
+		if (ap->version >= 2)
+			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
+			       &regs->CpuBCtrl);
+		writel(0, &regs->Mb0Lo);
+		readl(&regs->Mb0Lo);
+
+		ecode = -EBUSY;
+		goto init_error;
+	}
+
+	/*
+	 * We load the ring here as there seems to be no way to tell the
+	 * firmware to wipe the ring without re-initializing it.
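+	 * (The std_refill_busy/mini_refill_busy bits tested below keep
+	 * the tasklet from refilling the same ring concurrently.)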
+ */ + if (!test_and_set_bit(0, &ap->std_refill_busy)) + ace_load_std_rx_ring(dev, RX_RING_SIZE); + else + printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n", + ap->name); + if (ap->version >= 2) { + if (!test_and_set_bit(0, &ap->mini_refill_busy)) + ace_load_mini_rx_ring(dev, RX_MINI_SIZE); + else + printk(KERN_ERR "%s: Someone is busy refilling " + "the RX mini ring\n", ap->name); + } + return 0; + + init_error: + ace_init_cleanup(dev); + return ecode; +} + + +static void ace_set_rxtx_parms(struct net_device *dev, int jumbo) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + int board_idx = ap->board_idx; + + if (board_idx >= 0) { + if (!jumbo) { + if (!tx_coal_tick[board_idx]) + writel(DEF_TX_COAL, ®s->TuneTxCoalTicks); + if (!max_tx_desc[board_idx]) + writel(DEF_TX_MAX_DESC, ®s->TuneMaxTxDesc); + if (!rx_coal_tick[board_idx]) + writel(DEF_RX_COAL, ®s->TuneRxCoalTicks); + if (!max_rx_desc[board_idx]) + writel(DEF_RX_MAX_DESC, ®s->TuneMaxRxDesc); + if (!tx_ratio[board_idx]) + writel(DEF_TX_RATIO, ®s->TxBufRat); + } else { + if (!tx_coal_tick[board_idx]) + writel(DEF_JUMBO_TX_COAL, + ®s->TuneTxCoalTicks); + if (!max_tx_desc[board_idx]) + writel(DEF_JUMBO_TX_MAX_DESC, + ®s->TuneMaxTxDesc); + if (!rx_coal_tick[board_idx]) + writel(DEF_JUMBO_RX_COAL, + ®s->TuneRxCoalTicks); + if (!max_rx_desc[board_idx]) + writel(DEF_JUMBO_RX_MAX_DESC, + ®s->TuneMaxRxDesc); + if (!tx_ratio[board_idx]) + writel(DEF_JUMBO_TX_RATIO, ®s->TxBufRat); + } + } +} + + +static void ace_watchdog(struct net_device *data) +{ + struct net_device *dev = data; + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + + /* + * We haven't received a stats update event for more than 2.5 + * seconds and there is data in the transmit queue, thus we + * assume the card is stuck. + */ + if (*ap->tx_csm != ap->tx_ret_csm) { + printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n", + dev->name, (unsigned int)readl(®s->HostCtrl)); + /* This can happen due to ieee flow control. */ + } else { + printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n", + dev->name); +#if 0 + netif_wake_queue(dev); +#endif + } +} + + +static void ace_tasklet(unsigned long arg) +{ + struct net_device *dev = (struct net_device *) arg; + struct ace_private *ap = netdev_priv(dev); + int cur_size; + + cur_size = atomic_read(&ap->cur_rx_bufs); + if ((cur_size < RX_LOW_STD_THRES) && + !test_and_set_bit(0, &ap->std_refill_busy)) { +#ifdef DEBUG + printk("refilling buffers (current %i)\n", cur_size); +#endif + ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size); + } + + if (ap->version >= 2) { + cur_size = atomic_read(&ap->cur_mini_bufs); + if ((cur_size < RX_LOW_MINI_THRES) && + !test_and_set_bit(0, &ap->mini_refill_busy)) { +#ifdef DEBUG + printk("refilling mini buffers (current %i)\n", + cur_size); +#endif + ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size); + } + } + + cur_size = atomic_read(&ap->cur_jumbo_bufs); + if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) && + !test_and_set_bit(0, &ap->jumbo_refill_busy)) { +#ifdef DEBUG + printk("refilling jumbo buffers (current %i)\n", cur_size); +#endif + ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size); + } + ap->tasklet_pending = 0; +} + + +/* + * Copy the contents of the NIC's trace buffer to kernel memory. + */ +static void ace_dump_trace(struct ace_private *ap) +{ +#if 0 + if (!ap->trace_buf) + if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL))) + return; +#endif +} + + +/* + * Load the standard rx ring. 
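+ * nr_bufs fresh skbs are allocated, DMA-mapped and handed to the NIC.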
+ * + * Loading rings is safe without holding the spin lock since this is + * done only before the device is enabled, thus no interrupts are + * generated and by the interrupt handler/tasklet handler. + */ +static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + short i, idx; + + + prefetchw(&ap->cur_rx_bufs); + + idx = ap->rx_std_skbprd; + + for (i = 0; i < nr_bufs; i++) { + struct sk_buff *skb; + struct rx_desc *rd; + dma_addr_t mapping; + + skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE); + if (!skb) + break; + + mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), + ACE_STD_BUFSIZE, + PCI_DMA_FROMDEVICE); + ap->skb->rx_std_skbuff[idx].skb = skb; + dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], + mapping, mapping); + + rd = &ap->rx_std_ring[idx]; + set_aceaddr(&rd->addr, mapping); + rd->size = ACE_STD_BUFSIZE; + rd->idx = idx; + idx = (idx + 1) % RX_STD_RING_ENTRIES; + } + + if (!i) + goto error_out; + + atomic_add(i, &ap->cur_rx_bufs); + ap->rx_std_skbprd = idx; + + if (ACE_IS_TIGON_I(ap)) { + struct cmd cmd; + cmd.evt = C_SET_RX_PRD_IDX; + cmd.code = 0; + cmd.idx = ap->rx_std_skbprd; + ace_issue_cmd(regs, &cmd); + } else { + writel(idx, ®s->RxStdPrd); + wmb(); + } + + out: + clear_bit(0, &ap->std_refill_busy); + return; + + error_out: + printk(KERN_INFO "Out of memory when allocating " + "standard receive buffers\n"); + goto out; +} + + +static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + short i, idx; + + prefetchw(&ap->cur_mini_bufs); + + idx = ap->rx_mini_skbprd; + for (i = 0; i < nr_bufs; i++) { + struct sk_buff *skb; + struct rx_desc *rd; + dma_addr_t mapping; + + skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE); + if (!skb) + break; + + mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), + ACE_MINI_BUFSIZE, + PCI_DMA_FROMDEVICE); + ap->skb->rx_mini_skbuff[idx].skb = skb; + dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], + mapping, mapping); + + rd = &ap->rx_mini_ring[idx]; + set_aceaddr(&rd->addr, mapping); + rd->size = ACE_MINI_BUFSIZE; + rd->idx = idx; + idx = (idx + 1) % RX_MINI_RING_ENTRIES; + } + + if (!i) + goto error_out; + + atomic_add(i, &ap->cur_mini_bufs); + + ap->rx_mini_skbprd = idx; + + writel(idx, ®s->RxMiniPrd); + wmb(); + + out: + clear_bit(0, &ap->mini_refill_busy); + return; + error_out: + printk(KERN_INFO "Out of memory when allocating " + "mini receive buffers\n"); + goto out; +} + + +/* + * Load the jumbo rx ring, this may happen at any time if the MTU + * is changed to a value > 1500. 
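+ * (ace_change_mtu() turns the jumbo ring on once the MTU exceeds
+ * ACE_STD_MTU.)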
+ */ +static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + short i, idx; + + idx = ap->rx_jumbo_skbprd; + + for (i = 0; i < nr_bufs; i++) { + struct sk_buff *skb; + struct rx_desc *rd; + dma_addr_t mapping; + + skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE); + if (!skb) + break; + + mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), + ACE_JUMBO_BUFSIZE, + PCI_DMA_FROMDEVICE); + ap->skb->rx_jumbo_skbuff[idx].skb = skb; + dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx], + mapping, mapping); + + rd = &ap->rx_jumbo_ring[idx]; + set_aceaddr(&rd->addr, mapping); + rd->size = ACE_JUMBO_BUFSIZE; + rd->idx = idx; + idx = (idx + 1) % RX_JUMBO_RING_ENTRIES; + } + + if (!i) + goto error_out; + + atomic_add(i, &ap->cur_jumbo_bufs); + ap->rx_jumbo_skbprd = idx; + + if (ACE_IS_TIGON_I(ap)) { + struct cmd cmd; + cmd.evt = C_SET_RX_JUMBO_PRD_IDX; + cmd.code = 0; + cmd.idx = ap->rx_jumbo_skbprd; + ace_issue_cmd(regs, &cmd); + } else { + writel(idx, ®s->RxJumboPrd); + wmb(); + } + + out: + clear_bit(0, &ap->jumbo_refill_busy); + return; + error_out: + if (net_ratelimit()) + printk(KERN_INFO "Out of memory when allocating " + "jumbo receive buffers\n"); + goto out; +} + + +/* + * All events are considered to be slow (RX/TX ints do not generate + * events) and are handled here, outside the main interrupt handler, + * to reduce the size of the handler. + */ +static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd) +{ + struct ace_private *ap; + + ap = netdev_priv(dev); + + while (evtcsm != evtprd) { + switch (ap->evt_ring[evtcsm].evt) { + case E_FW_RUNNING: + printk(KERN_INFO "%s: Firmware up and running\n", + ap->name); + ap->fw_running = 1; + wmb(); + break; + case E_STATS_UPDATED: + break; + case E_LNK_STATE: + { + u16 code = ap->evt_ring[evtcsm].code; + switch (code) { + case E_C_LINK_UP: + { + u32 state = readl(&ap->regs->GigLnkState); + printk(KERN_WARNING "%s: Optical link UP " + "(%s Duplex, Flow Control: %s%s)\n", + ap->name, + state & LNK_FULL_DUPLEX ? "Full":"Half", + state & LNK_TX_FLOW_CTL_Y ? "TX " : "", + state & LNK_RX_FLOW_CTL_Y ? 
"RX" : ""); + break; + } + case E_C_LINK_DOWN: + printk(KERN_WARNING "%s: Optical link DOWN\n", + ap->name); + break; + case E_C_LINK_10_100: + printk(KERN_WARNING "%s: 10/100BaseT link " + "UP\n", ap->name); + break; + default: + printk(KERN_ERR "%s: Unknown optical link " + "state %02x\n", ap->name, code); + } + break; + } + case E_ERROR: + switch(ap->evt_ring[evtcsm].code) { + case E_C_ERR_INVAL_CMD: + printk(KERN_ERR "%s: invalid command error\n", + ap->name); + break; + case E_C_ERR_UNIMP_CMD: + printk(KERN_ERR "%s: unimplemented command " + "error\n", ap->name); + break; + case E_C_ERR_BAD_CFG: + printk(KERN_ERR "%s: bad config error\n", + ap->name); + break; + default: + printk(KERN_ERR "%s: unknown error %02x\n", + ap->name, ap->evt_ring[evtcsm].code); + } + break; + case E_RESET_JUMBO_RNG: + { + int i; + for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) { + if (ap->skb->rx_jumbo_skbuff[i].skb) { + ap->rx_jumbo_ring[i].size = 0; + set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0); + dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb); + ap->skb->rx_jumbo_skbuff[i].skb = NULL; + } + } + + if (ACE_IS_TIGON_I(ap)) { + struct cmd cmd; + cmd.evt = C_SET_RX_JUMBO_PRD_IDX; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(ap->regs, &cmd); + } else { + writel(0, &((ap->regs)->RxJumboPrd)); + wmb(); + } + + ap->jumbo = 0; + ap->rx_jumbo_skbprd = 0; + printk(KERN_INFO "%s: Jumbo ring flushed\n", + ap->name); + clear_bit(0, &ap->jumbo_refill_busy); + break; + } + default: + printk(KERN_ERR "%s: Unhandled event 0x%02x\n", + ap->name, ap->evt_ring[evtcsm].evt); + } + evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES; + } + + return evtcsm; +} + + +static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) +{ + struct ace_private *ap = netdev_priv(dev); + u32 idx; + int mini_count = 0, std_count = 0; + + idx = rxretcsm; + + prefetchw(&ap->cur_rx_bufs); + prefetchw(&ap->cur_mini_bufs); + + while (idx != rxretprd) { + struct ring_info *rip; + struct sk_buff *skb; + struct rx_desc *rxdesc, *retdesc; + u32 skbidx; + int bd_flags, desc_type, mapsize; + u16 csum; + + + /* make sure the rx descriptor isn't read before rxretprd */ + if (idx == rxretcsm) + rmb(); + + retdesc = &ap->rx_return_ring[idx]; + skbidx = retdesc->idx; + bd_flags = retdesc->flags; + desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI); + + switch(desc_type) { + /* + * Normal frames do not have any flags set + * + * Mini and normal frames arrive frequently, + * so use a local counter to avoid doing + * atomic operations for each packet arriving. + */ + case 0: + rip = &ap->skb->rx_std_skbuff[skbidx]; + mapsize = ACE_STD_BUFSIZE; + rxdesc = &ap->rx_std_ring[skbidx]; + std_count++; + break; + case BD_FLG_JUMBO: + rip = &ap->skb->rx_jumbo_skbuff[skbidx]; + mapsize = ACE_JUMBO_BUFSIZE; + rxdesc = &ap->rx_jumbo_ring[skbidx]; + atomic_dec(&ap->cur_jumbo_bufs); + break; + case BD_FLG_MINI: + rip = &ap->skb->rx_mini_skbuff[skbidx]; + mapsize = ACE_MINI_BUFSIZE; + rxdesc = &ap->rx_mini_ring[skbidx]; + mini_count++; + break; + default: + printk(KERN_INFO "%s: unknown frame type (0x%02x) " + "returned by NIC\n", dev->name, + retdesc->flags); + goto error; + } + + skb = rip->skb; + rip->skb = NULL; + pci_unmap_page(ap->pdev, + dma_unmap_addr(rip, mapping), + mapsize, + PCI_DMA_FROMDEVICE); + skb_put(skb, retdesc->size); + + /* + * Fly baby, fly! + */ + csum = retdesc->tcp_udp_csum; + + skb->protocol = eth_type_trans(skb, dev); + + /* + * Instead of forcing the poor tigon mips cpu to calculate + * pseudo hdr checksum, we do this ourselves. 
+ */ + if (bd_flags & BD_FLG_TCP_UDP_SUM) { + skb->csum = htons(csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } else { + skb_checksum_none_assert(skb); + } + + /* send it up */ + if ((bd_flags & BD_FLG_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, retdesc->vlan); + netif_rx(skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += retdesc->size; + + idx = (idx + 1) % RX_RETURN_RING_ENTRIES; + } + + atomic_sub(std_count, &ap->cur_rx_bufs); + if (!ACE_IS_TIGON_I(ap)) + atomic_sub(mini_count, &ap->cur_mini_bufs); + + out: + /* + * According to the documentation RxRetCsm is obsolete with + * the 12.3.x Firmware - my Tigon I NICs seem to disagree! + */ + if (ACE_IS_TIGON_I(ap)) { + writel(idx, &ap->regs->RxRetCsm); + } + ap->cur_rx = idx; + + return; + error: + idx = rxretprd; + goto out; +} + + +static inline void ace_tx_int(struct net_device *dev, + u32 txcsm, u32 idx) +{ + struct ace_private *ap = netdev_priv(dev); + + do { + struct sk_buff *skb; + struct tx_ring_info *info; + + info = ap->skb->tx_skbuff + idx; + skb = info->skb; + + if (dma_unmap_len(info, maplen)) { + pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), + dma_unmap_len(info, maplen), + PCI_DMA_TODEVICE); + dma_unmap_len_set(info, maplen, 0); + } + + if (skb) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + dev_kfree_skb_irq(skb); + info->skb = NULL; + } + + idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); + } while (idx != txcsm); + + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + wmb(); + ap->tx_ret_csm = txcsm; + + /* So... tx_ret_csm is advanced _after_ check for device wakeup. + * + * We could try to make it before. In this case we would get + * the following race condition: hard_start_xmit on other cpu + * enters after we advanced tx_ret_csm and fills space, + * which we have just freed, so that we make illegal device wakeup. + * There is no good way to workaround this (at entry + * to ace_start_xmit detects this condition and prevents + * ring corruption, but it is not a good workaround.) + * + * When tx_ret_csm is advanced after, we wake up device _only_ + * if we really have some space in ring (though the core doing + * hard_start_xmit can see full ring for some period and has to + * synchronize.) Superb. + * BUT! We get another subtle race condition. hard_start_xmit + * may think that ring is full between wakeup and advancing + * tx_ret_csm and will stop device instantly! It is not so bad. + * We are guaranteed that there is something in ring, so that + * the next irq will resume transmission. To speedup this we could + * mark descriptor, which closes ring with BD_FLG_COAL_NOW + * (see ace_start_xmit). + * + * Well, this dilemma exists in all lock-free devices. + * We, following scheme used in drivers by Donald Becker, + * select the least dangerous. + * --ANK + */ +} + + +static irqreturn_t ace_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + u32 idx; + u32 txcsm, rxretcsm, rxretprd; + u32 evtcsm, evtprd; + + /* + * In case of PCI shared interrupts or spurious interrupts, + * we want to make sure it is actually our interrupt before + * spending any time in here. + */ + if (!(readl(®s->HostCtrl) & IN_INT)) + return IRQ_NONE; + + /* + * ACK intr now. Otherwise we will lose updates to rx_ret_prd, + * which happened _after_ rxretprd = *ap->rx_ret_prd; but before + * writel(0, ®s->Mb0Lo). 
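+	 * (So: ACK first, then sample the producer indices - a producer
+	 * update that races with the ACK simply latches a fresh IRQ.)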
+ * + * "IRQ avoidance" recommended in docs applies to IRQs served + * threads and it is wrong even for that case. + */ + writel(0, ®s->Mb0Lo); + readl(®s->Mb0Lo); + + /* + * There is no conflict between transmit handling in + * start_xmit and receive processing, thus there is no reason + * to take a spin lock for RX handling. Wait until we start + * working on the other stuff - hey we don't need a spin lock + * anymore. + */ + rxretprd = *ap->rx_ret_prd; + rxretcsm = ap->cur_rx; + + if (rxretprd != rxretcsm) + ace_rx_int(dev, rxretprd, rxretcsm); + + txcsm = *ap->tx_csm; + idx = ap->tx_ret_csm; + + if (txcsm != idx) { + /* + * If each skb takes only one descriptor this check degenerates + * to identity, because new space has just been opened. + * But if skbs are fragmented we must check that this index + * update releases enough of space, otherwise we just + * wait for device to make more work. + */ + if (!tx_ring_full(ap, txcsm, ap->tx_prd)) + ace_tx_int(dev, txcsm, idx); + } + + evtcsm = readl(®s->EvtCsm); + evtprd = *ap->evt_prd; + + if (evtcsm != evtprd) { + evtcsm = ace_handle_event(dev, evtcsm, evtprd); + writel(evtcsm, ®s->EvtCsm); + } + + /* + * This has to go last in the interrupt handler and run with + * the spin lock released ... what lock? + */ + if (netif_running(dev)) { + int cur_size; + int run_tasklet = 0; + + cur_size = atomic_read(&ap->cur_rx_bufs); + if (cur_size < RX_LOW_STD_THRES) { + if ((cur_size < RX_PANIC_STD_THRES) && + !test_and_set_bit(0, &ap->std_refill_busy)) { +#ifdef DEBUG + printk("low on std buffers %i\n", cur_size); +#endif + ace_load_std_rx_ring(dev, + RX_RING_SIZE - cur_size); + } else + run_tasklet = 1; + } + + if (!ACE_IS_TIGON_I(ap)) { + cur_size = atomic_read(&ap->cur_mini_bufs); + if (cur_size < RX_LOW_MINI_THRES) { + if ((cur_size < RX_PANIC_MINI_THRES) && + !test_and_set_bit(0, + &ap->mini_refill_busy)) { +#ifdef DEBUG + printk("low on mini buffers %i\n", + cur_size); +#endif + ace_load_mini_rx_ring(dev, + RX_MINI_SIZE - cur_size); + } else + run_tasklet = 1; + } + } + + if (ap->jumbo) { + cur_size = atomic_read(&ap->cur_jumbo_bufs); + if (cur_size < RX_LOW_JUMBO_THRES) { + if ((cur_size < RX_PANIC_JUMBO_THRES) && + !test_and_set_bit(0, + &ap->jumbo_refill_busy)){ +#ifdef DEBUG + printk("low on jumbo buffers %i\n", + cur_size); +#endif + ace_load_jumbo_rx_ring(dev, + RX_JUMBO_SIZE - cur_size); + } else + run_tasklet = 1; + } + } + if (run_tasklet && !ap->tasklet_pending) { + ap->tasklet_pending = 1; + tasklet_schedule(&ap->ace_tasklet); + } + } + + return IRQ_HANDLED; +} + +static int ace_open(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + struct cmd cmd; + + if (!(ap->fw_running)) { + printk(KERN_WARNING "%s: Firmware not running!\n", dev->name); + return -EBUSY; + } + + writel(dev->mtu + ETH_HLEN + 4, ®s->IfMtu); + + cmd.evt = C_CLEAR_STATS; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + + cmd.evt = C_HOST_STATE; + cmd.code = C_C_STACK_UP; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + + if (ap->jumbo && + !test_and_set_bit(0, &ap->jumbo_refill_busy)) + ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE); + + if (dev->flags & IFF_PROMISC) { + cmd.evt = C_SET_PROMISC_MODE; + cmd.code = C_C_PROMISC_ENABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + + ap->promisc = 1; + }else + ap->promisc = 0; + ap->mcast_all = 0; + +#if 0 + cmd.evt = C_LNK_NEGOTIATION; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); +#endif + + netif_start_queue(dev); + + /* + * Setup the 
bottom half rx ring refill handler + */ + tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev); + return 0; +} + + +static int ace_close(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + struct cmd cmd; + unsigned long flags; + short i; + + /* + * Without (or before) releasing irq and stopping hardware, this + * is an absolute non-sense, by the way. It will be reset instantly + * by the first irq. + */ + netif_stop_queue(dev); + + + if (ap->promisc) { + cmd.evt = C_SET_PROMISC_MODE; + cmd.code = C_C_PROMISC_DISABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + ap->promisc = 0; + } + + cmd.evt = C_HOST_STATE; + cmd.code = C_C_STACK_DOWN; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + + tasklet_kill(&ap->ace_tasklet); + + /* + * Make sure one CPU is not processing packets while + * buffers are being released by another. + */ + + local_irq_save(flags); + ace_mask_irq(dev); + + for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) { + struct sk_buff *skb; + struct tx_ring_info *info; + + info = ap->skb->tx_skbuff + i; + skb = info->skb; + + if (dma_unmap_len(info, maplen)) { + if (ACE_IS_TIGON_I(ap)) { + /* NB: TIGON_1 is special, tx_ring is in io space */ + struct tx_desc __iomem *tx; + tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i]; + writel(0, &tx->addr.addrhi); + writel(0, &tx->addr.addrlo); + writel(0, &tx->flagsize); + } else + memset(ap->tx_ring + i, 0, + sizeof(struct tx_desc)); + pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), + dma_unmap_len(info, maplen), + PCI_DMA_TODEVICE); + dma_unmap_len_set(info, maplen, 0); + } + if (skb) { + dev_kfree_skb(skb); + info->skb = NULL; + } + } + + if (ap->jumbo) { + cmd.evt = C_RESET_JUMBO_RNG; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + } + + ace_unmask_irq(dev); + local_irq_restore(flags); + + return 0; +} + + +static inline dma_addr_t +ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb, + struct sk_buff *tail, u32 idx) +{ + dma_addr_t mapping; + struct tx_ring_info *info; + + mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), + skb->len, PCI_DMA_TODEVICE); + + info = ap->skb->tx_skbuff + idx; + info->skb = tail; + dma_unmap_addr_set(info, mapping, mapping); + dma_unmap_len_set(info, maplen, skb->len); + return mapping; +} + + +static inline void +ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr, + u32 flagsize, u32 vlan_tag) +{ +#if !USE_TX_COAL_NOW + flagsize &= ~BD_FLG_COAL_NOW; +#endif + + if (ACE_IS_TIGON_I(ap)) { + struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc; + writel(addr >> 32, &io->addr.addrhi); + writel(addr & 0xffffffff, &io->addr.addrlo); + writel(flagsize, &io->flagsize); + writel(vlan_tag, &io->vlanres); + } else { + desc->addr.addrhi = addr >> 32; + desc->addr.addrlo = addr; + desc->flagsize = flagsize; + desc->vlanres = vlan_tag; + } +} + + +static netdev_tx_t ace_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + struct tx_desc *desc; + u32 idx, flagsize; + unsigned long maxjiff = jiffies + 3*HZ; + +restart: + idx = ap->tx_prd; + + if (tx_ring_full(ap, ap->tx_ret_csm, idx)) + goto overflow; + + if (!skb_shinfo(skb)->nr_frags) { + dma_addr_t mapping; + u32 vlan_tag = 0; + + mapping = ace_map_tx_skb(ap, skb, skb, idx); + flagsize = (skb->len << 16) | (BD_FLG_END); + if (skb->ip_summed == CHECKSUM_PARTIAL) + flagsize |= BD_FLG_TCP_UDP_SUM; 
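+		/*
+		 * flagsize packs the buffer length into the upper 16 bits
+		 * and the BD_FLG_* bits into the lower 16.
+		 */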
+ if (vlan_tx_tag_present(skb)) { + flagsize |= BD_FLG_VLAN_TAG; + vlan_tag = vlan_tx_tag_get(skb); + } + desc = ap->tx_ring + idx; + idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); + + /* Look at ace_tx_int for explanations. */ + if (tx_ring_full(ap, ap->tx_ret_csm, idx)) + flagsize |= BD_FLG_COAL_NOW; + + ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); + } else { + dma_addr_t mapping; + u32 vlan_tag = 0; + int i, len = 0; + + mapping = ace_map_tx_skb(ap, skb, NULL, idx); + flagsize = (skb_headlen(skb) << 16); + if (skb->ip_summed == CHECKSUM_PARTIAL) + flagsize |= BD_FLG_TCP_UDP_SUM; + if (vlan_tx_tag_present(skb)) { + flagsize |= BD_FLG_VLAN_TAG; + vlan_tag = vlan_tx_tag_get(skb); + } + + ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag); + + idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + struct tx_ring_info *info; + + len += frag->size; + info = ap->skb->tx_skbuff + idx; + desc = ap->tx_ring + idx; + + mapping = pci_map_page(ap->pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + + flagsize = (frag->size << 16); + if (skb->ip_summed == CHECKSUM_PARTIAL) + flagsize |= BD_FLG_TCP_UDP_SUM; + idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); + + if (i == skb_shinfo(skb)->nr_frags - 1) { + flagsize |= BD_FLG_END; + if (tx_ring_full(ap, ap->tx_ret_csm, idx)) + flagsize |= BD_FLG_COAL_NOW; + + /* + * Only the last fragment frees + * the skb! + */ + info->skb = skb; + } else { + info->skb = NULL; + } + dma_unmap_addr_set(info, mapping, mapping); + dma_unmap_len_set(info, maplen, frag->size); + ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); + } + } + + wmb(); + ap->tx_prd = idx; + ace_set_txprd(regs, ap, idx); + + if (flagsize & BD_FLG_COAL_NOW) { + netif_stop_queue(dev); + + /* + * A TX-descriptor producer (an IRQ) might have gotten + * between, making the ring free again. Since xmit is + * serialized, this is the only situation we have to + * re-test. + */ + if (!tx_ring_full(ap, ap->tx_ret_csm, idx)) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; + +overflow: + /* + * This race condition is unavoidable with lock-free drivers. + * We wake up the queue _before_ tx_prd is advanced, so that we can + * enter hard_start_xmit too early, while tx ring still looks closed. + * This happens ~1-4 times per 100000 packets, so that we can allow + * to loop syncing to other CPU. Probably, we need an additional + * wmb() in ace_tx_intr as well. + * + * Note that this race is relieved by reserving one more entry + * in tx ring than it is necessary (see original non-SG driver). + * However, with SG we need to reserve 2*MAX_SKB_FRAGS+1, which + * is already overkill. + * + * Alternative is to return with 1 not throttling queue. In this + * case loop becomes longer, no more useful effects. + */ + if (time_before(jiffies, maxjiff)) { + barrier(); + cpu_relax(); + goto restart; + } + + /* The ring is stuck full. 
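+	 * Returning NETDEV_TX_BUSY makes the core requeue the skb and
+	 * retry later.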
*/ + printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name); + return NETDEV_TX_BUSY; +} + + +static int ace_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + + if (new_mtu > ACE_JUMBO_MTU) + return -EINVAL; + + writel(new_mtu + ETH_HLEN + 4, ®s->IfMtu); + dev->mtu = new_mtu; + + if (new_mtu > ACE_STD_MTU) { + if (!(ap->jumbo)) { + printk(KERN_INFO "%s: Enabling Jumbo frame " + "support\n", dev->name); + ap->jumbo = 1; + if (!test_and_set_bit(0, &ap->jumbo_refill_busy)) + ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE); + ace_set_rxtx_parms(dev, 1); + } + } else { + while (test_and_set_bit(0, &ap->jumbo_refill_busy)); + ace_sync_irq(dev->irq); + ace_set_rxtx_parms(dev, 0); + if (ap->jumbo) { + struct cmd cmd; + + cmd.evt = C_RESET_JUMBO_RNG; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + } + } + + return 0; +} + +static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + u32 link; + + memset(ecmd, 0, sizeof(struct ethtool_cmd)); + ecmd->supported = + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_FIBRE); + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_INTERNAL; + + link = readl(®s->GigLnkState); + if (link & LNK_1000MB) + ethtool_cmd_speed_set(ecmd, SPEED_1000); + else { + link = readl(®s->FastLnkState); + if (link & LNK_100MB) + ethtool_cmd_speed_set(ecmd, SPEED_100); + else if (link & LNK_10MB) + ethtool_cmd_speed_set(ecmd, SPEED_10); + else + ethtool_cmd_speed_set(ecmd, 0); + } + if (link & LNK_FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + + if (link & LNK_NEGOTIATE) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; + +#if 0 + /* + * Current struct ethtool_cmd is insufficient + */ + ecmd->trace = readl(®s->TuneTrace); + + ecmd->txcoal = readl(®s->TuneTxCoalTicks); + ecmd->rxcoal = readl(®s->TuneRxCoalTicks); +#endif + ecmd->maxtxpkt = readl(®s->TuneMaxTxDesc); + ecmd->maxrxpkt = readl(®s->TuneMaxRxDesc); + + return 0; +} + +static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + u32 link, speed; + + link = readl(®s->GigLnkState); + if (link & LNK_1000MB) + speed = SPEED_1000; + else { + link = readl(®s->FastLnkState); + if (link & LNK_100MB) + speed = SPEED_100; + else if (link & LNK_10MB) + speed = SPEED_10; + else + speed = SPEED_100; + } + + link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB | + LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL; + if (!ACE_IS_TIGON_I(ap)) + link |= LNK_TX_FLOW_CTL_Y; + if (ecmd->autoneg == AUTONEG_ENABLE) + link |= LNK_NEGOTIATE; + if (ethtool_cmd_speed(ecmd) != speed) { + link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB); + switch (ethtool_cmd_speed(ecmd)) { + case SPEED_1000: + link |= LNK_1000MB; + break; + case SPEED_100: + link |= LNK_100MB; + break; + case SPEED_10: + link |= LNK_10MB; + break; + } + } + + if (ecmd->duplex == DUPLEX_FULL) + link |= LNK_FULL_DUPLEX; + + if (link != ap->link) { + struct cmd cmd; + printk(KERN_INFO "%s: Renegotiating link state\n", + dev->name); + + ap->link = link; + writel(link, ®s->TuneLink); + if (!ACE_IS_TIGON_I(ap)) + writel(link, ®s->TuneFastLink); + wmb(); + + cmd.evt = 
C_LNK_NEGOTIATION; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + } + return 0; +} + +static void ace_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct ace_private *ap = netdev_priv(dev); + + strlcpy(info->driver, "acenic", sizeof(info->driver)); + snprintf(info->version, sizeof(info->version), "%i.%i.%i", + ap->firmware_major, ap->firmware_minor, + ap->firmware_fix); + + if (ap->pdev) + strlcpy(info->bus_info, pci_name(ap->pdev), + sizeof(info->bus_info)); + +} + +/* + * Set the hardware MAC address. + */ +static int ace_set_mac_addr(struct net_device *dev, void *p) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + struct sockaddr *addr=p; + u8 *da; + struct cmd cmd; + + if(netif_running(dev)) + return -EBUSY; + + memcpy(dev->dev_addr, addr->sa_data,dev->addr_len); + + da = (u8 *)dev->dev_addr; + + writel(da[0] << 8 | da[1], ®s->MacAddrHi); + writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5], + ®s->MacAddrLo); + + cmd.evt = C_SET_MAC_ADDR; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + + return 0; +} + + +static void ace_set_multicast_list(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + struct cmd cmd; + + if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) { + cmd.evt = C_SET_MULTICAST_MODE; + cmd.code = C_C_MCAST_ENABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + ap->mcast_all = 1; + } else if (ap->mcast_all) { + cmd.evt = C_SET_MULTICAST_MODE; + cmd.code = C_C_MCAST_DISABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + ap->mcast_all = 0; + } + + if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) { + cmd.evt = C_SET_PROMISC_MODE; + cmd.code = C_C_PROMISC_ENABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + ap->promisc = 1; + }else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) { + cmd.evt = C_SET_PROMISC_MODE; + cmd.code = C_C_PROMISC_DISABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + ap->promisc = 0; + } + + /* + * For the time being multicast relies on the upper layers + * filtering it properly. The Firmware does not allow one to + * set the entire multicast list at a time and keeping track of + * it here is going to be messy. 
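+	 * (Hence we only toggle C_C_MCAST_ENABLE/C_C_MCAST_DISABLE below.)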
+
+
+static void ace_set_multicast_list(struct net_device *dev)
+{
+	struct ace_private *ap = netdev_priv(dev);
+	struct ace_regs __iomem *regs = ap->regs;
+	struct cmd cmd;
+
+	if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
+		cmd.evt = C_SET_MULTICAST_MODE;
+		cmd.code = C_C_MCAST_ENABLE;
+		cmd.idx = 0;
+		ace_issue_cmd(regs, &cmd);
+		ap->mcast_all = 1;
+	} else if (ap->mcast_all) {
+		cmd.evt = C_SET_MULTICAST_MODE;
+		cmd.code = C_C_MCAST_DISABLE;
+		cmd.idx = 0;
+		ace_issue_cmd(regs, &cmd);
+		ap->mcast_all = 0;
+	}
+
+	if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
+		cmd.evt = C_SET_PROMISC_MODE;
+		cmd.code = C_C_PROMISC_ENABLE;
+		cmd.idx = 0;
+		ace_issue_cmd(regs, &cmd);
+		ap->promisc = 1;
+	} else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
+		cmd.evt = C_SET_PROMISC_MODE;
+		cmd.code = C_C_PROMISC_DISABLE;
+		cmd.idx = 0;
+		ace_issue_cmd(regs, &cmd);
+		ap->promisc = 0;
+	}
+
+	/*
+	 * For the time being multicast relies on the upper layers
+	 * filtering it properly. The Firmware does not allow one to
+	 * set the entire multicast list at a time and keeping track of
+	 * it here is going to be messy.
+	 */
+	if (!netdev_mc_empty(dev) && !ap->mcast_all) {
+		cmd.evt = C_SET_MULTICAST_MODE;
+		cmd.code = C_C_MCAST_ENABLE;
+		cmd.idx = 0;
+		ace_issue_cmd(regs, &cmd);
+	} else if (!ap->mcast_all) {
+		cmd.evt = C_SET_MULTICAST_MODE;
+		cmd.code = C_C_MCAST_DISABLE;
+		cmd.idx = 0;
+		ace_issue_cmd(regs, &cmd);
+	}
+}
+
+
+static struct net_device_stats *ace_get_stats(struct net_device *dev)
+{
+	struct ace_private *ap = netdev_priv(dev);
+	struct ace_mac_stats __iomem *mac_stats =
+		(struct ace_mac_stats __iomem *)ap->regs->Stats;
+
+	dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
+	dev->stats.multicast = readl(&mac_stats->kept_mc);
+	dev->stats.collisions = readl(&mac_stats->coll);
+
+	return &dev->stats;
+}
+
+
+static void __devinit ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
+			       u32 dest, int size)
+{
+	void __iomem *tdest;
+	short tsize, i;
+
+	if (size <= 0)
+		return;
+
+	while (size > 0) {
+		tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
+			      min_t(u32, size, ACE_WINDOW_SIZE));
+		tdest = (void __iomem *) &regs->Window +
+			(dest & (ACE_WINDOW_SIZE - 1));
+		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
+		for (i = 0; i < (tsize / 4); i++) {
+			/* Firmware is big-endian */
+			writel(be32_to_cpup(src), tdest);
+			src++;
+			tdest += 4;
+			dest += 4;
+			size -= 4;
+		}
+	}
+}
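ace_copy() sees NIC SRAM only through the 2 KiB Window aperture, so each pass is clipped to whatever is left before the next window boundary; that is what the (~dest & (ACE_WINDOW_SIZE - 1)) + 1 term computes. A small sketch of just that arithmetic, assuming only the ACE_WINDOW_SIZE constant from acenic.h:

	/* Bytes remaining in the 2 KiB window that contains dest. */
	static inline u32 ace_window_room(u32 dest)
	{
		return (~dest & (ACE_WINDOW_SIZE - 1)) + 1;
	}

	/* e.g. ace_window_room(0x4000) == 0x800 (a full window),
	 *      ace_window_room(0x47fc) == 4 (just before the boundary).
	 */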
+
+
+static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
+{
+	void __iomem *tdest;
+	short tsize = 0, i;
+
+	if (size <= 0)
+		return;
+
+	while (size > 0) {
+		tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
+			      min_t(u32, size, ACE_WINDOW_SIZE));
+		tdest = (void __iomem *) &regs->Window +
+			(dest & (ACE_WINDOW_SIZE - 1));
+		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
+
+		for (i = 0; i < (tsize / 4); i++) {
+			writel(0, tdest + i*4);
+		}
+
+		dest += tsize;
+		size -= tsize;
+	}
+}
+
+
+/*
+ * Download the firmware into the SRAM on the NIC
+ *
+ * This operation requires the NIC to be halted and is performed with
+ * interrupts disabled and with the spinlock held.
+ */
+static int __devinit ace_load_firmware(struct net_device *dev)
+{
+	const struct firmware *fw;
+	const char *fw_name = "acenic/tg2.bin";
+	struct ace_private *ap = netdev_priv(dev);
+	struct ace_regs __iomem *regs = ap->regs;
+	const __be32 *fw_data;
+	u32 load_addr;
+	int ret;
+
+	if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
+		printk(KERN_ERR "%s: trying to download firmware while the "
+		       "CPU is running!\n", ap->name);
+		return -EFAULT;
+	}
+
+	if (ACE_IS_TIGON_I(ap))
+		fw_name = "acenic/tg1.bin";
+
+	ret = request_firmware(&fw, fw_name, &ap->pdev->dev);
+	if (ret) {
+		printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
+		       ap->name, fw_name);
+		return ret;
+	}
+
+	fw_data = (void *)fw->data;
+
+	/* Firmware blob starts with version numbers, followed by
+	   load and start address. Remainder is the blob to be loaded
+	   contiguously from load address. We don't bother to represent
+	   the BSS/SBSS sections any more, since we were clearing the
+	   whole thing anyway.
+	 */
+	ap->firmware_major = fw->data[0];
+	ap->firmware_minor = fw->data[1];
+	ap->firmware_fix = fw->data[2];
+
+	ap->firmware_start = be32_to_cpu(fw_data[1]);
+	if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) {
+		printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
+		       ap->name, ap->firmware_start, fw_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	load_addr = be32_to_cpu(fw_data[2]);
+	if (load_addr < 0x4000 || load_addr >= 0x80000) {
+		printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
+		       ap->name, load_addr, fw_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Do not try to clear more than 512KiB or we end up seeing
+	 * funny things on NICs with only 512KiB SRAM
+	 */
+	ace_clear(regs, 0x2000, 0x80000-0x2000);
+	ace_copy(regs, &fw_data[3], load_addr, fw->size-12);
+ out:
+	release_firmware(fw);
+	return ret;
+}
+
+
+/*
+ * The eeprom on the AceNIC is an Atmel i2c EEPROM.
+ *
+ * Accessing the EEPROM is `interesting' to say the least - don't read
+ * this code right after dinner.
+ *
+ * This is all about black magic and bit-banging the device .... I
+ * wonder in what hospital they have put the guy who designed the i2c
+ * specs.
+ *
+ * Oh yes, this is only the beginning!
+ *
+ * Thanks to Stevarino Webinski for helping track down the bugs in the
+ * i2c readout code by beta testing all my hacks.
+ */
+static void __devinit eeprom_start(struct ace_regs __iomem *regs)
+{
+	u32 local;
+
+	readl(&regs->LocalCtrl);
+	udelay(ACE_SHORT_DELAY);
+	local = readl(&regs->LocalCtrl);
+	local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_SHORT_DELAY);
+	local |= EEPROM_CLK_OUT;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_SHORT_DELAY);
+	local &= ~EEPROM_DATA_OUT;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_SHORT_DELAY);
+	local &= ~EEPROM_CLK_OUT;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+}
+
+
+static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
+{
+	short i;
+	u32 local;
+
+	udelay(ACE_SHORT_DELAY);
+	local = readl(&regs->LocalCtrl);
+	local &= ~EEPROM_DATA_OUT;
+	local |= EEPROM_WRITE_ENABLE;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+
+	for (i = 0; i < 8; i++, magic <<= 1) {
+		udelay(ACE_SHORT_DELAY);
+		if (magic & 0x80)
+			local |= EEPROM_DATA_OUT;
+		else
+			local &= ~EEPROM_DATA_OUT;
+		writel(local, &regs->LocalCtrl);
+		readl(&regs->LocalCtrl);
+		mb();
+
+		udelay(ACE_SHORT_DELAY);
+		local |= EEPROM_CLK_OUT;
+		writel(local, &regs->LocalCtrl);
+		readl(&regs->LocalCtrl);
+		mb();
+		udelay(ACE_SHORT_DELAY);
+		local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
+		writel(local, &regs->LocalCtrl);
+		readl(&regs->LocalCtrl);
+		mb();
+	}
+}
+
+
+static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs)
+{
+	int state;
+	u32 local;
+
+	local = readl(&regs->LocalCtrl);
+	local &= ~EEPROM_WRITE_ENABLE;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_LONG_DELAY);
+	local |= EEPROM_CLK_OUT;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_SHORT_DELAY);
+	/* sample data in middle of high clk */
+	state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
+	udelay(ACE_SHORT_DELAY);
+	mb();
+	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+
+	return state;
+}
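Taken together, these helpers drive an ordinary two-wire EEPROM "random read". The condensed outline below is editorial, not driver code, and omits the per-step ACK error handling that read_eeprom_byte() (next) performs:

	/* Editorial outline of the transaction read_eeprom_byte() drives: */
	eeprom_start(regs);				/* START condition */
	eeprom_prep(regs, EEPROM_WRITE_SELECT);		/* device address, write mode */
	eeprom_prep(regs, (offset >> 8) & 0xff);	/* word address, high byte */
	eeprom_prep(regs, offset & 0xff);		/* word address, low byte */
	eeprom_start(regs);				/* repeated START */
	eeprom_prep(regs, EEPROM_READ_SELECT);		/* device address, read mode */
	/* ...then clock in 8 data bits and end the transfer with eeprom_stop() */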
+
+
+static void __devinit eeprom_stop(struct ace_regs __iomem *regs)
+{
+	u32 local;
+
+	udelay(ACE_SHORT_DELAY);
+	local = readl(&regs->LocalCtrl);
+	local |= EEPROM_WRITE_ENABLE;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_SHORT_DELAY);
+	local &= ~EEPROM_DATA_OUT;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_SHORT_DELAY);
+	local |= EEPROM_CLK_OUT;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_SHORT_DELAY);
+	local |= EEPROM_DATA_OUT;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_LONG_DELAY);
+	local &= ~EEPROM_CLK_OUT;
+	writel(local, &regs->LocalCtrl);
+	mb();
+}
+
+
+/*
+ * Read a whole byte from the EEPROM.
+ */
+static int __devinit read_eeprom_byte(struct net_device *dev,
+				      unsigned long offset)
+{
+	struct ace_private *ap = netdev_priv(dev);
+	struct ace_regs __iomem *regs = ap->regs;
+	unsigned long flags;
+	u32 local;
+	int result = 0;
+	short i;
+
+	/*
+	 * Don't take interrupts on this CPU while bit banging
+	 * the %#%#@$ I2C device
+	 */
+	local_irq_save(flags);
+
+	eeprom_start(regs);
+
+	eeprom_prep(regs, EEPROM_WRITE_SELECT);
+	if (eeprom_check_ack(regs)) {
+		local_irq_restore(flags);
+		printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
+		result = -EIO;
+		goto eeprom_read_error;
+	}
+
+	eeprom_prep(regs, (offset >> 8) & 0xff);
+	if (eeprom_check_ack(regs)) {
+		local_irq_restore(flags);
+		printk(KERN_ERR "%s: Unable to set address byte 0\n",
+		       ap->name);
+		result = -EIO;
+		goto eeprom_read_error;
+	}
+
+	eeprom_prep(regs, offset & 0xff);
+	if (eeprom_check_ack(regs)) {
+		local_irq_restore(flags);
+		printk(KERN_ERR "%s: Unable to set address byte 1\n",
+		       ap->name);
+		result = -EIO;
+		goto eeprom_read_error;
+	}
+
+	eeprom_start(regs);
+	eeprom_prep(regs, EEPROM_READ_SELECT);
+	if (eeprom_check_ack(regs)) {
+		local_irq_restore(flags);
+		printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
+		       ap->name);
+		result = -EIO;
+		goto eeprom_read_error;
+	}
+
+	for (i = 0; i < 8; i++) {
+		local = readl(&regs->LocalCtrl);
+		local &= ~EEPROM_WRITE_ENABLE;
+		writel(local, &regs->LocalCtrl);
+		readl(&regs->LocalCtrl);
+		udelay(ACE_LONG_DELAY);
+		mb();
+		local |= EEPROM_CLK_OUT;
+		writel(local, &regs->LocalCtrl);
+		readl(&regs->LocalCtrl);
+		mb();
+		udelay(ACE_SHORT_DELAY);
+		/* sample data mid high clk */
+		result = (result << 1) |
+			((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
+		udelay(ACE_SHORT_DELAY);
+		mb();
+		local = readl(&regs->LocalCtrl);
+		local &= ~EEPROM_CLK_OUT;
+		writel(local, &regs->LocalCtrl);
+		readl(&regs->LocalCtrl);
+		udelay(ACE_SHORT_DELAY);
+		mb();
+		if (i == 7) {
+			local |= EEPROM_WRITE_ENABLE;
+			writel(local, &regs->LocalCtrl);
+			readl(&regs->LocalCtrl);
+			mb();
+			udelay(ACE_SHORT_DELAY);
+		}
+	}
+
+	local |= EEPROM_DATA_OUT;
+	writel(local, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_SHORT_DELAY);
+	writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	udelay(ACE_LONG_DELAY);
+	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
+	readl(&regs->LocalCtrl);
+	mb();
+	udelay(ACE_SHORT_DELAY);
+	eeprom_stop(regs);
+
+	local_irq_restore(flags);
+ out:
+	return result;
+
+ eeprom_read_error:
+	printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
+	       ap->name, offset);
+	goto out;
+}
diff --git a/drivers/net/ethernet/3com/acenic.h b/drivers/net/ethernet/3com/acenic.h
new file mode 100644
index 0000000..51c486c
--- /dev/null
+++ b/drivers/net/ethernet/3com/acenic.h
@@ -0,0 +1,790 @@
+#ifndef _ACENIC_H_
+#define _ACENIC_H_
+#include
+
+
+/*
+ * Generate TX index update each time, when TX ring is closed.
+ * Normally, this is not useful, because results in more dma (and irqs + * without TX_COAL_INTS_ONLY). + */ +#define USE_TX_COAL_NOW 0 + +/* + * Addressing: + * + * The Tigon uses 64-bit host addresses, regardless of their actual + * length, and it expects a big-endian format. For 32 bit systems the + * upper 32 bits of the address are simply ignored (zero), however for + * little endian 64 bit systems (Alpha) this looks strange with the + * two parts of the address word being swapped. + * + * The addresses are split in two 32 bit words for all architectures + * as some of them are in PCI shared memory and it is necessary to use + * readl/writel to access them. + * + * The addressing code is derived from Pete Wyckoff's work, but + * modified to deal properly with readl/writel usage. + */ + +struct ace_regs { + u32 pad0[16]; /* PCI control registers */ + + u32 HostCtrl; /* 0x40 */ + u32 LocalCtrl; + + u32 pad1[2]; + + u32 MiscCfg; /* 0x50 */ + + u32 pad2[2]; + + u32 PciState; + + u32 pad3[2]; /* 0x60 */ + + u32 WinBase; + u32 WinData; + + u32 pad4[12]; /* 0x70 */ + + u32 DmaWriteState; /* 0xa0 */ + u32 pad5[3]; + u32 DmaReadState; /* 0xb0 */ + + u32 pad6[26]; + + u32 AssistState; + + u32 pad7[8]; /* 0x120 */ + + u32 CpuCtrl; /* 0x140 */ + u32 Pc; + + u32 pad8[3]; + + u32 SramAddr; /* 0x154 */ + u32 SramData; + + u32 pad9[49]; + + u32 MacRxState; /* 0x220 */ + + u32 pad10[7]; + + u32 CpuBCtrl; /* 0x240 */ + u32 PcB; + + u32 pad11[3]; + + u32 SramBAddr; /* 0x254 */ + u32 SramBData; + + u32 pad12[105]; + + u32 pad13[32]; /* 0x400 */ + u32 Stats[32]; + + u32 Mb0Hi; /* 0x500 */ + u32 Mb0Lo; + u32 Mb1Hi; + u32 CmdPrd; + u32 Mb2Hi; + u32 TxPrd; + u32 Mb3Hi; + u32 RxStdPrd; + u32 Mb4Hi; + u32 RxJumboPrd; + u32 Mb5Hi; + u32 RxMiniPrd; + u32 Mb6Hi; + u32 Mb6Lo; + u32 Mb7Hi; + u32 Mb7Lo; + u32 Mb8Hi; + u32 Mb8Lo; + u32 Mb9Hi; + u32 Mb9Lo; + u32 MbAHi; + u32 MbALo; + u32 MbBHi; + u32 MbBLo; + u32 MbCHi; + u32 MbCLo; + u32 MbDHi; + u32 MbDLo; + u32 MbEHi; + u32 MbELo; + u32 MbFHi; + u32 MbFLo; + + u32 pad14[32]; + + u32 MacAddrHi; /* 0x600 */ + u32 MacAddrLo; + u32 InfoPtrHi; + u32 InfoPtrLo; + u32 MultiCastHi; /* 0x610 */ + u32 MultiCastLo; + u32 ModeStat; + u32 DmaReadCfg; + u32 DmaWriteCfg; /* 0x620 */ + u32 TxBufRat; + u32 EvtCsm; + u32 CmdCsm; + u32 TuneRxCoalTicks;/* 0x630 */ + u32 TuneTxCoalTicks; + u32 TuneStatTicks; + u32 TuneMaxTxDesc; + u32 TuneMaxRxDesc; /* 0x640 */ + u32 TuneTrace; + u32 TuneLink; + u32 TuneFastLink; + u32 TracePtr; /* 0x650 */ + u32 TraceStrt; + u32 TraceLen; + u32 IfIdx; + u32 IfMtu; /* 0x660 */ + u32 MaskInt; + u32 GigLnkState; + u32 FastLnkState; + u32 pad16[4]; /* 0x670 */ + u32 RxRetCsm; /* 0x680 */ + + u32 pad17[31]; + + u32 CmdRng[64]; /* 0x700 */ + u32 Window[0x200]; +}; + + +typedef struct { + u32 addrhi; + u32 addrlo; +} aceaddr; + + +#define ACE_WINDOW_SIZE 0x800 + +#define ACE_JUMBO_MTU 9000 +#define ACE_STD_MTU 1500 + +#define ACE_TRACE_SIZE 0x8000 + +/* + * Host control register bits. + */ + +#define IN_INT 0x01 +#define CLR_INT 0x02 +#define HW_RESET 0x08 +#define BYTE_SWAP 0x10 +#define WORD_SWAP 0x20 +#define MASK_INTS 0x40 + +/* + * Local control register bits. 
+ */ + +#define EEPROM_DATA_IN 0x800000 +#define EEPROM_DATA_OUT 0x400000 +#define EEPROM_WRITE_ENABLE 0x200000 +#define EEPROM_CLK_OUT 0x100000 + +#define EEPROM_BASE 0xa0000000 + +#define EEPROM_WRITE_SELECT 0xa0 +#define EEPROM_READ_SELECT 0xa1 + +#define SRAM_BANK_512K 0x200 + + +/* + * udelay() values for when clocking the eeprom + */ +#define ACE_SHORT_DELAY 2 +#define ACE_LONG_DELAY 4 + + +/* + * Misc Config bits + */ + +#define SYNC_SRAM_TIMING 0x100000 + + +/* + * CPU state bits. + */ + +#define CPU_RESET 0x01 +#define CPU_TRACE 0x02 +#define CPU_PROM_FAILED 0x10 +#define CPU_HALT 0x00010000 +#define CPU_HALTED 0xffff0000 + + +/* + * PCI State bits. + */ + +#define DMA_READ_MAX_4 0x04 +#define DMA_READ_MAX_16 0x08 +#define DMA_READ_MAX_32 0x0c +#define DMA_READ_MAX_64 0x10 +#define DMA_READ_MAX_128 0x14 +#define DMA_READ_MAX_256 0x18 +#define DMA_READ_MAX_1K 0x1c +#define DMA_WRITE_MAX_4 0x20 +#define DMA_WRITE_MAX_16 0x40 +#define DMA_WRITE_MAX_32 0x60 +#define DMA_WRITE_MAX_64 0x80 +#define DMA_WRITE_MAX_128 0xa0 +#define DMA_WRITE_MAX_256 0xc0 +#define DMA_WRITE_MAX_1K 0xe0 +#define DMA_READ_WRITE_MASK 0xfc +#define MEM_READ_MULTIPLE 0x00020000 +#define PCI_66MHZ 0x00080000 +#define PCI_32BIT 0x00100000 +#define DMA_WRITE_ALL_ALIGN 0x00800000 +#define READ_CMD_MEM 0x06000000 +#define WRITE_CMD_MEM 0x70000000 + + +/* + * Mode status + */ + +#define ACE_BYTE_SWAP_BD 0x02 +#define ACE_WORD_SWAP_BD 0x04 /* not actually used */ +#define ACE_WARN 0x08 +#define ACE_BYTE_SWAP_DMA 0x10 +#define ACE_NO_JUMBO_FRAG 0x200 +#define ACE_FATAL 0x40000000 + + +/* + * DMA config + */ + +#define DMA_THRESH_1W 0x10 +#define DMA_THRESH_2W 0x20 +#define DMA_THRESH_4W 0x40 +#define DMA_THRESH_8W 0x80 +#define DMA_THRESH_16W 0x100 +#define DMA_THRESH_32W 0x0 /* not described in doc, but exists. 
*/ + + +/* + * Tuning parameters + */ + +#define TICKS_PER_SEC 1000000 + + +/* + * Link bits + */ + +#define LNK_PREF 0x00008000 +#define LNK_10MB 0x00010000 +#define LNK_100MB 0x00020000 +#define LNK_1000MB 0x00040000 +#define LNK_FULL_DUPLEX 0x00080000 +#define LNK_HALF_DUPLEX 0x00100000 +#define LNK_TX_FLOW_CTL_Y 0x00200000 +#define LNK_NEG_ADVANCED 0x00400000 +#define LNK_RX_FLOW_CTL_Y 0x00800000 +#define LNK_NIC 0x01000000 +#define LNK_JAM 0x02000000 +#define LNK_JUMBO 0x04000000 +#define LNK_ALTEON 0x08000000 +#define LNK_NEG_FCTL 0x10000000 +#define LNK_NEGOTIATE 0x20000000 +#define LNK_ENABLE 0x40000000 +#define LNK_UP 0x80000000 + + +/* + * Event definitions + */ + +#define EVT_RING_ENTRIES 256 +#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event)) + +struct event { +#ifdef __LITTLE_ENDIAN_BITFIELD + u32 idx:12; + u32 code:12; + u32 evt:8; +#else + u32 evt:8; + u32 code:12; + u32 idx:12; +#endif + u32 pad; +}; + + +/* + * Events + */ + +#define E_FW_RUNNING 0x01 +#define E_STATS_UPDATED 0x04 + +#define E_STATS_UPDATE 0x04 + +#define E_LNK_STATE 0x06 +#define E_C_LINK_UP 0x01 +#define E_C_LINK_DOWN 0x02 +#define E_C_LINK_10_100 0x03 + +#define E_ERROR 0x07 +#define E_C_ERR_INVAL_CMD 0x01 +#define E_C_ERR_UNIMP_CMD 0x02 +#define E_C_ERR_BAD_CFG 0x03 + +#define E_MCAST_LIST 0x08 +#define E_C_MCAST_ADDR_ADD 0x01 +#define E_C_MCAST_ADDR_DEL 0x02 + +#define E_RESET_JUMBO_RNG 0x09 + + +/* + * Commands + */ + +#define CMD_RING_ENTRIES 64 + +struct cmd { +#ifdef __LITTLE_ENDIAN_BITFIELD + u32 idx:12; + u32 code:12; + u32 evt:8; +#else + u32 evt:8; + u32 code:12; + u32 idx:12; +#endif +}; + + +#define C_HOST_STATE 0x01 +#define C_C_STACK_UP 0x01 +#define C_C_STACK_DOWN 0x02 + +#define C_FDR_FILTERING 0x02 +#define C_C_FDR_FILT_ENABLE 0x01 +#define C_C_FDR_FILT_DISABLE 0x02 + +#define C_SET_RX_PRD_IDX 0x03 +#define C_UPDATE_STATS 0x04 +#define C_RESET_JUMBO_RNG 0x05 +#define C_ADD_MULTICAST_ADDR 0x08 +#define C_DEL_MULTICAST_ADDR 0x09 + +#define C_SET_PROMISC_MODE 0x0a +#define C_C_PROMISC_ENABLE 0x01 +#define C_C_PROMISC_DISABLE 0x02 + +#define C_LNK_NEGOTIATION 0x0b +#define C_C_NEGOTIATE_BOTH 0x00 +#define C_C_NEGOTIATE_GIG 0x01 +#define C_C_NEGOTIATE_10_100 0x02 + +#define C_SET_MAC_ADDR 0x0c +#define C_CLEAR_PROFILE 0x0d + +#define C_SET_MULTICAST_MODE 0x0e +#define C_C_MCAST_ENABLE 0x01 +#define C_C_MCAST_DISABLE 0x02 + +#define C_CLEAR_STATS 0x0f +#define C_SET_RX_JUMBO_PRD_IDX 0x10 +#define C_REFRESH_STATS 0x11 + + +/* + * Descriptor flags + */ +#define BD_FLG_TCP_UDP_SUM 0x01 +#define BD_FLG_IP_SUM 0x02 +#define BD_FLG_END 0x04 +#define BD_FLG_MORE 0x08 +#define BD_FLG_JUMBO 0x10 +#define BD_FLG_UCAST 0x20 +#define BD_FLG_MCAST 0x40 +#define BD_FLG_BCAST 0x60 +#define BD_FLG_TYP_MASK 0x60 +#define BD_FLG_IP_FRAG 0x80 +#define BD_FLG_IP_FRAG_END 0x100 +#define BD_FLG_VLAN_TAG 0x200 +#define BD_FLG_FRAME_ERROR 0x400 +#define BD_FLG_COAL_NOW 0x800 +#define BD_FLG_MINI 0x1000 + + +/* + * Ring Control block flags + */ +#define RCB_FLG_TCP_UDP_SUM 0x01 +#define RCB_FLG_IP_SUM 0x02 +#define RCB_FLG_NO_PSEUDO_HDR 0x08 +#define RCB_FLG_VLAN_ASSIST 0x10 +#define RCB_FLG_COAL_INT_ONLY 0x20 +#define RCB_FLG_TX_HOST_RING 0x40 +#define RCB_FLG_IEEE_SNAP_SUM 0x80 +#define RCB_FLG_EXT_RX_BD 0x100 +#define RCB_FLG_RNG_DISABLE 0x200 + + +/* + * TX ring - maximum TX ring entries for Tigon I's is 128 + */ +#define MAX_TX_RING_ENTRIES 256 +#define TIGON_I_TX_RING_ENTRIES 128 +#define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc)) +#define TX_RING_BASE 0x3800 + +struct tx_desc{ + 
aceaddr addr;
+	u32 flagsize;
+#if 0
+/*
+ * This is in PCI shared mem and must be accessed with readl/writel;
+ * the real layout is:
+ */
+#if __LITTLE_ENDIAN
+	u16 flags;
+	u16 size;
+	u16 vlan;
+	u16 reserved;
+#else
+	u16 size;
+	u16 flags;
+	u16 reserved;
+	u16 vlan;
+#endif
+#endif
+	u32 vlanres;
+};
+
+
+#define RX_STD_RING_ENTRIES	512
+#define RX_STD_RING_SIZE	(RX_STD_RING_ENTRIES * sizeof(struct rx_desc))
+
+#define RX_JUMBO_RING_ENTRIES	256
+#define RX_JUMBO_RING_SIZE	(RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc))
+
+#define RX_MINI_RING_ENTRIES	1024
+#define RX_MINI_RING_SIZE	(RX_MINI_RING_ENTRIES * sizeof(struct rx_desc))
+
+#define RX_RETURN_RING_ENTRIES	2048
+#define RX_RETURN_RING_SIZE	(RX_RETURN_RING_ENTRIES * \
+				 sizeof(struct rx_desc))
+
+struct rx_desc{
+	aceaddr	addr;
+#ifdef __LITTLE_ENDIAN
+	u16 size;
+	u16 idx;
+#else
+	u16 idx;
+	u16 size;
+#endif
+#ifdef __LITTLE_ENDIAN
+	u16 flags;
+	u16 type;
+#else
+	u16 type;
+	u16 flags;
+#endif
+#ifdef __LITTLE_ENDIAN
+	u16 tcp_udp_csum;
+	u16 ip_csum;
+#else
+	u16 ip_csum;
+	u16 tcp_udp_csum;
+#endif
+#ifdef __LITTLE_ENDIAN
+	u16 vlan;
+	u16 err_flags;
+#else
+	u16 err_flags;
+	u16 vlan;
+#endif
+	u32 reserved;
+	u32 opaque;
+};
+
+
+/*
+ * This struct is shared with the NIC firmware.
+ */
+struct ring_ctrl {
+	aceaddr	rngptr;
+#ifdef __LITTLE_ENDIAN
+	u16 flags;
+	u16 max_len;
+#else
+	u16 max_len;
+	u16 flags;
+#endif
+	u32 pad;
+};
+
+
+struct ace_mac_stats {
+	u32 excess_colls;
+	u32 coll_1;
+	u32 coll_2;
+	u32 coll_3;
+	u32 coll_4;
+	u32 coll_5;
+	u32 coll_6;
+	u32 coll_7;
+	u32 coll_8;
+	u32 coll_9;
+	u32 coll_10;
+	u32 coll_11;
+	u32 coll_12;
+	u32 coll_13;
+	u32 coll_14;
+	u32 coll_15;
+	u32 late_coll;
+	u32 defers;
+	u32 crc_err;
+	u32 underrun;
+	u32 crs_err;
+	u32 pad[3];
+	u32 drop_ula;
+	u32 drop_mc;
+	u32 drop_fc;
+	u32 drop_space;
+	u32 coll;
+	u32 kept_bc;
+	u32 kept_mc;
+	u32 kept_uc;
+};
+
+
+struct ace_info {
+	union {
+		u32 stats[256];
+	} s;
+	struct ring_ctrl	evt_ctrl;
+	struct ring_ctrl	cmd_ctrl;
+	struct ring_ctrl	tx_ctrl;
+	struct ring_ctrl	rx_std_ctrl;
+	struct ring_ctrl	rx_jumbo_ctrl;
+	struct ring_ctrl	rx_mini_ctrl;
+	struct ring_ctrl	rx_return_ctrl;
+	aceaddr	evt_prd_ptr;
+	aceaddr	rx_ret_prd_ptr;
+	aceaddr	tx_csm_ptr;
+	aceaddr	stats2_ptr;
+};
+
+
+struct ring_info {
+	struct sk_buff		*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+
+/*
+ * Funny... As soon as we add maplen on alpha, it starts to work
+ * much slower. Hmm... is it because struct does not fit to one cacheline?
+ * So, split tx_ring_info.
+ */
+struct tx_ring_info {
+	struct sk_buff		*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+
+/*
+ * struct ace_skb holding the rings of skb's. This is an awful lot of
+ * pointers, but I don't see any other smart way to do this in an
+ * efficient manner ;-(
+ */
+struct ace_skb
+{
+	struct tx_ring_info	tx_skbuff[MAX_TX_RING_ENTRIES];
+	struct ring_info	rx_std_skbuff[RX_STD_RING_ENTRIES];
+	struct ring_info	rx_mini_skbuff[RX_MINI_RING_ENTRIES];
+	struct ring_info	rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
+};
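DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() only reserve storage on architectures that need it, so the stashed mapping must always go through the matching accessors rather than direct field access. A hedged sketch of the usual fill/unmap pattern for one of these ring entries (illustrative, not lifted from acenic.c):

	/* Sketch only: the dma-mapping.h accessor pattern these fields expect. */
	static void ring_info_stash(struct tx_ring_info *info,
				    struct sk_buff *skb, dma_addr_t mapping)
	{
		info->skb = skb;
		dma_unmap_addr_set(info, mapping, mapping);
		dma_unmap_len_set(info, maplen, skb->len);
	}
	/* ...and on completion:
	 * pci_unmap_single(ap->pdev, dma_unmap_addr(info, mapping),
	 *		    dma_unmap_len(info, maplen), PCI_DMA_TODEVICE);
	 */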
+ */ +struct ace_private +{ + struct ace_info *info; + struct ace_regs __iomem *regs; /* register base */ + struct ace_skb *skb; + dma_addr_t info_dma; /* 32/64 bit */ + + int version, link; + int promisc, mcast_all; + + /* + * TX elements + */ + struct tx_desc *tx_ring; + u32 tx_prd; + volatile u32 tx_ret_csm; + int tx_ring_entries; + + /* + * RX elements + */ + unsigned long std_refill_busy + __attribute__ ((aligned (SMP_CACHE_BYTES))); + unsigned long mini_refill_busy, jumbo_refill_busy; + atomic_t cur_rx_bufs; + atomic_t cur_mini_bufs; + atomic_t cur_jumbo_bufs; + u32 rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd; + u32 cur_rx; + + struct rx_desc *rx_std_ring; + struct rx_desc *rx_jumbo_ring; + struct rx_desc *rx_mini_ring; + struct rx_desc *rx_return_ring; + + int tasklet_pending, jumbo; + struct tasklet_struct ace_tasklet; + + struct event *evt_ring; + + volatile u32 *evt_prd, *rx_ret_prd, *tx_csm; + + dma_addr_t tx_ring_dma; /* 32/64 bit */ + dma_addr_t rx_ring_base_dma; + dma_addr_t evt_ring_dma; + dma_addr_t evt_prd_dma, rx_ret_prd_dma, tx_csm_dma; + + unsigned char *trace_buf; + struct pci_dev *pdev; + struct net_device *next; + volatile int fw_running; + int board_idx; + u16 pci_command; + u8 pci_latency; + const char *name; +#ifdef INDEX_DEBUG + spinlock_t debug_lock + __attribute__ ((aligned (SMP_CACHE_BYTES))); + u32 last_tx, last_std_rx, last_mini_rx; +#endif + int pci_using_dac; + u8 firmware_major; + u8 firmware_minor; + u8 firmware_fix; + u32 firmware_start; +}; + + +#define TX_RESERVED MAX_SKB_FRAGS + +static inline int tx_space (struct ace_private *ap, u32 csm, u32 prd) +{ + return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1); +} + +#define tx_free(ap) tx_space((ap)->tx_ret_csm, (ap)->tx_prd, ap) +#define tx_ring_full(ap, csm, prd) (tx_space(ap, csm, prd) <= TX_RESERVED) + +static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr) +{ + u64 baddr = (u64) addr; + aa->addrlo = baddr & 0xffffffff; + aa->addrhi = baddr >> 32; + wmb(); +} + + +static inline void ace_set_txprd(struct ace_regs __iomem *regs, + struct ace_private *ap, u32 value) +{ +#ifdef INDEX_DEBUG + unsigned long flags; + spin_lock_irqsave(&ap->debug_lock, flags); + writel(value, ®s->TxPrd); + if (value == ap->last_tx) + printk(KERN_ERR "AceNIC RACE ALERT! 
+
+static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
+{
+	u64 baddr = (u64) addr;
+	aa->addrlo = baddr & 0xffffffff;
+	aa->addrhi = baddr >> 32;
+	wmb();
+}
+
+
+static inline void ace_set_txprd(struct ace_regs __iomem *regs,
+				 struct ace_private *ap, u32 value)
+{
+#ifdef INDEX_DEBUG
+	unsigned long flags;
+	spin_lock_irqsave(&ap->debug_lock, flags);
+	writel(value, &regs->TxPrd);
+	if (value == ap->last_tx)
+		printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
+		       "to tx producer (%i)\n", value);
+	ap->last_tx = value;
+	spin_unlock_irqrestore(&ap->debug_lock, flags);
+#else
+	writel(value, &regs->TxPrd);
+#endif
+	wmb();
+}
+
+
+static inline void ace_mask_irq(struct net_device *dev)
+{
+	struct ace_private *ap = netdev_priv(dev);
+	struct ace_regs __iomem *regs = ap->regs;
+
+	if (ACE_IS_TIGON_I(ap))
+		writel(1, &regs->MaskInt);
+	else
+		writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);
+
+	ace_sync_irq(dev->irq);
+}
+
+
+static inline void ace_unmask_irq(struct net_device *dev)
+{
+	struct ace_private *ap = netdev_priv(dev);
+	struct ace_regs __iomem *regs = ap->regs;
+
+	if (ACE_IS_TIGON_I(ap))
+		writel(0, &regs->MaskInt);
+	else
+		writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
+}
+
+
+/*
+ * Prototypes
+ */
+static int ace_init(struct net_device *dev);
+static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
+static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
+static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
+static irqreturn_t ace_interrupt(int irq, void *dev_id);
+static int ace_load_firmware(struct net_device *dev);
+static int ace_open(struct net_device *dev);
+static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
+				  struct net_device *dev);
+static int ace_close(struct net_device *dev);
+static void ace_tasklet(unsigned long dev);
+static void ace_dump_trace(struct ace_private *ap);
+static void ace_set_multicast_list(struct net_device *dev);
+static int ace_change_mtu(struct net_device *dev, int new_mtu);
+static int ace_set_mac_addr(struct net_device *dev, void *p);
+static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
+static int ace_allocate_descriptors(struct net_device *dev);
+static void ace_free_descriptors(struct net_device *dev);
+static void ace_init_cleanup(struct net_device *dev);
+static struct net_device_stats *ace_get_stats(struct net_device *dev);
+static int read_eeprom_byte(struct net_device *dev, unsigned long offset);
+
+#endif /* _ACENIC_H_ */
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
new file mode 100644
index 0000000..1d5091a
--- /dev/null
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -0,0 +1,2574 @@
+/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
+/*
+	Written 2002-2004 by David Dillow
+	Based on code written 1998-2000 by Donald Becker and
+	Linux 2.2.x driver by David P. McLean .
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice. This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.
+
+	This software is available on a public web site. It may enable
+	cryptographic capabilities of the 3Com hardware, and may be
+	exported from the United States under License Exception "TSU"
+	pursuant to 15 C.F.R. Section 740.13(e).
+
+	This work was funded by the National Library of Medicine under
+	the Department of Energy project number 0274DD06D1 and NLM project
+	number Y1-LM-2015-01.
+
+	This driver is designed for the 3Com 3CR990 Family of cards with the
+	3XP Processor. It has been tested on x86 and sparc64.
+
+	KNOWN ISSUES:
+	*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
+		issue. Hopefully 3Com will fix it.
+ *) Waiting for a command response takes 8ms due to non-preemptable + polling. Only significant for getting stats and creating + SAs, but an ugly wart never the less. + + TODO: + *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming. + *) Add more support for ethtool (especially for NIC stats) + *) Allow disabling of RX checksum offloading + *) Fix MAC changing to work while the interface is up + (Need to put commands on the TX ring, which changes + the locking) + *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See + http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org +*/ + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + * Setting to > 1518 effectively disables this feature. + */ +static int rx_copybreak = 200; + +/* Should we use MMIO or Port IO? + * 0: Port IO + * 1: MMIO + * 2: Try MMIO, fallback to Port IO + */ +static unsigned int use_mmio = 2; + +/* end user-configurable values */ + +/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). + */ +static const int multicast_filter_limit = 32; + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for compile efficiency. + * The compiler will convert '%'<2^N> into a bit mask. + * Making the Tx ring too large decreases the effectiveness of channel + * bonding and packet priority. + * There are no ill effects from too-large receive rings. + * + * We don't currently use the Hi Tx ring so, don't make it very big. + * + * Beware that if we start using the Hi Tx ring, we will need to change + * typhoon_num_free_tx() and typhoon_tx_complete() to account for that. + */ +#define TXHI_ENTRIES 2 +#define TXLO_ENTRIES 128 +#define RX_ENTRIES 32 +#define COMMAND_ENTRIES 16 +#define RESPONSE_ENTRIES 32 + +#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) +#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) + +/* The 3XP will preload and remove 64 entries from the free buffer + * list, and we need one entry to keep the ring from wrapping, so + * to keep this a power of two, we use 128 entries. + */ +#define RXFREE_ENTRIES 128 +#define RXENT_ENTRIES (RXFREE_ENTRIES - 1) + +/* Operational parameters that usually are not changed. */ + +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2*HZ) + +#define PKT_BUF_SZ 1536 +#define FIRMWARE_NAME "3com/typhoon.bin" + +#define pr_fmt(fmt) KBUILD_MODNAME " " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "typhoon.h" + +MODULE_AUTHOR("David Dillow "); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_NAME); +MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); +MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and " + "the buffer given back to the NIC. Default " + "is 200."); +MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. " + "Default is to try MMIO and fallback to PIO."); +module_param(rx_copybreak, int, 0); +module_param(use_mmio, int, 0); + +#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 +#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO +#undef NETIF_F_TSO +#endif + +#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) +#error TX ring too small! 
+#endif + +struct typhoon_card_info { + const char *name; + const int capabilities; +}; + +#define TYPHOON_CRYPTO_NONE 0x00 +#define TYPHOON_CRYPTO_DES 0x01 +#define TYPHOON_CRYPTO_3DES 0x02 +#define TYPHOON_CRYPTO_VARIABLE 0x04 +#define TYPHOON_FIBER 0x08 +#define TYPHOON_WAKEUP_NEEDS_RESET 0x10 + +enum typhoon_cards { + TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR, + TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR, + TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR, + TYPHOON_FXM, +}; + +/* directly indexed by enum typhoon_cards, above */ +static struct typhoon_card_info typhoon_card_info[] __devinitdata = { + { "3Com Typhoon (3C990-TX)", + TYPHOON_CRYPTO_NONE}, + { "3Com Typhoon (3CR990-TX-95)", + TYPHOON_CRYPTO_DES}, + { "3Com Typhoon (3CR990-TX-97)", + TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES}, + { "3Com Typhoon (3C990SVR)", + TYPHOON_CRYPTO_NONE}, + { "3Com Typhoon (3CR990SVR95)", + TYPHOON_CRYPTO_DES}, + { "3Com Typhoon (3CR990SVR97)", + TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES}, + { "3Com Typhoon2 (3C990B-TX-M)", + TYPHOON_CRYPTO_VARIABLE}, + { "3Com Typhoon2 (3C990BSVR)", + TYPHOON_CRYPTO_VARIABLE}, + { "3Com Typhoon (3CR990-FX-95)", + TYPHOON_CRYPTO_DES | TYPHOON_FIBER}, + { "3Com Typhoon (3CR990-FX-97)", + TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER}, + { "3Com Typhoon (3CR990-FX-95 Server)", + TYPHOON_CRYPTO_DES | TYPHOON_FIBER}, + { "3Com Typhoon (3CR990-FX-97 Server)", + TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER}, + { "3Com Typhoon2 (3C990B-FX-97)", + TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER}, +}; + +/* Notes on the new subsystem numbering scheme: + * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES + * bit 4 indicates if this card has secured firmware (we don't support it) + * bit 8 indicates if this is a (0) copper or (1) fiber card + * bits 12-16 indicate card type: (0) client and (1) server + */ +static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = { + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990, + PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B, + PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B, + PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B, + PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, + PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, + PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, + PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, + PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl); + +/* Define the shared memory area + * Align everything the 3XP will normally be using. + * We'll need to move/align txHi if we start using that ring. 
+ */ +#define __3xp_aligned ____cacheline_aligned +struct typhoon_shared { + struct typhoon_interface iface; + struct typhoon_indexes indexes __3xp_aligned; + struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned; + struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned; + struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned; + struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned; + struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned; + struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned; + u32 zeroWord; + struct tx_desc txHi[TXHI_ENTRIES]; +} __packed; + +struct rxbuff_ent { + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +struct typhoon { + /* Tx cache line section */ + struct transmit_ring txLoRing ____cacheline_aligned; + struct pci_dev * tx_pdev; + void __iomem *tx_ioaddr; + u32 txlo_dma_addr; + + /* Irq/Rx cache line section */ + void __iomem *ioaddr ____cacheline_aligned; + struct typhoon_indexes *indexes; + u8 awaiting_resp; + u8 duplex; + u8 speed; + u8 card_state; + struct basic_ring rxLoRing; + struct pci_dev * pdev; + struct net_device * dev; + struct napi_struct napi; + struct basic_ring rxHiRing; + struct basic_ring rxBuffRing; + struct rxbuff_ent rxbuffers[RXENT_ENTRIES]; + + /* general section */ + spinlock_t command_lock ____cacheline_aligned; + struct basic_ring cmdRing; + struct basic_ring respRing; + struct net_device_stats stats; + struct net_device_stats stats_saved; + struct typhoon_shared * shared; + dma_addr_t shared_dma; + __le16 xcvr_select; + __le16 wol_events; + __le32 offload; + + /* unused stuff (future use) */ + int capabilities; + struct transmit_ring txHiRing; +}; + +enum completion_wait_values { + NoWait = 0, WaitNoSleep, WaitSleep, +}; + +/* These are the values for the typhoon.card_state variable. + * These determine where the statistics will come from in get_stats(). + * The sleep image does not support the statistics we need. + */ +enum state_values { + Sleeping = 0, Running, +}; + +/* PCI writes are not guaranteed to be posted in order, but outstanding writes + * cannot pass a read, so this forces current writes to post. + */ +#define typhoon_post_pci_writes(x) \ + do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) + +/* We'll wait up to six seconds for a reset, and half a second normally. 
+ */
+#define TYPHOON_UDELAY			50
+#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
+#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
+#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
+
+#if defined(NETIF_F_TSO)
+#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
+#define TSO_NUM_DESCRIPTORS	2
+#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
+#else
+#define NETIF_F_TSO		0
+#define skb_tso_size(x)		0
+#define TSO_NUM_DESCRIPTORS	0
+#define TSO_OFFLOAD_ON		0
+#endif
+
+static inline void
+typhoon_inc_index(u32 *index, const int count, const int num_entries)
+{
+	/* Increment a ring index -- we can use this for all rings except
+	 * the Rx rings, as they use different size descriptors
+	 * otherwise, everything is the same size as a cmd_desc
+	 */
+	*index += count * sizeof(struct cmd_desc);
+	*index %= num_entries * sizeof(struct cmd_desc);
+}
+
+static inline void
+typhoon_inc_cmd_index(u32 *index, const int count)
+{
+	typhoon_inc_index(index, count, COMMAND_ENTRIES);
+}
+
+static inline void
+typhoon_inc_resp_index(u32 *index, const int count)
+{
+	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
+}
+
+static inline void
+typhoon_inc_rxfree_index(u32 *index, const int count)
+{
+	typhoon_inc_index(index, count, RXFREE_ENTRIES);
+}
+
+static inline void
+typhoon_inc_tx_index(u32 *index, const int count)
+{
+	/* if we start using the Hi Tx ring, this needs updating */
+	typhoon_inc_index(index, count, TXLO_ENTRIES);
+}
+
+static inline void
+typhoon_inc_rx_index(u32 *index, const int count)
+{
+	/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
+	*index += count * sizeof(struct rx_desc);
+	*index %= RX_ENTRIES * sizeof(struct rx_desc);
+}
+
+static int
+typhoon_reset(void __iomem *ioaddr, int wait_type)
+{
+	int i, err = 0;
+	int timeout;
+
+	if(wait_type == WaitNoSleep)
+		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
+	else
+		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
+
+	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
+
+	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
+	typhoon_post_pci_writes(ioaddr);
+	udelay(1);
+	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
+
+	if(wait_type != NoWait) {
+		for(i = 0; i < timeout; i++) {
+			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
+			   TYPHOON_STATUS_WAITING_FOR_HOST)
+				goto out;
+
+			if(wait_type == WaitSleep)
+				schedule_timeout_uninterruptible(1);
+			else
+				udelay(TYPHOON_UDELAY);
+		}
+
+		err = -ETIMEDOUT;
+	}
+
+out:
+	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
+
+	/* The 3XP seems to need a little extra time to complete the load
+	 * of the sleep image before we can reliably boot it. Failure to
+	 * do this occasionally results in a hung adapter after boot in
+	 * typhoon_init_one() while trying to read the MAC address or
+	 * putting the card to sleep. 3Com's driver waits 5ms, but
+	 * that seems to be overkill. However, if we can sleep, we might
+	 * as well give it that much time. Otherwise, we'll give it 500us,
+	 * which should be enough (I've seen it work well at 100us, but still
+	 * saw occasional problems.)
+ */ + if(wait_type == WaitSleep) + msleep(5); + else + udelay(500); + return err; +} + +static int +typhoon_wait_status(void __iomem *ioaddr, u32 wait_value) +{ + int i, err = 0; + + for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { + if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value) + goto out; + udelay(TYPHOON_UDELAY); + } + + err = -ETIMEDOUT; + +out: + return err; +} + +static inline void +typhoon_media_status(struct net_device *dev, struct resp_desc *resp) +{ + if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK) + netif_carrier_off(dev); + else + netif_carrier_on(dev); +} + +static inline void +typhoon_hello(struct typhoon *tp) +{ + struct basic_ring *ring = &tp->cmdRing; + struct cmd_desc *cmd; + + /* We only get a hello request if we've not sent anything to the + * card in a long while. If the lock is held, then we're in the + * process of issuing a command, so we don't need to respond. + */ + if(spin_trylock(&tp->command_lock)) { + cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite); + typhoon_inc_cmd_index(&ring->lastWrite, 1); + + INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP); + wmb(); + iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); + spin_unlock(&tp->command_lock); + } +} + +static int +typhoon_process_response(struct typhoon *tp, int resp_size, + struct resp_desc *resp_save) +{ + struct typhoon_indexes *indexes = tp->indexes; + struct resp_desc *resp; + u8 *base = tp->respRing.ringBase; + int count, len, wrap_len; + u32 cleared; + u32 ready; + + cleared = le32_to_cpu(indexes->respCleared); + ready = le32_to_cpu(indexes->respReady); + while(cleared != ready) { + resp = (struct resp_desc *)(base + cleared); + count = resp->numDesc + 1; + if(resp_save && resp->seqNo) { + if(count > resp_size) { + resp_save->flags = TYPHOON_RESP_ERROR; + goto cleanup; + } + + wrap_len = 0; + len = count * sizeof(*resp); + if(unlikely(cleared + len > RESPONSE_RING_SIZE)) { + wrap_len = cleared + len - RESPONSE_RING_SIZE; + len = RESPONSE_RING_SIZE - cleared; + } + + memcpy(resp_save, resp, len); + if(unlikely(wrap_len)) { + resp_save += len / sizeof(*resp); + memcpy(resp_save, base, wrap_len); + } + + resp_save = NULL; + } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) { + typhoon_media_status(tp->dev, resp); + } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) { + typhoon_hello(tp); + } else { + netdev_err(tp->dev, + "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n", + le16_to_cpu(resp->cmd), + resp->numDesc, resp->flags, + le16_to_cpu(resp->parm1), + le32_to_cpu(resp->parm2), + le32_to_cpu(resp->parm3)); + } + +cleanup: + typhoon_inc_resp_index(&cleared, count); + } + + indexes->respCleared = cpu_to_le32(cleared); + wmb(); + return resp_save == NULL; +} + +static inline int +typhoon_num_free(int lastWrite, int lastRead, int ringSize) +{ + /* this works for all descriptors but rx_desc, as they are a + * different size than the cmd_desc -- everyone else is the same + */ + lastWrite /= sizeof(struct cmd_desc); + lastRead /= sizeof(struct cmd_desc); + return (ringSize + lastRead - lastWrite - 1) % ringSize; +} + +static inline int +typhoon_num_free_cmd(struct typhoon *tp) +{ + int lastWrite = tp->cmdRing.lastWrite; + int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared); + + return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES); +} + +static inline int +typhoon_num_free_resp(struct typhoon *tp) +{ + int respReady = le32_to_cpu(tp->indexes->respReady); + int respCleared = le32_to_cpu(tp->indexes->respCleared); + + return typhoon_num_free(respReady, 
respCleared, RESPONSE_ENTRIES); +} + +static inline int +typhoon_num_free_tx(struct transmit_ring *ring) +{ + /* if we start using the Hi Tx ring, this needs updating */ + return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES); +} + +static int +typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd, + int num_resp, struct resp_desc *resp) +{ + struct typhoon_indexes *indexes = tp->indexes; + struct basic_ring *ring = &tp->cmdRing; + struct resp_desc local_resp; + int i, err = 0; + int got_resp; + int freeCmd, freeResp; + int len, wrap_len; + + spin_lock(&tp->command_lock); + + freeCmd = typhoon_num_free_cmd(tp); + freeResp = typhoon_num_free_resp(tp); + + if(freeCmd < num_cmd || freeResp < num_resp) { + netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n", + freeCmd, num_cmd, freeResp, num_resp); + err = -ENOMEM; + goto out; + } + + if(cmd->flags & TYPHOON_CMD_RESPOND) { + /* If we're expecting a response, but the caller hasn't given + * us a place to put it, we'll provide one. + */ + tp->awaiting_resp = 1; + if(resp == NULL) { + resp = &local_resp; + num_resp = 1; + } + } + + wrap_len = 0; + len = num_cmd * sizeof(*cmd); + if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) { + wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE; + len = COMMAND_RING_SIZE - ring->lastWrite; + } + + memcpy(ring->ringBase + ring->lastWrite, cmd, len); + if(unlikely(wrap_len)) { + struct cmd_desc *wrap_ptr = cmd; + wrap_ptr += len / sizeof(*cmd); + memcpy(ring->ringBase, wrap_ptr, wrap_len); + } + + typhoon_inc_cmd_index(&ring->lastWrite, num_cmd); + + /* "I feel a presence... another warrior is on the mesa." + */ + wmb(); + iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); + typhoon_post_pci_writes(tp->ioaddr); + + if((cmd->flags & TYPHOON_CMD_RESPOND) == 0) + goto out; + + /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to + * preempt or do anything other than take interrupts. So, don't + * wait for a response unless you have to. + * + * I've thought about trying to sleep here, but we're called + * from many contexts that don't allow that. Also, given the way + * 3Com has implemented irq coalescing, we would likely timeout -- + * this has been observed in real life! + * + * The big killer is we have to wait to get stats from the card, + * though we could go to a periodic refresh of those if we don't + * mind them getting somewhat stale. The rest of the waiting + * commands occur during open/close/suspend/resume, so they aren't + * time critical. Creating SAs in the future will also have to + * wait here. + */ + got_resp = 0; + for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) { + if(indexes->respCleared != indexes->respReady) + got_resp = typhoon_process_response(tp, num_resp, + resp); + udelay(TYPHOON_UDELAY); + } + + if(!got_resp) { + err = -ETIMEDOUT; + goto out; + } + + /* Collect the error response even if we don't care about the + * rest of the response + */ + if(resp->flags & TYPHOON_RESP_ERROR) + err = -EIO; + +out: + if(tp->awaiting_resp) { + tp->awaiting_resp = 0; + smp_wmb(); + + /* Ugh. If a response was added to the ring between + * the call to typhoon_process_response() and the clearing + * of tp->awaiting_resp, we could have missed the interrupt + * and it could hang in the ring an indeterminate amount of + * time. So, check for it, and interrupt ourselves if this + * is the case. 
+ */ + if(indexes->respCleared != indexes->respReady) + iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT); + } + + spin_unlock(&tp->command_lock); + return err; +} + +static inline void +typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing, + u32 ring_dma) +{ + struct tcpopt_desc *tcpd; + u32 tcpd_offset = ring_dma; + + tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite); + tcpd_offset += txRing->lastWrite; + tcpd_offset += offsetof(struct tcpopt_desc, bytesTx); + typhoon_inc_tx_index(&txRing->lastWrite, 1); + + tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG; + tcpd->numDesc = 1; + tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb)); + tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST; + tcpd->respAddrLo = cpu_to_le32(tcpd_offset); + tcpd->bytesTx = cpu_to_le32(skb->len); + tcpd->status = 0; +} + +static netdev_tx_t +typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + struct transmit_ring *txRing; + struct tx_desc *txd, *first_txd; + dma_addr_t skb_dma; + int numDesc; + + /* we have two rings to choose from, but we only use txLo for now + * If we start using the Hi ring as well, we'll need to update + * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(), + * and TXHI_ENTRIES to match, as well as update the TSO code below + * to get the right DMA address + */ + txRing = &tp->txLoRing; + + /* We need one descriptor for each fragment of the sk_buff, plus the + * one for the ->data area of it. + * + * The docs say a maximum of 16 fragment descriptors per TCP option + * descriptor, then make a new packet descriptor and option descriptor + * for the next 16 fragments. The engineers say just an option + * descriptor is needed. I've tested up to 26 fragments with a single + * packet descriptor/option descriptor combo, so I use that for now. + * + * If problems develop with TSO, check this first. + */ + numDesc = skb_shinfo(skb)->nr_frags + 1; + if (skb_is_gso(skb)) + numDesc++; + + /* When checking for free space in the ring, we need to also + * account for the initial Tx descriptor, and we always must leave + * at least one descriptor unused in the ring so that it doesn't + * wrap and look empty. + * + * The only time we should loop here is when we hit the race + * between marking the queue awake and updating the cleared index. + * Just loop and it will appear. This comes from the acenic driver. 
+ */ + while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2))) + smp_rmb(); + + first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite); + typhoon_inc_tx_index(&txRing->lastWrite, 1); + + first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID; + first_txd->numDesc = 0; + first_txd->len = 0; + first_txd->tx_addr = (u64)((unsigned long) skb); + first_txd->processFlags = 0; + + if(skb->ip_summed == CHECKSUM_PARTIAL) { + /* The 3XP will figure out if this is UDP/TCP */ + first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM; + first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM; + first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM; + } + + if(vlan_tx_tag_present(skb)) { + first_txd->processFlags |= + TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY; + first_txd->processFlags |= + cpu_to_le32(htons(vlan_tx_tag_get(skb)) << + TYPHOON_TX_PF_VLAN_TAG_SHIFT); + } + + if (skb_is_gso(skb)) { + first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; + first_txd->numDesc++; + + typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr); + } + + txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite); + typhoon_inc_tx_index(&txRing->lastWrite, 1); + + /* No need to worry about padding packet -- the firmware pads + * it with zeros to ETH_ZLEN for us. + */ + if(skb_shinfo(skb)->nr_frags == 0) { + skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID; + txd->len = cpu_to_le16(skb->len); + txd->frag.addr = cpu_to_le32(skb_dma); + txd->frag.addrHi = 0; + first_txd->numDesc++; + } else { + int i, len; + + len = skb_headlen(skb); + skb_dma = pci_map_single(tp->tx_pdev, skb->data, len, + PCI_DMA_TODEVICE); + txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID; + txd->len = cpu_to_le16(len); + txd->frag.addr = cpu_to_le32(skb_dma); + txd->frag.addrHi = 0; + first_txd->numDesc++; + + for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *frag_addr; + + txd = (struct tx_desc *) (txRing->ringBase + + txRing->lastWrite); + typhoon_inc_tx_index(&txRing->lastWrite, 1); + + len = frag->size; + frag_addr = (void *) page_address(frag->page) + + frag->page_offset; + skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len, + PCI_DMA_TODEVICE); + txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID; + txd->len = cpu_to_le16(len); + txd->frag.addr = cpu_to_le32(skb_dma); + txd->frag.addrHi = 0; + first_txd->numDesc++; + } + } + + /* Kick the 3XP + */ + wmb(); + iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister); + + /* If we don't have room to put the worst case packet on the + * queue, then we must stop the queue. We need 2 extra + * descriptors -- one to prevent ring wrap, and one for the + * Tx header. + */ + numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1; + + if(typhoon_num_free_tx(txRing) < (numDesc + 2)) { + netif_stop_queue(dev); + + /* A Tx complete IRQ could have gotten between, making + * the ring free again. Only need to recheck here, since + * Tx is serialized. 
+ */ + if(typhoon_num_free_tx(txRing) >= (numDesc + 2)) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; +} + +static void +typhoon_set_rx_mode(struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + struct cmd_desc xp_cmd; + u32 mc_filter[2]; + __le16 filter; + + filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; + if(dev->flags & IFF_PROMISC) { + filter |= TYPHOON_RX_FILTER_PROMISCOUS; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to match, or accept all multicasts. */ + filter |= TYPHOON_RX_FILTER_ALL_MCAST; + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f; + mc_filter[bit >> 5] |= 1 << (bit & 0x1f); + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, + TYPHOON_CMD_SET_MULTICAST_HASH); + xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET; + xp_cmd.parm2 = cpu_to_le32(mc_filter[0]); + xp_cmd.parm3 = cpu_to_le32(mc_filter[1]); + typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + + filter |= TYPHOON_RX_FILTER_MCAST_HASH; + } + + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER); + xp_cmd.parm1 = filter; + typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); +} + +static int +typhoon_do_get_stats(struct typhoon *tp) +{ + struct net_device_stats *stats = &tp->stats; + struct net_device_stats *saved = &tp->stats_saved; + struct cmd_desc xp_cmd; + struct resp_desc xp_resp[7]; + struct stats_resp *s = (struct stats_resp *) xp_resp; + int err; + + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS); + err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp); + if(err < 0) + return err; + + /* 3Com's Linux driver uses txMultipleCollisions as it's + * collisions value, but there is some other collision info as well... + * + * The extra status reported would be a good candidate for + * ethtool_ops->get_{strings,stats}() + */ + stats->tx_packets = le32_to_cpu(s->txPackets) + + saved->tx_packets; + stats->tx_bytes = le64_to_cpu(s->txBytes) + + saved->tx_bytes; + stats->tx_errors = le32_to_cpu(s->txCarrierLost) + + saved->tx_errors; + stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) + + saved->tx_carrier_errors; + stats->collisions = le32_to_cpu(s->txMultipleCollisions) + + saved->collisions; + stats->rx_packets = le32_to_cpu(s->rxPacketsGood) + + saved->rx_packets; + stats->rx_bytes = le64_to_cpu(s->rxBytesGood) + + saved->rx_bytes; + stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) + + saved->rx_fifo_errors; + stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) + + le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) + + saved->rx_errors; + stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) + + saved->rx_crc_errors; + stats->rx_length_errors = le32_to_cpu(s->rxOversized) + + saved->rx_length_errors; + tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ? + SPEED_100 : SPEED_10; + tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ? 
+ DUPLEX_FULL : DUPLEX_HALF; + + return 0; +} + +static struct net_device_stats * +typhoon_get_stats(struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + struct net_device_stats *stats = &tp->stats; + struct net_device_stats *saved = &tp->stats_saved; + + smp_rmb(); + if(tp->card_state == Sleeping) + return saved; + + if(typhoon_do_get_stats(tp) < 0) { + netdev_err(dev, "error getting stats\n"); + return saved; + } + + return stats; +} + +static int +typhoon_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *saddr = (struct sockaddr *) addr; + + if(netif_running(dev)) + return -EBUSY; + + memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len); + return 0; +} + +static void +typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct typhoon *tp = netdev_priv(dev); + struct pci_dev *pci_dev = tp->pdev; + struct cmd_desc xp_cmd; + struct resp_desc xp_resp[3]; + + smp_rmb(); + if(tp->card_state == Sleeping) { + strcpy(info->fw_version, "Sleep image"); + } else { + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); + if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { + strcpy(info->fw_version, "Unknown runtime"); + } else { + u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); + snprintf(info->fw_version, 32, "%02x.%03x.%03x", + sleep_ver >> 24, (sleep_ver >> 12) & 0xfff, + sleep_ver & 0xfff); + } + } + + strcpy(info->driver, KBUILD_MODNAME); + strcpy(info->bus_info, pci_name(pci_dev)); +} + +static int +typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct typhoon *tp = netdev_priv(dev); + + cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg; + + switch (tp->xcvr_select) { + case TYPHOON_XCVR_10HALF: + cmd->advertising = ADVERTISED_10baseT_Half; + break; + case TYPHOON_XCVR_10FULL: + cmd->advertising = ADVERTISED_10baseT_Full; + break; + case TYPHOON_XCVR_100HALF: + cmd->advertising = ADVERTISED_100baseT_Half; + break; + case TYPHOON_XCVR_100FULL: + cmd->advertising = ADVERTISED_100baseT_Full; + break; + case TYPHOON_XCVR_AUTONEG: + cmd->advertising = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_Autoneg; + break; + } + + if(tp->capabilities & TYPHOON_FIBER) { + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + cmd->port = PORT_FIBRE; + } else { + cmd->supported |= SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP; + cmd->advertising |= ADVERTISED_TP; + cmd->port = PORT_TP; + } + + /* need to get stats to make these link speed/duplex valid */ + typhoon_do_get_stats(tp); + ethtool_cmd_speed_set(cmd, tp->speed); + cmd->duplex = tp->duplex; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG) + cmd->autoneg = AUTONEG_ENABLE; + else + cmd->autoneg = AUTONEG_DISABLE; + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + + return 0; +} + +static int +typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct typhoon *tp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + struct cmd_desc xp_cmd; + __le16 xcvr; + int err; + + err = -EINVAL; + if (cmd->autoneg == AUTONEG_ENABLE) { + xcvr = TYPHOON_XCVR_AUTONEG; + } else { + if (cmd->duplex == DUPLEX_HALF) { + if (speed == SPEED_10) + xcvr = TYPHOON_XCVR_10HALF; + else if (speed == SPEED_100) + xcvr = TYPHOON_XCVR_100HALF; + else + goto out; + } else if (cmd->duplex == DUPLEX_FULL) { + if (speed == SPEED_10) + xcvr 
= TYPHOON_XCVR_10FULL; + else if (speed == SPEED_100) + xcvr = TYPHOON_XCVR_100FULL; + else + goto out; + } else + goto out; + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT); + xp_cmd.parm1 = xcvr; + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto out; + + tp->xcvr_select = xcvr; + if(cmd->autoneg == AUTONEG_ENABLE) { + tp->speed = 0xff; /* invalid */ + tp->duplex = 0xff; /* invalid */ + } else { + tp->speed = speed; + tp->duplex = cmd->duplex; + } + +out: + return err; +} + +static void +typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct typhoon *tp = netdev_priv(dev); + + wol->supported = WAKE_PHY | WAKE_MAGIC; + wol->wolopts = 0; + if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT) + wol->wolopts |= WAKE_PHY; + if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) + wol->wolopts |= WAKE_MAGIC; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int +typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct typhoon *tp = netdev_priv(dev); + + if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) + return -EINVAL; + + tp->wol_events = 0; + if(wol->wolopts & WAKE_PHY) + tp->wol_events |= TYPHOON_WAKE_LINK_EVENT; + if(wol->wolopts & WAKE_MAGIC) + tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT; + + return 0; +} + +static void +typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + ering->rx_max_pending = RXENT_ENTRIES; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; + ering->tx_max_pending = TXLO_ENTRIES - 1; + + ering->rx_pending = RXENT_ENTRIES; + ering->rx_mini_pending = 0; + ering->rx_jumbo_pending = 0; + ering->tx_pending = TXLO_ENTRIES - 1; +} + +static const struct ethtool_ops typhoon_ethtool_ops = { + .get_settings = typhoon_get_settings, + .set_settings = typhoon_set_settings, + .get_drvinfo = typhoon_get_drvinfo, + .get_wol = typhoon_get_wol, + .set_wol = typhoon_set_wol, + .get_link = ethtool_op_get_link, + .get_ringparam = typhoon_get_ringparam, +}; + +static int +typhoon_wait_interrupt(void __iomem *ioaddr) +{ + int i, err = 0; + + for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { + if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) & + TYPHOON_INTR_BOOTCMD) + goto out; + udelay(TYPHOON_UDELAY); + } + + err = -ETIMEDOUT; + +out: + iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS); + return err; +} + +#define shared_offset(x) offsetof(struct typhoon_shared, x) + +static void +typhoon_init_interface(struct typhoon *tp) +{ + struct typhoon_interface *iface = &tp->shared->iface; + dma_addr_t shared_dma; + + memset(tp->shared, 0, sizeof(struct typhoon_shared)); + + /* The *Hi members of iface are all init'd to zero by the memset(). 
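+ * That is sufficient, since the host only ever hands the 3XP 32 bit
+ * bus addresses; every *Hi word can legitimately stay zero (see the
+ * typhoon_interface comment in typhoon.h).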
+ */ + shared_dma = tp->shared_dma + shared_offset(indexes); + iface->ringIndex = cpu_to_le32(shared_dma); + + shared_dma = tp->shared_dma + shared_offset(txLo); + iface->txLoAddr = cpu_to_le32(shared_dma); + iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc)); + + shared_dma = tp->shared_dma + shared_offset(txHi); + iface->txHiAddr = cpu_to_le32(shared_dma); + iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc)); + + shared_dma = tp->shared_dma + shared_offset(rxBuff); + iface->rxBuffAddr = cpu_to_le32(shared_dma); + iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES * + sizeof(struct rx_free)); + + shared_dma = tp->shared_dma + shared_offset(rxLo); + iface->rxLoAddr = cpu_to_le32(shared_dma); + iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc)); + + shared_dma = tp->shared_dma + shared_offset(rxHi); + iface->rxHiAddr = cpu_to_le32(shared_dma); + iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc)); + + shared_dma = tp->shared_dma + shared_offset(cmd); + iface->cmdAddr = cpu_to_le32(shared_dma); + iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE); + + shared_dma = tp->shared_dma + shared_offset(resp); + iface->respAddr = cpu_to_le32(shared_dma); + iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE); + + shared_dma = tp->shared_dma + shared_offset(zeroWord); + iface->zeroAddr = cpu_to_le32(shared_dma); + + tp->indexes = &tp->shared->indexes; + tp->txLoRing.ringBase = (u8 *) tp->shared->txLo; + tp->txHiRing.ringBase = (u8 *) tp->shared->txHi; + tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo; + tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi; + tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff; + tp->cmdRing.ringBase = (u8 *) tp->shared->cmd; + tp->respRing.ringBase = (u8 *) tp->shared->resp; + + tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY; + tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY; + + tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr); + tp->card_state = Sleeping; + + tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM; + tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON; + tp->offload |= TYPHOON_OFFLOAD_VLAN; + + spin_lock_init(&tp->command_lock); + + /* Force the writes to the shared memory area out before continuing. 
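+ * The card cannot look at this block until typhoon_boot_3XP() hands it
+ * the boot record address, but the wmb() below guarantees that all of
+ * the stores above are visible in memory before that handoff happens.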
*/ + wmb(); +} + +static void +typhoon_init_rings(struct typhoon *tp) +{ + memset(tp->indexes, 0, sizeof(struct typhoon_indexes)); + + tp->txLoRing.lastWrite = 0; + tp->txHiRing.lastWrite = 0; + tp->rxLoRing.lastWrite = 0; + tp->rxHiRing.lastWrite = 0; + tp->rxBuffRing.lastWrite = 0; + tp->cmdRing.lastWrite = 0; + tp->respRing.lastWrite = 0; + + tp->txLoRing.lastRead = 0; + tp->txHiRing.lastRead = 0; +} + +static const struct firmware *typhoon_fw; + +static int +typhoon_request_firmware(struct typhoon *tp) +{ + const struct typhoon_file_header *fHdr; + const struct typhoon_section_header *sHdr; + const u8 *image_data; + u32 numSections; + u32 section_len; + u32 remaining; + int err; + + if (typhoon_fw) + return 0; + + err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev); + if (err) { + netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", + FIRMWARE_NAME); + return err; + } + + image_data = (u8 *) typhoon_fw->data; + remaining = typhoon_fw->size; + if (remaining < sizeof(struct typhoon_file_header)) + goto invalid_fw; + + fHdr = (struct typhoon_file_header *) image_data; + if (memcmp(fHdr->tag, "TYPHOON", 8)) + goto invalid_fw; + + numSections = le32_to_cpu(fHdr->numSections); + image_data += sizeof(struct typhoon_file_header); + remaining -= sizeof(struct typhoon_file_header); + + while (numSections--) { + if (remaining < sizeof(struct typhoon_section_header)) + goto invalid_fw; + + sHdr = (struct typhoon_section_header *) image_data; + image_data += sizeof(struct typhoon_section_header); + section_len = le32_to_cpu(sHdr->len); + + if (remaining < section_len) + goto invalid_fw; + + image_data += section_len; + remaining -= section_len; + } + + return 0; + +invalid_fw: + netdev_err(tp->dev, "Invalid firmware image\n"); + release_firmware(typhoon_fw); + typhoon_fw = NULL; + return -EINVAL; +} + +static int +typhoon_download_firmware(struct typhoon *tp) +{ + void __iomem *ioaddr = tp->ioaddr; + struct pci_dev *pdev = tp->pdev; + const struct typhoon_file_header *fHdr; + const struct typhoon_section_header *sHdr; + const u8 *image_data; + void *dpage; + dma_addr_t dpage_dma; + __sum16 csum; + u32 irqEnabled; + u32 irqMasked; + u32 numSections; + u32 section_len; + u32 len; + u32 load_addr; + u32 hmac; + int i; + int err; + + image_data = (u8 *) typhoon_fw->data; + fHdr = (struct typhoon_file_header *) image_data; + + /* Cannot just map the firmware image using pci_map_single() as + * the firmware is vmalloc()'d and may not be physically contiguous, + * so we allocate some consistent memory to copy the sections into. 
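+ * A single PAGE_SIZE bounce buffer suffices because the download loop
+ * feeds the card at most PAGE_SIZE bytes per segment. (On current
+ * kernels the equivalent allocation would be dma_alloc_coherent(), of
+ * which pci_alloc_consistent() is the older PCI wrapper.)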
+ */ + err = -ENOMEM; + dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma); + if(!dpage) { + netdev_err(tp->dev, "no DMA mem for firmware\n"); + goto err_out; + } + + irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE); + iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD, + ioaddr + TYPHOON_REG_INTR_ENABLE); + irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK); + iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD, + ioaddr + TYPHOON_REG_INTR_MASK); + + err = -ETIMEDOUT; + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + netdev_err(tp->dev, "card ready timeout\n"); + goto err_out_irq; + } + + numSections = le32_to_cpu(fHdr->numSections); + load_addr = le32_to_cpu(fHdr->startAddr); + + iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS); + iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR); + hmac = le32_to_cpu(fHdr->hmacDigest[0]); + iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0); + hmac = le32_to_cpu(fHdr->hmacDigest[1]); + iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1); + hmac = le32_to_cpu(fHdr->hmacDigest[2]); + iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2); + hmac = le32_to_cpu(fHdr->hmacDigest[3]); + iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3); + hmac = le32_to_cpu(fHdr->hmacDigest[4]); + iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4); + typhoon_post_pci_writes(ioaddr); + iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND); + + image_data += sizeof(struct typhoon_file_header); + + /* The ioread32() in typhoon_wait_interrupt() will force the + * last write to the command register to post, so + * we don't need a typhoon_post_pci_writes() after it. + */ + for(i = 0; i < numSections; i++) { + sHdr = (struct typhoon_section_header *) image_data; + image_data += sizeof(struct typhoon_section_header); + load_addr = le32_to_cpu(sHdr->startAddr); + section_len = le32_to_cpu(sHdr->len); + + while(section_len) { + len = min_t(u32, section_len, PAGE_SIZE); + + if(typhoon_wait_interrupt(ioaddr) < 0 || + ioread32(ioaddr + TYPHOON_REG_STATUS) != + TYPHOON_STATUS_WAITING_FOR_SEGMENT) { + netdev_err(tp->dev, "segment ready timeout\n"); + goto err_out_irq; + } + + /* Do an pseudo IPv4 checksum on the data -- first + * need to convert each u16 to cpu order before + * summing. Fortunately, due to the properties of + * the checksum, we can do this once, at the end. 
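+ * The 16 bit ones-complement sum commutes with a uniform byte swap,
+ * i.e. swab16() of the sum equals the sum of the byte-swapped words,
+ * so one le16_to_cpu() of the folded result below replaces swapping
+ * every u16 individually.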
+ */ + csum = csum_fold(csum_partial_copy_nocheck(image_data, + dpage, len, + 0)); + + iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH); + iowrite32(le16_to_cpu((__force __le16)csum), + ioaddr + TYPHOON_REG_BOOT_CHECKSUM); + iowrite32(load_addr, + ioaddr + TYPHOON_REG_BOOT_DEST_ADDR); + iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI); + iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO); + typhoon_post_pci_writes(ioaddr); + iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE, + ioaddr + TYPHOON_REG_COMMAND); + + image_data += len; + load_addr += len; + section_len -= len; + } + } + + if(typhoon_wait_interrupt(ioaddr) < 0 || + ioread32(ioaddr + TYPHOON_REG_STATUS) != + TYPHOON_STATUS_WAITING_FOR_SEGMENT) { + netdev_err(tp->dev, "final segment ready timeout\n"); + goto err_out_irq; + } + + iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND); + + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) { + netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n", + ioread32(ioaddr + TYPHOON_REG_STATUS)); + goto err_out_irq; + } + + err = 0; + +err_out_irq: + iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK); + iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE); + + pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma); + +err_out: + return err; +} + +static int +typhoon_boot_3XP(struct typhoon *tp, u32 initial_status) +{ + void __iomem *ioaddr = tp->ioaddr; + + if(typhoon_wait_status(ioaddr, initial_status) < 0) { + netdev_err(tp->dev, "boot ready timeout\n"); + goto out_timeout; + } + + iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI); + iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO); + typhoon_post_pci_writes(ioaddr); + iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD, + ioaddr + TYPHOON_REG_COMMAND); + + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) { + netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n", + ioread32(ioaddr + TYPHOON_REG_STATUS)); + goto out_timeout; + } + + /* Clear the Transmit and Command ready registers + */ + iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY); + iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY); + iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY); + typhoon_post_pci_writes(ioaddr); + iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND); + + return 0; + +out_timeout: + return -ETIMEDOUT; +} + +static u32 +typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing, + volatile __le32 * index) +{ + u32 lastRead = txRing->lastRead; + struct tx_desc *tx; + dma_addr_t skb_dma; + int dma_len; + int type; + + while(lastRead != le32_to_cpu(*index)) { + tx = (struct tx_desc *) (txRing->ringBase + lastRead); + type = tx->flags & TYPHOON_TYPE_MASK; + + if(type == TYPHOON_TX_DESC) { + /* This tx_desc describes a packet. + */ + unsigned long ptr = tx->tx_addr; + struct sk_buff *skb = (struct sk_buff *) ptr; + dev_kfree_skb_irq(skb); + } else if(type == TYPHOON_FRAG_DESC) { + /* This tx_desc describes a memory mapping. Free it. + */ + skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr); + dma_len = le16_to_cpu(tx->len); + pci_unmap_single(tp->pdev, skb_dma, dma_len, + PCI_DMA_TODEVICE); + } + + tx->flags = 0; + typhoon_inc_tx_index(&lastRead, 1); + } + + return lastRead; +} + +static void +typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing, + volatile __le32 * index) +{ + u32 lastRead; + int numDesc = MAX_SKB_FRAGS + 1; + + /* This will need changing if we start to use the Hi Tx ring. 
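+ * numDesc above is the worst case for one packet (a tx_desc plus up
+ * to MAX_SKB_FRAGS fragment descriptors), so the queue is only woken
+ * once more than numDesc + 2 descriptor slots are free.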
*/ + lastRead = typhoon_clean_tx(tp, txRing, index); + if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite, + lastRead, TXLO_ENTRIES) > (numDesc + 2)) + netif_wake_queue(tp->dev); + + txRing->lastRead = lastRead; + smp_wmb(); +} + +static void +typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx) +{ + struct typhoon_indexes *indexes = tp->indexes; + struct rxbuff_ent *rxb = &tp->rxbuffers[idx]; + struct basic_ring *ring = &tp->rxBuffRing; + struct rx_free *r; + + if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) == + le32_to_cpu(indexes->rxBuffCleared)) { + /* no room in ring, just drop the skb + */ + dev_kfree_skb_any(rxb->skb); + rxb->skb = NULL; + return; + } + + r = (struct rx_free *) (ring->ringBase + ring->lastWrite); + typhoon_inc_rxfree_index(&ring->lastWrite, 1); + r->virtAddr = idx; + r->physAddr = cpu_to_le32(rxb->dma_addr); + + /* Tell the card about it */ + wmb(); + indexes->rxBuffReady = cpu_to_le32(ring->lastWrite); +} + +static int +typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx) +{ + struct typhoon_indexes *indexes = tp->indexes; + struct rxbuff_ent *rxb = &tp->rxbuffers[idx]; + struct basic_ring *ring = &tp->rxBuffRing; + struct rx_free *r; + struct sk_buff *skb; + dma_addr_t dma_addr; + + rxb->skb = NULL; + + if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) == + le32_to_cpu(indexes->rxBuffCleared)) + return -ENOMEM; + + skb = dev_alloc_skb(PKT_BUF_SZ); + if(!skb) + return -ENOMEM; + +#if 0 + /* Please, 3com, fix the firmware to allow DMA to a unaligned + * address! Pretty please? + */ + skb_reserve(skb, 2); +#endif + + skb->dev = tp->dev; + dma_addr = pci_map_single(tp->pdev, skb->data, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + + /* Since no card does 64 bit DAC, the high bits will never + * change from zero. 
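+ * rx_free.physAddrHi was zeroed along with the rest of the shared
+ * area in typhoon_init_interface() and is never written afterwards,
+ * which is what makes relying on that safe.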
+ */ + r = (struct rx_free *) (ring->ringBase + ring->lastWrite); + typhoon_inc_rxfree_index(&ring->lastWrite, 1); + r->virtAddr = idx; + r->physAddr = cpu_to_le32(dma_addr); + rxb->skb = skb; + rxb->dma_addr = dma_addr; + + /* Tell the card about it */ + wmb(); + indexes->rxBuffReady = cpu_to_le32(ring->lastWrite); + return 0; +} + +static int +typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready, + volatile __le32 * cleared, int budget) +{ + struct rx_desc *rx; + struct sk_buff *skb, *new_skb; + struct rxbuff_ent *rxb; + dma_addr_t dma_addr; + u32 local_ready; + u32 rxaddr; + int pkt_len; + u32 idx; + __le32 csum_bits; + int received; + + received = 0; + local_ready = le32_to_cpu(*ready); + rxaddr = le32_to_cpu(*cleared); + while(rxaddr != local_ready && budget > 0) { + rx = (struct rx_desc *) (rxRing->ringBase + rxaddr); + idx = rx->addr; + rxb = &tp->rxbuffers[idx]; + skb = rxb->skb; + dma_addr = rxb->dma_addr; + + typhoon_inc_rx_index(&rxaddr, 1); + + if(rx->flags & TYPHOON_RX_ERROR) { + typhoon_recycle_rx_skb(tp, idx); + continue; + } + + pkt_len = le16_to_cpu(rx->frameLen); + + if(pkt_len < rx_copybreak && + (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) { + skb_reserve(new_skb, 2); + pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, + PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(new_skb, skb->data, pkt_len); + pci_dma_sync_single_for_device(tp->pdev, dma_addr, + PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); + skb_put(new_skb, pkt_len); + typhoon_recycle_rx_skb(tp, idx); + } else { + new_skb = skb; + skb_put(new_skb, pkt_len); + pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); + typhoon_alloc_rx_skb(tp, idx); + } + new_skb->protocol = eth_type_trans(new_skb, tp->dev); + csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD | + TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD); + if(csum_bits == + (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) || + csum_bits == + (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) { + new_skb->ip_summed = CHECKSUM_UNNECESSARY; + } else + skb_checksum_none_assert(new_skb); + + if (rx->rxStatus & TYPHOON_RX_VLAN) + __vlan_hwaccel_put_tag(new_skb, + ntohl(rx->vlanTag) & 0xffff); + netif_receive_skb(new_skb); + + received++; + budget--; + } + *cleared = cpu_to_le32(rxaddr); + + return received; +} + +static void +typhoon_fill_free_ring(struct typhoon *tp) +{ + u32 i; + + for(i = 0; i < RXENT_ENTRIES; i++) { + struct rxbuff_ent *rxb = &tp->rxbuffers[i]; + if(rxb->skb) + continue; + if(typhoon_alloc_rx_skb(tp, i) < 0) + break; + } +} + +static int +typhoon_poll(struct napi_struct *napi, int budget) +{ + struct typhoon *tp = container_of(napi, struct typhoon, napi); + struct typhoon_indexes *indexes = tp->indexes; + int work_done; + + rmb(); + if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared) + typhoon_process_response(tp, 0, NULL); + + if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead) + typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared); + + work_done = 0; + + if(indexes->rxHiCleared != indexes->rxHiReady) { + work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady, + &indexes->rxHiCleared, budget); + } + + if(indexes->rxLoCleared != indexes->rxLoReady) { + work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady, + &indexes->rxLoCleared, budget - work_done); + } + + if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) { + /* rxBuff ring is empty, try to fill it. 
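+ * typhoon_rx() already recycles or replaces one buffer per received
+ * packet, so this refill only matters once allocation failures have
+ * drained the free ring completely.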
*/ + typhoon_fill_free_ring(tp); + } + + if (work_done < budget) { + napi_complete(napi); + iowrite32(TYPHOON_INTR_NONE, + tp->ioaddr + TYPHOON_REG_INTR_MASK); + typhoon_post_pci_writes(tp->ioaddr); + } + + return work_done; +} + +static irqreturn_t +typhoon_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct typhoon *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->ioaddr; + u32 intr_status; + + intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); + if(!(intr_status & TYPHOON_INTR_HOST_INT)) + return IRQ_NONE; + + iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); + + if (napi_schedule_prep(&tp->napi)) { + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); + typhoon_post_pci_writes(ioaddr); + __napi_schedule(&tp->napi); + } else { + netdev_err(dev, "Error, poll already scheduled\n"); + } + return IRQ_HANDLED; +} + +static void +typhoon_free_rx_rings(struct typhoon *tp) +{ + u32 i; + + for(i = 0; i < RXENT_ENTRIES; i++) { + struct rxbuff_ent *rxb = &tp->rxbuffers[i]; + if(rxb->skb) { + pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxb->skb); + rxb->skb = NULL; + } + } +} + +static int +typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events) +{ + struct pci_dev *pdev = tp->pdev; + void __iomem *ioaddr = tp->ioaddr; + struct cmd_desc xp_cmd; + int err; + + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS); + xp_cmd.parm1 = events; + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) { + netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n", + err); + return err; + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) { + netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err); + return err; + } + + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0) + return -ETIMEDOUT; + + /* Since we cannot monitor the status of the link while sleeping, + * tell the world it went away. + */ + netif_carrier_off(tp->dev); + + pci_enable_wake(tp->pdev, state, 1); + pci_disable_device(pdev); + return pci_set_power_state(pdev, state); +} + +static int +typhoon_wakeup(struct typhoon *tp, int wait_type) +{ + struct pci_dev *pdev = tp->pdev; + void __iomem *ioaddr = tp->ioaddr; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + /* Post 2.x.x versions of the Sleep Image require a reset before + * we can download the Runtime Image. But let's not make users of + * the old firmware pay for the reset. 
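+ * TYPHOON_WAKEUP_NEEDS_RESET is set at probe time in typhoon_init_one()
+ * when the READ_VERSIONS response identifies a 1.1+ Sleep Image.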
+ */ + iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND); + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 || + (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET)) + return typhoon_reset(ioaddr, wait_type); + + return 0; +} + +static int +typhoon_start_runtime(struct typhoon *tp) +{ + struct net_device *dev = tp->dev; + void __iomem *ioaddr = tp->ioaddr; + struct cmd_desc xp_cmd; + int err; + + typhoon_init_rings(tp); + typhoon_fill_free_ring(tp); + + err = typhoon_download_firmware(tp); + if(err < 0) { + netdev_err(tp->dev, "cannot load runtime on 3XP\n"); + goto error_out; + } + + if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) { + netdev_err(tp->dev, "cannot boot 3XP\n"); + err = -EIO; + goto error_out; + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE); + xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); + xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); + xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + /* Disable IRQ coalescing -- we can reenable it when 3Com gives + * us some more information on how to control it. + */ + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL); + xp_cmd.parm1 = 0; + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT); + xp_cmd.parm1 = tp->xcvr_select; + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE); + xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS); + xp_cmd.parm2 = tp->offload; + xp_cmd.parm3 = tp->offload; + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + typhoon_set_rx_mode(dev); + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + tp->card_state = Running; + smp_wmb(); + + iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE); + iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK); + typhoon_post_pci_writes(ioaddr); + + return 0; + +error_out: + typhoon_reset(ioaddr, WaitNoSleep); + typhoon_free_rx_rings(tp); + typhoon_init_rings(tp); + return err; +} + +static int +typhoon_stop_runtime(struct typhoon *tp, int wait_type) +{ + struct typhoon_indexes *indexes = tp->indexes; + struct transmit_ring *txLo = &tp->txLoRing; + void __iomem *ioaddr = tp->ioaddr; + struct cmd_desc xp_cmd; + int i; + + /* Disable interrupts early, since we can't schedule a poll + * when called with !netif_running(). This will be posted + * when we force the posting of the command. + */ + iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE); + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE); + typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + + /* Wait 1/2 sec for any outstanding transmits to occur + * We'll cleanup after the reset if this times out. 
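+ * The loop below polls TYPHOON_WAIT_TIMEOUT times with a
+ * udelay(TYPHOON_UDELAY) between polls; the two constants are sized
+ * so that the total comes to the half second mentioned above.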
+ */ + for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { + if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite)) + break; + udelay(TYPHOON_UDELAY); + } + + if(i == TYPHOON_WAIT_TIMEOUT) + netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n"); + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE); + typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + + /* save the statistics so when we bring the interface up again, + * the values reported to userspace are correct. + */ + tp->card_state = Sleeping; + smp_wmb(); + typhoon_do_get_stats(tp); + memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats)); + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT); + typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0) + netdev_err(tp->dev, "timed out waiting for 3XP to halt\n"); + + if(typhoon_reset(ioaddr, wait_type) < 0) { + netdev_err(tp->dev, "unable to reset 3XP\n"); + return -ETIMEDOUT; + } + + /* cleanup any outstanding Tx packets */ + if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) { + indexes->txLoCleared = cpu_to_le32(txLo->lastWrite); + typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared); + } + + return 0; +} + +static void +typhoon_tx_timeout(struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + + if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) { + netdev_warn(dev, "could not reset in tx timeout\n"); + goto truly_dead; + } + + /* If we ever start using the Hi ring, it will need cleaning too */ + typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared); + typhoon_free_rx_rings(tp); + + if(typhoon_start_runtime(tp) < 0) { + netdev_err(dev, "could not start runtime in tx timeout\n"); + goto truly_dead; + } + + netif_wake_queue(dev); + return; + +truly_dead: + /* Reset the hardware, and turn off carrier to avoid more timeouts */ + typhoon_reset(tp->ioaddr, NoWait); + netif_carrier_off(dev); +} + +static int +typhoon_open(struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + int err; + + err = typhoon_request_firmware(tp); + if (err) + goto out; + + err = typhoon_wakeup(tp, WaitSleep); + if(err < 0) { + netdev_err(dev, "unable to wakeup device\n"); + goto out_sleep; + } + + err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED, + dev->name, dev); + if(err < 0) + goto out_sleep; + + napi_enable(&tp->napi); + + err = typhoon_start_runtime(tp); + if(err < 0) { + napi_disable(&tp->napi); + goto out_irq; + } + + netif_start_queue(dev); + return 0; + +out_irq: + free_irq(dev->irq, dev); + +out_sleep: + if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + netdev_err(dev, "unable to reboot into sleep img\n"); + typhoon_reset(tp->ioaddr, NoWait); + goto out; + } + + if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) + netdev_err(dev, "unable to go back to sleep\n"); + +out: + return err; +} + +static int +typhoon_close(struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&tp->napi); + + if(typhoon_stop_runtime(tp, WaitSleep) < 0) + netdev_err(dev, "unable to stop runtime\n"); + + /* Make sure there is no irq handler running on a different CPU. 
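+ * free_irq() does not return until any handler still executing on
+ * another CPU has completed, so nothing can touch the rings behind
+ * our back once it returns.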
*/ + free_irq(dev->irq, dev); + + typhoon_free_rx_rings(tp); + typhoon_init_rings(tp); + + if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) + netdev_err(dev, "unable to boot sleep image\n"); + + if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) + netdev_err(dev, "unable to put card to sleep\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int +typhoon_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct typhoon *tp = netdev_priv(dev); + + /* If we're down, resume when we are upped. + */ + if(!netif_running(dev)) + return 0; + + if(typhoon_wakeup(tp, WaitNoSleep) < 0) { + netdev_err(dev, "critical: could not wake up in resume\n"); + goto reset; + } + + if(typhoon_start_runtime(tp) < 0) { + netdev_err(dev, "critical: could not start runtime in resume\n"); + goto reset; + } + + netif_device_attach(dev); + return 0; + +reset: + typhoon_reset(tp->ioaddr, NoWait); + return -EBUSY; +} + +static int +typhoon_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct typhoon *tp = netdev_priv(dev); + struct cmd_desc xp_cmd; + + /* If we're down, we're already suspended. + */ + if(!netif_running(dev)) + return 0; + + /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */ + if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) + netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n"); + + netif_device_detach(dev); + + if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) { + netdev_err(dev, "unable to stop runtime\n"); + goto need_resume; + } + + typhoon_free_rx_rings(tp); + typhoon_init_rings(tp); + + if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + netdev_err(dev, "unable to boot sleep image\n"); + goto need_resume; + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); + xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); + xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); + if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { + netdev_err(dev, "unable to set mac address in suspend\n"); + goto need_resume; + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER); + xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; + if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { + netdev_err(dev, "unable to set rx filter in suspend\n"); + goto need_resume; + } + + if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) { + netdev_err(dev, "unable to put card to sleep\n"); + goto need_resume; + } + + return 0; + +need_resume: + typhoon_resume(pdev); + return -EBUSY; +} +#endif + +static int __devinit +typhoon_test_mmio(struct pci_dev *pdev) +{ + void __iomem *ioaddr = pci_iomap(pdev, 1, 128); + int mode = 0; + u32 val; + + if(!ioaddr) + goto out; + + if(ioread32(ioaddr + TYPHOON_REG_STATUS) != + TYPHOON_STATUS_WAITING_FOR_HOST) + goto out_unmap; + + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE); + + /* Ok, see if we can change our interrupt status register by + * sending ourselves an interrupt. If so, then MMIO works. + * The 50usec delay is arbitrary -- it could probably be smaller. 
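+ * If the self-interrupt bit never shows up in INTR_STATUS, our MMIO
+ * writes are not reaching the card, and the caller falls back to
+ * port IO for all register access.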
+ */ + val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); + if((val & TYPHOON_INTR_SELF) == 0) { + iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT); + ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); + udelay(50); + val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); + if(val & TYPHOON_INTR_SELF) + mode = 1; + } + + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); + iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE); + ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); + +out_unmap: + pci_iounmap(pdev, ioaddr); + +out: + if(!mode) + pr_info("%s: falling back to port IO\n", pci_name(pdev)); + return mode; +} + +static const struct net_device_ops typhoon_netdev_ops = { + .ndo_open = typhoon_open, + .ndo_stop = typhoon_close, + .ndo_start_xmit = typhoon_start_tx, + .ndo_set_multicast_list = typhoon_set_rx_mode, + .ndo_tx_timeout = typhoon_tx_timeout, + .ndo_get_stats = typhoon_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = typhoon_set_mac_address, + .ndo_change_mtu = eth_change_mtu, +}; + +static int __devinit +typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev; + struct typhoon *tp; + int card_id = (int) ent->driver_data; + void __iomem *ioaddr; + void *shared; + dma_addr_t shared_dma; + struct cmd_desc xp_cmd; + struct resp_desc xp_resp[3]; + int err = 0; + const char *err_msg; + + dev = alloc_etherdev(sizeof(*tp)); + if(dev == NULL) { + err_msg = "unable to alloc new net device"; + err = -ENOMEM; + goto error_out; + } + SET_NETDEV_DEV(dev, &pdev->dev); + + err = pci_enable_device(pdev); + if(err < 0) { + err_msg = "unable to enable device"; + goto error_out_dev; + } + + err = pci_set_mwi(pdev); + if(err < 0) { + err_msg = "unable to set MWI"; + goto error_out_disable; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if(err < 0) { + err_msg = "No usable DMA configuration"; + goto error_out_mwi; + } + + /* sanity checks on IO and MMIO BARs + */ + if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { + err_msg = "region #1 not a PCI IO resource, aborting"; + err = -ENODEV; + goto error_out_mwi; + } + if(pci_resource_len(pdev, 0) < 128) { + err_msg = "Invalid PCI IO region size, aborting"; + err = -ENODEV; + goto error_out_mwi; + } + if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + err_msg = "region #1 not a PCI MMIO resource, aborting"; + err = -ENODEV; + goto error_out_mwi; + } + if(pci_resource_len(pdev, 1) < 128) { + err_msg = "Invalid PCI MMIO region size, aborting"; + err = -ENODEV; + goto error_out_mwi; + } + + err = pci_request_regions(pdev, KBUILD_MODNAME); + if(err < 0) { + err_msg = "could not request regions"; + goto error_out_mwi; + } + + /* map our registers + */ + if(use_mmio != 0 && use_mmio != 1) + use_mmio = typhoon_test_mmio(pdev); + + ioaddr = pci_iomap(pdev, use_mmio, 128); + if (!ioaddr) { + err_msg = "cannot remap registers, aborting"; + err = -EIO; + goto error_out_regions; + } + + /* allocate pci dma space for rx and tx descriptor rings + */ + shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared), + &shared_dma); + if(!shared) { + err_msg = "could not allocate DMA memory"; + err = -ENOMEM; + goto error_out_remap; + } + + dev->irq = pdev->irq; + tp = netdev_priv(dev); + tp->shared = shared; + tp->shared_dma = shared_dma; + tp->pdev = pdev; + tp->tx_pdev = pdev; + tp->ioaddr = ioaddr; + tp->tx_ioaddr = ioaddr; + tp->dev = dev; + + /* Init sequence: + * 1) Reset the adapter to clear any bad 
juju + * 2) Reload the sleep image + * 3) Boot the sleep image + * 4) Get the hardware address. + * 5) Put the card to sleep. + */ + if (typhoon_reset(ioaddr, WaitSleep) < 0) { + err_msg = "could not reset 3XP"; + err = -EIO; + goto error_out_dma; + } + + /* Now that we've reset the 3XP and are sure it's not going to + * write all over memory, enable bus mastering, and save our + * state for resuming after a suspend. + */ + pci_set_master(pdev); + pci_save_state(pdev); + + typhoon_init_interface(tp); + typhoon_init_rings(tp); + + if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + err_msg = "cannot boot 3XP sleep image"; + err = -EIO; + goto error_out_reset; + } + + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); + if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { + err_msg = "cannot read MAC address"; + err = -EIO; + goto error_out_reset; + } + + *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); + *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); + + if(!is_valid_ether_addr(dev->dev_addr)) { + err_msg = "Could not obtain valid ethernet address, aborting"; + goto error_out_reset; + } + + /* Read the Sleep Image version last, so the response is valid + * later when we print out the version reported. + */ + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); + if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { + err_msg = "Could not get Sleep Image version"; + goto error_out_reset; + } + + tp->capabilities = typhoon_card_info[card_id].capabilities; + tp->xcvr_select = TYPHOON_XCVR_AUTONEG; + + /* Typhoon 1.0 Sleep Images return one response descriptor to the + * READ_VERSIONS command. Those versions are OK after waking up + * from sleep without needing a reset. Typhoon 1.1+ Sleep Images + * seem to need a little extra help to get started. Since we don't + * know how to nudge it along, just kick it. + */ + if(xp_resp[0].numDesc != 0) + tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; + + if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { + err_msg = "cannot put adapter to sleep"; + err = -EIO; + goto error_out_reset; + } + + /* The chip-specific entries in the device structure. */ + dev->netdev_ops = &typhoon_netdev_ops; + netif_napi_add(dev, &tp->napi, typhoon_poll, 16); + dev->watchdog_timeo = TX_TIMEOUT; + + SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops); + + /* We can handle scatter gather, up to 16 entries, and + * we can do IP checksumming (only version 4, doh...) + * + * There's no way to turn off the RX VLAN offloading and stripping + * on the current 3XP firmware -- it does not respect the offload + * settings -- so we only allow the user to toggle the TX processing. + */ + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HW_VLAN_TX; + dev->features = dev->hw_features | + NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; + + if(register_netdev(dev) < 0) { + err_msg = "unable to register netdev"; + goto error_out_reset; + } + + pci_set_drvdata(pdev, dev); + + netdev_info(dev, "%s at %s 0x%llx, %pM\n", + typhoon_card_info[card_id].name, + use_mmio ? "MMIO" : "IO", + (unsigned long long)pci_resource_start(pdev, use_mmio), + dev->dev_addr); + + /* xp_resp still contains the response to the READ_VERSIONS command. + * For debugging, let the user know what version he has. + */ + if(xp_resp[0].numDesc == 0) { + /* This is the Typhoon 1.0 type Sleep Image, last 16 bits + * of version is Month/Day of build. 
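+ * For example, a parm2 low word of 0x0411 prints as built 04/17/2000.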
+ */ + u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff; + netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n", + monthday >> 8, monthday & 0xff); + } else if(xp_resp[0].numDesc == 2) { + /* This is the Typhoon 1.1+ type Sleep Image + */ + u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); + u8 *ver_string = (u8 *) &xp_resp[1]; + ver_string[25] = 0; + netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n", + sleep_ver >> 24, (sleep_ver >> 12) & 0xfff, + sleep_ver & 0xfff, ver_string); + } else { + netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n", + xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2)); + } + + return 0; + +error_out_reset: + typhoon_reset(ioaddr, NoWait); + +error_out_dma: + pci_free_consistent(pdev, sizeof(struct typhoon_shared), + shared, shared_dma); +error_out_remap: + pci_iounmap(pdev, ioaddr); +error_out_regions: + pci_release_regions(pdev); +error_out_mwi: + pci_clear_mwi(pdev); +error_out_disable: + pci_disable_device(pdev); +error_out_dev: + free_netdev(dev); +error_out: + pr_err("%s: %s\n", pci_name(pdev), err_msg); + return err; +} + +static void __devexit +typhoon_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct typhoon *tp = netdev_priv(dev); + + unregister_netdev(dev); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + typhoon_reset(tp->ioaddr, NoWait); + pci_iounmap(pdev, tp->ioaddr); + pci_free_consistent(pdev, sizeof(struct typhoon_shared), + tp->shared, tp->shared_dma); + pci_release_regions(pdev); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(dev); +} + +static struct pci_driver typhoon_driver = { + .name = KBUILD_MODNAME, + .id_table = typhoon_pci_tbl, + .probe = typhoon_init_one, + .remove = __devexit_p(typhoon_remove_one), +#ifdef CONFIG_PM + .suspend = typhoon_suspend, + .resume = typhoon_resume, +#endif +}; + +static int __init +typhoon_init(void) +{ + return pci_register_driver(&typhoon_driver); +} + +static void __exit +typhoon_cleanup(void) +{ + if (typhoon_fw) + release_firmware(typhoon_fw); + pci_unregister_driver(&typhoon_driver); +} + +module_init(typhoon_init); +module_exit(typhoon_cleanup); diff --git a/drivers/net/ethernet/3com/typhoon.h b/drivers/net/ethernet/3com/typhoon.h new file mode 100644 index 0000000..88187fc --- /dev/null +++ b/drivers/net/ethernet/3com/typhoon.h @@ -0,0 +1,624 @@ +/* typhoon.h: chip info for the 3Com 3CR990 family of controllers */ +/* + Written 2002-2003 by David Dillow + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + This software is available on a public web site. It may enable + cryptographic capabilities of the 3Com hardware, and may be + exported from the United States under License Exception "TSU" + pursuant to 15 C.F.R. Section 740.13(e). + + This work was funded by the National Library of Medicine under + the Department of Energy project number 0274DD06D1 and NLM project + number Y1-LM-2015-01. +*/ + +/* All Typhoon ring positions are specificed in bytes, and point to the + * first "clean" entry in the ring -- ie the next entry we use for whatever + * purpose. 
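+ * Advancing an index is therefore byte arithmetic, roughly
+ * *index = (*index + count * sizeof(desc)) % (entries * sizeof(desc)),
+ * which is what the typhoon_inc_*_index() helpers in typhoon.c do.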
+ */ + +/* The Typhoon basic ring + * ringBase: where this ring lives (our virtual address) + * lastWrite: the next entry we'll use + */ +struct basic_ring { + u8 *ringBase; + u32 lastWrite; +}; + +/* The Typoon transmit ring -- same as a basic ring, plus: + * lastRead: where we're at in regard to cleaning up the ring + * writeRegister: register to use for writing (different for Hi & Lo rings) + */ +struct transmit_ring { + u8 *ringBase; + u32 lastWrite; + u32 lastRead; + int writeRegister; +}; + +/* The host<->Typhoon ring index structure + * This indicates the current positions in the rings + * + * All values must be in little endian format for the 3XP + * + * rxHiCleared: entry we've cleared to in the Hi receive ring + * rxLoCleared: entry we've cleared to in the Lo receive ring + * rxBuffReady: next entry we'll put a free buffer in + * respCleared: entry we've cleared to in the response ring + * + * txLoCleared: entry the NIC has cleared to in the Lo transmit ring + * txHiCleared: entry the NIC has cleared to in the Hi transmit ring + * rxLoReady: entry the NIC has filled to in the Lo receive ring + * rxBuffCleared: entry the NIC has cleared in the free buffer ring + * cmdCleared: entry the NIC has cleared in the command ring + * respReady: entry the NIC has filled to in the response ring + * rxHiReady: entry the NIC has filled to in the Hi receive ring + */ +struct typhoon_indexes { + /* The first four are written by the host, and read by the NIC */ + volatile __le32 rxHiCleared; + volatile __le32 rxLoCleared; + volatile __le32 rxBuffReady; + volatile __le32 respCleared; + + /* The remaining are written by the NIC, and read by the host */ + volatile __le32 txLoCleared; + volatile __le32 txHiCleared; + volatile __le32 rxLoReady; + volatile __le32 rxBuffCleared; + volatile __le32 cmdCleared; + volatile __le32 respReady; + volatile __le32 rxHiReady; +} __packed; + +/* The host<->Typhoon interface + * Our means of communicating where things are + * + * All values must be in little endian format for the 3XP + * + * ringIndex: 64 bit bus address of the index structure + * txLoAddr: 64 bit bus address of the Lo transmit ring + * txLoSize: size (in bytes) of the Lo transmit ring + * txHi*: as above for the Hi priority transmit ring + * rxLo*: as above for the Lo priority receive ring + * rxBuff*: as above for the free buffer ring + * cmd*: as above for the command ring + * resp*: as above for the response ring + * zeroAddr: 64 bit bus address of a zero word (for DMA) + * rxHi*: as above for the Hi Priority receive ring + * + * While there is room for 64 bit addresses, current versions of the 3XP + * only do 32 bit addresses, so the *Hi for each of the above will always + * be zero. + */ +struct typhoon_interface { + __le32 ringIndex; + __le32 ringIndexHi; + __le32 txLoAddr; + __le32 txLoAddrHi; + __le32 txLoSize; + __le32 txHiAddr; + __le32 txHiAddrHi; + __le32 txHiSize; + __le32 rxLoAddr; + __le32 rxLoAddrHi; + __le32 rxLoSize; + __le32 rxBuffAddr; + __le32 rxBuffAddrHi; + __le32 rxBuffSize; + __le32 cmdAddr; + __le32 cmdAddrHi; + __le32 cmdSize; + __le32 respAddr; + __le32 respAddrHi; + __le32 respSize; + __le32 zeroAddr; + __le32 zeroAddrHi; + __le32 rxHiAddr; + __le32 rxHiAddrHi; + __le32 rxHiSize; +} __packed; + +/* The Typhoon transmit/fragment descriptor + * + * A packet is described by a packet descriptor, followed by option descriptors, + * if any, then one or more fragment descriptors. 
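+ * A TSO send with two fragments, for instance, occupies four slots:
+ * a TYPHOON_TX_DESC packet descriptor, a TYPHOON_OPT_DESC carrying
+ * TYPHOON_OPT_TCP_SEG (struct tcpopt_desc), then two
+ * TYPHOON_FRAG_DESC entries.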
+ * + * Packet descriptor: + * flags: Descriptor type + * len:i zero, or length of this packet + * addr*: 8 bytes of opaque data to the firmware -- for skb pointer + * processFlags: Determine offload tasks to perform on this packet. + * + * Fragment descriptor: + * flags: Descriptor type + * len:i length of this fragment + * addr: low bytes of DMA address for this part of the packet + * addrHi: hi bytes of DMA address for this part of the packet + * processFlags: must be zero + * + * TYPHOON_DESC_VALID is not mentioned in their docs, but their Linux + * driver uses it. + */ +struct tx_desc { + u8 flags; +#define TYPHOON_TYPE_MASK 0x07 +#define TYPHOON_FRAG_DESC 0x00 +#define TYPHOON_TX_DESC 0x01 +#define TYPHOON_CMD_DESC 0x02 +#define TYPHOON_OPT_DESC 0x03 +#define TYPHOON_RX_DESC 0x04 +#define TYPHOON_RESP_DESC 0x05 +#define TYPHOON_OPT_TYPE_MASK 0xf0 +#define TYPHOON_OPT_IPSEC 0x00 +#define TYPHOON_OPT_TCP_SEG 0x10 +#define TYPHOON_CMD_RESPOND 0x40 +#define TYPHOON_RESP_ERROR 0x40 +#define TYPHOON_RX_ERROR 0x40 +#define TYPHOON_DESC_VALID 0x80 + u8 numDesc; + __le16 len; + union { + struct { + __le32 addr; + __le32 addrHi; + } frag; + u64 tx_addr; /* opaque for hardware, for TX_DESC */ + }; + __le32 processFlags; +#define TYPHOON_TX_PF_NO_CRC cpu_to_le32(0x00000001) +#define TYPHOON_TX_PF_IP_CHKSUM cpu_to_le32(0x00000002) +#define TYPHOON_TX_PF_TCP_CHKSUM cpu_to_le32(0x00000004) +#define TYPHOON_TX_PF_TCP_SEGMENT cpu_to_le32(0x00000008) +#define TYPHOON_TX_PF_INSERT_VLAN cpu_to_le32(0x00000010) +#define TYPHOON_TX_PF_IPSEC cpu_to_le32(0x00000020) +#define TYPHOON_TX_PF_VLAN_PRIORITY cpu_to_le32(0x00000040) +#define TYPHOON_TX_PF_UDP_CHKSUM cpu_to_le32(0x00000080) +#define TYPHOON_TX_PF_PAD_FRAME cpu_to_le32(0x00000100) +#define TYPHOON_TX_PF_RESERVED cpu_to_le32(0x00000e00) +#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000) +#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000) +#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 +} __packed; + +/* The TCP Segmentation offload option descriptor + * + * flags: descriptor type + * numDesc: must be 1 + * mss_flags: bits 0-11 (little endian) are MSS, 12 is first TSO descriptor + * 13 is list TSO descriptor, set both if only one TSO + * respAddrLo: low bytes of address of the bytesTx field of this descriptor + * bytesTx: total number of bytes in this TSO request + * status: 0 on completion + */ +struct tcpopt_desc { + u8 flags; + u8 numDesc; + __le16 mss_flags; +#define TYPHOON_TSO_FIRST cpu_to_le16(0x1000) +#define TYPHOON_TSO_LAST cpu_to_le16(0x2000) + __le32 respAddrLo; + __le32 bytesTx; + __le32 status; +} __packed; + +/* The IPSEC Offload descriptor + * + * flags: descriptor type + * numDesc: must be 1 + * ipsecFlags: bit 0: 0 -- generate IV, 1 -- use supplied IV + * sa1, sa2: Security Association IDs for this packet + * reserved: set to 0 + */ +struct ipsec_desc { + u8 flags; + u8 numDesc; + __le16 ipsecFlags; +#define TYPHOON_IPSEC_GEN_IV cpu_to_le16(0x0000) +#define TYPHOON_IPSEC_USE_IV cpu_to_le16(0x0001) + __le32 sa1; + __le32 sa2; + __le32 reserved; +} __packed; + +/* The Typhoon receive descriptor (Updated by NIC) + * + * flags: Descriptor type, error indication + * numDesc: Always zero + * frameLen: the size of the packet received + * addr: low 32 bytes of the virtual addr passed in for this buffer + * addrHi: high 32 bytes of the virtual addr passed in for this buffer + * rxStatus: Error if set in flags, otherwise result of offload processing + * filterResults: results of filtering on packet, not used + * ipsecResults: Results of 
IPSEC processing + * vlanTag: the 801.2q TCI from the packet + */ +struct rx_desc { + u8 flags; + u8 numDesc; + __le16 frameLen; + u32 addr; /* opaque, comes from virtAddr */ + u32 addrHi; /* opaque, comes from virtAddrHi */ + __le32 rxStatus; +#define TYPHOON_RX_ERR_INTERNAL cpu_to_le32(0x00000000) +#define TYPHOON_RX_ERR_FIFO_UNDERRUN cpu_to_le32(0x00000001) +#define TYPHOON_RX_ERR_BAD_SSD cpu_to_le32(0x00000002) +#define TYPHOON_RX_ERR_RUNT cpu_to_le32(0x00000003) +#define TYPHOON_RX_ERR_CRC cpu_to_le32(0x00000004) +#define TYPHOON_RX_ERR_OVERSIZE cpu_to_le32(0x00000005) +#define TYPHOON_RX_ERR_ALIGN cpu_to_le32(0x00000006) +#define TYPHOON_RX_ERR_DRIBBLE cpu_to_le32(0x00000007) +#define TYPHOON_RX_PROTO_MASK cpu_to_le32(0x00000003) +#define TYPHOON_RX_PROTO_UNKNOWN cpu_to_le32(0x00000000) +#define TYPHOON_RX_PROTO_IP cpu_to_le32(0x00000001) +#define TYPHOON_RX_PROTO_IPX cpu_to_le32(0x00000002) +#define TYPHOON_RX_VLAN cpu_to_le32(0x00000004) +#define TYPHOON_RX_IP_FRAG cpu_to_le32(0x00000008) +#define TYPHOON_RX_IPSEC cpu_to_le32(0x00000010) +#define TYPHOON_RX_IP_CHK_FAIL cpu_to_le32(0x00000020) +#define TYPHOON_RX_TCP_CHK_FAIL cpu_to_le32(0x00000040) +#define TYPHOON_RX_UDP_CHK_FAIL cpu_to_le32(0x00000080) +#define TYPHOON_RX_IP_CHK_GOOD cpu_to_le32(0x00000100) +#define TYPHOON_RX_TCP_CHK_GOOD cpu_to_le32(0x00000200) +#define TYPHOON_RX_UDP_CHK_GOOD cpu_to_le32(0x00000400) + __le16 filterResults; +#define TYPHOON_RX_FILTER_MASK cpu_to_le16(0x7fff) +#define TYPHOON_RX_FILTERED cpu_to_le16(0x8000) + __le16 ipsecResults; +#define TYPHOON_RX_OUTER_AH_GOOD cpu_to_le16(0x0001) +#define TYPHOON_RX_OUTER_ESP_GOOD cpu_to_le16(0x0002) +#define TYPHOON_RX_INNER_AH_GOOD cpu_to_le16(0x0004) +#define TYPHOON_RX_INNER_ESP_GOOD cpu_to_le16(0x0008) +#define TYPHOON_RX_OUTER_AH_FAIL cpu_to_le16(0x0010) +#define TYPHOON_RX_OUTER_ESP_FAIL cpu_to_le16(0x0020) +#define TYPHOON_RX_INNER_AH_FAIL cpu_to_le16(0x0040) +#define TYPHOON_RX_INNER_ESP_FAIL cpu_to_le16(0x0080) +#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100) +#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200) + __be32 vlanTag; +} __packed; + +/* The Typhoon free buffer descriptor, used to give a buffer to the NIC + * + * physAddr: low 32 bits of the bus address of the buffer + * physAddrHi: high 32 bits of the bus address of the buffer, always zero + * virtAddr: low 32 bits of the skb address + * virtAddrHi: high 32 bits of the skb address, always zero + * + * the virt* address is basically two 32 bit cookies, just passed back + * from the NIC + */ +struct rx_free { + __le32 physAddr; + __le32 physAddrHi; + u32 virtAddr; + u32 virtAddrHi; +} __packed; + +/* The Typhoon command descriptor, used for commands and responses + * + * flags: descriptor type + * numDesc: number of descriptors following in this command/response, + * ie, zero for a one descriptor command + * cmd: the command + * seqNo: sequence number (unused) + * parm1: use varies by command + * parm2: use varies by command + * parm3: use varies by command + */ +struct cmd_desc { + u8 flags; + u8 numDesc; + __le16 cmd; +#define TYPHOON_CMD_TX_ENABLE cpu_to_le16(0x0001) +#define TYPHOON_CMD_TX_DISABLE cpu_to_le16(0x0002) +#define TYPHOON_CMD_RX_ENABLE cpu_to_le16(0x0003) +#define TYPHOON_CMD_RX_DISABLE cpu_to_le16(0x0004) +#define TYPHOON_CMD_SET_RX_FILTER cpu_to_le16(0x0005) +#define TYPHOON_CMD_READ_STATS cpu_to_le16(0x0007) +#define TYPHOON_CMD_XCVR_SELECT cpu_to_le16(0x0013) +#define TYPHOON_CMD_SET_MAX_PKT_SIZE cpu_to_le16(0x001a) +#define TYPHOON_CMD_READ_MEDIA_STATUS 
cpu_to_le16(0x001b) +#define TYPHOON_CMD_GOTO_SLEEP cpu_to_le16(0x0023) +#define TYPHOON_CMD_SET_MULTICAST_HASH cpu_to_le16(0x0025) +#define TYPHOON_CMD_SET_MAC_ADDRESS cpu_to_le16(0x0026) +#define TYPHOON_CMD_READ_MAC_ADDRESS cpu_to_le16(0x0027) +#define TYPHOON_CMD_VLAN_TYPE_WRITE cpu_to_le16(0x002b) +#define TYPHOON_CMD_CREATE_SA cpu_to_le16(0x0034) +#define TYPHOON_CMD_DELETE_SA cpu_to_le16(0x0035) +#define TYPHOON_CMD_READ_VERSIONS cpu_to_le16(0x0043) +#define TYPHOON_CMD_IRQ_COALESCE_CTRL cpu_to_le16(0x0045) +#define TYPHOON_CMD_ENABLE_WAKE_EVENTS cpu_to_le16(0x0049) +#define TYPHOON_CMD_SET_OFFLOAD_TASKS cpu_to_le16(0x004f) +#define TYPHOON_CMD_HELLO_RESP cpu_to_le16(0x0057) +#define TYPHOON_CMD_HALT cpu_to_le16(0x005d) +#define TYPHOON_CMD_READ_IPSEC_INFO cpu_to_le16(0x005e) +#define TYPHOON_CMD_GET_IPSEC_ENABLE cpu_to_le16(0x0067) +#define TYPHOON_CMD_GET_CMD_LVL cpu_to_le16(0x0069) + u16 seqNo; + __le16 parm1; + __le32 parm2; + __le32 parm3; +} __packed; + +/* The Typhoon response descriptor, see command descriptor for details + */ +struct resp_desc { + u8 flags; + u8 numDesc; + __le16 cmd; + __le16 seqNo; + __le16 parm1; + __le32 parm2; + __le32 parm3; +} __packed; + +#define INIT_COMMAND_NO_RESPONSE(x, command) \ + do { struct cmd_desc *_ptr = (x); \ + memset(_ptr, 0, sizeof(struct cmd_desc)); \ + _ptr->flags = TYPHOON_CMD_DESC | TYPHOON_DESC_VALID; \ + _ptr->cmd = command; \ + } while(0) + +/* We set seqNo to 1 if we're expecting a response from this command */ +#define INIT_COMMAND_WITH_RESPONSE(x, command) \ + do { struct cmd_desc *_ptr = (x); \ + memset(_ptr, 0, sizeof(struct cmd_desc)); \ + _ptr->flags = TYPHOON_CMD_RESPOND | TYPHOON_CMD_DESC; \ + _ptr->flags |= TYPHOON_DESC_VALID; \ + _ptr->cmd = command; \ + _ptr->seqNo = 1; \ + } while(0) + +/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1) + */ +#define TYPHOON_RX_FILTER_DIRECTED cpu_to_le16(0x0001) +#define TYPHOON_RX_FILTER_ALL_MCAST cpu_to_le16(0x0002) +#define TYPHOON_RX_FILTER_BROADCAST cpu_to_le16(0x0004) +#define TYPHOON_RX_FILTER_PROMISCOUS cpu_to_le16(0x0008) +#define TYPHOON_RX_FILTER_MCAST_HASH cpu_to_le16(0x0010) + +/* TYPHOON_CMD_READ_STATS response format + */ +struct stats_resp { + u8 flags; + u8 numDesc; + __le16 cmd; + __le16 seqNo; + __le16 unused; + __le32 txPackets; + __le64 txBytes; + __le32 txDeferred; + __le32 txLateCollisions; + __le32 txCollisions; + __le32 txCarrierLost; + __le32 txMultipleCollisions; + __le32 txExcessiveCollisions; + __le32 txFifoUnderruns; + __le32 txMulticastTxOverflows; + __le32 txFiltered; + __le32 rxPacketsGood; + __le64 rxBytesGood; + __le32 rxFifoOverruns; + __le32 BadSSD; + __le32 rxCrcErrors; + __le32 rxOversized; + __le32 rxBroadcast; + __le32 rxMulticast; + __le32 rxOverflow; + __le32 rxFiltered; + __le32 linkStatus; +#define TYPHOON_LINK_STAT_MASK cpu_to_le32(0x00000001) +#define TYPHOON_LINK_GOOD cpu_to_le32(0x00000001) +#define TYPHOON_LINK_BAD cpu_to_le32(0x00000000) +#define TYPHOON_LINK_SPEED_MASK cpu_to_le32(0x00000002) +#define TYPHOON_LINK_100MBPS cpu_to_le32(0x00000002) +#define TYPHOON_LINK_10MBPS cpu_to_le32(0x00000000) +#define TYPHOON_LINK_DUPLEX_MASK cpu_to_le32(0x00000004) +#define TYPHOON_LINK_FULL_DUPLEX cpu_to_le32(0x00000004) +#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) + __le32 unused2; + __le32 unused3; +} __packed; + +/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) + */ +#define TYPHOON_XCVR_10HALF cpu_to_le16(0x0000) +#define TYPHOON_XCVR_10FULL cpu_to_le16(0x0001) +#define TYPHOON_XCVR_100HALF cpu_to_le16(0x0002) 
+#define TYPHOON_XCVR_100FULL cpu_to_le16(0x0003) +#define TYPHOON_XCVR_AUTONEG cpu_to_le16(0x0004) + +/* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1) + */ +#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE cpu_to_le16(0x0004) +#define TYPHOON_MEDIA_STAT_COLLISION_DETECT cpu_to_le16(0x0010) +#define TYPHOON_MEDIA_STAT_CARRIER_SENSE cpu_to_le16(0x0020) +#define TYPHOON_MEDIA_STAT_POLARITY_REV cpu_to_le16(0x0400) +#define TYPHOON_MEDIA_STAT_NO_LINK cpu_to_le16(0x0800) + +/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1) + */ +#define TYPHOON_MCAST_HASH_DISABLE cpu_to_le16(0x0000) +#define TYPHOON_MCAST_HASH_ENABLE cpu_to_le16(0x0001) +#define TYPHOON_MCAST_HASH_SET cpu_to_le16(0x0002) + +/* TYPHOON_CMD_CREATE_SA descriptor and settings + */ +struct sa_descriptor { + u8 flags; + u8 numDesc; + u16 cmd; + u16 seqNo; + u16 mode; +#define TYPHOON_SA_MODE_NULL cpu_to_le16(0x0000) +#define TYPHOON_SA_MODE_AH cpu_to_le16(0x0001) +#define TYPHOON_SA_MODE_ESP cpu_to_le16(0x0002) + u8 hashFlags; +#define TYPHOON_SA_HASH_ENABLE 0x01 +#define TYPHOON_SA_HASH_SHA1 0x02 +#define TYPHOON_SA_HASH_MD5 0x04 + u8 direction; +#define TYPHOON_SA_DIR_RX 0x00 +#define TYPHOON_SA_DIR_TX 0x01 + u8 encryptionFlags; +#define TYPHOON_SA_ENCRYPT_ENABLE 0x01 +#define TYPHOON_SA_ENCRYPT_DES 0x02 +#define TYPHOON_SA_ENCRYPT_3DES 0x00 +#define TYPHOON_SA_ENCRYPT_3DES_2KEY 0x00 +#define TYPHOON_SA_ENCRYPT_3DES_3KEY 0x04 +#define TYPHOON_SA_ENCRYPT_CBC 0x08 +#define TYPHOON_SA_ENCRYPT_ECB 0x00 + u8 specifyIndex; +#define TYPHOON_SA_SPECIFY_INDEX 0x01 +#define TYPHOON_SA_GENERATE_INDEX 0x00 + u32 SPI; + u32 destAddr; + u32 destMask; + u8 integKey[20]; + u8 confKey[24]; + u32 index; + u32 unused; + u32 unused2; +} __packed; + +/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) + * This is all for IPv4. 
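+ * typhoon_start_runtime() writes the same mask for both directions
+ * (cmd.parm2 and cmd.parm3); the driver's default, built in
+ * typhoon_init_interface(), enables the IP/TCP/UDP checksum, TCP
+ * segmentation and VLAN bits.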
+ */ +#define TYPHOON_OFFLOAD_TCP_CHKSUM cpu_to_le32(0x00000002) +#define TYPHOON_OFFLOAD_UDP_CHKSUM cpu_to_le32(0x00000004) +#define TYPHOON_OFFLOAD_IP_CHKSUM cpu_to_le32(0x00000008) +#define TYPHOON_OFFLOAD_IPSEC cpu_to_le32(0x00000010) +#define TYPHOON_OFFLOAD_BCAST_THROTTLE cpu_to_le32(0x00000020) +#define TYPHOON_OFFLOAD_DHCP_PREVENT cpu_to_le32(0x00000040) +#define TYPHOON_OFFLOAD_VLAN cpu_to_le32(0x00000080) +#define TYPHOON_OFFLOAD_FILTERING cpu_to_le32(0x00000100) +#define TYPHOON_OFFLOAD_TCP_SEGMENT cpu_to_le32(0x00000200) + +/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1) + */ +#define TYPHOON_WAKE_MAGIC_PKT cpu_to_le16(0x01) +#define TYPHOON_WAKE_LINK_EVENT cpu_to_le16(0x02) +#define TYPHOON_WAKE_ICMP_ECHO cpu_to_le16(0x04) +#define TYPHOON_WAKE_ARP cpu_to_le16(0x08) + +/* These are used to load the firmware image on the NIC + */ +struct typhoon_file_header { + u8 tag[8]; + __le32 version; + __le32 numSections; + __le32 startAddr; + __le32 hmacDigest[5]; +} __packed; + +struct typhoon_section_header { + __le32 len; + u16 checksum; + u16 reserved; + __le32 startAddr; +} __packed; + +/* The Typhoon Register offsets + */ +#define TYPHOON_REG_SOFT_RESET 0x00 +#define TYPHOON_REG_INTR_STATUS 0x04 +#define TYPHOON_REG_INTR_ENABLE 0x08 +#define TYPHOON_REG_INTR_MASK 0x0c +#define TYPHOON_REG_SELF_INTERRUPT 0x10 +#define TYPHOON_REG_HOST2ARM7 0x14 +#define TYPHOON_REG_HOST2ARM6 0x18 +#define TYPHOON_REG_HOST2ARM5 0x1c +#define TYPHOON_REG_HOST2ARM4 0x20 +#define TYPHOON_REG_HOST2ARM3 0x24 +#define TYPHOON_REG_HOST2ARM2 0x28 +#define TYPHOON_REG_HOST2ARM1 0x2c +#define TYPHOON_REG_HOST2ARM0 0x30 +#define TYPHOON_REG_ARM2HOST3 0x34 +#define TYPHOON_REG_ARM2HOST2 0x38 +#define TYPHOON_REG_ARM2HOST1 0x3c +#define TYPHOON_REG_ARM2HOST0 0x40 + +#define TYPHOON_REG_BOOT_DATA_LO TYPHOON_REG_HOST2ARM5 +#define TYPHOON_REG_BOOT_DATA_HI TYPHOON_REG_HOST2ARM4 +#define TYPHOON_REG_BOOT_DEST_ADDR TYPHOON_REG_HOST2ARM3 +#define TYPHOON_REG_BOOT_CHECKSUM TYPHOON_REG_HOST2ARM2 +#define TYPHOON_REG_BOOT_LENGTH TYPHOON_REG_HOST2ARM1 + +#define TYPHOON_REG_DOWNLOAD_BOOT_ADDR TYPHOON_REG_HOST2ARM1 +#define TYPHOON_REG_DOWNLOAD_HMAC_0 TYPHOON_REG_HOST2ARM2 +#define TYPHOON_REG_DOWNLOAD_HMAC_1 TYPHOON_REG_HOST2ARM3 +#define TYPHOON_REG_DOWNLOAD_HMAC_2 TYPHOON_REG_HOST2ARM4 +#define TYPHOON_REG_DOWNLOAD_HMAC_3 TYPHOON_REG_HOST2ARM5 +#define TYPHOON_REG_DOWNLOAD_HMAC_4 TYPHOON_REG_HOST2ARM6 + +#define TYPHOON_REG_BOOT_RECORD_ADDR_HI TYPHOON_REG_HOST2ARM2 +#define TYPHOON_REG_BOOT_RECORD_ADDR_LO TYPHOON_REG_HOST2ARM1 + +#define TYPHOON_REG_TX_LO_READY TYPHOON_REG_HOST2ARM3 +#define TYPHOON_REG_CMD_READY TYPHOON_REG_HOST2ARM2 +#define TYPHOON_REG_TX_HI_READY TYPHOON_REG_HOST2ARM1 + +#define TYPHOON_REG_COMMAND TYPHOON_REG_HOST2ARM0 +#define TYPHOON_REG_HEARTBEAT TYPHOON_REG_ARM2HOST3 +#define TYPHOON_REG_STATUS TYPHOON_REG_ARM2HOST0 + +/* 3XP Reset values (TYPHOON_REG_SOFT_RESET) + */ +#define TYPHOON_RESET_ALL 0x7f +#define TYPHOON_RESET_NONE 0x00 + +/* 3XP irq bits (TYPHOON_REG_INTR{STATUS,ENABLE,MASK}) + * + * Some of these came from OpenBSD, as the 3Com docs have it wrong + * (INTR_SELF) or don't list it at all (INTR_*_ABORT) + * + * Enabling irqs on the Heartbeat reg (ArmToHost3) gets you an irq + * about every 8ms, so don't do it. 
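The command/response machinery above is easier to follow with a short usage sketch. The fragment below is an illustrative editor's addition, not part of this patch: it builds a SET_OFFLOAD_TASKS command from the macros and bits defined above. The struct typhoon handle and typhoon_issue_command() helper are assumed stand-ins for the driver's real submission path in typhoon.c.

/* Sketch only: enable IPv4 IP/TCP checksum offload on the 3XP.
 * typhoon_issue_command() is a hypothetical stand-in here. */
static int example_set_offload_tasks(struct typhoon *tp)
{
        struct cmd_desc xp_cmd;

        /* Valid command descriptor, response requested (seqNo set to 1) */
        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
        xp_cmd.parm2 = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM; /* Tx */
        xp_cmd.parm3 = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM; /* Rx */

        return typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}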
+ */ +#define TYPHOON_INTR_HOST_INT 0x00000001 +#define TYPHOON_INTR_ARM2HOST0 0x00000002 +#define TYPHOON_INTR_ARM2HOST1 0x00000004 +#define TYPHOON_INTR_ARM2HOST2 0x00000008 +#define TYPHOON_INTR_ARM2HOST3 0x00000010 +#define TYPHOON_INTR_DMA0 0x00000020 +#define TYPHOON_INTR_DMA1 0x00000040 +#define TYPHOON_INTR_DMA2 0x00000080 +#define TYPHOON_INTR_DMA3 0x00000100 +#define TYPHOON_INTR_MASTER_ABORT 0x00000200 +#define TYPHOON_INTR_TARGET_ABORT 0x00000400 +#define TYPHOON_INTR_SELF 0x00000800 +#define TYPHOON_INTR_RESERVED 0xfffff000 + +#define TYPHOON_INTR_BOOTCMD TYPHOON_INTR_ARM2HOST0 + +#define TYPHOON_INTR_ENABLE_ALL 0xffffffef +#define TYPHOON_INTR_ALL 0xffffffff +#define TYPHOON_INTR_NONE 0x00000000 + +/* The commands for the 3XP chip (TYPHOON_REG_COMMAND) + */ +#define TYPHOON_BOOTCMD_BOOT 0x00 +#define TYPHOON_BOOTCMD_WAKEUP 0xfa +#define TYPHOON_BOOTCMD_DNLD_COMPLETE 0xfb +#define TYPHOON_BOOTCMD_SEG_AVAILABLE 0xfc +#define TYPHOON_BOOTCMD_RUNTIME_IMAGE 0xfd +#define TYPHOON_BOOTCMD_REG_BOOT_RECORD 0xff + +/* 3XP Status values (TYPHOON_REG_STATUS) + */ +#define TYPHOON_STATUS_WAITING_FOR_BOOT 0x07 +#define TYPHOON_STATUS_SECOND_INIT 0x08 +#define TYPHOON_STATUS_RUNNING 0x09 +#define TYPHOON_STATUS_WAITING_FOR_HOST 0x0d +#define TYPHOON_STATUS_WAITING_FOR_SEGMENT 0x10 +#define TYPHOON_STATUS_SLEEPING 0x11 +#define TYPHOON_STATUS_HALTED 0x14 diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index d59e4f2..18193ec 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -11,4 +11,6 @@ menuconfig ETHERNET if ETHERNET +source "drivers/net/ethernet/3com/Kconfig" + endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 0d21dda..07766ba 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -1,3 +1,5 @@ # # Makefile for the Linux network Ethernet device drivers. # + +obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c deleted file mode 100644 index 34c5e1c..0000000 --- a/drivers/net/pcmcia/3c574_cs.c +++ /dev/null @@ -1,1181 +0,0 @@ -/* 3c574.c: A PCMCIA ethernet driver for the 3com 3c574 "RoadRunner". - - Written 1993-1998 by - Donald Becker, becker@scyld.com, (driver core) and - David Hinds, dahinds@users.sourceforge.net (from his PC card code). - Locking fixes (C) Copyright 2003 Red Hat Inc - - This software may be used and distributed according to the terms of - the GNU General Public License, incorporated herein by reference. - - This driver derives from Donald Becker's 3c509 core, which has the - following copyright: - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - -*/ - -/* - Theory of Operation - -I. Board Compatibility - -This device driver is designed for the 3Com 3c574 PC card Fast Ethernet -Adapter. - -II. Board-specific settings - -None -- PC cards are autoconfigured. - -III. Driver operation - -The 3c574 uses a Boomerang-style interface, without the bus-master capability. -See the Boomerang driver and documentation for most details. - -IV. Notes and chip documentation. - -Two added registers are used to enhance PIO performance, RunnerRdCtrl and -RunnerWrCtrl. These are 11 bit down-counters that are preloaded with the -count of word (16 bits) reads or writes the driver is about to do to the Rx -or Tx FIFO. The chip is then able to hide the internal-PCI-bus to PC-card -translation latency by buffering the I/O operations with an 8 word FIFO. 
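An illustrative sketch (editor's addition, not part of this patch) of the preload sequence just described; example_pio_tx_burst() is hypothetical, and RunnerWrCtrl/TX_FIFO are the Window 1 offsets defined further down.

/* Sketch only: preload the 11-bit RunnerWrCtrl down-counter with the
 * number of 16-bit words about to be written, then burst them into the
 * Tx FIFO so the bridge can hide the translation latency. */
static void example_pio_tx_burst(unsigned int ioaddr, const u16 *buf, int words)
{
        int i;

        outw(words, ioaddr + RunnerWrCtrl);
        for (i = 0; i < words; i++)
                outw(buf[i], ioaddr + TX_FIFO);
        /* As noted below, no other chip access is permitted meanwhile. */
}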
-Note: No other chip accesses are permitted when this buffer is used.
-
-A second enhancement is that both attribute and common memory space
-0x0800-0x0fff can be translated to the PIO FIFO.  Thus memory operations (faster
-with *some* PCcard bridges) may be used instead of I/O operations.
-This is enabled by setting the 0x10 bit in the PCMCIA LAN COR.
-
-Some slow PC card bridges work better if they never see a WAIT signal.
-This is configured by setting the 0x20 bit in the PCMCIA LAN COR.
-Only do this after testing that it is reliable and improves performance.
-
-The upper five bits of RunnerRdCtrl are used to window into PCcard
-configuration space registers.  Window 0 is the regular Boomerang/Odie
-register set, 1-5 are various PC card control registers, and 16-31 are
-the (reversed!) CIS table.
-
-A final note: writing the InternalConfig register in window 3 with an
-invalid ramWidth is Very Bad.
-
-V. References
-
-http://www.scyld.com/expert/NWay.html
-http://www.national.com/opf/DP/DP83840A.html
-
-Thanks to Terry Murphy of 3Com for providing development information for
-earlier 3Com products.
-
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/bitops.h>
-#include <linux/mii.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ciscode.h>
-#include <pcmcia/ds.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-/*====================================================================*/
-
-/* Module parameters */
-
-MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
-MODULE_DESCRIPTION("3Com 3c574 series PCMCIA ethernet driver");
-MODULE_LICENSE("GPL");
-
-#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
-
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-INT_MODULE_PARM(max_interrupt_work, 32);
-
-/* Force full duplex modes? */
-INT_MODULE_PARM(full_duplex, 0);
-
-/* Autodetect link polarity reversal? */
-INT_MODULE_PARM(auto_polarity, 1);
-
-
-/*====================================================================*/
-
-/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT  ((800*HZ)/1000)
-
-/* To minimize the size of the driver source and make the driver more
-   readable not all constants are symbolically defined.
-   You'll need the manual if you want to understand driver details anyway. */
-/* Offsets from base I/O address. */
-#define EL3_DATA        0x00
-#define EL3_CMD         0x0e
-#define EL3_STATUS      0x0e
-
-#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
-
-/* The top five bits written to EL3_CMD are a command, the lower
-   11 bits are the parameter, if applicable. */
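A worked example of this encoding (editor's addition, not part of this patch; el3_encode_cmd() is a hypothetical helper, and the opcodes themselves sit in the enum that follows):

/* A command word is (opcode << 11) | parameter: the 5-bit opcode sits
 * in bits 15..11, the 11-bit parameter in bits 10..0.  EL3WINDOW(4)
 * above, for instance, writes (1 << 11) + 4 == 0x0804: SelectWindow,
 * window 4. */
static inline u16 el3_encode_cmd(u16 opcode, u16 param)
{
        return (u16)((opcode << 11) | (param & 0x07ff));
}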
-enum el3_cmds {
-	TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
-	RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
-	TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
-	FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
-	SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
-	SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
-	StatsDisable = 22<<11, StopCoax = 23<<11,
-};
-
-enum elxl_status {
-	IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
-	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
-	IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 };
-
-/* The SetRxFilter command accepts the following classes: */
-enum RxFilter {
-	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
-};
-
-enum Window0 {
-	Wn0EepromCmd = 10, Wn0EepromData = 12, /* EEPROM command/address, data. */
-	IntrStatus=0x0E,	/* Valid in all windows. */
-};
-/* These assume the larger EEPROM. */
-enum Win0_EEPROM_cmds {
-	EEPROM_Read = 0x200, EEPROM_WRITE = 0x100, EEPROM_ERASE = 0x300,
-	EEPROM_EWENB = 0x30,	/* Enable erasing/writing for 10 msec. */
-	EEPROM_EWDIS = 0x00,	/* Disable EWENB before 10 msec timeout. */
-};
-
-/* Register window 1 offsets, the window used in normal operation.
-   On the "Odie" this window is always mapped at offsets 0x10-0x1f.
-   Except for TxFree, which is overlapped by RunnerWrCtrl. */
-enum Window1 {
-	TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
-	RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
-	TxFree = 0x0C, /* Remaining free bytes in Tx buffer. */
-	RunnerRdCtrl = 0x16, RunnerWrCtrl = 0x1c,
-};
-
-enum Window3 {		/* Window 3: MAC/config bits. */
-	Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
-};
-enum wn3_config {
-	Ram_size = 7,
-	Ram_width = 8,
-	Ram_speed = 0x30,
-	Rom_size = 0xc0,
-	Ram_split_shift = 16,
-	Ram_split = 3 << Ram_split_shift,
-	Xcvr_shift = 20,
-	Xcvr = 7 << Xcvr_shift,
-	Autoselect = 0x1000000,
-};
-
-enum Window4 {		/* Window 4: Xcvr/media bits. */
-	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
-};
-
-#define MEDIA_TP	0x00C0	/* Enable link beat and jabber for 10baseT. */
-
-struct el3_private {
-	struct pcmcia_device *p_dev;
-	u16 advertising, partner;		/* NWay media advertisement */
-	unsigned char phys;			/* MII device address */
-	unsigned int autoselect:1, default_media:3;	/* Read from the EEPROM/Wn3_Config. */
-	/* for transceiver monitoring */
-	struct timer_list media;
-	unsigned short media_status;
-	unsigned short fast_poll;
-	unsigned long last_irq;
-	spinlock_t window_lock;			/* Guards the Window selection */
-};
-
-/* Set iff a MII transceiver on any interface requires mdio preamble.
-   This is only set with the original DP83840 on older 3c905 boards, so the
-   extra code size of a per-interface flag is not worthwhile. */
-static char mii_preamble_required = 0;
-
-/* Index of functions.
*/ - -static int tc574_config(struct pcmcia_device *link); -static void tc574_release(struct pcmcia_device *link); - -static void mdio_sync(unsigned int ioaddr, int bits); -static int mdio_read(unsigned int ioaddr, int phy_id, int location); -static void mdio_write(unsigned int ioaddr, int phy_id, int location, - int value); -static unsigned short read_eeprom(unsigned int ioaddr, int index); -static void tc574_wait_for_completion(struct net_device *dev, int cmd); - -static void tc574_reset(struct net_device *dev); -static void media_check(unsigned long arg); -static int el3_open(struct net_device *dev); -static netdev_tx_t el3_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t el3_interrupt(int irq, void *dev_id); -static void update_stats(struct net_device *dev); -static struct net_device_stats *el3_get_stats(struct net_device *dev); -static int el3_rx(struct net_device *dev, int worklimit); -static int el3_close(struct net_device *dev); -static void el3_tx_timeout(struct net_device *dev); -static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static void set_rx_mode(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); - -static void tc574_detach(struct pcmcia_device *p_dev); - -/* - tc574_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. -*/ -static const struct net_device_ops el3_netdev_ops = { - .ndo_open = el3_open, - .ndo_stop = el3_close, - .ndo_start_xmit = el3_start_xmit, - .ndo_tx_timeout = el3_tx_timeout, - .ndo_get_stats = el3_get_stats, - .ndo_do_ioctl = el3_ioctl, - .ndo_set_multicast_list = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int tc574_probe(struct pcmcia_device *link) -{ - struct el3_private *lp; - struct net_device *dev; - - dev_dbg(&link->dev, "3c574_attach()\n"); - - /* Create the PC card device object. 
*/ - dev = alloc_etherdev(sizeof(struct el3_private)); - if (!dev) - return -ENOMEM; - lp = netdev_priv(dev); - link->priv = dev; - lp->p_dev = link; - - spin_lock_init(&lp->window_lock); - link->resource[0]->end = 32; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; - link->config_flags |= CONF_ENABLE_IRQ; - link->config_index = 1; - - dev->netdev_ops = &el3_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - - return tc574_config(link); -} - -static void tc574_detach(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - dev_dbg(&link->dev, "3c574_detach()\n"); - - unregister_netdev(dev); - - tc574_release(link); - - free_netdev(dev); -} /* tc574_detach */ - -static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; - -static int tc574_config(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - struct el3_private *lp = netdev_priv(dev); - int ret, i, j; - unsigned int ioaddr; - __be16 *phys_addr; - char *cardname; - __u32 config; - u8 *buf; - size_t len; - - phys_addr = (__be16 *)dev->dev_addr; - - dev_dbg(&link->dev, "3c574_config()\n"); - - link->io_lines = 16; - - for (i = j = 0; j < 0x400; j += 0x20) { - link->resource[0]->start = j ^ 0x300; - i = pcmcia_request_io(link); - if (i == 0) - break; - } - if (i != 0) - goto failed; - - ret = pcmcia_request_irq(link, el3_interrupt); - if (ret) - goto failed; - - ret = pcmcia_enable_device(link); - if (ret) - goto failed; - - dev->irq = link->irq; - dev->base_addr = link->resource[0]->start; - - ioaddr = dev->base_addr; - - /* The 3c574 normally uses an EEPROM for configuration info, including - the hardware address. The future products may include a modem chip - and put the address in the CIS. */ - - len = pcmcia_get_tuple(link, 0x88, &buf); - if (buf && len >= 6) { - for (i = 0; i < 3; i++) - phys_addr[i] = htons(le16_to_cpu(buf[i * 2])); - kfree(buf); - } else { - kfree(buf); /* 0 < len < 6 */ - EL3WINDOW(0); - for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); - if (phys_addr[0] == htons(0x6060)) { - pr_notice("IO port conflict at 0x%03lx-0x%03lx\n", - dev->base_addr, dev->base_addr+15); - goto failed; - } - } - if (link->prod_id[1]) - cardname = link->prod_id[1]; - else - cardname = "3Com 3c574"; - - { - u_char mcr; - outw(2<<11, ioaddr + RunnerRdCtrl); - mcr = inb(ioaddr + 2); - outw(0<<11, ioaddr + RunnerRdCtrl); - pr_info(" ASIC rev %d,", mcr>>3); - EL3WINDOW(3); - config = inl(ioaddr + Wn3_Config); - lp->default_media = (config & Xcvr) >> Xcvr_shift; - lp->autoselect = config & Autoselect ? 
1 : 0; - } - - init_timer(&lp->media); - - { - int phy; - - /* Roadrunner only: Turn on the MII transceiver */ - outw(0x8040, ioaddr + Wn3_Options); - mdelay(1); - outw(0xc040, ioaddr + Wn3_Options); - tc574_wait_for_completion(dev, TxReset); - tc574_wait_for_completion(dev, RxReset); - mdelay(1); - outw(0x8040, ioaddr + Wn3_Options); - - EL3WINDOW(4); - for (phy = 1; phy <= 32; phy++) { - int mii_status; - mdio_sync(ioaddr, 32); - mii_status = mdio_read(ioaddr, phy & 0x1f, 1); - if (mii_status != 0xffff) { - lp->phys = phy & 0x1f; - dev_dbg(&link->dev, " MII transceiver at " - "index %d, status %x.\n", - phy, mii_status); - if ((mii_status & 0x0040) == 0) - mii_preamble_required = 1; - break; - } - } - if (phy > 32) { - pr_notice(" No MII transceivers found!\n"); - goto failed; - } - i = mdio_read(ioaddr, lp->phys, 16) | 0x40; - mdio_write(ioaddr, lp->phys, 16, i); - lp->advertising = mdio_read(ioaddr, lp->phys, 4); - if (full_duplex) { - /* Only advertise the FD media types. */ - lp->advertising &= ~0x02a0; - mdio_write(ioaddr, lp->phys, 4, lp->advertising); - } - } - - SET_NETDEV_DEV(dev, &link->dev); - - if (register_netdev(dev) != 0) { - pr_notice("register_netdev() failed\n"); - goto failed; - } - - netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n", - cardname, dev->base_addr, dev->irq, dev->dev_addr); - netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n", - 8 << config & Ram_size, - ram_split[(config & Ram_split) >> Ram_split_shift], - config & Autoselect ? "autoselect " : ""); - - return 0; - -failed: - tc574_release(link); - return -ENODEV; - -} /* tc574_config */ - -static void tc574_release(struct pcmcia_device *link) -{ - pcmcia_disable_device(link); -} - -static int tc574_suspend(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - if (link->open) - netif_device_detach(dev); - - return 0; -} - -static int tc574_resume(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - if (link->open) { - tc574_reset(dev); - netif_device_attach(dev); - } - - return 0; -} - -static void dump_status(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - EL3WINDOW(1); - netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x, tx free %04x\n", - inw(ioaddr+EL3_STATUS), - inw(ioaddr+RxStatus), inb(ioaddr+TxStatus), - inw(ioaddr+TxFree)); - EL3WINDOW(4); - netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n", - inw(ioaddr+0x04), inw(ioaddr+0x06), - inw(ioaddr+0x08), inw(ioaddr+0x0a)); - EL3WINDOW(1); -} - -/* - Use this for commands that may take time to finish -*/ -static void tc574_wait_for_completion(struct net_device *dev, int cmd) -{ - int i = 1500; - outw(cmd, dev->base_addr + EL3_CMD); - while (--i > 0) - if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; - if (i == 0) - netdev_notice(dev, "command 0x%04x did not complete!\n", cmd); -} - -/* Read a word from the EEPROM using the regular EEPROM access register. - Assume that we are in register window zero. - */ -static unsigned short read_eeprom(unsigned int ioaddr, int index) -{ - int timer; - outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd); - /* Pause for at least 162 usec for the read to take place. */ - for (timer = 1620; timer >= 0; timer--) { - if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0) - break; - } - return inw(ioaddr + Wn0EepromData); -} - -/* MII transceiver control section. - Read and write the MII registers using software-generated serial - MDIO protocol. 
See the MII specifications or DP83840A data sheet
-   for details.
-   The maximum data clock rate is 2.5 MHz.  The timing is easily met by the
-   slow PC card interface. */
-
-#define MDIO_SHIFT_CLK	0x01
-#define MDIO_DIR_WRITE	0x04
-#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
-#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
-#define MDIO_DATA_READ	0x02
-#define MDIO_ENB_IN	0x00
-
-/* Generate the preamble required for initial synchronization and
-   a few older transceivers. */
-static void mdio_sync(unsigned int ioaddr, int bits)
-{
-	unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
-
-	/* Establish sync by sending at least 32 logic ones. */
-	while (-- bits >= 0) {
-		outw(MDIO_DATA_WRITE1, mdio_addr);
-		outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
-	}
-}
-
-static int mdio_read(unsigned int ioaddr, int phy_id, int location)
-{
-	int i;
-	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
-	unsigned int retval = 0;
-	unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
-
-	if (mii_preamble_required)
-		mdio_sync(ioaddr, 32);
-
-	/* Shift the read command bits out. */
-	for (i = 14; i >= 0; i--) {
-		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
-		outw(dataval, mdio_addr);
-		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
-	}
-	/* Read the two transition, 16 data, and wire-idle bits. */
-	for (i = 19; i > 0; i--) {
-		outw(MDIO_ENB_IN, mdio_addr);
-		retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
-		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
-	}
-	return (retval>>1) & 0xffff;
-}
-
-static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value)
-{
-	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
-	unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
-	int i;
-
-	if (mii_preamble_required)
-		mdio_sync(ioaddr, 32);
-
-	/* Shift the command bits out. */
-	for (i = 31; i >= 0; i--) {
-		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
-		outw(dataval, mdio_addr);
-		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
-	}
-	/* Leave the interface idle. */
-	for (i = 1; i >= 0; i--) {
-		outw(MDIO_ENB_IN, mdio_addr);
-		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
-	}
-}
-
-/* Reset and restore all of the 3c574 registers. */
-static void tc574_reset(struct net_device *dev)
-{
-	struct el3_private *lp = netdev_priv(dev);
-	int i;
-	unsigned int ioaddr = dev->base_addr;
-	unsigned long flags;
-
-	tc574_wait_for_completion(dev, TotalReset|0x10);
-
-	spin_lock_irqsave(&lp->window_lock, flags);
-	/* Clear any transactions in progress. */
-	outw(0, ioaddr + RunnerWrCtrl);
-	outw(0, ioaddr + RunnerRdCtrl);
-
-	/* Set the station address and mask. */
-	EL3WINDOW(2);
-	for (i = 0; i < 6; i++)
-		outb(dev->dev_addr[i], ioaddr + i);
-	for (; i < 12; i+=2)
-		outw(0, ioaddr + i);
-
-	/* Reset config options */
-	EL3WINDOW(3);
-	outb((dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
-	outl((lp->autoselect ? 0x01000000 : 0) | 0x0062001b,
-	     ioaddr + Wn3_Config);
-	/* Roadrunner only: Turn on the MII transceiver. */
-	outw(0x8040, ioaddr + Wn3_Options);
-	mdelay(1);
-	outw(0xc040, ioaddr + Wn3_Options);
-	EL3WINDOW(1);
-	spin_unlock_irqrestore(&lp->window_lock, flags);
-
-	tc574_wait_for_completion(dev, TxReset);
-	tc574_wait_for_completion(dev, RxReset);
-	mdelay(1);
-	spin_lock_irqsave(&lp->window_lock, flags);
-	EL3WINDOW(3);
-	outw(0x8040, ioaddr + Wn3_Options);
-
-	/* Switch to the stats window, and clear all stats by reading. */
-	outw(StatsDisable, ioaddr + EL3_CMD);
-	EL3WINDOW(6);
-	for (i = 0; i < 10; i++)
-		inb(ioaddr + i);
-	inw(ioaddr + 10);
-	inw(ioaddr + 12);
-	EL3WINDOW(4);
-	inb(ioaddr + 12);
-	inb(ioaddr + 13);
-
-	/* .. enable any extra statistics bits.. */
-	outw(0x0040, ioaddr + Wn4_NetDiag);
-
-	EL3WINDOW(1);
-	spin_unlock_irqrestore(&lp->window_lock, flags);
-
-	/* .. re-sync MII and re-fill what NWay is advertising.
*/ - mdio_sync(ioaddr, 32); - mdio_write(ioaddr, lp->phys, 4, lp->advertising); - if (!auto_polarity) { - /* works for TDK 78Q2120 series MII's */ - i = mdio_read(ioaddr, lp->phys, 16) | 0x20; - mdio_write(ioaddr, lp->phys, 16, i); - } - - spin_lock_irqsave(&lp->window_lock, flags); - /* Switch to register set 1 for normal use, just for TxFree. */ - set_rx_mode(dev); - spin_unlock_irqrestore(&lp->window_lock, flags); - outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ - outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ - outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ - /* Allow status bits to be seen. */ - outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); - /* Ack all pending events, and set active indicator mask. */ - outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, - ioaddr + EL3_CMD); - outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull - | AdapterFailure | RxEarly, ioaddr + EL3_CMD); -} - -static int el3_open(struct net_device *dev) -{ - struct el3_private *lp = netdev_priv(dev); - struct pcmcia_device *link = lp->p_dev; - - if (!pcmcia_dev_present(link)) - return -ENODEV; - - link->open++; - netif_start_queue(dev); - - tc574_reset(dev); - lp->media.function = media_check; - lp->media.data = (unsigned long) dev; - lp->media.expires = jiffies + HZ; - add_timer(&lp->media); - - dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", - dev->name, inw(dev->base_addr + EL3_STATUS)); - - return 0; -} - -static void el3_tx_timeout(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - - netdev_notice(dev, "Transmit timed out!\n"); - dump_status(dev); - dev->stats.tx_errors++; - dev->trans_start = jiffies; /* prevent tx timeout */ - /* Issue TX_RESET and TX_START commands. */ - tc574_wait_for_completion(dev, TxReset); - outw(TxEnable, ioaddr + EL3_CMD); - netif_wake_queue(dev); -} - -static void pop_tx_status(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - int i; - - /* Clear the Tx status stack. */ - for (i = 32; i > 0; i--) { - u_char tx_status = inb(ioaddr + TxStatus); - if (!(tx_status & 0x84)) - break; - /* reset transmitter on jabber error or underrun */ - if (tx_status & 0x30) - tc574_wait_for_completion(dev, TxReset); - if (tx_status & 0x38) { - pr_debug("%s: transmit error: status 0x%02x\n", - dev->name, tx_status); - outw(TxEnable, ioaddr + EL3_CMD); - dev->stats.tx_aborted_errors++; - } - outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */ - } -} - -static netdev_tx_t el3_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - struct el3_private *lp = netdev_priv(dev); - unsigned long flags; - - pr_debug("%s: el3_start_xmit(length = %ld) called, " - "status %4.4x.\n", dev->name, (long)skb->len, - inw(ioaddr + EL3_STATUS)); - - spin_lock_irqsave(&lp->window_lock, flags); - - dev->stats.tx_bytes += skb->len; - - /* Put out the doubleword header... */ - outw(skb->len, ioaddr + TX_FIFO); - outw(0, ioaddr + TX_FIFO); - /* ... and the packet rounded to a doubleword. */ - outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2); - - /* TxFree appears only in Window 1, not offset 0x1c. */ - if (inw(ioaddr + TxFree) <= 1536) { - netif_stop_queue(dev); - /* Interrupt us when the FIFO has room for max-sized packet. - The threshold is in units of dwords. 
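The dword arithmetic here is worth spelling out (editor's addition, not part of this patch; example_arm_tx_threshold() is hypothetical):

/* A 1536-byte max-sized frame is 1536 >> 2 == 384 dword units, so the
 * command issued just below encodes as (18 << 11) | 384 == 0x9180. */
static inline void example_arm_tx_threshold(unsigned int ioaddr, unsigned int bytes)
{
        outw(SetTxThreshold + ((bytes + 3) >> 2), ioaddr + EL3_CMD);
}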
*/ - outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); - } - - pop_tx_status(dev); - spin_unlock_irqrestore(&lp->window_lock, flags); - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -/* The EL3 interrupt handler. */ -static irqreturn_t el3_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *) dev_id; - struct el3_private *lp = netdev_priv(dev); - unsigned int ioaddr; - unsigned status; - int work_budget = max_interrupt_work; - int handled = 0; - - if (!netif_device_present(dev)) - return IRQ_NONE; - ioaddr = dev->base_addr; - - pr_debug("%s: interrupt, status %4.4x.\n", - dev->name, inw(ioaddr + EL3_STATUS)); - - spin_lock(&lp->window_lock); - - while ((status = inw(ioaddr + EL3_STATUS)) & - (IntLatch | RxComplete | RxEarly | StatsFull)) { - if (!netif_device_present(dev) || - ((status & 0xe000) != 0x2000)) { - pr_debug("%s: Interrupt from dead card\n", dev->name); - break; - } - - handled = 1; - - if (status & RxComplete) - work_budget = el3_rx(dev, work_budget); - - if (status & TxAvailable) { - pr_debug(" TX room bit was handled.\n"); - /* There's room in the FIFO for a full-sized packet. */ - outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); - netif_wake_queue(dev); - } - - if (status & TxComplete) - pop_tx_status(dev); - - if (status & (AdapterFailure | RxEarly | StatsFull)) { - /* Handle all uncommon interrupts. */ - if (status & StatsFull) - update_stats(dev); - if (status & RxEarly) { - work_budget = el3_rx(dev, work_budget); - outw(AckIntr | RxEarly, ioaddr + EL3_CMD); - } - if (status & AdapterFailure) { - u16 fifo_diag; - EL3WINDOW(4); - fifo_diag = inw(ioaddr + Wn4_FIFODiag); - EL3WINDOW(1); - netdev_notice(dev, "adapter failure, FIFO diagnostic register %04x\n", - fifo_diag); - if (fifo_diag & 0x0400) { - /* Tx overrun */ - tc574_wait_for_completion(dev, TxReset); - outw(TxEnable, ioaddr + EL3_CMD); - } - if (fifo_diag & 0x2000) { - /* Rx underrun */ - tc574_wait_for_completion(dev, RxReset); - set_rx_mode(dev); - outw(RxEnable, ioaddr + EL3_CMD); - } - outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); - } - } - - if (--work_budget < 0) { - pr_debug("%s: Too much work in interrupt, " - "status %4.4x.\n", dev->name, status); - /* Clear all interrupts */ - outw(AckIntr | 0xFF, ioaddr + EL3_CMD); - break; - } - /* Acknowledge the IRQ. */ - outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); - } - - pr_debug("%s: exiting interrupt, status %4.4x.\n", - dev->name, inw(ioaddr + EL3_STATUS)); - - spin_unlock(&lp->window_lock); - return IRQ_RETVAL(handled); -} - -/* - This timer serves two purposes: to check for missed interrupts - (and as a last resort, poll the NIC for events), and to monitor - the MII, reporting changes in cable status. 
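For reference (editor's note, not part of this patch), the magic masks that media_check() tests below are the standard MII bits:

/* reg 1 (BMSR):  0x0004 link status, 0x0020 autoneg complete,
 *                0x0010 remote fault, 0x0002 jabber detected
 * reg 5 (ANLPA): 0x0180 set => a 100baseTX mode was negotiated,
 *                0x0140 set => a full-duplex mode was negotiated */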
-*/ -static void media_check(unsigned long arg) -{ - struct net_device *dev = (struct net_device *) arg; - struct el3_private *lp = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - unsigned long flags; - unsigned short /* cable, */ media, partner; - - if (!netif_device_present(dev)) - goto reschedule; - - /* Check for pending interrupt with expired latency timer: with - this, we can limp along even if the interrupt is blocked */ - if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) { - if (!lp->fast_poll) - netdev_info(dev, "interrupt(s) dropped!\n"); - - local_irq_save(flags); - el3_interrupt(dev->irq, dev); - local_irq_restore(flags); - - lp->fast_poll = HZ; - } - if (lp->fast_poll) { - lp->fast_poll--; - lp->media.expires = jiffies + 2*HZ/100; - add_timer(&lp->media); - return; - } - - spin_lock_irqsave(&lp->window_lock, flags); - EL3WINDOW(4); - media = mdio_read(ioaddr, lp->phys, 1); - partner = mdio_read(ioaddr, lp->phys, 5); - EL3WINDOW(1); - - if (media != lp->media_status) { - if ((media ^ lp->media_status) & 0x0004) - netdev_info(dev, "%s link beat\n", - (lp->media_status & 0x0004) ? "lost" : "found"); - if ((media ^ lp->media_status) & 0x0020) { - lp->partner = 0; - if (lp->media_status & 0x0020) { - netdev_info(dev, "autonegotiation restarted\n"); - } else if (partner) { - partner &= lp->advertising; - lp->partner = partner; - netdev_info(dev, "autonegotiation complete: " - "%dbaseT-%cD selected\n", - (partner & 0x0180) ? 100 : 10, - (partner & 0x0140) ? 'F' : 'H'); - } else { - netdev_info(dev, "link partner did not autonegotiate\n"); - } - - EL3WINDOW(3); - outb((partner & 0x0140 ? 0x20 : 0) | - (dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl); - EL3WINDOW(1); - - } - if (media & 0x0010) - netdev_info(dev, "remote fault detected\n"); - if (media & 0x0002) - netdev_info(dev, "jabber detected\n"); - lp->media_status = media; - } - spin_unlock_irqrestore(&lp->window_lock, flags); - -reschedule: - lp->media.expires = jiffies + HZ; - add_timer(&lp->media); -} - -static struct net_device_stats *el3_get_stats(struct net_device *dev) -{ - struct el3_private *lp = netdev_priv(dev); - - if (netif_device_present(dev)) { - unsigned long flags; - spin_lock_irqsave(&lp->window_lock, flags); - update_stats(dev); - spin_unlock_irqrestore(&lp->window_lock, flags); - } - return &dev->stats; -} - -/* Update statistics. - Surprisingly this need not be run single-threaded, but it effectively is. - The counters clear when read, so the adds must merely be atomic. - */ -static void update_stats(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - u8 rx, tx, up; - - pr_debug("%s: updating the statistics.\n", dev->name); - - if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */ - return; - - /* Unlike the 3c509 we need not turn off stats updates while reading. */ - /* Switch to the stats window, and read everything. */ - EL3WINDOW(6); - dev->stats.tx_carrier_errors += inb(ioaddr + 0); - dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); - /* Multiple collisions. 
*/ inb(ioaddr + 2); - dev->stats.collisions += inb(ioaddr + 3); - dev->stats.tx_window_errors += inb(ioaddr + 4); - dev->stats.rx_fifo_errors += inb(ioaddr + 5); - dev->stats.tx_packets += inb(ioaddr + 6); - up = inb(ioaddr + 9); - dev->stats.tx_packets += (up&0x30) << 4; - /* Rx packets */ inb(ioaddr + 7); - /* Tx deferrals */ inb(ioaddr + 8); - rx = inw(ioaddr + 10); - tx = inw(ioaddr + 12); - - EL3WINDOW(4); - /* BadSSD */ inb(ioaddr + 12); - up = inb(ioaddr + 13); - - EL3WINDOW(1); -} - -static int el3_rx(struct net_device *dev, int worklimit) -{ - unsigned int ioaddr = dev->base_addr; - short rx_status; - - pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", - dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); - while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) && - worklimit > 0) { - worklimit--; - if (rx_status & 0x4000) { /* Error, update stats. */ - short error = rx_status & 0x3800; - dev->stats.rx_errors++; - switch (error) { - case 0x0000: dev->stats.rx_over_errors++; break; - case 0x0800: dev->stats.rx_length_errors++; break; - case 0x1000: dev->stats.rx_frame_errors++; break; - case 0x1800: dev->stats.rx_length_errors++; break; - case 0x2000: dev->stats.rx_frame_errors++; break; - case 0x2800: dev->stats.rx_crc_errors++; break; - } - } else { - short pkt_len = rx_status & 0x7ff; - struct sk_buff *skb; - - skb = dev_alloc_skb(pkt_len+5); - - pr_debug(" Receiving packet size %d status %4.4x.\n", - pkt_len, rx_status); - if (skb != NULL) { - skb_reserve(skb, 2); - insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), - ((pkt_len+3)>>2)); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } else { - pr_debug("%s: couldn't allocate a sk_buff of" - " size %d.\n", dev->name, pkt_len); - dev->stats.rx_dropped++; - } - } - tc574_wait_for_completion(dev, RxDiscard); - } - - return worklimit; -} - -/* Provide ioctl() calls to examine the MII xcvr state. */ -static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct el3_private *lp = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - struct mii_ioctl_data *data = if_mii(rq); - int phy = lp->phys & 0x1f; - - pr_debug("%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n", - dev->name, rq->ifr_ifrn.ifrn_name, cmd, - data->phy_id, data->reg_num, data->val_in, data->val_out); - - switch(cmd) { - case SIOCGMIIPHY: /* Get the address of the PHY in use. */ - data->phy_id = phy; - case SIOCGMIIREG: /* Read the specified MII register. */ - { - int saved_window; - unsigned long flags; - - spin_lock_irqsave(&lp->window_lock, flags); - saved_window = inw(ioaddr + EL3_CMD) >> 13; - EL3WINDOW(4); - data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, - data->reg_num & 0x1f); - EL3WINDOW(saved_window); - spin_unlock_irqrestore(&lp->window_lock, flags); - return 0; - } - case SIOCSMIIREG: /* Write the specified MII register */ - { - int saved_window; - unsigned long flags; - - spin_lock_irqsave(&lp->window_lock, flags); - saved_window = inw(ioaddr + EL3_CMD) >> 13; - EL3WINDOW(4); - mdio_write(ioaddr, data->phy_id & 0x1f, - data->reg_num & 0x1f, data->val_in); - EL3WINDOW(saved_window); - spin_unlock_irqrestore(&lp->window_lock, flags); - return 0; - } - default: - return -EOPNOTSUPP; - } -} - -/* The Odie chip has a 64 bin multicast filter, but the bit layout is not - documented. Until it is we revert to receiving all multicast frames when - any multicast reception is desired. 
- Note: My other drivers emit a log message whenever promiscuous mode is - entered to help detect password sniffers. This is less desirable on - typical PC card machines, so we omit the message. - */ - -static void set_rx_mode(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - - if (dev->flags & IFF_PROMISC) - outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, - ioaddr + EL3_CMD); - else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) - outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); - else - outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); -} - -static void set_multicast_list(struct net_device *dev) -{ - struct el3_private *lp = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&lp->window_lock, flags); - set_rx_mode(dev); - spin_unlock_irqrestore(&lp->window_lock, flags); -} - -static int el3_close(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - struct el3_private *lp = netdev_priv(dev); - struct pcmcia_device *link = lp->p_dev; - - dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); - - if (pcmcia_dev_present(link)) { - unsigned long flags; - - /* Turn off statistics ASAP. We update lp->stats below. */ - outw(StatsDisable, ioaddr + EL3_CMD); - - /* Disable the receiver and transmitter. */ - outw(RxDisable, ioaddr + EL3_CMD); - outw(TxDisable, ioaddr + EL3_CMD); - - /* Note: Switching to window 0 may disable the IRQ. */ - EL3WINDOW(0); - spin_lock_irqsave(&lp->window_lock, flags); - update_stats(dev); - spin_unlock_irqrestore(&lp->window_lock, flags); - - /* force interrupts off */ - outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); - } - - link->open--; - netif_stop_queue(dev); - del_timer_sync(&lp->media); - - return 0; -} - -static const struct pcmcia_device_id tc574_ids[] = { - PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0574), - PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0556, "cis/3CCFEM556.cis"), - PCMCIA_DEVICE_NULL, -}; -MODULE_DEVICE_TABLE(pcmcia, tc574_ids); - -static struct pcmcia_driver tc574_driver = { - .owner = THIS_MODULE, - .name = "3c574_cs", - .probe = tc574_probe, - .remove = tc574_detach, - .id_table = tc574_ids, - .suspend = tc574_suspend, - .resume = tc574_resume, -}; - -static int __init init_tc574(void) -{ - return pcmcia_register_driver(&tc574_driver); -} - -static void __exit exit_tc574(void) -{ - pcmcia_unregister_driver(&tc574_driver); -} - -module_init(init_tc574); -module_exit(exit_tc574); diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c deleted file mode 100644 index 4a1a358..0000000 --- a/drivers/net/pcmcia/3c589_cs.c +++ /dev/null @@ -1,943 +0,0 @@ -/*====================================================================== - - A PCMCIA ethernet driver for the 3com 3c589 card. - - Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net - - 3c589_cs.c 1.162 2001/10/13 00:08:50 - - The network driver code is based on Donald Becker's 3c589 code: - - Written 1994 by Donald Becker. - Copyright 1993 United States Government as represented by the - Director, National Security Agency. This software may be used and - distributed according to the terms of the GNU General Public License, - incorporated herein by reference. 
-    Donald Becker may be reached at becker@scyld.com
-
-    Updated for 2.5.x by Alan Cox
-
-======================================================================*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#define DRV_NAME	"3c589_cs"
-#define DRV_VERSION	"1.162-ac"
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-#include <linux/mii.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ciscode.h>
-#include <pcmcia/ds.h>
-
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-/* To minimize the size of the driver source I only define operating
-   constants if they are used several times.  You'll need the manual
-   if you want to understand driver details. */
-/* Offsets from base I/O address. */
-#define EL3_DATA	0x00
-#define EL3_TIMER	0x0a
-#define EL3_CMD		0x0e
-#define EL3_STATUS	0x0e
-
-#define EEPROM_READ	0x0080
-#define EEPROM_BUSY	0x8000
-
-#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
-
-/* The top five bits written to EL3_CMD are a command, the lower
-   11 bits are the parameter, if applicable. */
-enum c509cmd {
-	TotalReset	= 0<<11,
-	SelectWindow	= 1<<11,
-	StartCoax	= 2<<11,
-	RxDisable	= 3<<11,
-	RxEnable	= 4<<11,
-	RxReset		= 5<<11,
-	RxDiscard	= 8<<11,
-	TxEnable	= 9<<11,
-	TxDisable	= 10<<11,
-	TxReset		= 11<<11,
-	FakeIntr	= 12<<11,
-	AckIntr		= 13<<11,
-	SetIntrEnb	= 14<<11,
-	SetStatusEnb	= 15<<11,
-	SetRxFilter	= 16<<11,
-	SetRxThreshold	= 17<<11,
-	SetTxThreshold	= 18<<11,
-	SetTxStart	= 19<<11,
-	StatsEnable	= 21<<11,
-	StatsDisable	= 22<<11,
-	StopCoax	= 23<<11
-};
-
-enum c509status {
-	IntLatch	= 0x0001,
-	AdapterFailure	= 0x0002,
-	TxComplete	= 0x0004,
-	TxAvailable	= 0x0008,
-	RxComplete	= 0x0010,
-	RxEarly		= 0x0020,
-	IntReq		= 0x0040,
-	StatsFull	= 0x0080,
-	CmdBusy		= 0x1000
-};
-
-/* The SetRxFilter command accepts the following classes: */
-enum RxFilter {
-	RxStation	= 1,
-	RxMulticast	= 2,
-	RxBroadcast	= 4,
-	RxProm		= 8
-};
-
-/* Register window 1 offsets, the window used in normal operation. */
-#define TX_FIFO		0x00
-#define RX_FIFO		0x00
-#define RX_STATUS	0x08
-#define TX_STATUS	0x0B
-#define TX_FREE		0x0C	/* Remaining free bytes in Tx buffer. */
-
-#define WN0_IRQ		0x08	/* Window 0: Set IRQ line in bits 12-15. */
-#define WN4_MEDIA	0x0A	/* Window 4: Various transcvr/media bits. */
-#define MEDIA_TP	0x00C0	/* Enable link beat and jabber for 10baseT. */
-#define MEDIA_LED	0x0001	/* Enable link light on 3C589E cards. */
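An illustrative sketch (editor's addition, not part of this patch) of the window-switched access pattern these offsets imply; example_read_rx_status() is a hypothetical helper:

static inline u16 example_read_rx_status(unsigned int ioaddr)
{
        EL3WINDOW(1);		/* the normal-operation window */
        return inw(ioaddr + RX_STATUS);
}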
-
-/* Time in jiffies before concluding Tx hung */
-#define TX_TIMEOUT	((400*HZ)/1000)
-
-struct el3_private {
-	struct pcmcia_device	*p_dev;
-	/* For transceiver monitoring */
-	struct timer_list	media;
-	u16 media_status;
-	u16 fast_poll;
-	unsigned long last_irq;
-	spinlock_t lock;
-};
-
-static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
-
-/*====================================================================*/
-
-/* Module parameters */
-
-MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
-MODULE_DESCRIPTION("3Com 3c589 series PCMCIA ethernet driver");
-MODULE_LICENSE("GPL");
-
-#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
-
-/* Special hook for setting if_port when module is loaded */
-INT_MODULE_PARM(if_port, 0);
-
-
-/*====================================================================*/
-
-static int tc589_config(struct pcmcia_device *link);
-static void tc589_release(struct pcmcia_device *link);
-
-static u16 read_eeprom(unsigned int ioaddr, int index);
-static void tc589_reset(struct net_device *dev);
-static void media_check(unsigned long arg);
-static int el3_config(struct net_device *dev, struct ifmap *map);
-static int el3_open(struct net_device *dev);
-static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
-					struct net_device *dev);
-static irqreturn_t el3_interrupt(int irq, void *dev_id);
-static void update_stats(struct net_device *dev);
-static struct net_device_stats *el3_get_stats(struct net_device *dev);
-static int el3_rx(struct net_device *dev);
-static int el3_close(struct net_device *dev);
-static void el3_tx_timeout(struct net_device *dev);
-static void set_rx_mode(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
-
-static void tc589_detach(struct pcmcia_device *p_dev);
-
-static const struct net_device_ops el3_netdev_ops = {
-	.ndo_open		= el3_open,
-	.ndo_stop		= el3_close,
-	.ndo_start_xmit		= el3_start_xmit,
-	.ndo_tx_timeout		= el3_tx_timeout,
-	.ndo_set_config		= el3_config,
-	.ndo_get_stats		= el3_get_stats,
-	.ndo_set_multicast_list	= set_multicast_list,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_set_mac_address	= eth_mac_addr,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-static int tc589_probe(struct pcmcia_device *link)
-{
-	struct el3_private *lp;
-	struct net_device *dev;
-
-	dev_dbg(&link->dev, "3c589_attach()\n");
-
-	/* Create new ethernet device */
-	dev = alloc_etherdev(sizeof(struct el3_private));
-	if (!dev)
-		return -ENOMEM;
-	lp = netdev_priv(dev);
-	link->priv = dev;
-	lp->p_dev = link;
-
-	spin_lock_init(&lp->lock);
-	link->resource[0]->end = 16;
-	link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
-
-	link->config_flags |= CONF_ENABLE_IRQ;
-	link->config_index = 1;
-
-	dev->netdev_ops = &el3_netdev_ops;
-	dev->watchdog_timeo = TX_TIMEOUT;
-
-	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
-
-	return tc589_config(link);
-}
-
-static void tc589_detach(struct pcmcia_device *link)
-{
-	struct net_device *dev = link->priv;
-
-	dev_dbg(&link->dev, "3c589_detach\n");
-
-	unregister_netdev(dev);
-
-	tc589_release(link);
-
-	free_netdev(dev);
-} /* tc589_detach */
-
-static int tc589_config(struct pcmcia_device *link)
-{
-	struct net_device *dev = link->priv;
-	__be16 *phys_addr;
-	int ret, i, j, multi = 0, fifo;
-	unsigned int ioaddr;
-	static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
-	u8 *buf;
-	size_t len;
-
-	dev_dbg(&link->dev, "3c589_config\n");
-
-	phys_addr = (__be16 *)dev->dev_addr;
-	/* Is this a 3c562?
*/ - if (link->manf_id != MANFID_3COM) - dev_info(&link->dev, "hmmm, is this really a 3Com card??\n"); - multi = (link->card_id == PRODID_3COM_3C562); - - link->io_lines = 16; - - /* For the 3c562, the base address must be xx00-xx7f */ - for (i = j = 0; j < 0x400; j += 0x10) { - if (multi && (j & 0x80)) continue; - link->resource[0]->start = j ^ 0x300; - i = pcmcia_request_io(link); - if (i == 0) - break; - } - if (i != 0) - goto failed; - - ret = pcmcia_request_irq(link, el3_interrupt); - if (ret) - goto failed; - - ret = pcmcia_enable_device(link); - if (ret) - goto failed; - - dev->irq = link->irq; - dev->base_addr = link->resource[0]->start; - ioaddr = dev->base_addr; - EL3WINDOW(0); - - /* The 3c589 has an extra EEPROM for configuration info, including - the hardware address. The 3c562 puts the address in the CIS. */ - len = pcmcia_get_tuple(link, 0x88, &buf); - if (buf && len >= 6) { - for (i = 0; i < 3; i++) - phys_addr[i] = htons(le16_to_cpu(buf[i*2])); - kfree(buf); - } else { - kfree(buf); /* 0 < len < 6 */ - for (i = 0; i < 3; i++) - phys_addr[i] = htons(read_eeprom(ioaddr, i)); - if (phys_addr[0] == htons(0x6060)) { - dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n", - dev->base_addr, dev->base_addr+15); - goto failed; - } - } - - /* The address and resource configuration register aren't loaded from - the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */ - outw(0x3f00, ioaddr + 8); - fifo = inl(ioaddr); - - /* The if_port symbol can be set when the module is loaded */ - if ((if_port >= 0) && (if_port <= 3)) - dev->if_port = if_port; - else - dev_err(&link->dev, "invalid if_port requested\n"); - - SET_NETDEV_DEV(dev, &link->dev); - - if (register_netdev(dev) != 0) { - dev_err(&link->dev, "register_netdev() failed\n"); - goto failed; - } - - netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n", - (multi ? "562" : "589"), dev->base_addr, dev->irq, - dev->dev_addr); - netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n", - (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], - if_names[dev->if_port]); - return 0; - -failed: - tc589_release(link); - return -ENODEV; -} /* tc589_config */ - -static void tc589_release(struct pcmcia_device *link) -{ - pcmcia_disable_device(link); -} - -static int tc589_suspend(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - if (link->open) - netif_device_detach(dev); - - return 0; -} - -static int tc589_resume(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - if (link->open) { - tc589_reset(dev); - netif_device_attach(dev); - } - - return 0; -} - -/*====================================================================*/ - -/* - Use this for commands that may take time to finish -*/ -static void tc589_wait_for_completion(struct net_device *dev, int cmd) -{ - int i = 100; - outw(cmd, dev->base_addr + EL3_CMD); - while (--i > 0) - if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; - if (i == 0) - netdev_warn(dev, "command 0x%04x did not complete!\n", cmd); -} - -/* - Read a word from the EEPROM using the regular EEPROM access register. - Assume that we are in register window zero. -*/ -static u16 read_eeprom(unsigned int ioaddr, int index) -{ - int i; - outw(EEPROM_READ + index, ioaddr + 10); - /* Reading the eeprom takes 162 us */ - for (i = 1620; i >= 0; i--) - if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0) - break; - return inw(ioaddr + 12); -} - -/* - Set transceiver type, perhaps to something other than what the user - specified in dev->if_port. 
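For orientation (editor's note, not part of this patch):

/* if_port indexes if_names[] above: 0 auto, 1 10baseT, 2 10base2 (coax),
 * 3 AUI.  tc589_set_xcvr() below selects the transceiver in the top bits
 * of the window 0 address-configuration register at offset 6:
 * 0<<14 for TP/auto, 3<<14 for coax, 1<<14 for AUI. */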
-*/ -static void tc589_set_xcvr(struct net_device *dev, int if_port) -{ - struct el3_private *lp = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - - EL3WINDOW(0); - switch (if_port) { - case 0: case 1: outw(0, ioaddr + 6); break; - case 2: outw(3<<14, ioaddr + 6); break; - case 3: outw(1<<14, ioaddr + 6); break; - } - /* On PCMCIA, this just turns on the LED */ - outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD); - /* 10baseT interface, enable link beat and jabber check. */ - EL3WINDOW(4); - outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA); - EL3WINDOW(1); - if (if_port == 2) - lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000); - else - lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800); -} - -static void dump_status(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - EL3WINDOW(1); - netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n", - inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS), - inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE)); - EL3WINDOW(4); - netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n", - inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08), - inw(ioaddr+0x0a)); - EL3WINDOW(1); -} - -/* Reset and restore all of the 3c589 registers. */ -static void tc589_reset(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - int i; - - EL3WINDOW(0); - outw(0x0001, ioaddr + 4); /* Activate board. */ - outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */ - - /* Set the station address in window 2. */ - EL3WINDOW(2); - for (i = 0; i < 6; i++) - outb(dev->dev_addr[i], ioaddr + i); - - tc589_set_xcvr(dev, dev->if_port); - - /* Switch to the stats window, and clear all stats by reading. */ - outw(StatsDisable, ioaddr + EL3_CMD); - EL3WINDOW(6); - for (i = 0; i < 9; i++) - inb(ioaddr+i); - inw(ioaddr + 10); - inw(ioaddr + 12); - - /* Switch to register set 1 for normal use. */ - EL3WINDOW(1); - - set_rx_mode(dev); - outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ - outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ - outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ - /* Allow status bits to be seen. */ - outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); - /* Ack all pending events, and set active indicator mask. 
*/ - outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, - ioaddr + EL3_CMD); - outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull - | AdapterFailure, ioaddr + EL3_CMD); -} - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, -}; - -static int el3_config(struct net_device *dev, struct ifmap *map) -{ - if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { - if (map->port <= 3) { - dev->if_port = map->port; - netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); - tc589_set_xcvr(dev, dev->if_port); - } else - return -EINVAL; - } - return 0; -} - -static int el3_open(struct net_device *dev) -{ - struct el3_private *lp = netdev_priv(dev); - struct pcmcia_device *link = lp->p_dev; - - if (!pcmcia_dev_present(link)) - return -ENODEV; - - link->open++; - netif_start_queue(dev); - - tc589_reset(dev); - init_timer(&lp->media); - lp->media.function = media_check; - lp->media.data = (unsigned long) dev; - lp->media.expires = jiffies + HZ; - add_timer(&lp->media); - - dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", - dev->name, inw(dev->base_addr + EL3_STATUS)); - - return 0; -} - -static void el3_tx_timeout(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - - netdev_warn(dev, "Transmit timed out!\n"); - dump_status(dev); - dev->stats.tx_errors++; - dev->trans_start = jiffies; /* prevent tx timeout */ - /* Issue TX_RESET and TX_START commands. */ - tc589_wait_for_completion(dev, TxReset); - outw(TxEnable, ioaddr + EL3_CMD); - netif_wake_queue(dev); -} - -static void pop_tx_status(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - int i; - - /* Clear the Tx status stack. */ - for (i = 32; i > 0; i--) { - u_char tx_status = inb(ioaddr + TX_STATUS); - if (!(tx_status & 0x84)) break; - /* reset transmitter on jabber error or underrun */ - if (tx_status & 0x30) - tc589_wait_for_completion(dev, TxReset); - if (tx_status & 0x38) { - netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status); - outw(TxEnable, ioaddr + EL3_CMD); - dev->stats.tx_aborted_errors++; - } - outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ - } -} - -static netdev_tx_t el3_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - struct el3_private *priv = netdev_priv(dev); - unsigned long flags; - - netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n", - (long)skb->len, inw(ioaddr + EL3_STATUS)); - - spin_lock_irqsave(&priv->lock, flags); - - dev->stats.tx_bytes += skb->len; - - /* Put out the doubleword header... */ - outw(skb->len, ioaddr + TX_FIFO); - outw(0x00, ioaddr + TX_FIFO); - /* ... and the packet rounded to a doubleword. */ - outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); - - if (inw(ioaddr + TX_FREE) <= 1536) { - netif_stop_queue(dev); - /* Interrupt us when the FIFO has room for max-sized packet. */ - outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); - } - - pop_tx_status(dev); - spin_unlock_irqrestore(&priv->lock, flags); - dev_kfree_skb(skb); - - return NETDEV_TX_OK; -} - -/* The EL3 interrupt handler. 
*/ -static irqreturn_t el3_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *) dev_id; - struct el3_private *lp = netdev_priv(dev); - unsigned int ioaddr; - __u16 status; - int i = 0, handled = 1; - - if (!netif_device_present(dev)) - return IRQ_NONE; - - ioaddr = dev->base_addr; - - netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS)); - - spin_lock(&lp->lock); - while ((status = inw(ioaddr + EL3_STATUS)) & - (IntLatch | RxComplete | StatsFull)) { - if ((status & 0xe000) != 0x2000) { - netdev_dbg(dev, "interrupt from dead card\n"); - handled = 0; - break; - } - if (status & RxComplete) - el3_rx(dev); - if (status & TxAvailable) { - netdev_dbg(dev, " TX room bit was handled.\n"); - /* There's room in the FIFO for a full-sized packet. */ - outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); - netif_wake_queue(dev); - } - if (status & TxComplete) - pop_tx_status(dev); - if (status & (AdapterFailure | RxEarly | StatsFull)) { - /* Handle all uncommon interrupts. */ - if (status & StatsFull) /* Empty statistics. */ - update_stats(dev); - if (status & RxEarly) { /* Rx early is unused. */ - el3_rx(dev); - outw(AckIntr | RxEarly, ioaddr + EL3_CMD); - } - if (status & AdapterFailure) { - u16 fifo_diag; - EL3WINDOW(4); - fifo_diag = inw(ioaddr + 4); - EL3WINDOW(1); - netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n", - fifo_diag); - if (fifo_diag & 0x0400) { - /* Tx overrun */ - tc589_wait_for_completion(dev, TxReset); - outw(TxEnable, ioaddr + EL3_CMD); - } - if (fifo_diag & 0x2000) { - /* Rx underrun */ - tc589_wait_for_completion(dev, RxReset); - set_rx_mode(dev); - outw(RxEnable, ioaddr + EL3_CMD); - } - outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); - } - } - if (++i > 10) { - netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n", - status); - /* Clear all interrupts */ - outw(AckIntr | 0xFF, ioaddr + EL3_CMD); - break; - } - /* Acknowledge the IRQ. */ - outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); - } - lp->last_irq = jiffies; - spin_unlock(&lp->lock); - netdev_dbg(dev, "exiting interrupt, status %4.4x.\n", - inw(ioaddr + EL3_STATUS)); - return IRQ_RETVAL(handled); -} - -static void media_check(unsigned long arg) -{ - struct net_device *dev = (struct net_device *)(arg); - struct el3_private *lp = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - u16 media, errs; - unsigned long flags; - - if (!netif_device_present(dev)) goto reschedule; - - /* Check for pending interrupt with expired latency timer: with - this, we can limp along even if the interrupt is blocked */ - if ((inw(ioaddr + EL3_STATUS) & IntLatch) && - (inb(ioaddr + EL3_TIMER) == 0xff)) { - if (!lp->fast_poll) - netdev_warn(dev, "interrupt(s) dropped!\n"); - - local_irq_save(flags); - el3_interrupt(dev->irq, dev); - local_irq_restore(flags); - - lp->fast_poll = HZ; - } - if (lp->fast_poll) { - lp->fast_poll--; - lp->media.expires = jiffies + HZ/100; - add_timer(&lp->media); - return; - } - - /* lp->lock guards the EL3 window. 
Window should always be 1 except - when the lock is held */ - spin_lock_irqsave(&lp->lock, flags); - EL3WINDOW(4); - media = inw(ioaddr+WN4_MEDIA) & 0xc810; - - /* Ignore collisions unless we've had no irq's recently */ - if (time_before(jiffies, lp->last_irq + HZ)) { - media &= ~0x0010; - } else { - /* Try harder to detect carrier errors */ - EL3WINDOW(6); - outw(StatsDisable, ioaddr + EL3_CMD); - errs = inb(ioaddr + 0); - outw(StatsEnable, ioaddr + EL3_CMD); - dev->stats.tx_carrier_errors += errs; - if (errs || (lp->media_status & 0x0010)) media |= 0x0010; - } - - if (media != lp->media_status) { - if ((media & lp->media_status & 0x8000) && - ((lp->media_status ^ media) & 0x0800)) - netdev_info(dev, "%s link beat\n", - (lp->media_status & 0x0800 ? "lost" : "found")); - else if ((media & lp->media_status & 0x4000) && - ((lp->media_status ^ media) & 0x0010)) - netdev_info(dev, "coax cable %s\n", - (lp->media_status & 0x0010 ? "ok" : "problem")); - if (dev->if_port == 0) { - if (media & 0x8000) { - if (media & 0x0800) - netdev_info(dev, "flipped to 10baseT\n"); - else - tc589_set_xcvr(dev, 2); - } else if (media & 0x4000) { - if (media & 0x0010) - tc589_set_xcvr(dev, 1); - else - netdev_info(dev, "flipped to 10base2\n"); - } - } - lp->media_status = media; - } - - EL3WINDOW(1); - spin_unlock_irqrestore(&lp->lock, flags); - -reschedule: - lp->media.expires = jiffies + HZ; - add_timer(&lp->media); -} - -static struct net_device_stats *el3_get_stats(struct net_device *dev) -{ - struct el3_private *lp = netdev_priv(dev); - unsigned long flags; - struct pcmcia_device *link = lp->p_dev; - - if (pcmcia_dev_present(link)) { - spin_lock_irqsave(&lp->lock, flags); - update_stats(dev); - spin_unlock_irqrestore(&lp->lock, flags); - } - return &dev->stats; -} - -/* - Update statistics. We change to register window 6, so this should be run - single-threaded if the device is active. This is expected to be a rare - operation, and it's simpler for the rest of the driver to assume that - window 1 is always valid rather than use a special window-state variable. - - Caller must hold the lock for this -*/ -static void update_stats(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - - netdev_dbg(dev, "updating the statistics.\n"); - /* Turn off statistics updates while reading. */ - outw(StatsDisable, ioaddr + EL3_CMD); - /* Switch to the stats window, and read everything. */ - EL3WINDOW(6); - dev->stats.tx_carrier_errors += inb(ioaddr + 0); - dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); - /* Multiple collisions. */ inb(ioaddr + 2); - dev->stats.collisions += inb(ioaddr + 3); - dev->stats.tx_window_errors += inb(ioaddr + 4); - dev->stats.rx_fifo_errors += inb(ioaddr + 5); - dev->stats.tx_packets += inb(ioaddr + 6); - /* Rx packets */ inb(ioaddr + 7); - /* Tx deferrals */ inb(ioaddr + 8); - /* Rx octets */ inw(ioaddr + 10); - /* Tx octets */ inw(ioaddr + 12); - - /* Back to window 1, and turn statistics back on. */ - EL3WINDOW(1); - outw(StatsEnable, ioaddr + EL3_CMD); -} - -static int el3_rx(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - int worklimit = 32; - short rx_status; - - netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n", - inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); - while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && - worklimit > 0) { - worklimit--; - if (rx_status & 0x4000) { /* Error, update stats. 
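Bits 11-13 of rx_status encode the error type.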
*/ - short error = rx_status & 0x3800; - dev->stats.rx_errors++; - switch (error) { - case 0x0000: dev->stats.rx_over_errors++; break; - case 0x0800: dev->stats.rx_length_errors++; break; - case 0x1000: dev->stats.rx_frame_errors++; break; - case 0x1800: dev->stats.rx_length_errors++; break; - case 0x2000: dev->stats.rx_frame_errors++; break; - case 0x2800: dev->stats.rx_crc_errors++; break; - } - } else { - short pkt_len = rx_status & 0x7ff; - struct sk_buff *skb; - - skb = dev_alloc_skb(pkt_len+5); - - netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n", - pkt_len, rx_status); - if (skb != NULL) { - skb_reserve(skb, 2); - insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), - (pkt_len+3)>>2); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } else { - netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n", - pkt_len); - dev->stats.rx_dropped++; - } - } - /* Pop the top of the Rx FIFO */ - tc589_wait_for_completion(dev, RxDiscard); - } - if (worklimit == 0) - netdev_warn(dev, "too much work in el3_rx!\n"); - return 0; -} - -static void set_rx_mode(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - u16 opts = SetRxFilter | RxStation | RxBroadcast; - - if (dev->flags & IFF_PROMISC) - opts |= RxMulticast | RxProm; - else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) - opts |= RxMulticast; - outw(opts, ioaddr + EL3_CMD); -} - -static void set_multicast_list(struct net_device *dev) -{ - struct el3_private *priv = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - set_rx_mode(dev); - spin_unlock_irqrestore(&priv->lock, flags); -} - -static int el3_close(struct net_device *dev) -{ - struct el3_private *lp = netdev_priv(dev); - struct pcmcia_device *link = lp->p_dev; - unsigned int ioaddr = dev->base_addr; - - dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); - - if (pcmcia_dev_present(link)) { - /* Turn off statistics ASAP. We update dev->stats below. */ - outw(StatsDisable, ioaddr + EL3_CMD); - - /* Disable the receiver and transmitter. */ - outw(RxDisable, ioaddr + EL3_CMD); - outw(TxDisable, ioaddr + EL3_CMD); - - if (dev->if_port == 2) - /* Turn off thinnet power. Green! */ - outw(StopCoax, ioaddr + EL3_CMD); - else if (dev->if_port == 1) { - /* Disable link beat and jabber */ - EL3WINDOW(4); - outw(0, ioaddr + WN4_MEDIA); - } - - /* Switching back to window 0 disables the IRQ. */ - EL3WINDOW(0); - /* But we explicitly zero the IRQ line select anyway. 
*/ - outw(0x0f00, ioaddr + WN0_IRQ); - - /* Check if the card still exists */ - if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000) - update_stats(dev); - } - - link->open--; - netif_stop_queue(dev); - del_timer_sync(&lp->media); - - return 0; -} - -static const struct pcmcia_device_id tc589_ids[] = { - PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0101, 0x0562), - PCMCIA_MFC_DEVICE_PROD_ID1(0, "Motorola MARQUIS", 0xf03e4e77), - PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0589), - PCMCIA_DEVICE_PROD_ID12("Farallon", "ENet", 0x58d93fc4, 0x992c2202), - PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0035, "cis/3CXEM556.cis"), - PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x003d, "cis/3CXEM556.cis"), - PCMCIA_DEVICE_NULL, -}; -MODULE_DEVICE_TABLE(pcmcia, tc589_ids); - -static struct pcmcia_driver tc589_driver = { - .owner = THIS_MODULE, - .name = "3c589_cs", - .probe = tc589_probe, - .remove = tc589_detach, - .id_table = tc589_ids, - .suspend = tc589_suspend, - .resume = tc589_resume, -}; - -static int __init init_tc589(void) -{ - return pcmcia_register_driver(&tc589_driver); -} - -static void __exit exit_tc589(void) -{ - pcmcia_unregister_driver(&tc589_driver); -} - -module_init(init_tc589); -module_exit(exit_tc589); diff --git a/drivers/net/pcmcia/Kconfig b/drivers/net/pcmcia/Kconfig index 9b8f793..b67c5ed 100644 --- a/drivers/net/pcmcia/Kconfig +++ b/drivers/net/pcmcia/Kconfig @@ -21,24 +21,6 @@ menuconfig NET_PCMCIA if NET_PCMCIA && PCMCIA -config PCMCIA_3C589 - tristate "3Com 3c589 PCMCIA support" - help - Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA - (PC-card) Ethernet card to your computer. - - To compile this driver as a module, choose M here: the module will be - called 3c589_cs. If unsure, say N. - -config PCMCIA_3C574 - tristate "3Com 3c574 PCMCIA support" - help - Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA - (PC-card) Fast Ethernet card to your computer. - - To compile this driver as a module, choose M here: the module will be - called 3c574_cs. If unsure, say N. - config PCMCIA_FMVJ18X tristate "Fujitsu FMV-J18x PCMCIA support" select CRC32 diff --git a/drivers/net/pcmcia/Makefile b/drivers/net/pcmcia/Makefile index 87d2d99..2f2ab3b 100644 --- a/drivers/net/pcmcia/Makefile +++ b/drivers/net/pcmcia/Makefile @@ -3,8 +3,6 @@ # # 16-bit client drivers -obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o -obj-$(CONFIG_PCMCIA_3C574) += 3c574_cs.o obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c deleted file mode 100644 index 1d5091a..0000000 --- a/drivers/net/typhoon.c +++ /dev/null @@ -1,2574 +0,0 @@ -/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */ -/* - Written 2002-2004 by David Dillow - Based on code written 1998-2000 by Donald Becker and - Linux 2.2.x driver by David P. McLean . - - This software may be used and distributed according to the terms of - the GNU General Public License (GPL), incorporated herein by reference. - Drivers based on or derived from this code fall under the GPL and must - retain the authorship, copyright and license notice. This file is not - a complete program and may only be used when the entire operating - system is licensed under the GPL. - - This software is available on a public web site. It may enable - cryptographic capabilities of the 3Com hardware, and may be - exported from the United States under License Exception "TSU" - pursuant to 15 C.F.R. Section 740.13(e). 
- - This work was funded by the National Library of Medicine under - the Department of Energy project number 0274DD06D1 and NLM project - number Y1-LM-2015-01. - - This driver is designed for the 3Com 3CR990 Family of cards with the - 3XP Processor. It has been tested on x86 and sparc64. - - KNOWN ISSUES: - *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware - issue. Hopefully 3Com will fix it. - *) Waiting for a command response takes 8ms due to non-preemptable - polling. Only significant for getting stats and creating - SAs, but an ugly wart never the less. - - TODO: - *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming. - *) Add more support for ethtool (especially for NIC stats) - *) Allow disabling of RX checksum offloading - *) Fix MAC changing to work while the interface is up - (Need to put commands on the TX ring, which changes - the locking) - *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See - http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org -*/ - -/* Set the copy breakpoint for the copy-only-tiny-frames scheme. - * Setting to > 1518 effectively disables this feature. - */ -static int rx_copybreak = 200; - -/* Should we use MMIO or Port IO? - * 0: Port IO - * 1: MMIO - * 2: Try MMIO, fallback to Port IO - */ -static unsigned int use_mmio = 2; - -/* end user-configurable values */ - -/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). - */ -static const int multicast_filter_limit = 32; - -/* Operational parameters that are set at compile time. */ - -/* Keep the ring sizes a power of two for compile efficiency. - * The compiler will convert '%'<2^N> into a bit mask. - * Making the Tx ring too large decreases the effectiveness of channel - * bonding and packet priority. - * There are no ill effects from too-large receive rings. - * - * We don't currently use the Hi Tx ring so, don't make it very big. - * - * Beware that if we start using the Hi Tx ring, we will need to change - * typhoon_num_free_tx() and typhoon_tx_complete() to account for that. - */ -#define TXHI_ENTRIES 2 -#define TXLO_ENTRIES 128 -#define RX_ENTRIES 32 -#define COMMAND_ENTRIES 16 -#define RESPONSE_ENTRIES 32 - -#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) -#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) - -/* The 3XP will preload and remove 64 entries from the free buffer - * list, and we need one entry to keep the ring from wrapping, so - * to keep this a power of two, we use 128 entries. - */ -#define RXFREE_ENTRIES 128 -#define RXENT_ENTRIES (RXFREE_ENTRIES - 1) - -/* Operational parameters that usually are not changed. */ - -/* Time in jiffies before concluding the transmitter is hung. */ -#define TX_TIMEOUT (2*HZ) - -#define PKT_BUF_SZ 1536 -#define FIRMWARE_NAME "3com/typhoon.bin" - -#define pr_fmt(fmt) KBUILD_MODNAME " " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "typhoon.h" - -MODULE_AUTHOR("David Dillow "); -MODULE_VERSION("1.0"); -MODULE_LICENSE("GPL"); -MODULE_FIRMWARE(FIRMWARE_NAME); -MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); -MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and " - "the buffer given back to the NIC. 
Default " - "is 200."); -MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. " - "Default is to try MMIO and fallback to PIO."); -module_param(rx_copybreak, int, 0); -module_param(use_mmio, int, 0); - -#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 -#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO -#undef NETIF_F_TSO -#endif - -#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) -#error TX ring too small! -#endif - -struct typhoon_card_info { - const char *name; - const int capabilities; -}; - -#define TYPHOON_CRYPTO_NONE 0x00 -#define TYPHOON_CRYPTO_DES 0x01 -#define TYPHOON_CRYPTO_3DES 0x02 -#define TYPHOON_CRYPTO_VARIABLE 0x04 -#define TYPHOON_FIBER 0x08 -#define TYPHOON_WAKEUP_NEEDS_RESET 0x10 - -enum typhoon_cards { - TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR, - TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR, - TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR, - TYPHOON_FXM, -}; - -/* directly indexed by enum typhoon_cards, above */ -static struct typhoon_card_info typhoon_card_info[] __devinitdata = { - { "3Com Typhoon (3C990-TX)", - TYPHOON_CRYPTO_NONE}, - { "3Com Typhoon (3CR990-TX-95)", - TYPHOON_CRYPTO_DES}, - { "3Com Typhoon (3CR990-TX-97)", - TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES}, - { "3Com Typhoon (3C990SVR)", - TYPHOON_CRYPTO_NONE}, - { "3Com Typhoon (3CR990SVR95)", - TYPHOON_CRYPTO_DES}, - { "3Com Typhoon (3CR990SVR97)", - TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES}, - { "3Com Typhoon2 (3C990B-TX-M)", - TYPHOON_CRYPTO_VARIABLE}, - { "3Com Typhoon2 (3C990BSVR)", - TYPHOON_CRYPTO_VARIABLE}, - { "3Com Typhoon (3CR990-FX-95)", - TYPHOON_CRYPTO_DES | TYPHOON_FIBER}, - { "3Com Typhoon (3CR990-FX-97)", - TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER}, - { "3Com Typhoon (3CR990-FX-95 Server)", - TYPHOON_CRYPTO_DES | TYPHOON_FIBER}, - { "3Com Typhoon (3CR990-FX-97 Server)", - TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER}, - { "3Com Typhoon2 (3C990B-FX-97)", - TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER}, -}; - -/* Notes on the new subsystem numbering scheme: - * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES - * bit 4 indicates if this card has secured firmware (we don't support it) - * bit 8 indicates if this is a (0) copper or (1) fiber card - * bits 12-16 indicate card type: (0) client and (1) server - */ -static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = { - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990, - PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B, - PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B, - PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B, - PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, - PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, - PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, - PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, - PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 }, - { PCI_VENDOR_ID_3COM, 
PCI_DEVICE_ID_3COM_3CR990SVR97, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 }, - { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR }, - { 0, } -}; -MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl); - -/* Define the shared memory area - * Align everything the 3XP will normally be using. - * We'll need to move/align txHi if we start using that ring. - */ -#define __3xp_aligned ____cacheline_aligned -struct typhoon_shared { - struct typhoon_interface iface; - struct typhoon_indexes indexes __3xp_aligned; - struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned; - struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned; - struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned; - struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned; - struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned; - struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned; - u32 zeroWord; - struct tx_desc txHi[TXHI_ENTRIES]; -} __packed; - -struct rxbuff_ent { - struct sk_buff *skb; - dma_addr_t dma_addr; -}; - -struct typhoon { - /* Tx cache line section */ - struct transmit_ring txLoRing ____cacheline_aligned; - struct pci_dev * tx_pdev; - void __iomem *tx_ioaddr; - u32 txlo_dma_addr; - - /* Irq/Rx cache line section */ - void __iomem *ioaddr ____cacheline_aligned; - struct typhoon_indexes *indexes; - u8 awaiting_resp; - u8 duplex; - u8 speed; - u8 card_state; - struct basic_ring rxLoRing; - struct pci_dev * pdev; - struct net_device * dev; - struct napi_struct napi; - struct basic_ring rxHiRing; - struct basic_ring rxBuffRing; - struct rxbuff_ent rxbuffers[RXENT_ENTRIES]; - - /* general section */ - spinlock_t command_lock ____cacheline_aligned; - struct basic_ring cmdRing; - struct basic_ring respRing; - struct net_device_stats stats; - struct net_device_stats stats_saved; - struct typhoon_shared * shared; - dma_addr_t shared_dma; - __le16 xcvr_select; - __le16 wol_events; - __le32 offload; - - /* unused stuff (future use) */ - int capabilities; - struct transmit_ring txHiRing; -}; - -enum completion_wait_values { - NoWait = 0, WaitNoSleep, WaitSleep, -}; - -/* These are the values for the typhoon.card_state variable. - * These determine where the statistics will come from in get_stats(). - * The sleep image does not support the statistics we need. - */ -enum state_values { - Sleeping = 0, Running, -}; - -/* PCI writes are not guaranteed to be posted in order, but outstanding writes - * cannot pass a read, so this forces current writes to post. - */ -#define typhoon_post_pci_writes(x) \ - do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) - -/* We'll wait up to six seconds for a reset, and half a second normally. 
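- * The busy-wait paths poll in TYPHOON_UDELAY (50us) steps; the sleeping path reschedules one jiffy at a time.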
- */ -#define TYPHOON_UDELAY 50 -#define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) -#define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) -#define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) - -#if defined(NETIF_F_TSO) -#define skb_tso_size(x) (skb_shinfo(x)->gso_size) -#define TSO_NUM_DESCRIPTORS 2 -#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT -#else -#define NETIF_F_TSO 0 -#define skb_tso_size(x) 0 -#define TSO_NUM_DESCRIPTORS 0 -#define TSO_OFFLOAD_ON 0 -#endif - -static inline void -typhoon_inc_index(u32 *index, const int count, const int num_entries) -{ - /* Increment a ring index -- we can use this for all rings except - * the Rx rings, as they use different size descriptors; - * otherwise, everything is the same size as a cmd_desc - */ - *index += count * sizeof(struct cmd_desc); - *index %= num_entries * sizeof(struct cmd_desc); -} - -static inline void -typhoon_inc_cmd_index(u32 *index, const int count) -{ - typhoon_inc_index(index, count, COMMAND_ENTRIES); -} - -static inline void -typhoon_inc_resp_index(u32 *index, const int count) -{ - typhoon_inc_index(index, count, RESPONSE_ENTRIES); -} - -static inline void -typhoon_inc_rxfree_index(u32 *index, const int count) -{ - typhoon_inc_index(index, count, RXFREE_ENTRIES); -} - -static inline void -typhoon_inc_tx_index(u32 *index, const int count) -{ - /* if we start using the Hi Tx ring, this needs updating */ - typhoon_inc_index(index, count, TXLO_ENTRIES); -} - -static inline void -typhoon_inc_rx_index(u32 *index, const int count) -{ - /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */ - *index += count * sizeof(struct rx_desc); - *index %= RX_ENTRIES * sizeof(struct rx_desc); -} - -static int -typhoon_reset(void __iomem *ioaddr, int wait_type) -{ - int i, err = 0; - int timeout; - - if(wait_type == WaitNoSleep) - timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP; - else - timeout = TYPHOON_RESET_TIMEOUT_SLEEP; - - iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); - iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); - - iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET); - typhoon_post_pci_writes(ioaddr); - udelay(1); - iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET); - - if(wait_type != NoWait) { - for(i = 0; i < timeout; i++) { - if(ioread32(ioaddr + TYPHOON_REG_STATUS) == - TYPHOON_STATUS_WAITING_FOR_HOST) - goto out; - - if(wait_type == WaitSleep) - schedule_timeout_uninterruptible(1); - else - udelay(TYPHOON_UDELAY); - } - - err = -ETIMEDOUT; - } - -out: - iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); - iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); - - /* The 3XP seems to need a little extra time to complete the load - * of the sleep image before we can reliably boot it. Failure to - * do this occasionally results in a hung adapter after boot in - * typhoon_init_one() while trying to read the MAC address or - * putting the card to sleep. 3Com's driver waits 5ms, but - * that seems to be overkill. However, if we can sleep, we might - * as well give it that much time. Otherwise, we'll give it 500us, - * which should be enough (I've seen it work well at 100us, but still - * saw occasional problems.)
- */ - if(wait_type == WaitSleep) - msleep(5); - else - udelay(500); - return err; -} - -static int -typhoon_wait_status(void __iomem *ioaddr, u32 wait_value) -{ - int i, err = 0; - - for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { - if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value) - goto out; - udelay(TYPHOON_UDELAY); - } - - err = -ETIMEDOUT; - -out: - return err; -} - -static inline void -typhoon_media_status(struct net_device *dev, struct resp_desc *resp) -{ - if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK) - netif_carrier_off(dev); - else - netif_carrier_on(dev); -} - -static inline void -typhoon_hello(struct typhoon *tp) -{ - struct basic_ring *ring = &tp->cmdRing; - struct cmd_desc *cmd; - - /* We only get a hello request if we've not sent anything to the - * card in a long while. If the lock is held, then we're in the - * process of issuing a command, so we don't need to respond. - */ - if(spin_trylock(&tp->command_lock)) { - cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite); - typhoon_inc_cmd_index(&ring->lastWrite, 1); - - INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP); - wmb(); - iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); - spin_unlock(&tp->command_lock); - } -} - -static int -typhoon_process_response(struct typhoon *tp, int resp_size, - struct resp_desc *resp_save) -{ - struct typhoon_indexes *indexes = tp->indexes; - struct resp_desc *resp; - u8 *base = tp->respRing.ringBase; - int count, len, wrap_len; - u32 cleared; - u32 ready; - - cleared = le32_to_cpu(indexes->respCleared); - ready = le32_to_cpu(indexes->respReady); - while(cleared != ready) { - resp = (struct resp_desc *)(base + cleared); - count = resp->numDesc + 1; - if(resp_save && resp->seqNo) { - if(count > resp_size) { - resp_save->flags = TYPHOON_RESP_ERROR; - goto cleanup; - } - - wrap_len = 0; - len = count * sizeof(*resp); - if(unlikely(cleared + len > RESPONSE_RING_SIZE)) { - wrap_len = cleared + len - RESPONSE_RING_SIZE; - len = RESPONSE_RING_SIZE - cleared; - } - - memcpy(resp_save, resp, len); - if(unlikely(wrap_len)) { - resp_save += len / sizeof(*resp); - memcpy(resp_save, base, wrap_len); - } - - resp_save = NULL; - } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) { - typhoon_media_status(tp->dev, resp); - } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) { - typhoon_hello(tp); - } else { - netdev_err(tp->dev, - "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n", - le16_to_cpu(resp->cmd), - resp->numDesc, resp->flags, - le16_to_cpu(resp->parm1), - le32_to_cpu(resp->parm2), - le32_to_cpu(resp->parm3)); - } - -cleanup: - typhoon_inc_resp_index(&cleared, count); - } - - indexes->respCleared = cpu_to_le32(cleared); - wmb(); - return resp_save == NULL; -} - -static inline int -typhoon_num_free(int lastWrite, int lastRead, int ringSize) -{ - /* this works for all descriptors but rx_desc, as they are a - * different size than the cmd_desc -- everyone else is the same - */ - lastWrite /= sizeof(struct cmd_desc); - lastRead /= sizeof(struct cmd_desc); - return (ringSize + lastRead - lastWrite - 1) % ringSize; -} - -static inline int -typhoon_num_free_cmd(struct typhoon *tp) -{ - int lastWrite = tp->cmdRing.lastWrite; - int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared); - - return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES); -} - -static inline int -typhoon_num_free_resp(struct typhoon *tp) -{ - int respReady = le32_to_cpu(tp->indexes->respReady); - int respCleared = le32_to_cpu(tp->indexes->respCleared); - - return typhoon_num_free(respReady, 
respCleared, RESPONSE_ENTRIES); -} - -static inline int -typhoon_num_free_tx(struct transmit_ring *ring) -{ - /* if we start using the Hi Tx ring, this needs updating */ - return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES); -} - -static int -typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd, - int num_resp, struct resp_desc *resp) -{ - struct typhoon_indexes *indexes = tp->indexes; - struct basic_ring *ring = &tp->cmdRing; - struct resp_desc local_resp; - int i, err = 0; - int got_resp; - int freeCmd, freeResp; - int len, wrap_len; - - spin_lock(&tp->command_lock); - - freeCmd = typhoon_num_free_cmd(tp); - freeResp = typhoon_num_free_resp(tp); - - if(freeCmd < num_cmd || freeResp < num_resp) { - netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n", - freeCmd, num_cmd, freeResp, num_resp); - err = -ENOMEM; - goto out; - } - - if(cmd->flags & TYPHOON_CMD_RESPOND) { - /* If we're expecting a response, but the caller hasn't given - * us a place to put it, we'll provide one. - */ - tp->awaiting_resp = 1; - if(resp == NULL) { - resp = &local_resp; - num_resp = 1; - } - } - - wrap_len = 0; - len = num_cmd * sizeof(*cmd); - if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) { - wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE; - len = COMMAND_RING_SIZE - ring->lastWrite; - } - - memcpy(ring->ringBase + ring->lastWrite, cmd, len); - if(unlikely(wrap_len)) { - struct cmd_desc *wrap_ptr = cmd; - wrap_ptr += len / sizeof(*cmd); - memcpy(ring->ringBase, wrap_ptr, wrap_len); - } - - typhoon_inc_cmd_index(&ring->lastWrite, num_cmd); - - /* "I feel a presence... another warrior is on the mesa." - */ - wmb(); - iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); - typhoon_post_pci_writes(tp->ioaddr); - - if((cmd->flags & TYPHOON_CMD_RESPOND) == 0) - goto out; - - /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to - * preempt or do anything other than take interrupts. So, don't - * wait for a response unless you have to. - * - * I've thought about trying to sleep here, but we're called - * from many contexts that don't allow that. Also, given the way - * 3Com has implemented irq coalescing, we would likely timeout -- - * this has been observed in real life! - * - * The big killer is we have to wait to get stats from the card, - * though we could go to a periodic refresh of those if we don't - * mind them getting somewhat stale. The rest of the waiting - * commands occur during open/close/suspend/resume, so they aren't - * time critical. Creating SAs in the future will also have to - * wait here. - */ - got_resp = 0; - for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) { - if(indexes->respCleared != indexes->respReady) - got_resp = typhoon_process_response(tp, num_resp, - resp); - udelay(TYPHOON_UDELAY); - } - - if(!got_resp) { - err = -ETIMEDOUT; - goto out; - } - - /* Collect the error response even if we don't care about the - * rest of the response - */ - if(resp->flags & TYPHOON_RESP_ERROR) - err = -EIO; - -out: - if(tp->awaiting_resp) { - tp->awaiting_resp = 0; - smp_wmb(); - - /* Ugh. If a response was added to the ring between - * the call to typhoon_process_response() and the clearing - * of tp->awaiting_resp, we could have missed the interrupt - * and it could hang in the ring an indeterminate amount of - * time. So, check for it, and interrupt ourselves if this - * is the case. 
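- * The write to TYPHOON_REG_SELF_INTERRUPT below raises the interrupt by hand.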
- */ - if(indexes->respCleared != indexes->respReady) - iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT); - } - - spin_unlock(&tp->command_lock); - return err; -} - -static inline void -typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing, - u32 ring_dma) -{ - struct tcpopt_desc *tcpd; - u32 tcpd_offset = ring_dma; - - tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite); - tcpd_offset += txRing->lastWrite; - tcpd_offset += offsetof(struct tcpopt_desc, bytesTx); - typhoon_inc_tx_index(&txRing->lastWrite, 1); - - tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG; - tcpd->numDesc = 1; - tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb)); - tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST; - tcpd->respAddrLo = cpu_to_le32(tcpd_offset); - tcpd->bytesTx = cpu_to_le32(skb->len); - tcpd->status = 0; -} - -static netdev_tx_t -typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) -{ - struct typhoon *tp = netdev_priv(dev); - struct transmit_ring *txRing; - struct tx_desc *txd, *first_txd; - dma_addr_t skb_dma; - int numDesc; - - /* we have two rings to choose from, but we only use txLo for now - * If we start using the Hi ring as well, we'll need to update - * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(), - * and TXHI_ENTRIES to match, as well as update the TSO code below - * to get the right DMA address - */ - txRing = &tp->txLoRing; - - /* We need one descriptor for each fragment of the sk_buff, plus the - * one for the ->data area of it. - * - * The docs say a maximum of 16 fragment descriptors per TCP option - * descriptor, then make a new packet descriptor and option descriptor - * for the next 16 fragments. The engineers say just an option - * descriptor is needed. I've tested up to 26 fragments with a single - * packet descriptor/option descriptor combo, so I use that for now. - * - * If problems develop with TSO, check this first. - */ - numDesc = skb_shinfo(skb)->nr_frags + 1; - if (skb_is_gso(skb)) - numDesc++; - - /* When checking for free space in the ring, we need to also - * account for the initial Tx descriptor, and we always must leave - * at least one descriptor unused in the ring so that it doesn't - * wrap and look empty. - * - * The only time we should loop here is when we hit the race - * between marking the queue awake and updating the cleared index. - * Just loop and it will appear. This comes from the acenic driver. 
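- * The smp_rmb() in the loop below forces a fresh read of the cleared index on each pass.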
- */ - while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2))) - smp_rmb(); - - first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite); - typhoon_inc_tx_index(&txRing->lastWrite, 1); - - first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID; - first_txd->numDesc = 0; - first_txd->len = 0; - first_txd->tx_addr = (u64)((unsigned long) skb); - first_txd->processFlags = 0; - - if(skb->ip_summed == CHECKSUM_PARTIAL) { - /* The 3XP will figure out if this is UDP/TCP */ - first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM; - first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM; - first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM; - } - - if(vlan_tx_tag_present(skb)) { - first_txd->processFlags |= - TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY; - first_txd->processFlags |= - cpu_to_le32(htons(vlan_tx_tag_get(skb)) << - TYPHOON_TX_PF_VLAN_TAG_SHIFT); - } - - if (skb_is_gso(skb)) { - first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; - first_txd->numDesc++; - - typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr); - } - - txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite); - typhoon_inc_tx_index(&txRing->lastWrite, 1); - - /* No need to worry about padding packet -- the firmware pads - * it with zeros to ETH_ZLEN for us. - */ - if(skb_shinfo(skb)->nr_frags == 0) { - skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len, - PCI_DMA_TODEVICE); - txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID; - txd->len = cpu_to_le16(skb->len); - txd->frag.addr = cpu_to_le32(skb_dma); - txd->frag.addrHi = 0; - first_txd->numDesc++; - } else { - int i, len; - - len = skb_headlen(skb); - skb_dma = pci_map_single(tp->tx_pdev, skb->data, len, - PCI_DMA_TODEVICE); - txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID; - txd->len = cpu_to_le16(len); - txd->frag.addr = cpu_to_le32(skb_dma); - txd->frag.addrHi = 0; - first_txd->numDesc++; - - for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - void *frag_addr; - - txd = (struct tx_desc *) (txRing->ringBase + - txRing->lastWrite); - typhoon_inc_tx_index(&txRing->lastWrite, 1); - - len = frag->size; - frag_addr = (void *) page_address(frag->page) + - frag->page_offset; - skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len, - PCI_DMA_TODEVICE); - txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID; - txd->len = cpu_to_le16(len); - txd->frag.addr = cpu_to_le32(skb_dma); - txd->frag.addrHi = 0; - first_txd->numDesc++; - } - } - - /* Kick the 3XP - */ - wmb(); - iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister); - - /* If we don't have room to put the worst case packet on the - * queue, then we must stop the queue. We need 2 extra - * descriptors -- one to prevent ring wrap, and one for the - * Tx header. - */ - numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1; - - if(typhoon_num_free_tx(txRing) < (numDesc + 2)) { - netif_stop_queue(dev); - - /* A Tx complete IRQ could have gotten between, making - * the ring free again. Only need to recheck here, since - * Tx is serialized. 
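- * If the recheck finds room, wake the queue right back up.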
- */ - if(typhoon_num_free_tx(txRing) >= (numDesc + 2)) - netif_wake_queue(dev); - } - - return NETDEV_TX_OK; -} - -static void -typhoon_set_rx_mode(struct net_device *dev) -{ - struct typhoon *tp = netdev_priv(dev); - struct cmd_desc xp_cmd; - u32 mc_filter[2]; - __le16 filter; - - filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; - if(dev->flags & IFF_PROMISC) { - filter |= TYPHOON_RX_FILTER_PROMISCOUS; - } else if ((netdev_mc_count(dev) > multicast_filter_limit) || - (dev->flags & IFF_ALLMULTI)) { - /* Too many to match, or accept all multicasts. */ - filter |= TYPHOON_RX_FILTER_ALL_MCAST; - } else if (!netdev_mc_empty(dev)) { - struct netdev_hw_addr *ha; - - memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(ha, dev) { - int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f; - mc_filter[bit >> 5] |= 1 << (bit & 0x1f); - } - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, - TYPHOON_CMD_SET_MULTICAST_HASH); - xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET; - xp_cmd.parm2 = cpu_to_le32(mc_filter[0]); - xp_cmd.parm3 = cpu_to_le32(mc_filter[1]); - typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - - filter |= TYPHOON_RX_FILTER_MCAST_HASH; - } - - INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER); - xp_cmd.parm1 = filter; - typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); -} - -static int -typhoon_do_get_stats(struct typhoon *tp) -{ - struct net_device_stats *stats = &tp->stats; - struct net_device_stats *saved = &tp->stats_saved; - struct cmd_desc xp_cmd; - struct resp_desc xp_resp[7]; - struct stats_resp *s = (struct stats_resp *) xp_resp; - int err; - - INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS); - err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp); - if(err < 0) - return err; - - /* 3Com's Linux driver uses txMultipleCollisions as its - * collisions value, but there is some other collision info as well... - * - * The extra status reported would be a good candidate for - * ethtool_ops->get_{strings,stats}() - */ - stats->tx_packets = le32_to_cpu(s->txPackets) + - saved->tx_packets; - stats->tx_bytes = le64_to_cpu(s->txBytes) + - saved->tx_bytes; - stats->tx_errors = le32_to_cpu(s->txCarrierLost) + - saved->tx_errors; - stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) + - saved->tx_carrier_errors; - stats->collisions = le32_to_cpu(s->txMultipleCollisions) + - saved->collisions; - stats->rx_packets = le32_to_cpu(s->rxPacketsGood) + - saved->rx_packets; - stats->rx_bytes = le64_to_cpu(s->rxBytesGood) + - saved->rx_bytes; - stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) + - saved->rx_fifo_errors; - stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) + - le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) + - saved->rx_errors; - stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) + - saved->rx_crc_errors; - stats->rx_length_errors = le32_to_cpu(s->rxOversized) + - saved->rx_length_errors; - tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ? - SPEED_100 : SPEED_10; - tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
- DUPLEX_FULL : DUPLEX_HALF; - - return 0; -} - -static struct net_device_stats * -typhoon_get_stats(struct net_device *dev) -{ - struct typhoon *tp = netdev_priv(dev); - struct net_device_stats *stats = &tp->stats; - struct net_device_stats *saved = &tp->stats_saved; - - smp_rmb(); - if(tp->card_state == Sleeping) - return saved; - - if(typhoon_do_get_stats(tp) < 0) { - netdev_err(dev, "error getting stats\n"); - return saved; - } - - return stats; -} - -static int -typhoon_set_mac_address(struct net_device *dev, void *addr) -{ - struct sockaddr *saddr = (struct sockaddr *) addr; - - if(netif_running(dev)) - return -EBUSY; - - memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len); - return 0; -} - -static void -typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct typhoon *tp = netdev_priv(dev); - struct pci_dev *pci_dev = tp->pdev; - struct cmd_desc xp_cmd; - struct resp_desc xp_resp[3]; - - smp_rmb(); - if(tp->card_state == Sleeping) { - strcpy(info->fw_version, "Sleep image"); - } else { - INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { - strcpy(info->fw_version, "Unknown runtime"); - } else { - u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); - snprintf(info->fw_version, 32, "%02x.%03x.%03x", - sleep_ver >> 24, (sleep_ver >> 12) & 0xfff, - sleep_ver & 0xfff); - } - } - - strcpy(info->driver, KBUILD_MODNAME); - strcpy(info->bus_info, pci_name(pci_dev)); -} - -static int -typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct typhoon *tp = netdev_priv(dev); - - cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg; - - switch (tp->xcvr_select) { - case TYPHOON_XCVR_10HALF: - cmd->advertising = ADVERTISED_10baseT_Half; - break; - case TYPHOON_XCVR_10FULL: - cmd->advertising = ADVERTISED_10baseT_Full; - break; - case TYPHOON_XCVR_100HALF: - cmd->advertising = ADVERTISED_100baseT_Half; - break; - case TYPHOON_XCVR_100FULL: - cmd->advertising = ADVERTISED_100baseT_Full; - break; - case TYPHOON_XCVR_AUTONEG: - cmd->advertising = ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_Autoneg; - break; - } - - if(tp->capabilities & TYPHOON_FIBER) { - cmd->supported |= SUPPORTED_FIBRE; - cmd->advertising |= ADVERTISED_FIBRE; - cmd->port = PORT_FIBRE; - } else { - cmd->supported |= SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_TP; - cmd->advertising |= ADVERTISED_TP; - cmd->port = PORT_TP; - } - - /* need to get stats to make these link speed/duplex valid */ - typhoon_do_get_stats(tp); - ethtool_cmd_speed_set(cmd, tp->speed); - cmd->duplex = tp->duplex; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG) - cmd->autoneg = AUTONEG_ENABLE; - else - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; - - return 0; -} - -static int -typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct typhoon *tp = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(cmd); - struct cmd_desc xp_cmd; - __le16 xcvr; - int err; - - err = -EINVAL; - if (cmd->autoneg == AUTONEG_ENABLE) { - xcvr = TYPHOON_XCVR_AUTONEG; - } else { - if (cmd->duplex == DUPLEX_HALF) { - if (speed == SPEED_10) - xcvr = TYPHOON_XCVR_10HALF; - else if (speed == SPEED_100) - xcvr = TYPHOON_XCVR_100HALF; - else - goto out; - } else if (cmd->duplex == DUPLEX_FULL) { - if (speed == SPEED_10) - xcvr 
= TYPHOON_XCVR_10FULL; - else if (speed == SPEED_100) - xcvr = TYPHOON_XCVR_100FULL; - else - goto out; - } else - goto out; - } - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT); - xp_cmd.parm1 = xcvr; - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) - goto out; - - tp->xcvr_select = xcvr; - if(cmd->autoneg == AUTONEG_ENABLE) { - tp->speed = 0xff; /* invalid */ - tp->duplex = 0xff; /* invalid */ - } else { - tp->speed = speed; - tp->duplex = cmd->duplex; - } - -out: - return err; -} - -static void -typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct typhoon *tp = netdev_priv(dev); - - wol->supported = WAKE_PHY | WAKE_MAGIC; - wol->wolopts = 0; - if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT) - wol->wolopts |= WAKE_PHY; - if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) - wol->wolopts |= WAKE_MAGIC; - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -static int -typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct typhoon *tp = netdev_priv(dev); - - if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) - return -EINVAL; - - tp->wol_events = 0; - if(wol->wolopts & WAKE_PHY) - tp->wol_events |= TYPHOON_WAKE_LINK_EVENT; - if(wol->wolopts & WAKE_MAGIC) - tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT; - - return 0; -} - -static void -typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) -{ - ering->rx_max_pending = RXENT_ENTRIES; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = 0; - ering->tx_max_pending = TXLO_ENTRIES - 1; - - ering->rx_pending = RXENT_ENTRIES; - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = 0; - ering->tx_pending = TXLO_ENTRIES - 1; -} - -static const struct ethtool_ops typhoon_ethtool_ops = { - .get_settings = typhoon_get_settings, - .set_settings = typhoon_set_settings, - .get_drvinfo = typhoon_get_drvinfo, - .get_wol = typhoon_get_wol, - .set_wol = typhoon_set_wol, - .get_link = ethtool_op_get_link, - .get_ringparam = typhoon_get_ringparam, -}; - -static int -typhoon_wait_interrupt(void __iomem *ioaddr) -{ - int i, err = 0; - - for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { - if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) & - TYPHOON_INTR_BOOTCMD) - goto out; - udelay(TYPHOON_UDELAY); - } - - err = -ETIMEDOUT; - -out: - iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS); - return err; -} - -#define shared_offset(x) offsetof(struct typhoon_shared, x) - -static void -typhoon_init_interface(struct typhoon *tp) -{ - struct typhoon_interface *iface = &tp->shared->iface; - dma_addr_t shared_dma; - - memset(tp->shared, 0, sizeof(struct typhoon_shared)); - - /* The *Hi members of iface are all init'd to zero by the memset(). 
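- * Only the 32-bit low halves of the ring addresses are filled in below.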
- */ - shared_dma = tp->shared_dma + shared_offset(indexes); - iface->ringIndex = cpu_to_le32(shared_dma); - - shared_dma = tp->shared_dma + shared_offset(txLo); - iface->txLoAddr = cpu_to_le32(shared_dma); - iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc)); - - shared_dma = tp->shared_dma + shared_offset(txHi); - iface->txHiAddr = cpu_to_le32(shared_dma); - iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc)); - - shared_dma = tp->shared_dma + shared_offset(rxBuff); - iface->rxBuffAddr = cpu_to_le32(shared_dma); - iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES * - sizeof(struct rx_free)); - - shared_dma = tp->shared_dma + shared_offset(rxLo); - iface->rxLoAddr = cpu_to_le32(shared_dma); - iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc)); - - shared_dma = tp->shared_dma + shared_offset(rxHi); - iface->rxHiAddr = cpu_to_le32(shared_dma); - iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc)); - - shared_dma = tp->shared_dma + shared_offset(cmd); - iface->cmdAddr = cpu_to_le32(shared_dma); - iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE); - - shared_dma = tp->shared_dma + shared_offset(resp); - iface->respAddr = cpu_to_le32(shared_dma); - iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE); - - shared_dma = tp->shared_dma + shared_offset(zeroWord); - iface->zeroAddr = cpu_to_le32(shared_dma); - - tp->indexes = &tp->shared->indexes; - tp->txLoRing.ringBase = (u8 *) tp->shared->txLo; - tp->txHiRing.ringBase = (u8 *) tp->shared->txHi; - tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo; - tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi; - tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff; - tp->cmdRing.ringBase = (u8 *) tp->shared->cmd; - tp->respRing.ringBase = (u8 *) tp->shared->resp; - - tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY; - tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY; - - tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr); - tp->card_state = Sleeping; - - tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM; - tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON; - tp->offload |= TYPHOON_OFFLOAD_VLAN; - - spin_lock_init(&tp->command_lock); - - /* Force the writes to the shared memory area out before continuing. 
*/ - wmb(); -} - -static void -typhoon_init_rings(struct typhoon *tp) -{ - memset(tp->indexes, 0, sizeof(struct typhoon_indexes)); - - tp->txLoRing.lastWrite = 0; - tp->txHiRing.lastWrite = 0; - tp->rxLoRing.lastWrite = 0; - tp->rxHiRing.lastWrite = 0; - tp->rxBuffRing.lastWrite = 0; - tp->cmdRing.lastWrite = 0; - tp->respRing.lastWrite = 0; - - tp->txLoRing.lastRead = 0; - tp->txHiRing.lastRead = 0; -} - -static const struct firmware *typhoon_fw; - -static int -typhoon_request_firmware(struct typhoon *tp) -{ - const struct typhoon_file_header *fHdr; - const struct typhoon_section_header *sHdr; - const u8 *image_data; - u32 numSections; - u32 section_len; - u32 remaining; - int err; - - if (typhoon_fw) - return 0; - - err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev); - if (err) { - netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", - FIRMWARE_NAME); - return err; - } - - image_data = (u8 *) typhoon_fw->data; - remaining = typhoon_fw->size; - if (remaining < sizeof(struct typhoon_file_header)) - goto invalid_fw; - - fHdr = (struct typhoon_file_header *) image_data; - if (memcmp(fHdr->tag, "TYPHOON", 8)) - goto invalid_fw; - - numSections = le32_to_cpu(fHdr->numSections); - image_data += sizeof(struct typhoon_file_header); - remaining -= sizeof(struct typhoon_file_header); - - while (numSections--) { - if (remaining < sizeof(struct typhoon_section_header)) - goto invalid_fw; - - sHdr = (struct typhoon_section_header *) image_data; - image_data += sizeof(struct typhoon_section_header); - section_len = le32_to_cpu(sHdr->len); - - if (remaining < section_len) - goto invalid_fw; - - image_data += section_len; - remaining -= section_len; - } - - return 0; - -invalid_fw: - netdev_err(tp->dev, "Invalid firmware image\n"); - release_firmware(typhoon_fw); - typhoon_fw = NULL; - return -EINVAL; -} - -static int -typhoon_download_firmware(struct typhoon *tp) -{ - void __iomem *ioaddr = tp->ioaddr; - struct pci_dev *pdev = tp->pdev; - const struct typhoon_file_header *fHdr; - const struct typhoon_section_header *sHdr; - const u8 *image_data; - void *dpage; - dma_addr_t dpage_dma; - __sum16 csum; - u32 irqEnabled; - u32 irqMasked; - u32 numSections; - u32 section_len; - u32 len; - u32 load_addr; - u32 hmac; - int i; - int err; - - image_data = (u8 *) typhoon_fw->data; - fHdr = (struct typhoon_file_header *) image_data; - - /* Cannot just map the firmware image using pci_map_single() as - * the firmware is vmalloc()'d and may not be physically contiguous, - * so we allocate some consistent memory to copy the sections into. 
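- * A single PAGE_SIZE bounce buffer is enough, since sections are streamed to the card at most a page at a time.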
- */ - err = -ENOMEM; - dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma); - if(!dpage) { - netdev_err(tp->dev, "no DMA mem for firmware\n"); - goto err_out; - } - - irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE); - iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD, - ioaddr + TYPHOON_REG_INTR_ENABLE); - irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK); - iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD, - ioaddr + TYPHOON_REG_INTR_MASK); - - err = -ETIMEDOUT; - if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { - netdev_err(tp->dev, "card ready timeout\n"); - goto err_out_irq; - } - - numSections = le32_to_cpu(fHdr->numSections); - load_addr = le32_to_cpu(fHdr->startAddr); - - iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS); - iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR); - hmac = le32_to_cpu(fHdr->hmacDigest[0]); - iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0); - hmac = le32_to_cpu(fHdr->hmacDigest[1]); - iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1); - hmac = le32_to_cpu(fHdr->hmacDigest[2]); - iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2); - hmac = le32_to_cpu(fHdr->hmacDigest[3]); - iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3); - hmac = le32_to_cpu(fHdr->hmacDigest[4]); - iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4); - typhoon_post_pci_writes(ioaddr); - iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND); - - image_data += sizeof(struct typhoon_file_header); - - /* The ioread32() in typhoon_wait_interrupt() will force the - * last write to the command register to post, so - * we don't need a typhoon_post_pci_writes() after it. - */ - for(i = 0; i < numSections; i++) { - sHdr = (struct typhoon_section_header *) image_data; - image_data += sizeof(struct typhoon_section_header); - load_addr = le32_to_cpu(sHdr->startAddr); - section_len = le32_to_cpu(sHdr->len); - - while(section_len) { - len = min_t(u32, section_len, PAGE_SIZE); - - if(typhoon_wait_interrupt(ioaddr) < 0 || - ioread32(ioaddr + TYPHOON_REG_STATUS) != - TYPHOON_STATUS_WAITING_FOR_SEGMENT) { - netdev_err(tp->dev, "segment ready timeout\n"); - goto err_out_irq; - } - - /* Do a pseudo IPv4 checksum on the data -- first - * need to convert each u16 to cpu order before - * summing. Fortunately, due to the properties of - * the checksum, we can do this once, at the end.
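- * csum_fold() below collapses the 32-bit running sum into the final 16-bit value.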
- */ - csum = csum_fold(csum_partial_copy_nocheck(image_data, - dpage, len, - 0)); - - iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH); - iowrite32(le16_to_cpu((__force __le16)csum), - ioaddr + TYPHOON_REG_BOOT_CHECKSUM); - iowrite32(load_addr, - ioaddr + TYPHOON_REG_BOOT_DEST_ADDR); - iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI); - iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO); - typhoon_post_pci_writes(ioaddr); - iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE, - ioaddr + TYPHOON_REG_COMMAND); - - image_data += len; - load_addr += len; - section_len -= len; - } - } - - if(typhoon_wait_interrupt(ioaddr) < 0 || - ioread32(ioaddr + TYPHOON_REG_STATUS) != - TYPHOON_STATUS_WAITING_FOR_SEGMENT) { - netdev_err(tp->dev, "final segment ready timeout\n"); - goto err_out_irq; - } - - iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND); - - if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) { - netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n", - ioread32(ioaddr + TYPHOON_REG_STATUS)); - goto err_out_irq; - } - - err = 0; - -err_out_irq: - iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK); - iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE); - - pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma); - -err_out: - return err; -} - -static int -typhoon_boot_3XP(struct typhoon *tp, u32 initial_status) -{ - void __iomem *ioaddr = tp->ioaddr; - - if(typhoon_wait_status(ioaddr, initial_status) < 0) { - netdev_err(tp->dev, "boot ready timeout\n"); - goto out_timeout; - } - - iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI); - iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO); - typhoon_post_pci_writes(ioaddr); - iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD, - ioaddr + TYPHOON_REG_COMMAND); - - if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) { - netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n", - ioread32(ioaddr + TYPHOON_REG_STATUS)); - goto out_timeout; - } - - /* Clear the Transmit and Command ready registers - */ - iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY); - iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY); - iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY); - typhoon_post_pci_writes(ioaddr); - iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND); - - return 0; - -out_timeout: - return -ETIMEDOUT; -} - -static u32 -typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing, - volatile __le32 * index) -{ - u32 lastRead = txRing->lastRead; - struct tx_desc *tx; - dma_addr_t skb_dma; - int dma_len; - int type; - - while(lastRead != le32_to_cpu(*index)) { - tx = (struct tx_desc *) (txRing->ringBase + lastRead); - type = tx->flags & TYPHOON_TYPE_MASK; - - if(type == TYPHOON_TX_DESC) { - /* This tx_desc describes a packet. - */ - unsigned long ptr = tx->tx_addr; - struct sk_buff *skb = (struct sk_buff *) ptr; - dev_kfree_skb_irq(skb); - } else if(type == TYPHOON_FRAG_DESC) { - /* This tx_desc describes a memory mapping. Free it. - */ - skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr); - dma_len = le16_to_cpu(tx->len); - pci_unmap_single(tp->pdev, skb_dma, dma_len, - PCI_DMA_TODEVICE); - } - - tx->flags = 0; - typhoon_inc_tx_index(&lastRead, 1); - } - - return lastRead; -} - -static void -typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing, - volatile __le32 * index) -{ - u32 lastRead; - int numDesc = MAX_SKB_FRAGS + 1; - - /* This will need changing if we start to use the Hi Tx ring. 
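- * Both the free-space math and the TXLO_ENTRIES constant below assume the Lo ring.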
*/ - lastRead = typhoon_clean_tx(tp, txRing, index); - if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite, - lastRead, TXLO_ENTRIES) > (numDesc + 2)) - netif_wake_queue(tp->dev); - - txRing->lastRead = lastRead; - smp_wmb(); -} - -static void -typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx) -{ - struct typhoon_indexes *indexes = tp->indexes; - struct rxbuff_ent *rxb = &tp->rxbuffers[idx]; - struct basic_ring *ring = &tp->rxBuffRing; - struct rx_free *r; - - if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) == - le32_to_cpu(indexes->rxBuffCleared)) { - /* no room in ring, just drop the skb - */ - dev_kfree_skb_any(rxb->skb); - rxb->skb = NULL; - return; - } - - r = (struct rx_free *) (ring->ringBase + ring->lastWrite); - typhoon_inc_rxfree_index(&ring->lastWrite, 1); - r->virtAddr = idx; - r->physAddr = cpu_to_le32(rxb->dma_addr); - - /* Tell the card about it */ - wmb(); - indexes->rxBuffReady = cpu_to_le32(ring->lastWrite); -} - -static int -typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx) -{ - struct typhoon_indexes *indexes = tp->indexes; - struct rxbuff_ent *rxb = &tp->rxbuffers[idx]; - struct basic_ring *ring = &tp->rxBuffRing; - struct rx_free *r; - struct sk_buff *skb; - dma_addr_t dma_addr; - - rxb->skb = NULL; - - if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) == - le32_to_cpu(indexes->rxBuffCleared)) - return -ENOMEM; - - skb = dev_alloc_skb(PKT_BUF_SZ); - if(!skb) - return -ENOMEM; - -#if 0 - /* Please, 3com, fix the firmware to allow DMA to an unaligned - * address! Pretty please? - */ - skb_reserve(skb, 2); -#endif - - skb->dev = tp->dev; - dma_addr = pci_map_single(tp->pdev, skb->data, - PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - - /* Since no card does 64 bit DAC, the high bits will never - * change from zero.
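- * That is why r->physAddr below only carries the low 32 bits of the mapping.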
- */ - r = (struct rx_free *) (ring->ringBase + ring->lastWrite); - typhoon_inc_rxfree_index(&ring->lastWrite, 1); - r->virtAddr = idx; - r->physAddr = cpu_to_le32(dma_addr); - rxb->skb = skb; - rxb->dma_addr = dma_addr; - - /* Tell the card about it */ - wmb(); - indexes->rxBuffReady = cpu_to_le32(ring->lastWrite); - return 0; -} - -static int -typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready, - volatile __le32 * cleared, int budget) -{ - struct rx_desc *rx; - struct sk_buff *skb, *new_skb; - struct rxbuff_ent *rxb; - dma_addr_t dma_addr; - u32 local_ready; - u32 rxaddr; - int pkt_len; - u32 idx; - __le32 csum_bits; - int received; - - received = 0; - local_ready = le32_to_cpu(*ready); - rxaddr = le32_to_cpu(*cleared); - while(rxaddr != local_ready && budget > 0) { - rx = (struct rx_desc *) (rxRing->ringBase + rxaddr); - idx = rx->addr; - rxb = &tp->rxbuffers[idx]; - skb = rxb->skb; - dma_addr = rxb->dma_addr; - - typhoon_inc_rx_index(&rxaddr, 1); - - if(rx->flags & TYPHOON_RX_ERROR) { - typhoon_recycle_rx_skb(tp, idx); - continue; - } - - pkt_len = le16_to_cpu(rx->frameLen); - - if(pkt_len < rx_copybreak && - (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) { - skb_reserve(new_skb, 2); - pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, - PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); - skb_copy_to_linear_data(new_skb, skb->data, pkt_len); - pci_dma_sync_single_for_device(tp->pdev, dma_addr, - PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); - skb_put(new_skb, pkt_len); - typhoon_recycle_rx_skb(tp, idx); - } else { - new_skb = skb; - skb_put(new_skb, pkt_len); - pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); - typhoon_alloc_rx_skb(tp, idx); - } - new_skb->protocol = eth_type_trans(new_skb, tp->dev); - csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD | - TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD); - if(csum_bits == - (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) || - csum_bits == - (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) { - new_skb->ip_summed = CHECKSUM_UNNECESSARY; - } else - skb_checksum_none_assert(new_skb); - - if (rx->rxStatus & TYPHOON_RX_VLAN) - __vlan_hwaccel_put_tag(new_skb, - ntohl(rx->vlanTag) & 0xffff); - netif_receive_skb(new_skb); - - received++; - budget--; - } - *cleared = cpu_to_le32(rxaddr); - - return received; -} - -static void -typhoon_fill_free_ring(struct typhoon *tp) -{ - u32 i; - - for(i = 0; i < RXENT_ENTRIES; i++) { - struct rxbuff_ent *rxb = &tp->rxbuffers[i]; - if(rxb->skb) - continue; - if(typhoon_alloc_rx_skb(tp, i) < 0) - break; - } -} - -static int -typhoon_poll(struct napi_struct *napi, int budget) -{ - struct typhoon *tp = container_of(napi, struct typhoon, napi); - struct typhoon_indexes *indexes = tp->indexes; - int work_done; - - rmb(); - if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared) - typhoon_process_response(tp, 0, NULL); - - if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead) - typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared); - - work_done = 0; - - if(indexes->rxHiCleared != indexes->rxHiReady) { - work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady, - &indexes->rxHiCleared, budget); - } - - if(indexes->rxLoCleared != indexes->rxLoReady) { - work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady, - &indexes->rxLoCleared, budget - work_done); - } - - if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) { - /* rxBuff ring is empty, try to fill it. 
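- * typhoon_fill_free_ring() reposts a buffer for every slot that has no skb attached.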
*/ - typhoon_fill_free_ring(tp); - } - - if (work_done < budget) { - napi_complete(napi); - iowrite32(TYPHOON_INTR_NONE, - tp->ioaddr + TYPHOON_REG_INTR_MASK); - typhoon_post_pci_writes(tp->ioaddr); - } - - return work_done; -} - -static irqreturn_t -typhoon_interrupt(int irq, void *dev_instance) -{ - struct net_device *dev = dev_instance; - struct typhoon *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->ioaddr; - u32 intr_status; - - intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); - if(!(intr_status & TYPHOON_INTR_HOST_INT)) - return IRQ_NONE; - - iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); - - if (napi_schedule_prep(&tp->napi)) { - iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); - typhoon_post_pci_writes(ioaddr); - __napi_schedule(&tp->napi); - } else { - netdev_err(dev, "Error, poll already scheduled\n"); - } - return IRQ_HANDLED; -} - -static void -typhoon_free_rx_rings(struct typhoon *tp) -{ - u32 i; - - for(i = 0; i < RXENT_ENTRIES; i++) { - struct rxbuff_ent *rxb = &tp->rxbuffers[i]; - if(rxb->skb) { - pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); - dev_kfree_skb(rxb->skb); - rxb->skb = NULL; - } - } -} - -static int -typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events) -{ - struct pci_dev *pdev = tp->pdev; - void __iomem *ioaddr = tp->ioaddr; - struct cmd_desc xp_cmd; - int err; - - INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS); - xp_cmd.parm1 = events; - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) { - netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n", - err); - return err; - } - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP); - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) { - netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err); - return err; - } - - if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0) - return -ETIMEDOUT; - - /* Since we cannot monitor the status of the link while sleeping, - * tell the world it went away. - */ - netif_carrier_off(tp->dev); - - pci_enable_wake(tp->pdev, state, 1); - pci_disable_device(pdev); - return pci_set_power_state(pdev, state); -} - -static int -typhoon_wakeup(struct typhoon *tp, int wait_type) -{ - struct pci_dev *pdev = tp->pdev; - void __iomem *ioaddr = tp->ioaddr; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - - /* Post 2.x.x versions of the Sleep Image require a reset before - * we can download the Runtime Image. But let's not make users of - * the old firmware pay for the reset. 
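
typhoon_interrupt() and typhoon_poll() together follow the standard NAPI
contract: the hard interrupt masks the device and schedules the poller, and
the poller only re-arms interrupts once it finishes with budget to spare.
Condensed shape of that hand-off (a sketch, not the driver's exact code;
process_rx() and the mask/unmask helpers are hypothetical):

    static irqreturn_t example_isr(int irq, void *dev_instance)
    {
            struct typhoon *tp = netdev_priv(dev_instance);

            mask_device_interrupts(tp);            /* hypothetical helper */
            __napi_schedule(&tp->napi);            /* defer work to softirq */
            return IRQ_HANDLED;
    }

    static int example_poll(struct napi_struct *napi, int budget)
    {
            struct typhoon *tp = container_of(napi, struct typhoon, napi);
            int work_done = process_rx(tp, budget);  /* hypothetical helper */

            if (work_done < budget) {              /* rings fully drained */
                    napi_complete(napi);
                    unmask_device_interrupts(tp);  /* hypothetical helper */
            }
            return work_done;
    }
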
- */ - iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND); - if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 || - (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET)) - return typhoon_reset(ioaddr, wait_type); - - return 0; -} - -static int -typhoon_start_runtime(struct typhoon *tp) -{ - struct net_device *dev = tp->dev; - void __iomem *ioaddr = tp->ioaddr; - struct cmd_desc xp_cmd; - int err; - - typhoon_init_rings(tp); - typhoon_fill_free_ring(tp); - - err = typhoon_download_firmware(tp); - if(err < 0) { - netdev_err(tp->dev, "cannot load runtime on 3XP\n"); - goto error_out; - } - - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) { - netdev_err(tp->dev, "cannot boot 3XP\n"); - err = -EIO; - goto error_out; - } - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE); - xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ); - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) - goto error_out; - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); - xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); - xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) - goto error_out; - - /* Disable IRQ coalescing -- we can reenable it when 3Com gives - * us some more information on how to control it. - */ - INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL); - xp_cmd.parm1 = 0; - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) - goto error_out; - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT); - xp_cmd.parm1 = tp->xcvr_select; - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) - goto error_out; - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE); - xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q); - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) - goto error_out; - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS); - xp_cmd.parm2 = tp->offload; - xp_cmd.parm3 = tp->offload; - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) - goto error_out; - - typhoon_set_rx_mode(dev); - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE); - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) - goto error_out; - - INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE); - err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - if(err < 0) - goto error_out; - - tp->card_state = Running; - smp_wmb(); - - iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE); - iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK); - typhoon_post_pci_writes(ioaddr); - - return 0; - -error_out: - typhoon_reset(ioaddr, WaitNoSleep); - typhoon_free_rx_rings(tp); - typhoon_init_rings(tp); - return err; -} - -static int -typhoon_stop_runtime(struct typhoon *tp, int wait_type) -{ - struct typhoon_indexes *indexes = tp->indexes; - struct transmit_ring *txLo = &tp->txLoRing; - void __iomem *ioaddr = tp->ioaddr; - struct cmd_desc xp_cmd; - int i; - - /* Disable interrupts early, since we can't schedule a poll - * when called with !netif_running(). This will be posted - * when we force the posting of the command. - */ - iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE); - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE); - typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - - /* Wait 1/2 sec for any outstanding transmits to occur - * We'll cleanup after the reset if this times out. 
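
typhoon_start_runtime() above repeats one three-step pattern eight times
over: build a descriptor on the stack, fill in the parameters, then issue it
and jump to error_out on failure. A hypothetical wrapper (not in the driver)
that captures the repeated shape for simple no-response commands:

    static int issue_simple_cmd(struct typhoon *tp, __le16 cmd, __le16 parm1)
    {
            struct cmd_desc xp_cmd;

            INIT_COMMAND_NO_RESPONSE(&xp_cmd, cmd);
            xp_cmd.parm1 = parm1;
            return typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
    }

With it, the VLAN ethertype setup above would read
issue_simple_cmd(tp, TYPHOON_CMD_VLAN_TYPE_WRITE, cpu_to_le16(ETH_P_8021Q)).
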
- */ - for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { - if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite)) - break; - udelay(TYPHOON_UDELAY); - } - - if(i == TYPHOON_WAIT_TIMEOUT) - netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n"); - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE); - typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - - /* save the statistics so when we bring the interface up again, - * the values reported to userspace are correct. - */ - tp->card_state = Sleeping; - smp_wmb(); - typhoon_do_get_stats(tp); - memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats)); - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT); - typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); - - if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0) - netdev_err(tp->dev, "timed out waiting for 3XP to halt\n"); - - if(typhoon_reset(ioaddr, wait_type) < 0) { - netdev_err(tp->dev, "unable to reset 3XP\n"); - return -ETIMEDOUT; - } - - /* cleanup any outstanding Tx packets */ - if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) { - indexes->txLoCleared = cpu_to_le32(txLo->lastWrite); - typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared); - } - - return 0; -} - -static void -typhoon_tx_timeout(struct net_device *dev) -{ - struct typhoon *tp = netdev_priv(dev); - - if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) { - netdev_warn(dev, "could not reset in tx timeout\n"); - goto truly_dead; - } - - /* If we ever start using the Hi ring, it will need cleaning too */ - typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared); - typhoon_free_rx_rings(tp); - - if(typhoon_start_runtime(tp) < 0) { - netdev_err(dev, "could not start runtime in tx timeout\n"); - goto truly_dead; - } - - netif_wake_queue(dev); - return; - -truly_dead: - /* Reset the hardware, and turn off carrier to avoid more timeouts */ - typhoon_reset(tp->ioaddr, NoWait); - netif_carrier_off(dev); -} - -static int -typhoon_open(struct net_device *dev) -{ - struct typhoon *tp = netdev_priv(dev); - int err; - - err = typhoon_request_firmware(tp); - if (err) - goto out; - - err = typhoon_wakeup(tp, WaitSleep); - if(err < 0) { - netdev_err(dev, "unable to wakeup device\n"); - goto out_sleep; - } - - err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED, - dev->name, dev); - if(err < 0) - goto out_sleep; - - napi_enable(&tp->napi); - - err = typhoon_start_runtime(tp); - if(err < 0) { - napi_disable(&tp->napi); - goto out_irq; - } - - netif_start_queue(dev); - return 0; - -out_irq: - free_irq(dev->irq, dev); - -out_sleep: - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { - netdev_err(dev, "unable to reboot into sleep img\n"); - typhoon_reset(tp->ioaddr, NoWait); - goto out; - } - - if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) - netdev_err(dev, "unable to go back to sleep\n"); - -out: - return err; -} - -static int -typhoon_close(struct net_device *dev) -{ - struct typhoon *tp = netdev_priv(dev); - - netif_stop_queue(dev); - napi_disable(&tp->napi); - - if(typhoon_stop_runtime(tp, WaitSleep) < 0) - netdev_err(dev, "unable to stop runtime\n"); - - /* Make sure there is no irq handler running on a different CPU. 
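
The transmit-drain wait in typhoon_stop_runtime() is the usual bounded-poll
idiom: spin for at most TYPHOON_WAIT_TIMEOUT iterations with a short
TYPHOON_UDELAY pause, and treat loop exhaustion as the timeout. The same
shape as a generic helper (a sketch; the condition callback is hypothetical):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static int poll_until(bool (*done)(void *arg), void *arg,
                          int max_iters, unsigned int delay_us)
    {
            int i;

            for (i = 0; i < max_iters; i++) {
                    if (done(arg))
                            return 0;       /* condition met in time */
                    udelay(delay_us);
            }
            return -ETIMEDOUT;              /* caller decides how loud to complain */
    }
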
*/ - free_irq(dev->irq, dev); - - typhoon_free_rx_rings(tp); - typhoon_init_rings(tp); - - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) - netdev_err(dev, "unable to boot sleep image\n"); - - if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) - netdev_err(dev, "unable to put card to sleep\n"); - - return 0; -} - -#ifdef CONFIG_PM -static int -typhoon_resume(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct typhoon *tp = netdev_priv(dev); - - /* If we're down, resume when we are upped. - */ - if(!netif_running(dev)) - return 0; - - if(typhoon_wakeup(tp, WaitNoSleep) < 0) { - netdev_err(dev, "critical: could not wake up in resume\n"); - goto reset; - } - - if(typhoon_start_runtime(tp) < 0) { - netdev_err(dev, "critical: could not start runtime in resume\n"); - goto reset; - } - - netif_device_attach(dev); - return 0; - -reset: - typhoon_reset(tp->ioaddr, NoWait); - return -EBUSY; -} - -static int -typhoon_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct typhoon *tp = netdev_priv(dev); - struct cmd_desc xp_cmd; - - /* If we're down, we're already suspended. - */ - if(!netif_running(dev)) - return 0; - - /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */ - if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) - netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n"); - - netif_device_detach(dev); - - if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) { - netdev_err(dev, "unable to stop runtime\n"); - goto need_resume; - } - - typhoon_free_rx_rings(tp); - typhoon_init_rings(tp); - - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { - netdev_err(dev, "unable to boot sleep image\n"); - goto need_resume; - } - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); - xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); - xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); - if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { - netdev_err(dev, "unable to set mac address in suspend\n"); - goto need_resume; - } - - INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER); - xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; - if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { - netdev_err(dev, "unable to set rx filter in suspend\n"); - goto need_resume; - } - - if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) { - netdev_err(dev, "unable to put card to sleep\n"); - goto need_resume; - } - - return 0; - -need_resume: - typhoon_resume(pdev); - return -EBUSY; -} -#endif - -static int __devinit -typhoon_test_mmio(struct pci_dev *pdev) -{ - void __iomem *ioaddr = pci_iomap(pdev, 1, 128); - int mode = 0; - u32 val; - - if(!ioaddr) - goto out; - - if(ioread32(ioaddr + TYPHOON_REG_STATUS) != - TYPHOON_STATUS_WAITING_FOR_HOST) - goto out_unmap; - - iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); - iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); - iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE); - - /* Ok, see if we can change our interrupt status register by - * sending ourselves an interrupt. If so, then MMIO works. - * The 50usec delay is arbitrary -- it could probably be smaller. 
- */ - val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); - if((val & TYPHOON_INTR_SELF) == 0) { - iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT); - ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); - udelay(50); - val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); - if(val & TYPHOON_INTR_SELF) - mode = 1; - } - - iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); - iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); - iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE); - ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); - -out_unmap: - pci_iounmap(pdev, ioaddr); - -out: - if(!mode) - pr_info("%s: falling back to port IO\n", pci_name(pdev)); - return mode; -} - -static const struct net_device_ops typhoon_netdev_ops = { - .ndo_open = typhoon_open, - .ndo_stop = typhoon_close, - .ndo_start_xmit = typhoon_start_tx, - .ndo_set_multicast_list = typhoon_set_rx_mode, - .ndo_tx_timeout = typhoon_tx_timeout, - .ndo_get_stats = typhoon_get_stats, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = typhoon_set_mac_address, - .ndo_change_mtu = eth_change_mtu, -}; - -static int __devinit -typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *dev; - struct typhoon *tp; - int card_id = (int) ent->driver_data; - void __iomem *ioaddr; - void *shared; - dma_addr_t shared_dma; - struct cmd_desc xp_cmd; - struct resp_desc xp_resp[3]; - int err = 0; - const char *err_msg; - - dev = alloc_etherdev(sizeof(*tp)); - if(dev == NULL) { - err_msg = "unable to alloc new net device"; - err = -ENOMEM; - goto error_out; - } - SET_NETDEV_DEV(dev, &pdev->dev); - - err = pci_enable_device(pdev); - if(err < 0) { - err_msg = "unable to enable device"; - goto error_out_dev; - } - - err = pci_set_mwi(pdev); - if(err < 0) { - err_msg = "unable to set MWI"; - goto error_out_disable; - } - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if(err < 0) { - err_msg = "No usable DMA configuration"; - goto error_out_mwi; - } - - /* sanity checks on IO and MMIO BARs - */ - if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { - err_msg = "region #1 not a PCI IO resource, aborting"; - err = -ENODEV; - goto error_out_mwi; - } - if(pci_resource_len(pdev, 0) < 128) { - err_msg = "Invalid PCI IO region size, aborting"; - err = -ENODEV; - goto error_out_mwi; - } - if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { - err_msg = "region #1 not a PCI MMIO resource, aborting"; - err = -ENODEV; - goto error_out_mwi; - } - if(pci_resource_len(pdev, 1) < 128) { - err_msg = "Invalid PCI MMIO region size, aborting"; - err = -ENODEV; - goto error_out_mwi; - } - - err = pci_request_regions(pdev, KBUILD_MODNAME); - if(err < 0) { - err_msg = "could not request regions"; - goto error_out_mwi; - } - - /* map our registers - */ - if(use_mmio != 0 && use_mmio != 1) - use_mmio = typhoon_test_mmio(pdev); - - ioaddr = pci_iomap(pdev, use_mmio, 128); - if (!ioaddr) { - err_msg = "cannot remap registers, aborting"; - err = -EIO; - goto error_out_regions; - } - - /* allocate pci dma space for rx and tx descriptor rings - */ - shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared), - &shared_dma); - if(!shared) { - err_msg = "could not allocate DMA memory"; - err = -ENOMEM; - goto error_out_remap; - } - - dev->irq = pdev->irq; - tp = netdev_priv(dev); - tp->shared = shared; - tp->shared_dma = shared_dma; - tp->pdev = pdev; - tp->tx_pdev = pdev; - tp->ioaddr = ioaddr; - tp->tx_ioaddr = ioaddr; - tp->dev = dev; - - /* Init sequence: - * 1) Reset the adapter to clear any bad 
juju - * 2) Reload the sleep image - * 3) Boot the sleep image - * 4) Get the hardware address. - * 5) Put the card to sleep. - */ - if (typhoon_reset(ioaddr, WaitSleep) < 0) { - err_msg = "could not reset 3XP"; - err = -EIO; - goto error_out_dma; - } - - /* Now that we've reset the 3XP and are sure it's not going to - * write all over memory, enable bus mastering, and save our - * state for resuming after a suspend. - */ - pci_set_master(pdev); - pci_save_state(pdev); - - typhoon_init_interface(tp); - typhoon_init_rings(tp); - - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { - err_msg = "cannot boot 3XP sleep image"; - err = -EIO; - goto error_out_reset; - } - - INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { - err_msg = "cannot read MAC address"; - err = -EIO; - goto error_out_reset; - } - - *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); - *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); - - if(!is_valid_ether_addr(dev->dev_addr)) { - err_msg = "Could not obtain valid ethernet address, aborting"; - goto error_out_reset; - } - - /* Read the Sleep Image version last, so the response is valid - * later when we print out the version reported. - */ - INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); - if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { - err_msg = "Could not get Sleep Image version"; - goto error_out_reset; - } - - tp->capabilities = typhoon_card_info[card_id].capabilities; - tp->xcvr_select = TYPHOON_XCVR_AUTONEG; - - /* Typhoon 1.0 Sleep Images return one response descriptor to the - * READ_VERSIONS command. Those versions are OK after waking up - * from sleep without needing a reset. Typhoon 1.1+ Sleep Images - * seem to need a little extra help to get started. Since we don't - * know how to nudge it along, just kick it. - */ - if(xp_resp[0].numDesc != 0) - tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; - - if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { - err_msg = "cannot put adapter to sleep"; - err = -EIO; - goto error_out_reset; - } - - /* The chip-specific entries in the device structure. */ - dev->netdev_ops = &typhoon_netdev_ops; - netif_napi_add(dev, &tp->napi, typhoon_poll, 16); - dev->watchdog_timeo = TX_TIMEOUT; - - SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops); - - /* We can handle scatter gather, up to 16 entries, and - * we can do IP checksumming (only version 4, doh...) - * - * There's no way to turn off the RX VLAN offloading and stripping - * on the current 3XP firmware -- it does not respect the offload - * settings -- so we only allow the user to toggle the TX processing. - */ - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_HW_VLAN_TX; - dev->features = dev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; - - if(register_netdev(dev) < 0) { - err_msg = "unable to register netdev"; - goto error_out_reset; - } - - pci_set_drvdata(pdev, dev); - - netdev_info(dev, "%s at %s 0x%llx, %pM\n", - typhoon_card_info[card_id].name, - use_mmio ? "MMIO" : "IO", - (unsigned long long)pci_resource_start(pdev, use_mmio), - dev->dev_addr); - - /* xp_resp still contains the response to the READ_VERSIONS command. - * For debugging, let the user know what version he has. - */ - if(xp_resp[0].numDesc == 0) { - /* This is the Typhoon 1.0 type Sleep Image, last 16 bits - * of version is Month/Day of build. 
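
The MAC address above arrives little endian in two response words: parm1
carries the first two bytes and parm2 the remaining four, and the
htons()/htonl() conversions lay them out most-significant-byte-first in
dev_addr. The byte-level equivalent as a standalone sketch (parameters are
assumed already converted to CPU order, as after the le16_to_cpu() and
le32_to_cpu() calls above):

    #include <stdint.h>

    static void unpack_mac(uint16_t parm1, uint32_t parm2, uint8_t mac[6])
    {
            mac[0] = parm1 >> 8;             /* htons(): high byte first */
            mac[1] = parm1 & 0xff;
            mac[2] = parm2 >> 24;            /* htonl(): high byte first */
            mac[3] = (parm2 >> 16) & 0xff;
            mac[4] = (parm2 >> 8) & 0xff;
            mac[5] = parm2 & 0xff;
    }
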
- */ - u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff; - netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n", - monthday >> 8, monthday & 0xff); - } else if(xp_resp[0].numDesc == 2) { - /* This is the Typhoon 1.1+ type Sleep Image - */ - u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); - u8 *ver_string = (u8 *) &xp_resp[1]; - ver_string[25] = 0; - netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n", - sleep_ver >> 24, (sleep_ver >> 12) & 0xfff, - sleep_ver & 0xfff, ver_string); - } else { - netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n", - xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2)); - } - - return 0; - -error_out_reset: - typhoon_reset(ioaddr, NoWait); - -error_out_dma: - pci_free_consistent(pdev, sizeof(struct typhoon_shared), - shared, shared_dma); -error_out_remap: - pci_iounmap(pdev, ioaddr); -error_out_regions: - pci_release_regions(pdev); -error_out_mwi: - pci_clear_mwi(pdev); -error_out_disable: - pci_disable_device(pdev); -error_out_dev: - free_netdev(dev); -error_out: - pr_err("%s: %s\n", pci_name(pdev), err_msg); - return err; -} - -static void __devexit -typhoon_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct typhoon *tp = netdev_priv(dev); - - unregister_netdev(dev); - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - typhoon_reset(tp->ioaddr, NoWait); - pci_iounmap(pdev, tp->ioaddr); - pci_free_consistent(pdev, sizeof(struct typhoon_shared), - tp->shared, tp->shared_dma); - pci_release_regions(pdev); - pci_clear_mwi(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - free_netdev(dev); -} - -static struct pci_driver typhoon_driver = { - .name = KBUILD_MODNAME, - .id_table = typhoon_pci_tbl, - .probe = typhoon_init_one, - .remove = __devexit_p(typhoon_remove_one), -#ifdef CONFIG_PM - .suspend = typhoon_suspend, - .resume = typhoon_resume, -#endif -}; - -static int __init -typhoon_init(void) -{ - return pci_register_driver(&typhoon_driver); -} - -static void __exit -typhoon_cleanup(void) -{ - if (typhoon_fw) - release_firmware(typhoon_fw); - pci_unregister_driver(&typhoon_driver); -} - -module_init(typhoon_init); -module_exit(typhoon_cleanup); diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h deleted file mode 100644 index 88187fc..0000000 --- a/drivers/net/typhoon.h +++ /dev/null @@ -1,624 +0,0 @@ -/* typhoon.h: chip info for the 3Com 3CR990 family of controllers */ -/* - Written 2002-2003 by David Dillow - - This software may be used and distributed according to the terms of - the GNU General Public License (GPL), incorporated herein by reference. - Drivers based on or derived from this code fall under the GPL and must - retain the authorship, copyright and license notice. This file is not - a complete program and may only be used when the entire operating - system is licensed under the GPL. - - This software is available on a public web site. It may enable - cryptographic capabilities of the 3Com hardware, and may be - exported from the United States under License Exception "TSU" - pursuant to 15 C.F.R. Section 740.13(e). - - This work was funded by the National Library of Medicine under - the Department of Energy project number 0274DD06D1 and NLM project - number Y1-LM-2015-01. -*/ - -/* All Typhoon ring positions are specificed in bytes, and point to the - * first "clean" entry in the ring -- ie the next entry we use for whatever - * purpose. 
- */
-
-/* The Typhoon basic ring
- * ringBase: where this ring lives (our virtual address)
- * lastWrite: the next entry we'll use
- */
-struct basic_ring {
-	u8 *ringBase;
-	u32 lastWrite;
-};
-
-/* The Typhoon transmit ring -- same as a basic ring, plus:
- * lastRead: where we're at in regard to cleaning up the ring
- * writeRegister: register to use for writing (different for Hi & Lo rings)
- */
-struct transmit_ring {
-	u8 *ringBase;
-	u32 lastWrite;
-	u32 lastRead;
-	int writeRegister;
-};
-
-/* The host<->Typhoon ring index structure
- * This indicates the current positions in the rings
- *
- * All values must be in little endian format for the 3XP
- *
- * rxHiCleared: entry we've cleared to in the Hi receive ring
- * rxLoCleared: entry we've cleared to in the Lo receive ring
- * rxBuffReady: next entry we'll put a free buffer in
- * respCleared: entry we've cleared to in the response ring
- *
- * txLoCleared: entry the NIC has cleared to in the Lo transmit ring
- * txHiCleared: entry the NIC has cleared to in the Hi transmit ring
- * rxLoReady: entry the NIC has filled to in the Lo receive ring
- * rxBuffCleared: entry the NIC has cleared in the free buffer ring
- * cmdCleared: entry the NIC has cleared in the command ring
- * respReady: entry the NIC has filled to in the response ring
- * rxHiReady: entry the NIC has filled to in the Hi receive ring
- */
-struct typhoon_indexes {
-	/* The first four are written by the host, and read by the NIC */
-	volatile __le32 rxHiCleared;
-	volatile __le32 rxLoCleared;
-	volatile __le32 rxBuffReady;
-	volatile __le32 respCleared;
-
-	/* The remaining are written by the NIC, and read by the host */
-	volatile __le32 txLoCleared;
-	volatile __le32 txHiCleared;
-	volatile __le32 rxLoReady;
-	volatile __le32 rxBuffCleared;
-	volatile __le32 cmdCleared;
-	volatile __le32 respReady;
-	volatile __le32 rxHiReady;
-} __packed;
-
-/* The host<->Typhoon interface
- * Our means of communicating where things are
- *
- * All values must be in little endian format for the 3XP
- *
- * ringIndex: 64 bit bus address of the index structure
- * txLoAddr: 64 bit bus address of the Lo transmit ring
- * txLoSize: size (in bytes) of the Lo transmit ring
- * txHi*: as above for the Hi priority transmit ring
- * rxLo*: as above for the Lo priority receive ring
- * rxBuff*: as above for the free buffer ring
- * cmd*: as above for the command ring
- * resp*: as above for the response ring
- * zeroAddr: 64 bit bus address of a zero word (for DMA)
- * rxHi*: as above for the Hi Priority receive ring
- *
- * While there is room for 64 bit addresses, current versions of the 3XP
- * only do 32 bit addresses, so the *Hi for each of the above will always
- * be zero.
- */
-struct typhoon_interface {
-	__le32 ringIndex;
-	__le32 ringIndexHi;
-	__le32 txLoAddr;
-	__le32 txLoAddrHi;
-	__le32 txLoSize;
-	__le32 txHiAddr;
-	__le32 txHiAddrHi;
-	__le32 txHiSize;
-	__le32 rxLoAddr;
-	__le32 rxLoAddrHi;
-	__le32 rxLoSize;
-	__le32 rxBuffAddr;
-	__le32 rxBuffAddrHi;
-	__le32 rxBuffSize;
-	__le32 cmdAddr;
-	__le32 cmdAddrHi;
-	__le32 cmdSize;
-	__le32 respAddr;
-	__le32 respAddrHi;
-	__le32 respSize;
-	__le32 zeroAddr;
-	__le32 zeroAddrHi;
-	__le32 rxHiAddr;
-	__le32 rxHiAddrHi;
-	__le32 rxHiSize;
-} __packed;
-
-/* The Typhoon transmit/fragment descriptor
- *
- * A packet is described by a packet descriptor, followed by option descriptors,
- * if any, then one or more fragment descriptors.
- *
- * Packet descriptor:
- * flags: Descriptor type
- * len: zero, or length of this packet
- * addr*: 8 bytes of opaque data to the firmware -- for skb pointer
- * processFlags: Determine offload tasks to perform on this packet.
- *
- * Fragment descriptor:
- * flags: Descriptor type
- * len: length of this fragment
- * addr: low bytes of DMA address for this part of the packet
- * addrHi: hi bytes of DMA address for this part of the packet
- * processFlags: must be zero
- *
- * TYPHOON_DESC_VALID is not mentioned in their docs, but their Linux
- * driver uses it.
- */
-struct tx_desc {
-	u8 flags;
-#define TYPHOON_TYPE_MASK 0x07
-#define TYPHOON_FRAG_DESC 0x00
-#define TYPHOON_TX_DESC 0x01
-#define TYPHOON_CMD_DESC 0x02
-#define TYPHOON_OPT_DESC 0x03
-#define TYPHOON_RX_DESC 0x04
-#define TYPHOON_RESP_DESC 0x05
-#define TYPHOON_OPT_TYPE_MASK 0xf0
-#define TYPHOON_OPT_IPSEC 0x00
-#define TYPHOON_OPT_TCP_SEG 0x10
-#define TYPHOON_CMD_RESPOND 0x40
-#define TYPHOON_RESP_ERROR 0x40
-#define TYPHOON_RX_ERROR 0x40
-#define TYPHOON_DESC_VALID 0x80
-	u8 numDesc;
-	__le16 len;
-	union {
-		struct {
-			__le32 addr;
-			__le32 addrHi;
-		} frag;
-		u64 tx_addr;	/* opaque for hardware, for TX_DESC */
-	};
-	__le32 processFlags;
-#define TYPHOON_TX_PF_NO_CRC cpu_to_le32(0x00000001)
-#define TYPHOON_TX_PF_IP_CHKSUM cpu_to_le32(0x00000002)
-#define TYPHOON_TX_PF_TCP_CHKSUM cpu_to_le32(0x00000004)
-#define TYPHOON_TX_PF_TCP_SEGMENT cpu_to_le32(0x00000008)
-#define TYPHOON_TX_PF_INSERT_VLAN cpu_to_le32(0x00000010)
-#define TYPHOON_TX_PF_IPSEC cpu_to_le32(0x00000020)
-#define TYPHOON_TX_PF_VLAN_PRIORITY cpu_to_le32(0x00000040)
-#define TYPHOON_TX_PF_UDP_CHKSUM cpu_to_le32(0x00000080)
-#define TYPHOON_TX_PF_PAD_FRAME cpu_to_le32(0x00000100)
-#define TYPHOON_TX_PF_RESERVED cpu_to_le32(0x00000e00)
-#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000)
-#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000)
-#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12
-} __packed;
-
-/* The TCP Segmentation offload option descriptor
- *
- * flags: descriptor type
- * numDesc: must be 1
- * mss_flags: bits 0-11 (little endian) are MSS, 12 is first TSO descriptor
- *		13 is last TSO descriptor, set both if only one TSO
- * respAddrLo: low bytes of address of the bytesTx field of this descriptor
- * bytesTx: total number of bytes in this TSO request
- * status: 0 on completion
- */
-struct tcpopt_desc {
-	u8 flags;
-	u8 numDesc;
-	__le16 mss_flags;
-#define TYPHOON_TSO_FIRST cpu_to_le16(0x1000)
-#define TYPHOON_TSO_LAST cpu_to_le16(0x2000)
-	__le32 respAddrLo;
-	__le32 bytesTx;
-	__le32 status;
-} __packed;
-
-/* The IPSEC Offload descriptor
- *
- * flags: descriptor type
- * numDesc: must be 1
- * ipsecFlags: bit 0: 0 -- generate IV, 1 -- use supplied IV
- * sa1, sa2: Security Association IDs for this packet
- * reserved: set to 0
- */
-struct ipsec_desc {
-	u8 flags;
-	u8 numDesc;
-	__le16 ipsecFlags;
-#define TYPHOON_IPSEC_GEN_IV cpu_to_le16(0x0000)
-#define TYPHOON_IPSEC_USE_IV cpu_to_le16(0x0001)
-	__le32 sa1;
-	__le32 sa2;
-	__le32 reserved;
-} __packed;
-
-/* The Typhoon receive descriptor (Updated by NIC)
- *
- * flags: Descriptor type, error indication
- * numDesc: Always zero
- * frameLen: the size of the packet received
- * addr: low 32 bits of the virtual addr passed in for this buffer
- * addrHi: high 32 bits of the virtual addr passed in for this buffer
- * rxStatus: Error if set in flags, otherwise result of offload processing
- * filterResults: results of filtering on packet, not used
- * ipsecResults: Results of
IPSEC processing
- * vlanTag: the 802.1q TCI from the packet
- */
-struct rx_desc {
-	u8 flags;
-	u8 numDesc;
-	__le16 frameLen;
-	u32 addr;	/* opaque, comes from virtAddr */
-	u32 addrHi;	/* opaque, comes from virtAddrHi */
-	__le32 rxStatus;
-#define TYPHOON_RX_ERR_INTERNAL cpu_to_le32(0x00000000)
-#define TYPHOON_RX_ERR_FIFO_UNDERRUN cpu_to_le32(0x00000001)
-#define TYPHOON_RX_ERR_BAD_SSD cpu_to_le32(0x00000002)
-#define TYPHOON_RX_ERR_RUNT cpu_to_le32(0x00000003)
-#define TYPHOON_RX_ERR_CRC cpu_to_le32(0x00000004)
-#define TYPHOON_RX_ERR_OVERSIZE cpu_to_le32(0x00000005)
-#define TYPHOON_RX_ERR_ALIGN cpu_to_le32(0x00000006)
-#define TYPHOON_RX_ERR_DRIBBLE cpu_to_le32(0x00000007)
-#define TYPHOON_RX_PROTO_MASK cpu_to_le32(0x00000003)
-#define TYPHOON_RX_PROTO_UNKNOWN cpu_to_le32(0x00000000)
-#define TYPHOON_RX_PROTO_IP cpu_to_le32(0x00000001)
-#define TYPHOON_RX_PROTO_IPX cpu_to_le32(0x00000002)
-#define TYPHOON_RX_VLAN cpu_to_le32(0x00000004)
-#define TYPHOON_RX_IP_FRAG cpu_to_le32(0x00000008)
-#define TYPHOON_RX_IPSEC cpu_to_le32(0x00000010)
-#define TYPHOON_RX_IP_CHK_FAIL cpu_to_le32(0x00000020)
-#define TYPHOON_RX_TCP_CHK_FAIL cpu_to_le32(0x00000040)
-#define TYPHOON_RX_UDP_CHK_FAIL cpu_to_le32(0x00000080)
-#define TYPHOON_RX_IP_CHK_GOOD cpu_to_le32(0x00000100)
-#define TYPHOON_RX_TCP_CHK_GOOD cpu_to_le32(0x00000200)
-#define TYPHOON_RX_UDP_CHK_GOOD cpu_to_le32(0x00000400)
-	__le16 filterResults;
-#define TYPHOON_RX_FILTER_MASK cpu_to_le16(0x7fff)
-#define TYPHOON_RX_FILTERED cpu_to_le16(0x8000)
-	__le16 ipsecResults;
-#define TYPHOON_RX_OUTER_AH_GOOD cpu_to_le16(0x0001)
-#define TYPHOON_RX_OUTER_ESP_GOOD cpu_to_le16(0x0002)
-#define TYPHOON_RX_INNER_AH_GOOD cpu_to_le16(0x0004)
-#define TYPHOON_RX_INNER_ESP_GOOD cpu_to_le16(0x0008)
-#define TYPHOON_RX_OUTER_AH_FAIL cpu_to_le16(0x0010)
-#define TYPHOON_RX_OUTER_ESP_FAIL cpu_to_le16(0x0020)
-#define TYPHOON_RX_INNER_AH_FAIL cpu_to_le16(0x0040)
-#define TYPHOON_RX_INNER_ESP_FAIL cpu_to_le16(0x0080)
-#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100)
-#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200)
-	__be32 vlanTag;
-} __packed;
-
-/* The Typhoon free buffer descriptor, used to give a buffer to the NIC
- *
- * physAddr: low 32 bits of the bus address of the buffer
- * physAddrHi: high 32 bits of the bus address of the buffer, always zero
- * virtAddr: low 32 bits of the skb address
- * virtAddrHi: high 32 bits of the skb address, always zero
- *
- * the virt* address is basically two 32 bit cookies, just passed back
- * from the NIC
- */
-struct rx_free {
-	__le32 physAddr;
-	__le32 physAddrHi;
-	u32 virtAddr;
-	u32 virtAddrHi;
-} __packed;
-
-/* The Typhoon command descriptor, used for commands and responses
- *
- * flags: descriptor type
- * numDesc: number of descriptors following in this command/response,
- *		ie, zero for a one descriptor command
- * cmd: the command
- * seqNo: sequence number (unused)
- * parm1: use varies by command
- * parm2: use varies by command
- * parm3: use varies by command
- */
-struct cmd_desc {
-	u8 flags;
-	u8 numDesc;
-	__le16 cmd;
-#define TYPHOON_CMD_TX_ENABLE cpu_to_le16(0x0001)
-#define TYPHOON_CMD_TX_DISABLE cpu_to_le16(0x0002)
-#define TYPHOON_CMD_RX_ENABLE cpu_to_le16(0x0003)
-#define TYPHOON_CMD_RX_DISABLE cpu_to_le16(0x0004)
-#define TYPHOON_CMD_SET_RX_FILTER cpu_to_le16(0x0005)
-#define TYPHOON_CMD_READ_STATS cpu_to_le16(0x0007)
-#define TYPHOON_CMD_XCVR_SELECT cpu_to_le16(0x0013)
-#define TYPHOON_CMD_SET_MAX_PKT_SIZE cpu_to_le16(0x001a)
-#define TYPHOON_CMD_READ_MEDIA_STATUS
cpu_to_le16(0x001b) -#define TYPHOON_CMD_GOTO_SLEEP cpu_to_le16(0x0023) -#define TYPHOON_CMD_SET_MULTICAST_HASH cpu_to_le16(0x0025) -#define TYPHOON_CMD_SET_MAC_ADDRESS cpu_to_le16(0x0026) -#define TYPHOON_CMD_READ_MAC_ADDRESS cpu_to_le16(0x0027) -#define TYPHOON_CMD_VLAN_TYPE_WRITE cpu_to_le16(0x002b) -#define TYPHOON_CMD_CREATE_SA cpu_to_le16(0x0034) -#define TYPHOON_CMD_DELETE_SA cpu_to_le16(0x0035) -#define TYPHOON_CMD_READ_VERSIONS cpu_to_le16(0x0043) -#define TYPHOON_CMD_IRQ_COALESCE_CTRL cpu_to_le16(0x0045) -#define TYPHOON_CMD_ENABLE_WAKE_EVENTS cpu_to_le16(0x0049) -#define TYPHOON_CMD_SET_OFFLOAD_TASKS cpu_to_le16(0x004f) -#define TYPHOON_CMD_HELLO_RESP cpu_to_le16(0x0057) -#define TYPHOON_CMD_HALT cpu_to_le16(0x005d) -#define TYPHOON_CMD_READ_IPSEC_INFO cpu_to_le16(0x005e) -#define TYPHOON_CMD_GET_IPSEC_ENABLE cpu_to_le16(0x0067) -#define TYPHOON_CMD_GET_CMD_LVL cpu_to_le16(0x0069) - u16 seqNo; - __le16 parm1; - __le32 parm2; - __le32 parm3; -} __packed; - -/* The Typhoon response descriptor, see command descriptor for details - */ -struct resp_desc { - u8 flags; - u8 numDesc; - __le16 cmd; - __le16 seqNo; - __le16 parm1; - __le32 parm2; - __le32 parm3; -} __packed; - -#define INIT_COMMAND_NO_RESPONSE(x, command) \ - do { struct cmd_desc *_ptr = (x); \ - memset(_ptr, 0, sizeof(struct cmd_desc)); \ - _ptr->flags = TYPHOON_CMD_DESC | TYPHOON_DESC_VALID; \ - _ptr->cmd = command; \ - } while(0) - -/* We set seqNo to 1 if we're expecting a response from this command */ -#define INIT_COMMAND_WITH_RESPONSE(x, command) \ - do { struct cmd_desc *_ptr = (x); \ - memset(_ptr, 0, sizeof(struct cmd_desc)); \ - _ptr->flags = TYPHOON_CMD_RESPOND | TYPHOON_CMD_DESC; \ - _ptr->flags |= TYPHOON_DESC_VALID; \ - _ptr->cmd = command; \ - _ptr->seqNo = 1; \ - } while(0) - -/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1) - */ -#define TYPHOON_RX_FILTER_DIRECTED cpu_to_le16(0x0001) -#define TYPHOON_RX_FILTER_ALL_MCAST cpu_to_le16(0x0002) -#define TYPHOON_RX_FILTER_BROADCAST cpu_to_le16(0x0004) -#define TYPHOON_RX_FILTER_PROMISCOUS cpu_to_le16(0x0008) -#define TYPHOON_RX_FILTER_MCAST_HASH cpu_to_le16(0x0010) - -/* TYPHOON_CMD_READ_STATS response format - */ -struct stats_resp { - u8 flags; - u8 numDesc; - __le16 cmd; - __le16 seqNo; - __le16 unused; - __le32 txPackets; - __le64 txBytes; - __le32 txDeferred; - __le32 txLateCollisions; - __le32 txCollisions; - __le32 txCarrierLost; - __le32 txMultipleCollisions; - __le32 txExcessiveCollisions; - __le32 txFifoUnderruns; - __le32 txMulticastTxOverflows; - __le32 txFiltered; - __le32 rxPacketsGood; - __le64 rxBytesGood; - __le32 rxFifoOverruns; - __le32 BadSSD; - __le32 rxCrcErrors; - __le32 rxOversized; - __le32 rxBroadcast; - __le32 rxMulticast; - __le32 rxOverflow; - __le32 rxFiltered; - __le32 linkStatus; -#define TYPHOON_LINK_STAT_MASK cpu_to_le32(0x00000001) -#define TYPHOON_LINK_GOOD cpu_to_le32(0x00000001) -#define TYPHOON_LINK_BAD cpu_to_le32(0x00000000) -#define TYPHOON_LINK_SPEED_MASK cpu_to_le32(0x00000002) -#define TYPHOON_LINK_100MBPS cpu_to_le32(0x00000002) -#define TYPHOON_LINK_10MBPS cpu_to_le32(0x00000000) -#define TYPHOON_LINK_DUPLEX_MASK cpu_to_le32(0x00000004) -#define TYPHOON_LINK_FULL_DUPLEX cpu_to_le32(0x00000004) -#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) - __le32 unused2; - __le32 unused3; -} __packed; - -/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) - */ -#define TYPHOON_XCVR_10HALF cpu_to_le16(0x0000) -#define TYPHOON_XCVR_10FULL cpu_to_le16(0x0001) -#define TYPHOON_XCVR_100HALF cpu_to_le16(0x0002) 
-#define TYPHOON_XCVR_100FULL cpu_to_le16(0x0003) -#define TYPHOON_XCVR_AUTONEG cpu_to_le16(0x0004) - -/* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1) - */ -#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE cpu_to_le16(0x0004) -#define TYPHOON_MEDIA_STAT_COLLISION_DETECT cpu_to_le16(0x0010) -#define TYPHOON_MEDIA_STAT_CARRIER_SENSE cpu_to_le16(0x0020) -#define TYPHOON_MEDIA_STAT_POLARITY_REV cpu_to_le16(0x0400) -#define TYPHOON_MEDIA_STAT_NO_LINK cpu_to_le16(0x0800) - -/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1) - */ -#define TYPHOON_MCAST_HASH_DISABLE cpu_to_le16(0x0000) -#define TYPHOON_MCAST_HASH_ENABLE cpu_to_le16(0x0001) -#define TYPHOON_MCAST_HASH_SET cpu_to_le16(0x0002) - -/* TYPHOON_CMD_CREATE_SA descriptor and settings - */ -struct sa_descriptor { - u8 flags; - u8 numDesc; - u16 cmd; - u16 seqNo; - u16 mode; -#define TYPHOON_SA_MODE_NULL cpu_to_le16(0x0000) -#define TYPHOON_SA_MODE_AH cpu_to_le16(0x0001) -#define TYPHOON_SA_MODE_ESP cpu_to_le16(0x0002) - u8 hashFlags; -#define TYPHOON_SA_HASH_ENABLE 0x01 -#define TYPHOON_SA_HASH_SHA1 0x02 -#define TYPHOON_SA_HASH_MD5 0x04 - u8 direction; -#define TYPHOON_SA_DIR_RX 0x00 -#define TYPHOON_SA_DIR_TX 0x01 - u8 encryptionFlags; -#define TYPHOON_SA_ENCRYPT_ENABLE 0x01 -#define TYPHOON_SA_ENCRYPT_DES 0x02 -#define TYPHOON_SA_ENCRYPT_3DES 0x00 -#define TYPHOON_SA_ENCRYPT_3DES_2KEY 0x00 -#define TYPHOON_SA_ENCRYPT_3DES_3KEY 0x04 -#define TYPHOON_SA_ENCRYPT_CBC 0x08 -#define TYPHOON_SA_ENCRYPT_ECB 0x00 - u8 specifyIndex; -#define TYPHOON_SA_SPECIFY_INDEX 0x01 -#define TYPHOON_SA_GENERATE_INDEX 0x00 - u32 SPI; - u32 destAddr; - u32 destMask; - u8 integKey[20]; - u8 confKey[24]; - u32 index; - u32 unused; - u32 unused2; -} __packed; - -/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) - * This is all for IPv4. 
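
The READ_STATS response above encodes link state as three mask/value pairs
inside linkStatus. Because the masks carry the same cpu_to_le32() conversion
as the field itself, they can be applied to the raw little-endian value
directly. A host-side decoding sketch (printf stands in for whatever
reporting the caller actually does):

    #include <stdio.h>

    static void report_link(const struct stats_resp *s)
    {
            if ((s->linkStatus & TYPHOON_LINK_STAT_MASK) != TYPHOON_LINK_GOOD) {
                    printf("link down\n");
                    return;
            }
            printf("link up: %s, %s duplex\n",
                   (s->linkStatus & TYPHOON_LINK_SPEED_MASK) ==
                            TYPHOON_LINK_100MBPS ? "100Mbps" : "10Mbps",
                   (s->linkStatus & TYPHOON_LINK_DUPLEX_MASK) ==
                            TYPHOON_LINK_FULL_DUPLEX ? "full" : "half");
    }
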
- */ -#define TYPHOON_OFFLOAD_TCP_CHKSUM cpu_to_le32(0x00000002) -#define TYPHOON_OFFLOAD_UDP_CHKSUM cpu_to_le32(0x00000004) -#define TYPHOON_OFFLOAD_IP_CHKSUM cpu_to_le32(0x00000008) -#define TYPHOON_OFFLOAD_IPSEC cpu_to_le32(0x00000010) -#define TYPHOON_OFFLOAD_BCAST_THROTTLE cpu_to_le32(0x00000020) -#define TYPHOON_OFFLOAD_DHCP_PREVENT cpu_to_le32(0x00000040) -#define TYPHOON_OFFLOAD_VLAN cpu_to_le32(0x00000080) -#define TYPHOON_OFFLOAD_FILTERING cpu_to_le32(0x00000100) -#define TYPHOON_OFFLOAD_TCP_SEGMENT cpu_to_le32(0x00000200) - -/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1) - */ -#define TYPHOON_WAKE_MAGIC_PKT cpu_to_le16(0x01) -#define TYPHOON_WAKE_LINK_EVENT cpu_to_le16(0x02) -#define TYPHOON_WAKE_ICMP_ECHO cpu_to_le16(0x04) -#define TYPHOON_WAKE_ARP cpu_to_le16(0x08) - -/* These are used to load the firmware image on the NIC - */ -struct typhoon_file_header { - u8 tag[8]; - __le32 version; - __le32 numSections; - __le32 startAddr; - __le32 hmacDigest[5]; -} __packed; - -struct typhoon_section_header { - __le32 len; - u16 checksum; - u16 reserved; - __le32 startAddr; -} __packed; - -/* The Typhoon Register offsets - */ -#define TYPHOON_REG_SOFT_RESET 0x00 -#define TYPHOON_REG_INTR_STATUS 0x04 -#define TYPHOON_REG_INTR_ENABLE 0x08 -#define TYPHOON_REG_INTR_MASK 0x0c -#define TYPHOON_REG_SELF_INTERRUPT 0x10 -#define TYPHOON_REG_HOST2ARM7 0x14 -#define TYPHOON_REG_HOST2ARM6 0x18 -#define TYPHOON_REG_HOST2ARM5 0x1c -#define TYPHOON_REG_HOST2ARM4 0x20 -#define TYPHOON_REG_HOST2ARM3 0x24 -#define TYPHOON_REG_HOST2ARM2 0x28 -#define TYPHOON_REG_HOST2ARM1 0x2c -#define TYPHOON_REG_HOST2ARM0 0x30 -#define TYPHOON_REG_ARM2HOST3 0x34 -#define TYPHOON_REG_ARM2HOST2 0x38 -#define TYPHOON_REG_ARM2HOST1 0x3c -#define TYPHOON_REG_ARM2HOST0 0x40 - -#define TYPHOON_REG_BOOT_DATA_LO TYPHOON_REG_HOST2ARM5 -#define TYPHOON_REG_BOOT_DATA_HI TYPHOON_REG_HOST2ARM4 -#define TYPHOON_REG_BOOT_DEST_ADDR TYPHOON_REG_HOST2ARM3 -#define TYPHOON_REG_BOOT_CHECKSUM TYPHOON_REG_HOST2ARM2 -#define TYPHOON_REG_BOOT_LENGTH TYPHOON_REG_HOST2ARM1 - -#define TYPHOON_REG_DOWNLOAD_BOOT_ADDR TYPHOON_REG_HOST2ARM1 -#define TYPHOON_REG_DOWNLOAD_HMAC_0 TYPHOON_REG_HOST2ARM2 -#define TYPHOON_REG_DOWNLOAD_HMAC_1 TYPHOON_REG_HOST2ARM3 -#define TYPHOON_REG_DOWNLOAD_HMAC_2 TYPHOON_REG_HOST2ARM4 -#define TYPHOON_REG_DOWNLOAD_HMAC_3 TYPHOON_REG_HOST2ARM5 -#define TYPHOON_REG_DOWNLOAD_HMAC_4 TYPHOON_REG_HOST2ARM6 - -#define TYPHOON_REG_BOOT_RECORD_ADDR_HI TYPHOON_REG_HOST2ARM2 -#define TYPHOON_REG_BOOT_RECORD_ADDR_LO TYPHOON_REG_HOST2ARM1 - -#define TYPHOON_REG_TX_LO_READY TYPHOON_REG_HOST2ARM3 -#define TYPHOON_REG_CMD_READY TYPHOON_REG_HOST2ARM2 -#define TYPHOON_REG_TX_HI_READY TYPHOON_REG_HOST2ARM1 - -#define TYPHOON_REG_COMMAND TYPHOON_REG_HOST2ARM0 -#define TYPHOON_REG_HEARTBEAT TYPHOON_REG_ARM2HOST3 -#define TYPHOON_REG_STATUS TYPHOON_REG_ARM2HOST0 - -/* 3XP Reset values (TYPHOON_REG_SOFT_RESET) - */ -#define TYPHOON_RESET_ALL 0x7f -#define TYPHOON_RESET_NONE 0x00 - -/* 3XP irq bits (TYPHOON_REG_INTR{STATUS,ENABLE,MASK}) - * - * Some of these came from OpenBSD, as the 3Com docs have it wrong - * (INTR_SELF) or don't list it at all (INTR_*_ABORT) - * - * Enabling irqs on the Heartbeat reg (ArmToHost3) gets you an irq - * about every 8ms, so don't do it. 
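
typhoon_file_header and typhoon_section_header above describe the
downloadable image: one fixed file header, then numSections sections, each
section header assumed to be immediately followed by its len bytes of
payload. A walk over that layout (a sketch under that layout assumption,
in kernel context; the real typhoon_download_firmware() also validates
lengths and checksums as it goes):

    #include <linux/kernel.h>

    static const u8 *walk_firmware_sections(const u8 *image)
    {
            const struct typhoon_file_header *fh = (const void *)image;
            const u8 *p = image + sizeof(*fh);
            u32 i, n = le32_to_cpu(fh->numSections);

            for (i = 0; i < n; i++) {
                    const struct typhoon_section_header *sh = (const void *)p;

                    /* payload sits right behind its section header */
                    p += sizeof(*sh) + le32_to_cpu(sh->len);
            }
            return p;       /* first byte past the last section */
    }
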
- */
-#define TYPHOON_INTR_HOST_INT 0x00000001
-#define TYPHOON_INTR_ARM2HOST0 0x00000002
-#define TYPHOON_INTR_ARM2HOST1 0x00000004
-#define TYPHOON_INTR_ARM2HOST2 0x00000008
-#define TYPHOON_INTR_ARM2HOST3 0x00000010
-#define TYPHOON_INTR_DMA0 0x00000020
-#define TYPHOON_INTR_DMA1 0x00000040
-#define TYPHOON_INTR_DMA2 0x00000080
-#define TYPHOON_INTR_DMA3 0x00000100
-#define TYPHOON_INTR_MASTER_ABORT 0x00000200
-#define TYPHOON_INTR_TARGET_ABORT 0x00000400
-#define TYPHOON_INTR_SELF 0x00000800
-#define TYPHOON_INTR_RESERVED 0xfffff000
-
-#define TYPHOON_INTR_BOOTCMD TYPHOON_INTR_ARM2HOST0
-
-#define TYPHOON_INTR_ENABLE_ALL 0xffffffef
-#define TYPHOON_INTR_ALL 0xffffffff
-#define TYPHOON_INTR_NONE 0x00000000
-
-/* The commands for the 3XP chip (TYPHOON_REG_COMMAND)
- */
-#define TYPHOON_BOOTCMD_BOOT 0x00
-#define TYPHOON_BOOTCMD_WAKEUP 0xfa
-#define TYPHOON_BOOTCMD_DNLD_COMPLETE 0xfb
-#define TYPHOON_BOOTCMD_SEG_AVAILABLE 0xfc
-#define TYPHOON_BOOTCMD_RUNTIME_IMAGE 0xfd
-#define TYPHOON_BOOTCMD_REG_BOOT_RECORD 0xff
-
-/* 3XP Status values (TYPHOON_REG_STATUS)
- */
-#define TYPHOON_STATUS_WAITING_FOR_BOOT 0x07
-#define TYPHOON_STATUS_SECOND_INIT 0x08
-#define TYPHOON_STATUS_RUNNING 0x09
-#define TYPHOON_STATUS_WAITING_FOR_HOST 0x0d
-#define TYPHOON_STATUS_WAITING_FOR_SEGMENT 0x10
-#define TYPHOON_STATUS_SLEEPING 0x11
-#define TYPHOON_STATUS_HALTED 0x14
--
cgit v0.10.2


From b955f6ca776f3bab3d1e2c5fb1d247b203cbda14 Mon Sep 17 00:00:00 2001
From: Jeff Kirsher
Date: Wed, 30 Mar 2011 07:46:36 -0700
Subject: amd: Move AMD (Lance) chipset drivers

Moves the drivers for the AMD chipsets into drivers/net/ethernet/amd/ and
the necessary Kconfig and Makefile changes.

The au1000 (Alchemy) driver was also moved into the same directory even
though it is not a "Lance" driver.

CC: Peter Maydell
CC: Roman Hodek
CC: "Maciej W. Rozycki"
CC: Donald Becker
CC: Sam Creasey
CC: Miguel de Icaza
CC: Thomas Bogendoerfer
CC: Don Fry
CC: Geert Uytterhoeven
CC: Russell King
CC: David Davies
CC: "M.Hipp"
CC: Pete Popov
CC: David Hinds
CC: "Roger C. Pao"
Signed-off-by: Jeff Kirsher

diff --git a/MAINTAINERS b/MAINTAINERS
index 112e226..6f9dc94 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -746,7 +746,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W: http://www.arm.linux.org.uk/
 S: Maintained
 F: arch/arm/mach-ebsa110/
-F: drivers/net/arm/am79c961a.*
+F: drivers/net/ethernet/amd/am79c961a.*

 ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
 M: Daniel Ribeiro
@@ -4941,7 +4941,7 @@ PCNET32 NETWORK DRIVER
 M: Don Fry
 L: netdev@vger.kernel.org
 S: Maintained
-F: drivers/net/pcnet32.c
+F: drivers/net/ethernet/amd/pcnet32.c

 PCRYPT PARALLEL CRYPTO ENGINE
 M: Steffen Klassert
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
deleted file mode 100644
index 60b35fb..0000000
--- a/drivers/net/7990.c
+++ /dev/null
@@ -1,662 +0,0 @@
-/*
- * 7990.c -- LANCE ethernet IC generic routines.
- * This is an attempt to separate out the bits of various ethernet
- * drivers that are common because they all use the AMD 7990 LANCE
- * (Local Area Network Controller for Ethernet) chip.
- *
- * Copyright (C) 05/1998 Peter Maydell
- *
- * Most of this stuff was obtained by looking at other LANCE drivers,
- * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
- * NB: this was made easy by the fact that Jes Sorensen had cleaned up
- * most of a2065 and sunlance with the aim of merging them, so the
- * common code was pretty obvious.
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -/* Used for the temporal inet entries and routing */ -#include -#include - -#include -#include -#include -#include -#ifdef CONFIG_HP300 -#include -#endif - -#include "7990.h" - -#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x)) -#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x)) -#define READRDP(lp) in_be16(lp->base + LANCE_RDP) - -#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE) -#include "hplance.h" - -#undef WRITERAP -#undef WRITERDP -#undef READRDP - -#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE) - -/* Lossage Factor Nine, Mr Sulu. */ -#define WRITERAP(lp,x) (lp->writerap(lp,x)) -#define WRITERDP(lp,x) (lp->writerdp(lp,x)) -#define READRDP(lp) (lp->readrdp(lp)) - -#else - -/* These inlines can be used if only CONFIG_HPLANCE is defined */ -static inline void WRITERAP(struct lance_private *lp, __u16 value) -{ - do { - out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); - } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); -} - -static inline void WRITERDP(struct lance_private *lp, __u16 value) -{ - do { - out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); - } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); -} - -static inline __u16 READRDP(struct lance_private *lp) -{ - __u16 value; - do { - value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); - } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); - return value; -} - -#endif -#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */ - -/* debugging output macros, various flavours */ -/* #define TEST_HITS */ -#ifdef UNDEF -#define PRINT_RINGS() \ -do { \ - int t; \ - for (t=0; t < RX_RING_SIZE; t++) { \ - printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\ - t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\ - ib->brx_ring[t].length,\ - ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\ - }\ - for (t=0; t < TX_RING_SIZE; t++) { \ - printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\ - t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\ - ib->btx_ring[t].length,\ - ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\ - }\ -} while (0) -#else -#define PRINT_RINGS() -#endif - -/* Load the CSR registers. The LANCE has to be STOPped when we do this! */ -static void load_csrs (struct lance_private *lp) -{ - volatile struct lance_init_block *aib = lp->lance_init_block; - int leptr; - - leptr = LANCE_ADDR (aib); - - WRITERAP(lp, LE_CSR1); /* load address of init block */ - WRITERDP(lp, leptr & 0xFFFF); - WRITERAP(lp, LE_CSR2); - WRITERDP(lp, leptr >> 16); - WRITERAP(lp, LE_CSR3); - WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */ - - /* Point back to csr0 */ - WRITERAP(lp, LE_CSR0); -} - -/* #define to 0 or 1 appropriately */ -#define DEBUG_IRING 0 -/* Set up the Lance Rx and Tx rings and the init block */ -static void lance_init_ring (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */ - int leptr; - int i; - - aib = lp->lance_init_block; - - lp->rx_new = lp->tx_new = 0; - lp->rx_old = lp->tx_old = 0; - - ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */ - - /* Copy the ethernet address to the lance init block - * Notice that we do a byteswap if we're big endian. 
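
Everything in this file funnels hardware access through the two-port scheme
set up by the accessors above: write a CSR number to the Register Address
Port, then move data through the Register Data Port (on HP hardware, with a
handshake on the status port around each access). The read side of that
indirection as a small helper (a sketch, not part of the original file):

    static __u16 read_csr(struct lance_private *lp, __u16 csr)
    {
            WRITERAP(lp, csr);      /* select the CSR */
            return READRDP(lp);     /* fetch its contents */
    }
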
- * [I think this is the right criterion; at least, sunlance,
- * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
- * However, the datasheet says that the BSWAP bit doesn't affect
- * the init block, so surely it should be low byte first for
- * everybody? Um.]
- * We could define the ib->physaddr as three 16bit values and
- * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
- */
-#ifdef __BIG_ENDIAN
-	ib->phys_addr [0] = dev->dev_addr [1];
-	ib->phys_addr [1] = dev->dev_addr [0];
-	ib->phys_addr [2] = dev->dev_addr [3];
-	ib->phys_addr [3] = dev->dev_addr [2];
-	ib->phys_addr [4] = dev->dev_addr [5];
-	ib->phys_addr [5] = dev->dev_addr [4];
-#else
-	for (i=0; i<6; i++)
-		ib->phys_addr[i] = dev->dev_addr[i];
-#endif
-
-	if (DEBUG_IRING)
-		printk ("TX rings:\n");
-
-	lp->tx_full = 0;
-	/* Setup the Tx ring entries */
-	for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
-		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
-		ib->btx_ring [i].tmd0 = leptr;
-		ib->btx_ring [i].tmd1_hadr = leptr >> 16;
-		ib->btx_ring [i].tmd1_bits = 0;
-		ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
-		ib->btx_ring [i].misc = 0;
-		if (DEBUG_IRING)
-			printk ("%d: 0x%8.8x\n", i, leptr);
-	}
-
-	/* Setup the Rx ring entries */
-	if (DEBUG_IRING)
-		printk ("RX rings:\n");
-	for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
-		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
-
-		ib->brx_ring [i].rmd0 = leptr;
-		ib->brx_ring [i].rmd1_hadr = leptr >> 16;
-		ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
-		/* 0xf000 == bits that must be one (reserved, presumably) */
-		ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
-		ib->brx_ring [i].mblength = 0;
-		if (DEBUG_IRING)
-			printk ("%d: 0x%8.8x\n", i, leptr);
-	}
-
-	/* Setup the initialization block */
-
-	/* Setup rx descriptor pointer */
-	leptr = LANCE_ADDR(&aib->brx_ring);
-	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
-	ib->rx_ptr = leptr;
-	if (DEBUG_IRING)
-		printk ("RX ptr: %8.8x\n", leptr);
-
-	/* Setup tx descriptor pointer */
-	leptr = LANCE_ADDR(&aib->btx_ring);
-	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
-	ib->tx_ptr = leptr;
-	if (DEBUG_IRING)
-		printk ("TX ptr: %8.8x\n", leptr);
-
-	/* Clear the multicast filter */
-	ib->filter [0] = 0;
-	ib->filter [1] = 0;
-	PRINT_RINGS();
-}
-
-/* LANCE must be STOPped before we do this, too...
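
The init block packs each ring's 24-bit bus address and log2 size into a
len/ptr pair: the low 16 address bits go into *_ptr, and the high 8 bits
share *_len with the LEN code in bits 13-15. The packing from
lance_init_ring() in isolation (a sketch assuming 16-bit init-block fields):

    #include <stdint.h>

    static void pack_ring_ptr(uint32_t leptr, uint16_t log_bufs,
                              uint16_t *len_field, uint16_t *ptr_field)
    {
            *ptr_field = leptr & 0xffff;            /* low address bits */
            *len_field = (log_bufs << 13) |
                         (leptr >> 16);             /* LEN code + high bits */
    }
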
*/ -static int init_restart_lance (struct lance_private *lp) -{ - int i; - - WRITERAP(lp, LE_CSR0); - WRITERDP(lp, LE_C0_INIT); - - /* Need a hook here for sunlance ledma stuff */ - - /* Wait for the lance to complete initialization */ - for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++) - barrier(); - if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) { - printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp)); - return -1; - } - - /* Clear IDON by writing a "1", enable interrupts and start lance */ - WRITERDP(lp, LE_C0_IDON); - WRITERDP(lp, LE_C0_INEA | LE_C0_STRT); - - return 0; -} - -static int lance_reset (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - int status; - - /* Stop the lance */ - WRITERAP(lp, LE_CSR0); - WRITERDP(lp, LE_C0_STOP); - - load_csrs (lp); - lance_init_ring (dev); - dev->trans_start = jiffies; /* prevent tx timeout */ - status = init_restart_lance (lp); -#ifdef DEBUG_DRIVER - printk ("Lance restart=%d\n", status); -#endif - return status; -} - -static int lance_rx (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - volatile struct lance_rx_desc *rd; - unsigned char bits; -#ifdef TEST_HITS - int i; -#endif - -#ifdef TEST_HITS - printk ("["); - for (i = 0; i < RX_RING_SIZE; i++) { - if (i == lp->rx_new) - printk ("%s", - ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X"); - else - printk ("%s", - ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1"); - } - printk ("]"); -#endif -#ifdef CONFIG_HP300 - blinken_leds(0x40, 0); -#endif - WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */ - for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */ - !((bits = rd->rmd1_bits) & LE_R1_OWN); - rd = &ib->brx_ring [lp->rx_new]) { - - /* We got an incomplete frame? 
*/ - if ((bits & LE_R1_POK) != LE_R1_POK) { - dev->stats.rx_over_errors++; - dev->stats.rx_errors++; - continue; - } else if (bits & LE_R1_ERR) { - /* Count only the end frame as a rx error, - * not the beginning - */ - if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; - if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) dev->stats.rx_errors++; - } else { - int len = (rd->mblength & 0xfff) - 4; - struct sk_buff *skb = dev_alloc_skb (len+2); - - if (!skb) { - printk ("%s: Memory squeeze, deferring packet.\n", - dev->name); - dev->stats.rx_dropped++; - rd->mblength = 0; - rd->rmd1_bits = LE_R1_OWN; - lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; - return 0; - } - - skb_reserve (skb, 2); /* 16 byte align */ - skb_put (skb, len); /* make room */ - skb_copy_to_linear_data(skb, - (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), - len); - skb->protocol = eth_type_trans (skb, dev); - netif_rx (skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; - } - - /* Return the packet to the pool */ - rd->mblength = 0; - rd->rmd1_bits = LE_R1_OWN; - lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; - } - return 0; -} - -static int lance_tx (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - volatile struct lance_tx_desc *td; - int i, j; - int status; - -#ifdef CONFIG_HP300 - blinken_leds(0x80, 0); -#endif - /* csr0 is 2f3 */ - WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); - /* csr0 is 73 */ - - j = lp->tx_old; - for (i = j; i != lp->tx_new; i = j) { - td = &ib->btx_ring [i]; - - /* If we hit a packet not owned by us, stop */ - if (td->tmd1_bits & LE_T1_OWN) - break; - - if (td->tmd1_bits & LE_T1_ERR) { - status = td->misc; - - dev->stats.tx_errors++; - if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; - - if (status & LE_T3_CLOS) { - dev->stats.tx_carrier_errors++; - if (lp->auto_select) { - lp->tpe = 1 - lp->tpe; - printk("%s: Carrier Lost, trying %s\n", - dev->name, lp->tpe?"TPE":"AUI"); - /* Stop the lance */ - WRITERAP(lp, LE_CSR0); - WRITERDP(lp, LE_C0_STOP); - lance_init_ring (dev); - load_csrs (lp); - init_restart_lance (lp); - return 0; - } - } - - /* buffer errors and underflows turn off the transmitter */ - /* Restart the adapter */ - if (status & (LE_T3_BUF|LE_T3_UFL)) { - dev->stats.tx_fifo_errors++; - - printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", - dev->name); - /* Stop the lance */ - WRITERAP(lp, LE_CSR0); - WRITERDP(lp, LE_C0_STOP); - lance_init_ring (dev); - load_csrs (lp); - init_restart_lance (lp); - return 0; - } - } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { - /* - * So we don't count the packet more than once. - */ - td->tmd1_bits &= ~(LE_T1_POK); - - /* One collision before packet was sent. */ - if (td->tmd1_bits & LE_T1_EONE) - dev->stats.collisions++; - - /* More than one collision, be optimistic. 
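
Both rings are handed back and forth through the OWN bit in
rmd1_bits/tmd1_bits: the host may only touch descriptors it owns (bit
clear), and returns each consumed RX entry to the NIC by setting the bit
again. The protocol in miniature (a sketch; 0x80 is the usual LANCE OWN
bit assignment, assumed here to match LE_R1_OWN in 7990.h):

    #include <stdint.h>

    #define OWN_BIT 0x80    /* assumed value of LE_R1_OWN */

    /* Walk RX descriptors the way lance_rx() does: stop at the first entry
     * the NIC still owns, give each consumed entry straight back. */
    static void drain_rx(uint8_t *rmd1_bits, int mod_mask, int *rx_new)
    {
            while (!(rmd1_bits[*rx_new] & OWN_BIT)) {
                    /* ... deliver or drop the frame here ... */
                    rmd1_bits[*rx_new] = OWN_BIT;   /* back to the NIC */
                    *rx_new = (*rx_new + 1) & mod_mask;
            }
    }
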
*/ - if (td->tmd1_bits & LE_T1_EMORE) - dev->stats.collisions += 2; - - dev->stats.tx_packets++; - } - - j = (j + 1) & lp->tx_ring_mod_mask; - } - lp->tx_old = j; - WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); - return 0; -} - -static irqreturn_t -lance_interrupt (int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *)dev_id; - struct lance_private *lp = netdev_priv(dev); - int csr0; - - spin_lock (&lp->devlock); - - WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */ - csr0 = READRDP(lp); - - PRINT_RINGS(); - - if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */ - spin_unlock (&lp->devlock); - return IRQ_NONE; /* been generated by the Lance. */ - } - - /* Acknowledge all the interrupt sources ASAP */ - WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT)); - - if ((csr0 & LE_C0_ERR)) { - /* Clear the error condition */ - WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA); - } - - if (csr0 & LE_C0_RINT) - lance_rx (dev); - - if (csr0 & LE_C0_TINT) - lance_tx (dev); - - /* Log misc errors. */ - if (csr0 & LE_C0_BABL) - dev->stats.tx_errors++; /* Tx babble. */ - if (csr0 & LE_C0_MISS) - dev->stats.rx_errors++; /* Missed a Rx frame. */ - if (csr0 & LE_C0_MERR) { - printk("%s: Bus master arbitration failure, status %4.4x.\n", - dev->name, csr0); - /* Restart the chip. */ - WRITERDP(lp, LE_C0_STRT); - } - - if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) { - lp->tx_full = 0; - netif_wake_queue (dev); - } - - WRITERAP(lp, LE_CSR0); - WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA); - - spin_unlock (&lp->devlock); - return IRQ_HANDLED; -} - -int lance_open (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - int res; - - /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */ - if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev)) - return -EAGAIN; - - res = lance_reset(dev); - spin_lock_init(&lp->devlock); - netif_start_queue (dev); - - return res; -} -EXPORT_SYMBOL_GPL(lance_open); - -int lance_close (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - netif_stop_queue (dev); - - /* Stop the LANCE */ - WRITERAP(lp, LE_CSR0); - WRITERDP(lp, LE_C0_STOP); - - free_irq(lp->irq, dev); - - return 0; -} -EXPORT_SYMBOL_GPL(lance_close); - -void lance_tx_timeout(struct net_device *dev) -{ - printk("lance_tx_timeout\n"); - lance_reset(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue (dev); -} -EXPORT_SYMBOL_GPL(lance_tx_timeout); - -int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - int entry, skblen, len; - static int outs; - unsigned long flags; - - if (!TX_BUFFS_AVAIL) - return NETDEV_TX_LOCKED; - - netif_stop_queue (dev); - - skblen = skb->len; - -#ifdef DEBUG_DRIVER - /* dump the packet */ - { - int i; - - for (i = 0; i < 64; i++) { - if ((i % 16) == 0) - printk ("\n"); - printk ("%2.2x ", skb->data [i]); - } - } -#endif - len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; - entry = lp->tx_new & lp->tx_ring_mod_mask; - ib->btx_ring [entry].length = (-len) | 0xf000; - ib->btx_ring [entry].misc = 0; - - if (skb->len < ETH_ZLEN) - memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); - skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen); - - /* Now, give the packet to the lance */ - ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); - lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; - - outs++; - /* Kick the lance: transmit now */ - WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD); - dev_kfree_skb (skb); - - spin_lock_irqsave (&lp->devlock, flags); - if (TX_BUFFS_AVAIL) - netif_start_queue (dev); - else - lp->tx_full = 1; - spin_unlock_irqrestore (&lp->devlock, flags); - - return NETDEV_TX_OK; -} -EXPORT_SYMBOL_GPL(lance_start_xmit); - -/* taken from the depca driver via a2065.c */ -static void lance_load_multicast (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - volatile u16 *mcast_table = (u16 *)&ib->filter; - struct netdev_hw_addr *ha; - u32 crc; - - /* set all multicast bits */ - if (dev->flags & IFF_ALLMULTI){ - ib->filter [0] = 0xffffffff; - ib->filter [1] = 0xffffffff; - return; - } - /* clear the multicast filter */ - ib->filter [0] = 0; - ib->filter [1] = 0; - - /* Add addresses */ - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc_le(6, ha->addr); - crc = crc >> 26; - mcast_table [crc >> 4] |= 1 << (crc & 0xf); - } -} - - -void lance_set_multicast (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - int stopped; - - stopped = netif_queue_stopped(dev); - if (!stopped) - netif_stop_queue (dev); - - while (lp->tx_old != lp->tx_new) - schedule(); - - WRITERAP(lp, LE_CSR0); - WRITERDP(lp, LE_C0_STOP); - lance_init_ring (dev); - - if (dev->flags & IFF_PROMISC) { - ib->mode |= LE_MO_PROM; - } else { - ib->mode &= ~LE_MO_PROM; - lance_load_multicast (dev); - } - load_csrs (lp); - init_restart_lance (lp); - - if (!stopped) - netif_start_queue (dev); -} -EXPORT_SYMBOL_GPL(lance_set_multicast); - -#ifdef CONFIG_NET_POLL_CONTROLLER -void lance_poll(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - spin_lock (&lp->devlock); - WRITERAP(lp, LE_CSR0); - WRITERDP(lp, LE_C0_STRT); - spin_unlock (&lp->devlock); - lance_interrupt(dev->irq, dev); -} -#endif - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/7990.h b/drivers/net/7990.h deleted file mode 100644 index 0a5837b..0000000 --- a/drivers/net/7990.h +++ /dev/null @@ -1,254 +0,0 @@ -/* - * 7990.h -- LANCE ethernet IC generic routines. - * This is an attempt to separate out the bits of various ethernet - * drivers that are common because they all use the AMD 7990 LANCE - * (Local Area Network Controller for Ethernet) chip. - * - * Copyright (C) 05/1998 Peter Maydell - * - * Most of this stuff was obtained by looking at other LANCE drivers, - * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful. - */ - -#ifndef _7990_H -#define _7990_H - -/* The lance only has two register locations. We communicate mostly via memory. */ -#define LANCE_RDP 0 /* Register Data Port */ -#define LANCE_RAP 2 /* Register Address Port */ - -/* Transmit/receive ring definitions. - * We allow the specific drivers to override these defaults if they want to. 
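- *
- * For illustration only (this example is not part of the original
- * header), a board driver with more buffer RAM could pick bigger rings
- * by defining the pair before including this file:
- *
- *	#define LANCE_LOG_TX_BUFFERS 2		(4 Tx descriptors)
- *	#define LANCE_LOG_RX_BUFFERS 4		(16 Rx descriptors)
- *	#include "7990.h"
- *
- * Both macros sit under the single #ifndef LANCE_LOG_TX_BUFFERS below,
- * so defining the pair overrides the 2+8 default.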
- * NB: according to lance.c, increasing the number of buffers is a waste
- * of space and reduces the chance that an upper layer will be able to
- * reorder queued Tx packets based on priority. [Clearly there is a minimum
- * limit too: too small and we drop rx packets and can't tx at full speed.]
- * 4+4 seems to be the usual setting; the atarilance driver uses 3 and 5.
- */
-
-/* Blast! This won't work. The problem is that we can't specify a default
- * setting because that would cause the lance_init_block struct to be
- * too long (and overflow the RAM on shared-memory cards like the HP LANCE).
- */
-#ifndef LANCE_LOG_TX_BUFFERS
-#define LANCE_LOG_TX_BUFFERS 1
-#define LANCE_LOG_RX_BUFFERS 3
-#endif
-
-#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
-#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
-#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
-#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
-			lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
-			lp->tx_old - lp->tx_new-1)
-
-/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
-#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
-
-/* Now the prototypes we export */
-extern int lance_open(struct net_device *dev);
-extern int lance_close (struct net_device *dev);
-extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-extern void lance_set_multicast (struct net_device *dev);
-extern void lance_tx_timeout(struct net_device *dev);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-extern void lance_poll(struct net_device *dev);
-#endif
-
-#endif /* ndef _7990_H */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0b3ea21..b686dab 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -287,27 +287,6 @@ config BMAC
 	  To compile this driver as a module, choose M here: the module
 	  will be called bmac.
 
-config ARIADNE
-	tristate "Ariadne support"
-	depends on ZORRO
-	help
-	  If you have a Village Tronic Ariadne Ethernet adapter, say Y.
-	  Otherwise, say N.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called ariadne.
-
-config A2065
-	tristate "A2065 support"
-	depends on ZORRO
-	select CRC32
-	help
-	  If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
-	  say N.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called a2065.
-
 config HYDRA
 	tristate "Hydra support"
 	depends on ZORRO
@@ -387,16 +366,6 @@ config MACMACE
 	  say Y and read the Ethernet-HOWTO, available from
 	  <http://www.tldp.org/docs.html#howto>.
 
-config MVME147_NET
-	tristate "MVME147 (Lance) Ethernet support"
-	depends on MVME147
-	select CRC32
-	help
-	  Support for the on-board Ethernet interface on the Motorola MVME147
-	  single-board computer. Say Y here to include the
-	  driver for this chip in your kernel.
-	  To compile this driver as a module, choose M here.
-
 config MVME16x_NET
 	tristate "MVME16x Ethernet support"
 	depends on MVME16x
@@ -415,27 +384,6 @@ config BVME6000_NET
 	  in your kernel.
 	  To compile this driver as a module, choose M here.
 
-config ATARILANCE
-	tristate "Atari Lance support"
-	depends on ATARI
-	help
-	  Say Y to include support for several Atari Ethernet adapters based
-	  on the AMD Lance chipset: RieblCard (with or without battery), or
-	  PAMCard VME (also the version by Rhotron, with different addresses).
-
-config SUN3LANCE
-	tristate "Sun3/Sun3x on-board LANCE support"
-	depends on SUN3 || SUN3X
-	help
-	  Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80)
-	  featured an AMD Lance 10Mbit Ethernet controller on board; say Y
-	  here to compile in the Linux driver for this and enable Ethernet.
-	  General Linux information on the Sun 3 and 3x series (now
-	  discontinued) is at
-	  <http://www.angelfire.com/ca2/tech68k/sun3.html>.
-
-	  If you're not building a kernel for a Sun 3, say N.
- config SUN3_82586 bool "Sun3 on-board Intel 82586 support" depends on SUN3 @@ -445,14 +393,6 @@ config SUN3_82586 that this driver does not support 82586-based adapters on additional VME boards. -config HPLANCE - bool "HP on-board LANCE support" - depends on DIO - select CRC32 - help - If you want to use the builtin "LANCE" Ethernet controller on an - HP300 machine, say Y here. - config LASI_82596 tristate "Lasi ethernet" depends on GSC @@ -487,15 +427,6 @@ config XTENSA_XT2000_SONIC help This is the driver for the onboard card of the Xtensa XT2000 board. -config MIPS_AU1X00_ENET - tristate "MIPS AU1000 Ethernet support" - depends on MIPS_ALCHEMY - select PHYLIB - select CRC32 - help - If you have an Alchemy Semi AU1X00 based system - say Y. Otherwise, say N. - config SGI_IOC3_ETH bool "SGI IOC3 Ethernet" depends on PCI && SGI_IP27 @@ -545,19 +476,6 @@ config SH_ETH This driver supporting CPUs are: - SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757. -config SUNLANCE - tristate "Sun LANCE support" - depends on SBUS - select CRC32 - help - This driver supports the "le" interface present on all 32-bit Sparc - systems, on some older Ultra systems and as an Sbus option. These - cards are based on the AMD Lance chipset, which is better known - via the NE2100 cards. - - To compile this driver as a module, choose M here: the module - will be called sunlance. - config HAPPYMEAL tristate "Sun Happy Meal 10/100baseT support" depends on SBUS || PCI @@ -673,18 +591,6 @@ config ELMC_II To compile this driver as a module, choose M here. The module will be called 3c527. -config LANCE - tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" - depends on ISA && ISA_DMA_API - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . Some LinkSys cards are - of this type. - - To compile this driver as a module, choose M here: the module - will be called lance. This is recommended. - config NET_VENDOR_SMC bool "Western Digital/SMC cards" depends on ISA || MCA || EISA || MAC @@ -1020,17 +926,6 @@ config NI52 To compile this driver as a module, choose M here. The module will be called ni52. -config NI65 - tristate "NI6510 support" - depends on NET_VENDOR_RACAL && ISA && ISA_DMA_API - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called ni65. - config DNET tristate "Dave ethernet support (DNET)" depends on NET_ETHERNET && HAS_IOMEM @@ -1056,19 +951,6 @@ config AT1700 To compile this driver as a module, choose M here. The module will be called at1700. -config DEPCA - tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support" - depends on ISA || EISA || MCA - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - as well as - . - - To compile this driver as a module, choose M here. The module - will be called depca. - config HP100 tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support" depends on ISA || EISA || PCI @@ -1287,32 +1169,6 @@ config NET_PCI will be asked for your specific card in the following questions. If you are unsure, say Y. -config PCNET32 - tristate "AMD PCnet32 PCI support" - depends on NET_PCI && PCI - select CRC32 - select MII - help - If you have a PCnet32 or PCnetPCI based network (Ethernet) card, - answer Y here and read the Ethernet-HOWTO, available from - . 
- - To compile this driver as a module, choose M here. The module - will be called pcnet32. - -config AMD8111_ETH - tristate "AMD 8111 (new PCI lance) support" - depends on NET_PCI && PCI - select CRC32 - select MII - help - If you have an AMD 8111-based PCI lance ethernet card, - answer Y here and read the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called amd8111e. - config ADAPTEC_STARFIRE tristate "Adaptec Starfire/DuraLAN support" depends on NET_PCI && PCI @@ -1834,15 +1690,6 @@ config SGISEEQ Say Y here if you have an Seeq based Ethernet network card. This is used in many Silicon Graphics machines. -config DECLANCE - tristate "DEC LANCE ethernet controller support" - depends on MACH_DECSTATION - select CRC32 - help - This driver is for the series of Ethernet controllers produced by - DEC (now Compaq) based on the AMD Lance chipset, including the - DEPCA series. (This chipset is better known via the NE2100 cards.) - config FEC bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ diff --git a/drivers/net/Makefile b/drivers/net/Makefile index d4d6744..59b6cc9 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -56,7 +56,6 @@ obj-$(CONFIG_PLIP) += plip.o obj-$(CONFIG_ROADRUNNER) += rrunner.o obj-$(CONFIG_HAPPYMEAL) += sunhme.o -obj-$(CONFIG_SUNLANCE) += sunlance.o obj-$(CONFIG_SUNQE) += sunqe.o obj-$(CONFIG_SUNBMAC) += sunbmac.o obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o @@ -67,7 +66,6 @@ obj-$(CONFIG_MACE) += mace.o obj-$(CONFIG_BMAC) += bmac.o obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o -obj-$(CONFIG_PCNET32) += pcnet32.o obj-$(CONFIG_E100) += e100.o obj-$(CONFIG_TLAN) += tlan.o obj-$(CONFIG_EPIC100) += epic100.o @@ -177,9 +175,7 @@ obj-$(CONFIG_MACVLAN) += macvlan.o obj-$(CONFIG_MACVTAP) += macvtap.o obj-$(CONFIG_DE600) += de600.o obj-$(CONFIG_DE620) += de620.o -obj-$(CONFIG_LANCE) += lance.o obj-$(CONFIG_SUN3_82586) += sun3_82586.o -obj-$(CONFIG_SUN3LANCE) += sun3lance.o obj-$(CONFIG_DEFXX) += defxx.o obj-$(CONFIG_SGISEEQ) += sgiseeq.o obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o @@ -194,12 +190,10 @@ obj-$(CONFIG_8139CP) += 8139cp.o obj-$(CONFIG_8139TOO) += 8139too.o obj-$(CONFIG_ZNET) += znet.o obj-$(CONFIG_CPMAC) += cpmac.o -obj-$(CONFIG_DEPCA) += depca.o obj-$(CONFIG_EWRK3) += ewrk3.o obj-$(CONFIG_ATP) += atp.o obj-$(CONFIG_NI5010) += ni5010.o obj-$(CONFIG_NI52) += ni52.o -obj-$(CONFIG_NI65) += ni65.o obj-$(CONFIG_ELPLUS) += 3c505.o obj-$(CONFIG_AC3200) += ac3200.o 8390.o obj-$(CONFIG_APRICOT) += 82596.o @@ -214,19 +208,12 @@ obj-$(CONFIG_LP486E) += lp486e.o obj-$(CONFIG_ETH16I) += eth16i.o obj-$(CONFIG_ZORRO8390) += zorro8390.o -obj-$(CONFIG_HPLANCE) += hplance.o 7990.o -obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o obj-$(CONFIG_EQUALIZER) += eql.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o -obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o -obj-$(CONFIG_DECLANCE) += declance.o -obj-$(CONFIG_ATARILANCE) += atarilance.o -obj-$(CONFIG_A2065) += a2065.o obj-$(CONFIG_HYDRA) += hydra.o -obj-$(CONFIG_ARIADNE) += ariadne.o obj-$(CONFIG_CS89x0) += cs89x0.o obj-$(CONFIG_MACSONIC) += macsonic.o obj-$(CONFIG_MACMACE) += macmace.o @@ -236,7 +223,6 @@ obj-$(CONFIG_VETH) += veth.o obj-$(CONFIG_NET_NETX) += netx-eth.o obj-$(CONFIG_DL2K) += dl2k.o obj-$(CONFIG_R8169) += r8169.o -obj-$(CONFIG_AMD8111_ETH) += amd8111e.o 
obj-$(CONFIG_IBMVETH) += ibmveth.o obj-$(CONFIG_S2IO) += s2io.o obj-$(CONFIG_VXGE) += vxge/ diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c deleted file mode 100644 index e1e1b07..0000000 --- a/drivers/net/a2065.c +++ /dev/null @@ -1,781 +0,0 @@ -/* - * Amiga Linux/68k A2065 Ethernet Driver - * - * (C) Copyright 1995-2003 by Geert Uytterhoeven - * - * Fixes and tips by: - * - Janos Farkas (CHEXUM@sparta.banki.hu) - * - Jes Degn Soerensen (jds@kom.auc.dk) - * - Matt Domsch (Matt_Domsch@dell.com) - * - * ---------------------------------------------------------------------------- - * - * This program is based on - * - * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver - * (C) Copyright 1995 by Geert Uytterhoeven, - * Peter De Schrijver - * - * lance.c: An AMD LANCE ethernet driver for linux. - * Written 1993-94 by Donald Becker. - * - * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller - * Advanced Micro Devices - * Publication #16907, Rev. B, Amendment/0, May 1994 - * - * ---------------------------------------------------------------------------- - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of the Linux - * distribution for more details. - * - * ---------------------------------------------------------------------------- - * - * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains: - * - * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with - * both 10BASE-2 (thin coax) and AUI (DB-15) connectors - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -/*#define DEBUG*/ -/*#define TEST_HITS*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "a2065.h" - -/* Transmit/Receive Ring Definitions */ - -#define LANCE_LOG_TX_BUFFERS (2) -#define LANCE_LOG_RX_BUFFERS (4) - -#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS) -#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS) - -#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) -#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) - -#define PKT_BUF_SIZE (1544) -#define RX_BUFF_SIZE PKT_BUF_SIZE -#define TX_BUFF_SIZE PKT_BUF_SIZE - -/* Layout of the Lance's RAM Buffer */ - -struct lance_init_block { - unsigned short mode; /* Pre-set mode (reg. 15) */ - unsigned char phys_addr[6]; /* Physical ethernet address */ - unsigned filter[2]; /* Multicast filter. */ - - /* Receive and transmit ring base, along with extra bits. */ - unsigned short rx_ptr; /* receive descriptor addr */ - unsigned short rx_len; /* receive len and high addr */ - unsigned short tx_ptr; /* transmit descriptor addr */ - unsigned short tx_len; /* transmit len and high addr */ - - /* The Tx and Rx ring entries must aligned on 8-byte boundaries. 
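- * (the LANCE fetches each descriptor as a single 8-byte access, and the
- * low three bits of the ring addresses handed to it via the init block
- * are assumed to be zero).  A compile-time check along these lines --
- * illustrative only, not in the original driver -- would catch a bad
- * layout:
- *
- *	BUILD_BUG_ON(offsetof(struct lance_init_block, brx_ring) & 7);
- *	BUILD_BUG_ON(offsetof(struct lance_init_block, btx_ring) & 7);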
*/ - struct lance_rx_desc brx_ring[RX_RING_SIZE]; - struct lance_tx_desc btx_ring[TX_RING_SIZE]; - - char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE]; - char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE]; -}; - -/* Private Device Data */ - -struct lance_private { - char *name; - volatile struct lance_regs *ll; - volatile struct lance_init_block *init_block; /* Hosts view */ - volatile struct lance_init_block *lance_init_block; /* Lance view */ - - int rx_new, tx_new; - int rx_old, tx_old; - - int lance_log_rx_bufs, lance_log_tx_bufs; - int rx_ring_mod_mask, tx_ring_mod_mask; - - int tpe; /* cable-selection is TPE */ - int auto_select; /* cable-selection by carrier */ - unsigned short busmaster_regval; - -#ifdef CONFIG_SUNLANCE - struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */ - int burst_sizes; /* ledma SBus burst sizes */ -#endif - struct timer_list multicast_timer; -}; - -#define LANCE_ADDR(x) ((int)(x) & ~0xff000000) - -/* Load the CSR registers */ -static void load_csrs(struct lance_private *lp) -{ - volatile struct lance_regs *ll = lp->ll; - volatile struct lance_init_block *aib = lp->lance_init_block; - int leptr = LANCE_ADDR(aib); - - ll->rap = LE_CSR1; - ll->rdp = (leptr & 0xFFFF); - ll->rap = LE_CSR2; - ll->rdp = leptr >> 16; - ll->rap = LE_CSR3; - ll->rdp = lp->busmaster_regval; - - /* Point back to csr0 */ - ll->rap = LE_CSR0; -} - -/* Setup the Lance Rx and Tx rings */ -static void lance_init_ring(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - volatile struct lance_init_block *aib = lp->lance_init_block; - /* for LANCE_ADDR computations */ - int leptr; - int i; - - /* Lock out other processes while setting up hardware */ - netif_stop_queue(dev); - lp->rx_new = lp->tx_new = 0; - lp->rx_old = lp->tx_old = 0; - - ib->mode = 0; - - /* Copy the ethernet address to the lance init block - * Note that on the sparc you need to swap the ethernet address. 
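- * (the chip reads the init block 16 bits at a time, so each byte pair
- * is stored swapped: 00:80:10:aa:bb:cc ends up as 80 00 aa 10 cc bb;
- * the six assignments below are equivalent to this loop:
- *
- *	for (i = 0; i < 6; i += 2) {
- *		ib->phys_addr[i] = dev->dev_addr[i + 1];
- *		ib->phys_addr[i + 1] = dev->dev_addr[i];
- *	}
- * )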
- */ - ib->phys_addr[0] = dev->dev_addr[1]; - ib->phys_addr[1] = dev->dev_addr[0]; - ib->phys_addr[2] = dev->dev_addr[3]; - ib->phys_addr[3] = dev->dev_addr[2]; - ib->phys_addr[4] = dev->dev_addr[5]; - ib->phys_addr[5] = dev->dev_addr[4]; - - /* Setup the Tx ring entries */ - netdev_dbg(dev, "TX rings:\n"); - for (i = 0; i <= 1 << lp->lance_log_tx_bufs; i++) { - leptr = LANCE_ADDR(&aib->tx_buf[i][0]); - ib->btx_ring[i].tmd0 = leptr; - ib->btx_ring[i].tmd1_hadr = leptr >> 16; - ib->btx_ring[i].tmd1_bits = 0; - ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */ - ib->btx_ring[i].misc = 0; - if (i < 3) - netdev_dbg(dev, "%d: 0x%08x\n", i, leptr); - } - - /* Setup the Rx ring entries */ - netdev_dbg(dev, "RX rings:\n"); - for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) { - leptr = LANCE_ADDR(&aib->rx_buf[i][0]); - - ib->brx_ring[i].rmd0 = leptr; - ib->brx_ring[i].rmd1_hadr = leptr >> 16; - ib->brx_ring[i].rmd1_bits = LE_R1_OWN; - ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000; - ib->brx_ring[i].mblength = 0; - if (i < 3) - netdev_dbg(dev, "%d: 0x%08x\n", i, leptr); - } - - /* Setup the initialization block */ - - /* Setup rx descriptor pointer */ - leptr = LANCE_ADDR(&aib->brx_ring); - ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16); - ib->rx_ptr = leptr; - netdev_dbg(dev, "RX ptr: %08x\n", leptr); - - /* Setup tx descriptor pointer */ - leptr = LANCE_ADDR(&aib->btx_ring); - ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16); - ib->tx_ptr = leptr; - netdev_dbg(dev, "TX ptr: %08x\n", leptr); - - /* Clear the multicast filter */ - ib->filter[0] = 0; - ib->filter[1] = 0; -} - -static int init_restart_lance(struct lance_private *lp) -{ - volatile struct lance_regs *ll = lp->ll; - int i; - - ll->rap = LE_CSR0; - ll->rdp = LE_C0_INIT; - - /* Wait for the lance to complete initialization */ - for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++) - barrier(); - if ((i == 100) || (ll->rdp & LE_C0_ERR)) { - pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp); - return -EIO; - } - - /* Clear IDON by writing a "1", enable interrupts and start lance */ - ll->rdp = LE_C0_IDON; - ll->rdp = LE_C0_INEA | LE_C0_STRT; - - return 0; -} - -static int lance_rx(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - volatile struct lance_regs *ll = lp->ll; - volatile struct lance_rx_desc *rd; - unsigned char bits; - -#ifdef TEST_HITS - int i; - char buf[RX_RING_SIZE + 1]; - - for (i = 0; i < RX_RING_SIZE; i++) { - char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN; - if (i == lp->rx_new) - buf[i] = r1_own ? '_' : 'X'; - else - buf[i] = r1_own ? '.' : '1'; - } - buf[RX_RING_SIZE] = 0; - - pr_debug("RxRing TestHits: [%s]\n", buf); -#endif - - ll->rdp = LE_C0_RINT | LE_C0_INEA; - for (rd = &ib->brx_ring[lp->rx_new]; - !((bits = rd->rmd1_bits) & LE_R1_OWN); - rd = &ib->brx_ring[lp->rx_new]) { - - /* We got an incomplete frame? 
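- * (LE_R1_POK is LE_R1_SOP | LE_R1_EOP, so the test below accepts only
- * descriptors that carry a whole frame in one buffer; a frame that
- * spilled into a second buffer has SOP set but not EOP, fails
- *
- *	(bits & (LE_R1_SOP | LE_R1_EOP)) == (LE_R1_SOP | LE_R1_EOP)
- *
- * and is counted as an rx_over_error.)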
*/ - if ((bits & LE_R1_POK) != LE_R1_POK) { - dev->stats.rx_over_errors++; - dev->stats.rx_errors++; - continue; - } else if (bits & LE_R1_ERR) { - /* Count only the end frame as a rx error, - * not the beginning - */ - if (bits & LE_R1_BUF) - dev->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) - dev->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) - dev->stats.rx_over_errors++; - if (bits & LE_R1_FRA) - dev->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) - dev->stats.rx_errors++; - } else { - int len = (rd->mblength & 0xfff) - 4; - struct sk_buff *skb = dev_alloc_skb(len + 2); - - if (!skb) { - netdev_warn(dev, "Memory squeeze, deferring packet\n"); - dev->stats.rx_dropped++; - rd->mblength = 0; - rd->rmd1_bits = LE_R1_OWN; - lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; - return 0; - } - - skb_reserve(skb, 2); /* 16 byte align */ - skb_put(skb, len); /* make room */ - skb_copy_to_linear_data(skb, - (unsigned char *)&ib->rx_buf[lp->rx_new][0], - len); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; - } - - /* Return the packet to the pool */ - rd->mblength = 0; - rd->rmd1_bits = LE_R1_OWN; - lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; - } - return 0; -} - -static int lance_tx(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - volatile struct lance_regs *ll = lp->ll; - volatile struct lance_tx_desc *td; - int i, j; - int status; - - /* csr0 is 2f3 */ - ll->rdp = LE_C0_TINT | LE_C0_INEA; - /* csr0 is 73 */ - - j = lp->tx_old; - for (i = j; i != lp->tx_new; i = j) { - td = &ib->btx_ring[i]; - - /* If we hit a packet not owned by us, stop */ - if (td->tmd1_bits & LE_T1_OWN) - break; - - if (td->tmd1_bits & LE_T1_ERR) { - status = td->misc; - - dev->stats.tx_errors++; - if (status & LE_T3_RTY) - dev->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) - dev->stats.tx_window_errors++; - - if (status & LE_T3_CLOS) { - dev->stats.tx_carrier_errors++; - if (lp->auto_select) { - lp->tpe = 1 - lp->tpe; - netdev_err(dev, "Carrier Lost, trying %s\n", - lp->tpe ? "TPE" : "AUI"); - /* Stop the lance */ - ll->rap = LE_CSR0; - ll->rdp = LE_C0_STOP; - lance_init_ring(dev); - load_csrs(lp); - init_restart_lance(lp); - return 0; - } - } - - /* buffer errors and underflows turn off - * the transmitter, so restart the adapter - */ - if (status & (LE_T3_BUF | LE_T3_UFL)) { - dev->stats.tx_fifo_errors++; - - netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n"); - /* Stop the lance */ - ll->rap = LE_CSR0; - ll->rdp = LE_C0_STOP; - lance_init_ring(dev); - load_csrs(lp); - init_restart_lance(lp); - return 0; - } - } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { - /* So we don't count the packet more than once. */ - td->tmd1_bits &= ~(LE_T1_POK); - - /* One collision before packet was sent. */ - if (td->tmd1_bits & LE_T1_EONE) - dev->stats.collisions++; - - /* More than one collision, be optimistic. 
*/ - if (td->tmd1_bits & LE_T1_EMORE) - dev->stats.collisions += 2; - - dev->stats.tx_packets++; - } - - j = (j + 1) & lp->tx_ring_mod_mask; - } - lp->tx_old = j; - ll->rdp = LE_C0_TINT | LE_C0_INEA; - return 0; -} - -static int lance_tx_buffs_avail(struct lance_private *lp) -{ - if (lp->tx_old <= lp->tx_new) - return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new; - return lp->tx_old - lp->tx_new - 1; -} - -static irqreturn_t lance_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - int csr0; - - ll->rap = LE_CSR0; /* LANCE Controller Status */ - csr0 = ll->rdp; - - if (!(csr0 & LE_C0_INTR)) /* Check if any interrupt has */ - return IRQ_NONE; /* been generated by the Lance. */ - - /* Acknowledge all the interrupt sources ASAP */ - ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT | - LE_C0_INIT); - - if (csr0 & LE_C0_ERR) { - /* Clear the error condition */ - ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA; - } - - if (csr0 & LE_C0_RINT) - lance_rx(dev); - - if (csr0 & LE_C0_TINT) - lance_tx(dev); - - /* Log misc errors. */ - if (csr0 & LE_C0_BABL) - dev->stats.tx_errors++; /* Tx babble. */ - if (csr0 & LE_C0_MISS) - dev->stats.rx_errors++; /* Missed a Rx frame. */ - if (csr0 & LE_C0_MERR) { - netdev_err(dev, "Bus master arbitration failure, status %04x\n", - csr0); - /* Restart the chip. */ - ll->rdp = LE_C0_STRT; - } - - if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0) - netif_wake_queue(dev); - - ll->rap = LE_CSR0; - ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR | - LE_C0_IDON | LE_C0_INEA); - return IRQ_HANDLED; -} - -static int lance_open(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - int ret; - - /* Stop the Lance */ - ll->rap = LE_CSR0; - ll->rdp = LE_C0_STOP; - - /* Install the Interrupt handler */ - ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED, - dev->name, dev); - if (ret) - return ret; - - load_csrs(lp); - lance_init_ring(dev); - - netif_start_queue(dev); - - return init_restart_lance(lp); -} - -static int lance_close(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - - netif_stop_queue(dev); - del_timer_sync(&lp->multicast_timer); - - /* Stop the card */ - ll->rap = LE_CSR0; - ll->rdp = LE_C0_STOP; - - free_irq(IRQ_AMIGA_PORTS, dev); - return 0; -} - -static inline int lance_reset(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - int status; - - /* Stop the lance */ - ll->rap = LE_CSR0; - ll->rdp = LE_C0_STOP; - - load_csrs(lp); - - lance_init_ring(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_start_queue(dev); - - status = init_restart_lance(lp); - netdev_dbg(dev, "Lance restart=%d\n", status); - - return status; -} - -static void lance_tx_timeout(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - - netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp); - lance_reset(dev); - netif_wake_queue(dev); -} - -static netdev_tx_t lance_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - volatile struct lance_init_block *ib = lp->init_block; - int entry, skblen; - int status = NETDEV_TX_OK; - 
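- /* Transmit path in brief: pad the frame to ETH_ZLEN, give up with
- * NETDEV_TX_LOCKED if no descriptor is free, copy the data into the
- * ring buffer, hand the descriptor to the chip by setting LE_T1_OWN,
- * then set TDMD in CSR0 so the LANCE polls the ring at once instead
- * of waiting for its next periodic poll. */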
unsigned long flags; - - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - skblen = max_t(unsigned, skb->len, ETH_ZLEN); - - local_irq_save(flags); - - if (!lance_tx_buffs_avail(lp)) { - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } - -#ifdef DEBUG - /* dump the packet */ - print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE, - 16, 1, skb->data, 64, true); -#endif - entry = lp->tx_new & lp->tx_ring_mod_mask; - ib->btx_ring[entry].length = (-skblen) | 0xf000; - ib->btx_ring[entry].misc = 0; - - skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen); - - /* Now, give the packet to the lance */ - ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); - lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; - dev->stats.tx_bytes += skblen; - - if (lance_tx_buffs_avail(lp) <= 0) - netif_stop_queue(dev); - - /* Kick the lance: transmit now */ - ll->rdp = LE_C0_INEA | LE_C0_TDMD; - dev_kfree_skb(skb); - - local_irq_restore(flags); - - return status; -} - -/* taken from the depca driver */ -static void lance_load_multicast(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - volatile u16 *mcast_table = (u16 *)&ib->filter; - struct netdev_hw_addr *ha; - u32 crc; - - /* set all multicast bits */ - if (dev->flags & IFF_ALLMULTI) { - ib->filter[0] = 0xffffffff; - ib->filter[1] = 0xffffffff; - return; - } - /* clear the multicast filter */ - ib->filter[0] = 0; - ib->filter[1] = 0; - - /* Add addresses */ - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc_le(6, ha->addr); - crc = crc >> 26; - mcast_table[crc >> 4] |= 1 << (crc & 0xf); - } -} - -static void lance_set_multicast(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_init_block *ib = lp->init_block; - volatile struct lance_regs *ll = lp->ll; - - if (!netif_running(dev)) - return; - - if (lp->tx_old != lp->tx_new) { - mod_timer(&lp->multicast_timer, jiffies + 4); - netif_wake_queue(dev); - return; - } - - netif_stop_queue(dev); - - ll->rap = LE_CSR0; - ll->rdp = LE_C0_STOP; - lance_init_ring(dev); - - if (dev->flags & IFF_PROMISC) { - ib->mode |= LE_MO_PROM; - } else { - ib->mode &= ~LE_MO_PROM; - lance_load_multicast(dev); - } - load_csrs(lp); - init_restart_lance(lp); - netif_wake_queue(dev); -} - -static int __devinit a2065_init_one(struct zorro_dev *z, - const struct zorro_device_id *ent); -static void __devexit a2065_remove_one(struct zorro_dev *z); - - -static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = { - { ZORRO_PROD_CBM_A2065_1 }, - { ZORRO_PROD_CBM_A2065_2 }, - { ZORRO_PROD_AMERISTAR_A2065 }, - { 0 } -}; -MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl); - -static struct zorro_driver a2065_driver = { - .name = "a2065", - .id_table = a2065_zorro_tbl, - .probe = a2065_init_one, - .remove = __devexit_p(a2065_remove_one), -}; - -static const struct net_device_ops lance_netdev_ops = { - .ndo_open = lance_open, - .ndo_stop = lance_close, - .ndo_start_xmit = lance_start_xmit, - .ndo_tx_timeout = lance_tx_timeout, - .ndo_set_multicast_list = lance_set_multicast, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, -}; - -static int __devinit a2065_init_one(struct zorro_dev *z, - const struct zorro_device_id *ent) -{ - struct net_device *dev; - struct lance_private *priv; - unsigned long board = z->resource.start; - unsigned long base_addr = board + A2065_LANCE; - unsigned long mem_start = board + A2065_RAM; 
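- /* Board layout, per a2065.h: the Am7990 register pair sits at
- * board + 0x4000 (A2065_LANCE) and the 32 KB of shared descriptor
- * and buffer RAM at board + 0x8000 (A2065_RAM, A2065_RAM_SIZE);
- * both windows are claimed below before the netdev is registered. */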
- struct resource *r1, *r2; - int err; - - r1 = request_mem_region(base_addr, sizeof(struct lance_regs), - "Am7990"); - if (!r1) - return -EBUSY; - r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM"); - if (!r2) { - release_mem_region(base_addr, sizeof(struct lance_regs)); - return -EBUSY; - } - - dev = alloc_etherdev(sizeof(struct lance_private)); - if (dev == NULL) { - release_mem_region(base_addr, sizeof(struct lance_regs)); - release_mem_region(mem_start, A2065_RAM_SIZE); - return -ENOMEM; - } - - priv = netdev_priv(dev); - - r1->name = dev->name; - r2->name = dev->name; - - dev->dev_addr[0] = 0x00; - if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */ - dev->dev_addr[1] = 0x80; - dev->dev_addr[2] = 0x10; - } else { /* Ameristar */ - dev->dev_addr[1] = 0x00; - dev->dev_addr[2] = 0x9f; - } - dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff; - dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff; - dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff; - dev->base_addr = ZTWO_VADDR(base_addr); - dev->mem_start = ZTWO_VADDR(mem_start); - dev->mem_end = dev->mem_start + A2065_RAM_SIZE; - - priv->ll = (volatile struct lance_regs *)dev->base_addr; - priv->init_block = (struct lance_init_block *)dev->mem_start; - priv->lance_init_block = (struct lance_init_block *)A2065_RAM; - priv->auto_select = 0; - priv->busmaster_regval = LE_C3_BSWP; - - priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS; - priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; - priv->rx_ring_mod_mask = RX_RING_MOD_MASK; - priv->tx_ring_mod_mask = TX_RING_MOD_MASK; - - dev->netdev_ops = &lance_netdev_ops; - dev->watchdog_timeo = 5*HZ; - dev->dma = 0; - - init_timer(&priv->multicast_timer); - priv->multicast_timer.data = (unsigned long) dev; - priv->multicast_timer.function = - (void (*)(unsigned long))lance_set_multicast; - - err = register_netdev(dev); - if (err) { - release_mem_region(base_addr, sizeof(struct lance_regs)); - release_mem_region(mem_start, A2065_RAM_SIZE); - free_netdev(dev); - return err; - } - zorro_set_drvdata(z, dev); - - netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n", - board, dev->dev_addr); - - return 0; -} - - -static void __devexit a2065_remove_one(struct zorro_dev *z) -{ - struct net_device *dev = zorro_get_drvdata(z); - - unregister_netdev(dev); - release_mem_region(ZTWO_PADDR(dev->base_addr), - sizeof(struct lance_regs)); - release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE); - free_netdev(dev); -} - -static int __init a2065_init_module(void) -{ - return zorro_register_driver(&a2065_driver); -} - -static void __exit a2065_cleanup_module(void) -{ - zorro_unregister_driver(&a2065_driver); -} - -module_init(a2065_init_module); -module_exit(a2065_cleanup_module); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/a2065.h b/drivers/net/a2065.h deleted file mode 100644 index 5117759..0000000 --- a/drivers/net/a2065.h +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Amiga Linux/68k A2065 Ethernet Driver - * - * (C) Copyright 1995 by Geert Uytterhoeven - * - * --------------------------------------------------------------------------- - * - * This program is based on - * - * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver - * (C) Copyright 1995 by Geert Uytterhoeven, - * Peter De Schrijver - * - * lance.c: An AMD LANCE ethernet driver for linux. - * Written 1993-94 by Donald Becker. - * - * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller - * Advanced Micro Devices - * Publication #16907, Rev. 
B, Amendment/0, May 1994 - * - * --------------------------------------------------------------------------- - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of the Linux - * distribution for more details. - * - * --------------------------------------------------------------------------- - * - * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains: - * - * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with - * both 10BASE-2 (thin coax) and AUI (DB-15) connectors - */ - - -/* - * Am7990 Local Area Network Controller for Ethernet (LANCE) - */ - -struct lance_regs { - unsigned short rdp; /* Register Data Port */ - unsigned short rap; /* Register Address Port */ -}; - - -/* - * Am7990 Control and Status Registers - */ - -#define LE_CSR0 0x0000 /* LANCE Controller Status */ -#define LE_CSR1 0x0001 /* IADR[15:0] */ -#define LE_CSR2 0x0002 /* IADR[23:16] */ -#define LE_CSR3 0x0003 /* Misc */ - - -/* - * Bit definitions for CSR0 (LANCE Controller Status) - */ - -#define LE_C0_ERR 0x8000 /* Error */ -#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */ -#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */ -#define LE_C0_MISS 0x1000 /* Missed Frame */ -#define LE_C0_MERR 0x0800 /* Memory Error */ -#define LE_C0_RINT 0x0400 /* Receive Interrupt */ -#define LE_C0_TINT 0x0200 /* Transmit Interrupt */ -#define LE_C0_IDON 0x0100 /* Initialization Done */ -#define LE_C0_INTR 0x0080 /* Interrupt Flag */ -#define LE_C0_INEA 0x0040 /* Interrupt Enable */ -#define LE_C0_RXON 0x0020 /* Receive On */ -#define LE_C0_TXON 0x0010 /* Transmit On */ -#define LE_C0_TDMD 0x0008 /* Transmit Demand */ -#define LE_C0_STOP 0x0004 /* Stop */ -#define LE_C0_STRT 0x0002 /* Start */ -#define LE_C0_INIT 0x0001 /* Initialize */ - - -/* - * Bit definitions for CSR3 - */ - -#define LE_C3_BSWP 0x0004 /* Byte Swap - (on for big endian byte order) */ -#define LE_C3_ACON 0x0002 /* ALE Control - (on for active low ALE) */ -#define LE_C3_BCON 0x0001 /* Byte Control */ - - -/* - * Mode Flags - */ - -#define LE_MO_PROM 0x8000 /* Promiscuous Mode */ -#define LE_MO_INTL 0x0040 /* Internal Loopback */ -#define LE_MO_DRTY 0x0020 /* Disable Retry */ -#define LE_MO_FCOLL 0x0010 /* Force Collision */ -#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */ -#define LE_MO_LOOP 0x0004 /* Loopback Enable */ -#define LE_MO_DTX 0x0002 /* Disable Transmitter */ -#define LE_MO_DRX 0x0001 /* Disable Receiver */ - - -struct lance_rx_desc { - unsigned short rmd0; /* low address of packet */ - unsigned char rmd1_bits; /* descriptor bits */ - unsigned char rmd1_hadr; /* high address of packet */ - short length; /* This length is 2s complement (negative)! - * Buffer length - */ - unsigned short mblength; /* Aactual number of bytes received */ -}; - -struct lance_tx_desc { - unsigned short tmd0; /* low address of packet */ - unsigned char tmd1_bits; /* descriptor bits */ - unsigned char tmd1_hadr; /* high address of packet */ - short length; /* Length is 2s complement (negative)! 
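- * (e.g. a 64-byte frame is stored as -64 = 0xffc0;
- * the drivers additionally OR in 0xf000, the
- * must-be-one bits, as in:
- *	btx_ring[entry].length = (-len) | 0xf000; )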
*/ - unsigned short misc; -}; - - -/* - * Receive Flags - */ - -#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */ -#define LE_R1_ERR 0x40 /* Error */ -#define LE_R1_FRA 0x20 /* Framing Error */ -#define LE_R1_OFL 0x10 /* Overflow Error */ -#define LE_R1_CRC 0x08 /* CRC Error */ -#define LE_R1_BUF 0x04 /* Buffer Error */ -#define LE_R1_SOP 0x02 /* Start of Packet */ -#define LE_R1_EOP 0x01 /* End of Packet */ -#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */ - - -/* - * Transmit Flags - */ - -#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */ -#define LE_T1_ERR 0x40 /* Error */ -#define LE_T1_RES 0x20 /* Reserved, - LANCE writes this with a zero */ -#define LE_T1_EMORE 0x10 /* More than one retry needed */ -#define LE_T1_EONE 0x08 /* One retry needed */ -#define LE_T1_EDEF 0x04 /* Deferred */ -#define LE_T1_SOP 0x02 /* Start of Packet */ -#define LE_T1_EOP 0x01 /* End of Packet */ -#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */ - - -/* - * Error Flags - */ - -#define LE_T3_BUF 0x8000 /* Buffer Error */ -#define LE_T3_UFL 0x4000 /* Underflow Error */ -#define LE_T3_LCOL 0x1000 /* Late Collision */ -#define LE_T3_CLOS 0x0800 /* Loss of Carrier */ -#define LE_T3_RTY 0x0400 /* Retry Error */ -#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */ - - -/* - * A2065 Expansion Board Structure - */ - -#define A2065_LANCE 0x4000 - -#define A2065_RAM 0x8000 -#define A2065_RAM_SIZE 0x8000 - diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c deleted file mode 100644 index 78002ef..0000000 --- a/drivers/net/amd8111e.c +++ /dev/null @@ -1,1992 +0,0 @@ - -/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver - * Copyright (C) 2004 Advanced Micro Devices - * - * - * Copyright 2001,2002 Jeff Garzik [ 8139cp.c,tg3.c ] - * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c] - * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ] - * Derived from the lance driver written 1993,1994,1995 by Donald Becker. - * Copyright 1993 United States Government as represented by the - * Director, National Security Agency.[ pcnet32.c ] - * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ] - * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA - -Module Name: - - amd8111e.c - -Abstract: - - AMD8111 based 10/100 Ethernet Controller Driver. - -Environment: - - Kernel Mode - -Revision History: - 3.0.0 - Initial Revision. - 3.0.1 - 1. Dynamic interrupt coalescing. - 2. Removed prev_stats. - 3. MII support. - 4. Dynamic IPG support - 3.0.2 05/29/2003 - 1. Bug fix: Fixed failure to send jumbo packets larger than 4k. - 2. Bug fix: Fixed VLAN support failure. - 3. Bug fix: Fixed receive interrupt coalescing bug. - 4. Dynamic IPG support is disabled by default. - 3.0.3 06/05/2003 - 1. 
Bug fix: Fixed failure to close the interface if SMP is enabled. - 3.0.4 12/09/2003 - 1. Added set_mac_address routine for bonding driver support. - 2. Tested the driver for bonding support - 3. Bug fix: Fixed mismach in actual receive buffer lenth and lenth - indicated to the h/w. - 4. Modified amd8111e_rx() routine to receive all the received packets - in the first interrupt. - 5. Bug fix: Corrected rx_errors reported in get_stats() function. - 3.0.5 03/22/2004 - 1. Added NAPI support - -*/ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) -#define AMD8111E_VLAN_TAG_USED 1 -#else -#define AMD8111E_VLAN_TAG_USED 0 -#endif - -#include "amd8111e.h" -#define MODULE_NAME "amd8111e" -#define MODULE_VERS "3.0.7" -MODULE_AUTHOR("Advanced Micro Devices, Inc."); -MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS); -MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl); -module_param_array(speed_duplex, int, NULL, 0); -MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex"); -module_param_array(coalesce, bool, NULL, 0); -MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable"); -module_param_array(dynamic_ipg, bool, NULL, 0); -MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable"); - -static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = { - - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { 0, } - -}; -/* -This function will read the PHY registers. -*/ -static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val) -{ - void __iomem *mmio = lp->mmio; - unsigned int reg_val; - unsigned int repeat= REPEAT_CNT; - - reg_val = readl(mmio + PHY_ACCESS); - while (reg_val & PHY_CMD_ACTIVE) - reg_val = readl( mmio + PHY_ACCESS ); - - writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) | - ((reg & 0x1f) << 16), mmio +PHY_ACCESS); - do{ - reg_val = readl(mmio + PHY_ACCESS); - udelay(30); /* It takes 30 us to read/write data */ - } while (--repeat && (reg_val & PHY_CMD_ACTIVE)); - if(reg_val & PHY_RD_ERR) - goto err_phy_read; - - *val = reg_val & 0xffff; - return 0; -err_phy_read: - *val = 0; - return -EINVAL; - -} - -/* -This function will write into PHY registers. -*/ -static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val) -{ - unsigned int repeat = REPEAT_CNT; - void __iomem *mmio = lp->mmio; - unsigned int reg_val; - - reg_val = readl(mmio + PHY_ACCESS); - while (reg_val & PHY_CMD_ACTIVE) - reg_val = readl( mmio + PHY_ACCESS ); - - writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) | - ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS); - - do{ - reg_val = readl(mmio + PHY_ACCESS); - udelay(30); /* It takes 30 us to read/write the data */ - } while (--repeat && (reg_val & PHY_CMD_ACTIVE)); - - if(reg_val & PHY_RD_ERR) - goto err_phy_write; - - return 0; - -err_phy_write: - return -EINVAL; - -} -/* -This is the mii register read function provided to the mii interface. 
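-
-Together with amd8111e_mdio_write below, it is what the generic mii
-library expects; hooking the pair up looks roughly like this (a sketch
-using the mii_if_info fields from <linux/mii.h>):
-
-	lp->mii_if.dev = dev;
-	lp->mii_if.mdio_read = amd8111e_mdio_read;
-	lp->mii_if.mdio_write = amd8111e_mdio_write;
-	lp->mii_if.phy_id = lp->ext_phy_addr;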
-*/
-static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
-{
-	struct amd8111e_priv* lp = netdev_priv(dev);
-	unsigned int reg_val;
-
-	amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);
-	return reg_val;
-
-}
-
-/*
-This is the mii register write function provided to the mii interface.
-*/
-static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
-{
-	struct amd8111e_priv* lp = netdev_priv(dev);
-
-	amd8111e_write_phy(lp, phy_id, reg_num, val);
-}
-
-/*
-This function will set the PHY speed. During initialization it sets the speed to 100 Mbit/s full duplex.
-*/
-static void amd8111e_set_ext_phy(struct net_device *dev)
-{
-	struct amd8111e_priv *lp = netdev_priv(dev);
-	u32 bmcr, advert, tmp;
-
-	/* Determine mii register values to set the speed */
-	advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
-	tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-	switch (lp->ext_phy_option){
-
-		default:
-		case SPEED_AUTONEG: /* advertise all values */
-			tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
-				ADVERTISE_100HALF | ADVERTISE_100FULL);
-			break;
-		case SPEED10_HALF:
-			tmp |= ADVERTISE_10HALF;
-			break;
-		case SPEED10_FULL:
-			tmp |= ADVERTISE_10FULL;
-			break;
-		case SPEED100_HALF:
-			tmp |= ADVERTISE_100HALF;
-			break;
-		case SPEED100_FULL:
-			tmp |= ADVERTISE_100FULL;
-			break;
-	}
-
-	if(advert != tmp)
-		amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
-	/* Restart auto negotiation */
-	bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
-	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-	amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
-
-}
-
-/*
-This function will unmap skb->data space and will free
-all transmit and receive skbuffs.
-*/
-static int amd8111e_free_skbs(struct net_device *dev)
-{
-	struct amd8111e_priv *lp = netdev_priv(dev);
-	struct sk_buff* rx_skbuff;
-	int i;
-
-	/* Freeing transmit skbs */
-	for(i = 0; i < NUM_TX_BUFFERS; i++){
-		if(lp->tx_skbuff[i]){
-			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
-			dev_kfree_skb(lp->tx_skbuff[i]);
-			lp->tx_skbuff[i] = NULL;
-			lp->tx_dma_addr[i] = 0;
-		}
-	}
-	/* Freeing previously allocated receive buffers */
-	for (i = 0; i < NUM_RX_BUFFERS; i++){
-		rx_skbuff = lp->rx_skbuff[i];
-		if(rx_skbuff != NULL){
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
-				lp->rx_buff_len - 2, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(lp->rx_skbuff[i]);
-			lp->rx_skbuff[i] = NULL;
-			lp->rx_dma_addr[i] = 0;
-		}
-	}
-
-	return 0;
-}
-
-/*
-This will set the receive buffer length corresponding to the MTU size of the network interface.
-*/
-static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
-{
-	struct amd8111e_priv* lp = netdev_priv(dev);
-	unsigned int mtu = dev->mtu;
-
-	if (mtu > ETH_DATA_LEN){
-		/* MTU + ethernet header + FCS
-		   + optional VLAN tag + skb reserve space 2 */
-
-		lp->rx_buff_len = mtu + ETH_HLEN + 10;
-		lp->options |= OPTION_JUMBO_ENABLE;
-	} else{
-		lp->rx_buff_len = PKT_BUFF_SZ;
-		lp->options &= ~OPTION_JUMBO_ENABLE;
-	}
-}
-
-/*
-This function frees all previously allocated buffers, determines the new receive buffer length and allocates new receive buffers. It also allocates and initializes both the transmit and receive hardware descriptors.
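-
-Descriptor ownership follows the usual convention: a descriptor belongs
-to the chip while OWN_BIT is set in its flags and returns to the driver
-once the chip clears it, so each receive descriptor below is only armed
-with
-
-	lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
-
-after its buffer address and count have been written (the wmb() orders
-the two).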
- */ -static int amd8111e_init_ring(struct net_device *dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - int i; - - lp->rx_idx = lp->tx_idx = 0; - lp->tx_complete_idx = 0; - lp->tx_ring_idx = 0; - - - if(lp->opened) - /* Free previously allocated transmit and receive skbs */ - amd8111e_free_skbs(dev); - - else{ - /* allocate the tx and rx descriptors */ - if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR, - &lp->tx_ring_dma_addr)) == NULL) - - goto err_no_mem; - - if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR, - &lp->rx_ring_dma_addr)) == NULL) - - goto err_free_tx_ring; - - } - /* Set new receive buff size */ - amd8111e_set_rx_buff_len(dev); - - /* Allocating receive skbs */ - for (i = 0; i < NUM_RX_BUFFERS; i++) { - - if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) { - /* Release previos allocated skbs */ - for(--i; i >= 0 ;i--) - dev_kfree_skb(lp->rx_skbuff[i]); - goto err_free_rx_ring; - } - skb_reserve(lp->rx_skbuff[i],2); - } - /* Initilaizing receive descriptors */ - for (i = 0; i < NUM_RX_BUFFERS; i++) { - lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, - lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); - - lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]); - lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2); - wmb(); - lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT); - } - - /* Initializing transmit descriptors */ - for (i = 0; i < NUM_TX_RING_DR; i++) { - lp->tx_ring[i].buff_phy_addr = 0; - lp->tx_ring[i].tx_flags = 0; - lp->tx_ring[i].buff_count = 0; - } - - return 0; - -err_free_rx_ring: - - pci_free_consistent(lp->pci_dev, - sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring, - lp->rx_ring_dma_addr); - -err_free_tx_ring: - - pci_free_consistent(lp->pci_dev, - sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring, - lp->tx_ring_dma_addr); - -err_no_mem: - return -ENOMEM; -} -/* This function will set the interrupt coalescing according to the input arguments */ -static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod) -{ - unsigned int timeout; - unsigned int event_count; - - struct amd8111e_priv *lp = netdev_priv(dev); - void __iomem *mmio = lp->mmio; - struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf; - - - switch(cmod) - { - case RX_INTR_COAL : - timeout = coal_conf->rx_timeout; - event_count = coal_conf->rx_event_count; - if( timeout > MAX_TIMEOUT || - event_count > MAX_EVENT_COUNT ) - return -EINVAL; - - timeout = timeout * DELAY_TIMER_CONV; - writel(VAL0|STINTEN, mmio+INTEN0); - writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout, - mmio+DLY_INT_A); - break; - - case TX_INTR_COAL : - timeout = coal_conf->tx_timeout; - event_count = coal_conf->tx_event_count; - if( timeout > MAX_TIMEOUT || - event_count > MAX_EVENT_COUNT ) - return -EINVAL; - - - timeout = timeout * DELAY_TIMER_CONV; - writel(VAL0|STINTEN,mmio+INTEN0); - writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout, - mmio+DLY_INT_B); - break; - - case DISABLE_COAL: - writel(0,mmio+STVAL); - writel(STINTEN, mmio+INTEN0); - writel(0, mmio +DLY_INT_B); - writel(0, mmio+DLY_INT_A); - break; - case ENABLE_COAL: - /* Start the timer */ - writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */ - writel(VAL0|STINTEN, mmio+INTEN0); - break; - default: - break; - - } - return 0; - -} - -/* -This function initializes the device registers and starts the device. 
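-
-The restart sequence is: stop the chip by writing RUN to CMD0 without
-its validator bit (which clears it), rebuild the descriptor rings,
-program the MAC address, ring base addresses and IPG, then set INTREN
-and RUN again; in outline:
-
-	writel(RUN, mmio + CMD0);			(stop)
-	(reprogram rings and registers)
-	writel(VAL0 | INTREN | RUN, mmio + CMD0);	(go)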
-*/ -static int amd8111e_restart(struct net_device *dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - void __iomem *mmio = lp->mmio; - int i,reg_val; - - /* stop the chip */ - writel(RUN, mmio + CMD0); - - if(amd8111e_init_ring(dev)) - return -ENOMEM; - - /* enable the port manager and set auto negotiation always */ - writel((u32) VAL1|EN_PMGR, mmio + CMD3 ); - writel((u32)XPHYANE|XPHYRST , mmio + CTRL2); - - amd8111e_set_ext_phy(dev); - - /* set control registers */ - reg_val = readl(mmio + CTRL1); - reg_val &= ~XMTSP_MASK; - writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 ); - - /* enable interrupt */ - writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN | - APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN | - SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0); - - writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0); - - /* initialize tx and rx ring base addresses */ - writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0); - writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0); - - writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0); - writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0); - - /* set default IPG to 96 */ - writew((u32)DEFAULT_IPG,mmio+IPG); - writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1); - - if(lp->options & OPTION_JUMBO_ENABLE){ - writel((u32)VAL2|JUMBO, mmio + CMD3); - /* Reset REX_UFLO */ - writel( REX_UFLO, mmio + CMD2); - /* Should not set REX_UFLO for jumbo frames */ - writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2); - }else{ - writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2); - writel((u32)JUMBO, mmio + CMD3); - } - -#if AMD8111E_VLAN_TAG_USED - writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3); -#endif - writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 ); - - /* Setting the MAC address to the device */ - for(i = 0; i < ETH_ADDR_LEN; i++) - writeb( dev->dev_addr[i], mmio + PADR + i ); - - /* Enable interrupt coalesce */ - if(lp->options & OPTION_INTR_COAL_ENABLE){ - printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n", - dev->name); - amd8111e_set_coalesce(dev,ENABLE_COAL); - } - - /* set RUN bit to start the chip */ - writel(VAL2 | RDMD0, mmio + CMD0); - writel(VAL0 | INTREN | RUN, mmio + CMD0); - - /* To avoid PCI posting bug */ - readl(mmio+CMD0); - return 0; -} -/* -This function clears necessary the device registers. 
-*/ -static void amd8111e_init_hw_default( struct amd8111e_priv* lp) -{ - unsigned int reg_val; - unsigned int logic_filter[2] ={0,}; - void __iomem *mmio = lp->mmio; - - - /* stop the chip */ - writel(RUN, mmio + CMD0); - - /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */ - writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0); - - /* Clear RCV_RING_BASE_ADDR */ - writel(0, mmio + RCV_RING_BASE_ADDR0); - - /* Clear XMT_RING_BASE_ADDR */ - writel(0, mmio + XMT_RING_BASE_ADDR0); - writel(0, mmio + XMT_RING_BASE_ADDR1); - writel(0, mmio + XMT_RING_BASE_ADDR2); - writel(0, mmio + XMT_RING_BASE_ADDR3); - - /* Clear CMD0 */ - writel(CMD0_CLEAR,mmio + CMD0); - - /* Clear CMD2 */ - writel(CMD2_CLEAR, mmio +CMD2); - - /* Clear CMD7 */ - writel(CMD7_CLEAR , mmio + CMD7); - - /* Clear DLY_INT_A and DLY_INT_B */ - writel(0x0, mmio + DLY_INT_A); - writel(0x0, mmio + DLY_INT_B); - - /* Clear FLOW_CONTROL */ - writel(0x0, mmio + FLOW_CONTROL); - - /* Clear INT0 write 1 to clear register */ - reg_val = readl(mmio + INT0); - writel(reg_val, mmio + INT0); - - /* Clear STVAL */ - writel(0x0, mmio + STVAL); - - /* Clear INTEN0 */ - writel( INTEN0_CLEAR, mmio + INTEN0); - - /* Clear LADRF */ - writel(0x0 , mmio + LADRF); - - /* Set SRAM_SIZE & SRAM_BOUNDARY registers */ - writel( 0x80010,mmio + SRAM_SIZE); - - /* Clear RCV_RING0_LEN */ - writel(0x0, mmio + RCV_RING_LEN0); - - /* Clear XMT_RING0/1/2/3_LEN */ - writel(0x0, mmio + XMT_RING_LEN0); - writel(0x0, mmio + XMT_RING_LEN1); - writel(0x0, mmio + XMT_RING_LEN2); - writel(0x0, mmio + XMT_RING_LEN3); - - /* Clear XMT_RING_LIMIT */ - writel(0x0, mmio + XMT_RING_LIMIT); - - /* Clear MIB */ - writew(MIB_CLEAR, mmio + MIB_ADDR); - - /* Clear LARF */ - amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF); - - /* SRAM_SIZE register */ - reg_val = readl(mmio + SRAM_SIZE); - - if(lp->options & OPTION_JUMBO_ENABLE) - writel( VAL2|JUMBO, mmio + CMD3); -#if AMD8111E_VLAN_TAG_USED - writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 ); -#endif - /* Set default value to CTRL1 Register */ - writel(CTRL1_DEFAULT, mmio + CTRL1); - - /* To avoid PCI posting bug */ - readl(mmio + CMD2); - -} - -/* -This function disables the interrupt and clears all the pending -interrupts in INT0 - */ -static void amd8111e_disable_interrupt(struct amd8111e_priv* lp) -{ - u32 intr0; - - /* Disable interrupt */ - writel(INTREN, lp->mmio + CMD0); - - /* Clear INT0 */ - intr0 = readl(lp->mmio + INT0); - writel(intr0, lp->mmio + INT0); - - /* To avoid PCI posting bug */ - readl(lp->mmio + INT0); - -} - -/* -This function stops the chip. -*/ -static void amd8111e_stop_chip(struct amd8111e_priv* lp) -{ - writel(RUN, lp->mmio + CMD0); - - /* To avoid PCI posting bug */ - readl(lp->mmio + CMD0); -} - -/* -This function frees the transmiter and receiver descriptor rings. -*/ -static void amd8111e_free_ring(struct amd8111e_priv* lp) -{ - /* Free transmit and receive descriptor rings */ - if(lp->rx_ring){ - pci_free_consistent(lp->pci_dev, - sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR, - lp->rx_ring, lp->rx_ring_dma_addr); - lp->rx_ring = NULL; - } - - if(lp->tx_ring){ - pci_free_consistent(lp->pci_dev, - sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR, - lp->tx_ring, lp->tx_ring_dma_addr); - - lp->tx_ring = NULL; - } - -} - -/* -This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb. 
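-
-"Ownership" here means the OWN_BIT of the transmit descriptor rather
-than the skb itself: the chip clears OWN_BIT once a frame has gone out,
-so the reclaim loop below stops at the first descriptor still owned by
-the hardware:
-
-	if (le16_to_cpu(lp->tx_ring[tx_index].tx_flags) & OWN_BIT)
-		break;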
-*/ -static int amd8111e_tx(struct net_device *dev) -{ - struct amd8111e_priv* lp = netdev_priv(dev); - int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; - int status; - /* Complete all the transmit packet */ - while (lp->tx_complete_idx != lp->tx_idx){ - tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; - status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags); - - if(status & OWN_BIT) - break; /* It still hasn't been Txed */ - - lp->tx_ring[tx_index].buff_phy_addr = 0; - - /* We must free the original skb */ - if (lp->tx_skbuff[tx_index]) { - pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], - lp->tx_skbuff[tx_index]->len, - PCI_DMA_TODEVICE); - dev_kfree_skb_irq (lp->tx_skbuff[tx_index]); - lp->tx_skbuff[tx_index] = NULL; - lp->tx_dma_addr[tx_index] = 0; - } - lp->tx_complete_idx++; - /*COAL update tx coalescing parameters */ - lp->coal_conf.tx_packets++; - lp->coal_conf.tx_bytes += - le16_to_cpu(lp->tx_ring[tx_index].buff_count); - - if (netif_queue_stopped(dev) && - lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){ - /* The ring is no longer full, clear tbusy. */ - /* lp->tx_full = 0; */ - netif_wake_queue (dev); - } - } - return 0; -} - -/* This function handles the driver receive operation in polling mode */ -static int amd8111e_rx_poll(struct napi_struct *napi, int budget) -{ - struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi); - struct net_device *dev = lp->amd8111e_net_dev; - int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK; - void __iomem *mmio = lp->mmio; - struct sk_buff *skb,*new_skb; - int min_pkt_len, status; - unsigned int intr0; - int num_rx_pkt = 0; - short pkt_len; -#if AMD8111E_VLAN_TAG_USED - short vtag; -#endif - int rx_pkt_limit = budget; - unsigned long flags; - - do{ - /* process receive packets until we use the quota*/ - /* If we own the next entry, it's a new packet. Send it up. */ - while(1) { - status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); - if (status & OWN_BIT) - break; - - /* - * There is a tricky error noted by John Murphy, - * to Russ Nelson: Even with - * full-sized * buffers it's possible for a - * jabber packet to use two buffers, with only - * the last correctly noting the error. 
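The completion loop in amd8111e_tx() above relies on two free-running indices that are only masked into the ring on use, so equality of tx_complete_idx and tx_idx stays unambiguous across counter wraparound, and the OWN bit is the handshake with the NIC. A compilable sketch of that bookkeeping, with a flags array standing in for the real descriptors:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 32                    /* must be a power of two   */
    #define RING_MASK (RING_SIZE - 1)       /* TX_RING_DR_MOD_MASK role */
    #define OWN_BIT   (1u << 15)

    static uint16_t tx_flags[RING_SIZE];    /* stand-in descriptors */

    /* Reclaim finished slots the way amd8111e_tx() does: both indices
     * run free and are masked on use, so complete == next is exact even
     * after the counters wrap. Stop at the first NIC-owned slot. */
    static unsigned int reclaim(unsigned long *complete, unsigned long next)
    {
            unsigned int freed = 0;

            while (*complete != next) {
                    unsigned int slot = *complete & RING_MASK;

                    if (tx_flags[slot] & OWN_BIT)
                            break;          /* hardware still owns it */
                    freed++;
                    (*complete)++;
            }
            return freed;
    }

    int main(void)
    {
            unsigned long complete = 0;

            tx_flags[2] |= OWN_BIT;         /* slot 2 not transmitted yet */
            printf("reclaimed %u of 3\n", reclaim(&complete, 3));
            return 0;
    }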
- */ - - if(status & ERR_BIT) { - /* reseting flags */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - goto err_next_pkt; - } - /* check for STP and ENP */ - if(!((status & STP_BIT) && (status & ENP_BIT))){ - /* reseting flags */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - goto err_next_pkt; - } - pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; - -#if AMD8111E_VLAN_TAG_USED - vtag = status & TT_MASK; - /*MAC will strip vlan tag*/ - if (vtag != 0) - min_pkt_len =MIN_PKT_LEN - 4; - else -#endif - min_pkt_len =MIN_PKT_LEN; - - if (pkt_len < min_pkt_len) { - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - lp->drv_rx_errors++; - goto err_next_pkt; - } - if(--rx_pkt_limit < 0) - goto rx_not_empty; - if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){ - /* if allocation fail, - ignore that pkt and go to next one */ - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; - lp->drv_rx_errors++; - goto err_next_pkt; - } - - skb_reserve(new_skb, 2); - skb = lp->rx_skbuff[rx_index]; - pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], - lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); - skb_put(skb, pkt_len); - lp->rx_skbuff[rx_index] = new_skb; - lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, - new_skb->data, - lp->rx_buff_len-2, - PCI_DMA_FROMDEVICE); - - skb->protocol = eth_type_trans(skb, dev); - -#if AMD8111E_VLAN_TAG_USED - if (vtag == TT_VLAN_TAGGED){ - u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); - __vlan_hwaccel_put_tag(skb, vlan_tag); - } -#endif - netif_receive_skb(skb); - /*COAL update rx coalescing parameters*/ - lp->coal_conf.rx_packets++; - lp->coal_conf.rx_bytes += pkt_len; - num_rx_pkt++; - - err_next_pkt: - lp->rx_ring[rx_index].buff_phy_addr - = cpu_to_le32(lp->rx_dma_addr[rx_index]); - lp->rx_ring[rx_index].buff_count = - cpu_to_le16(lp->rx_buff_len-2); - wmb(); - lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); - rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; - } - /* Check the interrupt status register for more packets in the - mean time. Process them since we have not used up our quota.*/ - - intr0 = readl(mmio + INT0); - /*Ack receive packets */ - writel(intr0 & RINT0,mmio + INT0); - - } while(intr0 & RINT0); - - if (rx_pkt_limit > 0) { - /* Receive descriptor is empty now */ - spin_lock_irqsave(&lp->lock, flags); - __napi_complete(napi); - writel(VAL0|RINTEN0, mmio + INTEN0); - writel(VAL2 | RDMD0, mmio + CMD0); - spin_unlock_irqrestore(&lp->lock, flags); - } - -rx_not_empty: - return num_rx_pkt; -} - -/* -This function will indicate the link status to the kernel. -*/ -static int amd8111e_link_change(struct net_device* dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - int status0,speed; - - /* read the link change */ - status0 = readl(lp->mmio + STAT0); - - if(status0 & LINK_STATS){ - if(status0 & AUTONEG_COMPLETE) - lp->link_config.autoneg = AUTONEG_ENABLE; - else - lp->link_config.autoneg = AUTONEG_DISABLE; - - if(status0 & FULL_DPLX) - lp->link_config.duplex = DUPLEX_FULL; - else - lp->link_config.duplex = DUPLEX_HALF; - speed = (status0 & SPEED_MASK) >> 7; - if(speed == PHY_SPEED_10) - lp->link_config.speed = SPEED_10; - else if(speed == PHY_SPEED_100) - lp->link_config.speed = SPEED_100; - - printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name, - (lp->link_config.speed == SPEED_100) ? "100": "10", - (lp->link_config.duplex == DUPLEX_FULL)? 
"Full": "Half"); - netif_carrier_on(dev); - } - else{ - lp->link_config.speed = SPEED_INVALID; - lp->link_config.duplex = DUPLEX_INVALID; - lp->link_config.autoneg = AUTONEG_INVALID; - printk(KERN_INFO "%s: Link is Down.\n",dev->name); - netif_carrier_off(dev); - } - - return 0; -} -/* -This function reads the mib counters. -*/ -static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER) -{ - unsigned int status; - unsigned int data; - unsigned int repeat = REPEAT_CNT; - - writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR); - do { - status = readw(mmio + MIB_ADDR); - udelay(2); /* controller takes MAX 2 us to get mib data */ - } - while (--repeat && (status & MIB_CMD_ACTIVE)); - - data = readl(mmio + MIB_DATA); - return data; -} - -/* - * This function reads the mib registers and returns the hardware statistics. - * It updates previous internal driver statistics with new values. - */ -static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - void __iomem *mmio = lp->mmio; - unsigned long flags; - struct net_device_stats *new_stats = &dev->stats; - - if (!lp->opened) - return new_stats; - spin_lock_irqsave (&lp->lock, flags); - - /* stats.rx_packets */ - new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+ - amd8111e_read_mib(mmio, rcv_multicast_pkts)+ - amd8111e_read_mib(mmio, rcv_unicast_pkts); - - /* stats.tx_packets */ - new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets); - - /*stats.rx_bytes */ - new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets); - - /* stats.tx_bytes */ - new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets); - - /* stats.rx_errors */ - /* hw errors + errors driver reported */ - new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+ - amd8111e_read_mib(mmio, rcv_fragments)+ - amd8111e_read_mib(mmio, rcv_jabbers)+ - amd8111e_read_mib(mmio, rcv_alignment_errors)+ - amd8111e_read_mib(mmio, rcv_fcs_errors)+ - amd8111e_read_mib(mmio, rcv_miss_pkts)+ - lp->drv_rx_errors; - - /* stats.tx_errors */ - new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts); - - /* stats.rx_dropped*/ - new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts); - - /* stats.tx_dropped*/ - new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts); - - /* stats.multicast*/ - new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts); - - /* stats.collisions*/ - new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions); - - /* stats.rx_length_errors*/ - new_stats->rx_length_errors = - amd8111e_read_mib(mmio, rcv_undersize_pkts)+ - amd8111e_read_mib(mmio, rcv_oversize_pkts); - - /* stats.rx_over_errors*/ - new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); - - /* stats.rx_crc_errors*/ - new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors); - - /* stats.rx_frame_errors*/ - new_stats->rx_frame_errors = - amd8111e_read_mib(mmio, rcv_alignment_errors); - - /* stats.rx_fifo_errors */ - new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); - - /* stats.rx_missed_errors */ - new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); - - /* stats.tx_aborted_errors*/ - new_stats->tx_aborted_errors = - amd8111e_read_mib(mmio, xmt_excessive_collision); - - /* stats.tx_carrier_errors*/ - new_stats->tx_carrier_errors = - amd8111e_read_mib(mmio, xmt_loss_carrier); - - /* stats.tx_fifo_errors*/ - new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts); - - /* stats.tx_window_errors*/ 
- new_stats->tx_window_errors = - amd8111e_read_mib(mmio, xmt_late_collision); - - /* Reset the mibs for collecting new statistics */ - /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/ - - spin_unlock_irqrestore (&lp->lock, flags); - - return new_stats; -} -/* This function recalculate the interrupt coalescing mode on every interrupt -according to the datarate and the packet rate. -*/ -static int amd8111e_calc_coalesce(struct net_device *dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf; - int tx_pkt_rate; - int rx_pkt_rate; - int tx_data_rate; - int rx_data_rate; - int rx_pkt_size; - int tx_pkt_size; - - tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets; - coal_conf->tx_prev_packets = coal_conf->tx_packets; - - tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes; - coal_conf->tx_prev_bytes = coal_conf->tx_bytes; - - rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets; - coal_conf->rx_prev_packets = coal_conf->rx_packets; - - rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes; - coal_conf->rx_prev_bytes = coal_conf->rx_bytes; - - if(rx_pkt_rate < 800){ - if(coal_conf->rx_coal_type != NO_COALESCE){ - - coal_conf->rx_timeout = 0x0; - coal_conf->rx_event_count = 0; - amd8111e_set_coalesce(dev,RX_INTR_COAL); - coal_conf->rx_coal_type = NO_COALESCE; - } - } - else{ - - rx_pkt_size = rx_data_rate/rx_pkt_rate; - if (rx_pkt_size < 128){ - if(coal_conf->rx_coal_type != NO_COALESCE){ - - coal_conf->rx_timeout = 0; - coal_conf->rx_event_count = 0; - amd8111e_set_coalesce(dev,RX_INTR_COAL); - coal_conf->rx_coal_type = NO_COALESCE; - } - - } - else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){ - - if(coal_conf->rx_coal_type != LOW_COALESCE){ - coal_conf->rx_timeout = 1; - coal_conf->rx_event_count = 4; - amd8111e_set_coalesce(dev,RX_INTR_COAL); - coal_conf->rx_coal_type = LOW_COALESCE; - } - } - else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){ - - if(coal_conf->rx_coal_type != MEDIUM_COALESCE){ - coal_conf->rx_timeout = 1; - coal_conf->rx_event_count = 4; - amd8111e_set_coalesce(dev,RX_INTR_COAL); - coal_conf->rx_coal_type = MEDIUM_COALESCE; - } - - } - else if(rx_pkt_size >= 1024){ - if(coal_conf->rx_coal_type != HIGH_COALESCE){ - coal_conf->rx_timeout = 2; - coal_conf->rx_event_count = 3; - amd8111e_set_coalesce(dev,RX_INTR_COAL); - coal_conf->rx_coal_type = HIGH_COALESCE; - } - } - } - /* NOW FOR TX INTR COALESC */ - if(tx_pkt_rate < 800){ - if(coal_conf->tx_coal_type != NO_COALESCE){ - - coal_conf->tx_timeout = 0x0; - coal_conf->tx_event_count = 0; - amd8111e_set_coalesce(dev,TX_INTR_COAL); - coal_conf->tx_coal_type = NO_COALESCE; - } - } - else{ - - tx_pkt_size = tx_data_rate/tx_pkt_rate; - if (tx_pkt_size < 128){ - - if(coal_conf->tx_coal_type != NO_COALESCE){ - - coal_conf->tx_timeout = 0; - coal_conf->tx_event_count = 0; - amd8111e_set_coalesce(dev,TX_INTR_COAL); - coal_conf->tx_coal_type = NO_COALESCE; - } - - } - else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){ - - if(coal_conf->tx_coal_type != LOW_COALESCE){ - coal_conf->tx_timeout = 1; - coal_conf->tx_event_count = 2; - amd8111e_set_coalesce(dev,TX_INTR_COAL); - coal_conf->tx_coal_type = LOW_COALESCE; - - } - } - else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){ - - if(coal_conf->tx_coal_type != MEDIUM_COALESCE){ - coal_conf->tx_timeout = 2; - coal_conf->tx_event_count = 5; - amd8111e_set_coalesce(dev,TX_INTR_COAL); - coal_conf->tx_coal_type = MEDIUM_COALESCE; - } - - } - else if(tx_pkt_size >= 1024){ - if 
(tx_pkt_size >= 1024){ - if(coal_conf->tx_coal_type != HIGH_COALESCE){ - coal_conf->tx_timeout = 4; - coal_conf->tx_event_count = 8; - amd8111e_set_coalesce(dev,TX_INTR_COAL); - coal_conf->tx_coal_type = HIGH_COALESCE; - } - } - } - } - return 0; - -} -/* -This is device interrupt function. It handles transmit, receive,link change and hardware timer interrupts. -*/ -static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) -{ - - struct net_device * dev = (struct net_device *) dev_id; - struct amd8111e_priv *lp = netdev_priv(dev); - void __iomem *mmio = lp->mmio; - unsigned int intr0, intren0; - unsigned int handled = 1; - - if(unlikely(dev == NULL)) - return IRQ_NONE; - - spin_lock(&lp->lock); - - /* disabling interrupt */ - writel(INTREN, mmio + CMD0); - - /* Read interrupt status */ - intr0 = readl(mmio + INT0); - intren0 = readl(mmio + INTEN0); - - /* Process all the INT event until INTR bit is clear. */ - - if (!(intr0 & INTR)){ - handled = 0; - goto err_no_interrupt; - } - - /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */ - writel(intr0, mmio + INT0); - - /* Check if Receive Interrupt has occurred. */ - if (intr0 & RINT0) { - if (napi_schedule_prep(&lp->napi)) { - /* Disable receive interupts */ - writel(RINTEN0, mmio + INTEN0); - /* Schedule a polling routine */ - __napi_schedule(&lp->napi); - } else if (intren0 & RINTEN0) { - printk("************Driver bug! interrupt while in poll\n"); - /* Fix by disable receive interrupts */ - writel(RINTEN0, mmio + INTEN0); - } - } - - /* Check if Transmit Interrupt has occurred. */ - if (intr0 & TINT0) - amd8111e_tx(dev); - - /* Check if Link Change Interrupt has occurred. */ - if (intr0 & LCINT) - amd8111e_link_change(dev); - - /* Check if Hardware Timer Interrupt has occurred. */ - if (intr0 & STINT) - amd8111e_calc_coalesce(dev); - -err_no_interrupt: - writel( VAL0 | INTREN,mmio + CMD0); - - spin_unlock(&lp->lock); - - return IRQ_RETVAL(handled); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void amd8111e_poll(struct net_device *dev) -{ - unsigned long flags; - local_irq_save(flags); - amd8111e_interrupt(0, dev); - local_irq_restore(flags); -} -#endif - - -/* -This function closes the network interface and updates the statistics so that most recent statistics will be available after the interface is down. -*/ -static int amd8111e_close(struct net_device * dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - netif_stop_queue(dev); - - napi_disable(&lp->napi); - - spin_lock_irq(&lp->lock); - - amd8111e_disable_interrupt(lp); - amd8111e_stop_chip(lp); - - /* Free transmit and receive skbs */ - amd8111e_free_skbs(lp->amd8111e_net_dev); - - netif_carrier_off(lp->amd8111e_net_dev); - - /* Delete ipg timer */ - if(lp->options & OPTION_DYN_IPG_ENABLE) - del_timer_sync(&lp->ipg_data.ipg_timer); - - spin_unlock_irq(&lp->lock); - free_irq(dev->irq, dev); - amd8111e_free_ring(lp); - - /* Update the statistics before closing */ - amd8111e_get_stats(dev); - lp->opened = 0; - return 0; -} -/* This function opens new interface.It requests irq for the device, initializes the device,buffers and descriptors, and starts the device. 
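The RINT0 branch of amd8111e_interrupt() above is the standard NAPI handoff: mask the RX source, schedule the poller, and let the poll routine re-enable interrupts once the ring is drained, as amd8111e_rx_poll() does. The sketch below shows only that shape; struct example_priv and the rx_irq_*/process_rx helpers are hypothetical, while the napi_* calls are the kernel API of this era.

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    struct example_priv {
            struct napi_struct napi;
    };

    /* Hypothetical helpers: mask/unmask the RX source, drain <= budget. */
    extern void rx_irq_disable(struct example_priv *priv);
    extern void rx_irq_enable(struct example_priv *priv);
    extern int process_rx(struct example_priv *priv, int budget);

    static irqreturn_t example_irq(int irq, void *dev_id)
    {
            struct example_priv *priv = dev_id;

            if (napi_schedule_prep(&priv->napi)) { /* not already polling */
                    rx_irq_disable(priv);          /* quiesce the source  */
                    __napi_schedule(&priv->napi);  /* poll from softirq   */
            }
            return IRQ_HANDLED;
    }

    static int example_poll(struct napi_struct *napi, int budget)
    {
            struct example_priv *priv =
                    container_of(napi, struct example_priv, napi);
            int done = process_rx(priv, budget);

            if (done < budget) {            /* ring drained within budget */
                    napi_complete(napi);
                    rx_irq_enable(priv);    /* IRQ path takes over again  */
            }
            return done;
    }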
-*/ -static int amd8111e_open(struct net_device * dev ) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - - if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED, - dev->name, dev)) - return -EAGAIN; - - napi_enable(&lp->napi); - - spin_lock_irq(&lp->lock); - - amd8111e_init_hw_default(lp); - - if(amd8111e_restart(dev)){ - spin_unlock_irq(&lp->lock); - napi_disable(&lp->napi); - if (dev->irq) - free_irq(dev->irq, dev); - return -ENOMEM; - } - /* Start ipg timer */ - if(lp->options & OPTION_DYN_IPG_ENABLE){ - add_timer(&lp->ipg_data.ipg_timer); - printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name); - } - - lp->opened = 1; - - spin_unlock_irq(&lp->lock); - - netif_start_queue(dev); - - return 0; -} -/* -This function checks if there is any transmit descriptors available to queue more packet. -*/ -static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp ) -{ - int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK; - if (lp->tx_skbuff[tx_index]) - return -1; - else - return 0; - -} -/* -This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc. -*/ - -static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb, - struct net_device * dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - int tx_index; - unsigned long flags; - - spin_lock_irqsave(&lp->lock, flags); - - tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK; - - lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len); - - lp->tx_skbuff[tx_index] = skb; - lp->tx_ring[tx_index].tx_flags = 0; - -#if AMD8111E_VLAN_TAG_USED - if (vlan_tx_tag_present(skb)) { - lp->tx_ring[tx_index].tag_ctrl_cmd |= - cpu_to_le16(TCC_VLAN_INSERT); - lp->tx_ring[tx_index].tag_ctrl_info = - cpu_to_le16(vlan_tx_tag_get(skb)); - - } -#endif - lp->tx_dma_addr[tx_index] = - pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); - lp->tx_ring[tx_index].buff_phy_addr = - cpu_to_le32(lp->tx_dma_addr[tx_index]); - - /* Set FCS and LTINT bits */ - wmb(); - lp->tx_ring[tx_index].tx_flags |= - cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT); - - lp->tx_idx++; - - /* Trigger an immediate send poll. */ - writel( VAL1 | TDMD0, lp->mmio + CMD0); - writel( VAL2 | RDMD0,lp->mmio + CMD0); - - if(amd8111e_tx_queue_avail(lp) < 0){ - netif_stop_queue(dev); - } - spin_unlock_irqrestore(&lp->lock, flags); - return NETDEV_TX_OK; -} -/* -This function returns all the memory mapped registers of the device. -*/ -static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf) -{ - void __iomem *mmio = lp->mmio; - /* Read only necessary registers */ - buf[0] = readl(mmio + XMT_RING_BASE_ADDR0); - buf[1] = readl(mmio + XMT_RING_LEN0); - buf[2] = readl(mmio + RCV_RING_BASE_ADDR0); - buf[3] = readl(mmio + RCV_RING_LEN0); - buf[4] = readl(mmio + CMD0); - buf[5] = readl(mmio + CMD2); - buf[6] = readl(mmio + CMD3); - buf[7] = readl(mmio + CMD7); - buf[8] = readl(mmio + INT0); - buf[9] = readl(mmio + INTEN0); - buf[10] = readl(mmio + LADRF); - buf[11] = readl(mmio + LADRF+4); - buf[12] = readl(mmio + STAT0); -} - - -/* -This function sets promiscuos mode, all-multi mode or the multicast address -list to the device. 
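amd8111e_set_multicast_list() below hashes each multicast address into the 64-bit logical address filter by taking the top six bits of the little-endian Ethernet CRC. A self-contained userspace sketch of that bit selection follows; the bitwise CRC routine is a plain reimplementation assumed equivalent to the kernel's ether_crc_le() for this purpose.

    #include <stdint.h>
    #include <stdio.h>

    /* Bitwise CRC-32, poly 0xEDB88320, init ~0, no final xor. */
    static uint32_t ether_crc_le(int len, const uint8_t *p)
    {
            uint32_t crc = 0xffffffff;

            while (len--) {
                    crc ^= *p++;
                    for (int bit = 0; bit < 8; bit++)
                            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
            }
            return crc;
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            uint32_t filter[2] = { 0, 0 };
            int bit = (ether_crc_le(6, mac) >> 26) & 0x3f; /* top 6 bits */

            filter[bit >> 5] |= 1u << (bit & 31);  /* 1 of 64 filter bits */
            printf("bit %d -> LADRF %08x %08x\n", bit, filter[1], filter[0]);
            return 0;
    }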
-*/ -static void amd8111e_set_multicast_list(struct net_device *dev) -{ - struct netdev_hw_addr *ha; - struct amd8111e_priv *lp = netdev_priv(dev); - u32 mc_filter[2] ; - int bit_num; - - if(dev->flags & IFF_PROMISC){ - writel( VAL2 | PROM, lp->mmio + CMD2); - return; - } - else - writel( PROM, lp->mmio + CMD2); - if (dev->flags & IFF_ALLMULTI || - netdev_mc_count(dev) > MAX_FILTER_SIZE) { - /* get all multicast packet */ - mc_filter[1] = mc_filter[0] = 0xffffffff; - lp->options |= OPTION_MULTICAST_ENABLE; - amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF); - return; - } - if (netdev_mc_empty(dev)) { - /* get only own packets */ - mc_filter[1] = mc_filter[0] = 0; - lp->options &= ~OPTION_MULTICAST_ENABLE; - amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF); - /* disable promiscuous mode */ - writel(PROM, lp->mmio + CMD2); - return; - } - /* load all the multicast addresses in the logic filter */ - lp->options |= OPTION_MULTICAST_ENABLE; - mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(ha, dev) { - bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f; - mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); - } - amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF); - - /* To eliminate PCI posting bug */ - readl(lp->mmio + CMD2); - -} - -static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - struct pci_dev *pci_dev = lp->pci_dev; - strcpy (info->driver, MODULE_NAME); - strcpy (info->version, MODULE_VERS); - sprintf(info->fw_version,"%u",chip_version); - strcpy (info->bus_info, pci_name(pci_dev)); -} - -static int amd8111e_get_regs_len(struct net_device *dev) -{ - return AMD8111E_REG_DUMP_LEN; -} - -static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - regs->version = 0; - amd8111e_read_regs(lp, buf); -} - -static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - spin_lock_irq(&lp->lock); - mii_ethtool_gset(&lp->mii_if, ecmd); - spin_unlock_irq(&lp->lock); - return 0; -} - -static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - int res; - spin_lock_irq(&lp->lock); - res = mii_ethtool_sset(&lp->mii_if, ecmd); - spin_unlock_irq(&lp->lock); - return res; -} - -static int amd8111e_nway_reset(struct net_device *dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - return mii_nway_restart(&lp->mii_if); -} - -static u32 amd8111e_get_link(struct net_device *dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - return mii_link_ok(&lp->mii_if); -} - -static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - wol_info->supported = WAKE_MAGIC|WAKE_PHY; - if (lp->options & OPTION_WOL_ENABLE) - wol_info->wolopts = WAKE_MAGIC; -} - -static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY)) - return -EINVAL; - spin_lock_irq(&lp->lock); - if (wol_info->wolopts & WAKE_MAGIC) - lp->options |= - (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE); - else if(wol_info->wolopts & WAKE_PHY) - lp->options |= - (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE); - else - lp->options &= ~OPTION_WOL_ENABLE; - spin_unlock_irq(&lp->lock); - return 0; -} - -static const struct 
ethtool_ops ops = { - .get_drvinfo = amd8111e_get_drvinfo, - .get_regs_len = amd8111e_get_regs_len, - .get_regs = amd8111e_get_regs, - .get_settings = amd8111e_get_settings, - .set_settings = amd8111e_set_settings, - .nway_reset = amd8111e_nway_reset, - .get_link = amd8111e_get_link, - .get_wol = amd8111e_get_wol, - .set_wol = amd8111e_set_wol, -}; - -/* -This function handles all the ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation, sets/gets WOL options for ethtool application. -*/ - -static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd) -{ - struct mii_ioctl_data *data = if_mii(ifr); - struct amd8111e_priv *lp = netdev_priv(dev); - int err; - u32 mii_regval; - - switch(cmd) { - case SIOCGMIIPHY: - data->phy_id = lp->ext_phy_addr; - - /* fallthru */ - case SIOCGMIIREG: - - spin_lock_irq(&lp->lock); - err = amd8111e_read_phy(lp, data->phy_id, - data->reg_num & PHY_REG_ADDR_MASK, &mii_regval); - spin_unlock_irq(&lp->lock); - - data->val_out = mii_regval; - return err; - - case SIOCSMIIREG: - - spin_lock_irq(&lp->lock); - err = amd8111e_write_phy(lp, data->phy_id, - data->reg_num & PHY_REG_ADDR_MASK, data->val_in); - spin_unlock_irq(&lp->lock); - - return err; - - default: - /* do nothing */ - break; - } - return -EOPNOTSUPP; -} -static int amd8111e_set_mac_address(struct net_device *dev, void *p) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - int i; - struct sockaddr *addr = p; - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - spin_lock_irq(&lp->lock); - /* Setting the MAC address to the device */ - for(i = 0; i < ETH_ADDR_LEN; i++) - writeb( dev->dev_addr[i], lp->mmio + PADR + i ); - - spin_unlock_irq(&lp->lock); - - return 0; -} - -/* -This function changes the mtu of the device. It restarts the device to initialize the descriptor with new receive buffers. -*/ -static int amd8111e_change_mtu(struct net_device *dev, int new_mtu) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - int err; - - if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU)) - return -EINVAL; - - if (!netif_running(dev)) { - /* new_mtu will be used - when device starts netxt time */ - dev->mtu = new_mtu; - return 0; - } - - spin_lock_irq(&lp->lock); - - /* stop the chip */ - writel(RUN, lp->mmio + CMD0); - - dev->mtu = new_mtu; - - err = amd8111e_restart(dev); - spin_unlock_irq(&lp->lock); - if(!err) - netif_start_queue(dev); - return err; -} - -static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp) -{ - writel( VAL1|MPPLBA, lp->mmio + CMD3); - writel( VAL0|MPEN_SW, lp->mmio + CMD7); - - /* To eliminate PCI posting bug */ - readl(lp->mmio + CMD7); - return 0; -} - -static int amd8111e_enable_link_change(struct amd8111e_priv* lp) -{ - - /* Adapter is already stoped/suspended/interrupt-disabled */ - writel(VAL0|LCMODE_SW,lp->mmio + CMD7); - - /* To eliminate PCI posting bug */ - readl(lp->mmio + CMD7); - return 0; -} - -/* - * This function is called when a packet transmission fails to complete - * within a reasonable period, on the assumption that an interrupt have - * failed or the interface is locked up. This function will reinitialize - * the hardware. 
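One detail of amd8111e_ioctl() above worth calling out is the deliberate fallthrough from SIOCGMIIPHY into SIOCGMIIREG, so that a "which PHY?" query also returns a register read through the same locked path. A tiny standalone model of that control flow, with made-up command numbers and a canned register value:

    #include <stdio.h>

    enum { SIOCGMIIPHY_DEMO, SIOCGMIIREG_DEMO };    /* stand-in cmd numbers */

    static int mii_ioctl_demo(int cmd, int *phy_id, unsigned int *val_out)
    {
            switch (cmd) {
            case SIOCGMIIPHY_DEMO:
                    *phy_id = 1;            /* probed default address */
                    /* fall through */
            case SIOCGMIIREG_DEMO:
                    *val_out = 0x796d;      /* canned register read */
                    return 0;
            }
            return -1;                      /* -EOPNOTSUPP in the driver */
    }

    int main(void)
    {
            int phy = -1;
            unsigned int val = 0;

            mii_ioctl_demo(SIOCGMIIPHY_DEMO, &phy, &val);
            printf("phy %d, reg 0x%04x\n", phy, val);
            return 0;
    }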
- */ -static void amd8111e_tx_timeout(struct net_device *dev) -{ - struct amd8111e_priv* lp = netdev_priv(dev); - int err; - - printk(KERN_ERR "%s: transmit timed out, resetting\n", - dev->name); - spin_lock_irq(&lp->lock); - err = amd8111e_restart(dev); - spin_unlock_irq(&lp->lock); - if(!err) - netif_wake_queue(dev); -} -static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pci_dev); - struct amd8111e_priv *lp = netdev_priv(dev); - - if (!netif_running(dev)) - return 0; - - /* disable the interrupt */ - spin_lock_irq(&lp->lock); - amd8111e_disable_interrupt(lp); - spin_unlock_irq(&lp->lock); - - netif_device_detach(dev); - - /* stop chip */ - spin_lock_irq(&lp->lock); - if(lp->options & OPTION_DYN_IPG_ENABLE) - del_timer_sync(&lp->ipg_data.ipg_timer); - amd8111e_stop_chip(lp); - spin_unlock_irq(&lp->lock); - - if(lp->options & OPTION_WOL_ENABLE){ - /* enable wol */ - if(lp->options & OPTION_WAKE_MAGIC_ENABLE) - amd8111e_enable_magicpkt(lp); - if(lp->options & OPTION_WAKE_PHY_ENABLE) - amd8111e_enable_link_change(lp); - - pci_enable_wake(pci_dev, PCI_D3hot, 1); - pci_enable_wake(pci_dev, PCI_D3cold, 1); - - } - else{ - pci_enable_wake(pci_dev, PCI_D3hot, 0); - pci_enable_wake(pci_dev, PCI_D3cold, 0); - } - - pci_save_state(pci_dev); - pci_set_power_state(pci_dev, PCI_D3hot); - - return 0; -} -static int amd8111e_resume(struct pci_dev *pci_dev) -{ - struct net_device *dev = pci_get_drvdata(pci_dev); - struct amd8111e_priv *lp = netdev_priv(dev); - - if (!netif_running(dev)) - return 0; - - pci_set_power_state(pci_dev, PCI_D0); - pci_restore_state(pci_dev); - - pci_enable_wake(pci_dev, PCI_D3hot, 0); - pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */ - - netif_device_attach(dev); - - spin_lock_irq(&lp->lock); - amd8111e_restart(dev); - /* Restart ipg timer */ - if(lp->options & OPTION_DYN_IPG_ENABLE) - mod_timer(&lp->ipg_data.ipg_timer, - jiffies + IPG_CONVERGE_JIFFIES); - spin_unlock_irq(&lp->lock); - - return 0; -} - - -static void __devexit amd8111e_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - if (dev) { - unregister_netdev(dev); - iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio); - free_netdev(dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - } -} -static void amd8111e_config_ipg(struct net_device* dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - struct ipg_info* ipg_data = &lp->ipg_data; - void __iomem *mmio = lp->mmio; - unsigned int prev_col_cnt = ipg_data->col_cnt; - unsigned int total_col_cnt; - unsigned int tmp_ipg; - - if(lp->link_config.duplex == DUPLEX_FULL){ - ipg_data->ipg = DEFAULT_IPG; - return; - } - - if(ipg_data->ipg_state == SSTATE){ - - if(ipg_data->timer_tick == IPG_STABLE_TIME){ - - ipg_data->timer_tick = 0; - ipg_data->ipg = MIN_IPG - IPG_STEP; - ipg_data->current_ipg = MIN_IPG; - ipg_data->diff_col_cnt = 0xFFFFFFFF; - ipg_data->ipg_state = CSTATE; - } - else - ipg_data->timer_tick++; - } - - if(ipg_data->ipg_state == CSTATE){ - - /* Get the current collision count */ - - total_col_cnt = ipg_data->col_cnt = - amd8111e_read_mib(mmio, xmt_collisions); - - if ((total_col_cnt - prev_col_cnt) < - (ipg_data->diff_col_cnt)){ - - ipg_data->diff_col_cnt = - total_col_cnt - prev_col_cnt ; - - ipg_data->ipg = ipg_data->current_ipg; - } - - ipg_data->current_ipg += IPG_STEP; - - if (ipg_data->current_ipg <= MAX_IPG) - tmp_ipg = ipg_data->current_ipg; - else{ - tmp_ipg = ipg_data->ipg; - 
ipg_data->ipg_state = SSTATE; - } - writew((u32)tmp_ipg, mmio + IPG); - writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1); - } - mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES); - return; - -} - -static void __devinit amd8111e_probe_ext_phy(struct net_device* dev) -{ - struct amd8111e_priv *lp = netdev_priv(dev); - int i; - - for (i = 0x1e; i >= 0; i--) { - u32 id1, id2; - - if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1)) - continue; - if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2)) - continue; - lp->ext_phy_id = (id1 << 16) | id2; - lp->ext_phy_addr = i; - return; - } - lp->ext_phy_id = 0; - lp->ext_phy_addr = 1; -} - -static const struct net_device_ops amd8111e_netdev_ops = { - .ndo_open = amd8111e_open, - .ndo_stop = amd8111e_close, - .ndo_start_xmit = amd8111e_start_xmit, - .ndo_tx_timeout = amd8111e_tx_timeout, - .ndo_get_stats = amd8111e_get_stats, - .ndo_set_multicast_list = amd8111e_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = amd8111e_set_mac_address, - .ndo_do_ioctl = amd8111e_ioctl, - .ndo_change_mtu = amd8111e_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = amd8111e_poll, -#endif -}; - -static int __devinit amd8111e_probe_one(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - int err,i,pm_cap; - unsigned long reg_addr,reg_len; - struct amd8111e_priv* lp; - struct net_device* dev; - - err = pci_enable_device(pdev); - if(err){ - printk(KERN_ERR "amd8111e: Cannot enable new PCI device, " - "exiting.\n"); - return err; - } - - if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){ - printk(KERN_ERR "amd8111e: Cannot find PCI base address, " - "exiting.\n"); - err = -ENODEV; - goto err_disable_pdev; - } - - err = pci_request_regions(pdev, MODULE_NAME); - if(err){ - printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, " - "exiting.\n"); - goto err_disable_pdev; - } - - pci_set_master(pdev); - - /* Find power-management capability. 
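amd8111e_probe_ext_phy() above scans MDIO addresses from 0x1e down to 0 and keeps the first PHY whose ID registers answer, falling back to address 1. The following compilable sketch mirrors that loop; mdio_read() is a stub that pretends a PHY with made-up ID words sits at address 5.

    #include <stdint.h>
    #include <stdio.h>

    /* Stub standing in for amd8111e_read_phy(): returns 0 on success. */
    static int mdio_read(int addr, int reg, uint32_t *val)
    {
            if (addr != 5)
                    return -1;                      /* nobody home */
            *val = (reg == 2) ? 0x0022 : 0x5521;    /* PHYSID1 : PHYSID2 */
            return 0;
    }

    /* Scan 0x1e..0 and keep the first responder, as the driver does. */
    static int probe_phy(uint32_t *id_out)
    {
            for (int addr = 0x1e; addr >= 0; addr--) {
                    uint32_t id1, id2;

                    if (mdio_read(addr, 2, &id1))   /* MII_PHYSID1 */
                            continue;
                    if (mdio_read(addr, 3, &id2))   /* MII_PHYSID2 */
                            continue;
                    *id_out = (id1 << 16) | id2;
                    return addr;
            }
            *id_out = 0;
            return 1;                               /* documented fallback */
    }

    int main(void)
    {
            uint32_t id;
            int addr = probe_phy(&id);

            printf("PHY at %#x, id 0x%08x\n", addr, id);
            return 0;
    }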
*/ - if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){ - printk(KERN_ERR "amd8111e: No Power Management capability, " - "exiting.\n"); - goto err_free_reg; - } - - /* Initialize DMA */ - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) { - printk(KERN_ERR "amd8111e: DMA not supported," - "exiting.\n"); - goto err_free_reg; - } - - reg_addr = pci_resource_start(pdev, 0); - reg_len = pci_resource_len(pdev, 0); - - dev = alloc_etherdev(sizeof(struct amd8111e_priv)); - if (!dev) { - printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n"); - err = -ENOMEM; - goto err_free_reg; - } - - SET_NETDEV_DEV(dev, &pdev->dev); - -#if AMD8111E_VLAN_TAG_USED - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ; -#endif - - lp = netdev_priv(dev); - lp->pci_dev = pdev; - lp->amd8111e_net_dev = dev; - lp->pm_cap = pm_cap; - - spin_lock_init(&lp->lock); - - lp->mmio = ioremap(reg_addr, reg_len); - if (!lp->mmio) { - printk(KERN_ERR "amd8111e: Cannot map device registers, " - "exiting\n"); - err = -ENOMEM; - goto err_free_dev; - } - - /* Initializing MAC address */ - for(i = 0; i < ETH_ADDR_LEN; i++) - dev->dev_addr[i] = readb(lp->mmio + PADR + i); - - /* Setting user defined parametrs */ - lp->ext_phy_option = speed_duplex[card_idx]; - if(coalesce[card_idx]) - lp->options |= OPTION_INTR_COAL_ENABLE; - if(dynamic_ipg[card_idx++]) - lp->options |= OPTION_DYN_IPG_ENABLE; - - - /* Initialize driver entry points */ - dev->netdev_ops = &amd8111e_netdev_ops; - SET_ETHTOOL_OPS(dev, &ops); - dev->irq =pdev->irq; - dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; - netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32); - -#if AMD8111E_VLAN_TAG_USED - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; -#endif - /* Probe the external PHY */ - amd8111e_probe_ext_phy(dev); - - /* setting mii default values */ - lp->mii_if.dev = dev; - lp->mii_if.mdio_read = amd8111e_mdio_read; - lp->mii_if.mdio_write = amd8111e_mdio_write; - lp->mii_if.phy_id = lp->ext_phy_addr; - - /* Set receive buffer length and set jumbo option*/ - amd8111e_set_rx_buff_len(dev); - - - err = register_netdev(dev); - if (err) { - printk(KERN_ERR "amd8111e: Cannot register net device, " - "exiting.\n"); - goto err_iounmap; - } - - pci_set_drvdata(pdev, dev); - - /* Initialize software ipg timer */ - if(lp->options & OPTION_DYN_IPG_ENABLE){ - init_timer(&lp->ipg_data.ipg_timer); - lp->ipg_data.ipg_timer.data = (unsigned long) dev; - lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg; - lp->ipg_data.ipg_timer.expires = jiffies + - IPG_CONVERGE_JIFFIES; - lp->ipg_data.ipg = DEFAULT_IPG; - lp->ipg_data.ipg_state = CSTATE; - } - - /* display driver and device information */ - - chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28; - printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", - dev->name,MODULE_VERS); - printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n", - dev->name, chip_version, dev->dev_addr); - if (lp->ext_phy_id) - printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n", - dev->name, lp->ext_phy_id, lp->ext_phy_addr); - else - printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n", - dev->name); - return 0; -err_iounmap: - iounmap(lp->mmio); - -err_free_dev: - free_netdev(dev); - -err_free_reg: - pci_release_regions(pdev); - -err_disable_pdev: - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - return err; - -} - -static struct pci_driver amd8111e_driver = { - .name = MODULE_NAME, - .id_table = amd8111e_pci_tbl, - .probe = amd8111e_probe_one, - .remove = 
__devexit_p(amd8111e_remove_one), - .suspend = amd8111e_suspend, - .resume = amd8111e_resume -}; - -static int __init amd8111e_init(void) -{ - return pci_register_driver(&amd8111e_driver); -} - -static void __exit amd8111e_cleanup(void) -{ - pci_unregister_driver(&amd8111e_driver); -} - -module_init(amd8111e_init); -module_exit(amd8111e_cleanup); diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h deleted file mode 100644 index 2ff2e7a..0000000 --- a/drivers/net/amd8111e.h +++ /dev/null @@ -1,816 +0,0 @@ -/* - * Advanced Micro Devices Inc. AMD8111E Linux Network Driver - * Copyright (C) 2003 Advanced Micro Devices - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA - -Module Name: - - amd8111e.h - -Abstract: - - AMD8111 based 10/100 Ethernet Controller driver definitions. - -Environment: - - Kernel Mode - -Revision History: - 3.0.0 - Initial Revision. - 3.0.1 -*/ - -#ifndef _AMD811E_H -#define _AMD811E_H - -/* Command style register access - -Registers CMD0, CMD2, CMD3,CMD7 and INTEN0 uses a write access technique called command style access. It allows the write to selected bits of this register without altering the bits that are not selected. Command style registers are divided into 4 bytes that can be written independently. Higher order bit of each byte is the value bit that specifies the value that will be written into the selected bits of register. - -eg., if the value 10011010b is written into the least significant byte of a command style register, bits 1,3 and 4 of the register will be set to 1, and the other bits will not be altered. If the value 00011010b is written into the same byte, bits 1,3 and 4 will be cleared to 0 and the other bits will not be altered. - -*/ - -/* Offset for Memory Mapped Registers. 
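The command-style semantics described in the header comment above are easy to get wrong, so a runnable model may help: in each byte, bit 7 is the VAL bit and bits 6..0 select which register bits are forced to that value, with everything else untouched. This reproduces the 10011010b example from the comment.

    #include <stdint.h>
    #include <stdio.h>

    /* One command-style write: per byte, bit 7 (VAL) decides whether the
     * bits selected by bits 6..0 are set or cleared; unselected bits of
     * the register are never altered. */
    static uint32_t cmd_style_write(uint32_t reg, uint32_t wr)
    {
            for (int byte = 0; byte < 4; byte++) {
                    uint32_t val  = (wr >> (8 * byte + 7)) & 1;
                    uint32_t mask = ((wr >> (8 * byte)) & 0x7f) << (8 * byte);

                    reg = val ? (reg | mask) : (reg & ~mask);
            }
            return reg;
    }

    int main(void)
    {
            uint32_t reg = 0;

            reg = cmd_style_write(reg, 0x9a);  /* 10011010b: sets 1,3,4 */
            printf("%#x\n", reg);              /* -> 0x1a */
            reg = cmd_style_write(reg, 0x1a);  /* 00011010b: clears them */
            printf("%#x\n", reg);              /* -> 0 */
            return 0;
    }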
*/ -/* 32 bit registers */ - -#define ASF_STAT 0x00 /* ASF status register */ -#define CHIPID 0x04 /* Chip ID regsiter */ -#define MIB_DATA 0x10 /* MIB data register */ -#define MIB_ADDR 0x14 /* MIB address register */ -#define STAT0 0x30 /* Status0 register */ -#define INT0 0x38 /* Interrupt0 register */ -#define INTEN0 0x40 /* Interrupt0 enable register*/ -#define CMD0 0x48 /* Command0 register */ -#define CMD2 0x50 /* Command2 register */ -#define CMD3 0x54 /* Command3 resiter */ -#define CMD7 0x64 /* Command7 register */ - -#define CTRL1 0x6C /* Control1 register */ -#define CTRL2 0x70 /* Control2 register */ - -#define XMT_RING_LIMIT 0x7C /* Transmit ring limit register */ - -#define AUTOPOLL0 0x88 /* Auto-poll0 register */ -#define AUTOPOLL1 0x8A /* Auto-poll1 register */ -#define AUTOPOLL2 0x8C /* Auto-poll2 register */ -#define AUTOPOLL3 0x8E /* Auto-poll3 register */ -#define AUTOPOLL4 0x90 /* Auto-poll4 register */ -#define AUTOPOLL5 0x92 /* Auto-poll5 register */ - -#define AP_VALUE 0x98 /* Auto-poll value register */ -#define DLY_INT_A 0xA8 /* Group A delayed interrupt register */ -#define DLY_INT_B 0xAC /* Group B delayed interrupt register */ - -#define FLOW_CONTROL 0xC8 /* Flow control register */ -#define PHY_ACCESS 0xD0 /* PHY access register */ - -#define STVAL 0xD8 /* Software timer value register */ - -#define XMT_RING_BASE_ADDR0 0x100 /* Transmit ring0 base addr register */ -#define XMT_RING_BASE_ADDR1 0x108 /* Transmit ring1 base addr register */ -#define XMT_RING_BASE_ADDR2 0x110 /* Transmit ring2 base addr register */ -#define XMT_RING_BASE_ADDR3 0x118 /* Transmit ring2 base addr register */ - -#define RCV_RING_BASE_ADDR0 0x120 /* Transmit ring0 base addr register */ - -#define PMAT0 0x190 /* OnNow pattern register0 */ -#define PMAT1 0x194 /* OnNow pattern register1 */ - -/* 16bit registers */ - -#define XMT_RING_LEN0 0x140 /* Transmit Ring0 length register */ -#define XMT_RING_LEN1 0x144 /* Transmit Ring1 length register */ -#define XMT_RING_LEN2 0x148 /* Transmit Ring2 length register */ -#define XMT_RING_LEN3 0x14C /* Transmit Ring3 length register */ - -#define RCV_RING_LEN0 0x150 /* Receive Ring0 length register */ - -#define SRAM_SIZE 0x178 /* SRAM size register */ -#define SRAM_BOUNDARY 0x17A /* SRAM boundary register */ - -/* 48bit register */ - -#define PADR 0x160 /* Physical address register */ - -#define IFS1 0x18C /* Inter-frame spacing Part1 register */ -#define IFS 0x18D /* Inter-frame spacing register */ -#define IPG 0x18E /* Inter-frame gap register */ -/* 64bit register */ - -#define LADRF 0x168 /* Logical address filter register */ - - -/* Register Bit Definitions */ -typedef enum { - - ASF_INIT_DONE = (1 << 1), - ASF_INIT_PRESENT = (1 << 0), - -}STAT_ASF_BITS; - -typedef enum { - - MIB_CMD_ACTIVE = (1 << 15 ), - MIB_RD_CMD = (1 << 13 ), - MIB_CLEAR = (1 << 12 ), - MIB_ADDRESS = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)| - (1 << 4) | (1 << 5), -}MIB_ADDR_BITS; - - -typedef enum { - - PMAT_DET = (1 << 12), - MP_DET = (1 << 11), - LC_DET = (1 << 10), - SPEED_MASK = (1 << 9)|(1 << 8)|(1 << 7), - FULL_DPLX = (1 << 6), - LINK_STATS = (1 << 5), - AUTONEG_COMPLETE = (1 << 4), - MIIPD = (1 << 3), - RX_SUSPENDED = (1 << 2), - TX_SUSPENDED = (1 << 1), - RUNNING = (1 << 0), - -}STAT0_BITS; - -#define PHY_SPEED_10 0x2 -#define PHY_SPEED_100 0x3 - -/* INT0 0x38, 32bit register */ -typedef enum { - - INTR = (1 << 31), - PCSINT = (1 << 28), - LCINT = (1 << 27), - APINT5 = (1 << 26), - APINT4 = (1 << 25), - APINT3 = (1 << 24), - TINT_SUM = (1 << 23), - APINT2 = (1 
<< 22), - APINT1 = (1 << 21), - APINT0 = (1 << 20), - MIIPDTINT = (1 << 19), - MCCINT = (1 << 17), - MREINT = (1 << 16), - RINT_SUM = (1 << 15), - SPNDINT = (1 << 14), - MPINT = (1 << 13), - SINT = (1 << 12), - TINT3 = (1 << 11), - TINT2 = (1 << 10), - TINT1 = (1 << 9), - TINT0 = (1 << 8), - UINT = (1 << 7), - STINT = (1 << 4), - RINT0 = (1 << 0), - -}INT0_BITS; - -typedef enum { - - VAL3 = (1 << 31), /* VAL bit for byte 3 */ - VAL2 = (1 << 23), /* VAL bit for byte 2 */ - VAL1 = (1 << 15), /* VAL bit for byte 1 */ - VAL0 = (1 << 7), /* VAL bit for byte 0 */ - -}VAL_BITS; - -typedef enum { - - /* VAL3 */ - LCINTEN = (1 << 27), - APINT5EN = (1 << 26), - APINT4EN = (1 << 25), - APINT3EN = (1 << 24), - /* VAL2 */ - APINT2EN = (1 << 22), - APINT1EN = (1 << 21), - APINT0EN = (1 << 20), - MIIPDTINTEN = (1 << 19), - MCCIINTEN = (1 << 18), - MCCINTEN = (1 << 17), - MREINTEN = (1 << 16), - /* VAL1 */ - SPNDINTEN = (1 << 14), - MPINTEN = (1 << 13), - TINTEN3 = (1 << 11), - SINTEN = (1 << 12), - TINTEN2 = (1 << 10), - TINTEN1 = (1 << 9), - TINTEN0 = (1 << 8), - /* VAL0 */ - STINTEN = (1 << 4), - RINTEN0 = (1 << 0), - - INTEN0_CLEAR = 0x1F7F7F1F, /* Command style register */ - -}INTEN0_BITS; - -typedef enum { - /* VAL2 */ - RDMD0 = (1 << 16), - /* VAL1 */ - TDMD3 = (1 << 11), - TDMD2 = (1 << 10), - TDMD1 = (1 << 9), - TDMD0 = (1 << 8), - /* VAL0 */ - UINTCMD = (1 << 6), - RX_FAST_SPND = (1 << 5), - TX_FAST_SPND = (1 << 4), - RX_SPND = (1 << 3), - TX_SPND = (1 << 2), - INTREN = (1 << 1), - RUN = (1 << 0), - - CMD0_CLEAR = 0x000F0F7F, /* Command style register */ - -}CMD0_BITS; - -typedef enum { - - /* VAL3 */ - CONDUIT_MODE = (1 << 29), - /* VAL2 */ - RPA = (1 << 19), - DRCVPA = (1 << 18), - DRCVBC = (1 << 17), - PROM = (1 << 16), - /* VAL1 */ - ASTRP_RCV = (1 << 13), - RCV_DROP0 = (1 << 12), - EMBA = (1 << 11), - DXMT2PD = (1 << 10), - LTINTEN = (1 << 9), - DXMTFCS = (1 << 8), - /* VAL0 */ - APAD_XMT = (1 << 6), - DRTY = (1 << 5), - INLOOP = (1 << 4), - EXLOOP = (1 << 3), - REX_RTRY = (1 << 2), - REX_UFLO = (1 << 1), - REX_LCOL = (1 << 0), - - CMD2_CLEAR = 0x3F7F3F7F, /* Command style register */ - -}CMD2_BITS; - -typedef enum { - - /* VAL3 */ - ASF_INIT_DONE_ALIAS = (1 << 29), - /* VAL2 */ - JUMBO = (1 << 21), - VSIZE = (1 << 20), - VLONLY = (1 << 19), - VL_TAG_DEL = (1 << 18), - /* VAL1 */ - EN_PMGR = (1 << 14), - INTLEVEL = (1 << 13), - FORCE_FULL_DUPLEX = (1 << 12), - FORCE_LINK_STATUS = (1 << 11), - APEP = (1 << 10), - MPPLBA = (1 << 9), - /* VAL0 */ - RESET_PHY_PULSE = (1 << 2), - RESET_PHY = (1 << 1), - PHY_RST_POL = (1 << 0), - -}CMD3_BITS; - - -typedef enum { - - /* VAL0 */ - PMAT_SAVE_MATCH = (1 << 4), - PMAT_MODE = (1 << 3), - MPEN_SW = (1 << 1), - LCMODE_SW = (1 << 0), - - CMD7_CLEAR = 0x0000001B /* Command style register */ - -}CMD7_BITS; - - -typedef enum { - - RESET_PHY_WIDTH = (0xF << 16) | (0xF<< 20), /* 0x00FF0000 */ - XMTSP_MASK = (1 << 9) | (1 << 8), /* 9:8 */ - XMTSP_128 = (1 << 9), /* 9 */ - XMTSP_64 = (1 << 8), - CACHE_ALIGN = (1 << 4), - BURST_LIMIT_MASK = (0xF << 0 ), - CTRL1_DEFAULT = 0x00010111, - -}CTRL1_BITS; - -typedef enum { - - FMDC_MASK = (1 << 9)|(1 << 8), /* 9:8 */ - XPHYRST = (1 << 7), - XPHYANE = (1 << 6), - XPHYFD = (1 << 5), - XPHYSP = (1 << 4) | (1 << 3), /* 4:3 */ - APDW_MASK = (1 << 2) | (1 << 1) | (1 << 0), /* 2:0 */ - -}CTRL2_BITS; - -/* XMT_RING_LIMIT 0x7C, 32bit register */ -typedef enum { - - XMT_RING2_LIMIT = (0xFF << 16), /* 23:16 */ - XMT_RING1_LIMIT = (0xFF << 8), /* 15:8 */ - XMT_RING0_LIMIT = (0xFF << 0), /* 7:0 */ - -}XMT_RING_LIMIT_BITS; - -typedef 
enum { - - AP_REG0_EN = (1 << 15), - AP_REG0_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ - AP_PHY0_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ - -}AUTOPOLL0_BITS; - -/* AUTOPOLL1 0x8A, 16bit register */ -typedef enum { - - AP_REG1_EN = (1 << 15), - AP_REG1_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ - AP_PRE_SUP1 = (1 << 6), - AP_PHY1_DFLT = (1 << 5), - AP_PHY1_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ - -}AUTOPOLL1_BITS; - - -typedef enum { - - AP_REG2_EN = (1 << 15), - AP_REG2_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ - AP_PRE_SUP2 = (1 << 6), - AP_PHY2_DFLT = (1 << 5), - AP_PHY2_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ - -}AUTOPOLL2_BITS; - -typedef enum { - - AP_REG3_EN = (1 << 15), - AP_REG3_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ - AP_PRE_SUP3 = (1 << 6), - AP_PHY3_DFLT = (1 << 5), - AP_PHY3_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ - -}AUTOPOLL3_BITS; - - -typedef enum { - - AP_REG4_EN = (1 << 15), - AP_REG4_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ - AP_PRE_SUP4 = (1 << 6), - AP_PHY4_DFLT = (1 << 5), - AP_PHY4_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ - -}AUTOPOLL4_BITS; - - -typedef enum { - - AP_REG5_EN = (1 << 15), - AP_REG5_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ - AP_PRE_SUP5 = (1 << 6), - AP_PHY5_DFLT = (1 << 5), - AP_PHY5_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ - -}AUTOPOLL5_BITS; - - - - -/* AP_VALUE 0x98, 32bit ragister */ -typedef enum { - - AP_VAL_ACTIVE = (1 << 31), - AP_VAL_RD_CMD = ( 1 << 29), - AP_ADDR = (1 << 18)|(1 << 17)|(1 << 16), /* 18:16 */ - AP_VAL = (0xF << 0) | (0xF << 4) |( 0xF << 8) | - (0xF << 12), /* 15:0 */ - -}AP_VALUE_BITS; - -typedef enum { - - DLY_INT_A_R3 = (1 << 31), - DLY_INT_A_R2 = (1 << 30), - DLY_INT_A_R1 = (1 << 29), - DLY_INT_A_R0 = (1 << 28), - DLY_INT_A_T3 = (1 << 27), - DLY_INT_A_T2 = (1 << 26), - DLY_INT_A_T1 = (1 << 25), - DLY_INT_A_T0 = ( 1 << 24), - EVENT_COUNT_A = (0xF << 16) | (0x1 << 20),/* 20:16 */ - MAX_DELAY_TIME_A = (0xF << 0) | (0xF << 4) | (1 << 8)| - (1 << 9) | (1 << 10), /* 10:0 */ - -}DLY_INT_A_BITS; - -typedef enum { - - DLY_INT_B_R3 = (1 << 31), - DLY_INT_B_R2 = (1 << 30), - DLY_INT_B_R1 = (1 << 29), - DLY_INT_B_R0 = (1 << 28), - DLY_INT_B_T3 = (1 << 27), - DLY_INT_B_T2 = (1 << 26), - DLY_INT_B_T1 = (1 << 25), - DLY_INT_B_T0 = ( 1 << 24), - EVENT_COUNT_B = (0xF << 16) | (0x1 << 20),/* 20:16 */ - MAX_DELAY_TIME_B = (0xF << 0) | (0xF << 4) | (1 << 8)| - (1 << 9) | (1 << 10), /* 10:0 */ -}DLY_INT_B_BITS; - - -/* FLOW_CONTROL 0xC8, 32bit register */ -typedef enum { - - PAUSE_LEN_CHG = (1 << 30), - FTPE = (1 << 22), - FRPE = (1 << 21), - NAPA = (1 << 20), - NPA = (1 << 19), - FIXP = ( 1 << 18), - FCCMD = ( 1 << 16), - PAUSE_LEN = (0xF << 0) | (0xF << 4) |( 0xF << 8) | (0xF << 12), /* 15:0 */ - -}FLOW_CONTROL_BITS; - -/* PHY_ ACCESS 0xD0, 32bit register */ -typedef enum { - - PHY_CMD_ACTIVE = (1 << 31), - PHY_WR_CMD = (1 << 30), - PHY_RD_CMD = (1 << 29), - PHY_RD_ERR = (1 << 28), - PHY_PRE_SUP = (1 << 27), - PHY_ADDR = (1 << 21) | (1 << 22) | (1 << 23)| - (1 << 24) |(1 << 25),/* 25:21 */ - PHY_REG_ADDR = (1 << 16) | (1 << 17) | (1 << 18)| (1 << 19) | (1 << 20),/* 20:16 */ - PHY_DATA = (0xF << 0)|(0xF << 4) |(0xF << 8)| - (0xF << 12),/* 15:0 */ - -}PHY_ACCESS_BITS; - - -/* PMAT0 0x190, 32bit register */ -typedef enum { - PMR_ACTIVE = (1 << 31), - PMR_WR_CMD = (1 << 30), - PMR_RD_CMD = (1 << 29), - PMR_BANK = (1 <<28), - PMR_ADDR = (0xF << 16)|(1 << 20)|(1 << 21)| - (1 << 22),/* 22:16 */ - PMR_B4 = (0xF << 0) | (0xF << 4),/* 15:0 */ -}PMAT0_BITS; - - -/* PMAT1 0x194, 32bit register */ -typedef enum { - 
PMR_B3 = (0xF << 24) | (0xF <<28),/* 31:24 */ - PMR_B2 = (0xF << 16) |(0xF << 20),/* 23:16 */ - PMR_B1 = (0xF << 8) | (0xF <<12), /* 15:8 */ - PMR_B0 = (0xF << 0)|(0xF << 4),/* 7:0 */ -}PMAT1_BITS; - -/************************************************************************/ -/* */ -/* MIB counter definitions */ -/* */ -/************************************************************************/ - -#define rcv_miss_pkts 0x00 -#define rcv_octets 0x01 -#define rcv_broadcast_pkts 0x02 -#define rcv_multicast_pkts 0x03 -#define rcv_undersize_pkts 0x04 -#define rcv_oversize_pkts 0x05 -#define rcv_fragments 0x06 -#define rcv_jabbers 0x07 -#define rcv_unicast_pkts 0x08 -#define rcv_alignment_errors 0x09 -#define rcv_fcs_errors 0x0A -#define rcv_good_octets 0x0B -#define rcv_mac_ctrl 0x0C -#define rcv_flow_ctrl 0x0D -#define rcv_pkts_64_octets 0x0E -#define rcv_pkts_65to127_octets 0x0F -#define rcv_pkts_128to255_octets 0x10 -#define rcv_pkts_256to511_octets 0x11 -#define rcv_pkts_512to1023_octets 0x12 -#define rcv_pkts_1024to1518_octets 0x13 -#define rcv_unsupported_opcode 0x14 -#define rcv_symbol_errors 0x15 -#define rcv_drop_pkts_ring1 0x16 -#define rcv_drop_pkts_ring2 0x17 -#define rcv_drop_pkts_ring3 0x18 -#define rcv_drop_pkts_ring4 0x19 -#define rcv_jumbo_pkts 0x1A - -#define xmt_underrun_pkts 0x20 -#define xmt_octets 0x21 -#define xmt_packets 0x22 -#define xmt_broadcast_pkts 0x23 -#define xmt_multicast_pkts 0x24 -#define xmt_collisions 0x25 -#define xmt_unicast_pkts 0x26 -#define xmt_one_collision 0x27 -#define xmt_multiple_collision 0x28 -#define xmt_deferred_transmit 0x29 -#define xmt_late_collision 0x2A -#define xmt_excessive_defer 0x2B -#define xmt_loss_carrier 0x2C -#define xmt_excessive_collision 0x2D -#define xmt_back_pressure 0x2E -#define xmt_flow_ctrl 0x2F -#define xmt_pkts_64_octets 0x30 -#define xmt_pkts_65to127_octets 0x31 -#define xmt_pkts_128to255_octets 0x32 -#define xmt_pkts_256to511_octets 0x33 -#define xmt_pkts_512to1023_octets 0x34 -#define xmt_pkts_1024to1518_octet 0x35 -#define xmt_oversize_pkts 0x36 -#define xmt_jumbo_pkts 0x37 - - -/* Driver definitions */ - -#define PCI_VENDOR_ID_AMD 0x1022 -#define PCI_DEVICE_ID_AMD8111E_7462 0x7462 - -#define MAX_UNITS 8 /* Maximum number of devices possible */ - -#define NUM_TX_BUFFERS 32 /* Number of transmit buffers */ -#define NUM_RX_BUFFERS 32 /* Number of receive buffers */ - -#define TX_BUFF_MOD_MASK 31 /* (NUM_TX_BUFFERS -1) */ -#define RX_BUFF_MOD_MASK 31 /* (NUM_RX_BUFFERS -1) */ - -#define NUM_TX_RING_DR 32 -#define NUM_RX_RING_DR 32 - -#define TX_RING_DR_MOD_MASK 31 /* (NUM_TX_RING_DR -1) */ -#define RX_RING_DR_MOD_MASK 31 /* (NUM_RX_RING_DR -1) */ - -#define MAX_FILTER_SIZE 64 /* Maximum multicast address */ -#define AMD8111E_MIN_MTU 60 -#define AMD8111E_MAX_MTU 9000 - -#define PKT_BUFF_SZ 1536 -#define MIN_PKT_LEN 60 -#define ETH_ADDR_LEN 6 - -#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */ -#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */ -#define DELAY_TIMER_CONV 50 /* msec to 10 usec conversion. 
- Only 500 usec resolution */ -#define OPTION_VLAN_ENABLE 0x0001 -#define OPTION_JUMBO_ENABLE 0x0002 -#define OPTION_MULTICAST_ENABLE 0x0004 -#define OPTION_WOL_ENABLE 0x0008 -#define OPTION_WAKE_MAGIC_ENABLE 0x0010 -#define OPTION_WAKE_PHY_ENABLE 0x0020 -#define OPTION_INTR_COAL_ENABLE 0x0040 -#define OPTION_DYN_IPG_ENABLE 0x0080 - -#define PHY_REG_ADDR_MASK 0x1f - -/* ipg parameters */ -#define DEFAULT_IPG 0x60 -#define IFS1_DELTA 36 -#define IPG_CONVERGE_JIFFIES (HZ/2) -#define IPG_STABLE_TIME 5 -#define MIN_IPG 96 -#define MAX_IPG 255 -#define IPG_STEP 16 -#define CSTATE 1 -#define SSTATE 2 - -/* Assume contoller gets data 10 times the maximum processing time */ -#define REPEAT_CNT 10 - -/* amd8111e decriptor flag definitions */ -typedef enum { - - OWN_BIT = (1 << 15), - ADD_FCS_BIT = (1 << 13), - LTINT_BIT = (1 << 12), - STP_BIT = (1 << 9), - ENP_BIT = (1 << 8), - KILL_BIT = (1 << 6), - TCC_VLAN_INSERT = (1 << 1), - TCC_VLAN_REPLACE = (1 << 1) |( 1<< 0), - -}TX_FLAG_BITS; - -typedef enum { - ERR_BIT = (1 << 14), - FRAM_BIT = (1 << 13), - OFLO_BIT = (1 << 12), - CRC_BIT = (1 << 11), - PAM_BIT = (1 << 6), - LAFM_BIT = (1 << 5), - BAM_BIT = (1 << 4), - TT_VLAN_TAGGED = (1 << 3) |(1 << 2),/* 0x000 */ - TT_PRTY_TAGGED = (1 << 3),/* 0x0008 */ - -}RX_FLAG_BITS; - -#define RESET_RX_FLAGS 0x0000 -#define TT_MASK 0x000c -#define TCC_MASK 0x0003 - -/* driver ioctl parameters */ -#define AMD8111E_REG_DUMP_LEN 13*sizeof(u32) - -/* amd8111e desriptor format */ - -struct amd8111e_tx_dr{ - - __le16 buff_count; /* Size of the buffer pointed by this descriptor */ - - __le16 tx_flags; - - __le16 tag_ctrl_info; - - __le16 tag_ctrl_cmd; - - __le32 buff_phy_addr; - - __le32 reserved; -}; - -struct amd8111e_rx_dr{ - - __le32 reserved; - - __le16 msg_count; /* Received message len */ - - __le16 tag_ctrl_info; - - __le16 buff_count; /* Len of the buffer pointed by descriptor. 
*/ - - __le16 rx_flags; - - __le32 buff_phy_addr; - -}; -struct amd8111e_link_config{ - -#define SPEED_INVALID 0xffff -#define DUPLEX_INVALID 0xff -#define AUTONEG_INVALID 0xff - - unsigned long orig_phy_option; - u16 speed; - u8 duplex; - u8 autoneg; - u8 reserved; /* 32bit alignment */ -}; - -enum coal_type{ - - NO_COALESCE, - LOW_COALESCE, - MEDIUM_COALESCE, - HIGH_COALESCE, - -}; - -enum coal_mode{ - RX_INTR_COAL, - TX_INTR_COAL, - DISABLE_COAL, - ENABLE_COAL, - -}; -#define MAX_TIMEOUT 40 -#define MAX_EVENT_COUNT 31 -struct amd8111e_coalesce_conf{ - - unsigned int rx_timeout; - unsigned int rx_event_count; - unsigned long rx_packets; - unsigned long rx_prev_packets; - unsigned long rx_bytes; - unsigned long rx_prev_bytes; - unsigned int rx_coal_type; - - unsigned int tx_timeout; - unsigned int tx_event_count; - unsigned long tx_packets; - unsigned long tx_prev_packets; - unsigned long tx_bytes; - unsigned long tx_prev_bytes; - unsigned int tx_coal_type; - -}; -struct ipg_info{ - - unsigned int ipg_state; - unsigned int ipg; - unsigned int current_ipg; - unsigned int col_cnt; - unsigned int diff_col_cnt; - unsigned int timer_tick; - unsigned int prev_ipg; - struct timer_list ipg_timer; -}; - -struct amd8111e_priv{ - - struct amd8111e_tx_dr* tx_ring; - struct amd8111e_rx_dr* rx_ring; - dma_addr_t tx_ring_dma_addr; /* tx descriptor ring base address */ - dma_addr_t rx_ring_dma_addr; /* rx descriptor ring base address */ - const char *name; - struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */ - struct net_device* amd8111e_net_dev; /* ptr to associated net_device */ - /* Transmit and recive skbs */ - struct sk_buff *tx_skbuff[NUM_TX_BUFFERS]; - struct sk_buff *rx_skbuff[NUM_RX_BUFFERS]; - /* Transmit and receive dma mapped addr */ - dma_addr_t tx_dma_addr[NUM_TX_BUFFERS]; - dma_addr_t rx_dma_addr[NUM_RX_BUFFERS]; - /* Reg memory mapped address */ - void __iomem *mmio; - - struct napi_struct napi; - - spinlock_t lock; /* Guard lock */ - unsigned long rx_idx, tx_idx; /* The next free ring entry */ - unsigned long tx_complete_idx; - unsigned long tx_ring_complete_idx; - unsigned long tx_ring_idx; - unsigned int rx_buff_len; /* Buffer length of rx buffers */ - int options; /* Options enabled/disabled for the device */ - - unsigned long ext_phy_option; - int ext_phy_addr; - u32 ext_phy_id; - - struct amd8111e_link_config link_config; - int pm_cap; - - struct net_device *next; - int mii; - struct mii_if_info mii_if; - char opened; - unsigned int drv_rx_errors; - struct amd8111e_coalesce_conf coal_conf; - - struct ipg_info ipg_data; - -}; - -/* kernel provided writeq does not write 64 bits into the amd8111e device register instead writes only higher 32bits data into lower 32bits of the register. -BUG? 
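The amd8111e_writeq() macro defined just below works around that by issuing the two 32-bit halves itself. A userspace model of the split, assuming a little-endian host as the macro's pointer arithmetic does: the low half lands at offset 0 and the high half at offset 4, the layout LADRF expects.

    #include <stdint.h>
    #include <stdio.h>

    /* What the two-writel split amounts to on a little-endian machine. */
    static void write64_split(uint64_t val, volatile uint32_t reg[2])
    {
            reg[0] = (uint32_t)val;         /* lower 32 bits at offset 0 */
            reg[1] = (uint32_t)(val >> 32); /* upper 32 bits at offset 4 */
    }

    int main(void)
    {
            volatile uint32_t ladrf[2];

            write64_split(0x1122334455667788ULL, ladrf);
            printf("%08x %08x\n", ladrf[0], ladrf[1]); /* 55667788 11223344 */
            return 0;
    }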
*/ -#define amd8111e_writeq(_UlData,_memMap) \ - writel(*(u32*)(&_UlData), _memMap); \ - writel(*(u32*)((u8*)(&_UlData)+4), _memMap+4) - -/* maps the external speed options to internal value */ -typedef enum { - SPEED_AUTONEG, - SPEED10_HALF, - SPEED10_FULL, - SPEED100_HALF, - SPEED100_FULL, -}EXT_PHY_OPTION; - -static int card_idx; -static int speed_duplex[MAX_UNITS] = { 0, }; -static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1}; -static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0}; -static unsigned int chip_version; - -#endif /* _AMD8111E_H */ - diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c deleted file mode 100644 index 7ed78f4..0000000 --- a/drivers/net/ariadne.c +++ /dev/null @@ -1,793 +0,0 @@ -/* - * Amiga Linux/m68k Ariadne Ethernet Driver - * - * © Copyright 1995-2003 by Geert Uytterhoeven (geert@linux-m68k.org) - * Peter De Schrijver (p2@mind.be) - * - * --------------------------------------------------------------------------- - * - * This program is based on - * - * lance.c: An AMD LANCE ethernet driver for linux. - * Written 1993-94 by Donald Becker. - * - * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller - * Advanced Micro Devices - * Publication #16907, Rev. B, Amendment/0, May 1994 - * - * MC68230: Parallel Interface/Timer (PI/T) - * Motorola Semiconductors, December, 1983 - * - * --------------------------------------------------------------------------- - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of the Linux - * distribution for more details. - * - * --------------------------------------------------------------------------- - * - * The Ariadne is a Zorro-II board made by Village Tronic. It contains: - * - * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both - * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors - * - * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -/*#define DEBUG*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "ariadne.h" - -#ifdef ARIADNE_DEBUG -int ariadne_debug = ARIADNE_DEBUG; -#else -int ariadne_debug = 1; -#endif - -/* Macros to Fix Endianness problems */ - -/* Swap the Bytes in a WORD */ -#define swapw(x) (((x >> 8) & 0x00ff) | ((x << 8) & 0xff00)) -/* Get the Low BYTE in a WORD */ -#define lowb(x) (x & 0xff) -/* Get the Swapped High WORD in a LONG */ -#define swhighw(x) ((((x) >> 8) & 0xff00) | (((x) >> 24) & 0x00ff)) -/* Get the Swapped Low WORD in a LONG */ -#define swloww(x) ((((x) << 8) & 0xff00) | (((x) >> 8) & 0x00ff)) - -/* Transmit/Receive Ring Definitions */ - -#define TX_RING_SIZE 5 -#define RX_RING_SIZE 16 - -#define PKT_BUF_SIZE 1520 - -/* Private Device Data */ - -struct ariadne_private { - volatile struct TDRE *tx_ring[TX_RING_SIZE]; - volatile struct RDRE *rx_ring[RX_RING_SIZE]; - volatile u_short *tx_buff[TX_RING_SIZE]; - volatile u_short *rx_buff[RX_RING_SIZE]; - int cur_tx, cur_rx; /* The next free ring entry */ - int dirty_tx; /* The ring entries to be free()ed */ - char tx_full; -}; - -/* Structure Created in the Ariadne's RAM Buffer */ - -struct lancedata { - struct TDRE tx_ring[TX_RING_SIZE]; - struct RDRE rx_ring[RX_RING_SIZE]; - u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)]; - u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)]; -}; - -static void memcpyw(volatile 
u_short *dest, u_short *src, int len) -{ - while (len >= 2) { - *(dest++) = *(src++); - len -= 2; - } - if (len == 1) - *dest = (*(u_char *)src) << 8; -} - -static void ariadne_init_ring(struct net_device *dev) -{ - struct ariadne_private *priv = netdev_priv(dev); - volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start; - int i; - - netif_stop_queue(dev); - - priv->tx_full = 0; - priv->cur_rx = priv->cur_tx = 0; - priv->dirty_tx = 0; - - /* Set up TX Ring */ - for (i = 0; i < TX_RING_SIZE; i++) { - volatile struct TDRE *t = &lancedata->tx_ring[i]; - t->TMD0 = swloww(ARIADNE_RAM + - offsetof(struct lancedata, tx_buff[i])); - t->TMD1 = swhighw(ARIADNE_RAM + - offsetof(struct lancedata, tx_buff[i])) | - TF_STP | TF_ENP; - t->TMD2 = swapw((u_short)-PKT_BUF_SIZE); - t->TMD3 = 0; - priv->tx_ring[i] = &lancedata->tx_ring[i]; - priv->tx_buff[i] = lancedata->tx_buff[i]; - netdev_dbg(dev, "TX Entry %2d at %p, Buf at %p\n", - i, &lancedata->tx_ring[i], lancedata->tx_buff[i]); - } - - /* Set up RX Ring */ - for (i = 0; i < RX_RING_SIZE; i++) { - volatile struct RDRE *r = &lancedata->rx_ring[i]; - r->RMD0 = swloww(ARIADNE_RAM + - offsetof(struct lancedata, rx_buff[i])); - r->RMD1 = swhighw(ARIADNE_RAM + - offsetof(struct lancedata, rx_buff[i])) | - RF_OWN; - r->RMD2 = swapw((u_short)-PKT_BUF_SIZE); - r->RMD3 = 0x0000; - priv->rx_ring[i] = &lancedata->rx_ring[i]; - priv->rx_buff[i] = lancedata->rx_buff[i]; - netdev_dbg(dev, "RX Entry %2d at %p, Buf at %p\n", - i, &lancedata->rx_ring[i], lancedata->rx_buff[i]); - } -} - -static int ariadne_rx(struct net_device *dev) -{ - struct ariadne_private *priv = netdev_priv(dev); - int entry = priv->cur_rx % RX_RING_SIZE; - int i; - - /* If we own the next entry, it's a new packet. Send it up */ - while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) { - int status = lowb(priv->rx_ring[entry]->RMD1); - - if (status != (RF_STP | RF_ENP)) { /* There was an error */ - /* There is a tricky error noted by - * John Murphy to Russ Nelson: - * Even with full-sized buffers it's possible for a - * jabber packet to use two buffers, with only the - * last correctly noting the error - */ - /* Only count a general error at the end of a packet */ - if (status & RF_ENP) - dev->stats.rx_errors++; - if (status & RF_FRAM) - dev->stats.rx_frame_errors++; - if (status & RF_OFLO) - dev->stats.rx_over_errors++; - if (status & RF_CRC) - dev->stats.rx_crc_errors++; - if (status & RF_BUFF) - dev->stats.rx_fifo_errors++; - priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP; - } else { - /* Malloc up new buffer, compatible with net-3 */ - short pkt_len = swapw(priv->rx_ring[entry]->RMD3); - struct sk_buff *skb; - - skb = dev_alloc_skb(pkt_len + 2); - if (skb == NULL) { - netdev_warn(dev, "Memory squeeze, deferring packet\n"); - for (i = 0; i < RX_RING_SIZE; i++) - if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN) - break; - - if (i > RX_RING_SIZE - 2) { - dev->stats.rx_dropped++; - priv->rx_ring[entry]->RMD1 |= RF_OWN; - priv->cur_rx++; - } - break; - } - - - skb_reserve(skb, 2); /* 16 byte align */ - skb_put(skb, pkt_len); /* Make room */ - skb_copy_to_linear_data(skb, - (const void *)priv->rx_buff[entry], - pkt_len); - skb->protocol = eth_type_trans(skb, dev); - netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n", - ((u_short *)skb->data)[6], - skb->data + 6, skb->data, - (int)skb->data, (int)skb->len); - - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - - priv->rx_ring[entry]->RMD1 |= RF_OWN; - 
entry = (++priv->cur_rx) % RX_RING_SIZE; - } - - priv->cur_rx = priv->cur_rx % RX_RING_SIZE; - - /* We should check that at least two ring entries are free. - * If not, we should free one and mark stats->rx_dropped++ - */ - - return 0; -} - -static irqreturn_t ariadne_interrupt(int irq, void *data) -{ - struct net_device *dev = (struct net_device *)data; - volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; - struct ariadne_private *priv; - int csr0, boguscnt; - int handled = 0; - - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ - - if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ - return IRQ_NONE; /* generated by the board */ - - priv = netdev_priv(dev); - - boguscnt = 10; - while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) { - /* Acknowledge all of the current interrupt sources ASAP */ - lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT); - -#ifdef DEBUG - if (ariadne_debug > 5) { - netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [", - csr0, lance->RDP); - if (csr0 & INTR) - pr_cont(" INTR"); - if (csr0 & INEA) - pr_cont(" INEA"); - if (csr0 & RXON) - pr_cont(" RXON"); - if (csr0 & TXON) - pr_cont(" TXON"); - if (csr0 & TDMD) - pr_cont(" TDMD"); - if (csr0 & STOP) - pr_cont(" STOP"); - if (csr0 & STRT) - pr_cont(" STRT"); - if (csr0 & INIT) - pr_cont(" INIT"); - if (csr0 & ERR) - pr_cont(" ERR"); - if (csr0 & BABL) - pr_cont(" BABL"); - if (csr0 & CERR) - pr_cont(" CERR"); - if (csr0 & MISS) - pr_cont(" MISS"); - if (csr0 & MERR) - pr_cont(" MERR"); - if (csr0 & RINT) - pr_cont(" RINT"); - if (csr0 & TINT) - pr_cont(" TINT"); - if (csr0 & IDON) - pr_cont(" IDON"); - pr_cont(" ]\n"); - } -#endif - - if (csr0 & RINT) { /* Rx interrupt */ - handled = 1; - ariadne_rx(dev); - } - - if (csr0 & TINT) { /* Tx-done interrupt */ - int dirty_tx = priv->dirty_tx; - - handled = 1; - while (dirty_tx < priv->cur_tx) { - int entry = dirty_tx % TX_RING_SIZE; - int status = lowb(priv->tx_ring[entry]->TMD1); - - if (status & TF_OWN) - break; /* It still hasn't been Txed */ - - priv->tx_ring[entry]->TMD1 &= 0xff00; - - if (status & TF_ERR) { - /* There was a major error, log it */ - int err_status = priv->tx_ring[entry]->TMD3; - dev->stats.tx_errors++; - if (err_status & EF_RTRY) - dev->stats.tx_aborted_errors++; - if (err_status & EF_LCAR) - dev->stats.tx_carrier_errors++; - if (err_status & EF_LCOL) - dev->stats.tx_window_errors++; - if (err_status & EF_UFLO) { - /* Ackk! On FIFO errors the Tx unit is turned off! */ - dev->stats.tx_fifo_errors++; - /* Remove this verbosity later! */ - netdev_err(dev, "Tx FIFO error! Status %04x\n", - csr0); - /* Restart the chip */ - lance->RDP = STRT; - } - } else { - if (status & (TF_MORE | TF_ONE)) - dev->stats.collisions++; - dev->stats.tx_packets++; - } - dirty_tx++; - } - -#ifndef final_version - if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) { - netdev_err(dev, "out-of-sync dirty pointer, %d vs.
%d, full=%d\n", - dirty_tx, priv->cur_tx, - priv->tx_full); - dirty_tx += TX_RING_SIZE; - } -#endif - - if (priv->tx_full && netif_queue_stopped(dev) && - dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) { - /* The ring is no longer full */ - priv->tx_full = 0; - netif_wake_queue(dev); - } - - priv->dirty_tx = dirty_tx; - } - - /* Log misc errors */ - if (csr0 & BABL) { - handled = 1; - dev->stats.tx_errors++; /* Tx babble */ - } - if (csr0 & MISS) { - handled = 1; - dev->stats.rx_errors++; /* Missed a Rx frame */ - } - if (csr0 & MERR) { - handled = 1; - netdev_err(dev, "Bus master arbitration failure, status %04x\n", - csr0); - /* Restart the chip */ - lance->RDP = STRT; - } - } - - /* Clear any other interrupt, and set interrupt enable */ - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ - lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON; - - if (ariadne_debug > 4) - netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n", - lance->RAP, lance->RDP); - - return IRQ_RETVAL(handled); -} - -static int ariadne_open(struct net_device *dev) -{ - volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; - u_short in; - u_long version; - int i; - - /* Reset the LANCE */ - in = lance->Reset; - - /* Stop the LANCE */ - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ - lance->RDP = STOP; - - /* Check the LANCE version */ - lance->RAP = CSR88; /* Chip ID */ - version = swapw(lance->RDP); - lance->RAP = CSR89; /* Chip ID */ - version |= swapw(lance->RDP) << 16; - if ((version & 0x00000fff) != 0x00000003) { - pr_warn("Couldn't find AMD Ethernet Chip\n"); - return -EAGAIN; - } - if ((version & 0x0ffff000) != 0x00003000) { - pr_warn("Couldn't find Am79C960 (Wrong part number = %ld)\n", - (version & 0x0ffff000) >> 12); - return -EAGAIN; - } - - netdev_dbg(dev, "Am79C960 (PCnet-ISA) Revision %ld\n", - (version & 0xf0000000) >> 28); - - ariadne_init_ring(dev); - - /* Miscellaneous Stuff */ - lance->RAP = CSR3; /* Interrupt Masks and Deferral Control */ - lance->RDP = 0x0000; - lance->RAP = CSR4; /* Test and Features Control */ - lance->RDP = DPOLL | APAD_XMT | MFCOM | RCVCCOM | TXSTRTM | JABM; - - /* Set the Multicast Table */ - lance->RAP = CSR8; /* Logical Address Filter, LADRF[15:0] */ - lance->RDP = 0x0000; - lance->RAP = CSR9; /* Logical Address Filter, LADRF[31:16] */ - lance->RDP = 0x0000; - lance->RAP = CSR10; /* Logical Address Filter, LADRF[47:32] */ - lance->RDP = 0x0000; - lance->RAP = CSR11; /* Logical Address Filter, LADRF[63:48] */ - lance->RDP = 0x0000; - - /* Set the Ethernet Hardware Address */ - lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */ - lance->RDP = ((u_short *)&dev->dev_addr[0])[0]; - lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */ - lance->RDP = ((u_short *)&dev->dev_addr[0])[1]; - lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */ - lance->RDP = ((u_short *)&dev->dev_addr[0])[2]; - - /* Set the Init Block Mode */ - lance->RAP = CSR15; /* Mode Register */ - lance->RDP = 0x0000; - - /* Set the Transmit Descriptor Ring Pointer */ - lance->RAP = CSR30; /* Base Address of Transmit Ring */ - lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring)); - lance->RAP = CSR31; /* Base Address of transmit Ring */ - lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring)); - - /* Set the Receive Descriptor Ring Pointer */ - lance->RAP = CSR24; /* Base Address of Receive Ring */ - lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring)); - lance->RAP = CSR25; /* Base Address of Receive 
Ring */ - lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring)); - - /* Set the Number of RX and TX Ring Entries */ - lance->RAP = CSR76; /* Receive Ring Length */ - lance->RDP = swapw(((u_short)-RX_RING_SIZE)); - lance->RAP = CSR78; /* Transmit Ring Length */ - lance->RDP = swapw(((u_short)-TX_RING_SIZE)); - - /* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */ - lance->RAP = ISACSR2; /* Miscellaneous Configuration */ - lance->IDP = ASEL; - - /* LED Control */ - lance->RAP = ISACSR5; /* LED1 Status */ - lance->IDP = PSE|XMTE; - lance->RAP = ISACSR6; /* LED2 Status */ - lance->IDP = PSE|COLE; - lance->RAP = ISACSR7; /* LED3 Status */ - lance->IDP = PSE|RCVE; - - netif_start_queue(dev); - - i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED, - dev->name, dev); - if (i) - return i; - - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ - lance->RDP = INEA | STRT; - - return 0; -} - -static int ariadne_close(struct net_device *dev) -{ - volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; - - netif_stop_queue(dev); - - lance->RAP = CSR112; /* Missed Frame Count */ - dev->stats.rx_missed_errors = swapw(lance->RDP); - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ - - if (ariadne_debug > 1) { - netdev_dbg(dev, "Shutting down ethercard, status was %02x\n", - lance->RDP); - netdev_dbg(dev, "%lu packets missed\n", - dev->stats.rx_missed_errors); - } - - /* We stop the LANCE here -- it occasionally polls memory if we don't */ - lance->RDP = STOP; - - free_irq(IRQ_AMIGA_PORTS, dev); - - return 0; -} - -static inline void ariadne_reset(struct net_device *dev) -{ - volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; - - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ - lance->RDP = STOP; - ariadne_init_ring(dev); - lance->RDP = INEA | STRT; - netif_start_queue(dev); -} - -static void ariadne_tx_timeout(struct net_device *dev) -{ - volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; - - netdev_err(dev, "transmit timed out, status %04x, resetting\n", - lance->RDP); - ariadne_reset(dev); - netif_wake_queue(dev); -} - -static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct ariadne_private *priv = netdev_priv(dev); - volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; - int entry; - unsigned long flags; - int len = skb->len; - -#if 0 - if (ariadne_debug > 3) { - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ - netdev_dbg(dev, "%s: csr0 %04x\n", __func__, lance->RDP); - lance->RDP = 0x0000; - } -#endif - - /* FIXME: is the 79C960 new enough to do its own padding right ? */ - if (skb->len < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - len = ETH_ZLEN; - } - - /* Fill in a Tx ring entry */ - - netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n", - ((u_short *)skb->data)[6], - skb->data + 6, skb->data, - (int)skb->data, (int)skb->len); - - local_irq_save(flags); - - entry = priv->cur_tx % TX_RING_SIZE; - - /* Caution: the write order is important here, set the base address with - the "ownership" bits last */ - - priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len); - priv->tx_ring[entry]->TMD3 = 0x0000; - memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len); - -#ifdef DEBUG - print_hex_dump(KERN_DEBUG, "tx_buff: ", DUMP_PREFIX_OFFSET, 16, 1, - (void *)priv->tx_buff[entry], - skb->len > 64 ? 
64 : skb->len, true); -#endif - - priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00) - | TF_OWN | TF_STP | TF_ENP; - - dev_kfree_skb(skb); - - priv->cur_tx++; - if ((priv->cur_tx >= TX_RING_SIZE) && - (priv->dirty_tx >= TX_RING_SIZE)) { - - netdev_dbg(dev, "*** Subtracting TX_RING_SIZE from cur_tx (%d) and dirty_tx (%d)\n", - priv->cur_tx, priv->dirty_tx); - - priv->cur_tx -= TX_RING_SIZE; - priv->dirty_tx -= TX_RING_SIZE; - } - dev->stats.tx_bytes += len; - - /* Trigger an immediate send poll */ - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ - lance->RDP = INEA | TDMD; - - if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) { - netif_stop_queue(dev); - priv->tx_full = 1; - } - local_irq_restore(flags); - - return NETDEV_TX_OK; -} - -static struct net_device_stats *ariadne_get_stats(struct net_device *dev) -{ - volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; - short saved_addr; - unsigned long flags; - - local_irq_save(flags); - saved_addr = lance->RAP; - lance->RAP = CSR112; /* Missed Frame Count */ - dev->stats.rx_missed_errors = swapw(lance->RDP); - lance->RAP = saved_addr; - local_irq_restore(flags); - - return &dev->stats; -} - -/* Set or clear the multicast filter for this adaptor. - * num_addrs == -1 Promiscuous mode, receive all packets - * num_addrs == 0 Normal mode, clear multicast list - * num_addrs > 0 Multicast mode, receive normal and MC packets, - * and do best-effort filtering. - */ -static void set_multicast_list(struct net_device *dev) -{ - volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; - - if (!netif_running(dev)) - return; - - netif_stop_queue(dev); - - /* We take the simple way out and always enable promiscuous mode */ - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ - lance->RDP = STOP; /* Temporarily stop the lance */ - ariadne_init_ring(dev); - - if (dev->flags & IFF_PROMISC) { - lance->RAP = CSR15; /* Mode Register */ - lance->RDP = PROM; /* Set promiscuous mode */ - } else { - short multicast_table[4]; - int num_addrs = netdev_mc_count(dev); - int i; - /* We don't use the multicast table, - * but rely on upper-layer filtering - */ - memset(multicast_table, (num_addrs == 0) ? 
0 : -1, - sizeof(multicast_table)); - for (i = 0; i < 4; i++) { - lance->RAP = CSR8 + (i << 8); - /* Logical Address Filter */ - lance->RDP = swapw(multicast_table[i]); - } - lance->RAP = CSR15; /* Mode Register */ - lance->RDP = 0x0000; /* Unset promiscuous mode */ - } - - lance->RAP = CSR0; /* PCnet-ISA Controller Status */ - lance->RDP = INEA | STRT | IDON;/* Resume normal operation */ - - netif_wake_queue(dev); -} - - -static void __devexit ariadne_remove_one(struct zorro_dev *z) -{ - struct net_device *dev = zorro_get_drvdata(z); - - unregister_netdev(dev); - release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960)); - release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE); - free_netdev(dev); -} - -static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = { - { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, - { 0 } -}; -MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl); - -static const struct net_device_ops ariadne_netdev_ops = { - .ndo_open = ariadne_open, - .ndo_stop = ariadne_close, - .ndo_start_xmit = ariadne_start_xmit, - .ndo_tx_timeout = ariadne_tx_timeout, - .ndo_get_stats = ariadne_get_stats, - .ndo_set_multicast_list = set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, -}; - -static int __devinit ariadne_init_one(struct zorro_dev *z, - const struct zorro_device_id *ent) -{ - unsigned long board = z->resource.start; - unsigned long base_addr = board + ARIADNE_LANCE; - unsigned long mem_start = board + ARIADNE_RAM; - struct resource *r1, *r2; - struct net_device *dev; - struct ariadne_private *priv; - int err; - - r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960"); - if (!r1) - return -EBUSY; - r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM"); - if (!r2) { - release_mem_region(base_addr, sizeof(struct Am79C960)); - return -EBUSY; - } - - dev = alloc_etherdev(sizeof(struct ariadne_private)); - if (dev == NULL) { - release_mem_region(base_addr, sizeof(struct Am79C960)); - release_mem_region(mem_start, ARIADNE_RAM_SIZE); - return -ENOMEM; - } - - priv = netdev_priv(dev); - - r1->name = dev->name; - r2->name = dev->name; - - dev->dev_addr[0] = 0x00; - dev->dev_addr[1] = 0x60; - dev->dev_addr[2] = 0x30; - dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff; - dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff; - dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff; - dev->base_addr = ZTWO_VADDR(base_addr); - dev->mem_start = ZTWO_VADDR(mem_start); - dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE; - - dev->netdev_ops = &ariadne_netdev_ops; - dev->watchdog_timeo = 5 * HZ; - - err = register_netdev(dev); - if (err) { - release_mem_region(base_addr, sizeof(struct Am79C960)); - release_mem_region(mem_start, ARIADNE_RAM_SIZE); - free_netdev(dev); - return err; - } - zorro_set_drvdata(z, dev); - - netdev_info(dev, "Ariadne at 0x%08lx, Ethernet Address %pM\n", - board, dev->dev_addr); - - return 0; -} - -static struct zorro_driver ariadne_driver = { - .name = "ariadne", - .id_table = ariadne_zorro_tbl, - .probe = ariadne_init_one, - .remove = __devexit_p(ariadne_remove_one), -}; - -static int __init ariadne_init_module(void) -{ - return zorro_register_driver(&ariadne_driver); -} - -static void __exit ariadne_cleanup_module(void) -{ - zorro_unregister_driver(&ariadne_driver); -} - -module_init(ariadne_init_module); -module_exit(ariadne_cleanup_module); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ariadne.h b/drivers/net/ariadne.h deleted file 
mode 100644 index 727be5c..0000000 --- a/drivers/net/ariadne.h +++ /dev/null @@ -1,415 +0,0 @@ -/* - * Amiga Linux/m68k Ariadne Ethernet Driver - * - * © Copyright 1995 by Geert Uytterhoeven (geert@linux-m68k.org) - * Peter De Schrijver - * (Peter.DeSchrijver@linux.cc.kuleuven.ac.be) - * - * ---------------------------------------------------------------------------------- - * - * This program is based on - * - * lance.c: An AMD LANCE ethernet driver for linux. - * Written 1993-94 by Donald Becker. - * - * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller - * Advanced Micro Devices - * Publication #16907, Rev. B, Amendment/0, May 1994 - * - * MC68230: Parallel Interface/Timer (PI/T) - * Motorola Semiconductors, December, 1983 - * - * ---------------------------------------------------------------------------------- - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of the Linux - * distribution for more details. - * - * ---------------------------------------------------------------------------------- - * - * The Ariadne is a Zorro-II board made by Village Tronic. It contains: - * - * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both - * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors - * - * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports - */ - - - /* - * Am79C960 PCnet-ISA - */ - -struct Am79C960 { - volatile u_short AddressPROM[8]; - /* IEEE Address PROM (Unused in the Ariadne) */ - volatile u_short RDP; /* Register Data Port */ - volatile u_short RAP; /* Register Address Port */ - volatile u_short Reset; /* Reset Chip on Read Access */ - volatile u_short IDP; /* ISACSR Data Port */ -}; - - - /* - * Am79C960 Control and Status Registers - * - * These values are already swap()ed!! 
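/*
 * A minimal sketch (not from the original source) of the access protocol
 * implied by the struct above: every CSR access is indirect.  Software
 * writes the (pre-swapped) register number to RAP and then moves data
 * through RDP; IDP plays the same role for the ISACSRs.  Assuming a mapped
 * struct Am79C960 as declared above, a pair of helpers might look like:
 */
static u_short lance_read_csr(volatile struct Am79C960 *lance, u_short csr)
{
	lance->RAP = csr;	/* select the register... */
	return lance->RDP;	/* ...then read it through the data port */
}

static void lance_write_csr(volatile struct Am79C960 *lance,
			    u_short csr, u_short val)
{
	lance->RAP = csr;	/* select the register... */
	lance->RDP = val;	/* ...then write it through the data port */
}
/*
 * e.g. lance_write_csr(lance, CSR0, STOP) mirrors the open/close sequences
 * in ariadne.c above, with CSR0 and STOP being the pre-swapped constants
 * defined below.
 */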
- * - * Only registers marked with a `-' are intended for network software - * access - */ - -#define CSR0 0x0000 /* - PCnet-ISA Controller Status */ -#define CSR1 0x0100 /* - IADR[15:0] */ -#define CSR2 0x0200 /* - IADR[23:16] */ -#define CSR3 0x0300 /* - Interrupt Masks and Deferral Control */ -#define CSR4 0x0400 /* - Test and Features Control */ -#define CSR6 0x0600 /* RCV/XMT Descriptor Table Length */ -#define CSR8 0x0800 /* - Logical Address Filter, LADRF[15:0] */ -#define CSR9 0x0900 /* - Logical Address Filter, LADRF[31:16] */ -#define CSR10 0x0a00 /* - Logical Address Filter, LADRF[47:32] */ -#define CSR11 0x0b00 /* - Logical Address Filter, LADRF[63:48] */ -#define CSR12 0x0c00 /* - Physical Address Register, PADR[15:0] */ -#define CSR13 0x0d00 /* - Physical Address Register, PADR[31:16] */ -#define CSR14 0x0e00 /* - Physical Address Register, PADR[47:32] */ -#define CSR15 0x0f00 /* - Mode Register */ -#define CSR16 0x1000 /* Initialization Block Address Lower */ -#define CSR17 0x1100 /* Initialization Block Address Upper */ -#define CSR18 0x1200 /* Current Receive Buffer Address */ -#define CSR19 0x1300 /* Current Receive Buffer Address */ -#define CSR20 0x1400 /* Current Transmit Buffer Address */ -#define CSR21 0x1500 /* Current Transmit Buffer Address */ -#define CSR22 0x1600 /* Next Receive Buffer Address */ -#define CSR23 0x1700 /* Next Receive Buffer Address */ -#define CSR24 0x1800 /* - Base Address of Receive Ring */ -#define CSR25 0x1900 /* - Base Address of Receive Ring */ -#define CSR26 0x1a00 /* Next Receive Descriptor Address */ -#define CSR27 0x1b00 /* Next Receive Descriptor Address */ -#define CSR28 0x1c00 /* Current Receive Descriptor Address */ -#define CSR29 0x1d00 /* Current Receive Descriptor Address */ -#define CSR30 0x1e00 /* - Base Address of Transmit Ring */ -#define CSR31 0x1f00 /* - Base Address of transmit Ring */ -#define CSR32 0x2000 /* Next Transmit Descriptor Address */ -#define CSR33 0x2100 /* Next Transmit Descriptor Address */ -#define CSR34 0x2200 /* Current Transmit Descriptor Address */ -#define CSR35 0x2300 /* Current Transmit Descriptor Address */ -#define CSR36 0x2400 /* Next Next Receive Descriptor Address */ -#define CSR37 0x2500 /* Next Next Receive Descriptor Address */ -#define CSR38 0x2600 /* Next Next Transmit Descriptor Address */ -#define CSR39 0x2700 /* Next Next Transmit Descriptor Address */ -#define CSR40 0x2800 /* Current Receive Status and Byte Count */ -#define CSR41 0x2900 /* Current Receive Status and Byte Count */ -#define CSR42 0x2a00 /* Current Transmit Status and Byte Count */ -#define CSR43 0x2b00 /* Current Transmit Status and Byte Count */ -#define CSR44 0x2c00 /* Next Receive Status and Byte Count */ -#define CSR45 0x2d00 /* Next Receive Status and Byte Count */ -#define CSR46 0x2e00 /* Poll Time Counter */ -#define CSR47 0x2f00 /* Polling Interval */ -#define CSR48 0x3000 /* Temporary Storage */ -#define CSR49 0x3100 /* Temporary Storage */ -#define CSR50 0x3200 /* Temporary Storage */ -#define CSR51 0x3300 /* Temporary Storage */ -#define CSR52 0x3400 /* Temporary Storage */ -#define CSR53 0x3500 /* Temporary Storage */ -#define CSR54 0x3600 /* Temporary Storage */ -#define CSR55 0x3700 /* Temporary Storage */ -#define CSR56 0x3800 /* Temporary Storage */ -#define CSR57 0x3900 /* Temporary Storage */ -#define CSR58 0x3a00 /* Temporary Storage */ -#define CSR59 0x3b00 /* Temporary Storage */ -#define CSR60 0x3c00 /* Previous Transmit Descriptor Address */ -#define CSR61 0x3d00 /* Previous Transmit Descriptor 
Address */ -#define CSR62 0x3e00 /* Previous Transmit Status and Byte Count */ -#define CSR63 0x3f00 /* Previous Transmit Status and Byte Count */ -#define CSR64 0x4000 /* Next Transmit Buffer Address */ -#define CSR65 0x4100 /* Next Transmit Buffer Address */ -#define CSR66 0x4200 /* Next Transmit Status and Byte Count */ -#define CSR67 0x4300 /* Next Transmit Status and Byte Count */ -#define CSR68 0x4400 /* Transmit Status Temporary Storage */ -#define CSR69 0x4500 /* Transmit Status Temporary Storage */ -#define CSR70 0x4600 /* Temporary Storage */ -#define CSR71 0x4700 /* Temporary Storage */ -#define CSR72 0x4800 /* Receive Ring Counter */ -#define CSR74 0x4a00 /* Transmit Ring Counter */ -#define CSR76 0x4c00 /* - Receive Ring Length */ -#define CSR78 0x4e00 /* - Transmit Ring Length */ -#define CSR80 0x5000 /* - Burst and FIFO Threshold Control */ -#define CSR82 0x5200 /* - Bus Activity Timer */ -#define CSR84 0x5400 /* DMA Address */ -#define CSR85 0x5500 /* DMA Address */ -#define CSR86 0x5600 /* Buffer Byte Counter */ -#define CSR88 0x5800 /* - Chip ID */ -#define CSR89 0x5900 /* - Chip ID */ -#define CSR92 0x5c00 /* Ring Length Conversion */ -#define CSR94 0x5e00 /* Transmit Time Domain Reflectometry Count */ -#define CSR96 0x6000 /* Bus Interface Scratch Register 0 */ -#define CSR97 0x6100 /* Bus Interface Scratch Register 0 */ -#define CSR98 0x6200 /* Bus Interface Scratch Register 1 */ -#define CSR99 0x6300 /* Bus Interface Scratch Register 1 */ -#define CSR104 0x6800 /* SWAP */ -#define CSR105 0x6900 /* SWAP */ -#define CSR108 0x6c00 /* Buffer Management Scratch */ -#define CSR109 0x6d00 /* Buffer Management Scratch */ -#define CSR112 0x7000 /* - Missed Frame Count */ -#define CSR114 0x7200 /* - Receive Collision Count */ -#define CSR124 0x7c00 /* - Buffer Management Unit Test */ - - - /* - * Am79C960 ISA Control and Status Registers - * - * These values are already swap()ed!! - */ - -#define ISACSR0 0x0000 /* Master Mode Read Active */ -#define ISACSR1 0x0100 /* Master Mode Write Active */ -#define ISACSR2 0x0200 /* Miscellaneous Configuration */ -#define ISACSR4 0x0400 /* LED0 Status (Link Integrity) */ -#define ISACSR5 0x0500 /* LED1 Status */ -#define ISACSR6 0x0600 /* LED2 Status */ -#define ISACSR7 0x0700 /* LED3 Status */ - - - /* - * Bit definitions for CSR0 (PCnet-ISA Controller Status) - * - * These values are already swap()ed!! - */ - -#define ERR 0x0080 /* Error */ -#define BABL 0x0040 /* Babble: Transmitted too many bits */ -#define CERR 0x0020 /* No Heartbeat (10BASE-T) */ -#define MISS 0x0010 /* Missed Frame */ -#define MERR 0x0008 /* Memory Error */ -#define RINT 0x0004 /* Receive Interrupt */ -#define TINT 0x0002 /* Transmit Interrupt */ -#define IDON 0x0001 /* Initialization Done */ -#define INTR 0x8000 /* Interrupt Flag */ -#define INEA 0x4000 /* Interrupt Enable */ -#define RXON 0x2000 /* Receive On */ -#define TXON 0x1000 /* Transmit On */ -#define TDMD 0x0800 /* Transmit Demand */ -#define STOP 0x0400 /* Stop */ -#define STRT 0x0200 /* Start */ -#define INIT 0x0100 /* Initialize */ - - - /* - * Bit definitions for CSR3 (Interrupt Masks and Deferral Control) - * - * These values are already swap()ed!! 
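/*
 * A worked example (an addition, not text from the original header) of what
 * "already swap()ed" means: the big-endian 68k sees the LANCE's 16-bit
 * little-endian registers byte-swapped, so every constant is stored as the
 * driver's swapw() of its native value.  Register number 112 (the missed
 * frame count) is natively 0x0070, and swapw(0x0070) == 0x7000, exactly the
 * CSR112 definition above; likewise ERR, natively bit 15 (0x8000) of CSR0,
 * appears above as swapw(0x8000) == 0x0080.  With the constants
 * pre-swapped, code such as
 *
 *	lance->RAP = CSR112;
 *	missed = swapw(lance->RDP);
 *
 * needs no per-access swapping of the register number, only of the data.
 */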
- */ - -#define BABLM 0x0040 /* Babble Mask */ -#define MISSM 0x0010 /* Missed Frame Mask */ -#define MERRM 0x0008 /* Memory Error Mask */ -#define RINTM 0x0004 /* Receive Interrupt Mask */ -#define TINTM 0x0002 /* Transmit Interrupt Mask */ -#define IDONM 0x0001 /* Initialization Done Mask */ -#define DXMT2PD 0x1000 /* Disable Transmit Two Part Deferral */ -#define EMBA 0x0800 /* Enable Modified Back-off Algorithm */ - - - /* - * Bit definitions for CSR4 (Test and Features Control) - * - * These values are already swap()ed!! - */ - -#define ENTST 0x0080 /* Enable Test Mode */ -#define DMAPLUS 0x0040 /* Disable Burst Transaction Counter */ -#define TIMER 0x0020 /* Timer Enable Register */ -#define DPOLL 0x0010 /* Disable Transmit Polling */ -#define APAD_XMT 0x0008 /* Auto Pad Transmit */ -#define ASTRP_RCV 0x0004 /* Auto Pad Stripping */ -#define MFCO 0x0002 /* Missed Frame Counter Overflow Interrupt */ -#define MFCOM 0x0001 /* Missed Frame Counter Overflow Mask */ -#define RCVCCO 0x2000 /* Receive Collision Counter Overflow Interrupt */ -#define RCVCCOM 0x1000 /* Receive Collision Counter Overflow Mask */ -#define TXSTRT 0x0800 /* Transmit Start Status */ -#define TXSTRTM 0x0400 /* Transmit Start Mask */ -#define JAB 0x0200 /* Jabber Error */ -#define JABM 0x0100 /* Jabber Error Mask */ - - - /* - * Bit definitions for CSR15 (Mode Register) - * - * These values are already swap()ed!! - */ - -#define PROM 0x0080 /* Promiscuous Mode */ -#define DRCVBC 0x0040 /* Disable Receive Broadcast */ -#define DRCVPA 0x0020 /* Disable Receive Physical Address */ -#define DLNKTST 0x0010 /* Disable Link Status */ -#define DAPC 0x0008 /* Disable Automatic Polarity Correction */ -#define MENDECL 0x0004 /* MENDEC Loopback Mode */ -#define LRTTSEL 0x0002 /* Low Receive Threshold/Transmit Mode Select */ -#define PORTSEL1 0x0001 /* Port Select Bits */ -#define PORTSEL2 0x8000 /* Port Select Bits */ -#define INTL 0x4000 /* Internal Loopback */ -#define DRTY 0x2000 /* Disable Retry */ -#define FCOLL 0x1000 /* Force Collision */ -#define DXMTFCS 0x0800 /* Disable Transmit CRC */ -#define LOOP 0x0400 /* Loopback Enable */ -#define DTX 0x0200 /* Disable Transmitter */ -#define DRX 0x0100 /* Disable Receiver */ - - - /* - * Bit definitions for ISACSR2 (Miscellaneous Configuration) - * - * These values are already swap()ed!! - */ - -#define ASEL 0x0200 /* Media Interface Port Auto Select */ - - - /* - * Bit definitions for ISACSR5-7 (LED1-3 Status) - * - * These values are already swap()ed!! 
- */ - -#define LEDOUT 0x0080 /* Current LED Status */ -#define PSE 0x8000 /* Pulse Stretcher Enable */ -#define XMTE 0x1000 /* Enable Transmit Status Signal */ -#define RVPOLE 0x0800 /* Enable Receive Polarity Signal */ -#define RCVE 0x0400 /* Enable Receive Status Signal */ -#define JABE 0x0200 /* Enable Jabber Signal */ -#define COLE 0x0100 /* Enable Collision Signal */ - - - /* - * Receive Descriptor Ring Entry - */ - -struct RDRE { - volatile u_short RMD0; /* LADR[15:0] */ - volatile u_short RMD1; /* HADR[23:16] | Receive Flags */ - volatile u_short RMD2; /* Buffer Byte Count (two's complement) */ - volatile u_short RMD3; /* Message Byte Count */ -}; - - - /* - * Transmit Descriptor Ring Entry - */ - -struct TDRE { - volatile u_short TMD0; /* LADR[15:0] */ - volatile u_short TMD1; /* HADR[23:16] | Transmit Flags */ - volatile u_short TMD2; /* Buffer Byte Count (two's complement) */ - volatile u_short TMD3; /* Error Flags */ -}; - - - /* - * Receive Flags - */ - -#define RF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */ -#define RF_ERR 0x0040 /* Error */ -#define RF_FRAM 0x0020 /* Framing Error */ -#define RF_OFLO 0x0010 /* Overflow Error */ -#define RF_CRC 0x0008 /* CRC Error */ -#define RF_BUFF 0x0004 /* Buffer Error */ -#define RF_STP 0x0002 /* Start of Packet */ -#define RF_ENP 0x0001 /* End of Packet */ - - - /* - * Transmit Flags - */ - -#define TF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */ -#define TF_ERR 0x0040 /* Error */ -#define TF_ADD_FCS 0x0020 /* Controls FCS Generation */ -#define TF_MORE 0x0010 /* More than one retry needed */ -#define TF_ONE 0x0008 /* One retry needed */ -#define TF_DEF 0x0004 /* Deferred */ -#define TF_STP 0x0002 /* Start of Packet */ -#define TF_ENP 0x0001 /* End of Packet */ - - - /* - * Error Flags - */ - -#define EF_BUFF 0x0080 /* Buffer Error */ -#define EF_UFLO 0x0040 /* Underflow Error */ -#define EF_LCOL 0x0010 /* Late Collision */ -#define EF_LCAR 0x0008 /* Loss of Carrier */ -#define EF_RTRY 0x0004 /* Retry Error */ -#define EF_TDR 0xff03 /* Time Domain Reflectometry */ - - - - /* - * MC68230 Parallel Interface/Timer - */ - -struct MC68230 { - volatile u_char PGCR; /* Port General Control Register */ - u_char Pad1[1]; - volatile u_char PSRR; /* Port Service Request Register */ - u_char Pad2[1]; - volatile u_char PADDR; /* Port A Data Direction Register */ - u_char Pad3[1]; - volatile u_char PBDDR; /* Port B Data Direction Register */ - u_char Pad4[1]; - volatile u_char PCDDR; /* Port C Data Direction Register */ - u_char Pad5[1]; - volatile u_char PIVR; /* Port Interrupt Vector Register */ - u_char Pad6[1]; - volatile u_char PACR; /* Port A Control Register */ - u_char Pad7[1]; - volatile u_char PBCR; /* Port B Control Register */ - u_char Pad8[1]; - volatile u_char PADR; /* Port A Data Register */ - u_char Pad9[1]; - volatile u_char PBDR; /* Port B Data Register */ - u_char Pad10[1]; - volatile u_char PAAR; /* Port A Alternate Register */ - u_char Pad11[1]; - volatile u_char PBAR; /* Port B Alternate Register */ - u_char Pad12[1]; - volatile u_char PCDR; /* Port C Data Register */ - u_char Pad13[1]; - volatile u_char PSR; /* Port Status Register */ - u_char Pad14[5]; - volatile u_char TCR; /* Timer Control Register */ - u_char Pad15[1]; - volatile u_char TIVR; /* Timer Interrupt Vector Register */ - u_char Pad16[3]; - volatile u_char CPRH; /* Counter Preload Register (High) */ - u_char Pad17[1]; - volatile u_char CPRM; /* Counter Preload Register (Mid) */ - u_char Pad18[1]; - volatile u_char CPRL; /* Counter 
Preload Register (Low) */ - u_char Pad19[3]; - volatile u_char CNTRH; /* Count Register (High) */ - u_char Pad20[1]; - volatile u_char CNTRM; /* Count Register (Mid) */ - u_char Pad21[1]; - volatile u_char CNTRL; /* Count Register (Low) */ - u_char Pad22[1]; - volatile u_char TSR; /* Timer Status Register */ - u_char Pad23[11]; -}; - - - /* - * Ariadne Expansion Board Structure - */ - -#define ARIADNE_LANCE 0x360 - -#define ARIADNE_PIT 0x1000 - -#define ARIADNE_BOOTPROM 0x4000 /* I guess it's here :-) */ -#define ARIADNE_BOOTPROM_SIZE 0x4000 - -#define ARIADNE_RAM 0x8000 /* Always access WORDs!! */ -#define ARIADNE_RAM_SIZE 0x8000 - diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig index 39e1c0d..d0c8cf2 100644 --- a/drivers/net/arm/Kconfig +++ b/drivers/net/arm/Kconfig @@ -2,13 +2,6 @@ # Acorn Network device configuration # These are for Acorn's Expansion card network interfaces # -config ARM_AM79C961A - bool "ARM EBSA110 AM79C961A support" - depends on ARM && ARCH_EBSA110 - select CRC32 - help - If you wish to compile a kernel for the EBSA-110, then you should - always answer Y to this. config ARM_ETHER1 tristate "Acorn Ether1 support" diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile index 303171f..63c57be3 100644 --- a/drivers/net/arm/Makefile +++ b/drivers/net/arm/Makefile @@ -3,7 +3,6 @@ # Makefile for the ARM network device drivers # -obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o obj-$(CONFIG_ARM_ETHERH) += etherh.o obj-$(CONFIG_ARM_ETHER3) += ether3.o obj-$(CONFIG_ARM_ETHER1) += ether1.o diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c deleted file mode 100644 index 52fe21e..0000000 --- a/drivers/net/arm/am79c961a.c +++ /dev/null @@ -1,767 +0,0 @@ -/* - * linux/drivers/net/am79c961.c - * - * by Russell King 1995-2001. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Derived from various things including skeleton.c - * - * This is a special driver for the am79c961A Lance chip used in the - * Intel (formerly Digital Equipment Corp) EBSA110 platform. Please - * note that this cannot be built as a module (it doesn't make sense).
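/*
 * For orientation (a sketch under the same RAP/RDP model as the Ariadne
 * driver above, not code from this file): the assembler accessors below
 * hard-wire the EBSA110's ISA I/O window, with NET_RAP at ISAIO_BASE +
 * 0x0464 and NET_RDP four bytes below it.  Written in plain C they would
 * read roughly as follows (ISAIO_BASE comes from the platform headers):
 */
static void write_rreg_c(u_int reg, u_int val)
{
	void __iomem *rap = (void __iomem *)(ISAIO_BASE + 0x0464);

	__raw_writew(reg, rap);		/* NET_RAP: select the CSR */
	__raw_writew(val, rap - 4);	/* NET_RDP: write its data */
}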
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define TX_BUFFERS 15 -#define RX_BUFFERS 25 - -#include "am79c961a.h" - -static irqreturn_t -am79c961_interrupt (int irq, void *dev_id); - -static unsigned int net_debug = NET_DEBUG; - -static const char version[] = - "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n"; - -/* --------------------------------------------------------------------------- */ - -#ifdef __arm__ -static void write_rreg(u_long base, u_int reg, u_int val) -{ - asm volatile( - "str%?h %1, [%2] @ NET_RAP\n\t" - "str%?h %0, [%2, #-4] @ NET_RDP" - : - : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); -} - -static inline unsigned short read_rreg(u_long base_addr, u_int reg) -{ - unsigned short v; - asm volatile( - "str%?h %1, [%2] @ NET_RAP\n\t" - "ldr%?h %0, [%2, #-4] @ NET_RDP" - : "=r" (v) - : "r" (reg), "r" (ISAIO_BASE + 0x0464)); - return v; -} - -static inline void write_ireg(u_long base, u_int reg, u_int val) -{ - asm volatile( - "str%?h %1, [%2] @ NET_RAP\n\t" - "str%?h %0, [%2, #8] @ NET_IDP" - : - : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); -} - -static inline unsigned short read_ireg(u_long base_addr, u_int reg) -{ - u_short v; - asm volatile( - "str%?h %1, [%2] @ NAT_RAP\n\t" - "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" - : "=r" (v) - : "r" (reg), "r" (ISAIO_BASE + 0x0464)); - return v; -} - -#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1)) -#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1)) - -static void -am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) -{ - offset = ISAMEM_BASE + (offset << 1); - length = (length + 1) & ~1; - if ((int)buf & 2) { - asm volatile("str%?h %2, [%0], #4" - : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); - buf += 2; - length -= 2; - } - while (length > 8) { - register unsigned int tmp asm("r2"), tmp2 asm("r3"); - asm volatile( - "ldm%?ia %0!, {%1, %2}" - : "+r" (buf), "=&r" (tmp), "=&r" (tmp2)); - length -= 8; - asm volatile( - "str%?h %1, [%0], #4\n\t" - "mov%? %1, %1, lsr #16\n\t" - "str%?h %1, [%0], #4\n\t" - "str%?h %2, [%0], #4\n\t" - "mov%? %2, %2, lsr #16\n\t" - "str%?h %2, [%0], #4" - : "+r" (offset), "=&r" (tmp), "=&r" (tmp2)); - } - while (length > 0) { - asm volatile("str%?h %2, [%0], #4" - : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); - buf += 2; - length -= 2; - } -} - -static void -am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) -{ - offset = ISAMEM_BASE + (offset << 1); - length = (length + 1) & ~1; - if ((int)buf & 2) { - unsigned int tmp; - asm volatile( - "ldr%?h %2, [%0], #4\n\t" - "str%?b %2, [%1], #1\n\t" - "mov%? %2, %2, lsr #8\n\t" - "str%?b %2, [%1], #1" - : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); - length -= 2; - } - while (length > 8) { - register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3; - asm volatile( - "ldr%?h %2, [%0], #4\n\t" - "ldr%?h %4, [%0], #4\n\t" - "ldr%?h %3, [%0], #4\n\t" - "orr%? %2, %2, %4, lsl #16\n\t" - "ldr%?h %4, [%0], #4\n\t" - "orr%? %3, %3, %4, lsl #16\n\t" - "stm%?ia %1!, {%2, %3}" - : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) - : "0" (offset), "1" (buf)); - length -= 8; - } - while (length > 0) { - unsigned int tmp; - asm volatile( - "ldr%?h %2, [%0], #4\n\t" - "str%?b %2, [%1], #1\n\t" - "mov%? 
%2, %2, lsr #8\n\t" - "str%?b %2, [%1], #1" - : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); - length -= 2; - } -} -#else -#error Not compatible -#endif - -static int -am79c961_ramtest(struct net_device *dev, unsigned int val) -{ - unsigned char *buffer = kmalloc (65536, GFP_KERNEL); - int i, error = 0, errorcount = 0; - - if (!buffer) - return 0; - memset (buffer, val, 65536); - am_writebuffer(dev, 0, buffer, 65536); - memset (buffer, val ^ 255, 65536); - am_readbuffer(dev, 0, buffer, 65536); - for (i = 0; i < 65536; i++) { - if (buffer[i] != val && !error) { - printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i); - error = 1; - errorcount ++; - } else if (error && buffer[i] == val) { - printk ("%05X\n", i); - error = 0; - } - } - if (error) - printk ("10000\n"); - kfree (buffer); - return errorcount; -} - -static void am79c961_mc_hash(char *addr, u16 *hash) -{ - int idx, bit; - u32 crc; - - crc = ether_crc_le(ETH_ALEN, addr); - - idx = crc >> 30; - bit = (crc >> 26) & 15; - - hash[idx] |= 1 << bit; -} - -static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash) -{ - unsigned int mode = MODE_PORT_10BT; - - if (dev->flags & IFF_PROMISC) { - mode |= MODE_PROMISC; - memset(hash, 0xff, 4 * sizeof(*hash)); - } else if (dev->flags & IFF_ALLMULTI) { - memset(hash, 0xff, 4 * sizeof(*hash)); - } else { - struct netdev_hw_addr *ha; - - memset(hash, 0, 4 * sizeof(*hash)); - - netdev_for_each_mc_addr(ha, dev) - am79c961_mc_hash(ha->addr, hash); - } - - return mode; -} - -static void -am79c961_init_for_open(struct net_device *dev) -{ - struct dev_priv *priv = netdev_priv(dev); - unsigned long flags; - unsigned char *p; - u_int hdr_addr, first_free_addr; - u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash); - int i; - - /* - * Stop the chip. 
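/*
 * A self-contained sketch (not from the driver) of the LADRF hashing done
 * by am79c961_mc_hash() above: ether_crc_le() is the standard little-endian
 * CRC-32 (polynomial 0xedb88320, initial value ~0, no final inversion), and
 * the top six bits of the result pick one of the 64 logical address filter
 * bits -- two bits select the 16-bit word, four bits select the bit in it.
 */
static u32 crc32_le_bitwise(const u8 *data, int len)
{
	u32 crc = ~0;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];			/* fold in the next byte, LSB first */
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;			/* no final xor, matching ether_crc_le() */
}

static void ladrf_add(const u8 *mac, u16 hash[4])
{
	u32 crc = crc32_le_bitwise(mac, 6);

	hash[crc >> 30] |= 1 << ((crc >> 26) & 15);	/* as in the code above */
}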
- */ - spin_lock_irqsave(&priv->chip_lock, flags); - write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP); - spin_unlock_irqrestore(&priv->chip_lock, flags); - - write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */ - write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */ - write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */ - write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */ - - for (i = LADRL; i <= LADRH; i++) - write_rreg (dev->base_addr, i, multi_hash[i - LADRL]); - - for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2) - write_rreg (dev->base_addr, i, p[0] | (p[1] << 8)); - - write_rreg (dev->base_addr, MODE, mode); - write_rreg (dev->base_addr, POLLINT, 0); - write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS); - write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS); - - first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16; - hdr_addr = 0; - - priv->rxhead = 0; - priv->rxtail = 0; - priv->rxhdr = hdr_addr; - - for (i = 0; i < RX_BUFFERS; i++) { - priv->rxbuffer[i] = first_free_addr; - am_writeword (dev, hdr_addr, first_free_addr); - am_writeword (dev, hdr_addr + 2, RMD_OWN); - am_writeword (dev, hdr_addr + 4, (-1600)); - am_writeword (dev, hdr_addr + 6, 0); - first_free_addr += 1600; - hdr_addr += 8; - } - priv->txhead = 0; - priv->txtail = 0; - priv->txhdr = hdr_addr; - for (i = 0; i < TX_BUFFERS; i++) { - priv->txbuffer[i] = first_free_addr; - am_writeword (dev, hdr_addr, first_free_addr); - am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP); - am_writeword (dev, hdr_addr + 4, 0xf000); - am_writeword (dev, hdr_addr + 6, 0); - first_free_addr += 1600; - hdr_addr += 8; - } - - write_rreg (dev->base_addr, BASERXL, priv->rxhdr); - write_rreg (dev->base_addr, BASERXH, 0); - write_rreg (dev->base_addr, BASETXL, priv->txhdr); - write_rreg (dev->base_addr, BASERXH, 0); - write_rreg (dev->base_addr, CSR0, CSR0_STOP); - write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO); - write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM); - write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT); -} - -static void am79c961_timer(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct dev_priv *priv = netdev_priv(dev); - unsigned int lnkstat, carrier; - - lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; - carrier = netif_carrier_ok(dev); - - if (lnkstat && !carrier) { - netif_carrier_on(dev); - printk("%s: link up\n", dev->name); - } else if (!lnkstat && carrier) { - netif_carrier_off(dev); - printk("%s: link down\n", dev->name); - } - - mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500)); -} - -/* - * Open/initialize the board. - */ -static int -am79c961_open(struct net_device *dev) -{ - struct dev_priv *priv = netdev_priv(dev); - int ret; - - ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev); - if (ret) - return ret; - - am79c961_init_for_open(dev); - - netif_carrier_off(dev); - - priv->timer.expires = jiffies; - add_timer(&priv->timer); - - netif_start_queue(dev); - - return 0; -} - -/* - * The inverse routine to am79c961_open(). 
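/*
 * A side note with a worked example (an addition, not original text): like
 * the Ariadne driver above, the LANCE core takes buffer byte counts and
 * ring sizes as two's-complement negatives.  The -1600 written to
 * hdr_addr + 4 above is 0xf9c0 as a 16-bit word, which the chip reads as
 * "a 1600-byte buffer"; SIZERXR = -RX_BUFFERS and SIZETXR = -TX_BUFFERS
 * follow the same rule.  A hypothetical helper making the encoding
 * explicit:
 */
static u_short lance_neg16(unsigned int count)
{
	return (u_short)(0x10000 - count);	/* same bits as (u_short)-count */
}
/* lance_neg16(1600) == 0xf9c0; lance_neg16(RX_BUFFERS) == (u_short)-RX_BUFFERS */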
- */ -static int -am79c961_close(struct net_device *dev) -{ - struct dev_priv *priv = netdev_priv(dev); - unsigned long flags; - - del_timer_sync(&priv->timer); - - netif_stop_queue(dev); - netif_carrier_off(dev); - - spin_lock_irqsave(&priv->chip_lock, flags); - write_rreg (dev->base_addr, CSR0, CSR0_STOP); - write_rreg (dev->base_addr, CSR3, CSR3_MASKALL); - spin_unlock_irqrestore(&priv->chip_lock, flags); - - free_irq (dev->irq, dev); - - return 0; -} - -/* - * Set or clear promiscuous/multicast mode filter for this adapter. - */ -static void am79c961_setmulticastlist (struct net_device *dev) -{ - struct dev_priv *priv = netdev_priv(dev); - unsigned long flags; - u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash); - int i, stopped; - - spin_lock_irqsave(&priv->chip_lock, flags); - - stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP; - - if (!stopped) { - /* - * Put the chip into suspend mode - */ - write_rreg(dev->base_addr, CTRL1, CTRL1_SPND); - - /* - * Spin waiting for chip to report suspend mode - */ - while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) { - spin_unlock_irqrestore(&priv->chip_lock, flags); - nop(); - spin_lock_irqsave(&priv->chip_lock, flags); - } - } - - /* - * Update the multicast hash table - */ - for (i = 0; i < ARRAY_SIZE(multi_hash); i++) - write_rreg(dev->base_addr, i + LADRL, multi_hash[i]); - - /* - * Write the mode register - */ - write_rreg(dev->base_addr, MODE, mode); - - if (!stopped) { - /* - * Put the chip back into running mode - */ - write_rreg(dev->base_addr, CTRL1, 0); - } - - spin_unlock_irqrestore(&priv->chip_lock, flags); -} - -static void am79c961_timeout(struct net_device *dev) -{ - printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n", - dev->name); - - /* - * ought to do some setup of the tx side here - */ - - netif_wake_queue(dev); -} - -/* - * Transmit a packet - */ -static int -am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) -{ - struct dev_priv *priv = netdev_priv(dev); - unsigned int hdraddr, bufaddr; - unsigned int head; - unsigned long flags; - - head = priv->txhead; - hdraddr = priv->txhdr + (head << 3); - bufaddr = priv->txbuffer[head]; - head += 1; - if (head >= TX_BUFFERS) - head = 0; - - am_writebuffer (dev, bufaddr, skb->data, skb->len); - am_writeword (dev, hdraddr + 4, -skb->len); - am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP); - priv->txhead = head; - - spin_lock_irqsave(&priv->chip_lock, flags); - write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA); - spin_unlock_irqrestore(&priv->chip_lock, flags); - - /* - * If the next packet is owned by the ethernet device, - * then the tx ring is full and we can't add another - * packet. - */ - if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN) - netif_stop_queue(dev); - - dev_kfree_skb(skb); - - return NETDEV_TX_OK; -} - -/* - * If we have a good packet(s), get it/them out of the buffers. - */ -static void -am79c961_rx(struct net_device *dev, struct dev_priv *priv) -{ - do { - struct sk_buff *skb; - u_int hdraddr; - u_int pktaddr; - u_int status; - int len; - - hdraddr = priv->rxhdr + (priv->rxtail << 3); - pktaddr = priv->rxbuffer[priv->rxtail]; - - status = am_readword (dev, hdraddr + 2); - if (status & RMD_OWN) /* do we own it? 
*/ - break; - - priv->rxtail ++; - if (priv->rxtail >= RX_BUFFERS) - priv->rxtail = 0; - - if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) { - am_writeword (dev, hdraddr + 2, RMD_OWN); - dev->stats.rx_errors++; - if (status & RMD_ERR) { - if (status & RMD_FRAM) - dev->stats.rx_frame_errors++; - if (status & RMD_CRC) - dev->stats.rx_crc_errors++; - } else if (status & RMD_STP) - dev->stats.rx_length_errors++; - continue; - } - - len = am_readword(dev, hdraddr + 6); - skb = dev_alloc_skb(len + 2); - - if (skb) { - skb_reserve(skb, 2); - - am_readbuffer(dev, pktaddr, skb_put(skb, len), len); - am_writeword(dev, hdraddr + 2, RMD_OWN); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_bytes += len; - dev->stats.rx_packets++; - } else { - am_writeword (dev, hdraddr + 2, RMD_OWN); - printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - break; - } - } while (1); -} - -/* - * Update stats for the transmitted packet - */ -static void -am79c961_tx(struct net_device *dev, struct dev_priv *priv) -{ - do { - short len; - u_int hdraddr; - u_int status; - - hdraddr = priv->txhdr + (priv->txtail << 3); - status = am_readword (dev, hdraddr + 2); - if (status & TMD_OWN) - break; - - priv->txtail ++; - if (priv->txtail >= TX_BUFFERS) - priv->txtail = 0; - - if (status & TMD_ERR) { - u_int status2; - - dev->stats.tx_errors++; - - status2 = am_readword (dev, hdraddr + 6); - - /* - * Clear the error byte - */ - am_writeword (dev, hdraddr + 6, 0); - - if (status2 & TST_RTRY) - dev->stats.collisions += 16; - if (status2 & TST_LCOL) - dev->stats.tx_window_errors++; - if (status2 & TST_LCAR) - dev->stats.tx_carrier_errors++; - if (status2 & TST_UFLO) - dev->stats.tx_fifo_errors++; - continue; - } - dev->stats.tx_packets++; - len = am_readword (dev, hdraddr + 4); - dev->stats.tx_bytes += -len; - } while (priv->txtail != priv->txhead); - - netif_wake_queue(dev); -} - -static irqreturn_t -am79c961_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *)dev_id; - struct dev_priv *priv = netdev_priv(dev); - u_int status, n = 100; - int handled = 0; - - do { - status = read_rreg(dev->base_addr, CSR0); - write_rreg(dev->base_addr, CSR0, status & - (CSR0_IENA|CSR0_TINT|CSR0_RINT| - CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL)); - - if (status & CSR0_RINT) { - handled = 1; - am79c961_rx(dev, priv); - } - if (status & CSR0_TINT) { - handled = 1; - am79c961_tx(dev, priv); - } - if (status & CSR0_MISS) { - handled = 1; - dev->stats.rx_dropped++; - } - if (status & CSR0_CERR) { - handled = 1; - mod_timer(&priv->timer, jiffies); - } - } while (--n && status & (CSR0_RINT | CSR0_TINT)); - - return IRQ_RETVAL(handled); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void am79c961_poll_controller(struct net_device *dev) -{ - unsigned long flags; - local_irq_save(flags); - am79c961_interrupt(dev->irq, dev); - local_irq_restore(flags); -} -#endif - -/* - * Initialise the chip. Note that we always expect - * to be entered with interrupts enabled. 
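/*
 * Both ring handlers above follow the LANCE descriptor ownership protocol:
 * a descriptor whose OWN bit is set belongs to the chip, and the host may
 * only touch it once the chip clears OWN.  In outline (a sketch, not the
 * driver's exact code, which advances rxtail before copying):
 */
static void lance_rx_outline(struct net_device *dev, struct dev_priv *priv)
{
	for (;;) {
		u_int hdr = priv->rxhdr + (priv->rxtail << 3);

		if (am_readword(dev, hdr + 2) & RMD_OWN)
			break;		/* chip still owns this descriptor */
		/* ... copy the frame out of priv->rxbuffer[priv->rxtail] ... */
		am_writeword(dev, hdr + 2, RMD_OWN);	/* hand it back */
		priv->rxtail = (priv->rxtail + 1) % RX_BUFFERS;
	}
}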
- */ -static int -am79c961_hw_init(struct net_device *dev) -{ - struct dev_priv *priv = netdev_priv(dev); - - spin_lock_irq(&priv->chip_lock); - write_rreg (dev->base_addr, CSR0, CSR0_STOP); - write_rreg (dev->base_addr, CSR3, CSR3_MASKALL); - spin_unlock_irq(&priv->chip_lock); - - am79c961_ramtest(dev, 0x66); - am79c961_ramtest(dev, 0x99); - - return 0; -} - -static void __init am79c961_banner(void) -{ - static unsigned version_printed; - - if (net_debug && version_printed++ == 0) - printk(KERN_INFO "%s", version); -} -static const struct net_device_ops am79c961_netdev_ops = { - .ndo_open = am79c961_open, - .ndo_stop = am79c961_close, - .ndo_start_xmit = am79c961_sendpacket, - .ndo_set_multicast_list = am79c961_setmulticastlist, - .ndo_tx_timeout = am79c961_timeout, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = am79c961_poll_controller, -#endif -}; - -static int __devinit am79c961_probe(struct platform_device *pdev) -{ - struct resource *res; - struct net_device *dev; - struct dev_priv *priv; - int i, ret; - - res = platform_get_resource(pdev, IORESOURCE_IO, 0); - if (!res) - return -ENODEV; - - dev = alloc_etherdev(sizeof(struct dev_priv)); - ret = -ENOMEM; - if (!dev) - goto out; - - SET_NETDEV_DEV(dev, &pdev->dev); - - priv = netdev_priv(dev); - - /* - * Fixed address and IRQ lines here. - * The PNP initialisation should have been - * done by the ether bootp loader. - */ - dev->base_addr = res->start; - ret = platform_get_irq(pdev, 0); - - if (ret < 0) { - ret = -ENODEV; - goto nodev; - } - dev->irq = ret; - - ret = -ENODEV; - if (!request_region(dev->base_addr, 0x18, dev->name)) - goto nodev; - - /* - * Reset the device. - */ - inb(dev->base_addr + NET_RESET); - udelay(5); - - /* - * Check the manufacturer part of the - * ether address. - */ - if (inb(dev->base_addr) != 0x08 || - inb(dev->base_addr + 2) != 0x00 || - inb(dev->base_addr + 4) != 0x2b) - goto release; - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff; - - am79c961_banner(); - - spin_lock_init(&priv->chip_lock); - init_timer(&priv->timer); - priv->timer.data = (unsigned long)dev; - priv->timer.function = am79c961_timer; - - if (am79c961_hw_init(dev)) - goto release; - - dev->netdev_ops = &am79c961_netdev_ops; - - ret = register_netdev(dev); - if (ret == 0) { - printk(KERN_INFO "%s: ether address %pM\n", - dev->name, dev->dev_addr); - return 0; - } - -release: - release_region(dev->base_addr, 0x18); -nodev: - free_netdev(dev); -out: - return ret; -} - -static struct platform_driver am79c961_driver = { - .probe = am79c961_probe, - .driver = { - .name = "am79c961", - }, -}; - -static int __init am79c961_init(void) -{ - return platform_driver_register(&am79c961_driver); -} - -__initcall(am79c961_init); diff --git a/drivers/net/arm/am79c961a.h b/drivers/net/arm/am79c961a.h deleted file mode 100644 index fd634d3..0000000 --- a/drivers/net/arm/am79c961a.h +++ /dev/null @@ -1,145 +0,0 @@ -/* - * linux/drivers/net/arm/am79c961a.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _LINUX_am79c961a_H -#define _LINUX_am79c961a_H - -/* use 0 for production, 1 for verification, >2 for debug. 
debug flags: */ -#define DEBUG_TX 2 -#define DEBUG_RX 4 -#define DEBUG_INT 8 -#define DEBUG_IC 16 -#ifndef NET_DEBUG -#define NET_DEBUG 0 -#endif - -#define NET_UID 0 -#define NET_RDP 0x10 -#define NET_RAP 0x12 -#define NET_RESET 0x14 -#define NET_IDP 0x16 - -/* - * RAP registers - */ -#define CSR0 0 -#define CSR0_INIT 0x0001 -#define CSR0_STRT 0x0002 -#define CSR0_STOP 0x0004 -#define CSR0_TDMD 0x0008 -#define CSR0_TXON 0x0010 -#define CSR0_RXON 0x0020 -#define CSR0_IENA 0x0040 -#define CSR0_INTR 0x0080 -#define CSR0_IDON 0x0100 -#define CSR0_TINT 0x0200 -#define CSR0_RINT 0x0400 -#define CSR0_MERR 0x0800 -#define CSR0_MISS 0x1000 -#define CSR0_CERR 0x2000 -#define CSR0_BABL 0x4000 -#define CSR0_ERR 0x8000 - -#define CSR3 3 -#define CSR3_EMBA 0x0008 -#define CSR3_DXMT2PD 0x0010 -#define CSR3_LAPPEN 0x0020 -#define CSR3_DXSUFLO 0x0040 -#define CSR3_IDONM 0x0100 -#define CSR3_TINTM 0x0200 -#define CSR3_RINTM 0x0400 -#define CSR3_MERRM 0x0800 -#define CSR3_MISSM 0x1000 -#define CSR3_BABLM 0x4000 -#define CSR3_MASKALL 0x5F00 - -#define CSR4 4 -#define CSR4_JABM 0x0001 -#define CSR4_JAB 0x0002 -#define CSR4_TXSTRTM 0x0004 -#define CSR4_TXSTRT 0x0008 -#define CSR4_RCVCCOM 0x0010 -#define CSR4_RCVCCO 0x0020 -#define CSR4_MFCOM 0x0100 -#define CSR4_MFCO 0x0200 -#define CSR4_ASTRP_RCV 0x0400 -#define CSR4_APAD_XMIT 0x0800 - -#define CTRL1 5 -#define CTRL1_SPND 0x0001 - -#define LADRL 8 -#define LADRM1 9 -#define LADRM2 10 -#define LADRH 11 -#define PADRL 12 -#define PADRM 13 -#define PADRH 14 - -#define MODE 15 -#define MODE_DISRX 0x0001 -#define MODE_DISTX 0x0002 -#define MODE_LOOP 0x0004 -#define MODE_DTCRC 0x0008 -#define MODE_COLL 0x0010 -#define MODE_DRETRY 0x0020 -#define MODE_INTLOOP 0x0040 -#define MODE_PORT_AUI 0x0000 -#define MODE_PORT_10BT 0x0080 -#define MODE_DRXPA 0x2000 -#define MODE_DRXBA 0x4000 -#define MODE_PROMISC 0x8000 - -#define BASERXL 24 -#define BASERXH 25 -#define BASETXL 30 -#define BASETXH 31 - -#define POLLINT 47 - -#define SIZERXR 76 -#define SIZETXR 78 - -#define CSR_MFC 112 - -#define RMD_ENP 0x0100 -#define RMD_STP 0x0200 -#define RMD_CRC 0x0800 -#define RMD_FRAM 0x2000 -#define RMD_ERR 0x4000 -#define RMD_OWN 0x8000 - -#define TMD_ENP 0x0100 -#define TMD_STP 0x0200 -#define TMD_MORE 0x1000 -#define TMD_ERR 0x4000 -#define TMD_OWN 0x8000 - -#define TST_RTRY 0x0400 -#define TST_LCAR 0x0800 -#define TST_LCOL 0x1000 -#define TST_UFLO 0x4000 -#define TST_BUFF 0x8000 - -#define ISALED0 0x0004 -#define ISALED0_LNKST 0x8000 - -struct dev_priv { - unsigned long rxbuffer[RX_BUFFERS]; - unsigned long txbuffer[TX_BUFFERS]; - unsigned char txhead; - unsigned char txtail; - unsigned char rxhead; - unsigned char rxtail; - unsigned long rxhdr; - unsigned long txhdr; - spinlock_t chip_lock; - struct timer_list timer; -}; - -#endif diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c deleted file mode 100644 index 1264d78..0000000 --- a/drivers/net/atarilance.c +++ /dev/null @@ -1,1176 +0,0 @@ -/* atarilance.c: Ethernet driver for VME Lance cards on the Atari */ -/* - Written 1995/96 by Roman Hodek (Roman.Hodek@informatik.uni-erlangen.de) - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - This driver was written with the following sources of reference: - - The driver for the Riebl Lance card by the TU Vienna.
- - The modified TUW driver for PAM's VME cards - - The PC-Linux driver for Lance cards (but this is for bus master - cards, not the shared memory ones) - - The Amiga Ariadne driver - - v1.0: (in 1.2.13pl4/0.9.13) - Initial version - v1.1: (in 1.2.13pl5) - more comments - deleted some debugging stuff - optimized register access (keep AREG pointing to CSR0) - following AMD, CSR0_STRT should be set only after IDON is detected - use memcpy() for data transfers, that also employs long word moves - better probe procedure for 24-bit systems - non-VME-RieblCards need extra delays in memcpy - must also do write test, since 0xfxe00000 may hit ROM - use 8/32 tx/rx buffers, which should give better NFS performance; - this is made possible by shifting the last packet buffer after the - RieblCard reserved area - v1.2: (in 1.2.13pl8) - again fixed probing for the Falcon; 0xfe01000 hits phys. 0x00010000 - and thus RAM, in case of no Lance found all memory contents have to - be restored! - Now possible to compile as module. - v1.3: 03/30/96 Jes Sorensen, Roman (in 1.3) - Several little 1.3 adaptions - When the lance is stopped it jumps back into little-endian - mode. It is therefore necessary to put it back where it - belongs, in big endian mode, in order to make things work. - This might be the reason why multicast-mode didn't work - before, but I'm not able to test it as I only got an Amiga - (we had similar problems with the A2065 driver). - -*/ - -static char version[] = "atarilance.c: v1.3 04/04/96 " - "Roman.Hodek@informatik.uni-erlangen.de\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/* Debug level: - * 0 = silent, print only serious errors - * 1 = normal, print error messages - * 2 = debug, print debug infos - * 3 = debug, print even more debug infos (packet data) - */ - -#define LANCE_DEBUG 1 - -#ifdef LANCE_DEBUG -static int lance_debug = LANCE_DEBUG; -#else -static int lance_debug = 1; -#endif -module_param(lance_debug, int, 0); -MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)"); -MODULE_LICENSE("GPL"); - -/* Print debug messages on probing? */ -#undef LANCE_DEBUG_PROBE - -#define DPRINTK(n,a) \ - do { \ - if (lance_debug >= n) \ - printk a; \ - } while( 0 ) - -#ifdef LANCE_DEBUG_PROBE -# define PROBE_PRINT(a) printk a -#else -# define PROBE_PRINT(a) -#endif - -/* These define the number of Rx and Tx buffers as log2. (Only powers - * of two are valid) - * Much more rx buffers (32) are reserved than tx buffers (8), since receiving - * is more time critical then sending and packets may have to remain in the - * board's memory when main memory is low. - */ - -#define TX_LOG_RING_SIZE 3 -#define RX_LOG_RING_SIZE 5 - -/* These are the derived values */ - -#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) -#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5) -#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) - -#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE) -#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5) -#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) - -#define TX_TIMEOUT (HZ/5) - -/* The LANCE Rx and Tx ring descriptors. */ -struct lance_rx_head { - unsigned short base; /* Low word of base addr */ - volatile unsigned char flag; - unsigned char base_hi; /* High word of base addr (unused) */ - short buf_length; /* This length is 2s complement! */ - volatile short msg_length; /* This length is "normal". 
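Keeping the ring sizes as powers of two is what makes the derived macros above work: the size is 1 << log, the LANCE init block wants the log2 encoded in the top three bits of a byte (hence << 5), and wrap-around indexing reduces to an AND with size-1 instead of a modulo. The same arithmetic, checked in isolation:

#include <stdio.h>

#define TX_LOG_RING_SIZE 3
#define RX_LOG_RING_SIZE 5

#define TX_RING_SIZE     (1 << TX_LOG_RING_SIZE)   /* 8                 */
#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)   /* log2 in bits 5..7 */
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)        /* 0x07              */

int main(void)
{
    int cur_tx;

    printf("size=%d len_bits=0x%02x mask=0x%02x\n",
           TX_RING_SIZE, TX_RING_LEN_BITS, TX_RING_MOD_MASK);

    /* A monotonically increasing counter masked down to a ring slot:
     * works only because TX_RING_SIZE is a power of two. */
    for (cur_tx = 5; cur_tx < 12; cur_tx++)
        printf("cur_tx=%d -> entry=%d\n", cur_tx, cur_tx & TX_RING_MOD_MASK);
    return 0;
}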
*/ -}; - -struct lance_tx_head { - unsigned short base; /* Low word of base addr */ - volatile unsigned char flag; - unsigned char base_hi; /* High word of base addr (unused) */ - short length; /* Length is 2s complement! */ - volatile short misc; -}; - -struct ringdesc { - unsigned short adr_lo; /* Low 16 bits of address */ - unsigned char len; /* Length bits */ - unsigned char adr_hi; /* High 8 bits of address (unused) */ -}; - -/* The LANCE initialization block, described in databook. */ -struct lance_init_block { - unsigned short mode; /* Pre-set mode */ - unsigned char hwaddr[6]; /* Physical ethernet address */ - unsigned filter[2]; /* Multicast filter (unused). */ - /* Receive and transmit ring base, along with length bits. */ - struct ringdesc rx_ring; - struct ringdesc tx_ring; -}; - -/* The whole layout of the Lance shared memory */ -struct lance_memory { - struct lance_init_block init; - struct lance_tx_head tx_head[TX_RING_SIZE]; - struct lance_rx_head rx_head[RX_RING_SIZE]; - char packet_area[0]; /* packet data follow after the - * init block and the ring - * descriptors and are located - * at runtime */ -}; - -/* RieblCard specifics: - * The original TOS driver for these cards reserves the area from offset - * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the - * Ethernet address there, and the magic for verifying the data's validity. - * The reserved area isn't touch by packet buffers. Furthermore, offset 0xfffe - * is reserved for the interrupt vector number. - */ -#define RIEBL_RSVD_START 0xee70 -#define RIEBL_RSVD_END 0xeec0 -#define RIEBL_MAGIC 0x09051990 -#define RIEBL_MAGIC_ADDR ((unsigned long *)(((char *)MEM) + 0xee8a)) -#define RIEBL_HWADDR_ADDR ((unsigned char *)(((char *)MEM) + 0xee8e)) -#define RIEBL_IVEC_ADDR ((unsigned short *)(((char *)MEM) + 0xfffe)) - -/* This is a default address for the old RieblCards without a battery - * that have no ethernet address at boot time. 00:00:36:04 is the - * prefix for Riebl cards, the 00:00 at the end is arbitrary. - */ - -static unsigned char OldRieblDefHwaddr[6] = { - 0x00, 0x00, 0x36, 0x04, 0x00, 0x00 -}; - - -/* I/O registers of the Lance chip */ - -struct lance_ioreg { -/* base+0x0 */ volatile unsigned short data; -/* base+0x2 */ volatile unsigned short addr; - unsigned char _dummy1[3]; -/* base+0x7 */ volatile unsigned char ivec; - unsigned char _dummy2[5]; -/* base+0xd */ volatile unsigned char eeprom; - unsigned char _dummy3; -/* base+0xf */ volatile unsigned char mem; -}; - -/* Types of boards this driver supports */ - -enum lance_type { - OLD_RIEBL, /* old Riebl card without battery */ - NEW_RIEBL, /* new Riebl card with battery */ - PAM_CARD /* PAM card with EEPROM */ -}; - -static char *lance_names[] = { - "Riebl-Card (without battery)", - "Riebl-Card (with battery)", - "PAM intern card" -}; - -/* The driver's private device structure */ - -struct lance_private { - enum lance_type cardtype; - struct lance_ioreg *iobase; - struct lance_memory *mem; - int cur_rx, cur_tx; /* The next free ring entry */ - int dirty_tx; /* Ring entries to be freed. 
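lance_memory is overlaid directly on the card's shared RAM: init block first, then the descriptor rings, then packet_area as a zero-length marker for where the packet buffers begin; the init block's ring pointers are simply offsetof() values into this struct. A reduced sketch (structures trimmed to essentials) showing how those offsets fall out:

#include <stdio.h>
#include <stddef.h>

struct ringdesc { unsigned short adr_lo; unsigned char len, adr_hi; };

struct init_block {
    unsigned short  mode;
    unsigned char   hwaddr[6];
    unsigned        filter[2];
    struct ringdesc rx_ring, tx_ring;
};

struct tx_head { unsigned short base; unsigned char flag, base_hi; short length, misc; };
struct rx_head { unsigned short base; unsigned char flag, base_hi; short buf_len, msg_len; };

struct lance_memory {
    struct init_block init;
    struct tx_head    tx[8];
    struct rx_head    rx[32];
    char packet_area[];        /* packet buffers start here */
};

int main(void)
{
    printf("tx ring at %#zx\n", offsetof(struct lance_memory, tx));
    printf("rx ring at %#zx\n", offsetof(struct lance_memory, rx));
    printf("packets at %#zx\n", offsetof(struct lance_memory, packet_area));
    return 0;
}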
*/ - /* copy function */ - void *(*memcpy_f)( void *, const void *, size_t ); -/* This must be long for set_bit() */ - long tx_full; - spinlock_t devlock; -}; - -/* I/O register access macros */ - -#define MEM lp->mem -#define DREG IO->data -#define AREG IO->addr -#define REGA(a) (*( AREG = (a), &DREG )) - -/* Definitions for packet buffer access: */ -#define PKT_BUF_SZ 1544 -/* Get the address of a packet buffer corresponding to a given buffer head */ -#define PKTBUF_ADDR(head) (((unsigned char *)(MEM)) + (head)->base) - -/* Possible memory/IO addresses for probing */ - -static struct lance_addr { - unsigned long memaddr; - unsigned long ioaddr; - int slow_flag; -} lance_addr_list[] = { - { 0xfe010000, 0xfe00fff0, 0 }, /* RieblCard VME in TT */ - { 0xffc10000, 0xffc0fff0, 0 }, /* RieblCard VME in MegaSTE - (highest byte stripped) */ - { 0xffe00000, 0xffff7000, 1 }, /* RieblCard in ST - (highest byte stripped) */ - { 0xffd00000, 0xffff7000, 1 }, /* RieblCard in ST with hw modif. to - avoid conflict with ROM - (highest byte stripped) */ - { 0xffcf0000, 0xffcffff0, 0 }, /* PAMCard VME in TT and MSTE - (highest byte stripped) */ - { 0xfecf0000, 0xfecffff0, 0 }, /* Rhotron's PAMCard VME in TT and MSTE - (highest byte stripped) */ -}; - -#define N_LANCE_ADDR ARRAY_SIZE(lance_addr_list) - - -/* Definitions for the Lance */ - -/* tx_head flags */ -#define TMD1_ENP 0x01 /* end of packet */ -#define TMD1_STP 0x02 /* start of packet */ -#define TMD1_DEF 0x04 /* deferred */ -#define TMD1_ONE 0x08 /* one retry needed */ -#define TMD1_MORE 0x10 /* more than one retry needed */ -#define TMD1_ERR 0x40 /* error summary */ -#define TMD1_OWN 0x80 /* ownership (set: chip owns) */ - -#define TMD1_OWN_CHIP TMD1_OWN -#define TMD1_OWN_HOST 0 - -/* tx_head misc field */ -#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */ -#define TMD3_RTRY 0x0400 /* failed after 16 retries */ -#define TMD3_LCAR 0x0800 /* carrier lost */ -#define TMD3_LCOL 0x1000 /* late collision */ -#define TMD3_UFLO 0x4000 /* underflow (late memory) */ -#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */ - -/* rx_head flags */ -#define RMD1_ENP 0x01 /* end of packet */ -#define RMD1_STP 0x02 /* start of packet */ -#define RMD1_BUFF 0x04 /* buffer error */ -#define RMD1_CRC 0x08 /* CRC error */ -#define RMD1_OFLO 0x10 /* overflow */ -#define RMD1_FRAM 0x20 /* framing error */ -#define RMD1_ERR 0x40 /* error summary */ -#define RMD1_OWN 0x80 /* ownership (set: ship owns) */ - -#define RMD1_OWN_CHIP RMD1_OWN -#define RMD1_OWN_HOST 0 - -/* register names */ -#define CSR0 0 /* mode/status */ -#define CSR1 1 /* init block addr (low) */ -#define CSR2 2 /* init block addr (high) */ -#define CSR3 3 /* misc */ -#define CSR8 8 /* address filter */ -#define CSR15 15 /* promiscuous mode */ - -/* CSR0 */ -/* (R=readable, W=writeable, S=set on write, C=clear on write) */ -#define CSR0_INIT 0x0001 /* initialize (RS) */ -#define CSR0_STRT 0x0002 /* start (RS) */ -#define CSR0_STOP 0x0004 /* stop (RS) */ -#define CSR0_TDMD 0x0008 /* transmit demand (RS) */ -#define CSR0_TXON 0x0010 /* transmitter on (R) */ -#define CSR0_RXON 0x0020 /* receiver on (R) */ -#define CSR0_INEA 0x0040 /* interrupt enable (RW) */ -#define CSR0_INTR 0x0080 /* interrupt active (R) */ -#define CSR0_IDON 0x0100 /* initialization done (RC) */ -#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */ -#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */ -#define CSR0_MERR 0x0800 /* memory error (RC) */ -#define CSR0_MISS 0x1000 /* missed frame (RC) */ -#define CSR0_CERR 
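REGA() is worth unpacking: the comma operator first stores the CSR number into the address port (AREG), then evaluates to &DREG, so the whole expression is an lvalue for the data port and works on either side of an assignment -- REGA(CSR0) = CSR0_STOP both selects and writes the register. A standalone demonstration with plain variables standing in for the two ports:

#include <stdio.h>

static volatile unsigned short data_port;   /* DREG stand-in */
static volatile unsigned short addr_port;   /* AREG stand-in */

/* Select register 'a' via the address port, then yield an lvalue
 * for the data port -- exactly the shape of the driver's REGA(). */
#define REGA(a) (*(addr_port = (a), &data_port))

int main(void)
{
    REGA(0) = 0x0004;                        /* select CSR0, write STOP */
    printf("addr=%d data=%#06x\n", addr_port, (unsigned)data_port);
    printf("readback=%#06x\n", (unsigned)REGA(0));  /* select, then read */
    return 0;
}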
0x2000 /* carrier error (no heartbeat :-) (RC) */ -#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */ -#define CSR0_ERR 0x8000 /* error (RC) */ - -/* CSR3 */ -#define CSR3_BCON 0x0001 /* byte control */ -#define CSR3_ACON 0x0002 /* ALE control */ -#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */ - - - -/***************************** Prototypes *****************************/ - -static unsigned long lance_probe1( struct net_device *dev, struct lance_addr - *init_rec ); -static int lance_open( struct net_device *dev ); -static void lance_init_ring( struct net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); -static irqreturn_t lance_interrupt( int irq, void *dev_id ); -static int lance_rx( struct net_device *dev ); -static int lance_close( struct net_device *dev ); -static void set_multicast_list( struct net_device *dev ); -static int lance_set_mac_address( struct net_device *dev, void *addr ); -static void lance_tx_timeout (struct net_device *dev); - -/************************* End of Prototypes **************************/ - - - - - -static void *slow_memcpy( void *dst, const void *src, size_t len ) - -{ char *cto = dst; - const char *cfrom = src; - - while( len-- ) { - *cto++ = *cfrom++; - MFPDELAY(); - } - return dst; -} - - -struct net_device * __init atarilance_probe(int unit) -{ - int i; - static int found; - struct net_device *dev; - int err = -ENODEV; - - if (!MACH_IS_ATARI || found) - /* Assume there's only one board possible... That seems true, since - * the Riebl/PAM board's address cannot be changed. */ - return ERR_PTR(-ENODEV); - - dev = alloc_etherdev(sizeof(struct lance_private)); - if (!dev) - return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - - for( i = 0; i < N_LANCE_ADDR; ++i ) { - if (lance_probe1( dev, &lance_addr_list[i] )) { - found = 1; - err = register_netdev(dev); - if (!err) - return dev; - free_irq(dev->irq, dev); - break; - } - } - free_netdev(dev); - return ERR_PTR(err); -} - - -/* Derived from hwreg_present() in atari/config.c: */ - -static noinline int __init addr_accessible(volatile void *regp, int wordflag, - int writeflag) -{ - int ret; - unsigned long flags; - long *vbr, save_berr; - - local_irq_save(flags); - - __asm__ __volatile__ ( "movec %/vbr,%0" : "=r" (vbr) : ); - save_berr = vbr[2]; - - __asm__ __volatile__ - ( "movel %/sp,%/d1\n\t" - "movel #Lberr,%2@\n\t" - "moveq #0,%0\n\t" - "tstl %3\n\t" - "bne 1f\n\t" - "moveb %1@,%/d0\n\t" - "nop \n\t" - "bra 2f\n" -"1: movew %1@,%/d0\n\t" - "nop \n" -"2: tstl %4\n\t" - "beq 2f\n\t" - "tstl %3\n\t" - "bne 1f\n\t" - "clrb %1@\n\t" - "nop \n\t" - "moveb %/d0,%1@\n\t" - "nop \n\t" - "bra 2f\n" -"1: clrw %1@\n\t" - "nop \n\t" - "movew %/d0,%1@\n\t" - "nop \n" -"2: moveq #1,%0\n" -"Lberr: movel %/d1,%/sp" - : "=&d" (ret) - : "a" (regp), "a" (&vbr[2]), "rm" (wordflag), "rm" (writeflag) - : "d0", "d1", "memory" - ); - - vbr[2] = save_berr; - local_irq_restore(flags); - - return ret; -} - -static const struct net_device_ops lance_netdev_ops = { - .ndo_open = lance_open, - .ndo_stop = lance_close, - .ndo_start_xmit = lance_start_xmit, - .ndo_set_multicast_list = set_multicast_list, - .ndo_set_mac_address = lance_set_mac_address, - .ndo_tx_timeout = lance_tx_timeout, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, -}; - -static unsigned long __init lance_probe1( struct net_device *dev, - struct lance_addr *init_rec ) -{ - volatile unsigned short *memaddr = - 
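slow_memcpy() above exists for the board variants flagged slow in lance_addr_list: they cannot absorb back-to-back bus accesses. The probe records either it or the ordinary memcpy in lp->memcpy_f, and every later data move goes through that pointer, so the hot paths never branch on card type. A sketch of the selection pattern, with a no-op standing in for MFPDELAY():

#include <stdio.h>
#include <string.h>

#define MFPDELAY() do { } while (0)    /* placeholder bus-settle delay */

static void *slow_memcpy(void *dst, const void *src, size_t len)
{
    char *cto = dst;
    const char *cfrom = src;

    while (len--) {                    /* byte at a time, pause between */
        *cto++ = *cfrom++;
        MFPDELAY();
    }
    return dst;
}

int main(void)
{
    /* Chosen once at probe time, used everywhere afterwards. */
    void *(*memcpy_f)(void *, const void *, size_t);
    int slow_flag = 1;                 /* would come from the probe table */
    char buf[16];

    memcpy_f = slow_flag ? slow_memcpy : memcpy;
    memcpy_f(buf, "lance", 6);
    printf("%s copy: %s\n", slow_flag ? "slow" : "fast", buf);
    return 0;
}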
(volatile unsigned short *)init_rec->memaddr; - volatile unsigned short *ioaddr = - (volatile unsigned short *)init_rec->ioaddr; - struct lance_private *lp; - struct lance_ioreg *IO; - int i; - static int did_version; - unsigned short save1, save2; - - PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n", - (long)memaddr, (long)ioaddr )); - - /* Test whether memory readable and writable */ - PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" )); - if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail; - - /* Written values should come back... */ - PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" )); - save1 = *memaddr; - *memaddr = 0x0001; - if (*memaddr != 0x0001) goto probe_fail; - PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" )); - *memaddr = 0x0000; - if (*memaddr != 0x0000) goto probe_fail; - *memaddr = save1; - - /* First port should be readable and writable */ - PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" )); - if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail; - - /* and written values should be readable */ - PROBE_PRINT(( "lance_probe1: testing ioport to be writeable\n" )); - save2 = ioaddr[1]; - ioaddr[1] = 0x0001; - if (ioaddr[1] != 0x0001) goto probe_fail; - - /* The CSR0_INIT bit should not be readable */ - PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" )); - save1 = ioaddr[0]; - ioaddr[1] = CSR0; - ioaddr[0] = CSR0_INIT | CSR0_STOP; - if (ioaddr[0] != CSR0_STOP) { - ioaddr[0] = save1; - ioaddr[1] = save2; - goto probe_fail; - } - PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" )); - ioaddr[0] = CSR0_STOP; - if (ioaddr[0] != CSR0_STOP) { - ioaddr[0] = save1; - ioaddr[1] = save2; - goto probe_fail; - } - - /* Now ok... */ - PROBE_PRINT(( "lance_probe1: Lance card detected\n" )); - goto probe_ok; - - probe_fail: - return 0; - - probe_ok: - lp = netdev_priv(dev); - MEM = (struct lance_memory *)memaddr; - IO = lp->iobase = (struct lance_ioreg *)ioaddr; - dev->base_addr = (unsigned long)ioaddr; /* informational only */ - lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy; - - REGA( CSR0 ) = CSR0_STOP; - - /* Now test for type: If the eeprom I/O port is readable, it is a - * PAM card */ - if (addr_accessible( &(IO->eeprom), 0, 0 )) { - /* Switch back to Ram */ - i = IO->mem; - lp->cardtype = PAM_CARD; - } - else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) { - lp->cardtype = NEW_RIEBL; - } - else - lp->cardtype = OLD_RIEBL; - - if (lp->cardtype == PAM_CARD || - memaddr == (unsigned short *)0xffe00000) { - /* PAMs card and Riebl on ST use level 5 autovector */ - if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, - "PAM,Riebl-ST Ethernet", dev)) { - printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); - return 0; - } - dev->irq = (unsigned short)IRQ_AUTO_5; - } - else { - /* For VME-RieblCards, request a free VME int; - * (This must be unsigned long, since dev->irq is short and the - * IRQ_MACHSPEC bit would be cut off...) - */ - unsigned long irq = atari_register_vme_int(); - if (!irq) { - printk( "Lance: request for VME interrupt failed\n" ); - return 0; - } - if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO, - "Riebl-VME Ethernet", dev)) { - printk( "Lance: request for irq %ld failed\n", irq ); - return 0; - } - dev->irq = irq; - } - - printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ", - dev->name, lance_names[lp->cardtype], - (unsigned long)ioaddr, - (unsigned long)memaddr, - dev->irq, - init_rec->slow_flag ? 
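The probe above leans on two asymmetries: ordinary shared memory must read back exactly what was written, while CSR0's INIT bit is write-only, so writing CSR0_INIT | CSR0_STOP and reading back only CSR0_STOP positively identifies a LANCE. A condensed sketch of the memory half of the test, run against a plain array, with the touched word saved and restored as in the original:

#include <stdio.h>
#include <stdint.h>

static int probe_word(volatile uint16_t *p)
{
    uint16_t save = *p;                /* preserve whatever is there */

    *p = 0x0001;
    if (*p != 0x0001)
        return 0;
    *p = 0x0000;
    if (*p != 0x0000)
        return 0;

    *p = save;                         /* restore before reporting */
    return 1;
}

int main(void)
{
    uint16_t fake_card_ram[4] = { 0xbeef };

    printf(probe_word(&fake_card_ram[0]) ?
           "memory responds, continue probing\n" : "no card here\n");
    printf("restored word: %#06x\n", fake_card_ram[0]);
    return 0;
}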
" (slow memcpy)" : "" ); - - /* Get the ethernet address */ - switch( lp->cardtype ) { - case OLD_RIEBL: - /* No ethernet address! (Set some default address) */ - memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 ); - break; - case NEW_RIEBL: - lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 ); - break; - case PAM_CARD: - i = IO->eeprom; - for( i = 0; i < 6; ++i ) - dev->dev_addr[i] = - ((((unsigned short *)MEM)[i*2] & 0x0f) << 4) | - ((((unsigned short *)MEM)[i*2+1] & 0x0f)); - i = IO->mem; - break; - } - printk("%pM\n", dev->dev_addr); - if (lp->cardtype == OLD_RIEBL) { - printk( "%s: Warning: This is a default ethernet address!\n", - dev->name ); - printk( " Use \"ifconfig hw ether ...\" to set the address.\n" ); - } - - spin_lock_init(&lp->devlock); - - MEM->init.mode = 0x0000; /* Disable Rx and Tx. */ - for( i = 0; i < 6; i++ ) - MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */ - MEM->init.filter[0] = 0x00000000; - MEM->init.filter[1] = 0x00000000; - MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head ); - MEM->init.rx_ring.adr_hi = 0; - MEM->init.rx_ring.len = RX_RING_LEN_BITS; - MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head ); - MEM->init.tx_ring.adr_hi = 0; - MEM->init.tx_ring.len = TX_RING_LEN_BITS; - - if (lp->cardtype == PAM_CARD) - IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq); - else - *RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq); - - if (did_version++ == 0) - DPRINTK( 1, ( version )); - - dev->netdev_ops = &lance_netdev_ops; - - /* XXX MSch */ - dev->watchdog_timeo = TX_TIMEOUT; - - return 1; -} - - -static int lance_open( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_ioreg *IO = lp->iobase; - int i; - - DPRINTK( 2, ( "%s: lance_open()\n", dev->name )); - - lance_init_ring(dev); - /* Re-initialize the LANCE, and start it when done. */ - - REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); - REGA( CSR2 ) = 0; - REGA( CSR1 ) = 0; - REGA( CSR0 ) = CSR0_INIT; - /* From now on, AREG is kept to point to CSR0 */ - - i = 1000000; - while (--i > 0) - if (DREG & CSR0_IDON) - break; - if (i <= 0 || (DREG & CSR0_ERR)) { - DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", - dev->name, i, DREG )); - DREG = CSR0_STOP; - return -EIO; - } - DREG = CSR0_IDON; - DREG = CSR0_STRT; - DREG = CSR0_INEA; - - netif_start_queue (dev); - - DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); - - return 0; -} - - -/* Initialize the LANCE Rx and Tx rings. */ - -static void lance_init_ring( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - int i; - unsigned offset; - - lp->tx_full = 0; - lp->cur_rx = lp->cur_tx = 0; - lp->dirty_tx = 0; - - offset = offsetof( struct lance_memory, packet_area ); - -/* If the packet buffer at offset 'o' would conflict with the reserved area - * of RieblCards, advance it */ -#define CHECK_OFFSET(o) \ - do { \ - if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) { \ - if (((o) < RIEBL_RSVD_START) ? 
(o)+PKT_BUF_SZ > RIEBL_RSVD_START \ - : (o) < RIEBL_RSVD_END) \ - (o) = RIEBL_RSVD_END; \ - } \ - } while(0) - - for( i = 0; i < TX_RING_SIZE; i++ ) { - CHECK_OFFSET(offset); - MEM->tx_head[i].base = offset; - MEM->tx_head[i].flag = TMD1_OWN_HOST; - MEM->tx_head[i].base_hi = 0; - MEM->tx_head[i].length = 0; - MEM->tx_head[i].misc = 0; - offset += PKT_BUF_SZ; - } - - for( i = 0; i < RX_RING_SIZE; i++ ) { - CHECK_OFFSET(offset); - MEM->rx_head[i].base = offset; - MEM->rx_head[i].flag = TMD1_OWN_CHIP; - MEM->rx_head[i].base_hi = 0; - MEM->rx_head[i].buf_length = -PKT_BUF_SZ; - MEM->rx_head[i].msg_length = 0; - offset += PKT_BUF_SZ; - } -} - - -/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ - - -static void lance_tx_timeout (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_ioreg *IO = lp->iobase; - - AREG = CSR0; - DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n", - dev->name, DREG )); - DREG = CSR0_STOP; - /* - * Always set BSWP after a STOP as STOP puts it back into - * little endian mode. - */ - REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); - dev->stats.tx_errors++; -#ifndef final_version - { int i; - DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n", - lp->dirty_tx, lp->cur_tx, - lp->tx_full ? " (full)" : "", - lp->cur_rx )); - for( i = 0 ; i < RX_RING_SIZE; i++ ) - DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n", - i, MEM->rx_head[i].base, - -MEM->rx_head[i].buf_length, - MEM->rx_head[i].msg_length )); - for( i = 0 ; i < TX_RING_SIZE; i++ ) - DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n", - i, MEM->tx_head[i].base, - -MEM->tx_head[i].length, - MEM->tx_head[i].misc )); - } -#endif - /* XXX MSch: maybe purge/reinit ring here */ - /* lance_restart, essentially */ - lance_init_ring(dev); - REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); -} - -/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ - -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_ioreg *IO = lp->iobase; - int entry, len; - struct lance_tx_head *head; - unsigned long flags; - - DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", - dev->name, DREG )); - - - /* The old LANCE chips doesn't automatically pad buffers to min. size. */ - len = skb->len; - if (len < ETH_ZLEN) - len = ETH_ZLEN; - /* PAM-Card has a bug: Can only send packets with even number of bytes! */ - else if (lp->cardtype == PAM_CARD && (len & 1)) - ++len; - - if (len > skb->len) { - if (skb_padto(skb, len)) - return NETDEV_TX_OK; - } - - netif_stop_queue (dev); - - /* Fill in a Tx ring entry */ - if (lance_debug >= 3) { - printk( "%s: TX pkt type 0x%04x from %pM to %pM" - " data at 0x%08x len %d\n", - dev->name, ((u_short *)skb->data)[6], - &skb->data[6], skb->data, - (int)skb->data, (int)skb->len ); - } - - /* We're not prepared for the int until the last flags are set/reset. And - * the int may happen already after setting the OWN_CHIP... */ - spin_lock_irqsave (&lp->devlock, flags); - - /* Mask to ring buffer boundary. */ - entry = lp->cur_tx & TX_RING_MOD_MASK; - head = &(MEM->tx_head[entry]); - - /* Caution: the write order is important here, set the "ownership" bits - * last. 
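CHECK_OFFSET() is how the packet buffers coexist with the RieblCard configuration block: as lance_init_ring() walks offsets forward in PKT_BUF_SZ steps, any buffer that would overlap the reserved 0xee70..0xeec0 window gets bumped past its end. The same placement rule as a standalone function:

#include <stdio.h>

#define PKT_BUF_SZ       1544
#define RIEBL_RSVD_START 0xee70
#define RIEBL_RSVD_END   0xeec0

/* Bump 'o' past the reserved window if [o, o+PKT_BUF_SZ) would touch it. */
static unsigned check_offset(unsigned o)
{
    if ((o < RIEBL_RSVD_START) ? (o + PKT_BUF_SZ > RIEBL_RSVD_START)
                               : (o < RIEBL_RSVD_END))
        o = RIEBL_RSVD_END;
    return o;
}

int main(void)
{
    unsigned offset = 0xe000;
    int i;

    for (i = 0; i < 4; i++) {
        offset = check_offset(offset);
        printf("buffer %d at %#x\n", i, offset);   /* buffer 2 gets bumped */
        offset += PKT_BUF_SZ;
    }
    return 0;
}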
- */ - - - head->length = -len; - head->misc = 0; - lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); - head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; - dev->stats.tx_bytes += skb->len; - dev_kfree_skb( skb ); - lp->cur_tx++; - while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { - lp->cur_tx -= TX_RING_SIZE; - lp->dirty_tx -= TX_RING_SIZE; - } - - /* Trigger an immediate send poll. */ - DREG = CSR0_INEA | CSR0_TDMD; - - if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) == - TMD1_OWN_HOST) - netif_start_queue (dev); - else - lp->tx_full = 1; - spin_unlock_irqrestore (&lp->devlock, flags); - - return NETDEV_TX_OK; -} - -/* The LANCE interrupt handler. */ - -static irqreturn_t lance_interrupt( int irq, void *dev_id ) -{ - struct net_device *dev = dev_id; - struct lance_private *lp; - struct lance_ioreg *IO; - int csr0, boguscnt = 10; - int handled = 0; - - if (dev == NULL) { - DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" )); - return IRQ_NONE; - } - - lp = netdev_priv(dev); - IO = lp->iobase; - spin_lock (&lp->devlock); - - AREG = CSR0; - - while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) && - --boguscnt >= 0) { - handled = 1; - /* Acknowledge all of the current interrupt sources ASAP. */ - DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP | - CSR0_TDMD | CSR0_INEA); - - DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n", - dev->name, csr0, DREG )); - - if (csr0 & CSR0_RINT) /* Rx interrupt */ - lance_rx( dev ); - - if (csr0 & CSR0_TINT) { /* Tx-done interrupt */ - int dirty_tx = lp->dirty_tx; - - while( dirty_tx < lp->cur_tx) { - int entry = dirty_tx & TX_RING_MOD_MASK; - int status = MEM->tx_head[entry].flag; - - if (status & TMD1_OWN_CHIP) - break; /* It still hasn't been Txed */ - - MEM->tx_head[entry].flag = 0; - - if (status & TMD1_ERR) { - /* There was an major error, log it. */ - int err_status = MEM->tx_head[entry].misc; - dev->stats.tx_errors++; - if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++; - if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++; - if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++; - if (err_status & TMD3_UFLO) { - /* Ackk! On FIFO errors the Tx unit is turned off! */ - dev->stats.tx_fifo_errors++; - /* Remove this verbosity later! */ - DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n", - dev->name, csr0 )); - /* Restart the chip. */ - DREG = CSR0_STRT; - } - } else { - if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF)) - dev->stats.collisions++; - dev->stats.tx_packets++; - } - - /* XXX MSch: free skb?? */ - dirty_tx++; - } - -#ifndef final_version - if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { - DPRINTK( 0, ( "out-of-sync dirty pointer," - " %d vs. %d, full=%ld.\n", - dirty_tx, lp->cur_tx, lp->tx_full )); - dirty_tx += TX_RING_SIZE; - } -#endif - - if (lp->tx_full && (netif_queue_stopped(dev)) && - dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) { - /* The ring is no longer full, clear tbusy. */ - lp->tx_full = 0; - netif_wake_queue (dev); - } - - lp->dirty_tx = dirty_tx; - } - - /* Log misc errors. */ - if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ - if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ - if (csr0 & CSR0_MERR) { - DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " - "status %04x.\n", dev->name, csr0 )); - /* Restart the chip. */ - DREG = CSR0_STRT; - } - } - - /* Clear any other interrupt, and set interrupt enable. 
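The "write order is important" caution above is the heart of the descriptor handshake: length, misc and the buffer contents must all be visible before flag hands the slot to the chip, because DMA may start the moment the OWN bit appears. The driver relies on the volatile flag field for ordering; a sketch of the same discipline with an explicit compiler barrier (names illustrative):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TMD1_OWN_CHIP 0x80
#define TMD1_STP      0x02
#define TMD1_ENP      0x01

struct tx_desc {
    int16_t length;
    int16_t misc;
    volatile uint8_t flag;             /* ownership handed over last */
};

/* Prevent the compiler from sinking earlier stores past this point. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static uint8_t pktbuf[1544];

static void queue_tx(struct tx_desc *d, const void *data, int len)
{
    d->length = (int16_t)-len;         /* LANCE wants 2s-complement length */
    d->misc   = 0;
    memcpy(pktbuf, data, len);         /* buffer filled before handover */
    barrier();
    d->flag = TMD1_OWN_CHIP | TMD1_STP | TMD1_ENP;  /* chip owns it now */
}

int main(void)
{
    struct tx_desc d = { 0 };

    queue_tx(&d, "payload", 8);
    printf("flag=%#04x length=%d\n", d.flag, d.length);
    return 0;
}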
*/ - DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR | - CSR0_IDON | CSR0_INEA; - - DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n", - dev->name, DREG )); - - spin_unlock (&lp->devlock); - return IRQ_RETVAL(handled); -} - - -static int lance_rx( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - int entry = lp->cur_rx & RX_RING_MOD_MASK; - int i; - - DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name, - MEM->rx_head[entry].flag )); - - /* If we own the next entry, it's a new packet. Send it up. */ - while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) { - struct lance_rx_head *head = &(MEM->rx_head[entry]); - int status = head->flag; - - if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */ - /* There is a tricky error noted by John Murphy, - to Russ Nelson: Even with full-sized - buffers it's possible for a jabber packet to use two - buffers, with only the last correctly noting the error. */ - if (status & RMD1_ENP) /* Only count a general error at the */ - dev->stats.rx_errors++; /* end of a packet.*/ - if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; - if (status & RMD1_OFLO) dev->stats.rx_over_errors++; - if (status & RMD1_CRC) dev->stats.rx_crc_errors++; - if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; - head->flag &= (RMD1_ENP|RMD1_STP); - } else { - /* Malloc up new buffer, compatible with net-3. */ - short pkt_len = head->msg_length & 0xfff; - struct sk_buff *skb; - - if (pkt_len < 60) { - printk( "%s: Runt packet!\n", dev->name ); - dev->stats.rx_errors++; - } - else { - skb = dev_alloc_skb( pkt_len+2 ); - if (skb == NULL) { - DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", - dev->name )); - for( i = 0; i < RX_RING_SIZE; i++ ) - if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag & - RMD1_OWN_CHIP) - break; - - if (i > RX_RING_SIZE - 2) { - dev->stats.rx_dropped++; - head->flag |= RMD1_OWN_CHIP; - lp->cur_rx++; - } - break; - } - - if (lance_debug >= 3) { - u_char *data = PKTBUF_ADDR(head); - - printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM " - "data %02x %02x %02x %02x %02x %02x %02x %02x " - "len %d\n", - dev->name, ((u_short *)data)[6], - &data[6], data, - data[15], data[16], data[17], data[18], - data[19], data[20], data[21], data[22], - pkt_len); - } - - skb_reserve( skb, 2 ); /* 16 byte align */ - skb_put( skb, pkt_len ); /* Make room */ - lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len ); - skb->protocol = eth_type_trans( skb, dev ); - netif_rx( skb ); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - } - - head->flag |= RMD1_OWN_CHIP; - entry = (++lp->cur_rx) & RX_RING_MOD_MASK; - } - lp->cur_rx &= RX_RING_MOD_MASK; - - /* From lance.c (Donald Becker): */ - /* We should check that at least two ring entries are free. If not, - we should free one and mark stats->rx_dropped++. */ - - return 0; -} - - -static int lance_close( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_ioreg *IO = lp->iobase; - - netif_stop_queue (dev); - - AREG = CSR0; - - DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n", - dev->name, DREG )); - - /* We stop the LANCE here -- it occasionally polls - memory if we don't. */ - DREG = CSR0_STOP; - - return 0; -} - - -/* Set or clear the multicast filter for this adaptor. - num_addrs == -1 Promiscuous mode, receive all packets - num_addrs == 0 Normal mode, clear multicast list - num_addrs > 0 Multicast mode, receive normal and MC packets, and do - best-effort filtering. 
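The skb_reserve(skb, 2) in lance_rx() above is the classic receive-alignment trick: an Ethernet header is 14 bytes, so starting the frame two bytes into a 4-byte-aligned buffer lands the IP header on a 4-byte boundary. The arithmetic, spelled out:

#include <stdio.h>

#define NET_IP_ALIGN 2    /* the offset the driver reserves */
#define ETH_HLEN     14

int main(void)
{
    unsigned long buf   = 0x1000;               /* 4-byte aligned buffer      */
    unsigned long frame = buf + NET_IP_ALIGN;   /* where the frame is copied  */
    unsigned long iphdr = frame + ETH_HLEN;     /* first byte past MAC header */

    printf("frame at %#lx (mod 4 = %lu)\n", frame, frame % 4);
    printf("IP hdr at %#lx (mod 4 = %lu)\n", iphdr, iphdr % 4);
    return 0;
}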
- */ - -static void set_multicast_list( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_ioreg *IO = lp->iobase; - - if (netif_running(dev)) - /* Only possible if board is already started */ - return; - - /* We take the simple way out and always enable promiscuous mode. */ - DREG = CSR0_STOP; /* Temporarily stop the lance. */ - - if (dev->flags & IFF_PROMISC) { - /* Log any net taps. */ - DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name )); - REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */ - } else { - short multicast_table[4]; - int num_addrs = netdev_mc_count(dev); - int i; - /* We don't use the multicast table, but rely on upper-layer - * filtering. */ - memset( multicast_table, (num_addrs == 0) ? 0 : -1, - sizeof(multicast_table) ); - for( i = 0; i < 4; i++ ) - REGA( CSR8+i ) = multicast_table[i]; - REGA( CSR15 ) = 0; /* Unset promiscuous mode */ - } - - /* - * Always set BSWP after a STOP as STOP puts it back into - * little endian mode. - */ - REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); - - /* Resume normal operation and reset AREG to CSR0 */ - REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT; -} - - -/* This is needed for old RieblCards and possible for new RieblCards */ - -static int lance_set_mac_address( struct net_device *dev, void *addr ) -{ - struct lance_private *lp = netdev_priv(dev); - struct sockaddr *saddr = addr; - int i; - - if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL) - return -EOPNOTSUPP; - - if (netif_running(dev)) { - /* Only possible while card isn't started */ - DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n", - dev->name )); - return -EIO; - } - - memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len ); - for( i = 0; i < 6; i++ ) - MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */ - lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 ); - /* set also the magic for future sessions */ - *RIEBL_MAGIC_ADDR = RIEBL_MAGIC; - - return 0; -} - - -#ifdef MODULE -static struct net_device *atarilance_dev; - -static int __init atarilance_module_init(void) -{ - atarilance_dev = atarilance_probe(-1); - if (IS_ERR(atarilance_dev)) - return PTR_ERR(atarilance_dev); - return 0; -} - -static void __exit atarilance_module_exit(void) -{ - unregister_netdev(atarilance_dev); - free_irq(atarilance_dev->irq, atarilance_dev); - free_netdev(atarilance_dev); -} -module_init(atarilance_module_init); -module_exit(atarilance_module_exit); -#endif /* MODULE */ - - -/* - * Local variables: - * c-indent-level: 4 - * tab-width: 4 - * End: - */ diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c deleted file mode 100644 index b9debcf..0000000 --- a/drivers/net/au1000_eth.c +++ /dev/null @@ -1,1332 +0,0 @@ -/* - * - * Alchemy Au1x00 ethernet driver - * - * Copyright 2001-2003, 2006 MontaVista Software Inc. - * Copyright 2002 TimeSys Corp. - * Added ethtool/mii-tool support, - * Copyright 2004 Matt Porter - * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de - * or riemer@riemer-nt.de: fixed the link beat detection with - * ioctls (SIOCGMIIPHY) - * Copyright 2006 Herbert Valerio Riedel - * converted to use linux-2.6.x's PHY framework - * - * Author: MontaVista Software, Inc. 
- * ppopov@mvista.com or source@mvista.com - * - * ######################################################################## - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * ######################################################################## - * - * - */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include - -#include "au1000_eth.h" - -#ifdef AU1000_ETH_DEBUG -static int au1000_debug = 5; -#else -static int au1000_debug = 3; -#endif - -#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ - NETIF_MSG_PROBE | \ - NETIF_MSG_LINK) - -#define DRV_NAME "au1000_eth" -#define DRV_VERSION "1.7" -#define DRV_AUTHOR "Pete Popov " -#define DRV_DESC "Au1xxx on-chip Ethernet driver" - -MODULE_AUTHOR(DRV_AUTHOR); -MODULE_DESCRIPTION(DRV_DESC); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -/* - * Theory of operation - * - * The Au1000 MACs use a simple rx and tx descriptor ring scheme. - * There are four receive and four transmit descriptors. These - * descriptors are not in memory; rather, they are just a set of - * hardware registers. - * - * Since the Au1000 has a coherent data cache, the receive and - * transmit buffers are allocated from the KSEG0 segment. The - * hardware registers, however, are still mapped at KSEG1 to - * make sure there's no out-of-order writes, and that all writes - * complete immediately. - */ - -/* - * board-specific configurations - * - * PHY detection algorithm - * - * If phy_static_config is undefined, the PHY setup is - * autodetected: - * - * mii_probe() first searches the current MAC's MII bus for a PHY, - * selecting the first (or last, if phy_search_highest_addr is - * defined) PHY address not already claimed by another netdev. - * - * If nothing was found that way when searching for the 2nd ethernet - * controller's PHY and phy1_search_mac0 is defined, then - * the first MII bus is searched as well for an unclaimed PHY; this is - * needed in case of a dual-PHY accessible only through the MAC0's MII - * bus. - * - * Finally, if no PHY is found, then the corresponding ethernet - * controller is not registered to the network subsystem. - */ - -/* autodetection defaults: phy1_search_mac0 */ - -/* static PHY setup - * - * most boards PHY setup should be detectable properly with the - * autodetection algorithm in mii_probe(), but in some cases (e.g. 
if - * you have a switch attached, or want to use the PHY's interrupt - * notification capabilities) you can provide a static PHY - * configuration here - * - * IRQs may only be set, if a PHY address was configured - * If a PHY address is given, also a bus id is required to be set - * - * ps: make sure the used irqs are configured properly in the board - * specific irq-map - */ - -static void au1000_enable_mac(struct net_device *dev, int force_reset) -{ - unsigned long flags; - struct au1000_private *aup = netdev_priv(dev); - - spin_lock_irqsave(&aup->lock, flags); - - if (force_reset || (!aup->mac_enabled)) { - writel(MAC_EN_CLOCK_ENABLE, aup->enable); - au_sync_delay(2); - writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 - | MAC_EN_CLOCK_ENABLE), aup->enable); - au_sync_delay(2); - - aup->mac_enabled = 1; - } - - spin_unlock_irqrestore(&aup->lock, flags); -} - -/* - * MII operations - */ -static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg) -{ - struct au1000_private *aup = netdev_priv(dev); - u32 *const mii_control_reg = &aup->mac->mii_control; - u32 *const mii_data_reg = &aup->mac->mii_data; - u32 timedout = 20; - u32 mii_control; - - while (readl(mii_control_reg) & MAC_MII_BUSY) { - mdelay(1); - if (--timedout == 0) { - netdev_err(dev, "read_MII busy timeout!!\n"); - return -1; - } - } - - mii_control = MAC_SET_MII_SELECT_REG(reg) | - MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ; - - writel(mii_control, mii_control_reg); - - timedout = 20; - while (readl(mii_control_reg) & MAC_MII_BUSY) { - mdelay(1); - if (--timedout == 0) { - netdev_err(dev, "mdio_read busy timeout!!\n"); - return -1; - } - } - return readl(mii_data_reg); -} - -static void au1000_mdio_write(struct net_device *dev, int phy_addr, - int reg, u16 value) -{ - struct au1000_private *aup = netdev_priv(dev); - u32 *const mii_control_reg = &aup->mac->mii_control; - u32 *const mii_data_reg = &aup->mac->mii_data; - u32 timedout = 20; - u32 mii_control; - - while (readl(mii_control_reg) & MAC_MII_BUSY) { - mdelay(1); - if (--timedout == 0) { - netdev_err(dev, "mdio_write busy timeout!!\n"); - return; - } - } - - mii_control = MAC_SET_MII_SELECT_REG(reg) | - MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE; - - writel(value, mii_data_reg); - writel(mii_control, mii_control_reg); -} - -static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) -{ - /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does - * _NOT_ hold (e.g. 
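Both MII accessors above follow the same busy-wait contract: spin until MAC_MII_BUSY clears (bounded by a millisecond-granularity timeout), program the control word, and, for reads, wait out the busy flag a second time before sampling the data register. A condensed model of the poll-twice read; the register behaviour and the control-word field positions here are simulated and purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define MAC_MII_BUSY 0x1

static uint32_t mii_control;            /* fake mii_control register */
static uint32_t mii_data = 0x796d;      /* fake mii_data register    */

/* Model the hardware: BUSY is observed once, then clears. */
static uint32_t read_mii_control(void)
{
    uint32_t v = mii_control;
    mii_control &= ~MAC_MII_BUSY;
    return v;
}

static int wait_not_busy(const char *what)
{
    int timedout = 20;

    while (read_mii_control() & MAC_MII_BUSY) {
        /* the driver sleeps with mdelay(1) here */
        if (--timedout == 0) {
            fprintf(stderr, "%s busy timeout!!\n", what);
            return -1;
        }
    }
    return 0;
}

static int mdio_read(int phy_addr, int reg)
{
    if (wait_not_busy("pre-read"))
        return -1;
    /* illustrative field packing; the real driver composes this with
     * MAC_SET_MII_SELECT_REG()/MAC_SET_MII_SELECT_PHY() */
    mii_control = MAC_MII_BUSY | (uint32_t)(phy_addr << 11 | reg << 6);
    if (wait_not_busy("read"))
        return -1;
    return (int)mii_data;
}

int main(void)
{
    printf("phy0 reg2 = %#x\n", mdio_read(0, 2));
    return 0;
}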
when PHY is accessed through other MAC's MII bus) - */ - struct net_device *const dev = bus->priv; - - /* make sure the MAC associated with this - * mii_bus is enabled - */ - au1000_enable_mac(dev, 0); - - return au1000_mdio_read(dev, phy_addr, regnum); -} - -static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, - u16 value) -{ - struct net_device *const dev = bus->priv; - - /* make sure the MAC associated with this - * mii_bus is enabled - */ - au1000_enable_mac(dev, 0); - - au1000_mdio_write(dev, phy_addr, regnum, value); - return 0; -} - -static int au1000_mdiobus_reset(struct mii_bus *bus) -{ - struct net_device *const dev = bus->priv; - - /* make sure the MAC associated with this - * mii_bus is enabled - */ - au1000_enable_mac(dev, 0); - - return 0; -} - -static void au1000_hard_stop(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - u32 reg; - - netif_dbg(aup, drv, dev, "hard stop\n"); - - reg = readl(&aup->mac->control); - reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); - writel(reg, &aup->mac->control); - au_sync_delay(10); -} - -static void au1000_enable_rx_tx(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - u32 reg; - - netif_dbg(aup, hw, dev, "enable_rx_tx\n"); - - reg = readl(&aup->mac->control); - reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE); - writel(reg, &aup->mac->control); - au_sync_delay(10); -} - -static void -au1000_adjust_link(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - struct phy_device *phydev = aup->phy_dev; - unsigned long flags; - u32 reg; - - int status_change = 0; - - BUG_ON(!aup->phy_dev); - - spin_lock_irqsave(&aup->lock, flags); - - if (phydev->link && (aup->old_speed != phydev->speed)) { - /* speed changed */ - - switch (phydev->speed) { - case SPEED_10: - case SPEED_100: - break; - default: - netdev_warn(dev, "Speed (%d) is not 10/100 ???\n", - phydev->speed); - break; - } - - aup->old_speed = phydev->speed; - - status_change = 1; - } - - if (phydev->link && (aup->old_duplex != phydev->duplex)) { - /* duplex mode changed */ - - /* switching duplex mode requires to disable rx and tx! */ - au1000_hard_stop(dev); - - reg = readl(&aup->mac->control); - if (DUPLEX_FULL == phydev->duplex) { - reg |= MAC_FULL_DUPLEX; - reg &= ~MAC_DISABLE_RX_OWN; - } else { - reg &= ~MAC_FULL_DUPLEX; - reg |= MAC_DISABLE_RX_OWN; - } - writel(reg, &aup->mac->control); - au_sync_delay(1); - - au1000_enable_rx_tx(dev); - aup->old_duplex = phydev->duplex; - - status_change = 1; - } - - if (phydev->link != aup->old_link) { - /* link state changed */ - - if (!phydev->link) { - /* link went down */ - aup->old_speed = 0; - aup->old_duplex = -1; - } - - aup->old_link = phydev->link; - status_change = 1; - } - - spin_unlock_irqrestore(&aup->lock, flags); - - if (status_change) { - if (phydev->link) - netdev_info(dev, "link up (%d/%s)\n", - phydev->speed, - DUPLEX_FULL == phydev->duplex ? 
"Full" : "Half"); - else - netdev_info(dev, "link down\n"); - } -} - -static int au1000_mii_probe(struct net_device *dev) -{ - struct au1000_private *const aup = netdev_priv(dev); - struct phy_device *phydev = NULL; - int phy_addr; - - if (aup->phy_static_config) { - BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); - - if (aup->phy_addr) - phydev = aup->mii_bus->phy_map[aup->phy_addr]; - else - netdev_info(dev, "using PHY-less setup\n"); - return 0; - } - - /* find the first (lowest address) PHY - * on the current MAC's MII bus - */ - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) - if (aup->mii_bus->phy_map[phy_addr]) { - phydev = aup->mii_bus->phy_map[phy_addr]; - if (!aup->phy_search_highest_addr) - /* break out with first one found */ - break; - } - - if (aup->phy1_search_mac0) { - /* try harder to find a PHY */ - if (!phydev && (aup->mac_id == 1)) { - /* no PHY found, maybe we have a dual PHY? */ - dev_info(&dev->dev, ": no PHY found on MAC1, " - "let's see if it's attached to MAC0...\n"); - - /* find the first (lowest address) non-attached - * PHY on the MAC0 MII bus - */ - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - struct phy_device *const tmp_phydev = - aup->mii_bus->phy_map[phy_addr]; - - if (aup->mac_id == 1) - break; - - /* no PHY here... */ - if (!tmp_phydev) - continue; - - /* already claimed by MAC0 */ - if (tmp_phydev->attached_dev) - continue; - - phydev = tmp_phydev; - break; /* found it */ - } - } - } - - if (!phydev) { - netdev_err(dev, "no PHY found\n"); - return -1; - } - - /* now we are supposed to have a proper phydev, to attach to... */ - BUG_ON(phydev->attached_dev); - - phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link, - 0, PHY_INTERFACE_MODE_MII); - - if (IS_ERR(phydev)) { - netdev_err(dev, "Could not attach to PHY\n"); - return PTR_ERR(phydev); - } - - /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg - /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */ - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; - - aup->old_link = 0; - aup->old_speed = 0; - aup->old_duplex = -1; - aup->phy_dev = phydev; - - netdev_info(dev, "attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); - - return 0; -} - - -/* - * Buffer allocation/deallocation routines. The buffer descriptor returned - * has the virtual and dma address of a buffer suitable for - * both, receive and transmit operations. 
- */ -static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup) -{ - struct db_dest *pDB; - pDB = aup->pDBfree; - - if (pDB) - aup->pDBfree = pDB->pnext; - - return pDB; -} - -void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB) -{ - struct db_dest *pDBfree = aup->pDBfree; - if (pDBfree) - pDBfree->pnext = pDB; - aup->pDBfree = pDB; -} - -static void au1000_reset_mac_unlocked(struct net_device *dev) -{ - struct au1000_private *const aup = netdev_priv(dev); - int i; - - au1000_hard_stop(dev); - - writel(MAC_EN_CLOCK_ENABLE, aup->enable); - au_sync_delay(2); - writel(0, aup->enable); - au_sync_delay(2); - - aup->tx_full = 0; - for (i = 0; i < NUM_RX_DMA; i++) { - /* reset control bits */ - aup->rx_dma_ring[i]->buff_stat &= ~0xf; - } - for (i = 0; i < NUM_TX_DMA; i++) { - /* reset control bits */ - aup->tx_dma_ring[i]->buff_stat &= ~0xf; - } - - aup->mac_enabled = 0; - -} - -static void au1000_reset_mac(struct net_device *dev) -{ - struct au1000_private *const aup = netdev_priv(dev); - unsigned long flags; - - netif_dbg(aup, hw, dev, "reset mac, aup %x\n", - (unsigned)aup); - - spin_lock_irqsave(&aup->lock, flags); - - au1000_reset_mac_unlocked(dev); - - spin_unlock_irqrestore(&aup->lock, flags); -} - -/* - * Setup the receive and transmit "rings". These pointers are the addresses - * of the rx and tx MAC DMA registers so they are fixed by the hardware -- - * these are not descriptors sitting in memory. - */ -static void -au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base) -{ - int i; - - for (i = 0; i < NUM_RX_DMA; i++) { - aup->rx_dma_ring[i] = - (struct rx_dma *) - (rx_base + sizeof(struct rx_dma)*i); - } - for (i = 0; i < NUM_TX_DMA; i++) { - aup->tx_dma_ring[i] = - (struct tx_dma *) - (tx_base + sizeof(struct tx_dma)*i); - } -} - -/* - * ethtool operations - */ - -static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct au1000_private *aup = netdev_priv(dev); - - if (aup->phy_dev) - return phy_ethtool_gset(aup->phy_dev, cmd); - - return -EINVAL; -} - -static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct au1000_private *aup = netdev_priv(dev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (aup->phy_dev) - return phy_ethtool_sset(aup->phy_dev, cmd); - - return -EINVAL; -} - -static void -au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct au1000_private *aup = netdev_priv(dev); - - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - info->fw_version[0] = '\0'; - sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id); - info->regdump_len = 0; -} - -static void au1000_set_msglevel(struct net_device *dev, u32 value) -{ - struct au1000_private *aup = netdev_priv(dev); - aup->msg_enable = value; -} - -static u32 au1000_get_msglevel(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - return aup->msg_enable; -} - -static const struct ethtool_ops au1000_ethtool_ops = { - .get_settings = au1000_get_settings, - .set_settings = au1000_set_settings, - .get_drvinfo = au1000_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_msglevel = au1000_get_msglevel, - .set_msglevel = au1000_set_msglevel, -}; - - -/* - * Initialize the interface. - * - * When the device powers up, the clocks are disabled and the - * mac is in reset state. When the interface is closed, we - * do the same -- reset the device and disable the clocks to - * conserve power. 
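au1000_GetFreeDB()/au1000_ReleaseDB() above manage the buffer descriptors as an intrusive singly-linked free list: pop to borrow, push to return, O(1) either way and no allocation on the hot path. A miniature of that structure, using the conventional head-insert for the push (the driver wires the release links slightly differently):

#include <stdio.h>
#include <stddef.h>

struct db_dest {
    struct db_dest *pnext;
    void *vaddr;                  /* buffer this descriptor hands out */
};

static struct db_dest *free_head;

/* pop: O(1); no locking shown (the driver holds aup->lock) */
static struct db_dest *get_free_db(void)
{
    struct db_dest *db = free_head;

    if (db)
        free_head = db->pnext;
    return db;
}

/* push: conventional LIFO insert at the head */
static void release_db(struct db_dest *db)
{
    db->pnext = free_head;
    free_head = db;
}

int main(void)
{
    struct db_dest pool[3] = { { 0 } };
    int i;

    for (i = 0; i < 3; i++)       /* thread the pool onto the list */
        release_db(&pool[i]);

    struct db_dest *a = get_free_db();
    struct db_dest *b = get_free_db();
    printf("got %td then %td\n", a - pool, b - pool);
    release_db(a);
    printf("head is now %td\n", get_free_db() - pool);
    return 0;
}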
Thus, whenever au1000_init() is called, - * the device should already be in reset state. - */ -static int au1000_init(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - unsigned long flags; - int i; - u32 control; - - netif_dbg(aup, hw, dev, "au1000_init\n"); - - /* bring the device out of reset */ - au1000_enable_mac(dev, 1); - - spin_lock_irqsave(&aup->lock, flags); - - writel(0, &aup->mac->control); - aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2; - aup->tx_tail = aup->tx_head; - aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2; - - writel(dev->dev_addr[5]<<8 | dev->dev_addr[4], - &aup->mac->mac_addr_high); - writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 | - dev->dev_addr[1]<<8 | dev->dev_addr[0], - &aup->mac->mac_addr_low); - - - for (i = 0; i < NUM_RX_DMA; i++) - aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE; - - au_sync(); - - control = MAC_RX_ENABLE | MAC_TX_ENABLE; -#ifndef CONFIG_CPU_LITTLE_ENDIAN - control |= MAC_BIG_ENDIAN; -#endif - if (aup->phy_dev) { - if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex)) - control |= MAC_FULL_DUPLEX; - else - control |= MAC_DISABLE_RX_OWN; - } else { /* PHY-less op, assume full-duplex */ - control |= MAC_FULL_DUPLEX; - } - - writel(control, &aup->mac->control); - writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */ - au_sync(); - - spin_unlock_irqrestore(&aup->lock, flags); - return 0; -} - -static inline void au1000_update_rx_stats(struct net_device *dev, u32 status) -{ - struct net_device_stats *ps = &dev->stats; - - ps->rx_packets++; - if (status & RX_MCAST_FRAME) - ps->multicast++; - - if (status & RX_ERROR) { - ps->rx_errors++; - if (status & RX_MISSED_FRAME) - ps->rx_missed_errors++; - if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR)) - ps->rx_length_errors++; - if (status & RX_CRC_ERROR) - ps->rx_crc_errors++; - if (status & RX_COLL) - ps->collisions++; - } else - ps->rx_bytes += status & RX_FRAME_LEN_MASK; - -} - -/* - * Au1000 receive routine. 
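au1000_init() above programs the station address by packing the six bytes into two registers: bytes 4..5 into mac_addr_high and bytes 0..3 into mac_addr_low, low byte in the least significant lane. The shifts, checked in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint8_t a[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    /* same packing as the writel() pair in au1000_init() */
    uint32_t high = a[5] << 8 | a[4];
    uint32_t low  = (uint32_t)a[3] << 24 | a[2] << 16 | a[1] << 8 | a[0];

    printf("mac_addr_high = %#010x\n", high);   /* 0x00005544 */
    printf("mac_addr_low  = %#010x\n", low);    /* 0x33221100 */
    return 0;
}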
- */ -static int au1000_rx(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - struct sk_buff *skb; - struct rx_dma *prxd; - u32 buff_stat, status; - struct db_dest *pDB; - u32 frmlen; - - netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head); - - prxd = aup->rx_dma_ring[aup->rx_head]; - buff_stat = prxd->buff_stat; - while (buff_stat & RX_T_DONE) { - status = prxd->status; - pDB = aup->rx_db_inuse[aup->rx_head]; - au1000_update_rx_stats(dev, status); - if (!(status & RX_ERROR)) { - - /* good frame */ - frmlen = (status & RX_FRAME_LEN_MASK); - frmlen -= 4; /* Remove FCS */ - skb = dev_alloc_skb(frmlen + 2); - if (skb == NULL) { - netdev_err(dev, "Memory squeeze, dropping packet.\n"); - dev->stats.rx_dropped++; - continue; - } - skb_reserve(skb, 2); /* 16 byte IP header align */ - skb_copy_to_linear_data(skb, - (unsigned char *)pDB->vaddr, frmlen); - skb_put(skb, frmlen); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); /* pass the packet to upper layers */ - } else { - if (au1000_debug > 4) { - pr_err("rx_error(s):"); - if (status & RX_MISSED_FRAME) - pr_cont(" miss"); - if (status & RX_WDOG_TIMER) - pr_cont(" wdog"); - if (status & RX_RUNT) - pr_cont(" runt"); - if (status & RX_OVERLEN) - pr_cont(" overlen"); - if (status & RX_COLL) - pr_cont(" coll"); - if (status & RX_MII_ERROR) - pr_cont(" mii error"); - if (status & RX_CRC_ERROR) - pr_cont(" crc error"); - if (status & RX_LEN_ERROR) - pr_cont(" len error"); - if (status & RX_U_CNTRL_FRAME) - pr_cont(" u control frame"); - pr_cont("\n"); - } - } - prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); - aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); - au_sync(); - - /* next descriptor */ - prxd = aup->rx_dma_ring[aup->rx_head]; - buff_stat = prxd->buff_stat; - } - return 0; -} - -static void au1000_update_tx_stats(struct net_device *dev, u32 status) -{ - struct au1000_private *aup = netdev_priv(dev); - struct net_device_stats *ps = &dev->stats; - - if (status & TX_FRAME_ABORTED) { - if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { - if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { - /* any other tx errors are only valid - * in half duplex mode - */ - ps->tx_errors++; - ps->tx_aborted_errors++; - } - } else { - ps->tx_errors++; - ps->tx_aborted_errors++; - if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) - ps->tx_carrier_errors++; - } - } -} - -/* - * Called from the interrupt service routine to acknowledge - * the TX DONE bits. This is a must if the irq is setup as - * edge triggered. - */ -static void au1000_tx_ack(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - struct tx_dma *ptxd; - - ptxd = aup->tx_dma_ring[aup->tx_tail]; - - while (ptxd->buff_stat & TX_T_DONE) { - au1000_update_tx_stats(dev, ptxd->status); - ptxd->buff_stat &= ~TX_T_DONE; - ptxd->len = 0; - au_sync(); - - aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); - ptxd = aup->tx_dma_ring[aup->tx_tail]; - - if (aup->tx_full) { - aup->tx_full = 0; - netif_wake_queue(dev); - } - } -} - -/* - * Au1000 interrupt service routine. 
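The comment before au1000_tx_ack() above states the key constraint: with an edge-triggered interrupt, every TX_T_DONE bit must be cleared before the handler returns, or no new edge ever fires and completion processing stalls. The tail-chasing shape of that routine, reduced to its loop:

#include <stdio.h>
#include <stdint.h>

#define NUM_TX_DMA 4
#define TX_T_DONE  0x2

struct tx_dma { uint32_t buff_stat; uint32_t len; };

static struct tx_dma ring[NUM_TX_DMA] = {
    { TX_T_DONE, 60 }, { TX_T_DONE, 1514 }, { 0, 0 }, { 0, 0 }
};

int main(void)
{
    unsigned tail = 0;

    /* sweep forward from the tail, acking every completed slot */
    while (ring[tail].buff_stat & TX_T_DONE) {
        ring[tail].buff_stat &= ~TX_T_DONE;    /* must clear: edge IRQ */
        ring[tail].len = 0;
        printf("acked slot %u\n", tail);
        tail = (tail + 1) & (NUM_TX_DMA - 1);  /* power-of-two wrap */
    }
    printf("tail now %u\n", tail);
    return 0;
}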
- */ -static irqreturn_t au1000_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - - /* Handle RX interrupts first to minimize chance of overrun */ - - au1000_rx(dev); - au1000_tx_ack(dev); - return IRQ_RETVAL(1); -} - -static int au1000_open(struct net_device *dev) -{ - int retval; - struct au1000_private *aup = netdev_priv(dev); - - netif_dbg(aup, drv, dev, "open: dev=%p\n", dev); - - retval = request_irq(dev->irq, au1000_interrupt, 0, - dev->name, dev); - if (retval) { - netdev_err(dev, "unable to get IRQ %d\n", dev->irq); - return retval; - } - - retval = au1000_init(dev); - if (retval) { - netdev_err(dev, "error in au1000_init\n"); - free_irq(dev->irq, dev); - return retval; - } - - if (aup->phy_dev) { - /* cause the PHY state machine to schedule a link state check */ - aup->phy_dev->state = PHY_CHANGELINK; - phy_start(aup->phy_dev); - } - - netif_start_queue(dev); - - netif_dbg(aup, drv, dev, "open: Initialization done.\n"); - - return 0; -} - -static int au1000_close(struct net_device *dev) -{ - unsigned long flags; - struct au1000_private *const aup = netdev_priv(dev); - - netif_dbg(aup, drv, dev, "close: dev=%p\n", dev); - - if (aup->phy_dev) - phy_stop(aup->phy_dev); - - spin_lock_irqsave(&aup->lock, flags); - - au1000_reset_mac_unlocked(dev); - - /* stop the device */ - netif_stop_queue(dev); - - /* disable the interrupt */ - free_irq(dev->irq, dev); - spin_unlock_irqrestore(&aup->lock, flags); - - return 0; -} - -/* - * Au1000 transmit routine. - */ -static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - struct net_device_stats *ps = &dev->stats; - struct tx_dma *ptxd; - u32 buff_stat; - struct db_dest *pDB; - int i; - - netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n", - (unsigned)aup, skb->len, - skb->data, aup->tx_head); - - ptxd = aup->tx_dma_ring[aup->tx_head]; - buff_stat = ptxd->buff_stat; - if (buff_stat & TX_DMA_ENABLE) { - /* We've wrapped around and the transmitter is still busy */ - netif_stop_queue(dev); - aup->tx_full = 1; - return NETDEV_TX_BUSY; - } else if (buff_stat & TX_T_DONE) { - au1000_update_tx_stats(dev, ptxd->status); - ptxd->len = 0; - } - - if (aup->tx_full) { - aup->tx_full = 0; - netif_wake_queue(dev); - } - - pDB = aup->tx_db_inuse[aup->tx_head]; - skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); - if (skb->len < ETH_ZLEN) { - for (i = skb->len; i < ETH_ZLEN; i++) - ((char *)pDB->vaddr)[i] = 0; - - ptxd->len = ETH_ZLEN; - } else - ptxd->len = skb->len; - - ps->tx_packets++; - ps->tx_bytes += ptxd->len; - - ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; - au_sync(); - dev_kfree_skb(skb); - aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); - return NETDEV_TX_OK; -} - -/* - * The Tx ring has been full longer than the watchdog timeout - * value. The transmitter must be hung? - */ -static void au1000_tx_timeout(struct net_device *dev) -{ - netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev); - au1000_reset_mac(dev); - au1000_init(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); -} - -static void au1000_multicast_list(struct net_device *dev) -{ - struct au1000_private *aup = netdev_priv(dev); - u32 reg; - - netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags); - reg = readl(&aup->mac->control); - if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ - reg |= MAC_PROMISCUOUS; - } else if ((dev->flags & IFF_ALLMULTI) || - netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) { - reg |= MAC_PASS_ALL_MULTI; - reg &= ~MAC_PROMISCUOUS; - netdev_info(dev, "Pass all multicast\n"); - } else { - struct netdev_hw_addr *ha; - u32 mc_filter[2]; /* Multicast hash filter */ - - mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(ha, dev) - set_bit(ether_crc(ETH_ALEN, ha->addr)>>26, - (long *)mc_filter); - writel(mc_filter[1], &aup->mac->multi_hash_high); - writel(mc_filter[0], &aup->mac->multi_hash_low); - reg &= ~MAC_PROMISCUOUS; - reg |= MAC_HASH_MODE; - } - writel(reg, &aup->mac->control); -} - -static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct au1000_private *aup = netdev_priv(dev); - - if (!netif_running(dev)) - return -EINVAL; - - if (!aup->phy_dev) - return -EINVAL; /* PHY not controllable */ - - return phy_mii_ioctl(aup->phy_dev, rq, cmd); -} - -static const struct net_device_ops au1000_netdev_ops = { - .ndo_open = au1000_open, - .ndo_stop = au1000_close, - .ndo_start_xmit = au1000_tx, - .ndo_set_multicast_list = au1000_multicast_list, - .ndo_do_ioctl = au1000_ioctl, - .ndo_tx_timeout = au1000_tx_timeout, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, -}; - -static int __devinit au1000_probe(struct platform_device *pdev) -{ - static unsigned version_printed; - struct au1000_private *aup = NULL; - struct au1000_eth_platform_data *pd; - struct net_device *dev = NULL; - struct db_dest *pDB, *pDBfree; - int irq, i, err = 0; - struct resource *base, *macen; - - base = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!base) { - dev_err(&pdev->dev, "failed to retrieve base register\n"); - err = -ENODEV; - goto out; - } - - macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!macen) { - dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n"); - err = -ENODEV; - goto out; - } - - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "failed to retrieve IRQ\n"); - err = -ENODEV; - goto out; - } - - if (!request_mem_region(base->start, resource_size(base), - pdev->name)) { - dev_err(&pdev->dev, "failed to request memory region for base registers\n"); - err = -ENXIO; - goto out; - } - - if (!request_mem_region(macen->start, resource_size(macen), - pdev->name)) { - dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n"); - err = -ENXIO; - goto err_request; - } - - dev = alloc_etherdev(sizeof(struct au1000_private)); - if (!dev) { - dev_err(&pdev->dev, "alloc_etherdev failed\n"); - err = -ENOMEM; - goto err_alloc; - } - - SET_NETDEV_DEV(dev, &pdev->dev); - platform_set_drvdata(pdev, dev); - aup = netdev_priv(dev); - - spin_lock_init(&aup->lock); - aup->msg_enable = (au1000_debug < 4 ? 
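
The multicast path above hashes each address with ether_crc() and uses the
top six bits of the CRC to set one of 64 bits split across the
multi_hash_high/low registers. A standalone sketch of that bucket selection;
the CRC routine below is a plain bitwise Ethernet CRC-32 (polynomial
0x04c11db7) written out for illustration, not the kernel's implementation:

#include <stdio.h>
#include <stdint.h>

static uint32_t ether_crc32(int len, const unsigned char *p)
{
	uint32_t crc = 0xffffffff;

	while (--len >= 0) {
		unsigned char octet = *p++;

		for (int bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ octet) & 1) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t filter[2] = { 0, 0 };	/* models multi_hash_low/high */
	unsigned int bucket = ether_crc32(6, mac) >> 26;	/* 0..63 */

	filter[bucket >> 5] |= 1u << (bucket & 31);
	printf("bucket %u -> filter[%u] = 0x%08x\n",
	       bucket, bucket >> 5, filter[bucket >> 5]);
	return 0;
}

Hash filtering is approximate: several multicast addresses can share one
bucket, so the stack still has to discard the occasional unwanted frame.
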
- AU1000_DEF_MSG_ENABLE : au1000_debug); - - /* Allocate the data buffers - * Snooping works fine with eth on all au1xxx - */ - aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * - (NUM_TX_BUFFS + NUM_RX_BUFFS), - &aup->dma_addr, 0); - if (!aup->vaddr) { - dev_err(&pdev->dev, "failed to allocate data buffers\n"); - err = -ENOMEM; - goto err_vaddr; - } - - /* aup->mac is the base address of the MAC's registers */ - aup->mac = (struct mac_reg *) - ioremap_nocache(base->start, resource_size(base)); - if (!aup->mac) { - dev_err(&pdev->dev, "failed to ioremap MAC registers\n"); - err = -ENXIO; - goto err_remap1; - } - - /* Setup some variables for quick register address access */ - aup->enable = (u32 *)ioremap_nocache(macen->start, - resource_size(macen)); - if (!aup->enable) { - dev_err(&pdev->dev, "failed to ioremap MAC enable register\n"); - err = -ENXIO; - goto err_remap2; - } - aup->mac_id = pdev->id; - - if (pdev->id == 0) - au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); - else if (pdev->id == 1) - au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); - - /* set a random MAC now in case platform_data doesn't provide one */ - random_ether_addr(dev->dev_addr); - - writel(0, aup->enable); - aup->mac_enabled = 0; - - pd = pdev->dev.platform_data; - if (!pd) { - dev_info(&pdev->dev, "no platform_data passed," - " PHY search on MAC0\n"); - aup->phy1_search_mac0 = 1; - } else { - if (is_valid_ether_addr(pd->mac)) - memcpy(dev->dev_addr, pd->mac, 6); - - aup->phy_static_config = pd->phy_static_config; - aup->phy_search_highest_addr = pd->phy_search_highest_addr; - aup->phy1_search_mac0 = pd->phy1_search_mac0; - aup->phy_addr = pd->phy_addr; - aup->phy_busid = pd->phy_busid; - aup->phy_irq = pd->phy_irq; - } - - if (aup->phy_busid && aup->phy_busid > 0) { - dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n"); - err = -ENODEV; - goto err_mdiobus_alloc; - } - - aup->mii_bus = mdiobus_alloc(); - if (aup->mii_bus == NULL) { - dev_err(&pdev->dev, "failed to allocate mdiobus structure\n"); - err = -ENOMEM; - goto err_mdiobus_alloc; - } - - aup->mii_bus->priv = dev; - aup->mii_bus->read = au1000_mdiobus_read; - aup->mii_bus->write = au1000_mdiobus_write; - aup->mii_bus->reset = au1000_mdiobus_reset; - aup->mii_bus->name = "au1000_eth_mii"; - snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id); - aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (aup->mii_bus->irq == NULL) - goto err_out; - - for (i = 0; i < PHY_MAX_ADDR; ++i) - aup->mii_bus->irq[i] = PHY_POLL; - /* if known, set corresponding PHY IRQs */ - if (aup->phy_static_config) - if (aup->phy_irq && aup->phy_busid == aup->mac_id) - aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq; - - err = mdiobus_register(aup->mii_bus); - if (err) { - dev_err(&pdev->dev, "failed to register MDIO bus\n"); - goto err_mdiobus_reg; - } - - if (au1000_mii_probe(dev) != 0) - goto err_out; - - pDBfree = NULL; - /* setup the data buffer descriptors and attach a buffer to each one */ - pDB = aup->db; - for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { - pDB->pnext = pDBfree; - pDBfree = pDB; - pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); - pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); - pDB++; - } - aup->pDBfree = pDBfree; - - for (i = 0; i < NUM_RX_DMA; i++) { - pDB = au1000_GetFreeDB(aup); - if (!pDB) - goto err_out; - - aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; - aup->rx_db_inuse[i] = pDB; - } - for (i = 0; i < NUM_TX_DMA; 
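
The buffer bookkeeping above chains all db_dest entries into a LIFO free
list through their pnext pointers; au1000_GetFreeDB() and au1000_ReleaseDB()
pop and push that list. A minimal sketch of the idiom (the id field stands
in for vaddr/dma_addr, and the helper names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct db_dest {
	struct db_dest *pnext;
	int id;
};

static struct db_dest *get_free_db(struct db_dest **head)
{
	struct db_dest *db = *head;

	if (db)
		*head = db->pnext;	/* pop */
	return db;
}

static void release_db(struct db_dest **head, struct db_dest *db)
{
	db->pnext = *head;		/* push */
	*head = db;
}

int main(void)
{
	struct db_dest pool[4], *free_list = NULL, *db;

	for (int i = 0; i < 4; i++) {	/* mirrors the probe loop over aup->db */
		pool[i].id = i;
		release_db(&free_list, &pool[i]);
	}
	db = get_free_db(&free_list);
	printf("got buffer %d\n", db ? db->id : -1);
	release_db(&free_list, db);
	return 0;
}
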
i++) { - pDB = au1000_GetFreeDB(aup); - if (!pDB) - goto err_out; - - aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; - aup->tx_dma_ring[i]->len = 0; - aup->tx_db_inuse[i] = pDB; - } - - dev->base_addr = base->start; - dev->irq = irq; - dev->netdev_ops = &au1000_netdev_ops; - SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); - dev->watchdog_timeo = ETH_TX_TIMEOUT; - - /* - * The boot code uses the ethernet controller, so reset it to start - * fresh. au1000_init() expects that the device is in reset state. - */ - au1000_reset_mac(dev); - - err = register_netdev(dev); - if (err) { - netdev_err(dev, "Cannot register net device, aborting.\n"); - goto err_out; - } - - netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n", - (unsigned long)base->start, irq); - if (version_printed++ == 0) - pr_info("%s version %s %s\n", - DRV_NAME, DRV_VERSION, DRV_AUTHOR); - - return 0; - -err_out: - if (aup->mii_bus != NULL) - mdiobus_unregister(aup->mii_bus); - - /* here we should have a valid dev plus aup-> register addresses - * so we can reset the mac properly. - */ - au1000_reset_mac(dev); - - for (i = 0; i < NUM_RX_DMA; i++) { - if (aup->rx_db_inuse[i]) - au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); - } - for (i = 0; i < NUM_TX_DMA; i++) { - if (aup->tx_db_inuse[i]) - au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); - } -err_mdiobus_reg: - mdiobus_free(aup->mii_bus); -err_mdiobus_alloc: - iounmap(aup->enable); -err_remap2: - iounmap(aup->mac); -err_remap1: - dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); -err_vaddr: - free_netdev(dev); -err_alloc: - release_mem_region(macen->start, resource_size(macen)); -err_request: - release_mem_region(base->start, resource_size(base)); -out: - return err; -} - -static int __devexit au1000_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - struct au1000_private *aup = netdev_priv(dev); - int i; - struct resource *base, *macen; - - platform_set_drvdata(pdev, NULL); - - unregister_netdev(dev); - mdiobus_unregister(aup->mii_bus); - mdiobus_free(aup->mii_bus); - - for (i = 0; i < NUM_RX_DMA; i++) - if (aup->rx_db_inuse[i]) - au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); - - for (i = 0; i < NUM_TX_DMA; i++) - if (aup->tx_db_inuse[i]) - au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); - - dma_free_noncoherent(NULL, MAX_BUF_SIZE * - (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); - - iounmap(aup->mac); - iounmap(aup->enable); - - base = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(base->start, resource_size(base)); - - macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); - release_mem_region(macen->start, resource_size(macen)); - - free_netdev(dev); - - return 0; -} - -static struct platform_driver au1000_eth_driver = { - .probe = au1000_probe, - .remove = __devexit_p(au1000_remove), - .driver = { - .name = "au1000-eth", - .owner = THIS_MODULE, - }, -}; -MODULE_ALIAS("platform:au1000-eth"); - - -static int __init au1000_init_module(void) -{ - return platform_driver_register(&au1000_eth_driver); -} - -static void __exit au1000_exit_module(void) -{ - platform_driver_unregister(&au1000_eth_driver); -} - -module_init(au1000_init_module); -module_exit(au1000_exit_module); diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h deleted file mode 100644 index 6229c77..0000000 --- a/drivers/net/au1000_eth.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - * - * Alchemy Au1x00 ethernet driver include file - * - * Author: Pete Popov - * 
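
The error path of au1000_probe() above is the classic kernel unwind ladder:
each failure jumps to a label that releases everything acquired so far, in
reverse order, so there is exactly one cleanup site per acquired resource.
A compact illustration of the pattern with the resources reduced to two heap
allocations (a sketch, not the driver's code):

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b;
	int err = -1;

	a = malloc(16);			/* step 1 */
	if (!a)
		goto out;
	b = malloc(16);			/* step 2 */
	if (!b)
		goto err_free_a;

	puts("all resources acquired");
	free(b);
	free(a);
	return 0;

err_free_a:				/* unwind in reverse order */
	free(a);
out:
	return err;
}

int main(void)
{
	return setup() ? 1 : 0;
}

The payoff is the same as in the probe function: adding a new setup step
costs one new label, and no cleanup code is ever duplicated.
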
- * Copyright 2001 MontaVista Software Inc. - * - * ######################################################################## - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * ######################################################################## - * - * - */ - - -#define MAC_IOSIZE 0x10000 -#define NUM_RX_DMA 4 /* Au1x00 has 4 rx hardware descriptors */ -#define NUM_TX_DMA 4 /* Au1x00 has 4 tx hardware descriptors */ - -#define NUM_RX_BUFFS 4 -#define NUM_TX_BUFFS 4 -#define MAX_BUF_SIZE 2048 - -#define ETH_TX_TIMEOUT (HZ/4) -#define MAC_MIN_PKT_SIZE 64 - -#define MULTICAST_FILTER_LIMIT 64 - -/* - * Data Buffer Descriptor. Data buffers must be aligned on 32 byte - * boundary for both, receive and transmit. - */ -struct db_dest { - struct db_dest *pnext; - u32 *vaddr; - dma_addr_t dma_addr; -}; - -/* - * The transmit and receive descriptors are memory - * mapped registers. - */ -struct tx_dma { - u32 status; - u32 buff_stat; - u32 len; - u32 pad; -}; - -struct rx_dma { - u32 status; - u32 buff_stat; - u32 pad[2]; -}; - - -/* - * MAC control registers, memory mapped. - */ -struct mac_reg { - u32 control; - u32 mac_addr_high; - u32 mac_addr_low; - u32 multi_hash_high; - u32 multi_hash_low; - u32 mii_control; - u32 mii_data; - u32 flow_control; - u32 vlan1_tag; - u32 vlan2_tag; -}; - - -struct au1000_private { - struct db_dest *pDBfree; - struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS]; - struct rx_dma *rx_dma_ring[NUM_RX_DMA]; - struct tx_dma *tx_dma_ring[NUM_TX_DMA]; - struct db_dest *rx_db_inuse[NUM_RX_DMA]; - struct db_dest *tx_db_inuse[NUM_TX_DMA]; - u32 rx_head; - u32 tx_head; - u32 tx_tail; - u32 tx_full; - - int mac_id; - - int mac_enabled; /* whether MAC is currently enabled and running - * (req. for mdio) - */ - - int old_link; /* used by au1000_adjust_link */ - int old_speed; - int old_duplex; - - struct phy_device *phy_dev; - struct mii_bus *mii_bus; - - /* PHY configuration */ - int phy_static_config; - int phy_search_highest_addr; - int phy1_search_mac0; - - int phy_addr; - int phy_busid; - int phy_irq; - - /* These variables are just for quick access - * to certain regs addresses. - */ - struct mac_reg *mac; /* mac registers */ - u32 *enable; /* address of MAC Enable Register */ - - u32 vaddr; /* virtual address of rx/tx buffers */ - dma_addr_t dma_addr; /* dma address of rx/tx buffers */ - - spinlock_t lock; /* Serialise access to device */ - - u32 msg_enable; -}; diff --git a/drivers/net/declance.c b/drivers/net/declance.c deleted file mode 100644 index d5598f6..0000000 --- a/drivers/net/declance.c +++ /dev/null @@ -1,1381 +0,0 @@ -/* - * Lance ethernet driver for the MIPS processor based - * DECstation family - * - * - * adopted from sunlance.c by Richard van den Berg - * - * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. 
Rozycki - * - * additional sources: - * - PMAD-AA TURBOchannel Ethernet Module Functional Specification, - * Revision 1.2 - * - * History: - * - * v0.001: The kernel accepts the code and it shows the hardware address. - * - * v0.002: Removed most sparc stuff, left only some module and dma stuff. - * - * v0.003: Enhanced base address calculation from proposals by - * Harald Koerfgen and Thomas Riemer. - * - * v0.004: lance-regs is pointing at the right addresses, added prom - * check. First start of address mapping and DMA. - * - * v0.005: started to play around with LANCE-DMA. This driver will not - * work for non IOASIC lances. HK - * - * v0.006: added pointer arrays to lance_private and setup routine for - * them in dec_lance_init. HK - * - * v0.007: Big shit. The LANCE seems to use a different DMA mechanism to - * access the init block. This looks like one (short) word at a - * time, but the smallest amount the IOASIC can transfer is a - * (long) word. So we have a 2-2 padding here. Changed - * lance_init_block accordingly. The 16-16 padding for the buffers - * seems to be correct. HK - * - * v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer - * - * v0.009: Module support fixes, multiple interfaces support, various - * bits. macro - * - * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the - * PMAX requirement to only use halfword accesses to the - * buffer. macro - * - * v0.011: Converted the PMAD to the driver model. macro - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include - -static char version[] __devinitdata = -"declance.c: v0.011 by Linux MIPS DECstation task force\n"; - -MODULE_AUTHOR("Linux MIPS DECstation task force"); -MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver"); -MODULE_LICENSE("GPL"); - -#define __unused __attribute__ ((unused)) - -/* - * card types - */ -#define ASIC_LANCE 1 -#define PMAD_LANCE 2 -#define PMAX_LANCE 3 - - -#define LE_CSR0 0 -#define LE_CSR1 1 -#define LE_CSR2 2 -#define LE_CSR3 3 - -#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */ - -#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */ -#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */ -#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */ -#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */ -#define LE_C0_MERR 0x0800 /* ME: Memory error */ -#define LE_C0_RINT 0x0400 /* Received interrupt */ -#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */ -#define LE_C0_IDON 0x0100 /* IFIN: Init finished. 
*/ -#define LE_C0_INTR 0x0080 /* Interrupt or error */ -#define LE_C0_INEA 0x0040 /* Interrupt enable */ -#define LE_C0_RXON 0x0020 /* Receiver on */ -#define LE_C0_TXON 0x0010 /* Transmitter on */ -#define LE_C0_TDMD 0x0008 /* Transmitter demand */ -#define LE_C0_STOP 0x0004 /* Stop the card */ -#define LE_C0_STRT 0x0002 /* Start the card */ -#define LE_C0_INIT 0x0001 /* Init the card */ - -#define LE_C3_BSWP 0x4 /* SWAP */ -#define LE_C3_ACON 0x2 /* ALE Control */ -#define LE_C3_BCON 0x1 /* Byte control */ - -/* Receive message descriptor 1 */ -#define LE_R1_OWN 0x8000 /* Who owns the entry */ -#define LE_R1_ERR 0x4000 /* Error: if FRA, OFL, CRC or BUF is set */ -#define LE_R1_FRA 0x2000 /* FRA: Frame error */ -#define LE_R1_OFL 0x1000 /* OFL: Frame overflow */ -#define LE_R1_CRC 0x0800 /* CRC error */ -#define LE_R1_BUF 0x0400 /* BUF: Buffer error */ -#define LE_R1_SOP 0x0200 /* Start of packet */ -#define LE_R1_EOP 0x0100 /* End of packet */ -#define LE_R1_POK 0x0300 /* Packet is complete: SOP + EOP */ - -/* Transmit message descriptor 1 */ -#define LE_T1_OWN 0x8000 /* Lance owns the packet */ -#define LE_T1_ERR 0x4000 /* Error summary */ -#define LE_T1_EMORE 0x1000 /* Error: more than one retry needed */ -#define LE_T1_EONE 0x0800 /* Error: one retry needed */ -#define LE_T1_EDEF 0x0400 /* Error: deferred */ -#define LE_T1_SOP 0x0200 /* Start of packet */ -#define LE_T1_EOP 0x0100 /* End of packet */ -#define LE_T1_POK 0x0300 /* Packet is complete: SOP + EOP */ - -#define LE_T3_BUF 0x8000 /* Buffer error */ -#define LE_T3_UFL 0x4000 /* Error underflow */ -#define LE_T3_LCOL 0x1000 /* Error late collision */ -#define LE_T3_CLOS 0x0800 /* Error carrier loss */ -#define LE_T3_RTY 0x0400 /* Error retry */ -#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */ - -/* Define: 2^4 Tx buffers and 2^4 Rx buffers */ - -#ifndef LANCE_LOG_TX_BUFFERS -#define LANCE_LOG_TX_BUFFERS 4 -#define LANCE_LOG_RX_BUFFERS 4 -#endif - -#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) -#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) - -#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) -#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) - -#define PKT_BUF_SZ 1536 -#define RX_BUFF_SIZE PKT_BUF_SZ -#define TX_BUFF_SIZE PKT_BUF_SZ - -#undef TEST_HITS -#define ZERO 0 - -/* - * The DS2100/3100 have a linear 64 kB buffer which supports halfword - * accesses only. Each halfword of the buffer is word-aligned in the - * CPU address space. - * - * The PMAD-AA has a 128 kB buffer on-board. - * - * The IOASIC LANCE devices use a shared memory region. This region - * as seen from the CPU is (max) 128 kB long and has to be on an 128 kB - * boundary. The LANCE sees this as a 64 kB long continuous memory - * region. - * - * The LANCE's DMA address is used as an index in this buffer and DMA - * takes place in bursts of eight 16-bit words which are packed into - * four 32-bit words by the IOASIC. This leads to a strange padding: - * 16 bytes of valid data followed by a 16 byte gap :-(. - */ - -struct lance_rx_desc { - unsigned short rmd0; /* low address of packet */ - unsigned short rmd1; /* high address of packet - and descriptor bits */ - short length; /* 2s complement (negative!) - of buffer length */ - unsigned short mblength; /* actual number of bytes received */ -}; - -struct lance_tx_desc { - unsigned short tmd0; /* low address of packet */ - unsigned short tmd1; /* high address of packet - and descriptor bits */ - short length; /* 2s complement (negative!) 
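
The length fields in these descriptors hold the buffer size as a negative
two's-complement 16-bit value, and the LANCE expects the top four bits of
the field to be ones (hence the "| 0xf000" when the rings are initialized
further down). A small sketch of the encoding:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int len = 1536;				/* RX_BUFF_SIZE */
	uint16_t enc = (uint16_t)-len | 0xf000;	/* chip wants bits 12-15 set */

	printf("encoded: 0x%04x\n", enc);	/* 0xfa00 */
	printf("decoded: %d\n", -(int16_t)enc);	/* 1536 again */
	return 0;
}
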
- of buffer length */ - unsigned short misc; -}; - - -/* First part of the LANCE initialization block, described in databook. */ -struct lance_init_block { - unsigned short mode; /* pre-set mode (reg. 15) */ - - unsigned short phys_addr[3]; /* physical ethernet address */ - unsigned short filter[4]; /* multicast filter */ - - /* Receive and transmit ring base, along with extra bits. */ - unsigned short rx_ptr; /* receive descriptor addr */ - unsigned short rx_len; /* receive len and high addr */ - unsigned short tx_ptr; /* transmit descriptor addr */ - unsigned short tx_len; /* transmit len and high addr */ - - short gap[4]; - - /* The buffer descriptors */ - struct lance_rx_desc brx_ring[RX_RING_SIZE]; - struct lance_tx_desc btx_ring[TX_RING_SIZE]; -}; - -#define BUF_OFFSET_CPU sizeof(struct lance_init_block) -#define BUF_OFFSET_LNC sizeof(struct lance_init_block) - -#define shift_off(off, type) \ - (type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off) - -#define lib_off(rt, type) \ - shift_off(offsetof(struct lance_init_block, rt), type) - -#define lib_ptr(ib, rt, type) \ - ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type))) - -#define rds_off(rt, type) \ - shift_off(offsetof(struct lance_rx_desc, rt), type) - -#define rds_ptr(rd, rt, type) \ - ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type))) - -#define tds_off(rt, type) \ - shift_off(offsetof(struct lance_tx_desc, rt), type) - -#define tds_ptr(td, rt, type) \ - ((volatile u16 *)((u8 *)(td) + tds_off(rt, type))) - -struct lance_private { - struct net_device *next; - int type; - int dma_irq; - volatile struct lance_regs *ll; - - spinlock_t lock; - - int rx_new, tx_new; - int rx_old, tx_old; - - unsigned short busmaster_regval; - - struct timer_list multicast_timer; - - /* Pointers to the ring buffers as seen from the CPU */ - char *rx_buf_ptr_cpu[RX_RING_SIZE]; - char *tx_buf_ptr_cpu[TX_RING_SIZE]; - - /* Pointers to the ring buffers as seen from the LANCE */ - uint rx_buf_ptr_lnc[RX_RING_SIZE]; - uint tx_buf_ptr_lnc[TX_RING_SIZE]; -}; - -#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ - lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\ - lp->tx_old - lp->tx_new-1) - -/* The lance control ports are at an absolute address, machine and tc-slot - * dependent. - * DECstations do only 32-bit access and the LANCE uses 16 bit addresses, - * so we have to give the structure an extra member making rap pointing - * at the right address - */ -struct lance_regs { - volatile unsigned short rdp; /* register data port */ - unsigned short pad; - volatile unsigned short rap; /* register address port */ -}; - -int dec_lance_debug = 2; - -static struct tc_driver dec_lance_tc_driver; -static struct net_device *root_lance_dev; - -static inline void writereg(volatile unsigned short *regptr, short value) -{ - *regptr = value; - iob(); -} - -/* Load the CSR registers */ -static void load_csrs(struct lance_private *lp) -{ - volatile struct lance_regs *ll = lp->ll; - uint leptr; - - /* The address space as seen from the LANCE - * begins at address 0. 
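
The lib_ptr()/shift_off() macros above encode the memory quirk described in
the layout comment: on the PMAX and IOASIC variants each 16-bit word of the
shared buffer occupies a full 32-bit slot in CPU address space, so every
structure offset has to be doubled before it is dereferenced, while the
PMAD's linear buffer uses the offsets as-is. The arithmetic in isolation,
with a made-up field offset of 6:

#include <stdio.h>

#define ASIC_LANCE 1
#define PMAD_LANCE 2
#define PMAX_LANCE 3

/* Same shape as the driver's macro: double halfword offsets on the
 * variants that pad each 16-bit word out to 32 bits. */
#define shift_off(off, type) \
	((type) == ASIC_LANCE || (type) == PMAX_LANCE ? (off) << 1 : (off))

int main(void)
{
	int off = 6;	/* hypothetical offset inside the init block */

	printf("PMAD: %d, PMAX: %d\n",
	       shift_off(off, PMAD_LANCE), shift_off(off, PMAX_LANCE));
	return 0;
}

Keeping the doubling inside one macro lets the ring and descriptor code be
written once against logical offsets and still drive all three board
layouts.
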
HK - */ - leptr = 0; - - writereg(&ll->rap, LE_CSR1); - writereg(&ll->rdp, (leptr & 0xFFFF)); - writereg(&ll->rap, LE_CSR2); - writereg(&ll->rdp, leptr >> 16); - writereg(&ll->rap, LE_CSR3); - writereg(&ll->rdp, lp->busmaster_regval); - - /* Point back to csr0 */ - writereg(&ll->rap, LE_CSR0); -} - -/* - * Our specialized copy routines - * - */ -static void cp_to_buf(const int type, void *to, const void *from, int len) -{ - unsigned short *tp; - const unsigned short *fp; - unsigned short clen; - unsigned char *rtp; - const unsigned char *rfp; - - if (type == PMAD_LANCE) { - memcpy(to, from, len); - } else if (type == PMAX_LANCE) { - clen = len >> 1; - tp = to; - fp = from; - - while (clen--) { - *tp++ = *fp++; - tp++; - } - - clen = len & 1; - rtp = tp; - rfp = fp; - while (clen--) { - *rtp++ = *rfp++; - } - } else { - /* - * copy 16 Byte chunks - */ - clen = len >> 4; - tp = to; - fp = from; - while (clen--) { - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - tp += 8; - } - - /* - * do the rest, if any. - */ - clen = len & 15; - rtp = (unsigned char *) tp; - rfp = (unsigned char *) fp; - while (clen--) { - *rtp++ = *rfp++; - } - } - - iob(); -} - -static void cp_from_buf(const int type, void *to, const void *from, int len) -{ - unsigned short *tp; - const unsigned short *fp; - unsigned short clen; - unsigned char *rtp; - const unsigned char *rfp; - - if (type == PMAD_LANCE) { - memcpy(to, from, len); - } else if (type == PMAX_LANCE) { - clen = len >> 1; - tp = to; - fp = from; - while (clen--) { - *tp++ = *fp++; - fp++; - } - - clen = len & 1; - - rtp = tp; - rfp = fp; - - while (clen--) { - *rtp++ = *rfp++; - } - } else { - - /* - * copy 16 Byte chunks - */ - clen = len >> 4; - tp = to; - fp = from; - while (clen--) { - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - *tp++ = *fp++; - fp += 8; - } - - /* - * do the rest, if any. - */ - clen = len & 15; - rtp = (unsigned char *) tp; - rfp = (unsigned char *) fp; - while (clen--) { - *rtp++ = *rfp++; - } - - - } - -} - -/* Setup the Lance Rx and Tx rings */ -static void lance_init_ring(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile u16 *ib = (volatile u16 *)dev->mem_start; - uint leptr; - int i; - - /* Lock out other processes while setting up hardware */ - netif_stop_queue(dev); - lp->rx_new = lp->tx_new = 0; - lp->rx_old = lp->tx_old = 0; - - /* Copy the ethernet address to the lance init block. 
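
cp_to_buf() above hides the IOASIC's padded layout: the chip DMAs bursts of
eight 16-bit words that the IOASIC packs so that 16 bytes of valid data are
followed by a 16-byte hole, which is why the chunk loop advances the
destination by eight extra halfwords per iteration. A self-contained model
of that gapped copy (simplified to byte pointers; the tail handling mirrors
the driver's bytewise remainder):

#include <stdio.h>
#include <string.h>

/* Copy len bytes into a buffer laid out as 16 data bytes + 16-byte gap. */
static void gap_copy(unsigned char *to, const unsigned char *from, int len)
{
	while (len >= 16) {
		memcpy(to, from, 16);
		to += 32;		/* skip the 16-byte hole */
		from += 16;
		len -= 16;
	}
	memcpy(to, from, len);		/* remainder starts the next data run */
}

int main(void)
{
	unsigned char dst[64] = { 0 };

	gap_copy(dst, (const unsigned char *)"0123456789abcdefXYZ", 19);
	printf("%.16s | %.3s\n", dst, dst + 32);	/* runs at 0 and 32 */
	return 0;
}
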
- * XXX bit 0 of the physical address registers has to be zero - */ - *lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) | - dev->dev_addr[0]; - *lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) | - dev->dev_addr[2]; - *lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) | - dev->dev_addr[4]; - /* Setup the initialization block */ - - /* Setup rx descriptor pointer */ - leptr = offsetof(struct lance_init_block, brx_ring); - *lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) | - (leptr >> 16); - *lib_ptr(ib, rx_ptr, lp->type) = leptr; - if (ZERO) - printk("RX ptr: %8.8x(%8.8x)\n", - leptr, lib_off(brx_ring, lp->type)); - - /* Setup tx descriptor pointer */ - leptr = offsetof(struct lance_init_block, btx_ring); - *lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) | - (leptr >> 16); - *lib_ptr(ib, tx_ptr, lp->type) = leptr; - if (ZERO) - printk("TX ptr: %8.8x(%8.8x)\n", - leptr, lib_off(btx_ring, lp->type)); - - if (ZERO) - printk("TX rings:\n"); - - /* Setup the Tx ring entries */ - for (i = 0; i < TX_RING_SIZE; i++) { - leptr = lp->tx_buf_ptr_lnc[i]; - *lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr; - *lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) & - 0xff; - *lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000; - /* The ones required by tmd2 */ - *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0; - if (i < 3 && ZERO) - printk("%d: 0x%8.8x(0x%8.8x)\n", - i, leptr, (uint)lp->tx_buf_ptr_cpu[i]); - } - - /* Setup the Rx ring entries */ - if (ZERO) - printk("RX rings:\n"); - for (i = 0; i < RX_RING_SIZE; i++) { - leptr = lp->rx_buf_ptr_lnc[i]; - *lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr; - *lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) & - 0xff) | - LE_R1_OWN; - *lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE | - 0xf000; - *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0; - if (i < 3 && ZERO) - printk("%d: 0x%8.8x(0x%8.8x)\n", - i, leptr, (uint)lp->rx_buf_ptr_cpu[i]); - } - iob(); -} - -static int init_restart_lance(struct lance_private *lp) -{ - volatile struct lance_regs *ll = lp->ll; - int i; - - writereg(&ll->rap, LE_CSR0); - writereg(&ll->rdp, LE_C0_INIT); - - /* Wait for the lance to complete initialization */ - for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) { - udelay(10); - } - if ((i == 100) || (ll->rdp & LE_C0_ERR)) { - printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", - i, ll->rdp); - return -1; - } - if ((ll->rdp & LE_C0_ERR)) { - printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", - i, ll->rdp); - return -1; - } - writereg(&ll->rdp, LE_C0_IDON); - writereg(&ll->rdp, LE_C0_STRT); - writereg(&ll->rdp, LE_C0_INEA); - - return 0; -} - -static int lance_rx(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile u16 *ib = (volatile u16 *)dev->mem_start; - volatile u16 *rd; - unsigned short bits; - int entry, len; - struct sk_buff *skb; - -#ifdef TEST_HITS - { - int i; - - printk("["); - for (i = 0; i < RX_RING_SIZE; i++) { - if (i == lp->rx_new) - printk("%s", *lib_ptr(ib, brx_ring[i].rmd1, - lp->type) & - LE_R1_OWN ? "_" : "X"); - else - printk("%s", *lib_ptr(ib, brx_ring[i].rmd1, - lp->type) & - LE_R1_OWN ? "." : "1"); - } - printk("]"); - } -#endif - - for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type); - !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN); - rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) { - entry = lp->rx_new; - - /* We got an incomplete frame? 
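
lance_init_ring() above packs the six station address octets into the three
16-bit phys_addr words of the init block, low octet in the low byte of each
word (and, per the XXX comment, bit 0 of the first octet must be clear,
since a set bit 0 would mark the address as multicast). The packing on its
own, with an arbitrary example address:

#include <stdio.h>

int main(void)
{
	unsigned char addr[6] = { 0x08, 0x00, 0x2b, 0x12, 0x34, 0x56 };
	unsigned short phys[3];

	for (int i = 0; i < 3; i++)	/* low octet in the low byte */
		phys[i] = (addr[2 * i + 1] << 8) | addr[2 * i];

	printf("%04x %04x %04x\n", phys[0], phys[1], phys[2]);
	return 0;
}

Output is 0008 122b 5634, matching what the driver would write for
08:00:2b:12:34:56.
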
*/ - if ((bits & LE_R1_POK) != LE_R1_POK) { - dev->stats.rx_over_errors++; - dev->stats.rx_errors++; - } else if (bits & LE_R1_ERR) { - /* Count only the end frame as a rx error, - * not the beginning - */ - if (bits & LE_R1_BUF) - dev->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) - dev->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) - dev->stats.rx_over_errors++; - if (bits & LE_R1_FRA) - dev->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) - dev->stats.rx_errors++; - } else { - len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4; - skb = dev_alloc_skb(len + 2); - - if (skb == 0) { - printk("%s: Memory squeeze, deferring packet.\n", - dev->name); - dev->stats.rx_dropped++; - *rds_ptr(rd, mblength, lp->type) = 0; - *rds_ptr(rd, rmd1, lp->type) = - ((lp->rx_buf_ptr_lnc[entry] >> 16) & - 0xff) | LE_R1_OWN; - lp->rx_new = (entry + 1) & RX_RING_MOD_MASK; - return 0; - } - dev->stats.rx_bytes += len; - - skb_reserve(skb, 2); /* 16 byte align */ - skb_put(skb, len); /* make room */ - - cp_from_buf(lp->type, skb->data, - (char *)lp->rx_buf_ptr_cpu[entry], len); - - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - } - - /* Return the packet to the pool */ - *rds_ptr(rd, mblength, lp->type) = 0; - *rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000; - *rds_ptr(rd, rmd1, lp->type) = - ((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN; - lp->rx_new = (entry + 1) & RX_RING_MOD_MASK; - } - return 0; -} - -static void lance_tx(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile u16 *ib = (volatile u16 *)dev->mem_start; - volatile struct lance_regs *ll = lp->ll; - volatile u16 *td; - int i, j; - int status; - - j = lp->tx_old; - - spin_lock(&lp->lock); - - for (i = j; i != lp->tx_new; i = j) { - td = lib_ptr(ib, btx_ring[i], lp->type); - /* If we hit a packet not owned by us, stop */ - if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN) - break; - - if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) { - status = *tds_ptr(td, misc, lp->type); - - dev->stats.tx_errors++; - if (status & LE_T3_RTY) - dev->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) - dev->stats.tx_window_errors++; - - if (status & LE_T3_CLOS) { - dev->stats.tx_carrier_errors++; - printk("%s: Carrier Lost\n", dev->name); - /* Stop the lance */ - writereg(&ll->rap, LE_CSR0); - writereg(&ll->rdp, LE_C0_STOP); - lance_init_ring(dev); - load_csrs(lp); - init_restart_lance(lp); - goto out; - } - /* Buffer errors and underflows turn off the - * transmitter, restart the adapter. - */ - if (status & (LE_T3_BUF | LE_T3_UFL)) { - dev->stats.tx_fifo_errors++; - - printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", - dev->name); - /* Stop the lance */ - writereg(&ll->rap, LE_CSR0); - writereg(&ll->rdp, LE_C0_STOP); - lance_init_ring(dev); - load_csrs(lp); - init_restart_lance(lp); - goto out; - } - } else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) == - LE_T1_POK) { - /* - * So we don't count the packet more than once. - */ - *tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK); - - /* One collision before packet was sent. */ - if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE) - dev->stats.collisions++; - - /* More than one collision, be optimistic. 
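
lance_tx() above retires descriptors from tx_old up to tx_new; the
TX_BUFFS_AVAIL macro defined earlier computes the free slots in that ring
while deliberately keeping one entry unused, the standard trick for telling
a full ring apart from an empty one when only two indices are tracked. The
invariant in isolation:

#include <stdio.h>

#define TX_RING_SIZE		16
#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)

/* Free slots, with one entry always held back (full != empty). */
static int tx_buffs_avail(int tx_old, int tx_new)
{
	return tx_old <= tx_new ? tx_old + TX_RING_MOD_MASK - tx_new
				: tx_old - tx_new - 1;
}

int main(void)
{
	printf("empty ring:   %d free\n", tx_buffs_avail(0, 0));	/* 15 */
	printf("one queued:   %d free\n", tx_buffs_avail(0, 1));	/* 14 */
	printf("wrapped ring: %d free\n", tx_buffs_avail(5, 3));	/* 1 */
	return 0;
}

An empty 16-entry ring reports only 15 free slots; without that reserved
entry, tx_old == tx_new would be ambiguous.
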
*/ - if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE) - dev->stats.collisions += 2; - - dev->stats.tx_packets++; - } - j = (j + 1) & TX_RING_MOD_MASK; - } - lp->tx_old = j; -out: - if (netif_queue_stopped(dev) && - TX_BUFFS_AVAIL > 0) - netif_wake_queue(dev); - - spin_unlock(&lp->lock); -} - -static irqreturn_t lance_dma_merr_int(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - - printk(KERN_ERR "%s: DMA error\n", dev->name); - return IRQ_HANDLED; -} - -static irqreturn_t lance_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - int csr0; - - writereg(&ll->rap, LE_CSR0); - csr0 = ll->rdp; - - /* Acknowledge all the interrupt sources ASAP */ - writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT)); - - if ((csr0 & LE_C0_ERR)) { - /* Clear the error condition */ - writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | - LE_C0_CERR | LE_C0_MERR); - } - if (csr0 & LE_C0_RINT) - lance_rx(dev); - - if (csr0 & LE_C0_TINT) - lance_tx(dev); - - if (csr0 & LE_C0_BABL) - dev->stats.tx_errors++; - - if (csr0 & LE_C0_MISS) - dev->stats.rx_errors++; - - if (csr0 & LE_C0_MERR) { - printk("%s: Memory error, status %04x\n", dev->name, csr0); - - writereg(&ll->rdp, LE_C0_STOP); - - lance_init_ring(dev); - load_csrs(lp); - init_restart_lance(lp); - netif_wake_queue(dev); - } - - writereg(&ll->rdp, LE_C0_INEA); - writereg(&ll->rdp, LE_C0_INEA); - return IRQ_HANDLED; -} - -static int lance_open(struct net_device *dev) -{ - volatile u16 *ib = (volatile u16 *)dev->mem_start; - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - int status = 0; - - /* Stop the Lance */ - writereg(&ll->rap, LE_CSR0); - writereg(&ll->rdp, LE_C0_STOP); - - /* Set mode and clear multicast filter only at device open, - * so that lance_init_ring() called at any error will not - * forget multicast filters. - * - * BTW it is common bug in all lance drivers! --ANK - */ - *lib_ptr(ib, mode, lp->type) = 0; - *lib_ptr(ib, filter[0], lp->type) = 0; - *lib_ptr(ib, filter[1], lp->type) = 0; - *lib_ptr(ib, filter[2], lp->type) = 0; - *lib_ptr(ib, filter[3], lp->type) = 0; - - lance_init_ring(dev); - load_csrs(lp); - - netif_start_queue(dev); - - /* Associate IRQ with lance_interrupt */ - if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) { - printk("%s: Can't get IRQ %d\n", dev->name, dev->irq); - return -EAGAIN; - } - if (lp->dma_irq >= 0) { - unsigned long flags; - - if (request_irq(lp->dma_irq, lance_dma_merr_int, 0, - "lance error", dev)) { - free_irq(dev->irq, dev); - printk("%s: Can't get DMA IRQ %d\n", dev->name, - lp->dma_irq); - return -EAGAIN; - } - - spin_lock_irqsave(&ioasic_ssr_lock, flags); - - fast_mb(); - /* Enable I/O ASIC LANCE DMA. */ - ioasic_write(IO_REG_SSR, - ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN); - - fast_mb(); - spin_unlock_irqrestore(&ioasic_ssr_lock, flags); - } - - status = init_restart_lance(lp); - return status; -} - -static int lance_close(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - - netif_stop_queue(dev); - del_timer_sync(&lp->multicast_timer); - - /* Stop the card */ - writereg(&ll->rap, LE_CSR0); - writereg(&ll->rdp, LE_C0_STOP); - - if (lp->dma_irq >= 0) { - unsigned long flags; - - spin_lock_irqsave(&ioasic_ssr_lock, flags); - - fast_mb(); - /* Disable I/O ASIC LANCE DMA. 
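
lance_interrupt() above reads CSR0 once and immediately writes the set
status bits back, because the LANCE status bits are write-one-to-clear and
have to be acknowledged before new events can latch. A toy model of that
read-and-ack step (the register here is just a variable; on hardware the
write goes through RDP):

#include <stdio.h>

#define LE_C0_RINT 0x0400
#define LE_C0_TINT 0x0200
#define LE_C0_INTR 0x0080

int main(void)
{
	unsigned short csr0 = LE_C0_INTR | LE_C0_RINT;	/* RX event pending */
	unsigned short ack = csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT);

	csr0 &= ~ack;	/* hardware clears exactly the bits written back */
	printf("csr0 after ack: 0x%04x\n", csr0);
	return 0;
}
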
*/ - ioasic_write(IO_REG_SSR, - ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN); - - fast_iob(); - spin_unlock_irqrestore(&ioasic_ssr_lock, flags); - - free_irq(lp->dma_irq, dev); - } - free_irq(dev->irq, dev); - return 0; -} - -static inline int lance_reset(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - int status; - - /* Stop the lance */ - writereg(&ll->rap, LE_CSR0); - writereg(&ll->rdp, LE_C0_STOP); - - lance_init_ring(dev); - load_csrs(lp); - dev->trans_start = jiffies; /* prevent tx timeout */ - status = init_restart_lance(lp); - return status; -} - -static void lance_tx_timeout(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - - printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", - dev->name, ll->rdp); - lance_reset(dev); - netif_wake_queue(dev); -} - -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile struct lance_regs *ll = lp->ll; - volatile u16 *ib = (volatile u16 *)dev->mem_start; - unsigned long flags; - int entry, len; - - len = skb->len; - - if (len < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - len = ETH_ZLEN; - } - - dev->stats.tx_bytes += len; - - spin_lock_irqsave(&lp->lock, flags); - - entry = lp->tx_new; - *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); - *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0; - - cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len); - - /* Now, give the packet to the lance */ - *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) = - ((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) | - (LE_T1_POK | LE_T1_OWN); - lp->tx_new = (entry + 1) & TX_RING_MOD_MASK; - - if (TX_BUFFS_AVAIL <= 0) - netif_stop_queue(dev); - - /* Kick the lance: transmit now */ - writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD); - - spin_unlock_irqrestore(&lp->lock, flags); - - dev_kfree_skb(skb); - - return NETDEV_TX_OK; -} - -static void lance_load_multicast(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile u16 *ib = (volatile u16 *)dev->mem_start; - struct netdev_hw_addr *ha; - u32 crc; - - /* set all multicast bits */ - if (dev->flags & IFF_ALLMULTI) { - *lib_ptr(ib, filter[0], lp->type) = 0xffff; - *lib_ptr(ib, filter[1], lp->type) = 0xffff; - *lib_ptr(ib, filter[2], lp->type) = 0xffff; - *lib_ptr(ib, filter[3], lp->type) = 0xffff; - return; - } - /* clear the multicast filter */ - *lib_ptr(ib, filter[0], lp->type) = 0; - *lib_ptr(ib, filter[1], lp->type) = 0; - *lib_ptr(ib, filter[2], lp->type) = 0; - *lib_ptr(ib, filter[3], lp->type) = 0; - - /* Add addresses */ - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc_le(ETH_ALEN, ha->addr); - crc = crc >> 26; - *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf); - } -} - -static void lance_set_multicast(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - volatile u16 *ib = (volatile u16 *)dev->mem_start; - volatile struct lance_regs *ll = lp->ll; - - if (!netif_running(dev)) - return; - - if (lp->tx_old != lp->tx_new) { - mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100); - netif_wake_queue(dev); - return; - } - - netif_stop_queue(dev); - - writereg(&ll->rap, LE_CSR0); - writereg(&ll->rdp, LE_C0_STOP); - - lance_init_ring(dev); - - if (dev->flags & IFF_PROMISC) { - *lib_ptr(ib, mode, lp->type) |= LE_MO_PROM; - } else { - *lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM; - 
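
lance_start_xmit() above pads short frames up to ETH_ZLEN (60 bytes) with
skb_padto() before queueing them, since Ethernet's minimum frame is 64
bytes once the chip appends the 4-byte FCS. A sketch of the effect (the
real work happens inside skb_padto()):

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60	/* minimum payload handed to the MAC; FCS adds 4 */

static int pad_frame(unsigned char *buf, int len)
{
	if (len < ETH_ZLEN) {
		memset(buf + len, 0, ETH_ZLEN - len);	/* zero-fill */
		len = ETH_ZLEN;
	}
	return len;
}

int main(void)
{
	unsigned char frame[ETH_ZLEN] = "short";

	printf("wire length: %d\n", pad_frame(frame, 5));	/* 60 */
	return 0;
}

Padding with zeros rather than whatever follows the payload in memory also
avoids leaking kernel data onto the wire, which is why skb_padto() is
preferred over simply rounding the length up.
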
lance_load_multicast(dev); - } - load_csrs(lp); - init_restart_lance(lp); - netif_wake_queue(dev); -} - -static void lance_set_multicast_retry(unsigned long _opaque) -{ - struct net_device *dev = (struct net_device *) _opaque; - - lance_set_multicast(dev); -} - -static const struct net_device_ops lance_netdev_ops = { - .ndo_open = lance_open, - .ndo_stop = lance_close, - .ndo_start_xmit = lance_start_xmit, - .ndo_tx_timeout = lance_tx_timeout, - .ndo_set_multicast_list = lance_set_multicast, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -}; - -static int __devinit dec_lance_probe(struct device *bdev, const int type) -{ - static unsigned version_printed; - static const char fmt[] = "declance%d"; - char name[10]; - struct net_device *dev; - struct lance_private *lp; - volatile struct lance_regs *ll; - resource_size_t start = 0, len = 0; - int i, ret; - unsigned long esar_base; - unsigned char *esar; - - if (dec_lance_debug && version_printed++ == 0) - printk(version); - - if (bdev) - snprintf(name, sizeof(name), "%s", dev_name(bdev)); - else { - i = 0; - dev = root_lance_dev; - while (dev) { - i++; - lp = netdev_priv(dev); - dev = lp->next; - } - snprintf(name, sizeof(name), fmt, i); - } - - dev = alloc_etherdev(sizeof(struct lance_private)); - if (!dev) { - printk(KERN_ERR "%s: Unable to allocate etherdev, aborting.\n", - name); - ret = -ENOMEM; - goto err_out; - } - - /* - * alloc_etherdev ensures the data structures used by the LANCE - * are aligned. - */ - lp = netdev_priv(dev); - spin_lock_init(&lp->lock); - - lp->type = type; - switch (type) { - case ASIC_LANCE: - dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE); - - /* buffer space for the on-board LANCE shared memory */ - /* - * FIXME: ugly hack! - */ - dev->mem_start = CKSEG1ADDR(0x00020000); - dev->mem_end = dev->mem_start + 0x00020000; - dev->irq = dec_interrupt[DEC_IRQ_LANCE]; - esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR); - - /* Workaround crash with booting KN04 2.1k from Disk */ - memset((void *)dev->mem_start, 0, - dev->mem_end - dev->mem_start); - - /* - * setup the pointer arrays, this sucks [tm] :-( - */ - for (i = 0; i < RX_RING_SIZE; i++) { - lp->rx_buf_ptr_cpu[i] = - (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + - 2 * i * RX_BUFF_SIZE); - lp->rx_buf_ptr_lnc[i] = - (BUF_OFFSET_LNC + i * RX_BUFF_SIZE); - } - for (i = 0; i < TX_RING_SIZE; i++) { - lp->tx_buf_ptr_cpu[i] = - (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + - 2 * RX_RING_SIZE * RX_BUFF_SIZE + - 2 * i * TX_BUFF_SIZE); - lp->tx_buf_ptr_lnc[i] = - (BUF_OFFSET_LNC + - RX_RING_SIZE * RX_BUFF_SIZE + - i * TX_BUFF_SIZE); - } - - /* Setup I/O ASIC LANCE DMA. 
*/ - lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR]; - ioasic_write(IO_REG_LANCE_DMA_P, - CPHYSADDR(dev->mem_start) << 3); - - break; -#ifdef CONFIG_TC - case PMAD_LANCE: - dev_set_drvdata(bdev, dev); - - start = to_tc_dev(bdev)->resource.start; - len = to_tc_dev(bdev)->resource.end - start + 1; - if (!request_mem_region(start, len, dev_name(bdev))) { - printk(KERN_ERR - "%s: Unable to reserve MMIO resource\n", - dev_name(bdev)); - ret = -EBUSY; - goto err_out_dev; - } - - dev->mem_start = CKSEG1ADDR(start); - dev->mem_end = dev->mem_start + 0x100000; - dev->base_addr = dev->mem_start + 0x100000; - dev->irq = to_tc_dev(bdev)->interrupt; - esar_base = dev->mem_start + 0x1c0002; - lp->dma_irq = -1; - - for (i = 0; i < RX_RING_SIZE; i++) { - lp->rx_buf_ptr_cpu[i] = - (char *)(dev->mem_start + BUF_OFFSET_CPU + - i * RX_BUFF_SIZE); - lp->rx_buf_ptr_lnc[i] = - (BUF_OFFSET_LNC + i * RX_BUFF_SIZE); - } - for (i = 0; i < TX_RING_SIZE; i++) { - lp->tx_buf_ptr_cpu[i] = - (char *)(dev->mem_start + BUF_OFFSET_CPU + - RX_RING_SIZE * RX_BUFF_SIZE + - i * TX_BUFF_SIZE); - lp->tx_buf_ptr_lnc[i] = - (BUF_OFFSET_LNC + - RX_RING_SIZE * RX_BUFF_SIZE + - i * TX_BUFF_SIZE); - } - - break; -#endif - case PMAX_LANCE: - dev->irq = dec_interrupt[DEC_IRQ_LANCE]; - dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE); - dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM); - dev->mem_end = dev->mem_start + KN01_SLOT_SIZE; - esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1); - lp->dma_irq = -1; - - /* - * setup the pointer arrays, this sucks [tm] :-( - */ - for (i = 0; i < RX_RING_SIZE; i++) { - lp->rx_buf_ptr_cpu[i] = - (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + - 2 * i * RX_BUFF_SIZE); - lp->rx_buf_ptr_lnc[i] = - (BUF_OFFSET_LNC + i * RX_BUFF_SIZE); - } - for (i = 0; i < TX_RING_SIZE; i++) { - lp->tx_buf_ptr_cpu[i] = - (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + - 2 * RX_RING_SIZE * RX_BUFF_SIZE + - 2 * i * TX_BUFF_SIZE); - lp->tx_buf_ptr_lnc[i] = - (BUF_OFFSET_LNC + - RX_RING_SIZE * RX_BUFF_SIZE + - i * TX_BUFF_SIZE); - } - - break; - - default: - printk(KERN_ERR "%s: declance_init called with unknown type\n", - name); - ret = -ENODEV; - goto err_out_dev; - } - - ll = (struct lance_regs *) dev->base_addr; - esar = (unsigned char *) esar_base; - - /* prom checks */ - /* First, check for test pattern */ - if (esar[0x60] != 0xff && esar[0x64] != 0x00 && - esar[0x68] != 0x55 && esar[0x6c] != 0xaa) { - printk(KERN_ERR - "%s: Ethernet station address prom not found!\n", - name); - ret = -ENODEV; - goto err_out_resource; - } - /* Check the prom contents */ - for (i = 0; i < 8; i++) { - if (esar[i * 4] != esar[0x3c - i * 4] && - esar[i * 4] != esar[0x40 + i * 4] && - esar[0x3c - i * 4] != esar[0x40 + i * 4]) { - printk(KERN_ERR "%s: Something is wrong with the " - "ethernet station address prom!\n", name); - ret = -ENODEV; - goto err_out_resource; - } - } - - /* Copy the ethernet address to the device structure, later to the - * lance initialization block so the lance gets it every time it's - * (re)initialized. 
- */ - switch (type) { - case ASIC_LANCE: - printk("%s: IOASIC onboard LANCE", name); - break; - case PMAD_LANCE: - printk("%s: PMAD-AA", name); - break; - case PMAX_LANCE: - printk("%s: PMAX onboard LANCE", name); - break; - } - for (i = 0; i < 6; i++) - dev->dev_addr[i] = esar[i * 4]; - - printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); - - dev->netdev_ops = &lance_netdev_ops; - dev->watchdog_timeo = 5*HZ; - - /* lp->ll is the location of the registers for lance card */ - lp->ll = ll; - - /* busmaster_regval (CSR3) should be zero according to the PMAD-AA - * specification. - */ - lp->busmaster_regval = 0; - - dev->dma = 0; - - /* We cannot sleep if the chip is busy during a - * multicast list update event, because such events - * can occur from interrupts (ex. IPv6). So we - * use a timer to try again later when necessary. -DaveM - */ - init_timer(&lp->multicast_timer); - lp->multicast_timer.data = (unsigned long) dev; - lp->multicast_timer.function = lance_set_multicast_retry; - - ret = register_netdev(dev); - if (ret) { - printk(KERN_ERR - "%s: Unable to register netdev, aborting.\n", name); - goto err_out_resource; - } - - if (!bdev) { - lp->next = root_lance_dev; - root_lance_dev = dev; - } - - printk("%s: registered as %s.\n", name, dev->name); - return 0; - -err_out_resource: - if (bdev) - release_mem_region(start, len); - -err_out_dev: - free_netdev(dev); - -err_out: - return ret; -} - -static void __exit dec_lance_remove(struct device *bdev) -{ - struct net_device *dev = dev_get_drvdata(bdev); - resource_size_t start, len; - - unregister_netdev(dev); - start = to_tc_dev(bdev)->resource.start; - len = to_tc_dev(bdev)->resource.end - start + 1; - release_mem_region(start, len); - free_netdev(dev); -} - -/* Find all the lance cards on the system and initialize them */ -static int __init dec_lance_platform_probe(void) -{ - int count = 0; - - if (dec_interrupt[DEC_IRQ_LANCE] >= 0) { - if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) { - if (dec_lance_probe(NULL, ASIC_LANCE) >= 0) - count++; - } else if (!TURBOCHANNEL) { - if (dec_lance_probe(NULL, PMAX_LANCE) >= 0) - count++; - } - } - - return (count > 0) ? 
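
The PROM checks above first look for the 0xff/0x00/0x55/0xaa test pattern
and then exploit the ESAR layout, where each address octet sits at every
fourth byte and the whole address is stored three times (forwards,
backwards, forwards); the probe accepts the ROM when at least two of the
three copies agree. A condensed model of the consistency check, written
here as the stricter all-three-must-match variant:

#include <stdio.h>
#include <string.h>

/* Return 0 when the three redundant copies in an ESAR image agree. */
static int esar_check(const unsigned char *esar)
{
	for (int i = 0; i < 8; i++)
		if (esar[i * 4] != esar[0x3c - i * 4] ||
		    esar[i * 4] != esar[0x40 + i * 4])
			return -1;
	return 0;
}

int main(void)
{
	unsigned char esar[0x80];

	memset(esar, 0xab, sizeof(esar));	/* trivially consistent image */
	printf("%s\n", esar_check(esar) ? "bad prom" : "prom ok");
	return 0;
}
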
0 : -ENODEV; -} - -static void __exit dec_lance_platform_remove(void) -{ - while (root_lance_dev) { - struct net_device *dev = root_lance_dev; - struct lance_private *lp = netdev_priv(dev); - - unregister_netdev(dev); - root_lance_dev = lp->next; - free_netdev(dev); - } -} - -#ifdef CONFIG_TC -static int __devinit dec_lance_tc_probe(struct device *dev); -static int __exit dec_lance_tc_remove(struct device *dev); - -static const struct tc_device_id dec_lance_tc_table[] = { - { "DEC ", "PMAD-AA " }, - { } -}; -MODULE_DEVICE_TABLE(tc, dec_lance_tc_table); - -static struct tc_driver dec_lance_tc_driver = { - .id_table = dec_lance_tc_table, - .driver = { - .name = "declance", - .bus = &tc_bus_type, - .probe = dec_lance_tc_probe, - .remove = __exit_p(dec_lance_tc_remove), - }, -}; - -static int __devinit dec_lance_tc_probe(struct device *dev) -{ - int status = dec_lance_probe(dev, PMAD_LANCE); - if (!status) - get_device(dev); - return status; -} - -static int __exit dec_lance_tc_remove(struct device *dev) -{ - put_device(dev); - dec_lance_remove(dev); - return 0; -} -#endif - -static int __init dec_lance_init(void) -{ - int status; - - status = tc_register_driver(&dec_lance_tc_driver); - if (!status) - dec_lance_platform_probe(); - return status; -} - -static void __exit dec_lance_exit(void) -{ - dec_lance_platform_remove(); - tc_unregister_driver(&dec_lance_tc_driver); -} - - -module_init(dec_lance_init); -module_exit(dec_lance_exit); diff --git a/drivers/net/depca.c b/drivers/net/depca.c deleted file mode 100644 index f2015a8..0000000 --- a/drivers/net/depca.c +++ /dev/null @@ -1,2111 +0,0 @@ -/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux. - - Written 1994, 1995 by David C. Davies. - - - Copyright 1994 David C. Davies - and - United States Government - (as represented by the Director, National Security Agency). - - Copyright 1995 Digital Equipment Corporation. - - - This software may be used and distributed according to the terms of - the GNU General Public License, incorporated herein by reference. - - This driver is written for the Digital Equipment Corporation series - of DEPCA and EtherWORKS ethernet cards: - - DEPCA (the original) - DE100 - DE101 - DE200 Turbo - DE201 Turbo - DE202 Turbo (TP BNC) - DE210 - DE422 (EISA) - - The driver has been tested on DE100, DE200 and DE202 cards in a - relatively busy network. The DE422 has been tested a little. - - This driver will NOT work for the DE203, DE204 and DE205 series of - cards, since they have a new custom ASIC in place of the AMD LANCE - chip. See the 'ewrk3.c' driver in the Linux source tree for running - those cards. - - I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from) - a DECstation 5000/200. - - The author may be reached at davies@maniac.ultranet.com - - ========================================================================= - - The driver was originally based on the 'lance.c' driver from Donald - Becker which is included with the standard driver distribution for - linux. V0.4 is a complete re-write with only the kernel interface - remaining from the original code. - - 1) Lance.c code in /linux/drivers/net/ - 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook", - AMD, 1992 [(800) 222-9323]. - 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)", - AMD, Pub. #17881, May 1993. - 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA", - AMD, Pub. 
#16907, May 1992 - 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual", - Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003 - 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual", - Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003 - 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR - Digital Equipment Corporation, 1989 - 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual", - Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001 - - - Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this - driver. - - The original DEPCA card requires that the ethernet ROM address counter - be enabled to count and has an 8 bit NICSR. The ROM counter enabling is - only done when a 0x08 is read as the first address octet (to minimise - the chances of writing over some other hardware's I/O register). The - NICSR accesses have been changed to byte accesses for all the cards - supported by this driver, since there is only one useful bit in the MSB - (remote boot timeout) and it is not used. Also, there is a maximum of - only 48kB network RAM for this card. My thanks to Torbjorn Lindh for - help debugging all this (and holding my feet to the fire until I got it - right). - - The DE200 series boards have on-board 64kB RAM for use as a shared - memory network buffer. Only the DE100 cards make use of a 2kB buffer - mode which has not been implemented in this driver (only the 32kB and - 64kB modes are supported [16kB/48kB for the original DEPCA]). - - At the most only 2 DEPCA cards can be supported on the ISA bus because - there is only provision for two I/O base addresses on each card (0x300 - and 0x200). The I/O address is detected by searching for a byte sequence - in the Ethernet station address PROM at the expected I/O address for the - Ethernet PROM. The shared memory base address is 'autoprobed' by - looking for the self test PROM and detecting the card name. When a - second DEPCA is detected, information is placed in the base_addr - variable of the next device structure (which is created if necessary), - thus enabling ethif_probe initialization for the device. More than 2 - EISA cards can be supported, but care will be needed assigning the - shared memory to ensure that each slot has the correct IRQ, I/O address - and shared memory address assigned. - - ************************************************************************ - - NOTE: If you are using two ISA DEPCAs, it is important that you assign - the base memory addresses correctly. The driver autoprobes I/O 0x300 - then 0x200. The base memory address for the first device must be less - than that of the second so that the auto probe will correctly assign the - I/O and memory addresses on the same card. I can't think of a way to do - this unambiguously at the moment, since there is nothing on the cards to - tie I/O and memory information together. - - I am unable to test 2 cards together for now, so this code is - unchecked. All reports, good or bad, are welcome. - - ************************************************************************ - - The board IRQ setting must be at an unused IRQ which is auto-probed - using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are - {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is - really IRQ9 in machines with 16 IRQ lines. 
- - No 16MB memory limitation should exist with this driver as DMA is not - used and the common memory area is in low memory on the network card (my - current system has 20MB and I've not had problems yet). - - The ability to load this driver as a loadable module has been added. To - utilise this ability, you have to do <8 things: - - 0) have a copy of the loadable modules code installed on your system. - 1) copy depca.c from the /linux/drivers/net directory to your favourite - temporary directory. - 2) if you wish, edit the source code near line 1530 to reflect the I/O - address and IRQ you're using (see also 5). - 3) compile depca.c, but include -DMODULE in the command line to ensure - that the correct bits are compiled (see end of source code). - 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a - kernel with the depca configuration turned off and reboot. - 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100] - [Alan Cox: Changed the code to allow command line irq/io assignments] - [Dave Davies: Changed the code to allow command line mem/name - assignments] - 6) run the net startup bits for your eth?? interface manually - (usually /etc/rc.inet[12] at boot time). - 7) enjoy! - - Note that autoprobing is not allowed in loadable modules - the system is - already up and running and you're messing with interrupts. - - To unload a module, turn off the associated interface - 'ifconfig eth?? down' then 'rmmod depca'. - - To assign a base memory address for the shared memory when running as a - loadable module, see 5 above. To include the adapter name (if you have - no PROM but know the card name) also see 5 above. Note that this last - option will not work with kernel built-in depca's. - - The shared memory assignment for a loadable module makes sense to avoid - the 'memory autoprobe' picking the wrong shared memory (for the case of - 2 depca's in a PC). - - ************************************************************************ - Support for MCA EtherWORKS cards added 11-3-98. - Verified to work with up to 2 DE212 cards in a system (although not - fully stress-tested). - - Currently known bugs/limitations: - - Note: with the MCA stuff as a module, it trusts the MCA configuration, - not the command line for IRQ and memory address. You can - specify them if you want, but it will throw your values out. - You still have to pass the IO address it was configured as - though. - - ************************************************************************ - TO DO: - ------ - - - Revision History - ---------------- - - Version Date Description - - 0.1 25-jan-94 Initial writing. - 0.2 27-jan-94 Added LANCE TX hardware buffer chaining. - 0.3 1-feb-94 Added multiple DEPCA support. - 0.31 4-feb-94 Added DE202 recognition. - 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support. - 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable. - Add jabber packet fix from murf@perftech.com - and becker@super.org - 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access. - 0.35 8-mar-94 Added DE201 recognition. Tidied up. - 0.351 30-apr-94 Added EISA support. Added DE422 recognition. - 0.36 16-may-94 DE422 fix released. - 0.37 22-jul-94 Added MODULE support - 0.38 15-aug-94 Added DBR ROM switch in depca_close(). - Multi DEPCA bug fix. - 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0. - 0.381 12-dec-94 Added DE101 recognition, fix multicast bug. - 0.382 9-feb-95 Fix recognition bug reported by . 
- 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by - - 0.384 17-mar-95 Fix a ring full bug reported by - 0.385 3-apr-95 Fix a recognition bug reported by - - 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility - 0.40 25-May-95 Rewrite for portability & updated. - ALPHA support from - 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from - suggestion by - 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable - modules. - Add 'adapter_name' for loadable modules when no PROM. - Both above from a suggestion by - . - Add new multicasting code. - 0.421 22-Apr-96 Fix alloc_device() bug - 0.422 29-Apr-96 Fix depca_hw_init() bug - 0.423 7-Jun-96 Fix module load bug - 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c - 0.44 1-Sep-97 Fix *_probe() to test check_region() first - bug - reported by - 0.45 3-Nov-98 Added support for MCA EtherWORKS (DE210/DE212) cards - by - 0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. - 0.5 14-Nov-98 Re-spin for 2.1.x kernels. - 0.51 27-Jun-99 Correct received packet length for CRC from - report by - 0.52 16-Oct-00 Fixes for 2.3 io memory accesses - Fix show-stopper (ints left masked) in depca_interrupt - by - 0.53 12-Jan-01 Release resources on failure, bss tidbits - by acme@conectiva.com.br - 0.54 08-Nov-01 use library crc32 functions - by Matt_Domsch@dell.com - 0.55 01-Mar-03 Use EISA/sysfs framework - - ========================================================================= -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifdef CONFIG_MCA -#include -#endif - -#ifdef CONFIG_EISA -#include -#endif - -#include "depca.h" - -static char version[] __initdata = "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n"; - -#ifdef DEPCA_DEBUG -static int depca_debug = DEPCA_DEBUG; -#else -static int depca_debug = 1; -#endif - -#define DEPCA_NDA 0xffe0 /* No Device Address */ - -#define TX_TIMEOUT (1*HZ) - -/* -** Ethernet PROM defines -*/ -#define PROBE_LENGTH 32 -#define ETH_PROM_SIG 0xAA5500FFUL - -/* -** Set the number of Tx and Rx buffers. Ensure that the memory requested -** here is <= to the amount of shared memory set up by the board switches. -** The number of descriptors MUST BE A POWER OF 2. 
-** -** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ) -*/ -#define NUM_RX_DESC 8 /* Number of RX descriptors */ -#define NUM_TX_DESC 8 /* Number of TX descriptors */ -#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */ -#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */ - -/* -** EISA bus defines -*/ -#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */ - -/* -** ISA Bus defines -*/ -#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000} -#define DEPCA_TOTAL_SIZE 0x10 - -static struct { - u_long iobase; - struct platform_device *device; -} depca_io_ports[] = { - { 0x300, NULL }, - { 0x200, NULL }, - { 0 , NULL }, -}; - -/* -** Name <-> Adapter mapping -*/ -#define DEPCA_SIGNATURE {"DEPCA",\ - "DE100","DE101",\ - "DE200","DE201","DE202",\ - "DE210","DE212",\ - "DE422",\ - ""} - -static char* __initdata depca_signature[] = DEPCA_SIGNATURE; - -enum depca_type { - DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown -}; - -static char depca_string[] = "depca"; - -static int depca_device_remove (struct device *device); - -#ifdef CONFIG_EISA -static struct eisa_device_id depca_eisa_ids[] = { - { "DEC4220", de422 }, - { "" } -}; -MODULE_DEVICE_TABLE(eisa, depca_eisa_ids); - -static int depca_eisa_probe (struct device *device); - -static struct eisa_driver depca_eisa_driver = { - .id_table = depca_eisa_ids, - .driver = { - .name = depca_string, - .probe = depca_eisa_probe, - .remove = __devexit_p (depca_device_remove) - } -}; -#endif - -#ifdef CONFIG_MCA -/* -** Adapter ID for the MCA EtherWORKS DE210/212 adapter -*/ -#define DE210_ID 0x628d -#define DE212_ID 0x6def - -static short depca_mca_adapter_ids[] = { - DE210_ID, - DE212_ID, - 0x0000 -}; - -static char *depca_mca_adapter_name[] = { - "DEC EtherWORKS MC Adapter (DE210)", - "DEC EtherWORKS MC Adapter (DE212)", - NULL -}; - -static enum depca_type depca_mca_adapter_type[] = { - de210, - de212, - 0 -}; - -static int depca_mca_probe (struct device *); - -static struct mca_driver depca_mca_driver = { - .id_table = depca_mca_adapter_ids, - .driver = { - .name = depca_string, - .bus = &mca_bus_type, - .probe = depca_mca_probe, - .remove = __devexit_p(depca_device_remove), - }, -}; -#endif - -static int depca_isa_probe (struct platform_device *); - -static int __devexit depca_isa_remove(struct platform_device *pdev) -{ - return depca_device_remove(&pdev->dev); -} - -static struct platform_driver depca_isa_driver = { - .probe = depca_isa_probe, - .remove = __devexit_p(depca_isa_remove), - .driver = { - .name = depca_string, - }, -}; - -/* -** Miscellaneous info... -*/ -#define DEPCA_STRLEN 16 - -/* -** Memory Alignment. Each descriptor is 4 longwords long. To force a -** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and -** DESC_ALIGN. DEPCA_ALIGN aligns the start address of the private memory area -** and hence the RX descriptor ring's first entry. -*/ -#define DEPCA_ALIGN4 ((u_long)4 - 1) /* 1 longword align */ -#define DEPCA_ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */ -#define DEPCA_ALIGN DEPCA_ALIGN8 /* Keep the LANCE happy... */ - -/* -** The DEPCA Rx and Tx ring descriptors. -*/ -struct depca_rx_desc { - volatile s32 base; - s16 buf_length; /* This length is negative 2's complement! */ - s16 msg_length; /* This length is "normal". */ -}; - -struct depca_tx_desc { - volatile s32 base; - s16 length; /* This length is negative 2's complement! 
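[Illustration: a worked check of the total_memory formula above, using the descriptor counts and buffer sizes just defined. Standalone C, not driver code; note that depca_hw_init() additionally reserves sizeof(struct depca_init) out of the same RAM:]

#include <stdio.h>

/* Values quoted from the driver above. */
#define NUM_RX_DESC 8
#define NUM_TX_DESC 8
#define RX_BUFF_SZ  1536
#define TX_BUFF_SZ  1536

int main(void)
{
    /* total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ) */
    long total = (long)NUM_RX_DESC * (8 + RX_BUFF_SZ)
               + (long)NUM_TX_DESC * (8 + TX_BUFF_SZ);

    printf("descriptor+buffer memory: %ld bytes (%ld kB)\n",
           total, total / 1024);
    /* 8*(8+1536)*2 = 24704 bytes (~24 kB), comfortably inside the 48 kB
     * available on an original DEPCA, let alone the 64 kB boards. */
    return 0;
}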
*/ - s16 misc; /* Errors and TDR info */ -}; - -#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM - to LANCE memory address space */ - -/* -** The Lance initialization block, described in databook, in common memory. -*/ -struct depca_init { - u16 mode; /* Mode register */ - u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */ - u8 mcast_table[8]; /* Multicast Hash Table. */ - u32 rx_ring; /* Rx ring base pointer & ring length */ - u32 tx_ring; /* Tx ring base pointer & ring length */ -}; - -#define DEPCA_PKT_STAT_SZ 16 -#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you - increase DEPCA_PKT_STAT_SZ */ -struct depca_private { - char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */ - enum depca_type adapter; /* Adapter type */ - enum { - DEPCA_BUS_MCA = 1, - DEPCA_BUS_ISA, - DEPCA_BUS_EISA, - } depca_bus; /* type of bus */ - struct depca_init init_block; /* Shadow Initialization block */ -/* CPU address space fields */ - struct depca_rx_desc __iomem *rx_ring; /* Pointer to start of RX descriptor ring */ - struct depca_tx_desc __iomem *tx_ring; /* Pointer to start of TX descriptor ring */ - void __iomem *rx_buff[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */ - void __iomem *tx_buff[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */ - void __iomem *sh_mem; /* CPU mapped virt address of device RAM */ - u_long mem_start; /* Bus address of device RAM (before remap) */ - u_long mem_len; /* device memory size */ -/* Device address space fields */ - u_long device_ram_start; /* Start of RAM in device addr space */ -/* Offsets used in both address spaces */ - u_long rx_ring_offset; /* Offset from start of RAM to rx_ring */ - u_long tx_ring_offset; /* Offset from start of RAM to tx_ring */ - u_long buffs_offset; /* LANCE Rx and Tx buffers start address. */ -/* Kernel-only (not device) fields */ - int rx_new, tx_new; /* The next free ring entry */ - int rx_old, tx_old; /* The ring entries to be free()ed. 
*/ - spinlock_t lock; - struct { /* Private stats counters */ - u32 bins[DEPCA_PKT_STAT_SZ]; - u32 unicast; - u32 multicast; - u32 broadcast; - u32 excessive_collisions; - u32 tx_underruns; - u32 excessive_underruns; - } pktStats; - int txRingMask; /* TX ring mask */ - int rxRingMask; /* RX ring mask */ - s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */ - s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */ -}; - -/* -** The transmit ring full condition is described by the tx_old and tx_new -** pointers by: -** tx_old = tx_new Empty ring -** tx_old = tx_new+1 Full ring -** tx_old+txRingMask = tx_new Full ring (wrapped condition) -*/ -#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ - lp->tx_old+lp->txRingMask-lp->tx_new:\ - lp->tx_old -lp->tx_new-1) - -/* -** Public Functions -*/ -static int depca_open(struct net_device *dev); -static netdev_tx_t depca_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t depca_interrupt(int irq, void *dev_id); -static int depca_close(struct net_device *dev); -static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static void depca_tx_timeout(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); - -/* -** Private functions -*/ -static void depca_init_ring(struct net_device *dev); -static int depca_rx(struct net_device *dev); -static int depca_tx(struct net_device *dev); - -static void LoadCSRs(struct net_device *dev); -static int InitRestartDepca(struct net_device *dev); -static int DepcaSignature(char *name, u_long paddr); -static int DevicePresent(u_long ioaddr); -static int get_hw_addr(struct net_device *dev); -static void SetMulticastFilter(struct net_device *dev); -static int load_packet(struct net_device *dev, struct sk_buff *skb); -static void depca_dbg_open(struct net_device *dev); - -static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 }; -static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 }; -static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 }; -static u_char *depca_irq; - -static int irq; -static int io; -static char *adapter_name; -static int mem; /* For loadable module assignment - use insmod mem=0x????? .... */ -module_param (irq, int, 0); -module_param (io, int, 0); -module_param (adapter_name, charp, 0); -module_param (mem, int, 0); -MODULE_PARM_DESC(irq, "DEPCA IRQ number"); -MODULE_PARM_DESC(io, "DEPCA I/O base address"); -MODULE_PARM_DESC(adapter_name, "DEPCA adapter name"); -MODULE_PARM_DESC(mem, "DEPCA shared memory address"); -MODULE_LICENSE("GPL"); - -/* -** Miscellaneous defines... 
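[Illustration: the empty/full/wrapped rules above can be exercised in isolation. The sketch below reproduces the TX_BUFFS_AVAIL arithmetic with the ring state passed explicitly; the 8-entry ring matches NUM_TX_DESC:]

#include <stdio.h>

#define TX_RING_MASK 7 /* 8 descriptors, as in the driver above */

/* Same arithmetic as the driver's TX_BUFFS_AVAIL macro, with the ring
 * indices passed in rather than read from lp. */
static int tx_buffs_avail(int tx_old, int tx_new)
{
    return (tx_old <= tx_new) ? tx_old + TX_RING_MASK - tx_new
                              : tx_old - tx_new - 1;
}

int main(void)
{
    printf("empty   (old=0, new=0): %d free\n", tx_buffs_avail(0, 0)); /* 7 */
    printf("full    (old=1, new=0): %d free\n", tx_buffs_avail(1, 0)); /* 0 */
    printf("wrapped (old=0, new=7): %d free\n", tx_buffs_avail(0, 7)); /* 0 */
    return 0;
}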
-*/ -#define STOP_DEPCA \ - outw(CSR0, DEPCA_ADDR);\ - outw(STOP, DEPCA_DATA) - -static const struct net_device_ops depca_netdev_ops = { - .ndo_open = depca_open, - .ndo_start_xmit = depca_start_xmit, - .ndo_stop = depca_close, - .ndo_set_multicast_list = set_multicast_list, - .ndo_do_ioctl = depca_ioctl, - .ndo_tx_timeout = depca_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init depca_hw_init (struct net_device *dev, struct device *device) -{ - struct depca_private *lp; - int i, j, offset, netRAM, mem_len, status = 0; - s16 nicsr; - u_long ioaddr; - u_long mem_start; - - /* - * We are now supposed to enter this function with the - * following fields filled with proper values : - * - * dev->base_addr - * lp->mem_start - * lp->depca_bus - * lp->adapter - * - * dev->irq can be set if known from device configuration (on - * MCA or EISA) or module option. Otherwise, it will be auto - * detected. - */ - - ioaddr = dev->base_addr; - - STOP_DEPCA; - - nicsr = inb(DEPCA_NICSR); - nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM); - outb(nicsr, DEPCA_NICSR); - - if (inw(DEPCA_DATA) != STOP) { - return -ENXIO; - } - - lp = netdev_priv(dev); - mem_start = lp->mem_start; - - if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown) - return -ENXIO; - - printk("%s: %s at 0x%04lx", - dev_name(device), depca_signature[lp->adapter], ioaddr); - - switch (lp->depca_bus) { -#ifdef CONFIG_MCA - case DEPCA_BUS_MCA: - printk(" (MCA slot %d)", to_mca_device(device)->slot + 1); - break; -#endif - -#ifdef CONFIG_EISA - case DEPCA_BUS_EISA: - printk(" (EISA slot %d)", to_eisa_device(device)->slot); - break; -#endif - - case DEPCA_BUS_ISA: - break; - - default: - printk("Unknown DEPCA bus %d\n", lp->depca_bus); - return -ENXIO; - } - - printk(", h/w address "); - status = get_hw_addr(dev); - printk("%pM", dev->dev_addr); - if (status != 0) { - printk(" which has an Ethernet PROM CRC error.\n"); - return -ENXIO; - } - - /* Set up the maximum amount of network RAM(kB) */ - netRAM = ((lp->adapter != DEPCA) ? 64 : 48); - if ((nicsr & _128KB) && (lp->adapter == de422)) - netRAM = 128; - - /* Shared Memory Base Address */ - if (nicsr & BUF) { - nicsr &= ~BS; /* DEPCA RAM in top 32k */ - netRAM -= 32; - - /* Only EISA/ISA needs start address to be re-computed */ - if (lp->depca_bus != DEPCA_BUS_MCA) - mem_start += 0x8000; - } - - if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init))) - > (netRAM << 10)) { - printk(",\n requests %dkB RAM: only %dkB is available!\n", (mem_len >> 10), netRAM); - return -ENXIO; - } - - printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start); - - /* Enable the shadow RAM. 
*/ - if (lp->adapter != DEPCA) { - nicsr |= SHE; - outb(nicsr, DEPCA_NICSR); - } - - spin_lock_init(&lp->lock); - sprintf(lp->adapter_name, "%s (%s)", - depca_signature[lp->adapter], dev_name(device)); - status = -EBUSY; - - /* Initialisation Block */ - if (!request_mem_region (mem_start, mem_len, lp->adapter_name)) { - printk(KERN_ERR "depca: cannot request ISA memory, aborting\n"); - goto out_priv; - } - - status = -EIO; - lp->sh_mem = ioremap(mem_start, mem_len); - if (lp->sh_mem == NULL) { - printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n"); - goto out1; - } - - lp->mem_start = mem_start; - lp->mem_len = mem_len; - lp->device_ram_start = mem_start & LA_MASK; - - offset = 0; - offset += sizeof(struct depca_init); - - /* Tx & Rx descriptors (aligned to a quadword boundary) */ - offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN; - lp->rx_ring = lp->sh_mem + offset; - lp->rx_ring_offset = offset; - - offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC); - lp->tx_ring = lp->sh_mem + offset; - lp->tx_ring_offset = offset; - - offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC); - - lp->buffs_offset = offset; - - /* Finish initialising the ring information. */ - lp->rxRingMask = NUM_RX_DESC - 1; - lp->txRingMask = NUM_TX_DESC - 1; - - /* Calculate Tx/Rx RLEN size for the descriptors. */ - for (i = 0, j = lp->rxRingMask; j > 0; i++) { - j >>= 1; - } - lp->rx_rlen = (s32) (i << 29); - for (i = 0, j = lp->txRingMask; j > 0; i++) { - j >>= 1; - } - lp->tx_rlen = (s32) (i << 29); - - /* Load the initialisation block */ - depca_init_ring(dev); - - /* Initialise the control and status registers */ - LoadCSRs(dev); - - /* Enable DEPCA board interrupts for autoprobing */ - nicsr = ((nicsr & ~IM) | IEN); - outb(nicsr, DEPCA_NICSR); - - /* To auto-IRQ we enable the initialization-done and DMA err, - interrupts. For now we will always get a DMA error. */ - if (dev->irq < 2) { - unsigned char irqnum; - unsigned long irq_mask, delay; - - irq_mask = probe_irq_on(); - - /* Assign the correct irq list */ - switch (lp->adapter) { - case DEPCA: - case de100: - case de101: - depca_irq = de1xx_irq; - break; - case de200: - case de201: - case de202: - case de210: - case de212: - depca_irq = de2xx_irq; - break; - case de422: - depca_irq = de422_irq; - break; - - default: - break; /* Not reached */ - } - - /* Trigger an initialization just for the interrupt. */ - outw(INEA | INIT, DEPCA_DATA); - - delay = jiffies + HZ/50; - while (time_before(jiffies, delay)) - yield(); - - irqnum = probe_irq_off(irq_mask); - - status = -ENXIO; - if (!irqnum) { - printk(" and failed to detect IRQ line.\n"); - goto out2; - } else { - for (dev->irq = 0, i = 0; (depca_irq[i]) && (!dev->irq); i++) - if (irqnum == depca_irq[i]) { - dev->irq = irqnum; - printk(" and uses IRQ%d.\n", dev->irq); - } - - if (!dev->irq) { - printk(" but incorrect IRQ line detected.\n"); - goto out2; - } - } - } else { - printk(" and assigned IRQ%d.\n", dev->irq); - } - - if (depca_debug > 1) { - printk(version); - } - - /* The DEPCA-specific entries in the device structure. 
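[Illustration: the two loops above pack log2(ring size) into bits 29-31 of the ring-base words of the initialisation block, which is how the LANCE encodes its RLEN/TLEN fields. A standalone sketch of the same computation:]

#include <stdio.h>

/* Mirror of the driver's loop: count how many right-shifts empty the
 * ring mask, then place that count in bits 29..31. */
static unsigned int rlen_for_mask(unsigned int ring_mask)
{
    unsigned int i, j;

    for (i = 0, j = ring_mask; j > 0; i++)
        j >>= 1;
    return i << 29;
}

int main(void)
{
    /* For an 8-entry ring (mask 7): log2(8) = 3, encoded as 3 << 29. */
    printf("mask 7  -> 0x%08x\n", rlen_for_mask(7));
    /* For a 32-entry ring (mask 31): 5 << 29. */
    printf("mask 31 -> 0x%08x\n", rlen_for_mask(31));
    return 0;
}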
*/ - dev->netdev_ops = &depca_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - - dev->mem_start = 0; - - dev_set_drvdata(device, dev); - SET_NETDEV_DEV (dev, device); - - status = register_netdev(dev); - if (status == 0) - return 0; -out2: - iounmap(lp->sh_mem); -out1: - release_mem_region (mem_start, mem_len); -out_priv: - return status; -} - - -static int depca_open(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - s16 nicsr; - int status = 0; - - STOP_DEPCA; - nicsr = inb(DEPCA_NICSR); - - /* Make sure the shadow RAM is enabled */ - if (lp->adapter != DEPCA) { - nicsr |= SHE; - outb(nicsr, DEPCA_NICSR); - } - - /* Re-initialize the DEPCA... */ - depca_init_ring(dev); - LoadCSRs(dev); - - depca_dbg_open(dev); - - if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) { - printk("depca_open(): Requested IRQ%d is busy\n", dev->irq); - status = -EAGAIN; - } else { - - /* Enable DEPCA board interrupts and turn off LED */ - nicsr = ((nicsr & ~IM & ~LED) | IEN); - outb(nicsr, DEPCA_NICSR); - outw(CSR0, DEPCA_ADDR); - - netif_start_queue(dev); - - status = InitRestartDepca(dev); - - if (depca_debug > 1) { - printk("CSR0: 0x%4.4x\n", inw(DEPCA_DATA)); - printk("nicsr: 0x%02x\n", inb(DEPCA_NICSR)); - } - } - return status; -} - -/* Initialize the lance Rx and Tx descriptor rings. */ -static void depca_init_ring(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_int i; - u_long offset; - - /* Lock out other processes whilst setting up the hardware */ - netif_stop_queue(dev); - - lp->rx_new = lp->tx_new = 0; - lp->rx_old = lp->tx_old = 0; - - /* Initialize the base address and length of each buffer in the ring */ - for (i = 0; i <= lp->rxRingMask; i++) { - offset = lp->buffs_offset + i * RX_BUFF_SZ; - writel((lp->device_ram_start + offset) | R_OWN, &lp->rx_ring[i].base); - writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length); - lp->rx_buff[i] = lp->sh_mem + offset; - } - - for (i = 0; i <= lp->txRingMask; i++) { - offset = lp->buffs_offset + (i + lp->rxRingMask + 1) * TX_BUFF_SZ; - writel((lp->device_ram_start + offset) & 0x00ffffff, &lp->tx_ring[i].base); - lp->tx_buff[i] = lp->sh_mem + offset; - } - - /* Set up the initialization block */ - lp->init_block.rx_ring = (lp->device_ram_start + lp->rx_ring_offset) | lp->rx_rlen; - lp->init_block.tx_ring = (lp->device_ram_start + lp->tx_ring_offset) | lp->tx_rlen; - - SetMulticastFilter(dev); - - for (i = 0; i < ETH_ALEN; i++) { - lp->init_block.phys_addr[i] = dev->dev_addr[i]; - } - - lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */ -} - - -static void depca_tx_timeout(struct net_device *dev) -{ - u_long ioaddr = dev->base_addr; - - printk("%s: transmit timed out, status %04x, resetting.\n", dev->name, inw(DEPCA_DATA)); - - STOP_DEPCA; - depca_init_ring(dev); - LoadCSRs(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); - InitRestartDepca(dev); -} - - -/* -** Writes a socket buffer to TX descriptor ring and starts transmission -*/ -static netdev_tx_t depca_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - int status = 0; - - /* Transmitter timeout, serious problems. */ - if (skb->len < 1) - goto out; - - if (skb_padto(skb, ETH_ZLEN)) - goto out; - - netif_stop_queue(dev); - - if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */ - status = load_packet(dev, skb); - - if (!status) { - /* Trigger an immediate send demand. 
*/ - outw(CSR0, DEPCA_ADDR); - outw(INEA | TDMD, DEPCA_DATA); - - dev_kfree_skb(skb); - } - if (TX_BUFFS_AVAIL) - netif_start_queue(dev); - } else - status = NETDEV_TX_LOCKED; - - out: - return status; -} - -/* -** The DEPCA interrupt handler. -*/ -static irqreturn_t depca_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct depca_private *lp; - s16 csr0, nicsr; - u_long ioaddr; - - if (dev == NULL) { - printk("depca_interrupt(): irq %d for unknown device.\n", irq); - return IRQ_NONE; - } - - lp = netdev_priv(dev); - ioaddr = dev->base_addr; - - spin_lock(&lp->lock); - - /* mask the DEPCA board interrupts and turn on the LED */ - nicsr = inb(DEPCA_NICSR); - nicsr |= (IM | LED); - outb(nicsr, DEPCA_NICSR); - - outw(CSR0, DEPCA_ADDR); - csr0 = inw(DEPCA_DATA); - - /* Acknowledge all of the current interrupt sources ASAP. */ - outw(csr0 & INTE, DEPCA_DATA); - - if (csr0 & RINT) /* Rx interrupt (packet arrived) */ - depca_rx(dev); - - if (csr0 & TINT) /* Tx interrupt (packet sent) */ - depca_tx(dev); - - /* Any resources available? */ - if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) { - netif_wake_queue(dev); - } - - /* Unmask the DEPCA board interrupts and turn off the LED */ - nicsr = (nicsr & ~IM & ~LED); - outb(nicsr, DEPCA_NICSR); - - spin_unlock(&lp->lock); - return IRQ_HANDLED; -} - -/* Called with lp->lock held */ -static int depca_rx(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - int i, entry; - s32 status; - - for (entry = lp->rx_new; !(readl(&lp->rx_ring[entry].base) & R_OWN); entry = lp->rx_new) { - status = readl(&lp->rx_ring[entry].base) >> 16; - if (status & R_STP) { /* Remember start of frame */ - lp->rx_old = entry; - } - if (status & R_ENP) { /* Valid frame status */ - if (status & R_ERR) { /* There was an error. */ - dev->stats.rx_errors++; /* Update the error stats. 
*/ - if (status & R_FRAM) - dev->stats.rx_frame_errors++; - if (status & R_OFLO) - dev->stats.rx_over_errors++; - if (status & R_CRC) - dev->stats.rx_crc_errors++; - if (status & R_BUFF) - dev->stats.rx_fifo_errors++; - } else { - short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4; - struct sk_buff *skb; - - skb = dev_alloc_skb(pkt_len + 2); - if (skb != NULL) { - unsigned char *buf; - skb_reserve(skb, 2); /* 16 byte align the IP header */ - buf = skb_put(skb, pkt_len); - if (entry < lp->rx_old) { /* Wrapped buffer */ - len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ; - memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len); - memcpy_fromio(buf + len, lp->rx_buff[0], pkt_len - len); - } else { /* Linear buffer */ - memcpy_fromio(buf, lp->rx_buff[lp->rx_old], pkt_len); - } - - /* - ** Notify the upper protocol layers that there is another - ** packet to handle - */ - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - - /* - ** Update stats - */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) { - if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) { - lp->pktStats.bins[i]++; - i = DEPCA_PKT_STAT_SZ; - } - } - if (is_multicast_ether_addr(buf)) { - if (is_broadcast_ether_addr(buf)) { - lp->pktStats.broadcast++; - } else { - lp->pktStats.multicast++; - } - } else if (compare_ether_addr(buf, dev->dev_addr) == 0) { - lp->pktStats.unicast++; - } - - lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ - if (lp->pktStats.bins[0] == 0) { /* Reset counters */ - memset((char *) &lp->pktStats, 0, sizeof(lp->pktStats)); - } - } else { - printk("%s: Memory squeeze, deferring packet.\n", dev->name); - dev->stats.rx_dropped++; /* Really, deferred. */ - break; - } - } - /* Change buffer ownership for this last frame, back to the adapter */ - for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) { - writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); - } - writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); - } - - /* - ** Update entry information - */ - lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask; - } - - return 0; -} - -/* -** Buffer sent - check for buffer errors. -** Called with lp->lock held -*/ -static int depca_tx(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - int entry; - s32 status; - u_long ioaddr = dev->base_addr; - - for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) { - status = readl(&lp->tx_ring[entry].base) >> 16; - - if (status < 0) { /* Packet not yet sent! */ - break; - } else if (status & T_ERR) { /* An error occurred. */ - status = readl(&lp->tx_ring[entry].misc); - dev->stats.tx_errors++; - if (status & TMD3_RTRY) - dev->stats.tx_aborted_errors++; - if (status & TMD3_LCAR) - dev->stats.tx_carrier_errors++; - if (status & TMD3_LCOL) - dev->stats.tx_window_errors++; - if (status & TMD3_UFLO) - dev->stats.tx_fifo_errors++; - if (status & (TMD3_BUFF | TMD3_UFLO)) { - /* Trigger an immediate send demand. 
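[Illustration: the wrapped-buffer copy in depca_rx() above, modelled with ordinary memory. The single memcpy() spanning two descriptors relies on the per-descriptor buffers being contiguous, just as they are in the card's shared RAM; the sizes here are shrunk so the wrap is visible:]

#include <stdio.h>
#include <string.h>

#define RING_SIZE 8  /* illustrative; matches NUM_RX_DESC above */
#define BUFF_SZ   16 /* tiny buffers so the wrap is easy to see */

static char ring[RING_SIZE][BUFF_SZ]; /* stands in for lp->rx_buff[] */

/* Copy a frame that starts in descriptor 'first' and may wrap past the
 * end of the ring, as depca_rx() does with memcpy_fromio(). */
static void copy_frame(char *dst, int first, int last, int pkt_len)
{
    if (last < first) { /* wrapped buffer */
        int len = (RING_SIZE - first) * BUFF_SZ;
        memcpy(dst, ring[first], len);
        memcpy(dst + len, ring[0], pkt_len - len);
    } else {            /* linear buffer */
        memcpy(dst, ring[first], pkt_len);
    }
}

int main(void)
{
    char frame[3 * BUFF_SZ];
    int i;

    /* Fill the ring so each byte records its source descriptor. */
    for (i = 0; i < RING_SIZE; i++)
        memset(ring[i], '0' + i, BUFF_SZ);

    /* A 40-byte frame starting in descriptor 6 wraps into descriptor 0. */
    copy_frame(frame, 6, 0, 40);
    printf("%.40s\n", frame); /* sixteen '6's, sixteen '7's, eight '0's */
    return 0;
}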
*/ - outw(CSR0, DEPCA_ADDR); - outw(INEA | TDMD, DEPCA_DATA); - } - } else if (status & (T_MORE | T_ONE)) { - dev->stats.collisions++; - } else { - dev->stats.tx_packets++; - } - - /* Update all the pointers */ - lp->tx_old = (lp->tx_old + 1) & lp->txRingMask; - } - - return 0; -} - -static int depca_close(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - s16 nicsr; - u_long ioaddr = dev->base_addr; - - netif_stop_queue(dev); - - outw(CSR0, DEPCA_ADDR); - - if (depca_debug > 1) { - printk("%s: Shutting down ethercard, status was %2.2x.\n", dev->name, inw(DEPCA_DATA)); - } - - /* - ** We stop the DEPCA here -- it occasionally polls - ** memory if we don't. - */ - outw(STOP, DEPCA_DATA); - - /* - ** Give back the ROM in case the user wants to go to DOS - */ - if (lp->adapter != DEPCA) { - nicsr = inb(DEPCA_NICSR); - nicsr &= ~SHE; - outb(nicsr, DEPCA_NICSR); - } - - /* - ** Free the associated irq - */ - free_irq(dev->irq, dev); - return 0; -} - -static void LoadCSRs(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - - outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */ - outw((u16) lp->device_ram_start, DEPCA_DATA); - outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */ - outw((u16) (lp->device_ram_start >> 16), DEPCA_DATA); - outw(CSR3, DEPCA_ADDR); /* ALE control */ - outw(ACON, DEPCA_DATA); - - outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */ -} - -static int InitRestartDepca(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - int i, status = 0; - - /* Copy the shadow init_block to shared memory */ - memcpy_toio(lp->sh_mem, &lp->init_block, sizeof(struct depca_init)); - - outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */ - outw(INIT, DEPCA_DATA); /* initialize DEPCA */ - - /* wait for lance to complete initialisation */ - for (i = 0; (i < 100) && !(inw(DEPCA_DATA) & IDON); i++); - - if (i != 100) { - /* clear IDON by writing a "1", enable interrupts and start lance */ - outw(IDON | INEA | STRT, DEPCA_DATA); - if (depca_debug > 2) { - printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA)); - } - } else { - printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA)); - status = -1; - } - - return status; -} - -/* -** Set or clear the multicast filter for this adaptor. -*/ -static void set_multicast_list(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - - netif_stop_queue(dev); - while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */ - - STOP_DEPCA; /* Temporarily stop the depca. */ - depca_init_ring(dev); /* Initialize the descriptor rings */ - - if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */ - lp->init_block.mode |= PROM; - } else { - SetMulticastFilter(dev); - lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */ - } - - LoadCSRs(dev); /* Reload CSR3 */ - InitRestartDepca(dev); /* Resume normal operation. */ - netif_start_queue(dev); /* Unlock the TX ring */ -} - -/* -** Calculate the hash code and update the logical address filter -** from a list of ethernet multicast addresses. -** Big endian crc one liner is mine, all mine, ha ha ha ha! -** LANCE calculates its hash codes big endian. 
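[Illustration: the hash derivation used by SetMulticastFilter() below, checked in user space. The bit-serial big-endian CRC here is assumed equivalent to the kernel's ether_crc(); the multicast address is arbitrary:]

#include <stdio.h>

#define ETH_ALEN 6
#define ETHERNET_POLYNOMIAL 0x04c11db7U

/* Bit-serial big-endian Ethernet CRC, assumed equivalent to the
 * kernel's ether_crc() for the purposes of this illustration. */
static unsigned int ether_crc(int length, const unsigned char *data)
{
    unsigned int crc = 0xffffffffU;

    while (--length >= 0) {
        unsigned char octet = *data++;
        int bit;

        for (bit = 0; bit < 8; bit++, octet >>= 1)
            crc = (crc << 1) ^
                  ((((crc >> 31) ^ octet) & 1) ? ETHERNET_POLYNOMIAL : 0);
    }
    return crc;
}

int main(void)
{
    /* Arbitrary multicast address, purely for illustration. */
    const unsigned char addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    unsigned int crc = ether_crc(ETH_ALEN, addr);
    unsigned int c = crc;
    unsigned int hashcode = c & 1;      /* 6 LSBs of the CRC ... */
    int j;

    for (j = 0; j < 5; j++)             /* ... taken in reverse order */
        hashcode = (hashcode << 1) | ((c >>= 1) & 1);

    printf("crc=0x%08x hashcode=%2u -> mcast_table[%u] |= 0x%02x\n",
           crc, hashcode, hashcode >> 3, 1u << (hashcode & 0x07));
    return 0;
}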
-*/ -static void SetMulticastFilter(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - struct netdev_hw_addr *ha; - int i, j, bit, byte; - u16 hashcode; - u32 crc; - - if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */ - for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { - lp->init_block.mcast_table[i] = (char) 0xff; - } - } else { - for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { /* Clear the multicast table */ - lp->init_block.mcast_table[i] = 0; - } - /* Add multicast addresses */ - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc(ETH_ALEN, ha->addr); - hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */ - for (j = 0; j < 5; j++) { /* ... in reverse order. */ - hashcode = (hashcode << 1) | ((crc >>= 1) & 1); - } - - byte = hashcode >> 3; /* bit[3-5] -> byte in filter */ - bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */ - lp->init_block.mcast_table[byte] |= bit; - } - } -} - -static int __init depca_common_init (u_long ioaddr, struct net_device **devp) -{ - int status = 0; - - if (!request_region (ioaddr, DEPCA_TOTAL_SIZE, depca_string)) { - status = -EBUSY; - goto out; - } - - if (DevicePresent(ioaddr)) { - status = -ENODEV; - goto out_release; - } - - if (!(*devp = alloc_etherdev (sizeof (struct depca_private)))) { - status = -ENOMEM; - goto out_release; - } - - return 0; - - out_release: - release_region (ioaddr, DEPCA_TOTAL_SIZE); - out: - return status; -} - -#ifdef CONFIG_MCA -/* -** Microchannel bus I/O device probe -*/ -static int __init depca_mca_probe(struct device *device) -{ - unsigned char pos[2]; - unsigned char where; - unsigned long iobase, mem_start; - int irq, err; - struct mca_device *mdev = to_mca_device (device); - struct net_device *dev; - struct depca_private *lp; - - /* - ** Search for the adapter. If an address has been given, search - ** specifically for the card at that address. Otherwise find the - ** first card in the system. - */ - - pos[0] = mca_device_read_stored_pos(mdev, 2); - pos[1] = mca_device_read_stored_pos(mdev, 3); - - /* - ** IO of card is handled by bits 1 and 2 of pos0. - ** - ** bit2 bit1 IO - ** 0 0 0x2c00 - ** 0 1 0x2c10 - ** 1 0 0x2c20 - ** 1 1 0x2c30 - */ - where = (pos[0] & 6) >> 1; - iobase = 0x2c00 + (0x10 * where); - - /* - ** Found the adapter we were looking for. Now start setting it up. - ** - ** First work on decoding the IRQ. It's stored in the lower 4 bits - ** of pos1. Bits are as follows (from the ADF file): - ** - ** Bits - ** 3 2 1 0 IRQ - ** -------------------- - ** 0 0 1 0 5 - ** 0 0 0 1 9 - ** 0 1 0 0 10 - ** 1 0 0 0 11 - */ - where = pos[1] & 0x0f; - switch (where) { - case 1: - irq = 9; - break; - case 2: - irq = 5; - break; - case 4: - irq = 10; - break; - case 8: - irq = 11; - break; - default: - printk("%s: mca_probe IRQ error. You should never get here (%d).\n", mdev->name, where); - return -EINVAL; - } - - /* - ** Shared memory address of adapter is stored in bits 3-5 of pos0. - ** They are mapped as follows: - ** - ** Bit - ** 5 4 3 Memory Addresses - ** 0 0 0 C0000-CFFFF (64K) - ** 1 0 0 C8000-CFFFF (32K) - ** 0 0 1 D0000-DFFFF (64K) - ** 1 0 1 D8000-DFFFF (32K) - ** 0 1 0 E0000-EFFFF (64K) - ** 1 1 0 E8000-EFFFF (32K) - */ - where = (pos[0] & 0x18) >> 3; - mem_start = 0xc0000 + (where * 0x10000); - if (pos[0] & 0x20) { - mem_start += 0x8000; - } - - /* claim the slot */ - strncpy(mdev->name, depca_mca_adapter_name[mdev->index], - sizeof(mdev->name)); - mca_device_set_claim(mdev, 1); - - /* - ** Get everything allocated and initialized... 
(almost just - ** like the ISA and EISA probes) - */ - irq = mca_device_transform_irq(mdev, irq); - iobase = mca_device_transform_ioport(mdev, iobase); - - if ((err = depca_common_init (iobase, &dev))) - goto out_unclaim; - - dev->irq = irq; - dev->base_addr = iobase; - lp = netdev_priv(dev); - lp->depca_bus = DEPCA_BUS_MCA; - lp->adapter = depca_mca_adapter_type[mdev->index]; - lp->mem_start = mem_start; - - if ((err = depca_hw_init(dev, device))) - goto out_free; - - return 0; - - out_free: - free_netdev (dev); - release_region (iobase, DEPCA_TOTAL_SIZE); - out_unclaim: - mca_device_set_claim(mdev, 0); - - return err; -} -#endif - -/* -** ISA bus I/O device probe -*/ - -static void __init depca_platform_probe (void) -{ - int i; - struct platform_device *pldev; - - for (i = 0; depca_io_ports[i].iobase; i++) { - depca_io_ports[i].device = NULL; - - /* if an address has been specified on the command - * line, use it (if valid) */ - if (io && io != depca_io_ports[i].iobase) - continue; - - pldev = platform_device_alloc(depca_string, i); - if (!pldev) - continue; - - pldev->dev.platform_data = (void *) depca_io_ports[i].iobase; - depca_io_ports[i].device = pldev; - - if (platform_device_add(pldev)) { - depca_io_ports[i].device = NULL; - pldev->dev.platform_data = NULL; - platform_device_put(pldev); - continue; - } - - if (!pldev->dev.driver) { - /* The driver was not bound to this device, there was - * no hardware at this address. Unregister it, as the - * release function will take care of freeing the - * allocated structure */ - - depca_io_ports[i].device = NULL; - pldev->dev.platform_data = NULL; - platform_device_unregister (pldev); - } - } -} - -static enum depca_type __init depca_shmem_probe (ulong *mem_start) -{ - u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES; - enum depca_type adapter = unknown; - int i; - - for (i = 0; mem_base[i]; i++) { - *mem_start = mem ? mem : mem_base[i]; - adapter = DepcaSignature (adapter_name, *mem_start); - if (adapter != unknown) - break; - } - - return adapter; -} - -static int __devinit depca_isa_probe (struct platform_device *device) -{ - struct net_device *dev; - struct depca_private *lp; - u_long ioaddr, mem_start = 0; - enum depca_type adapter = unknown; - int status = 0; - - ioaddr = (u_long) device->dev.platform_data; - - if ((status = depca_common_init (ioaddr, &dev))) - goto out; - - adapter = depca_shmem_probe (&mem_start); - - if (adapter == unknown) { - status = -ENODEV; - goto out_free; - } - - dev->base_addr = ioaddr; - dev->irq = irq; /* Use whatever value the user gave - * us, and 0 if he didn't. */ - lp = netdev_priv(dev); - lp->depca_bus = DEPCA_BUS_ISA; - lp->adapter = adapter; - lp->mem_start = mem_start; - - if ((status = depca_hw_init(dev, &device->dev))) - goto out_free; - - return 0; - - out_free: - free_netdev (dev); - release_region (ioaddr, DEPCA_TOTAL_SIZE); - out: - return status; -} - -/* -** EISA callbacks from sysfs. -*/ - -#ifdef CONFIG_EISA -static int __init depca_eisa_probe (struct device *device) -{ - enum depca_type adapter = unknown; - struct eisa_device *edev; - struct net_device *dev; - struct depca_private *lp; - u_long ioaddr, mem_start; - int status = 0; - - edev = to_eisa_device (device); - ioaddr = edev->base_addr + DEPCA_EISA_IO_PORTS; - - if ((status = depca_common_init (ioaddr, &dev))) - goto out; - - /* It would have been nice to get card configuration from the - * card. Unfortunately, this register is write-only (shares - * it's address with the ethernet prom)... 
As we don't parse - * the EISA configuration structures (yet... :-), just rely on - * the ISA probing to sort it out... */ - - adapter = depca_shmem_probe (&mem_start); - if (adapter == unknown) { - status = -ENODEV; - goto out_free; - } - - dev->base_addr = ioaddr; - dev->irq = irq; - lp = netdev_priv(dev); - lp->depca_bus = DEPCA_BUS_EISA; - lp->adapter = edev->id.driver_data; - lp->mem_start = mem_start; - - if ((status = depca_hw_init(dev, device))) - goto out_free; - - return 0; - - out_free: - free_netdev (dev); - release_region (ioaddr, DEPCA_TOTAL_SIZE); - out: - return status; -} -#endif - -static int __devexit depca_device_remove (struct device *device) -{ - struct net_device *dev; - struct depca_private *lp; - int bus; - - dev = dev_get_drvdata(device); - lp = netdev_priv(dev); - - unregister_netdev (dev); - iounmap (lp->sh_mem); - release_mem_region (lp->mem_start, lp->mem_len); - release_region (dev->base_addr, DEPCA_TOTAL_SIZE); - bus = lp->depca_bus; - free_netdev (dev); - - return 0; -} - -/* -** Look for a particular board name in the on-board Remote Diagnostics -** and Boot (readb) ROM. This will also give us a clue to the network RAM -** base address. -*/ -static int __init DepcaSignature(char *name, u_long base_addr) -{ - u_int i, j, k; - void __iomem *ptr; - char tmpstr[16]; - u_long prom_addr = base_addr + 0xc000; - u_long mem_addr = base_addr + 0x8000; /* 32KB */ - - /* Can't reserve the prom region, it is already marked as - * used, at least on x86. Instead, reserve a memory region a - * board would certainly use. If it works, go ahead. If not, - * run like hell... */ - - if (!request_mem_region (mem_addr, 16, depca_string)) - return unknown; - - /* Copy the first 16 bytes of ROM */ - - ptr = ioremap(prom_addr, 16); - if (ptr == NULL) { - printk(KERN_ERR "depca: I/O remap failed at %lx\n", prom_addr); - return unknown; - } - for (i = 0; i < 16; i++) { - tmpstr[i] = readb(ptr + i); - } - iounmap(ptr); - - release_mem_region (mem_addr, 16); - - /* Check if PROM contains a valid string */ - for (i = 0; *depca_signature[i] != '\0'; i++) { - for (j = 0, k = 0; j < 16 && k < strlen(depca_signature[i]); j++) { - if (depca_signature[i][k] == tmpstr[j]) { /* track signature */ - k++; - } else { /* lost signature; begin search again */ - k = 0; - } - } - if (k == strlen(depca_signature[i])) - break; - } - - /* Check if name string is valid, provided there's no PROM */ - if (name && *name && (i == unknown)) { - for (i = 0; *depca_signature[i] != '\0'; i++) { - if (strcmp(name, depca_signature[i]) == 0) - break; - } - } - - return i; -} - -/* -** Look for a special sequence in the Ethernet station address PROM that -** is common across all DEPCA products. Note that the original DEPCA needs -** its ROM address counter to be initialized and enabled. Only enable -** if the first address octet is a 0x08 - this minimises the chances of -** messing around with some other hardware, but it assumes that this DEPCA -** card initialized itself correctly. -** -** Search the Ethernet address ROM for the signature. Since the ROM address -** counter can start at an arbitrary point, the search must include the entire -** probe sequence length plus the (length_of_the_signature - 1). -** Stop the search IMMEDIATELY after the signature is found so that the -** PROM address counter is correctly positioned at the start of the -** ethernet address for later read out. 
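[Illustration: why the probe window is PROBE_LENGTH + (signature length - 1) bytes. The ROM's address counter may start anywhere in its 32-byte cycle, so the signature can straddle the wrap. Simulated below with a rotated buffer standing in for inb() reads; byte order is as seen on a little-endian host:]

#include <stdio.h>
#include <string.h>

#define PROBE_LENGTH 32

/* Search a cyclic ROM stream for sig[], reading at most
 * PROBE_LENGTH + siglen - 1 bytes, as DevicePresent() does from the
 * PROM data port. Returns the number of bytes read, or -1. */
static int find_sig(const unsigned char *rom, int start,
                    const unsigned char *sig, int siglen)
{
    int i, j = 0;

    for (i = 0; j < siglen && i < PROBE_LENGTH + siglen - 1; i++) {
        unsigned char data = rom[(start + i) % PROBE_LENGTH];
        if (sig[j] == data)
            j++;                        /* track signature */
        else
            j = (data == sig[0]) ? 1 : 0; /* lost it; maybe restart */
    }
    return (j == siglen) ? i : -1;
}

int main(void)
{
    unsigned char rom[PROBE_LENGTH] = { 0 };
    /* ETH_PROM_SIG (0xAA5500FF) twice, low byte first. */
    unsigned char sig[8] = { 0xff, 0x00, 0x55, 0xaa, 0xff, 0x00, 0x55, 0xaa };

    memcpy(rom, sig, sizeof(sig)); /* signature at offset 0 of the cycle */

    /* Even with the counter starting at offset 29, the signature that
     * straddles the wrap is found within the bounded window. */
    printf("found after %d reads\n", find_sig(rom, 29, sig, sizeof(sig)));
    return 0;
}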
-*/ -static int __init DevicePresent(u_long ioaddr) -{ - union { - struct { - u32 a; - u32 b; - } llsig; - char Sig[sizeof(u32) << 1]; - } - dev; - short sigLength = 0; - s8 data; - s16 nicsr; - int i, j, status = 0; - - data = inb(DEPCA_PROM); /* clear counter on DEPCA */ - data = inb(DEPCA_PROM); /* read data */ - - if (data == 0x08) { /* Enable counter on DEPCA */ - nicsr = inb(DEPCA_NICSR); - nicsr |= AAC; - outb(nicsr, DEPCA_NICSR); - } - - dev.llsig.a = ETH_PROM_SIG; - dev.llsig.b = ETH_PROM_SIG; - sigLength = sizeof(u32) << 1; - - for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) { - data = inb(DEPCA_PROM); - if (dev.Sig[j] == data) { /* track signature */ - j++; - } else { /* lost signature; begin search again */ - if (data == dev.Sig[0]) { /* rare case.... */ - j = 1; - } else { - j = 0; - } - } - } - - if (j != sigLength) { - status = -ENODEV; /* search failed */ - } - - return status; -} - -/* -** The DE100 and DE101 PROM accesses were made non-standard for some bizarre -** reason: access the upper half of the PROM with x=0; access the lower half -** with x=1. -*/ -static int __init get_hw_addr(struct net_device *dev) -{ - u_long ioaddr = dev->base_addr; - struct depca_private *lp = netdev_priv(dev); - int i, k, tmp, status = 0; - u_short j, x, chksum; - - x = (((lp->adapter == de100) || (lp->adapter == de101)) ? 1 : 0); - - for (i = 0, k = 0, j = 0; j < 3; j++) { - k <<= 1; - if (k > 0xffff) - k -= 0xffff; - - k += (u_char) (tmp = inb(DEPCA_PROM + x)); - dev->dev_addr[i++] = (u_char) tmp; - k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8); - dev->dev_addr[i++] = (u_char) tmp; - - if (k > 0xffff) - k -= 0xffff; - } - if (k == 0xffff) - k = 0; - - chksum = (u_char) inb(DEPCA_PROM + x); - chksum |= (u_short) (inb(DEPCA_PROM + x) << 8); - if (k != chksum) - status = -1; - - return status; -} - -/* -** Load a packet into the shared memory -*/ -static int load_packet(struct net_device *dev, struct sk_buff *skb) -{ - struct depca_private *lp = netdev_priv(dev); - int i, entry, end, len, status = NETDEV_TX_OK; - - entry = lp->tx_new; /* Ring around buffer number. */ - end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask; - if (!(readl(&lp->tx_ring[end].base) & T_OWN)) { /* Enough room? */ - /* - ** Caution: the write order is important here... don't set up the - ** ownership rights until all the other information is in place. - */ - if (end < entry) { /* wrapped buffer */ - len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ; - memcpy_toio(lp->tx_buff[entry], skb->data, len); - memcpy_toio(lp->tx_buff[0], skb->data + len, skb->len - len); - } else { /* linear buffer */ - memcpy_toio(lp->tx_buff[entry], skb->data, skb->len); - } - - /* set up the buffer descriptors */ - len = (skb->len < ETH_ZLEN) ? 
ETH_ZLEN : skb->len; - for (i = entry; i != end; i = (i+1) & lp->txRingMask) { - /* clean out flags */ - writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base); - writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */ - writew(-TX_BUFF_SZ, &lp->tx_ring[i].length); /* packet length in buffer */ - len -= TX_BUFF_SZ; - } - /* clean out flags */ - writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base); - writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */ - writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */ - - /* start of packet */ - writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base); - /* end of packet */ - writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base); - - for (i = end; i != entry; --i) { - /* ownership of packet */ - writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base); - if (i == 0) - i = lp->txRingMask + 1; - } - writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base); - - lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */ - } else { - status = NETDEV_TX_LOCKED; - } - - return status; -} - -static void depca_dbg_open(struct net_device *dev) -{ - struct depca_private *lp = netdev_priv(dev); - u_long ioaddr = dev->base_addr; - struct depca_init *p = &lp->init_block; - int i; - - if (depca_debug > 1) { - /* Do not copy the shadow init block into shared memory */ - /* Debugging should not affect normal operation! */ - /* The shadow init block will get copied across during InitRestartDepca */ - printk("%s: depca open with irq %d\n", dev->name, dev->irq); - printk("Descriptor head addresses (CPU):\n"); - printk(" 0x%lx 0x%lx\n", (u_long) lp->rx_ring, (u_long) lp->tx_ring); - printk("Descriptor addresses (CPU):\nRX: "); - for (i = 0; i < lp->rxRingMask; i++) { - if (i < 3) { - printk("%p ", &lp->rx_ring[i].base); - } - } - printk("...%p\n", &lp->rx_ring[i].base); - printk("TX: "); - for (i = 0; i < lp->txRingMask; i++) { - if (i < 3) { - printk("%p ", &lp->tx_ring[i].base); - } - } - printk("...%p\n", &lp->tx_ring[i].base); - printk("\nDescriptor buffers (Device):\nRX: "); - for (i = 0; i < lp->rxRingMask; i++) { - if (i < 3) { - printk("0x%8.8x ", readl(&lp->rx_ring[i].base)); - } - } - printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base)); - printk("TX: "); - for (i = 0; i < lp->txRingMask; i++) { - if (i < 3) { - printk("0x%8.8x ", readl(&lp->tx_ring[i].base)); - } - } - printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base)); - printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start); - printk(" mode: 0x%4.4x\n", p->mode); - printk(" physical address: %pM\n", p->phys_addr); - printk(" multicast hash table: "); - for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) { - printk("%2.2x:", p->mcast_table[i]); - } - printk("%2.2x\n", p->mcast_table[i]); - printk(" rx_ring at: 0x%8.8x\n", p->rx_ring); - printk(" tx_ring at: 0x%8.8x\n", p->tx_ring); - printk("buffers (Phys): 0x%8.8lx\n", lp->mem_start + lp->buffs_offset); - printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n", (int) lp->rxRingMask + 1, lp->rx_rlen); - printk("TX: %d Log2(txRingMask): 0x%8.8x\n", (int) lp->txRingMask + 1, lp->tx_rlen); - outw(CSR2, DEPCA_ADDR); - printk("CSR2&1: 0x%4.4x", inw(DEPCA_DATA)); - outw(CSR1, DEPCA_ADDR); - printk("%4.4x\n", inw(DEPCA_DATA)); - outw(CSR3, DEPCA_ADDR); - printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA)); - } -} - -/* -** Perform IOCTL call functions here. 
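[Illustration: load_packet()'s chaining arithmetic above, i.e. which descriptors a frame occupies and the length each one carries. The driver stores negated lengths; positive values are printed here, and the frame is deliberately oversized so the chain wraps:]

#include <stdio.h>

#define TX_BUFF_SZ 1536
#define ETH_ZLEN   60

/* Mirror of load_packet(): map a frame onto a chain of descriptors. */
static void chain(int skb_len, int ring_mask, int entry)
{
    int end = (entry + (skb_len - 1) / TX_BUFF_SZ) & ring_mask;
    int len = (skb_len < ETH_ZLEN) ? ETH_ZLEN : skb_len;
    int i;

    printf("%d-byte frame: descriptors %d..%d\n", skb_len, entry, end);
    for (i = entry; i != end; i = (i + 1) & ring_mask) {
        printf("  desc %d carries %d bytes\n", i, TX_BUFF_SZ);
        len -= TX_BUFF_SZ;
    }
    printf("  desc %d carries %d bytes (T_ENP set here)\n", end, len);
}

int main(void)
{
    chain(4000, 7, 6); /* wraps from descriptor 7 back to 0 */
    return 0;
}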
Some are privileged operations and the -** effective uid is checked in those cases. -** All multicast IOCTLs will not work here and are for testing purposes only. -*/ -static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct depca_private *lp = netdev_priv(dev); - struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru; - int i, status = 0; - u_long ioaddr = dev->base_addr; - union { - u8 addr[(HASH_TABLE_LEN * ETH_ALEN)]; - u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1]; - u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2]; - } tmp; - unsigned long flags; - void *buf; - - switch (ioc->cmd) { - case DEPCA_GET_HWADDR: /* Get the hardware address */ - for (i = 0; i < ETH_ALEN; i++) { - tmp.addr[i] = dev->dev_addr[i]; - } - ioc->len = ETH_ALEN; - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) - return -EFAULT; - break; - - case DEPCA_SET_HWADDR: /* Set the hardware address */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) - return -EFAULT; - for (i = 0; i < ETH_ALEN; i++) { - dev->dev_addr[i] = tmp.addr[i]; - } - netif_stop_queue(dev); - while (lp->tx_old != lp->tx_new) - cpu_relax(); /* Wait for the ring to empty */ - - STOP_DEPCA; /* Temporarily stop the depca. */ - depca_init_ring(dev); /* Initialize the descriptor rings */ - LoadCSRs(dev); /* Reload CSR3 */ - InitRestartDepca(dev); /* Resume normal operation. */ - netif_start_queue(dev); /* Unlock the TX ring */ - break; - - case DEPCA_SET_PROM: /* Set Promiscuous Mode */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - netif_stop_queue(dev); - while (lp->tx_old != lp->tx_new) - cpu_relax(); /* Wait for the ring to empty */ - - STOP_DEPCA; /* Temporarily stop the depca. */ - depca_init_ring(dev); /* Initialize the descriptor rings */ - lp->init_block.mode |= PROM; /* Set promiscuous mode */ - - LoadCSRs(dev); /* Reload CSR3 */ - InitRestartDepca(dev); /* Resume normal operation. */ - netif_start_queue(dev); /* Unlock the TX ring */ - break; - - case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - netif_stop_queue(dev); - while (lp->tx_old != lp->tx_new) - cpu_relax(); /* Wait for the ring to empty */ - - STOP_DEPCA; /* Temporarily stop the depca. */ - depca_init_ring(dev); /* Initialize the descriptor rings */ - lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */ - - LoadCSRs(dev); /* Reload CSR3 */ - InitRestartDepca(dev); /* Resume normal operation. */ - netif_start_queue(dev); /* Unlock the TX ring */ - break; - - case DEPCA_SAY_BOO: /* Say "Boo!" 
to the kernel log file */ - if(!capable(CAP_NET_ADMIN)) - return -EPERM; - printk("%s: Boo!\n", dev->name); - break; - - case DEPCA_GET_MCA: /* Get the multicast address table */ - ioc->len = (HASH_TABLE_LEN >> 3); - if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len)) - return -EFAULT; - break; - - case DEPCA_SET_MCA: /* Set a multicast address */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (ioc->len >= HASH_TABLE_LEN) - return -EINVAL; - if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len)) - return -EFAULT; - set_multicast_list(dev); - break; - - case DEPCA_CLR_MCA: /* Clear all multicast addresses */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - set_multicast_list(dev); - break; - - case DEPCA_MCA_EN: /* Enable pass all multicast addressing */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - set_multicast_list(dev); - break; - - case DEPCA_GET_STATS: /* Get the driver statistics */ - ioc->len = sizeof(lp->pktStats); - buf = kmalloc(ioc->len, GFP_KERNEL); - if(!buf) - return -ENOMEM; - spin_lock_irqsave(&lp->lock, flags); - memcpy(buf, &lp->pktStats, ioc->len); - spin_unlock_irqrestore(&lp->lock, flags); - if (copy_to_user(ioc->data, buf, ioc->len)) - status = -EFAULT; - kfree(buf); - break; - - case DEPCA_CLR_STATS: /* Zero out the driver statistics */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - spin_lock_irqsave(&lp->lock, flags); - memset(&lp->pktStats, 0, sizeof(lp->pktStats)); - spin_unlock_irqrestore(&lp->lock, flags); - break; - - case DEPCA_GET_REG: /* Get the DEPCA Registers */ - i = 0; - tmp.sval[i++] = inw(DEPCA_NICSR); - outw(CSR0, DEPCA_ADDR); /* status register */ - tmp.sval[i++] = inw(DEPCA_DATA); - memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init)); - ioc->len = i + sizeof(struct depca_init); - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) - return -EFAULT; - break; - - default: - return -EOPNOTSUPP; - } - - return status; -} - -static int __init depca_module_init (void) -{ - int err = 0; - -#ifdef CONFIG_MCA - err = mca_register_driver(&depca_mca_driver); - if (err) - goto err; -#endif -#ifdef CONFIG_EISA - err = eisa_driver_register(&depca_eisa_driver); - if (err) - goto err_mca; -#endif - err = platform_driver_register(&depca_isa_driver); - if (err) - goto err_eisa; - - depca_platform_probe(); - return 0; - -err_eisa: -#ifdef CONFIG_EISA - eisa_driver_unregister(&depca_eisa_driver); -err_mca: -#endif -#ifdef CONFIG_MCA - mca_unregister_driver(&depca_mca_driver); -err: -#endif - return err; -} - -static void __exit depca_module_exit (void) -{ - int i; -#ifdef CONFIG_MCA - mca_unregister_driver (&depca_mca_driver); -#endif -#ifdef CONFIG_EISA - eisa_driver_unregister (&depca_eisa_driver); -#endif - platform_driver_unregister (&depca_isa_driver); - - for (i = 0; depca_io_ports[i].iobase; i++) { - if (depca_io_ports[i].device) { - depca_io_ports[i].device->dev.platform_data = NULL; - platform_device_unregister (depca_io_ports[i].device); - depca_io_ports[i].device = NULL; - } - } -} - -module_init (depca_module_init); -module_exit (depca_module_exit); diff --git a/drivers/net/depca.h b/drivers/net/depca.h deleted file mode 100644 index ee42648..0000000 --- a/drivers/net/depca.h +++ /dev/null @@ -1,185 +0,0 @@ -/* - Written 1994 by David C. Davies. - - Copyright 1994 David C. Davies. This software may be used and distributed - according to the terms of the GNU General Public License, incorporated herein by - reference. -*/ - -/* -** I/O addresses. Note that the 2k buffer option is not supported in -** this driver. 
-*/ -#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */ -#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */ -#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */ -#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */ -#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */ -#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */ -#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */ -#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */ - -/* -** These are LANCE registers addressable through DEPCA_ADDR -*/ -#define CSR0 0 -#define CSR1 1 -#define CSR2 2 -#define CSR3 3 - -/* -** NETWORK INTERFACE CSR (NI_CSR) bit definitions -*/ - -#define TO 0x0100 /* Time Out for remote boot */ -#define SHE 0x0080 /* SHadow memory Enable */ -#define BS 0x0040 /* Bank Select */ -#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */ -#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */ -#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */ -#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */ -#define IM 0x0004 /* Interrupt Mask (1->mask) */ -#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */ -#define LED 0x0001 /* LED control */ - -/* -** Control and Status Register 0 (CSR0) bit definitions -*/ - -#define ERR 0x8000 /* Error summary */ -#define BABL 0x4000 /* Babble transmitter timeout error */ -#define CERR 0x2000 /* Collision Error */ -#define MISS 0x1000 /* Missed packet */ -#define MERR 0x0800 /* Memory Error */ -#define RINT 0x0400 /* Receiver Interrupt */ -#define TINT 0x0200 /* Transmit Interrupt */ -#define IDON 0x0100 /* Initialization Done */ -#define INTR 0x0080 /* Interrupt Flag */ -#define INEA 0x0040 /* Interrupt Enable */ -#define RXON 0x0020 /* Receiver on */ -#define TXON 0x0010 /* Transmitter on */ -#define TDMD 0x0008 /* Transmit Demand */ -#define STOP 0x0004 /* Stop */ -#define STRT 0x0002 /* Start */ -#define INIT 0x0001 /* Initialize */ -#define INTM 0xff00 /* Interrupt Mask */ -#define INTE 0xfff0 /* Interrupt Enable */ - -/* -** CONTROL AND STATUS REGISTER 3 (CSR3) -*/ - -#define BSWP 0x0004 /* Byte SWaP */ -#define ACON 0x0002 /* ALE control */ -#define BCON 0x0001 /* Byte CONtrol */ - -/* -** Initialization Block Mode Register -*/ - -#define PROM 0x8000 /* Promiscuous Mode */ -#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */ -#define INTL 0x0040 /* Internal Loopback */ -#define DRTY 0x0020 /* Disable Retry */ -#define COLL 0x0010 /* Force Collision */ -#define DTCR 0x0008 /* Disable Transmit CRC */ -#define LOOP 0x0004 /* Loopback */ -#define DTX 0x0002 /* Disable the Transmitter */ -#define DRX 0x0001 /* Disable the Receiver */ - -/* -** Receive Message Descriptor 1 (RMD1) bit definitions. -*/ - -#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */ -#define R_ERR 0x4000 /* Error Summary */ -#define R_FRAM 0x2000 /* Framing Error */ -#define R_OFLO 0x1000 /* Overflow Error */ -#define R_CRC 0x0800 /* CRC Error */ -#define R_BUFF 0x0400 /* Buffer Error */ -#define R_STP 0x0200 /* Start of Packet */ -#define R_ENP 0x0100 /* End of Packet */ - -/* -** Transmit Message Descriptor 1 (TMD1) bit definitions. 
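[Illustration: how depca_rx() consumes the RMD1 bits defined above. The masks are repeated so the sketch compiles on its own:]

#include <stdio.h>

/* RMD1 bits, repeated from depca.h above. */
#define R_OWN  0x80000000
#define R_ERR  0x4000
#define R_FRAM 0x2000
#define R_OFLO 0x1000
#define R_CRC  0x0800
#define R_BUFF 0x0400
#define R_STP  0x0200
#define R_ENP  0x0100

/* Decode one Rx descriptor base word, mirroring depca_rx(): ownership
 * is tested on the full word, the flags on the word shifted down 16. */
static void decode_rmd(unsigned int base)
{
    unsigned int status = base >> 16;

    printf("owner: %s\n", (base & R_OWN) ? "LANCE" : "host");
    printf("frame: %s%s\n",
           (status & R_STP) ? "start " : "",
           (status & R_ENP) ? "end" : "");
    if (status & R_ERR)
        printf("errors:%s%s%s%s\n",
               (status & R_FRAM) ? " framing" : "",
               (status & R_OFLO) ? " overflow" : "",
               (status & R_CRC) ? " crc" : "",
               (status & R_BUFF) ? " buffer" : "");
}

int main(void)
{
    /* A completed single-buffer frame with a CRC error, host-owned. */
    decode_rmd((R_ERR | R_CRC | R_STP | R_ENP) << 16);
    return 0;
}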
-*/ - -#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */ -#define T_ERR 0x4000 /* Error Summary */ -#define T_ADD_FCS 0x2000 /* More the 1 retry needed to Xmit */ -#define T_MORE 0x1000 /* >1 retry to transmit packet */ -#define T_ONE 0x0800 /* 1 try needed to transmit the packet */ -#define T_DEF 0x0400 /* Deferred */ -#define T_STP 0x02000000 /* Start of Packet */ -#define T_ENP 0x01000000 /* End of Packet */ -#define T_FLAGS 0xff000000 /* TX Flags Field */ - -/* -** Transmit Message Descriptor 3 (TMD3) bit definitions. -*/ - -#define TMD3_BUFF 0x8000 /* BUFFer error */ -#define TMD3_UFLO 0x4000 /* UnderFLOw error */ -#define TMD3_RES 0x2000 /* REServed */ -#define TMD3_LCOL 0x1000 /* Late COLlision */ -#define TMD3_LCAR 0x0800 /* Loss of CARrier */ -#define TMD3_RTRY 0x0400 /* ReTRY error */ - -/* -** EISA configuration Register (CNFG) bit definitions -*/ - -#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */ -#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */ -#define IRQ11 0x0040 /* Enable -> 1 */ -#define IRQ10 0x0020 /* Enable -> 1 */ -#define IRQ9 0x0010 /* Enable -> 1 */ -#define IRQ5 0x0008 /* Enable -> 1 */ -#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */ -#define PADR16 0x0002 /* RAM on 64kB boundary */ -#define PADR17 0x0001 /* RAM on 128kB boundary */ - -/* -** Miscellaneous -*/ -#define HASH_TABLE_LEN 64 /* Bits */ -#define HASH_BITS 0x003f /* 6 LS bits */ - -#define MASK_INTERRUPTS 1 -#define UNMASK_INTERRUPTS 0 - -#define EISA_EN 0x0001 /* Enable EISA bus buffers */ -#define EISA_ID iobase+0x0080 /* ID long word for EISA card */ -#define EISA_CTRL iobase+0x0084 /* Control word for EISA card */ - -/* -** Include the IOCTL stuff -*/ -#include - -#define DEPCAIOCTL SIOCDEVPRIVATE - -struct depca_ioctl { - unsigned short cmd; /* Command to run */ - unsigned short len; /* Length of the data buffer */ - unsigned char __user *data; /* Pointer to the data buffer */ -}; - -/* -** Recognised commands for the driver -*/ -#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */ -#define DEPCA_SET_HWADDR 0x02 /* Get the hardware address */ -#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */ -#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */ -#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" 
to the kernel log file */ -#define DEPCA_GET_MCA 0x06 /* Get a multicast address */ -#define DEPCA_SET_MCA 0x07 /* Set a multicast address */ -#define DEPCA_CLR_MCA 0x08 /* Clear a multicast address */ -#define DEPCA_MCA_EN 0x09 /* Enable a multicast address group */ -#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */ -#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */ -#define DEPCA_GET_REG 0x0c /* Get the Register contents */ -#define DEPCA_SET_REG 0x0d /* Set the Register contents */ -#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */ - diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 18193ec..5e62efd 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -12,5 +12,6 @@ menuconfig ETHERNET if ETHERNET source "drivers/net/ethernet/3com/Kconfig" +source "drivers/net/ethernet/amd/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 07766ba..1bc2ac2 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ +obj-$(CONFIG_NET_VENDOR_AMD) += amd/ diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c new file mode 100644 index 0000000..60b35fb --- /dev/null +++ b/drivers/net/ethernet/amd/7990.c @@ -0,0 +1,662 @@ +/* + * 7990.c -- LANCE ethernet IC generic routines. + * This is an attempt to separate out the bits of various ethernet + * drivers that are common because they all use the AMD 7990 LANCE + * (Local Area Network Controller for Ethernet) chip. + * + * Copyright (C) 05/1998 Peter Maydell + * + * Most of this stuff was obtained by looking at other LANCE drivers, + * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful. + * NB: this was made easy by the fact that Jes Sorensen had cleaned up + * most of a2025 and sunlance with the aim of merging them, so the + * common code was pretty obvious. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* Used for the temporal inet entries and routing */ +#include +#include + +#include +#include +#include +#include +#ifdef CONFIG_HP300 +#include +#endif + +#include "7990.h" + +#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x)) +#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x)) +#define READRDP(lp) in_be16(lp->base + LANCE_RDP) + +#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE) +#include "hplance.h" + +#undef WRITERAP +#undef WRITERDP +#undef READRDP + +#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE) + +/* Lossage Factor Nine, Mr Sulu. 
*/ +#define WRITERAP(lp,x) (lp->writerap(lp,x)) +#define WRITERDP(lp,x) (lp->writerdp(lp,x)) +#define READRDP(lp) (lp->readrdp(lp)) + +#else + +/* These inlines can be used if only CONFIG_HPLANCE is defined */ +static inline void WRITERAP(struct lance_private *lp, __u16 value) +{ + do { + out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); +} + +static inline void WRITERDP(struct lance_private *lp, __u16 value) +{ + do { + out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); +} + +static inline __u16 READRDP(struct lance_private *lp) +{ + __u16 value; + do { + value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); + return value; +} + +#endif +#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */ + +/* debugging output macros, various flavours */ +/* #define TEST_HITS */ +#ifdef UNDEF +#define PRINT_RINGS() \ +do { \ + int t; \ + for (t=0; t < RX_RING_SIZE; t++) { \ + printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\ + t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\ + ib->brx_ring[t].length,\ + ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\ + }\ + for (t=0; t < TX_RING_SIZE; t++) { \ + printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\ + t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\ + ib->btx_ring[t].length,\ + ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\ + }\ +} while (0) +#else +#define PRINT_RINGS() +#endif + +/* Load the CSR registers. The LANCE has to be STOPped when we do this! */ +static void load_csrs (struct lance_private *lp) +{ + volatile struct lance_init_block *aib = lp->lance_init_block; + int leptr; + + leptr = LANCE_ADDR (aib); + + WRITERAP(lp, LE_CSR1); /* load address of init block */ + WRITERDP(lp, leptr & 0xFFFF); + WRITERAP(lp, LE_CSR2); + WRITERDP(lp, leptr >> 16); + WRITERAP(lp, LE_CSR3); + WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */ + + /* Point back to csr0 */ + WRITERAP(lp, LE_CSR0); +} + +/* #define to 0 or 1 appropriately */ +#define DEBUG_IRING 0 +/* Set up the Lance Rx and Tx rings and the init block */ +static void lance_init_ring (struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */ + int leptr; + int i; + + aib = lp->lance_init_block; + + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */ + + /* Copy the ethernet address to the lance init block + * Notice that we do a byteswap if we're big endian. + * [I think this is the right criterion; at least, sunlance, + * a2065 and atarilance do the byteswap and lance.c (PC) doesn't. + * However, the datasheet says that the BSWAP bit doesn't affect + * the init block, so surely it should be low byte first for + * everybody? Um.] + * We could define the ib->physaddr as three 16bit values and + * use (addr[1] << 8) | addr[0] & co, but this is more efficient. 
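 * (Spelled out, that 16-bit variant would look something like:
 *	ib->phys_addr16[0] = (dev->dev_addr[1] << 8) | dev->dev_addr[0];
 * with phys_addr16 a hypothetical u16 view of the same field; one
 * store per word, at the price of hiding which byte order is meant.)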
+ */
+#ifdef __BIG_ENDIAN
+	ib->phys_addr [0] = dev->dev_addr [1];
+	ib->phys_addr [1] = dev->dev_addr [0];
+	ib->phys_addr [2] = dev->dev_addr [3];
+	ib->phys_addr [3] = dev->dev_addr [2];
+	ib->phys_addr [4] = dev->dev_addr [5];
+	ib->phys_addr [5] = dev->dev_addr [4];
+#else
+	for (i=0; i<6; i++)
+		ib->phys_addr[i] = dev->dev_addr[i];
+#endif
+
+	if (DEBUG_IRING)
+		printk ("TX rings:\n");
+
+	lp->tx_full = 0;
+	/* Setup the Tx ring entries */
+	for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
+		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+		ib->btx_ring [i].tmd0 = leptr;
+		ib->btx_ring [i].tmd1_hadr = leptr >> 16;
+		ib->btx_ring [i].tmd1_bits = 0;
+		ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
+		ib->btx_ring [i].misc = 0;
+		if (DEBUG_IRING)
+			printk ("%d: 0x%8.8x\n", i, leptr);
+	}
+
+	/* Setup the Rx ring entries */
+	if (DEBUG_IRING)
+		printk ("RX rings:\n");
+	for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
+		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+		ib->brx_ring [i].rmd0 = leptr;
+		ib->brx_ring [i].rmd1_hadr = leptr >> 16;
+		ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
+		/* 0xf000 == bits that must be one (reserved, presumably) */
+		ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
+		ib->brx_ring [i].mblength = 0;
+		if (DEBUG_IRING)
+			printk ("%d: 0x%8.8x\n", i, leptr);
+	}
+
+	/* Setup the initialization block */
+
+	/* Setup rx descriptor pointer */
+	leptr = LANCE_ADDR(&aib->brx_ring);
+	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+	ib->rx_ptr = leptr;
+	if (DEBUG_IRING)
+		printk ("RX ptr: %8.8x\n", leptr);
+
+	/* Setup tx descriptor pointer */
+	leptr = LANCE_ADDR(&aib->btx_ring);
+	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+	ib->tx_ptr = leptr;
+	if (DEBUG_IRING)
+		printk ("TX ptr: %8.8x\n", leptr);
+
+	/* Clear the multicast filter */
+	ib->filter [0] = 0;
+	ib->filter [1] = 0;
+	PRINT_RINGS();
+}
+
+/* LANCE must be STOPped before we do this, too... */
+static int init_restart_lance (struct lance_private *lp)
+{
+	int i;
+
+	WRITERAP(lp, LE_CSR0);
+	WRITERDP(lp, LE_C0_INIT);
+
+	/* Need a hook here for sunlance ledma stuff */
+
+	/* Wait for the lance to complete initialization */
+	for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
+		barrier();
+	if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
+		printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
+		return -1;
+	}
+
+	/* Clear IDON by writing a "1", enable interrupts and start lance */
+	WRITERDP(lp, LE_C0_IDON);
+	WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
+
+	return 0;
+}
+
+static int lance_reset (struct net_device *dev)
+{
+	struct lance_private *lp = netdev_priv(dev);
+	int status;
+
+	/* Stop the lance */
+	WRITERAP(lp, LE_CSR0);
+	WRITERDP(lp, LE_C0_STOP);
+
+	load_csrs (lp);
+	lance_init_ring (dev);
+	dev->trans_start = jiffies; /* prevent tx timeout */
+	status = init_restart_lance (lp);
+#ifdef DEBUG_DRIVER
+	printk ("Lance restart=%d\n", status);
+#endif
+	return status;
+}
+
+static int lance_rx (struct net_device *dev)
+{
+	struct lance_private *lp = netdev_priv(dev);
+	volatile struct lance_init_block *ib = lp->init_block;
+	volatile struct lance_rx_desc *rd;
+	unsigned char bits;
+#ifdef TEST_HITS
+	int i;
+#endif
+
+#ifdef TEST_HITS
+	printk ("[");
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		if (i == lp->rx_new)
+			printk ("%s",
+				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+		else
+			printk ("%s",
+				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "."
: "1"); + } + printk ("]"); +#endif +#ifdef CONFIG_HP300 + blinken_leds(0x40, 0); +#endif + WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */ + for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */ + !((bits = rd->rmd1_bits) & LE_R1_OWN); + rd = &ib->brx_ring [lp->rx_new]) { + + /* We got an incomplete frame? */ + if ((bits & LE_R1_POK) != LE_R1_POK) { + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + continue; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; + } else { + int len = (rd->mblength & 0xfff) - 4; + struct sk_buff *skb = dev_alloc_skb (len+2); + + if (!skb) { + printk ("%s: Memory squeeze, deferring packet.\n", + dev->name); + dev->stats.rx_dropped++; + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; + return 0; + } + + skb_reserve (skb, 2); /* 16 byte align */ + skb_put (skb, len); /* make room */ + skb_copy_to_linear_data(skb, + (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), + len); + skb->protocol = eth_type_trans (skb, dev); + netif_rx (skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + } + + /* Return the packet to the pool */ + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; + } + return 0; +} + +static int lance_tx (struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_tx_desc *td; + int i, j; + int status; + +#ifdef CONFIG_HP300 + blinken_leds(0x80, 0); +#endif + /* csr0 is 2f3 */ + WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); + /* csr0 is 73 */ + + j = lp->tx_old; + for (i = j; i != lp->tx_new; i = j) { + td = &ib->btx_ring [i]; + + /* If we hit a packet not owned by us, stop */ + if (td->tmd1_bits & LE_T1_OWN) + break; + + if (td->tmd1_bits & LE_T1_ERR) { + status = td->misc; + + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; + + if (status & LE_T3_CLOS) { + dev->stats.tx_carrier_errors++; + if (lp->auto_select) { + lp->tpe = 1 - lp->tpe; + printk("%s: Carrier Lost, trying %s\n", + dev->name, lp->tpe?"TPE":"AUI"); + /* Stop the lance */ + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STOP); + lance_init_ring (dev); + load_csrs (lp); + init_restart_lance (lp); + return 0; + } + } + + /* buffer errors and underflows turn off the transmitter */ + /* Restart the adapter */ + if (status & (LE_T3_BUF|LE_T3_UFL)) { + dev->stats.tx_fifo_errors++; + + printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", + dev->name); + /* Stop the lance */ + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STOP); + lance_init_ring (dev); + load_csrs (lp); + init_restart_lance (lp); + return 0; + } + } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { + /* + * So we don't count the packet more than once. + */ + td->tmd1_bits &= ~(LE_T1_POK); + + /* One collision before packet was sent. */ + if (td->tmd1_bits & LE_T1_EONE) + dev->stats.collisions++; + + /* More than one collision, be optimistic. 
*/ + if (td->tmd1_bits & LE_T1_EMORE) + dev->stats.collisions += 2; + + dev->stats.tx_packets++; + } + + j = (j + 1) & lp->tx_ring_mod_mask; + } + lp->tx_old = j; + WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); + return 0; +} + +static irqreturn_t +lance_interrupt (int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct lance_private *lp = netdev_priv(dev); + int csr0; + + spin_lock (&lp->devlock); + + WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */ + csr0 = READRDP(lp); + + PRINT_RINGS(); + + if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */ + spin_unlock (&lp->devlock); + return IRQ_NONE; /* been generated by the Lance. */ + } + + /* Acknowledge all the interrupt sources ASAP */ + WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT)); + + if ((csr0 & LE_C0_ERR)) { + /* Clear the error condition */ + WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA); + } + + if (csr0 & LE_C0_RINT) + lance_rx (dev); + + if (csr0 & LE_C0_TINT) + lance_tx (dev); + + /* Log misc errors. */ + if (csr0 & LE_C0_BABL) + dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & LE_C0_MISS) + dev->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & LE_C0_MERR) { + printk("%s: Bus master arbitration failure, status %4.4x.\n", + dev->name, csr0); + /* Restart the chip. */ + WRITERDP(lp, LE_C0_STRT); + } + + if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) { + lp->tx_full = 0; + netif_wake_queue (dev); + } + + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA); + + spin_unlock (&lp->devlock); + return IRQ_HANDLED; +} + +int lance_open (struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + int res; + + /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */ + if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev)) + return -EAGAIN; + + res = lance_reset(dev); + spin_lock_init(&lp->devlock); + netif_start_queue (dev); + + return res; +} +EXPORT_SYMBOL_GPL(lance_open); + +int lance_close (struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + + netif_stop_queue (dev); + + /* Stop the LANCE */ + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STOP); + + free_irq(lp->irq, dev); + + return 0; +} +EXPORT_SYMBOL_GPL(lance_close); + +void lance_tx_timeout(struct net_device *dev) +{ + printk("lance_tx_timeout\n"); + lance_reset(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue (dev); +} +EXPORT_SYMBOL_GPL(lance_tx_timeout); + +int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + int entry, skblen, len; + static int outs; + unsigned long flags; + + if (!TX_BUFFS_AVAIL) + return NETDEV_TX_LOCKED; + + netif_stop_queue (dev); + + skblen = skb->len; + +#ifdef DEBUG_DRIVER + /* dump the packet */ + { + int i; + + for (i = 0; i < 64; i++) { + if ((i % 16) == 0) + printk ("\n"); + printk ("%2.2x ", skb->data [i]); + } + } +#endif + len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; + entry = lp->tx_new & lp->tx_ring_mod_mask; + ib->btx_ring [entry].length = (-len) | 0xf000; + ib->btx_ring [entry].misc = 0; + + if (skb->len < ETH_ZLEN) + memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); + skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen); + + /* Now, give the packet to the lance */ + ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); + lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; + + outs++; + /* Kick the lance: transmit now */ + WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD); + dev_kfree_skb (skb); + + spin_lock_irqsave (&lp->devlock, flags); + if (TX_BUFFS_AVAIL) + netif_start_queue (dev); + else + lp->tx_full = 1; + spin_unlock_irqrestore (&lp->devlock, flags); + + return NETDEV_TX_OK; +} +EXPORT_SYMBOL_GPL(lance_start_xmit); + +/* taken from the depca driver via a2065.c */ +static void lance_load_multicast (struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile u16 *mcast_table = (u16 *)&ib->filter; + struct netdev_hw_addr *ha; + u32 crc; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI){ + ib->filter [0] = 0xffffffff; + ib->filter [1] = 0xffffffff; + return; + } + /* clear the multicast filter */ + ib->filter [0] = 0; + ib->filter [1] = 0; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc = crc >> 26; + mcast_table [crc >> 4] |= 1 << (crc & 0xf); + } +} + + +void lance_set_multicast (struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + int stopped; + + stopped = netif_queue_stopped(dev); + if (!stopped) + netif_stop_queue (dev); + + while (lp->tx_old != lp->tx_new) + schedule(); + + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STOP); + lance_init_ring (dev); + + if (dev->flags & IFF_PROMISC) { + ib->mode |= LE_MO_PROM; + } else { + ib->mode &= ~LE_MO_PROM; + lance_load_multicast (dev); + } + load_csrs (lp); + init_restart_lance (lp); + + if (!stopped) + netif_start_queue (dev); +} +EXPORT_SYMBOL_GPL(lance_set_multicast); + +#ifdef CONFIG_NET_POLL_CONTROLLER +void lance_poll(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + + spin_lock (&lp->devlock); + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STRT); + spin_unlock (&lp->devlock); + lance_interrupt(dev->irq, dev); +} +#endif + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h new file mode 100644 index 0000000..0a5837b --- /dev/null +++ b/drivers/net/ethernet/amd/7990.h @@ -0,0 +1,254 @@ +/* + * 7990.h -- LANCE ethernet IC generic routines. + * This is an attempt to separate out the bits of various ethernet + * drivers that are common because they all use the AMD 7990 LANCE + * (Local Area Network Controller for Ethernet) chip. + * + * Copyright (C) 05/1998 Peter Maydell + * + * Most of this stuff was obtained by looking at other LANCE drivers, + * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful. + */ + +#ifndef _7990_H +#define _7990_H + +/* The lance only has two register locations. We communicate mostly via memory. */ +#define LANCE_RDP 0 /* Register Data Port */ +#define LANCE_RAP 2 /* Register Address Port */ + +/* Transmit/receive ring definitions. + * We allow the specific drivers to override these defaults if they want to. 
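 * A board driver chooses its ring sizes by defining these before it
 * includes this header; a sketch, with sizes picked only for
 * illustration:
 *
 *	#define LANCE_LOG_TX_BUFFERS 2	(i.e. 4 Tx buffers)
 *	#define LANCE_LOG_RX_BUFFERS 4	(i.e. 16 Rx buffers)
 *	#include "7990.h"
 *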
 * NB: according to lance.c, increasing the number of buffers is a waste
 * of space and reduces the chance that an upper layer will be able to
 * reorder queued Tx packets based on priority. [Clearly there is a minimum
 * limit too: too small and we drop rx packets and can't tx at full speed.]
 * 4+4 seems to be the usual setting; the atarilance driver uses 3 and 5.
 */

+/* Blast! This won't work. The problem is that we can't specify a default
+ * setting because that would cause the lance_init_block struct to be
+ * too long (and overflow the RAM on shared-memory cards like the HP LANCE).
+ */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
+#endif
+
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
+
+#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ? \
+			lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new : \
+			lp->tx_old - lp->tx_new - 1)
+
+/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
+#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
+
+/* Now the prototypes we export */
+extern int lance_open(struct net_device *dev);
+extern int lance_close (struct net_device *dev);
+extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
+extern void lance_set_multicast (struct net_device *dev);
+extern void lance_tx_timeout(struct net_device *dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+extern void lance_poll(struct net_device *dev);
+#endif
+
+#endif /* ndef _7990_H */
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
new file mode 100644
index 0000000..0513940
--- /dev/null
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -0,0 +1,192 @@
+#
+# AMD network device configuration
+#
+
+config NET_VENDOR_AMD
+	bool "AMD devices"
+	depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
+		   SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
+		   (ARM && ARCH_EBSA110) || ISA || EISA || MCA || PCMCIA
+	---help---
+	  If you have a network (Ethernet) chipset belonging to this class,
+	  say Y.
+
+	  Note that the answer to this question does not directly affect
+	  the kernel: saying N will just cause the configurator to skip all
+	  the questions regarding AMD chipsets. If you say Y, you will be asked
+	  for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_AMD
+
+config A2065
+	tristate "A2065 support"
+	depends on ZORRO
+	select CRC32
+	---help---
+	  If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
+	  say N.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called a2065.
+
+config AMD8111_ETH
+	tristate "AMD 8111 (new PCI LANCE) support"
+	depends on PCI
+	select CRC32
+	select MII
+	---help---
+	  If you have an AMD 8111-based PCI LANCE ethernet card,
+	  answer Y here and read the Ethernet-HOWTO, available from
+	  .
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called amd8111e.
+
+config LANCE
+	tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
+	depends on ISA && ISA_DMA_API
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  . Some LinkSys cards are
+	  of this type.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called lance. This is recommended.
+
+config PCNET32
+	tristate "AMD PCnet32 PCI support"
+	depends on PCI
+	select CRC32
+	select MII
+	---help---
+	  If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
+	  answer Y here and read the Ethernet-HOWTO, available from
+	  .
+
+	  To compile this driver as a module, choose M here.
The module + will be called pcnet32. + +config ARIADNE + tristate "Ariadne support" + depends on ZORRO + ---help--- + If you have a Village Tronic Ariadne Ethernet adapter, say Y. + Otherwise, say N. + + To compile this driver as a module, choose M here: the module + will be called ariadne. + +config ARM_AM79C961A + bool "ARM EBSA110 AM79C961A support" + depends on ARM && ARCH_EBSA110 + select CRC32 + ---help--- + If you wish to compile a kernel for the EBSA-110, then you should + always answer Y to this. + +config ATARILANCE + tristate "Atari LANCE support" + depends on ATARI + ---help--- + Say Y to include support for several Atari Ethernet adapters based + on the AMD LANCE chipset: RieblCard (with or without battery), or + PAMCard VME (also the version by Rhotron, with different addresses). + +config DECLANCE + tristate "DEC LANCE ethernet controller support" + depends on MACH_DECSTATION + select CRC32 + ---help--- + This driver is for the series of Ethernet controllers produced by + DEC (now Compaq) based on the AMD LANCE chipset, including the + DEPCA series. (This chipset is better known via the NE2100 cards.) + +config DEPCA + tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support" + depends on (ISA || EISA || MCA) + select CRC32 + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + as well as + . + + To compile this driver as a module, choose M here. The module + will be called depca. + +config HPLANCE + bool "HP on-board LANCE support" + depends on DIO + select CRC32 + ---help--- + If you want to use the builtin "LANCE" Ethernet controller on an + HP300 machine, say Y here. + +config MIPS_AU1X00_ENET + tristate "MIPS AU1000 Ethernet support" + depends on MIPS_ALCHEMY + select PHYLIB + select CRC32 + ---help--- + If you have an Alchemy Semi AU1X00 based system + say Y. Otherwise, say N. + +config MVME147_NET + tristate "MVME147 (LANCE) Ethernet support" + depends on MVME147 + select CRC32 + ---help--- + Support for the on-board Ethernet interface on the Motorola MVME147 + single-board computer. Say Y here to include the + driver for this chip in your kernel. + To compile this driver as a module, choose M here. + +config PCMCIA_NMCLAN + tristate "New Media PCMCIA support" + depends on PCMCIA + help + Say Y here if you intend to attach a New Media Ethernet or LiveWire + PCMCIA (PC-card) Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called nmclan_cs. If unsure, say N. + +config NI65 + tristate "NI6510 support" + depends on ISA && ISA_DMA_API + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called ni65. + +config SUN3LANCE + tristate "Sun3/Sun3x on-board LANCE support" + depends on (SUN3 || SUN3X) + ---help--- + Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80) + featured an AMD LANCE 10Mbit Ethernet controller on board; say Y + here to compile in the Linux driver for this and enable Ethernet. + General Linux information on the Sun 3 and 3x series (now + discontinued) is at + . + + If you're not building a kernel for a Sun 3, say N. + +config SUNLANCE + tristate "Sun LANCE support" + depends on SBUS + select CRC32 + ---help--- + This driver supports the "le" interface present on all 32-bit Sparc + systems, on some older Ultra systems and as an Sbus option. 
These + cards are based on the AMD LANCE chipset, which is better known + via the NE2100 cards. + + To compile this driver as a module, choose M here: the module + will be called sunlance. + +endif # NET_VENDOR_AMD diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile new file mode 100644 index 0000000..175caa5 --- /dev/null +++ b/drivers/net/ethernet/amd/Makefile @@ -0,0 +1,20 @@ +# +# Makefile for the AMD network device drivers. +# + +obj-$(CONFIG_A2065) += a2065.o +obj-$(CONFIG_AMD8111_ETH) += amd8111e.o +obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o +obj-$(CONFIG_ARIADNE) += ariadne.o +obj-$(CONFIG_ATARILANCE) += atarilance.o +obj-$(CONFIG_DECLANCE) += declance.o +obj-$(CONFIG_DEPCA) += depca.o +obj-$(CONFIG_HPLANCE) += hplance.o 7990.o +obj-$(CONFIG_LANCE) += lance.o +obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o +obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o +obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o +obj-$(CONFIG_NI65) += ni65.o +obj-$(CONFIG_PCNET32) += pcnet32.o +obj-$(CONFIG_SUN3LANCE) += sun3lance.o +obj-$(CONFIG_SUNLANCE) += sunlance.o diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c new file mode 100644 index 0000000..e1e1b07 --- /dev/null +++ b/drivers/net/ethernet/amd/a2065.c @@ -0,0 +1,781 @@ +/* + * Amiga Linux/68k A2065 Ethernet Driver + * + * (C) Copyright 1995-2003 by Geert Uytterhoeven + * + * Fixes and tips by: + * - Janos Farkas (CHEXUM@sparta.banki.hu) + * - Jes Degn Soerensen (jds@kom.auc.dk) + * - Matt Domsch (Matt_Domsch@dell.com) + * + * ---------------------------------------------------------------------------- + * + * This program is based on + * + * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver + * (C) Copyright 1995 by Geert Uytterhoeven, + * Peter De Schrijver + * + * lance.c: An AMD LANCE ethernet driver for linux. + * Written 1993-94 by Donald Becker. + * + * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller + * Advanced Micro Devices + * Publication #16907, Rev. B, Amendment/0, May 1994 + * + * ---------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * ---------------------------------------------------------------------------- + * + * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains: + * + * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with + * both 10BASE-2 (thin coax) and AUI (DB-15) connectors + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/*#define DEBUG*/ +/*#define TEST_HITS*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "a2065.h" + +/* Transmit/Receive Ring Definitions */ + +#define LANCE_LOG_TX_BUFFERS (2) +#define LANCE_LOG_RX_BUFFERS (4) + +#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS) +#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS) + +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) + +#define PKT_BUF_SIZE (1544) +#define RX_BUFF_SIZE PKT_BUF_SIZE +#define TX_BUFF_SIZE PKT_BUF_SIZE + +/* Layout of the Lance's RAM Buffer */ + +struct lance_init_block { + unsigned short mode; /* Pre-set mode (reg. 15) */ + unsigned char phys_addr[6]; /* Physical ethernet address */ + unsigned filter[2]; /* Multicast filter. 
*/ + + /* Receive and transmit ring base, along with extra bits. */ + unsigned short rx_ptr; /* receive descriptor addr */ + unsigned short rx_len; /* receive len and high addr */ + unsigned short tx_ptr; /* transmit descriptor addr */ + unsigned short tx_len; /* transmit len and high addr */ + + /* The Tx and Rx ring entries must aligned on 8-byte boundaries. */ + struct lance_rx_desc brx_ring[RX_RING_SIZE]; + struct lance_tx_desc btx_ring[TX_RING_SIZE]; + + char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE]; + char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE]; +}; + +/* Private Device Data */ + +struct lance_private { + char *name; + volatile struct lance_regs *ll; + volatile struct lance_init_block *init_block; /* Hosts view */ + volatile struct lance_init_block *lance_init_block; /* Lance view */ + + int rx_new, tx_new; + int rx_old, tx_old; + + int lance_log_rx_bufs, lance_log_tx_bufs; + int rx_ring_mod_mask, tx_ring_mod_mask; + + int tpe; /* cable-selection is TPE */ + int auto_select; /* cable-selection by carrier */ + unsigned short busmaster_regval; + +#ifdef CONFIG_SUNLANCE + struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */ + int burst_sizes; /* ledma SBus burst sizes */ +#endif + struct timer_list multicast_timer; +}; + +#define LANCE_ADDR(x) ((int)(x) & ~0xff000000) + +/* Load the CSR registers */ +static void load_csrs(struct lance_private *lp) +{ + volatile struct lance_regs *ll = lp->ll; + volatile struct lance_init_block *aib = lp->lance_init_block; + int leptr = LANCE_ADDR(aib); + + ll->rap = LE_CSR1; + ll->rdp = (leptr & 0xFFFF); + ll->rap = LE_CSR2; + ll->rdp = leptr >> 16; + ll->rap = LE_CSR3; + ll->rdp = lp->busmaster_regval; + + /* Point back to csr0 */ + ll->rap = LE_CSR0; +} + +/* Setup the Lance Rx and Tx rings */ +static void lance_init_ring(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_init_block *aib = lp->lance_init_block; + /* for LANCE_ADDR computations */ + int leptr; + int i; + + /* Lock out other processes while setting up hardware */ + netif_stop_queue(dev); + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + ib->mode = 0; + + /* Copy the ethernet address to the lance init block + * Note that on the sparc you need to swap the ethernet address. 
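	 * The same applies here on m68k: e.g. a dev_addr of
	 * 00:80:10:11:22:33 is stored below as 80 00 11 10 33 22,
	 * i.e. the two bytes of each 16-bit word swapped.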
+ */ + ib->phys_addr[0] = dev->dev_addr[1]; + ib->phys_addr[1] = dev->dev_addr[0]; + ib->phys_addr[2] = dev->dev_addr[3]; + ib->phys_addr[3] = dev->dev_addr[2]; + ib->phys_addr[4] = dev->dev_addr[5]; + ib->phys_addr[5] = dev->dev_addr[4]; + + /* Setup the Tx ring entries */ + netdev_dbg(dev, "TX rings:\n"); + for (i = 0; i <= 1 << lp->lance_log_tx_bufs; i++) { + leptr = LANCE_ADDR(&aib->tx_buf[i][0]); + ib->btx_ring[i].tmd0 = leptr; + ib->btx_ring[i].tmd1_hadr = leptr >> 16; + ib->btx_ring[i].tmd1_bits = 0; + ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */ + ib->btx_ring[i].misc = 0; + if (i < 3) + netdev_dbg(dev, "%d: 0x%08x\n", i, leptr); + } + + /* Setup the Rx ring entries */ + netdev_dbg(dev, "RX rings:\n"); + for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) { + leptr = LANCE_ADDR(&aib->rx_buf[i][0]); + + ib->brx_ring[i].rmd0 = leptr; + ib->brx_ring[i].rmd1_hadr = leptr >> 16; + ib->brx_ring[i].rmd1_bits = LE_R1_OWN; + ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000; + ib->brx_ring[i].mblength = 0; + if (i < 3) + netdev_dbg(dev, "%d: 0x%08x\n", i, leptr); + } + + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + leptr = LANCE_ADDR(&aib->brx_ring); + ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16); + ib->rx_ptr = leptr; + netdev_dbg(dev, "RX ptr: %08x\n", leptr); + + /* Setup tx descriptor pointer */ + leptr = LANCE_ADDR(&aib->btx_ring); + ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16); + ib->tx_ptr = leptr; + netdev_dbg(dev, "TX ptr: %08x\n", leptr); + + /* Clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; +} + +static int init_restart_lance(struct lance_private *lp) +{ + volatile struct lance_regs *ll = lp->ll; + int i; + + ll->rap = LE_CSR0; + ll->rdp = LE_C0_INIT; + + /* Wait for the lance to complete initialization */ + for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++) + barrier(); + if ((i == 100) || (ll->rdp & LE_C0_ERR)) { + pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp); + return -EIO; + } + + /* Clear IDON by writing a "1", enable interrupts and start lance */ + ll->rdp = LE_C0_IDON; + ll->rdp = LE_C0_INEA | LE_C0_STRT; + + return 0; +} + +static int lance_rx(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_regs *ll = lp->ll; + volatile struct lance_rx_desc *rd; + unsigned char bits; + +#ifdef TEST_HITS + int i; + char buf[RX_RING_SIZE + 1]; + + for (i = 0; i < RX_RING_SIZE; i++) { + char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN; + if (i == lp->rx_new) + buf[i] = r1_own ? '_' : 'X'; + else + buf[i] = r1_own ? '.' : '1'; + } + buf[RX_RING_SIZE] = 0; + + pr_debug("RxRing TestHits: [%s]\n", buf); +#endif + + ll->rdp = LE_C0_RINT | LE_C0_INEA; + for (rd = &ib->brx_ring[lp->rx_new]; + !((bits = rd->rmd1_bits) & LE_R1_OWN); + rd = &ib->brx_ring[lp->rx_new]) { + + /* We got an incomplete frame? 
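		 * LE_R1_POK is SOP | EOP, so this test also catches frames
		 * that spilled across more than one buffer: a descriptor
		 * with only SOP set (start seen, end still outstanding)
		 * fails the compare.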
*/ + if ((bits & LE_R1_POK) != LE_R1_POK) { + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + continue; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) + dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) + dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) + dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) + dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) + dev->stats.rx_errors++; + } else { + int len = (rd->mblength & 0xfff) - 4; + struct sk_buff *skb = dev_alloc_skb(len + 2); + + if (!skb) { + netdev_warn(dev, "Memory squeeze, deferring packet\n"); + dev->stats.rx_dropped++; + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; + return 0; + } + + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, len); /* make room */ + skb_copy_to_linear_data(skb, + (unsigned char *)&ib->rx_buf[lp->rx_new][0], + len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + } + + /* Return the packet to the pool */ + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; + } + return 0; +} + +static int lance_tx(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_regs *ll = lp->ll; + volatile struct lance_tx_desc *td; + int i, j; + int status; + + /* csr0 is 2f3 */ + ll->rdp = LE_C0_TINT | LE_C0_INEA; + /* csr0 is 73 */ + + j = lp->tx_old; + for (i = j; i != lp->tx_new; i = j) { + td = &ib->btx_ring[i]; + + /* If we hit a packet not owned by us, stop */ + if (td->tmd1_bits & LE_T1_OWN) + break; + + if (td->tmd1_bits & LE_T1_ERR) { + status = td->misc; + + dev->stats.tx_errors++; + if (status & LE_T3_RTY) + dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) + dev->stats.tx_window_errors++; + + if (status & LE_T3_CLOS) { + dev->stats.tx_carrier_errors++; + if (lp->auto_select) { + lp->tpe = 1 - lp->tpe; + netdev_err(dev, "Carrier Lost, trying %s\n", + lp->tpe ? "TPE" : "AUI"); + /* Stop the lance */ + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + return 0; + } + } + + /* buffer errors and underflows turn off + * the transmitter, so restart the adapter + */ + if (status & (LE_T3_BUF | LE_T3_UFL)) { + dev->stats.tx_fifo_errors++; + + netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n"); + /* Stop the lance */ + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + return 0; + } + } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { + /* So we don't count the packet more than once. */ + td->tmd1_bits &= ~(LE_T1_POK); + + /* One collision before packet was sent. */ + if (td->tmd1_bits & LE_T1_EONE) + dev->stats.collisions++; + + /* More than one collision, be optimistic. 
*/ + if (td->tmd1_bits & LE_T1_EMORE) + dev->stats.collisions += 2; + + dev->stats.tx_packets++; + } + + j = (j + 1) & lp->tx_ring_mod_mask; + } + lp->tx_old = j; + ll->rdp = LE_C0_TINT | LE_C0_INEA; + return 0; +} + +static int lance_tx_buffs_avail(struct lance_private *lp) +{ + if (lp->tx_old <= lp->tx_new) + return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new; + return lp->tx_old - lp->tx_new - 1; +} + +static irqreturn_t lance_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int csr0; + + ll->rap = LE_CSR0; /* LANCE Controller Status */ + csr0 = ll->rdp; + + if (!(csr0 & LE_C0_INTR)) /* Check if any interrupt has */ + return IRQ_NONE; /* been generated by the Lance. */ + + /* Acknowledge all the interrupt sources ASAP */ + ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT | + LE_C0_INIT); + + if (csr0 & LE_C0_ERR) { + /* Clear the error condition */ + ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA; + } + + if (csr0 & LE_C0_RINT) + lance_rx(dev); + + if (csr0 & LE_C0_TINT) + lance_tx(dev); + + /* Log misc errors. */ + if (csr0 & LE_C0_BABL) + dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & LE_C0_MISS) + dev->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & LE_C0_MERR) { + netdev_err(dev, "Bus master arbitration failure, status %04x\n", + csr0); + /* Restart the chip. */ + ll->rdp = LE_C0_STRT; + } + + if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0) + netif_wake_queue(dev); + + ll->rap = LE_CSR0; + ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR | + LE_C0_IDON | LE_C0_INEA); + return IRQ_HANDLED; +} + +static int lance_open(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int ret; + + /* Stop the Lance */ + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + + /* Install the Interrupt handler */ + ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED, + dev->name, dev); + if (ret) + return ret; + + load_csrs(lp); + lance_init_ring(dev); + + netif_start_queue(dev); + + return init_restart_lance(lp); +} + +static int lance_close(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + + netif_stop_queue(dev); + del_timer_sync(&lp->multicast_timer); + + /* Stop the card */ + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + + free_irq(IRQ_AMIGA_PORTS, dev); + return 0; +} + +static inline int lance_reset(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int status; + + /* Stop the lance */ + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + + load_csrs(lp); + + lance_init_ring(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_start_queue(dev); + + status = init_restart_lance(lp); + netdev_dbg(dev, "Lance restart=%d\n", status); + + return status; +} + +static void lance_tx_timeout(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + + netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp); + lance_reset(dev); + netif_wake_queue(dev); +} + +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + volatile struct lance_init_block *ib = lp->init_block; + int entry, skblen; + int status = NETDEV_TX_OK; + 
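+	/* Ring accounting (see lance_tx_buffs_avail() above): one slot is
+	 * always kept unused so that a full ring can be told from an empty
+	 * one. E.g. on a 4-slot ring with tx_old == 1 and tx_new == 3 it
+	 * returns 1 + 3 - 3 = 1 usable slot.
+	 */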
unsigned long flags; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + skblen = max_t(unsigned, skb->len, ETH_ZLEN); + + local_irq_save(flags); + + if (!lance_tx_buffs_avail(lp)) { + local_irq_restore(flags); + return NETDEV_TX_LOCKED; + } + +#ifdef DEBUG + /* dump the packet */ + print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE, + 16, 1, skb->data, 64, true); +#endif + entry = lp->tx_new & lp->tx_ring_mod_mask; + ib->btx_ring[entry].length = (-skblen) | 0xf000; + ib->btx_ring[entry].misc = 0; + + skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen); + + /* Now, give the packet to the lance */ + ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); + lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; + dev->stats.tx_bytes += skblen; + + if (lance_tx_buffs_avail(lp) <= 0) + netif_stop_queue(dev); + + /* Kick the lance: transmit now */ + ll->rdp = LE_C0_INEA | LE_C0_TDMD; + dev_kfree_skb(skb); + + local_irq_restore(flags); + + return status; +} + +/* taken from the depca driver */ +static void lance_load_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile u16 *mcast_table = (u16 *)&ib->filter; + struct netdev_hw_addr *ha; + u32 crc; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) { + ib->filter[0] = 0xffffffff; + ib->filter[1] = 0xffffffff; + return; + } + /* clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc = crc >> 26; + mcast_table[crc >> 4] |= 1 << (crc & 0xf); + } +} + +static void lance_set_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_regs *ll = lp->ll; + + if (!netif_running(dev)) + return; + + if (lp->tx_old != lp->tx_new) { + mod_timer(&lp->multicast_timer, jiffies + 4); + netif_wake_queue(dev); + return; + } + + netif_stop_queue(dev); + + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + lance_init_ring(dev); + + if (dev->flags & IFF_PROMISC) { + ib->mode |= LE_MO_PROM; + } else { + ib->mode &= ~LE_MO_PROM; + lance_load_multicast(dev); + } + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); +} + +static int __devinit a2065_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent); +static void __devexit a2065_remove_one(struct zorro_dev *z); + + +static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = { + { ZORRO_PROD_CBM_A2065_1 }, + { ZORRO_PROD_CBM_A2065_2 }, + { ZORRO_PROD_AMERISTAR_A2065 }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl); + +static struct zorro_driver a2065_driver = { + .name = "a2065", + .id_table = a2065_zorro_tbl, + .probe = a2065_init_one, + .remove = __devexit_p(a2065_remove_one), +}; + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_set_multicast_list = lance_set_multicast, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int __devinit a2065_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + struct net_device *dev; + struct lance_private *priv; + unsigned long board = z->resource.start; + unsigned long base_addr = board + A2065_LANCE; + unsigned long mem_start = board + A2065_RAM; 
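+	/* Board layout (see a2065.h): the Am7990 register pair decodes at
+	 * board + 0x4000 and the 32K of shared buffer RAM at board + 0x8000,
+	 * which is why the chip-side address of the init block further down
+	 * is simply A2065_RAM.
+	 */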
+ struct resource *r1, *r2; + int err; + + r1 = request_mem_region(base_addr, sizeof(struct lance_regs), + "Am7990"); + if (!r1) + return -EBUSY; + r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM"); + if (!r2) { + release_mem_region(base_addr, sizeof(struct lance_regs)); + return -EBUSY; + } + + dev = alloc_etherdev(sizeof(struct lance_private)); + if (dev == NULL) { + release_mem_region(base_addr, sizeof(struct lance_regs)); + release_mem_region(mem_start, A2065_RAM_SIZE); + return -ENOMEM; + } + + priv = netdev_priv(dev); + + r1->name = dev->name; + r2->name = dev->name; + + dev->dev_addr[0] = 0x00; + if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */ + dev->dev_addr[1] = 0x80; + dev->dev_addr[2] = 0x10; + } else { /* Ameristar */ + dev->dev_addr[1] = 0x00; + dev->dev_addr[2] = 0x9f; + } + dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff; + dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff; + dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff; + dev->base_addr = ZTWO_VADDR(base_addr); + dev->mem_start = ZTWO_VADDR(mem_start); + dev->mem_end = dev->mem_start + A2065_RAM_SIZE; + + priv->ll = (volatile struct lance_regs *)dev->base_addr; + priv->init_block = (struct lance_init_block *)dev->mem_start; + priv->lance_init_block = (struct lance_init_block *)A2065_RAM; + priv->auto_select = 0; + priv->busmaster_regval = LE_C3_BSWP; + + priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS; + priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; + priv->rx_ring_mod_mask = RX_RING_MOD_MASK; + priv->tx_ring_mod_mask = TX_RING_MOD_MASK; + + dev->netdev_ops = &lance_netdev_ops; + dev->watchdog_timeo = 5*HZ; + dev->dma = 0; + + init_timer(&priv->multicast_timer); + priv->multicast_timer.data = (unsigned long) dev; + priv->multicast_timer.function = + (void (*)(unsigned long))lance_set_multicast; + + err = register_netdev(dev); + if (err) { + release_mem_region(base_addr, sizeof(struct lance_regs)); + release_mem_region(mem_start, A2065_RAM_SIZE); + free_netdev(dev); + return err; + } + zorro_set_drvdata(z, dev); + + netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n", + board, dev->dev_addr); + + return 0; +} + + +static void __devexit a2065_remove_one(struct zorro_dev *z) +{ + struct net_device *dev = zorro_get_drvdata(z); + + unregister_netdev(dev); + release_mem_region(ZTWO_PADDR(dev->base_addr), + sizeof(struct lance_regs)); + release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE); + free_netdev(dev); +} + +static int __init a2065_init_module(void) +{ + return zorro_register_driver(&a2065_driver); +} + +static void __exit a2065_cleanup_module(void) +{ + zorro_unregister_driver(&a2065_driver); +} + +module_init(a2065_init_module); +module_exit(a2065_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/a2065.h b/drivers/net/ethernet/amd/a2065.h new file mode 100644 index 0000000..5117759 --- /dev/null +++ b/drivers/net/ethernet/amd/a2065.h @@ -0,0 +1,173 @@ +/* + * Amiga Linux/68k A2065 Ethernet Driver + * + * (C) Copyright 1995 by Geert Uytterhoeven + * + * --------------------------------------------------------------------------- + * + * This program is based on + * + * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver + * (C) Copyright 1995 by Geert Uytterhoeven, + * Peter De Schrijver + * + * lance.c: An AMD LANCE ethernet driver for linux. + * Written 1993-94 by Donald Becker. + * + * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller + * Advanced Micro Devices + * Publication #16907, Rev. 
B, Amendment/0, May 1994
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
+ *
+ * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
+ *   both 10BASE-2 (thin coax) and AUI (DB-15) connectors
+ */
+
+
+/*
+ * Am7990 Local Area Network Controller for Ethernet (LANCE)
+ */
+
+struct lance_regs {
+	unsigned short rdp; /* Register Data Port */
+	unsigned short rap; /* Register Address Port */
+};
+
+
+/*
+ * Am7990 Control and Status Registers
+ */
+
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] */
+#define LE_CSR2 0x0002 /* IADR[23:16] */
+#define LE_CSR3 0x0003 /* Misc */
+
+
+/*
+ * Bit definitions for CSR0 (LANCE Controller Status)
+ */
+
+#define LE_C0_ERR 0x8000 /* Error */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
+
+
+/*
+ * Bit definitions for CSR3
+ */
+
+#define LE_C3_BSWP 0x0004 /* Byte Swap
+			     (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control
+			     (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
+
+
+/*
+ * Mode Flags
+ */
+
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
+
+
+struct lance_rx_desc {
+	unsigned short rmd0; /* low address of packet */
+	unsigned char rmd1_bits; /* descriptor bits */
+	unsigned char rmd1_hadr; /* high address of packet */
+	short length; /* This length is 2s complement (negative)!
+		       * Buffer length
+		       */
+	unsigned short mblength; /* Actual number of bytes received */
+};
+
+struct lance_tx_desc {
+	unsigned short tmd0; /* low address of packet */
+	unsigned char tmd1_bits; /* descriptor bits */
+	unsigned char tmd1_hadr; /* high address of packet */
+	short length; /* Length is 2s complement (negative)!
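+		       * E.g. a 64-byte frame is stored as -64 = 0xffc0,
+		       * and the drivers then OR in 0xf000, the
+		       * must-be-ones bits, which a normal frame length
+		       * already has set.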
+		       */
+	unsigned short misc;
+};
+
+
+/*
+ * Receive Flags
+ */
+
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Transmit Flags
+ */
+
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved,
+			  LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Error Flags
+ */
+
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+
+
+/*
+ * A2065 Expansion Board Structure
+ */
+
+#define A2065_LANCE 0x4000
+
+#define A2065_RAM 0x8000
+#define A2065_RAM_SIZE 0x8000
+
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
new file mode 100644
index 0000000..52fe21e
--- /dev/null
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -0,0 +1,767 @@
+/*
+ * linux/drivers/net/am79c961.c
+ *
+ * by Russell King 1995-2001.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from various things including skeleton.c
+ *
+ * This is a special driver for the am79c961A Lance chip used in the
+ * Intel (formerly Digital Equipment Corp) EBSA110 platform. Please
+ * note that this can not be built as a module (it doesn't make sense).
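+ * The chip sits at fixed addresses decoded by the board (note the
+ * hard-wired ISAIO_BASE + 0x0464 accesses below), and the platform
+ * always carries exactly one, so there is nothing for a module to
+ * probe.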
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define TX_BUFFERS 15 +#define RX_BUFFERS 25 + +#include "am79c961a.h" + +static irqreturn_t +am79c961_interrupt (int irq, void *dev_id); + +static unsigned int net_debug = NET_DEBUG; + +static const char version[] = + "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n"; + +/* --------------------------------------------------------------------------- */ + +#ifdef __arm__ +static void write_rreg(u_long base, u_int reg, u_int val) +{ + asm volatile( + "str%?h %1, [%2] @ NET_RAP\n\t" + "str%?h %0, [%2, #-4] @ NET_RDP" + : + : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); +} + +static inline unsigned short read_rreg(u_long base_addr, u_int reg) +{ + unsigned short v; + asm volatile( + "str%?h %1, [%2] @ NET_RAP\n\t" + "ldr%?h %0, [%2, #-4] @ NET_RDP" + : "=r" (v) + : "r" (reg), "r" (ISAIO_BASE + 0x0464)); + return v; +} + +static inline void write_ireg(u_long base, u_int reg, u_int val) +{ + asm volatile( + "str%?h %1, [%2] @ NET_RAP\n\t" + "str%?h %0, [%2, #8] @ NET_IDP" + : + : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); +} + +static inline unsigned short read_ireg(u_long base_addr, u_int reg) +{ + u_short v; + asm volatile( + "str%?h %1, [%2] @ NAT_RAP\n\t" + "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" + : "=r" (v) + : "r" (reg), "r" (ISAIO_BASE + 0x0464)); + return v; +} + +#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1)) +#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1)) + +static void +am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) +{ + offset = ISAMEM_BASE + (offset << 1); + length = (length + 1) & ~1; + if ((int)buf & 2) { + asm volatile("str%?h %2, [%0], #4" + : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); + buf += 2; + length -= 2; + } + while (length > 8) { + register unsigned int tmp asm("r2"), tmp2 asm("r3"); + asm volatile( + "ldm%?ia %0!, {%1, %2}" + : "+r" (buf), "=&r" (tmp), "=&r" (tmp2)); + length -= 8; + asm volatile( + "str%?h %1, [%0], #4\n\t" + "mov%? %1, %1, lsr #16\n\t" + "str%?h %1, [%0], #4\n\t" + "str%?h %2, [%0], #4\n\t" + "mov%? %2, %2, lsr #16\n\t" + "str%?h %2, [%0], #4" + : "+r" (offset), "=&r" (tmp), "=&r" (tmp2)); + } + while (length > 0) { + asm volatile("str%?h %2, [%0], #4" + : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); + buf += 2; + length -= 2; + } +} + +static void +am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) +{ + offset = ISAMEM_BASE + (offset << 1); + length = (length + 1) & ~1; + if ((int)buf & 2) { + unsigned int tmp; + asm volatile( + "ldr%?h %2, [%0], #4\n\t" + "str%?b %2, [%1], #1\n\t" + "mov%? %2, %2, lsr #8\n\t" + "str%?b %2, [%1], #1" + : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); + length -= 2; + } + while (length > 8) { + register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3; + asm volatile( + "ldr%?h %2, [%0], #4\n\t" + "ldr%?h %4, [%0], #4\n\t" + "ldr%?h %3, [%0], #4\n\t" + "orr%? %2, %2, %4, lsl #16\n\t" + "ldr%?h %4, [%0], #4\n\t" + "orr%? %3, %3, %4, lsl #16\n\t" + "stm%?ia %1!, {%2, %3}" + : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) + : "0" (offset), "1" (buf)); + length -= 8; + } + while (length > 0) { + unsigned int tmp; + asm volatile( + "ldr%?h %2, [%0], #4\n\t" + "str%?b %2, [%1], #1\n\t" + "mov%? 
%2, %2, lsr #8\n\t" + "str%?b %2, [%1], #1" + : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); + length -= 2; + } +} +#else +#error Not compatible +#endif + +static int +am79c961_ramtest(struct net_device *dev, unsigned int val) +{ + unsigned char *buffer = kmalloc (65536, GFP_KERNEL); + int i, error = 0, errorcount = 0; + + if (!buffer) + return 0; + memset (buffer, val, 65536); + am_writebuffer(dev, 0, buffer, 65536); + memset (buffer, val ^ 255, 65536); + am_readbuffer(dev, 0, buffer, 65536); + for (i = 0; i < 65536; i++) { + if (buffer[i] != val && !error) { + printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i); + error = 1; + errorcount ++; + } else if (error && buffer[i] == val) { + printk ("%05X\n", i); + error = 0; + } + } + if (error) + printk ("10000\n"); + kfree (buffer); + return errorcount; +} + +static void am79c961_mc_hash(char *addr, u16 *hash) +{ + int idx, bit; + u32 crc; + + crc = ether_crc_le(ETH_ALEN, addr); + + idx = crc >> 30; + bit = (crc >> 26) & 15; + + hash[idx] |= 1 << bit; +} + +static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash) +{ + unsigned int mode = MODE_PORT_10BT; + + if (dev->flags & IFF_PROMISC) { + mode |= MODE_PROMISC; + memset(hash, 0xff, 4 * sizeof(*hash)); + } else if (dev->flags & IFF_ALLMULTI) { + memset(hash, 0xff, 4 * sizeof(*hash)); + } else { + struct netdev_hw_addr *ha; + + memset(hash, 0, 4 * sizeof(*hash)); + + netdev_for_each_mc_addr(ha, dev) + am79c961_mc_hash(ha->addr, hash); + } + + return mode; +} + +static void +am79c961_init_for_open(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + unsigned long flags; + unsigned char *p; + u_int hdr_addr, first_free_addr; + u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash); + int i; + + /* + * Stop the chip. 
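+	 * CSR0's status bits are write-one-to-clear, so this single write
+	 * both sets STOP and acknowledges anything still pending, e.g. a
+	 * stale RINT left over from before the interface last went down.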
+ */ + spin_lock_irqsave(&priv->chip_lock, flags); + write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP); + spin_unlock_irqrestore(&priv->chip_lock, flags); + + write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */ + write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */ + write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */ + write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */ + + for (i = LADRL; i <= LADRH; i++) + write_rreg (dev->base_addr, i, multi_hash[i - LADRL]); + + for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2) + write_rreg (dev->base_addr, i, p[0] | (p[1] << 8)); + + write_rreg (dev->base_addr, MODE, mode); + write_rreg (dev->base_addr, POLLINT, 0); + write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS); + write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS); + + first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16; + hdr_addr = 0; + + priv->rxhead = 0; + priv->rxtail = 0; + priv->rxhdr = hdr_addr; + + for (i = 0; i < RX_BUFFERS; i++) { + priv->rxbuffer[i] = first_free_addr; + am_writeword (dev, hdr_addr, first_free_addr); + am_writeword (dev, hdr_addr + 2, RMD_OWN); + am_writeword (dev, hdr_addr + 4, (-1600)); + am_writeword (dev, hdr_addr + 6, 0); + first_free_addr += 1600; + hdr_addr += 8; + } + priv->txhead = 0; + priv->txtail = 0; + priv->txhdr = hdr_addr; + for (i = 0; i < TX_BUFFERS; i++) { + priv->txbuffer[i] = first_free_addr; + am_writeword (dev, hdr_addr, first_free_addr); + am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP); + am_writeword (dev, hdr_addr + 4, 0xf000); + am_writeword (dev, hdr_addr + 6, 0); + first_free_addr += 1600; + hdr_addr += 8; + } + + write_rreg (dev->base_addr, BASERXL, priv->rxhdr); + write_rreg (dev->base_addr, BASERXH, 0); + write_rreg (dev->base_addr, BASETXL, priv->txhdr); + write_rreg (dev->base_addr, BASERXH, 0); + write_rreg (dev->base_addr, CSR0, CSR0_STOP); + write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO); + write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM); + write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT); +} + +static void am79c961_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct dev_priv *priv = netdev_priv(dev); + unsigned int lnkstat, carrier; + + lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; + carrier = netif_carrier_ok(dev); + + if (lnkstat && !carrier) { + netif_carrier_on(dev); + printk("%s: link up\n", dev->name); + } else if (!lnkstat && carrier) { + netif_carrier_off(dev); + printk("%s: link down\n", dev->name); + } + + mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500)); +} + +/* + * Open/initialize the board. + */ +static int +am79c961_open(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + int ret; + + ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev); + if (ret) + return ret; + + am79c961_init_for_open(dev); + + netif_carrier_off(dev); + + priv->timer.expires = jiffies; + add_timer(&priv->timer); + + netif_start_queue(dev); + + return 0; +} + +/* + * The inverse routine to am79c961_open(). 
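+ * Teardown order matters here: stop the timer and the queue first,
+ * then stop and mask the chip under the lock, and only free the IRQ
+ * once nothing can raise it, so the handler can never run after it
+ * is gone.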
+ */ +static int +am79c961_close(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + unsigned long flags; + + del_timer_sync(&priv->timer); + + netif_stop_queue(dev); + netif_carrier_off(dev); + + spin_lock_irqsave(&priv->chip_lock, flags); + write_rreg (dev->base_addr, CSR0, CSR0_STOP); + write_rreg (dev->base_addr, CSR3, CSR3_MASKALL); + spin_unlock_irqrestore(&priv->chip_lock, flags); + + free_irq (dev->irq, dev); + + return 0; +} + +/* + * Set or clear promiscuous/multicast mode filter for this adapter. + */ +static void am79c961_setmulticastlist (struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + unsigned long flags; + u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash); + int i, stopped; + + spin_lock_irqsave(&priv->chip_lock, flags); + + stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP; + + if (!stopped) { + /* + * Put the chip into suspend mode + */ + write_rreg(dev->base_addr, CTRL1, CTRL1_SPND); + + /* + * Spin waiting for chip to report suspend mode + */ + while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) { + spin_unlock_irqrestore(&priv->chip_lock, flags); + nop(); + spin_lock_irqsave(&priv->chip_lock, flags); + } + } + + /* + * Update the multicast hash table + */ + for (i = 0; i < ARRAY_SIZE(multi_hash); i++) + write_rreg(dev->base_addr, i + LADRL, multi_hash[i]); + + /* + * Write the mode register + */ + write_rreg(dev->base_addr, MODE, mode); + + if (!stopped) { + /* + * Put the chip back into running mode + */ + write_rreg(dev->base_addr, CTRL1, 0); + } + + spin_unlock_irqrestore(&priv->chip_lock, flags); +} + +static void am79c961_timeout(struct net_device *dev) +{ + printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n", + dev->name); + + /* + * ought to do some setup of the tx side here + */ + + netif_wake_queue(dev); +} + +/* + * Transmit a packet + */ +static int +am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + unsigned int hdraddr, bufaddr; + unsigned int head; + unsigned long flags; + + head = priv->txhead; + hdraddr = priv->txhdr + (head << 3); + bufaddr = priv->txbuffer[head]; + head += 1; + if (head >= TX_BUFFERS) + head = 0; + + am_writebuffer (dev, bufaddr, skb->data, skb->len); + am_writeword (dev, hdraddr + 4, -skb->len); + am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP); + priv->txhead = head; + + spin_lock_irqsave(&priv->chip_lock, flags); + write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA); + spin_unlock_irqrestore(&priv->chip_lock, flags); + + /* + * If the next packet is owned by the ethernet device, + * then the tx ring is full and we can't add another + * packet. + */ + if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN) + netif_stop_queue(dev); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* + * If we have a good packet(s), get it/them out of the buffers. + */ +static void +am79c961_rx(struct net_device *dev, struct dev_priv *priv) +{ + do { + struct sk_buff *skb; + u_int hdraddr; + u_int pktaddr; + u_int status; + int len; + + hdraddr = priv->rxhdr + (priv->rxtail << 3); + pktaddr = priv->rxbuffer[priv->rxtail]; + + status = am_readword (dev, hdraddr + 2); + if (status & RMD_OWN) /* do we own it? 
*/ + break; + + priv->rxtail ++; + if (priv->rxtail >= RX_BUFFERS) + priv->rxtail = 0; + + if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) { + am_writeword (dev, hdraddr + 2, RMD_OWN); + dev->stats.rx_errors++; + if (status & RMD_ERR) { + if (status & RMD_FRAM) + dev->stats.rx_frame_errors++; + if (status & RMD_CRC) + dev->stats.rx_crc_errors++; + } else if (status & RMD_STP) + dev->stats.rx_length_errors++; + continue; + } + + len = am_readword(dev, hdraddr + 6); + skb = dev_alloc_skb(len + 2); + + if (skb) { + skb_reserve(skb, 2); + + am_readbuffer(dev, pktaddr, skb_put(skb, len), len); + am_writeword(dev, hdraddr + 2, RMD_OWN); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; + } else { + am_writeword (dev, hdraddr + 2, RMD_OWN); + printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name); + dev->stats.rx_dropped++; + break; + } + } while (1); +} + +/* + * Update stats for the transmitted packet + */ +static void +am79c961_tx(struct net_device *dev, struct dev_priv *priv) +{ + do { + short len; + u_int hdraddr; + u_int status; + + hdraddr = priv->txhdr + (priv->txtail << 3); + status = am_readword (dev, hdraddr + 2); + if (status & TMD_OWN) + break; + + priv->txtail ++; + if (priv->txtail >= TX_BUFFERS) + priv->txtail = 0; + + if (status & TMD_ERR) { + u_int status2; + + dev->stats.tx_errors++; + + status2 = am_readword (dev, hdraddr + 6); + + /* + * Clear the error byte + */ + am_writeword (dev, hdraddr + 6, 0); + + if (status2 & TST_RTRY) + dev->stats.collisions += 16; + if (status2 & TST_LCOL) + dev->stats.tx_window_errors++; + if (status2 & TST_LCAR) + dev->stats.tx_carrier_errors++; + if (status2 & TST_UFLO) + dev->stats.tx_fifo_errors++; + continue; + } + dev->stats.tx_packets++; + len = am_readword (dev, hdraddr + 4); + dev->stats.tx_bytes += -len; + } while (priv->txtail != priv->txhead); + + netif_wake_queue(dev); +} + +static irqreturn_t +am79c961_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct dev_priv *priv = netdev_priv(dev); + u_int status, n = 100; + int handled = 0; + + do { + status = read_rreg(dev->base_addr, CSR0); + write_rreg(dev->base_addr, CSR0, status & + (CSR0_IENA|CSR0_TINT|CSR0_RINT| + CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL)); + + if (status & CSR0_RINT) { + handled = 1; + am79c961_rx(dev, priv); + } + if (status & CSR0_TINT) { + handled = 1; + am79c961_tx(dev, priv); + } + if (status & CSR0_MISS) { + handled = 1; + dev->stats.rx_dropped++; + } + if (status & CSR0_CERR) { + handled = 1; + mod_timer(&priv->timer, jiffies); + } + } while (--n && status & (CSR0_RINT | CSR0_TINT)); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void am79c961_poll_controller(struct net_device *dev) +{ + unsigned long flags; + local_irq_save(flags); + am79c961_interrupt(dev->irq, dev); + local_irq_restore(flags); +} +#endif + +/* + * Initialise the chip. Note that we always expect + * to be entered with interrupts enabled. 
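+ *
+ * Initialisation halts the chip (CSR0_STOP with CSR3_MASKALL) and then
+ * exercises the on-board SRAM with two complementary test patterns:
+ *
+ *	am79c961_ramtest(dev, 0x66);	binary 01100110
+ *	am79c961_ramtest(dev, 0x99);	binary 10011001
+ *
+ * am79c961_ramtest() returns the number of bytes that failed to hold
+ * the pattern, and logs the offending address ranges.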
+ */ +static int +am79c961_hw_init(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + + spin_lock_irq(&priv->chip_lock); + write_rreg (dev->base_addr, CSR0, CSR0_STOP); + write_rreg (dev->base_addr, CSR3, CSR3_MASKALL); + spin_unlock_irq(&priv->chip_lock); + + am79c961_ramtest(dev, 0x66); + am79c961_ramtest(dev, 0x99); + + return 0; +} + +static void __init am79c961_banner(void) +{ + static unsigned version_printed; + + if (net_debug && version_printed++ == 0) + printk(KERN_INFO "%s", version); +} +static const struct net_device_ops am79c961_netdev_ops = { + .ndo_open = am79c961_open, + .ndo_stop = am79c961_close, + .ndo_start_xmit = am79c961_sendpacket, + .ndo_set_multicast_list = am79c961_setmulticastlist, + .ndo_tx_timeout = am79c961_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = am79c961_poll_controller, +#endif +}; + +static int __devinit am79c961_probe(struct platform_device *pdev) +{ + struct resource *res; + struct net_device *dev; + struct dev_priv *priv; + int i, ret; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) + return -ENODEV; + + dev = alloc_etherdev(sizeof(struct dev_priv)); + ret = -ENOMEM; + if (!dev) + goto out; + + SET_NETDEV_DEV(dev, &pdev->dev); + + priv = netdev_priv(dev); + + /* + * Fixed address and IRQ lines here. + * The PNP initialisation should have been + * done by the ether bootp loader. + */ + dev->base_addr = res->start; + ret = platform_get_irq(pdev, 0); + + if (ret < 0) { + ret = -ENODEV; + goto nodev; + } + dev->irq = ret; + + ret = -ENODEV; + if (!request_region(dev->base_addr, 0x18, dev->name)) + goto nodev; + + /* + * Reset the device. + */ + inb(dev->base_addr + NET_RESET); + udelay(5); + + /* + * Check the manufacturer part of the + * ether address. + */ + if (inb(dev->base_addr) != 0x08 || + inb(dev->base_addr + 2) != 0x00 || + inb(dev->base_addr + 4) != 0x2b) + goto release; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff; + + am79c961_banner(); + + spin_lock_init(&priv->chip_lock); + init_timer(&priv->timer); + priv->timer.data = (unsigned long)dev; + priv->timer.function = am79c961_timer; + + if (am79c961_hw_init(dev)) + goto release; + + dev->netdev_ops = &am79c961_netdev_ops; + + ret = register_netdev(dev); + if (ret == 0) { + printk(KERN_INFO "%s: ether address %pM\n", + dev->name, dev->dev_addr); + return 0; + } + +release: + release_region(dev->base_addr, 0x18); +nodev: + free_netdev(dev); +out: + return ret; +} + +static struct platform_driver am79c961_driver = { + .probe = am79c961_probe, + .driver = { + .name = "am79c961", + }, +}; + +static int __init am79c961_init(void) +{ + return platform_driver_register(&am79c961_driver); +} + +__initcall(am79c961_init); diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h new file mode 100644 index 0000000..fd634d3 --- /dev/null +++ b/drivers/net/ethernet/amd/am79c961a.h @@ -0,0 +1,145 @@ +/* + * linux/drivers/net/arm/am79c961a.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_am79c961a_H +#define _LINUX_am79c961a_H + +/* use 0 for production, 1 for verification, >2 for debug. 
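+   NET_DEBUG is a bitmask, so the classes below can presumably be
+   combined, e.g. setting NET_DEBUG to (DEBUG_TX | DEBUG_RX) to trace
+   both data paths.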
debug flags: */ +#define DEBUG_TX 2 +#define DEBUG_RX 4 +#define DEBUG_INT 8 +#define DEBUG_IC 16 +#ifndef NET_DEBUG +#define NET_DEBUG 0 +#endif + +#define NET_UID 0 +#define NET_RDP 0x10 +#define NET_RAP 0x12 +#define NET_RESET 0x14 +#define NET_IDP 0x16 + +/* + * RAP registers + */ +#define CSR0 0 +#define CSR0_INIT 0x0001 +#define CSR0_STRT 0x0002 +#define CSR0_STOP 0x0004 +#define CSR0_TDMD 0x0008 +#define CSR0_TXON 0x0010 +#define CSR0_RXON 0x0020 +#define CSR0_IENA 0x0040 +#define CSR0_INTR 0x0080 +#define CSR0_IDON 0x0100 +#define CSR0_TINT 0x0200 +#define CSR0_RINT 0x0400 +#define CSR0_MERR 0x0800 +#define CSR0_MISS 0x1000 +#define CSR0_CERR 0x2000 +#define CSR0_BABL 0x4000 +#define CSR0_ERR 0x8000 + +#define CSR3 3 +#define CSR3_EMBA 0x0008 +#define CSR3_DXMT2PD 0x0010 +#define CSR3_LAPPEN 0x0020 +#define CSR3_DXSUFLO 0x0040 +#define CSR3_IDONM 0x0100 +#define CSR3_TINTM 0x0200 +#define CSR3_RINTM 0x0400 +#define CSR3_MERRM 0x0800 +#define CSR3_MISSM 0x1000 +#define CSR3_BABLM 0x4000 +#define CSR3_MASKALL 0x5F00 + +#define CSR4 4 +#define CSR4_JABM 0x0001 +#define CSR4_JAB 0x0002 +#define CSR4_TXSTRTM 0x0004 +#define CSR4_TXSTRT 0x0008 +#define CSR4_RCVCCOM 0x0010 +#define CSR4_RCVCCO 0x0020 +#define CSR4_MFCOM 0x0100 +#define CSR4_MFCO 0x0200 +#define CSR4_ASTRP_RCV 0x0400 +#define CSR4_APAD_XMIT 0x0800 + +#define CTRL1 5 +#define CTRL1_SPND 0x0001 + +#define LADRL 8 +#define LADRM1 9 +#define LADRM2 10 +#define LADRH 11 +#define PADRL 12 +#define PADRM 13 +#define PADRH 14 + +#define MODE 15 +#define MODE_DISRX 0x0001 +#define MODE_DISTX 0x0002 +#define MODE_LOOP 0x0004 +#define MODE_DTCRC 0x0008 +#define MODE_COLL 0x0010 +#define MODE_DRETRY 0x0020 +#define MODE_INTLOOP 0x0040 +#define MODE_PORT_AUI 0x0000 +#define MODE_PORT_10BT 0x0080 +#define MODE_DRXPA 0x2000 +#define MODE_DRXBA 0x4000 +#define MODE_PROMISC 0x8000 + +#define BASERXL 24 +#define BASERXH 25 +#define BASETXL 30 +#define BASETXH 31 + +#define POLLINT 47 + +#define SIZERXR 76 +#define SIZETXR 78 + +#define CSR_MFC 112 + +#define RMD_ENP 0x0100 +#define RMD_STP 0x0200 +#define RMD_CRC 0x0800 +#define RMD_FRAM 0x2000 +#define RMD_ERR 0x4000 +#define RMD_OWN 0x8000 + +#define TMD_ENP 0x0100 +#define TMD_STP 0x0200 +#define TMD_MORE 0x1000 +#define TMD_ERR 0x4000 +#define TMD_OWN 0x8000 + +#define TST_RTRY 0x0400 +#define TST_LCAR 0x0800 +#define TST_LCOL 0x1000 +#define TST_UFLO 0x4000 +#define TST_BUFF 0x8000 + +#define ISALED0 0x0004 +#define ISALED0_LNKST 0x8000 + +struct dev_priv { + unsigned long rxbuffer[RX_BUFFERS]; + unsigned long txbuffer[TX_BUFFERS]; + unsigned char txhead; + unsigned char txtail; + unsigned char rxhead; + unsigned char rxtail; + unsigned long rxhdr; + unsigned long txhdr; + spinlock_t chip_lock; + struct timer_list timer; +}; + +#endif diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c new file mode 100644 index 0000000..78002ef --- /dev/null +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -0,0 +1,1992 @@ + +/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver + * Copyright (C) 2004 Advanced Micro Devices + * + * + * Copyright 2001,2002 Jeff Garzik [ 8139cp.c,tg3.c ] + * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c] + * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ] + * Derived from the lance driver written 1993,1994,1995 by Donald Becker. 
+ * Copyright 1993 United States Government as represented by the + * Director, National Security Agency.[ pcnet32.c ] + * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ] + * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + +Module Name: + + amd8111e.c + +Abstract: + + AMD8111 based 10/100 Ethernet Controller Driver. + +Environment: + + Kernel Mode + +Revision History: + 3.0.0 + Initial Revision. + 3.0.1 + 1. Dynamic interrupt coalescing. + 2. Removed prev_stats. + 3. MII support. + 4. Dynamic IPG support + 3.0.2 05/29/2003 + 1. Bug fix: Fixed failure to send jumbo packets larger than 4k. + 2. Bug fix: Fixed VLAN support failure. + 3. Bug fix: Fixed receive interrupt coalescing bug. + 4. Dynamic IPG support is disabled by default. + 3.0.3 06/05/2003 + 1. Bug fix: Fixed failure to close the interface if SMP is enabled. + 3.0.4 12/09/2003 + 1. Added set_mac_address routine for bonding driver support. + 2. Tested the driver for bonding support + 3. Bug fix: Fixed mismach in actual receive buffer lenth and lenth + indicated to the h/w. + 4. Modified amd8111e_rx() routine to receive all the received packets + in the first interrupt. + 5. Bug fix: Corrected rx_errors reported in get_stats() function. + 3.0.5 03/22/2004 + 1. Added NAPI support + +*/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define AMD8111E_VLAN_TAG_USED 1 +#else +#define AMD8111E_VLAN_TAG_USED 0 +#endif + +#include "amd8111e.h" +#define MODULE_NAME "amd8111e" +#define MODULE_VERS "3.0.7" +MODULE_AUTHOR("Advanced Micro Devices, Inc."); +MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl); +module_param_array(speed_duplex, int, NULL, 0); +MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex"); +module_param_array(coalesce, bool, NULL, 0); +MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable"); +module_param_array(dynamic_ipg, bool, NULL, 0); +MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable"); + +static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = { + + { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { 0, } + +}; +/* +This function will read the PHY registers. 
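+The PHY is accessed indirectly through the PHY_ACCESS register: wait
+for a previous command to finish (PHY_CMD_ACTIVE clear), then issue
+
+	PHY_RD_CMD | (phy_id << 21) | (reg << 16)
+
+and poll until PHY_CMD_ACTIVE drops again.  The low 16 bits of
+PHY_ACCESS then hold the value read; PHY_RD_ERR signals a failed read.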
+*/ +static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val) +{ + void __iomem *mmio = lp->mmio; + unsigned int reg_val; + unsigned int repeat= REPEAT_CNT; + + reg_val = readl(mmio + PHY_ACCESS); + while (reg_val & PHY_CMD_ACTIVE) + reg_val = readl( mmio + PHY_ACCESS ); + + writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) | + ((reg & 0x1f) << 16), mmio +PHY_ACCESS); + do{ + reg_val = readl(mmio + PHY_ACCESS); + udelay(30); /* It takes 30 us to read/write data */ + } while (--repeat && (reg_val & PHY_CMD_ACTIVE)); + if(reg_val & PHY_RD_ERR) + goto err_phy_read; + + *val = reg_val & 0xffff; + return 0; +err_phy_read: + *val = 0; + return -EINVAL; + +} + +/* +This function will write into PHY registers. +*/ +static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val) +{ + unsigned int repeat = REPEAT_CNT; + void __iomem *mmio = lp->mmio; + unsigned int reg_val; + + reg_val = readl(mmio + PHY_ACCESS); + while (reg_val & PHY_CMD_ACTIVE) + reg_val = readl( mmio + PHY_ACCESS ); + + writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) | + ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS); + + do{ + reg_val = readl(mmio + PHY_ACCESS); + udelay(30); /* It takes 30 us to read/write the data */ + } while (--repeat && (reg_val & PHY_CMD_ACTIVE)); + + if(reg_val & PHY_RD_ERR) + goto err_phy_write; + + return 0; + +err_phy_write: + return -EINVAL; + +} +/* +This is the mii register read function provided to the mii interface. +*/ +static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num) +{ + struct amd8111e_priv* lp = netdev_priv(dev); + unsigned int reg_val; + + amd8111e_read_phy(lp,phy_id,reg_num,®_val); + return reg_val; + +} + +/* +This is the mii register write function provided to the mii interface. +*/ +static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val) +{ + struct amd8111e_priv* lp = netdev_priv(dev); + + amd8111e_write_phy(lp, phy_id, reg_num, val); +} + +/* +This function will set PHY speed. During initialization sets the original speed to 100 full. +*/ +static void amd8111e_set_ext_phy(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + u32 bmcr,advert,tmp; + + /* Determine mii register values to set the speed */ + advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE); + tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + switch (lp->ext_phy_option){ + + default: + case SPEED_AUTONEG: /* advertise all values */ + tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL| + ADVERTISE_100HALF|ADVERTISE_100FULL) ; + break; + case SPEED10_HALF: + tmp |= ADVERTISE_10HALF; + break; + case SPEED10_FULL: + tmp |= ADVERTISE_10FULL; + break; + case SPEED100_HALF: + tmp |= ADVERTISE_100HALF; + break; + case SPEED100_FULL: + tmp |= ADVERTISE_100FULL; + break; + } + + if(advert != tmp) + amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp); + /* Restart auto negotiation */ + bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr); + +} + +/* +This function will unmap skb->data space and will free +all transmit and receive skbuffs. 
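+Each buffer that was mapped with pci_map_single() is first unmapped
+with pci_unmap_single(), its skb is freed, and the stale pointer and
+DMA address are cleared so the rings can be reinitialised later.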
+*/
+static int amd8111e_free_skbs(struct net_device *dev)
+{
+	struct amd8111e_priv *lp = netdev_priv(dev);
+	struct sk_buff* rx_skbuff;
+	int i;
+
+	/* Freeing transmit skbs */
+	for(i = 0; i < NUM_TX_BUFFERS; i++){
+		if(lp->tx_skbuff[i]){
+			pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
+			dev_kfree_skb (lp->tx_skbuff[i]);
+			lp->tx_skbuff[i] = NULL;
+			lp->tx_dma_addr[i] = 0;
+		}
+	}
+	/* Freeing previously allocated receive buffers */
+	for (i = 0; i < NUM_RX_BUFFERS; i++){
+		rx_skbuff = lp->rx_skbuff[i];
+		if(rx_skbuff != NULL){
+			pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
+				lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(lp->rx_skbuff[i]);
+			lp->rx_skbuff[i] = NULL;
+			lp->rx_dma_addr[i] = 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+This will set the receive buffer length corresponding to the mtu size of the network interface.
+*/
+static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
+{
+	struct amd8111e_priv* lp = netdev_priv(dev);
+	unsigned int mtu = dev->mtu;
+
+	if (mtu > ETH_DATA_LEN){
+		/* MTU + ethernet header + FCS
+		+ optional VLAN tag + skb reserve space 2 */
+
+		lp->rx_buff_len = mtu + ETH_HLEN + 10;
+		lp->options |= OPTION_JUMBO_ENABLE;
+	} else{
+		lp->rx_buff_len = PKT_BUFF_SZ;
+		lp->options &= ~OPTION_JUMBO_ENABLE;
+	}
+}
+
+/*
+This function will free all the previously allocated buffers, determine the new receive buffer length and allocate new receive buffers. It also allocates and initializes both the transmit and receive hardware descriptors.
+ */
+static int amd8111e_init_ring(struct net_device *dev)
+{
+	struct amd8111e_priv *lp = netdev_priv(dev);
+	int i;
+
+	lp->rx_idx = lp->tx_idx = 0;
+	lp->tx_complete_idx = 0;
+	lp->tx_ring_idx = 0;
+
+
+	if(lp->opened)
+		/* Free previously allocated transmit and receive skbs */
+		amd8111e_free_skbs(dev);
+
+	else{
+		/* allocate the tx and rx descriptors */
+		if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
+			sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
+			&lp->tx_ring_dma_addr)) == NULL)
+
+			goto err_no_mem;
+
+		if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
+			sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
+			&lp->rx_ring_dma_addr)) == NULL)
+
+			goto err_free_tx_ring;
+
+	}
+	/* Set new receive buff size */
+	amd8111e_set_rx_buff_len(dev);
+
+	/* Allocating receive skbs */
+	for (i = 0; i < NUM_RX_BUFFERS; i++) {
+
+		if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
+			/* Release previously allocated skbs */
+			for(--i; i >= 0 ;i--)
+				dev_kfree_skb(lp->rx_skbuff[i]);
+			goto err_free_rx_ring;
+		}
+		skb_reserve(lp->rx_skbuff[i],2);
+	}
+	/* Initializing receive descriptors */
+	for (i = 0; i < NUM_RX_BUFFERS; i++) {
+		lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
+			lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+
+		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
+		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
+		wmb();
+		lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
+	}
+
+	/* Initializing transmit descriptors */
+	for (i = 0; i < NUM_TX_RING_DR; i++) {
+		lp->tx_ring[i].buff_phy_addr = 0;
+		lp->tx_ring[i].tx_flags = 0;
+		lp->tx_ring[i].buff_count = 0;
+	}
+
+	return 0;
+
+err_free_rx_ring:
+
+	pci_free_consistent(lp->pci_dev,
+		sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
+		lp->rx_ring_dma_addr);
+
+err_free_tx_ring:
+
+	pci_free_consistent(lp->pci_dev,
+		sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
+		lp->tx_ring_dma_addr);
+
+err_no_mem:
+	return -ENOMEM;
+}
+/* This
function will set the interrupt coalescing according to the input arguments */ +static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod) +{ + unsigned int timeout; + unsigned int event_count; + + struct amd8111e_priv *lp = netdev_priv(dev); + void __iomem *mmio = lp->mmio; + struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf; + + + switch(cmod) + { + case RX_INTR_COAL : + timeout = coal_conf->rx_timeout; + event_count = coal_conf->rx_event_count; + if( timeout > MAX_TIMEOUT || + event_count > MAX_EVENT_COUNT ) + return -EINVAL; + + timeout = timeout * DELAY_TIMER_CONV; + writel(VAL0|STINTEN, mmio+INTEN0); + writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout, + mmio+DLY_INT_A); + break; + + case TX_INTR_COAL : + timeout = coal_conf->tx_timeout; + event_count = coal_conf->tx_event_count; + if( timeout > MAX_TIMEOUT || + event_count > MAX_EVENT_COUNT ) + return -EINVAL; + + + timeout = timeout * DELAY_TIMER_CONV; + writel(VAL0|STINTEN,mmio+INTEN0); + writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout, + mmio+DLY_INT_B); + break; + + case DISABLE_COAL: + writel(0,mmio+STVAL); + writel(STINTEN, mmio+INTEN0); + writel(0, mmio +DLY_INT_B); + writel(0, mmio+DLY_INT_A); + break; + case ENABLE_COAL: + /* Start the timer */ + writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */ + writel(VAL0|STINTEN, mmio+INTEN0); + break; + default: + break; + + } + return 0; + +} + +/* +This function initializes the device registers and starts the device. +*/ +static int amd8111e_restart(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + void __iomem *mmio = lp->mmio; + int i,reg_val; + + /* stop the chip */ + writel(RUN, mmio + CMD0); + + if(amd8111e_init_ring(dev)) + return -ENOMEM; + + /* enable the port manager and set auto negotiation always */ + writel((u32) VAL1|EN_PMGR, mmio + CMD3 ); + writel((u32)XPHYANE|XPHYRST , mmio + CTRL2); + + amd8111e_set_ext_phy(dev); + + /* set control registers */ + reg_val = readl(mmio + CTRL1); + reg_val &= ~XMTSP_MASK; + writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 ); + + /* enable interrupt */ + writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN | + APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN | + SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0); + + writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0); + + /* initialize tx and rx ring base addresses */ + writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0); + writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0); + + writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0); + writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0); + + /* set default IPG to 96 */ + writew((u32)DEFAULT_IPG,mmio+IPG); + writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1); + + if(lp->options & OPTION_JUMBO_ENABLE){ + writel((u32)VAL2|JUMBO, mmio + CMD3); + /* Reset REX_UFLO */ + writel( REX_UFLO, mmio + CMD2); + /* Should not set REX_UFLO for jumbo frames */ + writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2); + }else{ + writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2); + writel((u32)JUMBO, mmio + CMD3); + } + +#if AMD8111E_VLAN_TAG_USED + writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3); +#endif + writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 ); + + /* Setting the MAC address to the device */ + for(i = 0; i < ETH_ADDR_LEN; i++) + writeb( dev->dev_addr[i], mmio + PADR + i ); + + /* Enable interrupt coalesce */ + if(lp->options & OPTION_INTR_COAL_ENABLE){ + printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n", + 
dev->name);
+		amd8111e_set_coalesce(dev,ENABLE_COAL);
+	}
+
+	/* set RUN bit to start the chip */
+	writel(VAL2 | RDMD0, mmio + CMD0);
+	writel(VAL0 | INTREN | RUN, mmio + CMD0);
+
+	/* To avoid PCI posting bug */
+	readl(mmio+CMD0);
+	return 0;
+}
+/*
+This function clears all the necessary device registers.
+*/
+static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
+{
+	unsigned int reg_val;
+	unsigned int logic_filter[2] ={0,};
+	void __iomem *mmio = lp->mmio;
+
+
+	/* stop the chip */
+	writel(RUN, mmio + CMD0);
+
+	/* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
+	writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
+
+	/* Clear RCV_RING_BASE_ADDR */
+	writel(0, mmio + RCV_RING_BASE_ADDR0);
+
+	/* Clear XMT_RING_BASE_ADDR */
+	writel(0, mmio + XMT_RING_BASE_ADDR0);
+	writel(0, mmio + XMT_RING_BASE_ADDR1);
+	writel(0, mmio + XMT_RING_BASE_ADDR2);
+	writel(0, mmio + XMT_RING_BASE_ADDR3);
+
+	/* Clear CMD0 */
+	writel(CMD0_CLEAR,mmio + CMD0);
+
+	/* Clear CMD2 */
+	writel(CMD2_CLEAR, mmio +CMD2);
+
+	/* Clear CMD7 */
+	writel(CMD7_CLEAR , mmio + CMD7);
+
+	/* Clear DLY_INT_A and DLY_INT_B */
+	writel(0x0, mmio + DLY_INT_A);
+	writel(0x0, mmio + DLY_INT_B);
+
+	/* Clear FLOW_CONTROL */
+	writel(0x0, mmio + FLOW_CONTROL);
+
+	/* Clear INT0  write 1 to clear register */
+	reg_val = readl(mmio + INT0);
+	writel(reg_val, mmio + INT0);
+
+	/* Clear STVAL */
+	writel(0x0, mmio + STVAL);
+
+	/* Clear INTEN0 */
+	writel( INTEN0_CLEAR, mmio + INTEN0);
+
+	/* Clear LADRF */
+	writel(0x0 , mmio + LADRF);
+
+	/* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
+	writel( 0x80010,mmio + SRAM_SIZE);
+
+	/* Clear RCV_RING0_LEN */
+	writel(0x0, mmio + RCV_RING_LEN0);
+
+	/* Clear XMT_RING0/1/2/3_LEN */
+	writel(0x0, mmio + XMT_RING_LEN0);
+	writel(0x0, mmio + XMT_RING_LEN1);
+	writel(0x0, mmio + XMT_RING_LEN2);
+	writel(0x0, mmio + XMT_RING_LEN3);
+
+	/* Clear XMT_RING_LIMIT */
+	writel(0x0, mmio + XMT_RING_LIMIT);
+
+	/* Clear MIB */
+	writew(MIB_CLEAR, mmio + MIB_ADDR);
+
+	/* Clear LARF */
+	amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
+
+	/* SRAM_SIZE register */
+	reg_val = readl(mmio + SRAM_SIZE);
+
+	if(lp->options & OPTION_JUMBO_ENABLE)
+		writel( VAL2|JUMBO, mmio + CMD3);
+#if AMD8111E_VLAN_TAG_USED
+	writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
+#endif
+	/* Set default value to CTRL1 Register */
+	writel(CTRL1_DEFAULT, mmio + CTRL1);
+
+	/* To avoid PCI posting bug */
+	readl(mmio + CMD2);
+
+}
+
+/*
+This function disables the interrupt and clears all the pending
+interrupts in INT0
+ */
+static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
+{
+	u32 intr0;
+
+	/* Disable interrupt */
+	writel(INTREN, lp->mmio + CMD0);
+
+	/* Clear INT0 */
+	intr0 = readl(lp->mmio + INT0);
+	writel(intr0, lp->mmio + INT0);
+
+	/* To avoid PCI posting bug */
+	readl(lp->mmio + INT0);
+
+}
+
+/*
+This function stops the chip.
+*/
+static void amd8111e_stop_chip(struct amd8111e_priv* lp)
+{
+	writel(RUN, lp->mmio + CMD0);
+
+	/* To avoid PCI posting bug */
+	readl(lp->mmio + CMD0);
+}
+
+/*
+This function frees the transmitter and receiver descriptor rings.
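+Only the descriptor rings obtained from pci_alloc_consistent() are
+released here; the skbs attached to them are returned separately by
+amd8111e_free_skbs().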
+*/ +static void amd8111e_free_ring(struct amd8111e_priv* lp) +{ + /* Free transmit and receive descriptor rings */ + if(lp->rx_ring){ + pci_free_consistent(lp->pci_dev, + sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR, + lp->rx_ring, lp->rx_ring_dma_addr); + lp->rx_ring = NULL; + } + + if(lp->tx_ring){ + pci_free_consistent(lp->pci_dev, + sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR, + lp->tx_ring, lp->tx_ring_dma_addr); + + lp->tx_ring = NULL; + } + +} + +/* +This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb. +*/ +static int amd8111e_tx(struct net_device *dev) +{ + struct amd8111e_priv* lp = netdev_priv(dev); + int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; + int status; + /* Complete all the transmit packet */ + while (lp->tx_complete_idx != lp->tx_idx){ + tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; + status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags); + + if(status & OWN_BIT) + break; /* It still hasn't been Txed */ + + lp->tx_ring[tx_index].buff_phy_addr = 0; + + /* We must free the original skb */ + if (lp->tx_skbuff[tx_index]) { + pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], + lp->tx_skbuff[tx_index]->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_irq (lp->tx_skbuff[tx_index]); + lp->tx_skbuff[tx_index] = NULL; + lp->tx_dma_addr[tx_index] = 0; + } + lp->tx_complete_idx++; + /*COAL update tx coalescing parameters */ + lp->coal_conf.tx_packets++; + lp->coal_conf.tx_bytes += + le16_to_cpu(lp->tx_ring[tx_index].buff_count); + + if (netif_queue_stopped(dev) && + lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){ + /* The ring is no longer full, clear tbusy. */ + /* lp->tx_full = 0; */ + netif_wake_queue (dev); + } + } + return 0; +} + +/* This function handles the driver receive operation in polling mode */ +static int amd8111e_rx_poll(struct napi_struct *napi, int budget) +{ + struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi); + struct net_device *dev = lp->amd8111e_net_dev; + int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK; + void __iomem *mmio = lp->mmio; + struct sk_buff *skb,*new_skb; + int min_pkt_len, status; + unsigned int intr0; + int num_rx_pkt = 0; + short pkt_len; +#if AMD8111E_VLAN_TAG_USED + short vtag; +#endif + int rx_pkt_limit = budget; + unsigned long flags; + + do{ + /* process receive packets until we use the quota*/ + /* If we own the next entry, it's a new packet. Send it up. */ + while(1) { + status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); + if (status & OWN_BIT) + break; + + /* + * There is a tricky error noted by John Murphy, + * to Russ Nelson: Even with + * full-sized * buffers it's possible for a + * jabber packet to use two buffers, with only + * the last correctly noting the error. 
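+			 * For that reason a frame is only accepted when
+			 * both STP_BIT and ENP_BIT are set, i.e. when it
+			 * starts and ends in the same descriptor; anything
+			 * else is dropped and its descriptor is handed
+			 * straight back to the hardware.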
+ */ + + if(status & ERR_BIT) { + /* reseting flags */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + goto err_next_pkt; + } + /* check for STP and ENP */ + if(!((status & STP_BIT) && (status & ENP_BIT))){ + /* reseting flags */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + goto err_next_pkt; + } + pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; + +#if AMD8111E_VLAN_TAG_USED + vtag = status & TT_MASK; + /*MAC will strip vlan tag*/ + if (vtag != 0) + min_pkt_len =MIN_PKT_LEN - 4; + else +#endif + min_pkt_len =MIN_PKT_LEN; + + if (pkt_len < min_pkt_len) { + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + lp->drv_rx_errors++; + goto err_next_pkt; + } + if(--rx_pkt_limit < 0) + goto rx_not_empty; + if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){ + /* if allocation fail, + ignore that pkt and go to next one */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + lp->drv_rx_errors++; + goto err_next_pkt; + } + + skb_reserve(new_skb, 2); + skb = lp->rx_skbuff[rx_index]; + pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], + lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[rx_index] = new_skb; + lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, + new_skb->data, + lp->rx_buff_len-2, + PCI_DMA_FROMDEVICE); + + skb->protocol = eth_type_trans(skb, dev); + +#if AMD8111E_VLAN_TAG_USED + if (vtag == TT_VLAN_TAGGED){ + u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); + __vlan_hwaccel_put_tag(skb, vlan_tag); + } +#endif + netif_receive_skb(skb); + /*COAL update rx coalescing parameters*/ + lp->coal_conf.rx_packets++; + lp->coal_conf.rx_bytes += pkt_len; + num_rx_pkt++; + + err_next_pkt: + lp->rx_ring[rx_index].buff_phy_addr + = cpu_to_le32(lp->rx_dma_addr[rx_index]); + lp->rx_ring[rx_index].buff_count = + cpu_to_le16(lp->rx_buff_len-2); + wmb(); + lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); + rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; + } + /* Check the interrupt status register for more packets in the + mean time. Process them since we have not used up our quota.*/ + + intr0 = readl(mmio + INT0); + /*Ack receive packets */ + writel(intr0 & RINT0,mmio + INT0); + + } while(intr0 & RINT0); + + if (rx_pkt_limit > 0) { + /* Receive descriptor is empty now */ + spin_lock_irqsave(&lp->lock, flags); + __napi_complete(napi); + writel(VAL0|RINTEN0, mmio + INTEN0); + writel(VAL2 | RDMD0, mmio + CMD0); + spin_unlock_irqrestore(&lp->lock, flags); + } + +rx_not_empty: + return num_rx_pkt; +} + +/* +This function will indicate the link status to the kernel. +*/ +static int amd8111e_link_change(struct net_device* dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int status0,speed; + + /* read the link change */ + status0 = readl(lp->mmio + STAT0); + + if(status0 & LINK_STATS){ + if(status0 & AUTONEG_COMPLETE) + lp->link_config.autoneg = AUTONEG_ENABLE; + else + lp->link_config.autoneg = AUTONEG_DISABLE; + + if(status0 & FULL_DPLX) + lp->link_config.duplex = DUPLEX_FULL; + else + lp->link_config.duplex = DUPLEX_HALF; + speed = (status0 & SPEED_MASK) >> 7; + if(speed == PHY_SPEED_10) + lp->link_config.speed = SPEED_10; + else if(speed == PHY_SPEED_100) + lp->link_config.speed = SPEED_100; + + printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name, + (lp->link_config.speed == SPEED_100) ? "100": "10", + (lp->link_config.duplex == DUPLEX_FULL)? 
"Full": "Half"); + netif_carrier_on(dev); + } + else{ + lp->link_config.speed = SPEED_INVALID; + lp->link_config.duplex = DUPLEX_INVALID; + lp->link_config.autoneg = AUTONEG_INVALID; + printk(KERN_INFO "%s: Link is Down.\n",dev->name); + netif_carrier_off(dev); + } + + return 0; +} +/* +This function reads the mib counters. +*/ +static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER) +{ + unsigned int status; + unsigned int data; + unsigned int repeat = REPEAT_CNT; + + writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR); + do { + status = readw(mmio + MIB_ADDR); + udelay(2); /* controller takes MAX 2 us to get mib data */ + } + while (--repeat && (status & MIB_CMD_ACTIVE)); + + data = readl(mmio + MIB_DATA); + return data; +} + +/* + * This function reads the mib registers and returns the hardware statistics. + * It updates previous internal driver statistics with new values. + */ +static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + void __iomem *mmio = lp->mmio; + unsigned long flags; + struct net_device_stats *new_stats = &dev->stats; + + if (!lp->opened) + return new_stats; + spin_lock_irqsave (&lp->lock, flags); + + /* stats.rx_packets */ + new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+ + amd8111e_read_mib(mmio, rcv_multicast_pkts)+ + amd8111e_read_mib(mmio, rcv_unicast_pkts); + + /* stats.tx_packets */ + new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets); + + /*stats.rx_bytes */ + new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets); + + /* stats.tx_bytes */ + new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets); + + /* stats.rx_errors */ + /* hw errors + errors driver reported */ + new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+ + amd8111e_read_mib(mmio, rcv_fragments)+ + amd8111e_read_mib(mmio, rcv_jabbers)+ + amd8111e_read_mib(mmio, rcv_alignment_errors)+ + amd8111e_read_mib(mmio, rcv_fcs_errors)+ + amd8111e_read_mib(mmio, rcv_miss_pkts)+ + lp->drv_rx_errors; + + /* stats.tx_errors */ + new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts); + + /* stats.rx_dropped*/ + new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts); + + /* stats.tx_dropped*/ + new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts); + + /* stats.multicast*/ + new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts); + + /* stats.collisions*/ + new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions); + + /* stats.rx_length_errors*/ + new_stats->rx_length_errors = + amd8111e_read_mib(mmio, rcv_undersize_pkts)+ + amd8111e_read_mib(mmio, rcv_oversize_pkts); + + /* stats.rx_over_errors*/ + new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); + + /* stats.rx_crc_errors*/ + new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors); + + /* stats.rx_frame_errors*/ + new_stats->rx_frame_errors = + amd8111e_read_mib(mmio, rcv_alignment_errors); + + /* stats.rx_fifo_errors */ + new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); + + /* stats.rx_missed_errors */ + new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); + + /* stats.tx_aborted_errors*/ + new_stats->tx_aborted_errors = + amd8111e_read_mib(mmio, xmt_excessive_collision); + + /* stats.tx_carrier_errors*/ + new_stats->tx_carrier_errors = + amd8111e_read_mib(mmio, xmt_loss_carrier); + + /* stats.tx_fifo_errors*/ + new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts); + + /* stats.tx_window_errors*/ 
+ new_stats->tx_window_errors = + amd8111e_read_mib(mmio, xmt_late_collision); + + /* Reset the mibs for collecting new statistics */ + /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/ + + spin_unlock_irqrestore (&lp->lock, flags); + + return new_stats; +} +/* This function recalculate the interrupt coalescing mode on every interrupt +according to the datarate and the packet rate. +*/ +static int amd8111e_calc_coalesce(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf; + int tx_pkt_rate; + int rx_pkt_rate; + int tx_data_rate; + int rx_data_rate; + int rx_pkt_size; + int tx_pkt_size; + + tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets; + coal_conf->tx_prev_packets = coal_conf->tx_packets; + + tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes; + coal_conf->tx_prev_bytes = coal_conf->tx_bytes; + + rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets; + coal_conf->rx_prev_packets = coal_conf->rx_packets; + + rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes; + coal_conf->rx_prev_bytes = coal_conf->rx_bytes; + + if(rx_pkt_rate < 800){ + if(coal_conf->rx_coal_type != NO_COALESCE){ + + coal_conf->rx_timeout = 0x0; + coal_conf->rx_event_count = 0; + amd8111e_set_coalesce(dev,RX_INTR_COAL); + coal_conf->rx_coal_type = NO_COALESCE; + } + } + else{ + + rx_pkt_size = rx_data_rate/rx_pkt_rate; + if (rx_pkt_size < 128){ + if(coal_conf->rx_coal_type != NO_COALESCE){ + + coal_conf->rx_timeout = 0; + coal_conf->rx_event_count = 0; + amd8111e_set_coalesce(dev,RX_INTR_COAL); + coal_conf->rx_coal_type = NO_COALESCE; + } + + } + else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){ + + if(coal_conf->rx_coal_type != LOW_COALESCE){ + coal_conf->rx_timeout = 1; + coal_conf->rx_event_count = 4; + amd8111e_set_coalesce(dev,RX_INTR_COAL); + coal_conf->rx_coal_type = LOW_COALESCE; + } + } + else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){ + + if(coal_conf->rx_coal_type != MEDIUM_COALESCE){ + coal_conf->rx_timeout = 1; + coal_conf->rx_event_count = 4; + amd8111e_set_coalesce(dev,RX_INTR_COAL); + coal_conf->rx_coal_type = MEDIUM_COALESCE; + } + + } + else if(rx_pkt_size >= 1024){ + if(coal_conf->rx_coal_type != HIGH_COALESCE){ + coal_conf->rx_timeout = 2; + coal_conf->rx_event_count = 3; + amd8111e_set_coalesce(dev,RX_INTR_COAL); + coal_conf->rx_coal_type = HIGH_COALESCE; + } + } + } + /* NOW FOR TX INTR COALESC */ + if(tx_pkt_rate < 800){ + if(coal_conf->tx_coal_type != NO_COALESCE){ + + coal_conf->tx_timeout = 0x0; + coal_conf->tx_event_count = 0; + amd8111e_set_coalesce(dev,TX_INTR_COAL); + coal_conf->tx_coal_type = NO_COALESCE; + } + } + else{ + + tx_pkt_size = tx_data_rate/tx_pkt_rate; + if (tx_pkt_size < 128){ + + if(coal_conf->tx_coal_type != NO_COALESCE){ + + coal_conf->tx_timeout = 0; + coal_conf->tx_event_count = 0; + amd8111e_set_coalesce(dev,TX_INTR_COAL); + coal_conf->tx_coal_type = NO_COALESCE; + } + + } + else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){ + + if(coal_conf->tx_coal_type != LOW_COALESCE){ + coal_conf->tx_timeout = 1; + coal_conf->tx_event_count = 2; + amd8111e_set_coalesce(dev,TX_INTR_COAL); + coal_conf->tx_coal_type = LOW_COALESCE; + + } + } + else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){ + + if(coal_conf->tx_coal_type != MEDIUM_COALESCE){ + coal_conf->tx_timeout = 2; + coal_conf->tx_event_count = 5; + amd8111e_set_coalesce(dev,TX_INTR_COAL); + coal_conf->tx_coal_type = MEDIUM_COALESCE; + } + + } + else if(tx_pkt_size >= 1024){ + if 
(tx_pkt_size >= 1024){
+			if(coal_conf->tx_coal_type !=  HIGH_COALESCE){
+				coal_conf->tx_timeout = 4;
+				coal_conf->tx_event_count = 8;
+				amd8111e_set_coalesce(dev,TX_INTR_COAL);
+				coal_conf->tx_coal_type = HIGH_COALESCE;
+			}
+		}
+	}
+	}
+	return 0;
+
+}
+/*
+This is the device interrupt function. It handles transmit, receive, link change and hardware timer interrupts.
+*/
+static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
+{
+
+	struct net_device * dev = (struct net_device *) dev_id;
+	struct amd8111e_priv *lp = netdev_priv(dev);
+	void __iomem *mmio = lp->mmio;
+	unsigned int intr0, intren0;
+	unsigned int handled = 1;
+
+	if(unlikely(dev == NULL))
+		return IRQ_NONE;
+
+	spin_lock(&lp->lock);
+
+	/* disabling interrupt */
+	writel(INTREN, mmio + CMD0);
+
+	/* Read interrupt status */
+	intr0 = readl(mmio + INT0);
+	intren0 = readl(mmio + INTEN0);
+
+	/* Process all the INT events until the INTR bit is clear. */
+
+	if (!(intr0 & INTR)){
+		handled = 0;
+		goto err_no_interrupt;
+	}
+
+	/* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
+	writel(intr0, mmio + INT0);
+
+	/* Check if Receive Interrupt has occurred. */
+	if (intr0 & RINT0) {
+		if (napi_schedule_prep(&lp->napi)) {
+			/* Disable receive interrupts */
+			writel(RINTEN0, mmio + INTEN0);
+			/* Schedule a polling routine */
+			__napi_schedule(&lp->napi);
+		} else if (intren0 & RINTEN0) {
+			printk("************Driver bug! interrupt while in poll\n");
+			/* Fix by disabling receive interrupts */
+			writel(RINTEN0, mmio + INTEN0);
+		}
+	}
+
+	/* Check if Transmit Interrupt has occurred. */
+	if (intr0 & TINT0)
+		amd8111e_tx(dev);
+
+	/* Check if Link Change Interrupt has occurred. */
+	if (intr0 & LCINT)
+		amd8111e_link_change(dev);
+
+	/* Check if Hardware Timer Interrupt has occurred. */
+	if (intr0 & STINT)
+		amd8111e_calc_coalesce(dev);
+
+err_no_interrupt:
+	writel( VAL0 | INTREN,mmio + CMD0);
+
+	spin_unlock(&lp->lock);
+
+	return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void amd8111e_poll(struct net_device *dev)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	amd8111e_interrupt(0, dev);
+	local_irq_restore(flags);
+}
+#endif
+
+
+/*
+This function closes the network interface and updates the statistics so that the most recent statistics will be available after the interface is down.
+*/
+static int amd8111e_close(struct net_device * dev)
+{
+	struct amd8111e_priv *lp = netdev_priv(dev);
+	netif_stop_queue(dev);
+
+	napi_disable(&lp->napi);
+
+	spin_lock_irq(&lp->lock);
+
+	amd8111e_disable_interrupt(lp);
+	amd8111e_stop_chip(lp);
+
+	/* Free transmit and receive skbs */
+	amd8111e_free_skbs(lp->amd8111e_net_dev);
+
+	netif_carrier_off(lp->amd8111e_net_dev);
+
+	/* Delete ipg timer */
+	if(lp->options & OPTION_DYN_IPG_ENABLE)
+		del_timer_sync(&lp->ipg_data.ipg_timer);
+
+	spin_unlock_irq(&lp->lock);
+	free_irq(dev->irq, dev);
+	amd8111e_free_ring(lp);
+
+	/* Update the statistics before closing */
+	amd8111e_get_stats(dev);
+	lp->opened = 0;
+	return 0;
+}
+/* This function opens a new interface. It requests an irq for the device, initializes the device, buffers and descriptors, and starts the device.
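+
+The ordering matters: the irq is claimed first (failing with -EAGAIN if
+it cannot be), NAPI is enabled before the chip is started, and the
+transmit queue is only started once amd8111e_restart() has programmed
+the rings; if the restart fails, NAPI and the irq are released again
+and -ENOMEM is returned.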
+*/
+static int amd8111e_open(struct net_device * dev )
+{
+	struct amd8111e_priv *lp = netdev_priv(dev);
+
+	if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
+					 dev->name, dev))
+		return -EAGAIN;
+
+	napi_enable(&lp->napi);
+
+	spin_lock_irq(&lp->lock);
+
+	amd8111e_init_hw_default(lp);
+
+	if(amd8111e_restart(dev)){
+		spin_unlock_irq(&lp->lock);
+		napi_disable(&lp->napi);
+		if (dev->irq)
+			free_irq(dev->irq, dev);
+		return -ENOMEM;
+	}
+	/* Start ipg timer */
+	if(lp->options & OPTION_DYN_IPG_ENABLE){
+		add_timer(&lp->ipg_data.ipg_timer);
+		printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
+	}
+
+	lp->opened = 1;
+
+	spin_unlock_irq(&lp->lock);
+
+	netif_start_queue(dev);
+
+	return 0;
+}
+/*
+This function checks if there are any transmit descriptors available to queue more packets.
+*/
+static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
+{
+	int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
+	if (lp->tx_skbuff[tx_index])
+		return -1;
+	else
+		return 0;
+
+}
+/*
+This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc.
+*/
+
+static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
+				       struct net_device * dev)
+{
+	struct amd8111e_priv *lp = netdev_priv(dev);
+	int tx_index;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->lock, flags);
+
+	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
+
+	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
+
+	lp->tx_skbuff[tx_index] = skb;
+	lp->tx_ring[tx_index].tx_flags = 0;
+
+#if AMD8111E_VLAN_TAG_USED
+	if (vlan_tx_tag_present(skb)) {
+		lp->tx_ring[tx_index].tag_ctrl_cmd |=
+				cpu_to_le16(TCC_VLAN_INSERT);
+		lp->tx_ring[tx_index].tag_ctrl_info =
+				cpu_to_le16(vlan_tx_tag_get(skb));
+
+	}
+#endif
+	lp->tx_dma_addr[tx_index] =
+	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+	lp->tx_ring[tx_index].buff_phy_addr =
+	    cpu_to_le32(lp->tx_dma_addr[tx_index]);
+
+	/* Set FCS and LTINT bits */
+	wmb();
+	lp->tx_ring[tx_index].tx_flags |=
+	    cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
+
+	lp->tx_idx++;
+
+	/* Trigger an immediate send poll. */
+	writel( VAL1 | TDMD0, lp->mmio + CMD0);
+	writel( VAL2 | RDMD0,lp->mmio + CMD0);
+
+	if(amd8111e_tx_queue_avail(lp) < 0){
+		netif_stop_queue(dev);
+	}
+	spin_unlock_irqrestore(&lp->lock, flags);
+	return NETDEV_TX_OK;
+}
+/*
+This function returns all the memory mapped registers of the device.
+*/
+static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
+{
+	void __iomem *mmio = lp->mmio;
+	/* Read only necessary registers */
+	buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
+	buf[1] = readl(mmio + XMT_RING_LEN0);
+	buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
+	buf[3] = readl(mmio + RCV_RING_LEN0);
+	buf[4] = readl(mmio + CMD0);
+	buf[5] = readl(mmio + CMD2);
+	buf[6] = readl(mmio + CMD3);
+	buf[7] = readl(mmio + CMD7);
+	buf[8] = readl(mmio + INT0);
+	buf[9] = readl(mmio + INTEN0);
+	buf[10] = readl(mmio + LADRF);
+	buf[11] = readl(mmio + LADRF+4);
+	buf[12] = readl(mmio + STAT0);
+}
+
+
+/*
+This function sets promiscuous mode, all-multi mode or the multicast address
+list to the device.
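+
+Multicast addresses are folded into the 64-bit logical address filter
+(LADRF) by hashing; the top six bits of the little-endian CRC of each
+address select one of the 64 filter bits:
+
+	bit_num = (ether_crc_le(ETH_ALEN, addr) >> 26) & 0x3f;
+	mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
+
+Since the hash is lossy, frames for unwanted groups can still pass the
+hardware filter and are left for the stack to discard.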
+*/ +static void amd8111e_set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + struct amd8111e_priv *lp = netdev_priv(dev); + u32 mc_filter[2] ; + int bit_num; + + if(dev->flags & IFF_PROMISC){ + writel( VAL2 | PROM, lp->mmio + CMD2); + return; + } + else + writel( PROM, lp->mmio + CMD2); + if (dev->flags & IFF_ALLMULTI || + netdev_mc_count(dev) > MAX_FILTER_SIZE) { + /* get all multicast packet */ + mc_filter[1] = mc_filter[0] = 0xffffffff; + lp->options |= OPTION_MULTICAST_ENABLE; + amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF); + return; + } + if (netdev_mc_empty(dev)) { + /* get only own packets */ + mc_filter[1] = mc_filter[0] = 0; + lp->options &= ~OPTION_MULTICAST_ENABLE; + amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF); + /* disable promiscuous mode */ + writel(PROM, lp->mmio + CMD2); + return; + } + /* load all the multicast addresses in the logic filter */ + lp->options |= OPTION_MULTICAST_ENABLE; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f; + mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); + } + amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF); + + /* To eliminate PCI posting bug */ + readl(lp->mmio + CMD2); + +} + +static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + struct pci_dev *pci_dev = lp->pci_dev; + strcpy (info->driver, MODULE_NAME); + strcpy (info->version, MODULE_VERS); + sprintf(info->fw_version,"%u",chip_version); + strcpy (info->bus_info, pci_name(pci_dev)); +} + +static int amd8111e_get_regs_len(struct net_device *dev) +{ + return AMD8111E_REG_DUMP_LEN; +} + +static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + regs->version = 0; + amd8111e_read_regs(lp, buf); +} + +static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + spin_lock_irq(&lp->lock); + mii_ethtool_gset(&lp->mii_if, ecmd); + spin_unlock_irq(&lp->lock); + return 0; +} + +static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int res; + spin_lock_irq(&lp->lock); + res = mii_ethtool_sset(&lp->mii_if, ecmd); + spin_unlock_irq(&lp->lock); + return res; +} + +static int amd8111e_nway_reset(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + return mii_nway_restart(&lp->mii_if); +} + +static u32 amd8111e_get_link(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + return mii_link_ok(&lp->mii_if); +} + +static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + wol_info->supported = WAKE_MAGIC|WAKE_PHY; + if (lp->options & OPTION_WOL_ENABLE) + wol_info->wolopts = WAKE_MAGIC; +} + +static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY)) + return -EINVAL; + spin_lock_irq(&lp->lock); + if (wol_info->wolopts & WAKE_MAGIC) + lp->options |= + (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE); + else if(wol_info->wolopts & WAKE_PHY) + lp->options |= + (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE); + else + lp->options &= ~OPTION_WOL_ENABLE; + spin_unlock_irq(&lp->lock); + return 0; +} + +static const struct 
ethtool_ops ops = {
+	.get_drvinfo = amd8111e_get_drvinfo,
+	.get_regs_len = amd8111e_get_regs_len,
+	.get_regs = amd8111e_get_regs,
+	.get_settings = amd8111e_get_settings,
+	.set_settings = amd8111e_set_settings,
+	.nway_reset = amd8111e_nway_reset,
+	.get_link = amd8111e_get_link,
+	.get_wol = amd8111e_get_wol,
+	.set_wol = amd8111e_set_wol,
+};
+
+/*
+This function handles all the ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation and sets/gets WOL options for the ethtool application.
+*/
+
+static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
+{
+	struct mii_ioctl_data *data = if_mii(ifr);
+	struct amd8111e_priv *lp = netdev_priv(dev);
+	int err;
+	u32 mii_regval;
+
+	switch(cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = lp->ext_phy_addr;
+
+		/* fallthru */
+	case SIOCGMIIREG:
+
+		spin_lock_irq(&lp->lock);
+		err = amd8111e_read_phy(lp, data->phy_id,
+			data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
+		spin_unlock_irq(&lp->lock);
+
+		data->val_out = mii_regval;
+		return err;
+
+	case SIOCSMIIREG:
+
+		spin_lock_irq(&lp->lock);
+		err = amd8111e_write_phy(lp, data->phy_id,
+			data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
+		spin_unlock_irq(&lp->lock);
+
+		return err;
+
+	default:
+		/* do nothing */
+		break;
+	}
+	return -EOPNOTSUPP;
+}
+static int amd8111e_set_mac_address(struct net_device *dev, void *p)
+{
+	struct amd8111e_priv *lp = netdev_priv(dev);
+	int i;
+	struct sockaddr *addr = p;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	spin_lock_irq(&lp->lock);
+	/* Setting the MAC address to the device */
+	for(i = 0; i < ETH_ADDR_LEN; i++)
+		writeb( dev->dev_addr[i], lp->mmio + PADR + i );
+
+	spin_unlock_irq(&lp->lock);
+
+	return 0;
+}
+
+/*
+This function changes the mtu of the device. It restarts the device to initialize the descriptors with new receive buffers.
+*/
+static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct amd8111e_priv *lp = netdev_priv(dev);
+	int err;
+
+	if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
+		return -EINVAL;
+
+	if (!netif_running(dev)) {
+		/* new_mtu will be used
+		   when the device starts next time */
+		dev->mtu = new_mtu;
+		return 0;
+	}
+
+	spin_lock_irq(&lp->lock);
+
+	/* stop the chip */
+	writel(RUN, lp->mmio + CMD0);
+
+	dev->mtu = new_mtu;
+
+	err = amd8111e_restart(dev);
+	spin_unlock_irq(&lp->lock);
+	if(!err)
+		netif_start_queue(dev);
+	return err;
+}
+
+static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
+{
+	writel( VAL1|MPPLBA, lp->mmio + CMD3);
+	writel( VAL0|MPEN_SW, lp->mmio + CMD7);
+
+	/* To eliminate PCI posting bug */
+	readl(lp->mmio + CMD7);
+	return 0;
+}
+
+static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
+{
+
+	/* Adapter is already stopped/suspended/interrupt-disabled */
+	writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
+
+	/* To eliminate PCI posting bug */
+	readl(lp->mmio + CMD7);
+	return 0;
+}
+
+/*
+ * This function is called when a packet transmission fails to complete
+ * within a reasonable period, on the assumption that an interrupt has
+ * failed or the interface is locked up. This function will reinitialize
+ * the hardware.
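+ *
+ * The watchdog fires when no transmit has completed within
+ * dev->watchdog_timeo jiffies (AMD8111E_TX_TIMEOUT, set at probe
+ * time); recovery is a full amd8111e_restart() of the controller,
+ * after which the transmit queue is woken if the restart succeeded.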
+ */ +static void amd8111e_tx_timeout(struct net_device *dev) +{ + struct amd8111e_priv* lp = netdev_priv(dev); + int err; + + printk(KERN_ERR "%s: transmit timed out, resetting\n", + dev->name); + spin_lock_irq(&lp->lock); + err = amd8111e_restart(dev); + spin_unlock_irq(&lp->lock); + if(!err) + netif_wake_queue(dev); +} +static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + struct amd8111e_priv *lp = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + /* disable the interrupt */ + spin_lock_irq(&lp->lock); + amd8111e_disable_interrupt(lp); + spin_unlock_irq(&lp->lock); + + netif_device_detach(dev); + + /* stop chip */ + spin_lock_irq(&lp->lock); + if(lp->options & OPTION_DYN_IPG_ENABLE) + del_timer_sync(&lp->ipg_data.ipg_timer); + amd8111e_stop_chip(lp); + spin_unlock_irq(&lp->lock); + + if(lp->options & OPTION_WOL_ENABLE){ + /* enable wol */ + if(lp->options & OPTION_WAKE_MAGIC_ENABLE) + amd8111e_enable_magicpkt(lp); + if(lp->options & OPTION_WAKE_PHY_ENABLE) + amd8111e_enable_link_change(lp); + + pci_enable_wake(pci_dev, PCI_D3hot, 1); + pci_enable_wake(pci_dev, PCI_D3cold, 1); + + } + else{ + pci_enable_wake(pci_dev, PCI_D3hot, 0); + pci_enable_wake(pci_dev, PCI_D3cold, 0); + } + + pci_save_state(pci_dev); + pci_set_power_state(pci_dev, PCI_D3hot); + + return 0; +} +static int amd8111e_resume(struct pci_dev *pci_dev) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + struct amd8111e_priv *lp = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + pci_set_power_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); + + pci_enable_wake(pci_dev, PCI_D3hot, 0); + pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */ + + netif_device_attach(dev); + + spin_lock_irq(&lp->lock); + amd8111e_restart(dev); + /* Restart ipg timer */ + if(lp->options & OPTION_DYN_IPG_ENABLE) + mod_timer(&lp->ipg_data.ipg_timer, + jiffies + IPG_CONVERGE_JIFFIES); + spin_unlock_irq(&lp->lock); + + return 0; +} + + +static void __devexit amd8111e_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + if (dev) { + unregister_netdev(dev); + iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio); + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } +} +static void amd8111e_config_ipg(struct net_device* dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + struct ipg_info* ipg_data = &lp->ipg_data; + void __iomem *mmio = lp->mmio; + unsigned int prev_col_cnt = ipg_data->col_cnt; + unsigned int total_col_cnt; + unsigned int tmp_ipg; + + if(lp->link_config.duplex == DUPLEX_FULL){ + ipg_data->ipg = DEFAULT_IPG; + return; + } + + if(ipg_data->ipg_state == SSTATE){ + + if(ipg_data->timer_tick == IPG_STABLE_TIME){ + + ipg_data->timer_tick = 0; + ipg_data->ipg = MIN_IPG - IPG_STEP; + ipg_data->current_ipg = MIN_IPG; + ipg_data->diff_col_cnt = 0xFFFFFFFF; + ipg_data->ipg_state = CSTATE; + } + else + ipg_data->timer_tick++; + } + + if(ipg_data->ipg_state == CSTATE){ + + /* Get the current collision count */ + + total_col_cnt = ipg_data->col_cnt = + amd8111e_read_mib(mmio, xmt_collisions); + + if ((total_col_cnt - prev_col_cnt) < + (ipg_data->diff_col_cnt)){ + + ipg_data->diff_col_cnt = + total_col_cnt - prev_col_cnt ; + + ipg_data->ipg = ipg_data->current_ipg; + } + + ipg_data->current_ipg += IPG_STEP; + + if (ipg_data->current_ipg <= MAX_IPG) + tmp_ipg = ipg_data->current_ipg; + else{ + tmp_ipg = ipg_data->ipg; + 
ipg_data->ipg_state = SSTATE; + } + writew((u32)tmp_ipg, mmio + IPG); + writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1); + } + mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES); + return; + +} + +static void __devinit amd8111e_probe_ext_phy(struct net_device* dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int i; + + for (i = 0x1e; i >= 0; i--) { + u32 id1, id2; + + if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1)) + continue; + if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2)) + continue; + lp->ext_phy_id = (id1 << 16) | id2; + lp->ext_phy_addr = i; + return; + } + lp->ext_phy_id = 0; + lp->ext_phy_addr = 1; +} + +static const struct net_device_ops amd8111e_netdev_ops = { + .ndo_open = amd8111e_open, + .ndo_stop = amd8111e_close, + .ndo_start_xmit = amd8111e_start_xmit, + .ndo_tx_timeout = amd8111e_tx_timeout, + .ndo_get_stats = amd8111e_get_stats, + .ndo_set_multicast_list = amd8111e_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = amd8111e_set_mac_address, + .ndo_do_ioctl = amd8111e_ioctl, + .ndo_change_mtu = amd8111e_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = amd8111e_poll, +#endif +}; + +static int __devinit amd8111e_probe_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int err,i,pm_cap; + unsigned long reg_addr,reg_len; + struct amd8111e_priv* lp; + struct net_device* dev; + + err = pci_enable_device(pdev); + if(err){ + printk(KERN_ERR "amd8111e: Cannot enable new PCI device, " + "exiting.\n"); + return err; + } + + if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){ + printk(KERN_ERR "amd8111e: Cannot find PCI base address, " + "exiting.\n"); + err = -ENODEV; + goto err_disable_pdev; + } + + err = pci_request_regions(pdev, MODULE_NAME); + if(err){ + printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, " + "exiting.\n"); + goto err_disable_pdev; + } + + pci_set_master(pdev); + + /* Find power-management capability. 
*/
+	if ((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM)) == 0) {
+		printk(KERN_ERR "amd8111e: No Power Management capability, "
+		       "exiting.\n");
+		err = -ENODEV;
+		goto err_free_reg;
+	}
+
+	/* Initialize DMA */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
+		printk(KERN_ERR "amd8111e: DMA not supported, "
+		       "exiting.\n");
+		err = -ENODEV;
+		goto err_free_reg;
+	}
+
+	reg_addr = pci_resource_start(pdev, 0);
+	reg_len = pci_resource_len(pdev, 0);
+
+	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
+	if (!dev) {
+		printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
+		err = -ENOMEM;
+		goto err_free_reg;
+	}
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+#if AMD8111E_VLAN_TAG_USED
+	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+#endif
+
+	lp = netdev_priv(dev);
+	lp->pci_dev = pdev;
+	lp->amd8111e_net_dev = dev;
+	lp->pm_cap = pm_cap;
+
+	spin_lock_init(&lp->lock);
+
+	lp->mmio = ioremap(reg_addr, reg_len);
+	if (!lp->mmio) {
+		printk(KERN_ERR "amd8111e: Cannot map device registers, "
+		       "exiting.\n");
+		err = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	/* Initializing MAC address */
+	for (i = 0; i < ETH_ADDR_LEN; i++)
+		dev->dev_addr[i] = readb(lp->mmio + PADR + i);
+
+	/* Setting user-defined parameters */
+	lp->ext_phy_option = speed_duplex[card_idx];
+	if (coalesce[card_idx])
+		lp->options |= OPTION_INTR_COAL_ENABLE;
+	if (dynamic_ipg[card_idx++])
+		lp->options |= OPTION_DYN_IPG_ENABLE;
+
+
+	/* Initialize driver entry points */
+	dev->netdev_ops = &amd8111e_netdev_ops;
+	SET_ETHTOOL_OPS(dev, &ops);
+	dev->irq = pdev->irq;
+	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
+	netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
+
+#if AMD8111E_VLAN_TAG_USED
+	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+#endif
+	/* Probe the external PHY */
+	amd8111e_probe_ext_phy(dev);
+
+	/* setting mii default values */
+	lp->mii_if.dev = dev;
+	lp->mii_if.mdio_read = amd8111e_mdio_read;
+	lp->mii_if.mdio_write = amd8111e_mdio_write;
+	lp->mii_if.phy_id = lp->ext_phy_addr;
+
+	/* Set receive buffer length and set jumbo option */
+	amd8111e_set_rx_buff_len(dev);
+
+
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_ERR "amd8111e: Cannot register net device, "
+		       "exiting.\n");
+		goto err_iounmap;
+	}
+
+	pci_set_drvdata(pdev, dev);
+
+	/* Initialize software ipg timer */
+	if (lp->options & OPTION_DYN_IPG_ENABLE) {
+		init_timer(&lp->ipg_data.ipg_timer);
+		lp->ipg_data.ipg_timer.data = (unsigned long)dev;
+		lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
+		lp->ipg_data.ipg_timer.expires = jiffies +
+						 IPG_CONVERGE_JIFFIES;
+		lp->ipg_data.ipg = DEFAULT_IPG;
+		lp->ipg_data.ipg_state = CSTATE;
+	}
+
+	/* display driver and device information */
+
+	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
+	printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
+	       dev->name, MODULE_VERS);
+	printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
+	       dev->name, chip_version, dev->dev_addr);
+	if (lp->ext_phy_id)
+		printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
+		       dev->name, lp->ext_phy_id, lp->ext_phy_addr);
+	else
+		printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
+		       dev->name);
+	return 0;
+err_iounmap:
+	iounmap(lp->mmio);
+
+err_free_dev:
+	free_netdev(dev);
+
+err_free_reg:
+	pci_release_regions(pdev);
+
+err_disable_pdev:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+
+}
+
+static struct pci_driver amd8111e_driver = {
+	.name = MODULE_NAME,
+	.id_table = amd8111e_pci_tbl,
+	.probe = amd8111e_probe_one,
+	.remove =
__devexit_p(amd8111e_remove_one),
+	.suspend = amd8111e_suspend,
+	.resume = amd8111e_resume
+};
+
+static int __init amd8111e_init(void)
+{
+	return pci_register_driver(&amd8111e_driver);
+}
+
+static void __exit amd8111e_cleanup(void)
+{
+	pci_unregister_driver(&amd8111e_driver);
+}
+
+module_init(amd8111e_init);
+module_exit(amd8111e_cleanup);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
new file mode 100644
index 0000000..2ff2e7a
--- /dev/null
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -0,0 +1,816 @@
+/*
+ * Advanced Micro Devices Inc. AMD8111E Linux Network Driver
+ * Copyright (C) 2003 Advanced Micro Devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+
+Module Name:
+
+	amd8111e.h
+
+Abstract:
+
+	AMD8111 based 10/100 Ethernet Controller driver definitions.
+
+Environment:
+
+	Kernel Mode
+
+Revision History:
+	3.0.0
+	   Initial Revision.
+	3.0.1
+*/
+
+#ifndef _AMD811E_H
+#define _AMD811E_H
+
+/* Command style register access
+
+Registers CMD0, CMD2, CMD3, CMD7 and INTEN0 use a write-access technique
+called command-style access. It allows writing to selected bits of a
+register without altering the bits that are not selected. Command-style
+registers are divided into 4 bytes that can be written independently.
+The high-order bit of each byte is the value bit, which specifies the
+value that will be written into the selected bits of the register.
+
+e.g., if the value 10011010b is written into the least significant byte
+of a command-style register, bits 1, 3 and 4 of the register will be set
+to 1, and the other bits will not be altered. If the value 00011010b is
+written into the same byte, bits 1, 3 and 4 will be cleared to 0 and the
+other bits will not be altered.
+
+*/
+
+/* Offset for Memory Mapped Registers.
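+ *
+ * (A short illustration of the command-style convention described
+ * above, using the CMD7 bits defined in this header; the first write
+ * is what amd8111e_enable_magicpkt() in amd8111e.c does:
+ *
+ *	writel(VAL0 | MPEN_SW, mmio + CMD7);	// VAL bit set: sets MPEN_SW only
+ *	writel(MPEN_SW, mmio + CMD7);		// VAL bit clear: clears MPEN_SW only
+ * )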
*/
+/* 32 bit registers */
+
+#define ASF_STAT		0x00	/* ASF status register */
+#define CHIPID			0x04	/* Chip ID register */
+#define MIB_DATA		0x10	/* MIB data register */
+#define MIB_ADDR		0x14	/* MIB address register */
+#define STAT0			0x30	/* Status0 register */
+#define INT0			0x38	/* Interrupt0 register */
+#define INTEN0			0x40	/* Interrupt0 enable register */
+#define CMD0			0x48	/* Command0 register */
+#define CMD2			0x50	/* Command2 register */
+#define CMD3			0x54	/* Command3 register */
+#define CMD7			0x64	/* Command7 register */
+
+#define CTRL1			0x6C	/* Control1 register */
+#define CTRL2			0x70	/* Control2 register */
+
+#define XMT_RING_LIMIT		0x7C	/* Transmit ring limit register */
+
+#define AUTOPOLL0		0x88	/* Auto-poll0 register */
+#define AUTOPOLL1		0x8A	/* Auto-poll1 register */
+#define AUTOPOLL2		0x8C	/* Auto-poll2 register */
+#define AUTOPOLL3		0x8E	/* Auto-poll3 register */
+#define AUTOPOLL4		0x90	/* Auto-poll4 register */
+#define AUTOPOLL5		0x92	/* Auto-poll5 register */
+
+#define AP_VALUE		0x98	/* Auto-poll value register */
+#define DLY_INT_A		0xA8	/* Group A delayed interrupt register */
+#define DLY_INT_B		0xAC	/* Group B delayed interrupt register */
+
+#define FLOW_CONTROL		0xC8	/* Flow control register */
+#define PHY_ACCESS		0xD0	/* PHY access register */
+
+#define STVAL			0xD8	/* Software timer value register */
+
+#define XMT_RING_BASE_ADDR0	0x100	/* Transmit ring0 base addr register */
+#define XMT_RING_BASE_ADDR1	0x108	/* Transmit ring1 base addr register */
+#define XMT_RING_BASE_ADDR2	0x110	/* Transmit ring2 base addr register */
+#define XMT_RING_BASE_ADDR3	0x118	/* Transmit ring3 base addr register */
+
+#define RCV_RING_BASE_ADDR0	0x120	/* Receive ring0 base addr register */
+
+#define PMAT0			0x190	/* OnNow pattern register0 */
+#define PMAT1			0x194	/* OnNow pattern register1 */
+
+/* 16bit registers */
+
+#define XMT_RING_LEN0		0x140	/* Transmit Ring0 length register */
+#define XMT_RING_LEN1		0x144	/* Transmit Ring1 length register */
+#define XMT_RING_LEN2		0x148	/* Transmit Ring2 length register */
+#define XMT_RING_LEN3		0x14C	/* Transmit Ring3 length register */
+
+#define RCV_RING_LEN0		0x150	/* Receive Ring0 length register */
+
+#define SRAM_SIZE		0x178	/* SRAM size register */
+#define SRAM_BOUNDARY		0x17A	/* SRAM boundary register */
+
+/* 48bit register */
+
+#define PADR			0x160	/* Physical address register */
+
+#define IFS1			0x18C	/* Inter-frame spacing Part1 register */
+#define IFS			0x18D	/* Inter-frame spacing register */
+#define IPG			0x18E	/* Inter-frame gap register */
+/* 64bit register */
+
+#define LADRF			0x168	/* Logical address filter register */
+
+
+/* Register Bit Definitions */
+typedef enum {
+
+	ASF_INIT_DONE		= (1 << 1),
+	ASF_INIT_PRESENT	= (1 << 0),
+
+}STAT_ASF_BITS;
+
+typedef enum {
+
+	MIB_CMD_ACTIVE		= (1 << 15),
+	MIB_RD_CMD		= (1 << 13),
+	MIB_CLEAR		= (1 << 12),
+	MIB_ADDRESS		= (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) |
+				  (1 << 4) | (1 << 5),
+}MIB_ADDR_BITS;
+
+
+typedef enum {
+
+	PMAT_DET		= (1 << 12),
+	MP_DET			= (1 << 11),
+	LC_DET			= (1 << 10),
+	SPEED_MASK		= (1 << 9) | (1 << 8) | (1 << 7),
+	FULL_DPLX		= (1 << 6),
+	LINK_STATS		= (1 << 5),
+	AUTONEG_COMPLETE	= (1 << 4),
+	MIIPD			= (1 << 3),
+	RX_SUSPENDED		= (1 << 2),
+	TX_SUSPENDED		= (1 << 1),
+	RUNNING			= (1 << 0),
+
+}STAT0_BITS;
+
+#define PHY_SPEED_10		0x2
+#define PHY_SPEED_100		0x3
+
+/* INT0 0x38, 32bit register */
+typedef enum {
+
+	INTR			= (1 << 31),
+	PCSINT			= (1 << 28),
+	LCINT			= (1 << 27),
+	APINT5			= (1 << 26),
+	APINT4			= (1 << 25),
+	APINT3			= (1 << 24),
+	TINT_SUM		= (1 << 23),
+	APINT2			= (1
<< 22), + APINT1 = (1 << 21), + APINT0 = (1 << 20), + MIIPDTINT = (1 << 19), + MCCINT = (1 << 17), + MREINT = (1 << 16), + RINT_SUM = (1 << 15), + SPNDINT = (1 << 14), + MPINT = (1 << 13), + SINT = (1 << 12), + TINT3 = (1 << 11), + TINT2 = (1 << 10), + TINT1 = (1 << 9), + TINT0 = (1 << 8), + UINT = (1 << 7), + STINT = (1 << 4), + RINT0 = (1 << 0), + +}INT0_BITS; + +typedef enum { + + VAL3 = (1 << 31), /* VAL bit for byte 3 */ + VAL2 = (1 << 23), /* VAL bit for byte 2 */ + VAL1 = (1 << 15), /* VAL bit for byte 1 */ + VAL0 = (1 << 7), /* VAL bit for byte 0 */ + +}VAL_BITS; + +typedef enum { + + /* VAL3 */ + LCINTEN = (1 << 27), + APINT5EN = (1 << 26), + APINT4EN = (1 << 25), + APINT3EN = (1 << 24), + /* VAL2 */ + APINT2EN = (1 << 22), + APINT1EN = (1 << 21), + APINT0EN = (1 << 20), + MIIPDTINTEN = (1 << 19), + MCCIINTEN = (1 << 18), + MCCINTEN = (1 << 17), + MREINTEN = (1 << 16), + /* VAL1 */ + SPNDINTEN = (1 << 14), + MPINTEN = (1 << 13), + TINTEN3 = (1 << 11), + SINTEN = (1 << 12), + TINTEN2 = (1 << 10), + TINTEN1 = (1 << 9), + TINTEN0 = (1 << 8), + /* VAL0 */ + STINTEN = (1 << 4), + RINTEN0 = (1 << 0), + + INTEN0_CLEAR = 0x1F7F7F1F, /* Command style register */ + +}INTEN0_BITS; + +typedef enum { + /* VAL2 */ + RDMD0 = (1 << 16), + /* VAL1 */ + TDMD3 = (1 << 11), + TDMD2 = (1 << 10), + TDMD1 = (1 << 9), + TDMD0 = (1 << 8), + /* VAL0 */ + UINTCMD = (1 << 6), + RX_FAST_SPND = (1 << 5), + TX_FAST_SPND = (1 << 4), + RX_SPND = (1 << 3), + TX_SPND = (1 << 2), + INTREN = (1 << 1), + RUN = (1 << 0), + + CMD0_CLEAR = 0x000F0F7F, /* Command style register */ + +}CMD0_BITS; + +typedef enum { + + /* VAL3 */ + CONDUIT_MODE = (1 << 29), + /* VAL2 */ + RPA = (1 << 19), + DRCVPA = (1 << 18), + DRCVBC = (1 << 17), + PROM = (1 << 16), + /* VAL1 */ + ASTRP_RCV = (1 << 13), + RCV_DROP0 = (1 << 12), + EMBA = (1 << 11), + DXMT2PD = (1 << 10), + LTINTEN = (1 << 9), + DXMTFCS = (1 << 8), + /* VAL0 */ + APAD_XMT = (1 << 6), + DRTY = (1 << 5), + INLOOP = (1 << 4), + EXLOOP = (1 << 3), + REX_RTRY = (1 << 2), + REX_UFLO = (1 << 1), + REX_LCOL = (1 << 0), + + CMD2_CLEAR = 0x3F7F3F7F, /* Command style register */ + +}CMD2_BITS; + +typedef enum { + + /* VAL3 */ + ASF_INIT_DONE_ALIAS = (1 << 29), + /* VAL2 */ + JUMBO = (1 << 21), + VSIZE = (1 << 20), + VLONLY = (1 << 19), + VL_TAG_DEL = (1 << 18), + /* VAL1 */ + EN_PMGR = (1 << 14), + INTLEVEL = (1 << 13), + FORCE_FULL_DUPLEX = (1 << 12), + FORCE_LINK_STATUS = (1 << 11), + APEP = (1 << 10), + MPPLBA = (1 << 9), + /* VAL0 */ + RESET_PHY_PULSE = (1 << 2), + RESET_PHY = (1 << 1), + PHY_RST_POL = (1 << 0), + +}CMD3_BITS; + + +typedef enum { + + /* VAL0 */ + PMAT_SAVE_MATCH = (1 << 4), + PMAT_MODE = (1 << 3), + MPEN_SW = (1 << 1), + LCMODE_SW = (1 << 0), + + CMD7_CLEAR = 0x0000001B /* Command style register */ + +}CMD7_BITS; + + +typedef enum { + + RESET_PHY_WIDTH = (0xF << 16) | (0xF<< 20), /* 0x00FF0000 */ + XMTSP_MASK = (1 << 9) | (1 << 8), /* 9:8 */ + XMTSP_128 = (1 << 9), /* 9 */ + XMTSP_64 = (1 << 8), + CACHE_ALIGN = (1 << 4), + BURST_LIMIT_MASK = (0xF << 0 ), + CTRL1_DEFAULT = 0x00010111, + +}CTRL1_BITS; + +typedef enum { + + FMDC_MASK = (1 << 9)|(1 << 8), /* 9:8 */ + XPHYRST = (1 << 7), + XPHYANE = (1 << 6), + XPHYFD = (1 << 5), + XPHYSP = (1 << 4) | (1 << 3), /* 4:3 */ + APDW_MASK = (1 << 2) | (1 << 1) | (1 << 0), /* 2:0 */ + +}CTRL2_BITS; + +/* XMT_RING_LIMIT 0x7C, 32bit register */ +typedef enum { + + XMT_RING2_LIMIT = (0xFF << 16), /* 23:16 */ + XMT_RING1_LIMIT = (0xFF << 8), /* 15:8 */ + XMT_RING0_LIMIT = (0xFF << 0), /* 7:0 */ + +}XMT_RING_LIMIT_BITS; + +typedef 
enum {
+
+	AP_REG0_EN		= (1 << 15),
+	AP_REG0_ADDR_MASK	= (0xF << 8) | (1 << 12),	/* 12:8 */
+	AP_PHY0_ADDR_MASK	= (0xF << 0) | (1 << 4),	/* 4:0 */
+
+}AUTOPOLL0_BITS;
+
+/* AUTOPOLL1 0x8A, 16bit register */
+typedef enum {
+
+	AP_REG1_EN		= (1 << 15),
+	AP_REG1_ADDR_MASK	= (0xF << 8) | (1 << 12),	/* 12:8 */
+	AP_PRE_SUP1		= (1 << 6),
+	AP_PHY1_DFLT		= (1 << 5),
+	AP_PHY1_ADDR_MASK	= (0xF << 0) | (1 << 4),	/* 4:0 */
+
+}AUTOPOLL1_BITS;
+
+
+typedef enum {
+
+	AP_REG2_EN		= (1 << 15),
+	AP_REG2_ADDR_MASK	= (0xF << 8) | (1 << 12),	/* 12:8 */
+	AP_PRE_SUP2		= (1 << 6),
+	AP_PHY2_DFLT		= (1 << 5),
+	AP_PHY2_ADDR_MASK	= (0xF << 0) | (1 << 4),	/* 4:0 */
+
+}AUTOPOLL2_BITS;
+
+typedef enum {
+
+	AP_REG3_EN		= (1 << 15),
+	AP_REG3_ADDR_MASK	= (0xF << 8) | (1 << 12),	/* 12:8 */
+	AP_PRE_SUP3		= (1 << 6),
+	AP_PHY3_DFLT		= (1 << 5),
+	AP_PHY3_ADDR_MASK	= (0xF << 0) | (1 << 4),	/* 4:0 */
+
+}AUTOPOLL3_BITS;
+
+
+typedef enum {
+
+	AP_REG4_EN		= (1 << 15),
+	AP_REG4_ADDR_MASK	= (0xF << 8) | (1 << 12),	/* 12:8 */
+	AP_PRE_SUP4		= (1 << 6),
+	AP_PHY4_DFLT		= (1 << 5),
+	AP_PHY4_ADDR_MASK	= (0xF << 0) | (1 << 4),	/* 4:0 */
+
+}AUTOPOLL4_BITS;
+
+
+typedef enum {
+
+	AP_REG5_EN		= (1 << 15),
+	AP_REG5_ADDR_MASK	= (0xF << 8) | (1 << 12),	/* 12:8 */
+	AP_PRE_SUP5		= (1 << 6),
+	AP_PHY5_DFLT		= (1 << 5),
+	AP_PHY5_ADDR_MASK	= (0xF << 0) | (1 << 4),	/* 4:0 */
+
+}AUTOPOLL5_BITS;
+
+
+
+
+/* AP_VALUE 0x98, 32bit register */
+typedef enum {
+
+	AP_VAL_ACTIVE		= (1 << 31),
+	AP_VAL_RD_CMD		= (1 << 29),
+	AP_ADDR			= (1 << 18) | (1 << 17) | (1 << 16),	/* 18:16 */
+	AP_VAL			= (0xF << 0) | (0xF << 4) | (0xF << 8) |
+				  (0xF << 12),				/* 15:0 */
+
+}AP_VALUE_BITS;
+
+typedef enum {
+
+	DLY_INT_A_R3		= (1 << 31),
+	DLY_INT_A_R2		= (1 << 30),
+	DLY_INT_A_R1		= (1 << 29),
+	DLY_INT_A_R0		= (1 << 28),
+	DLY_INT_A_T3		= (1 << 27),
+	DLY_INT_A_T2		= (1 << 26),
+	DLY_INT_A_T1		= (1 << 25),
+	DLY_INT_A_T0		= (1 << 24),
+	EVENT_COUNT_A		= (0xF << 16) | (0x1 << 20),	/* 20:16 */
+	MAX_DELAY_TIME_A	= (0xF << 0) | (0xF << 4) | (1 << 8) |
+				  (1 << 9) | (1 << 10),		/* 10:0 */
+
+}DLY_INT_A_BITS;
+
+typedef enum {
+
+	DLY_INT_B_R3		= (1 << 31),
+	DLY_INT_B_R2		= (1 << 30),
+	DLY_INT_B_R1		= (1 << 29),
+	DLY_INT_B_R0		= (1 << 28),
+	DLY_INT_B_T3		= (1 << 27),
+	DLY_INT_B_T2		= (1 << 26),
+	DLY_INT_B_T1		= (1 << 25),
+	DLY_INT_B_T0		= (1 << 24),
+	EVENT_COUNT_B		= (0xF << 16) | (0x1 << 20),	/* 20:16 */
+	MAX_DELAY_TIME_B	= (0xF << 0) | (0xF << 4) | (1 << 8) |
+				  (1 << 9) | (1 << 10),		/* 10:0 */
+}DLY_INT_B_BITS;
+
+
+/* FLOW_CONTROL 0xC8, 32bit register */
+typedef enum {
+
+	PAUSE_LEN_CHG		= (1 << 30),
+	FTPE			= (1 << 22),
+	FRPE			= (1 << 21),
+	NAPA			= (1 << 20),
+	NPA			= (1 << 19),
+	FIXP			= (1 << 18),
+	FCCMD			= (1 << 16),
+	PAUSE_LEN		= (0xF << 0) | (0xF << 4) | (0xF << 8) |
+				  (0xF << 12),	/* 15:0 */
+
+}FLOW_CONTROL_BITS;
+
+/* PHY_ACCESS 0xD0, 32bit register */
+typedef enum {
+
+	PHY_CMD_ACTIVE		= (1 << 31),
+	PHY_WR_CMD		= (1 << 30),
+	PHY_RD_CMD		= (1 << 29),
+	PHY_RD_ERR		= (1 << 28),
+	PHY_PRE_SUP		= (1 << 27),
+	PHY_ADDR		= (1 << 21) | (1 << 22) | (1 << 23) |
+				  (1 << 24) | (1 << 25),	/* 25:21 */
+	PHY_REG_ADDR		= (1 << 16) | (1 << 17) | (1 << 18) |
+				  (1 << 19) | (1 << 20),	/* 20:16 */
+	PHY_DATA		= (0xF << 0) | (0xF << 4) | (0xF << 8) |
+				  (0xF << 12),			/* 15:0 */
+
+}PHY_ACCESS_BITS;
+
+
+/* PMAT0 0x190, 32bit register */
+typedef enum {
+	PMR_ACTIVE		= (1 << 31),
+	PMR_WR_CMD		= (1 << 30),
+	PMR_RD_CMD		= (1 << 29),
+	PMR_BANK		= (1 << 28),
+	PMR_ADDR		= (0xF << 16) | (1 << 20) | (1 << 21) |
+				  (1 << 22),	/* 22:16 */
+	PMR_B4			= (0xF << 0) | (0xF << 4),	/* 7:0 */
+}PMAT0_BITS;
+
+
+/* PMAT1 0x194, 32bit register */
+typedef enum {
PMR_B3 = (0xF << 24) | (0xF <<28),/* 31:24 */ + PMR_B2 = (0xF << 16) |(0xF << 20),/* 23:16 */ + PMR_B1 = (0xF << 8) | (0xF <<12), /* 15:8 */ + PMR_B0 = (0xF << 0)|(0xF << 4),/* 7:0 */ +}PMAT1_BITS; + +/************************************************************************/ +/* */ +/* MIB counter definitions */ +/* */ +/************************************************************************/ + +#define rcv_miss_pkts 0x00 +#define rcv_octets 0x01 +#define rcv_broadcast_pkts 0x02 +#define rcv_multicast_pkts 0x03 +#define rcv_undersize_pkts 0x04 +#define rcv_oversize_pkts 0x05 +#define rcv_fragments 0x06 +#define rcv_jabbers 0x07 +#define rcv_unicast_pkts 0x08 +#define rcv_alignment_errors 0x09 +#define rcv_fcs_errors 0x0A +#define rcv_good_octets 0x0B +#define rcv_mac_ctrl 0x0C +#define rcv_flow_ctrl 0x0D +#define rcv_pkts_64_octets 0x0E +#define rcv_pkts_65to127_octets 0x0F +#define rcv_pkts_128to255_octets 0x10 +#define rcv_pkts_256to511_octets 0x11 +#define rcv_pkts_512to1023_octets 0x12 +#define rcv_pkts_1024to1518_octets 0x13 +#define rcv_unsupported_opcode 0x14 +#define rcv_symbol_errors 0x15 +#define rcv_drop_pkts_ring1 0x16 +#define rcv_drop_pkts_ring2 0x17 +#define rcv_drop_pkts_ring3 0x18 +#define rcv_drop_pkts_ring4 0x19 +#define rcv_jumbo_pkts 0x1A + +#define xmt_underrun_pkts 0x20 +#define xmt_octets 0x21 +#define xmt_packets 0x22 +#define xmt_broadcast_pkts 0x23 +#define xmt_multicast_pkts 0x24 +#define xmt_collisions 0x25 +#define xmt_unicast_pkts 0x26 +#define xmt_one_collision 0x27 +#define xmt_multiple_collision 0x28 +#define xmt_deferred_transmit 0x29 +#define xmt_late_collision 0x2A +#define xmt_excessive_defer 0x2B +#define xmt_loss_carrier 0x2C +#define xmt_excessive_collision 0x2D +#define xmt_back_pressure 0x2E +#define xmt_flow_ctrl 0x2F +#define xmt_pkts_64_octets 0x30 +#define xmt_pkts_65to127_octets 0x31 +#define xmt_pkts_128to255_octets 0x32 +#define xmt_pkts_256to511_octets 0x33 +#define xmt_pkts_512to1023_octets 0x34 +#define xmt_pkts_1024to1518_octet 0x35 +#define xmt_oversize_pkts 0x36 +#define xmt_jumbo_pkts 0x37 + + +/* Driver definitions */ + +#define PCI_VENDOR_ID_AMD 0x1022 +#define PCI_DEVICE_ID_AMD8111E_7462 0x7462 + +#define MAX_UNITS 8 /* Maximum number of devices possible */ + +#define NUM_TX_BUFFERS 32 /* Number of transmit buffers */ +#define NUM_RX_BUFFERS 32 /* Number of receive buffers */ + +#define TX_BUFF_MOD_MASK 31 /* (NUM_TX_BUFFERS -1) */ +#define RX_BUFF_MOD_MASK 31 /* (NUM_RX_BUFFERS -1) */ + +#define NUM_TX_RING_DR 32 +#define NUM_RX_RING_DR 32 + +#define TX_RING_DR_MOD_MASK 31 /* (NUM_TX_RING_DR -1) */ +#define RX_RING_DR_MOD_MASK 31 /* (NUM_RX_RING_DR -1) */ + +#define MAX_FILTER_SIZE 64 /* Maximum multicast address */ +#define AMD8111E_MIN_MTU 60 +#define AMD8111E_MAX_MTU 9000 + +#define PKT_BUFF_SZ 1536 +#define MIN_PKT_LEN 60 +#define ETH_ADDR_LEN 6 + +#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */ +#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */ +#define DELAY_TIMER_CONV 50 /* msec to 10 usec conversion. 
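+				   (A hedged sketch of how the interrupt
+				   coalescing code, outside this hunk,
+				   presumably combines this constant with
+				   the DLY_INT_A bits defined above --
+				   timeout scaled into MAX_DELAY_TIME,
+				   event count in bits 20:16:
+
+					writel((u32)DLY_INT_A_R0 |
+					       (event_count << 16) |
+					       (timeout * DELAY_TIMER_CONV),
+					       mmio + DLY_INT_A);
+				   )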
+				   Only 500 usec resolution */
+#define OPTION_VLAN_ENABLE		0x0001
+#define OPTION_JUMBO_ENABLE		0x0002
+#define OPTION_MULTICAST_ENABLE		0x0004
+#define OPTION_WOL_ENABLE		0x0008
+#define OPTION_WAKE_MAGIC_ENABLE	0x0010
+#define OPTION_WAKE_PHY_ENABLE		0x0020
+#define OPTION_INTR_COAL_ENABLE		0x0040
+#define OPTION_DYN_IPG_ENABLE		0x0080
+
+#define PHY_REG_ADDR_MASK		0x1f
+
+/* ipg parameters */
+#define DEFAULT_IPG			0x60
+#define IFS1_DELTA			36
+#define IPG_CONVERGE_JIFFIES		(HZ/2)
+#define IPG_STABLE_TIME			5
+#define MIN_IPG				96
+#define MAX_IPG				255
+#define IPG_STEP			16
+#define CSTATE				1
+#define SSTATE				2
+
+/* Assume the controller gets data within 10 times the maximum processing time */
+#define REPEAT_CNT			10
+
+/* amd8111e descriptor flag definitions */
+typedef enum {
+
+	OWN_BIT			= (1 << 15),
+	ADD_FCS_BIT		= (1 << 13),
+	LTINT_BIT		= (1 << 12),
+	STP_BIT			= (1 << 9),
+	ENP_BIT			= (1 << 8),
+	KILL_BIT		= (1 << 6),
+	TCC_VLAN_INSERT		= (1 << 1),
+	TCC_VLAN_REPLACE	= (1 << 1) | (1 << 0),
+
+}TX_FLAG_BITS;
+
+typedef enum {
+	ERR_BIT			= (1 << 14),
+	FRAM_BIT		= (1 << 13),
+	OFLO_BIT		= (1 << 12),
+	CRC_BIT			= (1 << 11),
+	PAM_BIT			= (1 << 6),
+	LAFM_BIT		= (1 << 5),
+	BAM_BIT			= (1 << 4),
+	TT_VLAN_TAGGED		= (1 << 3) | (1 << 2),	/* 0x000c */
+	TT_PRTY_TAGGED		= (1 << 3),		/* 0x0008 */
+
+}RX_FLAG_BITS;
+
+#define RESET_RX_FLAGS		0x0000
+#define TT_MASK			0x000c
+#define TCC_MASK		0x0003
+
+/* driver ioctl parameters */
+#define AMD8111E_REG_DUMP_LEN	13*sizeof(u32)
+
+/* amd8111e descriptor format */
+
+struct amd8111e_tx_dr {
+
+	__le16 buff_count;	/* Size of the buffer pointed by this descriptor */
+
+	__le16 tx_flags;
+
+	__le16 tag_ctrl_info;
+
+	__le16 tag_ctrl_cmd;
+
+	__le32 buff_phy_addr;
+
+	__le32 reserved;
+};
+
+struct amd8111e_rx_dr {
+
+	__le32 reserved;
+
+	__le16 msg_count;	/* Received message len */
+
+	__le16 tag_ctrl_info;
+
+	__le16 buff_count;	/* Len of the buffer pointed by descriptor.
*/
+
+	__le16 rx_flags;
+
+	__le32 buff_phy_addr;
+
+};
+struct amd8111e_link_config {
+
+#define SPEED_INVALID		0xffff
+#define DUPLEX_INVALID		0xff
+#define AUTONEG_INVALID		0xff
+
+	unsigned long		orig_phy_option;
+	u16			speed;
+	u8			duplex;
+	u8			autoneg;
+	u8			reserved;	/* 32bit alignment */
+};
+
+enum coal_type {
+
+	NO_COALESCE,
+	LOW_COALESCE,
+	MEDIUM_COALESCE,
+	HIGH_COALESCE,
+
+};
+
+enum coal_mode {
+	RX_INTR_COAL,
+	TX_INTR_COAL,
+	DISABLE_COAL,
+	ENABLE_COAL,
+
+};
+#define MAX_TIMEOUT	40
+#define MAX_EVENT_COUNT	31
+struct amd8111e_coalesce_conf {
+
+	unsigned int rx_timeout;
+	unsigned int rx_event_count;
+	unsigned long rx_packets;
+	unsigned long rx_prev_packets;
+	unsigned long rx_bytes;
+	unsigned long rx_prev_bytes;
+	unsigned int rx_coal_type;
+
+	unsigned int tx_timeout;
+	unsigned int tx_event_count;
+	unsigned long tx_packets;
+	unsigned long tx_prev_packets;
+	unsigned long tx_bytes;
+	unsigned long tx_prev_bytes;
+	unsigned int tx_coal_type;
+
+};
+struct ipg_info {
+
+	unsigned int ipg_state;
+	unsigned int ipg;
+	unsigned int current_ipg;
+	unsigned int col_cnt;
+	unsigned int diff_col_cnt;
+	unsigned int timer_tick;
+	unsigned int prev_ipg;
+	struct timer_list ipg_timer;
+};
+
+struct amd8111e_priv {
+
+	struct amd8111e_tx_dr *tx_ring;
+	struct amd8111e_rx_dr *rx_ring;
+	dma_addr_t tx_ring_dma_addr;	/* tx descriptor ring base address */
+	dma_addr_t rx_ring_dma_addr;	/* rx descriptor ring base address */
+	const char *name;
+	struct pci_dev *pci_dev;	/* Ptr to the associated pci_dev */
+	struct net_device *amd8111e_net_dev;	/* ptr to associated net_device */
+	/* Transmit and receive skbs */
+	struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
+	struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
+	/* Transmit and receive dma mapped addr */
+	dma_addr_t tx_dma_addr[NUM_TX_BUFFERS];
+	dma_addr_t rx_dma_addr[NUM_RX_BUFFERS];
+	/* Reg memory mapped address */
+	void __iomem *mmio;
+
+	struct napi_struct napi;
+
+	spinlock_t lock;	/* Guard lock */
+	unsigned long rx_idx, tx_idx;	/* The next free ring entry */
+	unsigned long tx_complete_idx;
+	unsigned long tx_ring_complete_idx;
+	unsigned long tx_ring_idx;
+	unsigned int rx_buff_len;	/* Buffer length of rx buffers */
+	int options;		/* Options enabled/disabled for the device */
+
+	unsigned long ext_phy_option;
+	int ext_phy_addr;
+	u32 ext_phy_id;
+
+	struct amd8111e_link_config link_config;
+	int pm_cap;
+
+	struct net_device *next;
+	int mii;
+	struct mii_if_info mii_if;
+	char opened;
+	unsigned int drv_rx_errors;
+	struct amd8111e_coalesce_conf coal_conf;
+
+	struct ipg_info ipg_data;
+
+};
+
+/* The kernel-provided writeq does not write 64 bits into the amd8111e
+device register; instead it writes only the higher 32 bits of the data
+into the lower 32 bits of the register.
+BUG?
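+
+An example use of the amd8111e_writeq() macro defined below, assuming a
+64-bit value destined for the 64-bit LADRF register defined above (the
+variable name mc_filter is illustrative only):
+
+	u64 mc_filter = 0;
+	amd8111e_writeq(mc_filter, lp->mmio + LADRF);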
*/ +#define amd8111e_writeq(_UlData,_memMap) \ + writel(*(u32*)(&_UlData), _memMap); \ + writel(*(u32*)((u8*)(&_UlData)+4), _memMap+4) + +/* maps the external speed options to internal value */ +typedef enum { + SPEED_AUTONEG, + SPEED10_HALF, + SPEED10_FULL, + SPEED100_HALF, + SPEED100_FULL, +}EXT_PHY_OPTION; + +static int card_idx; +static int speed_duplex[MAX_UNITS] = { 0, }; +static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1}; +static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0}; +static unsigned int chip_version; + +#endif /* _AMD8111E_H */ + diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c new file mode 100644 index 0000000..7ed78f4 --- /dev/null +++ b/drivers/net/ethernet/amd/ariadne.c @@ -0,0 +1,793 @@ +/* + * Amiga Linux/m68k Ariadne Ethernet Driver + * + * © Copyright 1995-2003 by Geert Uytterhoeven (geert@linux-m68k.org) + * Peter De Schrijver (p2@mind.be) + * + * --------------------------------------------------------------------------- + * + * This program is based on + * + * lance.c: An AMD LANCE ethernet driver for linux. + * Written 1993-94 by Donald Becker. + * + * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller + * Advanced Micro Devices + * Publication #16907, Rev. B, Amendment/0, May 1994 + * + * MC68230: Parallel Interface/Timer (PI/T) + * Motorola Semiconductors, December, 1983 + * + * --------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * --------------------------------------------------------------------------- + * + * The Ariadne is a Zorro-II board made by Village Tronic. 
It contains: + * + * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both + * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors + * + * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ariadne.h" + +#ifdef ARIADNE_DEBUG +int ariadne_debug = ARIADNE_DEBUG; +#else +int ariadne_debug = 1; +#endif + +/* Macros to Fix Endianness problems */ + +/* Swap the Bytes in a WORD */ +#define swapw(x) (((x >> 8) & 0x00ff) | ((x << 8) & 0xff00)) +/* Get the Low BYTE in a WORD */ +#define lowb(x) (x & 0xff) +/* Get the Swapped High WORD in a LONG */ +#define swhighw(x) ((((x) >> 8) & 0xff00) | (((x) >> 24) & 0x00ff)) +/* Get the Swapped Low WORD in a LONG */ +#define swloww(x) ((((x) << 8) & 0xff00) | (((x) >> 8) & 0x00ff)) + +/* Transmit/Receive Ring Definitions */ + +#define TX_RING_SIZE 5 +#define RX_RING_SIZE 16 + +#define PKT_BUF_SIZE 1520 + +/* Private Device Data */ + +struct ariadne_private { + volatile struct TDRE *tx_ring[TX_RING_SIZE]; + volatile struct RDRE *rx_ring[RX_RING_SIZE]; + volatile u_short *tx_buff[TX_RING_SIZE]; + volatile u_short *rx_buff[RX_RING_SIZE]; + int cur_tx, cur_rx; /* The next free ring entry */ + int dirty_tx; /* The ring entries to be free()ed */ + char tx_full; +}; + +/* Structure Created in the Ariadne's RAM Buffer */ + +struct lancedata { + struct TDRE tx_ring[TX_RING_SIZE]; + struct RDRE rx_ring[RX_RING_SIZE]; + u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)]; + u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)]; +}; + +static void memcpyw(volatile u_short *dest, u_short *src, int len) +{ + while (len >= 2) { + *(dest++) = *(src++); + len -= 2; + } + if (len == 1) + *dest = (*(u_char *)src) << 8; +} + +static void ariadne_init_ring(struct net_device *dev) +{ + struct ariadne_private *priv = netdev_priv(dev); + volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start; + int i; + + netif_stop_queue(dev); + + priv->tx_full = 0; + priv->cur_rx = priv->cur_tx = 0; + priv->dirty_tx = 0; + + /* Set up TX Ring */ + for (i = 0; i < TX_RING_SIZE; i++) { + volatile struct TDRE *t = &lancedata->tx_ring[i]; + t->TMD0 = swloww(ARIADNE_RAM + + offsetof(struct lancedata, tx_buff[i])); + t->TMD1 = swhighw(ARIADNE_RAM + + offsetof(struct lancedata, tx_buff[i])) | + TF_STP | TF_ENP; + t->TMD2 = swapw((u_short)-PKT_BUF_SIZE); + t->TMD3 = 0; + priv->tx_ring[i] = &lancedata->tx_ring[i]; + priv->tx_buff[i] = lancedata->tx_buff[i]; + netdev_dbg(dev, "TX Entry %2d at %p, Buf at %p\n", + i, &lancedata->tx_ring[i], lancedata->tx_buff[i]); + } + + /* Set up RX Ring */ + for (i = 0; i < RX_RING_SIZE; i++) { + volatile struct RDRE *r = &lancedata->rx_ring[i]; + r->RMD0 = swloww(ARIADNE_RAM + + offsetof(struct lancedata, rx_buff[i])); + r->RMD1 = swhighw(ARIADNE_RAM + + offsetof(struct lancedata, rx_buff[i])) | + RF_OWN; + r->RMD2 = swapw((u_short)-PKT_BUF_SIZE); + r->RMD3 = 0x0000; + priv->rx_ring[i] = &lancedata->rx_ring[i]; + priv->rx_buff[i] = lancedata->rx_buff[i]; + netdev_dbg(dev, "RX Entry %2d at %p, Buf at %p\n", + i, &lancedata->rx_ring[i], lancedata->rx_buff[i]); + } +} + +static int ariadne_rx(struct net_device *dev) +{ + struct ariadne_private *priv = netdev_priv(dev); + int entry = priv->cur_rx % RX_RING_SIZE; + int i; + + /* If we own the next entry, it's a new 
packet. Send it up */ + while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) { + int status = lowb(priv->rx_ring[entry]->RMD1); + + if (status != (RF_STP | RF_ENP)) { /* There was an error */ + /* There is a tricky error noted by + * John Murphy to Russ Nelson: + * Even with full-sized buffers it's possible for a + * jabber packet to use two buffers, with only the + * last correctly noting the error + */ + /* Only count a general error at the end of a packet */ + if (status & RF_ENP) + dev->stats.rx_errors++; + if (status & RF_FRAM) + dev->stats.rx_frame_errors++; + if (status & RF_OFLO) + dev->stats.rx_over_errors++; + if (status & RF_CRC) + dev->stats.rx_crc_errors++; + if (status & RF_BUFF) + dev->stats.rx_fifo_errors++; + priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP; + } else { + /* Malloc up new buffer, compatible with net-3 */ + short pkt_len = swapw(priv->rx_ring[entry]->RMD3); + struct sk_buff *skb; + + skb = dev_alloc_skb(pkt_len + 2); + if (skb == NULL) { + netdev_warn(dev, "Memory squeeze, deferring packet\n"); + for (i = 0; i < RX_RING_SIZE; i++) + if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN) + break; + + if (i > RX_RING_SIZE - 2) { + dev->stats.rx_dropped++; + priv->rx_ring[entry]->RMD1 |= RF_OWN; + priv->cur_rx++; + } + break; + } + + + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, pkt_len); /* Make room */ + skb_copy_to_linear_data(skb, + (const void *)priv->rx_buff[entry], + pkt_len); + skb->protocol = eth_type_trans(skb, dev); + netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n", + ((u_short *)skb->data)[6], + skb->data + 6, skb->data, + (int)skb->data, (int)skb->len); + + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + + priv->rx_ring[entry]->RMD1 |= RF_OWN; + entry = (++priv->cur_rx) % RX_RING_SIZE; + } + + priv->cur_rx = priv->cur_rx % RX_RING_SIZE; + + /* We should check that at least two ring entries are free. 
+ * If not, we should free one and mark stats->rx_dropped++ + */ + + return 0; +} + +static irqreturn_t ariadne_interrupt(int irq, void *data) +{ + struct net_device *dev = (struct net_device *)data; + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + struct ariadne_private *priv; + int csr0, boguscnt; + int handled = 0; + + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + + if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ + return IRQ_NONE; /* generated by the board */ + + priv = netdev_priv(dev); + + boguscnt = 10; + while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) { + /* Acknowledge all of the current interrupt sources ASAP */ + lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT); + +#ifdef DEBUG + if (ariadne_debug > 5) { + netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [", + csr0, lance->RDP); + if (csr0 & INTR) + pr_cont(" INTR"); + if (csr0 & INEA) + pr_cont(" INEA"); + if (csr0 & RXON) + pr_cont(" RXON"); + if (csr0 & TXON) + pr_cont(" TXON"); + if (csr0 & TDMD) + pr_cont(" TDMD"); + if (csr0 & STOP) + pr_cont(" STOP"); + if (csr0 & STRT) + pr_cont(" STRT"); + if (csr0 & INIT) + pr_cont(" INIT"); + if (csr0 & ERR) + pr_cont(" ERR"); + if (csr0 & BABL) + pr_cont(" BABL"); + if (csr0 & CERR) + pr_cont(" CERR"); + if (csr0 & MISS) + pr_cont(" MISS"); + if (csr0 & MERR) + pr_cont(" MERR"); + if (csr0 & RINT) + pr_cont(" RINT"); + if (csr0 & TINT) + pr_cont(" TINT"); + if (csr0 & IDON) + pr_cont(" IDON"); + pr_cont(" ]\n"); + } +#endif + + if (csr0 & RINT) { /* Rx interrupt */ + handled = 1; + ariadne_rx(dev); + } + + if (csr0 & TINT) { /* Tx-done interrupt */ + int dirty_tx = priv->dirty_tx; + + handled = 1; + while (dirty_tx < priv->cur_tx) { + int entry = dirty_tx % TX_RING_SIZE; + int status = lowb(priv->tx_ring[entry]->TMD1); + + if (status & TF_OWN) + break; /* It still hasn't been Txed */ + + priv->tx_ring[entry]->TMD1 &= 0xff00; + + if (status & TF_ERR) { + /* There was an major error, log it */ + int err_status = priv->tx_ring[entry]->TMD3; + dev->stats.tx_errors++; + if (err_status & EF_RTRY) + dev->stats.tx_aborted_errors++; + if (err_status & EF_LCAR) + dev->stats.tx_carrier_errors++; + if (err_status & EF_LCOL) + dev->stats.tx_window_errors++; + if (err_status & EF_UFLO) { + /* Ackk! On FIFO errors the Tx unit is turned off! */ + dev->stats.tx_fifo_errors++; + /* Remove this verbosity later! */ + netdev_err(dev, "Tx FIFO error! Status %04x\n", + csr0); + /* Restart the chip */ + lance->RDP = STRT; + } + } else { + if (status & (TF_MORE | TF_ONE)) + dev->stats.collisions++; + dev->stats.tx_packets++; + } + dirty_tx++; + } + +#ifndef final_version + if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) { + netdev_err(dev, "out-of-sync dirty pointer, %d vs. 
%d, full=%d\n", + dirty_tx, priv->cur_tx, + priv->tx_full); + dirty_tx += TX_RING_SIZE; + } +#endif + + if (priv->tx_full && netif_queue_stopped(dev) && + dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) { + /* The ring is no longer full */ + priv->tx_full = 0; + netif_wake_queue(dev); + } + + priv->dirty_tx = dirty_tx; + } + + /* Log misc errors */ + if (csr0 & BABL) { + handled = 1; + dev->stats.tx_errors++; /* Tx babble */ + } + if (csr0 & MISS) { + handled = 1; + dev->stats.rx_errors++; /* Missed a Rx frame */ + } + if (csr0 & MERR) { + handled = 1; + netdev_err(dev, "Bus master arbitration failure, status %04x\n", + csr0); + /* Restart the chip */ + lance->RDP = STRT; + } + } + + /* Clear any other interrupt, and set interrupt enable */ + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON; + + if (ariadne_debug > 4) + netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n", + lance->RAP, lance->RDP); + + return IRQ_RETVAL(handled); +} + +static int ariadne_open(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + u_short in; + u_long version; + int i; + + /* Reset the LANCE */ + in = lance->Reset; + + /* Stop the LANCE */ + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = STOP; + + /* Check the LANCE version */ + lance->RAP = CSR88; /* Chip ID */ + version = swapw(lance->RDP); + lance->RAP = CSR89; /* Chip ID */ + version |= swapw(lance->RDP) << 16; + if ((version & 0x00000fff) != 0x00000003) { + pr_warn("Couldn't find AMD Ethernet Chip\n"); + return -EAGAIN; + } + if ((version & 0x0ffff000) != 0x00003000) { + pr_warn("Couldn't find Am79C960 (Wrong part number = %ld)\n", + (version & 0x0ffff000) >> 12); + return -EAGAIN; + } + + netdev_dbg(dev, "Am79C960 (PCnet-ISA) Revision %ld\n", + (version & 0xf0000000) >> 28); + + ariadne_init_ring(dev); + + /* Miscellaneous Stuff */ + lance->RAP = CSR3; /* Interrupt Masks and Deferral Control */ + lance->RDP = 0x0000; + lance->RAP = CSR4; /* Test and Features Control */ + lance->RDP = DPOLL | APAD_XMT | MFCOM | RCVCCOM | TXSTRTM | JABM; + + /* Set the Multicast Table */ + lance->RAP = CSR8; /* Logical Address Filter, LADRF[15:0] */ + lance->RDP = 0x0000; + lance->RAP = CSR9; /* Logical Address Filter, LADRF[31:16] */ + lance->RDP = 0x0000; + lance->RAP = CSR10; /* Logical Address Filter, LADRF[47:32] */ + lance->RDP = 0x0000; + lance->RAP = CSR11; /* Logical Address Filter, LADRF[63:48] */ + lance->RDP = 0x0000; + + /* Set the Ethernet Hardware Address */ + lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */ + lance->RDP = ((u_short *)&dev->dev_addr[0])[0]; + lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */ + lance->RDP = ((u_short *)&dev->dev_addr[0])[1]; + lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */ + lance->RDP = ((u_short *)&dev->dev_addr[0])[2]; + + /* Set the Init Block Mode */ + lance->RAP = CSR15; /* Mode Register */ + lance->RDP = 0x0000; + + /* Set the Transmit Descriptor Ring Pointer */ + lance->RAP = CSR30; /* Base Address of Transmit Ring */ + lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring)); + lance->RAP = CSR31; /* Base Address of transmit Ring */ + lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring)); + + /* Set the Receive Descriptor Ring Pointer */ + lance->RAP = CSR24; /* Base Address of Receive Ring */ + lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring)); + lance->RAP = CSR25; /* Base Address of Receive 
Ring */ + lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring)); + + /* Set the Number of RX and TX Ring Entries */ + lance->RAP = CSR76; /* Receive Ring Length */ + lance->RDP = swapw(((u_short)-RX_RING_SIZE)); + lance->RAP = CSR78; /* Transmit Ring Length */ + lance->RDP = swapw(((u_short)-TX_RING_SIZE)); + + /* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */ + lance->RAP = ISACSR2; /* Miscellaneous Configuration */ + lance->IDP = ASEL; + + /* LED Control */ + lance->RAP = ISACSR5; /* LED1 Status */ + lance->IDP = PSE|XMTE; + lance->RAP = ISACSR6; /* LED2 Status */ + lance->IDP = PSE|COLE; + lance->RAP = ISACSR7; /* LED3 Status */ + lance->IDP = PSE|RCVE; + + netif_start_queue(dev); + + i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED, + dev->name, dev); + if (i) + return i; + + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = INEA | STRT; + + return 0; +} + +static int ariadne_close(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + + netif_stop_queue(dev); + + lance->RAP = CSR112; /* Missed Frame Count */ + dev->stats.rx_missed_errors = swapw(lance->RDP); + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + + if (ariadne_debug > 1) { + netdev_dbg(dev, "Shutting down ethercard, status was %02x\n", + lance->RDP); + netdev_dbg(dev, "%lu packets missed\n", + dev->stats.rx_missed_errors); + } + + /* We stop the LANCE here -- it occasionally polls memory if we don't */ + lance->RDP = STOP; + + free_irq(IRQ_AMIGA_PORTS, dev); + + return 0; +} + +static inline void ariadne_reset(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = STOP; + ariadne_init_ring(dev); + lance->RDP = INEA | STRT; + netif_start_queue(dev); +} + +static void ariadne_tx_timeout(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + + netdev_err(dev, "transmit timed out, status %04x, resetting\n", + lance->RDP); + ariadne_reset(dev); + netif_wake_queue(dev); +} + +static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ariadne_private *priv = netdev_priv(dev); + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + int entry; + unsigned long flags; + int len = skb->len; + +#if 0 + if (ariadne_debug > 3) { + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + netdev_dbg(dev, "%s: csr0 %04x\n", __func__, lance->RDP); + lance->RDP = 0x0000; + } +#endif + + /* FIXME: is the 79C960 new enough to do its own padding right ? */ + if (skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + len = ETH_ZLEN; + } + + /* Fill in a Tx ring entry */ + + netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n", + ((u_short *)skb->data)[6], + skb->data + 6, skb->data, + (int)skb->data, (int)skb->len); + + local_irq_save(flags); + + entry = priv->cur_tx % TX_RING_SIZE; + + /* Caution: the write order is important here, set the base address with + the "ownership" bits last */ + + priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len); + priv->tx_ring[entry]->TMD3 = 0x0000; + memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len); + +#ifdef DEBUG + print_hex_dump(KERN_DEBUG, "tx_buff: ", DUMP_PREFIX_OFFSET, 16, 1, + (void *)priv->tx_buff[entry], + skb->len > 64 ? 
64 : skb->len, true); +#endif + + priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00) + | TF_OWN | TF_STP | TF_ENP; + + dev_kfree_skb(skb); + + priv->cur_tx++; + if ((priv->cur_tx >= TX_RING_SIZE) && + (priv->dirty_tx >= TX_RING_SIZE)) { + + netdev_dbg(dev, "*** Subtracting TX_RING_SIZE from cur_tx (%d) and dirty_tx (%d)\n", + priv->cur_tx, priv->dirty_tx); + + priv->cur_tx -= TX_RING_SIZE; + priv->dirty_tx -= TX_RING_SIZE; + } + dev->stats.tx_bytes += len; + + /* Trigger an immediate send poll */ + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = INEA | TDMD; + + if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) { + netif_stop_queue(dev); + priv->tx_full = 1; + } + local_irq_restore(flags); + + return NETDEV_TX_OK; +} + +static struct net_device_stats *ariadne_get_stats(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + short saved_addr; + unsigned long flags; + + local_irq_save(flags); + saved_addr = lance->RAP; + lance->RAP = CSR112; /* Missed Frame Count */ + dev->stats.rx_missed_errors = swapw(lance->RDP); + lance->RAP = saved_addr; + local_irq_restore(flags); + + return &dev->stats; +} + +/* Set or clear the multicast filter for this adaptor. + * num_addrs == -1 Promiscuous mode, receive all packets + * num_addrs == 0 Normal mode, clear multicast list + * num_addrs > 0 Multicast mode, receive normal and MC packets, + * and do best-effort filtering. + */ +static void set_multicast_list(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + + if (!netif_running(dev)) + return; + + netif_stop_queue(dev); + + /* We take the simple way out and always enable promiscuous mode */ + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = STOP; /* Temporarily stop the lance */ + ariadne_init_ring(dev); + + if (dev->flags & IFF_PROMISC) { + lance->RAP = CSR15; /* Mode Register */ + lance->RDP = PROM; /* Set promiscuous mode */ + } else { + short multicast_table[4]; + int num_addrs = netdev_mc_count(dev); + int i; + /* We don't use the multicast table, + * but rely on upper-layer filtering + */ + memset(multicast_table, (num_addrs == 0) ? 
0 : -1, + sizeof(multicast_table)); + for (i = 0; i < 4; i++) { + lance->RAP = CSR8 + (i << 8); + /* Logical Address Filter */ + lance->RDP = swapw(multicast_table[i]); + } + lance->RAP = CSR15; /* Mode Register */ + lance->RDP = 0x0000; /* Unset promiscuous mode */ + } + + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = INEA | STRT | IDON;/* Resume normal operation */ + + netif_wake_queue(dev); +} + + +static void __devexit ariadne_remove_one(struct zorro_dev *z) +{ + struct net_device *dev = zorro_get_drvdata(z); + + unregister_netdev(dev); + release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960)); + release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE); + free_netdev(dev); +} + +static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = { + { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl); + +static const struct net_device_ops ariadne_netdev_ops = { + .ndo_open = ariadne_open, + .ndo_stop = ariadne_close, + .ndo_start_xmit = ariadne_start_xmit, + .ndo_tx_timeout = ariadne_tx_timeout, + .ndo_get_stats = ariadne_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int __devinit ariadne_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + unsigned long board = z->resource.start; + unsigned long base_addr = board + ARIADNE_LANCE; + unsigned long mem_start = board + ARIADNE_RAM; + struct resource *r1, *r2; + struct net_device *dev; + struct ariadne_private *priv; + int err; + + r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960"); + if (!r1) + return -EBUSY; + r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM"); + if (!r2) { + release_mem_region(base_addr, sizeof(struct Am79C960)); + return -EBUSY; + } + + dev = alloc_etherdev(sizeof(struct ariadne_private)); + if (dev == NULL) { + release_mem_region(base_addr, sizeof(struct Am79C960)); + release_mem_region(mem_start, ARIADNE_RAM_SIZE); + return -ENOMEM; + } + + priv = netdev_priv(dev); + + r1->name = dev->name; + r2->name = dev->name; + + dev->dev_addr[0] = 0x00; + dev->dev_addr[1] = 0x60; + dev->dev_addr[2] = 0x30; + dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff; + dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff; + dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff; + dev->base_addr = ZTWO_VADDR(base_addr); + dev->mem_start = ZTWO_VADDR(mem_start); + dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE; + + dev->netdev_ops = &ariadne_netdev_ops; + dev->watchdog_timeo = 5 * HZ; + + err = register_netdev(dev); + if (err) { + release_mem_region(base_addr, sizeof(struct Am79C960)); + release_mem_region(mem_start, ARIADNE_RAM_SIZE); + free_netdev(dev); + return err; + } + zorro_set_drvdata(z, dev); + + netdev_info(dev, "Ariadne at 0x%08lx, Ethernet Address %pM\n", + board, dev->dev_addr); + + return 0; +} + +static struct zorro_driver ariadne_driver = { + .name = "ariadne", + .id_table = ariadne_zorro_tbl, + .probe = ariadne_init_one, + .remove = __devexit_p(ariadne_remove_one), +}; + +static int __init ariadne_init_module(void) +{ + return zorro_register_driver(&ariadne_driver); +} + +static void __exit ariadne_cleanup_module(void) +{ + zorro_unregister_driver(&ariadne_driver); +} + +module_init(ariadne_init_module); +module_exit(ariadne_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/ariadne.h 
b/drivers/net/ethernet/amd/ariadne.h new file mode 100644 index 0000000..727be5c --- /dev/null +++ b/drivers/net/ethernet/amd/ariadne.h @@ -0,0 +1,415 @@ +/* + * Amiga Linux/m68k Ariadne Ethernet Driver + * + * © Copyright 1995 by Geert Uytterhoeven (geert@linux-m68k.org) + * Peter De Schrijver + * (Peter.DeSchrijver@linux.cc.kuleuven.ac.be) + * + * ---------------------------------------------------------------------------------- + * + * This program is based on + * + * lance.c: An AMD LANCE ethernet driver for linux. + * Written 1993-94 by Donald Becker. + * + * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller + * Advanced Micro Devices + * Publication #16907, Rev. B, Amendment/0, May 1994 + * + * MC68230: Parallel Interface/Timer (PI/T) + * Motorola Semiconductors, December, 1983 + * + * ---------------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * ---------------------------------------------------------------------------------- + * + * The Ariadne is a Zorro-II board made by Village Tronic. It contains: + * + * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both + * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors + * + * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports + */ + + + /* + * Am79C960 PCnet-ISA + */ + +struct Am79C960 { + volatile u_short AddressPROM[8]; + /* IEEE Address PROM (Unused in the Ariadne) */ + volatile u_short RDP; /* Register Data Port */ + volatile u_short RAP; /* Register Address Port */ + volatile u_short Reset; /* Reset Chip on Read Access */ + volatile u_short IDP; /* ISACSR Data Port */ +}; + + + /* + * Am79C960 Control and Status Registers + * + * These values are already swap()ed!! 
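+ * (That is, each constant below is the chip's register number with its
+ * two bytes swapped -- chip register 1 appears here as CSR1 == 0x0100 --
+ * presumably to match the board's byte-swapped 16-bit bus, so driver
+ * code can assign a constant to lance->RAP directly, without a further
+ * swapw(). A sketch of the access pattern used throughout ariadne.c:
+ *
+ *	lance->RAP = CSR0;	// select the register
+ *	lance->RDP = STOP;	// then read/write it through RDP
+ * )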
+ * + * Only registers marked with a `-' are intended for network software + * access + */ + +#define CSR0 0x0000 /* - PCnet-ISA Controller Status */ +#define CSR1 0x0100 /* - IADR[15:0] */ +#define CSR2 0x0200 /* - IADR[23:16] */ +#define CSR3 0x0300 /* - Interrupt Masks and Deferral Control */ +#define CSR4 0x0400 /* - Test and Features Control */ +#define CSR6 0x0600 /* RCV/XMT Descriptor Table Length */ +#define CSR8 0x0800 /* - Logical Address Filter, LADRF[15:0] */ +#define CSR9 0x0900 /* - Logical Address Filter, LADRF[31:16] */ +#define CSR10 0x0a00 /* - Logical Address Filter, LADRF[47:32] */ +#define CSR11 0x0b00 /* - Logical Address Filter, LADRF[63:48] */ +#define CSR12 0x0c00 /* - Physical Address Register, PADR[15:0] */ +#define CSR13 0x0d00 /* - Physical Address Register, PADR[31:16] */ +#define CSR14 0x0e00 /* - Physical Address Register, PADR[47:32] */ +#define CSR15 0x0f00 /* - Mode Register */ +#define CSR16 0x1000 /* Initialization Block Address Lower */ +#define CSR17 0x1100 /* Initialization Block Address Upper */ +#define CSR18 0x1200 /* Current Receive Buffer Address */ +#define CSR19 0x1300 /* Current Receive Buffer Address */ +#define CSR20 0x1400 /* Current Transmit Buffer Address */ +#define CSR21 0x1500 /* Current Transmit Buffer Address */ +#define CSR22 0x1600 /* Next Receive Buffer Address */ +#define CSR23 0x1700 /* Next Receive Buffer Address */ +#define CSR24 0x1800 /* - Base Address of Receive Ring */ +#define CSR25 0x1900 /* - Base Address of Receive Ring */ +#define CSR26 0x1a00 /* Next Receive Descriptor Address */ +#define CSR27 0x1b00 /* Next Receive Descriptor Address */ +#define CSR28 0x1c00 /* Current Receive Descriptor Address */ +#define CSR29 0x1d00 /* Current Receive Descriptor Address */ +#define CSR30 0x1e00 /* - Base Address of Transmit Ring */ +#define CSR31 0x1f00 /* - Base Address of transmit Ring */ +#define CSR32 0x2000 /* Next Transmit Descriptor Address */ +#define CSR33 0x2100 /* Next Transmit Descriptor Address */ +#define CSR34 0x2200 /* Current Transmit Descriptor Address */ +#define CSR35 0x2300 /* Current Transmit Descriptor Address */ +#define CSR36 0x2400 /* Next Next Receive Descriptor Address */ +#define CSR37 0x2500 /* Next Next Receive Descriptor Address */ +#define CSR38 0x2600 /* Next Next Transmit Descriptor Address */ +#define CSR39 0x2700 /* Next Next Transmit Descriptor Address */ +#define CSR40 0x2800 /* Current Receive Status and Byte Count */ +#define CSR41 0x2900 /* Current Receive Status and Byte Count */ +#define CSR42 0x2a00 /* Current Transmit Status and Byte Count */ +#define CSR43 0x2b00 /* Current Transmit Status and Byte Count */ +#define CSR44 0x2c00 /* Next Receive Status and Byte Count */ +#define CSR45 0x2d00 /* Next Receive Status and Byte Count */ +#define CSR46 0x2e00 /* Poll Time Counter */ +#define CSR47 0x2f00 /* Polling Interval */ +#define CSR48 0x3000 /* Temporary Storage */ +#define CSR49 0x3100 /* Temporary Storage */ +#define CSR50 0x3200 /* Temporary Storage */ +#define CSR51 0x3300 /* Temporary Storage */ +#define CSR52 0x3400 /* Temporary Storage */ +#define CSR53 0x3500 /* Temporary Storage */ +#define CSR54 0x3600 /* Temporary Storage */ +#define CSR55 0x3700 /* Temporary Storage */ +#define CSR56 0x3800 /* Temporary Storage */ +#define CSR57 0x3900 /* Temporary Storage */ +#define CSR58 0x3a00 /* Temporary Storage */ +#define CSR59 0x3b00 /* Temporary Storage */ +#define CSR60 0x3c00 /* Previous Transmit Descriptor Address */ +#define CSR61 0x3d00 /* Previous Transmit Descriptor 
Address */ +#define CSR62 0x3e00 /* Previous Transmit Status and Byte Count */ +#define CSR63 0x3f00 /* Previous Transmit Status and Byte Count */ +#define CSR64 0x4000 /* Next Transmit Buffer Address */ +#define CSR65 0x4100 /* Next Transmit Buffer Address */ +#define CSR66 0x4200 /* Next Transmit Status and Byte Count */ +#define CSR67 0x4300 /* Next Transmit Status and Byte Count */ +#define CSR68 0x4400 /* Transmit Status Temporary Storage */ +#define CSR69 0x4500 /* Transmit Status Temporary Storage */ +#define CSR70 0x4600 /* Temporary Storage */ +#define CSR71 0x4700 /* Temporary Storage */ +#define CSR72 0x4800 /* Receive Ring Counter */ +#define CSR74 0x4a00 /* Transmit Ring Counter */ +#define CSR76 0x4c00 /* - Receive Ring Length */ +#define CSR78 0x4e00 /* - Transmit Ring Length */ +#define CSR80 0x5000 /* - Burst and FIFO Threshold Control */ +#define CSR82 0x5200 /* - Bus Activity Timer */ +#define CSR84 0x5400 /* DMA Address */ +#define CSR85 0x5500 /* DMA Address */ +#define CSR86 0x5600 /* Buffer Byte Counter */ +#define CSR88 0x5800 /* - Chip ID */ +#define CSR89 0x5900 /* - Chip ID */ +#define CSR92 0x5c00 /* Ring Length Conversion */ +#define CSR94 0x5e00 /* Transmit Time Domain Reflectometry Count */ +#define CSR96 0x6000 /* Bus Interface Scratch Register 0 */ +#define CSR97 0x6100 /* Bus Interface Scratch Register 0 */ +#define CSR98 0x6200 /* Bus Interface Scratch Register 1 */ +#define CSR99 0x6300 /* Bus Interface Scratch Register 1 */ +#define CSR104 0x6800 /* SWAP */ +#define CSR105 0x6900 /* SWAP */ +#define CSR108 0x6c00 /* Buffer Management Scratch */ +#define CSR109 0x6d00 /* Buffer Management Scratch */ +#define CSR112 0x7000 /* - Missed Frame Count */ +#define CSR114 0x7200 /* - Receive Collision Count */ +#define CSR124 0x7c00 /* - Buffer Management Unit Test */ + + + /* + * Am79C960 ISA Control and Status Registers + * + * These values are already swap()ed!! + */ + +#define ISACSR0 0x0000 /* Master Mode Read Active */ +#define ISACSR1 0x0100 /* Master Mode Write Active */ +#define ISACSR2 0x0200 /* Miscellaneous Configuration */ +#define ISACSR4 0x0400 /* LED0 Status (Link Integrity) */ +#define ISACSR5 0x0500 /* LED1 Status */ +#define ISACSR6 0x0600 /* LED2 Status */ +#define ISACSR7 0x0700 /* LED3 Status */ + + + /* + * Bit definitions for CSR0 (PCnet-ISA Controller Status) + * + * These values are already swap()ed!! + */ + +#define ERR 0x0080 /* Error */ +#define BABL 0x0040 /* Babble: Transmitted too many bits */ +#define CERR 0x0020 /* No Heartbeat (10BASE-T) */ +#define MISS 0x0010 /* Missed Frame */ +#define MERR 0x0008 /* Memory Error */ +#define RINT 0x0004 /* Receive Interrupt */ +#define TINT 0x0002 /* Transmit Interrupt */ +#define IDON 0x0001 /* Initialization Done */ +#define INTR 0x8000 /* Interrupt Flag */ +#define INEA 0x4000 /* Interrupt Enable */ +#define RXON 0x2000 /* Receive On */ +#define TXON 0x1000 /* Transmit On */ +#define TDMD 0x0800 /* Transmit Demand */ +#define STOP 0x0400 /* Stop */ +#define STRT 0x0200 /* Start */ +#define INIT 0x0100 /* Initialize */ + + + /* + * Bit definitions for CSR3 (Interrupt Masks and Deferral Control) + * + * These values are already swap()ed!! 
+ */ + +#define BABLM 0x0040 /* Babble Mask */ +#define MISSM 0x0010 /* Missed Frame Mask */ +#define MERRM 0x0008 /* Memory Error Mask */ +#define RINTM 0x0004 /* Receive Interrupt Mask */ +#define TINTM 0x0002 /* Transmit Interrupt Mask */ +#define IDONM 0x0001 /* Initialization Done Mask */ +#define DXMT2PD 0x1000 /* Disable Transmit Two Part Deferral */ +#define EMBA 0x0800 /* Enable Modified Back-off Algorithm */ + + + /* + * Bit definitions for CSR4 (Test and Features Control) + * + * These values are already swap()ed!! + */ + +#define ENTST 0x0080 /* Enable Test Mode */ +#define DMAPLUS 0x0040 /* Disable Burst Transaction Counter */ +#define TIMER 0x0020 /* Timer Enable Register */ +#define DPOLL 0x0010 /* Disable Transmit Polling */ +#define APAD_XMT 0x0008 /* Auto Pad Transmit */ +#define ASTRP_RCV 0x0004 /* Auto Pad Stripping */ +#define MFCO 0x0002 /* Missed Frame Counter Overflow Interrupt */ +#define MFCOM 0x0001 /* Missed Frame Counter Overflow Mask */ +#define RCVCCO 0x2000 /* Receive Collision Counter Overflow Interrupt */ +#define RCVCCOM 0x1000 /* Receive Collision Counter Overflow Mask */ +#define TXSTRT 0x0800 /* Transmit Start Status */ +#define TXSTRTM 0x0400 /* Transmit Start Mask */ +#define JAB 0x0200 /* Jabber Error */ +#define JABM 0x0100 /* Jabber Error Mask */ + + + /* + * Bit definitions for CSR15 (Mode Register) + * + * These values are already swap()ed!! + */ + +#define PROM 0x0080 /* Promiscuous Mode */ +#define DRCVBC 0x0040 /* Disable Receive Broadcast */ +#define DRCVPA 0x0020 /* Disable Receive Physical Address */ +#define DLNKTST 0x0010 /* Disable Link Status */ +#define DAPC 0x0008 /* Disable Automatic Polarity Correction */ +#define MENDECL 0x0004 /* MENDEC Loopback Mode */ +#define LRTTSEL 0x0002 /* Low Receive Threshold/Transmit Mode Select */ +#define PORTSEL1 0x0001 /* Port Select Bits */ +#define PORTSEL2 0x8000 /* Port Select Bits */ +#define INTL 0x4000 /* Internal Loopback */ +#define DRTY 0x2000 /* Disable Retry */ +#define FCOLL 0x1000 /* Force Collision */ +#define DXMTFCS 0x0800 /* Disable Transmit CRC */ +#define LOOP 0x0400 /* Loopback Enable */ +#define DTX 0x0200 /* Disable Transmitter */ +#define DRX 0x0100 /* Disable Receiver */ + + + /* + * Bit definitions for ISACSR2 (Miscellaneous Configuration) + * + * These values are already swap()ed!! + */ + +#define ASEL 0x0200 /* Media Interface Port Auto Select */ + + + /* + * Bit definitions for ISACSR5-7 (LED1-3 Status) + * + * These values are already swap()ed!! 
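
To see how these swapped mode bits combine in practice, a sketch that enables promiscuous reception; write_csr() is a hypothetical accessor standing in for the driver's register plumbing, and per the LANCE datasheet the chip must be stopped before the mode register is rewritten:

        /* Hypothetical accessor; the real driver goes through the board's
         * address/data register pair. */
        extern void write_csr(unsigned short csr, unsigned short val);

        /* PROM is bit 15 of the chip's mode register; in this header's
         * swapped byte order it is the 0x0080 defined above. */
        static void mode_enable_promisc(void)
        {
                write_csr(CSR15, PROM);
        }
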
+ */ + +#define LEDOUT 0x0080 /* Current LED Status */ +#define PSE 0x8000 /* Pulse Stretcher Enable */ +#define XMTE 0x1000 /* Enable Transmit Status Signal */ +#define RVPOLE 0x0800 /* Enable Receive Polarity Signal */ +#define RCVE 0x0400 /* Enable Receive Status Signal */ +#define JABE 0x0200 /* Enable Jabber Signal */ +#define COLE 0x0100 /* Enable Collision Signal */ + + + /* + * Receive Descriptor Ring Entry + */ + +struct RDRE { + volatile u_short RMD0; /* LADR[15:0] */ + volatile u_short RMD1; /* HADR[23:16] | Receive Flags */ + volatile u_short RMD2; /* Buffer Byte Count (two's complement) */ + volatile u_short RMD3; /* Message Byte Count */ +}; + + + /* + * Transmit Descriptor Ring Entry + */ + +struct TDRE { + volatile u_short TMD0; /* LADR[15:0] */ + volatile u_short TMD1; /* HADR[23:16] | Transmit Flags */ + volatile u_short TMD2; /* Buffer Byte Count (two's complement) */ + volatile u_short TMD3; /* Error Flags */ +}; + + + /* + * Receive Flags + */ + +#define RF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */ +#define RF_ERR 0x0040 /* Error */ +#define RF_FRAM 0x0020 /* Framing Error */ +#define RF_OFLO 0x0010 /* Overflow Error */ +#define RF_CRC 0x0008 /* CRC Error */ +#define RF_BUFF 0x0004 /* Buffer Error */ +#define RF_STP 0x0002 /* Start of Packet */ +#define RF_ENP 0x0001 /* End of Packet */ + + + /* + * Transmit Flags + */ + +#define TF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */ +#define TF_ERR 0x0040 /* Error */ +#define TF_ADD_FCS 0x0020 /* Controls FCS Generation */ +#define TF_MORE 0x0010 /* More than one retry needed */ +#define TF_ONE 0x0008 /* One retry needed */ +#define TF_DEF 0x0004 /* Deferred */ +#define TF_STP 0x0002 /* Start of Packet */ +#define TF_ENP 0x0001 /* End of Packet */ + + + /* + * Error Flags + */ + +#define EF_BUFF 0x0080 /* Buffer Error */ +#define EF_UFLO 0x0040 /* Underflow Error */ +#define EF_LCOL 0x0010 /* Late Collision */ +#define EF_LCAR 0x0008 /* Loss of Carrier */ +#define EF_RTRY 0x0004 /* Retry Error */ +#define EF_TDR 0xff03 /* Time Domain Reflectometry */ + + + + /* + * MC68230 Parallel Interface/Timer + */ + +struct MC68230 { + volatile u_char PGCR; /* Port General Control Register */ + u_char Pad1[1]; + volatile u_char PSRR; /* Port Service Request Register */ + u_char Pad2[1]; + volatile u_char PADDR; /* Port A Data Direction Register */ + u_char Pad3[1]; + volatile u_char PBDDR; /* Port B Data Direction Register */ + u_char Pad4[1]; + volatile u_char PCDDR; /* Port C Data Direction Register */ + u_char Pad5[1]; + volatile u_char PIVR; /* Port Interrupt Vector Register */ + u_char Pad6[1]; + volatile u_char PACR; /* Port A Control Register */ + u_char Pad7[1]; + volatile u_char PBCR; /* Port B Control Register */ + u_char Pad8[1]; + volatile u_char PADR; /* Port A Data Register */ + u_char Pad9[1]; + volatile u_char PBDR; /* Port B Data Register */ + u_char Pad10[1]; + volatile u_char PAAR; /* Port A Alternate Register */ + u_char Pad11[1]; + volatile u_char PBAR; /* Port B Alternate Register */ + u_char Pad12[1]; + volatile u_char PCDR; /* Port C Data Register */ + u_char Pad13[1]; + volatile u_char PSR; /* Port Status Register */ + u_char Pad14[5]; + volatile u_char TCR; /* Timer Control Register */ + u_char Pad15[1]; + volatile u_char TIVR; /* Timer Interrupt Vector Register */ + u_char Pad16[3]; + volatile u_char CPRH; /* Counter Preload Register (High) */ + u_char Pad17[1]; + volatile u_char CPRM; /* Counter Preload Register (Mid) */ + u_char Pad18[1]; + volatile u_char CPRL; /* Counter 
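
The RF_/TF_ flags above encode a simple ownership handshake: the host fills a descriptor and sets RF_OWN to pass it to the chip, and the chip clears RF_OWN once the slot holds a received frame. A condensed sketch of the receive side, assuming the struct RDRE and flag definitions above are in scope (the helper name is invented for illustration, and values stay in the header's swapped representation):

        typedef unsigned short u_short; /* as used by the declarations above */

        /* Hand one receive descriptor back to the chip.  Ownership must be
         * transferred last, after the rest of the entry is valid. */
        static void rx_give_to_chip(struct RDRE *rd, u_short buf_lo,
                                    u_short buf_hi, u_short buflen)
        {
                rd->RMD0 = buf_lo;                      /* LADR[15:0] */
                rd->RMD2 = (u_short)(0 - buflen);       /* two's complement count */
                rd->RMD3 = 0;
                rd->RMD1 = buf_hi | RF_OWN;             /* ownership goes last */
        }
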
Preload Register (Low) */ + u_char Pad19[3]; + volatile u_char CNTRH; /* Count Register (High) */ + u_char Pad20[1]; + volatile u_char CNTRM; /* Count Register (Mid) */ + u_char Pad21[1]; + volatile u_char CNTRL; /* Count Register (Low) */ + u_char Pad22[1]; + volatile u_char TSR; /* Timer Status Register */ + u_char Pad23[11]; +}; + + + /* + * Ariadne Expansion Board Structure + */ + +#define ARIADNE_LANCE 0x360 + +#define ARIADNE_PIT 0x1000 + +#define ARIADNE_BOOTPROM 0x4000 /* I guess it's here :-) */ +#define ARIADNE_BOOTPROM_SIZE 0x4000 + +#define ARIADNE_RAM 0x8000 /* Always access WORDs!! */ +#define ARIADNE_RAM_SIZE 0x8000 + diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c new file mode 100644 index 0000000..1264d78 --- /dev/null +++ b/drivers/net/ethernet/amd/atarilance.c @@ -0,0 +1,1176 @@ +/* atarilance.c: Ethernet driver for VME Lance cards on the Atari */ +/* + Written 1995/96 by Roman Hodek (Roman.Hodek@informatik.uni-erlangen.de) + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + This drivers was written with the following sources of reference: + - The driver for the Riebl Lance card by the TU Vienna. + - The modified TUW driver for PAM's VME cards + - The PC-Linux driver for Lance cards (but this is for bus master + cards, not the shared memory ones) + - The Amiga Ariadne driver + + v1.0: (in 1.2.13pl4/0.9.13) + Initial version + v1.1: (in 1.2.13pl5) + more comments + deleted some debugging stuff + optimized register access (keep AREG pointing to CSR0) + following AMD, CSR0_STRT should be set only after IDON is detected + use memcpy() for data transfers, that also employs long word moves + better probe procedure for 24-bit systems + non-VME-RieblCards need extra delays in memcpy + must also do write test, since 0xfxe00000 may hit ROM + use 8/32 tx/rx buffers, which should give better NFS performance; + this is made possible by shifting the last packet buffer after the + RieblCard reserved area + v1.2: (in 1.2.13pl8) + again fixed probing for the Falcon; 0xfe01000 hits phys. 0x00010000 + and thus RAM, in case of no Lance found all memory contents have to + be restored! + Now possible to compile as module. + v1.3: 03/30/96 Jes Sorensen, Roman (in 1.3) + Several little 1.3 adaptions + When the lance is stopped it jumps back into little-endian + mode. It is therefore necessary to put it back where it + belongs, in big endian mode, in order to make things work. + This might be the reason why multicast-mode didn't work + before, but I'm not able to test it as I only got an Amiga + (we had similar problems with the A2065 driver). + +*/ + +static char version[] = "atarilance.c: v1.3 04/04/96 " + "Roman.Hodek@informatik.uni-erlangen.de\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* Debug level: + * 0 = silent, print only serious errors + * 1 = normal, print error messages + * 2 = debug, print debug infos + * 3 = debug, print even more debug infos (packet data) + */ + +#define LANCE_DEBUG 1 + +#ifdef LANCE_DEBUG +static int lance_debug = LANCE_DEBUG; +#else +static int lance_debug = 1; +#endif +module_param(lance_debug, int, 0); +MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)"); +MODULE_LICENSE("GPL"); + +/* Print debug messages on probing? 
*/ +#undef LANCE_DEBUG_PROBE + +#define DPRINTK(n,a) \ + do { \ + if (lance_debug >= n) \ + printk a; \ + } while( 0 ) + +#ifdef LANCE_DEBUG_PROBE +# define PROBE_PRINT(a) printk a +#else +# define PROBE_PRINT(a) +#endif + +/* These define the number of Rx and Tx buffers as log2. (Only powers + * of two are valid) + * Many more rx buffers (32) are reserved than tx buffers (8), since receiving + * is more time-critical than sending and packets may have to remain in the + * board's memory when main memory is low. + */ + +#define TX_LOG_RING_SIZE 3 +#define RX_LOG_RING_SIZE 5 + +/* These are the derived values */ + +#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) +#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) + +#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE) +#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) + +#define TX_TIMEOUT (HZ/5) + +/* The LANCE Rx and Tx ring descriptors. */ +struct lance_rx_head { + unsigned short base; /* Low word of base addr */ + volatile unsigned char flag; + unsigned char base_hi; /* High word of base addr (unused) */ + short buf_length; /* This length is 2s complement! */ + volatile short msg_length; /* This length is "normal". */ +}; + +struct lance_tx_head { + unsigned short base; /* Low word of base addr */ + volatile unsigned char flag; + unsigned char base_hi; /* High word of base addr (unused) */ + short length; /* Length is 2s complement! */ + volatile short misc; +}; + +struct ringdesc { + unsigned short adr_lo; /* Low 16 bits of address */ + unsigned char len; /* Length bits */ + unsigned char adr_hi; /* High 8 bits of address (unused) */ +}; + +/* The LANCE initialization block, described in the databook. */ +struct lance_init_block { + unsigned short mode; /* Pre-set mode */ + unsigned char hwaddr[6]; /* Physical ethernet address */ + unsigned filter[2]; /* Multicast filter (unused). */ + /* Receive and transmit ring base, along with length bits. */ + struct ringdesc rx_ring; + struct ringdesc tx_ring; +}; + +/* The whole layout of the Lance shared memory */ +struct lance_memory { + struct lance_init_block init; + struct lance_tx_head tx_head[TX_RING_SIZE]; + struct lance_rx_head rx_head[RX_RING_SIZE]; + char packet_area[0]; /* packet data follow after the + * init block and the ring + * descriptors and are located + * at runtime */ +}; + +/* RieblCard specifics: + * The original TOS driver for these cards reserves the area from offset + * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the + * Ethernet address there, and the magic for verifying the data's validity. + * The reserved area isn't touched by packet buffers. Furthermore, offset 0xfffe + * is reserved for the interrupt vector number. + */ +#define RIEBL_RSVD_START 0xee70 +#define RIEBL_RSVD_END 0xeec0 +#define RIEBL_MAGIC 0x09051990 +#define RIEBL_MAGIC_ADDR ((unsigned long *)(((char *)MEM) + 0xee8a)) +#define RIEBL_HWADDR_ADDR ((unsigned char *)(((char *)MEM) + 0xee8e)) +#define RIEBL_IVEC_ADDR ((unsigned short *)(((char *)MEM) + 0xfffe)) + +/* This is a default address for the old RieblCards without a battery + * that have no ethernet address at boot time. 00:00:36:04 is the + * prefix for Riebl cards, the 00:00 at the end is arbitrary.
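
The `<< 5` in the *_RING_LEN_BITS macros positions the log2 ring size in bits 7..5, which is where the LANCE expects it in the init block's length byte. A compile-time sanity check of that arithmetic (illustrative only, C11 static_assert):

        #include <assert.h>

        /* 3 -> 8 Tx entries, encoded as 0x60; 5 -> 32 Rx entries, as 0xa0 */
        static_assert(TX_RING_LEN_BITS == 0x60, "Tx: log2(8) in bits 7..5");
        static_assert(RX_RING_LEN_BITS == 0xa0, "Rx: log2(32) in bits 7..5");
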
+ */ + +static unsigned char OldRieblDefHwaddr[6] = { + 0x00, 0x00, 0x36, 0x04, 0x00, 0x00 +}; + + +/* I/O registers of the Lance chip */ + +struct lance_ioreg { +/* base+0x0 */ volatile unsigned short data; +/* base+0x2 */ volatile unsigned short addr; + unsigned char _dummy1[3]; +/* base+0x7 */ volatile unsigned char ivec; + unsigned char _dummy2[5]; +/* base+0xd */ volatile unsigned char eeprom; + unsigned char _dummy3; +/* base+0xf */ volatile unsigned char mem; +}; + +/* Types of boards this driver supports */ + +enum lance_type { + OLD_RIEBL, /* old Riebl card without battery */ + NEW_RIEBL, /* new Riebl card with battery */ + PAM_CARD /* PAM card with EEPROM */ +}; + +static char *lance_names[] = { + "Riebl-Card (without battery)", + "Riebl-Card (with battery)", + "PAM intern card" +}; + +/* The driver's private device structure */ + +struct lance_private { + enum lance_type cardtype; + struct lance_ioreg *iobase; + struct lance_memory *mem; + int cur_rx, cur_tx; /* The next free ring entry */ + int dirty_tx; /* Ring entries to be freed. */ + /* copy function */ + void *(*memcpy_f)( void *, const void *, size_t ); +/* This must be long for set_bit() */ + long tx_full; + spinlock_t devlock; +}; + +/* I/O register access macros */ + +#define MEM lp->mem +#define DREG IO->data +#define AREG IO->addr +#define REGA(a) (*( AREG = (a), &DREG )) + +/* Definitions for packet buffer access: */ +#define PKT_BUF_SZ 1544 +/* Get the address of a packet buffer corresponding to a given buffer head */ +#define PKTBUF_ADDR(head) (((unsigned char *)(MEM)) + (head)->base) + +/* Possible memory/IO addresses for probing */ + +static struct lance_addr { + unsigned long memaddr; + unsigned long ioaddr; + int slow_flag; +} lance_addr_list[] = { + { 0xfe010000, 0xfe00fff0, 0 }, /* RieblCard VME in TT */ + { 0xffc10000, 0xffc0fff0, 0 }, /* RieblCard VME in MegaSTE + (highest byte stripped) */ + { 0xffe00000, 0xffff7000, 1 }, /* RieblCard in ST + (highest byte stripped) */ + { 0xffd00000, 0xffff7000, 1 }, /* RieblCard in ST with hw modif. 
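
REGA(a) above packs a two-step access into one expression: the CSR number is assigned to the addr port, and the data port then maps onto that CSR. The same access written out long-hand (illustrative; the macro form is what lets the driver park AREG on CSR0 afterwards and use DREG directly):

        /* Long-hand equivalents of REGA(a) = val and val = REGA(a). */
        static void lance_write_csr(struct lance_ioreg *io,
                                    unsigned short csr, unsigned short val)
        {
                io->addr = csr;         /* select the CSR... */
                io->data = val;         /* ...then write through the data port */
        }

        static unsigned short lance_read_csr(struct lance_ioreg *io,
                                             unsigned short csr)
        {
                io->addr = csr;
                return io->data;
        }
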
to + avoid conflict with ROM + (highest byte stripped) */ + { 0xffcf0000, 0xffcffff0, 0 }, /* PAMCard VME in TT and MSTE + (highest byte stripped) */ + { 0xfecf0000, 0xfecffff0, 0 }, /* Rhotron's PAMCard VME in TT and MSTE + (highest byte stripped) */ +}; + +#define N_LANCE_ADDR ARRAY_SIZE(lance_addr_list) + + +/* Definitions for the Lance */ + +/* tx_head flags */ +#define TMD1_ENP 0x01 /* end of packet */ +#define TMD1_STP 0x02 /* start of packet */ +#define TMD1_DEF 0x04 /* deferred */ +#define TMD1_ONE 0x08 /* one retry needed */ +#define TMD1_MORE 0x10 /* more than one retry needed */ +#define TMD1_ERR 0x40 /* error summary */ +#define TMD1_OWN 0x80 /* ownership (set: chip owns) */ + +#define TMD1_OWN_CHIP TMD1_OWN +#define TMD1_OWN_HOST 0 + +/* tx_head misc field */ +#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */ +#define TMD3_RTRY 0x0400 /* failed after 16 retries */ +#define TMD3_LCAR 0x0800 /* carrier lost */ +#define TMD3_LCOL 0x1000 /* late collision */ +#define TMD3_UFLO 0x4000 /* underflow (late memory) */ +#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */ + +/* rx_head flags */ +#define RMD1_ENP 0x01 /* end of packet */ +#define RMD1_STP 0x02 /* start of packet */ +#define RMD1_BUFF 0x04 /* buffer error */ +#define RMD1_CRC 0x08 /* CRC error */ +#define RMD1_OFLO 0x10 /* overflow */ +#define RMD1_FRAM 0x20 /* framing error */ +#define RMD1_ERR 0x40 /* error summary */ +#define RMD1_OWN 0x80 /* ownership (set: chip owns) */ + +#define RMD1_OWN_CHIP RMD1_OWN +#define RMD1_OWN_HOST 0 + +/* register names */ +#define CSR0 0 /* mode/status */ +#define CSR1 1 /* init block addr (low) */ +#define CSR2 2 /* init block addr (high) */ +#define CSR3 3 /* misc */ +#define CSR8 8 /* address filter */ +#define CSR15 15 /* promiscuous mode */ + +/* CSR0 */ +/* (R=readable, W=writeable, S=set on write, C=clear on write) */ +#define CSR0_INIT 0x0001 /* initialize (RS) */ +#define CSR0_STRT 0x0002 /* start (RS) */ +#define CSR0_STOP 0x0004 /* stop (RS) */ +#define CSR0_TDMD 0x0008 /* transmit demand (RS) */ +#define CSR0_TXON 0x0010 /* transmitter on (R) */ +#define CSR0_RXON 0x0020 /* receiver on (R) */ +#define CSR0_INEA 0x0040 /* interrupt enable (RW) */ +#define CSR0_INTR 0x0080 /* interrupt active (R) */ +#define CSR0_IDON 0x0100 /* initialization done (RC) */ +#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */ +#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */ +#define CSR0_MERR 0x0800 /* memory error (RC) */ +#define CSR0_MISS 0x1000 /* missed frame (RC) */ +#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */ +#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */ +#define CSR0_ERR 0x8000 /* error (RC) */ + +/* CSR3 */ +#define CSR3_BCON 0x0001 /* byte control */ +#define CSR3_ACON 0x0002 /* ALE control */ +#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */ + + + +/***************************** Prototypes *****************************/ + +static unsigned long lance_probe1( struct net_device *dev, struct lance_addr + *init_rec ); +static int lance_open( struct net_device *dev ); +static void lance_init_ring( struct net_device *dev ); +static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static irqreturn_t lance_interrupt( int irq, void *dev_id ); +static int lance_rx( struct net_device *dev ); +static int lance_close( struct net_device *dev ); +static void set_multicast_list( struct net_device *dev ); +static int lance_set_mac_address( struct net_device *dev, void *addr ); +static void 
lance_tx_timeout (struct net_device *dev); + +/************************* End of Prototypes **************************/ + + + + + +static void *slow_memcpy( void *dst, const void *src, size_t len ) + +{ char *cto = dst; + const char *cfrom = src; + + while( len-- ) { + *cto++ = *cfrom++; + MFPDELAY(); + } + return dst; +} + + +struct net_device * __init atarilance_probe(int unit) +{ + int i; + static int found; + struct net_device *dev; + int err = -ENODEV; + + if (!MACH_IS_ATARI || found) + /* Assume there's only one board possible... That seems true, since + * the Riebl/PAM board's address cannot be changed. */ + return ERR_PTR(-ENODEV); + + dev = alloc_etherdev(sizeof(struct lance_private)); + if (!dev) + return ERR_PTR(-ENOMEM); + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + + for( i = 0; i < N_LANCE_ADDR; ++i ) { + if (lance_probe1( dev, &lance_addr_list[i] )) { + found = 1; + err = register_netdev(dev); + if (!err) + return dev; + free_irq(dev->irq, dev); + break; + } + } + free_netdev(dev); + return ERR_PTR(err); +} + + +/* Derived from hwreg_present() in atari/config.c: */ + +static noinline int __init addr_accessible(volatile void *regp, int wordflag, + int writeflag) +{ + int ret; + unsigned long flags; + long *vbr, save_berr; + + local_irq_save(flags); + + __asm__ __volatile__ ( "movec %/vbr,%0" : "=r" (vbr) : ); + save_berr = vbr[2]; + + __asm__ __volatile__ + ( "movel %/sp,%/d1\n\t" + "movel #Lberr,%2@\n\t" + "moveq #0,%0\n\t" + "tstl %3\n\t" + "bne 1f\n\t" + "moveb %1@,%/d0\n\t" + "nop \n\t" + "bra 2f\n" +"1: movew %1@,%/d0\n\t" + "nop \n" +"2: tstl %4\n\t" + "beq 2f\n\t" + "tstl %3\n\t" + "bne 1f\n\t" + "clrb %1@\n\t" + "nop \n\t" + "moveb %/d0,%1@\n\t" + "nop \n\t" + "bra 2f\n" +"1: clrw %1@\n\t" + "nop \n\t" + "movew %/d0,%1@\n\t" + "nop \n" +"2: moveq #1,%0\n" +"Lberr: movel %/d1,%/sp" + : "=&d" (ret) + : "a" (regp), "a" (&vbr[2]), "rm" (wordflag), "rm" (writeflag) + : "d0", "d1", "memory" + ); + + vbr[2] = save_berr; + local_irq_restore(flags); + + return ret; +} + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_multicast_list = set_multicast_list, + .ndo_set_mac_address = lance_set_mac_address, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static unsigned long __init lance_probe1( struct net_device *dev, + struct lance_addr *init_rec ) +{ + volatile unsigned short *memaddr = + (volatile unsigned short *)init_rec->memaddr; + volatile unsigned short *ioaddr = + (volatile unsigned short *)init_rec->ioaddr; + struct lance_private *lp; + struct lance_ioreg *IO; + int i; + static int did_version; + unsigned short save1, save2; + + PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n", + (long)memaddr, (long)ioaddr )); + + /* Test whether memory readable and writable */ + PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" )); + if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail; + + /* Written values should come back... 
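
addr_accessible() above works by temporarily swapping the m68k bus-error vector (vbr[2]) so that a touch of a nonexistent address falls through to a recovery label instead of taking the kernel down. A conceptual userspace analogue of the same trick, using SIGSEGV and sigsetjmp in place of the exception vector (an illustration of the idea only, not kernel code):

        #include <setjmp.h>
        #include <signal.h>

        static sigjmp_buf probe_env;

        static void probe_fault(int sig)
        {
                (void)sig;
                siglongjmp(probe_env, 1);       /* bail out of the access */
        }

        /* Returns 1 if the byte at 'p' can be read without faulting. */
        static int probe_byte(volatile unsigned char *p)
        {
                void (*old)(int) = signal(SIGSEGV, probe_fault);
                int ok = 0;

                if (!sigsetjmp(probe_env, 1)) {
                        (void)*p;               /* may fault and long-jump out */
                        ok = 1;
                }
                signal(SIGSEGV, old);           /* restore, like vbr[2] above */
                return ok;
        }
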
*/ + PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" )); + save1 = *memaddr; + *memaddr = 0x0001; + if (*memaddr != 0x0001) goto probe_fail; + PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" )); + *memaddr = 0x0000; + if (*memaddr != 0x0000) goto probe_fail; + *memaddr = save1; + + /* First port should be readable and writable */ + PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" )); + if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail; + + /* and written values should be readable */ + PROBE_PRINT(( "lance_probe1: testing ioport to be writeable\n" )); + save2 = ioaddr[1]; + ioaddr[1] = 0x0001; + if (ioaddr[1] != 0x0001) goto probe_fail; + + /* The CSR0_INIT bit should not be readable */ + PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" )); + save1 = ioaddr[0]; + ioaddr[1] = CSR0; + ioaddr[0] = CSR0_INIT | CSR0_STOP; + if (ioaddr[0] != CSR0_STOP) { + ioaddr[0] = save1; + ioaddr[1] = save2; + goto probe_fail; + } + PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" )); + ioaddr[0] = CSR0_STOP; + if (ioaddr[0] != CSR0_STOP) { + ioaddr[0] = save1; + ioaddr[1] = save2; + goto probe_fail; + } + + /* Now ok... */ + PROBE_PRINT(( "lance_probe1: Lance card detected\n" )); + goto probe_ok; + + probe_fail: + return 0; + + probe_ok: + lp = netdev_priv(dev); + MEM = (struct lance_memory *)memaddr; + IO = lp->iobase = (struct lance_ioreg *)ioaddr; + dev->base_addr = (unsigned long)ioaddr; /* informational only */ + lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy; + + REGA( CSR0 ) = CSR0_STOP; + + /* Now test for type: If the eeprom I/O port is readable, it is a + * PAM card */ + if (addr_accessible( &(IO->eeprom), 0, 0 )) { + /* Switch back to Ram */ + i = IO->mem; + lp->cardtype = PAM_CARD; + } + else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) { + lp->cardtype = NEW_RIEBL; + } + else + lp->cardtype = OLD_RIEBL; + + if (lp->cardtype == PAM_CARD || + memaddr == (unsigned short *)0xffe00000) { + /* PAMs card and Riebl on ST use level 5 autovector */ + if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, + "PAM,Riebl-ST Ethernet", dev)) { + printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); + return 0; + } + dev->irq = (unsigned short)IRQ_AUTO_5; + } + else { + /* For VME-RieblCards, request a free VME int; + * (This must be unsigned long, since dev->irq is short and the + * IRQ_MACHSPEC bit would be cut off...) + */ + unsigned long irq = atari_register_vme_int(); + if (!irq) { + printk( "Lance: request for VME interrupt failed\n" ); + return 0; + } + if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO, + "Riebl-VME Ethernet", dev)) { + printk( "Lance: request for irq %ld failed\n", irq ); + return 0; + } + dev->irq = irq; + } + + printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ", + dev->name, lance_names[lp->cardtype], + (unsigned long)ioaddr, + (unsigned long)memaddr, + dev->irq, + init_rec->slow_flag ? " (slow memcpy)" : "" ); + + /* Get the ethernet address */ + switch( lp->cardtype ) { + case OLD_RIEBL: + /* No ethernet address! 
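
Stripped of the PROBE_PRINT()s and goto labels, the write tests near the top of lance_probe1() reduce to a save/pattern/restore sequence. A condensed, illustrative form:

        /* Core of the probe's write test: both patterns must read back,
         * and the original word is restored afterwards. */
        static int mem_word_writable(volatile unsigned short *p)
        {
                unsigned short save = *p;
                int ok;

                *p = 0x0001;
                ok = (*p == 0x0001);
                *p = 0x0000;
                ok = ok && (*p == 0x0000);
                *p = save;
                return ok;
        }
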
(Set some default address) */ + memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 ); + break; + case NEW_RIEBL: + lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 ); + break; + case PAM_CARD: + i = IO->eeprom; + for( i = 0; i < 6; ++i ) + dev->dev_addr[i] = + ((((unsigned short *)MEM)[i*2] & 0x0f) << 4) | + ((((unsigned short *)MEM)[i*2+1] & 0x0f)); + i = IO->mem; + break; + } + printk("%pM\n", dev->dev_addr); + if (lp->cardtype == OLD_RIEBL) { + printk( "%s: Warning: This is a default ethernet address!\n", + dev->name ); + printk( " Use \"ifconfig hw ether ...\" to set the address.\n" ); + } + + spin_lock_init(&lp->devlock); + + MEM->init.mode = 0x0000; /* Disable Rx and Tx. */ + for( i = 0; i < 6; i++ ) + MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */ + MEM->init.filter[0] = 0x00000000; + MEM->init.filter[1] = 0x00000000; + MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head ); + MEM->init.rx_ring.adr_hi = 0; + MEM->init.rx_ring.len = RX_RING_LEN_BITS; + MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head ); + MEM->init.tx_ring.adr_hi = 0; + MEM->init.tx_ring.len = TX_RING_LEN_BITS; + + if (lp->cardtype == PAM_CARD) + IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq); + else + *RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq); + + if (did_version++ == 0) + DPRINTK( 1, ( version )); + + dev->netdev_ops = &lance_netdev_ops; + + /* XXX MSch */ + dev->watchdog_timeo = TX_TIMEOUT; + + return 1; +} + + +static int lance_open( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_ioreg *IO = lp->iobase; + int i; + + DPRINTK( 2, ( "%s: lance_open()\n", dev->name )); + + lance_init_ring(dev); + /* Re-initialize the LANCE, and start it when done. */ + + REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); + REGA( CSR2 ) = 0; + REGA( CSR1 ) = 0; + REGA( CSR0 ) = CSR0_INIT; + /* From now on, AREG is kept to point to CSR0 */ + + i = 1000000; + while (--i > 0) + if (DREG & CSR0_IDON) + break; + if (i <= 0 || (DREG & CSR0_ERR)) { + DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", + dev->name, i, DREG )); + DREG = CSR0_STOP; + return -EIO; + } + DREG = CSR0_IDON; + DREG = CSR0_STRT; + DREG = CSR0_INEA; + + netif_start_queue (dev); + + DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); + + return 0; +} + + +/* Initialize the LANCE Rx and Tx rings. */ + +static void lance_init_ring( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int i; + unsigned offset; + + lp->tx_full = 0; + lp->cur_rx = lp->cur_tx = 0; + lp->dirty_tx = 0; + + offset = offsetof( struct lance_memory, packet_area ); + +/* If the packet buffer at offset 'o' would conflict with the reserved area + * of RieblCards, advance it */ +#define CHECK_OFFSET(o) \ + do { \ + if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) { \ + if (((o) < RIEBL_RSVD_START) ? 
(o)+PKT_BUF_SZ > RIEBL_RSVD_START \ + : (o) < RIEBL_RSVD_END) \ + (o) = RIEBL_RSVD_END; \ + } \ + } while(0) + + for( i = 0; i < TX_RING_SIZE; i++ ) { + CHECK_OFFSET(offset); + MEM->tx_head[i].base = offset; + MEM->tx_head[i].flag = TMD1_OWN_HOST; + MEM->tx_head[i].base_hi = 0; + MEM->tx_head[i].length = 0; + MEM->tx_head[i].misc = 0; + offset += PKT_BUF_SZ; + } + + for( i = 0; i < RX_RING_SIZE; i++ ) { + CHECK_OFFSET(offset); + MEM->rx_head[i].base = offset; + MEM->rx_head[i].flag = TMD1_OWN_CHIP; + MEM->rx_head[i].base_hi = 0; + MEM->rx_head[i].buf_length = -PKT_BUF_SZ; + MEM->rx_head[i].msg_length = 0; + offset += PKT_BUF_SZ; + } +} + + +/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ + + +static void lance_tx_timeout (struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_ioreg *IO = lp->iobase; + + AREG = CSR0; + DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n", + dev->name, DREG )); + DREG = CSR0_STOP; + /* + * Always set BSWP after a STOP as STOP puts it back into + * little endian mode. + */ + REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); + dev->stats.tx_errors++; +#ifndef final_version + { int i; + DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n", + lp->dirty_tx, lp->cur_tx, + lp->tx_full ? " (full)" : "", + lp->cur_rx )); + for( i = 0 ; i < RX_RING_SIZE; i++ ) + DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n", + i, MEM->rx_head[i].base, + -MEM->rx_head[i].buf_length, + MEM->rx_head[i].msg_length )); + for( i = 0 ; i < TX_RING_SIZE; i++ ) + DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n", + i, MEM->tx_head[i].base, + -MEM->tx_head[i].length, + MEM->tx_head[i].misc )); + } +#endif + /* XXX MSch: maybe purge/reinit ring here */ + /* lance_restart, essentially */ + lance_init_ring(dev); + REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ + +static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_ioreg *IO = lp->iobase; + int entry, len; + struct lance_tx_head *head; + unsigned long flags; + + DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", + dev->name, DREG )); + + + /* The old LANCE chips doesn't automatically pad buffers to min. size. */ + len = skb->len; + if (len < ETH_ZLEN) + len = ETH_ZLEN; + /* PAM-Card has a bug: Can only send packets with even number of bytes! */ + else if (lp->cardtype == PAM_CARD && (len & 1)) + ++len; + + if (len > skb->len) { + if (skb_padto(skb, len)) + return NETDEV_TX_OK; + } + + netif_stop_queue (dev); + + /* Fill in a Tx ring entry */ + if (lance_debug >= 3) { + printk( "%s: TX pkt type 0x%04x from %pM to %pM" + " data at 0x%08x len %d\n", + dev->name, ((u_short *)skb->data)[6], + &skb->data[6], skb->data, + (int)skb->data, (int)skb->len ); + } + + /* We're not prepared for the int until the last flags are set/reset. And + * the int may happen already after setting the OWN_CHIP... */ + spin_lock_irqsave (&lp->devlock, flags); + + /* Mask to ring buffer boundary. */ + entry = lp->cur_tx & TX_RING_MOD_MASK; + head = &(MEM->tx_head[entry]); + + /* Caution: the write order is important here, set the "ownership" bits + * last. 
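
CHECK_OFFSET() above, rewritten as a plain function to make the interval logic easier to read (same arithmetic, same names; illustrative only):

        /* A buffer that would overlap the RieblCard reserved area is moved
         * past it; buffers entirely below or above the area stay put. */
        static unsigned int riebl_safe_offset(unsigned int o)
        {
                int overlaps = (o < RIEBL_RSVD_START)
                        ? (o + PKT_BUF_SZ > RIEBL_RSVD_START)
                        : (o < RIEBL_RSVD_END);

                return overlaps ? RIEBL_RSVD_END : o;
        }
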
+ */ + + + head->length = -len; + head->misc = 0; + lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); + head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; + dev->stats.tx_bytes += skb->len; + dev_kfree_skb( skb ); + lp->cur_tx++; + while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { + lp->cur_tx -= TX_RING_SIZE; + lp->dirty_tx -= TX_RING_SIZE; + } + + /* Trigger an immediate send poll. */ + DREG = CSR0_INEA | CSR0_TDMD; + + if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) == + TMD1_OWN_HOST) + netif_start_queue (dev); + else + lp->tx_full = 1; + spin_unlock_irqrestore (&lp->devlock, flags); + + return NETDEV_TX_OK; +} + +/* The LANCE interrupt handler. */ + +static irqreturn_t lance_interrupt( int irq, void *dev_id ) +{ + struct net_device *dev = dev_id; + struct lance_private *lp; + struct lance_ioreg *IO; + int csr0, boguscnt = 10; + int handled = 0; + + if (dev == NULL) { + DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" )); + return IRQ_NONE; + } + + lp = netdev_priv(dev); + IO = lp->iobase; + spin_lock (&lp->devlock); + + AREG = CSR0; + + while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) && + --boguscnt >= 0) { + handled = 1; + /* Acknowledge all of the current interrupt sources ASAP. */ + DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP | + CSR0_TDMD | CSR0_INEA); + + DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n", + dev->name, csr0, DREG )); + + if (csr0 & CSR0_RINT) /* Rx interrupt */ + lance_rx( dev ); + + if (csr0 & CSR0_TINT) { /* Tx-done interrupt */ + int dirty_tx = lp->dirty_tx; + + while( dirty_tx < lp->cur_tx) { + int entry = dirty_tx & TX_RING_MOD_MASK; + int status = MEM->tx_head[entry].flag; + + if (status & TMD1_OWN_CHIP) + break; /* It still hasn't been Txed */ + + MEM->tx_head[entry].flag = 0; + + if (status & TMD1_ERR) { + /* There was an major error, log it. */ + int err_status = MEM->tx_head[entry].misc; + dev->stats.tx_errors++; + if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++; + if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++; + if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++; + if (err_status & TMD3_UFLO) { + /* Ackk! On FIFO errors the Tx unit is turned off! */ + dev->stats.tx_fifo_errors++; + /* Remove this verbosity later! */ + DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n", + dev->name, csr0 )); + /* Restart the chip. */ + DREG = CSR0_STRT; + } + } else { + if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF)) + dev->stats.collisions++; + dev->stats.tx_packets++; + } + + /* XXX MSch: free skb?? */ + dirty_tx++; + } + +#ifndef final_version + if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { + DPRINTK( 0, ( "out-of-sync dirty pointer," + " %d vs. %d, full=%ld.\n", + dirty_tx, lp->cur_tx, lp->tx_full )); + dirty_tx += TX_RING_SIZE; + } +#endif + + if (lp->tx_full && (netif_queue_stopped(dev)) && + dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) { + /* The ring is no longer full, clear tbusy. */ + lp->tx_full = 0; + netif_wake_queue (dev); + } + + lp->dirty_tx = dirty_tx; + } + + /* Log misc errors. */ + if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & CSR0_MERR) { + DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " + "status %04x.\n", dev->name, csr0 )); + /* Restart the chip. */ + DREG = CSR0_STRT; + } + } + + /* Clear any other interrupt, and set interrupt enable. 
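
The handler above acknowledges interrupts by writing the just-read status word back to CSR0: the status bits are clear-on-write, but the command bits (INIT/STRT/STOP/TDMD) and INEA must be masked out or the write-back would also re-issue those commands. That masking, isolated as a sketch:

        /* Compute the CSR0 write that acknowledges exactly what was read. */
        static unsigned short csr0_ack_value(unsigned short csr0)
        {
                return csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP |
                                CSR0_TDMD | CSR0_INEA);
        }
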
*/ + DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR | + CSR0_IDON | CSR0_INEA; + + DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n", + dev->name, DREG )); + + spin_unlock (&lp->devlock); + return IRQ_RETVAL(handled); +} + + +static int lance_rx( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int entry = lp->cur_rx & RX_RING_MOD_MASK; + int i; + + DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name, + MEM->rx_head[entry].flag )); + + /* If we own the next entry, it's a new packet. Send it up. */ + while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) { + struct lance_rx_head *head = &(MEM->rx_head[entry]); + int status = head->flag; + + if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */ + /* There is a tricky error noted by John Murphy, + to Russ Nelson: Even with full-sized + buffers it's possible for a jabber packet to use two + buffers, with only the last correctly noting the error. */ + if (status & RMD1_ENP) /* Only count a general error at the */ + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; + if (status & RMD1_OFLO) dev->stats.rx_over_errors++; + if (status & RMD1_CRC) dev->stats.rx_crc_errors++; + if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; + head->flag &= (RMD1_ENP|RMD1_STP); + } else { + /* Malloc up new buffer, compatible with net-3. */ + short pkt_len = head->msg_length & 0xfff; + struct sk_buff *skb; + + if (pkt_len < 60) { + printk( "%s: Runt packet!\n", dev->name ); + dev->stats.rx_errors++; + } + else { + skb = dev_alloc_skb( pkt_len+2 ); + if (skb == NULL) { + DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", + dev->name )); + for( i = 0; i < RX_RING_SIZE; i++ ) + if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag & + RMD1_OWN_CHIP) + break; + + if (i > RX_RING_SIZE - 2) { + dev->stats.rx_dropped++; + head->flag |= RMD1_OWN_CHIP; + lp->cur_rx++; + } + break; + } + + if (lance_debug >= 3) { + u_char *data = PKTBUF_ADDR(head); + + printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM " + "data %02x %02x %02x %02x %02x %02x %02x %02x " + "len %d\n", + dev->name, ((u_short *)data)[6], + &data[6], data, + data[15], data[16], data[17], data[18], + data[19], data[20], data[21], data[22], + pkt_len); + } + + skb_reserve( skb, 2 ); /* 16 byte align */ + skb_put( skb, pkt_len ); /* Make room */ + lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len ); + skb->protocol = eth_type_trans( skb, dev ); + netif_rx( skb ); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + } + + head->flag |= RMD1_OWN_CHIP; + entry = (++lp->cur_rx) & RX_RING_MOD_MASK; + } + lp->cur_rx &= RX_RING_MOD_MASK; + + /* From lance.c (Donald Becker): */ + /* We should check that at least two ring entries are free. If not, + we should free one and mark stats->rx_dropped++. */ + + return 0; +} + + +static int lance_close( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_ioreg *IO = lp->iobase; + + netif_stop_queue (dev); + + AREG = CSR0; + + DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n", + dev->name, DREG )); + + /* We stop the LANCE here -- it occasionally polls + memory if we don't. */ + DREG = CSR0_STOP; + + return 0; +} + + +/* Set or clear the multicast filter for this adaptor. + num_addrs == -1 Promiscuous mode, receive all packets + num_addrs == 0 Normal mode, clear multicast list + num_addrs > 0 Multicast mode, receive normal and MC packets, and do + best-effort filtering. 
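
set_multicast_list() below does no real hashing: the 64-bit logical address filter (written via CSR8+i) is programmed to all-ones to accept every multicast frame, or all-zeroes to accept none, leaving finer filtering to the upper layers. The same policy as a small helper (illustrative):

        /* All-pass or all-block multicast filter, as the driver programs it. */
        static void fill_ladrf(short ladrf[4], int any_multicast)
        {
                int i;

                for (i = 0; i < 4; i++)
                        ladrf[i] = any_multicast ? -1 : 0;
        }
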
+ */ + +static void set_multicast_list( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_ioreg *IO = lp->iobase; + + if (netif_running(dev)) + /* Only possible if board is already started */ + return; + + /* We take the simple way out and always enable promiscuous mode. */ + DREG = CSR0_STOP; /* Temporarily stop the lance. */ + + if (dev->flags & IFF_PROMISC) { + /* Log any net taps. */ + DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name )); + REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */ + } else { + short multicast_table[4]; + int num_addrs = netdev_mc_count(dev); + int i; + /* We don't use the multicast table, but rely on upper-layer + * filtering. */ + memset( multicast_table, (num_addrs == 0) ? 0 : -1, + sizeof(multicast_table) ); + for( i = 0; i < 4; i++ ) + REGA( CSR8+i ) = multicast_table[i]; + REGA( CSR15 ) = 0; /* Unset promiscuous mode */ + } + + /* + * Always set BSWP after a STOP as STOP puts it back into + * little endian mode. + */ + REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); + + /* Resume normal operation and reset AREG to CSR0 */ + REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT; +} + + +/* This is needed for old RieblCards and possible for new RieblCards */ + +static int lance_set_mac_address( struct net_device *dev, void *addr ) +{ + struct lance_private *lp = netdev_priv(dev); + struct sockaddr *saddr = addr; + int i; + + if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL) + return -EOPNOTSUPP; + + if (netif_running(dev)) { + /* Only possible while card isn't started */ + DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n", + dev->name )); + return -EIO; + } + + memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len ); + for( i = 0; i < 6; i++ ) + MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */ + lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 ); + /* set also the magic for future sessions */ + *RIEBL_MAGIC_ADDR = RIEBL_MAGIC; + + return 0; +} + + +#ifdef MODULE +static struct net_device *atarilance_dev; + +static int __init atarilance_module_init(void) +{ + atarilance_dev = atarilance_probe(-1); + if (IS_ERR(atarilance_dev)) + return PTR_ERR(atarilance_dev); + return 0; +} + +static void __exit atarilance_module_exit(void) +{ + unregister_netdev(atarilance_dev); + free_irq(atarilance_dev->irq, atarilance_dev); + free_netdev(atarilance_dev); +} +module_init(atarilance_module_init); +module_exit(atarilance_module_exit); +#endif /* MODULE */ + + +/* + * Local variables: + * c-indent-level: 4 + * tab-width: 4 + * End: + */ diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c new file mode 100644 index 0000000..b9debcf --- /dev/null +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -0,0 +1,1332 @@ +/* + * + * Alchemy Au1x00 ethernet driver + * + * Copyright 2001-2003, 2006 MontaVista Software Inc. + * Copyright 2002 TimeSys Corp. + * Added ethtool/mii-tool support, + * Copyright 2004 Matt Porter + * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de + * or riemer@riemer-nt.de: fixed the link beat detection with + * ioctls (SIOCGMIIPHY) + * Copyright 2006 Herbert Valerio Riedel + * converted to use linux-2.6.x's PHY framework + * + * Author: MontaVista Software, Inc. 
+ * ppopov@mvista.com or source@mvista.com + * + * ######################################################################## + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * ######################################################################## + * + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "au1000_eth.h" + +#ifdef AU1000_ETH_DEBUG +static int au1000_debug = 5; +#else +static int au1000_debug = 3; +#endif + +#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +#define DRV_NAME "au1000_eth" +#define DRV_VERSION "1.7" +#define DRV_AUTHOR "Pete Popov " +#define DRV_DESC "Au1xxx on-chip Ethernet driver" + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* + * Theory of operation + * + * The Au1000 MACs use a simple rx and tx descriptor ring scheme. + * There are four receive and four transmit descriptors. These + * descriptors are not in memory; rather, they are just a set of + * hardware registers. + * + * Since the Au1000 has a coherent data cache, the receive and + * transmit buffers are allocated from the KSEG0 segment. The + * hardware registers, however, are still mapped at KSEG1 to + * make sure there's no out-of-order writes, and that all writes + * complete immediately. + */ + +/* + * board-specific configurations + * + * PHY detection algorithm + * + * If phy_static_config is undefined, the PHY setup is + * autodetected: + * + * mii_probe() first searches the current MAC's MII bus for a PHY, + * selecting the first (or last, if phy_search_highest_addr is + * defined) PHY address not already claimed by another netdev. + * + * If nothing was found that way when searching for the 2nd ethernet + * controller's PHY and phy1_search_mac0 is defined, then + * the first MII bus is searched as well for an unclaimed PHY; this is + * needed in case of a dual-PHY accessible only through the MAC0's MII + * bus. + * + * Finally, if no PHY is found, then the corresponding ethernet + * controller is not registered to the network subsystem. + */ + +/* autodetection defaults: phy1_search_mac0 */ + +/* static PHY setup + * + * most boards PHY setup should be detectable properly with the + * autodetection algorithm in mii_probe(), but in some cases (e.g. 
if + * you have a switch attached, or want to use the PHY's interrupt + * notification capabilities) you can provide a static PHY + * configuration here + * + * IRQs may only be set, if a PHY address was configured + * If a PHY address is given, also a bus id is required to be set + * + * ps: make sure the used irqs are configured properly in the board + * specific irq-map + */ + +static void au1000_enable_mac(struct net_device *dev, int force_reset) +{ + unsigned long flags; + struct au1000_private *aup = netdev_priv(dev); + + spin_lock_irqsave(&aup->lock, flags); + + if (force_reset || (!aup->mac_enabled)) { + writel(MAC_EN_CLOCK_ENABLE, aup->enable); + au_sync_delay(2); + writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 + | MAC_EN_CLOCK_ENABLE), aup->enable); + au_sync_delay(2); + + aup->mac_enabled = 1; + } + + spin_unlock_irqrestore(&aup->lock, flags); +} + +/* + * MII operations + */ +static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg) +{ + struct au1000_private *aup = netdev_priv(dev); + u32 *const mii_control_reg = &aup->mac->mii_control; + u32 *const mii_data_reg = &aup->mac->mii_data; + u32 timedout = 20; + u32 mii_control; + + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "read_MII busy timeout!!\n"); + return -1; + } + } + + mii_control = MAC_SET_MII_SELECT_REG(reg) | + MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ; + + writel(mii_control, mii_control_reg); + + timedout = 20; + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "mdio_read busy timeout!!\n"); + return -1; + } + } + return readl(mii_data_reg); +} + +static void au1000_mdio_write(struct net_device *dev, int phy_addr, + int reg, u16 value) +{ + struct au1000_private *aup = netdev_priv(dev); + u32 *const mii_control_reg = &aup->mac->mii_control; + u32 *const mii_data_reg = &aup->mac->mii_data; + u32 timedout = 20; + u32 mii_control; + + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "mdio_write busy timeout!!\n"); + return; + } + } + + mii_control = MAC_SET_MII_SELECT_REG(reg) | + MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE; + + writel(value, mii_data_reg); + writel(mii_control, mii_control_reg); +} + +static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) +{ + /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does + * _NOT_ hold (e.g. 
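
Both MII accessors above poll the BUSY flag with the same bounded loop: up to 20 iterations with a 1 ms delay each before giving up. The shape of that loop, isolated; read_mii_control() and delay_ms() are hypothetical stand-ins for the driver's readl() and mdelay(), and MAC_MII_BUSY is the bit from au1000_eth.h:

        /* Wait for the MII unit to go idle; 0 on success, -1 after ~20 ms. */
        extern unsigned int read_mii_control(void);
        extern void delay_ms(unsigned int ms);

        static int mii_wait_idle(void)
        {
                unsigned int timedout = 20;

                while (read_mii_control() & MAC_MII_BUSY) {
                        delay_ms(1);
                        if (--timedout == 0)
                                return -1;      /* still busy: timeout */
                }
                return 0;
        }
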
when PHY is accessed through other MAC's MII bus) + */ + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + return au1000_mdio_read(dev, phy_addr, regnum); +} + +static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, + u16 value) +{ + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + au1000_mdio_write(dev, phy_addr, regnum, value); + return 0; +} + +static int au1000_mdiobus_reset(struct mii_bus *bus) +{ + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + return 0; +} + +static void au1000_hard_stop(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, drv, dev, "hard stop\n"); + + reg = readl(&aup->mac->control); + reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); + writel(reg, &aup->mac->control); + au_sync_delay(10); +} + +static void au1000_enable_rx_tx(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, hw, dev, "enable_rx_tx\n"); + + reg = readl(&aup->mac->control); + reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE); + writel(reg, &aup->mac->control); + au_sync_delay(10); +} + +static void +au1000_adjust_link(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + struct phy_device *phydev = aup->phy_dev; + unsigned long flags; + u32 reg; + + int status_change = 0; + + BUG_ON(!aup->phy_dev); + + spin_lock_irqsave(&aup->lock, flags); + + if (phydev->link && (aup->old_speed != phydev->speed)) { + /* speed changed */ + + switch (phydev->speed) { + case SPEED_10: + case SPEED_100: + break; + default: + netdev_warn(dev, "Speed (%d) is not 10/100 ???\n", + phydev->speed); + break; + } + + aup->old_speed = phydev->speed; + + status_change = 1; + } + + if (phydev->link && (aup->old_duplex != phydev->duplex)) { + /* duplex mode changed */ + + /* switching duplex mode requires to disable rx and tx! */ + au1000_hard_stop(dev); + + reg = readl(&aup->mac->control); + if (DUPLEX_FULL == phydev->duplex) { + reg |= MAC_FULL_DUPLEX; + reg &= ~MAC_DISABLE_RX_OWN; + } else { + reg &= ~MAC_FULL_DUPLEX; + reg |= MAC_DISABLE_RX_OWN; + } + writel(reg, &aup->mac->control); + au_sync_delay(1); + + au1000_enable_rx_tx(dev); + aup->old_duplex = phydev->duplex; + + status_change = 1; + } + + if (phydev->link != aup->old_link) { + /* link state changed */ + + if (!phydev->link) { + /* link went down */ + aup->old_speed = 0; + aup->old_duplex = -1; + } + + aup->old_link = phydev->link; + status_change = 1; + } + + spin_unlock_irqrestore(&aup->lock, flags); + + if (status_change) { + if (phydev->link) + netdev_info(dev, "link up (%d/%s)\n", + phydev->speed, + DUPLEX_FULL == phydev->duplex ? 
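
The duplex switch in au1000_adjust_link() above is a read-modify-write of the MAC control register, performed only while rx/tx are stopped; MAC_FULL_DUPLEX and MAC_DISABLE_RX_OWN always move in opposite directions. The register arithmetic, isolated (bit names as in au1000_eth.h; a sketch, not driver code):

        #include <stdint.h>

        /* New control-register value for a duplex change. */
        static uint32_t control_for_duplex(uint32_t reg, int full_duplex)
        {
                if (full_duplex) {
                        reg |= MAC_FULL_DUPLEX;
                        reg &= ~MAC_DISABLE_RX_OWN;
                } else {
                        reg &= ~MAC_FULL_DUPLEX;
                        reg |= MAC_DISABLE_RX_OWN;
                }
                return reg;
        }
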
"Full" : "Half"); + else + netdev_info(dev, "link down\n"); + } +} + +static int au1000_mii_probe(struct net_device *dev) +{ + struct au1000_private *const aup = netdev_priv(dev); + struct phy_device *phydev = NULL; + int phy_addr; + + if (aup->phy_static_config) { + BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); + + if (aup->phy_addr) + phydev = aup->mii_bus->phy_map[aup->phy_addr]; + else + netdev_info(dev, "using PHY-less setup\n"); + return 0; + } + + /* find the first (lowest address) PHY + * on the current MAC's MII bus + */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) + if (aup->mii_bus->phy_map[phy_addr]) { + phydev = aup->mii_bus->phy_map[phy_addr]; + if (!aup->phy_search_highest_addr) + /* break out with first one found */ + break; + } + + if (aup->phy1_search_mac0) { + /* try harder to find a PHY */ + if (!phydev && (aup->mac_id == 1)) { + /* no PHY found, maybe we have a dual PHY? */ + dev_info(&dev->dev, ": no PHY found on MAC1, " + "let's see if it's attached to MAC0...\n"); + + /* find the first (lowest address) non-attached + * PHY on the MAC0 MII bus + */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + struct phy_device *const tmp_phydev = + aup->mii_bus->phy_map[phy_addr]; + + if (aup->mac_id == 1) + break; + + /* no PHY here... */ + if (!tmp_phydev) + continue; + + /* already claimed by MAC0 */ + if (tmp_phydev->attached_dev) + continue; + + phydev = tmp_phydev; + break; /* found it */ + } + } + } + + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -1; + } + + /* now we are supposed to have a proper phydev, to attach to... */ + BUG_ON(phydev->attached_dev); + + phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link, + 0, PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + netdev_err(dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */ + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + + aup->old_link = 0; + aup->old_speed = 0; + aup->old_duplex = -1; + aup->phy_dev = phydev; + + netdev_info(dev, "attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + return 0; +} + + +/* + * Buffer allocation/deallocation routines. The buffer descriptor returned + * has the virtual and dma address of a buffer suitable for + * both, receive and transmit operations. 
+ */ +static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup) +{ + struct db_dest *pDB; + pDB = aup->pDBfree; + + if (pDB) + aup->pDBfree = pDB->pnext; + + return pDB; +} + +void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB) +{ + struct db_dest *pDBfree = aup->pDBfree; + if (pDBfree) + pDBfree->pnext = pDB; + aup->pDBfree = pDB; +} + +static void au1000_reset_mac_unlocked(struct net_device *dev) +{ + struct au1000_private *const aup = netdev_priv(dev); + int i; + + au1000_hard_stop(dev); + + writel(MAC_EN_CLOCK_ENABLE, aup->enable); + au_sync_delay(2); + writel(0, aup->enable); + au_sync_delay(2); + + aup->tx_full = 0; + for (i = 0; i < NUM_RX_DMA; i++) { + /* reset control bits */ + aup->rx_dma_ring[i]->buff_stat &= ~0xf; + } + for (i = 0; i < NUM_TX_DMA; i++) { + /* reset control bits */ + aup->tx_dma_ring[i]->buff_stat &= ~0xf; + } + + aup->mac_enabled = 0; + +} + +static void au1000_reset_mac(struct net_device *dev) +{ + struct au1000_private *const aup = netdev_priv(dev); + unsigned long flags; + + netif_dbg(aup, hw, dev, "reset mac, aup %x\n", + (unsigned)aup); + + spin_lock_irqsave(&aup->lock, flags); + + au1000_reset_mac_unlocked(dev); + + spin_unlock_irqrestore(&aup->lock, flags); +} + +/* + * Setup the receive and transmit "rings". These pointers are the addresses + * of the rx and tx MAC DMA registers so they are fixed by the hardware -- + * these are not descriptors sitting in memory. + */ +static void +au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base) +{ + int i; + + for (i = 0; i < NUM_RX_DMA; i++) { + aup->rx_dma_ring[i] = + (struct rx_dma *) + (rx_base + sizeof(struct rx_dma)*i); + } + for (i = 0; i < NUM_TX_DMA; i++) { + aup->tx_dma_ring[i] = + (struct tx_dma *) + (tx_base + sizeof(struct tx_dma)*i); + } +} + +/* + * ethtool operations + */ + +static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct au1000_private *aup = netdev_priv(dev); + + if (aup->phy_dev) + return phy_ethtool_gset(aup->phy_dev, cmd); + + return -EINVAL; +} + +static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct au1000_private *aup = netdev_priv(dev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (aup->phy_dev) + return phy_ethtool_sset(aup->phy_dev, cmd); + + return -EINVAL; +} + +static void +au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct au1000_private *aup = netdev_priv(dev); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + info->fw_version[0] = '\0'; + sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id); + info->regdump_len = 0; +} + +static void au1000_set_msglevel(struct net_device *dev, u32 value) +{ + struct au1000_private *aup = netdev_priv(dev); + aup->msg_enable = value; +} + +static u32 au1000_get_msglevel(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + return aup->msg_enable; +} + +static const struct ethtool_ops au1000_ethtool_ops = { + .get_settings = au1000_get_settings, + .set_settings = au1000_set_settings, + .get_drvinfo = au1000_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = au1000_get_msglevel, + .set_msglevel = au1000_set_msglevel, +}; + + +/* + * Initialize the interface. + * + * When the device powers up, the clocks are disabled and the + * mac is in reset state. When the interface is closed, we + * do the same -- reset the device and disable the clocks to + * conserve power. 
Thus, whenever au1000_init() is called, + * the device should already be in reset state. + */ +static int au1000_init(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + unsigned long flags; + int i; + u32 control; + + netif_dbg(aup, hw, dev, "au1000_init\n"); + + /* bring the device out of reset */ + au1000_enable_mac(dev, 1); + + spin_lock_irqsave(&aup->lock, flags); + + writel(0, &aup->mac->control); + aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2; + aup->tx_tail = aup->tx_head; + aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2; + + writel(dev->dev_addr[5]<<8 | dev->dev_addr[4], + &aup->mac->mac_addr_high); + writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 | + dev->dev_addr[1]<<8 | dev->dev_addr[0], + &aup->mac->mac_addr_low); + + + for (i = 0; i < NUM_RX_DMA; i++) + aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE; + + au_sync(); + + control = MAC_RX_ENABLE | MAC_TX_ENABLE; +#ifndef CONFIG_CPU_LITTLE_ENDIAN + control |= MAC_BIG_ENDIAN; +#endif + if (aup->phy_dev) { + if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex)) + control |= MAC_FULL_DUPLEX; + else + control |= MAC_DISABLE_RX_OWN; + } else { /* PHY-less op, assume full-duplex */ + control |= MAC_FULL_DUPLEX; + } + + writel(control, &aup->mac->control); + writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */ + au_sync(); + + spin_unlock_irqrestore(&aup->lock, flags); + return 0; +} + +static inline void au1000_update_rx_stats(struct net_device *dev, u32 status) +{ + struct net_device_stats *ps = &dev->stats; + + ps->rx_packets++; + if (status & RX_MCAST_FRAME) + ps->multicast++; + + if (status & RX_ERROR) { + ps->rx_errors++; + if (status & RX_MISSED_FRAME) + ps->rx_missed_errors++; + if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR)) + ps->rx_length_errors++; + if (status & RX_CRC_ERROR) + ps->rx_crc_errors++; + if (status & RX_COLL) + ps->collisions++; + } else + ps->rx_bytes += status & RX_FRAME_LEN_MASK; + +} + +/* + * Au1000 receive routine. 
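
au1000_init() above loads the station address with bytes 4..5 in mac_addr_high and bytes 0..3 in mac_addr_low. The packing, pulled out into a helper with the same shifts as the writel() calls (illustrative):

        #include <stdint.h>

        /* Split a 6-byte MAC address across the two address registers. */
        static void pack_mac_addr(const uint8_t a[6], uint32_t *hi, uint32_t *lo)
        {
                *hi = (uint32_t)a[5] << 8 | a[4];
                *lo = (uint32_t)a[3] << 24 | (uint32_t)a[2] << 16 |
                      (uint32_t)a[1] << 8 | a[0];
        }
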
+ */ +static int au1000_rx(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + struct sk_buff *skb; + struct rx_dma *prxd; + u32 buff_stat, status; + struct db_dest *pDB; + u32 frmlen; + + netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head); + + prxd = aup->rx_dma_ring[aup->rx_head]; + buff_stat = prxd->buff_stat; + while (buff_stat & RX_T_DONE) { + status = prxd->status; + pDB = aup->rx_db_inuse[aup->rx_head]; + au1000_update_rx_stats(dev, status); + if (!(status & RX_ERROR)) { + + /* good frame */ + frmlen = (status & RX_FRAME_LEN_MASK); + frmlen -= 4; /* Remove FCS */ + skb = dev_alloc_skb(frmlen + 2); + if (skb == NULL) { + netdev_err(dev, "Memory squeeze, dropping packet.\n"); + dev->stats.rx_dropped++; + continue; + } + skb_reserve(skb, 2); /* 16 byte IP header align */ + skb_copy_to_linear_data(skb, + (unsigned char *)pDB->vaddr, frmlen); + skb_put(skb, frmlen); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); /* pass the packet to upper layers */ + } else { + if (au1000_debug > 4) { + pr_err("rx_error(s):"); + if (status & RX_MISSED_FRAME) + pr_cont(" miss"); + if (status & RX_WDOG_TIMER) + pr_cont(" wdog"); + if (status & RX_RUNT) + pr_cont(" runt"); + if (status & RX_OVERLEN) + pr_cont(" overlen"); + if (status & RX_COLL) + pr_cont(" coll"); + if (status & RX_MII_ERROR) + pr_cont(" mii error"); + if (status & RX_CRC_ERROR) + pr_cont(" crc error"); + if (status & RX_LEN_ERROR) + pr_cont(" len error"); + if (status & RX_U_CNTRL_FRAME) + pr_cont(" u control frame"); + pr_cont("\n"); + } + } + prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); + aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); + au_sync(); + + /* next descriptor */ + prxd = aup->rx_dma_ring[aup->rx_head]; + buff_stat = prxd->buff_stat; + } + return 0; +} + +static void au1000_update_tx_stats(struct net_device *dev, u32 status) +{ + struct au1000_private *aup = netdev_priv(dev); + struct net_device_stats *ps = &dev->stats; + + if (status & TX_FRAME_ABORTED) { + if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { + if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { + /* any other tx errors are only valid + * in half duplex mode + */ + ps->tx_errors++; + ps->tx_aborted_errors++; + } + } else { + ps->tx_errors++; + ps->tx_aborted_errors++; + if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) + ps->tx_carrier_errors++; + } + } +} + +/* + * Called from the interrupt service routine to acknowledge + * the TX DONE bits. This is a must if the irq is setup as + * edge triggered. + */ +static void au1000_tx_ack(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + struct tx_dma *ptxd; + + ptxd = aup->tx_dma_ring[aup->tx_tail]; + + while (ptxd->buff_stat & TX_T_DONE) { + au1000_update_tx_stats(dev, ptxd->status); + ptxd->buff_stat &= ~TX_T_DONE; + ptxd->len = 0; + au_sync(); + + aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); + ptxd = aup->tx_dma_ring[aup->tx_tail]; + + if (aup->tx_full) { + aup->tx_full = 0; + netif_wake_queue(dev); + } + } +} + +/* + * Au1000 interrupt service routine. 
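+ *
+ * Receive work is done before the transmit acknowledge: the RX side
+ * loses frames once its four descriptors fill up, while TX completions
+ * can safely wait.  Acknowledging the TX_T_DONE bits in au1000_tx_ack()
+ * also matters when the interrupt is edge triggered (see the comment on
+ * that function): were the bits left set, no new edge would be
+ * generated and the interface could stall.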
+ */ +static irqreturn_t au1000_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + + /* Handle RX interrupts first to minimize chance of overrun */ + + au1000_rx(dev); + au1000_tx_ack(dev); + return IRQ_RETVAL(1); +} + +static int au1000_open(struct net_device *dev) +{ + int retval; + struct au1000_private *aup = netdev_priv(dev); + + netif_dbg(aup, drv, dev, "open: dev=%p\n", dev); + + retval = request_irq(dev->irq, au1000_interrupt, 0, + dev->name, dev); + if (retval) { + netdev_err(dev, "unable to get IRQ %d\n", dev->irq); + return retval; + } + + retval = au1000_init(dev); + if (retval) { + netdev_err(dev, "error in au1000_init\n"); + free_irq(dev->irq, dev); + return retval; + } + + if (aup->phy_dev) { + /* cause the PHY state machine to schedule a link state check */ + aup->phy_dev->state = PHY_CHANGELINK; + phy_start(aup->phy_dev); + } + + netif_start_queue(dev); + + netif_dbg(aup, drv, dev, "open: Initialization done.\n"); + + return 0; +} + +static int au1000_close(struct net_device *dev) +{ + unsigned long flags; + struct au1000_private *const aup = netdev_priv(dev); + + netif_dbg(aup, drv, dev, "close: dev=%p\n", dev); + + if (aup->phy_dev) + phy_stop(aup->phy_dev); + + spin_lock_irqsave(&aup->lock, flags); + + au1000_reset_mac_unlocked(dev); + + /* stop the device */ + netif_stop_queue(dev); + + /* disable the interrupt */ + free_irq(dev->irq, dev); + spin_unlock_irqrestore(&aup->lock, flags); + + return 0; +} + +/* + * Au1000 transmit routine. + */ +static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + struct net_device_stats *ps = &dev->stats; + struct tx_dma *ptxd; + u32 buff_stat; + struct db_dest *pDB; + int i; + + netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n", + (unsigned)aup, skb->len, + skb->data, aup->tx_head); + + ptxd = aup->tx_dma_ring[aup->tx_head]; + buff_stat = ptxd->buff_stat; + if (buff_stat & TX_DMA_ENABLE) { + /* We've wrapped around and the transmitter is still busy */ + netif_stop_queue(dev); + aup->tx_full = 1; + return NETDEV_TX_BUSY; + } else if (buff_stat & TX_T_DONE) { + au1000_update_tx_stats(dev, ptxd->status); + ptxd->len = 0; + } + + if (aup->tx_full) { + aup->tx_full = 0; + netif_wake_queue(dev); + } + + pDB = aup->tx_db_inuse[aup->tx_head]; + skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); + if (skb->len < ETH_ZLEN) { + for (i = skb->len; i < ETH_ZLEN; i++) + ((char *)pDB->vaddr)[i] = 0; + + ptxd->len = ETH_ZLEN; + } else + ptxd->len = skb->len; + + ps->tx_packets++; + ps->tx_bytes += ptxd->len; + + ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; + au_sync(); + dev_kfree_skb(skb); + aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); + return NETDEV_TX_OK; +} + +/* + * The Tx ring has been full longer than the watchdog timeout + * value. The transmitter must be hung? + */ +static void au1000_tx_timeout(struct net_device *dev) +{ + netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev); + au1000_reset_mac(dev); + au1000_init(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +static void au1000_multicast_list(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags); + reg = readl(&aup->mac->control); + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ + reg |= MAC_PROMISCUOUS; + } else if ((dev->flags & IFF_ALLMULTI) || + netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) { + reg |= MAC_PASS_ALL_MULTI; + reg &= ~MAC_PROMISCUOUS; + netdev_info(dev, "Pass all multicast\n"); + } else { + struct netdev_hw_addr *ha; + u32 mc_filter[2]; /* Multicast hash filter */ + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) + set_bit(ether_crc(ETH_ALEN, ha->addr)>>26, + (long *)mc_filter); + writel(mc_filter[1], &aup->mac->multi_hash_high); + writel(mc_filter[0], &aup->mac->multi_hash_low); + reg &= ~MAC_PROMISCUOUS; + reg |= MAC_HASH_MODE; + } + writel(reg, &aup->mac->control); +} + +static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct au1000_private *aup = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (!aup->phy_dev) + return -EINVAL; /* PHY not controllable */ + + return phy_mii_ioctl(aup->phy_dev, rq, cmd); +} + +static const struct net_device_ops au1000_netdev_ops = { + .ndo_open = au1000_open, + .ndo_stop = au1000_close, + .ndo_start_xmit = au1000_tx, + .ndo_set_multicast_list = au1000_multicast_list, + .ndo_do_ioctl = au1000_ioctl, + .ndo_tx_timeout = au1000_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int __devinit au1000_probe(struct platform_device *pdev) +{ + static unsigned version_printed; + struct au1000_private *aup = NULL; + struct au1000_eth_platform_data *pd; + struct net_device *dev = NULL; + struct db_dest *pDB, *pDBfree; + int irq, i, err = 0; + struct resource *base, *macen; + + base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!base) { + dev_err(&pdev->dev, "failed to retrieve base register\n"); + err = -ENODEV; + goto out; + } + + macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!macen) { + dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n"); + err = -ENODEV; + goto out; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to retrieve IRQ\n"); + err = -ENODEV; + goto out; + } + + if (!request_mem_region(base->start, resource_size(base), + pdev->name)) { + dev_err(&pdev->dev, "failed to request memory region for base registers\n"); + err = -ENXIO; + goto out; + } + + if (!request_mem_region(macen->start, resource_size(macen), + pdev->name)) { + dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n"); + err = -ENXIO; + goto err_request; + } + + dev = alloc_etherdev(sizeof(struct au1000_private)); + if (!dev) { + dev_err(&pdev->dev, "alloc_etherdev failed\n"); + err = -ENOMEM; + goto err_alloc; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + platform_set_drvdata(pdev, dev); + aup = netdev_priv(dev); + + spin_lock_init(&aup->lock); + aup->msg_enable = (au1000_debug < 4 ? 
+ AU1000_DEF_MSG_ENABLE : au1000_debug); + + /* Allocate the data buffers + * Snooping works fine with eth on all au1xxx + */ + aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + &aup->dma_addr, 0); + if (!aup->vaddr) { + dev_err(&pdev->dev, "failed to allocate data buffers\n"); + err = -ENOMEM; + goto err_vaddr; + } + + /* aup->mac is the base address of the MAC's registers */ + aup->mac = (struct mac_reg *) + ioremap_nocache(base->start, resource_size(base)); + if (!aup->mac) { + dev_err(&pdev->dev, "failed to ioremap MAC registers\n"); + err = -ENXIO; + goto err_remap1; + } + + /* Setup some variables for quick register address access */ + aup->enable = (u32 *)ioremap_nocache(macen->start, + resource_size(macen)); + if (!aup->enable) { + dev_err(&pdev->dev, "failed to ioremap MAC enable register\n"); + err = -ENXIO; + goto err_remap2; + } + aup->mac_id = pdev->id; + + if (pdev->id == 0) + au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); + else if (pdev->id == 1) + au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); + + /* set a random MAC now in case platform_data doesn't provide one */ + random_ether_addr(dev->dev_addr); + + writel(0, aup->enable); + aup->mac_enabled = 0; + + pd = pdev->dev.platform_data; + if (!pd) { + dev_info(&pdev->dev, "no platform_data passed," + " PHY search on MAC0\n"); + aup->phy1_search_mac0 = 1; + } else { + if (is_valid_ether_addr(pd->mac)) + memcpy(dev->dev_addr, pd->mac, 6); + + aup->phy_static_config = pd->phy_static_config; + aup->phy_search_highest_addr = pd->phy_search_highest_addr; + aup->phy1_search_mac0 = pd->phy1_search_mac0; + aup->phy_addr = pd->phy_addr; + aup->phy_busid = pd->phy_busid; + aup->phy_irq = pd->phy_irq; + } + + if (aup->phy_busid && aup->phy_busid > 0) { + dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n"); + err = -ENODEV; + goto err_mdiobus_alloc; + } + + aup->mii_bus = mdiobus_alloc(); + if (aup->mii_bus == NULL) { + dev_err(&pdev->dev, "failed to allocate mdiobus structure\n"); + err = -ENOMEM; + goto err_mdiobus_alloc; + } + + aup->mii_bus->priv = dev; + aup->mii_bus->read = au1000_mdiobus_read; + aup->mii_bus->write = au1000_mdiobus_write; + aup->mii_bus->reset = au1000_mdiobus_reset; + aup->mii_bus->name = "au1000_eth_mii"; + snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id); + aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (aup->mii_bus->irq == NULL) + goto err_out; + + for (i = 0; i < PHY_MAX_ADDR; ++i) + aup->mii_bus->irq[i] = PHY_POLL; + /* if known, set corresponding PHY IRQs */ + if (aup->phy_static_config) + if (aup->phy_irq && aup->phy_busid == aup->mac_id) + aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq; + + err = mdiobus_register(aup->mii_bus); + if (err) { + dev_err(&pdev->dev, "failed to register MDIO bus\n"); + goto err_mdiobus_reg; + } + + if (au1000_mii_probe(dev) != 0) + goto err_out; + + pDBfree = NULL; + /* setup the data buffer descriptors and attach a buffer to each one */ + pDB = aup->db; + for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { + pDB->pnext = pDBfree; + pDBfree = pDB; + pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); + pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); + pDB++; + } + aup->pDBfree = pDBfree; + + for (i = 0; i < NUM_RX_DMA; i++) { + pDB = au1000_GetFreeDB(aup); + if (!pDB) + goto err_out; + + aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->rx_db_inuse[i] = pDB; + } + for (i = 0; i < NUM_TX_DMA; 
i++) { + pDB = au1000_GetFreeDB(aup); + if (!pDB) + goto err_out; + + aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->tx_dma_ring[i]->len = 0; + aup->tx_db_inuse[i] = pDB; + } + + dev->base_addr = base->start; + dev->irq = irq; + dev->netdev_ops = &au1000_netdev_ops; + SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); + dev->watchdog_timeo = ETH_TX_TIMEOUT; + + /* + * The boot code uses the ethernet controller, so reset it to start + * fresh. au1000_init() expects that the device is in reset state. + */ + au1000_reset_mac(dev); + + err = register_netdev(dev); + if (err) { + netdev_err(dev, "Cannot register net device, aborting.\n"); + goto err_out; + } + + netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n", + (unsigned long)base->start, irq); + if (version_printed++ == 0) + pr_info("%s version %s %s\n", + DRV_NAME, DRV_VERSION, DRV_AUTHOR); + + return 0; + +err_out: + if (aup->mii_bus != NULL) + mdiobus_unregister(aup->mii_bus); + + /* here we should have a valid dev plus aup-> register addresses + * so we can reset the mac properly. + */ + au1000_reset_mac(dev); + + for (i = 0; i < NUM_RX_DMA; i++) { + if (aup->rx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); + } + for (i = 0; i < NUM_TX_DMA; i++) { + if (aup->tx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); + } +err_mdiobus_reg: + mdiobus_free(aup->mii_bus); +err_mdiobus_alloc: + iounmap(aup->enable); +err_remap2: + iounmap(aup->mac); +err_remap1: + dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr); +err_vaddr: + free_netdev(dev); +err_alloc: + release_mem_region(macen->start, resource_size(macen)); +err_request: + release_mem_region(base->start, resource_size(base)); +out: + return err; +} + +static int __devexit au1000_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct au1000_private *aup = netdev_priv(dev); + int i; + struct resource *base, *macen; + + platform_set_drvdata(pdev, NULL); + + unregister_netdev(dev); + mdiobus_unregister(aup->mii_bus); + mdiobus_free(aup->mii_bus); + + for (i = 0; i < NUM_RX_DMA; i++) + if (aup->rx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); + + for (i = 0; i < NUM_TX_DMA; i++) + if (aup->tx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); + + dma_free_noncoherent(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr); + + iounmap(aup->mac); + iounmap(aup->enable); + + base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(base->start, resource_size(base)); + + macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); + release_mem_region(macen->start, resource_size(macen)); + + free_netdev(dev); + + return 0; +} + +static struct platform_driver au1000_eth_driver = { + .probe = au1000_probe, + .remove = __devexit_p(au1000_remove), + .driver = { + .name = "au1000-eth", + .owner = THIS_MODULE, + }, +}; +MODULE_ALIAS("platform:au1000-eth"); + + +static int __init au1000_init_module(void) +{ + return platform_driver_register(&au1000_eth_driver); +} + +static void __exit au1000_exit_module(void) +{ + platform_driver_unregister(&au1000_eth_driver); +} + +module_init(au1000_init_module); +module_exit(au1000_exit_module); diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h new file mode 100644 index 0000000..6229c77 --- /dev/null +++ b/drivers/net/ethernet/amd/au1000_eth.h @@ -0,0 +1,134 @@ +/* + * + * Alchemy Au1x00 ethernet driver include 
file + * + * Author: Pete Popov + * + * Copyright 2001 MontaVista Software Inc. + * + * ######################################################################## + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * ######################################################################## + * + * + */ + + +#define MAC_IOSIZE 0x10000 +#define NUM_RX_DMA 4 /* Au1x00 has 4 rx hardware descriptors */ +#define NUM_TX_DMA 4 /* Au1x00 has 4 tx hardware descriptors */ + +#define NUM_RX_BUFFS 4 +#define NUM_TX_BUFFS 4 +#define MAX_BUF_SIZE 2048 + +#define ETH_TX_TIMEOUT (HZ/4) +#define MAC_MIN_PKT_SIZE 64 + +#define MULTICAST_FILTER_LIMIT 64 + +/* + * Data Buffer Descriptor. Data buffers must be aligned on 32 byte + * boundary for both, receive and transmit. + */ +struct db_dest { + struct db_dest *pnext; + u32 *vaddr; + dma_addr_t dma_addr; +}; + +/* + * The transmit and receive descriptors are memory + * mapped registers. + */ +struct tx_dma { + u32 status; + u32 buff_stat; + u32 len; + u32 pad; +}; + +struct rx_dma { + u32 status; + u32 buff_stat; + u32 pad[2]; +}; + + +/* + * MAC control registers, memory mapped. + */ +struct mac_reg { + u32 control; + u32 mac_addr_high; + u32 mac_addr_low; + u32 multi_hash_high; + u32 multi_hash_low; + u32 mii_control; + u32 mii_data; + u32 flow_control; + u32 vlan1_tag; + u32 vlan2_tag; +}; + + +struct au1000_private { + struct db_dest *pDBfree; + struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS]; + struct rx_dma *rx_dma_ring[NUM_RX_DMA]; + struct tx_dma *tx_dma_ring[NUM_TX_DMA]; + struct db_dest *rx_db_inuse[NUM_RX_DMA]; + struct db_dest *tx_db_inuse[NUM_TX_DMA]; + u32 rx_head; + u32 tx_head; + u32 tx_tail; + u32 tx_full; + + int mac_id; + + int mac_enabled; /* whether MAC is currently enabled and running + * (req. for mdio) + */ + + int old_link; /* used by au1000_adjust_link */ + int old_speed; + int old_duplex; + + struct phy_device *phy_dev; + struct mii_bus *mii_bus; + + /* PHY configuration */ + int phy_static_config; + int phy_search_highest_addr; + int phy1_search_mac0; + + int phy_addr; + int phy_busid; + int phy_irq; + + /* These variables are just for quick access + * to certain regs addresses. + */ + struct mac_reg *mac; /* mac registers */ + u32 *enable; /* address of MAC Enable Register */ + + u32 vaddr; /* virtual address of rx/tx buffers */ + dma_addr_t dma_addr; /* dma address of rx/tx buffers */ + + spinlock_t lock; /* Serialise access to device */ + + u32 msg_enable; +}; diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c new file mode 100644 index 0000000..d5598f6 --- /dev/null +++ b/drivers/net/ethernet/amd/declance.c @@ -0,0 +1,1381 @@ +/* + * Lance ethernet driver for the MIPS processor based + * DECstation family + * + * + * adopted from sunlance.c by Richard van den Berg + * + * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. 
Rozycki + * + * additional sources: + * - PMAD-AA TURBOchannel Ethernet Module Functional Specification, + * Revision 1.2 + * + * History: + * + * v0.001: The kernel accepts the code and it shows the hardware address. + * + * v0.002: Removed most sparc stuff, left only some module and dma stuff. + * + * v0.003: Enhanced base address calculation from proposals by + * Harald Koerfgen and Thomas Riemer. + * + * v0.004: lance-regs is pointing at the right addresses, added prom + * check. First start of address mapping and DMA. + * + * v0.005: started to play around with LANCE-DMA. This driver will not + * work for non IOASIC lances. HK + * + * v0.006: added pointer arrays to lance_private and setup routine for + * them in dec_lance_init. HK + * + * v0.007: Big shit. The LANCE seems to use a different DMA mechanism to + * access the init block. This looks like one (short) word at a + * time, but the smallest amount the IOASIC can transfer is a + * (long) word. So we have a 2-2 padding here. Changed + * lance_init_block accordingly. The 16-16 padding for the buffers + * seems to be correct. HK + * + * v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer + * + * v0.009: Module support fixes, multiple interfaces support, various + * bits. macro + * + * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the + * PMAX requirement to only use halfword accesses to the + * buffer. macro + * + * v0.011: Converted the PMAD to the driver model. macro + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +static char version[] __devinitdata = +"declance.c: v0.011 by Linux MIPS DECstation task force\n"; + +MODULE_AUTHOR("Linux MIPS DECstation task force"); +MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver"); +MODULE_LICENSE("GPL"); + +#define __unused __attribute__ ((unused)) + +/* + * card types + */ +#define ASIC_LANCE 1 +#define PMAD_LANCE 2 +#define PMAX_LANCE 3 + + +#define LE_CSR0 0 +#define LE_CSR1 1 +#define LE_CSR2 2 +#define LE_CSR3 3 + +#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */ + +#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */ +#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */ +#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */ +#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */ +#define LE_C0_MERR 0x0800 /* ME: Memory error */ +#define LE_C0_RINT 0x0400 /* Received interrupt */ +#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */ +#define LE_C0_IDON 0x0100 /* IFIN: Init finished. 
*/ +#define LE_C0_INTR 0x0080 /* Interrupt or error */ +#define LE_C0_INEA 0x0040 /* Interrupt enable */ +#define LE_C0_RXON 0x0020 /* Receiver on */ +#define LE_C0_TXON 0x0010 /* Transmitter on */ +#define LE_C0_TDMD 0x0008 /* Transmitter demand */ +#define LE_C0_STOP 0x0004 /* Stop the card */ +#define LE_C0_STRT 0x0002 /* Start the card */ +#define LE_C0_INIT 0x0001 /* Init the card */ + +#define LE_C3_BSWP 0x4 /* SWAP */ +#define LE_C3_ACON 0x2 /* ALE Control */ +#define LE_C3_BCON 0x1 /* Byte control */ + +/* Receive message descriptor 1 */ +#define LE_R1_OWN 0x8000 /* Who owns the entry */ +#define LE_R1_ERR 0x4000 /* Error: if FRA, OFL, CRC or BUF is set */ +#define LE_R1_FRA 0x2000 /* FRA: Frame error */ +#define LE_R1_OFL 0x1000 /* OFL: Frame overflow */ +#define LE_R1_CRC 0x0800 /* CRC error */ +#define LE_R1_BUF 0x0400 /* BUF: Buffer error */ +#define LE_R1_SOP 0x0200 /* Start of packet */ +#define LE_R1_EOP 0x0100 /* End of packet */ +#define LE_R1_POK 0x0300 /* Packet is complete: SOP + EOP */ + +/* Transmit message descriptor 1 */ +#define LE_T1_OWN 0x8000 /* Lance owns the packet */ +#define LE_T1_ERR 0x4000 /* Error summary */ +#define LE_T1_EMORE 0x1000 /* Error: more than one retry needed */ +#define LE_T1_EONE 0x0800 /* Error: one retry needed */ +#define LE_T1_EDEF 0x0400 /* Error: deferred */ +#define LE_T1_SOP 0x0200 /* Start of packet */ +#define LE_T1_EOP 0x0100 /* End of packet */ +#define LE_T1_POK 0x0300 /* Packet is complete: SOP + EOP */ + +#define LE_T3_BUF 0x8000 /* Buffer error */ +#define LE_T3_UFL 0x4000 /* Error underflow */ +#define LE_T3_LCOL 0x1000 /* Error late collision */ +#define LE_T3_CLOS 0x0800 /* Error carrier loss */ +#define LE_T3_RTY 0x0400 /* Error retry */ +#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */ + +/* Define: 2^4 Tx buffers and 2^4 Rx buffers */ + +#ifndef LANCE_LOG_TX_BUFFERS +#define LANCE_LOG_TX_BUFFERS 4 +#define LANCE_LOG_RX_BUFFERS 4 +#endif + +#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) + +#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) + +#define PKT_BUF_SZ 1536 +#define RX_BUFF_SIZE PKT_BUF_SZ +#define TX_BUFF_SIZE PKT_BUF_SZ + +#undef TEST_HITS +#define ZERO 0 + +/* + * The DS2100/3100 have a linear 64 kB buffer which supports halfword + * accesses only. Each halfword of the buffer is word-aligned in the + * CPU address space. + * + * The PMAD-AA has a 128 kB buffer on-board. + * + * The IOASIC LANCE devices use a shared memory region. This region + * as seen from the CPU is (max) 128 kB long and has to be on an 128 kB + * boundary. The LANCE sees this as a 64 kB long continuous memory + * region. + * + * The LANCE's DMA address is used as an index in this buffer and DMA + * takes place in bursts of eight 16-bit words which are packed into + * four 32-bit words by the IOASIC. This leads to a strange padding: + * 16 bytes of valid data followed by a 16 byte gap :-(. + */ + +struct lance_rx_desc { + unsigned short rmd0; /* low address of packet */ + unsigned short rmd1; /* high address of packet + and descriptor bits */ + short length; /* 2s complement (negative!) + of buffer length */ + unsigned short mblength; /* actual number of bytes received */ +}; + +struct lance_tx_desc { + unsigned short tmd0; /* low address of packet */ + unsigned short tmd1; /* high address of packet + and descriptor bits */ + short length; /* 2s complement (negative!) 
+ of buffer length */ + unsigned short misc; +}; + + +/* First part of the LANCE initialization block, described in databook. */ +struct lance_init_block { + unsigned short mode; /* pre-set mode (reg. 15) */ + + unsigned short phys_addr[3]; /* physical ethernet address */ + unsigned short filter[4]; /* multicast filter */ + + /* Receive and transmit ring base, along with extra bits. */ + unsigned short rx_ptr; /* receive descriptor addr */ + unsigned short rx_len; /* receive len and high addr */ + unsigned short tx_ptr; /* transmit descriptor addr */ + unsigned short tx_len; /* transmit len and high addr */ + + short gap[4]; + + /* The buffer descriptors */ + struct lance_rx_desc brx_ring[RX_RING_SIZE]; + struct lance_tx_desc btx_ring[TX_RING_SIZE]; +}; + +#define BUF_OFFSET_CPU sizeof(struct lance_init_block) +#define BUF_OFFSET_LNC sizeof(struct lance_init_block) + +#define shift_off(off, type) \ + (type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off) + +#define lib_off(rt, type) \ + shift_off(offsetof(struct lance_init_block, rt), type) + +#define lib_ptr(ib, rt, type) \ + ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type))) + +#define rds_off(rt, type) \ + shift_off(offsetof(struct lance_rx_desc, rt), type) + +#define rds_ptr(rd, rt, type) \ + ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type))) + +#define tds_off(rt, type) \ + shift_off(offsetof(struct lance_tx_desc, rt), type) + +#define tds_ptr(td, rt, type) \ + ((volatile u16 *)((u8 *)(td) + tds_off(rt, type))) + +struct lance_private { + struct net_device *next; + int type; + int dma_irq; + volatile struct lance_regs *ll; + + spinlock_t lock; + + int rx_new, tx_new; + int rx_old, tx_old; + + unsigned short busmaster_regval; + + struct timer_list multicast_timer; + + /* Pointers to the ring buffers as seen from the CPU */ + char *rx_buf_ptr_cpu[RX_RING_SIZE]; + char *tx_buf_ptr_cpu[TX_RING_SIZE]; + + /* Pointers to the ring buffers as seen from the LANCE */ + uint rx_buf_ptr_lnc[RX_RING_SIZE]; + uint tx_buf_ptr_lnc[TX_RING_SIZE]; +}; + +#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ + lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\ + lp->tx_old - lp->tx_new-1) + +/* The lance control ports are at an absolute address, machine and tc-slot + * dependent. + * DECstations do only 32-bit access and the LANCE uses 16 bit addresses, + * so we have to give the structure an extra member making rap pointing + * at the right address + */ +struct lance_regs { + volatile unsigned short rdp; /* register data port */ + unsigned short pad; + volatile unsigned short rap; /* register address port */ +}; + +int dec_lance_debug = 2; + +static struct tc_driver dec_lance_tc_driver; +static struct net_device *root_lance_dev; + +static inline void writereg(volatile unsigned short *regptr, short value) +{ + *regptr = value; + iob(); +} + +/* Load the CSR registers */ +static void load_csrs(struct lance_private *lp) +{ + volatile struct lance_regs *ll = lp->ll; + uint leptr; + + /* The address space as seen from the LANCE + * begins at address 0. 
HK + */ + leptr = 0; + + writereg(&ll->rap, LE_CSR1); + writereg(&ll->rdp, (leptr & 0xFFFF)); + writereg(&ll->rap, LE_CSR2); + writereg(&ll->rdp, leptr >> 16); + writereg(&ll->rap, LE_CSR3); + writereg(&ll->rdp, lp->busmaster_regval); + + /* Point back to csr0 */ + writereg(&ll->rap, LE_CSR0); +} + +/* + * Our specialized copy routines + * + */ +static void cp_to_buf(const int type, void *to, const void *from, int len) +{ + unsigned short *tp; + const unsigned short *fp; + unsigned short clen; + unsigned char *rtp; + const unsigned char *rfp; + + if (type == PMAD_LANCE) { + memcpy(to, from, len); + } else if (type == PMAX_LANCE) { + clen = len >> 1; + tp = to; + fp = from; + + while (clen--) { + *tp++ = *fp++; + tp++; + } + + clen = len & 1; + rtp = tp; + rfp = fp; + while (clen--) { + *rtp++ = *rfp++; + } + } else { + /* + * copy 16 Byte chunks + */ + clen = len >> 4; + tp = to; + fp = from; + while (clen--) { + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + tp += 8; + } + + /* + * do the rest, if any. + */ + clen = len & 15; + rtp = (unsigned char *) tp; + rfp = (unsigned char *) fp; + while (clen--) { + *rtp++ = *rfp++; + } + } + + iob(); +} + +static void cp_from_buf(const int type, void *to, const void *from, int len) +{ + unsigned short *tp; + const unsigned short *fp; + unsigned short clen; + unsigned char *rtp; + const unsigned char *rfp; + + if (type == PMAD_LANCE) { + memcpy(to, from, len); + } else if (type == PMAX_LANCE) { + clen = len >> 1; + tp = to; + fp = from; + while (clen--) { + *tp++ = *fp++; + fp++; + } + + clen = len & 1; + + rtp = tp; + rfp = fp; + + while (clen--) { + *rtp++ = *rfp++; + } + } else { + + /* + * copy 16 Byte chunks + */ + clen = len >> 4; + tp = to; + fp = from; + while (clen--) { + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + fp += 8; + } + + /* + * do the rest, if any. + */ + clen = len & 15; + rtp = (unsigned char *) tp; + rfp = (unsigned char *) fp; + while (clen--) { + *rtp++ = *rfp++; + } + + + } + +} + +/* Setup the Lance Rx and Tx rings */ +static void lance_init_ring(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile u16 *ib = (volatile u16 *)dev->mem_start; + uint leptr; + int i; + + /* Lock out other processes while setting up hardware */ + netif_stop_queue(dev); + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + /* Copy the ethernet address to the lance init block. 
+ * XXX bit 0 of the physical address registers has to be zero + */ + *lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) | + dev->dev_addr[0]; + *lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) | + dev->dev_addr[2]; + *lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) | + dev->dev_addr[4]; + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + leptr = offsetof(struct lance_init_block, brx_ring); + *lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) | + (leptr >> 16); + *lib_ptr(ib, rx_ptr, lp->type) = leptr; + if (ZERO) + printk("RX ptr: %8.8x(%8.8x)\n", + leptr, lib_off(brx_ring, lp->type)); + + /* Setup tx descriptor pointer */ + leptr = offsetof(struct lance_init_block, btx_ring); + *lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) | + (leptr >> 16); + *lib_ptr(ib, tx_ptr, lp->type) = leptr; + if (ZERO) + printk("TX ptr: %8.8x(%8.8x)\n", + leptr, lib_off(btx_ring, lp->type)); + + if (ZERO) + printk("TX rings:\n"); + + /* Setup the Tx ring entries */ + for (i = 0; i < TX_RING_SIZE; i++) { + leptr = lp->tx_buf_ptr_lnc[i]; + *lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr; + *lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) & + 0xff; + *lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000; + /* The ones required by tmd2 */ + *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0; + if (i < 3 && ZERO) + printk("%d: 0x%8.8x(0x%8.8x)\n", + i, leptr, (uint)lp->tx_buf_ptr_cpu[i]); + } + + /* Setup the Rx ring entries */ + if (ZERO) + printk("RX rings:\n"); + for (i = 0; i < RX_RING_SIZE; i++) { + leptr = lp->rx_buf_ptr_lnc[i]; + *lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr; + *lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) & + 0xff) | + LE_R1_OWN; + *lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE | + 0xf000; + *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0; + if (i < 3 && ZERO) + printk("%d: 0x%8.8x(0x%8.8x)\n", + i, leptr, (uint)lp->rx_buf_ptr_cpu[i]); + } + iob(); +} + +static int init_restart_lance(struct lance_private *lp) +{ + volatile struct lance_regs *ll = lp->ll; + int i; + + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_INIT); + + /* Wait for the lance to complete initialization */ + for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) { + udelay(10); + } + if ((i == 100) || (ll->rdp & LE_C0_ERR)) { + printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", + i, ll->rdp); + return -1; + } + if ((ll->rdp & LE_C0_ERR)) { + printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", + i, ll->rdp); + return -1; + } + writereg(&ll->rdp, LE_C0_IDON); + writereg(&ll->rdp, LE_C0_STRT); + writereg(&ll->rdp, LE_C0_INEA); + + return 0; +} + +static int lance_rx(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile u16 *ib = (volatile u16 *)dev->mem_start; + volatile u16 *rd; + unsigned short bits; + int entry, len; + struct sk_buff *skb; + +#ifdef TEST_HITS + { + int i; + + printk("["); + for (i = 0; i < RX_RING_SIZE; i++) { + if (i == lp->rx_new) + printk("%s", *lib_ptr(ib, brx_ring[i].rmd1, + lp->type) & + LE_R1_OWN ? "_" : "X"); + else + printk("%s", *lib_ptr(ib, brx_ring[i].rmd1, + lp->type) & + LE_R1_OWN ? "." : "1"); + } + printk("]"); + } +#endif + + for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type); + !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN); + rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) { + entry = lp->rx_new; + + /* We got an incomplete frame? 
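+		 * LE_R1_POK is LE_R1_SOP | LE_R1_EOP: a frame is only
+		 * complete when a single descriptor carries both the
+		 * start- and end-of-packet bits, i.e.
+		 *
+		 *	(bits & LE_R1_POK) == LE_R1_POK
+		 *
+		 * A frame that spilled across more than one buffer fails
+		 * this test and is counted as an overflow below.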
*/ + if ((bits & LE_R1_POK) != LE_R1_POK) { + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) + dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) + dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) + dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) + dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) + dev->stats.rx_errors++; + } else { + len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4; + skb = dev_alloc_skb(len + 2); + + if (skb == 0) { + printk("%s: Memory squeeze, deferring packet.\n", + dev->name); + dev->stats.rx_dropped++; + *rds_ptr(rd, mblength, lp->type) = 0; + *rds_ptr(rd, rmd1, lp->type) = + ((lp->rx_buf_ptr_lnc[entry] >> 16) & + 0xff) | LE_R1_OWN; + lp->rx_new = (entry + 1) & RX_RING_MOD_MASK; + return 0; + } + dev->stats.rx_bytes += len; + + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, len); /* make room */ + + cp_from_buf(lp->type, skb->data, + (char *)lp->rx_buf_ptr_cpu[entry], len); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + + /* Return the packet to the pool */ + *rds_ptr(rd, mblength, lp->type) = 0; + *rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000; + *rds_ptr(rd, rmd1, lp->type) = + ((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN; + lp->rx_new = (entry + 1) & RX_RING_MOD_MASK; + } + return 0; +} + +static void lance_tx(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile u16 *ib = (volatile u16 *)dev->mem_start; + volatile struct lance_regs *ll = lp->ll; + volatile u16 *td; + int i, j; + int status; + + j = lp->tx_old; + + spin_lock(&lp->lock); + + for (i = j; i != lp->tx_new; i = j) { + td = lib_ptr(ib, btx_ring[i], lp->type); + /* If we hit a packet not owned by us, stop */ + if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN) + break; + + if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) { + status = *tds_ptr(td, misc, lp->type); + + dev->stats.tx_errors++; + if (status & LE_T3_RTY) + dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) + dev->stats.tx_window_errors++; + + if (status & LE_T3_CLOS) { + dev->stats.tx_carrier_errors++; + printk("%s: Carrier Lost\n", dev->name); + /* Stop the lance */ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + /* Buffer errors and underflows turn off the + * transmitter, restart the adapter. + */ + if (status & (LE_T3_BUF | LE_T3_UFL)) { + dev->stats.tx_fifo_errors++; + + printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", + dev->name); + /* Stop the lance */ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) == + LE_T1_POK) { + /* + * So we don't count the packet more than once. + */ + *tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK); + + /* One collision before packet was sent. */ + if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE) + dev->stats.collisions++; + + /* More than one collision, be optimistic. 
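+				 * The LANCE only distinguishes "one retry"
+				 * (LE_T1_EONE) from "more than one retry"
+				 * (LE_T1_EMORE); it never reports an exact
+				 * count.  Crediting two collisions here is
+				 * thus a lower bound: a packet that needed
+				 * five retries still adds just 2.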
*/ + if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE) + dev->stats.collisions += 2; + + dev->stats.tx_packets++; + } + j = (j + 1) & TX_RING_MOD_MASK; + } + lp->tx_old = j; +out: + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL > 0) + netif_wake_queue(dev); + + spin_unlock(&lp->lock); +} + +static irqreturn_t lance_dma_merr_int(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + + printk(KERN_ERR "%s: DMA error\n", dev->name); + return IRQ_HANDLED; +} + +static irqreturn_t lance_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int csr0; + + writereg(&ll->rap, LE_CSR0); + csr0 = ll->rdp; + + /* Acknowledge all the interrupt sources ASAP */ + writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT)); + + if ((csr0 & LE_C0_ERR)) { + /* Clear the error condition */ + writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | + LE_C0_CERR | LE_C0_MERR); + } + if (csr0 & LE_C0_RINT) + lance_rx(dev); + + if (csr0 & LE_C0_TINT) + lance_tx(dev); + + if (csr0 & LE_C0_BABL) + dev->stats.tx_errors++; + + if (csr0 & LE_C0_MISS) + dev->stats.rx_errors++; + + if (csr0 & LE_C0_MERR) { + printk("%s: Memory error, status %04x\n", dev->name, csr0); + + writereg(&ll->rdp, LE_C0_STOP); + + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); + } + + writereg(&ll->rdp, LE_C0_INEA); + writereg(&ll->rdp, LE_C0_INEA); + return IRQ_HANDLED; +} + +static int lance_open(struct net_device *dev) +{ + volatile u16 *ib = (volatile u16 *)dev->mem_start; + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int status = 0; + + /* Stop the Lance */ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + + /* Set mode and clear multicast filter only at device open, + * so that lance_init_ring() called at any error will not + * forget multicast filters. + * + * BTW it is common bug in all lance drivers! --ANK + */ + *lib_ptr(ib, mode, lp->type) = 0; + *lib_ptr(ib, filter[0], lp->type) = 0; + *lib_ptr(ib, filter[1], lp->type) = 0; + *lib_ptr(ib, filter[2], lp->type) = 0; + *lib_ptr(ib, filter[3], lp->type) = 0; + + lance_init_ring(dev); + load_csrs(lp); + + netif_start_queue(dev); + + /* Associate IRQ with lance_interrupt */ + if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) { + printk("%s: Can't get IRQ %d\n", dev->name, dev->irq); + return -EAGAIN; + } + if (lp->dma_irq >= 0) { + unsigned long flags; + + if (request_irq(lp->dma_irq, lance_dma_merr_int, 0, + "lance error", dev)) { + free_irq(dev->irq, dev); + printk("%s: Can't get DMA IRQ %d\n", dev->name, + lp->dma_irq); + return -EAGAIN; + } + + spin_lock_irqsave(&ioasic_ssr_lock, flags); + + fast_mb(); + /* Enable I/O ASIC LANCE DMA. */ + ioasic_write(IO_REG_SSR, + ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN); + + fast_mb(); + spin_unlock_irqrestore(&ioasic_ssr_lock, flags); + } + + status = init_restart_lance(lp); + return status; +} + +static int lance_close(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + + netif_stop_queue(dev); + del_timer_sync(&lp->multicast_timer); + + /* Stop the card */ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + + if (lp->dma_irq >= 0) { + unsigned long flags; + + spin_lock_irqsave(&ioasic_ssr_lock, flags); + + fast_mb(); + /* Disable I/O ASIC LANCE DMA. 
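+		 * The SSR is shared with other IOASIC users, hence the
+		 * read-modify-write under ioasic_ssr_lock with the
+		 * fast_mb()/fast_iob() barriers around it; this mirrors
+		 * the enable path in lance_open().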
*/ + ioasic_write(IO_REG_SSR, + ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN); + + fast_iob(); + spin_unlock_irqrestore(&ioasic_ssr_lock, flags); + + free_irq(lp->dma_irq, dev); + } + free_irq(dev->irq, dev); + return 0; +} + +static inline int lance_reset(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int status; + + /* Stop the lance */ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + + lance_init_ring(dev); + load_csrs(lp); + dev->trans_start = jiffies; /* prevent tx timeout */ + status = init_restart_lance(lp); + return status; +} + +static void lance_tx_timeout(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + + printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", + dev->name, ll->rdp); + lance_reset(dev); + netif_wake_queue(dev); +} + +static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + volatile u16 *ib = (volatile u16 *)dev->mem_start; + unsigned long flags; + int entry, len; + + len = skb->len; + + if (len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + len = ETH_ZLEN; + } + + dev->stats.tx_bytes += len; + + spin_lock_irqsave(&lp->lock, flags); + + entry = lp->tx_new; + *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); + *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0; + + cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len); + + /* Now, give the packet to the lance */ + *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) = + ((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) | + (LE_T1_POK | LE_T1_OWN); + lp->tx_new = (entry + 1) & TX_RING_MOD_MASK; + + if (TX_BUFFS_AVAIL <= 0) + netif_stop_queue(dev); + + /* Kick the lance: transmit now */ + writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD); + + spin_unlock_irqrestore(&lp->lock, flags); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static void lance_load_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile u16 *ib = (volatile u16 *)dev->mem_start; + struct netdev_hw_addr *ha; + u32 crc; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) { + *lib_ptr(ib, filter[0], lp->type) = 0xffff; + *lib_ptr(ib, filter[1], lp->type) = 0xffff; + *lib_ptr(ib, filter[2], lp->type) = 0xffff; + *lib_ptr(ib, filter[3], lp->type) = 0xffff; + return; + } + /* clear the multicast filter */ + *lib_ptr(ib, filter[0], lp->type) = 0; + *lib_ptr(ib, filter[1], lp->type) = 0; + *lib_ptr(ib, filter[2], lp->type) = 0; + *lib_ptr(ib, filter[3], lp->type) = 0; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); + crc = crc >> 26; + *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf); + } +} + +static void lance_set_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile u16 *ib = (volatile u16 *)dev->mem_start; + volatile struct lance_regs *ll = lp->ll; + + if (!netif_running(dev)) + return; + + if (lp->tx_old != lp->tx_new) { + mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100); + netif_wake_queue(dev); + return; + } + + netif_stop_queue(dev); + + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + + lance_init_ring(dev); + + if (dev->flags & IFF_PROMISC) { + *lib_ptr(ib, mode, lp->type) |= LE_MO_PROM; + } else { + *lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM; + 
lance_load_multicast(dev); + } + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); +} + +static void lance_set_multicast_retry(unsigned long _opaque) +{ + struct net_device *dev = (struct net_device *) _opaque; + + lance_set_multicast(dev); +} + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_set_multicast_list = lance_set_multicast, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int __devinit dec_lance_probe(struct device *bdev, const int type) +{ + static unsigned version_printed; + static const char fmt[] = "declance%d"; + char name[10]; + struct net_device *dev; + struct lance_private *lp; + volatile struct lance_regs *ll; + resource_size_t start = 0, len = 0; + int i, ret; + unsigned long esar_base; + unsigned char *esar; + + if (dec_lance_debug && version_printed++ == 0) + printk(version); + + if (bdev) + snprintf(name, sizeof(name), "%s", dev_name(bdev)); + else { + i = 0; + dev = root_lance_dev; + while (dev) { + i++; + lp = netdev_priv(dev); + dev = lp->next; + } + snprintf(name, sizeof(name), fmt, i); + } + + dev = alloc_etherdev(sizeof(struct lance_private)); + if (!dev) { + printk(KERN_ERR "%s: Unable to allocate etherdev, aborting.\n", + name); + ret = -ENOMEM; + goto err_out; + } + + /* + * alloc_etherdev ensures the data structures used by the LANCE + * are aligned. + */ + lp = netdev_priv(dev); + spin_lock_init(&lp->lock); + + lp->type = type; + switch (type) { + case ASIC_LANCE: + dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE); + + /* buffer space for the on-board LANCE shared memory */ + /* + * FIXME: ugly hack! + */ + dev->mem_start = CKSEG1ADDR(0x00020000); + dev->mem_end = dev->mem_start + 0x00020000; + dev->irq = dec_interrupt[DEC_IRQ_LANCE]; + esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR); + + /* Workaround crash with booting KN04 2.1k from Disk */ + memset((void *)dev->mem_start, 0, + dev->mem_end - dev->mem_start); + + /* + * setup the pointer arrays, this sucks [tm] :-( + */ + for (i = 0; i < RX_RING_SIZE; i++) { + lp->rx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + + 2 * i * RX_BUFF_SIZE); + lp->rx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + i * RX_BUFF_SIZE); + } + for (i = 0; i < TX_RING_SIZE; i++) { + lp->tx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + + 2 * RX_RING_SIZE * RX_BUFF_SIZE + + 2 * i * TX_BUFF_SIZE); + lp->tx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + + RX_RING_SIZE * RX_BUFF_SIZE + + i * TX_BUFF_SIZE); + } + + /* Setup I/O ASIC LANCE DMA. 
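+		 * CPHYSADDR() strips the KSEG1 (uncached) segment bits
+		 * from dev->mem_start to recover the physical address of
+		 * the shared buffer; the I/O ASIC's LANCE DMA pointer
+		 * register evidently expects that address pre-shifted
+		 * left by 3.  Only this IOASIC variant has the extra
+		 * DMA-error interrupt -- the PMAD and PMAX cases below
+		 * set dma_irq to -1.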
*/ + lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR]; + ioasic_write(IO_REG_LANCE_DMA_P, + CPHYSADDR(dev->mem_start) << 3); + + break; +#ifdef CONFIG_TC + case PMAD_LANCE: + dev_set_drvdata(bdev, dev); + + start = to_tc_dev(bdev)->resource.start; + len = to_tc_dev(bdev)->resource.end - start + 1; + if (!request_mem_region(start, len, dev_name(bdev))) { + printk(KERN_ERR + "%s: Unable to reserve MMIO resource\n", + dev_name(bdev)); + ret = -EBUSY; + goto err_out_dev; + } + + dev->mem_start = CKSEG1ADDR(start); + dev->mem_end = dev->mem_start + 0x100000; + dev->base_addr = dev->mem_start + 0x100000; + dev->irq = to_tc_dev(bdev)->interrupt; + esar_base = dev->mem_start + 0x1c0002; + lp->dma_irq = -1; + + for (i = 0; i < RX_RING_SIZE; i++) { + lp->rx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + BUF_OFFSET_CPU + + i * RX_BUFF_SIZE); + lp->rx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + i * RX_BUFF_SIZE); + } + for (i = 0; i < TX_RING_SIZE; i++) { + lp->tx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + BUF_OFFSET_CPU + + RX_RING_SIZE * RX_BUFF_SIZE + + i * TX_BUFF_SIZE); + lp->tx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + + RX_RING_SIZE * RX_BUFF_SIZE + + i * TX_BUFF_SIZE); + } + + break; +#endif + case PMAX_LANCE: + dev->irq = dec_interrupt[DEC_IRQ_LANCE]; + dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE); + dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM); + dev->mem_end = dev->mem_start + KN01_SLOT_SIZE; + esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1); + lp->dma_irq = -1; + + /* + * setup the pointer arrays, this sucks [tm] :-( + */ + for (i = 0; i < RX_RING_SIZE; i++) { + lp->rx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + + 2 * i * RX_BUFF_SIZE); + lp->rx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + i * RX_BUFF_SIZE); + } + for (i = 0; i < TX_RING_SIZE; i++) { + lp->tx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + + 2 * RX_RING_SIZE * RX_BUFF_SIZE + + 2 * i * TX_BUFF_SIZE); + lp->tx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + + RX_RING_SIZE * RX_BUFF_SIZE + + i * TX_BUFF_SIZE); + } + + break; + + default: + printk(KERN_ERR "%s: declance_init called with unknown type\n", + name); + ret = -ENODEV; + goto err_out_dev; + } + + ll = (struct lance_regs *) dev->base_addr; + esar = (unsigned char *) esar_base; + + /* prom checks */ + /* First, check for test pattern */ + if (esar[0x60] != 0xff && esar[0x64] != 0x00 && + esar[0x68] != 0x55 && esar[0x6c] != 0xaa) { + printk(KERN_ERR + "%s: Ethernet station address prom not found!\n", + name); + ret = -ENODEV; + goto err_out_resource; + } + /* Check the prom contents */ + for (i = 0; i < 8; i++) { + if (esar[i * 4] != esar[0x3c - i * 4] && + esar[i * 4] != esar[0x40 + i * 4] && + esar[0x3c - i * 4] != esar[0x40 + i * 4]) { + printk(KERN_ERR "%s: Something is wrong with the " + "ethernet station address prom!\n", name); + ret = -ENODEV; + goto err_out_resource; + } + } + + /* Copy the ethernet address to the device structure, later to the + * lance initialization block so the lance gets it every time it's + * (re)initialized. 
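+	 * The ESAR ROM is evidently byte-wide but mapped on 32-bit
+	 * boundaries, so each octet of the station address lives at
+	 * every fourth byte and the loop below reads esar[i * 4].  The
+	 * checks above probe the ROM's test pattern (0xff, 0x00, 0x55,
+	 * 0xaa at offsets 0x60..0x6c) and the agreement of the three
+	 * stored copies of the address (forward, reversed, forward
+	 * again); note they use &&, so a ROM is rejected only when
+	 * every probed byte mismatches.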
+ */ + switch (type) { + case ASIC_LANCE: + printk("%s: IOASIC onboard LANCE", name); + break; + case PMAD_LANCE: + printk("%s: PMAD-AA", name); + break; + case PMAX_LANCE: + printk("%s: PMAX onboard LANCE", name); + break; + } + for (i = 0; i < 6; i++) + dev->dev_addr[i] = esar[i * 4]; + + printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); + + dev->netdev_ops = &lance_netdev_ops; + dev->watchdog_timeo = 5*HZ; + + /* lp->ll is the location of the registers for lance card */ + lp->ll = ll; + + /* busmaster_regval (CSR3) should be zero according to the PMAD-AA + * specification. + */ + lp->busmaster_regval = 0; + + dev->dma = 0; + + /* We cannot sleep if the chip is busy during a + * multicast list update event, because such events + * can occur from interrupts (ex. IPv6). So we + * use a timer to try again later when necessary. -DaveM + */ + init_timer(&lp->multicast_timer); + lp->multicast_timer.data = (unsigned long) dev; + lp->multicast_timer.function = lance_set_multicast_retry; + + ret = register_netdev(dev); + if (ret) { + printk(KERN_ERR + "%s: Unable to register netdev, aborting.\n", name); + goto err_out_resource; + } + + if (!bdev) { + lp->next = root_lance_dev; + root_lance_dev = dev; + } + + printk("%s: registered as %s.\n", name, dev->name); + return 0; + +err_out_resource: + if (bdev) + release_mem_region(start, len); + +err_out_dev: + free_netdev(dev); + +err_out: + return ret; +} + +static void __exit dec_lance_remove(struct device *bdev) +{ + struct net_device *dev = dev_get_drvdata(bdev); + resource_size_t start, len; + + unregister_netdev(dev); + start = to_tc_dev(bdev)->resource.start; + len = to_tc_dev(bdev)->resource.end - start + 1; + release_mem_region(start, len); + free_netdev(dev); +} + +/* Find all the lance cards on the system and initialize them */ +static int __init dec_lance_platform_probe(void) +{ + int count = 0; + + if (dec_interrupt[DEC_IRQ_LANCE] >= 0) { + if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) { + if (dec_lance_probe(NULL, ASIC_LANCE) >= 0) + count++; + } else if (!TURBOCHANNEL) { + if (dec_lance_probe(NULL, PMAX_LANCE) >= 0) + count++; + } + } + + return (count > 0) ? 
0 : -ENODEV; +} + +static void __exit dec_lance_platform_remove(void) +{ + while (root_lance_dev) { + struct net_device *dev = root_lance_dev; + struct lance_private *lp = netdev_priv(dev); + + unregister_netdev(dev); + root_lance_dev = lp->next; + free_netdev(dev); + } +} + +#ifdef CONFIG_TC +static int __devinit dec_lance_tc_probe(struct device *dev); +static int __exit dec_lance_tc_remove(struct device *dev); + +static const struct tc_device_id dec_lance_tc_table[] = { + { "DEC ", "PMAD-AA " }, + { } +}; +MODULE_DEVICE_TABLE(tc, dec_lance_tc_table); + +static struct tc_driver dec_lance_tc_driver = { + .id_table = dec_lance_tc_table, + .driver = { + .name = "declance", + .bus = &tc_bus_type, + .probe = dec_lance_tc_probe, + .remove = __exit_p(dec_lance_tc_remove), + }, +}; + +static int __devinit dec_lance_tc_probe(struct device *dev) +{ + int status = dec_lance_probe(dev, PMAD_LANCE); + if (!status) + get_device(dev); + return status; +} + +static int __exit dec_lance_tc_remove(struct device *dev) +{ + put_device(dev); + dec_lance_remove(dev); + return 0; +} +#endif + +static int __init dec_lance_init(void) +{ + int status; + + status = tc_register_driver(&dec_lance_tc_driver); + if (!status) + dec_lance_platform_probe(); + return status; +} + +static void __exit dec_lance_exit(void) +{ + dec_lance_platform_remove(); + tc_unregister_driver(&dec_lance_tc_driver); +} + + +module_init(dec_lance_init); +module_exit(dec_lance_exit); diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c new file mode 100644 index 0000000..f2015a8 --- /dev/null +++ b/drivers/net/ethernet/amd/depca.c @@ -0,0 +1,2111 @@ +/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux. + + Written 1994, 1995 by David C. Davies. + + + Copyright 1994 David C. Davies + and + United States Government + (as represented by the Director, National Security Agency). + + Copyright 1995 Digital Equipment Corporation. + + + This software may be used and distributed according to the terms of + the GNU General Public License, incorporated herein by reference. + + This driver is written for the Digital Equipment Corporation series + of DEPCA and EtherWORKS ethernet cards: + + DEPCA (the original) + DE100 + DE101 + DE200 Turbo + DE201 Turbo + DE202 Turbo (TP BNC) + DE210 + DE422 (EISA) + + The driver has been tested on DE100, DE200 and DE202 cards in a + relatively busy network. The DE422 has been tested a little. + + This driver will NOT work for the DE203, DE204 and DE205 series of + cards, since they have a new custom ASIC in place of the AMD LANCE + chip. See the 'ewrk3.c' driver in the Linux source tree for running + those cards. + + I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from) + a DECstation 5000/200. + + The author may be reached at davies@maniac.ultranet.com + + ========================================================================= + + The driver was originally based on the 'lance.c' driver from Donald + Becker which is included with the standard driver distribution for + linux. V0.4 is a complete re-write with only the kernel interface + remaining from the original code. + + 1) Lance.c code in /linux/drivers/net/ + 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook", + AMD, 1992 [(800) 222-9323]. + 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)", + AMD, Pub. #17881, May 1993. + 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA", + AMD, Pub. 
#16907, May 1992 + 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual", + Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003 + 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual", + Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003 + 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR + Digital Equipment Corporation, 1989 + 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual", + Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001 + + + Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this + driver. + + The original DEPCA card requires that the ethernet ROM address counter + be enabled to count and has an 8 bit NICSR. The ROM counter enabling is + only done when a 0x08 is read as the first address octet (to minimise + the chances of writing over some other hardware's I/O register). The + NICSR accesses have been changed to byte accesses for all the cards + supported by this driver, since there is only one useful bit in the MSB + (remote boot timeout) and it is not used. Also, there is a maximum of + only 48kB network RAM for this card. My thanks to Torbjorn Lindh for + help debugging all this (and holding my feet to the fire until I got it + right). + + The DE200 series boards have on-board 64kB RAM for use as a shared + memory network buffer. Only the DE100 cards make use of a 2kB buffer + mode which has not been implemented in this driver (only the 32kB and + 64kB modes are supported [16kB/48kB for the original DEPCA]). + + At the most only 2 DEPCA cards can be supported on the ISA bus because + there is only provision for two I/O base addresses on each card (0x300 + and 0x200). The I/O address is detected by searching for a byte sequence + in the Ethernet station address PROM at the expected I/O address for the + Ethernet PROM. The shared memory base address is 'autoprobed' by + looking for the self test PROM and detecting the card name. When a + second DEPCA is detected, information is placed in the base_addr + variable of the next device structure (which is created if necessary), + thus enabling ethif_probe initialization for the device. More than 2 + EISA cards can be supported, but care will be needed assigning the + shared memory to ensure that each slot has the correct IRQ, I/O address + and shared memory address assigned. + + ************************************************************************ + + NOTE: If you are using two ISA DEPCAs, it is important that you assign + the base memory addresses correctly. The driver autoprobes I/O 0x300 + then 0x200. The base memory address for the first device must be less + than that of the second so that the auto probe will correctly assign the + I/O and memory addresses on the same card. I can't think of a way to do + this unambiguously at the moment, since there is nothing on the cards to + tie I/O and memory information together. + + I am unable to test 2 cards together for now, so this code is + unchecked. All reports, good or bad, are welcome. + + ************************************************************************ + + The board IRQ setting must be at an unused IRQ which is auto-probed + using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are + {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is + really IRQ9 in machines with 16 IRQ lines. 
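+
+   As a worked example of the two-card NOTE above: with one DEPCA at
+   I/O 0x300 and a second at 0x200, give the 0x300 card the lower RAM
+   base (e.g. 0xc0000) and the 0x200 card a higher one (e.g. 0xd0000),
+   since the I/O autoprobe visits 0x300 first.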
+ + No 16MB memory limitation should exist with this driver as DMA is not + used and the common memory area is in low memory on the network card (my + current system has 20MB and I've not had problems yet). + + The ability to load this driver as a loadable module has been added. To + utilise this ability, you have to do <8 things: + + 0) have a copy of the loadable modules code installed on your system. + 1) copy depca.c from the /linux/drivers/net directory to your favourite + temporary directory. + 2) if you wish, edit the source code near line 1530 to reflect the I/O + address and IRQ you're using (see also 5). + 3) compile depca.c, but include -DMODULE in the command line to ensure + that the correct bits are compiled (see end of source code). + 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a + kernel with the depca configuration turned off and reboot. + 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100] + [Alan Cox: Changed the code to allow command line irq/io assignments] + [Dave Davies: Changed the code to allow command line mem/name + assignments] + 6) run the net startup bits for your eth?? interface manually + (usually /etc/rc.inet[12] at boot time). + 7) enjoy! + + Note that autoprobing is not allowed in loadable modules - the system is + already up and running and you're messing with interrupts. + + To unload a module, turn off the associated interface + 'ifconfig eth?? down' then 'rmmod depca'. + + To assign a base memory address for the shared memory when running as a + loadable module, see 5 above. To include the adapter name (if you have + no PROM but know the card name) also see 5 above. Note that this last + option will not work with kernel built-in depca's. + + The shared memory assignment for a loadable module makes sense to avoid + the 'memory autoprobe' picking the wrong shared memory (for the case of + 2 depca's in a PC). + + ************************************************************************ + Support for MCA EtherWORKS cards added 11-3-98. + Verified to work with up to 2 DE212 cards in a system (although not + fully stress-tested). + + Currently known bugs/limitations: + + Note: with the MCA stuff as a module, it trusts the MCA configuration, + not the command line for IRQ and memory address. You can + specify them if you want, but it will throw your values out. + You still have to pass the IO address it was configured as + though. + + ************************************************************************ + TO DO: + ------ + + + Revision History + ---------------- + + Version Date Description + + 0.1 25-jan-94 Initial writing. + 0.2 27-jan-94 Added LANCE TX hardware buffer chaining. + 0.3 1-feb-94 Added multiple DEPCA support. + 0.31 4-feb-94 Added DE202 recognition. + 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support. + 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable. + Add jabber packet fix from murf@perftech.com + and becker@super.org + 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access. + 0.35 8-mar-94 Added DE201 recognition. Tidied up. + 0.351 30-apr-94 Added EISA support. Added DE422 recognition. + 0.36 16-may-94 DE422 fix released. + 0.37 22-jul-94 Added MODULE support + 0.38 15-aug-94 Added DBR ROM switch in depca_close(). + Multi DEPCA bug fix. + 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0. + 0.381 12-dec-94 Added DE101 recognition, fix multicast bug. + 0.382 9-feb-95 Fix recognition bug reported by . 
+ 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by + + 0.384 17-mar-95 Fix a ring full bug reported by + 0.385 3-apr-95 Fix a recognition bug reported by + + 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility + 0.40 25-May-95 Rewrite for portability & updated. + ALPHA support from + 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from + suggestion by + 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable + modules. + Add 'adapter_name' for loadable modules when no PROM. + Both above from a suggestion by + . + Add new multicasting code. + 0.421 22-Apr-96 Fix alloc_device() bug + 0.422 29-Apr-96 Fix depca_hw_init() bug + 0.423 7-Jun-96 Fix module load bug + 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c + 0.44 1-Sep-97 Fix *_probe() to test check_region() first - bug + reported by + 0.45 3-Nov-98 Added support for MCA EtherWORKS (DE210/DE212) cards + by + 0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. + 0.5 14-Nov-98 Re-spin for 2.1.x kernels. + 0.51 27-Jun-99 Correct received packet length for CRC from + report by + 0.52 16-Oct-00 Fixes for 2.3 io memory accesses + Fix show-stopper (ints left masked) in depca_interrupt + by + 0.53 12-Jan-01 Release resources on failure, bss tidbits + by acme@conectiva.com.br + 0.54 08-Nov-01 use library crc32 functions + by Matt_Domsch@dell.com + 0.55 01-Mar-03 Use EISA/sysfs framework + + ========================================================================= +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_MCA +#include +#endif + +#ifdef CONFIG_EISA +#include +#endif + +#include "depca.h" + +static char version[] __initdata = "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n"; + +#ifdef DEPCA_DEBUG +static int depca_debug = DEPCA_DEBUG; +#else +static int depca_debug = 1; +#endif + +#define DEPCA_NDA 0xffe0 /* No Device Address */ + +#define TX_TIMEOUT (1*HZ) + +/* +** Ethernet PROM defines +*/ +#define PROBE_LENGTH 32 +#define ETH_PROM_SIG 0xAA5500FFUL + +/* +** Set the number of Tx and Rx buffers. Ensure that the memory requested +** here is <= to the amount of shared memory set up by the board switches. +** The number of descriptors MUST BE A POWER OF 2. 
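+** As a worked example, the total_memory formula below with the default
+** values (8 Rx and 8 Tx descriptors, 1536 byte buffers, 8 bytes of
+** descriptor overhead each) gives 2 * 8 * (8 + 1536) = 24704 bytes,
+** just over 24kB, which fits even the original DEPCA's 48kB of RAM.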
+** +** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ) +*/ +#define NUM_RX_DESC 8 /* Number of RX descriptors */ +#define NUM_TX_DESC 8 /* Number of TX descriptors */ +#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */ +#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */ + +/* +** EISA bus defines +*/ +#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */ + +/* +** ISA Bus defines +*/ +#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000} +#define DEPCA_TOTAL_SIZE 0x10 + +static struct { + u_long iobase; + struct platform_device *device; +} depca_io_ports[] = { + { 0x300, NULL }, + { 0x200, NULL }, + { 0 , NULL }, +}; + +/* +** Name <-> Adapter mapping +*/ +#define DEPCA_SIGNATURE {"DEPCA",\ + "DE100","DE101",\ + "DE200","DE201","DE202",\ + "DE210","DE212",\ + "DE422",\ + ""} + +static char* __initdata depca_signature[] = DEPCA_SIGNATURE; + +enum depca_type { + DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown +}; + +static char depca_string[] = "depca"; + +static int depca_device_remove (struct device *device); + +#ifdef CONFIG_EISA +static struct eisa_device_id depca_eisa_ids[] = { + { "DEC4220", de422 }, + { "" } +}; +MODULE_DEVICE_TABLE(eisa, depca_eisa_ids); + +static int depca_eisa_probe (struct device *device); + +static struct eisa_driver depca_eisa_driver = { + .id_table = depca_eisa_ids, + .driver = { + .name = depca_string, + .probe = depca_eisa_probe, + .remove = __devexit_p (depca_device_remove) + } +}; +#endif + +#ifdef CONFIG_MCA +/* +** Adapter ID for the MCA EtherWORKS DE210/212 adapter +*/ +#define DE210_ID 0x628d +#define DE212_ID 0x6def + +static short depca_mca_adapter_ids[] = { + DE210_ID, + DE212_ID, + 0x0000 +}; + +static char *depca_mca_adapter_name[] = { + "DEC EtherWORKS MC Adapter (DE210)", + "DEC EtherWORKS MC Adapter (DE212)", + NULL +}; + +static enum depca_type depca_mca_adapter_type[] = { + de210, + de212, + 0 +}; + +static int depca_mca_probe (struct device *); + +static struct mca_driver depca_mca_driver = { + .id_table = depca_mca_adapter_ids, + .driver = { + .name = depca_string, + .bus = &mca_bus_type, + .probe = depca_mca_probe, + .remove = __devexit_p(depca_device_remove), + }, +}; +#endif + +static int depca_isa_probe (struct platform_device *); + +static int __devexit depca_isa_remove(struct platform_device *pdev) +{ + return depca_device_remove(&pdev->dev); +} + +static struct platform_driver depca_isa_driver = { + .probe = depca_isa_probe, + .remove = __devexit_p(depca_isa_remove), + .driver = { + .name = depca_string, + }, +}; + +/* +** Miscellaneous info... +*/ +#define DEPCA_STRLEN 16 + +/* +** Memory Alignment. Each descriptor is 4 longwords long. To force a +** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and +** DESC_ALIGN. DEPCA_ALIGN aligns the start address of the private memory area +** and hence the RX descriptor ring's first entry. +*/ +#define DEPCA_ALIGN4 ((u_long)4 - 1) /* 1 longword align */ +#define DEPCA_ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */ +#define DEPCA_ALIGN DEPCA_ALIGN8 /* Keep the LANCE happy... */ + +/* +** The DEPCA Rx and Tx ring descriptors. +*/ +struct depca_rx_desc { + volatile s32 base; + s16 buf_length; /* This length is negative 2's complement! */ + s16 msg_length; /* This length is "normal". */ +}; + +struct depca_tx_desc { + volatile s32 base; + s16 length; /* This length is negative 2's complement! 
*/ + s16 misc; /* Errors and TDR info */ +}; + +#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM + to LANCE memory address space */ + +/* +** The Lance initialization block, described in databook, in common memory. +*/ +struct depca_init { + u16 mode; /* Mode register */ + u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */ + u8 mcast_table[8]; /* Multicast Hash Table. */ + u32 rx_ring; /* Rx ring base pointer & ring length */ + u32 tx_ring; /* Tx ring base pointer & ring length */ +}; + +#define DEPCA_PKT_STAT_SZ 16 +#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you + increase DEPCA_PKT_STAT_SZ */ +struct depca_private { + char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */ + enum depca_type adapter; /* Adapter type */ + enum { + DEPCA_BUS_MCA = 1, + DEPCA_BUS_ISA, + DEPCA_BUS_EISA, + } depca_bus; /* type of bus */ + struct depca_init init_block; /* Shadow Initialization block */ +/* CPU address space fields */ + struct depca_rx_desc __iomem *rx_ring; /* Pointer to start of RX descriptor ring */ + struct depca_tx_desc __iomem *tx_ring; /* Pointer to start of TX descriptor ring */ + void __iomem *rx_buff[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */ + void __iomem *tx_buff[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */ + void __iomem *sh_mem; /* CPU mapped virt address of device RAM */ + u_long mem_start; /* Bus address of device RAM (before remap) */ + u_long mem_len; /* device memory size */ +/* Device address space fields */ + u_long device_ram_start; /* Start of RAM in device addr space */ +/* Offsets used in both address spaces */ + u_long rx_ring_offset; /* Offset from start of RAM to rx_ring */ + u_long tx_ring_offset; /* Offset from start of RAM to tx_ring */ + u_long buffs_offset; /* LANCE Rx and Tx buffers start address. */ +/* Kernel-only (not device) fields */ + int rx_new, tx_new; /* The next free ring entry */ + int rx_old, tx_old; /* The ring entries to be free()ed. 
*/ + spinlock_t lock; + struct { /* Private stats counters */ + u32 bins[DEPCA_PKT_STAT_SZ]; + u32 unicast; + u32 multicast; + u32 broadcast; + u32 excessive_collisions; + u32 tx_underruns; + u32 excessive_underruns; + } pktStats; + int txRingMask; /* TX ring mask */ + int rxRingMask; /* RX ring mask */ + s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */ + s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */ +}; + +/* +** The transmit ring full condition is described by the tx_old and tx_new +** pointers by: +** tx_old = tx_new Empty ring +** tx_old = tx_new+1 Full ring +** tx_old+txRingMask = tx_new Full ring (wrapped condition) +*/ +#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ + lp->tx_old+lp->txRingMask-lp->tx_new:\ + lp->tx_old -lp->tx_new-1) + +/* +** Public Functions +*/ +static int depca_open(struct net_device *dev); +static netdev_tx_t depca_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t depca_interrupt(int irq, void *dev_id); +static int depca_close(struct net_device *dev); +static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static void depca_tx_timeout(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); + +/* +** Private functions +*/ +static void depca_init_ring(struct net_device *dev); +static int depca_rx(struct net_device *dev); +static int depca_tx(struct net_device *dev); + +static void LoadCSRs(struct net_device *dev); +static int InitRestartDepca(struct net_device *dev); +static int DepcaSignature(char *name, u_long paddr); +static int DevicePresent(u_long ioaddr); +static int get_hw_addr(struct net_device *dev); +static void SetMulticastFilter(struct net_device *dev); +static int load_packet(struct net_device *dev, struct sk_buff *skb); +static void depca_dbg_open(struct net_device *dev); + +static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 }; +static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 }; +static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 }; +static u_char *depca_irq; + +static int irq; +static int io; +static char *adapter_name; +static int mem; /* For loadable module assignment + use insmod mem=0x????? .... */ +module_param (irq, int, 0); +module_param (io, int, 0); +module_param (adapter_name, charp, 0); +module_param (mem, int, 0); +MODULE_PARM_DESC(irq, "DEPCA IRQ number"); +MODULE_PARM_DESC(io, "DEPCA I/O base address"); +MODULE_PARM_DESC(adapter_name, "DEPCA adapter name"); +MODULE_PARM_DESC(mem, "DEPCA shared memory address"); +MODULE_LICENSE("GPL"); + +/* +** Miscellaneous defines... 
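+** (STOP_DEPCA below expands to two statements, so it cannot safely be
+** used as the unbraced body of an if/else; note too that it leaves
+** DEPCA_ADDR pointing at CSR0, which the probe code relies on when it
+** reads CSR0 back through DEPCA_DATA.)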
+*/ +#define STOP_DEPCA \ + outw(CSR0, DEPCA_ADDR);\ + outw(STOP, DEPCA_DATA) + +static const struct net_device_ops depca_netdev_ops = { + .ndo_open = depca_open, + .ndo_start_xmit = depca_start_xmit, + .ndo_stop = depca_close, + .ndo_set_multicast_list = set_multicast_list, + .ndo_do_ioctl = depca_ioctl, + .ndo_tx_timeout = depca_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __init depca_hw_init (struct net_device *dev, struct device *device) +{ + struct depca_private *lp; + int i, j, offset, netRAM, mem_len, status = 0; + s16 nicsr; + u_long ioaddr; + u_long mem_start; + + /* + * We are now supposed to enter this function with the + * following fields filled with proper values : + * + * dev->base_addr + * lp->mem_start + * lp->depca_bus + * lp->adapter + * + * dev->irq can be set if known from device configuration (on + * MCA or EISA) or module option. Otherwise, it will be auto + * detected. + */ + + ioaddr = dev->base_addr; + + STOP_DEPCA; + + nicsr = inb(DEPCA_NICSR); + nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM); + outb(nicsr, DEPCA_NICSR); + + if (inw(DEPCA_DATA) != STOP) { + return -ENXIO; + } + + lp = netdev_priv(dev); + mem_start = lp->mem_start; + + if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown) + return -ENXIO; + + printk("%s: %s at 0x%04lx", + dev_name(device), depca_signature[lp->adapter], ioaddr); + + switch (lp->depca_bus) { +#ifdef CONFIG_MCA + case DEPCA_BUS_MCA: + printk(" (MCA slot %d)", to_mca_device(device)->slot + 1); + break; +#endif + +#ifdef CONFIG_EISA + case DEPCA_BUS_EISA: + printk(" (EISA slot %d)", to_eisa_device(device)->slot); + break; +#endif + + case DEPCA_BUS_ISA: + break; + + default: + printk("Unknown DEPCA bus %d\n", lp->depca_bus); + return -ENXIO; + } + + printk(", h/w address "); + status = get_hw_addr(dev); + printk("%pM", dev->dev_addr); + if (status != 0) { + printk(" which has an Ethernet PROM CRC error.\n"); + return -ENXIO; + } + + /* Set up the maximum amount of network RAM(kB) */ + netRAM = ((lp->adapter != DEPCA) ? 64 : 48); + if ((nicsr & _128KB) && (lp->adapter == de422)) + netRAM = 128; + + /* Shared Memory Base Address */ + if (nicsr & BUF) { + nicsr &= ~BS; /* DEPCA RAM in top 32k */ + netRAM -= 32; + + /* Only EISA/ISA needs start address to be re-computed */ + if (lp->depca_bus != DEPCA_BUS_MCA) + mem_start += 0x8000; + } + + if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init))) + > (netRAM << 10)) { + printk(",\n requests %dkB RAM: only %dkB is available!\n", (mem_len >> 10), netRAM); + return -ENXIO; + } + + printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start); + + /* Enable the shadow RAM. 
*/ + if (lp->adapter != DEPCA) { + nicsr |= SHE; + outb(nicsr, DEPCA_NICSR); + } + + spin_lock_init(&lp->lock); + sprintf(lp->adapter_name, "%s (%s)", + depca_signature[lp->adapter], dev_name(device)); + status = -EBUSY; + + /* Initialisation Block */ + if (!request_mem_region (mem_start, mem_len, lp->adapter_name)) { + printk(KERN_ERR "depca: cannot request ISA memory, aborting\n"); + goto out_priv; + } + + status = -EIO; + lp->sh_mem = ioremap(mem_start, mem_len); + if (lp->sh_mem == NULL) { + printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n"); + goto out1; + } + + lp->mem_start = mem_start; + lp->mem_len = mem_len; + lp->device_ram_start = mem_start & LA_MASK; + + offset = 0; + offset += sizeof(struct depca_init); + + /* Tx & Rx descriptors (aligned to a quadword boundary) */ + offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN; + lp->rx_ring = lp->sh_mem + offset; + lp->rx_ring_offset = offset; + + offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC); + lp->tx_ring = lp->sh_mem + offset; + lp->tx_ring_offset = offset; + + offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC); + + lp->buffs_offset = offset; + + /* Finish initialising the ring information. */ + lp->rxRingMask = NUM_RX_DESC - 1; + lp->txRingMask = NUM_TX_DESC - 1; + + /* Calculate Tx/Rx RLEN size for the descriptors. */ + for (i = 0, j = lp->rxRingMask; j > 0; i++) { + j >>= 1; + } + lp->rx_rlen = (s32) (i << 29); + for (i = 0, j = lp->txRingMask; j > 0; i++) { + j >>= 1; + } + lp->tx_rlen = (s32) (i << 29); + + /* Load the initialisation block */ + depca_init_ring(dev); + + /* Initialise the control and status registers */ + LoadCSRs(dev); + + /* Enable DEPCA board interrupts for autoprobing */ + nicsr = ((nicsr & ~IM) | IEN); + outb(nicsr, DEPCA_NICSR); + + /* To auto-IRQ we enable the initialization-done and DMA err, + interrupts. For now we will always get a DMA error. */ + if (dev->irq < 2) { + unsigned char irqnum; + unsigned long irq_mask, delay; + + irq_mask = probe_irq_on(); + + /* Assign the correct irq list */ + switch (lp->adapter) { + case DEPCA: + case de100: + case de101: + depca_irq = de1xx_irq; + break; + case de200: + case de201: + case de202: + case de210: + case de212: + depca_irq = de2xx_irq; + break; + case de422: + depca_irq = de422_irq; + break; + + default: + break; /* Not reached */ + } + + /* Trigger an initialization just for the interrupt. */ + outw(INEA | INIT, DEPCA_DATA); + + delay = jiffies + HZ/50; + while (time_before(jiffies, delay)) + yield(); + + irqnum = probe_irq_off(irq_mask); + + status = -ENXIO; + if (!irqnum) { + printk(" and failed to detect IRQ line.\n"); + goto out2; + } else { + for (dev->irq = 0, i = 0; (depca_irq[i]) && (!dev->irq); i++) + if (irqnum == depca_irq[i]) { + dev->irq = irqnum; + printk(" and uses IRQ%d.\n", dev->irq); + } + + if (!dev->irq) { + printk(" but incorrect IRQ line detected.\n"); + goto out2; + } + } + } else { + printk(" and assigned IRQ%d.\n", dev->irq); + } + + if (depca_debug > 1) { + printk(version); + } + + /* The DEPCA-specific entries in the device structure. 
*/ + dev->netdev_ops = &depca_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + dev->mem_start = 0; + + dev_set_drvdata(device, dev); + SET_NETDEV_DEV (dev, device); + + status = register_netdev(dev); + if (status == 0) + return 0; +out2: + iounmap(lp->sh_mem); +out1: + release_mem_region (mem_start, mem_len); +out_priv: + return status; +} + + +static int depca_open(struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + u_long ioaddr = dev->base_addr; + s16 nicsr; + int status = 0; + + STOP_DEPCA; + nicsr = inb(DEPCA_NICSR); + + /* Make sure the shadow RAM is enabled */ + if (lp->adapter != DEPCA) { + nicsr |= SHE; + outb(nicsr, DEPCA_NICSR); + } + + /* Re-initialize the DEPCA... */ + depca_init_ring(dev); + LoadCSRs(dev); + + depca_dbg_open(dev); + + if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) { + printk("depca_open(): Requested IRQ%d is busy\n", dev->irq); + status = -EAGAIN; + } else { + + /* Enable DEPCA board interrupts and turn off LED */ + nicsr = ((nicsr & ~IM & ~LED) | IEN); + outb(nicsr, DEPCA_NICSR); + outw(CSR0, DEPCA_ADDR); + + netif_start_queue(dev); + + status = InitRestartDepca(dev); + + if (depca_debug > 1) { + printk("CSR0: 0x%4.4x\n", inw(DEPCA_DATA)); + printk("nicsr: 0x%02x\n", inb(DEPCA_NICSR)); + } + } + return status; +} + +/* Initialize the lance Rx and Tx descriptor rings. */ +static void depca_init_ring(struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + u_int i; + u_long offset; + + /* Lock out other processes whilst setting up the hardware */ + netif_stop_queue(dev); + + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + /* Initialize the base address and length of each buffer in the ring */ + for (i = 0; i <= lp->rxRingMask; i++) { + offset = lp->buffs_offset + i * RX_BUFF_SZ; + writel((lp->device_ram_start + offset) | R_OWN, &lp->rx_ring[i].base); + writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length); + lp->rx_buff[i] = lp->sh_mem + offset; + } + + for (i = 0; i <= lp->txRingMask; i++) { + offset = lp->buffs_offset + (i + lp->rxRingMask + 1) * TX_BUFF_SZ; + writel((lp->device_ram_start + offset) & 0x00ffffff, &lp->tx_ring[i].base); + lp->tx_buff[i] = lp->sh_mem + offset; + } + + /* Set up the initialization block */ + lp->init_block.rx_ring = (lp->device_ram_start + lp->rx_ring_offset) | lp->rx_rlen; + lp->init_block.tx_ring = (lp->device_ram_start + lp->tx_ring_offset) | lp->tx_rlen; + + SetMulticastFilter(dev); + + for (i = 0; i < ETH_ALEN; i++) { + lp->init_block.phys_addr[i] = dev->dev_addr[i]; + } + + lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */ +} + + +static void depca_tx_timeout(struct net_device *dev) +{ + u_long ioaddr = dev->base_addr; + + printk("%s: transmit timed out, status %04x, resetting.\n", dev->name, inw(DEPCA_DATA)); + + STOP_DEPCA; + depca_init_ring(dev); + LoadCSRs(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + InitRestartDepca(dev); +} + + +/* +** Writes a socket buffer to TX descriptor ring and starts transmission +*/ +static netdev_tx_t depca_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + u_long ioaddr = dev->base_addr; + int status = 0; + + /* Transmitter timeout, serious problems. */ + if (skb->len < 1) + goto out; + + if (skb_padto(skb, ETH_ZLEN)) + goto out; + + netif_stop_queue(dev); + + if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */ + status = load_packet(dev, skb); + + if (!status) { + /* Trigger an immediate send demand. 
*/ + outw(CSR0, DEPCA_ADDR); + outw(INEA | TDMD, DEPCA_DATA); + + dev_kfree_skb(skb); + } + if (TX_BUFFS_AVAIL) + netif_start_queue(dev); + } else + status = NETDEV_TX_LOCKED; + + out: + return status; +} + +/* +** The DEPCA interrupt handler. +*/ +static irqreturn_t depca_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct depca_private *lp; + s16 csr0, nicsr; + u_long ioaddr; + + if (dev == NULL) { + printk("depca_interrupt(): irq %d for unknown device.\n", irq); + return IRQ_NONE; + } + + lp = netdev_priv(dev); + ioaddr = dev->base_addr; + + spin_lock(&lp->lock); + + /* mask the DEPCA board interrupts and turn on the LED */ + nicsr = inb(DEPCA_NICSR); + nicsr |= (IM | LED); + outb(nicsr, DEPCA_NICSR); + + outw(CSR0, DEPCA_ADDR); + csr0 = inw(DEPCA_DATA); + + /* Acknowledge all of the current interrupt sources ASAP. */ + outw(csr0 & INTE, DEPCA_DATA); + + if (csr0 & RINT) /* Rx interrupt (packet arrived) */ + depca_rx(dev); + + if (csr0 & TINT) /* Tx interrupt (packet sent) */ + depca_tx(dev); + + /* Any resources available? */ + if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) { + netif_wake_queue(dev); + } + + /* Unmask the DEPCA board interrupts and turn off the LED */ + nicsr = (nicsr & ~IM & ~LED); + outb(nicsr, DEPCA_NICSR); + + spin_unlock(&lp->lock); + return IRQ_HANDLED; +} + +/* Called with lp->lock held */ +static int depca_rx(struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + int i, entry; + s32 status; + + for (entry = lp->rx_new; !(readl(&lp->rx_ring[entry].base) & R_OWN); entry = lp->rx_new) { + status = readl(&lp->rx_ring[entry].base) >> 16; + if (status & R_STP) { /* Remember start of frame */ + lp->rx_old = entry; + } + if (status & R_ENP) { /* Valid frame status */ + if (status & R_ERR) { /* There was an error. */ + dev->stats.rx_errors++; /* Update the error stats. 
*/ + if (status & R_FRAM) + dev->stats.rx_frame_errors++; + if (status & R_OFLO) + dev->stats.rx_over_errors++; + if (status & R_CRC) + dev->stats.rx_crc_errors++; + if (status & R_BUFF) + dev->stats.rx_fifo_errors++; + } else { + short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4; + struct sk_buff *skb; + + skb = dev_alloc_skb(pkt_len + 2); + if (skb != NULL) { + unsigned char *buf; + skb_reserve(skb, 2); /* 16 byte align the IP header */ + buf = skb_put(skb, pkt_len); + if (entry < lp->rx_old) { /* Wrapped buffer */ + len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ; + memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len); + memcpy_fromio(buf + len, lp->rx_buff[0], pkt_len - len); + } else { /* Linear buffer */ + memcpy_fromio(buf, lp->rx_buff[lp->rx_old], pkt_len); + } + + /* + ** Notify the upper protocol layers that there is another + ** packet to handle + */ + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + + /* + ** Update stats + */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) { + if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) { + lp->pktStats.bins[i]++; + i = DEPCA_PKT_STAT_SZ; + } + } + if (is_multicast_ether_addr(buf)) { + if (is_broadcast_ether_addr(buf)) { + lp->pktStats.broadcast++; + } else { + lp->pktStats.multicast++; + } + } else if (compare_ether_addr(buf, dev->dev_addr) == 0) { + lp->pktStats.unicast++; + } + + lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ + if (lp->pktStats.bins[0] == 0) { /* Reset counters */ + memset((char *) &lp->pktStats, 0, sizeof(lp->pktStats)); + } + } else { + printk("%s: Memory squeeze, deferring packet.\n", dev->name); + dev->stats.rx_dropped++; /* Really, deferred. */ + break; + } + } + /* Change buffer ownership for this last frame, back to the adapter */ + for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) { + writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); + } + writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); + } + + /* + ** Update entry information + */ + lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask; + } + + return 0; +} + +/* +** Buffer sent - check for buffer errors. +** Called with lp->lock held +*/ +static int depca_tx(struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + int entry; + s32 status; + u_long ioaddr = dev->base_addr; + + for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) { + status = readl(&lp->tx_ring[entry].base) >> 16; + + if (status < 0) { /* Packet not yet sent! */ + break; + } else if (status & T_ERR) { /* An error occurred. */ + status = readl(&lp->tx_ring[entry].misc); + dev->stats.tx_errors++; + if (status & TMD3_RTRY) + dev->stats.tx_aborted_errors++; + if (status & TMD3_LCAR) + dev->stats.tx_carrier_errors++; + if (status & TMD3_LCOL) + dev->stats.tx_window_errors++; + if (status & TMD3_UFLO) + dev->stats.tx_fifo_errors++; + if (status & (TMD3_BUFF | TMD3_UFLO)) { + /* Trigger an immediate send demand. 
*/ + outw(CSR0, DEPCA_ADDR); + outw(INEA | TDMD, DEPCA_DATA); + } + } else if (status & (T_MORE | T_ONE)) { + dev->stats.collisions++; + } else { + dev->stats.tx_packets++; + } + + /* Update all the pointers */ + lp->tx_old = (lp->tx_old + 1) & lp->txRingMask; + } + + return 0; +} + +static int depca_close(struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + s16 nicsr; + u_long ioaddr = dev->base_addr; + + netif_stop_queue(dev); + + outw(CSR0, DEPCA_ADDR); + + if (depca_debug > 1) { + printk("%s: Shutting down ethercard, status was %2.2x.\n", dev->name, inw(DEPCA_DATA)); + } + + /* + ** We stop the DEPCA here -- it occasionally polls + ** memory if we don't. + */ + outw(STOP, DEPCA_DATA); + + /* + ** Give back the ROM in case the user wants to go to DOS + */ + if (lp->adapter != DEPCA) { + nicsr = inb(DEPCA_NICSR); + nicsr &= ~SHE; + outb(nicsr, DEPCA_NICSR); + } + + /* + ** Free the associated irq + */ + free_irq(dev->irq, dev); + return 0; +} + +static void LoadCSRs(struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + u_long ioaddr = dev->base_addr; + + outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */ + outw((u16) lp->device_ram_start, DEPCA_DATA); + outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */ + outw((u16) (lp->device_ram_start >> 16), DEPCA_DATA); + outw(CSR3, DEPCA_ADDR); /* ALE control */ + outw(ACON, DEPCA_DATA); + + outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */ +} + +static int InitRestartDepca(struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + u_long ioaddr = dev->base_addr; + int i, status = 0; + + /* Copy the shadow init_block to shared memory */ + memcpy_toio(lp->sh_mem, &lp->init_block, sizeof(struct depca_init)); + + outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */ + outw(INIT, DEPCA_DATA); /* initialize DEPCA */ + + /* wait for lance to complete initialisation */ + for (i = 0; (i < 100) && !(inw(DEPCA_DATA) & IDON); i++); + + if (i != 100) { + /* clear IDON by writing a "1", enable interrupts and start lance */ + outw(IDON | INEA | STRT, DEPCA_DATA); + if (depca_debug > 2) { + printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA)); + } + } else { + printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA)); + status = -1; + } + + return status; +} + +/* +** Set or clear the multicast filter for this adaptor. +*/ +static void set_multicast_list(struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + u_long ioaddr = dev->base_addr; + + netif_stop_queue(dev); + while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */ + + STOP_DEPCA; /* Temporarily stop the depca. */ + depca_init_ring(dev); /* Initialize the descriptor rings */ + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */ + lp->init_block.mode |= PROM; + } else { + SetMulticastFilter(dev); + lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */ + } + + LoadCSRs(dev); /* Reload CSR3 */ + InitRestartDepca(dev); /* Resume normal operation. */ + netif_start_queue(dev); /* Unlock the TX ring */ +} + +/* +** Calculate the hash code and update the logical address filter +** from a list of ethernet multicast addresses. +** Big endian crc one liner is mine, all mine, ha ha ha ha! +** LANCE calculates its hash codes big endian. 
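+** For example, a CRC whose six least significant bits are 0x2c
+** (101100b) reverses to the hashcode 001101b = 13, which selects
+** bit 5 (13 & 7) of filter byte 1 (13 >> 3).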
+*/ +static void SetMulticastFilter(struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i, j, bit, byte; + u16 hashcode; + u32 crc; + + if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */ + for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { + lp->init_block.mcast_table[i] = (char) 0xff; + } + } else { + for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { /* Clear the multicast table */ + lp->init_block.mcast_table[i] = 0; + } + /* Add multicast addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc(ETH_ALEN, ha->addr); + hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */ + for (j = 0; j < 5; j++) { /* ... in reverse order. */ + hashcode = (hashcode << 1) | ((crc >>= 1) & 1); + } + + byte = hashcode >> 3; /* bit[3-5] -> byte in filter */ + bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */ + lp->init_block.mcast_table[byte] |= bit; + } + } +} + +static int __init depca_common_init (u_long ioaddr, struct net_device **devp) +{ + int status = 0; + + if (!request_region (ioaddr, DEPCA_TOTAL_SIZE, depca_string)) { + status = -EBUSY; + goto out; + } + + if (DevicePresent(ioaddr)) { + status = -ENODEV; + goto out_release; + } + + if (!(*devp = alloc_etherdev (sizeof (struct depca_private)))) { + status = -ENOMEM; + goto out_release; + } + + return 0; + + out_release: + release_region (ioaddr, DEPCA_TOTAL_SIZE); + out: + return status; +} + +#ifdef CONFIG_MCA +/* +** Microchannel bus I/O device probe +*/ +static int __init depca_mca_probe(struct device *device) +{ + unsigned char pos[2]; + unsigned char where; + unsigned long iobase, mem_start; + int irq, err; + struct mca_device *mdev = to_mca_device (device); + struct net_device *dev; + struct depca_private *lp; + + /* + ** Search for the adapter. If an address has been given, search + ** specifically for the card at that address. Otherwise find the + ** first card in the system. + */ + + pos[0] = mca_device_read_stored_pos(mdev, 2); + pos[1] = mca_device_read_stored_pos(mdev, 3); + + /* + ** IO of card is handled by bits 1 and 2 of pos0. + ** + ** bit2 bit1 IO + ** 0 0 0x2c00 + ** 0 1 0x2c10 + ** 1 0 0x2c20 + ** 1 1 0x2c30 + */ + where = (pos[0] & 6) >> 1; + iobase = 0x2c00 + (0x10 * where); + + /* + ** Found the adapter we were looking for. Now start setting it up. + ** + ** First work on decoding the IRQ. It's stored in the lower 4 bits + ** of pos1. Bits are as follows (from the ADF file): + ** + ** Bits + ** 3 2 1 0 IRQ + ** -------------------- + ** 0 0 1 0 5 + ** 0 0 0 1 9 + ** 0 1 0 0 10 + ** 1 0 0 0 11 + */ + where = pos[1] & 0x0f; + switch (where) { + case 1: + irq = 9; + break; + case 2: + irq = 5; + break; + case 4: + irq = 10; + break; + case 8: + irq = 11; + break; + default: + printk("%s: mca_probe IRQ error. You should never get here (%d).\n", mdev->name, where); + return -EINVAL; + } + + /* + ** Shared memory address of adapter is stored in bits 3-5 of pos0. + ** They are mapped as follows: + ** + ** Bit + ** 5 4 3 Memory Addresses + ** 0 0 0 C0000-CFFFF (64K) + ** 1 0 0 C8000-CFFFF (32K) + ** 0 0 1 D0000-DFFFF (64K) + ** 1 0 1 D8000-DFFFF (32K) + ** 0 1 0 E0000-EFFFF (64K) + ** 1 1 0 E8000-EFFFF (32K) + */ + where = (pos[0] & 0x18) >> 3; + mem_start = 0xc0000 + (where * 0x10000); + if (pos[0] & 0x20) { + mem_start += 0x8000; + } + + /* claim the slot */ + strncpy(mdev->name, depca_mca_adapter_name[mdev->index], + sizeof(mdev->name)); + mca_device_set_claim(mdev, 1); + + /* + ** Get everything allocated and initialized... 
(almost just + ** like the ISA and EISA probes) + */ + irq = mca_device_transform_irq(mdev, irq); + iobase = mca_device_transform_ioport(mdev, iobase); + + if ((err = depca_common_init (iobase, &dev))) + goto out_unclaim; + + dev->irq = irq; + dev->base_addr = iobase; + lp = netdev_priv(dev); + lp->depca_bus = DEPCA_BUS_MCA; + lp->adapter = depca_mca_adapter_type[mdev->index]; + lp->mem_start = mem_start; + + if ((err = depca_hw_init(dev, device))) + goto out_free; + + return 0; + + out_free: + free_netdev (dev); + release_region (iobase, DEPCA_TOTAL_SIZE); + out_unclaim: + mca_device_set_claim(mdev, 0); + + return err; +} +#endif + +/* +** ISA bus I/O device probe +*/ + +static void __init depca_platform_probe (void) +{ + int i; + struct platform_device *pldev; + + for (i = 0; depca_io_ports[i].iobase; i++) { + depca_io_ports[i].device = NULL; + + /* if an address has been specified on the command + * line, use it (if valid) */ + if (io && io != depca_io_ports[i].iobase) + continue; + + pldev = platform_device_alloc(depca_string, i); + if (!pldev) + continue; + + pldev->dev.platform_data = (void *) depca_io_ports[i].iobase; + depca_io_ports[i].device = pldev; + + if (platform_device_add(pldev)) { + depca_io_ports[i].device = NULL; + pldev->dev.platform_data = NULL; + platform_device_put(pldev); + continue; + } + + if (!pldev->dev.driver) { + /* The driver was not bound to this device, there was + * no hardware at this address. Unregister it, as the + * release function will take care of freeing the + * allocated structure */ + + depca_io_ports[i].device = NULL; + pldev->dev.platform_data = NULL; + platform_device_unregister (pldev); + } + } +} + +static enum depca_type __init depca_shmem_probe (ulong *mem_start) +{ + u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES; + enum depca_type adapter = unknown; + int i; + + for (i = 0; mem_base[i]; i++) { + *mem_start = mem ? mem : mem_base[i]; + adapter = DepcaSignature (adapter_name, *mem_start); + if (adapter != unknown) + break; + } + + return adapter; +} + +static int __devinit depca_isa_probe (struct platform_device *device) +{ + struct net_device *dev; + struct depca_private *lp; + u_long ioaddr, mem_start = 0; + enum depca_type adapter = unknown; + int status = 0; + + ioaddr = (u_long) device->dev.platform_data; + + if ((status = depca_common_init (ioaddr, &dev))) + goto out; + + adapter = depca_shmem_probe (&mem_start); + + if (adapter == unknown) { + status = -ENODEV; + goto out_free; + } + + dev->base_addr = ioaddr; + dev->irq = irq; /* Use whatever value the user gave + * us, and 0 if he didn't. */ + lp = netdev_priv(dev); + lp->depca_bus = DEPCA_BUS_ISA; + lp->adapter = adapter; + lp->mem_start = mem_start; + + if ((status = depca_hw_init(dev, &device->dev))) + goto out_free; + + return 0; + + out_free: + free_netdev (dev); + release_region (ioaddr, DEPCA_TOTAL_SIZE); + out: + return status; +} + +/* +** EISA callbacks from sysfs. +*/ + +#ifdef CONFIG_EISA +static int __init depca_eisa_probe (struct device *device) +{ + enum depca_type adapter = unknown; + struct eisa_device *edev; + struct net_device *dev; + struct depca_private *lp; + u_long ioaddr, mem_start; + int status = 0; + + edev = to_eisa_device (device); + ioaddr = edev->base_addr + DEPCA_EISA_IO_PORTS; + + if ((status = depca_common_init (ioaddr, &dev))) + goto out; + + /* It would have been nice to get card configuration from the + * card. Unfortunately, this register is write-only (shares + * it's address with the ethernet prom)... 
As we don't parse + * the EISA configuration structures (yet... :-), just rely on + * the ISA probing to sort it out... */ + + adapter = depca_shmem_probe (&mem_start); + if (adapter == unknown) { + status = -ENODEV; + goto out_free; + } + + dev->base_addr = ioaddr; + dev->irq = irq; + lp = netdev_priv(dev); + lp->depca_bus = DEPCA_BUS_EISA; + lp->adapter = edev->id.driver_data; + lp->mem_start = mem_start; + + if ((status = depca_hw_init(dev, device))) + goto out_free; + + return 0; + + out_free: + free_netdev (dev); + release_region (ioaddr, DEPCA_TOTAL_SIZE); + out: + return status; +} +#endif + +static int __devexit depca_device_remove (struct device *device) +{ + struct net_device *dev; + struct depca_private *lp; + int bus; + + dev = dev_get_drvdata(device); + lp = netdev_priv(dev); + + unregister_netdev (dev); + iounmap (lp->sh_mem); + release_mem_region (lp->mem_start, lp->mem_len); + release_region (dev->base_addr, DEPCA_TOTAL_SIZE); + bus = lp->depca_bus; + free_netdev (dev); + + return 0; +} + +/* +** Look for a particular board name in the on-board Remote Diagnostics +** and Boot (readb) ROM. This will also give us a clue to the network RAM +** base address. +*/ +static int __init DepcaSignature(char *name, u_long base_addr) +{ + u_int i, j, k; + void __iomem *ptr; + char tmpstr[16]; + u_long prom_addr = base_addr + 0xc000; + u_long mem_addr = base_addr + 0x8000; /* 32KB */ + + /* Can't reserve the prom region, it is already marked as + * used, at least on x86. Instead, reserve a memory region a + * board would certainly use. If it works, go ahead. If not, + * run like hell... */ + + if (!request_mem_region (mem_addr, 16, depca_string)) + return unknown; + + /* Copy the first 16 bytes of ROM */ + + ptr = ioremap(prom_addr, 16); + if (ptr == NULL) { + printk(KERN_ERR "depca: I/O remap failed at %lx\n", prom_addr); + return unknown; + } + for (i = 0; i < 16; i++) { + tmpstr[i] = readb(ptr + i); + } + iounmap(ptr); + + release_mem_region (mem_addr, 16); + + /* Check if PROM contains a valid string */ + for (i = 0; *depca_signature[i] != '\0'; i++) { + for (j = 0, k = 0; j < 16 && k < strlen(depca_signature[i]); j++) { + if (depca_signature[i][k] == tmpstr[j]) { /* track signature */ + k++; + } else { /* lost signature; begin search again */ + k = 0; + } + } + if (k == strlen(depca_signature[i])) + break; + } + + /* Check if name string is valid, provided there's no PROM */ + if (name && *name && (i == unknown)) { + for (i = 0; *depca_signature[i] != '\0'; i++) { + if (strcmp(name, depca_signature[i]) == 0) + break; + } + } + + return i; +} + +/* +** Look for a special sequence in the Ethernet station address PROM that +** is common across all DEPCA products. Note that the original DEPCA needs +** its ROM address counter to be initialized and enabled. Only enable +** if the first address octet is a 0x08 - this minimises the chances of +** messing around with some other hardware, but it assumes that this DEPCA +** card initialized itself correctly. +** +** Search the Ethernet address ROM for the signature. Since the ROM address +** counter can start at an arbitrary point, the search must include the entire +** probe sequence length plus the (length_of_the_signature - 1). +** Stop the search IMMEDIATELY after the signature is found so that the +** PROM address counter is correctly positioned at the start of the +** ethernet address for later read out. 
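+**
+** With PROBE_LENGTH = 32 and an 8 byte signature (ETH_PROM_SIG twice,
+** back to back), the search below therefore examines at most
+** 32 + 8 - 1 = 39 octets of the PROM.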
+*/ +static int __init DevicePresent(u_long ioaddr) +{ + union { + struct { + u32 a; + u32 b; + } llsig; + char Sig[sizeof(u32) << 1]; + } + dev; + short sigLength = 0; + s8 data; + s16 nicsr; + int i, j, status = 0; + + data = inb(DEPCA_PROM); /* clear counter on DEPCA */ + data = inb(DEPCA_PROM); /* read data */ + + if (data == 0x08) { /* Enable counter on DEPCA */ + nicsr = inb(DEPCA_NICSR); + nicsr |= AAC; + outb(nicsr, DEPCA_NICSR); + } + + dev.llsig.a = ETH_PROM_SIG; + dev.llsig.b = ETH_PROM_SIG; + sigLength = sizeof(u32) << 1; + + for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) { + data = inb(DEPCA_PROM); + if (dev.Sig[j] == data) { /* track signature */ + j++; + } else { /* lost signature; begin search again */ + if (data == dev.Sig[0]) { /* rare case.... */ + j = 1; + } else { + j = 0; + } + } + } + + if (j != sigLength) { + status = -ENODEV; /* search failed */ + } + + return status; +} + +/* +** The DE100 and DE101 PROM accesses were made non-standard for some bizarre +** reason: access the upper half of the PROM with x=0; access the lower half +** with x=1. +*/ +static int __init get_hw_addr(struct net_device *dev) +{ + u_long ioaddr = dev->base_addr; + struct depca_private *lp = netdev_priv(dev); + int i, k, tmp, status = 0; + u_short j, x, chksum; + + x = (((lp->adapter == de100) || (lp->adapter == de101)) ? 1 : 0); + + for (i = 0, k = 0, j = 0; j < 3; j++) { + k <<= 1; + if (k > 0xffff) + k -= 0xffff; + + k += (u_char) (tmp = inb(DEPCA_PROM + x)); + dev->dev_addr[i++] = (u_char) tmp; + k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8); + dev->dev_addr[i++] = (u_char) tmp; + + if (k > 0xffff) + k -= 0xffff; + } + if (k == 0xffff) + k = 0; + + chksum = (u_char) inb(DEPCA_PROM + x); + chksum |= (u_short) (inb(DEPCA_PROM + x) << 8); + if (k != chksum) + status = -1; + + return status; +} + +/* +** Load a packet into the shared memory +*/ +static int load_packet(struct net_device *dev, struct sk_buff *skb) +{ + struct depca_private *lp = netdev_priv(dev); + int i, entry, end, len, status = NETDEV_TX_OK; + + entry = lp->tx_new; /* Ring around buffer number. */ + end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask; + if (!(readl(&lp->tx_ring[end].base) & T_OWN)) { /* Enough room? */ + /* + ** Caution: the write order is important here... don't set up the + ** ownership rights until all the other information is in place. + */ + if (end < entry) { /* wrapped buffer */ + len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ; + memcpy_toio(lp->tx_buff[entry], skb->data, len); + memcpy_toio(lp->tx_buff[0], skb->data + len, skb->len - len); + } else { /* linear buffer */ + memcpy_toio(lp->tx_buff[entry], skb->data, skb->len); + } + + /* set up the buffer descriptors */ + len = (skb->len < ETH_ZLEN) ? 
ETH_ZLEN : skb->len; + for (i = entry; i != end; i = (i+1) & lp->txRingMask) { + /* clean out flags */ + writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base); + writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */ + writew(-TX_BUFF_SZ, &lp->tx_ring[i].length); /* packet length in buffer */ + len -= TX_BUFF_SZ; + } + /* clean out flags */ + writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base); + writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */ + writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */ + + /* start of packet */ + writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base); + /* end of packet */ + writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base); + + for (i = end; i != entry; --i) { + /* ownership of packet */ + writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base); + if (i == 0) + i = lp->txRingMask + 1; + } + writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base); + + lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */ + } else { + status = NETDEV_TX_LOCKED; + } + + return status; +} + +static void depca_dbg_open(struct net_device *dev) +{ + struct depca_private *lp = netdev_priv(dev); + u_long ioaddr = dev->base_addr; + struct depca_init *p = &lp->init_block; + int i; + + if (depca_debug > 1) { + /* Do not copy the shadow init block into shared memory */ + /* Debugging should not affect normal operation! */ + /* The shadow init block will get copied across during InitRestartDepca */ + printk("%s: depca open with irq %d\n", dev->name, dev->irq); + printk("Descriptor head addresses (CPU):\n"); + printk(" 0x%lx 0x%lx\n", (u_long) lp->rx_ring, (u_long) lp->tx_ring); + printk("Descriptor addresses (CPU):\nRX: "); + for (i = 0; i < lp->rxRingMask; i++) { + if (i < 3) { + printk("%p ", &lp->rx_ring[i].base); + } + } + printk("...%p\n", &lp->rx_ring[i].base); + printk("TX: "); + for (i = 0; i < lp->txRingMask; i++) { + if (i < 3) { + printk("%p ", &lp->tx_ring[i].base); + } + } + printk("...%p\n", &lp->tx_ring[i].base); + printk("\nDescriptor buffers (Device):\nRX: "); + for (i = 0; i < lp->rxRingMask; i++) { + if (i < 3) { + printk("0x%8.8x ", readl(&lp->rx_ring[i].base)); + } + } + printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base)); + printk("TX: "); + for (i = 0; i < lp->txRingMask; i++) { + if (i < 3) { + printk("0x%8.8x ", readl(&lp->tx_ring[i].base)); + } + } + printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base)); + printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start); + printk(" mode: 0x%4.4x\n", p->mode); + printk(" physical address: %pM\n", p->phys_addr); + printk(" multicast hash table: "); + for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) { + printk("%2.2x:", p->mcast_table[i]); + } + printk("%2.2x\n", p->mcast_table[i]); + printk(" rx_ring at: 0x%8.8x\n", p->rx_ring); + printk(" tx_ring at: 0x%8.8x\n", p->tx_ring); + printk("buffers (Phys): 0x%8.8lx\n", lp->mem_start + lp->buffs_offset); + printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n", (int) lp->rxRingMask + 1, lp->rx_rlen); + printk("TX: %d Log2(txRingMask): 0x%8.8x\n", (int) lp->txRingMask + 1, lp->tx_rlen); + outw(CSR2, DEPCA_ADDR); + printk("CSR2&1: 0x%4.4x", inw(DEPCA_DATA)); + outw(CSR1, DEPCA_ADDR); + printk("%4.4x\n", inw(DEPCA_DATA)); + outw(CSR3, DEPCA_ADDR); + printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA)); + } +} + +/* +** Perform IOCTL call functions here. 
Some are privileged operations and the +** effective uid is checked in those cases. +** All multicast IOCTLs will not work here and are for testing purposes only. +*/ +static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct depca_private *lp = netdev_priv(dev); + struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru; + int i, status = 0; + u_long ioaddr = dev->base_addr; + union { + u8 addr[(HASH_TABLE_LEN * ETH_ALEN)]; + u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1]; + u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2]; + } tmp; + unsigned long flags; + void *buf; + + switch (ioc->cmd) { + case DEPCA_GET_HWADDR: /* Get the hardware address */ + for (i = 0; i < ETH_ALEN; i++) { + tmp.addr[i] = dev->dev_addr[i]; + } + ioc->len = ETH_ALEN; + if (copy_to_user(ioc->data, tmp.addr, ioc->len)) + return -EFAULT; + break; + + case DEPCA_SET_HWADDR: /* Set the hardware address */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) + return -EFAULT; + for (i = 0; i < ETH_ALEN; i++) { + dev->dev_addr[i] = tmp.addr[i]; + } + netif_stop_queue(dev); + while (lp->tx_old != lp->tx_new) + cpu_relax(); /* Wait for the ring to empty */ + + STOP_DEPCA; /* Temporarily stop the depca. */ + depca_init_ring(dev); /* Initialize the descriptor rings */ + LoadCSRs(dev); /* Reload CSR3 */ + InitRestartDepca(dev); /* Resume normal operation. */ + netif_start_queue(dev); /* Unlock the TX ring */ + break; + + case DEPCA_SET_PROM: /* Set Promiscuous Mode */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + netif_stop_queue(dev); + while (lp->tx_old != lp->tx_new) + cpu_relax(); /* Wait for the ring to empty */ + + STOP_DEPCA; /* Temporarily stop the depca. */ + depca_init_ring(dev); /* Initialize the descriptor rings */ + lp->init_block.mode |= PROM; /* Set promiscuous mode */ + + LoadCSRs(dev); /* Reload CSR3 */ + InitRestartDepca(dev); /* Resume normal operation. */ + netif_start_queue(dev); /* Unlock the TX ring */ + break; + + case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + netif_stop_queue(dev); + while (lp->tx_old != lp->tx_new) + cpu_relax(); /* Wait for the ring to empty */ + + STOP_DEPCA; /* Temporarily stop the depca. */ + depca_init_ring(dev); /* Initialize the descriptor rings */ + lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */ + + LoadCSRs(dev); /* Reload CSR3 */ + InitRestartDepca(dev); /* Resume normal operation. */ + netif_start_queue(dev); /* Unlock the TX ring */ + break; + + case DEPCA_SAY_BOO: /* Say "Boo!" 
to the kernel log file */ + if(!capable(CAP_NET_ADMIN)) + return -EPERM; + printk("%s: Boo!\n", dev->name); + break; + + case DEPCA_GET_MCA: /* Get the multicast address table */ + ioc->len = (HASH_TABLE_LEN >> 3); + if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len)) + return -EFAULT; + break; + + case DEPCA_SET_MCA: /* Set a multicast address */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (ioc->len >= HASH_TABLE_LEN) + return -EINVAL; + if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len)) + return -EFAULT; + set_multicast_list(dev); + break; + + case DEPCA_CLR_MCA: /* Clear all multicast addresses */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + set_multicast_list(dev); + break; + + case DEPCA_MCA_EN: /* Enable pass all multicast addressing */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + set_multicast_list(dev); + break; + + case DEPCA_GET_STATS: /* Get the driver statistics */ + ioc->len = sizeof(lp->pktStats); + buf = kmalloc(ioc->len, GFP_KERNEL); + if(!buf) + return -ENOMEM; + spin_lock_irqsave(&lp->lock, flags); + memcpy(buf, &lp->pktStats, ioc->len); + spin_unlock_irqrestore(&lp->lock, flags); + if (copy_to_user(ioc->data, buf, ioc->len)) + status = -EFAULT; + kfree(buf); + break; + + case DEPCA_CLR_STATS: /* Zero out the driver statistics */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + spin_lock_irqsave(&lp->lock, flags); + memset(&lp->pktStats, 0, sizeof(lp->pktStats)); + spin_unlock_irqrestore(&lp->lock, flags); + break; + + case DEPCA_GET_REG: /* Get the DEPCA Registers */ + i = 0; + tmp.sval[i++] = inw(DEPCA_NICSR); + outw(CSR0, DEPCA_ADDR); /* status register */ + tmp.sval[i++] = inw(DEPCA_DATA); + memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init)); + ioc->len = i + sizeof(struct depca_init); + if (copy_to_user(ioc->data, tmp.addr, ioc->len)) + return -EFAULT; + break; + + default: + return -EOPNOTSUPP; + } + + return status; +} + +static int __init depca_module_init (void) +{ + int err = 0; + +#ifdef CONFIG_MCA + err = mca_register_driver(&depca_mca_driver); + if (err) + goto err; +#endif +#ifdef CONFIG_EISA + err = eisa_driver_register(&depca_eisa_driver); + if (err) + goto err_mca; +#endif + err = platform_driver_register(&depca_isa_driver); + if (err) + goto err_eisa; + + depca_platform_probe(); + return 0; + +err_eisa: +#ifdef CONFIG_EISA + eisa_driver_unregister(&depca_eisa_driver); +err_mca: +#endif +#ifdef CONFIG_MCA + mca_unregister_driver(&depca_mca_driver); +err: +#endif + return err; +} + +static void __exit depca_module_exit (void) +{ + int i; +#ifdef CONFIG_MCA + mca_unregister_driver (&depca_mca_driver); +#endif +#ifdef CONFIG_EISA + eisa_driver_unregister (&depca_eisa_driver); +#endif + platform_driver_unregister (&depca_isa_driver); + + for (i = 0; depca_io_ports[i].iobase; i++) { + if (depca_io_ports[i].device) { + depca_io_ports[i].device->dev.platform_data = NULL; + platform_device_unregister (depca_io_ports[i].device); + depca_io_ports[i].device = NULL; + } + } +} + +module_init (depca_module_init); +module_exit (depca_module_exit); diff --git a/drivers/net/ethernet/amd/depca.h b/drivers/net/ethernet/amd/depca.h new file mode 100644 index 0000000..ee42648 --- /dev/null +++ b/drivers/net/ethernet/amd/depca.h @@ -0,0 +1,185 @@ +/* + Written 1994 by David C. Davies. + + Copyright 1994 David C. Davies. This software may be used and distributed + according to the terms of the GNU General Public License, incorporated herein by + reference. +*/ + +/* +** I/O addresses. 
Note that the 2k buffer option is not supported in +** this driver. +*/ +#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */ +#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */ +#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */ +#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */ +#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */ +#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */ +#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */ +#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */ + +/* +** These are LANCE registers addressable through DEPCA_ADDR +*/ +#define CSR0 0 +#define CSR1 1 +#define CSR2 2 +#define CSR3 3 + +/* +** NETWORK INTERFACE CSR (NI_CSR) bit definitions +*/ + +#define TO 0x0100 /* Time Out for remote boot */ +#define SHE 0x0080 /* SHadow memory Enable */ +#define BS 0x0040 /* Bank Select */ +#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */ +#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */ +#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */ +#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */ +#define IM 0x0004 /* Interrupt Mask (1->mask) */ +#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */ +#define LED 0x0001 /* LED control */ + +/* +** Control and Status Register 0 (CSR0) bit definitions +*/ + +#define ERR 0x8000 /* Error summary */ +#define BABL 0x4000 /* Babble transmitter timeout error */ +#define CERR 0x2000 /* Collision Error */ +#define MISS 0x1000 /* Missed packet */ +#define MERR 0x0800 /* Memory Error */ +#define RINT 0x0400 /* Receiver Interrupt */ +#define TINT 0x0200 /* Transmit Interrupt */ +#define IDON 0x0100 /* Initialization Done */ +#define INTR 0x0080 /* Interrupt Flag */ +#define INEA 0x0040 /* Interrupt Enable */ +#define RXON 0x0020 /* Receiver on */ +#define TXON 0x0010 /* Transmitter on */ +#define TDMD 0x0008 /* Transmit Demand */ +#define STOP 0x0004 /* Stop */ +#define STRT 0x0002 /* Start */ +#define INIT 0x0001 /* Initialize */ +#define INTM 0xff00 /* Interrupt Mask */ +#define INTE 0xfff0 /* Interrupt Enable */ + +/* +** CONTROL AND STATUS REGISTER 3 (CSR3) +*/ + +#define BSWP 0x0004 /* Byte SWaP */ +#define ACON 0x0002 /* ALE control */ +#define BCON 0x0001 /* Byte CONtrol */ + +/* +** Initialization Block Mode Register +*/ + +#define PROM 0x8000 /* Promiscuous Mode */ +#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */ +#define INTL 0x0040 /* Internal Loopback */ +#define DRTY 0x0020 /* Disable Retry */ +#define COLL 0x0010 /* Force Collision */ +#define DTCR 0x0008 /* Disable Transmit CRC */ +#define LOOP 0x0004 /* Loopback */ +#define DTX 0x0002 /* Disable the Transmitter */ +#define DRX 0x0001 /* Disable the Receiver */ + +/* +** Receive Message Descriptor 1 (RMD1) bit definitions. +*/ + +#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */ +#define R_ERR 0x4000 /* Error Summary */ +#define R_FRAM 0x2000 /* Framing Error */ +#define R_OFLO 0x1000 /* Overflow Error */ +#define R_CRC 0x0800 /* CRC Error */ +#define R_BUFF 0x0400 /* Buffer Error */ +#define R_STP 0x0200 /* Start of Packet */ +#define R_ENP 0x0100 /* End of Packet */ + +/* +** Transmit Message Descriptor 1 (TMD1) bit definitions. 
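+**
+** As an illustration (a sketch, not driver code): a single-buffer frame
+** handed to the chip has T_OWN, T_STP and T_ENP set, so its descriptor
+** base word is composed as
+**
+**	base = (buf & 0xffffff) | T_OWN | T_STP | T_ENP
+**	     = (buf & 0xffffff) | 0x83000000
+**
+** which is the constant the lance.c transmit path later in this patch
+** writes into tx_ring[entry].base.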
+*/
+
+#define T_OWN       0x80000000    /* Owner bit 0 = host, 1 = lance */
+#define T_ERR       0x4000        /* Error Summary */
+#define T_ADD_FCS   0x2000        /* More than 1 retry needed to Xmit */
+#define T_MORE      0x1000        /* >1 retry to transmit packet */
+#define T_ONE       0x0800        /* 1 try needed to transmit the packet */
+#define T_DEF       0x0400        /* Deferred */
+#define T_STP       0x02000000    /* Start of Packet */
+#define T_ENP       0x01000000    /* End of Packet */
+#define T_FLAGS     0xff000000    /* TX Flags Field */
+
+/*
+** Transmit Message Descriptor 3 (TMD3) bit definitions.
+*/
+
+#define TMD3_BUFF   0x8000        /* BUFFer error */
+#define TMD3_UFLO   0x4000        /* UnderFLOw error */
+#define TMD3_RES    0x2000        /* REServed */
+#define TMD3_LCOL   0x1000        /* Late COLlision */
+#define TMD3_LCAR   0x0800        /* Loss of CARrier */
+#define TMD3_RTRY   0x0400        /* ReTRY error */
+
+/*
+** EISA configuration Register (CNFG) bit definitions
+*/
+
+#define TIMEOUT     0x0100        /* 0:2.5 mins, 1: 30 secs */
+#define REMOTE      0x0080        /* Remote Boot Enable -> 1 */
+#define IRQ11       0x0040        /* Enable -> 1 */
+#define IRQ10       0x0020        /* Enable -> 1 */
+#define IRQ9        0x0010        /* Enable -> 1 */
+#define IRQ5        0x0008        /* Enable -> 1 */
+#define BUFF        0x0004        /* 0: 64kB or 128kB, 1: 32kB */
+#define PADR16      0x0002        /* RAM on 64kB boundary */
+#define PADR17      0x0001        /* RAM on 128kB boundary */
+
+/*
+** Miscellaneous
+*/
+#define HASH_TABLE_LEN    64      /* Bits */
+#define HASH_BITS         0x003f  /* 6 LS bits */
+
+#define MASK_INTERRUPTS   1
+#define UNMASK_INTERRUPTS 0
+
+#define EISA_EN     0x0001        /* Enable EISA bus buffers */
+#define EISA_ID     iobase+0x0080 /* ID long word for EISA card */
+#define EISA_CTRL   iobase+0x0084 /* Control word for EISA card */
+
+/*
+** Include the IOCTL stuff
+*/
+#include
+
+#define DEPCAIOCTL  SIOCDEVPRIVATE
+
+struct depca_ioctl {
+	unsigned short cmd;         /* Command to run */
+	unsigned short len;         /* Length of the data buffer */
+	unsigned char __user *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define DEPCA_GET_HWADDR  0x01   /* Get the hardware address */
+#define DEPCA_SET_HWADDR  0x02   /* Set the hardware address */
+#define DEPCA_SET_PROM    0x03   /* Set Promiscuous Mode */
+#define DEPCA_CLR_PROM    0x04   /* Clear Promiscuous Mode */
+#define DEPCA_SAY_BOO     0x05   /* Say "Boo!" to the kernel log file */
+#define DEPCA_GET_MCA     0x06   /* Get the multicast address table */
+#define DEPCA_SET_MCA     0x07   /* Set a multicast address */
+#define DEPCA_CLR_MCA     0x08   /* Clear a multicast address */
+#define DEPCA_MCA_EN      0x09   /* Enable a multicast address group */
+#define DEPCA_GET_STATS   0x0a   /* Get the driver statistics */
+#define DEPCA_CLR_STATS   0x0b   /* Zero out the driver statistics */
+#define DEPCA_GET_REG     0x0c   /* Get the Register contents */
+#define DEPCA_SET_REG     0x0d   /* Set the Register contents */
+#define DEPCA_DUMP        0x0f   /* Dump the DEPCA Status */
+
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
new file mode 100644
index 0000000..a900d5b
--- /dev/null
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -0,0 +1,242 @@
+/* hplance.c  : the  Linux/hp300/lance ethernet driver
+ *
+ * Copyright (C) 05/1998 Peter Maydell
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ * Uses the generic 7990.c LANCE code.
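+ *
+ * All of the ring handling and interrupt logic lives in the generic
+ * 7990.c code; the board-specific pieces below are just the DIO probe
+ * and the three register accessors (hplance_writerap/_writerdp/_readrdp)
+ * that get wired into struct lance_private.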
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* Used for the temporal inet entries and routing */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "hplance.h" + +/* We have 16834 bytes of RAM for the init block and buffers. This places + * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx + * buffers and 2 Tx buffers. + */ +#define LANCE_LOG_TX_BUFFERS 1 +#define LANCE_LOG_RX_BUFFERS 3 + +#include "7990.h" /* use generic LANCE code */ + +/* Our private data structure */ +struct hplance_private { + struct lance_private lance; +}; + +/* function prototypes... This is easy because all the grot is in the + * generic LANCE support. All we have to support is probing for boards, + * plus board-specific init, open and close actions. + * Oh, and we need to tell the generic code how to read and write LANCE registers... + */ +static int __devinit hplance_init_one(struct dio_dev *d, + const struct dio_device_id *ent); +static void __devinit hplance_init(struct net_device *dev, + struct dio_dev *d); +static void __devexit hplance_remove_one(struct dio_dev *d); +static void hplance_writerap(void *priv, unsigned short value); +static void hplance_writerdp(void *priv, unsigned short value); +static unsigned short hplance_readrdp(void *priv); +static int hplance_open(struct net_device *dev); +static int hplance_close(struct net_device *dev); + +static struct dio_device_id hplance_dio_tbl[] = { + { DIO_ID_LAN }, + { 0 } +}; + +static struct dio_driver hplance_driver = { + .name = "hplance", + .id_table = hplance_dio_tbl, + .probe = hplance_init_one, + .remove = __devexit_p(hplance_remove_one), +}; + +static const struct net_device_ops hplance_netdev_ops = { + .ndo_open = hplance_open, + .ndo_stop = hplance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_multicast_list = lance_set_multicast, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = lance_poll, +#endif +}; + +/* Find all the HP Lance boards and initialise them... 
*/ +static int __devinit hplance_init_one(struct dio_dev *d, + const struct dio_device_id *ent) +{ + struct net_device *dev; + int err = -ENOMEM; + int i; + + dev = alloc_etherdev(sizeof(struct hplance_private)); + if (!dev) + goto out; + + err = -EBUSY; + if (!request_mem_region(dio_resource_start(d), + dio_resource_len(d), d->name)) + goto out_free_netdev; + + hplance_init(dev, d); + err = register_netdev(dev); + if (err) + goto out_release_mem_region; + + dio_set_drvdata(d, dev); + + printk(KERN_INFO "%s: %s; select code %d, addr %2.2x", dev->name, d->name, d->scode, dev->dev_addr[0]); + + for (i=1; i<6; i++) { + printk(":%2.2x", dev->dev_addr[i]); + } + + printk(", irq %d\n", d->ipl); + + return 0; + + out_release_mem_region: + release_mem_region(dio_resource_start(d), dio_resource_len(d)); + out_free_netdev: + free_netdev(dev); + out: + return err; +} + +static void __devexit hplance_remove_one(struct dio_dev *d) +{ + struct net_device *dev = dio_get_drvdata(d); + + unregister_netdev(dev); + release_mem_region(dio_resource_start(d), dio_resource_len(d)); + free_netdev(dev); +} + +/* Initialise a single lance board at the given DIO device */ +static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d) +{ + unsigned long va = (d->resource.start + DIO_VIRADDRBASE); + struct hplance_private *lp; + int i; + + /* reset the board */ + out_8(va+DIO_IDOFF, 0xff); + udelay(100); /* ariba! ariba! udelay! udelay! */ + + /* Fill the dev fields */ + dev->base_addr = va; + dev->netdev_ops = &hplance_netdev_ops; + dev->dma = 0; + + for (i=0; i<6; i++) { + /* The NVRAM holds our ethernet address, one nibble per byte, + * at bytes NVRAMOFF+1,3,5,7,9... + */ + dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4) + | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF); + } + + lp = netdev_priv(dev); + lp->lance.name = (char*)d->name; /* discards const, shut up gcc */ + lp->lance.base = va; + lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */ + lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */ + lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */ + lp->lance.irq = d->ipl; + lp->lance.writerap = hplance_writerap; + lp->lance.writerdp = hplance_writerdp; + lp->lance.readrdp = hplance_readrdp; + lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS; + lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; + lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK; + lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK; +} + +/* This is disgusting. We have to check the DIO status register for ack every + * time we read or write the LANCE registers. 
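+ * (Concretely, each access below is retried until the DIO status
+ * register reports LE_ACK, the "ack of lock" bit defined in hplance.h.)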
+ */ +static void hplance_writerap(void *priv, unsigned short value) +{ + struct lance_private *lp = (struct lance_private *)priv; + do { + out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); +} + +static void hplance_writerdp(void *priv, unsigned short value) +{ + struct lance_private *lp = (struct lance_private *)priv; + do { + out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); +} + +static unsigned short hplance_readrdp(void *priv) +{ + struct lance_private *lp = (struct lance_private *)priv; + __u16 value; + do { + value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); + return value; +} + +static int hplance_open(struct net_device *dev) +{ + int status; + struct lance_private *lp = netdev_priv(dev); + + status = lance_open(dev); /* call generic lance open code */ + if (status) + return status; + /* enable interrupts at board level. */ + out_8(lp->base + HPLANCE_STATUS, LE_IE); + + return 0; +} + +static int hplance_close(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + + out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */ + lance_close(dev); + return 0; +} + +static int __init hplance_init_module(void) +{ + return dio_register_driver(&hplance_driver); +} + +static void __exit hplance_cleanup_module(void) +{ + dio_unregister_driver(&hplance_driver); +} + +module_init(hplance_init_module); +module_exit(hplance_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/hplance.h b/drivers/net/ethernet/amd/hplance.h new file mode 100644 index 0000000..04aee9e --- /dev/null +++ b/drivers/net/ethernet/amd/hplance.h @@ -0,0 +1,26 @@ +/* Random defines and structures for the HP Lance driver. + * Copyright (C) 05/1998 Peter Maydell + * Based on the Sun Lance driver and the NetBSD HP Lance driver + */ + +/* Registers */ +#define HPLANCE_ID 0x01 /* DIO register: ID byte */ +#define HPLANCE_STATUS 0x03 /* DIO register: interrupt enable/status */ + +/* Control and status bits for the status register */ +#define LE_IE 0x80 /* interrupt enable */ +#define LE_IR 0x40 /* interrupt requested */ +#define LE_LOCK 0x08 /* lock status register */ +#define LE_ACK 0x04 /* ack of lock */ +#define LE_JAB 0x02 /* loss of tx clock (???) */ +/* We can also extract the IPL from the status register with the standard + * DIO_IPL(hplance) macro, or using dio_scodetoipl() + */ + +/* These are the offsets for the DIO regs (hplance_reg), lance_ioreg, + * memory and NVRAM: + */ +#define HPLANCE_IDOFF 0 /* board baseaddr */ +#define HPLANCE_REGOFF 0x4000 /* lance registers */ +#define HPLANCE_MEMOFF 0x8000 /* struct lance_init_block */ +#define HPLANCE_NVRAMOFF 0xC008 /* etheraddress as one *nibble* per byte */ diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c new file mode 100644 index 0000000..02336ed --- /dev/null +++ b/drivers/net/ethernet/amd/lance.c @@ -0,0 +1,1313 @@ +/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */ +/* + Written/copyright 1993-1998 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. 
+ + This driver is for the Allied Telesis AT1500 and HP J2405A, and should work + with most other LANCE-based bus-master (NE2100/NE2500) ethercards. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Andrey V. Savochkin: + - alignment problem with 1.3.* kernel and some minor changes. + Thomas Bogendoerfer (tsbogend@bigbug.franken.de): + - added support for Linux/Alpha, but removed most of it, because + it worked only for the PCI chip. + - added hook for the 32bit lance driver + - added PCnetPCI II (79C970A) to chip table + Paul Gortmaker (gpg109@rsphy1.anu.edu.au): + - hopefully fix above so Linux/Alpha can use ISA cards too. + 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb + v1.12 10/27/97 Module support -djb + v1.14 2/3/98 Module support modified, made PCI support optional -djb + v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed + before unregister_netdev() which caused NULL pointer + reference later in the chain (in rtnetlink_fill_ifinfo()) + -- Mika Kuoppala + + Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from + the 2.1 version of the old driver - Alan Cox + + Get rid of check_region, check kmalloc return in lance_probe1 + Arnaldo Carvalho de Melo - 11/01/2001 + + Reworked detection, added support for Racal InterLan EtherBlaster cards + Vesselin Kostadinov - 22/4/2004 +*/ + +static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0}; +static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options); +static int __init do_lance_probe(struct net_device *dev); + + +static struct card { + char id_offset14; + char id_offset15; +} cards[] = { + { //"normal" + .id_offset14 = 0x57, + .id_offset15 = 0x57, + }, + { //NI6510EB + .id_offset14 = 0x52, + .id_offset15 = 0x44, + }, + { //Racal InterLan EtherBlaster + .id_offset14 = 0x52, + .id_offset15 = 0x49, + }, +}; +#define NUM_CARDS 3 + +#ifdef LANCE_DEBUG +static int lance_debug = LANCE_DEBUG; +#else +static int lance_debug = 1; +#endif + +/* + Theory of Operation + +I. Board Compatibility + +This device driver is designed for the AMD 79C960, the "PCnet-ISA +single-chip ethernet controller for ISA". This chip is used in a wide +variety of boards from vendors such as Allied Telesis, HP, Kingston, +and Boca. This driver is also intended to work with older AMD 7990 +designs, such as the NE1500 and NE2100, and newer 79C961. For convenience, +I use the name LANCE to refer to all of the AMD chips, even though it properly +refers only to the original 7990. + +II. Board-specific settings + +The driver is designed to work the boards that use the faster +bus-master mode, rather than in shared memory mode. (Only older designs +have on-board buffer memory needed to support the slower shared memory mode.) + +Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA +channel. This driver probes the likely base addresses: +{0x300, 0x320, 0x340, 0x360}. +After the board is found it generates a DMA-timeout interrupt and uses +autoIRQ to find the IRQ line. The DMA channel can be set with the low bits +of the otherwise-unused dev->mem_start value (aka PARAM1). 
If unset it is +probed for by enabling each free DMA channel in turn and checking if +initialization succeeds. + +The HP-J2405A board is an exception: with this board it is easy to read the +EEPROM-set values for the base, IRQ, and DMA. (Of course you must already +_know_ the base address -- that field is for writing the EEPROM.) + +III. Driver operation + +IIIa. Ring buffers +The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes +the base and length of the data buffer, along with status bits. The length +of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of +the buffer length (rather than being directly the buffer length) for +implementation ease. The current values are 2 (Tx) and 4 (Rx), which leads to +ring sizes of 4 (Tx) and 16 (Rx). Increasing the number of ring entries +needlessly uses extra space and reduces the chance that an upper layer will +be able to reorder queued Tx packets based on priority. Decreasing the number +of entries makes it more difficult to achieve back-to-back packet transmission +and increases the chance that Rx ring will overflow. (Consider the worst case +of receiving back-to-back minimum-sized packets.) + +The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver +statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to +avoid the administrative overhead. For the Rx side this avoids dynamically +allocating full-sized buffers "just in case", at the expense of a +memory-to-memory data copy for each packet received. For most systems this +is a good tradeoff: the Rx buffer will always be in low memory, the copy +is inexpensive, and it primes the cache for later packet processing. For Tx +the buffers are only used when needed as low-memory bounce buffers. + +IIIB. 16M memory limitations. +For the ISA bus master mode all structures used directly by the LANCE, +the initialization block, Rx and Tx rings, and data buffers, must be +accessible from the ISA bus, i.e. in the lower 16M of real memory. +This is a problem for current Linux kernels on >16M machines. The network +devices are initialized after memory initialization, and the kernel doles out +memory from the top of memory downward. The current solution is to have a +special network initialization routine that's called before memory +initialization; this will eventually be generalized for all network devices. +As mentioned before, low-memory "bounce-buffers" are used when needed. + +IIIC. Synchronization +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the +dev->tbusy flag. The other thread is the interrupt handler, which is single +threaded by the hardware and other software. + +The send packet thread has partial control over the Tx ring and 'dev->tbusy' +flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next +queue slot is empty, it clears the tbusy flag when finished otherwise it sets +the 'lp->tx_full' flag. + +The interrupt handler has exclusive control over the Rx ring and records stats +from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so +we can't avoid the interrupt overhead by having the Tx routine reap the Tx +stats.) After reaping the stats, it marks the queue entry as empty by setting +the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the +tx_full and tbusy flags. + +*/ + +/* Set the number of Tx and Rx buffers, using Log_2(# buffers). 
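+   (A worked example of the Log_2 convention, using the defaults defined
+   just below -- a sketch, not normative:
+	LANCE_LOG_TX_BUFFERS = 4  ->  TX_RING_SIZE     = 1 << 4  = 16
+	                              TX_RING_MOD_MASK = 16 - 1  = 0x0f
+	                              TX_RING_LEN_BITS = 4 << 29, the ring
+	length field carried in the init-block ring pointers.)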
+ Reasonable default values are 16 Tx buffers, and 16 Rx buffers. + That translates to 4 and 4 (16 == 2^^4). + This is a compile-time option for efficiency. + */ +#ifndef LANCE_LOG_TX_BUFFERS +#define LANCE_LOG_TX_BUFFERS 4 +#define LANCE_LOG_RX_BUFFERS 4 +#endif + +#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) +#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29) + +#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) +#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29) + +#define PKT_BUF_SZ 1544 + +/* Offsets from base I/O address. */ +#define LANCE_DATA 0x10 +#define LANCE_ADDR 0x12 +#define LANCE_RESET 0x14 +#define LANCE_BUS_IF 0x16 +#define LANCE_TOTAL_SIZE 0x18 + +#define TX_TIMEOUT (HZ/5) + +/* The LANCE Rx and Tx ring descriptors. */ +struct lance_rx_head { + s32 base; + s16 buf_length; /* This length is 2s complement (negative)! */ + s16 msg_length; /* This length is "normal". */ +}; + +struct lance_tx_head { + s32 base; + s16 length; /* Length is 2s complement (negative)! */ + s16 misc; +}; + +/* The LANCE initialization block, described in databook. */ +struct lance_init_block { + u16 mode; /* Pre-set mode (reg. 15) */ + u8 phys_addr[6]; /* Physical ethernet address */ + u32 filter[2]; /* Multicast filter (unused). */ + /* Receive and transmit ring base, along with extra bits. */ + u32 rx_ring; /* Tx and Rx ring base pointers */ + u32 tx_ring; +}; + +struct lance_private { + /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */ + struct lance_rx_head rx_ring[RX_RING_SIZE]; + struct lance_tx_head tx_ring[TX_RING_SIZE]; + struct lance_init_block init_block; + const char *name; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ + struct sk_buff* tx_skbuff[TX_RING_SIZE]; + /* The addresses of receive-in-place skbuffs. */ + struct sk_buff* rx_skbuff[RX_RING_SIZE]; + unsigned long rx_buffs; /* Address of Rx and Tx buffers. */ + /* Tx low-memory "bounce buffer" address. */ + char (*tx_bounce_buffs)[PKT_BUF_SZ]; + int cur_rx, cur_tx; /* The next free ring entry */ + int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ + int dma; + unsigned char chip_version; /* See lance_chip_type. */ + spinlock_t devlock; +}; + +#define LANCE_MUST_PAD 0x00000001 +#define LANCE_ENABLE_AUTOSELECT 0x00000002 +#define LANCE_MUST_REINIT_RING 0x00000004 +#define LANCE_MUST_UNRESET 0x00000008 +#define LANCE_HAS_MISSED_FRAME 0x00000010 + +/* A mapping from the chip ID number to the part number and features. + These are from the datasheets -- in real life the '970 version + reportedly has the same ID as the '965. */ +static struct lance_chip_type { + int id_number; + const char *name; + int flags; +} chip_table[] = { + {0x0000, "LANCE 7990", /* Ancient lance chip. */ + LANCE_MUST_PAD + LANCE_MUST_UNRESET}, + {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */ + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, + {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */ + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, + {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */ + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, + /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call + it the PCnet32. */ + {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. 
*/ + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, + {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCInetPCI II. */ + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, + {0x0, "PCnet (unknown)", + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, +}; + +enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6}; + + +/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers. + Assume yes until we know the memory size. */ +static unsigned char lance_need_isa_bounce_buffers = 1; + +static int lance_open(struct net_device *dev); +static void lance_init_ring(struct net_device *dev, gfp_t mode); +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int lance_rx(struct net_device *dev); +static irqreturn_t lance_interrupt(int irq, void *dev_id); +static int lance_close(struct net_device *dev); +static struct net_device_stats *lance_get_stats(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void lance_tx_timeout (struct net_device *dev); + + + +#ifdef MODULE +#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */ + +static struct net_device *dev_lance[MAX_CARDS]; +static int io[MAX_CARDS]; +static int dma[MAX_CARDS]; +static int irq[MAX_CARDS]; + +module_param_array(io, int, NULL, 0); +module_param_array(dma, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param(lance_debug, int, 0); +MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required"); +MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)"); +MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); +MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); + +int __init init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) { + if (io[this_dev] == 0) { + if (this_dev != 0) /* only complain once */ + break; + printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n"); + return -EPERM; + } + dev = alloc_etherdev(0); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + dev->dma = dma[this_dev]; + if (do_lance_probe(dev) == 0) { + dev_lance[found++] = dev; + continue; + } + free_netdev(dev); + break; + } + if (found != 0) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + if (dev->dma != 4) + free_dma(dev->dma); + release_region(dev->base_addr, LANCE_TOTAL_SIZE); + kfree(lp->tx_bounce_buffs); + kfree((void*)lp->rx_buffs); + kfree(lp); +} + +void __exit cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) { + struct net_device *dev = dev_lance[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ +MODULE_LICENSE("GPL"); + + +/* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other + board probes now that kmalloc() can allocate ISA DMA-able regions. + This also allows the LANCE driver to be used as a module. 
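+
+   A usage sketch (the io= value must match the board's jumpered or
+   EEPROM-configured base, since init_module() above refuses to autoprobe):
+
+	modprobe lance io=0x300 irq=5 dma=5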
+ */ +static int __init do_lance_probe(struct net_device *dev) +{ + unsigned int *port; + int result; + + if (high_memory <= phys_to_virt(16*1024*1024)) + lance_need_isa_bounce_buffers = 0; + + for (port = lance_portlist; *port; port++) { + int ioaddr = *port; + struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE, + "lance-probe"); + + if (r) { + /* Detect the card with minimal I/O reads */ + char offset14 = inb(ioaddr + 14); + int card; + for (card = 0; card < NUM_CARDS; ++card) + if (cards[card].id_offset14 == offset14) + break; + if (card < NUM_CARDS) {/*yes, the first byte matches*/ + char offset15 = inb(ioaddr + 15); + for (card = 0; card < NUM_CARDS; ++card) + if ((cards[card].id_offset14 == offset14) && + (cards[card].id_offset15 == offset15)) + break; + } + if (card < NUM_CARDS) { /*Signature OK*/ + result = lance_probe1(dev, ioaddr, 0, 0); + if (!result) { + struct lance_private *lp = dev->ml_priv; + int ver = lp->chip_version; + + r->name = chip_table[ver].name; + return 0; + } + } + release_region(ioaddr, LANCE_TOTAL_SIZE); + } + } + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init lance_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(0); + int err; + + if (!dev) + return ERR_PTR(-ENODEV); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_lance_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_start_xmit = lance_start_xmit, + .ndo_stop = lance_close, + .ndo_get_stats = lance_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options) +{ + struct lance_private *lp; + unsigned long dma_channels; /* Mark spuriously-busy DMA channels */ + int i, reset_val, lance_version; + const char *chipname; + /* Flags for specific chips or boards. */ + unsigned char hpJ2405A = 0; /* HP ISA adaptor */ + int hp_builtin = 0; /* HP on-board ethernet. */ + static int did_version; /* Already printed version info. */ + unsigned long flags; + int err = -ENOMEM; + void __iomem *bios; + + /* First we look for special cases. + Check for HP's on-board ethernet by looking for 'HP' in the BIOS. + There are two HP versions, check the BIOS for the configuration port. + This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com. + */ + bios = ioremap(0xf00f0, 0x14); + if (!bios) + return -ENOMEM; + if (readw(bios + 0x12) == 0x5048) { + static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360}; + int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99; + /* We can have boards other than the built-in! Verify this is on-board. */ + if ((inb(hp_port) & 0xc0) == 0x80 && + ioaddr_table[inb(hp_port) & 3] == ioaddr) + hp_builtin = hp_port; + } + iounmap(bios); + /* We also recognize the HP Vectra on-board here, but check below. */ + hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 && + inb(ioaddr+2) == 0x09); + + /* Reset the LANCE. */ + reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */ + + /* The Un-Reset needed is only needed for the real NE2100, and will + confuse the HP board. 
*/ + if (!hpJ2405A) + outw(reset_val, ioaddr+LANCE_RESET); + + outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */ + if (inw(ioaddr+LANCE_DATA) != 0x0004) + return -ENODEV; + + /* Get the version of the chip. */ + outw(88, ioaddr+LANCE_ADDR); + if (inw(ioaddr+LANCE_ADDR) != 88) { + lance_version = 0; + } else { /* Good, it's a newer chip. */ + int chip_version = inw(ioaddr+LANCE_DATA); + outw(89, ioaddr+LANCE_ADDR); + chip_version |= inw(ioaddr+LANCE_DATA) << 16; + if (lance_debug > 2) + printk(" LANCE chip version is %#x.\n", chip_version); + if ((chip_version & 0xfff) != 0x003) + return -ENODEV; + chip_version = (chip_version >> 12) & 0xffff; + for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) { + if (chip_table[lance_version].id_number == chip_version) + break; + } + } + + /* We can't allocate private data from alloc_etherdev() because it must + a ISA DMA-able region. */ + chipname = chip_table[lance_version].name; + printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr); + + /* There is a 16 byte station address PROM at the base address. + The first six bytes are the station address. */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + i); + printk("%pM", dev->dev_addr); + + dev->base_addr = ioaddr; + /* Make certain the data structures used by the LANCE are aligned and DMAble. */ + + lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); + if(lp==NULL) + return -ENODEV; + if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); + dev->ml_priv = lp; + lp->name = chipname; + lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE, + GFP_DMA | GFP_KERNEL); + if (!lp->rx_buffs) + goto out_lp; + if (lance_need_isa_bounce_buffers) { + lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE, + GFP_DMA | GFP_KERNEL); + if (!lp->tx_bounce_buffs) + goto out_rx; + } else + lp->tx_bounce_buffs = NULL; + + lp->chip_version = lance_version; + spin_lock_init(&lp->devlock); + + lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */ + for (i = 0; i < 6; i++) + lp->init_block.phys_addr[i] = dev->dev_addr[i]; + lp->init_block.filter[0] = 0x00000000; + lp->init_block.filter[1] = 0x00000000; + lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS; + lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS; + + outw(0x0001, ioaddr+LANCE_ADDR); + inw(ioaddr+LANCE_ADDR); + outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA); + outw(0x0002, ioaddr+LANCE_ADDR); + inw(ioaddr+LANCE_ADDR); + outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA); + outw(0x0000, ioaddr+LANCE_ADDR); + inw(ioaddr+LANCE_ADDR); + + if (irq) { /* Set iff PCI card. */ + dev->dma = 4; /* Native bus-master, no DMA channel needed. */ + dev->irq = irq; + } else if (hp_builtin) { + static const char dma_tbl[4] = {3, 5, 6, 0}; + static const char irq_tbl[4] = {3, 4, 5, 9}; + unsigned char port_val = inb(hp_builtin); + dev->dma = dma_tbl[(port_val >> 4) & 3]; + dev->irq = irq_tbl[(port_val >> 2) & 3]; + printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma); + } else if (hpJ2405A) { + static const char dma_tbl[4] = {3, 5, 6, 7}; + static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15}; + short reset_val = inw(ioaddr+LANCE_RESET); + dev->dma = dma_tbl[(reset_val >> 2) & 3]; + dev->irq = irq_tbl[(reset_val >> 4) & 7]; + printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma); + } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. 
*/ + short bus_info; + outw(8, ioaddr+LANCE_ADDR); + bus_info = inw(ioaddr+LANCE_BUS_IF); + dev->dma = bus_info & 0x07; + dev->irq = (bus_info >> 4) & 0x0F; + } else { + /* The DMA channel may be passed in PARAM1. */ + if (dev->mem_start & 0x07) + dev->dma = dev->mem_start & 0x07; + } + + if (dev->dma == 0) { + /* Read the DMA channel status register, so that we can avoid + stuck DMA channels in the DMA detection below. */ + dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) | + (inb(DMA2_STAT_REG) & 0xf0); + } + err = -ENODEV; + if (dev->irq >= 2) + printk(" assigned IRQ %d", dev->irq); + else if (lance_version != 0) { /* 7990 boards need DMA detection first. */ + unsigned long irq_mask; + + /* To auto-IRQ we enable the initialization-done and DMA error + interrupts. For ISA boards we get a DMA error, but VLB and PCI + boards will work. */ + irq_mask = probe_irq_on(); + + /* Trigger an initialization just for the interrupt. */ + outw(0x0041, ioaddr+LANCE_DATA); + + mdelay(20); + dev->irq = probe_irq_off(irq_mask); + if (dev->irq) + printk(", probed IRQ %d", dev->irq); + else { + printk(", failed to detect IRQ line.\n"); + goto out_tx; + } + + /* Check for the initialization done bit, 0x0100, which means + that we don't need a DMA channel. */ + if (inw(ioaddr+LANCE_DATA) & 0x0100) + dev->dma = 4; + } + + if (dev->dma == 4) { + printk(", no DMA needed.\n"); + } else if (dev->dma) { + if (request_dma(dev->dma, chipname)) { + printk("DMA %d allocation failed.\n", dev->dma); + goto out_tx; + } else + printk(", assigned DMA %d.\n", dev->dma); + } else { /* OK, we have to auto-DMA. */ + for (i = 0; i < 4; i++) { + static const char dmas[] = { 5, 6, 7, 3 }; + int dma = dmas[i]; + int boguscnt; + + /* Don't enable a permanently busy DMA channel, or the machine + will hang. */ + if (test_bit(dma, &dma_channels)) + continue; + outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */ + if (request_dma(dma, chipname)) + continue; + + flags=claim_dma_lock(); + set_dma_mode(dma, DMA_MODE_CASCADE); + enable_dma(dma); + release_dma_lock(flags); + + /* Trigger an initialization. */ + outw(0x0001, ioaddr+LANCE_DATA); + for (boguscnt = 100; boguscnt > 0; --boguscnt) + if (inw(ioaddr+LANCE_DATA) & 0x0900) + break; + if (inw(ioaddr+LANCE_DATA) & 0x0100) { + dev->dma = dma; + printk(", DMA %d.\n", dev->dma); + break; + } else { + flags=claim_dma_lock(); + disable_dma(dma); + release_dma_lock(flags); + free_dma(dma); + } + } + if (i == 4) { /* Failure: bail. */ + printk("DMA detection failed.\n"); + goto out_tx; + } + } + + if (lance_version == 0 && dev->irq == 0) { + /* We may auto-IRQ now that we have a DMA channel. */ + /* Trigger an initialization just for the interrupt. */ + unsigned long irq_mask; + + irq_mask = probe_irq_on(); + outw(0x0041, ioaddr+LANCE_DATA); + + mdelay(40); + dev->irq = probe_irq_off(irq_mask); + if (dev->irq == 0) { + printk(" Failed to detect the 7990 IRQ line.\n"); + goto out_dma; + } + printk(" Auto-IRQ detected IRQ%d.\n", dev->irq); + } + + if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) { + /* Turn on auto-select of media (10baseT or BNC) so that the user + can watch the LEDs even if the board isn't opened. */ + outw(0x0002, ioaddr+LANCE_ADDR); + /* Don't touch 10base2 power bit. */ + outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF); + } + + if (lance_debug > 0 && did_version++ == 0) + printk(version); + + /* The LANCE-specific entries in the device structure. 
*/ + dev->netdev_ops = &lance_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + err = register_netdev(dev); + if (err) + goto out_dma; + return 0; +out_dma: + if (dev->dma != 4) + free_dma(dev->dma); +out_tx: + kfree(lp->tx_bounce_buffs); +out_rx: + kfree((void*)lp->rx_buffs); +out_lp: + kfree(lp); + return err; +} + + +static int +lance_open(struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + int ioaddr = dev->base_addr; + int i; + + if (dev->irq == 0 || + request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) { + return -EAGAIN; + } + + /* We used to allocate DMA here, but that was silly. + DMA lines can't be shared! We now permanently allocate them. */ + + /* Reset the LANCE */ + inw(ioaddr+LANCE_RESET); + + /* The DMA controller is used as a no-operation slave, "cascade mode". */ + if (dev->dma != 4) { + unsigned long flags=claim_dma_lock(); + enable_dma(dev->dma); + set_dma_mode(dev->dma, DMA_MODE_CASCADE); + release_dma_lock(flags); + } + + /* Un-Reset the LANCE, needed only for the NE2100. */ + if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET) + outw(0, ioaddr+LANCE_RESET); + + if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) { + /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */ + outw(0x0002, ioaddr+LANCE_ADDR); + /* Only touch autoselect bit. */ + outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF); + } + + if (lance_debug > 1) + printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n", + dev->name, dev->irq, dev->dma, + (u32) isa_virt_to_bus(lp->tx_ring), + (u32) isa_virt_to_bus(lp->rx_ring), + (u32) isa_virt_to_bus(&lp->init_block)); + + lance_init_ring(dev, GFP_KERNEL); + /* Re-initialize the LANCE, and start it when done. */ + outw(0x0001, ioaddr+LANCE_ADDR); + outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA); + outw(0x0002, ioaddr+LANCE_ADDR); + outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA); + + outw(0x0004, ioaddr+LANCE_ADDR); + outw(0x0915, ioaddr+LANCE_DATA); + + outw(0x0000, ioaddr+LANCE_ADDR); + outw(0x0001, ioaddr+LANCE_DATA); + + netif_start_queue (dev); + + i = 0; + while (i++ < 100) + if (inw(ioaddr+LANCE_DATA) & 0x0100) + break; + /* + * We used to clear the InitDone bit, 0x0100, here but Mark Stockton + * reports that doing so triggers a bug in the '974. + */ + outw(0x0042, ioaddr+LANCE_DATA); + + if (lance_debug > 2) + printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n", + dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA)); + + return 0; /* Always succeed */ +} + +/* The LANCE has been halted for one reason or another (busmaster memory + arbitration error, Tx FIFO underflow, driver stopped it to reconfigure, + etc.). Modern LANCE variants always reload their ring-buffer + configuration when restarted, so we must reinitialize our ring + context before restarting. As part of this reinitialization, + find all packets still on the Tx ring and pretend that they had been + sent (in effect, drop the packets on the floor) - the higher-level + protocols will time out and retransmit. It'd be better to shuffle + these skbs to a temp list and then actually re-Tx them after + restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com +*/ + +static void +lance_purge_ring(struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + int i; + + /* Free all the skbuffs in the Rx and Tx queues. 
*/ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = lp->rx_skbuff[i]; + lp->rx_skbuff[i] = NULL; + lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */ + if (skb) + dev_kfree_skb_any(skb); + } + for (i = 0; i < TX_RING_SIZE; i++) { + if (lp->tx_skbuff[i]) { + dev_kfree_skb_any(lp->tx_skbuff[i]); + lp->tx_skbuff[i] = NULL; + } + } +} + + +/* Initialize the LANCE Rx and Tx rings. */ +static void +lance_init_ring(struct net_device *dev, gfp_t gfp) +{ + struct lance_private *lp = dev->ml_priv; + int i; + + lp->cur_rx = lp->cur_tx = 0; + lp->dirty_rx = lp->dirty_tx = 0; + + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb; + void *rx_buff; + + skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp); + lp->rx_skbuff[i] = skb; + if (skb) { + skb->dev = dev; + rx_buff = skb->data; + } else + rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp); + if (rx_buff == NULL) + lp->rx_ring[i].base = 0; + else + lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000; + lp->rx_ring[i].buf_length = -PKT_BUF_SZ; + } + /* The Tx buffer address is filled in as needed, but we do need to clear + the upper ownership bit. */ + for (i = 0; i < TX_RING_SIZE; i++) { + lp->tx_skbuff[i] = NULL; + lp->tx_ring[i].base = 0; + } + + lp->init_block.mode = 0x0000; + for (i = 0; i < 6; i++) + lp->init_block.phys_addr[i] = dev->dev_addr[i]; + lp->init_block.filter[0] = 0x00000000; + lp->init_block.filter[1] = 0x00000000; + lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS; + lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS; +} + +static void +lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit) +{ + struct lance_private *lp = dev->ml_priv; + + if (must_reinit || + (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) { + lance_purge_ring(dev); + lance_init_ring(dev, GFP_ATOMIC); + } + outw(0x0000, dev->base_addr + LANCE_ADDR); + outw(csr0_bits, dev->base_addr + LANCE_DATA); +} + + +static void lance_tx_timeout (struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->ml_priv; + int ioaddr = dev->base_addr; + + outw (0, ioaddr + LANCE_ADDR); + printk ("%s: transmit timed out, status %4.4x, resetting.\n", + dev->name, inw (ioaddr + LANCE_DATA)); + outw (0x0004, ioaddr + LANCE_DATA); + dev->stats.tx_errors++; +#ifndef final_version + if (lance_debug > 3) { + int i; + printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", + lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "", + lp->cur_rx); + for (i = 0; i < RX_RING_SIZE; i++) + printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ", + lp->rx_ring[i].base, -lp->rx_ring[i].buf_length, + lp->rx_ring[i].msg_length); + for (i = 0; i < TX_RING_SIZE; i++) + printk ("%s %08x %04x %04x", i & 0x3 ? 
"" : "\n ", + lp->tx_ring[i].base, -lp->tx_ring[i].length, + lp->tx_ring[i].misc); + printk ("\n"); + } +#endif + lance_restart (dev, 0x0043, 1); + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue (dev); +} + + +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + int ioaddr = dev->base_addr; + int entry; + unsigned long flags; + + spin_lock_irqsave(&lp->devlock, flags); + + if (lance_debug > 3) { + outw(0x0000, ioaddr+LANCE_ADDR); + printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name, + inw(ioaddr+LANCE_DATA)); + outw(0x0000, ioaddr+LANCE_DATA); + } + + /* Fill in a Tx ring entry */ + + /* Mask to ring buffer boundary. */ + entry = lp->cur_tx & TX_RING_MOD_MASK; + + /* Caution: the write order is important here, set the base address + with the "ownership" bits last. */ + + /* The old LANCE chips doesn't automatically pad buffers to min. size. */ + if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) { + if (skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + goto out; + lp->tx_ring[entry].length = -ETH_ZLEN; + } + else + lp->tx_ring[entry].length = -skb->len; + } else + lp->tx_ring[entry].length = -skb->len; + + lp->tx_ring[entry].misc = 0x0000; + + dev->stats.tx_bytes += skb->len; + + /* If any part of this buffer is >16M we must copy it to a low-memory + buffer. */ + if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) { + if (lance_debug > 5) + printk("%s: bouncing a high-memory packet (%#x).\n", + dev->name, (u32)isa_virt_to_bus(skb->data)); + skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len); + lp->tx_ring[entry].base = + ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; + dev_kfree_skb(skb); + } else { + lp->tx_skbuff[entry] = skb; + lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000; + } + lp->cur_tx++; + + /* Trigger an immediate send poll. */ + outw(0x0000, ioaddr+LANCE_ADDR); + outw(0x0048, ioaddr+LANCE_DATA); + + if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) + netif_stop_queue(dev); + +out: + spin_unlock_irqrestore(&lp->devlock, flags); + return NETDEV_TX_OK; +} + +/* The LANCE interrupt handler. */ +static irqreturn_t lance_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lance_private *lp; + int csr0, ioaddr, boguscnt=10; + int must_restart; + + ioaddr = dev->base_addr; + lp = dev->ml_priv; + + spin_lock (&lp->devlock); + + outw(0x00, dev->base_addr + LANCE_ADDR); + while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 && + --boguscnt >= 0) { + /* Acknowledge all of the current interrupt sources ASAP. */ + outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA); + + must_restart = 0; + + if (lance_debug > 5) + printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", + dev->name, csr0, inw(dev->base_addr + LANCE_DATA)); + + if (csr0 & 0x0400) /* Rx interrupt */ + lance_rx(dev); + + if (csr0 & 0x0200) { /* Tx-done interrupt */ + int dirty_tx = lp->dirty_tx; + + while (dirty_tx < lp->cur_tx) { + int entry = dirty_tx & TX_RING_MOD_MASK; + int status = lp->tx_ring[entry].base; + + if (status < 0) + break; /* It still hasn't been Txed */ + + lp->tx_ring[entry].base = 0; + + if (status & 0x40000000) { + /* There was an major error, log it. 
*/ + int err_status = lp->tx_ring[entry].misc; + dev->stats.tx_errors++; + if (err_status & 0x0400) + dev->stats.tx_aborted_errors++; + if (err_status & 0x0800) + dev->stats.tx_carrier_errors++; + if (err_status & 0x1000) + dev->stats.tx_window_errors++; + if (err_status & 0x4000) { + /* Ackk! On FIFO errors the Tx unit is turned off! */ + dev->stats.tx_fifo_errors++; + /* Remove this verbosity later! */ + printk("%s: Tx FIFO error! Status %4.4x.\n", + dev->name, csr0); + /* Restart the chip. */ + must_restart = 1; + } + } else { + if (status & 0x18000000) + dev->stats.collisions++; + dev->stats.tx_packets++; + } + + /* We must free the original skb if it's not a data-only copy + in the bounce buffer. */ + if (lp->tx_skbuff[entry]) { + dev_kfree_skb_irq(lp->tx_skbuff[entry]); + lp->tx_skbuff[entry] = NULL; + } + dirty_tx++; + } + +#ifndef final_version + if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { + printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n", + dirty_tx, lp->cur_tx, + netif_queue_stopped(dev) ? "yes" : "no"); + dirty_tx += TX_RING_SIZE; + } +#endif + + /* if the ring is no longer full, accept more packets */ + if (netif_queue_stopped(dev) && + dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) + netif_wake_queue (dev); + + lp->dirty_tx = dirty_tx; + } + + /* Log misc errors. */ + if (csr0 & 0x4000) + dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & 0x1000) + dev->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & 0x0800) { + printk("%s: Bus master arbitration failure, status %4.4x.\n", + dev->name, csr0); + /* Restart the chip. */ + must_restart = 1; + } + + if (must_restart) { + /* stop the chip to clear the error condition, then restart */ + outw(0x0000, dev->base_addr + LANCE_ADDR); + outw(0x0004, dev->base_addr + LANCE_DATA); + lance_restart(dev, 0x0002, 0); + } + } + + /* Clear any other interrupt, and set interrupt enable. */ + outw(0x0000, dev->base_addr + LANCE_ADDR); + outw(0x7940, dev->base_addr + LANCE_DATA); + + if (lance_debug > 4) + printk("%s: exiting interrupt, csr%d=%#4.4x.\n", + dev->name, inw(ioaddr + LANCE_ADDR), + inw(dev->base_addr + LANCE_DATA)); + + spin_unlock (&lp->devlock); + return IRQ_HANDLED; +} + +static int +lance_rx(struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + int entry = lp->cur_rx & RX_RING_MOD_MASK; + int i; + + /* If we own the next entry, it's a new packet. Send it up. */ + while (lp->rx_ring[entry].base >= 0) { + int status = lp->rx_ring[entry].base >> 24; + + if (status != 0x03) { /* There was an error. */ + /* There is a tricky error noted by John Murphy, + to Russ Nelson: Even with full-sized + buffers it's possible for a jabber packet to use two + buffers, with only the last correctly noting the error. */ + if (status & 0x01) /* Only count a general error at the */ + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & 0x20) + dev->stats.rx_frame_errors++; + if (status & 0x10) + dev->stats.rx_over_errors++; + if (status & 0x08) + dev->stats.rx_crc_errors++; + if (status & 0x04) + dev->stats.rx_fifo_errors++; + lp->rx_ring[entry].base &= 0x03ffffff; + } + else + { + /* Malloc up new buffer, compatible with net3. 
*/ + short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4; + struct sk_buff *skb; + + if(pkt_len<60) + { + printk("%s: Runt packet!\n",dev->name); + dev->stats.rx_errors++; + } + else + { + skb = dev_alloc_skb(pkt_len+2); + if (skb == NULL) + { + printk("%s: Memory squeeze, deferring packet.\n", dev->name); + for (i=0; i < RX_RING_SIZE; i++) + if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0) + break; + + if (i > RX_RING_SIZE -2) + { + dev->stats.rx_dropped++; + lp->rx_ring[entry].base |= 0x80000000; + lp->cur_rx++; + } + break; + } + skb_reserve(skb,2); /* 16 byte align */ + skb_put(skb,pkt_len); /* Make room */ + skb_copy_to_linear_data(skb, + (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)), + pkt_len); + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + } + /* The docs say that the buffer length isn't touched, but Andrew Boyd + of QNX reports that some revs of the 79C965 clear it. */ + lp->rx_ring[entry].buf_length = -PKT_BUF_SZ; + lp->rx_ring[entry].base |= 0x80000000; + entry = (++lp->cur_rx) & RX_RING_MOD_MASK; + } + + /* We should check that at least two ring entries are free. If not, + we should free one and mark stats->rx_dropped++. */ + + return 0; +} + +static int +lance_close(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct lance_private *lp = dev->ml_priv; + + netif_stop_queue (dev); + + if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { + outw(112, ioaddr+LANCE_ADDR); + dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); + } + outw(0, ioaddr+LANCE_ADDR); + + if (lance_debug > 1) + printk("%s: Shutting down ethercard, status was %2.2x.\n", + dev->name, inw(ioaddr+LANCE_DATA)); + + /* We stop the LANCE here -- it occasionally polls + memory if we don't. */ + outw(0x0004, ioaddr+LANCE_DATA); + + if (dev->dma != 4) + { + unsigned long flags=claim_dma_lock(); + disable_dma(dev->dma); + release_dma_lock(flags); + } + free_irq(dev->irq, dev); + + lance_purge_ring(dev); + + return 0; +} + +static struct net_device_stats *lance_get_stats(struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + + if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { + short ioaddr = dev->base_addr; + short saved_addr; + unsigned long flags; + + spin_lock_irqsave(&lp->devlock, flags); + saved_addr = inw(ioaddr+LANCE_ADDR); + outw(112, ioaddr+LANCE_ADDR); + dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); + outw(saved_addr, ioaddr+LANCE_ADDR); + spin_unlock_irqrestore(&lp->devlock, flags); + } + + return &dev->stats; +} + +/* Set or clear the multicast filter for this adaptor. + */ + +static void set_multicast_list(struct net_device *dev) +{ + short ioaddr = dev->base_addr; + + outw(0, ioaddr+LANCE_ADDR); + outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */ + + if (dev->flags&IFF_PROMISC) { + outw(15, ioaddr+LANCE_ADDR); + outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */ + } else { + short multicast_table[4]; + int i; + int num_addrs=netdev_mc_count(dev); + if(dev->flags&IFF_ALLMULTI) + num_addrs=1; + /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */ + memset(multicast_table, (num_addrs == 0) ? 
0 : -1, sizeof(multicast_table)); + for (i = 0; i < 4; i++) { + outw(8 + i, ioaddr+LANCE_ADDR); + outw(multicast_table[i], ioaddr+LANCE_DATA); + } + outw(15, ioaddr+LANCE_ADDR); + outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */ + } + + lance_restart(dev, 0x0142, 0); /* Resume normal operation */ + +} + diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c new file mode 100644 index 0000000..3a7ad84 --- /dev/null +++ b/drivers/net/ethernet/amd/mvme147.c @@ -0,0 +1,205 @@ +/* mvme147.c : the Linux/mvme147/lance ethernet driver + * + * Copyright (C) 05/1998 Peter Maydell + * Based on the Sun Lance driver and the NetBSD HP Lance driver + * Uses the generic 7990.c LANCE code. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* Used for the temporal inet entries and routing */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* We have 16834 bytes of RAM for the init block and buffers. This places + * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx + * buffers and 2 Tx buffers. + */ +#define LANCE_LOG_TX_BUFFERS 1 +#define LANCE_LOG_RX_BUFFERS 3 + +#include "7990.h" /* use generic LANCE code */ + +/* Our private data structure */ +struct m147lance_private { + struct lance_private lance; + unsigned long ram; +}; + +/* function prototypes... This is easy because all the grot is in the + * generic LANCE support. All we have to support is probing for boards, + * plus board-specific init, open and close actions. + * Oh, and we need to tell the generic code how to read and write LANCE registers... + */ +static int m147lance_open(struct net_device *dev); +static int m147lance_close(struct net_device *dev); +static void m147lance_writerap(struct lance_private *lp, unsigned short value); +static void m147lance_writerdp(struct lance_private *lp, unsigned short value); +static unsigned short m147lance_readrdp(struct lance_private *lp); + +typedef void (*writerap_t)(void *, unsigned short); +typedef void (*writerdp_t)(void *, unsigned short); +typedef unsigned short (*readrdp_t)(void *); + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = m147lance_open, + .ndo_stop = m147lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_multicast_list = lance_set_multicast, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + +/* Initialise the one and only on-board 7990 */ +struct net_device * __init mvme147lance_probe(int unit) +{ + struct net_device *dev; + static int called; + static const char name[] = "MVME147 LANCE"; + struct m147lance_private *lp; + u_long *addr; + u_long address; + int err; + + if (!MACH_IS_MVME147 || called) + return ERR_PTR(-ENODEV); + called++; + + dev = alloc_etherdev(sizeof(struct m147lance_private)); + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) + sprintf(dev->name, "eth%d", unit); + + /* Fill the dev fields */ + dev->base_addr = (unsigned long)MVME147_LANCE_BASE; + dev->netdev_ops = &lance_netdev_ops; + dev->dma = 0; + + addr=(u_long *)ETHERNET_ADDRESS; + address = *addr; + dev->dev_addr[0]=0x08; + dev->dev_addr[1]=0x00; + dev->dev_addr[2]=0x3e; + address=address>>8; + dev->dev_addr[5]=address&0xff; + address=address>>8; + dev->dev_addr[4]=address&0xff; + address=address>>8; + dev->dev_addr[3]=address&0xff; + + printk("%s: MVME147 at 0x%08lx, irq %d, " + "Hardware Address 
%pM\n", + dev->name, dev->base_addr, MVME147_LANCE_IRQ, + dev->dev_addr); + + lp = netdev_priv(dev); + lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */ + if (!lp->ram) + { + printk("%s: No memory for LANCE buffers\n", dev->name); + free_netdev(dev); + return ERR_PTR(-ENOMEM); + } + + lp->lance.name = (char*)name; /* discards const, shut up gcc */ + lp->lance.base = dev->base_addr; + lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */ + lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */ + lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */ + lp->lance.irq = MVME147_LANCE_IRQ; + lp->lance.writerap = (writerap_t)m147lance_writerap; + lp->lance.writerdp = (writerdp_t)m147lance_writerdp; + lp->lance.readrdp = (readrdp_t)m147lance_readrdp; + lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS; + lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; + lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK; + lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK; + + err = register_netdev(dev); + if (err) { + free_pages(lp->ram, 3); + free_netdev(dev); + return ERR_PTR(err); + } + + return dev; +} + +static void m147lance_writerap(struct lance_private *lp, unsigned short value) +{ + out_be16(lp->base + LANCE_RAP, value); +} + +static void m147lance_writerdp(struct lance_private *lp, unsigned short value) +{ + out_be16(lp->base + LANCE_RDP, value); +} + +static unsigned short m147lance_readrdp(struct lance_private *lp) +{ + return in_be16(lp->base + LANCE_RDP); +} + +static int m147lance_open(struct net_device *dev) +{ + int status; + + status = lance_open(dev); /* call generic lance open code */ + if (status) + return status; + /* enable interrupts at board level. */ + m147_pcc->lan_cntrl=0; /* clear the interrupts (if any) */ + m147_pcc->lan_cntrl=0x08 | 0x04; /* Enable irq 4 */ + + return 0; +} + +static int m147lance_close(struct net_device *dev) +{ + /* disable interrupts at boardlevel */ + m147_pcc->lan_cntrl=0x0; /* disable interrupts */ + lance_close(dev); + return 0; +} + +#ifdef MODULE +MODULE_LICENSE("GPL"); + +static struct net_device *dev_mvme147_lance; +int __init init_module(void) +{ + dev_mvme147_lance = mvme147lance_probe(-1); + if (IS_ERR(dev_mvme147_lance)) + return PTR_ERR(dev_mvme147_lance); + return 0; +} + +void __exit cleanup_module(void) +{ + struct m147lance_private *lp = netdev_priv(dev_mvme147_lance); + unregister_netdev(dev_mvme147_lance); + free_pages(lp->ram, 3); + free_netdev(dev_mvme147_lance); +} + +#endif /* MODULE */ diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c new file mode 100644 index 0000000..c75ae85 --- /dev/null +++ b/drivers/net/ethernet/amd/ni65.c @@ -0,0 +1,1254 @@ +/* + * ni6510 (am7990 'lance' chip) driver for Linux-net-3 + * BETAcode v0.71 (96/09/29) for 2.0.0 (or later) + * copyrights (c) 1994,1995,1996 by M.Hipp + * + * This driver can handle the old ni6510 board and the newer ni6510 + * EtherBlaster. (probably it also works with every full NE2100 + * compatible card) + * + * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7 + * + * This is an extension to the Linux operating system, and is covered by the + * same GNU General Public License that covers the Linux-kernel. 
+ * + * comments/bugs/suggestions can be sent to: + * Michael Hipp + * email: hippm@informatik.uni-tuebingen.de + * + * sources: + * some things are from the 'ni6510-packet-driver for dos by Russ Nelson' + * and from the original drivers by D.Becker + * + * known problems: + * - on some PCI boards (including my own) the card/board/ISA-bridge has + * problems with bus master DMA. This results in lotsa overruns. + * It may help to '#define RCV_PARANOIA_CHECK' or try to #undef + * the XMT and RCV_VIA_SKB option .. this reduces driver performance. + * Or just play with your BIOS options to optimize ISA-DMA access. + * Maybe you also wanna play with the LOW_PERFORMANCE and MID_PERFORMANCE + * defines -> please report your experience to me then + * - Harald reported that on ASUS SP3G mainboards you should use + * the 'optimal settings' from the user's manual on page 3-12! + * + * credits: + * thanx to Jason Sullivan for sending me a ni6510 card! + * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig + * + * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB) + * average: FTP -> 8384421 bytes received in 8.5 seconds + * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS) + * peak: FTP -> 8384421 bytes received in 7.5 seconds + * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS) + */ + +/* + * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK) + * 96.Sept.29: virt_to_bus stuff added for new memory model + * 96.April.29: Added Harald Koenig's Patches (MH) + * 96.April.13: enhanced error handling .. more tests (MH) + * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH) + * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH) + * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH) + * hopefully no more 16MB limit + * + * 95.Nov.18: multicast tweaked (AC). + * + * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH) + * + * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "ni65.h" + +/* + * the current setting allows an acceptable performance. + * For 'RCV_PARANOIA_CHECK' read the 'known problems' part in + * the header of this file + * 'invert' the defines for max. performance.
This may cause DMA problems + * on some boards (e.g on my ASUS SP3G) + */ +#undef XMT_VIA_SKB +#undef RCV_VIA_SKB +#define RCV_PARANOIA_CHECK + +#define MID_PERFORMANCE + +#if defined( LOW_PERFORMANCE ) + static int isa0=7,isa1=7,csr80=0x0c10; +#elif defined( MID_PERFORMANCE ) + static int isa0=5,isa1=5,csr80=0x2810; +#else /* high performance */ + static int isa0=4,isa1=4,csr80=0x0017; +#endif + +/* + * a few card/vendor specific defines + */ +#define NI65_ID0 0x00 +#define NI65_ID1 0x55 +#define NI65_EB_ID0 0x52 +#define NI65_EB_ID1 0x44 +#define NE2100_ID0 0x57 +#define NE2100_ID1 0x57 + +#define PORT p->cmdr_addr + +/* + * buffer configuration + */ +#if 1 +#define RMDNUM 16 +#define RMDNUMMASK 0x80000000 +#else +#define RMDNUM 8 +#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */ +#endif + +#if 0 +#define TMDNUM 1 +#define TMDNUMMASK 0x00000000 +#else +#define TMDNUM 4 +#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */ +#endif + +/* slightly oversized */ +#define R_BUF_SIZE 1544 +#define T_BUF_SIZE 1544 + +/* + * lance register defines + */ +#define L_DATAREG 0x00 +#define L_ADDRREG 0x02 +#define L_RESET 0x04 +#define L_CONFIG 0x05 +#define L_BUSIF 0x06 + +/* + * to access the lance/am7990-regs, you have to write + * reg-number into L_ADDRREG, then you can access it using L_DATAREG + */ +#define CSR0 0x00 +#define CSR1 0x01 +#define CSR2 0x02 +#define CSR3 0x03 + +#define INIT_RING_BEFORE_START 0x1 +#define FULL_RESET_ON_ERROR 0x2 + +#if 0 +#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \ + outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);} +#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\ + inw(PORT+L_DATAREG)) +#if 0 +#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);} +#else +#define writedatareg(val) { writereg(val,CSR0); } +#endif +#else +#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);} +#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG)) +#define writedatareg(val) { writereg(val,CSR0); } +#endif + +static unsigned char ni_vendor[] = { 0x02,0x07,0x01 }; + +static struct card { + unsigned char id0,id1; + short id_offset; + short total_size; + short cmd_offset; + short addr_offset; + unsigned char *vendor_id; + char *cardname; + unsigned long config; +} cards[] = { + { + .id0 = NI65_ID0, + .id1 = NI65_ID1, + .id_offset = 0x0e, + .total_size = 0x10, + .cmd_offset = 0x0, + .addr_offset = 0x8, + .vendor_id = ni_vendor, + .cardname = "ni6510", + .config = 0x1, + }, + { + .id0 = NI65_EB_ID0, + .id1 = NI65_EB_ID1, + .id_offset = 0x0e, + .total_size = 0x18, + .cmd_offset = 0x10, + .addr_offset = 0x0, + .vendor_id = ni_vendor, + .cardname = "ni6510 EtherBlaster", + .config = 0x2, + }, + { + .id0 = NE2100_ID0, + .id1 = NE2100_ID1, + .id_offset = 0x0e, + .total_size = 0x18, + .cmd_offset = 0x10, + .addr_offset = 0x0, + .vendor_id = NULL, + .cardname = "generic NE2100", + .config = 0x0, + }, +}; +#define NUM_CARDS 3 + +struct priv +{ + struct rmd rmdhead[RMDNUM]; + struct tmd tmdhead[TMDNUM]; + struct init_block ib; + int rmdnum; + int tmdnum,tmdlast; +#ifdef RCV_VIA_SKB + struct sk_buff *recv_skb[RMDNUM]; +#else + void *recvbounce[RMDNUM]; +#endif +#ifdef XMT_VIA_SKB + struct sk_buff *tmd_skb[TMDNUM]; +#endif + void *tmdbounce[TMDNUM]; + int tmdbouncenum; + int lock,xmit_queued; + + void *self; + int cmdr_addr; + int cardno; + int features; + spinlock_t ring_lock; +}; + +static int ni65_probe1(struct net_device *dev,int); +static irqreturn_t ni65_interrupt(int irq, void * 
dev_id); +static void ni65_recv_intr(struct net_device *dev,int); +static void ni65_xmit_intr(struct net_device *dev,int); +static int ni65_open(struct net_device *dev); +static int ni65_lance_reinit(struct net_device *dev); +static void ni65_init_lance(struct priv *p,unsigned char*,int,int); +static netdev_tx_t ni65_send_packet(struct sk_buff *skb, + struct net_device *dev); +static void ni65_timeout(struct net_device *dev); +static int ni65_close(struct net_device *dev); +static int ni65_alloc_buffer(struct net_device *dev); +static void ni65_free_buffer(struct priv *p); +static void set_multicast_list(struct net_device *dev); + +static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */ +static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */ + +static int debuglevel = 1; + +/* + * set 'performance' registers .. we must STOP lance for that + */ +static void ni65_set_performance(struct priv *p) +{ + writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */ + + if( !(cards[p->cardno].config & 0x02) ) + return; + + outw(80,PORT+L_ADDRREG); + if(inw(PORT+L_ADDRREG) != 80) + return; + + writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */ + outw(0,PORT+L_ADDRREG); + outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */ + outw(1,PORT+L_ADDRREG); + outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */ + + outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */ +} + +/* + * open interface (up) + */ +static int ni65_open(struct net_device *dev) +{ + struct priv *p = dev->ml_priv; + int irqval = request_irq(dev->irq, ni65_interrupt,0, + cards[p->cardno].cardname,dev); + if (irqval) { + printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n", + dev->name,dev->irq, irqval); + return -EAGAIN; + } + + if(ni65_lance_reinit(dev)) + { + netif_start_queue(dev); + return 0; + } + else + { + free_irq(dev->irq,dev); + return -EAGAIN; + } +} + +/* + * close interface (down) + */ +static int ni65_close(struct net_device *dev) +{ + struct priv *p = dev->ml_priv; + + netif_stop_queue(dev); + + outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */ + +#ifdef XMT_VIA_SKB + { + int i; + for(i=0;itmd_skb[i]) { + dev_kfree_skb(p->tmd_skb[i]); + p->tmd_skb[i] = NULL; + } + } + } +#endif + free_irq(dev->irq,dev); + return 0; +} + +static void cleanup_card(struct net_device *dev) +{ + struct priv *p = dev->ml_priv; + disable_dma(dev->dma); + free_dma(dev->dma); + release_region(dev->base_addr, cards[p->cardno].total_size); + ni65_free_buffer(p); +} + +/* set: io,irq,dma or set it when calling insmod */ +static int irq; +static int io; +static int dma; + +/* + * Probe The Card (not the lance-chip) + */ +struct net_device * __init ni65_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(0); + static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 }; + const int *port; + int err = 0; + + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + irq = dev->irq; + dma = dev->dma; + } else { + dev->base_addr = io; + } + + if (dev->base_addr > 0x1ff) { /* Check a single specified location. */ + err = ni65_probe1(dev, dev->base_addr); + } else if (dev->base_addr > 0) { /* Don't probe at all. 
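+ (a base_addr between 1 and 0x1ff is the boot-setup convention for "configured, but do not probe"; a base_addr of 0 means autoprobe the ports[] list below)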
*/ + err = -ENXIO; + } else { + for (port = ports; *port && ni65_probe1(dev, *port); port++) + ; + if (!*port) + err = -ENODEV; + } + if (err) + goto out; + + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: + cleanup_card(dev); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static const struct net_device_ops ni65_netdev_ops = { + .ndo_open = ni65_open, + .ndo_stop = ni65_close, + .ndo_start_xmit = ni65_send_packet, + .ndo_tx_timeout = ni65_timeout, + .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/* + * this is the real card probe .. + */ +static int __init ni65_probe1(struct net_device *dev,int ioaddr) +{ + int i,j; + struct priv *p; + unsigned long flags; + + dev->irq = irq; + dev->dma = dma; + + for(i=0;i= 0) { + if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 || + inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) { + release_region(ioaddr, cards[i].total_size); + continue; + } + } + if(cards[i].vendor_id) { + for(j=0;j<3;j++) + if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) { + release_region(ioaddr, cards[i].total_size); + continue; + } + } + break; + } + if(i == NUM_CARDS) + return -ENODEV; + + for(j=0;j<6;j++) + dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j); + + if( (j=ni65_alloc_buffer(dev)) < 0) { + release_region(ioaddr, cards[i].total_size); + return j; + } + p = dev->ml_priv; + p->cmdr_addr = ioaddr + cards[i].cmd_offset; + p->cardno = i; + spin_lock_init(&p->ring_lock); + + printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr); + + outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */ + if( (j=readreg(CSR0)) != 0x4) { + printk("failed.\n"); + printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j); + ni65_free_buffer(p); + release_region(ioaddr, cards[p->cardno].total_size); + return -EAGAIN; + } + + outw(88,PORT+L_ADDRREG); + if(inw(PORT+L_ADDRREG) == 88) { + unsigned long v; + v = inw(PORT+L_DATAREG); + v <<= 16; + outw(89,PORT+L_ADDRREG); + v |= inw(PORT+L_DATAREG); + printk("Version %#08lx, ",v); + p->features = INIT_RING_BEFORE_START; + } + else { + printk("ancient LANCE, "); + p->features = 0x0; + } + + if(test_bit(0,&cards[i].config)) { + dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3]; + dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3]; + printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma); + } + else { + if(dev->dma == 0) { + /* 'stuck test' from lance.c */ + unsigned long dma_channels = + ((inb(DMA1_STAT_REG) >> 4) & 0x0f) + | (inb(DMA2_STAT_REG) & 0xf0); + for(i=1;i<5;i++) { + int dma = dmatab[i]; + if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510")) + continue; + + flags=claim_dma_lock(); + disable_dma(dma); + set_dma_mode(dma,DMA_MODE_CASCADE); + enable_dma(dma); + release_dma_lock(flags); + + ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */ + + flags=claim_dma_lock(); + disable_dma(dma); + free_dma(dma); + release_dma_lock(flags); + + if(readreg(CSR0) & CSR0_IDON) + break; + } + if(i == 5) { + printk("failed.\n"); + printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name); + ni65_free_buffer(p); + release_region(ioaddr, cards[p->cardno].total_size); + return -EAGAIN; + } + dev->dma = dmatab[i]; + printk("DMA %d (autodetected), ",dev->dma); + } + else + printk("DMA %d (assigned), ",dev->dma); + + if(dev->irq < 2) + { + unsigned long irq_mask; + + ni65_init_lance(p,dev->dev_addr,0,0); + 
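+ /* IRQ autoprobe: arm probe_irq_on(), have the LANCE raise its init-done interrupt, then let probe_irq_off() report which line fired */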
irq_mask = probe_irq_on(); + writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */ + msleep(20); + dev->irq = probe_irq_off(irq_mask); + if(!dev->irq) + { + printk("Failed to detect IRQ line!\n"); + ni65_free_buffer(p); + release_region(ioaddr, cards[p->cardno].total_size); + return -EAGAIN; + } + printk("IRQ %d (autodetected).\n",dev->irq); + } + else + printk("IRQ %d (assigned).\n",dev->irq); + } + + if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0) + { + printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma); + ni65_free_buffer(p); + release_region(ioaddr, cards[p->cardno].total_size); + return -EAGAIN; + } + + dev->base_addr = ioaddr; + dev->netdev_ops = &ni65_netdev_ops; + dev->watchdog_timeo = HZ/2; + + return 0; /* everything is OK */ +} + +/* + * set lance register and trigger init + */ +static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode) +{ + int i; + u32 pib; + + writereg(CSR0_CLRALL|CSR0_STOP,CSR0); + + for(i=0;i<6;i++) + p->ib.eaddr[i] = daddr[i]; + + for(i=0;i<8;i++) + p->ib.filter[i] = filter; + p->ib.mode = mode; + + p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK; + p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK; + writereg(0,CSR3); /* busmaster/no word-swap */ + pib = (u32) isa_virt_to_bus(&p->ib); + writereg(pib & 0xffff,CSR1); + writereg(pib >> 16,CSR2); + + writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */ + + for(i=0;i<32;i++) + { + mdelay(4); + if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) ) + break; /* init ok ? */ + } +} + +/* + * allocate memory area and check the 16MB border + */ +static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type) +{ + struct sk_buff *skb=NULL; + unsigned char *ptr; + void *ret; + + if(type) { + ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA); + if(!skb) { + printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); + return NULL; + } + skb_reserve(skb,2+16); + skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ + ptr = skb->data; + } + else { + ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA); + if(!ret) { + printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); + return NULL; + } + } + if( (u32) virt_to_phys(ptr+size) > 0x1000000) { + printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what); + if(type) + kfree_skb(skb); + else + kfree(ptr); + return NULL; + } + return ret; +} + +/* + * allocate all memory structures .. send/recv buffers etc ... + */ +static int ni65_alloc_buffer(struct net_device *dev) +{ + unsigned char *ptr; + struct priv *p; + int i; + + /* + * we need 8-aligned memory .. 
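+ * (ni65_alloc_mem() below hands back sizeof(struct priv)+8 bytes and the pointer is rounded up by hand, p = (ptr + 7) & ~0x7, so that the rmd/tmd descriptor rings embedded in struct priv sit on the 8-byte boundary the init block's ring pointers require)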
+ */ + ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0); + if(!ptr) + return -ENOMEM; + + p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7); + memset((char *)p, 0, sizeof(struct priv)); + p->self = ptr; + + for(i=0;itmd_skb[i] = NULL; +#endif + p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0); + if(!p->tmdbounce[i]) { + ni65_free_buffer(p); + return -ENOMEM; + } + } + + for(i=0;irecv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1); + if(!p->recv_skb[i]) { + ni65_free_buffer(p); + return -ENOMEM; + } +#else + p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0); + if(!p->recvbounce[i]) { + ni65_free_buffer(p); + return -ENOMEM; + } +#endif + } + + return 0; /* everything is OK */ +} + +/* + * free buffers and private struct + */ +static void ni65_free_buffer(struct priv *p) +{ + int i; + + if(!p) + return; + + for(i=0;itmdbounce[i]); +#ifdef XMT_VIA_SKB + if(p->tmd_skb[i]) + dev_kfree_skb(p->tmd_skb[i]); +#endif + } + + for(i=0;irecv_skb[i]) + dev_kfree_skb(p->recv_skb[i]); +#else + kfree(p->recvbounce[i]); +#endif + } + kfree(p->self); +} + + +/* + * stop and (re)start lance .. e.g after an error + */ +static void ni65_stop_start(struct net_device *dev,struct priv *p) +{ + int csr0 = CSR0_INEA; + + writedatareg(CSR0_STOP); + + if(debuglevel > 1) + printk(KERN_DEBUG "ni65_stop_start\n"); + + if(p->features & INIT_RING_BEFORE_START) { + int i; +#ifdef XMT_VIA_SKB + struct sk_buff *skb_save[TMDNUM]; +#endif + unsigned long buffer[TMDNUM]; + short blen[TMDNUM]; + + if(p->xmit_queued) { + while(1) { + if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN)) + break; + p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1); + if(p->tmdlast == p->tmdnum) + break; + } + } + + for(i=0;itmdhead + i; +#ifdef XMT_VIA_SKB + skb_save[i] = p->tmd_skb[i]; +#endif + buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer); + blen[i] = tmdp->blen; + tmdp->u.s.status = 0x0; + } + + for(i=0;irmdhead + i; + rmdp->u.s.status = RCV_OWN; + } + p->tmdnum = p->xmit_queued = 0; + writedatareg(CSR0_STRT | csr0); + + for(i=0;itmdlast) & (TMDNUM-1); + p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */ + p->tmdhead[i].blen = blen[num]; + if(p->tmdhead[i].u.s.status & XMIT_OWN) { + p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); + p->xmit_queued = 1; + writedatareg(CSR0_TDMD | CSR0_INEA | csr0); + } +#ifdef XMT_VIA_SKB + p->tmd_skb[i] = skb_save[num]; +#endif + } + p->rmdnum = p->tmdlast = 0; + if(!p->lock) + if (p->tmdnum || !p->xmit_queued) + netif_wake_queue(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + } + else + writedatareg(CSR0_STRT | csr0); +} + +/* + * init lance (write init-values .. 
init-buffers) (open-helper) + */ +static int ni65_lance_reinit(struct net_device *dev) +{ + int i; + struct priv *p = dev->ml_priv; + unsigned long flags; + + p->lock = 0; + p->xmit_queued = 0; + + flags=claim_dma_lock(); + disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */ + set_dma_mode(dev->dma,DMA_MODE_CASCADE); + enable_dma(dev->dma); + release_dma_lock(flags); + + outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */ + if( (i=readreg(CSR0) ) != 0x4) + { + printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name, + cards[p->cardno].cardname,(int) i); + flags=claim_dma_lock(); + disable_dma(dev->dma); + release_dma_lock(flags); + return 0; + } + + p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0; + for(i=0;itmdhead + i; +#ifdef XMT_VIA_SKB + if(p->tmd_skb[i]) { + dev_kfree_skb(p->tmd_skb[i]); + p->tmd_skb[i] = NULL; + } +#endif + tmdp->u.buffer = 0x0; + tmdp->u.s.status = XMIT_START | XMIT_END; + tmdp->blen = tmdp->status2 = 0; + } + + for(i=0;irmdhead + i; +#ifdef RCV_VIA_SKB + rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data); +#else + rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]); +#endif + rmdp->blen = -(R_BUF_SIZE-8); + rmdp->mlen = 0; + rmdp->u.s.status = RCV_OWN; + } + + if(dev->flags & IFF_PROMISC) + ni65_init_lance(p,dev->dev_addr,0x00,M_PROM); + else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI) + ni65_init_lance(p,dev->dev_addr,0xff,0x0); + else + ni65_init_lance(p,dev->dev_addr,0x00,0x00); + + /* + * ni65_set_lance_mem() sets L_ADDRREG to CSR0 + * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED + */ + + if(inw(PORT+L_DATAREG) & CSR0_IDON) { + ni65_set_performance(p); + /* init OK: start lance , enable interrupts */ + writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT); + return 1; /* ->OK */ + } + printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG)); + flags=claim_dma_lock(); + disable_dma(dev->dma); + release_dma_lock(flags); + return 0; /* ->Error */ +} + +/* + * interrupt handler + */ +static irqreturn_t ni65_interrupt(int irq, void * dev_id) +{ + int csr0 = 0; + struct net_device *dev = dev_id; + struct priv *p; + int bcnt = 32; + + p = dev->ml_priv; + + spin_lock(&p->ring_lock); + + while(--bcnt) { + csr0 = inw(PORT+L_DATAREG); + +#if 0 + writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */ +#else + writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */ +#endif + + if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT))) + break; + + if(csr0 & CSR0_RINT) /* RECV-int? */ + ni65_recv_intr(dev,csr0); + if(csr0 & CSR0_TINT) /* XMIT-int? */ + ni65_xmit_intr(dev,csr0); + + if(csr0 & CSR0_ERR) + { + if(debuglevel > 1) + printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0); + if(csr0 & CSR0_BABL) + dev->stats.tx_errors++; + if(csr0 & CSR0_MISS) { + int i; + for(i=0;irmdhead[i].u.s.status); + printk("\n"); + dev->stats.rx_errors++; + } + if(csr0 & CSR0_MERR) { + if(debuglevel > 1) + printk(KERN_ERR "%s: Ooops .. 
memory error: %04x.\n",dev->name,csr0); + ni65_stop_start(dev,p); + } + } + } + +#ifdef RCV_PARANOIA_CHECK +{ + int j; + for(j=0;j0;i--) { + num2 = (p->rmdnum + i) & (RMDNUM-1); + if(!(p->rmdhead[num2].u.s.status & RCV_OWN)) + break; + } + + if(i) { + int k, num1; + for(k=0;krmdnum + k) & (RMDNUM-1); + if(!(p->rmdhead[num1].u.s.status & RCV_OWN)) + break; + } + if(!k) + break; + + if(debuglevel > 0) + { + char buf[256],*buf1; + buf1 = buf; + for(k=0;krmdhead[k].u.s.status)); /* & RCV_OWN) ); */ + buf1 += 3; + } + *buf1 = 0; + printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf); + } + + p->rmdnum = num1; + ni65_recv_intr(dev,csr0); + if((p->rmdhead[num2].u.s.status & RCV_OWN)) + break; /* ok, we are 'in sync' again */ + } + else + break; + } +} +#endif + + if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) { + printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name); + ni65_stop_start(dev,p); + } + else + writedatareg(CSR0_INEA); + + spin_unlock(&p->ring_lock); + return IRQ_HANDLED; +} + +/* + * We have received an Xmit-Interrupt .. + * send a new packet if necessary + */ +static void ni65_xmit_intr(struct net_device *dev,int csr0) +{ + struct priv *p = dev->ml_priv; + + while(p->xmit_queued) + { + struct tmd *tmdp = p->tmdhead + p->tmdlast; + int tmdstat = tmdp->u.s.status; + + if(tmdstat & XMIT_OWN) + break; + + if(tmdstat & XMIT_ERR) + { +#if 0 + if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3) + printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name); +#endif + /* checking some errors */ + if(tmdp->status2 & XMIT_RTRY) + dev->stats.tx_aborted_errors++; + if(tmdp->status2 & XMIT_LCAR) + dev->stats.tx_carrier_errors++; + if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) { + /* this stops the xmitter */ + dev->stats.tx_fifo_errors++; + if(debuglevel > 0) + printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name); + if(p->features & INIT_RING_BEFORE_START) { + tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */ + ni65_stop_start(dev,p); + break; /* no more Xmit processing .. */ + } + else + ni65_stop_start(dev,p); + } + if(debuglevel > 2) + printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2); + if(!(csr0 & CSR0_BABL)) /* don't count errors twice */ + dev->stats.tx_errors++; + tmdp->status2 = 0; + } + else { + dev->stats.tx_bytes -= (short)(tmdp->blen); + dev->stats.tx_packets++; + } + +#ifdef XMT_VIA_SKB + if(p->tmd_skb[p->tmdlast]) { + dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]); + p->tmd_skb[p->tmdlast] = NULL; + } +#endif + + p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1); + if(p->tmdlast == p->tmdnum) + p->xmit_queued = 0; + } + netif_wake_queue(dev); +} + +/* + * We have received a packet + */ +static void ni65_recv_intr(struct net_device *dev,int csr0) +{ + struct rmd *rmdp; + int rmdstat,len; + int cnt=0; + struct priv *p = dev->ml_priv; + + rmdp = p->rmdhead + p->rmdnum; + while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN)) + { + cnt++; + if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? 
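+ (a good frame carries both RCV_START and RCV_END in one descriptor and no RCV_ERR; anything else is either an error or a frame that spilled across ring buffers)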
*/ + { + if(!(rmdstat & RCV_ERR)) { + if(rmdstat & RCV_START) + { + dev->stats.rx_length_errors++; + printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff); + } + } + else { + if(debuglevel > 2) + printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n", + dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) ); + if(rmdstat & RCV_FRAM) + dev->stats.rx_frame_errors++; + if(rmdstat & RCV_OFLO) + dev->stats.rx_over_errors++; + if(rmdstat & RCV_CRC) + dev->stats.rx_crc_errors++; + if(rmdstat & RCV_BUF_ERR) + dev->stats.rx_fifo_errors++; + } + if(!(csr0 & CSR0_MISS)) /* don't count errors twice */ + dev->stats.rx_errors++; + } + else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60) + { +#ifdef RCV_VIA_SKB + struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC); + if (skb) + skb_reserve(skb,16); +#else + struct sk_buff *skb = dev_alloc_skb(len+2); +#endif + if(skb) + { + skb_reserve(skb,2); +#ifdef RCV_VIA_SKB + if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { + skb_put(skb,len); + skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len); + } + else { + struct sk_buff *skb1 = p->recv_skb[p->rmdnum]; + skb_put(skb,R_BUF_SIZE); + p->recv_skb[p->rmdnum] = skb; + rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data); + skb = skb1; + skb_trim(skb,len); + } +#else + skb_put(skb,len); + skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len); +#endif + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + } + else + { + printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name); + dev->stats.rx_dropped++; + } + } + else { + printk(KERN_INFO "%s: received runt packet\n",dev->name); + dev->stats.rx_errors++; + } + rmdp->blen = -(R_BUF_SIZE-8); + rmdp->mlen = 0; + rmdp->u.s.status = RCV_OWN; /* change owner */ + p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1); + rmdp = p->rmdhead + p->rmdnum; + } +} + +/* + * kick xmitter .. + */ + +static void ni65_timeout(struct net_device *dev) +{ + int i; + struct priv *p = dev->ml_priv; + + printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name); + for(i=0;itmdhead[i].u.s.status); + printk("\n"); + ni65_lance_reinit(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* + * Send a packet + */ + +static netdev_tx_t ni65_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct priv *p = dev->ml_priv; + + netif_stop_queue(dev); + + if (test_and_set_bit(0, (void*)&p->lock)) { + printk(KERN_ERR "%s: Queue was locked.\n", dev->name); + return NETDEV_TX_BUSY; + } + + { + short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + struct tmd *tmdp; + unsigned long flags; + +#ifdef XMT_VIA_SKB + if( (unsigned long) (skb->data + skb->len) > 0x1000000) { +#endif + + skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum], + skb->len > T_BUF_SIZE ? 
T_BUF_SIZE : + skb->len); + if (len > skb->len) + memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); + dev_kfree_skb (skb); + + spin_lock_irqsave(&p->ring_lock, flags); + tmdp = p->tmdhead + p->tmdnum; + tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]); + p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1); + +#ifdef XMT_VIA_SKB + } + else { + spin_lock_irqsave(&p->ring_lock, flags); + + tmdp = p->tmdhead + p->tmdnum; + tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data); + p->tmd_skb[p->tmdnum] = skb; + } +#endif + tmdp->blen = -len; + + tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; + writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */ + + p->xmit_queued = 1; + p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); + + if(p->tmdnum != p->tmdlast) + netif_wake_queue(dev); + + p->lock = 0; + + spin_unlock_irqrestore(&p->ring_lock, flags); + } + + return NETDEV_TX_OK; +} + +static void set_multicast_list(struct net_device *dev) +{ + if(!ni65_lance_reinit(dev)) + printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name); + netif_wake_queue(dev); +} + +#ifdef MODULE +static struct net_device *dev_ni65; + +module_param(irq, int, 0); +module_param(io, int, 0); +module_param(dma, int, 0); +MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); +MODULE_PARM_DESC(io, "ni6510 I/O base address"); +MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); + +int __init init_module(void) +{ + dev_ni65 = ni65_probe(-1); + return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0; +} + +void __exit cleanup_module(void) +{ + unregister_netdev(dev_ni65); + cleanup_card(dev_ni65); + free_netdev(dev_ni65); +} +#endif /* MODULE */ + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/ni65.h b/drivers/net/ethernet/amd/ni65.h new file mode 100644 index 0000000..e6217e3 --- /dev/null +++ b/drivers/net/ethernet/amd/ni65.h @@ -0,0 +1,121 @@ +/* am7990 (lance) definitions + * + * This is an extension to the Linux operating system, and is covered by + * the same GNU General Public License that covers that work. + * + * Michael Hipp + * email: mhipp@student.uni-tuebingen.de + * + * sources: (mail me or ask archie if you need them) + * crynwr-packet-driver + */ + +/* + * Control and Status Register 0 (CSR0) bit definitions + * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write) + * + */ + +#define CSR0_ERR 0x8000 /* Error summary (R) */ +#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */ +#define CSR0_CERR 0x2000 /* Collision Error (RC) */ +#define CSR0_MISS 0x1000 /* Missed packet (RC) */ +#define CSR0_MERR 0x0800 /* Memory Error (RC) */ +#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */ +#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */ +#define CSR0_IDON 0x0100 /* Initialization Done (RC) */ +#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */ +#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */ +#define CSR0_RXON 0x0020 /* Receiver on (R) */ +#define CSR0_TXON 0x0010 /* Transmitter on (R) */ +#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */ +#define CSR0_STOP 0x0004 /* Stop (RS) */ +#define CSR0_STRT 0x0002 /* Start (RS) */ +#define CSR0_INIT 0x0001 /* Initialize (RS) */ + +#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */ +/* + * Initialization Block Mode operation Bit Definitions.
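+ * (these bits are written into the 'mode' word of struct init_block below and are latched by the chip when initialization is triggered via CSR0_INIT)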
*/ + +#define M_PROM 0x8000 /* Promiscuous Mode */ +#define M_INTL 0x0040 /* Internal Loopback */ +#define M_DRTY 0x0020 /* Disable Retry */ +#define M_COLL 0x0010 /* Force Collision */ +#define M_DTCR 0x0008 /* Disable Transmit CRC */ +#define M_LOOP 0x0004 /* Loopback */ +#define M_DTX 0x0002 /* Disable the Transmitter */ +#define M_DRX 0x0001 /* Disable the Receiver */ + + +/* + * Receive message descriptor bit definitions. + */ + +#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */ +#define RCV_ERR 0x40 /* Error Summary */ +#define RCV_FRAM 0x20 /* Framing Error */ +#define RCV_OFLO 0x10 /* Overflow Error */ +#define RCV_CRC 0x08 /* CRC Error */ +#define RCV_BUF_ERR 0x04 /* Buffer Error */ +#define RCV_START 0x02 /* Start of Packet */ +#define RCV_END 0x01 /* End of Packet */ + + +/* + * Transmit message descriptor bit definitions. + */ + +#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */ +#define XMIT_ERR 0x40 /* Error Summary */ +#define XMIT_RETRY 0x10 /* more than 1 retry needed to Xmit */ +#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */ +#define XMIT_DEF 0x04 /* Deferred */ +#define XMIT_START 0x02 /* Start of Packet */ +#define XMIT_END 0x01 /* End of Packet */ + +/* + * transmit status (2) (valid if XMIT_ERR == 1) + */ + +#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */ +#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */ +#define XMIT_LCAR 0x0800 /* Loss of Carrier */ +#define XMIT_LCOL 0x1000 /* Late collision */ +#define XMIT_RESERV 0x2000 /* Reserved */ +#define XMIT_UFLO 0x4000 /* Underflow (late memory) */ +#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */ + +struct init_block { + unsigned short mode; + unsigned char eaddr[6]; + unsigned char filter[8]; + /* bit 29-31: number of rmd's (power of 2) */ + u32 rrp; /* receive ring pointer (align 8) */ + /* bit 29-31: number of tmd's (power of 2) */ + u32 trp; /* transmit ring pointer (align 8) */ +}; + +struct rmd { /* Receive Message Descriptor */ + union { + volatile u32 buffer; + struct { + volatile unsigned char dummy[3]; + volatile unsigned char status; + } s; + } u; + volatile short blen; + volatile unsigned short mlen; +}; + +struct tmd { + union { + volatile u32 buffer; + struct { + volatile unsigned char dummy[3]; + volatile unsigned char status; + } s; + } u; + volatile unsigned short blen; + volatile unsigned short status2; +}; diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c new file mode 100644 index 0000000..9d70b65 --- /dev/null +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -0,0 +1,1525 @@ +/* ---------------------------------------------------------------------------- +Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN. + nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao + + The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media + Access Controller for Ethernet (MACE). It is essentially the Am2150 + PCMCIA Ethernet card contained in the Am2150 Demo Kit. + +Written by Roger C. Pao + Copyright 1995 Roger C. Pao + Linux 2.5 cleanups Copyright Red Hat 2003 + + This software may be used and distributed according to the terms of + the GNU General Public License.
+ +Ported to Linux 1.3.* network driver environment by + Matti Aarnio + +References + + Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993 + Am79C940 (MACE) Data Sheet, 1994 + Am79C90 (C-LANCE) Data Sheet, 1994 + Linux PCMCIA Programmer's Guide v1.17 + /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8 + + Eric Mears, New Media Corporation + Tom Pollard, New Media Corporation + Dean Siasoyco, New Media Corporation + Ken Lesniak, Silicon Graphics, Inc. + Donald Becker + David Hinds + + The Linux client driver is based on the 3c589_cs.c client driver by + David Hinds. + + The Linux network driver outline is based on the 3c589_cs.c driver, + the 8390.c driver, and the example skeleton.c kernel code, which are + by Donald Becker. + + The Am2150 network driver hardware interface code is based on the + OS/9000 driver for the New Media Ethernet LAN by Eric Mears. + + Special thanks for testing and help in debugging this driver goes + to Ken Lesniak. + +------------------------------------------------------------------------------- +Driver Notes and Issues +------------------------------------------------------------------------------- + +1. Developed on a Dell 320SLi + PCMCIA Card Services 2.6.2 + Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386 + +2. rc.pcmcia may require loading pcmcia_core with io_speed=300: + 'insmod pcmcia_core.o io_speed=300'. + This will avoid problems with fast systems which causes rx_framecnt + to return random values. + +3. If hot extraction does not work for you, use 'ifconfig eth0 down' + before extraction. + +4. There is a bad slow-down problem in this driver. + +5. Future: Multicast processing. In the meantime, do _not_ compile your + kernel with multicast ip enabled. + +------------------------------------------------------------------------------- +History +------------------------------------------------------------------------------- +Log: nmclan_cs.c,v + * 2.5.75-ac1 2003/07/11 Alan Cox + * Fixed hang on card eject as we probe it + * Cleaned up to use new style locking. + * + * Revision 0.16 1995/07/01 06:42:17 rpao + * Bug fix: nmclan_reset() called CardServices incorrectly. + * + * Revision 0.15 1995/05/24 08:09:47 rpao + * Re-implement MULTI_TX dev->tbusy handling. + * + * Revision 0.14 1995/05/23 03:19:30 rpao + * Added, in nmclan_config(), "tuple.Attributes = 0;". + * Modified MACE ID check to ignore chip revision level. + * Avoid tx_free_frames race condition between _start_xmit and _interrupt. + * + * Revision 0.13 1995/05/18 05:56:34 rpao + * Statistics changes. + * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list. + * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup. + * + * Revision 0.12 1995/05/14 00:12:23 rpao + * Statistics overhaul. + * + +95/05/13 rpao V0.10a + Bug fix: MACE statistics counters used wrong I/O ports. + Bug fix: mace_interrupt() needed to allow statistics to be + processed without RX or TX interrupts pending. +95/05/11 rpao V0.10 + Multiple transmit request processing. + Modified statistics to use MACE counters where possible. +95/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO. + *Released +95/05/10 rpao V0.08 + Bug fix: Make all non-exported functions private by using + static keyword. + Bug fix: Test IntrCnt _before_ reading MACE_IR. +95/05/10 rpao V0.07 Statistics. +95/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states. 
+ +---------------------------------------------------------------------------- */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "nmclan_cs" +#define DRV_VERSION "0.16" + + +/* ---------------------------------------------------------------------------- +Conditional Compilation Options +---------------------------------------------------------------------------- */ + +#define MULTI_TX 0 +#define RESET_ON_TIMEOUT 1 +#define TX_INTERRUPTABLE 1 +#define RESET_XILINX 0 + +/* ---------------------------------------------------------------------------- +Include Files +---------------------------------------------------------------------------- */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +/* ---------------------------------------------------------------------------- +Defines +---------------------------------------------------------------------------- */ + +#define ETHER_ADDR_LEN ETH_ALEN + /* 6 bytes in an Ethernet Address */ +#define MACE_LADRF_LEN 8 + /* 8 bytes in Logical Address Filter */ + +/* Loop Control Defines */ +#define MACE_MAX_IR_ITERATIONS 10 +#define MACE_MAX_RX_ITERATIONS 12 + /* + TBD: Dean brought this up, and I assumed the hardware would + handle it: + + If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be + non-zero when the isr exits. We may not get another interrupt + to process the remaining packets for some time. + */ + +/* +The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA) +which manages the interface between the MACE and the PCMCIA bus. It +also includes buffer management for the 32K x 8 SRAM to control up to +four transmit and 12 receive frames at a time. 
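+(As a rough check, assuming about 1.5 kB of SRAM per maximum-size Ethernet frame: 16 slots * 1518 bytes is roughly 24 kB, which leaves the remainder of the 32 kB for the buffer-management state.)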
+*/ +#define AM2150_MAX_TX_FRAMES 4 +#define AM2150_MAX_RX_FRAMES 12 + +/* Am2150 Ethernet Card I/O Mapping */ +#define AM2150_RCV 0x00 +#define AM2150_XMT 0x04 +#define AM2150_XMT_SKIP 0x09 +#define AM2150_RCV_NEXT 0x0A +#define AM2150_RCV_FRAME_COUNT 0x0B +#define AM2150_MACE_BANK 0x0C +#define AM2150_MACE_BASE 0x10 + +/* MACE Registers */ +#define MACE_RCVFIFO 0 +#define MACE_XMTFIFO 1 +#define MACE_XMTFC 2 +#define MACE_XMTFS 3 +#define MACE_XMTRC 4 +#define MACE_RCVFC 5 +#define MACE_RCVFS 6 +#define MACE_FIFOFC 7 +#define MACE_IR 8 +#define MACE_IMR 9 +#define MACE_PR 10 +#define MACE_BIUCC 11 +#define MACE_FIFOCC 12 +#define MACE_MACCC 13 +#define MACE_PLSCC 14 +#define MACE_PHYCC 15 +#define MACE_CHIPIDL 16 +#define MACE_CHIPIDH 17 +#define MACE_IAC 18 +/* Reserved */ +#define MACE_LADRF 20 +#define MACE_PADR 21 +/* Reserved */ +/* Reserved */ +#define MACE_MPC 24 +/* Reserved */ +#define MACE_RNTPC 26 +#define MACE_RCVCC 27 +/* Reserved */ +#define MACE_UTR 29 +#define MACE_RTR1 30 +#define MACE_RTR2 31 + +/* MACE Bit Masks */ +#define MACE_XMTRC_EXDEF 0x80 +#define MACE_XMTRC_XMTRC 0x0F + +#define MACE_XMTFS_XMTSV 0x80 +#define MACE_XMTFS_UFLO 0x40 +#define MACE_XMTFS_LCOL 0x20 +#define MACE_XMTFS_MORE 0x10 +#define MACE_XMTFS_ONE 0x08 +#define MACE_XMTFS_DEFER 0x04 +#define MACE_XMTFS_LCAR 0x02 +#define MACE_XMTFS_RTRY 0x01 + +#define MACE_RCVFS_RCVSTS 0xF000 +#define MACE_RCVFS_OFLO 0x8000 +#define MACE_RCVFS_CLSN 0x4000 +#define MACE_RCVFS_FRAM 0x2000 +#define MACE_RCVFS_FCS 0x1000 + +#define MACE_FIFOFC_RCVFC 0xF0 +#define MACE_FIFOFC_XMTFC 0x0F + +#define MACE_IR_JAB 0x80 +#define MACE_IR_BABL 0x40 +#define MACE_IR_CERR 0x20 +#define MACE_IR_RCVCCO 0x10 +#define MACE_IR_RNTPCO 0x08 +#define MACE_IR_MPCO 0x04 +#define MACE_IR_RCVINT 0x02 +#define MACE_IR_XMTINT 0x01 + +#define MACE_MACCC_PROM 0x80 +#define MACE_MACCC_DXMT2PD 0x40 +#define MACE_MACCC_EMBA 0x20 +#define MACE_MACCC_RESERVED 0x10 +#define MACE_MACCC_DRCVPA 0x08 +#define MACE_MACCC_DRCVBC 0x04 +#define MACE_MACCC_ENXMT 0x02 +#define MACE_MACCC_ENRCV 0x01 + +#define MACE_PHYCC_LNKFL 0x80 +#define MACE_PHYCC_DLNKTST 0x40 +#define MACE_PHYCC_REVPOL 0x20 +#define MACE_PHYCC_DAPC 0x10 +#define MACE_PHYCC_LRT 0x08 +#define MACE_PHYCC_ASEL 0x04 +#define MACE_PHYCC_RWAKE 0x02 +#define MACE_PHYCC_AWAKE 0x01 + +#define MACE_IAC_ADDRCHG 0x80 +#define MACE_IAC_PHYADDR 0x04 +#define MACE_IAC_LOGADDR 0x02 + +#define MACE_UTR_RTRE 0x80 +#define MACE_UTR_RTRD 0x40 +#define MACE_UTR_RPA 0x20 +#define MACE_UTR_FCOLL 0x10 +#define MACE_UTR_RCVFCSE 0x08 +#define MACE_UTR_LOOP_INCL_MENDEC 0x06 +#define MACE_UTR_LOOP_NO_MENDEC 0x04 +#define MACE_UTR_LOOP_EXTERNAL 0x02 +#define MACE_UTR_LOOP_NONE 0x00 +#define MACE_UTR_RESERVED 0x01 + +/* Switch MACE register bank (only 0 and 1 are valid) */ +#define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK) + +#define MACE_IMR_DEFAULT \ + (0xFF - \ + ( \ + MACE_IR_CERR | \ + MACE_IR_RCVCCO | \ + MACE_IR_RNTPCO | \ + MACE_IR_MPCO | \ + MACE_IR_RCVINT | \ + MACE_IR_XMTINT \ + ) \ + ) +#undef MACE_IMR_DEFAULT +#define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */ + +#define TX_TIMEOUT ((400*HZ)/1000) + +/* ---------------------------------------------------------------------------- +Type Definitions +---------------------------------------------------------------------------- */ + +typedef struct _mace_statistics { + /* MACE_XMTFS */ + int xmtsv; + int uflo; + int lcol; + int more; + int one; + int defer; + int lcar; + int rtry; + + /* MACE_XMTRC */ + int exdef; + 
int xmtrc; + + /* RFS1--Receive Status (RCVSTS) */ + int oflo; + int clsn; + int fram; + int fcs; + + /* RFS2--Runt Packet Count (RNTPC) */ + int rfs_rntpc; + + /* RFS3--Receive Collision Count (RCVCC) */ + int rfs_rcvcc; + + /* MACE_IR */ + int jab; + int babl; + int cerr; + int rcvcco; + int rntpco; + int mpco; + + /* MACE_MPC */ + int mpc; + + /* MACE_RNTPC */ + int rntpc; + + /* MACE_RCVCC */ + int rcvcc; +} mace_statistics; + +typedef struct _mace_private { + struct pcmcia_device *p_dev; + struct net_device_stats linux_stats; /* Linux statistics counters */ + mace_statistics mace_stats; /* MACE chip statistics counters */ + + /* restore_multicast_list() state variables */ + int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */ + int multicast_num_addrs; + + char tx_free_frames; /* Number of free transmit frame buffers */ + char tx_irq_disabled; /* MACE TX interrupt disabled */ + + spinlock_t bank_lock; /* Must be held if you step off bank 0 */ +} mace_private; + +/* ---------------------------------------------------------------------------- +Private Global Variables +---------------------------------------------------------------------------- */ + +static const char *if_names[]={ + "Auto", "10baseT", "BNC", +}; + +/* ---------------------------------------------------------------------------- +Parameters + These are the parameters that can be set during loading with + 'insmod'. +---------------------------------------------------------------------------- */ + +MODULE_DESCRIPTION("New Media PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); + +#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) + +/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */ +INT_MODULE_PARM(if_port, 0); + + +/* ---------------------------------------------------------------------------- +Function Prototypes +---------------------------------------------------------------------------- */ + +static int nmclan_config(struct pcmcia_device *link); +static void nmclan_release(struct pcmcia_device *link); + +static void nmclan_reset(struct net_device *dev); +static int mace_config(struct net_device *dev, struct ifmap *map); +static int mace_open(struct net_device *dev); +static int mace_close(struct net_device *dev); +static netdev_tx_t mace_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static void mace_tx_timeout(struct net_device *dev); +static irqreturn_t mace_interrupt(int irq, void *dev_id); +static struct net_device_stats *mace_get_stats(struct net_device *dev); +static int mace_rx(struct net_device *dev, unsigned char RxCnt); +static void restore_multicast_list(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static const struct ethtool_ops netdev_ethtool_ops; + + +static void nmclan_detach(struct pcmcia_device *p_dev); + +static const struct net_device_ops mace_netdev_ops = { + .ndo_open = mace_open, + .ndo_stop = mace_close, + .ndo_start_xmit = mace_start_xmit, + .ndo_tx_timeout = mace_tx_timeout, + .ndo_set_config = mace_config, + .ndo_get_stats = mace_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int nmclan_probe(struct pcmcia_device *link) +{ + mace_private *lp; + struct net_device *dev; + + dev_dbg(&link->dev, "nmclan_attach()\n"); + + /* Create new ethernet device */ + dev = alloc_etherdev(sizeof(mace_private)); + if (!dev) + return -ENOMEM; + lp = netdev_priv(dev); + lp->p_dev = link; 
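+  /* cross-link the PCMCIA device and its net_device */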
+ link->priv = dev; + + spin_lock_init(&lp->bank_lock); + link->resource[0]->end = 32; + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + link->config_flags |= CONF_ENABLE_IRQ; + link->config_index = 1; + link->config_regs = PRESENT_OPTION; + + lp->tx_free_frames=AM2150_MAX_TX_FRAMES; + + dev->netdev_ops = &mace_netdev_ops; + SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); + dev->watchdog_timeo = TX_TIMEOUT; + + return nmclan_config(link); +} /* nmclan_attach */ + +static void nmclan_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "nmclan_detach\n"); + + unregister_netdev(dev); + + nmclan_release(link); + + free_netdev(dev); +} /* nmclan_detach */ + +/* ---------------------------------------------------------------------------- +mace_read + Reads a MACE register. This is bank independent; however, the + caller must ensure that this call is not interruptable. We are + assuming that during normal operation, the MACE is always in + bank 0. +---------------------------------------------------------------------------- */ +static int mace_read(mace_private *lp, unsigned int ioaddr, int reg) +{ + int data = 0xFF; + unsigned long flags; + + switch (reg >> 4) { + case 0: /* register 0-15 */ + data = inb(ioaddr + AM2150_MACE_BASE + reg); + break; + case 1: /* register 16-31 */ + spin_lock_irqsave(&lp->bank_lock, flags); + MACEBANK(1); + data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F)); + MACEBANK(0); + spin_unlock_irqrestore(&lp->bank_lock, flags); + break; + } + return data & 0xFF; +} /* mace_read */ + +/* ---------------------------------------------------------------------------- +mace_write + Writes to a MACE register. This is bank independent; however, + the caller must ensure that this call is not interruptable. We + are assuming that during normal operation, the MACE is always in + bank 0. +---------------------------------------------------------------------------- */ +static void mace_write(mace_private *lp, unsigned int ioaddr, int reg, + int data) +{ + unsigned long flags; + + switch (reg >> 4) { + case 0: /* register 0-15 */ + outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg); + break; + case 1: /* register 16-31 */ + spin_lock_irqsave(&lp->bank_lock, flags); + MACEBANK(1); + outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F)); + MACEBANK(0); + spin_unlock_irqrestore(&lp->bank_lock, flags); + break; + } +} /* mace_write */ + +/* ---------------------------------------------------------------------------- +mace_init + Resets the MACE chip. +---------------------------------------------------------------------------- */ +static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr) +{ + int i; + int ct = 0; + + /* MACE Software reset */ + mace_write(lp, ioaddr, MACE_BIUCC, 1); + while (mace_read(lp, ioaddr, MACE_BIUCC) & 0x01) { + /* Wait for reset bit to be cleared automatically after <= 200ns */; + if(++ct > 500) + { + pr_err("reset failed, card removed?\n"); + return -1; + } + udelay(1); + } + mace_write(lp, ioaddr, MACE_BIUCC, 0); + + /* The Am2150 requires that the MACE FIFOs operate in burst mode. */ + mace_write(lp, ioaddr, MACE_FIFOCC, 0x0F); + + mace_write(lp,ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */ + mace_write(lp, ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */ + + /* + * Bit 2-1 PORTSEL[1-0] Port Select. + * 00 AUI/10Base-2 + * 01 10Base-T + * 10 DAI Port (reserved in Am2150) + * 11 GPSI + * For this card, only the first two are valid. 
+ * So, PLSCC should be set to + * 0x00 for 10Base-2 + * 0x02 for 10Base-T + * Or just set ASEL in PHYCC below! + */ + switch (if_port) { + case 1: + mace_write(lp, ioaddr, MACE_PLSCC, 0x02); + break; + case 2: + mace_write(lp, ioaddr, MACE_PLSCC, 0x00); + break; + default: + mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4); + /* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden, + and the MACE device will automatically select the operating media + interface port. */ + break; + } + + mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR); + /* Poll ADDRCHG bit */ + ct = 0; + while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) + { + if(++ ct > 500) + { + pr_err("ADDRCHG timeout, card removed?\n"); + return -1; + } + } + /* Set PADR register */ + for (i = 0; i < ETHER_ADDR_LEN; i++) + mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]); + + /* MAC Configuration Control Register should be written last */ + /* Let set_multicast_list set this. */ + /* mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */ + mace_write(lp, ioaddr, MACE_MACCC, 0x00); + return 0; +} /* mace_init */ + +static int nmclan_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + mace_private *lp = netdev_priv(dev); + u8 *buf; + size_t len; + int i, ret; + unsigned int ioaddr; + + dev_dbg(&link->dev, "nmclan_config\n"); + + link->io_lines = 5; + ret = pcmcia_request_io(link); + if (ret) + goto failed; + ret = pcmcia_request_exclusive_irq(link, mace_interrupt); + if (ret) + goto failed; + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + + ioaddr = dev->base_addr; + + /* Read the ethernet address from the CIS. */ + len = pcmcia_get_tuple(link, 0x80, &buf); + if (!buf || len < ETHER_ADDR_LEN) { + kfree(buf); + goto failed; + } + memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN); + kfree(buf); + + /* Verify configuration by reading the MACE ID. 
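+     (CHIPIDL must read back 0x40 and the low nibble of CHIPIDH must be 9; the high nibble of CHIPIDH is the silicon revision and is ignored, hence the "0x40 0x?9" in the message below.)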
*/ + { + char sig[2]; + + sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL); + sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH); + if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) { + dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n", + sig[0], sig[1]); + } else { + pr_notice("mace id not found: %x %x should be 0x40 0x?9\n", + sig[0], sig[1]); + return -ENODEV; + } + } + + if(mace_init(lp, ioaddr, dev->dev_addr) == -1) + goto failed; + + /* The if_port symbol can be set when the module is loaded */ + if (if_port <= 2) + dev->if_port = if_port; + else + pr_notice("invalid if_port requested\n"); + + SET_NETDEV_DEV(dev, &link->dev); + + i = register_netdev(dev); + if (i != 0) { + pr_notice("register_netdev() failed\n"); + goto failed; + } + + netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n", + dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr); + return 0; + +failed: + nmclan_release(link); + return -ENODEV; +} /* nmclan_config */ + +static void nmclan_release(struct pcmcia_device *link) +{ + dev_dbg(&link->dev, "nmclan_release\n"); + pcmcia_disable_device(link); +} + +static int nmclan_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int nmclan_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + nmclan_reset(dev); + netif_device_attach(dev); + } + + return 0; +} + + +/* ---------------------------------------------------------------------------- +nmclan_reset + Reset and restore all of the Xilinx and MACE registers. +---------------------------------------------------------------------------- */ +static void nmclan_reset(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + +#if RESET_XILINX + struct pcmcia_device *link = &lp->link; + u8 OrigCorValue; + + /* Save original COR value */ + pcmcia_read_config_byte(link, CISREG_COR, &OrigCorValue); + + /* Reset Xilinx */ + dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%x, resetting...\n", + OrigCorValue); + pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET); + /* Need to wait for 20 ms for PCMCIA to finish reset. */ + + /* Restore original COR configuration index */ + pcmcia_write_config_byte(link, CISREG_COR, + (COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK))); + /* Xilinx is now completely reset along with the MACE chip. */ + lp->tx_free_frames=AM2150_MAX_TX_FRAMES; + +#endif /* #if RESET_XILINX */ + + /* Xilinx is now completely reset along with the MACE chip. */ + lp->tx_free_frames=AM2150_MAX_TX_FRAMES; + + /* Reinitialize the MACE chip for operation. */ + mace_init(lp, dev->base_addr, dev->dev_addr); + mace_write(lp, dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT); + + /* Restore the multicast list and enable TX and RX. */ + restore_multicast_list(dev); +} /* nmclan_reset */ + +/* ---------------------------------------------------------------------------- +mace_config + [Someone tell me what this is supposed to do? Is if_port a defined + standard? If so, there should be defines to indicate 1=10Base-T, + 2=10Base-2, etc. including limited automatic detection.] 
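+ [For this driver, at least, the convention follows the if_port module parameter: 0 = auto, 1 = 10Base-T, 2 = 10Base-2 (BNC); anything else is rejected with -EINVAL below.]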
+---------------------------------------------------------------------------- */ +static int mace_config(struct net_device *dev, struct ifmap *map) +{ + if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { + if (map->port <= 2) { + dev->if_port = map->port; + netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); + } else + return -EINVAL; + } + return 0; +} /* mace_config */ + +/* ---------------------------------------------------------------------------- +mace_open + Open device driver. +---------------------------------------------------------------------------- */ +static int mace_open(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + mace_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + if (!pcmcia_dev_present(link)) + return -ENODEV; + + link->open++; + + MACEBANK(0); + + netif_start_queue(dev); + nmclan_reset(dev); + + return 0; /* Always succeed */ +} /* mace_open */ + +/* ---------------------------------------------------------------------------- +mace_close + Closes device driver. +---------------------------------------------------------------------------- */ +static int mace_close(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + mace_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); + + /* Mask off all interrupts from the MACE chip. */ + outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR); + + link->open--; + netif_stop_queue(dev); + + return 0; +} /* mace_close */ + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, +}; + +/* ---------------------------------------------------------------------------- +mace_start_xmit + This routine begins the packet transmit function. When completed, + it will generate a transmit interrupt. + + According to /usr/src/linux/net/inet/dev.c, if _start_xmit + returns 0, the "packet is now solely the responsibility of the + driver." If _start_xmit returns non-zero, the "transmission + failed, put skb back into a list." +---------------------------------------------------------------------------- */ + +static void mace_tx_timeout(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + netdev_notice(dev, "transmit timed out -- "); +#if RESET_ON_TIMEOUT + pr_cont("resetting card\n"); + pcmcia_reset_card(link->socket); +#else /* #if RESET_ON_TIMEOUT */ + pr_cont("NOT resetting card\n"); +#endif /* #if RESET_ON_TIMEOUT */ + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +static netdev_tx_t mace_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + + netif_stop_queue(dev); + + pr_debug("%s: mace_start_xmit(length = %ld) called.\n", + dev->name, (long)skb->len); + +#if (!TX_INTERRUPTABLE) + /* Disable MACE TX interrupts. */ + outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT, + ioaddr + AM2150_MACE_BASE + MACE_IMR); + lp->tx_irq_disabled=1; +#endif /* #if (!TX_INTERRUPTABLE) */ + + { + /* This block must not be interrupted by another transmit request! + mace_tx_timeout will take care of timer-based retransmissions from + the upper layers. 
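+       (netif_stop_queue() above closes the window on the stack side; when
+       TX_INTERRUPTABLE is not set, the IMR write above also masks XMTINT
+       on the chip side.)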
The interrupt handler is guaranteed never to + service a transmit interrupt while we are in here. + */ + + lp->linux_stats.tx_bytes += skb->len; + lp->tx_free_frames--; + + /* WARNING: Write the _exact_ number of bytes written in the header! */ + /* Put out the word header [must be an outw()] . . . */ + outw(skb->len, ioaddr + AM2150_XMT); + /* . . . and the packet [may be any combination of outw() and outb()] */ + outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1); + if (skb->len & 1) { + /* Odd byte transfer */ + outb(skb->data[skb->len-1], ioaddr + AM2150_XMT); + } + +#if MULTI_TX + if (lp->tx_free_frames > 0) + netif_start_queue(dev); +#endif /* #if MULTI_TX */ + } + +#if (!TX_INTERRUPTABLE) + /* Re-enable MACE TX interrupts. */ + lp->tx_irq_disabled=0; + outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR); +#endif /* #if (!TX_INTERRUPTABLE) */ + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} /* mace_start_xmit */ + +/* ---------------------------------------------------------------------------- +mace_interrupt + The interrupt handler. +---------------------------------------------------------------------------- */ +static irqreturn_t mace_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + mace_private *lp = netdev_priv(dev); + unsigned int ioaddr; + int status; + int IntrCnt = MACE_MAX_IR_ITERATIONS; + + if (dev == NULL) { + pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n", + irq); + return IRQ_NONE; + } + + ioaddr = dev->base_addr; + + if (lp->tx_irq_disabled) { + const char *msg; + if (lp->tx_irq_disabled) + msg = "Interrupt with tx_irq_disabled"; + else + msg = "Re-entering the interrupt handler"; + netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n", + msg, + inb(ioaddr + AM2150_MACE_BASE + MACE_IR), + inb(ioaddr + AM2150_MACE_BASE + MACE_IMR)); + /* WARNING: MACE_IR has been read! */ + return IRQ_NONE; + } + + if (!netif_device_present(dev)) { + netdev_dbg(dev, "interrupt from dead card\n"); + return IRQ_NONE; + } + + do { + /* WARNING: MACE_IR is a READ/CLEAR port! */ + status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); + + pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); + + if (status & MACE_IR_RCVINT) { + mace_rx(dev, MACE_MAX_RX_ITERATIONS); + } + + if (status & MACE_IR_XMTINT) { + unsigned char fifofc; + unsigned char xmtrc; + unsigned char xmtfs; + + fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC); + if ((fifofc & MACE_FIFOFC_XMTFC)==0) { + lp->linux_stats.tx_errors++; + outb(0xFF, ioaddr + AM2150_XMT_SKIP); + } + + /* Transmit Retry Count (XMTRC, reg 4) */ + xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC); + if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++; + lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC); + + if ( + (xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS)) & + MACE_XMTFS_XMTSV /* Transmit Status Valid */ + ) { + lp->mace_stats.xmtsv++; + + if (xmtfs & ~MACE_XMTFS_XMTSV) { + if (xmtfs & MACE_XMTFS_UFLO) { + /* Underflow. Indicates that the Transmit FIFO emptied before + the end of frame was reached. 
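+                 (As the comment above update_stats() notes, an underflow
+                 should never happen: it would mean the Xilinx could not
+                 keep the MACE FIFO fed from the card's SRAM.)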
*/ + lp->mace_stats.uflo++; + } + if (xmtfs & MACE_XMTFS_LCOL) { + /* Late Collision */ + lp->mace_stats.lcol++; + } + if (xmtfs & MACE_XMTFS_MORE) { + /* MORE than one retry was needed */ + lp->mace_stats.more++; + } + if (xmtfs & MACE_XMTFS_ONE) { + /* Exactly ONE retry occurred */ + lp->mace_stats.one++; + } + if (xmtfs & MACE_XMTFS_DEFER) { + /* Transmission was defered */ + lp->mace_stats.defer++; + } + if (xmtfs & MACE_XMTFS_LCAR) { + /* Loss of carrier */ + lp->mace_stats.lcar++; + } + if (xmtfs & MACE_XMTFS_RTRY) { + /* Retry error: transmit aborted after 16 attempts */ + lp->mace_stats.rtry++; + } + } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */ + + } /* if (xmtfs & MACE_XMTFS_XMTSV) */ + + lp->linux_stats.tx_packets++; + lp->tx_free_frames++; + netif_wake_queue(dev); + } /* if (status & MACE_IR_XMTINT) */ + + if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) { + if (status & MACE_IR_JAB) { + /* Jabber Error. Excessive transmit duration (20-150ms). */ + lp->mace_stats.jab++; + } + if (status & MACE_IR_BABL) { + /* Babble Error. >1518 bytes transmitted. */ + lp->mace_stats.babl++; + } + if (status & MACE_IR_CERR) { + /* Collision Error. CERR indicates the absence of the + Signal Quality Error Test message after a packet + transmission. */ + lp->mace_stats.cerr++; + } + if (status & MACE_IR_RCVCCO) { + /* Receive Collision Count Overflow; */ + lp->mace_stats.rcvcco++; + } + if (status & MACE_IR_RNTPCO) { + /* Runt Packet Count Overflow */ + lp->mace_stats.rntpco++; + } + if (status & MACE_IR_MPCO) { + /* Missed Packet Count Overflow */ + lp->mace_stats.mpco++; + } + } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */ + + } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt)); + + return IRQ_HANDLED; +} /* mace_interrupt */ + +/* ---------------------------------------------------------------------------- +mace_rx + Receives packets. +---------------------------------------------------------------------------- */ +static int mace_rx(struct net_device *dev, unsigned char RxCnt) +{ + mace_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + unsigned char rx_framecnt; + unsigned short rx_status; + + while ( + ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) && + (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */ + (RxCnt--) + ) { + rx_status = inw(ioaddr + AM2150_RCV); + + pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status" + " 0x%X.\n", dev->name, rx_framecnt, rx_status); + + if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. 
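+      (The RFS1 status bits counted below are: OFLO = receive FIFO
+      overflow, CLSN = collision during reception, FRAM = framing error,
+      FCS = frame-check-sequence (CRC) error.)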
*/ + lp->linux_stats.rx_errors++; + if (rx_status & MACE_RCVFS_OFLO) { + lp->mace_stats.oflo++; + } + if (rx_status & MACE_RCVFS_CLSN) { + lp->mace_stats.clsn++; + } + if (rx_status & MACE_RCVFS_FRAM) { + lp->mace_stats.fram++; + } + if (rx_status & MACE_RCVFS_FCS) { + lp->mace_stats.fcs++; + } + } else { + short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4; + /* Auto Strip is off, always subtract 4 */ + struct sk_buff *skb; + + lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV); + /* runt packet count */ + lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV); + /* rcv collision count */ + + pr_debug(" receiving packet size 0x%X rx_status" + " 0x%X.\n", pkt_len, rx_status); + + skb = dev_alloc_skb(pkt_len+2); + + if (skb != NULL) { + skb_reserve(skb, 2); + insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); + if (pkt_len & 1) + *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV); + skb->protocol = eth_type_trans(skb, dev); + + netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ + + lp->linux_stats.rx_packets++; + lp->linux_stats.rx_bytes += pkt_len; + outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ + continue; + } else { + pr_debug("%s: couldn't allocate a sk_buff of size" + " %d.\n", dev->name, pkt_len); + lp->linux_stats.rx_dropped++; + } + } + outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ + } /* while */ + + return 0; +} /* mace_rx */ + +/* ---------------------------------------------------------------------------- +pr_linux_stats +---------------------------------------------------------------------------- */ +static void pr_linux_stats(struct net_device_stats *pstats) +{ + pr_debug("pr_linux_stats\n"); + pr_debug(" rx_packets=%-7ld tx_packets=%ld\n", + (long)pstats->rx_packets, (long)pstats->tx_packets); + pr_debug(" rx_errors=%-7ld tx_errors=%ld\n", + (long)pstats->rx_errors, (long)pstats->tx_errors); + pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n", + (long)pstats->rx_dropped, (long)pstats->tx_dropped); + pr_debug(" multicast=%-7ld collisions=%ld\n", + (long)pstats->multicast, (long)pstats->collisions); + + pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n", + (long)pstats->rx_length_errors, (long)pstats->rx_over_errors); + pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n", + (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors); + pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n", + (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors); + + pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n", + (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors); + pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n", + (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors); + pr_debug(" tx_window_errors=%ld\n", + (long)pstats->tx_window_errors); +} /* pr_linux_stats */ + +/* ---------------------------------------------------------------------------- +pr_mace_stats +---------------------------------------------------------------------------- */ +static void pr_mace_stats(mace_statistics *pstats) +{ + pr_debug("pr_mace_stats\n"); + + pr_debug(" xmtsv=%-7d uflo=%d\n", + pstats->xmtsv, pstats->uflo); + pr_debug(" lcol=%-7d more=%d\n", + pstats->lcol, pstats->more); + pr_debug(" one=%-7d defer=%d\n", + pstats->one, pstats->defer); + pr_debug(" lcar=%-7d rtry=%d\n", + pstats->lcar, pstats->rtry); + + /* MACE_XMTRC */ + pr_debug(" exdef=%-7d xmtrc=%d\n", + pstats->exdef, pstats->xmtrc); + + /* RFS1--Receive Status (RCVSTS) */ + pr_debug(" oflo=%-7d clsn=%d\n", + 
pstats->oflo, pstats->clsn); + pr_debug(" fram=%-7d fcs=%d\n", + pstats->fram, pstats->fcs); + + /* RFS2--Runt Packet Count (RNTPC) */ + /* RFS3--Receive Collision Count (RCVCC) */ + pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n", + pstats->rfs_rntpc, pstats->rfs_rcvcc); + + /* MACE_IR */ + pr_debug(" jab=%-7d babl=%d\n", + pstats->jab, pstats->babl); + pr_debug(" cerr=%-7d rcvcco=%d\n", + pstats->cerr, pstats->rcvcco); + pr_debug(" rntpco=%-7d mpco=%d\n", + pstats->rntpco, pstats->mpco); + + /* MACE_MPC */ + pr_debug(" mpc=%d\n", pstats->mpc); + + /* MACE_RNTPC */ + pr_debug(" rntpc=%d\n", pstats->rntpc); + + /* MACE_RCVCC */ + pr_debug(" rcvcc=%d\n", pstats->rcvcc); + +} /* pr_mace_stats */ + +/* ---------------------------------------------------------------------------- +update_stats + Update statistics. We change to register window 1, so this + should be run single-threaded if the device is active. This is + expected to be a rare operation, and it's simpler for the rest + of the driver to assume that window 0 is always valid rather + than use a special window-state variable. + + oflo & uflo should _never_ occur since it would mean the Xilinx + was not able to transfer data between the MACE FIFO and the + card's SRAM fast enough. If this happens, something is + seriously wrong with the hardware. +---------------------------------------------------------------------------- */ +static void update_stats(unsigned int ioaddr, struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + + lp->mace_stats.rcvcc += mace_read(lp, ioaddr, MACE_RCVCC); + lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC); + lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC); + /* At this point, mace_stats is fully updated for this call. + We may now update the linux_stats. */ + + /* The MACE has no equivalent for linux_stats field which are commented + out. */ + + /* lp->linux_stats.multicast; */ + lp->linux_stats.collisions = + lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc; + /* Collision: The MACE may retry sending a packet 15 times + before giving up. The retry count is in XMTRC. + Does each retry constitute a collision? + If so, why doesn't the RCVCC record these collisions? */ + + /* detailed rx_errors: */ + lp->linux_stats.rx_length_errors = + lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc; + /* lp->linux_stats.rx_over_errors */ + lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs; + lp->linux_stats.rx_frame_errors = lp->mace_stats.fram; + lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo; + lp->linux_stats.rx_missed_errors = + lp->mace_stats.mpco * 256 + lp->mace_stats.mpc; + + /* detailed tx_errors */ + lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry; + lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar; + /* LCAR usually results from bad cabling. */ + lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo; + lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr; + /* lp->linux_stats.tx_window_errors; */ +} /* update_stats */ + +/* ---------------------------------------------------------------------------- +mace_get_stats + Gathers ethernet statistics from the MACE chip. 
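+	update_stats() folds the 8-bit hardware counters (RCVCC, RNTPC, MPC)
+	into the running software totals -- hence its use of "+=" -- before
+	the linux_stats fields are derived from them.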
+---------------------------------------------------------------------------- */ +static struct net_device_stats *mace_get_stats(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + + update_stats(dev->base_addr, dev); + + pr_debug("%s: updating the statistics.\n", dev->name); + pr_linux_stats(&lp->linux_stats); + pr_mace_stats(&lp->mace_stats); + + return &lp->linux_stats; +} /* net_device_stats */ + +/* ---------------------------------------------------------------------------- +updateCRC + Modified from Am79C90 data sheet. +---------------------------------------------------------------------------- */ + +#ifdef BROKEN_MULTICAST + +static void updateCRC(int *CRC, int bit) +{ + static const int poly[]={ + 1,1,1,0, 1,1,0,1, + 1,0,1,1, 1,0,0,0, + 1,0,0,0, 0,0,1,1, + 0,0,1,0, 0,0,0,0 + }; /* CRC polynomial. poly[n] = coefficient of the x**n term of the + CRC generator polynomial. */ + + int j; + + /* shift CRC and control bit (CRC[32]) */ + for (j = 32; j > 0; j--) + CRC[j] = CRC[j-1]; + CRC[0] = 0; + + /* If bit XOR(control bit) = 1, set CRC = CRC XOR polynomial. */ + if (bit ^ CRC[32]) + for (j = 0; j < 32; j++) + CRC[j] ^= poly[j]; +} /* updateCRC */ + +/* ---------------------------------------------------------------------------- +BuildLAF + Build logical address filter. + Modified from Am79C90 data sheet. + +Input + ladrf: logical address filter (contents initialized to 0) + adr: ethernet address +---------------------------------------------------------------------------- */ +static void BuildLAF(int *ladrf, int *adr) +{ + int CRC[33]={1}; /* CRC register, 1 word/bit + extra control bit */ + + int i, byte; /* temporary array indices */ + int hashcode; /* the output object */ + + CRC[32]=0; + + for (byte = 0; byte < 6; byte++) + for (i = 0; i < 8; i++) + updateCRC(CRC, (adr[byte] >> i) & 1); + + hashcode = 0; + for (i = 0; i < 6; i++) + hashcode = (hashcode << 1) + CRC[i]; + + byte = hashcode >> 3; + ladrf[byte] |= (1 << (hashcode & 7)); + +#ifdef PCMCIA_DEBUG + if (0) + printk(KERN_DEBUG " adr =%pM\n", adr); + printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode); + for (i = 0; i < 8; i++) + pr_cont(" %02X", ladrf[i]); + pr_cont("\n"); +#endif +} /* BuildLAF */ + +/* ---------------------------------------------------------------------------- +restore_multicast_list + Restores the multicast filter for MACE chip to the last + set_multicast_list() call. 
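+	(The LADRF is the MACE's 64-bit logical-address hash filter:
+	BuildLAF() above maps each multicast address to one of 64 bins using
+	six bits of its CRC, so filtering is best-effort rather than exact.
+	Other MACE drivers compute the same bin as ether_crc_le(6, addr) >> 26.)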
+ +Input + multicast_num_addrs + multicast_ladrf[] +---------------------------------------------------------------------------- */ +static void restore_multicast_list(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + int num_addrs = lp->multicast_num_addrs; + int *ladrf = lp->multicast_ladrf; + unsigned int ioaddr = dev->base_addr; + int i; + + pr_debug("%s: restoring Rx mode to %d addresses.\n", + dev->name, num_addrs); + + if (num_addrs > 0) { + + pr_debug("Attempt to restore multicast list detected.\n"); + + mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR); + /* Poll ADDRCHG bit */ + while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) + ; + /* Set LADRF register */ + for (i = 0; i < MACE_LADRF_LEN; i++) + mace_write(lp, ioaddr, MACE_LADRF, ladrf[i]); + + mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL); + mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); + + } else if (num_addrs < 0) { + + /* Promiscuous mode: receive all packets */ + mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); + mace_write(lp, ioaddr, MACE_MACCC, + MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV + ); + + } else { + + /* Normal mode */ + mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); + mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); + + } +} /* restore_multicast_list */ + +/* ---------------------------------------------------------------------------- +set_multicast_list + Set or clear the multicast filter for this adaptor. + +Input + num_addrs == -1 Promiscuous mode, receive all packets + num_addrs == 0 Normal mode, clear multicast list + num_addrs > 0 Multicast mode, receive normal and MC packets, and do + best-effort filtering. +Output + multicast_num_addrs + multicast_ladrf[] +---------------------------------------------------------------------------- */ + +static void set_multicast_list(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */ + struct netdev_hw_addr *ha; + +#ifdef PCMCIA_DEBUG + { + static int old; + if (netdev_mc_count(dev) != old) { + old = netdev_mc_count(dev); + pr_debug("%s: setting Rx mode to %d addresses.\n", + dev->name, old); + } + } +#endif + + /* Set multicast_num_addrs. */ + lp->multicast_num_addrs = netdev_mc_count(dev); + + /* Set multicast_ladrf. 
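+     (Note: this BROKEN_MULTICAST variant is compiled out.  As written,
+     num_addrs is undefined here; it would need to read the
+     lp->multicast_num_addrs value set just above.)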
*/ + if (num_addrs > 0) { + /* Calculate multicast logical address filter */ + memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN); + netdev_for_each_mc_addr(ha, dev) { + memcpy(adr, ha->addr, ETHER_ADDR_LEN); + BuildLAF(lp->multicast_ladrf, adr); + } + } + + restore_multicast_list(dev); + +} /* set_multicast_list */ + +#endif /* BROKEN_MULTICAST */ + +static void restore_multicast_list(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + mace_private *lp = netdev_priv(dev); + + pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name, + lp->multicast_num_addrs); + + if (dev->flags & IFF_PROMISC) { + /* Promiscuous mode: receive all packets */ + mace_write(lp,ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); + mace_write(lp, ioaddr, MACE_MACCC, + MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV + ); + } else { + /* Normal mode */ + mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); + mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); + } +} /* restore_multicast_list */ + +static void set_multicast_list(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + +#ifdef PCMCIA_DEBUG + { + static int old; + if (netdev_mc_count(dev) != old) { + old = netdev_mc_count(dev); + pr_debug("%s: setting Rx mode to %d addresses.\n", + dev->name, old); + } + } +#endif + + lp->multicast_num_addrs = netdev_mc_count(dev); + restore_multicast_list(dev); + +} /* set_multicast_list */ + +static const struct pcmcia_device_id nmclan_ids[] = { + PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Ethernet", 0x085a850b, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("Portable Add-ons", "Ethernet+", 0xebf1d60, 0xad673aaf), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, nmclan_ids); + +static struct pcmcia_driver nmclan_cs_driver = { + .owner = THIS_MODULE, + .name = "nmclan_cs", + .probe = nmclan_probe, + .remove = nmclan_detach, + .id_table = nmclan_ids, + .suspend = nmclan_suspend, + .resume = nmclan_resume, +}; + +static int __init init_nmclan_cs(void) +{ + return pcmcia_register_driver(&nmclan_cs_driver); +} + +static void __exit exit_nmclan_cs(void) +{ + pcmcia_unregister_driver(&nmclan_cs_driver); +} + +module_init(init_nmclan_cs); +module_exit(exit_nmclan_cs); diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c new file mode 100644 index 0000000..8b3090d --- /dev/null +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -0,0 +1,2937 @@ +/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */ +/* + * Copyright 1996-1999 Thomas Bogendoerfer + * + * Derived from the lance driver written 1993,1994,1995 by Donald Becker. + * + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This driver is for PCnet32 and PCnetPCI based ethercards + */ +/************************************************************************** + * 23 Oct, 2000. + * Fixed a few bugs, related to running the controller in 32bit mode. + * + * Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 
+ *
+ *************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "pcnet32"
+#define DRV_VERSION "1.35"
+#define DRV_RELDATE "21.Apr.2008"
+#define PFX DRV_NAME ": "
+
+static const char *const version =
+    DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+/*
+ * PCI device identifiers for "new style" Linux PCI Device Drivers
+ */
+static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
+
+	/*
+	 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
+	 * the incorrect vendor id.
+	 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
+	  .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
+
+	{ }	/* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
+
+static int cards_found;
+
+/*
+ * VLB I/O addresses
+ */
+static unsigned int pcnet32_portlist[] __initdata =
+    { 0x300, 0x320, 0x340, 0x360, 0 };
+
+static int pcnet32_debug;
+static int tx_start = 1;	/* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
+static int pcnet32vlb;	/* check for VLB cards ? */
+
+static struct net_device *pcnet32_dev;
+
+static int max_interrupt_work = 2;
+static int rx_copybreak = 200;
+
+#define PCNET32_PORT_AUI 0x00
+#define PCNET32_PORT_10BT 0x01
+#define PCNET32_PORT_GPSI 0x02
+#define PCNET32_PORT_MII 0x03
+
+#define PCNET32_PORT_PORTSEL 0x03
+#define PCNET32_PORT_ASEL 0x04
+#define PCNET32_PORT_100 0x40
+#define PCNET32_PORT_FD 0x80
+
+#define PCNET32_DMA_MASK 0xffffffff
+
+#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
+#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
+
+/*
+ * table to translate option values from tulip
+ * to internal options
+ */
+static const unsigned char options_mapping[] = {
+	PCNET32_PORT_ASEL,			/*  0 Auto-select	*/
+	PCNET32_PORT_AUI,			/*  1 BNC/AUI		*/
+	PCNET32_PORT_AUI,			/*  2 AUI/BNC		*/
+	PCNET32_PORT_ASEL,			/*  3 not supported	*/
+	PCNET32_PORT_10BT | PCNET32_PORT_FD,	/*  4 10baseT-FD	*/
+	PCNET32_PORT_ASEL,			/*  5 not supported	*/
+	PCNET32_PORT_ASEL,			/*  6 not supported	*/
+	PCNET32_PORT_ASEL,			/*  7 not supported	*/
+	PCNET32_PORT_ASEL,			/*  8 not supported	*/
+	PCNET32_PORT_MII,			/*  9 MII 10baseT	*/
+	PCNET32_PORT_MII | PCNET32_PORT_FD,	/* 10 MII 10baseT-FD	*/
+	PCNET32_PORT_MII,			/* 11 MII (autosel)	*/
+	PCNET32_PORT_10BT,			/* 12 10BaseT		*/
+	PCNET32_PORT_MII | PCNET32_PORT_100,	/* 13 MII 100BaseTx	*/
+	/* 14 MII 100BaseTx-FD */
+	PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
+	PCNET32_PORT_ASEL			/* 15 not supported	*/
+};
+
+static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Loopback test (offline)"
+};
+
+#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test)
+
+#define PCNET32_NUM_REGS 136
+
+#define MAX_UNITS 8	/* More are supported, limit only on options */
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+static int homepna[MAX_UNITS];
+
+/*
+ * Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver. So look for a verbose description in lance.c.
The differences + * to the normal lance driver is the use of the 32bit mode of PCnet32 + * and PCnetPCI chips. Because these chips are 32bit chips, there is no + * 16MB limitation and we don't need bounce buffers. + */ + +/* + * Set the number of Tx and Rx buffers, using Log_2(# buffers). + * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. + * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). + */ +#ifndef PCNET32_LOG_TX_BUFFERS +#define PCNET32_LOG_TX_BUFFERS 4 +#define PCNET32_LOG_RX_BUFFERS 5 +#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */ +#define PCNET32_LOG_MAX_RX_BUFFERS 9 +#endif + +#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS)) +#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS)) + +#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) +#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) + +#define PKT_BUF_SKB 1544 +/* actual buffer length after being aligned */ +#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN) +/* chip wants twos complement of the (aligned) buffer length */ +#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB) + +/* Offsets from base I/O address. */ +#define PCNET32_WIO_RDP 0x10 +#define PCNET32_WIO_RAP 0x12 +#define PCNET32_WIO_RESET 0x14 +#define PCNET32_WIO_BDP 0x16 + +#define PCNET32_DWIO_RDP 0x10 +#define PCNET32_DWIO_RAP 0x14 +#define PCNET32_DWIO_RESET 0x18 +#define PCNET32_DWIO_BDP 0x1C + +#define PCNET32_TOTAL_SIZE 0x20 + +#define CSR0 0 +#define CSR0_INIT 0x1 +#define CSR0_START 0x2 +#define CSR0_STOP 0x4 +#define CSR0_TXPOLL 0x8 +#define CSR0_INTEN 0x40 +#define CSR0_IDON 0x0100 +#define CSR0_NORMAL (CSR0_START | CSR0_INTEN) +#define PCNET32_INIT_LOW 1 +#define PCNET32_INIT_HIGH 2 +#define CSR3 3 +#define CSR4 4 +#define CSR5 5 +#define CSR5_SUSPEND 0x0001 +#define CSR15 15 +#define PCNET32_MC_FILTER 8 + +#define PCNET32_79C970A 0x2621 + +/* The PCNET32 Rx and Tx ring descriptors. */ +struct pcnet32_rx_head { + __le32 base; + __le16 buf_length; /* two`s complement of length */ + __le16 status; + __le32 msg_length; + __le32 reserved; +}; + +struct pcnet32_tx_head { + __le32 base; + __le16 length; /* two`s complement of length */ + __le16 status; + __le32 misc; + __le32 reserved; +}; + +/* The PCNET32 32-Bit initialization block, described in databook. */ +struct pcnet32_init_block { + __le16 mode; + __le16 tlen_rlen; + u8 phys_addr[6]; + __le16 reserved; + __le32 filter[2]; + /* Receive and transmit ring base, along with extra bits. */ + __le32 rx_ring; + __le32 tx_ring; +}; + +/* PCnet32 access functions */ +struct pcnet32_access { + u16 (*read_csr) (unsigned long, int); + void (*write_csr) (unsigned long, int, u16); + u16 (*read_bcr) (unsigned long, int); + void (*write_bcr) (unsigned long, int, u16); + u16 (*read_rap) (unsigned long); + void (*write_rap) (unsigned long, u16); + void (*reset) (unsigned long); +}; + +/* + * The first field of pcnet32_private is read by the ethernet device + * so the structure should be allocated using pci_alloc_consistent(). + */ +struct pcnet32_private { + struct pcnet32_init_block *init_block; + /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ + struct pcnet32_rx_head *rx_ring; + struct pcnet32_tx_head *tx_ring; + dma_addr_t init_dma_addr;/* DMA address of beginning of the init block, + returned by pci_alloc_consistent */ + struct pci_dev *pci_dev; + const char *name; + /* The saved address of a sent-in-place packet/buffer, for skfree(). 
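+	   ("skfree()" is a historical lance-ism; the buffers are actually
+	   released with dev_kfree_skb_any() in pcnet32_tx().)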
*/ + struct sk_buff **tx_skbuff; + struct sk_buff **rx_skbuff; + dma_addr_t *tx_dma_addr; + dma_addr_t *rx_dma_addr; + struct pcnet32_access a; + spinlock_t lock; /* Guard lock */ + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int rx_ring_size; /* current rx ring size */ + unsigned int tx_ring_size; /* current tx ring size */ + unsigned int rx_mod_mask; /* rx ring modular mask */ + unsigned int tx_mod_mask; /* tx ring modular mask */ + unsigned short rx_len_bits; + unsigned short tx_len_bits; + dma_addr_t rx_ring_dma_addr; + dma_addr_t tx_ring_dma_addr; + unsigned int dirty_rx, /* ring entries to be freed. */ + dirty_tx; + + struct net_device *dev; + struct napi_struct napi; + char tx_full; + char phycount; /* number of phys found */ + int options; + unsigned int shared_irq:1, /* shared irq possible */ + dxsuflo:1, /* disable transmit stop on uflo */ + mii:1; /* mii port available */ + struct net_device *next; + struct mii_if_info mii_if; + struct timer_list watchdog_timer; + u32 msg_enable; /* debug message level */ + + /* each bit indicates an available PHY */ + u32 phymask; + unsigned short chip_version; /* which variant this is */ + + /* saved registers during ethtool blink */ + u16 save_regs[4]; +}; + +static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); +static int pcnet32_probe1(unsigned long, int, struct pci_dev *); +static int pcnet32_open(struct net_device *); +static int pcnet32_init_ring(struct net_device *); +static netdev_tx_t pcnet32_start_xmit(struct sk_buff *, + struct net_device *); +static void pcnet32_tx_timeout(struct net_device *dev); +static irqreturn_t pcnet32_interrupt(int, void *); +static int pcnet32_close(struct net_device *); +static struct net_device_stats *pcnet32_get_stats(struct net_device *); +static void pcnet32_load_multicast(struct net_device *dev); +static void pcnet32_set_multicast_list(struct net_device *); +static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); +static void pcnet32_watchdog(struct net_device *); +static int mdio_read(struct net_device *dev, int phy_id, int reg_num); +static void mdio_write(struct net_device *dev, int phy_id, int reg_num, + int val); +static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); +static void pcnet32_ethtool_test(struct net_device *dev, + struct ethtool_test *eth_test, u64 * data); +static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1); +static int pcnet32_get_regs_len(struct net_device *dev); +static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *ptr); +static void pcnet32_purge_tx_ring(struct net_device *dev); +static int pcnet32_alloc_ring(struct net_device *dev, const char *name); +static void pcnet32_free_ring(struct net_device *dev); +static void pcnet32_check_media(struct net_device *dev, int verbose); + +static u16 pcnet32_wio_read_csr(unsigned long addr, int index) +{ + outw(index, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_RDP); +} + +static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val) +{ + outw(index, addr + PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_RDP); +} + +static u16 pcnet32_wio_read_bcr(unsigned long addr, int index) +{ + outw(index, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_BDP); +} + +static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val) +{ + outw(index, addr + PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_BDP); +} + +static u16 pcnet32_wio_read_rap(unsigned long addr) +{ 
+ return inw(addr + PCNET32_WIO_RAP); +} + +static void pcnet32_wio_write_rap(unsigned long addr, u16 val) +{ + outw(val, addr + PCNET32_WIO_RAP); +} + +static void pcnet32_wio_reset(unsigned long addr) +{ + inw(addr + PCNET32_WIO_RESET); +} + +static int pcnet32_wio_check(unsigned long addr) +{ + outw(88, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_RAP) == 88; +} + +static struct pcnet32_access pcnet32_wio = { + .read_csr = pcnet32_wio_read_csr, + .write_csr = pcnet32_wio_write_csr, + .read_bcr = pcnet32_wio_read_bcr, + .write_bcr = pcnet32_wio_write_bcr, + .read_rap = pcnet32_wio_read_rap, + .write_rap = pcnet32_wio_write_rap, + .reset = pcnet32_wio_reset +}; + +static u16 pcnet32_dwio_read_csr(unsigned long addr, int index) +{ + outl(index, addr + PCNET32_DWIO_RAP); + return inl(addr + PCNET32_DWIO_RDP) & 0xffff; +} + +static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val) +{ + outl(index, addr + PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_RDP); +} + +static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index) +{ + outl(index, addr + PCNET32_DWIO_RAP); + return inl(addr + PCNET32_DWIO_BDP) & 0xffff; +} + +static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val) +{ + outl(index, addr + PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_BDP); +} + +static u16 pcnet32_dwio_read_rap(unsigned long addr) +{ + return inl(addr + PCNET32_DWIO_RAP) & 0xffff; +} + +static void pcnet32_dwio_write_rap(unsigned long addr, u16 val) +{ + outl(val, addr + PCNET32_DWIO_RAP); +} + +static void pcnet32_dwio_reset(unsigned long addr) +{ + inl(addr + PCNET32_DWIO_RESET); +} + +static int pcnet32_dwio_check(unsigned long addr) +{ + outl(88, addr + PCNET32_DWIO_RAP); + return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88; +} + +static struct pcnet32_access pcnet32_dwio = { + .read_csr = pcnet32_dwio_read_csr, + .write_csr = pcnet32_dwio_write_csr, + .read_bcr = pcnet32_dwio_read_bcr, + .write_bcr = pcnet32_dwio_write_bcr, + .read_rap = pcnet32_dwio_read_rap, + .write_rap = pcnet32_dwio_write_rap, + .reset = pcnet32_dwio_reset +}; + +static void pcnet32_netif_stop(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + dev->trans_start = jiffies; /* prevent tx timeout */ + napi_disable(&lp->napi); + netif_tx_disable(dev); +} + +static void pcnet32_netif_start(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + ulong ioaddr = dev->base_addr; + u16 val; + + netif_wake_queue(dev); + val = lp->a.read_csr(ioaddr, CSR3); + val &= 0x00ff; + lp->a.write_csr(ioaddr, CSR3, val); + napi_enable(&lp->napi); +} + +/* + * Allocate space for the new sized tx ring. + * Free old resources + * Save new resources. + * Any failure keeps old resources. + * Must be called with lp->lock held. 
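+ * Note: "size" is log2 of the entry count, matching how the init block's
+ * TLEN field encodes ring lengths; the new value is precomputed below as
+ * tx_len_bits = size << 12.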
+ */ +static void pcnet32_realloc_tx_ring(struct net_device *dev, + struct pcnet32_private *lp, + unsigned int size) +{ + dma_addr_t new_ring_dma_addr; + dma_addr_t *new_dma_addr_list; + struct pcnet32_tx_head *new_tx_ring; + struct sk_buff **new_skb_list; + + pcnet32_purge_tx_ring(dev); + + new_tx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + (1 << size), + &new_ring_dma_addr); + if (new_tx_ring == NULL) { + netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + return; + } + memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size)); + + new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), + GFP_ATOMIC); + if (!new_dma_addr_list) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + goto free_new_tx_ring; + } + + new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!new_skb_list) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + goto free_new_lists; + } + + kfree(lp->tx_skbuff); + kfree(lp->tx_dma_addr); + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, lp->tx_ring, + lp->tx_ring_dma_addr); + + lp->tx_ring_size = (1 << size); + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->tx_len_bits = (size << 12); + lp->tx_ring = new_tx_ring; + lp->tx_ring_dma_addr = new_ring_dma_addr; + lp->tx_dma_addr = new_dma_addr_list; + lp->tx_skbuff = new_skb_list; + return; + +free_new_lists: + kfree(new_dma_addr_list); +free_new_tx_ring: + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + (1 << size), + new_tx_ring, + new_ring_dma_addr); +} + +/* + * Allocate space for the new sized rx ring. + * Re-use old receive buffers. + * alloc extra buffers + * free unneeded buffers + * free unneeded buffers + * Save new resources. + * Any failure keeps old resources. + * Must be called with lp->lock held. 
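+ * Note: "size" is again log2 of the entry count; the ring itself holds
+ * 1 << size entries, while the overlap/alloc/free loops below compare
+ * the index against plain "size".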
+ */ +static void pcnet32_realloc_rx_ring(struct net_device *dev, + struct pcnet32_private *lp, + unsigned int size) +{ + dma_addr_t new_ring_dma_addr; + dma_addr_t *new_dma_addr_list; + struct pcnet32_rx_head *new_rx_ring; + struct sk_buff **new_skb_list; + int new, overlap; + + new_rx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + (1 << size), + &new_ring_dma_addr); + if (new_rx_ring == NULL) { + netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + return; + } + memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size)); + + new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), + GFP_ATOMIC); + if (!new_dma_addr_list) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + goto free_new_rx_ring; + } + + new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!new_skb_list) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + goto free_new_lists; + } + + /* first copy the current receive buffers */ + overlap = min(size, lp->rx_ring_size); + for (new = 0; new < overlap; new++) { + new_rx_ring[new] = lp->rx_ring[new]; + new_dma_addr_list[new] = lp->rx_dma_addr[new]; + new_skb_list[new] = lp->rx_skbuff[new]; + } + /* now allocate any new buffers needed */ + for (; new < size; new++) { + struct sk_buff *rx_skbuff; + new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB); + rx_skbuff = new_skb_list[new]; + if (!rx_skbuff) { + /* keep the original lists and buffers */ + netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n", + __func__); + goto free_all_new; + } + skb_reserve(rx_skbuff, NET_IP_ALIGN); + + new_dma_addr_list[new] = + pci_map_single(lp->pci_dev, rx_skbuff->data, + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); + new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE); + new_rx_ring[new].status = cpu_to_le16(0x8000); + } + /* and free any unneeded buffers */ + for (; new < lp->rx_ring_size; new++) { + if (lp->rx_skbuff[new]) { + pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(lp->rx_skbuff[new]); + } + } + + kfree(lp->rx_skbuff); + kfree(lp->rx_dma_addr); + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, lp->rx_ring, + lp->rx_ring_dma_addr); + + lp->rx_ring_size = (1 << size); + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->rx_len_bits = (size << 4); + lp->rx_ring = new_rx_ring; + lp->rx_ring_dma_addr = new_ring_dma_addr; + lp->rx_dma_addr = new_dma_addr_list; + lp->rx_skbuff = new_skb_list; + return; + +free_all_new: + while (--new >= lp->rx_ring_size) { + if (new_skb_list[new]) { + pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(new_skb_list[new]); + } + } + kfree(new_skb_list); +free_new_lists: + kfree(new_dma_addr_list); +free_new_rx_ring: + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + (1 << size), + new_rx_ring, + new_ring_dma_addr); +} + +static void pcnet32_purge_rx_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int i; + + /* free all allocated skbuffs */ + for (i = 0; i < lp->rx_ring_size; i++) { + lp->rx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + if (lp->rx_skbuff[i]) { + pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(lp->rx_skbuff[i]); + } + lp->rx_skbuff[i] = NULL; + lp->rx_dma_addr[i] = 0; + } +} + +#ifdef 
CONFIG_NET_POLL_CONTROLLER +static void pcnet32_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + pcnet32_interrupt(0, dev); + enable_irq(dev->irq); +} +#endif + +static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r = -EOPNOTSUPP; + + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + mii_ethtool_gset(&lp->mii_if, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + r = 0; + } + return r; +} + +static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r = -EOPNOTSUPP; + + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + r = mii_ethtool_sset(&lp->mii_if, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + } + return r; +} + +static void pcnet32_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + if (lp->pci_dev) + strcpy(info->bus_info, pci_name(lp->pci_dev)); + else + sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); +} + +static u32 pcnet32_get_link(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r; + + spin_lock_irqsave(&lp->lock, flags); + if (lp->mii) { + r = mii_link_ok(&lp->mii_if); + } else if (lp->chip_version >= PCNET32_79C970A) { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); + } else { /* can not detect link on really old chips */ + r = 1; + } + spin_unlock_irqrestore(&lp->lock, flags); + + return r; +} + +static u32 pcnet32_get_msglevel(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + return lp->msg_enable; +} + +static void pcnet32_set_msglevel(struct net_device *dev, u32 value) +{ + struct pcnet32_private *lp = netdev_priv(dev); + lp->msg_enable = value; +} + +static int pcnet32_nway_reset(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r = -EOPNOTSUPP; + + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + r = mii_nway_restart(&lp->mii_if); + spin_unlock_irqrestore(&lp->lock, flags); + } + return r; +} + +static void pcnet32_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + ering->tx_max_pending = TX_MAX_RING_SIZE; + ering->tx_pending = lp->tx_ring_size; + ering->rx_max_pending = RX_MAX_RING_SIZE; + ering->rx_pending = lp->rx_ring_size; +} + +static int pcnet32_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + unsigned int size; + ulong ioaddr = dev->base_addr; + int i; + + if (ering->rx_mini_pending || ering->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(dev)) + pcnet32_netif_stop(dev); + + spin_lock_irqsave(&lp->lock, flags); + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ + + size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); + + /* set the minimum ring size to 4, to allow the loopback test to work + * unchanged. 
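+	 * (The loops below therefore start at i = 2, i.e. 2^2 = 4 entries,
+	 * since pcnet32_loopback_test() queues up to four frames.)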
+ */ + for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { + if (size <= (1 << i)) + break; + } + if ((1 << i) != lp->tx_ring_size) + pcnet32_realloc_tx_ring(dev, lp, i); + + size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); + for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { + if (size <= (1 << i)) + break; + } + if ((1 << i) != lp->rx_ring_size) + pcnet32_realloc_rx_ring(dev, lp, i); + + lp->napi.weight = lp->rx_ring_size / 2; + + if (netif_running(dev)) { + pcnet32_netif_start(dev); + pcnet32_restart(dev, CSR0_NORMAL); + } + + spin_unlock_irqrestore(&lp->lock, flags); + + netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n", + lp->rx_ring_size, lp->tx_ring_size); + + return 0; +} + +static void pcnet32_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); +} + +static int pcnet32_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return PCNET32_TEST_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void pcnet32_ethtool_test(struct net_device *dev, + struct ethtool_test *test, u64 * data) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int rc; + + if (test->flags == ETH_TEST_FL_OFFLINE) { + rc = pcnet32_loopback_test(dev, data); + if (rc) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Loopback test failed\n"); + test->flags |= ETH_TEST_FL_FAILED; + } else + netif_printk(lp, hw, KERN_DEBUG, dev, + "Loopback test passed\n"); + } else + netif_printk(lp, hw, KERN_DEBUG, dev, + "No tests to run (specify 'Offline' on ethtool)\n"); +} /* end pcnet32_ethtool_test */ + +static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) +{ + struct pcnet32_private *lp = netdev_priv(dev); + struct pcnet32_access *a = &lp->a; /* access to registers */ + ulong ioaddr = dev->base_addr; /* card base I/O address */ + struct sk_buff *skb; /* sk buff */ + int x, i; /* counters */ + int numbuffs = 4; /* number of TX/RX buffers and descs */ + u16 status = 0x8300; /* TX ring status */ + __le16 teststatus; /* test of ring status */ + int rc; /* return code */ + int size; /* size of packets */ + unsigned char *packet; /* source packet data */ + static const int data_len = 60; /* length of source packets */ + unsigned long flags; + unsigned long ticks; + + rc = 1; /* default to fail */ + + if (netif_running(dev)) + pcnet32_netif_stop(dev); + + spin_lock_irqsave(&lp->lock, flags); + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ + + numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size)); + + /* Reset the PCNET32 */ + lp->a.reset(ioaddr); + lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + + /* switch pcnet32 to 32bit mode */ + lp->a.write_bcr(ioaddr, 20, 2); + + /* purge & init rings but don't actually restart */ + pcnet32_restart(dev, 0x0000); + + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ + + /* Initialize Transmit buffers. 
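+	   (Each test frame is addressed to ourselves -- DA = SA = dev_addr --
+	   with ethertype 0x0806, a one-byte frame index, and an incrementing
+	   data pattern that the received copies are compared against below.)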
*/ + size = data_len + 15; + for (x = 0; x < numbuffs; x++) { + skb = dev_alloc_skb(size); + if (!skb) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Cannot allocate skb at line: %d!\n", + __LINE__); + goto clean_up; + } + packet = skb->data; + skb_put(skb, size); /* create space for data */ + lp->tx_skbuff[x] = skb; + lp->tx_ring[x].length = cpu_to_le16(-skb->len); + lp->tx_ring[x].misc = 0; + + /* put DA and SA into the skb */ + for (i = 0; i < 6; i++) + *packet++ = dev->dev_addr[i]; + for (i = 0; i < 6; i++) + *packet++ = dev->dev_addr[i]; + /* type */ + *packet++ = 0x08; + *packet++ = 0x06; + /* packet number */ + *packet++ = x; + /* fill packet with data */ + for (i = 0; i < data_len; i++) + *packet++ = i; + + lp->tx_dma_addr[x] = + pci_map_single(lp->pci_dev, skb->data, skb->len, + PCI_DMA_TODEVICE); + lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); + wmb(); /* Make sure owner changes after all others are visible */ + lp->tx_ring[x].status = cpu_to_le16(status); + } + + x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */ + a->write_bcr(ioaddr, 32, x | 0x0002); + + /* set int loopback in CSR15 */ + x = a->read_csr(ioaddr, CSR15) & 0xfffc; + lp->a.write_csr(ioaddr, CSR15, x | 0x0044); + + teststatus = cpu_to_le16(0x8000); + lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ + + /* Check status of descriptors */ + for (x = 0; x < numbuffs; x++) { + ticks = 0; + rmb(); + while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { + spin_unlock_irqrestore(&lp->lock, flags); + msleep(1); + spin_lock_irqsave(&lp->lock, flags); + rmb(); + ticks++; + } + if (ticks == 200) { + netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x); + break; + } + } + + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ + wmb(); + if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { + netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n"); + + for (x = 0; x < numbuffs; x++) { + netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x); + skb = lp->rx_skbuff[x]; + for (i = 0; i < size; i++) + pr_cont(" %02x", *(skb->data + i)); + pr_cont("\n"); + } + } + + x = 0; + rc = 0; + while (x < numbuffs && !rc) { + skb = lp->rx_skbuff[x]; + packet = lp->tx_skbuff[x]->data; + for (i = 0; i < size; i++) { + if (*(skb->data + i) != packet[i]) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Error in compare! 
%2x - %02x %02x\n", + i, *(skb->data + i), packet[i]); + rc = 1; + break; + } + } + x++; + } + +clean_up: + *data1 = rc; + pcnet32_purge_tx_ring(dev); + + x = a->read_csr(ioaddr, CSR15); + a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */ + + x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ + a->write_bcr(ioaddr, 32, (x & ~0x0002)); + + if (netif_running(dev)) { + pcnet32_netif_start(dev); + pcnet32_restart(dev, CSR0_NORMAL); + } else { + pcnet32_purge_rx_ring(dev); + lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ + } + spin_unlock_irqrestore(&lp->lock, flags); + + return rc; +} /* end pcnet32_loopback_test */ + +static int pcnet32_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct pcnet32_private *lp = netdev_priv(dev); + struct pcnet32_access *a = &lp->a; + ulong ioaddr = dev->base_addr; + unsigned long flags; + int i; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + /* Save the current value of the bcrs */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) + lp->save_regs[i - 4] = a->read_bcr(ioaddr, i); + spin_unlock_irqrestore(&lp->lock, flags); + return 2; /* cycle on/off twice per second */ + + case ETHTOOL_ID_ON: + case ETHTOOL_ID_OFF: + /* Blink the led */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) + a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); + spin_unlock_irqrestore(&lp->lock, flags); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore the original value of the bcrs */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) + a->write_bcr(ioaddr, i, lp->save_regs[i - 4]); + spin_unlock_irqrestore(&lp->lock, flags); + } + return 0; +} + +/* + * lp->lock must be held. + */ +static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, + int can_sleep) +{ + int csr5; + struct pcnet32_private *lp = netdev_priv(dev); + struct pcnet32_access *a = &lp->a; + ulong ioaddr = dev->base_addr; + int ticks; + + /* really old chips have to be stopped. */ + if (lp->chip_version < PCNET32_79C970A) + return 0; + + /* set SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = a->read_csr(ioaddr, CSR5); + a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); + + /* poll waiting for bit to be set */ + ticks = 0; + while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) { + spin_unlock_irqrestore(&lp->lock, *flags); + if (can_sleep) + msleep(1); + else + mdelay(1); + spin_lock_irqsave(&lp->lock, *flags); + ticks++; + if (ticks > 200) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Error getting into suspend!\n"); + return 0; + } + } + return 1; +} + +/* + * process one receive descriptor entry + */ + +static void pcnet32_rx_entry(struct net_device *dev, + struct pcnet32_private *lp, + struct pcnet32_rx_head *rxp, + int entry) +{ + int status = (short)le16_to_cpu(rxp->status) >> 8; + int rx_in_place = 0; + struct sk_buff *skb; + short pkt_len; + + if (status != 0x03) { /* There was an error. */ + /* + * There is a tricky error noted by John Murphy, + * to Russ Nelson: Even with full-sized + * buffers it's possible for a jabber packet to use two + * buffers, with only the last correctly noting the error. + */ + if (status & 0x01) /* Only count a general error at the */ + dev->stats.rx_errors++; /* end of a packet. 
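+			   The remaining bits are counted individually below:
+			   0x20 framing, 0x10 overflow, 0x08 CRC, 0x04 FIFO.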
*/ + if (status & 0x20) + dev->stats.rx_frame_errors++; + if (status & 0x10) + dev->stats.rx_over_errors++; + if (status & 0x08) + dev->stats.rx_crc_errors++; + if (status & 0x04) + dev->stats.rx_fifo_errors++; + return; + } + + pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4; + + /* Discard oversize frames. */ + if (unlikely(pkt_len > PKT_BUF_SIZE)) { + netif_err(lp, drv, dev, "Impossible packet size %d!\n", + pkt_len); + dev->stats.rx_errors++; + return; + } + if (pkt_len < 60) { + netif_err(lp, rx_err, dev, "Runt packet!\n"); + dev->stats.rx_errors++; + return; + } + + if (pkt_len > rx_copybreak) { + struct sk_buff *newskb; + + newskb = dev_alloc_skb(PKT_BUF_SKB); + if (newskb) { + skb_reserve(newskb, NET_IP_ALIGN); + skb = lp->rx_skbuff[entry]; + pci_unmap_single(lp->pci_dev, + lp->rx_dma_addr[entry], + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[entry] = newskb; + lp->rx_dma_addr[entry] = + pci_map_single(lp->pci_dev, + newskb->data, + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); + rx_in_place = 1; + } else + skb = NULL; + } else + skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN); + + if (skb == NULL) { + netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n"); + dev->stats.rx_dropped++; + return; + } + if (!rx_in_place) { + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, pkt_len); /* Make room */ + pci_dma_sync_single_for_cpu(lp->pci_dev, + lp->rx_dma_addr[entry], + pkt_len, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(skb, + (unsigned char *)(lp->rx_skbuff[entry]->data), + pkt_len); + pci_dma_sync_single_for_device(lp->pci_dev, + lp->rx_dma_addr[entry], + pkt_len, + PCI_DMA_FROMDEVICE); + } + dev->stats.rx_bytes += skb->len; + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + dev->stats.rx_packets++; +} + +static int pcnet32_rx(struct net_device *dev, int budget) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int entry = lp->cur_rx & lp->rx_mod_mask; + struct pcnet32_rx_head *rxp = &lp->rx_ring[entry]; + int npackets = 0; + + /* If we own the next entry, it's a new packet. Send it up. */ + while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) { + pcnet32_rx_entry(dev, lp, rxp, entry); + npackets += 1; + /* + * The docs say that the buffer length isn't touched, but Andrew + * Boyd of QNX reports that some revs of the 79C965 clear it. + */ + rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE); + wmb(); /* Make sure owner changes after others are visible */ + rxp->status = cpu_to_le16(0x8000); + entry = (++lp->cur_rx) & lp->rx_mod_mask; + rxp = &lp->rx_ring[entry]; + } + + return npackets; +} + +static int pcnet32_tx(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned int dirty_tx = lp->dirty_tx; + int delta; + int must_restart = 0; + + while (dirty_tx != lp->cur_tx) { + int entry = dirty_tx & lp->tx_mod_mask; + int status = (short)le16_to_cpu(lp->tx_ring[entry].status); + + if (status < 0) + break; /* It still hasn't been Txed */ + + lp->tx_ring[entry].base = 0; + + if (status & 0x4000) { + /* There was a major error, log it. 
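+			   (err_status is the descriptor's misc word; by
+			   LANCE convention 0x04000000 = excessive retries,
+			   0x08000000 = loss of carrier, 0x10000000 = late
+			   collision and 0x40000000 = transmit FIFO underflow,
+			   matching the counters updated below.)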
*/ + int err_status = le32_to_cpu(lp->tx_ring[entry].misc); + dev->stats.tx_errors++; + netif_err(lp, tx_err, dev, + "Tx error status=%04x err_status=%08x\n", + status, err_status); + if (err_status & 0x04000000) + dev->stats.tx_aborted_errors++; + if (err_status & 0x08000000) + dev->stats.tx_carrier_errors++; + if (err_status & 0x10000000) + dev->stats.tx_window_errors++; +#ifndef DO_DXSUFLO + if (err_status & 0x40000000) { + dev->stats.tx_fifo_errors++; + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); + must_restart = 1; + } +#else + if (err_status & 0x40000000) { + dev->stats.tx_fifo_errors++; + if (!lp->dxsuflo) { /* If controller doesn't recover ... */ + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); + must_restart = 1; + } + } +#endif + } else { + if (status & 0x1800) + dev->stats.collisions++; + dev->stats.tx_packets++; + } + + /* We must free the original skb */ + if (lp->tx_skbuff[entry]) { + pci_unmap_single(lp->pci_dev, + lp->tx_dma_addr[entry], + lp->tx_skbuff[entry]-> + len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(lp->tx_skbuff[entry]); + lp->tx_skbuff[entry] = NULL; + lp->tx_dma_addr[entry] = 0; + } + dirty_tx++; + } + + delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); + if (delta > lp->tx_ring_size) { + netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n", + dirty_tx, lp->cur_tx, lp->tx_full); + dirty_tx += lp->tx_ring_size; + delta -= lp->tx_ring_size; + } + + if (lp->tx_full && + netif_queue_stopped(dev) && + delta < lp->tx_ring_size - 2) { + /* The ring is no longer full, clear tbusy. */ + lp->tx_full = 0; + netif_wake_queue(dev); + } + lp->dirty_tx = dirty_tx; + + return must_restart; +} + +static int pcnet32_poll(struct napi_struct *napi, int budget) +{ + struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); + struct net_device *dev = lp->dev; + unsigned long ioaddr = dev->base_addr; + unsigned long flags; + int work_done; + u16 val; + + work_done = pcnet32_rx(dev, budget); + + spin_lock_irqsave(&lp->lock, flags); + if (pcnet32_tx(dev)) { + /* reset the chip to clear the error condition, then restart */ + lp->a.reset(ioaddr); + lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + pcnet32_restart(dev, CSR0_START); + netif_wake_queue(dev); + } + spin_unlock_irqrestore(&lp->lock, flags); + + if (work_done < budget) { + spin_lock_irqsave(&lp->lock, flags); + + __napi_complete(napi); + + /* clear interrupt masks */ + val = lp->a.read_csr(ioaddr, CSR3); + val &= 0x00ff; + lp->a.write_csr(ioaddr, CSR3, val); + + /* Set interrupt enable. 
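		   The CSR3 write above drops the interrupt mask bits (0x5f00)
		   that pcnet32_interrupt() set when it scheduled this poll;
		   the CSR0_INTEN write below then re-arms interrupts.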
*/ + lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); + + spin_unlock_irqrestore(&lp->lock, flags); + } + return work_done; +} + +#define PCNET32_REGS_PER_PHY 32 +#define PCNET32_MAX_PHYS 32 +static int pcnet32_get_regs_len(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int j = lp->phycount * PCNET32_REGS_PER_PHY; + + return (PCNET32_NUM_REGS + j) * sizeof(u16); +} + +static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *ptr) +{ + int i, csr0; + u16 *buff = ptr; + struct pcnet32_private *lp = netdev_priv(dev); + struct pcnet32_access *a = &lp->a; + ulong ioaddr = dev->base_addr; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + + csr0 = a->read_csr(ioaddr, CSR0); + if (!(csr0 & CSR0_STOP)) /* If not stopped */ + pcnet32_suspend(dev, &flags, 1); + + /* read address PROM */ + for (i = 0; i < 16; i += 2) + *buff++ = inw(ioaddr + i); + + /* read control and status registers */ + for (i = 0; i < 90; i++) + *buff++ = a->read_csr(ioaddr, i); + + *buff++ = a->read_csr(ioaddr, 112); + *buff++ = a->read_csr(ioaddr, 114); + + /* read bus configuration registers */ + for (i = 0; i < 30; i++) + *buff++ = a->read_bcr(ioaddr, i); + + *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ + + for (i = 31; i < 36; i++) + *buff++ = a->read_bcr(ioaddr, i); + + /* read mii phy registers */ + if (lp->mii) { + int j; + for (j = 0; j < PCNET32_MAX_PHYS; j++) { + if (lp->phymask & (1 << j)) { + for (i = 0; i < PCNET32_REGS_PER_PHY; i++) { + lp->a.write_bcr(ioaddr, 33, + (j << 5) | i); + *buff++ = lp->a.read_bcr(ioaddr, 34); + } + } + } + } + + if (!(csr0 & CSR0_STOP)) { /* If not stopped */ + int csr5; + + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = a->read_csr(ioaddr, CSR5); + a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); + } + + spin_unlock_irqrestore(&lp->lock, flags); +} + +static const struct ethtool_ops pcnet32_ethtool_ops = { + .get_settings = pcnet32_get_settings, + .set_settings = pcnet32_set_settings, + .get_drvinfo = pcnet32_get_drvinfo, + .get_msglevel = pcnet32_get_msglevel, + .set_msglevel = pcnet32_set_msglevel, + .nway_reset = pcnet32_nway_reset, + .get_link = pcnet32_get_link, + .get_ringparam = pcnet32_get_ringparam, + .set_ringparam = pcnet32_set_ringparam, + .get_strings = pcnet32_get_strings, + .self_test = pcnet32_ethtool_test, + .set_phys_id = pcnet32_set_phys_id, + .get_regs_len = pcnet32_get_regs_len, + .get_regs = pcnet32_get_regs, + .get_sset_count = pcnet32_get_sset_count, +}; + +/* only probes for non-PCI devices, the rest are handled by + * pci_register_driver via pcnet32_probe_pci */ + +static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist) +{ + unsigned int *port, ioaddr; + + /* search for PCnet32 VLB cards at known addresses */ + for (port = pcnet32_portlist; (ioaddr = *port); port++) { + if (request_region + (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { + /* check if there is really a pcnet chip on that ioaddr */ + if ((inb(ioaddr + 14) == 0x57) && + (inb(ioaddr + 15) == 0x57)) { + pcnet32_probe1(ioaddr, 0, NULL); + } else { + release_region(ioaddr, PCNET32_TOTAL_SIZE); + } + } + } +} + +static int __devinit +pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned long ioaddr; + int err; + + err = pci_enable_device(pdev); + if (err < 0) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("failed to enable device -- err=%d\n", err); + return err; + } + pci_set_master(pdev); + + ioaddr = pci_resource_start(pdev, 0); + if (!ioaddr) { + if 
(pcnet32_debug & NETIF_MSG_PROBE) + pr_err("card has no PCI IO resources, aborting\n"); + return -ENODEV; + } + + if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("architecture does not support 32bit PCI busmaster DMA\n"); + return -ENODEV; + } + if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("io address range already allocated\n"); + return -EBUSY; + } + + err = pcnet32_probe1(ioaddr, 1, pdev); + if (err < 0) + pci_disable_device(pdev); + + return err; +} + +static const struct net_device_ops pcnet32_netdev_ops = { + .ndo_open = pcnet32_open, + .ndo_stop = pcnet32_close, + .ndo_start_xmit = pcnet32_start_xmit, + .ndo_tx_timeout = pcnet32_tx_timeout, + .ndo_get_stats = pcnet32_get_stats, + .ndo_set_multicast_list = pcnet32_set_multicast_list, + .ndo_do_ioctl = pcnet32_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = pcnet32_poll_controller, +#endif +}; + +/* pcnet32_probe1 + * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. + * pdev will be NULL when called from pcnet32_probe_vlbus. + */ +static int __devinit +pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) +{ + struct pcnet32_private *lp; + int i, media; + int fdx, mii, fset, dxsuflo; + int chip_version; + char *chipname; + struct net_device *dev; + struct pcnet32_access *a = NULL; + u8 promaddr[6]; + int ret = -ENODEV; + + /* reset the chip */ + pcnet32_wio_reset(ioaddr); + + /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ + if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { + a = &pcnet32_wio; + } else { + pcnet32_dwio_reset(ioaddr); + if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && + pcnet32_dwio_check(ioaddr)) { + a = &pcnet32_dwio; + } else { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("No access methods\n"); + goto err_release_region; + } + } + + chip_version = + a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16); + if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) + pr_info(" PCnet chip version is %#x\n", chip_version); + if ((chip_version & 0xfff) != 0x003) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("Unsupported chip version\n"); + goto err_release_region; + } + + /* initialize variables */ + fdx = mii = fset = dxsuflo = 0; + chip_version = (chip_version >> 12) & 0xffff; + + switch (chip_version) { + case 0x2420: + chipname = "PCnet/PCI 79C970"; /* PCI */ + break; + case 0x2430: + if (shared) + chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ + else + chipname = "PCnet/32 79C965"; /* 486/VL bus */ + break; + case 0x2621: + chipname = "PCnet/PCI II 79C970A"; /* PCI */ + fdx = 1; + break; + case 0x2623: + chipname = "PCnet/FAST 79C971"; /* PCI */ + fdx = 1; + mii = 1; + fset = 1; + break; + case 0x2624: + chipname = "PCnet/FAST+ 79C972"; /* PCI */ + fdx = 1; + mii = 1; + fset = 1; + break; + case 0x2625: + chipname = "PCnet/FAST III 79C973"; /* PCI */ + fdx = 1; + mii = 1; + break; + case 0x2626: + chipname = "PCnet/Home 79C978"; /* PCI */ + fdx = 1; + /* + * This is based on specs published at www.amd.com. This section + * assumes that a card with a 79C978 wants to go into standard + * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, + * and the module option homepna=1 can select this instead. 
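	 * As a hypothetical example, loading the driver with
	 * "modprobe pcnet32 homepna=1" selects 1Mb HomePNA mode on the
	 * first 79C978 found.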
+ */ + media = a->read_bcr(ioaddr, 49); + media &= ~3; /* default to 10Mb ethernet */ + if (cards_found < MAX_UNITS && homepna[cards_found]) + media |= 1; /* switch to home wiring mode */ + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_DEBUG PFX "media set to %sMbit mode\n", + (media & 1) ? "1" : "10"); + a->write_bcr(ioaddr, 49, media); + break; + case 0x2627: + chipname = "PCnet/FAST III 79C975"; /* PCI */ + fdx = 1; + mii = 1; + break; + case 0x2628: + chipname = "PCnet/PRO 79C976"; + fdx = 1; + mii = 1; + break; + default: + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("PCnet version %#x, no PCnet32 chip\n", + chip_version); + goto err_release_region; + } + + /* + * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit + * starting until the packet is loaded. Strike one for reliability, lose + * one for latency - although on PCI this isn't a big loss. Older chips + * have FIFO's smaller than a packet, so you can't do this. + * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn. + */ + + if (fset) { + a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860)); + a->write_csr(ioaddr, 80, + (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); + dxsuflo = 1; + } + + dev = alloc_etherdev(sizeof(*lp)); + if (!dev) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("Memory allocation failed\n"); + ret = -ENOMEM; + goto err_release_region; + } + + if (pdev) + SET_NETDEV_DEV(dev, &pdev->dev); + + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("%s at %#3lx,", chipname, ioaddr); + + /* In most chips, after a chip reset, the ethernet address is read from the + * station address PROM at the base address and programmed into the + * "Physical Address Registers" CSR12-14. + * As a precautionary measure, we read the PROM values and complain if + * they disagree with the CSRs. If they miscompare, and the PROM addr + * is valid, then the PROM addr is used. + */ + for (i = 0; i < 3; i++) { + unsigned int val; + val = a->read_csr(ioaddr, i + 12) & 0x0ffff; + /* There may be endianness issues here. 
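		   For example, if CSR12 reads 0x2211, the code below yields
		   dev_addr[0] = 0x11 and dev_addr[1] = 0x22.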
*/ + dev->dev_addr[2 * i] = val & 0x0ff; + dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff; + } + + /* read PROM address and compare with CSR address */ + for (i = 0; i < 6; i++) + promaddr[i] = inb(ioaddr + i); + + if (memcmp(promaddr, dev->dev_addr, 6) || + !is_valid_ether_addr(dev->dev_addr)) { + if (is_valid_ether_addr(promaddr)) { + if (pcnet32_debug & NETIF_MSG_PROBE) { + pr_cont(" warning: CSR address invalid,\n"); + pr_info(" using instead PROM address of"); + } + memcpy(dev->dev_addr, promaddr, 6); + } + } + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ + if (!is_valid_ether_addr(dev->perm_addr)) + memset(dev->dev_addr, 0, ETH_ALEN); + + if (pcnet32_debug & NETIF_MSG_PROBE) { + pr_cont(" %pM", dev->dev_addr); + + /* Version 0x2623 and 0x2624 */ + if (((chip_version + 1) & 0xfffe) == 0x2624) { + i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ + pr_info(" tx_start_pt(0x%04x):", i); + switch (i >> 10) { + case 0: + pr_cont(" 20 bytes,"); + break; + case 1: + pr_cont(" 64 bytes,"); + break; + case 2: + pr_cont(" 128 bytes,"); + break; + case 3: + pr_cont("~220 bytes,"); + break; + } + i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ + pr_cont(" BCR18(%x):", i & 0xffff); + if (i & (1 << 5)) + pr_cont("BurstWrEn "); + if (i & (1 << 6)) + pr_cont("BurstRdEn "); + if (i & (1 << 7)) + pr_cont("DWordIO "); + if (i & (1 << 11)) + pr_cont("NoUFlow "); + i = a->read_bcr(ioaddr, 25); + pr_info(" SRAMSIZE=0x%04x,", i << 8); + i = a->read_bcr(ioaddr, 26); + pr_cont(" SRAM_BND=0x%04x,", i << 8); + i = a->read_bcr(ioaddr, 27); + if (i & (1 << 14)) + pr_cont("LowLatRx"); + } + } + + dev->base_addr = ioaddr; + lp = netdev_priv(dev); + /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ + lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block), + &lp->init_dma_addr); + if (!lp->init_block) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("Consistent memory allocation failed\n"); + ret = -ENOMEM; + goto err_free_netdev; + } + lp->pci_dev = pdev; + + lp->dev = dev; + + spin_lock_init(&lp->lock); + + lp->name = chipname; + lp->shared_irq = shared; + lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ + lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); + lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); + lp->mii_if.full_duplex = fdx; + lp->mii_if.phy_id_mask = 0x1f; + lp->mii_if.reg_num_mask = 0x1f; + lp->dxsuflo = dxsuflo; + lp->mii = mii; + lp->chip_version = chip_version; + lp->msg_enable = pcnet32_debug; + if ((cards_found >= MAX_UNITS) || + (options[cards_found] >= sizeof(options_mapping))) + lp->options = PCNET32_PORT_ASEL; + else + lp->options = options_mapping[options[cards_found]]; + lp->mii_if.dev = dev; + lp->mii_if.mdio_read = mdio_read; + lp->mii_if.mdio_write = mdio_write; + + /* napi.weight is used in both the napi and non-napi cases */ + lp->napi.weight = lp->rx_ring_size / 2; + + netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); + + if (fdx && !(lp->options & PCNET32_PORT_ASEL) && + ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) + lp->options |= PCNET32_PORT_FD; + + lp->a = *a; + + /* prior to register_netdev, dev->name is not yet correct */ + if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { + ret = -ENOMEM; + goto err_free_ring; + } + /* 
detect special T1/E1 WAN card by checking for MAC address */ + if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && + dev->dev_addr[2] == 0x75) + lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; + + lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */ + lp->init_block->tlen_rlen = + cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); + for (i = 0; i < 6; i++) + lp->init_block->phys_addr[i] = dev->dev_addr[i]; + lp->init_block->filter[0] = 0x00000000; + lp->init_block->filter[1] = 0x00000000; + lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); + + /* switch pcnet32 to 32bit mode */ + a->write_bcr(ioaddr, 20, 2); + + a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); + a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); + + if (pdev) { /* use the IRQ provided by PCI */ + dev->irq = pdev->irq; + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_cont(" assigned IRQ %d\n", dev->irq); + } else { + unsigned long irq_mask = probe_irq_on(); + + /* + * To auto-IRQ we enable the initialization-done and DMA error + * interrupts. For ISA boards we get a DMA error, but VLB and PCI + * boards will work. + */ + /* Trigger an initialization just for the interrupt. */ + a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT); + mdelay(1); + + dev->irq = probe_irq_off(irq_mask); + if (!dev->irq) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_cont(", failed to detect IRQ line\n"); + ret = -ENODEV; + goto err_free_ring; + } + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_cont(", probed IRQ %d\n", dev->irq); + } + + /* Set the mii phy_id so that we can query the link state */ + if (lp->mii) { + /* lp->phycount and lp->phymask are set to 0 by memset above */ + + lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f; + /* scan for PHYs */ + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + unsigned short id1, id2; + + id1 = mdio_read(dev, i, MII_PHYSID1); + if (id1 == 0xffff) + continue; + id2 = mdio_read(dev, i, MII_PHYSID2); + if (id2 == 0xffff) + continue; + if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624) + continue; /* 79C971 & 79C972 have phantom phy at id 31 */ + lp->phycount++; + lp->phymask |= (1 << i); + lp->mii_if.phy_id = i; + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("Found PHY %04x:%04x at address %d\n", + id1, id2, i); + } + lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); + if (lp->phycount > 1) + lp->options |= PCNET32_PORT_MII; + } + + init_timer(&lp->watchdog_timer); + lp->watchdog_timer.data = (unsigned long)dev; + lp->watchdog_timer.function = (void *)&pcnet32_watchdog; + + /* The PCNET32-specific entries in the device structure. */ + dev->netdev_ops = &pcnet32_netdev_ops; + dev->ethtool_ops = &pcnet32_ethtool_ops; + dev->watchdog_timeo = (5 * HZ); + + /* Fill in the generic fields of the device structure. 
*/ + if (register_netdev(dev)) + goto err_free_ring; + + if (pdev) { + pci_set_drvdata(pdev, dev); + } else { + lp->next = pcnet32_dev; + pcnet32_dev = dev; + } + + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("%s: registered as %s\n", dev->name, lp->name); + cards_found++; + + /* enable LED writes */ + a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000); + + return 0; + +err_free_ring: + pcnet32_free_ring(dev); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); +err_free_netdev: + free_netdev(dev); +err_release_region: + release_region(ioaddr, PCNET32_TOTAL_SIZE); + return ret; +} + +/* if any allocation fails, caller must also call pcnet32_free_ring */ +static int pcnet32_alloc_ring(struct net_device *dev, const char *name) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + lp->tx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, + &lp->tx_ring_dma_addr); + if (lp->tx_ring == NULL) { + netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + return -ENOMEM; + } + + lp->rx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, + &lp->rx_ring_dma_addr); + if (lp->rx_ring == NULL) { + netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + return -ENOMEM; + } + + lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), + GFP_ATOMIC); + if (!lp->tx_dma_addr) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + return -ENOMEM; + } + + lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), + GFP_ATOMIC); + if (!lp->rx_dma_addr) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + return -ENOMEM; + } + + lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!lp->tx_skbuff) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + return -ENOMEM; + } + + lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!lp->rx_skbuff) { + netif_err(lp, drv, dev, "Memory allocation failed\n"); + return -ENOMEM; + } + + return 0; +} + +static void pcnet32_free_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + kfree(lp->tx_skbuff); + lp->tx_skbuff = NULL; + + kfree(lp->rx_skbuff); + lp->rx_skbuff = NULL; + + kfree(lp->tx_dma_addr); + lp->tx_dma_addr = NULL; + + kfree(lp->rx_dma_addr); + lp->rx_dma_addr = NULL; + + if (lp->tx_ring) { + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, lp->tx_ring, + lp->tx_ring_dma_addr); + lp->tx_ring = NULL; + } + + if (lp->rx_ring) { + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, lp->rx_ring, + lp->rx_ring_dma_addr); + lp->rx_ring = NULL; + } +} + +static int pcnet32_open(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + struct pci_dev *pdev = lp->pci_dev; + unsigned long ioaddr = dev->base_addr; + u16 val; + int i; + int rc; + unsigned long flags; + + if (request_irq(dev->irq, pcnet32_interrupt, + lp->shared_irq ? 
IRQF_SHARED : 0, dev->name,
+			(void *)dev)) {
+		return -EAGAIN;
+	}
+
+	spin_lock_irqsave(&lp->lock, flags);
+	/* Check for a valid station address */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		rc = -EINVAL;
+		goto err_free_irq;
+	}
+
+	/* Reset the PCNET32 */
+	lp->a.reset(ioaddr);
+
+	/* switch pcnet32 to 32bit mode */
+	lp->a.write_bcr(ioaddr, 20, 2);
+
+	netif_printk(lp, ifup, KERN_DEBUG, dev,
+		     "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
+		     __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
+		     (u32) (lp->rx_ring_dma_addr),
+		     (u32) (lp->init_dma_addr));
+
+	/* set/reset autoselect bit */
+	val = lp->a.read_bcr(ioaddr, 2) & ~2;
+	if (lp->options & PCNET32_PORT_ASEL)
+		val |= 2;
+	lp->a.write_bcr(ioaddr, 2, val);
+
+	/* handle full duplex setting */
+	if (lp->mii_if.full_duplex) {
+		val = lp->a.read_bcr(ioaddr, 9) & ~3;
+		if (lp->options & PCNET32_PORT_FD) {
+			val |= 1;
+			if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+				val |= 2;
+		} else if (lp->options & PCNET32_PORT_ASEL) {
+			/* workaround for the xSeries 250; turn on for 79C975 only */
+			if (lp->chip_version == 0x2627)
+				val |= 3;
+		}
+		lp->a.write_bcr(ioaddr, 9, val);
+	}
+
+	/* set/reset GPSI bit in test register */
+	val = lp->a.read_csr(ioaddr, 124) & ~0x10;
+	if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
+		val |= 0x10;
+	lp->a.write_csr(ioaddr, 124, val);
+
+	/* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
+	if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
+	    (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
+	     pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
+		if (lp->options & PCNET32_PORT_ASEL) {
+			lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
+			netif_printk(lp, link, KERN_DEBUG, dev,
+				     "Setting 100Mb-Full Duplex\n");
+		}
+	}
+	if (lp->phycount < 2) {
+		/*
+		 * 24 Jun 2004, according to AMD: in order to change the PHY,
+		 * DANAS (or DISPM for 79C976) must be set; then select the speed,
+		 * duplex, and/or enable auto negotiation, and clear DANAS
+		 */
+		if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
+			lp->a.write_bcr(ioaddr, 32,
+					lp->a.read_bcr(ioaddr, 32) | 0x0080);
+			/* disable Auto Negotiation, set 10Mbps, HD */
+			val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
+			if (lp->options & PCNET32_PORT_FD)
+				val |= 0x10;
+			if (lp->options & PCNET32_PORT_100)
+				val |= 0x08;
+			lp->a.write_bcr(ioaddr, 32, val);
+		} else {
+			if (lp->options & PCNET32_PORT_ASEL) {
+				lp->a.write_bcr(ioaddr, 32,
+						lp->a.read_bcr(ioaddr,
+							       32) | 0x0080);
+				/* enable auto negotiate, setup, disable fd */
+				val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
+				val |= 0x20;
+				lp->a.write_bcr(ioaddr, 32, val);
+			}
+		}
+	} else {
+		int first_phy = -1;
+		u16 bmcr;
+		u32 bcr9;
+		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+
+		/*
+		 * There is really no other good way to handle multiple PHYs
+		 * other than turning off all automatics
+		 */
+		val = lp->a.read_bcr(ioaddr, 2);
+		lp->a.write_bcr(ioaddr, 2, val & ~2);
+		val = lp->a.read_bcr(ioaddr, 32);
+		lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7));	/* stop MII manager */
+
+		if (!(lp->options & PCNET32_PORT_ASEL)) {
+			/* setup ecmd */
+			ecmd.port = PORT_MII;
+			ecmd.transceiver = XCVR_INTERNAL;
+			ecmd.autoneg = AUTONEG_DISABLE;
+			ethtool_cmd_speed_set(&ecmd,
+					      (lp->options & PCNET32_PORT_100) ?
+					      SPEED_100 : SPEED_10);
+			bcr9 = lp->a.read_bcr(ioaddr, 9);
+
+			if (lp->options & PCNET32_PORT_FD) {
+				ecmd.duplex = DUPLEX_FULL;
+				bcr9 |= (1 << 0);
+			} else {
+				ecmd.duplex = DUPLEX_HALF;
+				bcr9 &= ~(1 << 0);	/* clear the full-duplex bit */
+			}
+			lp->a.write_bcr(ioaddr, 9, bcr9);
+		}
+
+		for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+			if (lp->phymask & (1 << i)) {
+				/* isolate all but the first PHY */
+				bmcr = mdio_read(dev, i, MII_BMCR);
+				if (first_phy == -1) {
+					first_phy = i;
+					mdio_write(dev, i, MII_BMCR,
+						   bmcr & ~BMCR_ISOLATE);
+				} else {
+					mdio_write(dev, i, MII_BMCR,
+						   bmcr | BMCR_ISOLATE);
+				}
+				/* use mii_ethtool_sset to setup PHY */
+				lp->mii_if.phy_id = i;
+				ecmd.phy_address = i;
+				if (lp->options & PCNET32_PORT_ASEL) {
+					mii_ethtool_gset(&lp->mii_if, &ecmd);
+					ecmd.autoneg = AUTONEG_ENABLE;
+				}
+				mii_ethtool_sset(&lp->mii_if, &ecmd);
+			}
+		}
+		lp->mii_if.phy_id = first_phy;
+		netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
+	}
+
+#ifdef DO_DXSUFLO
+	if (lp->dxsuflo) {	/* Disable transmit stop on underflow */
+		val = lp->a.read_csr(ioaddr, CSR3);
+		val |= 0x40;
+		lp->a.write_csr(ioaddr, CSR3, val);
+	}
+#endif
+
+	lp->init_block->mode =
+	    cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
+	pcnet32_load_multicast(dev);
+
+	if (pcnet32_init_ring(dev)) {
+		rc = -ENOMEM;
+		goto err_free_ring;
+	}
+
+	napi_enable(&lp->napi);
+
+	/* Re-initialize the PCNET32, and start it when done. */
+	lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
+	lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
+
+	lp->a.write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */
+	lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
+
+	netif_start_queue(dev);
+
+	if (lp->chip_version >= PCNET32_79C970A) {
+		/* Print the link status and start the watchdog */
+		pcnet32_check_media(dev, 1);
+		mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
+	}
+
+	i = 0;
+	while (i++ < 100)
+		if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
+			break;
+	/*
+	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+	 * reports that doing so triggers a bug in the '974.
+	 */
+	lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
+
+	netif_printk(lp, ifup, KERN_DEBUG, dev,
+		     "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
+		     i,
+		     (u32) (lp->init_dma_addr),
+		     lp->a.read_csr(ioaddr, CSR0));
+
+	spin_unlock_irqrestore(&lp->lock, flags);
+
+	return 0;	/* Always succeed */
+
+err_free_ring:
+	/* free any allocated skbuffs */
+	pcnet32_purge_rx_ring(dev);
+
+	/*
+	 * Switch back to 16bit mode to avoid problems with dumb
+	 * DOS packet driver after a warm reboot
+	 */
+	lp->a.write_bcr(ioaddr, 20, 4);
+
+err_free_irq:
+	spin_unlock_irqrestore(&lp->lock, flags);
+	free_irq(dev->irq, dev);
+	return rc;
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.).  Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting.  As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit.  It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now.
dplatt@3do.com + */ + +static void pcnet32_purge_tx_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int i; + + for (i = 0; i < lp->tx_ring_size; i++) { + lp->tx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + if (lp->tx_skbuff[i]) { + pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], + lp->tx_skbuff[i]->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_any(lp->tx_skbuff[i]); + } + lp->tx_skbuff[i] = NULL; + lp->tx_dma_addr[i] = 0; + } +} + +/* Initialize the PCNET32 Rx and Tx rings. */ +static int pcnet32_init_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int i; + + lp->tx_full = 0; + lp->cur_rx = lp->cur_tx = 0; + lp->dirty_rx = lp->dirty_tx = 0; + + for (i = 0; i < lp->rx_ring_size; i++) { + struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; + if (rx_skbuff == NULL) { + lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB); + rx_skbuff = lp->rx_skbuff[i]; + if (!rx_skbuff) { + /* there is not much we can do at this point */ + netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n", + __func__); + return -1; + } + skb_reserve(rx_skbuff, NET_IP_ALIGN); + } + + rmb(); + if (lp->rx_dma_addr[i] == 0) + lp->rx_dma_addr[i] = + pci_map_single(lp->pci_dev, rx_skbuff->data, + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); + lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); + wmb(); /* Make sure owner changes after all others are visible */ + lp->rx_ring[i].status = cpu_to_le16(0x8000); + } + /* The Tx buffer address is filled in as needed, but we do need to clear + * the upper ownership bit. */ + for (i = 0; i < lp->tx_ring_size; i++) { + lp->tx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + lp->tx_ring[i].base = 0; + lp->tx_dma_addr[i] = 0; + } + + lp->init_block->tlen_rlen = + cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); + for (i = 0; i < 6; i++) + lp->init_block->phys_addr[i] = dev->dev_addr[i]; + lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); + wmb(); /* Make sure all changes are visible */ + return 0; +} + +/* the pcnet32 has been issued a stop or reset. Wait for the stop bit + * then flush the pending transmit operations, re-initialize the ring, + * and tell the chip to initialize. + */ +static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + int i; + + /* wait for stop */ + for (i = 0; i < 100; i++) + if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP) + break; + + if (i >= 100) + netif_err(lp, drv, dev, "%s timed out waiting for stop\n", + __func__); + + pcnet32_purge_tx_ring(dev); + if (pcnet32_init_ring(dev)) + return; + + /* ReInit Ring */ + lp->a.write_csr(ioaddr, CSR0, CSR0_INIT); + i = 0; + while (i++ < 1000) + if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON) + break; + + lp->a.write_csr(ioaddr, CSR0, csr0_bits); +} + +static void pcnet32_tx_timeout(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr, flags; + + spin_lock_irqsave(&lp->lock, flags); + /* Transmitter timeout, serious problems. 
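	   The networking core invokes this handler when a transmit has not
	   completed within dev->watchdog_timeo (5 * HZ, as set in
	   pcnet32_probe1); the chip is stopped, dumped and restarted below.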
*/ + if (pcnet32_debug & NETIF_MSG_DRV) + pr_err("%s: transmit timed out, status %4.4x, resetting\n", + dev->name, lp->a.read_csr(ioaddr, CSR0)); + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); + dev->stats.tx_errors++; + if (netif_msg_tx_err(lp)) { + int i; + printk(KERN_DEBUG + " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", + lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", + lp->cur_rx); + for (i = 0; i < lp->rx_ring_size; i++) + printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", + le32_to_cpu(lp->rx_ring[i].base), + (-le16_to_cpu(lp->rx_ring[i].buf_length)) & + 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), + le16_to_cpu(lp->rx_ring[i].status)); + for (i = 0; i < lp->tx_ring_size; i++) + printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", + le32_to_cpu(lp->tx_ring[i].base), + (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, + le32_to_cpu(lp->tx_ring[i].misc), + le16_to_cpu(lp->tx_ring[i].status)); + printk("\n"); + } + pcnet32_restart(dev, CSR0_NORMAL); + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + + spin_unlock_irqrestore(&lp->lock, flags); +} + +static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + u16 status; + int entry; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + + netif_printk(lp, tx_queued, KERN_DEBUG, dev, + "%s() called, csr0 %4.4x\n", + __func__, lp->a.read_csr(ioaddr, CSR0)); + + /* Default status -- will not enable Successful-TxDone + * interrupt when that option is available to us. + */ + status = 0x8300; + + /* Fill in a Tx ring entry */ + + /* Mask to ring buffer boundary. */ + entry = lp->cur_tx & lp->tx_mod_mask; + + /* Caution: the write order is important here, set the status + * with the "ownership" bits last. */ + + lp->tx_ring[entry].length = cpu_to_le16(-skb->len); + + lp->tx_ring[entry].misc = 0x00000000; + + lp->tx_skbuff[entry] = skb; + lp->tx_dma_addr[entry] = + pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); + lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); + wmb(); /* Make sure owner changes after all others are visible */ + lp->tx_ring[entry].status = cpu_to_le16(status); + + lp->cur_tx++; + dev->stats.tx_bytes += skb->len; + + /* Trigger an immediate send poll. */ + lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); + + if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { + lp->tx_full = 1; + netif_stop_queue(dev); + } + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; +} + +/* The PCNET32 interrupt handler. */ +static irqreturn_t +pcnet32_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct pcnet32_private *lp; + unsigned long ioaddr; + u16 csr0; + int boguscnt = max_interrupt_work; + + ioaddr = dev->base_addr; + lp = netdev_priv(dev); + + spin_lock(&lp->lock); + + csr0 = lp->a.read_csr(ioaddr, CSR0); + while ((csr0 & 0x8f00) && --boguscnt >= 0) { + if (csr0 == 0xffff) + break; /* PCMCIA remove happened */ + /* Acknowledge all of the current interrupt sources ASAP. */ + lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f); + + netif_printk(lp, intr, KERN_DEBUG, dev, + "interrupt csr0=%#2.2x new csr=%#2.2x\n", + csr0, lp->a.read_csr(ioaddr, CSR0)); + + /* Log misc errors. */ + if (csr0 & 0x4000) + dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & 0x1000) { + /* + * This happens when our receive ring is full. 
This + * shouldn't be a problem as we will see normal rx + * interrupts for the frames in the receive ring. But + * there are some PCI chipsets (I can reproduce this + * on SP3G with Intel saturn chipset) which have + * sometimes problems and will fill up the receive + * ring with error descriptors. In this situation we + * don't get a rx interrupt, but a missed frame + * interrupt sooner or later. + */ + dev->stats.rx_errors++; /* Missed a Rx frame. */ + } + if (csr0 & 0x0800) { + netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n", + csr0); + /* unlike for the lance, there is no restart needed */ + } + if (napi_schedule_prep(&lp->napi)) { + u16 val; + /* set interrupt masks */ + val = lp->a.read_csr(ioaddr, CSR3); + val |= 0x5f00; + lp->a.write_csr(ioaddr, CSR3, val); + + __napi_schedule(&lp->napi); + break; + } + csr0 = lp->a.read_csr(ioaddr, CSR0); + } + + netif_printk(lp, intr, KERN_DEBUG, dev, + "exiting interrupt, csr0=%#4.4x\n", + lp->a.read_csr(ioaddr, CSR0)); + + spin_unlock(&lp->lock); + + return IRQ_HANDLED; +} + +static int pcnet32_close(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + + del_timer_sync(&lp->watchdog_timer); + + netif_stop_queue(dev); + napi_disable(&lp->napi); + + spin_lock_irqsave(&lp->lock, flags); + + dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); + + netif_printk(lp, ifdown, KERN_DEBUG, dev, + "Shutting down ethercard, status was %2.2x\n", + lp->a.read_csr(ioaddr, CSR0)); + + /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); + + /* + * Switch back to 16bit mode to avoid problems with dumb + * DOS packet driver after a warm reboot + */ + lp->a.write_bcr(ioaddr, 20, 4); + + spin_unlock_irqrestore(&lp->lock, flags); + + free_irq(dev->irq, dev); + + spin_lock_irqsave(&lp->lock, flags); + + pcnet32_purge_rx_ring(dev); + pcnet32_purge_tx_ring(dev); + + spin_unlock_irqrestore(&lp->lock, flags); + + return 0; +} + +static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); + spin_unlock_irqrestore(&lp->lock, flags); + + return &dev->stats; +} + +/* taken from the sunlance driver, which it took from the depca driver */ +static void pcnet32_load_multicast(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + volatile struct pcnet32_init_block *ib = lp->init_block; + volatile __le16 *mcast_table = (__le16 *)ib->filter; + struct netdev_hw_addr *ha; + unsigned long ioaddr = dev->base_addr; + int i; + u32 crc; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) { + ib->filter[0] = cpu_to_le32(~0U); + ib->filter[1] = cpu_to_le32(~0U); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); + return; + } + /* clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc = crc >> 26; + mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf)); + } + for (i = 0; i < 4; i++) + lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i, + 
le16_to_cpu(mcast_table[i])); +} + +/* + * Set or clear the multicast filter for this adaptor. + */ +static void pcnet32_set_multicast_list(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr, flags; + struct pcnet32_private *lp = netdev_priv(dev); + int csr15, suspended; + + spin_lock_irqsave(&lp->lock, flags); + suspended = pcnet32_suspend(dev, &flags, 0); + csr15 = lp->a.read_csr(ioaddr, CSR15); + if (dev->flags & IFF_PROMISC) { + /* Log any net taps. */ + netif_info(lp, hw, dev, "Promiscuous mode enabled\n"); + lp->init_block->mode = + cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << + 7); + lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000); + } else { + lp->init_block->mode = + cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); + lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff); + pcnet32_load_multicast(dev); + } + + if (suspended) { + int csr5; + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = lp->a.read_csr(ioaddr, CSR5); + lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); + } else { + lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); + pcnet32_restart(dev, CSR0_NORMAL); + netif_wake_queue(dev); + } + + spin_unlock_irqrestore(&lp->lock, flags); +} + +/* This routine assumes that the lp->lock is held */ +static int mdio_read(struct net_device *dev, int phy_id, int reg_num) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + u16 val_out; + + if (!lp->mii) + return 0; + + lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); + val_out = lp->a.read_bcr(ioaddr, 34); + + return val_out; +} + +/* This routine assumes that the lp->lock is held */ +static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + + if (!lp->mii) + return; + + lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); + lp->a.write_bcr(ioaddr, 34, val); +} + +static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int rc; + unsigned long flags; + + /* SIOC[GS]MIIxxx ioctls */ + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); + spin_unlock_irqrestore(&lp->lock, flags); + } else { + rc = -EOPNOTSUPP; + } + + return rc; +} + +static int pcnet32_check_otherphy(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + struct mii_if_info mii = lp->mii_if; + u16 bmcr; + int i; + + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + if (i == lp->mii_if.phy_id) + continue; /* skip active phy */ + if (lp->phymask & (1 << i)) { + mii.phy_id = i; + if (mii_link_ok(&mii)) { + /* found PHY with active link */ + netif_info(lp, link, dev, "Using PHY number %d\n", + i); + + /* isolate inactive phy */ + bmcr = + mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); + mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, + bmcr | BMCR_ISOLATE); + + /* de-isolate new phy */ + bmcr = mdio_read(dev, i, MII_BMCR); + mdio_write(dev, i, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* set new phy address */ + lp->mii_if.phy_id = i; + return 1; + } + } + } + return 0; +} + +/* + * Show the status of the media. Similar to mii_check_media however it + * correctly shows the link speed for all (tested) pcnet32 variants. + * Devices with no mii just report link state without speed. + * + * Caller is assumed to hold and release the lp->lock. 
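+ * A typical call sequence, as used by pcnet32_watchdog() below:
+ *
+ *	spin_lock_irqsave(&lp->lock, flags);
+ *	pcnet32_check_media(dev, 0);
+ *	spin_unlock_irqrestore(&lp->lock, flags);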
+ */ + +static void pcnet32_check_media(struct net_device *dev, int verbose) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int curr_link; + int prev_link = netif_carrier_ok(dev) ? 1 : 0; + u32 bcr9; + + if (lp->mii) { + curr_link = mii_link_ok(&lp->mii_if); + } else { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0); + } + if (!curr_link) { + if (prev_link || verbose) { + netif_carrier_off(dev); + netif_info(lp, link, dev, "link down\n"); + } + if (lp->phycount > 1) { + curr_link = pcnet32_check_otherphy(dev); + prev_link = 0; + } + } else if (verbose || !prev_link) { + netif_carrier_on(dev); + if (lp->mii) { + if (netif_msg_link(lp)) { + struct ethtool_cmd ecmd = { + .cmd = ETHTOOL_GSET }; + mii_ethtool_gset(&lp->mii_if, &ecmd); + netdev_info(dev, "link up, %uMbps, %s-duplex\n", + ethtool_cmd_speed(&ecmd), + (ecmd.duplex == DUPLEX_FULL) + ? "full" : "half"); + } + bcr9 = lp->a.read_bcr(dev->base_addr, 9); + if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { + if (lp->mii_if.full_duplex) + bcr9 |= (1 << 0); + else + bcr9 &= ~(1 << 0); + lp->a.write_bcr(dev->base_addr, 9, bcr9); + } + } else { + netif_info(lp, link, dev, "link up\n"); + } + } +} + +/* + * Check for loss of link and link establishment. + * Can not use mii_check_media because it does nothing if mode is forced. + */ + +static void pcnet32_watchdog(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + + /* Print the link status if it has changed */ + spin_lock_irqsave(&lp->lock, flags); + pcnet32_check_media(dev, 0); + spin_unlock_irqrestore(&lp->lock, flags); + + mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT)); +} + +static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (netif_running(dev)) { + netif_device_detach(dev); + pcnet32_close(dev); + } + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int pcnet32_pm_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (netif_running(dev)) { + pcnet32_open(dev); + netif_device_attach(dev); + } + return 0; +} + +static void __devexit pcnet32_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct pcnet32_private *lp = netdev_priv(dev); + + unregister_netdev(dev); + pcnet32_free_ring(dev); + release_region(dev->base_addr, PCNET32_TOTAL_SIZE); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); + free_netdev(dev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } +} + +static struct pci_driver pcnet32_driver = { + .name = DRV_NAME, + .probe = pcnet32_probe_pci, + .remove = __devexit_p(pcnet32_remove_one), + .id_table = pcnet32_pci_tbl, + .suspend = pcnet32_pm_suspend, + .resume = pcnet32_pm_resume, +}; + +/* An additional parameter that may be passed in... 
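+ * A hypothetical invocation exercising the parameters declared below:
+ *
+ *	modprobe pcnet32 debug=5 rx_copybreak=200 options=12,12
+ *
+ * would set the message level, the copy-only-tiny-frames threshold and
+ * the port options for the first two cards.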
*/ +static int debug = -1; +static int tx_start_pt = -1; +static int pcnet32_have_pci; + +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, DRV_NAME " debug level"); +module_param(max_interrupt_work, int, 0); +MODULE_PARM_DESC(max_interrupt_work, + DRV_NAME " maximum events handled per interrupt"); +module_param(rx_copybreak, int, 0); +MODULE_PARM_DESC(rx_copybreak, + DRV_NAME " copy breakpoint for copy-only-tiny-frames"); +module_param(tx_start_pt, int, 0); +MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); +module_param(pcnet32vlb, int, 0); +MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)"); +module_param_array(options, int, NULL, 0); +MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)"); +module_param_array(full_duplex, int, NULL, 0); +MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); +/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ +module_param_array(homepna, int, NULL, 0); +MODULE_PARM_DESC(homepna, + DRV_NAME + " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet"); + +MODULE_AUTHOR("Thomas Bogendoerfer"); +MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); +MODULE_LICENSE("GPL"); + +#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +static int __init pcnet32_init_module(void) +{ + pr_info("%s", version); + + pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); + + if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) + tx_start = tx_start_pt; + + /* find the PCI devices */ + if (!pci_register_driver(&pcnet32_driver)) + pcnet32_have_pci = 1; + + /* should we find any remaining VLbus devices ? */ + if (pcnet32vlb) + pcnet32_probe_vlbus(pcnet32_portlist); + + if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) + pr_info("%d cards_found\n", cards_found); + + return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; +} + +static void __exit pcnet32_cleanup_module(void) +{ + struct net_device *next_dev; + + while (pcnet32_dev) { + struct pcnet32_private *lp = netdev_priv(pcnet32_dev); + next_dev = lp->next; + unregister_netdev(pcnet32_dev); + pcnet32_free_ring(pcnet32_dev); + release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); + free_netdev(pcnet32_dev); + pcnet32_dev = next_dev; + } + + if (pcnet32_have_pci) + pci_unregister_driver(&pcnet32_driver); +} + +module_init(pcnet32_init_module); +module_exit(pcnet32_cleanup_module); + +/* + * Local variables: + * c-indent-level: 4 + * tab-width: 8 + * End: + */ diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c new file mode 100644 index 0000000..7d9ec23 --- /dev/null +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -0,0 +1,961 @@ +/* sun3lance.c: Ethernet driver for SUN3 Lance chip */ +/* + + Sun3 Lance ethernet driver, by Sam Creasey (sammy@users.qual.net). + This driver is a part of the linux kernel, and is thus distributed + under the GNU General Public License. + + The values used in LANCE_OBIO and LANCE_IRQ seem to be empirically + true for the correct IRQ and address of the lance registers. They + have not been widely tested, however. What we probably need is a + "proper" way to search for a device in the sun3's prom, but, alas, + linux has no such thing. + + This driver is largely based on atarilance.c, by Roman Hodek. 
Other
+   sources of inspiration were the NetBSD sun3 am7990 driver, and the
+   linux sparc lance driver (sunlance.c).
+
+   There are more assumptions made throughout this driver; it almost
+   certainly still needs work, but it does work at least for RARP/BOOTP and
+   mounting the root NFS filesystem.
+
+*/
+
+static char *version = "sun3lance.c: v1.2 1/12/2001  Sam Creasey (sammy@sammy.net)\n";
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/dvma.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+
+#ifdef CONFIG_SUN3
+#include <asm/sun3mmu.h>
+#else
+#include <asm/sun3xprom.h>
+#endif
+
+/* sun3/60 addr/irq for the lance chip.  If your sun is different,
+   change this. */
+#define LANCE_OBIO 0x120000
+#define LANCE_IRQ IRQ_AUTO_3
+
+/* Debug level:
+ *  0 = silent, print only serious errors
+ *  1 = normal, print error messages
+ *  2 = debug, print debug infos
+ *  3 = debug, print even more debug infos (packet data)
+ */
+
+#define	LANCE_DEBUG	0
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
+MODULE_LICENSE("GPL");
+
+#define	DPRINTK(n,a) \
+	do { \
+		if (lance_debug >= n) \
+			printk a; \
+	} while( 0 )
+
+
+/* we use 8 TX buffers and 32 RX buffers, all in one shared memory
+   block.  These values are expressed as log2. */
+
+#define TX_LOG_RING_SIZE	3
+#define RX_LOG_RING_SIZE	5
+
+/* These are the derived values */
+
+#define TX_RING_SIZE		(1 << TX_LOG_RING_SIZE)
+#define TX_RING_LEN_BITS	(TX_LOG_RING_SIZE << 5)
+#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE		(1 << RX_LOG_RING_SIZE)
+#define RX_RING_LEN_BITS	(RX_LOG_RING_SIZE << 5)
+#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)
+
+/* Definitions for packet buffer access: */
+#define PKT_BUF_SZ		1544
+
+/* Get the address of a packet buffer corresponding to a given buffer head */
+#define	PKTBUF_ADDR(head)	(void *)((unsigned long)(MEM) | (head)->base)
+
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+	unsigned short	base;		/* Low word of base addr */
+	volatile unsigned char	flag;
+	unsigned char	base_hi;	/* High word of base addr (unused) */
+	short buf_length;		/* This length is 2s complement! */
+	volatile short msg_length;	/* This length is "normal". */
+};
+
+struct lance_tx_head {
+	unsigned short	base;		/* Low word of base addr */
+	volatile unsigned char	flag;
+	unsigned char	base_hi;	/* High word of base addr (unused) */
+	short length;			/* Length is 2s complement! */
+	volatile short misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+	unsigned short	mode;		/* Pre-set mode */
+	unsigned char	hwaddr[6];	/* Physical ethernet address */
+	unsigned int	filter[2];	/* Multicast filter (unused). */
+	/* Receive and transmit ring base, along with length bits. */
+	unsigned short rdra;
+	unsigned short rlen;
+	unsigned short tdra;
+	unsigned short tlen;
+	unsigned short pad[4];	/* is this needed?
*/ +}; + +/* The whole layout of the Lance shared memory */ +struct lance_memory { + struct lance_init_block init; + struct lance_tx_head tx_head[TX_RING_SIZE]; + struct lance_rx_head rx_head[RX_RING_SIZE]; + char rx_data[RX_RING_SIZE][PKT_BUF_SZ]; + char tx_data[TX_RING_SIZE][PKT_BUF_SZ]; +}; + +/* The driver's private device structure */ + +struct lance_private { + volatile unsigned short *iobase; + struct lance_memory *mem; + int new_rx, new_tx; /* The next free ring entry */ + int old_tx, old_rx; /* ring entry to be processed */ +/* These two must be longs for set_bit() */ + long tx_full; + long lock; +}; + +/* I/O register access macros */ + +#define MEM lp->mem +#define DREG lp->iobase[0] +#define AREG lp->iobase[1] +#define REGA(a) (*( AREG = (a), &DREG )) + +/* Definitions for the Lance */ + +/* tx_head flags */ +#define TMD1_ENP 0x01 /* end of packet */ +#define TMD1_STP 0x02 /* start of packet */ +#define TMD1_DEF 0x04 /* deferred */ +#define TMD1_ONE 0x08 /* one retry needed */ +#define TMD1_MORE 0x10 /* more than one retry needed */ +#define TMD1_ERR 0x40 /* error summary */ +#define TMD1_OWN 0x80 /* ownership (set: chip owns) */ + +#define TMD1_OWN_CHIP TMD1_OWN +#define TMD1_OWN_HOST 0 + +/* tx_head misc field */ +#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */ +#define TMD3_RTRY 0x0400 /* failed after 16 retries */ +#define TMD3_LCAR 0x0800 /* carrier lost */ +#define TMD3_LCOL 0x1000 /* late collision */ +#define TMD3_UFLO 0x4000 /* underflow (late memory) */ +#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */ + +/* rx_head flags */ +#define RMD1_ENP 0x01 /* end of packet */ +#define RMD1_STP 0x02 /* start of packet */ +#define RMD1_BUFF 0x04 /* buffer error */ +#define RMD1_CRC 0x08 /* CRC error */ +#define RMD1_OFLO 0x10 /* overflow */ +#define RMD1_FRAM 0x20 /* framing error */ +#define RMD1_ERR 0x40 /* error summary */ +#define RMD1_OWN 0x80 /* ownership (set: ship owns) */ + +#define RMD1_OWN_CHIP RMD1_OWN +#define RMD1_OWN_HOST 0 + +/* register names */ +#define CSR0 0 /* mode/status */ +#define CSR1 1 /* init block addr (low) */ +#define CSR2 2 /* init block addr (high) */ +#define CSR3 3 /* misc */ +#define CSR8 8 /* address filter */ +#define CSR15 15 /* promiscuous mode */ + +/* CSR0 */ +/* (R=readable, W=writeable, S=set on write, C=clear on write) */ +#define CSR0_INIT 0x0001 /* initialize (RS) */ +#define CSR0_STRT 0x0002 /* start (RS) */ +#define CSR0_STOP 0x0004 /* stop (RS) */ +#define CSR0_TDMD 0x0008 /* transmit demand (RS) */ +#define CSR0_TXON 0x0010 /* transmitter on (R) */ +#define CSR0_RXON 0x0020 /* receiver on (R) */ +#define CSR0_INEA 0x0040 /* interrupt enable (RW) */ +#define CSR0_INTR 0x0080 /* interrupt active (R) */ +#define CSR0_IDON 0x0100 /* initialization done (RC) */ +#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */ +#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */ +#define CSR0_MERR 0x0800 /* memory error (RC) */ +#define CSR0_MISS 0x1000 /* missed frame (RC) */ +#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */ +#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */ +#define CSR0_ERR 0x8000 /* error (RC) */ + +/* CSR3 */ +#define CSR3_BCON 0x0001 /* byte control */ +#define CSR3_ACON 0x0002 /* ALE control */ +#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */ + +/***************************** Prototypes *****************************/ + +static int lance_probe( struct net_device *dev); +static int lance_open( struct net_device *dev ); +static void lance_init_ring( struct 
net_device *dev ); +static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static irqreturn_t lance_interrupt( int irq, void *dev_id); +static int lance_rx( struct net_device *dev ); +static int lance_close( struct net_device *dev ); +static void set_multicast_list( struct net_device *dev ); + +/************************* End of Prototypes **************************/ + +struct net_device * __init sun3lance_probe(int unit) +{ + struct net_device *dev; + static int found; + int err = -ENODEV; + + if (!MACH_IS_SUN3 && !MACH_IS_SUN3X) + return ERR_PTR(-ENODEV); + + /* check that this machine has an onboard lance */ + switch(idprom->id_machtype) { + case SM_SUN3|SM_3_50: + case SM_SUN3|SM_3_60: + case SM_SUN3X|SM_3_80: + /* these machines have lance */ + break; + + default: + return ERR_PTR(-ENODEV); + } + + if (found) + return ERR_PTR(-ENODEV); + + dev = alloc_etherdev(sizeof(struct lance_private)); + if (!dev) + return ERR_PTR(-ENOMEM); + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + + if (!lance_probe(dev)) + goto out; + + err = register_netdev(dev); + if (err) + goto out1; + found = 1; + return dev; + +out1: +#ifdef CONFIG_SUN3 + iounmap((void __iomem *)dev->base_addr); +#endif +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_multicast_list = set_multicast_list, + .ndo_set_mac_address = NULL, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __init lance_probe( struct net_device *dev) +{ + unsigned long ioaddr; + + struct lance_private *lp; + int i; + static int did_version; + volatile unsigned short *ioaddr_probe; + unsigned short tmp1, tmp2; + +#ifdef CONFIG_SUN3 + ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE); + if (!ioaddr) + return 0; +#else + ioaddr = SUN3X_LANCE; +#endif + + /* test to see if there's really a lance here */ + /* (CSRO_INIT shouldn't be readable) */ + + ioaddr_probe = (volatile unsigned short *)ioaddr; + tmp1 = ioaddr_probe[0]; + tmp2 = ioaddr_probe[1]; + + ioaddr_probe[1] = CSR0; + ioaddr_probe[0] = CSR0_INIT | CSR0_STOP; + + if(ioaddr_probe[0] != CSR0_STOP) { + ioaddr_probe[0] = tmp1; + ioaddr_probe[1] = tmp2; + +#ifdef CONFIG_SUN3 + iounmap((void __iomem *)ioaddr); +#endif + return 0; + } + + lp = netdev_priv(dev); + + /* XXX - leak? 
*/ + MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000); + if (MEM == NULL) { +#ifdef CONFIG_SUN3 + iounmap((void __iomem *)ioaddr); +#endif + printk(KERN_WARNING "SUN3 Lance couldn't allocate DVMA memory\n"); + return 0; + } + + lp->iobase = (volatile unsigned short *)ioaddr; + dev->base_addr = (unsigned long)ioaddr; /* informational only */ + + REGA(CSR0) = CSR0_STOP; + + if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) { +#ifdef CONFIG_SUN3 + iounmap((void __iomem *)ioaddr); +#endif + dvma_free((void *)MEM); + printk(KERN_WARNING "SUN3 Lance unable to allocate IRQ\n"); + return 0; + } + dev->irq = (unsigned short)LANCE_IRQ; + + + printk("%s: SUN3 Lance at io %#lx, mem %#lx, irq %d, hwaddr ", + dev->name, + (unsigned long)ioaddr, + (unsigned long)MEM, + dev->irq); + + /* copy in the ethernet address from the prom */ + for(i = 0; i < 6 ; i++) + dev->dev_addr[i] = idprom->id_ethaddr[i]; + + /* tell the card its ether address, bytes swapped */ + MEM->init.hwaddr[0] = dev->dev_addr[1]; + MEM->init.hwaddr[1] = dev->dev_addr[0]; + MEM->init.hwaddr[2] = dev->dev_addr[3]; + MEM->init.hwaddr[3] = dev->dev_addr[2]; + MEM->init.hwaddr[4] = dev->dev_addr[5]; + MEM->init.hwaddr[5] = dev->dev_addr[4]; + + printk("%pM\n", dev->dev_addr); + + MEM->init.mode = 0x0000; + MEM->init.filter[0] = 0x00000000; + MEM->init.filter[1] = 0x00000000; + MEM->init.rdra = dvma_vtob(MEM->rx_head); + MEM->init.rlen = (RX_LOG_RING_SIZE << 13) | + (dvma_vtob(MEM->rx_head) >> 16); + MEM->init.tdra = dvma_vtob(MEM->tx_head); + MEM->init.tlen = (TX_LOG_RING_SIZE << 13) | + (dvma_vtob(MEM->tx_head) >> 16); + + DPRINTK(2, ("initaddr: %08lx rx_ring: %08lx tx_ring: %08lx\n", + dvma_vtob(&(MEM->init)), dvma_vtob(MEM->rx_head), + (dvma_vtob(MEM->tx_head)))); + + if (did_version++ == 0) + printk( version ); + + dev->netdev_ops = &lance_netdev_ops; +// KLUDGE -- REMOVE ME + set_bit(__LINK_STATE_PRESENT, &dev->state); + + + return 1; +} + +static int lance_open( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int i; + + DPRINTK( 2, ( "%s: lance_open()\n", dev->name )); + + REGA(CSR0) = CSR0_STOP; + + lance_init_ring(dev); + + /* From now on, AREG is kept to point to CSR0 */ + REGA(CSR0) = CSR0_INIT; + + i = 1000000; + while (--i > 0) + if (DREG & CSR0_IDON) + break; + if (i <= 0 || (DREG & CSR0_ERR)) { + DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", + dev->name, i, DREG )); + DREG = CSR0_STOP; + return -EIO; + } + + DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA; + + netif_start_queue(dev); + + DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); + + return 0; +} + + +/* Initialize the LANCE Rx and Tx rings.
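+ *
+ * A note on the buffer-length encoding used below: lengths are written
+ * as negative 16-bit (two's complement) values, and the chip expects
+ * the upper four bits of the length field to read as ones.  Assuming
+ * PKT_BUF_SZ is 1544 (the value the sunlance driver further down in
+ * this patch uses), -1544 is 0xf9f8 in 16 bits, and 0xf9f8 | 0xf000 is
+ * still 0xf9f8, so "-PKT_BUF_SZ | 0xf000" encodes the size and forces
+ * the must-be-ones bits in one expression.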
*/ + +static void lance_init_ring( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int i; + + lp->lock = 0; + lp->tx_full = 0; + lp->new_rx = lp->new_tx = 0; + lp->old_rx = lp->old_tx = 0; + + for( i = 0; i < TX_RING_SIZE; i++ ) { + MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]); + MEM->tx_head[i].flag = 0; + MEM->tx_head[i].base_hi = + (dvma_vtob(MEM->tx_data[i])) >>16; + MEM->tx_head[i].length = 0; + MEM->tx_head[i].misc = 0; + } + + for( i = 0; i < RX_RING_SIZE; i++ ) { + MEM->rx_head[i].base = dvma_vtob(MEM->rx_data[i]); + MEM->rx_head[i].flag = RMD1_OWN_CHIP; + MEM->rx_head[i].base_hi = + (dvma_vtob(MEM->rx_data[i])) >> 16; + MEM->rx_head[i].buf_length = -PKT_BUF_SZ | 0xf000; + MEM->rx_head[i].msg_length = 0; + } + + /* tell the card its ether address, bytes swapped */ + MEM->init.hwaddr[0] = dev->dev_addr[1]; + MEM->init.hwaddr[1] = dev->dev_addr[0]; + MEM->init.hwaddr[2] = dev->dev_addr[3]; + MEM->init.hwaddr[3] = dev->dev_addr[2]; + MEM->init.hwaddr[4] = dev->dev_addr[5]; + MEM->init.hwaddr[5] = dev->dev_addr[4]; + + MEM->init.mode = 0x0000; + MEM->init.filter[0] = 0x00000000; + MEM->init.filter[1] = 0x00000000; + MEM->init.rdra = dvma_vtob(MEM->rx_head); + MEM->init.rlen = (RX_LOG_RING_SIZE << 13) | + (dvma_vtob(MEM->rx_head) >> 16); + MEM->init.tdra = dvma_vtob(MEM->tx_head); + MEM->init.tlen = (TX_LOG_RING_SIZE << 13) | + (dvma_vtob(MEM->tx_head) >> 16); + + + /* tell the lance the address of its init block */ + REGA(CSR1) = dvma_vtob(&(MEM->init)); + REGA(CSR2) = dvma_vtob(&(MEM->init)) >> 16; + +#ifdef CONFIG_SUN3X + REGA(CSR3) = CSR3_BSWP | CSR3_ACON | CSR3_BCON; +#else + REGA(CSR3) = CSR3_BSWP; +#endif + +} + + +static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int entry, len; + struct lance_tx_head *head; + unsigned long flags; + + DPRINTK( 1, ( "%s: transmit start.\n", + dev->name)); + + /* Transmitter timeout, serious problems. */ + if (netif_queue_stopped(dev)) { + int tickssofar = jiffies - dev_trans_start(dev); + if (tickssofar < HZ/5) + return NETDEV_TX_BUSY; + + DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n", + dev->name, DREG )); + DREG = CSR0_STOP; + /* + * Always set BSWP after a STOP as STOP puts it back into + * little endian mode. + */ + REGA(CSR3) = CSR3_BSWP; + dev->stats.tx_errors++; + + if(lance_debug >= 2) { + int i; + printk("Ring data: old_tx %d new_tx %d%s new_rx %d\n", + lp->old_tx, lp->new_tx, + lp->tx_full ? " (full)" : "", + lp->new_rx ); + for( i = 0 ; i < RX_RING_SIZE; i++ ) + printk( "rx #%d: base=%04x blen=%04x mlen=%04x\n", + i, MEM->rx_head[i].base, + -MEM->rx_head[i].buf_length, + MEM->rx_head[i].msg_length); + for( i = 0 ; i < TX_RING_SIZE; i++ ) + printk("tx #%d: base=%04x len=%04x misc=%04x\n", + i, MEM->tx_head[i].base, + -MEM->tx_head[i].length, + MEM->tx_head[i].misc ); + } + + lance_init_ring(dev); + REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; + + netif_start_queue(dev); + + return NETDEV_TX_OK; + } + + + /* Block a timer-based transmit from overlapping. This could better be + done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */ + + /* Block a timer-based transmit from overlapping with us by + stopping the queue for a bit... */ + + netif_stop_queue(dev); + + if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) { + printk( "%s: tx queue lock!\n", dev->name); + /* don't clear dev->tbusy flag.
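+ *
+ * (Reaching this branch means another transmit raced in between
+ * netif_stop_queue() above and the test_and_set_bit(); returning
+ * NETDEV_TX_BUSY hands the skb back to the core, which requeues it
+ * and retries later.)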
*/ + return NETDEV_TX_BUSY; + } + + AREG = CSR0; + DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", + dev->name, DREG )); + +#ifdef CONFIG_SUN3X + /* this weirdness doesn't appear on sun3... */ + if(!(DREG & CSR0_INIT)) { + DPRINTK( 1, ("INIT not set, reinitializing...\n")); + REGA( CSR0 ) = CSR0_STOP; + lance_init_ring(dev); + REGA( CSR0 ) = CSR0_INIT | CSR0_STRT; + } +#endif + + /* Fill in a Tx ring entry */ +#if 0 + if (lance_debug >= 2) { + printk( "%s: TX pkt %d type 0x%04x" + " from %s to %s" + " data at 0x%08x len %d\n", + dev->name, lp->new_tx, ((u_short *)skb->data)[6], + DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data), + (int)skb->data, (int)skb->len ); + } +#endif + /* We're not prepared for the int until the last flags are set/reset. + * And the int may happen already after setting the OWN_CHIP... */ + local_irq_save(flags); + + /* Mask to ring buffer boundary. */ + entry = lp->new_tx; + head = &(MEM->tx_head[entry]); + + /* Caution: the write order is important here, set the "ownership" bits + * last. + */ + + /* the sun3's lance needs its buffer padded to the minimum + size */ + len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; + +// head->length = -len; + head->length = (-len) | 0xf000; + head->misc = 0; + + skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len); + if (len != skb->len) + memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); + + head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; + lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK; + dev->stats.tx_bytes += skb->len; + + /* Trigger an immediate send poll. */ + REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT; + AREG = CSR0; + DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n", + dev->name, DREG )); + dev_kfree_skb(skb); + + lp->lock = 0; + if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) == + TMD1_OWN_HOST) + netif_start_queue(dev); + + local_irq_restore(flags); + + return NETDEV_TX_OK; +} + +/* The LANCE interrupt handler.
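+ *
+ * CSR0's interrupt bits are write-one-to-clear (the "RC" bits in the
+ * definitions above), so writing back "csr0 & (CSR0_TINT | CSR0_RINT |
+ * CSR0_IDON)" acks exactly the sources just observed.  Before
+ * returning, the handler re-reads CSR0 and jumps back to still_more if
+ * RINT or TINT is pending again, so events that arrive while the
+ * handler runs are not lost.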
*/ + +static irqreturn_t lance_interrupt( int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lance_private *lp = netdev_priv(dev); + int csr0; + static int in_interrupt; + + if (dev == NULL) { + DPRINTK( 1, ( "lance_interrupt(): invalid dev_id\n" )); + return IRQ_NONE; + } + + if (in_interrupt) + DPRINTK( 2, ( "%s: Re-entering the interrupt handler.\n", dev->name )); + in_interrupt = 1; + + still_more: + flush_cache_all(); + + AREG = CSR0; + csr0 = DREG; + + /* ack interrupts */ + DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON); + + /* clear errors */ + if(csr0 & CSR0_ERR) + DREG = CSR0_BABL | CSR0_MERR | CSR0_CERR | CSR0_MISS; + + + DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n", + dev->name, csr0, DREG )); + + if (csr0 & CSR0_TINT) { /* Tx-done interrupt */ + int old_tx = lp->old_tx; + +// if(lance_debug >= 3) { +// int i; +// +// printk("%s: tx int\n", dev->name); +// +// for(i = 0; i < TX_RING_SIZE; i++) +// printk("ring %d flag=%04x\n", i, +// MEM->tx_head[i].flag); +// } + + while( old_tx != lp->new_tx) { + struct lance_tx_head *head = &(MEM->tx_head[old_tx]); + + DPRINTK(3, ("on tx_ring %d\n", old_tx)); + + if (head->flag & TMD1_OWN_CHIP) + break; /* It still hasn't been Txed */ + + if (head->flag & TMD1_ERR) { + int status = head->misc; + dev->stats.tx_errors++; + if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++; + if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++; + if (status & TMD3_LCOL) dev->stats.tx_window_errors++; + if (status & (TMD3_UFLO | TMD3_BUFF)) { + dev->stats.tx_fifo_errors++; + printk("%s: Tx FIFO error\n", + dev->name); + REGA(CSR0) = CSR0_STOP; + REGA(CSR3) = CSR3_BSWP; + lance_init_ring(dev); + REGA(CSR0) = CSR0_STRT | CSR0_INEA; + return IRQ_HANDLED; + } + } else if(head->flag & (TMD1_ENP | TMD1_STP)) { + + head->flag &= ~(TMD1_ENP | TMD1_STP); + if(head->flag & (TMD1_ONE | TMD1_MORE)) + dev->stats.collisions++; + + dev->stats.tx_packets++; + DPRINTK(3, ("cleared tx ring %d\n", old_tx)); + } + old_tx = (old_tx +1) & TX_RING_MOD_MASK; + } + + lp->old_tx = old_tx; + } + + + if (netif_queue_stopped(dev)) { + /* The ring is no longer full, clear tbusy. */ + netif_start_queue(dev); + netif_wake_queue(dev); + } + + if (csr0 & CSR0_RINT) /* Rx interrupt */ + lance_rx( dev ); + + /* Log misc errors. */ + if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & CSR0_MERR) { + DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " + "status %04x.\n", dev->name, csr0 )); + /* Restart the chip. */ + REGA(CSR0) = CSR0_STOP; + REGA(CSR3) = CSR3_BSWP; + lance_init_ring(dev); + REGA(CSR0) = CSR0_STRT | CSR0_INEA; + } + + + /* Clear any other interrupt, and set interrupt enable. */ +// DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR | +// CSR0_IDON | CSR0_INEA; + + REGA(CSR0) = CSR0_INEA; + + if(DREG & (CSR0_RINT | CSR0_TINT)) { + DPRINTK(2, ("restarting interrupt, csr0=%#04x\n", DREG)); + goto still_more; + } + + DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n", + dev->name, DREG )); + in_interrupt = 0; + return IRQ_HANDLED; +} + +/* get packet, toss into skbuff */ +static int lance_rx( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int entry = lp->new_rx; + + /* If we own the next entry, it's a new packet. Send it up. 
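+ *
+ * This is the usual LANCE ownership handshake: the chip clears
+ * RMD1_OWN in a descriptor once a frame is complete, the host copies
+ * the data out and then writes RMD1_OWN_CHIP to hand the buffer back.
+ * The msg_length reported by the chip includes the 4-byte FCS, which
+ * is why the code below subtracts 4 from it.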
*/ + while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) { + struct lance_rx_head *head = &(MEM->rx_head[entry]); + int status = head->flag; + + if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */ + /* There is a tricky error noted by John Murphy, + to Russ Nelson: Even with + full-sized buffers it's possible for a jabber packet to use two + buffers, with only the last correctly noting the error. */ + if (status & RMD1_ENP) /* Only count a general error at the */ + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; + if (status & RMD1_OFLO) dev->stats.rx_over_errors++; + if (status & RMD1_CRC) dev->stats.rx_crc_errors++; + if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; + head->flag &= (RMD1_ENP|RMD1_STP); + } else { + /* Malloc up new buffer, compatible with net-3. */ +// short pkt_len = head->msg_length;// & 0xfff; + short pkt_len = (head->msg_length & 0xfff) - 4; + struct sk_buff *skb; + + if (pkt_len < 60) { + printk( "%s: Runt packet!\n", dev->name ); + dev->stats.rx_errors++; + } + else { + skb = dev_alloc_skb( pkt_len+2 ); + if (skb == NULL) { + DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", + dev->name )); + + dev->stats.rx_dropped++; + head->msg_length = 0; + head->flag |= RMD1_OWN_CHIP; + lp->new_rx = (lp->new_rx+1) & + RX_RING_MOD_MASK; + } + +#if 0 + if (lance_debug >= 3) { + u_char *data = PKTBUF_ADDR(head); + printk("%s: RX pkt %d type 0x%04x" + " from %pM to %pM", + dev->name, lp->new_tx, ((u_short *)data)[6], + &data[6], data); + + printk(" data %02x %02x %02x %02x %02x %02x %02x %02x " + "len %d at %08x\n", + data[15], data[16], data[17], data[18], + data[19], data[20], data[21], data[22], + pkt_len, data); + } +#endif + if (lance_debug >= 3) { + u_char *data = PKTBUF_ADDR(head); + printk( "%s: RX pkt %d type 0x%04x len %d\n ", dev->name, entry, ((u_short *)data)[6], pkt_len); + } + + + skb_reserve( skb, 2 ); /* 16 byte align */ + skb_put( skb, pkt_len ); /* Make room */ + skb_copy_to_linear_data(skb, + PKTBUF_ADDR(head), + pkt_len); + + skb->protocol = eth_type_trans( skb, dev ); + netif_rx( skb ); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + } + +// head->buf_length = -PKT_BUF_SZ | 0xf000; + head->msg_length = 0; + head->flag = RMD1_OWN_CHIP; + + entry = lp->new_rx = (lp->new_rx +1) & RX_RING_MOD_MASK; + } + + /* From lance.c (Donald Becker): */ + /* We should check that at least two ring entries are free. + If not, we should free one and mark stats->rx_dropped++. */ + + return 0; +} + + +static int lance_close( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + + netif_stop_queue(dev); + + AREG = CSR0; + + DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n", + dev->name, DREG )); + + /* We stop the LANCE here -- it occasionally polls + memory if we don't. */ + DREG = CSR0_STOP; + return 0; +} + + +/* Set or clear the multicast filter for this adaptor. + num_addrs == -1 Promiscuous mode, receive all packets + num_addrs == 0 Normal mode, clear multicast list + num_addrs > 0 Multicast mode, receive normal and MC packets, and do + best-effort filtering. + */ + +/* completely untested on a sun3 */ +static void set_multicast_list( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + + if(netif_queue_stopped(dev)) + /* Only possible if board is already started */ + return; + + /* We take the simple way out and always enable promiscuous mode. */ + DREG = CSR0_STOP; /* Temporarily stop the lance. 
*/ + + if (dev->flags & IFF_PROMISC) { + /* Log any net taps. */ + DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name )); + REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */ + } else { + short multicast_table[4]; + int num_addrs = netdev_mc_count(dev); + int i; + /* We don't use the multicast table, but rely on upper-layer + * filtering. */ + memset( multicast_table, (num_addrs == 0) ? 0 : -1, + sizeof(multicast_table) ); + for( i = 0; i < 4; i++ ) + REGA( CSR8+i ) = multicast_table[i]; + REGA( CSR15 ) = 0; /* Unset promiscuous mode */ + } + + /* + * Always set BSWP after a STOP as STOP puts it back into + * little endian mode. + */ + REGA( CSR3 ) = CSR3_BSWP; + + /* Resume normal operation and reset AREG to CSR0 */ + REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT; +} + + +#ifdef MODULE + +static struct net_device *sun3lance_dev; + +int __init init_module(void) +{ + sun3lance_dev = sun3lance_probe(-1); + if (IS_ERR(sun3lance_dev)) + return PTR_ERR(sun3lance_dev); + return 0; +} + +void __exit cleanup_module(void) +{ + unregister_netdev(sun3lance_dev); +#ifdef CONFIG_SUN3 + iounmap((void __iomem *)sun3lance_dev->base_addr); +#endif + free_netdev(sun3lance_dev); +} + +#endif /* MODULE */ + diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c new file mode 100644 index 0000000..06f2d43 --- /dev/null +++ b/drivers/net/ethernet/amd/sunlance.c @@ -0,0 +1,1556 @@ +/* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $ + * lance.c: Linux/Sparc/Lance driver + * + * Written 1995, 1996 by Miguel de Icaza + * Sources: + * The Linux depca driver + * The Linux lance driver. + * The Linux skeleton driver. + * The NetBSD Sparc/Lance driver. + * Theo de Raadt (deraadt@openbsd.org) + * NCR92C990 Lan Controller manual + * + * 1.4: + * Added support to run with a ledma on the Sun4m + * + * 1.5: + * Added multiple card detection. + * + * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost + * (ecd@skynet.be) + * + * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost + * (ecd@skynet.be) + * + * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller + * (davem@caip.rutgers.edu) + * + * 5/29/96: override option 'tpe-link-test?', if it is 'false', as + * this disables auto carrier detection on sun4m. Eddie C. Dost + * (ecd@skynet.be) + * + * 1.7: + * 6/26/96: Bug fix for multiple ledmas, miguel. + * + * 1.8: + * Stole multicast code from depca.c, fixed lance_tx. + * + * 1.9: + * 8/21/96: Fixed the multicast code (Pedro Roque) + * + * 8/28/96: Send fake packet in lance_open() if auto_select is true, + * so we can detect the carrier loss condition in time. + * Eddie C. Dost (ecd@skynet.be) + * + * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an + * MNA trap during chksum_partial_copy(). (ecd@skynet.be) + * + * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be) + * + * 12/22/96: Don't loop forever in lance_rx() on incomplete packets. + * This was the sun4c killer. Shit, stupid bug. + * (ecd@skynet.be) + * + * 1.10: + * 1/26/97: Modularize driver. (ecd@skynet.be) + * + * 1.11: + * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz) + * + * 1.12: + * 11/3/99: Fixed SMP race in lance_start_xmit found by davem. + * Anton Blanchard (anton@progsoc.uts.edu.au) + * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces. + * David S. 
Miller (davem@redhat.com) + * 2.01: + * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com) + * + */ + +#undef DEBUG_DRIVER + +static char lancestr[] = "LANCE"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Used for the temporal inet entries and routing */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include /* Used by the checksum routines */ +#include +#include +#include /* For tpe-link-test? setting */ +#include + +#define DRV_NAME "sunlance" +#define DRV_VERSION "2.02" +#define DRV_RELDATE "8/24/03" +#define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)" + +static char version[] = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; + +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION("Sun Lance ethernet driver"); +MODULE_LICENSE("GPL"); + +/* Define: 2^4 Tx buffers and 2^4 Rx buffers */ +#ifndef LANCE_LOG_TX_BUFFERS +#define LANCE_LOG_TX_BUFFERS 4 +#define LANCE_LOG_RX_BUFFERS 4 +#endif + +#define LE_CSR0 0 +#define LE_CSR1 1 +#define LE_CSR2 2 +#define LE_CSR3 3 + +#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */ + +#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */ +#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */ +#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */ +#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */ +#define LE_C0_MERR 0x0800 /* ME: Memory error */ +#define LE_C0_RINT 0x0400 /* Received interrupt */ +#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */ +#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */ +#define LE_C0_INTR 0x0080 /* Interrupt or error */ +#define LE_C0_INEA 0x0040 /* Interrupt enable */ +#define LE_C0_RXON 0x0020 /* Receiver on */ +#define LE_C0_TXON 0x0010 /* Transmitter on */ +#define LE_C0_TDMD 0x0008 /* Transmitter demand */ +#define LE_C0_STOP 0x0004 /* Stop the card */ +#define LE_C0_STRT 0x0002 /* Start the card */ +#define LE_C0_INIT 0x0001 /* Init the card */ + +#define LE_C3_BSWP 0x4 /* SWAP */ +#define LE_C3_ACON 0x2 /* ALE Control */ +#define LE_C3_BCON 0x1 /* Byte control */ + +/* Receive message descriptor 1 */ +#define LE_R1_OWN 0x80 /* Who owns the entry */ +#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */ +#define LE_R1_FRA 0x20 /* FRA: Frame error */ +#define LE_R1_OFL 0x10 /* OFL: Frame overflow */ +#define LE_R1_CRC 0x08 /* CRC error */ +#define LE_R1_BUF 0x04 /* BUF: Buffer error */ +#define LE_R1_SOP 0x02 /* Start of packet */ +#define LE_R1_EOP 0x01 /* End of packet */ +#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */ + +#define LE_T1_OWN 0x80 /* Lance owns the packet */ +#define LE_T1_ERR 0x40 /* Error summary */ +#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */ +#define LE_T1_EONE 0x08 /* Error: one retry needed */ +#define LE_T1_EDEF 0x04 /* Error: deferred */ +#define LE_T1_SOP 0x02 /* Start of packet */ +#define LE_T1_EOP 0x01 /* End of packet */ +#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */ + +#define LE_T3_BUF 0x8000 /* Buffer error */ +#define LE_T3_UFL 0x4000 /* Error underflow */ +#define LE_T3_LCOL 0x1000 /* Error late collision */ +#define LE_T3_CLOS 0x0800 /* Error carrier loss */ +#define LE_T3_RTY 0x0400 /* Error retry */ +#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */ + +#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) +#define TX_RING_MOD_MASK 
(TX_RING_SIZE - 1) +#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29) +#define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK) + +#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) +#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29) +#define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK) + +#define PKT_BUF_SZ 1544 +#define RX_BUFF_SIZE PKT_BUF_SZ +#define TX_BUFF_SIZE PKT_BUF_SZ + +struct lance_rx_desc { + u16 rmd0; /* low address of packet */ + u8 rmd1_bits; /* descriptor bits */ + u8 rmd1_hadr; /* high address of packet */ + s16 length; /* This length is 2s complement (negative)! + * Buffer length + */ + u16 mblength; /* This is the actual number of bytes received */ +}; + +struct lance_tx_desc { + u16 tmd0; /* low address of packet */ + u8 tmd1_bits; /* descriptor bits */ + u8 tmd1_hadr; /* high address of packet */ + s16 length; /* Length is 2s complement (negative)! */ + u16 misc; +}; + +/* The LANCE initialization block, described in databook. */ +/* On the Sparc, this block should be on a DMA region */ +struct lance_init_block { + u16 mode; /* Pre-set mode (reg. 15) */ + u8 phys_addr[6]; /* Physical ethernet address */ + u32 filter[2]; /* Multicast filter. */ + + /* Receive and transmit ring base, along with extra bits. */ + u16 rx_ptr; /* receive descriptor addr */ + u16 rx_len; /* receive len and high addr */ + u16 tx_ptr; /* transmit descriptor addr */ + u16 tx_len; /* transmit len and high addr */ + + /* The Tx and Rx ring entries must aligned on 8-byte boundaries. */ + struct lance_rx_desc brx_ring[RX_RING_SIZE]; + struct lance_tx_desc btx_ring[TX_RING_SIZE]; + + u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE]; + u8 pad[2]; /* align rx_buf for copy_and_sum(). */ + u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE]; +}; + +#define libdesc_offset(rt, elem) \ +((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem]))))) + +#define libbuff_offset(rt, elem) \ +((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0]))))) + +struct lance_private { + void __iomem *lregs; /* Lance RAP/RDP regs. */ + void __iomem *dregs; /* DMA controller regs. */ + struct lance_init_block __iomem *init_block_iomem; + struct lance_init_block *init_block_mem; + + spinlock_t lock; + + int rx_new, tx_new; + int rx_old, tx_old; + + struct platform_device *ledma; /* If set this points to ledma */ + char tpe; /* cable-selection is TPE */ + char auto_select; /* cable-selection by carrier */ + char burst_sizes; /* ledma SBus burst sizes */ + char pio_buffer; /* init block in PIO space? */ + + unsigned short busmaster_regval; + + void (*init_ring)(struct net_device *); + void (*rx)(struct net_device *); + void (*tx)(struct net_device *); + + char *name; + dma_addr_t init_block_dvma; + struct net_device *dev; /* Backpointer */ + struct platform_device *op; + struct platform_device *lebuffer; + struct timer_list multicast_timer; +}; + +#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ + lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\ + lp->tx_old - lp->tx_new-1) + +/* Lance registers. 
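+ *
+ * All CSR access is indirect: the CSR number is written to RAP and the
+ * data then moves through RDP, as STOP_LANCE() below and load_csrs()
+ * do.  A minimal sketch of the idiom (read_csr() is a hypothetical
+ * helper for illustration, not something this driver defines):
+ *
+ *	static u16 read_csr(struct lance_private *lp, int csr_no)
+ *	{
+ *		sbus_writew(csr_no, lp->lregs + RAP);
+ *		return sbus_readw(lp->lregs + RDP);
+ *	}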
*/ +#define RDP 0x00UL /* register data port */ +#define RAP 0x02UL /* register address port */ +#define LANCE_REG_SIZE 0x04UL + +#define STOP_LANCE(__lp) \ +do { void __iomem *__base = (__lp)->lregs; \ + sbus_writew(LE_CSR0, __base + RAP); \ + sbus_writew(LE_C0_STOP, __base + RDP); \ +} while (0) + +int sparc_lance_debug = 2; + +/* The Lance uses 24 bit addresses */ +/* On the Sun4c the DVMA will provide the remaining bytes for us */ +/* On the Sun4m we have to instruct the ledma to provide them */ +/* Even worse, on scsi/ether SBUS cards, the init block and the + * transmit/receive buffers are addressed as offsets from absolute + * zero on the lebuffer PIO area. -DaveM + */ + +#define LANCE_ADDR(x) ((long)(x) & ~0xff000000) + +/* Load the CSR registers */ +static void load_csrs(struct lance_private *lp) +{ + u32 leptr; + + if (lp->pio_buffer) + leptr = 0; + else + leptr = LANCE_ADDR(lp->init_block_dvma); + + sbus_writew(LE_CSR1, lp->lregs + RAP); + sbus_writew(leptr & 0xffff, lp->lregs + RDP); + sbus_writew(LE_CSR2, lp->lregs + RAP); + sbus_writew(leptr >> 16, lp->lregs + RDP); + sbus_writew(LE_CSR3, lp->lregs + RAP); + sbus_writew(lp->busmaster_regval, lp->lregs + RDP); + + /* Point back to csr0 */ + sbus_writew(LE_CSR0, lp->lregs + RAP); +} + +/* Setup the Lance Rx and Tx rings */ +static void lance_init_ring_dvma(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block *ib = lp->init_block_mem; + dma_addr_t aib = lp->init_block_dvma; + __u32 leptr; + int i; + + /* Lock out other processes while setting up hardware */ + netif_stop_queue(dev); + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + /* Copy the ethernet address to the lance init block + * Note that on the sparc you need to swap the ethernet address.
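+ *
+ * The chip fetches the init block as 16-bit words, and on the
+ * big-endian CPU each pair of bytes lands swapped, so the address is
+ * stored pairwise reversed.  As an illustration (an arbitrary example
+ * address, not one read from an IDPROM), 08:00:20:01:02:03 would be
+ * laid out as 00 08 01 20 03 02.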
+ */ + ib->phys_addr [0] = dev->dev_addr [1]; + ib->phys_addr [1] = dev->dev_addr [0]; + ib->phys_addr [2] = dev->dev_addr [3]; + ib->phys_addr [3] = dev->dev_addr [2]; + ib->phys_addr [4] = dev->dev_addr [5]; + ib->phys_addr [5] = dev->dev_addr [4]; + + /* Setup the Tx ring entries */ + for (i = 0; i < TX_RING_SIZE; i++) { + leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i)); + ib->btx_ring [i].tmd0 = leptr; + ib->btx_ring [i].tmd1_hadr = leptr >> 16; + ib->btx_ring [i].tmd1_bits = 0; + ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */ + ib->btx_ring [i].misc = 0; + } + + /* Setup the Rx ring entries */ + for (i = 0; i < RX_RING_SIZE; i++) { + leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i)); + + ib->brx_ring [i].rmd0 = leptr; + ib->brx_ring [i].rmd1_hadr = leptr >> 16; + ib->brx_ring [i].rmd1_bits = LE_R1_OWN; + ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000; + ib->brx_ring [i].mblength = 0; + } + + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0)); + ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16); + ib->rx_ptr = leptr; + + /* Setup tx descriptor pointer */ + leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0)); + ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16); + ib->tx_ptr = leptr; +} + +static void lance_init_ring_pio(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block __iomem *ib = lp->init_block_iomem; + u32 leptr; + int i; + + /* Lock out other processes while setting up hardware */ + netif_stop_queue(dev); + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + /* Copy the ethernet address to the lance init block + * Note that on the sparc you need to swap the ethernet address. 
+ */ + sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]); + sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]); + sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]); + sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]); + sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]); + sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]); + + /* Setup the Tx ring entries */ + for (i = 0; i < TX_RING_SIZE; i++) { + leptr = libbuff_offset(tx_buf, i); + sbus_writew(leptr, &ib->btx_ring [i].tmd0); + sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr); + sbus_writeb(0, &ib->btx_ring [i].tmd1_bits); + + /* The ones required by tmd2 */ + sbus_writew(0xf000, &ib->btx_ring [i].length); + sbus_writew(0, &ib->btx_ring [i].misc); + } + + /* Setup the Rx ring entries */ + for (i = 0; i < RX_RING_SIZE; i++) { + leptr = libbuff_offset(rx_buf, i); + + sbus_writew(leptr, &ib->brx_ring [i].rmd0); + sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr); + sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits); + sbus_writew(-RX_BUFF_SIZE|0xf000, + &ib->brx_ring [i].length); + sbus_writew(0, &ib->brx_ring [i].mblength); + } + + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + leptr = libdesc_offset(brx_ring, 0); + sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16), + &ib->rx_len); + sbus_writew(leptr, &ib->rx_ptr); + + /* Setup tx descriptor pointer */ + leptr = libdesc_offset(btx_ring, 0); + sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16), + &ib->tx_len); + sbus_writew(leptr, &ib->tx_ptr); +} + +static void init_restart_ledma(struct lance_private *lp) +{ + u32 csr = sbus_readl(lp->dregs + DMA_CSR); + + if (!(csr & DMA_HNDL_ERROR)) { + /* E-Cache draining */ + while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN) + barrier(); + } + + csr = sbus_readl(lp->dregs + DMA_CSR); + csr &= ~DMA_E_BURSTS; + if (lp->burst_sizes & DMA_BURST32) + csr |= DMA_E_BURST32; + else + csr |= DMA_E_BURST16; + + csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV); + + if (lp->tpe) + csr |= DMA_EN_ENETAUI; + else + csr &= ~DMA_EN_ENETAUI; + udelay(20); + sbus_writel(csr, lp->dregs + DMA_CSR); + udelay(200); +} + +static int init_restart_lance(struct lance_private *lp) +{ + u16 regval = 0; + int i; + + if (lp->dregs) + init_restart_ledma(lp); + + sbus_writew(LE_CSR0, lp->lregs + RAP); + sbus_writew(LE_C0_INIT, lp->lregs + RDP); + + /* Wait for the lance to complete initialization */ + for (i = 0; i < 100; i++) { + regval = sbus_readw(lp->lregs + RDP); + + if (regval & (LE_C0_ERR | LE_C0_IDON)) + break; + barrier(); + } + if (i == 100 || (regval & LE_C0_ERR)) { + printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n", + i, regval); + if (lp->dregs) + printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR)); + return -1; + } + + /* Clear IDON by writing a "1", enable interrupts and start lance */ + sbus_writew(LE_C0_IDON, lp->lregs + RDP); + sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP); + + if (lp->dregs) { + u32 csr = sbus_readl(lp->dregs + DMA_CSR); + + csr |= DMA_INT_ENAB; + sbus_writel(csr, lp->dregs + DMA_CSR); + } + + return 0; +} + +static void lance_rx_dvma(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block *ib = lp->init_block_mem; + struct lance_rx_desc *rd; + u8 bits; + int len, entry = lp->rx_new; + struct sk_buff *skb; + + for (rd = &ib->brx_ring [entry]; + !((bits = rd->rmd1_bits) & LE_R1_OWN); + rd = &ib->brx_ring [entry]) { + + /* We got an incomplete frame? 
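+ *
+ * LE_R1_POK is LE_R1_SOP | LE_R1_EOP, so this test also catches a
+ * frame that spanned more than one receive buffer: its first
+ * descriptor carries only SOP and its last only EOP, and no single
+ * descriptor shows both bits.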
*/ + if ((bits & LE_R1_POK) != LE_R1_POK) { + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; + } else { + len = (rd->mblength & 0xfff) - 4; + skb = dev_alloc_skb(len + 2); + + if (skb == NULL) { + printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", + dev->name); + dev->stats.rx_dropped++; + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = RX_NEXT(entry); + return; + } + + dev->stats.rx_bytes += len; + + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, len); /* make room */ + skb_copy_to_linear_data(skb, + (unsigned char *)&(ib->rx_buf [entry][0]), + len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + + /* Return the packet to the pool */ + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + entry = RX_NEXT(entry); + } + + lp->rx_new = entry; +} + +static void lance_tx_dvma(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block *ib = lp->init_block_mem; + int i, j; + + spin_lock(&lp->lock); + + j = lp->tx_old; + for (i = j; i != lp->tx_new; i = j) { + struct lance_tx_desc *td = &ib->btx_ring [i]; + u8 bits = td->tmd1_bits; + + /* If we hit a packet not owned by us, stop */ + if (bits & LE_T1_OWN) + break; + + if (bits & LE_T1_ERR) { + u16 status = td->misc; + + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; + + if (status & LE_T3_CLOS) { + dev->stats.tx_carrier_errors++; + if (lp->auto_select) { + lp->tpe = 1 - lp->tpe; + printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", + dev->name, lp->tpe?"TPE":"AUI"); + STOP_LANCE(lp); + lp->init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } + + /* Buffer errors and underflows turn off the + * transmitter, restart the adapter. + */ + if (status & (LE_T3_BUF|LE_T3_UFL)) { + dev->stats.tx_fifo_errors++; + + printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", + dev->name); + STOP_LANCE(lp); + lp->init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } else if ((bits & LE_T1_POK) == LE_T1_POK) { + /* + * So we don't count the packet more than once. + */ + td->tmd1_bits = bits & ~(LE_T1_POK); + + /* One collision before packet was sent. */ + if (bits & LE_T1_EONE) + dev->stats.collisions++; + + /* More than one collision, be optimistic. */ + if (bits & LE_T1_EMORE) + dev->stats.collisions += 2; + + dev->stats.tx_packets++; + } + + j = TX_NEXT(j); + } + lp->tx_old = j; +out: + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL > 0) + netif_wake_queue(dev); + + spin_unlock(&lp->lock); +} + +static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len) +{ + u16 *p16 = (u16 *) skb->data; + u32 *p32; + u8 *p8; + void __iomem *pbuf = piobuf; + + /* We know here that both src and dest are on a 16bit boundary. 
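+ *
+ * Both pointers normally start two bytes into a 32-bit word: skb->data
+ * because the caller has done skb_reserve(skb, 2), and the PIO buffer
+ * because of the pad[2] member in struct lance_init_block above.  The
+ * single 16-bit read below therefore brings both up to 32-bit
+ * alignment, the bulk of the frame moves as 32-bit PIO reads, and a
+ * 16-bit plus an 8-bit read mop up the tail.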
*/ + *p16++ = sbus_readw(pbuf); + p32 = (u32 *) p16; + pbuf += 2; + len -= 2; + + while (len >= 4) { + *p32++ = sbus_readl(pbuf); + pbuf += 4; + len -= 4; + } + p8 = (u8 *) p32; + if (len >= 2) { + p16 = (u16 *) p32; + *p16++ = sbus_readw(pbuf); + pbuf += 2; + len -= 2; + p8 = (u8 *) p16; + } + if (len >= 1) + *p8 = sbus_readb(pbuf); +} + +static void lance_rx_pio(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block __iomem *ib = lp->init_block_iomem; + struct lance_rx_desc __iomem *rd; + unsigned char bits; + int len, entry; + struct sk_buff *skb; + + entry = lp->rx_new; + for (rd = &ib->brx_ring [entry]; + !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN); + rd = &ib->brx_ring [entry]) { + + /* We got an incomplete frame? */ + if ((bits & LE_R1_POK) != LE_R1_POK) { + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; + } else { + len = (sbus_readw(&rd->mblength) & 0xfff) - 4; + skb = dev_alloc_skb(len + 2); + + if (skb == NULL) { + printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", + dev->name); + dev->stats.rx_dropped++; + sbus_writew(0, &rd->mblength); + sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); + lp->rx_new = RX_NEXT(entry); + return; + } + + dev->stats.rx_bytes += len; + + skb_reserve (skb, 2); /* 16 byte align */ + skb_put(skb, len); /* make room */ + lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + + /* Return the packet to the pool */ + sbus_writew(0, &rd->mblength); + sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); + entry = RX_NEXT(entry); + } + + lp->rx_new = entry; +} + +static void lance_tx_pio(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block __iomem *ib = lp->init_block_iomem; + int i, j; + + spin_lock(&lp->lock); + + j = lp->tx_old; + for (i = j; i != lp->tx_new; i = j) { + struct lance_tx_desc __iomem *td = &ib->btx_ring [i]; + u8 bits = sbus_readb(&td->tmd1_bits); + + /* If we hit a packet not owned by us, stop */ + if (bits & LE_T1_OWN) + break; + + if (bits & LE_T1_ERR) { + u16 status = sbus_readw(&td->misc); + + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; + + if (status & LE_T3_CLOS) { + dev->stats.tx_carrier_errors++; + if (lp->auto_select) { + lp->tpe = 1 - lp->tpe; + printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", + dev->name, lp->tpe?"TPE":"AUI"); + STOP_LANCE(lp); + lp->init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } + + /* Buffer errors and underflows turn off the + * transmitter, restart the adapter. + */ + if (status & (LE_T3_BUF|LE_T3_UFL)) { + dev->stats.tx_fifo_errors++; + + printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", + dev->name); + STOP_LANCE(lp); + lp->init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } else if ((bits & LE_T1_POK) == LE_T1_POK) { + /* + * So we don't count the packet more than once. + */ + sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits); + + /* One collision before packet was sent. 
*/ + if (bits & LE_T1_EONE) + dev->stats.collisions++; + + /* More than one collision, be optimistic. */ + if (bits & LE_T1_EMORE) + dev->stats.collisions += 2; + + dev->stats.tx_packets++; + } + + j = TX_NEXT(j); + } + lp->tx_old = j; + + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL > 0) + netif_wake_queue(dev); +out: + spin_unlock(&lp->lock); +} + +static irqreturn_t lance_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lance_private *lp = netdev_priv(dev); + int csr0; + + sbus_writew(LE_CSR0, lp->lregs + RAP); + csr0 = sbus_readw(lp->lregs + RDP); + + /* Acknowledge all the interrupt sources ASAP */ + sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT), + lp->lregs + RDP); + + if ((csr0 & LE_C0_ERR) != 0) { + /* Clear the error condition */ + sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | + LE_C0_CERR | LE_C0_MERR), + lp->lregs + RDP); + } + + if (csr0 & LE_C0_RINT) + lp->rx(dev); + + if (csr0 & LE_C0_TINT) + lp->tx(dev); + + if (csr0 & LE_C0_BABL) + dev->stats.tx_errors++; + + if (csr0 & LE_C0_MISS) + dev->stats.rx_errors++; + + if (csr0 & LE_C0_MERR) { + if (lp->dregs) { + u32 addr = sbus_readl(lp->dregs + DMA_ADDR); + + printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n", + dev->name, csr0, addr & 0xffffff); + } else { + printk(KERN_ERR "%s: Memory error, status %04x\n", + dev->name, csr0); + } + + sbus_writew(LE_C0_STOP, lp->lregs + RDP); + + if (lp->dregs) { + u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR); + + dma_csr |= DMA_FIFO_INV; + sbus_writel(dma_csr, lp->dregs + DMA_CSR); + } + + lp->init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); + } + + sbus_writew(LE_C0_INEA, lp->lregs + RDP); + + return IRQ_HANDLED; +} + +/* Build a fake network packet and send it to ourselves. 
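+ *
+ * This is the auto carrier detection trick from the 8/28/96 changelog
+ * entry above: lance_open() queues one self-addressed ETH_ZLEN frame
+ * when auto_select is set, so a carrier-loss error surfaces in the Tx
+ * path right away and the driver can switch between TPE and AUI
+ * without waiting for real traffic.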
*/ +static void build_fake_packet(struct lance_private *lp) +{ + struct net_device *dev = lp->dev; + int i, entry; + + entry = lp->tx_new & TX_RING_MOD_MASK; + if (lp->pio_buffer) { + struct lance_init_block __iomem *ib = lp->init_block_iomem; + u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]); + struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet; + for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++) + sbus_writew(0, &packet[i]); + for (i = 0; i < 6; i++) { + sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]); + sbus_writeb(dev->dev_addr[i], &eth->h_source[i]); + } + sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length); + sbus_writew(0, &ib->btx_ring[entry].misc); + sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); + } else { + struct lance_init_block *ib = lp->init_block_mem; + u16 *packet = (u16 *) &(ib->tx_buf[entry][0]); + struct ethhdr *eth = (struct ethhdr *) packet; + memset(packet, 0, ETH_ZLEN); + for (i = 0; i < 6; i++) { + eth->h_dest[i] = dev->dev_addr[i]; + eth->h_source[i] = dev->dev_addr[i]; + } + ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000; + ib->btx_ring[entry].misc = 0; + ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); + } + lp->tx_new = TX_NEXT(entry); +} + +static int lance_open(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + int status = 0; + + STOP_LANCE(lp); + + if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED, + lancestr, (void *) dev)) { + printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq); + return -EAGAIN; + } + + /* On the 4m, setup the ledma to provide the upper bits for buffers */ + if (lp->dregs) { + u32 regval = lp->init_block_dvma & 0xff000000; + + sbus_writel(regval, lp->dregs + DMA_TEST); + } + + /* Set mode and clear multicast filter only at device open, + * so that lance_init_ring() called at any error will not + * forget multicast filters. + * + * BTW it is a common bug in all lance drivers!
--ANK + */ + if (lp->pio_buffer) { + struct lance_init_block __iomem *ib = lp->init_block_iomem; + sbus_writew(0, &ib->mode); + sbus_writel(0, &ib->filter[0]); + sbus_writel(0, &ib->filter[1]); + } else { + struct lance_init_block *ib = lp->init_block_mem; + ib->mode = 0; + ib->filter [0] = 0; + ib->filter [1] = 0; + } + + lp->init_ring(dev); + load_csrs(lp); + + netif_start_queue(dev); + + status = init_restart_lance(lp); + if (!status && lp->auto_select) { + build_fake_packet(lp); + sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); + } + + return status; +} + +static int lance_close(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + + netif_stop_queue(dev); + del_timer_sync(&lp->multicast_timer); + + STOP_LANCE(lp); + + free_irq(dev->irq, (void *) dev); + return 0; +} + +static int lance_reset(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + int status; + + STOP_LANCE(lp); + + /* On the 4m, reset the dma too */ + if (lp->dregs) { + u32 csr, addr; + + printk(KERN_ERR "resetting ledma\n"); + csr = sbus_readl(lp->dregs + DMA_CSR); + sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); + udelay(200); + sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); + + addr = lp->init_block_dvma & 0xff000000; + sbus_writel(addr, lp->dregs + DMA_TEST); + } + lp->init_ring(dev); + load_csrs(lp); + dev->trans_start = jiffies; /* prevent tx timeout */ + status = init_restart_lance(lp); + return status; +} + +static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len) +{ + void __iomem *piobuf = dest; + u32 *p32; + u16 *p16; + u8 *p8; + + switch ((unsigned long)src & 0x3) { + case 0: + p32 = (u32 *) src; + while (len >= 4) { + sbus_writel(*p32, piobuf); + p32++; + piobuf += 4; + len -= 4; + } + src = (char *) p32; + break; + case 1: + case 3: + p8 = (u8 *) src; + while (len >= 4) { + u32 val; + + val = p8[0] << 24; + val |= p8[1] << 16; + val |= p8[2] << 8; + val |= p8[3]; + sbus_writel(val, piobuf); + p8 += 4; + piobuf += 4; + len -= 4; + } + src = (char *) p8; + break; + case 2: + p16 = (u16 *) src; + while (len >= 4) { + u32 val = p16[0]<<16 | p16[1]; + sbus_writel(val, piobuf); + p16 += 2; + piobuf += 4; + len -= 4; + } + src = (char *) p16; + break; + } + if (len >= 2) { + u16 val = src[0] << 8 | src[1]; + sbus_writew(val, piobuf); + src += 2; + piobuf += 2; + len -= 2; + } + if (len >= 1) + sbus_writeb(src[0], piobuf); +} + +static void lance_piozero(void __iomem *dest, int len) +{ + void __iomem *piobuf = dest; + + if ((unsigned long)piobuf & 1) { + sbus_writeb(0, piobuf); + piobuf += 1; + len -= 1; + if (len == 0) + return; + } + if (len == 1) { + sbus_writeb(0, piobuf); + return; + } + if ((unsigned long)piobuf & 2) { + sbus_writew(0, piobuf); + piobuf += 2; + len -= 2; + if (len == 0) + return; + } + while (len >= 4) { + sbus_writel(0, piobuf); + piobuf += 4; + len -= 4; + } + if (len >= 2) { + sbus_writew(0, piobuf); + piobuf += 2; + len -= 2; + } + if (len >= 1) + sbus_writeb(0, piobuf); +} + +static void lance_tx_timeout(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + + printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", + dev->name, sbus_readw(lp->lregs + RDP)); + lance_reset(dev); + netif_wake_queue(dev); +} + +static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + int entry, skblen, len; + + skblen = skb->len; + + len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; + + spin_lock_irq(&lp->lock); + + dev->stats.tx_bytes += len; + + entry = lp->tx_new & TX_RING_MOD_MASK; + if (lp->pio_buffer) { + struct lance_init_block __iomem *ib = lp->init_block_iomem; + sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length); + sbus_writew(0, &ib->btx_ring[entry].misc); + lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen); + if (len != skblen) + lance_piozero(&ib->tx_buf[entry][skblen], len - skblen); + sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); + } else { + struct lance_init_block *ib = lp->init_block_mem; + ib->btx_ring [entry].length = (-len) | 0xf000; + ib->btx_ring [entry].misc = 0; + skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); + if (len != skblen) + memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); + ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); + } + + lp->tx_new = TX_NEXT(entry); + + if (TX_BUFFS_AVAIL <= 0) + netif_stop_queue(dev); + + /* Kick the lance: transmit now */ + sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); + + /* Read back CSR to invalidate the E-Cache. + * This is needed, because DMA_DSBL_WR_INV is set. + */ + if (lp->dregs) + sbus_readw(lp->lregs + RDP); + + spin_unlock_irq(&lp->lock); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* taken from the depca driver */ +static void lance_load_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct netdev_hw_addr *ha; + u32 crc; + u32 val; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) + val = ~0; + else + val = 0; + + if (lp->pio_buffer) { + struct lance_init_block __iomem *ib = lp->init_block_iomem; + sbus_writel(val, &ib->filter[0]); + sbus_writel(val, &ib->filter[1]); + } else { + struct lance_init_block *ib = lp->init_block_mem; + ib->filter [0] = val; + ib->filter [1] = val; + } + + if (dev->flags & IFF_ALLMULTI) + return; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc = crc >> 26; + if (lp->pio_buffer) { + struct lance_init_block __iomem *ib = lp->init_block_iomem; + u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter; + u16 tmp = sbus_readw(&mcast_table[crc>>4]); + tmp |= 1 << (crc & 0xf); + sbus_writew(tmp, &mcast_table[crc>>4]); + } else { + struct lance_init_block *ib = lp->init_block_mem; + u16 *mcast_table = (u16 *) &ib->filter; + mcast_table [crc >> 4] |= 1 << (crc & 0xf); + } + } +} + +static void lance_set_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block *ib_mem = lp->init_block_mem; + struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem; + u16 mode; + + if (!netif_running(dev)) + return; + + if (lp->tx_old != lp->tx_new) { + mod_timer(&lp->multicast_timer, jiffies + 4); + netif_wake_queue(dev); + return; + } + + netif_stop_queue(dev); + + STOP_LANCE(lp); + lp->init_ring(dev); + + if (lp->pio_buffer) + mode = sbus_readw(&ib_iomem->mode); + else + mode = ib_mem->mode; + if (dev->flags & IFF_PROMISC) { + mode |= LE_MO_PROM; + if (lp->pio_buffer) + sbus_writew(mode, &ib_iomem->mode); + else + ib_mem->mode = mode; + } else { + mode &= ~LE_MO_PROM; + if (lp->pio_buffer) + sbus_writew(mode, &ib_iomem->mode); + else + ib_mem->mode = mode; + lance_load_multicast(dev); + } + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); +} + +static void lance_set_multicast_retry(unsigned long _opaque) +{ + struct net_device *dev = (struct net_device *) _opaque; + + 
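+	/* Deferred from lance_set_multicast(): it found packets still in
+	 * flight and rescheduled this timer (jiffies + 4), so the filter
+	 * update runs once the Tx ring has drained instead of sleeping
+	 * in interrupt context (see the -DaveM comment in the probe
+	 * routine below).
+	 */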
lance_set_multicast(dev); +} + +static void lance_free_hwresources(struct lance_private *lp) +{ + if (lp->lregs) + of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE); + if (lp->dregs) { + struct platform_device *ledma = lp->ledma; + + of_iounmap(&ledma->resource[0], lp->dregs, + resource_size(&ledma->resource[0])); + } + if (lp->init_block_iomem) { + of_iounmap(&lp->lebuffer->resource[0], lp->init_block_iomem, + sizeof(struct lance_init_block)); + } else if (lp->init_block_mem) { + dma_free_coherent(&lp->op->dev, + sizeof(struct lance_init_block), + lp->init_block_mem, + lp->init_block_dvma); + } +} + +/* Ethtool support... */ +static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strcpy(info->driver, "sunlance"); + strcpy(info->version, "2.02"); +} + +static const struct ethtool_ops sparc_lance_ethtool_ops = { + .get_drvinfo = sparc_lance_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops sparc_lance_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_multicast_list = lance_set_multicast, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __devinit sparc_lance_probe_one(struct platform_device *op, + struct platform_device *ledma, + struct platform_device *lebuffer) +{ + struct device_node *dp = op->dev.of_node; + static unsigned version_printed; + struct lance_private *lp; + struct net_device *dev; + int i; + + dev = alloc_etherdev(sizeof(struct lance_private) + 8); + if (!dev) + return -ENOMEM; + + lp = netdev_priv(dev); + + if (sparc_lance_debug && version_printed++ == 0) + printk (KERN_INFO "%s", version); + + spin_lock_init(&lp->lock); + + /* Copy the IDPROM ethernet address to the device structure, later we + * will copy the address in the device structure to the lance + * initialization block. 
+ */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = idprom->id_ethaddr[i]; + + /* Get the IO region */ + lp->lregs = of_ioremap(&op->resource[0], 0, + LANCE_REG_SIZE, lancestr); + if (!lp->lregs) { + printk(KERN_ERR "SunLance: Cannot map registers.\n"); + goto fail; + } + + lp->ledma = ledma; + if (lp->ledma) { + lp->dregs = of_ioremap(&ledma->resource[0], 0, + resource_size(&ledma->resource[0]), + "ledma"); + if (!lp->dregs) { + printk(KERN_ERR "SunLance: Cannot map " + "ledma registers.\n"); + goto fail; + } + } + + lp->op = op; + lp->lebuffer = lebuffer; + if (lebuffer) { + /* sanity check */ + if (lebuffer->resource[0].start & 7) { + printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n"); + goto fail; + } + lp->init_block_iomem = + of_ioremap(&lebuffer->resource[0], 0, + sizeof(struct lance_init_block), "lebuffer"); + if (!lp->init_block_iomem) { + printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n"); + goto fail; + } + lp->init_block_dvma = 0; + lp->pio_buffer = 1; + lp->init_ring = lance_init_ring_pio; + lp->rx = lance_rx_pio; + lp->tx = lance_tx_pio; + } else { + lp->init_block_mem = + dma_alloc_coherent(&op->dev, + sizeof(struct lance_init_block), + &lp->init_block_dvma, GFP_ATOMIC); + if (!lp->init_block_mem) { + printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n"); + goto fail; + } + lp->pio_buffer = 0; + lp->init_ring = lance_init_ring_dvma; + lp->rx = lance_rx_dvma; + lp->tx = lance_tx_dvma; + } + lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval", + (LE_C3_BSWP | + LE_C3_ACON | + LE_C3_BCON)); + + lp->name = lancestr; + + lp->burst_sizes = 0; + if (lp->ledma) { + struct device_node *ledma_dp = ledma->dev.of_node; + struct device_node *sbus_dp; + unsigned int sbmask; + const char *prop; + u32 csr; + + /* Find burst-size property for ledma */ + lp->burst_sizes = of_getintprop_default(ledma_dp, + "burst-sizes", 0); + + /* ledma may be capable of fast bursts, but sbus may not. */ + sbus_dp = ledma_dp->parent; + sbmask = of_getintprop_default(sbus_dp, "burst-sizes", + DMA_BURSTBITS); + lp->burst_sizes &= sbmask; + + /* Get the cable-selection property */ + prop = of_get_property(ledma_dp, "cable-selection", NULL); + if (!prop || prop[0] == '\0') { + struct device_node *nd; + + printk(KERN_INFO "SunLance: using " + "auto-carrier-detection.\n"); + + nd = of_find_node_by_path("/options"); + if (!nd) + goto no_link_test; + + prop = of_get_property(nd, "tpe-link-test?", NULL); + if (!prop) + goto no_link_test; + + if (strcmp(prop, "true")) { + printk(KERN_NOTICE "SunLance: warning: overriding option " + "'tpe-link-test?'\n"); + printk(KERN_NOTICE "SunLance: warning: mail any problems " + "to ecd@skynet.be\n"); + auxio_set_lte(AUXIO_LTE_ON); + } +no_link_test: + lp->auto_select = 1; + lp->tpe = 0; + } else if (!strcmp(prop, "aui")) { + lp->auto_select = 0; + lp->tpe = 0; + } else { + lp->auto_select = 0; + lp->tpe = 1; + } + + /* Reset ledma */ + csr = sbus_readl(lp->dregs + DMA_CSR); + sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); + udelay(200); + sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); + } else + lp->dregs = NULL; + + lp->dev = dev; + SET_NETDEV_DEV(dev, &op->dev); + dev->watchdog_timeo = 5*HZ; + dev->ethtool_ops = &sparc_lance_ethtool_ops; + dev->netdev_ops = &sparc_lance_ops; + + dev->irq = op->archdata.irqs[0]; + + /* We cannot sleep if the chip is busy during a + * multicast list update event, because such events + * can occur from interrupts (ex. IPv6). 
So we + * use a timer to try again later when necessary. -DaveM + */ + init_timer(&lp->multicast_timer); + lp->multicast_timer.data = (unsigned long) dev; + lp->multicast_timer.function = lance_set_multicast_retry; + + if (register_netdev(dev)) { + printk(KERN_ERR "SunLance: Cannot register device.\n"); + goto fail; + } + + dev_set_drvdata(&op->dev, lp); + + printk(KERN_INFO "%s: LANCE %pM\n", + dev->name, dev->dev_addr); + + return 0; + +fail: + lance_free_hwresources(lp); + free_netdev(dev); + return -ENODEV; +} + +static int __devinit sunlance_sbus_probe(struct platform_device *op) +{ + struct platform_device *parent = to_platform_device(op->dev.parent); + struct device_node *parent_dp = parent->dev.of_node; + int err; + + if (!strcmp(parent_dp->name, "ledma")) { + err = sparc_lance_probe_one(op, parent, NULL); + } else if (!strcmp(parent_dp->name, "lebuffer")) { + err = sparc_lance_probe_one(op, NULL, parent); + } else + err = sparc_lance_probe_one(op, NULL, NULL); + + return err; +} + +static int __devexit sunlance_sbus_remove(struct platform_device *op) +{ + struct lance_private *lp = dev_get_drvdata(&op->dev); + struct net_device *net_dev = lp->dev; + + unregister_netdev(net_dev); + + lance_free_hwresources(lp); + + free_netdev(net_dev); + + dev_set_drvdata(&op->dev, NULL); + + return 0; +} + +static const struct of_device_id sunlance_sbus_match[] = { + { + .name = "le", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, sunlance_sbus_match); + +static struct platform_driver sunlance_sbus_driver = { + .driver = { + .name = "sunlance", + .owner = THIS_MODULE, + .of_match_table = sunlance_sbus_match, + }, + .probe = sunlance_sbus_probe, + .remove = __devexit_p(sunlance_sbus_remove), +}; + + +/* Find all the lance cards on the system and initialize them */ +static int __init sparc_lance_init(void) +{ + return platform_driver_register(&sunlance_sbus_driver); +} + +static void __exit sparc_lance_exit(void) +{ + platform_driver_unregister(&sunlance_sbus_driver); +} + +module_init(sparc_lance_init); +module_exit(sparc_lance_exit); diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c deleted file mode 100644 index a900d5b..0000000 --- a/drivers/net/hplance.c +++ /dev/null @@ -1,242 +0,0 @@ -/* hplance.c : the Linux/hp300/lance ethernet driver - * - * Copyright (C) 05/1998 Peter Maydell - * Based on the Sun Lance driver and the NetBSD HP Lance driver - * Uses the generic 7990.c LANCE code. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -/* Used for the temporal inet entries and routing */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "hplance.h" - -/* We have 16834 bytes of RAM for the init block and buffers. This places - * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx - * buffers and 2 Tx buffers. - */ -#define LANCE_LOG_TX_BUFFERS 1 -#define LANCE_LOG_RX_BUFFERS 3 - -#include "7990.h" /* use generic LANCE code */ - -/* Our private data structure */ -struct hplance_private { - struct lance_private lance; -}; - -/* function prototypes... This is easy because all the grot is in the - * generic LANCE support. All we have to support is probing for boards, - * plus board-specific init, open and close actions. - * Oh, and we need to tell the generic code how to read and write LANCE registers... 
- */ -static int __devinit hplance_init_one(struct dio_dev *d, - const struct dio_device_id *ent); -static void __devinit hplance_init(struct net_device *dev, - struct dio_dev *d); -static void __devexit hplance_remove_one(struct dio_dev *d); -static void hplance_writerap(void *priv, unsigned short value); -static void hplance_writerdp(void *priv, unsigned short value); -static unsigned short hplance_readrdp(void *priv); -static int hplance_open(struct net_device *dev); -static int hplance_close(struct net_device *dev); - -static struct dio_device_id hplance_dio_tbl[] = { - { DIO_ID_LAN }, - { 0 } -}; - -static struct dio_driver hplance_driver = { - .name = "hplance", - .id_table = hplance_dio_tbl, - .probe = hplance_init_one, - .remove = __devexit_p(hplance_remove_one), -}; - -static const struct net_device_ops hplance_netdev_ops = { - .ndo_open = hplance_open, - .ndo_stop = hplance_close, - .ndo_start_xmit = lance_start_xmit, - .ndo_set_multicast_list = lance_set_multicast, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = lance_poll, -#endif -}; - -/* Find all the HP Lance boards and initialise them... */ -static int __devinit hplance_init_one(struct dio_dev *d, - const struct dio_device_id *ent) -{ - struct net_device *dev; - int err = -ENOMEM; - int i; - - dev = alloc_etherdev(sizeof(struct hplance_private)); - if (!dev) - goto out; - - err = -EBUSY; - if (!request_mem_region(dio_resource_start(d), - dio_resource_len(d), d->name)) - goto out_free_netdev; - - hplance_init(dev, d); - err = register_netdev(dev); - if (err) - goto out_release_mem_region; - - dio_set_drvdata(d, dev); - - printk(KERN_INFO "%s: %s; select code %d, addr %2.2x", dev->name, d->name, d->scode, dev->dev_addr[0]); - - for (i=1; i<6; i++) { - printk(":%2.2x", dev->dev_addr[i]); - } - - printk(", irq %d\n", d->ipl); - - return 0; - - out_release_mem_region: - release_mem_region(dio_resource_start(d), dio_resource_len(d)); - out_free_netdev: - free_netdev(dev); - out: - return err; -} - -static void __devexit hplance_remove_one(struct dio_dev *d) -{ - struct net_device *dev = dio_get_drvdata(d); - - unregister_netdev(dev); - release_mem_region(dio_resource_start(d), dio_resource_len(d)); - free_netdev(dev); -} - -/* Initialise a single lance board at the given DIO device */ -static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d) -{ - unsigned long va = (d->resource.start + DIO_VIRADDRBASE); - struct hplance_private *lp; - int i; - - /* reset the board */ - out_8(va+DIO_IDOFF, 0xff); - udelay(100); /* ariba! ariba! udelay! udelay! */ - - /* Fill the dev fields */ - dev->base_addr = va; - dev->netdev_ops = &hplance_netdev_ops; - dev->dma = 0; - - for (i=0; i<6; i++) { - /* The NVRAM holds our ethernet address, one nibble per byte, - * at bytes NVRAMOFF+1,3,5,7,9... 
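The NVRAM layout just described stores each station-address byte as two nibbles four bytes apart, which the loop below reassembles. A stand-alone illustration of that decode (the dump is made up; only the low nibbles at offsets 1 and 3 carry data):

#include <stdio.h>

int main(void)
{
	/* first four NVRAM bytes for a MAC whose first byte is 0x08 */
	unsigned char nvram[] = { 0x00, 0x00, 0x00, 0x08 };
	unsigned char mac0 = ((nvram[1] & 0xF) << 4) | (nvram[3] & 0xF);

	printf("%02x\n", mac0);		/* prints 08 */
	return 0;
}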
- */ - dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4) - | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF); - } - - lp = netdev_priv(dev); - lp->lance.name = (char*)d->name; /* discards const, shut up gcc */ - lp->lance.base = va; - lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */ - lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */ - lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */ - lp->lance.irq = d->ipl; - lp->lance.writerap = hplance_writerap; - lp->lance.writerdp = hplance_writerdp; - lp->lance.readrdp = hplance_readrdp; - lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS; - lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; - lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK; - lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK; -} - -/* This is disgusting. We have to check the DIO status register for ack every - * time we read or write the LANCE registers. - */ -static void hplance_writerap(void *priv, unsigned short value) -{ - struct lance_private *lp = (struct lance_private *)priv; - do { - out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); - } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); -} - -static void hplance_writerdp(void *priv, unsigned short value) -{ - struct lance_private *lp = (struct lance_private *)priv; - do { - out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); - } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); -} - -static unsigned short hplance_readrdp(void *priv) -{ - struct lance_private *lp = (struct lance_private *)priv; - __u16 value; - do { - value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); - } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); - return value; -} - -static int hplance_open(struct net_device *dev) -{ - int status; - struct lance_private *lp = netdev_priv(dev); - - status = lance_open(dev); /* call generic lance open code */ - if (status) - return status; - /* enable interrupts at board level. */ - out_8(lp->base + HPLANCE_STATUS, LE_IE); - - return 0; -} - -static int hplance_close(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */ - lance_close(dev); - return 0; -} - -static int __init hplance_init_module(void) -{ - return dio_register_driver(&hplance_driver); -} - -static void __exit hplance_cleanup_module(void) -{ - dio_unregister_driver(&hplance_driver); -} - -module_init(hplance_init_module); -module_exit(hplance_cleanup_module); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/hplance.h b/drivers/net/hplance.h deleted file mode 100644 index 04aee9e..0000000 --- a/drivers/net/hplance.h +++ /dev/null @@ -1,26 +0,0 @@ -/* Random defines and structures for the HP Lance driver. - * Copyright (C) 05/1998 Peter Maydell - * Based on the Sun Lance driver and the NetBSD HP Lance driver - */ - -/* Registers */ -#define HPLANCE_ID 0x01 /* DIO register: ID byte */ -#define HPLANCE_STATUS 0x03 /* DIO register: interrupt enable/status */ - -/* Control and status bits for the status register */ -#define LE_IE 0x80 /* interrupt enable */ -#define LE_IR 0x40 /* interrupt requested */ -#define LE_LOCK 0x08 /* lock status register */ -#define LE_ACK 0x04 /* ack of lock */ -#define LE_JAB 0x02 /* loss of tx clock (???) 
*/ -/* We can also extract the IPL from the status register with the standard - * DIO_IPL(hplance) macro, or using dio_scodetoipl() - */ - -/* These are the offsets for the DIO regs (hplance_reg), lance_ioreg, - * memory and NVRAM: - */ -#define HPLANCE_IDOFF 0 /* board baseaddr */ -#define HPLANCE_REGOFF 0x4000 /* lance registers */ -#define HPLANCE_MEMOFF 0x8000 /* struct lance_init_block */ -#define HPLANCE_NVRAMOFF 0xC008 /* etheraddress as one *nibble* per byte */ diff --git a/drivers/net/lance.c b/drivers/net/lance.c deleted file mode 100644 index 02336ed..0000000 --- a/drivers/net/lance.c +++ /dev/null @@ -1,1313 +0,0 @@ -/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */ -/* - Written/copyright 1993-1998 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - This driver is for the Allied Telesis AT1500 and HP J2405A, and should work - with most other LANCE-based bus-master (NE2100/NE2500) ethercards. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - Andrey V. Savochkin: - - alignment problem with 1.3.* kernel and some minor changes. - Thomas Bogendoerfer (tsbogend@bigbug.franken.de): - - added support for Linux/Alpha, but removed most of it, because - it worked only for the PCI chip. - - added hook for the 32bit lance driver - - added PCnetPCI II (79C970A) to chip table - Paul Gortmaker (gpg109@rsphy1.anu.edu.au): - - hopefully fix above so Linux/Alpha can use ISA cards too. - 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb - v1.12 10/27/97 Module support -djb - v1.14 2/3/98 Module support modified, made PCI support optional -djb - v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed - before unregister_netdev() which caused NULL pointer - reference later in the chain (in rtnetlink_fill_ifinfo()) - -- Mika Kuoppala - - Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from - the 2.1 version of the old driver - Alan Cox - - Get rid of check_region, check kmalloc return in lance_probe1 - Arnaldo Carvalho de Melo - 11/01/2001 - - Reworked detection, added support for Racal InterLan EtherBlaster cards - Vesselin Kostadinov - 22/4/2004 -*/ - -static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0}; -static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options); -static int __init do_lance_probe(struct net_device *dev); - - -static struct card { - char id_offset14; - char id_offset15; -} cards[] = { - { //"normal" - .id_offset14 = 0x57, - .id_offset15 = 0x57, - }, - { //NI6510EB - .id_offset14 = 0x52, - .id_offset15 = 0x44, - }, - { //Racal InterLan EtherBlaster - .id_offset14 = 0x52, - .id_offset15 = 0x49, - }, -}; -#define NUM_CARDS 3 - -#ifdef LANCE_DEBUG -static int lance_debug = LANCE_DEBUG; -#else -static int lance_debug = 1; -#endif - -/* - Theory of Operation - -I. Board Compatibility - -This device driver is designed for the AMD 79C960, the "PCnet-ISA -single-chip ethernet controller for ISA". 
This chip is used in a wide -variety of boards from vendors such as Allied Telesis, HP, Kingston, -and Boca. This driver is also intended to work with older AMD 7990 -designs, such as the NE1500 and NE2100, and newer 79C961. For convenience, -I use the name LANCE to refer to all of the AMD chips, even though it properly -refers only to the original 7990. - -II. Board-specific settings - -The driver is designed to work with the boards that use the faster -bus-master mode, rather than in shared memory mode. (Only older designs -have on-board buffer memory needed to support the slower shared memory mode.) - -Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA -channel. This driver probes the likely base addresses: -{0x300, 0x320, 0x340, 0x360}. -After the board is found it generates a DMA-timeout interrupt and uses -autoIRQ to find the IRQ line. The DMA channel can be set with the low bits -of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is -probed for by enabling each free DMA channel in turn and checking if -initialization succeeds. - -The HP-J2405A board is an exception: with this board it is easy to read the -EEPROM-set values for the base, IRQ, and DMA. (Of course you must already -_know_ the base address -- that field is for writing the EEPROM.) - -III. Driver operation - -IIIa. Ring buffers -The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes -the base and length of the data buffer, along with status bits. The number -of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of -the buffer count (rather than being the count directly) for -implementation ease. The current values are 2 (Tx) and 4 (Rx), which leads to -ring sizes of 4 (Tx) and 16 (Rx). Increasing the number of ring entries -needlessly uses extra space and reduces the chance that an upper layer will -be able to reorder queued Tx packets based on priority. Decreasing the number -of entries makes it more difficult to achieve back-to-back packet transmission -and increases the chance that the Rx ring will overflow. (Consider the worst case -of receiving back-to-back minimum-sized packets.) - -The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver -statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to -avoid the administrative overhead. For the Rx side this avoids dynamically -allocating full-sized buffers "just in case", at the expense of a -memory-to-memory data copy for each packet received. For most systems this -is a good tradeoff: the Rx buffer will always be in low memory, the copy -is inexpensive, and it primes the cache for later packet processing. For Tx -the buffers are only used when needed as low-memory bounce buffers. - -IIIB. 16M memory limitations. -For the ISA bus master mode all structures used directly by the LANCE, -the initialization block, Rx and Tx rings, and data buffers, must be -accessible from the ISA bus, i.e. in the lower 16M of real memory. -This is a problem for current Linux kernels on >16M machines. The network -devices are initialized after memory initialization, and the kernel doles out -memory from the top of memory downward. The current solution is to have a -special network initialization routine that's called before memory -initialization; this will eventually be generalized for all network devices. -As mentioned before, low-memory "bounce-buffers" are used when needed. - -IIIC.
Synchronization -The driver runs as two independent, single-threaded flows of control. One -is the send-packet routine, which enforces single-threaded use by the -dev->tbusy flag. The other thread is the interrupt handler, which is single -threaded by the hardware and other software. - -The send packet thread has partial control over the Tx ring and 'dev->tbusy' -flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next -queue slot is empty, it clears the tbusy flag when finished otherwise it sets -the 'lp->tx_full' flag. - -The interrupt handler has exclusive control over the Rx ring and records stats -from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so -we can't avoid the interrupt overhead by having the Tx routine reap the Tx -stats.) After reaping the stats, it marks the queue entry as empty by setting -the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the -tx_full and tbusy flags. - -*/ - -/* Set the number of Tx and Rx buffers, using Log_2(# buffers). - Reasonable default values are 16 Tx buffers, and 16 Rx buffers. - That translates to 4 and 4 (16 == 2^^4). - This is a compile-time option for efficiency. - */ -#ifndef LANCE_LOG_TX_BUFFERS -#define LANCE_LOG_TX_BUFFERS 4 -#define LANCE_LOG_RX_BUFFERS 4 -#endif - -#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) -#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) -#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29) - -#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) -#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) -#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29) - -#define PKT_BUF_SZ 1544 - -/* Offsets from base I/O address. */ -#define LANCE_DATA 0x10 -#define LANCE_ADDR 0x12 -#define LANCE_RESET 0x14 -#define LANCE_BUS_IF 0x16 -#define LANCE_TOTAL_SIZE 0x18 - -#define TX_TIMEOUT (HZ/5) - -/* The LANCE Rx and Tx ring descriptors. */ -struct lance_rx_head { - s32 base; - s16 buf_length; /* This length is 2s complement (negative)! */ - s16 msg_length; /* This length is "normal". */ -}; - -struct lance_tx_head { - s32 base; - s16 length; /* Length is 2s complement (negative)! */ - s16 misc; -}; - -/* The LANCE initialization block, described in databook. */ -struct lance_init_block { - u16 mode; /* Pre-set mode (reg. 15) */ - u8 phys_addr[6]; /* Physical ethernet address */ - u32 filter[2]; /* Multicast filter (unused). */ - /* Receive and transmit ring base, along with extra bits. */ - u32 rx_ring; /* Tx and Rx ring base pointers */ - u32 tx_ring; -}; - -struct lance_private { - /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */ - struct lance_rx_head rx_ring[RX_RING_SIZE]; - struct lance_tx_head tx_ring[TX_RING_SIZE]; - struct lance_init_block init_block; - const char *name; - /* The saved address of a sent-in-place packet/buffer, for skfree(). */ - struct sk_buff* tx_skbuff[TX_RING_SIZE]; - /* The addresses of receive-in-place skbuffs. */ - struct sk_buff* rx_skbuff[RX_RING_SIZE]; - unsigned long rx_buffs; /* Address of Rx and Tx buffers. */ - /* Tx low-memory "bounce buffer" address. */ - char (*tx_bounce_buffs)[PKT_BUF_SZ]; - int cur_rx, cur_tx; /* The next free ring entry */ - int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ - int dma; - unsigned char chip_version; /* See lance_chip_type. 
*/ - spinlock_t devlock; -}; - -#define LANCE_MUST_PAD 0x00000001 -#define LANCE_ENABLE_AUTOSELECT 0x00000002 -#define LANCE_MUST_REINIT_RING 0x00000004 -#define LANCE_MUST_UNRESET 0x00000008 -#define LANCE_HAS_MISSED_FRAME 0x00000010 - -/* A mapping from the chip ID number to the part number and features. - These are from the datasheets -- in real life the '970 version - reportedly has the same ID as the '965. */ -static struct lance_chip_type { - int id_number; - const char *name; - int flags; -} chip_table[] = { - {0x0000, "LANCE 7990", /* Ancient lance chip. */ - LANCE_MUST_PAD + LANCE_MUST_UNRESET}, - {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */ - LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + - LANCE_HAS_MISSED_FRAME}, - {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */ - LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + - LANCE_HAS_MISSED_FRAME}, - {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */ - LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + - LANCE_HAS_MISSED_FRAME}, - /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call - it the PCnet32. */ - {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */ - LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + - LANCE_HAS_MISSED_FRAME}, - {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCInetPCI II. */ - LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + - LANCE_HAS_MISSED_FRAME}, - {0x0, "PCnet (unknown)", - LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + - LANCE_HAS_MISSED_FRAME}, -}; - -enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6}; - - -/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers. - Assume yes until we know the memory size. */ -static unsigned char lance_need_isa_bounce_buffers = 1; - -static int lance_open(struct net_device *dev); -static void lance_init_ring(struct net_device *dev, gfp_t mode); -static netdev_tx_t lance_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static int lance_rx(struct net_device *dev); -static irqreturn_t lance_interrupt(int irq, void *dev_id); -static int lance_close(struct net_device *dev); -static struct net_device_stats *lance_get_stats(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static void lance_tx_timeout (struct net_device *dev); - - - -#ifdef MODULE -#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */ - -static struct net_device *dev_lance[MAX_CARDS]; -static int io[MAX_CARDS]; -static int dma[MAX_CARDS]; -static int irq[MAX_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(dma, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param(lance_debug, int, 0); -MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required"); -MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)"); -MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); -MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); - -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) /* only complain once */ - break; - printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. 
Append \"io=0xNNN\" value(s).\n"); - return -EPERM; - } - dev = alloc_etherdev(0); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->dma = dma[this_dev]; - if (do_lance_probe(dev) == 0) { - dev_lance[found++] = dev; - continue; - } - free_netdev(dev); - break; - } - if (found != 0) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - struct lance_private *lp = dev->ml_priv; - if (dev->dma != 4) - free_dma(dev->dma); - release_region(dev->base_addr, LANCE_TOTAL_SIZE); - kfree(lp->tx_bounce_buffs); - kfree((void*)lp->rx_buffs); - kfree(lp); -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) { - struct net_device *dev = dev_lance[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ -MODULE_LICENSE("GPL"); - - -/* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other - board probes now that kmalloc() can allocate ISA DMA-able regions. - This also allows the LANCE driver to be used as a module. - */ -static int __init do_lance_probe(struct net_device *dev) -{ - unsigned int *port; - int result; - - if (high_memory <= phys_to_virt(16*1024*1024)) - lance_need_isa_bounce_buffers = 0; - - for (port = lance_portlist; *port; port++) { - int ioaddr = *port; - struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE, - "lance-probe"); - - if (r) { - /* Detect the card with minimal I/O reads */ - char offset14 = inb(ioaddr + 14); - int card; - for (card = 0; card < NUM_CARDS; ++card) - if (cards[card].id_offset14 == offset14) - break; - if (card < NUM_CARDS) {/*yes, the first byte matches*/ - char offset15 = inb(ioaddr + 15); - for (card = 0; card < NUM_CARDS; ++card) - if ((cards[card].id_offset14 == offset14) && - (cards[card].id_offset15 == offset15)) - break; - } - if (card < NUM_CARDS) { /*Signature OK*/ - result = lance_probe1(dev, ioaddr, 0, 0); - if (!result) { - struct lance_private *lp = dev->ml_priv; - int ver = lp->chip_version; - - r->name = chip_table[ver].name; - return 0; - } - } - release_region(ioaddr, LANCE_TOTAL_SIZE); - } - } - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init lance_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(0); - int err; - - if (!dev) - return ERR_PTR(-ENODEV); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_lance_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops lance_netdev_ops = { - .ndo_open = lance_open, - .ndo_start_xmit = lance_start_xmit, - .ndo_stop = lance_close, - .ndo_get_stats = lance_get_stats, - .ndo_set_multicast_list = set_multicast_list, - .ndo_tx_timeout = lance_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options) -{ - struct lance_private *lp; - unsigned long dma_channels; /* Mark spuriously-busy DMA channels */ - int i, reset_val, lance_version; - const char *chipname; - /* Flags for specific chips or boards. */ - unsigned char hpJ2405A = 0; /* HP ISA adaptor */ - int hp_builtin = 0; /* HP on-board ethernet. */ - static int did_version; /* Already printed version info. */ - unsigned long flags; - int err = -ENOMEM; - void __iomem *bios; - - /* First we look for special cases. 
- Check for HP's on-board ethernet by looking for 'HP' in the BIOS. - There are two HP versions, check the BIOS for the configuration port. - This method was provided by L. Julliard, Laurent_Julliard@grenoble.hp.com. - */ - bios = ioremap(0xf00f0, 0x14); - if (!bios) - return -ENOMEM; - if (readw(bios + 0x12) == 0x5048) { - static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360}; - int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99; - /* We can have boards other than the built-in! Verify this is on-board. */ - if ((inb(hp_port) & 0xc0) == 0x80 && - ioaddr_table[inb(hp_port) & 3] == ioaddr) - hp_builtin = hp_port; - } - iounmap(bios); - /* We also recognize the HP Vectra on-board here, but check below. */ - hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 && - inb(ioaddr+2) == 0x09); - - /* Reset the LANCE. */ - reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */ - - /* The Un-Reset is only needed for the real NE2100, and will - confuse the HP board. */ - if (!hpJ2405A) - outw(reset_val, ioaddr+LANCE_RESET); - - outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */ - if (inw(ioaddr+LANCE_DATA) != 0x0004) - return -ENODEV; - - /* Get the version of the chip. */ - outw(88, ioaddr+LANCE_ADDR); - if (inw(ioaddr+LANCE_ADDR) != 88) { - lance_version = 0; - } else { /* Good, it's a newer chip. */ - int chip_version = inw(ioaddr+LANCE_DATA); - outw(89, ioaddr+LANCE_ADDR); - chip_version |= inw(ioaddr+LANCE_DATA) << 16; - if (lance_debug > 2) - printk(" LANCE chip version is %#x.\n", chip_version); - if ((chip_version & 0xfff) != 0x003) - return -ENODEV; - chip_version = (chip_version >> 12) & 0xffff; - for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) { - if (chip_table[lance_version].id_number == chip_version) - break; - } - } - - /* We can't allocate private data from alloc_etherdev() because it must - be in an ISA DMA-able region. */ - chipname = chip_table[lance_version].name; - printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr); - - /* There is a 16 byte station address PROM at the base address. - The first six bytes are the station address. */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + i); - printk("%pM", dev->dev_addr); - - dev->base_addr = ioaddr; - /* Make certain the data structures used by the LANCE are aligned and DMAble. */ - - lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); - if(lp==NULL) - return -ENODEV; - if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); - dev->ml_priv = lp; - lp->name = chipname; - lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE, - GFP_DMA | GFP_KERNEL); - if (!lp->rx_buffs) - goto out_lp; - if (lance_need_isa_bounce_buffers) { - lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE, - GFP_DMA | GFP_KERNEL); - if (!lp->tx_bounce_buffs) - goto out_rx; - } else - lp->tx_bounce_buffs = NULL; - - lp->chip_version = lance_version; - spin_lock_init(&lp->devlock); - - lp->init_block.mode = 0x0003; /* Disable Rx and Tx.
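The version probe above assembles CSR88/CSR89 into one 32-bit word; the low 12 bits must read 0x003 for an AMD part, and the next 16 bits are the ID looked up in chip_table. A worked decode (0x2621003 is an illustrative reading for a 79C970A):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t v = 0x2621003;

	if ((v & 0xfff) == 0x003)	/* AMD signature */
		printf("part id %#x\n", (unsigned)((v >> 12) & 0xffff));
	/* prints: part id 0x2621, the chip_table entry for the 79C970A */
	return 0;
}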
*/ - for (i = 0; i < 6; i++) - lp->init_block.phys_addr[i] = dev->dev_addr[i]; - lp->init_block.filter[0] = 0x00000000; - lp->init_block.filter[1] = 0x00000000; - lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS; - lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS; - - outw(0x0001, ioaddr+LANCE_ADDR); - inw(ioaddr+LANCE_ADDR); - outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA); - outw(0x0002, ioaddr+LANCE_ADDR); - inw(ioaddr+LANCE_ADDR); - outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA); - outw(0x0000, ioaddr+LANCE_ADDR); - inw(ioaddr+LANCE_ADDR); - - if (irq) { /* Set iff PCI card. */ - dev->dma = 4; /* Native bus-master, no DMA channel needed. */ - dev->irq = irq; - } else if (hp_builtin) { - static const char dma_tbl[4] = {3, 5, 6, 0}; - static const char irq_tbl[4] = {3, 4, 5, 9}; - unsigned char port_val = inb(hp_builtin); - dev->dma = dma_tbl[(port_val >> 4) & 3]; - dev->irq = irq_tbl[(port_val >> 2) & 3]; - printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma); - } else if (hpJ2405A) { - static const char dma_tbl[4] = {3, 5, 6, 7}; - static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15}; - short reset_val = inw(ioaddr+LANCE_RESET); - dev->dma = dma_tbl[(reset_val >> 2) & 3]; - dev->irq = irq_tbl[(reset_val >> 4) & 7]; - printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma); - } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */ - short bus_info; - outw(8, ioaddr+LANCE_ADDR); - bus_info = inw(ioaddr+LANCE_BUS_IF); - dev->dma = bus_info & 0x07; - dev->irq = (bus_info >> 4) & 0x0F; - } else { - /* The DMA channel may be passed in PARAM1. */ - if (dev->mem_start & 0x07) - dev->dma = dev->mem_start & 0x07; - } - - if (dev->dma == 0) { - /* Read the DMA channel status register, so that we can avoid - stuck DMA channels in the DMA detection below. */ - dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) | - (inb(DMA2_STAT_REG) & 0xf0); - } - err = -ENODEV; - if (dev->irq >= 2) - printk(" assigned IRQ %d", dev->irq); - else if (lance_version != 0) { /* 7990 boards need DMA detection first. */ - unsigned long irq_mask; - - /* To auto-IRQ we enable the initialization-done and DMA error - interrupts. For ISA boards we get a DMA error, but VLB and PCI - boards will work. */ - irq_mask = probe_irq_on(); - - /* Trigger an initialization just for the interrupt. */ - outw(0x0041, ioaddr+LANCE_DATA); - - mdelay(20); - dev->irq = probe_irq_off(irq_mask); - if (dev->irq) - printk(", probed IRQ %d", dev->irq); - else { - printk(", failed to detect IRQ line.\n"); - goto out_tx; - } - - /* Check for the initialization done bit, 0x0100, which means - that we don't need a DMA channel. */ - if (inw(ioaddr+LANCE_DATA) & 0x0100) - dev->dma = 4; - } - - if (dev->dma == 4) { - printk(", no DMA needed.\n"); - } else if (dev->dma) { - if (request_dma(dev->dma, chipname)) { - printk("DMA %d allocation failed.\n", dev->dma); - goto out_tx; - } else - printk(", assigned DMA %d.\n", dev->dma); - } else { /* OK, we have to auto-DMA. */ - for (i = 0; i < 4; i++) { - static const char dmas[] = { 5, 6, 7, 3 }; - int dma = dmas[i]; - int boguscnt; - - /* Don't enable a permanently busy DMA channel, or the machine - will hang. */ - if (test_bit(dma, &dma_channels)) - continue; - outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. 
*/ - if (request_dma(dma, chipname)) - continue; - - flags=claim_dma_lock(); - set_dma_mode(dma, DMA_MODE_CASCADE); - enable_dma(dma); - release_dma_lock(flags); - - /* Trigger an initialization. */ - outw(0x0001, ioaddr+LANCE_DATA); - for (boguscnt = 100; boguscnt > 0; --boguscnt) - if (inw(ioaddr+LANCE_DATA) & 0x0900) - break; - if (inw(ioaddr+LANCE_DATA) & 0x0100) { - dev->dma = dma; - printk(", DMA %d.\n", dev->dma); - break; - } else { - flags=claim_dma_lock(); - disable_dma(dma); - release_dma_lock(flags); - free_dma(dma); - } - } - if (i == 4) { /* Failure: bail. */ - printk("DMA detection failed.\n"); - goto out_tx; - } - } - - if (lance_version == 0 && dev->irq == 0) { - /* We may auto-IRQ now that we have a DMA channel. */ - /* Trigger an initialization just for the interrupt. */ - unsigned long irq_mask; - - irq_mask = probe_irq_on(); - outw(0x0041, ioaddr+LANCE_DATA); - - mdelay(40); - dev->irq = probe_irq_off(irq_mask); - if (dev->irq == 0) { - printk(" Failed to detect the 7990 IRQ line.\n"); - goto out_dma; - } - printk(" Auto-IRQ detected IRQ%d.\n", dev->irq); - } - - if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) { - /* Turn on auto-select of media (10baseT or BNC) so that the user - can watch the LEDs even if the board isn't opened. */ - outw(0x0002, ioaddr+LANCE_ADDR); - /* Don't touch 10base2 power bit. */ - outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF); - } - - if (lance_debug > 0 && did_version++ == 0) - printk(version); - - /* The LANCE-specific entries in the device structure. */ - dev->netdev_ops = &lance_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - - err = register_netdev(dev); - if (err) - goto out_dma; - return 0; -out_dma: - if (dev->dma != 4) - free_dma(dev->dma); -out_tx: - kfree(lp->tx_bounce_buffs); -out_rx: - kfree((void*)lp->rx_buffs); -out_lp: - kfree(lp); - return err; -} - - -static int -lance_open(struct net_device *dev) -{ - struct lance_private *lp = dev->ml_priv; - int ioaddr = dev->base_addr; - int i; - - if (dev->irq == 0 || - request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) { - return -EAGAIN; - } - - /* We used to allocate DMA here, but that was silly. - DMA lines can't be shared! We now permanently allocate them. */ - - /* Reset the LANCE */ - inw(ioaddr+LANCE_RESET); - - /* The DMA controller is used as a no-operation slave, "cascade mode". */ - if (dev->dma != 4) { - unsigned long flags=claim_dma_lock(); - enable_dma(dev->dma); - set_dma_mode(dev->dma, DMA_MODE_CASCADE); - release_dma_lock(flags); - } - - /* Un-Reset the LANCE, needed only for the NE2100. */ - if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET) - outw(0, ioaddr+LANCE_RESET); - - if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) { - /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */ - outw(0x0002, ioaddr+LANCE_ADDR); - /* Only touch autoselect bit. */ - outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF); - } - - if (lance_debug > 1) - printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n", - dev->name, dev->irq, dev->dma, - (u32) isa_virt_to_bus(lp->tx_ring), - (u32) isa_virt_to_bus(lp->rx_ring), - (u32) isa_virt_to_bus(&lp->init_block)); - - lance_init_ring(dev, GFP_KERNEL); - /* Re-initialize the LANCE, and start it when done. 
*/ - outw(0x0001, ioaddr+LANCE_ADDR); - outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA); - outw(0x0002, ioaddr+LANCE_ADDR); - outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA); - - outw(0x0004, ioaddr+LANCE_ADDR); - outw(0x0915, ioaddr+LANCE_DATA); - - outw(0x0000, ioaddr+LANCE_ADDR); - outw(0x0001, ioaddr+LANCE_DATA); - - netif_start_queue (dev); - - i = 0; - while (i++ < 100) - if (inw(ioaddr+LANCE_DATA) & 0x0100) - break; - /* - * We used to clear the InitDone bit, 0x0100, here but Mark Stockton - * reports that doing so triggers a bug in the '974. - */ - outw(0x0042, ioaddr+LANCE_DATA); - - if (lance_debug > 2) - printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n", - dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA)); - - return 0; /* Always succeed */ -} - -/* The LANCE has been halted for one reason or another (busmaster memory - arbitration error, Tx FIFO underflow, driver stopped it to reconfigure, - etc.). Modern LANCE variants always reload their ring-buffer - configuration when restarted, so we must reinitialize our ring - context before restarting. As part of this reinitialization, - find all packets still on the Tx ring and pretend that they had been - sent (in effect, drop the packets on the floor) - the higher-level - protocols will time out and retransmit. It'd be better to shuffle - these skbs to a temp list and then actually re-Tx them after - restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com -*/ - -static void -lance_purge_ring(struct net_device *dev) -{ - struct lance_private *lp = dev->ml_priv; - int i; - - /* Free all the skbuffs in the Rx and Tx queues. */ - for (i = 0; i < RX_RING_SIZE; i++) { - struct sk_buff *skb = lp->rx_skbuff[i]; - lp->rx_skbuff[i] = NULL; - lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */ - if (skb) - dev_kfree_skb_any(skb); - } - for (i = 0; i < TX_RING_SIZE; i++) { - if (lp->tx_skbuff[i]) { - dev_kfree_skb_any(lp->tx_skbuff[i]); - lp->tx_skbuff[i] = NULL; - } - } -} - - -/* Initialize the LANCE Rx and Tx rings. */ -static void -lance_init_ring(struct net_device *dev, gfp_t gfp) -{ - struct lance_private *lp = dev->ml_priv; - int i; - - lp->cur_rx = lp->cur_tx = 0; - lp->dirty_rx = lp->dirty_tx = 0; - - for (i = 0; i < RX_RING_SIZE; i++) { - struct sk_buff *skb; - void *rx_buff; - - skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp); - lp->rx_skbuff[i] = skb; - if (skb) { - skb->dev = dev; - rx_buff = skb->data; - } else - rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp); - if (rx_buff == NULL) - lp->rx_ring[i].base = 0; - else - lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000; - lp->rx_ring[i].buf_length = -PKT_BUF_SZ; - } - /* The Tx buffer address is filled in as needed, but we do need to clear - the upper ownership bit. 
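The ring setup above relies on two encoding conventions: the top bit of a descriptor's 'base' word is the ownership bit (set means the LANCE owns the buffer), and buffer lengths are stored as negative 16-bit two's-complement values. A quick stand-alone check of both (the bus address is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bus_addr = 0x00123456;		/* 24-bit ISA address, below 16M */
	uint32_t base = bus_addr | 0x80000000;	/* hand the buffer to the LANCE */
	int16_t buf_length = -1544;		/* -PKT_BUF_SZ */

	printf("base=%08x len=%04hx\n", (unsigned)base, (uint16_t)buf_length);
	/* prints: base=80123456 len=f9f8 */
	return 0;
}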
*/ - for (i = 0; i < TX_RING_SIZE; i++) { - lp->tx_skbuff[i] = NULL; - lp->tx_ring[i].base = 0; - } - - lp->init_block.mode = 0x0000; - for (i = 0; i < 6; i++) - lp->init_block.phys_addr[i] = dev->dev_addr[i]; - lp->init_block.filter[0] = 0x00000000; - lp->init_block.filter[1] = 0x00000000; - lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS; - lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS; -} - -static void -lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit) -{ - struct lance_private *lp = dev->ml_priv; - - if (must_reinit || - (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) { - lance_purge_ring(dev); - lance_init_ring(dev, GFP_ATOMIC); - } - outw(0x0000, dev->base_addr + LANCE_ADDR); - outw(csr0_bits, dev->base_addr + LANCE_DATA); -} - - -static void lance_tx_timeout (struct net_device *dev) -{ - struct lance_private *lp = (struct lance_private *) dev->ml_priv; - int ioaddr = dev->base_addr; - - outw (0, ioaddr + LANCE_ADDR); - printk ("%s: transmit timed out, status %4.4x, resetting.\n", - dev->name, inw (ioaddr + LANCE_DATA)); - outw (0x0004, ioaddr + LANCE_DATA); - dev->stats.tx_errors++; -#ifndef final_version - if (lance_debug > 3) { - int i; - printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", - lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "", - lp->cur_rx); - for (i = 0; i < RX_RING_SIZE; i++) - printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ", - lp->rx_ring[i].base, -lp->rx_ring[i].buf_length, - lp->rx_ring[i].msg_length); - for (i = 0; i < TX_RING_SIZE; i++) - printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ", - lp->tx_ring[i].base, -lp->tx_ring[i].length, - lp->tx_ring[i].misc); - printk ("\n"); - } -#endif - lance_restart (dev, 0x0043, 1); - - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue (dev); -} - - -static netdev_tx_t lance_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct lance_private *lp = dev->ml_priv; - int ioaddr = dev->base_addr; - int entry; - unsigned long flags; - - spin_lock_irqsave(&lp->devlock, flags); - - if (lance_debug > 3) { - outw(0x0000, ioaddr+LANCE_ADDR); - printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name, - inw(ioaddr+LANCE_DATA)); - outw(0x0000, ioaddr+LANCE_DATA); - } - - /* Fill in a Tx ring entry */ - - /* Mask to ring buffer boundary. */ - entry = lp->cur_tx & TX_RING_MOD_MASK; - - /* Caution: the write order is important here, set the base address - with the "ownership" bits last. */ - - /* The old LANCE chips doesn't automatically pad buffers to min. size. */ - if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) { - if (skb->len < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - goto out; - lp->tx_ring[entry].length = -ETH_ZLEN; - } - else - lp->tx_ring[entry].length = -skb->len; - } else - lp->tx_ring[entry].length = -skb->len; - - lp->tx_ring[entry].misc = 0x0000; - - dev->stats.tx_bytes += skb->len; - - /* If any part of this buffer is >16M we must copy it to a low-memory - buffer. 
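The copy decision described above comes down to a single comparison: if any byte of the frame would sit at or beyond the 16 MB boundary that ISA's 24 address lines can reach, the data has to go through a bounce buffer first. The test in isolation (addresses are made up):

#include <stdio.h>
#include <stdint.h>

static int needs_bounce(uint32_t bus_addr, uint32_t len)
{
	return bus_addr + len > 0x01000000;	/* end of packet past 16M? */
}

int main(void)
{
	printf("%d %d\n",
	       needs_bounce(0x00fffff0, 64),	/* 1: tail crosses 16M */
	       needs_bounce(0x00100000, 1544));	/* 0: entirely below */
	return 0;
}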
*/ - if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) { - if (lance_debug > 5) - printk("%s: bouncing a high-memory packet (%#x).\n", - dev->name, (u32)isa_virt_to_bus(skb->data)); - skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len); - lp->tx_ring[entry].base = - ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; - dev_kfree_skb(skb); - } else { - lp->tx_skbuff[entry] = skb; - lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000; - } - lp->cur_tx++; - - /* Trigger an immediate send poll. */ - outw(0x0000, ioaddr+LANCE_ADDR); - outw(0x0048, ioaddr+LANCE_DATA); - - if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) - netif_stop_queue(dev); - -out: - spin_unlock_irqrestore(&lp->devlock, flags); - return NETDEV_TX_OK; -} - -/* The LANCE interrupt handler. */ -static irqreturn_t lance_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct lance_private *lp; - int csr0, ioaddr, boguscnt=10; - int must_restart; - - ioaddr = dev->base_addr; - lp = dev->ml_priv; - - spin_lock (&lp->devlock); - - outw(0x00, dev->base_addr + LANCE_ADDR); - while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 && - --boguscnt >= 0) { - /* Acknowledge all of the current interrupt sources ASAP. */ - outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA); - - must_restart = 0; - - if (lance_debug > 5) - printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", - dev->name, csr0, inw(dev->base_addr + LANCE_DATA)); - - if (csr0 & 0x0400) /* Rx interrupt */ - lance_rx(dev); - - if (csr0 & 0x0200) { /* Tx-done interrupt */ - int dirty_tx = lp->dirty_tx; - - while (dirty_tx < lp->cur_tx) { - int entry = dirty_tx & TX_RING_MOD_MASK; - int status = lp->tx_ring[entry].base; - - if (status < 0) - break; /* It still hasn't been Txed */ - - lp->tx_ring[entry].base = 0; - - if (status & 0x40000000) { - /* There was an major error, log it. */ - int err_status = lp->tx_ring[entry].misc; - dev->stats.tx_errors++; - if (err_status & 0x0400) - dev->stats.tx_aborted_errors++; - if (err_status & 0x0800) - dev->stats.tx_carrier_errors++; - if (err_status & 0x1000) - dev->stats.tx_window_errors++; - if (err_status & 0x4000) { - /* Ackk! On FIFO errors the Tx unit is turned off! */ - dev->stats.tx_fifo_errors++; - /* Remove this verbosity later! */ - printk("%s: Tx FIFO error! Status %4.4x.\n", - dev->name, csr0); - /* Restart the chip. */ - must_restart = 1; - } - } else { - if (status & 0x18000000) - dev->stats.collisions++; - dev->stats.tx_packets++; - } - - /* We must free the original skb if it's not a data-only copy - in the bounce buffer. */ - if (lp->tx_skbuff[entry]) { - dev_kfree_skb_irq(lp->tx_skbuff[entry]); - lp->tx_skbuff[entry] = NULL; - } - dirty_tx++; - } - -#ifndef final_version - if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { - printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n", - dirty_tx, lp->cur_tx, - netif_queue_stopped(dev) ? "yes" : "no"); - dirty_tx += TX_RING_SIZE; - } -#endif - - /* if the ring is no longer full, accept more packets */ - if (netif_queue_stopped(dev) && - dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) - netif_wake_queue (dev); - - lp->dirty_tx = dirty_tx; - } - - /* Log misc errors. */ - if (csr0 & 0x4000) - dev->stats.tx_errors++; /* Tx babble. */ - if (csr0 & 0x1000) - dev->stats.rx_errors++; /* Missed a Rx frame. */ - if (csr0 & 0x0800) { - printk("%s: Bus master arbitration failure, status %4.4x.\n", - dev->name, csr0); - /* Restart the chip. 
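The reap loop above uses a common ring idiom: cur_tx and dirty_tx are free-running counters that are masked down to a ring slot only on access, so their difference is always the number of descriptors in flight. A toy illustration with this driver's default ring size (sample values are arbitrary):

#include <stdio.h>

#define TX_RING_SIZE		16	/* 1 << LANCE_LOG_TX_BUFFERS */
#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)

int main(void)
{
	int cur_tx = 37, dirty_tx = 25;

	printf("in flight %d, next slot to reap %d\n",
	       cur_tx - dirty_tx, dirty_tx & TX_RING_MOD_MASK);
	/* prints: in flight 12, next slot to reap 9 */
	return 0;
}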
*/ - must_restart = 1; - } - - if (must_restart) { - /* stop the chip to clear the error condition, then restart */ - outw(0x0000, dev->base_addr + LANCE_ADDR); - outw(0x0004, dev->base_addr + LANCE_DATA); - lance_restart(dev, 0x0002, 0); - } - } - - /* Clear any other interrupt, and set interrupt enable. */ - outw(0x0000, dev->base_addr + LANCE_ADDR); - outw(0x7940, dev->base_addr + LANCE_DATA); - - if (lance_debug > 4) - printk("%s: exiting interrupt, csr%d=%#4.4x.\n", - dev->name, inw(ioaddr + LANCE_ADDR), - inw(dev->base_addr + LANCE_DATA)); - - spin_unlock (&lp->devlock); - return IRQ_HANDLED; -} - -static int -lance_rx(struct net_device *dev) -{ - struct lance_private *lp = dev->ml_priv; - int entry = lp->cur_rx & RX_RING_MOD_MASK; - int i; - - /* If we own the next entry, it's a new packet. Send it up. */ - while (lp->rx_ring[entry].base >= 0) { - int status = lp->rx_ring[entry].base >> 24; - - if (status != 0x03) { /* There was an error. */ - /* There is a tricky error noted by John Murphy, - to Russ Nelson: Even with full-sized - buffers it's possible for a jabber packet to use two - buffers, with only the last correctly noting the error. */ - if (status & 0x01) /* Only count a general error at the */ - dev->stats.rx_errors++; /* end of a packet.*/ - if (status & 0x20) - dev->stats.rx_frame_errors++; - if (status & 0x10) - dev->stats.rx_over_errors++; - if (status & 0x08) - dev->stats.rx_crc_errors++; - if (status & 0x04) - dev->stats.rx_fifo_errors++; - lp->rx_ring[entry].base &= 0x03ffffff; - } - else - { - /* Malloc up new buffer, compatible with net3. */ - short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4; - struct sk_buff *skb; - - if(pkt_len<60) - { - printk("%s: Runt packet!\n",dev->name); - dev->stats.rx_errors++; - } - else - { - skb = dev_alloc_skb(pkt_len+2); - if (skb == NULL) - { - printk("%s: Memory squeeze, deferring packet.\n", dev->name); - for (i=0; i < RX_RING_SIZE; i++) - if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0) - break; - - if (i > RX_RING_SIZE -2) - { - dev->stats.rx_dropped++; - lp->rx_ring[entry].base |= 0x80000000; - lp->cur_rx++; - } - break; - } - skb_reserve(skb,2); /* 16 byte align */ - skb_put(skb,pkt_len); /* Make room */ - skb_copy_to_linear_data(skb, - (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)), - pkt_len); - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - } - /* The docs say that the buffer length isn't touched, but Andrew Boyd - of QNX reports that some revs of the 79C965 clear it. */ - lp->rx_ring[entry].buf_length = -PKT_BUF_SZ; - lp->rx_ring[entry].base |= 0x80000000; - entry = (++lp->cur_rx) & RX_RING_MOD_MASK; - } - - /* We should check that at least two ring entries are free. If not, - we should free one and mark stats->rx_dropped++. */ - - return 0; -} - -static int -lance_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct lance_private *lp = dev->ml_priv; - - netif_stop_queue (dev); - - if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { - outw(112, ioaddr+LANCE_ADDR); - dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); - } - outw(0, ioaddr+LANCE_ADDR); - - if (lance_debug > 1) - printk("%s: Shutting down ethercard, status was %2.2x.\n", - dev->name, inw(ioaddr+LANCE_DATA)); - - /* We stop the LANCE here -- it occasionally polls - memory if we don't. 
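The stop that follows uses the same two-step idiom as every CSR access in this driver: select a register through the address port (RAP), then read or write the data port (RDP). Sketched as helpers for clarity (kernel context; LANCE_ADDR and LANCE_DATA are this file's own offsets, and lance.c open-codes these outw/inw pairs rather than defining such functions):

static void write_csr(int ioaddr, int csr, unsigned short val)
{
	outw(csr, ioaddr + LANCE_ADDR);	/* select the CSR via RAP */
	outw(val, ioaddr + LANCE_DATA);	/* write it through RDP */
}

static unsigned short read_csr(int ioaddr, int csr)
{
	outw(csr, ioaddr + LANCE_ADDR);
	return inw(ioaddr + LANCE_DATA);
}

The write of 0x0004 below is then just write_csr(ioaddr, 0, 0x0004): the STOP bit in CSR0.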
*/ - outw(0x0004, ioaddr+LANCE_DATA); - - if (dev->dma != 4) - { - unsigned long flags=claim_dma_lock(); - disable_dma(dev->dma); - release_dma_lock(flags); - } - free_irq(dev->irq, dev); - - lance_purge_ring(dev); - - return 0; -} - -static struct net_device_stats *lance_get_stats(struct net_device *dev) -{ - struct lance_private *lp = dev->ml_priv; - - if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { - short ioaddr = dev->base_addr; - short saved_addr; - unsigned long flags; - - spin_lock_irqsave(&lp->devlock, flags); - saved_addr = inw(ioaddr+LANCE_ADDR); - outw(112, ioaddr+LANCE_ADDR); - dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); - outw(saved_addr, ioaddr+LANCE_ADDR); - spin_unlock_irqrestore(&lp->devlock, flags); - } - - return &dev->stats; -} - -/* Set or clear the multicast filter for this adaptor. - */ - -static void set_multicast_list(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - - outw(0, ioaddr+LANCE_ADDR); - outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */ - - if (dev->flags&IFF_PROMISC) { - outw(15, ioaddr+LANCE_ADDR); - outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */ - } else { - short multicast_table[4]; - int i; - int num_addrs=netdev_mc_count(dev); - if(dev->flags&IFF_ALLMULTI) - num_addrs=1; - /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */ - memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table)); - for (i = 0; i < 4; i++) { - outw(8 + i, ioaddr+LANCE_ADDR); - outw(multicast_table[i], ioaddr+LANCE_DATA); - } - outw(15, ioaddr+LANCE_ADDR); - outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */ - } - - lance_restart(dev, 0x0142, 0); /* Resume normal operation */ - -} - diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c deleted file mode 100644 index 3a7ad84..0000000 --- a/drivers/net/mvme147.c +++ /dev/null @@ -1,205 +0,0 @@ -/* mvme147.c : the Linux/mvme147/lance ethernet driver - * - * Copyright (C) 05/1998 Peter Maydell - * Based on the Sun Lance driver and the NetBSD HP Lance driver - * Uses the generic 7990.c LANCE code. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -/* Used for the temporal inet entries and routing */ -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* We have 16834 bytes of RAM for the init block and buffers. This places - * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx - * buffers and 2 Tx buffers. - */ -#define LANCE_LOG_TX_BUFFERS 1 -#define LANCE_LOG_RX_BUFFERS 3 - -#include "7990.h" /* use generic LANCE code */ - -/* Our private data structure */ -struct m147lance_private { - struct lance_private lance; - unsigned long ram; -}; - -/* function prototypes... This is easy because all the grot is in the - * generic LANCE support. All we have to support is probing for boards, - * plus board-specific init, open and close actions. - * Oh, and we need to tell the generic code how to read and write LANCE registers... 
- */ -static int m147lance_open(struct net_device *dev); -static int m147lance_close(struct net_device *dev); -static void m147lance_writerap(struct lance_private *lp, unsigned short value); -static void m147lance_writerdp(struct lance_private *lp, unsigned short value); -static unsigned short m147lance_readrdp(struct lance_private *lp); - -typedef void (*writerap_t)(void *, unsigned short); -typedef void (*writerdp_t)(void *, unsigned short); -typedef unsigned short (*readrdp_t)(void *); - -static const struct net_device_ops lance_netdev_ops = { - .ndo_open = m147lance_open, - .ndo_stop = m147lance_close, - .ndo_start_xmit = lance_start_xmit, - .ndo_set_multicast_list = lance_set_multicast, - .ndo_tx_timeout = lance_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -}; - -/* Initialise the one and only on-board 7990 */ -struct net_device * __init mvme147lance_probe(int unit) -{ - struct net_device *dev; - static int called; - static const char name[] = "MVME147 LANCE"; - struct m147lance_private *lp; - u_long *addr; - u_long address; - int err; - - if (!MACH_IS_MVME147 || called) - return ERR_PTR(-ENODEV); - called++; - - dev = alloc_etherdev(sizeof(struct m147lance_private)); - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) - sprintf(dev->name, "eth%d", unit); - - /* Fill the dev fields */ - dev->base_addr = (unsigned long)MVME147_LANCE_BASE; - dev->netdev_ops = &lance_netdev_ops; - dev->dma = 0; - - addr=(u_long *)ETHERNET_ADDRESS; - address = *addr; - dev->dev_addr[0]=0x08; - dev->dev_addr[1]=0x00; - dev->dev_addr[2]=0x3e; - address=address>>8; - dev->dev_addr[5]=address&0xff; - address=address>>8; - dev->dev_addr[4]=address&0xff; - address=address>>8; - dev->dev_addr[3]=address&0xff; - - printk("%s: MVME147 at 0x%08lx, irq %d, " - "Hardware Address %pM\n", - dev->name, dev->base_addr, MVME147_LANCE_IRQ, - dev->dev_addr); - - lp = netdev_priv(dev); - lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */ - if (!lp->ram) - { - printk("%s: No memory for LANCE buffers\n", dev->name); - free_netdev(dev); - return ERR_PTR(-ENOMEM); - } - - lp->lance.name = (char*)name; /* discards const, shut up gcc */ - lp->lance.base = dev->base_addr; - lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */ - lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */ - lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */ - lp->lance.irq = MVME147_LANCE_IRQ; - lp->lance.writerap = (writerap_t)m147lance_writerap; - lp->lance.writerdp = (writerdp_t)m147lance_writerdp; - lp->lance.readrdp = (readrdp_t)m147lance_readrdp; - lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS; - lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; - lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK; - lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK; - - err = register_netdev(dev); - if (err) { - free_pages(lp->ram, 3); - free_netdev(dev); - return ERR_PTR(err); - } - - return dev; -} - -static void m147lance_writerap(struct lance_private *lp, unsigned short value) -{ - out_be16(lp->base + LANCE_RAP, value); -} - -static void m147lance_writerdp(struct lance_private *lp, unsigned short value) -{ - out_be16(lp->base + LANCE_RDP, value); -} - -static unsigned short m147lance_readrdp(struct lance_private *lp) -{ - return in_be16(lp->base + LANCE_RDP); -} - -static int m147lance_open(struct net_device *dev) -{ - int status; - - status = lance_open(dev); /* call generic lance open code */ - if 
(status) - return status; - /* enable interrupts at board level. */ - m147_pcc->lan_cntrl=0; /* clear the interrupts (if any) */ - m147_pcc->lan_cntrl=0x08 | 0x04; /* Enable irq 4 */ - - return 0; -} - -static int m147lance_close(struct net_device *dev) -{ - /* disable interrupts at board level */ - m147_pcc->lan_cntrl=0x0; /* disable interrupts */ - lance_close(dev); - return 0; -} - -#ifdef MODULE -MODULE_LICENSE("GPL"); - -static struct net_device *dev_mvme147_lance; -int __init init_module(void) -{ - dev_mvme147_lance = mvme147lance_probe(-1); - if (IS_ERR(dev_mvme147_lance)) - return PTR_ERR(dev_mvme147_lance); - return 0; -} - -void __exit cleanup_module(void) -{ - struct m147lance_private *lp = netdev_priv(dev_mvme147_lance); - unregister_netdev(dev_mvme147_lance); - free_pages(lp->ram, 3); - free_netdev(dev_mvme147_lance); -} - -#endif /* MODULE */ diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c deleted file mode 100644 index c75ae85..0000000 --- a/drivers/net/ni65.c +++ /dev/null @@ -1,1254 +0,0 @@ -/* - * ni6510 (am7990 'lance' chip) driver for Linux-net-3 - * BETAcode v0.71 (96/09/29) for 2.0.0 (or later) - * copyrights (c) 1994,1995,1996 by M.Hipp - * - * This driver can handle the old ni6510 board and the newer ni6510 - * EtherBlaster. (probably it also works with every full NE2100 - * compatible card) - * - * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7 - * - * This is an extension to the Linux operating system, and is covered by the - * same GNU General Public License that covers the Linux-kernel. - * - * comments/bugs/suggestions can be sent to: - * Michael Hipp - * email: hippm@informatik.uni-tuebingen.de - * - * sources: - * some things are from the 'ni6510-packet-driver for dos by Russ Nelson' - * and from the original drivers by D.Becker - * - * known problems: - * - on some PCI boards (including my own) the card/board/ISA-bridge has - * problems with bus master DMA. This results in lotsa overruns. - * It may help to '#define RCV_PARANOIA_CHECK' or try to #undef - * the XMT and RCV_VIA_SKB option .. this reduces driver performance. - * Or just play with your BIOS options to optimize ISA-DMA access. - * Maybe you also wanna play with the LOW_PERFORMANCE and MID_PERFORMANCE - * defines -> please report me your experience then - * - Harald reported for ASUS SP3G mainboards, that you should use - * the 'optimal settings' from the user's manual on page 3-12! - * - * credits: - * thanx to Jason Sullivan for sending me a ni6510 card! - * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig - * - * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB) - * average: FTP -> 8384421 bytes received in 8.5 seconds - * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS) - * peak: FTP -> 8384421 bytes received in 7.5 seconds - * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS) - */ - -/* - * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK) - * 96.Sept.29: virt_to_bus stuff added for new memory model - * 96.April.29: Added Harald Koenig's Patches (MH) - * 96.April.13: enhanced error handling .. more tests (MH) - * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH) - * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH) - * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH) - * hopefully no more 16MB limit - * - * 95.Nov.18: multicast tweaked (AC).
- * - * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH) - * - * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH) - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "ni65.h" - -/* - * the current setting allows an acceptable performance - * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in - * the header of this file - * 'invert' the defines for max. performance. This may cause DMA problems - * on some boards (e.g on my ASUS SP3G) - */ -#undef XMT_VIA_SKB -#undef RCV_VIA_SKB -#define RCV_PARANOIA_CHECK - -#define MID_PERFORMANCE - -#if defined( LOW_PERFORMANCE ) - static int isa0=7,isa1=7,csr80=0x0c10; -#elif defined( MID_PERFORMANCE ) - static int isa0=5,isa1=5,csr80=0x2810; -#else /* high performance */ - static int isa0=4,isa1=4,csr80=0x0017; -#endif - -/* - * a few card/vendor specific defines - */ -#define NI65_ID0 0x00 -#define NI65_ID1 0x55 -#define NI65_EB_ID0 0x52 -#define NI65_EB_ID1 0x44 -#define NE2100_ID0 0x57 -#define NE2100_ID1 0x57 - -#define PORT p->cmdr_addr - -/* - * buffer configuration - */ -#if 1 -#define RMDNUM 16 -#define RMDNUMMASK 0x80000000 -#else -#define RMDNUM 8 -#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */ -#endif - -#if 0 -#define TMDNUM 1 -#define TMDNUMMASK 0x00000000 -#else -#define TMDNUM 4 -#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */ -#endif - -/* slightly oversized */ -#define R_BUF_SIZE 1544 -#define T_BUF_SIZE 1544 - -/* - * lance register defines - */ -#define L_DATAREG 0x00 -#define L_ADDRREG 0x02 -#define L_RESET 0x04 -#define L_CONFIG 0x05 -#define L_BUSIF 0x06 - -/* - * to access the lance/am7990-regs, you have to write - * reg-number into L_ADDRREG, then you can access it using L_DATAREG - */ -#define CSR0 0x00 -#define CSR1 0x01 -#define CSR2 0x02 -#define CSR3 0x03 - -#define INIT_RING_BEFORE_START 0x1 -#define FULL_RESET_ON_ERROR 0x2 - -#if 0 -#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \ - outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);} -#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\ - inw(PORT+L_DATAREG)) -#if 0 -#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);} -#else -#define writedatareg(val) { writereg(val,CSR0); } -#endif -#else -#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);} -#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG)) -#define writedatareg(val) { writereg(val,CSR0); } -#endif - -static unsigned char ni_vendor[] = { 0x02,0x07,0x01 }; - -static struct card { - unsigned char id0,id1; - short id_offset; - short total_size; - short cmd_offset; - short addr_offset; - unsigned char *vendor_id; - char *cardname; - unsigned long config; -} cards[] = { - { - .id0 = NI65_ID0, - .id1 = NI65_ID1, - .id_offset = 0x0e, - .total_size = 0x10, - .cmd_offset = 0x0, - .addr_offset = 0x8, - .vendor_id = ni_vendor, - .cardname = "ni6510", - .config = 0x1, - }, - { - .id0 = NI65_EB_ID0, - .id1 = NI65_EB_ID1, - .id_offset = 0x0e, - .total_size = 0x18, - .cmd_offset = 0x10, - .addr_offset = 0x0, - .vendor_id = ni_vendor, - .cardname = "ni6510 EtherBlaster", - .config = 0x2, - }, - { - .id0 = NE2100_ID0, - .id1 = NE2100_ID1, - .id_offset = 0x0e, - .total_size = 0x18, - .cmd_offset = 0x10, - .addr_offset = 0x0, - .vendor_id = NULL, - .cardname = "generic NE2100", - .config = 0x0, - }, -}; -#define NUM_CARDS 3 - -struct priv -{ 
- struct rmd rmdhead[RMDNUM];
- struct tmd tmdhead[TMDNUM];
- struct init_block ib;
- int rmdnum;
- int tmdnum,tmdlast;
-#ifdef RCV_VIA_SKB
- struct sk_buff *recv_skb[RMDNUM];
-#else
- void *recvbounce[RMDNUM];
-#endif
-#ifdef XMT_VIA_SKB
- struct sk_buff *tmd_skb[TMDNUM];
-#endif
- void *tmdbounce[TMDNUM];
- int tmdbouncenum;
- int lock,xmit_queued;
-
- void *self;
- int cmdr_addr;
- int cardno;
- int features;
- spinlock_t ring_lock;
-};
-
-static int ni65_probe1(struct net_device *dev,int);
-static irqreturn_t ni65_interrupt(int irq, void * dev_id);
-static void ni65_recv_intr(struct net_device *dev,int);
-static void ni65_xmit_intr(struct net_device *dev,int);
-static int ni65_open(struct net_device *dev);
-static int ni65_lance_reinit(struct net_device *dev);
-static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
-static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static void ni65_timeout(struct net_device *dev);
-static int ni65_close(struct net_device *dev);
-static int ni65_alloc_buffer(struct net_device *dev);
-static void ni65_free_buffer(struct priv *p);
-static void set_multicast_list(struct net_device *dev);
-
-static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
-static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
-
-static int debuglevel = 1;
-
-/*
- * set 'performance' registers .. we must STOP lance for that
- */
-static void ni65_set_performance(struct priv *p)
-{
- writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
-
- if( !(cards[p->cardno].config & 0x02) )
- return;
-
- outw(80,PORT+L_ADDRREG);
- if(inw(PORT+L_ADDRREG) != 80)
- return;
-
- writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
- outw(0,PORT+L_ADDRREG);
- outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
- outw(1,PORT+L_ADDRREG);
- outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
-
- outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
-}
-
-/*
- * open interface (up)
- */
-static int ni65_open(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
- int irqval = request_irq(dev->irq, ni65_interrupt,0,
- cards[p->cardno].cardname,dev);
- if (irqval) {
- printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
- dev->name,dev->irq, irqval);
- return -EAGAIN;
- }
-
- if(ni65_lance_reinit(dev))
- {
- netif_start_queue(dev);
- return 0;
- }
- else
- {
- free_irq(dev->irq,dev);
- return -EAGAIN;
- }
-}
-
-/*
- * close interface (down)
- */
-static int ni65_close(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
-
- netif_stop_queue(dev);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
-
-#ifdef XMT_VIA_SKB
- {
- int i;
- for(i=0;i<TMDNUM;i++)
- {
- if(p->tmd_skb[i]) {
- dev_kfree_skb(p->tmd_skb[i]);
- p->tmd_skb[i] = NULL;
- }
- }
- }
-#endif
- free_irq(dev->irq,dev);
- return 0;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
- disable_dma(dev->dma);
- free_dma(dev->dma);
- release_region(dev->base_addr, cards[p->cardno].total_size);
- ni65_free_buffer(p);
-}
-
-/* set: io,irq,dma or set it when calling insmod */
-static int irq;
-static int io;
-static int dma;
-
-/*
- * Probe The Card (not the lance-chip)
- */
-struct net_device * __init ni65_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(0);
- static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
- const int *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
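- /* pick up any io/irq/dma settings handed over on the kernel
-  * command line ("ether=...") before falling back to probing */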
- netdev_boot_setup_check(dev);
- irq = dev->irq;
- dma = dev->dma;
- } else {
- dev->base_addr = io;
- }
-
- if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
- err = ni65_probe1(dev, dev->base_addr);
- } else if (dev->base_addr > 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = ports; *port && ni65_probe1(dev, *port); port++)
- ;
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
-
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops ni65_netdev_ops = {
- .ndo_open = ni65_open,
- .ndo_stop = ni65_close,
- .ndo_start_xmit = ni65_send_packet,
- .ndo_tx_timeout = ni65_timeout,
- .ndo_set_multicast_list = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/*
- * this is the real card probe ..
- */
-static int __init ni65_probe1(struct net_device *dev,int ioaddr)
-{
- int i,j;
- struct priv *p;
- unsigned long flags;
-
- dev->irq = irq;
- dev->dma = dma;
-
- for(i=0;i<NUM_CARDS;i++) {
- if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
- continue;
- if(cards[i].id_offset >= 0) {
- if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
- inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
- release_region(ioaddr, cards[i].total_size);
- continue;
- }
- }
- if(cards[i].vendor_id) {
- for(j=0;j<3;j++)
- if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) {
- release_region(ioaddr, cards[i].total_size);
- continue;
- }
- }
- break;
- }
- if(i == NUM_CARDS)
- return -ENODEV;
-
- for(j=0;j<6;j++)
- dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
-
- if( (j=ni65_alloc_buffer(dev)) < 0) {
- release_region(ioaddr, cards[i].total_size);
- return j;
- }
- p = dev->ml_priv;
- p->cmdr_addr = ioaddr + cards[i].cmd_offset;
- p->cardno = i;
- spin_lock_init(&p->ring_lock);
-
- printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
- if( (j=readreg(CSR0)) != 0x4) {
- printk("failed.\n");
- printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
-
- outw(88,PORT+L_ADDRREG);
- if(inw(PORT+L_ADDRREG) == 88) {
- unsigned long v;
- v = inw(PORT+L_DATAREG);
- v <<= 16;
- outw(89,PORT+L_ADDRREG);
- v |= inw(PORT+L_DATAREG);
- printk("Version %#08lx, ",v);
- p->features = INIT_RING_BEFORE_START;
- }
- else {
- printk("ancient LANCE, ");
- p->features = 0x0;
- }
-
- if(test_bit(0,&cards[i].config)) {
- dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
- dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
- printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
- }
- else {
- if(dev->dma == 0) {
- /* 'stuck test' from lance.c */
- unsigned long dma_channels =
- ((inb(DMA1_STAT_REG) >> 4) & 0x0f)
- | (inb(DMA2_STAT_REG) & 0xf0);
- for(i=1;i<5;i++) {
- int dma = dmatab[i];
- if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
- continue;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- set_dma_mode(dma,DMA_MODE_CASCADE);
- enable_dma(dma);
- release_dma_lock(flags);
-
- ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
-
- flags=claim_dma_lock();
- disable_dma(dma);
- free_dma(dma);
- release_dma_lock(flags);
-
- if(readreg(CSR0) & CSR0_IDON)
- break;
- }
- if(i == 5) {
- printk("failed.\n");
- printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
- ni65_free_buffer(p);
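- /* no candidate channel made the LANCE fetch its init block in
-  * cascade mode, so give up and undo the allocations */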
release_region(ioaddr, cards[p->cardno].total_size); - return -EAGAIN; - } - dev->dma = dmatab[i]; - printk("DMA %d (autodetected), ",dev->dma); - } - else - printk("DMA %d (assigned), ",dev->dma); - - if(dev->irq < 2) - { - unsigned long irq_mask; - - ni65_init_lance(p,dev->dev_addr,0,0); - irq_mask = probe_irq_on(); - writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */ - msleep(20); - dev->irq = probe_irq_off(irq_mask); - if(!dev->irq) - { - printk("Failed to detect IRQ line!\n"); - ni65_free_buffer(p); - release_region(ioaddr, cards[p->cardno].total_size); - return -EAGAIN; - } - printk("IRQ %d (autodetected).\n",dev->irq); - } - else - printk("IRQ %d (assigned).\n",dev->irq); - } - - if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0) - { - printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma); - ni65_free_buffer(p); - release_region(ioaddr, cards[p->cardno].total_size); - return -EAGAIN; - } - - dev->base_addr = ioaddr; - dev->netdev_ops = &ni65_netdev_ops; - dev->watchdog_timeo = HZ/2; - - return 0; /* everything is OK */ -} - -/* - * set lance register and trigger init - */ -static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode) -{ - int i; - u32 pib; - - writereg(CSR0_CLRALL|CSR0_STOP,CSR0); - - for(i=0;i<6;i++) - p->ib.eaddr[i] = daddr[i]; - - for(i=0;i<8;i++) - p->ib.filter[i] = filter; - p->ib.mode = mode; - - p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK; - p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK; - writereg(0,CSR3); /* busmaster/no word-swap */ - pib = (u32) isa_virt_to_bus(&p->ib); - writereg(pib & 0xffff,CSR1); - writereg(pib >> 16,CSR2); - - writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */ - - for(i=0;i<32;i++) - { - mdelay(4); - if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) ) - break; /* init ok ? */ - } -} - -/* - * allocate memory area and check the 16MB border - */ -static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type) -{ - struct sk_buff *skb=NULL; - unsigned char *ptr; - void *ret; - - if(type) { - ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA); - if(!skb) { - printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); - return NULL; - } - skb_reserve(skb,2+16); - skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ - ptr = skb->data; - } - else { - ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA); - if(!ret) { - printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); - return NULL; - } - } - if( (u32) virt_to_phys(ptr+size) > 0x1000000) { - printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what); - if(type) - kfree_skb(skb); - else - kfree(ptr); - return NULL; - } - return ret; -} - -/* - * allocate all memory structures .. send/recv buffers etc ... - */ -static int ni65_alloc_buffer(struct net_device *dev) -{ - unsigned char *ptr; - struct priv *p; - int i; - - /* - * we need 8-aligned memory .. 
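- * (kmalloc alignment is not relied upon here: the driver over-allocates
- * by 8 bytes and rounds the pointer up to the next 8-byte boundary)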
- */
- ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
- if(!ptr)
- return -ENOMEM;
-
- p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
- memset((char *)p, 0, sizeof(struct priv));
- p->self = ptr;
-
- for(i=0;i<TMDNUM;i++) {
-#ifdef XMT_VIA_SKB
- p->tmd_skb[i] = NULL;
-#endif
- p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
- if(!p->tmdbounce[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
- }
-
- for(i=0;i<RMDNUM;i++)
- {
-#ifdef RCV_VIA_SKB
- p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
- if(!p->recv_skb[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
-#else
- p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
- if(!p->recvbounce[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
-#endif
- }
-
- return 0; /* everything is OK */
-}
-
-/*
- * free buffers and private struct
- */
-static void ni65_free_buffer(struct priv *p)
-{
- int i;
-
- if(!p)
- return;
-
- for(i=0;i<TMDNUM;i++) {
- kfree(p->tmdbounce[i]);
-#ifdef XMT_VIA_SKB
- if(p->tmd_skb[i])
- dev_kfree_skb(p->tmd_skb[i]);
-#endif
- }
-
- for(i=0;i<RMDNUM;i++)
- {
-#ifdef RCV_VIA_SKB
- if(p->recv_skb[i])
- dev_kfree_skb(p->recv_skb[i]);
-#else
- kfree(p->recvbounce[i]);
-#endif
- }
- kfree(p->self);
-}
-
-
-/*
- * stop and (re)start lance .. e.g after an error
- */
-static void ni65_stop_start(struct net_device *dev,struct priv *p)
-{
- int csr0 = CSR0_INEA;
-
- writedatareg(CSR0_STOP);
-
- if(debuglevel > 1)
- printk(KERN_DEBUG "ni65_stop_start\n");
-
- if(p->features & INIT_RING_BEFORE_START) {
- int i;
-#ifdef XMT_VIA_SKB
- struct sk_buff *skb_save[TMDNUM];
-#endif
- unsigned long buffer[TMDNUM];
- short blen[TMDNUM];
-
- if(p->xmit_queued) {
- while(1) {
- if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
- break;
- p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
- if(p->tmdlast == p->tmdnum)
- break;
- }
- }
-
- for(i=0;i<TMDNUM;i++) {
- struct tmd *tmdp = p->tmdhead + i;
-#ifdef XMT_VIA_SKB
- skb_save[i] = p->tmd_skb[i];
-#endif
- buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
- blen[i] = tmdp->blen;
- tmdp->u.s.status = 0x0;
- }
-
- for(i=0;i<RMDNUM;i++) {
- struct rmd *rmdp = p->rmdhead + i;
- rmdp->u.s.status = RCV_OWN;
- }
- p->tmdnum = p->xmit_queued = 0;
- writedatareg(CSR0_STRT | csr0);
-
- for(i=0;i<TMDNUM;i++) {
- int num = (i + p->tmdlast) & (TMDNUM-1);
- p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
- p->tmdhead[i].blen = blen[num];
- if(p->tmdhead[i].u.s.status & XMIT_OWN) {
- p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
- p->xmit_queued = 1;
- writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
- }
-#ifdef XMT_VIA_SKB
- p->tmd_skb[i] = skb_save[num];
-#endif
- }
- p->rmdnum = p->tmdlast = 0;
- if(!p->lock)
- if (p->tmdnum || !p->xmit_queued)
- netif_wake_queue(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- }
- else
- writedatareg(CSR0_STRT | csr0);
-}
-
-/*
- * init lance (write init-values .. init-buffers) (open-helper)
- */
-static int ni65_lance_reinit(struct net_device *dev)
-{
- int i;
- struct priv *p = dev->ml_priv;
- unsigned long flags;
-
- p->lock = 0;
- p->xmit_queued = 0;
-
- flags=claim_dma_lock();
- disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
- set_dma_mode(dev->dma,DMA_MODE_CASCADE);
- enable_dma(dev->dma);
- release_dma_lock(flags);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
- if( (i=readreg(CSR0) ) != 0x4)
- {
- printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
- cards[p->cardno].cardname,(int) i);
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(flags);
- return 0;
- }
-
- p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
- for(i=0;i<TMDNUM;i++)
- {
- struct tmd *tmdp = p->tmdhead + i;
-#ifdef XMT_VIA_SKB
- if(p->tmd_skb[i]) {
- dev_kfree_skb(p->tmd_skb[i]);
- p->tmd_skb[i] = NULL;
- }
-#endif
- tmdp->u.buffer = 0x0;
- tmdp->u.s.status = XMIT_START | XMIT_END;
- tmdp->blen = tmdp->status2 = 0;
- }
-
- for(i=0;i<RMDNUM;i++)
- {
- struct rmd *rmdp = p->rmdhead + i;
-#ifdef RCV_VIA_SKB
- rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
-#else
- rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
-#endif
- rmdp->blen = -(R_BUF_SIZE-8);
- rmdp->mlen = 0;
- rmdp->u.s.status = RCV_OWN;
- }
-
- if(dev->flags & IFF_PROMISC)
- ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
- else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
- ni65_init_lance(p,dev->dev_addr,0xff,0x0);
- else
- ni65_init_lance(p,dev->dev_addr,0x00,0x00);
-
- /*
- * ni65_set_lance_mem() sets L_ADDRREG to CSR0
- * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
- */
-
- if(inw(PORT+L_DATAREG) & CSR0_IDON) {
- ni65_set_performance(p);
- /* init OK: start lance , enable interrupts */
- writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
- return 1; /* ->OK */
- }
- printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(flags);
- return 0; /* ->Error */
-}
-
-/*
- * interrupt handler
- */
-static irqreturn_t ni65_interrupt(int irq, void * dev_id)
-{
- int csr0 = 0;
- struct net_device *dev = dev_id;
- struct priv *p;
- int bcnt = 32;
-
- p = dev->ml_priv;
-
- spin_lock(&p->ring_lock);
-
- while(--bcnt) {
- csr0 = inw(PORT+L_DATAREG);
-
-#if 0
- writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
-#else
- writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
-#endif
-
- if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
- break;
-
- if(csr0 & CSR0_RINT) /* RECV-int? */
- ni65_recv_intr(dev,csr0);
- if(csr0 & CSR0_TINT) /* XMIT-int? */
- ni65_xmit_intr(dev,csr0);
-
- if(csr0 & CSR0_ERR)
- {
- if(debuglevel > 1)
- printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
- if(csr0 & CSR0_BABL)
- dev->stats.tx_errors++;
- if(csr0 & CSR0_MISS) {
- int i;
- for(i=0;i<RMDNUM;i++)
- printk("%02x ",p->rmdhead[i].u.s.status);
- printk("\n");
- dev->stats.rx_errors++;
- }
- if(csr0 & CSR0_MERR) {
- if(debuglevel > 1)
- printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
- ni65_stop_start(dev,p);
- }
- }
- }
-
-#ifdef RCV_PARANOIA_CHECK
-{
- int j;
- for(j=0;j<RMDNUM;j++)
- {
- int i, num2;
- for(i=RMDNUM;i>0;i--) {
- num2 = (p->rmdnum + i) & (RMDNUM-1);
- if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
- break;
- }
-
- if(i) {
- int k, num1;
- for(k=0;k<RMDNUM;k++) {
- num1 = (p->rmdnum + k) & (RMDNUM-1);
- if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
- break;
- }
- if(!k)
- break;
-
- if(debuglevel > 0)
- {
- char buf[256],*buf1;
- buf1 = buf;
- for(k=0;k<RMDNUM;k++) {
- sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
- buf1 += 3;
- }
- *buf1 = 0;
- printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
- }
-
- p->rmdnum = num1;
- ni65_recv_intr(dev,csr0);
- if((p->rmdhead[num2].u.s.status & RCV_OWN))
- break; /* ok, we are 'in sync' again */
- }
- else
- break;
- }
-}
-#endif
-
- if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
- printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
- ni65_stop_start(dev,p);
- }
- else
- writedatareg(CSR0_INEA);
-
- spin_unlock(&p->ring_lock);
- return IRQ_HANDLED;
-}
-
-/*
- * We have received an Xmit-Interrupt ..
- * send a new packet if necessary
- */
-static void ni65_xmit_intr(struct net_device *dev,int csr0)
-{
- struct priv *p = dev->ml_priv;
-
- while(p->xmit_queued)
- {
- struct tmd *tmdp = p->tmdhead + p->tmdlast;
- int tmdstat = tmdp->u.s.status;
-
- if(tmdstat & XMIT_OWN)
- break;
-
- if(tmdstat & XMIT_ERR)
- {
-#if 0
- if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
- printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
-#endif
- /* checking some errors */
- if(tmdp->status2 & XMIT_RTRY)
- dev->stats.tx_aborted_errors++;
- if(tmdp->status2 & XMIT_LCAR)
- dev->stats.tx_carrier_errors++;
- if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
- /* this stops the xmitter */
- dev->stats.tx_fifo_errors++;
- if(debuglevel > 0)
- printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
- if(p->features & INIT_RING_BEFORE_START) {
- tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
- ni65_stop_start(dev,p);
- break; /* no more Xmit processing .. */
- }
- else
- ni65_stop_start(dev,p);
- }
- if(debuglevel > 2)
- printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
- if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
- dev->stats.tx_errors++;
- tmdp->status2 = 0;
- }
- else {
- dev->stats.tx_bytes -= (short)(tmdp->blen);
- dev->stats.tx_packets++;
- }
-
-#ifdef XMT_VIA_SKB
- if(p->tmd_skb[p->tmdlast]) {
- dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
- p->tmd_skb[p->tmdlast] = NULL;
- }
-#endif
-
- p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
- if(p->tmdlast == p->tmdnum)
- p->xmit_queued = 0;
- }
- netif_wake_queue(dev);
-}
-
-/*
- * We have received a packet
- */
-static void ni65_recv_intr(struct net_device *dev,int csr0)
-{
- struct rmd *rmdp;
- int rmdstat,len;
- int cnt=0;
- struct priv *p = dev->ml_priv;
-
- rmdp = p->rmdhead + p->rmdnum;
- while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
- {
- cnt++;
- if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
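- /* (a clean frame carries RCV_START and RCV_END with RCV_ERR clear;
-  * anything else is a receive error or a frame that did not fit
-  * into a single ring buffer) */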
- {
- if(!(rmdstat & RCV_ERR)) {
- if(rmdstat & RCV_START)
- {
- dev->stats.rx_length_errors++;
- printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
- }
- }
- else {
- if(debuglevel > 2)
- printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
- dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
- if(rmdstat & RCV_FRAM)
- dev->stats.rx_frame_errors++;
- if(rmdstat & RCV_OFLO)
- dev->stats.rx_over_errors++;
- if(rmdstat & RCV_CRC)
- dev->stats.rx_crc_errors++;
- if(rmdstat & RCV_BUF_ERR)
- dev->stats.rx_fifo_errors++;
- }
- if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
- dev->stats.rx_errors++;
- }
- else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
- {
-#ifdef RCV_VIA_SKB
- struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
- if (skb)
- skb_reserve(skb,16);
-#else
- struct sk_buff *skb = dev_alloc_skb(len+2);
-#endif
- if(skb)
- {
- skb_reserve(skb,2);
-#ifdef RCV_VIA_SKB
- if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
- skb_put(skb,len);
- skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
- }
- else {
- struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
- skb_put(skb,R_BUF_SIZE);
- p->recv_skb[p->rmdnum] = skb;
- rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
- skb = skb1;
- skb_trim(skb,len);
- }
-#else
- skb_put(skb,len);
- skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
-#endif
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- }
- else
- {
- printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
- dev->stats.rx_dropped++;
- }
- }
- else {
- printk(KERN_INFO "%s: received runt packet\n",dev->name);
- dev->stats.rx_errors++;
- }
- rmdp->blen = -(R_BUF_SIZE-8);
- rmdp->mlen = 0;
- rmdp->u.s.status = RCV_OWN; /* change owner */
- p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
- rmdp = p->rmdhead + p->rmdnum;
- }
-}
-
-/*
- * kick xmitter ..
- */
-
-static void ni65_timeout(struct net_device *dev)
-{
- int i;
- struct priv *p = dev->ml_priv;
-
- printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
- for(i=0;i<TMDNUM;i++)
- printk("%02x ",p->tmdhead[i].u.s.status);
- printk("\n");
- ni65_lance_reinit(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-/*
- * Send a packet
- */
-
-static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
-
- netif_stop_queue(dev);
-
- if (test_and_set_bit(0, (void*)&p->lock)) {
- printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
- return NETDEV_TX_BUSY;
- }
-
- {
- short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- struct tmd *tmdp;
- unsigned long flags;
-
-#ifdef XMT_VIA_SKB
- if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
-#endif
-
- skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
- skb->len > T_BUF_SIZE ?
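- /* clamp the copy to the bounce buffer; frames shorter than
-  * ETH_ZLEN are zero-padded right below */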
T_BUF_SIZE : - skb->len); - if (len > skb->len) - memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); - dev_kfree_skb (skb); - - spin_lock_irqsave(&p->ring_lock, flags); - tmdp = p->tmdhead + p->tmdnum; - tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]); - p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1); - -#ifdef XMT_VIA_SKB - } - else { - spin_lock_irqsave(&p->ring_lock, flags); - - tmdp = p->tmdhead + p->tmdnum; - tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data); - p->tmd_skb[p->tmdnum] = skb; - } -#endif - tmdp->blen = -len; - - tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; - writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */ - - p->xmit_queued = 1; - p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); - - if(p->tmdnum != p->tmdlast) - netif_wake_queue(dev); - - p->lock = 0; - - spin_unlock_irqrestore(&p->ring_lock, flags); - } - - return NETDEV_TX_OK; -} - -static void set_multicast_list(struct net_device *dev) -{ - if(!ni65_lance_reinit(dev)) - printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name); - netif_wake_queue(dev); -} - -#ifdef MODULE -static struct net_device *dev_ni65; - -module_param(irq, int, 0); -module_param(io, int, 0); -module_param(dma, int, 0); -MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); -MODULE_PARM_DESC(io, "ni6510 I/O base address"); -MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); - -int __init init_module(void) -{ - dev_ni65 = ni65_probe(-1); - return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0; -} - -void __exit cleanup_module(void) -{ - unregister_netdev(dev_ni65); - cleanup_card(dev_ni65); - free_netdev(dev_ni65); -} -#endif /* MODULE */ - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ni65.h b/drivers/net/ni65.h deleted file mode 100644 index e6217e3..0000000 --- a/drivers/net/ni65.h +++ /dev/null @@ -1,121 +0,0 @@ -/* am7990 (lance) definitions - * - * This is an extension to the Linux operating system, and is covered by - * same GNU General Public License that covers that work. - * - * Michael Hipp - * email: mhipp@student.uni-tuebingen.de - * - * sources: (mail me or ask archie if you need them) - * crynwr-packet-driver - */ - -/* - * Control and Status Register 0 (CSR0) bit definitions - * (R=Readable) (W=Writeable) (S=Set on write) (C-Clear on write) - * - */ - -#define CSR0_ERR 0x8000 /* Error summary (R) */ -#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */ -#define CSR0_CERR 0x2000 /* Collision Error (RC) */ -#define CSR0_MISS 0x1000 /* Missed packet (RC) */ -#define CSR0_MERR 0x0800 /* Memory Error (RC) */ -#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */ -#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */ -#define CSR0_IDON 0x0100 /* Initialization Done (RC) */ -#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */ -#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */ -#define CSR0_RXON 0x0020 /* Receiver on (R) */ -#define CSR0_TXON 0x0010 /* Transmitter on (R) */ -#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */ -#define CSR0_STOP 0x0004 /* Stop (RS) */ -#define CSR0_STRT 0x0002 /* Start (RS) */ -#define CSR0_INIT 0x0001 /* Initialize (RS) */ - -#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */ -/* - * Initialization Block Mode operation Bit Definitions. 
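- * (these bits are loaded into init_block.mode; ni65_lance_reinit()
- * passes M_PROM when the interface is in promiscuous mode)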
- */ - -#define M_PROM 0x8000 /* Promiscuous Mode */ -#define M_INTL 0x0040 /* Internal Loopback */ -#define M_DRTY 0x0020 /* Disable Retry */ -#define M_COLL 0x0010 /* Force Collision */ -#define M_DTCR 0x0008 /* Disable Transmit CRC) */ -#define M_LOOP 0x0004 /* Loopback */ -#define M_DTX 0x0002 /* Disable the Transmitter */ -#define M_DRX 0x0001 /* Disable the Receiver */ - - -/* - * Receive message descriptor bit definitions. - */ - -#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */ -#define RCV_ERR 0x40 /* Error Summary */ -#define RCV_FRAM 0x20 /* Framing Error */ -#define RCV_OFLO 0x10 /* Overflow Error */ -#define RCV_CRC 0x08 /* CRC Error */ -#define RCV_BUF_ERR 0x04 /* Buffer Error */ -#define RCV_START 0x02 /* Start of Packet */ -#define RCV_END 0x01 /* End of Packet */ - - -/* - * Transmit message descriptor bit definitions. - */ - -#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */ -#define XMIT_ERR 0x40 /* Error Summary */ -#define XMIT_RETRY 0x10 /* more the 1 retry needed to Xmit */ -#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */ -#define XMIT_DEF 0x04 /* Deferred */ -#define XMIT_START 0x02 /* Start of Packet */ -#define XMIT_END 0x01 /* End of Packet */ - -/* - * transmit status (2) (valid if XMIT_ERR == 1) - */ - -#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */ -#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */ -#define XMIT_LCAR 0x0800 /* Loss of Carrier */ -#define XMIT_LCOL 0x1000 /* Late collision */ -#define XMIT_RESERV 0x2000 /* Reserved */ -#define XMIT_UFLO 0x4000 /* Underflow (late memory) */ -#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */ - -struct init_block { - unsigned short mode; - unsigned char eaddr[6]; - unsigned char filter[8]; - /* bit 29-31: number of rmd's (power of 2) */ - u32 rrp; /* receive ring pointer (align 8) */ - /* bit 29-31: number of tmd's (power of 2) */ - u32 trp; /* transmit ring pointer (align 8) */ -}; - -struct rmd { /* Receive Message Descriptor */ - union { - volatile u32 buffer; - struct { - volatile unsigned char dummy[3]; - volatile unsigned char status; - } s; - } u; - volatile short blen; - volatile unsigned short mlen; -}; - -struct tmd { - union { - volatile u32 buffer; - struct { - volatile unsigned char dummy[3]; - volatile unsigned char status; - } s; - } u; - volatile unsigned short blen; - volatile unsigned short status2; -}; diff --git a/drivers/net/pcmcia/Kconfig b/drivers/net/pcmcia/Kconfig index b67c5ed..e17ad95 100644 --- a/drivers/net/pcmcia/Kconfig +++ b/drivers/net/pcmcia/Kconfig @@ -41,15 +41,6 @@ config PCMCIA_PCNET To compile this driver as a module, choose M here: the module will be called pcnet_cs. If unsure, say N. -config PCMCIA_NMCLAN - tristate "New Media PCMCIA support" - help - Say Y here if you intend to attach a New Media Ethernet or LiveWire - PCMCIA (PC-card) Ethernet card to your computer. - - To compile this driver as a module, choose M here: the module will be - called nmclan_cs. If unsure, say N. 
- config PCMCIA_SMC91C92 tristate "SMC 91Cxx PCMCIA support" select CRC32 diff --git a/drivers/net/pcmcia/Makefile b/drivers/net/pcmcia/Makefile index 2f2ab3b..985f0ae 100644 --- a/drivers/net/pcmcia/Makefile +++ b/drivers/net/pcmcia/Makefile @@ -4,7 +4,6 @@ # 16-bit client drivers obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o -obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o obj-$(CONFIG_PCMCIA_XIRC2PS) += xirc2ps_cs.o diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c deleted file mode 100644 index 9d70b65..0000000 --- a/drivers/net/pcmcia/nmclan_cs.c +++ /dev/null @@ -1,1525 +0,0 @@ -/* ---------------------------------------------------------------------------- -Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN. - nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao - - The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media - Access Controller for Ethernet (MACE). It is essentially the Am2150 - PCMCIA Ethernet card contained in the Am2150 Demo Kit. - -Written by Roger C. Pao - Copyright 1995 Roger C. Pao - Linux 2.5 cleanups Copyright Red Hat 2003 - - This software may be used and distributed according to the terms of - the GNU General Public License. - -Ported to Linux 1.3.* network driver environment by - Matti Aarnio - -References - - Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993 - Am79C940 (MACE) Data Sheet, 1994 - Am79C90 (C-LANCE) Data Sheet, 1994 - Linux PCMCIA Programmer's Guide v1.17 - /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8 - - Eric Mears, New Media Corporation - Tom Pollard, New Media Corporation - Dean Siasoyco, New Media Corporation - Ken Lesniak, Silicon Graphics, Inc. - Donald Becker - David Hinds - - The Linux client driver is based on the 3c589_cs.c client driver by - David Hinds. - - The Linux network driver outline is based on the 3c589_cs.c driver, - the 8390.c driver, and the example skeleton.c kernel code, which are - by Donald Becker. - - The Am2150 network driver hardware interface code is based on the - OS/9000 driver for the New Media Ethernet LAN by Eric Mears. - - Special thanks for testing and help in debugging this driver goes - to Ken Lesniak. - -------------------------------------------------------------------------------- -Driver Notes and Issues -------------------------------------------------------------------------------- - -1. Developed on a Dell 320SLi - PCMCIA Card Services 2.6.2 - Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386 - -2. rc.pcmcia may require loading pcmcia_core with io_speed=300: - 'insmod pcmcia_core.o io_speed=300'. - This will avoid problems with fast systems which causes rx_framecnt - to return random values. - -3. If hot extraction does not work for you, use 'ifconfig eth0 down' - before extraction. - -4. There is a bad slow-down problem in this driver. - -5. Future: Multicast processing. In the meantime, do _not_ compile your - kernel with multicast ip enabled. - -------------------------------------------------------------------------------- -History -------------------------------------------------------------------------------- -Log: nmclan_cs.c,v - * 2.5.75-ac1 2003/07/11 Alan Cox - * Fixed hang on card eject as we probe it - * Cleaned up to use new style locking. - * - * Revision 0.16 1995/07/01 06:42:17 rpao - * Bug fix: nmclan_reset() called CardServices incorrectly. 
- *
- * Revision 0.15 1995/05/24 08:09:47 rpao
- * Re-implement MULTI_TX dev->tbusy handling.
- *
- * Revision 0.14 1995/05/23 03:19:30 rpao
- * Added, in nmclan_config(), "tuple.Attributes = 0;".
- * Modified MACE ID check to ignore chip revision level.
- * Avoid tx_free_frames race condition between _start_xmit and _interrupt.
- *
- * Revision 0.13 1995/05/18 05:56:34 rpao
- * Statistics changes.
- * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list.
- * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup.
- *
- * Revision 0.12 1995/05/14 00:12:23 rpao
- * Statistics overhaul.
- *
-
-95/05/13 rpao V0.10a
- Bug fix: MACE statistics counters used wrong I/O ports.
- Bug fix: mace_interrupt() needed to allow statistics to be
- processed without RX or TX interrupts pending.
-95/05/11 rpao V0.10
- Multiple transmit request processing.
- Modified statistics to use MACE counters where possible.
-95/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO.
- *Released
-95/05/10 rpao V0.08
- Bug fix: Make all non-exported functions private by using
- static keyword.
- Bug fix: Test IntrCnt _before_ reading MACE_IR.
-95/05/10 rpao V0.07 Statistics.
-95/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states.
-
----------------------------------------------------------------------------- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#define DRV_NAME "nmclan_cs"
-#define DRV_VERSION "0.16"
-
-
-/* ----------------------------------------------------------------------------
-Conditional Compilation Options
----------------------------------------------------------------------------- */
-
-#define MULTI_TX 0
-#define RESET_ON_TIMEOUT 1
-#define TX_INTERRUPTABLE 1
-#define RESET_XILINX 0
-
-/* ----------------------------------------------------------------------------
-Include Files
----------------------------------------------------------------------------- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/bitops.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-/* ----------------------------------------------------------------------------
-Defines
----------------------------------------------------------------------------- */
-
-#define ETHER_ADDR_LEN ETH_ALEN
- /* 6 bytes in an Ethernet Address */
-#define MACE_LADRF_LEN 8
- /* 8 bytes in Logical Address Filter */
-
-/* Loop Control Defines */
-#define MACE_MAX_IR_ITERATIONS 10
-#define MACE_MAX_RX_ITERATIONS 12
- /*
- TBD: Dean brought this up, and I assumed the hardware would
- handle it:
-
- If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be
- non-zero when the isr exits. We may not get another interrupt
- to process the remaining packets for some time.
- */
-
-/*
-The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA)
-which manages the interface between the MACE and the PCMCIA bus. It
-also includes buffer management for the 32K x 8 SRAM to control up to
-four transmit and 12 receive frames at a time.
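-(the AM2150_MAX_TX_FRAMES and AM2150_MAX_RX_FRAMES limits defined just
-below mirror those hardware buffer counts)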
-*/ -#define AM2150_MAX_TX_FRAMES 4 -#define AM2150_MAX_RX_FRAMES 12 - -/* Am2150 Ethernet Card I/O Mapping */ -#define AM2150_RCV 0x00 -#define AM2150_XMT 0x04 -#define AM2150_XMT_SKIP 0x09 -#define AM2150_RCV_NEXT 0x0A -#define AM2150_RCV_FRAME_COUNT 0x0B -#define AM2150_MACE_BANK 0x0C -#define AM2150_MACE_BASE 0x10 - -/* MACE Registers */ -#define MACE_RCVFIFO 0 -#define MACE_XMTFIFO 1 -#define MACE_XMTFC 2 -#define MACE_XMTFS 3 -#define MACE_XMTRC 4 -#define MACE_RCVFC 5 -#define MACE_RCVFS 6 -#define MACE_FIFOFC 7 -#define MACE_IR 8 -#define MACE_IMR 9 -#define MACE_PR 10 -#define MACE_BIUCC 11 -#define MACE_FIFOCC 12 -#define MACE_MACCC 13 -#define MACE_PLSCC 14 -#define MACE_PHYCC 15 -#define MACE_CHIPIDL 16 -#define MACE_CHIPIDH 17 -#define MACE_IAC 18 -/* Reserved */ -#define MACE_LADRF 20 -#define MACE_PADR 21 -/* Reserved */ -/* Reserved */ -#define MACE_MPC 24 -/* Reserved */ -#define MACE_RNTPC 26 -#define MACE_RCVCC 27 -/* Reserved */ -#define MACE_UTR 29 -#define MACE_RTR1 30 -#define MACE_RTR2 31 - -/* MACE Bit Masks */ -#define MACE_XMTRC_EXDEF 0x80 -#define MACE_XMTRC_XMTRC 0x0F - -#define MACE_XMTFS_XMTSV 0x80 -#define MACE_XMTFS_UFLO 0x40 -#define MACE_XMTFS_LCOL 0x20 -#define MACE_XMTFS_MORE 0x10 -#define MACE_XMTFS_ONE 0x08 -#define MACE_XMTFS_DEFER 0x04 -#define MACE_XMTFS_LCAR 0x02 -#define MACE_XMTFS_RTRY 0x01 - -#define MACE_RCVFS_RCVSTS 0xF000 -#define MACE_RCVFS_OFLO 0x8000 -#define MACE_RCVFS_CLSN 0x4000 -#define MACE_RCVFS_FRAM 0x2000 -#define MACE_RCVFS_FCS 0x1000 - -#define MACE_FIFOFC_RCVFC 0xF0 -#define MACE_FIFOFC_XMTFC 0x0F - -#define MACE_IR_JAB 0x80 -#define MACE_IR_BABL 0x40 -#define MACE_IR_CERR 0x20 -#define MACE_IR_RCVCCO 0x10 -#define MACE_IR_RNTPCO 0x08 -#define MACE_IR_MPCO 0x04 -#define MACE_IR_RCVINT 0x02 -#define MACE_IR_XMTINT 0x01 - -#define MACE_MACCC_PROM 0x80 -#define MACE_MACCC_DXMT2PD 0x40 -#define MACE_MACCC_EMBA 0x20 -#define MACE_MACCC_RESERVED 0x10 -#define MACE_MACCC_DRCVPA 0x08 -#define MACE_MACCC_DRCVBC 0x04 -#define MACE_MACCC_ENXMT 0x02 -#define MACE_MACCC_ENRCV 0x01 - -#define MACE_PHYCC_LNKFL 0x80 -#define MACE_PHYCC_DLNKTST 0x40 -#define MACE_PHYCC_REVPOL 0x20 -#define MACE_PHYCC_DAPC 0x10 -#define MACE_PHYCC_LRT 0x08 -#define MACE_PHYCC_ASEL 0x04 -#define MACE_PHYCC_RWAKE 0x02 -#define MACE_PHYCC_AWAKE 0x01 - -#define MACE_IAC_ADDRCHG 0x80 -#define MACE_IAC_PHYADDR 0x04 -#define MACE_IAC_LOGADDR 0x02 - -#define MACE_UTR_RTRE 0x80 -#define MACE_UTR_RTRD 0x40 -#define MACE_UTR_RPA 0x20 -#define MACE_UTR_FCOLL 0x10 -#define MACE_UTR_RCVFCSE 0x08 -#define MACE_UTR_LOOP_INCL_MENDEC 0x06 -#define MACE_UTR_LOOP_NO_MENDEC 0x04 -#define MACE_UTR_LOOP_EXTERNAL 0x02 -#define MACE_UTR_LOOP_NONE 0x00 -#define MACE_UTR_RESERVED 0x01 - -/* Switch MACE register bank (only 0 and 1 are valid) */ -#define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK) - -#define MACE_IMR_DEFAULT \ - (0xFF - \ - ( \ - MACE_IR_CERR | \ - MACE_IR_RCVCCO | \ - MACE_IR_RNTPCO | \ - MACE_IR_MPCO | \ - MACE_IR_RCVINT | \ - MACE_IR_XMTINT \ - ) \ - ) -#undef MACE_IMR_DEFAULT -#define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */ - -#define TX_TIMEOUT ((400*HZ)/1000) - -/* ---------------------------------------------------------------------------- -Type Definitions ----------------------------------------------------------------------------- */ - -typedef struct _mace_statistics { - /* MACE_XMTFS */ - int xmtsv; - int uflo; - int lcol; - int more; - int one; - int defer; - int lcar; - int rtry; - - /* MACE_XMTRC */ - int exdef; - 
int xmtrc; - - /* RFS1--Receive Status (RCVSTS) */ - int oflo; - int clsn; - int fram; - int fcs; - - /* RFS2--Runt Packet Count (RNTPC) */ - int rfs_rntpc; - - /* RFS3--Receive Collision Count (RCVCC) */ - int rfs_rcvcc; - - /* MACE_IR */ - int jab; - int babl; - int cerr; - int rcvcco; - int rntpco; - int mpco; - - /* MACE_MPC */ - int mpc; - - /* MACE_RNTPC */ - int rntpc; - - /* MACE_RCVCC */ - int rcvcc; -} mace_statistics; - -typedef struct _mace_private { - struct pcmcia_device *p_dev; - struct net_device_stats linux_stats; /* Linux statistics counters */ - mace_statistics mace_stats; /* MACE chip statistics counters */ - - /* restore_multicast_list() state variables */ - int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */ - int multicast_num_addrs; - - char tx_free_frames; /* Number of free transmit frame buffers */ - char tx_irq_disabled; /* MACE TX interrupt disabled */ - - spinlock_t bank_lock; /* Must be held if you step off bank 0 */ -} mace_private; - -/* ---------------------------------------------------------------------------- -Private Global Variables ----------------------------------------------------------------------------- */ - -static const char *if_names[]={ - "Auto", "10baseT", "BNC", -}; - -/* ---------------------------------------------------------------------------- -Parameters - These are the parameters that can be set during loading with - 'insmod'. ----------------------------------------------------------------------------- */ - -MODULE_DESCRIPTION("New Media PCMCIA ethernet driver"); -MODULE_LICENSE("GPL"); - -#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) - -/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */ -INT_MODULE_PARM(if_port, 0); - - -/* ---------------------------------------------------------------------------- -Function Prototypes ----------------------------------------------------------------------------- */ - -static int nmclan_config(struct pcmcia_device *link); -static void nmclan_release(struct pcmcia_device *link); - -static void nmclan_reset(struct net_device *dev); -static int mace_config(struct net_device *dev, struct ifmap *map); -static int mace_open(struct net_device *dev); -static int mace_close(struct net_device *dev); -static netdev_tx_t mace_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static void mace_tx_timeout(struct net_device *dev); -static irqreturn_t mace_interrupt(int irq, void *dev_id); -static struct net_device_stats *mace_get_stats(struct net_device *dev); -static int mace_rx(struct net_device *dev, unsigned char RxCnt); -static void restore_multicast_list(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static const struct ethtool_ops netdev_ethtool_ops; - - -static void nmclan_detach(struct pcmcia_device *p_dev); - -static const struct net_device_ops mace_netdev_ops = { - .ndo_open = mace_open, - .ndo_stop = mace_close, - .ndo_start_xmit = mace_start_xmit, - .ndo_tx_timeout = mace_tx_timeout, - .ndo_set_config = mace_config, - .ndo_get_stats = mace_get_stats, - .ndo_set_multicast_list = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int nmclan_probe(struct pcmcia_device *link) -{ - mace_private *lp; - struct net_device *dev; - - dev_dbg(&link->dev, "nmclan_attach()\n"); - - /* Create new ethernet device */ - dev = alloc_etherdev(sizeof(mace_private)); - if (!dev) - return -ENOMEM; - lp = netdev_priv(dev); - lp->p_dev = link; 
- link->priv = dev; - - spin_lock_init(&lp->bank_lock); - link->resource[0]->end = 32; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - link->config_flags |= CONF_ENABLE_IRQ; - link->config_index = 1; - link->config_regs = PRESENT_OPTION; - - lp->tx_free_frames=AM2150_MAX_TX_FRAMES; - - dev->netdev_ops = &mace_netdev_ops; - SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); - dev->watchdog_timeo = TX_TIMEOUT; - - return nmclan_config(link); -} /* nmclan_attach */ - -static void nmclan_detach(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - dev_dbg(&link->dev, "nmclan_detach\n"); - - unregister_netdev(dev); - - nmclan_release(link); - - free_netdev(dev); -} /* nmclan_detach */ - -/* ---------------------------------------------------------------------------- -mace_read - Reads a MACE register. This is bank independent; however, the - caller must ensure that this call is not interruptable. We are - assuming that during normal operation, the MACE is always in - bank 0. ----------------------------------------------------------------------------- */ -static int mace_read(mace_private *lp, unsigned int ioaddr, int reg) -{ - int data = 0xFF; - unsigned long flags; - - switch (reg >> 4) { - case 0: /* register 0-15 */ - data = inb(ioaddr + AM2150_MACE_BASE + reg); - break; - case 1: /* register 16-31 */ - spin_lock_irqsave(&lp->bank_lock, flags); - MACEBANK(1); - data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F)); - MACEBANK(0); - spin_unlock_irqrestore(&lp->bank_lock, flags); - break; - } - return data & 0xFF; -} /* mace_read */ - -/* ---------------------------------------------------------------------------- -mace_write - Writes to a MACE register. This is bank independent; however, - the caller must ensure that this call is not interruptable. We - are assuming that during normal operation, the MACE is always in - bank 0. ----------------------------------------------------------------------------- */ -static void mace_write(mace_private *lp, unsigned int ioaddr, int reg, - int data) -{ - unsigned long flags; - - switch (reg >> 4) { - case 0: /* register 0-15 */ - outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg); - break; - case 1: /* register 16-31 */ - spin_lock_irqsave(&lp->bank_lock, flags); - MACEBANK(1); - outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F)); - MACEBANK(0); - spin_unlock_irqrestore(&lp->bank_lock, flags); - break; - } -} /* mace_write */ - -/* ---------------------------------------------------------------------------- -mace_init - Resets the MACE chip. ----------------------------------------------------------------------------- */ -static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr) -{ - int i; - int ct = 0; - - /* MACE Software reset */ - mace_write(lp, ioaddr, MACE_BIUCC, 1); - while (mace_read(lp, ioaddr, MACE_BIUCC) & 0x01) { - /* Wait for reset bit to be cleared automatically after <= 200ns */; - if(++ct > 500) - { - pr_err("reset failed, card removed?\n"); - return -1; - } - udelay(1); - } - mace_write(lp, ioaddr, MACE_BIUCC, 0); - - /* The Am2150 requires that the MACE FIFOs operate in burst mode. */ - mace_write(lp, ioaddr, MACE_FIFOCC, 0x0F); - - mace_write(lp,ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */ - mace_write(lp, ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */ - - /* - * Bit 2-1 PORTSEL[1-0] Port Select. - * 00 AUI/10Base-2 - * 01 10Base-T - * 10 DAI Port (reserved in Am2150) - * 11 GPSI - * For this card, only the first two are valid. 
- * So, PLSCC should be set to - * 0x00 for 10Base-2 - * 0x02 for 10Base-T - * Or just set ASEL in PHYCC below! - */ - switch (if_port) { - case 1: - mace_write(lp, ioaddr, MACE_PLSCC, 0x02); - break; - case 2: - mace_write(lp, ioaddr, MACE_PLSCC, 0x00); - break; - default: - mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4); - /* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden, - and the MACE device will automatically select the operating media - interface port. */ - break; - } - - mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR); - /* Poll ADDRCHG bit */ - ct = 0; - while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) - { - if(++ ct > 500) - { - pr_err("ADDRCHG timeout, card removed?\n"); - return -1; - } - } - /* Set PADR register */ - for (i = 0; i < ETHER_ADDR_LEN; i++) - mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]); - - /* MAC Configuration Control Register should be written last */ - /* Let set_multicast_list set this. */ - /* mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */ - mace_write(lp, ioaddr, MACE_MACCC, 0x00); - return 0; -} /* mace_init */ - -static int nmclan_config(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - mace_private *lp = netdev_priv(dev); - u8 *buf; - size_t len; - int i, ret; - unsigned int ioaddr; - - dev_dbg(&link->dev, "nmclan_config\n"); - - link->io_lines = 5; - ret = pcmcia_request_io(link); - if (ret) - goto failed; - ret = pcmcia_request_exclusive_irq(link, mace_interrupt); - if (ret) - goto failed; - ret = pcmcia_enable_device(link); - if (ret) - goto failed; - - dev->irq = link->irq; - dev->base_addr = link->resource[0]->start; - - ioaddr = dev->base_addr; - - /* Read the ethernet address from the CIS. */ - len = pcmcia_get_tuple(link, 0x80, &buf); - if (!buf || len < ETHER_ADDR_LEN) { - kfree(buf); - goto failed; - } - memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN); - kfree(buf); - - /* Verify configuration by reading the MACE ID. 
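-    (the Am79C940 reports chip id 0x?940 with the silicon revision in
-    the high nibble, which is why the check below masks sig[1] with 0x0F)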
*/ - { - char sig[2]; - - sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL); - sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH); - if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) { - dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n", - sig[0], sig[1]); - } else { - pr_notice("mace id not found: %x %x should be 0x40 0x?9\n", - sig[0], sig[1]); - return -ENODEV; - } - } - - if(mace_init(lp, ioaddr, dev->dev_addr) == -1) - goto failed; - - /* The if_port symbol can be set when the module is loaded */ - if (if_port <= 2) - dev->if_port = if_port; - else - pr_notice("invalid if_port requested\n"); - - SET_NETDEV_DEV(dev, &link->dev); - - i = register_netdev(dev); - if (i != 0) { - pr_notice("register_netdev() failed\n"); - goto failed; - } - - netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n", - dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr); - return 0; - -failed: - nmclan_release(link); - return -ENODEV; -} /* nmclan_config */ - -static void nmclan_release(struct pcmcia_device *link) -{ - dev_dbg(&link->dev, "nmclan_release\n"); - pcmcia_disable_device(link); -} - -static int nmclan_suspend(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - if (link->open) - netif_device_detach(dev); - - return 0; -} - -static int nmclan_resume(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - if (link->open) { - nmclan_reset(dev); - netif_device_attach(dev); - } - - return 0; -} - - -/* ---------------------------------------------------------------------------- -nmclan_reset - Reset and restore all of the Xilinx and MACE registers. ----------------------------------------------------------------------------- */ -static void nmclan_reset(struct net_device *dev) -{ - mace_private *lp = netdev_priv(dev); - -#if RESET_XILINX - struct pcmcia_device *link = &lp->link; - u8 OrigCorValue; - - /* Save original COR value */ - pcmcia_read_config_byte(link, CISREG_COR, &OrigCorValue); - - /* Reset Xilinx */ - dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%x, resetting...\n", - OrigCorValue); - pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET); - /* Need to wait for 20 ms for PCMCIA to finish reset. */ - - /* Restore original COR configuration index */ - pcmcia_write_config_byte(link, CISREG_COR, - (COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK))); - /* Xilinx is now completely reset along with the MACE chip. */ - lp->tx_free_frames=AM2150_MAX_TX_FRAMES; - -#endif /* #if RESET_XILINX */ - - /* Xilinx is now completely reset along with the MACE chip. */ - lp->tx_free_frames=AM2150_MAX_TX_FRAMES; - - /* Reinitialize the MACE chip for operation. */ - mace_init(lp, dev->base_addr, dev->dev_addr); - mace_write(lp, dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT); - - /* Restore the multicast list and enable TX and RX. */ - restore_multicast_list(dev); -} /* nmclan_reset */ - -/* ---------------------------------------------------------------------------- -mace_config - [Someone tell me what this is supposed to do? Is if_port a defined - standard? If so, there should be defines to indicate 1=10Base-T, - 2=10Base-2, etc. including limited automatic detection.] 
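-  [In this driver, dev->if_port follows the if_names[] table above:
-   0=Auto, 1=10baseT, 2=BNC; mace_config() only validates map->port
-   and logs the switch.]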
----------------------------------------------------------------------------- */ -static int mace_config(struct net_device *dev, struct ifmap *map) -{ - if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { - if (map->port <= 2) { - dev->if_port = map->port; - netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); - } else - return -EINVAL; - } - return 0; -} /* mace_config */ - -/* ---------------------------------------------------------------------------- -mace_open - Open device driver. ----------------------------------------------------------------------------- */ -static int mace_open(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - mace_private *lp = netdev_priv(dev); - struct pcmcia_device *link = lp->p_dev; - - if (!pcmcia_dev_present(link)) - return -ENODEV; - - link->open++; - - MACEBANK(0); - - netif_start_queue(dev); - nmclan_reset(dev); - - return 0; /* Always succeed */ -} /* mace_open */ - -/* ---------------------------------------------------------------------------- -mace_close - Closes device driver. ----------------------------------------------------------------------------- */ -static int mace_close(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - mace_private *lp = netdev_priv(dev); - struct pcmcia_device *link = lp->p_dev; - - dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); - - /* Mask off all interrupts from the MACE chip. */ - outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR); - - link->open--; - netif_stop_queue(dev); - - return 0; -} /* mace_close */ - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, -}; - -/* ---------------------------------------------------------------------------- -mace_start_xmit - This routine begins the packet transmit function. When completed, - it will generate a transmit interrupt. - - According to /usr/src/linux/net/inet/dev.c, if _start_xmit - returns 0, the "packet is now solely the responsibility of the - driver." If _start_xmit returns non-zero, the "transmission - failed, put skb back into a list." ----------------------------------------------------------------------------- */ - -static void mace_tx_timeout(struct net_device *dev) -{ - mace_private *lp = netdev_priv(dev); - struct pcmcia_device *link = lp->p_dev; - - netdev_notice(dev, "transmit timed out -- "); -#if RESET_ON_TIMEOUT - pr_cont("resetting card\n"); - pcmcia_reset_card(link->socket); -#else /* #if RESET_ON_TIMEOUT */ - pr_cont("NOT resetting card\n"); -#endif /* #if RESET_ON_TIMEOUT */ - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); -} - -static netdev_tx_t mace_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - mace_private *lp = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - - netif_stop_queue(dev); - - pr_debug("%s: mace_start_xmit(length = %ld) called.\n", - dev->name, (long)skb->len); - -#if (!TX_INTERRUPTABLE) - /* Disable MACE TX interrupts. */ - outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT, - ioaddr + AM2150_MACE_BASE + MACE_IMR); - lp->tx_irq_disabled=1; -#endif /* #if (!TX_INTERRUPTABLE) */ - - { - /* This block must not be interrupted by another transmit request! - mace_tx_timeout will take care of timer-based retransmissions from - the upper layers. 
The interrupt handler is guaranteed never to - service a transmit interrupt while we are in here. - */ - - lp->linux_stats.tx_bytes += skb->len; - lp->tx_free_frames--; - - /* WARNING: Write the _exact_ number of bytes written in the header! */ - /* Put out the word header [must be an outw()] . . . */ - outw(skb->len, ioaddr + AM2150_XMT); - /* . . . and the packet [may be any combination of outw() and outb()] */ - outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1); - if (skb->len & 1) { - /* Odd byte transfer */ - outb(skb->data[skb->len-1], ioaddr + AM2150_XMT); - } - -#if MULTI_TX - if (lp->tx_free_frames > 0) - netif_start_queue(dev); -#endif /* #if MULTI_TX */ - } - -#if (!TX_INTERRUPTABLE) - /* Re-enable MACE TX interrupts. */ - lp->tx_irq_disabled=0; - outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR); -#endif /* #if (!TX_INTERRUPTABLE) */ - - dev_kfree_skb(skb); - - return NETDEV_TX_OK; -} /* mace_start_xmit */ - -/* ---------------------------------------------------------------------------- -mace_interrupt - The interrupt handler. ----------------------------------------------------------------------------- */ -static irqreturn_t mace_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *) dev_id; - mace_private *lp = netdev_priv(dev); - unsigned int ioaddr; - int status; - int IntrCnt = MACE_MAX_IR_ITERATIONS; - - if (dev == NULL) { - pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n", - irq); - return IRQ_NONE; - } - - ioaddr = dev->base_addr; - - if (lp->tx_irq_disabled) { - const char *msg; - if (lp->tx_irq_disabled) - msg = "Interrupt with tx_irq_disabled"; - else - msg = "Re-entering the interrupt handler"; - netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n", - msg, - inb(ioaddr + AM2150_MACE_BASE + MACE_IR), - inb(ioaddr + AM2150_MACE_BASE + MACE_IMR)); - /* WARNING: MACE_IR has been read! */ - return IRQ_NONE; - } - - if (!netif_device_present(dev)) { - netdev_dbg(dev, "interrupt from dead card\n"); - return IRQ_NONE; - } - - do { - /* WARNING: MACE_IR is a READ/CLEAR port! */ - status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); - - pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); - - if (status & MACE_IR_RCVINT) { - mace_rx(dev, MACE_MAX_RX_ITERATIONS); - } - - if (status & MACE_IR_XMTINT) { - unsigned char fifofc; - unsigned char xmtrc; - unsigned char xmtfs; - - fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC); - if ((fifofc & MACE_FIFOFC_XMTFC)==0) { - lp->linux_stats.tx_errors++; - outb(0xFF, ioaddr + AM2150_XMT_SKIP); - } - - /* Transmit Retry Count (XMTRC, reg 4) */ - xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC); - if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++; - lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC); - - if ( - (xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS)) & - MACE_XMTFS_XMTSV /* Transmit Status Valid */ - ) { - lp->mace_stats.xmtsv++; - - if (xmtfs & ~MACE_XMTFS_XMTSV) { - if (xmtfs & MACE_XMTFS_UFLO) { - /* Underflow. Indicates that the Transmit FIFO emptied before - the end of frame was reached. 
-            */
-            lp->mace_stats.uflo++;
-          }
-          if (xmtfs & MACE_XMTFS_LCOL) {
-            /* Late Collision */
-            lp->mace_stats.lcol++;
-          }
-          if (xmtfs & MACE_XMTFS_MORE) {
-            /* MORE than one retry was needed */
-            lp->mace_stats.more++;
-          }
-          if (xmtfs & MACE_XMTFS_ONE) {
-            /* Exactly ONE retry occurred */
-            lp->mace_stats.one++;
-          }
-          if (xmtfs & MACE_XMTFS_DEFER) {
-            /* Transmission was deferred */
-            lp->mace_stats.defer++;
-          }
-          if (xmtfs & MACE_XMTFS_LCAR) {
-            /* Loss of carrier */
-            lp->mace_stats.lcar++;
-          }
-          if (xmtfs & MACE_XMTFS_RTRY) {
-            /* Retry error: transmit aborted after 16 attempts */
-            lp->mace_stats.rtry++;
-          }
-        } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */
-
-      } /* if (xmtfs & MACE_XMTFS_XMTSV) */
-
-      lp->linux_stats.tx_packets++;
-      lp->tx_free_frames++;
-      netif_wake_queue(dev);
-    } /* if (status & MACE_IR_XMTINT) */
-
-    if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) {
-      if (status & MACE_IR_JAB) {
-        /* Jabber Error.  Excessive transmit duration (20-150ms). */
-        lp->mace_stats.jab++;
-      }
-      if (status & MACE_IR_BABL) {
-        /* Babble Error.  >1518 bytes transmitted. */
-        lp->mace_stats.babl++;
-      }
-      if (status & MACE_IR_CERR) {
-        /* Collision Error.  CERR indicates the absence of the
-           Signal Quality Error Test message after a packet
-           transmission. */
-        lp->mace_stats.cerr++;
-      }
-      if (status & MACE_IR_RCVCCO) {
-        /* Receive Collision Count Overflow. */
-        lp->mace_stats.rcvcco++;
-      }
-      if (status & MACE_IR_RNTPCO) {
-        /* Runt Packet Count Overflow */
-        lp->mace_stats.rntpco++;
-      }
-      if (status & MACE_IR_MPCO) {
-        /* Missed Packet Count Overflow */
-        lp->mace_stats.mpco++;
-      }
-    } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */
-
-  } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));
-
-  return IRQ_HANDLED;
-} /* mace_interrupt */
-
-/* ----------------------------------------------------------------------------
-mace_rx
-	Receives packets.
---------------------------------------------------------------------------- */
-static int mace_rx(struct net_device *dev, unsigned char RxCnt)
-{
-  mace_private *lp = netdev_priv(dev);
-  unsigned int ioaddr = dev->base_addr;
-  unsigned char rx_framecnt;
-  unsigned short rx_status;
-
-  while (
-    ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) &&
-    (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */
-    (RxCnt--)
-  ) {
-    rx_status = inw(ioaddr + AM2150_RCV);
-
-    pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status"
-             " 0x%X.\n", dev->name, rx_framecnt, rx_status);
-
-    if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats.
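-                                            (RCVSTS is a summary flag; the
-                                            OFLO/CLSN/FRAM/FCS bits tested
-                                            below identify the cause.)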
*/ - lp->linux_stats.rx_errors++; - if (rx_status & MACE_RCVFS_OFLO) { - lp->mace_stats.oflo++; - } - if (rx_status & MACE_RCVFS_CLSN) { - lp->mace_stats.clsn++; - } - if (rx_status & MACE_RCVFS_FRAM) { - lp->mace_stats.fram++; - } - if (rx_status & MACE_RCVFS_FCS) { - lp->mace_stats.fcs++; - } - } else { - short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4; - /* Auto Strip is off, always subtract 4 */ - struct sk_buff *skb; - - lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV); - /* runt packet count */ - lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV); - /* rcv collision count */ - - pr_debug(" receiving packet size 0x%X rx_status" - " 0x%X.\n", pkt_len, rx_status); - - skb = dev_alloc_skb(pkt_len+2); - - if (skb != NULL) { - skb_reserve(skb, 2); - insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); - if (pkt_len & 1) - *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV); - skb->protocol = eth_type_trans(skb, dev); - - netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ - - lp->linux_stats.rx_packets++; - lp->linux_stats.rx_bytes += pkt_len; - outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ - continue; - } else { - pr_debug("%s: couldn't allocate a sk_buff of size" - " %d.\n", dev->name, pkt_len); - lp->linux_stats.rx_dropped++; - } - } - outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ - } /* while */ - - return 0; -} /* mace_rx */ - -/* ---------------------------------------------------------------------------- -pr_linux_stats ----------------------------------------------------------------------------- */ -static void pr_linux_stats(struct net_device_stats *pstats) -{ - pr_debug("pr_linux_stats\n"); - pr_debug(" rx_packets=%-7ld tx_packets=%ld\n", - (long)pstats->rx_packets, (long)pstats->tx_packets); - pr_debug(" rx_errors=%-7ld tx_errors=%ld\n", - (long)pstats->rx_errors, (long)pstats->tx_errors); - pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n", - (long)pstats->rx_dropped, (long)pstats->tx_dropped); - pr_debug(" multicast=%-7ld collisions=%ld\n", - (long)pstats->multicast, (long)pstats->collisions); - - pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n", - (long)pstats->rx_length_errors, (long)pstats->rx_over_errors); - pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n", - (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors); - pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n", - (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors); - - pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n", - (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors); - pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n", - (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors); - pr_debug(" tx_window_errors=%ld\n", - (long)pstats->tx_window_errors); -} /* pr_linux_stats */ - -/* ---------------------------------------------------------------------------- -pr_mace_stats ----------------------------------------------------------------------------- */ -static void pr_mace_stats(mace_statistics *pstats) -{ - pr_debug("pr_mace_stats\n"); - - pr_debug(" xmtsv=%-7d uflo=%d\n", - pstats->xmtsv, pstats->uflo); - pr_debug(" lcol=%-7d more=%d\n", - pstats->lcol, pstats->more); - pr_debug(" one=%-7d defer=%d\n", - pstats->one, pstats->defer); - pr_debug(" lcar=%-7d rtry=%d\n", - pstats->lcar, pstats->rtry); - - /* MACE_XMTRC */ - pr_debug(" exdef=%-7d xmtrc=%d\n", - pstats->exdef, pstats->xmtrc); - - /* RFS1--Receive Status (RCVSTS) */ - pr_debug(" oflo=%-7d clsn=%d\n", - 
pstats->oflo, pstats->clsn); - pr_debug(" fram=%-7d fcs=%d\n", - pstats->fram, pstats->fcs); - - /* RFS2--Runt Packet Count (RNTPC) */ - /* RFS3--Receive Collision Count (RCVCC) */ - pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n", - pstats->rfs_rntpc, pstats->rfs_rcvcc); - - /* MACE_IR */ - pr_debug(" jab=%-7d babl=%d\n", - pstats->jab, pstats->babl); - pr_debug(" cerr=%-7d rcvcco=%d\n", - pstats->cerr, pstats->rcvcco); - pr_debug(" rntpco=%-7d mpco=%d\n", - pstats->rntpco, pstats->mpco); - - /* MACE_MPC */ - pr_debug(" mpc=%d\n", pstats->mpc); - - /* MACE_RNTPC */ - pr_debug(" rntpc=%d\n", pstats->rntpc); - - /* MACE_RCVCC */ - pr_debug(" rcvcc=%d\n", pstats->rcvcc); - -} /* pr_mace_stats */ - -/* ---------------------------------------------------------------------------- -update_stats - Update statistics. We change to register window 1, so this - should be run single-threaded if the device is active. This is - expected to be a rare operation, and it's simpler for the rest - of the driver to assume that window 0 is always valid rather - than use a special window-state variable. - - oflo & uflo should _never_ occur since it would mean the Xilinx - was not able to transfer data between the MACE FIFO and the - card's SRAM fast enough. If this happens, something is - seriously wrong with the hardware. ----------------------------------------------------------------------------- */ -static void update_stats(unsigned int ioaddr, struct net_device *dev) -{ - mace_private *lp = netdev_priv(dev); - - lp->mace_stats.rcvcc += mace_read(lp, ioaddr, MACE_RCVCC); - lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC); - lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC); - /* At this point, mace_stats is fully updated for this call. - We may now update the linux_stats. */ - - /* The MACE has no equivalent for linux_stats field which are commented - out. */ - - /* lp->linux_stats.multicast; */ - lp->linux_stats.collisions = - lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc; - /* Collision: The MACE may retry sending a packet 15 times - before giving up. The retry count is in XMTRC. - Does each retry constitute a collision? - If so, why doesn't the RCVCC record these collisions? */ - - /* detailed rx_errors: */ - lp->linux_stats.rx_length_errors = - lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc; - /* lp->linux_stats.rx_over_errors */ - lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs; - lp->linux_stats.rx_frame_errors = lp->mace_stats.fram; - lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo; - lp->linux_stats.rx_missed_errors = - lp->mace_stats.mpco * 256 + lp->mace_stats.mpc; - - /* detailed tx_errors */ - lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry; - lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar; - /* LCAR usually results from bad cabling. */ - lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo; - lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr; - /* lp->linux_stats.tx_window_errors; */ -} /* update_stats */ - -/* ---------------------------------------------------------------------------- -mace_get_stats - Gathers ethernet statistics from the MACE chip. 
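-	Refreshes the hardware counters via update_stats() and returns
-	the accumulated linux_stats.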
----------------------------------------------------------------------------- */ -static struct net_device_stats *mace_get_stats(struct net_device *dev) -{ - mace_private *lp = netdev_priv(dev); - - update_stats(dev->base_addr, dev); - - pr_debug("%s: updating the statistics.\n", dev->name); - pr_linux_stats(&lp->linux_stats); - pr_mace_stats(&lp->mace_stats); - - return &lp->linux_stats; -} /* net_device_stats */ - -/* ---------------------------------------------------------------------------- -updateCRC - Modified from Am79C90 data sheet. ----------------------------------------------------------------------------- */ - -#ifdef BROKEN_MULTICAST - -static void updateCRC(int *CRC, int bit) -{ - static const int poly[]={ - 1,1,1,0, 1,1,0,1, - 1,0,1,1, 1,0,0,0, - 1,0,0,0, 0,0,1,1, - 0,0,1,0, 0,0,0,0 - }; /* CRC polynomial. poly[n] = coefficient of the x**n term of the - CRC generator polynomial. */ - - int j; - - /* shift CRC and control bit (CRC[32]) */ - for (j = 32; j > 0; j--) - CRC[j] = CRC[j-1]; - CRC[0] = 0; - - /* If bit XOR(control bit) = 1, set CRC = CRC XOR polynomial. */ - if (bit ^ CRC[32]) - for (j = 0; j < 32; j++) - CRC[j] ^= poly[j]; -} /* updateCRC */ - -/* ---------------------------------------------------------------------------- -BuildLAF - Build logical address filter. - Modified from Am79C90 data sheet. - -Input - ladrf: logical address filter (contents initialized to 0) - adr: ethernet address ----------------------------------------------------------------------------- */ -static void BuildLAF(int *ladrf, int *adr) -{ - int CRC[33]={1}; /* CRC register, 1 word/bit + extra control bit */ - - int i, byte; /* temporary array indices */ - int hashcode; /* the output object */ - - CRC[32]=0; - - for (byte = 0; byte < 6; byte++) - for (i = 0; i < 8; i++) - updateCRC(CRC, (adr[byte] >> i) & 1); - - hashcode = 0; - for (i = 0; i < 6; i++) - hashcode = (hashcode << 1) + CRC[i]; - - byte = hashcode >> 3; - ladrf[byte] |= (1 << (hashcode & 7)); - -#ifdef PCMCIA_DEBUG - if (0) - printk(KERN_DEBUG " adr =%pM\n", adr); - printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode); - for (i = 0; i < 8; i++) - pr_cont(" %02X", ladrf[i]); - pr_cont("\n"); -#endif -} /* BuildLAF */ - -/* ---------------------------------------------------------------------------- -restore_multicast_list - Restores the multicast filter for MACE chip to the last - set_multicast_list() call. 
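-	The 64-bit logical address filter built by BuildLAF() is written
-	through the IAC/LOGADDR indirection, polling the ADDRCHG bit until
-	the chip is ready to accept the new filter.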
- -Input - multicast_num_addrs - multicast_ladrf[] ----------------------------------------------------------------------------- */ -static void restore_multicast_list(struct net_device *dev) -{ - mace_private *lp = netdev_priv(dev); - int num_addrs = lp->multicast_num_addrs; - int *ladrf = lp->multicast_ladrf; - unsigned int ioaddr = dev->base_addr; - int i; - - pr_debug("%s: restoring Rx mode to %d addresses.\n", - dev->name, num_addrs); - - if (num_addrs > 0) { - - pr_debug("Attempt to restore multicast list detected.\n"); - - mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR); - /* Poll ADDRCHG bit */ - while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) - ; - /* Set LADRF register */ - for (i = 0; i < MACE_LADRF_LEN; i++) - mace_write(lp, ioaddr, MACE_LADRF, ladrf[i]); - - mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL); - mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); - - } else if (num_addrs < 0) { - - /* Promiscuous mode: receive all packets */ - mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); - mace_write(lp, ioaddr, MACE_MACCC, - MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV - ); - - } else { - - /* Normal mode */ - mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); - mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); - - } -} /* restore_multicast_list */ - -/* ---------------------------------------------------------------------------- -set_multicast_list - Set or clear the multicast filter for this adaptor. - -Input - num_addrs == -1 Promiscuous mode, receive all packets - num_addrs == 0 Normal mode, clear multicast list - num_addrs > 0 Multicast mode, receive normal and MC packets, and do - best-effort filtering. -Output - multicast_num_addrs - multicast_ladrf[] ----------------------------------------------------------------------------- */ - -static void set_multicast_list(struct net_device *dev) -{ - mace_private *lp = netdev_priv(dev); - int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */ - struct netdev_hw_addr *ha; - -#ifdef PCMCIA_DEBUG - { - static int old; - if (netdev_mc_count(dev) != old) { - old = netdev_mc_count(dev); - pr_debug("%s: setting Rx mode to %d addresses.\n", - dev->name, old); - } - } -#endif - - /* Set multicast_num_addrs. */ - lp->multicast_num_addrs = netdev_mc_count(dev); - - /* Set multicast_ladrf. 
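-     Each address is hashed by BuildLAF() to set one of the 64 LADRF bits.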
-  */
-  if (lp->multicast_num_addrs > 0) {
-    /* Calculate multicast logical address filter */
-    memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
-    netdev_for_each_mc_addr(ha, dev) {
-      memcpy(adr, ha->addr, ETHER_ADDR_LEN);
-      BuildLAF(lp->multicast_ladrf, adr);
-    }
-  }
-
-  restore_multicast_list(dev);
-
-} /* set_multicast_list */
-
-#endif /* BROKEN_MULTICAST */
-
-static void restore_multicast_list(struct net_device *dev)
-{
-  unsigned int ioaddr = dev->base_addr;
-  mace_private *lp = netdev_priv(dev);
-
-  pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name,
-	   lp->multicast_num_addrs);
-
-  if (dev->flags & IFF_PROMISC) {
-    /* Promiscuous mode: receive all packets */
-    mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
-    mace_write(lp, ioaddr, MACE_MACCC,
-	       MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
-    );
-  } else {
-    /* Normal mode */
-    mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
-    mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
-  }
-} /* restore_multicast_list */
-
-static void set_multicast_list(struct net_device *dev)
-{
-  mace_private *lp = netdev_priv(dev);
-
-#ifdef PCMCIA_DEBUG
-  {
-    static int old;
-    if (netdev_mc_count(dev) != old) {
-      old = netdev_mc_count(dev);
-      pr_debug("%s: setting Rx mode to %d addresses.\n",
-	       dev->name, old);
-    }
-  }
-#endif
-
-  lp->multicast_num_addrs = netdev_mc_count(dev);
-  restore_multicast_list(dev);
-
-} /* set_multicast_list */
-
-static const struct pcmcia_device_id nmclan_ids[] = {
-	PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Ethernet", 0x085a850b, 0x00b2e941),
-	PCMCIA_DEVICE_PROD_ID12("Portable Add-ons", "Ethernet+", 0xebf1d60, 0xad673aaf),
-	PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, nmclan_ids);
-
-static struct pcmcia_driver nmclan_cs_driver = {
-	.owner		= THIS_MODULE,
-	.name		= "nmclan_cs",
-	.probe		= nmclan_probe,
-	.remove		= nmclan_detach,
-	.id_table       = nmclan_ids,
-	.suspend	= nmclan_suspend,
-	.resume		= nmclan_resume,
-};
-
-static int __init init_nmclan_cs(void)
-{
-	return pcmcia_register_driver(&nmclan_cs_driver);
-}
-
-static void __exit exit_nmclan_cs(void)
-{
-	pcmcia_unregister_driver(&nmclan_cs_driver);
-}
-
-module_init(init_nmclan_cs);
-module_exit(exit_nmclan_cs);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
deleted file mode 100644
index 8b3090d..0000000
--- a/drivers/net/pcnet32.c
+++ /dev/null
@@ -1,2937 +0,0 @@
-/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
-/*
- *	Copyright 1996-1999 Thomas Bogendoerfer
- *
- *	Derived from the lance driver written 1993,1994,1995 by Donald Becker.
- *
- *	Copyright 1993 United States Government as represented by the
- *	Director, National Security Agency.
- *
- *	This software may be used and distributed according to the terms
- *	of the GNU General Public License, incorporated herein by reference.
- *
- *	This driver is for PCnet32 and PCnetPCI based ethercards
- */
-/**************************************************************************
- *  23 Oct, 2000.
- *  Fixed a few bugs, related to running the controller in 32bit mode.
- *
- *  Carsten Langgaard, carstenl@mips.com
- *  Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- * - *************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#define DRV_NAME "pcnet32" -#define DRV_VERSION "1.35" -#define DRV_RELDATE "21.Apr.2008" -#define PFX DRV_NAME ": " - -static const char *const version = - DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -/* - * PCI device identifiers for "new style" Linux PCI Device Drivers - */ -static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = { - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), }, - - /* - * Adapters that were sold with IBM's RS/6000 or pSeries hardware have - * the incorrect vendor id. - */ - { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE), - .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, }, - - { } /* terminate list */ -}; - -MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl); - -static int cards_found; - -/* - * VLB I/O addresses - */ -static unsigned int pcnet32_portlist[] __initdata = - { 0x300, 0x320, 0x340, 0x360, 0 }; - -static int pcnet32_debug; -static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ -static int pcnet32vlb; /* check for VLB cards ? */ - -static struct net_device *pcnet32_dev; - -static int max_interrupt_work = 2; -static int rx_copybreak = 200; - -#define PCNET32_PORT_AUI 0x00 -#define PCNET32_PORT_10BT 0x01 -#define PCNET32_PORT_GPSI 0x02 -#define PCNET32_PORT_MII 0x03 - -#define PCNET32_PORT_PORTSEL 0x03 -#define PCNET32_PORT_ASEL 0x04 -#define PCNET32_PORT_100 0x40 -#define PCNET32_PORT_FD 0x80 - -#define PCNET32_DMA_MASK 0xffffffff - -#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ)) -#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4)) - -/* - * table to translate option values from tulip - * to internal options - */ -static const unsigned char options_mapping[] = { - PCNET32_PORT_ASEL, /* 0 Auto-select */ - PCNET32_PORT_AUI, /* 1 BNC/AUI */ - PCNET32_PORT_AUI, /* 2 AUI/BNC */ - PCNET32_PORT_ASEL, /* 3 not supported */ - PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ - PCNET32_PORT_ASEL, /* 5 not supported */ - PCNET32_PORT_ASEL, /* 6 not supported */ - PCNET32_PORT_ASEL, /* 7 not supported */ - PCNET32_PORT_ASEL, /* 8 not supported */ - PCNET32_PORT_MII, /* 9 MII 10baseT */ - PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ - PCNET32_PORT_MII, /* 11 MII (autosel) */ - PCNET32_PORT_10BT, /* 12 10BaseT */ - PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ - /* 14 MII 100BaseTx-FD */ - PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, - PCNET32_PORT_ASEL /* 15 not supported */ -}; - -static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = { - "Loopback test (offline)" -}; - -#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test) - -#define PCNET32_NUM_REGS 136 - -#define MAX_UNITS 8 /* More are supported, limit only on options */ -static int options[MAX_UNITS]; -static int full_duplex[MAX_UNITS]; -static int homepna[MAX_UNITS]; - -/* - * Theory of Operation - * - * This driver uses the same software structure as the normal lance - * driver. So look for a verbose description in lance.c. 
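- * Like lance.c, the rings are OWN-bit descriptor queues; the init block
- * below hands their addresses and the logical address filter to the chip.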
The differences - * to the normal lance driver is the use of the 32bit mode of PCnet32 - * and PCnetPCI chips. Because these chips are 32bit chips, there is no - * 16MB limitation and we don't need bounce buffers. - */ - -/* - * Set the number of Tx and Rx buffers, using Log_2(# buffers). - * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. - * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). - */ -#ifndef PCNET32_LOG_TX_BUFFERS -#define PCNET32_LOG_TX_BUFFERS 4 -#define PCNET32_LOG_RX_BUFFERS 5 -#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */ -#define PCNET32_LOG_MAX_RX_BUFFERS 9 -#endif - -#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS)) -#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS)) - -#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) -#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) - -#define PKT_BUF_SKB 1544 -/* actual buffer length after being aligned */ -#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN) -/* chip wants twos complement of the (aligned) buffer length */ -#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB) - -/* Offsets from base I/O address. */ -#define PCNET32_WIO_RDP 0x10 -#define PCNET32_WIO_RAP 0x12 -#define PCNET32_WIO_RESET 0x14 -#define PCNET32_WIO_BDP 0x16 - -#define PCNET32_DWIO_RDP 0x10 -#define PCNET32_DWIO_RAP 0x14 -#define PCNET32_DWIO_RESET 0x18 -#define PCNET32_DWIO_BDP 0x1C - -#define PCNET32_TOTAL_SIZE 0x20 - -#define CSR0 0 -#define CSR0_INIT 0x1 -#define CSR0_START 0x2 -#define CSR0_STOP 0x4 -#define CSR0_TXPOLL 0x8 -#define CSR0_INTEN 0x40 -#define CSR0_IDON 0x0100 -#define CSR0_NORMAL (CSR0_START | CSR0_INTEN) -#define PCNET32_INIT_LOW 1 -#define PCNET32_INIT_HIGH 2 -#define CSR3 3 -#define CSR4 4 -#define CSR5 5 -#define CSR5_SUSPEND 0x0001 -#define CSR15 15 -#define PCNET32_MC_FILTER 8 - -#define PCNET32_79C970A 0x2621 - -/* The PCNET32 Rx and Tx ring descriptors. */ -struct pcnet32_rx_head { - __le32 base; - __le16 buf_length; /* two`s complement of length */ - __le16 status; - __le32 msg_length; - __le32 reserved; -}; - -struct pcnet32_tx_head { - __le32 base; - __le16 length; /* two`s complement of length */ - __le16 status; - __le32 misc; - __le32 reserved; -}; - -/* The PCNET32 32-Bit initialization block, described in databook. */ -struct pcnet32_init_block { - __le16 mode; - __le16 tlen_rlen; - u8 phys_addr[6]; - __le16 reserved; - __le32 filter[2]; - /* Receive and transmit ring base, along with extra bits. */ - __le32 rx_ring; - __le32 tx_ring; -}; - -/* PCnet32 access functions */ -struct pcnet32_access { - u16 (*read_csr) (unsigned long, int); - void (*write_csr) (unsigned long, int, u16); - u16 (*read_bcr) (unsigned long, int); - void (*write_bcr) (unsigned long, int, u16); - u16 (*read_rap) (unsigned long); - void (*write_rap) (unsigned long, u16); - void (*reset) (unsigned long); -}; - -/* - * The first field of pcnet32_private is read by the ethernet device - * so the structure should be allocated using pci_alloc_consistent(). - */ -struct pcnet32_private { - struct pcnet32_init_block *init_block; - /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ - struct pcnet32_rx_head *rx_ring; - struct pcnet32_tx_head *tx_ring; - dma_addr_t init_dma_addr;/* DMA address of beginning of the init block, - returned by pci_alloc_consistent */ - struct pci_dev *pci_dev; - const char *name; - /* The saved address of a sent-in-place packet/buffer, for skfree(). 
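-	   (unmapped and freed in pcnet32_tx() once the chip clears the
-	   OWN bit in the descriptor status)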
*/ - struct sk_buff **tx_skbuff; - struct sk_buff **rx_skbuff; - dma_addr_t *tx_dma_addr; - dma_addr_t *rx_dma_addr; - struct pcnet32_access a; - spinlock_t lock; /* Guard lock */ - unsigned int cur_rx, cur_tx; /* The next free ring entry */ - unsigned int rx_ring_size; /* current rx ring size */ - unsigned int tx_ring_size; /* current tx ring size */ - unsigned int rx_mod_mask; /* rx ring modular mask */ - unsigned int tx_mod_mask; /* tx ring modular mask */ - unsigned short rx_len_bits; - unsigned short tx_len_bits; - dma_addr_t rx_ring_dma_addr; - dma_addr_t tx_ring_dma_addr; - unsigned int dirty_rx, /* ring entries to be freed. */ - dirty_tx; - - struct net_device *dev; - struct napi_struct napi; - char tx_full; - char phycount; /* number of phys found */ - int options; - unsigned int shared_irq:1, /* shared irq possible */ - dxsuflo:1, /* disable transmit stop on uflo */ - mii:1; /* mii port available */ - struct net_device *next; - struct mii_if_info mii_if; - struct timer_list watchdog_timer; - u32 msg_enable; /* debug message level */ - - /* each bit indicates an available PHY */ - u32 phymask; - unsigned short chip_version; /* which variant this is */ - - /* saved registers during ethtool blink */ - u16 save_regs[4]; -}; - -static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); -static int pcnet32_probe1(unsigned long, int, struct pci_dev *); -static int pcnet32_open(struct net_device *); -static int pcnet32_init_ring(struct net_device *); -static netdev_tx_t pcnet32_start_xmit(struct sk_buff *, - struct net_device *); -static void pcnet32_tx_timeout(struct net_device *dev); -static irqreturn_t pcnet32_interrupt(int, void *); -static int pcnet32_close(struct net_device *); -static struct net_device_stats *pcnet32_get_stats(struct net_device *); -static void pcnet32_load_multicast(struct net_device *dev); -static void pcnet32_set_multicast_list(struct net_device *); -static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); -static void pcnet32_watchdog(struct net_device *); -static int mdio_read(struct net_device *dev, int phy_id, int reg_num); -static void mdio_write(struct net_device *dev, int phy_id, int reg_num, - int val); -static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); -static void pcnet32_ethtool_test(struct net_device *dev, - struct ethtool_test *eth_test, u64 * data); -static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1); -static int pcnet32_get_regs_len(struct net_device *dev); -static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *ptr); -static void pcnet32_purge_tx_ring(struct net_device *dev); -static int pcnet32_alloc_ring(struct net_device *dev, const char *name); -static void pcnet32_free_ring(struct net_device *dev); -static void pcnet32_check_media(struct net_device *dev, int verbose); - -static u16 pcnet32_wio_read_csr(unsigned long addr, int index) -{ - outw(index, addr + PCNET32_WIO_RAP); - return inw(addr + PCNET32_WIO_RDP); -} - -static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val) -{ - outw(index, addr + PCNET32_WIO_RAP); - outw(val, addr + PCNET32_WIO_RDP); -} - -static u16 pcnet32_wio_read_bcr(unsigned long addr, int index) -{ - outw(index, addr + PCNET32_WIO_RAP); - return inw(addr + PCNET32_WIO_BDP); -} - -static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val) -{ - outw(index, addr + PCNET32_WIO_RAP); - outw(val, addr + PCNET32_WIO_BDP); -} - -static u16 pcnet32_wio_read_rap(unsigned long addr) -{ 
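-	/* RAP selects which CSR/BCR the RDP/BDP data ports access. */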
- return inw(addr + PCNET32_WIO_RAP); -} - -static void pcnet32_wio_write_rap(unsigned long addr, u16 val) -{ - outw(val, addr + PCNET32_WIO_RAP); -} - -static void pcnet32_wio_reset(unsigned long addr) -{ - inw(addr + PCNET32_WIO_RESET); -} - -static int pcnet32_wio_check(unsigned long addr) -{ - outw(88, addr + PCNET32_WIO_RAP); - return inw(addr + PCNET32_WIO_RAP) == 88; -} - -static struct pcnet32_access pcnet32_wio = { - .read_csr = pcnet32_wio_read_csr, - .write_csr = pcnet32_wio_write_csr, - .read_bcr = pcnet32_wio_read_bcr, - .write_bcr = pcnet32_wio_write_bcr, - .read_rap = pcnet32_wio_read_rap, - .write_rap = pcnet32_wio_write_rap, - .reset = pcnet32_wio_reset -}; - -static u16 pcnet32_dwio_read_csr(unsigned long addr, int index) -{ - outl(index, addr + PCNET32_DWIO_RAP); - return inl(addr + PCNET32_DWIO_RDP) & 0xffff; -} - -static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val) -{ - outl(index, addr + PCNET32_DWIO_RAP); - outl(val, addr + PCNET32_DWIO_RDP); -} - -static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index) -{ - outl(index, addr + PCNET32_DWIO_RAP); - return inl(addr + PCNET32_DWIO_BDP) & 0xffff; -} - -static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val) -{ - outl(index, addr + PCNET32_DWIO_RAP); - outl(val, addr + PCNET32_DWIO_BDP); -} - -static u16 pcnet32_dwio_read_rap(unsigned long addr) -{ - return inl(addr + PCNET32_DWIO_RAP) & 0xffff; -} - -static void pcnet32_dwio_write_rap(unsigned long addr, u16 val) -{ - outl(val, addr + PCNET32_DWIO_RAP); -} - -static void pcnet32_dwio_reset(unsigned long addr) -{ - inl(addr + PCNET32_DWIO_RESET); -} - -static int pcnet32_dwio_check(unsigned long addr) -{ - outl(88, addr + PCNET32_DWIO_RAP); - return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88; -} - -static struct pcnet32_access pcnet32_dwio = { - .read_csr = pcnet32_dwio_read_csr, - .write_csr = pcnet32_dwio_write_csr, - .read_bcr = pcnet32_dwio_read_bcr, - .write_bcr = pcnet32_dwio_write_bcr, - .read_rap = pcnet32_dwio_read_rap, - .write_rap = pcnet32_dwio_write_rap, - .reset = pcnet32_dwio_reset -}; - -static void pcnet32_netif_stop(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - - dev->trans_start = jiffies; /* prevent tx timeout */ - napi_disable(&lp->napi); - netif_tx_disable(dev); -} - -static void pcnet32_netif_start(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - ulong ioaddr = dev->base_addr; - u16 val; - - netif_wake_queue(dev); - val = lp->a.read_csr(ioaddr, CSR3); - val &= 0x00ff; - lp->a.write_csr(ioaddr, CSR3, val); - napi_enable(&lp->napi); -} - -/* - * Allocate space for the new sized tx ring. - * Free old resources - * Save new resources. - * Any failure keeps old resources. - * Must be called with lp->lock held. 
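- * The new ring, DMA-address list and skb list are all allocated before
- * anything old is freed, so a failure leaves the device ring usable.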
- */
-static void pcnet32_realloc_tx_ring(struct net_device *dev,
-				    struct pcnet32_private *lp,
-				    unsigned int size)
-{
-	dma_addr_t new_ring_dma_addr;
-	dma_addr_t *new_dma_addr_list;
-	struct pcnet32_tx_head *new_tx_ring;
-	struct sk_buff **new_skb_list;
-
-	pcnet32_purge_tx_ring(dev);
-
-	new_tx_ring = pci_alloc_consistent(lp->pci_dev,
-					   sizeof(struct pcnet32_tx_head) *
-					   (1 << size),
-					   &new_ring_dma_addr);
-	if (new_tx_ring == NULL) {
-		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
-		return;
-	}
-	memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
-
-	new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
-				    GFP_ATOMIC);
-	if (!new_dma_addr_list) {
-		netif_err(lp, drv, dev, "Memory allocation failed\n");
-		goto free_new_tx_ring;
-	}
-
-	new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
-			       GFP_ATOMIC);
-	if (!new_skb_list) {
-		netif_err(lp, drv, dev, "Memory allocation failed\n");
-		goto free_new_lists;
-	}
-
-	kfree(lp->tx_skbuff);
-	kfree(lp->tx_dma_addr);
-	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_tx_head) *
-			    lp->tx_ring_size, lp->tx_ring,
-			    lp->tx_ring_dma_addr);
-
-	lp->tx_ring_size = (1 << size);
-	lp->tx_mod_mask = lp->tx_ring_size - 1;
-	lp->tx_len_bits = (size << 12);
-	lp->tx_ring = new_tx_ring;
-	lp->tx_ring_dma_addr = new_ring_dma_addr;
-	lp->tx_dma_addr = new_dma_addr_list;
-	lp->tx_skbuff = new_skb_list;
-	return;
-
-free_new_lists:
-	kfree(new_dma_addr_list);
-free_new_tx_ring:
-	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_tx_head) *
-			    (1 << size),
-			    new_tx_ring,
-			    new_ring_dma_addr);
-}
-
-/*
- * Allocate space for the new sized rx ring.
- * Re-use old receive buffers.
- *   alloc extra buffers
- *   free unneeded buffers
- * Save new resources.
- * Any failure keeps old resources.
- * Must be called with lp->lock held.
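- * Entries common to the old and new sizes are reused in place; only the
- * difference is allocated or freed.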
- */
-static void pcnet32_realloc_rx_ring(struct net_device *dev,
-				    struct pcnet32_private *lp,
-				    unsigned int size)
-{
-	dma_addr_t new_ring_dma_addr;
-	dma_addr_t *new_dma_addr_list;
-	struct pcnet32_rx_head *new_rx_ring;
-	struct sk_buff **new_skb_list;
-	int new, overlap;
-
-	new_rx_ring = pci_alloc_consistent(lp->pci_dev,
-					   sizeof(struct pcnet32_rx_head) *
-					   (1 << size),
-					   &new_ring_dma_addr);
-	if (new_rx_ring == NULL) {
-		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
-		return;
-	}
-	memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
-
-	new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
-				    GFP_ATOMIC);
-	if (!new_dma_addr_list) {
-		netif_err(lp, drv, dev, "Memory allocation failed\n");
-		goto free_new_rx_ring;
-	}
-
-	new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
-			       GFP_ATOMIC);
-	if (!new_skb_list) {
-		netif_err(lp, drv, dev, "Memory allocation failed\n");
-		goto free_new_lists;
-	}
-
-	/* first copy the current receive buffers */
-	overlap = min(1U << size, lp->rx_ring_size);
-	for (new = 0; new < overlap; new++) {
-		new_rx_ring[new] = lp->rx_ring[new];
-		new_dma_addr_list[new] = lp->rx_dma_addr[new];
-		new_skb_list[new] = lp->rx_skbuff[new];
-	}
-	/* now allocate any new buffers needed */
-	for (; new < (1 << size); new++) {
-		struct sk_buff *rx_skbuff;
-		new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
-		rx_skbuff = new_skb_list[new];
-		if (!rx_skbuff) {
-			/* keep the original lists and buffers */
-			netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
-				  __func__);
-			goto free_all_new;
-		}
-		skb_reserve(rx_skbuff, NET_IP_ALIGN);
-
-		new_dma_addr_list[new] =
-			pci_map_single(lp->pci_dev, rx_skbuff->data,
-				       PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
-		new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
-		new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
-		new_rx_ring[new].status = cpu_to_le16(0x8000);
-	}
-	/* and free any unneeded buffers */
-	for (; new < lp->rx_ring_size; new++) {
-		if (lp->rx_skbuff[new]) {
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(lp->rx_skbuff[new]);
-		}
-	}
-
-	kfree(lp->rx_skbuff);
-	kfree(lp->rx_dma_addr);
-	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_rx_head) *
-			    lp->rx_ring_size, lp->rx_ring,
-			    lp->rx_ring_dma_addr);
-
-	lp->rx_ring_size = (1 << size);
-	lp->rx_mod_mask = lp->rx_ring_size - 1;
-	lp->rx_len_bits = (size << 4);
-	lp->rx_ring = new_rx_ring;
-	lp->rx_ring_dma_addr = new_ring_dma_addr;
-	lp->rx_dma_addr = new_dma_addr_list;
-	lp->rx_skbuff = new_skb_list;
-	return;
-
-free_all_new:
-	while (--new >= lp->rx_ring_size) {
-		if (new_skb_list[new]) {
-			pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(new_skb_list[new]);
-		}
-	}
-	kfree(new_skb_list);
-free_new_lists:
-	kfree(new_dma_addr_list);
-free_new_rx_ring:
-	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_rx_head) *
-			    (1 << size),
-			    new_rx_ring,
-			    new_ring_dma_addr);
-}
-
-static void pcnet32_purge_rx_ring(struct net_device *dev)
-{
-	struct pcnet32_private *lp = netdev_priv(dev);
-	int i;
-
-	/* free all allocated skbuffs */
-	for (i = 0; i < lp->rx_ring_size; i++) {
-		lp->rx_ring[i].status = 0;	/* CPU owns buffer */
-		wmb();		/* Make sure adapter sees owner change */
-		if (lp->rx_skbuff[i]) {
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb_any(lp->rx_skbuff[i]);
-		}
-		lp->rx_skbuff[i] = NULL;
-		lp->rx_dma_addr[i] = 0;
-	}
-}
-
-#ifdef
CONFIG_NET_POLL_CONTROLLER -static void pcnet32_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - pcnet32_interrupt(0, dev); - enable_irq(dev->irq); -} -#endif - -static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long flags; - int r = -EOPNOTSUPP; - - if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); - mii_ethtool_gset(&lp->mii_if, cmd); - spin_unlock_irqrestore(&lp->lock, flags); - r = 0; - } - return r; -} - -static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long flags; - int r = -EOPNOTSUPP; - - if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); - r = mii_ethtool_sset(&lp->mii_if, cmd); - spin_unlock_irqrestore(&lp->lock, flags); - } - return r; -} - -static void pcnet32_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct pcnet32_private *lp = netdev_priv(dev); - - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - if (lp->pci_dev) - strcpy(info->bus_info, pci_name(lp->pci_dev)); - else - sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); -} - -static u32 pcnet32_get_link(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long flags; - int r; - - spin_lock_irqsave(&lp->lock, flags); - if (lp->mii) { - r = mii_link_ok(&lp->mii_if); - } else if (lp->chip_version >= PCNET32_79C970A) { - ulong ioaddr = dev->base_addr; /* card base I/O address */ - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); - } else { /* can not detect link on really old chips */ - r = 1; - } - spin_unlock_irqrestore(&lp->lock, flags); - - return r; -} - -static u32 pcnet32_get_msglevel(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - return lp->msg_enable; -} - -static void pcnet32_set_msglevel(struct net_device *dev, u32 value) -{ - struct pcnet32_private *lp = netdev_priv(dev); - lp->msg_enable = value; -} - -static int pcnet32_nway_reset(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long flags; - int r = -EOPNOTSUPP; - - if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); - r = mii_nway_restart(&lp->mii_if); - spin_unlock_irqrestore(&lp->lock, flags); - } - return r; -} - -static void pcnet32_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) -{ - struct pcnet32_private *lp = netdev_priv(dev); - - ering->tx_max_pending = TX_MAX_RING_SIZE; - ering->tx_pending = lp->tx_ring_size; - ering->rx_max_pending = RX_MAX_RING_SIZE; - ering->rx_pending = lp->rx_ring_size; -} - -static int pcnet32_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long flags; - unsigned int size; - ulong ioaddr = dev->base_addr; - int i; - - if (ering->rx_mini_pending || ering->rx_jumbo_pending) - return -EINVAL; - - if (netif_running(dev)) - pcnet32_netif_stop(dev); - - spin_lock_irqsave(&lp->lock, flags); - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ - - size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); - - /* set the minimum ring size to 4, to allow the loopback test to work - * unchanged. 
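-	 * (pcnet32_loopback_test uses up to numbuffs = 4 descriptors)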
- */ - for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { - if (size <= (1 << i)) - break; - } - if ((1 << i) != lp->tx_ring_size) - pcnet32_realloc_tx_ring(dev, lp, i); - - size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); - for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { - if (size <= (1 << i)) - break; - } - if ((1 << i) != lp->rx_ring_size) - pcnet32_realloc_rx_ring(dev, lp, i); - - lp->napi.weight = lp->rx_ring_size / 2; - - if (netif_running(dev)) { - pcnet32_netif_start(dev); - pcnet32_restart(dev, CSR0_NORMAL); - } - - spin_unlock_irqrestore(&lp->lock, flags); - - netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n", - lp->rx_ring_size, lp->tx_ring_size); - - return 0; -} - -static void pcnet32_get_strings(struct net_device *dev, u32 stringset, - u8 *data) -{ - memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); -} - -static int pcnet32_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return PCNET32_TEST_LEN; - default: - return -EOPNOTSUPP; - } -} - -static void pcnet32_ethtool_test(struct net_device *dev, - struct ethtool_test *test, u64 * data) -{ - struct pcnet32_private *lp = netdev_priv(dev); - int rc; - - if (test->flags == ETH_TEST_FL_OFFLINE) { - rc = pcnet32_loopback_test(dev, data); - if (rc) { - netif_printk(lp, hw, KERN_DEBUG, dev, - "Loopback test failed\n"); - test->flags |= ETH_TEST_FL_FAILED; - } else - netif_printk(lp, hw, KERN_DEBUG, dev, - "Loopback test passed\n"); - } else - netif_printk(lp, hw, KERN_DEBUG, dev, - "No tests to run (specify 'Offline' on ethtool)\n"); -} /* end pcnet32_ethtool_test */ - -static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) -{ - struct pcnet32_private *lp = netdev_priv(dev); - struct pcnet32_access *a = &lp->a; /* access to registers */ - ulong ioaddr = dev->base_addr; /* card base I/O address */ - struct sk_buff *skb; /* sk buff */ - int x, i; /* counters */ - int numbuffs = 4; /* number of TX/RX buffers and descs */ - u16 status = 0x8300; /* TX ring status */ - __le16 teststatus; /* test of ring status */ - int rc; /* return code */ - int size; /* size of packets */ - unsigned char *packet; /* source packet data */ - static const int data_len = 60; /* length of source packets */ - unsigned long flags; - unsigned long ticks; - - rc = 1; /* default to fail */ - - if (netif_running(dev)) - pcnet32_netif_stop(dev); - - spin_lock_irqsave(&lp->lock, flags); - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ - - numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size)); - - /* Reset the PCNET32 */ - lp->a.reset(ioaddr); - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ - - /* switch pcnet32 to 32bit mode */ - lp->a.write_bcr(ioaddr, 20, 2); - - /* purge & init rings but don't actually restart */ - pcnet32_restart(dev, 0x0000); - - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ - - /* Initialize Transmit buffers. 
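-	   Each test frame is size = data_len + 15 = 75 bytes: 6 bytes DA and
-	   6 bytes SA (both the device address), type bytes 0x08 0x06, one
-	   sequence byte, then data_len = 60 bytes counting 0x00..0x3b.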
*/ - size = data_len + 15; - for (x = 0; x < numbuffs; x++) { - skb = dev_alloc_skb(size); - if (!skb) { - netif_printk(lp, hw, KERN_DEBUG, dev, - "Cannot allocate skb at line: %d!\n", - __LINE__); - goto clean_up; - } - packet = skb->data; - skb_put(skb, size); /* create space for data */ - lp->tx_skbuff[x] = skb; - lp->tx_ring[x].length = cpu_to_le16(-skb->len); - lp->tx_ring[x].misc = 0; - - /* put DA and SA into the skb */ - for (i = 0; i < 6; i++) - *packet++ = dev->dev_addr[i]; - for (i = 0; i < 6; i++) - *packet++ = dev->dev_addr[i]; - /* type */ - *packet++ = 0x08; - *packet++ = 0x06; - /* packet number */ - *packet++ = x; - /* fill packet with data */ - for (i = 0; i < data_len; i++) - *packet++ = i; - - lp->tx_dma_addr[x] = - pci_map_single(lp->pci_dev, skb->data, skb->len, - PCI_DMA_TODEVICE); - lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); - wmb(); /* Make sure owner changes after all others are visible */ - lp->tx_ring[x].status = cpu_to_le16(status); - } - - x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */ - a->write_bcr(ioaddr, 32, x | 0x0002); - - /* set int loopback in CSR15 */ - x = a->read_csr(ioaddr, CSR15) & 0xfffc; - lp->a.write_csr(ioaddr, CSR15, x | 0x0044); - - teststatus = cpu_to_le16(0x8000); - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ - - /* Check status of descriptors */ - for (x = 0; x < numbuffs; x++) { - ticks = 0; - rmb(); - while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { - spin_unlock_irqrestore(&lp->lock, flags); - msleep(1); - spin_lock_irqsave(&lp->lock, flags); - rmb(); - ticks++; - } - if (ticks == 200) { - netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x); - break; - } - } - - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ - wmb(); - if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { - netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n"); - - for (x = 0; x < numbuffs; x++) { - netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x); - skb = lp->rx_skbuff[x]; - for (i = 0; i < size; i++) - pr_cont(" %02x", *(skb->data + i)); - pr_cont("\n"); - } - } - - x = 0; - rc = 0; - while (x < numbuffs && !rc) { - skb = lp->rx_skbuff[x]; - packet = lp->tx_skbuff[x]->data; - for (i = 0; i < size; i++) { - if (*(skb->data + i) != packet[i]) { - netif_printk(lp, hw, KERN_DEBUG, dev, - "Error in compare! 
%2x - %02x %02x\n", - i, *(skb->data + i), packet[i]); - rc = 1; - break; - } - } - x++; - } - -clean_up: - *data1 = rc; - pcnet32_purge_tx_ring(dev); - - x = a->read_csr(ioaddr, CSR15); - a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */ - - x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ - a->write_bcr(ioaddr, 32, (x & ~0x0002)); - - if (netif_running(dev)) { - pcnet32_netif_start(dev); - pcnet32_restart(dev, CSR0_NORMAL); - } else { - pcnet32_purge_rx_ring(dev); - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ - } - spin_unlock_irqrestore(&lp->lock, flags); - - return rc; -} /* end pcnet32_loopback_test */ - -static int pcnet32_set_phys_id(struct net_device *dev, - enum ethtool_phys_id_state state) -{ - struct pcnet32_private *lp = netdev_priv(dev); - struct pcnet32_access *a = &lp->a; - ulong ioaddr = dev->base_addr; - unsigned long flags; - int i; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - /* Save the current value of the bcrs */ - spin_lock_irqsave(&lp->lock, flags); - for (i = 4; i < 8; i++) - lp->save_regs[i - 4] = a->read_bcr(ioaddr, i); - spin_unlock_irqrestore(&lp->lock, flags); - return 2; /* cycle on/off twice per second */ - - case ETHTOOL_ID_ON: - case ETHTOOL_ID_OFF: - /* Blink the led */ - spin_lock_irqsave(&lp->lock, flags); - for (i = 4; i < 8; i++) - a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); - spin_unlock_irqrestore(&lp->lock, flags); - break; - - case ETHTOOL_ID_INACTIVE: - /* Restore the original value of the bcrs */ - spin_lock_irqsave(&lp->lock, flags); - for (i = 4; i < 8; i++) - a->write_bcr(ioaddr, i, lp->save_regs[i - 4]); - spin_unlock_irqrestore(&lp->lock, flags); - } - return 0; -} - -/* - * lp->lock must be held. - */ -static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, - int can_sleep) -{ - int csr5; - struct pcnet32_private *lp = netdev_priv(dev); - struct pcnet32_access *a = &lp->a; - ulong ioaddr = dev->base_addr; - int ticks; - - /* really old chips have to be stopped. */ - if (lp->chip_version < PCNET32_79C970A) - return 0; - - /* set SUSPEND (SPND) - CSR5 bit 0 */ - csr5 = a->read_csr(ioaddr, CSR5); - a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); - - /* poll waiting for bit to be set */ - ticks = 0; - while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) { - spin_unlock_irqrestore(&lp->lock, *flags); - if (can_sleep) - msleep(1); - else - mdelay(1); - spin_lock_irqsave(&lp->lock, *flags); - ticks++; - if (ticks > 200) { - netif_printk(lp, hw, KERN_DEBUG, dev, - "Error getting into suspend!\n"); - return 0; - } - } - return 1; -} - -/* - * process one receive descriptor entry - */ - -static void pcnet32_rx_entry(struct net_device *dev, - struct pcnet32_private *lp, - struct pcnet32_rx_head *rxp, - int entry) -{ - int status = (short)le16_to_cpu(rxp->status) >> 8; - int rx_in_place = 0; - struct sk_buff *skb; - short pkt_len; - - if (status != 0x03) { /* There was an error. */ - /* - * There is a tricky error noted by John Murphy, - * to Russ Nelson: Even with full-sized - * buffers it's possible for a jabber packet to use two - * buffers, with only the last correctly noting the error. - */ - if (status & 0x01) /* Only count a general error at the */ - dev->stats.rx_errors++; /* end of a packet. 
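-						   (this avoids double-counting
-						   a jabber frame that spans
-						   two buffers, per the note
-						   above)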
*/ - if (status & 0x20) - dev->stats.rx_frame_errors++; - if (status & 0x10) - dev->stats.rx_over_errors++; - if (status & 0x08) - dev->stats.rx_crc_errors++; - if (status & 0x04) - dev->stats.rx_fifo_errors++; - return; - } - - pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4; - - /* Discard oversize frames. */ - if (unlikely(pkt_len > PKT_BUF_SIZE)) { - netif_err(lp, drv, dev, "Impossible packet size %d!\n", - pkt_len); - dev->stats.rx_errors++; - return; - } - if (pkt_len < 60) { - netif_err(lp, rx_err, dev, "Runt packet!\n"); - dev->stats.rx_errors++; - return; - } - - if (pkt_len > rx_copybreak) { - struct sk_buff *newskb; - - newskb = dev_alloc_skb(PKT_BUF_SKB); - if (newskb) { - skb_reserve(newskb, NET_IP_ALIGN); - skb = lp->rx_skbuff[entry]; - pci_unmap_single(lp->pci_dev, - lp->rx_dma_addr[entry], - PKT_BUF_SIZE, - PCI_DMA_FROMDEVICE); - skb_put(skb, pkt_len); - lp->rx_skbuff[entry] = newskb; - lp->rx_dma_addr[entry] = - pci_map_single(lp->pci_dev, - newskb->data, - PKT_BUF_SIZE, - PCI_DMA_FROMDEVICE); - rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); - rx_in_place = 1; - } else - skb = NULL; - } else - skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN); - - if (skb == NULL) { - netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n"); - dev->stats.rx_dropped++; - return; - } - if (!rx_in_place) { - skb_reserve(skb, NET_IP_ALIGN); - skb_put(skb, pkt_len); /* Make room */ - pci_dma_sync_single_for_cpu(lp->pci_dev, - lp->rx_dma_addr[entry], - pkt_len, - PCI_DMA_FROMDEVICE); - skb_copy_to_linear_data(skb, - (unsigned char *)(lp->rx_skbuff[entry]->data), - pkt_len); - pci_dma_sync_single_for_device(lp->pci_dev, - lp->rx_dma_addr[entry], - pkt_len, - PCI_DMA_FROMDEVICE); - } - dev->stats.rx_bytes += skb->len; - skb->protocol = eth_type_trans(skb, dev); - netif_receive_skb(skb); - dev->stats.rx_packets++; -} - -static int pcnet32_rx(struct net_device *dev, int budget) -{ - struct pcnet32_private *lp = netdev_priv(dev); - int entry = lp->cur_rx & lp->rx_mod_mask; - struct pcnet32_rx_head *rxp = &lp->rx_ring[entry]; - int npackets = 0; - - /* If we own the next entry, it's a new packet. Send it up. */ - while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) { - pcnet32_rx_entry(dev, lp, rxp, entry); - npackets += 1; - /* - * The docs say that the buffer length isn't touched, but Andrew - * Boyd of QNX reports that some revs of the 79C965 clear it. - */ - rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE); - wmb(); /* Make sure owner changes after others are visible */ - rxp->status = cpu_to_le16(0x8000); - entry = (++lp->cur_rx) & lp->rx_mod_mask; - rxp = &lp->rx_ring[entry]; - } - - return npackets; -} - -static int pcnet32_tx(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned int dirty_tx = lp->dirty_tx; - int delta; - int must_restart = 0; - - while (dirty_tx != lp->cur_tx) { - int entry = dirty_tx & lp->tx_mod_mask; - int status = (short)le16_to_cpu(lp->tx_ring[entry].status); - - if (status < 0) - break; /* It still hasn't been Txed */ - - lp->tx_ring[entry].base = 0; - - if (status & 0x4000) { - /* There was a major error, log it. 
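-			   The MISC dword read below is decoded into the
-			   aborted/carrier/window/fifo error counters.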
*/ - int err_status = le32_to_cpu(lp->tx_ring[entry].misc); - dev->stats.tx_errors++; - netif_err(lp, tx_err, dev, - "Tx error status=%04x err_status=%08x\n", - status, err_status); - if (err_status & 0x04000000) - dev->stats.tx_aborted_errors++; - if (err_status & 0x08000000) - dev->stats.tx_carrier_errors++; - if (err_status & 0x10000000) - dev->stats.tx_window_errors++; -#ifndef DO_DXSUFLO - if (err_status & 0x40000000) { - dev->stats.tx_fifo_errors++; - /* Ackk! On FIFO errors the Tx unit is turned off! */ - /* Remove this verbosity later! */ - netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); - must_restart = 1; - } -#else - if (err_status & 0x40000000) { - dev->stats.tx_fifo_errors++; - if (!lp->dxsuflo) { /* If controller doesn't recover ... */ - /* Ackk! On FIFO errors the Tx unit is turned off! */ - /* Remove this verbosity later! */ - netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); - must_restart = 1; - } - } -#endif - } else { - if (status & 0x1800) - dev->stats.collisions++; - dev->stats.tx_packets++; - } - - /* We must free the original skb */ - if (lp->tx_skbuff[entry]) { - pci_unmap_single(lp->pci_dev, - lp->tx_dma_addr[entry], - lp->tx_skbuff[entry]-> - len, PCI_DMA_TODEVICE); - dev_kfree_skb_any(lp->tx_skbuff[entry]); - lp->tx_skbuff[entry] = NULL; - lp->tx_dma_addr[entry] = 0; - } - dirty_tx++; - } - - delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); - if (delta > lp->tx_ring_size) { - netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n", - dirty_tx, lp->cur_tx, lp->tx_full); - dirty_tx += lp->tx_ring_size; - delta -= lp->tx_ring_size; - } - - if (lp->tx_full && - netif_queue_stopped(dev) && - delta < lp->tx_ring_size - 2) { - /* The ring is no longer full, clear tbusy. */ - lp->tx_full = 0; - netif_wake_queue(dev); - } - lp->dirty_tx = dirty_tx; - - return must_restart; -} - -static int pcnet32_poll(struct napi_struct *napi, int budget) -{ - struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); - struct net_device *dev = lp->dev; - unsigned long ioaddr = dev->base_addr; - unsigned long flags; - int work_done; - u16 val; - - work_done = pcnet32_rx(dev, budget); - - spin_lock_irqsave(&lp->lock, flags); - if (pcnet32_tx(dev)) { - /* reset the chip to clear the error condition, then restart */ - lp->a.reset(ioaddr); - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ - pcnet32_restart(dev, CSR0_START); - netif_wake_queue(dev); - } - spin_unlock_irqrestore(&lp->lock, flags); - - if (work_done < budget) { - spin_lock_irqsave(&lp->lock, flags); - - __napi_complete(napi); - - /* clear interrupt masks */ - val = lp->a.read_csr(ioaddr, CSR3); - val &= 0x00ff; - lp->a.write_csr(ioaddr, CSR3, val); - - /* Set interrupt enable. 
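-		   CSR0_INTEN re-arms the chip's interrupt now that NAPI
-		   polling is complete.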
*/ - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); - - spin_unlock_irqrestore(&lp->lock, flags); - } - return work_done; -} - -#define PCNET32_REGS_PER_PHY 32 -#define PCNET32_MAX_PHYS 32 -static int pcnet32_get_regs_len(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - int j = lp->phycount * PCNET32_REGS_PER_PHY; - - return (PCNET32_NUM_REGS + j) * sizeof(u16); -} - -static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *ptr) -{ - int i, csr0; - u16 *buff = ptr; - struct pcnet32_private *lp = netdev_priv(dev); - struct pcnet32_access *a = &lp->a; - ulong ioaddr = dev->base_addr; - unsigned long flags; - - spin_lock_irqsave(&lp->lock, flags); - - csr0 = a->read_csr(ioaddr, CSR0); - if (!(csr0 & CSR0_STOP)) /* If not stopped */ - pcnet32_suspend(dev, &flags, 1); - - /* read address PROM */ - for (i = 0; i < 16; i += 2) - *buff++ = inw(ioaddr + i); - - /* read control and status registers */ - for (i = 0; i < 90; i++) - *buff++ = a->read_csr(ioaddr, i); - - *buff++ = a->read_csr(ioaddr, 112); - *buff++ = a->read_csr(ioaddr, 114); - - /* read bus configuration registers */ - for (i = 0; i < 30; i++) - *buff++ = a->read_bcr(ioaddr, i); - - *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ - - for (i = 31; i < 36; i++) - *buff++ = a->read_bcr(ioaddr, i); - - /* read mii phy registers */ - if (lp->mii) { - int j; - for (j = 0; j < PCNET32_MAX_PHYS; j++) { - if (lp->phymask & (1 << j)) { - for (i = 0; i < PCNET32_REGS_PER_PHY; i++) { - lp->a.write_bcr(ioaddr, 33, - (j << 5) | i); - *buff++ = lp->a.read_bcr(ioaddr, 34); - } - } - } - } - - if (!(csr0 & CSR0_STOP)) { /* If not stopped */ - int csr5; - - /* clear SUSPEND (SPND) - CSR5 bit 0 */ - csr5 = a->read_csr(ioaddr, CSR5); - a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); - } - - spin_unlock_irqrestore(&lp->lock, flags); -} - -static const struct ethtool_ops pcnet32_ethtool_ops = { - .get_settings = pcnet32_get_settings, - .set_settings = pcnet32_set_settings, - .get_drvinfo = pcnet32_get_drvinfo, - .get_msglevel = pcnet32_get_msglevel, - .set_msglevel = pcnet32_set_msglevel, - .nway_reset = pcnet32_nway_reset, - .get_link = pcnet32_get_link, - .get_ringparam = pcnet32_get_ringparam, - .set_ringparam = pcnet32_set_ringparam, - .get_strings = pcnet32_get_strings, - .self_test = pcnet32_ethtool_test, - .set_phys_id = pcnet32_set_phys_id, - .get_regs_len = pcnet32_get_regs_len, - .get_regs = pcnet32_get_regs, - .get_sset_count = pcnet32_get_sset_count, -}; - -/* only probes for non-PCI devices, the rest are handled by - * pci_register_driver via pcnet32_probe_pci */ - -static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist) -{ - unsigned int *port, ioaddr; - - /* search for PCnet32 VLB cards at known addresses */ - for (port = pcnet32_portlist; (ioaddr = *port); port++) { - if (request_region - (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { - /* check if there is really a pcnet chip on that ioaddr */ - if ((inb(ioaddr + 14) == 0x57) && - (inb(ioaddr + 15) == 0x57)) { - pcnet32_probe1(ioaddr, 0, NULL); - } else { - release_region(ioaddr, PCNET32_TOTAL_SIZE); - } - } - } -} - -static int __devinit -pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - unsigned long ioaddr; - int err; - - err = pci_enable_device(pdev); - if (err < 0) { - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_err("failed to enable device -- err=%d\n", err); - return err; - } - pci_set_master(pdev); - - ioaddr = pci_resource_start(pdev, 0); - if (!ioaddr) { - if 
(pcnet32_debug & NETIF_MSG_PROBE) - pr_err("card has no PCI IO resources, aborting\n"); - return -ENODEV; - } - - if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_err("architecture does not support 32bit PCI busmaster DMA\n"); - return -ENODEV; - } - if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_err("io address range already allocated\n"); - return -EBUSY; - } - - err = pcnet32_probe1(ioaddr, 1, pdev); - if (err < 0) - pci_disable_device(pdev); - - return err; -} - -static const struct net_device_ops pcnet32_netdev_ops = { - .ndo_open = pcnet32_open, - .ndo_stop = pcnet32_close, - .ndo_start_xmit = pcnet32_start_xmit, - .ndo_tx_timeout = pcnet32_tx_timeout, - .ndo_get_stats = pcnet32_get_stats, - .ndo_set_multicast_list = pcnet32_set_multicast_list, - .ndo_do_ioctl = pcnet32_ioctl, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = pcnet32_poll_controller, -#endif -}; - -/* pcnet32_probe1 - * Called from both pcnet32_probe_vlbus and pcnet32_probe_pci. - * pdev will be NULL when called from pcnet32_probe_vlbus. - */ -static int __devinit -pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) -{ - struct pcnet32_private *lp; - int i, media; - int fdx, mii, fset, dxsuflo; - int chip_version; - char *chipname; - struct net_device *dev; - struct pcnet32_access *a = NULL; - u8 promaddr[6]; - int ret = -ENODEV; - - /* reset the chip */ - pcnet32_wio_reset(ioaddr); - - /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ - if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { - a = &pcnet32_wio; - } else { - pcnet32_dwio_reset(ioaddr); - if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && - pcnet32_dwio_check(ioaddr)) { - a = &pcnet32_dwio; - } else { - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_err("No access methods\n"); - goto err_release_region; - } - } - - chip_version = - a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16); - if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) - pr_info(" PCnet chip version is %#x\n", chip_version); - if ((chip_version & 0xfff) != 0x003) { - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_info("Unsupported chip version\n"); - goto err_release_region; - } - - /* initialize variables */ - fdx = mii = fset = dxsuflo = 0; - chip_version = (chip_version >> 12) & 0xffff; - - switch (chip_version) { - case 0x2420: - chipname = "PCnet/PCI 79C970"; /* PCI */ - break; - case 0x2430: - if (shared) - chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ - else - chipname = "PCnet/32 79C965"; /* 486/VL bus */ - break; - case 0x2621: - chipname = "PCnet/PCI II 79C970A"; /* PCI */ - fdx = 1; - break; - case 0x2623: - chipname = "PCnet/FAST 79C971"; /* PCI */ - fdx = 1; - mii = 1; - fset = 1; - break; - case 0x2624: - chipname = "PCnet/FAST+ 79C972"; /* PCI */ - fdx = 1; - mii = 1; - fset = 1; - break; - case 0x2625: - chipname = "PCnet/FAST III 79C973"; /* PCI */ - fdx = 1; - mii = 1; - break; - case 0x2626: - chipname = "PCnet/Home 79C978"; /* PCI */ - fdx = 1; - /* - * This is based on specs published at www.amd.com. This section - * assumes that a card with a 79C978 wants to go into standard - * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, - * and the module option homepna=1 can select this instead.
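- * For example (a hypothetical invocation, values made up): a box with
- * two 79C978s could be loaded with "modprobe pcnet32 homepna=1,0" to put
- * the first card on HomePNA wiring and leave the second on 10Mb ethernet.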
- */ - media = a->read_bcr(ioaddr, 49); - media &= ~3; /* default to 10Mb ethernet */ - if (cards_found < MAX_UNITS && homepna[cards_found]) - media |= 1; /* switch to home wiring mode */ - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_DEBUG PFX "media set to %sMbit mode\n", - (media & 1) ? "1" : "10"); - a->write_bcr(ioaddr, 49, media); - break; - case 0x2627: - chipname = "PCnet/FAST III 79C975"; /* PCI */ - fdx = 1; - mii = 1; - break; - case 0x2628: - chipname = "PCnet/PRO 79C976"; - fdx = 1; - mii = 1; - break; - default: - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_info("PCnet version %#x, no PCnet32 chip\n", - chip_version); - goto err_release_region; - } - - /* - * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit - * starting until the packet is loaded. Strike one for reliability, lose - * one for latency - although on PCI this isn't a big loss. Older chips - * have FIFO's smaller than a packet, so you can't do this. - * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn. - */ - - if (fset) { - a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860)); - a->write_csr(ioaddr, 80, - (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); - dxsuflo = 1; - } - - dev = alloc_etherdev(sizeof(*lp)); - if (!dev) { - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_err("Memory allocation failed\n"); - ret = -ENOMEM; - goto err_release_region; - } - - if (pdev) - SET_NETDEV_DEV(dev, &pdev->dev); - - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_info("%s at %#3lx,", chipname, ioaddr); - - /* In most chips, after a chip reset, the ethernet address is read from the - * station address PROM at the base address and programmed into the - * "Physical Address Registers" CSR12-14. - * As a precautionary measure, we read the PROM values and complain if - * they disagree with the CSRs. If they miscompare, and the PROM addr - * is valid, then the PROM addr is used. - */ - for (i = 0; i < 3; i++) { - unsigned int val; - val = a->read_csr(ioaddr, i + 12) & 0x0ffff; - /* There may be endianness issues here. 
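- (The low byte of each 16-bit CSR holds the earlier octet of the MAC
- address, so storing (val & 0xff) first preserves wire order; whether the
- same holds on big-endian machines depends on how read_csr() maps the
- I/O access, hence the caution. The PROM comparison below catches a
- mismatch either way.)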
*/ - dev->dev_addr[2 * i] = val & 0x0ff; - dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff; - } - - /* read PROM address and compare with CSR address */ - for (i = 0; i < 6; i++) - promaddr[i] = inb(ioaddr + i); - - if (memcmp(promaddr, dev->dev_addr, 6) || - !is_valid_ether_addr(dev->dev_addr)) { - if (is_valid_ether_addr(promaddr)) { - if (pcnet32_debug & NETIF_MSG_PROBE) { - pr_cont(" warning: CSR address invalid,\n"); - pr_info(" using instead PROM address of"); - } - memcpy(dev->dev_addr, promaddr, 6); - } - } - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - - /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ - if (!is_valid_ether_addr(dev->perm_addr)) - memset(dev->dev_addr, 0, ETH_ALEN); - - if (pcnet32_debug & NETIF_MSG_PROBE) { - pr_cont(" %pM", dev->dev_addr); - - /* Version 0x2623 and 0x2624 */ - if (((chip_version + 1) & 0xfffe) == 0x2624) { - i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ - pr_info(" tx_start_pt(0x%04x):", i); - switch (i >> 10) { - case 0: - pr_cont(" 20 bytes,"); - break; - case 1: - pr_cont(" 64 bytes,"); - break; - case 2: - pr_cont(" 128 bytes,"); - break; - case 3: - pr_cont("~220 bytes,"); - break; - } - i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ - pr_cont(" BCR18(%x):", i & 0xffff); - if (i & (1 << 5)) - pr_cont("BurstWrEn "); - if (i & (1 << 6)) - pr_cont("BurstRdEn "); - if (i & (1 << 7)) - pr_cont("DWordIO "); - if (i & (1 << 11)) - pr_cont("NoUFlow "); - i = a->read_bcr(ioaddr, 25); - pr_info(" SRAMSIZE=0x%04x,", i << 8); - i = a->read_bcr(ioaddr, 26); - pr_cont(" SRAM_BND=0x%04x,", i << 8); - i = a->read_bcr(ioaddr, 27); - if (i & (1 << 14)) - pr_cont("LowLatRx"); - } - } - - dev->base_addr = ioaddr; - lp = netdev_priv(dev); - /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ - lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block), - &lp->init_dma_addr); - if (!lp->init_block) { - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_err("Consistent memory allocation failed\n"); - ret = -ENOMEM; - goto err_free_netdev; - } - lp->pci_dev = pdev; - - lp->dev = dev; - - spin_lock_init(&lp->lock); - - lp->name = chipname; - lp->shared_irq = shared; - lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ - lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ - lp->tx_mod_mask = lp->tx_ring_size - 1; - lp->rx_mod_mask = lp->rx_ring_size - 1; - lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); - lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); - lp->mii_if.full_duplex = fdx; - lp->mii_if.phy_id_mask = 0x1f; - lp->mii_if.reg_num_mask = 0x1f; - lp->dxsuflo = dxsuflo; - lp->mii = mii; - lp->chip_version = chip_version; - lp->msg_enable = pcnet32_debug; - if ((cards_found >= MAX_UNITS) || - (options[cards_found] >= sizeof(options_mapping))) - lp->options = PCNET32_PORT_ASEL; - else - lp->options = options_mapping[options[cards_found]]; - lp->mii_if.dev = dev; - lp->mii_if.mdio_read = mdio_read; - lp->mii_if.mdio_write = mdio_write; - - /* napi.weight is used in both the napi and non-napi cases */ - lp->napi.weight = lp->rx_ring_size / 2; - - netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); - - if (fdx && !(lp->options & PCNET32_PORT_ASEL) && - ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) - lp->options |= PCNET32_PORT_FD; - - lp->a = *a; - - /* prior to register_netdev, dev->name is not yet correct */ - if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { - ret = -ENOMEM; - goto err_free_ring; - } - /* 
detect special T1/E1 WAN card by checking for MAC address */ - if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && - dev->dev_addr[2] == 0x75) - lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; - - lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */ - lp->init_block->tlen_rlen = - cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); - for (i = 0; i < 6; i++) - lp->init_block->phys_addr[i] = dev->dev_addr[i]; - lp->init_block->filter[0] = 0x00000000; - lp->init_block->filter[1] = 0x00000000; - lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); - lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); - - /* switch pcnet32 to 32bit mode */ - a->write_bcr(ioaddr, 20, 2); - - a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); - a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); - - if (pdev) { /* use the IRQ provided by PCI */ - dev->irq = pdev->irq; - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_cont(" assigned IRQ %d\n", dev->irq); - } else { - unsigned long irq_mask = probe_irq_on(); - - /* - * To auto-IRQ we enable the initialization-done and DMA error - * interrupts. For ISA boards we get a DMA error, but VLB and PCI - * boards will work. - */ - /* Trigger an initialization just for the interrupt. */ - a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT); - mdelay(1); - - dev->irq = probe_irq_off(irq_mask); - if (!dev->irq) { - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_cont(", failed to detect IRQ line\n"); - ret = -ENODEV; - goto err_free_ring; - } - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_cont(", probed IRQ %d\n", dev->irq); - } - - /* Set the mii phy_id so that we can query the link state */ - if (lp->mii) { - /* lp->phycount and lp->phymask are set to 0 by memset above */ - - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f; - /* scan for PHYs */ - for (i = 0; i < PCNET32_MAX_PHYS; i++) { - unsigned short id1, id2; - - id1 = mdio_read(dev, i, MII_PHYSID1); - if (id1 == 0xffff) - continue; - id2 = mdio_read(dev, i, MII_PHYSID2); - if (id2 == 0xffff) - continue; - if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624) - continue; /* 79C971 & 79C972 have phantom phy at id 31 */ - lp->phycount++; - lp->phymask |= (1 << i); - lp->mii_if.phy_id = i; - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_info("Found PHY %04x:%04x at address %d\n", - id1, id2, i); - } - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); - if (lp->phycount > 1) - lp->options |= PCNET32_PORT_MII; - } - - init_timer(&lp->watchdog_timer); - lp->watchdog_timer.data = (unsigned long)dev; - lp->watchdog_timer.function = (void *)&pcnet32_watchdog; - - /* The PCNET32-specific entries in the device structure. */ - dev->netdev_ops = &pcnet32_netdev_ops; - dev->ethtool_ops = &pcnet32_ethtool_ops; - dev->watchdog_timeo = (5 * HZ); - - /* Fill in the generic fields of the device structure. 
*/ - if (register_netdev(dev)) - goto err_free_ring; - - if (pdev) { - pci_set_drvdata(pdev, dev); - } else { - lp->next = pcnet32_dev; - pcnet32_dev = dev; - } - - if (pcnet32_debug & NETIF_MSG_PROBE) - pr_info("%s: registered as %s\n", dev->name, lp->name); - cards_found++; - - /* enable LED writes */ - a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000); - - return 0; - -err_free_ring: - pcnet32_free_ring(dev); - pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), - lp->init_block, lp->init_dma_addr); -err_free_netdev: - free_netdev(dev); -err_release_region: - release_region(ioaddr, PCNET32_TOTAL_SIZE); - return ret; -} - -/* if any allocation fails, caller must also call pcnet32_free_ring */ -static int pcnet32_alloc_ring(struct net_device *dev, const char *name) -{ - struct pcnet32_private *lp = netdev_priv(dev); - - lp->tx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * - lp->tx_ring_size, - &lp->tx_ring_dma_addr); - if (lp->tx_ring == NULL) { - netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); - return -ENOMEM; - } - - lp->rx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct pcnet32_rx_head) * - lp->rx_ring_size, - &lp->rx_ring_dma_addr); - if (lp->rx_ring == NULL) { - netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); - return -ENOMEM; - } - - lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), - GFP_ATOMIC); - if (!lp->tx_dma_addr) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); - return -ENOMEM; - } - - lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), - GFP_ATOMIC); - if (!lp->rx_dma_addr) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); - return -ENOMEM; - } - - lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), - GFP_ATOMIC); - if (!lp->tx_skbuff) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); - return -ENOMEM; - } - - lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), - GFP_ATOMIC); - if (!lp->rx_skbuff) { - netif_err(lp, drv, dev, "Memory allocation failed\n"); - return -ENOMEM; - } - - return 0; -} - -static void pcnet32_free_ring(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - - kfree(lp->tx_skbuff); - lp->tx_skbuff = NULL; - - kfree(lp->rx_skbuff); - lp->rx_skbuff = NULL; - - kfree(lp->tx_dma_addr); - lp->tx_dma_addr = NULL; - - kfree(lp->rx_dma_addr); - lp->rx_dma_addr = NULL; - - if (lp->tx_ring) { - pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * - lp->tx_ring_size, lp->tx_ring, - lp->tx_ring_dma_addr); - lp->tx_ring = NULL; - } - - if (lp->rx_ring) { - pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_rx_head) * - lp->rx_ring_size, lp->rx_ring, - lp->rx_ring_dma_addr); - lp->rx_ring = NULL; - } -} - -static int pcnet32_open(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - struct pci_dev *pdev = lp->pci_dev; - unsigned long ioaddr = dev->base_addr; - u16 val; - int i; - int rc; - unsigned long flags; - - if (request_irq(dev->irq, pcnet32_interrupt, - lp->shared_irq ? 
IRQF_SHARED : 0, dev->name, - (void *)dev)) { - return -EAGAIN; - } - - spin_lock_irqsave(&lp->lock, flags); - /* Check for a valid station address */ - if (!is_valid_ether_addr(dev->dev_addr)) { - rc = -EINVAL; - goto err_free_irq; - } - - /* Reset the PCNET32 */ - lp->a.reset(ioaddr); - - /* switch pcnet32 to 32bit mode */ - lp->a.write_bcr(ioaddr, 20, 2); - - netif_printk(lp, ifup, KERN_DEBUG, dev, - "%s() irq %d tx/rx rings %#x/%#x init %#x\n", - __func__, dev->irq, (u32) (lp->tx_ring_dma_addr), - (u32) (lp->rx_ring_dma_addr), - (u32) (lp->init_dma_addr)); - - /* set/reset autoselect bit */ - val = lp->a.read_bcr(ioaddr, 2) & ~2; - if (lp->options & PCNET32_PORT_ASEL) - val |= 2; - lp->a.write_bcr(ioaddr, 2, val); - - /* handle full duplex setting */ - if (lp->mii_if.full_duplex) { - val = lp->a.read_bcr(ioaddr, 9) & ~3; - if (lp->options & PCNET32_PORT_FD) { - val |= 1; - if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) - val |= 2; - } else if (lp->options & PCNET32_PORT_ASEL) { - /* workaround for xSeries 250, turn on for 79C975 only */ - if (lp->chip_version == 0x2627) - val |= 3; - } - lp->a.write_bcr(ioaddr, 9, val); - } - - /* set/reset GPSI bit in test register */ - val = lp->a.read_csr(ioaddr, 124) & ~0x10; - if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) - val |= 0x10; - lp->a.write_csr(ioaddr, 124, val); - - /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ - if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && - (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || - pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { - if (lp->options & PCNET32_PORT_ASEL) { - lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; - netif_printk(lp, link, KERN_DEBUG, dev, - "Setting 100Mb-Full Duplex\n"); - } - } - if (lp->phycount < 2) { - /* - * 24 Jun 2004: according to AMD, in order to change the PHY, - * DANAS (or DISPM for 79C976) must be set; then select the speed, - * duplex, and/or enable auto negotiation, and clear DANAS - */ - if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { - lp->a.write_bcr(ioaddr, 32, - lp->a.read_bcr(ioaddr, 32) | 0x0080); - /* disable Auto Negotiation, set 10Mbps, HD */ - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; - if (lp->options & PCNET32_PORT_FD) - val |= 0x10; - if (lp->options & PCNET32_PORT_100) - val |= 0x08; - lp->a.write_bcr(ioaddr, 32, val); - } else { - if (lp->options & PCNET32_PORT_ASEL) { - lp->a.write_bcr(ioaddr, 32, - lp->a.read_bcr(ioaddr, - 32) | 0x0080); - /* enable auto negotiate, setup, disable fd */ - val = lp->a.read_bcr(ioaddr, 32) & ~0x98; - val |= 0x20; - lp->a.write_bcr(ioaddr, 32, val); - } - } - } else { - int first_phy = -1; - u16 bmcr; - u32 bcr9; - struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; - - /* - * There is really no good way to handle multiple PHYs - * other than turning off all automatics - */ - val = lp->a.read_bcr(ioaddr, 2); - lp->a.write_bcr(ioaddr, 2, val & ~2); - val = lp->a.read_bcr(ioaddr, 32); - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ - - if (!(lp->options & PCNET32_PORT_ASEL)) { - /* setup ecmd */ - ecmd.port = PORT_MII; - ecmd.transceiver = XCVR_INTERNAL; - ecmd.autoneg = AUTONEG_DISABLE; - ethtool_cmd_speed_set(&ecmd, - (lp->options & PCNET32_PORT_100) ?
- SPEED_100 : SPEED_10); - bcr9 = lp->a.read_bcr(ioaddr, 9); - - if (lp->options & PCNET32_PORT_FD) { - ecmd.duplex = DUPLEX_FULL; - bcr9 |= (1 << 0); - } else { - ecmd.duplex = DUPLEX_HALF; - bcr9 &= ~(1 << 0); - } - lp->a.write_bcr(ioaddr, 9, bcr9); - } - - for (i = 0; i < PCNET32_MAX_PHYS; i++) { - if (lp->phymask & (1 << i)) { - /* isolate all but the first PHY */ - bmcr = mdio_read(dev, i, MII_BMCR); - if (first_phy == -1) { - first_phy = i; - mdio_write(dev, i, MII_BMCR, - bmcr & ~BMCR_ISOLATE); - } else { - mdio_write(dev, i, MII_BMCR, - bmcr | BMCR_ISOLATE); - } - /* use mii_ethtool_sset to setup PHY */ - lp->mii_if.phy_id = i; - ecmd.phy_address = i; - if (lp->options & PCNET32_PORT_ASEL) { - mii_ethtool_gset(&lp->mii_if, &ecmd); - ecmd.autoneg = AUTONEG_ENABLE; - } - mii_ethtool_sset(&lp->mii_if, &ecmd); - } - } - lp->mii_if.phy_id = first_phy; - netif_info(lp, link, dev, "Using PHY number %d\n", first_phy); - } - -#ifdef DO_DXSUFLO - if (lp->dxsuflo) { /* Disable transmit stop on underflow */ - val = lp->a.read_csr(ioaddr, CSR3); - val |= 0x40; - lp->a.write_csr(ioaddr, CSR3, val); - } -#endif - - lp->init_block->mode = - cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); - pcnet32_load_multicast(dev); - - if (pcnet32_init_ring(dev)) { - rc = -ENOMEM; - goto err_free_ring; - } - - napi_enable(&lp->napi); - - /* Re-initialize the PCNET32, and start it when done. */ - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); - - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT); - - netif_start_queue(dev); - - if (lp->chip_version >= PCNET32_79C970A) { - /* Print the link status and start the watchdog */ - pcnet32_check_media(dev, 1); - mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT); - } - - i = 0; - while (i++ < 100) - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON) - break; - /* - * We used to clear the InitDone bit, 0x0100, here but Mark Stockton - * reports that doing so triggers a bug in the '974. - */ - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL); - - netif_printk(lp, ifup, KERN_DEBUG, dev, - "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n", - i, - (u32) (lp->init_dma_addr), - lp->a.read_csr(ioaddr, CSR0)); - - spin_unlock_irqrestore(&lp->lock, flags); - - return 0; /* Always succeed */ - -err_free_ring: - /* free any allocated skbuffs */ - pcnet32_purge_rx_ring(dev); - - /* - * Switch back to 16bit mode to avoid problems with dumb - * DOS packet driver after a warm reboot - */ - lp->a.write_bcr(ioaddr, 20, 4); - -err_free_irq: - spin_unlock_irqrestore(&lp->lock, flags); - free_irq(dev->irq, dev); - return rc; -} - -/* - * The LANCE has been halted for one reason or another (busmaster memory - * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure, - * etc.). Modern LANCE variants always reload their ring-buffer - * configuration when restarted, so we must reinitialize our ring - * context before restarting. As part of this reinitialization, - * find all packets still on the Tx ring and pretend that they had been - * sent (in effect, drop the packets on the floor) - the higher-level - * protocols will time out and retransmit. It'd be better to shuffle - * these skbs to a temp list and then actually re-Tx them after - * restarting the chip, but I'm too lazy to do so right now.
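- * A minimal sketch of that alternative (illustrative only, untested;
- * note pcnet32_start_xmit() takes lp->lock itself, so the requeue leg
- * would have to run after the caller drops the lock):
- *
- *	struct sk_buff_head resend;
- *	struct sk_buff *skb;
- *	int i;
- *
- *	__skb_queue_head_init(&resend);
- *	for (i = 0; i < lp->tx_ring_size; i++) {
- *		if (!lp->tx_skbuff[i])
- *			continue;
- *		pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
- *				 lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
- *		__skb_queue_tail(&resend, lp->tx_skbuff[i]);
- *		lp->tx_skbuff[i] = NULL;
- *		lp->tx_dma_addr[i] = 0;
- *	}
- *	then, once the chip has been restarted and the lock released:
- *	while ((skb = __skb_dequeue(&resend)) != NULL)
- *		pcnet32_start_xmit(skb, dev);
- *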
dplatt@3do.com - */ - -static void pcnet32_purge_tx_ring(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - int i; - - for (i = 0; i < lp->tx_ring_size; i++) { - lp->tx_ring[i].status = 0; /* CPU owns buffer */ - wmb(); /* Make sure adapter sees owner change */ - if (lp->tx_skbuff[i]) { - pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], - lp->tx_skbuff[i]->len, - PCI_DMA_TODEVICE); - dev_kfree_skb_any(lp->tx_skbuff[i]); - } - lp->tx_skbuff[i] = NULL; - lp->tx_dma_addr[i] = 0; - } -} - -/* Initialize the PCNET32 Rx and Tx rings. */ -static int pcnet32_init_ring(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - int i; - - lp->tx_full = 0; - lp->cur_rx = lp->cur_tx = 0; - lp->dirty_rx = lp->dirty_tx = 0; - - for (i = 0; i < lp->rx_ring_size; i++) { - struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; - if (rx_skbuff == NULL) { - lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB); - rx_skbuff = lp->rx_skbuff[i]; - if (!rx_skbuff) { - /* there is not much we can do at this point */ - netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n", - __func__); - return -1; - } - skb_reserve(rx_skbuff, NET_IP_ALIGN); - } - - rmb(); - if (lp->rx_dma_addr[i] == 0) - lp->rx_dma_addr[i] = - pci_map_single(lp->pci_dev, rx_skbuff->data, - PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); - lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); - lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); - wmb(); /* Make sure owner changes after all others are visible */ - lp->rx_ring[i].status = cpu_to_le16(0x8000); - } - /* The Tx buffer address is filled in as needed, but we do need to clear - * the upper ownership bit. */ - for (i = 0; i < lp->tx_ring_size; i++) { - lp->tx_ring[i].status = 0; /* CPU owns buffer */ - wmb(); /* Make sure adapter sees owner change */ - lp->tx_ring[i].base = 0; - lp->tx_dma_addr[i] = 0; - } - - lp->init_block->tlen_rlen = - cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); - for (i = 0; i < 6; i++) - lp->init_block->phys_addr[i] = dev->dev_addr[i]; - lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); - lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); - wmb(); /* Make sure all changes are visible */ - return 0; -} - -/* the pcnet32 has been issued a stop or reset. Wait for the stop bit - * then flush the pending transmit operations, re-initialize the ring, - * and tell the chip to initialize. - */ -static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; - int i; - - /* wait for stop */ - for (i = 0; i < 100; i++) - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP) - break; - - if (i >= 100) - netif_err(lp, drv, dev, "%s timed out waiting for stop\n", - __func__); - - pcnet32_purge_tx_ring(dev); - if (pcnet32_init_ring(dev)) - return; - - /* ReInit Ring */ - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT); - i = 0; - while (i++ < 1000) - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON) - break; - - lp->a.write_csr(ioaddr, CSR0, csr0_bits); -} - -static void pcnet32_tx_timeout(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr, flags; - - spin_lock_irqsave(&lp->lock, flags); - /* Transmitter timeout, serious problems. 
*/ - if (pcnet32_debug & NETIF_MSG_DRV) - pr_err("%s: transmit timed out, status %4.4x, resetting\n", - dev->name, lp->a.read_csr(ioaddr, CSR0)); - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); - dev->stats.tx_errors++; - if (netif_msg_tx_err(lp)) { - int i; - printk(KERN_DEBUG - " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", - lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", - lp->cur_rx); - for (i = 0; i < lp->rx_ring_size; i++) - printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", - le32_to_cpu(lp->rx_ring[i].base), - (-le16_to_cpu(lp->rx_ring[i].buf_length)) & - 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), - le16_to_cpu(lp->rx_ring[i].status)); - for (i = 0; i < lp->tx_ring_size; i++) - printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", - le32_to_cpu(lp->tx_ring[i].base), - (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, - le32_to_cpu(lp->tx_ring[i].misc), - le16_to_cpu(lp->tx_ring[i].status)); - printk("\n"); - } - pcnet32_restart(dev, CSR0_NORMAL); - - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); - - spin_unlock_irqrestore(&lp->lock, flags); -} - -static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; - u16 status; - int entry; - unsigned long flags; - - spin_lock_irqsave(&lp->lock, flags); - - netif_printk(lp, tx_queued, KERN_DEBUG, dev, - "%s() called, csr0 %4.4x\n", - __func__, lp->a.read_csr(ioaddr, CSR0)); - - /* Default status -- will not enable Successful-TxDone - * interrupt when that option is available to us. - */ - status = 0x8300; - - /* Fill in a Tx ring entry */ - - /* Mask to ring buffer boundary. */ - entry = lp->cur_tx & lp->tx_mod_mask; - - /* Caution: the write order is important here, set the status - * with the "ownership" bits last. */ - - lp->tx_ring[entry].length = cpu_to_le16(-skb->len); - - lp->tx_ring[entry].misc = 0x00000000; - - lp->tx_skbuff[entry] = skb; - lp->tx_dma_addr[entry] = - pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); - lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); - wmb(); /* Make sure owner changes after all others are visible */ - lp->tx_ring[entry].status = cpu_to_le16(status); - - lp->cur_tx++; - dev->stats.tx_bytes += skb->len; - - /* Trigger an immediate send poll. */ - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); - - if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { - lp->tx_full = 1; - netif_stop_queue(dev); - } - spin_unlock_irqrestore(&lp->lock, flags); - return NETDEV_TX_OK; -} - -/* The PCNET32 interrupt handler. */ -static irqreturn_t -pcnet32_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct pcnet32_private *lp; - unsigned long ioaddr; - u16 csr0; - int boguscnt = max_interrupt_work; - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - - spin_lock(&lp->lock); - - csr0 = lp->a.read_csr(ioaddr, CSR0); - while ((csr0 & 0x8f00) && --boguscnt >= 0) { - if (csr0 == 0xffff) - break; /* PCMCIA remove happened */ - /* Acknowledge all of the current interrupt sources ASAP. */ - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f); - - netif_printk(lp, intr, KERN_DEBUG, dev, - "interrupt csr0=%#2.2x new csr=%#2.2x\n", - csr0, lp->a.read_csr(ioaddr, CSR0)); - - /* Log misc errors. */ - if (csr0 & 0x4000) - dev->stats.tx_errors++; /* Tx babble. */ - if (csr0 & 0x1000) { - /* - * This happens when our receive ring is full. 
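- * (Bit 0x1000 of CSR0 is the MISS latch: the chip took a frame off the
- * wire and had no descriptor it owned to put it in.)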
This - * shouldn't be a problem as we will see normal rx - * interrupts for the frames in the receive ring. But - * there are some PCI chipsets (I can reproduce this - * on SP3G with Intel saturn chipset) which have - * sometimes problems and will fill up the receive - * ring with error descriptors. In this situation we - * don't get a rx interrupt, but a missed frame - * interrupt sooner or later. - */ - dev->stats.rx_errors++; /* Missed a Rx frame. */ - } - if (csr0 & 0x0800) { - netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n", - csr0); - /* unlike for the lance, there is no restart needed */ - } - if (napi_schedule_prep(&lp->napi)) { - u16 val; - /* set interrupt masks */ - val = lp->a.read_csr(ioaddr, CSR3); - val |= 0x5f00; - lp->a.write_csr(ioaddr, CSR3, val); - - __napi_schedule(&lp->napi); - break; - } - csr0 = lp->a.read_csr(ioaddr, CSR0); - } - - netif_printk(lp, intr, KERN_DEBUG, dev, - "exiting interrupt, csr0=%#4.4x\n", - lp->a.read_csr(ioaddr, CSR0)); - - spin_unlock(&lp->lock); - - return IRQ_HANDLED; -} - -static int pcnet32_close(struct net_device *dev) -{ - unsigned long ioaddr = dev->base_addr; - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long flags; - - del_timer_sync(&lp->watchdog_timer); - - netif_stop_queue(dev); - napi_disable(&lp->napi); - - spin_lock_irqsave(&lp->lock, flags); - - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); - - netif_printk(lp, ifdown, KERN_DEBUG, dev, - "Shutting down ethercard, status was %2.2x\n", - lp->a.read_csr(ioaddr, CSR0)); - - /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); - - /* - * Switch back to 16bit mode to avoid problems with dumb - * DOS packet driver after a warm reboot - */ - lp->a.write_bcr(ioaddr, 20, 4); - - spin_unlock_irqrestore(&lp->lock, flags); - - free_irq(dev->irq, dev); - - spin_lock_irqsave(&lp->lock, flags); - - pcnet32_purge_rx_ring(dev); - pcnet32_purge_tx_ring(dev); - - spin_unlock_irqrestore(&lp->lock, flags); - - return 0; -} - -static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; - unsigned long flags; - - spin_lock_irqsave(&lp->lock, flags); - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); - spin_unlock_irqrestore(&lp->lock, flags); - - return &dev->stats; -} - -/* taken from the sunlance driver, which it took from the depca driver */ -static void pcnet32_load_multicast(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - volatile struct pcnet32_init_block *ib = lp->init_block; - volatile __le16 *mcast_table = (__le16 *)ib->filter; - struct netdev_hw_addr *ha; - unsigned long ioaddr = dev->base_addr; - int i; - u32 crc; - - /* set all multicast bits */ - if (dev->flags & IFF_ALLMULTI) { - ib->filter[0] = cpu_to_le32(~0U); - ib->filter[1] = cpu_to_le32(~0U); - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); - return; - } - /* clear the multicast filter */ - ib->filter[0] = 0; - ib->filter[1] = 0; - - /* Add addresses */ - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc_le(6, ha->addr); - crc = crc >> 26; - mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf)); - } - for (i = 0; i < 4; i++) - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i, - 
le16_to_cpu(mcast_table[i])); -} - -/* - * Set or clear the multicast filter for this adaptor. - */ -static void pcnet32_set_multicast_list(struct net_device *dev) -{ - unsigned long ioaddr = dev->base_addr, flags; - struct pcnet32_private *lp = netdev_priv(dev); - int csr15, suspended; - - spin_lock_irqsave(&lp->lock, flags); - suspended = pcnet32_suspend(dev, &flags, 0); - csr15 = lp->a.read_csr(ioaddr, CSR15); - if (dev->flags & IFF_PROMISC) { - /* Log any net taps. */ - netif_info(lp, hw, dev, "Promiscuous mode enabled\n"); - lp->init_block->mode = - cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << - 7); - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000); - } else { - lp->init_block->mode = - cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff); - pcnet32_load_multicast(dev); - } - - if (suspended) { - int csr5; - /* clear SUSPEND (SPND) - CSR5 bit 0 */ - csr5 = lp->a.read_csr(ioaddr, CSR5); - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); - } else { - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); - pcnet32_restart(dev, CSR0_NORMAL); - netif_wake_queue(dev); - } - - spin_unlock_irqrestore(&lp->lock, flags); -} - -/* This routine assumes that the lp->lock is held */ -static int mdio_read(struct net_device *dev, int phy_id, int reg_num) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; - u16 val_out; - - if (!lp->mii) - return 0; - - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); - val_out = lp->a.read_bcr(ioaddr, 34); - - return val_out; -} - -/* This routine assumes that the lp->lock is held */ -static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; - - if (!lp->mii) - return; - - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); - lp->a.write_bcr(ioaddr, 34, val); -} - -static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct pcnet32_private *lp = netdev_priv(dev); - int rc; - unsigned long flags; - - /* SIOC[GS]MIIxxx ioctls */ - if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); - rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); - spin_unlock_irqrestore(&lp->lock, flags); - } else { - rc = -EOPNOTSUPP; - } - - return rc; -} - -static int pcnet32_check_otherphy(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - struct mii_if_info mii = lp->mii_if; - u16 bmcr; - int i; - - for (i = 0; i < PCNET32_MAX_PHYS; i++) { - if (i == lp->mii_if.phy_id) - continue; /* skip active phy */ - if (lp->phymask & (1 << i)) { - mii.phy_id = i; - if (mii_link_ok(&mii)) { - /* found PHY with active link */ - netif_info(lp, link, dev, "Using PHY number %d\n", - i); - - /* isolate inactive phy */ - bmcr = - mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); - mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, - bmcr | BMCR_ISOLATE); - - /* de-isolate new phy */ - bmcr = mdio_read(dev, i, MII_BMCR); - mdio_write(dev, i, MII_BMCR, - bmcr & ~BMCR_ISOLATE); - - /* set new phy address */ - lp->mii_if.phy_id = i; - return 1; - } - } - } - return 0; -} - -/* - * Show the status of the media. Similar to mii_check_media however it - * correctly shows the link speed for all (tested) pcnet32 variants. - * Devices with no mii just report link state without speed. - * - * Caller is assumed to hold and release the lp->lock. 
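- * (The MII path ends up in mdio_read()/mdio_write() via mii_link_ok(),
- * and those routines likewise assume lp->lock is held.)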
- */ - -static void pcnet32_check_media(struct net_device *dev, int verbose) -{ - struct pcnet32_private *lp = netdev_priv(dev); - int curr_link; - int prev_link = netif_carrier_ok(dev) ? 1 : 0; - u32 bcr9; - - if (lp->mii) { - curr_link = mii_link_ok(&lp->mii_if); - } else { - ulong ioaddr = dev->base_addr; /* card base I/O address */ - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0); - } - if (!curr_link) { - if (prev_link || verbose) { - netif_carrier_off(dev); - netif_info(lp, link, dev, "link down\n"); - } - if (lp->phycount > 1) { - curr_link = pcnet32_check_otherphy(dev); - prev_link = 0; - } - } else if (verbose || !prev_link) { - netif_carrier_on(dev); - if (lp->mii) { - if (netif_msg_link(lp)) { - struct ethtool_cmd ecmd = { - .cmd = ETHTOOL_GSET }; - mii_ethtool_gset(&lp->mii_if, &ecmd); - netdev_info(dev, "link up, %uMbps, %s-duplex\n", - ethtool_cmd_speed(&ecmd), - (ecmd.duplex == DUPLEX_FULL) - ? "full" : "half"); - } - bcr9 = lp->a.read_bcr(dev->base_addr, 9); - if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { - if (lp->mii_if.full_duplex) - bcr9 |= (1 << 0); - else - bcr9 &= ~(1 << 0); - lp->a.write_bcr(dev->base_addr, 9, bcr9); - } - } else { - netif_info(lp, link, dev, "link up\n"); - } - } -} - -/* - * Check for loss of link and link establishment. - * Can not use mii_check_media because it does nothing if mode is forced. - */ - -static void pcnet32_watchdog(struct net_device *dev) -{ - struct pcnet32_private *lp = netdev_priv(dev); - unsigned long flags; - - /* Print the link status if it has changed */ - spin_lock_irqsave(&lp->lock, flags); - pcnet32_check_media(dev, 0); - spin_unlock_irqrestore(&lp->lock, flags); - - mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT)); -} - -static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - if (netif_running(dev)) { - netif_device_detach(dev); - pcnet32_close(dev); - } - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; -} - -static int pcnet32_pm_resume(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - - if (netif_running(dev)) { - pcnet32_open(dev); - netif_device_attach(dev); - } - return 0; -} - -static void __devexit pcnet32_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - if (dev) { - struct pcnet32_private *lp = netdev_priv(dev); - - unregister_netdev(dev); - pcnet32_free_ring(dev); - release_region(dev->base_addr, PCNET32_TOTAL_SIZE); - pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), - lp->init_block, lp->init_dma_addr); - free_netdev(dev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - } -} - -static struct pci_driver pcnet32_driver = { - .name = DRV_NAME, - .probe = pcnet32_probe_pci, - .remove = __devexit_p(pcnet32_remove_one), - .id_table = pcnet32_pci_tbl, - .suspend = pcnet32_pm_suspend, - .resume = pcnet32_pm_resume, -}; - -/* An additional parameter that may be passed in... 
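- For example, a hypothetical load that exercises several of the
- parameters declared below (values made up):
-
-	modprobe pcnet32 debug=1 tx_start_pt=3 options=12,12 full_duplex=1,1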
*/ -static int debug = -1; -static int tx_start_pt = -1; -static int pcnet32_have_pci; - -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, DRV_NAME " debug level"); -module_param(max_interrupt_work, int, 0); -MODULE_PARM_DESC(max_interrupt_work, - DRV_NAME " maximum events handled per interrupt"); -module_param(rx_copybreak, int, 0); -MODULE_PARM_DESC(rx_copybreak, - DRV_NAME " copy breakpoint for copy-only-tiny-frames"); -module_param(tx_start_pt, int, 0); -MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); -module_param(pcnet32vlb, int, 0); -MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)"); -module_param_array(options, int, NULL, 0); -MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)"); -module_param_array(full_duplex, int, NULL, 0); -MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); -/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ -module_param_array(homepna, int, NULL, 0); -MODULE_PARM_DESC(homepna, - DRV_NAME - " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)"); - -MODULE_AUTHOR("Thomas Bogendoerfer"); -MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); -MODULE_LICENSE("GPL"); - -#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) - -static int __init pcnet32_init_module(void) -{ - pr_info("%s", version); - - pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); - - if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) - tx_start = tx_start_pt; - - /* find the PCI devices */ - if (!pci_register_driver(&pcnet32_driver)) - pcnet32_have_pci = 1; - - /* should we find any remaining VLbus devices? */ - if (pcnet32vlb) - pcnet32_probe_vlbus(pcnet32_portlist); - - if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) - pr_info("%d cards_found\n", cards_found); - - return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; -} - -static void __exit pcnet32_cleanup_module(void) -{ - struct net_device *next_dev; - - while (pcnet32_dev) { - struct pcnet32_private *lp = netdev_priv(pcnet32_dev); - next_dev = lp->next; - unregister_netdev(pcnet32_dev); - pcnet32_free_ring(pcnet32_dev); - release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); - pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), - lp->init_block, lp->init_dma_addr); - free_netdev(pcnet32_dev); - pcnet32_dev = next_dev; - } - - if (pcnet32_have_pci) - pci_unregister_driver(&pcnet32_driver); -} - -module_init(pcnet32_init_module); -module_exit(pcnet32_cleanup_module); - -/* - * Local variables: - * c-indent-level: 4 - * tab-width: 8 - * End: - */ diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c deleted file mode 100644 index 7d9ec23..0000000 --- a/drivers/net/sun3lance.c +++ /dev/null @@ -1,961 +0,0 @@ -/* sun3lance.c: Ethernet driver for SUN3 Lance chip */ -/* - - Sun3 Lance ethernet driver, by Sam Creasey (sammy@users.qual.net). - This driver is a part of the linux kernel, and is thus distributed - under the GNU General Public License. - - The values used in LANCE_OBIO and LANCE_IRQ seem to be empirically - correct for the IRQ and address of the lance registers. They - have not been widely tested, however. What we probably need is a - "proper" way to search for a device in the sun3's prom, but, alas, - linux has no such thing. - - This driver is largely based on atarilance.c, by Roman Hodek. Other - sources of inspiration were the NetBSD sun3 am7990 driver, and the - linux sparc lance driver (sunlance.c).
- - There are more assumptions made throughout this driver; it almost - certainly still needs work, but it does work at least for RARP/BOOTP and - mounting the root NFS filesystem. - -*/ - -static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_SUN3 -#include -#else -#include -#endif - -/* sun3/60 addr/irq for the lance chip. If your sun is different, - change this. */ -#define LANCE_OBIO 0x120000 -#define LANCE_IRQ IRQ_AUTO_3 - -/* Debug level: - * 0 = silent, print only serious errors - * 1 = normal, print error messages - * 2 = debug, print debug info - * 3 = debug, print even more debug info (packet data) - */ - -#define LANCE_DEBUG 0 - -#ifdef LANCE_DEBUG -static int lance_debug = LANCE_DEBUG; -#else -static int lance_debug = 1; -#endif -module_param(lance_debug, int, 0); -MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)"); -MODULE_LICENSE("GPL"); - -#define DPRINTK(n,a) \ - do { \ - if (lance_debug >= n) \ - printk a; \ - } while( 0 ) - - -/* we're only using about 64k of memory, so we use 8 TX - buffers and 32 RX buffers. These values are expressed as log2. */ - -#define TX_LOG_RING_SIZE 3 -#define RX_LOG_RING_SIZE 5 - -/* These are the derived values */ - -#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) -#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5) -#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) - -#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE) -#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5) -#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) - -/* Definitions for packet buffer access: */ -#define PKT_BUF_SZ 1544 - -/* Get the address of a packet buffer corresponding to a given buffer head */ -#define PKTBUF_ADDR(head) (void *)((unsigned long)(MEM) | (head)->base) - - -/* The LANCE Rx and Tx ring descriptors. */ -struct lance_rx_head { - unsigned short base; /* Low word of base addr */ - volatile unsigned char flag; - unsigned char base_hi; /* High word of base addr (unused) */ - short buf_length; /* This length is 2s complement! */ - volatile short msg_length; /* This length is "normal". */ -}; - -struct lance_tx_head { - unsigned short base; /* Low word of base addr */ - volatile unsigned char flag; - unsigned char base_hi; /* High word of base addr (unused) */ - short length; /* Length is 2s complement! */ - volatile short misc; -}; - -/* The LANCE initialization block, described in the databook. */ -struct lance_init_block { - unsigned short mode; /* Pre-set mode */ - unsigned char hwaddr[6]; /* Physical ethernet address */ - unsigned int filter[2]; /* Multicast filter (unused). */ - /* Receive and transmit ring base, along with length bits. */ - unsigned short rdra; - unsigned short rlen; - unsigned short tdra; - unsigned short tlen; - unsigned short pad[4]; /* is this needed?
*/ -}; - -/* The whole layout of the Lance shared memory */ -struct lance_memory { - struct lance_init_block init; - struct lance_tx_head tx_head[TX_RING_SIZE]; - struct lance_rx_head rx_head[RX_RING_SIZE]; - char rx_data[RX_RING_SIZE][PKT_BUF_SZ]; - char tx_data[TX_RING_SIZE][PKT_BUF_SZ]; -}; - -/* The driver's private device structure */ - -struct lance_private { - volatile unsigned short *iobase; - struct lance_memory *mem; - int new_rx, new_tx; /* The next free ring entry */ - int old_tx, old_rx; /* ring entry to be processed */ -/* These two must be longs for set_bit() */ - long tx_full; - long lock; -}; - -/* I/O register access macros */ - -#define MEM lp->mem -#define DREG lp->iobase[0] -#define AREG lp->iobase[1] -#define REGA(a) (*( AREG = (a), &DREG )) - -/* Definitions for the Lance */ - -/* tx_head flags */ -#define TMD1_ENP 0x01 /* end of packet */ -#define TMD1_STP 0x02 /* start of packet */ -#define TMD1_DEF 0x04 /* deferred */ -#define TMD1_ONE 0x08 /* one retry needed */ -#define TMD1_MORE 0x10 /* more than one retry needed */ -#define TMD1_ERR 0x40 /* error summary */ -#define TMD1_OWN 0x80 /* ownership (set: chip owns) */ - -#define TMD1_OWN_CHIP TMD1_OWN -#define TMD1_OWN_HOST 0 - -/* tx_head misc field */ -#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */ -#define TMD3_RTRY 0x0400 /* failed after 16 retries */ -#define TMD3_LCAR 0x0800 /* carrier lost */ -#define TMD3_LCOL 0x1000 /* late collision */ -#define TMD3_UFLO 0x4000 /* underflow (late memory) */ -#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */ - -/* rx_head flags */ -#define RMD1_ENP 0x01 /* end of packet */ -#define RMD1_STP 0x02 /* start of packet */ -#define RMD1_BUFF 0x04 /* buffer error */ -#define RMD1_CRC 0x08 /* CRC error */ -#define RMD1_OFLO 0x10 /* overflow */ -#define RMD1_FRAM 0x20 /* framing error */ -#define RMD1_ERR 0x40 /* error summary */ -#define RMD1_OWN 0x80 /* ownership (set: chip owns) */ - -#define RMD1_OWN_CHIP RMD1_OWN -#define RMD1_OWN_HOST 0 - -/* register names */ -#define CSR0 0 /* mode/status */ -#define CSR1 1 /* init block addr (low) */ -#define CSR2 2 /* init block addr (high) */ -#define CSR3 3 /* misc */ -#define CSR8 8 /* address filter */ -#define CSR15 15 /* promiscuous mode */ - -/* CSR0 */ -/* (R=readable, W=writeable, S=set on write, C=clear on write) */ -#define CSR0_INIT 0x0001 /* initialize (RS) */ -#define CSR0_STRT 0x0002 /* start (RS) */ -#define CSR0_STOP 0x0004 /* stop (RS) */ -#define CSR0_TDMD 0x0008 /* transmit demand (RS) */ -#define CSR0_TXON 0x0010 /* transmitter on (R) */ -#define CSR0_RXON 0x0020 /* receiver on (R) */ -#define CSR0_INEA 0x0040 /* interrupt enable (RW) */ -#define CSR0_INTR 0x0080 /* interrupt active (R) */ -#define CSR0_IDON 0x0100 /* initialization done (RC) */ -#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */ -#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */ -#define CSR0_MERR 0x0800 /* memory error (RC) */ -#define CSR0_MISS 0x1000 /* missed frame (RC) */ -#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */ -#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */ -#define CSR0_ERR 0x8000 /* error (RC) */ - -/* CSR3 */ -#define CSR3_BCON 0x0001 /* byte control */ -#define CSR3_ACON 0x0002 /* ALE control */ -#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */ - -/***************************** Prototypes *****************************/ - -static int lance_probe( struct net_device *dev); -static int lance_open( struct net_device *dev ); -static void lance_init_ring( struct
net_device *dev ); -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); -static irqreturn_t lance_interrupt( int irq, void *dev_id); -static int lance_rx( struct net_device *dev ); -static int lance_close( struct net_device *dev ); -static void set_multicast_list( struct net_device *dev ); - -/************************* End of Prototypes **************************/ - -struct net_device * __init sun3lance_probe(int unit) -{ - struct net_device *dev; - static int found; - int err = -ENODEV; - - if (!MACH_IS_SUN3 && !MACH_IS_SUN3X) - return ERR_PTR(-ENODEV); - - /* check that this machine has an onboard lance */ - switch(idprom->id_machtype) { - case SM_SUN3|SM_3_50: - case SM_SUN3|SM_3_60: - case SM_SUN3X|SM_3_80: - /* these machines have lance */ - break; - - default: - return ERR_PTR(-ENODEV); - } - - if (found) - return ERR_PTR(-ENODEV); - - dev = alloc_etherdev(sizeof(struct lance_private)); - if (!dev) - return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - - if (!lance_probe(dev)) - goto out; - - err = register_netdev(dev); - if (err) - goto out1; - found = 1; - return dev; - -out1: -#ifdef CONFIG_SUN3 - iounmap((void __iomem *)dev->base_addr); -#endif -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops lance_netdev_ops = { - .ndo_open = lance_open, - .ndo_stop = lance_close, - .ndo_start_xmit = lance_start_xmit, - .ndo_set_multicast_list = set_multicast_list, - .ndo_set_mac_address = NULL, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init lance_probe( struct net_device *dev) -{ - unsigned long ioaddr; - - struct lance_private *lp; - int i; - static int did_version; - volatile unsigned short *ioaddr_probe; - unsigned short tmp1, tmp2; - -#ifdef CONFIG_SUN3 - ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE); - if (!ioaddr) - return 0; -#else - ioaddr = SUN3X_LANCE; -#endif - - /* test to see if there's really a lance here */ - /* (CSR0_INIT shouldn't be readable) */ - - ioaddr_probe = (volatile unsigned short *)ioaddr; - tmp1 = ioaddr_probe[0]; - tmp2 = ioaddr_probe[1]; - - ioaddr_probe[1] = CSR0; - ioaddr_probe[0] = CSR0_INIT | CSR0_STOP; - - if(ioaddr_probe[0] != CSR0_STOP) { - ioaddr_probe[0] = tmp1; - ioaddr_probe[1] = tmp2; - -#ifdef CONFIG_SUN3 - iounmap((void __iomem *)ioaddr); -#endif - return 0; - } - - lp = netdev_priv(dev); - - /* XXX - leak?
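- (Presumably what the XXX is about: the DVMA block allocated just below
- is only returned via dvma_free() on the request_irq() failure path, so
- it appears to stay allocated once the probe succeeds.)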
*/ - MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000); - if (MEM == NULL) { -#ifdef CONFIG_SUN3 - iounmap((void __iomem *)ioaddr); -#endif - printk(KERN_WARNING "SUN3 Lance couldn't allocate DVMA memory\n"); - return 0; - } - - lp->iobase = (volatile unsigned short *)ioaddr; - dev->base_addr = (unsigned long)ioaddr; /* informational only */ - - REGA(CSR0) = CSR0_STOP; - - if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) { -#ifdef CONFIG_SUN3 - iounmap((void __iomem *)ioaddr); -#endif - dvma_free((void *)MEM); - printk(KERN_WARNING "SUN3 Lance unable to allocate IRQ\n"); - return 0; - } - dev->irq = (unsigned short)LANCE_IRQ; - - - printk("%s: SUN3 Lance at io %#lx, mem %#lx, irq %d, hwaddr ", - dev->name, - (unsigned long)ioaddr, - (unsigned long)MEM, - dev->irq); - - /* copy in the ethernet address from the prom */ - for(i = 0; i < 6 ; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; - - /* tell the card it's ether address, bytes swapped */ - MEM->init.hwaddr[0] = dev->dev_addr[1]; - MEM->init.hwaddr[1] = dev->dev_addr[0]; - MEM->init.hwaddr[2] = dev->dev_addr[3]; - MEM->init.hwaddr[3] = dev->dev_addr[2]; - MEM->init.hwaddr[4] = dev->dev_addr[5]; - MEM->init.hwaddr[5] = dev->dev_addr[4]; - - printk("%pM\n", dev->dev_addr); - - MEM->init.mode = 0x0000; - MEM->init.filter[0] = 0x00000000; - MEM->init.filter[1] = 0x00000000; - MEM->init.rdra = dvma_vtob(MEM->rx_head); - MEM->init.rlen = (RX_LOG_RING_SIZE << 13) | - (dvma_vtob(MEM->rx_head) >> 16); - MEM->init.tdra = dvma_vtob(MEM->tx_head); - MEM->init.tlen = (TX_LOG_RING_SIZE << 13) | - (dvma_vtob(MEM->tx_head) >> 16); - - DPRINTK(2, ("initaddr: %08lx rx_ring: %08lx tx_ring: %08lx\n", - dvma_vtob(&(MEM->init)), dvma_vtob(MEM->rx_head), - (dvma_vtob(MEM->tx_head)))); - - if (did_version++ == 0) - printk( version ); - - dev->netdev_ops = &lance_netdev_ops; -// KLUDGE -- REMOVE ME - set_bit(__LINK_STATE_PRESENT, &dev->state); - - - return 1; -} - -static int lance_open( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - int i; - - DPRINTK( 2, ( "%s: lance_open()\n", dev->name )); - - REGA(CSR0) = CSR0_STOP; - - lance_init_ring(dev); - - /* From now on, AREG is kept to point to CSR0 */ - REGA(CSR0) = CSR0_INIT; - - i = 1000000; - while (--i > 0) - if (DREG & CSR0_IDON) - break; - if (i <= 0 || (DREG & CSR0_ERR)) { - DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", - dev->name, i, DREG )); - DREG = CSR0_STOP; - return -EIO; - } - - DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA; - - netif_start_queue(dev); - - DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); - - return 0; -} - - -/* Initialize the LANCE Rx and Tx rings. 
*/ - -static void lance_init_ring( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - int i; - - lp->lock = 0; - lp->tx_full = 0; - lp->new_rx = lp->new_tx = 0; - lp->old_rx = lp->old_tx = 0; - - for( i = 0; i < TX_RING_SIZE; i++ ) { - MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]); - MEM->tx_head[i].flag = 0; - MEM->tx_head[i].base_hi = - (dvma_vtob(MEM->tx_data[i])) >>16; - MEM->tx_head[i].length = 0; - MEM->tx_head[i].misc = 0; - } - - for( i = 0; i < RX_RING_SIZE; i++ ) { - MEM->rx_head[i].base = dvma_vtob(MEM->rx_data[i]); - MEM->rx_head[i].flag = RMD1_OWN_CHIP; - MEM->rx_head[i].base_hi = - (dvma_vtob(MEM->rx_data[i])) >> 16; - MEM->rx_head[i].buf_length = -PKT_BUF_SZ | 0xf000; - MEM->rx_head[i].msg_length = 0; - } - - /* tell the card it's ether address, bytes swapped */ - MEM->init.hwaddr[0] = dev->dev_addr[1]; - MEM->init.hwaddr[1] = dev->dev_addr[0]; - MEM->init.hwaddr[2] = dev->dev_addr[3]; - MEM->init.hwaddr[3] = dev->dev_addr[2]; - MEM->init.hwaddr[4] = dev->dev_addr[5]; - MEM->init.hwaddr[5] = dev->dev_addr[4]; - - MEM->init.mode = 0x0000; - MEM->init.filter[0] = 0x00000000; - MEM->init.filter[1] = 0x00000000; - MEM->init.rdra = dvma_vtob(MEM->rx_head); - MEM->init.rlen = (RX_LOG_RING_SIZE << 13) | - (dvma_vtob(MEM->rx_head) >> 16); - MEM->init.tdra = dvma_vtob(MEM->tx_head); - MEM->init.tlen = (TX_LOG_RING_SIZE << 13) | - (dvma_vtob(MEM->tx_head) >> 16); - - - /* tell the lance the address of its init block */ - REGA(CSR1) = dvma_vtob(&(MEM->init)); - REGA(CSR2) = dvma_vtob(&(MEM->init)) >> 16; - -#ifdef CONFIG_SUN3X - REGA(CSR3) = CSR3_BSWP | CSR3_ACON | CSR3_BCON; -#else - REGA(CSR3) = CSR3_BSWP; -#endif - -} - - -static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - int entry, len; - struct lance_tx_head *head; - unsigned long flags; - - DPRINTK( 1, ( "%s: transmit start.\n", - dev->name)); - - /* Transmitter timeout, serious problems. */ - if (netif_queue_stopped(dev)) { - int tickssofar = jiffies - dev_trans_start(dev); - if (tickssofar < HZ/5) - return NETDEV_TX_BUSY; - - DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n", - dev->name, DREG )); - DREG = CSR0_STOP; - /* - * Always set BSWP after a STOP as STOP puts it back into - * little endian mode. - */ - REGA(CSR3) = CSR3_BSWP; - dev->stats.tx_errors++; - - if(lance_debug >= 2) { - int i; - printk("Ring data: old_tx %d new_tx %d%s new_rx %d\n", - lp->old_tx, lp->new_tx, - lp->tx_full ? " (full)" : "", - lp->new_rx ); - for( i = 0 ; i < RX_RING_SIZE; i++ ) - printk( "rx #%d: base=%04x blen=%04x mlen=%04x\n", - i, MEM->rx_head[i].base, - -MEM->rx_head[i].buf_length, - MEM->rx_head[i].msg_length); - for( i = 0 ; i < TX_RING_SIZE; i++ ) - printk("tx #%d: base=%04x len=%04x misc=%04x\n", - i, MEM->tx_head[i].base, - -MEM->tx_head[i].length, - MEM->tx_head[i].misc ); - } - - lance_init_ring(dev); - REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; - - netif_start_queue(dev); - - return NETDEV_TX_OK; - } - - - /* Block a timer-based transmit from overlapping. This could better be - done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */ - - /* Block a timer-based transmit from overlapping with us by - stopping the queue for a bit... */ - - netif_stop_queue(dev); - - if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) { - printk( "%s: tx queue lock!.\n", dev->name); - /* don't clear dev->tbusy flag. 
*/ - return NETDEV_TX_BUSY; - } - - AREG = CSR0; - DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", - dev->name, DREG )); - -#ifdef CONFIG_SUN3X - /* this weirdness doesn't appear on sun3... */ - if(!(DREG & CSR0_INIT)) { - DPRINTK( 1, ("INIT not set, reinitializing...\n")); - REGA( CSR0 ) = CSR0_STOP; - lance_init_ring(dev); - REGA( CSR0 ) = CSR0_INIT | CSR0_STRT; - } -#endif - - /* Fill in a Tx ring entry */ -#if 0 - if (lance_debug >= 2) { - printk( "%s: TX pkt %d type 0x%04x" - " from %s to %s" - " data at 0x%08x len %d\n", - dev->name, lp->new_tx, ((u_short *)skb->data)[6], - DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data), - (int)skb->data, (int)skb->len ); - } -#endif - /* We're not prepared for the int until the last flags are set/reset. - * And the int may happen already after setting the OWN_CHIP... */ - local_irq_save(flags); - - /* Mask to ring buffer boundary. */ - entry = lp->new_tx; - head = &(MEM->tx_head[entry]); - - /* Caution: the write order is important here, set the "ownership" bits - * last. - */ - - /* the sun3's lance needs it's buffer padded to the minimum - size */ - len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; - -// head->length = -len; - head->length = (-len) | 0xf000; - head->misc = 0; - - skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len); - if (len != skb->len) - memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); - - head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; - lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK; - dev->stats.tx_bytes += skb->len; - - /* Trigger an immediate send poll. */ - REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT; - AREG = CSR0; - DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n", - dev->name, DREG )); - dev_kfree_skb(skb); - - lp->lock = 0; - if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) == - TMD1_OWN_HOST) - netif_start_queue(dev); - - local_irq_restore(flags); - - return NETDEV_TX_OK; -} - -/* The LANCE interrupt handler. 
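
Note the padding step above: the Sun3 LANCE does not pad runt frames itself, so anything shorter than ETH_ZLEN (60 bytes) is zero-filled in the ring buffer before the OWN bit is flipped. The pattern in isolation, with buf standing in for PKTBUF_ADDR(head) (an illustrative sketch only):

#include <linux/if_ether.h>
#include <linux/string.h>

/* Copy a frame into a tx ring buffer, zero-padding runts to the
 * 60-byte Ethernet minimum; returns the length to program into
 * the descriptor.
 */
static int lance_fill_tx_buf(unsigned char *buf, const void *data, int skblen)
{
	int len = (skblen < ETH_ZLEN) ? ETH_ZLEN : skblen;

	memcpy(buf, data, skblen);
	if (len != skblen)
		memset(buf + skblen, 0, len - skblen);
	return len;
}
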
*/ - -static irqreturn_t lance_interrupt( int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct lance_private *lp = netdev_priv(dev); - int csr0; - static int in_interrupt; - - if (dev == NULL) { - DPRINTK( 1, ( "lance_interrupt(): invalid dev_id\n" )); - return IRQ_NONE; - } - - if (in_interrupt) - DPRINTK( 2, ( "%s: Re-entering the interrupt handler.\n", dev->name )); - in_interrupt = 1; - - still_more: - flush_cache_all(); - - AREG = CSR0; - csr0 = DREG; - - /* ack interrupts */ - DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON); - - /* clear errors */ - if(csr0 & CSR0_ERR) - DREG = CSR0_BABL | CSR0_MERR | CSR0_CERR | CSR0_MISS; - - - DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n", - dev->name, csr0, DREG )); - - if (csr0 & CSR0_TINT) { /* Tx-done interrupt */ - int old_tx = lp->old_tx; - -// if(lance_debug >= 3) { -// int i; -// -// printk("%s: tx int\n", dev->name); -// -// for(i = 0; i < TX_RING_SIZE; i++) -// printk("ring %d flag=%04x\n", i, -// MEM->tx_head[i].flag); -// } - - while( old_tx != lp->new_tx) { - struct lance_tx_head *head = &(MEM->tx_head[old_tx]); - - DPRINTK(3, ("on tx_ring %d\n", old_tx)); - - if (head->flag & TMD1_OWN_CHIP) - break; /* It still hasn't been Txed */ - - if (head->flag & TMD1_ERR) { - int status = head->misc; - dev->stats.tx_errors++; - if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++; - if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++; - if (status & TMD3_LCOL) dev->stats.tx_window_errors++; - if (status & (TMD3_UFLO | TMD3_BUFF)) { - dev->stats.tx_fifo_errors++; - printk("%s: Tx FIFO error\n", - dev->name); - REGA(CSR0) = CSR0_STOP; - REGA(CSR3) = CSR3_BSWP; - lance_init_ring(dev); - REGA(CSR0) = CSR0_STRT | CSR0_INEA; - return IRQ_HANDLED; - } - } else if(head->flag & (TMD1_ENP | TMD1_STP)) { - - head->flag &= ~(TMD1_ENP | TMD1_STP); - if(head->flag & (TMD1_ONE | TMD1_MORE)) - dev->stats.collisions++; - - dev->stats.tx_packets++; - DPRINTK(3, ("cleared tx ring %d\n", old_tx)); - } - old_tx = (old_tx +1) & TX_RING_MOD_MASK; - } - - lp->old_tx = old_tx; - } - - - if (netif_queue_stopped(dev)) { - /* The ring is no longer full, clear tbusy. */ - netif_start_queue(dev); - netif_wake_queue(dev); - } - - if (csr0 & CSR0_RINT) /* Rx interrupt */ - lance_rx( dev ); - - /* Log misc errors. */ - if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ - if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ - if (csr0 & CSR0_MERR) { - DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " - "status %04x.\n", dev->name, csr0 )); - /* Restart the chip. */ - REGA(CSR0) = CSR0_STOP; - REGA(CSR3) = CSR3_BSWP; - lance_init_ring(dev); - REGA(CSR0) = CSR0_STRT | CSR0_INEA; - } - - - /* Clear any other interrupt, and set interrupt enable. */ -// DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR | -// CSR0_IDON | CSR0_INEA; - - REGA(CSR0) = CSR0_INEA; - - if(DREG & (CSR0_RINT | CSR0_TINT)) { - DPRINTK(2, ("restarting interrupt, csr0=%#04x\n", DREG)); - goto still_more; - } - - DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n", - dev->name, DREG )); - in_interrupt = 0; - return IRQ_HANDLED; -} - -/* get packet, toss into skbuff */ -static int lance_rx( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - int entry = lp->new_rx; - - /* If we own the next entry, it's a new packet. Send it up. 
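
The Tx-done branch above is a standard ring-reclaim walk: advance from the oldest outstanding descriptor toward the producer index, stopping at the first entry the chip still owns. Reduced to its skeleton with hypothetical names (the real loop adds the error and collision accounting):

struct tx_desc {
	unsigned short flag;			/* OWN plus status bits */
};

#define DESC_OWN	0x80			/* hypothetical OWN bit */
#define RING_MASK	(16 - 1)		/* ring size must be 2^n */

static unsigned int tx_reclaim(struct tx_desc *ring,
			       unsigned int old, unsigned int new)
{
	while (old != new) {
		if (ring[old].flag & DESC_OWN)
			break;			/* chip hasn't sent it yet */
		/* ...status/collision accounting elided... */
		old = (old + 1) & RING_MASK;
	}
	return old;				/* new consumer index */
}
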
*/ - while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) { - struct lance_rx_head *head = &(MEM->rx_head[entry]); - int status = head->flag; - - if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */ - /* There is a tricky error noted by John Murphy, - to Russ Nelson: Even with - full-sized buffers it's possible for a jabber packet to use two - buffers, with only the last correctly noting the error. */ - if (status & RMD1_ENP) /* Only count a general error at the */ - dev->stats.rx_errors++; /* end of a packet.*/ - if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; - if (status & RMD1_OFLO) dev->stats.rx_over_errors++; - if (status & RMD1_CRC) dev->stats.rx_crc_errors++; - if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; - head->flag &= (RMD1_ENP|RMD1_STP); - } else { - /* Malloc up new buffer, compatible with net-3. */ -// short pkt_len = head->msg_length;// & 0xfff; - short pkt_len = (head->msg_length & 0xfff) - 4; - struct sk_buff *skb; - - if (pkt_len < 60) { - printk( "%s: Runt packet!\n", dev->name ); - dev->stats.rx_errors++; - } - else { - skb = dev_alloc_skb( pkt_len+2 ); - if (skb == NULL) { - DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", - dev->name )); - - dev->stats.rx_dropped++; - head->msg_length = 0; - head->flag |= RMD1_OWN_CHIP; - lp->new_rx = (lp->new_rx+1) & - RX_RING_MOD_MASK; - } - -#if 0 - if (lance_debug >= 3) { - u_char *data = PKTBUF_ADDR(head); - printk("%s: RX pkt %d type 0x%04x" - " from %pM to %pM", - dev->name, lp->new_tx, ((u_short *)data)[6], - &data[6], data); - - printk(" data %02x %02x %02x %02x %02x %02x %02x %02x " - "len %d at %08x\n", - data[15], data[16], data[17], data[18], - data[19], data[20], data[21], data[22], - pkt_len, data); - } -#endif - if (lance_debug >= 3) { - u_char *data = PKTBUF_ADDR(head); - printk( "%s: RX pkt %d type 0x%04x len %d\n ", dev->name, entry, ((u_short *)data)[6], pkt_len); - } - - - skb_reserve( skb, 2 ); /* 16 byte align */ - skb_put( skb, pkt_len ); /* Make room */ - skb_copy_to_linear_data(skb, - PKTBUF_ADDR(head), - pkt_len); - - skb->protocol = eth_type_trans( skb, dev ); - netif_rx( skb ); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - } - -// head->buf_length = -PKT_BUF_SZ | 0xf000; - head->msg_length = 0; - head->flag = RMD1_OWN_CHIP; - - entry = lp->new_rx = (lp->new_rx +1) & RX_RING_MOD_MASK; - } - - /* From lance.c (Donald Becker): */ - /* We should check that at least two ring entries are free. - If not, we should free one and mark stats->rx_dropped++. */ - - return 0; -} - - -static int lance_close( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - - netif_stop_queue(dev); - - AREG = CSR0; - - DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n", - dev->name, DREG )); - - /* We stop the LANCE here -- it occasionally polls - memory if we don't. */ - DREG = CSR0_STOP; - return 0; -} - - -/* Set or clear the multicast filter for this adaptor. - num_addrs == -1 Promiscuous mode, receive all packets - num_addrs == 0 Normal mode, clear multicast list - num_addrs > 0 Multicast mode, receive normal and MC packets, and do - best-effort filtering. - */ - -/* completely untested on a sun3 */ -static void set_multicast_list( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - - if(netif_queue_stopped(dev)) - /* Only possible if board is already started */ - return; - - /* We take the simple way out and always enable promiscuous mode. */ - DREG = CSR0_STOP; /* Temporarily stop the lance. 
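
The receive path above allocates pkt_len + 2 and reserves two bytes before copying; with a 14-byte Ethernet header, that lands the IP header on a 4-byte boundary. The delivery idiom as a compact sketch (descriptor recycling left to the caller):

#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* Hand a received frame to the stack; skb_reserve(skb, 2) makes
 * 2 + 14 = 16, so the payload after the Ethernet header is
 * 32-bit aligned.
 */
static void lance_deliver(struct net_device *dev, const void *buf, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (!skb)
		return;
	skb_reserve(skb, 2);
	skb_put(skb, len);
	skb_copy_to_linear_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}
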
*/ - - if (dev->flags & IFF_PROMISC) { - /* Log any net taps. */ - DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name )); - REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */ - } else { - short multicast_table[4]; - int num_addrs = netdev_mc_count(dev); - int i; - /* We don't use the multicast table, but rely on upper-layer - * filtering. */ - memset( multicast_table, (num_addrs == 0) ? 0 : -1, - sizeof(multicast_table) ); - for( i = 0; i < 4; i++ ) - REGA( CSR8+i ) = multicast_table[i]; - REGA( CSR15 ) = 0; /* Unset promiscuous mode */ - } - - /* - * Always set BSWP after a STOP as STOP puts it back into - * little endian mode. - */ - REGA( CSR3 ) = CSR3_BSWP; - - /* Resume normal operation and reset AREG to CSR0 */ - REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT; -} - - -#ifdef MODULE - -static struct net_device *sun3lance_dev; - -int __init init_module(void) -{ - sun3lance_dev = sun3lance_probe(-1); - if (IS_ERR(sun3lance_dev)) - return PTR_ERR(sun3lance_dev); - return 0; -} - -void __exit cleanup_module(void) -{ - unregister_netdev(sun3lance_dev); -#ifdef CONFIG_SUN3 - iounmap((void __iomem *)sun3lance_dev->base_addr); -#endif - free_netdev(sun3lance_dev); -} - -#endif /* MODULE */ - diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c deleted file mode 100644 index 06f2d43..0000000 --- a/drivers/net/sunlance.c +++ /dev/null @@ -1,1556 +0,0 @@ -/* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $ - * lance.c: Linux/Sparc/Lance driver - * - * Written 1995, 1996 by Miguel de Icaza - * Sources: - * The Linux depca driver - * The Linux lance driver. - * The Linux skeleton driver. - * The NetBSD Sparc/Lance driver. - * Theo de Raadt (deraadt@openbsd.org) - * NCR92C990 Lan Controller manual - * - * 1.4: - * Added support to run with a ledma on the Sun4m - * - * 1.5: - * Added multiple card detection. - * - * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost - * (ecd@skynet.be) - * - * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost - * (ecd@skynet.be) - * - * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller - * (davem@caip.rutgers.edu) - * - * 5/29/96: override option 'tpe-link-test?', if it is 'false', as - * this disables auto carrier detection on sun4m. Eddie C. Dost - * (ecd@skynet.be) - * - * 1.7: - * 6/26/96: Bug fix for multiple ledmas, miguel. - * - * 1.8: - * Stole multicast code from depca.c, fixed lance_tx. - * - * 1.9: - * 8/21/96: Fixed the multicast code (Pedro Roque) - * - * 8/28/96: Send fake packet in lance_open() if auto_select is true, - * so we can detect the carrier loss condition in time. - * Eddie C. Dost (ecd@skynet.be) - * - * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an - * MNA trap during chksum_partial_copy(). (ecd@skynet.be) - * - * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be) - * - * 12/22/96: Don't loop forever in lance_rx() on incomplete packets. - * This was the sun4c killer. Shit, stupid bug. - * (ecd@skynet.be) - * - * 1.10: - * 1/26/97: Modularize driver. (ecd@skynet.be) - * - * 1.11: - * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz) - * - * 1.12: - * 11/3/99: Fixed SMP race in lance_start_xmit found by davem. - * Anton Blanchard (anton@progsoc.uts.edu.au) - * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces. - * David S. 
Miller (davem@redhat.com) - * 2.01: - * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com) - * - */ - -#undef DEBUG_DRIVER - -static char lancestr[] = "LANCE"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* Used for the temporal inet entries and routing */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include /* Used by the checksum routines */ -#include -#include -#include /* For tpe-link-test? setting */ -#include - -#define DRV_NAME "sunlance" -#define DRV_VERSION "2.02" -#define DRV_RELDATE "8/24/03" -#define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)" - -static char version[] = - DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; - -MODULE_VERSION(DRV_VERSION); -MODULE_AUTHOR(DRV_AUTHOR); -MODULE_DESCRIPTION("Sun Lance ethernet driver"); -MODULE_LICENSE("GPL"); - -/* Define: 2^4 Tx buffers and 2^4 Rx buffers */ -#ifndef LANCE_LOG_TX_BUFFERS -#define LANCE_LOG_TX_BUFFERS 4 -#define LANCE_LOG_RX_BUFFERS 4 -#endif - -#define LE_CSR0 0 -#define LE_CSR1 1 -#define LE_CSR2 2 -#define LE_CSR3 3 - -#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */ - -#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */ -#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */ -#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */ -#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */ -#define LE_C0_MERR 0x0800 /* ME: Memory error */ -#define LE_C0_RINT 0x0400 /* Received interrupt */ -#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */ -#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */ -#define LE_C0_INTR 0x0080 /* Interrupt or error */ -#define LE_C0_INEA 0x0040 /* Interrupt enable */ -#define LE_C0_RXON 0x0020 /* Receiver on */ -#define LE_C0_TXON 0x0010 /* Transmitter on */ -#define LE_C0_TDMD 0x0008 /* Transmitter demand */ -#define LE_C0_STOP 0x0004 /* Stop the card */ -#define LE_C0_STRT 0x0002 /* Start the card */ -#define LE_C0_INIT 0x0001 /* Init the card */ - -#define LE_C3_BSWP 0x4 /* SWAP */ -#define LE_C3_ACON 0x2 /* ALE Control */ -#define LE_C3_BCON 0x1 /* Byte control */ - -/* Receive message descriptor 1 */ -#define LE_R1_OWN 0x80 /* Who owns the entry */ -#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */ -#define LE_R1_FRA 0x20 /* FRA: Frame error */ -#define LE_R1_OFL 0x10 /* OFL: Frame overflow */ -#define LE_R1_CRC 0x08 /* CRC error */ -#define LE_R1_BUF 0x04 /* BUF: Buffer error */ -#define LE_R1_SOP 0x02 /* Start of packet */ -#define LE_R1_EOP 0x01 /* End of packet */ -#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */ - -#define LE_T1_OWN 0x80 /* Lance owns the packet */ -#define LE_T1_ERR 0x40 /* Error summary */ -#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */ -#define LE_T1_EONE 0x08 /* Error: one retry needed */ -#define LE_T1_EDEF 0x04 /* Error: deferred */ -#define LE_T1_SOP 0x02 /* Start of packet */ -#define LE_T1_EOP 0x01 /* End of packet */ -#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */ - -#define LE_T3_BUF 0x8000 /* Buffer error */ -#define LE_T3_UFL 0x4000 /* Error underflow */ -#define LE_T3_LCOL 0x1000 /* Error late collision */ -#define LE_T3_CLOS 0x0800 /* Error carrier loss */ -#define LE_T3_RTY 0x0400 /* Error retry */ -#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */ - -#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) -#define TX_RING_MOD_MASK 
(TX_RING_SIZE - 1) -#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29) -#define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK) - -#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) -#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) -#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29) -#define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK) - -#define PKT_BUF_SZ 1544 -#define RX_BUFF_SIZE PKT_BUF_SZ -#define TX_BUFF_SIZE PKT_BUF_SZ - -struct lance_rx_desc { - u16 rmd0; /* low address of packet */ - u8 rmd1_bits; /* descriptor bits */ - u8 rmd1_hadr; /* high address of packet */ - s16 length; /* This length is 2s complement (negative)! - * Buffer length - */ - u16 mblength; /* This is the actual number of bytes received */ -}; - -struct lance_tx_desc { - u16 tmd0; /* low address of packet */ - u8 tmd1_bits; /* descriptor bits */ - u8 tmd1_hadr; /* high address of packet */ - s16 length; /* Length is 2s complement (negative)! */ - u16 misc; -}; - -/* The LANCE initialization block, described in databook. */ -/* On the Sparc, this block should be on a DMA region */ -struct lance_init_block { - u16 mode; /* Pre-set mode (reg. 15) */ - u8 phys_addr[6]; /* Physical ethernet address */ - u32 filter[2]; /* Multicast filter. */ - - /* Receive and transmit ring base, along with extra bits. */ - u16 rx_ptr; /* receive descriptor addr */ - u16 rx_len; /* receive len and high addr */ - u16 tx_ptr; /* transmit descriptor addr */ - u16 tx_len; /* transmit len and high addr */ - - /* The Tx and Rx ring entries must aligned on 8-byte boundaries. */ - struct lance_rx_desc brx_ring[RX_RING_SIZE]; - struct lance_tx_desc btx_ring[TX_RING_SIZE]; - - u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE]; - u8 pad[2]; /* align rx_buf for copy_and_sum(). */ - u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE]; -}; - -#define libdesc_offset(rt, elem) \ -((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem]))))) - -#define libbuff_offset(rt, elem) \ -((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0]))))) - -struct lance_private { - void __iomem *lregs; /* Lance RAP/RDP regs. */ - void __iomem *dregs; /* DMA controller regs. */ - struct lance_init_block __iomem *init_block_iomem; - struct lance_init_block *init_block_mem; - - spinlock_t lock; - - int rx_new, tx_new; - int rx_old, tx_old; - - struct platform_device *ledma; /* If set this points to ledma */ - char tpe; /* cable-selection is TPE */ - char auto_select; /* cable-selection by carrier */ - char burst_sizes; /* ledma SBus burst sizes */ - char pio_buffer; /* init block in PIO space? */ - - unsigned short busmaster_regval; - - void (*init_ring)(struct net_device *); - void (*rx)(struct net_device *); - void (*tx)(struct net_device *); - - char *name; - dma_addr_t init_block_dvma; - struct net_device *dev; /* Backpointer */ - struct platform_device *op; - struct platform_device *lebuffer; - struct timer_list multicast_timer; -}; - -#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ - lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\ - lp->tx_old - lp->tx_new-1) - -/* Lance registers. 
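
The libdesc_offset()/libbuff_offset() macros above are the classic null-pointer form of offsetof(): they compute the byte offset of a ring entry within struct lance_init_block without needing an object. With the standard macro, the same constant reads as below (a sketch; offsetof() with an array subscript in the member designator is the common compiler extension the kernel relies on):

#include <linux/stddef.h>
#include <linux/types.h>

#define LIBDESC_OFFSET(rt, elem) \
	((u32) offsetof(struct lance_init_block, rt[elem]))

/* e.g. LIBDESC_OFFSET(brx_ring, 0) == libdesc_offset(brx_ring, 0) */
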
*/ -#define RDP 0x00UL /* register data port */ -#define RAP 0x02UL /* register address port */ -#define LANCE_REG_SIZE 0x04UL - -#define STOP_LANCE(__lp) \ -do { void __iomem *__base = (__lp)->lregs; \ - sbus_writew(LE_CSR0, __base + RAP); \ - sbus_writew(LE_C0_STOP, __base + RDP); \ -} while (0) - -int sparc_lance_debug = 2; - -/* The Lance uses 24 bit addresses */ -/* On the Sun4c the DVMA will provide the remaining bytes for us */ -/* On the Sun4m we have to instruct the ledma to provide them */ -/* Even worse, on scsi/ether SBUS cards, the init block and the - * transmit/receive buffers are addresses as offsets from absolute - * zero on the lebuffer PIO area. -DaveM - */ - -#define LANCE_ADDR(x) ((long)(x) & ~0xff000000) - -/* Load the CSR registers */ -static void load_csrs(struct lance_private *lp) -{ - u32 leptr; - - if (lp->pio_buffer) - leptr = 0; - else - leptr = LANCE_ADDR(lp->init_block_dvma); - - sbus_writew(LE_CSR1, lp->lregs + RAP); - sbus_writew(leptr & 0xffff, lp->lregs + RDP); - sbus_writew(LE_CSR2, lp->lregs + RAP); - sbus_writew(leptr >> 16, lp->lregs + RDP); - sbus_writew(LE_CSR3, lp->lregs + RAP); - sbus_writew(lp->busmaster_regval, lp->lregs + RDP); - - /* Point back to csr0 */ - sbus_writew(LE_CSR0, lp->lregs + RAP); -} - -/* Setup the Lance Rx and Tx rings */ -static void lance_init_ring_dvma(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_init_block *ib = lp->init_block_mem; - dma_addr_t aib = lp->init_block_dvma; - __u32 leptr; - int i; - - /* Lock out other processes while setting up hardware */ - netif_stop_queue(dev); - lp->rx_new = lp->tx_new = 0; - lp->rx_old = lp->tx_old = 0; - - /* Copy the ethernet address to the lance init block - * Note that on the sparc you need to swap the ethernet address. 
- */ - ib->phys_addr [0] = dev->dev_addr [1]; - ib->phys_addr [1] = dev->dev_addr [0]; - ib->phys_addr [2] = dev->dev_addr [3]; - ib->phys_addr [3] = dev->dev_addr [2]; - ib->phys_addr [4] = dev->dev_addr [5]; - ib->phys_addr [5] = dev->dev_addr [4]; - - /* Setup the Tx ring entries */ - for (i = 0; i < TX_RING_SIZE; i++) { - leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i)); - ib->btx_ring [i].tmd0 = leptr; - ib->btx_ring [i].tmd1_hadr = leptr >> 16; - ib->btx_ring [i].tmd1_bits = 0; - ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */ - ib->btx_ring [i].misc = 0; - } - - /* Setup the Rx ring entries */ - for (i = 0; i < RX_RING_SIZE; i++) { - leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i)); - - ib->brx_ring [i].rmd0 = leptr; - ib->brx_ring [i].rmd1_hadr = leptr >> 16; - ib->brx_ring [i].rmd1_bits = LE_R1_OWN; - ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000; - ib->brx_ring [i].mblength = 0; - } - - /* Setup the initialization block */ - - /* Setup rx descriptor pointer */ - leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0)); - ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16); - ib->rx_ptr = leptr; - - /* Setup tx descriptor pointer */ - leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0)); - ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16); - ib->tx_ptr = leptr; -} - -static void lance_init_ring_pio(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_init_block __iomem *ib = lp->init_block_iomem; - u32 leptr; - int i; - - /* Lock out other processes while setting up hardware */ - netif_stop_queue(dev); - lp->rx_new = lp->tx_new = 0; - lp->rx_old = lp->tx_old = 0; - - /* Copy the ethernet address to the lance init block - * Note that on the sparc you need to swap the ethernet address. 
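
Both ring-setup variants store the station address with each 16-bit pair byte-swapped (1,0,3,2,5,4): the LANCE is a little-endian bus master, and with CSR3_BSWP set on these big-endian machines the swap in the init block cancels the one the chip applies. As a standalone helper (the name is hypothetical):

/* Copy a 6-byte Ethernet address into a LANCE init block, swapping
 * bytes within each 16-bit word.
 */
static void lance_copy_hwaddr(unsigned char *dst, const unsigned char *src)
{
	int i;

	for (i = 0; i < 6; i += 2) {
		dst[i]     = src[i + 1];
		dst[i + 1] = src[i];
	}
}
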
- */ - sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]); - sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]); - sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]); - sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]); - sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]); - sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]); - - /* Setup the Tx ring entries */ - for (i = 0; i < TX_RING_SIZE; i++) { - leptr = libbuff_offset(tx_buf, i); - sbus_writew(leptr, &ib->btx_ring [i].tmd0); - sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr); - sbus_writeb(0, &ib->btx_ring [i].tmd1_bits); - - /* The ones required by tmd2 */ - sbus_writew(0xf000, &ib->btx_ring [i].length); - sbus_writew(0, &ib->btx_ring [i].misc); - } - - /* Setup the Rx ring entries */ - for (i = 0; i < RX_RING_SIZE; i++) { - leptr = libbuff_offset(rx_buf, i); - - sbus_writew(leptr, &ib->brx_ring [i].rmd0); - sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr); - sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits); - sbus_writew(-RX_BUFF_SIZE|0xf000, - &ib->brx_ring [i].length); - sbus_writew(0, &ib->brx_ring [i].mblength); - } - - /* Setup the initialization block */ - - /* Setup rx descriptor pointer */ - leptr = libdesc_offset(brx_ring, 0); - sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16), - &ib->rx_len); - sbus_writew(leptr, &ib->rx_ptr); - - /* Setup tx descriptor pointer */ - leptr = libdesc_offset(btx_ring, 0); - sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16), - &ib->tx_len); - sbus_writew(leptr, &ib->tx_ptr); -} - -static void init_restart_ledma(struct lance_private *lp) -{ - u32 csr = sbus_readl(lp->dregs + DMA_CSR); - - if (!(csr & DMA_HNDL_ERROR)) { - /* E-Cache draining */ - while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN) - barrier(); - } - - csr = sbus_readl(lp->dregs + DMA_CSR); - csr &= ~DMA_E_BURSTS; - if (lp->burst_sizes & DMA_BURST32) - csr |= DMA_E_BURST32; - else - csr |= DMA_E_BURST16; - - csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV); - - if (lp->tpe) - csr |= DMA_EN_ENETAUI; - else - csr &= ~DMA_EN_ENETAUI; - udelay(20); - sbus_writel(csr, lp->dregs + DMA_CSR); - udelay(200); -} - -static int init_restart_lance(struct lance_private *lp) -{ - u16 regval = 0; - int i; - - if (lp->dregs) - init_restart_ledma(lp); - - sbus_writew(LE_CSR0, lp->lregs + RAP); - sbus_writew(LE_C0_INIT, lp->lregs + RDP); - - /* Wait for the lance to complete initialization */ - for (i = 0; i < 100; i++) { - regval = sbus_readw(lp->lregs + RDP); - - if (regval & (LE_C0_ERR | LE_C0_IDON)) - break; - barrier(); - } - if (i == 100 || (regval & LE_C0_ERR)) { - printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n", - i, regval); - if (lp->dregs) - printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR)); - return -1; - } - - /* Clear IDON by writing a "1", enable interrupts and start lance */ - sbus_writew(LE_C0_IDON, lp->lregs + RDP); - sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP); - - if (lp->dregs) { - u32 csr = sbus_readl(lp->dregs + DMA_CSR); - - csr |= DMA_INT_ENAB; - sbus_writel(csr, lp->dregs + DMA_CSR); - } - - return 0; -} - -static void lance_rx_dvma(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_init_block *ib = lp->init_block_mem; - struct lance_rx_desc *rd; - u8 bits; - int len, entry = lp->rx_new; - struct sk_buff *skb; - - for (rd = &ib->brx_ring [entry]; - !((bits = rd->rmd1_bits) & LE_R1_OWN); - rd = &ib->brx_ring [entry]) { - - /* We got an incomplete frame? 
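
init_restart_lance() just below waits for initialization with a bounded poll rather than an open-ended spin, so a dead chip fails the open instead of hanging the machine. The idiom in isolation, with a hypothetical read_csr0() standing in for the RAP/RDP access and the LE_C0_* bits as defined earlier in this file:

/* Bounded completion poll: 0 on IDON, -1 on chip error or timeout. */
static int lance_wait_idon(unsigned short (*read_csr0)(void))
{
	unsigned short v = 0;
	int i;

	for (i = 0; i < 100; i++) {
		v = read_csr0();
		if (v & (LE_C0_ERR | LE_C0_IDON))
			break;
	}
	if (i == 100 || (v & LE_C0_ERR))
		return -1;
	return 0;
}
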
*/ - if ((bits & LE_R1_POK) != LE_R1_POK) { - dev->stats.rx_over_errors++; - dev->stats.rx_errors++; - } else if (bits & LE_R1_ERR) { - /* Count only the end frame as a rx error, - * not the beginning - */ - if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; - if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) dev->stats.rx_errors++; - } else { - len = (rd->mblength & 0xfff) - 4; - skb = dev_alloc_skb(len + 2); - - if (skb == NULL) { - printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", - dev->name); - dev->stats.rx_dropped++; - rd->mblength = 0; - rd->rmd1_bits = LE_R1_OWN; - lp->rx_new = RX_NEXT(entry); - return; - } - - dev->stats.rx_bytes += len; - - skb_reserve(skb, 2); /* 16 byte align */ - skb_put(skb, len); /* make room */ - skb_copy_to_linear_data(skb, - (unsigned char *)&(ib->rx_buf [entry][0]), - len); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - } - - /* Return the packet to the pool */ - rd->mblength = 0; - rd->rmd1_bits = LE_R1_OWN; - entry = RX_NEXT(entry); - } - - lp->rx_new = entry; -} - -static void lance_tx_dvma(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_init_block *ib = lp->init_block_mem; - int i, j; - - spin_lock(&lp->lock); - - j = lp->tx_old; - for (i = j; i != lp->tx_new; i = j) { - struct lance_tx_desc *td = &ib->btx_ring [i]; - u8 bits = td->tmd1_bits; - - /* If we hit a packet not owned by us, stop */ - if (bits & LE_T1_OWN) - break; - - if (bits & LE_T1_ERR) { - u16 status = td->misc; - - dev->stats.tx_errors++; - if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; - - if (status & LE_T3_CLOS) { - dev->stats.tx_carrier_errors++; - if (lp->auto_select) { - lp->tpe = 1 - lp->tpe; - printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", - dev->name, lp->tpe?"TPE":"AUI"); - STOP_LANCE(lp); - lp->init_ring(dev); - load_csrs(lp); - init_restart_lance(lp); - goto out; - } - } - - /* Buffer errors and underflows turn off the - * transmitter, restart the adapter. - */ - if (status & (LE_T3_BUF|LE_T3_UFL)) { - dev->stats.tx_fifo_errors++; - - printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", - dev->name); - STOP_LANCE(lp); - lp->init_ring(dev); - load_csrs(lp); - init_restart_lance(lp); - goto out; - } - } else if ((bits & LE_T1_POK) == LE_T1_POK) { - /* - * So we don't count the packet more than once. - */ - td->tmd1_bits = bits & ~(LE_T1_POK); - - /* One collision before packet was sent. */ - if (bits & LE_T1_EONE) - dev->stats.collisions++; - - /* More than one collision, be optimistic. */ - if (bits & LE_T1_EMORE) - dev->stats.collisions += 2; - - dev->stats.tx_packets++; - } - - j = TX_NEXT(j); - } - lp->tx_old = j; -out: - if (netif_queue_stopped(dev) && - TX_BUFFS_AVAIL > 0) - netif_wake_queue(dev); - - spin_unlock(&lp->lock); -} - -static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len) -{ - u16 *p16 = (u16 *) skb->data; - u32 *p32; - u8 *p8; - void __iomem *pbuf = piobuf; - - /* We know here that both src and dest are on a 16bit boundary. 
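
The queue-wake test above leans on TX_BUFFS_AVAIL, defined earlier: it counts free slots while deliberately leaving one slot unusable, so old == new can only ever mean "ring empty", never "ring full". The same arithmetic written out as a function (hypothetical names):

/* Free tx slots in a ring of `size` entries with consumer `old` and
 * producer `new`; one slot is sacrificed to disambiguate full from
 * empty.
 */
static int tx_buffs_avail(int old, int new, int size)
{
	if (old <= new)
		return old + (size - 1) - new;
	return old - new - 1;
}
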
*/ - *p16++ = sbus_readw(pbuf); - p32 = (u32 *) p16; - pbuf += 2; - len -= 2; - - while (len >= 4) { - *p32++ = sbus_readl(pbuf); - pbuf += 4; - len -= 4; - } - p8 = (u8 *) p32; - if (len >= 2) { - p16 = (u16 *) p32; - *p16++ = sbus_readw(pbuf); - pbuf += 2; - len -= 2; - p8 = (u8 *) p16; - } - if (len >= 1) - *p8 = sbus_readb(pbuf); -} - -static void lance_rx_pio(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_init_block __iomem *ib = lp->init_block_iomem; - struct lance_rx_desc __iomem *rd; - unsigned char bits; - int len, entry; - struct sk_buff *skb; - - entry = lp->rx_new; - for (rd = &ib->brx_ring [entry]; - !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN); - rd = &ib->brx_ring [entry]) { - - /* We got an incomplete frame? */ - if ((bits & LE_R1_POK) != LE_R1_POK) { - dev->stats.rx_over_errors++; - dev->stats.rx_errors++; - } else if (bits & LE_R1_ERR) { - /* Count only the end frame as a rx error, - * not the beginning - */ - if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; - if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) dev->stats.rx_errors++; - } else { - len = (sbus_readw(&rd->mblength) & 0xfff) - 4; - skb = dev_alloc_skb(len + 2); - - if (skb == NULL) { - printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", - dev->name); - dev->stats.rx_dropped++; - sbus_writew(0, &rd->mblength); - sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); - lp->rx_new = RX_NEXT(entry); - return; - } - - dev->stats.rx_bytes += len; - - skb_reserve (skb, 2); /* 16 byte align */ - skb_put(skb, len); /* make room */ - lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - } - - /* Return the packet to the pool */ - sbus_writew(0, &rd->mblength); - sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); - entry = RX_NEXT(entry); - } - - lp->rx_new = entry; -} - -static void lance_tx_pio(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_init_block __iomem *ib = lp->init_block_iomem; - int i, j; - - spin_lock(&lp->lock); - - j = lp->tx_old; - for (i = j; i != lp->tx_new; i = j) { - struct lance_tx_desc __iomem *td = &ib->btx_ring [i]; - u8 bits = sbus_readb(&td->tmd1_bits); - - /* If we hit a packet not owned by us, stop */ - if (bits & LE_T1_OWN) - break; - - if (bits & LE_T1_ERR) { - u16 status = sbus_readw(&td->misc); - - dev->stats.tx_errors++; - if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; - - if (status & LE_T3_CLOS) { - dev->stats.tx_carrier_errors++; - if (lp->auto_select) { - lp->tpe = 1 - lp->tpe; - printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", - dev->name, lp->tpe?"TPE":"AUI"); - STOP_LANCE(lp); - lp->init_ring(dev); - load_csrs(lp); - init_restart_lance(lp); - goto out; - } - } - - /* Buffer errors and underflows turn off the - * transmitter, restart the adapter. - */ - if (status & (LE_T3_BUF|LE_T3_UFL)) { - dev->stats.tx_fifo_errors++; - - printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", - dev->name); - STOP_LANCE(lp); - lp->init_ring(dev); - load_csrs(lp); - init_restart_lance(lp); - goto out; - } - } else if ((bits & LE_T1_POK) == LE_T1_POK) { - /* - * So we don't count the packet more than once. - */ - sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits); - - /* One collision before packet was sent. 
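
lance_piocopy_to_skb() above drains the PIO buffer in the widest accesses the remaining length allows: one 16-bit read first (source and destination are known to be 16-bit aligned), then 32-bit reads, then a trailing 16-bit and byte read. The same stepping against ordinary memory, as a sketch of the shape:

#include <linux/string.h>

/* Copy from a 16-bit-aligned source in 2-, then 4-, then 2-, then
 * 1-byte steps; the driver does this with sbus_readw/readl/readb.
 */
static void copy_stepped(unsigned char *dst, const unsigned char *src, int len)
{
	if (len >= 2) { memcpy(dst, src, 2); dst += 2; src += 2; len -= 2; }
	while (len >= 4) { memcpy(dst, src, 4); dst += 4; src += 4; len -= 4; }
	if (len >= 2) { memcpy(dst, src, 2); dst += 2; src += 2; len -= 2; }
	if (len >= 1)
		*dst = *src;
}
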
*/ - if (bits & LE_T1_EONE) - dev->stats.collisions++; - - /* More than one collision, be optimistic. */ - if (bits & LE_T1_EMORE) - dev->stats.collisions += 2; - - dev->stats.tx_packets++; - } - - j = TX_NEXT(j); - } - lp->tx_old = j; - - if (netif_queue_stopped(dev) && - TX_BUFFS_AVAIL > 0) - netif_wake_queue(dev); -out: - spin_unlock(&lp->lock); -} - -static irqreturn_t lance_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct lance_private *lp = netdev_priv(dev); - int csr0; - - sbus_writew(LE_CSR0, lp->lregs + RAP); - csr0 = sbus_readw(lp->lregs + RDP); - - /* Acknowledge all the interrupt sources ASAP */ - sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT), - lp->lregs + RDP); - - if ((csr0 & LE_C0_ERR) != 0) { - /* Clear the error condition */ - sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | - LE_C0_CERR | LE_C0_MERR), - lp->lregs + RDP); - } - - if (csr0 & LE_C0_RINT) - lp->rx(dev); - - if (csr0 & LE_C0_TINT) - lp->tx(dev); - - if (csr0 & LE_C0_BABL) - dev->stats.tx_errors++; - - if (csr0 & LE_C0_MISS) - dev->stats.rx_errors++; - - if (csr0 & LE_C0_MERR) { - if (lp->dregs) { - u32 addr = sbus_readl(lp->dregs + DMA_ADDR); - - printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n", - dev->name, csr0, addr & 0xffffff); - } else { - printk(KERN_ERR "%s: Memory error, status %04x\n", - dev->name, csr0); - } - - sbus_writew(LE_C0_STOP, lp->lregs + RDP); - - if (lp->dregs) { - u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR); - - dma_csr |= DMA_FIFO_INV; - sbus_writel(dma_csr, lp->dregs + DMA_CSR); - } - - lp->init_ring(dev); - load_csrs(lp); - init_restart_lance(lp); - netif_wake_queue(dev); - } - - sbus_writew(LE_C0_INEA, lp->lregs + RDP); - - return IRQ_HANDLED; -} - -/* Build a fake network packet and send it to ourselves. 
*/ -static void build_fake_packet(struct lance_private *lp) -{ - struct net_device *dev = lp->dev; - int i, entry; - - entry = lp->tx_new & TX_RING_MOD_MASK; - if (lp->pio_buffer) { - struct lance_init_block __iomem *ib = lp->init_block_iomem; - u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]); - struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet; - for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++) - sbus_writew(0, &packet[i]); - for (i = 0; i < 6; i++) { - sbus_writeb(dev->dev_addr[i], ð->h_dest[i]); - sbus_writeb(dev->dev_addr[i], ð->h_source[i]); - } - sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length); - sbus_writew(0, &ib->btx_ring[entry].misc); - sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); - } else { - struct lance_init_block *ib = lp->init_block_mem; - u16 *packet = (u16 *) &(ib->tx_buf[entry][0]); - struct ethhdr *eth = (struct ethhdr *) packet; - memset(packet, 0, ETH_ZLEN); - for (i = 0; i < 6; i++) { - eth->h_dest[i] = dev->dev_addr[i]; - eth->h_source[i] = dev->dev_addr[i]; - } - ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000; - ib->btx_ring[entry].misc = 0; - ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); - } - lp->tx_new = TX_NEXT(entry); -} - -static int lance_open(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - int status = 0; - - STOP_LANCE(lp); - - if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED, - lancestr, (void *) dev)) { - printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq); - return -EAGAIN; - } - - /* On the 4m, setup the ledma to provide the upper bits for buffers */ - if (lp->dregs) { - u32 regval = lp->init_block_dvma & 0xff000000; - - sbus_writel(regval, lp->dregs + DMA_TEST); - } - - /* Set mode and clear multicast filter only at device open, - * so that lance_init_ring() called at any error will not - * forget multicast filters. - * - * BTW it is common bug in all lance drivers! 
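
build_fake_packet() above queues a minimum-length frame addressed from and to the interface itself; lance_open() then transmits it so that, when auto_select has picked the wrong connector, the carrier-loss error surfaces immediately and the driver can switch between TPE and AUI. The frame construction for the plain-memory case, condensed (the helper name is hypothetical):

#include <linux/etherdevice.h>

/* Build a self-addressed, zero-payload, minimum-size Ethernet frame;
 * the caller flips OWN on its descriptor and kicks TDMD.
 */
static void build_loopback_frame(void *buf, const unsigned char *hwaddr)
{
	struct ethhdr *eth = buf;

	memset(buf, 0, ETH_ZLEN);
	memcpy(eth->h_dest, hwaddr, ETH_ALEN);
	memcpy(eth->h_source, hwaddr, ETH_ALEN);
}
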
--ANK - */ - if (lp->pio_buffer) { - struct lance_init_block __iomem *ib = lp->init_block_iomem; - sbus_writew(0, &ib->mode); - sbus_writel(0, &ib->filter[0]); - sbus_writel(0, &ib->filter[1]); - } else { - struct lance_init_block *ib = lp->init_block_mem; - ib->mode = 0; - ib->filter [0] = 0; - ib->filter [1] = 0; - } - - lp->init_ring(dev); - load_csrs(lp); - - netif_start_queue(dev); - - status = init_restart_lance(lp); - if (!status && lp->auto_select) { - build_fake_packet(lp); - sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); - } - - return status; -} - -static int lance_close(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - netif_stop_queue(dev); - del_timer_sync(&lp->multicast_timer); - - STOP_LANCE(lp); - - free_irq(dev->irq, (void *) dev); - return 0; -} - -static int lance_reset(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - int status; - - STOP_LANCE(lp); - - /* On the 4m, reset the dma too */ - if (lp->dregs) { - u32 csr, addr; - - printk(KERN_ERR "resetting ledma\n"); - csr = sbus_readl(lp->dregs + DMA_CSR); - sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); - udelay(200); - sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); - - addr = lp->init_block_dvma & 0xff000000; - sbus_writel(addr, lp->dregs + DMA_TEST); - } - lp->init_ring(dev); - load_csrs(lp); - dev->trans_start = jiffies; /* prevent tx timeout */ - status = init_restart_lance(lp); - return status; -} - -static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len) -{ - void __iomem *piobuf = dest; - u32 *p32; - u16 *p16; - u8 *p8; - - switch ((unsigned long)src & 0x3) { - case 0: - p32 = (u32 *) src; - while (len >= 4) { - sbus_writel(*p32, piobuf); - p32++; - piobuf += 4; - len -= 4; - } - src = (char *) p32; - break; - case 1: - case 3: - p8 = (u8 *) src; - while (len >= 4) { - u32 val; - - val = p8[0] << 24; - val |= p8[1] << 16; - val |= p8[2] << 8; - val |= p8[3]; - sbus_writel(val, piobuf); - p8 += 4; - piobuf += 4; - len -= 4; - } - src = (char *) p8; - break; - case 2: - p16 = (u16 *) src; - while (len >= 4) { - u32 val = p16[0]<<16 | p16[1]; - sbus_writel(val, piobuf); - p16 += 2; - piobuf += 4; - len -= 4; - } - src = (char *) p16; - break; - } - if (len >= 2) { - u16 val = src[0] << 8 | src[1]; - sbus_writew(val, piobuf); - src += 2; - piobuf += 2; - len -= 2; - } - if (len >= 1) - sbus_writeb(src[0], piobuf); -} - -static void lance_piozero(void __iomem *dest, int len) -{ - void __iomem *piobuf = dest; - - if ((unsigned long)piobuf & 1) { - sbus_writeb(0, piobuf); - piobuf += 1; - len -= 1; - if (len == 0) - return; - } - if (len == 1) { - sbus_writeb(0, piobuf); - return; - } - if ((unsigned long)piobuf & 2) { - sbus_writew(0, piobuf); - piobuf += 2; - len -= 2; - if (len == 0) - return; - } - while (len >= 4) { - sbus_writel(0, piobuf); - piobuf += 4; - len -= 4; - } - if (len >= 2) { - sbus_writew(0, piobuf); - piobuf += 2; - len -= 2; - } - if (len >= 1) - sbus_writeb(0, piobuf); -} - -static void lance_tx_timeout(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", - dev->name, sbus_readw(lp->lregs + RDP)); - lance_reset(dev); - netif_wake_queue(dev); -} - -static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - int entry, skblen, len; - - skblen = skb->len; - - len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; - - spin_lock_irq(&lp->lock); - - dev->stats.tx_bytes += len; - - entry = lp->tx_new & TX_RING_MOD_MASK; - if (lp->pio_buffer) { - struct lance_init_block __iomem *ib = lp->init_block_iomem; - sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length); - sbus_writew(0, &ib->btx_ring[entry].misc); - lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen); - if (len != skblen) - lance_piozero(&ib->tx_buf[entry][skblen], len - skblen); - sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); - } else { - struct lance_init_block *ib = lp->init_block_mem; - ib->btx_ring [entry].length = (-len) | 0xf000; - ib->btx_ring [entry].misc = 0; - skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); - if (len != skblen) - memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); - ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); - } - - lp->tx_new = TX_NEXT(entry); - - if (TX_BUFFS_AVAIL <= 0) - netif_stop_queue(dev); - - /* Kick the lance: transmit now */ - sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); - - /* Read back CSR to invalidate the E-Cache. - * This is needed, because DMA_DSBL_WR_INV is set. - */ - if (lp->dregs) - sbus_readw(lp->lregs + RDP); - - spin_unlock_irq(&lp->lock); - - dev_kfree_skb(skb); - - return NETDEV_TX_OK; -} - -/* taken from the depca driver */ -static void lance_load_multicast(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - struct netdev_hw_addr *ha; - u32 crc; - u32 val; - - /* set all multicast bits */ - if (dev->flags & IFF_ALLMULTI) - val = ~0; - else - val = 0; - - if (lp->pio_buffer) { - struct lance_init_block __iomem *ib = lp->init_block_iomem; - sbus_writel(val, &ib->filter[0]); - sbus_writel(val, &ib->filter[1]); - } else { - struct lance_init_block *ib = lp->init_block_mem; - ib->filter [0] = val; - ib->filter [1] = val; - } - - if (dev->flags & IFF_ALLMULTI) - return; - - /* Add addresses */ - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc_le(6, ha->addr); - crc = crc >> 26; - if (lp->pio_buffer) { - struct lance_init_block __iomem *ib = lp->init_block_iomem; - u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter; - u16 tmp = sbus_readw(&mcast_table[crc>>4]); - tmp |= 1 << (crc & 0xf); - sbus_writew(tmp, &mcast_table[crc>>4]); - } else { - struct lance_init_block *ib = lp->init_block_mem; - u16 *mcast_table = (u16 *) &ib->filter; - mcast_table [crc >> 4] |= 1 << (crc & 0xf); - } - } -} - -static void lance_set_multicast(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - struct lance_init_block *ib_mem = lp->init_block_mem; - struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem; - u16 mode; - - if (!netif_running(dev)) - return; - - if (lp->tx_old != lp->tx_new) { - mod_timer(&lp->multicast_timer, jiffies + 4); - netif_wake_queue(dev); - return; - } - - netif_stop_queue(dev); - - STOP_LANCE(lp); - lp->init_ring(dev); - - if (lp->pio_buffer) - mode = sbus_readw(&ib_iomem->mode); - else - mode = ib_mem->mode; - if (dev->flags & IFF_PROMISC) { - mode |= LE_MO_PROM; - if (lp->pio_buffer) - sbus_writew(mode, &ib_iomem->mode); - else - ib_mem->mode = mode; - } else { - mode &= ~LE_MO_PROM; - if (lp->pio_buffer) - sbus_writew(mode, &ib_iomem->mode); - else - ib_mem->mode = mode; - lance_load_multicast(dev); - } - load_csrs(lp); - init_restart_lance(lp); - netif_wake_queue(dev); -} - -static void lance_set_multicast_retry(unsigned long _opaque) -{ - struct net_device *dev = (struct net_device *) _opaque; - - 
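
lance_load_multicast() above drives the LANCE's 64-bit logical-address filter: the top six bits of the little-endian CRC-32 of each multicast address select one bit of the filter, which the driver addresses as four 16-bit words. The indexing for the memory-resident case, as a sketch:

#include <linux/crc32.h>

/* Set the filter bit for one multicast address: crc >> 26 gives a
 * 6-bit index, split into word (bits 5-4) and bit (bits 3-0).
 */
static void lance_set_filter_bit(u16 *filter_words, const unsigned char *addr)
{
	u32 crc = ether_crc_le(6, addr) >> 26;	/* 0..63 */

	filter_words[crc >> 4] |= 1 << (crc & 0xf);
}
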
lance_set_multicast(dev); -} - -static void lance_free_hwresources(struct lance_private *lp) -{ - if (lp->lregs) - of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE); - if (lp->dregs) { - struct platform_device *ledma = lp->ledma; - - of_iounmap(&ledma->resource[0], lp->dregs, - resource_size(&ledma->resource[0])); - } - if (lp->init_block_iomem) { - of_iounmap(&lp->lebuffer->resource[0], lp->init_block_iomem, - sizeof(struct lance_init_block)); - } else if (lp->init_block_mem) { - dma_free_coherent(&lp->op->dev, - sizeof(struct lance_init_block), - lp->init_block_mem, - lp->init_block_dvma); - } -} - -/* Ethtool support... */ -static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - strcpy(info->driver, "sunlance"); - strcpy(info->version, "2.02"); -} - -static const struct ethtool_ops sparc_lance_ethtool_ops = { - .get_drvinfo = sparc_lance_get_drvinfo, - .get_link = ethtool_op_get_link, -}; - -static const struct net_device_ops sparc_lance_ops = { - .ndo_open = lance_open, - .ndo_stop = lance_close, - .ndo_start_xmit = lance_start_xmit, - .ndo_set_multicast_list = lance_set_multicast, - .ndo_tx_timeout = lance_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __devinit sparc_lance_probe_one(struct platform_device *op, - struct platform_device *ledma, - struct platform_device *lebuffer) -{ - struct device_node *dp = op->dev.of_node; - static unsigned version_printed; - struct lance_private *lp; - struct net_device *dev; - int i; - - dev = alloc_etherdev(sizeof(struct lance_private) + 8); - if (!dev) - return -ENOMEM; - - lp = netdev_priv(dev); - - if (sparc_lance_debug && version_printed++ == 0) - printk (KERN_INFO "%s", version); - - spin_lock_init(&lp->lock); - - /* Copy the IDPROM ethernet address to the device structure, later we - * will copy the address in the device structure to the lance - * initialization block. 
- */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; - - /* Get the IO region */ - lp->lregs = of_ioremap(&op->resource[0], 0, - LANCE_REG_SIZE, lancestr); - if (!lp->lregs) { - printk(KERN_ERR "SunLance: Cannot map registers.\n"); - goto fail; - } - - lp->ledma = ledma; - if (lp->ledma) { - lp->dregs = of_ioremap(&ledma->resource[0], 0, - resource_size(&ledma->resource[0]), - "ledma"); - if (!lp->dregs) { - printk(KERN_ERR "SunLance: Cannot map " - "ledma registers.\n"); - goto fail; - } - } - - lp->op = op; - lp->lebuffer = lebuffer; - if (lebuffer) { - /* sanity check */ - if (lebuffer->resource[0].start & 7) { - printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n"); - goto fail; - } - lp->init_block_iomem = - of_ioremap(&lebuffer->resource[0], 0, - sizeof(struct lance_init_block), "lebuffer"); - if (!lp->init_block_iomem) { - printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n"); - goto fail; - } - lp->init_block_dvma = 0; - lp->pio_buffer = 1; - lp->init_ring = lance_init_ring_pio; - lp->rx = lance_rx_pio; - lp->tx = lance_tx_pio; - } else { - lp->init_block_mem = - dma_alloc_coherent(&op->dev, - sizeof(struct lance_init_block), - &lp->init_block_dvma, GFP_ATOMIC); - if (!lp->init_block_mem) { - printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n"); - goto fail; - } - lp->pio_buffer = 0; - lp->init_ring = lance_init_ring_dvma; - lp->rx = lance_rx_dvma; - lp->tx = lance_tx_dvma; - } - lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval", - (LE_C3_BSWP | - LE_C3_ACON | - LE_C3_BCON)); - - lp->name = lancestr; - - lp->burst_sizes = 0; - if (lp->ledma) { - struct device_node *ledma_dp = ledma->dev.of_node; - struct device_node *sbus_dp; - unsigned int sbmask; - const char *prop; - u32 csr; - - /* Find burst-size property for ledma */ - lp->burst_sizes = of_getintprop_default(ledma_dp, - "burst-sizes", 0); - - /* ledma may be capable of fast bursts, but sbus may not. */ - sbus_dp = ledma_dp->parent; - sbmask = of_getintprop_default(sbus_dp, "burst-sizes", - DMA_BURSTBITS); - lp->burst_sizes &= sbmask; - - /* Get the cable-selection property */ - prop = of_get_property(ledma_dp, "cable-selection", NULL); - if (!prop || prop[0] == '\0') { - struct device_node *nd; - - printk(KERN_INFO "SunLance: using " - "auto-carrier-detection.\n"); - - nd = of_find_node_by_path("/options"); - if (!nd) - goto no_link_test; - - prop = of_get_property(nd, "tpe-link-test?", NULL); - if (!prop) - goto no_link_test; - - if (strcmp(prop, "true")) { - printk(KERN_NOTICE "SunLance: warning: overriding option " - "'tpe-link-test?'\n"); - printk(KERN_NOTICE "SunLance: warning: mail any problems " - "to ecd@skynet.be\n"); - auxio_set_lte(AUXIO_LTE_ON); - } -no_link_test: - lp->auto_select = 1; - lp->tpe = 0; - } else if (!strcmp(prop, "aui")) { - lp->auto_select = 0; - lp->tpe = 0; - } else { - lp->auto_select = 0; - lp->tpe = 1; - } - - /* Reset ledma */ - csr = sbus_readl(lp->dregs + DMA_CSR); - sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); - udelay(200); - sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); - } else - lp->dregs = NULL; - - lp->dev = dev; - SET_NETDEV_DEV(dev, &op->dev); - dev->watchdog_timeo = 5*HZ; - dev->ethtool_ops = &sparc_lance_ethtool_ops; - dev->netdev_ops = &sparc_lance_ops; - - dev->irq = op->archdata.irqs[0]; - - /* We cannot sleep if the chip is busy during a - * multicast list update event, because such events - * can occur from interrupts (ex. IPv6). 
So we - * use a timer to try again later when necessary. -DaveM - */ - init_timer(&lp->multicast_timer); - lp->multicast_timer.data = (unsigned long) dev; - lp->multicast_timer.function = lance_set_multicast_retry; - - if (register_netdev(dev)) { - printk(KERN_ERR "SunLance: Cannot register device.\n"); - goto fail; - } - - dev_set_drvdata(&op->dev, lp); - - printk(KERN_INFO "%s: LANCE %pM\n", - dev->name, dev->dev_addr); - - return 0; - -fail: - lance_free_hwresources(lp); - free_netdev(dev); - return -ENODEV; -} - -static int __devinit sunlance_sbus_probe(struct platform_device *op) -{ - struct platform_device *parent = to_platform_device(op->dev.parent); - struct device_node *parent_dp = parent->dev.of_node; - int err; - - if (!strcmp(parent_dp->name, "ledma")) { - err = sparc_lance_probe_one(op, parent, NULL); - } else if (!strcmp(parent_dp->name, "lebuffer")) { - err = sparc_lance_probe_one(op, NULL, parent); - } else - err = sparc_lance_probe_one(op, NULL, NULL); - - return err; -} - -static int __devexit sunlance_sbus_remove(struct platform_device *op) -{ - struct lance_private *lp = dev_get_drvdata(&op->dev); - struct net_device *net_dev = lp->dev; - - unregister_netdev(net_dev); - - lance_free_hwresources(lp); - - free_netdev(net_dev); - - dev_set_drvdata(&op->dev, NULL); - - return 0; -} - -static const struct of_device_id sunlance_sbus_match[] = { - { - .name = "le", - }, - {}, -}; - -MODULE_DEVICE_TABLE(of, sunlance_sbus_match); - -static struct platform_driver sunlance_sbus_driver = { - .driver = { - .name = "sunlance", - .owner = THIS_MODULE, - .of_match_table = sunlance_sbus_match, - }, - .probe = sunlance_sbus_probe, - .remove = __devexit_p(sunlance_sbus_remove), -}; - - -/* Find all the lance cards on the system and initialize them */ -static int __init sparc_lance_init(void) -{ - return platform_driver_register(&sunlance_sbus_driver); -} - -static void __exit sparc_lance_exit(void) -{ - platform_driver_unregister(&sunlance_sbus_driver); -} - -module_init(sparc_lance_init); -module_exit(sparc_lance_exit); -- cgit v0.10.2 From 644570b830266ff33ff5f3542b9c838f93a55ea6 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Sat, 2 Apr 2011 06:20:12 -0700 Subject: 8390: Move the 8390 related drivers Moves the drivers for the National Semi-conductor 8390 chipset into drivers/net/ethernet/8390/ and the necessary Kconfig and Makefile changes. CC: Donald Becker CC: Paul Gortmaker CC: Alain Malek CC: Peter De Schrijver CC: "David Huggins-Daines" CC: Wim Dumon CC: Yoshinori Sato CC: David Hinds CC: Russell King Signed-off-by: Jeff Kirsher diff --git a/MAINTAINERS b/MAINTAINERS index 6f9dc94..c694583 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -170,8 +170,7 @@ F: include/linux/serial_8250.h 8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.] L: netdev@vger.kernel.org S: Orphan / Obsolete -F: drivers/net/*8390* -F: drivers/net/ax88796.c +F: drivers/net/ethernet/8390/ 9P FILE SYSTEM M: Eric Van Hensbergen @@ -6568,7 +6567,7 @@ W: http://uclinux-h8.sourceforge.jp/ S: Supported F: arch/h8300/ F: drivers/ide/ide-h8300.c -F: drivers/net/ne-h8300.c +F: drivers/net/ethernet/8390/ne-h8300.c UDF FILESYSTEM M: Jan Kara diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c deleted file mode 100644 index 84e68f1..0000000 --- a/drivers/net/3c503.c +++ /dev/null @@ -1,778 +0,0 @@ -/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */ -/* - Written 1992-94 by Donald Becker. 
- - Copyright 1993 United States Government as represented by the - Director, National Security Agency. This software may be used and - distributed according to the terms of the GNU General Public License, - incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - - This driver should work with the 3c503 and 3c503/16. It should be used - in shared memory mode for best performance, although it may also work - in programmed-I/O mode. - - Sources: - EtherLink II Technical Reference Manual, - EtherLink II/16 Technical Reference Manual Supplement, - 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145 - - The Crynwr 3c503 packet driver. - - Changelog: - - Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards. - Paul Gortmaker : multiple card support for module users. - rjohnson@analogic.com : Fix up PIO interface for efficient operation. - Jeff Garzik : ethtool support - -*/ - -#define DRV_NAME "3c503" -#define DRV_VERSION "1.10a" -#define DRV_RELDATE "11/17/2001" - - -static const char version[] = - DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "8390.h" -#include "3c503.h" -#define WRD_COUNT 4 - -static int el2_pio_probe(struct net_device *dev); -static int el2_probe1(struct net_device *dev, int ioaddr); - -/* A zero-terminated list of I/O addresses to be probed in PIO mode. */ -static unsigned int netcard_portlist[] __initdata = - { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0}; - -#define EL2_IO_EXTENT 16 - -static int el2_open(struct net_device *dev); -static int el2_close(struct net_device *dev); -static void el2_reset_8390(struct net_device *dev); -static void el2_init_card(struct net_device *dev); -static void el2_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static void el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset); -static void el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static const struct ethtool_ops netdev_ethtool_ops; - - -/* This routine probes for a memory-mapped 3c503 board by looking for - the "location register" at the end of the jumpered boot PROM space. - This works even if a PROM isn't there. - - If the ethercard isn't found there is an optional probe for - ethercard jumpered to programmed-I/O mode. - */ -static int __init do_el2_probe(struct net_device *dev) -{ - int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0}; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return el2_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - for (addr = addrs; *addr; addr++) { - void __iomem *p = ioremap(*addr, 1); - unsigned base_bits; - int i; - - if (!p) - continue; - base_bits = readb(p); - iounmap(p); - i = ffs(base_bits) - 1; - if (i == -1 || base_bits != (1 << i)) - continue; - if (el2_probe1(dev, netcard_portlist[i]) == 0) - return 0; - dev->irq = irq; - } -#if ! defined(no_probe_nonshared_memory) - return el2_pio_probe(dev); -#else - return -ENODEV; -#endif -} - -/* Try all of the locations that aren't obviously empty. 
This touches - a lot of locations, and is much riskier than the code above. */ -static int __init -el2_pio_probe(struct net_device *dev) -{ - int i; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return el2_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - for (i = 0; netcard_portlist[i]; i++) { - if (el2_probe1(dev, netcard_portlist[i]) == 0) - return 0; - dev->irq = irq; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init el2_probe(int unit) -{ - struct net_device *dev = alloc_eip_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_el2_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops el2_netdev_ops = { - .ndo_open = el2_open, - .ndo_stop = el2_close, - - .ndo_start_xmit = eip_start_xmit, - .ndo_tx_timeout = eip_tx_timeout, - .ndo_get_stats = eip_get_stats, - .ndo_set_multicast_list = eip_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = eip_poll, -#endif -}; - -/* Probe for the Etherlink II card at I/O port base IOADDR, - returning non-zero on success. If found, set the station - address and memory parameters in DEVICE. */ -static int __init -el2_probe1(struct net_device *dev, int ioaddr) -{ - int i, iobase_reg, membase_reg, saved_406, wordlength, retval; - static unsigned version_printed; - unsigned long vendor_id; - - if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - if (!request_region(ioaddr + 0x400, 8, DRV_NAME)) { - retval = -EBUSY; - goto out; - } - - /* Reset and/or avoid any lurking NE2000 */ - if (inb(ioaddr + 0x408) == 0xff) { - mdelay(1); - retval = -ENODEV; - goto out1; - } - - /* We verify that it's a 3C503 board by checking the first three octets - of its ethernet address. */ - iobase_reg = inb(ioaddr+0x403); - membase_reg = inb(ioaddr+0x404); - /* ASIC location registers should be 0 or have only a single bit set. */ - if ((iobase_reg & (iobase_reg - 1)) || - (membase_reg & (membase_reg - 1))) { - retval = -ENODEV; - goto out1; - } - saved_406 = inb_p(ioaddr + 0x406); - outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */ - outb_p(ECNTRL_THIN, ioaddr + 0x406); - /* Map the station addr PROM into the lower I/O ports. We now check - for both the old and new 3Com prefix */ - outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406); - vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2); - if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) { - /* Restore the register we frobbed. */ - outb(saved_406, ioaddr + 0x406); - retval = -ENODEV; - goto out1; - } - - if (ei_debug && version_printed++ == 0) - pr_debug("%s", version); - - dev->base_addr = ioaddr; - - pr_info("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr); - - /* Retrieve and print the ethernet address. */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + i); - pr_cont("%pM", dev->dev_addr); - - /* Map the 8390 back into the window. */ - outb(ECNTRL_THIN, ioaddr + 0x406); - - /* Check for EL2/16 as described in tech. man. 
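
The probe above sanity-checks the ASIC location registers with the classic x & (x - 1) test, which is zero exactly when x is zero or a power of two, i.e. when at most one bit is set. As a standalone predicate:

/* True if v has at most one bit set -- the only values a healthy
 * 3c503 location register can report.
 */
static int at_most_one_bit_set(unsigned int v)
{
	return (v & (v - 1)) == 0;
}
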
*/ - outb_p(E8390_PAGE0, ioaddr + E8390_CMD); - outb_p(0, ioaddr + EN0_DCFG); - outb_p(E8390_PAGE2, ioaddr + E8390_CMD); - wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS; - outb_p(E8390_PAGE0, ioaddr + E8390_CMD); - - /* Probe for, turn on and clear the board's shared memory. */ - if (ei_debug > 2) - pr_cont(" memory jumpers %2.2x ", membase_reg); - outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */ - - /* This should be probed for (or set via an ioctl()) at run-time. - Right now we use a sleazy hack to pass in the interface number - at boot-time via the low bits of the mem_end field. That value is - unused, and the low bits would be discarded even if it was used. */ -#if defined(EI8390_THICK) || defined(EL2_AUI) - ei_status.interface_num = 1; -#else - ei_status.interface_num = dev->mem_end & 0xf; -#endif - pr_cont(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex"); - - if ((membase_reg & 0xf0) == 0) { - dev->mem_start = 0; - ei_status.name = "3c503-PIO"; - ei_status.mem = NULL; - } else { - dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) + - ((membase_reg & 0xA0) ? 0x4000 : 0); -#define EL2_MEMSIZE (EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256 - ei_status.mem = ioremap(dev->mem_start, EL2_MEMSIZE); - -#ifdef EL2MEMTEST - /* This has never found an error, but someone might care. - Note that it only tests the 2nd 8kB on 16kB 3c503/16 - cards between card addr. 0x2000 and 0x3fff. */ - { /* Check the card's memory. */ - void __iomem *mem_base = ei_status.mem; - unsigned int test_val = 0xbbadf00d; - writel(0xba5eba5e, mem_base); - for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) { - writel(test_val, mem_base + i); - if (readl(mem_base) != 0xba5eba5e || - readl(mem_base + i) != test_val) { - pr_warning("3c503: memory failure or memory address conflict.\n"); - dev->mem_start = 0; - ei_status.name = "3c503-PIO"; - iounmap(mem_base); - ei_status.mem = NULL; - break; - } - test_val += 0x55555555; - writel(0, mem_base + i); - } - } -#endif /* EL2MEMTEST */ - - if (dev->mem_start) - dev->mem_end = dev->mem_start + EL2_MEMSIZE; - - if (wordlength) { /* No Tx pages to skip over to get to Rx */ - ei_status.priv = 0; - ei_status.name = "3c503/16"; - } else { - ei_status.priv = TX_PAGES * 256; - ei_status.name = "3c503"; - } - } - - /* - Divide up the memory on the card. This is the same regardless of - whether shared-mem or PIO is used. For 16 bit cards (16kB RAM), - we use the entire 8k of bank1 for an Rx ring. We only use 3k - of the bank0 for 2 full size Tx packet slots. For 8 bit cards, - (8kB RAM) we use 3kB of bank1 for two Tx slots, and the remaining - 5kB for an Rx ring. */ - - if (wordlength) { - ei_status.tx_start_page = EL2_MB0_START_PG; - ei_status.rx_start_page = EL2_MB1_START_PG; - } else { - ei_status.tx_start_page = EL2_MB1_START_PG; - ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES; - } - - /* Finish setting the board's parameters. 
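/*
 * Illustrative sketch, not from the original file: the buffer split
 * described above, in numbers. Pages are 256 bytes, bank 1 spans pages
 * 0x20-0x3f (8 kB), and TX_PAGES is 12 (two Tx slots, per 8390.h
 * further down). 16-bit cards keep Tx in bank 0, so all of bank 1 is
 * the Rx ring; 8-bit cards give up the front of bank 1 to Tx.
 */
static unsigned int el2_rx_ring_bytes(int word16)	/* hypothetical helper */
{
	int rx_start = word16 ? 0x20 : 0x20 + 12;	/* MB1_START (+ TX_PAGES) */

	return (0x40 - rx_start) * 256;		/* /16: 8192; 8-bit: 5120 */
}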
*/ - ei_status.stop_page = EL2_MB1_STOP_PG; - ei_status.word16 = wordlength; - ei_status.reset_8390 = el2_reset_8390; - ei_status.get_8390_hdr = el2_get_8390_hdr; - ei_status.block_input = el2_block_input; - ei_status.block_output = el2_block_output; - - if (dev->irq == 2) - dev->irq = 9; - else if (dev->irq > 5 && dev->irq != 9) { - pr_warning("3c503: configured interrupt %d invalid, will use autoIRQ.\n", - dev->irq); - dev->irq = 0; - } - - ei_status.saved_irq = dev->irq; - - dev->netdev_ops = &el2_netdev_ops; - dev->ethtool_ops = &netdev_ethtool_ops; - - retval = register_netdev(dev); - if (retval) - goto out1; - - if (dev->mem_start) - pr_info("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n", - dev->name, ei_status.name, (wordlength+1)<<3, - dev->mem_start, dev->mem_end-1); - - else - { - ei_status.tx_start_page = EL2_MB1_START_PG; - ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES; - pr_info("%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n", - dev->name, ei_status.name, (wordlength+1)<<3); - } - release_region(ioaddr + 0x400, 8); - return 0; -out1: - release_region(ioaddr + 0x400, 8); -out: - release_region(ioaddr, EL2_IO_EXTENT); - return retval; -} - -static irqreturn_t el2_probe_interrupt(int irq, void *seen) -{ - *(bool *)seen = true; - return IRQ_HANDLED; -} - -static int -el2_open(struct net_device *dev) -{ - int retval; - - if (dev->irq < 2) { - static const int irqlist[] = {5, 9, 3, 4, 0}; - const int *irqp = irqlist; - - outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */ - do { - bool seen; - - retval = request_irq(*irqp, el2_probe_interrupt, 0, - dev->name, &seen); - if (retval == -EBUSY) - continue; - if (retval < 0) - goto err_disable; - - /* Twinkle the interrupt, and check if it's seen. */ - seen = false; - smp_wmb(); - outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR); - outb_p(0x00, E33G_IDCFR); - msleep(1); - free_irq(*irqp, &seen); - if (!seen) - continue; - - retval = request_irq(dev->irq = *irqp, eip_interrupt, 0, - dev->name, dev); - if (retval == -EBUSY) - continue; - if (retval < 0) - goto err_disable; - break; - } while (*++irqp); - - if (*irqp == 0) { - err_disable: - outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */ - return -EAGAIN; - } - } else { - if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) { - return retval; - } - } - - el2_init_card(dev); - eip_open(dev); - return 0; -} - -static int -el2_close(struct net_device *dev) -{ - free_irq(dev->irq, dev); - dev->irq = ei_status.saved_irq; - outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */ - - eip_close(dev); - return 0; -} - -/* This is called whenever we have a unrecoverable failure: - transmit timeout - Bad ring buffer packet header - */ -static void -el2_reset_8390(struct net_device *dev) -{ - if (ei_debug > 1) { - pr_debug("%s: Resetting the 3c503 board...", dev->name); - pr_cont(" %#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR), - E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR)); - } - outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL); - ei_status.txing = 0; - outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); - el2_init_card(dev); - if (ei_debug > 1) - pr_cont("done\n"); -} - -/* Initialize the 3c503 GA registers after a reset. */ -static void -el2_init_card(struct net_device *dev) -{ - /* Unmap the station PROM and select the DIX or BNC connector. */ - outb_p(ei_status.interface_num==0 ? 
ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); - - /* Set ASIC copy of rx's first and last+1 buffer pages */ - /* These must be the same as in the 8390. */ - outb(ei_status.rx_start_page, E33G_STARTPG); - outb(ei_status.stop_page, E33G_STOPPG); - - /* Point the vector pointer registers somewhere ?harmless?. */ - outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */ - outb(0xff, E33G_VP1); - outb(0x00, E33G_VP0); - /* Turn off all interrupts until we're opened. */ - outb_p(0x00, dev->base_addr + EN0_IMR); - /* Enable IRQs iff started. */ - outb(EGACFR_NORM, E33G_GACFR); - - /* Set the interrupt line. */ - outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR); - outb_p((WRD_COUNT << 1), E33G_DRQCNT); /* Set burst size to 8 */ - outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */ - outb_p(0x00, E33G_DMAAL); - return; /* We always succeed */ -} - -/* - * Either use the shared memory (if enabled on the board) or put the packet - * out through the ASIC FIFO. - */ -static void -el2_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - unsigned short int *wrd; - int boguscount; /* timeout counter */ - unsigned short word; /* temporary for better machine code */ - void __iomem *base = ei_status.mem; - - if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */ - outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR); - else - outb(EGACFR_NORM, E33G_GACFR); - - if (base) { /* Shared memory transfer */ - memcpy_toio(base + ((start_page - ei_status.tx_start_page) << 8), - buf, count); - outb(EGACFR_NORM, E33G_GACFR); /* Back to bank1 in case on bank0 */ - return; - } - -/* - * No shared memory, put the packet out the other way. - * Set up then start the internal memory transfer to Tx Start Page - */ - - word = (unsigned short)start_page; - outb(word&0xFF, E33G_DMAAH); - outb(word>>8, E33G_DMAAL); - - outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT - | ECNTRL_START, E33G_CNTRL); - -/* - * Here I am going to write data to the FIFO as quickly as possible. - * Note that E33G_FIFOH is defined incorrectly. It is really - * E33G_FIFOL, the lowest port address for both the byte and - * word write. Variable 'count' is NOT checked. Caller must supply a - * valid count. Note that I may write a harmless extra byte to the - * 8390 if the byte-count was not even. - */ - wrd = (unsigned short int *) buf; - count = (count + 1) >> 1; - for(;;) - { - boguscount = 0x1000; - while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) - { - if(!boguscount--) - { - pr_notice("%s: FIFO blocked in el2_block_output.\n", dev->name); - el2_reset_8390(dev); - goto blocked; - } - } - if(count > WRD_COUNT) - { - outsw(E33G_FIFOH, wrd, WRD_COUNT); - wrd += WRD_COUNT; - count -= WRD_COUNT; - } - else - { - outsw(E33G_FIFOH, wrd, count); - break; - } - } - blocked:; - outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); -} - -/* Read the 4 byte, page aligned 8390 specific header. */ -static void -el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - int boguscount; - void __iomem *base = ei_status.mem; - unsigned short word; - - if (base) { /* Use the shared memory. */ - void __iomem *hdr_start = base + ((ring_page - EL2_MB1_START_PG)<<8); - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = le16_to_cpu(hdr->count); - return; - } - -/* - * No shared memory, use programmed I/O. 
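/*
 * Illustrative sketch, not from the original file: the three PIO paths
 * (output above, header fetch and input below) share one idiom: spin
 * on ESTAT_DPRDY with a bounded counter, then burst WRD_COUNT words
 * through the FIFO. Factored out, with the status port passed in since
 * the real E33G_STATUS macro needs `dev`:
 */
static int el2_fifo_wait(unsigned int status_port)	/* hypothetical helper */
{
	int boguscount = 0x1000;	/* bounded spin; no clock needed */

	while ((inb(status_port) & ESTAT_DPRDY) == 0)
		if (!boguscount--)
			return -1;	/* caller resets the 8390 and bails */
	return 0;	/* ready: now outsw()/insw() up to WRD_COUNT words */
}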
- */ - - word = (unsigned short)ring_page; - outb(word&0xFF, E33G_DMAAH); - outb(word>>8, E33G_DMAAL); - - outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT - | ECNTRL_START, E33G_CNTRL); - boguscount = 0x1000; - while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) - { - if(!boguscount--) - { - pr_notice("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name); - memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr)); - el2_reset_8390(dev); - goto blocked; - } - } - insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1); - blocked:; - outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); -} - - -static void -el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - int boguscount = 0; - void __iomem *base = ei_status.mem; - unsigned short int *buf; - unsigned short word; - - /* Maybe enable shared memory just be to be safe... nahh.*/ - if (base) { /* Use the shared memory. */ - ring_offset -= (EL2_MB1_START_PG<<8); - if (ring_offset + count > EL2_MEMSIZE) { - /* We must wrap the input move. */ - int semi_count = EL2_MEMSIZE - ring_offset; - memcpy_fromio(skb->data, base + ring_offset, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count); - } else { - memcpy_fromio(skb->data, base + ring_offset, count); - } - return; - } - -/* - * No shared memory, use programmed I/O. - */ - word = (unsigned short) ring_offset; - outb(word>>8, E33G_DMAAH); - outb(word&0xFF, E33G_DMAAL); - - outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT - | ECNTRL_START, E33G_CNTRL); - -/* - * Here I also try to get data as fast as possible. I am betting that I - * can read one extra byte without clobbering anything in the kernel because - * this would only occur on an odd byte-count and allocation of skb->data - * is word-aligned. Variable 'count' is NOT checked. Caller must check - * for a valid count. - * [This is currently quite safe.... but if one day the 3c503 explodes - * you know where to come looking ;)] - */ - - buf = (unsigned short int *) skb->data; - count = (count + 1) >> 1; - for(;;) - { - boguscount = 0x1000; - while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) - { - if(!boguscount--) - { - pr_notice("%s: FIFO blocked in el2_block_input.\n", dev->name); - el2_reset_8390(dev); - goto blocked; - } - } - if(count > WRD_COUNT) - { - insw(E33G_FIFOH, buf, WRD_COUNT); - buf += WRD_COUNT; - count -= WRD_COUNT; - } - else - { - insw(E33G_FIFOH, buf, count); - break; - } - } - blocked:; - outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); -} - - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, -}; - -#ifdef MODULE -#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */ - -static struct net_device *dev_el2[MAX_EL2_CARDS]; -static int io[MAX_EL2_CARDS]; -static int irq[MAX_EL2_CARDS]; -static int xcvr[MAX_EL2_CARDS]; /* choose int. or ext. 
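/*
 * Illustrative sketch, not from the original file: the "one extra
 * byte" bet in el2_block_input() above is plain arithmetic. Reading
 * count bytes as whole 16-bit words moves (count + 1) >> 1 words, at
 * most count + 1 bytes; with a word-aligned destination that single
 * spare byte stays inside the allocation.
 */
static inline int el2_pio_words(int count)	/* hypothetical helper */
{
	return (count + 1) >> 1;	/* e.g. 59 bytes -> 30 words -> 60 bytes */
}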
xcvr */ -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(xcvr, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); -MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)"); -MODULE_DESCRIPTION("3Com ISA EtherLink II, II/16 (3c503, 3c503/16) driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that only a single autoprobe takes place per call. -ISA device autoprobes on a running machine are not recommended. */ -int __init -init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) break; /* only autoprobe 1st one */ - pr_notice("3c503.c: Presently autoprobing (not recommended) for a single card.\n"); - } - dev = alloc_eip_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */ - if (do_el2_probe(dev) == 0) { - dev_el2[found++] = dev; - continue; - } - free_netdev(dev); - pr_warning("3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - /* NB: el2_close() handles free_irq */ - release_region(dev->base_addr, EL2_IO_EXTENT); - if (ei_status.mem) - iounmap(ei_status.mem); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) { - struct net_device *dev = dev_el2[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/3c503.h b/drivers/net/3c503.h deleted file mode 100644 index e2367b8..0000000 --- a/drivers/net/3c503.h +++ /dev/null @@ -1,91 +0,0 @@ -/* Definitions for the 3Com 3c503 Etherlink 2. */ -/* This file is distributed under the GPL. - Many of these names and comments are directly from the Crynwr packet - drivers, which are released under the GPL. */ - -#define EL2H (dev->base_addr + 0x400) -#define EL2L (dev->base_addr) - -/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran - out of available addresses on the first one... */ - -#define OLD_3COM_ID 0x02608c -#define NEW_3COM_ID 0x0020af - -/* Shared memory management parameters. NB: The 8 bit cards have only - one bank (MB1) which serves both Tx and Rx packet space. The 16bit - cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets. - You choose which bank appears in the sh. mem window with EGACFR_MBSn */ - -#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */ -#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */ -#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */ - -/* 3Com 3c503 ASIC registers */ -#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */ -#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */ -#define E33G_DRQCNT (EL2H+2) /* DMA burst count */ -#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */ - /* (non-useful, but it also appears at the end of EPROM space) */ -#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */ -#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */ -#define E33G_CNTRL (EL2H+6) /* Board's main control register */ -#define E33G_STATUS (EL2H+7) /* Status on completions. 
*/ -#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */ - /* (Which IRQ to assert, DMA chan to use) */ -#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */ -#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */ -/* "Vector pointer" - if this address matches a read, the EPROM (rather than - shared RAM) is mapped into memory space. */ -#define E33G_VP2 (EL2H+11) -#define E33G_VP1 (EL2H+12) -#define E33G_VP0 (EL2H+13) -#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */ -#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */ - -/* Bits in E33G_CNTRL register: */ - -#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */ -#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */ -#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */ -#define ECNTRL_SAPROM (0x04) /* Map the station address prom */ -#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */ -#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */ -#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */ -#define ECNTRL_START (0x80) /* Start the DMA logic */ - -/* Bits in E33G_STATUS register: */ - -#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */ -#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */ -#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */ -#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */ -#define ESTAT_DIP (0x08) /* DMA In Progress */ - -/* Bits in E33G_GACFR register: */ - -#define EGACFR_NIM (0x80) /* NIC interrupt mask */ -#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */ -#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */ -#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */ -#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */ -#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */ - -#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */ -#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */ - -/* - MBS2 MBS1 MBS0 Sh. mem windows card mem at: - ---- ---- ---- ----------------------------- - 0 0 0 0x0000 -- bank 0 - 0 0 1 0x2000 -- bank 1 (only choice for 8bit card) - 0 1 0 0x4000 -- bank 2, not used - 0 1 1 0x6000 -- bank 3, not used - -There was going to be a 32k card that used bank 2 and 3, but it -never got produced. 
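/*
 * Illustrative sketch, not from the original file: cross-checking the
 * table above against the GACFR bits, EGACFR_NORM (0x49) is
 * TCM | RSEL | MBS0: terminal-count interrupts masked, card RAM mapped
 * in, and bank 1 selected (the only legal window on 8-bit cards).
 */
static unsigned int el2_gacfr_window(unsigned char gacfr)	/* hypothetical */
{
	if (!(gacfr & 0x08))		/* RSEL clear: card RAM unmapped */
		return 0;
	return 0x2000 * (gacfr & 0x07);	/* 0x49 -> bank 1 at 0x2000 */
}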
- -*/ - - -/* End of 3C503 parameter definitions */ diff --git a/drivers/net/8390.c b/drivers/net/8390.c deleted file mode 100644 index 7c7518b..0000000 --- a/drivers/net/8390.c +++ /dev/null @@ -1,103 +0,0 @@ -/* 8390 core for usual drivers */ - -static const char version[] = - "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include "lib8390.c" - -int ei_open(struct net_device *dev) -{ - return __ei_open(dev); -} -EXPORT_SYMBOL(ei_open); - -int ei_close(struct net_device *dev) -{ - return __ei_close(dev); -} -EXPORT_SYMBOL(ei_close); - -netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - return __ei_start_xmit(skb, dev); -} -EXPORT_SYMBOL(ei_start_xmit); - -struct net_device_stats *ei_get_stats(struct net_device *dev) -{ - return __ei_get_stats(dev); -} -EXPORT_SYMBOL(ei_get_stats); - -void ei_set_multicast_list(struct net_device *dev) -{ - __ei_set_multicast_list(dev); -} -EXPORT_SYMBOL(ei_set_multicast_list); - -void ei_tx_timeout(struct net_device *dev) -{ - __ei_tx_timeout(dev); -} -EXPORT_SYMBOL(ei_tx_timeout); - -irqreturn_t ei_interrupt(int irq, void *dev_id) -{ - return __ei_interrupt(irq, dev_id); -} -EXPORT_SYMBOL(ei_interrupt); - -#ifdef CONFIG_NET_POLL_CONTROLLER -void ei_poll(struct net_device *dev) -{ - __ei_poll(dev); -} -EXPORT_SYMBOL(ei_poll); -#endif - -const struct net_device_ops ei_netdev_ops = { - .ndo_open = ei_open, - .ndo_stop = ei_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; -EXPORT_SYMBOL(ei_netdev_ops); - -struct net_device *__alloc_ei_netdev(int size) -{ - struct net_device *dev = ____alloc_ei_netdev(size); - if (dev) - dev->netdev_ops = &ei_netdev_ops; - return dev; -} -EXPORT_SYMBOL(__alloc_ei_netdev); - -void NS8390_init(struct net_device *dev, int startp) -{ - __NS8390_init(dev, startp); -} -EXPORT_SYMBOL(NS8390_init); - -#if defined(MODULE) - -static int __init ns8390_module_init(void) -{ - return 0; -} - -static void __exit ns8390_module_exit(void) -{ -} - -module_init(ns8390_module_init); -module_exit(ns8390_module_exit); -#endif /* MODULE */ -MODULE_LICENSE("GPL"); diff --git a/drivers/net/8390.h b/drivers/net/8390.h deleted file mode 100644 index 58a12e4..0000000 --- a/drivers/net/8390.h +++ /dev/null @@ -1,232 +0,0 @@ -/* Generic NS8390 register definitions. */ -/* This file is part of Donald Becker's 8390 drivers, and is distributed - under the same license. Auto-loading of 8390.o only in v2.2 - Paul G. - Some of these names and comments originated from the Crynwr - packet drivers, which are distributed under the GPL. */ - -#ifndef _8390_h -#define _8390_h - -#include -#include -#include -#include - -#define TX_PAGES 12 /* Two Tx slots */ - -#define ETHER_ADDR_LEN 6 - -/* The 8390 specific per-packet-header format. */ -struct e8390_pkt_hdr { - unsigned char status; /* status */ - unsigned char next; /* pointer to next packet. 
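/*
 * Aside, assumption-flagged sketch (lib8390.c itself is not shown in
 * this patch): 8390.c above and 8390p.c below are the same core
 * compiled twice. lib8390.c touches hardware only through the
 * ei_inb()/ei_outb() macro family, so each wrapper binds those before
 * textual inclusion; 8390p.c keeps the ISA recovery delays:
 *
 *	#define ei_inb_p(_p)		inb_p(_p)
 *	#define ei_outb_p(_v, _p)	outb_p(_v, _p)
 *	#include "lib8390.c"
 *
 * while 8390.c takes the delay-free defaults visible in 8390.h below.
 */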
*/ - unsigned short count; /* header + packet length in bytes */ -}; - -#ifdef notdef -extern int ei_debug; -#else -#define ei_debug 1 -#endif - -#ifdef CONFIG_NET_POLL_CONTROLLER -extern void ei_poll(struct net_device *dev); -extern void eip_poll(struct net_device *dev); -#endif - - -/* Without I/O delay - non ISA or later chips */ -extern void NS8390_init(struct net_device *dev, int startp); -extern int ei_open(struct net_device *dev); -extern int ei_close(struct net_device *dev); -extern irqreturn_t ei_interrupt(int irq, void *dev_id); -extern void ei_tx_timeout(struct net_device *dev); -extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev); -extern void ei_set_multicast_list(struct net_device *dev); -extern struct net_device_stats *ei_get_stats(struct net_device *dev); - -extern const struct net_device_ops ei_netdev_ops; - -extern struct net_device *__alloc_ei_netdev(int size); -static inline struct net_device *alloc_ei_netdev(void) -{ - return __alloc_ei_netdev(0); -} - -/* With I/O delay form */ -extern void NS8390p_init(struct net_device *dev, int startp); -extern int eip_open(struct net_device *dev); -extern int eip_close(struct net_device *dev); -extern irqreturn_t eip_interrupt(int irq, void *dev_id); -extern void eip_tx_timeout(struct net_device *dev); -extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev); -extern void eip_set_multicast_list(struct net_device *dev); -extern struct net_device_stats *eip_get_stats(struct net_device *dev); - -extern const struct net_device_ops eip_netdev_ops; - -extern struct net_device *__alloc_eip_netdev(int size); -static inline struct net_device *alloc_eip_netdev(void) -{ - return __alloc_eip_netdev(0); -} - -/* You have one of these per-board */ -struct ei_device { - const char *name; - void (*reset_8390)(struct net_device *); - void (*get_8390_hdr)(struct net_device *, struct e8390_pkt_hdr *, int); - void (*block_output)(struct net_device *, int, const unsigned char *, int); - void (*block_input)(struct net_device *, int, struct sk_buff *, int); - unsigned long rmem_start; - unsigned long rmem_end; - void __iomem *mem; - unsigned char mcfilter[8]; - unsigned open:1; - unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */ - unsigned bigendian:1; /* 16-bit big endian mode. Do NOT */ - /* set this on random 8390 clones! */ - unsigned txing:1; /* Transmit Active */ - unsigned irqlock:1; /* 8390's intrs disabled when '1'. */ - unsigned dmaing:1; /* Remote DMA Active */ - unsigned char tx_start_page, rx_start_page, stop_page; - unsigned char current_page; /* Read pointer in buffer */ - unsigned char interface_num; /* Net port (AUI, 10bT.) to use. */ - unsigned char txqueue; /* Tx Packet buffer queue length. */ - short tx1, tx2; /* Packet lengths for ping-pong tx. */ - short lasttx; /* Alpha version consistency check. */ - unsigned char reg0; /* Register '0' in a WD8013 */ - unsigned char reg5; /* Register '5' in a WD8013 */ - unsigned char saved_irq; /* Original dev->irq value. */ - u32 *reg_offset; /* Register mapping table */ - spinlock_t page_lock; /* Page register locks */ - unsigned long priv; /* Private field to store bus IDs etc. */ -#ifdef AX88796_PLATFORM - unsigned char rxcr_base; /* default value for RXCR */ -#endif -}; - -/* The maximum number of 8390 interrupt service routines called per IRQ. */ -#define MAX_SERVICE 12 - -/* The maximum time waited (in jiffies) before assuming a Tx failed. 
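/*
 * Hypothetical skeleton, not from the original file: every board
 * driver in this family fills in the four hooks at the top of
 * struct ei_device before register_netdev(); the 8390 core never
 * moves packet data itself. Names are placeholders; the pattern
 * follows el2_probe1() above and ac_probe1() below.
 */
static void myboard_reset_8390(struct net_device *dev);
static void myboard_get_8390_hdr(struct net_device *dev,
				 struct e8390_pkt_hdr *hdr, int ring_page);
static void myboard_block_input(struct net_device *dev, int count,
				struct sk_buff *skb, int ring_offset);
static void myboard_block_output(struct net_device *dev, int count,
				 const unsigned char *buf, int start_page);

static void myboard_install_hooks(struct net_device *dev)
{
	ei_status.reset_8390   = myboard_reset_8390;
	ei_status.get_8390_hdr = myboard_get_8390_hdr;	/* 4-byte ring header */
	ei_status.block_input  = myboard_block_input;	/* card RAM -> skb */
	ei_status.block_output = myboard_block_output;	/* buf -> card RAM */
}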
(20ms) */ -#define TX_TIMEOUT (20*HZ/100) - -#define ei_status (*(struct ei_device *)netdev_priv(dev)) - -/* Some generic ethernet register configurations. */ -#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */ -#define E8390_RX_IRQ_MASK 0x5 - -#ifdef AX88796_PLATFORM -#define E8390_RXCONFIG (ei_status.rxcr_base | 0x04) -#define E8390_RXOFF (ei_status.rxcr_base | 0x20) -#else -#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast,errors */ -#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */ -#endif - -#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */ -#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */ - - -/* Register accessed at EN_CMD, the 8390 base addr. */ -#define E8390_STOP 0x01 /* Stop and reset the chip */ -#define E8390_START 0x02 /* Start the chip, clear reset */ -#define E8390_TRANS 0x04 /* Transmit a frame */ -#define E8390_RREAD 0x08 /* Remote read */ -#define E8390_RWRITE 0x10 /* Remote write */ -#define E8390_NODMA 0x20 /* Remote DMA */ -#define E8390_PAGE0 0x00 /* Select page chip registers */ -#define E8390_PAGE1 0x40 /* using the two high-order bits */ -#define E8390_PAGE2 0x80 /* Page 3 is invalid. */ - -/* - * Only generate indirect loads given a machine that needs them. - * - removed AMIGA_PCMCIA from this list, handled as ISA io now - * - the _p for generates no delay by default 8390p.c overrides this. - */ - -#ifndef ei_inb -#define ei_inb(_p) inb(_p) -#define ei_outb(_v,_p) outb(_v,_p) -#define ei_inb_p(_p) inb(_p) -#define ei_outb_p(_v,_p) outb(_v,_p) -#endif - -#ifndef EI_SHIFT -#define EI_SHIFT(x) (x) -#endif - -#define E8390_CMD EI_SHIFT(0x00) /* The command register (for all pages) */ -/* Page 0 register offsets. */ -#define EN0_CLDALO EI_SHIFT(0x01) /* Low byte of current local dma addr RD */ -#define EN0_STARTPG EI_SHIFT(0x01) /* Starting page of ring bfr WR */ -#define EN0_CLDAHI EI_SHIFT(0x02) /* High byte of current local dma addr RD */ -#define EN0_STOPPG EI_SHIFT(0x02) /* Ending page +1 of ring bfr WR */ -#define EN0_BOUNDARY EI_SHIFT(0x03) /* Boundary page of ring bfr RD WR */ -#define EN0_TSR EI_SHIFT(0x04) /* Transmit status reg RD */ -#define EN0_TPSR EI_SHIFT(0x04) /* Transmit starting page WR */ -#define EN0_NCR EI_SHIFT(0x05) /* Number of collision reg RD */ -#define EN0_TCNTLO EI_SHIFT(0x05) /* Low byte of tx byte count WR */ -#define EN0_FIFO EI_SHIFT(0x06) /* FIFO RD */ -#define EN0_TCNTHI EI_SHIFT(0x06) /* High byte of tx byte count WR */ -#define EN0_ISR EI_SHIFT(0x07) /* Interrupt status reg RD WR */ -#define EN0_CRDALO EI_SHIFT(0x08) /* low byte of current remote dma address RD */ -#define EN0_RSARLO EI_SHIFT(0x08) /* Remote start address reg 0 */ -#define EN0_CRDAHI EI_SHIFT(0x09) /* high byte, current remote dma address RD */ -#define EN0_RSARHI EI_SHIFT(0x09) /* Remote start address reg 1 */ -#define EN0_RCNTLO EI_SHIFT(0x0a) /* Remote byte count reg WR */ -#define EN0_RCNTHI EI_SHIFT(0x0b) /* Remote byte count reg WR */ -#define EN0_RSR EI_SHIFT(0x0c) /* rx status reg RD */ -#define EN0_RXCR EI_SHIFT(0x0c) /* RX configuration reg WR */ -#define EN0_TXCR EI_SHIFT(0x0d) /* TX configuration reg WR */ -#define EN0_COUNTER0 EI_SHIFT(0x0d) /* Rcv alignment error counter RD */ -#define EN0_DCFG EI_SHIFT(0x0e) /* Data configuration reg WR */ -#define EN0_COUNTER1 EI_SHIFT(0x0e) /* Rcv CRC error counter RD */ -#define EN0_IMR EI_SHIFT(0x0f) /* Interrupt mask reg WR */ -#define EN0_COUNTER2 EI_SHIFT(0x0f) /* Rcv missed frame error counter RD */ - -/* Bits in EN0_ISR - Interrupt status register */ 
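/*
 * Illustrative sketch, not from the original file: the page-select
 * bits above explain the PAGE0/PAGE2 dance in el2_probe1(). Most 8390
 * registers are aliased, and the top two command-register bits choose
 * which bank the EN0_ and EN1_ offsets refer to:
 */
static unsigned char ns8390_read_dcfg(unsigned long ioaddr)	/* hypothetical */
{
	unsigned char val;

	outb_p(E8390_PAGE2, ioaddr + E8390_CMD);	/* DCFG reads back on page 2 */
	val = inb_p(ioaddr + EN0_DCFG);
	outb_p(E8390_PAGE0, ioaddr + E8390_CMD);	/* restore page 0 */
	return val;	/* el2_probe1() tests ENDCFG_WTS here for 16-bit */
}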
-#define ENISR_RX 0x01 /* Receiver, no error */ -#define ENISR_TX 0x02 /* Transmitter, no error */ -#define ENISR_RX_ERR 0x04 /* Receiver, with error */ -#define ENISR_TX_ERR 0x08 /* Transmitter, with error */ -#define ENISR_OVER 0x10 /* Receiver overwrote the ring */ -#define ENISR_COUNTERS 0x20 /* Counters need emptying */ -#define ENISR_RDC 0x40 /* remote dma complete */ -#define ENISR_RESET 0x80 /* Reset completed */ -#define ENISR_ALL 0x3f /* Interrupts we will enable */ - -/* Bits in EN0_DCFG - Data config register */ -#define ENDCFG_WTS 0x01 /* word transfer mode selection */ -#define ENDCFG_BOS 0x02 /* byte order selection */ - -/* Page 1 register offsets. */ -#define EN1_PHYS EI_SHIFT(0x01) /* This board's physical enet addr RD WR */ -#define EN1_PHYS_SHIFT(i) EI_SHIFT(i+1) /* Get and set mac address */ -#define EN1_CURPAG EI_SHIFT(0x07) /* Current memory page RD WR */ -#define EN1_MULT EI_SHIFT(0x08) /* Multicast filter mask array (8 bytes) RD WR */ -#define EN1_MULT_SHIFT(i) EI_SHIFT(8+i) /* Get and set multicast filter */ - -/* Bits in received packet status byte and EN0_RSR*/ -#define ENRSR_RXOK 0x01 /* Received a good packet */ -#define ENRSR_CRC 0x02 /* CRC error */ -#define ENRSR_FAE 0x04 /* frame alignment error */ -#define ENRSR_FO 0x08 /* FIFO overrun */ -#define ENRSR_MPA 0x10 /* missed pkt */ -#define ENRSR_PHY 0x20 /* physical/multicast address */ -#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */ -#define ENRSR_DEF 0x80 /* deferring */ - -/* Transmitted packet status, EN0_TSR. */ -#define ENTSR_PTX 0x01 /* Packet transmitted without error */ -#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */ -#define ENTSR_COL 0x04 /* The transmit collided at least once. */ -#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */ -#define ENTSR_CRS 0x10 /* The carrier sense was lost. */ -#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */ -#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */ -#define ENTSR_OWC 0x80 /* There was an out-of-window collision. 
*/ - -#endif /* _8390_h */ diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c deleted file mode 100644 index a2a64ea..0000000 --- a/drivers/net/8390p.c +++ /dev/null @@ -1,105 +0,0 @@ -/* 8390 core for ISA devices needing bus delays */ - -static const char version[] = - "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#define ei_inb(_p) inb(_p) -#define ei_outb(_v, _p) outb(_v, _p) -#define ei_inb_p(_p) inb_p(_p) -#define ei_outb_p(_v, _p) outb_p(_v, _p) - -#include "lib8390.c" - -int eip_open(struct net_device *dev) -{ - return __ei_open(dev); -} -EXPORT_SYMBOL(eip_open); - -int eip_close(struct net_device *dev) -{ - return __ei_close(dev); -} -EXPORT_SYMBOL(eip_close); - -netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - return __ei_start_xmit(skb, dev); -} -EXPORT_SYMBOL(eip_start_xmit); - -struct net_device_stats *eip_get_stats(struct net_device *dev) -{ - return __ei_get_stats(dev); -} -EXPORT_SYMBOL(eip_get_stats); - -void eip_set_multicast_list(struct net_device *dev) -{ - __ei_set_multicast_list(dev); -} -EXPORT_SYMBOL(eip_set_multicast_list); - -void eip_tx_timeout(struct net_device *dev) -{ - __ei_tx_timeout(dev); -} -EXPORT_SYMBOL(eip_tx_timeout); - -irqreturn_t eip_interrupt(int irq, void *dev_id) -{ - return __ei_interrupt(irq, dev_id); -} -EXPORT_SYMBOL(eip_interrupt); - -#ifdef CONFIG_NET_POLL_CONTROLLER -void eip_poll(struct net_device *dev) -{ - __ei_poll(dev); -} -EXPORT_SYMBOL(eip_poll); -#endif - -const struct net_device_ops eip_netdev_ops = { - .ndo_open = eip_open, - .ndo_stop = eip_close, - .ndo_start_xmit = eip_start_xmit, - .ndo_tx_timeout = eip_tx_timeout, - .ndo_get_stats = eip_get_stats, - .ndo_set_multicast_list = eip_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = eip_poll, -#endif -}; -EXPORT_SYMBOL(eip_netdev_ops); - -struct net_device *__alloc_eip_netdev(int size) -{ - struct net_device *dev = ____alloc_ei_netdev(size); - if (dev) - dev->netdev_ops = &eip_netdev_ops; - return dev; -} -EXPORT_SYMBOL(__alloc_eip_netdev); - -void NS8390p_init(struct net_device *dev, int startp) -{ - __NS8390_init(dev, startp); -} -EXPORT_SYMBOL(NS8390p_init); - -static int __init NS8390p_init_module(void) -{ - return 0; -} - -static void __exit NS8390p_cleanup_module(void) -{ -} - -module_init(NS8390p_init_module); -module_exit(NS8390p_cleanup_module); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index b686dab..c877f41 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -237,22 +237,6 @@ config MACB source "drivers/net/arm/Kconfig" -config AX88796 - tristate "ASIX AX88796 NE2000 clone support" - depends on ARM || MIPS || SUPERH - select PHYLIB - select MDIO_BITBANG - help - AX88796 driver, using platform bus to provide - chip detection and resources - -config AX88796_93CX6 - bool "ASIX AX88796 external 93CX6 eeprom support" - depends on AX88796 - select EEPROM_93CX6 - help - Select this if your platform comes with an external 93CX6 eeprom. - config MACE tristate "MACE (Power Mac ethernet) support" depends on PPC_PMAC && PPC32 @@ -287,50 +271,6 @@ config BMAC To compile this driver as a module, choose M here: the module will be called bmac. -config HYDRA - tristate "Hydra support" - depends on ZORRO - select CRC32 - help - If you have a Hydra Ethernet adapter, say Y. Otherwise, say N. 
- - To compile this driver as a module, choose M here: the module - will be called hydra. - -config ZORRO8390 - tristate "Zorro NS8390-based Ethernet support" - depends on ZORRO - select CRC32 - help - This driver is for Zorro Ethernet cards using an NS8390-compatible - chipset, like the Village Tronic Ariadne II and the Individual - Computers X-Surf Ethernet cards. If you have such a card, say Y. - Otherwise, say N. - - To compile this driver as a module, choose M here: the module - will be called zorro8390. - -config APNE - tristate "PCMCIA NE2000 support" - depends on AMIGA_PCMCIA - select CRC32 - help - If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise, - say N. - - To compile this driver as a module, choose M here: the module - will be called apne. - -config MAC8390 - bool "Macintosh NS 8390 based ethernet cards" - depends on MAC - select CRC32 - help - If you want to include a driver to support Nubus or LC-PDS - Ethernet cards using an NS8390 chipset or its equivalent, say Y - and read the Ethernet-HOWTO, available from - . - config MAC89x0 tristate "Macintosh CS89x0 based ethernet cards" depends on MAC @@ -449,18 +389,6 @@ config SGI_O2MACE_ETH tristate "SGI O2 MACE Fast Ethernet support" depends on SGI_IP32=y -config STNIC - tristate "National DP83902AV support" - depends on SUPERH - select CRC32 - help - Support for cards based on the National Semiconductor DP83902AV - ST-NIC Serial Network Interface Controller for Twisted Pair. This - is a 10Mbit/sec Ethernet controller. Product overview and specs at - . - - If unsure, say N. - config SH_ETH tristate "Renesas SuperH Ethernet support" depends on SUPERH && \ @@ -591,74 +519,6 @@ config ELMC_II To compile this driver as a module, choose M here. The module will be called 3c527. -config NET_VENDOR_SMC - bool "Western Digital/SMC cards" - depends on ISA || MCA || EISA || MAC - help - If you have a network (Ethernet) card belonging to this class, say Y - and read the Ethernet-HOWTO, available from - . - - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about Western Digital cards. If you say Y, you will be - asked for your specific card in the following questions. - -config WD80x3 - tristate "WD80*3 support" - depends on NET_VENDOR_SMC && ISA - select CRC32 - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called wd. - -config ULTRAMCA - tristate "SMC Ultra MCA support" - depends on NET_VENDOR_SMC && MCA - select CRC32 - help - If you have a network (Ethernet) card of this type and are running - an MCA based system (PS/2), say Y and read the Ethernet-HOWTO, - available from . - - To compile this driver as a module, choose M here. The module - will be called smc-mca. - -config ULTRA - tristate "SMC Ultra support" - depends on NET_VENDOR_SMC && ISA - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - Important: There have been many reports that, with some motherboards - mixing an SMC Ultra and an Adaptec AHA154x SCSI card (or compatible, - such as some BusLogic models) causes corruption problems with many - operating systems. The Linux smc-ultra driver has a work-around for - this but keep it in mind if you have such a SCSI card and have - problems. 
- - To compile this driver as a module, choose M here. The module - will be called smc-ultra. - -config ULTRA32 - tristate "SMC Ultra32 EISA support" - depends on NET_VENDOR_SMC && EISA - select CRC32 - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called smc-ultra32. - config BFIN_MAC tristate "Blackfin on-chip MAC support" depends on NET_ETHERNET && (BF516 || BF518 || BF526 || BF527 || BF536 || BF537) @@ -979,18 +839,6 @@ config NET_ISA the remaining ISA network card questions. If you say Y, you will be asked for your specific card in the following questions. -config E2100 - tristate "Cabletron E21xx support" - depends on NET_ISA - select CRC32 - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called e2100. - config EWRK3 tristate "EtherWORKS 3 (DE203, DE204, DE205) support" depends on NET_ISA @@ -1032,30 +880,6 @@ config EEXPRESS_PRO To compile this driver as a module, choose M here. The module will be called eepro. -config HPLAN_PLUS - tristate "HP PCLAN+ (27247B and 27252A) support" - depends on NET_ISA - select CRC32 - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called hp-plus. - -config HPLAN - tristate "HP PCLAN (27245 and other 27xxx series) support" - depends on NET_ISA - select CRC32 - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called hp. - config LP486E tristate "LP486E on board Ethernet" depends on NET_ISA @@ -1075,26 +899,6 @@ config ETH16I To compile this driver as a module, choose M here. The module will be called eth16i. -config NE2000 - tristate "NE2000/NE1000 support" - depends on NET_ISA || (Q40 && m) || M32R || MACH_TX49XX - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . Many Ethernet cards - without a specific driver are compatible with NE2000. - - If you have a PCI NE2000 card however, say N here and Y to "PCI - NE2000 and clone support" under "EISA, VLB, PCI and on board - controllers" below. If you have a NE2000 card and are running on - an MCA system (a bus system used on some IBM PS/2 computers and - laptops), say N here and Y to "NE/2 (ne2000 MCA version) support", - below. - - To compile this driver as a module, choose M here. The module - will be called ne. - config ZNET tristate "Zenith Z-Note support (EXPERIMENTAL)" depends on NET_ISA && EXPERIMENTAL && ISA_DMA_API @@ -1116,18 +920,6 @@ config SEEQ8005 To compile this driver as a module, choose M here. The module will be called seeq8005. -config NE2_MCA - tristate "NE/2 (ne2000 MCA version) support" - depends on MCA_LEGACY - select CRC32 - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called ne2. - config IBMLANA tristate "IBM LAN Adapter/A support" depends on MCA @@ -1183,18 +975,6 @@ config ADAPTEC_STARFIRE To compile this driver as a module, choose M here: the module will be called starfire. 
This is recommended. -config AC3200 - tristate "Ansel Communications EISA 3200 support (EXPERIMENTAL)" - depends on NET_PCI && (ISA || EISA) && EXPERIMENTAL - select CRC32 - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called ac3200. - config KSZ884X_PCI tristate "Micrel KSZ8841/2 PCI" depends on NET_PCI && PCI @@ -1310,18 +1090,6 @@ config E100 To compile this driver as a module, choose M here. The module will be called e100. -config LNE390 - tristate "Mylex EISA LNE390A/B support (EXPERIMENTAL)" - depends on NET_PCI && EISA && EXPERIMENTAL - select CRC32 - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called lne390. - config FEALNX tristate "Myson MTD-8xx PCI Ethernet support" depends on NET_PCI && PCI @@ -1342,50 +1110,6 @@ config NATSEMI More specific information and updates are available from . -config NE2K_PCI - tristate "PCI NE2000 and clones support (see help)" - depends on NET_PCI && PCI - select CRC32 - ---help--- - This driver is for NE2000 compatible PCI cards. It will not work - with ISA NE2000 cards (they have their own driver, "NE2000/NE1000 - support" below). If you have a PCI NE2000 network (Ethernet) card, - say Y and read the Ethernet-HOWTO, available from - . - - This driver also works for the following NE2000 clone cards: - RealTek RTL-8029 Winbond 89C940 Compex RL2000 KTI ET32P2 - NetVin NV5000SC Via 86C926 SureCom NE34 Winbond - Holtek HT80232 Holtek HT80229 - - To compile this driver as a module, choose M here. The module - will be called ne2k-pci. - -config NE3210 - tristate "Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)" - depends on NET_PCI && EISA && EXPERIMENTAL - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . Note that this driver - will NOT WORK for NE3200 cards as they are completely different. - - To compile this driver as a module, choose M here. The module - will be called ne3210. - -config ES3210 - tristate "Racal-Interlan EISA ES3210 support (EXPERIMENTAL)" - depends on NET_PCI && EISA && EXPERIMENTAL - select CRC32 - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called es3210. - config 8139CP tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)" depends on NET_PCI && PCI && EXPERIMENTAL @@ -1723,13 +1447,6 @@ config FEC_MPC52xx_MDIO If not sure, enable. If compiled as module, it will be called fec_mpc52xx_phy. -config NE_H8300 - tristate "NE2000 compatible support for H8/300" - depends on H8300 - help - Say Y here if you want to use the NE2000 compatible - controller on the Renesas H8/300 processor. 
- config ATL2 tristate "Atheros L2 Fast Ethernet support" depends on PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 59b6cc9..4e8fa73 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -65,7 +65,6 @@ obj-$(CONFIG_SUNVNET) += sunvnet.o obj-$(CONFIG_MACE) += mace.o obj-$(CONFIG_BMAC) += bmac.o -obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o obj-$(CONFIG_E100) += e100.o obj-$(CONFIG_TLAN) += tlan.o obj-$(CONFIG_EPIC100) += epic100.o @@ -77,7 +76,6 @@ obj-$(CONFIG_YELLOWFIN) += yellowfin.o obj-$(CONFIG_ISERIES_VETH) += iseries_veth.o obj-$(CONFIG_NATSEMI) += natsemi.o obj-$(CONFIG_NS83820) += ns83820.o -obj-$(CONFIG_STNIC) += stnic.o 8390.o obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_TIGON3) += tg3.o obj-$(CONFIG_BNX2) += bnx2.o @@ -112,9 +110,6 @@ obj-$(CONFIG_HAMACHI) += hamachi.o obj-$(CONFIG_NET) += Space.o loopback.o obj-$(CONFIG_SEEQ8005) += seeq8005.o obj-$(CONFIG_NET_SB1000) += sb1000.o -obj-$(CONFIG_MAC8390) += mac8390.o -obj-$(CONFIG_APNE) += apne.o 8390.o -obj-$(CONFIG_PCMCIA_PCNET) += 8390.o obj-$(CONFIG_HP100) += hp100.o obj-$(CONFIG_SMC9194) += smc9194.o obj-$(CONFIG_FEC) += fec.o @@ -122,24 +117,9 @@ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o endif -obj-$(CONFIG_WD80x3) += wd.o 8390.o -obj-$(CONFIG_EL2) += 3c503.o 8390p.o -obj-$(CONFIG_NE2000) += ne.o 8390p.o -obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o -obj-$(CONFIG_HPLAN) += hp.o 8390p.o -obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o -obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o -obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o -obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o -obj-$(CONFIG_E2100) += e2100.o 8390.o -obj-$(CONFIG_ES3210) += es3210.o 8390.o -obj-$(CONFIG_LNE390) += lne390.o 8390.o -obj-$(CONFIG_NE3210) += ne3210.o 8390.o obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_B44) += b44.o obj-$(CONFIG_FORCEDETH) += forcedeth.o -obj-$(CONFIG_NE_H8300) += ne-h8300.o -obj-$(CONFIG_AX88796) += ax88796.o obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o obj-$(CONFIG_FTGMAC100) += ftgmac100.o obj-$(CONFIG_FTMAC100) += ftmac100.o @@ -195,7 +175,6 @@ obj-$(CONFIG_ATP) += atp.o obj-$(CONFIG_NI5010) += ni5010.o obj-$(CONFIG_NI52) += ni52.o obj-$(CONFIG_ELPLUS) += 3c505.o -obj-$(CONFIG_AC3200) += ac3200.o 8390.o obj-$(CONFIG_APRICOT) += 82596.o obj-$(CONFIG_LASI_82596) += lasi_82596.o obj-$(CONFIG_SNI_82596) += sni_82596.o @@ -207,13 +186,11 @@ obj-$(CONFIG_SC92031) += sc92031.o obj-$(CONFIG_LP486E) += lp486e.o obj-$(CONFIG_ETH16I) += eth16i.o -obj-$(CONFIG_ZORRO8390) += zorro8390.o obj-$(CONFIG_EQUALIZER) += eql.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o -obj-$(CONFIG_HYDRA) += hydra.o obj-$(CONFIG_CS89x0) += cs89x0.o obj-$(CONFIG_MACSONIC) += macsonic.o obj-$(CONFIG_MACMACE) += macmace.o diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c deleted file mode 100644 index f07b2e9..0000000 --- a/drivers/net/ac3200.c +++ /dev/null @@ -1,432 +0,0 @@ -/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */ -/* - Written 1993, 1994 by Donald Becker. - Copyright 1993 United States Government as represented by the Director, - National Security Agency. This software may only be used and distributed - according to the terms of the GNU General Public License as modified by SRC, - incorporated herein by reference. 
- - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - This is driver for the Ansel Communications Model 3200 EISA Ethernet LAN - Adapter. The programming information is from the users manual, as related - by glee@ardnassak.math.clemson.edu. - - Changelog: - - Paul Gortmaker 05/98 : add support for shared mem above 1MB. - - */ - -static const char version[] = - "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "8390.h" - -#define DRV_NAME "ac3200" - -/* Offsets from the base address. */ -#define AC_NIC_BASE 0x00 -#define AC_SA_PROM 0x16 /* The station address PROM. */ -#define AC_ADDR0 0x00 /* Prefix station address values. */ -#define AC_ADDR1 0x40 -#define AC_ADDR2 0x90 -#define AC_ID_PORT 0xC80 -#define AC_EISA_ID 0x0110d305 -#define AC_RESET_PORT 0xC84 -#define AC_RESET 0x00 -#define AC_ENABLE 0x01 -#define AC_CONFIG 0xC90 /* The configuration port. */ - -#define AC_IO_EXTENT 0x20 - /* Actually accessed is: - * AC_NIC_BASE (0-15) - * AC_SA_PROM (0-5) - * AC_ID_PORT (0-3) - * AC_RESET_PORT - * AC_CONFIG - */ - -/* Decoding of the configuration register. */ -static unsigned char config2irqmap[8] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; -static int addrmap[8] = -{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 }; -static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"}; - -#define config2irq(configval) config2irqmap[((configval) >> 3) & 7] -#define config2mem(configval) addrmap[(configval) & 7] -#define config2name(configval) port_name[((configval) >> 6) & 3] - -/* First and last 8390 pages. */ -#define AC_START_PG 0x00 /* First page of 8390 TX buffer */ -#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */ - -static int ac_probe1(int ioaddr, struct net_device *dev); - -static int ac_open(struct net_device *dev); -static void ac_reset_8390(struct net_device *dev); -static void ac_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ac_block_output(struct net_device *dev, const int count, - const unsigned char *buf, const int start_page); -static void ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); - -static int ac_close_card(struct net_device *dev); - - -/* Probe for the AC3200. - - The AC3200 can be identified by either the EISA configuration registers, - or the unique value in the station address PROM. - */ - -static int __init do_ac3200_probe(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - int irq = dev->irq; - int mem_start = dev->mem_start; - - if (ioaddr > 0x1ff) /* Check a single specified location. */ - return ac_probe1(ioaddr, dev); - else if (ioaddr > 0) /* Don't probe at all. */ - return -ENXIO; - - if ( ! 
EISA_bus) - return -ENXIO; - - for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { - if (ac_probe1(ioaddr, dev) == 0) - return 0; - dev->irq = irq; - dev->mem_start = mem_start; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init ac3200_probe(int unit) -{ - struct net_device *dev = alloc_ei_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_ac3200_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops ac_netdev_ops = { - .ndo_open = ac_open, - .ndo_stop = ac_close_card, - - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int __init ac_probe1(int ioaddr, struct net_device *dev) -{ - int i, retval; - - if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - if (inb_p(ioaddr + AC_ID_PORT) == 0xff) { - retval = -ENODEV; - goto out; - } - - if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) { - retval = -ENODEV; - goto out; - } - -#ifndef final_version - printk(KERN_DEBUG "AC3200 ethercard configuration register is %#02x," - " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG), - inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1), - inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3)); -#endif - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i); - - printk(KERN_DEBUG "AC3200 in EISA slot %d, node %pM", - ioaddr/0x1000, dev->dev_addr); -#if 0 - /* Check the vendor ID/prefix. Redundant after checking the EISA ID */ - if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0 - || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1 - || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) { - printk(", not found (invalid prefix).\n"); - retval = -ENODEV; - goto out; - } -#endif - - /* Assign and allocate the interrupt now. */ - if (dev->irq == 0) { - dev->irq = config2irq(inb(ioaddr + AC_CONFIG)); - printk(", using"); - } else { - dev->irq = irq_canonicalize(dev->irq); - printk(", assigning"); - } - - retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); - if (retval) { - printk (" nothing! Unable to get IRQ %d.\n", dev->irq); - goto out; - } - - printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]); - - dev->base_addr = ioaddr; - -#ifdef notyet - if (dev->mem_start) { /* Override the value from the board. */ - for (i = 0; i < 7; i++) - if (addrmap[i] == dev->mem_start) - break; - if (i >= 7) - i = 0; - outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG); - } -#endif - - dev->if_port = inb(ioaddr + AC_CONFIG) >> 6; - dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG)); - - printk("%s: AC3200 at %#3x with %dkB memory at physical address %#lx.\n", - dev->name, ioaddr, AC_STOP_PG/4, dev->mem_start); - - /* - * BEWARE!! Some dain-bramaged EISA SCUs will allow you to put - * the card mem within the region covered by `normal' RAM !!! - * - * ioremap() will fail in that case. 
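/*
 * Illustrative sketch, not from the original file: the AC3200's whole
 * soft configuration rides in one EISA register, sliced exactly as the
 * config2irq/config2mem/config2name macros above do it: IRQ in bits
 * 3-5, memory window in bits 0-2, transceiver in bits 6-7.
 */
static void ac_show_config(unsigned char cfg)	/* hypothetical helper */
{
	printk(KERN_DEBUG "ac3200: irq %d, mem %#x, %s port\n",
	       config2irq(cfg), config2mem(cfg), config2name(cfg));
}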
- */ - ei_status.mem = ioremap(dev->mem_start, AC_STOP_PG*0x100); - if (!ei_status.mem) { - printk(KERN_ERR "ac3200.c: Unable to remap card memory above 1MB !!\n"); - printk(KERN_ERR "ac3200.c: Try using EISA SCU to set memory below 1MB.\n"); - printk(KERN_ERR "ac3200.c: Driver NOT installed.\n"); - retval = -EINVAL; - goto out1; - } - printk("ac3200.c: remapped %dkB card memory to virtual address %p\n", - AC_STOP_PG/4, ei_status.mem); - - dev->mem_start = (unsigned long)ei_status.mem; - dev->mem_end = dev->mem_start + (AC_STOP_PG - AC_START_PG)*256; - - ei_status.name = "AC3200"; - ei_status.tx_start_page = AC_START_PG; - ei_status.rx_start_page = AC_START_PG + TX_PAGES; - ei_status.stop_page = AC_STOP_PG; - ei_status.word16 = 1; - - if (ei_debug > 0) - printk(version); - - ei_status.reset_8390 = &ac_reset_8390; - ei_status.block_input = &ac_block_input; - ei_status.block_output = &ac_block_output; - ei_status.get_8390_hdr = &ac_get_8390_hdr; - - dev->netdev_ops = &ac_netdev_ops; - NS8390_init(dev, 0); - - retval = register_netdev(dev); - if (retval) - goto out2; - return 0; -out2: - if (ei_status.reg0) - iounmap(ei_status.mem); -out1: - free_irq(dev->irq, dev); -out: - release_region(ioaddr, AC_IO_EXTENT); - return retval; -} - -static int ac_open(struct net_device *dev) -{ -#ifdef notyet - /* Someday we may enable the IRQ and shared memory here. */ - int ioaddr = dev->base_addr; -#endif - - ei_open(dev); - return 0; -} - -static void ac_reset_8390(struct net_device *dev) -{ - ushort ioaddr = dev->base_addr; - - outb(AC_RESET, ioaddr + AC_RESET_PORT); - if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies); - - ei_status.txing = 0; - outb(AC_ENABLE, ioaddr + AC_RESET_PORT); - if (ei_debug > 1) printk("reset done\n"); -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void -ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - AC_START_PG)<<8); - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); -} - -/* Block input and output are easy on shared memory ethercards, the only - complication is when the ring buffer wraps. */ - -static void ac_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset) -{ - void __iomem *start = ei_status.mem + ring_offset - AC_START_PG*256; - - if (ring_offset + count > AC_STOP_PG*256) { - /* We must wrap the input move. */ - int semi_count = AC_STOP_PG*256 - ring_offset; - memcpy_fromio(skb->data, start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, - ei_status.mem + TX_PAGES*256, count); - } else { - memcpy_fromio(skb->data, start, count); - } -} - -static void ac_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - AC_START_PG)<<8); - - memcpy_toio(shmem, buf, count); -} - -static int ac_close_card(struct net_device *dev) -{ - if (ei_debug > 1) - printk("%s: Shutting down ethercard.\n", dev->name); - -#ifdef notyet - /* We should someday disable shared memory and interrupts. */ - outb(0x00, ioaddr + 6); /* Disable interrupts. 
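/*
 * Illustrative sketch, not from the original file: the wrap case in
 * ac_block_input() above is the essential shared-memory receive trick.
 * When a packet straddles the ring end, copy the tail that fits, then
 * continue from the first receive page; el2_block_input() earlier does
 * the same. Generic form:
 */
static void ring_copy(u8 *dst, const u8 __iomem *ring, int ring_size,
		      int offset, int count, int rx_start)	/* hypothetical */
{
	if (offset + count > ring_size) {
		int semi_count = ring_size - offset;	/* part before the wrap */

		memcpy_fromio(dst, ring + offset, semi_count);
		memcpy_fromio(dst + semi_count, ring + rx_start,
			      count - semi_count);
	} else {
		memcpy_fromio(dst, ring + offset, count);
	}
}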
*/ - free_irq(dev->irq, dev); -#endif - - ei_close(dev); - return 0; -} - -#ifdef MODULE -#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */ -static struct net_device *dev_ac32[MAX_AC32_CARDS]; -static int io[MAX_AC32_CARDS]; -static int irq[MAX_AC32_CARDS]; -static int mem[MAX_AC32_CARDS]; -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s)"); -MODULE_PARM_DESC(mem, "Memory base address(es)"); -MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver"); -MODULE_LICENSE("GPL"); - -static int __init ac3200_module_init(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) { - if (io[this_dev] == 0 && this_dev != 0) - break; - dev = alloc_ei_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_start = mem[this_dev]; /* Currently ignored by driver */ - if (do_ac3200_probe(dev) == 0) { - dev_ac32[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - /* Someday free_irq may be in ac_close_card() */ - free_irq(dev->irq, dev); - release_region(dev->base_addr, AC_IO_EXTENT); - iounmap(ei_status.mem); -} - -static void __exit ac3200_module_exit(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) { - struct net_device *dev = dev_ac32[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -module_init(ac3200_module_init); -module_exit(ac3200_module_exit); -#endif /* MODULE */ diff --git a/drivers/net/apne.c b/drivers/net/apne.c deleted file mode 100644 index 5477373..0000000 --- a/drivers/net/apne.c +++ /dev/null @@ -1,620 +0,0 @@ -/* - * Amiga Linux/68k 8390 based PCMCIA Ethernet Driver for the Amiga 1200 - * - * (C) Copyright 1997 Alain Malek - * (Alain.Malek@cryogen.com) - * - * ---------------------------------------------------------------------------- - * - * This program is based on - * - * ne.c: A general non-shared-memory NS8390 ethernet driver for linux - * Written 1992-94 by Donald Becker. - * - * 8390.c: A general NS8390 ethernet driver core for linux. - * Written 1992-94 by Donald Becker. - * - * cnetdevice: A Sana-II ethernet driver for AmigaOS - * Written by Bruce Abbott (bhabbott@inhb.co.nz) - * - * ---------------------------------------------------------------------------- - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of the Linux - * distribution for more details. - * - * ---------------------------------------------------------------------------- - * - */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "8390.h" - -/* ---- No user-serviceable parts below ---- */ - -#define DRV_NAME "apne" - -#define NE_BASE (dev->base_addr) -#define NE_CMD 0x00 -#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ -#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. 
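The reset port behaves exactly as the comment says: a read starts the reset pulse, writing the value back completes it, and the chip then raises ENISR_RESET in the interrupt status register. A minimal sketch of the sequence apne_probe1() and apne_reset_8390() use below:

    unsigned long t0 = jiffies;

    outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);    /* pulse reset   */
    while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
        if (time_after(jiffies, t0 + 2*HZ/100)) {       /* ~20ms timeout */
            printk(" not found (no reset ack).\n");
            break;
        }
    outb(0xff, ioaddr + NE_EN0_ISR);                    /* ack all intrs */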
*/ -#define NE_IO_EXTENT 0x20 - -#define NE_EN0_ISR 0x07 -#define NE_EN0_DCFG 0x0e - -#define NE_EN0_RSARLO 0x08 -#define NE_EN0_RSARHI 0x09 -#define NE_EN0_RCNTLO 0x0a -#define NE_EN0_RXCR 0x0c -#define NE_EN0_TXCR 0x0d -#define NE_EN0_RCNTHI 0x0b -#define NE_EN0_IMR 0x0f - -#define NE1SM_START_PG 0x20 /* First page of TX buffer */ -#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ -#define NESM_START_PG 0x40 /* First page of TX buffer */ -#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ - - -struct net_device * __init apne_probe(int unit); -static int apne_probe1(struct net_device *dev, int ioaddr); - -static void apne_reset_8390(struct net_device *dev); -static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void apne_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void apne_block_output(struct net_device *dev, const int count, - const unsigned char *buf, const int start_page); -static irqreturn_t apne_interrupt(int irq, void *dev_id); - -static int init_pcmcia(void); - -/* IO base address used for nic */ - -#define IOBASE 0x300 - -/* - use MANUAL_CONFIG and MANUAL_OFFSET for enabling IO by hand - you can find the values to use by looking at the cnet.device - config file example (the default values are for the CNET40BC card) -*/ - -/* -#define MANUAL_CONFIG 0x20 -#define MANUAL_OFFSET 0x3f8 - -#define MANUAL_HWADDR0 0x00 -#define MANUAL_HWADDR1 0x12 -#define MANUAL_HWADDR2 0x34 -#define MANUAL_HWADDR3 0x56 -#define MANUAL_HWADDR4 0x78 -#define MANUAL_HWADDR5 0x9a -*/ - -static const char version[] = - "apne.c:v1.1 7/10/98 Alain Malek (Alain.Malek@cryogen.ch)\n"; - -static int apne_owned; /* signal if card already owned */ - -struct net_device * __init apne_probe(int unit) -{ - struct net_device *dev; -#ifndef MANUAL_CONFIG - char tuple[8]; -#endif - int err; - - if (!MACH_IS_AMIGA) - return ERR_PTR(-ENODEV); - - if (apne_owned) - return ERR_PTR(-ENODEV); - - if ( !(AMIGAHW_PRESENT(PCMCIA)) ) - return ERR_PTR(-ENODEV); - - printk("Looking for PCMCIA ethernet card : "); - - /* check if a card is inserted */ - if (!(PCMCIA_INSERTED)) { - printk("NO PCMCIA card inserted\n"); - return ERR_PTR(-ENODEV); - } - - dev = alloc_ei_netdev(); - if (!dev) - return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - - /* disable pcmcia irq for readtuple */ - pcmcia_disable_irq(); - -#ifndef MANUAL_CONFIG - if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) || - (tuple[2] != CISTPL_FUNCID_NETWORK)) { - printk("not an ethernet card\n"); - /* XXX: shouldn't we re-enable irq here? */ - free_netdev(dev); - return ERR_PTR(-ENODEV); - } -#endif - - printk("ethernet PCMCIA card inserted\n"); - - if (!init_pcmcia()) { - /* XXX: shouldn't we re-enable irq here? 
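For reference, the function-ID tuple tested above follows the usual CIS layout: tuple[0] is the tuple code, tuple[1] the link to the next tuple, and tuple[2] the first data byte -- the function code, which is CISTPL_FUNCID_NETWORK for a network card. A sketch of that check with the byte meanings spelled out (informational only, no new driver logic):

    char tuple[8];
    /* need at least code, link and one data byte back */
    if (pcmcia_copy_tuple(CISTPL_FUNCID, tuple, sizeof(tuple)) < 3 ||
        tuple[2] != CISTPL_FUNCID_NETWORK)  /* data byte = function code */
        return -ENODEV;                     /* not a network function    */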
*/ - free_netdev(dev); - return ERR_PTR(-ENODEV); - } - - if (!request_region(IOBASE, 0x20, DRV_NAME)) { - free_netdev(dev); - return ERR_PTR(-EBUSY); - } - - err = apne_probe1(dev, IOBASE); - if (err) { - release_region(IOBASE, 0x20); - free_netdev(dev); - return ERR_PTR(err); - } - err = register_netdev(dev); - if (!err) - return dev; - - pcmcia_disable_irq(); - free_irq(IRQ_AMIGA_PORTS, dev); - pcmcia_reset(); - release_region(IOBASE, 0x20); - free_netdev(dev); - return ERR_PTR(err); -} - -static int __init apne_probe1(struct net_device *dev, int ioaddr) -{ - int i; - unsigned char SA_prom[32]; - int wordlength = 2; - const char *name = NULL; - int start_page, stop_page; -#ifndef MANUAL_HWADDR0 - int neX000, ctron; -#endif - static unsigned version_printed; - - if (ei_debug && version_printed++ == 0) - printk(version); - - printk("PCMCIA NE*000 ethercard probe"); - - /* Reset card. Who knows what dain-bramaged state it was left in. */ - { unsigned long reset_start_time = jiffies; - - outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); - - while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, reset_start_time + 2*HZ/100)) { - printk(" not found (no reset ack).\n"); - return -ENODEV; - } - - outb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */ - } - -#ifndef MANUAL_HWADDR0 - - /* Read the 16 bytes of station address PROM. - We must first initialize registers, similar to NS8390_init(eifdev, 0). - We can't reliably read the SAPROM address without this. - (I learned the hard way!). */ - { - struct {unsigned long value, offset; } program_seq[] = { - {E8390_NODMA+E8390_PAGE0+E8390_STOP, NE_CMD}, /* Select page 0*/ - {0x48, NE_EN0_DCFG}, /* Set byte-wide (0x48) access. */ - {0x00, NE_EN0_RCNTLO}, /* Clear the count regs. */ - {0x00, NE_EN0_RCNTHI}, - {0x00, NE_EN0_IMR}, /* Mask completion irq. */ - {0xFF, NE_EN0_ISR}, - {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */ - {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode. */ - {32, NE_EN0_RCNTLO}, - {0x00, NE_EN0_RCNTHI}, - {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000. */ - {0x00, NE_EN0_RSARHI}, - {E8390_RREAD+E8390_START, NE_CMD}, - }; - for (i = 0; i < ARRAY_SIZE(program_seq); i++) { - outb(program_seq[i].value, ioaddr + program_seq[i].offset); - } - - } - for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) { - SA_prom[i] = inb(ioaddr + NE_DATAPORT); - SA_prom[i+1] = inb(ioaddr + NE_DATAPORT); - if (SA_prom[i] != SA_prom[i+1]) - wordlength = 1; - } - - /* At this point, wordlength *only* tells us if the SA_prom is doubled - up or not because some broken PCI cards don't respect the byte-wide - request in program_seq above, and hence don't have doubled up values. - These broken cards would otherwise be detected as an ne1000. */ - - if (wordlength == 2) - for (i = 0; i < 16; i++) - SA_prom[i] = SA_prom[i+i]; - - if (wordlength == 2) { - /* We must set the 8390 for word mode. */ - outb(0x49, ioaddr + NE_EN0_DCFG); - start_page = NESM_START_PG; - stop_page = NESM_STOP_PG; - } else { - start_page = NE1SM_START_PG; - stop_page = NE1SM_STOP_PG; - } - - neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57); - ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d); - - /* Set up the rest of the parameters. */ - if (neX000) { - name = (wordlength == 2) ? "NE2000" : "NE1000"; - } else if (ctron) { - name = (wordlength == 2) ? "Ctron-8" : "Ctron-16"; - start_page = 0x01; - stop_page = (wordlength == 2) ? 
0x40 : 0x20; - } else { - printk(" not found.\n"); - return -ENXIO; - - } - -#else - wordlength = 2; - /* We must set the 8390 for word mode. */ - outb(0x49, ioaddr + NE_EN0_DCFG); - start_page = NESM_START_PG; - stop_page = NESM_STOP_PG; - - SA_prom[0] = MANUAL_HWADDR0; - SA_prom[1] = MANUAL_HWADDR1; - SA_prom[2] = MANUAL_HWADDR2; - SA_prom[3] = MANUAL_HWADDR3; - SA_prom[4] = MANUAL_HWADDR4; - SA_prom[5] = MANUAL_HWADDR5; - name = "NE2000"; -#endif - - dev->base_addr = ioaddr; - dev->irq = IRQ_AMIGA_PORTS; - dev->netdev_ops = &ei_netdev_ops; - - /* Install the Interrupt handler */ - i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); - if (i) return i; - - for(i = 0; i < ETHER_ADDR_LEN; i++) - dev->dev_addr[i] = SA_prom[i]; - - printk(" %pM\n", dev->dev_addr); - - printk("%s: %s found.\n", dev->name, name); - - ei_status.name = name; - ei_status.tx_start_page = start_page; - ei_status.stop_page = stop_page; - ei_status.word16 = (wordlength == 2); - - ei_status.rx_start_page = start_page + TX_PAGES; - - ei_status.reset_8390 = &apne_reset_8390; - ei_status.block_input = &apne_block_input; - ei_status.block_output = &apne_block_output; - ei_status.get_8390_hdr = &apne_get_8390_hdr; - - NS8390_init(dev, 0); - - pcmcia_ack_int(pcmcia_get_intreq()); /* ack PCMCIA int req */ - pcmcia_enable_irq(); - - apne_owned = 1; - - return 0; -} - -/* Hard reset the card. This used to pause for the same period that a - 8390 reset command required, but that shouldn't be necessary. */ -static void -apne_reset_8390(struct net_device *dev) -{ - unsigned long reset_start_time = jiffies; - - init_pcmcia(); - - if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies); - - outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); - - ei_status.txing = 0; - ei_status.dmaing = 0; - - /* This check _should_not_ be necessary, omit eventually. */ - while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, reset_start_time + 2*HZ/100)) { - printk("%s: ne_reset_8390() did not complete.\n", dev->name); - break; - } - outb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */ -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void -apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - - int nic_base = dev->base_addr; - int cnt; - char *ptrc; - short *ptrs; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne_get_8390_hdr " - "[DMAstat:%d][irqlock:%d][intr:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq); - return; - } - - ei_status.dmaing |= 0x01; - outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); - outb(ENISR_RDC, nic_base + NE_EN0_ISR); - outb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO); - outb(0, nic_base + NE_EN0_RCNTHI); - outb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */ - outb(ring_page, nic_base + NE_EN0_RSARHI); - outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); - - if (ei_status.word16) { - ptrs = (short*)hdr; - for(cnt = 0; cnt < (sizeof(struct e8390_pkt_hdr)>>1); cnt++) - *ptrs++ = inw(NE_BASE + NE_DATAPORT); - } else { - ptrc = (char*)hdr; - for(cnt = 0; cnt < sizeof(struct e8390_pkt_hdr); cnt++) - *ptrc++ = inb(NE_BASE + NE_DATAPORT); - } - - outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. 
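The four bytes fetched above fill a struct e8390_pkt_hdr, which per 8390.h looks like the sketch below. The 8390 stores `count` little-endian, hence the le16_to_cpus() fix-up that follows; on the big-endian 68k that swap is what makes the length usable:

    struct e8390_pkt_hdr {
        unsigned char status;   /* receive status            */
        unsigned char next;     /* page of the next packet   */
        unsigned short count;   /* length, little-endian     */
    };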
*/ - ei_status.dmaing &= ~0x01; - - le16_to_cpus(&hdr->count); -} - -/* Block input and output, similar to the Crynwr packet driver. If you - are porting to a new ethercard, look at the packet driver source for hints. - The NEx000 doesn't share the on-board packet memory -- you have to put - the packet out through the "remote DMA" dataport using outb. */ - -static void -apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - int nic_base = dev->base_addr; - char *buf = skb->data; - char *ptrc; - short *ptrs; - int cnt; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne_block_input " - "[DMAstat:%d][irqlock:%d][intr:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq); - return; - } - ei_status.dmaing |= 0x01; - outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); - outb(ENISR_RDC, nic_base + NE_EN0_ISR); - outb(count & 0xff, nic_base + NE_EN0_RCNTLO); - outb(count >> 8, nic_base + NE_EN0_RCNTHI); - outb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO); - outb(ring_offset >> 8, nic_base + NE_EN0_RSARHI); - outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) { - ptrs = (short*)buf; - for (cnt = 0; cnt < (count>>1); cnt++) - *ptrs++ = inw(NE_BASE + NE_DATAPORT); - if (count & 0x01) { - buf[count-1] = inb(NE_BASE + NE_DATAPORT); - } - } else { - ptrc = (char*)buf; - for (cnt = 0; cnt < count; cnt++) - *ptrc++ = inb(NE_BASE + NE_DATAPORT); - } - - outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -static void -apne_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page) -{ - int nic_base = NE_BASE; - unsigned long dma_start; - char *ptrc; - short *ptrs; - int cnt; - - /* Round the count up for word writes. Do we need to do this? - What effect will an odd byte count have on the 8390? - I should check someday. */ - if (ei_status.word16 && (count & 0x01)) - count++; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne_block_output." - "[DMAstat:%d][irqlock:%d][intr:%d]\n", - dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq); - return; - } - ei_status.dmaing |= 0x01; - /* We should already be in page 0, but to be safe... */ - outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); - - outb(ENISR_RDC, nic_base + NE_EN0_ISR); - - /* Now the normal output. */ - outb(count & 0xff, nic_base + NE_EN0_RCNTLO); - outb(count >> 8, nic_base + NE_EN0_RCNTHI); - outb(0x00, nic_base + NE_EN0_RSARLO); - outb(start_page, nic_base + NE_EN0_RSARHI); - - outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) { - ptrs = (short*)buf; - for (cnt = 0; cnt < count>>1; cnt++) - outw(*ptrs++, NE_BASE+NE_DATAPORT); - } else { - ptrc = (char*)buf; - for (cnt = 0; cnt < count; cnt++) - outb(*ptrc++, NE_BASE + NE_DATAPORT); - } - - dma_start = jiffies; - - while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) - if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ - printk("%s: timeout waiting for Tx RDC.\n", dev->name); - apne_reset_8390(dev); - NS8390_init(dev,1); - break; - } - - outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. 
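The RDC wait above uses time_after(), which compares through a signed difference and so stays correct across a jiffies wrap, where a naive `jiffies > dma_start + 2*HZ/100` would not. Its timeout recovery, distilled:

    if (time_after(jiffies, dma_start + 2*HZ/100)) {   /* 20ms */
        apne_reset_8390(dev);    /* hard-reset the 8390...   */
        NS8390_init(dev, 1);     /* ...and reprogram it      */
    }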
*/ - ei_status.dmaing &= ~0x01; -} - -static irqreturn_t apne_interrupt(int irq, void *dev_id) -{ - unsigned char pcmcia_intreq; - - if (!(gayle.inten & GAYLE_IRQ_IRQ)) - return IRQ_NONE; - - pcmcia_intreq = pcmcia_get_intreq(); - - if (!(pcmcia_intreq & GAYLE_IRQ_IRQ)) { - pcmcia_ack_int(pcmcia_intreq); - return IRQ_NONE; - } - if (ei_debug > 3) - printk("pcmcia intreq = %x\n", pcmcia_intreq); - pcmcia_disable_irq(); /* to get rid of the sti() within ei_interrupt */ - ei_interrupt(irq, dev_id); - pcmcia_ack_int(pcmcia_get_intreq()); - pcmcia_enable_irq(); - return IRQ_HANDLED; -} - -#ifdef MODULE -static struct net_device *apne_dev; - -static int __init apne_module_init(void) -{ - apne_dev = apne_probe(-1); - if (IS_ERR(apne_dev)) - return PTR_ERR(apne_dev); - return 0; -} - -static void __exit apne_module_exit(void) -{ - unregister_netdev(apne_dev); - - pcmcia_disable_irq(); - - free_irq(IRQ_AMIGA_PORTS, apne_dev); - - pcmcia_reset(); - - release_region(IOBASE, 0x20); - - free_netdev(apne_dev); -} -module_init(apne_module_init); -module_exit(apne_module_exit); -#endif - -static int init_pcmcia(void) -{ - u_char config; -#ifndef MANUAL_CONFIG - u_char tuple[32]; - int offset_len; -#endif - u_long offset; - - pcmcia_reset(); - pcmcia_program_voltage(PCMCIA_0V); - pcmcia_access_speed(PCMCIA_SPEED_250NS); - pcmcia_write_enable(); - -#ifdef MANUAL_CONFIG - config = MANUAL_CONFIG; -#else - /* get and write config byte to enable IO port */ - - if (pcmcia_copy_tuple(CISTPL_CFTABLE_ENTRY, tuple, 32) < 3) - return 0; - - config = tuple[2] & 0x3f; -#endif -#ifdef MANUAL_OFFSET - offset = MANUAL_OFFSET; -#else - if (pcmcia_copy_tuple(CISTPL_CONFIG, tuple, 32) < 6) - return 0; - - offset_len = (tuple[2] & 0x3) + 1; - offset = 0; - while(offset_len--) { - offset = (offset << 8) | tuple[4+offset_len]; - } -#endif - - out_8(GAYLE_ATTRIBUTE+offset, config); - - return 1; -} - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig index d0c8cf2..715bf2a 100644 --- a/drivers/net/arm/Kconfig +++ b/drivers/net/arm/Kconfig @@ -17,14 +17,6 @@ config ARM_ETHER3 If you have an Acorn system with one of these network cards, you should say Y to this option if you wish to use it with Linux. -config ARM_ETHERH - tristate "I-cubed EtherH/ANT EtherM support" - depends on ARM && ARCH_ACORN - select CRC32 - help - If you have an Acorn system with one of these network cards, you - should say Y to this option if you wish to use it with Linux. - config ARM_AT91_ETHER tristate "AT91RM9200 Ethernet support" depends on ARM && ARCH_AT91RM9200 diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile index 63c57be3..f1e6150 100644 --- a/drivers/net/arm/Makefile +++ b/drivers/net/arm/Makefile @@ -3,7 +3,6 @@ # Makefile for the ARM network device drivers # -obj-$(CONFIG_ARM_ETHERH) += etherh.o obj-$(CONFIG_ARM_ETHER3) += ether3.o obj-$(CONFIG_ARM_ETHER1) += ether1.o obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c deleted file mode 100644 index 03e217a..0000000 --- a/drivers/net/arm/etherh.c +++ /dev/null @@ -1,866 +0,0 @@ -/* - * linux/drivers/acorn/net/etherh.c - * - * Copyright (C) 2000-2002 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * NS8390 I-cubed EtherH and ANT EtherM specific driver - * Thanks to I-Cubed for information on their cards. 
- * EtherM conversion (C) 1999 Chris Kemp and Tim Watterton - * EtherM integration (C) 2000 Aleph One Ltd (Tak-Shing Chan) - * EtherM integration re-engineered by Russell King. - * - * Changelog: - * 08-12-1996 RMK 1.00 Created - * RMK 1.03 Added support for EtherLan500 cards - * 23-11-1997 RMK 1.04 Added media autodetection - * 16-04-1998 RMK 1.05 Improved media autodetection - * 10-02-2000 RMK 1.06 Updated for 2.3.43 - * 13-05-2000 RMK 1.07 Updated for 2.3.99-pre8 - * 12-10-1999 CK/TEW EtherM driver first release - * 21-12-2000 TTC EtherH/EtherM integration - * 25-12-2000 RMK 1.08 Clean integration of EtherM into this driver. - * 03-01-2002 RMK 1.09 Always enable IRQs if we're in the nic slot. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define EI_SHIFT(x) (ei_local->reg_offset[x]) - -#define ei_inb(_p) readb((void __iomem *)_p) -#define ei_outb(_v,_p) writeb(_v,(void __iomem *)_p) -#define ei_inb_p(_p) readb((void __iomem *)_p) -#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p) - -#define NET_DEBUG 0 -#define DEBUG_INIT 2 - -#define DRV_NAME "etherh" -#define DRV_VERSION "1.11" - -static char version[] __initdata = - "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n"; - -#include "../lib8390.c" - -static unsigned int net_debug = NET_DEBUG; - -struct etherh_priv { - void __iomem *ioc_fast; - void __iomem *memc; - void __iomem *dma_base; - unsigned int id; - void __iomem *ctrl_port; - unsigned char ctrl; - u32 supported; -}; - -struct etherh_data { - unsigned long ns8390_offset; - unsigned long dataport_offset; - unsigned long ctrlport_offset; - int ctrl_ioc; - const char name[16]; - u32 supported; - unsigned char tx_start_page; - unsigned char stop_page; -}; - -MODULE_AUTHOR("Russell King"); -MODULE_DESCRIPTION("EtherH/EtherM driver"); -MODULE_LICENSE("GPL"); - -#define ETHERH500_DATAPORT 0x800 /* MEMC */ -#define ETHERH500_NS8390 0x000 /* MEMC */ -#define ETHERH500_CTRLPORT 0x800 /* IOC */ - -#define ETHERH600_DATAPORT 0x040 /* MEMC */ -#define ETHERH600_NS8390 0x800 /* MEMC */ -#define ETHERH600_CTRLPORT 0x200 /* MEMC */ - -#define ETHERH_CP_IE 1 -#define ETHERH_CP_IF 2 -#define ETHERH_CP_HEARTBEAT 2 - -#define ETHERH_TX_START_PAGE 1 -#define ETHERH_STOP_PAGE 127 - -/* - * These came from CK/TEW - */ -#define ETHERM_DATAPORT 0x200 /* MEMC */ -#define ETHERM_NS8390 0x800 /* MEMC */ -#define ETHERM_CTRLPORT 0x23c /* MEMC */ - -#define ETHERM_TX_START_PAGE 64 -#define ETHERM_STOP_PAGE 127 - -/* ------------------------------------------------------------------------ */ - -#define etherh_priv(dev) \ - ((struct etherh_priv *)(((char *)netdev_priv(dev)) + sizeof(struct ei_device))) - -static inline void etherh_set_ctrl(struct etherh_priv *eh, unsigned char mask) -{ - unsigned char ctrl = eh->ctrl | mask; - eh->ctrl = ctrl; - writeb(ctrl, eh->ctrl_port); -} - -static inline void etherh_clr_ctrl(struct etherh_priv *eh, unsigned char mask) -{ - unsigned char ctrl = eh->ctrl & ~mask; - eh->ctrl = ctrl; - writeb(ctrl, eh->ctrl_port); -} - -static inline unsigned int etherh_get_stat(struct etherh_priv *eh) -{ - return readb(eh->ctrl_port); -} - - - - -static void etherh_irq_enable(ecard_t *ec, int irqnr) -{ - struct etherh_priv *eh = ec->irq_data; - - etherh_set_ctrl(eh, ETHERH_CP_IE); -} - -static void etherh_irq_disable(ecard_t *ec, int irqnr) -{ - struct etherh_priv *eh = ec->irq_data; - - 
etherh_clr_ctrl(eh, ETHERH_CP_IE); -} - -static expansioncard_ops_t etherh_ops = { - .irqenable = etherh_irq_enable, - .irqdisable = etherh_irq_disable, -}; - - - - -static void -etherh_setif(struct net_device *dev) -{ - struct ei_device *ei_local = netdev_priv(dev); - unsigned long flags; - void __iomem *addr; - - local_irq_save(flags); - - /* set the interface type */ - switch (etherh_priv(dev)->id) { - case PROD_I3_ETHERLAN600: - case PROD_I3_ETHERLAN600A: - addr = (void __iomem *)dev->base_addr + EN0_RCNTHI; - - switch (dev->if_port) { - case IF_PORT_10BASE2: - writeb((readb(addr) & 0xf8) | 1, addr); - break; - case IF_PORT_10BASET: - writeb((readb(addr) & 0xf8), addr); - break; - } - break; - - case PROD_I3_ETHERLAN500: - switch (dev->if_port) { - case IF_PORT_10BASE2: - etherh_clr_ctrl(etherh_priv(dev), ETHERH_CP_IF); - break; - - case IF_PORT_10BASET: - etherh_set_ctrl(etherh_priv(dev), ETHERH_CP_IF); - break; - } - break; - - default: - break; - } - - local_irq_restore(flags); -} - -static int -etherh_getifstat(struct net_device *dev) -{ - struct ei_device *ei_local = netdev_priv(dev); - void __iomem *addr; - int stat = 0; - - switch (etherh_priv(dev)->id) { - case PROD_I3_ETHERLAN600: - case PROD_I3_ETHERLAN600A: - addr = (void __iomem *)dev->base_addr + EN0_RCNTHI; - switch (dev->if_port) { - case IF_PORT_10BASE2: - stat = 1; - break; - case IF_PORT_10BASET: - stat = readb(addr) & 4; - break; - } - break; - - case PROD_I3_ETHERLAN500: - switch (dev->if_port) { - case IF_PORT_10BASE2: - stat = 1; - break; - case IF_PORT_10BASET: - stat = etherh_get_stat(etherh_priv(dev)) & ETHERH_CP_HEARTBEAT; - break; - } - break; - - default: - stat = 0; - break; - } - - return stat != 0; -} - -/* - * Configure the interface. Note that we ignore the other - * parts of ifmap, since its mostly meaningless for this driver. - */ -static int etherh_set_config(struct net_device *dev, struct ifmap *map) -{ - switch (map->port) { - case IF_PORT_10BASE2: - case IF_PORT_10BASET: - /* - * If the user explicitly sets the interface - * media type, turn off automedia detection. - */ - dev->flags &= ~IFF_AUTOMEDIA; - dev->if_port = map->port; - break; - - default: - return -EINVAL; - } - - etherh_setif(dev); - - return 0; -} - -/* - * Reset the 8390 (hard reset). Note that we can't actually do this. - */ -static void -etherh_reset(struct net_device *dev) -{ - struct ei_device *ei_local = netdev_priv(dev); - void __iomem *addr = (void __iomem *)dev->base_addr; - - writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr); - - /* - * See if we need to change the interface type. - * Note that we use 'interface_num' as a flag - * to indicate that we need to change the media. - */ - if (dev->flags & IFF_AUTOMEDIA && ei_local->interface_num) { - ei_local->interface_num = 0; - - if (dev->if_port == IF_PORT_10BASET) - dev->if_port = IF_PORT_10BASE2; - else - dev->if_port = IF_PORT_10BASET; - - etherh_setif(dev); - } -} - -/* - * Write a block of data out to the 8390 - */ -static void -etherh_block_output (struct net_device *dev, int count, const unsigned char *buf, int start_page) -{ - struct ei_device *ei_local = netdev_priv(dev); - unsigned long dma_start; - void __iomem *dma_base, *addr; - - if (ei_local->dmaing) { - printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: " - " DMAstat %d irqlock %d\n", dev->name, - ei_local->dmaing, ei_local->irqlock); - return; - } - - /* - * Make sure we have a round number of bytes if we're in word mode. 
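With a 16-bit dataport every transfer moves two bytes, so an odd count is rounded up and the pad byte is harmless; a 61-byte write, say, goes out as 62 bytes. The rounding below is the standard align-to-2 idiom:

    count = (count + 1) & ~1;   /* 61 -> 62, 64 stays 64 */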
- */ - if (count & 1 && ei_local->word16) - count++; - - ei_local->dmaing = 1; - - addr = (void __iomem *)dev->base_addr; - dma_base = etherh_priv(dev)->dma_base; - - count = (count + 1) & ~1; - writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); - - writeb (0x42, addr + EN0_RCNTLO); - writeb (0x00, addr + EN0_RCNTHI); - writeb (0x42, addr + EN0_RSARLO); - writeb (0x00, addr + EN0_RSARHI); - writeb (E8390_RREAD | E8390_START, addr + E8390_CMD); - - udelay (1); - - writeb (ENISR_RDC, addr + EN0_ISR); - writeb (count, addr + EN0_RCNTLO); - writeb (count >> 8, addr + EN0_RCNTHI); - writeb (0, addr + EN0_RSARLO); - writeb (start_page, addr + EN0_RSARHI); - writeb (E8390_RWRITE | E8390_START, addr + E8390_CMD); - - if (ei_local->word16) - writesw (dma_base, buf, count >> 1); - else - writesb (dma_base, buf, count); - - dma_start = jiffies; - - while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0) - if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ - printk(KERN_ERR "%s: timeout waiting for TX RDC\n", - dev->name); - etherh_reset (dev); - __NS8390_init (dev, 1); - break; - } - - writeb (ENISR_RDC, addr + EN0_ISR); - ei_local->dmaing = 0; -} - -/* - * Read a block of data from the 8390 - */ -static void -etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - struct ei_device *ei_local = netdev_priv(dev); - unsigned char *buf; - void __iomem *dma_base, *addr; - - if (ei_local->dmaing) { - printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: " - " DMAstat %d irqlock %d\n", dev->name, - ei_local->dmaing, ei_local->irqlock); - return; - } - - ei_local->dmaing = 1; - - addr = (void __iomem *)dev->base_addr; - dma_base = etherh_priv(dev)->dma_base; - - buf = skb->data; - writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); - writeb (count, addr + EN0_RCNTLO); - writeb (count >> 8, addr + EN0_RCNTHI); - writeb (ring_offset, addr + EN0_RSARLO); - writeb (ring_offset >> 8, addr + EN0_RSARHI); - writeb (E8390_RREAD | E8390_START, addr + E8390_CMD); - - if (ei_local->word16) { - readsw (dma_base, buf, count >> 1); - if (count & 1) - buf[count - 1] = readb (dma_base); - } else - readsb (dma_base, buf, count); - - writeb (ENISR_RDC, addr + EN0_ISR); - ei_local->dmaing = 0; -} - -/* - * Read a header from the 8390 - */ -static void -etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - struct ei_device *ei_local = netdev_priv(dev); - void __iomem *dma_base, *addr; - - if (ei_local->dmaing) { - printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: " - " DMAstat %d irqlock %d\n", dev->name, - ei_local->dmaing, ei_local->irqlock); - return; - } - - ei_local->dmaing = 1; - - addr = (void __iomem *)dev->base_addr; - dma_base = etherh_priv(dev)->dma_base; - - writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); - writeb (sizeof (*hdr), addr + EN0_RCNTLO); - writeb (0, addr + EN0_RCNTHI); - writeb (0, addr + EN0_RSARLO); - writeb (ring_page, addr + EN0_RSARHI); - writeb (E8390_RREAD | E8390_START, addr + E8390_CMD); - - if (ei_local->word16) - readsw (dma_base, hdr, sizeof (*hdr) >> 1); - else - readsb (dma_base, hdr, sizeof (*hdr)); - - writeb (ENISR_RDC, addr + EN0_ISR); - ei_local->dmaing = 0; -} - -/* - * Open/initialize the board. This is called (in the current kernel) - * sometime after booting when the 'ifconfig' program is run. 
- * - * This routine should set everything up anew at each open, even - * registers that "should" only need to be set once at boot, so that - * there is non-reboot way to recover if something goes wrong. - */ -static int -etherh_open(struct net_device *dev) -{ - struct ei_device *ei_local = netdev_priv(dev); - - if (!is_valid_ether_addr(dev->dev_addr)) { - printk(KERN_WARNING "%s: invalid ethernet MAC address\n", - dev->name); - return -EINVAL; - } - - if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev)) - return -EAGAIN; - - /* - * Make sure that we aren't going to change the - * media type on the next reset - we are about to - * do automedia manually now. - */ - ei_local->interface_num = 0; - - /* - * If we are doing automedia detection, do it now. - * This is more reliable than the 8390's detection. - */ - if (dev->flags & IFF_AUTOMEDIA) { - dev->if_port = IF_PORT_10BASET; - etherh_setif(dev); - mdelay(1); - if (!etherh_getifstat(dev)) { - dev->if_port = IF_PORT_10BASE2; - etherh_setif(dev); - } - } else - etherh_setif(dev); - - etherh_reset(dev); - __ei_open(dev); - - return 0; -} - -/* - * The inverse routine to etherh_open(). - */ -static int -etherh_close(struct net_device *dev) -{ - __ei_close (dev); - free_irq (dev->irq, dev); - return 0; -} - -/* - * Initialisation - */ - -static void __init etherh_banner(void) -{ - static int version_printed; - - if (net_debug && version_printed++ == 0) - printk(KERN_INFO "%s", version); -} - -/* - * Read the ethernet address string from the on board rom. - * This is an ascii string... - */ -static int __devinit etherh_addr(char *addr, struct expansion_card *ec) -{ - struct in_chunk_dir cd; - char *s; - - if (!ecard_readchunk(&cd, ec, 0xf5, 0)) { - printk(KERN_ERR "%s: unable to read podule description string\n", - dev_name(&ec->dev)); - goto no_addr; - } - - s = strchr(cd.d.string, '('); - if (s) { - int i; - - for (i = 0; i < 6; i++) { - addr[i] = simple_strtoul(s + 1, &s, 0x10); - if (*s != (i == 5? ')' : ':')) - break; - } - - if (i == 6) - return 0; - } - - printk(KERN_ERR "%s: unable to parse MAC address: %s\n", - dev_name(&ec->dev), cd.d.string); - - no_addr: - return -ENODEV; -} - -/* - * Create an ethernet address from the system serial number. - */ -static int __init etherm_addr(char *addr) -{ - unsigned int serial; - - if (system_serial_low == 0 && system_serial_high == 0) - return -ENODEV; - - serial = system_serial_low | system_serial_high; - - addr[0] = 0; - addr[1] = 0; - addr[2] = 0xa4; - addr[3] = 0x10 + (serial >> 24); - addr[4] = serial >> 16; - addr[5] = serial >> 8; - return 0; -} - -static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, dev_name(dev->dev.parent), - sizeof(info->bus_info)); -} - -static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - cmd->supported = etherh_priv(dev)->supported; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = DUPLEX_HALF; - cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; - cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ? 
- AUTONEG_ENABLE : AUTONEG_DISABLE); - return 0; -} - -static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - switch (cmd->autoneg) { - case AUTONEG_ENABLE: - dev->flags |= IFF_AUTOMEDIA; - break; - - case AUTONEG_DISABLE: - switch (cmd->port) { - case PORT_TP: - dev->if_port = IF_PORT_10BASET; - break; - - case PORT_BNC: - dev->if_port = IF_PORT_10BASE2; - break; - - default: - return -EINVAL; - } - dev->flags &= ~IFF_AUTOMEDIA; - break; - - default: - return -EINVAL; - } - - etherh_setif(dev); - - return 0; -} - -static const struct ethtool_ops etherh_ethtool_ops = { - .get_settings = etherh_get_settings, - .set_settings = etherh_set_settings, - .get_drvinfo = etherh_get_drvinfo, -}; - -static const struct net_device_ops etherh_netdev_ops = { - .ndo_open = etherh_open, - .ndo_stop = etherh_close, - .ndo_set_config = etherh_set_config, - .ndo_start_xmit = __ei_start_xmit, - .ndo_tx_timeout = __ei_tx_timeout, - .ndo_get_stats = __ei_get_stats, - .ndo_set_multicast_list = __ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = __ei_poll, -#endif -}; - -static u32 etherh_regoffsets[16]; -static u32 etherm_regoffsets[16]; - -static int __devinit -etherh_probe(struct expansion_card *ec, const struct ecard_id *id) -{ - const struct etherh_data *data = id->data; - struct ei_device *ei_local; - struct net_device *dev; - struct etherh_priv *eh; - int ret; - - etherh_banner(); - - ret = ecard_request_resources(ec); - if (ret) - goto out; - - dev = ____alloc_ei_netdev(sizeof(struct etherh_priv)); - if (!dev) { - ret = -ENOMEM; - goto release; - } - - SET_NETDEV_DEV(dev, &ec->dev); - - dev->netdev_ops = ðerh_netdev_ops; - dev->irq = ec->irq; - dev->ethtool_ops = ðerh_ethtool_ops; - - if (data->supported & SUPPORTED_Autoneg) - dev->flags |= IFF_AUTOMEDIA; - if (data->supported & SUPPORTED_TP) { - dev->flags |= IFF_PORTSEL; - dev->if_port = IF_PORT_10BASET; - } else if (data->supported & SUPPORTED_BNC) { - dev->flags |= IFF_PORTSEL; - dev->if_port = IF_PORT_10BASE2; - } else - dev->if_port = IF_PORT_UNKNOWN; - - eh = etherh_priv(dev); - eh->supported = data->supported; - eh->ctrl = 0; - eh->id = ec->cid.product; - eh->memc = ecardm_iomap(ec, ECARD_RES_MEMC, 0, PAGE_SIZE); - if (!eh->memc) { - ret = -ENOMEM; - goto free; - } - - eh->ctrl_port = eh->memc; - if (data->ctrl_ioc) { - eh->ioc_fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, PAGE_SIZE); - if (!eh->ioc_fast) { - ret = -ENOMEM; - goto free; - } - eh->ctrl_port = eh->ioc_fast; - } - - dev->base_addr = (unsigned long)eh->memc + data->ns8390_offset; - eh->dma_base = eh->memc + data->dataport_offset; - eh->ctrl_port += data->ctrlport_offset; - - /* - * IRQ and control port handling - only for non-NIC slot cards. 
- */ - if (ec->slot_no != 8) { - ecard_setirq(ec, ðerh_ops, eh); - } else { - /* - * If we're in the NIC slot, make sure the IRQ is enabled - */ - etherh_set_ctrl(eh, ETHERH_CP_IE); - } - - ei_local = netdev_priv(dev); - spin_lock_init(&ei_local->page_lock); - - if (ec->cid.product == PROD_ANT_ETHERM) { - etherm_addr(dev->dev_addr); - ei_local->reg_offset = etherm_regoffsets; - } else { - etherh_addr(dev->dev_addr, ec); - ei_local->reg_offset = etherh_regoffsets; - } - - ei_local->name = dev->name; - ei_local->word16 = 1; - ei_local->tx_start_page = data->tx_start_page; - ei_local->rx_start_page = ei_local->tx_start_page + TX_PAGES; - ei_local->stop_page = data->stop_page; - ei_local->reset_8390 = etherh_reset; - ei_local->block_input = etherh_block_input; - ei_local->block_output = etherh_block_output; - ei_local->get_8390_hdr = etherh_get_header; - ei_local->interface_num = 0; - - etherh_reset(dev); - __NS8390_init(dev, 0); - - ret = register_netdev(dev); - if (ret) - goto free; - - printk(KERN_INFO "%s: %s in slot %d, %pM\n", - dev->name, data->name, ec->slot_no, dev->dev_addr); - - ecard_set_drvdata(ec, dev); - - return 0; - - free: - free_netdev(dev); - release: - ecard_release_resources(ec); - out: - return ret; -} - -static void __devexit etherh_remove(struct expansion_card *ec) -{ - struct net_device *dev = ecard_get_drvdata(ec); - - ecard_set_drvdata(ec, NULL); - - unregister_netdev(dev); - - free_netdev(dev); - - ecard_release_resources(ec); -} - -static struct etherh_data etherm_data = { - .ns8390_offset = ETHERM_NS8390, - .dataport_offset = ETHERM_NS8390 + ETHERM_DATAPORT, - .ctrlport_offset = ETHERM_NS8390 + ETHERM_CTRLPORT, - .name = "ANT EtherM", - .supported = SUPPORTED_10baseT_Half, - .tx_start_page = ETHERM_TX_START_PAGE, - .stop_page = ETHERM_STOP_PAGE, -}; - -static struct etherh_data etherlan500_data = { - .ns8390_offset = ETHERH500_NS8390, - .dataport_offset = ETHERH500_NS8390 + ETHERH500_DATAPORT, - .ctrlport_offset = ETHERH500_CTRLPORT, - .ctrl_ioc = 1, - .name = "i3 EtherH 500", - .supported = SUPPORTED_10baseT_Half, - .tx_start_page = ETHERH_TX_START_PAGE, - .stop_page = ETHERH_STOP_PAGE, -}; - -static struct etherh_data etherlan600_data = { - .ns8390_offset = ETHERH600_NS8390, - .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT, - .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT, - .name = "i3 EtherH 600", - .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg, - .tx_start_page = ETHERH_TX_START_PAGE, - .stop_page = ETHERH_STOP_PAGE, -}; - -static struct etherh_data etherlan600a_data = { - .ns8390_offset = ETHERH600_NS8390, - .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT, - .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT, - .name = "i3 EtherH 600A", - .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg, - .tx_start_page = ETHERH_TX_START_PAGE, - .stop_page = ETHERH_STOP_PAGE, -}; - -static const struct ecard_id etherh_ids[] = { - { MANU_ANT, PROD_ANT_ETHERM, ðerm_data }, - { MANU_I3, PROD_I3_ETHERLAN500, ðerlan500_data }, - { MANU_I3, PROD_I3_ETHERLAN600, ðerlan600_data }, - { MANU_I3, PROD_I3_ETHERLAN600A, ðerlan600a_data }, - { 0xffff, 0xffff } -}; - -static struct ecard_driver etherh_driver = { - .probe = etherh_probe, - .remove = __devexit_p(etherh_remove), - .id_table = etherh_ids, - .drv = { - .name = DRV_NAME, - }, -}; - -static int __init etherh_init(void) -{ - int i; - - for (i = 0; i < 16; i++) { - etherh_regoffsets[i] = i << 2; - etherm_regoffsets[i] 
= i << 5; - } - - return ecard_register_driver(ðerh_driver); -} - -static void __exit etherh_exit(void) -{ - ecard_remove_driver(ðerh_driver); -} - -module_init(etherh_init); -module_exit(etherh_exit); diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c deleted file mode 100644 index e7cb8c8..0000000 --- a/drivers/net/ax88796.c +++ /dev/null @@ -1,1010 +0,0 @@ -/* drivers/net/ax88796.c - * - * Copyright 2005,2007 Simtec Electronics - * Ben Dooks - * - * Asix AX88796 10/100 Ethernet controller support - * Based on ne.c, by Donald Becker, et-al. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -/* Rename the lib8390.c functions to show that they are in this driver */ -#define __ei_open ax_ei_open -#define __ei_close ax_ei_close -#define __ei_poll ax_ei_poll -#define __ei_start_xmit ax_ei_start_xmit -#define __ei_tx_timeout ax_ei_tx_timeout -#define __ei_get_stats ax_ei_get_stats -#define __ei_set_multicast_list ax_ei_set_multicast_list -#define __ei_interrupt ax_ei_interrupt -#define ____alloc_ei_netdev ax__alloc_ei_netdev -#define __NS8390_init ax_NS8390_init - -/* force unsigned long back to 'void __iomem *' */ -#define ax_convert_addr(_a) ((void __force __iomem *)(_a)) - -#define ei_inb(_a) readb(ax_convert_addr(_a)) -#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a)) - -#define ei_inb_p(_a) ei_inb(_a) -#define ei_outb_p(_v, _a) ei_outb(_v, _a) - -/* define EI_SHIFT() to take into account our register offsets */ -#define EI_SHIFT(x) (ei_local->reg_offset[(x)]) - -/* Ensure we have our RCR base value */ -#define AX88796_PLATFORM - -static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electronics\n"; - -#include "lib8390.c" - -#define DRV_NAME "ax88796" -#define DRV_VERSION "1.00" - -/* from ne.c */ -#define NE_CMD EI_SHIFT(0x00) -#define NE_RESET EI_SHIFT(0x1f) -#define NE_DATAPORT EI_SHIFT(0x10) - -#define NE1SM_START_PG 0x20 /* First page of TX buffer */ -#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ -#define NESM_START_PG 0x40 /* First page of TX buffer */ -#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ - -#define AX_GPOC_PPDSET BIT(6) - -/* device private data */ - -struct ax_device { - struct mii_bus *mii_bus; - struct mdiobb_ctrl bb_ctrl; - struct phy_device *phy_dev; - void __iomem *addr_memr; - u8 reg_memr; - int link; - int speed; - int duplex; - - void __iomem *map2; - const struct ax_plat_data *plat; - - unsigned char running; - unsigned char resume_open; - unsigned int irqflags; - - u32 reg_offsets[0x20]; -}; - -static inline struct ax_device *to_ax_dev(struct net_device *dev) -{ - struct ei_device *ei_local = netdev_priv(dev); - return (struct ax_device *)(ei_local + 1); -} - -/* - * ax_initial_check - * - * do an initial probe for the card to check wether it exists - * and is functional - */ -static int ax_initial_check(struct net_device *dev) -{ - struct ei_device *ei_local = netdev_priv(dev); - void __iomem *ioaddr = ei_local->mem; - int reg0; - int regd; - - reg0 = ei_inb(ioaddr); - if (reg0 == 0xFF) - return -ENODEV; - - ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD); - regd = ei_inb(ioaddr + 0x0d); - ei_outb(0xff, ioaddr + 0x0d); - ei_outb(E8390_NODMA + 
E8390_PAGE0, ioaddr + E8390_CMD); - ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ - if (ei_inb(ioaddr + EN0_COUNTER0) != 0) { - ei_outb(reg0, ioaddr); - ei_outb(regd, ioaddr + 0x0d); /* Restore the old values. */ - return -ENODEV; - } - - return 0; -} - -/* - * Hard reset the card. This used to pause for the same period that a - * 8390 reset command required, but that shouldn't be necessary. - */ -static void ax_reset_8390(struct net_device *dev) -{ - struct ei_device *ei_local = netdev_priv(dev); - unsigned long reset_start_time = jiffies; - void __iomem *addr = (void __iomem *)dev->base_addr; - - if (ei_debug > 1) - netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies); - - ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); - - ei_local->txing = 0; - ei_local->dmaing = 0; - - /* This check _should_not_ be necessary, omit eventually. */ - while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { - if (jiffies - reset_start_time > 2 * HZ / 100) { - netdev_warn(dev, "%s: did not complete.\n", __func__); - break; - } - } - - ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */ -} - - -static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page) -{ - struct ei_device *ei_local = netdev_priv(dev); - void __iomem *nic_base = ei_local->mem; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_local->dmaing) { - netdev_err(dev, "DMAing conflict in %s " - "[DMAstat:%d][irqlock:%d].\n", - __func__, - ei_local->dmaing, ei_local->irqlock); - return; - } - - ei_local->dmaing |= 0x01; - ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); - ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); - ei_outb(0, nic_base + EN0_RCNTHI); - ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */ - ei_outb(ring_page, nic_base + EN0_RSARHI); - ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); - - if (ei_local->word16) - readsw(nic_base + NE_DATAPORT, hdr, - sizeof(struct e8390_pkt_hdr) >> 1); - else - readsb(nic_base + NE_DATAPORT, hdr, - sizeof(struct e8390_pkt_hdr)); - - ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_local->dmaing &= ~0x01; - - le16_to_cpus(&hdr->count); -} - - -/* - * Block input and output, similar to the Crynwr packet driver. If - * you are porting to a new ethercard, look at the packet driver - * source for hints. The NEx000 doesn't share the on-board packet - * memory -- you have to put the packet out through the "remote DMA" - * dataport using ei_outb. 
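Distilled from the helpers in this file, a remote-DMA read is always the same register sequence; sketch only, reusing the names the code below uses:

    ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
    ei_outb(count & 0xff, nic_base + EN0_RCNTLO);   /* byte count     */
    ei_outb(count >> 8,   nic_base + EN0_RCNTHI);
    ei_outb(addr & 0xff,  nic_base + EN0_RSARLO);   /* start address  */
    ei_outb(addr >> 8,    nic_base + EN0_RSARHI);
    ei_outb(E8390_RREAD + E8390_START, nic_base + NE_CMD);
    /* ...drain `count` bytes or words from NE_DATAPORT, then ack: */
    ei_outb(ENISR_RDC, nic_base + EN0_ISR);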
- */ -static void ax_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - struct ei_device *ei_local = netdev_priv(dev); - void __iomem *nic_base = ei_local->mem; - char *buf = skb->data; - - if (ei_local->dmaing) { - netdev_err(dev, - "DMAing conflict in %s " - "[DMAstat:%d][irqlock:%d].\n", - __func__, - ei_local->dmaing, ei_local->irqlock); - return; - } - - ei_local->dmaing |= 0x01; - - ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD); - ei_outb(count & 0xff, nic_base + EN0_RCNTLO); - ei_outb(count >> 8, nic_base + EN0_RCNTHI); - ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); - ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); - ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); - - if (ei_local->word16) { - readsw(nic_base + NE_DATAPORT, buf, count >> 1); - if (count & 0x01) - buf[count-1] = ei_inb(nic_base + NE_DATAPORT); - - } else { - readsb(nic_base + NE_DATAPORT, buf, count); - } - - ei_local->dmaing &= ~1; -} - -static void ax_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page) -{ - struct ei_device *ei_local = netdev_priv(dev); - void __iomem *nic_base = ei_local->mem; - unsigned long dma_start; - - /* - * Round the count up for word writes. Do we need to do this? - * What effect will an odd byte count have on the 8390? I - * should check someday. - */ - if (ei_local->word16 && (count & 0x01)) - count++; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_local->dmaing) { - netdev_err(dev, "DMAing conflict in %s." - "[DMAstat:%d][irqlock:%d]\n", - __func__, - ei_local->dmaing, ei_local->irqlock); - return; - } - - ei_local->dmaing |= 0x01; - /* We should already be in page 0, but to be safe... */ - ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); - - ei_outb(ENISR_RDC, nic_base + EN0_ISR); - - /* Now the normal output. */ - ei_outb(count & 0xff, nic_base + EN0_RCNTLO); - ei_outb(count >> 8, nic_base + EN0_RCNTHI); - ei_outb(0x00, nic_base + EN0_RSARLO); - ei_outb(start_page, nic_base + EN0_RSARHI); - - ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); - if (ei_local->word16) - writesw(nic_base + NE_DATAPORT, buf, count >> 1); - else - writesb(nic_base + NE_DATAPORT, buf, count); - - dma_start = jiffies; - - while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { - if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */ - netdev_warn(dev, "timeout waiting for Tx RDC.\n"); - ax_reset_8390(dev); - ax_NS8390_init(dev, 1); - break; - } - } - - ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. 
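Note the ei_local->dmaing flag bracketing each of these helpers: it is a tripwire, not a lock. Remote DMA is a multi-register, non-atomic sequence, so each helper sets bit 0 on entry, bails out (and logs the conflict) if it is already set, and clears it on exit:

    if (ei_local->dmaing)
        return;                  /* overlapping remote DMA: bail out */
    ei_local->dmaing |= 0x01;
    /* ...program RSAR/RCNT and move the data... */
    ei_local->dmaing &= ~0x01;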
*/ - ei_local->dmaing &= ~0x01; -} - -/* definitions for accessing MII/EEPROM interface */ - -#define AX_MEMR EI_SHIFT(0x14) -#define AX_MEMR_MDC BIT(0) -#define AX_MEMR_MDIR BIT(1) -#define AX_MEMR_MDI BIT(2) -#define AX_MEMR_MDO BIT(3) -#define AX_MEMR_EECS BIT(4) -#define AX_MEMR_EEI BIT(5) -#define AX_MEMR_EEO BIT(6) -#define AX_MEMR_EECLK BIT(7) - -static void ax_handle_link_change(struct net_device *dev) -{ - struct ax_device *ax = to_ax_dev(dev); - struct phy_device *phy_dev = ax->phy_dev; - int status_change = 0; - - if (phy_dev->link && ((ax->speed != phy_dev->speed) || - (ax->duplex != phy_dev->duplex))) { - - ax->speed = phy_dev->speed; - ax->duplex = phy_dev->duplex; - status_change = 1; - } - - if (phy_dev->link != ax->link) { - if (!phy_dev->link) { - ax->speed = 0; - ax->duplex = -1; - } - ax->link = phy_dev->link; - - status_change = 1; - } - - if (status_change) - phy_print_status(phy_dev); -} - -static int ax_mii_probe(struct net_device *dev) -{ - struct ax_device *ax = to_ax_dev(dev); - struct phy_device *phy_dev = NULL; - int ret; - - /* find the first phy */ - phy_dev = phy_find_first(ax->mii_bus); - if (!phy_dev) { - netdev_err(dev, "no PHY found\n"); - return -ENODEV; - } - - ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0, - PHY_INTERFACE_MODE_MII); - if (ret) { - netdev_err(dev, "Could not attach to PHY\n"); - return ret; - } - - /* mask with MAC supported features */ - phy_dev->supported &= PHY_BASIC_FEATURES; - phy_dev->advertising = phy_dev->supported; - - ax->phy_dev = phy_dev; - - netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq); - - return 0; -} - -static void ax_phy_switch(struct net_device *dev, int on) -{ - struct ei_device *ei_local = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); - - u8 reg_gpoc = ax->plat->gpoc_val; - - if (!!on) - reg_gpoc &= ~AX_GPOC_PPDSET; - else - reg_gpoc |= AX_GPOC_PPDSET; - - ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17)); -} - -static int ax_open(struct net_device *dev) -{ - struct ax_device *ax = to_ax_dev(dev); - int ret; - - netdev_dbg(dev, "open\n"); - - ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, - dev->name, dev); - if (ret) - goto failed_request_irq; - - /* turn the phy on (if turned off) */ - ax_phy_switch(dev, 1); - - ret = ax_mii_probe(dev); - if (ret) - goto failed_mii_probe; - phy_start(ax->phy_dev); - - ret = ax_ei_open(dev); - if (ret) - goto failed_ax_ei_open; - - ax->running = 1; - - return 0; - - failed_ax_ei_open: - phy_disconnect(ax->phy_dev); - failed_mii_probe: - ax_phy_switch(dev, 0); - free_irq(dev->irq, dev); - failed_request_irq: - return ret; -} - -static int ax_close(struct net_device *dev) -{ - struct ax_device *ax = to_ax_dev(dev); - - netdev_dbg(dev, "close\n"); - - ax->running = 0; - wmb(); - - ax_ei_close(dev); - - /* turn the phy off */ - ax_phy_switch(dev, 0); - phy_disconnect(ax->phy_dev); - - free_irq(dev->irq, dev); - return 0; -} - -static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd) -{ - struct ax_device *ax = to_ax_dev(dev); - struct phy_device *phy_dev = ax->phy_dev; - - if (!netif_running(dev)) - return -EINVAL; - - if (!phy_dev) - return -ENODEV; - - return phy_mii_ioctl(phy_dev, req, cmd); -} - -/* ethtool ops */ - -static void ax_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct platform_device *pdev = to_platform_device(dev->dev.parent); - - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); 
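/* The phylib wiring above is the standard pattern: find a PHY on the
 * bit-banged bus, attach with a link-change callback, and let the
 * callback mirror link/speed/duplex into driver state. A sketch using
 * the same calls as ax_mii_probe()/ax_open()/ax_close() above:
 *
 *     phy_dev = phy_find_first(ax->mii_bus);
 *     phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
 *                        PHY_INTERFACE_MODE_MII);
 *     phy_start(ax->phy_dev);          // on open
 *     ...
 *     phy_disconnect(ax->phy_dev);     // on close
 */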
- strcpy(info->bus_info, pdev->name); -} - -static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ax_device *ax = to_ax_dev(dev); - struct phy_device *phy_dev = ax->phy_dev; - - if (!phy_dev) - return -ENODEV; - - return phy_ethtool_gset(phy_dev, cmd); -} - -static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ax_device *ax = to_ax_dev(dev); - struct phy_device *phy_dev = ax->phy_dev; - - if (!phy_dev) - return -ENODEV; - - return phy_ethtool_sset(phy_dev, cmd); -} - -static const struct ethtool_ops ax_ethtool_ops = { - .get_drvinfo = ax_get_drvinfo, - .get_settings = ax_get_settings, - .set_settings = ax_set_settings, - .get_link = ethtool_op_get_link, -}; - -#ifdef CONFIG_AX88796_93CX6 -static void ax_eeprom_register_read(struct eeprom_93cx6 *eeprom) -{ - struct ei_device *ei_local = eeprom->data; - u8 reg = ei_inb(ei_local->mem + AX_MEMR); - - eeprom->reg_data_in = reg & AX_MEMR_EEI; - eeprom->reg_data_out = reg & AX_MEMR_EEO; /* Input pin */ - eeprom->reg_data_clock = reg & AX_MEMR_EECLK; - eeprom->reg_chip_select = reg & AX_MEMR_EECS; -} - -static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom) -{ - struct ei_device *ei_local = eeprom->data; - u8 reg = ei_inb(ei_local->mem + AX_MEMR); - - reg &= ~(AX_MEMR_EEI | AX_MEMR_EECLK | AX_MEMR_EECS); - - if (eeprom->reg_data_in) - reg |= AX_MEMR_EEI; - if (eeprom->reg_data_clock) - reg |= AX_MEMR_EECLK; - if (eeprom->reg_chip_select) - reg |= AX_MEMR_EECS; - - ei_outb(reg, ei_local->mem + AX_MEMR); - udelay(10); -} -#endif - -static const struct net_device_ops ax_netdev_ops = { - .ndo_open = ax_open, - .ndo_stop = ax_close, - .ndo_do_ioctl = ax_ioctl, - - .ndo_start_xmit = ax_ei_start_xmit, - .ndo_tx_timeout = ax_ei_tx_timeout, - .ndo_get_stats = ax_ei_get_stats, - .ndo_set_multicast_list = ax_ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ax_ei_poll, -#endif -}; - -static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - - if (level) - ax->reg_memr |= AX_MEMR_MDC; - else - ax->reg_memr &= ~AX_MEMR_MDC; - - ei_outb(ax->reg_memr, ax->addr_memr); -} - -static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - - if (output) - ax->reg_memr &= ~AX_MEMR_MDIR; - else - ax->reg_memr |= AX_MEMR_MDIR; - - ei_outb(ax->reg_memr, ax->addr_memr); -} - -static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - - if (value) - ax->reg_memr |= AX_MEMR_MDO; - else - ax->reg_memr &= ~AX_MEMR_MDO; - - ei_outb(ax->reg_memr, ax->addr_memr); -} - -static int ax_bb_get_data(struct mdiobb_ctrl *ctrl) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - int reg_memr = ei_inb(ax->addr_memr); - - return reg_memr & AX_MEMR_MDI ? 
1 : 0; -} - -static struct mdiobb_ops bb_ops = { - .owner = THIS_MODULE, - .set_mdc = ax_bb_mdc, - .set_mdio_dir = ax_bb_dir, - .set_mdio_data = ax_bb_set_data, - .get_mdio_data = ax_bb_get_data, -}; - -/* setup code */ - -static int ax_mii_init(struct net_device *dev) -{ - struct platform_device *pdev = to_platform_device(dev->dev.parent); - struct ei_device *ei_local = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); - int err, i; - - ax->bb_ctrl.ops = &bb_ops; - ax->addr_memr = ei_local->mem + AX_MEMR; - ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl); - if (!ax->mii_bus) { - err = -ENOMEM; - goto out; - } - - ax->mii_bus->name = "ax88796_mii_bus"; - ax->mii_bus->parent = dev->dev.parent; - snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); - - ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!ax->mii_bus->irq) { - err = -ENOMEM; - goto out_free_mdio_bitbang; - } - - for (i = 0; i < PHY_MAX_ADDR; i++) - ax->mii_bus->irq[i] = PHY_POLL; - - err = mdiobus_register(ax->mii_bus); - if (err) - goto out_free_irq; - - return 0; - - out_free_irq: - kfree(ax->mii_bus->irq); - out_free_mdio_bitbang: - free_mdio_bitbang(ax->mii_bus); - out: - return err; -} - -static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) -{ - void __iomem *ioaddr = ei_local->mem; - struct ax_device *ax = to_ax_dev(dev); - - /* Select page 0 */ - ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD); - - /* set to byte access */ - ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG); - ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17)); -} - -/* - * ax_init_dev - * - * initialise the specified device, taking care to note the MAC - * address it may already have (if configured), ensure - * the device is ready to be used by lib8390.c and registerd with - * the network layer. - */ -static int ax_init_dev(struct net_device *dev) -{ - struct ei_device *ei_local = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); - void __iomem *ioaddr = ei_local->mem; - unsigned int start_page; - unsigned int stop_page; - int ret; - int i; - - ret = ax_initial_check(dev); - if (ret) - goto err_out; - - /* setup goes here */ - - ax_initial_setup(dev, ei_local); - - /* read the mac from the card prom if we need it */ - - if (ax->plat->flags & AXFLG_HAS_EEPROM) { - unsigned char SA_prom[32]; - - for (i = 0; i < sizeof(SA_prom); i += 2) { - SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); - SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT); - } - - if (ax->plat->wordlength == 2) - for (i = 0; i < 16; i++) - SA_prom[i] = SA_prom[i+i]; - - memcpy(dev->dev_addr, SA_prom, 6); - } - -#ifdef CONFIG_AX88796_93CX6 - if (ax->plat->flags & AXFLG_HAS_93CX6) { - unsigned char mac_addr[6]; - struct eeprom_93cx6 eeprom; - - eeprom.data = ei_local; - eeprom.register_read = ax_eeprom_register_read; - eeprom.register_write = ax_eeprom_register_write; - eeprom.width = PCI_EEPROM_WIDTH_93C56; - - eeprom_93cx6_multiread(&eeprom, 0, - (__le16 __force *)mac_addr, - sizeof(mac_addr) >> 1); - - memcpy(dev->dev_addr, mac_addr, 6); - } -#endif - if (ax->plat->wordlength == 2) { - /* We must set the 8390 for word mode. 
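Bit 0 of the 8390 data configuration register selects word transfers; ax_initial_setup() earlier masked it off for byte-wide PROM reads, and the write just below restores the full platform value to switch the dataport to 16-bit access:

    ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);  /* byte access */
    ei_outb(ax->plat->dcr_val,      ioaddr + EN0_DCFG);  /* word access */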
*/ - ei_outb(ax->plat->dcr_val, ei_local->mem + EN0_DCFG); - start_page = NESM_START_PG; - stop_page = NESM_STOP_PG; - } else { - start_page = NE1SM_START_PG; - stop_page = NE1SM_STOP_PG; - } - - /* load the mac-address from the device */ - if (ax->plat->flags & AXFLG_MAC_FROMDEV) { - ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, - ei_local->mem + E8390_CMD); /* 0x61 */ - for (i = 0; i < ETHER_ADDR_LEN; i++) - dev->dev_addr[i] = - ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); - } - - if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) && - ax->plat->mac_addr) - memcpy(dev->dev_addr, ax->plat->mac_addr, - ETHER_ADDR_LEN); - - ax_reset_8390(dev); - - ei_local->name = "AX88796"; - ei_local->tx_start_page = start_page; - ei_local->stop_page = stop_page; - ei_local->word16 = (ax->plat->wordlength == 2); - ei_local->rx_start_page = start_page + TX_PAGES; - -#ifdef PACKETBUF_MEMSIZE - /* Allow the packet buffer size to be overridden by know-it-alls. */ - ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE; -#endif - - ei_local->reset_8390 = &ax_reset_8390; - ei_local->block_input = &ax_block_input; - ei_local->block_output = &ax_block_output; - ei_local->get_8390_hdr = &ax_get_8390_hdr; - ei_local->priv = 0; - - dev->netdev_ops = &ax_netdev_ops; - dev->ethtool_ops = &ax_ethtool_ops; - - ret = ax_mii_init(dev); - if (ret) - goto out_irq; - - ax_NS8390_init(dev, 0); - - ret = register_netdev(dev); - if (ret) - goto out_irq; - - netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n", - ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr, - dev->dev_addr); - - return 0; - - out_irq: - /* cleanup irq */ - free_irq(dev->irq, dev); - err_out: - return ret; -} - -static int ax_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - struct ei_device *ei_local = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); - struct resource *mem; - - unregister_netdev(dev); - free_irq(dev->irq, dev); - - iounmap(ei_local->mem); - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(mem->start, resource_size(mem)); - - if (ax->map2) { - iounmap(ax->map2); - mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); - release_mem_region(mem->start, resource_size(mem)); - } - - free_netdev(dev); - - return 0; -} - -/* - * ax_probe - * - * This is the entry point that the platform device system uses to - * notify us of a new device to attach to. Allocate memory, find the - * resources and information passed, and map the necessary registers. 
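(Before the probe itself, a hedged board-file sketch of what it consumes; the my_ax88796_* names and the address/IRQ values are illustrative assumptions, not part of this patch. struct ax_plat_data is the type from the ax88796 platform header, <net/ax88796.h>; the sketch also needs <linux/platform_device.h> and <linux/interrupt.h>.) */

static struct ax_plat_data my_ax88796_pdata = {
	.wordlength	= 2,		/* assumed 16-bit bus wiring */
	/* dcr_val, rcr_val, gpoc_val etc. would also be supplied here */
};

static struct resource my_ax88796_resources[] = {
	[0] = {					/* 8390 register window */
		.start	= 0x20000000,		/* assumed base address */
		.end	= 0x20000000 + 0x20 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {					/* interrupt; trigger encoded in flags */
		.start	= 32,			/* assumed IRQ number */
		.end	= 32,
		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_HIGH,
	},
};

static struct platform_device my_ax88796_device = {
	.name		= "ax88796",		/* must match axdrv.driver.name below */
	.id		= 0,
	.resource	= my_ax88796_resources,
	.num_resources	= ARRAY_SIZE(my_ax88796_resources),
	.dev		= {
		.platform_data = &my_ax88796_pdata,
	},
};

/* ax_probe() itself, which consumes exactly those resources: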
- */ -static int ax_probe(struct platform_device *pdev) -{ - struct net_device *dev; - struct ei_device *ei_local; - struct ax_device *ax; - struct resource *irq, *mem, *mem2; - resource_size_t mem_size, mem2_size = 0; - int ret = 0; - - dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); - if (dev == NULL) - return -ENOMEM; - - /* ok, let's setup our device */ - SET_NETDEV_DEV(dev, &pdev->dev); - ei_local = netdev_priv(dev); - ax = to_ax_dev(dev); - - ax->plat = pdev->dev.platform_data; - platform_set_drvdata(pdev, dev); - - ei_local->rxcr_base = ax->plat->rcr_val; - - /* find the platform resources */ - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq) { - dev_err(&pdev->dev, "no IRQ specified\n"); - ret = -ENXIO; - goto exit_mem; - } - - dev->irq = irq->start; - ax->irqflags = irq->flags & IRQF_TRIGGER_MASK; - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - dev_err(&pdev->dev, "no MEM specified\n"); - ret = -ENXIO; - goto exit_mem; - } - - mem_size = resource_size(mem); - - /* - * setup the register offsets from either the platform data or - * by using the size of the resource provided - */ - if (ax->plat->reg_offsets) - ei_local->reg_offset = ax->plat->reg_offsets; - else { - ei_local->reg_offset = ax->reg_offsets; - for (ret = 0; ret < 0x18; ret++) - ax->reg_offsets[ret] = (mem_size / 0x18) * ret; - } - - if (!request_mem_region(mem->start, mem_size, pdev->name)) { - dev_err(&pdev->dev, "cannot reserve registers\n"); - ret = -ENXIO; - goto exit_mem; - } - - ei_local->mem = ioremap(mem->start, mem_size); - dev->base_addr = (unsigned long)ei_local->mem; - - if (ei_local->mem == NULL) { - dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem); - - ret = -ENXIO; - goto exit_req; - } - - /* look for reset area */ - mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!mem2) { - if (!ax->plat->reg_offsets) { - for (ret = 0; ret < 0x20; ret++) - ax->reg_offsets[ret] = (mem_size / 0x20) * ret; - } - } else { - mem2_size = resource_size(mem2); - - if (!request_mem_region(mem2->start, mem2_size, pdev->name)) { - dev_err(&pdev->dev, "cannot reserve registers\n"); - ret = -ENXIO; - goto exit_mem1; - } - - ax->map2 = ioremap(mem2->start, mem2_size); - if (!ax->map2) { - dev_err(&pdev->dev, "cannot map reset register\n"); - ret = -ENXIO; - goto exit_mem2; - } - - ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem; - } - - /* got resources, now initialise and register device */ - ret = ax_init_dev(dev); - if (!ret) - return 0; - - if (!ax->map2) - goto exit_mem1; - - iounmap(ax->map2); - - exit_mem2: - release_mem_region(mem2->start, mem2_size); - - exit_mem1: - iounmap(ei_local->mem); - - exit_req: - release_mem_region(mem->start, mem_size); - - exit_mem: - free_netdev(dev); - - return ret; -} - -/* suspend and resume */ - -#ifdef CONFIG_PM -static int ax_suspend(struct platform_device *dev, pm_message_t state) -{ - struct net_device *ndev = platform_get_drvdata(dev); - struct ax_device *ax = to_ax_dev(ndev); - - ax->resume_open = ax->running; - - netif_device_detach(ndev); - ax_close(ndev); - - return 0; -} - -static int ax_resume(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct ax_device *ax = to_ax_dev(ndev); - - ax_initial_setup(ndev, netdev_priv(ndev)); - ax_NS8390_init(ndev, ax->resume_open); - netif_device_attach(ndev); - - if (ax->resume_open) - ax_open(ndev); - - return 0; -} - -#else -#define ax_suspend NULL -#define ax_resume NULL -#endif - -static struct platform_driver axdrv = { - 
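/* Legacy suspend/resume hooks (not dev_pm_ops); ax_suspend() above records whether the interface was up so ax_resume() can reopen it. */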
.driver = { - .name = "ax88796", - .owner = THIS_MODULE, - }, - .probe = ax_probe, - .remove = ax_remove, - .suspend = ax_suspend, - .resume = ax_resume, -}; - -static int __init axdrv_init(void) -{ - return platform_driver_register(&axdrv); -} - -static void __exit axdrv_exit(void) -{ - platform_driver_unregister(&axdrv); -} - -module_init(axdrv_init); -module_exit(axdrv_exit); - -MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver"); -MODULE_AUTHOR("Ben Dooks, "); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:ax88796"); diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c deleted file mode 100644 index d50a999..0000000 --- a/drivers/net/e2100.c +++ /dev/null @@ -1,490 +0,0 @@ -/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */ -/* - Written 1993-1994 by Donald Becker. - - Copyright 1994 by Donald Becker. - Copyright 1993 United States Government as represented by the - Director, National Security Agency. This software may be used and - distributed according to the terms of the GNU General Public License, - incorporated herein by reference. - - This is a driver for the Cabletron E2100 series ethercards. - - The Author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - The E2100 series ethercard is a fairly generic shared memory 8390 - implementation. The only unusual aspect is the way the shared memory - registers are set: first you do an inb() in what is normally the - station address region, and the low three bits of next outb() *address* - is used as the write value for that register. Either someone wasn't - too used to dem bit en bites, or they were trying to obfuscate the - programming interface. - - There is an additional complication when setting the window on the packet - buffer. You must first do a read into the packet buffer region with the - low 8 address bits the address setting the page for the start of the packet - buffer window, and then do the above operation. See mem_on() for details. - - One bug on the chip is that even a hard reset won't disable the memory - window, usually resulting in a hung machine if mem_off() isn't called. - If this happens, you must power down the machine for about 30 seconds. -*/ - -static const char version[] = - "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "8390.h" - -#define DRV_NAME "e2100" - -static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0}; - -/* Offsets from the base_addr. - Read from the ASIC register, and the low three bits of the next outb() - address is used to set the corresponding register. */ -#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */ -#define E21_ASIC 0x10 -#define E21_MEM_ENABLE 0x10 -#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */ -#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */ -#define E21_MEM_BASE 0x11 -#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */ -#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */ -#define E21_MEDIA 0x14 /* (alias). */ -#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */ -#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */ -#define E21_SAPROM 0x10 /* Offset to station address data. 
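(numerically the same offset as the E21_ASIC write window). */

/*
 * Hypothetical helper -- not in the driver -- restating the odd E2100
 * write protocol described in the header comment above: an inb() from
 * the ASIC register selects it, and the low three bits of the *address*
 * of the following outb() carry the value. Compare mem_on() below and
 * the IRQ setup in e21_open().
 */
static inline void e21_asic_write(short port, int reg, int value)
{
	inb(port + reg);			/* select the register */
	outb(0, port + E21_ASIC + (value & 7));	/* address bits 0-2 carry the value */
}

/* I/O extent reserved around these registers: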
*/ -#define E21_IO_EXTENT 0x20 - -static inline void mem_on(short port, volatile char __iomem *mem_base, - unsigned char start_page ) -{ - /* This is a little weird: set the shared memory window by doing a - read. The low address bits specify the starting page. */ - readb(mem_base+start_page); - inb(port + E21_MEM_ENABLE); - outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON); -} - -static inline void mem_off(short port) -{ - inb(port + E21_MEM_ENABLE); - outb(0x00, port + E21_MEM_ENABLE); -} - -/* In other drivers I put the TX pages first, but the E2100 window circuitry - is designed to have a 4K Tx region last. The windowing circuitry wraps the - window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring - appear contiguously in the window. */ -#define E21_RX_START_PG 0x00 /* First page of RX buffer */ -#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */ -#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */ -#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */ - -static int e21_probe1(struct net_device *dev, int ioaddr); - -static int e21_open(struct net_device *dev); -static void e21_reset_8390(struct net_device *dev); -static void e21_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void e21_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static int e21_open(struct net_device *dev); -static int e21_close(struct net_device *dev); - - -/* Probe for the E2100 series ethercards. These cards have an 8390 at the - base address and the station address at both offset 0x10 and 0x18. I read - the station address from offset 0x18 to avoid the dataport of NE2000 - ethercards, and look for Ctron's unique ID (first three octets of the - station address). - */ - -static int __init do_e2100_probe(struct net_device *dev) -{ - int *port; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return e21_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - for (port = e21_probe_list; *port; port++) { - dev->irq = irq; - if (e21_probe1(dev, *port) == 0) - return 0; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init e2100_probe(int unit) -{ - struct net_device *dev = alloc_ei_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_e2100_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops e21_netdev_ops = { - .ndo_open = e21_open, - .ndo_stop = e21_close, - - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int __init e21_probe1(struct net_device *dev, int ioaddr) -{ - int i, status, retval; - unsigned char *station_addr = dev->dev_addr; - static unsigned version_printed; - - if (!request_region(ioaddr, E21_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - /* First check the station address for the Ctron prefix. 
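(Ctron is Cabletron; its OUI 00:00:1d is matched byte by byte below.)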
*/ - if (inb(ioaddr + E21_SAPROM + 0) != 0x00 || - inb(ioaddr + E21_SAPROM + 1) != 0x00 || - inb(ioaddr + E21_SAPROM + 2) != 0x1d) { - retval = -ENODEV; - goto out; - } - - /* Verify by making certain that there is a 8390 at there. */ - outb(E8390_NODMA + E8390_STOP, ioaddr); - udelay(1); /* we want to delay one I/O cycle - which is 2MHz */ - status = inb(ioaddr); - if (status != 0x21 && status != 0x23) { - retval = -ENODEV; - goto out; - } - - /* Read the station address PROM. */ - for (i = 0; i < 6; i++) - station_addr[i] = inb(ioaddr + E21_SAPROM + i); - - inb(ioaddr + E21_MEDIA); /* Point to media selection. */ - outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */ - - if (ei_debug && version_printed++ == 0) - printk(version); - - for (i = 0; i < 6; i++) - printk(" %02X", station_addr[i]); - - if (dev->irq < 2) { - static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4}; - for (i = 0; i < ARRAY_SIZE(irqlist); i++) - if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) { - dev->irq = irqlist[i]; - break; - } - if (i >= ARRAY_SIZE(irqlist)) { - printk(" unable to get IRQ %d.\n", dev->irq); - retval = -EAGAIN; - goto out; - } - } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */ - dev->irq = 9; - - /* The 8390 is at the base address. */ - dev->base_addr = ioaddr; - - ei_status.name = "E2100"; - ei_status.word16 = 1; - ei_status.tx_start_page = E21_TX_START_PG; - ei_status.rx_start_page = E21_RX_START_PG; - ei_status.stop_page = E21_RX_STOP_PG; - ei_status.saved_irq = dev->irq; - - /* Check the media port used. The port can be passed in on the - low mem_end bits. */ - if (dev->mem_end & 15) - dev->if_port = dev->mem_end & 7; - else { - dev->if_port = 0; - inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */ - for(i = 0; i < 6; i++) - if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) { - dev->if_port = 1; - break; - } - } - - /* Never map in the E21 shared memory unless you are actively using it. - Also, the shared memory has effective only one setting -- spread all - over the 128K region! */ - if (dev->mem_start == 0) - dev->mem_start = 0xd0000; - - ei_status.mem = ioremap(dev->mem_start, 2*1024); - if (!ei_status.mem) { - printk("unable to remap memory\n"); - retval = -EAGAIN; - goto out; - } - -#ifdef notdef - /* These values are unused. The E2100 has a 2K window into the packet - buffer. The window can be set to start on any page boundary. */ - ei_status.rmem_start = dev->mem_start + TX_PAGES*256; - dev->mem_end = ei_status.rmem_end = dev->mem_start + 2*1024; -#endif - - printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq, - dev->if_port ? "secondary" : "primary", dev->mem_start); - - ei_status.reset_8390 = &e21_reset_8390; - ei_status.block_input = &e21_block_input; - ei_status.block_output = &e21_block_output; - ei_status.get_8390_hdr = &e21_get_8390_hdr; - - dev->netdev_ops = &e21_netdev_ops; - NS8390_init(dev, 0); - - retval = register_netdev(dev); - if (retval) - goto out; - return 0; -out: - release_region(ioaddr, E21_IO_EXTENT); - return retval; -} - -static int -e21_open(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - int retval; - - if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) - return retval; - - /* Set the interrupt line and memory base on the hardware. */ - inb(ioaddr + E21_IRQ_LOW); - outb(0, ioaddr + E21_ASIC + (dev->irq & 7)); - inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */ - outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 
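/* address bit 0 carries the high IRQ bit */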
1:0) - + (dev->if_port ? E21_ALT_IFPORT : 0)); - inb(ioaddr + E21_MEM_BASE); - outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7)); - - ei_open(dev); - return 0; -} - -static void -e21_reset_8390(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - - outb(0x01, ioaddr); - if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies); - ei_status.txing = 0; - - /* Set up the ASIC registers, just in case something changed them. */ - - if (ei_debug > 1) printk("reset done\n"); -} - -/* Grab the 8390 specific header. We put the 2k window so the header page - appears at the start of the shared memory. */ - -static void -e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - - short ioaddr = dev->base_addr; - char __iomem *shared_mem = ei_status.mem; - - mem_on(ioaddr, shared_mem, ring_page); - -#ifdef notdef - /* Officially this is what we are doing, but the readl() is faster */ - memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr)); -#else - ((unsigned int*)hdr)[0] = readl(shared_mem); -#endif - - /* Turn off memory access: we would need to reprogram the window anyway. */ - mem_off(ioaddr); - -} - -/* Block input and output are easy on shared memory ethercards. - The E21xx makes block_input() especially easy by wrapping the top - ring buffer to the bottom automatically. */ -static void -e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - short ioaddr = dev->base_addr; - char __iomem *shared_mem = ei_status.mem; - - mem_on(ioaddr, shared_mem, (ring_offset>>8)); - - memcpy_fromio(skb->data, ei_status.mem + (ring_offset & 0xff), count); - - mem_off(ioaddr); -} - -static void -e21_block_output(struct net_device *dev, int count, const unsigned char *buf, - int start_page) -{ - short ioaddr = dev->base_addr; - volatile char __iomem *shared_mem = ei_status.mem; - - /* Set the shared memory window start by doing a read, with the low address - bits specifying the starting page. */ - readb(shared_mem + start_page); - mem_on(ioaddr, shared_mem, start_page); - - memcpy_toio(shared_mem, buf, count); - mem_off(ioaddr); -} - -static int -e21_close(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - - if (ei_debug > 1) - printk("%s: Shutting down ethercard.\n", dev->name); - - free_irq(dev->irq, dev); - dev->irq = ei_status.saved_irq; - - /* Shut off the interrupt line and secondary interface. */ - inb(ioaddr + E21_IRQ_LOW); - outb(0, ioaddr + E21_ASIC); - inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */ - outb(0, ioaddr + E21_ASIC); - - ei_close(dev); - - /* Double-check that the memory has been turned off, because really - really bad things happen if it isn't. */ - mem_off(ioaddr); - - return 0; -} - - -#ifdef MODULE -#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */ -static struct net_device *dev_e21[MAX_E21_CARDS]; -static int io[MAX_E21_CARDS]; -static int irq[MAX_E21_CARDS]; -static int mem[MAX_E21_CARDS]; -static int xcvr[MAX_E21_CARDS]; /* choose int. or ext. 
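xcvr, per card. */

/*
 * Usage sketch for the module parameters below (values illustrative:
 * 0x300 is first in the probe list, IRQ 5 is in the probe set, and
 * 0xd0000 is the driver's default shared-memory base):
 *
 *   modprobe e2100 io=0x300 irq=5 mem=0xd0000 xcvr=0
 */

/* xcvr: 0 = internal, 1 = external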
xcvr */ - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -module_param_array(xcvr, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s)"); -MODULE_PARM_DESC(mem, " memory base address(es)"); -MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)"); -MODULE_DESCRIPTION("Cabletron E2100 ISA ethernet driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that only a single autoprobe takes place per call. -ISA device autoprobes on a running machine are not recommended. */ - -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) break; /* only autoprobe 1st one */ - printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n"); - } - dev = alloc_ei_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_start = mem[this_dev]; - dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */ - if (do_e2100_probe(dev) == 0) { - dev_e21[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - /* NB: e21_close() handles free_irq */ - iounmap(ei_status.mem); - release_region(dev->base_addr, E21_IO_EXTENT); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) { - struct net_device *dev = dev_e21[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c deleted file mode 100644 index 7a09575..0000000 --- a/drivers/net/es3210.c +++ /dev/null @@ -1,446 +0,0 @@ -/* - es3210.c - - Linux driver for Racal-Interlan ES3210 EISA Network Adapter - - Copyright (C) 1996, Paul Gortmaker. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - Information and Code Sources: - - 1) The existing myriad of Linux 8390 drivers written by Donald Becker. - - 2) Once again Russ Nelson's asm packet driver provided additional info. - - 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg files. - Too bad it doesn't work -- see below. - - The ES3210 is an EISA shared memory NS8390 implementation. Note - that all memory copies to/from the board must be 32bit transfers. - Which rules out using eth_io_copy_and_sum() in this driver. - - Apparently there are two slightly different revisions of the - card, since there are two distinct EISA cfg files (!rii0101.cfg - and !rii0102.cfg) One has media select in the cfg file and the - other doesn't. Hopefully this will work with either. - - That is about all I can tell you about it, having never actually - even seen one of these cards. :) Try http://www.interlan.com - if you want more info. - - Thanks go to Mark Salazar for testing v0.02 of this driver. - - Bugs, to-fix, etc: - - 1) The EISA cfg ports that are *supposed* to have the IRQ and shared - mem values just read 0xff all the time. Hrrmpf. Apparently the - same happens with the packet driver as the code for reading - these registers is disabled there. 
In the meantime, boot with: - ether=,0,0x,eth0 to override the IRQ and - shared memory detection. (The i/o port detection is okay.) - - 2) Module support currently untested. Probably works though. - -*/ - -static const char version[] = - "es3210.c: Driver revision v0.03, 14/09/96\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "8390.h" - -static int es_probe1(struct net_device *dev, int ioaddr); - -static void es_reset_8390(struct net_device *dev); - -static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); -static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); -static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); - -#define ES_START_PG 0x00 /* First page of TX buffer */ -#define ES_STOP_PG 0x40 /* Last page +1 of RX ring */ - -#define ES_IO_EXTENT 0x37 /* The cfg file says 0xc90 -> 0xcc7 */ -#define ES_ID_PORT 0xc80 /* Same for all EISA cards */ -#define ES_SA_PROM 0xc90 /* Start of e'net addr. */ -#define ES_RESET_PORT 0xc84 /* From the packet driver source */ -#define ES_NIC_OFFSET 0xca0 /* Hello, the 8390 is *here* */ - -#define ES_ADDR0 0x02 /* 3 byte vendor prefix */ -#define ES_ADDR1 0x07 -#define ES_ADDR2 0x01 - -/* - * Two card revisions. EISA ID's are always rev. minor, rev. major,, and - * then the three vendor letters stored in 5 bits each, with an "a" = 1. - * For eg: "rii" = 10010 01001 01001 = 0x4929, which is how the EISA - * config utility determines automagically what config file(s) to use. - */ -#define ES_EISA_ID1 0x01012949 /* !rii0101.cfg */ -#define ES_EISA_ID2 0x02012949 /* !rii0102.cfg */ - -#define ES_CFG1 0xcc0 /* IOPORT(1) --> IOPORT(6) in cfg file */ -#define ES_CFG2 0xcc1 -#define ES_CFG3 0xcc2 -#define ES_CFG4 0xcc3 -#define ES_CFG5 0xcc4 -#define ES_CFG6 0xc84 /* NB: 0xc84 is also "reset" port. */ - -/* - * You can OR any of the following bits together and assign it - * to ES_DEBUG to get verbose driver info during operation. - * Some of these don't do anything yet. - */ - -#define ES_D_PROBE 0x01 -#define ES_D_RX_PKT 0x02 -#define ES_D_TX_PKT 0x04 -#define ED_D_IRQ 0x08 - -#define ES_DEBUG 0 - -static unsigned char lo_irq_map[] __initdata = {3, 4, 5, 6, 7, 9, 10}; -static unsigned char hi_irq_map[] __initdata = {11, 12, 0, 14, 0, 0, 0, 15}; - -/* - * Probe for the card. The best way is to read the EISA ID if it - * is known. Then we check the prefix of the station address - * PROM for a match against the Racal-Interlan assigned value. - */ - -static int __init do_es_probe(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - int irq = dev->irq; - int mem_start = dev->mem_start; - - if (ioaddr > 0x1ff) /* Check a single specified location. */ - return es_probe1(dev, ioaddr); - else if (ioaddr > 0) /* Don't probe at all. */ - return -ENXIO; - - if (!EISA_bus) { -#if ES_DEBUG & ES_D_PROBE - printk("es3210.c: Not EISA bus. Not probing high ports.\n"); -#endif - return -ENXIO; - } - - /* EISA spec allows for up to 16 slots, but 8 is typical. 
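Slots 1-8, i.e. I/O bases 0x1000-0x8000, are scanned below. */

/*
 * Sketch of the vendor-ID packing described above (hypothetical helper,
 * not in the driver): three letters at 5 bits each, with 'a' = 1 and the
 * first letter most significant. eisa_vendor("rii") == 0x4929, the
 * 16-bit value embedded in ES_EISA_ID1/ES_EISA_ID2.
 */
static unsigned short eisa_vendor(const char *v)
{
	return ((v[0] - 'a' + 1) << 10) |
	       ((v[1] - 'a' + 1) << 5) |
	       (v[2] - 'a' + 1);
}

/* Scan each slot's I/O range: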
*/ - for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { - if (es_probe1(dev, ioaddr) == 0) - return 0; - dev->irq = irq; - dev->mem_start = mem_start; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init es_probe(int unit) -{ - struct net_device *dev = alloc_ei_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_es_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static int __init es_probe1(struct net_device *dev, int ioaddr) -{ - int i, retval; - unsigned long eisa_id; - - if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210")) - return -ENODEV; - -#if ES_DEBUG & ES_D_PROBE - printk("es3210.c: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + ES_ID_PORT)); - printk("es3210.c: config regs: %#x %#x %#x %#x %#x %#x\n", - inb(ioaddr + ES_CFG1), inb(ioaddr + ES_CFG2), inb(ioaddr + ES_CFG3), - inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6)); -#endif - -/* Check the EISA ID of the card. */ - eisa_id = inl(ioaddr + ES_ID_PORT); - if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) { - retval = -ENODEV; - goto out; - } - - for (i = 0; i < ETHER_ADDR_LEN ; i++) - dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i); - -/* Check the Racal vendor ID as well. */ - if (dev->dev_addr[0] != ES_ADDR0 || - dev->dev_addr[1] != ES_ADDR1 || - dev->dev_addr[2] != ES_ADDR2) { - printk("es3210.c: card not found %pM (invalid_prefix).\n", - dev->dev_addr); - retval = -ENODEV; - goto out; - } - - printk("es3210.c: ES3210 rev. %ld at %#x, node %pM", - eisa_id>>24, ioaddr, dev->dev_addr); - - /* Snarf the interrupt now. */ - if (dev->irq == 0) { - unsigned char hi_irq = inb(ioaddr + ES_CFG2) & 0x07; - unsigned char lo_irq = inb(ioaddr + ES_CFG1) & 0xfe; - - if (hi_irq != 0) { - dev->irq = hi_irq_map[hi_irq - 1]; - } else { - int i = 0; - while (lo_irq > (1 << i)) i++; - dev->irq = lo_irq_map[i]; - } - printk(" using IRQ %d", dev->irq); -#if ES_DEBUG & ES_D_PROBE - printk("es3210.c: hi_irq %#x, lo_irq %#x, dev->irq = %d\n", - hi_irq, lo_irq, dev->irq); -#endif - } else { - if (dev->irq == 2) - dev->irq = 9; /* Doh! */ - printk(" assigning IRQ %d", dev->irq); - } - - if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) { - printk (" unable to get IRQ %d.\n", dev->irq); - retval = -EAGAIN; - goto out; - } - - if (dev->mem_start == 0) { - unsigned char mem_enabled = inb(ioaddr + ES_CFG2) & 0xc0; - unsigned char mem_bits = inb(ioaddr + ES_CFG3) & 0x07; - - if (mem_enabled != 0x80) { - printk(" shared mem disabled - giving up\n"); - retval = -ENXIO; - goto out1; - } - dev->mem_start = 0xC0000 + mem_bits*0x4000; - printk(" using "); - } else { - printk(" assigning "); - } - - ei_status.mem = ioremap(dev->mem_start, (ES_STOP_PG - ES_START_PG)*256); - if (!ei_status.mem) { - printk("ioremap failed - giving up\n"); - retval = -ENXIO; - goto out1; - } - - dev->mem_end = dev->mem_start + (ES_STOP_PG - ES_START_PG)*256; - - printk("mem %#lx-%#lx\n", dev->mem_start, dev->mem_end-1); - -#if ES_DEBUG & ES_D_PROBE - if (inb(ioaddr + ES_CFG5)) - printk("es3210: Warning - DMA channel enabled, but not used here.\n"); -#endif - /* Note, point at the 8390, and not the card... 
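(ES_NIC_OFFSET is 0xca0, so base_addr lands on the 8390's own window within the slot, not the slot base.)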
*/ - dev->base_addr = ioaddr + ES_NIC_OFFSET; - - ei_status.name = "ES3210"; - ei_status.tx_start_page = ES_START_PG; - ei_status.rx_start_page = ES_START_PG + TX_PAGES; - ei_status.stop_page = ES_STOP_PG; - ei_status.word16 = 1; - - if (ei_debug > 0) - printk(version); - - ei_status.reset_8390 = &es_reset_8390; - ei_status.block_input = &es_block_input; - ei_status.block_output = &es_block_output; - ei_status.get_8390_hdr = &es_get_8390_hdr; - - dev->netdev_ops = &ei_netdev_ops; - NS8390_init(dev, 0); - - retval = register_netdev(dev); - if (retval) - goto out1; - return 0; -out1: - free_irq(dev->irq, dev); -out: - release_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT); - return retval; -} - -/* - * Reset as per the packet driver method. Judging by the EISA cfg - * file, this just toggles the "Board Enable" bits (bit 2 and 0). - */ - -static void es_reset_8390(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - unsigned long end; - - outb(0x04, ioaddr + ES_RESET_PORT); - if (ei_debug > 1) printk("%s: resetting the ES3210...", dev->name); - - end = jiffies + 2*HZ/100; - while ((signed)(end - jiffies) > 0) continue; - - ei_status.txing = 0; - outb(0x01, ioaddr + ES_RESET_PORT); - if (ei_debug > 1) printk("reset done\n"); -} - -/* - * Note: In the following three functions is the implicit assumption - * that the associated memcpy will only use "rep; movsl" as long as - * we keep the counts as some multiple of doublewords. This is a - * requirement of the hardware, and also prevents us from using - * eth_io_copy_and_sum() since we can't guarantee it will limit - * itself to doubleword access. - */ - -/* - * Grab the 8390 specific header. Similar to the block_input routine, but - * we don't need to be concerned with ring wrap as the header will be at - * the start of a page, so we optimize accordingly. (A single doubleword.) - */ - -static void -es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - ES_START_PG)<<8); - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ -} - -/* - * Block input and output are easy on shared memory ethercards, the only - * complication is when the ring buffer wraps. The count will already - * be rounded up to a doubleword value via es_get_8390_hdr() above. - */ - -static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset) -{ - void __iomem *xfer_start = ei_status.mem + ring_offset - ES_START_PG*256; - - if (ring_offset + count > ES_STOP_PG*256) { - /* Packet wraps over end of ring buffer. */ - int semi_count = ES_STOP_PG*256 - ring_offset; - memcpy_fromio(skb->data, xfer_start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, ei_status.mem, count); - } else { - /* Packet is in one chunk. 
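(Contrast with the wrap branch above: with ES_STOP_PG = 0x40 the ring ends at 0x4000, so a 0x100-byte packet at ring_offset 0x3f80 splits into semi_count = 0x80 bytes at the top of the ring plus 0x80 bytes from its base.)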
*/ - memcpy_fromio(skb->data, xfer_start, count); - } -} - -static void es_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - ES_START_PG)<<8); - - count = (count + 3) & ~3; /* Round up to doubleword */ - memcpy_toio(shmem, buf, count); -} - -#ifdef MODULE -#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */ -#define NAMELEN 8 /* # of chars for storing dev->name */ -static struct net_device *dev_es3210[MAX_ES_CARDS]; -static int io[MAX_ES_CARDS]; -static int irq[MAX_ES_CARDS]; -static int mem[MAX_ES_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s)"); -MODULE_PARM_DESC(mem, "memory base address(es)"); -MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); -MODULE_LICENSE("GPL"); - -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) { - if (io[this_dev] == 0 && this_dev != 0) - break; - dev = alloc_ei_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_start = mem[this_dev]; - if (do_es_probe(dev) == 0) { - dev_es3210[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "es3210.c: No es3210 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - free_irq(dev->irq, dev); - release_region(dev->base_addr, ES_IO_EXTENT); - iounmap(ei_status.mem); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) { - struct net_device *dev = dev_es3210[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ - diff --git a/drivers/net/ethernet/8390/3c503.c b/drivers/net/ethernet/8390/3c503.c new file mode 100644 index 0000000..84e68f1 --- /dev/null +++ b/drivers/net/ethernet/8390/3c503.c @@ -0,0 +1,778 @@ +/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */ +/* + Written 1992-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. This software may be used and + distributed according to the terms of the GNU General Public License, + incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + + This driver should work with the 3c503 and 3c503/16. It should be used + in shared memory mode for best performance, although it may also work + in programmed-I/O mode. + + Sources: + EtherLink II Technical Reference Manual, + EtherLink II/16 Technical Reference Manual Supplement, + 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145 + + The Crynwr 3c503 packet driver. + + Changelog: + + Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards. + Paul Gortmaker : multiple card support for module users. + rjohnson@analogic.com : Fix up PIO interface for efficient operation. 
+ Jeff Garzik : ethtool support + +*/ + +#define DRV_NAME "3c503" +#define DRV_VERSION "1.10a" +#define DRV_RELDATE "11/17/2001" + + +static const char version[] = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "8390.h" +#include "3c503.h" +#define WRD_COUNT 4 + +static int el2_pio_probe(struct net_device *dev); +static int el2_probe1(struct net_device *dev, int ioaddr); + +/* A zero-terminated list of I/O addresses to be probed in PIO mode. */ +static unsigned int netcard_portlist[] __initdata = + { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0}; + +#define EL2_IO_EXTENT 16 + +static int el2_open(struct net_device *dev); +static int el2_close(struct net_device *dev); +static void el2_reset_8390(struct net_device *dev); +static void el2_init_card(struct net_device *dev); +static void el2_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); +static void el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, + int ring_offset); +static void el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static const struct ethtool_ops netdev_ethtool_ops; + + +/* This routine probes for a memory-mapped 3c503 board by looking for + the "location register" at the end of the jumpered boot PROM space. + This works even if a PROM isn't there. + + If the ethercard isn't found there is an optional probe for + ethercard jumpered to programmed-I/O mode. + */ +static int __init do_el2_probe(struct net_device *dev) +{ + int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0}; + int base_addr = dev->base_addr; + int irq = dev->irq; + + if (base_addr > 0x1ff) /* Check a single specified location. */ + return el2_probe1(dev, base_addr); + else if (base_addr != 0) /* Don't probe at all. */ + return -ENXIO; + + for (addr = addrs; *addr; addr++) { + void __iomem *p = ioremap(*addr, 1); + unsigned base_bits; + int i; + + if (!p) + continue; + base_bits = readb(p); + iounmap(p); + i = ffs(base_bits) - 1; + if (i == -1 || base_bits != (1 << i)) + continue; + if (el2_probe1(dev, netcard_portlist[i]) == 0) + return 0; + dev->irq = irq; + } +#if ! defined(no_probe_nonshared_memory) + return el2_pio_probe(dev); +#else + return -ENODEV; +#endif +} + +/* Try all of the locations that aren't obviously empty. This touches + a lot of locations, and is much riskier than the code above. */ +static int __init +el2_pio_probe(struct net_device *dev) +{ + int i; + int base_addr = dev->base_addr; + int irq = dev->irq; + + if (base_addr > 0x1ff) /* Check a single specified location. */ + return el2_probe1(dev, base_addr); + else if (base_addr != 0) /* Don't probe at all. 
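(a nonzero base_addr below 0x200 disables probing). */

/*
 * Worked example of the location-register decode in do_el2_probe()
 * above: base_bits must have exactly one bit set, and ffs(base_bits) - 1
 * indexes netcard_portlist, so base_bits == 0x04 selects
 * netcard_portlist[2] == 0x330.
 */

/* As above: a nonzero base_addr that is too low to be a real port means no probe.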
*/ + return -ENXIO; + + for (i = 0; netcard_portlist[i]; i++) { + if (el2_probe1(dev, netcard_portlist[i]) == 0) + return 0; + dev->irq = irq; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init el2_probe(int unit) +{ + struct net_device *dev = alloc_eip_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_el2_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops el2_netdev_ops = { + .ndo_open = el2_open, + .ndo_stop = el2_close, + + .ndo_start_xmit = eip_start_xmit, + .ndo_tx_timeout = eip_tx_timeout, + .ndo_get_stats = eip_get_stats, + .ndo_set_multicast_list = eip_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = eip_poll, +#endif +}; + +/* Probe for the Etherlink II card at I/O port base IOADDR, + returning non-zero on success. If found, set the station + address and memory parameters in DEVICE. */ +static int __init +el2_probe1(struct net_device *dev, int ioaddr) +{ + int i, iobase_reg, membase_reg, saved_406, wordlength, retval; + static unsigned version_printed; + unsigned long vendor_id; + + if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + if (!request_region(ioaddr + 0x400, 8, DRV_NAME)) { + retval = -EBUSY; + goto out; + } + + /* Reset and/or avoid any lurking NE2000 */ + if (inb(ioaddr + 0x408) == 0xff) { + mdelay(1); + retval = -ENODEV; + goto out1; + } + + /* We verify that it's a 3C503 board by checking the first three octets + of its ethernet address. */ + iobase_reg = inb(ioaddr+0x403); + membase_reg = inb(ioaddr+0x404); + /* ASIC location registers should be 0 or have only a single bit set. */ + if ((iobase_reg & (iobase_reg - 1)) || + (membase_reg & (membase_reg - 1))) { + retval = -ENODEV; + goto out1; + } + saved_406 = inb_p(ioaddr + 0x406); + outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */ + outb_p(ECNTRL_THIN, ioaddr + 0x406); + /* Map the station addr PROM into the lower I/O ports. We now check + for both the old and new 3Com prefix */ + outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406); + vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2); + if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) { + /* Restore the register we frobbed. */ + outb(saved_406, ioaddr + 0x406); + retval = -ENODEV; + goto out1; + } + + if (ei_debug && version_printed++ == 0) + pr_debug("%s", version); + + dev->base_addr = ioaddr; + + pr_info("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr); + + /* Retrieve and print the ethernet address. */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + i); + pr_cont("%pM", dev->dev_addr); + + /* Map the 8390 back into the window. */ + outb(ECNTRL_THIN, ioaddr + 0x406); + + /* Check for EL2/16 as described in tech. man. */ + outb_p(E8390_PAGE0, ioaddr + E8390_CMD); + outb_p(0, ioaddr + EN0_DCFG); + outb_p(E8390_PAGE2, ioaddr + E8390_CMD); + wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS; + outb_p(E8390_PAGE0, ioaddr + E8390_CMD); + + /* Probe for, turn on and clear the board's shared memory. */ + if (ei_debug > 2) + pr_cont(" memory jumpers %2.2x ", membase_reg); + outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */ + + /* This should be probed for (or set via an ioctl()) at run-time. 
+ Right now we use a sleazy hack to pass in the interface number + at boot-time via the low bits of the mem_end field. That value is + unused, and the low bits would be discarded even if it was used. */ +#if defined(EI8390_THICK) || defined(EL2_AUI) + ei_status.interface_num = 1; +#else + ei_status.interface_num = dev->mem_end & 0xf; +#endif + pr_cont(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex"); + + if ((membase_reg & 0xf0) == 0) { + dev->mem_start = 0; + ei_status.name = "3c503-PIO"; + ei_status.mem = NULL; + } else { + dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) + + ((membase_reg & 0xA0) ? 0x4000 : 0); +#define EL2_MEMSIZE (EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256 + ei_status.mem = ioremap(dev->mem_start, EL2_MEMSIZE); + +#ifdef EL2MEMTEST + /* This has never found an error, but someone might care. + Note that it only tests the 2nd 8kB on 16kB 3c503/16 + cards between card addr. 0x2000 and 0x3fff. */ + { /* Check the card's memory. */ + void __iomem *mem_base = ei_status.mem; + unsigned int test_val = 0xbbadf00d; + writel(0xba5eba5e, mem_base); + for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) { + writel(test_val, mem_base + i); + if (readl(mem_base) != 0xba5eba5e || + readl(mem_base + i) != test_val) { + pr_warning("3c503: memory failure or memory address conflict.\n"); + dev->mem_start = 0; + ei_status.name = "3c503-PIO"; + iounmap(mem_base); + ei_status.mem = NULL; + break; + } + test_val += 0x55555555; + writel(0, mem_base + i); + } + } +#endif /* EL2MEMTEST */ + + if (dev->mem_start) + dev->mem_end = dev->mem_start + EL2_MEMSIZE; + + if (wordlength) { /* No Tx pages to skip over to get to Rx */ + ei_status.priv = 0; + ei_status.name = "3c503/16"; + } else { + ei_status.priv = TX_PAGES * 256; + ei_status.name = "3c503"; + } + } + + /* + Divide up the memory on the card. This is the same regardless of + whether shared-mem or PIO is used. For 16 bit cards (16kB RAM), + we use the entire 8k of bank1 for an Rx ring. We only use 3k + of the bank0 for 2 full size Tx packet slots. For 8 bit cards, + (8kB RAM) we use 3kB of bank1 for two Tx slots, and the remaining + 5kB for an Rx ring. */ + + if (wordlength) { + ei_status.tx_start_page = EL2_MB0_START_PG; + ei_status.rx_start_page = EL2_MB1_START_PG; + } else { + ei_status.tx_start_page = EL2_MB1_START_PG; + ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES; + } + + /* Finish setting the board's parameters. 
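(Cross-check of the split above, with TX_PAGES = 12 from 8390.h: an 8-bit card gets tx_start_page 0x20 and rx_start_page 0x2c, i.e. 12 * 256 = 3kB of Tx space and a (0x40 - 0x2c) * 256 = 5kB Rx ring -- the 3kB/5kB split quoted.)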
*/ + ei_status.stop_page = EL2_MB1_STOP_PG; + ei_status.word16 = wordlength; + ei_status.reset_8390 = el2_reset_8390; + ei_status.get_8390_hdr = el2_get_8390_hdr; + ei_status.block_input = el2_block_input; + ei_status.block_output = el2_block_output; + + if (dev->irq == 2) + dev->irq = 9; + else if (dev->irq > 5 && dev->irq != 9) { + pr_warning("3c503: configured interrupt %d invalid, will use autoIRQ.\n", + dev->irq); + dev->irq = 0; + } + + ei_status.saved_irq = dev->irq; + + dev->netdev_ops = &el2_netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + + retval = register_netdev(dev); + if (retval) + goto out1; + + if (dev->mem_start) + pr_info("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n", + dev->name, ei_status.name, (wordlength+1)<<3, + dev->mem_start, dev->mem_end-1); + + else + { + ei_status.tx_start_page = EL2_MB1_START_PG; + ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES; + pr_info("%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n", + dev->name, ei_status.name, (wordlength+1)<<3); + } + release_region(ioaddr + 0x400, 8); + return 0; +out1: + release_region(ioaddr + 0x400, 8); +out: + release_region(ioaddr, EL2_IO_EXTENT); + return retval; +} + +static irqreturn_t el2_probe_interrupt(int irq, void *seen) +{ + *(bool *)seen = true; + return IRQ_HANDLED; +} + +static int +el2_open(struct net_device *dev) +{ + int retval; + + if (dev->irq < 2) { + static const int irqlist[] = {5, 9, 3, 4, 0}; + const int *irqp = irqlist; + + outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */ + do { + bool seen; + + retval = request_irq(*irqp, el2_probe_interrupt, 0, + dev->name, &seen); + if (retval == -EBUSY) + continue; + if (retval < 0) + goto err_disable; + + /* Twinkle the interrupt, and check if it's seen. */ + seen = false; + smp_wmb(); + outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR); + outb_p(0x00, E33G_IDCFR); + msleep(1); + free_irq(*irqp, &seen); + if (!seen) + continue; + + retval = request_irq(dev->irq = *irqp, eip_interrupt, 0, + dev->name, dev); + if (retval == -EBUSY) + continue; + if (retval < 0) + goto err_disable; + break; + } while (*++irqp); + + if (*irqp == 0) { + err_disable: + outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */ + return -EAGAIN; + } + } else { + if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) { + return retval; + } + } + + el2_init_card(dev); + eip_open(dev); + return 0; +} + +static int +el2_close(struct net_device *dev) +{ + free_irq(dev->irq, dev); + dev->irq = ei_status.saved_irq; + outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */ + + eip_close(dev); + return 0; +} + +/* This is called whenever we have a unrecoverable failure: + transmit timeout + Bad ring buffer packet header + */ +static void +el2_reset_8390(struct net_device *dev) +{ + if (ei_debug > 1) { + pr_debug("%s: Resetting the 3c503 board...", dev->name); + pr_cont(" %#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR), + E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR)); + } + outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL); + ei_status.txing = 0; + outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); + el2_init_card(dev); + if (ei_debug > 1) + pr_cont("done\n"); +} + +/* Initialize the 3c503 GA registers after a reset. */ +static void +el2_init_card(struct net_device *dev) +{ + /* Unmap the station PROM and select the DIX or BNC connector. */ + outb_p(ei_status.interface_num==0 ? 
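/* 0 = onboard BNC, otherwise AUI */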
ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); + + /* Set ASIC copy of rx's first and last+1 buffer pages */ + /* These must be the same as in the 8390. */ + outb(ei_status.rx_start_page, E33G_STARTPG); + outb(ei_status.stop_page, E33G_STOPPG); + + /* Point the vector pointer registers somewhere ?harmless?. */ + outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */ + outb(0xff, E33G_VP1); + outb(0x00, E33G_VP0); + /* Turn off all interrupts until we're opened. */ + outb_p(0x00, dev->base_addr + EN0_IMR); + /* Enable IRQs iff started. */ + outb(EGACFR_NORM, E33G_GACFR); + + /* Set the interrupt line. */ + outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR); + outb_p((WRD_COUNT << 1), E33G_DRQCNT); /* Set burst size to 8 */ + outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */ + outb_p(0x00, E33G_DMAAL); + return; /* We always succeed */ +} + +/* + * Either use the shared memory (if enabled on the board) or put the packet + * out through the ASIC FIFO. + */ +static void +el2_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + unsigned short int *wrd; + int boguscount; /* timeout counter */ + unsigned short word; /* temporary for better machine code */ + void __iomem *base = ei_status.mem; + + if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */ + outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR); + else + outb(EGACFR_NORM, E33G_GACFR); + + if (base) { /* Shared memory transfer */ + memcpy_toio(base + ((start_page - ei_status.tx_start_page) << 8), + buf, count); + outb(EGACFR_NORM, E33G_GACFR); /* Back to bank1 in case on bank0 */ + return; + } + +/* + * No shared memory, put the packet out the other way. + * Set up then start the internal memory transfer to Tx Start Page + */ + + word = (unsigned short)start_page; + outb(word&0xFF, E33G_DMAAH); + outb(word>>8, E33G_DMAAL); + + outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT + | ECNTRL_START, E33G_CNTRL); + +/* + * Here I am going to write data to the FIFO as quickly as possible. + * Note that E33G_FIFOH is defined incorrectly. It is really + * E33G_FIFOL, the lowest port address for both the byte and + * word write. Variable 'count' is NOT checked. Caller must supply a + * valid count. Note that I may write a harmless extra byte to the + * 8390 if the byte-count was not even. + */ + wrd = (unsigned short int *) buf; + count = (count + 1) >> 1; + for(;;) + { + boguscount = 0x1000; + while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) + { + if(!boguscount--) + { + pr_notice("%s: FIFO blocked in el2_block_output.\n", dev->name); + el2_reset_8390(dev); + goto blocked; + } + } + if(count > WRD_COUNT) + { + outsw(E33G_FIFOH, wrd, WRD_COUNT); + wrd += WRD_COUNT; + count -= WRD_COUNT; + } + else + { + outsw(E33G_FIFOH, wrd, count); + break; + } + } + blocked:; + outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); +} + +/* Read the 4 byte, page aligned 8390 specific header. */ +static void +el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + int boguscount; + void __iomem *base = ei_status.mem; + unsigned short word; + + if (base) { /* Use the shared memory. */ + void __iomem *hdr_start = base + ((ring_page - EL2_MB1_START_PG)<<8); + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); + hdr->count = le16_to_cpu(hdr->count); + return; + } + +/* + * No shared memory, use programmed I/O. 
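(Same ESTAT_DPRDY/E33G_FIFOH handshake as el2_block_output() above; the 4-byte header is moved as two insw() words.)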
+ */ + + word = (unsigned short)ring_page; + outb(word&0xFF, E33G_DMAAH); + outb(word>>8, E33G_DMAAL); + + outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT + | ECNTRL_START, E33G_CNTRL); + boguscount = 0x1000; + while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) + { + if(!boguscount--) + { + pr_notice("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name); + memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr)); + el2_reset_8390(dev); + goto blocked; + } + } + insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1); + blocked:; + outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); +} + + +static void +el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + int boguscount = 0; + void __iomem *base = ei_status.mem; + unsigned short int *buf; + unsigned short word; + + /* Maybe enable shared memory just be to be safe... nahh.*/ + if (base) { /* Use the shared memory. */ + ring_offset -= (EL2_MB1_START_PG<<8); + if (ring_offset + count > EL2_MEMSIZE) { + /* We must wrap the input move. */ + int semi_count = EL2_MEMSIZE - ring_offset; + memcpy_fromio(skb->data, base + ring_offset, semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count); + } else { + memcpy_fromio(skb->data, base + ring_offset, count); + } + return; + } + +/* + * No shared memory, use programmed I/O. + */ + word = (unsigned short) ring_offset; + outb(word>>8, E33G_DMAAH); + outb(word&0xFF, E33G_DMAAL); + + outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT + | ECNTRL_START, E33G_CNTRL); + +/* + * Here I also try to get data as fast as possible. I am betting that I + * can read one extra byte without clobbering anything in the kernel because + * this would only occur on an odd byte-count and allocation of skb->data + * is word-aligned. Variable 'count' is NOT checked. Caller must check + * for a valid count. + * [This is currently quite safe.... but if one day the 3c503 explodes + * you know where to come looking ;)] + */ + + buf = (unsigned short int *) skb->data; + count = (count + 1) >> 1; + for(;;) + { + boguscount = 0x1000; + while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) + { + if(!boguscount--) + { + pr_notice("%s: FIFO blocked in el2_block_input.\n", dev->name); + el2_reset_8390(dev); + goto blocked; + } + } + if(count > WRD_COUNT) + { + insw(E33G_FIFOH, buf, WRD_COUNT); + buf += WRD_COUNT; + count -= WRD_COUNT; + } + else + { + insw(E33G_FIFOH, buf, count); + break; + } + } + blocked:; + outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); +} + + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, +}; + +#ifdef MODULE +#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */ + +static struct net_device *dev_el2[MAX_EL2_CARDS]; +static int io[MAX_EL2_CARDS]; +static int irq[MAX_EL2_CARDS]; +static int xcvr[MAX_EL2_CARDS]; /* choose int. or ext. 
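xcvr, per card. */

/*
 * Usage sketch for the parameters below (illustrative values: 0x300
 * heads netcard_portlist and IRQ 5 heads el2_open()'s probe list):
 *
 *   modprobe 3c503 io=0x300 irq=5 xcvr=0
 */

/* xcvr: 0 = internal (BNC), 1 = external (AUI)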
xcvr */ +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(xcvr, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); +MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)"); +MODULE_DESCRIPTION("3Com ISA EtherLink II, II/16 (3c503, 3c503/16) driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that only a single autoprobe takes place per call. +ISA device autoprobes on a running machine are not recommended. */ +int __init +init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) { + if (io[this_dev] == 0) { + if (this_dev != 0) break; /* only autoprobe 1st one */ + pr_notice("3c503.c: Presently autoprobing (not recommended) for a single card.\n"); + } + dev = alloc_eip_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */ + if (do_el2_probe(dev) == 0) { + dev_el2[found++] = dev; + continue; + } + free_netdev(dev); + pr_warning("3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + /* NB: el2_close() handles free_irq */ + release_region(dev->base_addr, EL2_IO_EXTENT); + if (ei_status.mem) + iounmap(ei_status.mem); +} + +void __exit +cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) { + struct net_device *dev = dev_el2[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/3c503.h b/drivers/net/ethernet/8390/3c503.h new file mode 100644 index 0000000..e2367b8 --- /dev/null +++ b/drivers/net/ethernet/8390/3c503.h @@ -0,0 +1,91 @@ +/* Definitions for the 3Com 3c503 Etherlink 2. */ +/* This file is distributed under the GPL. + Many of these names and comments are directly from the Crynwr packet + drivers, which are released under the GPL. */ + +#define EL2H (dev->base_addr + 0x400) +#define EL2L (dev->base_addr) + +/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran + out of available addresses on the first one... */ + +#define OLD_3COM_ID 0x02608c +#define NEW_3COM_ID 0x0020af + +/* Shared memory management parameters. NB: The 8 bit cards have only + one bank (MB1) which serves both Tx and Rx packet space. The 16bit + cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets. + You choose which bank appears in the sh. mem window with EGACFR_MBSn */ + +#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */ +#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */ +#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */ + +/* 3Com 3c503 ASIC registers */ +#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */ +#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */ +#define E33G_DRQCNT (EL2H+2) /* DMA burst count */ +#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */ + /* (non-useful, but it also appears at the end of EPROM space) */ +#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */ +#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */ +#define E33G_CNTRL (EL2H+6) /* Board's main control register */ +#define E33G_STATUS (EL2H+7) /* Status on completions. 
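(Its ESTAT_DPRDY bit is what the PIO loops in 3c503.c poll before every FIFO burst.)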
*/ +#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */ + /* (Which IRQ to assert, DMA chan to use) */ +#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */ +#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */ +/* "Vector pointer" - if this address matches a read, the EPROM (rather than + shared RAM) is mapped into memory space. */ +#define E33G_VP2 (EL2H+11) +#define E33G_VP1 (EL2H+12) +#define E33G_VP0 (EL2H+13) +#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */ +#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */ + +/* Bits in E33G_CNTRL register: */ + +#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */ +#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */ +#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */ +#define ECNTRL_SAPROM (0x04) /* Map the station address prom */ +#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */ +#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */ +#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */ +#define ECNTRL_START (0x80) /* Start the DMA logic */ + +/* Bits in E33G_STATUS register: */ + +#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */ +#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */ +#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */ +#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */ +#define ESTAT_DIP (0x08) /* DMA In Progress */ + +/* Bits in E33G_GACFR register: */ + +#define EGACFR_NIM (0x80) /* NIC interrupt mask */ +#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */ +#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */ +#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */ +#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */ +#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */ + +#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */ +#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */ + +/* + MBS2 MBS1 MBS0 Sh. mem windows card mem at: + ---- ---- ---- ----------------------------- + 0 0 0 0x0000 -- bank 0 + 0 0 1 0x2000 -- bank 1 (only choice for 8bit card) + 0 1 0 0x4000 -- bank 2, not used + 0 1 1 0x6000 -- bank 3, not used + +There was going to be a 32k card that used bank 2 and 3, but it +never got produced. 
+ +*/ + + +/* End of 3C503 parameter definitions */ diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c new file mode 100644 index 0000000..7c7518b --- /dev/null +++ b/drivers/net/ethernet/8390/8390.c @@ -0,0 +1,103 @@ +/* 8390 core for usual drivers */ + +static const char version[] = + "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#include "lib8390.c" + +int ei_open(struct net_device *dev) +{ + return __ei_open(dev); +} +EXPORT_SYMBOL(ei_open); + +int ei_close(struct net_device *dev) +{ + return __ei_close(dev); +} +EXPORT_SYMBOL(ei_close); + +netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + return __ei_start_xmit(skb, dev); +} +EXPORT_SYMBOL(ei_start_xmit); + +struct net_device_stats *ei_get_stats(struct net_device *dev) +{ + return __ei_get_stats(dev); +} +EXPORT_SYMBOL(ei_get_stats); + +void ei_set_multicast_list(struct net_device *dev) +{ + __ei_set_multicast_list(dev); +} +EXPORT_SYMBOL(ei_set_multicast_list); + +void ei_tx_timeout(struct net_device *dev) +{ + __ei_tx_timeout(dev); +} +EXPORT_SYMBOL(ei_tx_timeout); + +irqreturn_t ei_interrupt(int irq, void *dev_id) +{ + return __ei_interrupt(irq, dev_id); +} +EXPORT_SYMBOL(ei_interrupt); + +#ifdef CONFIG_NET_POLL_CONTROLLER +void ei_poll(struct net_device *dev) +{ + __ei_poll(dev); +} +EXPORT_SYMBOL(ei_poll); +#endif + +const struct net_device_ops ei_netdev_ops = { + .ndo_open = ei_open, + .ndo_stop = ei_close, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; +EXPORT_SYMBOL(ei_netdev_ops); + +struct net_device *__alloc_ei_netdev(int size) +{ + struct net_device *dev = ____alloc_ei_netdev(size); + if (dev) + dev->netdev_ops = &ei_netdev_ops; + return dev; +} +EXPORT_SYMBOL(__alloc_ei_netdev); + +void NS8390_init(struct net_device *dev, int startp) +{ + __NS8390_init(dev, startp); +} +EXPORT_SYMBOL(NS8390_init); + +#if defined(MODULE) + +static int __init ns8390_module_init(void) +{ + return 0; +} + +static void __exit ns8390_module_exit(void) +{ +} + +module_init(ns8390_module_init); +module_exit(ns8390_module_exit); +#endif /* MODULE */ +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h new file mode 100644 index 0000000..58a12e4 --- /dev/null +++ b/drivers/net/ethernet/8390/8390.h @@ -0,0 +1,232 @@ +/* Generic NS8390 register definitions. */ +/* This file is part of Donald Becker's 8390 drivers, and is distributed + under the same license. Auto-loading of 8390.o only in v2.2 - Paul G. + Some of these names and comments originated from the Crynwr + packet drivers, which are distributed under the GPL. */ + +#ifndef _8390_h +#define _8390_h + +#include +#include +#include +#include + +#define TX_PAGES 12 /* Two Tx slots */ + +#define ETHER_ADDR_LEN 6 + +/* The 8390 specific per-packet-header format. */ +struct e8390_pkt_hdr { + unsigned char status; /* status */ + unsigned char next; /* pointer to next packet. 
*/ + unsigned short count; /* header + packet length in bytes */ +}; + +#ifdef notdef +extern int ei_debug; +#else +#define ei_debug 1 +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +extern void ei_poll(struct net_device *dev); +extern void eip_poll(struct net_device *dev); +#endif + + +/* Without I/O delay - non ISA or later chips */ +extern void NS8390_init(struct net_device *dev, int startp); +extern int ei_open(struct net_device *dev); +extern int ei_close(struct net_device *dev); +extern irqreturn_t ei_interrupt(int irq, void *dev_id); +extern void ei_tx_timeout(struct net_device *dev); +extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev); +extern void ei_set_multicast_list(struct net_device *dev); +extern struct net_device_stats *ei_get_stats(struct net_device *dev); + +extern const struct net_device_ops ei_netdev_ops; + +extern struct net_device *__alloc_ei_netdev(int size); +static inline struct net_device *alloc_ei_netdev(void) +{ + return __alloc_ei_netdev(0); +} + +/* With I/O delay form */ +extern void NS8390p_init(struct net_device *dev, int startp); +extern int eip_open(struct net_device *dev); +extern int eip_close(struct net_device *dev); +extern irqreturn_t eip_interrupt(int irq, void *dev_id); +extern void eip_tx_timeout(struct net_device *dev); +extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev); +extern void eip_set_multicast_list(struct net_device *dev); +extern struct net_device_stats *eip_get_stats(struct net_device *dev); + +extern const struct net_device_ops eip_netdev_ops; + +extern struct net_device *__alloc_eip_netdev(int size); +static inline struct net_device *alloc_eip_netdev(void) +{ + return __alloc_eip_netdev(0); +} + +/* You have one of these per-board */ +struct ei_device { + const char *name; + void (*reset_8390)(struct net_device *); + void (*get_8390_hdr)(struct net_device *, struct e8390_pkt_hdr *, int); + void (*block_output)(struct net_device *, int, const unsigned char *, int); + void (*block_input)(struct net_device *, int, struct sk_buff *, int); + unsigned long rmem_start; + unsigned long rmem_end; + void __iomem *mem; + unsigned char mcfilter[8]; + unsigned open:1; + unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */ + unsigned bigendian:1; /* 16-bit big endian mode. Do NOT */ + /* set this on random 8390 clones! */ + unsigned txing:1; /* Transmit Active */ + unsigned irqlock:1; /* 8390's intrs disabled when '1'. */ + unsigned dmaing:1; /* Remote DMA Active */ + unsigned char tx_start_page, rx_start_page, stop_page; + unsigned char current_page; /* Read pointer in buffer */ + unsigned char interface_num; /* Net port (AUI, 10bT.) to use. */ + unsigned char txqueue; /* Tx Packet buffer queue length. */ + short tx1, tx2; /* Packet lengths for ping-pong tx. */ + short lasttx; /* Alpha version consistency check. */ + unsigned char reg0; /* Register '0' in a WD8013 */ + unsigned char reg5; /* Register '5' in a WD8013 */ + unsigned char saved_irq; /* Original dev->irq value. */ + u32 *reg_offset; /* Register mapping table */ + spinlock_t page_lock; /* Page register locks */ + unsigned long priv; /* Private field to store bus IDs etc. */ +#ifdef AX88796_PLATFORM + unsigned char rxcr_base; /* default value for RXCR */ +#endif +}; + +/* The maximum number of 8390 interrupt service routines called per IRQ. */ +#define MAX_SERVICE 12 + +/* The maximum time waited (in jiffies) before assuming a Tx failed. 
(20ms) */ +#define TX_TIMEOUT (20*HZ/100) + +#define ei_status (*(struct ei_device *)netdev_priv(dev)) + +/* Some generic ethernet register configurations. */ +#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */ +#define E8390_RX_IRQ_MASK 0x5 + +#ifdef AX88796_PLATFORM +#define E8390_RXCONFIG (ei_status.rxcr_base | 0x04) +#define E8390_RXOFF (ei_status.rxcr_base | 0x20) +#else +#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast,errors */ +#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */ +#endif + +#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */ +#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */ + + +/* Register accessed at EN_CMD, the 8390 base addr. */ +#define E8390_STOP 0x01 /* Stop and reset the chip */ +#define E8390_START 0x02 /* Start the chip, clear reset */ +#define E8390_TRANS 0x04 /* Transmit a frame */ +#define E8390_RREAD 0x08 /* Remote read */ +#define E8390_RWRITE 0x10 /* Remote write */ +#define E8390_NODMA 0x20 /* Remote DMA */ +#define E8390_PAGE0 0x00 /* Select page chip registers */ +#define E8390_PAGE1 0x40 /* using the two high-order bits */ +#define E8390_PAGE2 0x80 /* Page 3 is invalid. */ + +/* + * Only generate indirect loads given a machine that needs them. + * - removed AMIGA_PCMCIA from this list, handled as ISA io now + * - the _p for generates no delay by default 8390p.c overrides this. + */ + +#ifndef ei_inb +#define ei_inb(_p) inb(_p) +#define ei_outb(_v,_p) outb(_v,_p) +#define ei_inb_p(_p) inb(_p) +#define ei_outb_p(_v,_p) outb(_v,_p) +#endif + +#ifndef EI_SHIFT +#define EI_SHIFT(x) (x) +#endif + +#define E8390_CMD EI_SHIFT(0x00) /* The command register (for all pages) */ +/* Page 0 register offsets. */ +#define EN0_CLDALO EI_SHIFT(0x01) /* Low byte of current local dma addr RD */ +#define EN0_STARTPG EI_SHIFT(0x01) /* Starting page of ring bfr WR */ +#define EN0_CLDAHI EI_SHIFT(0x02) /* High byte of current local dma addr RD */ +#define EN0_STOPPG EI_SHIFT(0x02) /* Ending page +1 of ring bfr WR */ +#define EN0_BOUNDARY EI_SHIFT(0x03) /* Boundary page of ring bfr RD WR */ +#define EN0_TSR EI_SHIFT(0x04) /* Transmit status reg RD */ +#define EN0_TPSR EI_SHIFT(0x04) /* Transmit starting page WR */ +#define EN0_NCR EI_SHIFT(0x05) /* Number of collision reg RD */ +#define EN0_TCNTLO EI_SHIFT(0x05) /* Low byte of tx byte count WR */ +#define EN0_FIFO EI_SHIFT(0x06) /* FIFO RD */ +#define EN0_TCNTHI EI_SHIFT(0x06) /* High byte of tx byte count WR */ +#define EN0_ISR EI_SHIFT(0x07) /* Interrupt status reg RD WR */ +#define EN0_CRDALO EI_SHIFT(0x08) /* low byte of current remote dma address RD */ +#define EN0_RSARLO EI_SHIFT(0x08) /* Remote start address reg 0 */ +#define EN0_CRDAHI EI_SHIFT(0x09) /* high byte, current remote dma address RD */ +#define EN0_RSARHI EI_SHIFT(0x09) /* Remote start address reg 1 */ +#define EN0_RCNTLO EI_SHIFT(0x0a) /* Remote byte count reg WR */ +#define EN0_RCNTHI EI_SHIFT(0x0b) /* Remote byte count reg WR */ +#define EN0_RSR EI_SHIFT(0x0c) /* rx status reg RD */ +#define EN0_RXCR EI_SHIFT(0x0c) /* RX configuration reg WR */ +#define EN0_TXCR EI_SHIFT(0x0d) /* TX configuration reg WR */ +#define EN0_COUNTER0 EI_SHIFT(0x0d) /* Rcv alignment error counter RD */ +#define EN0_DCFG EI_SHIFT(0x0e) /* Data configuration reg WR */ +#define EN0_COUNTER1 EI_SHIFT(0x0e) /* Rcv CRC error counter RD */ +#define EN0_IMR EI_SHIFT(0x0f) /* Interrupt mask reg WR */ +#define EN0_COUNTER2 EI_SHIFT(0x0f) /* Rcv missed frame error counter RD */ + +/* Bits in EN0_ISR - Interrupt status register */ 
+#define ENISR_RX 0x01 /* Receiver, no error */ +#define ENISR_TX 0x02 /* Transmitter, no error */ +#define ENISR_RX_ERR 0x04 /* Receiver, with error */ +#define ENISR_TX_ERR 0x08 /* Transmitter, with error */ +#define ENISR_OVER 0x10 /* Receiver overwrote the ring */ +#define ENISR_COUNTERS 0x20 /* Counters need emptying */ +#define ENISR_RDC 0x40 /* remote dma complete */ +#define ENISR_RESET 0x80 /* Reset completed */ +#define ENISR_ALL 0x3f /* Interrupts we will enable */ + +/* Bits in EN0_DCFG - Data config register */ +#define ENDCFG_WTS 0x01 /* word transfer mode selection */ +#define ENDCFG_BOS 0x02 /* byte order selection */ + +/* Page 1 register offsets. */ +#define EN1_PHYS EI_SHIFT(0x01) /* This board's physical enet addr RD WR */ +#define EN1_PHYS_SHIFT(i) EI_SHIFT(i+1) /* Get and set mac address */ +#define EN1_CURPAG EI_SHIFT(0x07) /* Current memory page RD WR */ +#define EN1_MULT EI_SHIFT(0x08) /* Multicast filter mask array (8 bytes) RD WR */ +#define EN1_MULT_SHIFT(i) EI_SHIFT(8+i) /* Get and set multicast filter */ + +/* Bits in received packet status byte and EN0_RSR*/ +#define ENRSR_RXOK 0x01 /* Received a good packet */ +#define ENRSR_CRC 0x02 /* CRC error */ +#define ENRSR_FAE 0x04 /* frame alignment error */ +#define ENRSR_FO 0x08 /* FIFO overrun */ +#define ENRSR_MPA 0x10 /* missed pkt */ +#define ENRSR_PHY 0x20 /* physical/multicast address */ +#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */ +#define ENRSR_DEF 0x80 /* deferring */ + +/* Transmitted packet status, EN0_TSR. */ +#define ENTSR_PTX 0x01 /* Packet transmitted without error */ +#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */ +#define ENTSR_COL 0x04 /* The transmit collided at least once. */ +#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */ +#define ENTSR_CRS 0x10 /* The carrier sense was lost. */ +#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */ +#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */ +#define ENTSR_OWC 0x80 /* There was an out-of-window collision. 
*/
+
+#endif /* _8390_h */
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
new file mode 100644
index 0000000..a2a64ea
--- /dev/null
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -0,0 +1,105 @@
+/* 8390 core for ISA devices needing bus delays */
+
+static const char version[] =
+    "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#define ei_inb(_p) inb(_p)
+#define ei_outb(_v, _p) outb(_v, _p)
+#define ei_inb_p(_p) inb_p(_p)
+#define ei_outb_p(_v, _p) outb_p(_v, _p)
+
+#include "lib8390.c"
+
+int eip_open(struct net_device *dev)
+{
+    return __ei_open(dev);
+}
+EXPORT_SYMBOL(eip_open);
+
+int eip_close(struct net_device *dev)
+{
+    return __ei_close(dev);
+}
+EXPORT_SYMBOL(eip_close);
+
+netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+    return __ei_start_xmit(skb, dev);
+}
+EXPORT_SYMBOL(eip_start_xmit);
+
+struct net_device_stats *eip_get_stats(struct net_device *dev)
+{
+    return __ei_get_stats(dev);
+}
+EXPORT_SYMBOL(eip_get_stats);
+
+void eip_set_multicast_list(struct net_device *dev)
+{
+    __ei_set_multicast_list(dev);
+}
+EXPORT_SYMBOL(eip_set_multicast_list);
+
+void eip_tx_timeout(struct net_device *dev)
+{
+    __ei_tx_timeout(dev);
+}
+EXPORT_SYMBOL(eip_tx_timeout);
+
+irqreturn_t eip_interrupt(int irq, void *dev_id)
+{
+    return __ei_interrupt(irq, dev_id);
+}
+EXPORT_SYMBOL(eip_interrupt);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void eip_poll(struct net_device *dev)
+{
+    __ei_poll(dev);
+}
+EXPORT_SYMBOL(eip_poll);
+#endif
+
+const struct net_device_ops eip_netdev_ops = {
+    .ndo_open               = eip_open,
+    .ndo_stop               = eip_close,
+    .ndo_start_xmit         = eip_start_xmit,
+    .ndo_tx_timeout         = eip_tx_timeout,
+    .ndo_get_stats          = eip_get_stats,
+    .ndo_set_multicast_list = eip_set_multicast_list,
+    .ndo_validate_addr      = eth_validate_addr,
+    .ndo_set_mac_address    = eth_mac_addr,
+    .ndo_change_mtu         = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+    .ndo_poll_controller    = eip_poll,
+#endif
+};
+EXPORT_SYMBOL(eip_netdev_ops);
+
+struct net_device *__alloc_eip_netdev(int size)
+{
+    struct net_device *dev = ____alloc_ei_netdev(size);
+    if (dev)
+        dev->netdev_ops = &eip_netdev_ops;
+    return dev;
+}
+EXPORT_SYMBOL(__alloc_eip_netdev);
+
+void NS8390p_init(struct net_device *dev, int startp)
+{
+    __NS8390_init(dev, startp);
+}
+EXPORT_SYMBOL(NS8390p_init);
+
+static int __init NS8390p_init_module(void)
+{
+    return 0;
+}
+
+static void __exit NS8390p_cleanup_module(void)
+{
+}
+
+module_init(NS8390p_init_module);
+module_exit(NS8390p_cleanup_module);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
new file mode 100644
index 0000000..5cd53f1
--- /dev/null
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -0,0 +1,348 @@
+#
+# 8390 device configuration
+#
+
+config NET_VENDOR_8390
+	bool "National Semiconductor 8390 devices"
+	depends on AMIGA_PCMCIA || PCI || SUPERH || ISA || MCA || EISA || \
+		   MAC || M32R || MACH_TX49XX || MCA_LEGACY || H8300 || \
+		   ARM || MIPS || ZORRO || PCMCIA || EXPERIMENTAL
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about National Semiconductor 8390 cards. If you say Y,
+	  you will be asked for your specific card in the following questions.
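A note on the two cores added above: 8390.c and 8390p.c are the same driver template compiled twice. Each file defines the ei_inb()/ei_outb() accessor macros it wants and then includes lib8390.c, which expands into the __ei_* implementations on top of those accessors. A minimal sketch of the pattern, using the accessor choices of 8390p.c (the sketch itself is illustrative, not part of the patch):

    /* Pick delayed ISA accessors, then instantiate the shared core.
     * The _p forms pause between accesses for slow ISA buses. */
    #define ei_inb(_p)        inb(_p)
    #define ei_outb(_v, _p)   outb(_v, _p)
    #define ei_inb_p(_p)      inb_p(_p)
    #define ei_outb_p(_v, _p) outb_p(_v, _p)

    #include "lib8390.c"   /* emits __ei_open(), __ei_interrupt(), ... */

Each board driver then links against exactly one of the two resulting objects; the Makefile below pairs 3c503.o with 8390p.o, for instance, but ne2k-pci.o with 8390.o.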
+
+if NET_VENDOR_8390
+
+config EL2
+	tristate "3c503 \"EtherLink II\" support"
+	depends on ISA
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called 3c503.
+
+config AC3200
+	tristate "Ansel Communications EISA 3200 support (EXPERIMENTAL)"
+	depends on PCI && (ISA || EISA) && EXPERIMENTAL
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ac3200.
+
+config PCMCIA_AXNET
+	tristate "Asix AX88190 PCMCIA support"
+	depends on PCMCIA
+	---help---
+	  Say Y here if you intend to attach an Asix AX88190-based PCMCIA
+	  (PC-card) Fast Ethernet card to your computer. These cards are
+	  nearly NE2000 compatible but need a separate driver due to a few
+	  misfeatures.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called axnet_cs. If unsure, say N.
+
+config AX88796
+	tristate "ASIX AX88796 NE2000 clone support"
+	depends on (ARM || MIPS || SUPERH)
+	select PHYLIB
+	select MDIO_BITBANG
+	---help---
+	  AX88796 driver, using platform bus to provide
+	  chip detection and resources.
+
+config AX88796_93CX6
+	bool "ASIX AX88796 external 93CX6 eeprom support"
+	depends on AX88796
+	select EEPROM_93CX6
+	---help---
+	  Select this if your platform comes with an external 93CX6 eeprom.
+
+config E2100
+	tristate "Cabletron E21xx support"
+	depends on ISA
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called e2100.
+
+config ES3210
+	tristate "Racal-Interlan EISA ES3210 support (EXPERIMENTAL)"
+	depends on PCI && EISA && EXPERIMENTAL
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called es3210.
+
+config HPLAN_PLUS
+	tristate "HP PCLAN+ (27247B and 27252A) support"
+	depends on ISA
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called hp-plus.
+
+config HPLAN
+	tristate "HP PCLAN (27245 and other 27xxx series) support"
+	depends on ISA
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called hp.
+
+config HYDRA
+	tristate "Hydra support"
+	depends on ZORRO
+	select CRC32
+	---help---
+	  If you have a Hydra Ethernet adapter, say Y. Otherwise, say N.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called hydra.
+
+config ARM_ETHERH
+	tristate "I-cubed EtherH/ANT EtherM support"
+	depends on ARM && ARCH_ACORN
+	select CRC32
+	---help---
+	  If you have an Acorn system with one of these network cards, you
+	  should say Y to this option if you wish to use it with Linux.
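Most of the NE2000-style boards configured in this menu talk to the chip through the 8390's "remote DMA" engine rather than shared memory, and the register sequence that arms it is the same in each driver. A condensed sketch of the read side, based on apne_block_input() later in this patch (nic_base, addr and count are placeholders; the NE_* offsets are the ones apne.c defines):

    /* Program a remote-DMA read of `count` bytes at ring address `addr`. */
    static void remote_dma_read_setup(int nic_base, int addr, int count)
    {
            outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
            outb(ENISR_RDC, nic_base + NE_EN0_ISR);        /* ack stale RDC */
            outb(count & 0xff, nic_base + NE_EN0_RCNTLO);  /* byte count */
            outb(count >> 8, nic_base + NE_EN0_RCNTHI);
            outb(addr & 0xff, nic_base + NE_EN0_RSARLO);   /* source address */
            outb(addr >> 8, nic_base + NE_EN0_RSARHI);
            outb(E8390_RREAD + E8390_START, nic_base + NE_CMD);
            /* caller then drains NE_DATAPORT and acks ENISR_RDC again */
    }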
+
+config LNE390
+	tristate "Mylex EISA LNE390A/B support (EXPERIMENTAL)"
+	depends on PCI && EISA && EXPERIMENTAL
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called lne390.
+
+config MAC8390
+	bool "Macintosh NS 8390 based ethernet cards"
+	depends on MAC
+	select CRC32
+	---help---
+	  If you want to include a driver to support Nubus or LC-PDS
+	  Ethernet cards using an NS8390 chipset or its equivalent, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+config NE2000
+	tristate "NE2000/NE1000 support"
+	depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX)
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>. Many Ethernet cards
+	  without a specific driver are compatible with NE2000.
+
+	  If you have a PCI NE2000 card however, say N here and Y to "PCI
+	  NE2000 and clone support" under "EISA, VLB, PCI and on board
+	  controllers" below. If you have a NE2000 card and are running on
+	  an MCA system (a bus system used on some IBM PS/2 computers and
+	  laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
+	  below.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ne.
+
+config NE2_MCA
+	tristate "NE/2 (ne2000 MCA version) support"
+	depends on MCA_LEGACY
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ne2.
+
+config NE2K_PCI
+	tristate "PCI NE2000 and clones support (see help)"
+	depends on PCI
+	select CRC32
+	---help---
+	  This driver is for NE2000 compatible PCI cards. It will not work
+	  with ISA NE2000 cards (they have their own driver, "NE2000/NE1000
+	  support" below). If you have a PCI NE2000 network (Ethernet) card,
+	  say Y and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  This driver also works for the following NE2000 clone cards:
+	  RealTek RTL-8029		Winbond 89C940
+	  Compex RL2000			KTI ET32P2
+	  NetVin NV5000SC		Via 86C926
+	  SureCom NE34			Winbond
+	  Holtek HT80232		Holtek HT80229
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ne2k-pci.
+
+config APNE
+	tristate "PCMCIA NE2000 support"
+	depends on AMIGA_PCMCIA
+	select CRC32
+	---help---
+	  If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise,
+	  say N.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called apne.
+
+config NE3210
+	tristate "Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)"
+	depends on PCI && EISA && EXPERIMENTAL
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>. Note that this driver
+	  will NOT WORK for NE3200 cards as they are completely different.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ne3210.
+
+config PCMCIA_PCNET
+	tristate "NE2000 compatible PCMCIA support"
+	depends on PCMCIA
+	select CRC32
+	---help---
+	  Say Y here if you intend to attach an NE2000 compatible PCMCIA
+	  (PC-card) Ethernet or Fast Ethernet card to your computer.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called pcnet_cs. If unsure, say N.
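Whichever entries are enabled, the resulting board drivers all attach to the shared core the same way: the probe routine fills in the callback and ring-layout fields of ei_status (struct ei_device, declared in 8390.h above) and then calls NS8390_init(). A skeletal sketch of that wiring, mirroring what ac3200.c and apne.c do later in this patch; every my_*/MY_* name is a hypothetical stand-in:

    static int my_board_attach(struct net_device *dev)
    {
            ei_status.name          = "MYBOARD";
            ei_status.tx_start_page = MY_START_PG;
            ei_status.rx_start_page = MY_START_PG + TX_PAGES;
            ei_status.stop_page     = MY_STOP_PG;
            ei_status.word16        = 1;               /* 16-bit card */
            ei_status.reset_8390    = &my_reset_8390;  /* board-specific hooks */
            ei_status.block_input   = &my_block_input;
            ei_status.block_output  = &my_block_output;
            ei_status.get_8390_hdr  = &my_get_8390_hdr;
            dev->netdev_ops = &ei_netdev_ops;          /* or eip_netdev_ops */
            NS8390_init(dev, 0);
            return register_netdev(dev);
    }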
+
+config NE_H8300
+	tristate "NE2000 compatible support for H8/300"
+	depends on H8300
+	---help---
+	  Say Y here if you want to use the NE2000 compatible
+	  controller on the Renesas H8/300 processor.
+
+config STNIC
+	tristate "National DP83902AV support"
+	depends on SUPERH
+	select CRC32
+	---help---
+	  Support for cards based on the National Semiconductor DP83902AV
+	  ST-NIC Serial Network Interface Controller for Twisted Pair. This
+	  is a 10Mbit/sec Ethernet controller. Product overview and specs at
+	  <http://www.national.com/pf/DP/DP83902A.html>.
+
+	  If unsure, say N.
+
+config NET_VENDOR_SMC
+	bool "Western Digital/SMC cards"
+	depends on (ISA || MCA || EISA || MAC)
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Western Digital cards. If you say Y, you will be
+	  asked for your specific card in the following questions.
+
+config ULTRAMCA
+	tristate "SMC Ultra MCA support"
+	depends on NET_VENDOR_SMC && MCA
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type and are running
+	  an MCA based system (PS/2), say Y and read the Ethernet-HOWTO,
+	  available from <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called smc-mca.
+
+config ULTRA
+	tristate "SMC Ultra support"
+	depends on NET_VENDOR_SMC && ISA
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Important: There have been many reports that, with some motherboards
+	  mixing an SMC Ultra and an Adaptec AHA154x SCSI card (or compatible,
+	  such as some BusLogic models) causes corruption problems with many
+	  operating systems. The Linux smc-ultra driver has a work-around for
+	  this but keep it in mind if you have such a SCSI card and have
+	  problems.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called smc-ultra.
+
+config ULTRA32
+	tristate "SMC Ultra32 EISA support"
+	depends on NET_VENDOR_SMC && EISA
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called smc-ultra32.
+
+config WD80x3
+	tristate "WD80*3 support"
+	depends on NET_VENDOR_SMC && ISA
+	select CRC32
+	---help---
+	  If you have a network (Ethernet) card of this type, say Y and read
+	  the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called wd.
+
+config ZORRO8390
+	tristate "Zorro NS8390-based Ethernet support"
+	depends on ZORRO
+	select CRC32
+	---help---
+	  This driver is for Zorro Ethernet cards using an NS8390-compatible
+	  chipset, like the Village Tronic Ariadne II and the Individual
+	  Computers X-Surf Ethernet cards. If you have such a card, say Y.
+	  Otherwise, say N.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called zorro8390.
+
+endif # NET_VENDOR_8390
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
new file mode 100644
index 0000000..3337d7f
--- /dev/null
+++ b/drivers/net/ethernet/8390/Makefile
@@ -0,0 +1,29 @@
+#
+# Makefile for the 8390 network device drivers.
+# + +obj-$(CONFIG_MAC8390) += mac8390.o +obj-$(CONFIG_AC3200) += ac3200.o 8390.o +obj-$(CONFIG_APNE) += apne.o 8390.o +obj-$(CONFIG_ARM_ETHERH) += etherh.o +obj-$(CONFIG_AX88796) += ax88796.o +obj-$(CONFIG_E2100) += e2100.o 8390.o +obj-$(CONFIG_EL2) += 3c503.o 8390p.o +obj-$(CONFIG_ES3210) += es3210.o 8390.o +obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o +obj-$(CONFIG_HPLAN) += hp.o 8390p.o +obj-$(CONFIG_HYDRA) += hydra.o 8390.o +obj-$(CONFIG_LNE390) += lne390.o 8390.o +obj-$(CONFIG_NE2000) += ne.o 8390p.o +obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o +obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o +obj-$(CONFIG_NE3210) += ne3210.o 8390.o +obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o +obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o +obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o +obj-$(CONFIG_STNIC) += stnic.o 8390.o +obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o +obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o +obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o +obj-$(CONFIG_WD80x3) += wd.o 8390.o +obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o diff --git a/drivers/net/ethernet/8390/ac3200.c b/drivers/net/ethernet/8390/ac3200.c new file mode 100644 index 0000000..f07b2e9 --- /dev/null +++ b/drivers/net/ethernet/8390/ac3200.c @@ -0,0 +1,432 @@ +/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */ +/* + Written 1993, 1994 by Donald Becker. + Copyright 1993 United States Government as represented by the Director, + National Security Agency. This software may only be used and distributed + according to the terms of the GNU General Public License as modified by SRC, + incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + This is driver for the Ansel Communications Model 3200 EISA Ethernet LAN + Adapter. The programming information is from the users manual, as related + by glee@ardnassak.math.clemson.edu. + + Changelog: + + Paul Gortmaker 05/98 : add support for shared mem above 1MB. + + */ + +static const char version[] = + "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "8390.h" + +#define DRV_NAME "ac3200" + +/* Offsets from the base address. */ +#define AC_NIC_BASE 0x00 +#define AC_SA_PROM 0x16 /* The station address PROM. */ +#define AC_ADDR0 0x00 /* Prefix station address values. */ +#define AC_ADDR1 0x40 +#define AC_ADDR2 0x90 +#define AC_ID_PORT 0xC80 +#define AC_EISA_ID 0x0110d305 +#define AC_RESET_PORT 0xC84 +#define AC_RESET 0x00 +#define AC_ENABLE 0x01 +#define AC_CONFIG 0xC90 /* The configuration port. */ + +#define AC_IO_EXTENT 0x20 + /* Actually accessed is: + * AC_NIC_BASE (0-15) + * AC_SA_PROM (0-5) + * AC_ID_PORT (0-3) + * AC_RESET_PORT + * AC_CONFIG + */ + +/* Decoding of the configuration register. */ +static unsigned char config2irqmap[8] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; +static int addrmap[8] = +{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 }; +static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"}; + +#define config2irq(configval) config2irqmap[((configval) >> 3) & 7] +#define config2mem(configval) addrmap[(configval) & 7] +#define config2name(configval) port_name[((configval) >> 6) & 3] + +/* First and last 8390 pages. 
*/ +#define AC_START_PG 0x00 /* First page of 8390 TX buffer */ +#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */ + +static int ac_probe1(int ioaddr, struct net_device *dev); + +static int ac_open(struct net_device *dev); +static void ac_reset_8390(struct net_device *dev); +static void ac_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ac_block_output(struct net_device *dev, const int count, + const unsigned char *buf, const int start_page); +static void ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); + +static int ac_close_card(struct net_device *dev); + + +/* Probe for the AC3200. + + The AC3200 can be identified by either the EISA configuration registers, + or the unique value in the station address PROM. + */ + +static int __init do_ac3200_probe(struct net_device *dev) +{ + unsigned short ioaddr = dev->base_addr; + int irq = dev->irq; + int mem_start = dev->mem_start; + + if (ioaddr > 0x1ff) /* Check a single specified location. */ + return ac_probe1(ioaddr, dev); + else if (ioaddr > 0) /* Don't probe at all. */ + return -ENXIO; + + if ( ! EISA_bus) + return -ENXIO; + + for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { + if (ac_probe1(ioaddr, dev) == 0) + return 0; + dev->irq = irq; + dev->mem_start = mem_start; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init ac3200_probe(int unit) +{ + struct net_device *dev = alloc_ei_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_ac3200_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops ac_netdev_ops = { + .ndo_open = ac_open, + .ndo_stop = ac_close_card, + + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + +static int __init ac_probe1(int ioaddr, struct net_device *dev) +{ + int i, retval; + + if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + if (inb_p(ioaddr + AC_ID_PORT) == 0xff) { + retval = -ENODEV; + goto out; + } + + if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) { + retval = -ENODEV; + goto out; + } + +#ifndef final_version + printk(KERN_DEBUG "AC3200 ethercard configuration register is %#02x," + " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG), + inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1), + inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3)); +#endif + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i); + + printk(KERN_DEBUG "AC3200 in EISA slot %d, node %pM", + ioaddr/0x1000, dev->dev_addr); +#if 0 + /* Check the vendor ID/prefix. Redundant after checking the EISA ID */ + if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0 + || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1 + || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) { + printk(", not found (invalid prefix).\n"); + retval = -ENODEV; + goto out; + } +#endif + + /* Assign and allocate the interrupt now. 
*/ + if (dev->irq == 0) { + dev->irq = config2irq(inb(ioaddr + AC_CONFIG)); + printk(", using"); + } else { + dev->irq = irq_canonicalize(dev->irq); + printk(", assigning"); + } + + retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); + if (retval) { + printk (" nothing! Unable to get IRQ %d.\n", dev->irq); + goto out; + } + + printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]); + + dev->base_addr = ioaddr; + +#ifdef notyet + if (dev->mem_start) { /* Override the value from the board. */ + for (i = 0; i < 7; i++) + if (addrmap[i] == dev->mem_start) + break; + if (i >= 7) + i = 0; + outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG); + } +#endif + + dev->if_port = inb(ioaddr + AC_CONFIG) >> 6; + dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG)); + + printk("%s: AC3200 at %#3x with %dkB memory at physical address %#lx.\n", + dev->name, ioaddr, AC_STOP_PG/4, dev->mem_start); + + /* + * BEWARE!! Some dain-bramaged EISA SCUs will allow you to put + * the card mem within the region covered by `normal' RAM !!! + * + * ioremap() will fail in that case. + */ + ei_status.mem = ioremap(dev->mem_start, AC_STOP_PG*0x100); + if (!ei_status.mem) { + printk(KERN_ERR "ac3200.c: Unable to remap card memory above 1MB !!\n"); + printk(KERN_ERR "ac3200.c: Try using EISA SCU to set memory below 1MB.\n"); + printk(KERN_ERR "ac3200.c: Driver NOT installed.\n"); + retval = -EINVAL; + goto out1; + } + printk("ac3200.c: remapped %dkB card memory to virtual address %p\n", + AC_STOP_PG/4, ei_status.mem); + + dev->mem_start = (unsigned long)ei_status.mem; + dev->mem_end = dev->mem_start + (AC_STOP_PG - AC_START_PG)*256; + + ei_status.name = "AC3200"; + ei_status.tx_start_page = AC_START_PG; + ei_status.rx_start_page = AC_START_PG + TX_PAGES; + ei_status.stop_page = AC_STOP_PG; + ei_status.word16 = 1; + + if (ei_debug > 0) + printk(version); + + ei_status.reset_8390 = &ac_reset_8390; + ei_status.block_input = &ac_block_input; + ei_status.block_output = &ac_block_output; + ei_status.get_8390_hdr = &ac_get_8390_hdr; + + dev->netdev_ops = &ac_netdev_ops; + NS8390_init(dev, 0); + + retval = register_netdev(dev); + if (retval) + goto out2; + return 0; +out2: + if (ei_status.reg0) + iounmap(ei_status.mem); +out1: + free_irq(dev->irq, dev); +out: + release_region(ioaddr, AC_IO_EXTENT); + return retval; +} + +static int ac_open(struct net_device *dev) +{ +#ifdef notyet + /* Someday we may enable the IRQ and shared memory here. */ + int ioaddr = dev->base_addr; +#endif + + ei_open(dev); + return 0; +} + +static void ac_reset_8390(struct net_device *dev) +{ + ushort ioaddr = dev->base_addr; + + outb(AC_RESET, ioaddr + AC_RESET_PORT); + if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies); + + ei_status.txing = 0; + outb(AC_ENABLE, ioaddr + AC_RESET_PORT); + if (ei_debug > 1) printk("reset done\n"); +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void +ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + void __iomem *hdr_start = ei_status.mem + ((ring_page - AC_START_PG)<<8); + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); +} + +/* Block input and output are easy on shared memory ethercards, the only + complication is when the ring buffer wraps. 
*/ + +static void ac_block_input(struct net_device *dev, int count, struct sk_buff *skb, + int ring_offset) +{ + void __iomem *start = ei_status.mem + ring_offset - AC_START_PG*256; + + if (ring_offset + count > AC_STOP_PG*256) { + /* We must wrap the input move. */ + int semi_count = AC_STOP_PG*256 - ring_offset; + memcpy_fromio(skb->data, start, semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, + ei_status.mem + TX_PAGES*256, count); + } else { + memcpy_fromio(skb->data, start, count); + } +} + +static void ac_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + void __iomem *shmem = ei_status.mem + ((start_page - AC_START_PG)<<8); + + memcpy_toio(shmem, buf, count); +} + +static int ac_close_card(struct net_device *dev) +{ + if (ei_debug > 1) + printk("%s: Shutting down ethercard.\n", dev->name); + +#ifdef notyet + /* We should someday disable shared memory and interrupts. */ + outb(0x00, ioaddr + 6); /* Disable interrupts. */ + free_irq(dev->irq, dev); +#endif + + ei_close(dev); + return 0; +} + +#ifdef MODULE +#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */ +static struct net_device *dev_ac32[MAX_AC32_CARDS]; +static int io[MAX_AC32_CARDS]; +static int irq[MAX_AC32_CARDS]; +static int mem[MAX_AC32_CARDS]; +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(mem, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s)"); +MODULE_PARM_DESC(mem, "Memory base address(es)"); +MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver"); +MODULE_LICENSE("GPL"); + +static int __init ac3200_module_init(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) { + if (io[this_dev] == 0 && this_dev != 0) + break; + dev = alloc_ei_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + dev->mem_start = mem[this_dev]; /* Currently ignored by driver */ + if (do_ac3200_probe(dev) == 0) { + dev_ac32[found++] = dev; + continue; + } + free_netdev(dev); + printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + /* Someday free_irq may be in ac_close_card() */ + free_irq(dev->irq, dev); + release_region(dev->base_addr, AC_IO_EXTENT); + iounmap(ei_status.mem); +} + +static void __exit ac3200_module_exit(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) { + struct net_device *dev = dev_ac32[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +module_init(ac3200_module_init); +module_exit(ac3200_module_exit); +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c new file mode 100644 index 0000000..5477373 --- /dev/null +++ b/drivers/net/ethernet/8390/apne.c @@ -0,0 +1,620 @@ +/* + * Amiga Linux/68k 8390 based PCMCIA Ethernet Driver for the Amiga 1200 + * + * (C) Copyright 1997 Alain Malek + * (Alain.Malek@cryogen.com) + * + * ---------------------------------------------------------------------------- + * + * This program is based on + * + * ne.c: A general non-shared-memory NS8390 ethernet driver for linux + * Written 1992-94 by Donald Becker. + * + * 8390.c: A general NS8390 ethernet driver core for linux. + * Written 1992-94 by Donald Becker. 
+ * + * cnetdevice: A Sana-II ethernet driver for AmigaOS + * Written by Bruce Abbott (bhabbott@inhb.co.nz) + * + * ---------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * ---------------------------------------------------------------------------- + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "8390.h" + +/* ---- No user-serviceable parts below ---- */ + +#define DRV_NAME "apne" + +#define NE_BASE (dev->base_addr) +#define NE_CMD 0x00 +#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ +#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */ +#define NE_IO_EXTENT 0x20 + +#define NE_EN0_ISR 0x07 +#define NE_EN0_DCFG 0x0e + +#define NE_EN0_RSARLO 0x08 +#define NE_EN0_RSARHI 0x09 +#define NE_EN0_RCNTLO 0x0a +#define NE_EN0_RXCR 0x0c +#define NE_EN0_TXCR 0x0d +#define NE_EN0_RCNTHI 0x0b +#define NE_EN0_IMR 0x0f + +#define NE1SM_START_PG 0x20 /* First page of TX buffer */ +#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + + +struct net_device * __init apne_probe(int unit); +static int apne_probe1(struct net_device *dev, int ioaddr); + +static void apne_reset_8390(struct net_device *dev); +static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void apne_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void apne_block_output(struct net_device *dev, const int count, + const unsigned char *buf, const int start_page); +static irqreturn_t apne_interrupt(int irq, void *dev_id); + +static int init_pcmcia(void); + +/* IO base address used for nic */ + +#define IOBASE 0x300 + +/* + use MANUAL_CONFIG and MANUAL_OFFSET for enabling IO by hand + you can find the values to use by looking at the cnet.device + config file example (the default values are for the CNET40BC card) +*/ + +/* +#define MANUAL_CONFIG 0x20 +#define MANUAL_OFFSET 0x3f8 + +#define MANUAL_HWADDR0 0x00 +#define MANUAL_HWADDR1 0x12 +#define MANUAL_HWADDR2 0x34 +#define MANUAL_HWADDR3 0x56 +#define MANUAL_HWADDR4 0x78 +#define MANUAL_HWADDR5 0x9a +*/ + +static const char version[] = + "apne.c:v1.1 7/10/98 Alain Malek (Alain.Malek@cryogen.ch)\n"; + +static int apne_owned; /* signal if card already owned */ + +struct net_device * __init apne_probe(int unit) +{ + struct net_device *dev; +#ifndef MANUAL_CONFIG + char tuple[8]; +#endif + int err; + + if (!MACH_IS_AMIGA) + return ERR_PTR(-ENODEV); + + if (apne_owned) + return ERR_PTR(-ENODEV); + + if ( !(AMIGAHW_PRESENT(PCMCIA)) ) + return ERR_PTR(-ENODEV); + + printk("Looking for PCMCIA ethernet card : "); + + /* check if a card is inserted */ + if (!(PCMCIA_INSERTED)) { + printk("NO PCMCIA card inserted\n"); + return ERR_PTR(-ENODEV); + } + + dev = alloc_ei_netdev(); + if (!dev) + return ERR_PTR(-ENOMEM); + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + + /* disable pcmcia irq for readtuple */ + pcmcia_disable_irq(); + +#ifndef MANUAL_CONFIG + if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) || + (tuple[2] != CISTPL_FUNCID_NETWORK)) { + 
printk("not an ethernet card\n"); + /* XXX: shouldn't we re-enable irq here? */ + free_netdev(dev); + return ERR_PTR(-ENODEV); + } +#endif + + printk("ethernet PCMCIA card inserted\n"); + + if (!init_pcmcia()) { + /* XXX: shouldn't we re-enable irq here? */ + free_netdev(dev); + return ERR_PTR(-ENODEV); + } + + if (!request_region(IOBASE, 0x20, DRV_NAME)) { + free_netdev(dev); + return ERR_PTR(-EBUSY); + } + + err = apne_probe1(dev, IOBASE); + if (err) { + release_region(IOBASE, 0x20); + free_netdev(dev); + return ERR_PTR(err); + } + err = register_netdev(dev); + if (!err) + return dev; + + pcmcia_disable_irq(); + free_irq(IRQ_AMIGA_PORTS, dev); + pcmcia_reset(); + release_region(IOBASE, 0x20); + free_netdev(dev); + return ERR_PTR(err); +} + +static int __init apne_probe1(struct net_device *dev, int ioaddr) +{ + int i; + unsigned char SA_prom[32]; + int wordlength = 2; + const char *name = NULL; + int start_page, stop_page; +#ifndef MANUAL_HWADDR0 + int neX000, ctron; +#endif + static unsigned version_printed; + + if (ei_debug && version_printed++ == 0) + printk(version); + + printk("PCMCIA NE*000 ethercard probe"); + + /* Reset card. Who knows what dain-bramaged state it was left in. */ + { unsigned long reset_start_time = jiffies; + + outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); + + while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + printk(" not found (no reset ack).\n"); + return -ENODEV; + } + + outb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */ + } + +#ifndef MANUAL_HWADDR0 + + /* Read the 16 bytes of station address PROM. + We must first initialize registers, similar to NS8390_init(eifdev, 0). + We can't reliably read the SAPROM address without this. + (I learned the hard way!). */ + { + struct {unsigned long value, offset; } program_seq[] = { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, NE_CMD}, /* Select page 0*/ + {0x48, NE_EN0_DCFG}, /* Set byte-wide (0x48) access. */ + {0x00, NE_EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, NE_EN0_RCNTHI}, + {0x00, NE_EN0_IMR}, /* Mask completion irq. */ + {0xFF, NE_EN0_ISR}, + {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, NE_EN0_RCNTLO}, + {0x00, NE_EN0_RCNTHI}, + {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, NE_EN0_RSARHI}, + {E8390_RREAD+E8390_START, NE_CMD}, + }; + for (i = 0; i < ARRAY_SIZE(program_seq); i++) { + outb(program_seq[i].value, ioaddr + program_seq[i].offset); + } + + } + for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) { + SA_prom[i] = inb(ioaddr + NE_DATAPORT); + SA_prom[i+1] = inb(ioaddr + NE_DATAPORT); + if (SA_prom[i] != SA_prom[i+1]) + wordlength = 1; + } + + /* At this point, wordlength *only* tells us if the SA_prom is doubled + up or not because some broken PCI cards don't respect the byte-wide + request in program_seq above, and hence don't have doubled up values. + These broken cards would otherwise be detected as an ne1000. */ + + if (wordlength == 2) + for (i = 0; i < 16; i++) + SA_prom[i] = SA_prom[i+i]; + + if (wordlength == 2) { + /* We must set the 8390 for word mode. */ + outb(0x49, ioaddr + NE_EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + } else { + start_page = NE1SM_START_PG; + stop_page = NE1SM_STOP_PG; + } + + neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57); + ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d); + + /* Set up the rest of the parameters. */ + if (neX000) { + name = (wordlength == 2) ? 
"NE2000" : "NE1000"; + } else if (ctron) { + name = (wordlength == 2) ? "Ctron-8" : "Ctron-16"; + start_page = 0x01; + stop_page = (wordlength == 2) ? 0x40 : 0x20; + } else { + printk(" not found.\n"); + return -ENXIO; + + } + +#else + wordlength = 2; + /* We must set the 8390 for word mode. */ + outb(0x49, ioaddr + NE_EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + SA_prom[0] = MANUAL_HWADDR0; + SA_prom[1] = MANUAL_HWADDR1; + SA_prom[2] = MANUAL_HWADDR2; + SA_prom[3] = MANUAL_HWADDR3; + SA_prom[4] = MANUAL_HWADDR4; + SA_prom[5] = MANUAL_HWADDR5; + name = "NE2000"; +#endif + + dev->base_addr = ioaddr; + dev->irq = IRQ_AMIGA_PORTS; + dev->netdev_ops = &ei_netdev_ops; + + /* Install the Interrupt handler */ + i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); + if (i) return i; + + for(i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = SA_prom[i]; + + printk(" %pM\n", dev->dev_addr); + + printk("%s: %s found.\n", dev->name, name); + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = (wordlength == 2); + + ei_status.rx_start_page = start_page + TX_PAGES; + + ei_status.reset_8390 = &apne_reset_8390; + ei_status.block_input = &apne_block_input; + ei_status.block_output = &apne_block_output; + ei_status.get_8390_hdr = &apne_get_8390_hdr; + + NS8390_init(dev, 0); + + pcmcia_ack_int(pcmcia_get_intreq()); /* ack PCMCIA int req */ + pcmcia_enable_irq(); + + apne_owned = 1; + + return 0; +} + +/* Hard reset the card. This used to pause for the same period that a + 8390 reset command required, but that shouldn't be necessary. */ +static void +apne_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + + init_pcmcia(); + + if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies); + + outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + printk("%s: ne_reset_8390() did not complete.\n", dev->name); + break; + } + outb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void +apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + + int nic_base = dev->base_addr; + int cnt; + char *ptrc; + short *ptrs; + + /* This *shouldn't* happen. 
If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + printk("%s: DMAing conflict in ne_get_8390_hdr " + "[DMAstat:%d][irqlock:%d][intr:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq); + return; + } + + ei_status.dmaing |= 0x01; + outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb(ENISR_RDC, nic_base + NE_EN0_ISR); + outb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO); + outb(0, nic_base + NE_EN0_RCNTHI); + outb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */ + outb(ring_page, nic_base + NE_EN0_RSARHI); + outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_status.word16) { + ptrs = (short*)hdr; + for(cnt = 0; cnt < (sizeof(struct e8390_pkt_hdr)>>1); cnt++) + *ptrs++ = inw(NE_BASE + NE_DATAPORT); + } else { + ptrc = (char*)hdr; + for(cnt = 0; cnt < sizeof(struct e8390_pkt_hdr); cnt++) + *ptrc++ = inb(NE_BASE + NE_DATAPORT); + } + + outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; + + le16_to_cpus(&hdr->count); +} + +/* Block input and output, similar to the Crynwr packet driver. If you + are porting to a new ethercard, look at the packet driver source for hints. + The NEx000 doesn't share the on-board packet memory -- you have to put + the packet out through the "remote DMA" dataport using outb. */ + +static void +apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + int nic_base = dev->base_addr; + char *buf = skb->data; + char *ptrc; + short *ptrs; + int cnt; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + printk("%s: DMAing conflict in ne_block_input " + "[DMAstat:%d][irqlock:%d][intr:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq); + return; + } + ei_status.dmaing |= 0x01; + outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb(ENISR_RDC, nic_base + NE_EN0_ISR); + outb(count & 0xff, nic_base + NE_EN0_RCNTLO); + outb(count >> 8, nic_base + NE_EN0_RCNTHI); + outb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO); + outb(ring_offset >> 8, nic_base + NE_EN0_RSARHI); + outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + if (ei_status.word16) { + ptrs = (short*)buf; + for (cnt = 0; cnt < (count>>1); cnt++) + *ptrs++ = inw(NE_BASE + NE_DATAPORT); + if (count & 0x01) { + buf[count-1] = inb(NE_BASE + NE_DATAPORT); + } + } else { + ptrc = (char*)buf; + for (cnt = 0; cnt < count; cnt++) + *ptrc++ = inb(NE_BASE + NE_DATAPORT); + } + + outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static void +apne_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + int nic_base = NE_BASE; + unsigned long dma_start; + char *ptrc; + short *ptrs; + int cnt; + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + if (ei_status.word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + printk("%s: DMAing conflict in ne_block_output." + "[DMAstat:%d][irqlock:%d][intr:%d]\n", + dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + + outb(ENISR_RDC, nic_base + NE_EN0_ISR); + + /* Now the normal output. 
*/ + outb(count & 0xff, nic_base + NE_EN0_RCNTLO); + outb(count >> 8, nic_base + NE_EN0_RCNTHI); + outb(0x00, nic_base + NE_EN0_RSARLO); + outb(start_page, nic_base + NE_EN0_RSARHI); + + outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); + if (ei_status.word16) { + ptrs = (short*)buf; + for (cnt = 0; cnt < count>>1; cnt++) + outw(*ptrs++, NE_BASE+NE_DATAPORT); + } else { + ptrc = (char*)buf; + for (cnt = 0; cnt < count; cnt++) + outb(*ptrc++, NE_BASE + NE_DATAPORT); + } + + dma_start = jiffies; + + while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ + printk("%s: timeout waiting for Tx RDC.\n", dev->name); + apne_reset_8390(dev); + NS8390_init(dev,1); + break; + } + + outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static irqreturn_t apne_interrupt(int irq, void *dev_id) +{ + unsigned char pcmcia_intreq; + + if (!(gayle.inten & GAYLE_IRQ_IRQ)) + return IRQ_NONE; + + pcmcia_intreq = pcmcia_get_intreq(); + + if (!(pcmcia_intreq & GAYLE_IRQ_IRQ)) { + pcmcia_ack_int(pcmcia_intreq); + return IRQ_NONE; + } + if (ei_debug > 3) + printk("pcmcia intreq = %x\n", pcmcia_intreq); + pcmcia_disable_irq(); /* to get rid of the sti() within ei_interrupt */ + ei_interrupt(irq, dev_id); + pcmcia_ack_int(pcmcia_get_intreq()); + pcmcia_enable_irq(); + return IRQ_HANDLED; +} + +#ifdef MODULE +static struct net_device *apne_dev; + +static int __init apne_module_init(void) +{ + apne_dev = apne_probe(-1); + if (IS_ERR(apne_dev)) + return PTR_ERR(apne_dev); + return 0; +} + +static void __exit apne_module_exit(void) +{ + unregister_netdev(apne_dev); + + pcmcia_disable_irq(); + + free_irq(IRQ_AMIGA_PORTS, apne_dev); + + pcmcia_reset(); + + release_region(IOBASE, 0x20); + + free_netdev(apne_dev); +} +module_init(apne_module_init); +module_exit(apne_module_exit); +#endif + +static int init_pcmcia(void) +{ + u_char config; +#ifndef MANUAL_CONFIG + u_char tuple[32]; + int offset_len; +#endif + u_long offset; + + pcmcia_reset(); + pcmcia_program_voltage(PCMCIA_0V); + pcmcia_access_speed(PCMCIA_SPEED_250NS); + pcmcia_write_enable(); + +#ifdef MANUAL_CONFIG + config = MANUAL_CONFIG; +#else + /* get and write config byte to enable IO port */ + + if (pcmcia_copy_tuple(CISTPL_CFTABLE_ENTRY, tuple, 32) < 3) + return 0; + + config = tuple[2] & 0x3f; +#endif +#ifdef MANUAL_OFFSET + offset = MANUAL_OFFSET; +#else + if (pcmcia_copy_tuple(CISTPL_CONFIG, tuple, 32) < 6) + return 0; + + offset_len = (tuple[2] & 0x3) + 1; + offset = 0; + while(offset_len--) { + offset = (offset << 8) | tuple[4+offset_len]; + } +#endif + + out_8(GAYLE_ATTRIBUTE+offset, config); + + return 1; +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c new file mode 100644 index 0000000..e7cb8c8 --- /dev/null +++ b/drivers/net/ethernet/8390/ax88796.c @@ -0,0 +1,1010 @@ +/* drivers/net/ax88796.c + * + * Copyright 2005,2007 Simtec Electronics + * Ben Dooks + * + * Asix AX88796 10/100 Ethernet controller support + * Based on ne.c, by Donald Becker, et-al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/isapnp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/phy.h>
+#include <linux/eeprom_93cx6.h>
+#include <linux/slab.h>
+
+#include <net/ax88796.h>
+
+#include <asm/system.h>
+
+/* Rename the lib8390.c functions to show that they are in this driver */
+#define __ei_open ax_ei_open
+#define __ei_close ax_ei_close
+#define __ei_poll ax_ei_poll
+#define __ei_start_xmit ax_ei_start_xmit
+#define __ei_tx_timeout ax_ei_tx_timeout
+#define __ei_get_stats ax_ei_get_stats
+#define __ei_set_multicast_list ax_ei_set_multicast_list
+#define __ei_interrupt ax_ei_interrupt
+#define ____alloc_ei_netdev ax__alloc_ei_netdev
+#define __NS8390_init ax_NS8390_init
+
+/* force unsigned long back to 'void __iomem *' */
+#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
+
+#define ei_inb(_a) readb(ax_convert_addr(_a))
+#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
+
+#define ei_inb_p(_a) ei_inb(_a)
+#define ei_outb_p(_v, _a) ei_outb(_v, _a)
+
+/* define EI_SHIFT() to take into account our register offsets */
+#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
+
+/* Ensure we have our RCR base value */
+#define AX88796_PLATFORM
+
+static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electronics\n";
+
+#include "lib8390.c"
+
+#define DRV_NAME "ax88796"
+#define DRV_VERSION "1.00"
+
+/* from ne.c */
+#define NE_CMD		EI_SHIFT(0x00)
+#define NE_RESET	EI_SHIFT(0x1f)
+#define NE_DATAPORT	EI_SHIFT(0x10)
+
+#define NE1SM_START_PG	0x20	/* First page of TX buffer */
+#define NE1SM_STOP_PG	0x40	/* Last page +1 of RX ring */
+#define NESM_START_PG	0x40	/* First page of TX buffer */
+#define NESM_STOP_PG	0x80	/* Last page +1 of RX ring */
+
+#define AX_GPOC_PPDSET	BIT(6)
+
+/* device private data */
+
+struct ax_device {
+	struct mii_bus *mii_bus;
+	struct mdiobb_ctrl bb_ctrl;
+	struct phy_device *phy_dev;
+	void __iomem *addr_memr;
+	u8 reg_memr;
+	int link;
+	int speed;
+	int duplex;
+
+	void __iomem *map2;
+	const struct ax_plat_data *plat;
+
+	unsigned char running;
+	unsigned char resume_open;
+	unsigned int irqflags;
+
+	u32 reg_offsets[0x20];
+};
+
+static inline struct ax_device *to_ax_dev(struct net_device *dev)
+{
+	struct ei_device *ei_local = netdev_priv(dev);
+	return (struct ax_device *)(ei_local + 1);
+}
+
+/*
+ * ax_initial_check
+ *
+ * do an initial probe for the card to check whether it exists
+ * and is functional
+ */
+static int ax_initial_check(struct net_device *dev)
+{
+	struct ei_device *ei_local = netdev_priv(dev);
+	void __iomem *ioaddr = ei_local->mem;
+	int reg0;
+	int regd;
+
+	reg0 = ei_inb(ioaddr);
+	if (reg0 == 0xFF)
+		return -ENODEV;
+
+	ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
+	regd = ei_inb(ioaddr + 0x0d);
+	ei_outb(0xff, ioaddr + 0x0d);
+	ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
+	ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+	if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
+		ei_outb(reg0, ioaddr);
+		ei_outb(regd, ioaddr + 0x0d); /* Restore the old values. */
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * Hard reset the card. This used to pause for the same period that a
+ * 8390 reset command required, but that shouldn't be necessary.
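+ * (In practice the wait below polls ENISR_RESET and gives up after
+ * roughly 2*HZ/100 jiffies -- about 20ms -- instead of sleeping for
+ * the historical fixed period.)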
+ */ +static void ax_reset_8390(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned long reset_start_time = jiffies; + void __iomem *addr = (void __iomem *)dev->base_addr; + + if (ei_debug > 1) + netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies); + + ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); + + ei_local->txing = 0; + ei_local->dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { + if (jiffies - reset_start_time > 2 * HZ / 100) { + netdev_warn(dev, "%s: did not complete.\n", __func__); + break; + } + } + + ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */ +} + + +static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *nic_base = ei_local->mem; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_local->dmaing) { + netdev_err(dev, "DMAing conflict in %s " + "[DMAstat:%d][irqlock:%d].\n", + __func__, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing |= 0x01; + ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); + ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); + ei_outb(0, nic_base + EN0_RCNTHI); + ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */ + ei_outb(ring_page, nic_base + EN0_RSARHI); + ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_local->word16) + readsw(nic_base + NE_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr) >> 1); + else + readsb(nic_base + NE_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr)); + + ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_local->dmaing &= ~0x01; + + le16_to_cpus(&hdr->count); +} + + +/* + * Block input and output, similar to the Crynwr packet driver. If + * you are porting to a new ethercard, look at the packet driver + * source for hints. The NEx000 doesn't share the on-board packet + * memory -- you have to put the packet out through the "remote DMA" + * dataport using ei_outb. + */ +static void ax_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *nic_base = ei_local->mem; + char *buf = skb->data; + + if (ei_local->dmaing) { + netdev_err(dev, + "DMAing conflict in %s " + "[DMAstat:%d][irqlock:%d].\n", + __func__, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing |= 0x01; + + ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD); + ei_outb(count & 0xff, nic_base + EN0_RCNTLO); + ei_outb(count >> 8, nic_base + EN0_RCNTHI); + ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); + ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); + ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_local->word16) { + readsw(nic_base + NE_DATAPORT, buf, count >> 1); + if (count & 0x01) + buf[count-1] = ei_inb(nic_base + NE_DATAPORT); + + } else { + readsb(nic_base + NE_DATAPORT, buf, count); + } + + ei_local->dmaing &= ~1; +} + +static void ax_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *nic_base = ei_local->mem; + unsigned long dma_start; + + /* + * Round the count up for word writes. Do we need to do this? + * What effect will an odd byte count have on the 8390? I + * should check someday. 
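+	 * (The usual rationale, inherited from ne.c: in 16-bit mode the
+	 * remote DMA engine moves whole words, so an odd count would
+	 * strand the final byte; rounding up is the safe choice.)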
+ */ + if (ei_local->word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_local->dmaing) { + netdev_err(dev, "DMAing conflict in %s." + "[DMAstat:%d][irqlock:%d]\n", + __func__, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + + ei_outb(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. */ + ei_outb(count & 0xff, nic_base + EN0_RCNTLO); + ei_outb(count >> 8, nic_base + EN0_RCNTHI); + ei_outb(0x00, nic_base + EN0_RSARLO); + ei_outb(start_page, nic_base + EN0_RSARHI); + + ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); + if (ei_local->word16) + writesw(nic_base + NE_DATAPORT, buf, count >> 1); + else + writesb(nic_base + NE_DATAPORT, buf, count); + + dma_start = jiffies; + + while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { + if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */ + netdev_warn(dev, "timeout waiting for Tx RDC.\n"); + ax_reset_8390(dev); + ax_NS8390_init(dev, 1); + break; + } + } + + ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_local->dmaing &= ~0x01; +} + +/* definitions for accessing MII/EEPROM interface */ + +#define AX_MEMR EI_SHIFT(0x14) +#define AX_MEMR_MDC BIT(0) +#define AX_MEMR_MDIR BIT(1) +#define AX_MEMR_MDI BIT(2) +#define AX_MEMR_MDO BIT(3) +#define AX_MEMR_EECS BIT(4) +#define AX_MEMR_EEI BIT(5) +#define AX_MEMR_EEO BIT(6) +#define AX_MEMR_EECLK BIT(7) + +static void ax_handle_link_change(struct net_device *dev) +{ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = ax->phy_dev; + int status_change = 0; + + if (phy_dev->link && ((ax->speed != phy_dev->speed) || + (ax->duplex != phy_dev->duplex))) { + + ax->speed = phy_dev->speed; + ax->duplex = phy_dev->duplex; + status_change = 1; + } + + if (phy_dev->link != ax->link) { + if (!phy_dev->link) { + ax->speed = 0; + ax->duplex = -1; + } + ax->link = phy_dev->link; + + status_change = 1; + } + + if (status_change) + phy_print_status(phy_dev); +} + +static int ax_mii_probe(struct net_device *dev) +{ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = NULL; + int ret; + + /* find the first phy */ + phy_dev = phy_find_first(ax->mii_bus); + if (!phy_dev) { + netdev_err(dev, "no PHY found\n"); + return -ENODEV; + } + + ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0, + PHY_INTERFACE_MODE_MII); + if (ret) { + netdev_err(dev, "Could not attach to PHY\n"); + return ret; + } + + /* mask with MAC supported features */ + phy_dev->supported &= PHY_BASIC_FEATURES; + phy_dev->advertising = phy_dev->supported; + + ax->phy_dev = phy_dev; + + netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq); + + return 0; +} + +static void ax_phy_switch(struct net_device *dev, int on) +{ + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); + + u8 reg_gpoc = ax->plat->gpoc_val; + + if (!!on) + reg_gpoc &= ~AX_GPOC_PPDSET; + else + reg_gpoc |= AX_GPOC_PPDSET; + + ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17)); +} + +static int ax_open(struct net_device *dev) +{ + struct ax_device *ax = to_ax_dev(dev); + int ret; + + netdev_dbg(dev, "open\n"); + + ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, + dev->name, dev); + if (ret) + goto failed_request_irq; + + /* turn the phy on (if turned off) */ + 
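+	/* (ax_phy_switch() drives the AX_GPOC_PPDSET power-down bit in
+	 * the GPOC register: cleared to power the PHY up, set to power
+	 * it down -- see its definition above.) */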
ax_phy_switch(dev, 1); + + ret = ax_mii_probe(dev); + if (ret) + goto failed_mii_probe; + phy_start(ax->phy_dev); + + ret = ax_ei_open(dev); + if (ret) + goto failed_ax_ei_open; + + ax->running = 1; + + return 0; + + failed_ax_ei_open: + phy_disconnect(ax->phy_dev); + failed_mii_probe: + ax_phy_switch(dev, 0); + free_irq(dev->irq, dev); + failed_request_irq: + return ret; +} + +static int ax_close(struct net_device *dev) +{ + struct ax_device *ax = to_ax_dev(dev); + + netdev_dbg(dev, "close\n"); + + ax->running = 0; + wmb(); + + ax_ei_close(dev); + + /* turn the phy off */ + ax_phy_switch(dev, 0); + phy_disconnect(ax->phy_dev); + + free_irq(dev->irq, dev); + return 0; +} + +static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = ax->phy_dev; + + if (!netif_running(dev)) + return -EINVAL; + + if (!phy_dev) + return -ENODEV; + + return phy_mii_ioctl(phy_dev, req, cmd); +} + +/* ethtool ops */ + +static void ax_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct platform_device *pdev = to_platform_device(dev->dev.parent); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pdev->name); +} + +static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = ax->phy_dev; + + if (!phy_dev) + return -ENODEV; + + return phy_ethtool_gset(phy_dev, cmd); +} + +static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = ax->phy_dev; + + if (!phy_dev) + return -ENODEV; + + return phy_ethtool_sset(phy_dev, cmd); +} + +static const struct ethtool_ops ax_ethtool_ops = { + .get_drvinfo = ax_get_drvinfo, + .get_settings = ax_get_settings, + .set_settings = ax_set_settings, + .get_link = ethtool_op_get_link, +}; + +#ifdef CONFIG_AX88796_93CX6 +static void ax_eeprom_register_read(struct eeprom_93cx6 *eeprom) +{ + struct ei_device *ei_local = eeprom->data; + u8 reg = ei_inb(ei_local->mem + AX_MEMR); + + eeprom->reg_data_in = reg & AX_MEMR_EEI; + eeprom->reg_data_out = reg & AX_MEMR_EEO; /* Input pin */ + eeprom->reg_data_clock = reg & AX_MEMR_EECLK; + eeprom->reg_chip_select = reg & AX_MEMR_EECS; +} + +static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom) +{ + struct ei_device *ei_local = eeprom->data; + u8 reg = ei_inb(ei_local->mem + AX_MEMR); + + reg &= ~(AX_MEMR_EEI | AX_MEMR_EECLK | AX_MEMR_EECS); + + if (eeprom->reg_data_in) + reg |= AX_MEMR_EEI; + if (eeprom->reg_data_clock) + reg |= AX_MEMR_EECLK; + if (eeprom->reg_chip_select) + reg |= AX_MEMR_EECS; + + ei_outb(reg, ei_local->mem + AX_MEMR); + udelay(10); +} +#endif + +static const struct net_device_ops ax_netdev_ops = { + .ndo_open = ax_open, + .ndo_stop = ax_close, + .ndo_do_ioctl = ax_ioctl, + + .ndo_start_xmit = ax_ei_start_xmit, + .ndo_tx_timeout = ax_ei_tx_timeout, + .ndo_get_stats = ax_ei_get_stats, + .ndo_set_multicast_list = ax_ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ax_ei_poll, +#endif +}; + +static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (level) + ax->reg_memr |= AX_MEMR_MDC; + else + ax->reg_memr &= ~AX_MEMR_MDC; + + ei_outb(ax->reg_memr, 
ax->addr_memr);
+}
+
+static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+	if (output)
+		ax->reg_memr &= ~AX_MEMR_MDIR;
+	else
+		ax->reg_memr |= AX_MEMR_MDIR;
+
+	ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+	if (value)
+		ax->reg_memr |= AX_MEMR_MDO;
+	else
+		ax->reg_memr &= ~AX_MEMR_MDO;
+
+	ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+	int reg_memr = ei_inb(ax->addr_memr);
+
+	return reg_memr & AX_MEMR_MDI ? 1 : 0;
+}
+
+static struct mdiobb_ops bb_ops = {
+	.owner = THIS_MODULE,
+	.set_mdc = ax_bb_mdc,
+	.set_mdio_dir = ax_bb_dir,
+	.set_mdio_data = ax_bb_set_data,
+	.get_mdio_data = ax_bb_get_data,
+};
+
+/* setup code */
+
+static int ax_mii_init(struct net_device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev->dev.parent);
+	struct ei_device *ei_local = netdev_priv(dev);
+	struct ax_device *ax = to_ax_dev(dev);
+	int err, i;
+
+	ax->bb_ctrl.ops = &bb_ops;
+	ax->addr_memr = ei_local->mem + AX_MEMR;
+	ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
+	if (!ax->mii_bus) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	ax->mii_bus->name = "ax88796_mii_bus";
+	ax->mii_bus->parent = dev->dev.parent;
+	snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+	ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!ax->mii_bus->irq) {
+		err = -ENOMEM;
+		goto out_free_mdio_bitbang;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		ax->mii_bus->irq[i] = PHY_POLL;
+
+	err = mdiobus_register(ax->mii_bus);
+	if (err)
+		goto out_free_irq;
+
+	return 0;
+
+ out_free_irq:
+	kfree(ax->mii_bus->irq);
+ out_free_mdio_bitbang:
+	free_mdio_bitbang(ax->mii_bus);
+ out:
+	return err;
+}
+
+static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
+{
+	void __iomem *ioaddr = ei_local->mem;
+	struct ax_device *ax = to_ax_dev(dev);
+
+	/* Select page 0 */
+	ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
+
+	/* set to byte access */
+	ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
+	ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
+}
+
+/*
+ * ax_init_dev
+ *
+ * initialise the specified device, taking care to note the MAC
+ * address it may already have (if configured), ensuring
+ * the device is ready to be used by lib8390.c and registered with
+ * the network layer.
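+ * (Order of operations in the function below: probe sanity check,
+ * initial register setup, MAC address discovery -- PROM, optional
+ * 93cx6 EEPROM, device registers or platform data -- buffer page
+ * layout for 8 or 16 bit mode, then the lib8390 callbacks, MII bus
+ * registration and finally register_netdev().)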
+ */ +static int ax_init_dev(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); + void __iomem *ioaddr = ei_local->mem; + unsigned int start_page; + unsigned int stop_page; + int ret; + int i; + + ret = ax_initial_check(dev); + if (ret) + goto err_out; + + /* setup goes here */ + + ax_initial_setup(dev, ei_local); + + /* read the mac from the card prom if we need it */ + + if (ax->plat->flags & AXFLG_HAS_EEPROM) { + unsigned char SA_prom[32]; + + for (i = 0; i < sizeof(SA_prom); i += 2) { + SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); + SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT); + } + + if (ax->plat->wordlength == 2) + for (i = 0; i < 16; i++) + SA_prom[i] = SA_prom[i+i]; + + memcpy(dev->dev_addr, SA_prom, 6); + } + +#ifdef CONFIG_AX88796_93CX6 + if (ax->plat->flags & AXFLG_HAS_93CX6) { + unsigned char mac_addr[6]; + struct eeprom_93cx6 eeprom; + + eeprom.data = ei_local; + eeprom.register_read = ax_eeprom_register_read; + eeprom.register_write = ax_eeprom_register_write; + eeprom.width = PCI_EEPROM_WIDTH_93C56; + + eeprom_93cx6_multiread(&eeprom, 0, + (__le16 __force *)mac_addr, + sizeof(mac_addr) >> 1); + + memcpy(dev->dev_addr, mac_addr, 6); + } +#endif + if (ax->plat->wordlength == 2) { + /* We must set the 8390 for word mode. */ + ei_outb(ax->plat->dcr_val, ei_local->mem + EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + } else { + start_page = NE1SM_START_PG; + stop_page = NE1SM_STOP_PG; + } + + /* load the mac-address from the device */ + if (ax->plat->flags & AXFLG_MAC_FROMDEV) { + ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, + ei_local->mem + E8390_CMD); /* 0x61 */ + for (i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = + ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); + } + + if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) && + ax->plat->mac_addr) + memcpy(dev->dev_addr, ax->plat->mac_addr, + ETHER_ADDR_LEN); + + ax_reset_8390(dev); + + ei_local->name = "AX88796"; + ei_local->tx_start_page = start_page; + ei_local->stop_page = stop_page; + ei_local->word16 = (ax->plat->wordlength == 2); + ei_local->rx_start_page = start_page + TX_PAGES; + +#ifdef PACKETBUF_MEMSIZE + /* Allow the packet buffer size to be overridden by know-it-alls. */ + ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE; +#endif + + ei_local->reset_8390 = &ax_reset_8390; + ei_local->block_input = &ax_block_input; + ei_local->block_output = &ax_block_output; + ei_local->get_8390_hdr = &ax_get_8390_hdr; + ei_local->priv = 0; + + dev->netdev_ops = &ax_netdev_ops; + dev->ethtool_ops = &ax_ethtool_ops; + + ret = ax_mii_init(dev); + if (ret) + goto out_irq; + + ax_NS8390_init(dev, 0); + + ret = register_netdev(dev); + if (ret) + goto out_irq; + + netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n", + ei_local->word16 ? 
16 : 8, dev->irq, dev->base_addr,
+		    dev->dev_addr);
+
+	return 0;
+
+ out_irq:
+	/* cleanup irq */
+	free_irq(dev->irq, dev);
+ err_out:
+	return ret;
+}
+
+static int ax_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct ei_device *ei_local = netdev_priv(dev);
+	struct ax_device *ax = to_ax_dev(dev);
+	struct resource *mem;
+
+	unregister_netdev(dev);
+	free_irq(dev->irq, dev);
+
+	iounmap(ei_local->mem);
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(mem->start, resource_size(mem));
+
+	if (ax->map2) {
+		iounmap(ax->map2);
+		mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		release_mem_region(mem->start, resource_size(mem));
+	}
+
+	free_netdev(dev);
+
+	return 0;
+}
+
+/*
+ * ax_probe
+ *
+ * This is the entry point the platform device system uses to
+ * notify us of a new device to attach to. Allocate memory, find the
+ * resources and information passed, and map the necessary registers.
+ */
+static int ax_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct ei_device *ei_local;
+	struct ax_device *ax;
+	struct resource *irq, *mem, *mem2;
+	resource_size_t mem_size, mem2_size = 0;
+	int ret = 0;
+
+	dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
+	if (dev == NULL)
+		return -ENOMEM;
+
+	/* ok, let's setup our device */
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	ei_local = netdev_priv(dev);
+	ax = to_ax_dev(dev);
+
+	ax->plat = pdev->dev.platform_data;
+	platform_set_drvdata(pdev, dev);
+
+	ei_local->rxcr_base = ax->plat->rcr_val;
+
+	/* find the platform resources */
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
+		dev_err(&pdev->dev, "no IRQ specified\n");
+		ret = -ENXIO;
+		goto exit_mem;
+	}
+
+	dev->irq = irq->start;
+	ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "no MEM specified\n");
+		ret = -ENXIO;
+		goto exit_mem;
+	}
+
+	mem_size = resource_size(mem);
+
+	/*
+	 * setup the register offsets from either the platform data or
+	 * by using the size of the resource provided
+	 */
+	if (ax->plat->reg_offsets)
+		ei_local->reg_offset = ax->plat->reg_offsets;
+	else {
+		ei_local->reg_offset = ax->reg_offsets;
+		for (ret = 0; ret < 0x18; ret++)
+			ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
+	}
+
+	if (!request_mem_region(mem->start, mem_size, pdev->name)) {
+		dev_err(&pdev->dev, "cannot reserve registers\n");
+		ret = -ENXIO;
+		goto exit_mem;
+	}
+
+	ei_local->mem = ioremap(mem->start, mem_size);
+	dev->base_addr = (unsigned long)ei_local->mem;
+
+	if (ei_local->mem == NULL) {
+		dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
+
+		ret = -ENXIO;
+		goto exit_req;
+	}
+
+	/* look for reset area */
+	mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!mem2) {
+		if (!ax->plat->reg_offsets) {
+			for (ret = 0; ret < 0x20; ret++)
+				ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
+		}
+	} else {
+		mem2_size = resource_size(mem2);
+
+		if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
+			dev_err(&pdev->dev, "cannot reserve registers\n");
+			ret = -ENXIO;
+			goto exit_mem1;
+		}
+
+		ax->map2 = ioremap(mem2->start, mem2_size);
+		if (!ax->map2) {
+			dev_err(&pdev->dev, "cannot map reset register\n");
+			ret = -ENXIO;
+			goto exit_mem2;
+		}
+
+		ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
+	}
+
+	/* got resources, now initialise and register device */
+	ret = ax_init_dev(dev);
+	if (!ret)
+		return 0;
+
+	if (!ax->map2)
+		goto exit_mem1;
+
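+	/* ax_init_dev() failed with the reset window mapped; unwind the
+	 * probe steps in reverse order by falling through the labels
+	 * below. */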
+	iounmap(ax->map2);
+
+ exit_mem2:
+	release_mem_region(mem2->start, mem2_size);
+
+ exit_mem1:
+	iounmap(ei_local->mem);
+
+ exit_req:
+	release_mem_region(mem->start, mem_size);
+
+ exit_mem:
+	free_netdev(dev);
+
+	return ret;
+}
+
+/* suspend and resume */
+
+#ifdef CONFIG_PM
+static int ax_suspend(struct platform_device *dev, pm_message_t state)
+{
+	struct net_device *ndev = platform_get_drvdata(dev);
+	struct ax_device *ax = to_ax_dev(ndev);
+
+	ax->resume_open = ax->running;
+
+	netif_device_detach(ndev);
+	ax_close(ndev);
+
+	return 0;
+}
+
+static int ax_resume(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct ax_device *ax = to_ax_dev(ndev);
+
+	ax_initial_setup(ndev, netdev_priv(ndev));
+	ax_NS8390_init(ndev, ax->resume_open);
+	netif_device_attach(ndev);
+
+	if (ax->resume_open)
+		ax_open(ndev);
+
+	return 0;
+}
+
+#else
+#define ax_suspend NULL
+#define ax_resume NULL
+#endif
+
+static struct platform_driver axdrv = {
+	.driver	= {
+		.name		= "ax88796",
+		.owner		= THIS_MODULE,
+	},
+	.probe		= ax_probe,
+	.remove		= ax_remove,
+	.suspend	= ax_suspend,
+	.resume		= ax_resume,
+};
+
+static int __init axdrv_init(void)
+{
+	return platform_driver_register(&axdrv);
+}
+
+static void __exit axdrv_exit(void)
+{
+	platform_driver_unregister(&axdrv);
+}
+
+module_init(axdrv_init);
+module_exit(axdrv_exit);
+
+MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
+MODULE_AUTHOR("Ben Dooks, ");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ax88796");
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
new file mode 100644
index 0000000..3e4b926
--- /dev/null
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -0,0 +1,1725 @@
+/*======================================================================
+
+    A PCMCIA ethernet driver for Asix AX88190-based cards
+
+    The Asix AX88190 is a NS8390-derived chipset with a few nasty
+    idiosyncrasies that make it very inconvenient to support with a
+    standard 8390 driver. This driver is based on pcnet_cs, with the
+    tweaked 8390 code grafted on the end. Much of what I did was to
+    clean up and update a similar driver supplied by Asix, which was
+    adapted by William Lee, william@asix.com.tw.
+
+    Copyright (C) 2001 David A. Hinds -- dahinds@users.sourceforge.net
+
+    axnet_cs.c 1.28 2002/06/29 06:27:37
+
+    The network driver code is based on Donald Becker's NE2000 code:
+
+    Written 1992,1993 by Donald Becker.
+    Copyright 1993 United States Government as represented by the
+    Director, National Security Agency. This software may be used and
+    distributed according to the terms of the GNU General Public License,
+    incorporated herein by reference.
+    Donald Becker may be reached at becker@scyld.com
+
+======================================================================*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include "8390.h"
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define AXNET_CMD	0x00
+#define AXNET_DATAPORT	0x10	/* NatSemi-defined port window offset. */
+#define AXNET_RESET	0x1f	/* Issue a read to reset, a write to clear.
*/ +#define AXNET_MII_EEP 0x14 /* Offset of MII access port */ +#define AXNET_TEST 0x15 /* Offset of TEST Register port */ +#define AXNET_GPIO 0x17 /* Offset of General Purpose Register Port */ + +#define AXNET_START_PG 0x40 /* First page of TX buffer */ +#define AXNET_STOP_PG 0x80 /* Last page +1 of RX ring */ + +#define AXNET_RDC_TIMEOUT 0x02 /* Max wait in jiffies for Tx RDC */ + +#define IS_AX88190 0x0001 +#define IS_AX88790 0x0002 + +/*====================================================================*/ + +/* Module parameters */ + +MODULE_AUTHOR("David Hinds "); +MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); + + +/*====================================================================*/ + +static int axnet_config(struct pcmcia_device *link); +static void axnet_release(struct pcmcia_device *link); +static int axnet_open(struct net_device *dev); +static int axnet_close(struct net_device *dev); +static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static netdev_tx_t axnet_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static struct net_device_stats *get_stats(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void axnet_tx_timeout(struct net_device *dev); +static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); +static void ei_watchdog(u_long arg); +static void axnet_reset_8390(struct net_device *dev); + +static int mdio_read(unsigned int addr, int phy_id, int loc); +static void mdio_write(unsigned int addr, int phy_id, int loc, int value); + +static void get_8390_hdr(struct net_device *, + struct e8390_pkt_hdr *, int); +static void block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void block_output(struct net_device *dev, int count, + const u_char *buf, const int start_page); + +static void axnet_detach(struct pcmcia_device *p_dev); + +static void AX88190_init(struct net_device *dev, int startp); +static int ax_open(struct net_device *dev); +static int ax_close(struct net_device *dev); +static irqreturn_t ax_interrupt(int irq, void *dev_id); + +/*====================================================================*/ + +typedef struct axnet_dev_t { + struct pcmcia_device *p_dev; + caddr_t base; + struct timer_list watchdog; + int stale, fast_poll; + u_short link_status; + u_char duplex_flag; + int phy_id; + int flags; + int active_low; +} axnet_dev_t; + +static inline axnet_dev_t *PRIV(struct net_device *dev) +{ + void *p = (char *)netdev_priv(dev) + sizeof(struct ei_device); + return p; +} + +static const struct net_device_ops axnet_netdev_ops = { + .ndo_open = axnet_open, + .ndo_stop = axnet_close, + .ndo_do_ioctl = axnet_ioctl, + .ndo_start_xmit = axnet_start_xmit, + .ndo_tx_timeout = axnet_tx_timeout, + .ndo_get_stats = get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int axnet_probe(struct pcmcia_device *link) +{ + axnet_dev_t *info; + struct net_device *dev; + struct ei_device *ei_local; + + dev_dbg(&link->dev, "axnet_attach()\n"); + + dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(axnet_dev_t)); + if (!dev) + return -ENOMEM; + + ei_local = netdev_priv(dev); + spin_lock_init(&ei_local->page_lock); + + info = PRIV(dev); + info->p_dev = link; + link->priv = dev; + link->config_flags |= CONF_ENABLE_IRQ; + + dev->netdev_ops = &axnet_netdev_ops; + + dev->watchdog_timeo = TX_TIMEOUT; 
+ + return axnet_config(link); +} /* axnet_attach */ + +static void axnet_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link); + + unregister_netdev(dev); + + axnet_release(link); + + free_netdev(dev); +} /* axnet_detach */ + +/*====================================================================== + + This probes for a card's hardware address by reading the PROM. + +======================================================================*/ + +static int get_prom(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + unsigned int ioaddr = dev->base_addr; + int i, j; + + /* This is based on drivers/net/ne.c */ + struct { + u_char value, offset; + } program_seq[] = { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x01, EN0_DCFG}, /* Set word-wide access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF|0x40, EN0_RXCR}, /* 0x60 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {0x10, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0400. */ + {0x04, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + + /* Not much of a test, but the alternatives are messy */ + if (link->config_base != 0x03c0) + return 0; + + axnet_reset_8390(dev); + mdelay(10); + + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); + + for (i = 0; i < 6; i += 2) { + j = inw(ioaddr + AXNET_DATAPORT); + dev->dev_addr[i] = j & 0xff; + dev->dev_addr[i+1] = j >> 8; + } + return 1; +} /* get_prom */ + +static int try_io_port(struct pcmcia_device *link) +{ + int j, ret; + link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + if (link->resource[0]->end == 32) { + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + /* for master/slave multifunction cards */ + if (link->resource[1]->end > 0) + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + } else { + /* This should be two 16-port windows */ + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16; + } + if (link->resource[0]->start == 0) { + for (j = 0; j < 0x400; j += 0x20) { + link->resource[0]->start = j ^ 0x300; + link->resource[1]->start = (j ^ 0x300) + 0x10; + link->io_lines = 16; + ret = pcmcia_request_io(link); + if (ret == 0) + return ret; + } + return ret; + } else { + return pcmcia_request_io(link); + } +} + +static int axnet_configcheck(struct pcmcia_device *p_dev, void *priv_data) +{ + if (p_dev->config_index == 0) + return -EINVAL; + + p_dev->config_index = 0x05; + if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32) + return -ENODEV; + + return try_io_port(p_dev); +} + +static int axnet_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + axnet_dev_t *info = PRIV(dev); + int i, j, j2, ret; + + dev_dbg(&link->dev, "axnet_config(0x%p)\n", link); + + /* don't trust the CIS on this; Linksys got it wrong */ + link->config_regs = 0x63; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, axnet_configcheck, NULL); + if (ret != 0) + goto failed; + + if (!link->irq) + goto failed; + + if (resource_size(link->resource[1]) == 8) + link->config_flags |= CONF_ENABLE_SPKR; + + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + dev->irq = link->irq; + 
dev->base_addr = link->resource[0]->start;
+
+    if (!get_prom(link)) {
+	pr_notice("this is not an AX88190 card!\n");
+	pr_notice("use pcnet_cs instead.\n");
+	goto failed;
+    }
+
+    ei_status.name = "AX88190";
+    ei_status.word16 = 1;
+    ei_status.tx_start_page = AXNET_START_PG;
+    ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
+    ei_status.stop_page = AXNET_STOP_PG;
+    ei_status.reset_8390 = axnet_reset_8390;
+    ei_status.get_8390_hdr = get_8390_hdr;
+    ei_status.block_input = block_input;
+    ei_status.block_output = block_output;
+
+    if (inb(dev->base_addr + AXNET_TEST) != 0)
+	info->flags |= IS_AX88790;
+    else
+	info->flags |= IS_AX88190;
+
+    if (info->flags & IS_AX88790)
+	outb(0x10, dev->base_addr + AXNET_GPIO);  /* select Internal PHY */
+
+    info->active_low = 0;
+
+    for (i = 0; i < 32; i++) {
+	j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
+	j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
+	if (j == j2) continue;
+	if ((j != 0) && (j != 0xffff)) break;
+    }
+
+    if (i == 32) {
+	/* Maybe PHY is in power down mode. (PPD_SET = 1)
+	   Bit 2 of CCSR is active low. */
+	pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+	for (i = 0; i < 32; i++) {
+	    j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
+	    j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
+	    if (j == j2) continue;
+	    if ((j != 0) && (j != 0xffff)) {
+		info->active_low = 1;
+		break;
+	    }
+	}
+    }
+
+    info->phy_id = (i < 32) ? i : -1;
+    SET_NETDEV_DEV(dev, &link->dev);
+
+    if (register_netdev(dev) != 0) {
+	pr_notice("register_netdev() failed\n");
+	goto failed;
+    }
+
+    netdev_info(dev, "Asix AX88%d90: io %#3lx, irq %d, hw_addr %pM\n",
+		((info->flags & IS_AX88790) ? 7 : 1),
+		dev->base_addr, dev->irq, dev->dev_addr);
+    if (info->phy_id != -1) {
+	netdev_dbg(dev, "  MII transceiver at index %d, status %x\n",
+		   info->phy_id, j);
+    } else {
+	netdev_notice(dev, "  No MII transceivers found!\n");
+    }
+    return 0;
+
+failed:
+    axnet_release(link);
+    return -ENODEV;
+} /* axnet_config */
+
+static void axnet_release(struct pcmcia_device *link)
+{
+	pcmcia_disable_device(link);
+}
+
+static int axnet_suspend(struct pcmcia_device *link)
+{
+	struct net_device *dev = link->priv;
+
+	if (link->open)
+		netif_device_detach(dev);
+
+	return 0;
+}
+
+static int axnet_resume(struct pcmcia_device *link)
+{
+	struct net_device *dev = link->priv;
+	axnet_dev_t *info = PRIV(dev);
+
+	if (link->open) {
+		if (info->active_low == 1)
+			pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+
+		axnet_reset_8390(dev);
+		AX88190_init(dev, 1);
+		netif_device_attach(dev);
+	}
+
+	return 0;
+}
+
+
+/*======================================================================
+
+    MII interface support
+
+======================================================================*/
+
+#define MDIO_SHIFT_CLK		0x01
+#define MDIO_DATA_WRITE0	0x00
+#define MDIO_DATA_WRITE1	0x08
+#define MDIO_DATA_READ		0x04
+#define MDIO_MASK		0x0f
+#define MDIO_ENB_IN		0x02
+
+static void mdio_sync(unsigned int addr)
+{
+    int bits;
+    for (bits = 0; bits < 32; bits++) {
+	outb_p(MDIO_DATA_WRITE1, addr);
+	outb_p(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+    }
+}
+
+static int mdio_read(unsigned int addr, int phy_id, int loc)
+{
+    u_int cmd = (0xf6<<10)|(phy_id<<5)|loc;
+    int i, retval = 0;
+
+    mdio_sync(addr);
+    for (i = 14; i >= 0; i--) {
+	int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+	outb_p(dat, addr);
+	outb_p(dat | MDIO_SHIFT_CLK, addr);
+    }
+    for (i = 19; i > 0; i--) {
+	outb_p(MDIO_ENB_IN, addr);
+	retval = (retval << 1) | ((inb_p(addr) & MDIO_DATA_READ) != 0);
+	outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
+    }
+    return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
+{
+    u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+    int i;
+
+    mdio_sync(addr);
+    for (i = 31; i >= 0; i--) {
+	int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+	outb_p(dat, addr);
+	outb_p(dat | MDIO_SHIFT_CLK, addr);
+    }
+    for (i = 1; i >= 0; i--) {
+	outb_p(MDIO_ENB_IN, addr);
+	outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
+    }
+}
+
+/*====================================================================*/
+
+static int axnet_open(struct net_device *dev)
+{
+    int ret;
+    axnet_dev_t *info = PRIV(dev);
+    struct pcmcia_device *link = info->p_dev;
+    unsigned int nic_base = dev->base_addr;
+
+    dev_dbg(&link->dev, "axnet_open('%s')\n", dev->name);
+
+    if (!pcmcia_dev_present(link))
+	return -ENODEV;
+
+    outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */
+    ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev);
+    if (ret)
+	    return ret;
+
+    link->open++;
+
+    info->link_status = 0x00;
+    init_timer(&info->watchdog);
+    info->watchdog.function = ei_watchdog;
+    info->watchdog.data = (u_long)dev;
+    info->watchdog.expires = jiffies + HZ;
+    add_timer(&info->watchdog);
+
+    return ax_open(dev);
+} /* axnet_open */
+
+/*====================================================================*/
+
+static int axnet_close(struct net_device *dev)
+{
+    axnet_dev_t *info = PRIV(dev);
+    struct pcmcia_device *link = info->p_dev;
+
+    dev_dbg(&link->dev, "axnet_close('%s')\n", dev->name);
+
+    ax_close(dev);
+    free_irq(dev->irq, dev);
+
+    link->open--;
+    netif_stop_queue(dev);
+    del_timer_sync(&info->watchdog);
+
+    return 0;
+} /* axnet_close */
+
+/*======================================================================
+
+    Hard reset the card. This used to pause for the same period that
+    a 8390 reset command required, but that shouldn't be necessary.
+
+======================================================================*/
+
+static void axnet_reset_8390(struct net_device *dev)
+{
+    unsigned int nic_base = dev->base_addr;
+    int i;
+
+    ei_status.txing = ei_status.dmaing = 0;
+
+    outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);
+
+    outb(inb(nic_base + AXNET_RESET), nic_base + AXNET_RESET);
+
+    for (i = 0; i < 100; i++) {
+	if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
+	    break;
+	udelay(100);
+    }
+    outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr.
*/ + + if (i == 100) + netdev_err(dev, "axnet_reset_8390() did not complete\n"); + +} /* axnet_reset_8390 */ + +/*====================================================================*/ + +static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + PRIV(dev)->stale = 0; + return ax_interrupt(irq, dev_id); +} + +static void ei_watchdog(u_long arg) +{ + struct net_device *dev = (struct net_device *)(arg); + axnet_dev_t *info = PRIV(dev); + unsigned int nic_base = dev->base_addr; + unsigned int mii_addr = nic_base + AXNET_MII_EEP; + u_short link; + + if (!netif_device_present(dev)) goto reschedule; + + /* Check for pending interrupt with expired latency timer: with + this, we can limp along even if the interrupt is blocked */ + if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { + if (!info->fast_poll) + netdev_info(dev, "interrupt(s) dropped!\n"); + ei_irq_wrapper(dev->irq, dev); + info->fast_poll = HZ; + } + if (info->fast_poll) { + info->fast_poll--; + info->watchdog.expires = jiffies + 1; + add_timer(&info->watchdog); + return; + } + + if (info->phy_id < 0) + goto reschedule; + link = mdio_read(mii_addr, info->phy_id, 1); + if (!link || (link == 0xffff)) { + netdev_info(dev, "MII is missing!\n"); + info->phy_id = -1; + goto reschedule; + } + + link &= 0x0004; + if (link != info->link_status) { + u_short p = mdio_read(mii_addr, info->phy_id, 5); + netdev_info(dev, "%s link beat\n", link ? "found" : "lost"); + if (link) { + info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00; + if (p) + netdev_info(dev, "autonegotiation complete: %dbaseT-%cD selected\n", + (p & 0x0180) ? 100 : 10, (p & 0x0140) ? 'F' : 'H'); + else + netdev_info(dev, "link partner did not autonegotiate\n"); + AX88190_init(dev, 1); + } + info->link_status = link; + } + +reschedule: + info->watchdog.expires = jiffies + HZ; + add_timer(&info->watchdog); +} + +/*====================================================================*/ + +static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + axnet_dev_t *info = PRIV(dev); + struct mii_ioctl_data *data = if_mii(rq); + unsigned int mii_addr = dev->base_addr + AXNET_MII_EEP; + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = info->phy_id; + case SIOCGMIIREG: /* Read MII PHY register. */ + data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); + return 0; + case SIOCSMIIREG: /* Write MII PHY register. 
*/ + mdio_write(mii_addr, data->phy_id, data->reg_num & 0x1f, data->val_in); + return 0; + } + return -EOPNOTSUPP; +} + +/*====================================================================*/ + +static void get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, + int ring_page) +{ + unsigned int nic_base = dev->base_addr; + + outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD); + + insw(nic_base + AXNET_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr)>>1); + /* Fix for big endian systems */ + hdr->count = le16_to_cpu(hdr->count); + +} + +/*====================================================================*/ + +static void block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned int nic_base = dev->base_addr; + int xfer_count = count; + char *buf = skb->data; + + if ((ei_debug > 4) && (count != 4)) + pr_debug("%s: [bi=%d]\n", dev->name, count+4); + outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); + outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD); + + insw(nic_base + AXNET_DATAPORT,buf,count>>1); + if (count & 0x01) + buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++; + +} + +/*====================================================================*/ + +static void block_output(struct net_device *dev, int count, + const u_char *buf, const int start_page) +{ + unsigned int nic_base = dev->base_addr; + + pr_debug("%s: [bo=%d]\n", dev->name, count); + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + if (count & 0x01) + count++; + + outb_p(0x00, nic_base + EN0_RSARLO); + outb_p(start_page, nic_base + EN0_RSARHI); + outb_p(E8390_RWRITE+E8390_START, nic_base + AXNET_CMD); + outsw(nic_base + AXNET_DATAPORT, buf, count>>1); +} + +static const struct pcmcia_device_id axnet_ids[] = { + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081), + PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301), + PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309), + PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106), + PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), + PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), + PCMCIA_DEVICE_MANF_CARD(0xffff, 0x1090), + PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc), + PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef), + PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef), + PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1), + PCMCIA_DEVICE_PROD_ID12("CHEETAH ETHERCARD", "EN2228", 0x00fa7bc8, 0x00e990cc), + PCMCIA_DEVICE_PROD_ID12("CNet", "CNF301", 0xbc477dde, 0x78c5f40b), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXM", 0x5261440f, 0x3abbd061), + PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90), + PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2), + PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8), + PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 
0x481e0094, 0xf91af609),
+	PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
+	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04),
+	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
+	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
+	PCMCIA_DEVICE_PROD_ID14("Network Everywhere", "AX88190", 0x820a67b6, 0xab9be5ef),
+	PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, axnet_ids);
+
+static struct pcmcia_driver axnet_cs_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "axnet_cs",
+	.probe		= axnet_probe,
+	.remove		= axnet_detach,
+	.id_table	= axnet_ids,
+	.suspend	= axnet_suspend,
+	.resume		= axnet_resume,
+};
+
+static int __init init_axnet_cs(void)
+{
+	return pcmcia_register_driver(&axnet_cs_driver);
+}
+
+static void __exit exit_axnet_cs(void)
+{
+	pcmcia_unregister_driver(&axnet_cs_driver);
+}
+
+module_init(init_axnet_cs);
+module_exit(exit_axnet_cs);
+
+/*====================================================================*/
+
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+   Written 1992-94 by Donald Becker.
+
+   Copyright 1993 United States Government as represented by the
+   Director, National Security Agency.
+
+   This software may be used and distributed according to the terms
+   of the GNU General Public License, incorporated herein by reference.
+
+   The author may be reached as becker@scyld.com, or C/O
+   Scyld Computing Corporation
+   410 Severn Ave., Suite 210
+   Annapolis MD 21403
+
+   This is the chip-specific code for many 8390-based ethernet adaptors.
+   This is not a complete driver, it must be combined with board-specific
+   code such as ne.c, wd.c, 3c503.c, etc.
+
+   Seeing how at least eight drivers use this code (not counting the
+   PCMCIA ones), it is easy to break some card by what seems like a
+   simple innocent change. Please contact me or Donald if you think
+   you have found something that needs changing. -- PG
+
+   Changelog:
+
+   Paul Gortmaker	: remove set_bit lock, other cleanups.
+   Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
+			  ei_block_input() for eth_io_copy_and_sum().
+   Paul Gortmaker	: exchange static int ei_pingpong for a #define,
+			  also add better Tx error handling.
+   Paul Gortmaker	: rewrite Rx overrun handling as per NS specs.
+   Alexey Kuznetsov	: use the 8390's six bit hash multicast filter.
+   Paul Gortmaker	: tweak ANK's above multicast changes a bit.
+   Paul Gortmaker	: update packet statistics for v2.1.x
+   Alan Cox		: support arbitrary stupid port mappings on the
+			  68K Macintosh. Support >16bit I/O spaces
+   Paul Gortmaker	: add kmod support for auto-loading of the 8390
+			  module by all drivers that require it.
+   Alan Cox		: Spinlocking work, added 'BUG_83C690'
+   Paul Gortmaker	: Separate out Tx timeout code from Tx path.
+
+   Sources:
+   The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+   */
+
+#include <linux/bitops.h>
+#include <asm/irq.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+
+#define BUG_83C690
+
+/* These are the operational function interfaces to board-specific
+   routines.
+	void reset_8390(struct net_device *dev)
+		Resets the board associated with DEV, including a hardware reset of
+		the 8390. This is only called when there is a transmit timeout, and
+		it is always followed by 8390_init().
+	void block_output(struct net_device *dev, int count, const unsigned char *buf,
+			  int start_page)
+		Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+		"page" value uses the 8390's 256-byte pages.
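+		(Illustratively: the 8390 addresses its buffer RAM in 256-byte
+		pages, so a packet that starts on page N lives at ring offset
+		N<<8 as seen by block_input() below.)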
+ void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page) + Read the 4 byte, page aligned 8390 header. *If* there is a + subsequent read, it will be of the rest of the packet. + void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) + Read COUNT bytes from the packet buffer into the skb data area. Start + reading from RING_OFFSET, the address as the 8390 sees it. This will always + follow the read of the 8390 header. +*/ +#define ei_reset_8390 (ei_local->reset_8390) +#define ei_block_output (ei_local->block_output) +#define ei_block_input (ei_local->block_input) +#define ei_get_8390_hdr (ei_local->get_8390_hdr) + +/* use 0 for production, 1 for verification, >2 for debug */ +#ifndef ei_debug +int ei_debug = 1; +#endif + +/* Index to functions. */ +static void ei_tx_intr(struct net_device *dev); +static void ei_tx_err(struct net_device *dev); +static void ei_receive(struct net_device *dev); +static void ei_rx_overrun(struct net_device *dev); + +/* Routines generic to NS8390-based boards. */ +static void NS8390_trigger_send(struct net_device *dev, unsigned int length, + int start_page); +static void do_set_multicast_list(struct net_device *dev); + +/* + * SMP and the 8390 setup. + * + * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is + * a page register that controls bank and packet buffer access. We guard + * this with ei_local->page_lock. Nobody should assume or set the page other + * than zero when the lock is not held. Lock holders must restore page 0 + * before unlocking. Even pure readers must take the lock to protect in + * page 0. + * + * To make life difficult the chip can also be very slow. We therefore can't + * just use spinlocks. For the longer lockups we disable the irq the device + * sits on and hold the lock. We must hold the lock because there is a dual + * processor case other than interrupts (get stats/set multicast list in + * parallel with each other and transmit). + * + * Note: in theory we can just disable the irq on the card _but_ there is + * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs" + * enter lock, take the queued irq. So we waddle instead of flying. + * + * Finally by special arrangement for the purpose of being generally + * annoying the transmit function is called bh atomic. That places + * restrictions on the user context callers as disable_irq won't save + * them. + */ + +/** + * ax_open - Open/initialize the board. + * @dev: network device to initialize + * + * This routine goes all-out, setting everything + * up anew at each open, even though many of these registers should only + * need to be set once at boot. + */ +static int ax_open(struct net_device *dev) +{ + unsigned long flags; + struct ei_device *ei_local = netdev_priv(dev); + + /* + * Grab the page lock so we own the register set, then call + * the init function. + */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + AX88190_init(dev, 1); + /* Set the flag before we drop the lock, That way the IRQ arrives + after its set and we get no silly warnings */ + netif_start_queue(dev); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + ei_local->irqlock = 0; + return 0; +} + +#define dev_lock(dev) (((struct ei_device *)netdev_priv(dev))->page_lock) + +/** + * ax_close - shut down network device + * @dev: network device to close + * + * Opposite of ax_open(). Only used when "ifconfig down" is done. 
+ */ +static int ax_close(struct net_device *dev) +{ + unsigned long flags; + + /* + * Hold the page lock during close + */ + + spin_lock_irqsave(&dev_lock(dev), flags); + AX88190_init(dev, 0); + spin_unlock_irqrestore(&dev_lock(dev), flags); + netif_stop_queue(dev); + return 0; +} + +/** + * axnet_tx_timeout - handle transmit time out condition + * @dev: network device which has apparently fallen asleep + * + * Called by kernel when device never acknowledges a transmit has + * completed (or failed) - i.e. never posted a Tx related interrupt. + */ + +static void axnet_tx_timeout(struct net_device *dev) +{ + long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); + unsigned long flags; + + dev->stats.tx_errors++; + + spin_lock_irqsave(&ei_local->page_lock, flags); + txsr = inb(e8390_base+EN0_TSR); + isr = inb(e8390_base+EN0_ISR); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + netdev_printk(KERN_DEBUG, dev, + "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n", + (txsr & ENTSR_ABT) ? "excess collisions." : + (isr) ? "lost interrupt?" : "cable problem?", + txsr, isr, tickssofar); + + if (!isr && !dev->stats.tx_packets) + { + /* The 8390 probably hasn't gotten on the cable yet. */ + ei_local->interface_num ^= 1; /* Try a different xcvr. */ + } + + /* Ugly but a reset can be slow, yet must be protected */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + + /* Try to restart the card. Perhaps the user has fixed something. */ + ei_reset_8390(dev); + AX88190_init(dev, 1); + + spin_unlock_irqrestore(&ei_local->page_lock, flags); + netif_wake_queue(dev); +} + +/** + * axnet_start_xmit - begin packet transmission + * @skb: packet to be sent + * @dev: network device to which packet is sent + * + * Sends a packet to an 8390 network device. + */ + +static netdev_tx_t axnet_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int length, send_length, output_page; + unsigned long flags; + u8 packet[ETH_ZLEN]; + + netif_stop_queue(dev); + + length = skb->len; + + /* Mask interrupts from the ethercard. + SMP: We have to grab the lock here otherwise the IRQ handler + on another CPU can flip window and race the IRQ mask set. We end + up trashing the mcast filter not disabling irqs if we don't lock */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + outb_p(0x00, e8390_base + EN0_IMR); + + /* + * Slow phase with lock held. + */ + + ei_local->irqlock = 1; + + send_length = max(length, ETH_ZLEN); + + /* + * We have two Tx slots available for use. Find the first free + * slot, and then perform some sanity checks. With two Tx bufs, + * you get very close to transmitting back-to-back packets. With + * only one Tx buf, the transmitter sits idle while you reload the + * card, leaving a substantial gap between each transmitted packet. 
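+	 * (As a reading aid, inferred from the code below: tx1/tx2 hold
+	 * 0 when a slot is free, the queued length while a packet waits
+	 * in the slot, and -1 once that slot is on the wire; lasttx
+	 * records which slot was triggered last.)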
+ */ + + if (ei_local->tx1 == 0) + { + output_page = ei_local->tx_start_page; + ei_local->tx1 = send_length; + if (ei_debug && ei_local->tx2 > 0) + netdev_printk(KERN_DEBUG, dev, + "idle transmitter tx2=%d, lasttx=%d, txing=%d\n", + ei_local->tx2, ei_local->lasttx, + ei_local->txing); + } + else if (ei_local->tx2 == 0) + { + output_page = ei_local->tx_start_page + TX_PAGES/2; + ei_local->tx2 = send_length; + if (ei_debug && ei_local->tx1 > 0) + netdev_printk(KERN_DEBUG, dev, + "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n", + ei_local->tx1, ei_local->lasttx, + ei_local->txing); + } + else + { /* We should never get here. */ + if (ei_debug) + netdev_printk(KERN_DEBUG, dev, + "No Tx buffers free! tx1=%d tx2=%d last=%d\n", + ei_local->tx1, ei_local->tx2, + ei_local->lasttx); + ei_local->irqlock = 0; + netif_stop_queue(dev); + outb_p(ENISR_ALL, e8390_base + EN0_IMR); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + dev->stats.tx_errors++; + return NETDEV_TX_BUSY; + } + + /* + * Okay, now upload the packet and trigger a send if the transmitter + * isn't already sending. If it is busy, the interrupt handler will + * trigger the send later, upon receiving a Tx done interrupt. + */ + + if (length == skb->len) + ei_block_output(dev, length, skb->data, output_page); + else { + memset(packet, 0, ETH_ZLEN); + skb_copy_from_linear_data(skb, packet, skb->len); + ei_block_output(dev, length, packet, output_page); + } + + if (! ei_local->txing) + { + ei_local->txing = 1; + NS8390_trigger_send(dev, send_length, output_page); + dev->trans_start = jiffies; + if (output_page == ei_local->tx_start_page) + { + ei_local->tx1 = -1; + ei_local->lasttx = -1; + } + else + { + ei_local->tx2 = -1; + ei_local->lasttx = -2; + } + } + else ei_local->txqueue++; + + if (ei_local->tx1 && ei_local->tx2) + netif_stop_queue(dev); + else + netif_start_queue(dev); + + /* Turn 8390 interrupts back on. */ + ei_local->irqlock = 0; + outb_p(ENISR_ALL, e8390_base + EN0_IMR); + + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + dev_kfree_skb (skb); + dev->stats.tx_bytes += send_length; + + return NETDEV_TX_OK; +} + +/** + * ax_interrupt - handle the interrupts from an 8390 + * @irq: interrupt number + * @dev_id: a pointer to the net_device + * + * Handle the ether interface interrupts. We pull packets from + * the 8390 via the card specific functions and fire them at the networking + * stack. We also handle transmit completions and wake the transmit path if + * necessary. We also update the counters and do other housekeeping as + * needed. + */ + +static irqreturn_t ax_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + long e8390_base; + int interrupts, nr_serviced = 0, i; + struct ei_device *ei_local; + int handled = 0; + unsigned long flags; + + e8390_base = dev->base_addr; + ei_local = netdev_priv(dev); + + /* + * Protect the irq test too. + */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + + if (ei_local->irqlock) { +#if 1 /* This might just be an interrupt for a PCI device sharing this line */ + const char *msg; + /* The "irqlock" check is only for testing. 
*/ + if (ei_local->irqlock) + msg = "Interrupted while interrupts are masked!"; + else + msg = "Reentering the interrupt handler!"; + netdev_info(dev, "%s, isr=%#2x imr=%#2x\n", + msg, + inb_p(e8390_base + EN0_ISR), + inb_p(e8390_base + EN0_IMR)); +#endif + spin_unlock_irqrestore(&ei_local->page_lock, flags); + return IRQ_NONE; + } + + if (ei_debug > 3) + netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n", + inb_p(e8390_base + EN0_ISR)); + + outb_p(0x00, e8390_base + EN0_ISR); + ei_local->irqlock = 1; + + /* !!Assumption!! -- we stay in page 0. Don't break this. */ + while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0 && + ++nr_serviced < MAX_SERVICE) + { + if (!netif_running(dev) || (interrupts == 0xff)) { + if (ei_debug > 1) + netdev_warn(dev, + "interrupt from stopped card\n"); + outb_p(interrupts, e8390_base + EN0_ISR); + interrupts = 0; + break; + } + handled = 1; + + /* AX88190 bug fix. */ + outb_p(interrupts, e8390_base + EN0_ISR); + for (i = 0; i < 10; i++) { + if (!(inb(e8390_base + EN0_ISR) & interrupts)) + break; + outb_p(0, e8390_base + EN0_ISR); + outb_p(interrupts, e8390_base + EN0_ISR); + } + if (interrupts & ENISR_OVER) + ei_rx_overrun(dev); + else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) + { + /* Got a good (?) packet. */ + ei_receive(dev); + } + /* Push the next to-transmit packet through. */ + if (interrupts & ENISR_TX) + ei_tx_intr(dev); + else if (interrupts & ENISR_TX_ERR) + ei_tx_err(dev); + + if (interrupts & ENISR_COUNTERS) + { + dev->stats.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0); + dev->stats.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1); + dev->stats.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2); + } + } + + if (interrupts && ei_debug > 3) + { + handled = 1; + if (nr_serviced >= MAX_SERVICE) + { + /* 0xFF is valid for a card removal */ + if(interrupts!=0xFF) + netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n", + interrupts); + outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ + } else { + netdev_warn(dev, "unknown interrupt %#2x\n", + interrupts); + outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ + } + } + + /* Turn 8390 interrupts back on. */ + ei_local->irqlock = 0; + outb_p(ENISR_ALL, e8390_base + EN0_IMR); + + spin_unlock_irqrestore(&ei_local->page_lock, flags); + return IRQ_RETVAL(handled); +} + +/** + * ei_tx_err - handle transmitter error + * @dev: network device which threw the exception + * + * A transmitter error has happened. Most likely excess collisions (which + * is a fairly normal condition). If the error is one where the Tx will + * have been aborted, we try and send another one right away, instead of + * letting the failed packet sit and collect dust in the Tx buffer. This + * is a much better solution as it avoids kernel based Tx timeouts, and + * an unnecessary card reset. + * + * Called with lock held. 
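ei_tx_err() just below tests the transmit status register one bit at a time. Here is a user-space decode of the same bits; the ENTSR_* values are assumed from the 8390 register header and should be checked against 8390.h rather than taken as authoritative:

#include <stdio.h>

/* 8390 TSR bits (assumed values, per 8390.h). */
#define ENTSR_ND  0x02	/* non-deferral */
#define ENTSR_ABT 0x08	/* aborted: excess collisions */
#define ENTSR_CRS 0x10	/* carrier lost */
#define ENTSR_FU  0x20	/* FIFO underrun */
#define ENTSR_CDH 0x40	/* heartbeat lost */

static void decode_tsr(unsigned char txsr)
{
	printf("TSR %#02x:", txsr);
	if (txsr & ENTSR_ABT) printf(" excess-collisions");
	if (txsr & ENTSR_ND)  printf(" non-deferral");
	if (txsr & ENTSR_CRS) printf(" lost-carrier");
	if (txsr & ENTSR_FU)  printf(" FIFO-underrun");
	if (txsr & ENTSR_CDH) printf(" lost-heartbeat");
	printf("\n");
}

int main(void)
{
	decode_tsr(ENTSR_ABT | ENTSR_CRS);
	return 0;
}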
+ */ + +static void ei_tx_err(struct net_device *dev) +{ + long e8390_base = dev->base_addr; + unsigned char txsr = inb_p(e8390_base+EN0_TSR); + unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); + +#ifdef VERBOSE_ERROR_DUMP + netdev_printk(KERN_DEBUG, dev, + "transmitter error (%#2x):", txsr); + if (txsr & ENTSR_ABT) + pr_cont(" excess-collisions"); + if (txsr & ENTSR_ND) + pr_cont(" non-deferral"); + if (txsr & ENTSR_CRS) + pr_cont(" lost-carrier"); + if (txsr & ENTSR_FU) + pr_cont(" FIFO-underrun"); + if (txsr & ENTSR_CDH) + pr_cont(" lost-heartbeat"); + pr_cont("\n"); +#endif + + if (tx_was_aborted) + ei_tx_intr(dev); + else + { + dev->stats.tx_errors++; + if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++; + if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++; + if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++; + } +} + +/** + * ei_tx_intr - transmit interrupt handler + * @dev: network device for which tx intr is handled + * + * We have finished a transmit: check for errors and then trigger the next + * packet to be sent. Called with lock held. + */ + +static void ei_tx_intr(struct net_device *dev) +{ + long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int status = inb(e8390_base + EN0_TSR); + + /* + * There are two Tx buffers, see which one finished, and trigger + * the send of another one if it exists. + */ + ei_local->txqueue--; + + if (ei_local->tx1 < 0) + { + if (ei_local->lasttx != 1 && ei_local->lasttx != -1) + netdev_err(dev, "%s: bogus last_tx_buffer %d, tx1=%d\n", + ei_local->name, ei_local->lasttx, + ei_local->tx1); + ei_local->tx1 = 0; + if (ei_local->tx2 > 0) + { + ei_local->txing = 1; + NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); + dev->trans_start = jiffies; + ei_local->tx2 = -1, + ei_local->lasttx = 2; + } + else ei_local->lasttx = 20, ei_local->txing = 0; + } + else if (ei_local->tx2 < 0) + { + if (ei_local->lasttx != 2 && ei_local->lasttx != -2) + netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n", + ei_local->name, ei_local->lasttx, + ei_local->tx2); + ei_local->tx2 = 0; + if (ei_local->tx1 > 0) + { + ei_local->txing = 1; + NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); + dev->trans_start = jiffies; + ei_local->tx1 = -1; + ei_local->lasttx = 1; + } + else + ei_local->lasttx = 10, ei_local->txing = 0; + } +// else +// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n", +// ei_local->lasttx); + + /* Minimize Tx latency: update the statistics after we restart TXing. */ + if (status & ENTSR_COL) + dev->stats.collisions++; + if (status & ENTSR_PTX) + dev->stats.tx_packets++; + else + { + dev->stats.tx_errors++; + if (status & ENTSR_ABT) + { + dev->stats.tx_aborted_errors++; + dev->stats.collisions += 16; + } + if (status & ENTSR_CRS) + dev->stats.tx_carrier_errors++; + if (status & ENTSR_FU) + dev->stats.tx_fifo_errors++; + if (status & ENTSR_CDH) + dev->stats.tx_heartbeat_errors++; + if (status & ENTSR_OWC) + dev->stats.tx_window_errors++; + } + netif_wake_queue(dev); +} + +/** + * ei_receive - receive some packets + * @dev: network device with which receive will be run + * + * We have a good packet(s), get it/them out of the buffers. + * Called with lock held. 
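The receive loop that follows works in 256-byte ring pages: the boundary register lags the next frame by one page, and a frame occupies one page for its 4-byte header plus however many pages its data needs. A quick model of that page arithmetic, with hypothetical ring limits standing in for rx_start_page and stop_page:

#include <stdio.h>

#define RX_START_PAGE 0x40	/* stand-in for ei_local->rx_start_page */
#define STOP_PAGE     0x80	/* stand-in for ei_local->stop_page */

/* Frame to service next; the boundary pointer lags it by one page. */
static unsigned char frame_after_boundary(unsigned char boundary)
{
	unsigned char this_frame = boundary + 1;

	if (this_frame >= STOP_PAGE)	/* wrap back to the ring start */
		this_frame = RX_START_PAGE;
	return this_frame;
}

/* Pages consumed by one frame: 4-byte header plus pkt_len data bytes. */
static int pages_used(int pkt_len)
{
	return 1 + ((pkt_len + 4) >> 8);
}

int main(void)
{
	printf("%#x %d\n", frame_after_boundary(0x7f), pages_used(1514));
	return 0;	/* prints: 0x40 6 */
}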
+ */ + +static void ei_receive(struct net_device *dev) +{ + long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + unsigned char rxing_page, this_frame, next_frame; + unsigned short current_offset; + int rx_pkt_count = 0; + struct e8390_pkt_hdr rx_frame; + + while (++rx_pkt_count < 10) + { + int pkt_len, pkt_stat; + + /* Get the rx page (incoming packet pointer). */ + rxing_page = inb_p(e8390_base + EN1_CURPAG -1); + + /* Remove one frame from the ring. Boundary is always a page behind. */ + this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1; + if (this_frame >= ei_local->stop_page) + this_frame = ei_local->rx_start_page; + + /* Someday we'll omit the previous, iff we never get this message. + (There is at least one clone claimed to have a problem.) + + Keep quiet if it looks like a card removal. One problem here + is that some clones crash in roughly the same way. + */ + if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF)) + netdev_err(dev, "mismatched read page pointers %2x vs %2x\n", + this_frame, ei_local->current_page); + + if (this_frame == rxing_page) /* Read all the frames? */ + break; /* Done for now */ + + current_offset = this_frame << 8; + ei_get_8390_hdr(dev, &rx_frame, this_frame); + + pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); + pkt_stat = rx_frame.status; + + next_frame = this_frame + 1 + ((pkt_len+4)>>8); + + if (pkt_len < 60 || pkt_len > 1518) + { + if (ei_debug) + netdev_printk(KERN_DEBUG, dev, + "bogus packet size: %d, status=%#2x nxpg=%#2x\n", + rx_frame.count, rx_frame.status, + rx_frame.next); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + } + else if ((pkt_stat & 0x0F) == ENRSR_RXOK) + { + struct sk_buff *skb; + + skb = dev_alloc_skb(pkt_len+2); + if (skb == NULL) + { + if (ei_debug > 1) + netdev_printk(KERN_DEBUG, dev, + "Couldn't allocate a sk_buff of size %d\n", + pkt_len); + dev->stats.rx_dropped++; + break; + } + else + { + skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ + skb_put(skb, pkt_len); /* Make room */ + ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + if (pkt_stat & ENRSR_PHY) + dev->stats.multicast++; + } + } + else + { + if (ei_debug) + netdev_printk(KERN_DEBUG, dev, + "bogus packet: status=%#2x nxpg=%#2x size=%d\n", + rx_frame.status, rx_frame.next, + rx_frame.count); + dev->stats.rx_errors++; + /* NB: The NIC counts CRC, frame and missed errors. */ + if (pkt_stat & ENRSR_FO) + dev->stats.rx_fifo_errors++; + } + next_frame = rx_frame.next; + + /* This _should_ never happen: it's here for avoiding bad clones. */ + if (next_frame >= ei_local->stop_page) { + netdev_info(dev, "next frame inconsistency, %#2x\n", + next_frame); + next_frame = ei_local->rx_start_page; + } + ei_local->current_page = next_frame; + outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); + } +} + +/** + * ei_rx_overrun - handle receiver overrun + * @dev: network device which threw exception + * + * We have a receiver overrun: we have to kick the 8390 to get it started + * again. Problem is that you have to kick it exactly as NS prescribes in + * the updated datasheets, or "the NIC may act in an unpredictable manner." + * This includes causing "the NIC to defer indefinitely when it is stopped + * on a busy network." Ugh. + * Called with lock held. 
Don't call this with the interrupts off or your + * computer will hate you - it takes 10ms or so. + */ + +static void ei_rx_overrun(struct net_device *dev) +{ + axnet_dev_t *info = PRIV(dev); + long e8390_base = dev->base_addr; + unsigned char was_txing, must_resend = 0; + + /* + * Record whether a Tx was in progress and then issue the + * stop command. + */ + was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); + + if (ei_debug > 1) + netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n"); + dev->stats.rx_over_errors++; + + /* + * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. + * We wait at least 2ms. + */ + + mdelay(2); + + /* + * Reset RBCR[01] back to zero as per magic incantation. + */ + outb_p(0x00, e8390_base+EN0_RCNTLO); + outb_p(0x00, e8390_base+EN0_RCNTHI); + + /* + * See if any Tx was interrupted or not. According to NS, this + * step is vital, and skipping it will cause no end of havoc. + */ + + if (was_txing) + { + unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); + if (!tx_completed) + must_resend = 1; + } + + /* + * Have to enter loopback mode and then restart the NIC before + * you are allowed to slurp packets up off the ring. + */ + outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); + outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); + + /* + * Clear the Rx ring of all the debris, and ack the interrupt. + */ + ei_receive(dev); + + /* + * Leave loopback mode, and resend any packet that got stopped. + */ + outb_p(E8390_TXCONFIG | info->duplex_flag, e8390_base + EN0_TXCR); + if (must_resend) + outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); +} + +/* + * Collect the stats. This is called unlocked and from several contexts. + */ + +static struct net_device_stats *get_stats(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + unsigned long flags; + + /* If the card is stopped, just return the present stats. */ + if (!netif_running(dev)) + return &dev->stats; + + spin_lock_irqsave(&ei_local->page_lock,flags); + /* Read the counter registers, assuming we are in page 0. */ + dev->stats.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0); + dev->stats.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1); + dev->stats.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + return &dev->stats; +} + +/* + * Form the 64 bit 8390 multicast table from the linked list of addresses + * associated with this dev structure. + */ + +static inline void make_mc_bits(u8 *bits, struct net_device *dev) +{ + struct netdev_hw_addr *ha; + u32 crc; + + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc(ETH_ALEN, ha->addr); + /* + * The 8390 uses the 6 most significant bits of the + * CRC to index the multicast table. + */ + bits[crc>>29] |= (1<<((crc>>26)&7)); + } +} + +/** + * do_set_multicast_list - set/clear multicast filter + * @dev: net device for which multicast filter is adjusted + * + * Set or clear the multicast filter for this adaptor. + * Must be called with lock held. 
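make_mc_bits() above leans on ether_crc(), the MSB-first CRC-32 (polynomial 0x04c11db7) that Ethernet receivers compute over the destination address; the top six bits of the result pick one of the 64 filter positions that do_set_multicast_list() loads below. A self-contained model, where the CRC routine mirrors the kernel's bitwise ether_crc() and is reproduced here as an assumption:

#include <stdint.h>
#include <stdio.h>

/* Bitwise MSB-first CRC-32 over an Ethernet address. */
static uint32_t ether_crc_model(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;
	const uint32_t poly = 0x04c11db7;

	while (len-- > 0) {
		uint8_t octet = *data++;
		for (int bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (octet & 1)) ? poly : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint8_t bits[8] = { 0 };
	uint32_t crc = ether_crc_model(6, mac);

	/* Top 3 bits pick the filter byte, next 3 bits pick the bit. */
	bits[crc >> 29] |= 1 << ((crc >> 26) & 7);
	printf("crc=%08x byte=%u bit=%u\n", (unsigned)crc,
	       (unsigned)(crc >> 29), (unsigned)((crc >> 26) & 7));
	return 0;
}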
+ */ + +static void do_set_multicast_list(struct net_device *dev) +{ + long e8390_base = dev->base_addr; + int i; + struct ei_device *ei_local = netdev_priv(dev); + + if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { + memset(ei_local->mcfilter, 0, 8); + if (!netdev_mc_empty(dev)) + make_mc_bits(ei_local->mcfilter, dev); + } else { + /* set to accept-all */ + memset(ei_local->mcfilter, 0xFF, 8); + } + + outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); + for(i = 0; i < 8; i++) + { + outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i)); + } + outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD); + + if(dev->flags&IFF_PROMISC) + outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR); + else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) + outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR); + else + outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); + + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); +} + +/* + * Called without lock held. This is invoked from user context and may + * be parallel to just about everything else. Its also fairly quick and + * not called too often. Must protect against both bh and irq users + */ + +static void set_multicast_list(struct net_device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev_lock(dev), flags); + do_set_multicast_list(dev); + spin_unlock_irqrestore(&dev_lock(dev), flags); +} + +/* This page of functions should be 8390 generic */ +/* Follow National Semi's recommendations for initializing the "NIC". */ + +/** + * AX88190_init - initialize 8390 hardware + * @dev: network device to initialize + * @startp: boolean. non-zero value to initiate chip processing + * + * Must be called with lock held. + */ + +static void AX88190_init(struct net_device *dev, int startp) +{ + axnet_dev_t *info = PRIV(dev); + long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int i; + int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48; + + if(sizeof(struct e8390_pkt_hdr)!=4) + panic("8390.c: header struct mispacked\n"); + /* Follow National Semi's recommendations for initing the DP83902. */ + outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ + outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ + /* Clear the remote byte count registers. */ + outb_p(0x00, e8390_base + EN0_RCNTLO); + outb_p(0x00, e8390_base + EN0_RCNTHI); + /* Set to monitor and loopback mode -- this is vital!. */ + outb_p(E8390_RXOFF|0x40, e8390_base + EN0_RXCR); /* 0x60 */ + outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ + /* Set the transmit page and receive ring. */ + outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); + ei_local->tx1 = ei_local->tx2 = 0; + outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); + outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/ + ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ + outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); + /* Clear the pending interrupts and mask. */ + outb_p(0xFF, e8390_base + EN0_ISR); + outb_p(0x00, e8390_base + EN0_IMR); + + /* Copy the station address into the DS8390 registers. */ + + outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */ + for(i = 0; i < 6; i++) + { + outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); + if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i]) + netdev_err(dev, "Hw. 
address read/write mismap %d\n", i); + } + + outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); + outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); + + netif_start_queue(dev); + ei_local->tx1 = ei_local->tx2 = 0; + ei_local->txing = 0; + + if (info->flags & IS_AX88790) /* select Internal PHY */ + outb(0x10, e8390_base + AXNET_GPIO); + + if (startp) + { + outb_p(0xff, e8390_base + EN0_ISR); + outb_p(ENISR_ALL, e8390_base + EN0_IMR); + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); + outb_p(E8390_TXCONFIG | info->duplex_flag, + e8390_base + EN0_TXCR); /* xmit on. */ + /* 3c503 TechMan says rxconfig only after the NIC is started. */ + outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); /* rx on, */ + do_set_multicast_list(dev); /* (re)load the mcast table */ + } +} + +/* Trigger a transmit start, assuming the length is valid. + Always called with the page lock held */ + +static void NS8390_trigger_send(struct net_device *dev, unsigned int length, + int start_page) +{ + long e8390_base = dev->base_addr; + struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev); + + if (inb_p(e8390_base) & E8390_TRANS) + { + netdev_warn(dev, "trigger_send() called with the transmitter busy\n"); + return; + } + outb_p(length & 0xff, e8390_base + EN0_TCNTLO); + outb_p(length >> 8, e8390_base + EN0_TCNTHI); + outb_p(start_page, e8390_base + EN0_TPSR); + outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD); +} diff --git a/drivers/net/ethernet/8390/e2100.c b/drivers/net/ethernet/8390/e2100.c new file mode 100644 index 0000000..d50a999 --- /dev/null +++ b/drivers/net/ethernet/8390/e2100.c @@ -0,0 +1,490 @@ +/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */ +/* + Written 1993-1994 by Donald Becker. + + Copyright 1994 by Donald Becker. + Copyright 1993 United States Government as represented by the + Director, National Security Agency. This software may be used and + distributed according to the terms of the GNU General Public License, + incorporated herein by reference. + + This is a driver for the Cabletron E2100 series ethercards. + + The Author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + The E2100 series ethercard is a fairly generic shared memory 8390 + implementation. The only unusual aspect is the way the shared memory + registers are set: first you do an inb() in what is normally the + station address region, and the low three bits of next outb() *address* + is used as the write value for that register. Either someone wasn't + too used to dem bit en bites, or they were trying to obfuscate the + programming interface. + + There is an additional complication when setting the window on the packet + buffer. You must first do a read into the packet buffer region with the + low 8 address bits the address setting the page for the start of the packet + buffer window, and then do the above operation. See mem_on() for details. + + One bug on the chip is that even a hard reset won't disable the memory + window, usually resulting in a hung machine if mem_off() isn't called. + If this happens, you must power down the machine for about 30 seconds. 
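The register scheme described in the header comment means a "write" is really an address computation: an inb() arms the ASIC, and the low three bits of the next outb() *address* carry the value, while the byte actually written is ignored. A pure-arithmetic sketch of how the driver builds that address (no real port I/O, constant as in the driver below):

#include <stdio.h>

#define E21_ASIC 0x10	/* ASIC offset, as defined in the driver */

/* Effective outb() address for writing value v to an E21 register. */
static unsigned int e21_outb_port(unsigned int ioaddr, unsigned int value)
{
	return ioaddr + E21_ASIC + (value & 7);
}

int main(void)
{
	/* e.g. programming IRQ 10: the low bits 010 ride in the address */
	printf("%#x\n", e21_outb_port(0x300, 10));
	return 0;	/* prints 0x312 */
}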
+*/ + +static const char version[] = + "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8390.h" + +#define DRV_NAME "e2100" + +static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0}; + +/* Offsets from the base_addr. + Read from the ASIC register, and the low three bits of the next outb() + address is used to set the corresponding register. */ +#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */ +#define E21_ASIC 0x10 +#define E21_MEM_ENABLE 0x10 +#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */ +#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */ +#define E21_MEM_BASE 0x11 +#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */ +#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */ +#define E21_MEDIA 0x14 /* (alias). */ +#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */ +#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */ +#define E21_SAPROM 0x10 /* Offset to station address data. */ +#define E21_IO_EXTENT 0x20 + +static inline void mem_on(short port, volatile char __iomem *mem_base, + unsigned char start_page ) +{ + /* This is a little weird: set the shared memory window by doing a + read. The low address bits specify the starting page. */ + readb(mem_base+start_page); + inb(port + E21_MEM_ENABLE); + outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON); +} + +static inline void mem_off(short port) +{ + inb(port + E21_MEM_ENABLE); + outb(0x00, port + E21_MEM_ENABLE); +} + +/* In other drivers I put the TX pages first, but the E2100 window circuitry + is designed to have a 4K Tx region last. The windowing circuitry wraps the + window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring + appear contiguously in the window. */ +#define E21_RX_START_PG 0x00 /* First page of RX buffer */ +#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */ +#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */ +#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */ + +static int e21_probe1(struct net_device *dev, int ioaddr); + +static int e21_open(struct net_device *dev); +static void e21_reset_8390(struct net_device *dev); +static void e21_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void e21_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); +static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static int e21_open(struct net_device *dev); +static int e21_close(struct net_device *dev); + + +/* Probe for the E2100 series ethercards. These cards have an 8390 at the + base address and the station address at both offset 0x10 and 0x18. I read + the station address from offset 0x18 to avoid the dataport of NE2000 + ethercards, and look for Ctron's unique ID (first three octets of the + station address). + */ + +static int __init do_e2100_probe(struct net_device *dev) +{ + int *port; + int base_addr = dev->base_addr; + int irq = dev->irq; + + if (base_addr > 0x1ff) /* Check a single specified location. */ + return e21_probe1(dev, base_addr); + else if (base_addr != 0) /* Don't probe at all. 
*/ + return -ENXIO; + + for (port = e21_probe_list; *port; port++) { + dev->irq = irq; + if (e21_probe1(dev, *port) == 0) + return 0; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init e2100_probe(int unit) +{ + struct net_device *dev = alloc_ei_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_e2100_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops e21_netdev_ops = { + .ndo_open = e21_open, + .ndo_stop = e21_close, + + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + +static int __init e21_probe1(struct net_device *dev, int ioaddr) +{ + int i, status, retval; + unsigned char *station_addr = dev->dev_addr; + static unsigned version_printed; + + if (!request_region(ioaddr, E21_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + /* First check the station address for the Ctron prefix. */ + if (inb(ioaddr + E21_SAPROM + 0) != 0x00 || + inb(ioaddr + E21_SAPROM + 1) != 0x00 || + inb(ioaddr + E21_SAPROM + 2) != 0x1d) { + retval = -ENODEV; + goto out; + } + + /* Verify by making certain that there is a 8390 at there. */ + outb(E8390_NODMA + E8390_STOP, ioaddr); + udelay(1); /* we want to delay one I/O cycle - which is 2MHz */ + status = inb(ioaddr); + if (status != 0x21 && status != 0x23) { + retval = -ENODEV; + goto out; + } + + /* Read the station address PROM. */ + for (i = 0; i < 6; i++) + station_addr[i] = inb(ioaddr + E21_SAPROM + i); + + inb(ioaddr + E21_MEDIA); /* Point to media selection. */ + outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */ + + if (ei_debug && version_printed++ == 0) + printk(version); + + for (i = 0; i < 6; i++) + printk(" %02X", station_addr[i]); + + if (dev->irq < 2) { + static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4}; + for (i = 0; i < ARRAY_SIZE(irqlist); i++) + if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) { + dev->irq = irqlist[i]; + break; + } + if (i >= ARRAY_SIZE(irqlist)) { + printk(" unable to get IRQ %d.\n", dev->irq); + retval = -EAGAIN; + goto out; + } + } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */ + dev->irq = 9; + + /* The 8390 is at the base address. */ + dev->base_addr = ioaddr; + + ei_status.name = "E2100"; + ei_status.word16 = 1; + ei_status.tx_start_page = E21_TX_START_PG; + ei_status.rx_start_page = E21_RX_START_PG; + ei_status.stop_page = E21_RX_STOP_PG; + ei_status.saved_irq = dev->irq; + + /* Check the media port used. The port can be passed in on the + low mem_end bits. */ + if (dev->mem_end & 15) + dev->if_port = dev->mem_end & 7; + else { + dev->if_port = 0; + inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */ + for(i = 0; i < 6; i++) + if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) { + dev->if_port = 1; + break; + } + } + + /* Never map in the E21 shared memory unless you are actively using it. + Also, the shared memory has effective only one setting -- spread all + over the 128K region! 
*/ + if (dev->mem_start == 0) + dev->mem_start = 0xd0000; + + ei_status.mem = ioremap(dev->mem_start, 2*1024); + if (!ei_status.mem) { + printk("unable to remap memory\n"); + retval = -EAGAIN; + goto out; + } + +#ifdef notdef + /* These values are unused. The E2100 has a 2K window into the packet + buffer. The window can be set to start on any page boundary. */ + ei_status.rmem_start = dev->mem_start + TX_PAGES*256; + dev->mem_end = ei_status.rmem_end = dev->mem_start + 2*1024; +#endif + + printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq, + dev->if_port ? "secondary" : "primary", dev->mem_start); + + ei_status.reset_8390 = &e21_reset_8390; + ei_status.block_input = &e21_block_input; + ei_status.block_output = &e21_block_output; + ei_status.get_8390_hdr = &e21_get_8390_hdr; + + dev->netdev_ops = &e21_netdev_ops; + NS8390_init(dev, 0); + + retval = register_netdev(dev); + if (retval) + goto out; + return 0; +out: + release_region(ioaddr, E21_IO_EXTENT); + return retval; +} + +static int +e21_open(struct net_device *dev) +{ + short ioaddr = dev->base_addr; + int retval; + + if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) + return retval; + + /* Set the interrupt line and memory base on the hardware. */ + inb(ioaddr + E21_IRQ_LOW); + outb(0, ioaddr + E21_ASIC + (dev->irq & 7)); + inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */ + outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0) + + (dev->if_port ? E21_ALT_IFPORT : 0)); + inb(ioaddr + E21_MEM_BASE); + outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7)); + + ei_open(dev); + return 0; +} + +static void +e21_reset_8390(struct net_device *dev) +{ + short ioaddr = dev->base_addr; + + outb(0x01, ioaddr); + if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies); + ei_status.txing = 0; + + /* Set up the ASIC registers, just in case something changed them. */ + + if (ei_debug > 1) printk("reset done\n"); +} + +/* Grab the 8390 specific header. We put the 2k window so the header page + appears at the start of the shared memory. */ + +static void +e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + + short ioaddr = dev->base_addr; + char __iomem *shared_mem = ei_status.mem; + + mem_on(ioaddr, shared_mem, ring_page); + +#ifdef notdef + /* Officially this is what we are doing, but the readl() is faster */ + memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr)); +#else + ((unsigned int*)hdr)[0] = readl(shared_mem); +#endif + + /* Turn off memory access: we would need to reprogram the window anyway. */ + mem_off(ioaddr); + +} + +/* Block input and output are easy on shared memory ethercards. + The E21xx makes block_input() especially easy by wrapping the top + ring buffer to the bottom automatically. */ +static void +e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + short ioaddr = dev->base_addr; + char __iomem *shared_mem = ei_status.mem; + + mem_on(ioaddr, shared_mem, (ring_offset>>8)); + + memcpy_fromio(skb->data, ei_status.mem + (ring_offset & 0xff), count); + + mem_off(ioaddr); +} + +static void +e21_block_output(struct net_device *dev, int count, const unsigned char *buf, + int start_page) +{ + short ioaddr = dev->base_addr; + volatile char __iomem *shared_mem = ei_status.mem; + + /* Set the shared memory window start by doing a read, with the low address + bits specifying the starting page. 
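e21_block_input() above converts a ring offset into a window page (ring_offset >> 8, handed to mem_on()) plus an offset inside the 256-byte page, and e21_open() encodes the shared-memory base in three address bits with (mem_start >> 17) & 7. Both are plain shifts, modelled here:

#include <stdio.h>

/* Ring offset -> (window start page, offset inside that page). */
static void e21_window(unsigned int ring_offset,
		       unsigned int *page, unsigned int *off)
{
	*page = ring_offset >> 8;	/* argument given to mem_on() */
	*off  = ring_offset & 0xff;	/* offset into the mapped window */
}

int main(void)
{
	unsigned int page, off;

	e21_window(0x2f40, &page, &off);
	printf("page=%#x off=%#x membits=%#lx\n", page, off,
	       (0xd0000UL >> 17) & 7);	/* mem base bits, as in e21_open() */
	return 0;	/* page=0x2f off=0x40 membits=0x6 */
}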
*/ + readb(shared_mem + start_page); + mem_on(ioaddr, shared_mem, start_page); + + memcpy_toio(shared_mem, buf, count); + mem_off(ioaddr); +} + +static int +e21_close(struct net_device *dev) +{ + short ioaddr = dev->base_addr; + + if (ei_debug > 1) + printk("%s: Shutting down ethercard.\n", dev->name); + + free_irq(dev->irq, dev); + dev->irq = ei_status.saved_irq; + + /* Shut off the interrupt line and secondary interface. */ + inb(ioaddr + E21_IRQ_LOW); + outb(0, ioaddr + E21_ASIC); + inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */ + outb(0, ioaddr + E21_ASIC); + + ei_close(dev); + + /* Double-check that the memory has been turned off, because really + really bad things happen if it isn't. */ + mem_off(ioaddr); + + return 0; +} + + +#ifdef MODULE +#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */ +static struct net_device *dev_e21[MAX_E21_CARDS]; +static int io[MAX_E21_CARDS]; +static int irq[MAX_E21_CARDS]; +static int mem[MAX_E21_CARDS]; +static int xcvr[MAX_E21_CARDS]; /* choose int. or ext. xcvr */ + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(mem, int, NULL, 0); +module_param_array(xcvr, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s)"); +MODULE_PARM_DESC(mem, " memory base address(es)"); +MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)"); +MODULE_DESCRIPTION("Cabletron E2100 ISA ethernet driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that only a single autoprobe takes place per call. +ISA device autoprobes on a running machine are not recommended. */ + +int __init init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) { + if (io[this_dev] == 0) { + if (this_dev != 0) break; /* only autoprobe 1st one */ + printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n"); + } + dev = alloc_ei_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + dev->mem_start = mem[this_dev]; + dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */ + if (do_e2100_probe(dev) == 0) { + dev_e21[found++] = dev; + continue; + } + free_netdev(dev); + printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + /* NB: e21_close() handles free_irq */ + iounmap(ei_status.mem); + release_region(dev->base_addr, E21_IO_EXTENT); +} + +void __exit +cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) { + struct net_device *dev = dev_e21[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c new file mode 100644 index 0000000..7a09575 --- /dev/null +++ b/drivers/net/ethernet/8390/es3210.c @@ -0,0 +1,446 @@ +/* + es3210.c + + Linux driver for Racal-Interlan ES3210 EISA Network Adapter + + Copyright (C) 1996, Paul Gortmaker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Information and Code Sources: + + 1) The existing myriad of Linux 8390 drivers written by Donald Becker. + + 2) Once again Russ Nelson's asm packet driver provided additional info. 
+ + 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg files. + Too bad it doesn't work -- see below. + + The ES3210 is an EISA shared memory NS8390 implementation. Note + that all memory copies to/from the board must be 32bit transfers. + Which rules out using eth_io_copy_and_sum() in this driver. + + Apparently there are two slightly different revisions of the + card, since there are two distinct EISA cfg files (!rii0101.cfg + and !rii0102.cfg) One has media select in the cfg file and the + other doesn't. Hopefully this will work with either. + + That is about all I can tell you about it, having never actually + even seen one of these cards. :) Try http://www.interlan.com + if you want more info. + + Thanks go to Mark Salazar for testing v0.02 of this driver. + + Bugs, to-fix, etc: + + 1) The EISA cfg ports that are *supposed* to have the IRQ and shared + mem values just read 0xff all the time. Hrrmpf. Apparently the + same happens with the packet driver as the code for reading + these registers is disabled there. In the meantime, boot with: + ether=,0,0x,eth0 to override the IRQ and + shared memory detection. (The i/o port detection is okay.) + + 2) Module support currently untested. Probably works though. + +*/ + +static const char version[] = + "es3210.c: Driver revision v0.03, 14/09/96\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8390.h" + +static int es_probe1(struct net_device *dev, int ioaddr); + +static void es_reset_8390(struct net_device *dev); + +static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); +static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); +static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); + +#define ES_START_PG 0x00 /* First page of TX buffer */ +#define ES_STOP_PG 0x40 /* Last page +1 of RX ring */ + +#define ES_IO_EXTENT 0x37 /* The cfg file says 0xc90 -> 0xcc7 */ +#define ES_ID_PORT 0xc80 /* Same for all EISA cards */ +#define ES_SA_PROM 0xc90 /* Start of e'net addr. */ +#define ES_RESET_PORT 0xc84 /* From the packet driver source */ +#define ES_NIC_OFFSET 0xca0 /* Hello, the 8390 is *here* */ + +#define ES_ADDR0 0x02 /* 3 byte vendor prefix */ +#define ES_ADDR1 0x07 +#define ES_ADDR2 0x01 + +/* + * Two card revisions. EISA ID's are always rev. minor, rev. major,, and + * then the three vendor letters stored in 5 bits each, with an "a" = 1. + * For eg: "rii" = 10010 01001 01001 = 0x4929, which is how the EISA + * config utility determines automagically what config file(s) to use. + */ +#define ES_EISA_ID1 0x01012949 /* !rii0101.cfg */ +#define ES_EISA_ID2 0x02012949 /* !rii0102.cfg */ + +#define ES_CFG1 0xcc0 /* IOPORT(1) --> IOPORT(6) in cfg file */ +#define ES_CFG2 0xcc1 +#define ES_CFG3 0xcc2 +#define ES_CFG4 0xcc3 +#define ES_CFG5 0xcc4 +#define ES_CFG6 0xc84 /* NB: 0xc84 is also "reset" port. */ + +/* + * You can OR any of the following bits together and assign it + * to ES_DEBUG to get verbose driver info during operation. + * Some of these don't do anything yet. + */ + +#define ES_D_PROBE 0x01 +#define ES_D_RX_PKT 0x02 +#define ES_D_TX_PKT 0x04 +#define ED_D_IRQ 0x08 + +#define ES_DEBUG 0 + +static unsigned char lo_irq_map[] __initdata = {3, 4, 5, 6, 7, 9, 10}; +static unsigned char hi_irq_map[] __initdata = {11, 12, 0, 14, 0, 0, 0, 15}; + +/* + * Probe for the card. 
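The EISA ID comment above packs three vendor letters into 15 bits, five per letter with "a" = 1. Recomputing the Racal-Interlan prefix shows where 0x4929 comes from:

#include <stdio.h>

/* Pack three EISA vendor letters, 5 bits each, 'a' = 1. */
static unsigned int eisa_vendor(const char v[3])
{
	return ((v[0] - 'a' + 1) << 10) |
	       ((v[1] - 'a' + 1) << 5) |
	        (v[2] - 'a' + 1);
}

int main(void)
{
	printf("%#x\n", eisa_vendor("rii"));
	return 0;	/* 'r'=18, 'i'=9 -> 10010 01001 01001 = 0x4929 */
}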
The best way is to read the EISA ID if it
+ * is known. Then we check the prefix of the station address
+ * PROM for a match against the Racal-Interlan assigned value.
+ */
+
+static int __init do_es_probe(struct net_device *dev)
+{
+	unsigned short ioaddr = dev->base_addr;
+	int irq = dev->irq;
+	int mem_start = dev->mem_start;
+
+	if (ioaddr > 0x1ff)		/* Check a single specified location. */
+		return es_probe1(dev, ioaddr);
+	else if (ioaddr > 0)		/* Don't probe at all. */
+		return -ENXIO;
+
+	if (!EISA_bus) {
+#if ES_DEBUG & ES_D_PROBE
+		printk("es3210.c: Not EISA bus. Not probing high ports.\n");
+#endif
+		return -ENXIO;
+	}
+
+	/* EISA spec allows for up to 16 slots, but 8 is typical. */
+	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+		if (es_probe1(dev, ioaddr) == 0)
+			return 0;
+		dev->irq = irq;
+		dev->mem_start = mem_start;
+	}
+
+	return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init es_probe(int unit)
+{
+	struct net_device *dev = alloc_ei_netdev();
+	int err;
+
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	sprintf(dev->name, "eth%d", unit);
+	netdev_boot_setup_check(dev);
+
+	err = do_es_probe(dev);
+	if (err)
+		goto out;
+	return dev;
+out:
+	free_netdev(dev);
+	return ERR_PTR(err);
+}
+#endif
+
+static int __init es_probe1(struct net_device *dev, int ioaddr)
+{
+	int i, retval;
+	unsigned long eisa_id;
+
+	if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210"))
+		return -ENODEV;
+
+#if ES_DEBUG & ES_D_PROBE
+	printk("es3210.c: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + ES_ID_PORT));
+	printk("es3210.c: config regs: %#x %#x %#x %#x %#x %#x\n",
+		inb(ioaddr + ES_CFG1), inb(ioaddr + ES_CFG2), inb(ioaddr + ES_CFG3),
+		inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6));
+#endif
+
+/* Check the EISA ID of the card. */
+	eisa_id = inl(ioaddr + ES_ID_PORT);
+	if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) {
+		retval = -ENODEV;
+		goto out;
+	}
+
+	for (i = 0; i < ETHER_ADDR_LEN ; i++)
+		dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
+
+/* Check the Racal vendor ID as well. */
+	if (dev->dev_addr[0] != ES_ADDR0 ||
+	    dev->dev_addr[1] != ES_ADDR1 ||
+	    dev->dev_addr[2] != ES_ADDR2) {
+		printk("es3210.c: card not found %pM (invalid_prefix).\n",
+		       dev->dev_addr);
+		retval = -ENODEV;
+		goto out;
+	}
+
+	printk("es3210.c: ES3210 rev. %ld at %#x, node %pM",
+	       eisa_id>>24, ioaddr, dev->dev_addr);
+
+	/* Snarf the interrupt now. */
+	if (dev->irq == 0) {
+		unsigned char hi_irq = inb(ioaddr + ES_CFG2) & 0x07;
+		unsigned char lo_irq = inb(ioaddr + ES_CFG1) & 0xfe;
+
+		if (hi_irq != 0) {
+			dev->irq = hi_irq_map[hi_irq - 1];
+		} else {
+			int i = 0;
+			while (lo_irq > (1<<i)) i++;
+			dev->irq = lo_irq_map[i];
+		}
+		printk(" using IRQ %d", dev->irq);
+#if ES_DEBUG & ES_D_PROBE
+		printk("es3210.c: hi_irq %#x, lo_irq %#x, dev->irq = %d\n",
+		       hi_irq, lo_irq, dev->irq);
+#endif
+	} else {
+		if (dev->irq == 2)
+			dev->irq = 9;			/* Doh! 
*/ + printk(" assigning IRQ %d", dev->irq); + } + + if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) { + printk (" unable to get IRQ %d.\n", dev->irq); + retval = -EAGAIN; + goto out; + } + + if (dev->mem_start == 0) { + unsigned char mem_enabled = inb(ioaddr + ES_CFG2) & 0xc0; + unsigned char mem_bits = inb(ioaddr + ES_CFG3) & 0x07; + + if (mem_enabled != 0x80) { + printk(" shared mem disabled - giving up\n"); + retval = -ENXIO; + goto out1; + } + dev->mem_start = 0xC0000 + mem_bits*0x4000; + printk(" using "); + } else { + printk(" assigning "); + } + + ei_status.mem = ioremap(dev->mem_start, (ES_STOP_PG - ES_START_PG)*256); + if (!ei_status.mem) { + printk("ioremap failed - giving up\n"); + retval = -ENXIO; + goto out1; + } + + dev->mem_end = dev->mem_start + (ES_STOP_PG - ES_START_PG)*256; + + printk("mem %#lx-%#lx\n", dev->mem_start, dev->mem_end-1); + +#if ES_DEBUG & ES_D_PROBE + if (inb(ioaddr + ES_CFG5)) + printk("es3210: Warning - DMA channel enabled, but not used here.\n"); +#endif + /* Note, point at the 8390, and not the card... */ + dev->base_addr = ioaddr + ES_NIC_OFFSET; + + ei_status.name = "ES3210"; + ei_status.tx_start_page = ES_START_PG; + ei_status.rx_start_page = ES_START_PG + TX_PAGES; + ei_status.stop_page = ES_STOP_PG; + ei_status.word16 = 1; + + if (ei_debug > 0) + printk(version); + + ei_status.reset_8390 = &es_reset_8390; + ei_status.block_input = &es_block_input; + ei_status.block_output = &es_block_output; + ei_status.get_8390_hdr = &es_get_8390_hdr; + + dev->netdev_ops = &ei_netdev_ops; + NS8390_init(dev, 0); + + retval = register_netdev(dev); + if (retval) + goto out1; + return 0; +out1: + free_irq(dev->irq, dev); +out: + release_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT); + return retval; +} + +/* + * Reset as per the packet driver method. Judging by the EISA cfg + * file, this just toggles the "Board Enable" bits (bit 2 and 0). + */ + +static void es_reset_8390(struct net_device *dev) +{ + unsigned short ioaddr = dev->base_addr; + unsigned long end; + + outb(0x04, ioaddr + ES_RESET_PORT); + if (ei_debug > 1) printk("%s: resetting the ES3210...", dev->name); + + end = jiffies + 2*HZ/100; + while ((signed)(end - jiffies) > 0) continue; + + ei_status.txing = 0; + outb(0x01, ioaddr + ES_RESET_PORT); + if (ei_debug > 1) printk("reset done\n"); +} + +/* + * Note: In the following three functions is the implicit assumption + * that the associated memcpy will only use "rep; movsl" as long as + * we keep the counts as some multiple of doublewords. This is a + * requirement of the hardware, and also prevents us from using + * eth_io_copy_and_sum() since we can't guarantee it will limit + * itself to doubleword access. + */ + +/* + * Grab the 8390 specific header. Similar to the block_input routine, but + * we don't need to be concerned with ring wrap as the header will be at + * the start of a page, so we optimize accordingly. (A single doubleword.) + */ + +static void +es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + void __iomem *hdr_start = ei_status.mem + ((ring_page - ES_START_PG)<<8); + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); + hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ +} + +/* + * Block input and output are easy on shared memory ethercards, the only + * complication is when the ring buffer wraps. The count will already + * be rounded up to a doubleword value via es_get_8390_hdr() above. 
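Both constraints named above come down to two small pieces of arithmetic: every count is rounded up to a whole number of doublewords (the ES3210's shared memory only tolerates 32-bit transfers), and a frame that runs past the end of the ring is copied in two pieces. A host-side model, with the ring size a hypothetical stand-in for ES_STOP_PG * 256:

#include <stdio.h>
#include <string.h>

#define RING_SIZE 0x4000	/* stand-in for ES_STOP_PG * 256 */

/* Round a byte count up to a whole number of doublewords. */
static unsigned int round_to_dword(unsigned int count)
{
	return (count + 3) & ~3u;
}

/* Copy count bytes starting at ring_offset, wrapping at RING_SIZE. */
static void ring_copy(char *dst, const char *ring,
		      unsigned int ring_offset, unsigned int count)
{
	if (ring_offset + count > RING_SIZE) {
		unsigned int semi = RING_SIZE - ring_offset;

		memcpy(dst, ring + ring_offset, semi);	/* up to the end */
		memcpy(dst + semi, ring, count - semi);	/* wrapped tail */
	} else {
		memcpy(dst, ring + ring_offset, count);
	}
}

int main(void)
{
	static char ring[RING_SIZE];
	char pkt[256];

	memset(ring, 'x', sizeof(ring));
	ring_copy(pkt, ring, RING_SIZE - 100, round_to_dword(253));
	printf("%u %c\n", round_to_dword(253), pkt[255]);
	return 0;	/* prints: 256 x */
}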
+ */ + +static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, + int ring_offset) +{ + void __iomem *xfer_start = ei_status.mem + ring_offset - ES_START_PG*256; + + if (ring_offset + count > ES_STOP_PG*256) { + /* Packet wraps over end of ring buffer. */ + int semi_count = ES_STOP_PG*256 - ring_offset; + memcpy_fromio(skb->data, xfer_start, semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, ei_status.mem, count); + } else { + /* Packet is in one chunk. */ + memcpy_fromio(skb->data, xfer_start, count); + } +} + +static void es_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + void __iomem *shmem = ei_status.mem + ((start_page - ES_START_PG)<<8); + + count = (count + 3) & ~3; /* Round up to doubleword */ + memcpy_toio(shmem, buf, count); +} + +#ifdef MODULE +#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */ +#define NAMELEN 8 /* # of chars for storing dev->name */ +static struct net_device *dev_es3210[MAX_ES_CARDS]; +static int io[MAX_ES_CARDS]; +static int irq[MAX_ES_CARDS]; +static int mem[MAX_ES_CARDS]; + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(mem, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s)"); +MODULE_PARM_DESC(mem, "memory base address(es)"); +MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); +MODULE_LICENSE("GPL"); + +int __init init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) { + if (io[this_dev] == 0 && this_dev != 0) + break; + dev = alloc_ei_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + dev->mem_start = mem[this_dev]; + if (do_es_probe(dev) == 0) { + dev_es3210[found++] = dev; + continue; + } + free_netdev(dev); + printk(KERN_WARNING "es3210.c: No es3210 card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + free_irq(dev->irq, dev); + release_region(dev->base_addr, ES_IO_EXTENT); + iounmap(ei_status.mem); +} + +void __exit +cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) { + struct net_device *dev = dev_es3210[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ + diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c new file mode 100644 index 0000000..cf851fa --- /dev/null +++ b/drivers/net/ethernet/8390/etherh.c @@ -0,0 +1,866 @@ +/* + * linux/drivers/acorn/net/etherh.c + * + * Copyright (C) 2000-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * NS8390 I-cubed EtherH and ANT EtherM specific driver + * Thanks to I-Cubed for information on their cards. + * EtherM conversion (C) 1999 Chris Kemp and Tim Watterton + * EtherM integration (C) 2000 Aleph One Ltd (Tak-Shing Chan) + * EtherM integration re-engineered by Russell King. 
+ * + * Changelog: + * 08-12-1996 RMK 1.00 Created + * RMK 1.03 Added support for EtherLan500 cards + * 23-11-1997 RMK 1.04 Added media autodetection + * 16-04-1998 RMK 1.05 Improved media autodetection + * 10-02-2000 RMK 1.06 Updated for 2.3.43 + * 13-05-2000 RMK 1.07 Updated for 2.3.99-pre8 + * 12-10-1999 CK/TEW EtherM driver first release + * 21-12-2000 TTC EtherH/EtherM integration + * 25-12-2000 RMK 1.08 Clean integration of EtherM into this driver. + * 03-01-2002 RMK 1.09 Always enable IRQs if we're in the nic slot. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define EI_SHIFT(x) (ei_local->reg_offset[x]) + +#define ei_inb(_p) readb((void __iomem *)_p) +#define ei_outb(_v,_p) writeb(_v,(void __iomem *)_p) +#define ei_inb_p(_p) readb((void __iomem *)_p) +#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p) + +#define NET_DEBUG 0 +#define DEBUG_INIT 2 + +#define DRV_NAME "etherh" +#define DRV_VERSION "1.11" + +static char version[] __initdata = + "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n"; + +#include "lib8390.c" + +static unsigned int net_debug = NET_DEBUG; + +struct etherh_priv { + void __iomem *ioc_fast; + void __iomem *memc; + void __iomem *dma_base; + unsigned int id; + void __iomem *ctrl_port; + unsigned char ctrl; + u32 supported; +}; + +struct etherh_data { + unsigned long ns8390_offset; + unsigned long dataport_offset; + unsigned long ctrlport_offset; + int ctrl_ioc; + const char name[16]; + u32 supported; + unsigned char tx_start_page; + unsigned char stop_page; +}; + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("EtherH/EtherM driver"); +MODULE_LICENSE("GPL"); + +#define ETHERH500_DATAPORT 0x800 /* MEMC */ +#define ETHERH500_NS8390 0x000 /* MEMC */ +#define ETHERH500_CTRLPORT 0x800 /* IOC */ + +#define ETHERH600_DATAPORT 0x040 /* MEMC */ +#define ETHERH600_NS8390 0x800 /* MEMC */ +#define ETHERH600_CTRLPORT 0x200 /* MEMC */ + +#define ETHERH_CP_IE 1 +#define ETHERH_CP_IF 2 +#define ETHERH_CP_HEARTBEAT 2 + +#define ETHERH_TX_START_PAGE 1 +#define ETHERH_STOP_PAGE 127 + +/* + * These came from CK/TEW + */ +#define ETHERM_DATAPORT 0x200 /* MEMC */ +#define ETHERM_NS8390 0x800 /* MEMC */ +#define ETHERM_CTRLPORT 0x23c /* MEMC */ + +#define ETHERM_TX_START_PAGE 64 +#define ETHERM_STOP_PAGE 127 + +/* ------------------------------------------------------------------------ */ + +#define etherh_priv(dev) \ + ((struct etherh_priv *)(((char *)netdev_priv(dev)) + sizeof(struct ei_device))) + +static inline void etherh_set_ctrl(struct etherh_priv *eh, unsigned char mask) +{ + unsigned char ctrl = eh->ctrl | mask; + eh->ctrl = ctrl; + writeb(ctrl, eh->ctrl_port); +} + +static inline void etherh_clr_ctrl(struct etherh_priv *eh, unsigned char mask) +{ + unsigned char ctrl = eh->ctrl & ~mask; + eh->ctrl = ctrl; + writeb(ctrl, eh->ctrl_port); +} + +static inline unsigned int etherh_get_stat(struct etherh_priv *eh) +{ + return readb(eh->ctrl_port); +} + + + + +static void etherh_irq_enable(ecard_t *ec, int irqnr) +{ + struct etherh_priv *eh = ec->irq_data; + + etherh_set_ctrl(eh, ETHERH_CP_IE); +} + +static void etherh_irq_disable(ecard_t *ec, int irqnr) +{ + struct etherh_priv *eh = ec->irq_data; + + etherh_clr_ctrl(eh, ETHERH_CP_IE); +} + +static expansioncard_ops_t etherh_ops = { + .irqenable = etherh_irq_enable, + .irqdisable = etherh_irq_disable, +}; + + + + +static void 
+etherh_setif(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned long flags; + void __iomem *addr; + + local_irq_save(flags); + + /* set the interface type */ + switch (etherh_priv(dev)->id) { + case PROD_I3_ETHERLAN600: + case PROD_I3_ETHERLAN600A: + addr = (void __iomem *)dev->base_addr + EN0_RCNTHI; + + switch (dev->if_port) { + case IF_PORT_10BASE2: + writeb((readb(addr) & 0xf8) | 1, addr); + break; + case IF_PORT_10BASET: + writeb((readb(addr) & 0xf8), addr); + break; + } + break; + + case PROD_I3_ETHERLAN500: + switch (dev->if_port) { + case IF_PORT_10BASE2: + etherh_clr_ctrl(etherh_priv(dev), ETHERH_CP_IF); + break; + + case IF_PORT_10BASET: + etherh_set_ctrl(etherh_priv(dev), ETHERH_CP_IF); + break; + } + break; + + default: + break; + } + + local_irq_restore(flags); +} + +static int +etherh_getifstat(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *addr; + int stat = 0; + + switch (etherh_priv(dev)->id) { + case PROD_I3_ETHERLAN600: + case PROD_I3_ETHERLAN600A: + addr = (void __iomem *)dev->base_addr + EN0_RCNTHI; + switch (dev->if_port) { + case IF_PORT_10BASE2: + stat = 1; + break; + case IF_PORT_10BASET: + stat = readb(addr) & 4; + break; + } + break; + + case PROD_I3_ETHERLAN500: + switch (dev->if_port) { + case IF_PORT_10BASE2: + stat = 1; + break; + case IF_PORT_10BASET: + stat = etherh_get_stat(etherh_priv(dev)) & ETHERH_CP_HEARTBEAT; + break; + } + break; + + default: + stat = 0; + break; + } + + return stat != 0; +} + +/* + * Configure the interface. Note that we ignore the other + * parts of ifmap, since its mostly meaningless for this driver. + */ +static int etherh_set_config(struct net_device *dev, struct ifmap *map) +{ + switch (map->port) { + case IF_PORT_10BASE2: + case IF_PORT_10BASET: + /* + * If the user explicitly sets the interface + * media type, turn off automedia detection. + */ + dev->flags &= ~IFF_AUTOMEDIA; + dev->if_port = map->port; + break; + + default: + return -EINVAL; + } + + etherh_setif(dev); + + return 0; +} + +/* + * Reset the 8390 (hard reset). Note that we can't actually do this. + */ +static void +etherh_reset(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *addr = (void __iomem *)dev->base_addr; + + writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr); + + /* + * See if we need to change the interface type. + * Note that we use 'interface_num' as a flag + * to indicate that we need to change the media. + */ + if (dev->flags & IFF_AUTOMEDIA && ei_local->interface_num) { + ei_local->interface_num = 0; + + if (dev->if_port == IF_PORT_10BASET) + dev->if_port = IF_PORT_10BASE2; + else + dev->if_port = IF_PORT_10BASET; + + etherh_setif(dev); + } +} + +/* + * Write a block of data out to the 8390 + */ +static void +etherh_block_output (struct net_device *dev, int count, const unsigned char *buf, int start_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned long dma_start; + void __iomem *dma_base, *addr; + + if (ei_local->dmaing) { + printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: " + " DMAstat %d irqlock %d\n", dev->name, + ei_local->dmaing, ei_local->irqlock); + return; + } + + /* + * Make sure we have a round number of bytes if we're in word mode. 
+ */ + if (count & 1 && ei_local->word16) + count++; + + ei_local->dmaing = 1; + + addr = (void __iomem *)dev->base_addr; + dma_base = etherh_priv(dev)->dma_base; + + count = (count + 1) & ~1; + writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); + + writeb (0x42, addr + EN0_RCNTLO); + writeb (0x00, addr + EN0_RCNTHI); + writeb (0x42, addr + EN0_RSARLO); + writeb (0x00, addr + EN0_RSARHI); + writeb (E8390_RREAD | E8390_START, addr + E8390_CMD); + + udelay (1); + + writeb (ENISR_RDC, addr + EN0_ISR); + writeb (count, addr + EN0_RCNTLO); + writeb (count >> 8, addr + EN0_RCNTHI); + writeb (0, addr + EN0_RSARLO); + writeb (start_page, addr + EN0_RSARHI); + writeb (E8390_RWRITE | E8390_START, addr + E8390_CMD); + + if (ei_local->word16) + writesw (dma_base, buf, count >> 1); + else + writesb (dma_base, buf, count); + + dma_start = jiffies; + + while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ + printk(KERN_ERR "%s: timeout waiting for TX RDC\n", + dev->name); + etherh_reset (dev); + __NS8390_init (dev, 1); + break; + } + + writeb (ENISR_RDC, addr + EN0_ISR); + ei_local->dmaing = 0; +} + +/* + * Read a block of data from the 8390 + */ +static void +etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned char *buf; + void __iomem *dma_base, *addr; + + if (ei_local->dmaing) { + printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: " + " DMAstat %d irqlock %d\n", dev->name, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing = 1; + + addr = (void __iomem *)dev->base_addr; + dma_base = etherh_priv(dev)->dma_base; + + buf = skb->data; + writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); + writeb (count, addr + EN0_RCNTLO); + writeb (count >> 8, addr + EN0_RCNTHI); + writeb (ring_offset, addr + EN0_RSARLO); + writeb (ring_offset >> 8, addr + EN0_RSARHI); + writeb (E8390_RREAD | E8390_START, addr + E8390_CMD); + + if (ei_local->word16) { + readsw (dma_base, buf, count >> 1); + if (count & 1) + buf[count - 1] = readb (dma_base); + } else + readsb (dma_base, buf, count); + + writeb (ENISR_RDC, addr + EN0_ISR); + ei_local->dmaing = 0; +} + +/* + * Read a header from the 8390 + */ +static void +etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *dma_base, *addr; + + if (ei_local->dmaing) { + printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: " + " DMAstat %d irqlock %d\n", dev->name, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing = 1; + + addr = (void __iomem *)dev->base_addr; + dma_base = etherh_priv(dev)->dma_base; + + writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); + writeb (sizeof (*hdr), addr + EN0_RCNTLO); + writeb (0, addr + EN0_RCNTHI); + writeb (0, addr + EN0_RSARLO); + writeb (ring_page, addr + EN0_RSARHI); + writeb (E8390_RREAD | E8390_START, addr + E8390_CMD); + + if (ei_local->word16) + readsw (dma_base, hdr, sizeof (*hdr) >> 1); + else + readsb (dma_base, hdr, sizeof (*hdr)); + + writeb (ENISR_RDC, addr + EN0_ISR); + ei_local->dmaing = 0; +} + +/* + * Open/initialize the board. This is called (in the current kernel) + * sometime after booting when the 'ifconfig' program is run. 
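etherh_block_input() above handles odd lengths on the 16-bit data port by moving whole words with readsw() and fetching the final byte separately with readb(). A memory-to-memory model of that tail handling:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy count bytes from a 16-bit-wide source, odd trailing byte last. */
static void copy16(uint8_t *dst, const uint8_t *src, size_t count)
{
	size_t words = count >> 1;

	memcpy(dst, src, words * 2);		/* stands in for readsw() */
	if (count & 1)
		dst[count - 1] = src[count - 1];	/* stands in for readb() */
}

int main(void)
{
	uint8_t in[7] = { 1, 2, 3, 4, 5, 6, 7 }, out[7] = { 0 };

	copy16(out, in, sizeof(in));
	printf("%d\n", out[6]);	/* prints 7 */
	return 0;
}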
+ * + * This routine should set everything up anew at each open, even + * registers that "should" only need to be set once at boot, so that + * there is non-reboot way to recover if something goes wrong. + */ +static int +etherh_open(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + + if (!is_valid_ether_addr(dev->dev_addr)) { + printk(KERN_WARNING "%s: invalid ethernet MAC address\n", + dev->name); + return -EINVAL; + } + + if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev)) + return -EAGAIN; + + /* + * Make sure that we aren't going to change the + * media type on the next reset - we are about to + * do automedia manually now. + */ + ei_local->interface_num = 0; + + /* + * If we are doing automedia detection, do it now. + * This is more reliable than the 8390's detection. + */ + if (dev->flags & IFF_AUTOMEDIA) { + dev->if_port = IF_PORT_10BASET; + etherh_setif(dev); + mdelay(1); + if (!etherh_getifstat(dev)) { + dev->if_port = IF_PORT_10BASE2; + etherh_setif(dev); + } + } else + etherh_setif(dev); + + etherh_reset(dev); + __ei_open(dev); + + return 0; +} + +/* + * The inverse routine to etherh_open(). + */ +static int +etherh_close(struct net_device *dev) +{ + __ei_close (dev); + free_irq (dev->irq, dev); + return 0; +} + +/* + * Initialisation + */ + +static void __init etherh_banner(void) +{ + static int version_printed; + + if (net_debug && version_printed++ == 0) + printk(KERN_INFO "%s", version); +} + +/* + * Read the ethernet address string from the on board rom. + * This is an ascii string... + */ +static int __devinit etherh_addr(char *addr, struct expansion_card *ec) +{ + struct in_chunk_dir cd; + char *s; + + if (!ecard_readchunk(&cd, ec, 0xf5, 0)) { + printk(KERN_ERR "%s: unable to read podule description string\n", + dev_name(&ec->dev)); + goto no_addr; + } + + s = strchr(cd.d.string, '('); + if (s) { + int i; + + for (i = 0; i < 6; i++) { + addr[i] = simple_strtoul(s + 1, &s, 0x10); + if (*s != (i == 5? ')' : ':')) + break; + } + + if (i == 6) + return 0; + } + + printk(KERN_ERR "%s: unable to parse MAC address: %s\n", + dev_name(&ec->dev), cd.d.string); + + no_addr: + return -ENODEV; +} + +/* + * Create an ethernet address from the system serial number. + */ +static int __init etherm_addr(char *addr) +{ + unsigned int serial; + + if (system_serial_low == 0 && system_serial_high == 0) + return -ENODEV; + + serial = system_serial_low | system_serial_high; + + addr[0] = 0; + addr[1] = 0; + addr[2] = 0xa4; + addr[3] = 0x10 + (serial >> 24); + addr[4] = serial >> 16; + addr[5] = serial >> 8; + return 0; +} + +static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); +} + +static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + cmd->supported = etherh_priv(dev)->supported; + ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->duplex = DUPLEX_HALF; + cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; + cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ? 
+			AUTONEG_ENABLE : AUTONEG_DISABLE);
+	return 0;
+}
+
+static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	switch (cmd->autoneg) {
+	case AUTONEG_ENABLE:
+		dev->flags |= IFF_AUTOMEDIA;
+		break;
+
+	case AUTONEG_DISABLE:
+		switch (cmd->port) {
+		case PORT_TP:
+			dev->if_port = IF_PORT_10BASET;
+			break;
+
+		case PORT_BNC:
+			dev->if_port = IF_PORT_10BASE2;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+		dev->flags &= ~IFF_AUTOMEDIA;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	etherh_setif(dev);
+
+	return 0;
+}
+
+static const struct ethtool_ops etherh_ethtool_ops = {
+	.get_settings	= etherh_get_settings,
+	.set_settings	= etherh_set_settings,
+	.get_drvinfo	= etherh_get_drvinfo,
+};
+
+static const struct net_device_ops etherh_netdev_ops = {
+	.ndo_open		= etherh_open,
+	.ndo_stop		= etherh_close,
+	.ndo_set_config		= etherh_set_config,
+	.ndo_start_xmit		= __ei_start_xmit,
+	.ndo_tx_timeout		= __ei_tx_timeout,
+	.ndo_get_stats		= __ei_get_stats,
+	.ndo_set_multicast_list	= __ei_set_multicast_list,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= __ei_poll,
+#endif
+};
+
+static u32 etherh_regoffsets[16];
+static u32 etherm_regoffsets[16];
+
+static int __devinit
+etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+	const struct etherh_data *data = id->data;
+	struct ei_device *ei_local;
+	struct net_device *dev;
+	struct etherh_priv *eh;
+	int ret;
+
+	etherh_banner();
+
+	ret = ecard_request_resources(ec);
+	if (ret)
+		goto out;
+
+	dev = ____alloc_ei_netdev(sizeof(struct etherh_priv));
+	if (!dev) {
+		ret = -ENOMEM;
+		goto release;
+	}
+
+	SET_NETDEV_DEV(dev, &ec->dev);
+
+	dev->netdev_ops = &etherh_netdev_ops;
+	dev->irq = ec->irq;
+	dev->ethtool_ops = &etherh_ethtool_ops;
+
+	if (data->supported & SUPPORTED_Autoneg)
+		dev->flags |= IFF_AUTOMEDIA;
+	if (data->supported & SUPPORTED_TP) {
+		dev->flags |= IFF_PORTSEL;
+		dev->if_port = IF_PORT_10BASET;
+	} else if (data->supported & SUPPORTED_BNC) {
+		dev->flags |= IFF_PORTSEL;
+		dev->if_port = IF_PORT_10BASE2;
+	} else
+		dev->if_port = IF_PORT_UNKNOWN;
+
+	eh = etherh_priv(dev);
+	eh->supported = data->supported;
+	eh->ctrl = 0;
+	eh->id = ec->cid.product;
+	eh->memc = ecardm_iomap(ec, ECARD_RES_MEMC, 0, PAGE_SIZE);
+	if (!eh->memc) {
+		ret = -ENOMEM;
+		goto free;
+	}
+
+	eh->ctrl_port = eh->memc;
+	if (data->ctrl_ioc) {
+		eh->ioc_fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, PAGE_SIZE);
+		if (!eh->ioc_fast) {
+			ret = -ENOMEM;
+			goto free;
+		}
+		eh->ctrl_port = eh->ioc_fast;
+	}
+
+	dev->base_addr = (unsigned long)eh->memc + data->ns8390_offset;
+	eh->dma_base = eh->memc + data->dataport_offset;
+	eh->ctrl_port += data->ctrlport_offset;
+
+	/*
+	 * IRQ and control port handling - only for non-NIC slot cards.
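+	 * Cards in the NIC slot (slot 8) get no ecard IRQ routing set
+	 * up here; their interrupt is simply enabled through the card
+	 * control register instead, via etherh_set_ctrl() below.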
+ */ + if (ec->slot_no != 8) { + ecard_setirq(ec, ðerh_ops, eh); + } else { + /* + * If we're in the NIC slot, make sure the IRQ is enabled + */ + etherh_set_ctrl(eh, ETHERH_CP_IE); + } + + ei_local = netdev_priv(dev); + spin_lock_init(&ei_local->page_lock); + + if (ec->cid.product == PROD_ANT_ETHERM) { + etherm_addr(dev->dev_addr); + ei_local->reg_offset = etherm_regoffsets; + } else { + etherh_addr(dev->dev_addr, ec); + ei_local->reg_offset = etherh_regoffsets; + } + + ei_local->name = dev->name; + ei_local->word16 = 1; + ei_local->tx_start_page = data->tx_start_page; + ei_local->rx_start_page = ei_local->tx_start_page + TX_PAGES; + ei_local->stop_page = data->stop_page; + ei_local->reset_8390 = etherh_reset; + ei_local->block_input = etherh_block_input; + ei_local->block_output = etherh_block_output; + ei_local->get_8390_hdr = etherh_get_header; + ei_local->interface_num = 0; + + etherh_reset(dev); + __NS8390_init(dev, 0); + + ret = register_netdev(dev); + if (ret) + goto free; + + printk(KERN_INFO "%s: %s in slot %d, %pM\n", + dev->name, data->name, ec->slot_no, dev->dev_addr); + + ecard_set_drvdata(ec, dev); + + return 0; + + free: + free_netdev(dev); + release: + ecard_release_resources(ec); + out: + return ret; +} + +static void __devexit etherh_remove(struct expansion_card *ec) +{ + struct net_device *dev = ecard_get_drvdata(ec); + + ecard_set_drvdata(ec, NULL); + + unregister_netdev(dev); + + free_netdev(dev); + + ecard_release_resources(ec); +} + +static struct etherh_data etherm_data = { + .ns8390_offset = ETHERM_NS8390, + .dataport_offset = ETHERM_NS8390 + ETHERM_DATAPORT, + .ctrlport_offset = ETHERM_NS8390 + ETHERM_CTRLPORT, + .name = "ANT EtherM", + .supported = SUPPORTED_10baseT_Half, + .tx_start_page = ETHERM_TX_START_PAGE, + .stop_page = ETHERM_STOP_PAGE, +}; + +static struct etherh_data etherlan500_data = { + .ns8390_offset = ETHERH500_NS8390, + .dataport_offset = ETHERH500_NS8390 + ETHERH500_DATAPORT, + .ctrlport_offset = ETHERH500_CTRLPORT, + .ctrl_ioc = 1, + .name = "i3 EtherH 500", + .supported = SUPPORTED_10baseT_Half, + .tx_start_page = ETHERH_TX_START_PAGE, + .stop_page = ETHERH_STOP_PAGE, +}; + +static struct etherh_data etherlan600_data = { + .ns8390_offset = ETHERH600_NS8390, + .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT, + .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT, + .name = "i3 EtherH 600", + .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg, + .tx_start_page = ETHERH_TX_START_PAGE, + .stop_page = ETHERH_STOP_PAGE, +}; + +static struct etherh_data etherlan600a_data = { + .ns8390_offset = ETHERH600_NS8390, + .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT, + .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT, + .name = "i3 EtherH 600A", + .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg, + .tx_start_page = ETHERH_TX_START_PAGE, + .stop_page = ETHERH_STOP_PAGE, +}; + +static const struct ecard_id etherh_ids[] = { + { MANU_ANT, PROD_ANT_ETHERM, ðerm_data }, + { MANU_I3, PROD_I3_ETHERLAN500, ðerlan500_data }, + { MANU_I3, PROD_I3_ETHERLAN600, ðerlan600_data }, + { MANU_I3, PROD_I3_ETHERLAN600A, ðerlan600a_data }, + { 0xffff, 0xffff } +}; + +static struct ecard_driver etherh_driver = { + .probe = etherh_probe, + .remove = __devexit_p(etherh_remove), + .id_table = etherh_ids, + .drv = { + .name = DRV_NAME, + }, +}; + +static int __init etherh_init(void) +{ + int i; + + for (i = 0; i < 16; i++) { + etherh_regoffsets[i] = i << 2; + etherm_regoffsets[i] 
+	}
+
+	return ecard_register_driver(&etherh_driver);
+}
+
+static void __exit etherh_exit(void)
+{
+	ecard_remove_driver(&etherh_driver);
+}
+
+module_init(etherh_init);
+module_exit(etherh_exit);
diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
new file mode 100644
index 0000000..2991736
--- /dev/null
+++ b/drivers/net/ethernet/8390/hp-plus.c
@@ -0,0 +1,506 @@
+/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */
+/*
+	Written 1994 by Donald Becker.
+
+	This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
+	These cards are sold under several model numbers, usually 2724*.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	As is often the case, a great deal of credit is owed to Russ Nelson.
+	The Crynwr packet driver was my primary source of HP-specific
+	programming information.
+*/
+
+static const char version[] =
+"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include
+
+#include	/* Important -- this inlines word moves. */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include "8390.h"
+
+#define DRV_NAME "hp-plus"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hpplus_portlist[] __initdata =
+{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
+
+/*
+   The HP EtherTwist chip implementation is a fairly routine DP8390
+   implementation.  It allows both shared memory and programmed-I/O buffer
+   access, using a custom interface for both.  The programmed-I/O mode is
+   entirely implemented in the HP EtherTwist chip, bypassing the problem
+   ridden built-in 8390 facilities used on NE2000 designs.  The shared
+   memory mode is likewise special, with an offset register used to make
+   packets appear at the shared memory base.  Both modes use a base and bounds
+   page register to hide the Rx ring buffer wrap -- a packet that spans the
+   end of physical buffer memory appears continuous to the driver. (c.f. the
+   3c503 and Cabletron E2100)
+
+   A special note: the internal buffer of the board is only 8 bits wide.
+   This lays several nasty traps for the unaware:
+   - the 8390 must be programmed for byte-wide operations
+   - all I/O and memory operations must work on whole words (the access
+     latches are serially preloaded and have no byte-swapping ability).
+
+   This board is laid out in I/O space much like the earlier HP boards:
+   the first 16 locations are for the board registers, and the second 16 are
+   for the 8390. The board is easy to identify, with both a dedicated 16 bit
+   ID register and a constant 0x530* value in the upper bits of the paging
+   register.
+*/
+
+#define HP_ID		0x00	/* ID register, always 0x4850. */
+#define HP_PAGING	0x02	/* Registers visible @ 8-f, see PageName. */
+#define HPP_OPTION	0x04	/* Bitmapped options, see HP_Option. */
+#define HPP_OUT_ADDR	0x08	/* I/O output location in Perf_Page. */
+#define HPP_IN_ADDR	0x0A	/* I/O input location in Perf_Page. */
+#define HP_DATAPORT	0x0c	/* I/O data transfer in Perf_Page. */
+#define NIC_OFFSET	0x10	/* Offset to the 8390 registers. */
+#define HP_IO_EXTENT	32
+
+#define HP_START_PG	0x00	/* First page of TX buffer */
+#define HP_STOP_PG	0x80	/* Last page +1 of RX ring */
+
+/* The register set selected in HP_PAGING.
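+   Selecting a page is a single word write, e.g.
+	outw(MAC_Page, ioaddr + HP_PAGING);
+   after which that page's registers appear at locations 8-15 of the
+   board's I/O block (see HP_PAGING above).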
*/ +enum PageName { + Perf_Page = 0, /* Normal operation. */ + MAC_Page = 1, /* The ethernet address (+checksum). */ + HW_Page = 2, /* EEPROM-loaded hardware parameters. */ + LAN_Page = 4, /* Transceiver selection, testing, etc. */ + ID_Page = 6 }; + +/* The bit definitions for the HPP_OPTION register. */ +enum HP_Option { + NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */ + EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20, + MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, }; + +static int hpp_probe1(struct net_device *dev, int ioaddr); + +static void hpp_reset_8390(struct net_device *dev); +static int hpp_open(struct net_device *dev); +static int hpp_close(struct net_device *dev); +static void hpp_mem_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void hpp_mem_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); +static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void hpp_io_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void hpp_io_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); +static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); + + +/* Probe a list of addresses for an HP LAN+ adaptor. + This routine is almost boilerplate. */ + +static int __init do_hpp_probe(struct net_device *dev) +{ + int i; + int base_addr = dev->base_addr; + int irq = dev->irq; + + if (base_addr > 0x1ff) /* Check a single specified location. */ + return hpp_probe1(dev, base_addr); + else if (base_addr != 0) /* Don't probe at all. */ + return -ENXIO; + + for (i = 0; hpplus_portlist[i]; i++) { + if (hpp_probe1(dev, hpplus_portlist[i]) == 0) + return 0; + dev->irq = irq; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init hp_plus_probe(int unit) +{ + struct net_device *dev = alloc_eip_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_hpp_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops hpp_netdev_ops = { + .ndo_open = hpp_open, + .ndo_stop = hpp_close, + .ndo_start_xmit = eip_start_xmit, + .ndo_tx_timeout = eip_tx_timeout, + .ndo_get_stats = eip_get_stats, + .ndo_set_multicast_list = eip_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = eip_poll, +#endif +}; + + +/* Do the interesting part of the probe at a single address. */ +static int __init hpp_probe1(struct net_device *dev, int ioaddr) +{ + int i, retval; + unsigned char checksum = 0; + const char name[] = "HP-PC-LAN+"; + int mem_start; + static unsigned version_printed; + + if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + /* Check for the HP+ signature, 50 48 0x 53. */ + if (inw(ioaddr + HP_ID) != 0x4850 || + (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) { + retval = -ENODEV; + goto out; + } + + if (ei_debug && version_printed++ == 0) + printk(version); + + printk("%s: %s at %#3x, ", dev->name, name, ioaddr); + + /* Retrieve and checksum the station address. 
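+   The six address octets plus the check byte at offset 14 must sum
+   to 0xff, or the probe is rejected below.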
*/ + outw(MAC_Page, ioaddr + HP_PAGING); + + for(i = 0; i < ETHER_ADDR_LEN; i++) { + unsigned char inval = inb(ioaddr + 8 + i); + dev->dev_addr[i] = inval; + checksum += inval; + } + checksum += inb(ioaddr + 14); + + printk("%pM", dev->dev_addr); + + if (checksum != 0xff) { + printk(" bad checksum %2.2x.\n", checksum); + retval = -ENODEV; + goto out; + } else { + /* Point at the Software Configuration Flags. */ + outw(ID_Page, ioaddr + HP_PAGING); + printk(" ID %4.4x", inw(ioaddr + 12)); + } + + /* Read the IRQ line. */ + outw(HW_Page, ioaddr + HP_PAGING); + { + int irq = inb(ioaddr + 13) & 0x0f; + int option = inw(ioaddr + HPP_OPTION); + + dev->irq = irq; + if (option & MemEnable) { + mem_start = inw(ioaddr + 9) << 8; + printk(", IRQ %d, memory address %#x.\n", irq, mem_start); + } else { + mem_start = 0; + printk(", IRQ %d, programmed-I/O mode.\n", irq); + } + } + + /* Set the wrap registers for string I/O reads. */ + outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14); + + /* Set the base address to point to the NIC, not the "real" base! */ + dev->base_addr = ioaddr + NIC_OFFSET; + + dev->netdev_ops = &hpp_netdev_ops; + + ei_status.name = name; + ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */ + ei_status.tx_start_page = HP_START_PG; + ei_status.rx_start_page = HP_START_PG + TX_PAGES/2; + ei_status.stop_page = HP_STOP_PG; + + ei_status.reset_8390 = &hpp_reset_8390; + ei_status.block_input = &hpp_io_block_input; + ei_status.block_output = &hpp_io_block_output; + ei_status.get_8390_hdr = &hpp_io_get_8390_hdr; + + /* Check if the memory_enable flag is set in the option register. */ + if (mem_start) { + ei_status.block_input = &hpp_mem_block_input; + ei_status.block_output = &hpp_mem_block_output; + ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr; + dev->mem_start = mem_start; + ei_status.mem = ioremap(mem_start, + (HP_STOP_PG - HP_START_PG)*256); + if (!ei_status.mem) { + retval = -ENOMEM; + goto out; + } + ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256; + dev->mem_end = ei_status.rmem_end + = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256; + } + + outw(Perf_Page, ioaddr + HP_PAGING); + NS8390p_init(dev, 0); + /* Leave the 8390 and HP chip reset. */ + outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION); + + retval = register_netdev(dev); + if (retval) + goto out1; + return 0; +out1: + iounmap(ei_status.mem); +out: + release_region(ioaddr, HP_IO_EXTENT); + return retval; +} + +static int +hpp_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr - NIC_OFFSET; + int option_reg; + int retval; + + if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) { + return retval; + } + + /* Reset the 8390 and HP chip. */ + option_reg = inw(ioaddr + HPP_OPTION); + outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION); + udelay(5); + /* Unreset the board and enable interrupts. */ + outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION); + + /* Set the wrap registers for programmed-I/O operation. */ + outw(HW_Page, ioaddr + HP_PAGING); + outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14); + + /* Select the operational page. 
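+   (Perf_Page brings the I/O dataport back into view; see enum
+   PageName and the HP_DATAPORT definition above.)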
*/ + outw(Perf_Page, ioaddr + HP_PAGING); + + return eip_open(dev); +} + +static int +hpp_close(struct net_device *dev) +{ + int ioaddr = dev->base_addr - NIC_OFFSET; + int option_reg = inw(ioaddr + HPP_OPTION); + + free_irq(dev->irq, dev); + eip_close(dev); + outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset, + ioaddr + HPP_OPTION); + + return 0; +} + +static void +hpp_reset_8390(struct net_device *dev) +{ + int ioaddr = dev->base_addr - NIC_OFFSET; + int option_reg = inw(ioaddr + HPP_OPTION); + + if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies); + + outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION); + /* Pause a few cycles for the hardware reset to take place. */ + udelay(5); + ei_status.txing = 0; + outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION); + + udelay(5); + + + if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0) + printk("%s: hp_reset_8390() did not complete.\n", dev->name); + + if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies); +} + +/* The programmed-I/O version of reading the 4 byte 8390 specific header. + Note that transfer with the EtherTwist+ must be on word boundaries. */ + +static void +hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + int ioaddr = dev->base_addr - NIC_OFFSET; + + outw((ring_page<<8), ioaddr + HPP_IN_ADDR); + insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); +} + +/* Block input and output, similar to the Crynwr packet driver. */ + +static void +hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + int ioaddr = dev->base_addr - NIC_OFFSET; + char *buf = skb->data; + + outw(ring_offset, ioaddr + HPP_IN_ADDR); + insw(ioaddr + HP_DATAPORT, buf, count>>1); + if (count & 0x01) + buf[count-1] = inw(ioaddr + HP_DATAPORT); +} + +/* The corresponding shared memory versions of the above 2 functions. */ + +static void +hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + int ioaddr = dev->base_addr - NIC_OFFSET; + int option_reg = inw(ioaddr + HPP_OPTION); + + outw((ring_page<<8), ioaddr + HPP_IN_ADDR); + outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); + memcpy_fromio(hdr, ei_status.mem, sizeof(struct e8390_pkt_hdr)); + outw(option_reg, ioaddr + HPP_OPTION); + hdr->count = (le16_to_cpu(hdr->count) + 3) & ~3; /* Round up allocation. */ +} + +static void +hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + int ioaddr = dev->base_addr - NIC_OFFSET; + int option_reg = inw(ioaddr + HPP_OPTION); + + outw(ring_offset, ioaddr + HPP_IN_ADDR); + + outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); + + /* Caution: this relies on get_8390_hdr() rounding up count! + Also note that we *can't* use eth_io_copy_and_sum() because + it will not always copy "count" bytes (e.g. padded IP). */ + + memcpy_fromio(skb->data, ei_status.mem, count); + outw(option_reg, ioaddr + HPP_OPTION); +} + +/* A special note: we *must* always transfer >=16 bit words. + It's always safe to round up, so we do. 
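+   hpp_io_block_output below rounds up further, to 32 bit longwords:
+   (count+3)>>2 transfers via outsl(), so e.g. a 61 byte packet goes
+   out as 16 longwords.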
*/ +static void +hpp_io_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + int ioaddr = dev->base_addr - NIC_OFFSET; + outw(start_page << 8, ioaddr + HPP_OUT_ADDR); + outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2); +} + +static void +hpp_mem_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + int ioaddr = dev->base_addr - NIC_OFFSET; + int option_reg = inw(ioaddr + HPP_OPTION); + + outw(start_page << 8, ioaddr + HPP_OUT_ADDR); + outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); + memcpy_toio(ei_status.mem, buf, (count + 3) & ~3); + outw(option_reg, ioaddr + HPP_OPTION); +} + + +#ifdef MODULE +#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */ +static struct net_device *dev_hpp[MAX_HPP_CARDS]; +static int io[MAX_HPP_CARDS]; +static int irq[MAX_HPP_CARDS]; + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O port address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s); ignored if properly detected"); +MODULE_DESCRIPTION("HP PC-LAN+ ISA ethernet driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that only a single autoprobe takes place per call. +ISA device autoprobes on a running machine are not recommended. */ +int __init +init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) { + if (io[this_dev] == 0) { + if (this_dev != 0) break; /* only autoprobe 1st one */ + printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n"); + } + dev = alloc_eip_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + if (do_hpp_probe(dev) == 0) { + dev_hpp[found++] = dev; + continue; + } + free_netdev(dev); + printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + /* NB: hpp_close() handles free_irq */ + iounmap(ei_status.mem); + release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT); +} + +void __exit +cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) { + struct net_device *dev = dev_hpp[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c new file mode 100644 index 0000000..18564d4 --- /dev/null +++ b/drivers/net/ethernet/8390/hp.c @@ -0,0 +1,439 @@ +/* hp.c: A HP LAN ethernet driver for linux. */ +/* + Written 1993-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + This is a driver for the HP PC-LAN adaptors. + + Sources: + The Crynwr packet driver. +*/ + +static const char version[] = + "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8390.h" + +#define DRV_NAME "hp" + +/* A zero-terminated list of I/O addresses to be probed. 
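+   As in hp-plus.c above, a dev->base_addr above 0x1ff makes
+   do_hp_probe() check that single address only, and any other
+   non-zero value disables probing altogether.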
*/ +static unsigned int hppclan_portlist[] __initdata = +{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0}; + +#define HP_IO_EXTENT 32 + +#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */ +#define HP_ID 0x07 +#define HP_CONFIGURE 0x08 /* Configuration register. */ +#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */ +#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */ +#define HP_DATAON 0x10 /* Turn on dataport */ +#define NIC_OFFSET 0x10 /* Offset the 8390 registers. */ + +#define HP_START_PG 0x00 /* First page of TX buffer */ +#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */ +#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. */ + +static int hp_probe1(struct net_device *dev, int ioaddr); + +static void hp_reset_8390(struct net_device *dev); +static void hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void hp_block_input(struct net_device *dev, int count, + struct sk_buff *skb , int ring_offset); +static void hp_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); + +static void hp_init_card(struct net_device *dev); + +/* The map from IRQ number to HP_CONFIGURE register setting. */ +/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */ +static char irqmap[16] __initdata= { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0}; + + +/* Probe for an HP LAN adaptor. + Also initialize the card and fill in STATION_ADDR with the station + address. */ + +static int __init do_hp_probe(struct net_device *dev) +{ + int i; + int base_addr = dev->base_addr; + int irq = dev->irq; + + if (base_addr > 0x1ff) /* Check a single specified location. */ + return hp_probe1(dev, base_addr); + else if (base_addr != 0) /* Don't probe at all. */ + return -ENXIO; + + for (i = 0; hppclan_portlist[i]; i++) { + if (hp_probe1(dev, hppclan_portlist[i]) == 0) + return 0; + dev->irq = irq; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init hp_probe(int unit) +{ + struct net_device *dev = alloc_eip_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_hp_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static int __init hp_probe1(struct net_device *dev, int ioaddr) +{ + int i, retval, board_id, wordmode; + const char *name; + static unsigned version_printed; + + if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + /* Check for the HP physical address, 08 00 09 xx xx xx. */ + /* This really isn't good enough: we may pick up HP LANCE boards + also! Avoid the lance 0x5757 signature. */ + if (inb(ioaddr) != 0x08 + || inb(ioaddr+1) != 0x00 + || inb(ioaddr+2) != 0x09 + || inb(ioaddr+14) == 0x57) { + retval = -ENODEV; + goto out; + } + + /* Set up the parameters based on the board ID. + If you have additional mappings, please mail them to me -djb. */ + if ((board_id = inb(ioaddr + HP_ID)) & 0x80) { + name = "HP27247"; + wordmode = 1; + } else { + name = "HP27250"; + wordmode = 0; + } + + if (ei_debug && version_printed++ == 0) + printk(version); + + printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr); + + for(i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = inb(ioaddr + i); + + printk(" %pM", dev->dev_addr); + + /* Snarf the interrupt now. Someday this could be moved to open(). 
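+   With no IRQ configured we walk irq_16list/irq_8list below, pulse
+   each free candidate line through HP_CONFIGURE, and keep the first
+   one that probe_irq_off() confirms.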
*/ + if (dev->irq < 2) { + static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0}; + static const int irq_8list[] = { 7, 5, 3, 4, 9, 0}; + const int *irqp = wordmode ? irq_16list : irq_8list; + do { + int irq = *irqp; + if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) { + unsigned long cookie = probe_irq_on(); + /* Twinkle the interrupt, and check if it's seen. */ + outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE); + outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE); + if (irq == probe_irq_off(cookie) /* It's a good IRQ line! */ + && request_irq (irq, eip_interrupt, 0, DRV_NAME, dev) == 0) { + printk(" selecting IRQ %d.\n", irq); + dev->irq = *irqp; + break; + } + } + } while (*++irqp); + if (*irqp == 0) { + printk(" no free IRQ lines.\n"); + retval = -EBUSY; + goto out; + } + } else { + if (dev->irq == 2) + dev->irq = 9; + if ((retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev))) { + printk (" unable to get IRQ %d.\n", dev->irq); + goto out; + } + } + + /* Set the base address to point to the NIC, not the "real" base! */ + dev->base_addr = ioaddr + NIC_OFFSET; + dev->netdev_ops = &eip_netdev_ops; + + ei_status.name = name; + ei_status.word16 = wordmode; + ei_status.tx_start_page = HP_START_PG; + ei_status.rx_start_page = HP_START_PG + TX_PAGES; + ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG; + + ei_status.reset_8390 = hp_reset_8390; + ei_status.get_8390_hdr = hp_get_8390_hdr; + ei_status.block_input = hp_block_input; + ei_status.block_output = hp_block_output; + hp_init_card(dev); + + retval = register_netdev(dev); + if (retval) + goto out1; + return 0; +out1: + free_irq(dev->irq, dev); +out: + release_region(ioaddr, HP_IO_EXTENT); + return retval; +} + +static void +hp_reset_8390(struct net_device *dev) +{ + int hp_base = dev->base_addr - NIC_OFFSET; + int saved_config = inb_p(hp_base + HP_CONFIGURE); + + if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies); + outb_p(0x00, hp_base + HP_CONFIGURE); + ei_status.txing = 0; + /* Pause just a few cycles for the hardware reset to take place. */ + udelay(5); + + outb_p(saved_config, hp_base + HP_CONFIGURE); + udelay(5); + + if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0) + printk("%s: hp_reset_8390() did not complete.\n", dev->name); + + if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies); +} + +static void +hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + int nic_base = dev->base_addr; + int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE); + + outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE); + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base); + outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); + outb_p(0, nic_base + EN0_RCNTHI); + outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base); + + if (ei_status.word16) + insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); + else + insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); + + outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE); +} + +/* Block input and output, similar to the Crynwr packet driver. If you are + porting to a new ethercard look at the packet driver source for hints. + The HP LAN doesn't use shared memory -- we put the packet + out through the "remote DMA" dataport. 
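+   Each transfer follows the same pattern: program EN0_RCNT/EN0_RSAR,
+   issue E8390_RREAD or E8390_RWRITE, then move the data through
+   HP_DATAPORT with insw()/outsw() (insb()/outsb() on 8 bit boards).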
*/ + +static void +hp_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + int nic_base = dev->base_addr; + int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE); + int xfer_count = count; + char *buf = skb->data; + + outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE); + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base); + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); + outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base); + if (ei_status.word16) { + insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1); + if (count & 0x01) + buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT), xfer_count++; + } else { + insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count); + } + /* This is for the ALPHA version only, remove for later releases. */ + if (ei_debug > 0) { /* DMA termination address check... */ + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + int addr = (high << 8) + low; + /* Check only the lower 8 bits so we can ignore ring wrap. */ + if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff)) + printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n", + dev->name, ring_offset + xfer_count, addr); + } + outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE); +} + +static void +hp_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + int nic_base = dev->base_addr; + int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE); + + outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE); + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + if (ei_status.word16 && (count & 0x01)) + count++; + /* We should already be in page 0, but to be safe... */ + outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base); + +#ifdef NE8390_RW_BUGFIX + /* Handle the read-before-write bug the same way as the + Crynwr packet driver -- the NatSemi method doesn't work. */ + outb_p(0x42, nic_base + EN0_RCNTLO); + outb_p(0, nic_base + EN0_RCNTHI); + outb_p(0xff, nic_base + EN0_RSARLO); + outb_p(0x00, nic_base + EN0_RSARHI); +#define NE_CMD 0x00 + outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); + /* Make certain that the dummy read has occurred. */ + inb_p(0x61); + inb_p(0x61); +#endif + + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(0x00, nic_base + EN0_RSARLO); + outb_p(start_page, nic_base + EN0_RSARHI); + + outb_p(E8390_RWRITE+E8390_START, nic_base); + if (ei_status.word16) { + /* Use the 'rep' sequence for 16 bit boards. */ + outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1); + } else { + outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count); + } + + /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */ + + /* This is for the ALPHA version only, remove for later releases. */ + if (ei_debug > 0) { /* DMA termination address check... */ + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + int addr = (high << 8) + low; + if ((start_page << 8) + count != addr) + printk("%s: TX Transfer address mismatch, %#4.4x vs. 
%#4.4x.\n", + dev->name, (start_page << 8) + count, addr); + } + outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE); +} + +/* This function resets the ethercard if something screws up. */ +static void __init +hp_init_card(struct net_device *dev) +{ + int irq = dev->irq; + NS8390p_init(dev, 0); + outb_p(irqmap[irq&0x0f] | HP_RUN, + dev->base_addr - NIC_OFFSET + HP_CONFIGURE); +} + +#ifdef MODULE +#define MAX_HP_CARDS 4 /* Max number of HP cards per module */ +static struct net_device *dev_hp[MAX_HP_CARDS]; +static int io[MAX_HP_CARDS]; +static int irq[MAX_HP_CARDS]; + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); +MODULE_DESCRIPTION("HP PC-LAN ISA ethernet driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that only a single autoprobe takes place per call. +ISA device autoprobes on a running machine are not recommended. */ +int __init +init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) { + if (io[this_dev] == 0) { + if (this_dev != 0) break; /* only autoprobe 1st one */ + printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n"); + } + dev = alloc_eip_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + if (do_hp_probe(dev) == 0) { + dev_hp[found++] = dev; + continue; + } + free_netdev(dev); + printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + free_irq(dev->irq, dev); + release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT); +} + +void __exit +cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) { + struct net_device *dev = dev_hp[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c new file mode 100644 index 0000000..1cd481c --- /dev/null +++ b/drivers/net/ethernet/8390/hydra.c @@ -0,0 +1,273 @@ +/* New Hydra driver using generic 8390 core */ +/* Based on old hydra driver by Topi Kanerva (topi@susanna.oulu.fi) */ + +/* This file is subject to the terms and conditions of the GNU General */ +/* Public License. See the file COPYING in the main directory of the */ +/* Linux distribution for more details. */ + +/* Peter De Schrijver (p2@mind.be) */ +/* Oldenburg 2000 */ + +/* The Amiganet is a Zorro-II board made by Hydra Systems. It contains a */ +/* NS8390 NIC (network interface controller) clone, 16 or 64K on-board RAM */ +/* and 10BASE-2 (thin coax) and AUI connectors. 
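+   That RAM is mapped straight into the Zorro-II address space, so the
+   ring buffer is reached with z_memcpy_fromio()/z_memcpy_toio() below
+   rather than through a dataport.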
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define EI_SHIFT(x) (ei_local->reg_offset[x]) +#define ei_inb(port) in_8(port) +#define ei_outb(val,port) out_8(port,val) +#define ei_inb_p(port) in_8(port) +#define ei_outb_p(val,port) out_8(port,val) + +static const char version[] = + "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#include "lib8390.c" + +#define NE_EN0_DCFG (0x0e*2) + +#define NESM_START_PG 0x0 /* First page of TX buffer */ +#define NESM_STOP_PG 0x40 /* Last page +1 of RX ring */ + +#define HYDRA_NIC_BASE 0xffe1 +#define HYDRA_ADDRPROM 0xffc0 +#define HYDRA_VERSION "v3.0alpha" + +#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8)) + + +static int __devinit hydra_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent); +static int __devinit hydra_init(struct zorro_dev *z); +static int hydra_open(struct net_device *dev); +static int hydra_close(struct net_device *dev); +static void hydra_reset_8390(struct net_device *dev); +static void hydra_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); +static void hydra_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void hydra_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); +static void __devexit hydra_remove_one(struct zorro_dev *z); + +static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = { + { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl); + +static struct zorro_driver hydra_driver = { + .name = "hydra", + .id_table = hydra_zorro_tbl, + .probe = hydra_init_one, + .remove = __devexit_p(hydra_remove_one), +}; + +static int __devinit hydra_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + int err; + + if (!request_mem_region(z->resource.start, 0x10000, "Hydra")) + return -EBUSY; + if ((err = hydra_init(z))) { + release_mem_region(z->resource.start, 0x10000); + return -EBUSY; + } + return 0; +} + +static const struct net_device_ops hydra_netdev_ops = { + .ndo_open = hydra_open, + .ndo_stop = hydra_close, + + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = __ei_poll, +#endif +}; + +static int __devinit hydra_init(struct zorro_dev *z) +{ + struct net_device *dev; + unsigned long board = ZTWO_VADDR(z->resource.start); + unsigned long ioaddr = board+HYDRA_NIC_BASE; + const char name[] = "NE2000"; + int start_page, stop_page; + int j; + int err; + + static u32 hydra_offsets[16] = { + 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, + }; + + dev = ____alloc_ei_netdev(0); + if (!dev) + return -ENOMEM; + + for(j = 0; j < ETHER_ADDR_LEN; j++) + dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j)); + + /* We must set the 8390 for word mode. 
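+   (the board is wired 16 bits wide: ei_status.word16 is set to 1
+   further down to match the 0x4b written to NE_EN0_DCFG here)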
*/ + z_writeb(0x4b, ioaddr + NE_EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + dev->base_addr = ioaddr; + dev->irq = IRQ_AMIGA_PORTS; + + /* Install the Interrupt handler */ + if (request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, "Hydra Ethernet", + dev)) { + free_netdev(dev); + return -EAGAIN; + } + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = 1; + ei_status.bigendian = 1; + + ei_status.rx_start_page = start_page + TX_PAGES; + + ei_status.reset_8390 = hydra_reset_8390; + ei_status.block_input = hydra_block_input; + ei_status.block_output = hydra_block_output; + ei_status.get_8390_hdr = hydra_get_8390_hdr; + ei_status.reg_offset = hydra_offsets; + + dev->netdev_ops = &hydra_netdev_ops; + __NS8390_init(dev, 0); + + err = register_netdev(dev); + if (err) { + free_irq(IRQ_AMIGA_PORTS, dev); + free_netdev(dev); + return err; + } + + zorro_set_drvdata(z, dev); + + pr_info("%s: Hydra at %pR, address %pM (hydra.c " HYDRA_VERSION ")\n", + dev->name, &z->resource, dev->dev_addr); + + return 0; +} + +static int hydra_open(struct net_device *dev) +{ + __ei_open(dev); + return 0; +} + +static int hydra_close(struct net_device *dev) +{ + if (ei_debug > 1) + printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); + __ei_close(dev); + return 0; +} + +static void hydra_reset_8390(struct net_device *dev) +{ + printk(KERN_INFO "Hydra hw reset not there\n"); +} + +static void hydra_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page) +{ + int nic_base = dev->base_addr; + short *ptrs; + unsigned long hdr_start= (nic_base-HYDRA_NIC_BASE) + + ((ring_page - NESM_START_PG)<<8); + ptrs = (short *)hdr; + + *(ptrs++) = z_readw(hdr_start); + *((short *)hdr) = WORDSWAP(*((short *)hdr)); + hdr_start += 2; + *(ptrs++) = z_readw(hdr_start); + *((short *)hdr+1) = WORDSWAP(*((short *)hdr+1)); +} + +static void hydra_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned long nic_base = dev->base_addr; + unsigned long mem_base = nic_base - HYDRA_NIC_BASE; + unsigned long xfer_start = mem_base + ring_offset - (NESM_START_PG<<8); + + if (count&1) + count++; + + if (xfer_start+count > mem_base + (NESM_STOP_PG<<8)) { + int semi_count = (mem_base + (NESM_STOP_PG<<8)) - xfer_start; + + z_memcpy_fromio(skb->data,xfer_start,semi_count); + count -= semi_count; + z_memcpy_fromio(skb->data+semi_count, mem_base, count); + } else + z_memcpy_fromio(skb->data, xfer_start,count); + +} + +static void hydra_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + unsigned long nic_base = dev->base_addr; + unsigned long mem_base = nic_base - HYDRA_NIC_BASE; + + if (count&1) + count++; + + z_memcpy_toio(mem_base+((start_page - NESM_START_PG)<<8), buf, count); +} + +static void __devexit hydra_remove_one(struct zorro_dev *z) +{ + struct net_device *dev = zorro_get_drvdata(z); + + unregister_netdev(dev); + free_irq(IRQ_AMIGA_PORTS, dev); + release_mem_region(ZTWO_PADDR(dev->base_addr)-HYDRA_NIC_BASE, 0x10000); + free_netdev(dev); +} + +static int __init hydra_init_module(void) +{ + return zorro_register_driver(&hydra_driver); +} + +static void __exit hydra_cleanup_module(void) +{ + zorro_unregister_driver(&hydra_driver); +} + +module_init(hydra_init_module); +module_exit(hydra_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c new file mode 
100644 index 0000000..05ae214 --- /dev/null +++ b/drivers/net/ethernet/8390/lib8390.c @@ -0,0 +1,1080 @@ +/* 8390.c: A general NS8390 ethernet driver core for linux. */ +/* + Written 1992-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + + This is the chip-specific code for many 8390-based ethernet adaptors. + This is not a complete driver, it must be combined with board-specific + code such as ne.c, wd.c, 3c503.c, etc. + + Seeing how at least eight drivers use this code, (not counting the + PCMCIA ones either) it is easy to break some card by what seems like + a simple innocent change. Please contact me or Donald if you think + you have found something that needs changing. -- PG + + + Changelog: + + Paul Gortmaker : remove set_bit lock, other cleanups. + Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to + ei_block_input() for eth_io_copy_and_sum(). + Paul Gortmaker : exchange static int ei_pingpong for a #define, + also add better Tx error handling. + Paul Gortmaker : rewrite Rx overrun handling as per NS specs. + Alexey Kuznetsov : use the 8390's six bit hash multicast filter. + Paul Gortmaker : tweak ANK's above multicast changes a bit. + Paul Gortmaker : update packet statistics for v2.1.x + Alan Cox : support arbitrary stupid port mappings on the + 68K Macintosh. Support >16bit I/O spaces + Paul Gortmaker : add kmod support for auto-loading of the 8390 + module by all drivers that require it. + Alan Cox : Spinlocking work, added 'BUG_83C690' + Paul Gortmaker : Separate out Tx timeout code from Tx path. + Paul Gortmaker : Remove old unused single Tx buffer code. + Hayato Fujiwara : Add m32r support. + Paul Gortmaker : use skb_padto() instead of stack scratch area + + Sources: + The National Semiconductor LAN Databook, and the 3Com 3c503 databook. + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define NS8390_CORE +#include "8390.h" + +#define BUG_83C690 + +/* These are the operational function interfaces to board-specific + routines. + void reset_8390(struct net_device *dev) + Resets the board associated with DEV, including a hardware reset of + the 8390. This is only called when there is a transmit timeout, and + it is always followed by 8390_init(). + void block_output(struct net_device *dev, int count, const unsigned char *buf, + int start_page) + Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The + "page" value uses the 8390's 256-byte pages. + void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page) + Read the 4 byte, page aligned 8390 header. *If* there is a + subsequent read, it will be of the rest of the packet. + void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) + Read COUNT bytes from the packet buffer into the skb data area. Start + reading from RING_OFFSET, the address as the 8390 sees it. This will always + follow the read of the 8390 header. 
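+
+   Each board driver supplies these four hooks; etherh.c, hp-plus.c,
+   hp.c and hydra.c earlier in this patch are compact examples of the
+   pattern.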
+*/ +#define ei_reset_8390 (ei_local->reset_8390) +#define ei_block_output (ei_local->block_output) +#define ei_block_input (ei_local->block_input) +#define ei_get_8390_hdr (ei_local->get_8390_hdr) + +/* use 0 for production, 1 for verification, >2 for debug */ +#ifndef ei_debug +int ei_debug = 1; +#endif + +/* Index to functions. */ +static void ei_tx_intr(struct net_device *dev); +static void ei_tx_err(struct net_device *dev); +static void ei_receive(struct net_device *dev); +static void ei_rx_overrun(struct net_device *dev); + +/* Routines generic to NS8390-based boards. */ +static void NS8390_trigger_send(struct net_device *dev, unsigned int length, + int start_page); +static void do_set_multicast_list(struct net_device *dev); +static void __NS8390_init(struct net_device *dev, int startp); + +/* + * SMP and the 8390 setup. + * + * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is + * a page register that controls bank and packet buffer access. We guard + * this with ei_local->page_lock. Nobody should assume or set the page other + * than zero when the lock is not held. Lock holders must restore page 0 + * before unlocking. Even pure readers must take the lock to protect in + * page 0. + * + * To make life difficult the chip can also be very slow. We therefore can't + * just use spinlocks. For the longer lockups we disable the irq the device + * sits on and hold the lock. We must hold the lock because there is a dual + * processor case other than interrupts (get stats/set multicast list in + * parallel with each other and transmit). + * + * Note: in theory we can just disable the irq on the card _but_ there is + * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs" + * enter lock, take the queued irq. So we waddle instead of flying. + * + * Finally by special arrangement for the purpose of being generally + * annoying the transmit function is called bh atomic. That places + * restrictions on the user context callers as disable_irq won't save + * them. + * + * Additional explanation of problems with locking by Alan Cox: + * + * "The author (me) didn't use spin_lock_irqsave because the slowness of the + * card means that approach caused horrible problems like losing serial data + * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA + * chips with FPGA front ends. + * + * Ok the logic behind the 8390 is very simple: + * + * Things to know + * - IRQ delivery is asynchronous to the PCI bus + * - Blocking the local CPU IRQ via spin locks was too slow + * - The chip has register windows needing locking work + * + * So the path was once (I say once as people appear to have changed it + * in the mean time and it now looks rather bogus if the changes to use + * disable_irq_nosync_irqsave are disabling the local IRQ) + * + * + * Take the page lock + * Mask the IRQ on chip + * Disable the IRQ (but not mask locally- someone seems to have + * broken this with the lock validator stuff) + * [This must be _nosync as the page lock may otherwise + * deadlock us] + * Drop the page lock and turn IRQs back on + * + * At this point an existing IRQ may still be running but we can't + * get a new one + * + * Take the lock (so we know the IRQ has terminated) but don't mask + * the IRQs on the processor + * Set irqlock [for debug] + * + * Transmit (slow as ****) + * + * re-enable the IRQ + * + * + * We have to use disable_irq because otherwise you will get delayed + * interrupts on the APIC bus deadlocking the transmit path. 
+ * + * Quite hairy but the chip simply wasn't designed for SMP and you can't + * even ACK an interrupt without risking corrupting other parallel + * activities on the chip." [lkml, 25 Jul 2007] + */ + + + +/** + * ei_open - Open/initialize the board. + * @dev: network device to initialize + * + * This routine goes all-out, setting everything + * up anew at each open, even though many of these registers should only + * need to be set once at boot. + */ +static int __ei_open(struct net_device *dev) +{ + unsigned long flags; + struct ei_device *ei_local = netdev_priv(dev); + + if (dev->watchdog_timeo <= 0) + dev->watchdog_timeo = TX_TIMEOUT; + + /* + * Grab the page lock so we own the register set, then call + * the init function. + */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + __NS8390_init(dev, 1); + /* Set the flag before we drop the lock, That way the IRQ arrives + after its set and we get no silly warnings */ + netif_start_queue(dev); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + ei_local->irqlock = 0; + return 0; +} + +/** + * ei_close - shut down network device + * @dev: network device to close + * + * Opposite of ei_open(). Only used when "ifconfig down" is done. + */ +static int __ei_close(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned long flags; + + /* + * Hold the page lock during close + */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + __NS8390_init(dev, 0); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + netif_stop_queue(dev); + return 0; +} + +/** + * ei_tx_timeout - handle transmit time out condition + * @dev: network device which has apparently fallen asleep + * + * Called by kernel when device never acknowledges a transmit has + * completed (or failed) - i.e. never posted a Tx related interrupt. + */ + +static void __ei_tx_timeout(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); + unsigned long flags; + + dev->stats.tx_errors++; + + spin_lock_irqsave(&ei_local->page_lock, flags); + txsr = ei_inb(e8390_base+EN0_TSR); + isr = ei_inb(e8390_base+EN0_ISR); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n", + (txsr & ENTSR_ABT) ? "excess collisions." : + (isr) ? "lost interrupt?" : "cable problem?", + txsr, isr, tickssofar); + + if (!isr && !dev->stats.tx_packets) { + /* The 8390 probably hasn't gotten on the cable yet. */ + ei_local->interface_num ^= 1; /* Try a different xcvr. */ + } + + /* Ugly but a reset can be slow, yet must be protected */ + + disable_irq_nosync_lockdep(dev->irq); + spin_lock(&ei_local->page_lock); + + /* Try to restart the card. Perhaps the user has fixed something. */ + ei_reset_8390(dev); + __NS8390_init(dev, 1); + + spin_unlock(&ei_local->page_lock); + enable_irq_lockdep(dev->irq); + netif_wake_queue(dev); +} + +/** + * ei_start_xmit - begin packet transmission + * @skb: packet to be sent + * @dev: network device to which packet is sent + * + * Sends a packet to an 8390 network device. 
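+ *
+ * Locking follows the scheme described above: take page_lock to mask
+ * the chip's interrupts (EN0_IMR), drop it, disable_irq(), retake
+ * page_lock for the slow buffer upload, then unmask and enable_irq()
+ * again.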
+ */ + +static netdev_tx_t __ei_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int send_length = skb->len, output_page; + unsigned long flags; + char buf[ETH_ZLEN]; + char *data = skb->data; + + if (skb->len < ETH_ZLEN) { + memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */ + memcpy(buf, data, skb->len); + send_length = ETH_ZLEN; + data = buf; + } + + /* Mask interrupts from the ethercard. + SMP: We have to grab the lock here otherwise the IRQ handler + on another CPU can flip window and race the IRQ mask set. We end + up trashing the mcast filter not disabling irqs if we don't lock */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + ei_outb_p(0x00, e8390_base + EN0_IMR); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + + /* + * Slow phase with lock held. + */ + + disable_irq_nosync_lockdep_irqsave(dev->irq, &flags); + + spin_lock(&ei_local->page_lock); + + ei_local->irqlock = 1; + + /* + * We have two Tx slots available for use. Find the first free + * slot, and then perform some sanity checks. With two Tx bufs, + * you get very close to transmitting back-to-back packets. With + * only one Tx buf, the transmitter sits idle while you reload the + * card, leaving a substantial gap between each transmitted packet. + */ + + if (ei_local->tx1 == 0) { + output_page = ei_local->tx_start_page; + ei_local->tx1 = send_length; + if (ei_debug && ei_local->tx2 > 0) + netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n", + ei_local->tx2, ei_local->lasttx, ei_local->txing); + } else if (ei_local->tx2 == 0) { + output_page = ei_local->tx_start_page + TX_PAGES/2; + ei_local->tx2 = send_length; + if (ei_debug && ei_local->tx1 > 0) + netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n", + ei_local->tx1, ei_local->lasttx, ei_local->txing); + } else { /* We should never get here. */ + if (ei_debug) + netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n", + ei_local->tx1, ei_local->tx2, ei_local->lasttx); + ei_local->irqlock = 0; + netif_stop_queue(dev); + ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); + spin_unlock(&ei_local->page_lock); + enable_irq_lockdep_irqrestore(dev->irq, &flags); + dev->stats.tx_errors++; + return NETDEV_TX_BUSY; + } + + /* + * Okay, now upload the packet and trigger a send if the transmitter + * isn't already sending. If it is busy, the interrupt handler will + * trigger the send later, upon receiving a Tx done interrupt. + */ + + ei_block_output(dev, send_length, data, output_page); + + if (!ei_local->txing) { + ei_local->txing = 1; + NS8390_trigger_send(dev, send_length, output_page); + if (output_page == ei_local->tx_start_page) { + ei_local->tx1 = -1; + ei_local->lasttx = -1; + } else { + ei_local->tx2 = -1; + ei_local->lasttx = -2; + } + } else + ei_local->txqueue++; + + if (ei_local->tx1 && ei_local->tx2) + netif_stop_queue(dev); + else + netif_start_queue(dev); + + /* Turn 8390 interrupts back on. */ + ei_local->irqlock = 0; + ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); + + spin_unlock(&ei_local->page_lock); + enable_irq_lockdep_irqrestore(dev->irq, &flags); + skb_tx_timestamp(skb); + dev_kfree_skb(skb); + dev->stats.tx_bytes += send_length; + + return NETDEV_TX_OK; +} + +/** + * ei_interrupt - handle the interrupts from an 8390 + * @irq: interrupt number + * @dev_id: a pointer to the net_device + * + * Handle the ether interface interrupts. 
We pull packets from + * the 8390 via the card specific functions and fire them at the networking + * stack. We also handle transmit completions and wake the transmit path if + * necessary. We also update the counters and do other housekeeping as + * needed. + */ + +static irqreturn_t __ei_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + unsigned long e8390_base = dev->base_addr; + int interrupts, nr_serviced = 0; + struct ei_device *ei_local = netdev_priv(dev); + + /* + * Protect the irq test too. + */ + + spin_lock(&ei_local->page_lock); + + if (ei_local->irqlock) { + /* + * This might just be an interrupt for a PCI device sharing + * this line + */ + netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n", + ei_inb_p(e8390_base + EN0_ISR), + ei_inb_p(e8390_base + EN0_IMR)); + spin_unlock(&ei_local->page_lock); + return IRQ_NONE; + } + + /* Change to page 0 and read the intr status reg. */ + ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); + if (ei_debug > 3) + netdev_dbg(dev, "interrupt(isr=%#2.2x)\n", + ei_inb_p(e8390_base + EN0_ISR)); + + /* !!Assumption!! -- we stay in page 0. Don't break this. */ + while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 && + ++nr_serviced < MAX_SERVICE) { + if (!netif_running(dev)) { + netdev_warn(dev, "interrupt from stopped card\n"); + /* rmk - acknowledge the interrupts */ + ei_outb_p(interrupts, e8390_base + EN0_ISR); + interrupts = 0; + break; + } + if (interrupts & ENISR_OVER) + ei_rx_overrun(dev); + else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) { + /* Got a good (?) packet. */ + ei_receive(dev); + } + /* Push the next to-transmit packet through. */ + if (interrupts & ENISR_TX) + ei_tx_intr(dev); + else if (interrupts & ENISR_TX_ERR) + ei_tx_err(dev); + + if (interrupts & ENISR_COUNTERS) { + dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0); + dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1); + dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2); + ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */ + } + + /* Ignore any RDC interrupts that make it back to here. */ + if (interrupts & ENISR_RDC) + ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR); + + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); + } + + if (interrupts && ei_debug) { + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); + if (nr_serviced >= MAX_SERVICE) { + /* 0xFF is valid for a card removal */ + if (interrupts != 0xFF) + netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n", + interrupts); + ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ + } else { + netdev_warn(dev, "unknown interrupt %#2x\n", interrupts); + ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ + } + } + spin_unlock(&ei_local->page_lock); + return IRQ_RETVAL(nr_serviced > 0); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void __ei_poll(struct net_device *dev) +{ + disable_irq(dev->irq); + __ei_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/** + * ei_tx_err - handle transmitter error + * @dev: network device which threw the exception + * + * A transmitter error has happened. Most likely excess collisions (which + * is a fairly normal condition). If the error is one where the Tx will + * have been aborted, we try and send another one right away, instead of + * letting the failed packet sit and collect dust in the Tx buffer. 
This + * is a much better solution as it avoids kernel based Tx timeouts, and + * an unnecessary card reset. + * + * Called with lock held. + */ + +static void ei_tx_err(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + /* ei_local is used on some platforms via the EI_SHIFT macro */ + struct ei_device *ei_local __maybe_unused = netdev_priv(dev); + unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR); + unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); + +#ifdef VERBOSE_ERROR_DUMP + netdev_dbg(dev, "transmitter error (%#2x):", txsr); + if (txsr & ENTSR_ABT) + pr_cont(" excess-collisions "); + if (txsr & ENTSR_ND) + pr_cont(" non-deferral "); + if (txsr & ENTSR_CRS) + pr_cont(" lost-carrier "); + if (txsr & ENTSR_FU) + pr_cont(" FIFO-underrun "); + if (txsr & ENTSR_CDH) + pr_cont(" lost-heartbeat "); + pr_cont("\n"); +#endif + + ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */ + + if (tx_was_aborted) + ei_tx_intr(dev); + else { + dev->stats.tx_errors++; + if (txsr & ENTSR_CRS) + dev->stats.tx_carrier_errors++; + if (txsr & ENTSR_CDH) + dev->stats.tx_heartbeat_errors++; + if (txsr & ENTSR_OWC) + dev->stats.tx_window_errors++; + } +} + +/** + * ei_tx_intr - transmit interrupt handler + * @dev: network device for which tx intr is handled + * + * We have finished a transmit: check for errors and then trigger the next + * packet to be sent. Called with lock held. + */ + +static void ei_tx_intr(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int status = ei_inb(e8390_base + EN0_TSR); + + ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */ + + /* + * There are two Tx buffers, see which one finished, and trigger + * the send of another one if it exists. + */ + ei_local->txqueue--; + + if (ei_local->tx1 < 0) { + if (ei_local->lasttx != 1 && ei_local->lasttx != -1) + pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n", + ei_local->name, ei_local->lasttx, ei_local->tx1); + ei_local->tx1 = 0; + if (ei_local->tx2 > 0) { + ei_local->txing = 1; + NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); + dev->trans_start = jiffies; + ei_local->tx2 = -1, + ei_local->lasttx = 2; + } else + ei_local->lasttx = 20, ei_local->txing = 0; + } else if (ei_local->tx2 < 0) { + if (ei_local->lasttx != 2 && ei_local->lasttx != -2) + pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n", + ei_local->name, ei_local->lasttx, ei_local->tx2); + ei_local->tx2 = 0; + if (ei_local->tx1 > 0) { + ei_local->txing = 1; + NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); + dev->trans_start = jiffies; + ei_local->tx1 = -1; + ei_local->lasttx = 1; + } else + ei_local->lasttx = 10, ei_local->txing = 0; + } /* else + netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n", + ei_local->lasttx); +*/ + + /* Minimize Tx latency: update the statistics after we restart TXing. 
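+ *
+ * One slot's lifecycle in the scheme above, as an illustrative
+ * sketch only (not driver code; the values mirror what
+ * __ei_start_xmit() and this handler store in tx1/lasttx):
+ *
+ *	tx1 = send_length;       slot loaded, send pending
+ *	tx1 = -1; lasttx = -1;   slot handed to the transmitter
+ *	tx1 = 0;                 TX-done serviced, slot free again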
*/ + if (status & ENTSR_COL) + dev->stats.collisions++; + if (status & ENTSR_PTX) + dev->stats.tx_packets++; + else { + dev->stats.tx_errors++; + if (status & ENTSR_ABT) { + dev->stats.tx_aborted_errors++; + dev->stats.collisions += 16; + } + if (status & ENTSR_CRS) + dev->stats.tx_carrier_errors++; + if (status & ENTSR_FU) + dev->stats.tx_fifo_errors++; + if (status & ENTSR_CDH) + dev->stats.tx_heartbeat_errors++; + if (status & ENTSR_OWC) + dev->stats.tx_window_errors++; + } + netif_wake_queue(dev); +} + +/** + * ei_receive - receive some packets + * @dev: network device with which receive will be run + * + * We have a good packet(s), get it/them out of the buffers. + * Called with lock held. + */ + +static void ei_receive(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + unsigned char rxing_page, this_frame, next_frame; + unsigned short current_offset; + int rx_pkt_count = 0; + struct e8390_pkt_hdr rx_frame; + int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page; + + while (++rx_pkt_count < 10) { + int pkt_len, pkt_stat; + + /* Get the rx page (incoming packet pointer). */ + ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD); + rxing_page = ei_inb_p(e8390_base + EN1_CURPAG); + ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); + + /* Remove one frame from the ring. Boundary is always a page behind. */ + this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1; + if (this_frame >= ei_local->stop_page) + this_frame = ei_local->rx_start_page; + + /* Someday we'll omit the previous, iff we never get this message. + (There is at least one clone claimed to have a problem.) + + Keep quiet if it looks like a card removal. One problem here + is that some clones crash in roughly the same way. + */ + if (ei_debug > 0 && + this_frame != ei_local->current_page && + (this_frame != 0x0 || rxing_page != 0xFF)) + netdev_err(dev, "mismatched read page pointers %2x vs %2x\n", + this_frame, ei_local->current_page); + + if (this_frame == rxing_page) /* Read all the frames? */ + break; /* Done for now */ + + current_offset = this_frame << 8; + ei_get_8390_hdr(dev, &rx_frame, this_frame); + + pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); + pkt_stat = rx_frame.status; + + next_frame = this_frame + 1 + ((pkt_len+4)>>8); + + /* Check for bogosity warned by 3c503 book: the status byte is never + written. This happened a lot during testing! This code should be + cleaned up someday. 
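+   A worked example of the next_frame arithmetic above: a 60-byte
+   frame plus the 4-byte ring header fits in one 256-byte page, so
+   next_frame is this_frame + 1; a full 1514-byte frame needs
+   (1514+4)>>8 = 5 extra pages, giving this_frame + 6.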
*/ + if (rx_frame.next != next_frame && + rx_frame.next != next_frame + 1 && + rx_frame.next != next_frame - num_rx_pages && + rx_frame.next != next_frame + 1 - num_rx_pages) { + ei_local->current_page = rxing_page; + ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY); + dev->stats.rx_errors++; + continue; + } + + if (pkt_len < 60 || pkt_len > 1518) { + if (ei_debug) + netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n", + rx_frame.count, rx_frame.status, + rx_frame.next); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + } else if ((pkt_stat & 0x0F) == ENRSR_RXOK) { + struct sk_buff *skb; + + skb = dev_alloc_skb(pkt_len+2); + if (skb == NULL) { + if (ei_debug > 1) + netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n", + pkt_len); + dev->stats.rx_dropped++; + break; + } else { + skb_reserve(skb, 2); /* IP headers on 16 byte boundaries */ + skb_put(skb, pkt_len); /* Make room */ + ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); + skb->protocol = eth_type_trans(skb, dev); + if (!skb_defer_rx_timestamp(skb)) + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + if (pkt_stat & ENRSR_PHY) + dev->stats.multicast++; + } + } else { + if (ei_debug) + netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n", + rx_frame.status, rx_frame.next, + rx_frame.count); + dev->stats.rx_errors++; + /* NB: The NIC counts CRC, frame and missed errors. */ + if (pkt_stat & ENRSR_FO) + dev->stats.rx_fifo_errors++; + } + next_frame = rx_frame.next; + + /* This _should_ never happen: it's here for avoiding bad clones. */ + if (next_frame >= ei_local->stop_page) { + netdev_notice(dev, "next frame inconsistency, %#2x\n", + next_frame); + next_frame = ei_local->rx_start_page; + } + ei_local->current_page = next_frame; + ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); + } + + /* We used to also ack ENISR_OVER here, but that would sometimes mask + a real overrun, leaving the 8390 in a stopped state with rec'vr off. */ + ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR); +} + +/** + * ei_rx_overrun - handle receiver overrun + * @dev: network device which threw exception + * + * We have a receiver overrun: we have to kick the 8390 to get it started + * again. Problem is that you have to kick it exactly as NS prescribes in + * the updated datasheets, or "the NIC may act in an unpredictable manner." + * This includes causing "the NIC to defer indefinitely when it is stopped + * on a busy network." Ugh. + * Called with lock held. Don't call this with the interrupts off or your + * computer will hate you - it takes 10ms or so. + */ + +static void ei_rx_overrun(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + unsigned char was_txing, must_resend = 0; + /* ei_local is used on some platforms via the EI_SHIFT macro */ + struct ei_device *ei_local __maybe_unused = netdev_priv(dev); + + /* + * Record whether a Tx was in progress and then issue the + * stop command. + */ + was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS; + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); + + if (ei_debug > 1) + netdev_dbg(dev, "Receiver overrun\n"); + dev->stats.rx_over_errors++; + + /* + * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. + * Early datasheets said to poll the reset bit, but now they say that + * it "is not a reliable indicator and subsequently should be ignored." + * We wait at least 10ms. 
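+ *
+ * For reference, the recovery sequence implemented below: stop the
+ * NIC, wait, clear RBCR0/1, note whether a transmit was cut short,
+ * enter loopback, restart the NIC, drain the Rx ring, ack ENISR_OVER,
+ * leave loopback, and finally retrigger the interrupted transmit if
+ * there was one.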
+ */ + + mdelay(10); + + /* + * Reset RBCR[01] back to zero as per magic incantation. + */ + ei_outb_p(0x00, e8390_base+EN0_RCNTLO); + ei_outb_p(0x00, e8390_base+EN0_RCNTHI); + + /* + * See if any Tx was interrupted or not. According to NS, this + * step is vital, and skipping it will cause no end of havoc. + */ + + if (was_txing) { + unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); + if (!tx_completed) + must_resend = 1; + } + + /* + * Have to enter loopback mode and then restart the NIC before + * you are allowed to slurp packets up off the ring. + */ + ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); + ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); + + /* + * Clear the Rx ring of all the debris, and ack the interrupt. + */ + ei_receive(dev); + ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR); + + /* + * Leave loopback mode, and resend any packet that got stopped. + */ + ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); + if (must_resend) + ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); +} + +/* + * Collect the stats. This is called unlocked and from several contexts. + */ + +static struct net_device_stats *__ei_get_stats(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + unsigned long flags; + + /* If the card is stopped, just return the present stats. */ + if (!netif_running(dev)) + return &dev->stats; + + spin_lock_irqsave(&ei_local->page_lock, flags); + /* Read the counter registers, assuming we are in page 0. */ + dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0); + dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1); + dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + return &dev->stats; +} + +/* + * Form the 64 bit 8390 multicast table from the linked list of addresses + * associated with this dev structure. + */ + +static inline void make_mc_bits(u8 *bits, struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + u32 crc = ether_crc(ETH_ALEN, ha->addr); + /* + * The 8390 uses the 6 most significant bits of the + * CRC to index the multicast table. + */ + bits[crc>>29] |= (1<<((crc>>26)&7)); + } +} + +/** + * do_set_multicast_list - set/clear multicast filter + * @dev: net device for which multicast filter is adjusted + * + * Set or clear the multicast filter for this adaptor. May be called + * from a BH in 2.1.x. Must be called with lock held. + */ + +static void do_set_multicast_list(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + int i; + struct ei_device *ei_local = netdev_priv(dev); + + if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { + memset(ei_local->mcfilter, 0, 8); + if (!netdev_mc_empty(dev)) + make_mc_bits(ei_local->mcfilter, dev); + } else + memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */ + + /* + * DP8390 manuals don't specify any magic sequence for altering + * the multicast regs on an already running card. To be safe, we + * ensure multicast mode is off prior to loading up the new hash + * table. If this proves to be not enough, we can always resort + * to stopping the NIC, loading the table and then restarting. + * + * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC + * Elite16) appear to be write-only. The NS 8390 data sheet lists + * them as r/w so this is a bug. 
The SMC 83C790 (SMC Ultra and + * Ultra32 EISA) appears to have this bug fixed. + */ + + if (netif_running(dev)) + ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); + ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); + for (i = 0; i < 8; i++) { + ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i)); +#ifndef BUG_83C690 + if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i]) + netdev_err(dev, "Multicast filter read/write mismap %d\n", + i); +#endif + } + ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD); + + if (dev->flags&IFF_PROMISC) + ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR); + else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) + ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR); + else + ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); +} + +/* + * Called without lock held. This is invoked from user context and may + * be parallel to just about everything else. Its also fairly quick and + * not called too often. Must protect against both bh and irq users + */ + +static void __ei_set_multicast_list(struct net_device *dev) +{ + unsigned long flags; + struct ei_device *ei_local = netdev_priv(dev); + + spin_lock_irqsave(&ei_local->page_lock, flags); + do_set_multicast_list(dev); + spin_unlock_irqrestore(&ei_local->page_lock, flags); +} + +/** + * ethdev_setup - init rest of 8390 device struct + * @dev: network device structure to init + * + * Initialize the rest of the 8390 device structure. Do NOT __init + * this, as it is used by 8390 based modular drivers too. + */ + +static void ethdev_setup(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + if (ei_debug > 1) + printk(version); + + ether_setup(dev); + + spin_lock_init(&ei_local->page_lock); +} + +/** + * alloc_ei_netdev - alloc_etherdev counterpart for 8390 + * @size: extra bytes to allocate + * + * Allocate 8390-specific net_device. + */ +static struct net_device *____alloc_ei_netdev(int size) +{ + return alloc_netdev(sizeof(struct ei_device) + size, "eth%d", + ethdev_setup); +} + + + + +/* This page of functions should be 8390 generic */ +/* Follow National Semi's recommendations for initializing the "NIC". */ + +/** + * NS8390_init - initialize 8390 hardware + * @dev: network device to initialize + * @startp: boolean. non-zero value to initiate chip processing + * + * Must be called with lock held. + */ + +static void __NS8390_init(struct net_device *dev, int startp) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int i; + int endcfg = ei_local->word16 + ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0)) + : 0x48; + + if (sizeof(struct e8390_pkt_hdr) != 4) + panic("8390.c: header struct mispacked\n"); + /* Follow National Semi's recommendations for initing the DP83902. */ + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ + ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ + /* Clear the remote byte count registers. */ + ei_outb_p(0x00, e8390_base + EN0_RCNTLO); + ei_outb_p(0x00, e8390_base + EN0_RCNTHI); + /* Set to monitor and loopback mode -- this is vital!. */ + ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */ + ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ + /* Set the transmit page and receive ring. 
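+   As an example layout (assuming the common NE2000-style values of
+   tx_start_page 0x40 and stop_page 0x80, with TX_PAGES = 12):
+
+	0x40-0x4b   two ping-pong transmit buffers, 6 pages each
+	0x4c-0x7f   receive ring, STARTPG up to STOPPG - 1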
*/ + ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); + ei_local->tx1 = ei_local->tx2 = 0; + ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); + ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/ + ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ + ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); + /* Clear the pending interrupts and mask. */ + ei_outb_p(0xFF, e8390_base + EN0_ISR); + ei_outb_p(0x00, e8390_base + EN0_IMR); + + /* Copy the station address into the DS8390 registers. */ + + ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */ + for (i = 0; i < 6; i++) { + ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); + if (ei_debug > 1 && + ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i]) + netdev_err(dev, "Hw. address read/write mismap %d\n", i); + } + + ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); + + ei_local->tx1 = ei_local->tx2 = 0; + ei_local->txing = 0; + + if (startp) { + ei_outb_p(0xff, e8390_base + EN0_ISR); + ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); + ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */ + /* 3c503 TechMan says rxconfig only after the NIC is started. */ + ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */ + do_set_multicast_list(dev); /* (re)load the mcast table */ + } +} + +/* Trigger a transmit start, assuming the length is valid. + Always called with the page lock held */ + +static void NS8390_trigger_send(struct net_device *dev, unsigned int length, + int start_page) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev); + + ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); + + if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) { + netdev_warn(dev, "trigger_send() called with the transmitter busy\n"); + return; + } + ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO); + ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI); + ei_outb_p(start_page, e8390_base + EN0_TPSR); + ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD); +} diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c new file mode 100644 index 0000000..f9888d2 --- /dev/null +++ b/drivers/net/ethernet/8390/lne390.c @@ -0,0 +1,434 @@ +/* + lne390.c + + Linux driver for Mylex LNE390 EISA Network Adapter + + Copyright (C) 1996-1998, Paul Gortmaker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Information and Code Sources: + + 1) Based upon framework of es3210 driver. + 2) The existing myriad of other Linux 8390 drivers by Donald Becker. + 3) Russ Nelson's asm packet driver provided additional info. + 4) Info for getting IRQ and sh-mem gleaned from the EISA cfg files. + + The LNE390 is an EISA shared memory NS8390 implementation. Note + that all memory copies to/from the board must be 32bit transfers. + There are two versions of the card: the lne390a and the lne390b. + Going by the EISA cfg files, the "a" has jumpers to select between + BNC/AUI, but the "b" also has RJ-45 and selection is via the SCU. + The shared memory address selection is also slightly different. + Note that shared memory address > 1MB are supported with this driver. 
+ + You can try if you want more info, as I've + never even seen one of these cards. :) + + Arnaldo Carvalho de Melo - 2000/09/01 + - get rid of check_region + - no need to check if dev == NULL in lne390_probe1 +*/ + +static const char *version = + "lne390.c: Driver revision v0.99.1, 01/09/2000\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8390.h" + +#define DRV_NAME "lne390" + +static int lne390_probe1(struct net_device *dev, int ioaddr); + +static void lne390_reset_8390(struct net_device *dev); + +static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); +static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); +static void lne390_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); + +#define LNE390_START_PG 0x00 /* First page of TX buffer */ +#define LNE390_STOP_PG 0x80 /* Last page +1 of RX ring */ + +#define LNE390_ID_PORT 0xc80 /* Same for all EISA cards */ +#define LNE390_IO_EXTENT 0x20 +#define LNE390_SA_PROM 0x16 /* Start of e'net addr. */ +#define LNE390_RESET_PORT 0xc84 /* From the pkt driver source */ +#define LNE390_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */ + +#define LNE390_ADDR0 0x00 /* 3 byte vendor prefix */ +#define LNE390_ADDR1 0x80 +#define LNE390_ADDR2 0xe5 + +#define LNE390_ID0 0x10009835 /* 0x3598 = 01101 01100 11000 = mlx */ +#define LNE390_ID1 0x11009835 /* above is the 390A, this is 390B */ + +#define LNE390_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */ +#define LNE390_CFG2 0xc90 + +/* + * You can OR any of the following bits together and assign it + * to LNE390_DEBUG to get verbose driver info during operation. + * Currently only the probe one is implemented. + */ + +#define LNE390_D_PROBE 0x01 +#define LNE390_D_RX_PKT 0x02 +#define LNE390_D_TX_PKT 0x04 +#define LNE390_D_IRQ 0x08 + +#define LNE390_DEBUG 0 + +static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; +static unsigned int shmem_mapA[] __initdata = {0xff, 0xfe, 0xfd, 0xfff, 0xffe, 0xffc, 0x0d, 0x0}; +static unsigned int shmem_mapB[] __initdata = {0xff, 0xfe, 0x0e, 0xfff, 0xffe, 0xffc, 0x0d, 0x0}; + +/* + * Probe for the card. The best way is to read the EISA ID if it + * is known. Then we can check the prefix of the station address + * PROM for a match against the value assigned to Mylex. + */ + +static int __init do_lne390_probe(struct net_device *dev) +{ + unsigned short ioaddr = dev->base_addr; + int irq = dev->irq; + int mem_start = dev->mem_start; + int ret; + + if (ioaddr > 0x1ff) { /* Check a single specified location. */ + if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME)) + return -EBUSY; + ret = lne390_probe1(dev, ioaddr); + if (ret) + release_region(ioaddr, LNE390_IO_EXTENT); + return ret; + } + else if (ioaddr > 0) /* Don't probe at all. */ + return -ENXIO; + + if (!EISA_bus) { +#if LNE390_DEBUG & LNE390_D_PROBE + printk("lne390-debug: Not an EISA bus. Not probing high ports.\n"); +#endif + return -ENXIO; + } + + /* EISA spec allows for up to 16 slots, but 8 is typical. 
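+   Slot n decodes its slot-specific I/O at n * 0x1000, so the loop
+   below walks slots 1 through 8 (0x1000 to 0x8000).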
*/ + for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { + if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME)) + continue; + if (lne390_probe1(dev, ioaddr) == 0) + return 0; + release_region(ioaddr, LNE390_IO_EXTENT); + dev->irq = irq; + dev->mem_start = mem_start; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init lne390_probe(int unit) +{ + struct net_device *dev = alloc_ei_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_lne390_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static int __init lne390_probe1(struct net_device *dev, int ioaddr) +{ + int i, revision, ret; + unsigned long eisa_id; + + if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV; + +#if LNE390_DEBUG & LNE390_D_PROBE + printk("lne390-debug: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + LNE390_ID_PORT)); + printk("lne390-debug: config regs: %#x %#x\n", + inb(ioaddr + LNE390_CFG1), inb(ioaddr + LNE390_CFG2)); +#endif + + +/* Check the EISA ID of the card. */ + eisa_id = inl(ioaddr + LNE390_ID_PORT); + if ((eisa_id != LNE390_ID0) && (eisa_id != LNE390_ID1)) { + return -ENODEV; + } + + revision = (eisa_id >> 24) & 0x01; /* 0 = rev A, 1 rev B */ + +#if 0 +/* Check the Mylex vendor ID as well. Not really required. */ + if (inb(ioaddr + LNE390_SA_PROM + 0) != LNE390_ADDR0 + || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1 + || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) { + printk("lne390.c: card not found"); + for(i = 0; i < ETHER_ADDR_LEN; i++) + printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i)); + printk(" (invalid prefix).\n"); + return -ENODEV; + } +#endif + + for(i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i); + printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n", + 0xa+revision, ioaddr/0x1000, dev->dev_addr); + + printk("lne390.c: "); + + /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */ + if (dev->irq == 0) { + unsigned char irq_reg = inb(ioaddr + LNE390_CFG2) >> 3; + dev->irq = irq_map[irq_reg & 0x07]; + printk("using"); + } else { + /* This is useless unless we reprogram the card here too */ + if (dev->irq == 2) dev->irq = 9; /* Doh! */ + printk("assigning"); + } + printk(" IRQ %d,", dev->irq); + + if ((ret = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) { + printk (" unable to get IRQ %d.\n", dev->irq); + return ret; + } + + if (dev->mem_start == 0) { + unsigned char mem_reg = inb(ioaddr + LNE390_CFG2) & 0x07; + + if (revision) /* LNE390B */ + dev->mem_start = shmem_mapB[mem_reg] * 0x10000; + else /* LNE390A */ + dev->mem_start = shmem_mapA[mem_reg] * 0x10000; + printk(" using "); + } else { + /* Should check for value in shmem_map and reprogram the card to use it */ + dev->mem_start &= 0xfff0000; + printk(" assigning "); + } + + printk("%dkB memory at physical address %#lx\n", + LNE390_STOP_PG/4, dev->mem_start); + + /* + BEWARE!! Some dain-bramaged EISA SCUs will allow you to put + the card mem within the region covered by `normal' RAM !!! + + ioremap() will fail in that case. 
+ */ + ei_status.mem = ioremap(dev->mem_start, LNE390_STOP_PG*0x100); + if (!ei_status.mem) { + printk(KERN_ERR "lne390.c: Unable to remap card memory above 1MB !!\n"); + printk(KERN_ERR "lne390.c: Try using EISA SCU to set memory below 1MB.\n"); + printk(KERN_ERR "lne390.c: Driver NOT installed.\n"); + ret = -EAGAIN; + goto cleanup; + } + printk("lne390.c: remapped %dkB card memory to virtual address %p\n", + LNE390_STOP_PG/4, ei_status.mem); + + dev->mem_start = (unsigned long)ei_status.mem; + dev->mem_end = dev->mem_start + (LNE390_STOP_PG - LNE390_START_PG)*256; + + /* The 8390 offset is zero for the LNE390 */ + dev->base_addr = ioaddr; + + ei_status.name = "LNE390"; + ei_status.tx_start_page = LNE390_START_PG; + ei_status.rx_start_page = LNE390_START_PG + TX_PAGES; + ei_status.stop_page = LNE390_STOP_PG; + ei_status.word16 = 1; + + if (ei_debug > 0) + printk(version); + + ei_status.reset_8390 = &lne390_reset_8390; + ei_status.block_input = &lne390_block_input; + ei_status.block_output = &lne390_block_output; + ei_status.get_8390_hdr = &lne390_get_8390_hdr; + + dev->netdev_ops = &ei_netdev_ops; + NS8390_init(dev, 0); + + ret = register_netdev(dev); + if (ret) + goto unmap; + return 0; +unmap: + if (ei_status.reg0) + iounmap(ei_status.mem); +cleanup: + free_irq(dev->irq, dev); + return ret; +} + +/* + * Reset as per the packet driver method. Judging by the EISA cfg + * file, this just toggles the "Board Enable" bits (bit 2 and 0). + */ + +static void lne390_reset_8390(struct net_device *dev) +{ + unsigned short ioaddr = dev->base_addr; + + outb(0x04, ioaddr + LNE390_RESET_PORT); + if (ei_debug > 1) printk("%s: resetting the LNE390...", dev->name); + + mdelay(2); + + ei_status.txing = 0; + outb(0x01, ioaddr + LNE390_RESET_PORT); + if (ei_debug > 1) printk("reset done\n"); +} + +/* + * Note: In the following three functions is the implicit assumption + * that the associated memcpy will only use "rep; movsl" as long as + * we keep the counts as some multiple of doublewords. This is a + * requirement of the hardware, and also prevents us from using + * eth_io_copy_and_sum() since we can't guarantee it will limit + * itself to doubleword access. + */ + +/* + * Grab the 8390 specific header. Similar to the block_input routine, but + * we don't need to be concerned with ring wrap as the header will be at + * the start of a page, so we optimize accordingly. (A single doubleword.) + */ + +static void +lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + void __iomem *hdr_start = ei_status.mem + ((ring_page - LNE390_START_PG)<<8); + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); + hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ +} + +/* + * Block input and output are easy on shared memory ethercards, the only + * complication is when the ring buffer wraps. The count will already + * be rounded up to a doubleword value via lne390_get_8390_hdr() above. + */ + +static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, + int ring_offset) +{ + void __iomem *xfer_start = ei_status.mem + ring_offset - (LNE390_START_PG<<8); + + if (ring_offset + count > (LNE390_STOP_PG<<8)) { + /* Packet wraps over end of ring buffer. */ + int semi_count = (LNE390_STOP_PG<<8) - ring_offset; + memcpy_fromio(skb->data, xfer_start, semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, + ei_status.mem + (TX_PAGES<<8), count); + } else { + /* Packet is in one chunk. 
*/ + memcpy_fromio(skb->data, xfer_start, count); + } +} + +static void lne390_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + void __iomem *shmem = ei_status.mem + ((start_page - LNE390_START_PG)<<8); + + count = (count + 3) & ~3; /* Round up to doubleword */ + memcpy_toio(shmem, buf, count); +} + + +#ifdef MODULE +#define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */ +static struct net_device *dev_lne[MAX_LNE_CARDS]; +static int io[MAX_LNE_CARDS]; +static int irq[MAX_LNE_CARDS]; +static int mem[MAX_LNE_CARDS]; + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(mem, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s)"); +MODULE_PARM_DESC(mem, "memory base address(es)"); +MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); +MODULE_LICENSE("GPL"); + +int __init init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) { + if (io[this_dev] == 0 && this_dev != 0) + break; + dev = alloc_ei_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + dev->mem_start = mem[this_dev]; + if (do_lne390_probe(dev) == 0) { + dev_lne[found++] = dev; + continue; + } + free_netdev(dev); + printk(KERN_WARNING "lne390.c: No LNE390 card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + free_irq(dev->irq, dev); + release_region(dev->base_addr, LNE390_IO_EXTENT); + iounmap(ei_status.mem); +} + +void __exit cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) { + struct net_device *dev = dev_lne[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ + diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c new file mode 100644 index 0000000..f84f5e6 --- /dev/null +++ b/drivers/net/ethernet/8390/mac8390.c @@ -0,0 +1,874 @@ +/* mac8390.c: New driver for 8390-based Nubus (or Nubus-alike) + Ethernet cards on Linux */ +/* Based on the former daynaport.c driver, by Alan Cox. Some code + taken from or inspired by skeleton.c by Donald Becker, acenic.c by + Jes Sorensen, and ne2k-pci.c by Donald Becker and Paul Gortmaker. + + This software may be used and distributed according to the terms of + the GNU Public License, incorporated herein by reference. */ + +/* 2000-02-28: support added for Dayna and Kinetics cards by + A.G.deWijn@phys.uu.nl */ +/* 2000-04-04: support added for Dayna2 by bart@etpmod.phys.tue.nl */ +/* 2001-04-18: support for DaynaPort E/LC-M by rayk@knightsmanor.org */ +/* 2001-05-15: support for Cabletron ported from old daynaport driver + * and fixed access to Sonic Sys card which masquerades as a Farallon + * by rayk@knightsmanor.org */ +/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */ +/* 2003-12-26: Make sure Asante cards always work. 
*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static char version[] = + "v0.4 2001-05-15 David Huggins-Daines and others\n"; + +#define EI_SHIFT(x) (ei_local->reg_offset[x]) +#define ei_inb(port) in_8(port) +#define ei_outb(val, port) out_8(port, val) +#define ei_inb_p(port) in_8(port) +#define ei_outb_p(val, port) out_8(port, val) + +#include "lib8390.c" + +#define WD_START_PG 0x00 /* First page of TX buffer */ +#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */ +#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */ +#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG + /* First page of TX buffer */ + +/* + * Unfortunately it seems we have to hardcode these for the moment + * Shouldn't the card know about this? + * Does anyone know where to read it off the card? + * Do we trust the data provided by the card? + */ + +#define DAYNA_8390_BASE 0x80000 +#define DAYNA_8390_MEM 0x00000 + +#define CABLETRON_8390_BASE 0x90000 +#define CABLETRON_8390_MEM 0x00000 + +#define INTERLAN_8390_BASE 0xE0000 +#define INTERLAN_8390_MEM 0xD0000 + +enum mac8390_type { + MAC8390_NONE = -1, + MAC8390_APPLE, + MAC8390_ASANTE, + MAC8390_FARALLON, + MAC8390_CABLETRON, + MAC8390_DAYNA, + MAC8390_INTERLAN, + MAC8390_KINETICS, +}; + +static const char *cardname[] = { + "apple", + "asante", + "farallon", + "cabletron", + "dayna", + "interlan", + "kinetics", +}; + +static const int word16[] = { + 1, /* apple */ + 1, /* asante */ + 1, /* farallon */ + 1, /* cabletron */ + 0, /* dayna */ + 1, /* interlan */ + 0, /* kinetics */ +}; + +/* on which cards do we use NuBus resources? 
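+   (i.e. the entries below for which mac8390_init() reads the shared
+   memory base and length from the board's NuBus resource list
+   instead of the hardcoded constants above)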
*/ +static const int useresources[] = { + 1, /* apple */ + 1, /* asante */ + 1, /* farallon */ + 0, /* cabletron */ + 0, /* dayna */ + 0, /* interlan */ + 0, /* kinetics */ +}; + +enum mac8390_access { + ACCESS_UNKNOWN = 0, + ACCESS_32, + ACCESS_16, +}; + +extern int mac8390_memtest(struct net_device *dev); +static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev, + enum mac8390_type type); + +static int mac8390_open(struct net_device *dev); +static int mac8390_close(struct net_device *dev); +static void mac8390_no_reset(struct net_device *dev); +static void interlan_reset(struct net_device *dev); + +/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/ +static void sane_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); +static void sane_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void sane_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page); + +/* dayna_memcpy to and from card */ +static void dayna_memcpy_fromcard(struct net_device *dev, void *to, + int from, int count); +static void dayna_memcpy_tocard(struct net_device *dev, int to, + const void *from, int count); + +/* Dayna - Dayna/Kinetics use this */ +static void dayna_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); +static void dayna_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void dayna_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); + +#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) +#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) + +#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) + +/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ +static void slow_sane_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); +static void slow_sane_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void slow_sane_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); +static void word_memcpy_tocard(unsigned long tp, const void *fp, int count); +static void word_memcpy_fromcard(void *tp, unsigned long fp, int count); + +static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) +{ + switch (dev->dr_sw) { + case NUBUS_DRSW_3COM: + switch (dev->dr_hw) { + case NUBUS_DRHW_APPLE_SONIC_NB: + case NUBUS_DRHW_APPLE_SONIC_LC: + case NUBUS_DRHW_SONNET: + return MAC8390_NONE; + break; + default: + return MAC8390_APPLE; + break; + } + break; + + case NUBUS_DRSW_APPLE: + switch (dev->dr_hw) { + case NUBUS_DRHW_ASANTE_LC: + return MAC8390_NONE; + break; + case NUBUS_DRHW_CABLETRON: + return MAC8390_CABLETRON; + break; + default: + return MAC8390_APPLE; + break; + } + break; + + case NUBUS_DRSW_ASANTE: + return MAC8390_ASANTE; + break; + + case NUBUS_DRSW_TECHWORKS: + case NUBUS_DRSW_DAYNA2: + case NUBUS_DRSW_DAYNA_LC: + if (dev->dr_hw == NUBUS_DRHW_CABLETRON) + return MAC8390_CABLETRON; + else + return MAC8390_APPLE; + break; + + case NUBUS_DRSW_FARALLON: + return MAC8390_FARALLON; + break; + + case NUBUS_DRSW_KINETICS: + switch (dev->dr_hw) { + case NUBUS_DRHW_INTERLAN: + return MAC8390_INTERLAN; + break; + default: + return MAC8390_KINETICS; + break; + } + break; + + case NUBUS_DRSW_DAYNA: + /* + * These correspond to Dayna Sonic cards + * which use the macsonic driver + */ + if 
(dev->dr_hw == NUBUS_DRHW_SMC9194 || + dev->dr_hw == NUBUS_DRHW_INTERLAN) + return MAC8390_NONE; + else + return MAC8390_DAYNA; + break; + } + return MAC8390_NONE; +} + +static enum mac8390_access __init mac8390_testio(volatile unsigned long membase) +{ + unsigned long outdata = 0xA5A0B5B0; + unsigned long indata = 0x00000000; + /* Try writing 32 bits */ + memcpy_toio(membase, &outdata, 4); + /* Now compare them */ + if (memcmp_withio(&outdata, membase, 4) == 0) + return ACCESS_32; + /* Write 16 bit output */ + word_memcpy_tocard(membase, &outdata, 4); + /* Now read it back */ + word_memcpy_fromcard(&indata, membase, 4); + if (outdata == indata) + return ACCESS_16; + return ACCESS_UNKNOWN; +} + +static int __init mac8390_memsize(unsigned long membase) +{ + unsigned long flags; + int i, j; + + local_irq_save(flags); + /* Check up to 32K in 4K increments */ + for (i = 0; i < 8; i++) { + volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000)); + + /* Unwriteable - we have a fully decoded card and the + RAM end located */ + if (hwreg_present(m) == 0) + break; + + /* write a distinctive byte */ + *m = 0xA5A0 | i; + /* check that we read back what we wrote */ + if (*m != (0xA5A0 | i)) + break; + + /* check for partial decode and wrap */ + for (j = 0; j < i; j++) { + volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000)); + if (*p != (0xA5A0 | j)) + break; + } + } + local_irq_restore(flags); + /* + * in any case, we stopped once we tried one block too many, + * or once we reached 32K + */ + return i * 0x1000; +} + +static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev, + enum mac8390_type cardtype) +{ + struct nubus_dir dir; + struct nubus_dirent ent; + int offset; + volatile unsigned short *i; + + printk_once(KERN_INFO pr_fmt("%s"), version); + + dev->irq = SLOT2IRQ(ndev->board->slot); + /* This is getting to be a habit */ + dev->base_addr = (ndev->board->slot_addr | + ((ndev->board->slot & 0xf) << 20)); + + /* + * Get some Nubus info - we will trust the card's idea + * of where its memory and registers are. 
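+ * (Concretely: the functional directory is fetched first, then the
+ * NUBUS_RESID_MAC_ADDRESS resource, and, for boards flagged in
+ * useresources[], the minor base/length resources describing the
+ * shared memory.)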
+ */ + + if (nubus_get_func_dir(ndev, &dir) == -1) { + pr_err("%s: Unable to get Nubus functional directory for slot %X!\n", + dev->name, ndev->board->slot); + return false; + } + + /* Get the MAC address */ + if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) { + pr_info("%s: Couldn't get MAC address!\n", dev->name); + return false; + } + + nubus_get_rsrc_mem(dev->dev_addr, &ent, 6); + + if (useresources[cardtype] == 1) { + nubus_rewinddir(&dir); + if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, + &ent) == -1) { + pr_err("%s: Memory offset resource for slot %X not found!\n", + dev->name, ndev->board->slot); + return false; + } + nubus_get_rsrc_mem(&offset, &ent, 4); + dev->mem_start = dev->base_addr + offset; + /* yes, this is how the Apple driver does it */ + dev->base_addr = dev->mem_start + 0x10000; + nubus_rewinddir(&dir); + if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, + &ent) == -1) { + pr_info("%s: Memory length resource for slot %X not found, probing\n", + dev->name, ndev->board->slot); + offset = mac8390_memsize(dev->mem_start); + } else { + nubus_get_rsrc_mem(&offset, &ent, 4); + } + dev->mem_end = dev->mem_start + offset; + } else { + switch (cardtype) { + case MAC8390_KINETICS: + case MAC8390_DAYNA: /* it's the same */ + dev->base_addr = (int)(ndev->board->slot_addr + + DAYNA_8390_BASE); + dev->mem_start = (int)(ndev->board->slot_addr + + DAYNA_8390_MEM); + dev->mem_end = dev->mem_start + + mac8390_memsize(dev->mem_start); + break; + case MAC8390_INTERLAN: + dev->base_addr = (int)(ndev->board->slot_addr + + INTERLAN_8390_BASE); + dev->mem_start = (int)(ndev->board->slot_addr + + INTERLAN_8390_MEM); + dev->mem_end = dev->mem_start + + mac8390_memsize(dev->mem_start); + break; + case MAC8390_CABLETRON: + dev->base_addr = (int)(ndev->board->slot_addr + + CABLETRON_8390_BASE); + dev->mem_start = (int)(ndev->board->slot_addr + + CABLETRON_8390_MEM); + /* The base address is unreadable if 0x00 + * has been written to the command register + * Reset the chip by writing E8390_NODMA + + * E8390_PAGE0 + E8390_STOP just to be + * sure + */ + i = (void *)dev->base_addr; + *i = 0x21; + dev->mem_end = dev->mem_start + + mac8390_memsize(dev->mem_start); + break; + + default: + pr_err("Card type %s is unsupported, sorry\n", + ndev->board->name); + return false; + } + } + + return true; +} + +struct net_device * __init mac8390_probe(int unit) +{ + struct net_device *dev; + struct nubus_dev *ndev = NULL; + int err = -ENODEV; + + static unsigned int slots; + + enum mac8390_type cardtype; + + /* probably should check for Nubus instead */ + + if (!MACH_IS_MAC) + return ERR_PTR(-ENODEV); + + dev = ____alloc_ei_netdev(0); + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) + sprintf(dev->name, "eth%d", unit); + + while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, + ndev))) { + /* Have we seen it already? 
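+	   (slots is a bitmask of NuBus slot numbers already probed, so
+	   each board is claimed at most once)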
*/
+		if (slots & (1 << ndev->board->slot))
+			continue;
+		slots |= 1 << ndev->board->slot;
+
+		cardtype = mac8390_ident(ndev);
+		if (cardtype == MAC8390_NONE)
+			continue;
+
+		if (!mac8390_init(dev, ndev, cardtype))
+			continue;
+
+		/* Do the nasty 8390 stuff */
+		if (!mac8390_initdev(dev, ndev, cardtype))
+			break;
+	}
+
+	if (!ndev)
+		goto out;
+	err = register_netdev(dev);
+	if (err)
+		goto out;
+	return dev;
+
+out:
+	free_netdev(dev);
+	return ERR_PTR(err);
+}
+
+#ifdef MODULE
+MODULE_AUTHOR("David Huggins-Daines and others");
+MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* overkill, of course */
+static struct net_device *dev_mac8390[15];
+int init_module(void)
+{
+	int i;
+	for (i = 0; i < 15; i++) {
+		struct net_device *dev = mac8390_probe(-1);
+		if (IS_ERR(dev))
+			break;
+		dev_mac8390[i] = dev;
+	}
+	if (!i) {
+		pr_notice("No useable cards found, driver NOT installed.\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+void cleanup_module(void)
+{
+	int i;
+	for (i = 0; i < 15; i++) {
+		struct net_device *dev = dev_mac8390[i];
+		if (dev) {
+			unregister_netdev(dev);
+			free_netdev(dev);
+		}
+	}
+}
+
+#endif /* MODULE */
+
+static const struct net_device_ops mac8390_netdev_ops = {
+	.ndo_open		= mac8390_open,
+	.ndo_stop		= mac8390_close,
+	.ndo_start_xmit		= __ei_start_xmit,
+	.ndo_tx_timeout		= __ei_tx_timeout,
+	.ndo_get_stats		= __ei_get_stats,
+	.ndo_set_multicast_list	= __ei_set_multicast_list,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= __ei_poll,
+#endif
+};
+
+static int __init mac8390_initdev(struct net_device *dev,
+				  struct nubus_dev *ndev,
+				  enum mac8390_type type)
+{
+	static u32 fwrd4_offsets[16] = {
+		0,  4,  8, 12,
+		16, 20, 24, 28,
+		32, 36, 40, 44,
+		48, 52, 56, 60
+	};
+	static u32 back4_offsets[16] = {
+		60, 56, 52, 48,
+		44, 40, 36, 32,
+		28, 24, 20, 16,
+		12,  8,  4,  0
+	};
+	static u32 fwrd2_offsets[16] = {
+		0,  2,  4,  6,
+		8, 10, 12, 14,
+		16, 18, 20, 22,
+		24, 26, 28, 30
+	};
+
+	int access_bitmode = 0;
+
+	/* Now fill in our stuff */
+	dev->netdev_ops = &mac8390_netdev_ops;
+
+	/* GAR, ei_status is actually a macro even though it looks global */
+	ei_status.name = cardname[type];
+	ei_status.word16 = word16[type];
+
+	/* Cabletron's TX/RX buffers are backwards */
+	if (type == MAC8390_CABLETRON) {
+		ei_status.tx_start_page = CABLETRON_TX_START_PG;
+		ei_status.rx_start_page = CABLETRON_RX_START_PG;
+		ei_status.stop_page = CABLETRON_RX_STOP_PG;
+		ei_status.rmem_start = dev->mem_start;
+		ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
+	} else {
+		ei_status.tx_start_page = WD_START_PG;
+		ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+		ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+		ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
+		ei_status.rmem_end = dev->mem_end;
+	}
+
+	/* Fill in model-specific information and functions */
+	switch (type) {
+	case MAC8390_FARALLON:
+	case MAC8390_APPLE:
+		switch (mac8390_testio(dev->mem_start)) {
+		case ACCESS_UNKNOWN:
+			pr_err("Don't know how to access card memory!\n");
+			return -ENODEV;
+			break;
+
+		case ACCESS_16:
+			/* 16 bit card, register map is reversed */
+			ei_status.reset_8390 = mac8390_no_reset;
+			ei_status.block_input = slow_sane_block_input;
+			ei_status.block_output = slow_sane_block_output;
+			ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
+			ei_status.reg_offset = back4_offsets;
+			break;
+
+		case ACCESS_32:
+			/* 32 
bit card, register map is reversed */ + ei_status.reset_8390 = mac8390_no_reset; + ei_status.block_input = sane_block_input; + ei_status.block_output = sane_block_output; + ei_status.get_8390_hdr = sane_get_8390_hdr; + ei_status.reg_offset = back4_offsets; + access_bitmode = 1; + break; + } + break; + + case MAC8390_ASANTE: + /* Some Asante cards pass the 32 bit test + * but overwrite system memory when run at 32 bit. + * so we run them all at 16 bit. + */ + ei_status.reset_8390 = mac8390_no_reset; + ei_status.block_input = slow_sane_block_input; + ei_status.block_output = slow_sane_block_output; + ei_status.get_8390_hdr = slow_sane_get_8390_hdr; + ei_status.reg_offset = back4_offsets; + break; + + case MAC8390_CABLETRON: + /* 16 bit card, register map is short forward */ + ei_status.reset_8390 = mac8390_no_reset; + ei_status.block_input = slow_sane_block_input; + ei_status.block_output = slow_sane_block_output; + ei_status.get_8390_hdr = slow_sane_get_8390_hdr; + ei_status.reg_offset = fwrd2_offsets; + break; + + case MAC8390_DAYNA: + case MAC8390_KINETICS: + /* 16 bit memory, register map is forward */ + /* dayna and similar */ + ei_status.reset_8390 = mac8390_no_reset; + ei_status.block_input = dayna_block_input; + ei_status.block_output = dayna_block_output; + ei_status.get_8390_hdr = dayna_get_8390_hdr; + ei_status.reg_offset = fwrd4_offsets; + break; + + case MAC8390_INTERLAN: + /* 16 bit memory, register map is forward */ + ei_status.reset_8390 = interlan_reset; + ei_status.block_input = slow_sane_block_input; + ei_status.block_output = slow_sane_block_output; + ei_status.get_8390_hdr = slow_sane_get_8390_hdr; + ei_status.reg_offset = fwrd4_offsets; + break; + + default: + pr_err("Card type %s is unsupported, sorry\n", + ndev->board->name); + return -ENODEV; + } + + __NS8390_init(dev, 0); + + /* Good, done, now spit out some messages */ + pr_info("%s: %s in slot %X (type %s)\n", + dev->name, ndev->board->name, ndev->board->slot, + cardname[type]); + pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n", + dev->dev_addr, dev->irq, + (unsigned int)(dev->mem_end - dev->mem_start) >> 10, + dev->mem_start, access_bitmode ? 32 : 16); + return 0; +} + +static int mac8390_open(struct net_device *dev) +{ + int err; + + __ei_open(dev); + err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev); + if (err) + pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq); + return err; +} + +static int mac8390_close(struct net_device *dev) +{ + free_irq(dev->irq, dev); + __ei_close(dev); + return 0; +} + +static void mac8390_no_reset(struct net_device *dev) +{ + ei_status.txing = 0; + if (ei_debug > 1) + pr_info("reset not supported\n"); +} + +static void interlan_reset(struct net_device *dev) +{ + unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq)); + if (ei_debug > 1) + pr_info("Need to reset the NS8390 t=%lu...", jiffies); + ei_status.txing = 0; + target[0xC0000] = 0; + if (ei_debug > 1) + pr_cont("reset complete\n"); +} + +/* dayna_memcpy_fromio/dayna_memcpy_toio */ +/* directly from daynaport.c by Alan Cox */ +static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, + int count) +{ + volatile unsigned char *ptr; + unsigned char *target = to; + from <<= 1; /* word, skip overhead */ + ptr = (unsigned char *)(dev->mem_start+from); + /* Leading byte? 
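+   The card exposes 16 data bits per 32-bit host longword, hence
+   from <<= 1 above.  Worked example: card byte offset 5 doubles to
+   host offset 10; bit 1 being set marks it as the odd byte of a
+   card word, which lives at ptr[-1], i.e. host offset 9.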
*/ + if (from & 2) { + *target++ = ptr[-1]; + ptr += 2; + count--; + } + while (count >= 2) { + *(unsigned short *)target = *(unsigned short volatile *)ptr; + ptr += 4; /* skip cruft */ + target += 2; + count -= 2; + } + /* Trailing byte? */ + if (count) + *target = *ptr; +} + +static void dayna_memcpy_tocard(struct net_device *dev, int to, + const void *from, int count) +{ + volatile unsigned short *ptr; + const unsigned char *src = from; + to <<= 1; /* word, skip overhead */ + ptr = (unsigned short *)(dev->mem_start+to); + /* Leading byte? */ + if (to & 2) { /* avoid a byte write (stomps on other data) */ + ptr[-1] = (ptr[-1]&0xFF00)|*src++; + ptr++; + count--; + } + while (count >= 2) { + *ptr++ = *(unsigned short *)src; /* Copy and */ + ptr++; /* skip cruft */ + src += 2; + count -= 2; + } + /* Trailing byte? */ + if (count) { + /* card doesn't like byte writes */ + *ptr = (*ptr & 0x00FF) | (*src << 8); + } +} + +/* sane block input/output */ +static void sane_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page) +{ + unsigned long hdr_start = (ring_page - WD_START_PG)<<8; + memcpy_fromio(hdr, dev->mem_start + hdr_start, 4); + /* Fix endianness */ + hdr->count = swab16(hdr->count); +} + +static void sane_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned long xfer_base = ring_offset - (WD_START_PG<<8); + unsigned long xfer_start = xfer_base + dev->mem_start; + + if (xfer_start + count > ei_status.rmem_end) { + /* We must wrap the input move. */ + int semi_count = ei_status.rmem_end - xfer_start; + memcpy_fromio(skb->data, dev->mem_start + xfer_base, + semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, + count); + } else { + memcpy_fromio(skb->data, dev->mem_start + xfer_base, count); + } +} + +static void sane_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + long shmem = (start_page - WD_START_PG)<<8; + + memcpy_toio(dev->mem_start + shmem, buf, count); +} + +/* dayna block input/output */ +static void dayna_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page) +{ + unsigned long hdr_start = (ring_page - WD_START_PG)<<8; + + dayna_memcpy_fromcard(dev, hdr, hdr_start, 4); + /* Fix endianness */ + hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8); +} + +static void dayna_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned long xfer_base = ring_offset - (WD_START_PG<<8); + unsigned long xfer_start = xfer_base+dev->mem_start; + + /* Note the offset math is done in card memory space which is word + per long onto our space. */ + + if (xfer_start + count > ei_status.rmem_end) { + /* We must wrap the input move. 
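+   Example: with 16 bytes left before rmem_end and count == 100, the
+   first dayna_memcpy_fromcard() below pulls those 16 bytes and the
+   second fetches the remaining 84 from the start of the ring
+   (rmem_start, converted back to a card offset).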
*/ + int semi_count = ei_status.rmem_end - xfer_start; + dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count); + count -= semi_count; + dayna_memcpy_fromcard(dev, skb->data + semi_count, + ei_status.rmem_start - dev->mem_start, + count); + } else { + dayna_memcpy_fromcard(dev, skb->data, xfer_base, count); + } +} + +static void dayna_block_output(struct net_device *dev, int count, + const unsigned char *buf, + int start_page) +{ + long shmem = (start_page - WD_START_PG)<<8; + + dayna_memcpy_tocard(dev, shmem, buf, count); +} + +/* Cabletron block I/O */ +static void slow_sane_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, + int ring_page) +{ + unsigned long hdr_start = (ring_page - WD_START_PG)<<8; + word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4); + /* Register endianism - fix here rather than 8390.c */ + hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); +} + +static void slow_sane_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned long xfer_base = ring_offset - (WD_START_PG<<8); + unsigned long xfer_start = xfer_base+dev->mem_start; + + if (xfer_start + count > ei_status.rmem_end) { + /* We must wrap the input move. */ + int semi_count = ei_status.rmem_end - xfer_start; + word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base, + semi_count); + count -= semi_count; + word_memcpy_fromcard(skb->data + semi_count, + ei_status.rmem_start, count); + } else { + word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base, + count); + } +} + +static void slow_sane_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + long shmem = (start_page - WD_START_PG)<<8; + + word_memcpy_tocard(dev->mem_start + shmem, buf, count); +} + +static void word_memcpy_tocard(unsigned long tp, const void *fp, int count) +{ + volatile unsigned short *to = (void *)tp; + const unsigned short *from = fp; + + count++; + count /= 2; + + while (count--) + *to++ = *from++; +} + +static void word_memcpy_fromcard(void *tp, unsigned long fp, int count) +{ + unsigned short *to = tp; + const volatile unsigned short *from = (const void *)fp; + + count++; + count /= 2; + + while (count--) + *to++ = *from++; +} + + diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c new file mode 100644 index 0000000..7298a34 --- /dev/null +++ b/drivers/net/ethernet/8390/ne-h8300.c @@ -0,0 +1,685 @@ +/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */ +/* + original ne.c + Written 1992-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + H8/300 modified + Yoshinori Sato +*/ + +static const char version1[] = +"ne-h8300.c:v1.00 2004/04/11 ysato\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define EI_SHIFT(x) (ei_local->reg_offset[x]) + +#include "8390.h" + +#define DRV_NAME "ne-h8300" + +/* Some defines that people can play with if so inclined. */ + +/* Do we perform extra sanity checks on stuff ? */ +/* #define NE_SANITY_CHECK */ + +/* Do we implement the read before write bugfix ? */ +/* #define NE_RW_BUGFIX */ + +/* Do we have a non std. 
amount of memory? (in units of 256 byte pages) */ +/* #define PACKETBUF_MEMSIZE 0x40 */ + +/* A zero-terminated list of I/O addresses to be probed at boot. */ + +/* ---- No user-serviceable parts below ---- */ + +static const char version[] = + "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#include "lib8390.c" + +#define NE_BASE (dev->base_addr) +#define NE_CMD 0x00 +#define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */ +#define NE_RESET (ei_status.word16?0x3f:0x1f) /* Issue a read to reset, a write to clear. */ +#define NE_IO_EXTENT (ei_status.word16?0x40:0x20) + +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + +static int ne_probe1(struct net_device *dev, int ioaddr); + +static int ne_open(struct net_device *dev); +static int ne_close(struct net_device *dev); + +static void ne_reset_8390(struct net_device *dev); +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void ne_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ne_block_output(struct net_device *dev, const int count, + const unsigned char *buf, const int start_page); + + +static u32 reg_offset[16]; + +static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr) +{ + struct ei_device *ei_local = netdev_priv(dev); + int i; + unsigned char bus_width; + + bus_width = *(volatile unsigned char *)ABWCR; + bus_width &= 1 << ((base_addr >> 21) & 7); + + for (i = 0; i < ARRAY_SIZE(reg_offset); i++) + if (bus_width == 0) + reg_offset[i] = i * 2 + 1; + else + reg_offset[i] = i; + + ei_local->reg_offset = reg_offset; + return 0; +} + +static int __initdata h8300_ne_count = 0; +#ifdef CONFIG_H8300H_H8MAX +static unsigned long __initdata h8300_ne_base[] = { 0x800600 }; +static int h8300_ne_irq[] = {EXT_IRQ4}; +#endif +#ifdef CONFIG_H8300H_AKI3068NET +static unsigned long __initdata h8300_ne_base[] = { 0x200000 }; +static int h8300_ne_irq[] = {EXT_IRQ5}; +#endif + +static inline int init_dev(struct net_device *dev) +{ + if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) { + dev->base_addr = h8300_ne_base[h8300_ne_count]; + dev->irq = h8300_ne_irq[h8300_ne_count]; + h8300_ne_count++; + return 0; + } else + return -ENODEV; +} + +/* Probe for various non-shared-memory ethercards. + + NEx000-clone boards have a Station Address PROM (SAPROM) in the packet + buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of + the SAPROM, while other supposed NE2000 clones must be detected by their + SA prefix. + + Reading the SAPROM from a word-wide card with the 8390 set in byte-wide + mode results in doubled values, which can be detected and compensated for. + + The probe is also responsible for initializing the card and filling + in the 'dev' and 'ei_status' structures. + + We use the minimum memory size for some ethercard product lines, iff we can't + distinguish models. You can increase the packet buffer size by setting + PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are: + E1010 starts at 0x100 and ends at 0x2000. + E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory") + E2010 starts at 0x100 and ends at 0x4000. + E2010-x starts at 0x100 and ends at 0xffff. */ + +static int __init do_ne_probe(struct net_device *dev) +{ + unsigned int base_addr = dev->base_addr; + + /* First check any supplied i/o locations. User knows best. 
*/ + if (base_addr > 0x1ff) /* Check a single specified location. */ + return ne_probe1(dev, base_addr); + else if (base_addr != 0) /* Don't probe at all. */ + return -ENXIO; + + return -ENODEV; +} + +static void cleanup_card(struct net_device *dev) +{ + free_irq(dev->irq, dev); + release_region(dev->base_addr, NE_IO_EXTENT); +} + +#ifndef MODULE +struct net_device * __init ne_probe(int unit) +{ + struct net_device *dev = ____alloc_ei_netdev(0); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + if (init_dev(dev)) + return ERR_PTR(-ENODEV); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = init_reg_offset(dev, dev->base_addr); + if (err) + goto out; + + err = do_ne_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops ne_netdev_ops = { + .ndo_open = ne_open, + .ndo_stop = ne_close, + + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = __ei_poll, +#endif +}; + +static int __init ne_probe1(struct net_device *dev, int ioaddr) +{ + int i; + unsigned char SA_prom[16]; + int wordlength = 2; + const char *name = NULL; + int start_page, stop_page; + int reg0, ret; + static unsigned version_printed; + struct ei_device *ei_local = netdev_priv(dev); + unsigned char bus_width; + + if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + reg0 = inb_p(ioaddr); + if (reg0 == 0xFF) { + ret = -ENODEV; + goto err_out; + } + + /* Do a preliminary verification that we have a 8390. */ + { + int regd; + outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); + regd = inb_p(ioaddr + EI_SHIFT(0x0d)); + outb_p(0xff, ioaddr + EI_SHIFT(0x0d)); + outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); + inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ + if (inb_p(ioaddr + EN0_COUNTER0) != 0) { + outb_p(reg0, ioaddr + EI_SHIFT(0)); + outb_p(regd, ioaddr + EI_SHIFT(0x0d)); /* Restore the old values. */ + ret = -ENODEV; + goto err_out; + } + } + + if (ei_debug && version_printed++ == 0) + printk(KERN_INFO "%s", version1); + + printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr); + + /* Read the 16 bytes of station address PROM. + We must first initialize registers, similar to NS8390_init(eifdev, 0). + We can't reliably read the SAPROM address without this. + (I learned the hard way!). */ + { + struct {unsigned char value, offset; } program_seq[] = + { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. 
*/ + {0x00, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); + + } + bus_width = *(volatile unsigned char *)ABWCR; + bus_width &= 1 << ((ioaddr >> 21) & 7); + ei_status.word16 = (bus_width == 0); /* temporary setting */ + for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) { + SA_prom[i] = inb_p(ioaddr + NE_DATAPORT); + inb_p(ioaddr + NE_DATAPORT); /* dummy read */ + } + + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + if (bus_width) + wordlength = 1; + else + outb_p(0x49, ioaddr + EN0_DCFG); + + /* Set up the rest of the parameters. */ + name = (wordlength == 2) ? "NE2000" : "NE1000"; + + if (! dev->irq) { + printk(" failed to detect IRQ line.\n"); + ret = -EAGAIN; + goto err_out; + } + + /* Snarf the interrupt now. There's no point in waiting since we cannot + share and the board will usually be enabled. */ + ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev); + if (ret) { + printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); + goto err_out; + } + + dev->base_addr = ioaddr; + + for(i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = SA_prom[i]; + printk(" %pM\n", dev->dev_addr); + + printk("%s: %s found at %#x, using IRQ %d.\n", + dev->name, name, ioaddr, dev->irq); + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = (wordlength == 2); + + ei_status.rx_start_page = start_page + TX_PAGES; +#ifdef PACKETBUF_MEMSIZE + /* Allow the packet buffer size to be overridden by know-it-alls. */ + ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; +#endif + + ei_status.reset_8390 = &ne_reset_8390; + ei_status.block_input = &ne_block_input; + ei_status.block_output = &ne_block_output; + ei_status.get_8390_hdr = &ne_get_8390_hdr; + ei_status.priv = 0; + + dev->netdev_ops = &ne_netdev_ops; + + __NS8390_init(dev, 0); + + ret = register_netdev(dev); + if (ret) + goto out_irq; + return 0; +out_irq: + free_irq(dev->irq, dev); +err_out: + release_region(ioaddr, NE_IO_EXTENT); + return ret; +} + +static int ne_open(struct net_device *dev) +{ + __ei_open(dev); + return 0; +} + +static int ne_close(struct net_device *dev) +{ + if (ei_debug > 1) + printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); + __ei_close(dev); + return 0; +} + +/* Hard reset the card. This used to pause for the same period that a + 8390 reset command required, but that shouldn't be necessary. */ + +static void ne_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + struct ei_device *ei_local = netdev_priv(dev); + + if (ei_debug > 1) + printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); + + /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ + outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); + break; + } + outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. 
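+   (Editor's note: the header fetch below is one instance of this
+   file's remote-DMA read pattern; a sketch of the register sequence,
+   with all names taken from the code that follows:
+
+       outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO);
+       outb_p(0, NE_BASE + EN0_RCNTHI);
+       outb_p(0, NE_BASE + EN0_RSARLO);
+       outb_p(ring_page, NE_BASE + EN0_RSARHI);
+       outb_p(E8390_RREAD + E8390_START, NE_BASE + NE_CMD);
+
+   i.e. program a byte count and a source address, start the read,
+   then drain NE_DATAPORT. Because the header sits on a page
+   boundary, RSARLO is simply 0.)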
*/ + +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD); + outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO); + outb_p(0, NE_BASE + EN0_RCNTHI); + outb_p(0, NE_BASE + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, NE_BASE + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); + + if (ei_status.word16) { + int len; + unsigned short *p = (unsigned short *)hdr; + for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--) + *p++ = inw(NE_BASE + NE_DATAPORT); + } else + insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); + + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; + + le16_to_cpus(&hdr->count); +} + +/* Block input and output, similar to the Crynwr packet driver. If you + are porting to a new ethercard, look at the packet driver source for hints. + The NEx000 doesn't share the on-board packet memory -- you have to put + the packet out through the "remote DMA" dataport using outb. */ + +static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + struct ei_device *ei_local = netdev_priv(dev); +#ifdef NE_SANITY_CHECK + int xfer_count = count; +#endif + char *buf = skb->data; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_block_input " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD); + outb_p(count & 0xff, NE_BASE + EN0_RCNTLO); + outb_p(count >> 8, NE_BASE + EN0_RCNTHI); + outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO); + outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); + if (ei_status.word16) + { + int len; + unsigned short *p = (unsigned short *)buf; + for (len = count>>1; len > 0; len--) + *p++ = inw(NE_BASE + NE_DATAPORT); + if (count & 0x01) + { + buf[count-1] = inb(NE_BASE + NE_DATAPORT); +#ifdef NE_SANITY_CHECK + xfer_count++; +#endif + } + } else { + insb(NE_BASE + NE_DATAPORT, buf, count); + } + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. If you see + this message you either 1) have a slightly incompatible clone + or 2) have noise/speed problems with your bus. */ + + if (ei_debug > 1) + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here + -- it's broken for Rx on some cards! */ + int high = inb_p(NE_BASE + EN0_RSARHI); + int low = inb_p(NE_BASE + EN0_RSARLO); + addr = (high << 8) + low; + if (((ring_offset + xfer_count) & 0xff) == low) + break; + } while (--tries > 0); + if (tries <= 0) + printk(KERN_WARNING "%s: RX transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, ring_offset + xfer_count, addr); + } +#endif + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. 
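+   (Editor's note: every remote-DMA transfer in this driver is
+   bracketed the same way; a sketch of the invariant, restating what
+   the function above already does:
+
+       ei_status.dmaing |= 0x01;                  claim the engine
+       ... program count/address, move data via NE_DATAPORT ...
+       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);      ack remote-DMA complete
+       ei_status.dmaing &= ~0x01;                 release the engine
+
+   The dmaing flag is exactly what the KERN_EMERG conflict check at
+   the top of each routine tests.)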
*/ + ei_status.dmaing &= ~0x01; +} + +static void ne_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned long dma_start; +#ifdef NE_SANITY_CHECK + int retries = 0; +#endif + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + + if (ei_status.word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_block_output." + "[DMAstat:%d][irqlock:%d]\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD); + +#ifdef NE_SANITY_CHECK +retry: +#endif + +#ifdef NE8390_RW_BUGFIX + /* Handle the read-before-write bug the same way as the + Crynwr packet driver -- the NatSemi method doesn't work. + Actually this doesn't always work either, but if you have + problems with your NEx000 this is better than nothing! */ + + outb_p(0x42, NE_BASE + EN0_RCNTLO); + outb_p(0x00, NE_BASE + EN0_RCNTHI); + outb_p(0x42, NE_BASE + EN0_RSARLO); + outb_p(0x00, NE_BASE + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); + /* Make certain that the dummy read has occurred. */ + udelay(6); +#endif + + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); + + /* Now the normal output. */ + outb_p(count & 0xff, NE_BASE + EN0_RCNTLO); + outb_p(count >> 8, NE_BASE + EN0_RCNTHI); + outb_p(0x00, NE_BASE + EN0_RSARLO); + outb_p(start_page, NE_BASE + EN0_RSARHI); + + outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD); + if (ei_status.word16) { + int len; + unsigned short *p = (unsigned short *)buf; + for (len = count>>1; len > 0; len--) + outw(*p++, NE_BASE + NE_DATAPORT); + } else { + outsb(NE_BASE + NE_DATAPORT, buf, count); + } + + dma_start = jiffies; + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. */ + + if (ei_debug > 1) + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + int high = inb_p(NE_BASE + EN0_RSARHI); + int low = inb_p(NE_BASE + EN0_RSARLO); + addr = (high << 8) + low; + if ((start_page << 8) + count == addr) + break; + } while (--tries > 0); + + if (tries <= 0) + { + printk(KERN_WARNING "%s: Tx packet transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, (start_page << 8) + count, addr); + if (retries++ == 0) + goto retry; + } + } +#endif + + while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ + printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); + ne_reset_8390(dev); + __NS8390_init(dev,1); + break; + } + + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. 
*/ + ei_status.dmaing &= ~0x01; +} + + +#ifdef MODULE +#define MAX_NE_CARDS 1 /* Max number of NE cards per module */ +static struct net_device *dev_ne[MAX_NE_CARDS]; +static int io[MAX_NE_CARDS]; +static int irq[MAX_NE_CARDS]; +static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */ + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(bad, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s)"); +MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that no ISA autoprobe takes place. We can't guarantee +that the ne2k probe is the last 8390 based probe to take place (as it +is at boot) and so the probe will get confused by any other 8390 cards. +ISA device autoprobes on a running machine are not recommended anyway. */ + +int init_module(void) +{ + int this_dev, found = 0; + int err; + + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + struct net_device *dev = ____alloc_ei_netdev(0); + if (!dev) + break; + if (io[this_dev]) { + dev->irq = irq[this_dev]; + dev->mem_end = bad[this_dev]; + dev->base_addr = io[this_dev]; + } else { + dev->base_addr = h8300_ne_base[this_dev]; + dev->irq = h8300_ne_irq[this_dev]; + } + err = init_reg_offset(dev, dev->base_addr); + if (!err) { + if (do_ne_probe(dev) == 0) { + dev_ne[found++] = dev; + continue; + } + } + free_netdev(dev); + if (found) + break; + if (io[this_dev] != 0) + printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr); + else + printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n"); + return -ENXIO; + } + if (found) + return 0; + return -ENODEV; +} + +void cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + struct net_device *dev = dev_ne[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c new file mode 100644 index 0000000..1063093 --- /dev/null +++ b/drivers/net/ethernet/8390/ne.c @@ -0,0 +1,1008 @@ +/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */ +/* + Written 1992-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + This driver should work with many programmed-I/O 8390-based ethernet + boards. Currently it supports the NE1000, NE2000, many clones, + and some Cabletron products. + + Changelog: + + Paul Gortmaker : use ENISR_RDC to monitor Tx PIO uploads, made + sanity checks and bad clone support optional. + Paul Gortmaker : new reset code, reset card after probe at boot. + Paul Gortmaker : multiple card support for module users. + Paul Gortmaker : Support for PCI ne2k clones, similar to lance.c + Paul Gortmaker : Allow users with bad cards to avoid full probe. + Paul Gortmaker : PCI probe changes, more PCI cards supported. + rjohnson@analogic.com : Changed init order so an interrupt will only + occur after memory is allocated for dev->priv. 
Deallocated memory + last in cleanup_modue() + Richard Guenther : Added support for ISAPnP cards + Paul Gortmaker : Discontinued PCI support - use ne2k-pci.c instead. + Hayato Fujiwara : Add m32r support. + +*/ + +/* Routines for the NatSemi-based designs (NE[12]000). */ + +static const char version1[] = +"ne.c:v1.10 9/23/94 Donald Becker (becker@scyld.com)\n"; +static const char version2[] = +"Last modified Nov 1, 2000 by Paul Gortmaker\n"; + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8390.h" + +#define DRV_NAME "ne" + +/* Some defines that people can play with if so inclined. */ + +/* Do we support clones that don't adhere to 14,15 of the SAprom ? */ +#define SUPPORT_NE_BAD_CLONES +/* 0xbad = bad sig or no reset ack */ +#define BAD 0xbad + +#define MAX_NE_CARDS 4 /* Max number of NE cards per module */ +static struct platform_device *pdev_ne[MAX_NE_CARDS]; +static int io[MAX_NE_CARDS]; +static int irq[MAX_NE_CARDS]; +static int bad[MAX_NE_CARDS]; + +#ifdef MODULE +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(bad, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O base address(es),required"); +MODULE_PARM_DESC(irq, "IRQ number(s)"); +MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures"); +MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver"); +MODULE_LICENSE("GPL"); +#endif /* MODULE */ + +/* Do we perform extra sanity checks on stuff ? */ +/* #define NE_SANITY_CHECK */ + +/* Do we implement the read before write bugfix ? */ +/* #define NE_RW_BUGFIX */ + +/* Do we have a non std. amount of memory? (in units of 256 byte pages) */ +/* #define PACKETBUF_MEMSIZE 0x40 */ + +/* This is set up so that no ISA autoprobe takes place. We can't guarantee +that the ne2k probe is the last 8390 based probe to take place (as it +is at boot) and so the probe will get confused by any other 8390 cards. +ISA device autoprobes on a running machine are not recommended anyway. */ +#if !defined(MODULE) && (defined(CONFIG_ISA) || defined(CONFIG_M32R)) +/* Do we need a portlist for the ISA auto-probe ? */ +#define NEEDS_PORTLIST +#endif + +/* A zero-terminated list of I/O addresses to be probed at boot. */ +#ifdef NEEDS_PORTLIST +static unsigned int netcard_portlist[] __initdata = { + 0x300, 0x280, 0x320, 0x340, 0x360, 0x380, 0 +}; +#endif + +static struct isapnp_device_id isapnp_clone_list[] __initdata = { + { ISAPNP_CARD_ID('A','X','E',0x2011), + ISAPNP_VENDOR('A','X','E'), ISAPNP_FUNCTION(0x2011), + (long) "NetGear EA201" }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, + ISAPNP_VENDOR('E','D','I'), ISAPNP_FUNCTION(0x0216), + (long) "NN NE2000" }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, + ISAPNP_VENDOR('P','N','P'), ISAPNP_FUNCTION(0x80d6), + (long) "Generic PNP" }, + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(isapnp, isapnp_clone_list); + +#ifdef SUPPORT_NE_BAD_CLONES +/* A list of bad clones that we none-the-less recognize. */ +static struct { const char *name8, *name16; unsigned char SAprefix[4];} +bad_clone_list[] __initdata = { + {"DE100", "DE200", {0x00, 0xDE, 0x01,}}, + {"DE120", "DE220", {0x00, 0x80, 0xc8,}}, + {"DFI1000", "DFI2000", {'D', 'F', 'I',}}, /* Original, eh? */ + {"EtherNext UTP8", "EtherNext UTP16", {0x00, 0x00, 0x79}}, + {"NE1000","NE2000-invalid", {0x00, 0x00, 0xd8}}, /* Ancient real NE1000. */ + {"NN1000", "NN2000", {0x08, 0x03, 0x08}}, /* Outlaw no-name clone. */ + {"4-DIM8","4-DIM16", {0x00,0x00,0x4d,}}, /* Outlaw 4-Dimension cards. 
*/ + {"Con-Intl_8", "Con-Intl_16", {0x00, 0x00, 0x24}}, /* Connect Int'nl */ + {"ET-100","ET-200", {0x00, 0x45, 0x54}}, /* YANG and YA clone */ + {"COMPEX","COMPEX16",{0x00,0x80,0x48}}, /* Broken ISA Compex cards */ + {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */ + {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ + {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ +#ifdef CONFIG_MACH_TX49XX + {"RBHMA4X00-RTL8019", "RBHMA4X00-RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ +#endif + {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ + {NULL,} +}; +#endif + +/* ---- No user-serviceable parts below ---- */ + +#define NE_BASE (dev->base_addr) +#define NE_CMD 0x00 +#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ +#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */ +#define NE_IO_EXTENT 0x20 + +#define NE1SM_START_PG 0x20 /* First page of TX buffer */ +#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + +#if defined(CONFIG_PLAT_MAPPI) +# define DCR_VAL 0x4b +#elif defined(CONFIG_PLAT_OAKS32R) || \ + defined(CONFIG_MACH_TX49XX) +# define DCR_VAL 0x48 /* 8-bit mode */ +#else +# define DCR_VAL 0x49 +#endif + +static int ne_probe1(struct net_device *dev, unsigned long ioaddr); +static int ne_probe_isapnp(struct net_device *dev); + +static void ne_reset_8390(struct net_device *dev); +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void ne_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ne_block_output(struct net_device *dev, const int count, + const unsigned char *buf, const int start_page); + + +/* Probe for various non-shared-memory ethercards. + + NEx000-clone boards have a Station Address PROM (SAPROM) in the packet + buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of + the SAPROM, while other supposed NE2000 clones must be detected by their + SA prefix. + + Reading the SAPROM from a word-wide card with the 8390 set in byte-wide + mode results in doubled values, which can be detected and compensated for. + + The probe is also responsible for initializing the card and filling + in the 'dev' and 'ei_status' structures. + + We use the minimum memory size for some ethercard product lines, iff we can't + distinguish models. You can increase the packet buffer size by setting + PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are: + E1010 starts at 0x100 and ends at 0x2000. + E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory") + E2010 starts at 0x100 and ends at 0x4000. + E2010-x starts at 0x100 and ends at 0xffff. */ + +static int __init do_ne_probe(struct net_device *dev) +{ + unsigned long base_addr = dev->base_addr; +#ifdef NEEDS_PORTLIST + int orig_irq = dev->irq; +#endif + + /* First check any supplied i/o locations. User knows best. */ + if (base_addr > 0x1ff) { /* Check a single specified location. */ + int ret = ne_probe1(dev, base_addr); + if (ret) + printk(KERN_WARNING "ne.c: No NE*000 card found at " + "i/o = %#lx\n", base_addr); + return ret; + } + else if (base_addr != 0) /* Don't probe at all. */ + return -ENXIO; + + /* Then look for any installed ISAPnP clones */ + if (isapnp_present() && (ne_probe_isapnp(dev) == 0)) + return 0; + +#ifdef NEEDS_PORTLIST + /* Last resort. 
The semi-risky ISA auto-probe. */ + for (base_addr = 0; netcard_portlist[base_addr] != 0; base_addr++) { + int ioaddr = netcard_portlist[base_addr]; + dev->irq = orig_irq; + if (ne_probe1(dev, ioaddr) == 0) + return 0; + } +#endif + + return -ENODEV; +} + +static int __init ne_probe_isapnp(struct net_device *dev) +{ + int i; + + for (i = 0; isapnp_clone_list[i].vendor != 0; i++) { + struct pnp_dev *idev = NULL; + + while ((idev = pnp_find_dev(NULL, + isapnp_clone_list[i].vendor, + isapnp_clone_list[i].function, + idev))) { + /* Avoid already found cards from previous calls */ + if (pnp_device_attach(idev) < 0) + continue; + if (pnp_activate_dev(idev) < 0) { + pnp_device_detach(idev); + continue; + } + /* if no io and irq, search for next */ + if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) { + pnp_device_detach(idev); + continue; + } + /* found it */ + dev->base_addr = pnp_port_start(idev, 0); + dev->irq = pnp_irq(idev, 0); + printk(KERN_INFO "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n", + (char *) isapnp_clone_list[i].driver_data, + dev->base_addr, dev->irq); + if (ne_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */ + printk(KERN_ERR "ne.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr); + pnp_device_detach(idev); + return -ENXIO; + } + ei_status.priv = (unsigned long)idev; + break; + } + if (!idev) + continue; + return 0; + } + + return -ENODEV; +} + +static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) +{ + int i; + unsigned char SA_prom[32]; + int wordlength = 2; + const char *name = NULL; + int start_page, stop_page; + int neX000, ctron, copam, bad_card; + int reg0, ret; + static unsigned version_printed; + + if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + reg0 = inb_p(ioaddr); + if (reg0 == 0xFF) { + ret = -ENODEV; + goto err_out; + } + + /* Do a preliminary verification that we have a 8390. */ + { + int regd; + outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); + regd = inb_p(ioaddr + 0x0d); + outb_p(0xff, ioaddr + 0x0d); + outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); + inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ + if (inb_p(ioaddr + EN0_COUNTER0) != 0) { + outb_p(reg0, ioaddr); + outb_p(regd, ioaddr + 0x0d); /* Restore the old values. */ + ret = -ENODEV; + goto err_out; + } + } + + if (ei_debug && version_printed++ == 0) + printk(KERN_INFO "%s%s", version1, version2); + + printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr); + + /* A user with a poor card that fails to ack the reset, or that + does not have a valid 0x57,0x57 signature can still use this + without having to recompile. Specifying an i/o address along + with an otherwise unused dev->mem_end value of "0xBAD" will + cause the driver to skip these parts of the probe. */ + + bad_card = ((dev->base_addr != 0) && (dev->mem_end == BAD)); + + /* Reset card. Who knows what dain-bramaged state it was left in. */ + + { + unsigned long reset_start_time = jiffies; + + /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ + outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); + + while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + if (bad_card) { + printk(" (warning: no reset ack)"); + break; + } else { + printk(" not found (no reset ack).\n"); + ret = -ENODEV; + goto err_out; + } + } + + outb_p(0xff, ioaddr + EN0_ISR); /* Ack all intr. */ + } + + /* Read the 16 bytes of station address PROM. 
+ We must first initialize registers, similar to NS8390p_init(eifdev, 0). + We can't reliably read the SAPROM address without this. + (I learned the hard way!). */ + { + struct {unsigned char value, offset; } program_seq[] = + { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); + + } + for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) { + SA_prom[i] = inb(ioaddr + NE_DATAPORT); + SA_prom[i+1] = inb(ioaddr + NE_DATAPORT); + if (SA_prom[i] != SA_prom[i+1]) + wordlength = 1; + } + + if (wordlength == 2) + { + for (i = 0; i < 16; i++) + SA_prom[i] = SA_prom[i+i]; + /* We must set the 8390 for word mode. */ + outb_p(DCR_VAL, ioaddr + EN0_DCFG); + start_page = NESM_START_PG; + + /* + * Realtek RTL8019AS datasheet says that the PSTOP register + * shouldn't exceed 0x60 in 8-bit mode. + * This chip can be identified by reading the signature from + * the remote byte count registers (otherwise write-only)... + */ + if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */ + inb(ioaddr + EN0_RCNTLO) == 0x50 && + inb(ioaddr + EN0_RCNTHI) == 0x70) + stop_page = 0x60; + else + stop_page = NESM_STOP_PG; + } else { + start_page = NE1SM_START_PG; + stop_page = NE1SM_STOP_PG; + } + +#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R) + neX000 = ((SA_prom[14] == 0x57 && SA_prom[15] == 0x57) + || (SA_prom[14] == 0x42 && SA_prom[15] == 0x42)); +#else + neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57); +#endif + ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d); + copam = (SA_prom[14] == 0x49 && SA_prom[15] == 0x00); + + /* Set up the rest of the parameters. */ + if (neX000 || bad_card || copam) { + name = (wordlength == 2) ? "NE2000" : "NE1000"; + } + else if (ctron) + { + name = (wordlength == 2) ? "Ctron-8" : "Ctron-16"; + start_page = 0x01; + stop_page = (wordlength == 2) ? 0x40 : 0x20; + } + else + { +#ifdef SUPPORT_NE_BAD_CLONES + /* Ack! Well, there might be a *bad* NE*000 clone there. + Check for total bogus addresses. */ + for (i = 0; bad_clone_list[i].name8; i++) + { + if (SA_prom[0] == bad_clone_list[i].SAprefix[0] && + SA_prom[1] == bad_clone_list[i].SAprefix[1] && + SA_prom[2] == bad_clone_list[i].SAprefix[2]) + { + if (wordlength == 2) + { + name = bad_clone_list[i].name16; + } else { + name = bad_clone_list[i].name8; + } + break; + } + } + if (bad_clone_list[i].name8 == NULL) + { + printk(" not found (invalid signature %2.2x %2.2x).\n", + SA_prom[14], SA_prom[15]); + ret = -ENXIO; + goto err_out; + } +#else + printk(" not found.\n"); + ret = -ENXIO; + goto err_out; +#endif + } + + if (dev->irq < 2) + { + unsigned long cookie = probe_irq_on(); + outb_p(0x50, ioaddr + EN0_IMR); /* Enable one interrupt. */ + outb_p(0x00, ioaddr + EN0_RCNTLO); + outb_p(0x00, ioaddr + EN0_RCNTHI); + outb_p(E8390_RREAD+E8390_START, ioaddr); /* Trigger it... */ + mdelay(10); /* wait 10ms for interrupt to propagate */ + outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. 
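+   (Editor's note: the block below is the classic ISA IRQ-autoprobe
+   idiom; a hedged restatement of its shape, using only the calls
+   already present in this function:
+
+       cookie = probe_irq_on();              watch for unclaimed IRQs
+       outb_p(0x50, ioaddr + EN0_IMR);       let one source through
+       ... force the card to raise it, mdelay(10) ...
+       dev->irq = probe_irq_off(cookie);     0 means nothing fired
+
+   The 10ms delay gives the interrupt time to propagate before the
+   result is read back.)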
*/ + dev->irq = probe_irq_off(cookie); + if (ei_debug > 2) + printk(" autoirq is %d\n", dev->irq); + } else if (dev->irq == 2) + /* Fixup for users that don't know that IRQ 2 is really IRQ 9, + or don't know which one to set. */ + dev->irq = 9; + + if (! dev->irq) { + printk(" failed to detect IRQ line.\n"); + ret = -EAGAIN; + goto err_out; + } + + /* Snarf the interrupt now. There's no point in waiting since we cannot + share and the board will usually be enabled. */ + ret = request_irq(dev->irq, eip_interrupt, 0, name, dev); + if (ret) { + printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); + goto err_out; + } + + dev->base_addr = ioaddr; + +#ifdef CONFIG_PLAT_MAPPI + outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, + ioaddr + E8390_CMD); /* 0x61 */ + for (i = 0 ; i < ETHER_ADDR_LEN ; i++) { + dev->dev_addr[i] = SA_prom[i] + = inb_p(ioaddr + EN1_PHYS_SHIFT(i)); + } +#else + for(i = 0; i < ETHER_ADDR_LEN; i++) { + dev->dev_addr[i] = SA_prom[i]; + } +#endif + + printk("%pM\n", dev->dev_addr); + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + + /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */ + ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01)); + + ei_status.rx_start_page = start_page + TX_PAGES; +#ifdef PACKETBUF_MEMSIZE + /* Allow the packet buffer size to be overridden by know-it-alls. */ + ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; +#endif + + ei_status.reset_8390 = &ne_reset_8390; + ei_status.block_input = &ne_block_input; + ei_status.block_output = &ne_block_output; + ei_status.get_8390_hdr = &ne_get_8390_hdr; + ei_status.priv = 0; + + dev->netdev_ops = &eip_netdev_ops; + NS8390p_init(dev, 0); + + ret = register_netdev(dev); + if (ret) + goto out_irq; + printk(KERN_INFO "%s: %s found at %#lx, using IRQ %d.\n", + dev->name, name, ioaddr, dev->irq); + return 0; + +out_irq: + free_irq(dev->irq, dev); +err_out: + release_region(ioaddr, NE_IO_EXTENT); + return ret; +} + +/* Hard reset the card. This used to pause for the same period that a + 8390 reset command required, but that shouldn't be necessary. */ + +static void ne_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + + if (ei_debug > 1) + printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); + + /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ + outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); + break; + } + outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + int nic_base = dev->base_addr; + + /* This *shouldn't* happen. 
If it does, it's the last thing you'll see */ + + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); + outb_p(0, nic_base + EN0_RCNTHI); + outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_status.word16) + insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); + else + insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); + + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; + + le16_to_cpus(&hdr->count); +} + +/* Block input and output, similar to the Crynwr packet driver. If you + are porting to a new ethercard, look at the packet driver source for hints. + The NEx000 doesn't share the on-board packet memory -- you have to put + the packet out through the "remote DMA" dataport using outb. */ + +static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ +#ifdef NE_SANITY_CHECK + int xfer_count = count; +#endif + int nic_base = dev->base_addr; + char *buf = skb->data; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_block_input " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); + outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); + if (ei_status.word16) + { + insw(NE_BASE + NE_DATAPORT,buf,count>>1); + if (count & 0x01) + { + buf[count-1] = inb(NE_BASE + NE_DATAPORT); +#ifdef NE_SANITY_CHECK + xfer_count++; +#endif + } + } else { + insb(NE_BASE + NE_DATAPORT, buf, count); + } + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. If you see + this message you either 1) have a slightly incompatible clone + or 2) have noise/speed problems with your bus. */ + + if (ei_debug > 1) + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here + -- it's broken for Rx on some cards! */ + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + addr = (high << 8) + low; + if (((ring_offset + xfer_count) & 0xff) == low) + break; + } while (--tries > 0); + if (tries <= 0) + printk(KERN_WARNING "%s: RX transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, ring_offset + xfer_count, addr); + } +#endif + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static void ne_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + int nic_base = NE_BASE; + unsigned long dma_start; +#ifdef NE_SANITY_CHECK + int retries = 0; +#endif + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. 
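+   (Editor's note: the rounding below matters because the 16-bit path
+   moves count>>1 words through outsw(), so an odd trailing byte would
+   otherwise be dropped. A worked case:
+
+       count = 65;                                odd payload
+       if (ei_status.word16 && (count & 0x01))
+           count++;                               now 66; 33 words out
+
+   The single pad byte lands in the card's buffer RAM; since the
+   transmit length is programmed separately by the 8390 core, the pad
+   is presumed harmless, which is the author's open question here.)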
*/ + + if (ei_status.word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_block_output." + "[DMAstat:%d][irqlock:%d]\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + +#ifdef NE_SANITY_CHECK +retry: +#endif + +#ifdef NE8390_RW_BUGFIX + /* Handle the read-before-write bug the same way as the + Crynwr packet driver -- the NatSemi method doesn't work. + Actually this doesn't always work either, but if you have + problems with your NEx000 this is better than nothing! */ + + outb_p(0x42, nic_base + EN0_RCNTLO); + outb_p(0x00, nic_base + EN0_RCNTHI); + outb_p(0x42, nic_base + EN0_RSARLO); + outb_p(0x00, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); + /* Make certain that the dummy read has occurred. */ + udelay(6); +#endif + + outb_p(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. */ + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(0x00, nic_base + EN0_RSARLO); + outb_p(start_page, nic_base + EN0_RSARHI); + + outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD); + if (ei_status.word16) { + outsw(NE_BASE + NE_DATAPORT, buf, count>>1); + } else { + outsb(NE_BASE + NE_DATAPORT, buf, count); + } + + dma_start = jiffies; + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. */ + + if (ei_debug > 1) + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + addr = (high << 8) + low; + if ((start_page << 8) + count == addr) + break; + } while (--tries > 0); + + if (tries <= 0) + { + printk(KERN_WARNING "%s: Tx packet transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, (start_page << 8) + count, addr); + if (retries++ == 0) + goto retry; + } + } +#endif + + while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ + printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); + ne_reset_8390(dev); + NS8390p_init(dev, 1); + break; + } + + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static int __init ne_drv_probe(struct platform_device *pdev) +{ + struct net_device *dev; + int err, this_dev = pdev->id; + struct resource *res; + + dev = alloc_eip_netdev(); + if (!dev) + return -ENOMEM; + + /* ne.c doesn't populate resources in platform_device, but + * rbtx4927_ne_init and rbtx4938_ne_init do register devices + * with resources. + */ + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (res) { + dev->base_addr = res->start; + dev->irq = platform_get_irq(pdev, 0); + } else { + if (this_dev < 0 || this_dev >= MAX_NE_CARDS) { + free_netdev(dev); + return -EINVAL; + } + dev->base_addr = io[this_dev]; + dev->irq = irq[this_dev]; + dev->mem_end = bad[this_dev]; + } + err = do_ne_probe(dev); + if (err) { + free_netdev(dev); + return err; + } + platform_set_drvdata(pdev, dev); + + /* Update with any values found by probing, don't update if + * resources were specified. 
+ */ + if (!res) { + io[this_dev] = dev->base_addr; + irq[this_dev] = dev->irq; + } + return 0; +} + +static int ne_drv_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + if (dev) { + struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; + netif_device_detach(dev); + unregister_netdev(dev); + if (idev) + pnp_device_detach(idev); + /* Careful ne_drv_remove can be called twice, once from + * the platform_driver.remove and again when the + * platform_device is being removed. + */ + ei_status.priv = 0; + free_irq(dev->irq, dev); + release_region(dev->base_addr, NE_IO_EXTENT); + free_netdev(dev); + platform_set_drvdata(pdev, NULL); + } + return 0; +} + +/* Remove unused devices or all if true. */ +static void ne_loop_rm_unreg(int all) +{ + int this_dev; + struct platform_device *pdev; + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + pdev = pdev_ne[this_dev]; + /* No network device == unused */ + if (pdev && (!platform_get_drvdata(pdev) || all)) { + ne_drv_remove(pdev); + platform_device_unregister(pdev); + pdev_ne[this_dev] = NULL; + } + } +} + +#ifdef CONFIG_PM +static int ne_drv_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + if (netif_running(dev)) { + struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; + netif_device_detach(dev); + if (idev) + pnp_stop_dev(idev); + } + return 0; +} + +static int ne_drv_resume(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + if (netif_running(dev)) { + struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; + if (idev) + pnp_start_dev(idev); + ne_reset_8390(dev); + NS8390p_init(dev, 1); + netif_device_attach(dev); + } + return 0; +} +#else +#define ne_drv_suspend NULL +#define ne_drv_resume NULL +#endif + +static struct platform_driver ne_driver = { + .remove = ne_drv_remove, + .suspend = ne_drv_suspend, + .resume = ne_drv_resume, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +static void __init ne_add_devices(void) +{ + int this_dev; + struct platform_device *pdev; + + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + if (pdev_ne[this_dev]) + continue; + pdev = platform_device_register_simple( + DRV_NAME, this_dev, NULL, 0); + if (IS_ERR(pdev)) + continue; + pdev_ne[this_dev] = pdev; + } +} + +#ifdef MODULE +int __init init_module(void) +{ + int retval; + ne_add_devices(); + retval = platform_driver_probe(&ne_driver, ne_drv_probe); + if (retval) { + if (io[0] == 0) + printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\"" + " value(s) for ISA cards.\n"); + ne_loop_rm_unreg(1); + return retval; + } + + /* Unregister unused platform_devices. */ + ne_loop_rm_unreg(0); + return retval; +} +#else /* MODULE */ +static int __init ne_init(void) +{ + int retval = platform_driver_probe(&ne_driver, ne_drv_probe); + + /* Unregister unused platform_devices. */ + ne_loop_rm_unreg(0); + return retval; +} +module_init(ne_init); + +struct net_device * __init ne_probe(int unit) +{ + int this_dev; + struct net_device *dev; + + /* Find an empty slot, that is no net_device and zero io port. 
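+   (Editor's note: "empty" is the negation of the loop condition
+   below; a slot is taken when
+
+       (pdev_ne[this_dev] && platform_get_drvdata(pdev_ne[this_dev]))
+           || io[this_dev] != 0
+
+   i.e. when it already holds a probed net_device or the user pinned
+   an io= value to it on the command line.)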
*/ + this_dev = 0; + while ((pdev_ne[this_dev] && platform_get_drvdata(pdev_ne[this_dev])) || + io[this_dev]) { + if (++this_dev == MAX_NE_CARDS) + return ERR_PTR(-ENOMEM); + } + + /* Get irq, io from kernel command line */ + dev = alloc_eip_netdev(); + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + io[this_dev] = dev->base_addr; + irq[this_dev] = dev->irq; + bad[this_dev] = dev->mem_end; + + free_netdev(dev); + + ne_add_devices(); + + /* return the first device found */ + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + if (pdev_ne[this_dev]) { + dev = platform_get_drvdata(pdev_ne[this_dev]); + if (dev) + return dev; + } + } + + return ERR_PTR(-ENODEV); +} +#endif /* MODULE */ + +static void __exit ne_exit(void) +{ + platform_driver_unregister(&ne_driver); + ne_loop_rm_unreg(1); +} +module_exit(ne_exit); diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c new file mode 100644 index 0000000..70cdc69 --- /dev/null +++ b/drivers/net/ethernet/8390/ne2.c @@ -0,0 +1,799 @@ +/* ne2.c: A NE/2 Ethernet Driver for Linux. */ +/* + Based on the NE2000 driver written by Donald Becker (1992-94). + modified by Wim Dumon (Apr 1996) + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as wimpie@linux.cc.kuleuven.ac.be + + Currently supported: NE/2 + This patch was never tested on other MCA-ethernet adapters, but it + might work. Just give it a try and let me know if you have problems. + Also mail me if it really works, please! + + Changelog: + Mon Feb 3 16:26:02 MET 1997 + - adapted the driver to work with the 2.1.25 kernel + - multiple ne2 support (untested) + - module support (untested) + + Fri Aug 28 00:18:36 CET 1998 (David Weinehall) + - fixed a few minor typos + - made the MODULE_PARM conditional (it only works with the v2.1.x kernels) + - fixed the module support (Now it's working...) + + Mon Sep 7 19:01:44 CET 1998 (David Weinehall) + - added support for Arco Electronics AE/2-card (experimental) + + Mon Sep 14 09:53:42 CET 1998 (David Weinehall) + - added support for Compex ENET-16MC/P (experimental) + + Tue Sep 15 16:21:12 CET 1998 (David Weinehall, Magnus Jonsson, Tomas Ogren) + - Miscellaneous bugfixes + + Tue Sep 19 16:21:12 CET 1998 (Magnus Jonsson) + - Cleanup + + Wed Sep 23 14:33:34 CET 1998 (David Weinehall) + - Restructuring and rewriting for v2.1.x compliance + + Wed Oct 14 17:19:21 CET 1998 (David Weinehall) + - Added code that unregisters irq and proc-info + - Version# bump + + Mon Nov 16 15:28:23 CET 1998 (Wim Dumon) + - pass 'dev' as last parameter of request_irq in stead of 'NULL' + + Wed Feb 7 21:24:00 CET 2001 (Alfred Arnold) + - added support for the D-Link DE-320CT + + * WARNING + ------- + This is alpha-test software. It is not guaranteed to work. As a + matter of fact, I'm quite sure there are *LOTS* of bugs in here. I + would like to hear from you if you use this driver, even if it works. + If it doesn't work, be sure to send me a mail with the problems ! +*/ + +static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon \n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "8390.h" + +#define DRV_NAME "ne2" + +/* Some defines that people can play with if so inclined. 
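+   (Editor's note: as in ne.c these are compile-time switches; for
+   example, uncommenting
+
+       #define NE_SANITY_CHECK
+
+   at this point enables the DMA termination-address verification
+   blocks further down in ne_block_input and ne_block_output.)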
*/
+
+/* Do we perform extra sanity checks on stuff ? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x20 /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x30
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* From the .ADF file: */
+static unsigned int addresses[7] __initdata =
+	{0x1000, 0x2020, 0x8020, 0xa0a0, 0xb0b0, 0xc0c0, 0xc3d0};
+static int irqs[4] __initdata = {3, 4, 5, 9};
+
+/* From the D-Link ADF file: */
+static unsigned int dlink_addresses[4] __initdata =
+	{0x300, 0x320, 0x340, 0x360};
+static int dlink_irqs[8] __initdata = {3, 4, 5, 9, 10, 11, 14, 15};
+
+struct ne2_adapters_t {
+	unsigned int	id;
+	char		*name;
+};
+
+static struct ne2_adapters_t ne2_adapters[] __initdata = {
+	{ 0x6354, "Arco Ethernet Adapter AE/2" },
+	{ 0x70DE, "Compex ENET-16 MC/P" },
+	{ 0x7154, "Novell Ethernet Adapter NE/2" },
+	{ 0x56ea, "D-Link DE-320CT" },
+	{ 0x0000, NULL }
+};
+
+extern int netcard_probe(struct net_device *dev);
+
+static int ne2_probe1(struct net_device *dev, int slot);
+
+static void ne_reset_8390(struct net_device *dev);
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+		int ring_page);
+static void ne_block_input(struct net_device *dev, int count,
+		struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct net_device *dev, const int count,
+		const unsigned char *buf, const int start_page);
+
+
+/*
+ * special code to read the DE-320's MAC address EEPROM. In contrast to a
+ * standard NE design, this is a serial EEPROM (93C46) that has to be read
+ * bit by bit. The EEPROM control port at base + 0x1e has the following
+ * layout:
+ *
+ * Bit 0 = Data out (read from EEPROM)
+ * Bit 1 = Data in (write to EEPROM)
+ * Bit 2 = Clock
+ * Bit 3 = Chip Select
+ * Bit 7 = ~50 kHz clock for defined delays
+ *
+ */
+
+static void __init dlink_put_eeprom(unsigned char value, unsigned int addr)
+{
+	int z;
+	unsigned char v1, v2;
+
+	/* write the value to the NIC EEPROM register */
+
+	outb(value, addr + 0x1e);
+
+	/* now wait the clock line to toggle twice.
Effectively, we are + waiting (at least) for one clock cycle */ + + for (z = 0; z < 2; z++) { + do { + v1 = inb(addr + 0x1e); + v2 = inb(addr + 0x1e); + } + while (!((v1 ^ v2) & 0x80)); + } +} + +static void __init dlink_send_eeprom_bit(unsigned int bit, unsigned int addr) +{ + /* shift data bit into correct position */ + + bit = bit << 1; + + /* write value, keep clock line high for two cycles */ + + dlink_put_eeprom(0x09 | bit, addr); + dlink_put_eeprom(0x0d | bit, addr); + dlink_put_eeprom(0x0d | bit, addr); + dlink_put_eeprom(0x09 | bit, addr); +} + +static void __init dlink_send_eeprom_word(unsigned int value, unsigned int len, unsigned int addr) +{ + int z; + + /* adjust bits so that they are left-aligned in a 16-bit-word */ + + value = value << (16 - len); + + /* shift bits out to the EEPROM */ + + for (z = 0; z < len; z++) { + dlink_send_eeprom_bit((value & 0x8000) >> 15, addr); + value = value << 1; + } +} + +static unsigned int __init dlink_get_eeprom(unsigned int eeaddr, unsigned int addr) +{ + int z; + unsigned int value = 0; + + /* pull the CS line low for a moment. This resets the EEPROM- + internal logic, and makes it ready for a new command. */ + + dlink_put_eeprom(0x01, addr); + dlink_put_eeprom(0x09, addr); + + /* send one start bit, read command (1 - 0), plus the address to + the EEPROM */ + + dlink_send_eeprom_word(0x0180 | (eeaddr & 0x3f), 9, addr); + + /* get the data word. We clock by sending 0s to the EEPROM, which + get ignored during the read process */ + + for (z = 0; z < 16; z++) { + dlink_send_eeprom_bit(0, addr); + value = (value << 1) | (inb(addr + 0x1e) & 0x01); + } + + return value; +} + +/* + * Note that at boot, this probe only picks up one card at a time. + */ + +static int __init do_ne2_probe(struct net_device *dev) +{ + static int current_mca_slot = -1; + int i; + int adapter_found = 0; + + /* Do not check any supplied i/o locations. + POS registers usually don't fail :) */ + + /* MCA cards have POS registers. + Autodetecting MCA cards is extremely simple. + Just search for the card. 
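+   (Editor's note: the probe below reduces to one MCA library call per
+   known adapter ID; a sketch of the loop's core, using the table
+   declared earlier in this file:
+
+       slot = mca_find_unused_adapter(ne2_adapters[i].id, 0);
+       if (slot != MCA_NOTFOUND)
+           ... mark it used, then ne2_probe1(dev, slot) ...
+
+   Because the POS registers identify the card directly, no i/o
+   address guessing is needed, unlike the ISA probe in ne.c.)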
*/ + + for(i = 0; (ne2_adapters[i].name != NULL) && !adapter_found; i++) { + current_mca_slot = + mca_find_unused_adapter(ne2_adapters[i].id, 0); + + if((current_mca_slot != MCA_NOTFOUND) && !adapter_found) { + int res; + mca_set_adapter_name(current_mca_slot, + ne2_adapters[i].name); + mca_mark_as_used(current_mca_slot); + + res = ne2_probe1(dev, current_mca_slot); + if (res) + mca_mark_as_unused(current_mca_slot); + return res; + } + } + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init ne2_probe(int unit) +{ + struct net_device *dev = alloc_eip_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_ne2_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static int ne2_procinfo(char *buf, int slot, struct net_device *dev) +{ + int len=0; + + len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" ); + len += sprintf(buf+len, "Driver written by Wim Dumon "); + len += sprintf(buf+len, "\n"); + len += sprintf(buf+len, "Modified by "); + len += sprintf(buf+len, "David Weinehall \n"); + len += sprintf(buf+len, "and by Magnus Jonsson \n"); + len += sprintf(buf+len, "Based on the original NE2000 drivers\n" ); + len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr); + len += sprintf(buf+len, "IRQ : %d\n", dev->irq); + len += sprintf(buf+len, "HW addr : %pM\n", dev->dev_addr); + + return len; +} + +static int __init ne2_probe1(struct net_device *dev, int slot) +{ + int i, base_addr, irq, retval; + unsigned char POS; + unsigned char SA_prom[32]; + const char *name = "NE/2"; + int start_page, stop_page; + static unsigned version_printed; + + if (ei_debug && version_printed++ == 0) + printk(version); + + printk("NE/2 ethercard found in slot %d:", slot); + + /* Read base IO and IRQ from the POS-registers */ + POS = mca_read_stored_pos(slot, 2); + if(!(POS % 2)) { + printk(" disabled.\n"); + return -ENODEV; + } + + /* handle different POS register structure for D-Link card */ + + if (mca_read_stored_pos(slot, 0) == 0xea) { + base_addr = dlink_addresses[(POS >> 5) & 0x03]; + irq = dlink_irqs[(POS >> 2) & 0x07]; + } + else { + i = (POS & 0xE)>>1; + /* printk("Halleluja sdog, als er na de pijl een 1 staat is 1 - 1 == 0" + " en zou het moeten werken -> %d\n", i); + The above line was for remote testing, thanx to sdog ... */ + base_addr = addresses[i - 1]; + irq = irqs[(POS & 0x60)>>5]; + } + + if (!request_region(base_addr, NE_IO_EXTENT, DRV_NAME)) + return -EBUSY; + +#ifdef DEBUG + printk("POS info : pos 2 = %#x ; base = %#x ; irq = %ld\n", POS, + base_addr, irq); +#endif + +#ifndef CRYNWR_WAY + /* Reset the card the way they do it in the Crynwr packet driver */ + for (i=0; i<8; i++) + outb(0x0, base_addr + NE_RESET); + inb(base_addr + NE_RESET); + outb(0x21, base_addr + NE_CMD); + if (inb(base_addr + NE_CMD) != 0x21) { + printk("NE/2 adapter not responding\n"); + retval = -ENODEV; + goto out; + } + + /* In the crynwr sources they do a RAM-test here. I skip it. I suppose + my RAM is okay. Suppose your memory is broken. Then this test + should fail and you won't be able to use your card. But if I do not + test, you won't be able to use your card, neither. So this test + won't help you. */ + +#else /* _I_ never tested it this way .. Go ahead and try ...*/ + /* Reset card. Who knows what dain-bramaged state it was left in. 
*/ + { + unsigned long reset_start_time = jiffies; + + /* DON'T change these to inb_p/outb_p or reset will fail on + clones.. */ + outb(inb(base_addr + NE_RESET), base_addr + NE_RESET); + + while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + printk(" not found (no reset ack).\n"); + retval = -ENODEV; + goto out; + } + + outb_p(0xff, base_addr + EN0_ISR); /* Ack all intr. */ + } +#endif + + + /* Read the 16 bytes of station address PROM. + We must first initialize registers, similar to + NS8390p_init(eifdev, 0). + We can't reliably read the SAPROM address without this. + (I learned the hard way!). */ + { + struct { + unsigned char value, offset; + } program_seq[] = { + /* Select page 0 */ + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, + {0x49, EN0_DCFG}, /* Set WORD-wide (0x49) access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + outb_p(program_seq[i].value, base_addr + + program_seq[i].offset); + + } + for(i = 0; i < 6 /*sizeof(SA_prom)*/; i+=1) { + SA_prom[i] = inb(base_addr + NE_DATAPORT); + } + + /* I don't know whether the previous sequence includes the general + board reset procedure, so better don't omit it and just overwrite + the garbage read from a DE-320 with correct stuff. */ + + if (mca_read_stored_pos(slot, 0) == 0xea) { + unsigned int v; + + for (i = 0; i < 3; i++) { + v = dlink_get_eeprom(i, base_addr); + SA_prom[(i << 1) ] = v & 0xff; + SA_prom[(i << 1) + 1] = (v >> 8) & 0xff; + } + } + + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + dev->irq=irq; + + /* Snarf the interrupt now. There's no point in waiting since we cannot + share and the board will usually be enabled. */ + retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev); + if (retval) { + printk (" unable to get IRQ %d (irqval=%d).\n", + dev->irq, retval); + goto out; + } + + dev->base_addr = base_addr; + + for(i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = SA_prom[i]; + + printk(" %pM\n", dev->dev_addr); + + printk("%s: %s found at %#x, using IRQ %d.\n", + dev->name, name, base_addr, dev->irq); + + mca_set_adapter_procfn(slot, (MCA_ProcFn) ne2_procinfo, dev); + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = (2 == 2); + + ei_status.rx_start_page = start_page + TX_PAGES; +#ifdef PACKETBUF_MEMSIZE + /* Allow the packet buffer size to be overridden by know-it-alls. */ + ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; +#endif + + ei_status.reset_8390 = &ne_reset_8390; + ei_status.block_input = &ne_block_input; + ei_status.block_output = &ne_block_output; + ei_status.get_8390_hdr = &ne_get_8390_hdr; + + ei_status.priv = slot; + + dev->netdev_ops = &eip_netdev_ops; + NS8390p_init(dev, 0); + + retval = register_netdev(dev); + if (retval) + goto out1; + return 0; +out1: + mca_set_adapter_procfn( ei_status.priv, NULL, NULL); + free_irq(dev->irq, dev); +out: + release_region(base_addr, NE_IO_EXTENT); + return retval; +} + +/* Hard reset the card. 
This used to pause for the same period that a + 8390 reset command required, but that shouldn't be necessary. */ +static void ne_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + + if (ei_debug > 1) + printk("resetting the 8390 t=%ld...", jiffies); + + /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ + outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + printk("%s: ne_reset_8390() did not complete.\n", + dev->name); + break; + } + outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page) +{ + + int nic_base = dev->base_addr; + + /* This *shouldn't* happen. + If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + printk("%s: DMAing conflict in ne_get_8390_hdr " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); + outb_p(0, nic_base + EN0_RCNTHI); + outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_status.word16) + insw(NE_BASE + NE_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr)>>1); + else + insb(NE_BASE + NE_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr)); + + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +/* Block input and output, similar to the Crynwr packet driver. If you + are porting to a new ethercard, look at the packet driver source for + hints. The NEx000 doesn't share the on-board packet memory -- you have + to put the packet out through the "remote DMA" dataport using outb. */ + +static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, + int ring_offset) +{ +#ifdef NE_SANITY_CHECK + int xfer_count = count; +#endif + int nic_base = dev->base_addr; + char *buf = skb->data; + + /* This *shouldn't* happen. + If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + printk("%s: DMAing conflict in ne_block_input " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); + outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); + if (ei_status.word16) { + insw(NE_BASE + NE_DATAPORT,buf,count>>1); + if (count & 0x01) { + buf[count-1] = inb(NE_BASE + NE_DATAPORT); +#ifdef NE_SANITY_CHECK + xfer_count++; +#endif + } + } else { + insb(NE_BASE + NE_DATAPORT, buf, count); + } + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. 
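[Editorial note] ne_get_8390_hdr() and ne_block_input() above both follow the same remote-DMA recipe: program the remote byte count and start address, kick off a remote read, then drain the data port. A condensed in-driver sketch of that sequence; ne_remote_read() is a hypothetical helper, the constants are from 8390.h, and a 16-bit card with an even count is assumed:

/* Sketch: the 8390 remote-DMA read sequence in one place. */
static void ne_remote_read(int nic_base, void *dst, int count, int src)
{
        outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
        outb_p(count & 0xff, nic_base + EN0_RCNTLO); /* remote byte count */
        outb_p(count >> 8, nic_base + EN0_RCNTHI);
        outb_p(src & 0xff, nic_base + EN0_RSARLO);   /* remote start address */
        outb_p(src >> 8, nic_base + EN0_RSARHI);
        outb_p(E8390_RREAD + E8390_START, nic_base + NE_CMD);

        insw(nic_base + NE_DATAPORT, dst, count >> 1); /* drain the dataport */

        outb_p(ENISR_RDC, nic_base + EN0_ISR); /* ack remote-DMA complete */
}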
If you see + this message you either 1) have a slightly incompatible clone + or 2) have noise/speed problems with your bus. */ + if (ei_debug > 1) { /* DMA termination address check... */ + int addr, tries = 20; + do { + /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here + -- it's broken for Rx on some cards! */ + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + addr = (high << 8) + low; + if (((ring_offset + xfer_count) & 0xff) == low) + break; + } while (--tries > 0); + if (tries <= 0) + printk("%s: RX transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, ring_offset + xfer_count, addr); + } +#endif + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static void ne_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + int nic_base = NE_BASE; + unsigned long dma_start; +#ifdef NE_SANITY_CHECK + int retries = 0; +#endif + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + if (ei_status.word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. + If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + printk("%s: DMAing conflict in ne_block_output." + "[DMAstat:%d][irqlock:%d]\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + +#ifdef NE_SANITY_CHECK +retry: +#endif + +#ifdef NE8390_RW_BUGFIX + /* Handle the read-before-write bug the same way as the + Crynwr packet driver -- the NatSemi method doesn't work. + Actually this doesn't always work either, but if you have + problems with your NEx000 this is better than nothing! */ + outb_p(0x42, nic_base + EN0_RCNTLO); + outb_p(0x00, nic_base + EN0_RCNTHI); + outb_p(0x42, nic_base + EN0_RSARLO); + outb_p(0x00, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); + /* Make certain that the dummy read has occurred. */ + SLOW_DOWN_IO; + SLOW_DOWN_IO; + SLOW_DOWN_IO; +#endif + + outb_p(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. */ + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(0x00, nic_base + EN0_RSARLO); + outb_p(start_page, nic_base + EN0_RSARHI); + + outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD); + if (ei_status.word16) { + outsw(NE_BASE + NE_DATAPORT, buf, count>>1); + } else { + outsb(NE_BASE + NE_DATAPORT, buf, count); + } + + dma_start = jiffies; + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. */ + + if (ei_debug > 1) { /* DMA termination address check... */ + int addr, tries = 20; + do { + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + addr = (high << 8) + low; + if ((start_page << 8) + count == addr) + break; + } while (--tries > 0); + if (tries <= 0) { + printk("%s: Tx packet transfer address mismatch," + "%#4.4x (expected) vs. 
%#4.4x (actual).\n", + dev->name, (start_page << 8) + count, addr); + if (retries++ == 0) + goto retry; + } + } +#endif + + while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ + printk("%s: timeout waiting for Tx RDC.\n", dev->name); + ne_reset_8390(dev); + NS8390p_init(dev, 1); + break; + } + + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + + +#ifdef MODULE +#define MAX_NE_CARDS 4 /* Max number of NE cards per module */ +static struct net_device *dev_ne[MAX_NE_CARDS]; +static int io[MAX_NE_CARDS]; +static int irq[MAX_NE_CARDS]; +static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */ +MODULE_LICENSE("GPL"); + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(bad, int, NULL, 0); +MODULE_PARM_DESC(io, "(ignored)"); +MODULE_PARM_DESC(irq, "(ignored)"); +MODULE_PARM_DESC(bad, "(ignored)"); + +/* Module code fixed by David Weinehall */ + +int __init init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + dev = alloc_eip_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->mem_end = bad[this_dev]; + dev->base_addr = io[this_dev]; + if (do_ne2_probe(dev) == 0) { + dev_ne[found++] = dev; + continue; + } + free_netdev(dev); + break; + } + if (found) + return 0; + printk(KERN_WARNING "ne2.c: No NE/2 card found\n"); + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + mca_mark_as_unused(ei_status.priv); + mca_set_adapter_procfn( ei_status.priv, NULL, NULL); + free_irq(dev->irq, dev); + release_region(dev->base_addr, NE_IO_EXTENT); +} + +void __exit cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + struct net_device *dev = dev_ne[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c new file mode 100644 index 0000000..3c333cb --- /dev/null +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -0,0 +1,726 @@ +/* ne2k-pci.c: A NE2000 clone on PCI bus driver for Linux. */ +/* + A Linux device driver for PCI NE2000 clones. + + Authors and other copyright holders: + 1992-2000 by Donald Becker, NE2000 core and various modifications. + 1995-1998 by Paul Gortmaker, core modifications and PCI support. + Copyright 1993 assigned to the United States Government as represented + by the Director, National Security Agency. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Issues remaining: + People are making PCI ne2000 clones! Oh the horror, the horror... + Limited full-duplex support. +*/ + +#define DRV_NAME "ne2k-pci" +#define DRV_VERSION "1.03" +#define DRV_RELDATE "9/22/2003" + + +/* The user-configurable values. + These may be modified when a driver module is loaded.*/ + +static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. 
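[Editorial note] The NE_SANITY_CHECK blocks in ne2.c above verify a transfer by reading back the remote-DMA address registers; only the low byte is compared because RSAR may legitimately have crossed 256-byte pages. A worked example with illustrative numbers:

/* Sketch: the termination-address arithmetic behind NE_SANITY_CHECK. */
#include <stdio.h>

int main(void)
{
        int ring_offset = 0x4662;   /* example packet start in the RX ring */
        int count = 0x3b;           /* 59 bytes requested (odd) */
        int xfer_count = count + 1; /* odd count: one extra inb() transfer */

        /* Only the low byte is compared, since RSAR may have crossed
           any number of 256-byte pages during the transfer. */
        printf("expect RSARLO == %#04x when the DMA terminates\n",
               (ring_offset + xfer_count) & 0xff);
        return 0;
}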
*/ + +#define MAX_UNITS 8 /* More are supported, limit only on options */ +/* Used to pass the full-duplex flag, etc. */ +static int full_duplex[MAX_UNITS]; +static int options[MAX_UNITS]; + +/* Force a non std. amount of memory. Units are 256 byte pages. */ +/* #define PACKETBUF_MEMSIZE 0x40 */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "8390.h" + +/* These identify the driver base version and may not be removed. */ +static const char version[] __devinitconst = + KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE + " D. Becker/P. Gortmaker\n"; + +#if defined(__powerpc__) +#define inl_le(addr) le32_to_cpu(inl(addr)) +#define inw_le(addr) le16_to_cpu(inw(addr)) +#endif + +#define PFX DRV_NAME ": " + +MODULE_AUTHOR("Donald Becker / Paul Gortmaker"); +MODULE_DESCRIPTION("PCI NE2000 clone driver"); +MODULE_LICENSE("GPL"); + +module_param(debug, int, 0); +module_param_array(options, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +MODULE_PARM_DESC(debug, "debug level (1-2)"); +MODULE_PARM_DESC(options, "Bit 5: full duplex"); +MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)"); + +/* Some defines that people can play with if so inclined. */ + +/* Use 32 bit data-movement operations instead of 16 bit. */ +#define USE_LONGIO + +/* Do we implement the read before write bugfix ? */ +/* #define NE_RW_BUGFIX */ + +/* Flags. We rename an existing ei_status field to store flags! */ +/* Thus only the low 8 bits are usable for non-init-time flags. */ +#define ne2k_flags reg0 +enum { + ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. */ + FORCE_FDX=0x20, /* User override. */ + REALTEK_FDX=0x40, HOLTEK_FDX=0x80, + STOP_PG_0x60=0x100, +}; + +enum ne2k_pci_chipsets { + CH_RealTek_RTL_8029 = 0, + CH_Winbond_89C940, + CH_Compex_RL2000, + CH_KTI_ET32P2, + CH_NetVin_NV5000SC, + CH_Via_86C926, + CH_SureCom_NE34, + CH_Winbond_W89C940F, + CH_Holtek_HT80232, + CH_Holtek_HT80229, + CH_Winbond_89C940_8c4a, +}; + + +static struct { + char *name; + int flags; +} pci_clone_list[] __devinitdata = { + {"RealTek RTL-8029", REALTEK_FDX}, + {"Winbond 89C940", 0}, + {"Compex RL2000", 0}, + {"KTI ET32P2", 0}, + {"NetVin NV5000SC", 0}, + {"Via 86C926", ONLY_16BIT_IO}, + {"SureCom NE34", 0}, + {"Winbond W89C940F", 0}, + {"Holtek HT80232", ONLY_16BIT_IO | HOLTEK_FDX}, + {"Holtek HT80229", ONLY_32BIT_IO | HOLTEK_FDX | STOP_PG_0x60 }, + {"Winbond W89C940(misprogrammed)", 0}, + {NULL,} +}; + + +static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = { + { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 }, + { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 }, + { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 }, + { 0x8e2e, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_KTI_ET32P2 }, + { 0x4a14, 0x5000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_NetVin_NV5000SC }, + { 0x1106, 0x0926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Via_86C926 }, + { 0x10bd, 0x0e34, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_SureCom_NE34 }, + { 0x1050, 0x5a5a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_W89C940F }, + { 0x12c3, 0x0058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Holtek_HT80232 }, + { 0x12c3, 0x5598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Holtek_HT80229 }, + { 0x8c4a, 0x1980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940_8c4a }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ne2k_pci_tbl); + + +/* ---- No user-serviceable parts below ---- */ + +#define NE_BASE (dev->base_addr) +#define NE_CMD 0x00 +#define 
NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ +#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */ +#define NE_IO_EXTENT 0x20 + +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + + +static int ne2k_pci_open(struct net_device *dev); +static int ne2k_pci_close(struct net_device *dev); + +static void ne2k_pci_reset_8390(struct net_device *dev); +static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void ne2k_pci_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ne2k_pci_block_output(struct net_device *dev, const int count, + const unsigned char *buf, const int start_page); +static const struct ethtool_ops ne2k_pci_ethtool_ops; + + + +/* There is no room in the standard 8390 structure for extra info we need, + so we build a meta/outer-wrapper structure.. */ +struct ne2k_pci_card { + struct net_device *dev; + struct pci_dev *pci_dev; +}; + + + +/* + NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet + buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes + 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be + detected by their SA prefix. + + Reading the SAPROM from a word-wide card with the 8390 set in byte-wide + mode results in doubled values, which can be detected and compensated for. + + The probe is also responsible for initializing the card and filling + in the 'dev' and 'ei_status' structures. +*/ + +static const struct net_device_ops ne2k_netdev_ops = { + .ndo_open = ne2k_pci_open, + .ndo_stop = ne2k_pci_close, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + +static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev; + int i; + unsigned char SA_prom[32]; + int start_page, stop_page; + int irq, reg0, chip_idx = ent->driver_data; + static unsigned int fnd_cnt; + long ioaddr; + int flags = pci_clone_list[chip_idx].flags; + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + static int printed_version; + if (!printed_version++) + printk(version); +#endif + + fnd_cnt++; + + i = pci_enable_device (pdev); + if (i) + return i; + + ioaddr = pci_resource_start (pdev, 0); + irq = pdev->irq; + + if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { + dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n"); + return -ENODEV; + } + + if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { + dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n", + NE_IO_EXTENT, ioaddr); + return -EBUSY; + } + + reg0 = inb(ioaddr); + if (reg0 == 0xFF) + goto err_out_free_res; + + /* Do a preliminary verification that we have a 8390. */ + { + int regd; + outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); + regd = inb(ioaddr + 0x0d); + outb(0xff, ioaddr + 0x0d); + outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); + inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ + if (inb(ioaddr + EN0_COUNTER0) != 0) { + outb(reg0, ioaddr); + outb(regd, ioaddr + 0x0d); /* Restore the old values. 
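[Editorial note] The preliminary verification above leans on one 8390 property: EN0_COUNTER0 is a clear-on-read error counter, so a second read must return zero on genuine silicon. A minimal in-driver sketch of that test in isolation (looks_like_8390() is a hypothetical helper; constants as in 8390.h):

/* Sketch: the quick "is there really an 8390 here?" probe. */
static int looks_like_8390(long ioaddr)
{
        outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
        inb(ioaddr + EN0_COUNTER0);             /* first read clears the counter */
        return inb(ioaddr + EN0_COUNTER0) == 0; /* second read must be zero */
}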
*/ + goto err_out_free_res; + } + } + + /* Allocate net_device, dev->priv; fill in 8390 specific dev fields. */ + dev = alloc_ei_netdev(); + if (!dev) { + dev_err(&pdev->dev, "cannot allocate ethernet device\n"); + goto err_out_free_res; + } + dev->netdev_ops = &ne2k_netdev_ops; + + SET_NETDEV_DEV(dev, &pdev->dev); + + /* Reset card. Who knows what dain-bramaged state it was left in. */ + { + unsigned long reset_start_time = jiffies; + + outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); + + /* This looks like a horrible timing loop, but it should never take + more than a few cycles. + */ + while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0) + /* Limit wait: '2' avoids jiffy roll-over. */ + if (jiffies - reset_start_time > 2) { + dev_err(&pdev->dev, + "Card failure (no reset ack).\n"); + goto err_out_free_netdev; + } + + outb(0xff, ioaddr + EN0_ISR); /* Ack all intr. */ + } + + /* Read the 16 bytes of station address PROM. + We must first initialize registers, similar to NS8390_init(eifdev, 0). + We can't reliably read the SAPROM address without this. + (I learned the hard way!). */ + { + struct {unsigned char value, offset; } program_seq[] = { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x49, EN0_DCFG}, /* Set word-wide access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + outb(program_seq[i].value, ioaddr + program_seq[i].offset); + + } + + /* Note: all PCI cards have at least 16 bit access, so we don't have + to check for 8 bit cards. Most cards permit 32 bit access. */ + if (flags & ONLY_32BIT_IO) { + for (i = 0; i < 4 ; i++) + ((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT)); + } else + for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++) + SA_prom[i] = inb(ioaddr + NE_DATAPORT); + + /* We always set the 8390 registers for word mode. */ + outb(0x49, ioaddr + EN0_DCFG); + start_page = NESM_START_PG; + + stop_page = flags & STOP_PG_0x60 ? 0x60 : NESM_STOP_PG; + + /* Set up the rest of the parameters. */ + dev->irq = irq; + dev->base_addr = ioaddr; + pci_set_drvdata(pdev, dev); + + ei_status.name = pci_clone_list[chip_idx].name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = 1; + ei_status.ne2k_flags = flags; + if (fnd_cnt < MAX_UNITS) { + if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX)) + ei_status.ne2k_flags |= FORCE_FDX; + } + + ei_status.rx_start_page = start_page + TX_PAGES; +#ifdef PACKETBUF_MEMSIZE + /* Allow the packet buffer size to be overridden by know-it-alls. 
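[Editorial note] The page arithmetic above carves the card's packet RAM into transmit slots and the receive ring. A worked example, assuming the stock 8390 core's TX_PAGES value of 12 (two maximum-size frames of six 256-byte pages each; the exact value comes from 8390.h and is an assumption here):

/* Sketch: the packet-RAM layout implied by the assignments above. */
#include <stdio.h>

int main(void)
{
        int tx_pages = 12;     /* assumed TX_PAGES from 8390.h */
        int start_page = 0x40; /* NESM_START_PG */
        int stop_page = 0x80;  /* NESM_STOP_PG; 0x60 for STOP_PG_0x60 cards */

        printf("Tx slots: pages 0x%02x-0x%02x\n",
               start_page, start_page + tx_pages - 1);
        printf("Rx ring : pages 0x%02x-0x%02x (%d bytes)\n",
               start_page + tx_pages, stop_page - 1,
               (stop_page - start_page - tx_pages) * 256);
        return 0;
}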
*/ + ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; +#endif + + ei_status.reset_8390 = &ne2k_pci_reset_8390; + ei_status.block_input = &ne2k_pci_block_input; + ei_status.block_output = &ne2k_pci_block_output; + ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr; + ei_status.priv = (unsigned long) pdev; + + dev->ethtool_ops = &ne2k_pci_ethtool_ops; + NS8390_init(dev, 0); + + memcpy(dev->dev_addr, SA_prom, dev->addr_len); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + i = register_netdev(dev); + if (i) + goto err_out_free_netdev; + + printk("%s: %s found at %#lx, IRQ %d, %pM.\n", + dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq, + dev->dev_addr); + + return 0; + +err_out_free_netdev: + free_netdev (dev); +err_out_free_res: + release_region (ioaddr, NE_IO_EXTENT); + pci_set_drvdata (pdev, NULL); + return -ENODEV; + +} + +/* + * Magic incantation sequence for full duplex on the supported cards. + */ +static inline int set_realtek_fdx(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + + outb(0xC0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 3 */ + outb(0xC0, ioaddr + 0x01); /* Enable writes to CONFIG3 */ + outb(0x40, ioaddr + 0x06); /* Enable full duplex */ + outb(0x00, ioaddr + 0x01); /* Disable writes to CONFIG3 */ + outb(E8390_PAGE0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 0 */ + return 0; +} + +static inline int set_holtek_fdx(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + + outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20); + return 0; +} + +static int ne2k_pci_set_fdx(struct net_device *dev) +{ + if (ei_status.ne2k_flags & REALTEK_FDX) + return set_realtek_fdx(dev); + else if (ei_status.ne2k_flags & HOLTEK_FDX) + return set_holtek_fdx(dev); + + return -EOPNOTSUPP; +} + +static int ne2k_pci_open(struct net_device *dev) +{ + int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED, dev->name, dev); + if (ret) + return ret; + + if (ei_status.ne2k_flags & FORCE_FDX) + ne2k_pci_set_fdx(dev); + + ei_open(dev); + return 0; +} + +static int ne2k_pci_close(struct net_device *dev) +{ + ei_close(dev); + free_irq(dev->irq, dev); + return 0; +} + +/* Hard reset the card. This used to pause for the same period that a + 8390 reset command required, but that shouldn't be necessary. */ +static void ne2k_pci_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + + if (debug > 1) printk("%s: Resetting the 8390 t=%ld...", + dev->name, jiffies); + + outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) + if (jiffies - reset_start_time > 2) { + printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name); + break; + } + outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + + long nic_base = dev->base_addr; + + /* This *shouldn't* happen. 
If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); + outb(0, nic_base + EN0_RCNTHI); + outb(0, nic_base + EN0_RSARLO); /* On page boundary */ + outb(ring_page, nic_base + EN0_RSARHI); + outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_status.ne2k_flags & ONLY_16BIT_IO) { + insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); + } else { + *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT)); + le16_to_cpus(&hdr->count); + } + + outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +/* Block input and output, similar to the Crynwr packet driver. If you + are porting to a new ethercard, look at the packet driver source for hints. + The NEx000 doesn't share the on-board packet memory -- you have to put + the packet out through the "remote DMA" dataport using outb. */ + +static void ne2k_pci_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + long nic_base = dev->base_addr; + char *buf = skb->data; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + printk("%s: DMAing conflict in ne2k_pci_block_input " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + if (ei_status.ne2k_flags & ONLY_32BIT_IO) + count = (count + 3) & 0xFFFC; + outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb(count & 0xff, nic_base + EN0_RCNTLO); + outb(count >> 8, nic_base + EN0_RCNTHI); + outb(ring_offset & 0xff, nic_base + EN0_RSARLO); + outb(ring_offset >> 8, nic_base + EN0_RSARHI); + outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_status.ne2k_flags & ONLY_16BIT_IO) { + insw(NE_BASE + NE_DATAPORT,buf,count>>1); + if (count & 0x01) { + buf[count-1] = inb(NE_BASE + NE_DATAPORT); + } + } else { + insl(NE_BASE + NE_DATAPORT, buf, count>>2); + if (count & 3) { + buf += count & ~3; + if (count & 2) { + __le16 *b = (__le16 *)buf; + + *b++ = cpu_to_le16(inw(NE_BASE + NE_DATAPORT)); + buf = (char *)b; + } + if (count & 1) + *buf = inb(NE_BASE + NE_DATAPORT); + } + } + + outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static void ne2k_pci_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + long nic_base = NE_BASE; + unsigned long dma_start; + + /* On little-endian it's always safe to round the count up for + word writes. */ + if (ei_status.ne2k_flags & ONLY_32BIT_IO) + count = (count + 3) & 0xFFFC; + else + if (count & 0x01) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + printk("%s: DMAing conflict in ne2k_pci_block_output." + "[DMAstat:%d][irqlock:%d]\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + +#ifdef NE8390_RW_BUGFIX + /* Handle the read-before-write bug the same way as the + Crynwr packet driver -- the NatSemi method doesn't work. 
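[Editorial note] In the 32-bit path of ne2k_pci_block_input() above, the dword, word, and byte moves must add up to exactly count. A worked example for a maximum-size frame:

/* Sketch: the dword/word/byte split for a 1514-byte frame. */
#include <stdio.h>

int main(void)
{
        int count = 1514;

        printf("insl moves %d dwords (%d bytes)\n", count >> 2, count & ~3);
        printf("tail: %d word(s) and %d byte(s)\n",
               (count & 2) ? 1 : 0, count & 1); /* 378 dwords + 1 word + 0 bytes */
        return 0;
}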
+ Actually this doesn't always work either, but if you have + problems with your NEx000 this is better than nothing! */ + outb(0x42, nic_base + EN0_RCNTLO); + outb(0x00, nic_base + EN0_RCNTHI); + outb(0x42, nic_base + EN0_RSARLO); + outb(0x00, nic_base + EN0_RSARHI); + outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); +#endif + outb(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. */ + outb(count & 0xff, nic_base + EN0_RCNTLO); + outb(count >> 8, nic_base + EN0_RCNTHI); + outb(0x00, nic_base + EN0_RSARLO); + outb(start_page, nic_base + EN0_RSARHI); + outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); + if (ei_status.ne2k_flags & ONLY_16BIT_IO) { + outsw(NE_BASE + NE_DATAPORT, buf, count>>1); + } else { + outsl(NE_BASE + NE_DATAPORT, buf, count>>2); + if (count & 3) { + buf += count & ~3; + if (count & 2) { + __le16 *b = (__le16 *)buf; + + outw(le16_to_cpu(*b++), NE_BASE + NE_DATAPORT); + buf = (char *)b; + } + } + } + + dma_start = jiffies; + + while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) + if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */ + printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); + ne2k_pci_reset_8390(dev); + NS8390_init(dev,1); + break; + } + + outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static void ne2k_pci_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct ei_device *ei = netdev_priv(dev); + struct pci_dev *pci_dev = (struct pci_dev *) ei->priv; + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(pci_dev)); +} + +static const struct ethtool_ops ne2k_pci_ethtool_ops = { + .get_drvinfo = ne2k_pci_get_drvinfo, +}; + +static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + BUG_ON(!dev); + unregister_netdev(dev); + release_region(dev->base_addr, NE_IO_EXTENT); + free_netdev(dev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +#ifdef CONFIG_PM +static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata (pdev); + + netif_device_detach(dev); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int ne2k_pci_resume (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + int rc; + + pci_set_power_state(pdev, 0); + pci_restore_state(pdev); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + NS8390_init(dev, 1); + netif_device_attach(dev); + + return 0; +} + +#endif /* CONFIG_PM */ + + +static struct pci_driver ne2k_driver = { + .name = DRV_NAME, + .probe = ne2k_pci_init_one, + .remove = __devexit_p(ne2k_pci_remove_one), + .id_table = ne2k_pci_tbl, +#ifdef CONFIG_PM + .suspend = ne2k_pci_suspend, + .resume = ne2k_pci_resume, +#endif /* CONFIG_PM */ + +}; + + +static int __init ne2k_pci_init(void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + printk(version); +#endif + return pci_register_driver(&ne2k_driver); +} + + +static void __exit ne2k_pci_cleanup(void) +{ + pci_unregister_driver (&ne2k_driver); +} + +module_init(ne2k_pci_init); +module_exit(ne2k_pci_cleanup); diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c new file mode 100644 index 0000000..243ed2a --- /dev/null +++ b/drivers/net/ethernet/8390/ne3210.c @@ -0,0 +1,347 @@ +/* + ne3210.c + + Linux driver for Novell 
NE3210 EISA Network Adapter + + Copyright (C) 1998, Paul Gortmaker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Information and Code Sources: + + 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32) + 2) The existing myriad of other Linux 8390 drivers by Donald Becker. + 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file + + The NE3210 is an EISA shared memory NS8390 implementation. Shared + memory address > 1MB should work with this driver. + + Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched + around (or perhaps there are some defective/backwards cards ???) + + This driver WILL NOT WORK FOR THE NE3200 - it is completely different + and does not use an 8390 at all. + + Updated to EISA probing API 5/2003 by Marc Zyngier. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8390.h" + +#define DRV_NAME "ne3210" + +static void ne3210_reset_8390(struct net_device *dev); + +static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); +static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); +static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); + +#define NE3210_START_PG 0x00 /* First page of TX buffer */ +#define NE3210_STOP_PG 0x80 /* Last page +1 of RX ring */ + +#define NE3210_IO_EXTENT 0x20 +#define NE3210_SA_PROM 0x16 /* Start of e'net addr. */ +#define NE3210_RESET_PORT 0xc84 +#define NE3210_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */ + +#define NE3210_ADDR0 0x00 /* 3 byte vendor prefix */ +#define NE3210_ADDR1 0x00 +#define NE3210_ADDR2 0x1b + +#define NE3210_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */ +#define NE3210_CFG2 0xc90 +#define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1) + +/* + * You can OR any of the following bits together and assign it + * to NE3210_DEBUG to get verbose driver info during operation. + * Currently only the probe one is implemented. + */ + +#define NE3210_D_PROBE 0x01 +#define NE3210_D_RX_PKT 0x02 +#define NE3210_D_TX_PKT 0x04 +#define NE3210_D_IRQ 0x08 + +#define NE3210_DEBUG 0x0 + +static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; +static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0}; +static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"}; +static int ifmap_val[] __initdata = { + IF_PORT_10BASET, + IF_PORT_UNKNOWN, + IF_PORT_10BASE2, + IF_PORT_AUI, +}; + +static int __init ne3210_eisa_probe (struct device *device) +{ + unsigned long ioaddr, phys_mem; + int i, retval, port_index; + struct eisa_device *edev = to_eisa_device (device); + struct net_device *dev; + + /* Allocate dev->priv and fill in 8390 specific dev fields. 
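[Editorial note] The probe below unpacks one NE3210 configuration byte three ways using the tables above: bits 6-7 pick the media port, bits 3-5 index irq_map[], and bits 0-2 index shmem_map[] (in 4 kB units). A standalone sketch with an example CFG2 value:

/* Sketch: decoding an NE3210 CFG2 byte as ne3210_eisa_probe() does. */
#include <stdio.h>

int main(void)
{
        static const int irq_map[] = { 15, 12, 11, 10, 9, 7, 5, 3 };
        static const long shmem_map[] = { 0xff0, 0xfe0, 0xfff0, 0xd8,
                                          0xffe0, 0xffc0, 0xd0, 0x0 };
        static const char *ifmap[] = { "UTP", "?", "BNC", "AUI" };
        unsigned char cfg2 = 0x88; /* example CFG2 value */

        printf("media %s, irq %d, shared mem %#lx\n",
               ifmap[cfg2 >> 6],                  /* bits 6-7: media port */
               irq_map[(cfg2 >> 3) & 0x07],       /* bits 3-5: IRQ select */
               shmem_map[cfg2 & 0x07] * 0x1000);  /* bits 0-2: 4 kB units */
        return 0;
}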
*/ + if (!(dev = alloc_ei_netdev ())) { + printk ("ne3210.c: unable to allocate memory for dev!\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(dev, device); + dev_set_drvdata(device, dev); + ioaddr = edev->base_addr; + + if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) { + retval = -EBUSY; + goto out; + } + + if (!request_region(ioaddr + NE3210_CFG1, + NE3210_CFG_EXTENT, DRV_NAME)) { + retval = -EBUSY; + goto out1; + } + +#if NE3210_DEBUG & NE3210_D_PROBE + printk("ne3210-debug: probe at %#x, ID %s\n", ioaddr, edev->id.sig); + printk("ne3210-debug: config regs: %#x %#x\n", + inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2)); +#endif + + port_index = inb(ioaddr + NE3210_CFG2) >> 6; + for(i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); + printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n", + edev->slot, ifmap[port_index], dev->dev_addr); + + /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */ + dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07]; + printk("ne3210.c: using IRQ %d, ", dev->irq); + + retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); + if (retval) { + printk (" unable to get IRQ %d.\n", dev->irq); + goto out2; + } + + phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000; + + /* + BEWARE!! Some dain-bramaged EISA SCUs will allow you to put + the card mem within the region covered by `normal' RAM !!! + */ + if (phys_mem > 1024*1024) { /* phys addr > 1MB */ + if (phys_mem < virt_to_phys(high_memory)) { + printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n"); + printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n"); + printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n", + (u64)virt_to_phys(high_memory)); + printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n"); + retval = -EINVAL; + goto out3; + } + } + + if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) { + printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n", + phys_mem); + goto out3; + } + + printk("%dkB memory at physical address %#lx\n", + NE3210_STOP_PG/4, phys_mem); + + ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100); + if (!ei_status.mem) { + printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n"); + printk(KERN_ERR "ne3210.c: Driver NOT installed.\n"); + retval = -EAGAIN; + goto out4; + } + printk("ne3210.c: remapped %dkB card memory to virtual address %p\n", + NE3210_STOP_PG/4, ei_status.mem); + dev->mem_start = (unsigned long)ei_status.mem; + dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256; + + /* The 8390 offset is zero for the NE3210 */ + dev->base_addr = ioaddr; + + ei_status.name = "NE3210"; + ei_status.tx_start_page = NE3210_START_PG; + ei_status.rx_start_page = NE3210_START_PG + TX_PAGES; + ei_status.stop_page = NE3210_STOP_PG; + ei_status.word16 = 1; + ei_status.priv = phys_mem; + + if (ei_debug > 0) + printk("ne3210 loaded.\n"); + + ei_status.reset_8390 = &ne3210_reset_8390; + ei_status.block_input = &ne3210_block_input; + ei_status.block_output = &ne3210_block_output; + ei_status.get_8390_hdr = &ne3210_get_8390_hdr; + + dev->netdev_ops = &ei_netdev_ops; + + dev->if_port = ifmap_val[port_index]; + + if ((retval = register_netdev (dev))) + goto out5; + + NS8390_init(dev, 0); + return 0; + + out5: + iounmap(ei_status.mem); + out4: + release_mem_region (phys_mem, NE3210_STOP_PG*0x100); + out3: + free_irq (dev->irq, dev); + out2: + release_region (ioaddr + 
NE3210_CFG1, NE3210_CFG_EXTENT); + out1: + release_region (ioaddr, NE3210_IO_EXTENT); + out: + free_netdev (dev); + + return retval; +} + +static int __devexit ne3210_eisa_remove (struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + unsigned long ioaddr = to_eisa_device (device)->base_addr; + + unregister_netdev (dev); + iounmap(ei_status.mem); + release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100); + free_irq (dev->irq, dev); + release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT); + release_region (ioaddr, NE3210_IO_EXTENT); + free_netdev (dev); + + return 0; +} + +/* + * Reset by toggling the "Board Enable" bits (bit 2 and 0). + */ + +static void ne3210_reset_8390(struct net_device *dev) +{ + unsigned short ioaddr = dev->base_addr; + + outb(0x04, ioaddr + NE3210_RESET_PORT); + if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name); + + mdelay(2); + + ei_status.txing = 0; + outb(0x01, ioaddr + NE3210_RESET_PORT); + if (ei_debug > 1) printk("reset done\n"); +} + +/* + * Note: In the following three functions is the implicit assumption + * that the associated memcpy will only use "rep; movsl" as long as + * we keep the counts as some multiple of doublewords. This is a + * requirement of the hardware, and also prevents us from using + * eth_io_copy_and_sum() since we can't guarantee it will limit + * itself to doubleword access. + */ + +/* + * Grab the 8390 specific header. Similar to the block_input routine, but + * we don't need to be concerned with ring wrap as the header will be at + * the start of a page, so we optimize accordingly. (A single doubleword.) + */ + +static void +ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8); + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); + hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ +} + +/* + * Block input and output are easy on shared memory ethercards, the only + * complication is when the ring buffer wraps. The count will already + * be rounded up to a doubleword value via ne3210_get_8390_hdr() above. + */ + +static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, + int ring_offset) +{ + void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256; + + if (ring_offset + count > NE3210_STOP_PG*256) { + /* Packet wraps over end of ring buffer. */ + int semi_count = NE3210_STOP_PG*256 - ring_offset; + memcpy_fromio(skb->data, start, semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, + ei_status.mem + TX_PAGES*256, count); + } else { + /* Packet is in one chunk. 
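[Editorial note] A worked example of the wrap split in ne3210_block_input() above: the ring ends at NE3210_STOP_PG*256 = 0x8000, and the wrapped remainder continues at TX_PAGES*256 into the shared-memory window (the start of the receive ring, since NE3210_START_PG is 0):

/* Sketch: how a packet straddling the ring end is copied in two pieces. */
#include <stdio.h>

int main(void)
{
        int stop = 0x80 * 256;    /* NE3210_STOP_PG * 256 */
        int ring_offset = 0x7f80; /* example packet start */
        int count = 300;          /* example packet length */

        if (ring_offset + count > stop) {
                int semi_count = stop - ring_offset;

                printf("copy %d bytes, then %d bytes from the ring start\n",
                       semi_count, count - semi_count); /* 128 + 172 */
        }
        return 0;
}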
*/ + memcpy_fromio(skb->data, start, count); + } +} + +static void ne3210_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8); + + count = (count + 3) & ~3; /* Round up to doubleword */ + memcpy_toio(shmem, buf, count); +} + +static struct eisa_device_id ne3210_ids[] = { + { "EGL0101" }, + { "NVL1801" }, + { "" }, +}; +MODULE_DEVICE_TABLE(eisa, ne3210_ids); + +static struct eisa_driver ne3210_eisa_driver = { + .id_table = ne3210_ids, + .driver = { + .name = "ne3210", + .probe = ne3210_eisa_probe, + .remove = __devexit_p (ne3210_eisa_remove), + }, +}; + +MODULE_DESCRIPTION("NE3210 EISA Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(eisa, ne3210_ids); + +static int ne3210_init(void) +{ + return eisa_driver_register (&ne3210_eisa_driver); +} + +static void ne3210_cleanup(void) +{ + eisa_driver_unregister (&ne3210_eisa_driver); +} + +module_init (ne3210_init); +module_exit (ne3210_cleanup); diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c new file mode 100644 index 0000000..4010761 --- /dev/null +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -0,0 +1,1710 @@ +/*====================================================================== + + A PCMCIA ethernet driver for NS8390-based cards + + This driver supports the D-Link DE-650 and Linksys EthernetCard + cards, the newer D-Link and Linksys combo cards, Accton EN2212 + cards, the RPTI EP400, and the PreMax PE-200 in non-shared-memory + mode, and the IBM Credit Card Adapter, the NE4100, the Thomas + Conrad ethernet card, and the Kingston KNE-PCM/x in shared-memory + mode. It will also handle the Socket EA card in either mode. + + Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net + + pcnet_cs.c 1.153 2003/11/09 18:53:09 + + The network driver code is based on Donald Becker's NE2000 code: + + Written 1992,1993 by Donald Becker. + Copyright 1993 United States Government as represented by the + Director, National Security Agency. This software may be used and + distributed according to the terms of the GNU General Public License, + incorporated herein by reference. + Donald Becker may be reached at becker@scyld.com + + Based also on Keith Moore's changes to Don Becker's code, for IBM + CCAE support. Drivers merged back together, and shared-memory + Socket EA support added, by Ken Raeburn, September 1995. + +======================================================================*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "8390.h" + +#include +#include +#include +#include + +#include +#include +#include +#include + +#define PCNET_CMD 0x00 +#define PCNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */ +#define PCNET_RESET 0x1f /* Issue a read to reset, a write to clear. 
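[Editorial note] The PCNET_RESET comment above describes the NE2000-family reset idiom used throughout this patch: a read of the port starts the reset and a write clears it, which is why the drivers pulse it with a single combined statement. In isolation (ne2000_pulse_reset() is a hypothetical helper):

/* Sketch: the read-to-reset, write-to-clear pulse. */
static void ne2000_pulse_reset(unsigned int ioaddr)
{
        /* the read starts the reset; writing the value back clears it */
        outb(inb(ioaddr + PCNET_RESET), ioaddr + PCNET_RESET);
}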
*/ +#define PCNET_MISC 0x18 /* For IBM CCAE and Socket EA cards */ + +#define PCNET_START_PG 0x40 /* First page of TX buffer */ +#define PCNET_STOP_PG 0x80 /* Last page +1 of RX ring */ + +/* Socket EA cards have a larger packet buffer */ +#define SOCKET_START_PG 0x01 +#define SOCKET_STOP_PG 0xff + +#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */ + +static const char *if_names[] = { "auto", "10baseT", "10base2"}; + + +/*====================================================================*/ + +/* Module parameters */ + +MODULE_AUTHOR("David Hinds "); +MODULE_DESCRIPTION("NE2000 compatible PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); + +#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) + +INT_MODULE_PARM(if_port, 1); /* Transceiver type */ +INT_MODULE_PARM(use_big_buf, 1); /* use 64K packet buffer? */ +INT_MODULE_PARM(mem_speed, 0); /* shared mem speed, in ns */ +INT_MODULE_PARM(delay_output, 0); /* pause after xmit? */ +INT_MODULE_PARM(delay_time, 4); /* in usec */ +INT_MODULE_PARM(use_shmem, -1); /* use shared memory? */ +INT_MODULE_PARM(full_duplex, 0); /* full duplex? */ + +/* Ugh! Let the user hardwire the hardware address for queer cards */ +static int hw_addr[6] = { 0, /* ... */ }; +module_param_array(hw_addr, int, NULL, 0); + +/*====================================================================*/ + +static void mii_phy_probe(struct net_device *dev); +static int pcnet_config(struct pcmcia_device *link); +static void pcnet_release(struct pcmcia_device *link); +static int pcnet_open(struct net_device *dev); +static int pcnet_close(struct net_device *dev); +static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); +static void ei_watchdog(u_long arg); +static void pcnet_reset_8390(struct net_device *dev); +static int set_config(struct net_device *dev, struct ifmap *map); +static int setup_shmem_window(struct pcmcia_device *link, int start_pg, + int stop_pg, int cm_offset); +static int setup_dma_config(struct pcmcia_device *link, int start_pg, + int stop_pg); + +static void pcnet_detach(struct pcmcia_device *p_dev); + +/*====================================================================*/ + +typedef struct hw_info_t { + u_int offset; + u_char a0, a1, a2; + u_int flags; +} hw_info_t; + +#define DELAY_OUTPUT 0x01 +#define HAS_MISC_REG 0x02 +#define USE_BIG_BUF 0x04 +#define HAS_IBM_MISC 0x08 +#define IS_DL10019 0x10 +#define IS_DL10022 0x20 +#define HAS_MII 0x40 +#define USE_SHMEM 0x80 /* autodetected */ + +#define AM79C9XX_HOME_PHY 0x00006B90 /* HomePNA PHY */ +#define AM79C9XX_ETH_PHY 0x00006B70 /* 10baseT PHY */ +#define MII_PHYID_REV_MASK 0xfffffff0 +#define MII_PHYID_REG1 0x02 +#define MII_PHYID_REG2 0x03 + +static hw_info_t hw_info[] = { + { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT }, + { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 }, + { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 }, + { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94, + DELAY_OUTPUT | HAS_IBM_MISC }, + { /* Danpex EN-6200P2 */ 0x0110, 0x00, 0x40, 0xc7, 0 }, + { /* DataTrek NetCard */ 0x0ff0, 0x00, 0x20, 0xe8, 0 }, + { /* Dayna CommuniCard E */ 0x0110, 0x00, 0x80, 0x19, 0 }, + { /* D-Link DE-650 */ 0x0040, 0x00, 0x80, 0xc8, 0 }, + { /* EP-210 Ethernet */ 0x0110, 0x00, 0x40, 0x33, 0 }, + { /* EP4000 Ethernet */ 0x01c0, 0x00, 0x00, 0xb4, 0 }, + { /* Epson EEN10B */ 0x0ff0, 0x00, 0x00, 0x48, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* ELECOM Laneed LD-CDWA */ 
0xb8, 0x08, 0x00, 0x42, 0 }, + { /* Hypertec Ethernet */ 0x01c0, 0x00, 0x40, 0x4c, 0 }, + { /* IBM CCAE */ 0x0ff0, 0x08, 0x00, 0x5a, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* IBM CCAE */ 0x0ff0, 0x00, 0x04, 0xac, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* IBM CCAE */ 0x0ff0, 0x00, 0x06, 0x29, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* IBM FME */ 0x0374, 0x08, 0x00, 0x5a, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* IBM FME */ 0x0374, 0x00, 0x04, 0xac, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* Kansai KLA-PCM/T */ 0x0ff0, 0x00, 0x60, 0x87, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* NSC DP83903 */ 0x0374, 0x08, 0x00, 0x17, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* NSC DP83903 */ 0x0374, 0x00, 0xc0, 0xa8, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* NSC DP83903 */ 0x0374, 0x00, 0xa0, 0xb0, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* NSC DP83903 */ 0x0198, 0x00, 0x20, 0xe0, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* I-O DATA PCLA/T */ 0x0ff0, 0x00, 0xa0, 0xb0, 0 }, + { /* Katron PE-520 */ 0x0110, 0x00, 0x40, 0xf6, 0 }, + { /* Kingston KNE-PCM/x */ 0x0ff0, 0x00, 0xc0, 0xf0, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* Kingston KNE-PCM/x */ 0x0ff0, 0xe2, 0x0c, 0x0f, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* Kingston KNE-PC2 */ 0x0180, 0x00, 0xc0, 0xf0, 0 }, + { /* Maxtech PCN2000 */ 0x5000, 0x00, 0x00, 0xe8, 0 }, + { /* NDC Instant-Link */ 0x003a, 0x00, 0x80, 0xc6, 0 }, + { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 }, + { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 }, + { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 }, + { /* SCM Ethernet */ 0x0ff0, 0x00, 0x20, 0xcb, 0 }, + { /* Socket EA */ 0x4000, 0x00, 0xc0, 0x1b, + DELAY_OUTPUT | HAS_MISC_REG | USE_BIG_BUF }, + { /* Socket LP-E CF+ */ 0x01c0, 0x00, 0xc0, 0x1b, 0 }, + { /* SuperSocket RE450T */ 0x0110, 0x00, 0xe0, 0x98, 0 }, + { /* Volktek NPL-402CT */ 0x0060, 0x00, 0x40, 0x05, 0 }, + { /* NEC PC-9801N-J12 */ 0x0ff0, 0x00, 0x00, 0x4c, 0 }, + { /* PCMCIA Technology OEM */ 0x01c8, 0x00, 0xa0, 0x0c, 0 } +}; + +#define NR_INFO ARRAY_SIZE(hw_info) + +static hw_info_t default_info = { 0, 0, 0, 0, 0 }; +static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII }; +static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII }; + +typedef struct pcnet_dev_t { + struct pcmcia_device *p_dev; + u_int flags; + void __iomem *base; + struct timer_list watchdog; + int stale, fast_poll; + u_char phy_id; + u_char eth_phy, pna_phy; + u_short link_status; + u_long mii_reset; +} pcnet_dev_t; + +static inline pcnet_dev_t *PRIV(struct net_device *dev) +{ + char *p = netdev_priv(dev); + return (pcnet_dev_t *)(p + sizeof(struct ei_device)); +} + +static const struct net_device_ops pcnet_netdev_ops = { + .ndo_open = pcnet_open, + .ndo_stop = pcnet_close, + .ndo_set_config = set_config, + .ndo_start_xmit = ei_start_xmit, + .ndo_get_stats = ei_get_stats, + .ndo_do_ioctl = ei_ioctl, + .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + +static int pcnet_probe(struct pcmcia_device *link) +{ + pcnet_dev_t *info; + struct net_device *dev; + + dev_dbg(&link->dev, "pcnet_attach()\n"); + + /* Create new ethernet device */ + dev = __alloc_ei_netdev(sizeof(pcnet_dev_t)); + if (!dev) return 
-ENOMEM; + info = PRIV(dev); + info->p_dev = link; + link->priv = dev; + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + + dev->netdev_ops = &pcnet_netdev_ops; + + return pcnet_config(link); +} /* pcnet_attach */ + +static void pcnet_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "pcnet_detach\n"); + + unregister_netdev(dev); + + pcnet_release(link); + + free_netdev(dev); +} /* pcnet_detach */ + +/*====================================================================== + + This probes for a card's hardware address, for card types that + encode this information in their CIS. + +======================================================================*/ + +static hw_info_t *get_hwinfo(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + u_char __iomem *base, *virt; + int i, j; + + /* Allocate a small memory window */ + link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; + link->resource[2]->start = 0; link->resource[2]->end = 0; + i = pcmcia_request_window(link, link->resource[2], 0); + if (i != 0) + return NULL; + + virt = ioremap(link->resource[2]->start, + resource_size(link->resource[2])); + for (i = 0; i < NR_INFO; i++) { + pcmcia_map_mem_page(link, link->resource[2], + hw_info[i].offset & ~(resource_size(link->resource[2])-1)); + base = &virt[hw_info[i].offset & (resource_size(link->resource[2])-1)]; + if ((readb(base+0) == hw_info[i].a0) && + (readb(base+2) == hw_info[i].a1) && + (readb(base+4) == hw_info[i].a2)) { + for (j = 0; j < 6; j++) + dev->dev_addr[j] = readb(base + (j<<1)); + break; + } + } + + iounmap(virt); + j = pcmcia_release_window(link, link->resource[2]); + return (i < NR_INFO) ? hw_info+i : NULL; +} /* get_hwinfo */ + +/*====================================================================== + + This probes for a card's hardware address by reading the PROM. + It checks the address against a list of known types, then falls + back to a simple NE2000 clone signature check. + +======================================================================*/ + +static hw_info_t *get_prom(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + unsigned int ioaddr = dev->base_addr; + u_char prom[32]; + int i, j; + + /* This is lifted straight from drivers/net/ne.c */ + struct { + u_char value, offset; + } program_seq[] = { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + + pcnet_reset_8390(dev); + mdelay(10); + + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); + + for (i = 0; i < 32; i++) + prom[i] = inb(ioaddr + PCNET_DATAPORT); + for (i = 0; i < NR_INFO; i++) { + if ((prom[0] == hw_info[i].a0) && + (prom[2] == hw_info[i].a1) && + (prom[4] == hw_info[i].a2)) + break; + } + if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) { + for (j = 0; j < 6; j++) + dev->dev_addr[j] = prom[j<<1]; + return (i < NR_INFO) ? 
hw_info+i : &default_info; + } + return NULL; +} /* get_prom */ + +/*====================================================================== + + For DL10019 based cards, like the Linksys EtherFast + +======================================================================*/ + +static hw_info_t *get_dl10019(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + int i; + u_char sum; + + for (sum = 0, i = 0x14; i < 0x1c; i++) + sum += inb_p(dev->base_addr + i); + if (sum != 0xff) + return NULL; + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i); + i = inb(dev->base_addr + 0x1f); + return ((i == 0x91)||(i == 0x99)) ? &dl10022_info : &dl10019_info; +} + +/*====================================================================== + + For Asix AX88190 based cards + +======================================================================*/ + +static hw_info_t *get_ax88190(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + unsigned int ioaddr = dev->base_addr; + int i, j; + + /* Not much of a test, but the alternatives are messy */ + if (link->config_base != 0x03c0) + return NULL; + + outb_p(0x01, ioaddr + EN0_DCFG); /* Set word-wide access. */ + outb_p(0x00, ioaddr + EN0_RSARLO); /* DMA starting at 0x0400. */ + outb_p(0x04, ioaddr + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, ioaddr + E8390_CMD); + + for (i = 0; i < 6; i += 2) { + j = inw(ioaddr + PCNET_DATAPORT); + dev->dev_addr[i] = j & 0xff; + dev->dev_addr[i+1] = j >> 8; + } + return NULL; +} + +/*====================================================================== + + This should be totally unnecessary... but when we can't figure + out the hardware address any other way, we'll let the user hard + wire it when the module is initialized. + +======================================================================*/ + +static hw_info_t *get_hwired(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + int i; + + for (i = 0; i < 6; i++) + if (hw_addr[i] != 0) break; + if (i == 6) + return NULL; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = hw_addr[i]; + + return &default_info; +} /* get_hwired */ + +static int try_io_port(struct pcmcia_device *link) +{ + int j, ret; + link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + if (link->resource[0]->end == 32) { + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + if (link->resource[1]->end > 0) { + /* for master/slave multifunction cards */ + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + } + } else { + /* This should be two 16-port windows */ + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16; + } + if (link->resource[0]->start == 0) { + for (j = 0; j < 0x400; j += 0x20) { + link->resource[0]->start = j ^ 0x300; + link->resource[1]->start = (j ^ 0x300) + 0x10; + link->io_lines = 16; + ret = pcmcia_request_io(link); + if (ret == 0) + return ret; + } + return ret; + } else { + return pcmcia_request_io(link); + } +} + +static int pcnet_confcheck(struct pcmcia_device *p_dev, void *priv_data) +{ + int *priv = priv_data; + int try = (*priv & 0x1); + + *priv &= (p_dev->resource[2]->end >= 0x4000) ? 
0x10 : ~0x10; + + if (p_dev->config_index == 0) + return -EINVAL; + + if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32) + return -EINVAL; + + if (try) + p_dev->io_lines = 16; + return try_io_port(p_dev); +} + +static hw_info_t *pcnet_try_config(struct pcmcia_device *link, + int *has_shmem, int try) +{ + struct net_device *dev = link->priv; + hw_info_t *local_hw_info; + pcnet_dev_t *info = PRIV(dev); + int priv = try; + int ret; + + ret = pcmcia_loop_config(link, pcnet_confcheck, &priv); + if (ret) { + dev_warn(&link->dev, "no useable port range found\n"); + return NULL; + } + *has_shmem = (priv & 0x10); + + if (!link->irq) + return NULL; + + if (resource_size(link->resource[1]) == 8) + link->config_flags |= CONF_ENABLE_SPKR; + + if ((link->manf_id == MANFID_IBM) && + (link->card_id == PRODID_IBM_HOME_AND_AWAY)) + link->config_index |= 0x10; + + ret = pcmcia_enable_device(link); + if (ret) + return NULL; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + + if (info->flags & HAS_MISC_REG) { + if ((if_port == 1) || (if_port == 2)) + dev->if_port = if_port; + else + dev_notice(&link->dev, "invalid if_port requested\n"); + } else + dev->if_port = 0; + + if ((link->config_base == 0x03c0) && + (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { + dev_info(&link->dev, + "this is an AX88190 card - use axnet_cs instead.\n"); + return NULL; + } + + local_hw_info = get_hwinfo(link); + if (!local_hw_info) + local_hw_info = get_prom(link); + if (!local_hw_info) + local_hw_info = get_dl10019(link); + if (!local_hw_info) + local_hw_info = get_ax88190(link); + if (!local_hw_info) + local_hw_info = get_hwired(link); + + return local_hw_info; +} + +static int pcnet_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + pcnet_dev_t *info = PRIV(dev); + int start_pg, stop_pg, cm_offset; + int has_shmem = 0; + hw_info_t *local_hw_info; + + dev_dbg(&link->dev, "pcnet_config\n"); + + local_hw_info = pcnet_try_config(link, &has_shmem, 0); + if (!local_hw_info) { + /* check whether forcing io_lines to 16 helps... */ + pcmcia_disable_device(link); + local_hw_info = pcnet_try_config(link, &has_shmem, 1); + if (local_hw_info == NULL) { + dev_notice(&link->dev, "unable to read hardware net" + " address for io base %#3lx\n", dev->base_addr); + goto failed; + } + } + + info->flags = local_hw_info->flags; + /* Check for user overrides */ + info->flags |= (delay_output) ? 
DELAY_OUTPUT : 0; + if ((link->manf_id == MANFID_SOCKET) && + ((link->card_id == PRODID_SOCKET_LPE) || + (link->card_id == PRODID_SOCKET_LPE_CF) || + (link->card_id == PRODID_SOCKET_EIO))) + info->flags &= ~USE_BIG_BUF; + if (!use_big_buf) + info->flags &= ~USE_BIG_BUF; + + if (info->flags & USE_BIG_BUF) { + start_pg = SOCKET_START_PG; + stop_pg = SOCKET_STOP_PG; + cm_offset = 0x10000; + } else { + start_pg = PCNET_START_PG; + stop_pg = PCNET_STOP_PG; + cm_offset = 0; + } + + /* has_shmem is ignored if use_shmem != -1 */ + if ((use_shmem == 0) || (!has_shmem && (use_shmem == -1)) || + (setup_shmem_window(link, start_pg, stop_pg, cm_offset) != 0)) + setup_dma_config(link, start_pg, stop_pg); + + ei_status.name = "NE2000"; + ei_status.word16 = 1; + ei_status.reset_8390 = pcnet_reset_8390; + + if (info->flags & (IS_DL10019|IS_DL10022)) + mii_phy_probe(dev); + + SET_NETDEV_DEV(dev, &link->dev); + + if (register_netdev(dev) != 0) { + pr_notice("register_netdev() failed\n"); + goto failed; + } + + if (info->flags & (IS_DL10019|IS_DL10022)) { + u_char id = inb(dev->base_addr + 0x1a); + netdev_info(dev, "NE2000 (DL100%d rev %02x): ", + (info->flags & IS_DL10022) ? 22 : 19, id); + if (info->pna_phy) + pr_cont("PNA, "); + } else { + netdev_info(dev, "NE2000 Compatible: "); + } + pr_cont("io %#3lx, irq %d,", dev->base_addr, dev->irq); + if (info->flags & USE_SHMEM) + pr_cont(" mem %#5lx,", dev->mem_start); + if (info->flags & HAS_MISC_REG) + pr_cont(" %s xcvr,", if_names[dev->if_port]); + pr_cont(" hw_addr %pM\n", dev->dev_addr); + return 0; + +failed: + pcnet_release(link); + return -ENODEV; +} /* pcnet_config */ + +static void pcnet_release(struct pcmcia_device *link) +{ + pcnet_dev_t *info = PRIV(link->priv); + + dev_dbg(&link->dev, "pcnet_release\n"); + + if (info->flags & USE_SHMEM) + iounmap(info->base); + + pcmcia_disable_device(link); +} + +static int pcnet_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int pcnet_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + pcnet_reset_8390(dev); + NS8390_init(dev, 1); + netif_device_attach(dev); + } + + return 0; +} + + +/*====================================================================== + + MII interface support for DL10019 and DL10022 based cards + + On the DL10019, the MII IO direction bit is 0x10; on the DL10022 + it is 0x20. Setting both bits seems to work on both card types. 
+
+======================================================================*/
+
+#define DLINK_GPIO 0x1c
+#define DLINK_DIAG 0x1d
+#define DLINK_EEPROM 0x1e
+
+#define MDIO_SHIFT_CLK 0x80
+#define MDIO_DATA_OUT 0x40
+#define MDIO_DIR_WRITE 0x30
+#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
+#define MDIO_DATA_READ 0x10
+#define MDIO_MASK 0x0f
+
+static void mdio_sync(unsigned int addr)
+{
+ int bits, mask = inb(addr) & MDIO_MASK;
+ for (bits = 0; bits < 32; bits++) {
+ outb(mask | MDIO_DATA_WRITE1, addr);
+ outb(mask | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+static int mdio_read(unsigned int addr, int phy_id, int loc)
+{
+ u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
+ int i, retval = 0, mask = inb(addr) & MDIO_MASK;
+
+ mdio_sync(addr);
+ for (i = 13; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(mask | dat, addr);
+ outb(mask | dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 19; i > 0; i--) {
+ outb(mask, addr);
+ retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
+ outb(mask | MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
+{
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i, mask = inb(addr) & MDIO_MASK;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(mask | dat, addr);
+ outb(mask | dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 1; i >= 0; i--) {
+ outb(mask, addr);
+ outb(mask | MDIO_SHIFT_CLK, addr);
+ }
+}
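The two MII routines above bit-bang a standard clause-22 management frame on the DLINK_GPIO port: a read shifts out 14 bits (start 01, read opcode 10, 5-bit PHY address, 5-bit register address), then clocks the turnaround and 16 data bits back in, while a write shifts out the full 32-bit frame. A minimal user-space sketch of how mdio_read() assembles that 14-bit command (the phy_id and loc values are hypothetical; this is illustration only, not driver code):

/* mdio_frame_demo.c: print the 14 command bits mdio_read() shifts out. */
#include <stdio.h>

int main(void)
{
	unsigned int phy_id = 1, loc = 2;	/* hypothetical PHY and register */
	/* 0x06 = start bits (01) + read opcode (10); the 5-bit PHY and
	 * register addresses follow, most significant bit first. */
	unsigned int cmd = (0x06 << 10) | (phy_id << 5) | loc;
	int i;

	for (i = 13; i >= 0; i--)
		putchar((cmd & (1u << i)) ? '1' : '0');
	putchar('\n');	/* prints 01100000100010: 01 10 00001 00010 */
	return 0;
}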
+
+/*======================================================================
+
+ EEPROM access routines for DL10019 and DL10022 based cards
+
+======================================================================*/
+
+#define EE_EEP 0x40
+#define EE_ASIC 0x10
+#define EE_CS 0x08
+#define EE_CK 0x04
+#define EE_DO 0x02
+#define EE_DI 0x01
+#define EE_ADOT 0x01 /* DataOut for ASIC */
+#define EE_READ_CMD 0x06
+
+#define DL19FDUPLX 0x0400 /* DL10019 Full duplex mode */
+
+static int read_eeprom(unsigned int ioaddr, int location)
+{
+ int i, retval = 0;
+ unsigned int ee_addr = ioaddr + DLINK_EEPROM;
+ int read_cmd = location | (EE_READ_CMD << 8);
+
+ outb(0, ee_addr);
+ outb(EE_EEP|EE_CS, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+ outb_p(EE_EEP|EE_CS|dataval, ee_addr);
+ outb_p(EE_EEP|EE_CS|dataval|EE_CK, ee_addr);
+ }
+ outb(EE_EEP|EE_CS, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb_p(EE_EEP|EE_CS | EE_CK, ee_addr);
+ retval = (retval << 1) | ((inb(ee_addr) & EE_DI) ? 1 : 0);
+ outb_p(EE_EEP|EE_CS, ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(0, ee_addr);
+ return retval;
+}
+
+/*
+ The internal ASIC registers can be changed by EEPROM READ access
+ with EE_ASIC bit set.
+ In ASIC mode, EE_ADOT is used to output the data to the ASIC.
+*/
+
+static void write_asic(unsigned int ioaddr, int location, short asic_data)
+{
+ int i;
+ unsigned int ee_addr = ioaddr + DLINK_EEPROM;
+ short dataval;
+ int read_cmd = location | (EE_READ_CMD << 8);
+
+ asic_data |= read_eeprom(ioaddr, location);
+
+ outb(0, ee_addr);
+ outb(EE_ASIC|EE_CS|EE_DI, ee_addr);
+
+ read_cmd = read_cmd >> 1;
+
+ /* Shift the read command bits out. */
+ for (i = 9; i >= 0; i--) {
+ dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval|EE_CK, ee_addr);
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+ }
+ // sync
+ outb(EE_ASIC|EE_CS, ee_addr);
+ outb(EE_ASIC|EE_CS|EE_CK, ee_addr);
+ outb(EE_ASIC|EE_CS, ee_addr);
+
+ for (i = 15; i >= 0; i--) {
+ dataval = (asic_data & (1 << i)) ? EE_ADOT : 0;
+ outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
+ outb_p(EE_ASIC|EE_CS|dataval|EE_CK, ee_addr);
+ outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
+ }
+
+ /* Terminate the ASIC access. */
+ outb(EE_ASIC|EE_DI, ee_addr);
+ outb(EE_ASIC|EE_DI| EE_CK, ee_addr);
+ outb(EE_ASIC|EE_DI, ee_addr);
+
+ outb(0, ee_addr);
+}
+
+/*====================================================================*/
+
+static void set_misc_reg(struct net_device *dev)
+{
+ unsigned int nic_base = dev->base_addr;
+ pcnet_dev_t *info = PRIV(dev);
+ u_char tmp;
+
+ if (info->flags & HAS_MISC_REG) {
+ tmp = inb_p(nic_base + PCNET_MISC) & ~3;
+ if (dev->if_port == 2)
+ tmp |= 1;
+ if (info->flags & USE_BIG_BUF)
+ tmp |= 2;
+ if (info->flags & HAS_IBM_MISC)
+ tmp |= 8;
+ outb_p(tmp, nic_base + PCNET_MISC);
+ }
+ if (info->flags & IS_DL10022) {
+ if (info->flags & HAS_MII) {
+ /* Advertise 100F, 100H, 10F, 10H */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
+ /* Restart MII autonegotiation */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
+ info->mii_reset = jiffies;
+ } else {
+ outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG);
+ }
+ } else if (info->flags & IS_DL10019) {
+ /* Advertise 100F, 100H, 10F, 10H */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
+ /* Restart MII autonegotiation */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
+ }
+}
+
+/*====================================================================*/
+
+static void mii_phy_probe(struct net_device *dev)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ unsigned int mii_addr = dev->base_addr + DLINK_GPIO;
+ int i;
+ u_int tmp, phyid;
+
+ for (i = 31; i >= 0; i--) {
+ tmp = mdio_read(mii_addr, i, 1);
+ if ((tmp == 0) || (tmp == 0xffff))
+ continue;
+ tmp = mdio_read(mii_addr, i, MII_PHYID_REG1);
+ phyid = tmp << 16;
+ phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
+ phyid &= MII_PHYID_REV_MASK;
+ netdev_dbg(dev, "MII at %d is 0x%08x\n", i, phyid);
+ if (phyid == AM79C9XX_HOME_PHY) {
+ info->pna_phy = i;
+ } else if (phyid != AM79C9XX_ETH_PHY) {
+ info->eth_phy = i;
+ }
+ }
+}
+
+static int pcnet_open(struct net_device *dev)
+{
+ int ret;
+ pcnet_dev_t *info = PRIV(dev);
+ struct pcmcia_device *link = info->p_dev;
+ unsigned int nic_base = dev->base_addr;
+
+ dev_dbg(&link->dev, "pcnet_open('%s')\n", dev->name);
+
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+
+ set_misc_reg(dev);
+
+ outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */
+ ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev->name, dev);
+ if (ret)
+ return ret;
+
+ link->open++;
+
+ info->phy_id = info->eth_phy;
+ info->link_status = 0x00;
+ init_timer(&info->watchdog);
+ info->watchdog.function = ei_watchdog;
+ info->watchdog.data = (u_long)dev;
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+
+ return ei_open(dev);
+} /* pcnet_open */
+
+/*====================================================================*/
+
+static int pcnet_close(struct net_device *dev)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ struct pcmcia_device *link = info->p_dev;
+
+ dev_dbg(&link->dev, "pcnet_close('%s')\n", dev->name);
+
+ ei_close(dev);
+ free_irq(dev->irq, dev);
+
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&info->watchdog);
+
+ return 0;
+} /* pcnet_close */
+
+/*======================================================================
+
+ Hard reset the card. 
This used to pause for the same period that + a 8390 reset command required, but that shouldn't be necessary. + +======================================================================*/ + +static void pcnet_reset_8390(struct net_device *dev) +{ + unsigned int nic_base = dev->base_addr; + int i; + + ei_status.txing = ei_status.dmaing = 0; + + outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD); + + outb(inb(nic_base + PCNET_RESET), nic_base + PCNET_RESET); + + for (i = 0; i < 100; i++) { + if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0) + break; + udelay(100); + } + outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */ + + if (i == 100) + netdev_err(dev, "pcnet_reset_8390() did not complete.\n"); + + set_misc_reg(dev); + +} /* pcnet_reset_8390 */ + +/*====================================================================*/ + +static int set_config(struct net_device *dev, struct ifmap *map) +{ + pcnet_dev_t *info = PRIV(dev); + if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { + if (!(info->flags & HAS_MISC_REG)) + return -EOPNOTSUPP; + else if ((map->port < 1) || (map->port > 2)) + return -EINVAL; + dev->if_port = map->port; + netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); + NS8390_init(dev, 1); + } + return 0; +} + +/*====================================================================*/ + +static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + pcnet_dev_t *info; + irqreturn_t ret = ei_interrupt(irq, dev_id); + + if (ret == IRQ_HANDLED) { + info = PRIV(dev); + info->stale = 0; + } + return ret; +} + +static void ei_watchdog(u_long arg) +{ + struct net_device *dev = (struct net_device *)arg; + pcnet_dev_t *info = PRIV(dev); + unsigned int nic_base = dev->base_addr; + unsigned int mii_addr = nic_base + DLINK_GPIO; + u_short link; + + if (!netif_device_present(dev)) goto reschedule; + + /* Check for pending interrupt with expired latency timer: with + this, we can limp along even if the interrupt is blocked */ + if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { + if (!info->fast_poll) + netdev_info(dev, "interrupt(s) dropped!\n"); + ei_irq_wrapper(dev->irq, dev); + info->fast_poll = HZ; + } + if (info->fast_poll) { + info->fast_poll--; + info->watchdog.expires = jiffies + 1; + add_timer(&info->watchdog); + return; + } + + if (!(info->flags & HAS_MII)) + goto reschedule; + + mdio_read(mii_addr, info->phy_id, 1); + link = mdio_read(mii_addr, info->phy_id, 1); + if (!link || (link == 0xffff)) { + if (info->eth_phy) { + info->phy_id = info->eth_phy = 0; + } else { + netdev_info(dev, "MII is missing!\n"); + info->flags &= ~HAS_MII; + } + goto reschedule; + } + + link &= 0x0004; + if (link != info->link_status) { + u_short p = mdio_read(mii_addr, info->phy_id, 5); + netdev_info(dev, "%s link beat\n", link ? "found" : "lost"); + if (link && (info->flags & IS_DL10022)) { + /* Disable collision detection on full duplex links */ + outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG); + } else if (link && (info->flags & IS_DL10019)) { + /* Disable collision detection on full duplex links */ + write_asic(dev->base_addr, 4, (p & 0x140) ? DL19FDUPLX : 0); + } + if (link) { + if (info->phy_id == info->eth_phy) { + if (p) + netdev_info(dev, "autonegotiation complete: " + "%sbaseT-%cD selected\n", + ((p & 0x0180) ? "100" : "10"), + ((p & 0x0140) ? 
'F' : 'H')); + else + netdev_info(dev, "link partner did not autonegotiate\n"); + } + NS8390_init(dev, 1); + } + info->link_status = link; + } + if (info->pna_phy && time_after(jiffies, info->mii_reset + 6*HZ)) { + link = mdio_read(mii_addr, info->eth_phy, 1) & 0x0004; + if (((info->phy_id == info->pna_phy) && link) || + ((info->phy_id != info->pna_phy) && !link)) { + /* isolate this MII and try flipping to the other one */ + mdio_write(mii_addr, info->phy_id, 0, 0x0400); + info->phy_id ^= info->pna_phy ^ info->eth_phy; + netdev_info(dev, "switched to %s transceiver\n", + (info->phy_id == info->eth_phy) ? "ethernet" : "PNA"); + mdio_write(mii_addr, info->phy_id, 0, + (info->phy_id == info->eth_phy) ? 0x1000 : 0); + info->link_status = 0; + info->mii_reset = jiffies; + } + } + +reschedule: + info->watchdog.expires = jiffies + HZ; + add_timer(&info->watchdog); +} + +/*====================================================================*/ + + +static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + pcnet_dev_t *info = PRIV(dev); + struct mii_ioctl_data *data = if_mii(rq); + unsigned int mii_addr = dev->base_addr + DLINK_GPIO; + + if (!(info->flags & (IS_DL10019|IS_DL10022))) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = info->phy_id; + case SIOCGMIIREG: /* Read MII PHY register. */ + data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); + return 0; + case SIOCSMIIREG: /* Write MII PHY register. */ + mdio_write(mii_addr, data->phy_id, data->reg_num & 0x1f, data->val_in); + return 0; + } + return -EOPNOTSUPP; +} + +/*====================================================================*/ + +static void dma_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, + int ring_page) +{ + unsigned int nic_base = dev->base_addr; + + if (ei_status.dmaing) { + netdev_notice(dev, "DMAing conflict in dma_block_input." + "[DMAstat:%1x][irqlock:%1x]\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD); + outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); + outb_p(0, nic_base + EN0_RCNTHI); + outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD); + + insw(nic_base + PCNET_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr)>>1); + /* Fix for big endian systems */ + hdr->count = le16_to_cpu(hdr->count); + + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +/*====================================================================*/ + +static void dma_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned int nic_base = dev->base_addr; + int xfer_count = count; + char *buf = skb->data; + + if ((ei_debug > 4) && (count != 4)) + netdev_dbg(dev, "[bi=%d]\n", count+4); + if (ei_status.dmaing) { + netdev_notice(dev, "DMAing conflict in dma_block_input." 
+ "[DMAstat:%1x][irqlock:%1x]\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD); + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); + outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD); + + insw(nic_base + PCNET_DATAPORT,buf,count>>1); + if (count & 0x01) + buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++; + + /* This was for the ALPHA version only, but enough people have been + encountering problems that it is still here. */ +#ifdef PCMCIA_DEBUG + if (ei_debug > 4) { /* DMA termination address check... */ + int addr, tries = 20; + do { + /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here + -- it's broken for Rx on some cards! */ + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + addr = (high << 8) + low; + if (((ring_offset + xfer_count) & 0xff) == (addr & 0xff)) + break; + } while (--tries > 0); + if (tries <= 0) + netdev_notice(dev, "RX transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + ring_offset + xfer_count, addr); + } +#endif + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} /* dma_block_input */ + +/*====================================================================*/ + +static void dma_block_output(struct net_device *dev, int count, + const u_char *buf, const int start_page) +{ + unsigned int nic_base = dev->base_addr; + pcnet_dev_t *info = PRIV(dev); +#ifdef PCMCIA_DEBUG + int retries = 0; +#endif + u_long dma_start; + +#ifdef PCMCIA_DEBUG + if (ei_debug > 4) + netdev_dbg(dev, "[bo=%d]\n", count); +#endif + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + if (count & 0x01) + count++; + if (ei_status.dmaing) { + netdev_notice(dev, "DMAing conflict in dma_block_output." + "[DMAstat:%1x][irqlock:%1x]\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base+PCNET_CMD); + +#ifdef PCMCIA_DEBUG + retry: +#endif + + outb_p(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. */ + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(0x00, nic_base + EN0_RSARLO); + outb_p(start_page, nic_base + EN0_RSARHI); + + outb_p(E8390_RWRITE+E8390_START, nic_base + PCNET_CMD); + outsw(nic_base + PCNET_DATAPORT, buf, count>>1); + + dma_start = jiffies; + +#ifdef PCMCIA_DEBUG + /* This was for the ALPHA version only, but enough people have been + encountering problems that it is still here. */ + if (ei_debug > 4) { /* DMA termination address check... */ + int addr, tries = 20; + do { + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + addr = (high << 8) + low; + if ((start_page << 8) + count == addr) + break; + } while (--tries > 0); + if (tries <= 0) { + netdev_notice(dev, "Tx packet transfer address mismatch," + "%#4.4x (expected) vs. 
%#4.4x (actual).\n", + (start_page << 8) + count, addr); + if (retries++ == 0) + goto retry; + } + } +#endif + + while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) { + netdev_notice(dev, "timeout waiting for Tx RDC.\n"); + pcnet_reset_8390(dev); + NS8390_init(dev, 1); + break; + } + + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + if (info->flags & DELAY_OUTPUT) + udelay((long)delay_time); + ei_status.dmaing &= ~0x01; +} + +/*====================================================================*/ + +static int setup_dma_config(struct pcmcia_device *link, int start_pg, + int stop_pg) +{ + struct net_device *dev = link->priv; + + ei_status.tx_start_page = start_pg; + ei_status.rx_start_page = start_pg + TX_PAGES; + ei_status.stop_page = stop_pg; + + /* set up block i/o functions */ + ei_status.get_8390_hdr = dma_get_8390_hdr; + ei_status.block_input = dma_block_input; + ei_status.block_output = dma_block_output; + + return 0; +} + +/*====================================================================*/ + +static void copyin(void *dest, void __iomem *src, int c) +{ + u_short *d = dest; + u_short __iomem *s = src; + int odd; + + if (c <= 0) + return; + odd = (c & 1); c >>= 1; + + if (c) { + do { *d++ = __raw_readw(s++); } while (--c); + } + /* get last byte by fetching a word and masking */ + if (odd) + *((u_char *)d) = readw(s) & 0xff; +} + +static void copyout(void __iomem *dest, const void *src, int c) +{ + u_short __iomem *d = dest; + const u_short *s = src; + int odd; + + if (c <= 0) + return; + odd = (c & 1); c >>= 1; + + if (c) { + do { __raw_writew(*s++, d++); } while (--c); + } + /* copy last byte doing a read-modify-write */ + if (odd) + writew((readw(d) & 0xff00) | *(u_char *)s, d); +} + +/*====================================================================*/ + +static void shmem_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, + int ring_page) +{ + void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8) + + (ring_page << 8) + - (ei_status.rx_start_page << 8); + + copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr)); + /* Fix for big endian systems */ + hdr->count = le16_to_cpu(hdr->count); +} + +/*====================================================================*/ + +static void shmem_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + void __iomem *base = ei_status.mem; + unsigned long offset = (TX_PAGES<<8) + ring_offset + - (ei_status.rx_start_page << 8); + char *buf = skb->data; + + if (offset + count > ei_status.priv) { + /* We must wrap the input move. 
*/ + int semi_count = ei_status.priv - offset; + copyin(buf, base + offset, semi_count); + buf += semi_count; + offset = TX_PAGES<<8; + count -= semi_count; + } + copyin(buf, base + offset, count); +} + +/*====================================================================*/ + +static void shmem_block_output(struct net_device *dev, int count, + const u_char *buf, const int start_page) +{ + void __iomem *shmem = ei_status.mem + (start_page << 8); + shmem -= ei_status.tx_start_page << 8; + copyout(shmem, buf, count); +} + +/*====================================================================*/ + +static int setup_shmem_window(struct pcmcia_device *link, int start_pg, + int stop_pg, int cm_offset) +{ + struct net_device *dev = link->priv; + pcnet_dev_t *info = PRIV(dev); + int i, window_size, offset, ret; + + window_size = (stop_pg - start_pg) << 8; + if (window_size > 32 * 1024) + window_size = 32 * 1024; + + /* Make sure it's a power of two. */ + window_size = roundup_pow_of_two(window_size); + + /* Allocate a memory window */ + link->resource[3]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; + link->resource[3]->flags |= WIN_USE_WAIT; + link->resource[3]->start = 0; link->resource[3]->end = window_size; + ret = pcmcia_request_window(link, link->resource[3], mem_speed); + if (ret) + goto failed; + + offset = (start_pg << 8) + cm_offset; + offset -= offset % window_size; + ret = pcmcia_map_mem_page(link, link->resource[3], offset); + if (ret) + goto failed; + + /* Try scribbling on the buffer */ + info->base = ioremap(link->resource[3]->start, + resource_size(link->resource[3])); + for (i = 0; i < (TX_PAGES<<8); i += 2) + __raw_writew((i>>1), info->base+offset+i); + udelay(100); + for (i = 0; i < (TX_PAGES<<8); i += 2) + if (__raw_readw(info->base+offset+i) != (i>>1)) break; + pcnet_reset_8390(dev); + if (i != (TX_PAGES<<8)) { + iounmap(info->base); + pcmcia_release_window(link, link->resource[3]); + info->base = NULL; + goto failed; + } + + ei_status.mem = info->base + offset; + ei_status.priv = resource_size(link->resource[3]); + dev->mem_start = (u_long)ei_status.mem; + dev->mem_end = dev->mem_start + resource_size(link->resource[3]); + + ei_status.tx_start_page = start_pg; + ei_status.rx_start_page = start_pg + TX_PAGES; + ei_status.stop_page = start_pg + ( + (resource_size(link->resource[3]) - offset) >> 8); + + /* set up block i/o functions */ + ei_status.get_8390_hdr = shmem_get_8390_hdr; + ei_status.block_input = shmem_block_input; + ei_status.block_output = shmem_block_output; + + info->flags |= USE_SHMEM; + return 0; + +failed: + return 1; +} + +/*====================================================================*/ + +static const struct pcmcia_device_id pcnet_ids[] = { + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0057, 0x0021), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0104, 0x000a), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0xea15), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0143, 0x3341), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0143, 0xc0ab), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), + 
PCMCIA_PFC_DEVICE_PROD_ID12(0, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away 28.8 PC Card ", 0xb569a6e5, 0x5bd4ff2c), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away Credit Card Adapter", 0xb569a6e5, 0x4bdf15c3), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "w95 Home and Away Credit Card ", 0xb569a6e5, 0xae911c15), + PCMCIA_MFC_DEVICE_PROD_ID123(0, "APEX DATA", "MULTICARD", "ETHERNET-MODEM", 0x11c2da09, 0x7289dc5d, 0xaad95e1f), + PCMCIA_MFC_DEVICE_PROD_ID2(0, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302), + PCMCIA_DEVICE_MANF_CARD(0x0057, 0x1004), + PCMCIA_DEVICE_MANF_CARD(0x0104, 0x000d), + PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0075), + PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0145), + PCMCIA_DEVICE_MANF_CARD(0x0149, 0x0230), + PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530), + PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab), + PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110), + PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041), + PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0307), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), + PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103), + PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121), + PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e), + PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID123("CNet ", "CN30BC", "ETHERNET", 0x9fe55d3d, 0x85601198, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID123("Digital", "Ethernet", "Adapter", 0x9999ab35, 0x00b2e941, 0x4b0d829e), + PCMCIA_DEVICE_PROD_ID123("Edimax Technology Inc.", "PCMCIA", "Ethernet Card", 0x738a0019, 0x281f1c5d, 0x5e9d92c0), + PCMCIA_DEVICE_PROD_ID123("EFA ", "EFA207", "ETHERNET", 0x3d294be4, 0xeb9aab6c, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID123("I-O DATA", "PCLA", "ETHERNET", 0x1d55d7ec, 0xe4c64d34, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCLATE", "ETHERNET", 0x547e66dc, 0x6b260753, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID123("KingMax Technology Inc.", "EN10-T2", "PCMCIA Ethernet Card", 0x932b7189, 0x699e4436, 0x6f6652e0), + PCMCIA_DEVICE_PROD_ID123("PCMCIA", "PCMCIA-ETHERNET-CARD", "UE2216", 0x281f1c5d, 0xd4cd2f20, 0xb87add82), + PCMCIA_DEVICE_PROD_ID123("PCMCIA", "PCMCIA-ETHERNET-CARD", "UE2620", 0x281f1c5d, 0xd4cd2f20, 0x7d3d83a8), + PCMCIA_DEVICE_PROD_ID1("2412LAN", 0x67f236ab), + PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2212", 0xdfc6b5b2, 0xcb112a11), + PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2216-PCMCIA-ETHERNET", 0xdfc6b5b2, 0x5542bfff), + PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA100-PCM-T V2 100/10M LAN PC Card", 0xbb7fbdd7, 0xcd91cc68), + PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA100-PCM V2", 0x36634a66, 0xc6d05997), + PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA-PCM_V2", 0xbb7fBdd7, 0x28e299f8), + PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA-PCM V3", 0x36634a66, 0x62241d96), + PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8010", 0x5070a7f9, 0x82f96e96), + PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8610", 0x5070a7f9, 0x86741224), + PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8002", 0x93b15570, 0x75ec3efb), + 
PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8002T", 0x93b15570, 0x461c5247), + PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8010", 0x93b15570, 0x82f96e96), + PCMCIA_DEVICE_PROD_ID12("AnyCom", "ECO Ethernet", 0x578ba6e7, 0x0a9888c1), + PCMCIA_DEVICE_PROD_ID12("AnyCom", "ECO Ethernet 10/100", 0x578ba6e7, 0x939fedbd), + PCMCIA_DEVICE_PROD_ID12("AROWANA", "PCMCIA Ethernet LAN Card", 0x313adbc8, 0x08d9f190), + PCMCIA_DEVICE_PROD_ID12("ASANTE", "FriendlyNet PC Card", 0x3a7ade0f, 0x41c64504), + PCMCIA_DEVICE_PROD_ID12("Billionton", "LNT-10TB", 0x552ab682, 0xeeb1ba6a), + PCMCIA_DEVICE_PROD_ID12("CF", "10Base-Ethernet", 0x44ebf863, 0x93ae4d79), + PCMCIA_DEVICE_PROD_ID12("CNet", "CN40BC Ethernet", 0xbc477dde, 0xfba775a7), + PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "BASEline PCMCIA 10 MBit Ethernetadapter", 0xfa2e424d, 0xe9190d8a), + PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9), + PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), + PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), + PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), + PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-TD", 0x5261440f, 0x47d5ca83), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), + PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), + PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "(CG-LAPCCTXD)", 0x5261440f, 0x73ec0d88), + PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04), + PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d), + PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814), + PCMCIA_DEVICE_PROD_ID12("DataTrek.", "NetCard ", 0x5cd66d9d, 0x84697ce0), + PCMCIA_DEVICE_PROD_ID12("Dayna Communications, Inc.", "CommuniCard E", 0x0c629325, 0xb4e7dbaf), + PCMCIA_DEVICE_PROD_ID12("Digicom", "Palladio LAN 10/100", 0x697403d8, 0xe160b995), + PCMCIA_DEVICE_PROD_ID12("Digicom", "Palladio LAN 10/100 Dongless", 0x697403d8, 0xa6d3b233), + PCMCIA_DEVICE_PROD_ID12("DIGITAL", "DEPCM-XX", 0x69616cb3, 0xe600e76e), + PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-650", 0x1a424a1c, 0xf28c8398), + PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-660", 0x1a424a1c, 0xd9a1d05b), + PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-660+", 0x1a424a1c, 0x50dcd0ec), + PCMCIA_DEVICE_PROD_ID12("D-Link", "DFE-650", 0x1a424a1c, 0x0f0073f9), + PCMCIA_DEVICE_PROD_ID12("Dual Speed", "10/100 PC Card", 0x725b842d, 0xf1efee84), + PCMCIA_DEVICE_PROD_ID12("Dual Speed", "10/100 Port Attached PC Card", 0x725b842d, 0x2db1f8e9), + PCMCIA_DEVICE_PROD_ID12("Dynalink", "L10BC", 0x55632fd5, 0xdc65f2b1), + PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L10BC", 0x6a26d1cf, 0xdc65f2b1), + PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L10C", 0x6a26d1cf, 0xc4f84efb), + PCMCIA_DEVICE_PROD_ID12("E-CARD", "E-CARD", 0x6701da11, 0x6701da11), + PCMCIA_DEVICE_PROD_ID12("EIGER Labs Inc.", "Ethernet 10BaseT 
card", 0x53c864c6, 0xedd059f6), + PCMCIA_DEVICE_PROD_ID12("EIGER Labs Inc.", "Ethernet Combo card", 0x53c864c6, 0x929c486c), + PCMCIA_DEVICE_PROD_ID12("Ethernet", "Adapter", 0x00b2e941, 0x4b0d829e), + PCMCIA_DEVICE_PROD_ID12("Ethernet Adapter", "E2000 PCMCIA Ethernet", 0x96767301, 0x71fbbc61), + PCMCIA_DEVICE_PROD_ID12("Ethernet PCMCIA adapter", "EP-210", 0x8dd86181, 0xf2b52517), + PCMCIA_DEVICE_PROD_ID12("Fast Ethernet", "Adapter", 0xb4be14e3, 0x4b0d829e), + PCMCIA_DEVICE_PROD_ID12("Grey Cell", "GCS2000", 0x2a151fac, 0xf00555cb), + PCMCIA_DEVICE_PROD_ID12("Grey Cell", "GCS2220", 0x2a151fac, 0xc1b7e327), + PCMCIA_DEVICE_PROD_ID12("GVC", "NIC-2000p", 0x76e171bd, 0x6eb1c947), + PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "Ethernet", 0xe3736c88, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("IC-CARD", "IC-CARD", 0x60cb09a6, 0x60cb09a6), + PCMCIA_DEVICE_PROD_ID12("IC-CARD+", "IC-CARD+", 0x93693494, 0x93693494), + PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), + PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), + PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), + PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616), + PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), + PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), + PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), + PCMCIA_DEVICE_PROD_ID12("Kingston Technology Corp.", "EtheRx PC Card Ethernet Adapter", 0x313c7be3, 0x0afb54a2), + PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-10/100CD", 0x1b7827b2, 0xcda71d1c), + PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDF", 0x1b7827b2, 0xfec71e40), + PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDL/T", 0x1b7827b2, 0x79fba4f7), + PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDS", 0x1b7827b2, 0x931afaab), + PCMCIA_DEVICE_PROD_ID12("LEMEL", "LM-N89TX PRO", 0xbbefb52f, 0xd2897a97), + PCMCIA_DEVICE_PROD_ID12("Linksys", "Combo PCMCIA EthernetCard (EC2T)", 0x0733cc81, 0x32ee8c78), + PCMCIA_DEVICE_PROD_ID12("LINKSYS", "E-CARD", 0xf7cb0b07, 0x6701da11), + PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 Integrated PC Card (PCM100)", 0x0733cc81, 0x453c3f9d), + PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100)", 0x0733cc81, 0x66c5a389), + PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9), + PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a), + PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), + PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), + PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), + PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN10TE", 0x88fcdeda, 0xc1e2521c), + PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), + PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), + PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307), + PCMCIA_DEVICE_PROD_ID12("Matsushita Electric Industrial Co.,LTD.", "CF-VEL211", 0x44445376, 0x8ded41d4), + PCMCIA_DEVICE_PROD_ID12("MAXTECH", "PCN2000", 0x78d64bc0, 0xca0ca4b8), + PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC2-T", 0x481e0094, 0xa2eb0cf3), + PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC2-TX", 0x481e0094, 0x41a6916c), + PCMCIA_DEVICE_PROD_ID12("Microcom C.E.", "Travel Card LAN 10/100", 0x4b91cec7, 0xe70220d6), + PCMCIA_DEVICE_PROD_ID12("Microdyne", 
"NE4200", 0x2e6da59b, 0x0478e472), + PCMCIA_DEVICE_PROD_ID12("MIDORI ELEC.", "LT-PCMT", 0x648d55c1, 0xbde526c7), + PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover 4100", 0x36e1191f, 0x60c229b9), + PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8), + PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76), + PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e), + PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f), + PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet", 0x281f1c5d, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "ETHERNET", 0x281f1c5d, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet 10BaseT Card", 0x281f1c5d, 0x4de2f6c8), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet Card", 0x281f1c5d, 0x5e9d92c0), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet Combo card", 0x281f1c5d, 0x929c486c), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "ETHERNET V1.0", 0x281f1c5d, 0x4d8817c8), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEthernet", 0x281f1c5d, 0xfe871eeb), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Fast-Ethernet", 0x281f1c5d, 0x45f1f3b4), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FAST ETHERNET CARD", 0x281f1c5d, 0xec5dbca7), + PCMCIA_DEVICE_PROD_ID12("PCMCIA LAN", "Ethernet", 0x7500e246, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "LNT-10TN", 0x281f1c5d, 0xe707f641), + PCMCIA_DEVICE_PROD_ID12("PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "UE2212", 0x281f1c5d, 0xbf17199b), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", " Ethernet NE2000 Compatible", 0x281f1c5d, 0x42d5d7e1), + PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10baseT 3.3V", 0xebf91155, 0x30074c80), + PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50), + PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110), + PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("RIOS Systems Co.", "PC CARD3 ETHERNET", 0x7dd33481, 0x10b41826), + PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet NE2000 Compatible", 0xe3e66e22, 0xb96150df), + PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0), + PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd), + PCMCIA_DEVICE_PROD_ID12("RPTI LTD.", "EP400", 0xc53ac515, 0x81e39388), + PCMCIA_DEVICE_PROD_ID12("SCM", "Ethernet Combo card", 0xbdc3b102, 0x929c486c), + PCMCIA_DEVICE_PROD_ID12("Seiko Epson Corp.", "Ethernet", 0x09928730, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("SMC", "EZCard-10-PCMCIA", 0xc4f8b18b, 0xfb21d265), + PCMCIA_DEVICE_PROD_ID12("Socket Communications Inc", "Socket EA PCMCIA LAN Adapter Revision D", 0xc70a4760, 0x2ade483e), + PCMCIA_DEVICE_PROD_ID12("Socket Communications Inc", "Socket EA PCMCIA LAN Adapter Revision E", 0xc70a4760, 0x5dd978a8), + PCMCIA_DEVICE_PROD_ID12("TDK", "LAK-CD031 for PCMCIA", 0x1eae9475, 0x0ed386fa), + PCMCIA_DEVICE_PROD_ID12("Telecom Device K.K.", "SuperSocket RE450T", 0x466b05f0, 0x8b74bc4f), + PCMCIA_DEVICE_PROD_ID12("Telecom Device K.K.", "SuperSocket RE550T", 0x466b05f0, 0x33c8db2a), + PCMCIA_DEVICE_PROD_ID13("Hypertec", "EP401", 0x8787bec7, 0xf6e4a31e), + PCMCIA_DEVICE_PROD_ID13("KingMax Technology Inc.", "Ethernet Card", 0x932b7189, 
0x5e9d92c0), + PCMCIA_DEVICE_PROD_ID13("LONGSHINE", "EP401", 0xf866b0b0, 0xf6e4a31e), + PCMCIA_DEVICE_PROD_ID13("Xircom", "CFE-10", 0x2e3ee845, 0x22a49f89), + PCMCIA_DEVICE_PROD_ID1("CyQ've 10 Base-T LAN CARD", 0x94faf360), + PCMCIA_DEVICE_PROD_ID1("EP-210 PCMCIA LAN CARD.", 0x8850b4de), + PCMCIA_DEVICE_PROD_ID1("ETHER-C16", 0x06a8514f), + PCMCIA_DEVICE_PROD_ID1("NE2000 Compatible", 0x75b8ad5a), + PCMCIA_DEVICE_PROD_ID2("EN-6200P2", 0xa996d078), + /* too generic! */ + /* PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100 Ethernet Card", 0x281f1c5d, 0x11b0ffc0), */ + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "TOSHIBA", "Modem/LAN Card", 0xb4585a1a, 0x53f922f8, "cis/PCMLM28.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), + PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), + PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", + 0xb4be14e3, 0x43ac239b, 0x0877b627), + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, pcnet_ids); +MODULE_FIRMWARE("cis/PCMLM28.cis"); +MODULE_FIRMWARE("cis/DP83903.cis"); +MODULE_FIRMWARE("cis/LA-PCM.cis"); +MODULE_FIRMWARE("cis/PE520.cis"); +MODULE_FIRMWARE("cis/NE2K.cis"); +MODULE_FIRMWARE("cis/PE-200.cis"); +MODULE_FIRMWARE("cis/tamarack.cis"); + +static struct pcmcia_driver pcnet_driver = { + .name = "pcnet_cs", + .probe = pcnet_probe, + .remove = pcnet_detach, + .owner = THIS_MODULE, + .id_table = pcnet_ids, + .suspend = pcnet_suspend, + .resume = pcnet_resume, +}; + +static int __init init_pcnet_cs(void) +{ + return pcmcia_register_driver(&pcnet_driver); +} + +static void __exit exit_pcnet_cs(void) +{ + pcmcia_unregister_driver(&pcnet_driver); +} + +module_init(init_pcnet_cs); +module_exit(exit_pcnet_cs); diff --git a/drivers/net/ethernet/8390/smc-mca.c b/drivers/net/ethernet/8390/smc-mca.c new file mode 100644 index 0000000..34934fb --- /dev/null +++ b/drivers/net/ethernet/8390/smc-mca.c @@ -0,0 +1,576 @@ +/* smc-mca.c: A SMC Ultra ethernet driver for linux. */ +/* + Most of this driver, except for ultramca_probe is nearly + verbatim from smc-ultra.c by Donald Becker. The rest is + written and copyright 1996 by David Weis, weisd3458@uni.edu + + This is a driver for the SMC Ultra and SMC EtherEZ ethercards. 
+
+ This driver uses the cards in the 8390-compatible, shared memory mode.
+ Most of the run-time complexity is handled by the generic code in
+ 8390.c.
+
+ This driver enables the shared memory only when doing the actual data
+ transfers to avoid a bug in early versions of the card that corrupted
+ data transferred by an AHA1542.
+
+ This driver does not support the programmed-I/O data transfer mode of
+ the EtherEZ. That support (if available) is in smc-ez.c. Nor does it
+ use the non-8390-compatible "Altego" mode. (No support currently planned.)
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users.
+ David Weis : Micro Channel-ized it.
+ Tom Sightler : Added support for IBM PS/2 Ethernet Adapter/A
+ Christopher Turcksin : Changed MCA-probe so that multiple adapters are
+ found correctly (Jul 16, 1997)
+ Chris Beauregard : Tried to merge the two changes above (Dec 15, 1997)
+ Tom Sightler : Fixed minor detection bug caused by above merge
+ Tom Sightler : Added support for three more Western Digital
+ MCA-adapters
+ Tom Sightler : Added support for 2.2.x mca_find_unused_adapter
+ Hartmut Schmidt : - Modified parameter detection to handle each
+ card differently depending on a switch-list
+ - 'card_ver' removed from the adapter list
+ - Some minor bug fixes
+*/
+
+#include <linux/mca-legacy.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "smc-mca"
+
+static int ultramca_open(struct net_device *dev);
+static void ultramca_reset_8390(struct net_device *dev);
+static void ultramca_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultramca_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb,
+ int ring_offset);
+static void ultramca_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page);
+static int ultramca_close_card(struct net_device *dev);
+
+#define START_PG 0x00 /* First page of TX buffer */
+
+#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
+#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
+#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
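A note on the register layout these defines describe: the card's own ASIC control registers sit at the adapter's I/O base, and the 8390 core sits ULTRA_NIC_OFFSET (16) ports above it, so the probe stores base + 16 in dev->base_addr and the open/reset/close paths subtract the offset to reach the ASIC again. A standalone sketch of that arithmetic (hypothetical base address; illustration only, not driver code):

/* nic_offset_demo.c: the ASIC/8390 address split used by this driver. */
#include <stdio.h>

#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */

int main(void)
{
	unsigned long io_base = 0x300; /* hypothetical adapter I/O base */
	unsigned long nic_base = io_base + ULTRA_NIC_OFFSET; /* dev->base_addr */
	unsigned long asic_base = nic_base - ULTRA_NIC_OFFSET; /* back to the ASIC */

	printf("ASIC at 0x%lx, 8390 core at 0x%lx\n", asic_base, nic_base);
	return 0;
}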
+
+#define _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A 0
+#define _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A 1
+#define _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A 2
+#define _6fc1_WD_Starcard_PLUS_A_WD8003ST_A 3
+#define _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A 4
+#define _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A 5
+#define _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A 6
+#define _efe5_IBM_PS2_Adapter_A_for_Ethernet 7
+
+struct smc_mca_adapters_t {
+ unsigned int id;
+ char *name;
+};
+
+#define MAX_ULTRAMCA_CARDS 4 /* Max number of Ultra cards per module */
+
+static int ultra_io[MAX_ULTRAMCA_CARDS];
+static int ultra_irq[MAX_ULTRAMCA_CARDS];
+MODULE_LICENSE("GPL");
+
+module_param_array(ultra_io, int, NULL, 0);
+module_param_array(ultra_irq, int, NULL, 0);
+MODULE_PARM_DESC(ultra_io, "SMC Ultra/EtherEZ MCA I/O base address(es)");
+MODULE_PARM_DESC(ultra_irq, "SMC Ultra/EtherEZ MCA IRQ number(s)");
+
+static const struct {
+ unsigned int base_addr;
+} addr_table[] = {
+ { 0x0800 },
+ { 0x1800 },
+ { 0x2800 },
+ { 0x3800 },
+ { 0x4800 },
+ { 0x5800 },
+ { 0x6800 },
+ { 0x7800 },
+ { 0x8800 },
+ { 0x9800 },
+ { 0xa800 },
+ { 0xb800 },
+ { 0xc800 },
+ { 0xd800 },
+ { 0xe800 },
+ { 0xf800 }
+};
+
+#define MEM_MASK 64
+
+static const struct {
+ unsigned char mem_index;
+ unsigned long mem_start;
+ unsigned char num_pages;
+} mem_table[] = {
+ { 16, 0x0c0000, 40 },
+ { 18, 0x0c4000, 40 },
+ { 20, 0x0c8000, 40 },
+ { 22, 0x0cc000, 40 },
+ { 24, 0x0d0000, 40 },
+ { 26, 0x0d4000, 40 },
+ { 28, 0x0d8000, 40 },
+ { 30, 0x0dc000, 40 },
+ {144, 0xfc0000, 40 },
+ {148, 0xfc8000, 40 },
+ {154, 0xfd0000, 40 },
+ {156, 0xfd8000, 40 },
+ { 0, 0x0c0000, 20 },
+ { 1, 0x0c2000, 20 },
+ { 2, 0x0c4000, 20 },
+ { 3, 0x0c6000, 20 }
+};
+
+#define IRQ_MASK 243
+static const struct {
+ unsigned char new_irq;
+ unsigned char old_irq;
+} irq_table[] = {
+ { 3, 3 },
+ { 4, 4 },
+ { 10, 10 },
+ { 14, 15 }
+};
+
+static short smc_mca_adapter_ids[] __initdata = {
+ 0x61c8,
+ 0x61c9,
+ 0x6fc0,
+ 0x6fc1,
+ 0x6fc2,
+ 0xefd4,
+ 0xefd5,
+ 0xefe5,
+ 0x0000
+};
+
+static char *smc_mca_adapter_names[] __initdata = {
+ "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
+ "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
+ "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
+ "WD Starcard PLUS/A (WD8003ST/A)",
+ "WD Ethercard PLUS 10T/A (WD8003W/A)",
+ "IBM PS/2 Adapter/A for Ethernet UTP/AUI (WD8013WP/A)",
+ "IBM PS/2 Adapter/A for Ethernet BNC/AUI (WD8013EP/A)",
+ "IBM PS/2 Adapter/A for Ethernet",
+ NULL
+};
+
+static int ultra_found = 0;
+
+
+static const struct net_device_ops ultramca_netdev_ops = {
+ .ndo_open = ultramca_open,
+ .ndo_stop = ultramca_close_card,
+
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
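ultramca_probe() below recovers the card's I/O base and IRQ by decoding the MCA POS (Programmable Option Select) bytes through addr_table and irq_table. A standalone sketch of the Elite/A-family decode, with local copies of the tables and made-up POS values (illustration only, not driver code):

/* pos_decode_demo.c: decode I/O base and IRQ from MCA POS bytes. */
#include <stdio.h>

static const unsigned int addr_table_demo[] = {
	0x0800, 0x1800, 0x2800, 0x3800, 0x4800, 0x5800, 0x6800, 0x7800,
	0x8800, 0x9800, 0xa800, 0xb800, 0xc800, 0xd800, 0xe800, 0xf800
};
static const unsigned char new_irq_demo[] = { 3, 4, 10, 14 };

int main(void)
{
	unsigned char pos2 = 0x30, pos5 = 0x08;	/* hypothetical POS bytes */
	unsigned int tbase = addr_table_demo[(pos2 & 0xf0) >> 4];
	unsigned char tirq = new_irq_demo[(pos5 & 0x0c) >> 2];

	printf("I/O base 0x%04x, IRQ %d\n", tbase, tirq); /* 0x3800, IRQ 10 */
	return 0;
}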
+
+static int __init ultramca_probe(struct device *gen_dev)
+{
+ unsigned short ioaddr;
+ struct net_device *dev;
+ unsigned char reg4, num_pages;
+ struct mca_device *mca_dev = to_mca_device(gen_dev);
+ char slot = mca_dev->slot;
+ unsigned char pos2 = 0xff, pos3 = 0xff, pos4 = 0xff, pos5 = 0xff;
+ int i, rc;
+ int adapter = mca_dev->index;
+ int tbase = 0;
+ int tirq = 0;
+ int base_addr = ultra_io[ultra_found];
+ int irq = ultra_irq[ultra_found];
+
+ if (base_addr || irq) {
+ printk(KERN_INFO "Probing for SMC MCA adapter");
+ if (base_addr) {
+ printk(KERN_INFO " at I/O address 0x%04x%c",
+ base_addr, irq ? ' ' : '\n');
+ }
+ if (irq) {
+ printk(KERN_INFO "using irq %d\n", irq);
+ }
+ }
+
+ tirq = 0;
+ tbase = 0;
+
+ /* If we're trying to match a specified irq or io address,
+ * we'll reject the adapter found unless it's the one we're
+ * looking for */
+
+ pos2 = mca_device_read_stored_pos(mca_dev, 2); /* io_addr */
+ pos3 = mca_device_read_stored_pos(mca_dev, 3); /* shared mem */
+ pos4 = mca_device_read_stored_pos(mca_dev, 4); /* ROM bios addr range */
+ pos5 = mca_device_read_stored_pos(mca_dev, 5); /* irq, media and RIPL */
+
+ /* Test the following conditions:
+ * - If an irq parameter is supplied, compare it
+ * with the irq of the adapter we found
+ * - If a base_addr parameter is given, compare it
+ * with the base_addr of the adapter we found
+ * - Check that the irq and the base_addr of the
+ * adapter we found is not already in use by
+ * this driver
+ */
+
+ switch (mca_dev->index) {
+ case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
+ case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
+ case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
+ case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
+ {
+ tbase = addr_table[(pos2 & 0xf0) >> 4].base_addr;
+ tirq = irq_table[(pos5 & 0xc) >> 2].new_irq;
+ break;
+ }
+ case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
+ case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
+ case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
+ case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
+ {
+ tbase = ((pos2 & 0x0fe) * 0x10);
+ tirq = irq_table[(pos5 & 3)].old_irq;
+ break;
+ }
+ }
+
+ if(!tirq || !tbase ||
+ (irq && irq != tirq) ||
+ (base_addr && tbase != base_addr))
+ /* FIXME: we're trying to force the ordering of the
+ * devices here, there should be a way of getting this
+ * to happen */
+ return -ENXIO;
+
+ /* Adapter found. */
+ dev = alloc_ei_netdev();
+ if(!dev)
+ return -ENODEV;
+
+ SET_NETDEV_DEV(dev, gen_dev);
+ mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]);
+ mca_device_set_claim(mca_dev, 1);
+
+ printk(KERN_INFO "smc_mca: %s found in slot %d\n",
+ smc_mca_adapter_names[adapter], slot + 1);
+
+ ultra_found++;
+
+ dev->base_addr = ioaddr = mca_device_transform_ioport(mca_dev, tbase);
+ dev->irq = mca_device_transform_irq(mca_dev, tirq);
+ dev->mem_start = 0;
+ num_pages = 40;
+
+ switch (adapter) { /* card-# in const array above [hs] */
+ case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
+ case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
+ {
+ for (i = 0; i < 16; i++) { /* taking 16 counts
+ * up to 15 [hs] */
+ if (mem_table[i].mem_index == (pos3 & ~MEM_MASK)) {
+ dev->mem_start = (unsigned long)
+ mca_device_transform_memory(mca_dev, (void *)mem_table[i].mem_start);
+ num_pages = mem_table[i].num_pages;
+ }
+ }
+ break;
+ }
+ case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
+ case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
+ case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
+ case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
+ {
+ dev->mem_start = (unsigned long)
+ mca_device_transform_memory(mca_dev, (void *)((pos3 & 0xfc) * 0x1000));
+ num_pages = 0x40;
+ break;
+ }
+ case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
+ case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
+ {
+ /* courtesy of gamera@quartz.ocn.ne.jp, pos3 indicates
+ * the index of the 0x2000 step. 
+ * beware different number of pages [hs] + */ + dev->mem_start = (unsigned long) + mca_device_transform_memory(mca_dev, (void *)(0xc0000 + (0x2000 * (pos3 & 0xf)))); + num_pages = 0x20 + (2 * (pos3 & 0x10)); + break; + } + } + + /* sanity check, shouldn't happen */ + if (dev->mem_start == 0) { + rc = -ENODEV; + goto err_unclaim; + } + + if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) { + rc = -ENODEV; + goto err_unclaim; + } + + reg4 = inb(ioaddr + 4) & 0x7f; + outb(reg4, ioaddr + 4); + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %pM", + slot + 1, ioaddr, dev->dev_addr); + + /* Switch from the station address to the alternate register set + * and read the useful registers there. + */ + + outb(0x80 | reg4, ioaddr + 4); + + /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. + */ + + outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c); + + /* Switch back to the station address register set so that + * the MS-DOS driver can find the card after a warm boot. + */ + + outb(reg4, ioaddr + 4); + + dev_set_drvdata(gen_dev, dev); + + /* The 8390 isn't at the base address, so fake the offset + */ + + dev->base_addr = ioaddr + ULTRA_NIC_OFFSET; + + ei_status.name = "SMC Ultra MCA"; + ei_status.word16 = 1; + ei_status.tx_start_page = START_PG; + ei_status.rx_start_page = START_PG + TX_PAGES; + ei_status.stop_page = num_pages; + + ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG) * 256); + if (!ei_status.mem) { + rc = -ENOMEM; + goto err_release_region; + } + + dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG) * 256; + + printk(", IRQ %d memory %#lx-%#lx.\n", + dev->irq, dev->mem_start, dev->mem_end - 1); + + ei_status.reset_8390 = &ultramca_reset_8390; + ei_status.block_input = &ultramca_block_input; + ei_status.block_output = &ultramca_block_output; + ei_status.get_8390_hdr = &ultramca_get_8390_hdr; + + ei_status.priv = slot; + + dev->netdev_ops = &ultramca_netdev_ops; + + NS8390_init(dev, 0); + + rc = register_netdev(dev); + if (rc) + goto err_unmap; + + return 0; + +err_unmap: + iounmap(ei_status.mem); +err_release_region: + release_region(ioaddr, ULTRA_IO_EXTENT); +err_unclaim: + mca_device_set_claim(mca_dev, 0); + free_netdev(dev); + return rc; +} + +static int ultramca_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + int retval; + + if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) + return retval; + + outb(ULTRA_MEMENB, ioaddr); /* Enable memory */ + outb(0x80, ioaddr + 5); /* ??? */ + outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ + outb(0x04, ioaddr + 5); /* ??? */ + + /* Set the early receive warning level in window 0 high enough not + * to receive ERW interrupts. + */ + + /* outb_p(E8390_NODMA + E8390_PAGE0, dev->base_addr); + * outb(0xff, dev->base_addr + EN0_ERWCNT); + */ + + ei_open(dev); + return 0; +} + +static void ultramca_reset_8390(struct net_device *dev) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + + outb(ULTRA_RESET, ioaddr); + if (ei_debug > 1) + printk("resetting Ultra, t=%ld...", jiffies); + ei_status.txing = 0; + + outb(0x80, ioaddr + 5); /* ??? */ + outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ + + if (ei_debug > 1) + printk("reset done\n"); +} + +/* Grab the 8390 specific header. 
Similar to the block_input routine, but + * we don't need to be concerned with ring wrap as the header will be at + * the start of a page, so we optimize accordingly. + */ + +static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG) << 8); + +#ifdef notdef + /* Officially this is what we are doing, but the readl() is faster */ + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); +#else + ((unsigned int*)hdr)[0] = readl(hdr_start); +#endif +} + +/* Block input and output are easy on shared memory ethercards, the only + * complication is when the ring buffer wraps. + */ + +static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + void __iomem *xfer_start = ei_status.mem + ring_offset - START_PG * 256; + + if (ring_offset + count > ei_status.stop_page * 256) { + /* We must wrap the input move. */ + int semi_count = ei_status.stop_page * 256 - ring_offset; + memcpy_fromio(skb->data, xfer_start, semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); + } else { + memcpy_fromio(skb->data, xfer_start, count); + } + +} + +static void ultramca_block_output(struct net_device *dev, int count, const unsigned char *buf, + int start_page) +{ + void __iomem *shmem = ei_status.mem + ((start_page - START_PG) << 8); + + memcpy_toio(shmem, buf, count); +} + +static int ultramca_close_card(struct net_device *dev) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + + netif_stop_queue(dev); + + if (ei_debug > 1) + printk("%s: Shutting down ethercard.\n", dev->name); + + outb(0x00, ioaddr + 6); /* Disable interrupts. */ + free_irq(dev->irq, dev); + + NS8390_init(dev, 0); + /* We should someday disable shared memory and change to 8-bit mode + * "just in case"... + */ + + return 0; +} + +static int ultramca_remove(struct device *gen_dev) +{ + struct mca_device *mca_dev = to_mca_device(gen_dev); + struct net_device *dev = dev_get_drvdata(gen_dev); + + if (dev) { + /* NB: ultra_close_card() does free_irq */ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; + + unregister_netdev(dev); + mca_device_set_claim(mca_dev, 0); + release_region(ioaddr, ULTRA_IO_EXTENT); + iounmap(ei_status.mem); + free_netdev(dev); + } + return 0; +} + + +static struct mca_driver ultra_driver = { + .id_table = smc_mca_adapter_ids, + .driver = { + .name = "smc-mca", + .bus = &mca_bus_type, + .probe = ultramca_probe, + .remove = ultramca_remove, + } +}; + +static int __init ultramca_init_module(void) +{ + if(!MCA_bus) + return -ENXIO; + + mca_register_driver(&ultra_driver); + + return ultra_found ? 0 : -ENXIO; +} + +static void __exit ultramca_cleanup_module(void) +{ + mca_unregister_driver(&ultra_driver); +} +module_init(ultramca_init_module); +module_exit(ultramca_cleanup_module); + diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c new file mode 100644 index 0000000..ba44ede --- /dev/null +++ b/drivers/net/ethernet/8390/smc-ultra.c @@ -0,0 +1,622 @@ +/* smc-ultra.c: A SMC Ultra ethernet driver for linux. */ +/* + This is a driver for the SMC Ultra and SMC EtherEZ ISA ethercards. + + Written 1993-1998 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. 
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This driver uses the cards in the 8390-compatible mode.
+ Most of the run-time complexity is handled by the generic code in
+ 8390.c. The code in this file is responsible for
+
+ ultra_probe() Detecting and initializing the card.
+ ultra_probe1()
+ ultra_probe_isapnp()
+
+ ultra_open() The card-specific details of starting, stopping
+ ultra_reset_8390() and resetting the 8390 NIC core.
+ ultra_close()
+
+ ultra_block_input() Routines for reading and writing blocks of
+ ultra_block_output() packet buffer memory.
+ ultra_pio_input()
+ ultra_pio_output()
+
+ This driver enables the shared memory only when doing the actual data
+ transfers, to avoid a bug in early versions of the card that corrupted
+ data transferred by an AHA1542.
+
+ This driver now supports the programmed-I/O (PIO) data transfer mode of
+ the EtherEZ. It does not use the non-8390-compatible "Altego" mode.
+ That support (if available) is in smc-ez.c.
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users.
+ Donald Becker : 4/17/96 PIO support, minor potential problems avoided.
+ Donald Becker : 6/6/96 correctly set auto-wrap bit.
+ Alexander Sotirov : 1/20/01 Added support for ISAPnP cards
+
+ Note about the ISA PnP support:
+
+ This driver cannot autoprobe for more than one SMC EtherEZ PnP card.
+ You have to configure the second card manually through the /proc/isapnp
+ interface and then load the module with an explicit io=0x___ option.
+*/
+
+static const char version[] =
+ "smc-ultra.c:v2.02 2/3/98 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "8390.h"
+
+#define DRV_NAME "smc-ultra"
+
+/* A zero-terminated list of I/O addresses to be probed. 
*/ +static unsigned int ultra_portlist[] __initdata = +{0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380, 0}; + +static int ultra_probe1(struct net_device *dev, int ioaddr); + +#ifdef __ISAPNP__ +static int ultra_probe_isapnp(struct net_device *dev); +#endif + +static int ultra_open(struct net_device *dev); +static void ultra_reset_8390(struct net_device *dev); +static void ultra_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void ultra_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ultra_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page); +static void ultra_pio_get_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void ultra_pio_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ultra_pio_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page); +static int ultra_close_card(struct net_device *dev); + +#ifdef __ISAPNP__ +static struct isapnp_device_id ultra_device_ids[] __initdata = { + { ISAPNP_VENDOR('S','M','C'), ISAPNP_FUNCTION(0x8416), + ISAPNP_VENDOR('S','M','C'), ISAPNP_FUNCTION(0x8416), + (long) "SMC EtherEZ (8416)" }, + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(isapnp, ultra_device_ids); +#endif + + +#define START_PG 0x00 /* First page of TX buffer */ + +#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */ +#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */ +#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */ +#define IOPD 0x02 /* I/O Pipe Data (16 bits), PIO operation. */ +#define IOPA 0x07 /* I/O Pipe Address for PIO operation. */ +#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */ +#define ULTRA_IO_EXTENT 32 +#define EN0_ERWCNT 0x08 /* Early receive warning count. */ + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ultra_poll(struct net_device *dev) +{ + disable_irq(dev->irq); + ei_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif +/* Probe for the Ultra. This looks like a 8013 with the station + address PROM at I/O ports +8 to +13, with a checksum + following. +*/ + +static int __init do_ultra_probe(struct net_device *dev) +{ + int i; + int base_addr = dev->base_addr; + int irq = dev->irq; + + if (base_addr > 0x1ff) /* Check a single specified location. */ + return ultra_probe1(dev, base_addr); + else if (base_addr != 0) /* Don't probe at all. 
*/ + return -ENXIO; + +#ifdef __ISAPNP__ + /* Look for any installed ISAPnP cards */ + if (isapnp_present() && (ultra_probe_isapnp(dev) == 0)) + return 0; +#endif + + for (i = 0; ultra_portlist[i]; i++) { + dev->irq = irq; + if (ultra_probe1(dev, ultra_portlist[i]) == 0) + return 0; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init ultra_probe(int unit) +{ + struct net_device *dev = alloc_ei_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_ultra_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops ultra_netdev_ops = { + .ndo_open = ultra_open, + .ndo_stop = ultra_close_card, + + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ultra_poll, +#endif +}; + +static int __init ultra_probe1(struct net_device *dev, int ioaddr) +{ + int i, retval; + int checksum = 0; + const char *model_name; + unsigned char eeprom_irq = 0; + static unsigned version_printed; + /* Values from various config regs. */ + unsigned char num_pages, irqreg, addr, piomode; + unsigned char idreg = inb(ioaddr + 7); + unsigned char reg4 = inb(ioaddr + 4) & 0x7f; + + if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + /* Check the ID nibble. */ + if ((idreg & 0xF0) != 0x20 /* SMC Ultra */ + && (idreg & 0xF0) != 0x40) { /* SMC EtherEZ */ + retval = -ENODEV; + goto out; + } + + /* Select the station address register set. */ + outb(reg4, ioaddr + 4); + + for (i = 0; i < 8; i++) + checksum += inb(ioaddr + 8 + i); + if ((checksum & 0xff) != 0xFF) { + retval = -ENODEV; + goto out; + } + + if (ei_debug && version_printed++ == 0) + printk(version); + + model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ"; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk("%s: %s at %#3x, %pM", dev->name, model_name, + ioaddr, dev->dev_addr); + + /* Switch from the station address to the alternate register set and + read the useful registers there. */ + outb(0x80 | reg4, ioaddr + 4); + + /* Enabled FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */ + outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c); + piomode = inb(ioaddr + 0x8); + addr = inb(ioaddr + 0xb); + irqreg = inb(ioaddr + 0xd); + + /* Switch back to the station address register set so that the MS-DOS driver + can find the card after a warm boot. */ + outb(reg4, ioaddr + 4); + + if (dev->irq < 2) { + unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15}; + int irq; + + /* The IRQ bits are split. 
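+		   (Bit 6 of the register supplies the high bit of the
+		   3-bit table index and bits 3:2 the low two bits; e.g.
+		   irqreg == 0x48 indexes irqmap[4 + 2] and yields IRQ 11.
+		   Worked example added editorially.)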
*/ + irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)]; + + if (irq == 0) { + printk(", failed to detect IRQ line.\n"); + retval = -EAGAIN; + goto out; + } + dev->irq = irq; + eeprom_irq = 1; + } + + /* The 8390 isn't at the base address, so fake the offset */ + dev->base_addr = ioaddr+ULTRA_NIC_OFFSET; + + { + static const int addr_tbl[4] = { + 0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000 + }; + static const short num_pages_tbl[4] = { + 0x20, 0x40, 0x80, 0xff + }; + + dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ; + num_pages = num_pages_tbl[(addr >> 4) & 3]; + } + + ei_status.name = model_name; + ei_status.word16 = 1; + ei_status.tx_start_page = START_PG; + ei_status.rx_start_page = START_PG + TX_PAGES; + ei_status.stop_page = num_pages; + + ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG)*256); + if (!ei_status.mem) { + printk(", failed to ioremap.\n"); + retval = -ENOMEM; + goto out; + } + + dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG)*256; + + if (piomode) { + printk(",%s IRQ %d programmed-I/O mode.\n", + eeprom_irq ? "EEPROM" : "assigned ", dev->irq); + ei_status.block_input = &ultra_pio_input; + ei_status.block_output = &ultra_pio_output; + ei_status.get_8390_hdr = &ultra_pio_get_hdr; + } else { + printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ", + dev->irq, dev->mem_start, dev->mem_end-1); + ei_status.block_input = &ultra_block_input; + ei_status.block_output = &ultra_block_output; + ei_status.get_8390_hdr = &ultra_get_8390_hdr; + } + ei_status.reset_8390 = &ultra_reset_8390; + + dev->netdev_ops = &ultra_netdev_ops; + NS8390_init(dev, 0); + + retval = register_netdev(dev); + if (retval) + goto out; + return 0; +out: + release_region(ioaddr, ULTRA_IO_EXTENT); + return retval; +} + +#ifdef __ISAPNP__ +static int __init ultra_probe_isapnp(struct net_device *dev) +{ + int i; + + for (i = 0; ultra_device_ids[i].vendor != 0; i++) { + struct pnp_dev *idev = NULL; + + while ((idev = pnp_find_dev(NULL, + ultra_device_ids[i].vendor, + ultra_device_ids[i].function, + idev))) { + /* Avoid already found cards from previous calls */ + if (pnp_device_attach(idev) < 0) + continue; + if (pnp_activate_dev(idev) < 0) { + __again: + pnp_device_detach(idev); + continue; + } + /* if no io and irq, search for next */ + if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) + goto __again; + /* found it */ + dev->base_addr = pnp_port_start(idev, 0); + dev->irq = pnp_irq(idev, 0); + printk(KERN_INFO "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n", + (char *) ultra_device_ids[i].driver_data, + dev->base_addr, dev->irq); + if (ultra_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */ + printk(KERN_ERR "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr); + pnp_device_detach(idev); + return -ENXIO; + } + ei_status.priv = (unsigned long)idev; + break; + } + if (!idev) + continue; + return 0; + } + + return -ENODEV; +} +#endif + +static int +ultra_open(struct net_device *dev) +{ + int retval; + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + unsigned char irq2reg[] = {0, 0, 0x04, 0x08, 0, 0x0C, 0, 0x40, + 0, 0x04, 0x44, 0x48, 0, 0, 0, 0x4C, }; + + retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev); + if (retval) + return retval; + + outb(0x00, ioaddr); /* Disable shared memory for safety. */ + outb(0x80, ioaddr + 5); + /* Set the IRQ line. 
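+	   (irq2reg[] maps a host IRQ back onto those same register-13
+	   bits -- 6 and 3:2 -- that the probe decoded above; e.g.
+	   dev->irq == 10 writes 0x44.  Editorial note.)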
*/ + outb(inb(ioaddr + 4) | 0x80, ioaddr + 4); + outb((inb(ioaddr + 13) & ~0x4C) | irq2reg[dev->irq], ioaddr + 13); + outb(inb(ioaddr + 4) & 0x7f, ioaddr + 4); + + if (ei_status.block_input == &ultra_pio_input) { + outb(0x11, ioaddr + 6); /* Enable interrupts and PIO. */ + outb(0x01, ioaddr + 0x19); /* Enable ring read auto-wrap. */ + } else + outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ + /* Set the early receive warning level in window 0 high enough not + to receive ERW interrupts. */ + outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr); + outb(0xff, dev->base_addr + EN0_ERWCNT); + ei_open(dev); + return 0; +} + +static void +ultra_reset_8390(struct net_device *dev) +{ + int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */ + + outb(ULTRA_RESET, cmd_port); + if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies); + ei_status.txing = 0; + + outb(0x00, cmd_port); /* Disable shared memory for safety. */ + outb(0x80, cmd_port + 5); + if (ei_status.block_input == &ultra_pio_input) + outb(0x11, cmd_port + 6); /* Enable interrupts and PIO. */ + else + outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */ + + if (ei_debug > 1) printk("reset done\n"); +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void +ultra_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG)<<8); + + outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem on */ +#ifdef __BIG_ENDIAN + /* Officially this is what we are doing, but the readl() is faster */ + /* unfortunately it isn't endian aware of the struct */ + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); + hdr->count = le16_to_cpu(hdr->count); +#else + ((unsigned int*)hdr)[0] = readl(hdr_start); +#endif + outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem off */ +} + +/* Block input and output are easy on shared memory ethercards, the only + complication is when the ring buffer wraps. */ + +static void +ultra_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + void __iomem *xfer_start = ei_status.mem + ring_offset - (START_PG<<8); + + /* Enable shared memory. */ + outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); + + if (ring_offset + count > ei_status.stop_page*256) { + /* We must wrap the input move. */ + int semi_count = ei_status.stop_page*256 - ring_offset; + memcpy_fromio(skb->data, xfer_start, semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); + } else { + memcpy_fromio(skb->data, xfer_start, count); + } + + outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */ +} + +static void +ultra_block_output(struct net_device *dev, int count, const unsigned char *buf, + int start_page) +{ + void __iomem *shmem = ei_status.mem + ((start_page - START_PG)<<8); + + /* Enable shared memory. */ + outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); + + memcpy_toio(shmem, buf, count); + + outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */ +} + +/* The identical operations for programmed I/O cards. + The PIO model is trivial to use: the 16 bit start address is written + byte-sequentially to IOPA, with no intervening I/O operations, and the + data is read or written to the IOPD data port. 
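+   For instance, fetching a packet header from ring page 0x46 amounts
+   to (an editorial restatement of ultra_pio_get_hdr() below):
+
+	outb(0x00, ioaddr + IOPA);	set address LSB
+	outb(0x46, ioaddr + IOPA);	set address MSB = ring page
+	insw(ioaddr + IOPD, hdr, sizeof(struct e8390_pkt_hdr) >> 1);
+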
+ The only potential complication is that the address register is shared + and must be always be rewritten between each read/write direction change. + This is no problem for us, as the 8390 code ensures that we are single + threaded. */ +static void ultra_pio_get_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */ + outb(ring_page, ioaddr + IOPA); + insw(ioaddr + IOPD, hdr, sizeof(struct e8390_pkt_hdr)>>1); +} + +static void ultra_pio_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + char *buf = skb->data; + + /* For now set the address again, although it should already be correct. */ + outb(ring_offset, ioaddr + IOPA); /* Set the address, LSB first. */ + outb(ring_offset >> 8, ioaddr + IOPA); + /* We know skbuffs are padded to at least word alignment. */ + insw(ioaddr + IOPD, buf, (count+1)>>1); +} + +static void ultra_pio_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */ + outb(start_page, ioaddr + IOPA); + /* An extra odd byte is OK here as well. */ + outsw(ioaddr + IOPD, buf, (count+1)>>1); +} + +static int +ultra_close_card(struct net_device *dev) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */ + + netif_stop_queue(dev); + + if (ei_debug > 1) + printk("%s: Shutting down ethercard.\n", dev->name); + + outb(0x00, ioaddr + 6); /* Disable interrupts. */ + free_irq(dev->irq, dev); + + NS8390_init(dev, 0); + + /* We should someday disable shared memory and change to 8-bit mode + "just in case"... */ + + return 0; +} + + +#ifdef MODULE +#define MAX_ULTRA_CARDS 4 /* Max number of Ultra cards per module */ +static struct net_device *dev_ultra[MAX_ULTRA_CARDS]; +static int io[MAX_ULTRA_CARDS]; +static int irq[MAX_ULTRA_CARDS]; + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); +MODULE_DESCRIPTION("SMC Ultra/EtherEZ ISA/PnP Ethernet driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that only a single autoprobe takes place per call. +ISA device autoprobes on a running machine are not recommended. 
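+
+Typical module usage (editorial example, using the io=/irq= parameters
+declared below) is therefore explicit:
+
+	modprobe smc-ultra io=0x200,0x240 irq=10,11
+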
*/ +int __init +init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) { + if (io[this_dev] == 0) { + if (this_dev != 0) break; /* only autoprobe 1st one */ + printk(KERN_NOTICE "smc-ultra.c: Presently autoprobing (not recommended) for a single card.\n"); + } + dev = alloc_ei_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + if (do_ultra_probe(dev) == 0) { + dev_ultra[found++] = dev; + continue; + } + free_netdev(dev); + printk(KERN_WARNING "smc-ultra.c: No SMC Ultra card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + /* NB: ultra_close_card() does free_irq */ +#ifdef __ISAPNP__ + struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; + if (idev) + pnp_device_detach(idev); +#endif + release_region(dev->base_addr - ULTRA_NIC_OFFSET, ULTRA_IO_EXTENT); + iounmap(ei_status.mem); +} + +void __exit +cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) { + struct net_device *dev = dev_ultra[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/smc-ultra32.c b/drivers/net/ethernet/8390/smc-ultra32.c new file mode 100644 index 0000000..e459c3b --- /dev/null +++ b/drivers/net/ethernet/8390/smc-ultra32.c @@ -0,0 +1,464 @@ +/* smc-ultra32.c: An SMC Ultra32 EISA ethernet driver for linux. + +Sources: + + This driver is based on (cloned from) the ISA SMC Ultra driver + written by Donald Becker. Modifications to support the EISA + version of the card by Paul Gortmaker and Leonard N. Zubkoff. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + +Theory of Operation: + + The SMC Ultra32C card uses the SMC 83c790 chip which is also + found on the ISA SMC Ultra cards. It has a shared memory mode of + operation that makes it similar to the ISA version of the card. + The main difference is that the EISA card has 32KB of RAM, but + only an 8KB window into that memory. The EISA card also can be + set for a bus-mastering mode of operation via the ECU, but that + is not (and probably will never be) supported by this driver. + The ECU should be run to enable shared memory and to disable the + bus-mastering feature for use with linux. + + By programming the 8390 to use only 8KB RAM, the modifications + to the ISA driver can be limited to the probe and initialization + code. This allows easy integration of EISA support into the ISA + driver. However, the driver development kit from SMC provided the + register information for sliding the 8KB window, and hence the 8390 + is programmed to use the full 32KB RAM. + + Unfortunately this required code changes outside the probe/init + routines, and thus we decided to separate the EISA driver from + the ISA one. In this way, ISA users don't end up with a larger + driver due to the EISA code, and EISA users don't end up with a + larger driver due to the ISA EtherEZ PIO code. The driver is + similar to the 3c503/16 driver, in that the window must be set + back to the 1st 8KB of space for access to the two 8390 Tx slots. 
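+
+  (Editorial note: the 32KB appear to the 8390 as pages 0x00-0x7f,
+  reached from the host through four 8KB windows selected via a config
+  register; ultra32_get_8390_hdr() and ultra32_block_input() below do
+  the corresponding window arithmetic.)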
+ + In testing, using only 8KB RAM (3 Tx / 5 Rx) didn't appear to + be a limiting factor, since the EISA bus could get packets off + the card fast enough, but having the use of lots of RAM as Rx + space is extra insurance if interrupt latencies become excessive. + +*/ + +static const char *version = "smc-ultra32.c: 06/97 v1.00\n"; + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8390.h" + +#define DRV_NAME "smc-ultra32" + +static int ultra32_probe1(struct net_device *dev, int ioaddr); +static int ultra32_open(struct net_device *dev); +static void ultra32_reset_8390(struct net_device *dev); +static void ultra32_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void ultra32_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ultra32_block_output(struct net_device *dev, int count, + const unsigned char *buf, + const int start_page); +static int ultra32_close(struct net_device *dev); + +#define ULTRA32_CMDREG 0 /* Offset to ASIC command register. */ +#define ULTRA32_RESET 0x80 /* Board reset, in ULTRA32_CMDREG. */ +#define ULTRA32_MEMENB 0x40 /* Enable the shared memory. */ +#define ULTRA32_NIC_OFFSET 16 /* NIC register offset from the base_addr. */ +#define ULTRA32_IO_EXTENT 32 +#define EN0_ERWCNT 0x08 /* Early receive warning count. */ + +/* + * Defines that apply only to the Ultra32 EISA card. Note that + * "smc" = 10011 01101 00011 = 0x4da3, and hence !smc8010.cfg translates + * into an EISA ID of 0x1080A34D + */ +#define ULTRA32_BASE 0xca0 +#define ULTRA32_ID 0x1080a34d +#define ULTRA32_IDPORT (-0x20) /* 0xc80 */ +/* Config regs 1->7 from the EISA !SMC8010.CFG file. */ +#define ULTRA32_CFG1 0x04 /* 0xca4 */ +#define ULTRA32_CFG2 0x05 /* 0xca5 */ +#define ULTRA32_CFG3 (-0x18) /* 0xc88 */ +#define ULTRA32_CFG4 (-0x17) /* 0xc89 */ +#define ULTRA32_CFG5 (-0x16) /* 0xc8a */ +#define ULTRA32_CFG6 (-0x15) /* 0xc8b */ +#define ULTRA32_CFG7 0x0d /* 0xcad */ + +static void cleanup_card(struct net_device *dev) +{ + int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; + /* NB: ultra32_close_card() does free_irq */ + release_region(ioaddr, ULTRA32_IO_EXTENT); + iounmap(ei_status.mem); +} + +/* Probe for the Ultra32. This looks like a 8013 with the station + address PROM at I/O ports +8 to +13, with a checksum + following. +*/ + +struct net_device * __init ultra32_probe(int unit) +{ + struct net_device *dev; + int base; + int irq; + int err = -ENODEV; + + if (!EISA_bus) + return ERR_PTR(-ENODEV); + + dev = alloc_ei_netdev(); + + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + + irq = dev->irq; + + /* EISA spec allows for up to 16 slots, but 8 is typical. 
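+	   (So the loop below walks slots 1-8 only, probing I/O bases
+	   0x1ca0, 0x2ca0, ..., 0x8ca0 -- the slot number sits in the top
+	   nibble, which is what the "ioaddr >> 12" in the banner printk
+	   relies on.  Editorial note.)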
*/ + for (base = 0x1000 + ULTRA32_BASE; base < 0x9000; base += 0x1000) { + if (ultra32_probe1(dev, base) == 0) + break; + dev->irq = irq; + } + if (base >= 0x9000) + goto out; + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: + cleanup_card(dev); +out: + free_netdev(dev); + return ERR_PTR(err); +} + + +static const struct net_device_ops ultra32_netdev_ops = { + .ndo_open = ultra32_open, + .ndo_stop = ultra32_close, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + +static int __init ultra32_probe1(struct net_device *dev, int ioaddr) +{ + int i, edge, media, retval; + int checksum = 0; + const char *model_name; + static unsigned version_printed; + /* Values from various config regs. */ + unsigned char idreg; + unsigned char reg4; + const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"}; + + if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + if (inb(ioaddr + ULTRA32_IDPORT) == 0xff || + inl(ioaddr + ULTRA32_IDPORT) != ULTRA32_ID) { + retval = -ENODEV; + goto out; + } + + media = inb(ioaddr + ULTRA32_CFG7) & 0x03; + edge = inb(ioaddr + ULTRA32_CFG5) & 0x08; + printk("SMC Ultra32 in EISA Slot %d, Media: %s, %s IRQs.\n", + ioaddr >> 12, ifmap[media], + (edge ? "Edge Triggered" : "Level Sensitive")); + + idreg = inb(ioaddr + 7); + reg4 = inb(ioaddr + 4) & 0x7f; + + /* Check the ID nibble. */ + if ((idreg & 0xf0) != 0x20) { /* SMC Ultra */ + retval = -ENODEV; + goto out; + } + + /* Select the station address register set. */ + outb(reg4, ioaddr + 4); + + for (i = 0; i < 8; i++) + checksum += inb(ioaddr + 8 + i); + if ((checksum & 0xff) != 0xff) { + retval = -ENODEV; + goto out; + } + + if (ei_debug && version_printed++ == 0) + printk(version); + + model_name = "SMC Ultra32"; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk("%s: %s at 0x%X, %pM", + dev->name, model_name, ioaddr, dev->dev_addr); + + /* Switch from the station address to the alternate register set and + read the useful registers there. */ + outb(0x80 | reg4, ioaddr + 4); + + /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */ + outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c); + + /* Reset RAM addr. */ + outb(0x00, ioaddr + 0x0b); + + /* Switch back to the station address register set so that the + MS-DOS driver can find the card after a warm boot. */ + outb(reg4, ioaddr + 4); + + if ((inb(ioaddr + ULTRA32_CFG5) & 0x40) == 0) { + printk("\nsmc-ultra32: Card RAM is disabled! " + "Run EISA config utility.\n"); + retval = -ENODEV; + goto out; + } + if ((inb(ioaddr + ULTRA32_CFG2) & 0x04) == 0) + printk("\nsmc-ultra32: Ignoring Bus-Master enable bit. " + "Run EISA config utility.\n"); + + if (dev->irq < 2) { + unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15}; + int irq = irqmap[inb(ioaddr + ULTRA32_CFG5) & 0x07]; + if (irq == 0) { + printk(", failed to detect IRQ line.\n"); + retval = -EAGAIN; + goto out; + } + dev->irq = irq; + } + + /* The 8390 isn't at the base address, so fake the offset */ + dev->base_addr = ioaddr + ULTRA32_NIC_OFFSET; + + /* Save RAM address in the unused reg0 to avoid excess inb's. 
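+	   (Worked example, added editorially: a CFG3 readback of 0x0c
+	   gives reg0 = 0x0c, hence mem_start = 0xc0000 + (0x0c << 11) =
+	   0xc6000.)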
*/ + ei_status.reg0 = inb(ioaddr + ULTRA32_CFG3) & 0xfc; + + dev->mem_start = 0xc0000 + ((ei_status.reg0 & 0x7c) << 11); + + ei_status.name = model_name; + ei_status.word16 = 1; + ei_status.tx_start_page = 0; + ei_status.rx_start_page = TX_PAGES; + /* All Ultra32 cards have 32KB memory with an 8KB window. */ + ei_status.stop_page = 128; + + ei_status.mem = ioremap(dev->mem_start, 0x2000); + if (!ei_status.mem) { + printk(", failed to ioremap.\n"); + retval = -ENOMEM; + goto out; + } + dev->mem_end = dev->mem_start + 0x1fff; + + printk(", IRQ %d, 32KB memory, 8KB window at 0x%lx-0x%lx.\n", + dev->irq, dev->mem_start, dev->mem_end); + ei_status.block_input = &ultra32_block_input; + ei_status.block_output = &ultra32_block_output; + ei_status.get_8390_hdr = &ultra32_get_8390_hdr; + ei_status.reset_8390 = &ultra32_reset_8390; + + dev->netdev_ops = &ultra32_netdev_ops; + NS8390_init(dev, 0); + + return 0; +out: + release_region(ioaddr, ULTRA32_IO_EXTENT); + return retval; +} + +static int ultra32_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */ + int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : IRQF_SHARED; + int retval; + + retval = request_irq(dev->irq, ei_interrupt, irq_flags, dev->name, dev); + if (retval) + return retval; + + outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */ + outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */ + outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */ + outb(0x01, ioaddr + 6); /* Enable Interrupts. */ + /* Set the early receive warning level in window 0 high enough not + to receive ERW interrupts. */ + outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr); + outb(0xff, dev->base_addr + EN0_ERWCNT); + ei_open(dev); + return 0; +} + +static int ultra32_close(struct net_device *dev) +{ + int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* CMDREG */ + + netif_stop_queue(dev); + + if (ei_debug > 1) + printk("%s: Shutting down ethercard.\n", dev->name); + + outb(0x00, ioaddr + ULTRA32_CFG6); /* Disable Interrupts. */ + outb(0x00, ioaddr + 6); /* Disable interrupts. */ + free_irq(dev->irq, dev); + + NS8390_init(dev, 0); + + return 0; +} + +static void ultra32_reset_8390(struct net_device *dev) +{ + int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC base addr */ + + outb(ULTRA32_RESET, ioaddr); + if (ei_debug > 1) printk("resetting Ultra32, t=%ld...", jiffies); + ei_status.txing = 0; + + outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */ + outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */ + outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */ + outb(0x01, ioaddr + 6); /* Enable Interrupts. */ + if (ei_debug > 1) printk("reset done\n"); +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void ultra32_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, + int ring_page) +{ + void __iomem *hdr_start = ei_status.mem + ((ring_page & 0x1f) << 8); + unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3; + + /* Select correct 8KB Window. 
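+	   (Worked example, added editorially: ring page 0x4a selects
+	   window (0x4a & 0x60) >> 5 == 2 and is read at offset
+	   (0x4a & 0x1f) << 8 == 0xa00 within that window.)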
*/ + outb(ei_status.reg0 | ((ring_page & 0x60) >> 5), RamReg); + +#ifdef __BIG_ENDIAN + /* Officially this is what we are doing, but the readl() is faster */ + /* unfortunately it isn't endian aware of the struct */ + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); + hdr->count = le16_to_cpu(hdr->count); +#else + ((unsigned int*)hdr)[0] = readl(hdr_start); +#endif +} + +/* Block input and output are easy on shared memory ethercards, the only + complication is when the ring buffer wraps, or in this case, when a + packet spans an 8KB boundary. Note that the current 8KB segment is + already set by the get_8390_hdr routine. */ + +static void ultra32_block_input(struct net_device *dev, + int count, + struct sk_buff *skb, + int ring_offset) +{ + void __iomem *xfer_start = ei_status.mem + (ring_offset & 0x1fff); + unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3; + + if ((ring_offset & ~0x1fff) != ((ring_offset + count - 1) & ~0x1fff)) { + int semi_count = 8192 - (ring_offset & 0x1FFF); + memcpy_fromio(skb->data, xfer_start, semi_count); + count -= semi_count; + if (ring_offset < 96*256) { + /* Select next 8KB Window. */ + ring_offset += semi_count; + outb(ei_status.reg0 | ((ring_offset & 0x6000) >> 13), RamReg); + memcpy_fromio(skb->data + semi_count, ei_status.mem, count); + } else { + /* Select first 8KB Window. */ + outb(ei_status.reg0, RamReg); + memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); + } + } else { + memcpy_fromio(skb->data, xfer_start, count); + } +} + +static void ultra32_block_output(struct net_device *dev, + int count, + const unsigned char *buf, + int start_page) +{ + void __iomem *xfer_start = ei_status.mem + (start_page<<8); + unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3; + + /* Select first 8KB Window. */ + outb(ei_status.reg0, RamReg); + + memcpy_toio(xfer_start, buf, count); +} + +#ifdef MODULE +#define MAX_ULTRA32_CARDS 4 /* Max number of Ultra cards per module */ +static struct net_device *dev_ultra[MAX_ULTRA32_CARDS]; + +MODULE_DESCRIPTION("SMC Ultra32 EISA ethernet driver"); +MODULE_LICENSE("GPL"); + +int __init init_module(void) +{ + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) { + struct net_device *dev = ultra32_probe(-1); + if (IS_ERR(dev)) + break; + dev_ultra[found++] = dev; + } + if (found) + return 0; + printk(KERN_WARNING "smc-ultra32.c: No SMC Ultra32 found.\n"); + return -ENXIO; +} + +void __exit cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) { + struct net_device *dev = dev_ultra[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ + diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c new file mode 100644 index 0000000..d85f0a8 --- /dev/null +++ b/drivers/net/ethernet/8390/stnic.c @@ -0,0 +1,294 @@ +/* stnic.c : A SH7750 specific part of driver for NS DP83902A ST-NIC. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 1999 kaz Kojima + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef CONFIG_SH_STANDARD_BIOS +#include +#endif + +#include "8390.h" + +#define DRV_NAME "stnic" + +#define byte unsigned char +#define half unsigned short +#define word unsigned int +#define vbyte volatile unsigned char +#define vhalf volatile unsigned short +#define vword volatile unsigned int + +#define STNIC_RUN 0x01 /* 1 == Run, 0 == reset. */ + +#define START_PG 0 /* First page of TX buffer */ +#define STOP_PG 128 /* Last page +1 of RX ring */ + +/* Alias */ +#define STNIC_CR E8390_CMD +#define PG0_RSAR0 EN0_RSARLO +#define PG0_RSAR1 EN0_RSARHI +#define PG0_RBCR0 EN0_RCNTLO +#define PG0_RBCR1 EN0_RCNTHI + +#define CR_RRD E8390_RREAD +#define CR_RWR E8390_RWRITE +#define CR_PG0 E8390_PAGE0 +#define CR_STA E8390_START +#define CR_RDMA E8390_NODMA + +/* FIXME! YOU MUST SET YOUR OWN ETHER ADDRESS. */ +static byte stnic_eadr[6] = +{0x00, 0xc0, 0x6e, 0x00, 0x00, 0x07}; + +static struct net_device *stnic_dev; + +static void stnic_reset (struct net_device *dev); +static void stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void stnic_block_input (struct net_device *dev, int count, + struct sk_buff *skb , int ring_offset); +static void stnic_block_output (struct net_device *dev, int count, + const unsigned char *buf, int start_page); + +static void stnic_init (struct net_device *dev); + +/* SH7750 specific read/write io. */ +static inline void +STNIC_DELAY (void) +{ + vword trash; + trash = *(vword *) 0xa0000000; + trash = *(vword *) 0xa0000000; + trash = *(vword *) 0xa0000000; +} + +static inline byte +STNIC_READ (int reg) +{ + byte val; + + val = (*(vhalf *) (PA_83902 + ((reg) << 1)) >> 8) & 0xff; + STNIC_DELAY (); + return val; +} + +static inline void +STNIC_WRITE (int reg, byte val) +{ + *(vhalf *) (PA_83902 + ((reg) << 1)) = ((half) (val) << 8); + STNIC_DELAY (); +} + +static int __init stnic_probe(void) +{ + struct net_device *dev; + int i, err; + + /* If we are not running on a SolutionEngine, give up now */ + if (! MACH_SE) + return -ENODEV; + + /* New style probing API */ + dev = alloc_ei_netdev(); + if (!dev) + return -ENOMEM; + +#ifdef CONFIG_SH_STANDARD_BIOS + sh_bios_get_node_addr (stnic_eadr); +#endif + for (i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = stnic_eadr[i]; + + /* Set the base address to point to the NIC, not the "real" base! */ + dev->base_addr = 0x1000; + dev->irq = IRQ_STNIC; + dev->netdev_ops = &ei_netdev_ops; + + /* Snarf the interrupt now. There's no point in waiting since we cannot + share and the board will usually be enabled. 
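+     (The 0 passed as the request_irq() flags argument below is
+     deliberate: the line cannot be shared, as just noted.  Editorial
+     aside.)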
*/ + err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev); + if (err) { + printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq); + free_netdev(dev); + return err; + } + + ei_status.name = dev->name; + ei_status.word16 = 1; +#ifdef __LITTLE_ENDIAN__ + ei_status.bigendian = 0; +#else + ei_status.bigendian = 1; +#endif + ei_status.tx_start_page = START_PG; + ei_status.rx_start_page = START_PG + TX_PAGES; + ei_status.stop_page = STOP_PG; + + ei_status.reset_8390 = &stnic_reset; + ei_status.get_8390_hdr = &stnic_get_hdr; + ei_status.block_input = &stnic_block_input; + ei_status.block_output = &stnic_block_output; + + stnic_init (dev); + + err = register_netdev(dev); + if (err) { + free_irq(dev->irq, dev); + free_netdev(dev); + return err; + } + stnic_dev = dev; + + printk (KERN_INFO "NS ST-NIC 83902A\n"); + + return 0; +} + +static void +stnic_reset (struct net_device *dev) +{ + *(vhalf *) PA_83902_RST = 0; + udelay (5); + if (ei_debug > 1) + printk (KERN_WARNING "8390 reset done (%ld).\n", jiffies); + *(vhalf *) PA_83902_RST = ~0; + udelay (5); +} + +static void +stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page) +{ + half buf[2]; + + STNIC_WRITE (PG0_RSAR0, 0); + STNIC_WRITE (PG0_RSAR1, ring_page); + STNIC_WRITE (PG0_RBCR0, 4); + STNIC_WRITE (PG0_RBCR1, 0); + STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); + + buf[0] = *(vhalf *) PA_83902_IF; + STNIC_DELAY (); + buf[1] = *(vhalf *) PA_83902_IF; + STNIC_DELAY (); + hdr->next = buf[0] >> 8; + hdr->status = buf[0] & 0xff; +#ifdef __LITTLE_ENDIAN__ + hdr->count = buf[1]; +#else + hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8); +#endif + + if (ei_debug > 1) + printk (KERN_DEBUG "ring %x status %02x next %02x count %04x.\n", + ring_page, hdr->status, hdr->next, hdr->count); + + STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); +} + +/* Block input and output, similar to the Crynwr packet driver. If you are + porting to a new ethercard look at the packet driver source for hints. + The HP LAN doesn't use shared memory -- we put the packet + out through the "remote DMA" dataport. 
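+   A block read thus reduces to programming the remote-DMA address and
+   byte count and draining the data port, roughly (editorial summary of
+   stnic_block_input() below):
+
+	STNIC_WRITE (PG0_RSAR0, offset & 0xff);
+	STNIC_WRITE (PG0_RSAR1, offset >> 8);
+	STNIC_WRITE (PG0_RBCR0, length & 0xff);
+	STNIC_WRITE (PG0_RBCR1, length >> 8);
+	STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA);
+	then read 16-bit words from PA_83902_IF
+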
*/ + +static void +stnic_block_input (struct net_device *dev, int length, struct sk_buff *skb, + int offset) +{ + char *buf = skb->data; + half val; + + STNIC_WRITE (PG0_RSAR0, offset & 0xff); + STNIC_WRITE (PG0_RSAR1, offset >> 8); + STNIC_WRITE (PG0_RBCR0, length & 0xff); + STNIC_WRITE (PG0_RBCR1, length >> 8); + STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); + + if (length & 1) + length++; + + while (length > 0) + { + val = *(vhalf *) PA_83902_IF; +#ifdef __LITTLE_ENDIAN__ + *buf++ = val & 0xff; + *buf++ = val >> 8; +#else + *buf++ = val >> 8; + *buf++ = val & 0xff; +#endif + STNIC_DELAY (); + length -= sizeof (half); + } + + STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); +} + +static void +stnic_block_output (struct net_device *dev, int length, + const unsigned char *buf, int output_page) +{ + STNIC_WRITE (PG0_RBCR0, 1); /* Write non-zero value */ + STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); + STNIC_DELAY (); + + STNIC_WRITE (PG0_RBCR0, length & 0xff); + STNIC_WRITE (PG0_RBCR1, length >> 8); + STNIC_WRITE (PG0_RSAR0, 0); + STNIC_WRITE (PG0_RSAR1, output_page); + STNIC_WRITE (STNIC_CR, CR_RWR | CR_PG0 | CR_STA); + + if (length & 1) + length++; + + while (length > 0) + { +#ifdef __LITTLE_ENDIAN__ + *(vhalf *) PA_83902_IF = ((half) buf[1] << 8) | buf[0]; +#else + *(vhalf *) PA_83902_IF = ((half) buf[0] << 8) | buf[1]; +#endif + STNIC_DELAY (); + buf += sizeof (half); + length -= sizeof (half); + } + + STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); +} + +/* This function resets the STNIC if something screws up. */ +static void +stnic_init (struct net_device *dev) +{ + stnic_reset (dev); + NS8390_init (dev, 0); +} + +static void __exit stnic_cleanup(void) +{ + unregister_netdev(stnic_dev); + free_irq(stnic_dev->irq, stnic_dev); + free_netdev(stnic_dev); +} + +module_init(stnic_probe); +module_exit(stnic_cleanup); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c new file mode 100644 index 0000000..8831a33 --- /dev/null +++ b/drivers/net/ethernet/8390/wd.c @@ -0,0 +1,567 @@ +/* wd.c: A WD80x3 ethernet driver for linux. */ +/* + Written 1993-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + This is a driver for WD8003 and WD8013 "compatible" ethercards. + + Thanks to Russ Nelson (nelson@crnwyr.com) for loaning me a WD8013. + + Changelog: + + Paul Gortmaker : multiple card support for module users, support + for non-standard memory sizes. + + +*/ + +static const char version[] = + "wd.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "8390.h" + +#define DRV_NAME "wd" + +/* A zero-terminated list of I/O addresses to be probed. 
*/ +static unsigned int wd_portlist[] __initdata = +{0x300, 0x280, 0x380, 0x240, 0}; + +static int wd_probe1(struct net_device *dev, int ioaddr); + +static int wd_open(struct net_device *dev); +static void wd_reset_8390(struct net_device *dev); +static void wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void wd_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void wd_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); +static int wd_close(struct net_device *dev); + + +#define WD_START_PG 0x00 /* First page of TX buffer */ +#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */ +#define WD13_STOP_PG 0x40 /* Last page +1 of RX ring */ + +#define WD_CMDREG 0 /* Offset to ASIC command register. */ +#define WD_RESET 0x80 /* Board reset, in WD_CMDREG. */ +#define WD_MEMENB 0x40 /* Enable the shared memory. */ +#define WD_CMDREG5 5 /* Offset to 16-bit-only ASIC register 5. */ +#define ISA16 0x80 /* Enable 16 bit access from the ISA bus. */ +#define NIC16 0x40 /* Enable 16 bit access from the 8390. */ +#define WD_NIC_OFFSET 16 /* Offset to the 8390 from the base_addr. */ +#define WD_IO_EXTENT 32 + + +/* Probe for the WD8003 and WD8013. These cards have the station + address PROM at I/O ports +8 to +13, with a checksum + following. A Soundblaster can have the same checksum as an WDethercard, + so we have an extra exclusionary check for it. + + The wd_probe1() routine initializes the card and fills the + station address field. */ + +static int __init do_wd_probe(struct net_device *dev) +{ + int i; + struct resource *r; + int base_addr = dev->base_addr; + int irq = dev->irq; + int mem_start = dev->mem_start; + int mem_end = dev->mem_end; + + if (base_addr > 0x1ff) { /* Check a user specified location. */ + r = request_region(base_addr, WD_IO_EXTENT, "wd-probe"); + if ( r == NULL) + return -EBUSY; + i = wd_probe1(dev, base_addr); + if (i != 0) + release_region(base_addr, WD_IO_EXTENT); + else + r->name = dev->name; + return i; + } + else if (base_addr != 0) /* Don't probe at all. */ + return -ENXIO; + + for (i = 0; wd_portlist[i]; i++) { + int ioaddr = wd_portlist[i]; + r = request_region(ioaddr, WD_IO_EXTENT, "wd-probe"); + if (r == NULL) + continue; + if (wd_probe1(dev, ioaddr) == 0) { + r->name = dev->name; + return 0; + } + release_region(ioaddr, WD_IO_EXTENT); + dev->irq = irq; + dev->mem_start = mem_start; + dev->mem_end = mem_end; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init wd_probe(int unit) +{ + struct net_device *dev = alloc_ei_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_wd_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops wd_netdev_ops = { + .ndo_open = wd_open, + .ndo_stop = wd_close, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + +static int __init wd_probe1(struct net_device *dev, int ioaddr) +{ + int i; + int err; + int checksum = 0; + int ancient = 0; /* An old card without config registers. 
*/ + int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */ + const char *model_name; + static unsigned version_printed; + + for (i = 0; i < 8; i++) + checksum += inb(ioaddr + 8 + i); + if (inb(ioaddr + 8) == 0xff /* Extra check to avoid soundcard. */ + || inb(ioaddr + 9) == 0xff + || (checksum & 0xff) != 0xFF) + return -ENODEV; + + /* Check for semi-valid mem_start/end values if supplied. */ + if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) { + printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n"); + dev->mem_start = 0; + dev->mem_end = 0; + } + + if (ei_debug && version_printed++ == 0) + printk(version); + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk("%s: WD80x3 at %#3x, %pM", + dev->name, ioaddr, dev->dev_addr); + + /* The following PureData probe code was contributed by + Mike Jagdis . Puredata does software + configuration differently from others so we have to check for them. + This detects an 8 bit, 16 bit or dumb (Toshiba, jumpered) card. + */ + if (inb(ioaddr+0) == 'P' && inb(ioaddr+1) == 'D') { + unsigned char reg5 = inb(ioaddr+5); + + switch (inb(ioaddr+2)) { + case 0x03: word16 = 0; model_name = "PDI8023-8"; break; + case 0x05: word16 = 0; model_name = "PDUC8023"; break; + case 0x0a: word16 = 1; model_name = "PDI8023-16"; break; + /* Either 0x01 (dumb) or they've released a new version. */ + default: word16 = 0; model_name = "PDI8023"; break; + } + dev->mem_start = ((reg5 & 0x1c) + 0xc0) << 12; + dev->irq = (reg5 & 0xe0) == 0xe0 ? 10 : (reg5 >> 5) + 1; + } else { /* End of PureData probe */ + /* This method of checking for a 16-bit board is borrowed from the + we.c driver. A simpler method is just to look in ASIC reg. 0x03. + I'm comparing the two method in alpha test to make certain they + return the same result. */ + /* Check for the old 8 bit board - it has register 0/8 aliasing. + Do NOT check i>=6 here -- it hangs the old 8003 boards! */ + for (i = 0; i < 6; i++) + if (inb(ioaddr+i) != inb(ioaddr+8+i)) + break; + if (i >= 6) { + ancient = 1; + model_name = "WD8003-old"; + word16 = 0; + } else { + int tmp = inb(ioaddr+1); /* fiddle with 16bit bit */ + outb( tmp ^ 0x01, ioaddr+1 ); /* attempt to clear 16bit bit */ + if (((inb( ioaddr+1) & 0x01) == 0x01) /* A 16 bit card */ + && (tmp & 0x01) == 0x01 ) { /* In a 16 slot. */ + int asic_reg5 = inb(ioaddr+WD_CMDREG5); + /* Magic to set ASIC to word-wide mode. */ + outb( NIC16 | (asic_reg5&0x1f), ioaddr+WD_CMDREG5); + outb(tmp, ioaddr+1); + model_name = "WD8013"; + word16 = 1; /* We have a 16bit board here! */ + } else { + model_name = "WD8003"; + word16 = 0; + } + outb(tmp, ioaddr+1); /* Restore original reg1 value. */ + } +#ifndef final_version + if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01)) + printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).", + word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8); +#endif + } + +#if defined(WD_SHMEM) && WD_SHMEM > 0x80000 + /* Allow a compile-time override. */ + dev->mem_start = WD_SHMEM; +#else + if (dev->mem_start == 0) { + /* Sanity and old 8003 check */ + int reg0 = inb(ioaddr); + if (reg0 == 0xff || reg0 == 0) { + /* Future plan: this could check a few likely locations first. */ + dev->mem_start = 0xd0000; + printk(" assigning address %#lx", dev->mem_start); + } else { + int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f; + /* Some boards don't have the register 5 -- it returns 0xff. 
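+			   (Worked example, added editorially: reg0 == 0x28
+			   with high_addr_bits == 0x01 gives mem_start =
+			   (0x28 << 13) + (0x01 << 19) = 0x50000 + 0x80000 =
+			   0xd0000.)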
*/ + if (high_addr_bits == 0x1f || word16 == 0) + high_addr_bits = 0x01; + dev->mem_start = ((reg0&0x3f) << 13) + (high_addr_bits << 19); + } + } +#endif + + /* The 8390 isn't at the base address -- the ASIC regs are there! */ + dev->base_addr = ioaddr+WD_NIC_OFFSET; + + if (dev->irq < 2) { + static const int irqmap[] = {9, 3, 5, 7, 10, 11, 15, 4}; + int reg1 = inb(ioaddr+1); + int reg4 = inb(ioaddr+4); + if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */ + short nic_addr = ioaddr+WD_NIC_OFFSET; + unsigned long irq_mask; + + /* We have an old-style ethercard that doesn't report its IRQ + line. Do autoirq to find the IRQ line. Note that this IS NOT + a reliable way to trigger an interrupt. */ + outb_p(E8390_NODMA + E8390_STOP, nic_addr); + outb(0x00, nic_addr+EN0_IMR); /* Disable all intrs. */ + + irq_mask = probe_irq_on(); + outb_p(0xff, nic_addr + EN0_IMR); /* Enable all interrupts. */ + outb_p(0x00, nic_addr + EN0_RCNTLO); + outb_p(0x00, nic_addr + EN0_RCNTHI); + outb(E8390_RREAD+E8390_START, nic_addr); /* Trigger it... */ + mdelay(20); + dev->irq = probe_irq_off(irq_mask); + + outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */ + + if (ei_debug > 2) + printk(" autoirq is %d", dev->irq); + if (dev->irq < 2) + dev->irq = word16 ? 10 : 5; + } else + dev->irq = irqmap[((reg4 >> 5) & 0x03) + (reg1 & 0x04)]; + } else if (dev->irq == 2) /* Fixup bogosity: IRQ2 is really IRQ9 */ + dev->irq = 9; + + /* Snarf the interrupt now. There's no point in waiting since we cannot + share and the board will usually be enabled. */ + i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); + if (i) { + printk (" unable to get IRQ %d.\n", dev->irq); + return i; + } + + /* OK, were are certain this is going to work. Setup the device. */ + ei_status.name = model_name; + ei_status.word16 = word16; + ei_status.tx_start_page = WD_START_PG; + ei_status.rx_start_page = WD_START_PG + TX_PAGES; + + /* Don't map in the shared memory until the board is actually opened. */ + + /* Some cards (eg WD8003EBT) can be jumpered for more (32k!) memory. */ + if (dev->mem_end != 0) { + ei_status.stop_page = (dev->mem_end - dev->mem_start)/256; + ei_status.priv = dev->mem_end - dev->mem_start; + } else { + ei_status.stop_page = word16 ? WD13_STOP_PG : WD03_STOP_PG; + dev->mem_end = dev->mem_start + (ei_status.stop_page - WD_START_PG)*256; + ei_status.priv = (ei_status.stop_page - WD_START_PG)*256; + } + + ei_status.mem = ioremap(dev->mem_start, ei_status.priv); + if (!ei_status.mem) { + free_irq(dev->irq, dev); + return -ENOMEM; + } + + printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n", + model_name, dev->irq, dev->mem_start, dev->mem_end-1); + + ei_status.reset_8390 = wd_reset_8390; + ei_status.block_input = wd_block_input; + ei_status.block_output = wd_block_output; + ei_status.get_8390_hdr = wd_get_8390_hdr; + + dev->netdev_ops = &wd_netdev_ops; + NS8390_init(dev, 0); + +#if 1 + /* Enable interrupt generation on softconfig cards -- M.U */ + /* .. but possibly potentially unsafe - Donald */ + if (inb(ioaddr+14) & 0x20) + outb(inb(ioaddr+4)|0x80, ioaddr+4); +#endif + + err = register_netdev(dev); + if (err) { + free_irq(dev->irq, dev); + iounmap(ei_status.mem); + } + return err; +} + +static int +wd_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + + /* Map in the shared memory. Always set register 0 last to remain + compatible with very old boards. 
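+	   (Continuing the editorial example: for mem_start == 0xd0000,
+	   reg0 = 0x28 | WD_MEMENB = 0x68 and, on a 16-bit card,
+	   reg5 = 0x01 | NIC16 = 0x41.)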
*/ + ei_status.reg0 = ((dev->mem_start>>13) & 0x3f) | WD_MEMENB; + ei_status.reg5 = ((dev->mem_start>>19) & 0x1f) | NIC16; + + if (ei_status.word16) + outb(ei_status.reg5, ioaddr+WD_CMDREG5); + outb(ei_status.reg0, ioaddr); /* WD_CMDREG */ + + return ei_open(dev); +} + +static void +wd_reset_8390(struct net_device *dev) +{ + int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + + outb(WD_RESET, wd_cmd_port); + if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies); + ei_status.txing = 0; + + /* Set up the ASIC registers, just in case something changed them. */ + outb((((dev->mem_start>>13) & 0x3f)|WD_MEMENB), wd_cmd_port); + if (ei_status.word16) + outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5); + + if (ei_debug > 1) printk("reset done\n"); +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void +wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + + int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + void __iomem *hdr_start = ei_status.mem + ((ring_page - WD_START_PG)<<8); + + /* We'll always get a 4 byte header read followed by a packet read, so + we enable 16 bit mode before the header, and disable after the body. */ + if (ei_status.word16) + outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5); + +#ifdef __BIG_ENDIAN + /* Officially this is what we are doing, but the readl() is faster */ + /* unfortunately it isn't endian aware of the struct */ + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); + hdr->count = le16_to_cpu(hdr->count); +#else + ((unsigned int*)hdr)[0] = readl(hdr_start); +#endif +} + +/* Block input and output are easy on shared memory ethercards, and trivial + on the Western digital card where there is no choice of how to do it. + The only complications are that the ring buffer wraps, and need to map + switch between 8- and 16-bit modes. */ + +static void +wd_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + unsigned long offset = ring_offset - (WD_START_PG<<8); + void __iomem *xfer_start = ei_status.mem + offset; + + if (offset + count > ei_status.priv) { + /* We must wrap the input move. */ + int semi_count = ei_status.priv - offset; + memcpy_fromio(skb->data, xfer_start, semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); + } else { + /* Packet is in one chunk -- we can copy + cksum. */ + memcpy_fromio(skb->data, xfer_start, count); + } + + /* Turn off 16 bit access so that reboot works. ISA brain-damage */ + if (ei_status.word16) + outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5); +} + +static void +wd_block_output(struct net_device *dev, int count, const unsigned char *buf, + int start_page) +{ + int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + void __iomem *shmem = ei_status.mem + ((start_page - WD_START_PG)<<8); + + + if (ei_status.word16) { + /* Turn on and off 16 bit access so that reboot works. 
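+		   (That is: raise the ISA16 bit in register 5 only for the
+		   duration of the memcpy_toio(), then restore plain reg5 so
+		   an 8-bit BIOS access at reboot never finds the board in
+		   16-bit mode.  Editorial note.)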
*/ + outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5); + memcpy_toio(shmem, buf, count); + outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5); + } else + memcpy_toio(shmem, buf, count); +} + + +static int +wd_close(struct net_device *dev) +{ + int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + + if (ei_debug > 1) + printk("%s: Shutting down ethercard.\n", dev->name); + ei_close(dev); + + /* Change from 16-bit to 8-bit shared memory so reboot works. */ + if (ei_status.word16) + outb(ei_status.reg5, wd_cmdreg + WD_CMDREG5 ); + + /* And disable the shared memory. */ + outb(ei_status.reg0 & ~WD_MEMENB, wd_cmdreg); + + return 0; +} + + +#ifdef MODULE +#define MAX_WD_CARDS 4 /* Max number of wd cards per module */ +static struct net_device *dev_wd[MAX_WD_CARDS]; +static int io[MAX_WD_CARDS]; +static int irq[MAX_WD_CARDS]; +static int mem[MAX_WD_CARDS]; +static int mem_end[MAX_WD_CARDS]; /* for non std. mem size */ + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(mem, int, NULL, 0); +module_param_array(mem_end, int, NULL, 0); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)"); +MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)"); +MODULE_PARM_DESC(mem_end, "memory end address(es)"); +MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013 ; SMC Elite, Elite16 ethernet driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that only a single autoprobe takes place per call. +ISA device autoprobes on a running machine are not recommended. */ + +int __init init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) { + if (io[this_dev] == 0) { + if (this_dev != 0) break; /* only autoprobe 1st one */ + printk(KERN_NOTICE "wd.c: Presently autoprobing (not recommended) for a single card.\n"); + } + dev = alloc_ei_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + dev->mem_start = mem[this_dev]; + dev->mem_end = mem_end[this_dev]; + if (do_wd_probe(dev) == 0) { + dev_wd[found++] = dev; + continue; + } + free_netdev(dev); + printk(KERN_WARNING "wd.c: No wd80x3 card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + free_irq(dev->irq, dev); + release_region(dev->base_addr - WD_NIC_OFFSET, WD_IO_EXTENT); + iounmap(ei_status.mem); +} + +void __exit +cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) { + struct net_device *dev = dev_wd[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c new file mode 100644 index 0000000..15e7751a --- /dev/null +++ b/drivers/net/ethernet/8390/zorro8390.c @@ -0,0 +1,452 @@ +/* + * Amiga Linux/m68k and Linux/PPC Zorro NS8390 Ethernet Driver + * + * (C) Copyright 1998-2000 by some Elitist 680x0 Users(TM) + * + * --------------------------------------------------------------------------- + * + * This program is based on all the other NE2000 drivers for Linux + * + * --------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * --------------------------------------------------------------------------- + * + * The Ariadne II and X-Surf are Zorro-II boards containing Realtek RTL8019AS + * Ethernet Controllers. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define EI_SHIFT(x) (ei_local->reg_offset[x]) +#define ei_inb(port) in_8(port) +#define ei_outb(val, port) out_8(port, val) +#define ei_inb_p(port) in_8(port) +#define ei_outb_p(val, port) out_8(port, val) + +static const char version[] = + "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#include "lib8390.c" + +#define DRV_NAME "zorro8390" + +#define NE_BASE (dev->base_addr) +#define NE_CMD (0x00 * 2) +#define NE_DATAPORT (0x10 * 2) /* NatSemi-defined port window offset */ +#define NE_RESET (0x1f * 2) /* Issue a read to reset, + * a write to clear. */ +#define NE_IO_EXTENT (0x20 * 2) + +#define NE_EN0_ISR (0x07 * 2) +#define NE_EN0_DCFG (0x0e * 2) + +#define NE_EN0_RSARLO (0x08 * 2) +#define NE_EN0_RSARHI (0x09 * 2) +#define NE_EN0_RCNTLO (0x0a * 2) +#define NE_EN0_RXCR (0x0c * 2) +#define NE_EN0_TXCR (0x0d * 2) +#define NE_EN0_RCNTHI (0x0b * 2) +#define NE_EN0_IMR (0x0f * 2) + +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + +#define WORDSWAP(a) ((((a) >> 8) & 0xff) | ((a) << 8)) + +static struct card_info { + zorro_id id; + const char *name; + unsigned int offset; +} cards[] __devinitdata = { + { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 }, + { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 }, +}; + +/* Hard reset the card. This used to pause for the same period that a + * 8390 reset command required, but that shouldn't be necessary. + */ +static void zorro8390_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + + if (ei_debug > 1) + netdev_dbg(dev, "resetting - t=%ld...\n", jiffies); + + z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) { + netdev_warn(dev, "%s: did not complete\n", __func__); + break; + } + z_writeb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + * we don't need to be concerned with ring wrap as the header will be at + * the start of a page, so we optimize accordingly. + */ +static void zorro8390_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page) +{ + int nic_base = dev->base_addr; + int cnt; + short *ptrs; + + /* This *shouldn't* happen. 
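+	 * A set ei_status.dmaing flag means an earlier remote-DMA
+	 * transfer through the 8390 never completed.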
+ * If it does, it's the last thing you'll see + */ + if (ei_status.dmaing) { + netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", + __func__, ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); + z_writeb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO); + z_writeb(0, nic_base + NE_EN0_RCNTHI); + z_writeb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */ + z_writeb(ring_page, nic_base + NE_EN0_RSARHI); + z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + ptrs = (short *)hdr; + for (cnt = 0; cnt < sizeof(struct e8390_pkt_hdr) >> 1; cnt++) + *ptrs++ = z_readw(NE_BASE + NE_DATAPORT); + + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ + + hdr->count = WORDSWAP(hdr->count); + + ei_status.dmaing &= ~0x01; +} + +/* Block input and output, similar to the Crynwr packet driver. + * If you are porting to a new ethercard, look at the packet driver source + * for hints. The NEx000 doesn't share the on-board packet memory -- + * you have to put the packet out through the "remote DMA" dataport + * using z_writeb. + */ +static void zorro8390_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + int nic_base = dev->base_addr; + char *buf = skb->data; + short *ptrs; + int cnt; + + /* This *shouldn't* happen. + * If it does, it's the last thing you'll see + */ + if (ei_status.dmaing) { + netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", + __func__, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); + z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO); + z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI); + z_writeb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO); + z_writeb(ring_offset >> 8, nic_base + NE_EN0_RSARHI); + z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + ptrs = (short *)buf; + for (cnt = 0; cnt < count >> 1; cnt++) + *ptrs++ = z_readw(NE_BASE + NE_DATAPORT); + if (count & 0x01) + buf[count - 1] = z_readb(NE_BASE + NE_DATAPORT); + + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ + ei_status.dmaing &= ~0x01; +} + +static void zorro8390_block_output(struct net_device *dev, int count, + const unsigned char *buf, + const int start_page) +{ + int nic_base = NE_BASE; + unsigned long dma_start; + short *ptrs; + int cnt; + + /* Round the count up for word writes. Do we need to do this? + * What effect will an odd byte count have on the 8390? + * I should check someday. + */ + if (count & 0x01) + count++; + + /* This *shouldn't* happen. + * If it does, it's the last thing you'll see + */ + if (ei_status.dmaing) { + netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", + __func__, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + z_writeb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); + + /* Now the normal output. 
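	   This is the standard 8390 remote-DMA write sequence: load the
	   byte count into RBCR0/1 and the target page address into RSAR0/1,
	   issue E8390_RWRITE, push the words out through the data port,
	   then wait below for the remote-DMA-complete (RDC) bit in the ISR.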
*/ + z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO); + z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI); + z_writeb(0x00, nic_base + NE_EN0_RSARLO); + z_writeb(start_page, nic_base + NE_EN0_RSARHI); + + z_writeb(E8390_RWRITE + E8390_START, nic_base + NE_CMD); + ptrs = (short *)buf; + for (cnt = 0; cnt < count >> 1; cnt++) + z_writew(*ptrs++, NE_BASE + NE_DATAPORT); + + dma_start = jiffies; + + while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + 2 * HZ / 100)) { + /* 20ms */ + netdev_err(dev, "timeout waiting for Tx RDC\n"); + zorro8390_reset_8390(dev); + __NS8390_init(dev, 1); + break; + } + + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ + ei_status.dmaing &= ~0x01; +} + +static int zorro8390_open(struct net_device *dev) +{ + __ei_open(dev); + return 0; +} + +static int zorro8390_close(struct net_device *dev) +{ + if (ei_debug > 1) + netdev_dbg(dev, "Shutting down ethercard\n"); + __ei_close(dev); + return 0; +} + +static void __devexit zorro8390_remove_one(struct zorro_dev *z) +{ + struct net_device *dev = zorro_get_drvdata(z); + + unregister_netdev(dev); + free_irq(IRQ_AMIGA_PORTS, dev); + release_mem_region(ZTWO_PADDR(dev->base_addr), NE_IO_EXTENT * 2); + free_netdev(dev); +} + +static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = { + { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, }, + { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl); + +static const struct net_device_ops zorro8390_netdev_ops = { + .ndo_open = zorro8390_open, + .ndo_stop = zorro8390_close, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = __ei_poll, +#endif +}; + +static int __devinit zorro8390_init(struct net_device *dev, + unsigned long board, const char *name, + unsigned long ioaddr) +{ + int i; + int err; + unsigned char SA_prom[32]; + int start_page, stop_page; + static u32 zorro8390_offsets[16] = { + 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, + }; + + /* Reset card. Who knows what dain-bramaged state it was left in. */ + { + unsigned long reset_start_time = jiffies; + + z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET); + + while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, + reset_start_time + 2 * HZ / 100)) { + netdev_warn(dev, "not found (no reset ack)\n"); + return -ENODEV; + } + + z_writeb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */ + } + + /* Read the 16 bytes of station address PROM. + * We must first initialize registers, + * similar to NS8390_init(eifdev, 0). + * We can't reliably read the SAPROM address without this. + * (I learned the hard way!). 
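	 * The program_seq table below is the canonical NE2000 probe:
	 * stop the chip, force byte-wide DMA, mask interrupts, then start
	 * a 32-byte remote read from PROM address 0. Each PROM octet comes
	 * back twice on this byte-wide path, which is why the read loop
	 * that follows keeps one byte of each pair and discards the
	 * duplicate.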
+ */ + { + static const struct { + u32 value; + u32 offset; + } program_seq[] = { + {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD}, + /* Select page 0 */ + {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */ + {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */ + {0x00, NE_EN0_RCNTHI}, + {0x00, NE_EN0_IMR}, /* Mask completion irq */ + {0xFF, NE_EN0_ISR}, + {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */ + {32, NE_EN0_RCNTLO}, + {0x00, NE_EN0_RCNTHI}, + {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */ + {0x00, NE_EN0_RSARHI}, + {E8390_RREAD + E8390_START, NE_CMD}, + }; + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + z_writeb(program_seq[i].value, + ioaddr + program_seq[i].offset); + } + for (i = 0; i < 16; i++) { + SA_prom[i] = z_readb(ioaddr + NE_DATAPORT); + (void)z_readb(ioaddr + NE_DATAPORT); + } + + /* We must set the 8390 for word mode. */ + z_writeb(0x49, ioaddr + NE_EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + dev->base_addr = ioaddr; + dev->irq = IRQ_AMIGA_PORTS; + + /* Install the Interrupt handler */ + i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, + IRQF_SHARED, DRV_NAME, dev); + if (i) + return i; + + for (i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = SA_prom[i]; + + pr_debug("Found ethernet address: %pM\n", dev->dev_addr); + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = 1; + + ei_status.rx_start_page = start_page + TX_PAGES; + + ei_status.reset_8390 = zorro8390_reset_8390; + ei_status.block_input = zorro8390_block_input; + ei_status.block_output = zorro8390_block_output; + ei_status.get_8390_hdr = zorro8390_get_8390_hdr; + ei_status.reg_offset = zorro8390_offsets; + + dev->netdev_ops = &zorro8390_netdev_ops; + __NS8390_init(dev, 0); + err = register_netdev(dev); + if (err) { + free_irq(IRQ_AMIGA_PORTS, dev); + return err; + } + + netdev_info(dev, "%s at 0x%08lx, Ethernet Address %pM\n", + name, board, dev->dev_addr); + + return 0; +} + +static int __devinit zorro8390_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + struct net_device *dev; + unsigned long board, ioaddr; + int err, i; + + for (i = ARRAY_SIZE(cards) - 1; i >= 0; i--) + if (z->id == cards[i].id) + break; + if (i < 0) + return -ENODEV; + + board = z->resource.start; + ioaddr = board + cards[i].offset; + dev = ____alloc_ei_netdev(0); + if (!dev) + return -ENOMEM; + if (!request_mem_region(ioaddr, NE_IO_EXTENT * 2, DRV_NAME)) { + free_netdev(dev); + return -EBUSY; + } + err = zorro8390_init(dev, board, cards[i].name, ZTWO_VADDR(ioaddr)); + if (err) { + release_mem_region(ioaddr, NE_IO_EXTENT * 2); + free_netdev(dev); + return err; + } + zorro_set_drvdata(z, dev); + return 0; +} + +static struct zorro_driver zorro8390_driver = { + .name = "zorro8390", + .id_table = zorro8390_zorro_tbl, + .probe = zorro8390_init_one, + .remove = __devexit_p(zorro8390_remove_one), +}; + +static int __init zorro8390_init_module(void) +{ + return zorro_register_driver(&zorro8390_driver); +} + +static void __exit zorro8390_cleanup_module(void) +{ + zorro_unregister_driver(&zorro8390_driver); +} + +module_init(zorro8390_init_module); +module_exit(zorro8390_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 5e62efd..56ed5ec 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -12,6 +12,7 @@ menuconfig ETHERNET if ETHERNET source 
"drivers/net/ethernet/3com/Kconfig" +source "drivers/net/ethernet/8390/Kconfig" source "drivers/net/ethernet/amd/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 1bc2ac2..fc82588 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -3,4 +3,5 @@ # obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ +obj-$(CONFIG_NET_VENDOR_8390) += 8390/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c deleted file mode 100644 index 2991736..0000000 --- a/drivers/net/hp-plus.c +++ /dev/null @@ -1,506 +0,0 @@ -/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */ -/* - Written 1994 by Donald Becker. - - This driver is for the Hewlett Packard PC LAN (27***) plus ethercards. - These cards are sold under several model numbers, usually 2724*. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - As is often the case, a great deal of credit is owed to Russ Nelson. - The Crynwr packet driver was my primary source of HP-specific - programming information. -*/ - -static const char version[] = -"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include - -#include /* Important -- this inlines word moves. */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "8390.h" - -#define DRV_NAME "hp-plus" - -/* A zero-terminated list of I/O addresses to be probed. */ -static unsigned int hpplus_portlist[] __initdata = -{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0}; - -/* - The HP EtherTwist chip implementation is a fairly routine DP8390 - implementation. It allows both shared memory and programmed-I/O buffer - access, using a custom interface for both. The programmed-I/O mode is - entirely implemented in the HP EtherTwist chip, bypassing the problem - ridden built-in 8390 facilities used on NE2000 designs. The shared - memory mode is likewise special, with an offset register used to make - packets appear at the shared memory base. Both modes use a base and bounds - page register to hide the Rx ring buffer wrap -- a packet that spans the - end of physical buffer memory appears continuous to the driver. (c.f. the - 3c503 and Cabletron E2100) - - A special note: the internal buffer of the board is only 8 bits wide. - This lays several nasty traps for the unaware: - - the 8390 must be programmed for byte-wide operations - - all I/O and memory operations must work on whole words (the access - latches are serially preloaded and have no byte-swapping ability). - - This board is laid out in I/O space much like the earlier HP boards: - the first 16 locations are for the board registers, and the second 16 are - for the 8390. The board is easy to identify, with both a dedicated 16 bit - ID register and a constant 0x530* value in the upper bits of the paging - register. -*/ - -#define HP_ID 0x00 /* ID register, always 0x4850. */ -#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */ -#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */ -#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */ -#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */ -#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. 
*/ -#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */ -#define HP_IO_EXTENT 32 - -#define HP_START_PG 0x00 /* First page of TX buffer */ -#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */ - -/* The register set selected in HP_PAGING. */ -enum PageName { - Perf_Page = 0, /* Normal operation. */ - MAC_Page = 1, /* The ethernet address (+checksum). */ - HW_Page = 2, /* EEPROM-loaded hardware parameters. */ - LAN_Page = 4, /* Transceiver selection, testing, etc. */ - ID_Page = 6 }; - -/* The bit definitions for the HPP_OPTION register. */ -enum HP_Option { - NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */ - EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20, - MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, }; - -static int hpp_probe1(struct net_device *dev, int ioaddr); - -static void hpp_reset_8390(struct net_device *dev); -static int hpp_open(struct net_device *dev); -static int hpp_close(struct net_device *dev); -static void hpp_mem_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void hpp_mem_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void hpp_io_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void hpp_io_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); - - -/* Probe a list of addresses for an HP LAN+ adaptor. - This routine is almost boilerplate. */ - -static int __init do_hpp_probe(struct net_device *dev) -{ - int i; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return hpp_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - for (i = 0; hpplus_portlist[i]; i++) { - if (hpp_probe1(dev, hpplus_portlist[i]) == 0) - return 0; - dev->irq = irq; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init hp_plus_probe(int unit) -{ - struct net_device *dev = alloc_eip_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_hpp_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops hpp_netdev_ops = { - .ndo_open = hpp_open, - .ndo_stop = hpp_close, - .ndo_start_xmit = eip_start_xmit, - .ndo_tx_timeout = eip_tx_timeout, - .ndo_get_stats = eip_get_stats, - .ndo_set_multicast_list = eip_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = eip_poll, -#endif -}; - - -/* Do the interesting part of the probe at a single address. */ -static int __init hpp_probe1(struct net_device *dev, int ioaddr) -{ - int i, retval; - unsigned char checksum = 0; - const char name[] = "HP-PC-LAN+"; - int mem_start; - static unsigned version_printed; - - if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - /* Check for the HP+ signature, 50 48 0x 53. 
*/ - if (inw(ioaddr + HP_ID) != 0x4850 || - (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) { - retval = -ENODEV; - goto out; - } - - if (ei_debug && version_printed++ == 0) - printk(version); - - printk("%s: %s at %#3x, ", dev->name, name, ioaddr); - - /* Retrieve and checksum the station address. */ - outw(MAC_Page, ioaddr + HP_PAGING); - - for(i = 0; i < ETHER_ADDR_LEN; i++) { - unsigned char inval = inb(ioaddr + 8 + i); - dev->dev_addr[i] = inval; - checksum += inval; - } - checksum += inb(ioaddr + 14); - - printk("%pM", dev->dev_addr); - - if (checksum != 0xff) { - printk(" bad checksum %2.2x.\n", checksum); - retval = -ENODEV; - goto out; - } else { - /* Point at the Software Configuration Flags. */ - outw(ID_Page, ioaddr + HP_PAGING); - printk(" ID %4.4x", inw(ioaddr + 12)); - } - - /* Read the IRQ line. */ - outw(HW_Page, ioaddr + HP_PAGING); - { - int irq = inb(ioaddr + 13) & 0x0f; - int option = inw(ioaddr + HPP_OPTION); - - dev->irq = irq; - if (option & MemEnable) { - mem_start = inw(ioaddr + 9) << 8; - printk(", IRQ %d, memory address %#x.\n", irq, mem_start); - } else { - mem_start = 0; - printk(", IRQ %d, programmed-I/O mode.\n", irq); - } - } - - /* Set the wrap registers for string I/O reads. */ - outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14); - - /* Set the base address to point to the NIC, not the "real" base! */ - dev->base_addr = ioaddr + NIC_OFFSET; - - dev->netdev_ops = &hpp_netdev_ops; - - ei_status.name = name; - ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */ - ei_status.tx_start_page = HP_START_PG; - ei_status.rx_start_page = HP_START_PG + TX_PAGES/2; - ei_status.stop_page = HP_STOP_PG; - - ei_status.reset_8390 = &hpp_reset_8390; - ei_status.block_input = &hpp_io_block_input; - ei_status.block_output = &hpp_io_block_output; - ei_status.get_8390_hdr = &hpp_io_get_8390_hdr; - - /* Check if the memory_enable flag is set in the option register. */ - if (mem_start) { - ei_status.block_input = &hpp_mem_block_input; - ei_status.block_output = &hpp_mem_block_output; - ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr; - dev->mem_start = mem_start; - ei_status.mem = ioremap(mem_start, - (HP_STOP_PG - HP_START_PG)*256); - if (!ei_status.mem) { - retval = -ENOMEM; - goto out; - } - ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256; - dev->mem_end = ei_status.rmem_end - = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256; - } - - outw(Perf_Page, ioaddr + HP_PAGING); - NS8390p_init(dev, 0); - /* Leave the 8390 and HP chip reset. */ - outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION); - - retval = register_netdev(dev); - if (retval) - goto out1; - return 0; -out1: - iounmap(ei_status.mem); -out: - release_region(ioaddr, HP_IO_EXTENT); - return retval; -} - -static int -hpp_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg; - int retval; - - if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) { - return retval; - } - - /* Reset the 8390 and HP chip. */ - option_reg = inw(ioaddr + HPP_OPTION); - outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION); - udelay(5); - /* Unreset the board and enable interrupts. */ - outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION); - - /* Set the wrap registers for programmed-I/O operation. */ - outw(HW_Page, ioaddr + HP_PAGING); - outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14); - - /* Select the operational page. 
*/ - outw(Perf_Page, ioaddr + HP_PAGING); - - return eip_open(dev); -} - -static int -hpp_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg = inw(ioaddr + HPP_OPTION); - - free_irq(dev->irq, dev); - eip_close(dev); - outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset, - ioaddr + HPP_OPTION); - - return 0; -} - -static void -hpp_reset_8390(struct net_device *dev) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg = inw(ioaddr + HPP_OPTION); - - if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies); - - outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION); - /* Pause a few cycles for the hardware reset to take place. */ - udelay(5); - ei_status.txing = 0; - outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION); - - udelay(5); - - - if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0) - printk("%s: hp_reset_8390() did not complete.\n", dev->name); - - if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies); -} - -/* The programmed-I/O version of reading the 4 byte 8390 specific header. - Note that transfer with the EtherTwist+ must be on word boundaries. */ - -static void -hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - - outw((ring_page<<8), ioaddr + HPP_IN_ADDR); - insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); -} - -/* Block input and output, similar to the Crynwr packet driver. */ - -static void -hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - char *buf = skb->data; - - outw(ring_offset, ioaddr + HPP_IN_ADDR); - insw(ioaddr + HP_DATAPORT, buf, count>>1); - if (count & 0x01) - buf[count-1] = inw(ioaddr + HP_DATAPORT); -} - -/* The corresponding shared memory versions of the above 2 functions. */ - -static void -hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg = inw(ioaddr + HPP_OPTION); - - outw((ring_page<<8), ioaddr + HPP_IN_ADDR); - outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); - memcpy_fromio(hdr, ei_status.mem, sizeof(struct e8390_pkt_hdr)); - outw(option_reg, ioaddr + HPP_OPTION); - hdr->count = (le16_to_cpu(hdr->count) + 3) & ~3; /* Round up allocation. */ -} - -static void -hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg = inw(ioaddr + HPP_OPTION); - - outw(ring_offset, ioaddr + HPP_IN_ADDR); - - outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); - - /* Caution: this relies on get_8390_hdr() rounding up count! - Also note that we *can't* use eth_io_copy_and_sum() because - it will not always copy "count" bytes (e.g. padded IP). */ - - memcpy_fromio(skb->data, ei_status.mem, count); - outw(option_reg, ioaddr + HPP_OPTION); -} - -/* A special note: we *must* always transfer >=16 bit words. - It's always safe to round up, so we do. 
*/ -static void -hpp_io_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - outw(start_page << 8, ioaddr + HPP_OUT_ADDR); - outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2); -} - -static void -hpp_mem_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - int ioaddr = dev->base_addr - NIC_OFFSET; - int option_reg = inw(ioaddr + HPP_OPTION); - - outw(start_page << 8, ioaddr + HPP_OUT_ADDR); - outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); - memcpy_toio(ei_status.mem, buf, (count + 3) & ~3); - outw(option_reg, ioaddr + HPP_OPTION); -} - - -#ifdef MODULE -#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */ -static struct net_device *dev_hpp[MAX_HPP_CARDS]; -static int io[MAX_HPP_CARDS]; -static int irq[MAX_HPP_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O port address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s); ignored if properly detected"); -MODULE_DESCRIPTION("HP PC-LAN+ ISA ethernet driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that only a single autoprobe takes place per call. -ISA device autoprobes on a running machine are not recommended. */ -int __init -init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) break; /* only autoprobe 1st one */ - printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n"); - } - dev = alloc_eip_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - if (do_hpp_probe(dev) == 0) { - dev_hpp[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - /* NB: hpp_close() handles free_irq */ - iounmap(ei_status.mem); - release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) { - struct net_device *dev = dev_hpp[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/hp.c b/drivers/net/hp.c deleted file mode 100644 index 18564d4..0000000 --- a/drivers/net/hp.c +++ /dev/null @@ -1,439 +0,0 @@ -/* hp.c: A HP LAN ethernet driver for linux. */ -/* - Written 1993-94 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - This is a driver for the HP PC-LAN adaptors. - - Sources: - The Crynwr packet driver. -*/ - -static const char version[] = - "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "8390.h" - -#define DRV_NAME "hp" - -/* A zero-terminated list of I/O addresses to be probed. 
*/ -static unsigned int hppclan_portlist[] __initdata = -{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0}; - -#define HP_IO_EXTENT 32 - -#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */ -#define HP_ID 0x07 -#define HP_CONFIGURE 0x08 /* Configuration register. */ -#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */ -#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */ -#define HP_DATAON 0x10 /* Turn on dataport */ -#define NIC_OFFSET 0x10 /* Offset the 8390 registers. */ - -#define HP_START_PG 0x00 /* First page of TX buffer */ -#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */ -#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. */ - -static int hp_probe1(struct net_device *dev, int ioaddr); - -static void hp_reset_8390(struct net_device *dev); -static void hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void hp_block_input(struct net_device *dev, int count, - struct sk_buff *skb , int ring_offset); -static void hp_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); - -static void hp_init_card(struct net_device *dev); - -/* The map from IRQ number to HP_CONFIGURE register setting. */ -/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */ -static char irqmap[16] __initdata= { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0}; - - -/* Probe for an HP LAN adaptor. - Also initialize the card and fill in STATION_ADDR with the station - address. */ - -static int __init do_hp_probe(struct net_device *dev) -{ - int i; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return hp_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - for (i = 0; hppclan_portlist[i]; i++) { - if (hp_probe1(dev, hppclan_portlist[i]) == 0) - return 0; - dev->irq = irq; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init hp_probe(int unit) -{ - struct net_device *dev = alloc_eip_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_hp_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static int __init hp_probe1(struct net_device *dev, int ioaddr) -{ - int i, retval, board_id, wordmode; - const char *name; - static unsigned version_printed; - - if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - /* Check for the HP physical address, 08 00 09 xx xx xx. */ - /* This really isn't good enough: we may pick up HP LANCE boards - also! Avoid the lance 0x5757 signature. */ - if (inb(ioaddr) != 0x08 - || inb(ioaddr+1) != 0x00 - || inb(ioaddr+2) != 0x09 - || inb(ioaddr+14) == 0x57) { - retval = -ENODEV; - goto out; - } - - /* Set up the parameters based on the board ID. - If you have additional mappings, please mail them to me -djb. */ - if ((board_id = inb(ioaddr + HP_ID)) & 0x80) { - name = "HP27247"; - wordmode = 1; - } else { - name = "HP27250"; - wordmode = 0; - } - - if (ei_debug && version_printed++ == 0) - printk(version); - - printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr); - - for(i = 0; i < ETHER_ADDR_LEN; i++) - dev->dev_addr[i] = inb(ioaddr + i); - - printk(" %pM", dev->dev_addr); - - /* Snarf the interrupt now. Someday this could be moved to open(). 
*/ - if (dev->irq < 2) { - static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0}; - static const int irq_8list[] = { 7, 5, 3, 4, 9, 0}; - const int *irqp = wordmode ? irq_16list : irq_8list; - do { - int irq = *irqp; - if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) { - unsigned long cookie = probe_irq_on(); - /* Twinkle the interrupt, and check if it's seen. */ - outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE); - outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE); - if (irq == probe_irq_off(cookie) /* It's a good IRQ line! */ - && request_irq (irq, eip_interrupt, 0, DRV_NAME, dev) == 0) { - printk(" selecting IRQ %d.\n", irq); - dev->irq = *irqp; - break; - } - } - } while (*++irqp); - if (*irqp == 0) { - printk(" no free IRQ lines.\n"); - retval = -EBUSY; - goto out; - } - } else { - if (dev->irq == 2) - dev->irq = 9; - if ((retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev))) { - printk (" unable to get IRQ %d.\n", dev->irq); - goto out; - } - } - - /* Set the base address to point to the NIC, not the "real" base! */ - dev->base_addr = ioaddr + NIC_OFFSET; - dev->netdev_ops = &eip_netdev_ops; - - ei_status.name = name; - ei_status.word16 = wordmode; - ei_status.tx_start_page = HP_START_PG; - ei_status.rx_start_page = HP_START_PG + TX_PAGES; - ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG; - - ei_status.reset_8390 = hp_reset_8390; - ei_status.get_8390_hdr = hp_get_8390_hdr; - ei_status.block_input = hp_block_input; - ei_status.block_output = hp_block_output; - hp_init_card(dev); - - retval = register_netdev(dev); - if (retval) - goto out1; - return 0; -out1: - free_irq(dev->irq, dev); -out: - release_region(ioaddr, HP_IO_EXTENT); - return retval; -} - -static void -hp_reset_8390(struct net_device *dev) -{ - int hp_base = dev->base_addr - NIC_OFFSET; - int saved_config = inb_p(hp_base + HP_CONFIGURE); - - if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies); - outb_p(0x00, hp_base + HP_CONFIGURE); - ei_status.txing = 0; - /* Pause just a few cycles for the hardware reset to take place. */ - udelay(5); - - outb_p(saved_config, hp_base + HP_CONFIGURE); - udelay(5); - - if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0) - printk("%s: hp_reset_8390() did not complete.\n", dev->name); - - if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies); -} - -static void -hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - int nic_base = dev->base_addr; - int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE); - - outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE); - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base); - outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); - outb_p(0, nic_base + EN0_RCNTHI); - outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ - outb_p(ring_page, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base); - - if (ei_status.word16) - insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); - else - insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); - - outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE); -} - -/* Block input and output, similar to the Crynwr packet driver. If you are - porting to a new ethercard look at the packet driver source for hints. - The HP LAN doesn't use shared memory -- we put the packet - out through the "remote DMA" dataport. 
*/ - -static void -hp_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - int nic_base = dev->base_addr; - int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE); - int xfer_count = count; - char *buf = skb->data; - - outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE); - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base); - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); - outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base); - if (ei_status.word16) { - insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1); - if (count & 0x01) - buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT), xfer_count++; - } else { - insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count); - } - /* This is for the ALPHA version only, remove for later releases. */ - if (ei_debug > 0) { /* DMA termination address check... */ - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - int addr = (high << 8) + low; - /* Check only the lower 8 bits so we can ignore ring wrap. */ - if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff)) - printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n", - dev->name, ring_offset + xfer_count, addr); - } - outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE); -} - -static void -hp_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - int nic_base = dev->base_addr; - int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE); - - outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE); - /* Round the count up for word writes. Do we need to do this? - What effect will an odd byte count have on the 8390? - I should check someday. */ - if (ei_status.word16 && (count & 0x01)) - count++; - /* We should already be in page 0, but to be safe... */ - outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base); - -#ifdef NE8390_RW_BUGFIX - /* Handle the read-before-write bug the same way as the - Crynwr packet driver -- the NatSemi method doesn't work. */ - outb_p(0x42, nic_base + EN0_RCNTLO); - outb_p(0, nic_base + EN0_RCNTHI); - outb_p(0xff, nic_base + EN0_RSARLO); - outb_p(0x00, nic_base + EN0_RSARHI); -#define NE_CMD 0x00 - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - /* Make certain that the dummy read has occurred. */ - inb_p(0x61); - inb_p(0x61); -#endif - - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(0x00, nic_base + EN0_RSARLO); - outb_p(start_page, nic_base + EN0_RSARHI); - - outb_p(E8390_RWRITE+E8390_START, nic_base); - if (ei_status.word16) { - /* Use the 'rep' sequence for 16 bit boards. */ - outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1); - } else { - outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count); - } - - /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */ - - /* This is for the ALPHA version only, remove for later releases. */ - if (ei_debug > 0) { /* DMA termination address check... */ - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - int addr = (high << 8) + low; - if ((start_page << 8) + count != addr) - printk("%s: TX Transfer address mismatch, %#4.4x vs. 
%#4.4x.\n", - dev->name, (start_page << 8) + count, addr); - } - outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE); -} - -/* This function resets the ethercard if something screws up. */ -static void __init -hp_init_card(struct net_device *dev) -{ - int irq = dev->irq; - NS8390p_init(dev, 0); - outb_p(irqmap[irq&0x0f] | HP_RUN, - dev->base_addr - NIC_OFFSET + HP_CONFIGURE); -} - -#ifdef MODULE -#define MAX_HP_CARDS 4 /* Max number of HP cards per module */ -static struct net_device *dev_hp[MAX_HP_CARDS]; -static int io[MAX_HP_CARDS]; -static int irq[MAX_HP_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); -MODULE_DESCRIPTION("HP PC-LAN ISA ethernet driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that only a single autoprobe takes place per call. -ISA device autoprobes on a running machine are not recommended. */ -int __init -init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) break; /* only autoprobe 1st one */ - printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n"); - } - dev = alloc_eip_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - if (do_hp_probe(dev) == 0) { - dev_hp[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - free_irq(dev->irq, dev); - release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) { - struct net_device *dev = dev_hp[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c deleted file mode 100644 index 1cd481c..0000000 --- a/drivers/net/hydra.c +++ /dev/null @@ -1,273 +0,0 @@ -/* New Hydra driver using generic 8390 core */ -/* Based on old hydra driver by Topi Kanerva (topi@susanna.oulu.fi) */ - -/* This file is subject to the terms and conditions of the GNU General */ -/* Public License. See the file COPYING in the main directory of the */ -/* Linux distribution for more details. */ - -/* Peter De Schrijver (p2@mind.be) */ -/* Oldenburg 2000 */ - -/* The Amiganet is a Zorro-II board made by Hydra Systems. It contains a */ -/* NS8390 NIC (network interface controller) clone, 16 or 64K on-board RAM */ -/* and 10BASE-2 (thin coax) and AUI connectors. 
*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#define EI_SHIFT(x) (ei_local->reg_offset[x]) -#define ei_inb(port) in_8(port) -#define ei_outb(val,port) out_8(port,val) -#define ei_inb_p(port) in_8(port) -#define ei_outb_p(val,port) out_8(port,val) - -static const char version[] = - "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include "lib8390.c" - -#define NE_EN0_DCFG (0x0e*2) - -#define NESM_START_PG 0x0 /* First page of TX buffer */ -#define NESM_STOP_PG 0x40 /* Last page +1 of RX ring */ - -#define HYDRA_NIC_BASE 0xffe1 -#define HYDRA_ADDRPROM 0xffc0 -#define HYDRA_VERSION "v3.0alpha" - -#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8)) - - -static int __devinit hydra_init_one(struct zorro_dev *z, - const struct zorro_device_id *ent); -static int __devinit hydra_init(struct zorro_dev *z); -static int hydra_open(struct net_device *dev); -static int hydra_close(struct net_device *dev); -static void hydra_reset_8390(struct net_device *dev); -static void hydra_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, int ring_page); -static void hydra_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void hydra_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static void __devexit hydra_remove_one(struct zorro_dev *z); - -static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = { - { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, - { 0 } -}; -MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl); - -static struct zorro_driver hydra_driver = { - .name = "hydra", - .id_table = hydra_zorro_tbl, - .probe = hydra_init_one, - .remove = __devexit_p(hydra_remove_one), -}; - -static int __devinit hydra_init_one(struct zorro_dev *z, - const struct zorro_device_id *ent) -{ - int err; - - if (!request_mem_region(z->resource.start, 0x10000, "Hydra")) - return -EBUSY; - if ((err = hydra_init(z))) { - release_mem_region(z->resource.start, 0x10000); - return -EBUSY; - } - return 0; -} - -static const struct net_device_ops hydra_netdev_ops = { - .ndo_open = hydra_open, - .ndo_stop = hydra_close, - - .ndo_start_xmit = __ei_start_xmit, - .ndo_tx_timeout = __ei_tx_timeout, - .ndo_get_stats = __ei_get_stats, - .ndo_set_multicast_list = __ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = __ei_poll, -#endif -}; - -static int __devinit hydra_init(struct zorro_dev *z) -{ - struct net_device *dev; - unsigned long board = ZTWO_VADDR(z->resource.start); - unsigned long ioaddr = board+HYDRA_NIC_BASE; - const char name[] = "NE2000"; - int start_page, stop_page; - int j; - int err; - - static u32 hydra_offsets[16] = { - 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, - 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, - }; - - dev = ____alloc_ei_netdev(0); - if (!dev) - return -ENOMEM; - - for(j = 0; j < ETHER_ADDR_LEN; j++) - dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j)); - - /* We must set the 8390 for word mode. 
*/ - z_writeb(0x4b, ioaddr + NE_EN0_DCFG); - start_page = NESM_START_PG; - stop_page = NESM_STOP_PG; - - dev->base_addr = ioaddr; - dev->irq = IRQ_AMIGA_PORTS; - - /* Install the Interrupt handler */ - if (request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, "Hydra Ethernet", - dev)) { - free_netdev(dev); - return -EAGAIN; - } - - ei_status.name = name; - ei_status.tx_start_page = start_page; - ei_status.stop_page = stop_page; - ei_status.word16 = 1; - ei_status.bigendian = 1; - - ei_status.rx_start_page = start_page + TX_PAGES; - - ei_status.reset_8390 = hydra_reset_8390; - ei_status.block_input = hydra_block_input; - ei_status.block_output = hydra_block_output; - ei_status.get_8390_hdr = hydra_get_8390_hdr; - ei_status.reg_offset = hydra_offsets; - - dev->netdev_ops = &hydra_netdev_ops; - __NS8390_init(dev, 0); - - err = register_netdev(dev); - if (err) { - free_irq(IRQ_AMIGA_PORTS, dev); - free_netdev(dev); - return err; - } - - zorro_set_drvdata(z, dev); - - pr_info("%s: Hydra at %pR, address %pM (hydra.c " HYDRA_VERSION ")\n", - dev->name, &z->resource, dev->dev_addr); - - return 0; -} - -static int hydra_open(struct net_device *dev) -{ - __ei_open(dev); - return 0; -} - -static int hydra_close(struct net_device *dev) -{ - if (ei_debug > 1) - printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); - __ei_close(dev); - return 0; -} - -static void hydra_reset_8390(struct net_device *dev) -{ - printk(KERN_INFO "Hydra hw reset not there\n"); -} - -static void hydra_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, int ring_page) -{ - int nic_base = dev->base_addr; - short *ptrs; - unsigned long hdr_start= (nic_base-HYDRA_NIC_BASE) + - ((ring_page - NESM_START_PG)<<8); - ptrs = (short *)hdr; - - *(ptrs++) = z_readw(hdr_start); - *((short *)hdr) = WORDSWAP(*((short *)hdr)); - hdr_start += 2; - *(ptrs++) = z_readw(hdr_start); - *((short *)hdr+1) = WORDSWAP(*((short *)hdr+1)); -} - -static void hydra_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - unsigned long nic_base = dev->base_addr; - unsigned long mem_base = nic_base - HYDRA_NIC_BASE; - unsigned long xfer_start = mem_base + ring_offset - (NESM_START_PG<<8); - - if (count&1) - count++; - - if (xfer_start+count > mem_base + (NESM_STOP_PG<<8)) { - int semi_count = (mem_base + (NESM_STOP_PG<<8)) - xfer_start; - - z_memcpy_fromio(skb->data,xfer_start,semi_count); - count -= semi_count; - z_memcpy_fromio(skb->data+semi_count, mem_base, count); - } else - z_memcpy_fromio(skb->data, xfer_start,count); - -} - -static void hydra_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - unsigned long nic_base = dev->base_addr; - unsigned long mem_base = nic_base - HYDRA_NIC_BASE; - - if (count&1) - count++; - - z_memcpy_toio(mem_base+((start_page - NESM_START_PG)<<8), buf, count); -} - -static void __devexit hydra_remove_one(struct zorro_dev *z) -{ - struct net_device *dev = zorro_get_drvdata(z); - - unregister_netdev(dev); - free_irq(IRQ_AMIGA_PORTS, dev); - release_mem_region(ZTWO_PADDR(dev->base_addr)-HYDRA_NIC_BASE, 0x10000); - free_netdev(dev); -} - -static int __init hydra_init_module(void) -{ - return zorro_register_driver(&hydra_driver); -} - -static void __exit hydra_cleanup_module(void) -{ - zorro_unregister_driver(&hydra_driver); -} - -module_init(hydra_init_module); -module_exit(hydra_cleanup_module); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c deleted file mode 100644 index 
05ae214..0000000 --- a/drivers/net/lib8390.c +++ /dev/null @@ -1,1080 +0,0 @@ -/* 8390.c: A general NS8390 ethernet driver core for linux. */ -/* - Written 1992-94 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - - This is the chip-specific code for many 8390-based ethernet adaptors. - This is not a complete driver, it must be combined with board-specific - code such as ne.c, wd.c, 3c503.c, etc. - - Seeing how at least eight drivers use this code, (not counting the - PCMCIA ones either) it is easy to break some card by what seems like - a simple innocent change. Please contact me or Donald if you think - you have found something that needs changing. -- PG - - - Changelog: - - Paul Gortmaker : remove set_bit lock, other cleanups. - Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to - ei_block_input() for eth_io_copy_and_sum(). - Paul Gortmaker : exchange static int ei_pingpong for a #define, - also add better Tx error handling. - Paul Gortmaker : rewrite Rx overrun handling as per NS specs. - Alexey Kuznetsov : use the 8390's six bit hash multicast filter. - Paul Gortmaker : tweak ANK's above multicast changes a bit. - Paul Gortmaker : update packet statistics for v2.1.x - Alan Cox : support arbitrary stupid port mappings on the - 68K Macintosh. Support >16bit I/O spaces - Paul Gortmaker : add kmod support for auto-loading of the 8390 - module by all drivers that require it. - Alan Cox : Spinlocking work, added 'BUG_83C690' - Paul Gortmaker : Separate out Tx timeout code from Tx path. - Paul Gortmaker : Remove old unused single Tx buffer code. - Hayato Fujiwara : Add m32r support. - Paul Gortmaker : use skb_padto() instead of stack scratch area - - Sources: - The National Semiconductor LAN Databook, and the 3Com 3c503 databook. - - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define NS8390_CORE -#include "8390.h" - -#define BUG_83C690 - -/* These are the operational function interfaces to board-specific - routines. - void reset_8390(struct net_device *dev) - Resets the board associated with DEV, including a hardware reset of - the 8390. This is only called when there is a transmit timeout, and - it is always followed by 8390_init(). - void block_output(struct net_device *dev, int count, const unsigned char *buf, - int start_page) - Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The - "page" value uses the 8390's 256-byte pages. - void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page) - Read the 4 byte, page aligned 8390 header. *If* there is a - subsequent read, it will be of the rest of the packet. - void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) - Read COUNT bytes from the packet buffer into the skb data area. Start - reading from RING_OFFSET, the address as the 8390 sees it. This will always - follow the read of the 8390 header. 
-*/ -#define ei_reset_8390 (ei_local->reset_8390) -#define ei_block_output (ei_local->block_output) -#define ei_block_input (ei_local->block_input) -#define ei_get_8390_hdr (ei_local->get_8390_hdr) - -/* use 0 for production, 1 for verification, >2 for debug */ -#ifndef ei_debug -int ei_debug = 1; -#endif - -/* Index to functions. */ -static void ei_tx_intr(struct net_device *dev); -static void ei_tx_err(struct net_device *dev); -static void ei_receive(struct net_device *dev); -static void ei_rx_overrun(struct net_device *dev); - -/* Routines generic to NS8390-based boards. */ -static void NS8390_trigger_send(struct net_device *dev, unsigned int length, - int start_page); -static void do_set_multicast_list(struct net_device *dev); -static void __NS8390_init(struct net_device *dev, int startp); - -/* - * SMP and the 8390 setup. - * - * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is - * a page register that controls bank and packet buffer access. We guard - * this with ei_local->page_lock. Nobody should assume or set the page other - * than zero when the lock is not held. Lock holders must restore page 0 - * before unlocking. Even pure readers must take the lock to protect in - * page 0. - * - * To make life difficult the chip can also be very slow. We therefore can't - * just use spinlocks. For the longer lockups we disable the irq the device - * sits on and hold the lock. We must hold the lock because there is a dual - * processor case other than interrupts (get stats/set multicast list in - * parallel with each other and transmit). - * - * Note: in theory we can just disable the irq on the card _but_ there is - * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs" - * enter lock, take the queued irq. So we waddle instead of flying. - * - * Finally by special arrangement for the purpose of being generally - * annoying the transmit function is called bh atomic. That places - * restrictions on the user context callers as disable_irq won't save - * them. - * - * Additional explanation of problems with locking by Alan Cox: - * - * "The author (me) didn't use spin_lock_irqsave because the slowness of the - * card means that approach caused horrible problems like losing serial data - * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA - * chips with FPGA front ends. - * - * Ok the logic behind the 8390 is very simple: - * - * Things to know - * - IRQ delivery is asynchronous to the PCI bus - * - Blocking the local CPU IRQ via spin locks was too slow - * - The chip has register windows needing locking work - * - * So the path was once (I say once as people appear to have changed it - * in the mean time and it now looks rather bogus if the changes to use - * disable_irq_nosync_irqsave are disabling the local IRQ) - * - * - * Take the page lock - * Mask the IRQ on chip - * Disable the IRQ (but not mask locally- someone seems to have - * broken this with the lock validator stuff) - * [This must be _nosync as the page lock may otherwise - * deadlock us] - * Drop the page lock and turn IRQs back on - * - * At this point an existing IRQ may still be running but we can't - * get a new one - * - * Take the lock (so we know the IRQ has terminated) but don't mask - * the IRQs on the processor - * Set irqlock [for debug] - * - * Transmit (slow as ****) - * - * re-enable the IRQ - * - * - * We have to use disable_irq because otherwise you will get delayed - * interrupts on the APIC bus deadlocking the transmit path. 
- * - * Quite hairy but the chip simply wasn't designed for SMP and you can't - * even ACK an interrupt without risking corrupting other parallel - * activities on the chip." [lkml, 25 Jul 2007] - */ - - - -/** - * ei_open - Open/initialize the board. - * @dev: network device to initialize - * - * This routine goes all-out, setting everything - * up anew at each open, even though many of these registers should only - * need to be set once at boot. - */ -static int __ei_open(struct net_device *dev) -{ - unsigned long flags; - struct ei_device *ei_local = netdev_priv(dev); - - if (dev->watchdog_timeo <= 0) - dev->watchdog_timeo = TX_TIMEOUT; - - /* - * Grab the page lock so we own the register set, then call - * the init function. - */ - - spin_lock_irqsave(&ei_local->page_lock, flags); - __NS8390_init(dev, 1); - /* Set the flag before we drop the lock, That way the IRQ arrives - after its set and we get no silly warnings */ - netif_start_queue(dev); - spin_unlock_irqrestore(&ei_local->page_lock, flags); - ei_local->irqlock = 0; - return 0; -} - -/** - * ei_close - shut down network device - * @dev: network device to close - * - * Opposite of ei_open(). Only used when "ifconfig down" is done. - */ -static int __ei_close(struct net_device *dev) -{ - struct ei_device *ei_local = netdev_priv(dev); - unsigned long flags; - - /* - * Hold the page lock during close - */ - - spin_lock_irqsave(&ei_local->page_lock, flags); - __NS8390_init(dev, 0); - spin_unlock_irqrestore(&ei_local->page_lock, flags); - netif_stop_queue(dev); - return 0; -} - -/** - * ei_tx_timeout - handle transmit time out condition - * @dev: network device which has apparently fallen asleep - * - * Called by kernel when device never acknowledges a transmit has - * completed (or failed) - i.e. never posted a Tx related interrupt. - */ - -static void __ei_tx_timeout(struct net_device *dev) -{ - unsigned long e8390_base = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); - unsigned long flags; - - dev->stats.tx_errors++; - - spin_lock_irqsave(&ei_local->page_lock, flags); - txsr = ei_inb(e8390_base+EN0_TSR); - isr = ei_inb(e8390_base+EN0_ISR); - spin_unlock_irqrestore(&ei_local->page_lock, flags); - - netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n", - (txsr & ENTSR_ABT) ? "excess collisions." : - (isr) ? "lost interrupt?" : "cable problem?", - txsr, isr, tickssofar); - - if (!isr && !dev->stats.tx_packets) { - /* The 8390 probably hasn't gotten on the cable yet. */ - ei_local->interface_num ^= 1; /* Try a different xcvr. */ - } - - /* Ugly but a reset can be slow, yet must be protected */ - - disable_irq_nosync_lockdep(dev->irq); - spin_lock(&ei_local->page_lock); - - /* Try to restart the card. Perhaps the user has fixed something. */ - ei_reset_8390(dev); - __NS8390_init(dev, 1); - - spin_unlock(&ei_local->page_lock); - enable_irq_lockdep(dev->irq); - netif_wake_queue(dev); -} - -/** - * ei_start_xmit - begin packet transmission - * @skb: packet to be sent - * @dev: network device to which packet is sent - * - * Sends a packet to an 8390 network device. 
- */ - -static netdev_tx_t __ei_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - unsigned long e8390_base = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - int send_length = skb->len, output_page; - unsigned long flags; - char buf[ETH_ZLEN]; - char *data = skb->data; - - if (skb->len < ETH_ZLEN) { - memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */ - memcpy(buf, data, skb->len); - send_length = ETH_ZLEN; - data = buf; - } - - /* Mask interrupts from the ethercard. - SMP: We have to grab the lock here otherwise the IRQ handler - on another CPU can flip window and race the IRQ mask set. We end - up trashing the mcast filter not disabling irqs if we don't lock */ - - spin_lock_irqsave(&ei_local->page_lock, flags); - ei_outb_p(0x00, e8390_base + EN0_IMR); - spin_unlock_irqrestore(&ei_local->page_lock, flags); - - - /* - * Slow phase with lock held. - */ - - disable_irq_nosync_lockdep_irqsave(dev->irq, &flags); - - spin_lock(&ei_local->page_lock); - - ei_local->irqlock = 1; - - /* - * We have two Tx slots available for use. Find the first free - * slot, and then perform some sanity checks. With two Tx bufs, - * you get very close to transmitting back-to-back packets. With - * only one Tx buf, the transmitter sits idle while you reload the - * card, leaving a substantial gap between each transmitted packet. - */ - - if (ei_local->tx1 == 0) { - output_page = ei_local->tx_start_page; - ei_local->tx1 = send_length; - if (ei_debug && ei_local->tx2 > 0) - netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n", - ei_local->tx2, ei_local->lasttx, ei_local->txing); - } else if (ei_local->tx2 == 0) { - output_page = ei_local->tx_start_page + TX_PAGES/2; - ei_local->tx2 = send_length; - if (ei_debug && ei_local->tx1 > 0) - netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n", - ei_local->tx1, ei_local->lasttx, ei_local->txing); - } else { /* We should never get here. */ - if (ei_debug) - netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n", - ei_local->tx1, ei_local->tx2, ei_local->lasttx); - ei_local->irqlock = 0; - netif_stop_queue(dev); - ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); - spin_unlock(&ei_local->page_lock); - enable_irq_lockdep_irqrestore(dev->irq, &flags); - dev->stats.tx_errors++; - return NETDEV_TX_BUSY; - } - - /* - * Okay, now upload the packet and trigger a send if the transmitter - * isn't already sending. If it is busy, the interrupt handler will - * trigger the send later, upon receiving a Tx done interrupt. - */ - - ei_block_output(dev, send_length, data, output_page); - - if (!ei_local->txing) { - ei_local->txing = 1; - NS8390_trigger_send(dev, send_length, output_page); - if (output_page == ei_local->tx_start_page) { - ei_local->tx1 = -1; - ei_local->lasttx = -1; - } else { - ei_local->tx2 = -1; - ei_local->lasttx = -2; - } - } else - ei_local->txqueue++; - - if (ei_local->tx1 && ei_local->tx2) - netif_stop_queue(dev); - else - netif_start_queue(dev); - - /* Turn 8390 interrupts back on. */ - ei_local->irqlock = 0; - ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); - - spin_unlock(&ei_local->page_lock); - enable_irq_lockdep_irqrestore(dev->irq, &flags); - skb_tx_timestamp(skb); - dev_kfree_skb(skb); - dev->stats.tx_bytes += send_length; - - return NETDEV_TX_OK; -} - -/** - * ei_interrupt - handle the interrupts from an 8390 - * @irq: interrupt number - * @dev_id: a pointer to the net_device - * - * Handle the ether interface interrupts. 
We pull packets from - * the 8390 via the card specific functions and fire them at the networking - * stack. We also handle transmit completions and wake the transmit path if - * necessary. We also update the counters and do other housekeeping as - * needed. - */ - -static irqreturn_t __ei_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - unsigned long e8390_base = dev->base_addr; - int interrupts, nr_serviced = 0; - struct ei_device *ei_local = netdev_priv(dev); - - /* - * Protect the irq test too. - */ - - spin_lock(&ei_local->page_lock); - - if (ei_local->irqlock) { - /* - * This might just be an interrupt for a PCI device sharing - * this line - */ - netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n", - ei_inb_p(e8390_base + EN0_ISR), - ei_inb_p(e8390_base + EN0_IMR)); - spin_unlock(&ei_local->page_lock); - return IRQ_NONE; - } - - /* Change to page 0 and read the intr status reg. */ - ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); - if (ei_debug > 3) - netdev_dbg(dev, "interrupt(isr=%#2.2x)\n", - ei_inb_p(e8390_base + EN0_ISR)); - - /* !!Assumption!! -- we stay in page 0. Don't break this. */ - while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 && - ++nr_serviced < MAX_SERVICE) { - if (!netif_running(dev)) { - netdev_warn(dev, "interrupt from stopped card\n"); - /* rmk - acknowledge the interrupts */ - ei_outb_p(interrupts, e8390_base + EN0_ISR); - interrupts = 0; - break; - } - if (interrupts & ENISR_OVER) - ei_rx_overrun(dev); - else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) { - /* Got a good (?) packet. */ - ei_receive(dev); - } - /* Push the next to-transmit packet through. */ - if (interrupts & ENISR_TX) - ei_tx_intr(dev); - else if (interrupts & ENISR_TX_ERR) - ei_tx_err(dev); - - if (interrupts & ENISR_COUNTERS) { - dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0); - dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1); - dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2); - ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */ - } - - /* Ignore any RDC interrupts that make it back to here. */ - if (interrupts & ENISR_RDC) - ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR); - - ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); - } - - if (interrupts && ei_debug) { - ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); - if (nr_serviced >= MAX_SERVICE) { - /* 0xFF is valid for a card removal */ - if (interrupts != 0xFF) - netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n", - interrupts); - ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ - } else { - netdev_warn(dev, "unknown interrupt %#2x\n", interrupts); - ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ - } - } - spin_unlock(&ei_local->page_lock); - return IRQ_RETVAL(nr_serviced > 0); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void __ei_poll(struct net_device *dev) -{ - disable_irq(dev->irq); - __ei_interrupt(dev->irq, dev); - enable_irq(dev->irq); -} -#endif - -/** - * ei_tx_err - handle transmitter error - * @dev: network device which threw the exception - * - * A transmitter error has happened. Most likely excess collisions (which - * is a fairly normal condition). If the error is one where the Tx will - * have been aborted, we try and send another one right away, instead of - * letting the failed packet sit and collect dust in the Tx buffer. 
This - * is a much better solution as it avoids kernel based Tx timeouts, and - * an unnecessary card reset. - * - * Called with lock held. - */ - -static void ei_tx_err(struct net_device *dev) -{ - unsigned long e8390_base = dev->base_addr; - /* ei_local is used on some platforms via the EI_SHIFT macro */ - struct ei_device *ei_local __maybe_unused = netdev_priv(dev); - unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR); - unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); - -#ifdef VERBOSE_ERROR_DUMP - netdev_dbg(dev, "transmitter error (%#2x):", txsr); - if (txsr & ENTSR_ABT) - pr_cont(" excess-collisions "); - if (txsr & ENTSR_ND) - pr_cont(" non-deferral "); - if (txsr & ENTSR_CRS) - pr_cont(" lost-carrier "); - if (txsr & ENTSR_FU) - pr_cont(" FIFO-underrun "); - if (txsr & ENTSR_CDH) - pr_cont(" lost-heartbeat "); - pr_cont("\n"); -#endif - - ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */ - - if (tx_was_aborted) - ei_tx_intr(dev); - else { - dev->stats.tx_errors++; - if (txsr & ENTSR_CRS) - dev->stats.tx_carrier_errors++; - if (txsr & ENTSR_CDH) - dev->stats.tx_heartbeat_errors++; - if (txsr & ENTSR_OWC) - dev->stats.tx_window_errors++; - } -} - -/** - * ei_tx_intr - transmit interrupt handler - * @dev: network device for which tx intr is handled - * - * We have finished a transmit: check for errors and then trigger the next - * packet to be sent. Called with lock held. - */ - -static void ei_tx_intr(struct net_device *dev) -{ - unsigned long e8390_base = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - int status = ei_inb(e8390_base + EN0_TSR); - - ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */ - - /* - * There are two Tx buffers, see which one finished, and trigger - * the send of another one if it exists. - */ - ei_local->txqueue--; - - if (ei_local->tx1 < 0) { - if (ei_local->lasttx != 1 && ei_local->lasttx != -1) - pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n", - ei_local->name, ei_local->lasttx, ei_local->tx1); - ei_local->tx1 = 0; - if (ei_local->tx2 > 0) { - ei_local->txing = 1; - NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); - dev->trans_start = jiffies; - ei_local->tx2 = -1, - ei_local->lasttx = 2; - } else - ei_local->lasttx = 20, ei_local->txing = 0; - } else if (ei_local->tx2 < 0) { - if (ei_local->lasttx != 2 && ei_local->lasttx != -2) - pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n", - ei_local->name, ei_local->lasttx, ei_local->tx2); - ei_local->tx2 = 0; - if (ei_local->tx1 > 0) { - ei_local->txing = 1; - NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); - dev->trans_start = jiffies; - ei_local->tx1 = -1; - ei_local->lasttx = 1; - } else - ei_local->lasttx = 10, ei_local->txing = 0; - } /* else - netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n", - ei_local->lasttx); -*/ - - /* Minimize Tx latency: update the statistics after we restart TXing. 
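-	   The TSR bits tested below map onto the standard netdev counters:
-	   ENTSR_COL counts collisions, ENTSR_PTX flags a good transmit, and
-	   an abort (ENTSR_ABT) is charged as 16 collisions, the usual
-	   accounting for an excessive-collision abort.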
*/ - if (status & ENTSR_COL) - dev->stats.collisions++; - if (status & ENTSR_PTX) - dev->stats.tx_packets++; - else { - dev->stats.tx_errors++; - if (status & ENTSR_ABT) { - dev->stats.tx_aborted_errors++; - dev->stats.collisions += 16; - } - if (status & ENTSR_CRS) - dev->stats.tx_carrier_errors++; - if (status & ENTSR_FU) - dev->stats.tx_fifo_errors++; - if (status & ENTSR_CDH) - dev->stats.tx_heartbeat_errors++; - if (status & ENTSR_OWC) - dev->stats.tx_window_errors++; - } - netif_wake_queue(dev); -} - -/** - * ei_receive - receive some packets - * @dev: network device with which receive will be run - * - * We have a good packet(s), get it/them out of the buffers. - * Called with lock held. - */ - -static void ei_receive(struct net_device *dev) -{ - unsigned long e8390_base = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - unsigned char rxing_page, this_frame, next_frame; - unsigned short current_offset; - int rx_pkt_count = 0; - struct e8390_pkt_hdr rx_frame; - int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page; - - while (++rx_pkt_count < 10) { - int pkt_len, pkt_stat; - - /* Get the rx page (incoming packet pointer). */ - ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD); - rxing_page = ei_inb_p(e8390_base + EN1_CURPAG); - ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); - - /* Remove one frame from the ring. Boundary is always a page behind. */ - this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1; - if (this_frame >= ei_local->stop_page) - this_frame = ei_local->rx_start_page; - - /* Someday we'll omit the previous, iff we never get this message. - (There is at least one clone claimed to have a problem.) - - Keep quiet if it looks like a card removal. One problem here - is that some clones crash in roughly the same way. - */ - if (ei_debug > 0 && - this_frame != ei_local->current_page && - (this_frame != 0x0 || rxing_page != 0xFF)) - netdev_err(dev, "mismatched read page pointers %2x vs %2x\n", - this_frame, ei_local->current_page); - - if (this_frame == rxing_page) /* Read all the frames? */ - break; /* Done for now */ - - current_offset = this_frame << 8; - ei_get_8390_hdr(dev, &rx_frame, this_frame); - - pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); - pkt_stat = rx_frame.status; - - next_frame = this_frame + 1 + ((pkt_len+4)>>8); - - /* Check for bogosity warned by 3c503 book: the status byte is never - written. This happened a lot during testing! This code should be - cleaned up someday. 
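-	   We accept rx_frame.next if it falls within one page of the value
-	   computed from the packet length, modulo the receive ring size;
-	   anything else is treated as a trashed header and the ring is
-	   resynchronised to the page the card is currently receiving into.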
*/ - if (rx_frame.next != next_frame && - rx_frame.next != next_frame + 1 && - rx_frame.next != next_frame - num_rx_pages && - rx_frame.next != next_frame + 1 - num_rx_pages) { - ei_local->current_page = rxing_page; - ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY); - dev->stats.rx_errors++; - continue; - } - - if (pkt_len < 60 || pkt_len > 1518) { - if (ei_debug) - netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n", - rx_frame.count, rx_frame.status, - rx_frame.next); - dev->stats.rx_errors++; - dev->stats.rx_length_errors++; - } else if ((pkt_stat & 0x0F) == ENRSR_RXOK) { - struct sk_buff *skb; - - skb = dev_alloc_skb(pkt_len+2); - if (skb == NULL) { - if (ei_debug > 1) - netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n", - pkt_len); - dev->stats.rx_dropped++; - break; - } else { - skb_reserve(skb, 2); /* IP headers on 16 byte boundaries */ - skb_put(skb, pkt_len); /* Make room */ - ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); - skb->protocol = eth_type_trans(skb, dev); - if (!skb_defer_rx_timestamp(skb)) - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - if (pkt_stat & ENRSR_PHY) - dev->stats.multicast++; - } - } else { - if (ei_debug) - netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n", - rx_frame.status, rx_frame.next, - rx_frame.count); - dev->stats.rx_errors++; - /* NB: The NIC counts CRC, frame and missed errors. */ - if (pkt_stat & ENRSR_FO) - dev->stats.rx_fifo_errors++; - } - next_frame = rx_frame.next; - - /* This _should_ never happen: it's here for avoiding bad clones. */ - if (next_frame >= ei_local->stop_page) { - netdev_notice(dev, "next frame inconsistency, %#2x\n", - next_frame); - next_frame = ei_local->rx_start_page; - } - ei_local->current_page = next_frame; - ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); - } - - /* We used to also ack ENISR_OVER here, but that would sometimes mask - a real overrun, leaving the 8390 in a stopped state with rec'vr off. */ - ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR); -} - -/** - * ei_rx_overrun - handle receiver overrun - * @dev: network device which threw exception - * - * We have a receiver overrun: we have to kick the 8390 to get it started - * again. Problem is that you have to kick it exactly as NS prescribes in - * the updated datasheets, or "the NIC may act in an unpredictable manner." - * This includes causing "the NIC to defer indefinitely when it is stopped - * on a busy network." Ugh. - * Called with lock held. Don't call this with the interrupts off or your - * computer will hate you - it takes 10ms or so. - */ - -static void ei_rx_overrun(struct net_device *dev) -{ - unsigned long e8390_base = dev->base_addr; - unsigned char was_txing, must_resend = 0; - /* ei_local is used on some platforms via the EI_SHIFT macro */ - struct ei_device *ei_local __maybe_unused = netdev_priv(dev); - - /* - * Record whether a Tx was in progress and then issue the - * stop command. - */ - was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS; - ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); - - if (ei_debug > 1) - netdev_dbg(dev, "Receiver overrun\n"); - dev->stats.rx_over_errors++; - - /* - * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. - * Early datasheets said to poll the reset bit, but now they say that - * it "is not a reliable indicator and subsequently should be ignored." - * We wait at least 10ms. 
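- * (mdelay() busy-waits, which is why the function comment above warns
- * against calling this with interrupts off: the CPU spins for the
- * full 10ms with the page lock held.)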
- */ - - mdelay(10); - - /* - * Reset RBCR[01] back to zero as per magic incantation. - */ - ei_outb_p(0x00, e8390_base+EN0_RCNTLO); - ei_outb_p(0x00, e8390_base+EN0_RCNTHI); - - /* - * See if any Tx was interrupted or not. According to NS, this - * step is vital, and skipping it will cause no end of havoc. - */ - - if (was_txing) { - unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); - if (!tx_completed) - must_resend = 1; - } - - /* - * Have to enter loopback mode and then restart the NIC before - * you are allowed to slurp packets up off the ring. - */ - ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); - ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); - - /* - * Clear the Rx ring of all the debris, and ack the interrupt. - */ - ei_receive(dev); - ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR); - - /* - * Leave loopback mode, and resend any packet that got stopped. - */ - ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); - if (must_resend) - ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); -} - -/* - * Collect the stats. This is called unlocked and from several contexts. - */ - -static struct net_device_stats *__ei_get_stats(struct net_device *dev) -{ - unsigned long ioaddr = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - unsigned long flags; - - /* If the card is stopped, just return the present stats. */ - if (!netif_running(dev)) - return &dev->stats; - - spin_lock_irqsave(&ei_local->page_lock, flags); - /* Read the counter registers, assuming we are in page 0. */ - dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0); - dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1); - dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2); - spin_unlock_irqrestore(&ei_local->page_lock, flags); - - return &dev->stats; -} - -/* - * Form the 64 bit 8390 multicast table from the linked list of addresses - * associated with this dev structure. - */ - -static inline void make_mc_bits(u8 *bits, struct net_device *dev) -{ - struct netdev_hw_addr *ha; - - netdev_for_each_mc_addr(ha, dev) { - u32 crc = ether_crc(ETH_ALEN, ha->addr); - /* - * The 8390 uses the 6 most significant bits of the - * CRC to index the multicast table. - */ - bits[crc>>29] |= (1<<((crc>>26)&7)); - } -} - -/** - * do_set_multicast_list - set/clear multicast filter - * @dev: net device for which multicast filter is adjusted - * - * Set or clear the multicast filter for this adaptor. May be called - * from a BH in 2.1.x. Must be called with lock held. - */ - -static void do_set_multicast_list(struct net_device *dev) -{ - unsigned long e8390_base = dev->base_addr; - int i; - struct ei_device *ei_local = netdev_priv(dev); - - if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { - memset(ei_local->mcfilter, 0, 8); - if (!netdev_mc_empty(dev)) - make_mc_bits(ei_local->mcfilter, dev); - } else - memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */ - - /* - * DP8390 manuals don't specify any magic sequence for altering - * the multicast regs on an already running card. To be safe, we - * ensure multicast mode is off prior to loading up the new hash - * table. If this proves to be not enough, we can always resort - * to stopping the NIC, loading the table and then restarting. - * - * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC - * Elite16) appear to be write-only. The NS 8390 data sheet lists - * them as r/w so this is a bug. 
The SMC 83C790 (SMC Ultra and - * Ultra32 EISA) appears to have this bug fixed. - */ - - if (netif_running(dev)) - ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); - ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); - for (i = 0; i < 8; i++) { - ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i)); -#ifndef BUG_83C690 - if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i]) - netdev_err(dev, "Multicast filter read/write mismap %d\n", - i); -#endif - } - ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD); - - if (dev->flags&IFF_PROMISC) - ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR); - else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) - ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR); - else - ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); -} - -/* - * Called without lock held. This is invoked from user context and may - * be parallel to just about everything else. Its also fairly quick and - * not called too often. Must protect against both bh and irq users - */ - -static void __ei_set_multicast_list(struct net_device *dev) -{ - unsigned long flags; - struct ei_device *ei_local = netdev_priv(dev); - - spin_lock_irqsave(&ei_local->page_lock, flags); - do_set_multicast_list(dev); - spin_unlock_irqrestore(&ei_local->page_lock, flags); -} - -/** - * ethdev_setup - init rest of 8390 device struct - * @dev: network device structure to init - * - * Initialize the rest of the 8390 device structure. Do NOT __init - * this, as it is used by 8390 based modular drivers too. - */ - -static void ethdev_setup(struct net_device *dev) -{ - struct ei_device *ei_local = netdev_priv(dev); - if (ei_debug > 1) - printk(version); - - ether_setup(dev); - - spin_lock_init(&ei_local->page_lock); -} - -/** - * alloc_ei_netdev - alloc_etherdev counterpart for 8390 - * @size: extra bytes to allocate - * - * Allocate 8390-specific net_device. - */ -static struct net_device *____alloc_ei_netdev(int size) -{ - return alloc_netdev(sizeof(struct ei_device) + size, "eth%d", - ethdev_setup); -} - - - - -/* This page of functions should be 8390 generic */ -/* Follow National Semi's recommendations for initializing the "NIC". */ - -/** - * NS8390_init - initialize 8390 hardware - * @dev: network device to initialize - * @startp: boolean. non-zero value to initiate chip processing - * - * Must be called with lock held. - */ - -static void __NS8390_init(struct net_device *dev, int startp) -{ - unsigned long e8390_base = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - int i; - int endcfg = ei_local->word16 - ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0)) - : 0x48; - - if (sizeof(struct e8390_pkt_hdr) != 4) - panic("8390.c: header struct mispacked\n"); - /* Follow National Semi's recommendations for initing the DP83902. */ - ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ - ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ - /* Clear the remote byte count registers. */ - ei_outb_p(0x00, e8390_base + EN0_RCNTLO); - ei_outb_p(0x00, e8390_base + EN0_RCNTHI); - /* Set to monitor and loopback mode -- this is vital!. */ - ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */ - ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ - /* Set the transmit page and receive ring. 
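-	   TPSR points at the Tx buffer, PSTART/PSTOP bracket the Rx ring,
-	   and BOUNDARY is parked one page behind the current page, per the
-	   DP8390 ring-buffer scheme.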
*/ - ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); - ei_local->tx1 = ei_local->tx2 = 0; - ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); - ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/ - ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ - ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); - /* Clear the pending interrupts and mask. */ - ei_outb_p(0xFF, e8390_base + EN0_ISR); - ei_outb_p(0x00, e8390_base + EN0_IMR); - - /* Copy the station address into the DS8390 registers. */ - - ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */ - for (i = 0; i < 6; i++) { - ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); - if (ei_debug > 1 && - ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i]) - netdev_err(dev, "Hw. address read/write mismap %d\n", i); - } - - ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); - ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); - - ei_local->tx1 = ei_local->tx2 = 0; - ei_local->txing = 0; - - if (startp) { - ei_outb_p(0xff, e8390_base + EN0_ISR); - ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); - ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); - ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */ - /* 3c503 TechMan says rxconfig only after the NIC is started. */ - ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */ - do_set_multicast_list(dev); /* (re)load the mcast table */ - } -} - -/* Trigger a transmit start, assuming the length is valid. - Always called with the page lock held */ - -static void NS8390_trigger_send(struct net_device *dev, unsigned int length, - int start_page) -{ - unsigned long e8390_base = dev->base_addr; - struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev); - - ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); - - if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) { - netdev_warn(dev, "trigger_send() called with the transmitter busy\n"); - return; - } - ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO); - ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI); - ei_outb_p(start_page, e8390_base + EN0_TPSR); - ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD); -} diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c deleted file mode 100644 index f9888d2..0000000 --- a/drivers/net/lne390.c +++ /dev/null @@ -1,434 +0,0 @@ -/* - lne390.c - - Linux driver for Mylex LNE390 EISA Network Adapter - - Copyright (C) 1996-1998, Paul Gortmaker. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - Information and Code Sources: - - 1) Based upon framework of es3210 driver. - 2) The existing myriad of other Linux 8390 drivers by Donald Becker. - 3) Russ Nelson's asm packet driver provided additional info. - 4) Info for getting IRQ and sh-mem gleaned from the EISA cfg files. - - The LNE390 is an EISA shared memory NS8390 implementation. Note - that all memory copies to/from the board must be 32bit transfers. - There are two versions of the card: the lne390a and the lne390b. - Going by the EISA cfg files, the "a" has jumpers to select between - BNC/AUI, but the "b" also has RJ-45 and selection is via the SCU. - The shared memory address selection is also slightly different. - Note that shared memory address > 1MB are supported with this driver. 
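-
-	(The 32bit-only transfer rule is why the block I/O routines below
-	round every transfer count up to a multiple of four bytes.)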
- - You can try if you want more info, as I've - never even seen one of these cards. :) - - Arnaldo Carvalho de Melo - 2000/09/01 - - get rid of check_region - - no need to check if dev == NULL in lne390_probe1 -*/ - -static const char *version = - "lne390.c: Driver revision v0.99.1, 01/09/2000\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "8390.h" - -#define DRV_NAME "lne390" - -static int lne390_probe1(struct net_device *dev, int ioaddr); - -static void lne390_reset_8390(struct net_device *dev); - -static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); -static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); -static void lne390_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); - -#define LNE390_START_PG 0x00 /* First page of TX buffer */ -#define LNE390_STOP_PG 0x80 /* Last page +1 of RX ring */ - -#define LNE390_ID_PORT 0xc80 /* Same for all EISA cards */ -#define LNE390_IO_EXTENT 0x20 -#define LNE390_SA_PROM 0x16 /* Start of e'net addr. */ -#define LNE390_RESET_PORT 0xc84 /* From the pkt driver source */ -#define LNE390_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */ - -#define LNE390_ADDR0 0x00 /* 3 byte vendor prefix */ -#define LNE390_ADDR1 0x80 -#define LNE390_ADDR2 0xe5 - -#define LNE390_ID0 0x10009835 /* 0x3598 = 01101 01100 11000 = mlx */ -#define LNE390_ID1 0x11009835 /* above is the 390A, this is 390B */ - -#define LNE390_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */ -#define LNE390_CFG2 0xc90 - -/* - * You can OR any of the following bits together and assign it - * to LNE390_DEBUG to get verbose driver info during operation. - * Currently only the probe one is implemented. - */ - -#define LNE390_D_PROBE 0x01 -#define LNE390_D_RX_PKT 0x02 -#define LNE390_D_TX_PKT 0x04 -#define LNE390_D_IRQ 0x08 - -#define LNE390_DEBUG 0 - -static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; -static unsigned int shmem_mapA[] __initdata = {0xff, 0xfe, 0xfd, 0xfff, 0xffe, 0xffc, 0x0d, 0x0}; -static unsigned int shmem_mapB[] __initdata = {0xff, 0xfe, 0x0e, 0xfff, 0xffe, 0xffc, 0x0d, 0x0}; - -/* - * Probe for the card. The best way is to read the EISA ID if it - * is known. Then we can check the prefix of the station address - * PROM for a match against the value assigned to Mylex. - */ - -static int __init do_lne390_probe(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - int irq = dev->irq; - int mem_start = dev->mem_start; - int ret; - - if (ioaddr > 0x1ff) { /* Check a single specified location. */ - if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME)) - return -EBUSY; - ret = lne390_probe1(dev, ioaddr); - if (ret) - release_region(ioaddr, LNE390_IO_EXTENT); - return ret; - } - else if (ioaddr > 0) /* Don't probe at all. */ - return -ENXIO; - - if (!EISA_bus) { -#if LNE390_DEBUG & LNE390_D_PROBE - printk("lne390-debug: Not an EISA bus. Not probing high ports.\n"); -#endif - return -ENXIO; - } - - /* EISA spec allows for up to 16 slots, but 8 is typical. 
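-	   Each slot decodes its I/O range at (slot << 12), hence the walk
-	   over 0x1000..0x8000 below.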
*/ - for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { - if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME)) - continue; - if (lne390_probe1(dev, ioaddr) == 0) - return 0; - release_region(ioaddr, LNE390_IO_EXTENT); - dev->irq = irq; - dev->mem_start = mem_start; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init lne390_probe(int unit) -{ - struct net_device *dev = alloc_ei_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_lne390_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static int __init lne390_probe1(struct net_device *dev, int ioaddr) -{ - int i, revision, ret; - unsigned long eisa_id; - - if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV; - -#if LNE390_DEBUG & LNE390_D_PROBE - printk("lne390-debug: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + LNE390_ID_PORT)); - printk("lne390-debug: config regs: %#x %#x\n", - inb(ioaddr + LNE390_CFG1), inb(ioaddr + LNE390_CFG2)); -#endif - - -/* Check the EISA ID of the card. */ - eisa_id = inl(ioaddr + LNE390_ID_PORT); - if ((eisa_id != LNE390_ID0) && (eisa_id != LNE390_ID1)) { - return -ENODEV; - } - - revision = (eisa_id >> 24) & 0x01; /* 0 = rev A, 1 rev B */ - -#if 0 -/* Check the Mylex vendor ID as well. Not really required. */ - if (inb(ioaddr + LNE390_SA_PROM + 0) != LNE390_ADDR0 - || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1 - || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) { - printk("lne390.c: card not found"); - for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i)); - printk(" (invalid prefix).\n"); - return -ENODEV; - } -#endif - - for(i = 0; i < ETHER_ADDR_LEN; i++) - dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i); - printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n", - 0xa+revision, ioaddr/0x1000, dev->dev_addr); - - printk("lne390.c: "); - - /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */ - if (dev->irq == 0) { - unsigned char irq_reg = inb(ioaddr + LNE390_CFG2) >> 3; - dev->irq = irq_map[irq_reg & 0x07]; - printk("using"); - } else { - /* This is useless unless we reprogram the card here too */ - if (dev->irq == 2) dev->irq = 9; /* Doh! */ - printk("assigning"); - } - printk(" IRQ %d,", dev->irq); - - if ((ret = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) { - printk (" unable to get IRQ %d.\n", dev->irq); - return ret; - } - - if (dev->mem_start == 0) { - unsigned char mem_reg = inb(ioaddr + LNE390_CFG2) & 0x07; - - if (revision) /* LNE390B */ - dev->mem_start = shmem_mapB[mem_reg] * 0x10000; - else /* LNE390A */ - dev->mem_start = shmem_mapA[mem_reg] * 0x10000; - printk(" using "); - } else { - /* Should check for value in shmem_map and reprogram the card to use it */ - dev->mem_start &= 0xfff0000; - printk(" assigning "); - } - - printk("%dkB memory at physical address %#lx\n", - LNE390_STOP_PG/4, dev->mem_start); - - /* - BEWARE!! Some dain-bramaged EISA SCUs will allow you to put - the card mem within the region covered by `normal' RAM !!! - - ioremap() will fail in that case. 
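-	   The error path below catches that and tells the user to move the
-	   shared memory window with the EISA configuration utility (SCU).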
- */ - ei_status.mem = ioremap(dev->mem_start, LNE390_STOP_PG*0x100); - if (!ei_status.mem) { - printk(KERN_ERR "lne390.c: Unable to remap card memory above 1MB !!\n"); - printk(KERN_ERR "lne390.c: Try using EISA SCU to set memory below 1MB.\n"); - printk(KERN_ERR "lne390.c: Driver NOT installed.\n"); - ret = -EAGAIN; - goto cleanup; - } - printk("lne390.c: remapped %dkB card memory to virtual address %p\n", - LNE390_STOP_PG/4, ei_status.mem); - - dev->mem_start = (unsigned long)ei_status.mem; - dev->mem_end = dev->mem_start + (LNE390_STOP_PG - LNE390_START_PG)*256; - - /* The 8390 offset is zero for the LNE390 */ - dev->base_addr = ioaddr; - - ei_status.name = "LNE390"; - ei_status.tx_start_page = LNE390_START_PG; - ei_status.rx_start_page = LNE390_START_PG + TX_PAGES; - ei_status.stop_page = LNE390_STOP_PG; - ei_status.word16 = 1; - - if (ei_debug > 0) - printk(version); - - ei_status.reset_8390 = &lne390_reset_8390; - ei_status.block_input = &lne390_block_input; - ei_status.block_output = &lne390_block_output; - ei_status.get_8390_hdr = &lne390_get_8390_hdr; - - dev->netdev_ops = &ei_netdev_ops; - NS8390_init(dev, 0); - - ret = register_netdev(dev); - if (ret) - goto unmap; - return 0; -unmap: - if (ei_status.reg0) - iounmap(ei_status.mem); -cleanup: - free_irq(dev->irq, dev); - return ret; -} - -/* - * Reset as per the packet driver method. Judging by the EISA cfg - * file, this just toggles the "Board Enable" bits (bit 2 and 0). - */ - -static void lne390_reset_8390(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - - outb(0x04, ioaddr + LNE390_RESET_PORT); - if (ei_debug > 1) printk("%s: resetting the LNE390...", dev->name); - - mdelay(2); - - ei_status.txing = 0; - outb(0x01, ioaddr + LNE390_RESET_PORT); - if (ei_debug > 1) printk("reset done\n"); -} - -/* - * Note: In the following three functions is the implicit assumption - * that the associated memcpy will only use "rep; movsl" as long as - * we keep the counts as some multiple of doublewords. This is a - * requirement of the hardware, and also prevents us from using - * eth_io_copy_and_sum() since we can't guarantee it will limit - * itself to doubleword access. - */ - -/* - * Grab the 8390 specific header. Similar to the block_input routine, but - * we don't need to be concerned with ring wrap as the header will be at - * the start of a page, so we optimize accordingly. (A single doubleword.) - */ - -static void -lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - LNE390_START_PG)<<8); - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ -} - -/* - * Block input and output are easy on shared memory ethercards, the only - * complication is when the ring buffer wraps. The count will already - * be rounded up to a doubleword value via lne390_get_8390_hdr() above. - */ - -static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset) -{ - void __iomem *xfer_start = ei_status.mem + ring_offset - (LNE390_START_PG<<8); - - if (ring_offset + count > (LNE390_STOP_PG<<8)) { - /* Packet wraps over end of ring buffer. */ - int semi_count = (LNE390_STOP_PG<<8) - ring_offset; - memcpy_fromio(skb->data, xfer_start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, - ei_status.mem + (TX_PAGES<<8), count); - } else { - /* Packet is in one chunk. 
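-		   A single memcpy_fromio() covers it.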
*/ - memcpy_fromio(skb->data, xfer_start, count); - } -} - -static void lne390_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - LNE390_START_PG)<<8); - - count = (count + 3) & ~3; /* Round up to doubleword */ - memcpy_toio(shmem, buf, count); -} - - -#ifdef MODULE -#define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */ -static struct net_device *dev_lne[MAX_LNE_CARDS]; -static int io[MAX_LNE_CARDS]; -static int irq[MAX_LNE_CARDS]; -static int mem[MAX_LNE_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s)"); -MODULE_PARM_DESC(mem, "memory base address(es)"); -MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); -MODULE_LICENSE("GPL"); - -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) { - if (io[this_dev] == 0 && this_dev != 0) - break; - dev = alloc_ei_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_start = mem[this_dev]; - if (do_lne390_probe(dev) == 0) { - dev_lne[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "lne390.c: No LNE390 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - free_irq(dev->irq, dev); - release_region(dev->base_addr, LNE390_IO_EXTENT); - iounmap(ei_status.mem); -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) { - struct net_device *dev = dev_lne[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ - diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c deleted file mode 100644 index f84f5e6..0000000 --- a/drivers/net/mac8390.c +++ /dev/null @@ -1,874 +0,0 @@ -/* mac8390.c: New driver for 8390-based Nubus (or Nubus-alike) - Ethernet cards on Linux */ -/* Based on the former daynaport.c driver, by Alan Cox. Some code - taken from or inspired by skeleton.c by Donald Becker, acenic.c by - Jes Sorensen, and ne2k-pci.c by Donald Becker and Paul Gortmaker. - - This software may be used and distributed according to the terms of - the GNU Public License, incorporated herein by reference. */ - -/* 2000-02-28: support added for Dayna and Kinetics cards by - A.G.deWijn@phys.uu.nl */ -/* 2000-04-04: support added for Dayna2 by bart@etpmod.phys.tue.nl */ -/* 2001-04-18: support for DaynaPort E/LC-M by rayk@knightsmanor.org */ -/* 2001-05-15: support for Cabletron ported from old daynaport driver - * and fixed access to Sonic Sys card which masquerades as a Farallon - * by rayk@knightsmanor.org */ -/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */ -/* 2003-12-26: Make sure Asante cards always work. 
*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -static char version[] = - "v0.4 2001-05-15 David Huggins-Daines and others\n"; - -#define EI_SHIFT(x) (ei_local->reg_offset[x]) -#define ei_inb(port) in_8(port) -#define ei_outb(val, port) out_8(port, val) -#define ei_inb_p(port) in_8(port) -#define ei_outb_p(val, port) out_8(port, val) - -#include "lib8390.c" - -#define WD_START_PG 0x00 /* First page of TX buffer */ -#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */ -#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */ -#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG - /* First page of TX buffer */ - -/* - * Unfortunately it seems we have to hardcode these for the moment - * Shouldn't the card know about this? - * Does anyone know where to read it off the card? - * Do we trust the data provided by the card? - */ - -#define DAYNA_8390_BASE 0x80000 -#define DAYNA_8390_MEM 0x00000 - -#define CABLETRON_8390_BASE 0x90000 -#define CABLETRON_8390_MEM 0x00000 - -#define INTERLAN_8390_BASE 0xE0000 -#define INTERLAN_8390_MEM 0xD0000 - -enum mac8390_type { - MAC8390_NONE = -1, - MAC8390_APPLE, - MAC8390_ASANTE, - MAC8390_FARALLON, - MAC8390_CABLETRON, - MAC8390_DAYNA, - MAC8390_INTERLAN, - MAC8390_KINETICS, -}; - -static const char *cardname[] = { - "apple", - "asante", - "farallon", - "cabletron", - "dayna", - "interlan", - "kinetics", -}; - -static const int word16[] = { - 1, /* apple */ - 1, /* asante */ - 1, /* farallon */ - 1, /* cabletron */ - 0, /* dayna */ - 1, /* interlan */ - 0, /* kinetics */ -}; - -/* on which cards do we use NuBus resources? 
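-   (1 = trust the board's declared memory window, 0 = fall back to the
-   hardcoded base/memory offsets above)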
*/ -static const int useresources[] = { - 1, /* apple */ - 1, /* asante */ - 1, /* farallon */ - 0, /* cabletron */ - 0, /* dayna */ - 0, /* interlan */ - 0, /* kinetics */ -}; - -enum mac8390_access { - ACCESS_UNKNOWN = 0, - ACCESS_32, - ACCESS_16, -}; - -extern int mac8390_memtest(struct net_device *dev); -static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev, - enum mac8390_type type); - -static int mac8390_open(struct net_device *dev); -static int mac8390_close(struct net_device *dev); -static void mac8390_no_reset(struct net_device *dev); -static void interlan_reset(struct net_device *dev); - -/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/ -static void sane_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, int ring_page); -static void sane_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void sane_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page); - -/* dayna_memcpy to and from card */ -static void dayna_memcpy_fromcard(struct net_device *dev, void *to, - int from, int count); -static void dayna_memcpy_tocard(struct net_device *dev, int to, - const void *from, int count); - -/* Dayna - Dayna/Kinetics use this */ -static void dayna_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, int ring_page); -static void dayna_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void dayna_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); - -#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) -#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) - -#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) - -/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ -static void slow_sane_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, int ring_page); -static void slow_sane_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void slow_sane_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static void word_memcpy_tocard(unsigned long tp, const void *fp, int count); -static void word_memcpy_fromcard(void *tp, unsigned long fp, int count); - -static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) -{ - switch (dev->dr_sw) { - case NUBUS_DRSW_3COM: - switch (dev->dr_hw) { - case NUBUS_DRHW_APPLE_SONIC_NB: - case NUBUS_DRHW_APPLE_SONIC_LC: - case NUBUS_DRHW_SONNET: - return MAC8390_NONE; - break; - default: - return MAC8390_APPLE; - break; - } - break; - - case NUBUS_DRSW_APPLE: - switch (dev->dr_hw) { - case NUBUS_DRHW_ASANTE_LC: - return MAC8390_NONE; - break; - case NUBUS_DRHW_CABLETRON: - return MAC8390_CABLETRON; - break; - default: - return MAC8390_APPLE; - break; - } - break; - - case NUBUS_DRSW_ASANTE: - return MAC8390_ASANTE; - break; - - case NUBUS_DRSW_TECHWORKS: - case NUBUS_DRSW_DAYNA2: - case NUBUS_DRSW_DAYNA_LC: - if (dev->dr_hw == NUBUS_DRHW_CABLETRON) - return MAC8390_CABLETRON; - else - return MAC8390_APPLE; - break; - - case NUBUS_DRSW_FARALLON: - return MAC8390_FARALLON; - break; - - case NUBUS_DRSW_KINETICS: - switch (dev->dr_hw) { - case NUBUS_DRHW_INTERLAN: - return MAC8390_INTERLAN; - break; - default: - return MAC8390_KINETICS; - break; - } - break; - - case NUBUS_DRSW_DAYNA: - /* - * These correspond to Dayna Sonic cards - * which use the macsonic driver - */ - if 
(dev->dr_hw == NUBUS_DRHW_SMC9194 || - dev->dr_hw == NUBUS_DRHW_INTERLAN) - return MAC8390_NONE; - else - return MAC8390_DAYNA; - break; - } - return MAC8390_NONE; -} - -static enum mac8390_access __init mac8390_testio(volatile unsigned long membase) -{ - unsigned long outdata = 0xA5A0B5B0; - unsigned long indata = 0x00000000; - /* Try writing 32 bits */ - memcpy_toio(membase, &outdata, 4); - /* Now compare them */ - if (memcmp_withio(&outdata, membase, 4) == 0) - return ACCESS_32; - /* Write 16 bit output */ - word_memcpy_tocard(membase, &outdata, 4); - /* Now read it back */ - word_memcpy_fromcard(&indata, membase, 4); - if (outdata == indata) - return ACCESS_16; - return ACCESS_UNKNOWN; -} - -static int __init mac8390_memsize(unsigned long membase) -{ - unsigned long flags; - int i, j; - - local_irq_save(flags); - /* Check up to 32K in 4K increments */ - for (i = 0; i < 8; i++) { - volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000)); - - /* Unwriteable - we have a fully decoded card and the - RAM end located */ - if (hwreg_present(m) == 0) - break; - - /* write a distinctive byte */ - *m = 0xA5A0 | i; - /* check that we read back what we wrote */ - if (*m != (0xA5A0 | i)) - break; - - /* check for partial decode and wrap */ - for (j = 0; j < i; j++) { - volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000)); - if (*p != (0xA5A0 | j)) - break; - } - } - local_irq_restore(flags); - /* - * in any case, we stopped once we tried one block too many, - * or once we reached 32K - */ - return i * 0x1000; -} - -static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev, - enum mac8390_type cardtype) -{ - struct nubus_dir dir; - struct nubus_dirent ent; - int offset; - volatile unsigned short *i; - - printk_once(KERN_INFO pr_fmt("%s"), version); - - dev->irq = SLOT2IRQ(ndev->board->slot); - /* This is getting to be a habit */ - dev->base_addr = (ndev->board->slot_addr | - ((ndev->board->slot & 0xf) << 20)); - - /* - * Get some Nubus info - we will trust the card's idea - * of where its memory and registers are. 
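- * (for boards flagged in useresources[]; the rest get the hardcoded
- * offsets instead)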
- */ - - if (nubus_get_func_dir(ndev, &dir) == -1) { - pr_err("%s: Unable to get Nubus functional directory for slot %X!\n", - dev->name, ndev->board->slot); - return false; - } - - /* Get the MAC address */ - if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) { - pr_info("%s: Couldn't get MAC address!\n", dev->name); - return false; - } - - nubus_get_rsrc_mem(dev->dev_addr, &ent, 6); - - if (useresources[cardtype] == 1) { - nubus_rewinddir(&dir); - if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, - &ent) == -1) { - pr_err("%s: Memory offset resource for slot %X not found!\n", - dev->name, ndev->board->slot); - return false; - } - nubus_get_rsrc_mem(&offset, &ent, 4); - dev->mem_start = dev->base_addr + offset; - /* yes, this is how the Apple driver does it */ - dev->base_addr = dev->mem_start + 0x10000; - nubus_rewinddir(&dir); - if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, - &ent) == -1) { - pr_info("%s: Memory length resource for slot %X not found, probing\n", - dev->name, ndev->board->slot); - offset = mac8390_memsize(dev->mem_start); - } else { - nubus_get_rsrc_mem(&offset, &ent, 4); - } - dev->mem_end = dev->mem_start + offset; - } else { - switch (cardtype) { - case MAC8390_KINETICS: - case MAC8390_DAYNA: /* it's the same */ - dev->base_addr = (int)(ndev->board->slot_addr + - DAYNA_8390_BASE); - dev->mem_start = (int)(ndev->board->slot_addr + - DAYNA_8390_MEM); - dev->mem_end = dev->mem_start + - mac8390_memsize(dev->mem_start); - break; - case MAC8390_INTERLAN: - dev->base_addr = (int)(ndev->board->slot_addr + - INTERLAN_8390_BASE); - dev->mem_start = (int)(ndev->board->slot_addr + - INTERLAN_8390_MEM); - dev->mem_end = dev->mem_start + - mac8390_memsize(dev->mem_start); - break; - case MAC8390_CABLETRON: - dev->base_addr = (int)(ndev->board->slot_addr + - CABLETRON_8390_BASE); - dev->mem_start = (int)(ndev->board->slot_addr + - CABLETRON_8390_MEM); - /* The base address is unreadable if 0x00 - * has been written to the command register - * Reset the chip by writing E8390_NODMA + - * E8390_PAGE0 + E8390_STOP just to be - * sure - */ - i = (void *)dev->base_addr; - *i = 0x21; - dev->mem_end = dev->mem_start + - mac8390_memsize(dev->mem_start); - break; - - default: - pr_err("Card type %s is unsupported, sorry\n", - ndev->board->name); - return false; - } - } - - return true; -} - -struct net_device * __init mac8390_probe(int unit) -{ - struct net_device *dev; - struct nubus_dev *ndev = NULL; - int err = -ENODEV; - - static unsigned int slots; - - enum mac8390_type cardtype; - - /* probably should check for Nubus instead */ - - if (!MACH_IS_MAC) - return ERR_PTR(-ENODEV); - - dev = ____alloc_ei_netdev(0); - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) - sprintf(dev->name, "eth%d", unit); - - while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, - ndev))) { - /* Have we seen it already? 
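-		   The slots bitmask remembers every board slot already probed.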
*/ - if (slots & (1 << ndev->board->slot)) - continue; - slots |= 1 << ndev->board->slot; - - cardtype = mac8390_ident(ndev); - if (cardtype == MAC8390_NONE) - continue; - - if (!mac8390_init(dev, ndev, cardtype)) - continue; - - /* Do the nasty 8390 stuff */ - if (!mac8390_initdev(dev, ndev, cardtype)) - break; - } - - if (!ndev) - goto out; - err = register_netdev(dev); - if (err) - goto out; - return dev; - -out: - free_netdev(dev); - return ERR_PTR(err); -} - -#ifdef MODULE -MODULE_AUTHOR("David Huggins-Daines and others"); -MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver"); -MODULE_LICENSE("GPL"); - -/* overkill, of course */ -static struct net_device *dev_mac8390[15]; -int init_module(void) -{ - int i; - for (i = 0; i < 15; i++) { - struct net_device *dev = mac8390_probe(-1); - if (IS_ERR(dev)) - break; - dev_mac890[i] = dev; - } - if (!i) { - pr_notice("No useable cards found, driver NOT installed.\n"); - return -ENODEV; - } - return 0; -} - -void cleanup_module(void) -{ - int i; - for (i = 0; i < 15; i++) { - struct net_device *dev = dev_mac890[i]; - if (dev) { - unregister_netdev(dev); - free_netdev(dev); - } - } -} - -#endif /* MODULE */ - -static const struct net_device_ops mac8390_netdev_ops = { - .ndo_open = mac8390_open, - .ndo_stop = mac8390_close, - .ndo_start_xmit = __ei_start_xmit, - .ndo_tx_timeout = __ei_tx_timeout, - .ndo_get_stats = __ei_get_stats, - .ndo_set_multicast_list = __ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = __ei_poll, -#endif -}; - -static int __init mac8390_initdev(struct net_device *dev, - struct nubus_dev *ndev, - enum mac8390_type type) -{ - static u32 fwrd4_offsets[16] = { - 0, 4, 8, 12, - 16, 20, 24, 28, - 32, 36, 40, 44, - 48, 52, 56, 60 - }; - static u32 back4_offsets[16] = { - 60, 56, 52, 48, - 44, 40, 36, 32, - 28, 24, 20, 16, - 12, 8, 4, 0 - }; - static u32 fwrd2_offsets[16] = { - 0, 2, 4, 6, - 8, 10, 12, 14, - 16, 18, 20, 22, - 24, 26, 28, 30 - }; - - int access_bitmode = 0; - - /* Now fill in our stuff */ - dev->netdev_ops = &mac8390_netdev_ops; - - /* GAR, ei_status is actually a macro even though it looks global */ - ei_status.name = cardname[type]; - ei_status.word16 = word16[type]; - - /* Cabletron's TX/RX buffers are backwards */ - if (type == MAC8390_CABLETRON) { - ei_status.tx_start_page = CABLETRON_TX_START_PG; - ei_status.rx_start_page = CABLETRON_RX_START_PG; - ei_status.stop_page = CABLETRON_RX_STOP_PG; - ei_status.rmem_start = dev->mem_start; - ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256; - } else { - ei_status.tx_start_page = WD_START_PG; - ei_status.rx_start_page = WD_START_PG + TX_PAGES; - ei_status.stop_page = (dev->mem_end - dev->mem_start)/256; - ei_status.rmem_start = dev->mem_start + TX_PAGES*256; - ei_status.rmem_end = dev->mem_end; - } - - /* Fill in model-specific information and functions */ - switch (type) { - case MAC8390_FARALLON: - case MAC8390_APPLE: - switch (mac8390_testio(dev->mem_start)) { - case ACCESS_UNKNOWN: - pr_err("Don't know how to access card memory!\n"); - return -ENODEV; - break; - - case ACCESS_16: - /* 16 bit card, register map is reversed */ - ei_status.reset_8390 = mac8390_no_reset; - ei_status.block_input = slow_sane_block_input; - ei_status.block_output = slow_sane_block_output; - ei_status.get_8390_hdr = slow_sane_get_8390_hdr; - ei_status.reg_offset = back4_offsets; - break; - - case ACCESS_32: - /* 32 
bit card, register map is reversed */ - ei_status.reset_8390 = mac8390_no_reset; - ei_status.block_input = sane_block_input; - ei_status.block_output = sane_block_output; - ei_status.get_8390_hdr = sane_get_8390_hdr; - ei_status.reg_offset = back4_offsets; - access_bitmode = 1; - break; - } - break; - - case MAC8390_ASANTE: - /* Some Asante cards pass the 32 bit test - * but overwrite system memory when run at 32 bit. - * so we run them all at 16 bit. - */ - ei_status.reset_8390 = mac8390_no_reset; - ei_status.block_input = slow_sane_block_input; - ei_status.block_output = slow_sane_block_output; - ei_status.get_8390_hdr = slow_sane_get_8390_hdr; - ei_status.reg_offset = back4_offsets; - break; - - case MAC8390_CABLETRON: - /* 16 bit card, register map is short forward */ - ei_status.reset_8390 = mac8390_no_reset; - ei_status.block_input = slow_sane_block_input; - ei_status.block_output = slow_sane_block_output; - ei_status.get_8390_hdr = slow_sane_get_8390_hdr; - ei_status.reg_offset = fwrd2_offsets; - break; - - case MAC8390_DAYNA: - case MAC8390_KINETICS: - /* 16 bit memory, register map is forward */ - /* dayna and similar */ - ei_status.reset_8390 = mac8390_no_reset; - ei_status.block_input = dayna_block_input; - ei_status.block_output = dayna_block_output; - ei_status.get_8390_hdr = dayna_get_8390_hdr; - ei_status.reg_offset = fwrd4_offsets; - break; - - case MAC8390_INTERLAN: - /* 16 bit memory, register map is forward */ - ei_status.reset_8390 = interlan_reset; - ei_status.block_input = slow_sane_block_input; - ei_status.block_output = slow_sane_block_output; - ei_status.get_8390_hdr = slow_sane_get_8390_hdr; - ei_status.reg_offset = fwrd4_offsets; - break; - - default: - pr_err("Card type %s is unsupported, sorry\n", - ndev->board->name); - return -ENODEV; - } - - __NS8390_init(dev, 0); - - /* Good, done, now spit out some messages */ - pr_info("%s: %s in slot %X (type %s)\n", - dev->name, ndev->board->name, ndev->board->slot, - cardname[type]); - pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n", - dev->dev_addr, dev->irq, - (unsigned int)(dev->mem_end - dev->mem_start) >> 10, - dev->mem_start, access_bitmode ? 32 : 16); - return 0; -} - -static int mac8390_open(struct net_device *dev) -{ - int err; - - __ei_open(dev); - err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev); - if (err) - pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq); - return err; -} - -static int mac8390_close(struct net_device *dev) -{ - free_irq(dev->irq, dev); - __ei_close(dev); - return 0; -} - -static void mac8390_no_reset(struct net_device *dev) -{ - ei_status.txing = 0; - if (ei_debug > 1) - pr_info("reset not supported\n"); -} - -static void interlan_reset(struct net_device *dev) -{ - unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq)); - if (ei_debug > 1) - pr_info("Need to reset the NS8390 t=%lu...", jiffies); - ei_status.txing = 0; - target[0xC0000] = 0; - if (ei_debug > 1) - pr_cont("reset complete\n"); -} - -/* dayna_memcpy_fromio/dayna_memcpy_toio */ -/* directly from daynaport.c by Alan Cox */ -static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, - int count) -{ - volatile unsigned char *ptr; - unsigned char *target = to; - from <<= 1; /* word, skip overhead */ - ptr = (unsigned char *)(dev->mem_start+from); - /* Leading byte? 
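-	   (the card interleaves each 16bit word across four bytes of host
-	   space, so a copy starting on an odd word offset takes its first
-	   byte from the preceding mapped word)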
*/ - if (from & 2) { - *target++ = ptr[-1]; - ptr += 2; - count--; - } - while (count >= 2) { - *(unsigned short *)target = *(unsigned short volatile *)ptr; - ptr += 4; /* skip cruft */ - target += 2; - count -= 2; - } - /* Trailing byte? */ - if (count) - *target = *ptr; -} - -static void dayna_memcpy_tocard(struct net_device *dev, int to, - const void *from, int count) -{ - volatile unsigned short *ptr; - const unsigned char *src = from; - to <<= 1; /* word, skip overhead */ - ptr = (unsigned short *)(dev->mem_start+to); - /* Leading byte? */ - if (to & 2) { /* avoid a byte write (stomps on other data) */ - ptr[-1] = (ptr[-1]&0xFF00)|*src++; - ptr++; - count--; - } - while (count >= 2) { - *ptr++ = *(unsigned short *)src; /* Copy and */ - ptr++; /* skip cruft */ - src += 2; - count -= 2; - } - /* Trailing byte? */ - if (count) { - /* card doesn't like byte writes */ - *ptr = (*ptr & 0x00FF) | (*src << 8); - } -} - -/* sane block input/output */ -static void sane_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, int ring_page) -{ - unsigned long hdr_start = (ring_page - WD_START_PG)<<8; - memcpy_fromio(hdr, dev->mem_start + hdr_start, 4); - /* Fix endianness */ - hdr->count = swab16(hdr->count); -} - -static void sane_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - unsigned long xfer_base = ring_offset - (WD_START_PG<<8); - unsigned long xfer_start = xfer_base + dev->mem_start; - - if (xfer_start + count > ei_status.rmem_end) { - /* We must wrap the input move. */ - int semi_count = ei_status.rmem_end - xfer_start; - memcpy_fromio(skb->data, dev->mem_start + xfer_base, - semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, - count); - } else { - memcpy_fromio(skb->data, dev->mem_start + xfer_base, count); - } -} - -static void sane_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - long shmem = (start_page - WD_START_PG)<<8; - - memcpy_toio(dev->mem_start + shmem, buf, count); -} - -/* dayna block input/output */ -static void dayna_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, int ring_page) -{ - unsigned long hdr_start = (ring_page - WD_START_PG)<<8; - - dayna_memcpy_fromcard(dev, hdr, hdr_start, 4); - /* Fix endianness */ - hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8); -} - -static void dayna_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - unsigned long xfer_base = ring_offset - (WD_START_PG<<8); - unsigned long xfer_start = xfer_base+dev->mem_start; - - /* Note the offset math is done in card memory space which is word - per long onto our space. */ - - if (xfer_start + count > ei_status.rmem_end) { - /* We must wrap the input move. 
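-		   Copy up to rmem_end first, then continue from rmem_start.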
*/ - int semi_count = ei_status.rmem_end - xfer_start; - dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count); - count -= semi_count; - dayna_memcpy_fromcard(dev, skb->data + semi_count, - ei_status.rmem_start - dev->mem_start, - count); - } else { - dayna_memcpy_fromcard(dev, skb->data, xfer_base, count); - } -} - -static void dayna_block_output(struct net_device *dev, int count, - const unsigned char *buf, - int start_page) -{ - long shmem = (start_page - WD_START_PG)<<8; - - dayna_memcpy_tocard(dev, shmem, buf, count); -} - -/* Cabletron block I/O */ -static void slow_sane_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, - int ring_page) -{ - unsigned long hdr_start = (ring_page - WD_START_PG)<<8; - word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4); - /* Register endianism - fix here rather than 8390.c */ - hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); -} - -static void slow_sane_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - unsigned long xfer_base = ring_offset - (WD_START_PG<<8); - unsigned long xfer_start = xfer_base+dev->mem_start; - - if (xfer_start + count > ei_status.rmem_end) { - /* We must wrap the input move. */ - int semi_count = ei_status.rmem_end - xfer_start; - word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base, - semi_count); - count -= semi_count; - word_memcpy_fromcard(skb->data + semi_count, - ei_status.rmem_start, count); - } else { - word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base, - count); - } -} - -static void slow_sane_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - long shmem = (start_page - WD_START_PG)<<8; - - word_memcpy_tocard(dev->mem_start + shmem, buf, count); -} - -static void word_memcpy_tocard(unsigned long tp, const void *fp, int count) -{ - volatile unsigned short *to = (void *)tp; - const unsigned short *from = fp; - - count++; - count /= 2; - - while (count--) - *to++ = *from++; -} - -static void word_memcpy_fromcard(void *tp, unsigned long fp, int count) -{ - unsigned short *to = tp; - const volatile unsigned short *from = (const void *)fp; - - count++; - count /= 2; - - while (count--) - *to++ = *from++; -} - - diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c deleted file mode 100644 index 7298a34..0000000 --- a/drivers/net/ne-h8300.c +++ /dev/null @@ -1,685 +0,0 @@ -/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */ -/* - original ne.c - Written 1992-94 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 - - H8/300 modified - Yoshinori Sato -*/ - -static const char version1[] = -"ne-h8300.c:v1.00 2004/04/11 ysato\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define EI_SHIFT(x) (ei_local->reg_offset[x]) - -#include "8390.h" - -#define DRV_NAME "ne-h8300" - -/* Some defines that people can play with if so inclined. */ - -/* Do we perform extra sanity checks on stuff ? */ -/* #define NE_SANITY_CHECK */ - -/* Do we implement the read before write bugfix ? */ -/* #define NE_RW_BUGFIX */ - -/* Do we have a non std. amount of memory? 
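/*
 * The word_memcpy_*() helpers above round an odd byte count up so the
 * loop always moves whole 16-bit words; "count++; count /= 2;" is the
 * (count + 1) / 2 idiom.  A standalone sketch:
 */
#include <stdint.h>

static void word_copy(volatile uint16_t *to, const uint16_t *from, int count)
{
        int words = (count + 1) / 2;    /* round odd byte counts up */

        while (words--)
                *to++ = *from++;
}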
(in units of 256 byte pages) */ -/* #define PACKETBUF_MEMSIZE 0x40 */ - -/* A zero-terminated list of I/O addresses to be probed at boot. */ - -/* ---- No user-serviceable parts below ---- */ - -static const char version[] = - "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include "lib8390.c" - -#define NE_BASE (dev->base_addr) -#define NE_CMD 0x00 -#define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */ -#define NE_RESET (ei_status.word16?0x3f:0x1f) /* Issue a read to reset, a write to clear. */ -#define NE_IO_EXTENT (ei_status.word16?0x40:0x20) - -#define NESM_START_PG 0x40 /* First page of TX buffer */ -#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ - -static int ne_probe1(struct net_device *dev, int ioaddr); - -static int ne_open(struct net_device *dev); -static int ne_close(struct net_device *dev); - -static void ne_reset_8390(struct net_device *dev); -static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void ne_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ne_block_output(struct net_device *dev, const int count, - const unsigned char *buf, const int start_page); - - -static u32 reg_offset[16]; - -static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr) -{ - struct ei_device *ei_local = netdev_priv(dev); - int i; - unsigned char bus_width; - - bus_width = *(volatile unsigned char *)ABWCR; - bus_width &= 1 << ((base_addr >> 21) & 7); - - for (i = 0; i < ARRAY_SIZE(reg_offset); i++) - if (bus_width == 0) - reg_offset[i] = i * 2 + 1; - else - reg_offset[i] = i; - - ei_local->reg_offset = reg_offset; - return 0; -} - -static int __initdata h8300_ne_count = 0; -#ifdef CONFIG_H8300H_H8MAX -static unsigned long __initdata h8300_ne_base[] = { 0x800600 }; -static int h8300_ne_irq[] = {EXT_IRQ4}; -#endif -#ifdef CONFIG_H8300H_AKI3068NET -static unsigned long __initdata h8300_ne_base[] = { 0x200000 }; -static int h8300_ne_irq[] = {EXT_IRQ5}; -#endif - -static inline int init_dev(struct net_device *dev) -{ - if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) { - dev->base_addr = h8300_ne_base[h8300_ne_count]; - dev->irq = h8300_ne_irq[h8300_ne_count]; - h8300_ne_count++; - return 0; - } else - return -ENODEV; -} - -/* Probe for various non-shared-memory ethercards. - - NEx000-clone boards have a Station Address PROM (SAPROM) in the packet - buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of - the SAPROM, while other supposed NE2000 clones must be detected by their - SA prefix. - - Reading the SAPROM from a word-wide card with the 8390 set in byte-wide - mode results in doubled values, which can be detected and compensated for. - - The probe is also responsible for initializing the card and filling - in the 'dev' and 'ei_status' structures. - - We use the minimum memory size for some ethercard product lines, iff we can't - distinguish models. You can increase the packet buffer size by setting - PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are: - E1010 starts at 0x100 and ends at 0x2000. - E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory") - E2010 starts at 0x100 and ends at 0x4000. - E2010-x starts at 0x100 and ends at 0xffff. */ - -static int __init do_ne_probe(struct net_device *dev) -{ - unsigned int base_addr = dev->base_addr; - - /* First check any supplied i/o locations. User knows best. 
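/*
 * Sketch of the register-spacing decision made in init_reg_offset()
 * above: when the H8/300 bus-width control (ABWCR) marks the area as
 * 16 bits wide, the byte-wide 8390 registers appear on odd addresses
 * with a stride of two; on an 8-bit area they are contiguous.
 */
#include <stdint.h>

static void fill_reg_offsets(uint32_t offsets[16], int sixteen_bit_bus)
{
        int i;

        for (i = 0; i < 16; i++)
                offsets[i] = sixteen_bit_bus ? i * 2 + 1 : i;
}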
*/ - if (base_addr > 0x1ff) /* Check a single specified location. */ - return ne_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - return -ENODEV; -} - -static void cleanup_card(struct net_device *dev) -{ - free_irq(dev->irq, dev); - release_region(dev->base_addr, NE_IO_EXTENT); -} - -#ifndef MODULE -struct net_device * __init ne_probe(int unit) -{ - struct net_device *dev = ____alloc_ei_netdev(0); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (init_dev(dev)) - return ERR_PTR(-ENODEV); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = init_reg_offset(dev, dev->base_addr); - if (err) - goto out; - - err = do_ne_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops ne_netdev_ops = { - .ndo_open = ne_open, - .ndo_stop = ne_close, - - .ndo_start_xmit = __ei_start_xmit, - .ndo_tx_timeout = __ei_tx_timeout, - .ndo_get_stats = __ei_get_stats, - .ndo_set_multicast_list = __ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = __ei_poll, -#endif -}; - -static int __init ne_probe1(struct net_device *dev, int ioaddr) -{ - int i; - unsigned char SA_prom[16]; - int wordlength = 2; - const char *name = NULL; - int start_page, stop_page; - int reg0, ret; - static unsigned version_printed; - struct ei_device *ei_local = netdev_priv(dev); - unsigned char bus_width; - - if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - reg0 = inb_p(ioaddr); - if (reg0 == 0xFF) { - ret = -ENODEV; - goto err_out; - } - - /* Do a preliminary verification that we have a 8390. */ - { - int regd; - outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); - regd = inb_p(ioaddr + EI_SHIFT(0x0d)); - outb_p(0xff, ioaddr + EI_SHIFT(0x0d)); - outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); - inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ - if (inb_p(ioaddr + EN0_COUNTER0) != 0) { - outb_p(reg0, ioaddr + EI_SHIFT(0)); - outb_p(regd, ioaddr + EI_SHIFT(0x0d)); /* Restore the old values. */ - ret = -ENODEV; - goto err_out; - } - } - - if (ei_debug && version_printed++ == 0) - printk(KERN_INFO "%s", version1); - - printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr); - - /* Read the 16 bytes of station address PROM. - We must first initialize registers, similar to NS8390_init(eifdev, 0). - We can't reliably read the SAPROM address without this. - (I learned the hard way!). */ - { - struct {unsigned char value, offset; } program_seq[] = - { - {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ - {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ - {0x00, EN0_RCNTLO}, /* Clear the count regs. */ - {0x00, EN0_RCNTHI}, - {0x00, EN0_IMR}, /* Mask completion irq. */ - {0xFF, EN0_ISR}, - {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ - {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ - {32, EN0_RCNTLO}, - {0x00, EN0_RCNTHI}, - {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. 
*/ - {0x00, EN0_RSARHI}, - {E8390_RREAD+E8390_START, E8390_CMD}, - }; - - for (i = 0; i < ARRAY_SIZE(program_seq); i++) - outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); - - } - bus_width = *(volatile unsigned char *)ABWCR; - bus_width &= 1 << ((ioaddr >> 21) & 7); - ei_status.word16 = (bus_width == 0); /* temporary setting */ - for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) { - SA_prom[i] = inb_p(ioaddr + NE_DATAPORT); - inb_p(ioaddr + NE_DATAPORT); /* dummy read */ - } - - start_page = NESM_START_PG; - stop_page = NESM_STOP_PG; - - if (bus_width) - wordlength = 1; - else - outb_p(0x49, ioaddr + EN0_DCFG); - - /* Set up the rest of the parameters. */ - name = (wordlength == 2) ? "NE2000" : "NE1000"; - - if (! dev->irq) { - printk(" failed to detect IRQ line.\n"); - ret = -EAGAIN; - goto err_out; - } - - /* Snarf the interrupt now. There's no point in waiting since we cannot - share and the board will usually be enabled. */ - ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev); - if (ret) { - printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); - goto err_out; - } - - dev->base_addr = ioaddr; - - for(i = 0; i < ETHER_ADDR_LEN; i++) - dev->dev_addr[i] = SA_prom[i]; - printk(" %pM\n", dev->dev_addr); - - printk("%s: %s found at %#x, using IRQ %d.\n", - dev->name, name, ioaddr, dev->irq); - - ei_status.name = name; - ei_status.tx_start_page = start_page; - ei_status.stop_page = stop_page; - ei_status.word16 = (wordlength == 2); - - ei_status.rx_start_page = start_page + TX_PAGES; -#ifdef PACKETBUF_MEMSIZE - /* Allow the packet buffer size to be overridden by know-it-alls. */ - ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; -#endif - - ei_status.reset_8390 = &ne_reset_8390; - ei_status.block_input = &ne_block_input; - ei_status.block_output = &ne_block_output; - ei_status.get_8390_hdr = &ne_get_8390_hdr; - ei_status.priv = 0; - - dev->netdev_ops = &ne_netdev_ops; - - __NS8390_init(dev, 0); - - ret = register_netdev(dev); - if (ret) - goto out_irq; - return 0; -out_irq: - free_irq(dev->irq, dev); -err_out: - release_region(ioaddr, NE_IO_EXTENT); - return ret; -} - -static int ne_open(struct net_device *dev) -{ - __ei_open(dev); - return 0; -} - -static int ne_close(struct net_device *dev) -{ - if (ei_debug > 1) - printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); - __ei_close(dev); - return 0; -} - -/* Hard reset the card. This used to pause for the same period that a - 8390 reset command required, but that shouldn't be necessary. */ - -static void ne_reset_8390(struct net_device *dev) -{ - unsigned long reset_start_time = jiffies; - struct ei_device *ei_local = netdev_priv(dev); - - if (ei_debug > 1) - printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); - - /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ - outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); - - ei_status.txing = 0; - ei_status.dmaing = 0; - - /* This check _should_not_ be necessary, omit eventually. */ - while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, reset_start_time + 2*HZ/100)) { - printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); - break; - } - outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. 
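/*
 * The probe above replays a fixed (value, register) table to bring
 * the 8390 into a known state before touching the SAPROM.  A
 * condensed sketch of that table-driven pattern, with a logging stub
 * standing in for outb_p():
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct reg_seq {
        uint8_t value, offset;
};

static void reg_write(uint8_t value, unsigned int offset)
{
        printf("out %#04x -> reg %#04x\n", (unsigned int)value, offset);
}

static void replay_seq(const struct reg_seq *seq, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++)
                reg_write(seq[i].value, seq[i].offset);
}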
*/ - -static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - struct ei_device *ei_local = netdev_priv(dev); - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - - if (ei_status.dmaing) - { - printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr " - "[DMAstat:%d][irqlock:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - - ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD); - outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO); - outb_p(0, NE_BASE + EN0_RCNTHI); - outb_p(0, NE_BASE + EN0_RSARLO); /* On page boundary */ - outb_p(ring_page, NE_BASE + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); - - if (ei_status.word16) { - int len; - unsigned short *p = (unsigned short *)hdr; - for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--) - *p++ = inw(NE_BASE + NE_DATAPORT); - } else - insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); - - outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; - - le16_to_cpus(&hdr->count); -} - -/* Block input and output, similar to the Crynwr packet driver. If you - are porting to a new ethercard, look at the packet driver source for hints. - The NEx000 doesn't share the on-board packet memory -- you have to put - the packet out through the "remote DMA" dataport using outb. */ - -static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - struct ei_device *ei_local = netdev_priv(dev); -#ifdef NE_SANITY_CHECK - int xfer_count = count; -#endif - char *buf = skb->data; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) - { - printk(KERN_EMERG "%s: DMAing conflict in ne_block_input " - "[DMAstat:%d][irqlock:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD); - outb_p(count & 0xff, NE_BASE + EN0_RCNTLO); - outb_p(count >> 8, NE_BASE + EN0_RCNTHI); - outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO); - outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); - if (ei_status.word16) - { - int len; - unsigned short *p = (unsigned short *)buf; - for (len = count>>1; len > 0; len--) - *p++ = inw(NE_BASE + NE_DATAPORT); - if (count & 0x01) - { - buf[count-1] = inb(NE_BASE + NE_DATAPORT); -#ifdef NE_SANITY_CHECK - xfer_count++; -#endif - } - } else { - insb(NE_BASE + NE_DATAPORT, buf, count); - } - -#ifdef NE_SANITY_CHECK - /* This was for the ALPHA version only, but enough people have - been encountering problems so it is still here. If you see - this message you either 1) have a slightly incompatible clone - or 2) have noise/speed problems with your bus. */ - - if (ei_debug > 1) - { - /* DMA termination address check... */ - int addr, tries = 20; - do { - /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here - -- it's broken for Rx on some cards! */ - int high = inb_p(NE_BASE + EN0_RSARHI); - int low = inb_p(NE_BASE + EN0_RSARLO); - addr = (high << 8) + low; - if (((ring_offset + xfer_count) & 0xff) == low) - break; - } while (--tries > 0); - if (tries <= 0) - printk(KERN_WARNING "%s: RX transfer address mismatch," - "%#4.4x (expected) vs. %#4.4x (actual).\n", - dev->name, ring_offset + xfer_count, addr); - } -#endif - outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. 
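/*
 * Condensed sketch of the remote-DMA read performed by
 * ne_block_input(): program the byte count and ring address, issue
 * the remote-read command, then drain the data port a word at a
 * time.  io_write8()/io_read16() are hypothetical stand-ins for
 * outb_p()/inw(); the register offsets follow the standard NE2000
 * layout used by this driver.
 */
#include <stdint.h>

extern void io_write8(uint8_t value, unsigned int port);
extern uint16_t io_read16(unsigned int port);

enum {
        CMD = 0x00, RSARLO = 0x08, RSARHI = 0x09,
        RCNTLO = 0x0a, RCNTHI = 0x0b, DATAPORT = 0x10,
        RREAD_START = 0x0a,     /* E8390_RREAD | E8390_START */
};

static void remote_read(unsigned int base, uint16_t *buf,
                        int count, int ring_offset)
{
        int words;

        io_write8(count & 0xff, base + RCNTLO);
        io_write8(count >> 8, base + RCNTHI);
        io_write8(ring_offset & 0xff, base + RSARLO);
        io_write8(ring_offset >> 8, base + RSARHI);
        io_write8(RREAD_START, base + CMD);

        for (words = count >> 1; words > 0; words--)
                *buf++ = io_read16(base + DATAPORT);
}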
*/ - ei_status.dmaing &= ~0x01; -} - -static void ne_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page) -{ - struct ei_device *ei_local = netdev_priv(dev); - unsigned long dma_start; -#ifdef NE_SANITY_CHECK - int retries = 0; -#endif - - /* Round the count up for word writes. Do we need to do this? - What effect will an odd byte count have on the 8390? - I should check someday. */ - - if (ei_status.word16 && (count & 0x01)) - count++; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) - { - printk(KERN_EMERG "%s: DMAing conflict in ne_block_output." - "[DMAstat:%d][irqlock:%d]\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - /* We should already be in page 0, but to be safe... */ - outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD); - -#ifdef NE_SANITY_CHECK -retry: -#endif - -#ifdef NE8390_RW_BUGFIX - /* Handle the read-before-write bug the same way as the - Crynwr packet driver -- the NatSemi method doesn't work. - Actually this doesn't always work either, but if you have - problems with your NEx000 this is better than nothing! */ - - outb_p(0x42, NE_BASE + EN0_RCNTLO); - outb_p(0x00, NE_BASE + EN0_RCNTHI); - outb_p(0x42, NE_BASE + EN0_RSARLO); - outb_p(0x00, NE_BASE + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); - /* Make certain that the dummy read has occurred. */ - udelay(6); -#endif - - outb_p(ENISR_RDC, NE_BASE + EN0_ISR); - - /* Now the normal output. */ - outb_p(count & 0xff, NE_BASE + EN0_RCNTLO); - outb_p(count >> 8, NE_BASE + EN0_RCNTHI); - outb_p(0x00, NE_BASE + EN0_RSARLO); - outb_p(start_page, NE_BASE + EN0_RSARHI); - - outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD); - if (ei_status.word16) { - int len; - unsigned short *p = (unsigned short *)buf; - for (len = count>>1; len > 0; len--) - outw(*p++, NE_BASE + NE_DATAPORT); - } else { - outsb(NE_BASE + NE_DATAPORT, buf, count); - } - - dma_start = jiffies; - -#ifdef NE_SANITY_CHECK - /* This was for the ALPHA version only, but enough people have - been encountering problems so it is still here. */ - - if (ei_debug > 1) - { - /* DMA termination address check... */ - int addr, tries = 20; - do { - int high = inb_p(NE_BASE + EN0_RSARHI); - int low = inb_p(NE_BASE + EN0_RSARLO); - addr = (high << 8) + low; - if ((start_page << 8) + count == addr) - break; - } while (--tries > 0); - - if (tries <= 0) - { - printk(KERN_WARNING "%s: Tx packet transfer address mismatch," - "%#4.4x (expected) vs. %#4.4x (actual).\n", - dev->name, (start_page << 8) + count, addr); - if (retries++ == 0) - goto retry; - } - } -#endif - - while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0) - if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ - printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); - ne_reset_8390(dev); - __NS8390_init(dev,1); - break; - } - - outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. 
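/*
 * The RDC wait above gives the card 2*HZ/100 jiffies -- 20 ms -- to
 * finish the transfer.  time_after() stays correct across jiffies
 * wraparound because it compares a signed difference; a minimal
 * equivalent for a 32-bit tick counter:
 */
#include <stdint.h>

static int ticks_after(uint32_t a, uint32_t b)
{
        return (int32_t)(b - a) < 0;    /* true when a is later than b */
}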
*/ - ei_status.dmaing &= ~0x01; -} - - -#ifdef MODULE -#define MAX_NE_CARDS 1 /* Max number of NE cards per module */ -static struct net_device *dev_ne[MAX_NE_CARDS]; -static int io[MAX_NE_CARDS]; -static int irq[MAX_NE_CARDS]; -static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */ - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(bad, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s)"); -MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that no ISA autoprobe takes place. We can't guarantee -that the ne2k probe is the last 8390 based probe to take place (as it -is at boot) and so the probe will get confused by any other 8390 cards. -ISA device autoprobes on a running machine are not recommended anyway. */ - -int init_module(void) -{ - int this_dev, found = 0; - int err; - - for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - struct net_device *dev = ____alloc_ei_netdev(0); - if (!dev) - break; - if (io[this_dev]) { - dev->irq = irq[this_dev]; - dev->mem_end = bad[this_dev]; - dev->base_addr = io[this_dev]; - } else { - dev->base_addr = h8300_ne_base[this_dev]; - dev->irq = h8300_ne_irq[this_dev]; - } - err = init_reg_offset(dev, dev->base_addr); - if (!err) { - if (do_ne_probe(dev) == 0) { - dev_ne[found++] = dev; - continue; - } - } - free_netdev(dev); - if (found) - break; - if (io[this_dev] != 0) - printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr); - else - printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n"); - return -ENXIO; - } - if (found) - return 0; - return -ENODEV; -} - -void cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - struct net_device *dev = dev_ne[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/ne.c b/drivers/net/ne.c deleted file mode 100644 index 1063093..0000000 --- a/drivers/net/ne.c +++ /dev/null @@ -1,1008 +0,0 @@ -/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */ -/* - Written 1992-94 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 - - This driver should work with many programmed-I/O 8390-based ethernet - boards. Currently it supports the NE1000, NE2000, many clones, - and some Cabletron products. - - Changelog: - - Paul Gortmaker : use ENISR_RDC to monitor Tx PIO uploads, made - sanity checks and bad clone support optional. - Paul Gortmaker : new reset code, reset card after probe at boot. - Paul Gortmaker : multiple card support for module users. - Paul Gortmaker : Support for PCI ne2k clones, similar to lance.c - Paul Gortmaker : Allow users with bad cards to avoid full probe. - Paul Gortmaker : PCI probe changes, more PCI cards supported. - rjohnson@analogic.com : Changed init order so an interrupt will only - occur after memory is allocated for dev->priv. 
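The io=/irq= arrays above are the usual hook for per-card settings at
module load time; on a board with a single NE-compatible device the
values might be supplied like this (values purely illustrative):

        insmod ne-h8300 io=0x200000 irq=5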
Deallocated memory - last in cleanup_modue() - Richard Guenther : Added support for ISAPnP cards - Paul Gortmaker : Discontinued PCI support - use ne2k-pci.c instead. - Hayato Fujiwara : Add m32r support. - -*/ - -/* Routines for the NatSemi-based designs (NE[12]000). */ - -static const char version1[] = -"ne.c:v1.10 9/23/94 Donald Becker (becker@scyld.com)\n"; -static const char version2[] = -"Last modified Nov 1, 2000 by Paul Gortmaker\n"; - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "8390.h" - -#define DRV_NAME "ne" - -/* Some defines that people can play with if so inclined. */ - -/* Do we support clones that don't adhere to 14,15 of the SAprom ? */ -#define SUPPORT_NE_BAD_CLONES -/* 0xbad = bad sig or no reset ack */ -#define BAD 0xbad - -#define MAX_NE_CARDS 4 /* Max number of NE cards per module */ -static struct platform_device *pdev_ne[MAX_NE_CARDS]; -static int io[MAX_NE_CARDS]; -static int irq[MAX_NE_CARDS]; -static int bad[MAX_NE_CARDS]; - -#ifdef MODULE -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(bad, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es),required"); -MODULE_PARM_DESC(irq, "IRQ number(s)"); -MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures"); -MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver"); -MODULE_LICENSE("GPL"); -#endif /* MODULE */ - -/* Do we perform extra sanity checks on stuff ? */ -/* #define NE_SANITY_CHECK */ - -/* Do we implement the read before write bugfix ? */ -/* #define NE_RW_BUGFIX */ - -/* Do we have a non std. amount of memory? (in units of 256 byte pages) */ -/* #define PACKETBUF_MEMSIZE 0x40 */ - -/* This is set up so that no ISA autoprobe takes place. We can't guarantee -that the ne2k probe is the last 8390 based probe to take place (as it -is at boot) and so the probe will get confused by any other 8390 cards. -ISA device autoprobes on a running machine are not recommended anyway. */ -#if !defined(MODULE) && (defined(CONFIG_ISA) || defined(CONFIG_M32R)) -/* Do we need a portlist for the ISA auto-probe ? */ -#define NEEDS_PORTLIST -#endif - -/* A zero-terminated list of I/O addresses to be probed at boot. */ -#ifdef NEEDS_PORTLIST -static unsigned int netcard_portlist[] __initdata = { - 0x300, 0x280, 0x320, 0x340, 0x360, 0x380, 0 -}; -#endif - -static struct isapnp_device_id isapnp_clone_list[] __initdata = { - { ISAPNP_CARD_ID('A','X','E',0x2011), - ISAPNP_VENDOR('A','X','E'), ISAPNP_FUNCTION(0x2011), - (long) "NetGear EA201" }, - { ISAPNP_ANY_ID, ISAPNP_ANY_ID, - ISAPNP_VENDOR('E','D','I'), ISAPNP_FUNCTION(0x0216), - (long) "NN NE2000" }, - { ISAPNP_ANY_ID, ISAPNP_ANY_ID, - ISAPNP_VENDOR('P','N','P'), ISAPNP_FUNCTION(0x80d6), - (long) "Generic PNP" }, - { } /* terminate list */ -}; - -MODULE_DEVICE_TABLE(isapnp, isapnp_clone_list); - -#ifdef SUPPORT_NE_BAD_CLONES -/* A list of bad clones that we none-the-less recognize. */ -static struct { const char *name8, *name16; unsigned char SAprefix[4];} -bad_clone_list[] __initdata = { - {"DE100", "DE200", {0x00, 0xDE, 0x01,}}, - {"DE120", "DE220", {0x00, 0x80, 0xc8,}}, - {"DFI1000", "DFI2000", {'D', 'F', 'I',}}, /* Original, eh? */ - {"EtherNext UTP8", "EtherNext UTP16", {0x00, 0x00, 0x79}}, - {"NE1000","NE2000-invalid", {0x00, 0x00, 0xd8}}, /* Ancient real NE1000. */ - {"NN1000", "NN2000", {0x08, 0x03, 0x08}}, /* Outlaw no-name clone. */ - {"4-DIM8","4-DIM16", {0x00,0x00,0x4d,}}, /* Outlaw 4-Dimension cards. 
*/ - {"Con-Intl_8", "Con-Intl_16", {0x00, 0x00, 0x24}}, /* Connect Int'nl */ - {"ET-100","ET-200", {0x00, 0x45, 0x54}}, /* YANG and YA clone */ - {"COMPEX","COMPEX16",{0x00,0x80,0x48}}, /* Broken ISA Compex cards */ - {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */ - {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ - {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ -#ifdef CONFIG_MACH_TX49XX - {"RBHMA4X00-RTL8019", "RBHMA4X00-RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ -#endif - {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ - {NULL,} -}; -#endif - -/* ---- No user-serviceable parts below ---- */ - -#define NE_BASE (dev->base_addr) -#define NE_CMD 0x00 -#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ -#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */ -#define NE_IO_EXTENT 0x20 - -#define NE1SM_START_PG 0x20 /* First page of TX buffer */ -#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ -#define NESM_START_PG 0x40 /* First page of TX buffer */ -#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ - -#if defined(CONFIG_PLAT_MAPPI) -# define DCR_VAL 0x4b -#elif defined(CONFIG_PLAT_OAKS32R) || \ - defined(CONFIG_MACH_TX49XX) -# define DCR_VAL 0x48 /* 8-bit mode */ -#else -# define DCR_VAL 0x49 -#endif - -static int ne_probe1(struct net_device *dev, unsigned long ioaddr); -static int ne_probe_isapnp(struct net_device *dev); - -static void ne_reset_8390(struct net_device *dev); -static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void ne_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ne_block_output(struct net_device *dev, const int count, - const unsigned char *buf, const int start_page); - - -/* Probe for various non-shared-memory ethercards. - - NEx000-clone boards have a Station Address PROM (SAPROM) in the packet - buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of - the SAPROM, while other supposed NE2000 clones must be detected by their - SA prefix. - - Reading the SAPROM from a word-wide card with the 8390 set in byte-wide - mode results in doubled values, which can be detected and compensated for. - - The probe is also responsible for initializing the card and filling - in the 'dev' and 'ei_status' structures. - - We use the minimum memory size for some ethercard product lines, iff we can't - distinguish models. You can increase the packet buffer size by setting - PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are: - E1010 starts at 0x100 and ends at 0x2000. - E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory") - E2010 starts at 0x100 and ends at 0x4000. - E2010-x starts at 0x100 and ends at 0xffff. */ - -static int __init do_ne_probe(struct net_device *dev) -{ - unsigned long base_addr = dev->base_addr; -#ifdef NEEDS_PORTLIST - int orig_irq = dev->irq; -#endif - - /* First check any supplied i/o locations. User knows best. */ - if (base_addr > 0x1ff) { /* Check a single specified location. */ - int ret = ne_probe1(dev, base_addr); - if (ret) - printk(KERN_WARNING "ne.c: No NE*000 card found at " - "i/o = %#lx\n", base_addr); - return ret; - } - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - /* Then look for any installed ISAPnP clones */ - if (isapnp_present() && (ne_probe_isapnp(dev) == 0)) - return 0; - -#ifdef NEEDS_PORTLIST - /* Last resort. 
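/*
 * Sketch of the bad-clone lookup driven by the table above: the first
 * three bytes of the station-address PROM are compared against each
 * entry's SAprefix, and a hit selects the 8- or 16-bit clone name.
 */
#include <string.h>
#include <stdint.h>

struct clone {
        const char *name8, *name16;
        uint8_t prefix[3];
};

static const char *match_clone(const struct clone *tab, size_t n,
                               const uint8_t *sa_prom, int wordlength)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (!memcmp(sa_prom, tab[i].prefix, 3))
                        return wordlength == 2 ? tab[i].name16 : tab[i].name8;
        return NULL;    /* unknown signature */
}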
The semi-risky ISA auto-probe. */ - for (base_addr = 0; netcard_portlist[base_addr] != 0; base_addr++) { - int ioaddr = netcard_portlist[base_addr]; - dev->irq = orig_irq; - if (ne_probe1(dev, ioaddr) == 0) - return 0; - } -#endif - - return -ENODEV; -} - -static int __init ne_probe_isapnp(struct net_device *dev) -{ - int i; - - for (i = 0; isapnp_clone_list[i].vendor != 0; i++) { - struct pnp_dev *idev = NULL; - - while ((idev = pnp_find_dev(NULL, - isapnp_clone_list[i].vendor, - isapnp_clone_list[i].function, - idev))) { - /* Avoid already found cards from previous calls */ - if (pnp_device_attach(idev) < 0) - continue; - if (pnp_activate_dev(idev) < 0) { - pnp_device_detach(idev); - continue; - } - /* if no io and irq, search for next */ - if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) { - pnp_device_detach(idev); - continue; - } - /* found it */ - dev->base_addr = pnp_port_start(idev, 0); - dev->irq = pnp_irq(idev, 0); - printk(KERN_INFO "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n", - (char *) isapnp_clone_list[i].driver_data, - dev->base_addr, dev->irq); - if (ne_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */ - printk(KERN_ERR "ne.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr); - pnp_device_detach(idev); - return -ENXIO; - } - ei_status.priv = (unsigned long)idev; - break; - } - if (!idev) - continue; - return 0; - } - - return -ENODEV; -} - -static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) -{ - int i; - unsigned char SA_prom[32]; - int wordlength = 2; - const char *name = NULL; - int start_page, stop_page; - int neX000, ctron, copam, bad_card; - int reg0, ret; - static unsigned version_printed; - - if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - reg0 = inb_p(ioaddr); - if (reg0 == 0xFF) { - ret = -ENODEV; - goto err_out; - } - - /* Do a preliminary verification that we have a 8390. */ - { - int regd; - outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); - regd = inb_p(ioaddr + 0x0d); - outb_p(0xff, ioaddr + 0x0d); - outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); - inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ - if (inb_p(ioaddr + EN0_COUNTER0) != 0) { - outb_p(reg0, ioaddr); - outb_p(regd, ioaddr + 0x0d); /* Restore the old values. */ - ret = -ENODEV; - goto err_out; - } - } - - if (ei_debug && version_printed++ == 0) - printk(KERN_INFO "%s%s", version1, version2); - - printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr); - - /* A user with a poor card that fails to ack the reset, or that - does not have a valid 0x57,0x57 signature can still use this - without having to recompile. Specifying an i/o address along - with an otherwise unused dev->mem_end value of "0xBAD" will - cause the driver to skip these parts of the probe. */ - - bad_card = ((dev->base_addr != 0) && (dev->mem_end == BAD)); - - /* Reset card. Who knows what dain-bramaged state it was left in. */ - - { - unsigned long reset_start_time = jiffies; - - /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ - outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); - - while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, reset_start_time + 2*HZ/100)) { - if (bad_card) { - printk(" (warning: no reset ack)"); - break; - } else { - printk(" not found (no reset ack).\n"); - ret = -ENODEV; - goto err_out; - } - } - - outb_p(0xff, ioaddr + EN0_ISR); /* Ack all intr. */ - } - - /* Read the 16 bytes of station address PROM. 
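/*
 * The probe precedence just implemented, condensed: an explicit
 * user-supplied i/o address is tried and nothing else; a nonzero
 * address at or below 0x1ff suppresses probing entirely; then any
 * ISAPnP clone; the ISA portlist scan is the last resort.  The
 * probe_* helpers are hypothetical stand-ins.
 */
#include <errno.h>

extern int probe_at(unsigned long io);
extern int probe_isapnp(void);
extern int probe_portlist(void);

static int probe_order(unsigned long base_addr)
{
        if (base_addr > 0x1ff)          /* user knows best: try only there */
                return probe_at(base_addr);
        if (base_addr != 0)             /* 1..0x1ff means "don't probe"    */
                return -ENXIO;
        if (probe_isapnp() == 0)        /* then any ISAPnP clone           */
                return 0;
        return probe_portlist();        /* last resort: risky ISA scan     */
}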
- We must first initialize registers, similar to NS8390p_init(eifdev, 0). - We can't reliably read the SAPROM address without this. - (I learned the hard way!). */ - { - struct {unsigned char value, offset; } program_seq[] = - { - {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ - {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ - {0x00, EN0_RCNTLO}, /* Clear the count regs. */ - {0x00, EN0_RCNTHI}, - {0x00, EN0_IMR}, /* Mask completion irq. */ - {0xFF, EN0_ISR}, - {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ - {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ - {32, EN0_RCNTLO}, - {0x00, EN0_RCNTHI}, - {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ - {0x00, EN0_RSARHI}, - {E8390_RREAD+E8390_START, E8390_CMD}, - }; - - for (i = 0; i < ARRAY_SIZE(program_seq); i++) - outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); - - } - for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) { - SA_prom[i] = inb(ioaddr + NE_DATAPORT); - SA_prom[i+1] = inb(ioaddr + NE_DATAPORT); - if (SA_prom[i] != SA_prom[i+1]) - wordlength = 1; - } - - if (wordlength == 2) - { - for (i = 0; i < 16; i++) - SA_prom[i] = SA_prom[i+i]; - /* We must set the 8390 for word mode. */ - outb_p(DCR_VAL, ioaddr + EN0_DCFG); - start_page = NESM_START_PG; - - /* - * Realtek RTL8019AS datasheet says that the PSTOP register - * shouldn't exceed 0x60 in 8-bit mode. - * This chip can be identified by reading the signature from - * the remote byte count registers (otherwise write-only)... - */ - if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */ - inb(ioaddr + EN0_RCNTLO) == 0x50 && - inb(ioaddr + EN0_RCNTHI) == 0x70) - stop_page = 0x60; - else - stop_page = NESM_STOP_PG; - } else { - start_page = NE1SM_START_PG; - stop_page = NE1SM_STOP_PG; - } - -#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R) - neX000 = ((SA_prom[14] == 0x57 && SA_prom[15] == 0x57) - || (SA_prom[14] == 0x42 && SA_prom[15] == 0x42)); -#else - neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57); -#endif - ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d); - copam = (SA_prom[14] == 0x49 && SA_prom[15] == 0x00); - - /* Set up the rest of the parameters. */ - if (neX000 || bad_card || copam) { - name = (wordlength == 2) ? "NE2000" : "NE1000"; - } - else if (ctron) - { - name = (wordlength == 2) ? "Ctron-8" : "Ctron-16"; - start_page = 0x01; - stop_page = (wordlength == 2) ? 0x40 : 0x20; - } - else - { -#ifdef SUPPORT_NE_BAD_CLONES - /* Ack! Well, there might be a *bad* NE*000 clone there. - Check for total bogus addresses. */ - for (i = 0; bad_clone_list[i].name8; i++) - { - if (SA_prom[0] == bad_clone_list[i].SAprefix[0] && - SA_prom[1] == bad_clone_list[i].SAprefix[1] && - SA_prom[2] == bad_clone_list[i].SAprefix[2]) - { - if (wordlength == 2) - { - name = bad_clone_list[i].name16; - } else { - name = bad_clone_list[i].name8; - } - break; - } - } - if (bad_clone_list[i].name8 == NULL) - { - printk(" not found (invalid signature %2.2x %2.2x).\n", - SA_prom[14], SA_prom[15]); - ret = -ENXIO; - goto err_out; - } -#else - printk(" not found.\n"); - ret = -ENXIO; - goto err_out; -#endif - } - - if (dev->irq < 2) - { - unsigned long cookie = probe_irq_on(); - outb_p(0x50, ioaddr + EN0_IMR); /* Enable one interrupt. */ - outb_p(0x00, ioaddr + EN0_RCNTLO); - outb_p(0x00, ioaddr + EN0_RCNTHI); - outb_p(E8390_RREAD+E8390_START, ioaddr); /* Trigger it... */ - mdelay(10); /* wait 10ms for interrupt to propagate */ - outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. 
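/*
 * Sketch of the word-width detection above: a word-wide card read in
 * byte mode returns every PROM byte twice.  If each pair matches, the
 * doubled bytes are folded (SA_prom[i] = SA_prom[i+i]); any mismatch
 * means a byte-wide, NE1000-class card.
 */
#include <stdint.h>

static int fold_doubled_prom(uint8_t prom[32])
{
        int i;

        for (i = 0; i < 32; i += 2)
                if (prom[i] != prom[i + 1])
                        return 1;               /* byte-wide card */
        for (i = 0; i < 16; i++)
                prom[i] = prom[i + i];          /* keep one copy of each byte */
        return 2;                               /* word-wide card */
}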
*/ - dev->irq = probe_irq_off(cookie); - if (ei_debug > 2) - printk(" autoirq is %d\n", dev->irq); - } else if (dev->irq == 2) - /* Fixup for users that don't know that IRQ 2 is really IRQ 9, - or don't know which one to set. */ - dev->irq = 9; - - if (! dev->irq) { - printk(" failed to detect IRQ line.\n"); - ret = -EAGAIN; - goto err_out; - } - - /* Snarf the interrupt now. There's no point in waiting since we cannot - share and the board will usually be enabled. */ - ret = request_irq(dev->irq, eip_interrupt, 0, name, dev); - if (ret) { - printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); - goto err_out; - } - - dev->base_addr = ioaddr; - -#ifdef CONFIG_PLAT_MAPPI - outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, - ioaddr + E8390_CMD); /* 0x61 */ - for (i = 0 ; i < ETHER_ADDR_LEN ; i++) { - dev->dev_addr[i] = SA_prom[i] - = inb_p(ioaddr + EN1_PHYS_SHIFT(i)); - } -#else - for(i = 0; i < ETHER_ADDR_LEN; i++) { - dev->dev_addr[i] = SA_prom[i]; - } -#endif - - printk("%pM\n", dev->dev_addr); - - ei_status.name = name; - ei_status.tx_start_page = start_page; - ei_status.stop_page = stop_page; - - /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */ - ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01)); - - ei_status.rx_start_page = start_page + TX_PAGES; -#ifdef PACKETBUF_MEMSIZE - /* Allow the packet buffer size to be overridden by know-it-alls. */ - ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; -#endif - - ei_status.reset_8390 = &ne_reset_8390; - ei_status.block_input = &ne_block_input; - ei_status.block_output = &ne_block_output; - ei_status.get_8390_hdr = &ne_get_8390_hdr; - ei_status.priv = 0; - - dev->netdev_ops = &eip_netdev_ops; - NS8390p_init(dev, 0); - - ret = register_netdev(dev); - if (ret) - goto out_irq; - printk(KERN_INFO "%s: %s found at %#lx, using IRQ %d.\n", - dev->name, name, ioaddr, dev->irq); - return 0; - -out_irq: - free_irq(dev->irq, dev); -err_out: - release_region(ioaddr, NE_IO_EXTENT); - return ret; -} - -/* Hard reset the card. This used to pause for the same period that a - 8390 reset command required, but that shouldn't be necessary. */ - -static void ne_reset_8390(struct net_device *dev) -{ - unsigned long reset_start_time = jiffies; - - if (ei_debug > 1) - printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); - - /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ - outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); - - ei_status.txing = 0; - ei_status.dmaing = 0; - - /* This check _should_not_ be necessary, omit eventually. */ - while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, reset_start_time + 2*HZ/100)) { - printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); - break; - } - outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - int nic_base = dev->base_addr; - - /* This *shouldn't* happen. 
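/*
 * Condensed sketch of the auto-IRQ detection above: arm the kernel's
 * interrupt probe, unmask one 8390 interrupt source, provoke a remote
 * read so the card raises that interrupt, then ask which line fired.
 * (Note the PC/AT quirk handled above: a requested IRQ 2 is really
 * IRQ 9.)  The helpers are hypothetical stand-ins for
 * probe_irq_on()/probe_irq_off() and the outb_p() trigger sequence.
 */
extern unsigned long irq_probe_arm(void);
extern int irq_probe_result(unsigned long cookie);
extern void provoke_interrupt(unsigned int base);

static int autodetect_irq(unsigned int base)
{
        unsigned long cookie = irq_probe_arm();

        provoke_interrupt(base);                /* trigger + ~10 ms settle */
        return irq_probe_result(cookie);        /* 0 means detection failed */
}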
If it does, it's the last thing you'll see */ - - if (ei_status.dmaing) - { - printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr " - "[DMAstat:%d][irqlock:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - - ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); - outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); - outb_p(0, nic_base + EN0_RCNTHI); - outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ - outb_p(ring_page, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - - if (ei_status.word16) - insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); - else - insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); - - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; - - le16_to_cpus(&hdr->count); -} - -/* Block input and output, similar to the Crynwr packet driver. If you - are porting to a new ethercard, look at the packet driver source for hints. - The NEx000 doesn't share the on-board packet memory -- you have to put - the packet out through the "remote DMA" dataport using outb. */ - -static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ -#ifdef NE_SANITY_CHECK - int xfer_count = count; -#endif - int nic_base = dev->base_addr; - char *buf = skb->data; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) - { - printk(KERN_EMERG "%s: DMAing conflict in ne_block_input " - "[DMAstat:%d][irqlock:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); - outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) - { - insw(NE_BASE + NE_DATAPORT,buf,count>>1); - if (count & 0x01) - { - buf[count-1] = inb(NE_BASE + NE_DATAPORT); -#ifdef NE_SANITY_CHECK - xfer_count++; -#endif - } - } else { - insb(NE_BASE + NE_DATAPORT, buf, count); - } - -#ifdef NE_SANITY_CHECK - /* This was for the ALPHA version only, but enough people have - been encountering problems so it is still here. If you see - this message you either 1) have a slightly incompatible clone - or 2) have noise/speed problems with your bus. */ - - if (ei_debug > 1) - { - /* DMA termination address check... */ - int addr, tries = 20; - do { - /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here - -- it's broken for Rx on some cards! */ - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - addr = (high << 8) + low; - if (((ring_offset + xfer_count) & 0xff) == low) - break; - } while (--tries > 0); - if (tries <= 0) - printk(KERN_WARNING "%s: RX transfer address mismatch," - "%#4.4x (expected) vs. %#4.4x (actual).\n", - dev->name, ring_offset + xfer_count, addr); - } -#endif - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -static void ne_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page) -{ - int nic_base = NE_BASE; - unsigned long dma_start; -#ifdef NE_SANITY_CHECK - int retries = 0; -#endif - - /* Round the count up for word writes. Do we need to do this? - What effect will an odd byte count have on the 8390? - I should check someday. 
*/ - - if (ei_status.word16 && (count & 0x01)) - count++; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) - { - printk(KERN_EMERG "%s: DMAing conflict in ne_block_output." - "[DMAstat:%d][irqlock:%d]\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - /* We should already be in page 0, but to be safe... */ - outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); - -#ifdef NE_SANITY_CHECK -retry: -#endif - -#ifdef NE8390_RW_BUGFIX - /* Handle the read-before-write bug the same way as the - Crynwr packet driver -- the NatSemi method doesn't work. - Actually this doesn't always work either, but if you have - problems with your NEx000 this is better than nothing! */ - - outb_p(0x42, nic_base + EN0_RCNTLO); - outb_p(0x00, nic_base + EN0_RCNTHI); - outb_p(0x42, nic_base + EN0_RSARLO); - outb_p(0x00, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - /* Make certain that the dummy read has occurred. */ - udelay(6); -#endif - - outb_p(ENISR_RDC, nic_base + EN0_ISR); - - /* Now the normal output. */ - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(0x00, nic_base + EN0_RSARLO); - outb_p(start_page, nic_base + EN0_RSARHI); - - outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) { - outsw(NE_BASE + NE_DATAPORT, buf, count>>1); - } else { - outsb(NE_BASE + NE_DATAPORT, buf, count); - } - - dma_start = jiffies; - -#ifdef NE_SANITY_CHECK - /* This was for the ALPHA version only, but enough people have - been encountering problems so it is still here. */ - - if (ei_debug > 1) - { - /* DMA termination address check... */ - int addr, tries = 20; - do { - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - addr = (high << 8) + low; - if ((start_page << 8) + count == addr) - break; - } while (--tries > 0); - - if (tries <= 0) - { - printk(KERN_WARNING "%s: Tx packet transfer address mismatch," - "%#4.4x (expected) vs. %#4.4x (actual).\n", - dev->name, (start_page << 8) + count, addr); - if (retries++ == 0) - goto retry; - } - } -#endif - - while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) - if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ - printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); - ne_reset_8390(dev); - NS8390p_init(dev, 1); - break; - } - - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -static int __init ne_drv_probe(struct platform_device *pdev) -{ - struct net_device *dev; - int err, this_dev = pdev->id; - struct resource *res; - - dev = alloc_eip_netdev(); - if (!dev) - return -ENOMEM; - - /* ne.c doesn't populate resources in platform_device, but - * rbtx4927_ne_init and rbtx4938_ne_init do register devices - * with resources. - */ - res = platform_get_resource(pdev, IORESOURCE_IO, 0); - if (res) { - dev->base_addr = res->start; - dev->irq = platform_get_irq(pdev, 0); - } else { - if (this_dev < 0 || this_dev >= MAX_NE_CARDS) { - free_netdev(dev); - return -EINVAL; - } - dev->base_addr = io[this_dev]; - dev->irq = irq[this_dev]; - dev->mem_end = bad[this_dev]; - } - err = do_ne_probe(dev); - if (err) { - free_netdev(dev); - return err; - } - platform_set_drvdata(pdev, dev); - - /* Update with any values found by probing, don't update if - * resources were specified. 
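/*
 * Sketch of ne_drv_probe()'s configuration precedence: an i/o
 * resource attached to the platform_device (as the TX49xx board code
 * registers) wins; otherwise the module's io[]/irq[] arrays are
 * indexed by device id.  The types and helper here are hypothetical.
 */
struct ne_cfg {
        unsigned long io;
        int irq;
};

/* Returns the board-registered resource, or NULL when there is none. */
extern const struct ne_cfg *platform_cfg(int id);

static int pick_config(int id, const unsigned long io[], const int irq[],
                       int max_cards, struct ne_cfg *out)
{
        const struct ne_cfg *res = platform_cfg(id);

        if (res) {                      /* board code supplied resources */
                *out = *res;
                return 0;
        }
        if (id < 0 || id >= max_cards)  /* fall back to module parameters */
                return -1;
        out->io = io[id];
        out->irq = irq[id];
        return 0;
}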
- */ - if (!res) { - io[this_dev] = dev->base_addr; - irq[this_dev] = dev->irq; - } - return 0; -} - -static int ne_drv_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - - if (dev) { - struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; - netif_device_detach(dev); - unregister_netdev(dev); - if (idev) - pnp_device_detach(idev); - /* Careful ne_drv_remove can be called twice, once from - * the platform_driver.remove and again when the - * platform_device is being removed. - */ - ei_status.priv = 0; - free_irq(dev->irq, dev); - release_region(dev->base_addr, NE_IO_EXTENT); - free_netdev(dev); - platform_set_drvdata(pdev, NULL); - } - return 0; -} - -/* Remove unused devices or all if true. */ -static void ne_loop_rm_unreg(int all) -{ - int this_dev; - struct platform_device *pdev; - for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - pdev = pdev_ne[this_dev]; - /* No network device == unused */ - if (pdev && (!platform_get_drvdata(pdev) || all)) { - ne_drv_remove(pdev); - platform_device_unregister(pdev); - pdev_ne[this_dev] = NULL; - } - } -} - -#ifdef CONFIG_PM -static int ne_drv_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct net_device *dev = platform_get_drvdata(pdev); - - if (netif_running(dev)) { - struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; - netif_device_detach(dev); - if (idev) - pnp_stop_dev(idev); - } - return 0; -} - -static int ne_drv_resume(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - - if (netif_running(dev)) { - struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; - if (idev) - pnp_start_dev(idev); - ne_reset_8390(dev); - NS8390p_init(dev, 1); - netif_device_attach(dev); - } - return 0; -} -#else -#define ne_drv_suspend NULL -#define ne_drv_resume NULL -#endif - -static struct platform_driver ne_driver = { - .remove = ne_drv_remove, - .suspend = ne_drv_suspend, - .resume = ne_drv_resume, - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - }, -}; - -static void __init ne_add_devices(void) -{ - int this_dev; - struct platform_device *pdev; - - for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - if (pdev_ne[this_dev]) - continue; - pdev = platform_device_register_simple( - DRV_NAME, this_dev, NULL, 0); - if (IS_ERR(pdev)) - continue; - pdev_ne[this_dev] = pdev; - } -} - -#ifdef MODULE -int __init init_module(void) -{ - int retval; - ne_add_devices(); - retval = platform_driver_probe(&ne_driver, ne_drv_probe); - if (retval) { - if (io[0] == 0) - printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\"" - " value(s) for ISA cards.\n"); - ne_loop_rm_unreg(1); - return retval; - } - - /* Unregister unused platform_devices. */ - ne_loop_rm_unreg(0); - return retval; -} -#else /* MODULE */ -static int __init ne_init(void) -{ - int retval = platform_driver_probe(&ne_driver, ne_drv_probe); - - /* Unregister unused platform_devices. */ - ne_loop_rm_unreg(0); - return retval; -} -module_init(ne_init); - -struct net_device * __init ne_probe(int unit) -{ - int this_dev; - struct net_device *dev; - - /* Find an empty slot, that is no net_device and zero io port. 
*/ - this_dev = 0; - while ((pdev_ne[this_dev] && platform_get_drvdata(pdev_ne[this_dev])) || - io[this_dev]) { - if (++this_dev == MAX_NE_CARDS) - return ERR_PTR(-ENOMEM); - } - - /* Get irq, io from kernel command line */ - dev = alloc_eip_netdev(); - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - io[this_dev] = dev->base_addr; - irq[this_dev] = dev->irq; - bad[this_dev] = dev->mem_end; - - free_netdev(dev); - - ne_add_devices(); - - /* return the first device found */ - for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - if (pdev_ne[this_dev]) { - dev = platform_get_drvdata(pdev_ne[this_dev]); - if (dev) - return dev; - } - } - - return ERR_PTR(-ENODEV); -} -#endif /* MODULE */ - -static void __exit ne_exit(void) -{ - platform_driver_unregister(&ne_driver); - ne_loop_rm_unreg(1); -} -module_exit(ne_exit); diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c deleted file mode 100644 index 70cdc69..0000000 --- a/drivers/net/ne2.c +++ /dev/null @@ -1,799 +0,0 @@ -/* ne2.c: A NE/2 Ethernet Driver for Linux. */ -/* - Based on the NE2000 driver written by Donald Becker (1992-94). - modified by Wim Dumon (Apr 1996) - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as wimpie@linux.cc.kuleuven.ac.be - - Currently supported: NE/2 - This patch was never tested on other MCA-ethernet adapters, but it - might work. Just give it a try and let me know if you have problems. - Also mail me if it really works, please! - - Changelog: - Mon Feb 3 16:26:02 MET 1997 - - adapted the driver to work with the 2.1.25 kernel - - multiple ne2 support (untested) - - module support (untested) - - Fri Aug 28 00:18:36 CET 1998 (David Weinehall) - - fixed a few minor typos - - made the MODULE_PARM conditional (it only works with the v2.1.x kernels) - - fixed the module support (Now it's working...) - - Mon Sep 7 19:01:44 CET 1998 (David Weinehall) - - added support for Arco Electronics AE/2-card (experimental) - - Mon Sep 14 09:53:42 CET 1998 (David Weinehall) - - added support for Compex ENET-16MC/P (experimental) - - Tue Sep 15 16:21:12 CET 1998 (David Weinehall, Magnus Jonsson, Tomas Ogren) - - Miscellaneous bugfixes - - Tue Sep 19 16:21:12 CET 1998 (Magnus Jonsson) - - Cleanup - - Wed Sep 23 14:33:34 CET 1998 (David Weinehall) - - Restructuring and rewriting for v2.1.x compliance - - Wed Oct 14 17:19:21 CET 1998 (David Weinehall) - - Added code that unregisters irq and proc-info - - Version# bump - - Mon Nov 16 15:28:23 CET 1998 (Wim Dumon) - - pass 'dev' as last parameter of request_irq in stead of 'NULL' - - Wed Feb 7 21:24:00 CET 2001 (Alfred Arnold) - - added support for the D-Link DE-320CT - - * WARNING - ------- - This is alpha-test software. It is not guaranteed to work. As a - matter of fact, I'm quite sure there are *LOTS* of bugs in here. I - would like to hear from you if you use this driver, even if it works. - If it doesn't work, be sure to send me a mail with the problems ! -*/ - -static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon \n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "8390.h" - -#define DRV_NAME "ne2" - -/* Some defines that people can play with if so inclined. */ - -/* Do we perform extra sanity checks on stuff ? 
*/ -/* #define NE_SANITY_CHECK */ - -/* Do we implement the read before write bugfix ? */ -/* #define NE_RW_BUGFIX */ - -/* Do we have a non std. amount of memory? (in units of 256 byte pages) */ -/* #define PACKETBUF_MEMSIZE 0x40 */ - - -/* ---- No user-serviceable parts below ---- */ - -#define NE_BASE (dev->base_addr) -#define NE_CMD 0x00 -#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ -#define NE_RESET 0x20 /* Issue a read to reset, a write to clear. */ -#define NE_IO_EXTENT 0x30 - -#define NE1SM_START_PG 0x20 /* First page of TX buffer */ -#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ -#define NESM_START_PG 0x40 /* First page of TX buffer */ -#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ - -/* From the .ADF file: */ -static unsigned int addresses[7] __initdata = - {0x1000, 0x2020, 0x8020, 0xa0a0, 0xb0b0, 0xc0c0, 0xc3d0}; -static int irqs[4] __initdata = {3, 4, 5, 9}; - -/* From the D-Link ADF file: */ -static unsigned int dlink_addresses[4] __initdata = - {0x300, 0x320, 0x340, 0x360}; -static int dlink_irqs[8] __initdata = {3, 4, 5, 9, 10, 11, 14, 15}; - -struct ne2_adapters_t { - unsigned int id; - char *name; -}; - -static struct ne2_adapters_t ne2_adapters[] __initdata = { - { 0x6354, "Arco Ethernet Adapter AE/2" }, - { 0x70DE, "Compex ENET-16 MC/P" }, - { 0x7154, "Novell Ethernet Adapter NE/2" }, - { 0x56ea, "D-Link DE-320CT" }, - { 0x0000, NULL } -}; - -extern int netcard_probe(struct net_device *dev); - -static int ne2_probe1(struct net_device *dev, int slot); - -static void ne_reset_8390(struct net_device *dev); -static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void ne_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ne_block_output(struct net_device *dev, const int count, - const unsigned char *buf, const int start_page); - - -/* - * special code to read the DE-320's MAC address EEPROM. In contrast to a - * standard NE design, this is a serial EEPROM (93C46) that has to be read - * bit by bit. The EEPROM cotrol port at base + 0x1e has the following - * layout: - * - * Bit 0 = Data out (read from EEPROM) - * Bit 1 = Data in (write to EEPROM) - * Bit 2 = Clock - * Bit 3 = Chip Select - * Bit 7 = ~50 kHz clock for defined delays - * - */ - -static void __init dlink_put_eeprom(unsigned char value, unsigned int addr) -{ - int z; - unsigned char v1, v2; - - /* write the value to the NIC EEPROM register */ - - outb(value, addr + 0x1e); - - /* now wait the clock line to toggle twice. 
Effectively, we are - waiting (at least) for one clock cycle */ - - for (z = 0; z < 2; z++) { - do { - v1 = inb(addr + 0x1e); - v2 = inb(addr + 0x1e); - } - while (!((v1 ^ v2) & 0x80)); - } -} - -static void __init dlink_send_eeprom_bit(unsigned int bit, unsigned int addr) -{ - /* shift data bit into correct position */ - - bit = bit << 1; - - /* write value, keep clock line high for two cycles */ - - dlink_put_eeprom(0x09 | bit, addr); - dlink_put_eeprom(0x0d | bit, addr); - dlink_put_eeprom(0x0d | bit, addr); - dlink_put_eeprom(0x09 | bit, addr); -} - -static void __init dlink_send_eeprom_word(unsigned int value, unsigned int len, unsigned int addr) -{ - int z; - - /* adjust bits so that they are left-aligned in a 16-bit-word */ - - value = value << (16 - len); - - /* shift bits out to the EEPROM */ - - for (z = 0; z < len; z++) { - dlink_send_eeprom_bit((value & 0x8000) >> 15, addr); - value = value << 1; - } -} - -static unsigned int __init dlink_get_eeprom(unsigned int eeaddr, unsigned int addr) -{ - int z; - unsigned int value = 0; - - /* pull the CS line low for a moment. This resets the EEPROM- - internal logic, and makes it ready for a new command. */ - - dlink_put_eeprom(0x01, addr); - dlink_put_eeprom(0x09, addr); - - /* send one start bit, read command (1 - 0), plus the address to - the EEPROM */ - - dlink_send_eeprom_word(0x0180 | (eeaddr & 0x3f), 9, addr); - - /* get the data word. We clock by sending 0s to the EEPROM, which - get ignored during the read process */ - - for (z = 0; z < 16; z++) { - dlink_send_eeprom_bit(0, addr); - value = (value << 1) | (inb(addr + 0x1e) & 0x01); - } - - return value; -} - -/* - * Note that at boot, this probe only picks up one card at a time. - */ - -static int __init do_ne2_probe(struct net_device *dev) -{ - static int current_mca_slot = -1; - int i; - int adapter_found = 0; - - /* Do not check any supplied i/o locations. - POS registers usually don't fail :) */ - - /* MCA cards have POS registers. - Autodetecting MCA cards is extremely simple. - Just search for the card. 
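
Stepping back to the D-Link EEPROM helpers above: the station address occupies EEPROM words 0-2, stored low byte first, and ne2_probe1() further down reads it exactly that way. A minimal sketch of that step using only the helpers defined above (the wrapper name dlink_read_mac is hypothetical):

	static void __init dlink_read_mac(unsigned char mac[6], unsigned int addr)
	{
		int i;

		for (i = 0; i < 3; i++) {
			unsigned int v = dlink_get_eeprom(i, addr);

			mac[i * 2]     = v & 0xff;        /* low byte of the word */
			mac[i * 2 + 1] = (v >> 8) & 0xff; /* then the high byte   */
		}
	}
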
*/ - - for(i = 0; (ne2_adapters[i].name != NULL) && !adapter_found; i++) { - current_mca_slot = - mca_find_unused_adapter(ne2_adapters[i].id, 0); - - if((current_mca_slot != MCA_NOTFOUND) && !adapter_found) { - int res; - mca_set_adapter_name(current_mca_slot, - ne2_adapters[i].name); - mca_mark_as_used(current_mca_slot); - - res = ne2_probe1(dev, current_mca_slot); - if (res) - mca_mark_as_unused(current_mca_slot); - return res; - } - } - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init ne2_probe(int unit) -{ - struct net_device *dev = alloc_eip_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_ne2_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static int ne2_procinfo(char *buf, int slot, struct net_device *dev) -{ - int len=0; - - len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" ); - len += sprintf(buf+len, "Driver written by Wim Dumon "); - len += sprintf(buf+len, "\n"); - len += sprintf(buf+len, "Modified by "); - len += sprintf(buf+len, "David Weinehall \n"); - len += sprintf(buf+len, "and by Magnus Jonsson \n"); - len += sprintf(buf+len, "Based on the original NE2000 drivers\n" ); - len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr); - len += sprintf(buf+len, "IRQ : %d\n", dev->irq); - len += sprintf(buf+len, "HW addr : %pM\n", dev->dev_addr); - - return len; -} - -static int __init ne2_probe1(struct net_device *dev, int slot) -{ - int i, base_addr, irq, retval; - unsigned char POS; - unsigned char SA_prom[32]; - const char *name = "NE/2"; - int start_page, stop_page; - static unsigned version_printed; - - if (ei_debug && version_printed++ == 0) - printk(version); - - printk("NE/2 ethercard found in slot %d:", slot); - - /* Read base IO and IRQ from the POS-registers */ - POS = mca_read_stored_pos(slot, 2); - if(!(POS % 2)) { - printk(" disabled.\n"); - return -ENODEV; - } - - /* handle different POS register structure for D-Link card */ - - if (mca_read_stored_pos(slot, 0) == 0xea) { - base_addr = dlink_addresses[(POS >> 5) & 0x03]; - irq = dlink_irqs[(POS >> 2) & 0x07]; - } - else { - i = (POS & 0xE)>>1; - /* printk("Halleluja sdog, als er na de pijl een 1 staat is 1 - 1 == 0" - " en zou het moeten werken -> %d\n", i); - The above line was for remote testing, thanx to sdog ... */ - base_addr = addresses[i - 1]; - irq = irqs[(POS & 0x60)>>5]; - } - - if (!request_region(base_addr, NE_IO_EXTENT, DRV_NAME)) - return -EBUSY; - -#ifdef DEBUG - printk("POS info : pos 2 = %#x ; base = %#x ; irq = %ld\n", POS, - base_addr, irq); -#endif - -#ifndef CRYNWR_WAY - /* Reset the card the way they do it in the Crynwr packet driver */ - for (i=0; i<8; i++) - outb(0x0, base_addr + NE_RESET); - inb(base_addr + NE_RESET); - outb(0x21, base_addr + NE_CMD); - if (inb(base_addr + NE_CMD) != 0x21) { - printk("NE/2 adapter not responding\n"); - retval = -ENODEV; - goto out; - } - - /* In the crynwr sources they do a RAM-test here. I skip it. I suppose - my RAM is okay. Suppose your memory is broken. Then this test - should fail and you won't be able to use your card. But if I do not - test, you won't be able to use your card, neither. So this test - won't help you. */ - -#else /* _I_ never tested it this way .. Go ahead and try ...*/ - /* Reset card. Who knows what dain-bramaged state it was left in. 
*/ - { - unsigned long reset_start_time = jiffies; - - /* DON'T change these to inb_p/outb_p or reset will fail on - clones.. */ - outb(inb(base_addr + NE_RESET), base_addr + NE_RESET); - - while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, reset_start_time + 2*HZ/100)) { - printk(" not found (no reset ack).\n"); - retval = -ENODEV; - goto out; - } - - outb_p(0xff, base_addr + EN0_ISR); /* Ack all intr. */ - } -#endif - - - /* Read the 16 bytes of station address PROM. - We must first initialize registers, similar to - NS8390p_init(eifdev, 0). - We can't reliably read the SAPROM address without this. - (I learned the hard way!). */ - { - struct { - unsigned char value, offset; - } program_seq[] = { - /* Select page 0 */ - {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, - {0x49, EN0_DCFG}, /* Set WORD-wide (0x49) access. */ - {0x00, EN0_RCNTLO}, /* Clear the count regs. */ - {0x00, EN0_RCNTHI}, - {0x00, EN0_IMR}, /* Mask completion irq. */ - {0xFF, EN0_ISR}, - {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ - {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ - {32, EN0_RCNTLO}, - {0x00, EN0_RCNTHI}, - {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ - {0x00, EN0_RSARHI}, - {E8390_RREAD+E8390_START, E8390_CMD}, - }; - - for (i = 0; i < ARRAY_SIZE(program_seq); i++) - outb_p(program_seq[i].value, base_addr + - program_seq[i].offset); - - } - for(i = 0; i < 6 /*sizeof(SA_prom)*/; i+=1) { - SA_prom[i] = inb(base_addr + NE_DATAPORT); - } - - /* I don't know whether the previous sequence includes the general - board reset procedure, so better don't omit it and just overwrite - the garbage read from a DE-320 with correct stuff. */ - - if (mca_read_stored_pos(slot, 0) == 0xea) { - unsigned int v; - - for (i = 0; i < 3; i++) { - v = dlink_get_eeprom(i, base_addr); - SA_prom[(i << 1) ] = v & 0xff; - SA_prom[(i << 1) + 1] = (v >> 8) & 0xff; - } - } - - start_page = NESM_START_PG; - stop_page = NESM_STOP_PG; - - dev->irq=irq; - - /* Snarf the interrupt now. There's no point in waiting since we cannot - share and the board will usually be enabled. */ - retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev); - if (retval) { - printk (" unable to get IRQ %d (irqval=%d).\n", - dev->irq, retval); - goto out; - } - - dev->base_addr = base_addr; - - for(i = 0; i < ETHER_ADDR_LEN; i++) - dev->dev_addr[i] = SA_prom[i]; - - printk(" %pM\n", dev->dev_addr); - - printk("%s: %s found at %#x, using IRQ %d.\n", - dev->name, name, base_addr, dev->irq); - - mca_set_adapter_procfn(slot, (MCA_ProcFn) ne2_procinfo, dev); - - ei_status.name = name; - ei_status.tx_start_page = start_page; - ei_status.stop_page = stop_page; - ei_status.word16 = (2 == 2); - - ei_status.rx_start_page = start_page + TX_PAGES; -#ifdef PACKETBUF_MEMSIZE - /* Allow the packet buffer size to be overridden by know-it-alls. */ - ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; -#endif - - ei_status.reset_8390 = &ne_reset_8390; - ei_status.block_input = &ne_block_input; - ei_status.block_output = &ne_block_output; - ei_status.get_8390_hdr = &ne_get_8390_hdr; - - ei_status.priv = slot; - - dev->netdev_ops = &eip_netdev_ops; - NS8390p_init(dev, 0); - - retval = register_netdev(dev); - if (retval) - goto out1; - return 0; -out1: - mca_set_adapter_procfn( ei_status.priv, NULL, NULL); - free_irq(dev->irq, dev); -out: - release_region(base_addr, NE_IO_EXTENT); - return retval; -} - -/* Hard reset the card. 
This used to pause for the same period that a - 8390 reset command required, but that shouldn't be necessary. */ -static void ne_reset_8390(struct net_device *dev) -{ - unsigned long reset_start_time = jiffies; - - if (ei_debug > 1) - printk("resetting the 8390 t=%ld...", jiffies); - - /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ - outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); - - ei_status.txing = 0; - ei_status.dmaing = 0; - - /* This check _should_not_ be necessary, omit eventually. */ - while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, reset_start_time + 2*HZ/100)) { - printk("%s: ne_reset_8390() did not complete.\n", - dev->name); - break; - } - outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page) -{ - - int nic_base = dev->base_addr; - - /* This *shouldn't* happen. - If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne_get_8390_hdr " - "[DMAstat:%d][irqlock:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - - ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); - outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); - outb_p(0, nic_base + EN0_RCNTHI); - outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ - outb_p(ring_page, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - - if (ei_status.word16) - insw(NE_BASE + NE_DATAPORT, hdr, - sizeof(struct e8390_pkt_hdr)>>1); - else - insb(NE_BASE + NE_DATAPORT, hdr, - sizeof(struct e8390_pkt_hdr)); - - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -/* Block input and output, similar to the Crynwr packet driver. If you - are porting to a new ethercard, look at the packet driver source for - hints. The NEx000 doesn't share the on-board packet memory -- you have - to put the packet out through the "remote DMA" dataport using outb. */ - -static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset) -{ -#ifdef NE_SANITY_CHECK - int xfer_count = count; -#endif - int nic_base = dev->base_addr; - char *buf = skb->data; - - /* This *shouldn't* happen. - If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne_block_input " - "[DMAstat:%d][irqlock:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); - outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) { - insw(NE_BASE + NE_DATAPORT,buf,count>>1); - if (count & 0x01) { - buf[count-1] = inb(NE_BASE + NE_DATAPORT); -#ifdef NE_SANITY_CHECK - xfer_count++; -#endif - } - } else { - insb(NE_BASE + NE_DATAPORT, buf, count); - } - -#ifdef NE_SANITY_CHECK - /* This was for the ALPHA version only, but enough people have - been encountering problems so it is still here. 
If you see - this message you either 1) have a slightly incompatible clone - or 2) have noise/speed problems with your bus. */ - if (ei_debug > 1) { /* DMA termination address check... */ - int addr, tries = 20; - do { - /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here - -- it's broken for Rx on some cards! */ - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - addr = (high << 8) + low; - if (((ring_offset + xfer_count) & 0xff) == low) - break; - } while (--tries > 0); - if (tries <= 0) - printk("%s: RX transfer address mismatch," - "%#4.4x (expected) vs. %#4.4x (actual).\n", - dev->name, ring_offset + xfer_count, addr); - } -#endif - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -static void ne_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page) -{ - int nic_base = NE_BASE; - unsigned long dma_start; -#ifdef NE_SANITY_CHECK - int retries = 0; -#endif - - /* Round the count up for word writes. Do we need to do this? - What effect will an odd byte count have on the 8390? - I should check someday. */ - if (ei_status.word16 && (count & 0x01)) - count++; - - /* This *shouldn't* happen. - If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne_block_output." - "[DMAstat:%d][irqlock:%d]\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - /* We should already be in page 0, but to be safe... */ - outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); - -#ifdef NE_SANITY_CHECK -retry: -#endif - -#ifdef NE8390_RW_BUGFIX - /* Handle the read-before-write bug the same way as the - Crynwr packet driver -- the NatSemi method doesn't work. - Actually this doesn't always work either, but if you have - problems with your NEx000 this is better than nothing! */ - outb_p(0x42, nic_base + EN0_RCNTLO); - outb_p(0x00, nic_base + EN0_RCNTHI); - outb_p(0x42, nic_base + EN0_RSARLO); - outb_p(0x00, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); - /* Make certain that the dummy read has occurred. */ - SLOW_DOWN_IO; - SLOW_DOWN_IO; - SLOW_DOWN_IO; -#endif - - outb_p(ENISR_RDC, nic_base + EN0_ISR); - - /* Now the normal output. */ - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(0x00, nic_base + EN0_RSARLO); - outb_p(start_page, nic_base + EN0_RSARHI); - - outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) { - outsw(NE_BASE + NE_DATAPORT, buf, count>>1); - } else { - outsb(NE_BASE + NE_DATAPORT, buf, count); - } - - dma_start = jiffies; - -#ifdef NE_SANITY_CHECK - /* This was for the ALPHA version only, but enough people have - been encountering problems so it is still here. */ - - if (ei_debug > 1) { /* DMA termination address check... */ - int addr, tries = 20; - do { - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - addr = (high << 8) + low; - if ((start_page << 8) + count == addr) - break; - } while (--tries > 0); - if (tries <= 0) { - printk("%s: Tx packet transfer address mismatch," - "%#4.4x (expected) vs. 
%#4.4x (actual).\n", - dev->name, (start_page << 8) + count, addr); - if (retries++ == 0) - goto retry; - } - } -#endif - - while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) - if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ - printk("%s: timeout waiting for Tx RDC.\n", dev->name); - ne_reset_8390(dev); - NS8390p_init(dev, 1); - break; - } - - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - - -#ifdef MODULE -#define MAX_NE_CARDS 4 /* Max number of NE cards per module */ -static struct net_device *dev_ne[MAX_NE_CARDS]; -static int io[MAX_NE_CARDS]; -static int irq[MAX_NE_CARDS]; -static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */ -MODULE_LICENSE("GPL"); - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(bad, int, NULL, 0); -MODULE_PARM_DESC(io, "(ignored)"); -MODULE_PARM_DESC(irq, "(ignored)"); -MODULE_PARM_DESC(bad, "(ignored)"); - -/* Module code fixed by David Weinehall */ - -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - dev = alloc_eip_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->mem_end = bad[this_dev]; - dev->base_addr = io[this_dev]; - if (do_ne2_probe(dev) == 0) { - dev_ne[found++] = dev; - continue; - } - free_netdev(dev); - break; - } - if (found) - return 0; - printk(KERN_WARNING "ne2.c: No NE/2 card found\n"); - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - mca_mark_as_unused(ei_status.priv); - mca_set_adapter_procfn( ei_status.priv, NULL, NULL); - free_irq(dev->irq, dev); - release_region(dev->base_addr, NE_IO_EXTENT); -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - struct net_device *dev = dev_ne[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c deleted file mode 100644 index 3c333cb..0000000 --- a/drivers/net/ne2k-pci.c +++ /dev/null @@ -1,726 +0,0 @@ -/* ne2k-pci.c: A NE2000 clone on PCI bus driver for Linux. */ -/* - A Linux device driver for PCI NE2000 clones. - - Authors and other copyright holders: - 1992-2000 by Donald Becker, NE2000 core and various modifications. - 1995-1998 by Paul Gortmaker, core modifications and PCI support. - Copyright 1993 assigned to the United States Government as represented - by the Director, National Security Agency. - - This software may be used and distributed according to the terms of - the GNU General Public License (GPL), incorporated herein by reference. - Drivers based on or derived from this code fall under the GPL and must - retain the authorship, copyright and license notice. This file is not - a complete program and may only be used when the entire operating - system is licensed under the GPL. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - Issues remaining: - People are making PCI ne2000 clones! Oh the horror, the horror... - Limited full-duplex support. -*/ - -#define DRV_NAME "ne2k-pci" -#define DRV_VERSION "1.03" -#define DRV_RELDATE "9/22/2003" - - -/* The user-configurable values. - These may be modified when a driver module is loaded.*/ - -static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. 
*/ - -#define MAX_UNITS 8 /* More are supported, limit only on options */ -/* Used to pass the full-duplex flag, etc. */ -static int full_duplex[MAX_UNITS]; -static int options[MAX_UNITS]; - -/* Force a non std. amount of memory. Units are 256 byte pages. */ -/* #define PACKETBUF_MEMSIZE 0x40 */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "8390.h" - -/* These identify the driver base version and may not be removed. */ -static const char version[] __devinitconst = - KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE - " D. Becker/P. Gortmaker\n"; - -#if defined(__powerpc__) -#define inl_le(addr) le32_to_cpu(inl(addr)) -#define inw_le(addr) le16_to_cpu(inw(addr)) -#endif - -#define PFX DRV_NAME ": " - -MODULE_AUTHOR("Donald Becker / Paul Gortmaker"); -MODULE_DESCRIPTION("PCI NE2000 clone driver"); -MODULE_LICENSE("GPL"); - -module_param(debug, int, 0); -module_param_array(options, int, NULL, 0); -module_param_array(full_duplex, int, NULL, 0); -MODULE_PARM_DESC(debug, "debug level (1-2)"); -MODULE_PARM_DESC(options, "Bit 5: full duplex"); -MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)"); - -/* Some defines that people can play with if so inclined. */ - -/* Use 32 bit data-movement operations instead of 16 bit. */ -#define USE_LONGIO - -/* Do we implement the read before write bugfix ? */ -/* #define NE_RW_BUGFIX */ - -/* Flags. We rename an existing ei_status field to store flags! */ -/* Thus only the low 8 bits are usable for non-init-time flags. */ -#define ne2k_flags reg0 -enum { - ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. */ - FORCE_FDX=0x20, /* User override. */ - REALTEK_FDX=0x40, HOLTEK_FDX=0x80, - STOP_PG_0x60=0x100, -}; - -enum ne2k_pci_chipsets { - CH_RealTek_RTL_8029 = 0, - CH_Winbond_89C940, - CH_Compex_RL2000, - CH_KTI_ET32P2, - CH_NetVin_NV5000SC, - CH_Via_86C926, - CH_SureCom_NE34, - CH_Winbond_W89C940F, - CH_Holtek_HT80232, - CH_Holtek_HT80229, - CH_Winbond_89C940_8c4a, -}; - - -static struct { - char *name; - int flags; -} pci_clone_list[] __devinitdata = { - {"RealTek RTL-8029", REALTEK_FDX}, - {"Winbond 89C940", 0}, - {"Compex RL2000", 0}, - {"KTI ET32P2", 0}, - {"NetVin NV5000SC", 0}, - {"Via 86C926", ONLY_16BIT_IO}, - {"SureCom NE34", 0}, - {"Winbond W89C940F", 0}, - {"Holtek HT80232", ONLY_16BIT_IO | HOLTEK_FDX}, - {"Holtek HT80229", ONLY_32BIT_IO | HOLTEK_FDX | STOP_PG_0x60 }, - {"Winbond W89C940(misprogrammed)", 0}, - {NULL,} -}; - - -static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = { - { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 }, - { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 }, - { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 }, - { 0x8e2e, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_KTI_ET32P2 }, - { 0x4a14, 0x5000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_NetVin_NV5000SC }, - { 0x1106, 0x0926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Via_86C926 }, - { 0x10bd, 0x0e34, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_SureCom_NE34 }, - { 0x1050, 0x5a5a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_W89C940F }, - { 0x12c3, 0x0058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Holtek_HT80232 }, - { 0x12c3, 0x5598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Holtek_HT80229 }, - { 0x8c4a, 0x1980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940_8c4a }, - { 0, } -}; -MODULE_DEVICE_TABLE(pci, ne2k_pci_tbl); - - -/* ---- No user-serviceable parts below ---- */ - -#define NE_BASE (dev->base_addr) -#define NE_CMD 0x00 -#define 
NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ -#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */ -#define NE_IO_EXTENT 0x20 - -#define NESM_START_PG 0x40 /* First page of TX buffer */ -#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ - - -static int ne2k_pci_open(struct net_device *dev); -static int ne2k_pci_close(struct net_device *dev); - -static void ne2k_pci_reset_8390(struct net_device *dev); -static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void ne2k_pci_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ne2k_pci_block_output(struct net_device *dev, const int count, - const unsigned char *buf, const int start_page); -static const struct ethtool_ops ne2k_pci_ethtool_ops; - - - -/* There is no room in the standard 8390 structure for extra info we need, - so we build a meta/outer-wrapper structure.. */ -struct ne2k_pci_card { - struct net_device *dev; - struct pci_dev *pci_dev; -}; - - - -/* - NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet - buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes - 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be - detected by their SA prefix. - - Reading the SAPROM from a word-wide card with the 8390 set in byte-wide - mode results in doubled values, which can be detected and compensated for. - - The probe is also responsible for initializing the card and filling - in the 'dev' and 'ei_status' structures. -*/ - -static const struct net_device_ops ne2k_netdev_ops = { - .ndo_open = ne2k_pci_open, - .ndo_stop = ne2k_pci_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *dev; - int i; - unsigned char SA_prom[32]; - int start_page, stop_page; - int irq, reg0, chip_idx = ent->driver_data; - static unsigned int fnd_cnt; - long ioaddr; - int flags = pci_clone_list[chip_idx].flags; - -/* when built into the kernel, we only print version if device is found */ -#ifndef MODULE - static int printed_version; - if (!printed_version++) - printk(version); -#endif - - fnd_cnt++; - - i = pci_enable_device (pdev); - if (i) - return i; - - ioaddr = pci_resource_start (pdev, 0); - irq = pdev->irq; - - if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { - dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n"); - return -ENODEV; - } - - if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { - dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n", - NE_IO_EXTENT, ioaddr); - return -EBUSY; - } - - reg0 = inb(ioaddr); - if (reg0 == 0xFF) - goto err_out_free_res; - - /* Do a preliminary verification that we have a 8390. */ - { - int regd; - outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); - regd = inb(ioaddr + 0x0d); - outb(0xff, ioaddr + 0x0d); - outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); - inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ - if (inb(ioaddr + EN0_COUNTER0) != 0) { - outb(reg0, ioaddr); - outb(regd, ioaddr + 0x0d); /* Restore the old values. 
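
The doubled-value effect described in the SAPROM comment above (a word-wide card read back with the 8390 in byte-wide mode repeats every PROM byte) can be detected mechanically. ne2k-pci sidesteps the issue by always programming word-wide access, so the following is an illustrative sketch only, in the spirit of the ISA-era ne.c probe; the helper name is hypothetical:

	/* If every byte of the 32-byte PROM dump appears twice in a row,
	 * the card is word-wide: keep one copy of each byte. */
	static int sa_prom_fold(unsigned char *prom)
	{
		int i;

		for (i = 0; i < 32; i += 2)
			if (prom[i] != prom[i + 1])
				return 0;	/* already byte-accurate */
		for (i = 0; i < 16; i++)
			prom[i] = prom[i * 2];	/* compensate for doubling */
		return 1;
	}
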
*/ - goto err_out_free_res; - } - } - - /* Allocate net_device, dev->priv; fill in 8390 specific dev fields. */ - dev = alloc_ei_netdev(); - if (!dev) { - dev_err(&pdev->dev, "cannot allocate ethernet device\n"); - goto err_out_free_res; - } - dev->netdev_ops = &ne2k_netdev_ops; - - SET_NETDEV_DEV(dev, &pdev->dev); - - /* Reset card. Who knows what dain-bramaged state it was left in. */ - { - unsigned long reset_start_time = jiffies; - - outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); - - /* This looks like a horrible timing loop, but it should never take - more than a few cycles. - */ - while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0) - /* Limit wait: '2' avoids jiffy roll-over. */ - if (jiffies - reset_start_time > 2) { - dev_err(&pdev->dev, - "Card failure (no reset ack).\n"); - goto err_out_free_netdev; - } - - outb(0xff, ioaddr + EN0_ISR); /* Ack all intr. */ - } - - /* Read the 16 bytes of station address PROM. - We must first initialize registers, similar to NS8390_init(eifdev, 0). - We can't reliably read the SAPROM address without this. - (I learned the hard way!). */ - { - struct {unsigned char value, offset; } program_seq[] = { - {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ - {0x49, EN0_DCFG}, /* Set word-wide access. */ - {0x00, EN0_RCNTLO}, /* Clear the count regs. */ - {0x00, EN0_RCNTHI}, - {0x00, EN0_IMR}, /* Mask completion irq. */ - {0xFF, EN0_ISR}, - {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ - {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ - {32, EN0_RCNTLO}, - {0x00, EN0_RCNTHI}, - {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ - {0x00, EN0_RSARHI}, - {E8390_RREAD+E8390_START, E8390_CMD}, - }; - for (i = 0; i < ARRAY_SIZE(program_seq); i++) - outb(program_seq[i].value, ioaddr + program_seq[i].offset); - - } - - /* Note: all PCI cards have at least 16 bit access, so we don't have - to check for 8 bit cards. Most cards permit 32 bit access. */ - if (flags & ONLY_32BIT_IO) { - for (i = 0; i < 4 ; i++) - ((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT)); - } else - for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++) - SA_prom[i] = inb(ioaddr + NE_DATAPORT); - - /* We always set the 8390 registers for word mode. */ - outb(0x49, ioaddr + EN0_DCFG); - start_page = NESM_START_PG; - - stop_page = flags & STOP_PG_0x60 ? 0x60 : NESM_STOP_PG; - - /* Set up the rest of the parameters. */ - dev->irq = irq; - dev->base_addr = ioaddr; - pci_set_drvdata(pdev, dev); - - ei_status.name = pci_clone_list[chip_idx].name; - ei_status.tx_start_page = start_page; - ei_status.stop_page = stop_page; - ei_status.word16 = 1; - ei_status.ne2k_flags = flags; - if (fnd_cnt < MAX_UNITS) { - if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX)) - ei_status.ne2k_flags |= FORCE_FDX; - } - - ei_status.rx_start_page = start_page + TX_PAGES; -#ifdef PACKETBUF_MEMSIZE - /* Allow the packet buffer size to be overridden by know-it-alls. 
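
For concreteness, the buffer split these assignments produce, assuming TX_PAGES is 12 as in the 8390 core of this era (two transmit slots of six 256-byte pages each) and the default NESM_STOP_PG (the Holtek HT80229 entry in pci_clone_list forces 0x60 instead):

	tx_start_page = NESM_START_PG   = 0x40  ->  TX buffers: pages 0x40-0x4B
	rx_start_page = 0x40 + TX_PAGES = 0x4C  ->  RX ring starts here
	stop_page     = NESM_STOP_PG    = 0x80  ->  RX ring: pages 0x4C-0x7F,
	                                            i.e. 52 pages = 13 KB
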
*/ - ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; -#endif - - ei_status.reset_8390 = &ne2k_pci_reset_8390; - ei_status.block_input = &ne2k_pci_block_input; - ei_status.block_output = &ne2k_pci_block_output; - ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr; - ei_status.priv = (unsigned long) pdev; - - dev->ethtool_ops = &ne2k_pci_ethtool_ops; - NS8390_init(dev, 0); - - memcpy(dev->dev_addr, SA_prom, dev->addr_len); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - - i = register_netdev(dev); - if (i) - goto err_out_free_netdev; - - printk("%s: %s found at %#lx, IRQ %d, %pM.\n", - dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq, - dev->dev_addr); - - return 0; - -err_out_free_netdev: - free_netdev (dev); -err_out_free_res: - release_region (ioaddr, NE_IO_EXTENT); - pci_set_drvdata (pdev, NULL); - return -ENODEV; - -} - -/* - * Magic incantation sequence for full duplex on the supported cards. - */ -static inline int set_realtek_fdx(struct net_device *dev) -{ - long ioaddr = dev->base_addr; - - outb(0xC0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 3 */ - outb(0xC0, ioaddr + 0x01); /* Enable writes to CONFIG3 */ - outb(0x40, ioaddr + 0x06); /* Enable full duplex */ - outb(0x00, ioaddr + 0x01); /* Disable writes to CONFIG3 */ - outb(E8390_PAGE0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 0 */ - return 0; -} - -static inline int set_holtek_fdx(struct net_device *dev) -{ - long ioaddr = dev->base_addr; - - outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20); - return 0; -} - -static int ne2k_pci_set_fdx(struct net_device *dev) -{ - if (ei_status.ne2k_flags & REALTEK_FDX) - return set_realtek_fdx(dev); - else if (ei_status.ne2k_flags & HOLTEK_FDX) - return set_holtek_fdx(dev); - - return -EOPNOTSUPP; -} - -static int ne2k_pci_open(struct net_device *dev) -{ - int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED, dev->name, dev); - if (ret) - return ret; - - if (ei_status.ne2k_flags & FORCE_FDX) - ne2k_pci_set_fdx(dev); - - ei_open(dev); - return 0; -} - -static int ne2k_pci_close(struct net_device *dev) -{ - ei_close(dev); - free_irq(dev->irq, dev); - return 0; -} - -/* Hard reset the card. This used to pause for the same period that a - 8390 reset command required, but that shouldn't be necessary. */ -static void ne2k_pci_reset_8390(struct net_device *dev) -{ - unsigned long reset_start_time = jiffies; - - if (debug > 1) printk("%s: Resetting the 8390 t=%ld...", - dev->name, jiffies); - - outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); - - ei_status.txing = 0; - ei_status.dmaing = 0; - - /* This check _should_not_ be necessary, omit eventually. */ - while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) - if (jiffies - reset_start_time > 2) { - printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name); - break; - } - outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - - long nic_base = dev->base_addr; - - /* This *shouldn't* happen. 
If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr " - "[DMAstat:%d][irqlock:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - - ei_status.dmaing |= 0x01; - outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); - outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); - outb(0, nic_base + EN0_RCNTHI); - outb(0, nic_base + EN0_RSARLO); /* On page boundary */ - outb(ring_page, nic_base + EN0_RSARHI); - outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); - - if (ei_status.ne2k_flags & ONLY_16BIT_IO) { - insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); - } else { - *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT)); - le16_to_cpus(&hdr->count); - } - - outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -/* Block input and output, similar to the Crynwr packet driver. If you - are porting to a new ethercard, look at the packet driver source for hints. - The NEx000 doesn't share the on-board packet memory -- you have to put - the packet out through the "remote DMA" dataport using outb. */ - -static void ne2k_pci_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - long nic_base = dev->base_addr; - char *buf = skb->data; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne2k_pci_block_input " - "[DMAstat:%d][irqlock:%d].\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - if (ei_status.ne2k_flags & ONLY_32BIT_IO) - count = (count + 3) & 0xFFFC; - outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); - outb(count & 0xff, nic_base + EN0_RCNTLO); - outb(count >> 8, nic_base + EN0_RCNTHI); - outb(ring_offset & 0xff, nic_base + EN0_RSARLO); - outb(ring_offset >> 8, nic_base + EN0_RSARHI); - outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); - - if (ei_status.ne2k_flags & ONLY_16BIT_IO) { - insw(NE_BASE + NE_DATAPORT,buf,count>>1); - if (count & 0x01) { - buf[count-1] = inb(NE_BASE + NE_DATAPORT); - } - } else { - insl(NE_BASE + NE_DATAPORT, buf, count>>2); - if (count & 3) { - buf += count & ~3; - if (count & 2) { - __le16 *b = (__le16 *)buf; - - *b++ = cpu_to_le16(inw(NE_BASE + NE_DATAPORT)); - buf = (char *)b; - } - if (count & 1) - *buf = inb(NE_BASE + NE_DATAPORT); - } - } - - outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -static void ne2k_pci_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page) -{ - long nic_base = NE_BASE; - unsigned long dma_start; - - /* On little-endian it's always safe to round the count up for - word writes. */ - if (ei_status.ne2k_flags & ONLY_32BIT_IO) - count = (count + 3) & 0xFFFC; - else - if (count & 0x01) - count++; - - /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - printk("%s: DMAing conflict in ne2k_pci_block_output." - "[DMAstat:%d][irqlock:%d]\n", - dev->name, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - /* We should already be in page 0, but to be safe... */ - outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); - -#ifdef NE8390_RW_BUGFIX - /* Handle the read-before-write bug the same way as the - Crynwr packet driver -- the NatSemi method doesn't work. 
- Actually this doesn't always work either, but if you have - problems with your NEx000 this is better than nothing! */ - outb(0x42, nic_base + EN0_RCNTLO); - outb(0x00, nic_base + EN0_RCNTHI); - outb(0x42, nic_base + EN0_RSARLO); - outb(0x00, nic_base + EN0_RSARHI); - outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); -#endif - outb(ENISR_RDC, nic_base + EN0_ISR); - - /* Now the normal output. */ - outb(count & 0xff, nic_base + EN0_RCNTLO); - outb(count >> 8, nic_base + EN0_RCNTHI); - outb(0x00, nic_base + EN0_RSARLO); - outb(start_page, nic_base + EN0_RSARHI); - outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); - if (ei_status.ne2k_flags & ONLY_16BIT_IO) { - outsw(NE_BASE + NE_DATAPORT, buf, count>>1); - } else { - outsl(NE_BASE + NE_DATAPORT, buf, count>>2); - if (count & 3) { - buf += count & ~3; - if (count & 2) { - __le16 *b = (__le16 *)buf; - - outw(le16_to_cpu(*b++), NE_BASE + NE_DATAPORT); - buf = (char *)b; - } - } - } - - dma_start = jiffies; - - while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) - if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */ - printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); - ne2k_pci_reset_8390(dev); - NS8390_init(dev,1); - break; - } - - outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -static void ne2k_pci_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct ei_device *ei = netdev_priv(dev); - struct pci_dev *pci_dev = (struct pci_dev *) ei->priv; - - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(pci_dev)); -} - -static const struct ethtool_ops ne2k_pci_ethtool_ops = { - .get_drvinfo = ne2k_pci_get_drvinfo, -}; - -static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - BUG_ON(!dev); - unregister_netdev(dev); - release_region(dev->base_addr, NE_IO_EXTENT); - free_netdev(dev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); -} - -#ifdef CONFIG_PM -static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata (pdev); - - netif_device_detach(dev); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static int ne2k_pci_resume (struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata (pdev); - int rc; - - pci_set_power_state(pdev, 0); - pci_restore_state(pdev); - - rc = pci_enable_device(pdev); - if (rc) - return rc; - - NS8390_init(dev, 1); - netif_device_attach(dev); - - return 0; -} - -#endif /* CONFIG_PM */ - - -static struct pci_driver ne2k_driver = { - .name = DRV_NAME, - .probe = ne2k_pci_init_one, - .remove = __devexit_p(ne2k_pci_remove_one), - .id_table = ne2k_pci_tbl, -#ifdef CONFIG_PM - .suspend = ne2k_pci_suspend, - .resume = ne2k_pci_resume, -#endif /* CONFIG_PM */ - -}; - - -static int __init ne2k_pci_init(void) -{ -/* when a module, this is printed whether or not devices are found in probe */ -#ifdef MODULE - printk(version); -#endif - return pci_register_driver(&ne2k_driver); -} - - -static void __exit ne2k_pci_cleanup(void) -{ - pci_unregister_driver (&ne2k_driver); -} - -module_init(ne2k_pci_init); -module_exit(ne2k_pci_cleanup); diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c deleted file mode 100644 index 243ed2a..0000000 --- a/drivers/net/ne3210.c +++ /dev/null @@ -1,347 +0,0 @@ -/* - ne3210.c - - Linux driver for Novell NE3210 EISA Network Adapter - - 
Copyright (C) 1998, Paul Gortmaker. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - Information and Code Sources: - - 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32) - 2) The existing myriad of other Linux 8390 drivers by Donald Becker. - 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file - - The NE3210 is an EISA shared memory NS8390 implementation. Shared - memory address > 1MB should work with this driver. - - Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched - around (or perhaps there are some defective/backwards cards ???) - - This driver WILL NOT WORK FOR THE NE3200 - it is completely different - and does not use an 8390 at all. - - Updated to EISA probing API 5/2003 by Marc Zyngier. -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "8390.h" - -#define DRV_NAME "ne3210" - -static void ne3210_reset_8390(struct net_device *dev); - -static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); -static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); -static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); - -#define NE3210_START_PG 0x00 /* First page of TX buffer */ -#define NE3210_STOP_PG 0x80 /* Last page +1 of RX ring */ - -#define NE3210_IO_EXTENT 0x20 -#define NE3210_SA_PROM 0x16 /* Start of e'net addr. */ -#define NE3210_RESET_PORT 0xc84 -#define NE3210_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */ - -#define NE3210_ADDR0 0x00 /* 3 byte vendor prefix */ -#define NE3210_ADDR1 0x00 -#define NE3210_ADDR2 0x1b - -#define NE3210_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */ -#define NE3210_CFG2 0xc90 -#define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1) - -/* - * You can OR any of the following bits together and assign it - * to NE3210_DEBUG to get verbose driver info during operation. - * Currently only the probe one is implemented. - */ - -#define NE3210_D_PROBE 0x01 -#define NE3210_D_RX_PKT 0x02 -#define NE3210_D_TX_PKT 0x04 -#define NE3210_D_IRQ 0x08 - -#define NE3210_DEBUG 0x0 - -static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; -static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0}; -static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"}; -static int ifmap_val[] __initdata = { - IF_PORT_10BASET, - IF_PORT_UNKNOWN, - IF_PORT_10BASE2, - IF_PORT_AUI, -}; - -static int __init ne3210_eisa_probe (struct device *device) -{ - unsigned long ioaddr, phys_mem; - int i, retval, port_index; - struct eisa_device *edev = to_eisa_device (device); - struct net_device *dev; - - /* Allocate dev->priv and fill in 8390 specific dev fields. 
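
The single configuration byte at NE3210_CFG2 packs the media type, IRQ and shared-memory selection; the probe below unpacks it through ifmap, irq_map and shmem_map. A worked decode for a hypothetical readback of 0x08:

	port_index = 0x08 >> 6                = 0          ->  ifmap[0] = "UTP"
	dev->irq   = irq_map[(0x08 >> 3) & 7] = irq_map[1] ->  IRQ 12
	phys_mem   = shmem_map[0x08 & 7] * 0x1000
	           = 0xff0 * 0x1000           = 0xff0000   (just under 16 MB, so
	                                                    the >1 MB overlap
	                                                    check below applies)
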
*/ - if (!(dev = alloc_ei_netdev ())) { - printk ("ne3210.c: unable to allocate memory for dev!\n"); - return -ENOMEM; - } - - SET_NETDEV_DEV(dev, device); - dev_set_drvdata(device, dev); - ioaddr = edev->base_addr; - - if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) { - retval = -EBUSY; - goto out; - } - - if (!request_region(ioaddr + NE3210_CFG1, - NE3210_CFG_EXTENT, DRV_NAME)) { - retval = -EBUSY; - goto out1; - } - -#if NE3210_DEBUG & NE3210_D_PROBE - printk("ne3210-debug: probe at %#x, ID %s\n", ioaddr, edev->id.sig); - printk("ne3210-debug: config regs: %#x %#x\n", - inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2)); -#endif - - port_index = inb(ioaddr + NE3210_CFG2) >> 6; - for(i = 0; i < ETHER_ADDR_LEN; i++) - dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); - printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n", - edev->slot, ifmap[port_index], dev->dev_addr); - - /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */ - dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07]; - printk("ne3210.c: using IRQ %d, ", dev->irq); - - retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); - if (retval) { - printk (" unable to get IRQ %d.\n", dev->irq); - goto out2; - } - - phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000; - - /* - BEWARE!! Some dain-bramaged EISA SCUs will allow you to put - the card mem within the region covered by `normal' RAM !!! - */ - if (phys_mem > 1024*1024) { /* phys addr > 1MB */ - if (phys_mem < virt_to_phys(high_memory)) { - printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n"); - printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n"); - printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n", - (u64)virt_to_phys(high_memory)); - printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n"); - retval = -EINVAL; - goto out3; - } - } - - if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) { - printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n", - phys_mem); - goto out3; - } - - printk("%dkB memory at physical address %#lx\n", - NE3210_STOP_PG/4, phys_mem); - - ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100); - if (!ei_status.mem) { - printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n"); - printk(KERN_ERR "ne3210.c: Driver NOT installed.\n"); - retval = -EAGAIN; - goto out4; - } - printk("ne3210.c: remapped %dkB card memory to virtual address %p\n", - NE3210_STOP_PG/4, ei_status.mem); - dev->mem_start = (unsigned long)ei_status.mem; - dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256; - - /* The 8390 offset is zero for the NE3210 */ - dev->base_addr = ioaddr; - - ei_status.name = "NE3210"; - ei_status.tx_start_page = NE3210_START_PG; - ei_status.rx_start_page = NE3210_START_PG + TX_PAGES; - ei_status.stop_page = NE3210_STOP_PG; - ei_status.word16 = 1; - ei_status.priv = phys_mem; - - if (ei_debug > 0) - printk("ne3210 loaded.\n"); - - ei_status.reset_8390 = &ne3210_reset_8390; - ei_status.block_input = &ne3210_block_input; - ei_status.block_output = &ne3210_block_output; - ei_status.get_8390_hdr = &ne3210_get_8390_hdr; - - dev->netdev_ops = &ei_netdev_ops; - - dev->if_port = ifmap_val[port_index]; - - if ((retval = register_netdev (dev))) - goto out5; - - NS8390_init(dev, 0); - return 0; - - out5: - iounmap(ei_status.mem); - out4: - release_mem_region (phys_mem, NE3210_STOP_PG*0x100); - out3: - free_irq (dev->irq, dev); - out2: - release_region (ioaddr + 
NE3210_CFG1, NE3210_CFG_EXTENT); - out1: - release_region (ioaddr, NE3210_IO_EXTENT); - out: - free_netdev (dev); - - return retval; -} - -static int __devexit ne3210_eisa_remove (struct device *device) -{ - struct net_device *dev = dev_get_drvdata(device); - unsigned long ioaddr = to_eisa_device (device)->base_addr; - - unregister_netdev (dev); - iounmap(ei_status.mem); - release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100); - free_irq (dev->irq, dev); - release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT); - release_region (ioaddr, NE3210_IO_EXTENT); - free_netdev (dev); - - return 0; -} - -/* - * Reset by toggling the "Board Enable" bits (bit 2 and 0). - */ - -static void ne3210_reset_8390(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - - outb(0x04, ioaddr + NE3210_RESET_PORT); - if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name); - - mdelay(2); - - ei_status.txing = 0; - outb(0x01, ioaddr + NE3210_RESET_PORT); - if (ei_debug > 1) printk("reset done\n"); -} - -/* - * Note: In the following three functions is the implicit assumption - * that the associated memcpy will only use "rep; movsl" as long as - * we keep the counts as some multiple of doublewords. This is a - * requirement of the hardware, and also prevents us from using - * eth_io_copy_and_sum() since we can't guarantee it will limit - * itself to doubleword access. - */ - -/* - * Grab the 8390 specific header. Similar to the block_input routine, but - * we don't need to be concerned with ring wrap as the header will be at - * the start of a page, so we optimize accordingly. (A single doubleword.) - */ - -static void -ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8); - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ -} - -/* - * Block input and output are easy on shared memory ethercards, the only - * complication is when the ring buffer wraps. The count will already - * be rounded up to a doubleword value via ne3210_get_8390_hdr() above. - */ - -static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, - int ring_offset) -{ - void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256; - - if (ring_offset + count > NE3210_STOP_PG*256) { - /* Packet wraps over end of ring buffer. */ - int semi_count = NE3210_STOP_PG*256 - ring_offset; - memcpy_fromio(skb->data, start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, - ei_status.mem + TX_PAGES*256, count); - } else { - /* Packet is in one chunk. 
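
A worked case of the wrap arithmetic above, using the NE3210 layout (NE3210_STOP_PG is 0x80, so the shared-memory window ends at byte offset 0x8000, and the RX ring restarts at offset TX_PAGES*256):

	ring_offset = 0x7f80, count = 0x100     /* 256-byte packet near the top  */
	semi_count  = 0x80*256 - 0x7f80 = 0x80  /* copied up to the end of ring  */
	count      -= 0x80                      /* remaining 0x80 bytes copied   */
	                                        /* from ei_status.mem +          */
	                                        /* TX_PAGES*256, the ring base   */
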
*/ - memcpy_fromio(skb->data, start, count); - } -} - -static void ne3210_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8); - - count = (count + 3) & ~3; /* Round up to doubleword */ - memcpy_toio(shmem, buf, count); -} - -static struct eisa_device_id ne3210_ids[] = { - { "EGL0101" }, - { "NVL1801" }, - { "" }, -}; -MODULE_DEVICE_TABLE(eisa, ne3210_ids); - -static struct eisa_driver ne3210_eisa_driver = { - .id_table = ne3210_ids, - .driver = { - .name = "ne3210", - .probe = ne3210_eisa_probe, - .remove = __devexit_p (ne3210_eisa_remove), - }, -}; - -MODULE_DESCRIPTION("NE3210 EISA Ethernet driver"); -MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(eisa, ne3210_ids); - -static int ne3210_init(void) -{ - return eisa_driver_register (&ne3210_eisa_driver); -} - -static void ne3210_cleanup(void) -{ - eisa_driver_unregister (&ne3210_eisa_driver); -} - -module_init (ne3210_init); -module_exit (ne3210_cleanup); diff --git a/drivers/net/pcmcia/Kconfig b/drivers/net/pcmcia/Kconfig index e17ad95..72aa257 100644 --- a/drivers/net/pcmcia/Kconfig +++ b/drivers/net/pcmcia/Kconfig @@ -31,16 +31,6 @@ config PCMCIA_FMVJ18X To compile this driver as a module, choose M here: the module will be called fmvj18x_cs. If unsure, say N. -config PCMCIA_PCNET - tristate "NE2000 compatible PCMCIA support" - select CRC32 - help - Say Y here if you intend to attach an NE2000 compatible PCMCIA - (PC-card) Ethernet or Fast Ethernet card to your computer. - - To compile this driver as a module, choose M here: the module will be - called pcnet_cs. If unsure, say N. - config PCMCIA_SMC91C92 tristate "SMC 91Cxx PCMCIA support" select CRC32 @@ -61,17 +51,6 @@ config PCMCIA_XIRC2PS To compile this driver as a module, choose M here: the module will be called xirc2ps_cs. If unsure, say N. -config PCMCIA_AXNET - tristate "Asix AX88190 PCMCIA support" - ---help--- - Say Y here if you intend to attach an Asix AX88190-based PCMCIA - (PC-card) Fast Ethernet card to your computer. These cards are - nearly NE2000 compatible but need a separate driver due to a few - misfeatures. - - To compile this driver as a module, choose M here: the module will be - called axnet_cs. If unsure, say N. - config ARCNET_COM20020_CS tristate "COM20020 ARCnet PCMCIA support" depends on ARCNET_COM20020 diff --git a/drivers/net/pcmcia/Makefile b/drivers/net/pcmcia/Makefile index 985f0ae..c2b8b44 100644 --- a/drivers/net/pcmcia/Makefile +++ b/drivers/net/pcmcia/Makefile @@ -4,10 +4,8 @@ # 16-bit client drivers obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o -obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o obj-$(CONFIG_PCMCIA_XIRC2PS) += xirc2ps_cs.o obj-$(CONFIG_ARCNET_COM20020_CS)+= com20020_cs.o -obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o obj-$(CONFIG_PCMCIA_IBMTR) += ibmtr_cs.o diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c deleted file mode 100644 index 9953db7..0000000 --- a/drivers/net/pcmcia/axnet_cs.c +++ /dev/null @@ -1,1725 +0,0 @@ -/*====================================================================== - - A PCMCIA ethernet driver for Asix AX88190-based cards - - The Asix AX88190 is a NS8390-derived chipset with a few nasty - idiosyncracies that make it very inconvenient to support with a - standard 8390 driver. This driver is based on pcnet_cs, with the - tweaked 8390 code grafted on the end. 
Much of what I did was to - clean up and update a similar driver supplied by Asix, which was - adapted by William Lee, william@asix.com.tw. - - Copyright (C) 2001 David A. Hinds -- dahinds@users.sourceforge.net - - axnet_cs.c 1.28 2002/06/29 06:27:37 - - The network driver code is based on Donald Becker's NE2000 code: - - Written 1992,1993 by Donald Becker. - Copyright 1993 United States Government as represented by the - Director, National Security Agency. This software may be used and - distributed according to the terms of the GNU General Public License, - incorporated herein by reference. - Donald Becker may be reached at becker@scyld.com - -======================================================================*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../8390.h" - -#include -#include -#include -#include - -#include -#include -#include -#include - -#define AXNET_CMD 0x00 -#define AXNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */ -#define AXNET_RESET 0x1f /* Issue a read to reset, a write to clear. */ -#define AXNET_MII_EEP 0x14 /* Offset of MII access port */ -#define AXNET_TEST 0x15 /* Offset of TEST Register port */ -#define AXNET_GPIO 0x17 /* Offset of General Purpose Register Port */ - -#define AXNET_START_PG 0x40 /* First page of TX buffer */ -#define AXNET_STOP_PG 0x80 /* Last page +1 of RX ring */ - -#define AXNET_RDC_TIMEOUT 0x02 /* Max wait in jiffies for Tx RDC */ - -#define IS_AX88190 0x0001 -#define IS_AX88790 0x0002 - -/*====================================================================*/ - -/* Module parameters */ - -MODULE_AUTHOR("David Hinds "); -MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver"); -MODULE_LICENSE("GPL"); - - -/*====================================================================*/ - -static int axnet_config(struct pcmcia_device *link); -static void axnet_release(struct pcmcia_device *link); -static int axnet_open(struct net_device *dev); -static int axnet_close(struct net_device *dev); -static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static netdev_tx_t axnet_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static struct net_device_stats *get_stats(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static void axnet_tx_timeout(struct net_device *dev); -static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); -static void ei_watchdog(u_long arg); -static void axnet_reset_8390(struct net_device *dev); - -static int mdio_read(unsigned int addr, int phy_id, int loc); -static void mdio_write(unsigned int addr, int phy_id, int loc, int value); - -static void get_8390_hdr(struct net_device *, - struct e8390_pkt_hdr *, int); -static void block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void block_output(struct net_device *dev, int count, - const u_char *buf, const int start_page); - -static void axnet_detach(struct pcmcia_device *p_dev); - -static void AX88190_init(struct net_device *dev, int startp); -static int ax_open(struct net_device *dev); -static int ax_close(struct net_device *dev); -static irqreturn_t ax_interrupt(int irq, void *dev_id); - -/*====================================================================*/ - -typedef struct axnet_dev_t { - struct pcmcia_device *p_dev; - caddr_t base; - struct timer_list watchdog; - int stale, fast_poll; - u_short link_status; - u_char 
duplex_flag; - int phy_id; - int flags; - int active_low; -} axnet_dev_t; - -static inline axnet_dev_t *PRIV(struct net_device *dev) -{ - void *p = (char *)netdev_priv(dev) + sizeof(struct ei_device); - return p; -} - -static const struct net_device_ops axnet_netdev_ops = { - .ndo_open = axnet_open, - .ndo_stop = axnet_close, - .ndo_do_ioctl = axnet_ioctl, - .ndo_start_xmit = axnet_start_xmit, - .ndo_tx_timeout = axnet_tx_timeout, - .ndo_get_stats = get_stats, - .ndo_set_multicast_list = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int axnet_probe(struct pcmcia_device *link) -{ - axnet_dev_t *info; - struct net_device *dev; - struct ei_device *ei_local; - - dev_dbg(&link->dev, "axnet_attach()\n"); - - dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(axnet_dev_t)); - if (!dev) - return -ENOMEM; - - ei_local = netdev_priv(dev); - spin_lock_init(&ei_local->page_lock); - - info = PRIV(dev); - info->p_dev = link; - link->priv = dev; - link->config_flags |= CONF_ENABLE_IRQ; - - dev->netdev_ops = &axnet_netdev_ops; - - dev->watchdog_timeo = TX_TIMEOUT; - - return axnet_config(link); -} /* axnet_attach */ - -static void axnet_detach(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link); - - unregister_netdev(dev); - - axnet_release(link); - - free_netdev(dev); -} /* axnet_detach */ - -/*====================================================================== - - This probes for a card's hardware address by reading the PROM. - -======================================================================*/ - -static int get_prom(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - unsigned int ioaddr = dev->base_addr; - int i, j; - - /* This is based on drivers/net/ne.c */ - struct { - u_char value, offset; - } program_seq[] = { - {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ - {0x01, EN0_DCFG}, /* Set word-wide access. */ - {0x00, EN0_RCNTLO}, /* Clear the count regs. */ - {0x00, EN0_RCNTHI}, - {0x00, EN0_IMR}, /* Mask completion irq. */ - {0xFF, EN0_ISR}, - {E8390_RXOFF|0x40, EN0_RXCR}, /* 0x60 Set to monitor */ - {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ - {0x10, EN0_RCNTLO}, - {0x00, EN0_RCNTHI}, - {0x00, EN0_RSARLO}, /* DMA starting at 0x0400. 
*/ - {0x04, EN0_RSARHI}, - {E8390_RREAD+E8390_START, E8390_CMD}, - }; - - /* Not much of a test, but the alternatives are messy */ - if (link->config_base != 0x03c0) - return 0; - - axnet_reset_8390(dev); - mdelay(10); - - for (i = 0; i < ARRAY_SIZE(program_seq); i++) - outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); - - for (i = 0; i < 6; i += 2) { - j = inw(ioaddr + AXNET_DATAPORT); - dev->dev_addr[i] = j & 0xff; - dev->dev_addr[i+1] = j >> 8; - } - return 1; -} /* get_prom */ - -static int try_io_port(struct pcmcia_device *link) -{ - int j, ret; - link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; - if (link->resource[0]->end == 32) { - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - /* for master/slave multifunction cards */ - if (link->resource[1]->end > 0) - link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; - } else { - /* This should be two 16-port windows */ - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16; - } - if (link->resource[0]->start == 0) { - for (j = 0; j < 0x400; j += 0x20) { - link->resource[0]->start = j ^ 0x300; - link->resource[1]->start = (j ^ 0x300) + 0x10; - link->io_lines = 16; - ret = pcmcia_request_io(link); - if (ret == 0) - return ret; - } - return ret; - } else { - return pcmcia_request_io(link); - } -} - -static int axnet_configcheck(struct pcmcia_device *p_dev, void *priv_data) -{ - if (p_dev->config_index == 0) - return -EINVAL; - - p_dev->config_index = 0x05; - if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32) - return -ENODEV; - - return try_io_port(p_dev); -} - -static int axnet_config(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - axnet_dev_t *info = PRIV(dev); - int i, j, j2, ret; - - dev_dbg(&link->dev, "axnet_config(0x%p)\n", link); - - /* don't trust the CIS on this; Linksys got it wrong */ - link->config_regs = 0x63; - link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; - ret = pcmcia_loop_config(link, axnet_configcheck, NULL); - if (ret != 0) - goto failed; - - if (!link->irq) - goto failed; - - if (resource_size(link->resource[1]) == 8) - link->config_flags |= CONF_ENABLE_SPKR; - - ret = pcmcia_enable_device(link); - if (ret) - goto failed; - - dev->irq = link->irq; - dev->base_addr = link->resource[0]->start; - - if (!get_prom(link)) { - pr_notice("this is not an AX88190 card!\n"); - pr_notice("use pcnet_cs instead.\n"); - goto failed; - } - - ei_status.name = "AX88190"; - ei_status.word16 = 1; - ei_status.tx_start_page = AXNET_START_PG; - ei_status.rx_start_page = AXNET_START_PG + TX_PAGES; - ei_status.stop_page = AXNET_STOP_PG; - ei_status.reset_8390 = axnet_reset_8390; - ei_status.get_8390_hdr = get_8390_hdr; - ei_status.block_input = block_input; - ei_status.block_output = block_output; - - if (inb(dev->base_addr + AXNET_TEST) != 0) - info->flags |= IS_AX88790; - else - info->flags |= IS_AX88190; - - if (info->flags & IS_AX88790) - outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */ - - info->active_low = 0; - - for (i = 0; i < 32; i++) { - j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); - j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); - if (j == j2) continue; - if ((j != 0) && (j != 0xffff)) break; - } - - if (i == 32) { - /* Maybe PHY is in power down mode. (PPD_SET = 1) - Bit 2 of CCSR is active low. 
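  [For illustration: the retry below, like the first pass above it, is the
   standard "find a live PHY" idiom -- a present PHY returns a stable value
   other than 0x0000 or 0xffff, while a floating bus echoes the same junk
   from every register. A minimal standalone sketch, with mdio_read_reg()
   as a hypothetical stand-in for this driver's mdio_read():

	// hypothetical helper; stands in for this driver's mdio_read()
	extern int mdio_read_reg(int phy_addr, int reg);

	static int find_first_phy(void)
	{
		int addr, r1, r2;

		for (addr = 0; addr < 32; addr++) {
			r1 = mdio_read_reg(addr, 1);	// BMSR
			r2 = mdio_read_reg(addr, 2);	// PHYID1
			if (r1 == r2)	// floating bus: identical echoes
				continue;
			if (r1 != 0 && r1 != 0xffff)
				return addr;	// something real answered
		}
		return -1;		// no transceiver found
	}
  ]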
*/
-	pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
-	for (i = 0; i < 32; i++) {
-	    j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
-	    j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
-	    if (j == j2) continue;
-	    if ((j != 0) && (j != 0xffff)) {
-		info->active_low = 1;
-		break;
-	    }
-	}
-    }
-
-    info->phy_id = (i < 32) ? i : -1;
-    SET_NETDEV_DEV(dev, &link->dev);
-
-    if (register_netdev(dev) != 0) {
-	pr_notice("register_netdev() failed\n");
-	goto failed;
-    }
-
-    netdev_info(dev, "Asix AX88%d90: io %#3lx, irq %d, hw_addr %pM\n",
-		((info->flags & IS_AX88790) ? 7 : 1),
-		dev->base_addr, dev->irq, dev->dev_addr);
-    if (info->phy_id != -1) {
-	netdev_dbg(dev, "  MII transceiver at index %d, status %x\n",
-		   info->phy_id, j);
-    } else {
-	netdev_notice(dev, "  No MII transceivers found!\n");
-    }
-    return 0;
-
-failed:
-    axnet_release(link);
-    return -ENODEV;
-} /* axnet_config */
-
-static void axnet_release(struct pcmcia_device *link)
-{
-	pcmcia_disable_device(link);
-}
-
-static int axnet_suspend(struct pcmcia_device *link)
-{
-	struct net_device *dev = link->priv;
-
-	if (link->open)
-		netif_device_detach(dev);
-
-	return 0;
-}
-
-static int axnet_resume(struct pcmcia_device *link)
-{
-	struct net_device *dev = link->priv;
-	axnet_dev_t *info = PRIV(dev);
-
-	if (link->open) {
-		if (info->active_low == 1)
-			pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
-
-		axnet_reset_8390(dev);
-		AX88190_init(dev, 1);
-		netif_device_attach(dev);
-	}
-
-	return 0;
-}
-
-
-/*======================================================================
-
-    MII interface support
-
-======================================================================*/
-
-#define MDIO_SHIFT_CLK		0x01
-#define MDIO_DATA_WRITE0	0x00
-#define MDIO_DATA_WRITE1	0x08
-#define MDIO_DATA_READ		0x04
-#define MDIO_MASK		0x0f
-#define MDIO_ENB_IN		0x02
-
-static void mdio_sync(unsigned int addr)
-{
-    int bits;
-    for (bits = 0; bits < 32; bits++) {
-	outb_p(MDIO_DATA_WRITE1, addr);
-	outb_p(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
-    }
-}
-
-static int mdio_read(unsigned int addr, int phy_id, int loc)
-{
-    u_int cmd = (0xf6<<10)|(phy_id<<5)|loc;
-    int i, retval = 0;
-
-    mdio_sync(addr);
-    for (i = 14; i >= 0; i--) {
-	int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
-	outb_p(dat, addr);
-	outb_p(dat | MDIO_SHIFT_CLK, addr);
-    }
-    for (i = 19; i > 0; i--) {
-	outb_p(MDIO_ENB_IN, addr);
-	retval = (retval << 1) | ((inb_p(addr) & MDIO_DATA_READ) != 0);
-	outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
-    }
-    return (retval>>1) & 0xffff;
-}
-
-static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
-{
-    u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
-    int i;
-
-    mdio_sync(addr);
-    for (i = 31; i >= 0; i--) {
-	int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
-	outb_p(dat, addr);
-	outb_p(dat | MDIO_SHIFT_CLK, addr);
-    }
-    for (i = 1; i >= 0; i--) {
-	outb_p(MDIO_ENB_IN, addr);
-	outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
-    }
-}
-
-/*====================================================================*/
-
-static int axnet_open(struct net_device *dev)
-{
-    int ret;
-    axnet_dev_t *info = PRIV(dev);
-    struct pcmcia_device *link = info->p_dev;
-    unsigned int nic_base = dev->base_addr;
-
-    dev_dbg(&link->dev, "axnet_open('%s')\n", dev->name);
-
-    if (!pcmcia_dev_present(link))
-	return -ENODEV;
-
-    outb_p(0xFF, nic_base + EN0_ISR);	/* Clear bogus intr.
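  [Aside: the magic constants in mdio_read() and mdio_write() above encode
   a clause-22 MDIO frame, shifted out MSB first: start bits, a read or
   write opcode, a 5-bit PHY address and a 5-bit register address, then a
   turnaround before the 16 data bits. A hedged standalone sketch of the
   same packing (the driver also folds sync bits into its 0xf6 constant):

	#include <stdint.h>

	// pack the pre-turnaround half of a clause-22 MDIO read frame
	static uint32_t mdio_read_hdr(unsigned phy, unsigned reg)
	{
		return (0x1u << 12)		// start: 01
		     | (0x2u << 10)		// opcode: 10 = read
		     | ((phy & 0x1f) << 5)	// PHY address
		     | (reg & 0x1f);		// register address
	}
  ]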
*/ - ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev); - if (ret) - return ret; - - link->open++; - - info->link_status = 0x00; - init_timer(&info->watchdog); - info->watchdog.function = ei_watchdog; - info->watchdog.data = (u_long)dev; - info->watchdog.expires = jiffies + HZ; - add_timer(&info->watchdog); - - return ax_open(dev); -} /* axnet_open */ - -/*====================================================================*/ - -static int axnet_close(struct net_device *dev) -{ - axnet_dev_t *info = PRIV(dev); - struct pcmcia_device *link = info->p_dev; - - dev_dbg(&link->dev, "axnet_close('%s')\n", dev->name); - - ax_close(dev); - free_irq(dev->irq, dev); - - link->open--; - netif_stop_queue(dev); - del_timer_sync(&info->watchdog); - - return 0; -} /* axnet_close */ - -/*====================================================================== - - Hard reset the card. This used to pause for the same period that - a 8390 reset command required, but that shouldn't be necessary. - -======================================================================*/ - -static void axnet_reset_8390(struct net_device *dev) -{ - unsigned int nic_base = dev->base_addr; - int i; - - ei_status.txing = ei_status.dmaing = 0; - - outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD); - - outb(inb(nic_base + AXNET_RESET), nic_base + AXNET_RESET); - - for (i = 0; i < 100; i++) { - if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0) - break; - udelay(100); - } - outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */ - - if (i == 100) - netdev_err(dev, "axnet_reset_8390() did not complete\n"); - -} /* axnet_reset_8390 */ - -/*====================================================================*/ - -static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - PRIV(dev)->stale = 0; - return ax_interrupt(irq, dev_id); -} - -static void ei_watchdog(u_long arg) -{ - struct net_device *dev = (struct net_device *)(arg); - axnet_dev_t *info = PRIV(dev); - unsigned int nic_base = dev->base_addr; - unsigned int mii_addr = nic_base + AXNET_MII_EEP; - u_short link; - - if (!netif_device_present(dev)) goto reschedule; - - /* Check for pending interrupt with expired latency timer: with - this, we can limp along even if the interrupt is blocked */ - if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { - if (!info->fast_poll) - netdev_info(dev, "interrupt(s) dropped!\n"); - ei_irq_wrapper(dev->irq, dev); - info->fast_poll = HZ; - } - if (info->fast_poll) { - info->fast_poll--; - info->watchdog.expires = jiffies + 1; - add_timer(&info->watchdog); - return; - } - - if (info->phy_id < 0) - goto reschedule; - link = mdio_read(mii_addr, info->phy_id, 1); - if (!link || (link == 0xffff)) { - netdev_info(dev, "MII is missing!\n"); - info->phy_id = -1; - goto reschedule; - } - - link &= 0x0004; - if (link != info->link_status) { - u_short p = mdio_read(mii_addr, info->phy_id, 5); - netdev_info(dev, "%s link beat\n", link ? "found" : "lost"); - if (link) { - info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00; - if (p) - netdev_info(dev, "autonegotiation complete: %dbaseT-%cD selected\n", - (p & 0x0180) ? 100 : 10, (p & 0x0140) ? 
'F' : 'H'); - else - netdev_info(dev, "link partner did not autonegotiate\n"); - AX88190_init(dev, 1); - } - info->link_status = link; - } - -reschedule: - info->watchdog.expires = jiffies + HZ; - add_timer(&info->watchdog); -} - -/*====================================================================*/ - -static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - axnet_dev_t *info = PRIV(dev); - struct mii_ioctl_data *data = if_mii(rq); - unsigned int mii_addr = dev->base_addr + AXNET_MII_EEP; - switch (cmd) { - case SIOCGMIIPHY: - data->phy_id = info->phy_id; - case SIOCGMIIREG: /* Read MII PHY register. */ - data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); - return 0; - case SIOCSMIIREG: /* Write MII PHY register. */ - mdio_write(mii_addr, data->phy_id, data->reg_num & 0x1f, data->val_in); - return 0; - } - return -EOPNOTSUPP; -} - -/*====================================================================*/ - -static void get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, - int ring_page) -{ - unsigned int nic_base = dev->base_addr; - - outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ - outb_p(ring_page, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD); - - insw(nic_base + AXNET_DATAPORT, hdr, - sizeof(struct e8390_pkt_hdr)>>1); - /* Fix for big endian systems */ - hdr->count = le16_to_cpu(hdr->count); - -} - -/*====================================================================*/ - -static void block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - unsigned int nic_base = dev->base_addr; - int xfer_count = count; - char *buf = skb->data; - - if ((ei_debug > 4) && (count != 4)) - pr_debug("%s: [bi=%d]\n", dev->name, count+4); - outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); - outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD); - - insw(nic_base + AXNET_DATAPORT,buf,count>>1); - if (count & 0x01) - buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++; - -} - -/*====================================================================*/ - -static void block_output(struct net_device *dev, int count, - const u_char *buf, const int start_page) -{ - unsigned int nic_base = dev->base_addr; - - pr_debug("%s: [bo=%d]\n", dev->name, count); - - /* Round the count up for word writes. Do we need to do this? - What effect will an odd byte count have on the 8390? - I should check someday. 
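  [Aside: the rounding just below is the usual fix for 16-bit-wide I/O --
   an odd byte count cannot be pushed through outsw(), so it is padded to
   a whole number of words. Standalone sketch:

	// round an odd transfer length up to a 16-bit word boundary
	static int round_to_word(int count)
	{
		return (count + 1) & ~1;	// 59 -> 60, 60 stays 60
	}
  ]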
*/ - if (count & 0x01) - count++; - - outb_p(0x00, nic_base + EN0_RSARLO); - outb_p(start_page, nic_base + EN0_RSARHI); - outb_p(E8390_RWRITE+E8390_START, nic_base + AXNET_CMD); - outsw(nic_base + AXNET_DATAPORT, buf, count>>1); -} - -static const struct pcmcia_device_id axnet_ids[] = { - PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081), - PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301), - PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328), - PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301), - PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303), - PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309), - PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106), - PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), - PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), - PCMCIA_DEVICE_MANF_CARD(0xffff, 0x1090), - PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc), - PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef), - PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef), - PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1), - PCMCIA_DEVICE_PROD_ID12("CHEETAH ETHERCARD", "EN2228", 0x00fa7bc8, 0x00e990cc), - PCMCIA_DEVICE_PROD_ID12("CNet", "CNF301", 0xbc477dde, 0x78c5f40b), - PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5), - PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e), - PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXM", 0x5261440f, 0x3abbd061), - PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90), - PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2), - PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8), - PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609), - PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058), - PCMCIA_DEVICE_PROD_ID14("Network Everywhere", "AX88190", 0x820a67b6, 0xab9be5ef), - PCMCIA_DEVICE_NULL, -}; -MODULE_DEVICE_TABLE(pcmcia, axnet_ids); - -static struct pcmcia_driver axnet_cs_driver = { - .owner = THIS_MODULE, - .name = "axnet_cs", - .probe = axnet_probe, - .remove = axnet_detach, - .id_table = axnet_ids, - .suspend = axnet_suspend, - .resume = axnet_resume, -}; - -static int __init init_axnet_cs(void) -{ - return pcmcia_register_driver(&axnet_cs_driver); -} - -static void __exit exit_axnet_cs(void) -{ - pcmcia_unregister_driver(&axnet_cs_driver); -} - -module_init(init_axnet_cs); -module_exit(exit_axnet_cs); - -/*====================================================================*/ - -/* 8390.c: A general NS8390 ethernet driver core for linux. */ -/* - Written 1992-94 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - This is the chip-specific code for many 8390-based ethernet adaptors. - This is not a complete driver, it must be combined with board-specific - code such as ne.c, wd.c, 3c503.c, etc. 
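  [Aside: "combined with board-specific code" concretely means the core
   calls back through function pointers the board driver installs, as the
   interface comment further below spells out. A sketch of that contract
   with illustrative names, not the kernel's actual struct ei_device
   layout:

	// illustrative callback table; the real fields are reset_8390,
	// get_8390_hdr, block_input and block_output in struct ei_device
	struct board_ops {
		void (*reset)(void *board);
		void (*get_hdr)(void *board, unsigned char hdr[4],
				int ring_page);
		void (*rx_copy)(void *board, int count, char *dst,
				int ring_offset);
		void (*tx_copy)(void *board, int count, const char *src,
				int start_page);
	};
  ]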
-
-  Seeing how at least eight drivers use this code, (not counting the
-  PCMCIA ones either) it is easy to break some card by what seems like
-  a simple innocent change. Please contact me or Donald if you think
-  you have found something that needs changing. -- PG
-
-  Changelog:
-
-  Paul Gortmaker	: remove set_bit lock, other cleanups.
-  Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
-			  ei_block_input() for eth_io_copy_and_sum().
-  Paul Gortmaker	: exchange static int ei_pingpong for a #define,
-			  also add better Tx error handling.
-  Paul Gortmaker	: rewrite Rx overrun handling as per NS specs.
-  Alexey Kuznetsov	: use the 8390's six bit hash multicast filter.
-  Paul Gortmaker	: tweak ANK's above multicast changes a bit.
-  Paul Gortmaker	: update packet statistics for v2.1.x
-  Alan Cox		: support arbitrary stupid port mappings on the
-			  68K Macintosh. Support >16bit I/O spaces
-  Paul Gortmaker	: add kmod support for auto-loading of the 8390
-			  module by all drivers that require it.
-  Alan Cox		: Spinlocking work, added 'BUG_83C690'
-  Paul Gortmaker	: Separate out Tx timeout code from Tx path.
-
-  Sources:
-  The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
-
-  */
-
-#include <linux/bitops.h>
-#include <asm/irq.h>
-#include <linux/fcntl.h>
-#include <linux/in.h>
-#include <linux/interrupt.h>
-
-#define BUG_83C690
-
-/* These are the operational function interfaces to board-specific
-   routines.
-	void reset_8390(struct net_device *dev)
-		Resets the board associated with DEV, including a hardware reset of
-		the 8390.  This is only called when there is a transmit timeout, and
-		it is always followed by 8390_init().
-	void block_output(struct net_device *dev, int count, const unsigned char *buf,
-			  int start_page)
-		Write the COUNT bytes of BUF to the packet buffer at START_PAGE.  The
-		"page" value uses the 8390's 256-byte pages.
-	void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
-		Read the 4 byte, page aligned 8390 header. *If* there is a
-		subsequent read, it will be of the rest of the packet.
-	void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-		Read COUNT bytes from the packet buffer into the skb data area. Start
-		reading from RING_OFFSET, the address as the 8390 sees it.  This will always
-		follow the read of the 8390 header.
-*/
-#define ei_reset_8390 (ei_local->reset_8390)
-#define ei_block_output (ei_local->block_output)
-#define ei_block_input (ei_local->block_input)
-#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
-/* Index to functions. */
-static void ei_tx_intr(struct net_device *dev);
-static void ei_tx_err(struct net_device *dev);
-static void ei_receive(struct net_device *dev);
-static void ei_rx_overrun(struct net_device *dev);
-
-/* Routines generic to NS8390-based boards. */
-static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
-				int start_page);
-static void do_set_multicast_list(struct net_device *dev);
-
-/*
- *	SMP and the 8390 setup.
- *
- *	The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
- *	a page register that controls bank and packet buffer access. We guard
- *	this with ei_local->page_lock. Nobody should assume or set the page other
- *	than zero when the lock is not held. Lock holders must restore page 0
- *	before unlocking. Even pure readers must take the lock to protect in
- *	page 0.
- *
- *	To make life difficult the chip can also be very slow.
We therefore can't - * just use spinlocks. For the longer lockups we disable the irq the device - * sits on and hold the lock. We must hold the lock because there is a dual - * processor case other than interrupts (get stats/set multicast list in - * parallel with each other and transmit). - * - * Note: in theory we can just disable the irq on the card _but_ there is - * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs" - * enter lock, take the queued irq. So we waddle instead of flying. - * - * Finally by special arrangement for the purpose of being generally - * annoying the transmit function is called bh atomic. That places - * restrictions on the user context callers as disable_irq won't save - * them. - */ - -/** - * ax_open - Open/initialize the board. - * @dev: network device to initialize - * - * This routine goes all-out, setting everything - * up anew at each open, even though many of these registers should only - * need to be set once at boot. - */ -static int ax_open(struct net_device *dev) -{ - unsigned long flags; - struct ei_device *ei_local = netdev_priv(dev); - - /* - * Grab the page lock so we own the register set, then call - * the init function. - */ - - spin_lock_irqsave(&ei_local->page_lock, flags); - AX88190_init(dev, 1); - /* Set the flag before we drop the lock, That way the IRQ arrives - after its set and we get no silly warnings */ - netif_start_queue(dev); - spin_unlock_irqrestore(&ei_local->page_lock, flags); - ei_local->irqlock = 0; - return 0; -} - -#define dev_lock(dev) (((struct ei_device *)netdev_priv(dev))->page_lock) - -/** - * ax_close - shut down network device - * @dev: network device to close - * - * Opposite of ax_open(). Only used when "ifconfig down" is done. - */ -static int ax_close(struct net_device *dev) -{ - unsigned long flags; - - /* - * Hold the page lock during close - */ - - spin_lock_irqsave(&dev_lock(dev), flags); - AX88190_init(dev, 0); - spin_unlock_irqrestore(&dev_lock(dev), flags); - netif_stop_queue(dev); - return 0; -} - -/** - * axnet_tx_timeout - handle transmit time out condition - * @dev: network device which has apparently fallen asleep - * - * Called by kernel when device never acknowledges a transmit has - * completed (or failed) - i.e. never posted a Tx related interrupt. - */ - -static void axnet_tx_timeout(struct net_device *dev) -{ - long e8390_base = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); - unsigned long flags; - - dev->stats.tx_errors++; - - spin_lock_irqsave(&ei_local->page_lock, flags); - txsr = inb(e8390_base+EN0_TSR); - isr = inb(e8390_base+EN0_ISR); - spin_unlock_irqrestore(&ei_local->page_lock, flags); - - netdev_printk(KERN_DEBUG, dev, - "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n", - (txsr & ENTSR_ABT) ? "excess collisions." : - (isr) ? "lost interrupt?" : "cable problem?", - txsr, isr, tickssofar); - - if (!isr && !dev->stats.tx_packets) - { - /* The 8390 probably hasn't gotten on the cable yet. */ - ei_local->interface_num ^= 1; /* Try a different xcvr. */ - } - - /* Ugly but a reset can be slow, yet must be protected */ - - spin_lock_irqsave(&ei_local->page_lock, flags); - - /* Try to restart the card. Perhaps the user has fixed something. 
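  [Aside: the restart below follows the locking rule laid out in the SMP
   comment above -- even a slow reset must run under the page lock so the
   interrupt handler never sees a half-initialised register window. A
   standalone sketch of the shape, using a pthread mutex in place of
   spin_lock_irqsave() and hypothetical hardware hooks:

	#include <pthread.h>

	// hypothetical hooks, standing in for ei_reset_8390() and
	// AX88190_init()
	extern void hw_reset(void *nic);
	extern void hw_reinit(void *nic);

	static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

	static void recover_after_tx_timeout(void *nic)
	{
		pthread_mutex_lock(&page_lock);	// slow, but serialized
		hw_reset(nic);
		hw_reinit(nic);
		pthread_mutex_unlock(&page_lock);
	}
  ]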
*/ - ei_reset_8390(dev); - AX88190_init(dev, 1); - - spin_unlock_irqrestore(&ei_local->page_lock, flags); - netif_wake_queue(dev); -} - -/** - * axnet_start_xmit - begin packet transmission - * @skb: packet to be sent - * @dev: network device to which packet is sent - * - * Sends a packet to an 8390 network device. - */ - -static netdev_tx_t axnet_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - long e8390_base = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - int length, send_length, output_page; - unsigned long flags; - u8 packet[ETH_ZLEN]; - - netif_stop_queue(dev); - - length = skb->len; - - /* Mask interrupts from the ethercard. - SMP: We have to grab the lock here otherwise the IRQ handler - on another CPU can flip window and race the IRQ mask set. We end - up trashing the mcast filter not disabling irqs if we don't lock */ - - spin_lock_irqsave(&ei_local->page_lock, flags); - outb_p(0x00, e8390_base + EN0_IMR); - - /* - * Slow phase with lock held. - */ - - ei_local->irqlock = 1; - - send_length = max(length, ETH_ZLEN); - - /* - * We have two Tx slots available for use. Find the first free - * slot, and then perform some sanity checks. With two Tx bufs, - * you get very close to transmitting back-to-back packets. With - * only one Tx buf, the transmitter sits idle while you reload the - * card, leaving a substantial gap between each transmitted packet. - */ - - if (ei_local->tx1 == 0) - { - output_page = ei_local->tx_start_page; - ei_local->tx1 = send_length; - if (ei_debug && ei_local->tx2 > 0) - netdev_printk(KERN_DEBUG, dev, - "idle transmitter tx2=%d, lasttx=%d, txing=%d\n", - ei_local->tx2, ei_local->lasttx, - ei_local->txing); - } - else if (ei_local->tx2 == 0) - { - output_page = ei_local->tx_start_page + TX_PAGES/2; - ei_local->tx2 = send_length; - if (ei_debug && ei_local->tx1 > 0) - netdev_printk(KERN_DEBUG, dev, - "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n", - ei_local->tx1, ei_local->lasttx, - ei_local->txing); - } - else - { /* We should never get here. */ - if (ei_debug) - netdev_printk(KERN_DEBUG, dev, - "No Tx buffers free! tx1=%d tx2=%d last=%d\n", - ei_local->tx1, ei_local->tx2, - ei_local->lasttx); - ei_local->irqlock = 0; - netif_stop_queue(dev); - outb_p(ENISR_ALL, e8390_base + EN0_IMR); - spin_unlock_irqrestore(&ei_local->page_lock, flags); - dev->stats.tx_errors++; - return NETDEV_TX_BUSY; - } - - /* - * Okay, now upload the packet and trigger a send if the transmitter - * isn't already sending. If it is busy, the interrupt handler will - * trigger the send later, upon receiving a Tx done interrupt. - */ - - if (length == skb->len) - ei_block_output(dev, length, skb->data, output_page); - else { - memset(packet, 0, ETH_ZLEN); - skb_copy_from_linear_data(skb, packet, skb->len); - ei_block_output(dev, length, packet, output_page); - } - - if (! ei_local->txing) - { - ei_local->txing = 1; - NS8390_trigger_send(dev, send_length, output_page); - dev->trans_start = jiffies; - if (output_page == ei_local->tx_start_page) - { - ei_local->tx1 = -1; - ei_local->lasttx = -1; - } - else - { - ei_local->tx2 = -1; - ei_local->lasttx = -2; - } - } - else ei_local->txqueue++; - - if (ei_local->tx1 && ei_local->tx2) - netif_stop_queue(dev); - else - netif_start_queue(dev); - - /* Turn 8390 interrupts back on. 
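  [Aside: the tx1/tx2 bookkeeping above reduces to a two-slot ping-pong --
   claim whichever buffer is free, and only stop the queue when both are
   in flight. Standalone sketch:

	struct txq { int tx1, tx2; };	// 0 = free, >0 = queued length

	// returns the claimed slot (0 or 1), or -1 when both are busy
	// and the caller must stop the queue
	static int claim_tx_slot(struct txq *q, int len)
	{
		if (q->tx1 == 0) {
			q->tx1 = len;
			return 0;
		}
		if (q->tx2 == 0) {
			q->tx2 = len;
			return 1;
		}
		return -1;
	}
  ]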
*/ - ei_local->irqlock = 0; - outb_p(ENISR_ALL, e8390_base + EN0_IMR); - - spin_unlock_irqrestore(&ei_local->page_lock, flags); - - dev_kfree_skb (skb); - dev->stats.tx_bytes += send_length; - - return NETDEV_TX_OK; -} - -/** - * ax_interrupt - handle the interrupts from an 8390 - * @irq: interrupt number - * @dev_id: a pointer to the net_device - * - * Handle the ether interface interrupts. We pull packets from - * the 8390 via the card specific functions and fire them at the networking - * stack. We also handle transmit completions and wake the transmit path if - * necessary. We also update the counters and do other housekeeping as - * needed. - */ - -static irqreturn_t ax_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - long e8390_base; - int interrupts, nr_serviced = 0, i; - struct ei_device *ei_local; - int handled = 0; - unsigned long flags; - - e8390_base = dev->base_addr; - ei_local = netdev_priv(dev); - - /* - * Protect the irq test too. - */ - - spin_lock_irqsave(&ei_local->page_lock, flags); - - if (ei_local->irqlock) { -#if 1 /* This might just be an interrupt for a PCI device sharing this line */ - const char *msg; - /* The "irqlock" check is only for testing. */ - if (ei_local->irqlock) - msg = "Interrupted while interrupts are masked!"; - else - msg = "Reentering the interrupt handler!"; - netdev_info(dev, "%s, isr=%#2x imr=%#2x\n", - msg, - inb_p(e8390_base + EN0_ISR), - inb_p(e8390_base + EN0_IMR)); -#endif - spin_unlock_irqrestore(&ei_local->page_lock, flags); - return IRQ_NONE; - } - - if (ei_debug > 3) - netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n", - inb_p(e8390_base + EN0_ISR)); - - outb_p(0x00, e8390_base + EN0_ISR); - ei_local->irqlock = 1; - - /* !!Assumption!! -- we stay in page 0. Don't break this. */ - while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0 && - ++nr_serviced < MAX_SERVICE) - { - if (!netif_running(dev) || (interrupts == 0xff)) { - if (ei_debug > 1) - netdev_warn(dev, - "interrupt from stopped card\n"); - outb_p(interrupts, e8390_base + EN0_ISR); - interrupts = 0; - break; - } - handled = 1; - - /* AX88190 bug fix. */ - outb_p(interrupts, e8390_base + EN0_ISR); - for (i = 0; i < 10; i++) { - if (!(inb(e8390_base + EN0_ISR) & interrupts)) - break; - outb_p(0, e8390_base + EN0_ISR); - outb_p(interrupts, e8390_base + EN0_ISR); - } - if (interrupts & ENISR_OVER) - ei_rx_overrun(dev); - else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) - { - /* Got a good (?) packet. */ - ei_receive(dev); - } - /* Push the next to-transmit packet through. */ - if (interrupts & ENISR_TX) - ei_tx_intr(dev); - else if (interrupts & ENISR_TX_ERR) - ei_tx_err(dev); - - if (interrupts & ENISR_COUNTERS) - { - dev->stats.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0); - dev->stats.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1); - dev->stats.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2); - } - } - - if (interrupts && ei_debug > 3) - { - handled = 1; - if (nr_serviced >= MAX_SERVICE) - { - /* 0xFF is valid for a card removal */ - if(interrupts!=0xFF) - netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n", - interrupts); - outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ - } else { - netdev_warn(dev, "unknown interrupt %#2x\n", - interrupts); - outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ - } - } - - /* Turn 8390 interrupts back on. 
*/ - ei_local->irqlock = 0; - outb_p(ENISR_ALL, e8390_base + EN0_IMR); - - spin_unlock_irqrestore(&ei_local->page_lock, flags); - return IRQ_RETVAL(handled); -} - -/** - * ei_tx_err - handle transmitter error - * @dev: network device which threw the exception - * - * A transmitter error has happened. Most likely excess collisions (which - * is a fairly normal condition). If the error is one where the Tx will - * have been aborted, we try and send another one right away, instead of - * letting the failed packet sit and collect dust in the Tx buffer. This - * is a much better solution as it avoids kernel based Tx timeouts, and - * an unnecessary card reset. - * - * Called with lock held. - */ - -static void ei_tx_err(struct net_device *dev) -{ - long e8390_base = dev->base_addr; - unsigned char txsr = inb_p(e8390_base+EN0_TSR); - unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); - -#ifdef VERBOSE_ERROR_DUMP - netdev_printk(KERN_DEBUG, dev, - "transmitter error (%#2x):", txsr); - if (txsr & ENTSR_ABT) - pr_cont(" excess-collisions"); - if (txsr & ENTSR_ND) - pr_cont(" non-deferral"); - if (txsr & ENTSR_CRS) - pr_cont(" lost-carrier"); - if (txsr & ENTSR_FU) - pr_cont(" FIFO-underrun"); - if (txsr & ENTSR_CDH) - pr_cont(" lost-heartbeat"); - pr_cont("\n"); -#endif - - if (tx_was_aborted) - ei_tx_intr(dev); - else - { - dev->stats.tx_errors++; - if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++; - if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++; - if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++; - } -} - -/** - * ei_tx_intr - transmit interrupt handler - * @dev: network device for which tx intr is handled - * - * We have finished a transmit: check for errors and then trigger the next - * packet to be sent. Called with lock held. - */ - -static void ei_tx_intr(struct net_device *dev) -{ - long e8390_base = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - int status = inb(e8390_base + EN0_TSR); - - /* - * There are two Tx buffers, see which one finished, and trigger - * the send of another one if it exists. - */ - ei_local->txqueue--; - - if (ei_local->tx1 < 0) - { - if (ei_local->lasttx != 1 && ei_local->lasttx != -1) - netdev_err(dev, "%s: bogus last_tx_buffer %d, tx1=%d\n", - ei_local->name, ei_local->lasttx, - ei_local->tx1); - ei_local->tx1 = 0; - if (ei_local->tx2 > 0) - { - ei_local->txing = 1; - NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); - dev->trans_start = jiffies; - ei_local->tx2 = -1, - ei_local->lasttx = 2; - } - else ei_local->lasttx = 20, ei_local->txing = 0; - } - else if (ei_local->tx2 < 0) - { - if (ei_local->lasttx != 2 && ei_local->lasttx != -2) - netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n", - ei_local->name, ei_local->lasttx, - ei_local->tx2); - ei_local->tx2 = 0; - if (ei_local->tx1 > 0) - { - ei_local->txing = 1; - NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); - dev->trans_start = jiffies; - ei_local->tx1 = -1; - ei_local->lasttx = 1; - } - else - ei_local->lasttx = 10, ei_local->txing = 0; - } -// else -// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n", -// ei_local->lasttx); - - /* Minimize Tx latency: update the statistics after we restart TXing. 
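  [Aside: the accounting below decodes the transmit status register bit
   by bit. A hedged standalone sketch with abbreviated mask names (the
   real ENTSR_* values live in 8390.h):

	#define TSR_PTX 0x01		// packet sent OK
	#define TSR_COL 0x04		// collided at least once
	#define TSR_ABT 0x08		// gave up after 16 collisions

	struct tx_counters { long packets, errors, collisions; };

	static void account_tx(struct tx_counters *c, unsigned char tsr)
	{
		if (tsr & TSR_COL)
			c->collisions++;
		if (tsr & TSR_PTX)
			c->packets++;
		else
			c->errors++;
		if (tsr & TSR_ABT)
			c->collisions += 16;	// abort implies 16 collisions
	}
  ]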
*/ - if (status & ENTSR_COL) - dev->stats.collisions++; - if (status & ENTSR_PTX) - dev->stats.tx_packets++; - else - { - dev->stats.tx_errors++; - if (status & ENTSR_ABT) - { - dev->stats.tx_aborted_errors++; - dev->stats.collisions += 16; - } - if (status & ENTSR_CRS) - dev->stats.tx_carrier_errors++; - if (status & ENTSR_FU) - dev->stats.tx_fifo_errors++; - if (status & ENTSR_CDH) - dev->stats.tx_heartbeat_errors++; - if (status & ENTSR_OWC) - dev->stats.tx_window_errors++; - } - netif_wake_queue(dev); -} - -/** - * ei_receive - receive some packets - * @dev: network device with which receive will be run - * - * We have a good packet(s), get it/them out of the buffers. - * Called with lock held. - */ - -static void ei_receive(struct net_device *dev) -{ - long e8390_base = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - unsigned char rxing_page, this_frame, next_frame; - unsigned short current_offset; - int rx_pkt_count = 0; - struct e8390_pkt_hdr rx_frame; - - while (++rx_pkt_count < 10) - { - int pkt_len, pkt_stat; - - /* Get the rx page (incoming packet pointer). */ - rxing_page = inb_p(e8390_base + EN1_CURPAG -1); - - /* Remove one frame from the ring. Boundary is always a page behind. */ - this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1; - if (this_frame >= ei_local->stop_page) - this_frame = ei_local->rx_start_page; - - /* Someday we'll omit the previous, iff we never get this message. - (There is at least one clone claimed to have a problem.) - - Keep quiet if it looks like a card removal. One problem here - is that some clones crash in roughly the same way. - */ - if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF)) - netdev_err(dev, "mismatched read page pointers %2x vs %2x\n", - this_frame, ei_local->current_page); - - if (this_frame == rxing_page) /* Read all the frames? */ - break; /* Done for now */ - - current_offset = this_frame << 8; - ei_get_8390_hdr(dev, &rx_frame, this_frame); - - pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); - pkt_stat = rx_frame.status; - - next_frame = this_frame + 1 + ((pkt_len+4)>>8); - - if (pkt_len < 60 || pkt_len > 1518) - { - if (ei_debug) - netdev_printk(KERN_DEBUG, dev, - "bogus packet size: %d, status=%#2x nxpg=%#2x\n", - rx_frame.count, rx_frame.status, - rx_frame.next); - dev->stats.rx_errors++; - dev->stats.rx_length_errors++; - } - else if ((pkt_stat & 0x0F) == ENRSR_RXOK) - { - struct sk_buff *skb; - - skb = dev_alloc_skb(pkt_len+2); - if (skb == NULL) - { - if (ei_debug > 1) - netdev_printk(KERN_DEBUG, dev, - "Couldn't allocate a sk_buff of size %d\n", - pkt_len); - dev->stats.rx_dropped++; - break; - } - else - { - skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ - skb_put(skb, pkt_len); /* Make room */ - ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - if (pkt_stat & ENRSR_PHY) - dev->stats.multicast++; - } - } - else - { - if (ei_debug) - netdev_printk(KERN_DEBUG, dev, - "bogus packet: status=%#2x nxpg=%#2x size=%d\n", - rx_frame.status, rx_frame.next, - rx_frame.count); - dev->stats.rx_errors++; - /* NB: The NIC counts CRC, frame and missed errors. */ - if (pkt_stat & ENRSR_FO) - dev->stats.rx_fifo_errors++; - } - next_frame = rx_frame.next; - - /* This _should_ never happen: it's here for avoiding bad clones. 
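  [Aside: next_frame above is plain ring arithmetic -- a frame occupies
   its 4-byte header plus the packet, rounded up to whole 256-byte pages.
   Standalone sketch, with the wrap the driver otherwise takes from the
   header's next field:

	static unsigned char next_ring_page(unsigned char this_frame,
					    int pkt_len,	// sans header
					    unsigned char rx_start,
					    unsigned char stop)
	{
		unsigned char next = this_frame + 1 + ((pkt_len + 4) >> 8);

		if (next >= stop)	// ring spans pages [rx_start, stop)
			next -= stop - rx_start;
		return next;
	}
  ]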
*/ - if (next_frame >= ei_local->stop_page) { - netdev_info(dev, "next frame inconsistency, %#2x\n", - next_frame); - next_frame = ei_local->rx_start_page; - } - ei_local->current_page = next_frame; - outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); - } -} - -/** - * ei_rx_overrun - handle receiver overrun - * @dev: network device which threw exception - * - * We have a receiver overrun: we have to kick the 8390 to get it started - * again. Problem is that you have to kick it exactly as NS prescribes in - * the updated datasheets, or "the NIC may act in an unpredictable manner." - * This includes causing "the NIC to defer indefinitely when it is stopped - * on a busy network." Ugh. - * Called with lock held. Don't call this with the interrupts off or your - * computer will hate you - it takes 10ms or so. - */ - -static void ei_rx_overrun(struct net_device *dev) -{ - axnet_dev_t *info = PRIV(dev); - long e8390_base = dev->base_addr; - unsigned char was_txing, must_resend = 0; - - /* - * Record whether a Tx was in progress and then issue the - * stop command. - */ - was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); - - if (ei_debug > 1) - netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n"); - dev->stats.rx_over_errors++; - - /* - * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. - * We wait at least 2ms. - */ - - mdelay(2); - - /* - * Reset RBCR[01] back to zero as per magic incantation. - */ - outb_p(0x00, e8390_base+EN0_RCNTLO); - outb_p(0x00, e8390_base+EN0_RCNTHI); - - /* - * See if any Tx was interrupted or not. According to NS, this - * step is vital, and skipping it will cause no end of havoc. - */ - - if (was_txing) - { - unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); - if (!tx_completed) - must_resend = 1; - } - - /* - * Have to enter loopback mode and then restart the NIC before - * you are allowed to slurp packets up off the ring. - */ - outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); - outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); - - /* - * Clear the Rx ring of all the debris, and ack the interrupt. - */ - ei_receive(dev); - - /* - * Leave loopback mode, and resend any packet that got stopped. - */ - outb_p(E8390_TXCONFIG | info->duplex_flag, e8390_base + EN0_TXCR); - if (must_resend) - outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); -} - -/* - * Collect the stats. This is called unlocked and from several contexts. - */ - -static struct net_device_stats *get_stats(struct net_device *dev) -{ - long ioaddr = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - unsigned long flags; - - /* If the card is stopped, just return the present stats. */ - if (!netif_running(dev)) - return &dev->stats; - - spin_lock_irqsave(&ei_local->page_lock,flags); - /* Read the counter registers, assuming we are in page 0. */ - dev->stats.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0); - dev->stats.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1); - dev->stats.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2); - spin_unlock_irqrestore(&ei_local->page_lock, flags); - - return &dev->stats; -} - -/* - * Form the 64 bit 8390 multicast table from the linked list of addresses - * associated with this dev structure. 
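  [Aside: the hash below uses the top six bits of the Ethernet CRC of
   each address -- three bits pick one of the eight filter bytes, three
   more pick the bit within it. Standalone sketch, with the CRC helper
   assumed, as ether_crc() is in the driver:

	#include <stdint.h>

	static void set_filter_bit(uint8_t filter[8], uint32_t crc)
	{
		filter[crc >> 29] |= 1u << ((crc >> 26) & 7);
	}
  ]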
- */ - -static inline void make_mc_bits(u8 *bits, struct net_device *dev) -{ - struct netdev_hw_addr *ha; - u32 crc; - - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc(ETH_ALEN, ha->addr); - /* - * The 8390 uses the 6 most significant bits of the - * CRC to index the multicast table. - */ - bits[crc>>29] |= (1<<((crc>>26)&7)); - } -} - -/** - * do_set_multicast_list - set/clear multicast filter - * @dev: net device for which multicast filter is adjusted - * - * Set or clear the multicast filter for this adaptor. - * Must be called with lock held. - */ - -static void do_set_multicast_list(struct net_device *dev) -{ - long e8390_base = dev->base_addr; - int i; - struct ei_device *ei_local = netdev_priv(dev); - - if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { - memset(ei_local->mcfilter, 0, 8); - if (!netdev_mc_empty(dev)) - make_mc_bits(ei_local->mcfilter, dev); - } else { - /* set to accept-all */ - memset(ei_local->mcfilter, 0xFF, 8); - } - - outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); - for(i = 0; i < 8; i++) - { - outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i)); - } - outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD); - - if(dev->flags&IFF_PROMISC) - outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR); - else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) - outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR); - else - outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); - - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); -} - -/* - * Called without lock held. This is invoked from user context and may - * be parallel to just about everything else. Its also fairly quick and - * not called too often. Must protect against both bh and irq users - */ - -static void set_multicast_list(struct net_device *dev) -{ - unsigned long flags; - - spin_lock_irqsave(&dev_lock(dev), flags); - do_set_multicast_list(dev); - spin_unlock_irqrestore(&dev_lock(dev), flags); -} - -/* This page of functions should be 8390 generic */ -/* Follow National Semi's recommendations for initializing the "NIC". */ - -/** - * AX88190_init - initialize 8390 hardware - * @dev: network device to initialize - * @startp: boolean. non-zero value to initiate chip processing - * - * Must be called with lock held. - */ - -static void AX88190_init(struct net_device *dev, int startp) -{ - axnet_dev_t *info = PRIV(dev); - long e8390_base = dev->base_addr; - struct ei_device *ei_local = netdev_priv(dev); - int i; - int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48; - - if(sizeof(struct e8390_pkt_hdr)!=4) - panic("8390.c: header struct mispacked\n"); - /* Follow National Semi's recommendations for initing the DP83902. */ - outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ - outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ - /* Clear the remote byte count registers. */ - outb_p(0x00, e8390_base + EN0_RCNTLO); - outb_p(0x00, e8390_base + EN0_RCNTHI); - /* Set to monitor and loopback mode -- this is vital!. */ - outb_p(E8390_RXOFF|0x40, e8390_base + EN0_RXCR); /* 0x60 */ - outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ - /* Set the transmit page and receive ring. 
*/ - outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); - ei_local->tx1 = ei_local->tx2 = 0; - outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); - outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/ - ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ - outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); - /* Clear the pending interrupts and mask. */ - outb_p(0xFF, e8390_base + EN0_ISR); - outb_p(0x00, e8390_base + EN0_IMR); - - /* Copy the station address into the DS8390 registers. */ - - outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */ - for(i = 0; i < 6; i++) - { - outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); - if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i]) - netdev_err(dev, "Hw. address read/write mismap %d\n", i); - } - - outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); - outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); - - netif_start_queue(dev); - ei_local->tx1 = ei_local->tx2 = 0; - ei_local->txing = 0; - - if (info->flags & IS_AX88790) /* select Internal PHY */ - outb(0x10, e8390_base + AXNET_GPIO); - - if (startp) - { - outb_p(0xff, e8390_base + EN0_ISR); - outb_p(ENISR_ALL, e8390_base + EN0_IMR); - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); - outb_p(E8390_TXCONFIG | info->duplex_flag, - e8390_base + EN0_TXCR); /* xmit on. */ - /* 3c503 TechMan says rxconfig only after the NIC is started. */ - outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); /* rx on, */ - do_set_multicast_list(dev); /* (re)load the mcast table */ - } -} - -/* Trigger a transmit start, assuming the length is valid. - Always called with the page lock held */ - -static void NS8390_trigger_send(struct net_device *dev, unsigned int length, - int start_page) -{ - long e8390_base = dev->base_addr; - struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev); - - if (inb_p(e8390_base) & E8390_TRANS) - { - netdev_warn(dev, "trigger_send() called with the transmitter busy\n"); - return; - } - outb_p(length & 0xff, e8390_base + EN0_TCNTLO); - outb_p(length >> 8, e8390_base + EN0_TCNTHI); - outb_p(start_page, e8390_base + EN0_TPSR); - outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD); -} diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c deleted file mode 100644 index b4fd7c3..0000000 --- a/drivers/net/pcmcia/pcnet_cs.c +++ /dev/null @@ -1,1710 +0,0 @@ -/*====================================================================== - - A PCMCIA ethernet driver for NS8390-based cards - - This driver supports the D-Link DE-650 and Linksys EthernetCard - cards, the newer D-Link and Linksys combo cards, Accton EN2212 - cards, the RPTI EP400, and the PreMax PE-200 in non-shared-memory - mode, and the IBM Credit Card Adapter, the NE4100, the Thomas - Conrad ethernet card, and the Kingston KNE-PCM/x in shared-memory - mode. It will also handle the Socket EA card in either mode. - - Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net - - pcnet_cs.c 1.153 2003/11/09 18:53:09 - - The network driver code is based on Donald Becker's NE2000 code: - - Written 1992,1993 by Donald Becker. - Copyright 1993 United States Government as represented by the - Director, National Security Agency. This software may be used and - distributed according to the terms of the GNU General Public License, - incorporated herein by reference. 
-    Donald Becker may be reached at becker@scyld.com
-
-    Based also on Keith Moore's changes to Don Becker's code, for IBM
-    CCAE support.  Drivers merged back together, and shared-memory
-    Socket EA support added, by Ken Raeburn, September 1995.
-
-======================================================================*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/log2.h>
-#include <linux/etherdevice.h>
-#include <linux/mii.h>
-#include "../8390.h"
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ciscode.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/cisreg.h>
-
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/byteorder.h>
-#include <asm/uaccess.h>
-
-#define PCNET_CMD	0x00
-#define PCNET_DATAPORT	0x10	/* NatSemi-defined port window offset. */
-#define PCNET_RESET	0x1f	/* Issue a read to reset, a write to clear. */
-#define PCNET_MISC	0x18	/* For IBM CCAE and Socket EA cards */
-
-#define PCNET_START_PG	0x40	/* First page of TX buffer */
-#define PCNET_STOP_PG	0x80	/* Last page +1 of RX ring */
-
-/* Socket EA cards have a larger packet buffer */
-#define SOCKET_START_PG	0x01
-#define SOCKET_STOP_PG	0xff
-
-#define PCNET_RDC_TIMEOUT (2*HZ/100)	/* Max wait in jiffies for Tx RDC */
-
-static const char *if_names[] = { "auto", "10baseT", "10base2"};
-
-
-/*====================================================================*/
-
-/* Module parameters */
-
-MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
-MODULE_DESCRIPTION("NE2000 compatible PCMCIA ethernet driver");
-MODULE_LICENSE("GPL");
-
-#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
-
-INT_MODULE_PARM(if_port,	1);	/* Transceiver type */
-INT_MODULE_PARM(use_big_buf,	1);	/* use 64K packet buffer? */
-INT_MODULE_PARM(mem_speed,	0);	/* shared mem speed, in ns */
-INT_MODULE_PARM(delay_output,	0);	/* pause after xmit? */
-INT_MODULE_PARM(delay_time,	4);	/* in usec */
-INT_MODULE_PARM(use_shmem,	-1);	/* use shared memory? */
-INT_MODULE_PARM(full_duplex,	0);	/* full duplex? */
-
-/* Ugh!  Let the user hardwire the hardware address for queer cards */
-static int hw_addr[6] = { 0, /* ...
*/ }; -module_param_array(hw_addr, int, NULL, 0); - -/*====================================================================*/ - -static void mii_phy_probe(struct net_device *dev); -static int pcnet_config(struct pcmcia_device *link); -static void pcnet_release(struct pcmcia_device *link); -static int pcnet_open(struct net_device *dev); -static int pcnet_close(struct net_device *dev); -static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); -static void ei_watchdog(u_long arg); -static void pcnet_reset_8390(struct net_device *dev); -static int set_config(struct net_device *dev, struct ifmap *map); -static int setup_shmem_window(struct pcmcia_device *link, int start_pg, - int stop_pg, int cm_offset); -static int setup_dma_config(struct pcmcia_device *link, int start_pg, - int stop_pg); - -static void pcnet_detach(struct pcmcia_device *p_dev); - -/*====================================================================*/ - -typedef struct hw_info_t { - u_int offset; - u_char a0, a1, a2; - u_int flags; -} hw_info_t; - -#define DELAY_OUTPUT 0x01 -#define HAS_MISC_REG 0x02 -#define USE_BIG_BUF 0x04 -#define HAS_IBM_MISC 0x08 -#define IS_DL10019 0x10 -#define IS_DL10022 0x20 -#define HAS_MII 0x40 -#define USE_SHMEM 0x80 /* autodetected */ - -#define AM79C9XX_HOME_PHY 0x00006B90 /* HomePNA PHY */ -#define AM79C9XX_ETH_PHY 0x00006B70 /* 10baseT PHY */ -#define MII_PHYID_REV_MASK 0xfffffff0 -#define MII_PHYID_REG1 0x02 -#define MII_PHYID_REG2 0x03 - -static hw_info_t hw_info[] = { - { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT }, - { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 }, - { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 }, - { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94, - DELAY_OUTPUT | HAS_IBM_MISC }, - { /* Danpex EN-6200P2 */ 0x0110, 0x00, 0x40, 0xc7, 0 }, - { /* DataTrek NetCard */ 0x0ff0, 0x00, 0x20, 0xe8, 0 }, - { /* Dayna CommuniCard E */ 0x0110, 0x00, 0x80, 0x19, 0 }, - { /* D-Link DE-650 */ 0x0040, 0x00, 0x80, 0xc8, 0 }, - { /* EP-210 Ethernet */ 0x0110, 0x00, 0x40, 0x33, 0 }, - { /* EP4000 Ethernet */ 0x01c0, 0x00, 0x00, 0xb4, 0 }, - { /* Epson EEN10B */ 0x0ff0, 0x00, 0x00, 0x48, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* ELECOM Laneed LD-CDWA */ 0xb8, 0x08, 0x00, 0x42, 0 }, - { /* Hypertec Ethernet */ 0x01c0, 0x00, 0x40, 0x4c, 0 }, - { /* IBM CCAE */ 0x0ff0, 0x08, 0x00, 0x5a, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* IBM CCAE */ 0x0ff0, 0x00, 0x04, 0xac, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* IBM CCAE */ 0x0ff0, 0x00, 0x06, 0x29, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* IBM FME */ 0x0374, 0x08, 0x00, 0x5a, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* IBM FME */ 0x0374, 0x00, 0x04, 0xac, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* Kansai KLA-PCM/T */ 0x0ff0, 0x00, 0x60, 0x87, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* NSC DP83903 */ 0x0374, 0x08, 0x00, 0x17, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* NSC DP83903 */ 0x0374, 0x00, 0xc0, 0xa8, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* NSC DP83903 */ 0x0374, 0x00, 0xa0, 0xb0, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* NSC DP83903 */ 0x0198, 0x00, 0x20, 0xe0, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* I-O DATA PCLA/T */ 0x0ff0, 0x00, 0xa0, 0xb0, 0 }, - { /* Katron PE-520 */ 0x0110, 0x00, 0x40, 0xf6, 0 }, - { /* Kingston KNE-PCM/x */ 0x0ff0, 0x00, 0xc0, 0xf0, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* Kingston KNE-PCM/x */ 0x0ff0, 0xe2, 0x0c, 0x0f, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* Kingston KNE-PC2 */ 0x0180, 0x00, 0xc0, 0xf0, 0 }, - { /* Maxtech 
PCN2000 */ 0x5000, 0x00, 0x00, 0xe8, 0 }, - { /* NDC Instant-Link */ 0x003a, 0x00, 0x80, 0xc6, 0 }, - { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 }, - { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45, - HAS_MISC_REG | HAS_IBM_MISC }, - { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 }, - { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 }, - { /* SCM Ethernet */ 0x0ff0, 0x00, 0x20, 0xcb, 0 }, - { /* Socket EA */ 0x4000, 0x00, 0xc0, 0x1b, - DELAY_OUTPUT | HAS_MISC_REG | USE_BIG_BUF }, - { /* Socket LP-E CF+ */ 0x01c0, 0x00, 0xc0, 0x1b, 0 }, - { /* SuperSocket RE450T */ 0x0110, 0x00, 0xe0, 0x98, 0 }, - { /* Volktek NPL-402CT */ 0x0060, 0x00, 0x40, 0x05, 0 }, - { /* NEC PC-9801N-J12 */ 0x0ff0, 0x00, 0x00, 0x4c, 0 }, - { /* PCMCIA Technology OEM */ 0x01c8, 0x00, 0xa0, 0x0c, 0 } -}; - -#define NR_INFO ARRAY_SIZE(hw_info) - -static hw_info_t default_info = { 0, 0, 0, 0, 0 }; -static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII }; -static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII }; - -typedef struct pcnet_dev_t { - struct pcmcia_device *p_dev; - u_int flags; - void __iomem *base; - struct timer_list watchdog; - int stale, fast_poll; - u_char phy_id; - u_char eth_phy, pna_phy; - u_short link_status; - u_long mii_reset; -} pcnet_dev_t; - -static inline pcnet_dev_t *PRIV(struct net_device *dev) -{ - char *p = netdev_priv(dev); - return (pcnet_dev_t *)(p + sizeof(struct ei_device)); -} - -static const struct net_device_ops pcnet_netdev_ops = { - .ndo_open = pcnet_open, - .ndo_stop = pcnet_close, - .ndo_set_config = set_config, - .ndo_start_xmit = ei_start_xmit, - .ndo_get_stats = ei_get_stats, - .ndo_do_ioctl = ei_ioctl, - .ndo_set_multicast_list = ei_set_multicast_list, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int pcnet_probe(struct pcmcia_device *link) -{ - pcnet_dev_t *info; - struct net_device *dev; - - dev_dbg(&link->dev, "pcnet_attach()\n"); - - /* Create new ethernet device */ - dev = __alloc_ei_netdev(sizeof(pcnet_dev_t)); - if (!dev) return -ENOMEM; - info = PRIV(dev); - info->p_dev = link; - link->priv = dev; - - link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; - - dev->netdev_ops = &pcnet_netdev_ops; - - return pcnet_config(link); -} /* pcnet_attach */ - -static void pcnet_detach(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - dev_dbg(&link->dev, "pcnet_detach\n"); - - unregister_netdev(dev); - - pcnet_release(link); - - free_netdev(dev); -} /* pcnet_detach */ - -/*====================================================================== - - This probes for a card's hardware address, for card types that - encode this information in their CIS. 
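  [Aside: attribute memory is only byte-significant at even addresses,
   which is why the probe below gathers the six MAC bytes with a stride
   of two. Standalone sketch, with attr_readb() as a hypothetical
   stand-in for readb() on the mapped window:

	#include <stdint.h>

	extern uint8_t attr_readb(const volatile uint8_t *addr);

	static void mac_from_cis(const volatile uint8_t *base,
				 uint8_t mac[6])
	{
		int j;

		for (j = 0; j < 6; j++)
			mac[j] = attr_readb(base + (j << 1));	// even offsets
	}
  ]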
- -======================================================================*/ - -static hw_info_t *get_hwinfo(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - u_char __iomem *base, *virt; - int i, j; - - /* Allocate a small memory window */ - link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; - link->resource[2]->start = 0; link->resource[2]->end = 0; - i = pcmcia_request_window(link, link->resource[2], 0); - if (i != 0) - return NULL; - - virt = ioremap(link->resource[2]->start, - resource_size(link->resource[2])); - for (i = 0; i < NR_INFO; i++) { - pcmcia_map_mem_page(link, link->resource[2], - hw_info[i].offset & ~(resource_size(link->resource[2])-1)); - base = &virt[hw_info[i].offset & (resource_size(link->resource[2])-1)]; - if ((readb(base+0) == hw_info[i].a0) && - (readb(base+2) == hw_info[i].a1) && - (readb(base+4) == hw_info[i].a2)) { - for (j = 0; j < 6; j++) - dev->dev_addr[j] = readb(base + (j<<1)); - break; - } - } - - iounmap(virt); - j = pcmcia_release_window(link, link->resource[2]); - return (i < NR_INFO) ? hw_info+i : NULL; -} /* get_hwinfo */ - -/*====================================================================== - - This probes for a card's hardware address by reading the PROM. - It checks the address against a list of known types, then falls - back to a simple NE2000 clone signature check. - -======================================================================*/ - -static hw_info_t *get_prom(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - unsigned int ioaddr = dev->base_addr; - u_char prom[32]; - int i, j; - - /* This is lifted straight from drivers/net/ne.c */ - struct { - u_char value, offset; - } program_seq[] = { - {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ - {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ - {0x00, EN0_RCNTLO}, /* Clear the count regs. */ - {0x00, EN0_RCNTHI}, - {0x00, EN0_IMR}, /* Mask completion irq. */ - {0xFF, EN0_ISR}, - {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ - {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ - {32, EN0_RCNTLO}, - {0x00, EN0_RCNTHI}, - {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ - {0x00, EN0_RSARHI}, - {E8390_RREAD+E8390_START, E8390_CMD}, - }; - - pcnet_reset_8390(dev); - mdelay(10); - - for (i = 0; i < ARRAY_SIZE(program_seq); i++) - outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); - - for (i = 0; i < 32; i++) - prom[i] = inb(ioaddr + PCNET_DATAPORT); - for (i = 0; i < NR_INFO; i++) { - if ((prom[0] == hw_info[i].a0) && - (prom[2] == hw_info[i].a1) && - (prom[4] == hw_info[i].a2)) - break; - } - if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) { - for (j = 0; j < 6; j++) - dev->dev_addr[j] = prom[j<<1]; - return (i < NR_INFO) ? hw_info+i : &default_info; - } - return NULL; -} /* get_prom */ - -/*====================================================================== - - For DL10019 based cards, like the Linksys EtherFast - -======================================================================*/ - -static hw_info_t *get_dl10019(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - int i; - u_char sum; - - for (sum = 0, i = 0x14; i < 0x1c; i++) - sum += inb_p(dev->base_addr + i); - if (sum != 0xff) - return NULL; - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i); - i = inb(dev->base_addr + 0x1f); - return ((i == 0x91)||(i == 0x99)) ? 
&dl10022_info : &dl10019_info; -} - -/*====================================================================== - - For Asix AX88190 based cards - -======================================================================*/ - -static hw_info_t *get_ax88190(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - unsigned int ioaddr = dev->base_addr; - int i, j; - - /* Not much of a test, but the alternatives are messy */ - if (link->config_base != 0x03c0) - return NULL; - - outb_p(0x01, ioaddr + EN0_DCFG); /* Set word-wide access. */ - outb_p(0x00, ioaddr + EN0_RSARLO); /* DMA starting at 0x0400. */ - outb_p(0x04, ioaddr + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, ioaddr + E8390_CMD); - - for (i = 0; i < 6; i += 2) { - j = inw(ioaddr + PCNET_DATAPORT); - dev->dev_addr[i] = j & 0xff; - dev->dev_addr[i+1] = j >> 8; - } - return NULL; -} - -/*====================================================================== - - This should be totally unnecessary... but when we can't figure - out the hardware address any other way, we'll let the user hard - wire it when the module is initialized. - -======================================================================*/ - -static hw_info_t *get_hwired(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - int i; - - for (i = 0; i < 6; i++) - if (hw_addr[i] != 0) break; - if (i == 6) - return NULL; - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = hw_addr[i]; - - return &default_info; -} /* get_hwired */ - -static int try_io_port(struct pcmcia_device *link) -{ - int j, ret; - link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; - if (link->resource[0]->end == 32) { - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - if (link->resource[1]->end > 0) { - /* for master/slave multifunction cards */ - link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; - } - } else { - /* This should be two 16-port windows */ - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16; - } - if (link->resource[0]->start == 0) { - for (j = 0; j < 0x400; j += 0x20) { - link->resource[0]->start = j ^ 0x300; - link->resource[1]->start = (j ^ 0x300) + 0x10; - link->io_lines = 16; - ret = pcmcia_request_io(link); - if (ret == 0) - return ret; - } - return ret; - } else { - return pcmcia_request_io(link); - } -} - -static int pcnet_confcheck(struct pcmcia_device *p_dev, void *priv_data) -{ - int *priv = priv_data; - int try = (*priv & 0x1); - - *priv &= (p_dev->resource[2]->end >= 0x4000) ? 
0x10 : ~0x10; - - if (p_dev->config_index == 0) - return -EINVAL; - - if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32) - return -EINVAL; - - if (try) - p_dev->io_lines = 16; - return try_io_port(p_dev); -} - -static hw_info_t *pcnet_try_config(struct pcmcia_device *link, - int *has_shmem, int try) -{ - struct net_device *dev = link->priv; - hw_info_t *local_hw_info; - pcnet_dev_t *info = PRIV(dev); - int priv = try; - int ret; - - ret = pcmcia_loop_config(link, pcnet_confcheck, &priv); - if (ret) { - dev_warn(&link->dev, "no useable port range found\n"); - return NULL; - } - *has_shmem = (priv & 0x10); - - if (!link->irq) - return NULL; - - if (resource_size(link->resource[1]) == 8) - link->config_flags |= CONF_ENABLE_SPKR; - - if ((link->manf_id == MANFID_IBM) && - (link->card_id == PRODID_IBM_HOME_AND_AWAY)) - link->config_index |= 0x10; - - ret = pcmcia_enable_device(link); - if (ret) - return NULL; - - dev->irq = link->irq; - dev->base_addr = link->resource[0]->start; - - if (info->flags & HAS_MISC_REG) { - if ((if_port == 1) || (if_port == 2)) - dev->if_port = if_port; - else - dev_notice(&link->dev, "invalid if_port requested\n"); - } else - dev->if_port = 0; - - if ((link->config_base == 0x03c0) && - (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { - dev_info(&link->dev, - "this is an AX88190 card - use axnet_cs instead.\n"); - return NULL; - } - - local_hw_info = get_hwinfo(link); - if (!local_hw_info) - local_hw_info = get_prom(link); - if (!local_hw_info) - local_hw_info = get_dl10019(link); - if (!local_hw_info) - local_hw_info = get_ax88190(link); - if (!local_hw_info) - local_hw_info = get_hwired(link); - - return local_hw_info; -} - -static int pcnet_config(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - pcnet_dev_t *info = PRIV(dev); - int start_pg, stop_pg, cm_offset; - int has_shmem = 0; - hw_info_t *local_hw_info; - - dev_dbg(&link->dev, "pcnet_config\n"); - - local_hw_info = pcnet_try_config(link, &has_shmem, 0); - if (!local_hw_info) { - /* check whether forcing io_lines to 16 helps... */ - pcmcia_disable_device(link); - local_hw_info = pcnet_try_config(link, &has_shmem, 1); - if (local_hw_info == NULL) { - dev_notice(&link->dev, "unable to read hardware net" - " address for io base %#3lx\n", dev->base_addr); - goto failed; - } - } - - info->flags = local_hw_info->flags; - /* Check for user overrides */ - info->flags |= (delay_output) ? 
DELAY_OUTPUT : 0; - if ((link->manf_id == MANFID_SOCKET) && - ((link->card_id == PRODID_SOCKET_LPE) || - (link->card_id == PRODID_SOCKET_LPE_CF) || - (link->card_id == PRODID_SOCKET_EIO))) - info->flags &= ~USE_BIG_BUF; - if (!use_big_buf) - info->flags &= ~USE_BIG_BUF; - - if (info->flags & USE_BIG_BUF) { - start_pg = SOCKET_START_PG; - stop_pg = SOCKET_STOP_PG; - cm_offset = 0x10000; - } else { - start_pg = PCNET_START_PG; - stop_pg = PCNET_STOP_PG; - cm_offset = 0; - } - - /* has_shmem is ignored if use_shmem != -1 */ - if ((use_shmem == 0) || (!has_shmem && (use_shmem == -1)) || - (setup_shmem_window(link, start_pg, stop_pg, cm_offset) != 0)) - setup_dma_config(link, start_pg, stop_pg); - - ei_status.name = "NE2000"; - ei_status.word16 = 1; - ei_status.reset_8390 = pcnet_reset_8390; - - if (info->flags & (IS_DL10019|IS_DL10022)) - mii_phy_probe(dev); - - SET_NETDEV_DEV(dev, &link->dev); - - if (register_netdev(dev) != 0) { - pr_notice("register_netdev() failed\n"); - goto failed; - } - - if (info->flags & (IS_DL10019|IS_DL10022)) { - u_char id = inb(dev->base_addr + 0x1a); - netdev_info(dev, "NE2000 (DL100%d rev %02x): ", - (info->flags & IS_DL10022) ? 22 : 19, id); - if (info->pna_phy) - pr_cont("PNA, "); - } else { - netdev_info(dev, "NE2000 Compatible: "); - } - pr_cont("io %#3lx, irq %d,", dev->base_addr, dev->irq); - if (info->flags & USE_SHMEM) - pr_cont(" mem %#5lx,", dev->mem_start); - if (info->flags & HAS_MISC_REG) - pr_cont(" %s xcvr,", if_names[dev->if_port]); - pr_cont(" hw_addr %pM\n", dev->dev_addr); - return 0; - -failed: - pcnet_release(link); - return -ENODEV; -} /* pcnet_config */ - -static void pcnet_release(struct pcmcia_device *link) -{ - pcnet_dev_t *info = PRIV(link->priv); - - dev_dbg(&link->dev, "pcnet_release\n"); - - if (info->flags & USE_SHMEM) - iounmap(info->base); - - pcmcia_disable_device(link); -} - -static int pcnet_suspend(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - if (link->open) - netif_device_detach(dev); - - return 0; -} - -static int pcnet_resume(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - if (link->open) { - pcnet_reset_8390(dev); - NS8390_init(dev, 1); - netif_device_attach(dev); - } - - return 0; -} - - -/*====================================================================== - - MII interface support for DL10019 and DL10022 based cards - - On the DL10019, the MII IO direction bit is 0x10; on the DL10022 - it is 0x20. Setting both bits seems to work on both card types. 
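
The mdio_read() and mdio_write() routines that follow bit-bang standard IEEE 802.3 clause-22 MDIO frames through the DLINK_GPIO register: 32 preamble '1' bits (mdio_sync), a command word clocked out MSB-first, a bus turnaround, then 16 data bits. A stand-alone sketch of the 14-bit read command that mdio_read() builds as (0x06<<10)|(phy_id<<5)|loc — the PHY and register numbers below are arbitrary examples:

    #include <stdio.h>

    /* Clause-22 MDIO read framing: start (01), read opcode (10),
     * 5-bit PHY address, 5-bit register number.  0x06 == 01 10. */
    static unsigned int mdio_read_cmd(unsigned int phy_id, unsigned int reg)
    {
        return (0x06u << 10) | (phy_id << 5) | reg;
    }

    int main(void)
    {
        unsigned int cmd = mdio_read_cmd(1, 2);   /* PHY 1, register 2 */
        for (int i = 13; i >= 0; i--)             /* MSB first, as the driver does */
            putchar((cmd >> i) & 1 ? '1' : '0');
        putchar('\n');                            /* prints 01100000100010 */
        return 0;
    }
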
-
-======================================================================*/
-
-#define DLINK_GPIO 0x1c
-#define DLINK_DIAG 0x1d
-#define DLINK_EEPROM 0x1e
-
-#define MDIO_SHIFT_CLK 0x80
-#define MDIO_DATA_OUT 0x40
-#define MDIO_DIR_WRITE 0x30
-#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE)
-#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
-#define MDIO_DATA_READ 0x10
-#define MDIO_MASK 0x0f
-
-static void mdio_sync(unsigned int addr)
-{
- int bits, mask = inb(addr) & MDIO_MASK;
- for (bits = 0; bits < 32; bits++) {
- outb(mask | MDIO_DATA_WRITE1, addr);
- outb(mask | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
- }
-}
-
-static int mdio_read(unsigned int addr, int phy_id, int loc)
-{
- u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
- int i, retval = 0, mask = inb(addr) & MDIO_MASK;
-
- mdio_sync(addr);
- for (i = 13; i >= 0; i--) {
- int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
- outb(mask | dat, addr);
- outb(mask | dat | MDIO_SHIFT_CLK, addr);
- }
- for (i = 19; i > 0; i--) {
- outb(mask, addr);
- retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
- outb(mask | MDIO_SHIFT_CLK, addr);
- }
- return (retval>>1) & 0xffff;
-}
-
-static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
-{
- u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
- int i, mask = inb(addr) & MDIO_MASK;
-
- mdio_sync(addr);
- for (i = 31; i >= 0; i--) {
- int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
- outb(mask | dat, addr);
- outb(mask | dat | MDIO_SHIFT_CLK, addr);
- }
- for (i = 1; i >= 0; i--) {
- outb(mask, addr);
- outb(mask | MDIO_SHIFT_CLK, addr);
- }
-}
-
-/*======================================================================
-
- EEPROM access routines for DL10019 and DL10022 based cards
-
-======================================================================*/
-
-#define EE_EEP 0x40
-#define EE_ASIC 0x10
-#define EE_CS 0x08
-#define EE_CK 0x04
-#define EE_DO 0x02
-#define EE_DI 0x01
-#define EE_ADOT 0x01 /* DataOut for ASIC */
-#define EE_READ_CMD 0x06
-
-#define DL19FDUPLX 0x0400 /* DL10019 Full duplex mode */
-
-static int read_eeprom(unsigned int ioaddr, int location)
-{
- int i, retval = 0;
- unsigned int ee_addr = ioaddr + DLINK_EEPROM;
- int read_cmd = location | (EE_READ_CMD << 8);
-
- outb(0, ee_addr);
- outb(EE_EEP|EE_CS, ee_addr);
-
- /* Shift the read command bits out. */
- for (i = 10; i >= 0; i--) {
- short dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
- outb_p(EE_EEP|EE_CS|dataval, ee_addr);
- outb_p(EE_EEP|EE_CS|dataval|EE_CK, ee_addr);
- }
- outb(EE_EEP|EE_CS, ee_addr);
-
- for (i = 16; i > 0; i--) {
- outb_p(EE_EEP|EE_CS | EE_CK, ee_addr);
- retval = (retval << 1) | ((inb(ee_addr) & EE_DI) ? 1 : 0);
- outb_p(EE_EEP|EE_CS, ee_addr);
- }
-
- /* Terminate the EEPROM access. */
- outb(0, ee_addr);
- return retval;
-}
-
-/*
- The internal ASIC registers can be changed by EEPROM READ access
- with EE_ASIC bit set.
- In ASIC mode, EE_ADOT is used to output the data to the ASIC.
-*/
-
-static void write_asic(unsigned int ioaddr, int location, short asic_data)
-{
- int i;
- unsigned int ee_addr = ioaddr + DLINK_EEPROM;
- short dataval;
- int read_cmd = location | (EE_READ_CMD << 8);
-
- asic_data |= read_eeprom(ioaddr, location);
-
- outb(0, ee_addr);
- outb(EE_ASIC|EE_CS|EE_DI, ee_addr);
-
- read_cmd = read_cmd >> 1;
-
- /* Shift the read command bits out. */
- for (i = 9; i >= 0; i--) {
- dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
- outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
- outb_p(EE_ASIC|EE_CS|EE_DI|dataval|EE_CK, ee_addr);
- outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
- }
- // sync
- outb(EE_ASIC|EE_CS, ee_addr);
- outb(EE_ASIC|EE_CS|EE_CK, ee_addr);
- outb(EE_ASIC|EE_CS, ee_addr);
-
- for (i = 15; i >= 0; i--) {
- dataval = (asic_data & (1 << i)) ?
EE_ADOT : 0; - outb_p(EE_ASIC|EE_CS|dataval, ee_addr); - outb_p(EE_ASIC|EE_CS|dataval|EE_CK, ee_addr); - outb_p(EE_ASIC|EE_CS|dataval, ee_addr); - } - - /* Terminate the ASIC access. */ - outb(EE_ASIC|EE_DI, ee_addr); - outb(EE_ASIC|EE_DI| EE_CK, ee_addr); - outb(EE_ASIC|EE_DI, ee_addr); - - outb(0, ee_addr); -} - -/*====================================================================*/ - -static void set_misc_reg(struct net_device *dev) -{ - unsigned int nic_base = dev->base_addr; - pcnet_dev_t *info = PRIV(dev); - u_char tmp; - - if (info->flags & HAS_MISC_REG) { - tmp = inb_p(nic_base + PCNET_MISC) & ~3; - if (dev->if_port == 2) - tmp |= 1; - if (info->flags & USE_BIG_BUF) - tmp |= 2; - if (info->flags & HAS_IBM_MISC) - tmp |= 8; - outb_p(tmp, nic_base + PCNET_MISC); - } - if (info->flags & IS_DL10022) { - if (info->flags & HAS_MII) { - /* Advertise 100F, 100H, 10F, 10H */ - mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1); - /* Restart MII autonegotiation */ - mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000); - mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200); - info->mii_reset = jiffies; - } else { - outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG); - } - } else if (info->flags & IS_DL10019) { - /* Advertise 100F, 100H, 10F, 10H */ - mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1); - /* Restart MII autonegotiation */ - mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000); - mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200); - } -} - -/*====================================================================*/ - -static void mii_phy_probe(struct net_device *dev) -{ - pcnet_dev_t *info = PRIV(dev); - unsigned int mii_addr = dev->base_addr + DLINK_GPIO; - int i; - u_int tmp, phyid; - - for (i = 31; i >= 0; i--) { - tmp = mdio_read(mii_addr, i, 1); - if ((tmp == 0) || (tmp == 0xffff)) - continue; - tmp = mdio_read(mii_addr, i, MII_PHYID_REG1); - phyid = tmp << 16; - phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2); - phyid &= MII_PHYID_REV_MASK; - netdev_dbg(dev, "MII at %d is 0x%08x\n", i, phyid); - if (phyid == AM79C9XX_HOME_PHY) { - info->pna_phy = i; - } else if (phyid != AM79C9XX_ETH_PHY) { - info->eth_phy = i; - } - } -} - -static int pcnet_open(struct net_device *dev) -{ - int ret; - pcnet_dev_t *info = PRIV(dev); - struct pcmcia_device *link = info->p_dev; - unsigned int nic_base = dev->base_addr; - - dev_dbg(&link->dev, "pcnet_open('%s')\n", dev->name); - - if (!pcmcia_dev_present(link)) - return -ENODEV; - - set_misc_reg(dev); - - outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */ - ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev->name, dev); - if (ret) - return ret; - - link->open++; - - info->phy_id = info->eth_phy; - info->link_status = 0x00; - init_timer(&info->watchdog); - info->watchdog.function = ei_watchdog; - info->watchdog.data = (u_long)dev; - info->watchdog.expires = jiffies + HZ; - add_timer(&info->watchdog); - - return ei_open(dev); -} /* pcnet_open */ - -/*====================================================================*/ - -static int pcnet_close(struct net_device *dev) -{ - pcnet_dev_t *info = PRIV(dev); - struct pcmcia_device *link = info->p_dev; - - dev_dbg(&link->dev, "pcnet_close('%s')\n", dev->name); - - ei_close(dev); - free_irq(dev->irq, dev); - - link->open--; - netif_stop_queue(dev); - del_timer_sync(&info->watchdog); - - return 0; -} /* pcnet_close */ - -/*====================================================================== - - Hard reset the card. 
This used to pause for the same period that - a 8390 reset command required, but that shouldn't be necessary. - -======================================================================*/ - -static void pcnet_reset_8390(struct net_device *dev) -{ - unsigned int nic_base = dev->base_addr; - int i; - - ei_status.txing = ei_status.dmaing = 0; - - outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD); - - outb(inb(nic_base + PCNET_RESET), nic_base + PCNET_RESET); - - for (i = 0; i < 100; i++) { - if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0) - break; - udelay(100); - } - outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */ - - if (i == 100) - netdev_err(dev, "pcnet_reset_8390() did not complete.\n"); - - set_misc_reg(dev); - -} /* pcnet_reset_8390 */ - -/*====================================================================*/ - -static int set_config(struct net_device *dev, struct ifmap *map) -{ - pcnet_dev_t *info = PRIV(dev); - if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { - if (!(info->flags & HAS_MISC_REG)) - return -EOPNOTSUPP; - else if ((map->port < 1) || (map->port > 2)) - return -EINVAL; - dev->if_port = map->port; - netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); - NS8390_init(dev, 1); - } - return 0; -} - -/*====================================================================*/ - -static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - pcnet_dev_t *info; - irqreturn_t ret = ei_interrupt(irq, dev_id); - - if (ret == IRQ_HANDLED) { - info = PRIV(dev); - info->stale = 0; - } - return ret; -} - -static void ei_watchdog(u_long arg) -{ - struct net_device *dev = (struct net_device *)arg; - pcnet_dev_t *info = PRIV(dev); - unsigned int nic_base = dev->base_addr; - unsigned int mii_addr = nic_base + DLINK_GPIO; - u_short link; - - if (!netif_device_present(dev)) goto reschedule; - - /* Check for pending interrupt with expired latency timer: with - this, we can limp along even if the interrupt is blocked */ - if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { - if (!info->fast_poll) - netdev_info(dev, "interrupt(s) dropped!\n"); - ei_irq_wrapper(dev->irq, dev); - info->fast_poll = HZ; - } - if (info->fast_poll) { - info->fast_poll--; - info->watchdog.expires = jiffies + 1; - add_timer(&info->watchdog); - return; - } - - if (!(info->flags & HAS_MII)) - goto reschedule; - - mdio_read(mii_addr, info->phy_id, 1); - link = mdio_read(mii_addr, info->phy_id, 1); - if (!link || (link == 0xffff)) { - if (info->eth_phy) { - info->phy_id = info->eth_phy = 0; - } else { - netdev_info(dev, "MII is missing!\n"); - info->flags &= ~HAS_MII; - } - goto reschedule; - } - - link &= 0x0004; - if (link != info->link_status) { - u_short p = mdio_read(mii_addr, info->phy_id, 5); - netdev_info(dev, "%s link beat\n", link ? "found" : "lost"); - if (link && (info->flags & IS_DL10022)) { - /* Disable collision detection on full duplex links */ - outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG); - } else if (link && (info->flags & IS_DL10019)) { - /* Disable collision detection on full duplex links */ - write_asic(dev->base_addr, 4, (p & 0x140) ? DL19FDUPLX : 0); - } - if (link) { - if (info->phy_id == info->eth_phy) { - if (p) - netdev_info(dev, "autonegotiation complete: " - "%sbaseT-%cD selected\n", - ((p & 0x0180) ? "100" : "10"), - ((p & 0x0140) ? 
'F' : 'H')); - else - netdev_info(dev, "link partner did not autonegotiate\n"); - } - NS8390_init(dev, 1); - } - info->link_status = link; - } - if (info->pna_phy && time_after(jiffies, info->mii_reset + 6*HZ)) { - link = mdio_read(mii_addr, info->eth_phy, 1) & 0x0004; - if (((info->phy_id == info->pna_phy) && link) || - ((info->phy_id != info->pna_phy) && !link)) { - /* isolate this MII and try flipping to the other one */ - mdio_write(mii_addr, info->phy_id, 0, 0x0400); - info->phy_id ^= info->pna_phy ^ info->eth_phy; - netdev_info(dev, "switched to %s transceiver\n", - (info->phy_id == info->eth_phy) ? "ethernet" : "PNA"); - mdio_write(mii_addr, info->phy_id, 0, - (info->phy_id == info->eth_phy) ? 0x1000 : 0); - info->link_status = 0; - info->mii_reset = jiffies; - } - } - -reschedule: - info->watchdog.expires = jiffies + HZ; - add_timer(&info->watchdog); -} - -/*====================================================================*/ - - -static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - pcnet_dev_t *info = PRIV(dev); - struct mii_ioctl_data *data = if_mii(rq); - unsigned int mii_addr = dev->base_addr + DLINK_GPIO; - - if (!(info->flags & (IS_DL10019|IS_DL10022))) - return -EINVAL; - - switch (cmd) { - case SIOCGMIIPHY: - data->phy_id = info->phy_id; - case SIOCGMIIREG: /* Read MII PHY register. */ - data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); - return 0; - case SIOCSMIIREG: /* Write MII PHY register. */ - mdio_write(mii_addr, data->phy_id, data->reg_num & 0x1f, data->val_in); - return 0; - } - return -EOPNOTSUPP; -} - -/*====================================================================*/ - -static void dma_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, - int ring_page) -{ - unsigned int nic_base = dev->base_addr; - - if (ei_status.dmaing) { - netdev_notice(dev, "DMAing conflict in dma_block_input." - "[DMAstat:%1x][irqlock:%1x]\n", - ei_status.dmaing, ei_status.irqlock); - return; - } - - ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD); - outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); - outb_p(0, nic_base + EN0_RCNTHI); - outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ - outb_p(ring_page, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD); - - insw(nic_base + PCNET_DATAPORT, hdr, - sizeof(struct e8390_pkt_hdr)>>1); - /* Fix for big endian systems */ - hdr->count = le16_to_cpu(hdr->count); - - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} - -/*====================================================================*/ - -static void dma_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - unsigned int nic_base = dev->base_addr; - int xfer_count = count; - char *buf = skb->data; - - if ((ei_debug > 4) && (count != 4)) - netdev_dbg(dev, "[bi=%d]\n", count+4); - if (ei_status.dmaing) { - netdev_notice(dev, "DMAing conflict in dma_block_input." 
- "[DMAstat:%1x][irqlock:%1x]\n", - ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD); - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); - outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); - outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD); - - insw(nic_base + PCNET_DATAPORT,buf,count>>1); - if (count & 0x01) - buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++; - - /* This was for the ALPHA version only, but enough people have been - encountering problems that it is still here. */ -#ifdef PCMCIA_DEBUG - if (ei_debug > 4) { /* DMA termination address check... */ - int addr, tries = 20; - do { - /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here - -- it's broken for Rx on some cards! */ - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - addr = (high << 8) + low; - if (((ring_offset + xfer_count) & 0xff) == (addr & 0xff)) - break; - } while (--tries > 0); - if (tries <= 0) - netdev_notice(dev, "RX transfer address mismatch," - "%#4.4x (expected) vs. %#4.4x (actual).\n", - ring_offset + xfer_count, addr); - } -#endif - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; -} /* dma_block_input */ - -/*====================================================================*/ - -static void dma_block_output(struct net_device *dev, int count, - const u_char *buf, const int start_page) -{ - unsigned int nic_base = dev->base_addr; - pcnet_dev_t *info = PRIV(dev); -#ifdef PCMCIA_DEBUG - int retries = 0; -#endif - u_long dma_start; - -#ifdef PCMCIA_DEBUG - if (ei_debug > 4) - netdev_dbg(dev, "[bo=%d]\n", count); -#endif - - /* Round the count up for word writes. Do we need to do this? - What effect will an odd byte count have on the 8390? - I should check someday. */ - if (count & 0x01) - count++; - if (ei_status.dmaing) { - netdev_notice(dev, "DMAing conflict in dma_block_output." - "[DMAstat:%1x][irqlock:%1x]\n", - ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - /* We should already be in page 0, but to be safe... */ - outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base+PCNET_CMD); - -#ifdef PCMCIA_DEBUG - retry: -#endif - - outb_p(ENISR_RDC, nic_base + EN0_ISR); - - /* Now the normal output. */ - outb_p(count & 0xff, nic_base + EN0_RCNTLO); - outb_p(count >> 8, nic_base + EN0_RCNTHI); - outb_p(0x00, nic_base + EN0_RSARLO); - outb_p(start_page, nic_base + EN0_RSARHI); - - outb_p(E8390_RWRITE+E8390_START, nic_base + PCNET_CMD); - outsw(nic_base + PCNET_DATAPORT, buf, count>>1); - - dma_start = jiffies; - -#ifdef PCMCIA_DEBUG - /* This was for the ALPHA version only, but enough people have been - encountering problems that it is still here. */ - if (ei_debug > 4) { /* DMA termination address check... */ - int addr, tries = 20; - do { - int high = inb_p(nic_base + EN0_RSARHI); - int low = inb_p(nic_base + EN0_RSARLO); - addr = (high << 8) + low; - if ((start_page << 8) + count == addr) - break; - } while (--tries > 0); - if (tries <= 0) { - netdev_notice(dev, "Tx packet transfer address mismatch," - "%#4.4x (expected) vs. 
%#4.4x (actual).\n", - (start_page << 8) + count, addr); - if (retries++ == 0) - goto retry; - } - } -#endif - - while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) - if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) { - netdev_notice(dev, "timeout waiting for Tx RDC.\n"); - pcnet_reset_8390(dev); - NS8390_init(dev, 1); - break; - } - - outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - if (info->flags & DELAY_OUTPUT) - udelay((long)delay_time); - ei_status.dmaing &= ~0x01; -} - -/*====================================================================*/ - -static int setup_dma_config(struct pcmcia_device *link, int start_pg, - int stop_pg) -{ - struct net_device *dev = link->priv; - - ei_status.tx_start_page = start_pg; - ei_status.rx_start_page = start_pg + TX_PAGES; - ei_status.stop_page = stop_pg; - - /* set up block i/o functions */ - ei_status.get_8390_hdr = dma_get_8390_hdr; - ei_status.block_input = dma_block_input; - ei_status.block_output = dma_block_output; - - return 0; -} - -/*====================================================================*/ - -static void copyin(void *dest, void __iomem *src, int c) -{ - u_short *d = dest; - u_short __iomem *s = src; - int odd; - - if (c <= 0) - return; - odd = (c & 1); c >>= 1; - - if (c) { - do { *d++ = __raw_readw(s++); } while (--c); - } - /* get last byte by fetching a word and masking */ - if (odd) - *((u_char *)d) = readw(s) & 0xff; -} - -static void copyout(void __iomem *dest, const void *src, int c) -{ - u_short __iomem *d = dest; - const u_short *s = src; - int odd; - - if (c <= 0) - return; - odd = (c & 1); c >>= 1; - - if (c) { - do { __raw_writew(*s++, d++); } while (--c); - } - /* copy last byte doing a read-modify-write */ - if (odd) - writew((readw(d) & 0xff00) | *(u_char *)s, d); -} - -/*====================================================================*/ - -static void shmem_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, - int ring_page) -{ - void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8) - + (ring_page << 8) - - (ei_status.rx_start_page << 8); - - copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr)); - /* Fix for big endian systems */ - hdr->count = le16_to_cpu(hdr->count); -} - -/*====================================================================*/ - -static void shmem_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - void __iomem *base = ei_status.mem; - unsigned long offset = (TX_PAGES<<8) + ring_offset - - (ei_status.rx_start_page << 8); - char *buf = skb->data; - - if (offset + count > ei_status.priv) { - /* We must wrap the input move. 
*/ - int semi_count = ei_status.priv - offset; - copyin(buf, base + offset, semi_count); - buf += semi_count; - offset = TX_PAGES<<8; - count -= semi_count; - } - copyin(buf, base + offset, count); -} - -/*====================================================================*/ - -static void shmem_block_output(struct net_device *dev, int count, - const u_char *buf, const int start_page) -{ - void __iomem *shmem = ei_status.mem + (start_page << 8); - shmem -= ei_status.tx_start_page << 8; - copyout(shmem, buf, count); -} - -/*====================================================================*/ - -static int setup_shmem_window(struct pcmcia_device *link, int start_pg, - int stop_pg, int cm_offset) -{ - struct net_device *dev = link->priv; - pcnet_dev_t *info = PRIV(dev); - int i, window_size, offset, ret; - - window_size = (stop_pg - start_pg) << 8; - if (window_size > 32 * 1024) - window_size = 32 * 1024; - - /* Make sure it's a power of two. */ - window_size = roundup_pow_of_two(window_size); - - /* Allocate a memory window */ - link->resource[3]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; - link->resource[3]->flags |= WIN_USE_WAIT; - link->resource[3]->start = 0; link->resource[3]->end = window_size; - ret = pcmcia_request_window(link, link->resource[3], mem_speed); - if (ret) - goto failed; - - offset = (start_pg << 8) + cm_offset; - offset -= offset % window_size; - ret = pcmcia_map_mem_page(link, link->resource[3], offset); - if (ret) - goto failed; - - /* Try scribbling on the buffer */ - info->base = ioremap(link->resource[3]->start, - resource_size(link->resource[3])); - for (i = 0; i < (TX_PAGES<<8); i += 2) - __raw_writew((i>>1), info->base+offset+i); - udelay(100); - for (i = 0; i < (TX_PAGES<<8); i += 2) - if (__raw_readw(info->base+offset+i) != (i>>1)) break; - pcnet_reset_8390(dev); - if (i != (TX_PAGES<<8)) { - iounmap(info->base); - pcmcia_release_window(link, link->resource[3]); - info->base = NULL; - goto failed; - } - - ei_status.mem = info->base + offset; - ei_status.priv = resource_size(link->resource[3]); - dev->mem_start = (u_long)ei_status.mem; - dev->mem_end = dev->mem_start + resource_size(link->resource[3]); - - ei_status.tx_start_page = start_pg; - ei_status.rx_start_page = start_pg + TX_PAGES; - ei_status.stop_page = start_pg + ( - (resource_size(link->resource[3]) - offset) >> 8); - - /* set up block i/o functions */ - ei_status.get_8390_hdr = shmem_get_8390_hdr; - ei_status.block_input = shmem_block_input; - ei_status.block_output = shmem_block_output; - - info->flags |= USE_SHMEM; - return 0; - -failed: - return 1; -} - -/*====================================================================*/ - -static const struct pcmcia_device_id pcnet_ids[] = { - PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0057, 0x0021), - PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0104, 0x000a), - PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0xea15), - PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0143, 0x3341), - PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0143, 0xc0ab), - PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101), - PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), - 
PCMCIA_PFC_DEVICE_PROD_ID12(0, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), - PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away 28.8 PC Card ", 0xb569a6e5, 0x5bd4ff2c), - PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away Credit Card Adapter", 0xb569a6e5, 0x4bdf15c3), - PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "w95 Home and Away Credit Card ", 0xb569a6e5, 0xae911c15), - PCMCIA_MFC_DEVICE_PROD_ID123(0, "APEX DATA", "MULTICARD", "ETHERNET-MODEM", 0x11c2da09, 0x7289dc5d, 0xaad95e1f), - PCMCIA_MFC_DEVICE_PROD_ID2(0, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302), - PCMCIA_DEVICE_MANF_CARD(0x0057, 0x1004), - PCMCIA_DEVICE_MANF_CARD(0x0104, 0x000d), - PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0075), - PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0145), - PCMCIA_DEVICE_MANF_CARD(0x0149, 0x0230), - PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530), - PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab), - PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110), - PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041), - PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452), - PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300), - PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0307), - PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), - PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103), - PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121), - PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941), - PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e), - PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b), - PCMCIA_DEVICE_PROD_ID123("CNet ", "CN30BC", "ETHERNET", 0x9fe55d3d, 0x85601198, 0x3ff7175b), - PCMCIA_DEVICE_PROD_ID123("Digital", "Ethernet", "Adapter", 0x9999ab35, 0x00b2e941, 0x4b0d829e), - PCMCIA_DEVICE_PROD_ID123("Edimax Technology Inc.", "PCMCIA", "Ethernet Card", 0x738a0019, 0x281f1c5d, 0x5e9d92c0), - PCMCIA_DEVICE_PROD_ID123("EFA ", "EFA207", "ETHERNET", 0x3d294be4, 0xeb9aab6c, 0x3ff7175b), - PCMCIA_DEVICE_PROD_ID123("I-O DATA", "PCLA", "ETHERNET", 0x1d55d7ec, 0xe4c64d34, 0x3ff7175b), - PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCLATE", "ETHERNET", 0x547e66dc, 0x6b260753, 0x3ff7175b), - PCMCIA_DEVICE_PROD_ID123("KingMax Technology Inc.", "EN10-T2", "PCMCIA Ethernet Card", 0x932b7189, 0x699e4436, 0x6f6652e0), - PCMCIA_DEVICE_PROD_ID123("PCMCIA", "PCMCIA-ETHERNET-CARD", "UE2216", 0x281f1c5d, 0xd4cd2f20, 0xb87add82), - PCMCIA_DEVICE_PROD_ID123("PCMCIA", "PCMCIA-ETHERNET-CARD", "UE2620", 0x281f1c5d, 0xd4cd2f20, 0x7d3d83a8), - PCMCIA_DEVICE_PROD_ID1("2412LAN", 0x67f236ab), - PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2212", 0xdfc6b5b2, 0xcb112a11), - PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2216-PCMCIA-ETHERNET", 0xdfc6b5b2, 0x5542bfff), - PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA100-PCM-T V2 100/10M LAN PC Card", 0xbb7fbdd7, 0xcd91cc68), - PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA100-PCM V2", 0x36634a66, 0xc6d05997), - PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA-PCM_V2", 0xbb7fBdd7, 0x28e299f8), - PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA-PCM V3", 0x36634a66, 0x62241d96), - PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8010", 0x5070a7f9, 0x82f96e96), - PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8610", 0x5070a7f9, 0x86741224), - PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8002", 0x93b15570, 0x75ec3efb), - 
PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8002T", 0x93b15570, 0x461c5247), - PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8010", 0x93b15570, 0x82f96e96), - PCMCIA_DEVICE_PROD_ID12("AnyCom", "ECO Ethernet", 0x578ba6e7, 0x0a9888c1), - PCMCIA_DEVICE_PROD_ID12("AnyCom", "ECO Ethernet 10/100", 0x578ba6e7, 0x939fedbd), - PCMCIA_DEVICE_PROD_ID12("AROWANA", "PCMCIA Ethernet LAN Card", 0x313adbc8, 0x08d9f190), - PCMCIA_DEVICE_PROD_ID12("ASANTE", "FriendlyNet PC Card", 0x3a7ade0f, 0x41c64504), - PCMCIA_DEVICE_PROD_ID12("Billionton", "LNT-10TB", 0x552ab682, 0xeeb1ba6a), - PCMCIA_DEVICE_PROD_ID12("CF", "10Base-Ethernet", 0x44ebf863, 0x93ae4d79), - PCMCIA_DEVICE_PROD_ID12("CNet", "CN40BC Ethernet", 0xbc477dde, 0xfba775a7), - PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "BASEline PCMCIA 10 MBit Ethernetadapter", 0xfa2e424d, 0xe9190d8a), - PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9), - PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), - PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), - PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), - PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b), - PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), - PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), - PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), - PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa), - PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-TD", 0x5261440f, 0x47d5ca83), - PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), - PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), - PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), - PCMCIA_DEVICE_PROD_ID12("corega K.K.", "(CG-LAPCCTXD)", 0x5261440f, 0x73ec0d88), - PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04), - PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d), - PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814), - PCMCIA_DEVICE_PROD_ID12("DataTrek.", "NetCard ", 0x5cd66d9d, 0x84697ce0), - PCMCIA_DEVICE_PROD_ID12("Dayna Communications, Inc.", "CommuniCard E", 0x0c629325, 0xb4e7dbaf), - PCMCIA_DEVICE_PROD_ID12("Digicom", "Palladio LAN 10/100", 0x697403d8, 0xe160b995), - PCMCIA_DEVICE_PROD_ID12("Digicom", "Palladio LAN 10/100 Dongless", 0x697403d8, 0xa6d3b233), - PCMCIA_DEVICE_PROD_ID12("DIGITAL", "DEPCM-XX", 0x69616cb3, 0xe600e76e), - PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-650", 0x1a424a1c, 0xf28c8398), - PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-660", 0x1a424a1c, 0xd9a1d05b), - PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-660+", 0x1a424a1c, 0x50dcd0ec), - PCMCIA_DEVICE_PROD_ID12("D-Link", "DFE-650", 0x1a424a1c, 0x0f0073f9), - PCMCIA_DEVICE_PROD_ID12("Dual Speed", "10/100 PC Card", 0x725b842d, 0xf1efee84), - PCMCIA_DEVICE_PROD_ID12("Dual Speed", "10/100 Port Attached PC Card", 0x725b842d, 0x2db1f8e9), - PCMCIA_DEVICE_PROD_ID12("Dynalink", "L10BC", 0x55632fd5, 0xdc65f2b1), - PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L10BC", 0x6a26d1cf, 0xdc65f2b1), - PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L10C", 0x6a26d1cf, 0xc4f84efb), - PCMCIA_DEVICE_PROD_ID12("E-CARD", "E-CARD", 0x6701da11, 0x6701da11), - PCMCIA_DEVICE_PROD_ID12("EIGER Labs Inc.", "Ethernet 10BaseT 
card", 0x53c864c6, 0xedd059f6), - PCMCIA_DEVICE_PROD_ID12("EIGER Labs Inc.", "Ethernet Combo card", 0x53c864c6, 0x929c486c), - PCMCIA_DEVICE_PROD_ID12("Ethernet", "Adapter", 0x00b2e941, 0x4b0d829e), - PCMCIA_DEVICE_PROD_ID12("Ethernet Adapter", "E2000 PCMCIA Ethernet", 0x96767301, 0x71fbbc61), - PCMCIA_DEVICE_PROD_ID12("Ethernet PCMCIA adapter", "EP-210", 0x8dd86181, 0xf2b52517), - PCMCIA_DEVICE_PROD_ID12("Fast Ethernet", "Adapter", 0xb4be14e3, 0x4b0d829e), - PCMCIA_DEVICE_PROD_ID12("Grey Cell", "GCS2000", 0x2a151fac, 0xf00555cb), - PCMCIA_DEVICE_PROD_ID12("Grey Cell", "GCS2220", 0x2a151fac, 0xc1b7e327), - PCMCIA_DEVICE_PROD_ID12("GVC", "NIC-2000p", 0x76e171bd, 0x6eb1c947), - PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "Ethernet", 0xe3736c88, 0x00b2e941), - PCMCIA_DEVICE_PROD_ID12("IC-CARD", "IC-CARD", 0x60cb09a6, 0x60cb09a6), - PCMCIA_DEVICE_PROD_ID12("IC-CARD+", "IC-CARD+", 0x93693494, 0x93693494), - PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), - PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), - PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), - PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616), - PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), - PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), - PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), - PCMCIA_DEVICE_PROD_ID12("Kingston Technology Corp.", "EtheRx PC Card Ethernet Adapter", 0x313c7be3, 0x0afb54a2), - PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-10/100CD", 0x1b7827b2, 0xcda71d1c), - PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDF", 0x1b7827b2, 0xfec71e40), - PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDL/T", 0x1b7827b2, 0x79fba4f7), - PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDS", 0x1b7827b2, 0x931afaab), - PCMCIA_DEVICE_PROD_ID12("LEMEL", "LM-N89TX PRO", 0xbbefb52f, 0xd2897a97), - PCMCIA_DEVICE_PROD_ID12("Linksys", "Combo PCMCIA EthernetCard (EC2T)", 0x0733cc81, 0x32ee8c78), - PCMCIA_DEVICE_PROD_ID12("LINKSYS", "E-CARD", 0xf7cb0b07, 0x6701da11), - PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 Integrated PC Card (PCM100)", 0x0733cc81, 0x453c3f9d), - PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100)", 0x0733cc81, 0x66c5a389), - PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9), - PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a), - PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), - PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), - PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), - PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN10TE", 0x88fcdeda, 0xc1e2521c), - PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), - PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), - PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307), - PCMCIA_DEVICE_PROD_ID12("Matsushita Electric Industrial Co.,LTD.", "CF-VEL211", 0x44445376, 0x8ded41d4), - PCMCIA_DEVICE_PROD_ID12("MAXTECH", "PCN2000", 0x78d64bc0, 0xca0ca4b8), - PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC2-T", 0x481e0094, 0xa2eb0cf3), - PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC2-TX", 0x481e0094, 0x41a6916c), - PCMCIA_DEVICE_PROD_ID12("Microcom C.E.", "Travel Card LAN 10/100", 0x4b91cec7, 0xe70220d6), - PCMCIA_DEVICE_PROD_ID12("Microdyne", 
"NE4200", 0x2e6da59b, 0x0478e472), - PCMCIA_DEVICE_PROD_ID12("MIDORI ELEC.", "LT-PCMT", 0x648d55c1, 0xbde526c7), - PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover 4100", 0x36e1191f, 0x60c229b9), - PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8), - PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76), - PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e), - PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f), - PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet", 0x281f1c5d, 0x00b2e941), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "ETHERNET", 0x281f1c5d, 0x3ff7175b), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet 10BaseT Card", 0x281f1c5d, 0x4de2f6c8), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet Card", 0x281f1c5d, 0x5e9d92c0), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet Combo card", 0x281f1c5d, 0x929c486c), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "ETHERNET V1.0", 0x281f1c5d, 0x4d8817c8), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEthernet", 0x281f1c5d, 0xfe871eeb), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Fast-Ethernet", 0x281f1c5d, 0x45f1f3b4), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FAST ETHERNET CARD", 0x281f1c5d, 0xec5dbca7), - PCMCIA_DEVICE_PROD_ID12("PCMCIA LAN", "Ethernet", 0x7500e246, 0x00b2e941), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "LNT-10TN", 0x281f1c5d, 0xe707f641), - PCMCIA_DEVICE_PROD_ID12("PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "UE2212", 0x281f1c5d, 0xbf17199b), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", " Ethernet NE2000 Compatible", 0x281f1c5d, 0x42d5d7e1), - PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10baseT 3.3V", 0xebf91155, 0x30074c80), - PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50), - PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110), - PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941), - PCMCIA_DEVICE_PROD_ID12("RIOS Systems Co.", "PC CARD3 ETHERNET", 0x7dd33481, 0x10b41826), - PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet NE2000 Compatible", 0xe3e66e22, 0xb96150df), - PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0), - PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd), - PCMCIA_DEVICE_PROD_ID12("RPTI LTD.", "EP400", 0xc53ac515, 0x81e39388), - PCMCIA_DEVICE_PROD_ID12("SCM", "Ethernet Combo card", 0xbdc3b102, 0x929c486c), - PCMCIA_DEVICE_PROD_ID12("Seiko Epson Corp.", "Ethernet", 0x09928730, 0x00b2e941), - PCMCIA_DEVICE_PROD_ID12("SMC", "EZCard-10-PCMCIA", 0xc4f8b18b, 0xfb21d265), - PCMCIA_DEVICE_PROD_ID12("Socket Communications Inc", "Socket EA PCMCIA LAN Adapter Revision D", 0xc70a4760, 0x2ade483e), - PCMCIA_DEVICE_PROD_ID12("Socket Communications Inc", "Socket EA PCMCIA LAN Adapter Revision E", 0xc70a4760, 0x5dd978a8), - PCMCIA_DEVICE_PROD_ID12("TDK", "LAK-CD031 for PCMCIA", 0x1eae9475, 0x0ed386fa), - PCMCIA_DEVICE_PROD_ID12("Telecom Device K.K.", "SuperSocket RE450T", 0x466b05f0, 0x8b74bc4f), - PCMCIA_DEVICE_PROD_ID12("Telecom Device K.K.", "SuperSocket RE550T", 0x466b05f0, 0x33c8db2a), - PCMCIA_DEVICE_PROD_ID13("Hypertec", "EP401", 0x8787bec7, 0xf6e4a31e), - PCMCIA_DEVICE_PROD_ID13("KingMax Technology Inc.", "Ethernet Card", 0x932b7189, 
0x5e9d92c0), - PCMCIA_DEVICE_PROD_ID13("LONGSHINE", "EP401", 0xf866b0b0, 0xf6e4a31e), - PCMCIA_DEVICE_PROD_ID13("Xircom", "CFE-10", 0x2e3ee845, 0x22a49f89), - PCMCIA_DEVICE_PROD_ID1("CyQ've 10 Base-T LAN CARD", 0x94faf360), - PCMCIA_DEVICE_PROD_ID1("EP-210 PCMCIA LAN CARD.", 0x8850b4de), - PCMCIA_DEVICE_PROD_ID1("ETHER-C16", 0x06a8514f), - PCMCIA_DEVICE_PROD_ID1("NE2000 Compatible", 0x75b8ad5a), - PCMCIA_DEVICE_PROD_ID2("EN-6200P2", 0xa996d078), - /* too generic! */ - /* PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100 Ethernet Card", 0x281f1c5d, 0x11b0ffc0), */ - PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "cis/PCMLM28.cis"), - PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "cis/PCMLM28.cis"), - PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), - PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), - PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), - PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "TOSHIBA", "Modem/LAN Card", 0xb4585a1a, 0x53f922f8, "cis/PCMLM28.cis"), - PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), - PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), - PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), - PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), - PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", - 0xb4be14e3, 0x43ac239b, 0x0877b627), - PCMCIA_DEVICE_NULL -}; -MODULE_DEVICE_TABLE(pcmcia, pcnet_ids); -MODULE_FIRMWARE("cis/PCMLM28.cis"); -MODULE_FIRMWARE("cis/DP83903.cis"); -MODULE_FIRMWARE("cis/LA-PCM.cis"); -MODULE_FIRMWARE("cis/PE520.cis"); -MODULE_FIRMWARE("cis/NE2K.cis"); -MODULE_FIRMWARE("cis/PE-200.cis"); -MODULE_FIRMWARE("cis/tamarack.cis"); - -static struct pcmcia_driver pcnet_driver = { - .name = "pcnet_cs", - .probe = pcnet_probe, - .remove = pcnet_detach, - .owner = THIS_MODULE, - .id_table = pcnet_ids, - .suspend = pcnet_suspend, - .resume = pcnet_resume, -}; - -static int __init init_pcnet_cs(void) -{ - return pcmcia_register_driver(&pcnet_driver); -} - -static void __exit exit_pcnet_cs(void) -{ - pcmcia_unregister_driver(&pcnet_driver); -} - -module_init(init_pcnet_cs); -module_exit(exit_pcnet_cs); diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c deleted file mode 100644 index 34934fb..0000000 --- a/drivers/net/smc-mca.c +++ /dev/null @@ -1,576 +0,0 @@ -/* smc-mca.c: A SMC Ultra ethernet driver for linux. */ -/* - Most of this driver, except for ultramca_probe is nearly - verbatim from smc-ultra.c by Donald Becker. The rest is - written and copyright 1996 by David Weis, weisd3458@uni.edu - - This is a driver for the SMC Ultra and SMC EtherEZ ethercards. 
-
- This driver uses the cards in the 8390-compatible, shared memory mode.
- Most of the run-time complexity is handled by the generic code in
- 8390.c.
-
- This driver enables the shared memory only when doing the actual data
- transfers to avoid a bug in early version of the card that corrupted
- data transferred by a AHA1542.
-
- This driver does not support the programmed-I/O data transfer mode of
- the EtherEZ. That support (if available) is smc-ez.c. Nor does it
- use the non-8390-compatible "Altego" mode. (No support currently planned.)
-
- Changelog:
-
- Paul Gortmaker : multiple card support for module users.
- David Weis : Micro Channel-ized it.
- Tom Sightler : Added support for IBM PS/2 Ethernet Adapter/A
- Christopher Turcksin : Changed MCA-probe so that multiple adapters are
- found correctly (Jul 16, 1997)
- Chris Beauregard : Tried to merge the two changes above (Dec 15, 1997)
- Tom Sightler : Fixed minor detection bug caused by above merge
- Tom Sightler : Added support for three more Western Digital
- MCA-adapters
- Tom Sightler : Added support for 2.2.x mca_find_unused_adapter
- Hartmut Schmidt : - Modified parameter detection to handle each
- card differently depending on a switch-list
- - 'card_ver' removed from the adapter list
- - Some minor bug fixes
-*/
-
-#include <linux/mca.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-#include <asm/system.h>
-
-#include "8390.h"
-
-#define DRV_NAME "smc-mca"
-
-static int ultramca_open(struct net_device *dev);
-static void ultramca_reset_8390(struct net_device *dev);
-static void ultramca_get_8390_hdr(struct net_device *dev,
- struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void ultramca_block_input(struct net_device *dev, int count,
- struct sk_buff *skb,
- int ring_offset);
-static void ultramca_block_output(struct net_device *dev, int count,
- const unsigned char *buf,
- const int start_page);
-static int ultramca_close_card(struct net_device *dev);
-
-#define START_PG 0x00 /* First page of TX buffer */
-
-#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
-#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
-#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
-#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
-#define ULTRA_IO_EXTENT 32
-#define EN0_ERWCNT 0x08 /* Early receive warning count.
*/ - -#define _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A 0 -#define _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A 1 -#define _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A 2 -#define _6fc1_WD_Starcard_PLUS_A_WD8003ST_A 3 -#define _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A 4 -#define _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A 5 -#define _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A 6 -#define _efe5_IBM_PS2_Adapter_A_for_Ethernet 7 - -struct smc_mca_adapters_t { - unsigned int id; - char *name; -}; - -#define MAX_ULTRAMCA_CARDS 4 /* Max number of Ultra cards per module */ - -static int ultra_io[MAX_ULTRAMCA_CARDS]; -static int ultra_irq[MAX_ULTRAMCA_CARDS]; -MODULE_LICENSE("GPL"); - -module_param_array(ultra_io, int, NULL, 0); -module_param_array(ultra_irq, int, NULL, 0); -MODULE_PARM_DESC(ultra_io, "SMC Ultra/EtherEZ MCA I/O base address(es)"); -MODULE_PARM_DESC(ultra_irq, "SMC Ultra/EtherEZ MCA IRQ number(s)"); - -static const struct { - unsigned int base_addr; -} addr_table[] = { - { 0x0800 }, - { 0x1800 }, - { 0x2800 }, - { 0x3800 }, - { 0x4800 }, - { 0x5800 }, - { 0x6800 }, - { 0x7800 }, - { 0x8800 }, - { 0x9800 }, - { 0xa800 }, - { 0xb800 }, - { 0xc800 }, - { 0xd800 }, - { 0xe800 }, - { 0xf800 } -}; - -#define MEM_MASK 64 - -static const struct { - unsigned char mem_index; - unsigned long mem_start; - unsigned char num_pages; -} mem_table[] = { - { 16, 0x0c0000, 40 }, - { 18, 0x0c4000, 40 }, - { 20, 0x0c8000, 40 }, - { 22, 0x0cc000, 40 }, - { 24, 0x0d0000, 40 }, - { 26, 0x0d4000, 40 }, - { 28, 0x0d8000, 40 }, - { 30, 0x0dc000, 40 }, - {144, 0xfc0000, 40 }, - {148, 0xfc8000, 40 }, - {154, 0xfd0000, 40 }, - {156, 0xfd8000, 40 }, - { 0, 0x0c0000, 20 }, - { 1, 0x0c2000, 20 }, - { 2, 0x0c4000, 20 }, - { 3, 0x0c6000, 20 } -}; - -#define IRQ_MASK 243 -static const struct { - unsigned char new_irq; - unsigned char old_irq; -} irq_table[] = { - { 3, 3 }, - { 4, 4 }, - { 10, 10 }, - { 14, 15 } -}; - -static short smc_mca_adapter_ids[] __initdata = { - 0x61c8, - 0x61c9, - 0x6fc0, - 0x6fc1, - 0x6fc2, - 0xefd4, - 0xefd5, - 0xefe5, - 0x0000 -}; - -static char *smc_mca_adapter_names[] __initdata = { - "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)", - "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)", - "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)", - "WD Starcard PLUS/A (WD8003ST/A)", - "WD Ethercard PLUS 10T/A (WD8003W/A)", - "IBM PS/2 Adapter/A for Ethernet UTP/AUI (WD8013WP/A)", - "IBM PS/2 Adapter/A for Ethernet BNC/AUI (WD8013EP/A)", - "IBM PS/2 Adapter/A for Ethernet", - NULL -}; - -static int ultra_found = 0; - - -static const struct net_device_ops ultramca_netdev_ops = { - .ndo_open = ultramca_open, - .ndo_stop = ultramca_close_card, - - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int __init ultramca_probe(struct device *gen_dev) -{ - unsigned short ioaddr; - struct net_device *dev; - unsigned char reg4, num_pages; - struct mca_device *mca_dev = to_mca_device(gen_dev); - char slot = mca_dev->slot; - unsigned char pos2 = 0xff, pos3 = 0xff, pos4 = 0xff, pos5 = 0xff; - int i, rc; - int adapter = mca_dev->index; - int tbase = 0; - int tirq = 0; - int base_addr = ultra_io[ultra_found]; - int irq = ultra_irq[ultra_found]; - - if (base_addr || 
irq) { - printk(KERN_INFO "Probing for SMC MCA adapter"); - if (base_addr) { - printk(KERN_INFO " at I/O address 0x%04x%c", - base_addr, irq ? ' ' : '\n'); - } - if (irq) { - printk(KERN_INFO "using irq %d\n", irq); - } - } - - tirq = 0; - tbase = 0; - - /* If we're trying to match a specificied irq or io address, - * we'll reject the adapter found unless it's the one we're - * looking for */ - - pos2 = mca_device_read_stored_pos(mca_dev, 2); /* io_addr */ - pos3 = mca_device_read_stored_pos(mca_dev, 3); /* shared mem */ - pos4 = mca_device_read_stored_pos(mca_dev, 4); /* ROM bios addr range */ - pos5 = mca_device_read_stored_pos(mca_dev, 5); /* irq, media and RIPL */ - - /* Test the following conditions: - * - If an irq parameter is supplied, compare it - * with the irq of the adapter we found - * - If a base_addr paramater is given, compare it - * with the base_addr of the adapter we found - * - Check that the irq and the base_addr of the - * adapter we found is not already in use by - * this driver - */ - - switch (mca_dev->index) { - case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A: - case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A: - case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A: - case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A: - { - tbase = addr_table[(pos2 & 0xf0) >> 4].base_addr; - tirq = irq_table[(pos5 & 0xc) >> 2].new_irq; - break; - } - case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A: - case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A: - case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A: - case _efe5_IBM_PS2_Adapter_A_for_Ethernet: - { - tbase = ((pos2 & 0x0fe) * 0x10); - tirq = irq_table[(pos5 & 3)].old_irq; - break; - } - } - - if(!tirq || !tbase || - (irq && irq != tirq) || - (base_addr && tbase != base_addr)) - /* FIXME: we're trying to force the ordering of the - * devices here, there should be a way of getting this - * to happen */ - return -ENXIO; - - /* Adapter found. */ - dev = alloc_ei_netdev(); - if(!dev) - return -ENODEV; - - SET_NETDEV_DEV(dev, gen_dev); - mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]); - mca_device_set_claim(mca_dev, 1); - - printk(KERN_INFO "smc_mca: %s found in slot %d\n", - smc_mca_adapter_names[adapter], slot + 1); - - ultra_found++; - - dev->base_addr = ioaddr = mca_device_transform_ioport(mca_dev, tbase); - dev->irq = mca_device_transform_irq(mca_dev, tirq); - dev->mem_start = 0; - num_pages = 40; - - switch (adapter) { /* card-# in const array above [hs] */ - case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A: - case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A: - { - for (i = 0; i < 16; i++) { /* taking 16 counts - * up to 15 [hs] */ - if (mem_table[i].mem_index == (pos3 & ~MEM_MASK)) { - dev->mem_start = (unsigned long) - mca_device_transform_memory(mca_dev, (void *)mem_table[i].mem_start); - num_pages = mem_table[i].num_pages; - } - } - break; - } - case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A: - case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A: - case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A: - case _efe5_IBM_PS2_Adapter_A_for_Ethernet: - { - dev->mem_start = (unsigned long) - mca_device_transform_memory(mca_dev, (void *)((pos3 & 0xfc) * 0x1000)); - num_pages = 0x40; - break; - } - case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A: - case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A: - { - /* courtesy of gamera@quartz.ocn.ne.jp, pos3 indicates - * the index of the 0x2000 step. 
- * beware different number of pages [hs] - */ - dev->mem_start = (unsigned long) - mca_device_transform_memory(mca_dev, (void *)(0xc0000 + (0x2000 * (pos3 & 0xf)))); - num_pages = 0x20 + (2 * (pos3 & 0x10)); - break; - } - } - - /* sanity check, shouldn't happen */ - if (dev->mem_start == 0) { - rc = -ENODEV; - goto err_unclaim; - } - - if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) { - rc = -ENODEV; - goto err_unclaim; - } - - reg4 = inb(ioaddr + 4) & 0x7f; - outb(reg4, ioaddr + 4); - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + 8 + i); - - printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %pM", - slot + 1, ioaddr, dev->dev_addr); - - /* Switch from the station address to the alternate register set - * and read the useful registers there. - */ - - outb(0x80 | reg4, ioaddr + 4); - - /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. - */ - - outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c); - - /* Switch back to the station address register set so that - * the MS-DOS driver can find the card after a warm boot. - */ - - outb(reg4, ioaddr + 4); - - dev_set_drvdata(gen_dev, dev); - - /* The 8390 isn't at the base address, so fake the offset - */ - - dev->base_addr = ioaddr + ULTRA_NIC_OFFSET; - - ei_status.name = "SMC Ultra MCA"; - ei_status.word16 = 1; - ei_status.tx_start_page = START_PG; - ei_status.rx_start_page = START_PG + TX_PAGES; - ei_status.stop_page = num_pages; - - ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG) * 256); - if (!ei_status.mem) { - rc = -ENOMEM; - goto err_release_region; - } - - dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG) * 256; - - printk(", IRQ %d memory %#lx-%#lx.\n", - dev->irq, dev->mem_start, dev->mem_end - 1); - - ei_status.reset_8390 = &ultramca_reset_8390; - ei_status.block_input = &ultramca_block_input; - ei_status.block_output = &ultramca_block_output; - ei_status.get_8390_hdr = &ultramca_get_8390_hdr; - - ei_status.priv = slot; - - dev->netdev_ops = &ultramca_netdev_ops; - - NS8390_init(dev, 0); - - rc = register_netdev(dev); - if (rc) - goto err_unmap; - - return 0; - -err_unmap: - iounmap(ei_status.mem); -err_release_region: - release_region(ioaddr, ULTRA_IO_EXTENT); -err_unclaim: - mca_device_set_claim(mca_dev, 0); - free_netdev(dev); - return rc; -} - -static int ultramca_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ - int retval; - - if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) - return retval; - - outb(ULTRA_MEMENB, ioaddr); /* Enable memory */ - outb(0x80, ioaddr + 5); /* ??? */ - outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ - outb(0x04, ioaddr + 5); /* ??? */ - - /* Set the early receive warning level in window 0 high enough not - * to receive ERW interrupts. - */ - - /* outb_p(E8390_NODMA + E8390_PAGE0, dev->base_addr); - * outb(0xff, dev->base_addr + EN0_ERWCNT); - */ - - ei_open(dev); - return 0; -} - -static void ultramca_reset_8390(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ - - outb(ULTRA_RESET, ioaddr); - if (ei_debug > 1) - printk("resetting Ultra, t=%ld...", jiffies); - ei_status.txing = 0; - - outb(0x80, ioaddr + 5); /* ??? */ - outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ - - if (ei_debug > 1) - printk("reset done\n"); -} - -/* Grab the 8390 specific header. 
Similar to the block_input routine, but - * we don't need to be concerned with ring wrap as the header will be at - * the start of a page, so we optimize accordingly. - */ - -static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG) << 8); - -#ifdef notdef - /* Officially this is what we are doing, but the readl() is faster */ - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); -#else - ((unsigned int*)hdr)[0] = readl(hdr_start); -#endif -} - -/* Block input and output are easy on shared memory ethercards, the only - * complication is when the ring buffer wraps. - */ - -static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - void __iomem *xfer_start = ei_status.mem + ring_offset - START_PG * 256; - - if (ring_offset + count > ei_status.stop_page * 256) { - /* We must wrap the input move. */ - int semi_count = ei_status.stop_page * 256 - ring_offset; - memcpy_fromio(skb->data, xfer_start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); - } else { - memcpy_fromio(skb->data, xfer_start, count); - } - -} - -static void ultramca_block_output(struct net_device *dev, int count, const unsigned char *buf, - int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - START_PG) << 8); - - memcpy_toio(shmem, buf, count); -} - -static int ultramca_close_card(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ - - netif_stop_queue(dev); - - if (ei_debug > 1) - printk("%s: Shutting down ethercard.\n", dev->name); - - outb(0x00, ioaddr + 6); /* Disable interrupts. */ - free_irq(dev->irq, dev); - - NS8390_init(dev, 0); - /* We should someday disable shared memory and change to 8-bit mode - * "just in case"... - */ - - return 0; -} - -static int ultramca_remove(struct device *gen_dev) -{ - struct mca_device *mca_dev = to_mca_device(gen_dev); - struct net_device *dev = dev_get_drvdata(gen_dev); - - if (dev) { - /* NB: ultra_close_card() does free_irq */ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; - - unregister_netdev(dev); - mca_device_set_claim(mca_dev, 0); - release_region(ioaddr, ULTRA_IO_EXTENT); - iounmap(ei_status.mem); - free_netdev(dev); - } - return 0; -} - - -static struct mca_driver ultra_driver = { - .id_table = smc_mca_adapter_ids, - .driver = { - .name = "smc-mca", - .bus = &mca_bus_type, - .probe = ultramca_probe, - .remove = ultramca_remove, - } -}; - -static int __init ultramca_init_module(void) -{ - if(!MCA_bus) - return -ENXIO; - - mca_register_driver(&ultra_driver); - - return ultra_found ? 0 : -ENXIO; -} - -static void __exit ultramca_cleanup_module(void) -{ - mca_unregister_driver(&ultra_driver); -} -module_init(ultramca_init_module); -module_exit(ultramca_cleanup_module); - diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c deleted file mode 100644 index ba44ede..0000000 --- a/drivers/net/smc-ultra.c +++ /dev/null @@ -1,622 +0,0 @@ -/* smc-ultra.c: A SMC Ultra ethernet driver for linux. */ -/* - This is a driver for the SMC Ultra and SMC EtherEZ ISA ethercards. - - Written 1993-1998 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. 
- - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - This driver uses the cards in the 8390-compatible mode. - Most of the run-time complexity is handled by the generic code in - 8390.c. The code in this file is responsible for - - ultra_probe() Detecting and initializing the card. - ultra_probe1() - ultra_probe_isapnp() - - ultra_open() The card-specific details of starting, stopping - ultra_reset_8390() and resetting the 8390 NIC core. - ultra_close() - - ultra_block_input() Routines for reading and writing blocks of - ultra_block_output() packet buffer memory. - ultra_pio_input() - ultra_pio_output() - - This driver enables the shared memory only when doing the actual data - transfers to avoid a bug in early version of the card that corrupted - data transferred by a AHA1542. - - This driver now supports the programmed-I/O (PIO) data transfer mode of - the EtherEZ. It does not use the non-8390-compatible "Altego" mode. - That support (if available) is in smc-ez.c. - - Changelog: - - Paul Gortmaker : multiple card support for module users. - Donald Becker : 4/17/96 PIO support, minor potential problems avoided. - Donald Becker : 6/6/96 correctly set auto-wrap bit. - Alexander Sotirov : 1/20/01 Added support for ISAPnP cards - - Note about the ISA PnP support: - - This driver can not autoprobe for more than one SMC EtherEZ PnP card. - You have to configure the second card manually through the /proc/isapnp - interface and then load the module with an explicit io=0x___ option. -*/ - -static const char version[] = - "smc-ultra.c:v2.02 2/3/98 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "8390.h" - -#define DRV_NAME "smc-ultra" - -/* A zero-terminated list of I/O addresses to be probed. */ -static unsigned int ultra_portlist[] __initdata = -{0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380, 0}; - -static int ultra_probe1(struct net_device *dev, int ioaddr); - -#ifdef __ISAPNP__ -static int ultra_probe_isapnp(struct net_device *dev); -#endif - -static int ultra_open(struct net_device *dev); -static void ultra_reset_8390(struct net_device *dev); -static void ultra_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void ultra_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ultra_block_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page); -static void ultra_pio_get_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void ultra_pio_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ultra_pio_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page); -static int ultra_close_card(struct net_device *dev); - -#ifdef __ISAPNP__ -static struct isapnp_device_id ultra_device_ids[] __initdata = { - { ISAPNP_VENDOR('S','M','C'), ISAPNP_FUNCTION(0x8416), - ISAPNP_VENDOR('S','M','C'), ISAPNP_FUNCTION(0x8416), - (long) "SMC EtherEZ (8416)" }, - { } /* terminate list */ -}; - -MODULE_DEVICE_TABLE(isapnp, ultra_device_ids); -#endif - - -#define START_PG 0x00 /* First page of TX buffer */ - -#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */ -#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. 
*/ -#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */ -#define IOPD 0x02 /* I/O Pipe Data (16 bits), PIO operation. */ -#define IOPA 0x07 /* I/O Pipe Address for PIO operation. */ -#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */ -#define ULTRA_IO_EXTENT 32 -#define EN0_ERWCNT 0x08 /* Early receive warning count. */ - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void ultra_poll(struct net_device *dev) -{ - disable_irq(dev->irq); - ei_interrupt(dev->irq, dev); - enable_irq(dev->irq); -} -#endif -/* Probe for the Ultra. This looks like a 8013 with the station - address PROM at I/O ports +8 to +13, with a checksum - following. -*/ - -static int __init do_ultra_probe(struct net_device *dev) -{ - int i; - int base_addr = dev->base_addr; - int irq = dev->irq; - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return ultra_probe1(dev, base_addr); - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - -#ifdef __ISAPNP__ - /* Look for any installed ISAPnP cards */ - if (isapnp_present() && (ultra_probe_isapnp(dev) == 0)) - return 0; -#endif - - for (i = 0; ultra_portlist[i]; i++) { - dev->irq = irq; - if (ultra_probe1(dev, ultra_portlist[i]) == 0) - return 0; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init ultra_probe(int unit) -{ - struct net_device *dev = alloc_ei_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_ultra_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops ultra_netdev_ops = { - .ndo_open = ultra_open, - .ndo_stop = ultra_close_card, - - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ultra_poll, -#endif -}; - -static int __init ultra_probe1(struct net_device *dev, int ioaddr) -{ - int i, retval; - int checksum = 0; - const char *model_name; - unsigned char eeprom_irq = 0; - static unsigned version_printed; - /* Values from various config regs. */ - unsigned char num_pages, irqreg, addr, piomode; - unsigned char idreg = inb(ioaddr + 7); - unsigned char reg4 = inb(ioaddr + 4) & 0x7f; - - if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - /* Check the ID nibble. */ - if ((idreg & 0xF0) != 0x20 /* SMC Ultra */ - && (idreg & 0xF0) != 0x40) { /* SMC EtherEZ */ - retval = -ENODEV; - goto out; - } - - /* Select the station address register set. */ - outb(reg4, ioaddr + 4); - - for (i = 0; i < 8; i++) - checksum += inb(ioaddr + 8 + i); - if ((checksum & 0xff) != 0xFF) { - retval = -ENODEV; - goto out; - } - - if (ei_debug && version_printed++ == 0) - printk(version); - - model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ"; - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + 8 + i); - - printk("%s: %s at %#3x, %pM", dev->name, model_name, - ioaddr, dev->dev_addr); - - /* Switch from the station address to the alternate register set and - read the useful registers there. */ - outb(0x80 | reg4, ioaddr + 4); - - /* Enabled FINE16 mode to avoid BIOS ROM width mismatches @ reboot. 
*/ - outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c); - piomode = inb(ioaddr + 0x8); - addr = inb(ioaddr + 0xb); - irqreg = inb(ioaddr + 0xd); - - /* Switch back to the station address register set so that the MS-DOS driver - can find the card after a warm boot. */ - outb(reg4, ioaddr + 4); - - if (dev->irq < 2) { - unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15}; - int irq; - - /* The IRQ bits are split. */ - irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)]; - - if (irq == 0) { - printk(", failed to detect IRQ line.\n"); - retval = -EAGAIN; - goto out; - } - dev->irq = irq; - eeprom_irq = 1; - } - - /* The 8390 isn't at the base address, so fake the offset */ - dev->base_addr = ioaddr+ULTRA_NIC_OFFSET; - - { - static const int addr_tbl[4] = { - 0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000 - }; - static const short num_pages_tbl[4] = { - 0x20, 0x40, 0x80, 0xff - }; - - dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ; - num_pages = num_pages_tbl[(addr >> 4) & 3]; - } - - ei_status.name = model_name; - ei_status.word16 = 1; - ei_status.tx_start_page = START_PG; - ei_status.rx_start_page = START_PG + TX_PAGES; - ei_status.stop_page = num_pages; - - ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG)*256); - if (!ei_status.mem) { - printk(", failed to ioremap.\n"); - retval = -ENOMEM; - goto out; - } - - dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG)*256; - - if (piomode) { - printk(",%s IRQ %d programmed-I/O mode.\n", - eeprom_irq ? "EEPROM" : "assigned ", dev->irq); - ei_status.block_input = &ultra_pio_input; - ei_status.block_output = &ultra_pio_output; - ei_status.get_8390_hdr = &ultra_pio_get_hdr; - } else { - printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ", - dev->irq, dev->mem_start, dev->mem_end-1); - ei_status.block_input = &ultra_block_input; - ei_status.block_output = &ultra_block_output; - ei_status.get_8390_hdr = &ultra_get_8390_hdr; - } - ei_status.reset_8390 = &ultra_reset_8390; - - dev->netdev_ops = &ultra_netdev_ops; - NS8390_init(dev, 0); - - retval = register_netdev(dev); - if (retval) - goto out; - return 0; -out: - release_region(ioaddr, ULTRA_IO_EXTENT); - return retval; -} - -#ifdef __ISAPNP__ -static int __init ultra_probe_isapnp(struct net_device *dev) -{ - int i; - - for (i = 0; ultra_device_ids[i].vendor != 0; i++) { - struct pnp_dev *idev = NULL; - - while ((idev = pnp_find_dev(NULL, - ultra_device_ids[i].vendor, - ultra_device_ids[i].function, - idev))) { - /* Avoid already found cards from previous calls */ - if (pnp_device_attach(idev) < 0) - continue; - if (pnp_activate_dev(idev) < 0) { - __again: - pnp_device_detach(idev); - continue; - } - /* if no io and irq, search for next */ - if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) - goto __again; - /* found it */ - dev->base_addr = pnp_port_start(idev, 0); - dev->irq = pnp_irq(idev, 0); - printk(KERN_INFO "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n", - (char *) ultra_device_ids[i].driver_data, - dev->base_addr, dev->irq); - if (ultra_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. 
*/ - printk(KERN_ERR "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr); - pnp_device_detach(idev); - return -ENXIO; - } - ei_status.priv = (unsigned long)idev; - break; - } - if (!idev) - continue; - return 0; - } - - return -ENODEV; -} -#endif - -static int -ultra_open(struct net_device *dev) -{ - int retval; - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ - unsigned char irq2reg[] = {0, 0, 0x04, 0x08, 0, 0x0C, 0, 0x40, - 0, 0x04, 0x44, 0x48, 0, 0, 0, 0x4C, }; - - retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev); - if (retval) - return retval; - - outb(0x00, ioaddr); /* Disable shared memory for safety. */ - outb(0x80, ioaddr + 5); - /* Set the IRQ line. */ - outb(inb(ioaddr + 4) | 0x80, ioaddr + 4); - outb((inb(ioaddr + 13) & ~0x4C) | irq2reg[dev->irq], ioaddr + 13); - outb(inb(ioaddr + 4) & 0x7f, ioaddr + 4); - - if (ei_status.block_input == &ultra_pio_input) { - outb(0x11, ioaddr + 6); /* Enable interrupts and PIO. */ - outb(0x01, ioaddr + 0x19); /* Enable ring read auto-wrap. */ - } else - outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ - /* Set the early receive warning level in window 0 high enough not - to receive ERW interrupts. */ - outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr); - outb(0xff, dev->base_addr + EN0_ERWCNT); - ei_open(dev); - return 0; -} - -static void -ultra_reset_8390(struct net_device *dev) -{ - int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */ - - outb(ULTRA_RESET, cmd_port); - if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies); - ei_status.txing = 0; - - outb(0x00, cmd_port); /* Disable shared memory for safety. */ - outb(0x80, cmd_port + 5); - if (ei_status.block_input == &ultra_pio_input) - outb(0x11, cmd_port + 6); /* Enable interrupts and PIO. */ - else - outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */ - - if (ei_debug > 1) printk("reset done\n"); -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void -ultra_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG)<<8); - - outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem on */ -#ifdef __BIG_ENDIAN - /* Officially this is what we are doing, but the readl() is faster */ - /* unfortunately it isn't endian aware of the struct */ - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = le16_to_cpu(hdr->count); -#else - ((unsigned int*)hdr)[0] = readl(hdr_start); -#endif - outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem off */ -} - -/* Block input and output are easy on shared memory ethercards, the only - complication is when the ring buffer wraps. */ - -static void -ultra_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - void __iomem *xfer_start = ei_status.mem + ring_offset - (START_PG<<8); - - /* Enable shared memory. */ - outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); - - if (ring_offset + count > ei_status.stop_page*256) { - /* We must wrap the input move. 
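The wrap case splits one logical copy in two: first the tail of the ring up to stop_page * 256, then the remainder starting just past the transmit pages. A self-contained sketch of the same arithmetic over an ordinary buffer (plain memcpy stands in for memcpy_fromio, and the ring geometry constants are illustrative, not the driver's):

#include <string.h>

#define TX_PAGES	6	/* illustrative transmit-page count */
#define STOP_PAGE	0x40	/* illustrative end of the RX ring */

static void ring_copy(unsigned char *dst, const unsigned char *ring,
		      int ring_offset, int count)
{
	int ring_end = STOP_PAGE * 256;

	if (ring_offset + count > ring_end) {
		int semi_count = ring_end - ring_offset;

		memcpy(dst, ring + ring_offset, semi_count);
		/* wrap around to the first RX page, after the TX area */
		memcpy(dst + semi_count, ring + TX_PAGES * 256,
		       count - semi_count);
	} else {
		memcpy(dst, ring + ring_offset, count);
	}
}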
*/ - int semi_count = ei_status.stop_page*256 - ring_offset; - memcpy_fromio(skb->data, xfer_start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); - } else { - memcpy_fromio(skb->data, xfer_start, count); - } - - outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */ -} - -static void -ultra_block_output(struct net_device *dev, int count, const unsigned char *buf, - int start_page) -{ - void __iomem *shmem = ei_status.mem + ((start_page - START_PG)<<8); - - /* Enable shared memory. */ - outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); - - memcpy_toio(shmem, buf, count); - - outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */ -} - -/* The identical operations for programmed I/O cards. - The PIO model is trivial to use: the 16 bit start address is written - byte-sequentially to IOPA, with no intervening I/O operations, and the - data is read or written to the IOPD data port. - The only potential complication is that the address register is shared - and must be always be rewritten between each read/write direction change. - This is no problem for us, as the 8390 code ensures that we are single - threaded. */ -static void ultra_pio_get_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page) -{ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ - outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */ - outb(ring_page, ioaddr + IOPA); - insw(ioaddr + IOPD, hdr, sizeof(struct e8390_pkt_hdr)>>1); -} - -static void ultra_pio_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ - char *buf = skb->data; - - /* For now set the address again, although it should already be correct. */ - outb(ring_offset, ioaddr + IOPA); /* Set the address, LSB first. */ - outb(ring_offset >> 8, ioaddr + IOPA); - /* We know skbuffs are padded to at least word alignment. */ - insw(ioaddr + IOPD, buf, (count+1)>>1); -} - -static void ultra_pio_output(struct net_device *dev, int count, - const unsigned char *buf, const int start_page) -{ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ - outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */ - outb(start_page, ioaddr + IOPA); - /* An extra odd byte is OK here as well. */ - outsw(ioaddr + IOPD, buf, (count+1)>>1); -} - -static int -ultra_close_card(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */ - - netif_stop_queue(dev); - - if (ei_debug > 1) - printk("%s: Shutting down ethercard.\n", dev->name); - - outb(0x00, ioaddr + 6); /* Disable interrupts. */ - free_irq(dev->irq, dev); - - NS8390_init(dev, 0); - - /* We should someday disable shared memory and change to 8-bit mode - "just in case"... */ - - return 0; -} - - -#ifdef MODULE -#define MAX_ULTRA_CARDS 4 /* Max number of Ultra cards per module */ -static struct net_device *dev_ultra[MAX_ULTRA_CARDS]; -static int io[MAX_ULTRA_CARDS]; -static int irq[MAX_ULTRA_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); -MODULE_DESCRIPTION("SMC Ultra/EtherEZ ISA/PnP Ethernet driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that only a single autoprobe takes place per call. -ISA device autoprobes on a running machine are not recommended. 
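The PIO data-port model described further up (a 16-bit card address written byte-sequentially to IOPA, then data streamed through IOPD) can be sketched in isolation. This is a user-space illustration only, assuming x86 port I/O via <sys/io.h> with prior ioperm() permission; the function name is invented:

#include <sys/io.h>

#define IOPD	0x02	/* I/O pipe data, as defined above */
#define IOPA	0x07	/* I/O pipe address, as defined above */

/* Read count bytes of packet memory at card address addr; rounding
 * the transfer up to a whole word is safe per the driver comments. */
static void pio_read(int ioaddr, unsigned short addr,
		     unsigned char *buf, int count)
{
	outb(addr & 0xff, ioaddr + IOPA);	/* address, LSB first */
	outb(addr >> 8, ioaddr + IOPA);
	insw(ioaddr + IOPD, buf, (count + 1) >> 1);
}

Writes follow the same pattern with outsw(), after rewriting the address register, since the shared address register must be reloaded on every direction change.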
*/ -int __init -init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) break; /* only autoprobe 1st one */ - printk(KERN_NOTICE "smc-ultra.c: Presently autoprobing (not recommended) for a single card.\n"); - } - dev = alloc_ei_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - if (do_ultra_probe(dev) == 0) { - dev_ultra[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "smc-ultra.c: No SMC Ultra card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - /* NB: ultra_close_card() does free_irq */ -#ifdef __ISAPNP__ - struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; - if (idev) - pnp_device_detach(idev); -#endif - release_region(dev->base_addr - ULTRA_NIC_OFFSET, ULTRA_IO_EXTENT); - iounmap(ei_status.mem); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) { - struct net_device *dev = dev_ultra[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c deleted file mode 100644 index e459c3b..0000000 --- a/drivers/net/smc-ultra32.c +++ /dev/null @@ -1,464 +0,0 @@ -/* smc-ultra32.c: An SMC Ultra32 EISA ethernet driver for linux. - -Sources: - - This driver is based on (cloned from) the ISA SMC Ultra driver - written by Donald Becker. Modifications to support the EISA - version of the card by Paul Gortmaker and Leonard N. Zubkoff. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - -Theory of Operation: - - The SMC Ultra32C card uses the SMC 83c790 chip which is also - found on the ISA SMC Ultra cards. It has a shared memory mode of - operation that makes it similar to the ISA version of the card. - The main difference is that the EISA card has 32KB of RAM, but - only an 8KB window into that memory. The EISA card also can be - set for a bus-mastering mode of operation via the ECU, but that - is not (and probably will never be) supported by this driver. - The ECU should be run to enable shared memory and to disable the - bus-mastering feature for use with linux. - - By programming the 8390 to use only 8KB RAM, the modifications - to the ISA driver can be limited to the probe and initialization - code. This allows easy integration of EISA support into the ISA - driver. However, the driver development kit from SMC provided the - register information for sliding the 8KB window, and hence the 8390 - is programmed to use the full 32KB RAM. - - Unfortunately this required code changes outside the probe/init - routines, and thus we decided to separate the EISA driver from - the ISA one. In this way, ISA users don't end up with a larger - driver due to the EISA code, and EISA users don't end up with a - larger driver due to the ISA EtherEZ PIO code. The driver is - similar to the 3c503/16 driver, in that the window must be set - back to the 1st 8KB of space for access to the two 8390 Tx slots. 
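Concretely: 32KB of card RAM is 128 pages of 256 bytes, seen through four 8KB windows, so an 8390 ring page number splits into a window index and an offset inside the window. A small sketch of that split (pure arithmetic, mirroring the shifts the header and block-input routines use later in this file):

/* which of the four 8KB windows holds this 256-byte page */
static int window_index(int ring_page)
{
	return (ring_page & 0x60) >> 5;
}

/* byte offset of the page within the mapped 8KB window */
static int window_offset(int ring_page)
{
	return (ring_page & 0x1f) << 8;
}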
- - In testing, using only 8KB RAM (3 Tx / 5 Rx) didn't appear to - be a limiting factor, since the EISA bus could get packets off - the card fast enough, but having the use of lots of RAM as Rx - space is extra insurance if interrupt latencies become excessive. - -*/ - -static const char *version = "smc-ultra32.c: 06/97 v1.00\n"; - - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "8390.h" - -#define DRV_NAME "smc-ultra32" - -static int ultra32_probe1(struct net_device *dev, int ioaddr); -static int ultra32_open(struct net_device *dev); -static void ultra32_reset_8390(struct net_device *dev); -static void ultra32_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void ultra32_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void ultra32_block_output(struct net_device *dev, int count, - const unsigned char *buf, - const int start_page); -static int ultra32_close(struct net_device *dev); - -#define ULTRA32_CMDREG 0 /* Offset to ASIC command register. */ -#define ULTRA32_RESET 0x80 /* Board reset, in ULTRA32_CMDREG. */ -#define ULTRA32_MEMENB 0x40 /* Enable the shared memory. */ -#define ULTRA32_NIC_OFFSET 16 /* NIC register offset from the base_addr. */ -#define ULTRA32_IO_EXTENT 32 -#define EN0_ERWCNT 0x08 /* Early receive warning count. */ - -/* - * Defines that apply only to the Ultra32 EISA card. Note that - * "smc" = 10011 01101 00011 = 0x4da3, and hence !smc8010.cfg translates - * into an EISA ID of 0x1080A34D - */ -#define ULTRA32_BASE 0xca0 -#define ULTRA32_ID 0x1080a34d -#define ULTRA32_IDPORT (-0x20) /* 0xc80 */ -/* Config regs 1->7 from the EISA !SMC8010.CFG file. */ -#define ULTRA32_CFG1 0x04 /* 0xca4 */ -#define ULTRA32_CFG2 0x05 /* 0xca5 */ -#define ULTRA32_CFG3 (-0x18) /* 0xc88 */ -#define ULTRA32_CFG4 (-0x17) /* 0xc89 */ -#define ULTRA32_CFG5 (-0x16) /* 0xc8a */ -#define ULTRA32_CFG6 (-0x15) /* 0xc8b */ -#define ULTRA32_CFG7 0x0d /* 0xcad */ - -static void cleanup_card(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; - /* NB: ultra32_close_card() does free_irq */ - release_region(ioaddr, ULTRA32_IO_EXTENT); - iounmap(ei_status.mem); -} - -/* Probe for the Ultra32. This looks like a 8013 with the station - address PROM at I/O ports +8 to +13, with a checksum - following. -*/ - -struct net_device * __init ultra32_probe(int unit) -{ - struct net_device *dev; - int base; - int irq; - int err = -ENODEV; - - if (!EISA_bus) - return ERR_PTR(-ENODEV); - - dev = alloc_ei_netdev(); - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - - irq = dev->irq; - - /* EISA spec allows for up to 16 slots, but 8 is typical. 
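Each EISA slot owns a fixed 4KB I/O range, so slot n's card registers sit at n * 0x1000 plus the card-relative base (ULTRA32_BASE, 0xca0). A runnable sketch of the slot walk the probe loop below performs:

#include <stdio.h>

#define ULTRA32_BASE 0xca0	/* card registers within the slot range */

int main(void)
{
	int slot;

	/* mirrors the driver's for (base = 0x1000 + ULTRA32_BASE;
	 * base < 0x9000; base += 0x1000) scan of slots 1 through 8 */
	for (slot = 1; slot <= 8; slot++)
		printf("slot %d -> I/O base %#x\n",
		       slot, slot * 0x1000 + ULTRA32_BASE);
	return 0;
}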
*/ - for (base = 0x1000 + ULTRA32_BASE; base < 0x9000; base += 0x1000) { - if (ultra32_probe1(dev, base) == 0) - break; - dev->irq = irq; - } - if (base >= 0x9000) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - cleanup_card(dev); -out: - free_netdev(dev); - return ERR_PTR(err); -} - - -static const struct net_device_ops ultra32_netdev_ops = { - .ndo_open = ultra32_open, - .ndo_stop = ultra32_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int __init ultra32_probe1(struct net_device *dev, int ioaddr) -{ - int i, edge, media, retval; - int checksum = 0; - const char *model_name; - static unsigned version_printed; - /* Values from various config regs. */ - unsigned char idreg; - unsigned char reg4; - const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"}; - - if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - if (inb(ioaddr + ULTRA32_IDPORT) == 0xff || - inl(ioaddr + ULTRA32_IDPORT) != ULTRA32_ID) { - retval = -ENODEV; - goto out; - } - - media = inb(ioaddr + ULTRA32_CFG7) & 0x03; - edge = inb(ioaddr + ULTRA32_CFG5) & 0x08; - printk("SMC Ultra32 in EISA Slot %d, Media: %s, %s IRQs.\n", - ioaddr >> 12, ifmap[media], - (edge ? "Edge Triggered" : "Level Sensitive")); - - idreg = inb(ioaddr + 7); - reg4 = inb(ioaddr + 4) & 0x7f; - - /* Check the ID nibble. */ - if ((idreg & 0xf0) != 0x20) { /* SMC Ultra */ - retval = -ENODEV; - goto out; - } - - /* Select the station address register set. */ - outb(reg4, ioaddr + 4); - - for (i = 0; i < 8; i++) - checksum += inb(ioaddr + 8 + i); - if ((checksum & 0xff) != 0xff) { - retval = -ENODEV; - goto out; - } - - if (ei_debug && version_printed++ == 0) - printk(version); - - model_name = "SMC Ultra32"; - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + 8 + i); - - printk("%s: %s at 0x%X, %pM", - dev->name, model_name, ioaddr, dev->dev_addr); - - /* Switch from the station address to the alternate register set and - read the useful registers there. */ - outb(0x80 | reg4, ioaddr + 4); - - /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */ - outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c); - - /* Reset RAM addr. */ - outb(0x00, ioaddr + 0x0b); - - /* Switch back to the station address register set so that the - MS-DOS driver can find the card after a warm boot. */ - outb(reg4, ioaddr + 4); - - if ((inb(ioaddr + ULTRA32_CFG5) & 0x40) == 0) { - printk("\nsmc-ultra32: Card RAM is disabled! " - "Run EISA config utility.\n"); - retval = -ENODEV; - goto out; - } - if ((inb(ioaddr + ULTRA32_CFG2) & 0x04) == 0) - printk("\nsmc-ultra32: Ignoring Bus-Master enable bit. " - "Run EISA config utility.\n"); - - if (dev->irq < 2) { - unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15}; - int irq = irqmap[inb(ioaddr + ULTRA32_CFG5) & 0x07]; - if (irq == 0) { - printk(", failed to detect IRQ line.\n"); - retval = -EAGAIN; - goto out; - } - dev->irq = irq; - } - - /* The 8390 isn't at the base address, so fake the offset */ - dev->base_addr = ioaddr + ULTRA32_NIC_OFFSET; - - /* Save RAM address in the unused reg0 to avoid excess inb's. 
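The lines just below cache CFG3 and turn it into the shared-memory base: bits 2-6 select an 8KB-aligned window between 0xc0000 and 0xfe000. A standalone sketch of the same decode (the example value is made up):

static unsigned long ultra32_mem_start(unsigned char cfg3)
{
	unsigned char reg0 = cfg3 & 0xfc;	/* as cached in reg0 below */

	return 0xc0000 + ((unsigned long)(reg0 & 0x7c) << 11);
}

For instance, a CFG3 readback of 0x14 would place the window at 0xca000.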
*/ - ei_status.reg0 = inb(ioaddr + ULTRA32_CFG3) & 0xfc; - - dev->mem_start = 0xc0000 + ((ei_status.reg0 & 0x7c) << 11); - - ei_status.name = model_name; - ei_status.word16 = 1; - ei_status.tx_start_page = 0; - ei_status.rx_start_page = TX_PAGES; - /* All Ultra32 cards have 32KB memory with an 8KB window. */ - ei_status.stop_page = 128; - - ei_status.mem = ioremap(dev->mem_start, 0x2000); - if (!ei_status.mem) { - printk(", failed to ioremap.\n"); - retval = -ENOMEM; - goto out; - } - dev->mem_end = dev->mem_start + 0x1fff; - - printk(", IRQ %d, 32KB memory, 8KB window at 0x%lx-0x%lx.\n", - dev->irq, dev->mem_start, dev->mem_end); - ei_status.block_input = &ultra32_block_input; - ei_status.block_output = &ultra32_block_output; - ei_status.get_8390_hdr = &ultra32_get_8390_hdr; - ei_status.reset_8390 = &ultra32_reset_8390; - - dev->netdev_ops = &ultra32_netdev_ops; - NS8390_init(dev, 0); - - return 0; -out: - release_region(ioaddr, ULTRA32_IO_EXTENT); - return retval; -} - -static int ultra32_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */ - int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : IRQF_SHARED; - int retval; - - retval = request_irq(dev->irq, ei_interrupt, irq_flags, dev->name, dev); - if (retval) - return retval; - - outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */ - outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */ - outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */ - outb(0x01, ioaddr + 6); /* Enable Interrupts. */ - /* Set the early receive warning level in window 0 high enough not - to receive ERW interrupts. */ - outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr); - outb(0xff, dev->base_addr + EN0_ERWCNT); - ei_open(dev); - return 0; -} - -static int ultra32_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* CMDREG */ - - netif_stop_queue(dev); - - if (ei_debug > 1) - printk("%s: Shutting down ethercard.\n", dev->name); - - outb(0x00, ioaddr + ULTRA32_CFG6); /* Disable Interrupts. */ - outb(0x00, ioaddr + 6); /* Disable interrupts. */ - free_irq(dev->irq, dev); - - NS8390_init(dev, 0); - - return 0; -} - -static void ultra32_reset_8390(struct net_device *dev) -{ - int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC base addr */ - - outb(ULTRA32_RESET, ioaddr); - if (ei_debug > 1) printk("resetting Ultra32, t=%ld...", jiffies); - ei_status.txing = 0; - - outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */ - outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */ - outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */ - outb(0x01, ioaddr + 6); /* Enable Interrupts. */ - if (ei_debug > 1) printk("reset done\n"); -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void ultra32_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, - int ring_page) -{ - void __iomem *hdr_start = ei_status.mem + ((ring_page & 0x1f) << 8); - unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3; - - /* Select correct 8KB Window. 
*/ - outb(ei_status.reg0 | ((ring_page & 0x60) >> 5), RamReg); - -#ifdef __BIG_ENDIAN - /* Officially this is what we are doing, but the readl() is faster */ - /* unfortunately it isn't endian aware of the struct */ - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = le16_to_cpu(hdr->count); -#else - ((unsigned int*)hdr)[0] = readl(hdr_start); -#endif -} - -/* Block input and output are easy on shared memory ethercards, the only - complication is when the ring buffer wraps, or in this case, when a - packet spans an 8KB boundary. Note that the current 8KB segment is - already set by the get_8390_hdr routine. */ - -static void ultra32_block_input(struct net_device *dev, - int count, - struct sk_buff *skb, - int ring_offset) -{ - void __iomem *xfer_start = ei_status.mem + (ring_offset & 0x1fff); - unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3; - - if ((ring_offset & ~0x1fff) != ((ring_offset + count - 1) & ~0x1fff)) { - int semi_count = 8192 - (ring_offset & 0x1FFF); - memcpy_fromio(skb->data, xfer_start, semi_count); - count -= semi_count; - if (ring_offset < 96*256) { - /* Select next 8KB Window. */ - ring_offset += semi_count; - outb(ei_status.reg0 | ((ring_offset & 0x6000) >> 13), RamReg); - memcpy_fromio(skb->data + semi_count, ei_status.mem, count); - } else { - /* Select first 8KB Window. */ - outb(ei_status.reg0, RamReg); - memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); - } - } else { - memcpy_fromio(skb->data, xfer_start, count); - } -} - -static void ultra32_block_output(struct net_device *dev, - int count, - const unsigned char *buf, - int start_page) -{ - void __iomem *xfer_start = ei_status.mem + (start_page<<8); - unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3; - - /* Select first 8KB Window. */ - outb(ei_status.reg0, RamReg); - - memcpy_toio(xfer_start, buf, count); -} - -#ifdef MODULE -#define MAX_ULTRA32_CARDS 4 /* Max number of Ultra cards per module */ -static struct net_device *dev_ultra[MAX_ULTRA32_CARDS]; - -MODULE_DESCRIPTION("SMC Ultra32 EISA ethernet driver"); -MODULE_LICENSE("GPL"); - -int __init init_module(void) -{ - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) { - struct net_device *dev = ultra32_probe(-1); - if (IS_ERR(dev)) - break; - dev_ultra[found++] = dev; - } - if (found) - return 0; - printk(KERN_WARNING "smc-ultra32.c: No SMC Ultra32 found.\n"); - return -ENXIO; -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) { - struct net_device *dev = dev_ultra[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ - diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c deleted file mode 100644 index d85f0a8..0000000 --- a/drivers/net/stnic.c +++ /dev/null @@ -1,294 +0,0 @@ -/* stnic.c : A SH7750 specific part of driver for NS DP83902A ST-NIC. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 1999 kaz Kojima - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#ifdef CONFIG_SH_STANDARD_BIOS -#include -#endif - -#include "8390.h" - -#define DRV_NAME "stnic" - -#define byte unsigned char -#define half unsigned short -#define word unsigned int -#define vbyte volatile unsigned char -#define vhalf volatile unsigned short -#define vword volatile unsigned int - -#define STNIC_RUN 0x01 /* 1 == Run, 0 == reset. */ - -#define START_PG 0 /* First page of TX buffer */ -#define STOP_PG 128 /* Last page +1 of RX ring */ - -/* Alias */ -#define STNIC_CR E8390_CMD -#define PG0_RSAR0 EN0_RSARLO -#define PG0_RSAR1 EN0_RSARHI -#define PG0_RBCR0 EN0_RCNTLO -#define PG0_RBCR1 EN0_RCNTHI - -#define CR_RRD E8390_RREAD -#define CR_RWR E8390_RWRITE -#define CR_PG0 E8390_PAGE0 -#define CR_STA E8390_START -#define CR_RDMA E8390_NODMA - -/* FIXME! YOU MUST SET YOUR OWN ETHER ADDRESS. */ -static byte stnic_eadr[6] = -{0x00, 0xc0, 0x6e, 0x00, 0x00, 0x07}; - -static struct net_device *stnic_dev; - -static void stnic_reset (struct net_device *dev); -static void stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void stnic_block_input (struct net_device *dev, int count, - struct sk_buff *skb , int ring_offset); -static void stnic_block_output (struct net_device *dev, int count, - const unsigned char *buf, int start_page); - -static void stnic_init (struct net_device *dev); - -/* SH7750 specific read/write io. */ -static inline void -STNIC_DELAY (void) -{ - vword trash; - trash = *(vword *) 0xa0000000; - trash = *(vword *) 0xa0000000; - trash = *(vword *) 0xa0000000; -} - -static inline byte -STNIC_READ (int reg) -{ - byte val; - - val = (*(vhalf *) (PA_83902 + ((reg) << 1)) >> 8) & 0xff; - STNIC_DELAY (); - return val; -} - -static inline void -STNIC_WRITE (int reg, byte val) -{ - *(vhalf *) (PA_83902 + ((reg) << 1)) = ((half) (val) << 8); - STNIC_DELAY (); -} - -static int __init stnic_probe(void) -{ - struct net_device *dev; - int i, err; - - /* If we are not running on a SolutionEngine, give up now */ - if (! MACH_SE) - return -ENODEV; - - /* New style probing API */ - dev = alloc_ei_netdev(); - if (!dev) - return -ENOMEM; - -#ifdef CONFIG_SH_STANDARD_BIOS - sh_bios_get_node_addr (stnic_eadr); -#endif - for (i = 0; i < ETHER_ADDR_LEN; i++) - dev->dev_addr[i] = stnic_eadr[i]; - - /* Set the base address to point to the NIC, not the "real" base! */ - dev->base_addr = 0x1000; - dev->irq = IRQ_STNIC; - dev->netdev_ops = &ei_netdev_ops; - - /* Snarf the interrupt now. There's no point in waiting since we cannot - share and the board will usually be enabled. 
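The STNIC_READ()/STNIC_WRITE() helpers above capture how the DP83902A sits on the SH7750's 16-bit bus: register n lives at PA_83902 + (n << 1), and its 8-bit value travels in the upper byte of the halfword. The same access pattern modeled over an ordinary array (the array is a stand-in for the memory-mapped device, purely for illustration):

static unsigned short regs[32];		/* stand-in for PA_83902 */

static unsigned char model_read(int reg)
{
	return (regs[reg] >> 8) & 0xff;	/* value in the high byte */
}

static void model_write(int reg, unsigned char val)
{
	regs[reg] = (unsigned short)val << 8;
}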
*/ - err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev); - if (err) { - printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq); - free_netdev(dev); - return err; - } - - ei_status.name = dev->name; - ei_status.word16 = 1; -#ifdef __LITTLE_ENDIAN__ - ei_status.bigendian = 0; -#else - ei_status.bigendian = 1; -#endif - ei_status.tx_start_page = START_PG; - ei_status.rx_start_page = START_PG + TX_PAGES; - ei_status.stop_page = STOP_PG; - - ei_status.reset_8390 = &stnic_reset; - ei_status.get_8390_hdr = &stnic_get_hdr; - ei_status.block_input = &stnic_block_input; - ei_status.block_output = &stnic_block_output; - - stnic_init (dev); - - err = register_netdev(dev); - if (err) { - free_irq(dev->irq, dev); - free_netdev(dev); - return err; - } - stnic_dev = dev; - - printk (KERN_INFO "NS ST-NIC 83902A\n"); - - return 0; -} - -static void -stnic_reset (struct net_device *dev) -{ - *(vhalf *) PA_83902_RST = 0; - udelay (5); - if (ei_debug > 1) - printk (KERN_WARNING "8390 reset done (%ld).\n", jiffies); - *(vhalf *) PA_83902_RST = ~0; - udelay (5); -} - -static void -stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page) -{ - half buf[2]; - - STNIC_WRITE (PG0_RSAR0, 0); - STNIC_WRITE (PG0_RSAR1, ring_page); - STNIC_WRITE (PG0_RBCR0, 4); - STNIC_WRITE (PG0_RBCR1, 0); - STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); - - buf[0] = *(vhalf *) PA_83902_IF; - STNIC_DELAY (); - buf[1] = *(vhalf *) PA_83902_IF; - STNIC_DELAY (); - hdr->next = buf[0] >> 8; - hdr->status = buf[0] & 0xff; -#ifdef __LITTLE_ENDIAN__ - hdr->count = buf[1]; -#else - hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8); -#endif - - if (ei_debug > 1) - printk (KERN_DEBUG "ring %x status %02x next %02x count %04x.\n", - ring_page, hdr->status, hdr->next, hdr->count); - - STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); -} - -/* Block input and output, similar to the Crynwr packet driver. If you are - porting to a new ethercard look at the packet driver source for hints. - The HP LAN doesn't use shared memory -- we put the packet - out through the "remote DMA" dataport. 
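Both PIO routines below follow the same remote-DMA recipe: program the start address into RSAR0/1 and the byte count into RBCR0/1, start a remote read (or write), stream halfwords through the dataport, then return the chip to no-DMA mode. Condensed, using the register aliases defined earlier in this file (the wrapper function itself is illustrative):

/* illustrative condensation of stnic_block_input()'s setup */
static void remote_read_setup(unsigned short addr, unsigned short len)
{
	STNIC_WRITE (PG0_RSAR0, addr & 0xff);	/* start address */
	STNIC_WRITE (PG0_RSAR1, addr >> 8);
	STNIC_WRITE (PG0_RBCR0, len & 0xff);	/* byte count */
	STNIC_WRITE (PG0_RBCR1, len >> 8);
	STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA);
	/* ... caller reads (len + 1) / 2 halfwords from PA_83902_IF,
	 * then writes CR_RDMA | CR_PG0 | CR_STA to leave DMA mode */
}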
*/ - -static void -stnic_block_input (struct net_device *dev, int length, struct sk_buff *skb, - int offset) -{ - char *buf = skb->data; - half val; - - STNIC_WRITE (PG0_RSAR0, offset & 0xff); - STNIC_WRITE (PG0_RSAR1, offset >> 8); - STNIC_WRITE (PG0_RBCR0, length & 0xff); - STNIC_WRITE (PG0_RBCR1, length >> 8); - STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); - - if (length & 1) - length++; - - while (length > 0) - { - val = *(vhalf *) PA_83902_IF; -#ifdef __LITTLE_ENDIAN__ - *buf++ = val & 0xff; - *buf++ = val >> 8; -#else - *buf++ = val >> 8; - *buf++ = val & 0xff; -#endif - STNIC_DELAY (); - length -= sizeof (half); - } - - STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); -} - -static void -stnic_block_output (struct net_device *dev, int length, - const unsigned char *buf, int output_page) -{ - STNIC_WRITE (PG0_RBCR0, 1); /* Write non-zero value */ - STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); - STNIC_DELAY (); - - STNIC_WRITE (PG0_RBCR0, length & 0xff); - STNIC_WRITE (PG0_RBCR1, length >> 8); - STNIC_WRITE (PG0_RSAR0, 0); - STNIC_WRITE (PG0_RSAR1, output_page); - STNIC_WRITE (STNIC_CR, CR_RWR | CR_PG0 | CR_STA); - - if (length & 1) - length++; - - while (length > 0) - { -#ifdef __LITTLE_ENDIAN__ - *(vhalf *) PA_83902_IF = ((half) buf[1] << 8) | buf[0]; -#else - *(vhalf *) PA_83902_IF = ((half) buf[0] << 8) | buf[1]; -#endif - STNIC_DELAY (); - buf += sizeof (half); - length -= sizeof (half); - } - - STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); -} - -/* This function resets the STNIC if something screws up. */ -static void -stnic_init (struct net_device *dev) -{ - stnic_reset (dev); - NS8390_init (dev, 0); -} - -static void __exit stnic_cleanup(void) -{ - unregister_netdev(stnic_dev); - free_irq(stnic_dev->irq, stnic_dev); - free_netdev(stnic_dev); -} - -module_init(stnic_probe); -module_exit(stnic_cleanup); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/wd.c b/drivers/net/wd.c deleted file mode 100644 index 8831a33..0000000 --- a/drivers/net/wd.c +++ /dev/null @@ -1,567 +0,0 @@ -/* wd.c: A WD80x3 ethernet driver for linux. */ -/* - Written 1993-94 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - This is a driver for WD8003 and WD8013 "compatible" ethercards. - - Thanks to Russ Nelson (nelson@crnwyr.com) for loaning me a WD8013. - - Changelog: - - Paul Gortmaker : multiple card support for module users, support - for non-standard memory sizes. - - -*/ - -static const char version[] = - "wd.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "8390.h" - -#define DRV_NAME "wd" - -/* A zero-terminated list of I/O addresses to be probed. 
*/ -static unsigned int wd_portlist[] __initdata = -{0x300, 0x280, 0x380, 0x240, 0}; - -static int wd_probe1(struct net_device *dev, int ioaddr); - -static int wd_open(struct net_device *dev); -static void wd_reset_8390(struct net_device *dev); -static void wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, - int ring_page); -static void wd_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset); -static void wd_block_output(struct net_device *dev, int count, - const unsigned char *buf, int start_page); -static int wd_close(struct net_device *dev); - - -#define WD_START_PG 0x00 /* First page of TX buffer */ -#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */ -#define WD13_STOP_PG 0x40 /* Last page +1 of RX ring */ - -#define WD_CMDREG 0 /* Offset to ASIC command register. */ -#define WD_RESET 0x80 /* Board reset, in WD_CMDREG. */ -#define WD_MEMENB 0x40 /* Enable the shared memory. */ -#define WD_CMDREG5 5 /* Offset to 16-bit-only ASIC register 5. */ -#define ISA16 0x80 /* Enable 16 bit access from the ISA bus. */ -#define NIC16 0x40 /* Enable 16 bit access from the 8390. */ -#define WD_NIC_OFFSET 16 /* Offset to the 8390 from the base_addr. */ -#define WD_IO_EXTENT 32 - - -/* Probe for the WD8003 and WD8013. These cards have the station - address PROM at I/O ports +8 to +13, with a checksum - following. A Soundblaster can have the same checksum as an WDethercard, - so we have an extra exclusionary check for it. - - The wd_probe1() routine initializes the card and fills the - station address field. */ - -static int __init do_wd_probe(struct net_device *dev) -{ - int i; - struct resource *r; - int base_addr = dev->base_addr; - int irq = dev->irq; - int mem_start = dev->mem_start; - int mem_end = dev->mem_end; - - if (base_addr > 0x1ff) { /* Check a user specified location. */ - r = request_region(base_addr, WD_IO_EXTENT, "wd-probe"); - if ( r == NULL) - return -EBUSY; - i = wd_probe1(dev, base_addr); - if (i != 0) - release_region(base_addr, WD_IO_EXTENT); - else - r->name = dev->name; - return i; - } - else if (base_addr != 0) /* Don't probe at all. */ - return -ENXIO; - - for (i = 0; wd_portlist[i]; i++) { - int ioaddr = wd_portlist[i]; - r = request_region(ioaddr, WD_IO_EXTENT, "wd-probe"); - if (r == NULL) - continue; - if (wd_probe1(dev, ioaddr) == 0) { - r->name = dev->name; - return 0; - } - release_region(ioaddr, WD_IO_EXTENT); - dev->irq = irq; - dev->mem_start = mem_start; - dev->mem_end = mem_end; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init wd_probe(int unit) -{ - struct net_device *dev = alloc_ei_netdev(); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_wd_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static const struct net_device_ops wd_netdev_ops = { - .ndo_open = wd_open, - .ndo_stop = wd_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, -#endif -}; - -static int __init wd_probe1(struct net_device *dev, int ioaddr) -{ - int i; - int err; - int checksum = 0; - int ancient = 0; /* An old card without config registers. 
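The probe body below leans on the WD address PROM convention spelled out above: the eight bytes at ioaddr+8 must sum to 0xff modulo 256. A self-contained version of that test (the sample PROM contents are made up so the sum works out):

#include <stdio.h>

static int prom_checksum_ok(const unsigned char prom[8])
{
	int i, sum = 0;

	for (i = 0; i < 8; i++)
		sum += prom[i];
	return (sum & 0xff) == 0xff;
}

int main(void)
{
	/* six station-address bytes plus two trailing bytes chosen
	 * so the total comes to 0x1ff, i.e. 0xff mod 256 */
	unsigned char prom[8] = { 0x00, 0x00, 0xc0, 0x12, 0x34, 0x56,
				  0x00, 0xa3 };

	printf("%s\n", prom_checksum_ok(prom) ? "ok" : "bad");
	return 0;
}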
*/ - int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */ - const char *model_name; - static unsigned version_printed; - - for (i = 0; i < 8; i++) - checksum += inb(ioaddr + 8 + i); - if (inb(ioaddr + 8) == 0xff /* Extra check to avoid soundcard. */ - || inb(ioaddr + 9) == 0xff - || (checksum & 0xff) != 0xFF) - return -ENODEV; - - /* Check for semi-valid mem_start/end values if supplied. */ - if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) { - printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n"); - dev->mem_start = 0; - dev->mem_end = 0; - } - - if (ei_debug && version_printed++ == 0) - printk(version); - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + 8 + i); - - printk("%s: WD80x3 at %#3x, %pM", - dev->name, ioaddr, dev->dev_addr); - - /* The following PureData probe code was contributed by - Mike Jagdis . Puredata does software - configuration differently from others so we have to check for them. - This detects an 8 bit, 16 bit or dumb (Toshiba, jumpered) card. - */ - if (inb(ioaddr+0) == 'P' && inb(ioaddr+1) == 'D') { - unsigned char reg5 = inb(ioaddr+5); - - switch (inb(ioaddr+2)) { - case 0x03: word16 = 0; model_name = "PDI8023-8"; break; - case 0x05: word16 = 0; model_name = "PDUC8023"; break; - case 0x0a: word16 = 1; model_name = "PDI8023-16"; break; - /* Either 0x01 (dumb) or they've released a new version. */ - default: word16 = 0; model_name = "PDI8023"; break; - } - dev->mem_start = ((reg5 & 0x1c) + 0xc0) << 12; - dev->irq = (reg5 & 0xe0) == 0xe0 ? 10 : (reg5 >> 5) + 1; - } else { /* End of PureData probe */ - /* This method of checking for a 16-bit board is borrowed from the - we.c driver. A simpler method is just to look in ASIC reg. 0x03. - I'm comparing the two method in alpha test to make certain they - return the same result. */ - /* Check for the old 8 bit board - it has register 0/8 aliasing. - Do NOT check i>=6 here -- it hangs the old 8003 boards! */ - for (i = 0; i < 6; i++) - if (inb(ioaddr+i) != inb(ioaddr+8+i)) - break; - if (i >= 6) { - ancient = 1; - model_name = "WD8003-old"; - word16 = 0; - } else { - int tmp = inb(ioaddr+1); /* fiddle with 16bit bit */ - outb( tmp ^ 0x01, ioaddr+1 ); /* attempt to clear 16bit bit */ - if (((inb( ioaddr+1) & 0x01) == 0x01) /* A 16 bit card */ - && (tmp & 0x01) == 0x01 ) { /* In a 16 slot. */ - int asic_reg5 = inb(ioaddr+WD_CMDREG5); - /* Magic to set ASIC to word-wide mode. */ - outb( NIC16 | (asic_reg5&0x1f), ioaddr+WD_CMDREG5); - outb(tmp, ioaddr+1); - model_name = "WD8013"; - word16 = 1; /* We have a 16bit board here! */ - } else { - model_name = "WD8003"; - word16 = 0; - } - outb(tmp, ioaddr+1); /* Restore original reg1 value. */ - } -#ifndef final_version - if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01)) - printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).", - word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8); -#endif - } - -#if defined(WD_SHMEM) && WD_SHMEM > 0x80000 - /* Allow a compile-time override. */ - dev->mem_start = WD_SHMEM; -#else - if (dev->mem_start == 0) { - /* Sanity and old 8003 check */ - int reg0 = inb(ioaddr); - if (reg0 == 0xff || reg0 == 0) { - /* Future plan: this could check a few likely locations first. */ - dev->mem_start = 0xd0000; - printk(" assigning address %#lx", dev->mem_start); - } else { - int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f; - /* Some boards don't have the register 5 -- it returns 0xff. 
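The fallback below completes a two-register decode: the low six bits of ASIC register 0 give an 8KB-aligned offset, and five high address bits from register 5 pick the 512KB segment. As a standalone sketch, including the fallback for boards whose register 5 reads back 0xff:

static unsigned long wd_decode_mem_start(unsigned char reg0,
					 unsigned char reg5, int word16)
{
	unsigned long high_addr_bits = reg5 & 0x1f;

	/* no register 5, or an 8-bit card: assume the conventional
	 * 0x80000 segment, as the driver does */
	if (high_addr_bits == 0x1f || !word16)
		high_addr_bits = 0x01;

	return ((unsigned long)(reg0 & 0x3f) << 13) |
	       (high_addr_bits << 19);
}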
*/ - if (high_addr_bits == 0x1f || word16 == 0) - high_addr_bits = 0x01; - dev->mem_start = ((reg0&0x3f) << 13) + (high_addr_bits << 19); - } - } -#endif - - /* The 8390 isn't at the base address -- the ASIC regs are there! */ - dev->base_addr = ioaddr+WD_NIC_OFFSET; - - if (dev->irq < 2) { - static const int irqmap[] = {9, 3, 5, 7, 10, 11, 15, 4}; - int reg1 = inb(ioaddr+1); - int reg4 = inb(ioaddr+4); - if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */ - short nic_addr = ioaddr+WD_NIC_OFFSET; - unsigned long irq_mask; - - /* We have an old-style ethercard that doesn't report its IRQ - line. Do autoirq to find the IRQ line. Note that this IS NOT - a reliable way to trigger an interrupt. */ - outb_p(E8390_NODMA + E8390_STOP, nic_addr); - outb(0x00, nic_addr+EN0_IMR); /* Disable all intrs. */ - - irq_mask = probe_irq_on(); - outb_p(0xff, nic_addr + EN0_IMR); /* Enable all interrupts. */ - outb_p(0x00, nic_addr + EN0_RCNTLO); - outb_p(0x00, nic_addr + EN0_RCNTHI); - outb(E8390_RREAD+E8390_START, nic_addr); /* Trigger it... */ - mdelay(20); - dev->irq = probe_irq_off(irq_mask); - - outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */ - - if (ei_debug > 2) - printk(" autoirq is %d", dev->irq); - if (dev->irq < 2) - dev->irq = word16 ? 10 : 5; - } else - dev->irq = irqmap[((reg4 >> 5) & 0x03) + (reg1 & 0x04)]; - } else if (dev->irq == 2) /* Fixup bogosity: IRQ2 is really IRQ9 */ - dev->irq = 9; - - /* Snarf the interrupt now. There's no point in waiting since we cannot - share and the board will usually be enabled. */ - i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); - if (i) { - printk (" unable to get IRQ %d.\n", dev->irq); - return i; - } - - /* OK, were are certain this is going to work. Setup the device. */ - ei_status.name = model_name; - ei_status.word16 = word16; - ei_status.tx_start_page = WD_START_PG; - ei_status.rx_start_page = WD_START_PG + TX_PAGES; - - /* Don't map in the shared memory until the board is actually opened. */ - - /* Some cards (eg WD8003EBT) can be jumpered for more (32k!) memory. */ - if (dev->mem_end != 0) { - ei_status.stop_page = (dev->mem_end - dev->mem_start)/256; - ei_status.priv = dev->mem_end - dev->mem_start; - } else { - ei_status.stop_page = word16 ? WD13_STOP_PG : WD03_STOP_PG; - dev->mem_end = dev->mem_start + (ei_status.stop_page - WD_START_PG)*256; - ei_status.priv = (ei_status.stop_page - WD_START_PG)*256; - } - - ei_status.mem = ioremap(dev->mem_start, ei_status.priv); - if (!ei_status.mem) { - free_irq(dev->irq, dev); - return -ENOMEM; - } - - printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n", - model_name, dev->irq, dev->mem_start, dev->mem_end-1); - - ei_status.reset_8390 = wd_reset_8390; - ei_status.block_input = wd_block_input; - ei_status.block_output = wd_block_output; - ei_status.get_8390_hdr = wd_get_8390_hdr; - - dev->netdev_ops = &wd_netdev_ops; - NS8390_init(dev, 0); - -#if 1 - /* Enable interrupt generation on softconfig cards -- M.U */ - /* .. but possibly potentially unsafe - Donald */ - if (inb(ioaddr+14) & 0x20) - outb(inb(ioaddr+4)|0x80, ioaddr+4); -#endif - - err = register_netdev(dev); - if (err) { - free_irq(dev->irq, dev); - iounmap(ei_status.mem); - } - return err; -} - -static int -wd_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ - - /* Map in the shared memory. Always set register 0 last to remain - compatible with very old boards. 
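These two stores are the inverse of the probe-time decode: the 8KB window index goes back into register 0 together with the memory-enable bit, and the segment bits go into register 5 together with the 16-bit-NIC bit. A sketch of the encoding (bit values per the WD_MEMENB and NIC16 defines earlier in the file; the helper is invented):

static void wd_encode_regs(unsigned long mem_start,
			   unsigned char *reg0, unsigned char *reg5)
{
	*reg0 = ((mem_start >> 13) & 0x3f) | 0x40;	/* WD_MEMENB */
	*reg5 = ((mem_start >> 19) & 0x1f) | 0x40;	/* NIC16 */
}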
*/ - ei_status.reg0 = ((dev->mem_start>>13) & 0x3f) | WD_MEMENB; - ei_status.reg5 = ((dev->mem_start>>19) & 0x1f) | NIC16; - - if (ei_status.word16) - outb(ei_status.reg5, ioaddr+WD_CMDREG5); - outb(ei_status.reg0, ioaddr); /* WD_CMDREG */ - - return ei_open(dev); -} - -static void -wd_reset_8390(struct net_device *dev) -{ - int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ - - outb(WD_RESET, wd_cmd_port); - if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies); - ei_status.txing = 0; - - /* Set up the ASIC registers, just in case something changed them. */ - outb((((dev->mem_start>>13) & 0x3f)|WD_MEMENB), wd_cmd_port); - if (ei_status.word16) - outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5); - - if (ei_debug > 1) printk("reset done\n"); -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - we don't need to be concerned with ring wrap as the header will be at - the start of a page, so we optimize accordingly. */ - -static void -wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) -{ - - int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ - void __iomem *hdr_start = ei_status.mem + ((ring_page - WD_START_PG)<<8); - - /* We'll always get a 4 byte header read followed by a packet read, so - we enable 16 bit mode before the header, and disable after the body. */ - if (ei_status.word16) - outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5); - -#ifdef __BIG_ENDIAN - /* Officially this is what we are doing, but the readl() is faster */ - /* unfortunately it isn't endian aware of the struct */ - memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); - hdr->count = le16_to_cpu(hdr->count); -#else - ((unsigned int*)hdr)[0] = readl(hdr_start); -#endif -} - -/* Block input and output are easy on shared memory ethercards, and trivial - on the Western digital card where there is no choice of how to do it. - The only complications are that the ring buffer wraps, and need to map - switch between 8- and 16-bit modes. */ - -static void -wd_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) -{ - int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ - unsigned long offset = ring_offset - (WD_START_PG<<8); - void __iomem *xfer_start = ei_status.mem + offset; - - if (offset + count > ei_status.priv) { - /* We must wrap the input move. */ - int semi_count = ei_status.priv - offset; - memcpy_fromio(skb->data, xfer_start, semi_count); - count -= semi_count; - memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); - } else { - /* Packet is in one chunk -- we can copy + cksum. */ - memcpy_fromio(skb->data, xfer_start, count); - } - - /* Turn off 16 bit access so that reboot works. ISA brain-damage */ - if (ei_status.word16) - outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5); -} - -static void -wd_block_output(struct net_device *dev, int count, const unsigned char *buf, - int start_page) -{ - int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ - void __iomem *shmem = ei_status.mem + ((start_page - WD_START_PG)<<8); - - - if (ei_status.word16) { - /* Turn on and off 16 bit access so that reboot works. 
*/ - outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5); - memcpy_toio(shmem, buf, count); - outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5); - } else - memcpy_toio(shmem, buf, count); -} - - -static int -wd_close(struct net_device *dev) -{ - int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ - - if (ei_debug > 1) - printk("%s: Shutting down ethercard.\n", dev->name); - ei_close(dev); - - /* Change from 16-bit to 8-bit shared memory so reboot works. */ - if (ei_status.word16) - outb(ei_status.reg5, wd_cmdreg + WD_CMDREG5 ); - - /* And disable the shared memory. */ - outb(ei_status.reg0 & ~WD_MEMENB, wd_cmdreg); - - return 0; -} - - -#ifdef MODULE -#define MAX_WD_CARDS 4 /* Max number of wd cards per module */ -static struct net_device *dev_wd[MAX_WD_CARDS]; -static int io[MAX_WD_CARDS]; -static int irq[MAX_WD_CARDS]; -static int mem[MAX_WD_CARDS]; -static int mem_end[MAX_WD_CARDS]; /* for non std. mem size */ - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -module_param_array(mem_end, int, NULL, 0); -MODULE_PARM_DESC(io, "I/O base address(es)"); -MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)"); -MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)"); -MODULE_PARM_DESC(mem_end, "memory end address(es)"); -MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013 ; SMC Elite, Elite16 ethernet driver"); -MODULE_LICENSE("GPL"); - -/* This is set up so that only a single autoprobe takes place per call. -ISA device autoprobes on a running machine are not recommended. */ - -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) { - if (io[this_dev] == 0) { - if (this_dev != 0) break; /* only autoprobe 1st one */ - printk(KERN_NOTICE "wd.c: Presently autoprobing (not recommended) for a single card.\n"); - } - dev = alloc_ei_netdev(); - if (!dev) - break; - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - dev->mem_start = mem[this_dev]; - dev->mem_end = mem_end[this_dev]; - if (do_wd_probe(dev) == 0) { - dev_wd[found++] = dev; - continue; - } - free_netdev(dev); - printk(KERN_WARNING "wd.c: No wd80x3 card found (i/o = 0x%x).\n", io[this_dev]); - break; - } - if (found) - return 0; - return -ENXIO; -} - -static void cleanup_card(struct net_device *dev) -{ - free_irq(dev->irq, dev); - release_region(dev->base_addr - WD_NIC_OFFSET, WD_IO_EXTENT); - iounmap(ei_status.mem); -} - -void __exit -cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) { - struct net_device *dev = dev_wd[this_dev]; - if (dev) { - unregister_netdev(dev); - cleanup_card(dev); - free_netdev(dev); - } - } -} -#endif /* MODULE */ diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c deleted file mode 100644 index 15e7751a..0000000 --- a/drivers/net/zorro8390.c +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Amiga Linux/m68k and Linux/PPC Zorro NS8390 Ethernet Driver - * - * (C) Copyright 1998-2000 by some Elitist 680x0 Users(TM) - * - * --------------------------------------------------------------------------- - * - * This program is based on all the other NE2000 drivers for Linux - * - * --------------------------------------------------------------------------- - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of the Linux - * distribution for more details. 
- * - * --------------------------------------------------------------------------- - * - * The Ariadne II and X-Surf are Zorro-II boards containing Realtek RTL8019AS - * Ethernet Controllers. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#define EI_SHIFT(x) (ei_local->reg_offset[x]) -#define ei_inb(port) in_8(port) -#define ei_outb(val, port) out_8(port, val) -#define ei_inb_p(port) in_8(port) -#define ei_outb_p(val, port) out_8(port, val) - -static const char version[] = - "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#include "lib8390.c" - -#define DRV_NAME "zorro8390" - -#define NE_BASE (dev->base_addr) -#define NE_CMD (0x00 * 2) -#define NE_DATAPORT (0x10 * 2) /* NatSemi-defined port window offset */ -#define NE_RESET (0x1f * 2) /* Issue a read to reset, - * a write to clear. */ -#define NE_IO_EXTENT (0x20 * 2) - -#define NE_EN0_ISR (0x07 * 2) -#define NE_EN0_DCFG (0x0e * 2) - -#define NE_EN0_RSARLO (0x08 * 2) -#define NE_EN0_RSARHI (0x09 * 2) -#define NE_EN0_RCNTLO (0x0a * 2) -#define NE_EN0_RXCR (0x0c * 2) -#define NE_EN0_TXCR (0x0d * 2) -#define NE_EN0_RCNTHI (0x0b * 2) -#define NE_EN0_IMR (0x0f * 2) - -#define NESM_START_PG 0x40 /* First page of TX buffer */ -#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ - -#define WORDSWAP(a) ((((a) >> 8) & 0xff) | ((a) << 8)) - -static struct card_info { - zorro_id id; - const char *name; - unsigned int offset; -} cards[] __devinitdata = { - { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 }, - { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 }, -}; - -/* Hard reset the card. This used to pause for the same period that a - * 8390 reset command required, but that shouldn't be necessary. - */ -static void zorro8390_reset_8390(struct net_device *dev) -{ - unsigned long reset_start_time = jiffies; - - if (ei_debug > 1) - netdev_dbg(dev, "resetting - t=%ld...\n", jiffies); - - z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); - - ei_status.txing = 0; - ei_status.dmaing = 0; - - /* This check _should_not_ be necessary, omit eventually. */ - while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) { - netdev_warn(dev, "%s: did not complete\n", __func__); - break; - } - z_writeb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr */ -} - -/* Grab the 8390 specific header. Similar to the block_input routine, but - * we don't need to be concerned with ring wrap as the header will be at - * the start of a page, so we optimize accordingly. - */ -static void zorro8390_get_8390_hdr(struct net_device *dev, - struct e8390_pkt_hdr *hdr, int ring_page) -{ - int nic_base = dev->base_addr; - int cnt; - short *ptrs; - - /* This *shouldn't* happen. 
- * If it does, it's the last thing you'll see - */ - if (ei_status.dmaing) { - netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", - __func__, ei_status.dmaing, ei_status.irqlock); - return; - } - - ei_status.dmaing |= 0x01; - z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); - z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); - z_writeb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO); - z_writeb(0, nic_base + NE_EN0_RCNTHI); - z_writeb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */ - z_writeb(ring_page, nic_base + NE_EN0_RSARHI); - z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD); - - ptrs = (short *)hdr; - for (cnt = 0; cnt < sizeof(struct e8390_pkt_hdr) >> 1; cnt++) - *ptrs++ = z_readw(NE_BASE + NE_DATAPORT); - - z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ - - hdr->count = WORDSWAP(hdr->count); - - ei_status.dmaing &= ~0x01; -} - -/* Block input and output, similar to the Crynwr packet driver. - * If you are porting to a new ethercard, look at the packet driver source - * for hints. The NEx000 doesn't share the on-board packet memory -- - * you have to put the packet out through the "remote DMA" dataport - * using z_writeb. - */ -static void zorro8390_block_input(struct net_device *dev, int count, - struct sk_buff *skb, int ring_offset) -{ - int nic_base = dev->base_addr; - char *buf = skb->data; - short *ptrs; - int cnt; - - /* This *shouldn't* happen. - * If it does, it's the last thing you'll see - */ - if (ei_status.dmaing) { - netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", - __func__, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); - z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); - z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO); - z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI); - z_writeb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO); - z_writeb(ring_offset >> 8, nic_base + NE_EN0_RSARHI); - z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD); - ptrs = (short *)buf; - for (cnt = 0; cnt < count >> 1; cnt++) - *ptrs++ = z_readw(NE_BASE + NE_DATAPORT); - if (count & 0x01) - buf[count - 1] = z_readb(NE_BASE + NE_DATAPORT); - - z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ - ei_status.dmaing &= ~0x01; -} - -static void zorro8390_block_output(struct net_device *dev, int count, - const unsigned char *buf, - const int start_page) -{ - int nic_base = NE_BASE; - unsigned long dma_start; - short *ptrs; - int cnt; - - /* Round the count up for word writes. Do we need to do this? - * What effect will an odd byte count have on the 8390? - * I should check someday. - */ - if (count & 0x01) - count++; - - /* This *shouldn't* happen. - * If it does, it's the last thing you'll see - */ - if (ei_status.dmaing) { - netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", - __func__, ei_status.dmaing, ei_status.irqlock); - return; - } - ei_status.dmaing |= 0x01; - /* We should already be in page 0, but to be safe... */ - z_writeb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); - - z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); - - /* Now the normal output. 
*/ - z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO); - z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI); - z_writeb(0x00, nic_base + NE_EN0_RSARLO); - z_writeb(start_page, nic_base + NE_EN0_RSARHI); - - z_writeb(E8390_RWRITE + E8390_START, nic_base + NE_CMD); - ptrs = (short *)buf; - for (cnt = 0; cnt < count >> 1; cnt++) - z_writew(*ptrs++, NE_BASE + NE_DATAPORT); - - dma_start = jiffies; - - while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) - if (time_after(jiffies, dma_start + 2 * HZ / 100)) { - /* 20ms */ - netdev_err(dev, "timeout waiting for Tx RDC\n"); - zorro8390_reset_8390(dev); - __NS8390_init(dev, 1); - break; - } - - z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ - ei_status.dmaing &= ~0x01; -} - -static int zorro8390_open(struct net_device *dev) -{ - __ei_open(dev); - return 0; -} - -static int zorro8390_close(struct net_device *dev) -{ - if (ei_debug > 1) - netdev_dbg(dev, "Shutting down ethercard\n"); - __ei_close(dev); - return 0; -} - -static void __devexit zorro8390_remove_one(struct zorro_dev *z) -{ - struct net_device *dev = zorro_get_drvdata(z); - - unregister_netdev(dev); - free_irq(IRQ_AMIGA_PORTS, dev); - release_mem_region(ZTWO_PADDR(dev->base_addr), NE_IO_EXTENT * 2); - free_netdev(dev); -} - -static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = { - { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, }, - { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, - { 0 } -}; -MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl); - -static const struct net_device_ops zorro8390_netdev_ops = { - .ndo_open = zorro8390_open, - .ndo_stop = zorro8390_close, - .ndo_start_xmit = __ei_start_xmit, - .ndo_tx_timeout = __ei_tx_timeout, - .ndo_get_stats = __ei_get_stats, - .ndo_set_multicast_list = __ei_set_multicast_list, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = __ei_poll, -#endif -}; - -static int __devinit zorro8390_init(struct net_device *dev, - unsigned long board, const char *name, - unsigned long ioaddr) -{ - int i; - int err; - unsigned char SA_prom[32]; - int start_page, stop_page; - static u32 zorro8390_offsets[16] = { - 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, - 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, - }; - - /* Reset card. Who knows what dain-bramaged state it was left in. */ - { - unsigned long reset_start_time = jiffies; - - z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET); - - while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) - if (time_after(jiffies, - reset_start_time + 2 * HZ / 100)) { - netdev_warn(dev, "not found (no reset ack)\n"); - return -ENODEV; - } - - z_writeb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */ - } - - /* Read the 16 bytes of station address PROM. - * We must first initialize registers, - * similar to NS8390_init(eifdev, 0). - * We can't reliably read the SAPROM address without this. - * (I learned the hard way!). 
- */ - { - static const struct { - u32 value; - u32 offset; - } program_seq[] = { - {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD}, - /* Select page 0 */ - {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */ - {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */ - {0x00, NE_EN0_RCNTHI}, - {0x00, NE_EN0_IMR}, /* Mask completion irq */ - {0xFF, NE_EN0_ISR}, - {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */ - {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */ - {32, NE_EN0_RCNTLO}, - {0x00, NE_EN0_RCNTHI}, - {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */ - {0x00, NE_EN0_RSARHI}, - {E8390_RREAD + E8390_START, NE_CMD}, - }; - for (i = 0; i < ARRAY_SIZE(program_seq); i++) - z_writeb(program_seq[i].value, - ioaddr + program_seq[i].offset); - } - for (i = 0; i < 16; i++) { - SA_prom[i] = z_readb(ioaddr + NE_DATAPORT); - (void)z_readb(ioaddr + NE_DATAPORT); - } - - /* We must set the 8390 for word mode. */ - z_writeb(0x49, ioaddr + NE_EN0_DCFG); - start_page = NESM_START_PG; - stop_page = NESM_STOP_PG; - - dev->base_addr = ioaddr; - dev->irq = IRQ_AMIGA_PORTS; - - /* Install the Interrupt handler */ - i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, - IRQF_SHARED, DRV_NAME, dev); - if (i) - return i; - - for (i = 0; i < ETHER_ADDR_LEN; i++) - dev->dev_addr[i] = SA_prom[i]; - - pr_debug("Found ethernet address: %pM\n", dev->dev_addr); - - ei_status.name = name; - ei_status.tx_start_page = start_page; - ei_status.stop_page = stop_page; - ei_status.word16 = 1; - - ei_status.rx_start_page = start_page + TX_PAGES; - - ei_status.reset_8390 = zorro8390_reset_8390; - ei_status.block_input = zorro8390_block_input; - ei_status.block_output = zorro8390_block_output; - ei_status.get_8390_hdr = zorro8390_get_8390_hdr; - ei_status.reg_offset = zorro8390_offsets; - - dev->netdev_ops = &zorro8390_netdev_ops; - __NS8390_init(dev, 0); - err = register_netdev(dev); - if (err) { - free_irq(IRQ_AMIGA_PORTS, dev); - return err; - } - - netdev_info(dev, "%s at 0x%08lx, Ethernet Address %pM\n", - name, board, dev->dev_addr); - - return 0; -} - -static int __devinit zorro8390_init_one(struct zorro_dev *z, - const struct zorro_device_id *ent) -{ - struct net_device *dev; - unsigned long board, ioaddr; - int err, i; - - for (i = ARRAY_SIZE(cards) - 1; i >= 0; i--) - if (z->id == cards[i].id) - break; - if (i < 0) - return -ENODEV; - - board = z->resource.start; - ioaddr = board + cards[i].offset; - dev = ____alloc_ei_netdev(0); - if (!dev) - return -ENOMEM; - if (!request_mem_region(ioaddr, NE_IO_EXTENT * 2, DRV_NAME)) { - free_netdev(dev); - return -EBUSY; - } - err = zorro8390_init(dev, board, cards[i].name, ZTWO_VADDR(ioaddr)); - if (err) { - release_mem_region(ioaddr, NE_IO_EXTENT * 2); - free_netdev(dev); - return err; - } - zorro_set_drvdata(z, dev); - return 0; -} - -static struct zorro_driver zorro8390_driver = { - .name = "zorro8390", - .id_table = zorro8390_zorro_tbl, - .probe = zorro8390_init_one, - .remove = __devexit_p(zorro8390_remove_one), -}; - -static int __init zorro8390_init_module(void) -{ - return zorro_register_driver(&zorro8390_driver); -} - -static void __exit zorro8390_cleanup_module(void) -{ - zorro_unregister_driver(&zorro8390_driver); -} - -module_init(zorro8390_init_module); -module_exit(zorro8390_cleanup_module); - -MODULE_LICENSE("GPL"); -- cgit v0.10.2 From adfc5217e9db68d3f0cec8dd847c1a6d3ab549ee Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 7 Apr 2011 06:03:04 -0700 Subject: broadcom: Move the Broadcom drivers Moves the drivers for Broadcom devices into 
drivers/net/ethernet/broadcom/ and the necessary Kconfig and Makefile changes. CC: Eilon Greenstein CC: Michael Chan CC: Matt Carlson CC: Gary Zambrano CC: "Maciej W. Rozycki" Signed-off-by: Jeff Kirsher diff --git a/MAINTAINERS b/MAINTAINERS index c694583..af9de64 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1526,27 +1526,27 @@ BROADCOM B44 10/100 ETHERNET DRIVER M: Gary Zambrano L: netdev@vger.kernel.org S: Supported -F: drivers/net/b44.* +F: drivers/net/ethernet/broadcom/b44.* BROADCOM BNX2 GIGABIT ETHERNET DRIVER M: Michael Chan L: netdev@vger.kernel.org S: Supported -F: drivers/net/bnx2.* -F: drivers/net/bnx2_* +F: drivers/net/ethernet/broadcom/bnx2.* +F: drivers/net/ethernet/broadcom/bnx2_* BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER M: Eilon Greenstein L: netdev@vger.kernel.org S: Supported -F: drivers/net/bnx2x/ +F: drivers/net/ethernet/broadcom/bnx2x/ BROADCOM TG3 GIGABIT ETHERNET DRIVER M: Matt Carlson M: Michael Chan L: netdev@vger.kernel.org S: Supported -F: drivers/net/tg3.* +F: drivers/net/ethernet/broadcom/tg3.* BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER M: Brett Rudley diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index c877f41..2f9f208 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -997,38 +997,6 @@ config APRICOT To compile this driver as a module, choose M here. The module will be called apricot. -config B44 - tristate "Broadcom 440x/47xx ethernet support" - depends on SSB_POSSIBLE && HAS_DMA - select SSB - select MII - help - If you have a network (Ethernet) controller of this type, say Y - or M and read the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called b44. - -# Auto-select SSB PCI-HOST support, if possible -config B44_PCI_AUTOSELECT - bool - depends on B44 && SSB_PCIHOST_POSSIBLE - select SSB_PCIHOST - default y - -# Auto-select SSB PCICORE driver, if possible -config B44_PCICORE_AUTOSELECT - bool - depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE - select SSB_DRIVER_PCICORE - default y - -config B44_PCI - bool - depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT - default y - config FORCEDETH tristate "nForce Ethernet support" depends on NET_PCI && PCI @@ -1465,15 +1433,6 @@ config XILINX_EMACLITE help This driver supports the 10/100 Ethernet Lite from Xilinx. -config BCM63XX_ENET - tristate "Broadcom 63xx internal mac support" - depends on BCM63XX - select MII - select PHYLIB - help - This driver supports the ethernet MACs in the Broadcom 63xx - MIPS chipset family (BCM63XX). - config FTMAC100 tristate "Faraday FTMAC100 10/100 Ethernet support" depends on ARM @@ -1683,19 +1642,6 @@ config R8169 To compile this driver as a module, choose M here: the module will be called r8169. This is recommended. -config SB1250_MAC - tristate "SB1250 Gigabit Ethernet support" - depends on SIBYTE_SB1xxx_SOC - select PHYLIB - ---help--- - This driver supports Gigabit Ethernet interfaces based on the - Broadcom SiByte family of System-On-a-Chip parts. They include - the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455 - and BCM1480 chips. - - To compile this driver as a module, choose M here: the module - will be called sb1250-mac. - config SIS190 tristate "SiS190/SiS191 gigabit ethernet support" depends on PCI @@ -1789,39 +1735,6 @@ config VIA_VELOCITY To compile this driver as a module, choose M here. The module will be called via-velocity. 
-config TIGON3 - tristate "Broadcom Tigon3 support" - depends on PCI - select PHYLIB - help - This driver supports Broadcom Tigon3 based gigabit Ethernet cards. - - To compile this driver as a module, choose M here: the module - will be called tg3. This is recommended. - -config BNX2 - tristate "Broadcom NetXtremeII support" - depends on PCI - select CRC32 - select FW_LOADER - help - This driver supports Broadcom NetXtremeII gigabit Ethernet cards. - - To compile this driver as a module, choose M here: the module - will be called bnx2. This is recommended. - -config CNIC - tristate "Broadcom CNIC support" - depends on PCI - select BNX2 - select UIO - help - This driver supports offload features of Broadcom NetXtremeII - gigabit Ethernet cards. - - To compile this driver as a module, choose M here: the module - will be called cnic. This is recommended. - config SPIDER_NET tristate "Spider Gigabit Ethernet driver" depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) @@ -2327,18 +2240,6 @@ config TEHUTI help Tehuti Networks 10G Ethernet NIC -config BNX2X - tristate "Broadcom NetXtremeII 10Gb support" - depends on PCI - select FW_LOADER - select ZLIB_INFLATE - select LIBCRC32C - select MDIO - help - This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards. - To compile this driver as a module, choose M here: the module - will be called bnx2x. This is recommended. - config QLCNIC tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support" depends on PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 4e8fa73..4cc2dcd 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -77,10 +77,6 @@ obj-$(CONFIG_ISERIES_VETH) += iseries_veth.o obj-$(CONFIG_NATSEMI) += natsemi.o obj-$(CONFIG_NS83820) += ns83820.o obj-$(CONFIG_FEALNX) += fealnx.o -obj-$(CONFIG_TIGON3) += tg3.o -obj-$(CONFIG_BNX2) += bnx2.o -obj-$(CONFIG_CNIC) += cnic.o -obj-$(CONFIG_BNX2X) += bnx2x/ spidernet-y += spider_net.o spider_net_ethtool.o obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o obj-$(CONFIG_GELIC_NET) += ps3_gelic.o @@ -117,10 +113,7 @@ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o endif -obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o -obj-$(CONFIG_B44) += b44.o obj-$(CONFIG_FORCEDETH) += forcedeth.o -obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o obj-$(CONFIG_FTGMAC100) += ftgmac100.o obj-$(CONFIG_FTMAC100) += ftmac100.o diff --git a/drivers/net/b44.c b/drivers/net/b44.c deleted file mode 100644 index 41ea84e..0000000 --- a/drivers/net/b44.c +++ /dev/null @@ -1,2374 +0,0 @@ -/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver. - * - * Copyright (C) 2002 David S. Miller (davem@redhat.com) - * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi) - * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org) - * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org) - * Copyright (C) 2006 Broadcom Corporation. - * Copyright (C) 2007 Michael Buesch - * - * Distribute under GPL. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - - -#include "b44.h" - -#define DRV_MODULE_NAME "b44" -#define DRV_MODULE_VERSION "2.0" -#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver" - -#define B44_DEF_MSG_ENABLE \ - (NETIF_MSG_DRV | \ - NETIF_MSG_PROBE | \ - NETIF_MSG_LINK | \ - NETIF_MSG_TIMER | \ - NETIF_MSG_IFDOWN | \ - NETIF_MSG_IFUP | \ - NETIF_MSG_RX_ERR | \ - NETIF_MSG_TX_ERR) - -/* length of time before we decide the hardware is borked, - * and dev->tx_timeout() should be called to fix the problem - */ -#define B44_TX_TIMEOUT (5 * HZ) - -/* hardware minimum and maximum for a single frame's data payload */ -#define B44_MIN_MTU 60 -#define B44_MAX_MTU 1500 - -#define B44_RX_RING_SIZE 512 -#define B44_DEF_RX_RING_PENDING 200 -#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \ - B44_RX_RING_SIZE) -#define B44_TX_RING_SIZE 512 -#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1) -#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \ - B44_TX_RING_SIZE) - -#define TX_RING_GAP(BP) \ - (B44_TX_RING_SIZE - (BP)->tx_pending) -#define TX_BUFFS_AVAIL(BP) \ - (((BP)->tx_cons <= (BP)->tx_prod) ? \ - (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \ - (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP)) -#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1)) - -#define RX_PKT_OFFSET (RX_HEADER_LEN + 2) -#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET) - -/* minimum number of free TX descriptors required to wake up TX process */ -#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4) - -/* b44 internal pattern match filter info */ -#define B44_PATTERN_BASE 0x400 -#define B44_PATTERN_SIZE 0x80 -#define B44_PMASK_BASE 0x600 -#define B44_PMASK_SIZE 0x10 -#define B44_MAX_PATTERNS 16 -#define B44_ETHIPV6UDP_HLEN 62 -#define B44_ETHIPV4UDP_HLEN 42 - -MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller"); -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); - -static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */ -module_param(b44_debug, int, 0); -MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); - - -#ifdef CONFIG_B44_PCI -static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = { - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, - { 0 } /* terminate list with empty entry */ -}; -MODULE_DEVICE_TABLE(pci, b44_pci_tbl); - -static struct pci_driver b44_pci_driver = { - .name = DRV_MODULE_NAME, - .id_table = b44_pci_tbl, -}; -#endif /* CONFIG_B44_PCI */ - -static const struct ssb_device_id b44_ssb_tbl[] = { - SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV), - SSB_DEVTABLE_END -}; -MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl); - -static void b44_halt(struct b44 *); -static void b44_init_rings(struct b44 *); - -#define B44_FULL_RESET 1 -#define B44_FULL_RESET_SKIP_PHY 2 -#define B44_PARTIAL_RESET 3 -#define B44_CHIP_RESET_FULL 4 -#define B44_CHIP_RESET_PARTIAL 5 - -static void b44_init_hw(struct b44 *, int); - -static int dma_desc_sync_size; -static int instance; - -static const char b44_gstrings[][ETH_GSTRING_LEN] = { -#define _B44(x...) 
# x, -B44_STAT_REG_DECLARE -#undef _B44 -}; - -static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev, - dma_addr_t dma_base, - unsigned long offset, - enum dma_data_direction dir) -{ - dma_sync_single_for_device(sdev->dma_dev, dma_base + offset, - dma_desc_sync_size, dir); -} - -static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, - dma_addr_t dma_base, - unsigned long offset, - enum dma_data_direction dir) -{ - dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset, - dma_desc_sync_size, dir); -} - -static inline unsigned long br32(const struct b44 *bp, unsigned long reg) -{ - return ssb_read32(bp->sdev, reg); -} - -static inline void bw32(const struct b44 *bp, - unsigned long reg, unsigned long val) -{ - ssb_write32(bp->sdev, reg, val); -} - -static int b44_wait_bit(struct b44 *bp, unsigned long reg, - u32 bit, unsigned long timeout, const int clear) -{ - unsigned long i; - - for (i = 0; i < timeout; i++) { - u32 val = br32(bp, reg); - - if (clear && !(val & bit)) - break; - if (!clear && (val & bit)) - break; - udelay(10); - } - if (i == timeout) { - if (net_ratelimit()) - netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n", - bit, reg, clear ? "clear" : "set"); - - return -ENODEV; - } - return 0; -} - -static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index) -{ - u32 val; - - bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ | - (index << CAM_CTRL_INDEX_SHIFT))); - - b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); - - val = br32(bp, B44_CAM_DATA_LO); - - data[2] = (val >> 24) & 0xFF; - data[3] = (val >> 16) & 0xFF; - data[4] = (val >> 8) & 0xFF; - data[5] = (val >> 0) & 0xFF; - - val = br32(bp, B44_CAM_DATA_HI); - - data[0] = (val >> 8) & 0xFF; - data[1] = (val >> 0) & 0xFF; -} - -static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) -{ - u32 val; - - val = ((u32) data[2]) << 24; - val |= ((u32) data[3]) << 16; - val |= ((u32) data[4]) << 8; - val |= ((u32) data[5]) << 0; - bw32(bp, B44_CAM_DATA_LO, val); - val = (CAM_DATA_HI_VALID | - (((u32) data[0]) << 8) | - (((u32) data[1]) << 0)); - bw32(bp, B44_CAM_DATA_HI, val); - bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE | - (index << CAM_CTRL_INDEX_SHIFT))); - b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); -} - -static inline void __b44_disable_ints(struct b44 *bp) -{ - bw32(bp, B44_IMASK, 0); -} - -static void b44_disable_ints(struct b44 *bp) -{ - __b44_disable_ints(bp); - - /* Flush posted writes. 
*/ - br32(bp, B44_IMASK); -} - -static void b44_enable_ints(struct b44 *bp) -{ - bw32(bp, B44_IMASK, bp->imask); -} - -static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val) -{ - int err; - - bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); - bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | - (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) | - (phy_addr << MDIO_DATA_PMD_SHIFT) | - (reg << MDIO_DATA_RA_SHIFT) | - (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT))); - err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); - *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA; - - return err; -} - -static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val) -{ - bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); - bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | - (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) | - (phy_addr << MDIO_DATA_PMD_SHIFT) | - (reg << MDIO_DATA_RA_SHIFT) | - (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) | - (val & MDIO_DATA_DATA))); - return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); -} - -static inline int b44_readphy(struct b44 *bp, int reg, u32 *val) -{ - if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) - return 0; - - return __b44_readphy(bp, bp->phy_addr, reg, val); -} - -static inline int b44_writephy(struct b44 *bp, int reg, u32 val) -{ - if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) - return 0; - - return __b44_writephy(bp, bp->phy_addr, reg, val); -} - -/* miilib interface */ -static int b44_mii_read(struct net_device *dev, int phy_id, int location) -{ - u32 val; - struct b44 *bp = netdev_priv(dev); - int rc = __b44_readphy(bp, phy_id, location, &val); - if (rc) - return 0xffffffff; - return val; -} - -static void b44_mii_write(struct net_device *dev, int phy_id, int location, - int val) -{ - struct b44 *bp = netdev_priv(dev); - __b44_writephy(bp, phy_id, location, val); -} - -static int b44_phy_reset(struct b44 *bp) -{ - u32 val; - int err; - - if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) - return 0; - err = b44_writephy(bp, MII_BMCR, BMCR_RESET); - if (err) - return err; - udelay(100); - err = b44_readphy(bp, MII_BMCR, &val); - if (!err) { - if (val & BMCR_RESET) { - netdev_err(bp->dev, "PHY Reset would not complete\n"); - err = -ENODEV; - } - } - - return err; -} - -static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags) -{ - u32 val; - - bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE); - bp->flags |= pause_flags; - - val = br32(bp, B44_RXCONFIG); - if (pause_flags & B44_FLAG_RX_PAUSE) - val |= RXCONFIG_FLOW; - else - val &= ~RXCONFIG_FLOW; - bw32(bp, B44_RXCONFIG, val); - - val = br32(bp, B44_MAC_FLOW); - if (pause_flags & B44_FLAG_TX_PAUSE) - val |= (MAC_FLOW_PAUSE_ENAB | - (0xc0 & MAC_FLOW_RX_HI_WATER)); - else - val &= ~MAC_FLOW_PAUSE_ENAB; - bw32(bp, B44_MAC_FLOW, val); -} - -static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) -{ - u32 pause_enab = 0; - - /* The driver supports only rx pause by default because - the b44 mac tx pause mechanism generates excessive - pause frames. - Use ethtool to turn on b44 tx pause if necessary. 
- */ - if ((local & ADVERTISE_PAUSE_CAP) && - (local & ADVERTISE_PAUSE_ASYM)){ - if ((remote & LPA_PAUSE_ASYM) && - !(remote & LPA_PAUSE_CAP)) - pause_enab |= B44_FLAG_RX_PAUSE; - } - - __b44_set_flow_ctrl(bp, pause_enab); -} - -#ifdef CONFIG_BCM47XX -#include -static void b44_wap54g10_workaround(struct b44 *bp) -{ - char buf[20]; - u32 val; - int err; - - /* - * workaround for bad hardware design in Linksys WAP54G v1.0 - * see https://dev.openwrt.org/ticket/146 - * check and reset bit "isolate" - */ - if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0) - return; - if (simple_strtoul(buf, NULL, 0) == 2) { - err = __b44_readphy(bp, 0, MII_BMCR, &val); - if (err) - goto error; - if (!(val & BMCR_ISOLATE)) - return; - val &= ~BMCR_ISOLATE; - err = __b44_writephy(bp, 0, MII_BMCR, val); - if (err) - goto error; - } - return; -error: - pr_warning("PHY: cannot reset MII transceiver isolate bit\n"); -} -#else -static inline void b44_wap54g10_workaround(struct b44 *bp) -{ -} -#endif - -static int b44_setup_phy(struct b44 *bp) -{ - u32 val; - int err; - - b44_wap54g10_workaround(bp); - - if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) - return 0; - if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) - goto out; - if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, - val & MII_ALEDCTRL_ALLMSK)) != 0) - goto out; - if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0) - goto out; - if ((err = b44_writephy(bp, B44_MII_TLEDCTRL, - val | MII_TLEDCTRL_ENABLE)) != 0) - goto out; - - if (!(bp->flags & B44_FLAG_FORCE_LINK)) { - u32 adv = ADVERTISE_CSMA; - - if (bp->flags & B44_FLAG_ADV_10HALF) - adv |= ADVERTISE_10HALF; - if (bp->flags & B44_FLAG_ADV_10FULL) - adv |= ADVERTISE_10FULL; - if (bp->flags & B44_FLAG_ADV_100HALF) - adv |= ADVERTISE_100HALF; - if (bp->flags & B44_FLAG_ADV_100FULL) - adv |= ADVERTISE_100FULL; - - if (bp->flags & B44_FLAG_PAUSE_AUTO) - adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - - if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0) - goto out; - if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE | - BMCR_ANRESTART))) != 0) - goto out; - } else { - u32 bmcr; - - if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0) - goto out; - bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100); - if (bp->flags & B44_FLAG_100_BASE_T) - bmcr |= BMCR_SPEED100; - if (bp->flags & B44_FLAG_FULL_DUPLEX) - bmcr |= BMCR_FULLDPLX; - if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0) - goto out; - - /* Since we will not be negotiating there is no safe way - * to determine if the link partner supports flow control - * or not. So just disable it completely in this case. - */ - b44_set_flow_ctrl(bp, 0, 0); - } - -out: - return err; -} - -static void b44_stats_update(struct b44 *bp) -{ - unsigned long reg; - u32 *val; - - val = &bp->hw_stats.tx_good_octets; - for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) { - *val++ += br32(bp, reg); - } - - /* Pad */ - reg += 8*4UL; - - for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) { - *val++ += br32(bp, reg); - } -} - -static void b44_link_report(struct b44 *bp) -{ - if (!netif_carrier_ok(bp->dev)) { - netdev_info(bp->dev, "Link is down\n"); - } else { - netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n", - (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10, - (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half"); - - netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n", - (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off", - (bp->flags & B44_FLAG_RX_PAUSE) ? 
"on" : "off"); - } -} - -static void b44_check_phy(struct b44 *bp) -{ - u32 bmsr, aux; - - if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { - bp->flags |= B44_FLAG_100_BASE_T; - bp->flags |= B44_FLAG_FULL_DUPLEX; - if (!netif_carrier_ok(bp->dev)) { - u32 val = br32(bp, B44_TX_CTRL); - val |= TX_CTRL_DUPLEX; - bw32(bp, B44_TX_CTRL, val); - netif_carrier_on(bp->dev); - b44_link_report(bp); - } - return; - } - - if (!b44_readphy(bp, MII_BMSR, &bmsr) && - !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && - (bmsr != 0xffff)) { - if (aux & MII_AUXCTRL_SPEED) - bp->flags |= B44_FLAG_100_BASE_T; - else - bp->flags &= ~B44_FLAG_100_BASE_T; - if (aux & MII_AUXCTRL_DUPLEX) - bp->flags |= B44_FLAG_FULL_DUPLEX; - else - bp->flags &= ~B44_FLAG_FULL_DUPLEX; - - if (!netif_carrier_ok(bp->dev) && - (bmsr & BMSR_LSTATUS)) { - u32 val = br32(bp, B44_TX_CTRL); - u32 local_adv, remote_adv; - - if (bp->flags & B44_FLAG_FULL_DUPLEX) - val |= TX_CTRL_DUPLEX; - else - val &= ~TX_CTRL_DUPLEX; - bw32(bp, B44_TX_CTRL, val); - - if (!(bp->flags & B44_FLAG_FORCE_LINK) && - !b44_readphy(bp, MII_ADVERTISE, &local_adv) && - !b44_readphy(bp, MII_LPA, &remote_adv)) - b44_set_flow_ctrl(bp, local_adv, remote_adv); - - /* Link now up */ - netif_carrier_on(bp->dev); - b44_link_report(bp); - } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) { - /* Link now down */ - netif_carrier_off(bp->dev); - b44_link_report(bp); - } - - if (bmsr & BMSR_RFAULT) - netdev_warn(bp->dev, "Remote fault detected in PHY\n"); - if (bmsr & BMSR_JCD) - netdev_warn(bp->dev, "Jabber detected in PHY\n"); - } -} - -static void b44_timer(unsigned long __opaque) -{ - struct b44 *bp = (struct b44 *) __opaque; - - spin_lock_irq(&bp->lock); - - b44_check_phy(bp); - - b44_stats_update(bp); - - spin_unlock_irq(&bp->lock); - - mod_timer(&bp->timer, round_jiffies(jiffies + HZ)); -} - -static void b44_tx(struct b44 *bp) -{ - u32 cur, cons; - - cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK; - cur /= sizeof(struct dma_desc); - - /* XXX needs updating when NETIF_F_SG is supported */ - for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) { - struct ring_info *rp = &bp->tx_buffers[cons]; - struct sk_buff *skb = rp->skb; - - BUG_ON(skb == NULL); - - dma_unmap_single(bp->sdev->dma_dev, - rp->mapping, - skb->len, - DMA_TO_DEVICE); - rp->skb = NULL; - dev_kfree_skb(skb); - } - - bp->tx_cons = cons; - if (netif_queue_stopped(bp->dev) && - TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) - netif_wake_queue(bp->dev); - - bw32(bp, B44_GPTIMER, 0); -} - -/* Works like this. This chip writes a 'struct rx_header" 30 bytes - * before the DMA address you give it. So we allocate 30 more bytes - * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then - * point the chip at 30 bytes past where the rx_header will go. 
- */ -static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) -{ - struct dma_desc *dp; - struct ring_info *src_map, *map; - struct rx_header *rh; - struct sk_buff *skb; - dma_addr_t mapping; - int dest_idx; - u32 ctrl; - - src_map = NULL; - if (src_idx >= 0) - src_map = &bp->rx_buffers[src_idx]; - dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); - map = &bp->rx_buffers[dest_idx]; - skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ); - if (skb == NULL) - return -ENOMEM; - - mapping = dma_map_single(bp->sdev->dma_dev, skb->data, - RX_PKT_BUF_SZ, - DMA_FROM_DEVICE); - - /* Hardware bug work-around, the chip is unable to do PCI DMA - to/from anything above 1GB :-( */ - if (dma_mapping_error(bp->sdev->dma_dev, mapping) || - mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { - /* Sigh... */ - if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) - dma_unmap_single(bp->sdev->dma_dev, mapping, - RX_PKT_BUF_SZ, DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); - skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); - if (skb == NULL) - return -ENOMEM; - mapping = dma_map_single(bp->sdev->dma_dev, skb->data, - RX_PKT_BUF_SZ, - DMA_FROM_DEVICE); - if (dma_mapping_error(bp->sdev->dma_dev, mapping) || - mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { - if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) - dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); - return -ENOMEM; - } - bp->force_copybreak = 1; - } - - rh = (struct rx_header *) skb->data; - - rh->len = 0; - rh->flags = 0; - - map->skb = skb; - map->mapping = mapping; - - if (src_map != NULL) - src_map->skb = NULL; - - ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ); - if (dest_idx == (B44_RX_RING_SIZE - 1)) - ctrl |= DESC_CTRL_EOT; - - dp = &bp->rx_ring[dest_idx]; - dp->ctrl = cpu_to_le32(ctrl); - dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset); - - if (bp->flags & B44_FLAG_RX_RING_HACK) - b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, - dest_idx * sizeof(*dp), - DMA_BIDIRECTIONAL); - - return RX_PKT_BUF_SZ; -} - -static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) -{ - struct dma_desc *src_desc, *dest_desc; - struct ring_info *src_map, *dest_map; - struct rx_header *rh; - int dest_idx; - __le32 ctrl; - - dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); - dest_desc = &bp->rx_ring[dest_idx]; - dest_map = &bp->rx_buffers[dest_idx]; - src_desc = &bp->rx_ring[src_idx]; - src_map = &bp->rx_buffers[src_idx]; - - dest_map->skb = src_map->skb; - rh = (struct rx_header *) src_map->skb->data; - rh->len = 0; - rh->flags = 0; - dest_map->mapping = src_map->mapping; - - if (bp->flags & B44_FLAG_RX_RING_HACK) - b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma, - src_idx * sizeof(*src_desc), - DMA_BIDIRECTIONAL); - - ctrl = src_desc->ctrl; - if (dest_idx == (B44_RX_RING_SIZE - 1)) - ctrl |= cpu_to_le32(DESC_CTRL_EOT); - else - ctrl &= cpu_to_le32(~DESC_CTRL_EOT); - - dest_desc->ctrl = ctrl; - dest_desc->addr = src_desc->addr; - - src_map->skb = NULL; - - if (bp->flags & B44_FLAG_RX_RING_HACK) - b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, - dest_idx * sizeof(*dest_desc), - DMA_BIDIRECTIONAL); - - dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping, - RX_PKT_BUF_SZ, - DMA_FROM_DEVICE); -} - -static int b44_rx(struct b44 *bp, int budget) -{ - int received; - u32 cons, prod; - - received = 0; - prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK; - prod /= sizeof(struct dma_desc); - cons = bp->rx_cons; - - while (cons 
!= prod && budget > 0) { - struct ring_info *rp = &bp->rx_buffers[cons]; - struct sk_buff *skb = rp->skb; - dma_addr_t map = rp->mapping; - struct rx_header *rh; - u16 len; - - dma_sync_single_for_cpu(bp->sdev->dma_dev, map, - RX_PKT_BUF_SZ, - DMA_FROM_DEVICE); - rh = (struct rx_header *) skb->data; - len = le16_to_cpu(rh->len); - if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) || - (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) { - drop_it: - b44_recycle_rx(bp, cons, bp->rx_prod); - drop_it_no_recycle: - bp->dev->stats.rx_dropped++; - goto next_pkt; - } - - if (len == 0) { - int i = 0; - - do { - udelay(2); - barrier(); - len = le16_to_cpu(rh->len); - } while (len == 0 && i++ < 5); - if (len == 0) - goto drop_it; - } - - /* Omit CRC. */ - len -= 4; - - if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) { - int skb_size; - skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); - if (skb_size < 0) - goto drop_it; - dma_unmap_single(bp->sdev->dma_dev, map, - skb_size, DMA_FROM_DEVICE); - /* Leave out rx_header */ - skb_put(skb, len + RX_PKT_OFFSET); - skb_pull(skb, RX_PKT_OFFSET); - } else { - struct sk_buff *copy_skb; - - b44_recycle_rx(bp, cons, bp->rx_prod); - copy_skb = netdev_alloc_skb(bp->dev, len + 2); - if (copy_skb == NULL) - goto drop_it_no_recycle; - - skb_reserve(copy_skb, 2); - skb_put(copy_skb, len); - /* DMA sync done above, copy just the actual packet */ - skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET, - copy_skb->data, len); - skb = copy_skb; - } - skb_checksum_none_assert(skb); - skb->protocol = eth_type_trans(skb, bp->dev); - netif_receive_skb(skb); - received++; - budget--; - next_pkt: - bp->rx_prod = (bp->rx_prod + 1) & - (B44_RX_RING_SIZE - 1); - cons = (cons + 1) & (B44_RX_RING_SIZE - 1); - } - - bp->rx_cons = cons; - bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc)); - - return received; -} - -static int b44_poll(struct napi_struct *napi, int budget) -{ - struct b44 *bp = container_of(napi, struct b44, napi); - int work_done; - unsigned long flags; - - spin_lock_irqsave(&bp->lock, flags); - - if (bp->istat & (ISTAT_TX | ISTAT_TO)) { - /* spin_lock(&bp->tx_lock); */ - b44_tx(bp); - /* spin_unlock(&bp->tx_lock); */ - } - if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */ - bp->istat &= ~ISTAT_RFO; - b44_disable_ints(bp); - ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */ - b44_init_rings(bp); - b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); - netif_wake_queue(bp->dev); - } - - spin_unlock_irqrestore(&bp->lock, flags); - - work_done = 0; - if (bp->istat & ISTAT_RX) - work_done += b44_rx(bp, budget); - - if (bp->istat & ISTAT_ERRORS) { - spin_lock_irqsave(&bp->lock, flags); - b44_halt(bp); - b44_init_rings(bp); - b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); - netif_wake_queue(bp->dev); - spin_unlock_irqrestore(&bp->lock, flags); - work_done = 0; - } - - if (work_done < budget) { - napi_complete(napi); - b44_enable_ints(bp); - } - - return work_done; -} - -static irqreturn_t b44_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct b44 *bp = netdev_priv(dev); - u32 istat, imask; - int handled = 0; - - spin_lock(&bp->lock); - - istat = br32(bp, B44_ISTAT); - imask = br32(bp, B44_IMASK); - - /* The interrupt mask register controls which interrupt bits - * will actually raise an interrupt to the CPU when set by hw/firmware, - * but doesn't mask off the bits. 
- */ - istat &= imask; - if (istat) { - handled = 1; - - if (unlikely(!netif_running(dev))) { - netdev_info(dev, "late interrupt\n"); - goto irq_ack; - } - - if (napi_schedule_prep(&bp->napi)) { - /* NOTE: These writes are posted by the readback of - * the ISTAT register below. - */ - bp->istat = istat; - __b44_disable_ints(bp); - __napi_schedule(&bp->napi); - } - -irq_ack: - bw32(bp, B44_ISTAT, istat); - br32(bp, B44_ISTAT); - } - spin_unlock(&bp->lock); - return IRQ_RETVAL(handled); -} - -static void b44_tx_timeout(struct net_device *dev) -{ - struct b44 *bp = netdev_priv(dev); - - netdev_err(dev, "transmit timed out, resetting\n"); - - spin_lock_irq(&bp->lock); - - b44_halt(bp); - b44_init_rings(bp); - b44_init_hw(bp, B44_FULL_RESET); - - spin_unlock_irq(&bp->lock); - - b44_enable_ints(bp); - - netif_wake_queue(dev); -} - -static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct b44 *bp = netdev_priv(dev); - int rc = NETDEV_TX_OK; - dma_addr_t mapping; - u32 len, entry, ctrl; - unsigned long flags; - - len = skb->len; - spin_lock_irqsave(&bp->lock, flags); - - /* This is a hard error, log it. */ - if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { - netif_stop_queue(dev); - netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); - goto err_out; - } - - mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE); - if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { - struct sk_buff *bounce_skb; - - /* Chip can't handle DMA to/from >1GB, use bounce buffer */ - if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) - dma_unmap_single(bp->sdev->dma_dev, mapping, len, - DMA_TO_DEVICE); - - bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA); - if (!bounce_skb) - goto err_out; - - mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data, - len, DMA_TO_DEVICE); - if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { - if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) - dma_unmap_single(bp->sdev->dma_dev, mapping, - len, DMA_TO_DEVICE); - dev_kfree_skb_any(bounce_skb); - goto err_out; - } - - skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len); - dev_kfree_skb_any(skb); - skb = bounce_skb; - } - - entry = bp->tx_prod; - bp->tx_buffers[entry].skb = skb; - bp->tx_buffers[entry].mapping = mapping; - - ctrl = (len & DESC_CTRL_LEN); - ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF; - if (entry == (B44_TX_RING_SIZE - 1)) - ctrl |= DESC_CTRL_EOT; - - bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); - bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); - - if (bp->flags & B44_FLAG_TX_RING_HACK) - b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma, - entry * sizeof(bp->tx_ring[0]), - DMA_TO_DEVICE); - - entry = NEXT_TX(entry); - - bp->tx_prod = entry; - - wmb(); - - bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); - if (bp->flags & B44_FLAG_BUGGY_TXPTR) - bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); - if (bp->flags & B44_FLAG_REORDER_BUG) - br32(bp, B44_DMATX_PTR); - - if (TX_BUFFS_AVAIL(bp) < 1) - netif_stop_queue(dev); - -out_unlock: - spin_unlock_irqrestore(&bp->lock, flags); - - return rc; - -err_out: - rc = NETDEV_TX_BUSY; - goto out_unlock; -} - -static int b44_change_mtu(struct net_device *dev, int new_mtu) -{ - struct b44 *bp = netdev_priv(dev); - - if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU) - return -EINVAL; - - if (!netif_running(dev)) { - /* We'll just catch it later when the - * device is up'd. 
- */ - dev->mtu = new_mtu; - return 0; - } - - spin_lock_irq(&bp->lock); - b44_halt(bp); - dev->mtu = new_mtu; - b44_init_rings(bp); - b44_init_hw(bp, B44_FULL_RESET); - spin_unlock_irq(&bp->lock); - - b44_enable_ints(bp); - - return 0; -} - -/* Free up pending packets in all rx/tx rings. - * - * The chip has been shut down and the driver detached from - * the networking, so no interrupts or new tx packets will - * end up in the driver. bp->lock is not held and we are not - * in an interrupt context and thus may sleep. - */ -static void b44_free_rings(struct b44 *bp) -{ - struct ring_info *rp; - int i; - - for (i = 0; i < B44_RX_RING_SIZE; i++) { - rp = &bp->rx_buffers[i]; - - if (rp->skb == NULL) - continue; - dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ, - DMA_FROM_DEVICE); - dev_kfree_skb_any(rp->skb); - rp->skb = NULL; - } - - /* XXX needs changes once NETIF_F_SG is set... */ - for (i = 0; i < B44_TX_RING_SIZE; i++) { - rp = &bp->tx_buffers[i]; - - if (rp->skb == NULL) - continue; - dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len, - DMA_TO_DEVICE); - dev_kfree_skb_any(rp->skb); - rp->skb = NULL; - } -} - -/* Initialize tx/rx rings for packet processing. - * - * The chip has been shut down and the driver detached from - * the networking, so no interrupts or new tx packets will - * end up in the driver. - */ -static void b44_init_rings(struct b44 *bp) -{ - int i; - - b44_free_rings(bp); - - memset(bp->rx_ring, 0, B44_RX_RING_BYTES); - memset(bp->tx_ring, 0, B44_TX_RING_BYTES); - - if (bp->flags & B44_FLAG_RX_RING_HACK) - dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma, - DMA_TABLE_BYTES, DMA_BIDIRECTIONAL); - - if (bp->flags & B44_FLAG_TX_RING_HACK) - dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma, - DMA_TABLE_BYTES, DMA_TO_DEVICE); - - for (i = 0; i < bp->rx_pending; i++) { - if (b44_alloc_rx_skb(bp, -1, i) < 0) - break; - } -} - -/* - * Must not be invoked with interrupt sources disabled and - * the hardware shutdown down. - */ -static void b44_free_consistent(struct b44 *bp) -{ - kfree(bp->rx_buffers); - bp->rx_buffers = NULL; - kfree(bp->tx_buffers); - bp->tx_buffers = NULL; - if (bp->rx_ring) { - if (bp->flags & B44_FLAG_RX_RING_HACK) { - dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma, - DMA_TABLE_BYTES, DMA_BIDIRECTIONAL); - kfree(bp->rx_ring); - } else - dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, - bp->rx_ring, bp->rx_ring_dma); - bp->rx_ring = NULL; - bp->flags &= ~B44_FLAG_RX_RING_HACK; - } - if (bp->tx_ring) { - if (bp->flags & B44_FLAG_TX_RING_HACK) { - dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma, - DMA_TABLE_BYTES, DMA_TO_DEVICE); - kfree(bp->tx_ring); - } else - dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, - bp->tx_ring, bp->tx_ring_dma); - bp->tx_ring = NULL; - bp->flags &= ~B44_FLAG_TX_RING_HACK; - } -} - -/* - * Must not be invoked with interrupt sources disabled and - * the hardware shutdown down. Can sleep. 
- */ -static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) -{ - int size; - - size = B44_RX_RING_SIZE * sizeof(struct ring_info); - bp->rx_buffers = kzalloc(size, gfp); - if (!bp->rx_buffers) - goto out_err; - - size = B44_TX_RING_SIZE * sizeof(struct ring_info); - bp->tx_buffers = kzalloc(size, gfp); - if (!bp->tx_buffers) - goto out_err; - - size = DMA_TABLE_BYTES; - bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, - &bp->rx_ring_dma, gfp); - if (!bp->rx_ring) { - /* Allocation may have failed due to pci_alloc_consistent - insisting on use of GFP_DMA, which is more restrictive - than necessary... */ - struct dma_desc *rx_ring; - dma_addr_t rx_ring_dma; - - rx_ring = kzalloc(size, gfp); - if (!rx_ring) - goto out_err; - - rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring, - DMA_TABLE_BYTES, - DMA_BIDIRECTIONAL); - - if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) || - rx_ring_dma + size > DMA_BIT_MASK(30)) { - kfree(rx_ring); - goto out_err; - } - - bp->rx_ring = rx_ring; - bp->rx_ring_dma = rx_ring_dma; - bp->flags |= B44_FLAG_RX_RING_HACK; - } - - bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, - &bp->tx_ring_dma, gfp); - if (!bp->tx_ring) { - /* Allocation may have failed due to ssb_dma_alloc_consistent - insisting on use of GFP_DMA, which is more restrictive - than necessary... */ - struct dma_desc *tx_ring; - dma_addr_t tx_ring_dma; - - tx_ring = kzalloc(size, gfp); - if (!tx_ring) - goto out_err; - - tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring, - DMA_TABLE_BYTES, - DMA_TO_DEVICE); - - if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) || - tx_ring_dma + size > DMA_BIT_MASK(30)) { - kfree(tx_ring); - goto out_err; - } - - bp->tx_ring = tx_ring; - bp->tx_ring_dma = tx_ring_dma; - bp->flags |= B44_FLAG_TX_RING_HACK; - } - - return 0; - -out_err: - b44_free_consistent(bp); - return -ENOMEM; -} - -/* bp->lock is held. */ -static void b44_clear_stats(struct b44 *bp) -{ - unsigned long reg; - - bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); - for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) - br32(bp, reg); - for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) - br32(bp, reg); -} - -/* bp->lock is held. */ -static void b44_chip_reset(struct b44 *bp, int reset_kind) -{ - struct ssb_device *sdev = bp->sdev; - bool was_enabled; - - was_enabled = ssb_device_is_enabled(bp->sdev); - - ssb_device_enable(bp->sdev, 0); - ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev); - - if (was_enabled) { - bw32(bp, B44_RCV_LAZY, 0); - bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); - b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1); - bw32(bp, B44_DMATX_CTRL, 0); - bp->tx_prod = bp->tx_cons = 0; - if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) { - b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE, - 100, 0); - } - bw32(bp, B44_DMARX_CTRL, 0); - bp->rx_prod = bp->rx_cons = 0; - } - - b44_clear_stats(bp); - - /* - * Don't enable PHY if we are doing a partial reset - * we are probably going to power down - */ - if (reset_kind == B44_CHIP_RESET_PARTIAL) - return; - - switch (sdev->bus->bustype) { - case SSB_BUSTYPE_SSB: - bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | - (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus), - B44_MDC_RATIO) - & MDIO_CTRL_MAXF_MASK))); - break; - case SSB_BUSTYPE_PCI: - bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | - (0x0d & MDIO_CTRL_MAXF_MASK))); - break; - case SSB_BUSTYPE_PCMCIA: - case SSB_BUSTYPE_SDIO: - WARN_ON(1); /* A device with this bus does not exist. 
*/ - break; - } - - br32(bp, B44_MDIO_CTRL); - - if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { - bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL); - br32(bp, B44_ENET_CTRL); - bp->flags &= ~B44_FLAG_INTERNAL_PHY; - } else { - u32 val = br32(bp, B44_DEVCTRL); - - if (val & DEVCTRL_EPR) { - bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR)); - br32(bp, B44_DEVCTRL); - udelay(100); - } - bp->flags |= B44_FLAG_INTERNAL_PHY; - } -} - -/* bp->lock is held. */ -static void b44_halt(struct b44 *bp) -{ - b44_disable_ints(bp); - /* reset PHY */ - b44_phy_reset(bp); - /* power down PHY */ - netdev_info(bp->dev, "powering down PHY\n"); - bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN); - /* now reset the chip, but without enabling the MAC&PHY - * part of it. This has to be done _after_ we shut down the PHY */ - b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); -} - -/* bp->lock is held. */ -static void __b44_set_mac_addr(struct b44 *bp) -{ - bw32(bp, B44_CAM_CTRL, 0); - if (!(bp->dev->flags & IFF_PROMISC)) { - u32 val; - - __b44_cam_write(bp, bp->dev->dev_addr, 0); - val = br32(bp, B44_CAM_CTRL); - bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); - } -} - -static int b44_set_mac_addr(struct net_device *dev, void *p) -{ - struct b44 *bp = netdev_priv(dev); - struct sockaddr *addr = p; - u32 val; - - if (netif_running(dev)) - return -EBUSY; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - - spin_lock_irq(&bp->lock); - - val = br32(bp, B44_RXCONFIG); - if (!(val & RXCONFIG_CAM_ABSENT)) - __b44_set_mac_addr(bp); - - spin_unlock_irq(&bp->lock); - - return 0; -} - -/* Called at device open time to get the chip ready for - * packet processing. Invoked with bp->lock held. - */ -static void __b44_set_rx_mode(struct net_device *); -static void b44_init_hw(struct b44 *bp, int reset_kind) -{ - u32 val; - - b44_chip_reset(bp, B44_CHIP_RESET_FULL); - if (reset_kind == B44_FULL_RESET) { - b44_phy_reset(bp); - b44_setup_phy(bp); - } - - /* Enable CRC32, set proper LED modes and power on PHY */ - bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL); - bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT)); - - /* This sets the MAC address too. 
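- * (__b44_set_rx_mode() reprograms the CAM with dev->dev_addr via
- * __b44_set_mac_addr() whenever the interface is not promiscuous.)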
*/ - __b44_set_rx_mode(bp->dev); - - /* MTU + eth header + possible VLAN tag + struct rx_header */ - bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); - bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); - - bw32(bp, B44_TX_WMARK, 56); /* XXX magic */ - if (reset_kind == B44_PARTIAL_RESET) { - bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | - (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))); - } else { - bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE); - bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset); - bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | - (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))); - bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset); - - bw32(bp, B44_DMARX_PTR, bp->rx_pending); - bp->rx_prod = bp->rx_pending; - - bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); - } - - val = br32(bp, B44_ENET_CTRL); - bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE)); -} - -static int b44_open(struct net_device *dev) -{ - struct b44 *bp = netdev_priv(dev); - int err; - - err = b44_alloc_consistent(bp, GFP_KERNEL); - if (err) - goto out; - - napi_enable(&bp->napi); - - b44_init_rings(bp); - b44_init_hw(bp, B44_FULL_RESET); - - b44_check_phy(bp); - - err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); - if (unlikely(err < 0)) { - napi_disable(&bp->napi); - b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); - b44_free_rings(bp); - b44_free_consistent(bp); - goto out; - } - - init_timer(&bp->timer); - bp->timer.expires = jiffies + HZ; - bp->timer.data = (unsigned long) bp; - bp->timer.function = b44_timer; - add_timer(&bp->timer); - - b44_enable_ints(bp); - netif_start_queue(dev); -out: - return err; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling receive - used by netconsole and other diagnostic tools - * to allow network i/o with interrupts disabled. - */ -static void b44_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - b44_interrupt(dev->irq, dev); - enable_irq(dev->irq); -} -#endif - -static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset) -{ - u32 i; - u32 *pattern = (u32 *) pp; - - for (i = 0; i < bytes; i += sizeof(u32)) { - bw32(bp, B44_FILT_ADDR, table_offset + i); - bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]); - } -} - -static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) -{ - int magicsync = 6; - int k, j, len = offset; - int ethaddr_bytes = ETH_ALEN; - - memset(ppattern + offset, 0xff, magicsync); - for (j = 0; j < magicsync; j++) - set_bit(len++, (unsigned long *) pmask); - - for (j = 0; j < B44_MAX_PATTERNS; j++) { - if ((B44_PATTERN_SIZE - len) >= ETH_ALEN) - ethaddr_bytes = ETH_ALEN; - else - ethaddr_bytes = B44_PATTERN_SIZE - len; - if (ethaddr_bytes <=0) - break; - for (k = 0; k< ethaddr_bytes; k++) { - ppattern[offset + magicsync + - (j * ETH_ALEN) + k] = macaddr[k]; - set_bit(len++, (unsigned long *) pmask); - } - } - return len - 1; -} - -/* Setup magic packet patterns in the b44 WOL - * pattern matching filter. 
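- *
- * For reference: a Wake-on-LAN magic packet is six 0xff sync bytes
- * followed by the station MAC address repeated 16 times, so
- * b44_magic_pattern() above writes the sync bytes at the given
- * payload offset and then as many MAC copies as still fit within
- * B44_PATTERN_SIZE.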
- */ -static void b44_setup_pseudo_magicp(struct b44 *bp) -{ - - u32 val; - int plen0, plen1, plen2; - u8 *pwol_pattern; - u8 pwol_mask[B44_PMASK_SIZE]; - - pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL); - if (!pwol_pattern) { - pr_err("Memory not available for WOL\n"); - return; - } - - /* Ipv4 magic packet pattern - pattern 0.*/ - memset(pwol_mask, 0, B44_PMASK_SIZE); - plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, - B44_ETHIPV4UDP_HLEN); - - bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE); - bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE); - - /* Raw ethernet II magic packet pattern - pattern 1 */ - memset(pwol_pattern, 0, B44_PATTERN_SIZE); - memset(pwol_mask, 0, B44_PMASK_SIZE); - plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, - ETH_HLEN); - - bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, - B44_PATTERN_BASE + B44_PATTERN_SIZE); - bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, - B44_PMASK_BASE + B44_PMASK_SIZE); - - /* Ipv6 magic packet pattern - pattern 2 */ - memset(pwol_pattern, 0, B44_PATTERN_SIZE); - memset(pwol_mask, 0, B44_PMASK_SIZE); - plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, - B44_ETHIPV6UDP_HLEN); - - bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, - B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE); - bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, - B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE); - - kfree(pwol_pattern); - - /* set these pattern's lengths: one less than each real length */ - val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE; - bw32(bp, B44_WKUP_LEN, val); - - /* enable wakeup pattern matching */ - val = br32(bp, B44_DEVCTRL); - bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE); - -} - -#ifdef CONFIG_B44_PCI -static void b44_setup_wol_pci(struct b44 *bp) -{ - u16 val; - - if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) { - bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE); - pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val); - pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE); - } -} -#else -static inline void b44_setup_wol_pci(struct b44 *bp) { } -#endif /* CONFIG_B44_PCI */ - -static void b44_setup_wol(struct b44 *bp) -{ - u32 val; - - bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI); - - if (bp->flags & B44_FLAG_B0_ANDLATER) { - - bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE); - - val = bp->dev->dev_addr[2] << 24 | - bp->dev->dev_addr[3] << 16 | - bp->dev->dev_addr[4] << 8 | - bp->dev->dev_addr[5]; - bw32(bp, B44_ADDR_LO, val); - - val = bp->dev->dev_addr[0] << 8 | - bp->dev->dev_addr[1]; - bw32(bp, B44_ADDR_HI, val); - - val = br32(bp, B44_DEVCTRL); - bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE); - - } else { - b44_setup_pseudo_magicp(bp); - } - b44_setup_wol_pci(bp); -} - -static int b44_close(struct net_device *dev) -{ - struct b44 *bp = netdev_priv(dev); - - netif_stop_queue(dev); - - napi_disable(&bp->napi); - - del_timer_sync(&bp->timer); - - spin_lock_irq(&bp->lock); - - b44_halt(bp); - b44_free_rings(bp); - netif_carrier_off(dev); - - spin_unlock_irq(&bp->lock); - - free_irq(dev->irq, dev); - - if (bp->flags & B44_FLAG_WOL_ENABLE) { - b44_init_hw(bp, B44_PARTIAL_RESET); - b44_setup_wol(bp); - } - - b44_free_consistent(bp); - - return 0; -} - -static struct net_device_stats *b44_get_stats(struct net_device *dev) -{ - struct b44 *bp = netdev_priv(dev); - struct net_device_stats *nstat = &dev->stats; - struct b44_hw_stats *hwstat = &bp->hw_stats; - - /* Convert HW stats 
into netdevice stats. */ - nstat->rx_packets = hwstat->rx_pkts; - nstat->tx_packets = hwstat->tx_pkts; - nstat->rx_bytes = hwstat->rx_octets; - nstat->tx_bytes = hwstat->tx_octets; - nstat->tx_errors = (hwstat->tx_jabber_pkts + - hwstat->tx_oversize_pkts + - hwstat->tx_underruns + - hwstat->tx_excessive_cols + - hwstat->tx_late_cols); - nstat->multicast = hwstat->tx_multicast_pkts; - nstat->collisions = hwstat->tx_total_cols; - - nstat->rx_length_errors = (hwstat->rx_oversize_pkts + - hwstat->rx_undersize); - nstat->rx_over_errors = hwstat->rx_missed_pkts; - nstat->rx_frame_errors = hwstat->rx_align_errs; - nstat->rx_crc_errors = hwstat->rx_crc_errs; - nstat->rx_errors = (hwstat->rx_jabber_pkts + - hwstat->rx_oversize_pkts + - hwstat->rx_missed_pkts + - hwstat->rx_crc_align_errs + - hwstat->rx_undersize + - hwstat->rx_crc_errs + - hwstat->rx_align_errs + - hwstat->rx_symbol_errs); - - nstat->tx_aborted_errors = hwstat->tx_underruns; -#if 0 - /* Carrier lost counter seems to be broken for some devices */ - nstat->tx_carrier_errors = hwstat->tx_carrier_lost; -#endif - - return nstat; -} - -static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) -{ - struct netdev_hw_addr *ha; - int i, num_ents; - - num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE); - i = 0; - netdev_for_each_mc_addr(ha, dev) { - if (i == num_ents) - break; - __b44_cam_write(bp, ha->addr, i++ + 1); - } - return i+1; -} - -static void __b44_set_rx_mode(struct net_device *dev) -{ - struct b44 *bp = netdev_priv(dev); - u32 val; - - val = br32(bp, B44_RXCONFIG); - val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); - if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) { - val |= RXCONFIG_PROMISC; - bw32(bp, B44_RXCONFIG, val); - } else { - unsigned char zero[6] = {0, 0, 0, 0, 0, 0}; - int i = 1; - - __b44_set_mac_addr(bp); - - if ((dev->flags & IFF_ALLMULTI) || - (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE)) - val |= RXCONFIG_ALLMULTI; - else - i = __b44_load_mcast(bp, dev); - - for (; i < 64; i++) - __b44_cam_write(bp, zero, i); - - bw32(bp, B44_RXCONFIG, val); - val = br32(bp, B44_CAM_CTRL); - bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); - } -} - -static void b44_set_rx_mode(struct net_device *dev) -{ - struct b44 *bp = netdev_priv(dev); - - spin_lock_irq(&bp->lock); - __b44_set_rx_mode(dev); - spin_unlock_irq(&bp->lock); -} - -static u32 b44_get_msglevel(struct net_device *dev) -{ - struct b44 *bp = netdev_priv(dev); - return bp->msg_enable; -} - -static void b44_set_msglevel(struct net_device *dev, u32 value) -{ - struct b44 *bp = netdev_priv(dev); - bp->msg_enable = value; -} - -static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct b44 *bp = netdev_priv(dev); - struct ssb_bus *bus = bp->sdev->bus; - - strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - switch (bus->bustype) { - case SSB_BUSTYPE_PCI: - strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info)); - break; - case SSB_BUSTYPE_SSB: - strlcpy(info->bus_info, "SSB", sizeof(info->bus_info)); - break; - case SSB_BUSTYPE_PCMCIA: - case SSB_BUSTYPE_SDIO: - WARN_ON(1); /* A device with this bus does not exist. 
*/ - break; - } -} - -static int b44_nway_reset(struct net_device *dev) -{ - struct b44 *bp = netdev_priv(dev); - u32 bmcr; - int r; - - spin_lock_irq(&bp->lock); - b44_readphy(bp, MII_BMCR, &bmcr); - b44_readphy(bp, MII_BMCR, &bmcr); - r = -EINVAL; - if (bmcr & BMCR_ANENABLE) { - b44_writephy(bp, MII_BMCR, - bmcr | BMCR_ANRESTART); - r = 0; - } - spin_unlock_irq(&bp->lock); - - return r; -} - -static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct b44 *bp = netdev_priv(dev); - - cmd->supported = (SUPPORTED_Autoneg); - cmd->supported |= (SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_MII); - - cmd->advertising = 0; - if (bp->flags & B44_FLAG_ADV_10HALF) - cmd->advertising |= ADVERTISED_10baseT_Half; - if (bp->flags & B44_FLAG_ADV_10FULL) - cmd->advertising |= ADVERTISED_10baseT_Full; - if (bp->flags & B44_FLAG_ADV_100HALF) - cmd->advertising |= ADVERTISED_100baseT_Half; - if (bp->flags & B44_FLAG_ADV_100FULL) - cmd->advertising |= ADVERTISED_100baseT_Full; - cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ? - SPEED_100 : SPEED_10)); - cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? - DUPLEX_FULL : DUPLEX_HALF; - cmd->port = 0; - cmd->phy_address = bp->phy_addr; - cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ? - XCVR_INTERNAL : XCVR_EXTERNAL; - cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ? - AUTONEG_DISABLE : AUTONEG_ENABLE; - if (cmd->autoneg == AUTONEG_ENABLE) - cmd->advertising |= ADVERTISED_Autoneg; - if (!netif_running(dev)){ - ethtool_cmd_speed_set(cmd, 0); - cmd->duplex = 0xff; - } - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - return 0; -} - -static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct b44 *bp = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(cmd); - - /* We do not support gigabit. 
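- * Requests for 1000 Mb/s, whether forced or advertised, fail with
- * -EINVAL below; e.g. the (hypothetical) invocation
- * "ethtool -s eth0 speed 100 duplex full autoneg off" is accepted
- * and maps to B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX.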
*/ - if (cmd->autoneg == AUTONEG_ENABLE) { - if (cmd->advertising & - (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full)) - return -EINVAL; - } else if ((speed != SPEED_100 && - speed != SPEED_10) || - (cmd->duplex != DUPLEX_HALF && - cmd->duplex != DUPLEX_FULL)) { - return -EINVAL; - } - - spin_lock_irq(&bp->lock); - - if (cmd->autoneg == AUTONEG_ENABLE) { - bp->flags &= ~(B44_FLAG_FORCE_LINK | - B44_FLAG_100_BASE_T | - B44_FLAG_FULL_DUPLEX | - B44_FLAG_ADV_10HALF | - B44_FLAG_ADV_10FULL | - B44_FLAG_ADV_100HALF | - B44_FLAG_ADV_100FULL); - if (cmd->advertising == 0) { - bp->flags |= (B44_FLAG_ADV_10HALF | - B44_FLAG_ADV_10FULL | - B44_FLAG_ADV_100HALF | - B44_FLAG_ADV_100FULL); - } else { - if (cmd->advertising & ADVERTISED_10baseT_Half) - bp->flags |= B44_FLAG_ADV_10HALF; - if (cmd->advertising & ADVERTISED_10baseT_Full) - bp->flags |= B44_FLAG_ADV_10FULL; - if (cmd->advertising & ADVERTISED_100baseT_Half) - bp->flags |= B44_FLAG_ADV_100HALF; - if (cmd->advertising & ADVERTISED_100baseT_Full) - bp->flags |= B44_FLAG_ADV_100FULL; - } - } else { - bp->flags |= B44_FLAG_FORCE_LINK; - bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX); - if (speed == SPEED_100) - bp->flags |= B44_FLAG_100_BASE_T; - if (cmd->duplex == DUPLEX_FULL) - bp->flags |= B44_FLAG_FULL_DUPLEX; - } - - if (netif_running(dev)) - b44_setup_phy(bp); - - spin_unlock_irq(&bp->lock); - - return 0; -} - -static void b44_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) -{ - struct b44 *bp = netdev_priv(dev); - - ering->rx_max_pending = B44_RX_RING_SIZE - 1; - ering->rx_pending = bp->rx_pending; - - /* XXX ethtool lacks a tx_max_pending, oops... */ -} - -static int b44_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) -{ - struct b44 *bp = netdev_priv(dev); - - if ((ering->rx_pending > B44_RX_RING_SIZE - 1) || - (ering->rx_mini_pending != 0) || - (ering->rx_jumbo_pending != 0) || - (ering->tx_pending > B44_TX_RING_SIZE - 1)) - return -EINVAL; - - spin_lock_irq(&bp->lock); - - bp->rx_pending = ering->rx_pending; - bp->tx_pending = ering->tx_pending; - - b44_halt(bp); - b44_init_rings(bp); - b44_init_hw(bp, B44_FULL_RESET); - netif_wake_queue(bp->dev); - spin_unlock_irq(&bp->lock); - - b44_enable_ints(bp); - - return 0; -} - -static void b44_get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct b44 *bp = netdev_priv(dev); - - epause->autoneg = - (bp->flags & B44_FLAG_PAUSE_AUTO) != 0; - epause->rx_pause = - (bp->flags & B44_FLAG_RX_PAUSE) != 0; - epause->tx_pause = - (bp->flags & B44_FLAG_TX_PAUSE) != 0; -} - -static int b44_set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct b44 *bp = netdev_priv(dev); - - spin_lock_irq(&bp->lock); - if (epause->autoneg) - bp->flags |= B44_FLAG_PAUSE_AUTO; - else - bp->flags &= ~B44_FLAG_PAUSE_AUTO; - if (epause->rx_pause) - bp->flags |= B44_FLAG_RX_PAUSE; - else - bp->flags &= ~B44_FLAG_RX_PAUSE; - if (epause->tx_pause) - bp->flags |= B44_FLAG_TX_PAUSE; - else - bp->flags &= ~B44_FLAG_TX_PAUSE; - if (bp->flags & B44_FLAG_PAUSE_AUTO) { - b44_halt(bp); - b44_init_rings(bp); - b44_init_hw(bp, B44_FULL_RESET); - } else { - __b44_set_flow_ctrl(bp, bp->flags); - } - spin_unlock_irq(&bp->lock); - - b44_enable_ints(bp); - - return 0; -} - -static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data) -{ - switch(stringset) { - case ETH_SS_STATS: - memcpy(data, *b44_gstrings, sizeof(b44_gstrings)); - break; - } -} - -static int b44_get_sset_count(struct 
net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(b44_gstrings); - default: - return -EOPNOTSUPP; - } -} - -static void b44_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct b44 *bp = netdev_priv(dev); - u32 *val = &bp->hw_stats.tx_good_octets; - u32 i; - - spin_lock_irq(&bp->lock); - - b44_stats_update(bp); - - for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) - *data++ = *val++; - - spin_unlock_irq(&bp->lock); -} - -static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct b44 *bp = netdev_priv(dev); - - wol->supported = WAKE_MAGIC; - if (bp->flags & B44_FLAG_WOL_ENABLE) - wol->wolopts = WAKE_MAGIC; - else - wol->wolopts = 0; - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct b44 *bp = netdev_priv(dev); - - spin_lock_irq(&bp->lock); - if (wol->wolopts & WAKE_MAGIC) - bp->flags |= B44_FLAG_WOL_ENABLE; - else - bp->flags &= ~B44_FLAG_WOL_ENABLE; - spin_unlock_irq(&bp->lock); - - return 0; -} - -static const struct ethtool_ops b44_ethtool_ops = { - .get_drvinfo = b44_get_drvinfo, - .get_settings = b44_get_settings, - .set_settings = b44_set_settings, - .nway_reset = b44_nway_reset, - .get_link = ethtool_op_get_link, - .get_wol = b44_get_wol, - .set_wol = b44_set_wol, - .get_ringparam = b44_get_ringparam, - .set_ringparam = b44_set_ringparam, - .get_pauseparam = b44_get_pauseparam, - .set_pauseparam = b44_set_pauseparam, - .get_msglevel = b44_get_msglevel, - .set_msglevel = b44_set_msglevel, - .get_strings = b44_get_strings, - .get_sset_count = b44_get_sset_count, - .get_ethtool_stats = b44_get_ethtool_stats, -}; - -static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct mii_ioctl_data *data = if_mii(ifr); - struct b44 *bp = netdev_priv(dev); - int err = -EINVAL; - - if (!netif_running(dev)) - goto out; - - spin_lock_irq(&bp->lock); - err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL); - spin_unlock_irq(&bp->lock); -out: - return err; -} - -static int __devinit b44_get_invariants(struct b44 *bp) -{ - struct ssb_device *sdev = bp->sdev; - int err = 0; - u8 *addr; - - bp->dma_offset = ssb_dma_translation(sdev); - - if (sdev->bus->bustype == SSB_BUSTYPE_SSB && - instance > 1) { - addr = sdev->bus->sprom.et1mac; - bp->phy_addr = sdev->bus->sprom.et1phyaddr; - } else { - addr = sdev->bus->sprom.et0mac; - bp->phy_addr = sdev->bus->sprom.et0phyaddr; - } - /* Some ROMs have buggy PHY addresses with the high - * bits set (sign extension?). Truncate them to a - * valid PHY address. */ - bp->phy_addr &= 0x1F; - - memcpy(bp->dev->dev_addr, addr, 6); - - if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ - pr_err("Invalid MAC address found in EEPROM\n"); - return -EINVAL; - } - - memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); - - bp->imask = IMASK_DEF; - - /* XXX - really required? 
- bp->flags |= B44_FLAG_BUGGY_TXPTR; - */ - - if (bp->sdev->id.revision >= 7) - bp->flags |= B44_FLAG_B0_ANDLATER; - - return err; -} - -static const struct net_device_ops b44_netdev_ops = { - .ndo_open = b44_open, - .ndo_stop = b44_close, - .ndo_start_xmit = b44_start_xmit, - .ndo_get_stats = b44_get_stats, - .ndo_set_multicast_list = b44_set_rx_mode, - .ndo_set_mac_address = b44_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = b44_ioctl, - .ndo_tx_timeout = b44_tx_timeout, - .ndo_change_mtu = b44_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = b44_poll_controller, -#endif -}; - -static int __devinit b44_init_one(struct ssb_device *sdev, - const struct ssb_device_id *ent) -{ - struct net_device *dev; - struct b44 *bp; - int err; - - instance++; - - pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION); - - dev = alloc_etherdev(sizeof(*bp)); - if (!dev) { - dev_err(sdev->dev, "Etherdev alloc failed, aborting\n"); - err = -ENOMEM; - goto out; - } - - SET_NETDEV_DEV(dev, sdev->dev); - - /* No interesting netdevice features in this card... */ - dev->features |= 0; - - bp = netdev_priv(dev); - bp->sdev = sdev; - bp->dev = dev; - bp->force_copybreak = 0; - - bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); - - spin_lock_init(&bp->lock); - - bp->rx_pending = B44_DEF_RX_RING_PENDING; - bp->tx_pending = B44_DEF_TX_RING_PENDING; - - dev->netdev_ops = &b44_netdev_ops; - netif_napi_add(dev, &bp->napi, b44_poll, 64); - dev->watchdog_timeo = B44_TX_TIMEOUT; - dev->irq = sdev->irq; - SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); - - err = ssb_bus_powerup(sdev->bus, 0); - if (err) { - dev_err(sdev->dev, - "Failed to powerup the bus\n"); - goto err_out_free_dev; - } - - if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) || - dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) { - dev_err(sdev->dev, - "Required 30BIT DMA mask unsupported by the system\n"); - goto err_out_powerdown; - } - - err = b44_get_invariants(bp); - if (err) { - dev_err(sdev->dev, - "Problem fetching invariants of chip, aborting\n"); - goto err_out_powerdown; - } - - bp->mii_if.dev = dev; - bp->mii_if.mdio_read = b44_mii_read; - bp->mii_if.mdio_write = b44_mii_write; - bp->mii_if.phy_id = bp->phy_addr; - bp->mii_if.phy_id_mask = 0x1f; - bp->mii_if.reg_num_mask = 0x1f; - - /* By default, advertise all speed/duplex settings. */ - bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL | - B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL); - - /* By default, auto-negotiate PAUSE. */ - bp->flags |= B44_FLAG_PAUSE_AUTO; - - err = register_netdev(dev); - if (err) { - dev_err(sdev->dev, "Cannot register net device, aborting\n"); - goto err_out_powerdown; - } - - netif_carrier_off(dev); - - ssb_set_drvdata(sdev, dev); - - /* Chip reset provides power to the b44 MAC & PCI cores, which - * is necessary for MAC register access. 
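- * A full reset, unlike the B44_CHIP_RESET_PARTIAL used on the
- * shutdown paths, also performs the MDIO/PHY bring-up inside
- * b44_chip_reset(), so the PHY probe that follows can work.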
- */ - b44_chip_reset(bp, B44_CHIP_RESET_FULL); - - /* do a phy reset to test if there is an active phy */ - if (b44_phy_reset(bp) < 0) - bp->phy_addr = B44_PHY_ADDR_NO_PHY; - - netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr); - - return 0; - -err_out_powerdown: - ssb_bus_may_powerdown(sdev->bus); - -err_out_free_dev: - free_netdev(dev); - -out: - return err; -} - -static void __devexit b44_remove_one(struct ssb_device *sdev) -{ - struct net_device *dev = ssb_get_drvdata(sdev); - - unregister_netdev(dev); - ssb_device_disable(sdev, 0); - ssb_bus_may_powerdown(sdev->bus); - free_netdev(dev); - ssb_pcihost_set_power_state(sdev, PCI_D3hot); - ssb_set_drvdata(sdev, NULL); -} - -static int b44_suspend(struct ssb_device *sdev, pm_message_t state) -{ - struct net_device *dev = ssb_get_drvdata(sdev); - struct b44 *bp = netdev_priv(dev); - - if (!netif_running(dev)) - return 0; - - del_timer_sync(&bp->timer); - - spin_lock_irq(&bp->lock); - - b44_halt(bp); - netif_carrier_off(bp->dev); - netif_device_detach(bp->dev); - b44_free_rings(bp); - - spin_unlock_irq(&bp->lock); - - free_irq(dev->irq, dev); - if (bp->flags & B44_FLAG_WOL_ENABLE) { - b44_init_hw(bp, B44_PARTIAL_RESET); - b44_setup_wol(bp); - } - - ssb_pcihost_set_power_state(sdev, PCI_D3hot); - return 0; -} - -static int b44_resume(struct ssb_device *sdev) -{ - struct net_device *dev = ssb_get_drvdata(sdev); - struct b44 *bp = netdev_priv(dev); - int rc = 0; - - rc = ssb_bus_powerup(sdev->bus, 0); - if (rc) { - dev_err(sdev->dev, - "Failed to powerup the bus\n"); - return rc; - } - - if (!netif_running(dev)) - return 0; - - spin_lock_irq(&bp->lock); - b44_init_rings(bp); - b44_init_hw(bp, B44_FULL_RESET); - spin_unlock_irq(&bp->lock); - - /* - * As a shared interrupt, the handler can be called immediately. To be - * able to check the interrupt status the hardware must already be - * powered back on (b44_init_hw). 
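- * Should request_irq() fail anyway, the error path below halts
- * the chip and frees the rings again under bp->lock.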
- */ - rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); - if (rc) { - netdev_err(dev, "request_irq failed\n"); - spin_lock_irq(&bp->lock); - b44_halt(bp); - b44_free_rings(bp); - spin_unlock_irq(&bp->lock); - return rc; - } - - netif_device_attach(bp->dev); - - b44_enable_ints(bp); - netif_wake_queue(dev); - - mod_timer(&bp->timer, jiffies + 1); - - return 0; -} - -static struct ssb_driver b44_ssb_driver = { - .name = DRV_MODULE_NAME, - .id_table = b44_ssb_tbl, - .probe = b44_init_one, - .remove = __devexit_p(b44_remove_one), - .suspend = b44_suspend, - .resume = b44_resume, -}; - -static inline int __init b44_pci_init(void) -{ - int err = 0; -#ifdef CONFIG_B44_PCI - err = ssb_pcihost_register(&b44_pci_driver); -#endif - return err; -} - -static inline void __exit b44_pci_exit(void) -{ -#ifdef CONFIG_B44_PCI - ssb_pcihost_unregister(&b44_pci_driver); -#endif -} - -static int __init b44_init(void) -{ - unsigned int dma_desc_align_size = dma_get_cache_alignment(); - int err; - - /* Setup paramaters for syncing RX/TX DMA descriptors */ - dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); - - err = b44_pci_init(); - if (err) - return err; - err = ssb_driver_register(&b44_ssb_driver); - if (err) - b44_pci_exit(); - return err; -} - -static void __exit b44_cleanup(void) -{ - ssb_driver_unregister(&b44_ssb_driver); - b44_pci_exit(); -} - -module_init(b44_init); -module_exit(b44_cleanup); - diff --git a/drivers/net/b44.h b/drivers/net/b44.h deleted file mode 100644 index e1905a4..0000000 --- a/drivers/net/b44.h +++ /dev/null @@ -1,401 +0,0 @@ -#ifndef _B44_H -#define _B44_H - -/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */ -#define B44_DEVCTRL 0x0000UL /* Device Control */ -#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */ -#define DEVCTRL_PFE 0x00000080 /* Pattern Filtering Enable */ -#define DEVCTRL_IPP 0x00000400 /* Internal EPHY Present */ -#define DEVCTRL_EPR 0x00008000 /* EPHY Reset */ -#define DEVCTRL_PME 0x00001000 /* PHY Mode Enable */ -#define DEVCTRL_PMCE 0x00002000 /* PHY Mode Clocks Enable */ -#define DEVCTRL_PADDR 0x0007c000 /* PHY Address */ -#define DEVCTRL_PADDR_SHIFT 18 -#define B44_BIST_STAT 0x000CUL /* Built-In Self-Test Status */ -#define B44_WKUP_LEN 0x0010UL /* Wakeup Length */ -#define WKUP_LEN_P0_MASK 0x0000007f /* Pattern 0 */ -#define WKUP_LEN_D0 0x00000080 -#define WKUP_LEN_P1_MASK 0x00007f00 /* Pattern 1 */ -#define WKUP_LEN_P1_SHIFT 8 -#define WKUP_LEN_D1 0x00008000 -#define WKUP_LEN_P2_MASK 0x007f0000 /* Pattern 2 */ -#define WKUP_LEN_P2_SHIFT 16 -#define WKUP_LEN_D2 0x00000000 -#define WKUP_LEN_P3_MASK 0x7f000000 /* Pattern 3 */ -#define WKUP_LEN_P3_SHIFT 24 -#define WKUP_LEN_D3 0x80000000 -#define WKUP_LEN_DISABLE 0x80808080 -#define WKUP_LEN_ENABLE_TWO 0x80800000 -#define WKUP_LEN_ENABLE_THREE 0x80000000 -#define B44_ISTAT 0x0020UL /* Interrupt Status */ -#define ISTAT_LS 0x00000020 /* Link Change (B0 only) */ -#define ISTAT_PME 0x00000040 /* Power Management Event */ -#define ISTAT_TO 0x00000080 /* General Purpose Timeout */ -#define ISTAT_DSCE 0x00000400 /* Descriptor Error */ -#define ISTAT_DATAE 0x00000800 /* Data Error */ -#define ISTAT_DPE 0x00001000 /* Descr. Protocol Error */ -#define ISTAT_RDU 0x00002000 /* Receive Descr. 
Underflow */ -#define ISTAT_RFO 0x00004000 /* Receive FIFO Overflow */ -#define ISTAT_TFU 0x00008000 /* Transmit FIFO Underflow */ -#define ISTAT_RX 0x00010000 /* RX Interrupt */ -#define ISTAT_TX 0x01000000 /* TX Interrupt */ -#define ISTAT_EMAC 0x04000000 /* EMAC Interrupt */ -#define ISTAT_MII_WRITE 0x08000000 /* MII Write Interrupt */ -#define ISTAT_MII_READ 0x10000000 /* MII Read Interrupt */ -#define ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU) -#define B44_IMASK 0x0024UL /* Interrupt Mask */ -#define IMASK_DEF (ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX) -#define B44_GPTIMER 0x0028UL /* General Purpose Timer */ -#define B44_ADDR_LO 0x0088UL /* ENET Address Lo (B0 only) */ -#define B44_ADDR_HI 0x008CUL /* ENET Address Hi (B0 only) */ -#define B44_FILT_ADDR 0x0090UL /* ENET Filter Address */ -#define B44_FILT_DATA 0x0094UL /* ENET Filter Data */ -#define B44_TXBURST 0x00A0UL /* TX Max Burst Length */ -#define B44_RXBURST 0x00A4UL /* RX Max Burst Length */ -#define B44_MAC_CTRL 0x00A8UL /* MAC Control */ -#define MAC_CTRL_CRC32_ENAB 0x00000001 /* CRC32 Generation Enable */ -#define MAC_CTRL_PHY_PDOWN 0x00000004 /* Onchip EPHY Powerdown */ -#define MAC_CTRL_PHY_EDET 0x00000008 /* Onchip EPHY Energy Detected */ -#define MAC_CTRL_PHY_LEDCTRL 0x000000e0 /* Onchip EPHY LED Control */ -#define MAC_CTRL_PHY_LEDCTRL_SHIFT 5 -#define B44_MAC_FLOW 0x00ACUL /* MAC Flow Control */ -#define MAC_FLOW_RX_HI_WATER 0x000000ff /* Receive FIFO HI Water Mark */ -#define MAC_FLOW_PAUSE_ENAB 0x00008000 /* Enable Pause Frame Generation */ -#define B44_RCV_LAZY 0x0100UL /* Lazy Interrupt Control */ -#define RCV_LAZY_TO_MASK 0x00ffffff /* Timeout */ -#define RCV_LAZY_FC_MASK 0xff000000 /* Frame Count */ -#define RCV_LAZY_FC_SHIFT 24 -#define B44_DMATX_CTRL 0x0200UL /* DMA TX Control */ -#define DMATX_CTRL_ENABLE 0x00000001 /* Enable */ -#define DMATX_CTRL_SUSPEND 0x00000002 /* Suspend Request */ -#define DMATX_CTRL_LPBACK 0x00000004 /* Loopback Enable */ -#define DMATX_CTRL_FAIRPRIOR 0x00000008 /* Fair Priority */ -#define DMATX_CTRL_FLUSH 0x00000010 /* Flush Request */ -#define B44_DMATX_ADDR 0x0204UL /* DMA TX Descriptor Ring Address */ -#define B44_DMATX_PTR 0x0208UL /* DMA TX Last Posted Descriptor */ -#define B44_DMATX_STAT 0x020CUL /* DMA TX Current Active Desc. + Status */ -#define DMATX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ -#define DMATX_STAT_SMASK 0x0000f000 /* State Mask */ -#define DMATX_STAT_SDISABLED 0x00000000 /* State Disabled */ -#define DMATX_STAT_SACTIVE 0x00001000 /* State Active */ -#define DMATX_STAT_SIDLE 0x00002000 /* State Idle Wait */ -#define DMATX_STAT_SSTOPPED 0x00003000 /* State Stopped */ -#define DMATX_STAT_SSUSP 0x00004000 /* State Suspend Pending */ -#define DMATX_STAT_EMASK 0x000f0000 /* Error Mask */ -#define DMATX_STAT_ENONE 0x00000000 /* Error None */ -#define DMATX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */ -#define DMATX_STAT_EDFU 0x00020000 /* Error Data FIFO Underrun */ -#define DMATX_STAT_EBEBR 0x00030000 /* Error Bus Error on Buffer Read */ -#define DMATX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc.
Access */ -#define DMATX_STAT_FLUSHED 0x00100000 /* Flushed */ -#define B44_DMARX_CTRL 0x0210UL /* DMA RX Control */ -#define DMARX_CTRL_ENABLE 0x00000001 /* Enable */ -#define DMARX_CTRL_ROMASK 0x000000fe /* Receive Offset Mask */ -#define DMARX_CTRL_ROSHIFT 1 /* Receive Offset Shift */ -#define B44_DMARX_ADDR 0x0214UL /* DMA RX Descriptor Ring Address */ -#define B44_DMARX_PTR 0x0218UL /* DMA RX Last Posted Descriptor */ -#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */ -#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ -#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */ -#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */ -#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */ -#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */ -#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */ -#define DMARX_STAT_EMASK 0x000f0000 /* Error Mask */ -#define DMARX_STAT_ENONE 0x00000000 /* Error None */ -#define DMARX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */ -#define DMARX_STAT_EDFO 0x00020000 /* Error Data FIFO Overflow */ -#define DMARX_STAT_EBEBW 0x00030000 /* Error Bus Error on Buffer Write */ -#define DMARX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */ -#define B44_DMAFIFO_AD 0x0220UL /* DMA FIFO Diag Address */ -#define DMAFIFO_AD_OMASK 0x0000ffff /* Offset Mask */ -#define DMAFIFO_AD_SMASK 0x000f0000 /* Select Mask */ -#define DMAFIFO_AD_SXDD 0x00000000 /* Select Transmit DMA Data */ -#define DMAFIFO_AD_SXDP 0x00010000 /* Select Transmit DMA Pointers */ -#define DMAFIFO_AD_SRDD 0x00040000 /* Select Receive DMA Data */ -#define DMAFIFO_AD_SRDP 0x00050000 /* Select Receive DMA Pointers */ -#define DMAFIFO_AD_SXFD 0x00080000 /* Select Transmit FIFO Data */ -#define DMAFIFO_AD_SXFP 0x00090000 /* Select Transmit FIFO Pointers */ -#define DMAFIFO_AD_SRFD 0x000c0000 /* Select Receive FIFO Data */ -#define DMAFIFO_AD_SRFP 0x000c0000 /* Select Receive FIFO Pointers */ -#define B44_DMAFIFO_LO 0x0224UL /* DMA FIFO Diag Low Data */ -#define B44_DMAFIFO_HI 0x0228UL /* DMA FIFO Diag High Data */ -#define B44_RXCONFIG 0x0400UL /* EMAC RX Config */ -#define RXCONFIG_DBCAST 0x00000001 /* Disable Broadcast */ -#define RXCONFIG_ALLMULTI 0x00000002 /* Accept All Multicast */ -#define RXCONFIG_NORX_WHILE_TX 0x00000004 /* Receive Disable While Transmitting */ -#define RXCONFIG_PROMISC 0x00000008 /* Promiscuous Enable */ -#define RXCONFIG_LPBACK 0x00000010 /* Loopback Enable */ -#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */ -#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */ -#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */ -#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */ -#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */ -#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */ -#define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */ -#define MDIO_CTRL_MAXF_MASK 0x0000007f /* MDC Frequency */ -#define MDIO_CTRL_PREAMBLE 0x00000080 /* MII Preamble Enable */ -#define B44_MDIO_DATA 0x0414UL /* EMAC MDIO Data */ -#define MDIO_DATA_DATA 0x0000ffff /* R/W Data */ -#define MDIO_DATA_TA_MASK 0x00030000 /* Turnaround Value */ -#define MDIO_DATA_TA_SHIFT 16 -#define MDIO_TA_VALID 2 -#define MDIO_DATA_RA_MASK 0x007c0000 /* Register Address */ -#define MDIO_DATA_RA_SHIFT 18 -#define MDIO_DATA_PMD_MASK 0x0f800000 /* Physical Media Device */ -#define MDIO_DATA_PMD_SHIFT 23 -#define MDIO_DATA_OP_MASK 0x30000000 /* Opcode */ -#define MDIO_DATA_OP_SHIFT 28 -#define 
MDIO_OP_WRITE 1 -#define MDIO_OP_READ 2 -#define MDIO_DATA_SB_MASK 0xc0000000 /* Start Bits */ -#define MDIO_DATA_SB_SHIFT 30 -#define MDIO_DATA_SB_START 0x40000000 /* Start Of Frame */ -#define B44_EMAC_IMASK 0x0418UL /* EMAC Interrupt Mask */ -#define B44_EMAC_ISTAT 0x041CUL /* EMAC Interrupt Status */ -#define EMAC_INT_MII 0x00000001 /* MII MDIO Interrupt */ -#define EMAC_INT_MIB 0x00000002 /* MIB Interrupt */ -#define EMAC_INT_FLOW 0x00000003 /* Flow Control Interrupt */ -#define B44_CAM_DATA_LO 0x0420UL /* EMAC CAM Data Low */ -#define B44_CAM_DATA_HI 0x0424UL /* EMAC CAM Data High */ -#define CAM_DATA_HI_VALID 0x00010000 /* Valid Bit */ -#define B44_CAM_CTRL 0x0428UL /* EMAC CAM Control */ -#define CAM_CTRL_ENABLE 0x00000001 /* CAM Enable */ -#define CAM_CTRL_MSEL 0x00000002 /* Mask Select */ -#define CAM_CTRL_READ 0x00000004 /* Read */ -#define CAM_CTRL_WRITE 0x00000008 /* Write */ -#define CAM_CTRL_INDEX_MASK 0x003f0000 /* Index Mask */ -#define CAM_CTRL_INDEX_SHIFT 16 -#define CAM_CTRL_BUSY 0x80000000 /* CAM Busy */ -#define B44_ENET_CTRL 0x042CUL /* EMAC ENET Control */ -#define ENET_CTRL_ENABLE 0x00000001 /* EMAC Enable */ -#define ENET_CTRL_DISABLE 0x00000002 /* EMAC Disable */ -#define ENET_CTRL_SRST 0x00000004 /* EMAC Soft Reset */ -#define ENET_CTRL_EPSEL 0x00000008 /* External PHY Select */ -#define B44_TX_CTRL 0x0430UL /* EMAC TX Control */ -#define TX_CTRL_DUPLEX 0x00000001 /* Full Duplex */ -#define TX_CTRL_FMODE 0x00000002 /* Flow Mode */ -#define TX_CTRL_SBENAB 0x00000004 /* Single Backoff Enable */ -#define TX_CTRL_SMALL_SLOT 0x00000008 /* Small Slottime */ -#define B44_TX_WMARK 0x0434UL /* EMAC TX Watermark */ -#define B44_MIB_CTRL 0x0438UL /* EMAC MIB Control */ -#define MIB_CTRL_CLR_ON_READ 0x00000001 /* Autoclear on Read */ -#define B44_TX_GOOD_O 0x0500UL /* MIB TX Good Octets */ -#define B44_TX_GOOD_P 0x0504UL /* MIB TX Good Packets */ -#define B44_TX_O 0x0508UL /* MIB TX Octets */ -#define B44_TX_P 0x050CUL /* MIB TX Packets */ -#define B44_TX_BCAST 0x0510UL /* MIB TX Broadcast Packets */ -#define B44_TX_MCAST 0x0514UL /* MIB TX Multicast Packets */ -#define B44_TX_64 0x0518UL /* MIB TX <= 64 byte Packets */ -#define B44_TX_65_127 0x051CUL /* MIB TX 65 to 127 byte Packets */ -#define B44_TX_128_255 0x0520UL /* MIB TX 128 to 255 byte Packets */ -#define B44_TX_256_511 0x0524UL /* MIB TX 256 to 511 byte Packets */ -#define B44_TX_512_1023 0x0528UL /* MIB TX 512 to 1023 byte Packets */ -#define B44_TX_1024_MAX 0x052CUL /* MIB TX 1024 to max byte Packets */ -#define B44_TX_JABBER 0x0530UL /* MIB TX Jabber Packets */ -#define B44_TX_OSIZE 0x0534UL /* MIB TX Oversize Packets */ -#define B44_TX_FRAG 0x0538UL /* MIB TX Fragment Packets */ -#define B44_TX_URUNS 0x053CUL /* MIB TX Underruns */ -#define B44_TX_TCOLS 0x0540UL /* MIB TX Total Collisions */ -#define B44_TX_SCOLS 0x0544UL /* MIB TX Single Collisions */ -#define B44_TX_MCOLS 0x0548UL /* MIB TX Multiple Collisions */ -#define B44_TX_ECOLS 0x054CUL /* MIB TX Excessive Collisions */ -#define B44_TX_LCOLS 0x0550UL /* MIB TX Late Collisions */ -#define B44_TX_DEFERED 0x0554UL /* MIB TX Deferred Packets */ -#define B44_TX_CLOST 0x0558UL /* MIB TX Carrier Lost */ -#define B44_TX_PAUSE 0x055CUL /* MIB TX Pause Packets */ -#define B44_RX_GOOD_O 0x0580UL /* MIB RX Good Octets */ -#define B44_RX_GOOD_P 0x0584UL /* MIB RX Good Packets */ -#define B44_RX_O 0x0588UL /* MIB RX Octets */ -#define B44_RX_P 0x058CUL /* MIB RX Packets */ -#define B44_RX_BCAST 0x0590UL /* MIB RX Broadcast Packets */ -#define B44_RX_MCAST 0x0594UL /*
MIB RX Multicast Packets */ -#define B44_RX_64 0x0598UL /* MIB RX <= 64 byte Packets */ -#define B44_RX_65_127 0x059CUL /* MIB RX 65 to 127 byte Packets */ -#define B44_RX_128_255 0x05A0UL /* MIB RX 128 to 255 byte Packets */ -#define B44_RX_256_511 0x05A4UL /* MIB RX 256 to 511 byte Packets */ -#define B44_RX_512_1023 0x05A8UL /* MIB RX 512 to 1023 byte Packets */ -#define B44_RX_1024_MAX 0x05ACUL /* MIB RX 1024 to max byte Packets */ -#define B44_RX_JABBER 0x05B0UL /* MIB RX Jabber Packets */ -#define B44_RX_OSIZE 0x05B4UL /* MIB RX Oversize Packets */ -#define B44_RX_FRAG 0x05B8UL /* MIB RX Fragment Packets */ -#define B44_RX_MISS 0x05BCUL /* MIB RX Missed Packets */ -#define B44_RX_CRCA 0x05C0UL /* MIB RX CRC Align Errors */ -#define B44_RX_USIZE 0x05C4UL /* MIB RX Undersize Packets */ -#define B44_RX_CRC 0x05C8UL /* MIB RX CRC Errors */ -#define B44_RX_ALIGN 0x05CCUL /* MIB RX Align Errors */ -#define B44_RX_SYM 0x05D0UL /* MIB RX Symbol Errors */ -#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */ -#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */ - -/* 4400 PHY registers */ -#define B44_MII_AUXCTRL 24 /* Auxiliary Control */ -#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */ -#define MII_AUXCTRL_SPEED 0x0002 /* 1=100Mbps, 0=10Mbps */ -#define MII_AUXCTRL_FORCED 0x0004 /* Forced 10/100 */ -#define B44_MII_ALEDCTRL 26 /* Activity LED */ -#define MII_ALEDCTRL_ALLMSK 0x7fff -#define B44_MII_TLEDCTRL 27 /* Traffic Meter LED */ -#define MII_TLEDCTRL_ENABLE 0x0040 - -struct dma_desc { - __le32 ctrl; - __le32 addr; -}; - -/* There are only 12 bits in the DMA engine for descriptor offsetting - * so the table must be aligned on a boundary of this. - */ -#define DMA_TABLE_BYTES 4096 - -#define DESC_CTRL_LEN 0x00001fff -#define DESC_CTRL_CMASK 0x0ff00000 /* Core specific bits */ -#define DESC_CTRL_EOT 0x10000000 /* End of Table */ -#define DESC_CTRL_IOC 0x20000000 /* Interrupt On Completion */ -#define DESC_CTRL_EOF 0x40000000 /* End of Frame */ -#define DESC_CTRL_SOF 0x80000000 /* Start of Frame */ - -#define RX_COPY_THRESHOLD 256 - -struct rx_header { - __le16 len; - __le16 flags; - __le16 pad[12]; -}; -#define RX_HEADER_LEN 28 - -#define RX_FLAG_OFIFO 0x00000001 /* FIFO Overflow */ -#define RX_FLAG_CRCERR 0x00000002 /* CRC Error */ -#define RX_FLAG_SERR 0x00000004 /* Receive Symbol Error */ -#define RX_FLAG_ODD 0x00000008 /* Frame has odd number of nibbles */ -#define RX_FLAG_LARGE 0x00000010 /* Frame is > RX MAX Length */ -#define RX_FLAG_MCAST 0x00000020 /* Dest is Multicast Address */ -#define RX_FLAG_BCAST 0x00000040 /* Dest is Broadcast Address */ -#define RX_FLAG_MISS 0x00000080 /* Received due to promisc mode */ -#define RX_FLAG_LAST 0x00000800 /* Last buffer in frame */ -#define RX_FLAG_ERRORS (RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO) - -struct ring_info { - struct sk_buff *skb; - dma_addr_t mapping; -}; - -#define B44_MCAST_TABLE_SIZE 32 -#define B44_PHY_ADDR_NO_PHY 30 -#define B44_MDC_RATIO 5000000 - -#define B44_STAT_REG_DECLARE \ - _B44(tx_good_octets) \ - _B44(tx_good_pkts) \ - _B44(tx_octets) \ - _B44(tx_pkts) \ - _B44(tx_broadcast_pkts) \ - _B44(tx_multicast_pkts) \ - _B44(tx_len_64) \ - _B44(tx_len_65_to_127) \ - _B44(tx_len_128_to_255) \ - _B44(tx_len_256_to_511) \ - _B44(tx_len_512_to_1023) \ - _B44(tx_len_1024_to_max) \ - _B44(tx_jabber_pkts) \ - _B44(tx_oversize_pkts) \ - _B44(tx_fragment_pkts) \ - _B44(tx_underruns) \ - _B44(tx_total_cols) \ - _B44(tx_single_cols) \ - _B44(tx_multiple_cols) \ - _B44(tx_excessive_cols) \ - 
_B44(tx_late_cols) \ - _B44(tx_defered) \ - _B44(tx_carrier_lost) \ - _B44(tx_pause_pkts) \ - _B44(rx_good_octets) \ - _B44(rx_good_pkts) \ - _B44(rx_octets) \ - _B44(rx_pkts) \ - _B44(rx_broadcast_pkts) \ - _B44(rx_multicast_pkts) \ - _B44(rx_len_64) \ - _B44(rx_len_65_to_127) \ - _B44(rx_len_128_to_255) \ - _B44(rx_len_256_to_511) \ - _B44(rx_len_512_to_1023) \ - _B44(rx_len_1024_to_max) \ - _B44(rx_jabber_pkts) \ - _B44(rx_oversize_pkts) \ - _B44(rx_fragment_pkts) \ - _B44(rx_missed_pkts) \ - _B44(rx_crc_align_errs) \ - _B44(rx_undersize) \ - _B44(rx_crc_errs) \ - _B44(rx_align_errs) \ - _B44(rx_symbol_errs) \ - _B44(rx_pause_pkts) \ - _B44(rx_nonpause_pkts) - -/* SW copy of device statistics, kept up to date by periodic timer - * which probes HW values. Check b44_stats_update if you mess with - * the layout - */ -struct b44_hw_stats { -#define _B44(x) u32 x; -B44_STAT_REG_DECLARE -#undef _B44 -}; - -struct ssb_device; - -struct b44 { - spinlock_t lock; - - u32 imask, istat; - - struct dma_desc *rx_ring, *tx_ring; - - u32 tx_prod, tx_cons; - u32 rx_prod, rx_cons; - - struct ring_info *rx_buffers; - struct ring_info *tx_buffers; - - struct napi_struct napi; - - u32 dma_offset; - u32 flags; -#define B44_FLAG_B0_ANDLATER 0x00000001 -#define B44_FLAG_BUGGY_TXPTR 0x00000002 -#define B44_FLAG_REORDER_BUG 0x00000004 -#define B44_FLAG_PAUSE_AUTO 0x00008000 -#define B44_FLAG_FULL_DUPLEX 0x00010000 -#define B44_FLAG_100_BASE_T 0x00020000 -#define B44_FLAG_TX_PAUSE 0x00040000 -#define B44_FLAG_RX_PAUSE 0x00080000 -#define B44_FLAG_FORCE_LINK 0x00100000 -#define B44_FLAG_ADV_10HALF 0x01000000 -#define B44_FLAG_ADV_10FULL 0x02000000 -#define B44_FLAG_ADV_100HALF 0x04000000 -#define B44_FLAG_ADV_100FULL 0x08000000 -#define B44_FLAG_INTERNAL_PHY 0x10000000 -#define B44_FLAG_RX_RING_HACK 0x20000000 -#define B44_FLAG_TX_RING_HACK 0x40000000 -#define B44_FLAG_WOL_ENABLE 0x80000000 - - u32 msg_enable; - - struct timer_list timer; - - struct b44_hw_stats hw_stats; - - struct ssb_device *sdev; - struct net_device *dev; - - dma_addr_t rx_ring_dma, tx_ring_dma; - - u32 rx_pending; - u32 tx_pending; - u8 phy_addr; - u8 force_copybreak; - struct mii_if_info mii_if; -}; - -#endif /* _B44_H */ diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c deleted file mode 100644 index 1d9b985..0000000 --- a/drivers/net/bcm63xx_enet.c +++ /dev/null @@ -1,1966 +0,0 @@ -/* - * Driver for BCM963xx builtin Ethernet mac - * - * Copyright (C) 2008 Maxime Bizon - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include "bcm63xx_enet.h" - -static char bcm_enet_driver_name[] = "bcm63xx_enet"; -static char bcm_enet_driver_version[] = "1.0"; - -static int copybreak __read_mostly = 128; -module_param(copybreak, int, 0); -MODULE_PARM_DESC(copybreak, "Receive copy threshold"); - -/* io memory shared between all devices */ -static void __iomem *bcm_enet_shared_base; - -/* - * io helpers to access mac registers - */ -static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off) -{ - return bcm_readl(priv->base + off); -} - -static inline void enet_writel(struct bcm_enet_priv *priv, - u32 val, u32 off) -{ - bcm_writel(val, priv->base + off); -} - -/* - * io helpers to access shared registers - */ -static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) -{ - return bcm_readl(bcm_enet_shared_base + off); -} - -static inline void enet_dma_writel(struct bcm_enet_priv *priv, - u32 val, u32 off) -{ - bcm_writel(val, bcm_enet_shared_base + off); -} - -/* - * write given data into mii register and wait for transfer to end - * with timeout (average measured transfer time is 25us) - */ -static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) -{ - int limit; - - /* make sure mii interrupt status is cleared */ - enet_writel(priv, ENET_IR_MII, ENET_IR_REG); - - enet_writel(priv, data, ENET_MIIDATA_REG); - wmb(); - - /* busy wait on mii interrupt bit, with timeout */ - limit = 1000; - do { - if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) - break; - udelay(1); - } while (limit-- > 0); - - return (limit < 0) ? 1 : 0; -} - -/* - * MII internal read callback - */ -static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id, - int regnum) -{ - u32 tmp, val; - - tmp = regnum << ENET_MIIDATA_REG_SHIFT; - tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; - tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; - tmp |= ENET_MIIDATA_OP_READ_MASK; - - if (do_mdio_op(priv, tmp)) - return -1; - - val = enet_readl(priv, ENET_MIIDATA_REG); - val &= 0xffff; - return val; -} - -/* - * MII internal write callback - */ -static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id, - int regnum, u16 value) -{ - u32 tmp; - - tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT; - tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; - tmp |= regnum << ENET_MIIDATA_REG_SHIFT; - tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; - tmp |= ENET_MIIDATA_OP_WRITE_MASK; - - (void)do_mdio_op(priv, tmp); - return 0; -} - -/* - * MII read callback from phylib - */ -static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id, - int regnum) -{ - return bcm_enet_mdio_read(bus->priv, mii_id, regnum); -} - -/* - * MII write callback from phylib - */ -static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id, - int regnum, u16 value) -{ - return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); -} - -/* - * MII read callback from mii core - */ -static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id, - int regnum) -{ - return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum); -} - -/* - * MII write callback from mii core - */ -static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id, - int regnum, int value) -{ - bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value); -} - -/* - * refill rx queue - */ -static int bcm_enet_refill_rx(struct net_device *dev) -{ - struct bcm_enet_priv *priv; - - priv = netdev_priv(dev); - - while (priv->rx_desc_count < 
priv->rx_ring_size) { - struct bcm_enet_desc *desc; - struct sk_buff *skb; - dma_addr_t p; - int desc_idx; - u32 len_stat; - - desc_idx = priv->rx_dirty_desc; - desc = &priv->rx_desc_cpu[desc_idx]; - - if (!priv->rx_skb[desc_idx]) { - skb = netdev_alloc_skb(dev, priv->rx_skb_size); - if (!skb) - break; - priv->rx_skb[desc_idx] = skb; - - p = dma_map_single(&priv->pdev->dev, skb->data, - priv->rx_skb_size, - DMA_FROM_DEVICE); - desc->address = p; - } - - len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; - len_stat |= DMADESC_OWNER_MASK; - if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { - len_stat |= DMADESC_WRAP_MASK; - priv->rx_dirty_desc = 0; - } else { - priv->rx_dirty_desc++; - } - wmb(); - desc->len_stat = len_stat; - - priv->rx_desc_count++; - - /* tell dma engine we allocated one buffer */ - enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); - } - - /* If rx ring is still empty, set a timer to try allocating - * again at a later time. */ - if (priv->rx_desc_count == 0 && netif_running(dev)) { - dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); - priv->rx_timeout.expires = jiffies + HZ; - add_timer(&priv->rx_timeout); - } - - return 0; -} - -/* - * timer callback to defer refill rx queue in case we're OOM - */ -static void bcm_enet_refill_rx_timer(unsigned long data) -{ - struct net_device *dev; - struct bcm_enet_priv *priv; - - dev = (struct net_device *)data; - priv = netdev_priv(dev); - - spin_lock(&priv->rx_lock); - bcm_enet_refill_rx((struct net_device *)data); - spin_unlock(&priv->rx_lock); -} - -/* - * extract packet from rx queue - */ -static int bcm_enet_receive_queue(struct net_device *dev, int budget) -{ - struct bcm_enet_priv *priv; - struct device *kdev; - int processed; - - priv = netdev_priv(dev); - kdev = &priv->pdev->dev; - processed = 0; - - /* don't scan ring further than number of refilled - * descriptor */ - if (budget > priv->rx_desc_count) - budget = priv->rx_desc_count; - - do { - struct bcm_enet_desc *desc; - struct sk_buff *skb; - int desc_idx; - u32 len_stat; - unsigned int len; - - desc_idx = priv->rx_curr_desc; - desc = &priv->rx_desc_cpu[desc_idx]; - - /* make sure we actually read the descriptor status at - * each loop */ - rmb(); - - len_stat = desc->len_stat; - - /* break if dma ownership belongs to hw */ - if (len_stat & DMADESC_OWNER_MASK) - break; - - processed++; - priv->rx_curr_desc++; - if (priv->rx_curr_desc == priv->rx_ring_size) - priv->rx_curr_desc = 0; - priv->rx_desc_count--; - - /* if the packet does not have start of packet _and_ - * end of packet flag set, then just recycle it */ - if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { - dev->stats.rx_dropped++; - continue; - } - - /* recycle packet if it's marked as bad */ - if (unlikely(len_stat & DMADESC_ERR_MASK)) { - dev->stats.rx_errors++; - - if (len_stat & DMADESC_OVSIZE_MASK) - dev->stats.rx_length_errors++; - if (len_stat & DMADESC_CRC_MASK) - dev->stats.rx_crc_errors++; - if (len_stat & DMADESC_UNDER_MASK) - dev->stats.rx_frame_errors++; - if (len_stat & DMADESC_OV_MASK) - dev->stats.rx_fifo_errors++; - continue; - } - - /* valid packet */ - skb = priv->rx_skb[desc_idx]; - len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT; - /* don't include FCS */ - len -= 4; - - if (len < copybreak) { - struct sk_buff *nskb; - - nskb = netdev_alloc_skb_ip_align(dev, len); - if (!nskb) { - /* forget packet, just rearm desc */ - dev->stats.rx_dropped++; - continue; - } - - dma_sync_single_for_cpu(kdev, desc->address, - len, DMA_FROM_DEVICE); - 
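			/* copybreak path: the frame is smaller than the copybreak
-			 * threshold, so copy it into a fresh skb and leave the
-			 * original receive buffer mapped in the ring for reuse */
-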
memcpy(nskb->data, skb->data, len); - dma_sync_single_for_device(kdev, desc->address, - len, DMA_FROM_DEVICE); - skb = nskb; - } else { - dma_unmap_single(&priv->pdev->dev, desc->address, - priv->rx_skb_size, DMA_FROM_DEVICE); - priv->rx_skb[desc_idx] = NULL; - } - - skb_put(skb, len); - skb->protocol = eth_type_trans(skb, dev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; - netif_receive_skb(skb); - - } while (--budget > 0); - - if (processed || !priv->rx_desc_count) { - bcm_enet_refill_rx(dev); - - /* kick rx dma */ - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->rx_chan)); - } - - return processed; -} - - -/* - * try to or force reclaim of transmitted buffers - */ -static int bcm_enet_tx_reclaim(struct net_device *dev, int force) -{ - struct bcm_enet_priv *priv; - int released; - - priv = netdev_priv(dev); - released = 0; - - while (priv->tx_desc_count < priv->tx_ring_size) { - struct bcm_enet_desc *desc; - struct sk_buff *skb; - - /* We run in a bh and fight against start_xmit, which - * is called with bh disabled */ - spin_lock(&priv->tx_lock); - - desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; - - if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { - spin_unlock(&priv->tx_lock); - break; - } - - /* ensure other field of the descriptor were not read - * before we checked ownership */ - rmb(); - - skb = priv->tx_skb[priv->tx_dirty_desc]; - priv->tx_skb[priv->tx_dirty_desc] = NULL; - dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, - DMA_TO_DEVICE); - - priv->tx_dirty_desc++; - if (priv->tx_dirty_desc == priv->tx_ring_size) - priv->tx_dirty_desc = 0; - priv->tx_desc_count++; - - spin_unlock(&priv->tx_lock); - - if (desc->len_stat & DMADESC_UNDER_MASK) - dev->stats.tx_errors++; - - dev_kfree_skb(skb); - released++; - } - - if (netif_queue_stopped(dev) && released) - netif_wake_queue(dev); - - return released; -} - -/* - * poll func, called by network core - */ -static int bcm_enet_poll(struct napi_struct *napi, int budget) -{ - struct bcm_enet_priv *priv; - struct net_device *dev; - int tx_work_done, rx_work_done; - - priv = container_of(napi, struct bcm_enet_priv, napi); - dev = priv->net_dev; - - /* ack interrupts */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->tx_chan)); - - /* reclaim sent skb */ - tx_work_done = bcm_enet_tx_reclaim(dev, 0); - - spin_lock(&priv->rx_lock); - rx_work_done = bcm_enet_receive_queue(dev, budget); - spin_unlock(&priv->rx_lock); - - if (rx_work_done >= budget || tx_work_done > 0) { - /* rx/tx queue is not yet empty/clean */ - return rx_work_done; - } - - /* no more packet in rx/tx queue, remove device from poll - * queue */ - napi_complete(napi); - - /* restore rx/tx interrupt */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->tx_chan)); - - return rx_work_done; -} - -/* - * mac interrupt handler - */ -static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id) -{ - struct net_device *dev; - struct bcm_enet_priv *priv; - u32 stat; - - dev = dev_id; - priv = netdev_priv(dev); - - stat = enet_readl(priv, ENET_IR_REG); - if (!(stat & ENET_IR_MIB)) - return IRQ_NONE; - - /* clear & mask interrupt */ - enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); - enet_writel(priv, 0, ENET_IRMASK_REG); - - /* read mib registers in workqueue */ - schedule_work(&priv->mib_update_task); - - return 
IRQ_HANDLED; -} - -/* - * rx/tx dma interrupt handler - */ -static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) -{ - struct net_device *dev; - struct bcm_enet_priv *priv; - - dev = dev_id; - priv = netdev_priv(dev); - - /* mask rx/tx interrupts */ - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); - - napi_schedule(&priv->napi); - - return IRQ_HANDLED; -} - -/* - * tx request callback - */ -static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct bcm_enet_priv *priv; - struct bcm_enet_desc *desc; - u32 len_stat; - int ret; - - priv = netdev_priv(dev); - - /* lock against tx reclaim */ - spin_lock(&priv->tx_lock); - - /* make sure the tx hw queue is not full, should not happen - * since we stop queue before it's the case */ - if (unlikely(!priv->tx_desc_count)) { - netif_stop_queue(dev); - dev_err(&priv->pdev->dev, "xmit called with no tx desc " - "available?\n"); - ret = NETDEV_TX_BUSY; - goto out_unlock; - } - - /* point to the next available desc */ - desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; - priv->tx_skb[priv->tx_curr_desc] = skb; - - /* fill descriptor */ - desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len, - DMA_TO_DEVICE); - - len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; - len_stat |= DMADESC_ESOP_MASK | - DMADESC_APPEND_CRC | - DMADESC_OWNER_MASK; - - priv->tx_curr_desc++; - if (priv->tx_curr_desc == priv->tx_ring_size) { - priv->tx_curr_desc = 0; - len_stat |= DMADESC_WRAP_MASK; - } - priv->tx_desc_count--; - - /* dma might be already polling, make sure we update desc - * fields in correct order */ - wmb(); - desc->len_stat = len_stat; - wmb(); - - /* kick tx dma */ - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->tx_chan)); - - /* stop queue if no more desc available */ - if (!priv->tx_desc_count) - netif_stop_queue(dev); - - dev->stats.tx_bytes += skb->len; - dev->stats.tx_packets++; - ret = NETDEV_TX_OK; - -out_unlock: - spin_unlock(&priv->tx_lock); - return ret; -} - -/* - * Change the interface's mac address. 
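- * (The address is stored in perfect-match register 0; the three
- * remaining perfect-match slots are reserved for the multicast
- * filter programmed in bcm_enet_set_multicast_list() below.)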
- */ -static int bcm_enet_set_mac_address(struct net_device *dev, void *p) -{ - struct bcm_enet_priv *priv; - struct sockaddr *addr = p; - u32 val; - - priv = netdev_priv(dev); - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); - - /* use perfect match register 0 to store my mac address */ - val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | - (dev->dev_addr[4] << 8) | dev->dev_addr[5]; - enet_writel(priv, val, ENET_PML_REG(0)); - - val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]); - val |= ENET_PMH_DATAVALID_MASK; - enet_writel(priv, val, ENET_PMH_REG(0)); - - return 0; -} - -/* - * Change rx mode (promiscuous/allmulti) and update multicast list - */ -static void bcm_enet_set_multicast_list(struct net_device *dev) -{ - struct bcm_enet_priv *priv; - struct netdev_hw_addr *ha; - u32 val; - int i; - - priv = netdev_priv(dev); - - val = enet_readl(priv, ENET_RXCFG_REG); - - if (dev->flags & IFF_PROMISC) - val |= ENET_RXCFG_PROMISC_MASK; - else - val &= ~ENET_RXCFG_PROMISC_MASK; - - /* only 3 perfect match registers left, first one is used for - * own mac address */ - if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3) - val |= ENET_RXCFG_ALLMCAST_MASK; - else - val &= ~ENET_RXCFG_ALLMCAST_MASK; - - /* no need to set perfect match registers if we catch all - * multicast */ - if (val & ENET_RXCFG_ALLMCAST_MASK) { - enet_writel(priv, val, ENET_RXCFG_REG); - return; - } - - i = 0; - netdev_for_each_mc_addr(ha, dev) { - u8 *dmi_addr; - u32 tmp; - - if (i == 3) - break; - /* update perfect match registers */ - dmi_addr = ha->addr; - tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | - (dmi_addr[4] << 8) | dmi_addr[5]; - enet_writel(priv, tmp, ENET_PML_REG(i + 1)); - - tmp = (dmi_addr[0] << 8 | dmi_addr[1]); - tmp |= ENET_PMH_DATAVALID_MASK; - enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1)); - } - - for (; i < 3; i++) { - enet_writel(priv, 0, ENET_PML_REG(i + 1)); - enet_writel(priv, 0, ENET_PMH_REG(i + 1)); - } - - enet_writel(priv, val, ENET_RXCFG_REG); -} - -/* - * set mac duplex parameters - */ -static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex) -{ - u32 val; - - val = enet_readl(priv, ENET_TXCTL_REG); - if (fullduplex) - val |= ENET_TXCTL_FD_MASK; - else - val &= ~ENET_TXCTL_FD_MASK; - enet_writel(priv, val, ENET_TXCTL_REG); -} - -/* - * set mac flow control parameters - */ -static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) -{ - u32 val; - - /* rx flow control (pause frame handling) */ - val = enet_readl(priv, ENET_RXCFG_REG); - if (rx_en) - val |= ENET_RXCFG_ENFLOW_MASK; - else - val &= ~ENET_RXCFG_ENFLOW_MASK; - enet_writel(priv, val, ENET_RXCFG_REG); - - /* tx flow control (pause frame generation) */ - val = enet_dma_readl(priv, ENETDMA_CFG_REG); - if (tx_en) - val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); - else - val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); - enet_dma_writel(priv, val, ENETDMA_CFG_REG); -} - -/* - * link changed callback (from phylib) - */ -static void bcm_enet_adjust_phy_link(struct net_device *dev) -{ - struct bcm_enet_priv *priv; - struct phy_device *phydev; - int status_changed; - - priv = netdev_priv(dev); - phydev = priv->phydev; - status_changed = 0; - - if (priv->old_link != phydev->link) { - status_changed = 1; - priv->old_link = phydev->link; - } - - /* reflect duplex change in mac configuration */ - if (phydev->link && phydev->duplex != priv->old_duplex) { - bcm_enet_set_duplex(priv, - (phydev->duplex == DUPLEX_FULL) ? 
1 : 0); - status_changed = 1; - priv->old_duplex = phydev->duplex; - } - - /* enable flow control if remote advertises it (trust phylib to - * check that duplex is full) */ - if (phydev->link && phydev->pause != priv->old_pause) { - int rx_pause_en, tx_pause_en; - - if (phydev->pause) { - /* pause was advertised by lpa and us */ - rx_pause_en = 1; - tx_pause_en = 1; - } else if (!priv->pause_auto) { - /* pause setting overridden by user */ - rx_pause_en = priv->pause_rx; - tx_pause_en = priv->pause_tx; - } else { - rx_pause_en = 0; - tx_pause_en = 0; - } - - bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en); - status_changed = 1; - priv->old_pause = phydev->pause; - } - - if (status_changed) { - pr_info("%s: link %s", dev->name, phydev->link ? - "UP" : "DOWN"); - if (phydev->link) - pr_cont(" - %d/%s - flow control %s", phydev->speed, - DUPLEX_FULL == phydev->duplex ? "full" : "half", - phydev->pause == 1 ? "rx&tx" : "off"); - - pr_cont("\n"); - } -} - -/* - * link changed callback (if phylib is not used) - */ -static void bcm_enet_adjust_link(struct net_device *dev) -{ - struct bcm_enet_priv *priv; - - priv = netdev_priv(dev); - bcm_enet_set_duplex(priv, priv->force_duplex_full); - bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx); - netif_carrier_on(dev); - - pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n", - dev->name, - priv->force_speed_100 ? 100 : 10, - priv->force_duplex_full ? "full" : "half", - priv->pause_rx ? "rx" : "off", - priv->pause_tx ? "tx" : "off"); -} - -/* - * open callback, allocate dma rings & buffers and start rx operation - */ -static int bcm_enet_open(struct net_device *dev) -{ - struct bcm_enet_priv *priv; - struct sockaddr addr; - struct device *kdev; - struct phy_device *phydev; - int i, ret; - unsigned int size; - char phy_id[MII_BUS_ID_SIZE + 3]; - void *p; - u32 val; - - priv = netdev_priv(dev); - kdev = &priv->pdev->dev; - - if (priv->has_phy) { - /* connect to PHY */ - snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, - priv->mac_id ? 
"1" : "0", priv->phy_id); - - phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0, - PHY_INTERFACE_MODE_MII); - - if (IS_ERR(phydev)) { - dev_err(kdev, "could not attach to PHY\n"); - return PTR_ERR(phydev); - } - - /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_MII); - phydev->advertising = phydev->supported; - - if (priv->pause_auto && priv->pause_rx && priv->pause_tx) - phydev->advertising |= SUPPORTED_Pause; - else - phydev->advertising &= ~SUPPORTED_Pause; - - dev_info(kdev, "attached PHY at address %d [%s]\n", - phydev->addr, phydev->drv->name); - - priv->old_link = 0; - priv->old_duplex = -1; - priv->old_pause = -1; - priv->phydev = phydev; - } - - /* mask all interrupts and request them */ - enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); - - ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); - if (ret) - goto out_phy_disconnect; - - ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED, - dev->name, dev); - if (ret) - goto out_freeirq; - - ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, - IRQF_DISABLED, dev->name, dev); - if (ret) - goto out_freeirq_rx; - - /* initialize perfect match registers */ - for (i = 0; i < 4; i++) { - enet_writel(priv, 0, ENET_PML_REG(i)); - enet_writel(priv, 0, ENET_PMH_REG(i)); - } - - /* write device mac address */ - memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); - bcm_enet_set_mac_address(dev, &addr); - - /* allocate rx dma ring */ - size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); - if (!p) { - dev_err(kdev, "cannot allocate rx ring %u\n", size); - ret = -ENOMEM; - goto out_freeirq_tx; - } - - memset(p, 0, size); - priv->rx_desc_alloc_size = size; - priv->rx_desc_cpu = p; - - /* allocate tx dma ring */ - size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); - if (!p) { - dev_err(kdev, "cannot allocate tx ring\n"); - ret = -ENOMEM; - goto out_free_rx_ring; - } - - memset(p, 0, size); - priv->tx_desc_alloc_size = size; - priv->tx_desc_cpu = p; - - priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, - GFP_KERNEL); - if (!priv->tx_skb) { - dev_err(kdev, "cannot allocate rx skb queue\n"); - ret = -ENOMEM; - goto out_free_tx_ring; - } - - priv->tx_desc_count = priv->tx_ring_size; - priv->tx_dirty_desc = 0; - priv->tx_curr_desc = 0; - spin_lock_init(&priv->tx_lock); - - /* init & fill rx ring with skbs */ - priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, - GFP_KERNEL); - if (!priv->rx_skb) { - dev_err(kdev, "cannot allocate rx skb queue\n"); - ret = -ENOMEM; - goto out_free_tx_skb; - } - - priv->rx_desc_count = 0; - priv->rx_dirty_desc = 0; - priv->rx_curr_desc = 0; - - /* initialize flow control buffer allocation */ - enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, - ENETDMA_BUFALLOC_REG(priv->rx_chan)); - - if (bcm_enet_refill_rx(dev)) { - dev_err(kdev, "cannot allocate rx skb queue\n"); - ret = -ENOMEM; - goto out; - } - - /* write rx & tx ring addresses */ - enet_dma_writel(priv, priv->rx_desc_dma, - ENETDMA_RSTART_REG(priv->rx_chan)); - enet_dma_writel(priv, priv->tx_desc_dma, - ENETDMA_RSTART_REG(priv->tx_chan)); - - /* clear 
remaining state ram for rx & tx channel */ - enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan)); - - /* set max rx/tx length */ - enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); - enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); - - /* set dma maximum burst len */ - enet_dma_writel(priv, BCMENET_DMA_MAXBURST, - ENETDMA_MAXBURST_REG(priv->rx_chan)); - enet_dma_writel(priv, BCMENET_DMA_MAXBURST, - ENETDMA_MAXBURST_REG(priv->tx_chan)); - - /* set correct transmit fifo watermark */ - enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); - - /* set flow control low/high threshold to 1/3 / 2/3 */ - val = priv->rx_ring_size / 3; - enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); - val = (priv->rx_ring_size * 2) / 3; - enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); - - /* all set, enable mac and interrupts, start dma engine and - * kick rx dma channel */ - wmb(); - val = enet_readl(priv, ENET_CTL_REG); - val |= ENET_CTL_ENABLE_MASK; - enet_writel(priv, val, ENET_CTL_REG); - enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->rx_chan)); - - /* watch "mib counters about to overflow" interrupt */ - enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); - enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); - - /* watch "packet transferred" interrupt in rx and tx */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->tx_chan)); - - /* make sure we enable napi before rx interrupt */ - napi_enable(&priv->napi); - - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->tx_chan)); - - if (priv->has_phy) - phy_start(priv->phydev); - else - bcm_enet_adjust_link(dev); - - netif_start_queue(dev); - return 0; - -out: - for (i = 0; i < priv->rx_ring_size; i++) { - struct bcm_enet_desc *desc; - - if (!priv->rx_skb[i]) - continue; - - desc = &priv->rx_desc_cpu[i]; - dma_unmap_single(kdev, desc->address, priv->rx_skb_size, - DMA_FROM_DEVICE); - kfree_skb(priv->rx_skb[i]); - } - kfree(priv->rx_skb); - -out_free_tx_skb: - kfree(priv->tx_skb); - -out_free_tx_ring: - dma_free_coherent(kdev, priv->tx_desc_alloc_size, - priv->tx_desc_cpu, priv->tx_desc_dma); - -out_free_rx_ring: - dma_free_coherent(kdev, priv->rx_desc_alloc_size, - priv->rx_desc_cpu, priv->rx_desc_dma); - -out_freeirq_tx: - free_irq(priv->irq_tx, dev); - -out_freeirq_rx: - free_irq(priv->irq_rx, dev); - -out_freeirq: - free_irq(dev->irq, dev); - -out_phy_disconnect: - phy_disconnect(priv->phydev); - - return ret; -} - -/* - * disable mac - */ -static void bcm_enet_disable_mac(struct bcm_enet_priv *priv) -{ - int limit; - u32 val; - - val = enet_readl(priv, ENET_CTL_REG); - val |= ENET_CTL_DISABLE_MASK; - enet_writel(priv, val, ENET_CTL_REG); - - limit = 1000; - do { - u32 val; - - val = enet_readl(priv, ENET_CTL_REG); - if (!(val & ENET_CTL_DISABLE_MASK)) - break; - udelay(1); - } while (limit--); -} - -/* - * disable dma in given channel - */ -static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) -{ - int 
limit; - - enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan)); - - limit = 1000; - do { - u32 val; - - val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan)); - if (!(val & ENETDMA_CHANCFG_EN_MASK)) - break; - udelay(1); - } while (limit--); -} - -/* - * stop callback - */ -static int bcm_enet_stop(struct net_device *dev) -{ - struct bcm_enet_priv *priv; - struct device *kdev; - int i; - - priv = netdev_priv(dev); - kdev = &priv->pdev->dev; - - netif_stop_queue(dev); - napi_disable(&priv->napi); - if (priv->has_phy) - phy_stop(priv->phydev); - del_timer_sync(&priv->rx_timeout); - - /* mask all interrupts */ - enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); - - /* make sure no mib update is scheduled */ - cancel_work_sync(&priv->mib_update_task); - - /* disable dma & mac */ - bcm_enet_disable_dma(priv, priv->tx_chan); - bcm_enet_disable_dma(priv, priv->rx_chan); - bcm_enet_disable_mac(priv); - - /* force reclaim of all tx buffers */ - bcm_enet_tx_reclaim(dev, 1); - - /* free the rx skb ring */ - for (i = 0; i < priv->rx_ring_size; i++) { - struct bcm_enet_desc *desc; - - if (!priv->rx_skb[i]) - continue; - - desc = &priv->rx_desc_cpu[i]; - dma_unmap_single(kdev, desc->address, priv->rx_skb_size, - DMA_FROM_DEVICE); - kfree_skb(priv->rx_skb[i]); - } - - /* free remaining allocated memory */ - kfree(priv->rx_skb); - kfree(priv->tx_skb); - dma_free_coherent(kdev, priv->rx_desc_alloc_size, - priv->rx_desc_cpu, priv->rx_desc_dma); - dma_free_coherent(kdev, priv->tx_desc_alloc_size, - priv->tx_desc_cpu, priv->tx_desc_dma); - free_irq(priv->irq_tx, dev); - free_irq(priv->irq_rx, dev); - free_irq(dev->irq, dev); - - /* release phy */ - if (priv->has_phy) { - phy_disconnect(priv->phydev); - priv->phydev = NULL; - } - - return 0; -} - -/* - * ethtool callbacks - */ -struct bcm_enet_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; - int mib_reg; -}; - -#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \ - offsetof(struct bcm_enet_priv, m) -#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \ - offsetof(struct net_device_stats, m) - -static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = { - { "rx_packets", DEV_STAT(rx_packets), -1 }, - { "tx_packets", DEV_STAT(tx_packets), -1 }, - { "rx_bytes", DEV_STAT(rx_bytes), -1 }, - { "tx_bytes", DEV_STAT(tx_bytes), -1 }, - { "rx_errors", DEV_STAT(rx_errors), -1 }, - { "tx_errors", DEV_STAT(tx_errors), -1 }, - { "rx_dropped", DEV_STAT(rx_dropped), -1 }, - { "tx_dropped", DEV_STAT(tx_dropped), -1 }, - - { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS}, - { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS }, - { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST }, - { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT }, - { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 }, - { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 }, - { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 }, - { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 }, - { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 }, - { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX }, - { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB }, - { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR }, - { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG }, - { "rx_dropped", GEN_STAT(mib.rx_drop), 
ETH_MIB_RX_DROP }, - { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN }, - { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND }, - { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC }, - { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN }, - { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM }, - { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE }, - { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL }, - - { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS }, - { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS }, - { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST }, - { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT }, - { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 }, - { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 }, - { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 }, - { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 }, - { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023}, - { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX }, - { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB }, - { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR }, - { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG }, - { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN }, - { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL }, - { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL }, - { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL }, - { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL }, - { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE }, - { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF }, - { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS }, - { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE }, - -}; - -#define BCM_ENET_STATS_LEN \ - (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats)) - -static const u32 unused_mib_regs[] = { - ETH_MIB_TX_ALL_OCTETS, - ETH_MIB_TX_ALL_PKTS, - ETH_MIB_RX_ALL_OCTETS, - ETH_MIB_RX_ALL_PKTS, -}; - - -static void bcm_enet_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - strncpy(drvinfo->driver, bcm_enet_driver_name, 32); - strncpy(drvinfo->version, bcm_enet_driver_version, 32); - strncpy(drvinfo->fw_version, "N/A", 32); - strncpy(drvinfo->bus_info, "bcm63xx", 32); - drvinfo->n_stats = BCM_ENET_STATS_LEN; -} - -static int bcm_enet_get_sset_count(struct net_device *netdev, - int string_set) -{ - switch (string_set) { - case ETH_SS_STATS: - return BCM_ENET_STATS_LEN; - default: - return -EINVAL; - } -} - -static void bcm_enet_get_strings(struct net_device *netdev, - u32 stringset, u8 *data) -{ - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < BCM_ENET_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - bcm_enet_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - } - break; - } -} - -static void update_mib_counters(struct bcm_enet_priv *priv) -{ - int i; - - for (i = 0; i < BCM_ENET_STATS_LEN; i++) { - const struct bcm_enet_stats *s; - u32 val; - char *p; - - s = &bcm_enet_gstrings_stats[i]; - if (s->mib_reg == -1) - continue; - - val = enet_readl(priv, ENET_MIB_REG(s->mib_reg)); - p = (char *)priv + s->stat_offset; - - if (s->sizeof_stat == sizeof(u64)) - *(u64 *)p += val; - else - *(u32 *)p += val; - } - - /* also empty unused mib counters to make sure mib counter - * overflow interrupt is cleared */ - for (i = 0; 
i < ARRAY_SIZE(unused_mib_regs); i++) - (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i])); -} - -static void bcm_enet_update_mib_counters_defer(struct work_struct *t) -{ - struct bcm_enet_priv *priv; - - priv = container_of(t, struct bcm_enet_priv, mib_update_task); - mutex_lock(&priv->mib_update_lock); - update_mib_counters(priv); - mutex_unlock(&priv->mib_update_lock); - - /* reenable mib interrupt */ - if (netif_running(priv->net_dev)) - enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); -} - -static void bcm_enet_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, - u64 *data) -{ - struct bcm_enet_priv *priv; - int i; - - priv = netdev_priv(netdev); - - mutex_lock(&priv->mib_update_lock); - update_mib_counters(priv); - - for (i = 0; i < BCM_ENET_STATS_LEN; i++) { - const struct bcm_enet_stats *s; - char *p; - - s = &bcm_enet_gstrings_stats[i]; - if (s->mib_reg == -1) - p = (char *)&netdev->stats; - else - p = (char *)priv; - p += s->stat_offset; - data[i] = (s->sizeof_stat == sizeof(u64)) ? - *(u64 *)p : *(u32 *)p; - } - mutex_unlock(&priv->mib_update_lock); -} - -static int bcm_enet_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct bcm_enet_priv *priv; - - priv = netdev_priv(dev); - - cmd->maxrxpkt = 0; - cmd->maxtxpkt = 0; - - if (priv->has_phy) { - if (!priv->phydev) - return -ENODEV; - return phy_ethtool_gset(priv->phydev, cmd); - } else { - cmd->autoneg = 0; - ethtool_cmd_speed_set(cmd, ((priv->force_speed_100) - ? SPEED_100 : SPEED_10)); - cmd->duplex = (priv->force_duplex_full) ? - DUPLEX_FULL : DUPLEX_HALF; - cmd->supported = ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full; - cmd->advertising = 0; - cmd->port = PORT_MII; - cmd->transceiver = XCVR_EXTERNAL; - } - return 0; -} - -static int bcm_enet_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct bcm_enet_priv *priv; - - priv = netdev_priv(dev); - if (priv->has_phy) { - if (!priv->phydev) - return -ENODEV; - return phy_ethtool_sset(priv->phydev, cmd); - } else { - - if (cmd->autoneg || - (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) || - cmd->port != PORT_MII) - return -EINVAL; - - priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0; - priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 
1 : 0; - - if (netif_running(dev)) - bcm_enet_adjust_link(dev); - return 0; - } -} - -static void bcm_enet_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) -{ - struct bcm_enet_priv *priv; - - priv = netdev_priv(dev); - - /* rx/tx ring is actually only limited by memory */ - ering->rx_max_pending = 8192; - ering->tx_max_pending = 8192; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = 0; - ering->rx_pending = priv->rx_ring_size; - ering->tx_pending = priv->tx_ring_size; -} - -static int bcm_enet_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) -{ - struct bcm_enet_priv *priv; - int was_running; - - priv = netdev_priv(dev); - - was_running = 0; - if (netif_running(dev)) { - bcm_enet_stop(dev); - was_running = 1; - } - - priv->rx_ring_size = ering->rx_pending; - priv->tx_ring_size = ering->tx_pending; - - if (was_running) { - int err; - - err = bcm_enet_open(dev); - if (err) - dev_close(dev); - else - bcm_enet_set_multicast_list(dev); - } - return 0; -} - -static void bcm_enet_get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *ecmd) -{ - struct bcm_enet_priv *priv; - - priv = netdev_priv(dev); - ecmd->autoneg = priv->pause_auto; - ecmd->rx_pause = priv->pause_rx; - ecmd->tx_pause = priv->pause_tx; -} - -static int bcm_enet_set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *ecmd) -{ - struct bcm_enet_priv *priv; - - priv = netdev_priv(dev); - - if (priv->has_phy) { - if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) { - /* asymmetric pause mode not supported, - * actually possible but integrated PHY has RO - * asym_pause bit */ - return -EINVAL; - } - } else { - /* no pause autoneg on direct mii connection */ - if (ecmd->autoneg) - return -EINVAL; - } - - priv->pause_auto = ecmd->autoneg; - priv->pause_rx = ecmd->rx_pause; - priv->pause_tx = ecmd->tx_pause; - - return 0; -} - -static struct ethtool_ops bcm_enet_ethtool_ops = { - .get_strings = bcm_enet_get_strings, - .get_sset_count = bcm_enet_get_sset_count, - .get_ethtool_stats = bcm_enet_get_ethtool_stats, - .get_settings = bcm_enet_get_settings, - .set_settings = bcm_enet_set_settings, - .get_drvinfo = bcm_enet_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_ringparam = bcm_enet_get_ringparam, - .set_ringparam = bcm_enet_set_ringparam, - .get_pauseparam = bcm_enet_get_pauseparam, - .set_pauseparam = bcm_enet_set_pauseparam, -}; - -static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct bcm_enet_priv *priv; - - priv = netdev_priv(dev); - if (priv->has_phy) { - if (!priv->phydev) - return -ENODEV; - return phy_mii_ioctl(priv->phydev, rq, cmd); - } else { - struct mii_if_info mii; - - mii.dev = dev; - mii.mdio_read = bcm_enet_mdio_read_mii; - mii.mdio_write = bcm_enet_mdio_write_mii; - mii.phy_id = 0; - mii.phy_id_mask = 0x3f; - mii.reg_num_mask = 0x1f; - return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); - } -} - -/* - * calculate actual hardware mtu - */ -static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu) -{ - int actual_mtu; - - actual_mtu = mtu; - - /* add ethernet header + vlan tag size */ - actual_mtu += VLAN_ETH_HLEN; - - if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU) - return -EINVAL; - - /* - * setup maximum size before we get overflow mark in - * descriptor, note that this will not prevent reception of - * big frames, they will be split into multiple buffers - * anyway - */ - priv->hw_mtu = actual_mtu; - - /* - * align rx buffer size to dma burst len, account for FCS since 
- * it's appended - */ - priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, - BCMENET_DMA_MAXBURST * 4); - return 0; -} - -/* - * adjust mtu, can't be called while device is running - */ -static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) -{ - int ret; - - if (netif_running(dev)) - return -EBUSY; - - ret = compute_hw_mtu(netdev_priv(dev), new_mtu); - if (ret) - return ret; - dev->mtu = new_mtu; - return 0; -} - -/* - * preinit hardware to allow mii operation while device is down - */ -static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv) -{ - u32 val; - int limit; - - /* make sure mac is disabled */ - bcm_enet_disable_mac(priv); - - /* soft reset mac */ - val = ENET_CTL_SRESET_MASK; - enet_writel(priv, val, ENET_CTL_REG); - wmb(); - - limit = 1000; - do { - val = enet_readl(priv, ENET_CTL_REG); - if (!(val & ENET_CTL_SRESET_MASK)) - break; - udelay(1); - } while (limit--); - - /* select correct mii interface */ - val = enet_readl(priv, ENET_CTL_REG); - if (priv->use_external_mii) - val |= ENET_CTL_EPHYSEL_MASK; - else - val &= ~ENET_CTL_EPHYSEL_MASK; - enet_writel(priv, val, ENET_CTL_REG); - - /* turn on mdc clock */ - enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) | - ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG); - - /* set mib counters to self-clear when read */ - val = enet_readl(priv, ENET_MIBCTL_REG); - val |= ENET_MIBCTL_RDCLEAR_MASK; - enet_writel(priv, val, ENET_MIBCTL_REG); -} - -static const struct net_device_ops bcm_enet_ops = { - .ndo_open = bcm_enet_open, - .ndo_stop = bcm_enet_stop, - .ndo_start_xmit = bcm_enet_start_xmit, - .ndo_set_mac_address = bcm_enet_set_mac_address, - .ndo_set_multicast_list = bcm_enet_set_multicast_list, - .ndo_do_ioctl = bcm_enet_ioctl, - .ndo_change_mtu = bcm_enet_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = bcm_enet_netpoll, -#endif -}; - -/* - * allocate netdevice, request register memory and register device. - */ -static int __devinit bcm_enet_probe(struct platform_device *pdev) -{ - struct bcm_enet_priv *priv; - struct net_device *dev; - struct bcm63xx_enet_platform_data *pd; - struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; - struct mii_bus *bus; - const char *clk_name; - unsigned int iomem_size; - int i, ret; - - /* stop if shared driver failed, assume driver->probe will be - * called in the same order we register devices (correct ?) 
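- * (bcm_enet_shared_probe() is what maps bcm_enet_shared_base, and - * every enet_dma_* register access below goes through that mapping)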
*/ - if (!bcm_enet_shared_base) - return -ENODEV; - - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); - res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); - if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx) - return -ENODEV; - - ret = 0; - dev = alloc_etherdev(sizeof(*priv)); - if (!dev) - return -ENOMEM; - priv = netdev_priv(dev); - - ret = compute_hw_mtu(priv, dev->mtu); - if (ret) - goto out; - - iomem_size = resource_size(res_mem); - if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) { - ret = -EBUSY; - goto out; - } - - priv->base = ioremap(res_mem->start, iomem_size); - if (priv->base == NULL) { - ret = -ENOMEM; - goto out_release_mem; - } - dev->irq = priv->irq = res_irq->start; - priv->irq_rx = res_irq_rx->start; - priv->irq_tx = res_irq_tx->start; - priv->mac_id = pdev->id; - - /* get rx & tx dma channel id for this mac */ - if (priv->mac_id == 0) { - priv->rx_chan = 0; - priv->tx_chan = 1; - clk_name = "enet0"; - } else { - priv->rx_chan = 2; - priv->tx_chan = 3; - clk_name = "enet1"; - } - - priv->mac_clk = clk_get(&pdev->dev, clk_name); - if (IS_ERR(priv->mac_clk)) { - ret = PTR_ERR(priv->mac_clk); - goto out_unmap; - } - clk_enable(priv->mac_clk); - - /* initialize defaults and fetch platform data */ - priv->rx_ring_size = BCMENET_DEF_RX_DESC; - priv->tx_ring_size = BCMENET_DEF_TX_DESC; - - pd = pdev->dev.platform_data; - if (pd) { - memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); - priv->has_phy = pd->has_phy; - priv->phy_id = pd->phy_id; - priv->has_phy_interrupt = pd->has_phy_interrupt; - priv->phy_interrupt = pd->phy_interrupt; - priv->use_external_mii = !pd->use_internal_phy; - priv->pause_auto = pd->pause_auto; - priv->pause_rx = pd->pause_rx; - priv->pause_tx = pd->pause_tx; - priv->force_duplex_full = pd->force_duplex_full; - priv->force_speed_100 = pd->force_speed_100; - } - - if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { - /* using internal PHY, enable clock */ - priv->phy_clk = clk_get(&pdev->dev, "ephy"); - if (IS_ERR(priv->phy_clk)) { - ret = PTR_ERR(priv->phy_clk); - priv->phy_clk = NULL; - goto out_put_clk_mac; - } - clk_enable(priv->phy_clk); - } - - /* do minimal hardware init to be able to probe mii bus */ - bcm_enet_hw_preinit(priv); - - /* MII bus registration */ - if (priv->has_phy) { - - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - ret = -ENOMEM; - goto out_uninit_hw; - } - - bus = priv->mii_bus; - bus->name = "bcm63xx_enet MII bus"; - bus->parent = &pdev->dev; - bus->priv = priv; - bus->read = bcm_enet_mdio_read_phylib; - bus->write = bcm_enet_mdio_write_phylib; - sprintf(bus->id, "%d", priv->mac_id); - - /* only probe bus where we think the PHY is, because - * the mdio read operation returns 0 instead of 0xffff - * if a slave is not present on hw */ - bus->phy_mask = ~(1 << priv->phy_id); - - bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!bus->irq) { - ret = -ENOMEM; - goto out_free_mdio; - } - - if (priv->has_phy_interrupt) - bus->irq[priv->phy_id] = priv->phy_interrupt; - else - bus->irq[priv->phy_id] = PHY_POLL; - - ret = mdiobus_register(bus); - if (ret) { - dev_err(&pdev->dev, "unable to register mdio bus\n"); - goto out_free_mdio; - } - } else { - - /* run platform code to initialize PHY device */ - if (pd->mii_config && - pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, - bcm_enet_mdio_write_mii)) { - dev_err(&pdev->dev, "unable to 
configure mdio bus\n"); - goto out_uninit_hw; - } - } - - spin_lock_init(&priv->rx_lock); - - /* init rx timeout (used for oom) */ - init_timer(&priv->rx_timeout); - priv->rx_timeout.function = bcm_enet_refill_rx_timer; - priv->rx_timeout.data = (unsigned long)dev; - - /* init the mib update lock&work */ - mutex_init(&priv->mib_update_lock); - INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); - - /* zero mib counters */ - for (i = 0; i < ENET_MIB_REG_COUNT; i++) - enet_writel(priv, 0, ENET_MIB_REG(i)); - - /* register netdevice */ - dev->netdev_ops = &bcm_enet_ops; - netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); - - SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops); - SET_NETDEV_DEV(dev, &pdev->dev); - - ret = register_netdev(dev); - if (ret) - goto out_unregister_mdio; - - netif_carrier_off(dev); - platform_set_drvdata(pdev, dev); - priv->pdev = pdev; - priv->net_dev = dev; - - return 0; - -out_unregister_mdio: - if (priv->mii_bus) { - mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); - } - -out_free_mdio: - if (priv->mii_bus) - mdiobus_free(priv->mii_bus); - -out_uninit_hw: - /* turn off mdc clock */ - enet_writel(priv, 0, ENET_MIISC_REG); - if (priv->phy_clk) { - clk_disable(priv->phy_clk); - clk_put(priv->phy_clk); - } - -out_put_clk_mac: - clk_disable(priv->mac_clk); - clk_put(priv->mac_clk); - -out_unmap: - iounmap(priv->base); - -out_release_mem: - release_mem_region(res_mem->start, iomem_size); -out: - free_netdev(dev); - return ret; -} - - -/* - * exit func, stops hardware and unregisters netdevice - */ -static int __devexit bcm_enet_remove(struct platform_device *pdev) -{ - struct bcm_enet_priv *priv; - struct net_device *dev; - struct resource *res; - - /* stop netdevice */ - dev = platform_get_drvdata(pdev); - priv = netdev_priv(dev); - unregister_netdev(dev); - - /* turn off mdc clock */ - enet_writel(priv, 0, ENET_MIISC_REG); - - if (priv->has_phy) { - mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); - mdiobus_free(priv->mii_bus); - } else { - struct bcm63xx_enet_platform_data *pd; - - pd = pdev->dev.platform_data; - if (pd && pd->mii_config) - pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, - bcm_enet_mdio_write_mii); - } - - /* release device resources */ - iounmap(priv->base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - - /* disable hw block clocks */ - if (priv->phy_clk) { - clk_disable(priv->phy_clk); - clk_put(priv->phy_clk); - } - clk_disable(priv->mac_clk); - clk_put(priv->mac_clk); - - platform_set_drvdata(pdev, NULL); - free_netdev(dev); - return 0; -} - -struct platform_driver bcm63xx_enet_driver = { - .probe = bcm_enet_probe, - .remove = __devexit_p(bcm_enet_remove), - .driver = { - .name = "bcm63xx_enet", - .owner = THIS_MODULE, - }, -}; - -/* - * reserve & remap memory space shared between all macs - */ -static int __devinit bcm_enet_shared_probe(struct platform_device *pdev) -{ - struct resource *res; - unsigned int iomem_size; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - - iomem_size = resource_size(res); - if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma")) - return -EBUSY; - - bcm_enet_shared_base = ioremap(res->start, iomem_size); - if (!bcm_enet_shared_base) { - release_mem_region(res->start, iomem_size); - return -ENOMEM; - } - return 0; -} - -static int __devexit bcm_enet_shared_remove(struct platform_device *pdev) -{ - struct resource *res; - - iounmap(bcm_enet_shared_base); - 
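/* drop the shared mapping before releasing the region backing it */ - 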
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - return 0; -} - -/* - * this "shared" driver is needed because both macs share a single - * address space - */ -struct platform_driver bcm63xx_enet_shared_driver = { - .probe = bcm_enet_shared_probe, - .remove = __devexit_p(bcm_enet_shared_remove), - .driver = { - .name = "bcm63xx_enet_shared", - .owner = THIS_MODULE, - }, -}; - -/* - * entry point - */ -static int __init bcm_enet_init(void) -{ - int ret; - - ret = platform_driver_register(&bcm63xx_enet_shared_driver); - if (ret) - return ret; - - ret = platform_driver_register(&bcm63xx_enet_driver); - if (ret) - platform_driver_unregister(&bcm63xx_enet_shared_driver); - - return ret; -} - -static void __exit bcm_enet_exit(void) -{ - platform_driver_unregister(&bcm63xx_enet_driver); - platform_driver_unregister(&bcm63xx_enet_shared_driver); -} - - -module_init(bcm_enet_init); -module_exit(bcm_enet_exit); - -MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver"); -MODULE_AUTHOR("Maxime Bizon "); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/bcm63xx_enet.h b/drivers/net/bcm63xx_enet.h deleted file mode 100644 index 0e3048b..0000000 --- a/drivers/net/bcm63xx_enet.h +++ /dev/null @@ -1,302 +0,0 @@ -#ifndef BCM63XX_ENET_H_ -#define BCM63XX_ENET_H_ - -#include -#include -#include -#include -#include - -#include -#include -#include - -/* default number of descriptors */ -#define BCMENET_DEF_RX_DESC 64 -#define BCMENET_DEF_TX_DESC 32 - -/* maximum burst len for dma (4 bytes unit) */ -#define BCMENET_DMA_MAXBURST 16 - -/* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value - * must be low enough so that a DMA transfer of above burst length - * cannot overflow the fifo */ -#define BCMENET_TX_FIFO_TRESH 32 - -/* - * hardware maximum rx/tx packet size including FCS, max mtu is - * actually 2047, but if we set max rx size register to 2047 we won't - * get overflow information if packet size is 2048 or above - */ -#define BCMENET_MAX_MTU 2046 - -/* - * rx/tx dma descriptor - */ -struct bcm_enet_desc { - u32 len_stat; - u32 address; -}; - -#define DMADESC_LENGTH_SHIFT 16 -#define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT) -#define DMADESC_OWNER_MASK (1 << 15) -#define DMADESC_EOP_MASK (1 << 14) -#define DMADESC_SOP_MASK (1 << 13) -#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK) -#define DMADESC_WRAP_MASK (1 << 12) - -#define DMADESC_UNDER_MASK (1 << 9) -#define DMADESC_APPEND_CRC (1 << 8) -#define DMADESC_OVSIZE_MASK (1 << 4) -#define DMADESC_RXER_MASK (1 << 2) -#define DMADESC_CRC_MASK (1 << 1) -#define DMADESC_OV_MASK (1 << 0) -#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \ - DMADESC_OVSIZE_MASK | \ - DMADESC_RXER_MASK | \ - DMADESC_CRC_MASK | \ - DMADESC_OV_MASK) - - -/* - * MIB Counters register definitions -*/ -#define ETH_MIB_TX_GD_OCTETS 0 -#define ETH_MIB_TX_GD_PKTS 1 -#define ETH_MIB_TX_ALL_OCTETS 2 -#define ETH_MIB_TX_ALL_PKTS 3 -#define ETH_MIB_TX_BRDCAST 4 -#define ETH_MIB_TX_MULT 5 -#define ETH_MIB_TX_64 6 -#define ETH_MIB_TX_65_127 7 -#define ETH_MIB_TX_128_255 8 -#define ETH_MIB_TX_256_511 9 -#define ETH_MIB_TX_512_1023 10 -#define ETH_MIB_TX_1024_MAX 11 -#define ETH_MIB_TX_JAB 12 -#define ETH_MIB_TX_OVR 13 -#define ETH_MIB_TX_FRAG 14 -#define ETH_MIB_TX_UNDERRUN 15 -#define ETH_MIB_TX_COL 16 -#define ETH_MIB_TX_1_COL 17 -#define ETH_MIB_TX_M_COL 18 -#define ETH_MIB_TX_EX_COL 19 -#define ETH_MIB_TX_LATE 20 -#define ETH_MIB_TX_DEF 21 -#define ETH_MIB_TX_CRS 22 -#define ETH_MIB_TX_PAUSE 
23 - -#define ETH_MIB_RX_GD_OCTETS 32 -#define ETH_MIB_RX_GD_PKTS 33 -#define ETH_MIB_RX_ALL_OCTETS 34 -#define ETH_MIB_RX_ALL_PKTS 35 -#define ETH_MIB_RX_BRDCAST 36 -#define ETH_MIB_RX_MULT 37 -#define ETH_MIB_RX_64 38 -#define ETH_MIB_RX_65_127 39 -#define ETH_MIB_RX_128_255 40 -#define ETH_MIB_RX_256_511 41 -#define ETH_MIB_RX_512_1023 42 -#define ETH_MIB_RX_1024_MAX 43 -#define ETH_MIB_RX_JAB 44 -#define ETH_MIB_RX_OVR 45 -#define ETH_MIB_RX_FRAG 46 -#define ETH_MIB_RX_DROP 47 -#define ETH_MIB_RX_CRC_ALIGN 48 -#define ETH_MIB_RX_UND 49 -#define ETH_MIB_RX_CRC 50 -#define ETH_MIB_RX_ALIGN 51 -#define ETH_MIB_RX_SYM 52 -#define ETH_MIB_RX_PAUSE 53 -#define ETH_MIB_RX_CNTRL 54 - - -struct bcm_enet_mib_counters { - u64 tx_gd_octets; - u32 tx_gd_pkts; - u32 tx_all_octets; - u32 tx_all_pkts; - u32 tx_brdcast; - u32 tx_mult; - u32 tx_64; - u32 tx_65_127; - u32 tx_128_255; - u32 tx_256_511; - u32 tx_512_1023; - u32 tx_1024_max; - u32 tx_jab; - u32 tx_ovr; - u32 tx_frag; - u32 tx_underrun; - u32 tx_col; - u32 tx_1_col; - u32 tx_m_col; - u32 tx_ex_col; - u32 tx_late; - u32 tx_def; - u32 tx_crs; - u32 tx_pause; - u64 rx_gd_octets; - u32 rx_gd_pkts; - u32 rx_all_octets; - u32 rx_all_pkts; - u32 rx_brdcast; - u32 rx_mult; - u32 rx_64; - u32 rx_65_127; - u32 rx_128_255; - u32 rx_256_511; - u32 rx_512_1023; - u32 rx_1024_max; - u32 rx_jab; - u32 rx_ovr; - u32 rx_frag; - u32 rx_drop; - u32 rx_crc_align; - u32 rx_und; - u32 rx_crc; - u32 rx_align; - u32 rx_sym; - u32 rx_pause; - u32 rx_cntrl; -}; - - -struct bcm_enet_priv { - - /* mac id (from platform device id) */ - int mac_id; - - /* base remapped address of device */ - void __iomem *base; - - /* mac irq, rx_dma irq, tx_dma irq */ - int irq; - int irq_rx; - int irq_tx; - - /* hw view of rx & tx dma ring */ - dma_addr_t rx_desc_dma; - dma_addr_t tx_desc_dma; - - /* allocated size (in bytes) for rx & tx dma ring */ - unsigned int rx_desc_alloc_size; - unsigned int tx_desc_alloc_size; - - - struct napi_struct napi; - - /* dma channel id for rx */ - int rx_chan; - - /* number of dma desc in rx ring */ - int rx_ring_size; - - /* cpu view of rx dma ring */ - struct bcm_enet_desc *rx_desc_cpu; - - /* current number of armed descriptor given to hardware for rx */ - int rx_desc_count; - - /* next rx descriptor to fetch from hardware */ - int rx_curr_desc; - - /* next dirty rx descriptor to refill */ - int rx_dirty_desc; - - /* size of allocated rx skbs */ - unsigned int rx_skb_size; - - /* list of skb given to hw for rx */ - struct sk_buff **rx_skb; - - /* used when rx skb allocation failed, so we defer rx queue - * refill */ - struct timer_list rx_timeout; - - /* lock rx_timeout against rx normal operation */ - spinlock_t rx_lock; - - - /* dma channel id for tx */ - int tx_chan; - - /* number of dma desc in tx ring */ - int tx_ring_size; - - /* cpu view of tx dma ring */ - struct bcm_enet_desc *tx_desc_cpu; - - /* number of available descriptor for tx */ - int tx_desc_count; - - /* next tx descriptor available */ - int tx_curr_desc; - - /* next dirty tx descriptor to reclaim */ - int tx_dirty_desc; - - /* list of skb given to hw for tx */ - struct sk_buff **tx_skb; - - /* lock used by tx reclaim and xmit */ - spinlock_t tx_lock; - - - /* set if internal phy is ignored and external mii interface - * is selected */ - int use_external_mii; - - /* set if a phy is connected, phy address must be known, - * probing is not possible */ - int has_phy; - int phy_id; - - /* set if connected phy has an associated irq */ - int has_phy_interrupt; - int phy_interrupt; - - 
/* used when a phy is connected (phylib used) */ - struct mii_bus *mii_bus; - struct phy_device *phydev; - int old_link; - int old_duplex; - int old_pause; - - /* used when no phy is connected */ - int force_speed_100; - int force_duplex_full; - - /* pause parameters */ - int pause_auto; - int pause_rx; - int pause_tx; - - /* stats */ - struct bcm_enet_mib_counters mib; - - /* after mib interrupt, mib registers update is done in this - * work queue */ - struct work_struct mib_update_task; - - /* lock mib update between userspace request and workqueue */ - struct mutex mib_update_lock; - - /* mac clock */ - struct clk *mac_clk; - - /* phy clock if internal phy is used */ - struct clk *phy_clk; - - /* network device reference */ - struct net_device *net_dev; - - /* platform device reference */ - struct platform_device *pdev; - - /* maximum hardware transmit/receive size */ - unsigned int hw_mtu; -}; - -#endif /* ! BCM63XX_ENET_H_ */ diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c deleted file mode 100644 index 4b2b570..0000000 --- a/drivers/net/bnx2.c +++ /dev/null @@ -1,8607 +0,0 @@ -/* bnx2.c: Broadcom NX2 network driver. - * - * Copyright (c) 2004-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Michael Chan (mchan@broadcom.com) - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) -#define BCM_CNIC 1 -#include "cnic_if.h" -#endif -#include "bnx2.h" -#include "bnx2_fw.h" - -#define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.1.11" -#define DRV_MODULE_RELDATE "July 20, 2011" -#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw" -#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" -#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw" -#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw" -#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw" - -#define RUN_AT(x) (jiffies + (x)) - -/* Time in jiffies before concluding the transmitter is hung. 
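- * (5*HZ is always five seconds: HZ is the number of jiffies per - * second, whatever tick rate the kernel was built with.)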
*/ -#define TX_TIMEOUT (5*HZ) - -static char version[] __devinitdata = - "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; - -MODULE_AUTHOR("Michael Chan "); -MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); -MODULE_FIRMWARE(FW_MIPS_FILE_06); -MODULE_FIRMWARE(FW_RV2P_FILE_06); -MODULE_FIRMWARE(FW_MIPS_FILE_09); -MODULE_FIRMWARE(FW_RV2P_FILE_09); -MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax); - -static int disable_msi = 0; - -module_param(disable_msi, int, 0); -MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); - -typedef enum { - BCM5706 = 0, - NC370T, - NC370I, - BCM5706S, - NC370F, - BCM5708, - BCM5708S, - BCM5709, - BCM5709S, - BCM5716, - BCM5716S, -} board_t; - -/* indexed by board_t, above */ -static struct { - char *name; -} board_info[] __devinitdata = { - { "Broadcom NetXtreme II BCM5706 1000Base-T" }, - { "HP NC370T Multifunction Gigabit Server Adapter" }, - { "HP NC370i Multifunction Gigabit Server Adapter" }, - { "Broadcom NetXtreme II BCM5706 1000Base-SX" }, - { "HP NC370F Multifunction Gigabit Server Adapter" }, - { "Broadcom NetXtreme II BCM5708 1000Base-T" }, - { "Broadcom NetXtreme II BCM5708 1000Base-SX" }, - { "Broadcom NetXtreme II BCM5709 1000Base-T" }, - { "Broadcom NetXtreme II BCM5709 1000Base-SX" }, - { "Broadcom NetXtreme II BCM5716 1000Base-T" }, - { "Broadcom NetXtreme II BCM5716 1000Base-SX" }, - }; - -static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = { - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, - PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, - PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, - PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S }, - { PCI_VENDOR_ID_BROADCOM, 0x163b, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 }, - { PCI_VENDOR_ID_BROADCOM, 0x163c, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S }, - { 0, } -}; - -static const struct flash_spec flash_table[] = -{ -#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE) -#define NONBUFFERED_FLAGS (BNX2_NV_WREN) - /* Slow EEPROM */ - {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, - BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, - SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, - "EEPROM - slow"}, - /* Expansion entry 0001 */ - {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, - SAIFUN_FLASH_BYTE_ADDR_MASK, 0, - "Entry 0001"}, - /* Saifun SA25F010 (non-buffered flash) */ - /* strap, cfg1, & write1 need updates */ - {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, - SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, - "Non-buffered flash (128kB)"}, - /* Saifun SA25F020 (non-buffered flash) */ - /* 
strap, cfg1, & write1 need updates */ - {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, - SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, - "Non-buffered flash (256kB)"}, - /* Expansion entry 0100 */ - {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, - SAIFUN_FLASH_BYTE_ADDR_MASK, 0, - "Entry 0100"}, - /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ - {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, - ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, - "Entry 0101: ST M45PE10 (128kB non-buffered)"}, - /* Entry 0110: ST M45PE20 (non-buffered flash)*/ - {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, - ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, - "Entry 0110: ST M45PE20 (256kB non-buffered)"}, - /* Saifun SA25F005 (non-buffered flash) */ - /* strap, cfg1, & write1 need updates */ - {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, - SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, - "Non-buffered flash (64kB)"}, - /* Fast EEPROM */ - {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, - BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, - SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, - "EEPROM - fast"}, - /* Expansion entry 1001 */ - {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, - SAIFUN_FLASH_BYTE_ADDR_MASK, 0, - "Entry 1001"}, - /* Expansion entry 1010 */ - {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, - SAIFUN_FLASH_BYTE_ADDR_MASK, 0, - "Entry 1010"}, - /* ATMEL AT45DB011B (buffered flash) */ - {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, - BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, - BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, - "Buffered flash (128kB)"}, - /* Expansion entry 1100 */ - {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, - SAIFUN_FLASH_BYTE_ADDR_MASK, 0, - "Entry 1100"}, - /* Expansion entry 1101 */ - {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, - NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, - SAIFUN_FLASH_BYTE_ADDR_MASK, 0, - "Entry 1101"}, - /* Atmel Expansion entry 1110 */ - {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, - BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, - BUFFERED_FLASH_BYTE_ADDR_MASK, 0, - "Entry 1110 (Atmel)"}, - /* ATMEL AT45DB021B (buffered flash) */ - {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, - BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, - BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, - "Buffered flash (256kB)"}, -}; - -static const struct flash_spec flash_5709 = { - .flags = BNX2_NV_BUFFERED, - .page_bits = BCM5709_FLASH_PAGE_BITS, - .page_size = BCM5709_FLASH_PAGE_SIZE, - .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, - .total_size = BUFFERED_FLASH_TOTAL_SIZE*2, - .name = "5709 Buffered flash (256kB)", -}; - -MODULE_DEVICE_TABLE(pci, 
bnx2_pci_tbl); - -static void bnx2_init_napi(struct bnx2 *bp); -static void bnx2_del_napi(struct bnx2 *bp); - -static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) -{ - u32 diff; - - /* Tell compiler to fetch tx_prod and tx_cons from memory. */ - barrier(); - - /* The ring uses 256 indices for 255 entries, one of them - * needs to be skipped. - */ - diff = txr->tx_prod - txr->tx_cons; - if (unlikely(diff >= TX_DESC_CNT)) { - diff &= 0xffff; - if (diff == TX_DESC_CNT) - diff = MAX_TX_DESC_CNT; - } - return bp->tx_ring_size - diff; -} - -static u32 -bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) -{ - u32 val; - - spin_lock_bh(&bp->indirect_lock); - REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); - val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW); - spin_unlock_bh(&bp->indirect_lock); - return val; -} - -static void -bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) -{ - spin_lock_bh(&bp->indirect_lock); - REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); - REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val); - spin_unlock_bh(&bp->indirect_lock); -} - -static void -bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val) -{ - bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val); -} - -static u32 -bnx2_shmem_rd(struct bnx2 *bp, u32 offset) -{ - return bnx2_reg_rd_ind(bp, bp->shmem_base + offset); -} - -static void -bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) -{ - offset += cid_addr; - spin_lock_bh(&bp->indirect_lock); - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - int i; - - REG_WR(bp, BNX2_CTX_CTX_DATA, val); - REG_WR(bp, BNX2_CTX_CTX_CTRL, - offset | BNX2_CTX_CTX_CTRL_WRITE_REQ); - for (i = 0; i < 5; i++) { - val = REG_RD(bp, BNX2_CTX_CTX_CTRL); - if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0) - break; - udelay(5); - } - } else { - REG_WR(bp, BNX2_CTX_DATA_ADR, offset); - REG_WR(bp, BNX2_CTX_DATA, val); - } - spin_unlock_bh(&bp->indirect_lock); -} - -#ifdef BCM_CNIC -static int -bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info) -{ - struct bnx2 *bp = netdev_priv(dev); - struct drv_ctl_io *io = &info->data.io; - - switch (info->cmd) { - case DRV_CTL_IO_WR_CMD: - bnx2_reg_wr_ind(bp, io->offset, io->data); - break; - case DRV_CTL_IO_RD_CMD: - io->data = bnx2_reg_rd_ind(bp, io->offset); - break; - case DRV_CTL_CTX_WR_CMD: - bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data); - break; - default: - return -EINVAL; - } - return 0; -} - -static void bnx2_setup_cnic_irq_info(struct bnx2 *bp) -{ - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; - int sb_id; - - if (bp->flags & BNX2_FLAG_USING_MSIX) { - cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; - bnapi->cnic_present = 0; - sb_id = bp->irq_nvecs; - cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; - } else { - cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; - bnapi->cnic_tag = bnapi->last_status_idx; - bnapi->cnic_present = 1; - sb_id = 0; - cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; - } - - cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector; - cp->irq_arr[0].status_blk = (void *) - ((unsigned long) bnapi->status_blk.msi + - (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id)); - cp->irq_arr[0].status_blk_num = sb_id; - cp->num_irq = 1; -} - -static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops, - void *data) -{ - struct bnx2 *bp = netdev_priv(dev); - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - - if (ops == NULL) - return -EINVAL; - - if (cp->drv_state & CNIC_DRV_STATE_REGD) - return -EBUSY; - - if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN)) - return -ENODEV; - - 
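/* firmware advertises at least one iSCSI connection, accept the - * registration */ - 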
bp->cnic_data = data; - rcu_assign_pointer(bp->cnic_ops, ops); - - cp->num_irq = 0; - cp->drv_state = CNIC_DRV_STATE_REGD; - - bnx2_setup_cnic_irq_info(bp); - - return 0; -} - -static int bnx2_unregister_cnic(struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - - mutex_lock(&bp->cnic_lock); - cp->drv_state = 0; - bnapi->cnic_present = 0; - rcu_assign_pointer(bp->cnic_ops, NULL); - mutex_unlock(&bp->cnic_lock); - synchronize_rcu(); - return 0; -} - -struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - - if (!cp->max_iscsi_conn) - return NULL; - - cp->drv_owner = THIS_MODULE; - cp->chip_id = bp->chip_id; - cp->pdev = bp->pdev; - cp->io_base = bp->regview; - cp->drv_ctl = bnx2_drv_ctl; - cp->drv_register_cnic = bnx2_register_cnic; - cp->drv_unregister_cnic = bnx2_unregister_cnic; - - return cp; -} -EXPORT_SYMBOL(bnx2_cnic_probe); - -static void -bnx2_cnic_stop(struct bnx2 *bp) -{ - struct cnic_ops *c_ops; - struct cnic_ctl_info info; - - mutex_lock(&bp->cnic_lock); - c_ops = rcu_dereference_protected(bp->cnic_ops, - lockdep_is_held(&bp->cnic_lock)); - if (c_ops) { - info.cmd = CNIC_CTL_STOP_CMD; - c_ops->cnic_ctl(bp->cnic_data, &info); - } - mutex_unlock(&bp->cnic_lock); -} - -static void -bnx2_cnic_start(struct bnx2 *bp) -{ - struct cnic_ops *c_ops; - struct cnic_ctl_info info; - - mutex_lock(&bp->cnic_lock); - c_ops = rcu_dereference_protected(bp->cnic_ops, - lockdep_is_held(&bp->cnic_lock)); - if (c_ops) { - if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { - struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; - - bnapi->cnic_tag = bnapi->last_status_idx; - } - info.cmd = CNIC_CTL_START_CMD; - c_ops->cnic_ctl(bp->cnic_data, &info); - } - mutex_unlock(&bp->cnic_lock); -} - -#else - -static void -bnx2_cnic_stop(struct bnx2 *bp) -{ -} - -static void -bnx2_cnic_start(struct bnx2 *bp) -{ -} - -#endif - -static int -bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) -{ - u32 val1; - int i, ret; - - if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { - val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); - val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; - - REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); - REG_RD(bp, BNX2_EMAC_MDIO_MODE); - - udelay(40); - } - - val1 = (bp->phy_addr << 21) | (reg << 16) | - BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT | - BNX2_EMAC_MDIO_COMM_START_BUSY; - REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); - - for (i = 0; i < 50; i++) { - udelay(10); - - val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); - if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { - udelay(5); - - val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); - val1 &= BNX2_EMAC_MDIO_COMM_DATA; - - break; - } - } - - if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) { - *val = 0x0; - ret = -EBUSY; - } - else { - *val = val1; - ret = 0; - } - - if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { - val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); - val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; - - REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); - REG_RD(bp, BNX2_EMAC_MDIO_MODE); - - udelay(40); - } - - return ret; -} - -static int -bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val) -{ - u32 val1; - int i, ret; - - if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { - val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); - val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; - - REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); - REG_RD(bp, BNX2_EMAC_MDIO_MODE); - - udelay(40); - } - - val1 = (bp->phy_addr << 21) 
| (reg << 16) | val | - BNX2_EMAC_MDIO_COMM_COMMAND_WRITE | - BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT; - REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); - - for (i = 0; i < 50; i++) { - udelay(10); - - val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); - if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { - udelay(5); - break; - } - } - - if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) - ret = -EBUSY; - else - ret = 0; - - if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { - val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); - val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; - - REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); - REG_RD(bp, BNX2_EMAC_MDIO_MODE); - - udelay(40); - } - - return ret; -} - -static void -bnx2_disable_int(struct bnx2 *bp) -{ - int i; - struct bnx2_napi *bnapi; - - for (i = 0; i < bp->irq_nvecs; i++) { - bnapi = &bp->bnx2_napi[i]; - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT); - } - REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); -} - -static void -bnx2_enable_int(struct bnx2 *bp) -{ - int i; - struct bnx2_napi *bnapi; - - for (i = 0; i < bp->irq_nvecs; i++) { - bnapi = &bp->bnx2_napi[i]; - - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT | - bnapi->last_status_idx); - - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - bnapi->last_status_idx); - } - REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); -} - -static void -bnx2_disable_int_sync(struct bnx2 *bp) -{ - int i; - - atomic_inc(&bp->intr_sem); - if (!netif_running(bp->dev)) - return; - - bnx2_disable_int(bp); - for (i = 0; i < bp->irq_nvecs; i++) - synchronize_irq(bp->irq_tbl[i].vector); -} - -static void -bnx2_napi_disable(struct bnx2 *bp) -{ - int i; - - for (i = 0; i < bp->irq_nvecs; i++) - napi_disable(&bp->bnx2_napi[i].napi); -} - -static void -bnx2_napi_enable(struct bnx2 *bp) -{ - int i; - - for (i = 0; i < bp->irq_nvecs; i++) - napi_enable(&bp->bnx2_napi[i].napi); -} - -static void -bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic) -{ - if (stop_cnic) - bnx2_cnic_stop(bp); - if (netif_running(bp->dev)) { - bnx2_napi_disable(bp); - netif_tx_disable(bp->dev); - } - bnx2_disable_int_sync(bp); - netif_carrier_off(bp->dev); /* prevent tx timeout */ -} - -static void -bnx2_netif_start(struct bnx2 *bp, bool start_cnic) -{ - if (atomic_dec_and_test(&bp->intr_sem)) { - if (netif_running(bp->dev)) { - netif_tx_wake_all_queues(bp->dev); - spin_lock_bh(&bp->phy_lock); - if (bp->link_up) - netif_carrier_on(bp->dev); - spin_unlock_bh(&bp->phy_lock); - bnx2_napi_enable(bp); - bnx2_enable_int(bp); - if (start_cnic) - bnx2_cnic_start(bp); - } - } -} - -static void -bnx2_free_tx_mem(struct bnx2 *bp) -{ - int i; - - for (i = 0; i < bp->num_tx_rings; i++) { - struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; - struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; - - if (txr->tx_desc_ring) { - dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE, - txr->tx_desc_ring, - txr->tx_desc_mapping); - txr->tx_desc_ring = NULL; - } - kfree(txr->tx_buf_ring); - txr->tx_buf_ring = NULL; - } -} - -static void -bnx2_free_rx_mem(struct bnx2 *bp) -{ - int i; - - for (i = 0; i < bp->num_rx_rings; i++) { - struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; - struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; - int j; - - for (j = 0; j < bp->rx_max_ring; j++) { - if (rxr->rx_desc_ring[j]) - dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE, - rxr->rx_desc_ring[j], - rxr->rx_desc_mapping[j]); - rxr->rx_desc_ring[j] = 
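
[Editor's note: bnx2_disable_int_sync() and bnx2_netif_start() above form a counted-disable pair: each stop bumps intr_sem, and only the start that brings the count back to zero re-enables interrupts, so nested stop/start sequences from different paths cannot re-enable early. The skeleton of the pattern, as a sketch reusing the driver's helpers:]

	/* Counted interrupt disable: only the outermost start re-enables (sketch). */
	static void my_stop(struct bnx2 *bp)
	{
		atomic_inc(&bp->intr_sem);	/* nestable: each stop adds one */
		bnx2_disable_int(bp);
	}

	static void my_start(struct bnx2 *bp)
	{
		/* atomic_dec_and_test() returns true only when the count hits zero,
		 * i.e. when this start matches the first stop.
		 */
		if (atomic_dec_and_test(&bp->intr_sem))
			bnx2_enable_int(bp);
	}
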
NULL; - } - vfree(rxr->rx_buf_ring); - rxr->rx_buf_ring = NULL; - - for (j = 0; j < bp->rx_max_pg_ring; j++) { - if (rxr->rx_pg_desc_ring[j]) - dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE, - rxr->rx_pg_desc_ring[j], - rxr->rx_pg_desc_mapping[j]); - rxr->rx_pg_desc_ring[j] = NULL; - } - vfree(rxr->rx_pg_ring); - rxr->rx_pg_ring = NULL; - } -} - -static int -bnx2_alloc_tx_mem(struct bnx2 *bp) -{ - int i; - - for (i = 0; i < bp->num_tx_rings; i++) { - struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; - struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; - - txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL); - if (txr->tx_buf_ring == NULL) - return -ENOMEM; - - txr->tx_desc_ring = - dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE, - &txr->tx_desc_mapping, GFP_KERNEL); - if (txr->tx_desc_ring == NULL) - return -ENOMEM; - } - return 0; -} - -static int -bnx2_alloc_rx_mem(struct bnx2 *bp) -{ - int i; - - for (i = 0; i < bp->num_rx_rings; i++) { - struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; - struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; - int j; - - rxr->rx_buf_ring = - vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); - if (rxr->rx_buf_ring == NULL) - return -ENOMEM; - - for (j = 0; j < bp->rx_max_ring; j++) { - rxr->rx_desc_ring[j] = - dma_alloc_coherent(&bp->pdev->dev, - RXBD_RING_SIZE, - &rxr->rx_desc_mapping[j], - GFP_KERNEL); - if (rxr->rx_desc_ring[j] == NULL) - return -ENOMEM; - - } - - if (bp->rx_pg_ring_size) { - rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE * - bp->rx_max_pg_ring); - if (rxr->rx_pg_ring == NULL) - return -ENOMEM; - - } - - for (j = 0; j < bp->rx_max_pg_ring; j++) { - rxr->rx_pg_desc_ring[j] = - dma_alloc_coherent(&bp->pdev->dev, - RXBD_RING_SIZE, - &rxr->rx_pg_desc_mapping[j], - GFP_KERNEL); - if (rxr->rx_pg_desc_ring[j] == NULL) - return -ENOMEM; - - } - } - return 0; -} - -static void -bnx2_free_mem(struct bnx2 *bp) -{ - int i; - struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; - - bnx2_free_tx_mem(bp); - bnx2_free_rx_mem(bp); - - for (i = 0; i < bp->ctx_pages; i++) { - if (bp->ctx_blk[i]) { - dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE, - bp->ctx_blk[i], - bp->ctx_blk_mapping[i]); - bp->ctx_blk[i] = NULL; - } - } - if (bnapi->status_blk.msi) { - dma_free_coherent(&bp->pdev->dev, bp->status_stats_size, - bnapi->status_blk.msi, - bp->status_blk_mapping); - bnapi->status_blk.msi = NULL; - bp->stats_blk = NULL; - } -} - -static int -bnx2_alloc_mem(struct bnx2 *bp) -{ - int i, status_blk_size, err; - struct bnx2_napi *bnapi; - void *status_blk; - - /* Combine status and statistics blocks into one allocation. 
*/ - status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block)); - if (bp->flags & BNX2_FLAG_MSIX_CAP) - status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC * - BNX2_SBLK_MSIX_ALIGN_SIZE); - bp->status_stats_size = status_blk_size + - sizeof(struct statistics_block); - - status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, - &bp->status_blk_mapping, GFP_KERNEL); - if (status_blk == NULL) - goto alloc_mem_err; - - memset(status_blk, 0, bp->status_stats_size); - - bnapi = &bp->bnx2_napi[0]; - bnapi->status_blk.msi = status_blk; - bnapi->hw_tx_cons_ptr = - &bnapi->status_blk.msi->status_tx_quick_consumer_index0; - bnapi->hw_rx_cons_ptr = - &bnapi->status_blk.msi->status_rx_quick_consumer_index0; - if (bp->flags & BNX2_FLAG_MSIX_CAP) { - for (i = 1; i < bp->irq_nvecs; i++) { - struct status_block_msix *sblk; - - bnapi = &bp->bnx2_napi[i]; - - sblk = (void *) (status_blk + - BNX2_SBLK_MSIX_ALIGN_SIZE * i); - bnapi->status_blk.msix = sblk; - bnapi->hw_tx_cons_ptr = - &sblk->status_tx_quick_consumer_index; - bnapi->hw_rx_cons_ptr = - &sblk->status_rx_quick_consumer_index; - bnapi->int_num = i << 24; - } - } - - bp->stats_blk = status_blk + status_blk_size; - - bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE; - if (bp->ctx_pages == 0) - bp->ctx_pages = 1; - for (i = 0; i < bp->ctx_pages; i++) { - bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev, - BCM_PAGE_SIZE, - &bp->ctx_blk_mapping[i], - GFP_KERNEL); - if (bp->ctx_blk[i] == NULL) - goto alloc_mem_err; - } - } - - err = bnx2_alloc_rx_mem(bp); - if (err) - goto alloc_mem_err; - - err = bnx2_alloc_tx_mem(bp); - if (err) - goto alloc_mem_err; - - return 0; - -alloc_mem_err: - bnx2_free_mem(bp); - return -ENOMEM; -} - -static void -bnx2_report_fw_link(struct bnx2 *bp) -{ - u32 fw_link_status = 0; - - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) - return; - - if (bp->link_up) { - u32 bmsr; - - switch (bp->line_speed) { - case SPEED_10: - if (bp->duplex == DUPLEX_HALF) - fw_link_status = BNX2_LINK_STATUS_10HALF; - else - fw_link_status = BNX2_LINK_STATUS_10FULL; - break; - case SPEED_100: - if (bp->duplex == DUPLEX_HALF) - fw_link_status = BNX2_LINK_STATUS_100HALF; - else - fw_link_status = BNX2_LINK_STATUS_100FULL; - break; - case SPEED_1000: - if (bp->duplex == DUPLEX_HALF) - fw_link_status = BNX2_LINK_STATUS_1000HALF; - else - fw_link_status = BNX2_LINK_STATUS_1000FULL; - break; - case SPEED_2500: - if (bp->duplex == DUPLEX_HALF) - fw_link_status = BNX2_LINK_STATUS_2500HALF; - else - fw_link_status = BNX2_LINK_STATUS_2500FULL; - break; - } - - fw_link_status |= BNX2_LINK_STATUS_LINK_UP; - - if (bp->autoneg) { - fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED; - - bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); - bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); - - if (!(bmsr & BMSR_ANEGCOMPLETE) || - bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) - fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET; - else - fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE; - } - } - else - fw_link_status = BNX2_LINK_STATUS_LINK_DOWN; - - bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status); -} - -static char * -bnx2_xceiver_str(struct bnx2 *bp) -{ - return (bp->phy_port == PORT_FIBRE) ? "SerDes" : - ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? 
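
[Editor's note: bnx2_alloc_mem() above makes one dma_alloc_coherent() call and slices it into three views: the default status block at the base, per-MSI-X status blocks at fixed strides, and the statistics block after all status blocks. A sketch of the offset arithmetic (sizes as computed above; helper name illustrative):]

	/* One coherent buffer, three views (sketch):
	 *
	 *   base + 0                              default status block
	 *   base + i * BNX2_SBLK_MSIX_ALIGN_SIZE  status block for MSI-X vector i
	 *   base + status_blk_size                statistics block
	 */
	static void *sblk_for_vec(void *base, int i)
	{
		return base + i * BNX2_SBLK_MSIX_ALIGN_SIZE;	/* i >= 1 for MSI-X */
	}
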
"Remote Copper" : - "Copper"); -} - -static void -bnx2_report_link(struct bnx2 *bp) -{ - if (bp->link_up) { - netif_carrier_on(bp->dev); - netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex", - bnx2_xceiver_str(bp), - bp->line_speed, - bp->duplex == DUPLEX_FULL ? "full" : "half"); - - if (bp->flow_ctrl) { - if (bp->flow_ctrl & FLOW_CTRL_RX) { - pr_cont(", receive "); - if (bp->flow_ctrl & FLOW_CTRL_TX) - pr_cont("& transmit "); - } - else { - pr_cont(", transmit "); - } - pr_cont("flow control ON"); - } - pr_cont("\n"); - } else { - netif_carrier_off(bp->dev); - netdev_err(bp->dev, "NIC %s Link is Down\n", - bnx2_xceiver_str(bp)); - } - - bnx2_report_fw_link(bp); -} - -static void -bnx2_resolve_flow_ctrl(struct bnx2 *bp) -{ - u32 local_adv, remote_adv; - - bp->flow_ctrl = 0; - if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != - (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { - - if (bp->duplex == DUPLEX_FULL) { - bp->flow_ctrl = bp->req_flow_ctrl; - } - return; - } - - if (bp->duplex != DUPLEX_FULL) { - return; - } - - if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && - (CHIP_NUM(bp) == CHIP_NUM_5708)) { - u32 val; - - bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); - if (val & BCM5708S_1000X_STAT1_TX_PAUSE) - bp->flow_ctrl |= FLOW_CTRL_TX; - if (val & BCM5708S_1000X_STAT1_RX_PAUSE) - bp->flow_ctrl |= FLOW_CTRL_RX; - return; - } - - bnx2_read_phy(bp, bp->mii_adv, &local_adv); - bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); - - if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { - u32 new_local_adv = 0; - u32 new_remote_adv = 0; - - if (local_adv & ADVERTISE_1000XPAUSE) - new_local_adv |= ADVERTISE_PAUSE_CAP; - if (local_adv & ADVERTISE_1000XPSE_ASYM) - new_local_adv |= ADVERTISE_PAUSE_ASYM; - if (remote_adv & ADVERTISE_1000XPAUSE) - new_remote_adv |= ADVERTISE_PAUSE_CAP; - if (remote_adv & ADVERTISE_1000XPSE_ASYM) - new_remote_adv |= ADVERTISE_PAUSE_ASYM; - - local_adv = new_local_adv; - remote_adv = new_remote_adv; - } - - /* See Table 28B-3 of 802.3ab-1999 spec. 
*/ - if (local_adv & ADVERTISE_PAUSE_CAP) { - if(local_adv & ADVERTISE_PAUSE_ASYM) { - if (remote_adv & ADVERTISE_PAUSE_CAP) { - bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; - } - else if (remote_adv & ADVERTISE_PAUSE_ASYM) { - bp->flow_ctrl = FLOW_CTRL_RX; - } - } - else { - if (remote_adv & ADVERTISE_PAUSE_CAP) { - bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; - } - } - } - else if (local_adv & ADVERTISE_PAUSE_ASYM) { - if ((remote_adv & ADVERTISE_PAUSE_CAP) && - (remote_adv & ADVERTISE_PAUSE_ASYM)) { - - bp->flow_ctrl = FLOW_CTRL_TX; - } - } -} - -static int -bnx2_5709s_linkup(struct bnx2 *bp) -{ - u32 val, speed; - - bp->link_up = 1; - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS); - bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val); - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); - - if ((bp->autoneg & AUTONEG_SPEED) == 0) { - bp->line_speed = bp->req_line_speed; - bp->duplex = bp->req_duplex; - return 0; - } - speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK; - switch (speed) { - case MII_BNX2_GP_TOP_AN_SPEED_10: - bp->line_speed = SPEED_10; - break; - case MII_BNX2_GP_TOP_AN_SPEED_100: - bp->line_speed = SPEED_100; - break; - case MII_BNX2_GP_TOP_AN_SPEED_1G: - case MII_BNX2_GP_TOP_AN_SPEED_1GKV: - bp->line_speed = SPEED_1000; - break; - case MII_BNX2_GP_TOP_AN_SPEED_2_5G: - bp->line_speed = SPEED_2500; - break; - } - if (val & MII_BNX2_GP_TOP_AN_FD) - bp->duplex = DUPLEX_FULL; - else - bp->duplex = DUPLEX_HALF; - return 0; -} - -static int -bnx2_5708s_linkup(struct bnx2 *bp) -{ - u32 val; - - bp->link_up = 1; - bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); - switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) { - case BCM5708S_1000X_STAT1_SPEED_10: - bp->line_speed = SPEED_10; - break; - case BCM5708S_1000X_STAT1_SPEED_100: - bp->line_speed = SPEED_100; - break; - case BCM5708S_1000X_STAT1_SPEED_1G: - bp->line_speed = SPEED_1000; - break; - case BCM5708S_1000X_STAT1_SPEED_2G5: - bp->line_speed = SPEED_2500; - break; - } - if (val & BCM5708S_1000X_STAT1_FD) - bp->duplex = DUPLEX_FULL; - else - bp->duplex = DUPLEX_HALF; - - return 0; -} - -static int -bnx2_5706s_linkup(struct bnx2 *bp) -{ - u32 bmcr, local_adv, remote_adv, common; - - bp->link_up = 1; - bp->line_speed = SPEED_1000; - - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - if (bmcr & BMCR_FULLDPLX) { - bp->duplex = DUPLEX_FULL; - } - else { - bp->duplex = DUPLEX_HALF; - } - - if (!(bmcr & BMCR_ANENABLE)) { - return 0; - } - - bnx2_read_phy(bp, bp->mii_adv, &local_adv); - bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); - - common = local_adv & remote_adv; - if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) { - - if (common & ADVERTISE_1000XFULL) { - bp->duplex = DUPLEX_FULL; - } - else { - bp->duplex = DUPLEX_HALF; - } - } - - return 0; -} - -static int -bnx2_copper_linkup(struct bnx2 *bp) -{ - u32 bmcr; - - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - if (bmcr & BMCR_ANENABLE) { - u32 local_adv, remote_adv, common; - - bnx2_read_phy(bp, MII_CTRL1000, &local_adv); - bnx2_read_phy(bp, MII_STAT1000, &remote_adv); - - common = local_adv & (remote_adv >> 2); - if (common & ADVERTISE_1000FULL) { - bp->line_speed = SPEED_1000; - bp->duplex = DUPLEX_FULL; - } - else if (common & ADVERTISE_1000HALF) { - bp->line_speed = SPEED_1000; - bp->duplex = DUPLEX_HALF; - } - else { - bnx2_read_phy(bp, bp->mii_adv, &local_adv); - bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); - - common = local_adv & remote_adv; - if (common & ADVERTISE_100FULL) { - bp->line_speed = SPEED_100; - bp->duplex = DUPLEX_FULL; - } - 
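
[Editor's note: the nested tests at the top of this hunk, in bnx2_resolve_flow_ctrl(), implement the symmetric/asymmetric pause resolution of IEEE 802.3 Table 28B-3. Written as a standalone helper the outcome table is easier to see; this is a sketch, using the standard mii.h advertisement bits, not a drop-in replacement:]

	/* Pause resolution per IEEE 802.3 Table 28B-3 (sketch).
	 * loc/rem are the local and link-partner advertisement words.
	 */
	static u32 resolve_pause(u32 loc, u32 rem)
	{
		if (loc & ADVERTISE_PAUSE_CAP) {
			if (rem & ADVERTISE_PAUSE_CAP)
				return FLOW_CTRL_TX | FLOW_CTRL_RX;	/* symmetric pause */
			if ((loc & ADVERTISE_PAUSE_ASYM) &&
			    (rem & ADVERTISE_PAUSE_ASYM))
				return FLOW_CTRL_RX;	/* local honors incoming pause */
		} else if ((loc & ADVERTISE_PAUSE_ASYM) &&
			   (rem & ADVERTISE_PAUSE_CAP) &&
			   (rem & ADVERTISE_PAUSE_ASYM))
			return FLOW_CTRL_TX;	/* local sends pause frames */
		return 0;
	}
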
else if (common & ADVERTISE_100HALF) { - bp->line_speed = SPEED_100; - bp->duplex = DUPLEX_HALF; - } - else if (common & ADVERTISE_10FULL) { - bp->line_speed = SPEED_10; - bp->duplex = DUPLEX_FULL; - } - else if (common & ADVERTISE_10HALF) { - bp->line_speed = SPEED_10; - bp->duplex = DUPLEX_HALF; - } - else { - bp->line_speed = 0; - bp->link_up = 0; - } - } - } - else { - if (bmcr & BMCR_SPEED100) { - bp->line_speed = SPEED_100; - } - else { - bp->line_speed = SPEED_10; - } - if (bmcr & BMCR_FULLDPLX) { - bp->duplex = DUPLEX_FULL; - } - else { - bp->duplex = DUPLEX_HALF; - } - } - - return 0; -} - -static void -bnx2_init_rx_context(struct bnx2 *bp, u32 cid) -{ - u32 val, rx_cid_addr = GET_CID_ADDR(cid); - - val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; - val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; - val |= 0x02 << 8; - - if (bp->flow_ctrl & FLOW_CTRL_TX) - val |= BNX2_L2CTX_FLOW_CTRL_ENABLE; - - bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val); -} - -static void -bnx2_init_all_rx_contexts(struct bnx2 *bp) -{ - int i; - u32 cid; - - for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) { - if (i == 1) - cid = RX_RSS_CID; - bnx2_init_rx_context(bp, cid); - } -} - -static void -bnx2_set_mac_link(struct bnx2 *bp) -{ - u32 val; - - REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620); - if (bp->link_up && (bp->line_speed == SPEED_1000) && - (bp->duplex == DUPLEX_HALF)) { - REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff); - } - - /* Configure the EMAC mode register. */ - val = REG_RD(bp, BNX2_EMAC_MODE); - - val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | - BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | - BNX2_EMAC_MODE_25G_MODE); - - if (bp->link_up) { - switch (bp->line_speed) { - case SPEED_10: - if (CHIP_NUM(bp) != CHIP_NUM_5706) { - val |= BNX2_EMAC_MODE_PORT_MII_10M; - break; - } - /* fall through */ - case SPEED_100: - val |= BNX2_EMAC_MODE_PORT_MII; - break; - case SPEED_2500: - val |= BNX2_EMAC_MODE_25G_MODE; - /* fall through */ - case SPEED_1000: - val |= BNX2_EMAC_MODE_PORT_GMII; - break; - } - } - else { - val |= BNX2_EMAC_MODE_PORT_GMII; - } - - /* Set the MAC to operate in the appropriate duplex mode. */ - if (bp->duplex == DUPLEX_HALF) - val |= BNX2_EMAC_MODE_HALF_DUPLEX; - REG_WR(bp, BNX2_EMAC_MODE, val); - - /* Enable/disable rx PAUSE. */ - bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN; - - if (bp->flow_ctrl & FLOW_CTRL_RX) - bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN; - REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode); - - /* Enable/disable tx PAUSE. */ - val = REG_RD(bp, BNX2_EMAC_TX_MODE); - val &= ~BNX2_EMAC_TX_MODE_FLOW_EN; - - if (bp->flow_ctrl & FLOW_CTRL_TX) - val |= BNX2_EMAC_TX_MODE_FLOW_EN; - REG_WR(bp, BNX2_EMAC_TX_MODE, val); - - /* Acknowledge the interrupt. 
*/ - REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE); - - bnx2_init_all_rx_contexts(bp); -} - -static void -bnx2_enable_bmsr1(struct bnx2 *bp) -{ - if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && - (CHIP_NUM(bp) == CHIP_NUM_5709)) - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, - MII_BNX2_BLK_ADDR_GP_STATUS); -} - -static void -bnx2_disable_bmsr1(struct bnx2 *bp) -{ - if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && - (CHIP_NUM(bp) == CHIP_NUM_5709)) - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, - MII_BNX2_BLK_ADDR_COMBO_IEEEB0); -} - -static int -bnx2_test_and_enable_2g5(struct bnx2 *bp) -{ - u32 up1; - int ret = 1; - - if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) - return 0; - - if (bp->autoneg & AUTONEG_SPEED) - bp->advertising |= ADVERTISED_2500baseX_Full; - - if (CHIP_NUM(bp) == CHIP_NUM_5709) - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); - - bnx2_read_phy(bp, bp->mii_up1, &up1); - if (!(up1 & BCM5708S_UP1_2G5)) { - up1 |= BCM5708S_UP1_2G5; - bnx2_write_phy(bp, bp->mii_up1, up1); - ret = 0; - } - - if (CHIP_NUM(bp) == CHIP_NUM_5709) - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, - MII_BNX2_BLK_ADDR_COMBO_IEEEB0); - - return ret; -} - -static int -bnx2_test_and_disable_2g5(struct bnx2 *bp) -{ - u32 up1; - int ret = 0; - - if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) - return 0; - - if (CHIP_NUM(bp) == CHIP_NUM_5709) - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); - - bnx2_read_phy(bp, bp->mii_up1, &up1); - if (up1 & BCM5708S_UP1_2G5) { - up1 &= ~BCM5708S_UP1_2G5; - bnx2_write_phy(bp, bp->mii_up1, up1); - ret = 1; - } - - if (CHIP_NUM(bp) == CHIP_NUM_5709) - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, - MII_BNX2_BLK_ADDR_COMBO_IEEEB0); - - return ret; -} - -static void -bnx2_enable_forced_2g5(struct bnx2 *bp) -{ - u32 uninitialized_var(bmcr); - int err; - - if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) - return; - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - u32 val; - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, - MII_BNX2_BLK_ADDR_SERDES_DIG); - if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) { - val &= ~MII_BNX2_SD_MISC1_FORCE_MSK; - val |= MII_BNX2_SD_MISC1_FORCE | - MII_BNX2_SD_MISC1_FORCE_2_5G; - bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); - } - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, - MII_BNX2_BLK_ADDR_COMBO_IEEEB0); - err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - - } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { - err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - if (!err) - bmcr |= BCM5708S_BMCR_FORCE_2500; - } else { - return; - } - - if (err) - return; - - if (bp->autoneg & AUTONEG_SPEED) { - bmcr &= ~BMCR_ANENABLE; - if (bp->req_duplex == DUPLEX_FULL) - bmcr |= BMCR_FULLDPLX; - } - bnx2_write_phy(bp, bp->mii_bmcr, bmcr); -} - -static void -bnx2_disable_forced_2g5(struct bnx2 *bp) -{ - u32 uninitialized_var(bmcr); - int err; - - if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) - return; - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - u32 val; - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, - MII_BNX2_BLK_ADDR_SERDES_DIG); - if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) { - val &= ~MII_BNX2_SD_MISC1_FORCE; - bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); - } - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, - MII_BNX2_BLK_ADDR_COMBO_IEEEB0); - err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - - } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { - err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - if (!err) - bmcr &= ~BCM5708S_BMCR_FORCE_2500; - } else { - return; - } - - if (err) - return; - - if (bp->autoneg & AUTONEG_SPEED) - bmcr |= 
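
[Editor's note: the 5709 SerDes registers touched above live behind a bank-select register: MII_BNX2_BLK_ADDR picks the block, the target register is read-modify-written, then the block address is restored to COMBO_IEEEB0 so later MII accesses see the expected bank. The access pattern reduced to its skeleton, as a sketch reusing the driver's accessors:]

	/* Banked PHY register read-modify-write skeleton (sketch). */
	static int rmw_banked(struct bnx2 *bp, u32 blk, u32 reg, u32 clr, u32 set)
	{
		u32 val;
		int err;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, blk);	/* select bank */
		err = bnx2_read_phy(bp, reg, &val);
		if (!err)
			bnx2_write_phy(bp, reg, (val & ~clr) | set);
		/* always restore the default bank for subsequent accesses */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		return err;
	}
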
BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
- bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
-}
-
-static void
-bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
-{
- u32 val;
-
- bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
- bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
- if (start)
- bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
- else
- bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
-}
-
-static int
-bnx2_set_link(struct bnx2 *bp)
-{
- u32 bmsr;
- u8 link_up;
-
- if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
- bp->link_up = 1;
- return 0;
- }
-
- if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
- return 0;
-
- link_up = bp->link_up;
-
- bnx2_enable_bmsr1(bp);
- bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
- bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
- bnx2_disable_bmsr1(bp);
-
- if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
- (CHIP_NUM(bp) == CHIP_NUM_5706)) {
- u32 val, an_dbg;
-
- if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
- bnx2_5706s_force_link_dn(bp, 0);
- bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
- }
- val = REG_RD(bp, BNX2_EMAC_STATUS);
-
- bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
- bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
- bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
-
- if ((val & BNX2_EMAC_STATUS_LINK) &&
- !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
- bmsr |= BMSR_LSTATUS;
- else
- bmsr &= ~BMSR_LSTATUS;
- }
-
- if (bmsr & BMSR_LSTATUS) {
- bp->link_up = 1;
-
- if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
- if (CHIP_NUM(bp) == CHIP_NUM_5706)
- bnx2_5706s_linkup(bp);
- else if (CHIP_NUM(bp) == CHIP_NUM_5708)
- bnx2_5708s_linkup(bp);
- else if (CHIP_NUM(bp) == CHIP_NUM_5709)
- bnx2_5709s_linkup(bp);
- }
- else {
- bnx2_copper_linkup(bp);
- }
- bnx2_resolve_flow_ctrl(bp);
- }
- else {
- if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
- (bp->autoneg & AUTONEG_SPEED))
- bnx2_disable_forced_2g5(bp);
-
- if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
- u32 bmcr;
-
- bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
- bmcr |= BMCR_ANENABLE;
- bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
-
- bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
- }
- bp->link_up = 0;
- }
-
- if (bp->link_up != link_up) {
- bnx2_report_link(bp);
- }
-
- bnx2_set_mac_link(bp);
-
- return 0;
-}
-
-static int
-bnx2_reset_phy(struct bnx2 *bp)
-{
- int i;
- u32 reg;
-
- bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
-
-#define PHY_RESET_MAX_WAIT 100
- for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
- udelay(10);
-
- bnx2_read_phy(bp, bp->mii_bmcr, &reg);
- if (!(reg & BMCR_RESET)) {
- udelay(20);
- break;
- }
- }
- if (i == PHY_RESET_MAX_WAIT) {
- return -EBUSY;
- }
- return 0;
-}
-
-static u32
-bnx2_phy_get_pause_adv(struct bnx2 *bp)
-{
- u32 adv = 0;
-
- if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
- (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
-
- if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
- adv = ADVERTISE_1000XPAUSE;
- }
- else {
- adv = ADVERTISE_PAUSE_CAP;
- }
- }
- else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
- if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
- adv = ADVERTISE_1000XPSE_ASYM;
- }
- else {
- adv = ADVERTISE_PAUSE_ASYM;
- }
- }
- else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
- if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
- adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
- }
- else {
- adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- }
- }
- return adv;
-}
-
-static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
-
-static int
-bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
-__releases(&bp->phy_lock) 
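
[Editor's note: bnx2_set_link() above reads the status register twice in a row because the MII link-status bit is latched-low: the first read returns the latched value (link lost at any point since the last read) and clears the latch, the second returns the current state. The idiom in isolation, as a sketch using the driver's accessor:]

	/* BMSR link bit is latched-low; read twice to get current state (sketch). */
	static int phy_link_is_up(struct bnx2 *bp)
	{
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);	/* clears the latch */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);	/* current status */
		return !!(bmsr & BMSR_LSTATUS);
	}
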
-__acquires(&bp->phy_lock) -{ - u32 speed_arg = 0, pause_adv; - - pause_adv = bnx2_phy_get_pause_adv(bp); - - if (bp->autoneg & AUTONEG_SPEED) { - speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG; - if (bp->advertising & ADVERTISED_10baseT_Half) - speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF; - if (bp->advertising & ADVERTISED_10baseT_Full) - speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL; - if (bp->advertising & ADVERTISED_100baseT_Half) - speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF; - if (bp->advertising & ADVERTISED_100baseT_Full) - speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL; - if (bp->advertising & ADVERTISED_1000baseT_Full) - speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL; - if (bp->advertising & ADVERTISED_2500baseX_Full) - speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; - } else { - if (bp->req_line_speed == SPEED_2500) - speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; - else if (bp->req_line_speed == SPEED_1000) - speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL; - else if (bp->req_line_speed == SPEED_100) { - if (bp->req_duplex == DUPLEX_FULL) - speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL; - else - speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF; - } else if (bp->req_line_speed == SPEED_10) { - if (bp->req_duplex == DUPLEX_FULL) - speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL; - else - speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF; - } - } - - if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP)) - speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE; - if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM)) - speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE; - - if (port == PORT_TP) - speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE | - BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED; - - bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg); - - spin_unlock_bh(&bp->phy_lock); - bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0); - spin_lock_bh(&bp->phy_lock); - - return 0; -} - -static int -bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) -__releases(&bp->phy_lock) -__acquires(&bp->phy_lock) -{ - u32 adv, bmcr; - u32 new_adv = 0; - - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) - return bnx2_setup_remote_phy(bp, port); - - if (!(bp->autoneg & AUTONEG_SPEED)) { - u32 new_bmcr; - int force_link_down = 0; - - if (bp->req_line_speed == SPEED_2500) { - if (!bnx2_test_and_enable_2g5(bp)) - force_link_down = 1; - } else if (bp->req_line_speed == SPEED_1000) { - if (bnx2_test_and_disable_2g5(bp)) - force_link_down = 1; - } - bnx2_read_phy(bp, bp->mii_adv, &adv); - adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF); - - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - new_bmcr = bmcr & ~BMCR_ANENABLE; - new_bmcr |= BMCR_SPEED1000; - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - if (bp->req_line_speed == SPEED_2500) - bnx2_enable_forced_2g5(bp); - else if (bp->req_line_speed == SPEED_1000) { - bnx2_disable_forced_2g5(bp); - new_bmcr &= ~0x2000; - } - - } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { - if (bp->req_line_speed == SPEED_2500) - new_bmcr |= BCM5708S_BMCR_FORCE_2500; - else - new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500; - } - - if (bp->req_duplex == DUPLEX_FULL) { - adv |= ADVERTISE_1000XFULL; - new_bmcr |= BMCR_FULLDPLX; - } - else { - adv |= ADVERTISE_1000XHALF; - new_bmcr &= ~BMCR_FULLDPLX; - } - if ((new_bmcr != bmcr) || (force_link_down)) { - /* Force a link down visible on the other side */ - if (bp->link_up) { - bnx2_write_phy(bp, bp->mii_adv, adv & - ~(ADVERTISE_1000XFULL | - ADVERTISE_1000XHALF)); - bnx2_write_phy(bp, bp->mii_bmcr, bmcr | - 
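
[Editor's note: bnx2_setup_remote_phy() above is entered with phy_lock held (hence the __releases/__acquires annotations), but bnx2_fw_sync() sleeps while polling the firmware mailbox, so the spinlock is dropped around the call and retaken afterwards; any state derived before the unlock must be treated as stale on return. The shape of the pattern, as a sketch (function name illustrative):]

	/* Drop a spinlock around a sleeping firmware call (sketch). */
	static void do_fw_op(struct bnx2 *bp, u32 arg)
		__releases(&bp->phy_lock)
		__acquires(&bp->phy_lock)
	{
		bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, arg);	/* stage the argument */
		spin_unlock_bh(&bp->phy_lock);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);	/* may msleep() */
		spin_lock_bh(&bp->phy_lock);
		/* phy state may have changed while unlocked; revalidate if needed */
	}
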
BMCR_ANRESTART | BMCR_ANENABLE);
-
- bp->link_up = 0;
- netif_carrier_off(bp->dev);
- bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
- bnx2_report_link(bp);
- }
- bnx2_write_phy(bp, bp->mii_adv, adv);
- bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
- } else {
- bnx2_resolve_flow_ctrl(bp);
- bnx2_set_mac_link(bp);
- }
- return 0;
- }
-
- bnx2_test_and_enable_2g5(bp);
-
- if (bp->advertising & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000XFULL;
-
- new_adv |= bnx2_phy_get_pause_adv(bp);
-
- bnx2_read_phy(bp, bp->mii_adv, &adv);
- bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
-
- bp->serdes_an_pending = 0;
- if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
- /* Force a link down visible on the other side */
- if (bp->link_up) {
- bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
- spin_unlock_bh(&bp->phy_lock);
- msleep(20);
- spin_lock_bh(&bp->phy_lock);
- }
-
- bnx2_write_phy(bp, bp->mii_adv, new_adv);
- bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
- BMCR_ANENABLE);
- /* Speed up link-up time when the link partner
- * does not autonegotiate which is very common
- * in blade servers. Some blade servers use
- * IPMI for keyboard input and it's important
- * to minimize link disruptions. Autoneg. involves
- * exchanging base pages plus 3 next pages and
- * normally completes in about 120 msec.
- */
- bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
- bp->serdes_an_pending = 1;
- mod_timer(&bp->timer, jiffies + bp->current_interval);
- } else {
- bnx2_resolve_flow_ctrl(bp);
- bnx2_set_mac_link(bp);
- }
-
- return 0;
-}
-
-#define ETHTOOL_ALL_FIBRE_SPEED \
- (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
- (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
- (ADVERTISED_1000baseT_Full)
-
-#define ETHTOOL_ALL_COPPER_SPEED \
- (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
- ADVERTISED_1000baseT_Full)
-
-#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
- ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
-
-#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
-
-static void
-bnx2_set_default_remote_link(struct bnx2 *bp)
-{
- u32 link;
-
- if (bp->phy_port == PORT_TP)
- link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
- else
- link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
-
- if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
- bp->req_line_speed = 0;
- bp->autoneg |= AUTONEG_SPEED;
- bp->advertising = ADVERTISED_Autoneg;
- if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
- bp->advertising |= ADVERTISED_10baseT_Half;
- if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
- bp->advertising |= ADVERTISED_10baseT_Full;
- if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
- bp->advertising |= ADVERTISED_100baseT_Half;
- if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
- bp->advertising |= ADVERTISED_100baseT_Full;
- if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
- bp->advertising |= ADVERTISED_1000baseT_Full;
- if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
- bp->advertising |= ADVERTISED_2500baseX_Full;
- } else {
- bp->autoneg = 0;
- bp->advertising = 0;
- bp->req_duplex = DUPLEX_FULL;
- if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
- bp->req_line_speed = SPEED_10;
- if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
- bp->req_duplex = DUPLEX_HALF;
- }
- if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
- bp->req_line_speed = SPEED_100;
- if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
- bp->req_duplex = DUPLEX_HALF;
- }
- if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
- 
bp->req_line_speed = SPEED_1000; - if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) - bp->req_line_speed = SPEED_2500; - } -} - -static void -bnx2_set_default_link(struct bnx2 *bp) -{ - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { - bnx2_set_default_remote_link(bp); - return; - } - - bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; - bp->req_line_speed = 0; - if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { - u32 reg; - - bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; - - reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG); - reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK; - if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) { - bp->autoneg = 0; - bp->req_line_speed = bp->line_speed = SPEED_1000; - bp->req_duplex = DUPLEX_FULL; - } - } else - bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; -} - -static void -bnx2_send_heart_beat(struct bnx2 *bp) -{ - u32 msg; - u32 addr; - - spin_lock(&bp->indirect_lock); - msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK); - addr = bp->shmem_base + BNX2_DRV_PULSE_MB; - REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr); - REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg); - spin_unlock(&bp->indirect_lock); -} - -static void -bnx2_remote_phy_event(struct bnx2 *bp) -{ - u32 msg; - u8 link_up = bp->link_up; - u8 old_port; - - msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS); - - if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED) - bnx2_send_heart_beat(bp); - - msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED; - - if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN) - bp->link_up = 0; - else { - u32 speed; - - bp->link_up = 1; - speed = msg & BNX2_LINK_STATUS_SPEED_MASK; - bp->duplex = DUPLEX_FULL; - switch (speed) { - case BNX2_LINK_STATUS_10HALF: - bp->duplex = DUPLEX_HALF; - case BNX2_LINK_STATUS_10FULL: - bp->line_speed = SPEED_10; - break; - case BNX2_LINK_STATUS_100HALF: - bp->duplex = DUPLEX_HALF; - case BNX2_LINK_STATUS_100BASE_T4: - case BNX2_LINK_STATUS_100FULL: - bp->line_speed = SPEED_100; - break; - case BNX2_LINK_STATUS_1000HALF: - bp->duplex = DUPLEX_HALF; - case BNX2_LINK_STATUS_1000FULL: - bp->line_speed = SPEED_1000; - break; - case BNX2_LINK_STATUS_2500HALF: - bp->duplex = DUPLEX_HALF; - case BNX2_LINK_STATUS_2500FULL: - bp->line_speed = SPEED_2500; - break; - default: - bp->line_speed = 0; - break; - } - - bp->flow_ctrl = 0; - if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != - (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { - if (bp->duplex == DUPLEX_FULL) - bp->flow_ctrl = bp->req_flow_ctrl; - } else { - if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED) - bp->flow_ctrl |= FLOW_CTRL_TX; - if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED) - bp->flow_ctrl |= FLOW_CTRL_RX; - } - - old_port = bp->phy_port; - if (msg & BNX2_LINK_STATUS_SERDES_LINK) - bp->phy_port = PORT_FIBRE; - else - bp->phy_port = PORT_TP; - - if (old_port != bp->phy_port) - bnx2_set_default_link(bp); - - } - if (bp->link_up != link_up) - bnx2_report_link(bp); - - bnx2_set_mac_link(bp); -} - -static int -bnx2_set_remote_link(struct bnx2 *bp) -{ - u32 evt_code; - - evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB); - switch (evt_code) { - case BNX2_FW_EVT_CODE_LINK_EVENT: - bnx2_remote_phy_event(bp); - break; - case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT: - default: - bnx2_send_heart_beat(bp); - break; - } - return 0; -} - -static int -bnx2_setup_copper_phy(struct bnx2 *bp) -__releases(&bp->phy_lock) -__acquires(&bp->phy_lock) -{ - u32 bmcr; - u32 new_bmcr; - - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - - if (bp->autoneg & AUTONEG_SPEED) { - u32 adv_reg, 
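
[Editor's note: in bnx2_remote_phy_event() above, each *HALF case deliberately falls through to its *FULL neighbour: the pair shares a line speed, and the half-duplex case only overrides the duplex default set just before the switch. A table-driven equivalent makes the mapping explicit; this is a sketch (100BASE-T4 omitted for brevity), not a proposed change:]

	/* Speed/duplex decode equivalent to the fall-through switch (sketch). */
	static const struct { u32 code; int speed; int duplex; } link_map[] = {
		{ BNX2_LINK_STATUS_10HALF,   SPEED_10,   DUPLEX_HALF },
		{ BNX2_LINK_STATUS_10FULL,   SPEED_10,   DUPLEX_FULL },
		{ BNX2_LINK_STATUS_100HALF,  SPEED_100,  DUPLEX_HALF },
		{ BNX2_LINK_STATUS_100FULL,  SPEED_100,  DUPLEX_FULL },
		{ BNX2_LINK_STATUS_1000HALF, SPEED_1000, DUPLEX_HALF },
		{ BNX2_LINK_STATUS_1000FULL, SPEED_1000, DUPLEX_FULL },
		{ BNX2_LINK_STATUS_2500HALF, SPEED_2500, DUPLEX_HALF },
		{ BNX2_LINK_STATUS_2500FULL, SPEED_2500, DUPLEX_FULL },
	};

	static void decode_link(u32 msg, int *speed, int *duplex)
	{
		int i;

		*speed = 0;			/* unknown code => link speed 0 */
		*duplex = DUPLEX_FULL;
		for (i = 0; i < ARRAY_SIZE(link_map); i++)
			if ((msg & BNX2_LINK_STATUS_SPEED_MASK) == link_map[i].code) {
				*speed = link_map[i].speed;
				*duplex = link_map[i].duplex;
				break;
			}
	}
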
adv1000_reg; - u32 new_adv_reg = 0; - u32 new_adv1000_reg = 0; - - bnx2_read_phy(bp, bp->mii_adv, &adv_reg); - adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | - ADVERTISE_PAUSE_ASYM); - - bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg); - adv1000_reg &= PHY_ALL_1000_SPEED; - - if (bp->advertising & ADVERTISED_10baseT_Half) - new_adv_reg |= ADVERTISE_10HALF; - if (bp->advertising & ADVERTISED_10baseT_Full) - new_adv_reg |= ADVERTISE_10FULL; - if (bp->advertising & ADVERTISED_100baseT_Half) - new_adv_reg |= ADVERTISE_100HALF; - if (bp->advertising & ADVERTISED_100baseT_Full) - new_adv_reg |= ADVERTISE_100FULL; - if (bp->advertising & ADVERTISED_1000baseT_Full) - new_adv1000_reg |= ADVERTISE_1000FULL; - - new_adv_reg |= ADVERTISE_CSMA; - - new_adv_reg |= bnx2_phy_get_pause_adv(bp); - - if ((adv1000_reg != new_adv1000_reg) || - (adv_reg != new_adv_reg) || - ((bmcr & BMCR_ANENABLE) == 0)) { - - bnx2_write_phy(bp, bp->mii_adv, new_adv_reg); - bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg); - bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART | - BMCR_ANENABLE); - } - else if (bp->link_up) { - /* Flow ctrl may have changed from auto to forced */ - /* or vice-versa. */ - - bnx2_resolve_flow_ctrl(bp); - bnx2_set_mac_link(bp); - } - return 0; - } - - new_bmcr = 0; - if (bp->req_line_speed == SPEED_100) { - new_bmcr |= BMCR_SPEED100; - } - if (bp->req_duplex == DUPLEX_FULL) { - new_bmcr |= BMCR_FULLDPLX; - } - if (new_bmcr != bmcr) { - u32 bmsr; - - bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); - bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); - - if (bmsr & BMSR_LSTATUS) { - /* Force link down */ - bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); - spin_unlock_bh(&bp->phy_lock); - msleep(50); - spin_lock_bh(&bp->phy_lock); - - bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); - bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); - } - - bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); - - /* Normally, the new speed is setup after the link has - * gone down and up again. In some cases, link will not go - * down so we need to set up the new speed here. 
- */ - if (bmsr & BMSR_LSTATUS) { - bp->line_speed = bp->req_line_speed; - bp->duplex = bp->req_duplex; - bnx2_resolve_flow_ctrl(bp); - bnx2_set_mac_link(bp); - } - } else { - bnx2_resolve_flow_ctrl(bp); - bnx2_set_mac_link(bp); - } - return 0; -} - -static int -bnx2_setup_phy(struct bnx2 *bp, u8 port) -__releases(&bp->phy_lock) -__acquires(&bp->phy_lock) -{ - if (bp->loopback == MAC_LOOPBACK) - return 0; - - if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { - return bnx2_setup_serdes_phy(bp, port); - } - else { - return bnx2_setup_copper_phy(bp); - } -} - -static int -bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy) -{ - u32 val; - - bp->mii_bmcr = MII_BMCR + 0x10; - bp->mii_bmsr = MII_BMSR + 0x10; - bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1; - bp->mii_adv = MII_ADVERTISE + 0x10; - bp->mii_lpa = MII_LPA + 0x10; - bp->mii_up1 = MII_BNX2_OVER1G_UP1; - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER); - bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD); - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); - if (reset_phy) - bnx2_reset_phy(bp); - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG); - - bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val); - val &= ~MII_BNX2_SD_1000XCTL1_AUTODET; - val |= MII_BNX2_SD_1000XCTL1_FIBER; - bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val); - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); - bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val); - if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) - val |= BCM5708S_UP1_2G5; - else - val &= ~BCM5708S_UP1_2G5; - bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val); - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG); - bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val); - val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM; - bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val); - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0); - - val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN | - MII_BNX2_CL73_BAM_NP_AFT_BP_EN; - bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val); - - bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); - - return 0; -} - -static int -bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy) -{ - u32 val; - - if (reset_phy) - bnx2_reset_phy(bp); - - bp->mii_up1 = BCM5708S_UP1; - - bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3); - bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE); - bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); - - bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val); - val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN; - bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val); - - bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val); - val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN; - bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val); - - if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) { - bnx2_read_phy(bp, BCM5708S_UP1, &val); - val |= BCM5708S_UP1_2G5; - bnx2_write_phy(bp, BCM5708S_UP1, val); - } - - if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || - (CHIP_ID(bp) == CHIP_ID_5708_B0) || - (CHIP_ID(bp) == CHIP_ID_5708_B1)) { - /* increase tx signal amplitude */ - bnx2_write_phy(bp, BCM5708S_BLK_ADDR, - BCM5708S_BLK_ADDR_TX_MISC); - bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val); - val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM; - bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val); - bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); - } - - val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) & - BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK; - - if 
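
[Editor's note: the forced-speed copper path above programs BMCR directly, with autonegotiation disabled; when neither speed bit is set the PHY runs at 10 Mb/s, and BMCR_SPEED100 selects 100 Mb/s. A sketch of how the forced control word is composed (helper name illustrative):]

	/* Forced copper speed/duplex via BMCR (sketch). */
	static u32 bmcr_for(int speed, int duplex)
	{
		u32 bmcr = 0;

		if (speed == SPEED_100)
			bmcr |= BMCR_SPEED100;	/* neither speed bit set => 10 Mb/s */
		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
		return bmcr;	/* BMCR_ANENABLE deliberately left clear */
	}
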
(val) { - u32 is_backplane; - - is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG); - if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) { - bnx2_write_phy(bp, BCM5708S_BLK_ADDR, - BCM5708S_BLK_ADDR_TX_MISC); - bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val); - bnx2_write_phy(bp, BCM5708S_BLK_ADDR, - BCM5708S_BLK_ADDR_DIG); - } - } - return 0; -} - -static int -bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy) -{ - if (reset_phy) - bnx2_reset_phy(bp); - - bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; - - if (CHIP_NUM(bp) == CHIP_NUM_5706) - REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300); - - if (bp->dev->mtu > 1500) { - u32 val; - - /* Set extended packet length bit */ - bnx2_write_phy(bp, 0x18, 0x7); - bnx2_read_phy(bp, 0x18, &val); - bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000); - - bnx2_write_phy(bp, 0x1c, 0x6c00); - bnx2_read_phy(bp, 0x1c, &val); - bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02); - } - else { - u32 val; - - bnx2_write_phy(bp, 0x18, 0x7); - bnx2_read_phy(bp, 0x18, &val); - bnx2_write_phy(bp, 0x18, val & ~0x4007); - - bnx2_write_phy(bp, 0x1c, 0x6c00); - bnx2_read_phy(bp, 0x1c, &val); - bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00); - } - - return 0; -} - -static int -bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy) -{ - u32 val; - - if (reset_phy) - bnx2_reset_phy(bp); - - if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) { - bnx2_write_phy(bp, 0x18, 0x0c00); - bnx2_write_phy(bp, 0x17, 0x000a); - bnx2_write_phy(bp, 0x15, 0x310b); - bnx2_write_phy(bp, 0x17, 0x201f); - bnx2_write_phy(bp, 0x15, 0x9506); - bnx2_write_phy(bp, 0x17, 0x401f); - bnx2_write_phy(bp, 0x15, 0x14e2); - bnx2_write_phy(bp, 0x18, 0x0400); - } - - if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) { - bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, - MII_BNX2_DSP_EXPAND_REG | 0x8); - bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val); - val &= ~(1 << 8); - bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val); - } - - if (bp->dev->mtu > 1500) { - /* Set extended packet length bit */ - bnx2_write_phy(bp, 0x18, 0x7); - bnx2_read_phy(bp, 0x18, &val); - bnx2_write_phy(bp, 0x18, val | 0x4000); - - bnx2_read_phy(bp, 0x10, &val); - bnx2_write_phy(bp, 0x10, val | 0x1); - } - else { - bnx2_write_phy(bp, 0x18, 0x7); - bnx2_read_phy(bp, 0x18, &val); - bnx2_write_phy(bp, 0x18, val & ~0x4007); - - bnx2_read_phy(bp, 0x10, &val); - bnx2_write_phy(bp, 0x10, val & ~0x1); - } - - /* ethernet@wirespeed */ - bnx2_write_phy(bp, 0x18, 0x7007); - bnx2_read_phy(bp, 0x18, &val); - bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4)); - return 0; -} - - -static int -bnx2_init_phy(struct bnx2 *bp, int reset_phy) -__releases(&bp->phy_lock) -__acquires(&bp->phy_lock) -{ - u32 val; - int rc = 0; - - bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK; - bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY; - - bp->mii_bmcr = MII_BMCR; - bp->mii_bmsr = MII_BMSR; - bp->mii_bmsr1 = MII_BMSR; - bp->mii_adv = MII_ADVERTISE; - bp->mii_lpa = MII_LPA; - - REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); - - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) - goto setup_phy; - - bnx2_read_phy(bp, MII_PHYSID1, &val); - bp->phy_id = val << 16; - bnx2_read_phy(bp, MII_PHYSID2, &val); - bp->phy_id |= val & 0xffff; - - if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { - if (CHIP_NUM(bp) == CHIP_NUM_5706) - rc = bnx2_init_5706s_phy(bp, reset_phy); - else if (CHIP_NUM(bp) == CHIP_NUM_5708) - rc = bnx2_init_5708s_phy(bp, reset_phy); - else if (CHIP_NUM(bp) == CHIP_NUM_5709) - rc = bnx2_init_5709s_phy(bp, reset_phy); - } - else { - rc = 
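
[Editor's note: bnx2_init_phy() above assembles the 32-bit PHY identifier from the two standard MII ID registers: PHYSID1 supplies the high 16 bits and PHYSID2 the low 16 (OUI bits plus model and revision). The composition in isolation, as a sketch using the driver's accessor:]

	/* Compose the 32-bit PHY id from the two MII ID registers (sketch). */
	static u32 read_phy_id(struct bnx2 *bp)
	{
		u32 hi, lo;

		bnx2_read_phy(bp, MII_PHYSID1, &hi);	/* bits 31..16 */
		bnx2_read_phy(bp, MII_PHYSID2, &lo);	/* bits 15..0  */
		return (hi << 16) | (lo & 0xffff);
	}
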
bnx2_init_copper_phy(bp, reset_phy); - } - -setup_phy: - if (!rc) - rc = bnx2_setup_phy(bp, bp->phy_port); - - return rc; -} - -static int -bnx2_set_mac_loopback(struct bnx2 *bp) -{ - u32 mac_mode; - - mac_mode = REG_RD(bp, BNX2_EMAC_MODE); - mac_mode &= ~BNX2_EMAC_MODE_PORT; - mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK; - REG_WR(bp, BNX2_EMAC_MODE, mac_mode); - bp->link_up = 1; - return 0; -} - -static int bnx2_test_link(struct bnx2 *); - -static int -bnx2_set_phy_loopback(struct bnx2 *bp) -{ - u32 mac_mode; - int rc, i; - - spin_lock_bh(&bp->phy_lock); - rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX | - BMCR_SPEED1000); - spin_unlock_bh(&bp->phy_lock); - if (rc) - return rc; - - for (i = 0; i < 10; i++) { - if (bnx2_test_link(bp) == 0) - break; - msleep(100); - } - - mac_mode = REG_RD(bp, BNX2_EMAC_MODE); - mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | - BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | - BNX2_EMAC_MODE_25G_MODE); - - mac_mode |= BNX2_EMAC_MODE_PORT_GMII; - REG_WR(bp, BNX2_EMAC_MODE, mac_mode); - bp->link_up = 1; - return 0; -} - -static void -bnx2_dump_mcp_state(struct bnx2 *bp) -{ - struct net_device *dev = bp->dev; - u32 mcp_p0, mcp_p1; - - netdev_err(dev, "<--- start MCP states dump --->\n"); - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - mcp_p0 = BNX2_MCP_STATE_P0; - mcp_p1 = BNX2_MCP_STATE_P1; - } else { - mcp_p0 = BNX2_MCP_STATE_P0_5708; - mcp_p1 = BNX2_MCP_STATE_P1_5708; - } - netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n", - bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1)); - netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n", - bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE), - bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE), - bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK)); - netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n", - bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER), - bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER), - bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION)); - netdev_err(dev, "DEBUG: shmem states:\n"); - netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]", - bnx2_shmem_rd(bp, BNX2_DRV_MB), - bnx2_shmem_rd(bp, BNX2_FW_MB), - bnx2_shmem_rd(bp, BNX2_LINK_STATUS)); - pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB)); - netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]", - bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE), - bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE)); - pr_cont(" condition[%08x]\n", - bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION)); - DP_SHMEM_LINE(bp, 0x3cc); - DP_SHMEM_LINE(bp, 0x3dc); - DP_SHMEM_LINE(bp, 0x3ec); - netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc)); - netdev_err(dev, "<--- end MCP states dump --->\n"); -} - -static int -bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent) -{ - int i; - u32 val; - - bp->fw_wr_seq++; - msg_data |= bp->fw_wr_seq; - - bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); - - if (!ack) - return 0; - - /* wait for an acknowledgement. */ - for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) { - msleep(10); - - val = bnx2_shmem_rd(bp, BNX2_FW_MB); - - if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ)) - break; - } - if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0) - return 0; - - /* If we timed out, inform the firmware that this is the case. 
*/ - if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) { - msg_data &= ~BNX2_DRV_MSG_CODE; - msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT; - - bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); - if (!silent) { - pr_err("fw sync timeout, reset code = %x\n", msg_data); - bnx2_dump_mcp_state(bp); - } - - return -EBUSY; - } - - if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK) - return -EIO; - - return 0; -} - -static int -bnx2_init_5709_context(struct bnx2 *bp) -{ - int i, ret = 0; - u32 val; - - val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12); - val |= (BCM_PAGE_BITS - 8) << 16; - REG_WR(bp, BNX2_CTX_COMMAND, val); - for (i = 0; i < 10; i++) { - val = REG_RD(bp, BNX2_CTX_COMMAND); - if (!(val & BNX2_CTX_COMMAND_MEM_INIT)) - break; - udelay(2); - } - if (val & BNX2_CTX_COMMAND_MEM_INIT) - return -EBUSY; - - for (i = 0; i < bp->ctx_pages; i++) { - int j; - - if (bp->ctx_blk[i]) - memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE); - else - return -ENOMEM; - - REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, - (bp->ctx_blk_mapping[i] & 0xffffffff) | - BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID); - REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1, - (u64) bp->ctx_blk_mapping[i] >> 32); - REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i | - BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); - for (j = 0; j < 10; j++) { - - val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL); - if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) - break; - udelay(5); - } - if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { - ret = -EBUSY; - break; - } - } - return ret; -} - -static void -bnx2_init_context(struct bnx2 *bp) -{ - u32 vcid; - - vcid = 96; - while (vcid) { - u32 vcid_addr, pcid_addr, offset; - int i; - - vcid--; - - if (CHIP_ID(bp) == CHIP_ID_5706_A0) { - u32 new_vcid; - - vcid_addr = GET_PCID_ADDR(vcid); - if (vcid & 0x8) { - new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7); - } - else { - new_vcid = vcid; - } - pcid_addr = GET_PCID_ADDR(new_vcid); - } - else { - vcid_addr = GET_CID_ADDR(vcid); - pcid_addr = vcid_addr; - } - - for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) { - vcid_addr += (i << PHY_CTX_SHIFT); - pcid_addr += (i << PHY_CTX_SHIFT); - - REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr); - REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); - - /* Zero out the context. */ - for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) - bnx2_ctx_wr(bp, vcid_addr, offset, 0); - } - } -} - -static int -bnx2_alloc_bad_rbuf(struct bnx2 *bp) -{ - u16 *good_mbuf; - u32 good_mbuf_cnt; - u32 val; - - good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); - if (good_mbuf == NULL) { - pr_err("Failed to allocate memory in %s\n", __func__); - return -ENOMEM; - } - - REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, - BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE); - - good_mbuf_cnt = 0; - - /* Allocate a bunch of mbufs and save the good ones in an array. */ - val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1); - while (val & BNX2_RBUF_STATUS1_FREE_COUNT) { - bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND, - BNX2_RBUF_COMMAND_ALLOC_REQ); - - val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC); - - val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE; - - /* The addresses with Bit 9 set are bad memory blocks. */ - if (!(val & (1 << 9))) { - good_mbuf[good_mbuf_cnt] = (u16) val; - good_mbuf_cnt++; - } - - val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1); - } - - /* Free the good ones back to the mbuf pool thus discarding - * all the bad ones. 
*/ - while (good_mbuf_cnt) { - good_mbuf_cnt--; - - val = good_mbuf[good_mbuf_cnt]; - val = (val << 9) | val | 1; - - bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val); - } - kfree(good_mbuf); - return 0; -} - -static void -bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos) -{ - u32 val; - - val = (mac_addr[0] << 8) | mac_addr[1]; - - REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val); - - val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | - (mac_addr[4] << 8) | mac_addr[5]; - - REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val); -} - -static inline int -bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) -{ - dma_addr_t mapping; - struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; - struct rx_bd *rxbd = - &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)]; - struct page *page = alloc_page(gfp); - - if (!page) - return -ENOMEM; - mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); - if (dma_mapping_error(&bp->pdev->dev, mapping)) { - __free_page(page); - return -EIO; - } - - rx_pg->page = page; - dma_unmap_addr_set(rx_pg, mapping, mapping); - rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; - rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; - return 0; -} - -static void -bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) -{ - struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; - struct page *page = rx_pg->page; - - if (!page) - return; - - dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), - PAGE_SIZE, PCI_DMA_FROMDEVICE); - - __free_page(page); - rx_pg->page = NULL; -} - -static inline int -bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) -{ - struct sk_buff *skb; - struct sw_bd *rx_buf = &rxr->rx_buf_ring[index]; - dma_addr_t mapping; - struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; - unsigned long align; - - skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp); - if (skb == NULL) { - return -ENOMEM; - } - - if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1)))) - skb_reserve(skb, BNX2_RX_ALIGN - align); - - mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); - if (dma_mapping_error(&bp->pdev->dev, mapping)) { - dev_kfree_skb(skb); - return -EIO; - } - - rx_buf->skb = skb; - rx_buf->desc = (struct l2_fhdr *) skb->data; - dma_unmap_addr_set(rx_buf, mapping, mapping); - - rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; - rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; - - rxr->rx_prod_bseq += bp->rx_buf_use_size; - - return 0; -} - -static int -bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event) -{ - struct status_block *sblk = bnapi->status_blk.msi; - u32 new_link_state, old_link_state; - int is_set = 1; - - new_link_state = sblk->status_attn_bits & event; - old_link_state = sblk->status_attn_bits_ack & event; - if (new_link_state != old_link_state) { - if (new_link_state) - REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event); - else - REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event); - } else - is_set = 0; - - return is_set; -} - -static void -bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi) -{ - spin_lock(&bp->phy_lock); - - if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) - bnx2_set_link(bp); - if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT)) - bnx2_set_remote_link(bp); - - spin_unlock(&bp->phy_lock); - -} - -static inline u16 -bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi) -{ - u16 cons; - - /* Tell 
compiler that status block fields can change. */ - barrier(); - cons = *bnapi->hw_tx_cons_ptr; - barrier(); - if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)) - cons++; - return cons; -} - -static int -bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) -{ - struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; - u16 hw_cons, sw_cons, sw_ring_cons; - int tx_pkt = 0, index; - struct netdev_queue *txq; - - index = (bnapi - bp->bnx2_napi); - txq = netdev_get_tx_queue(bp->dev, index); - - hw_cons = bnx2_get_hw_tx_cons(bnapi); - sw_cons = txr->tx_cons; - - while (sw_cons != hw_cons) { - struct sw_tx_bd *tx_buf; - struct sk_buff *skb; - int i, last; - - sw_ring_cons = TX_RING_IDX(sw_cons); - - tx_buf = &txr->tx_buf_ring[sw_ring_cons]; - skb = tx_buf->skb; - - /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */ - prefetch(&skb->end); - - /* partial BD completions possible with TSO packets */ - if (tx_buf->is_gso) { - u16 last_idx, last_ring_idx; - - last_idx = sw_cons + tx_buf->nr_frags + 1; - last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1; - if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) { - last_idx++; - } - if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) { - break; - } - } - - dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); - - tx_buf->skb = NULL; - last = tx_buf->nr_frags; - - for (i = 0; i < last; i++) { - sw_cons = NEXT_TX_BD(sw_cons); - - dma_unmap_page(&bp->pdev->dev, - dma_unmap_addr( - &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], - mapping), - skb_shinfo(skb)->frags[i].size, - PCI_DMA_TODEVICE); - } - - sw_cons = NEXT_TX_BD(sw_cons); - - dev_kfree_skb(skb); - tx_pkt++; - if (tx_pkt == budget) - break; - - if (hw_cons == sw_cons) - hw_cons = bnx2_get_hw_tx_cons(bnapi); - } - - txr->hw_tx_cons = hw_cons; - txr->tx_cons = sw_cons; - - /* Need to make the tx_cons update visible to bnx2_start_xmit() - * before checking for netif_tx_queue_stopped(). Without the - * memory barrier, there is a small possibility that bnx2_start_xmit() - * will miss it and cause the queue to be stopped forever. - */ - smp_mb(); - - if (unlikely(netif_tx_queue_stopped(txq)) && - (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) { - __netif_tx_lock(txq, smp_processor_id()); - if ((netif_tx_queue_stopped(txq)) && - (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) - netif_tx_wake_queue(txq); - __netif_tx_unlock(txq); - } - - return tx_pkt; -} - -static void -bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, - struct sk_buff *skb, int count) -{ - struct sw_pg *cons_rx_pg, *prod_rx_pg; - struct rx_bd *cons_bd, *prod_bd; - int i; - u16 hw_prod, prod; - u16 cons = rxr->rx_pg_cons; - - cons_rx_pg = &rxr->rx_pg_ring[cons]; - - /* The caller was unable to allocate a new page to replace the - * last one in the frags array, so we need to recycle that page - * and then free the skb. 
- */ - if (skb) { - struct page *page; - struct skb_shared_info *shinfo; - - shinfo = skb_shinfo(skb); - shinfo->nr_frags--; - page = shinfo->frags[shinfo->nr_frags].page; - shinfo->frags[shinfo->nr_frags].page = NULL; - - cons_rx_pg->page = page; - dev_kfree_skb(skb); - } - - hw_prod = rxr->rx_pg_prod; - - for (i = 0; i < count; i++) { - prod = RX_PG_RING_IDX(hw_prod); - - prod_rx_pg = &rxr->rx_pg_ring[prod]; - cons_rx_pg = &rxr->rx_pg_ring[cons]; - cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)]; - prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; - - if (prod != cons) { - prod_rx_pg->page = cons_rx_pg->page; - cons_rx_pg->page = NULL; - dma_unmap_addr_set(prod_rx_pg, mapping, - dma_unmap_addr(cons_rx_pg, mapping)); - - prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; - prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; - - } - cons = RX_PG_RING_IDX(NEXT_RX_BD(cons)); - hw_prod = NEXT_RX_BD(hw_prod); - } - rxr->rx_pg_prod = hw_prod; - rxr->rx_pg_cons = cons; -} - -static inline void -bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, - struct sk_buff *skb, u16 cons, u16 prod) -{ - struct sw_bd *cons_rx_buf, *prod_rx_buf; - struct rx_bd *cons_bd, *prod_bd; - - cons_rx_buf = &rxr->rx_buf_ring[cons]; - prod_rx_buf = &rxr->rx_buf_ring[prod]; - - dma_sync_single_for_device(&bp->pdev->dev, - dma_unmap_addr(cons_rx_buf, mapping), - BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); - - rxr->rx_prod_bseq += bp->rx_buf_use_size; - - prod_rx_buf->skb = skb; - prod_rx_buf->desc = (struct l2_fhdr *) skb->data; - - if (cons == prod) - return; - - dma_unmap_addr_set(prod_rx_buf, mapping, - dma_unmap_addr(cons_rx_buf, mapping)); - - cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; - prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; - prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; - prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; -} - -static int -bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, - unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr, - u32 ring_idx) -{ - int err; - u16 prod = ring_idx & 0xffff; - - err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC); - if (unlikely(err)) { - bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod); - if (hdr_len) { - unsigned int raw_len = len + 4; - int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT; - - bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); - } - return err; - } - - skb_reserve(skb, BNX2_RX_OFFSET); - dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); - - if (hdr_len == 0) { - skb_put(skb, len); - return 0; - } else { - unsigned int i, frag_len, frag_size, pages; - struct sw_pg *rx_pg; - u16 pg_cons = rxr->rx_pg_cons; - u16 pg_prod = rxr->rx_pg_prod; - - frag_size = len + 4 - hdr_len; - pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT; - skb_put(skb, hdr_len); - - for (i = 0; i < pages; i++) { - dma_addr_t mapping_old; - - frag_len = min(frag_size, (unsigned int) PAGE_SIZE); - if (unlikely(frag_len <= 4)) { - unsigned int tail = 4 - frag_len; - - rxr->rx_pg_cons = pg_cons; - rxr->rx_pg_prod = pg_prod; - bnx2_reuse_rx_skb_pages(bp, rxr, NULL, - pages - i); - skb->len -= tail; - if (i == 0) { - skb->tail -= tail; - } else { - skb_frag_t *frag = - &skb_shinfo(skb)->frags[i - 1]; - frag->size -= tail; - skb->data_len -= tail; - skb->truesize -= tail; - } - return 0; - } - rx_pg = &rxr->rx_pg_ring[pg_cons]; - - /* Don't unmap yet. 
If we're unable to allocate a new - * page, we need to recycle the page and the DMA addr. - */ - mapping_old = dma_unmap_addr(rx_pg, mapping); - if (i == pages - 1) - frag_len -= 4; - - skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len); - rx_pg->page = NULL; - - err = bnx2_alloc_rx_page(bp, rxr, - RX_PG_RING_IDX(pg_prod), - GFP_ATOMIC); - if (unlikely(err)) { - rxr->rx_pg_cons = pg_cons; - rxr->rx_pg_prod = pg_prod; - bnx2_reuse_rx_skb_pages(bp, rxr, skb, - pages - i); - return err; - } - - dma_unmap_page(&bp->pdev->dev, mapping_old, - PAGE_SIZE, PCI_DMA_FROMDEVICE); - - frag_size -= frag_len; - skb->data_len += frag_len; - skb->truesize += frag_len; - skb->len += frag_len; - - pg_prod = NEXT_RX_BD(pg_prod); - pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons)); - } - rxr->rx_pg_prod = pg_prod; - rxr->rx_pg_cons = pg_cons; - } - return 0; -} - -static inline u16 -bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi) -{ - u16 cons; - - /* Tell compiler that status block fields can change. */ - barrier(); - cons = *bnapi->hw_rx_cons_ptr; - barrier(); - if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)) - cons++; - return cons; -} - -static int -bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) -{ - struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; - u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod; - struct l2_fhdr *rx_hdr; - int rx_pkt = 0, pg_ring_used = 0; - - hw_cons = bnx2_get_hw_rx_cons(bnapi); - sw_cons = rxr->rx_cons; - sw_prod = rxr->rx_prod; - - /* Memory barrier necessary as speculative reads of the rx - * buffer can be ahead of the index in the status block - */ - rmb(); - while (sw_cons != hw_cons) { - unsigned int len, hdr_len; - u32 status; - struct sw_bd *rx_buf, *next_rx_buf; - struct sk_buff *skb; - dma_addr_t dma_addr; - - sw_ring_cons = RX_RING_IDX(sw_cons); - sw_ring_prod = RX_RING_IDX(sw_prod); - - rx_buf = &rxr->rx_buf_ring[sw_ring_cons]; - skb = rx_buf->skb; - prefetchw(skb); - - next_rx_buf = - &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))]; - prefetch(next_rx_buf->desc); - - rx_buf->skb = NULL; - - dma_addr = dma_unmap_addr(rx_buf, mapping); - - dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, - BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, - PCI_DMA_FROMDEVICE); - - rx_hdr = rx_buf->desc; - len = rx_hdr->l2_fhdr_pkt_len; - status = rx_hdr->l2_fhdr_status; - - hdr_len = 0; - if (status & L2_FHDR_STATUS_SPLIT) { - hdr_len = rx_hdr->l2_fhdr_ip_xsum; - pg_ring_used = 1; - } else if (len > bp->rx_jumbo_thresh) { - hdr_len = bp->rx_jumbo_thresh; - pg_ring_used = 1; - } - - if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC | - L2_FHDR_ERRORS_PHY_DECODE | - L2_FHDR_ERRORS_ALIGNMENT | - L2_FHDR_ERRORS_TOO_SHORT | - L2_FHDR_ERRORS_GIANT_FRAME))) { - - bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, - sw_ring_prod); - if (pg_ring_used) { - int pages; - - pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT; - - bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); - } - goto next_rx; - } - - len -= 4; - - if (len <= bp->rx_copy_thresh) { - struct sk_buff *new_skb; - - new_skb = netdev_alloc_skb(bp->dev, len + 6); - if (new_skb == NULL) { - bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, - sw_ring_prod); - goto next_rx; - } - - /* aligned copy */ - skb_copy_from_linear_data_offset(skb, - BNX2_RX_OFFSET - 6, - new_skb->data, len + 6); - skb_reserve(new_skb, 6); - skb_put(new_skb, len); - - bnx2_reuse_rx_skb(bp, rxr, skb, - sw_ring_cons, sw_ring_prod); - - skb = new_skb; - } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len, - dma_addr, (sw_ring_cons << 16) | 
sw_ring_prod))) - goto next_rx; - - if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && - !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) - __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag); - - skb->protocol = eth_type_trans(skb, bp->dev); - - if ((len > (bp->dev->mtu + ETH_HLEN)) && - (ntohs(skb->protocol) != 0x8100)) { - - dev_kfree_skb(skb); - goto next_rx; - - } - - skb_checksum_none_assert(skb); - if ((bp->dev->features & NETIF_F_RXCSUM) && - (status & (L2_FHDR_STATUS_TCP_SEGMENT | - L2_FHDR_STATUS_UDP_DATAGRAM))) { - - if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM | - L2_FHDR_ERRORS_UDP_XSUM)) == 0)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - } - if ((bp->dev->features & NETIF_F_RXHASH) && - ((status & L2_FHDR_STATUS_USE_RXHASH) == - L2_FHDR_STATUS_USE_RXHASH)) - skb->rxhash = rx_hdr->l2_fhdr_hash; - - skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]); - napi_gro_receive(&bnapi->napi, skb); - rx_pkt++; - -next_rx: - sw_cons = NEXT_RX_BD(sw_cons); - sw_prod = NEXT_RX_BD(sw_prod); - - if ((rx_pkt == budget)) - break; - - /* Refresh hw_cons to see if there is new work */ - if (sw_cons == hw_cons) { - hw_cons = bnx2_get_hw_rx_cons(bnapi); - rmb(); - } - } - rxr->rx_cons = sw_cons; - rxr->rx_prod = sw_prod; - - if (pg_ring_used) - REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); - - REG_WR16(bp, rxr->rx_bidx_addr, sw_prod); - - REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); - - mmiowb(); - - return rx_pkt; - -} - -/* MSI ISR - The only difference between this and the INTx ISR - * is that the MSI interrupt is always serviced. - */ -static irqreturn_t -bnx2_msi(int irq, void *dev_instance) -{ - struct bnx2_napi *bnapi = dev_instance; - struct bnx2 *bp = bnapi->bp; - - prefetch(bnapi->status_blk.msi); - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT); - - /* Return here if interrupt is disabled. */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) - return IRQ_HANDLED; - - napi_schedule(&bnapi->napi); - - return IRQ_HANDLED; -} - -static irqreturn_t -bnx2_msi_1shot(int irq, void *dev_instance) -{ - struct bnx2_napi *bnapi = dev_instance; - struct bnx2 *bp = bnapi->bp; - - prefetch(bnapi->status_blk.msi); - - /* Return here if interrupt is disabled. */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) - return IRQ_HANDLED; - - napi_schedule(&bnapi->napi); - - return IRQ_HANDLED; -} - -static irqreturn_t -bnx2_interrupt(int irq, void *dev_instance) -{ - struct bnx2_napi *bnapi = dev_instance; - struct bnx2 *bp = bnapi->bp; - struct status_block *sblk = bnapi->status_blk.msi; - - /* When using INTx, it is possible for the interrupt to arrive - * at the CPU before the status block posted prior to the - * interrupt. Reading a register will flush the status block. - * When using MSI, the MSI message will always complete after - * the status block write. - */ - if ((sblk->status_idx == bnapi->last_status_idx) && - (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & - BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) - return IRQ_NONE; - - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT); - - /* Read back to deassert IRQ immediately to avoid too many - * spurious interrupts. - */ - REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); - - /* Return here if interrupt is shared and is disabled. 
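
A note on the small-packet path in bnx2_rx_int() above: frames no longer than bp->rx_copy_thresh are copied into a freshly allocated skb so the large DMA-mapped receive buffer can be recycled in place, trading one memcpy for an unmap/alloc/map cycle. The shape of that copy-break decision, condensed from the code above:

	if (len <= bp->rx_copy_thresh) {
		struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, len + 6);

		if (new_skb) {
			/* Copy with 6 bytes of slack so the headers keep
			 * the alignment they had in the original buffer.
			 */
			skb_copy_from_linear_data_offset(skb,
					BNX2_RX_OFFSET - 6,
					new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);
			bnx2_reuse_rx_skb(bp, rxr, skb,
					  sw_ring_cons, sw_ring_prod);
			skb = new_skb;	/* deliver the copy instead */
		}
	}
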
*/ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) - return IRQ_HANDLED; - - if (napi_schedule_prep(&bnapi->napi)) { - bnapi->last_status_idx = sblk->status_idx; - __napi_schedule(&bnapi->napi); - } - - return IRQ_HANDLED; -} - -static inline int -bnx2_has_fast_work(struct bnx2_napi *bnapi) -{ - struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; - struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; - - if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) || - (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)) - return 1; - return 0; -} - -#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \ - STATUS_ATTN_BITS_TIMER_ABORT) - -static inline int -bnx2_has_work(struct bnx2_napi *bnapi) -{ - struct status_block *sblk = bnapi->status_blk.msi; - - if (bnx2_has_fast_work(bnapi)) - return 1; - -#ifdef BCM_CNIC - if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx)) - return 1; -#endif - - if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) != - (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS)) - return 1; - - return 0; -} - -static void -bnx2_chk_missed_msi(struct bnx2 *bp) -{ - struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; - u32 msi_ctrl; - - if (bnx2_has_work(bnapi)) { - msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL); - if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE)) - return; - - if (bnapi->last_status_idx == bp->idle_chk_status_idx) { - REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl & - ~BNX2_PCICFG_MSI_CONTROL_ENABLE); - REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl); - bnx2_msi(bp->irq_tbl[0].vector, bnapi); - } - } - - bp->idle_chk_status_idx = bnapi->last_status_idx; -} - -#ifdef BCM_CNIC -static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi) -{ - struct cnic_ops *c_ops; - - if (!bnapi->cnic_present) - return; - - rcu_read_lock(); - c_ops = rcu_dereference(bp->cnic_ops); - if (c_ops) - bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data, - bnapi->status_blk.msi); - rcu_read_unlock(); -} -#endif - -static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) -{ - struct status_block *sblk = bnapi->status_blk.msi; - u32 status_attn_bits = sblk->status_attn_bits; - u32 status_attn_bits_ack = sblk->status_attn_bits_ack; - - if ((status_attn_bits & STATUS_ATTN_EVENTS) != - (status_attn_bits_ack & STATUS_ATTN_EVENTS)) { - - bnx2_phy_int(bp, bnapi); - - /* This is needed to take care of transient status - * during link changes. - */ - REG_WR(bp, BNX2_HC_COMMAND, - bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); - REG_RD(bp, BNX2_HC_COMMAND); - } -} - -static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi, - int work_done, int budget) -{ - struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; - struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; - - if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons) - bnx2_tx_int(bp, bnapi, 0); - - if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) - work_done += bnx2_rx_int(bp, bnapi, budget - work_done); - - return work_done; -} - -static int bnx2_poll_msix(struct napi_struct *napi, int budget) -{ - struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi); - struct bnx2 *bp = bnapi->bp; - int work_done = 0; - struct status_block_msix *sblk = bnapi->status_blk.msix; - - while (1) { - work_done = bnx2_poll_work(bp, bnapi, work_done, budget); - if (unlikely(work_done >= budget)) - break; - - bnapi->last_status_idx = sblk->status_idx; - /* status idx must be read before checking for more work. 
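
The loop above is an instance of the standard NAPI contract: process at most budget packets per poll, and only when the work runs out early complete NAPI and re-arm the device interrupt, re-checking for work that may have raced in during the window. A generic skeleton of the pattern, with hypothetical names rather than this driver's:

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_ring *ring = container_of(napi, struct my_ring, napi);
		int work_done = my_process_ring(ring, budget);

		if (work_done < budget) {
			/* Record how far we got before checking for more
			 * work, then complete and unmask the interrupt.
			 * Returning budget instead keeps us in polling mode.
			 */
			napi_complete(napi);
			my_unmask_irq(ring);
		}
		return work_done;
	}
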
*/ - rmb(); - if (likely(!bnx2_has_fast_work(bnapi))) { - - napi_complete(napi); - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - bnapi->last_status_idx); - break; - } - } - return work_done; -} - -static int bnx2_poll(struct napi_struct *napi, int budget) -{ - struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi); - struct bnx2 *bp = bnapi->bp; - int work_done = 0; - struct status_block *sblk = bnapi->status_blk.msi; - - while (1) { - bnx2_poll_link(bp, bnapi); - - work_done = bnx2_poll_work(bp, bnapi, work_done, budget); - -#ifdef BCM_CNIC - bnx2_poll_cnic(bp, bnapi); -#endif - - /* bnapi->last_status_idx is used below to tell the hw how - * much work has been processed, so we must read it before - * checking for more work. - */ - bnapi->last_status_idx = sblk->status_idx; - - if (unlikely(work_done >= budget)) - break; - - rmb(); - if (likely(!bnx2_has_work(bnapi))) { - napi_complete(napi); - if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - bnapi->last_status_idx); - break; - } - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT | - bnapi->last_status_idx); - - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - bnapi->last_status_idx); - break; - } - } - - return work_done; -} - -/* Called with rtnl_lock from vlan functions and also netif_tx_lock - * from set_multicast. - */ -static void -bnx2_set_rx_mode(struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - u32 rx_mode, sort_mode; - struct netdev_hw_addr *ha; - int i; - - if (!netif_running(dev)) - return; - - spin_lock_bh(&bp->phy_lock); - - rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | - BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); - sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; - if (!(dev->features & NETIF_F_HW_VLAN_RX) && - (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) - rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; - if (dev->flags & IFF_PROMISC) { - /* Promiscuous mode. */ - rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; - sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | - BNX2_RPM_SORT_USER0_PROM_VLAN; - } - else if (dev->flags & IFF_ALLMULTI) { - for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { - REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), - 0xffffffff); - } - sort_mode |= BNX2_RPM_SORT_USER0_MC_EN; - } - else { - /* Accept one or more multicast(s). 
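
The hashing just below spreads each multicast address over a 256-bit filter held in eight 32-bit registers: ether_crc_le() of the address is taken, its low 8 bits select one of the 256 filter bits, with bits 7..5 picking the register and bits 4..0 the bit inside it. A worked example under those definitions:

	u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
	u32 bit = crc & 0xff;		/* say this yields 0xb3 == 179 */
	u32 regidx = (bit & 0xe0) >> 5;	/* 179 / 32 = 5 -> HASH5 */

	bit &= 0x1f;			/* 179 % 32 = 19 -> bit 19 */
	mc_filter[regidx] |= 1 << bit;	/* one bit out of 256 is set */
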
*/ - u32 mc_filter[NUM_MC_HASH_REGISTERS]; - u32 regidx; - u32 bit; - u32 crc; - - memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); - - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc_le(ETH_ALEN, ha->addr); - bit = crc & 0xff; - regidx = (bit & 0xe0) >> 5; - bit &= 0x1f; - mc_filter[regidx] |= (1 << bit); - } - - for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { - REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), - mc_filter[i]); - } - - sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; - } - - if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) { - rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; - sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | - BNX2_RPM_SORT_USER0_PROM_VLAN; - } else if (!(dev->flags & IFF_PROMISC)) { - /* Add all entries into to the match filter list */ - i = 0; - netdev_for_each_uc_addr(ha, dev) { - bnx2_set_mac_addr(bp, ha->addr, - i + BNX2_START_UNICAST_ADDRESS_INDEX); - sort_mode |= (1 << - (i + BNX2_START_UNICAST_ADDRESS_INDEX)); - i++; - } - - } - - if (rx_mode != bp->rx_mode) { - bp->rx_mode = rx_mode; - REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode); - } - - REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); - REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); - REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); - - spin_unlock_bh(&bp->phy_lock); -} - -static int __devinit -check_fw_section(const struct firmware *fw, - const struct bnx2_fw_file_section *section, - u32 alignment, bool non_empty) -{ - u32 offset = be32_to_cpu(section->offset); - u32 len = be32_to_cpu(section->len); - - if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3) - return -EINVAL; - if ((non_empty && len == 0) || len > fw->size - offset || - len & (alignment - 1)) - return -EINVAL; - return 0; -} - -static int __devinit -check_mips_fw_entry(const struct firmware *fw, - const struct bnx2_mips_fw_file_entry *entry) -{ - if (check_fw_section(fw, &entry->text, 4, true) || - check_fw_section(fw, &entry->data, 4, false) || - check_fw_section(fw, &entry->rodata, 4, false)) - return -EINVAL; - return 0; -} - -static int __devinit -bnx2_request_firmware(struct bnx2 *bp) -{ - const char *mips_fw_file, *rv2p_fw_file; - const struct bnx2_mips_fw_file *mips_fw; - const struct bnx2_rv2p_fw_file *rv2p_fw; - int rc; - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - mips_fw_file = FW_MIPS_FILE_09; - if ((CHIP_ID(bp) == CHIP_ID_5709_A0) || - (CHIP_ID(bp) == CHIP_ID_5709_A1)) - rv2p_fw_file = FW_RV2P_FILE_09_Ax; - else - rv2p_fw_file = FW_RV2P_FILE_09; - } else { - mips_fw_file = FW_MIPS_FILE_06; - rv2p_fw_file = FW_RV2P_FILE_06; - } - - rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev); - if (rc) { - pr_err("Can't load firmware file \"%s\"\n", mips_fw_file); - return rc; - } - - rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev); - if (rc) { - pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file); - return rc; - } - mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; - rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; - if (bp->mips_firmware->size < sizeof(*mips_fw) || - check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) || - check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) || - check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) || - check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) || - check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) { - pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file); - return -EINVAL; - } - if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) || - check_fw_section(bp->rv2p_firmware, 
&rv2p_fw->proc1.rv2p, 8, true) || - check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) { - pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file); - return -EINVAL; - } - - return 0; -} - -static u32 -rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code) -{ - switch (idx) { - case RV2P_P1_FIXUP_PAGE_SIZE_IDX: - rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK; - rv2p_code |= RV2P_BD_PAGE_SIZE; - break; - } - return rv2p_code; -} - -static int -load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc, - const struct bnx2_rv2p_fw_file_entry *fw_entry) -{ - u32 rv2p_code_len, file_offset; - __be32 *rv2p_code; - int i; - u32 val, cmd, addr; - - rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len); - file_offset = be32_to_cpu(fw_entry->rv2p.offset); - - rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); - - if (rv2p_proc == RV2P_PROC1) { - cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR; - addr = BNX2_RV2P_PROC1_ADDR_CMD; - } else { - cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR; - addr = BNX2_RV2P_PROC2_ADDR_CMD; - } - - for (i = 0; i < rv2p_code_len; i += 8) { - REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code)); - rv2p_code++; - REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code)); - rv2p_code++; - - val = (i / 8) | cmd; - REG_WR(bp, addr, val); - } - - rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); - for (i = 0; i < 8; i++) { - u32 loc, code; - - loc = be32_to_cpu(fw_entry->fixup[i]); - if (loc && ((loc * 4) < rv2p_code_len)) { - code = be32_to_cpu(*(rv2p_code + loc - 1)); - REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code); - code = be32_to_cpu(*(rv2p_code + loc)); - code = rv2p_fw_fixup(rv2p_proc, i, loc, code); - REG_WR(bp, BNX2_RV2P_INSTR_LOW, code); - - val = (loc / 2) | cmd; - REG_WR(bp, addr, val); - } - } - - /* Reset the processor, un-stall is done later. */ - if (rv2p_proc == RV2P_PROC1) { - REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET); - } - else { - REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET); - } - - return 0; -} - -static int -load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, - const struct bnx2_mips_fw_file_entry *fw_entry) -{ - u32 addr, len, file_offset; - __be32 *data; - u32 offset; - u32 val; - - /* Halt the CPU. */ - val = bnx2_reg_rd_ind(bp, cpu_reg->mode); - val |= cpu_reg->mode_value_halt; - bnx2_reg_wr_ind(bp, cpu_reg->mode, val); - bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); - - /* Load the Text area. */ - addr = be32_to_cpu(fw_entry->text.addr); - len = be32_to_cpu(fw_entry->text.len); - file_offset = be32_to_cpu(fw_entry->text.offset); - data = (__be32 *)(bp->mips_firmware->data + file_offset); - - offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base); - if (len) { - int j; - - for (j = 0; j < (len / 4); j++, offset += 4) - bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); - } - - /* Load the Data area. */ - addr = be32_to_cpu(fw_entry->data.addr); - len = be32_to_cpu(fw_entry->data.len); - file_offset = be32_to_cpu(fw_entry->data.offset); - data = (__be32 *)(bp->mips_firmware->data + file_offset); - - offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base); - if (len) { - int j; - - for (j = 0; j < (len / 4); j++, offset += 4) - bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); - } - - /* Load the Read-Only area. 
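
The text, data and read-only loads in load_cpu_fw() are three copies of the same loop: translate the section's CPU-visible address into a scratchpad offset, then write the big-endian image one word at a time. A hypothetical refactor, only to make the shared shape explicit (the helper and its field accesses are my sketch, assuming the bnx2_fw_file_section layout used by check_fw_section() above):

	static void load_fw_section(struct bnx2 *bp,
				    const struct cpu_reg *cpu_reg,
				    const struct bnx2_fw_file_section *sec)
	{
		u32 addr = be32_to_cpu(sec->addr);
		u32 len = be32_to_cpu(sec->len);
		u32 file_offset = be32_to_cpu(sec->offset);
		__be32 *data = (__be32 *)(bp->mips_firmware->data + file_offset);
		u32 offset, j;

		/* Map the CPU's view of the address onto the scratchpad */
		offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
		for (j = 0; j < len / 4; j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}
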
*/ - addr = be32_to_cpu(fw_entry->rodata.addr); - len = be32_to_cpu(fw_entry->rodata.len); - file_offset = be32_to_cpu(fw_entry->rodata.offset); - data = (__be32 *)(bp->mips_firmware->data + file_offset); - - offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base); - if (len) { - int j; - - for (j = 0; j < (len / 4); j++, offset += 4) - bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); - } - - /* Clear the pre-fetch instruction. */ - bnx2_reg_wr_ind(bp, cpu_reg->inst, 0); - - val = be32_to_cpu(fw_entry->start_addr); - bnx2_reg_wr_ind(bp, cpu_reg->pc, val); - - /* Start the CPU. */ - val = bnx2_reg_rd_ind(bp, cpu_reg->mode); - val &= ~cpu_reg->mode_value_halt; - bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); - bnx2_reg_wr_ind(bp, cpu_reg->mode, val); - - return 0; -} - -static int -bnx2_init_cpus(struct bnx2 *bp) -{ - const struct bnx2_mips_fw_file *mips_fw = - (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; - const struct bnx2_rv2p_fw_file *rv2p_fw = - (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; - int rc; - - /* Initialize the RV2P processor. */ - load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1); - load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2); - - /* Initialize the RX Processor. */ - rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp); - if (rc) - goto init_cpu_err; - - /* Initialize the TX Processor. */ - rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp); - if (rc) - goto init_cpu_err; - - /* Initialize the TX Patch-up Processor. */ - rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat); - if (rc) - goto init_cpu_err; - - /* Initialize the Completion Processor. */ - rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com); - if (rc) - goto init_cpu_err; - - /* Initialize the Command Processor. */ - rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp); - -init_cpu_err: - return rc; -} - -static int -bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) -{ - u16 pmcsr; - - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); - - switch (state) { - case PCI_D0: { - u32 val; - - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | - PCI_PM_CTRL_PME_STATUS); - - if (pmcsr & PCI_PM_CTRL_STATE_MASK) - /* delay required during transition out of D3hot */ - msleep(20); - - val = REG_RD(bp, BNX2_EMAC_MODE); - val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; - val &= ~BNX2_EMAC_MODE_MPKT; - REG_WR(bp, BNX2_EMAC_MODE, val); - - val = REG_RD(bp, BNX2_RPM_CONFIG); - val &= ~BNX2_RPM_CONFIG_ACPI_ENA; - REG_WR(bp, BNX2_RPM_CONFIG, val); - break; - } - case PCI_D3hot: { - int i; - u32 val, wol_msg; - - if (bp->wol) { - u32 advertising; - u8 autoneg; - - autoneg = bp->autoneg; - advertising = bp->advertising; - - if (bp->phy_port == PORT_TP) { - bp->autoneg = AUTONEG_SPEED; - bp->advertising = ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_Autoneg; - } - - spin_lock_bh(&bp->phy_lock); - bnx2_setup_phy(bp, bp->phy_port); - spin_unlock_bh(&bp->phy_lock); - - bp->autoneg = autoneg; - bp->advertising = advertising; - - bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); - - val = REG_RD(bp, BNX2_EMAC_MODE); - - /* Enable port mode. 
*/ - val &= ~BNX2_EMAC_MODE_PORT; - val |= BNX2_EMAC_MODE_MPKT_RCVD | - BNX2_EMAC_MODE_ACPI_RCVD | - BNX2_EMAC_MODE_MPKT; - if (bp->phy_port == PORT_TP) - val |= BNX2_EMAC_MODE_PORT_MII; - else { - val |= BNX2_EMAC_MODE_PORT_GMII; - if (bp->line_speed == SPEED_2500) - val |= BNX2_EMAC_MODE_25G_MODE; - } - - REG_WR(bp, BNX2_EMAC_MODE, val); - - /* receive all multicast */ - for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { - REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), - 0xffffffff); - } - REG_WR(bp, BNX2_EMAC_RX_MODE, - BNX2_EMAC_RX_MODE_SORT_MODE); - - val = 1 | BNX2_RPM_SORT_USER0_BC_EN | - BNX2_RPM_SORT_USER0_MC_EN; - REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); - REG_WR(bp, BNX2_RPM_SORT_USER0, val); - REG_WR(bp, BNX2_RPM_SORT_USER0, val | - BNX2_RPM_SORT_USER0_ENA); - - /* Need to enable EMAC and RPM for WOL. */ - REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, - BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | - BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | - BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); - - val = REG_RD(bp, BNX2_RPM_CONFIG); - val &= ~BNX2_RPM_CONFIG_ACPI_ENA; - REG_WR(bp, BNX2_RPM_CONFIG, val); - - wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; - } - else { - wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; - } - - if (!(bp->flags & BNX2_FLAG_NO_WOL)) - bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, - 1, 0); - - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || - (CHIP_ID(bp) == CHIP_ID_5706_A1)) { - - if (bp->wol) - pmcsr |= 3; - } - else { - pmcsr |= 3; - } - if (bp->wol) { - pmcsr |= PCI_PM_CTRL_PME_ENABLE; - } - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - pmcsr); - - /* No more memory access after this point until - * device is brought back to D0. - */ - udelay(50); - break; - } - default: - return -EINVAL; - } - return 0; -} - -static int -bnx2_acquire_nvram_lock(struct bnx2 *bp) -{ - u32 val; - int j; - - /* Request access to the flash interface. */ - REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2); - for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { - val = REG_RD(bp, BNX2_NVM_SW_ARB); - if (val & BNX2_NVM_SW_ARB_ARB_ARB2) - break; - - udelay(5); - } - - if (j >= NVRAM_TIMEOUT_COUNT) - return -EBUSY; - - return 0; -} - -static int -bnx2_release_nvram_lock(struct bnx2 *bp) -{ - int j; - u32 val; - - /* Relinquish nvram interface. */ - REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2); - - for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { - val = REG_RD(bp, BNX2_NVM_SW_ARB); - if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2)) - break; - - udelay(5); - } - - if (j >= NVRAM_TIMEOUT_COUNT) - return -EBUSY; - - return 0; -} - - -static int -bnx2_enable_nvram_write(struct bnx2 *bp) -{ - u32 val; - - val = REG_RD(bp, BNX2_MISC_CFG); - REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); - - if (bp->flash_info->flags & BNX2_NV_WREN) { - int j; - - REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); - REG_WR(bp, BNX2_NVM_COMMAND, - BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT); - - for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { - udelay(5); - - val = REG_RD(bp, BNX2_NVM_COMMAND); - if (val & BNX2_NVM_COMMAND_DONE) - break; - } - - if (j >= NVRAM_TIMEOUT_COUNT) - return -EBUSY; - } - return 0; -} - -static void -bnx2_disable_nvram_write(struct bnx2 *bp) -{ - u32 val; - - val = REG_RD(bp, BNX2_MISC_CFG); - REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN); -} - - -static void -bnx2_enable_nvram_access(struct bnx2 *bp) -{ - u32 val; - - val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); - /* Enable both bits, even on read. 
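
All of the NVRAM helpers in this file share one completion protocol: issue a command (or arbitration request), then poll a status bit up to NVRAM_TIMEOUT_COUNT times with a 5 usec delay, returning -EBUSY on timeout. The DONE-bit variant used by the erase, read and write paths could be factored as (hypothetical helper, mirroring the open-coded loops):

	/* Poll BNX2_NVM_COMMAND until DONE is set; 0 on success,
	 * -EBUSY if NVRAM_TIMEOUT_COUNT polls elapse first.
	 */
	static int bnx2_nvram_wait_done(struct bnx2 *bp)
	{
		int j;

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);
			if (REG_RD(bp, BNX2_NVM_COMMAND) &
			    BNX2_NVM_COMMAND_DONE)
				return 0;
		}
		return -EBUSY;
	}
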
*/ - REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, - val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN); -} - -static void -bnx2_disable_nvram_access(struct bnx2 *bp) -{ - u32 val; - - val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); - /* Disable both bits, even after read. */ - REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, - val & ~(BNX2_NVM_ACCESS_ENABLE_EN | - BNX2_NVM_ACCESS_ENABLE_WR_EN)); -} - -static int -bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) -{ - u32 cmd; - int j; - - if (bp->flash_info->flags & BNX2_NV_BUFFERED) - /* Buffered flash, no erase needed */ - return 0; - - /* Build an erase command */ - cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR | - BNX2_NVM_COMMAND_DOIT; - - /* Need to clear DONE bit separately. */ - REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); - - /* Address of the NVRAM to read from. */ - REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); - - /* Issue an erase command. */ - REG_WR(bp, BNX2_NVM_COMMAND, cmd); - - /* Wait for completion. */ - for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { - u32 val; - - udelay(5); - - val = REG_RD(bp, BNX2_NVM_COMMAND); - if (val & BNX2_NVM_COMMAND_DONE) - break; - } - - if (j >= NVRAM_TIMEOUT_COUNT) - return -EBUSY; - - return 0; -} - -static int -bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) -{ - u32 cmd; - int j; - - /* Build the command word. */ - cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags; - - /* Calculate an offset of a buffered flash, not needed for 5709. */ - if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { - offset = ((offset / bp->flash_info->page_size) << - bp->flash_info->page_bits) + - (offset % bp->flash_info->page_size); - } - - /* Need to clear DONE bit separately. */ - REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); - - /* Address of the NVRAM to read from. */ - REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); - - /* Issue a read command. */ - REG_WR(bp, BNX2_NVM_COMMAND, cmd); - - /* Wait for completion. */ - for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { - u32 val; - - udelay(5); - - val = REG_RD(bp, BNX2_NVM_COMMAND); - if (val & BNX2_NVM_COMMAND_DONE) { - __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ)); - memcpy(ret_val, &v, 4); - break; - } - } - if (j >= NVRAM_TIMEOUT_COUNT) - return -EBUSY; - - return 0; -} - - -static int -bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) -{ - u32 cmd; - __be32 val32; - int j; - - /* Build the command word. */ - cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags; - - /* Calculate an offset of a buffered flash, not needed for 5709. */ - if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { - offset = ((offset / bp->flash_info->page_size) << - bp->flash_info->page_bits) + - (offset % bp->flash_info->page_size); - } - - /* Need to clear DONE bit separately. */ - REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); - - memcpy(&val32, val, 4); - - /* Write the data. */ - REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32)); - - /* Address of the NVRAM to write to. */ - REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); - - /* Issue the write command. */ - REG_WR(bp, BNX2_NVM_COMMAND, cmd); - - /* Wait for completion. 
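
A note on the offset translation in the two dword helpers above: for parts flagged BNX2_NV_TRANSLATE, the linear byte offset is rewritten as (page number << page_bits) + offset-within-page. Assuming a buffered part with a 264-byte page and 9 page bits (one plausible geometry among the flash types this driver handles), offset 1000 translates as:

	u32 offset = 1000;
	u32 page   = offset / 264;		/* = 3   */
	u32 within = offset % 264;		/* = 208 */

	offset = (page << 9) + within;		/* (3 << 9) + 208 = 1744 */
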
*/ - for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { - udelay(5); - - if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE) - break; - } - if (j >= NVRAM_TIMEOUT_COUNT) - return -EBUSY; - - return 0; -} - -static int -bnx2_init_nvram(struct bnx2 *bp) -{ - u32 val; - int j, entry_count, rc = 0; - const struct flash_spec *flash; - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - bp->flash_info = &flash_5709; - goto get_flash_size; - } - - /* Determine the selected interface. */ - val = REG_RD(bp, BNX2_NVM_CFG1); - - entry_count = ARRAY_SIZE(flash_table); - - if (val & 0x40000000) { - - /* Flash interface has been reconfigured */ - for (j = 0, flash = &flash_table[0]; j < entry_count; - j++, flash++) { - if ((val & FLASH_BACKUP_STRAP_MASK) == - (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { - bp->flash_info = flash; - break; - } - } - } - else { - u32 mask; - /* Not yet been reconfigured */ - - if (val & (1 << 23)) - mask = FLASH_BACKUP_STRAP_MASK; - else - mask = FLASH_STRAP_MASK; - - for (j = 0, flash = &flash_table[0]; j < entry_count; - j++, flash++) { - - if ((val & mask) == (flash->strapping & mask)) { - bp->flash_info = flash; - - /* Request access to the flash interface. */ - if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) - return rc; - - /* Enable access to flash interface */ - bnx2_enable_nvram_access(bp); - - /* Reconfigure the flash interface */ - REG_WR(bp, BNX2_NVM_CFG1, flash->config1); - REG_WR(bp, BNX2_NVM_CFG2, flash->config2); - REG_WR(bp, BNX2_NVM_CFG3, flash->config3); - REG_WR(bp, BNX2_NVM_WRITE1, flash->write1); - - /* Disable access to flash interface */ - bnx2_disable_nvram_access(bp); - bnx2_release_nvram_lock(bp); - - break; - } - } - } /* if (val & 0x40000000) */ - - if (j == entry_count) { - bp->flash_info = NULL; - pr_alert("Unknown flash/EEPROM type\n"); - return -ENODEV; - } - -get_flash_size: - val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2); - val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK; - if (val) - bp->flash_size = val; - else - bp->flash_size = bp->flash_info->total_size; - - return rc; -} - -static int -bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf, - int buf_size) -{ - int rc = 0; - u32 cmd_flags, offset32, len32, extra; - - if (buf_size == 0) - return 0; - - /* Request access to the flash interface. */ - if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) - return rc; - - /* Enable access to flash interface */ - bnx2_enable_nvram_access(bp); - - len32 = buf_size; - offset32 = offset; - extra = 0; - - cmd_flags = 0; - - if (offset32 & 3) { - u8 buf[4]; - u32 pre_len; - - offset32 &= ~3; - pre_len = 4 - (offset & 3); - - if (pre_len >= len32) { - pre_len = len32; - cmd_flags = BNX2_NVM_COMMAND_FIRST | - BNX2_NVM_COMMAND_LAST; - } - else { - cmd_flags = BNX2_NVM_COMMAND_FIRST; - } - - rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); - - if (rc) - return rc; - - memcpy(ret_buf, buf + (offset & 3), pre_len); - - offset32 += 4; - ret_buf += pre_len; - len32 -= pre_len; - } - if (len32 & 3) { - extra = 4 - (len32 & 3); - len32 = (len32 + 4) & ~3; - } - - if (len32 == 4) { - u8 buf[4]; - - if (cmd_flags) - cmd_flags = BNX2_NVM_COMMAND_LAST; - else - cmd_flags = BNX2_NVM_COMMAND_FIRST | - BNX2_NVM_COMMAND_LAST; - - rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); - - memcpy(ret_buf, buf, 4 - extra); - } - else if (len32 > 0) { - u8 buf[4]; - - /* Read the first word. */ - if (cmd_flags) - cmd_flags = 0; - else - cmd_flags = BNX2_NVM_COMMAND_FIRST; - - rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags); - - /* Advance to the next dword. 
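
It may help to trace how bnx2_nvram_read() widens an arbitrary request to aligned dword accesses: the unaligned head is bounced through a 4-byte buffer and partially copied, and the tail is rounded up with the extra bytes discarded. For a 5-byte read at offset 6, the code above and below works out to:

	/* bnx2_nvram_read(bp, 6, buf, 5):
	 *
	 *   head: offset32 = 6 & ~3 = 4, pre_len = 4 - (6 & 3) = 2
	 *         read the dword at 4, keep its last 2 bytes
	 *         -> offset32 = 8, len32 = 3
	 *   tail: len32 & 3 = 3, so extra = 1 and len32 rounds up to 4
	 *         read the dword at 8, keep 4 - extra = 3 bytes
	 *
	 * Five bytes delivered via two aligned dword reads.
	 */
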
*/ - offset32 += 4; - ret_buf += 4; - len32 -= 4; - - while (len32 > 4 && rc == 0) { - rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0); - - /* Advance to the next dword. */ - offset32 += 4; - ret_buf += 4; - len32 -= 4; - } - - if (rc) - return rc; - - cmd_flags = BNX2_NVM_COMMAND_LAST; - rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); - - memcpy(ret_buf, buf, 4 - extra); - } - - /* Disable access to flash interface */ - bnx2_disable_nvram_access(bp); - - bnx2_release_nvram_lock(bp); - - return rc; -} - -static int -bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, - int buf_size) -{ - u32 written, offset32, len32; - u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL; - int rc = 0; - int align_start, align_end; - - buf = data_buf; - offset32 = offset; - len32 = buf_size; - align_start = align_end = 0; - - if ((align_start = (offset32 & 3))) { - offset32 &= ~3; - len32 += align_start; - if (len32 < 4) - len32 = 4; - if ((rc = bnx2_nvram_read(bp, offset32, start, 4))) - return rc; - } - - if (len32 & 3) { - align_end = 4 - (len32 & 3); - len32 += align_end; - if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4))) - return rc; - } - - if (align_start || align_end) { - align_buf = kmalloc(len32, GFP_KERNEL); - if (align_buf == NULL) - return -ENOMEM; - if (align_start) { - memcpy(align_buf, start, 4); - } - if (align_end) { - memcpy(align_buf + len32 - 4, end, 4); - } - memcpy(align_buf + align_start, data_buf, buf_size); - buf = align_buf; - } - - if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { - flash_buffer = kmalloc(264, GFP_KERNEL); - if (flash_buffer == NULL) { - rc = -ENOMEM; - goto nvram_write_end; - } - } - - written = 0; - while ((written < len32) && (rc == 0)) { - u32 page_start, page_end, data_start, data_end; - u32 addr, cmd_flags; - int i; - - /* Find the page_start addr */ - page_start = offset32 + written; - page_start -= (page_start % bp->flash_info->page_size); - /* Find the page_end addr */ - page_end = page_start + bp->flash_info->page_size; - /* Find the data_start addr */ - data_start = (written == 0) ? offset32 : page_start; - /* Find the data_end addr */ - data_end = (page_end > offset32 + len32) ? - (offset32 + len32) : page_end; - - /* Request access to the flash interface. 
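
The page arithmetic just above drives a read-modify-write cycle: on non-buffered flash the whole page is saved, erased, and rewritten with the caller's bytes spliced in between data_start and data_end. For a 10-byte write at offset 300 on a 264-byte page (values per the expressions above):

	/* offset32 = 300, len32 = 10, page_size = 264:
	 *
	 *   page_start = 300 - (300 % 264) = 264
	 *   page_end   = 264 + 264        = 528
	 *   data_start = 300        (first pass, written == 0)
	 *   data_end   = min(310, 528) = 310
	 *
	 * Bytes 264..299 and 310..527 are restored from the saved page;
	 * bytes 300..309 come from the caller's buffer.
	 */
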
*/ - if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) - goto nvram_write_end; - - /* Enable access to flash interface */ - bnx2_enable_nvram_access(bp); - - cmd_flags = BNX2_NVM_COMMAND_FIRST; - if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { - int j; - - /* Read the whole page into the buffer - * (non-buffer flash only) */ - for (j = 0; j < bp->flash_info->page_size; j += 4) { - if (j == (bp->flash_info->page_size - 4)) { - cmd_flags |= BNX2_NVM_COMMAND_LAST; - } - rc = bnx2_nvram_read_dword(bp, - page_start + j, - &flash_buffer[j], - cmd_flags); - - if (rc) - goto nvram_write_end; - - cmd_flags = 0; - } - } - - /* Enable writes to flash interface (unlock write-protect) */ - if ((rc = bnx2_enable_nvram_write(bp)) != 0) - goto nvram_write_end; - - /* Loop to write back the buffer data from page_start to - * data_start */ - i = 0; - if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { - /* Erase the page */ - if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0) - goto nvram_write_end; - - /* Re-enable the write again for the actual write */ - bnx2_enable_nvram_write(bp); - - for (addr = page_start; addr < data_start; - addr += 4, i += 4) { - - rc = bnx2_nvram_write_dword(bp, addr, - &flash_buffer[i], cmd_flags); - - if (rc != 0) - goto nvram_write_end; - - cmd_flags = 0; - } - } - - /* Loop to write the new data from data_start to data_end */ - for (addr = data_start; addr < data_end; addr += 4, i += 4) { - if ((addr == page_end - 4) || - ((bp->flash_info->flags & BNX2_NV_BUFFERED) && - (addr == data_end - 4))) { - - cmd_flags |= BNX2_NVM_COMMAND_LAST; - } - rc = bnx2_nvram_write_dword(bp, addr, buf, - cmd_flags); - - if (rc != 0) - goto nvram_write_end; - - cmd_flags = 0; - buf += 4; - } - - /* Loop to write back the buffer data from data_end - * to page_end */ - if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { - for (addr = data_end; addr < page_end; - addr += 4, i += 4) { - - if (addr == page_end-4) { - cmd_flags = BNX2_NVM_COMMAND_LAST; - } - rc = bnx2_nvram_write_dword(bp, addr, - &flash_buffer[i], cmd_flags); - - if (rc != 0) - goto nvram_write_end; - - cmd_flags = 0; - } - } - - /* Disable writes to flash interface (lock write-protect) */ - bnx2_disable_nvram_write(bp); - - /* Disable access to flash interface */ - bnx2_disable_nvram_access(bp); - bnx2_release_nvram_lock(bp); - - /* Increment written */ - written += data_end - data_start; - } - -nvram_write_end: - kfree(flash_buffer); - kfree(align_buf); - return rc; -} - -static void -bnx2_init_fw_cap(struct bnx2 *bp) -{ - u32 val, sig = 0; - - bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP; - bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN; - - if (!(bp->flags & BNX2_FLAG_ASF_ENABLE)) - bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN; - - val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB); - if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE) - return; - - if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) { - bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN; - sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN; - } - - if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && - (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) { - u32 link; - - bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP; - - link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS); - if (link & BNX2_LINK_STATUS_SERDES_LINK) - bp->phy_port = PORT_FIBRE; - else - bp->phy_port = PORT_TP; - - sig |= BNX2_DRV_ACK_CAP_SIGNATURE | - BNX2_FW_CAP_REMOTE_PHY_CAPABLE; - } - - if (netif_running(bp->dev) && sig) - bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig); -} - -static void -bnx2_setup_msix_tbl(struct bnx2 
*bp) -{ - REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN); - - REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR); - REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); -} - -static int -bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) -{ - u32 val; - int i, rc = 0; - u8 old_port; - - /* Wait for the current PCI transaction to complete before - * issuing a reset. */ - if ((CHIP_NUM(bp) == CHIP_NUM_5706) || - (CHIP_NUM(bp) == CHIP_NUM_5708)) { - REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, - BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | - BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | - BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | - BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); - val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); - udelay(5); - } else { /* 5709 */ - val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); - val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; - REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); - val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); - - for (i = 0; i < 100; i++) { - msleep(1); - val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL); - if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND)) - break; - } - } - - /* Wait for the firmware to tell us it is ok to issue a reset. */ - bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); - - /* Deposit a driver reset signature so the firmware knows that - * this is a soft reset. */ - bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE, - BNX2_DRV_RESET_SIGNATURE_MAGIC); - - /* Do a dummy read to force the chip to complete all current transaction - * before we issue a reset. */ - val = REG_RD(bp, BNX2_MISC_ID); - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET); - REG_RD(bp, BNX2_MISC_COMMAND); - udelay(5); - - val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | - BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; - - REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); - - } else { - val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | - BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | - BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; - - /* Chip reset. */ - REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); - - /* Reading back any register after chip reset will hang the - * bus on 5706 A0 and A1. The msleep below provides plenty - * of margin for write posting. - */ - if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || - (CHIP_ID(bp) == CHIP_ID_5706_A1)) - msleep(20); - - /* Reset takes approximate 30 usec */ - for (i = 0; i < 10; i++) { - val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG); - if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | - BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) - break; - udelay(10); - } - - if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | - BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { - pr_err("Chip reset did not complete\n"); - return -EBUSY; - } - } - - /* Make sure byte swapping is properly configured. */ - val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0); - if (val != 0x01020304) { - pr_err("Chip not in correct endian mode\n"); - return -ENODEV; - } - - /* Wait for the firmware to finish its initialization. */ - rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0); - if (rc) - return rc; - - spin_lock_bh(&bp->phy_lock); - old_port = bp->phy_port; - bnx2_init_fw_cap(bp); - if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) && - old_port != bp->phy_port) - bnx2_set_default_remote_link(bp); - spin_unlock_bh(&bp->phy_lock); - - if (CHIP_ID(bp) == CHIP_ID_5706_A0) { - /* Adjust the voltage regular to two steps lower. The default - * of this register is 0x0000000e. 
*/ - REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa); - - /* Remove bad rbuf memory from the free pool. */ - rc = bnx2_alloc_bad_rbuf(bp); - } - - if (bp->flags & BNX2_FLAG_USING_MSIX) { - bnx2_setup_msix_tbl(bp); - /* Prevent MSIX table reads and write from timing out */ - REG_WR(bp, BNX2_MISC_ECO_HW_CTL, - BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN); - } - - return rc; -} - -static int -bnx2_init_chip(struct bnx2 *bp) -{ - u32 val, mtu; - int rc, i; - - /* Make sure the interrupt is not active. */ - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT); - - val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP | - BNX2_DMA_CONFIG_DATA_WORD_SWAP | -#ifdef __BIG_ENDIAN - BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | -#endif - BNX2_DMA_CONFIG_CNTL_WORD_SWAP | - DMA_READ_CHANS << 12 | - DMA_WRITE_CHANS << 16; - - val |= (0x2 << 20) | (1 << 11); - - if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133)) - val |= (1 << 23); - - if ((CHIP_NUM(bp) == CHIP_NUM_5706) && - (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX)) - val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA; - - REG_WR(bp, BNX2_DMA_CONFIG, val); - - if (CHIP_ID(bp) == CHIP_ID_5706_A0) { - val = REG_RD(bp, BNX2_TDMA_CONFIG); - val |= BNX2_TDMA_CONFIG_ONE_DMA; - REG_WR(bp, BNX2_TDMA_CONFIG, val); - } - - if (bp->flags & BNX2_FLAG_PCIX) { - u16 val16; - - pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, - &val16); - pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, - val16 & ~PCI_X_CMD_ERO); - } - - REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, - BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | - BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | - BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); - - /* Initialize context mapping and zero out the quick contexts. The - * context block must have already been enabled. */ - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - rc = bnx2_init_5709_context(bp); - if (rc) - return rc; - } else - bnx2_init_context(bp); - - if ((rc = bnx2_init_cpus(bp)) != 0) - return rc; - - bnx2_init_nvram(bp); - - bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); - - val = REG_RD(bp, BNX2_MQ_CONFIG); - val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; - val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - val |= BNX2_MQ_CONFIG_BIN_MQ_MODE; - if (CHIP_REV(bp) == CHIP_REV_Ax) - val |= BNX2_MQ_CONFIG_HALT_DIS; - } - - REG_WR(bp, BNX2_MQ_CONFIG, val); - - val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); - REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val); - REG_WR(bp, BNX2_MQ_KNL_WIND_END, val); - - val = (BCM_PAGE_BITS - 8) << 24; - REG_WR(bp, BNX2_RV2P_CONFIG, val); - - /* Configure page size. */ - val = REG_RD(bp, BNX2_TBDR_CONFIG); - val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE; - val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; - REG_WR(bp, BNX2_TBDR_CONFIG, val); - - val = bp->mac_addr[0] + - (bp->mac_addr[1] << 8) + - (bp->mac_addr[2] << 16) + - bp->mac_addr[3] + - (bp->mac_addr[4] << 8) + - (bp->mac_addr[5] << 16); - REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); - - /* Program the MTU. Also include 4 bytes for CRC32. 
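
The sizing below adds the Ethernet header and FCS to the MTU before programming the MAC, and flips the jumbo enable once the result exceeds a standard frame. Assuming MAX_ETHERNET_PACKET_SIZE is the classic 1514-byte untagged frame, the arithmetic goes:

	/* mtu = 9000: val = 9000 + ETH_HLEN(14) + ETH_FCS_LEN(4) = 9018
	 *             9018 > 1514 + 4, so JUMBO_ENA is set
	 * mtu = 1500: val = 1518, jumbo stays off
	 *
	 * The RBUF thresholds are then programmed for at least a
	 * 1500-byte MTU, hence the clamp that follows the register write.
	 */
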
*/ - mtu = bp->dev->mtu; - val = mtu + ETH_HLEN + ETH_FCS_LEN; - if (val > (MAX_ETHERNET_PACKET_SIZE + 4)) - val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA; - REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); - - if (mtu < 1500) - mtu = 1500; - - bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu)); - bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu)); - bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu)); - - memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size); - for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) - bp->bnx2_napi[i].last_status_idx = 0; - - bp->idle_chk_status_idx = 0xffff; - - bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; - - /* Set up how to generate a link change interrupt. */ - REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); - - REG_WR(bp, BNX2_HC_STATUS_ADDR_L, - (u64) bp->status_blk_mapping & 0xffffffff); - REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32); - - REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L, - (u64) bp->stats_blk_mapping & 0xffffffff); - REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H, - (u64) bp->stats_blk_mapping >> 32); - - REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, - (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); - - REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP, - (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); - - REG_WR(bp, BNX2_HC_COMP_PROD_TRIP, - (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip); - - REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks); - - REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks); - - REG_WR(bp, BNX2_HC_COM_TICKS, - (bp->com_ticks_int << 16) | bp->com_ticks); - - REG_WR(bp, BNX2_HC_CMD_TICKS, - (bp->cmd_ticks_int << 16) | bp->cmd_ticks); - - if (bp->flags & BNX2_FLAG_BROKEN_STATS) - REG_WR(bp, BNX2_HC_STATS_TICKS, 0); - else - REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); - REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ - - if (CHIP_ID(bp) == CHIP_ID_5706_A1) - val = BNX2_HC_CONFIG_COLLECT_STATS; - else { - val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE | - BNX2_HC_CONFIG_COLLECT_STATS; - } - - if (bp->flags & BNX2_FLAG_USING_MSIX) { - REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR, - BNX2_HC_MSIX_BIT_VECTOR_VAL); - - val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B; - } - - if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI) - val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM; - - REG_WR(bp, BNX2_HC_CONFIG, val); - - if (bp->rx_ticks < 25) - bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1); - else - bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0); - - for (i = 1; i < bp->irq_nvecs; i++) { - u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) + - BNX2_HC_SB_CONFIG_1; - - REG_WR(bp, base, - BNX2_HC_SB_CONFIG_1_TX_TMR_MODE | - BNX2_HC_SB_CONFIG_1_RX_TMR_MODE | - BNX2_HC_SB_CONFIG_1_ONE_SHOT); - - REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, - (bp->tx_quick_cons_trip_int << 16) | - bp->tx_quick_cons_trip); - - REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF, - (bp->tx_ticks_int << 16) | bp->tx_ticks); - - REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF, - (bp->rx_quick_cons_trip_int << 16) | - bp->rx_quick_cons_trip); - - REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF, - (bp->rx_ticks_int << 16) | bp->rx_ticks); - } - - /* Clear internal stats counters. */ - REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); - - REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); - - /* Initialize the receive filter. 
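
One detail of the host-coalescing setup above: each *_TICKS and *_TRIP register packs two 16-bit parameters into a single 32-bit write, the interrupt-time value in the high half and the steady-state value in the low half. For example, with tx_ticks = 80 and tx_ticks_int = 18 (illustrative values, not the driver's defaults):

	REG_WR(bp, BNX2_HC_TX_TICKS, (18 << 16) | 80);	/* 0x00120050 */
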
*/ - bnx2_set_rx_mode(bp->dev); - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); - val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; - REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); - } - rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, - 1, 0); - - REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); - REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); - - udelay(20); - - bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND); - - return rc; -} - -static void -bnx2_clear_ring_states(struct bnx2 *bp) -{ - struct bnx2_napi *bnapi; - struct bnx2_tx_ring_info *txr; - struct bnx2_rx_ring_info *rxr; - int i; - - for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { - bnapi = &bp->bnx2_napi[i]; - txr = &bnapi->tx_ring; - rxr = &bnapi->rx_ring; - - txr->tx_cons = 0; - txr->hw_tx_cons = 0; - rxr->rx_prod_bseq = 0; - rxr->rx_prod = 0; - rxr->rx_cons = 0; - rxr->rx_pg_prod = 0; - rxr->rx_pg_cons = 0; - } -} - -static void -bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr) -{ - u32 val, offset0, offset1, offset2, offset3; - u32 cid_addr = GET_CID_ADDR(cid); - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - offset0 = BNX2_L2CTX_TYPE_XI; - offset1 = BNX2_L2CTX_CMD_TYPE_XI; - offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; - offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; - } else { - offset0 = BNX2_L2CTX_TYPE; - offset1 = BNX2_L2CTX_CMD_TYPE; - offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; - offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; - } - val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; - bnx2_ctx_wr(bp, cid_addr, offset0, val); - - val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); - bnx2_ctx_wr(bp, cid_addr, offset1, val); - - val = (u64) txr->tx_desc_mapping >> 32; - bnx2_ctx_wr(bp, cid_addr, offset2, val); - - val = (u64) txr->tx_desc_mapping & 0xffffffff; - bnx2_ctx_wr(bp, cid_addr, offset3, val); -} - -static void -bnx2_init_tx_ring(struct bnx2 *bp, int ring_num) -{ - struct tx_bd *txbd; - u32 cid = TX_CID; - struct bnx2_napi *bnapi; - struct bnx2_tx_ring_info *txr; - - bnapi = &bp->bnx2_napi[ring_num]; - txr = &bnapi->tx_ring; - - if (ring_num == 0) - cid = TX_CID; - else - cid = TX_TSS_CID + ring_num - 1; - - bp->tx_wake_thresh = bp->tx_ring_size / 2; - - txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT]; - - txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32; - txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff; - - txr->tx_prod = 0; - txr->tx_prod_bseq = 0; - - txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX; - txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ; - - bnx2_init_tx_context(bp, cid, txr); -} - -static void -bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size, - int num_rings) -{ - int i; - struct rx_bd *rxbd; - - for (i = 0; i < num_rings; i++) { - int j; - - rxbd = &rx_ring[i][0]; - for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) { - rxbd->rx_bd_len = buf_size; - rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; - } - if (i == (num_rings - 1)) - j = 0; - else - j = i + 1; - rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32; - rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff; - } -} - -static void -bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) -{ - int i; - u16 prod, ring_prod; - u32 cid, rx_cid_addr, val; - struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num]; - struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; - - if (ring_num == 0) - cid = RX_CID; - else - cid = RX_RSS_CID + ring_num - 1; - - rx_cid_addr = GET_CID_ADDR(cid); - - bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping, - 
bp->rx_buf_use_size, bp->rx_max_ring); - - bnx2_init_rx_context(bp, cid); - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - val = REG_RD(bp, BNX2_MQ_MAP_L2_5); - REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM); - } - - bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0); - if (bp->rx_pg_ring_size) { - bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring, - rxr->rx_pg_desc_mapping, - PAGE_SIZE, bp->rx_max_pg_ring); - val = (bp->rx_buf_use_size << 16) | PAGE_SIZE; - bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val); - bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY, - BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num); - - val = (u64) rxr->rx_pg_desc_mapping[0] >> 32; - bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val); - - val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff; - bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val); - - if (CHIP_NUM(bp) == CHIP_NUM_5709) - REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT); - } - - val = (u64) rxr->rx_desc_mapping[0] >> 32; - bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); - - val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff; - bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); - - ring_prod = prod = rxr->rx_pg_prod; - for (i = 0; i < bp->rx_pg_ring_size; i++) { - if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) { - netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n", - ring_num, i, bp->rx_pg_ring_size); - break; - } - prod = NEXT_RX_BD(prod); - ring_prod = RX_PG_RING_IDX(prod); - } - rxr->rx_pg_prod = prod; - - ring_prod = prod = rxr->rx_prod; - for (i = 0; i < bp->rx_ring_size; i++) { - if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) { - netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", - ring_num, i, bp->rx_ring_size); - break; - } - prod = NEXT_RX_BD(prod); - ring_prod = RX_RING_IDX(prod); - } - rxr->rx_prod = prod; - - rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX; - rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ; - rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX; - - REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); - REG_WR16(bp, rxr->rx_bidx_addr, prod); - - REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); -} - -static void -bnx2_init_all_rings(struct bnx2 *bp) -{ - int i; - u32 val; - - bnx2_clear_ring_states(bp); - - REG_WR(bp, BNX2_TSCH_TSS_CFG, 0); - for (i = 0; i < bp->num_tx_rings; i++) - bnx2_init_tx_ring(bp, i); - - if (bp->num_tx_rings > 1) - REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) | - (TX_TSS_CID << 7)); - - REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0); - bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0); - - for (i = 0; i < bp->num_rx_rings; i++) - bnx2_init_rx_ring(bp, i); - - if (bp->num_rx_rings > 1) { - u32 tbl_32 = 0; - - for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) { - int shift = (i % 8) << 2; - - tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift; - if ((i % 8) == 7) { - REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32); - REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) | - BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK | - BNX2_RLUP_RSS_COMMAND_WRITE | - BNX2_RLUP_RSS_COMMAND_HASH_MASK); - tbl_32 = 0; - } - } - - val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI | - BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI; - - REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val); - - } -} - -static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size) -{ - u32 max, num_rings = 1; - - while (ring_size > MAX_RX_DESC_CNT) { - ring_size -= MAX_RX_DESC_CNT; - num_rings++; - } - /* round to next 
power of 2 */ - max = max_size; - while ((max & num_rings) == 0) - max >>= 1; - - if (num_rings != max) - max <<= 1; - - return max; -} - -static void -bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) -{ - u32 rx_size, rx_space, jumbo_size; - - /* 8 for CRC and VLAN */ - rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8; - - rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD + - sizeof(struct skb_shared_info); - - bp->rx_copy_thresh = BNX2_RX_COPY_THRESH; - bp->rx_pg_ring_size = 0; - bp->rx_max_pg_ring = 0; - bp->rx_max_pg_ring_idx = 0; - if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) { - int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; - - jumbo_size = size * pages; - if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT) - jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT; - - bp->rx_pg_ring_size = jumbo_size; - bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size, - MAX_RX_PG_RINGS); - bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1; - rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET; - bp->rx_copy_thresh = 0; - } - - bp->rx_buf_use_size = rx_size; - /* hw alignment */ - bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN; - bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; - bp->rx_ring_size = size; - bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS); - bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1; -} - -static void -bnx2_free_tx_skbs(struct bnx2 *bp) -{ - int i; - - for (i = 0; i < bp->num_tx_rings; i++) { - struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; - struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; - int j; - - if (txr->tx_buf_ring == NULL) - continue; - - for (j = 0; j < TX_DESC_CNT; ) { - struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; - struct sk_buff *skb = tx_buf->skb; - int k, last; - - if (skb == NULL) { - j++; - continue; - } - - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); - - tx_buf->skb = NULL; - - last = tx_buf->nr_frags; - j++; - for (k = 0; k < last; k++, j++) { - tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; - dma_unmap_page(&bp->pdev->dev, - dma_unmap_addr(tx_buf, mapping), - skb_shinfo(skb)->frags[k].size, - PCI_DMA_TODEVICE); - } - dev_kfree_skb(skb); - } - } -} - -static void -bnx2_free_rx_skbs(struct bnx2 *bp) -{ - int i; - - for (i = 0; i < bp->num_rx_rings; i++) { - struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; - struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; - int j; - - if (rxr->rx_buf_ring == NULL) - return; - - for (j = 0; j < bp->rx_max_ring_idx; j++) { - struct sw_bd *rx_buf = &rxr->rx_buf_ring[j]; - struct sk_buff *skb = rx_buf->skb; - - if (skb == NULL) - continue; - - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); - - rx_buf->skb = NULL; - - dev_kfree_skb(skb); - } - for (j = 0; j < bp->rx_max_pg_ring_idx; j++) - bnx2_free_rx_page(bp, rxr, j); - } -} - -static void -bnx2_free_skbs(struct bnx2 *bp) -{ - bnx2_free_tx_skbs(bp); - bnx2_free_rx_skbs(bp); -} - -static int -bnx2_reset_nic(struct bnx2 *bp, u32 reset_code) -{ - int rc; - - rc = bnx2_reset_chip(bp, reset_code); - bnx2_free_skbs(bp); - if (rc) - return rc; - - if ((rc = bnx2_init_chip(bp)) != 0) - return rc; - - bnx2_init_all_rings(bp); - return 0; -} - -static int -bnx2_init_nic(struct bnx2 *bp, int reset_phy) -{ - int rc; - - if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0) - return rc; - - spin_lock_bh(&bp->phy_lock); - bnx2_init_phy(bp, reset_phy); - bnx2_set_link(bp); - if (bp->phy_flags & 
BNX2_PHY_FLAG_REMOTE_PHY_CAP) - bnx2_remote_phy_event(bp); - spin_unlock_bh(&bp->phy_lock); - return 0; -} - -static int -bnx2_shutdown_chip(struct bnx2 *bp) -{ - u32 reset_code; - - if (bp->flags & BNX2_FLAG_NO_WOL) - reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; - else if (bp->wol) - reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; - else - reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; - - return bnx2_reset_chip(bp, reset_code); -} - -static int -bnx2_test_registers(struct bnx2 *bp) -{ - int ret; - int i, is_5709; - static const struct { - u16 offset; - u16 flags; -#define BNX2_FL_NOT_5709 1 - u32 rw_mask; - u32 ro_mask; - } reg_tbl[] = { - { 0x006c, 0, 0x00000000, 0x0000003f }, - { 0x0090, 0, 0xffffffff, 0x00000000 }, - { 0x0094, 0, 0x00000000, 0x00000000 }, - - { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 }, - { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, - { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, - { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff }, - { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 }, - { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 }, - { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff }, - { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, - { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, - - { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, - { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, - { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, - { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, - { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, - { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, - - { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 }, - { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 }, - { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 }, - - { 0x1000, 0, 0x00000000, 0x00000001 }, - { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 }, - - { 0x1408, 0, 0x01c00800, 0x00000000 }, - { 0x149c, 0, 0x8000ffff, 0x00000000 }, - { 0x14a8, 0, 0x00000000, 0x000001ff }, - { 0x14ac, 0, 0x0fffffff, 0x10000000 }, - { 0x14b0, 0, 0x00000002, 0x00000001 }, - { 0x14b8, 0, 0x00000000, 0x00000000 }, - { 0x14c0, 0, 0x00000000, 0x00000009 }, - { 0x14c4, 0, 0x00003fff, 0x00000000 }, - { 0x14cc, 0, 0x00000000, 0x00000001 }, - { 0x14d0, 0, 0xffffffff, 0x00000000 }, - - { 0x1800, 0, 0x00000000, 0x00000001 }, - { 0x1804, 0, 0x00000000, 0x00000003 }, - - { 0x2800, 0, 0x00000000, 0x00000001 }, - { 0x2804, 0, 0x00000000, 0x00003f01 }, - { 0x2808, 0, 0x0f3f3f03, 0x00000000 }, - { 0x2810, 0, 0xffff0000, 0x00000000 }, - { 0x2814, 0, 0xffff0000, 0x00000000 }, - { 0x2818, 0, 0xffff0000, 0x00000000 }, - { 0x281c, 0, 0xffff0000, 0x00000000 }, - { 0x2834, 0, 0xffffffff, 0x00000000 }, - { 0x2840, 0, 0x00000000, 0xffffffff }, - { 0x2844, 0, 0x00000000, 0xffffffff }, - { 0x2848, 0, 0xffffffff, 0x00000000 }, - { 0x284c, 0, 0xf800f800, 0x07ff07ff }, - - { 0x2c00, 0, 0x00000000, 0x00000011 }, - { 0x2c04, 0, 0x00000000, 0x00030007 }, - - { 0x3c00, 0, 0x00000000, 0x00000001 }, - { 0x3c04, 0, 0x00000000, 0x00070000 }, - { 0x3c08, 0, 0x00007f71, 0x07f00000 }, - { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 }, - { 0x3c10, 0, 0xffffffff, 0x00000000 }, - { 0x3c14, 0, 0x00000000, 0xffffffff }, - { 0x3c18, 0, 0x00000000, 0xffffffff }, - { 0x3c1c, 0, 0xfffff000, 0x00000000 }, - { 0x3c20, 0, 0xffffff00, 0x00000000 }, - - { 0x5004, 0, 0x00000000, 0x0000007f }, - { 0x5008, 0, 0x0f0007ff, 0x00000000 }, - - { 0x5c00, 0, 0x00000000, 0x00000001 }, - { 0x5c04, 0, 0x00000000, 0x0003000f }, - { 0x5c08, 0, 0x00000003, 0x00000000 }, - { 0x5c0c, 0, 
0x0000fff8, 0x00000000 }, - { 0x5c10, 0, 0x00000000, 0xffffffff }, - { 0x5c80, 0, 0x00000000, 0x0f7113f1 }, - { 0x5c84, 0, 0x00000000, 0x0000f333 }, - { 0x5c88, 0, 0x00000000, 0x00077373 }, - { 0x5c8c, 0, 0x00000000, 0x0007f737 }, - - { 0x6808, 0, 0x0000ff7f, 0x00000000 }, - { 0x680c, 0, 0xffffffff, 0x00000000 }, - { 0x6810, 0, 0xffffffff, 0x00000000 }, - { 0x6814, 0, 0xffffffff, 0x00000000 }, - { 0x6818, 0, 0xffffffff, 0x00000000 }, - { 0x681c, 0, 0xffffffff, 0x00000000 }, - { 0x6820, 0, 0x00ff00ff, 0x00000000 }, - { 0x6824, 0, 0x00ff00ff, 0x00000000 }, - { 0x6828, 0, 0x00ff00ff, 0x00000000 }, - { 0x682c, 0, 0x03ff03ff, 0x00000000 }, - { 0x6830, 0, 0x03ff03ff, 0x00000000 }, - { 0x6834, 0, 0x03ff03ff, 0x00000000 }, - { 0x6838, 0, 0x03ff03ff, 0x00000000 }, - { 0x683c, 0, 0x0000ffff, 0x00000000 }, - { 0x6840, 0, 0x00000ff0, 0x00000000 }, - { 0x6844, 0, 0x00ffff00, 0x00000000 }, - { 0x684c, 0, 0xffffffff, 0x00000000 }, - { 0x6850, 0, 0x7f7f7f7f, 0x00000000 }, - { 0x6854, 0, 0x7f7f7f7f, 0x00000000 }, - { 0x6858, 0, 0x7f7f7f7f, 0x00000000 }, - { 0x685c, 0, 0x7f7f7f7f, 0x00000000 }, - { 0x6908, 0, 0x00000000, 0x0001ff0f }, - { 0x690c, 0, 0x00000000, 0x0ffe00f0 }, - - { 0xffff, 0, 0x00000000, 0x00000000 }, - }; - - ret = 0; - is_5709 = 0; - if (CHIP_NUM(bp) == CHIP_NUM_5709) - is_5709 = 1; - - for (i = 0; reg_tbl[i].offset != 0xffff; i++) { - u32 offset, rw_mask, ro_mask, save_val, val; - u16 flags = reg_tbl[i].flags; - - if (is_5709 && (flags & BNX2_FL_NOT_5709)) - continue; - - offset = (u32) reg_tbl[i].offset; - rw_mask = reg_tbl[i].rw_mask; - ro_mask = reg_tbl[i].ro_mask; - - save_val = readl(bp->regview + offset); - - writel(0, bp->regview + offset); - - val = readl(bp->regview + offset); - if ((val & rw_mask) != 0) { - goto reg_test_err; - } - - if ((val & ro_mask) != (save_val & ro_mask)) { - goto reg_test_err; - } - - writel(0xffffffff, bp->regview + offset); - - val = readl(bp->regview + offset); - if ((val & rw_mask) != rw_mask) { - goto reg_test_err; - } - - if ((val & ro_mask) != (save_val & ro_mask)) { - goto reg_test_err; - } - - writel(save_val, bp->regview + offset); - continue; - -reg_test_err: - writel(save_val, bp->regview + offset); - ret = -ENODEV; - break; - } - return ret; -} - -static int -bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) -{ - static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555, - 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa }; - int i; - - for (i = 0; i < sizeof(test_pattern) / 4; i++) { - u32 offset; - - for (offset = 0; offset < size; offset += 4) { - - bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]); - - if (bnx2_reg_rd_ind(bp, start + offset) != - test_pattern[i]) { - return -ENODEV; - } - } - } - return 0; -} - -static int -bnx2_test_memory(struct bnx2 *bp) -{ - int ret = 0; - int i; - static struct mem_entry { - u32 offset; - u32 len; - } mem_tbl_5706[] = { - { 0x60000, 0x4000 }, - { 0xa0000, 0x3000 }, - { 0xe0000, 0x4000 }, - { 0x120000, 0x4000 }, - { 0x1a0000, 0x4000 }, - { 0x160000, 0x4000 }, - { 0xffffffff, 0 }, - }, - mem_tbl_5709[] = { - { 0x60000, 0x4000 }, - { 0xa0000, 0x3000 }, - { 0xe0000, 0x4000 }, - { 0x120000, 0x4000 }, - { 0x1a0000, 0x4000 }, - { 0xffffffff, 0 }, - }; - struct mem_entry *mem_tbl; - - if (CHIP_NUM(bp) == CHIP_NUM_5709) - mem_tbl = mem_tbl_5709; - else - mem_tbl = mem_tbl_5706; - - for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { - if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset, - mem_tbl[i].len)) != 0) { - return ret; - } - } - - return ret; -} - -#define BNX2_MAC_LOOPBACK 0 -#define 
BNX2_PHY_LOOPBACK 1 - -static int -bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) -{ - unsigned int pkt_size, num_pkts, i; - struct sk_buff *skb, *rx_skb; - unsigned char *packet; - u16 rx_start_idx, rx_idx; - dma_addr_t map; - struct tx_bd *txbd; - struct sw_bd *rx_buf; - struct l2_fhdr *rx_hdr; - int ret = -ENODEV; - struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi; - struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; - struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; - - tx_napi = bnapi; - - txr = &tx_napi->tx_ring; - rxr = &bnapi->rx_ring; - if (loopback_mode == BNX2_MAC_LOOPBACK) { - bp->loopback = MAC_LOOPBACK; - bnx2_set_mac_loopback(bp); - } - else if (loopback_mode == BNX2_PHY_LOOPBACK) { - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) - return 0; - - bp->loopback = PHY_LOOPBACK; - bnx2_set_phy_loopback(bp); - } - else - return -EINVAL; - - pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4); - skb = netdev_alloc_skb(bp->dev, pkt_size); - if (!skb) - return -ENOMEM; - packet = skb_put(skb, pkt_size); - memcpy(packet, bp->dev->dev_addr, 6); - memset(packet + 6, 0x0, 8); - for (i = 14; i < pkt_size; i++) - packet[i] = (unsigned char) (i & 0xff); - - map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, - PCI_DMA_TODEVICE); - if (dma_mapping_error(&bp->pdev->dev, map)) { - dev_kfree_skb(skb); - return -EIO; - } - - REG_WR(bp, BNX2_HC_COMMAND, - bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); - - REG_RD(bp, BNX2_HC_COMMAND); - - udelay(5); - rx_start_idx = bnx2_get_hw_rx_cons(bnapi); - - num_pkts = 0; - - txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)]; - - txbd->tx_bd_haddr_hi = (u64) map >> 32; - txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff; - txbd->tx_bd_mss_nbytes = pkt_size; - txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END; - - num_pkts++; - txr->tx_prod = NEXT_TX_BD(txr->tx_prod); - txr->tx_prod_bseq += pkt_size; - - REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod); - REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); - - udelay(100); - - REG_WR(bp, BNX2_HC_COMMAND, - bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); - - REG_RD(bp, BNX2_HC_COMMAND); - - udelay(5); - - dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); - dev_kfree_skb(skb); - - if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod) - goto loopback_test_done; - - rx_idx = bnx2_get_hw_rx_cons(bnapi); - if (rx_idx != rx_start_idx + num_pkts) { - goto loopback_test_done; - } - - rx_buf = &rxr->rx_buf_ring[rx_start_idx]; - rx_skb = rx_buf->skb; - - rx_hdr = rx_buf->desc; - skb_reserve(rx_skb, BNX2_RX_OFFSET); - - dma_sync_single_for_cpu(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, PCI_DMA_FROMDEVICE); - - if (rx_hdr->l2_fhdr_status & - (L2_FHDR_ERRORS_BAD_CRC | - L2_FHDR_ERRORS_PHY_DECODE | - L2_FHDR_ERRORS_ALIGNMENT | - L2_FHDR_ERRORS_TOO_SHORT | - L2_FHDR_ERRORS_GIANT_FRAME)) { - - goto loopback_test_done; - } - - if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) { - goto loopback_test_done; - } - - for (i = 14; i < pkt_size; i++) { - if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) { - goto loopback_test_done; - } - } - - ret = 0; - -loopback_test_done: - bp->loopback = 0; - return ret; -} - -#define BNX2_MAC_LOOPBACK_FAILED 1 -#define BNX2_PHY_LOOPBACK_FAILED 2 -#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \ - BNX2_PHY_LOOPBACK_FAILED) - -static int -bnx2_test_loopback(struct bnx2 *bp) -{ - int rc = 0; - - if (!netif_running(bp->dev)) - return BNX2_LOOPBACK_FAILED; - - bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); - 
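	/* Annotation (not in the original source): the chip is fully
	 * reset before the self-test pass so the loopback frames run
	 * against freshly initialized rings.  The PHY is re-initialized
	 * under phy_lock, then MAC and PHY loopback are exercised
	 * separately so each failure lands in its own bit of the
	 * ethtool result mask.
	 */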
spin_lock_bh(&bp->phy_lock); - bnx2_init_phy(bp, 1); - spin_unlock_bh(&bp->phy_lock); - if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK)) - rc |= BNX2_MAC_LOOPBACK_FAILED; - if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK)) - rc |= BNX2_PHY_LOOPBACK_FAILED; - return rc; -} - -#define NVRAM_SIZE 0x200 -#define CRC32_RESIDUAL 0xdebb20e3 - -static int -bnx2_test_nvram(struct bnx2 *bp) -{ - __be32 buf[NVRAM_SIZE / 4]; - u8 *data = (u8 *) buf; - int rc = 0; - u32 magic, csum; - - if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0) - goto test_nvram_done; - - magic = be32_to_cpu(buf[0]); - if (magic != 0x669955aa) { - rc = -ENODEV; - goto test_nvram_done; - } - - if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0) - goto test_nvram_done; - - csum = ether_crc_le(0x100, data); - if (csum != CRC32_RESIDUAL) { - rc = -ENODEV; - goto test_nvram_done; - } - - csum = ether_crc_le(0x100, data + 0x100); - if (csum != CRC32_RESIDUAL) { - rc = -ENODEV; - } - -test_nvram_done: - return rc; -} - -static int -bnx2_test_link(struct bnx2 *bp) -{ - u32 bmsr; - - if (!netif_running(bp->dev)) - return -ENODEV; - - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { - if (bp->link_up) - return 0; - return -ENODEV; - } - spin_lock_bh(&bp->phy_lock); - bnx2_enable_bmsr1(bp); - bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); - bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); - bnx2_disable_bmsr1(bp); - spin_unlock_bh(&bp->phy_lock); - - if (bmsr & BMSR_LSTATUS) { - return 0; - } - return -ENODEV; -} - -static int -bnx2_test_intr(struct bnx2 *bp) -{ - int i; - u16 status_idx; - - if (!netif_running(bp->dev)) - return -ENODEV; - - status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; - - /* This register is not touched during run-time. */ - REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); - REG_RD(bp, BNX2_HC_COMMAND); - - for (i = 0; i < 10; i++) { - if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != - status_idx) { - - break; - } - - msleep_interruptible(10); - } - if (i < 10) - return 0; - - return -ENODEV; -} - -/* Determining link for parallel detection. 
*/ -static int -bnx2_5706_serdes_has_link(struct bnx2 *bp) -{ - u32 mode_ctl, an_dbg, exp; - - if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL) - return 0; - - bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL); - bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl); - - if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET)) - return 0; - - bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); - bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); - bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); - - if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID)) - return 0; - - bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1); - bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp); - bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp); - - if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */ - return 0; - - return 1; -} - -static void -bnx2_5706_serdes_timer(struct bnx2 *bp) -{ - int check_link = 1; - - spin_lock(&bp->phy_lock); - if (bp->serdes_an_pending) { - bp->serdes_an_pending--; - check_link = 0; - } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { - u32 bmcr; - - bp->current_interval = BNX2_TIMER_INTERVAL; - - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - - if (bmcr & BMCR_ANENABLE) { - if (bnx2_5706_serdes_has_link(bp)) { - bmcr &= ~BMCR_ANENABLE; - bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; - bnx2_write_phy(bp, bp->mii_bmcr, bmcr); - bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT; - } - } - } - else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) && - (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) { - u32 phy2; - - bnx2_write_phy(bp, 0x17, 0x0f01); - bnx2_read_phy(bp, 0x15, &phy2); - if (phy2 & 0x20) { - u32 bmcr; - - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - bmcr |= BMCR_ANENABLE; - bnx2_write_phy(bp, bp->mii_bmcr, bmcr); - - bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; - } - } else - bp->current_interval = BNX2_TIMER_INTERVAL; - - if (check_link) { - u32 val; - - bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); - bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); - bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); - - if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) { - if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) { - bnx2_5706s_force_link_dn(bp, 1); - bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN; - } else - bnx2_set_link(bp); - } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC)) - bnx2_set_link(bp); - } - spin_unlock(&bp->phy_lock); -} - -static void -bnx2_5708_serdes_timer(struct bnx2 *bp) -{ - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) - return; - - if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) { - bp->serdes_an_pending = 0; - return; - } - - spin_lock(&bp->phy_lock); - if (bp->serdes_an_pending) - bp->serdes_an_pending--; - else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { - u32 bmcr; - - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - if (bmcr & BMCR_ANENABLE) { - bnx2_enable_forced_2g5(bp); - bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT; - } else { - bnx2_disable_forced_2g5(bp); - bp->serdes_an_pending = 2; - bp->current_interval = BNX2_TIMER_INTERVAL; - } - - } else - bp->current_interval = BNX2_TIMER_INTERVAL; - - spin_unlock(&bp->phy_lock); -} - -static void -bnx2_timer(unsigned long data) -{ - struct bnx2 *bp = (struct bnx2 *) data; - - if (!netif_running(bp->dev)) - return; - - if (atomic_read(&bp->intr_sem) != 0) - goto bnx2_restart_timer; - - if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) == - BNX2_FLAG_USING_MSI) - bnx2_chk_missed_msi(bp); - - bnx2_send_heart_beat(bp); - 
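	/* Annotation (not in the original source): beyond the firmware
	 * heartbeat above, this timer also latches the firmware RX drop
	 * count into the statistics block, forces a statistics DMA on
	 * chips with the broken-stats erratum, and drives the 5706/5708
	 * SerDes link-detection state machines.
	 */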
- bp->stats_blk->stat_FwRxDrop = - bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT); - - /* workaround occasional corrupted counters */ - if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks) - REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | - BNX2_HC_COMMAND_STATS_NOW); - - if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { - if (CHIP_NUM(bp) == CHIP_NUM_5706) - bnx2_5706_serdes_timer(bp); - else - bnx2_5708_serdes_timer(bp); - } - -bnx2_restart_timer: - mod_timer(&bp->timer, jiffies + bp->current_interval); -} - -static int -bnx2_request_irq(struct bnx2 *bp) -{ - unsigned long flags; - struct bnx2_irq *irq; - int rc = 0, i; - - if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX) - flags = 0; - else - flags = IRQF_SHARED; - - for (i = 0; i < bp->irq_nvecs; i++) { - irq = &bp->irq_tbl[i]; - rc = request_irq(irq->vector, irq->handler, flags, irq->name, - &bp->bnx2_napi[i]); - if (rc) - break; - irq->requested = 1; - } - return rc; -} - -static void -__bnx2_free_irq(struct bnx2 *bp) -{ - struct bnx2_irq *irq; - int i; - - for (i = 0; i < bp->irq_nvecs; i++) { - irq = &bp->irq_tbl[i]; - if (irq->requested) - free_irq(irq->vector, &bp->bnx2_napi[i]); - irq->requested = 0; - } -} - -static void -bnx2_free_irq(struct bnx2 *bp) -{ - - __bnx2_free_irq(bp); - if (bp->flags & BNX2_FLAG_USING_MSI) - pci_disable_msi(bp->pdev); - else if (bp->flags & BNX2_FLAG_USING_MSIX) - pci_disable_msix(bp->pdev); - - bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI); -} - -static void -bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) -{ - int i, total_vecs, rc; - struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC]; - struct net_device *dev = bp->dev; - const int len = sizeof(bp->irq_tbl[0].name); - - bnx2_setup_msix_tbl(bp); - REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1); - REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); - REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); - - /* Need to flush the previous three writes to ensure MSI-X - * is setup properly */ - REG_RD(bp, BNX2_PCI_MSIX_CONTROL); - - for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { - msix_ent[i].entry = i; - msix_ent[i].vector = 0; - } - - total_vecs = msix_vecs; -#ifdef BCM_CNIC - total_vecs++; -#endif - rc = -ENOSPC; - while (total_vecs >= BNX2_MIN_MSIX_VEC) { - rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs); - if (rc <= 0) - break; - if (rc > 0) - total_vecs = rc; - } - - if (rc != 0) - return; - - msix_vecs = total_vecs; -#ifdef BCM_CNIC - msix_vecs--; -#endif - bp->irq_nvecs = msix_vecs; - bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI; - for (i = 0; i < total_vecs; i++) { - bp->irq_tbl[i].vector = msix_ent[i].vector; - snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i); - bp->irq_tbl[i].handler = bnx2_msi_1shot; - } -} - -static int -bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) -{ - int cpus = num_online_cpus(); - int msix_vecs = min(cpus + 1, RX_MAX_RINGS); - - bp->irq_tbl[0].handler = bnx2_interrupt; - strcpy(bp->irq_tbl[0].name, bp->dev->name); - bp->irq_nvecs = 1; - bp->irq_tbl[0].vector = bp->pdev->irq; - - if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi) - bnx2_enable_msix(bp, msix_vecs); - - if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi && - !(bp->flags & BNX2_FLAG_USING_MSIX)) { - if (pci_enable_msi(bp->pdev) == 0) { - bp->flags |= BNX2_FLAG_USING_MSI; - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - bp->flags |= BNX2_FLAG_ONE_SHOT_MSI; - bp->irq_tbl[0].handler = bnx2_msi_1shot; - } else - bp->irq_tbl[0].handler = bnx2_msi; - - bp->irq_tbl[0].vector = bp->pdev->irq; - } - } - 
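	/* Annotation (not in the original source): interrupt modes are
	 * tried in decreasing order of preference -- MSI-X with one
	 * vector per ring, then MSI, then legacy INTx.  The TX ring
	 * count below is rounded down to a power of two (e.g. 5 vectors
	 * -> 4 TX rings) while the RX side uses every vector.
	 */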
- bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs); - netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings); - - bp->num_rx_rings = bp->irq_nvecs; - return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings); -} - -/* Called with rtnl_lock */ -static int -bnx2_open(struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - int rc; - - netif_carrier_off(dev); - - bnx2_set_power_state(bp, PCI_D0); - bnx2_disable_int(bp); - - rc = bnx2_setup_int_mode(bp, disable_msi); - if (rc) - goto open_err; - bnx2_init_napi(bp); - bnx2_napi_enable(bp); - rc = bnx2_alloc_mem(bp); - if (rc) - goto open_err; - - rc = bnx2_request_irq(bp); - if (rc) - goto open_err; - - rc = bnx2_init_nic(bp, 1); - if (rc) - goto open_err; - - mod_timer(&bp->timer, jiffies + bp->current_interval); - - atomic_set(&bp->intr_sem, 0); - - memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block)); - - bnx2_enable_int(bp); - - if (bp->flags & BNX2_FLAG_USING_MSI) { - /* Test MSI to make sure it is working - * If MSI test fails, go back to INTx mode - */ - if (bnx2_test_intr(bp) != 0) { - netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n"); - - bnx2_disable_int(bp); - bnx2_free_irq(bp); - - bnx2_setup_int_mode(bp, 1); - - rc = bnx2_init_nic(bp, 0); - - if (!rc) - rc = bnx2_request_irq(bp); - - if (rc) { - del_timer_sync(&bp->timer); - goto open_err; - } - bnx2_enable_int(bp); - } - } - if (bp->flags & BNX2_FLAG_USING_MSI) - netdev_info(dev, "using MSI\n"); - else if (bp->flags & BNX2_FLAG_USING_MSIX) - netdev_info(dev, "using MSIX\n"); - - netif_tx_start_all_queues(dev); - - return 0; - -open_err: - bnx2_napi_disable(bp); - bnx2_free_skbs(bp); - bnx2_free_irq(bp); - bnx2_free_mem(bp); - bnx2_del_napi(bp); - return rc; -} - -static void -bnx2_reset_task(struct work_struct *work) -{ - struct bnx2 *bp = container_of(work, struct bnx2, reset_task); - int rc; - - rtnl_lock(); - if (!netif_running(bp->dev)) { - rtnl_unlock(); - return; - } - - bnx2_netif_stop(bp, true); - - rc = bnx2_init_nic(bp, 1); - if (rc) { - netdev_err(bp->dev, "failed to reset NIC, closing\n"); - bnx2_napi_enable(bp); - dev_close(bp->dev); - rtnl_unlock(); - return; - } - - atomic_set(&bp->intr_sem, 1); - bnx2_netif_start(bp, true); - rtnl_unlock(); -} - -static void -bnx2_dump_state(struct bnx2 *bp) -{ - struct net_device *dev = bp->dev; - u32 val1, val2; - - pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1); - netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n", - atomic_read(&bp->intr_sem), val1); - pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1); - pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2); - netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2); - netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n", - REG_RD(bp, BNX2_EMAC_TX_STATUS), - REG_RD(bp, BNX2_EMAC_RX_STATUS)); - netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n", - REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); - netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n", - REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); - if (bp->flags & BNX2_FLAG_USING_MSIX) - netdev_err(dev, "DEBUG: PBA[%08x]\n", - REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE)); -} - -static void -bnx2_tx_timeout(struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - - bnx2_dump_state(bp); - bnx2_dump_mcp_state(bp); - - /* This allows the netif to be shutdown gracefully before resetting */ - 
schedule_work(&bp->reset_task); -} - -/* Called with netif_tx_lock. - * bnx2_tx_int() runs without netif_tx_lock unless it needs to call - * netif_wake_queue(). - */ -static netdev_tx_t -bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - dma_addr_t mapping; - struct tx_bd *txbd; - struct sw_tx_bd *tx_buf; - u32 len, vlan_tag_flags, last_frag, mss; - u16 prod, ring_prod; - int i; - struct bnx2_napi *bnapi; - struct bnx2_tx_ring_info *txr; - struct netdev_queue *txq; - - /* Determine which tx ring we will be placed on */ - i = skb_get_queue_mapping(skb); - bnapi = &bp->bnx2_napi[i]; - txr = &bnapi->tx_ring; - txq = netdev_get_tx_queue(dev, i); - - if (unlikely(bnx2_tx_avail(bp, txr) < - (skb_shinfo(skb)->nr_frags + 1))) { - netif_tx_stop_queue(txq); - netdev_err(dev, "BUG! Tx ring full when queue awake!\n"); - - return NETDEV_TX_BUSY; - } - len = skb_headlen(skb); - prod = txr->tx_prod; - ring_prod = TX_RING_IDX(prod); - - vlan_tag_flags = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; - } - - if (vlan_tx_tag_present(skb)) { - vlan_tag_flags |= - (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); - } - - if ((mss = skb_shinfo(skb)->gso_size)) { - u32 tcp_opt_len; - struct iphdr *iph; - - vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; - - tcp_opt_len = tcp_optlen(skb); - - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { - u32 tcp_off = skb_transport_offset(skb) - - sizeof(struct ipv6hdr) - ETH_HLEN; - - vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) | - TX_BD_FLAGS_SW_FLAGS; - if (likely(tcp_off == 0)) - vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK; - else { - tcp_off >>= 3; - vlan_tag_flags |= ((tcp_off & 0x3) << - TX_BD_FLAGS_TCP6_OFF0_SHL) | - ((tcp_off & 0x10) << - TX_BD_FLAGS_TCP6_OFF4_SHL); - mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL; - } - } else { - iph = ip_hdr(skb); - if (tcp_opt_len || (iph->ihl > 5)) { - vlan_tag_flags |= ((iph->ihl - 5) + - (tcp_opt_len >> 2)) << 8; - } - } - } else - mss = 0; - - mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); - if (dma_mapping_error(&bp->pdev->dev, mapping)) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - tx_buf = &txr->tx_buf_ring[ring_prod]; - tx_buf->skb = skb; - dma_unmap_addr_set(tx_buf, mapping, mapping); - - txbd = &txr->tx_desc_ring[ring_prod]; - - txbd->tx_bd_haddr_hi = (u64) mapping >> 32; - txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; - txbd->tx_bd_mss_nbytes = len | (mss << 16); - txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START; - - last_frag = skb_shinfo(skb)->nr_frags; - tx_buf->nr_frags = last_frag; - tx_buf->is_gso = skb_is_gso(skb); - - for (i = 0; i < last_frag; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - prod = NEXT_TX_BD(prod); - ring_prod = TX_RING_IDX(prod); - txbd = &txr->tx_desc_ring[ring_prod]; - - len = frag->size; - mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset, - len, PCI_DMA_TODEVICE); - if (dma_mapping_error(&bp->pdev->dev, mapping)) - goto dma_error; - dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, - mapping); - - txbd->tx_bd_haddr_hi = (u64) mapping >> 32; - txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; - txbd->tx_bd_mss_nbytes = len | (mss << 16); - txbd->tx_bd_vlan_tag_flags = vlan_tag_flags; - - } - txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END; - - prod = NEXT_TX_BD(prod); - txr->tx_prod_bseq += skb->len; - - REG_WR16(bp, txr->tx_bidx_addr, prod); - REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); - - 
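	/* Annotation (not in the original source): the two register
	 * writes above are the TX doorbell -- the new producer index
	 * plus the running byte count tell the chip how far the ring
	 * has advanced.  mmiowb() below keeps these MMIO writes ordered
	 * ahead of any following lock release on weakly ordered systems.
	 */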
mmiowb(); - - txr->tx_prod = prod; - - if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { - netif_tx_stop_queue(txq); - - /* netif_tx_stop_queue() must be done before checking - * tx index in bnx2_tx_avail() below, because in - * bnx2_tx_int(), we update tx index before checking for - * netif_tx_queue_stopped(). - */ - smp_mb(); - if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh) - netif_tx_wake_queue(txq); - } - - return NETDEV_TX_OK; -dma_error: - /* save value of frag that failed */ - last_frag = i; - - /* start back at beginning and unmap skb */ - prod = txr->tx_prod; - ring_prod = TX_RING_IDX(prod); - tx_buf = &txr->tx_buf_ring[ring_prod]; - tx_buf->skb = NULL; - dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); - - /* unmap remaining mapped pages */ - for (i = 0; i < last_frag; i++) { - prod = NEXT_TX_BD(prod); - ring_prod = TX_RING_IDX(prod); - tx_buf = &txr->tx_buf_ring[ring_prod]; - dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_shinfo(skb)->frags[i].size, - PCI_DMA_TODEVICE); - } - - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -/* Called with rtnl_lock */ -static int -bnx2_close(struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - - bnx2_disable_int_sync(bp); - bnx2_napi_disable(bp); - del_timer_sync(&bp->timer); - bnx2_shutdown_chip(bp); - bnx2_free_irq(bp); - bnx2_free_skbs(bp); - bnx2_free_mem(bp); - bnx2_del_napi(bp); - bp->link_up = 0; - netif_carrier_off(bp->dev); - bnx2_set_power_state(bp, PCI_D3hot); - return 0; -} - -static void -bnx2_save_stats(struct bnx2 *bp) -{ - u32 *hw_stats = (u32 *) bp->stats_blk; - u32 *temp_stats = (u32 *) bp->temp_stats_blk; - int i; - - /* The 1st 10 counters are 64-bit counters */ - for (i = 0; i < 20; i += 2) { - u32 hi; - u64 lo; - - hi = temp_stats[i] + hw_stats[i]; - lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1]; - if (lo > 0xffffffff) - hi++; - temp_stats[i] = hi; - temp_stats[i + 1] = lo & 0xffffffff; - } - - for ( ; i < sizeof(struct statistics_block) / 4; i++) - temp_stats[i] += hw_stats[i]; -} - -#define GET_64BIT_NET_STATS64(ctr) \ - (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo)) - -#define GET_64BIT_NET_STATS(ctr) \ - GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \ - GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr) - -#define GET_32BIT_NET_STATS(ctr) \ - (unsigned long) (bp->stats_blk->ctr + \ - bp->temp_stats_blk->ctr) - -static struct rtnl_link_stats64 * -bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) -{ - struct bnx2 *bp = netdev_priv(dev); - - if (bp->stats_blk == NULL) - return net_stats; - - net_stats->rx_packets = - GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) + - GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) + - GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts); - - net_stats->tx_packets = - GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) + - GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) + - GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts); - - net_stats->rx_bytes = - GET_64BIT_NET_STATS(stat_IfHCInOctets); - - net_stats->tx_bytes = - GET_64BIT_NET_STATS(stat_IfHCOutOctets); - - net_stats->multicast = - GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts); - - net_stats->collisions = - GET_32BIT_NET_STATS(stat_EtherStatsCollisions); - - net_stats->rx_length_errors = - GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) + - GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts); - - net_stats->rx_over_errors = - GET_32BIT_NET_STATS(stat_IfInFTQDiscards) + - GET_32BIT_NET_STATS(stat_IfInMBUFDiscards); - - 
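	/* Annotation (not in the original source): each GET_*_NET_STATS()
	 * value sums the live hardware statistics block with
	 * temp_stats_blk, the snapshot bnx2_save_stats() takes before a
	 * chip reset wipes the counters, so totals survive ring resizes
	 * and self-tests.
	 */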
net_stats->rx_frame_errors = - GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors); - - net_stats->rx_crc_errors = - GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors); - - net_stats->rx_errors = net_stats->rx_length_errors + - net_stats->rx_over_errors + net_stats->rx_frame_errors + - net_stats->rx_crc_errors; - - net_stats->tx_aborted_errors = - GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) + - GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions); - - if ((CHIP_NUM(bp) == CHIP_NUM_5706) || - (CHIP_ID(bp) == CHIP_ID_5708_A0)) - net_stats->tx_carrier_errors = 0; - else { - net_stats->tx_carrier_errors = - GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors); - } - - net_stats->tx_errors = - GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) + - net_stats->tx_aborted_errors + - net_stats->tx_carrier_errors; - - net_stats->rx_missed_errors = - GET_32BIT_NET_STATS(stat_IfInFTQDiscards) + - GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) + - GET_32BIT_NET_STATS(stat_FwRxDrop); - - return net_stats; -} - -/* All ethtool functions called with rtnl_lock */ - -static int -bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct bnx2 *bp = netdev_priv(dev); - int support_serdes = 0, support_copper = 0; - - cmd->supported = SUPPORTED_Autoneg; - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { - support_serdes = 1; - support_copper = 1; - } else if (bp->phy_port == PORT_FIBRE) - support_serdes = 1; - else - support_copper = 1; - - if (support_serdes) { - cmd->supported |= SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE; - if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) - cmd->supported |= SUPPORTED_2500baseX_Full; - - } - if (support_copper) { - cmd->supported |= SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_TP; - - } - - spin_lock_bh(&bp->phy_lock); - cmd->port = bp->phy_port; - cmd->advertising = bp->advertising; - - if (bp->autoneg & AUTONEG_SPEED) { - cmd->autoneg = AUTONEG_ENABLE; - } else { - cmd->autoneg = AUTONEG_DISABLE; - } - - if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(cmd, bp->line_speed); - cmd->duplex = bp->duplex; - } - else { - ethtool_cmd_speed_set(cmd, -1); - cmd->duplex = -1; - } - spin_unlock_bh(&bp->phy_lock); - - cmd->transceiver = XCVR_INTERNAL; - cmd->phy_address = bp->phy_addr; - - return 0; -} - -static int -bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct bnx2 *bp = netdev_priv(dev); - u8 autoneg = bp->autoneg; - u8 req_duplex = bp->req_duplex; - u16 req_line_speed = bp->req_line_speed; - u32 advertising = bp->advertising; - int err = -EINVAL; - - spin_lock_bh(&bp->phy_lock); - - if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE) - goto err_out_unlock; - - if (cmd->port != bp->phy_port && - !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)) - goto err_out_unlock; - - /* If device is down, we can store the settings only if the user - * is setting the currently active port. 
- */ - if (!netif_running(dev) && cmd->port != bp->phy_port) - goto err_out_unlock; - - if (cmd->autoneg == AUTONEG_ENABLE) { - autoneg |= AUTONEG_SPEED; - - advertising = cmd->advertising; - if (cmd->port == PORT_TP) { - advertising &= ETHTOOL_ALL_COPPER_SPEED; - if (!advertising) - advertising = ETHTOOL_ALL_COPPER_SPEED; - } else { - advertising &= ETHTOOL_ALL_FIBRE_SPEED; - if (!advertising) - advertising = ETHTOOL_ALL_FIBRE_SPEED; - } - advertising |= ADVERTISED_Autoneg; - } - else { - u32 speed = ethtool_cmd_speed(cmd); - if (cmd->port == PORT_FIBRE) { - if ((speed != SPEED_1000 && - speed != SPEED_2500) || - (cmd->duplex != DUPLEX_FULL)) - goto err_out_unlock; - - if (speed == SPEED_2500 && - !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) - goto err_out_unlock; - } else if (speed == SPEED_1000 || speed == SPEED_2500) - goto err_out_unlock; - - autoneg &= ~AUTONEG_SPEED; - req_line_speed = speed; - req_duplex = cmd->duplex; - advertising = 0; - } - - bp->autoneg = autoneg; - bp->advertising = advertising; - bp->req_line_speed = req_line_speed; - bp->req_duplex = req_duplex; - - err = 0; - /* If device is down, the new settings will be picked up when it is - * brought up. - */ - if (netif_running(dev)) - err = bnx2_setup_phy(bp, cmd->port); - -err_out_unlock: - spin_unlock_bh(&bp->phy_lock); - - return err; -} - -static void -bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct bnx2 *bp = netdev_priv(dev); - - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); - strcpy(info->bus_info, pci_name(bp->pdev)); - strcpy(info->fw_version, bp->fw_version); -} - -#define BNX2_REGDUMP_LEN (32 * 1024) - -static int -bnx2_get_regs_len(struct net_device *dev) -{ - return BNX2_REGDUMP_LEN; -} - -static void -bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) -{ - u32 *p = _p, i, offset; - u8 *orig_p = _p; - struct bnx2 *bp = netdev_priv(dev); - static const u32 reg_boundaries[] = { - 0x0000, 0x0098, 0x0400, 0x045c, - 0x0800, 0x0880, 0x0c00, 0x0c10, - 0x0c30, 0x0d08, 0x1000, 0x101c, - 0x1040, 0x1048, 0x1080, 0x10a4, - 0x1400, 0x1490, 0x1498, 0x14f0, - 0x1500, 0x155c, 0x1580, 0x15dc, - 0x1600, 0x1658, 0x1680, 0x16d8, - 0x1800, 0x1820, 0x1840, 0x1854, - 0x1880, 0x1894, 0x1900, 0x1984, - 0x1c00, 0x1c0c, 0x1c40, 0x1c54, - 0x1c80, 0x1c94, 0x1d00, 0x1d84, - 0x2000, 0x2030, 0x23c0, 0x2400, - 0x2800, 0x2820, 0x2830, 0x2850, - 0x2b40, 0x2c10, 0x2fc0, 0x3058, - 0x3c00, 0x3c94, 0x4000, 0x4010, - 0x4080, 0x4090, 0x43c0, 0x4458, - 0x4c00, 0x4c18, 0x4c40, 0x4c54, - 0x4fc0, 0x5010, 0x53c0, 0x5444, - 0x5c00, 0x5c18, 0x5c80, 0x5c90, - 0x5fc0, 0x6000, 0x6400, 0x6428, - 0x6800, 0x6848, 0x684c, 0x6860, - 0x6888, 0x6910, 0x8000 - }; - - regs->version = 0; - - memset(p, 0, BNX2_REGDUMP_LEN); - - if (!netif_running(bp->dev)) - return; - - i = 0; - offset = reg_boundaries[0]; - p += offset; - while (offset < BNX2_REGDUMP_LEN) { - *p++ = REG_RD(bp, offset); - offset += 4; - if (offset == reg_boundaries[i + 1]) { - offset = reg_boundaries[i + 2]; - p = (u32 *) (orig_p + offset); - i += 2; - } - } -} - -static void -bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct bnx2 *bp = netdev_priv(dev); - - if (bp->flags & BNX2_FLAG_NO_WOL) { - wol->supported = 0; - wol->wolopts = 0; - } - else { - wol->supported = WAKE_MAGIC; - if (bp->wol) - wol->wolopts = WAKE_MAGIC; - else - wol->wolopts = 0; - } - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -static int -bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo 
*wol) -{ - struct bnx2 *bp = netdev_priv(dev); - - if (wol->wolopts & ~WAKE_MAGIC) - return -EINVAL; - - if (wol->wolopts & WAKE_MAGIC) { - if (bp->flags & BNX2_FLAG_NO_WOL) - return -EINVAL; - - bp->wol = 1; - } - else { - bp->wol = 0; - } - return 0; -} - -static int -bnx2_nway_reset(struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - u32 bmcr; - - if (!netif_running(dev)) - return -EAGAIN; - - if (!(bp->autoneg & AUTONEG_SPEED)) { - return -EINVAL; - } - - spin_lock_bh(&bp->phy_lock); - - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { - int rc; - - rc = bnx2_setup_remote_phy(bp, bp->phy_port); - spin_unlock_bh(&bp->phy_lock); - return rc; - } - - /* Force a link down visible on the other side */ - if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { - bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); - spin_unlock_bh(&bp->phy_lock); - - msleep(20); - - spin_lock_bh(&bp->phy_lock); - - bp->current_interval = BNX2_SERDES_AN_TIMEOUT; - bp->serdes_an_pending = 1; - mod_timer(&bp->timer, jiffies + bp->current_interval); - } - - bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - bmcr &= ~BMCR_LOOPBACK; - bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); - - spin_unlock_bh(&bp->phy_lock); - - return 0; -} - -static u32 -bnx2_get_link(struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - - return bp->link_up; -} - -static int -bnx2_get_eeprom_len(struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - - if (bp->flash_info == NULL) - return 0; - - return (int) bp->flash_size; -} - -static int -bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, - u8 *eebuf) -{ - struct bnx2 *bp = netdev_priv(dev); - int rc; - - if (!netif_running(dev)) - return -EAGAIN; - - /* parameters already validated in ethtool_get_eeprom */ - - rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); - - return rc; -} - -static int -bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, - u8 *eebuf) -{ - struct bnx2 *bp = netdev_priv(dev); - int rc; - - if (!netif_running(dev)) - return -EAGAIN; - - /* parameters already validated in ethtool_set_eeprom */ - - rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); - - return rc; -} - -static int -bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) -{ - struct bnx2 *bp = netdev_priv(dev); - - memset(coal, 0, sizeof(struct ethtool_coalesce)); - - coal->rx_coalesce_usecs = bp->rx_ticks; - coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip; - coal->rx_coalesce_usecs_irq = bp->rx_ticks_int; - coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int; - - coal->tx_coalesce_usecs = bp->tx_ticks; - coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip; - coal->tx_coalesce_usecs_irq = bp->tx_ticks_int; - coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int; - - coal->stats_block_coalesce_usecs = bp->stats_ticks; - - return 0; -} - -static int -bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) -{ - struct bnx2 *bp = netdev_priv(dev); - - bp->rx_ticks = (u16) coal->rx_coalesce_usecs; - if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff; - - bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; - if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff; - - bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq; - if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff; - - bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq; - if (bp->rx_quick_cons_trip_int > 0xff) - bp->rx_quick_cons_trip_int = 0xff; 
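	/* Annotation (not in the original source): the clamps above and
	 * below appear to match the width of the host-coalescing
	 * register fields -- tick values are held to 10 bits (0x3ff
	 * microseconds) and frame-count trip points to 8 bits (0xff
	 * frames).
	 */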
- - bp->tx_ticks = (u16) coal->tx_coalesce_usecs; - if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff; - - bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames; - if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff; - - bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq; - if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff; - - bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq; - if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = - 0xff; - - bp->stats_ticks = coal->stats_block_coalesce_usecs; - if (bp->flags & BNX2_FLAG_BROKEN_STATS) { - if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC) - bp->stats_ticks = USEC_PER_SEC; - } - if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS) - bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS; - bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; - - if (netif_running(bp->dev)) { - bnx2_netif_stop(bp, true); - bnx2_init_nic(bp, 0); - bnx2_netif_start(bp, true); - } - - return 0; -} - -static void -bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) -{ - struct bnx2 *bp = netdev_priv(dev); - - ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT; - - ering->rx_pending = bp->rx_ring_size; - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = bp->rx_pg_ring_size; - - ering->tx_max_pending = MAX_TX_DESC_CNT; - ering->tx_pending = bp->tx_ring_size; -} - -static int -bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) -{ - if (netif_running(bp->dev)) { - /* Reset will erase chipset stats; save them */ - bnx2_save_stats(bp); - - bnx2_netif_stop(bp, true); - bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); - __bnx2_free_irq(bp); - bnx2_free_skbs(bp); - bnx2_free_mem(bp); - } - - bnx2_set_rx_ring_size(bp, rx); - bp->tx_ring_size = tx; - - if (netif_running(bp->dev)) { - int rc; - - rc = bnx2_alloc_mem(bp); - if (!rc) - rc = bnx2_request_irq(bp); - - if (!rc) - rc = bnx2_init_nic(bp, 0); - - if (rc) { - bnx2_napi_enable(bp); - dev_close(bp->dev); - return rc; - } -#ifdef BCM_CNIC - mutex_lock(&bp->cnic_lock); - /* Let cnic know about the new status block. 
*/ - if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) - bnx2_setup_cnic_irq_info(bp); - mutex_unlock(&bp->cnic_lock); -#endif - bnx2_netif_start(bp, true); - } - return 0; -} - -static int -bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) -{ - struct bnx2 *bp = netdev_priv(dev); - int rc; - - if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) || - (ering->tx_pending > MAX_TX_DESC_CNT) || - (ering->tx_pending <= MAX_SKB_FRAGS)) { - - return -EINVAL; - } - rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending); - return rc; -} - -static void -bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) -{ - struct bnx2 *bp = netdev_priv(dev); - - epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0); - epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0); - epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0); -} - -static int -bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) -{ - struct bnx2 *bp = netdev_priv(dev); - - bp->req_flow_ctrl = 0; - if (epause->rx_pause) - bp->req_flow_ctrl |= FLOW_CTRL_RX; - if (epause->tx_pause) - bp->req_flow_ctrl |= FLOW_CTRL_TX; - - if (epause->autoneg) { - bp->autoneg |= AUTONEG_FLOW_CTRL; - } - else { - bp->autoneg &= ~AUTONEG_FLOW_CTRL; - } - - if (netif_running(dev)) { - spin_lock_bh(&bp->phy_lock); - bnx2_setup_phy(bp, bp->phy_port); - spin_unlock_bh(&bp->phy_lock); - } - - return 0; -} - -static struct { - char string[ETH_GSTRING_LEN]; -} bnx2_stats_str_arr[] = { - { "rx_bytes" }, - { "rx_error_bytes" }, - { "tx_bytes" }, - { "tx_error_bytes" }, - { "rx_ucast_packets" }, - { "rx_mcast_packets" }, - { "rx_bcast_packets" }, - { "tx_ucast_packets" }, - { "tx_mcast_packets" }, - { "tx_bcast_packets" }, - { "tx_mac_errors" }, - { "tx_carrier_errors" }, - { "rx_crc_errors" }, - { "rx_align_errors" }, - { "tx_single_collisions" }, - { "tx_multi_collisions" }, - { "tx_deferred" }, - { "tx_excess_collisions" }, - { "tx_late_collisions" }, - { "tx_total_collisions" }, - { "rx_fragments" }, - { "rx_jabbers" }, - { "rx_undersize_packets" }, - { "rx_oversize_packets" }, - { "rx_64_byte_packets" }, - { "rx_65_to_127_byte_packets" }, - { "rx_128_to_255_byte_packets" }, - { "rx_256_to_511_byte_packets" }, - { "rx_512_to_1023_byte_packets" }, - { "rx_1024_to_1522_byte_packets" }, - { "rx_1523_to_9022_byte_packets" }, - { "tx_64_byte_packets" }, - { "tx_65_to_127_byte_packets" }, - { "tx_128_to_255_byte_packets" }, - { "tx_256_to_511_byte_packets" }, - { "tx_512_to_1023_byte_packets" }, - { "tx_1024_to_1522_byte_packets" }, - { "tx_1523_to_9022_byte_packets" }, - { "rx_xon_frames" }, - { "rx_xoff_frames" }, - { "tx_xon_frames" }, - { "tx_xoff_frames" }, - { "rx_mac_ctrl_frames" }, - { "rx_filtered_packets" }, - { "rx_ftq_discards" }, - { "rx_discards" }, - { "rx_fw_discards" }, -}; - -#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\ - sizeof(bnx2_stats_str_arr[0])) - -#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) - -static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { - STATS_OFFSET32(stat_IfHCInOctets_hi), - STATS_OFFSET32(stat_IfHCInBadOctets_hi), - STATS_OFFSET32(stat_IfHCOutOctets_hi), - STATS_OFFSET32(stat_IfHCOutBadOctets_hi), - STATS_OFFSET32(stat_IfHCInUcastPkts_hi), - STATS_OFFSET32(stat_IfHCInMulticastPkts_hi), - STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi), - STATS_OFFSET32(stat_IfHCOutUcastPkts_hi), - STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi), - STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi), - 
STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors), - STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), - STATS_OFFSET32(stat_Dot3StatsFCSErrors), - STATS_OFFSET32(stat_Dot3StatsAlignmentErrors), - STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames), - STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames), - STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), - STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions), - STATS_OFFSET32(stat_Dot3StatsLateCollisions), - STATS_OFFSET32(stat_EtherStatsCollisions), - STATS_OFFSET32(stat_EtherStatsFragments), - STATS_OFFSET32(stat_EtherStatsJabbers), - STATS_OFFSET32(stat_EtherStatsUndersizePkts), - STATS_OFFSET32(stat_EtherStatsOverrsizePkts), - STATS_OFFSET32(stat_EtherStatsPktsRx64Octets), - STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets), - STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets), - STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets), - STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets), - STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets), - STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets), - STATS_OFFSET32(stat_EtherStatsPktsTx64Octets), - STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets), - STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets), - STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets), - STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets), - STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets), - STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets), - STATS_OFFSET32(stat_XonPauseFramesReceived), - STATS_OFFSET32(stat_XoffPauseFramesReceived), - STATS_OFFSET32(stat_OutXonSent), - STATS_OFFSET32(stat_OutXoffSent), - STATS_OFFSET32(stat_MacControlFramesReceived), - STATS_OFFSET32(stat_IfInFramesL2FilterDiscards), - STATS_OFFSET32(stat_IfInFTQDiscards), - STATS_OFFSET32(stat_IfInMBUFDiscards), - STATS_OFFSET32(stat_FwRxDrop), -}; - -/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are - * skipped because of errata. 
- */ -static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = { - 8,0,8,8,8,8,8,8,8,8, - 4,0,4,4,4,4,4,4,4,4, - 4,4,4,4,4,4,4,4,4,4, - 4,4,4,4,4,4,4,4,4,4, - 4,4,4,4,4,4,4, -}; - -static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = { - 8,0,8,8,8,8,8,8,8,8, - 4,4,4,4,4,4,4,4,4,4, - 4,4,4,4,4,4,4,4,4,4, - 4,4,4,4,4,4,4,4,4,4, - 4,4,4,4,4,4,4, -}; - -#define BNX2_NUM_TESTS 6 - -static struct { - char string[ETH_GSTRING_LEN]; -} bnx2_tests_str_arr[BNX2_NUM_TESTS] = { - { "register_test (offline)" }, - { "memory_test (offline)" }, - { "loopback_test (offline)" }, - { "nvram_test (online)" }, - { "interrupt_test (online)" }, - { "link_test (online)" }, -}; - -static int -bnx2_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return BNX2_NUM_TESTS; - case ETH_SS_STATS: - return BNX2_NUM_STATS; - default: - return -EOPNOTSUPP; - } -} - -static void -bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) -{ - struct bnx2 *bp = netdev_priv(dev); - - bnx2_set_power_state(bp, PCI_D0); - - memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS); - if (etest->flags & ETH_TEST_FL_OFFLINE) { - int i; - - bnx2_netif_stop(bp, true); - bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); - bnx2_free_skbs(bp); - - if (bnx2_test_registers(bp) != 0) { - buf[0] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - if (bnx2_test_memory(bp) != 0) { - buf[1] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - if ((buf[2] = bnx2_test_loopback(bp)) != 0) - etest->flags |= ETH_TEST_FL_FAILED; - - if (!netif_running(bp->dev)) - bnx2_shutdown_chip(bp); - else { - bnx2_init_nic(bp, 1); - bnx2_netif_start(bp, true); - } - - /* wait for link up */ - for (i = 0; i < 7; i++) { - if (bp->link_up) - break; - msleep_interruptible(1000); - } - } - - if (bnx2_test_nvram(bp) != 0) { - buf[3] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - if (bnx2_test_intr(bp) != 0) { - buf[4] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - - if (bnx2_test_link(bp) != 0) { - buf[5] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - - } - if (!netif_running(bp->dev)) - bnx2_set_power_state(bp, PCI_D3hot); -} - -static void -bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf) -{ - switch (stringset) { - case ETH_SS_STATS: - memcpy(buf, bnx2_stats_str_arr, - sizeof(bnx2_stats_str_arr)); - break; - case ETH_SS_TEST: - memcpy(buf, bnx2_tests_str_arr, - sizeof(bnx2_tests_str_arr)); - break; - } -} - -static void -bnx2_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *buf) -{ - struct bnx2 *bp = netdev_priv(dev); - int i; - u32 *hw_stats = (u32 *) bp->stats_blk; - u32 *temp_stats = (u32 *) bp->temp_stats_blk; - u8 *stats_len_arr = NULL; - - if (hw_stats == NULL) { - memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); - return; - } - - if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || - (CHIP_ID(bp) == CHIP_ID_5706_A1) || - (CHIP_ID(bp) == CHIP_ID_5706_A2) || - (CHIP_ID(bp) == CHIP_ID_5708_A0)) - stats_len_arr = bnx2_5706_stats_len_arr; - else - stats_len_arr = bnx2_5708_stats_len_arr; - - for (i = 0; i < BNX2_NUM_STATS; i++) { - unsigned long offset; - - if (stats_len_arr[i] == 0) { - /* skip this counter */ - buf[i] = 0; - continue; - } - - offset = bnx2_stats_offset_arr[i]; - if (stats_len_arr[i] == 4) { - /* 4-byte counter */ - buf[i] = (u64) *(hw_stats + offset) + - *(temp_stats + offset); - continue; - } - /* 8-byte counter */ - buf[i] = (((u64) *(hw_stats + offset)) << 32) + - *(hw_stats + offset + 1) + - (((u64) *(temp_stats + offset)) << 32) + - *(temp_stats + offset + 1); - } -} - -static 
int -bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) -{ - struct bnx2 *bp = netdev_priv(dev); - - switch (state) { - case ETHTOOL_ID_ACTIVE: - bnx2_set_power_state(bp, PCI_D0); - - bp->leds_save = REG_RD(bp, BNX2_MISC_CFG); - REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); - return 1; /* cycle on/off once per second */ - - case ETHTOOL_ID_ON: - REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | - BNX2_EMAC_LED_1000MB_OVERRIDE | - BNX2_EMAC_LED_100MB_OVERRIDE | - BNX2_EMAC_LED_10MB_OVERRIDE | - BNX2_EMAC_LED_TRAFFIC_OVERRIDE | - BNX2_EMAC_LED_TRAFFIC); - break; - - case ETHTOOL_ID_OFF: - REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); - break; - - case ETHTOOL_ID_INACTIVE: - REG_WR(bp, BNX2_EMAC_LED, 0); - REG_WR(bp, BNX2_MISC_CFG, bp->leds_save); - - if (!netif_running(dev)) - bnx2_set_power_state(bp, PCI_D3hot); - break; - } - - return 0; -} - -static u32 -bnx2_fix_features(struct net_device *dev, u32 features) -{ - struct bnx2 *bp = netdev_priv(dev); - - if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) - features |= NETIF_F_HW_VLAN_RX; - - return features; -} - -static int -bnx2_set_features(struct net_device *dev, u32 features) -{ - struct bnx2 *bp = netdev_priv(dev); - - /* TSO with VLAN tag won't work with current firmware */ - if (features & NETIF_F_HW_VLAN_TX) - dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO); - else - dev->vlan_features &= ~NETIF_F_ALL_TSO; - - if ((!!(features & NETIF_F_HW_VLAN_RX) != - !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) && - netif_running(dev)) { - bnx2_netif_stop(bp, false); - dev->features = features; - bnx2_set_rx_mode(dev); - bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); - bnx2_netif_start(bp, false); - return 1; - } - - return 0; -} - -static const struct ethtool_ops bnx2_ethtool_ops = { - .get_settings = bnx2_get_settings, - .set_settings = bnx2_set_settings, - .get_drvinfo = bnx2_get_drvinfo, - .get_regs_len = bnx2_get_regs_len, - .get_regs = bnx2_get_regs, - .get_wol = bnx2_get_wol, - .set_wol = bnx2_set_wol, - .nway_reset = bnx2_nway_reset, - .get_link = bnx2_get_link, - .get_eeprom_len = bnx2_get_eeprom_len, - .get_eeprom = bnx2_get_eeprom, - .set_eeprom = bnx2_set_eeprom, - .get_coalesce = bnx2_get_coalesce, - .set_coalesce = bnx2_set_coalesce, - .get_ringparam = bnx2_get_ringparam, - .set_ringparam = bnx2_set_ringparam, - .get_pauseparam = bnx2_get_pauseparam, - .set_pauseparam = bnx2_set_pauseparam, - .self_test = bnx2_self_test, - .get_strings = bnx2_get_strings, - .set_phys_id = bnx2_set_phys_id, - .get_ethtool_stats = bnx2_get_ethtool_stats, - .get_sset_count = bnx2_get_sset_count, -}; - -/* Called with rtnl_lock */ -static int -bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct mii_ioctl_data *data = if_mii(ifr); - struct bnx2 *bp = netdev_priv(dev); - int err; - - switch(cmd) { - case SIOCGMIIPHY: - data->phy_id = bp->phy_addr; - - /* fallthru */ - case SIOCGMIIREG: { - u32 mii_regval; - - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) - return -EOPNOTSUPP; - - if (!netif_running(dev)) - return -EAGAIN; - - spin_lock_bh(&bp->phy_lock); - err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval); - spin_unlock_bh(&bp->phy_lock); - - data->val_out = mii_regval; - - return err; - } - - case SIOCSMIIREG: - if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) - return -EOPNOTSUPP; - - if (!netif_running(dev)) - return -EAGAIN; - - spin_lock_bh(&bp->phy_lock); - err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in); - spin_unlock_bh(&bp->phy_lock); 
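	/* Annotation (not in the original source): both MII ioctls take
	 * phy_lock around the MDIO access and bail out early when the
	 * PHY is owned by remote firmware (BNX2_PHY_FLAG_REMOTE_PHY_CAP)
	 * or the interface is down, presumably because MDIO cycles need
	 * the chip powered up and initialized.
	 */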
- - return err; - - default: - /* do nothing */ - break; - } - return -EOPNOTSUPP; -} - -/* Called with rtnl_lock */ -static int -bnx2_change_mac_addr(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - struct bnx2 *bp = netdev_priv(dev); - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - if (netif_running(dev)) - bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); - - return 0; -} - -/* Called with rtnl_lock */ -static int -bnx2_change_mtu(struct net_device *dev, int new_mtu) -{ - struct bnx2 *bp = netdev_priv(dev); - - if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || - ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE)) - return -EINVAL; - - dev->mtu = new_mtu; - return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void -poll_bnx2(struct net_device *dev) -{ - struct bnx2 *bp = netdev_priv(dev); - int i; - - for (i = 0; i < bp->irq_nvecs; i++) { - struct bnx2_irq *irq = &bp->irq_tbl[i]; - - disable_irq(irq->vector); - irq->handler(irq->vector, &bp->bnx2_napi[i]); - enable_irq(irq->vector); - } -} -#endif - -static void __devinit -bnx2_get_5709_media(struct bnx2 *bp) -{ - u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); - u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID; - u32 strap; - - if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) - return; - else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { - bp->phy_flags |= BNX2_PHY_FLAG_SERDES; - return; - } - - if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) - strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; - else - strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; - - if (PCI_FUNC(bp->pdev->devfn) == 0) { - switch (strap) { - case 0x4: - case 0x5: - case 0x6: - bp->phy_flags |= BNX2_PHY_FLAG_SERDES; - return; - } - } else { - switch (strap) { - case 0x1: - case 0x2: - case 0x4: - bp->phy_flags |= BNX2_PHY_FLAG_SERDES; - return; - } - } -} - -static void __devinit -bnx2_get_pci_speed(struct bnx2 *bp) -{ - u32 reg; - - reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS); - if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) { - u32 clkreg; - - bp->flags |= BNX2_FLAG_PCIX; - - clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); - - clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; - switch (clkreg) { - case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: - bp->bus_speed_mhz = 133; - break; - - case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: - bp->bus_speed_mhz = 100; - break; - - case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: - case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: - bp->bus_speed_mhz = 66; - break; - - case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: - case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: - bp->bus_speed_mhz = 50; - break; - - case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: - case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: - case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: - bp->bus_speed_mhz = 33; - break; - } - } - else { - if (reg & BNX2_PCICFG_MISC_STATUS_M66EN) - bp->bus_speed_mhz = 66; - else - bp->bus_speed_mhz = 33; - } - - if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET) - bp->flags |= BNX2_FLAG_PCI_32BIT; - -} - -static void __devinit -bnx2_read_vpd_fw_ver(struct bnx2 *bp) -{ - int rc, i, j; - u8 *data; - unsigned int block_end, rosize, len; - -#define BNX2_VPD_NVRAM_OFFSET 0x300 -#define 
BNX2_VPD_LEN 128 -#define BNX2_MAX_VER_SLEN 30 - - data = kmalloc(256, GFP_KERNEL); - if (!data) - return; - - rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN, - BNX2_VPD_LEN); - if (rc) - goto vpd_done; - - for (i = 0; i < BNX2_VPD_LEN; i += 4) { - data[i] = data[i + BNX2_VPD_LEN + 3]; - data[i + 1] = data[i + BNX2_VPD_LEN + 2]; - data[i + 2] = data[i + BNX2_VPD_LEN + 1]; - data[i + 3] = data[i + BNX2_VPD_LEN]; - } - - i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA); - if (i < 0) - goto vpd_done; - - rosize = pci_vpd_lrdt_size(&data[i]); - i += PCI_VPD_LRDT_TAG_SIZE; - block_end = i + rosize; - - if (block_end > BNX2_VPD_LEN) - goto vpd_done; - - j = pci_vpd_find_info_keyword(data, i, rosize, - PCI_VPD_RO_KEYWORD_MFR_ID); - if (j < 0) - goto vpd_done; - - len = pci_vpd_info_field_size(&data[j]); - - j += PCI_VPD_INFO_FLD_HDR_SIZE; - if (j + len > block_end || len != 4 || - memcmp(&data[j], "1028", 4)) - goto vpd_done; - - j = pci_vpd_find_info_keyword(data, i, rosize, - PCI_VPD_RO_KEYWORD_VENDOR0); - if (j < 0) - goto vpd_done; - - len = pci_vpd_info_field_size(&data[j]); - - j += PCI_VPD_INFO_FLD_HDR_SIZE; - if (j + len > block_end || len > BNX2_MAX_VER_SLEN) - goto vpd_done; - - memcpy(bp->fw_version, &data[j], len); - bp->fw_version[len] = ' '; - -vpd_done: - kfree(data); -} - -static int __devinit -bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) -{ - struct bnx2 *bp; - unsigned long mem_len; - int rc, i, j; - u32 reg; - u64 dma_mask, persist_dma_mask; - int err; - - SET_NETDEV_DEV(dev, &pdev->dev); - bp = netdev_priv(dev); - - bp->flags = 0; - bp->phy_flags = 0; - - bp->temp_stats_blk = - kzalloc(sizeof(struct statistics_block), GFP_KERNEL); - - if (bp->temp_stats_blk == NULL) { - rc = -ENOMEM; - goto err_out; - } - - /* enable device (incl. PCI PM wakeup), and bus-mastering */ - rc = pci_enable_device(pdev); - if (rc) { - dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); - goto err_out; - } - - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, - "Cannot find PCI device base address, aborting\n"); - rc = -ENODEV; - goto err_out_disable; - } - - rc = pci_request_regions(pdev, DRV_MODULE_NAME); - if (rc) { - dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); - goto err_out_disable; - } - - pci_set_master(pdev); - - bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (bp->pm_cap == 0) { - dev_err(&pdev->dev, - "Cannot find power management capability, aborting\n"); - rc = -EIO; - goto err_out_release; - } - - bp->dev = dev; - bp->pdev = pdev; - - spin_lock_init(&bp->phy_lock); - spin_lock_init(&bp->indirect_lock); -#ifdef BCM_CNIC - mutex_init(&bp->cnic_lock); -#endif - INIT_WORK(&bp->reset_task, bnx2_reset_task); - - dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); - mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1); - dev->mem_end = dev->mem_start + mem_len; - dev->irq = pdev->irq; - - bp->regview = ioremap_nocache(dev->base_addr, mem_len); - - if (!bp->regview) { - dev_err(&pdev->dev, "Cannot map register space, aborting\n"); - rc = -ENOMEM; - goto err_out_release; - } - - bnx2_set_power_state(bp, PCI_D0); - - /* Configure byte swap and enable write to the reg_window registers. 
- * Rely on CPU to do target byte swapping on big endian systems - * The chip's target access swapping will not swap all accesses - */ - REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, - BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | - BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); - - bp->chip_id = REG_RD(bp, BNX2_MISC_ID); - - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - if (!pci_is_pcie(pdev)) { - dev_err(&pdev->dev, "Not PCIE, aborting\n"); - rc = -EIO; - goto err_out_unmap; - } - bp->flags |= BNX2_FLAG_PCIE; - if (CHIP_REV(bp) == CHIP_REV_Ax) - bp->flags |= BNX2_FLAG_JUMBO_BROKEN; - - /* AER (Advanced Error Reporting) hooks */ - err = pci_enable_pcie_error_reporting(pdev); - if (!err) - bp->flags |= BNX2_FLAG_AER_ENABLED; - - } else { - bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); - if (bp->pcix_cap == 0) { - dev_err(&pdev->dev, - "Cannot find PCIX capability, aborting\n"); - rc = -EIO; - goto err_out_unmap; - } - bp->flags |= BNX2_FLAG_BROKEN_STATS; - } - - if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) { - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) - bp->flags |= BNX2_FLAG_MSIX_CAP; - } - - if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) { - if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) - bp->flags |= BNX2_FLAG_MSI_CAP; - } - - /* 5708 cannot support DMA addresses > 40-bit. */ - if (CHIP_NUM(bp) == CHIP_NUM_5708) - persist_dma_mask = dma_mask = DMA_BIT_MASK(40); - else - persist_dma_mask = dma_mask = DMA_BIT_MASK(64); - - /* Configure DMA attributes. */ - if (pci_set_dma_mask(pdev, dma_mask) == 0) { - dev->features |= NETIF_F_HIGHDMA; - rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask); - if (rc) { - dev_err(&pdev->dev, - "pci_set_consistent_dma_mask failed, aborting\n"); - goto err_out_unmap; - } - } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { - dev_err(&pdev->dev, "System does not support DMA, aborting\n"); - goto err_out_unmap; - } - - if (!(bp->flags & BNX2_FLAG_PCIE)) - bnx2_get_pci_speed(bp); - - /* 5706A0 may falsely detect SERR and PERR. */ - if (CHIP_ID(bp) == CHIP_ID_5706_A0) { - reg = REG_RD(bp, PCI_COMMAND); - reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); - REG_WR(bp, PCI_COMMAND, reg); - } - else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) && - !(bp->flags & BNX2_FLAG_PCIX)) { - - dev_err(&pdev->dev, - "5706 A1 can only be used in a PCIX bus, aborting\n"); - goto err_out_unmap; - } - - bnx2_init_nvram(bp); - - reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE); - - if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) == - BNX2_SHM_HDR_SIGNATURE_SIG) { - u32 off = PCI_FUNC(pdev->devfn) << 2; - - bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off); - } else - bp->shmem_base = HOST_VIEW_SHMEM_BASE; - - /* Get the permanent MAC address. First we need to make sure the - * firmware is actually running. 
- */
-	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
-
-	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
-	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
-		dev_err(&pdev->dev, "Firmware not running, aborting\n");
-		rc = -ENODEV;
-		goto err_out_unmap;
-	}
-
-	bnx2_read_vpd_fw_ver(bp);
-
-	j = strlen(bp->fw_version);
-	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
-	for (i = 0; i < 3 && j < 24; i++) {
-		u8 num, k, skip0;
-
-		if (i == 0) {
-			bp->fw_version[j++] = 'b';
-			bp->fw_version[j++] = 'c';
-			bp->fw_version[j++] = ' ';
-		}
-		num = (u8) (reg >> (24 - (i * 8)));
-		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
-			if (num >= k || !skip0 || k == 1) {
-				bp->fw_version[j++] = (num / k) + '0';
-				skip0 = 0;
-			}
-		}
-		if (i != 2)
-			bp->fw_version[j++] = '.';
-	}
-	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
-	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
-		bp->wol = 1;
-
-	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
-		bp->flags |= BNX2_FLAG_ASF_ENABLE;
-
-		for (i = 0; i < 30; i++) {
-			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
-			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
-				break;
-			msleep(10);
-		}
-	}
-	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
-	reg &= BNX2_CONDITION_MFW_RUN_MASK;
-	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
-	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
-		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
-
-		if (j < 32)
-			bp->fw_version[j++] = ' ';
-		for (i = 0; i < 3 && j < 28; i++) {
-			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
-			reg = be32_to_cpu(reg);
-			memcpy(&bp->fw_version[j], &reg, 4);
-			j += 4;
-		}
-	}
-
-	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
-	bp->mac_addr[0] = (u8) (reg >> 8);
-	bp->mac_addr[1] = (u8) reg;
-
-	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
-	bp->mac_addr[2] = (u8) (reg >> 24);
-	bp->mac_addr[3] = (u8) (reg >> 16);
-	bp->mac_addr[4] = (u8) (reg >> 8);
-	bp->mac_addr[5] = (u8) reg;
-
-	bp->tx_ring_size = MAX_TX_DESC_CNT;
-	bnx2_set_rx_ring_size(bp, 255);
-
-	bp->tx_quick_cons_trip_int = 2;
-	bp->tx_quick_cons_trip = 20;
-	bp->tx_ticks_int = 18;
-	bp->tx_ticks = 80;
-
-	bp->rx_quick_cons_trip_int = 2;
-	bp->rx_quick_cons_trip = 12;
-	bp->rx_ticks_int = 18;
-	bp->rx_ticks = 18;
-
-	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
-
-	bp->current_interval = BNX2_TIMER_INTERVAL;
-
-	bp->phy_addr = 1;
-
-	/* Disable WOL support if we are running on a SERDES chip. */
-	if (CHIP_NUM(bp) == CHIP_NUM_5709)
-		bnx2_get_5709_media(bp);
-	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
-		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
-
-	bp->phy_port = PORT_TP;
-	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
-		bp->phy_port = PORT_FIBRE;
-		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
-		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
-			bp->flags |= BNX2_FLAG_NO_WOL;
-			bp->wol = 0;
-		}
-		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-			/* Don't do parallel detect on this board because of
-			 * some board problems.  The link will not go down
-			 * if we do parallel detect.
- */ - if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP && - pdev->subsystem_device == 0x310c) - bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL; - } else { - bp->phy_addr = 2; - if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) - bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE; - } - } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || - CHIP_NUM(bp) == CHIP_NUM_5708) - bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX; - else if (CHIP_NUM(bp) == CHIP_NUM_5709 && - (CHIP_REV(bp) == CHIP_REV_Ax || - CHIP_REV(bp) == CHIP_REV_Bx)) - bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC; - - bnx2_init_fw_cap(bp); - - if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || - (CHIP_ID(bp) == CHIP_ID_5708_B0) || - (CHIP_ID(bp) == CHIP_ID_5708_B1) || - !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) { - bp->flags |= BNX2_FLAG_NO_WOL; - bp->wol = 0; - } - - if (CHIP_ID(bp) == CHIP_ID_5706_A0) { - bp->tx_quick_cons_trip_int = - bp->tx_quick_cons_trip; - bp->tx_ticks_int = bp->tx_ticks; - bp->rx_quick_cons_trip_int = - bp->rx_quick_cons_trip; - bp->rx_ticks_int = bp->rx_ticks; - bp->comp_prod_trip_int = bp->comp_prod_trip; - bp->com_ticks_int = bp->com_ticks; - bp->cmd_ticks_int = bp->cmd_ticks; - } - - /* Disable MSI on 5706 if AMD 8132 bridge is found. - * - * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes - * with byte enables disabled on the unused 32-bit word. This is legal - * but causes problems on the AMD 8132 which will eventually stop - * responding after a while. - * - * AMD believes this incompatibility is unique to the 5706, and - * prefers to locally disable MSI rather than globally disabling it. - */ - if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) { - struct pci_dev *amd_8132 = NULL; - - while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_AMD_8132_BRIDGE, - amd_8132))) { - - if (amd_8132->revision >= 0x10 && - amd_8132->revision <= 0x13) { - disable_msi = 1; - pci_dev_put(amd_8132); - break; - } - } - } - - bnx2_set_default_link(bp); - bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; - - init_timer(&bp->timer); - bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL); - bp->timer.data = (unsigned long) bp; - bp->timer.function = bnx2_timer; - -#ifdef BCM_CNIC - if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN) - bp->cnic_eth_dev.max_iscsi_conn = - (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) & - BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT; -#endif - pci_save_state(pdev); - - return 0; - -err_out_unmap: - if (bp->flags & BNX2_FLAG_AER_ENABLED) { - pci_disable_pcie_error_reporting(pdev); - bp->flags &= ~BNX2_FLAG_AER_ENABLED; - } - - if (bp->regview) { - iounmap(bp->regview); - bp->regview = NULL; - } - -err_out_release: - pci_release_regions(pdev); - -err_out_disable: - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - -err_out: - return rc; -} - -static char * __devinit -bnx2_bus_string(struct bnx2 *bp, char *str) -{ - char *s = str; - - if (bp->flags & BNX2_FLAG_PCIE) { - s += sprintf(s, "PCI Express"); - } else { - s += sprintf(s, "PCI"); - if (bp->flags & BNX2_FLAG_PCIX) - s += sprintf(s, "-X"); - if (bp->flags & BNX2_FLAG_PCI_32BIT) - s += sprintf(s, " 32-bit"); - else - s += sprintf(s, " 64-bit"); - s += sprintf(s, " %dMHz", bp->bus_speed_mhz); - } - return str; -} - -static void -bnx2_del_napi(struct bnx2 *bp) -{ - int i; - - for (i = 0; i < bp->irq_nvecs; i++) - netif_napi_del(&bp->bnx2_napi[i].napi); -} - -static void -bnx2_init_napi(struct bnx2 *bp) -{ - int i; - - for (i = 0; i < bp->irq_nvecs; i++) { - struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; - 
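		/* Each vector gets its own NAPI context: vector 0 polls the
		 * default ring plus slow-path events (bnx2_poll), the other
		 * MSI-X vectors use bnx2_poll_msix. For reference, a poll
		 * callback follows the standard NAPI contract; a schematic
		 * only, with a hypothetical name, not the driver's actual
		 * loop:
		 *
		 *	static int example_poll(struct napi_struct *napi, int budget)
		 *	{
		 *		int done = 0;
		 *
		 *		// reap at most "budget" packets of ring work here
		 *		if (done < budget)
		 *			napi_complete(napi);	// drained: re-arm IRQ
		 *		return done;
		 *	}
		 */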
int (*poll)(struct napi_struct *, int); - - if (i == 0) - poll = bnx2_poll; - else - poll = bnx2_poll_msix; - - netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64); - bnapi->bp = bp; - } -} - -static const struct net_device_ops bnx2_netdev_ops = { - .ndo_open = bnx2_open, - .ndo_start_xmit = bnx2_start_xmit, - .ndo_stop = bnx2_close, - .ndo_get_stats64 = bnx2_get_stats64, - .ndo_set_rx_mode = bnx2_set_rx_mode, - .ndo_do_ioctl = bnx2_ioctl, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = bnx2_change_mac_addr, - .ndo_change_mtu = bnx2_change_mtu, - .ndo_fix_features = bnx2_fix_features, - .ndo_set_features = bnx2_set_features, - .ndo_tx_timeout = bnx2_tx_timeout, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = poll_bnx2, -#endif -}; - -static int __devinit -bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - static int version_printed = 0; - struct net_device *dev = NULL; - struct bnx2 *bp; - int rc; - char str[40]; - - if (version_printed++ == 0) - pr_info("%s", version); - - /* dev zeroed in init_etherdev */ - dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS); - - if (!dev) - return -ENOMEM; - - rc = bnx2_init_board(pdev, dev); - if (rc < 0) { - free_netdev(dev); - return rc; - } - - dev->netdev_ops = &bnx2_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - dev->ethtool_ops = &bnx2_ethtool_ops; - - bp = netdev_priv(dev); - - pci_set_drvdata(pdev, dev); - - rc = bnx2_request_firmware(bp); - if (rc) - goto error; - - memcpy(dev->dev_addr, bp->mac_addr, 6); - memcpy(dev->perm_addr, bp->mac_addr, 6); - - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | - NETIF_F_TSO | NETIF_F_TSO_ECN | - NETIF_F_RXHASH | NETIF_F_RXCSUM; - - if (CHIP_NUM(bp) == CHIP_NUM_5709) - dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; - - dev->vlan_features = dev->hw_features; - dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->features |= dev->hw_features; - - if ((rc = register_netdev(dev))) { - dev_err(&pdev->dev, "Cannot register net device\n"); - goto error; - } - - netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n", - board_info[ent->driver_data].name, - ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', - ((CHIP_ID(bp) & 0x0ff0) >> 4), - bnx2_bus_string(bp, str), - dev->base_addr, - bp->pdev->irq, dev->dev_addr); - - return 0; - -error: - if (bp->mips_firmware) - release_firmware(bp->mips_firmware); - if (bp->rv2p_firmware) - release_firmware(bp->rv2p_firmware); - - if (bp->regview) - iounmap(bp->regview); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - free_netdev(dev); - return rc; -} - -static void __devexit -bnx2_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2 *bp = netdev_priv(dev); - - unregister_netdev(dev); - - del_timer_sync(&bp->timer); - cancel_work_sync(&bp->reset_task); - - if (bp->mips_firmware) - release_firmware(bp->mips_firmware); - if (bp->rv2p_firmware) - release_firmware(bp->rv2p_firmware); - - if (bp->regview) - iounmap(bp->regview); - - kfree(bp->temp_stats_blk); - - if (bp->flags & BNX2_FLAG_AER_ENABLED) { - pci_disable_pcie_error_reporting(pdev); - bp->flags &= ~BNX2_FLAG_AER_ENABLED; - } - - free_netdev(dev); - - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); -} - -static int -bnx2_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2 *bp = netdev_priv(dev); - - /* PCI register 4 needs to be saved whether 
netif_running() or not.
- * MSI address and data need to be saved if using MSI and
- * netif_running().
- */
-	pci_save_state(pdev);
-	if (!netif_running(dev))
-		return 0;
-
-	cancel_work_sync(&bp->reset_task);
-	bnx2_netif_stop(bp, true);
-	netif_device_detach(dev);
-	del_timer_sync(&bp->timer);
-	bnx2_shutdown_chip(bp);
-	bnx2_free_skbs(bp);
-	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
-	return 0;
-}
-
-static int
-bnx2_resume(struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct bnx2 *bp = netdev_priv(dev);
-
-	pci_restore_state(pdev);
-	if (!netif_running(dev))
-		return 0;
-
-	bnx2_set_power_state(bp, PCI_D0);
-	netif_device_attach(dev);
-	bnx2_init_nic(bp, 1);
-	bnx2_netif_start(bp, true);
-	return 0;
-}
-
-/**
- * bnx2_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
-					       pci_channel_state_t state)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct bnx2 *bp = netdev_priv(dev);
-
-	rtnl_lock();
-	netif_device_detach(dev);
-
-	if (state == pci_channel_io_perm_failure) {
-		rtnl_unlock();
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
-	if (netif_running(dev)) {
-		bnx2_netif_stop(bp, true);
-		del_timer_sync(&bp->timer);
-		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
-	}
-
-	pci_disable_device(pdev);
-	rtnl_unlock();
-
-	/* Request a slot reset. */
-	return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * bnx2_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot.
- */
-static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct bnx2 *bp = netdev_priv(dev);
-	pci_ers_result_t result;
-	int err;
-
-	rtnl_lock();
-	if (pci_enable_device(pdev)) {
-		dev_err(&pdev->dev,
-			"Cannot re-enable PCI device after reset\n");
-		result = PCI_ERS_RESULT_DISCONNECT;
-	} else {
-		pci_set_master(pdev);
-		pci_restore_state(pdev);
-		pci_save_state(pdev);
-
-		if (netif_running(dev)) {
-			bnx2_set_power_state(bp, PCI_D0);
-			bnx2_init_nic(bp, 1);
-		}
-		result = PCI_ERS_RESULT_RECOVERED;
-	}
-	rtnl_unlock();
-
-	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
-		return result;
-
-	err = pci_cleanup_aer_uncorrect_error_status(pdev);
-	if (err) {
-		dev_err(&pdev->dev,
-			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
-			err); /* non-fatal, continue */
-	}
-
-	return result;
-}
-
-/**
- * bnx2_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells us that
- * it's OK to resume normal operation.
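 * (Recovery ordering, for reference: the PCI error-recovery core calls
 * .error_detected first, then .slot_reset, and reaches this .resume
 * callback only after slot_reset reports PCI_ERS_RESULT_RECOVERED.)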
- */ -static void bnx2_io_resume(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2 *bp = netdev_priv(dev); - - rtnl_lock(); - if (netif_running(dev)) - bnx2_netif_start(bp, true); - - netif_device_attach(dev); - rtnl_unlock(); -} - -static struct pci_error_handlers bnx2_err_handler = { - .error_detected = bnx2_io_error_detected, - .slot_reset = bnx2_io_slot_reset, - .resume = bnx2_io_resume, -}; - -static struct pci_driver bnx2_pci_driver = { - .name = DRV_MODULE_NAME, - .id_table = bnx2_pci_tbl, - .probe = bnx2_init_one, - .remove = __devexit_p(bnx2_remove_one), - .suspend = bnx2_suspend, - .resume = bnx2_resume, - .err_handler = &bnx2_err_handler, -}; - -static int __init bnx2_init(void) -{ - return pci_register_driver(&bnx2_pci_driver); -} - -static void __exit bnx2_cleanup(void) -{ - pci_unregister_driver(&bnx2_pci_driver); -} - -module_init(bnx2_init); -module_exit(bnx2_cleanup); - - - diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h deleted file mode 100644 index fc50d42..0000000 --- a/drivers/net/bnx2.h +++ /dev/null @@ -1,7388 +0,0 @@ -/* bnx2.h: Broadcom NX2 network driver. - * - * Copyright (c) 2004-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Michael Chan (mchan@broadcom.com) - */ - - -#ifndef BNX2_H -#define BNX2_H - -/* Hardware data structures and register definitions automatically - * generated from RTL code. Do not modify. - */ - -/* - * tx_bd definition - */ -struct tx_bd { - u32 tx_bd_haddr_hi; - u32 tx_bd_haddr_lo; - u32 tx_bd_mss_nbytes; - #define TX_BD_TCP6_OFF2_SHL (14) - u32 tx_bd_vlan_tag_flags; - #define TX_BD_FLAGS_CONN_FAULT (1<<0) - #define TX_BD_FLAGS_TCP6_OFF0_MSK (3<<1) - #define TX_BD_FLAGS_TCP6_OFF0_SHL (1) - #define TX_BD_FLAGS_TCP_UDP_CKSUM (1<<1) - #define TX_BD_FLAGS_IP_CKSUM (1<<2) - #define TX_BD_FLAGS_VLAN_TAG (1<<3) - #define TX_BD_FLAGS_COAL_NOW (1<<4) - #define TX_BD_FLAGS_DONT_GEN_CRC (1<<5) - #define TX_BD_FLAGS_END (1<<6) - #define TX_BD_FLAGS_START (1<<7) - #define TX_BD_FLAGS_SW_OPTION_WORD (0x1f<<8) - #define TX_BD_FLAGS_TCP6_OFF4_SHL (12) - #define TX_BD_FLAGS_SW_FLAGS (1<<13) - #define TX_BD_FLAGS_SW_SNAP (1<<14) - #define TX_BD_FLAGS_SW_LSO (1<<15) - -}; - - -/* - * rx_bd definition - */ -struct rx_bd { - u32 rx_bd_haddr_hi; - u32 rx_bd_haddr_lo; - u32 rx_bd_len; - u32 rx_bd_flags; - #define RX_BD_FLAGS_NOPUSH (1<<0) - #define RX_BD_FLAGS_DUMMY (1<<1) - #define RX_BD_FLAGS_END (1<<2) - #define RX_BD_FLAGS_START (1<<3) - -}; - -#define BNX2_RX_ALIGN 16 - -/* - * status_block definition - */ -struct status_block { - u32 status_attn_bits; - #define STATUS_ATTN_BITS_LINK_STATE (1L<<0) - #define STATUS_ATTN_BITS_TX_SCHEDULER_ABORT (1L<<1) - #define STATUS_ATTN_BITS_TX_BD_READ_ABORT (1L<<2) - #define STATUS_ATTN_BITS_TX_BD_CACHE_ABORT (1L<<3) - #define STATUS_ATTN_BITS_TX_PROCESSOR_ABORT (1L<<4) - #define STATUS_ATTN_BITS_TX_DMA_ABORT (1L<<5) - #define STATUS_ATTN_BITS_TX_PATCHUP_ABORT (1L<<6) - #define STATUS_ATTN_BITS_TX_ASSEMBLER_ABORT (1L<<7) - #define STATUS_ATTN_BITS_RX_PARSER_MAC_ABORT (1L<<8) - #define STATUS_ATTN_BITS_RX_PARSER_CATCHUP_ABORT (1L<<9) - #define STATUS_ATTN_BITS_RX_MBUF_ABORT (1L<<10) - #define STATUS_ATTN_BITS_RX_LOOKUP_ABORT (1L<<11) - #define STATUS_ATTN_BITS_RX_PROCESSOR_ABORT (1L<<12) - #define STATUS_ATTN_BITS_RX_V2P_ABORT (1L<<13) - #define STATUS_ATTN_BITS_RX_BD_CACHE_ABORT (1L<<14) - 
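/* These bits arrive in status_blk->status_attn_bits and are matched
 * against status_attn_bits_ack to spot new events. A minimal sketch of
 * the comparison shape (names from this header; "sblk" is illustrative):
 *
 *	u32 new_attn = sblk->status_attn_bits;
 *	u32 old_attn = sblk->status_attn_bits_ack;
 *
 *	if ((new_attn & STATUS_ATTN_BITS_LINK_STATE) !=
 *	    (old_attn & STATUS_ATTN_BITS_LINK_STATE))
 *		;	// link changed; service it, then ack the bit
 */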
#define STATUS_ATTN_BITS_RX_DMA_ABORT (1L<<15) - #define STATUS_ATTN_BITS_COMPLETION_ABORT (1L<<16) - #define STATUS_ATTN_BITS_HOST_COALESCE_ABORT (1L<<17) - #define STATUS_ATTN_BITS_MAILBOX_QUEUE_ABORT (1L<<18) - #define STATUS_ATTN_BITS_CONTEXT_ABORT (1L<<19) - #define STATUS_ATTN_BITS_CMD_SCHEDULER_ABORT (1L<<20) - #define STATUS_ATTN_BITS_CMD_PROCESSOR_ABORT (1L<<21) - #define STATUS_ATTN_BITS_MGMT_PROCESSOR_ABORT (1L<<22) - #define STATUS_ATTN_BITS_MAC_ABORT (1L<<23) - #define STATUS_ATTN_BITS_TIMER_ABORT (1L<<24) - #define STATUS_ATTN_BITS_DMAE_ABORT (1L<<25) - #define STATUS_ATTN_BITS_FLSH_ABORT (1L<<26) - #define STATUS_ATTN_BITS_GRC_ABORT (1L<<27) - #define STATUS_ATTN_BITS_EPB_ERROR (1L<<30) - #define STATUS_ATTN_BITS_PARITY_ERROR (1L<<31) - - u32 status_attn_bits_ack; -#if defined(__BIG_ENDIAN) - u16 status_tx_quick_consumer_index0; - u16 status_tx_quick_consumer_index1; - u16 status_tx_quick_consumer_index2; - u16 status_tx_quick_consumer_index3; - u16 status_rx_quick_consumer_index0; - u16 status_rx_quick_consumer_index1; - u16 status_rx_quick_consumer_index2; - u16 status_rx_quick_consumer_index3; - u16 status_rx_quick_consumer_index4; - u16 status_rx_quick_consumer_index5; - u16 status_rx_quick_consumer_index6; - u16 status_rx_quick_consumer_index7; - u16 status_rx_quick_consumer_index8; - u16 status_rx_quick_consumer_index9; - u16 status_rx_quick_consumer_index10; - u16 status_rx_quick_consumer_index11; - u16 status_rx_quick_consumer_index12; - u16 status_rx_quick_consumer_index13; - u16 status_rx_quick_consumer_index14; - u16 status_rx_quick_consumer_index15; - u16 status_completion_producer_index; - u16 status_cmd_consumer_index; - u16 status_idx; - u8 status_unused; - u8 status_blk_num; -#elif defined(__LITTLE_ENDIAN) - u16 status_tx_quick_consumer_index1; - u16 status_tx_quick_consumer_index0; - u16 status_tx_quick_consumer_index3; - u16 status_tx_quick_consumer_index2; - u16 status_rx_quick_consumer_index1; - u16 status_rx_quick_consumer_index0; - u16 status_rx_quick_consumer_index3; - u16 status_rx_quick_consumer_index2; - u16 status_rx_quick_consumer_index5; - u16 status_rx_quick_consumer_index4; - u16 status_rx_quick_consumer_index7; - u16 status_rx_quick_consumer_index6; - u16 status_rx_quick_consumer_index9; - u16 status_rx_quick_consumer_index8; - u16 status_rx_quick_consumer_index11; - u16 status_rx_quick_consumer_index10; - u16 status_rx_quick_consumer_index13; - u16 status_rx_quick_consumer_index12; - u16 status_rx_quick_consumer_index15; - u16 status_rx_quick_consumer_index14; - u16 status_cmd_consumer_index; - u16 status_completion_producer_index; - u8 status_blk_num; - u8 status_unused; - u16 status_idx; -#endif -}; - -/* - * status_block definition - */ -struct status_block_msix { -#if defined(__BIG_ENDIAN) - u16 status_tx_quick_consumer_index; - u16 status_rx_quick_consumer_index; - u16 status_completion_producer_index; - u16 status_cmd_consumer_index; - u32 status_unused; - u16 status_idx; - u8 status_unused2; - u8 status_blk_num; -#elif defined(__LITTLE_ENDIAN) - u16 status_rx_quick_consumer_index; - u16 status_tx_quick_consumer_index; - u16 status_cmd_consumer_index; - u16 status_completion_producer_index; - u32 status_unused; - u8 status_blk_num; - u8 status_unused2; - u16 status_idx; -#endif -}; - -#define BNX2_SBLK_MSIX_ALIGN_SIZE 128 - - -/* - * statistics_block definition - */ -struct statistics_block { - u32 stat_IfHCInOctets_hi; - u32 stat_IfHCInOctets_lo; - u32 stat_IfHCInBadOctets_hi; - u32 stat_IfHCInBadOctets_lo; - u32 stat_IfHCOutOctets_hi; 
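/* Counters wider than 32 bits are split into _hi/_lo word pairs; a
 * consumer reassembles them roughly as follows (illustrative code, not
 * a macro from this header):
 *
 *	u64 out_octets = ((u64) stats->stat_IfHCOutOctets_hi << 32) |
 *			 stats->stat_IfHCOutOctets_lo;
 */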
- u32 stat_IfHCOutOctets_lo; - u32 stat_IfHCOutBadOctets_hi; - u32 stat_IfHCOutBadOctets_lo; - u32 stat_IfHCInUcastPkts_hi; - u32 stat_IfHCInUcastPkts_lo; - u32 stat_IfHCInMulticastPkts_hi; - u32 stat_IfHCInMulticastPkts_lo; - u32 stat_IfHCInBroadcastPkts_hi; - u32 stat_IfHCInBroadcastPkts_lo; - u32 stat_IfHCOutUcastPkts_hi; - u32 stat_IfHCOutUcastPkts_lo; - u32 stat_IfHCOutMulticastPkts_hi; - u32 stat_IfHCOutMulticastPkts_lo; - u32 stat_IfHCOutBroadcastPkts_hi; - u32 stat_IfHCOutBroadcastPkts_lo; - u32 stat_emac_tx_stat_dot3statsinternalmactransmiterrors; - u32 stat_Dot3StatsCarrierSenseErrors; - u32 stat_Dot3StatsFCSErrors; - u32 stat_Dot3StatsAlignmentErrors; - u32 stat_Dot3StatsSingleCollisionFrames; - u32 stat_Dot3StatsMultipleCollisionFrames; - u32 stat_Dot3StatsDeferredTransmissions; - u32 stat_Dot3StatsExcessiveCollisions; - u32 stat_Dot3StatsLateCollisions; - u32 stat_EtherStatsCollisions; - u32 stat_EtherStatsFragments; - u32 stat_EtherStatsJabbers; - u32 stat_EtherStatsUndersizePkts; - u32 stat_EtherStatsOverrsizePkts; - u32 stat_EtherStatsPktsRx64Octets; - u32 stat_EtherStatsPktsRx65Octetsto127Octets; - u32 stat_EtherStatsPktsRx128Octetsto255Octets; - u32 stat_EtherStatsPktsRx256Octetsto511Octets; - u32 stat_EtherStatsPktsRx512Octetsto1023Octets; - u32 stat_EtherStatsPktsRx1024Octetsto1522Octets; - u32 stat_EtherStatsPktsRx1523Octetsto9022Octets; - u32 stat_EtherStatsPktsTx64Octets; - u32 stat_EtherStatsPktsTx65Octetsto127Octets; - u32 stat_EtherStatsPktsTx128Octetsto255Octets; - u32 stat_EtherStatsPktsTx256Octetsto511Octets; - u32 stat_EtherStatsPktsTx512Octetsto1023Octets; - u32 stat_EtherStatsPktsTx1024Octetsto1522Octets; - u32 stat_EtherStatsPktsTx1523Octetsto9022Octets; - u32 stat_XonPauseFramesReceived; - u32 stat_XoffPauseFramesReceived; - u32 stat_OutXonSent; - u32 stat_OutXoffSent; - u32 stat_FlowControlDone; - u32 stat_MacControlFramesReceived; - u32 stat_XoffStateEntered; - u32 stat_IfInFramesL2FilterDiscards; - u32 stat_IfInRuleCheckerDiscards; - u32 stat_IfInFTQDiscards; - u32 stat_IfInMBUFDiscards; - u32 stat_IfInRuleCheckerP4Hit; - u32 stat_CatchupInRuleCheckerDiscards; - u32 stat_CatchupInFTQDiscards; - u32 stat_CatchupInMBUFDiscards; - u32 stat_CatchupInRuleCheckerP4Hit; - u32 stat_GenStat00; - u32 stat_GenStat01; - u32 stat_GenStat02; - u32 stat_GenStat03; - u32 stat_GenStat04; - u32 stat_GenStat05; - u32 stat_GenStat06; - u32 stat_GenStat07; - u32 stat_GenStat08; - u32 stat_GenStat09; - u32 stat_GenStat10; - u32 stat_GenStat11; - u32 stat_GenStat12; - u32 stat_GenStat13; - u32 stat_GenStat14; - u32 stat_GenStat15; - u32 stat_FwRxDrop; -}; - - -/* - * l2_fhdr definition - */ -struct l2_fhdr { - u32 l2_fhdr_status; - #define L2_FHDR_STATUS_RULE_CLASS (0x7<<0) - #define L2_FHDR_STATUS_RULE_P2 (1<<3) - #define L2_FHDR_STATUS_RULE_P3 (1<<4) - #define L2_FHDR_STATUS_RULE_P4 (1<<5) - #define L2_FHDR_STATUS_L2_VLAN_TAG (1<<6) - #define L2_FHDR_STATUS_L2_LLC_SNAP (1<<7) - #define L2_FHDR_STATUS_RSS_HASH (1<<8) - #define L2_FHDR_STATUS_IP_DATAGRAM (1<<13) - #define L2_FHDR_STATUS_TCP_SEGMENT (1<<14) - #define L2_FHDR_STATUS_UDP_DATAGRAM (1<<15) - - #define L2_FHDR_STATUS_SPLIT (1<<16) - #define L2_FHDR_ERRORS_BAD_CRC (1<<17) - #define L2_FHDR_ERRORS_PHY_DECODE (1<<18) - #define L2_FHDR_ERRORS_ALIGNMENT (1<<19) - #define L2_FHDR_ERRORS_TOO_SHORT (1<<20) - #define L2_FHDR_ERRORS_GIANT_FRAME (1<<21) - #define L2_FHDR_ERRORS_TCP_XSUM (1<<28) - #define L2_FHDR_ERRORS_UDP_XSUM (1<<31) - - #define L2_FHDR_STATUS_USE_RXHASH \ - (L2_FHDR_STATUS_TCP_SEGMENT | 
L2_FHDR_STATUS_RSS_HASH) - - u32 l2_fhdr_hash; -#if defined(__BIG_ENDIAN) - u16 l2_fhdr_pkt_len; - u16 l2_fhdr_vlan_tag; - u16 l2_fhdr_ip_xsum; - u16 l2_fhdr_tcp_udp_xsum; -#elif defined(__LITTLE_ENDIAN) - u16 l2_fhdr_vlan_tag; - u16 l2_fhdr_pkt_len; - u16 l2_fhdr_tcp_udp_xsum; - u16 l2_fhdr_ip_xsum; -#endif -}; - -#define BNX2_RX_OFFSET (sizeof(struct l2_fhdr) + 2) - -/* - * l2_context definition - */ -#define BNX2_L2CTX_TYPE 0x00000000 -#define BNX2_L2CTX_TYPE_SIZE_L2 ((0xc0/0x20)<<16) -#define BNX2_L2CTX_TYPE_TYPE (0xf<<28) -#define BNX2_L2CTX_TYPE_TYPE_EMPTY (0<<28) -#define BNX2_L2CTX_TYPE_TYPE_L2 (1<<28) - -#define BNX2_L2CTX_TX_HOST_BIDX 0x00000088 -#define BNX2_L2CTX_EST_NBD 0x00000088 -#define BNX2_L2CTX_CMD_TYPE 0x00000088 -#define BNX2_L2CTX_CMD_TYPE_TYPE (0xf<<24) -#define BNX2_L2CTX_CMD_TYPE_TYPE_L2 (0<<24) -#define BNX2_L2CTX_CMD_TYPE_TYPE_TCP (1<<24) - -#define BNX2_L2CTX_TX_HOST_BSEQ 0x00000090 -#define BNX2_L2CTX_TSCH_BSEQ 0x00000094 -#define BNX2_L2CTX_TBDR_BSEQ 0x00000098 -#define BNX2_L2CTX_TBDR_BOFF 0x0000009c -#define BNX2_L2CTX_TBDR_BIDX 0x0000009c -#define BNX2_L2CTX_TBDR_BHADDR_HI 0x000000a0 -#define BNX2_L2CTX_TBDR_BHADDR_LO 0x000000a4 -#define BNX2_L2CTX_TXP_BOFF 0x000000a8 -#define BNX2_L2CTX_TXP_BIDX 0x000000a8 -#define BNX2_L2CTX_TXP_BSEQ 0x000000ac - -#define BNX2_L2CTX_TYPE_XI 0x00000080 -#define BNX2_L2CTX_CMD_TYPE_XI 0x00000240 -#define BNX2_L2CTX_TBDR_BHADDR_HI_XI 0x00000258 -#define BNX2_L2CTX_TBDR_BHADDR_LO_XI 0x0000025c - -/* - * l2_bd_chain_context definition - */ -#define BNX2_L2CTX_BD_PRE_READ 0x00000000 -#define BNX2_L2CTX_CTX_SIZE 0x00000000 -#define BNX2_L2CTX_CTX_TYPE 0x00000000 -#define BNX2_L2CTX_FLOW_CTRL_ENABLE 0x000000ff -#define BNX2_L2CTX_CTX_TYPE_SIZE_L2 ((0x20/20)<<16) -#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE (0xf<<28) -#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_UNDEFINED (0<<28) -#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) - -#define BNX2_L2CTX_HOST_BDIDX 0x00000004 -#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT 16 -#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT 24 -#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id) \ - (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0) -#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id) \ - (((sb_id) > 0) ? 
(((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0) -#define BNX2_L2CTX_HOST_BSEQ 0x00000008 -#define BNX2_L2CTX_NX_BSEQ 0x0000000c -#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 -#define BNX2_L2CTX_NX_BDHADDR_LO 0x00000014 -#define BNX2_L2CTX_NX_BDIDX 0x00000018 - -#define BNX2_L2CTX_HOST_PG_BDIDX 0x00000044 -#define BNX2_L2CTX_PG_BUF_SIZE 0x00000048 -#define BNX2_L2CTX_RBDC_KEY 0x0000004c -#define BNX2_L2CTX_RBDC_JUMBO_KEY 0x3ffe -#define BNX2_L2CTX_NX_PG_BDHADDR_HI 0x00000050 -#define BNX2_L2CTX_NX_PG_BDHADDR_LO 0x00000054 - -/* - * pci_config_l definition - * offset: 0000 - */ -#define BNX2_PCICFG_MSI_CONTROL 0x00000058 -#define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16) - -#define BNX2_PCICFG_MISC_CONFIG 0x00000068 -#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2) -#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3) -#define BNX2_PCICFG_MISC_CONFIG_RESERVED1 (1L<<4) -#define BNX2_PCICFG_MISC_CONFIG_CLOCK_CTL_ENA (1L<<5) -#define BNX2_PCICFG_MISC_CONFIG_TARGET_GRC_WORD_SWAP (1L<<6) -#define BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA (1L<<7) -#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ (1L<<8) -#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY (1L<<9) -#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN1_SWAP_EN (1L<<10) -#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN2_SWAP_EN (1L<<11) -#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN3_SWAP_EN (1L<<12) -#define BNX2_PCICFG_MISC_CONFIG_ASIC_METAL_REV (0xffL<<16) -#define BNX2_PCICFG_MISC_CONFIG_ASIC_BASE_REV (0xfL<<24) -#define BNX2_PCICFG_MISC_CONFIG_ASIC_ID (0xfL<<28) - -#define BNX2_PCICFG_MISC_STATUS 0x0000006c -#define BNX2_PCICFG_MISC_STATUS_INTA_VALUE (1L<<0) -#define BNX2_PCICFG_MISC_STATUS_32BIT_DET (1L<<1) -#define BNX2_PCICFG_MISC_STATUS_M66EN (1L<<2) -#define BNX2_PCICFG_MISC_STATUS_PCIX_DET (1L<<3) -#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED (0x3L<<4) -#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_66 (0L<<4) -#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_100 (1L<<4) -#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_133 (2L<<4) -#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_PCI_MODE (3L<<4) -#define BNX2_PCICFG_MISC_STATUS_BAD_MEM_WRITE_BE (1L<<8) - -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS 0x00000070 -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET (0xfL<<0) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ (0L<<0) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ (1L<<0) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ (2L<<0) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ (3L<<0) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ (4L<<0) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ (5L<<0) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ (6L<<0) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ (7L<<0) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW (0xfL<<0) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE (1L<<6) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT (1L<<7) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC (0x7L<<8) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF (0L<<8) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12 (1L<<8) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6 (2L<<8) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62 (4L<<8) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_MIN_POWER (1L<<11) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED (0xfL<<12) -#define 
BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100 (0L<<12) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80 (1L<<12) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50 (2L<<12) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40 (4L<<12) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25 (8L<<12) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP (1L<<16) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_17 (1L<<17) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_18 (1L<<18) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_19 (1L<<19) -#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED (0xfffL<<20) - -#define BNX2_PCICFG_REG_WINDOW_ADDRESS 0x00000078 -#define BNX2_PCICFG_REG_WINDOW_ADDRESS_VAL (0xfffffL<<2) - -#define BNX2_PCICFG_REG_WINDOW 0x00000080 -#define BNX2_PCICFG_INT_ACK_CMD 0x00000084 -#define BNX2_PCICFG_INT_ACK_CMD_INDEX (0xffffL<<0) -#define BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID (1L<<16) -#define BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM (1L<<17) -#define BNX2_PCICFG_INT_ACK_CMD_MASK_INT (1L<<18) -#define BNX2_PCICFG_INT_ACK_CMD_INTERRUPT_NUM (0xfL<<24) -#define BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT 24 - -#define BNX2_PCICFG_STATUS_BIT_SET_CMD 0x00000088 -#define BNX2_PCICFG_STATUS_BIT_CLEAR_CMD 0x0000008c -#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090 -#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094 - -#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4 -#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16) - -/* - * pci_reg definition - * offset: 0x400 - */ -#define BNX2_PCI_GRC_WINDOW_ADDR 0x00000400 -#define BNX2_PCI_GRC_WINDOW_ADDR_VALUE (0x1ffL<<13) -#define BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN (1L<<31) - -#define BNX2_PCI_GRC_WINDOW2_BASE 0xc000 -#define BNX2_PCI_GRC_WINDOW3_BASE 0xe000 - -#define BNX2_PCI_CONFIG_1 0x00000404 -#define BNX2_PCI_CONFIG_1_RESERVED0 (0xffL<<0) -#define BNX2_PCI_CONFIG_1_READ_BOUNDARY (0x7L<<8) -#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_OFF (0L<<8) -#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_16 (1L<<8) -#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_32 (2L<<8) -#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_64 (3L<<8) -#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_128 (4L<<8) -#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_256 (5L<<8) -#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_512 (6L<<8) -#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_1024 (7L<<8) -#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY (0x7L<<11) -#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_OFF (0L<<11) -#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_16 (1L<<11) -#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_32 (2L<<11) -#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_64 (3L<<11) -#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_128 (4L<<11) -#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_256 (5L<<11) -#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_512 (6L<<11) -#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_1024 (7L<<11) -#define BNX2_PCI_CONFIG_1_RESERVED1 (0x3ffffL<<14) - -#define BNX2_PCI_CONFIG_2 0x00000408 -#define BNX2_PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_32M 
(10L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) -#define BNX2_PCI_CONFIG_2_BAR1_64ENA (1L<<4) -#define BNX2_PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) -#define BNX2_PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) -#define BNX2_PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1K (1L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2K (2L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4K (3L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8K (4L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16K (5L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_32K (6L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_64K (7L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_128K (8L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_256K (9L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_512K (10L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1M (11L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2M (12L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4M (13L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8M (14L<<8) -#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16M (15L<<8) -#define BNX2_PCI_CONFIG_2_MAX_SPLIT_LIMIT (0x1fL<<16) -#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT (0x3L<<21) -#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_512 (0L<<21) -#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_1K (1L<<21) -#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_2K (2L<<21) -#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_4K (3L<<21) -#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_MSTR (1L<<23) -#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_TGT (1L<<24) -#define BNX2_PCI_CONFIG_2_KEEP_REQ_ASSERT (1L<<25) -#define BNX2_PCI_CONFIG_2_RESERVED0 (0x3fL<<26) -#define BNX2_PCI_CONFIG_2_BAR_PREFETCH_XI (1L<<16) -#define BNX2_PCI_CONFIG_2_RESERVED0_XI (0x7fffL<<17) - -#define BNX2_PCI_CONFIG_3 0x0000040c -#define BNX2_PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) -#define BNX2_PCI_CONFIG_3_REG_STICKY_BYTE (0xffL<<8) -#define BNX2_PCI_CONFIG_3_FORCE_PME (1L<<24) -#define BNX2_PCI_CONFIG_3_PME_STATUS (1L<<25) -#define BNX2_PCI_CONFIG_3_PME_ENABLE (1L<<26) -#define BNX2_PCI_CONFIG_3_PM_STATE (0x3L<<27) -#define BNX2_PCI_CONFIG_3_VAUX_PRESET (1L<<30) -#define BNX2_PCI_CONFIG_3_PCI_POWER (1L<<31) - -#define BNX2_PCI_PM_DATA_A 0x00000410 -#define BNX2_PCI_PM_DATA_A_PM_DATA_0_PRG (0xffL<<0) -#define BNX2_PCI_PM_DATA_A_PM_DATA_1_PRG (0xffL<<8) -#define BNX2_PCI_PM_DATA_A_PM_DATA_2_PRG (0xffL<<16) -#define BNX2_PCI_PM_DATA_A_PM_DATA_3_PRG (0xffL<<24) - -#define BNX2_PCI_PM_DATA_B 0x00000414 -#define BNX2_PCI_PM_DATA_B_PM_DATA_4_PRG (0xffL<<0) -#define BNX2_PCI_PM_DATA_B_PM_DATA_5_PRG (0xffL<<8) -#define BNX2_PCI_PM_DATA_B_PM_DATA_6_PRG (0xffL<<16) -#define BNX2_PCI_PM_DATA_B_PM_DATA_7_PRG (0xffL<<24) - -#define BNX2_PCI_SWAP_DIAG0 0x00000418 -#define BNX2_PCI_SWAP_DIAG1 0x0000041c -#define BNX2_PCI_EXP_ROM_ADDR 0x00000420 -#define BNX2_PCI_EXP_ROM_ADDR_ADDRESS (0x3fffffL<<2) -#define BNX2_PCI_EXP_ROM_ADDR_REQ (1L<<31) - -#define BNX2_PCI_EXP_ROM_DATA 0x00000424 -#define BNX2_PCI_VPD_INTF 0x00000428 -#define BNX2_PCI_VPD_INTF_INTF_REQ (1L<<0) - -#define BNX2_PCI_VPD_ADDR_FLAG 0x0000042c -#define BNX2_PCI_VPD_ADDR_FLAG_MSK 0x0000ffff -#define BNX2_PCI_VPD_ADDR_FLAG_SL 0L -#define BNX2_PCI_VPD_ADDR_FLAG_ADDRESS (0x1fffL<<2) -#define BNX2_PCI_VPD_ADDR_FLAG_WR (1L<<15) - -#define BNX2_PCI_VPD_DATA 0x00000430 -#define 
BNX2_PCI_ID_VAL1 0x00000434 -#define BNX2_PCI_ID_VAL1_DEVICE_ID (0xffffL<<0) -#define BNX2_PCI_ID_VAL1_VENDOR_ID (0xffffL<<16) - -#define BNX2_PCI_ID_VAL2 0x00000438 -#define BNX2_PCI_ID_VAL2_SUBSYSTEM_VENDOR_ID (0xffffL<<0) -#define BNX2_PCI_ID_VAL2_SUBSYSTEM_ID (0xffffL<<16) - -#define BNX2_PCI_ID_VAL3 0x0000043c -#define BNX2_PCI_ID_VAL3_CLASS_CODE (0xffffffL<<0) -#define BNX2_PCI_ID_VAL3_REVISION_ID (0xffL<<24) - -#define BNX2_PCI_ID_VAL4 0x00000440 -#define BNX2_PCI_ID_VAL4_CAP_ENA (0xfL<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_0 (0L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_1 (1L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_2 (2L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_3 (3L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_4 (4L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_5 (5L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_6 (6L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_7 (7L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_8 (8L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_9 (9L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_10 (10L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_11 (11L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_12 (12L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_13 (13L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_14 (14L<<0) -#define BNX2_PCI_ID_VAL4_CAP_ENA_15 (15L<<0) -#define BNX2_PCI_ID_VAL4_RESERVED0 (0x3L<<4) -#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG (0x3L<<6) -#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_0 (0L<<6) -#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_1 (1L<<6) -#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_2 (2L<<6) -#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_3 (3L<<6) -#define BNX2_PCI_ID_VAL4_MSI_PV_MASK_CAP (1L<<8) -#define BNX2_PCI_ID_VAL4_MSI_LIMIT (0x7L<<9) -#define BNX2_PCI_ID_VAL4_MULTI_MSG_CAP (0x7L<<12) -#define BNX2_PCI_ID_VAL4_MSI_ENABLE (1L<<15) -#define BNX2_PCI_ID_VAL4_MAX_64_ADVERTIZE (1L<<16) -#define BNX2_PCI_ID_VAL4_MAX_133_ADVERTIZE (1L<<17) -#define BNX2_PCI_ID_VAL4_RESERVED2 (0x7L<<18) -#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B21 (0x3L<<21) -#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B21 (0x3L<<23) -#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B0 (1L<<25) -#define BNX2_PCI_ID_VAL4_MAX_MEM_READ_SIZE_B10 (0x3L<<26) -#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B0 (1L<<28) -#define BNX2_PCI_ID_VAL4_RESERVED3 (0x7L<<29) -#define BNX2_PCI_ID_VAL4_RESERVED3_XI (0xffffL<<16) - -#define BNX2_PCI_ID_VAL5 0x00000444 -#define BNX2_PCI_ID_VAL5_D1_SUPPORT (1L<<0) -#define BNX2_PCI_ID_VAL5_D2_SUPPORT (1L<<1) -#define BNX2_PCI_ID_VAL5_PME_IN_D0 (1L<<2) -#define BNX2_PCI_ID_VAL5_PME_IN_D1 (1L<<3) -#define BNX2_PCI_ID_VAL5_PME_IN_D2 (1L<<4) -#define BNX2_PCI_ID_VAL5_PME_IN_D3_HOT (1L<<5) -#define BNX2_PCI_ID_VAL5_RESERVED0_TE (0x3ffffffL<<6) -#define BNX2_PCI_ID_VAL5_PM_VERSION_XI (0x7L<<6) -#define BNX2_PCI_ID_VAL5_NO_SOFT_RESET_XI (1L<<9) -#define BNX2_PCI_ID_VAL5_RESERVED0_XI (0x3fffffL<<10) - -#define BNX2_PCI_PCIX_EXTENDED_STATUS 0x00000448 -#define BNX2_PCI_PCIX_EXTENDED_STATUS_NO_SNOOP (1L<<8) -#define BNX2_PCI_PCIX_EXTENDED_STATUS_LONG_BURST (1L<<9) -#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_CLASS (0xfL<<16) -#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_IDX (0xffL<<24) - -#define BNX2_PCI_ID_VAL6 0x0000044c -#define BNX2_PCI_ID_VAL6_MAX_LAT (0xffL<<0) -#define BNX2_PCI_ID_VAL6_MIN_GNT (0xffL<<8) -#define BNX2_PCI_ID_VAL6_BIST (0xffL<<16) -#define BNX2_PCI_ID_VAL6_RESERVED0 (0xffL<<24) - -#define BNX2_PCI_MSI_DATA 0x00000450 -#define BNX2_PCI_MSI_DATA_MSI_DATA (0xffffL<<0) - -#define BNX2_PCI_MSI_ADDR_H 0x00000454 -#define BNX2_PCI_MSI_ADDR_L 0x00000458 -#define BNX2_PCI_MSI_ADDR_L_VAL (0x3fffffffL<<2) - -#define BNX2_PCI_CFG_ACCESS_CMD 0x0000045c 
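/* Convention throughout this header: a register-offset macro is
 * followed by macros naming the bit fields inside that register,
 * written as (mask << bit_position). Reading a field then looks
 * roughly like this (REG_RD is the driver's register accessor):
 *
 *	u32 val = REG_RD(bp, BNX2_PCI_CFG_ACCESS_CMD);
 *	u32 adr = (val & BNX2_PCI_CFG_ACCESS_CMD_ADR) >> 2;
 */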
-#define BNX2_PCI_CFG_ACCESS_CMD_ADR (0x3fL<<2) -#define BNX2_PCI_CFG_ACCESS_CMD_RD_REQ (1L<<27) -#define BNX2_PCI_CFG_ACCESS_CMD_WR_REQ (0xfL<<28) - -#define BNX2_PCI_CFG_ACCESS_DATA 0x00000460 -#define BNX2_PCI_MSI_MASK 0x00000464 -#define BNX2_PCI_MSI_MASK_MSI_MASK (0xffffffffL<<0) - -#define BNX2_PCI_MSI_PEND 0x00000468 -#define BNX2_PCI_MSI_PEND_MSI_PEND (0xffffffffL<<0) - -#define BNX2_PCI_PM_DATA_C 0x0000046c -#define BNX2_PCI_PM_DATA_C_PM_DATA_8_PRG (0xffL<<0) -#define BNX2_PCI_PM_DATA_C_RESERVED0 (0xffffffL<<8) - -#define BNX2_PCI_MSIX_CONTROL 0x000004c0 -#define BNX2_PCI_MSIX_CONTROL_MSIX_TBL_SIZ (0x7ffL<<0) -#define BNX2_PCI_MSIX_CONTROL_RESERVED0 (0x1fffffL<<11) - -#define BNX2_PCI_MSIX_TBL_OFF_BIR 0x000004c4 -#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_BIR (0x7L<<0) -#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_OFF (0x1fffffffL<<3) - -#define BNX2_PCI_MSIX_PBA_OFF_BIT 0x000004c8 -#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_BIR (0x7L<<0) -#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_OFF (0x1fffffffL<<3) - -#define BNX2_PCI_PCIE_CAPABILITY 0x000004d0 -#define BNX2_PCI_PCIE_CAPABILITY_INTERRUPT_MSG_NUM (0x1fL<<0) -#define BNX2_PCI_PCIE_CAPABILITY_COMPLY_PCIE_1_1 (1L<<5) - -#define BNX2_PCI_DEVICE_CAPABILITY 0x000004d4 -#define BNX2_PCI_DEVICE_CAPABILITY_MAX_PL_SIZ_SUPPORTED (0x7L<<0) -#define BNX2_PCI_DEVICE_CAPABILITY_EXTENDED_TAG_SUPPORT (1L<<5) -#define BNX2_PCI_DEVICE_CAPABILITY_L0S_ACCEPTABLE_LATENCY (0x7L<<6) -#define BNX2_PCI_DEVICE_CAPABILITY_L1_ACCEPTABLE_LATENCY (0x7L<<9) -#define BNX2_PCI_DEVICE_CAPABILITY_ROLE_BASED_ERR_RPT (1L<<15) - -#define BNX2_PCI_LINK_CAPABILITY 0x000004dc -#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED (0xfL<<0) -#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0001 (1L<<0) -#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0010 (1L<<0) -#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_WIDTH (0x1fL<<4) -#define BNX2_PCI_LINK_CAPABILITY_CLK_POWER_MGMT (1L<<9) -#define BNX2_PCI_LINK_CAPABILITY_ASPM_SUPPORT (0x3L<<10) -#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT (0x7L<<12) -#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_101 (5L<<12) -#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_110 (6L<<12) -#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT (0x7L<<15) -#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_001 (1L<<15) -#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_010 (2L<<15) -#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT (0x7L<<18) -#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_101 (5L<<18) -#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_110 (6L<<18) -#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT (0x7L<<21) -#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_001 (1L<<21) -#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_010 (2L<<21) -#define BNX2_PCI_LINK_CAPABILITY_PORT_NUM (0xffL<<24) - -#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2 0x000004e4 -#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_RANGE_SUPP (0xfL<<0) -#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_DISABL_SUPP (1L<<4) -#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_RESERVED (0x7ffffffL<<5) - -#define BNX2_PCI_PCIE_LINK_CAPABILITY_2 0x000004e8 -#define BNX2_PCI_PCIE_LINK_CAPABILITY_2_RESERVED (0xffffffffL<<0) - -#define BNX2_PCI_GRC_WINDOW1_ADDR 0x00000610 -#define BNX2_PCI_GRC_WINDOW1_ADDR_VALUE (0x1ffL<<13) - -#define BNX2_PCI_GRC_WINDOW2_ADDR 0x00000614 -#define BNX2_PCI_GRC_WINDOW2_ADDR_VALUE (0x1ffL<<13) - -#define BNX2_PCI_GRC_WINDOW3_ADDR 0x00000618 -#define BNX2_PCI_GRC_WINDOW3_ADDR_VALUE (0x1ffL<<13) - -#define BNX2_MSIX_TABLE_ADDR 0x318000 -#define BNX2_MSIX_PBA_ADDR 0x31c000 - -/* - * 
misc_reg definition - * offset: 0x800 - */ -#define BNX2_MISC_COMMAND 0x00000800 -#define BNX2_MISC_COMMAND_ENABLE_ALL (1L<<0) -#define BNX2_MISC_COMMAND_DISABLE_ALL (1L<<1) -#define BNX2_MISC_COMMAND_SW_RESET (1L<<4) -#define BNX2_MISC_COMMAND_POR_RESET (1L<<5) -#define BNX2_MISC_COMMAND_HD_RESET (1L<<6) -#define BNX2_MISC_COMMAND_CMN_SW_RESET (1L<<7) -#define BNX2_MISC_COMMAND_PAR_ERROR (1L<<8) -#define BNX2_MISC_COMMAND_CS16_ERR (1L<<9) -#define BNX2_MISC_COMMAND_CS16_ERR_LOC (0xfL<<12) -#define BNX2_MISC_COMMAND_PAR_ERR_RAM (0x7fL<<16) -#define BNX2_MISC_COMMAND_POWERDOWN_EVENT (1L<<23) -#define BNX2_MISC_COMMAND_SW_SHUTDOWN (1L<<24) -#define BNX2_MISC_COMMAND_SHUTDOWN_EN (1L<<25) -#define BNX2_MISC_COMMAND_DINTEG_ATTN_EN (1L<<26) -#define BNX2_MISC_COMMAND_PCIE_LINK_IN_L23 (1L<<27) -#define BNX2_MISC_COMMAND_PCIE_DIS (1L<<28) - -#define BNX2_MISC_CFG 0x00000804 -#define BNX2_MISC_CFG_GRC_TMOUT (1L<<0) -#define BNX2_MISC_CFG_NVM_WR_EN (0x3L<<1) -#define BNX2_MISC_CFG_NVM_WR_EN_PROTECT (0L<<1) -#define BNX2_MISC_CFG_NVM_WR_EN_PCI (1L<<1) -#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW (2L<<1) -#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW2 (3L<<1) -#define BNX2_MISC_CFG_BIST_EN (1L<<3) -#define BNX2_MISC_CFG_CK25_OUT_ALT_SRC (1L<<4) -#define BNX2_MISC_CFG_RESERVED5_TE (1L<<5) -#define BNX2_MISC_CFG_RESERVED6_TE (1L<<6) -#define BNX2_MISC_CFG_CLK_CTL_OVERRIDE (1L<<7) -#define BNX2_MISC_CFG_LEDMODE (0x7L<<8) -#define BNX2_MISC_CFG_LEDMODE_MAC (0L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY1_TE (1L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY2_TE (2L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY3_TE (3L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY4_TE (4L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY5_TE (5L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY6_TE (6L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY7_TE (7L<<8) -#define BNX2_MISC_CFG_MCP_GRC_TMOUT_TE (1L<<11) -#define BNX2_MISC_CFG_DBU_GRC_TMOUT_TE (1L<<12) -#define BNX2_MISC_CFG_LEDMODE_XI (0xfL<<8) -#define BNX2_MISC_CFG_LEDMODE_MAC_XI (0L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY1_XI (1L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY2_XI (2L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY3_XI (3L<<8) -#define BNX2_MISC_CFG_LEDMODE_MAC2_XI (4L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY4_XI (5L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY5_XI (6L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY6_XI (7L<<8) -#define BNX2_MISC_CFG_LEDMODE_MAC3_XI (8L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY7_XI (9L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY8_XI (10L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY9_XI (11L<<8) -#define BNX2_MISC_CFG_LEDMODE_MAC4_XI (12L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY10_XI (13L<<8) -#define BNX2_MISC_CFG_LEDMODE_PHY11_XI (14L<<8) -#define BNX2_MISC_CFG_LEDMODE_UNUSED_XI (15L<<8) -#define BNX2_MISC_CFG_PORT_SELECT_XI (1L<<13) -#define BNX2_MISC_CFG_PARITY_MODE_XI (1L<<14) - -#define BNX2_MISC_ID 0x00000808 -#define BNX2_MISC_ID_BOND_ID (0xfL<<0) -#define BNX2_MISC_ID_BOND_ID_X (0L<<0) -#define BNX2_MISC_ID_BOND_ID_C (3L<<0) -#define BNX2_MISC_ID_BOND_ID_S (12L<<0) -#define BNX2_MISC_ID_CHIP_METAL (0xffL<<4) -#define BNX2_MISC_ID_CHIP_REV (0xfL<<12) -#define BNX2_MISC_ID_CHIP_NUM (0xffffL<<16) - -#define BNX2_MISC_ENABLE_STATUS_BITS 0x0000080c -#define BNX2_MISC_ENABLE_STATUS_BITS_TX_SCHEDULER_ENABLE (1L<<0) -#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_READ_ENABLE (1L<<1) -#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_CACHE_ENABLE (1L<<2) -#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PROCESSOR_ENABLE (1L<<3) -#define BNX2_MISC_ENABLE_STATUS_BITS_TX_DMA_ENABLE (1L<<4) -#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PATCHUP_ENABLE (1L<<5) 
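/* The matching BNX2_MISC_ENABLE_SET_BITS / _CLR_BITS registers defined
 * below act as write-one-to-set / write-one-to-clear companions of this
 * status register, so a block is stopped with a single masked write,
 * e.g. (a sketch using the driver's REG_WR accessor):
 *
 *	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
 *	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE);
 */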
-#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) -#define BNX2_MISC_ENABLE_STATUS_BITS_TX_HEADER_Q_ENABLE (1L<<7) -#define BNX2_MISC_ENABLE_STATUS_BITS_TX_ASSEMBLER_ENABLE (1L<<8) -#define BNX2_MISC_ENABLE_STATUS_BITS_EMAC_ENABLE (1L<<9) -#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_MAC_ENABLE (1L<<10) -#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) -#define BNX2_MISC_ENABLE_STATUS_BITS_RX_MBUF_ENABLE (1L<<12) -#define BNX2_MISC_ENABLE_STATUS_BITS_RX_LOOKUP_ENABLE (1L<<13) -#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PROCESSOR_ENABLE (1L<<14) -#define BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE (1L<<15) -#define BNX2_MISC_ENABLE_STATUS_BITS_RX_BD_CACHE_ENABLE (1L<<16) -#define BNX2_MISC_ENABLE_STATUS_BITS_RX_DMA_ENABLE (1L<<17) -#define BNX2_MISC_ENABLE_STATUS_BITS_COMPLETION_ENABLE (1L<<18) -#define BNX2_MISC_ENABLE_STATUS_BITS_HOST_COALESCE_ENABLE (1L<<19) -#define BNX2_MISC_ENABLE_STATUS_BITS_MAILBOX_QUEUE_ENABLE (1L<<20) -#define BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE (1L<<21) -#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_SCHEDULER_ENABLE (1L<<22) -#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_PROCESSOR_ENABLE (1L<<23) -#define BNX2_MISC_ENABLE_STATUS_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) -#define BNX2_MISC_ENABLE_STATUS_BITS_TIMER_ENABLE (1L<<25) -#define BNX2_MISC_ENABLE_STATUS_BITS_DMA_ENGINE_ENABLE (1L<<26) -#define BNX2_MISC_ENABLE_STATUS_BITS_UMP_ENABLE (1L<<27) -#define BNX2_MISC_ENABLE_STATUS_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) -#define BNX2_MISC_ENABLE_STATUS_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) - -#define BNX2_MISC_ENABLE_SET_BITS 0x00000810 -#define BNX2_MISC_ENABLE_SET_BITS_TX_SCHEDULER_ENABLE (1L<<0) -#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_READ_ENABLE (1L<<1) -#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_CACHE_ENABLE (1L<<2) -#define BNX2_MISC_ENABLE_SET_BITS_TX_PROCESSOR_ENABLE (1L<<3) -#define BNX2_MISC_ENABLE_SET_BITS_TX_DMA_ENABLE (1L<<4) -#define BNX2_MISC_ENABLE_SET_BITS_TX_PATCHUP_ENABLE (1L<<5) -#define BNX2_MISC_ENABLE_SET_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) -#define BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE (1L<<7) -#define BNX2_MISC_ENABLE_SET_BITS_TX_ASSEMBLER_ENABLE (1L<<8) -#define BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE (1L<<9) -#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE (1L<<10) -#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) -#define BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE (1L<<12) -#define BNX2_MISC_ENABLE_SET_BITS_RX_LOOKUP_ENABLE (1L<<13) -#define BNX2_MISC_ENABLE_SET_BITS_RX_PROCESSOR_ENABLE (1L<<14) -#define BNX2_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE (1L<<15) -#define BNX2_MISC_ENABLE_SET_BITS_RX_BD_CACHE_ENABLE (1L<<16) -#define BNX2_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE (1L<<17) -#define BNX2_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE (1L<<18) -#define BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE (1L<<19) -#define BNX2_MISC_ENABLE_SET_BITS_MAILBOX_QUEUE_ENABLE (1L<<20) -#define BNX2_MISC_ENABLE_SET_BITS_CONTEXT_ENABLE (1L<<21) -#define BNX2_MISC_ENABLE_SET_BITS_CMD_SCHEDULER_ENABLE (1L<<22) -#define BNX2_MISC_ENABLE_SET_BITS_CMD_PROCESSOR_ENABLE (1L<<23) -#define BNX2_MISC_ENABLE_SET_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) -#define BNX2_MISC_ENABLE_SET_BITS_TIMER_ENABLE (1L<<25) -#define BNX2_MISC_ENABLE_SET_BITS_DMA_ENGINE_ENABLE (1L<<26) -#define BNX2_MISC_ENABLE_SET_BITS_UMP_ENABLE (1L<<27) -#define BNX2_MISC_ENABLE_SET_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) -#define BNX2_MISC_ENABLE_SET_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) - -#define BNX2_MISC_ENABLE_CLR_BITS 0x00000814 -#define 
BNX2_MISC_ENABLE_CLR_BITS_TX_SCHEDULER_ENABLE (1L<<0) -#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_READ_ENABLE (1L<<1) -#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_CACHE_ENABLE (1L<<2) -#define BNX2_MISC_ENABLE_CLR_BITS_TX_PROCESSOR_ENABLE (1L<<3) -#define BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE (1L<<4) -#define BNX2_MISC_ENABLE_CLR_BITS_TX_PATCHUP_ENABLE (1L<<5) -#define BNX2_MISC_ENABLE_CLR_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) -#define BNX2_MISC_ENABLE_CLR_BITS_TX_HEADER_Q_ENABLE (1L<<7) -#define BNX2_MISC_ENABLE_CLR_BITS_TX_ASSEMBLER_ENABLE (1L<<8) -#define BNX2_MISC_ENABLE_CLR_BITS_EMAC_ENABLE (1L<<9) -#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_MAC_ENABLE (1L<<10) -#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) -#define BNX2_MISC_ENABLE_CLR_BITS_RX_MBUF_ENABLE (1L<<12) -#define BNX2_MISC_ENABLE_CLR_BITS_RX_LOOKUP_ENABLE (1L<<13) -#define BNX2_MISC_ENABLE_CLR_BITS_RX_PROCESSOR_ENABLE (1L<<14) -#define BNX2_MISC_ENABLE_CLR_BITS_RX_V2P_ENABLE (1L<<15) -#define BNX2_MISC_ENABLE_CLR_BITS_RX_BD_CACHE_ENABLE (1L<<16) -#define BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE (1L<<17) -#define BNX2_MISC_ENABLE_CLR_BITS_COMPLETION_ENABLE (1L<<18) -#define BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE (1L<<19) -#define BNX2_MISC_ENABLE_CLR_BITS_MAILBOX_QUEUE_ENABLE (1L<<20) -#define BNX2_MISC_ENABLE_CLR_BITS_CONTEXT_ENABLE (1L<<21) -#define BNX2_MISC_ENABLE_CLR_BITS_CMD_SCHEDULER_ENABLE (1L<<22) -#define BNX2_MISC_ENABLE_CLR_BITS_CMD_PROCESSOR_ENABLE (1L<<23) -#define BNX2_MISC_ENABLE_CLR_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) -#define BNX2_MISC_ENABLE_CLR_BITS_TIMER_ENABLE (1L<<25) -#define BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE (1L<<26) -#define BNX2_MISC_ENABLE_CLR_BITS_UMP_ENABLE (1L<<27) -#define BNX2_MISC_ENABLE_CLR_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) -#define BNX2_MISC_ENABLE_CLR_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) - -#define BNX2_MISC_CLOCK_CONTROL_BITS 0x00000818 -#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET (0xfL<<0) -#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ (0L<<0) -#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ (1L<<0) -#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ (2L<<0) -#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ (3L<<0) -#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ (4L<<0) -#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ (5L<<0) -#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ (6L<<0) -#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ (7L<<0) -#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW (0xfL<<0) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE (1L<<6) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT (1L<<7) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC (0x7L<<8) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF (0L<<8) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12 (1L<<8) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6 (2L<<8) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62 (4L<<8) -#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED0_XI (0x7L<<8) -#define BNX2_MISC_CLOCK_CONTROL_BITS_MIN_POWER (1L<<11) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED (0xfL<<12) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100 (0L<<12) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80 (1L<<12) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50 (2L<<12) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40 (4L<<12) -#define 
BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25 (8L<<12) -#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED1_XI (0xfL<<12) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP (1L<<16) -#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_17_TE (1L<<17) -#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_18_TE (1L<<18) -#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_19_TE (1L<<19) -#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_TE (0xfffL<<20) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_MGMT_XI (1L<<17) -#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED2_XI (0x3fL<<18) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_VCO_XI (0x7L<<24) -#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED3_XI (1L<<27) -#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_XI (0xfL<<28) - -#define BNX2_MISC_SPIO 0x0000081c -#define BNX2_MISC_SPIO_VALUE (0xffL<<0) -#define BNX2_MISC_SPIO_SET (0xffL<<8) -#define BNX2_MISC_SPIO_CLR (0xffL<<16) -#define BNX2_MISC_SPIO_FLOAT (0xffL<<24) - -#define BNX2_MISC_SPIO_INT 0x00000820 -#define BNX2_MISC_SPIO_INT_INT_STATE_TE (0xfL<<0) -#define BNX2_MISC_SPIO_INT_OLD_VALUE_TE (0xfL<<8) -#define BNX2_MISC_SPIO_INT_OLD_SET_TE (0xfL<<16) -#define BNX2_MISC_SPIO_INT_OLD_CLR_TE (0xfL<<24) -#define BNX2_MISC_SPIO_INT_INT_STATE_XI (0xffL<<0) -#define BNX2_MISC_SPIO_INT_OLD_VALUE_XI (0xffL<<8) -#define BNX2_MISC_SPIO_INT_OLD_SET_XI (0xffL<<16) -#define BNX2_MISC_SPIO_INT_OLD_CLR_XI (0xffL<<24) - -#define BNX2_MISC_CONFIG_LFSR 0x00000824 -#define BNX2_MISC_CONFIG_LFSR_DIV (0xffffL<<0) - -#define BNX2_MISC_LFSR_MASK_BITS 0x00000828 -#define BNX2_MISC_LFSR_MASK_BITS_TX_SCHEDULER_ENABLE (1L<<0) -#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_READ_ENABLE (1L<<1) -#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_CACHE_ENABLE (1L<<2) -#define BNX2_MISC_LFSR_MASK_BITS_TX_PROCESSOR_ENABLE (1L<<3) -#define BNX2_MISC_LFSR_MASK_BITS_TX_DMA_ENABLE (1L<<4) -#define BNX2_MISC_LFSR_MASK_BITS_TX_PATCHUP_ENABLE (1L<<5) -#define BNX2_MISC_LFSR_MASK_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) -#define BNX2_MISC_LFSR_MASK_BITS_TX_HEADER_Q_ENABLE (1L<<7) -#define BNX2_MISC_LFSR_MASK_BITS_TX_ASSEMBLER_ENABLE (1L<<8) -#define BNX2_MISC_LFSR_MASK_BITS_EMAC_ENABLE (1L<<9) -#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_MAC_ENABLE (1L<<10) -#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) -#define BNX2_MISC_LFSR_MASK_BITS_RX_MBUF_ENABLE (1L<<12) -#define BNX2_MISC_LFSR_MASK_BITS_RX_LOOKUP_ENABLE (1L<<13) -#define BNX2_MISC_LFSR_MASK_BITS_RX_PROCESSOR_ENABLE (1L<<14) -#define BNX2_MISC_LFSR_MASK_BITS_RX_V2P_ENABLE (1L<<15) -#define BNX2_MISC_LFSR_MASK_BITS_RX_BD_CACHE_ENABLE (1L<<16) -#define BNX2_MISC_LFSR_MASK_BITS_RX_DMA_ENABLE (1L<<17) -#define BNX2_MISC_LFSR_MASK_BITS_COMPLETION_ENABLE (1L<<18) -#define BNX2_MISC_LFSR_MASK_BITS_HOST_COALESCE_ENABLE (1L<<19) -#define BNX2_MISC_LFSR_MASK_BITS_MAILBOX_QUEUE_ENABLE (1L<<20) -#define BNX2_MISC_LFSR_MASK_BITS_CONTEXT_ENABLE (1L<<21) -#define BNX2_MISC_LFSR_MASK_BITS_CMD_SCHEDULER_ENABLE (1L<<22) -#define BNX2_MISC_LFSR_MASK_BITS_CMD_PROCESSOR_ENABLE (1L<<23) -#define BNX2_MISC_LFSR_MASK_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) -#define BNX2_MISC_LFSR_MASK_BITS_TIMER_ENABLE (1L<<25) -#define BNX2_MISC_LFSR_MASK_BITS_DMA_ENGINE_ENABLE (1L<<26) -#define BNX2_MISC_LFSR_MASK_BITS_UMP_ENABLE (1L<<27) -#define BNX2_MISC_LFSR_MASK_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) -#define BNX2_MISC_LFSR_MASK_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) - -#define BNX2_MISC_ARB_REQ0 0x0000082c -#define BNX2_MISC_ARB_REQ1 0x00000830 -#define BNX2_MISC_ARB_REQ2 0x00000834 -#define BNX2_MISC_ARB_REQ3 0x00000838 -#define 
BNX2_MISC_ARB_REQ4 0x0000083c -#define BNX2_MISC_ARB_FREE0 0x00000840 -#define BNX2_MISC_ARB_FREE1 0x00000844 -#define BNX2_MISC_ARB_FREE2 0x00000848 -#define BNX2_MISC_ARB_FREE3 0x0000084c -#define BNX2_MISC_ARB_FREE4 0x00000850 -#define BNX2_MISC_ARB_REQ_STATUS0 0x00000854 -#define BNX2_MISC_ARB_REQ_STATUS1 0x00000858 -#define BNX2_MISC_ARB_REQ_STATUS2 0x0000085c -#define BNX2_MISC_ARB_REQ_STATUS3 0x00000860 -#define BNX2_MISC_ARB_REQ_STATUS4 0x00000864 -#define BNX2_MISC_ARB_GNT0 0x00000868 -#define BNX2_MISC_ARB_GNT0_0 (0x7L<<0) -#define BNX2_MISC_ARB_GNT0_1 (0x7L<<4) -#define BNX2_MISC_ARB_GNT0_2 (0x7L<<8) -#define BNX2_MISC_ARB_GNT0_3 (0x7L<<12) -#define BNX2_MISC_ARB_GNT0_4 (0x7L<<16) -#define BNX2_MISC_ARB_GNT0_5 (0x7L<<20) -#define BNX2_MISC_ARB_GNT0_6 (0x7L<<24) -#define BNX2_MISC_ARB_GNT0_7 (0x7L<<28) - -#define BNX2_MISC_ARB_GNT1 0x0000086c -#define BNX2_MISC_ARB_GNT1_8 (0x7L<<0) -#define BNX2_MISC_ARB_GNT1_9 (0x7L<<4) -#define BNX2_MISC_ARB_GNT1_10 (0x7L<<8) -#define BNX2_MISC_ARB_GNT1_11 (0x7L<<12) -#define BNX2_MISC_ARB_GNT1_12 (0x7L<<16) -#define BNX2_MISC_ARB_GNT1_13 (0x7L<<20) -#define BNX2_MISC_ARB_GNT1_14 (0x7L<<24) -#define BNX2_MISC_ARB_GNT1_15 (0x7L<<28) - -#define BNX2_MISC_ARB_GNT2 0x00000870 -#define BNX2_MISC_ARB_GNT2_16 (0x7L<<0) -#define BNX2_MISC_ARB_GNT2_17 (0x7L<<4) -#define BNX2_MISC_ARB_GNT2_18 (0x7L<<8) -#define BNX2_MISC_ARB_GNT2_19 (0x7L<<12) -#define BNX2_MISC_ARB_GNT2_20 (0x7L<<16) -#define BNX2_MISC_ARB_GNT2_21 (0x7L<<20) -#define BNX2_MISC_ARB_GNT2_22 (0x7L<<24) -#define BNX2_MISC_ARB_GNT2_23 (0x7L<<28) - -#define BNX2_MISC_ARB_GNT3 0x00000874 -#define BNX2_MISC_ARB_GNT3_24 (0x7L<<0) -#define BNX2_MISC_ARB_GNT3_25 (0x7L<<4) -#define BNX2_MISC_ARB_GNT3_26 (0x7L<<8) -#define BNX2_MISC_ARB_GNT3_27 (0x7L<<12) -#define BNX2_MISC_ARB_GNT3_28 (0x7L<<16) -#define BNX2_MISC_ARB_GNT3_29 (0x7L<<20) -#define BNX2_MISC_ARB_GNT3_30 (0x7L<<24) -#define BNX2_MISC_ARB_GNT3_31 (0x7L<<28) - -#define BNX2_MISC_RESERVED1 0x00000878 -#define BNX2_MISC_RESERVED1_MISC_RESERVED1_VALUE (0x3fL<<0) - -#define BNX2_MISC_RESERVED2 0x0000087c -#define BNX2_MISC_RESERVED2_PCIE_DIS (1L<<0) -#define BNX2_MISC_RESERVED2_LINK_IN_L23 (1L<<1) - -#define BNX2_MISC_SM_ASF_CONTROL 0x00000880 -#define BNX2_MISC_SM_ASF_CONTROL_ASF_RST (1L<<0) -#define BNX2_MISC_SM_ASF_CONTROL_TSC_EN (1L<<1) -#define BNX2_MISC_SM_ASF_CONTROL_WG_TO (1L<<2) -#define BNX2_MISC_SM_ASF_CONTROL_HB_TO (1L<<3) -#define BNX2_MISC_SM_ASF_CONTROL_PA_TO (1L<<4) -#define BNX2_MISC_SM_ASF_CONTROL_PL_TO (1L<<5) -#define BNX2_MISC_SM_ASF_CONTROL_RT_TO (1L<<6) -#define BNX2_MISC_SM_ASF_CONTROL_SMB_EVENT (1L<<7) -#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_EN (1L<<8) -#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_PULSE (1L<<9) -#define BNX2_MISC_SM_ASF_CONTROL_RES (0x3L<<10) -#define BNX2_MISC_SM_ASF_CONTROL_SMB_EN (1L<<12) -#define BNX2_MISC_SM_ASF_CONTROL_SMB_BB_EN (1L<<13) -#define BNX2_MISC_SM_ASF_CONTROL_SMB_NO_ADDR_FILT (1L<<14) -#define BNX2_MISC_SM_ASF_CONTROL_SMB_AUTOREAD (1L<<15) -#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR1 (0x7fL<<16) -#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR2 (0x7fL<<23) -#define BNX2_MISC_SM_ASF_CONTROL_EN_NIC_SMB_ADDR_0 (1L<<30) -#define BNX2_MISC_SM_ASF_CONTROL_SMB_EARLY_ATTN (1L<<31) - -#define BNX2_MISC_SMB_IN 0x00000884 -#define BNX2_MISC_SMB_IN_DAT_IN (0xffL<<0) -#define BNX2_MISC_SMB_IN_RDY (1L<<8) -#define BNX2_MISC_SMB_IN_DONE (1L<<9) -#define BNX2_MISC_SMB_IN_FIRSTBYTE (1L<<10) -#define BNX2_MISC_SMB_IN_STATUS (0x7L<<11) -#define BNX2_MISC_SMB_IN_STATUS_OK (0x0L<<11) -#define 
BNX2_MISC_SMB_IN_STATUS_PEC (0x1L<<11) -#define BNX2_MISC_SMB_IN_STATUS_OFLOW (0x2L<<11) -#define BNX2_MISC_SMB_IN_STATUS_STOP (0x3L<<11) -#define BNX2_MISC_SMB_IN_STATUS_TIMEOUT (0x4L<<11) - -#define BNX2_MISC_SMB_OUT 0x00000888 -#define BNX2_MISC_SMB_OUT_DAT_OUT (0xffL<<0) -#define BNX2_MISC_SMB_OUT_RDY (1L<<8) -#define BNX2_MISC_SMB_OUT_START (1L<<9) -#define BNX2_MISC_SMB_OUT_LAST (1L<<10) -#define BNX2_MISC_SMB_OUT_ACC_TYPE (1L<<11) -#define BNX2_MISC_SMB_OUT_ENB_PEC (1L<<12) -#define BNX2_MISC_SMB_OUT_GET_RX_LEN (1L<<13) -#define BNX2_MISC_SMB_OUT_SMB_READ_LEN (0x3fL<<14) -#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS (0xfL<<20) -#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_OK (0L<<20) -#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_NACK (1L<<20) -#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_UFLOW (2L<<20) -#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_STOP (3L<<20) -#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_TIMEOUT (4L<<20) -#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_LOST (5L<<20) -#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_BADACK (6L<<20) -#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_NACK (9L<<20) -#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_LOST (0xdL<<20) -#define BNX2_MISC_SMB_OUT_SMB_OUT_SLAVEMODE (1L<<24) -#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_EN (1L<<25) -#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_IN (1L<<26) -#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_EN (1L<<27) -#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_IN (1L<<28) - -#define BNX2_MISC_SMB_WATCHDOG 0x0000088c -#define BNX2_MISC_SMB_WATCHDOG_WATCHDOG (0xffffL<<0) - -#define BNX2_MISC_SMB_HEARTBEAT 0x00000890 -#define BNX2_MISC_SMB_HEARTBEAT_HEARTBEAT (0xffffL<<0) - -#define BNX2_MISC_SMB_POLL_ASF 0x00000894 -#define BNX2_MISC_SMB_POLL_ASF_POLL_ASF (0xffffL<<0) - -#define BNX2_MISC_SMB_POLL_LEGACY 0x00000898 -#define BNX2_MISC_SMB_POLL_LEGACY_POLL_LEGACY (0xffffL<<0) - -#define BNX2_MISC_SMB_RETRAN 0x0000089c -#define BNX2_MISC_SMB_RETRAN_RETRAN (0xffL<<0) - -#define BNX2_MISC_SMB_TIMESTAMP 0x000008a0 -#define BNX2_MISC_SMB_TIMESTAMP_TIMESTAMP (0xffffffffL<<0) - -#define BNX2_MISC_PERR_ENA0 0x000008a4 -#define BNX2_MISC_PERR_ENA0_COM_MISC_CTXC (1L<<0) -#define BNX2_MISC_PERR_ENA0_COM_MISC_REGF (1L<<1) -#define BNX2_MISC_PERR_ENA0_COM_MISC_SCPAD (1L<<2) -#define BNX2_MISC_PERR_ENA0_CP_MISC_CTXC (1L<<3) -#define BNX2_MISC_PERR_ENA0_CP_MISC_REGF (1L<<4) -#define BNX2_MISC_PERR_ENA0_CP_MISC_SCPAD (1L<<5) -#define BNX2_MISC_PERR_ENA0_CS_MISC_TMEM (1L<<6) -#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM0 (1L<<7) -#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM1 (1L<<8) -#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM2 (1L<<9) -#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM3 (1L<<10) -#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM4 (1L<<11) -#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM5 (1L<<12) -#define BNX2_MISC_PERR_ENA0_CTX_MISC_PGTBL (1L<<13) -#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR0 (1L<<14) -#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR1 (1L<<15) -#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR2 (1L<<16) -#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR3 (1L<<17) -#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR4 (1L<<18) -#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW0 (1L<<19) -#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW1 (1L<<20) -#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW2 (1L<<21) -#define BNX2_MISC_PERR_ENA0_HC_MISC_DMA (1L<<22) -#define BNX2_MISC_PERR_ENA0_MCP_MISC_REGF (1L<<23) -#define BNX2_MISC_PERR_ENA0_MCP_MISC_SCPAD (1L<<24) -#define BNX2_MISC_PERR_ENA0_MQ_MISC_CTX (1L<<25) -#define BNX2_MISC_PERR_ENA0_RBDC_MISC (1L<<26) -#define BNX2_MISC_PERR_ENA0_RBUF_MISC_MB (1L<<27) -#define 
BNX2_MISC_PERR_ENA0_RBUF_MISC_PTR (1L<<28) -#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPC (1L<<29) -#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPM (1L<<30) -#define BNX2_MISC_PERR_ENA0_RV2P_MISC_CB0REGS (1L<<31) -#define BNX2_MISC_PERR_ENA0_COM_DMAE_PERR_EN_XI (1L<<0) -#define BNX2_MISC_PERR_ENA0_CP_DMAE_PERR_EN_XI (1L<<1) -#define BNX2_MISC_PERR_ENA0_RPM_ACPIBEMEM_PERR_EN_XI (1L<<2) -#define BNX2_MISC_PERR_ENA0_CTX_USAGE_CNT_PERR_EN_XI (1L<<3) -#define BNX2_MISC_PERR_ENA0_CTX_PGTBL_PERR_EN_XI (1L<<4) -#define BNX2_MISC_PERR_ENA0_CTX_CACHE_PERR_EN_XI (1L<<5) -#define BNX2_MISC_PERR_ENA0_CTX_MIRROR_PERR_EN_XI (1L<<6) -#define BNX2_MISC_PERR_ENA0_COM_CTXC_PERR_EN_XI (1L<<7) -#define BNX2_MISC_PERR_ENA0_COM_SCPAD_PERR_EN_XI (1L<<8) -#define BNX2_MISC_PERR_ENA0_CP_CTXC_PERR_EN_XI (1L<<9) -#define BNX2_MISC_PERR_ENA0_CP_SCPAD_PERR_EN_XI (1L<<10) -#define BNX2_MISC_PERR_ENA0_RXP_RBUFC_PERR_EN_XI (1L<<11) -#define BNX2_MISC_PERR_ENA0_RXP_CTXC_PERR_EN_XI (1L<<12) -#define BNX2_MISC_PERR_ENA0_RXP_SCPAD_PERR_EN_XI (1L<<13) -#define BNX2_MISC_PERR_ENA0_TPAT_SCPAD_PERR_EN_XI (1L<<14) -#define BNX2_MISC_PERR_ENA0_TXP_CTXC_PERR_EN_XI (1L<<15) -#define BNX2_MISC_PERR_ENA0_TXP_SCPAD_PERR_EN_XI (1L<<16) -#define BNX2_MISC_PERR_ENA0_CS_TMEM_PERR_EN_XI (1L<<17) -#define BNX2_MISC_PERR_ENA0_MQ_CTX_PERR_EN_XI (1L<<18) -#define BNX2_MISC_PERR_ENA0_RPM_DFIFOMEM_PERR_EN_XI (1L<<19) -#define BNX2_MISC_PERR_ENA0_RPC_DFIFOMEM_PERR_EN_XI (1L<<20) -#define BNX2_MISC_PERR_ENA0_RBUF_PTRMEM_PERR_EN_XI (1L<<21) -#define BNX2_MISC_PERR_ENA0_RBUF_DATAMEM_PERR_EN_XI (1L<<22) -#define BNX2_MISC_PERR_ENA0_RV2P_P2IRAM_PERR_EN_XI (1L<<23) -#define BNX2_MISC_PERR_ENA0_RV2P_P1IRAM_PERR_EN_XI (1L<<24) -#define BNX2_MISC_PERR_ENA0_RV2P_CB1REGS_PERR_EN_XI (1L<<25) -#define BNX2_MISC_PERR_ENA0_RV2P_CB0REGS_PERR_EN_XI (1L<<26) -#define BNX2_MISC_PERR_ENA0_TPBUF_PERR_EN_XI (1L<<27) -#define BNX2_MISC_PERR_ENA0_THBUF_PERR_EN_XI (1L<<28) -#define BNX2_MISC_PERR_ENA0_TDMA_PERR_EN_XI (1L<<29) -#define BNX2_MISC_PERR_ENA0_TBDC_PERR_EN_XI (1L<<30) -#define BNX2_MISC_PERR_ENA0_TSCH_LR_PERR_EN_XI (1L<<31) - -#define BNX2_MISC_PERR_ENA1 0x000008a8 -#define BNX2_MISC_PERR_ENA1_RV2P_MISC_CB1REGS (1L<<0) -#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P1IRAM (1L<<1) -#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P2IRAM (1L<<2) -#define BNX2_MISC_PERR_ENA1_RXP_MISC_CTXC (1L<<3) -#define BNX2_MISC_PERR_ENA1_RXP_MISC_REGF (1L<<4) -#define BNX2_MISC_PERR_ENA1_RXP_MISC_SCPAD (1L<<5) -#define BNX2_MISC_PERR_ENA1_RXP_MISC_RBUFC (1L<<6) -#define BNX2_MISC_PERR_ENA1_TBDC_MISC (1L<<7) -#define BNX2_MISC_PERR_ENA1_TDMA_MISC (1L<<8) -#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB0 (1L<<9) -#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB1 (1L<<10) -#define BNX2_MISC_PERR_ENA1_TPAT_MISC_REGF (1L<<11) -#define BNX2_MISC_PERR_ENA1_TPAT_MISC_SCPAD (1L<<12) -#define BNX2_MISC_PERR_ENA1_TPBUF_MISC_MB (1L<<13) -#define BNX2_MISC_PERR_ENA1_TSCH_MISC_LR (1L<<14) -#define BNX2_MISC_PERR_ENA1_TXP_MISC_CTXC (1L<<15) -#define BNX2_MISC_PERR_ENA1_TXP_MISC_REGF (1L<<16) -#define BNX2_MISC_PERR_ENA1_TXP_MISC_SCPAD (1L<<17) -#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIORX (1L<<18) -#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIOTX (1L<<19) -#define BNX2_MISC_PERR_ENA1_UMP_MISC_RX (1L<<20) -#define BNX2_MISC_PERR_ENA1_UMP_MISC_TX (1L<<21) -#define BNX2_MISC_PERR_ENA1_RDMAQ_MISC (1L<<22) -#define BNX2_MISC_PERR_ENA1_CSQ_MISC (1L<<23) -#define BNX2_MISC_PERR_ENA1_CPQ_MISC (1L<<24) -#define BNX2_MISC_PERR_ENA1_MCPQ_MISC (1L<<25) -#define BNX2_MISC_PERR_ENA1_RV2PMQ_MISC (1L<<26) -#define BNX2_MISC_PERR_ENA1_RV2PPQ_MISC (1L<<27) 
-#define BNX2_MISC_PERR_ENA1_RV2PTQ_MISC (1L<<28) -#define BNX2_MISC_PERR_ENA1_RXPQ_MISC (1L<<29) -#define BNX2_MISC_PERR_ENA1_RXPCQ_MISC (1L<<30) -#define BNX2_MISC_PERR_ENA1_RLUPQ_MISC (1L<<31) -#define BNX2_MISC_PERR_ENA1_RBDC_PERR_EN_XI (1L<<0) -#define BNX2_MISC_PERR_ENA1_RDMA_DFIFO_PERR_EN_XI (1L<<2) -#define BNX2_MISC_PERR_ENA1_HC_STATS_PERR_EN_XI (1L<<3) -#define BNX2_MISC_PERR_ENA1_HC_MSIX_PERR_EN_XI (1L<<4) -#define BNX2_MISC_PERR_ENA1_HC_PRODUCSTB_PERR_EN_XI (1L<<5) -#define BNX2_MISC_PERR_ENA1_HC_CONSUMSTB_PERR_EN_XI (1L<<6) -#define BNX2_MISC_PERR_ENA1_TPATQ_PERR_EN_XI (1L<<7) -#define BNX2_MISC_PERR_ENA1_MCPQ_PERR_EN_XI (1L<<8) -#define BNX2_MISC_PERR_ENA1_TDMAQ_PERR_EN_XI (1L<<9) -#define BNX2_MISC_PERR_ENA1_TXPQ_PERR_EN_XI (1L<<10) -#define BNX2_MISC_PERR_ENA1_COMTQ_PERR_EN_XI (1L<<11) -#define BNX2_MISC_PERR_ENA1_COMQ_PERR_EN_XI (1L<<12) -#define BNX2_MISC_PERR_ENA1_RLUPQ_PERR_EN_XI (1L<<13) -#define BNX2_MISC_PERR_ENA1_RXPQ_PERR_EN_XI (1L<<14) -#define BNX2_MISC_PERR_ENA1_RV2PPQ_PERR_EN_XI (1L<<15) -#define BNX2_MISC_PERR_ENA1_RDMAQ_PERR_EN_XI (1L<<16) -#define BNX2_MISC_PERR_ENA1_TASQ_PERR_EN_XI (1L<<17) -#define BNX2_MISC_PERR_ENA1_TBDRQ_PERR_EN_XI (1L<<18) -#define BNX2_MISC_PERR_ENA1_TSCHQ_PERR_EN_XI (1L<<19) -#define BNX2_MISC_PERR_ENA1_COMXQ_PERR_EN_XI (1L<<20) -#define BNX2_MISC_PERR_ENA1_RXPCQ_PERR_EN_XI (1L<<21) -#define BNX2_MISC_PERR_ENA1_RV2PTQ_PERR_EN_XI (1L<<22) -#define BNX2_MISC_PERR_ENA1_RV2PMQ_PERR_EN_XI (1L<<23) -#define BNX2_MISC_PERR_ENA1_CPQ_PERR_EN_XI (1L<<24) -#define BNX2_MISC_PERR_ENA1_CSQ_PERR_EN_XI (1L<<25) -#define BNX2_MISC_PERR_ENA1_RLUP_CID_PERR_EN_XI (1L<<26) -#define BNX2_MISC_PERR_ENA1_RV2PCS_TMEM_PERR_EN_XI (1L<<27) -#define BNX2_MISC_PERR_ENA1_RV2PCSQ_PERR_EN_XI (1L<<28) -#define BNX2_MISC_PERR_ENA1_MQ_IDX_PERR_EN_XI (1L<<29) - -#define BNX2_MISC_PERR_ENA2 0x000008ac -#define BNX2_MISC_PERR_ENA2_COMQ_MISC (1L<<0) -#define BNX2_MISC_PERR_ENA2_COMXQ_MISC (1L<<1) -#define BNX2_MISC_PERR_ENA2_COMTQ_MISC (1L<<2) -#define BNX2_MISC_PERR_ENA2_TSCHQ_MISC (1L<<3) -#define BNX2_MISC_PERR_ENA2_TBDRQ_MISC (1L<<4) -#define BNX2_MISC_PERR_ENA2_TXPQ_MISC (1L<<5) -#define BNX2_MISC_PERR_ENA2_TDMAQ_MISC (1L<<6) -#define BNX2_MISC_PERR_ENA2_TPATQ_MISC (1L<<7) -#define BNX2_MISC_PERR_ENA2_TASQ_MISC (1L<<8) -#define BNX2_MISC_PERR_ENA2_TGT_FIFO_PERR_EN_XI (1L<<0) -#define BNX2_MISC_PERR_ENA2_UMP_TX_PERR_EN_XI (1L<<1) -#define BNX2_MISC_PERR_ENA2_UMP_RX_PERR_EN_XI (1L<<2) -#define BNX2_MISC_PERR_ENA2_MCP_ROM_PERR_EN_XI (1L<<3) -#define BNX2_MISC_PERR_ENA2_MCP_SCPAD_PERR_EN_XI (1L<<4) -#define BNX2_MISC_PERR_ENA2_HB_MEM_PERR_EN_XI (1L<<5) -#define BNX2_MISC_PERR_ENA2_PCIE_REPLAY_PERR_EN_XI (1L<<6) - -#define BNX2_MISC_DEBUG_VECTOR_SEL 0x000008b0 -#define BNX2_MISC_DEBUG_VECTOR_SEL_0 (0xfffL<<0) -#define BNX2_MISC_DEBUG_VECTOR_SEL_1 (0xfffL<<12) -#define BNX2_MISC_DEBUG_VECTOR_SEL_1_XI (0xfffL<<15) - -#define BNX2_MISC_VREG_CONTROL 0x000008b4 -#define BNX2_MISC_VREG_CONTROL_1_2 (0xfL<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_XI (0xfL<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS14_XI (0L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS12_XI (1L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS10_XI (2L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS8_XI (3L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS6_XI (4L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS4_XI (5L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS2_XI (6L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_NOM_XI (7L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS2_XI (8L<<0) 
-#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS4_XI (9L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS6_XI (10L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS8_XI (11L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS10_XI (12L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS12_XI (13L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS14_XI (14L<<0) -#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS16_XI (15L<<0) -#define BNX2_MISC_VREG_CONTROL_2_5 (0xfL<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_PLUS14 (0L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_PLUS12 (1L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_PLUS10 (2L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_PLUS8 (3L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_PLUS6 (4L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_PLUS4 (5L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_PLUS2 (6L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_NOM (7L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_MINUS2 (8L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_MINUS4 (9L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_MINUS6 (10L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_MINUS8 (11L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_MINUS10 (12L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_MINUS12 (13L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_MINUS14 (14L<<4) -#define BNX2_MISC_VREG_CONTROL_2_5_MINUS16 (15L<<4) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT (0xfL<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS14 (0L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS12 (1L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS10 (2L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS8 (3L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS6 (4L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS4 (5L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS2 (6L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_NOM (7L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS2 (8L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS4 (9L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS6 (10L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS8 (11L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS10 (12L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS12 (13L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS14 (14L<<8) -#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS16 (15L<<8) - -#define BNX2_MISC_FINAL_CLK_CTL_VAL 0x000008b8 -#define BNX2_MISC_FINAL_CLK_CTL_VAL_MISC_FINAL_CLK_CTL_VAL (0x3ffffffL<<6) - -#define BNX2_MISC_GP_HW_CTL0 0x000008bc -#define BNX2_MISC_GP_HW_CTL0_TX_DRIVE (1L<<0) -#define BNX2_MISC_GP_HW_CTL0_RMII_MODE (1L<<1) -#define BNX2_MISC_GP_HW_CTL0_RMII_CRSDV_SEL (1L<<2) -#define BNX2_MISC_GP_HW_CTL0_RVMII_MODE (1L<<3) -#define BNX2_MISC_GP_HW_CTL0_FLASH_SAMP_SCLK_NEGEDGE_TE (1L<<4) -#define BNX2_MISC_GP_HW_CTL0_HIDDEN_REVISION_ID_TE (1L<<5) -#define BNX2_MISC_GP_HW_CTL0_HC_CNTL_TMOUT_CTR_RST_TE (1L<<6) -#define BNX2_MISC_GP_HW_CTL0_RESERVED1_XI (0x7L<<4) -#define BNX2_MISC_GP_HW_CTL0_ENA_CORE_RST_ON_MAIN_PWR_GOING_AWAY (1L<<7) -#define BNX2_MISC_GP_HW_CTL0_ENA_SEL_VAUX_B_IN_L2_TE (1L<<8) -#define BNX2_MISC_GP_HW_CTL0_GRC_BNK_FREE_FIX_TE (1L<<9) -#define BNX2_MISC_GP_HW_CTL0_LED_ACT_SEL_TE (1L<<10) -#define BNX2_MISC_GP_HW_CTL0_RESERVED2_XI (0x7L<<8) -#define BNX2_MISC_GP_HW_CTL0_UP1_DEF0 (1L<<11) -#define BNX2_MISC_GP_HW_CTL0_FIBER_MODE_DIS_DEF (1L<<12) -#define BNX2_MISC_GP_HW_CTL0_FORCE2500_DEF (1L<<13) -#define BNX2_MISC_GP_HW_CTL0_AUTODETECT_DIS_DEF (1L<<14) -#define BNX2_MISC_GP_HW_CTL0_PARALLEL_DETECT_DEF (1L<<15) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI (0xfL<<16) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_3MA (0L<<16) -#define 
BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P5MA (1L<<16) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P0MA (3L<<16) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P5MA (5L<<16) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P0MA (7L<<16) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_PWRDN (15L<<16) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE2DIS (1L<<20) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE1DIS (1L<<21) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT (0x3L<<22) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M6P (0L<<22) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M0P (1L<<22) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P0P (2L<<22) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P6P (3L<<22) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT (0x3L<<24) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M6P (0L<<24) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M0P (1L<<24) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P0P (2L<<24) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P6P (3L<<24) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ (0x3L<<26) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_240UA (0L<<26) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_160UA (1L<<26) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_400UA (2L<<26) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_320UA (3L<<26) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ (0x3L<<28) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_240UA (0L<<28) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_160UA (1L<<28) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_400UA (2L<<28) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_320UA (3L<<28) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ (0x3L<<30) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P57 (0L<<30) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P45 (1L<<30) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P62 (2L<<30) -#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P66 (3L<<30) - -#define BNX2_MISC_GP_HW_CTL1 0x000008c0 -#define BNX2_MISC_GP_HW_CTL1_1_ATTN_BTN_PRSNT_TE (1L<<0) -#define BNX2_MISC_GP_HW_CTL1_1_ATTN_IND_PRSNT_TE (1L<<1) -#define BNX2_MISC_GP_HW_CTL1_1_PWR_IND_PRSNT_TE (1L<<2) -#define BNX2_MISC_GP_HW_CTL1_0_PCIE_LOOPBACK_TE (1L<<3) -#define BNX2_MISC_GP_HW_CTL1_RESERVED_SOFT_XI (0xffffL<<0) -#define BNX2_MISC_GP_HW_CTL1_RESERVED_HARD_XI (0xffffL<<16) - -#define BNX2_MISC_NEW_HW_CTL 0x000008c4 -#define BNX2_MISC_NEW_HW_CTL_MAIN_POR_BYPASS (1L<<0) -#define BNX2_MISC_NEW_HW_CTL_RINGOSC_ENABLE (1L<<1) -#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL0 (1L<<2) -#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL1 (1L<<3) -#define BNX2_MISC_NEW_HW_CTL_RESERVED_SHARED (0xfffL<<4) -#define BNX2_MISC_NEW_HW_CTL_RESERVED_SPLIT (0xffffL<<16) - -#define BNX2_MISC_NEW_CORE_CTL 0x000008c8 -#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_SUCCESS (1L<<0) -#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_REQ (1L<<1) -#define BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE (1L<<16) -#define BNX2_MISC_NEW_CORE_CTL_RESERVED_CMN (0x3fffL<<2) -#define BNX2_MISC_NEW_CORE_CTL_RESERVED_TC (0xffffL<<16) - -#define BNX2_MISC_ECO_HW_CTL 0x000008cc -#define BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN (1L<<0) -#define BNX2_MISC_ECO_HW_CTL_RESERVED_SOFT (0x7fffL<<1) -#define BNX2_MISC_ECO_HW_CTL_RESERVED_HARD (0xffffL<<16) - -#define BNX2_MISC_ECO_CORE_CTL 0x000008d0 -#define BNX2_MISC_ECO_CORE_CTL_RESERVED_SOFT (0xffffL<<0) -#define BNX2_MISC_ECO_CORE_CTL_RESERVED_HARD (0xffffL<<16) - -#define BNX2_MISC_PPIO 0x000008d4 -#define BNX2_MISC_PPIO_VALUE (0xfL<<0) -#define BNX2_MISC_PPIO_SET (0xfL<<8) -#define BNX2_MISC_PPIO_CLR (0xfL<<16) -#define BNX2_MISC_PPIO_FLOAT (0xfL<<24) - -#define 
BNX2_MISC_PPIO_INT 0x000008d8 -#define BNX2_MISC_PPIO_INT_INT_STATE (0xfL<<0) -#define BNX2_MISC_PPIO_INT_OLD_VALUE (0xfL<<8) -#define BNX2_MISC_PPIO_INT_OLD_SET (0xfL<<16) -#define BNX2_MISC_PPIO_INT_OLD_CLR (0xfL<<24) - -#define BNX2_MISC_RESET_NUMS 0x000008dc -#define BNX2_MISC_RESET_NUMS_NUM_HARD_RESETS (0x7L<<0) -#define BNX2_MISC_RESET_NUMS_NUM_PCIE_RESETS (0x7L<<4) -#define BNX2_MISC_RESET_NUMS_NUM_PERSTB_RESETS (0x7L<<8) -#define BNX2_MISC_RESET_NUMS_NUM_CMN_RESETS (0x7L<<12) -#define BNX2_MISC_RESET_NUMS_NUM_PORT_RESETS (0x7L<<16) - -#define BNX2_MISC_CS16_ERR 0x000008e0 -#define BNX2_MISC_CS16_ERR_ENA_PCI (1L<<0) -#define BNX2_MISC_CS16_ERR_ENA_RDMA (1L<<1) -#define BNX2_MISC_CS16_ERR_ENA_TDMA (1L<<2) -#define BNX2_MISC_CS16_ERR_ENA_EMAC (1L<<3) -#define BNX2_MISC_CS16_ERR_ENA_CTX (1L<<4) -#define BNX2_MISC_CS16_ERR_ENA_TBDR (1L<<5) -#define BNX2_MISC_CS16_ERR_ENA_RBDC (1L<<6) -#define BNX2_MISC_CS16_ERR_ENA_COM (1L<<7) -#define BNX2_MISC_CS16_ERR_ENA_CP (1L<<8) -#define BNX2_MISC_CS16_ERR_STA_PCI (1L<<16) -#define BNX2_MISC_CS16_ERR_STA_RDMA (1L<<17) -#define BNX2_MISC_CS16_ERR_STA_TDMA (1L<<18) -#define BNX2_MISC_CS16_ERR_STA_EMAC (1L<<19) -#define BNX2_MISC_CS16_ERR_STA_CTX (1L<<20) -#define BNX2_MISC_CS16_ERR_STA_TBDR (1L<<21) -#define BNX2_MISC_CS16_ERR_STA_RBDC (1L<<22) -#define BNX2_MISC_CS16_ERR_STA_COM (1L<<23) -#define BNX2_MISC_CS16_ERR_STA_CP (1L<<24) - -#define BNX2_MISC_SPIO_EVENT 0x000008e4 -#define BNX2_MISC_SPIO_EVENT_ENABLE (0xffL<<0) - -#define BNX2_MISC_PPIO_EVENT 0x000008e8 -#define BNX2_MISC_PPIO_EVENT_ENABLE (0xfL<<0) - -#define BNX2_MISC_DUAL_MEDIA_CTRL 0x000008ec -#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID (0xffL<<0) -#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_X (0L<<0) -#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C (3L<<0) -#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S (12L<<0) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP (0x7L<<8) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP_PIN (1L<<11) -#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_SIGDET (1L<<12) -#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_SIGDET (1L<<13) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_SIGDET (1L<<14) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_SIGDET (1L<<15) -#define BNX2_MISC_DUAL_MEDIA_CTRL_LCPLL_RST (1L<<16) -#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_RST (1L<<17) -#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_RST (1L<<18) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_RST (1L<<19) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_RST (1L<<20) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL (0x7L<<21) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP (1L<<24) -#define BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE (1L<<25) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ (0xfL<<26) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER1_IDDQ (1L<<26) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER0_IDDQ (2L<<26) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY1_IDDQ (4L<<26) -#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY0_IDDQ (8L<<26) - -#define BNX2_MISC_OTP_CMD1 0x000008f0 -#define BNX2_MISC_OTP_CMD1_FMODE (0x7L<<0) -#define BNX2_MISC_OTP_CMD1_FMODE_IDLE (0L<<0) -#define BNX2_MISC_OTP_CMD1_FMODE_WRITE (1L<<0) -#define BNX2_MISC_OTP_CMD1_FMODE_INIT (2L<<0) -#define BNX2_MISC_OTP_CMD1_FMODE_SET (3L<<0) -#define BNX2_MISC_OTP_CMD1_FMODE_RST (4L<<0) -#define BNX2_MISC_OTP_CMD1_FMODE_VERIFY (5L<<0) -#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED0 (6L<<0) -#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED1 (7L<<0) -#define BNX2_MISC_OTP_CMD1_USEPINS (1L<<8) -#define BNX2_MISC_OTP_CMD1_PROGSEL (1L<<9) -#define 
BNX2_MISC_OTP_CMD1_PROGSTART (1L<<10) -#define BNX2_MISC_OTP_CMD1_PCOUNT (0x7L<<16) -#define BNX2_MISC_OTP_CMD1_PBYP (1L<<19) -#define BNX2_MISC_OTP_CMD1_VSEL (0xfL<<20) -#define BNX2_MISC_OTP_CMD1_TM (0x7L<<27) -#define BNX2_MISC_OTP_CMD1_SADBYP (1L<<30) -#define BNX2_MISC_OTP_CMD1_DEBUG (1L<<31) - -#define BNX2_MISC_OTP_CMD2 0x000008f4 -#define BNX2_MISC_OTP_CMD2_OTP_ROM_ADDR (0x3ffL<<0) -#define BNX2_MISC_OTP_CMD2_DOSEL (0x7fL<<16) -#define BNX2_MISC_OTP_CMD2_DOSEL_0 (0L<<16) -#define BNX2_MISC_OTP_CMD2_DOSEL_1 (1L<<16) -#define BNX2_MISC_OTP_CMD2_DOSEL_127 (127L<<16) - -#define BNX2_MISC_OTP_STATUS 0x000008f8 -#define BNX2_MISC_OTP_STATUS_DATA (0xffL<<0) -#define BNX2_MISC_OTP_STATUS_VALID (1L<<8) -#define BNX2_MISC_OTP_STATUS_BUSY (1L<<9) -#define BNX2_MISC_OTP_STATUS_BUSYSM (1L<<10) -#define BNX2_MISC_OTP_STATUS_DONE (1L<<11) - -#define BNX2_MISC_OTP_SHIFT1_CMD 0x000008fc -#define BNX2_MISC_OTP_SHIFT1_CMD_RESET_MODE_N (1L<<0) -#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_DONE (1L<<1) -#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_START (1L<<2) -#define BNX2_MISC_OTP_SHIFT1_CMD_LOAD_DATA (1L<<3) -#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_SELECT (0x1fL<<8) - -#define BNX2_MISC_OTP_SHIFT1_DATA 0x00000900 -#define BNX2_MISC_OTP_SHIFT2_CMD 0x00000904 -#define BNX2_MISC_OTP_SHIFT2_CMD_RESET_MODE_N (1L<<0) -#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_DONE (1L<<1) -#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_START (1L<<2) -#define BNX2_MISC_OTP_SHIFT2_CMD_LOAD_DATA (1L<<3) -#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_SELECT (0x1fL<<8) - -#define BNX2_MISC_OTP_SHIFT2_DATA 0x00000908 -#define BNX2_MISC_BIST_CS0 0x0000090c -#define BNX2_MISC_BIST_CS0_MBIST_EN (1L<<0) -#define BNX2_MISC_BIST_CS0_BIST_SETUP (0x3L<<1) -#define BNX2_MISC_BIST_CS0_MBIST_ASYNC_RESET (1L<<3) -#define BNX2_MISC_BIST_CS0_MBIST_DONE (1L<<8) -#define BNX2_MISC_BIST_CS0_MBIST_GO (1L<<9) -#define BNX2_MISC_BIST_CS0_BIST_OVERRIDE (1L<<31) - -#define BNX2_MISC_BIST_MEMSTATUS0 0x00000910 -#define BNX2_MISC_BIST_CS1 0x00000914 -#define BNX2_MISC_BIST_CS1_MBIST_EN (1L<<0) -#define BNX2_MISC_BIST_CS1_BIST_SETUP (0x3L<<1) -#define BNX2_MISC_BIST_CS1_MBIST_ASYNC_RESET (1L<<3) -#define BNX2_MISC_BIST_CS1_MBIST_DONE (1L<<8) -#define BNX2_MISC_BIST_CS1_MBIST_GO (1L<<9) - -#define BNX2_MISC_BIST_MEMSTATUS1 0x00000918 -#define BNX2_MISC_BIST_CS2 0x0000091c -#define BNX2_MISC_BIST_CS2_MBIST_EN (1L<<0) -#define BNX2_MISC_BIST_CS2_BIST_SETUP (0x3L<<1) -#define BNX2_MISC_BIST_CS2_MBIST_ASYNC_RESET (1L<<3) -#define BNX2_MISC_BIST_CS2_MBIST_DONE (1L<<8) -#define BNX2_MISC_BIST_CS2_MBIST_GO (1L<<9) - -#define BNX2_MISC_BIST_MEMSTATUS2 0x00000920 -#define BNX2_MISC_BIST_CS3 0x00000924 -#define BNX2_MISC_BIST_CS3_MBIST_EN (1L<<0) -#define BNX2_MISC_BIST_CS3_BIST_SETUP (0x3L<<1) -#define BNX2_MISC_BIST_CS3_MBIST_ASYNC_RESET (1L<<3) -#define BNX2_MISC_BIST_CS3_MBIST_DONE (1L<<8) -#define BNX2_MISC_BIST_CS3_MBIST_GO (1L<<9) - -#define BNX2_MISC_BIST_MEMSTATUS3 0x00000928 -#define BNX2_MISC_BIST_CS4 0x0000092c -#define BNX2_MISC_BIST_CS4_MBIST_EN (1L<<0) -#define BNX2_MISC_BIST_CS4_BIST_SETUP (0x3L<<1) -#define BNX2_MISC_BIST_CS4_MBIST_ASYNC_RESET (1L<<3) -#define BNX2_MISC_BIST_CS4_MBIST_DONE (1L<<8) -#define BNX2_MISC_BIST_CS4_MBIST_GO (1L<<9) - -#define BNX2_MISC_BIST_MEMSTATUS4 0x00000930 -#define BNX2_MISC_BIST_CS5 0x00000934 -#define BNX2_MISC_BIST_CS5_MBIST_EN (1L<<0) -#define BNX2_MISC_BIST_CS5_BIST_SETUP (0x3L<<1) -#define BNX2_MISC_BIST_CS5_MBIST_ASYNC_RESET (1L<<3) -#define BNX2_MISC_BIST_CS5_MBIST_DONE (1L<<8) -#define BNX2_MISC_BIST_CS5_MBIST_GO (1L<<9) - -#define 
BNX2_MISC_BIST_MEMSTATUS5 0x00000938 -#define BNX2_MISC_MEM_TM0 0x0000093c -#define BNX2_MISC_MEM_TM0_PCIE_REPLAY_TM (0xfL<<0) -#define BNX2_MISC_MEM_TM0_MCP_SCPAD (0xfL<<8) -#define BNX2_MISC_MEM_TM0_UMP_TM (0xffL<<16) -#define BNX2_MISC_MEM_TM0_HB_MEM_TM (0xfL<<24) - -#define BNX2_MISC_USPLL_CTRL 0x00000940 -#define BNX2_MISC_USPLL_CTRL_PH_DET_DIS (1L<<0) -#define BNX2_MISC_USPLL_CTRL_FREQ_DET_DIS (1L<<1) -#define BNX2_MISC_USPLL_CTRL_LCPX (0x3fL<<2) -#define BNX2_MISC_USPLL_CTRL_RX (0x3L<<8) -#define BNX2_MISC_USPLL_CTRL_VC_EN (1L<<10) -#define BNX2_MISC_USPLL_CTRL_VCO_MG (0x3L<<11) -#define BNX2_MISC_USPLL_CTRL_KVCO_XF (0x7L<<13) -#define BNX2_MISC_USPLL_CTRL_KVCO_XS (0x7L<<16) -#define BNX2_MISC_USPLL_CTRL_TESTD_EN (1L<<19) -#define BNX2_MISC_USPLL_CTRL_TESTD_SEL (0x7L<<20) -#define BNX2_MISC_USPLL_CTRL_TESTA_EN (1L<<23) -#define BNX2_MISC_USPLL_CTRL_TESTA_SEL (0x3L<<24) -#define BNX2_MISC_USPLL_CTRL_ATTEN_FREF (1L<<26) -#define BNX2_MISC_USPLL_CTRL_DIGITAL_RST (1L<<27) -#define BNX2_MISC_USPLL_CTRL_ANALOG_RST (1L<<28) -#define BNX2_MISC_USPLL_CTRL_LOCK (1L<<29) - -#define BNX2_MISC_PERR_STATUS0 0x00000944 -#define BNX2_MISC_PERR_STATUS0_COM_DMAE_PERR (1L<<0) -#define BNX2_MISC_PERR_STATUS0_CP_DMAE_PERR (1L<<1) -#define BNX2_MISC_PERR_STATUS0_RPM_ACPIBEMEM_PERR (1L<<2) -#define BNX2_MISC_PERR_STATUS0_CTX_USAGE_CNT_PERR (1L<<3) -#define BNX2_MISC_PERR_STATUS0_CTX_PGTBL_PERR (1L<<4) -#define BNX2_MISC_PERR_STATUS0_CTX_CACHE_PERR (1L<<5) -#define BNX2_MISC_PERR_STATUS0_CTX_MIRROR_PERR (1L<<6) -#define BNX2_MISC_PERR_STATUS0_COM_CTXC_PERR (1L<<7) -#define BNX2_MISC_PERR_STATUS0_COM_SCPAD_PERR (1L<<8) -#define BNX2_MISC_PERR_STATUS0_CP_CTXC_PERR (1L<<9) -#define BNX2_MISC_PERR_STATUS0_CP_SCPAD_PERR (1L<<10) -#define BNX2_MISC_PERR_STATUS0_RXP_RBUFC_PERR (1L<<11) -#define BNX2_MISC_PERR_STATUS0_RXP_CTXC_PERR (1L<<12) -#define BNX2_MISC_PERR_STATUS0_RXP_SCPAD_PERR (1L<<13) -#define BNX2_MISC_PERR_STATUS0_TPAT_SCPAD_PERR (1L<<14) -#define BNX2_MISC_PERR_STATUS0_TXP_CTXC_PERR (1L<<15) -#define BNX2_MISC_PERR_STATUS0_TXP_SCPAD_PERR (1L<<16) -#define BNX2_MISC_PERR_STATUS0_CS_TMEM_PERR (1L<<17) -#define BNX2_MISC_PERR_STATUS0_MQ_CTX_PERR (1L<<18) -#define BNX2_MISC_PERR_STATUS0_RPM_DFIFOMEM_PERR (1L<<19) -#define BNX2_MISC_PERR_STATUS0_RPC_DFIFOMEM_PERR (1L<<20) -#define BNX2_MISC_PERR_STATUS0_RBUF_PTRMEM_PERR (1L<<21) -#define BNX2_MISC_PERR_STATUS0_RBUF_DATAMEM_PERR (1L<<22) -#define BNX2_MISC_PERR_STATUS0_RV2P_P2IRAM_PERR (1L<<23) -#define BNX2_MISC_PERR_STATUS0_RV2P_P1IRAM_PERR (1L<<24) -#define BNX2_MISC_PERR_STATUS0_RV2P_CB1REGS_PERR (1L<<25) -#define BNX2_MISC_PERR_STATUS0_RV2P_CB0REGS_PERR (1L<<26) -#define BNX2_MISC_PERR_STATUS0_TPBUF_PERR (1L<<27) -#define BNX2_MISC_PERR_STATUS0_THBUF_PERR (1L<<28) -#define BNX2_MISC_PERR_STATUS0_TDMA_PERR (1L<<29) -#define BNX2_MISC_PERR_STATUS0_TBDC_PERR (1L<<30) -#define BNX2_MISC_PERR_STATUS0_TSCH_LR_PERR (1L<<31) - -#define BNX2_MISC_PERR_STATUS1 0x00000948 -#define BNX2_MISC_PERR_STATUS1_RBDC_PERR (1L<<0) -#define BNX2_MISC_PERR_STATUS1_RDMA_DFIFO_PERR (1L<<2) -#define BNX2_MISC_PERR_STATUS1_HC_STATS_PERR (1L<<3) -#define BNX2_MISC_PERR_STATUS1_HC_MSIX_PERR (1L<<4) -#define BNX2_MISC_PERR_STATUS1_HC_PRODUCSTB_PERR (1L<<5) -#define BNX2_MISC_PERR_STATUS1_HC_CONSUMSTB_PERR (1L<<6) -#define BNX2_MISC_PERR_STATUS1_TPATQ_PERR (1L<<7) -#define BNX2_MISC_PERR_STATUS1_MCPQ_PERR (1L<<8) -#define BNX2_MISC_PERR_STATUS1_TDMAQ_PERR (1L<<9) -#define BNX2_MISC_PERR_STATUS1_TXPQ_PERR (1L<<10) -#define BNX2_MISC_PERR_STATUS1_COMTQ_PERR (1L<<11) -#define 
BNX2_MISC_PERR_STATUS1_COMQ_PERR (1L<<12) -#define BNX2_MISC_PERR_STATUS1_RLUPQ_PERR (1L<<13) -#define BNX2_MISC_PERR_STATUS1_RXPQ_PERR (1L<<14) -#define BNX2_MISC_PERR_STATUS1_RV2PPQ_PERR (1L<<15) -#define BNX2_MISC_PERR_STATUS1_RDMAQ_PERR (1L<<16) -#define BNX2_MISC_PERR_STATUS1_TASQ_PERR (1L<<17) -#define BNX2_MISC_PERR_STATUS1_TBDRQ_PERR (1L<<18) -#define BNX2_MISC_PERR_STATUS1_TSCHQ_PERR (1L<<19) -#define BNX2_MISC_PERR_STATUS1_COMXQ_PERR (1L<<20) -#define BNX2_MISC_PERR_STATUS1_RXPCQ_PERR (1L<<21) -#define BNX2_MISC_PERR_STATUS1_RV2PTQ_PERR (1L<<22) -#define BNX2_MISC_PERR_STATUS1_RV2PMQ_PERR (1L<<23) -#define BNX2_MISC_PERR_STATUS1_CPQ_PERR (1L<<24) -#define BNX2_MISC_PERR_STATUS1_CSQ_PERR (1L<<25) -#define BNX2_MISC_PERR_STATUS1_RLUP_CID_PERR (1L<<26) -#define BNX2_MISC_PERR_STATUS1_RV2PCS_TMEM_PERR (1L<<27) -#define BNX2_MISC_PERR_STATUS1_RV2PCSQ_PERR (1L<<28) -#define BNX2_MISC_PERR_STATUS1_MQ_IDX_PERR (1L<<29) - -#define BNX2_MISC_PERR_STATUS2 0x0000094c -#define BNX2_MISC_PERR_STATUS2_TGT_FIFO_PERR (1L<<0) -#define BNX2_MISC_PERR_STATUS2_UMP_TX_PERR (1L<<1) -#define BNX2_MISC_PERR_STATUS2_UMP_RX_PERR (1L<<2) -#define BNX2_MISC_PERR_STATUS2_MCP_ROM_PERR (1L<<3) -#define BNX2_MISC_PERR_STATUS2_MCP_SCPAD_PERR (1L<<4) -#define BNX2_MISC_PERR_STATUS2_HB_MEM_PERR (1L<<5) -#define BNX2_MISC_PERR_STATUS2_PCIE_REPLAY_PERR (1L<<6) - -#define BNX2_MISC_LCPLL_CTRL0 0x00000950 -#define BNX2_MISC_LCPLL_CTRL0_OAC (0x7L<<0) -#define BNX2_MISC_LCPLL_CTRL0_OAC_NEGTWENTY (0L<<0) -#define BNX2_MISC_LCPLL_CTRL0_OAC_ZERO (1L<<0) -#define BNX2_MISC_LCPLL_CTRL0_OAC_TWENTY (3L<<0) -#define BNX2_MISC_LCPLL_CTRL0_OAC_FORTY (7L<<0) -#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL (0x7L<<3) -#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_360 (0L<<3) -#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_480 (1L<<3) -#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_600 (3L<<3) -#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_720 (7L<<3) -#define BNX2_MISC_LCPLL_CTRL0_BIAS_CTRL (0x3L<<6) -#define BNX2_MISC_LCPLL_CTRL0_PLL_OBSERVE (0x7L<<8) -#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL (0x3L<<11) -#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_0 (0L<<11) -#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_1 (1L<<11) -#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_2 (2L<<11) -#define BNX2_MISC_LCPLL_CTRL0_PLLSEQSTART (1L<<13) -#define BNX2_MISC_LCPLL_CTRL0_RESERVED (1L<<14) -#define BNX2_MISC_LCPLL_CTRL0_CAPRETRY_EN (1L<<15) -#define BNX2_MISC_LCPLL_CTRL0_FREQMONITOR_EN (1L<<16) -#define BNX2_MISC_LCPLL_CTRL0_FREQDETRESTART_EN (1L<<17) -#define BNX2_MISC_LCPLL_CTRL0_FREQDETRETRY_EN (1L<<18) -#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE_EN (1L<<19) -#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE (1L<<20) -#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFPASS (1L<<21) -#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE_EN (1L<<22) -#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE (1L<<23) -#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS_EN (1L<<24) -#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS (1L<<25) -#define BNX2_MISC_LCPLL_CTRL0_CAPRESTART (1L<<26) -#define BNX2_MISC_LCPLL_CTRL0_CAPSELECTM_EN (1L<<27) - -#define BNX2_MISC_LCPLL_CTRL1 0x00000954 -#define BNX2_MISC_LCPLL_CTRL1_CAPSELECTM (0x1fL<<0) -#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN_EN (1L<<5) -#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN (1L<<6) -#define BNX2_MISC_LCPLL_CTRL1_SLOWDN_XOR (1L<<7) - -#define BNX2_MISC_LCPLL_STATUS 0x00000958 -#define BNX2_MISC_LCPLL_STATUS_FREQDONE_SM (1L<<0) -#define BNX2_MISC_LCPLL_STATUS_FREQPASS_SM (1L<<1) -#define BNX2_MISC_LCPLL_STATUS_PLLSEQDONE (1L<<2) -#define BNX2_MISC_LCPLL_STATUS_PLLSEQPASS (1L<<3) -#define 
BNX2_MISC_LCPLL_STATUS_PLLSTATE (0x7L<<4)
-#define BNX2_MISC_LCPLL_STATUS_CAPSTATE (0x7L<<7)
-#define BNX2_MISC_LCPLL_STATUS_CAPSELECT (0x1fL<<10)
-#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR (1L<<15)
-#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_0 (0L<<15)
-#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_1 (1L<<15)
-
-#define BNX2_MISC_OSCFUNDS_CTRL 0x0000095c
-#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON (1L<<5)
-#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_OFF (0L<<5)
-#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_ON (1L<<5)
-#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM (0x3L<<6)
-#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_0 (0L<<6)
-#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_1 (1L<<6)
-#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_2 (2L<<6)
-#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_3 (3L<<6)
-#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ (0x3L<<8)
-#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_0 (0L<<8)
-#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_1 (1L<<8)
-#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_2 (2L<<8)
-#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_3 (3L<<8)
-#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ (0x3L<<10)
-#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_0 (0L<<10)
-#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_1 (1L<<10)
-#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_2 (2L<<10)
-#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_3 (3L<<10)
-
-
-/*
- * nvm_reg definition
- * offset: 0x6400
- */
-#define BNX2_NVM_COMMAND 0x00006400
-#define BNX2_NVM_COMMAND_RST (1L<<0)
-#define BNX2_NVM_COMMAND_DONE (1L<<3)
-#define BNX2_NVM_COMMAND_DOIT (1L<<4)
-#define BNX2_NVM_COMMAND_WR (1L<<5)
-#define BNX2_NVM_COMMAND_ERASE (1L<<6)
-#define BNX2_NVM_COMMAND_FIRST (1L<<7)
-#define BNX2_NVM_COMMAND_LAST (1L<<8)
-#define BNX2_NVM_COMMAND_WREN (1L<<16)
-#define BNX2_NVM_COMMAND_WRDI (1L<<17)
-#define BNX2_NVM_COMMAND_EWSR (1L<<18)
-#define BNX2_NVM_COMMAND_WRSR (1L<<19)
-#define BNX2_NVM_COMMAND_RD_ID (1L<<20)
-#define BNX2_NVM_COMMAND_RD_STATUS (1L<<21)
-#define BNX2_NVM_COMMAND_MODE_256 (1L<<22)
-
-#define BNX2_NVM_STATUS 0x00006404
-#define BNX2_NVM_STATUS_PI_FSM_STATE (0xfL<<0)
-#define BNX2_NVM_STATUS_EE_FSM_STATE (0xfL<<4)
-#define BNX2_NVM_STATUS_EQ_FSM_STATE (0xfL<<8)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_XI (0x1fL<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_IDLE_XI (0L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD0_XI (1L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD1_XI (2L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH0_XI (3L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH1_XI (4L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ADDR0_XI (5L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA0_XI (6L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA1_XI (7L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA2_XI (8L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA0_XI (9L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA1_XI (10L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA2_XI (11L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID0_XI (12L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID1_XI (13L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID2_XI (14L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID3_XI (15L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID4_XI (16L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CHECK_BUSY0_XI (17L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ST_WREN_XI (18L<<0)
-#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WAIT_XI (19L<<0)
-
-#define
BNX2_NVM_WRITE 0x00006408 -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE (0xffffffffL<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_BIT_BANG (0L<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EECLK (1L<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EEDATA (2L<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK (4L<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B (8L<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO (16L<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI (32L<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI_XI (1L<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO_XI (2L<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B_XI (4L<<0) -#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK_XI (8L<<0) - -#define BNX2_NVM_ADDR 0x0000640c -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_BIT_BANG (0L<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EECLK (1L<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EEDATA (2L<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK (4L<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B (8L<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO (16L<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI (32L<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI_XI (1L<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO_XI (2L<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B_XI (4L<<0) -#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK_XI (8L<<0) - -#define BNX2_NVM_READ 0x00006410 -#define BNX2_NVM_READ_NVM_READ_VALUE (0xffffffffL<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_BIT_BANG (0L<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_EECLK (1L<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_EEDATA (2L<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK (4L<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B (8L<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_SO (16L<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_SI (32L<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_SI_XI (1L<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_SO_XI (2L<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B_XI (4L<<0) -#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK_XI (8L<<0) - -#define BNX2_NVM_CFG1 0x00006414 -#define BNX2_NVM_CFG1_FLASH_MODE (1L<<0) -#define BNX2_NVM_CFG1_BUFFER_MODE (1L<<1) -#define BNX2_NVM_CFG1_PASS_MODE (1L<<2) -#define BNX2_NVM_CFG1_BITBANG_MODE (1L<<3) -#define BNX2_NVM_CFG1_STATUS_BIT (0x7L<<4) -#define BNX2_NVM_CFG1_STATUS_BIT_FLASH_RDY (0L<<4) -#define BNX2_NVM_CFG1_STATUS_BIT_BUFFER_RDY (7L<<4) -#define BNX2_NVM_CFG1_SPI_CLK_DIV (0xfL<<7) -#define BNX2_NVM_CFG1_SEE_CLK_DIV (0x7ffL<<11) -#define BNX2_NVM_CFG1_STRAP_CONTROL_0 (1L<<23) -#define BNX2_NVM_CFG1_PROTECT_MODE (1L<<24) -#define BNX2_NVM_CFG1_FLASH_SIZE (1L<<25) -#define BNX2_NVM_CFG1_FW_USTRAP_1 (1L<<26) -#define BNX2_NVM_CFG1_FW_USTRAP_0 (1L<<27) -#define BNX2_NVM_CFG1_FW_USTRAP_2 (1L<<28) -#define BNX2_NVM_CFG1_FW_USTRAP_3 (1L<<29) -#define BNX2_NVM_CFG1_FW_FLASH_TYPE_EN (1L<<30) -#define BNX2_NVM_CFG1_COMPAT_BYPASSS (1L<<31) - -#define BNX2_NVM_CFG2 0x00006418 -#define BNX2_NVM_CFG2_ERASE_CMD (0xffL<<0) -#define BNX2_NVM_CFG2_DUMMY (0xffL<<8) -#define BNX2_NVM_CFG2_STATUS_CMD (0xffL<<16) -#define BNX2_NVM_CFG2_READ_ID (0xffL<<24) - -#define BNX2_NVM_CFG3 0x0000641c -#define BNX2_NVM_CFG3_BUFFER_RD_CMD (0xffL<<0) -#define BNX2_NVM_CFG3_WRITE_CMD (0xffL<<8) -#define BNX2_NVM_CFG3_BUFFER_WRITE_CMD (0xffL<<16) -#define BNX2_NVM_CFG3_READ_CMD (0xffL<<24) - -#define BNX2_NVM_SW_ARB 0x00006420 -#define BNX2_NVM_SW_ARB_ARB_REQ_SET0 (1L<<0) -#define BNX2_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1) -#define BNX2_NVM_SW_ARB_ARB_REQ_SET2 (1L<<2) -#define BNX2_NVM_SW_ARB_ARB_REQ_SET3 (1L<<3) -#define 
BNX2_NVM_SW_ARB_ARB_REQ_CLR0 (1L<<4) -#define BNX2_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5) -#define BNX2_NVM_SW_ARB_ARB_REQ_CLR2 (1L<<6) -#define BNX2_NVM_SW_ARB_ARB_REQ_CLR3 (1L<<7) -#define BNX2_NVM_SW_ARB_ARB_ARB0 (1L<<8) -#define BNX2_NVM_SW_ARB_ARB_ARB1 (1L<<9) -#define BNX2_NVM_SW_ARB_ARB_ARB2 (1L<<10) -#define BNX2_NVM_SW_ARB_ARB_ARB3 (1L<<11) -#define BNX2_NVM_SW_ARB_REQ0 (1L<<12) -#define BNX2_NVM_SW_ARB_REQ1 (1L<<13) -#define BNX2_NVM_SW_ARB_REQ2 (1L<<14) -#define BNX2_NVM_SW_ARB_REQ3 (1L<<15) - -#define BNX2_NVM_ACCESS_ENABLE 0x00006424 -#define BNX2_NVM_ACCESS_ENABLE_EN (1L<<0) -#define BNX2_NVM_ACCESS_ENABLE_WR_EN (1L<<1) - -#define BNX2_NVM_WRITE1 0x00006428 -#define BNX2_NVM_WRITE1_WREN_CMD (0xffL<<0) -#define BNX2_NVM_WRITE1_WRDI_CMD (0xffL<<8) -#define BNX2_NVM_WRITE1_SR_DATA (0xffL<<16) - -#define BNX2_NVM_CFG4 0x0000642c -#define BNX2_NVM_CFG4_FLASH_SIZE (0x7L<<0) -#define BNX2_NVM_CFG4_FLASH_SIZE_1MBIT (0L<<0) -#define BNX2_NVM_CFG4_FLASH_SIZE_2MBIT (1L<<0) -#define BNX2_NVM_CFG4_FLASH_SIZE_4MBIT (2L<<0) -#define BNX2_NVM_CFG4_FLASH_SIZE_8MBIT (3L<<0) -#define BNX2_NVM_CFG4_FLASH_SIZE_16MBIT (4L<<0) -#define BNX2_NVM_CFG4_FLASH_SIZE_32MBIT (5L<<0) -#define BNX2_NVM_CFG4_FLASH_SIZE_64MBIT (6L<<0) -#define BNX2_NVM_CFG4_FLASH_SIZE_128MBIT (7L<<0) -#define BNX2_NVM_CFG4_FLASH_VENDOR (1L<<3) -#define BNX2_NVM_CFG4_FLASH_VENDOR_ST (0L<<3) -#define BNX2_NVM_CFG4_FLASH_VENDOR_ATMEL (1L<<3) -#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC (0x3L<<4) -#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT8 (0L<<4) -#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT9 (1L<<4) -#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT10 (2L<<4) -#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT11 (3L<<4) -#define BNX2_NVM_CFG4_STATUS_BIT_POLARITY (1L<<6) -#define BNX2_NVM_CFG4_RESERVED (0x1ffffffL<<7) - -#define BNX2_NVM_RECONFIG 0x00006430 -#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE (0xfL<<0) -#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ST (0L<<0) -#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ATMEL (1L<<0) -#define BNX2_NVM_RECONFIG_RECONFIG_STRAP_VALUE (0xfL<<4) -#define BNX2_NVM_RECONFIG_RESERVED (0x7fffffL<<8) -#define BNX2_NVM_RECONFIG_RECONFIG_DONE (1L<<31) - - - -/* - * dma_reg definition - * offset: 0xc00 - */ -#define BNX2_DMA_COMMAND 0x00000c00 -#define BNX2_DMA_COMMAND_ENABLE (1L<<0) - -#define BNX2_DMA_STATUS 0x00000c04 -#define BNX2_DMA_STATUS_PAR_ERROR_STATE (1L<<0) -#define BNX2_DMA_STATUS_READ_TRANSFERS_STAT (1L<<16) -#define BNX2_DMA_STATUS_READ_DELAY_PCI_CLKS_STAT (1L<<17) -#define BNX2_DMA_STATUS_BIG_READ_TRANSFERS_STAT (1L<<18) -#define BNX2_DMA_STATUS_BIG_READ_DELAY_PCI_CLKS_STAT (1L<<19) -#define BNX2_DMA_STATUS_BIG_READ_RETRY_AFTER_DATA_STAT (1L<<20) -#define BNX2_DMA_STATUS_WRITE_TRANSFERS_STAT (1L<<21) -#define BNX2_DMA_STATUS_WRITE_DELAY_PCI_CLKS_STAT (1L<<22) -#define BNX2_DMA_STATUS_BIG_WRITE_TRANSFERS_STAT (1L<<23) -#define BNX2_DMA_STATUS_BIG_WRITE_DELAY_PCI_CLKS_STAT (1L<<24) -#define BNX2_DMA_STATUS_BIG_WRITE_RETRY_AFTER_DATA_STAT (1L<<25) -#define BNX2_DMA_STATUS_GLOBAL_ERR_XI (1L<<0) -#define BNX2_DMA_STATUS_BME_XI (1L<<4) - -#define BNX2_DMA_CONFIG 0x00000c08 -#define BNX2_DMA_CONFIG_DATA_BYTE_SWAP (1L<<0) -#define BNX2_DMA_CONFIG_DATA_WORD_SWAP (1L<<1) -#define BNX2_DMA_CONFIG_CNTL_BYTE_SWAP (1L<<4) -#define BNX2_DMA_CONFIG_CNTL_WORD_SWAP (1L<<5) -#define BNX2_DMA_CONFIG_ONE_DMA (1L<<6) -#define BNX2_DMA_CONFIG_CNTL_TWO_DMA (1L<<7) -#define BNX2_DMA_CONFIG_CNTL_FPGA_MODE (1L<<8) -#define BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA (1L<<10) -#define BNX2_DMA_CONFIG_CNTL_PCI_COMP_DLY 
(1L<<11) -#define BNX2_DMA_CONFIG_NO_RCHANS_IN_USE (0xfL<<12) -#define BNX2_DMA_CONFIG_NO_WCHANS_IN_USE (0xfL<<16) -#define BNX2_DMA_CONFIG_PCI_CLK_CMP_BITS (0x7L<<20) -#define BNX2_DMA_CONFIG_PCI_FAST_CLK_CMP (1L<<23) -#define BNX2_DMA_CONFIG_BIG_SIZE (0xfL<<24) -#define BNX2_DMA_CONFIG_BIG_SIZE_NONE (0x0L<<24) -#define BNX2_DMA_CONFIG_BIG_SIZE_64 (0x1L<<24) -#define BNX2_DMA_CONFIG_BIG_SIZE_128 (0x2L<<24) -#define BNX2_DMA_CONFIG_BIG_SIZE_256 (0x4L<<24) -#define BNX2_DMA_CONFIG_BIG_SIZE_512 (0x8L<<24) -#define BNX2_DMA_CONFIG_DAT_WBSWAP_MODE_XI (0x3L<<0) -#define BNX2_DMA_CONFIG_CTL_WBSWAP_MODE_XI (0x3L<<4) -#define BNX2_DMA_CONFIG_MAX_PL_XI (0x7L<<12) -#define BNX2_DMA_CONFIG_MAX_PL_128B_XI (0L<<12) -#define BNX2_DMA_CONFIG_MAX_PL_256B_XI (1L<<12) -#define BNX2_DMA_CONFIG_MAX_PL_512B_XI (2L<<12) -#define BNX2_DMA_CONFIG_MAX_PL_EN_XI (1L<<15) -#define BNX2_DMA_CONFIG_MAX_RRS_XI (0x7L<<16) -#define BNX2_DMA_CONFIG_MAX_RRS_128B_XI (0L<<16) -#define BNX2_DMA_CONFIG_MAX_RRS_256B_XI (1L<<16) -#define BNX2_DMA_CONFIG_MAX_RRS_512B_XI (2L<<16) -#define BNX2_DMA_CONFIG_MAX_RRS_1024B_XI (3L<<16) -#define BNX2_DMA_CONFIG_MAX_RRS_2048B_XI (4L<<16) -#define BNX2_DMA_CONFIG_MAX_RRS_4096B_XI (5L<<16) -#define BNX2_DMA_CONFIG_MAX_RRS_EN_XI (1L<<19) -#define BNX2_DMA_CONFIG_NO_64SWAP_EN_XI (1L<<31) - -#define BNX2_DMA_BLACKOUT 0x00000c0c -#define BNX2_DMA_BLACKOUT_RD_RETRY_BLACKOUT (0xffL<<0) -#define BNX2_DMA_BLACKOUT_2ND_RD_RETRY_BLACKOUT (0xffL<<8) -#define BNX2_DMA_BLACKOUT_WR_RETRY_BLACKOUT (0xffL<<16) - -#define BNX2_DMA_READ_MASTER_SETTING_0 0x00000c10 -#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_NO_SNOOP (1L<<0) -#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_RELAX_ORDER (1L<<1) -#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PRIORITY (1L<<2) -#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_TRAFFIC_CLASS (0x7L<<4) -#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PARAM_EN (1L<<7) -#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_NO_SNOOP (1L<<8) -#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_RELAX_ORDER (1L<<9) -#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PRIORITY (1L<<10) -#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_TRAFFIC_CLASS (0x7L<<12) -#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PARAM_EN (1L<<15) -#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_NO_SNOOP (1L<<16) -#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_RELAX_ORDER (1L<<17) -#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PRIORITY (1L<<18) -#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_TRAFFIC_CLASS (0x7L<<20) -#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PARAM_EN (1L<<23) -#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_NO_SNOOP (1L<<24) -#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_RELAX_ORDER (1L<<25) -#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PRIORITY (1L<<26) -#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_TRAFFIC_CLASS (0x7L<<28) -#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PARAM_EN (1L<<31) - -#define BNX2_DMA_READ_MASTER_SETTING_1 0x00000c14 -#define BNX2_DMA_READ_MASTER_SETTING_1_COM_NO_SNOOP (1L<<0) -#define BNX2_DMA_READ_MASTER_SETTING_1_COM_RELAX_ORDER (1L<<1) -#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PRIORITY (1L<<2) -#define BNX2_DMA_READ_MASTER_SETTING_1_COM_TRAFFIC_CLASS (0x7L<<4) -#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PARAM_EN (1L<<7) -#define BNX2_DMA_READ_MASTER_SETTING_1_CP_NO_SNOOP (1L<<8) -#define BNX2_DMA_READ_MASTER_SETTING_1_CP_RELAX_ORDER (1L<<9) -#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PRIORITY (1L<<10) -#define BNX2_DMA_READ_MASTER_SETTING_1_CP_TRAFFIC_CLASS (0x7L<<12) -#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PARAM_EN (1L<<15) - -#define 
BNX2_DMA_WRITE_MASTER_SETTING_0 0x00000c18 -#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_NO_SNOOP (1L<<0) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_RELAX_ORDER (1L<<1) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PRIORITY (1L<<2) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_CS_VLD (1L<<3) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_TRAFFIC_CLASS (0x7L<<4) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PARAM_EN (1L<<7) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_NO_SNOOP (1L<<8) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_RELAX_ORDER (1L<<9) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PRIORITY (1L<<10) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_CS_VLD (1L<<11) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_TRAFFIC_CLASS (0x7L<<12) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PARAM_EN (1L<<15) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_NO_SNOOP (1L<<24) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_RELAX_ORDER (1L<<25) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PRIORITY (1L<<26) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_CS_VLD (1L<<27) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_TRAFFIC_CLASS (0x7L<<28) -#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PARAM_EN (1L<<31) - -#define BNX2_DMA_WRITE_MASTER_SETTING_1 0x00000c1c -#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_NO_SNOOP (1L<<0) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_RELAX_ORDER (1L<<1) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PRIORITY (1L<<2) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_CS_VLD (1L<<3) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_TRAFFIC_CLASS (0x7L<<4) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PARAM_EN (1L<<7) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_NO_SNOOP (1L<<8) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_RELAX_ORDER (1L<<9) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PRIORITY (1L<<10) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_CS_VLD (1L<<11) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_TRAFFIC_CLASS (0x7L<<12) -#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PARAM_EN (1L<<15) - -#define BNX2_DMA_ARBITER 0x00000c20 -#define BNX2_DMA_ARBITER_NUM_READS (0x7L<<0) -#define BNX2_DMA_ARBITER_WR_ARB_MODE (1L<<4) -#define BNX2_DMA_ARBITER_WR_ARB_MODE_STRICT (0L<<4) -#define BNX2_DMA_ARBITER_WR_ARB_MODE_RND_RBN (1L<<4) -#define BNX2_DMA_ARBITER_RD_ARB_MODE (0x3L<<5) -#define BNX2_DMA_ARBITER_RD_ARB_MODE_STRICT (0L<<5) -#define BNX2_DMA_ARBITER_RD_ARB_MODE_RND_RBN (1L<<5) -#define BNX2_DMA_ARBITER_RD_ARB_MODE_WGT_RND_RBN (2L<<5) -#define BNX2_DMA_ARBITER_ALT_MODE_EN (1L<<8) -#define BNX2_DMA_ARBITER_RR_MODE (1L<<9) -#define BNX2_DMA_ARBITER_TIMER_MODE (1L<<10) -#define BNX2_DMA_ARBITER_OUSTD_READ_REQ (0xfL<<12) - -#define BNX2_DMA_ARB_TIMERS 0x00000c24 -#define BNX2_DMA_ARB_TIMERS_RD_DRR_WAIT_TIME (0xffL<<0) -#define BNX2_DMA_ARB_TIMERS_TM_MIN_TIMEOUT (0xffL<<12) -#define BNX2_DMA_ARB_TIMERS_TM_MAX_TIMEOUT (0xfffL<<20) - -#define BNX2_DMA_DEBUG_VECT_PEEK 0x00000c2c -#define BNX2_DMA_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) -#define BNX2_DMA_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) -#define BNX2_DMA_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) -#define BNX2_DMA_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) -#define BNX2_DMA_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) -#define BNX2_DMA_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) - -#define BNX2_DMA_TAG_RAM_00 0x00000c30 -#define BNX2_DMA_TAG_RAM_00_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_00_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_00_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_00_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_00_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_00_MASTER_COM (3L<<4) 
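The BNX2_DMA_TAG_RAM_nn entries here repeat an identical field layout (CHANNEL, MASTER, SWAP, FUNCTION, VALID) at consecutive 32-bit offsets from 0xc30, so the tag RAM can also be walked with a computed offset instead of the per-entry names. A minimal sketch over the entries visible here (00 through 10), again using the hypothetical reg_rd() helper:

/* Sketch only: iterate the DMA tag RAM at its 4-byte stride, reusing
 * the entry-0 field masks, which are the same for every entry.
 */
static void dump_dma_tag_ram(void)
{
	int i;

	for (i = 0; i <= 10; i++) {
		u32 tag = reg_rd(BNX2_DMA_TAG_RAM_00 + 4 * i);

		if (tag & BNX2_DMA_TAG_RAM_00_VALID)
			printk("tag %d: channel %u master %u\n", i,
			       (u32)(tag & BNX2_DMA_TAG_RAM_00_CHANNEL),
			       (u32)((tag & BNX2_DMA_TAG_RAM_00_MASTER) >> 4));
	}
}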
-#define BNX2_DMA_TAG_RAM_00_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_00_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_00_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_00_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_00_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_00_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_00_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_00_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_01 0x00000c34 -#define BNX2_DMA_TAG_RAM_01_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_01_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_01_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_01_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_01_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_01_MASTER_COM (3L<<4) -#define BNX2_DMA_TAG_RAM_01_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_01_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_01_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_01_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_01_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_01_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_01_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_01_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_02 0x00000c38 -#define BNX2_DMA_TAG_RAM_02_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_02_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_02_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_02_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_02_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_02_MASTER_COM (3L<<4) -#define BNX2_DMA_TAG_RAM_02_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_02_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_02_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_02_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_02_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_02_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_02_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_02_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_03 0x00000c3c -#define BNX2_DMA_TAG_RAM_03_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_03_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_03_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_03_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_03_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_03_MASTER_COM (3L<<4) -#define BNX2_DMA_TAG_RAM_03_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_03_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_03_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_03_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_03_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_03_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_03_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_03_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_04 0x00000c40 -#define BNX2_DMA_TAG_RAM_04_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_04_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_04_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_04_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_04_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_04_MASTER_COM (3L<<4) -#define BNX2_DMA_TAG_RAM_04_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_04_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_04_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_04_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_04_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_04_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_04_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_04_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_05 0x00000c44 -#define BNX2_DMA_TAG_RAM_05_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_05_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_05_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_05_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_05_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_05_MASTER_COM (3L<<4) -#define 
BNX2_DMA_TAG_RAM_05_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_05_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_05_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_05_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_05_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_05_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_05_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_05_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_06 0x00000c48 -#define BNX2_DMA_TAG_RAM_06_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_06_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_06_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_06_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_06_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_06_MASTER_COM (3L<<4) -#define BNX2_DMA_TAG_RAM_06_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_06_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_06_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_06_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_06_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_06_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_06_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_06_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_07 0x00000c4c -#define BNX2_DMA_TAG_RAM_07_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_07_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_07_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_07_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_07_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_07_MASTER_COM (3L<<4) -#define BNX2_DMA_TAG_RAM_07_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_07_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_07_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_07_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_07_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_07_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_07_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_07_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_08 0x00000c50 -#define BNX2_DMA_TAG_RAM_08_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_08_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_08_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_08_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_08_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_08_MASTER_COM (3L<<4) -#define BNX2_DMA_TAG_RAM_08_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_08_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_08_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_08_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_08_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_08_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_08_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_08_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_09 0x00000c54 -#define BNX2_DMA_TAG_RAM_09_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_09_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_09_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_09_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_09_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_09_MASTER_COM (3L<<4) -#define BNX2_DMA_TAG_RAM_09_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_09_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_09_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_09_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_09_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_09_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_09_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_09_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_10 0x00000c58 -#define BNX2_DMA_TAG_RAM_10_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_10_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_10_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_10_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_10_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_10_MASTER_COM (3L<<4) -#define BNX2_DMA_TAG_RAM_10_MASTER_CP 
(4L<<4) -#define BNX2_DMA_TAG_RAM_10_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_10_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_10_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_10_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_10_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_10_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_10_VALID (1L<<10) - -#define BNX2_DMA_TAG_RAM_11 0x00000c5c -#define BNX2_DMA_TAG_RAM_11_CHANNEL (0xfL<<0) -#define BNX2_DMA_TAG_RAM_11_MASTER (0x7L<<4) -#define BNX2_DMA_TAG_RAM_11_MASTER_CTX (0L<<4) -#define BNX2_DMA_TAG_RAM_11_MASTER_RBDC (1L<<4) -#define BNX2_DMA_TAG_RAM_11_MASTER_TBDC (2L<<4) -#define BNX2_DMA_TAG_RAM_11_MASTER_COM (3L<<4) -#define BNX2_DMA_TAG_RAM_11_MASTER_CP (4L<<4) -#define BNX2_DMA_TAG_RAM_11_MASTER_TDMA (5L<<4) -#define BNX2_DMA_TAG_RAM_11_SWAP (0x3L<<7) -#define BNX2_DMA_TAG_RAM_11_SWAP_CONFIG (0L<<7) -#define BNX2_DMA_TAG_RAM_11_SWAP_DATA (1L<<7) -#define BNX2_DMA_TAG_RAM_11_SWAP_CONTROL (2L<<7) -#define BNX2_DMA_TAG_RAM_11_FUNCTION (1L<<9) -#define BNX2_DMA_TAG_RAM_11_VALID (1L<<10) - -#define BNX2_DMA_RCHAN_STAT_22 0x00000c60 -#define BNX2_DMA_RCHAN_STAT_30 0x00000c64 -#define BNX2_DMA_RCHAN_STAT_31 0x00000c68 -#define BNX2_DMA_RCHAN_STAT_32 0x00000c6c -#define BNX2_DMA_RCHAN_STAT_40 0x00000c70 -#define BNX2_DMA_RCHAN_STAT_41 0x00000c74 -#define BNX2_DMA_RCHAN_STAT_42 0x00000c78 -#define BNX2_DMA_RCHAN_STAT_50 0x00000c7c -#define BNX2_DMA_RCHAN_STAT_51 0x00000c80 -#define BNX2_DMA_RCHAN_STAT_52 0x00000c84 -#define BNX2_DMA_RCHAN_STAT_60 0x00000c88 -#define BNX2_DMA_RCHAN_STAT_61 0x00000c8c -#define BNX2_DMA_RCHAN_STAT_62 0x00000c90 -#define BNX2_DMA_RCHAN_STAT_70 0x00000c94 -#define BNX2_DMA_RCHAN_STAT_71 0x00000c98 -#define BNX2_DMA_RCHAN_STAT_72 0x00000c9c -#define BNX2_DMA_WCHAN_STAT_00 0x00000ca0 -#define BNX2_DMA_WCHAN_STAT_00_WCHAN_STA_HOST_ADDR_LOW (0xffffffffL<<0) - -#define BNX2_DMA_WCHAN_STAT_01 0x00000ca4 -#define BNX2_DMA_WCHAN_STAT_01_WCHAN_STA_HOST_ADDR_HIGH (0xffffffffL<<0) - -#define BNX2_DMA_WCHAN_STAT_02 0x00000ca8 -#define BNX2_DMA_WCHAN_STAT_02_LENGTH (0xffffL<<0) -#define BNX2_DMA_WCHAN_STAT_02_WORD_SWAP (1L<<16) -#define BNX2_DMA_WCHAN_STAT_02_BYTE_SWAP (1L<<17) -#define BNX2_DMA_WCHAN_STAT_02_PRIORITY_LVL (1L<<18) - -#define BNX2_DMA_WCHAN_STAT_10 0x00000cac -#define BNX2_DMA_WCHAN_STAT_11 0x00000cb0 -#define BNX2_DMA_WCHAN_STAT_12 0x00000cb4 -#define BNX2_DMA_WCHAN_STAT_20 0x00000cb8 -#define BNX2_DMA_WCHAN_STAT_21 0x00000cbc -#define BNX2_DMA_WCHAN_STAT_22 0x00000cc0 -#define BNX2_DMA_WCHAN_STAT_30 0x00000cc4 -#define BNX2_DMA_WCHAN_STAT_31 0x00000cc8 -#define BNX2_DMA_WCHAN_STAT_32 0x00000ccc -#define BNX2_DMA_WCHAN_STAT_40 0x00000cd0 -#define BNX2_DMA_WCHAN_STAT_41 0x00000cd4 -#define BNX2_DMA_WCHAN_STAT_42 0x00000cd8 -#define BNX2_DMA_WCHAN_STAT_50 0x00000cdc -#define BNX2_DMA_WCHAN_STAT_51 0x00000ce0 -#define BNX2_DMA_WCHAN_STAT_52 0x00000ce4 -#define BNX2_DMA_WCHAN_STAT_60 0x00000ce8 -#define BNX2_DMA_WCHAN_STAT_61 0x00000cec -#define BNX2_DMA_WCHAN_STAT_62 0x00000cf0 -#define BNX2_DMA_WCHAN_STAT_70 0x00000cf4 -#define BNX2_DMA_WCHAN_STAT_71 0x00000cf8 -#define BNX2_DMA_WCHAN_STAT_72 0x00000cfc -#define BNX2_DMA_ARB_STAT_00 0x00000d00 -#define BNX2_DMA_ARB_STAT_00_MASTER (0xffffL<<0) -#define BNX2_DMA_ARB_STAT_00_MASTER_ENC (0xffL<<16) -#define BNX2_DMA_ARB_STAT_00_CUR_BINMSTR (0xffL<<24) - -#define BNX2_DMA_ARB_STAT_01 0x00000d04 -#define BNX2_DMA_ARB_STAT_01_LPR_RPTR (0xfL<<0) -#define BNX2_DMA_ARB_STAT_01_LPR_WPTR (0xfL<<4) -#define BNX2_DMA_ARB_STAT_01_LPB_RPTR (0xfL<<8) -#define BNX2_DMA_ARB_STAT_01_LPB_WPTR 
(0xfL<<12) -#define BNX2_DMA_ARB_STAT_01_HPR_RPTR (0xfL<<16) -#define BNX2_DMA_ARB_STAT_01_HPR_WPTR (0xfL<<20) -#define BNX2_DMA_ARB_STAT_01_HPB_RPTR (0xfL<<24) -#define BNX2_DMA_ARB_STAT_01_HPB_WPTR (0xfL<<28) - -#define BNX2_DMA_FUSE_CTRL0_CMD 0x00000f00 -#define BNX2_DMA_FUSE_CTRL0_CMD_PWRUP_DONE (1L<<0) -#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT_DONE (1L<<1) -#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT (1L<<2) -#define BNX2_DMA_FUSE_CTRL0_CMD_LOAD (1L<<3) -#define BNX2_DMA_FUSE_CTRL0_CMD_SEL (0xfL<<8) - -#define BNX2_DMA_FUSE_CTRL0_DATA 0x00000f04 -#define BNX2_DMA_FUSE_CTRL1_CMD 0x00000f08 -#define BNX2_DMA_FUSE_CTRL1_CMD_PWRUP_DONE (1L<<0) -#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT_DONE (1L<<1) -#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT (1L<<2) -#define BNX2_DMA_FUSE_CTRL1_CMD_LOAD (1L<<3) -#define BNX2_DMA_FUSE_CTRL1_CMD_SEL (0xfL<<8) - -#define BNX2_DMA_FUSE_CTRL1_DATA 0x00000f0c -#define BNX2_DMA_FUSE_CTRL2_CMD 0x00000f10 -#define BNX2_DMA_FUSE_CTRL2_CMD_PWRUP_DONE (1L<<0) -#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT_DONE (1L<<1) -#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT (1L<<2) -#define BNX2_DMA_FUSE_CTRL2_CMD_LOAD (1L<<3) -#define BNX2_DMA_FUSE_CTRL2_CMD_SEL (0xfL<<8) - -#define BNX2_DMA_FUSE_CTRL2_DATA 0x00000f14 - - -/* - * context_reg definition - * offset: 0x1000 - */ -#define BNX2_CTX_COMMAND 0x00001000 -#define BNX2_CTX_COMMAND_ENABLED (1L<<0) -#define BNX2_CTX_COMMAND_DISABLE_USAGE_CNT (1L<<1) -#define BNX2_CTX_COMMAND_DISABLE_PLRU (1L<<2) -#define BNX2_CTX_COMMAND_DISABLE_COMBINE_READ (1L<<3) -#define BNX2_CTX_COMMAND_FLUSH_AHEAD (0x1fL<<8) -#define BNX2_CTX_COMMAND_MEM_INIT (1L<<13) -#define BNX2_CTX_COMMAND_PAGE_SIZE (0xfL<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_256 (0L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_512 (1L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_1K (2L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_2K (3L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_4K (4L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_8K (5L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_16K (6L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_32K (7L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_64K (8L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_128K (9L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_256K (10L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_512K (11L<<16) -#define BNX2_CTX_COMMAND_PAGE_SIZE_1M (12L<<16) - -#define BNX2_CTX_STATUS 0x00001004 -#define BNX2_CTX_STATUS_LOCK_WAIT (1L<<0) -#define BNX2_CTX_STATUS_READ_STAT (1L<<16) -#define BNX2_CTX_STATUS_WRITE_STAT (1L<<17) -#define BNX2_CTX_STATUS_ACC_STALL_STAT (1L<<18) -#define BNX2_CTX_STATUS_LOCK_STALL_STAT (1L<<19) -#define BNX2_CTX_STATUS_EXT_READ_STAT (1L<<20) -#define BNX2_CTX_STATUS_EXT_WRITE_STAT (1L<<21) -#define BNX2_CTX_STATUS_MISS_STAT (1L<<22) -#define BNX2_CTX_STATUS_HIT_STAT (1L<<23) -#define BNX2_CTX_STATUS_DEAD_LOCK (1L<<24) -#define BNX2_CTX_STATUS_USAGE_CNT_ERR (1L<<25) -#define BNX2_CTX_STATUS_INVALID_PAGE (1L<<26) - -#define BNX2_CTX_VIRT_ADDR 0x00001008 -#define BNX2_CTX_VIRT_ADDR_VIRT_ADDR (0x7fffL<<6) - -#define BNX2_CTX_PAGE_TBL 0x0000100c -#define BNX2_CTX_PAGE_TBL_PAGE_TBL (0x3fffL<<6) - -#define BNX2_CTX_DATA_ADR 0x00001010 -#define BNX2_CTX_DATA_ADR_DATA_ADR (0x7ffffL<<2) - -#define BNX2_CTX_DATA 0x00001014 -#define BNX2_CTX_LOCK 0x00001018 -#define BNX2_CTX_LOCK_TYPE (0x7L<<0) -#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_VOID (0x0L<<0) -#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_PROTOCOL (0x1L<<0) -#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TX (0x2L<<0) -#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TIMER (0x4L<<0) -#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_COMPLETE (0x7L<<0) -#define 
BNX2_CTX_LOCK_TYPE_VOID_XI (0L<<0) -#define BNX2_CTX_LOCK_TYPE_PROTOCOL_XI (1L<<0) -#define BNX2_CTX_LOCK_TYPE_TX_XI (2L<<0) -#define BNX2_CTX_LOCK_TYPE_TIMER_XI (4L<<0) -#define BNX2_CTX_LOCK_TYPE_COMPLETE_XI (7L<<0) -#define BNX2_CTX_LOCK_CID_VALUE (0x3fffL<<7) -#define BNX2_CTX_LOCK_GRANTED (1L<<26) -#define BNX2_CTX_LOCK_MODE (0x7L<<27) -#define BNX2_CTX_LOCK_MODE_UNLOCK (0x0L<<27) -#define BNX2_CTX_LOCK_MODE_IMMEDIATE (0x1L<<27) -#define BNX2_CTX_LOCK_MODE_SURE (0x2L<<27) -#define BNX2_CTX_LOCK_STATUS (1L<<30) -#define BNX2_CTX_LOCK_REQ (1L<<31) - -#define BNX2_CTX_CTX_CTRL 0x0000101c -#define BNX2_CTX_CTX_CTRL_CTX_ADDR (0x7ffffL<<2) -#define BNX2_CTX_CTX_CTRL_MOD_USAGE_CNT (0x3L<<21) -#define BNX2_CTX_CTX_CTRL_NO_RAM_ACC (1L<<23) -#define BNX2_CTX_CTX_CTRL_PREFETCH_SIZE (0x3L<<24) -#define BNX2_CTX_CTX_CTRL_ATTR (1L<<26) -#define BNX2_CTX_CTX_CTRL_WRITE_REQ (1L<<30) -#define BNX2_CTX_CTX_CTRL_READ_REQ (1L<<31) - -#define BNX2_CTX_CTX_DATA 0x00001020 -#define BNX2_CTX_ACCESS_STATUS 0x00001040 -#define BNX2_CTX_ACCESS_STATUS_MASTERENCODED (0xfL<<0) -#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYSM (0x3L<<10) -#define BNX2_CTX_ACCESS_STATUS_PAGETABLEINITSM (0x3L<<12) -#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYINITSM (0x3L<<14) -#define BNX2_CTX_ACCESS_STATUS_QUALIFIED_REQUEST (0x7ffL<<17) -#define BNX2_CTX_ACCESS_STATUS_CAMMASTERENCODED_XI (0x1fL<<0) -#define BNX2_CTX_ACCESS_STATUS_CACHEMASTERENCODED_XI (0x1fL<<5) -#define BNX2_CTX_ACCESS_STATUS_REQUEST_XI (0x3fffffL<<10) - -#define BNX2_CTX_DBG_LOCK_STATUS 0x00001044 -#define BNX2_CTX_DBG_LOCK_STATUS_SM (0x3ffL<<0) -#define BNX2_CTX_DBG_LOCK_STATUS_MATCH (0x3ffL<<22) - -#define BNX2_CTX_CACHE_CTRL_STATUS 0x00001048 -#define BNX2_CTX_CACHE_CTRL_STATUS_RFIFO_OVERFLOW (1L<<0) -#define BNX2_CTX_CACHE_CTRL_STATUS_INVALID_READ_COMP (1L<<1) -#define BNX2_CTX_CACHE_CTRL_STATUS_FLUSH_START (1L<<6) -#define BNX2_CTX_CACHE_CTRL_STATUS_FREE_ENTRY_CNT (0x3fL<<7) -#define BNX2_CTX_CACHE_CTRL_STATUS_CACHE_ENTRY_NEEDED (0x3fL<<13) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN0_ACTIVE (1L<<19) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN1_ACTIVE (1L<<20) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN2_ACTIVE (1L<<21) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN3_ACTIVE (1L<<22) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN4_ACTIVE (1L<<23) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN5_ACTIVE (1L<<24) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN6_ACTIVE (1L<<25) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN7_ACTIVE (1L<<26) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN8_ACTIVE (1L<<27) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN9_ACTIVE (1L<<28) -#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN10_ACTIVE (1L<<29) - -#define BNX2_CTX_CACHE_CTRL_SM_STATUS 0x0000104c -#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_DWC (0x7L<<0) -#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_WFIFOC (0x7L<<3) -#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RTAGC (0x7L<<6) -#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RFIFOC (0x7L<<9) -#define BNX2_CTX_CACHE_CTRL_SM_STATUS_INVALID_BLK_ADDR (0x7fffL<<16) - -#define BNX2_CTX_CACHE_STATUS 0x00001050 -#define BNX2_CTX_CACHE_STATUS_HELD_ENTRIES (0x3ffL<<0) -#define BNX2_CTX_CACHE_STATUS_MAX_HELD_ENTRIES (0x3ffL<<16) - -#define BNX2_CTX_DMA_STATUS 0x00001054 -#define BNX2_CTX_DMA_STATUS_RD_CHAN0_STATUS (0x3L<<0) -#define BNX2_CTX_DMA_STATUS_RD_CHAN1_STATUS (0x3L<<2) -#define BNX2_CTX_DMA_STATUS_RD_CHAN2_STATUS (0x3L<<4) -#define BNX2_CTX_DMA_STATUS_RD_CHAN3_STATUS (0x3L<<6) -#define BNX2_CTX_DMA_STATUS_RD_CHAN4_STATUS (0x3L<<8) -#define BNX2_CTX_DMA_STATUS_RD_CHAN5_STATUS 
(0x3L<<10) -#define BNX2_CTX_DMA_STATUS_RD_CHAN6_STATUS (0x3L<<12) -#define BNX2_CTX_DMA_STATUS_RD_CHAN7_STATUS (0x3L<<14) -#define BNX2_CTX_DMA_STATUS_RD_CHAN8_STATUS (0x3L<<16) -#define BNX2_CTX_DMA_STATUS_RD_CHAN9_STATUS (0x3L<<18) -#define BNX2_CTX_DMA_STATUS_RD_CHAN10_STATUS (0x3L<<20) - -#define BNX2_CTX_REP_STATUS 0x00001058 -#define BNX2_CTX_REP_STATUS_ERROR_ENTRY (0x3ffL<<0) -#define BNX2_CTX_REP_STATUS_ERROR_CLIENT_ID (0x1fL<<10) -#define BNX2_CTX_REP_STATUS_USAGE_CNT_MAX_ERR (1L<<16) -#define BNX2_CTX_REP_STATUS_USAGE_CNT_MIN_ERR (1L<<17) -#define BNX2_CTX_REP_STATUS_USAGE_CNT_MISS_ERR (1L<<18) - -#define BNX2_CTX_CKSUM_ERROR_STATUS 0x0000105c -#define BNX2_CTX_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) -#define BNX2_CTX_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) - -#define BNX2_CTX_CHNL_LOCK_STATUS_0 0x00001080 -#define BNX2_CTX_CHNL_LOCK_STATUS_0_CID (0x3fffL<<0) -#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE (0x3L<<14) -#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE (1L<<16) -#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE_XI (1L<<14) -#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE_XI (0x7L<<15) - -#define BNX2_CTX_CHNL_LOCK_STATUS_1 0x00001084 -#define BNX2_CTX_CHNL_LOCK_STATUS_2 0x00001088 -#define BNX2_CTX_CHNL_LOCK_STATUS_3 0x0000108c -#define BNX2_CTX_CHNL_LOCK_STATUS_4 0x00001090 -#define BNX2_CTX_CHNL_LOCK_STATUS_5 0x00001094 -#define BNX2_CTX_CHNL_LOCK_STATUS_6 0x00001098 -#define BNX2_CTX_CHNL_LOCK_STATUS_7 0x0000109c -#define BNX2_CTX_CHNL_LOCK_STATUS_8 0x000010a0 -#define BNX2_CTX_CHNL_LOCK_STATUS_9 0x000010a4 - -#define BNX2_CTX_CACHE_DATA 0x000010c4 -#define BNX2_CTX_HOST_PAGE_TBL_CTRL 0x000010c8 -#define BNX2_CTX_HOST_PAGE_TBL_CTRL_PAGE_TBL_ADDR (0x1ffL<<0) -#define BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ (1L<<30) -#define BNX2_CTX_HOST_PAGE_TBL_CTRL_READ_REQ (1L<<31) - -#define BNX2_CTX_HOST_PAGE_TBL_DATA0 0x000010cc -#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID (1L<<0) -#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALUE (0xffffffL<<8) - -#define BNX2_CTX_HOST_PAGE_TBL_DATA1 0x000010d0 -#define BNX2_CTX_CAM_CTRL 0x000010d4 -#define BNX2_CTX_CAM_CTRL_CAM_ADDR (0x3ffL<<0) -#define BNX2_CTX_CAM_CTRL_RESET (1L<<27) -#define BNX2_CTX_CAM_CTRL_INVALIDATE (1L<<28) -#define BNX2_CTX_CAM_CTRL_SEARCH (1L<<29) -#define BNX2_CTX_CAM_CTRL_WRITE_REQ (1L<<30) -#define BNX2_CTX_CAM_CTRL_READ_REQ (1L<<31) - - -/* - * emac_reg definition - * offset: 0x1400 - */ -#define BNX2_EMAC_MODE 0x00001400 -#define BNX2_EMAC_MODE_RESET (1L<<0) -#define BNX2_EMAC_MODE_HALF_DUPLEX (1L<<1) -#define BNX2_EMAC_MODE_PORT (0x3L<<2) -#define BNX2_EMAC_MODE_PORT_NONE (0L<<2) -#define BNX2_EMAC_MODE_PORT_MII (1L<<2) -#define BNX2_EMAC_MODE_PORT_GMII (2L<<2) -#define BNX2_EMAC_MODE_PORT_MII_10M (3L<<2) -#define BNX2_EMAC_MODE_MAC_LOOP (1L<<4) -#define BNX2_EMAC_MODE_25G_MODE (1L<<5) -#define BNX2_EMAC_MODE_TAGGED_MAC_CTL (1L<<7) -#define BNX2_EMAC_MODE_TX_BURST (1L<<8) -#define BNX2_EMAC_MODE_MAX_DEFER_DROP_ENA (1L<<9) -#define BNX2_EMAC_MODE_EXT_LINK_POL (1L<<10) -#define BNX2_EMAC_MODE_FORCE_LINK (1L<<11) -#define BNX2_EMAC_MODE_SERDES_MODE (1L<<12) -#define BNX2_EMAC_MODE_BOND_OVRD (1L<<13) -#define BNX2_EMAC_MODE_MPKT (1L<<18) -#define BNX2_EMAC_MODE_MPKT_RCVD (1L<<19) -#define BNX2_EMAC_MODE_ACPI_RCVD (1L<<20) - -#define BNX2_EMAC_STATUS 0x00001404 -#define BNX2_EMAC_STATUS_LINK (1L<<11) -#define BNX2_EMAC_STATUS_LINK_CHANGE (1L<<12) -#define BNX2_EMAC_STATUS_SERDES_AUTONEG_COMPLETE (1L<<13) -#define BNX2_EMAC_STATUS_SERDES_AUTONEG_CHANGE (1L<<14) -#define BNX2_EMAC_STATUS_SERDES_NXT_PG_CHANGE (1L<<16) -#define 
BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0 (1L<<17) -#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0_CHANGE (1L<<18) -#define BNX2_EMAC_STATUS_MI_COMPLETE (1L<<22) -#define BNX2_EMAC_STATUS_MI_INT (1L<<23) -#define BNX2_EMAC_STATUS_AP_ERROR (1L<<24) -#define BNX2_EMAC_STATUS_PARITY_ERROR_STATE (1L<<31) - -#define BNX2_EMAC_ATTENTION_ENA 0x00001408 -#define BNX2_EMAC_ATTENTION_ENA_LINK (1L<<11) -#define BNX2_EMAC_ATTENTION_ENA_AUTONEG_CHANGE (1L<<14) -#define BNX2_EMAC_ATTENTION_ENA_NXT_PG_CHANGE (1L<<16) -#define BNX2_EMAC_ATTENTION_ENA_SERDES_RX_CONFIG_IS_0_CHANGE (1L<<18) -#define BNX2_EMAC_ATTENTION_ENA_MI_COMPLETE (1L<<22) -#define BNX2_EMAC_ATTENTION_ENA_MI_INT (1L<<23) -#define BNX2_EMAC_ATTENTION_ENA_AP_ERROR (1L<<24) - -#define BNX2_EMAC_LED 0x0000140c -#define BNX2_EMAC_LED_OVERRIDE (1L<<0) -#define BNX2_EMAC_LED_1000MB_OVERRIDE (1L<<1) -#define BNX2_EMAC_LED_100MB_OVERRIDE (1L<<2) -#define BNX2_EMAC_LED_10MB_OVERRIDE (1L<<3) -#define BNX2_EMAC_LED_TRAFFIC_OVERRIDE (1L<<4) -#define BNX2_EMAC_LED_BLNK_TRAFFIC (1L<<5) -#define BNX2_EMAC_LED_TRAFFIC (1L<<6) -#define BNX2_EMAC_LED_1000MB (1L<<7) -#define BNX2_EMAC_LED_100MB (1L<<8) -#define BNX2_EMAC_LED_10MB (1L<<9) -#define BNX2_EMAC_LED_TRAFFIC_STAT (1L<<10) -#define BNX2_EMAC_LED_2500MB (1L<<11) -#define BNX2_EMAC_LED_2500MB_OVERRIDE (1L<<12) -#define BNX2_EMAC_LED_ACTIVITY_SEL (0x3L<<17) -#define BNX2_EMAC_LED_ACTIVITY_SEL_0 (0L<<17) -#define BNX2_EMAC_LED_ACTIVITY_SEL_1 (1L<<17) -#define BNX2_EMAC_LED_ACTIVITY_SEL_2 (2L<<17) -#define BNX2_EMAC_LED_ACTIVITY_SEL_3 (3L<<17) -#define BNX2_EMAC_LED_BLNK_RATE (0xfffL<<19) -#define BNX2_EMAC_LED_BLNK_RATE_ENA (1L<<31) - -#define BNX2_EMAC_MAC_MATCH0 0x00001410 -#define BNX2_EMAC_MAC_MATCH1 0x00001414 -#define BNX2_EMAC_MAC_MATCH2 0x00001418 -#define BNX2_EMAC_MAC_MATCH3 0x0000141c -#define BNX2_EMAC_MAC_MATCH4 0x00001420 -#define BNX2_EMAC_MAC_MATCH5 0x00001424 -#define BNX2_EMAC_MAC_MATCH6 0x00001428 -#define BNX2_EMAC_MAC_MATCH7 0x0000142c -#define BNX2_EMAC_MAC_MATCH8 0x00001430 -#define BNX2_EMAC_MAC_MATCH9 0x00001434 -#define BNX2_EMAC_MAC_MATCH10 0x00001438 -#define BNX2_EMAC_MAC_MATCH11 0x0000143c -#define BNX2_EMAC_MAC_MATCH12 0x00001440 -#define BNX2_EMAC_MAC_MATCH13 0x00001444 -#define BNX2_EMAC_MAC_MATCH14 0x00001448 -#define BNX2_EMAC_MAC_MATCH15 0x0000144c -#define BNX2_EMAC_MAC_MATCH16 0x00001450 -#define BNX2_EMAC_MAC_MATCH17 0x00001454 -#define BNX2_EMAC_MAC_MATCH18 0x00001458 -#define BNX2_EMAC_MAC_MATCH19 0x0000145c -#define BNX2_EMAC_MAC_MATCH20 0x00001460 -#define BNX2_EMAC_MAC_MATCH21 0x00001464 -#define BNX2_EMAC_MAC_MATCH22 0x00001468 -#define BNX2_EMAC_MAC_MATCH23 0x0000146c -#define BNX2_EMAC_MAC_MATCH24 0x00001470 -#define BNX2_EMAC_MAC_MATCH25 0x00001474 -#define BNX2_EMAC_MAC_MATCH26 0x00001478 -#define BNX2_EMAC_MAC_MATCH27 0x0000147c -#define BNX2_EMAC_MAC_MATCH28 0x00001480 -#define BNX2_EMAC_MAC_MATCH29 0x00001484 -#define BNX2_EMAC_MAC_MATCH30 0x00001488 -#define BNX2_EMAC_MAC_MATCH31 0x0000148c -#define BNX2_EMAC_BACKOFF_SEED 0x00001498 -#define BNX2_EMAC_BACKOFF_SEED_EMAC_BACKOFF_SEED (0x3ffL<<0) - -#define BNX2_EMAC_RX_MTU_SIZE 0x0000149c -#define BNX2_EMAC_RX_MTU_SIZE_MTU_SIZE (0xffffL<<0) -#define BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) - -#define BNX2_EMAC_SERDES_CNTL 0x000014a4 -#define BNX2_EMAC_SERDES_CNTL_RXR (0x7L<<0) -#define BNX2_EMAC_SERDES_CNTL_RXG (0x3L<<3) -#define BNX2_EMAC_SERDES_CNTL_RXCKSEL (1L<<6) -#define BNX2_EMAC_SERDES_CNTL_TXBIAS (0x7L<<7) -#define BNX2_EMAC_SERDES_CNTL_BGMAX (1L<<10) -#define BNX2_EMAC_SERDES_CNTL_BGMIN (1L<<11) 
-#define BNX2_EMAC_SERDES_CNTL_TXMODE (1L<<12) -#define BNX2_EMAC_SERDES_CNTL_TXEDGE (1L<<13) -#define BNX2_EMAC_SERDES_CNTL_SERDES_MODE (1L<<14) -#define BNX2_EMAC_SERDES_CNTL_PLLTEST (1L<<15) -#define BNX2_EMAC_SERDES_CNTL_CDET_EN (1L<<16) -#define BNX2_EMAC_SERDES_CNTL_TBI_LBK (1L<<17) -#define BNX2_EMAC_SERDES_CNTL_REMOTE_LBK (1L<<18) -#define BNX2_EMAC_SERDES_CNTL_REV_PHASE (1L<<19) -#define BNX2_EMAC_SERDES_CNTL_REGCTL12 (0x3L<<20) -#define BNX2_EMAC_SERDES_CNTL_REGCTL25 (0x3L<<22) - -#define BNX2_EMAC_SERDES_STATUS 0x000014a8 -#define BNX2_EMAC_SERDES_STATUS_RX_STAT (0xffL<<0) -#define BNX2_EMAC_SERDES_STATUS_COMMA_DET (1L<<8) - -#define BNX2_EMAC_MDIO_COMM 0x000014ac -#define BNX2_EMAC_MDIO_COMM_DATA (0xffffL<<0) -#define BNX2_EMAC_MDIO_COMM_REG_ADDR (0x1fL<<16) -#define BNX2_EMAC_MDIO_COMM_PHY_ADDR (0x1fL<<21) -#define BNX2_EMAC_MDIO_COMM_COMMAND (0x3L<<26) -#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_0 (0L<<26) -#define BNX2_EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26) -#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE (1L<<26) -#define BNX2_EMAC_MDIO_COMM_COMMAND_READ (2L<<26) -#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_22_XI (1L<<26) -#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_45_XI (1L<<26) -#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_22_XI (2L<<26) -#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_INC_45_XI (2L<<26) -#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_3 (3L<<26) -#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26) -#define BNX2_EMAC_MDIO_COMM_FAIL (1L<<28) -#define BNX2_EMAC_MDIO_COMM_START_BUSY (1L<<29) -#define BNX2_EMAC_MDIO_COMM_DISEXT (1L<<30) - -#define BNX2_EMAC_MDIO_STATUS 0x000014b0 -#define BNX2_EMAC_MDIO_STATUS_LINK (1L<<0) -#define BNX2_EMAC_MDIO_STATUS_10MB (1L<<1) - -#define BNX2_EMAC_MDIO_MODE 0x000014b4 -#define BNX2_EMAC_MDIO_MODE_SHORT_PREAMBLE (1L<<1) -#define BNX2_EMAC_MDIO_MODE_AUTO_POLL (1L<<4) -#define BNX2_EMAC_MDIO_MODE_BIT_BANG (1L<<8) -#define BNX2_EMAC_MDIO_MODE_MDIO (1L<<9) -#define BNX2_EMAC_MDIO_MODE_MDIO_OE (1L<<10) -#define BNX2_EMAC_MDIO_MODE_MDC (1L<<11) -#define BNX2_EMAC_MDIO_MODE_MDINT (1L<<12) -#define BNX2_EMAC_MDIO_MODE_EXT_MDINT (1L<<13) -#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT (0x1fL<<16) -#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT_XI (0x3fL<<16) -#define BNX2_EMAC_MDIO_MODE_CLAUSE_45_XI (1L<<31) - -#define BNX2_EMAC_MDIO_AUTO_STATUS 0x000014b8 -#define BNX2_EMAC_MDIO_AUTO_STATUS_AUTO_ERR (1L<<0) - -#define BNX2_EMAC_TX_MODE 0x000014bc -#define BNX2_EMAC_TX_MODE_RESET (1L<<0) -#define BNX2_EMAC_TX_MODE_CS16_TEST (1L<<2) -#define BNX2_EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) -#define BNX2_EMAC_TX_MODE_FLOW_EN (1L<<4) -#define BNX2_EMAC_TX_MODE_BIG_BACKOFF (1L<<5) -#define BNX2_EMAC_TX_MODE_LONG_PAUSE (1L<<6) -#define BNX2_EMAC_TX_MODE_LINK_AWARE (1L<<7) - -#define BNX2_EMAC_TX_STATUS 0x000014c0 -#define BNX2_EMAC_TX_STATUS_XOFFED (1L<<0) -#define BNX2_EMAC_TX_STATUS_XOFF_SENT (1L<<1) -#define BNX2_EMAC_TX_STATUS_XON_SENT (1L<<2) -#define BNX2_EMAC_TX_STATUS_LINK_UP (1L<<3) -#define BNX2_EMAC_TX_STATUS_UNDERRUN (1L<<4) -#define BNX2_EMAC_TX_STATUS_CS16_ERROR (1L<<5) - -#define BNX2_EMAC_TX_LENGTHS 0x000014c4 -#define BNX2_EMAC_TX_LENGTHS_SLOT (0xffL<<0) -#define BNX2_EMAC_TX_LENGTHS_IPG (0xfL<<8) -#define BNX2_EMAC_TX_LENGTHS_IPG_CRS (0x3L<<12) - -#define BNX2_EMAC_RX_MODE 0x000014c8 -#define BNX2_EMAC_RX_MODE_RESET (1L<<0) -#define BNX2_EMAC_RX_MODE_FLOW_EN (1L<<2) -#define BNX2_EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3) -#define BNX2_EMAC_RX_MODE_KEEP_PAUSE (1L<<4) -#define BNX2_EMAC_RX_MODE_ACCEPT_OVERSIZE (1L<<5) -#define BNX2_EMAC_RX_MODE_ACCEPT_RUNTS (1L<<6) 
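/*
 * Illustrative sketch, not part of the original patch: the
 * BNX2_EMAC_MDIO_COMM fields above describe the MII management
 * interface -- software packs the PHY and register addresses into
 * their fields, selects a command, sets START_BUSY, and polls until
 * the hardware clears it (FAIL reports an error, DATA holds the
 * result).  A minimal clause-22 read under those assumptions might
 * look like the helper below; example_mdio_read, the "base" MMIO
 * pointer, and the kernel helpers readl/writel/udelay (from
 * <linux/io.h>, <linux/delay.h>, <linux/errno.h>) are assumptions
 * for illustration, not definitions from this patch.
 */
static int example_mdio_read(void __iomem *base, u32 phy, u32 reg, u32 *val)
{
	u32 cmd;
	int i;

	/* PHY_ADDR lives at bits 25:21 and REG_ADDR at 20:16, per the
	 * (0x1fL<<21) and (0x1fL<<16) field definitions above.
	 */
	cmd = (phy << 21) | (reg << 16) |
	      BNX2_EMAC_MDIO_COMM_COMMAND_READ |
	      BNX2_EMAC_MDIO_COMM_DISEXT |
	      BNX2_EMAC_MDIO_COMM_START_BUSY;
	writel(cmd, base + BNX2_EMAC_MDIO_COMM);

	/* The hardware clears START_BUSY when the transaction is done. */
	for (i = 0; i < 50; i++) {
		udelay(10);
		cmd = readl(base + BNX2_EMAC_MDIO_COMM);
		if (cmd & BNX2_EMAC_MDIO_COMM_START_BUSY)
			continue;
		if (cmd & BNX2_EMAC_MDIO_COMM_FAIL)
			return -EIO;
		*val = cmd & BNX2_EMAC_MDIO_COMM_DATA;
		return 0;
	}
	return -EBUSY;
}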
-#define BNX2_EMAC_RX_MODE_LLC_CHK (1L<<7) -#define BNX2_EMAC_RX_MODE_PROMISCUOUS (1L<<8) -#define BNX2_EMAC_RX_MODE_NO_CRC_CHK (1L<<9) -#define BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10) -#define BNX2_EMAC_RX_MODE_FILT_BROADCAST (1L<<11) -#define BNX2_EMAC_RX_MODE_SORT_MODE (1L<<12) - -#define BNX2_EMAC_RX_STATUS 0x000014cc -#define BNX2_EMAC_RX_STATUS_FFED (1L<<0) -#define BNX2_EMAC_RX_STATUS_FF_RECEIVED (1L<<1) -#define BNX2_EMAC_RX_STATUS_N_RECEIVED (1L<<2) - -#define BNX2_EMAC_MULTICAST_HASH0 0x000014d0 -#define BNX2_EMAC_MULTICAST_HASH1 0x000014d4 -#define BNX2_EMAC_MULTICAST_HASH2 0x000014d8 -#define BNX2_EMAC_MULTICAST_HASH3 0x000014dc -#define BNX2_EMAC_MULTICAST_HASH4 0x000014e0 -#define BNX2_EMAC_MULTICAST_HASH5 0x000014e4 -#define BNX2_EMAC_MULTICAST_HASH6 0x000014e8 -#define BNX2_EMAC_MULTICAST_HASH7 0x000014ec -#define BNX2_EMAC_CKSUM_ERROR_STATUS 0x000014f0 -#define BNX2_EMAC_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) -#define BNX2_EMAC_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) - -#define BNX2_EMAC_RX_STAT_IFHCINOCTETS 0x00001500 -#define BNX2_EMAC_RX_STAT_IFHCINBADOCTETS 0x00001504 -#define BNX2_EMAC_RX_STAT_ETHERSTATSFRAGMENTS 0x00001508 -#define BNX2_EMAC_RX_STAT_IFHCINUCASTPKTS 0x0000150c -#define BNX2_EMAC_RX_STAT_IFHCINMULTICASTPKTS 0x00001510 -#define BNX2_EMAC_RX_STAT_IFHCINBROADCASTPKTS 0x00001514 -#define BNX2_EMAC_RX_STAT_DOT3STATSFCSERRORS 0x00001518 -#define BNX2_EMAC_RX_STAT_DOT3STATSALIGNMENTERRORS 0x0000151c -#define BNX2_EMAC_RX_STAT_DOT3STATSCARRIERSENSEERRORS 0x00001520 -#define BNX2_EMAC_RX_STAT_XONPAUSEFRAMESRECEIVED 0x00001524 -#define BNX2_EMAC_RX_STAT_XOFFPAUSEFRAMESRECEIVED 0x00001528 -#define BNX2_EMAC_RX_STAT_MACCONTROLFRAMESRECEIVED 0x0000152c -#define BNX2_EMAC_RX_STAT_XOFFSTATEENTERED 0x00001530 -#define BNX2_EMAC_RX_STAT_DOT3STATSFRAMESTOOLONG 0x00001534 -#define BNX2_EMAC_RX_STAT_ETHERSTATSJABBERS 0x00001538 -#define BNX2_EMAC_RX_STAT_ETHERSTATSUNDERSIZEPKTS 0x0000153c -#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS64OCTETS 0x00001540 -#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS 0x00001544 -#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS 0x00001548 -#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS 0x0000154c -#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS 0x00001550 -#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS 0x00001554 -#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTSOVER1522OCTETS 0x00001558 -#define BNX2_EMAC_RXMAC_DEBUG0 0x0000155c -#define BNX2_EMAC_RXMAC_DEBUG1 0x00001560 -#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_NE_BYTE_COUNT (1L<<0) -#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_OUT_RANGE (1L<<1) -#define BNX2_EMAC_RXMAC_DEBUG1_BAD_CRC (1L<<2) -#define BNX2_EMAC_RXMAC_DEBUG1_RX_ERROR (1L<<3) -#define BNX2_EMAC_RXMAC_DEBUG1_ALIGN_ERROR (1L<<4) -#define BNX2_EMAC_RXMAC_DEBUG1_LAST_DATA (1L<<5) -#define BNX2_EMAC_RXMAC_DEBUG1_ODD_BYTE_START (1L<<6) -#define BNX2_EMAC_RXMAC_DEBUG1_BYTE_COUNT (0xffffL<<7) -#define BNX2_EMAC_RXMAC_DEBUG1_SLOT_TIME (0xffL<<23) - -#define BNX2_EMAC_RXMAC_DEBUG2 0x00001564 -#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE (0x7L<<0) -#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_IDLE (0x0L<<0) -#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SFD (0x1L<<0) -#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DATA (0x2L<<0) -#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SKEEP (0x3L<<0) -#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_EXT (0x4L<<0) -#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DROP (0x5L<<0) -#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SDROP (0x6L<<0) -#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_FC (0x7L<<0) 
-#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE (0xfL<<3) -#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_IDLE (0x0L<<3) -#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA0 (0x1L<<3) -#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA1 (0x2L<<3) -#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA2 (0x3L<<3) -#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA3 (0x4L<<3) -#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_ABORT (0x5L<<3) -#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_WAIT (0x6L<<3) -#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_STATUS (0x7L<<3) -#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_LAST (0x8L<<3) -#define BNX2_EMAC_RXMAC_DEBUG2_BYTE_IN (0xffL<<7) -#define BNX2_EMAC_RXMAC_DEBUG2_FALSEC (1L<<15) -#define BNX2_EMAC_RXMAC_DEBUG2_TAGGED (1L<<16) -#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE (1L<<18) -#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_IDLE (0L<<18) -#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_PAUSED (1L<<18) -#define BNX2_EMAC_RXMAC_DEBUG2_SE_COUNTER (0xfL<<19) -#define BNX2_EMAC_RXMAC_DEBUG2_QUANTA (0x1fL<<23) - -#define BNX2_EMAC_RXMAC_DEBUG3 0x00001568 -#define BNX2_EMAC_RXMAC_DEBUG3_PAUSE_CTR (0xffffL<<0) -#define BNX2_EMAC_RXMAC_DEBUG3_TMP_PAUSE_CTR (0xffffL<<16) - -#define BNX2_EMAC_RXMAC_DEBUG4 0x0000156c -#define BNX2_EMAC_RXMAC_DEBUG4_TYPE_FIELD (0xffffL<<0) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE (0x3fL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_IDLE (0x0L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC2 (0x1L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC3 (0x2L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UNI (0x3L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC3 (0x5L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA1 (0x6L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC2 (0x7L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA2 (0x7L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA3 (0x8L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC2 (0x9L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC3 (0xaL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT1 (0xeL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT2 (0xfL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MCHECK (0x10L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC (0x11L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC2 (0x12L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC3 (0x13L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA1 (0x14L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA2 (0x15L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA3 (0x16L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BTYPE (0x17L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC (0x18L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PTYPE (0x19L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_CMD (0x1aL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MAC (0x1bL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_LATCH (0x1cL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XOFF (0x1dL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XON (0x1eL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PAUSED (0x1fL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_NPAUSED (0x20L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TTYPE (0x21L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TVAL (0x22L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA1 (0x23L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA2 (0x24L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA3 (0x25L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTYPE (0x26L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTTYPE (0x27L<<16) -#define 
BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTVAL (0x28L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MTYPE (0x29L<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_DROP (0x2aL<<16) -#define BNX2_EMAC_RXMAC_DEBUG4_DROP_PKT (1L<<22) -#define BNX2_EMAC_RXMAC_DEBUG4_SLOT_FILLED (1L<<23) -#define BNX2_EMAC_RXMAC_DEBUG4_FALSE_CARRIER (1L<<24) -#define BNX2_EMAC_RXMAC_DEBUG4_LAST_DATA (1L<<25) -#define BNX2_EMAC_RXMAC_DEBUG4_SFD_FOUND (1L<<26) -#define BNX2_EMAC_RXMAC_DEBUG4_ADVANCE (1L<<27) -#define BNX2_EMAC_RXMAC_DEBUG4_START (1L<<28) - -#define BNX2_EMAC_RXMAC_DEBUG5 0x00001570 -#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM (0x7L<<0) -#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_IDLE (0L<<0) -#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_EOF (1L<<0) -#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_STAT (2L<<0) -#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4FCRC (3L<<0) -#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4RDE (4L<<0) -#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4ALL (5L<<0) -#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_1WD_WAIT_STAT (6L<<0) -#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1 (0x7L<<4) -#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_VDW (0x0L<<4) -#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_STAT (0x1L<<4) -#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_AEOF (0x2L<<4) -#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_NEOF (0x3L<<4) -#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SOF (0x4L<<4) -#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SAEOF (0x6L<<4) -#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SNEOF (0x7L<<4) -#define BNX2_EMAC_RXMAC_DEBUG5_EOF_DETECTED (1L<<7) -#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF0 (0x7L<<8) -#define BNX2_EMAC_RXMAC_DEBUG5_RPM_IDI_FIFO_FULL (1L<<11) -#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_CCODE (1L<<12) -#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_DATA (1L<<13) -#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_STAT (1L<<14) -#define BNX2_EMAC_RXMAC_DEBUG5_CLR_STAT (1L<<15) -#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_CCODE (0x3L<<16) -#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_ACCEPT (1L<<19) -#define BNX2_EMAC_RXMAC_DEBUG5_FMLEN (0xfffL<<20) - -#define BNX2_EMAC_RX_STAT_FALSECARRIERERRORS 0x00001574 -#define BNX2_EMAC_RX_STAT_AC0 0x00001580 -#define BNX2_EMAC_RX_STAT_AC1 0x00001584 -#define BNX2_EMAC_RX_STAT_AC2 0x00001588 -#define BNX2_EMAC_RX_STAT_AC3 0x0000158c -#define BNX2_EMAC_RX_STAT_AC4 0x00001590 -#define BNX2_EMAC_RX_STAT_AC5 0x00001594 -#define BNX2_EMAC_RX_STAT_AC6 0x00001598 -#define BNX2_EMAC_RX_STAT_AC7 0x0000159c -#define BNX2_EMAC_RX_STAT_AC8 0x000015a0 -#define BNX2_EMAC_RX_STAT_AC9 0x000015a4 -#define BNX2_EMAC_RX_STAT_AC10 0x000015a8 -#define BNX2_EMAC_RX_STAT_AC11 0x000015ac -#define BNX2_EMAC_RX_STAT_AC12 0x000015b0 -#define BNX2_EMAC_RX_STAT_AC13 0x000015b4 -#define BNX2_EMAC_RX_STAT_AC14 0x000015b8 -#define BNX2_EMAC_RX_STAT_AC15 0x000015bc -#define BNX2_EMAC_RX_STAT_AC16 0x000015c0 -#define BNX2_EMAC_RX_STAT_AC17 0x000015c4 -#define BNX2_EMAC_RX_STAT_AC18 0x000015c8 -#define BNX2_EMAC_RX_STAT_AC19 0x000015cc -#define BNX2_EMAC_RX_STAT_AC20 0x000015d0 -#define BNX2_EMAC_RX_STAT_AC21 0x000015d4 -#define BNX2_EMAC_RX_STAT_AC22 0x000015d8 -#define BNX2_EMAC_RXMAC_SUC_DBG_OVERRUNVEC 0x000015dc -#define BNX2_EMAC_RX_STAT_AC_28 0x000015f4 -#define BNX2_EMAC_TX_STAT_IFHCOUTOCTETS 0x00001600 -#define BNX2_EMAC_TX_STAT_IFHCOUTBADOCTETS 0x00001604 -#define BNX2_EMAC_TX_STAT_ETHERSTATSCOLLISIONS 0x00001608 -#define BNX2_EMAC_TX_STAT_OUTXONSENT 0x0000160c -#define BNX2_EMAC_TX_STAT_OUTXOFFSENT 0x00001610 -#define BNX2_EMAC_TX_STAT_FLOWCONTROLDONE 0x00001614 -#define BNX2_EMAC_TX_STAT_DOT3STATSSINGLECOLLISIONFRAMES 
0x00001618 -#define BNX2_EMAC_TX_STAT_DOT3STATSMULTIPLECOLLISIONFRAMES 0x0000161c -#define BNX2_EMAC_TX_STAT_DOT3STATSDEFERREDTRANSMISSIONS 0x00001620 -#define BNX2_EMAC_TX_STAT_DOT3STATSEXCESSIVECOLLISIONS 0x00001624 -#define BNX2_EMAC_TX_STAT_DOT3STATSLATECOLLISIONS 0x00001628 -#define BNX2_EMAC_TX_STAT_IFHCOUTUCASTPKTS 0x0000162c -#define BNX2_EMAC_TX_STAT_IFHCOUTMULTICASTPKTS 0x00001630 -#define BNX2_EMAC_TX_STAT_IFHCOUTBROADCASTPKTS 0x00001634 -#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS64OCTETS 0x00001638 -#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS 0x0000163c -#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS 0x00001640 -#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS 0x00001644 -#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS 0x00001648 -#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS 0x0000164c -#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTSOVER1522OCTETS 0x00001650 -#define BNX2_EMAC_TX_STAT_DOT3STATSINTERNALMACTRANSMITERRORS 0x00001654 -#define BNX2_EMAC_TXMAC_DEBUG0 0x00001658 -#define BNX2_EMAC_TXMAC_DEBUG1 0x0000165c -#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE (0xfL<<0) -#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_IDLE (0x0L<<0) -#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_START0 (0x1L<<0) -#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA0 (0x4L<<0) -#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA1 (0x5L<<0) -#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA2 (0x6L<<0) -#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA3 (0x7L<<0) -#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT0 (0x8L<<0) -#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT1 (0x9L<<0) -#define BNX2_EMAC_TXMAC_DEBUG1_CRS_ENABLE (1L<<4) -#define BNX2_EMAC_TXMAC_DEBUG1_BAD_CRC (1L<<5) -#define BNX2_EMAC_TXMAC_DEBUG1_SE_COUNTER (0xfL<<6) -#define BNX2_EMAC_TXMAC_DEBUG1_SEND_PAUSE (1L<<10) -#define BNX2_EMAC_TXMAC_DEBUG1_LATE_COLLISION (1L<<11) -#define BNX2_EMAC_TXMAC_DEBUG1_MAX_DEFER (1L<<12) -#define BNX2_EMAC_TXMAC_DEBUG1_DEFERRED (1L<<13) -#define BNX2_EMAC_TXMAC_DEBUG1_ONE_BYTE (1L<<14) -#define BNX2_EMAC_TXMAC_DEBUG1_IPG_TIME (0xfL<<15) -#define BNX2_EMAC_TXMAC_DEBUG1_SLOT_TIME (0xffL<<19) - -#define BNX2_EMAC_TXMAC_DEBUG2 0x00001660 -#define BNX2_EMAC_TXMAC_DEBUG2_BACK_OFF (0x3ffL<<0) -#define BNX2_EMAC_TXMAC_DEBUG2_BYTE_COUNT (0xffffL<<10) -#define BNX2_EMAC_TXMAC_DEBUG2_COL_COUNT (0x1fL<<26) -#define BNX2_EMAC_TXMAC_DEBUG2_COL_BIT (1L<<31) - -#define BNX2_EMAC_TXMAC_DEBUG3 0x00001664 -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE (0xfL<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_IDLE (0x0L<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE1 (0x1L<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE2 (0x2L<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SFD (0x3L<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_DATA (0x4L<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC1 (0x5L<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC2 (0x6L<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EXT (0x7L<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATB (0x8L<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATG (0x9L<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_JAM (0xaL<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EJAM (0xbL<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BJAM (0xcL<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SWAIT (0xdL<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BACKOFF (0xeL<<0) -#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE (0x7L<<4) -#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_IDLE (0x0L<<4) -#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_WAIT (0x1L<<4) -#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_UNI 
(0x2L<<4) -#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_MC (0x3L<<4) -#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC2 (0x4L<<4) -#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC3 (0x5L<<4) -#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC (0x6L<<4) -#define BNX2_EMAC_TXMAC_DEBUG3_CRS_DONE (1L<<7) -#define BNX2_EMAC_TXMAC_DEBUG3_XOFF (1L<<8) -#define BNX2_EMAC_TXMAC_DEBUG3_SE_COUNTER (0xfL<<9) -#define BNX2_EMAC_TXMAC_DEBUG3_QUANTA_COUNTER (0x1fL<<13) - -#define BNX2_EMAC_TXMAC_DEBUG4 0x00001668 -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_COUNTER (0xffffL<<0) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE (0xfL<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_IDLE (0x0L<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA1 (0x2L<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA2 (0x3L<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC3 (0x4L<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC2 (0x5L<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA3 (0x6L<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC1 (0x7L<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC1 (0x8L<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC2 (0x9L<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TIME (0xaL<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TYPE (0xcL<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_WAIT (0xdL<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CMD (0xeL<<16) -#define BNX2_EMAC_TXMAC_DEBUG4_STATS0_VALID (1L<<20) -#define BNX2_EMAC_TXMAC_DEBUG4_APPEND_CRC (1L<<21) -#define BNX2_EMAC_TXMAC_DEBUG4_SLOT_FILLED (1L<<22) -#define BNX2_EMAC_TXMAC_DEBUG4_MAX_DEFER (1L<<23) -#define BNX2_EMAC_TXMAC_DEBUG4_SEND_EXTEND (1L<<24) -#define BNX2_EMAC_TXMAC_DEBUG4_SEND_PADDING (1L<<25) -#define BNX2_EMAC_TXMAC_DEBUG4_EOF_LOC (1L<<26) -#define BNX2_EMAC_TXMAC_DEBUG4_COLLIDING (1L<<27) -#define BNX2_EMAC_TXMAC_DEBUG4_COL_IN (1L<<28) -#define BNX2_EMAC_TXMAC_DEBUG4_BURSTING (1L<<29) -#define BNX2_EMAC_TXMAC_DEBUG4_ADVANCE (1L<<30) -#define BNX2_EMAC_TXMAC_DEBUG4_GO (1L<<31) - -#define BNX2_EMAC_TX_STAT_AC0 0x00001680 -#define BNX2_EMAC_TX_STAT_AC1 0x00001684 -#define BNX2_EMAC_TX_STAT_AC2 0x00001688 -#define BNX2_EMAC_TX_STAT_AC3 0x0000168c -#define BNX2_EMAC_TX_STAT_AC4 0x00001690 -#define BNX2_EMAC_TX_STAT_AC5 0x00001694 -#define BNX2_EMAC_TX_STAT_AC6 0x00001698 -#define BNX2_EMAC_TX_STAT_AC7 0x0000169c -#define BNX2_EMAC_TX_STAT_AC8 0x000016a0 -#define BNX2_EMAC_TX_STAT_AC9 0x000016a4 -#define BNX2_EMAC_TX_STAT_AC10 0x000016a8 -#define BNX2_EMAC_TX_STAT_AC11 0x000016ac -#define BNX2_EMAC_TX_STAT_AC12 0x000016b0 -#define BNX2_EMAC_TX_STAT_AC13 0x000016b4 -#define BNX2_EMAC_TX_STAT_AC14 0x000016b8 -#define BNX2_EMAC_TX_STAT_AC15 0x000016bc -#define BNX2_EMAC_TX_STAT_AC16 0x000016c0 -#define BNX2_EMAC_TX_STAT_AC17 0x000016c4 -#define BNX2_EMAC_TX_STAT_AC18 0x000016c8 -#define BNX2_EMAC_TX_STAT_AC19 0x000016cc -#define BNX2_EMAC_TX_STAT_AC20 0x000016d0 -#define BNX2_EMAC_TXMAC_SUC_DBG_OVERRUNVEC 0x000016d8 -#define BNX2_EMAC_TX_RATE_LIMIT_CTRL 0x000016fc -#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_INC (0x7fL<<0) -#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_NUM (0x7fL<<16) -#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_RATE_LIMITER_EN (1L<<31) - - -/* - * rpm_reg definition - * offset: 0x1800 - */ -#define BNX2_RPM_COMMAND 0x00001800 -#define BNX2_RPM_COMMAND_ENABLED (1L<<0) -#define BNX2_RPM_COMMAND_OVERRUN_ABORT (1L<<4) - -#define BNX2_RPM_STATUS 0x00001804 -#define BNX2_RPM_STATUS_MBUF_WAIT (1L<<0) -#define BNX2_RPM_STATUS_FREE_WAIT (1L<<1) - -#define BNX2_RPM_CONFIG 0x00001808 -#define 
BNX2_RPM_CONFIG_NO_PSD_HDR_CKSUM (1L<<0) -#define BNX2_RPM_CONFIG_ACPI_ENA (1L<<1) -#define BNX2_RPM_CONFIG_ACPI_KEEP (1L<<2) -#define BNX2_RPM_CONFIG_MP_KEEP (1L<<3) -#define BNX2_RPM_CONFIG_SORT_VECT_VAL (0xfL<<4) -#define BNX2_RPM_CONFIG_DISABLE_WOL_ASSERT (1L<<30) -#define BNX2_RPM_CONFIG_IGNORE_VLAN (1L<<31) - -#define BNX2_RPM_MGMT_PKT_CTRL 0x0000180c -#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_SORT (0xfL<<0) -#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_RULE (0xfL<<4) -#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_DISCARD_EN (1L<<30) -#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_EN (1L<<31) - -#define BNX2_RPM_VLAN_MATCH0 0x00001810 -#define BNX2_RPM_VLAN_MATCH0_RPM_VLAN_MTCH0_VALUE (0xfffL<<0) - -#define BNX2_RPM_VLAN_MATCH1 0x00001814 -#define BNX2_RPM_VLAN_MATCH1_RPM_VLAN_MTCH1_VALUE (0xfffL<<0) - -#define BNX2_RPM_VLAN_MATCH2 0x00001818 -#define BNX2_RPM_VLAN_MATCH2_RPM_VLAN_MTCH2_VALUE (0xfffL<<0) - -#define BNX2_RPM_VLAN_MATCH3 0x0000181c -#define BNX2_RPM_VLAN_MATCH3_RPM_VLAN_MTCH3_VALUE (0xfffL<<0) - -#define BNX2_RPM_SORT_USER0 0x00001820 -#define BNX2_RPM_SORT_USER0_PM_EN (0xffffL<<0) -#define BNX2_RPM_SORT_USER0_BC_EN (1L<<16) -#define BNX2_RPM_SORT_USER0_MC_EN (1L<<17) -#define BNX2_RPM_SORT_USER0_MC_HSH_EN (1L<<18) -#define BNX2_RPM_SORT_USER0_PROM_EN (1L<<19) -#define BNX2_RPM_SORT_USER0_VLAN_EN (0xfL<<20) -#define BNX2_RPM_SORT_USER0_PROM_VLAN (1L<<24) -#define BNX2_RPM_SORT_USER0_VLAN_NOTMATCH (1L<<25) -#define BNX2_RPM_SORT_USER0_ENA (1L<<31) - -#define BNX2_RPM_SORT_USER1 0x00001824 -#define BNX2_RPM_SORT_USER1_PM_EN (0xffffL<<0) -#define BNX2_RPM_SORT_USER1_BC_EN (1L<<16) -#define BNX2_RPM_SORT_USER1_MC_EN (1L<<17) -#define BNX2_RPM_SORT_USER1_MC_HSH_EN (1L<<18) -#define BNX2_RPM_SORT_USER1_PROM_EN (1L<<19) -#define BNX2_RPM_SORT_USER1_VLAN_EN (0xfL<<20) -#define BNX2_RPM_SORT_USER1_PROM_VLAN (1L<<24) -#define BNX2_RPM_SORT_USER1_ENA (1L<<31) - -#define BNX2_RPM_SORT_USER2 0x00001828 -#define BNX2_RPM_SORT_USER2_PM_EN (0xffffL<<0) -#define BNX2_RPM_SORT_USER2_BC_EN (1L<<16) -#define BNX2_RPM_SORT_USER2_MC_EN (1L<<17) -#define BNX2_RPM_SORT_USER2_MC_HSH_EN (1L<<18) -#define BNX2_RPM_SORT_USER2_PROM_EN (1L<<19) -#define BNX2_RPM_SORT_USER2_VLAN_EN (0xfL<<20) -#define BNX2_RPM_SORT_USER2_PROM_VLAN (1L<<24) -#define BNX2_RPM_SORT_USER2_ENA (1L<<31) - -#define BNX2_RPM_SORT_USER3 0x0000182c -#define BNX2_RPM_SORT_USER3_PM_EN (0xffffL<<0) -#define BNX2_RPM_SORT_USER3_BC_EN (1L<<16) -#define BNX2_RPM_SORT_USER3_MC_EN (1L<<17) -#define BNX2_RPM_SORT_USER3_MC_HSH_EN (1L<<18) -#define BNX2_RPM_SORT_USER3_PROM_EN (1L<<19) -#define BNX2_RPM_SORT_USER3_VLAN_EN (0xfL<<20) -#define BNX2_RPM_SORT_USER3_PROM_VLAN (1L<<24) -#define BNX2_RPM_SORT_USER3_ENA (1L<<31) - -#define BNX2_RPM_STAT_L2_FILTER_DISCARDS 0x00001840 -#define BNX2_RPM_STAT_RULE_CHECKER_DISCARDS 0x00001844 -#define BNX2_RPM_STAT_IFINFTQDISCARDS 0x00001848 -#define BNX2_RPM_STAT_IFINMBUFDISCARD 0x0000184c -#define BNX2_RPM_STAT_RULE_CHECKER_P4_HIT 0x00001850 -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0 0x00001854 -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN (0xffL<<0) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER (0xffL<<16) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN_TYPE (1L<<30) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_EN (1L<<31) - -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1 0x00001858 -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN (0xffL<<0) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER (0xffL<<16) -#define 
BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN_TYPE (1L<<30) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_EN (1L<<31) - -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2 0x0000185c -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN (0xffL<<0) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER (0xffL<<16) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN_TYPE (1L<<30) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_EN (1L<<31) - -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3 0x00001860 -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN (0xffL<<0) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER (0xffL<<16) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN_TYPE (1L<<30) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_EN (1L<<31) - -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4 0x00001864 -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN (0xffL<<0) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER (0xffL<<16) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN_TYPE (1L<<30) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_EN (1L<<31) - -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5 0x00001868 -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN (0xffL<<0) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER (0xffL<<16) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN_TYPE (1L<<30) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_EN (1L<<31) - -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6 0x0000186c -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN (0xffL<<0) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER (0xffL<<16) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN_TYPE (1L<<30) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_EN (1L<<31) - -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7 0x00001870 -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN (0xffL<<0) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER (0xffL<<16) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN_TYPE (1L<<30) -#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_EN (1L<<31) - -#define BNX2_RPM_STAT_AC0 0x00001880 -#define BNX2_RPM_STAT_AC1 0x00001884 -#define BNX2_RPM_STAT_AC2 0x00001888 -#define BNX2_RPM_STAT_AC3 0x0000188c -#define BNX2_RPM_STAT_AC4 0x00001890 -#define BNX2_RPM_RC_CNTL_16 0x000018e0 -#define BNX2_RPM_RC_CNTL_16_OFFSET (0xffL<<0) -#define BNX2_RPM_RC_CNTL_16_CLASS (0x7L<<8) -#define BNX2_RPM_RC_CNTL_16_PRIORITY (1L<<11) -#define BNX2_RPM_RC_CNTL_16_P4 (1L<<12) -#define BNX2_RPM_RC_CNTL_16_HDR_TYPE (0x7L<<13) -#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_START (0L<<13) -#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_IP (1L<<13) -#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP (2L<<13) -#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_UDP (3L<<13) -#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_DATA (4L<<13) -#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP_UDP (5L<<13) -#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_ICMPV6 (6L<<13) -#define BNX2_RPM_RC_CNTL_16_COMP (0x3L<<16) -#define BNX2_RPM_RC_CNTL_16_COMP_EQUAL (0L<<16) -#define BNX2_RPM_RC_CNTL_16_COMP_NEQUAL (1L<<16) -#define BNX2_RPM_RC_CNTL_16_COMP_GREATER (2L<<16) -#define BNX2_RPM_RC_CNTL_16_COMP_LESS (3L<<16) -#define BNX2_RPM_RC_CNTL_16_MAP (1L<<18) -#define BNX2_RPM_RC_CNTL_16_SBIT (1L<<19) -#define BNX2_RPM_RC_CNTL_16_CMDSEL (0x1fL<<20) -#define BNX2_RPM_RC_CNTL_16_DISCARD (1L<<25) -#define BNX2_RPM_RC_CNTL_16_MASK (1L<<26) 
-#define BNX2_RPM_RC_CNTL_16_P1 (1L<<27) -#define BNX2_RPM_RC_CNTL_16_P2 (1L<<28) -#define BNX2_RPM_RC_CNTL_16_P3 (1L<<29) -#define BNX2_RPM_RC_CNTL_16_NBIT (1L<<30) - -#define BNX2_RPM_RC_VALUE_MASK_16 0x000018e4 -#define BNX2_RPM_RC_VALUE_MASK_16_VALUE (0xffffL<<0) -#define BNX2_RPM_RC_VALUE_MASK_16_MASK (0xffffL<<16) - -#define BNX2_RPM_RC_CNTL_17 0x000018e8 -#define BNX2_RPM_RC_CNTL_17_OFFSET (0xffL<<0) -#define BNX2_RPM_RC_CNTL_17_CLASS (0x7L<<8) -#define BNX2_RPM_RC_CNTL_17_PRIORITY (1L<<11) -#define BNX2_RPM_RC_CNTL_17_P4 (1L<<12) -#define BNX2_RPM_RC_CNTL_17_HDR_TYPE (0x7L<<13) -#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_START (0L<<13) -#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_IP (1L<<13) -#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP (2L<<13) -#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_UDP (3L<<13) -#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_DATA (4L<<13) -#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP_UDP (5L<<13) -#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_ICMPV6 (6L<<13) -#define BNX2_RPM_RC_CNTL_17_COMP (0x3L<<16) -#define BNX2_RPM_RC_CNTL_17_COMP_EQUAL (0L<<16) -#define BNX2_RPM_RC_CNTL_17_COMP_NEQUAL (1L<<16) -#define BNX2_RPM_RC_CNTL_17_COMP_GREATER (2L<<16) -#define BNX2_RPM_RC_CNTL_17_COMP_LESS (3L<<16) -#define BNX2_RPM_RC_CNTL_17_MAP (1L<<18) -#define BNX2_RPM_RC_CNTL_17_SBIT (1L<<19) -#define BNX2_RPM_RC_CNTL_17_CMDSEL (0x1fL<<20) -#define BNX2_RPM_RC_CNTL_17_DISCARD (1L<<25) -#define BNX2_RPM_RC_CNTL_17_MASK (1L<<26) -#define BNX2_RPM_RC_CNTL_17_P1 (1L<<27) -#define BNX2_RPM_RC_CNTL_17_P2 (1L<<28) -#define BNX2_RPM_RC_CNTL_17_P3 (1L<<29) -#define BNX2_RPM_RC_CNTL_17_NBIT (1L<<30) - -#define BNX2_RPM_RC_VALUE_MASK_17 0x000018ec -#define BNX2_RPM_RC_VALUE_MASK_17_VALUE (0xffffL<<0) -#define BNX2_RPM_RC_VALUE_MASK_17_MASK (0xffffL<<16) - -#define BNX2_RPM_RC_CNTL_18 0x000018f0 -#define BNX2_RPM_RC_CNTL_18_OFFSET (0xffL<<0) -#define BNX2_RPM_RC_CNTL_18_CLASS (0x7L<<8) -#define BNX2_RPM_RC_CNTL_18_PRIORITY (1L<<11) -#define BNX2_RPM_RC_CNTL_18_P4 (1L<<12) -#define BNX2_RPM_RC_CNTL_18_HDR_TYPE (0x7L<<13) -#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_START (0L<<13) -#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_IP (1L<<13) -#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP (2L<<13) -#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_UDP (3L<<13) -#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_DATA (4L<<13) -#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP_UDP (5L<<13) -#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_ICMPV6 (6L<<13) -#define BNX2_RPM_RC_CNTL_18_COMP (0x3L<<16) -#define BNX2_RPM_RC_CNTL_18_COMP_EQUAL (0L<<16) -#define BNX2_RPM_RC_CNTL_18_COMP_NEQUAL (1L<<16) -#define BNX2_RPM_RC_CNTL_18_COMP_GREATER (2L<<16) -#define BNX2_RPM_RC_CNTL_18_COMP_LESS (3L<<16) -#define BNX2_RPM_RC_CNTL_18_MAP (1L<<18) -#define BNX2_RPM_RC_CNTL_18_SBIT (1L<<19) -#define BNX2_RPM_RC_CNTL_18_CMDSEL (0x1fL<<20) -#define BNX2_RPM_RC_CNTL_18_DISCARD (1L<<25) -#define BNX2_RPM_RC_CNTL_18_MASK (1L<<26) -#define BNX2_RPM_RC_CNTL_18_P1 (1L<<27) -#define BNX2_RPM_RC_CNTL_18_P2 (1L<<28) -#define BNX2_RPM_RC_CNTL_18_P3 (1L<<29) -#define BNX2_RPM_RC_CNTL_18_NBIT (1L<<30) - -#define BNX2_RPM_RC_VALUE_MASK_18 0x000018f4 -#define BNX2_RPM_RC_VALUE_MASK_18_VALUE (0xffffL<<0) -#define BNX2_RPM_RC_VALUE_MASK_18_MASK (0xffffL<<16) - -#define BNX2_RPM_RC_CNTL_19 0x000018f8 -#define BNX2_RPM_RC_CNTL_19_OFFSET (0xffL<<0) -#define BNX2_RPM_RC_CNTL_19_CLASS (0x7L<<8) -#define BNX2_RPM_RC_CNTL_19_PRIORITY (1L<<11) -#define BNX2_RPM_RC_CNTL_19_P4 (1L<<12) -#define BNX2_RPM_RC_CNTL_19_HDR_TYPE (0x7L<<13) -#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_START (0L<<13) -#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_IP (1L<<13) -#define 
BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP (2L<<13) -#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_UDP (3L<<13) -#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_DATA (4L<<13) -#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP_UDP (5L<<13) -#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_ICMPV6 (6L<<13) -#define BNX2_RPM_RC_CNTL_19_COMP (0x3L<<16) -#define BNX2_RPM_RC_CNTL_19_COMP_EQUAL (0L<<16) -#define BNX2_RPM_RC_CNTL_19_COMP_NEQUAL (1L<<16) -#define BNX2_RPM_RC_CNTL_19_COMP_GREATER (2L<<16) -#define BNX2_RPM_RC_CNTL_19_COMP_LESS (3L<<16) -#define BNX2_RPM_RC_CNTL_19_MAP (1L<<18) -#define BNX2_RPM_RC_CNTL_19_SBIT (1L<<19) -#define BNX2_RPM_RC_CNTL_19_CMDSEL (0x1fL<<20) -#define BNX2_RPM_RC_CNTL_19_DISCARD (1L<<25) -#define BNX2_RPM_RC_CNTL_19_MASK (1L<<26) -#define BNX2_RPM_RC_CNTL_19_P1 (1L<<27) -#define BNX2_RPM_RC_CNTL_19_P2 (1L<<28) -#define BNX2_RPM_RC_CNTL_19_P3 (1L<<29) -#define BNX2_RPM_RC_CNTL_19_NBIT (1L<<30) - -#define BNX2_RPM_RC_VALUE_MASK_19 0x000018fc -#define BNX2_RPM_RC_VALUE_MASK_19_VALUE (0xffffL<<0) -#define BNX2_RPM_RC_VALUE_MASK_19_MASK (0xffffL<<16) - -#define BNX2_RPM_RC_CNTL_0 0x00001900 -#define BNX2_RPM_RC_CNTL_0_OFFSET (0xffL<<0) -#define BNX2_RPM_RC_CNTL_0_CLASS (0x7L<<8) -#define BNX2_RPM_RC_CNTL_0_PRIORITY (1L<<11) -#define BNX2_RPM_RC_CNTL_0_P4 (1L<<12) -#define BNX2_RPM_RC_CNTL_0_HDR_TYPE (0x7L<<13) -#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_START (0L<<13) -#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_IP (1L<<13) -#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP (2L<<13) -#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_UDP (3L<<13) -#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_DATA (4L<<13) -#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP_UDP (5L<<13) -#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_ICMPV6 (6L<<13) -#define BNX2_RPM_RC_CNTL_0_COMP (0x3L<<16) -#define BNX2_RPM_RC_CNTL_0_COMP_EQUAL (0L<<16) -#define BNX2_RPM_RC_CNTL_0_COMP_NEQUAL (1L<<16) -#define BNX2_RPM_RC_CNTL_0_COMP_GREATER (2L<<16) -#define BNX2_RPM_RC_CNTL_0_COMP_LESS (3L<<16) -#define BNX2_RPM_RC_CNTL_0_MAP_XI (1L<<18) -#define BNX2_RPM_RC_CNTL_0_SBIT (1L<<19) -#define BNX2_RPM_RC_CNTL_0_CMDSEL (0xfL<<20) -#define BNX2_RPM_RC_CNTL_0_MAP (1L<<24) -#define BNX2_RPM_RC_CNTL_0_CMDSEL_XI (0x1fL<<20) -#define BNX2_RPM_RC_CNTL_0_DISCARD (1L<<25) -#define BNX2_RPM_RC_CNTL_0_MASK (1L<<26) -#define BNX2_RPM_RC_CNTL_0_P1 (1L<<27) -#define BNX2_RPM_RC_CNTL_0_P2 (1L<<28) -#define BNX2_RPM_RC_CNTL_0_P3 (1L<<29) -#define BNX2_RPM_RC_CNTL_0_NBIT (1L<<30) - -#define BNX2_RPM_RC_VALUE_MASK_0 0x00001904 -#define BNX2_RPM_RC_VALUE_MASK_0_VALUE (0xffffL<<0) -#define BNX2_RPM_RC_VALUE_MASK_0_MASK (0xffffL<<16) - -#define BNX2_RPM_RC_CNTL_1 0x00001908 -#define BNX2_RPM_RC_CNTL_1_A (0x3ffffL<<0) -#define BNX2_RPM_RC_CNTL_1_B (0xfffL<<19) -#define BNX2_RPM_RC_CNTL_1_OFFSET_XI (0xffL<<0) -#define BNX2_RPM_RC_CNTL_1_CLASS_XI (0x7L<<8) -#define BNX2_RPM_RC_CNTL_1_PRIORITY_XI (1L<<11) -#define BNX2_RPM_RC_CNTL_1_P4_XI (1L<<12) -#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_XI (0x7L<<13) -#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_START_XI (0L<<13) -#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_IP_XI (1L<<13) -#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_XI (2L<<13) -#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_UDP_XI (3L<<13) -#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_DATA_XI (4L<<13) -#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_UDP_XI (5L<<13) -#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_ICMPV6_XI (6L<<13) -#define BNX2_RPM_RC_CNTL_1_COMP_XI (0x3L<<16) -#define BNX2_RPM_RC_CNTL_1_COMP_EQUAL_XI (0L<<16) -#define BNX2_RPM_RC_CNTL_1_COMP_NEQUAL_XI (1L<<16) -#define BNX2_RPM_RC_CNTL_1_COMP_GREATER_XI (2L<<16) -#define BNX2_RPM_RC_CNTL_1_COMP_LESS_XI (3L<<16) -#define 
-#define BNX2_RPM_RC_CNTL_1_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_1_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_1_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_1_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_1_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_1_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_1_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_1_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_1_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_1 0x0000190c
-#define BNX2_RPM_RC_VALUE_MASK_1_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_1_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_2 0x00001910
-#define BNX2_RPM_RC_CNTL_2_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_2_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_2_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_2_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_2_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_2_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_2_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_2_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_2_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_2_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_2_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_2_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_2_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_2_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_2_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_2_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_2_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_2_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_2_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_2_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_2 0x00001914
-#define BNX2_RPM_RC_VALUE_MASK_2_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_2_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_3 0x00001918
-#define BNX2_RPM_RC_CNTL_3_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_3_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_3_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_3_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_3_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_3_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_3_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_3_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_3_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_3_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_3_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_3_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_3_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_3_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_3_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_3_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_3_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_3_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_3_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_3_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_3 0x0000191c
-#define BNX2_RPM_RC_VALUE_MASK_3_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_3_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_4 0x00001920
-#define BNX2_RPM_RC_CNTL_4_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_4_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_4_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_4_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_4_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_4_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_4_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_4_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_4_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_4_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_4_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_4_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_4_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_4_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_4_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_4_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_4_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_4_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_4_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_4_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_4 0x00001924
-#define BNX2_RPM_RC_VALUE_MASK_4_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_4_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_5 0x00001928
-#define BNX2_RPM_RC_CNTL_5_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_5_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_5_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_5_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_5_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_5_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_5_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_5_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_5_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_5_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_5_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_5_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_5_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_5_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_5_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_5_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_5_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_5_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_5_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_5_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_5 0x0000192c
-#define BNX2_RPM_RC_VALUE_MASK_5_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_5_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_6 0x00001930
-#define BNX2_RPM_RC_CNTL_6_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_6_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_6_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_6_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_6_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_6_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_6_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_6_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_6_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_6_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_6_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_6_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_6_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_6_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_6_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_6_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_6_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_6_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_6_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_6_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_6 0x00001934
-#define BNX2_RPM_RC_VALUE_MASK_6_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_6_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_7 0x00001938
-#define BNX2_RPM_RC_CNTL_7_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_7_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_7_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_7_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_7_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_7_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_7_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_7_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_7_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_7_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_7_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_7_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_7_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_7_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_7_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_7_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_7_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_7_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_7_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_7_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_7 0x0000193c
-#define BNX2_RPM_RC_VALUE_MASK_7_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_7_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_8 0x00001940
-#define BNX2_RPM_RC_CNTL_8_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_8_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_8_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_8_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_8_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_8_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_8_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_8_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_8_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_8_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_8_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_8_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_8_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_8_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_8_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_8_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_8_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_8_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_8_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_8_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_8 0x00001944
-#define BNX2_RPM_RC_VALUE_MASK_8_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_8_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_9 0x00001948
-#define BNX2_RPM_RC_CNTL_9_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_9_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_9_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_9_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_9_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_9_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_9_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_9_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_9_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_9_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_9_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_9_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_9_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_9_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_9_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_9_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_9_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_9_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_9_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_9_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_9 0x0000194c
-#define BNX2_RPM_RC_VALUE_MASK_9_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_9_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_10 0x00001950
-#define BNX2_RPM_RC_CNTL_10_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_10_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_10_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_10_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_10_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_10_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_10_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_10_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_10_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_10_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_10_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_10_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_10_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_10_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_10_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_10_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_10_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_10_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_10_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_10_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_10 0x00001954
-#define BNX2_RPM_RC_VALUE_MASK_10_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_10_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_11 0x00001958
-#define BNX2_RPM_RC_CNTL_11_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_11_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_11_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_11_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_11_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_11_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_11_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_11_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_11_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_11_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_11_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_11_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_11_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_11_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_11_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_11_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_11_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_11_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_11_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_11_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_11 0x0000195c
-#define BNX2_RPM_RC_VALUE_MASK_11_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_11_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_12 0x00001960
-#define BNX2_RPM_RC_CNTL_12_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_12_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_12_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_12_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_12_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_12_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_12_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_12_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_12_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_12_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_12_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_12_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_12_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_12_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_12_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_12_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_12_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_12_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_12_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_12_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_12 0x00001964
-#define BNX2_RPM_RC_VALUE_MASK_12_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_12_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_13 0x00001968
-#define BNX2_RPM_RC_CNTL_13_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_13_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_13_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_13_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_13_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_13_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_13_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_13_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_13_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_13_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_13_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_13_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_13_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_13_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_13_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_13_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_13_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_13_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_13_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_13_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_13 0x0000196c
-#define BNX2_RPM_RC_VALUE_MASK_13_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_13_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_14 0x00001970
-#define BNX2_RPM_RC_CNTL_14_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_14_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_14_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_14_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_14_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_14_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_14_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_14_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_14_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_14_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_14_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_14_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_14_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_14_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_14_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_14_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_14_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_14_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_14_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_14_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_14 0x00001974
-#define BNX2_RPM_RC_VALUE_MASK_14_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_14_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CNTL_15 0x00001978
-#define BNX2_RPM_RC_CNTL_15_A (0x3ffffL<<0)
-#define BNX2_RPM_RC_CNTL_15_B (0xfffL<<19)
-#define BNX2_RPM_RC_CNTL_15_OFFSET_XI (0xffL<<0)
-#define BNX2_RPM_RC_CNTL_15_CLASS_XI (0x7L<<8)
-#define BNX2_RPM_RC_CNTL_15_PRIORITY_XI (1L<<11)
-#define BNX2_RPM_RC_CNTL_15_P4_XI (1L<<12)
-#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_XI (0x7L<<13)
-#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_START_XI (0L<<13)
-#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_IP_XI (1L<<13)
-#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_XI (2L<<13)
-#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_UDP_XI (3L<<13)
-#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_DATA_XI (4L<<13)
-#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_UDP_XI (5L<<13)
-#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_ICMPV6_XI (6L<<13)
-#define BNX2_RPM_RC_CNTL_15_COMP_XI (0x3L<<16)
-#define BNX2_RPM_RC_CNTL_15_COMP_EQUAL_XI (0L<<16)
-#define BNX2_RPM_RC_CNTL_15_COMP_NEQUAL_XI (1L<<16)
-#define BNX2_RPM_RC_CNTL_15_COMP_GREATER_XI (2L<<16)
-#define BNX2_RPM_RC_CNTL_15_COMP_LESS_XI (3L<<16)
-#define BNX2_RPM_RC_CNTL_15_MAP_XI (1L<<18)
-#define BNX2_RPM_RC_CNTL_15_SBIT_XI (1L<<19)
-#define BNX2_RPM_RC_CNTL_15_CMDSEL_XI (0x1fL<<20)
-#define BNX2_RPM_RC_CNTL_15_DISCARD_XI (1L<<25)
-#define BNX2_RPM_RC_CNTL_15_MASK_XI (1L<<26)
-#define BNX2_RPM_RC_CNTL_15_P1_XI (1L<<27)
-#define BNX2_RPM_RC_CNTL_15_P2_XI (1L<<28)
-#define BNX2_RPM_RC_CNTL_15_P3_XI (1L<<29)
-#define BNX2_RPM_RC_CNTL_15_NBIT_XI (1L<<30)
-
-#define BNX2_RPM_RC_VALUE_MASK_15 0x0000197c
-#define BNX2_RPM_RC_VALUE_MASK_15_VALUE (0xffffL<<0)
-#define BNX2_RPM_RC_VALUE_MASK_15_MASK (0xffffL<<16)
-
-#define BNX2_RPM_RC_CONFIG 0x00001980
-#define BNX2_RPM_RC_CONFIG_RULE_ENABLE (0xffffL<<0)
-#define BNX2_RPM_RC_CONFIG_RULE_ENABLE_XI (0xfffffL<<0)
-#define BNX2_RPM_RC_CONFIG_DEF_CLASS (0x7L<<24)
-#define BNX2_RPM_RC_CONFIG_KNUM_OVERWRITE (1L<<31)
-
-#define BNX2_RPM_DEBUG0 0x00001984
-#define BNX2_RPM_DEBUG0_FM_BCNT (0xffffL<<0)
-#define BNX2_RPM_DEBUG0_T_DATA_OFST_VLD (1L<<16)
-#define BNX2_RPM_DEBUG0_T_UDP_OFST_VLD (1L<<17)
-#define BNX2_RPM_DEBUG0_T_TCP_OFST_VLD (1L<<18)
-#define BNX2_RPM_DEBUG0_T_IP_OFST_VLD (1L<<19)
-#define BNX2_RPM_DEBUG0_IP_MORE_FRGMT (1L<<20)
-#define BNX2_RPM_DEBUG0_T_IP_NO_TCP_UDP_HDR (1L<<21)
-#define BNX2_RPM_DEBUG0_LLC_SNAP (1L<<22)
-#define BNX2_RPM_DEBUG0_FM_STARTED (1L<<23)
-#define BNX2_RPM_DEBUG0_DONE (1L<<24)
-#define BNX2_RPM_DEBUG0_WAIT_4_DONE (1L<<25)
-#define BNX2_RPM_DEBUG0_USE_TPBUF_CKSUM (1L<<26)
-#define BNX2_RPM_DEBUG0_RX_NO_PSD_HDR_CKSUM (1L<<27)
-#define BNX2_RPM_DEBUG0_IGNORE_VLAN (1L<<28)
-#define BNX2_RPM_DEBUG0_RP_ENA_ACTIVE (1L<<31)
-
-#define BNX2_RPM_DEBUG1 0x00001988
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST (0xffffL<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IDLE (0L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_ALL (1L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IPLLC (2L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_IP (4L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IP (8L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP_START (16L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP (32L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_TCP (64L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_UDP (128L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_AH (256L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP (512L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP_PAYLOAD (1024L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_DATA (2048L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRY (0x2000L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRYOUT (0x4000L<<0)
-#define BNX2_RPM_DEBUG1_FSM_CUR_ST_LATCH_RESULT (0x8000L<<0)
-#define BNX2_RPM_DEBUG1_HDR_BCNT (0x7ffL<<16)
-#define BNX2_RPM_DEBUG1_UNKNOWN_ETYPE_D (1L<<28)
-#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D2 (1L<<29)
-#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D1 (1L<<30)
-#define BNX2_RPM_DEBUG1_EOF_0XTRA_WD (1L<<31)
-
-#define BNX2_RPM_DEBUG2 0x0000198c
-#define BNX2_RPM_DEBUG2_CMD_HIT_VEC (0xffffL<<0)
-#define BNX2_RPM_DEBUG2_IP_BCNT (0xffL<<16)
-#define BNX2_RPM_DEBUG2_THIS_CMD_M4 (1L<<24)
-#define BNX2_RPM_DEBUG2_THIS_CMD_M3 (1L<<25)
-#define BNX2_RPM_DEBUG2_THIS_CMD_M2 (1L<<26)
-#define BNX2_RPM_DEBUG2_THIS_CMD_M1 (1L<<27)
-#define BNX2_RPM_DEBUG2_IPIPE_EMPTY (1L<<28)
-#define BNX2_RPM_DEBUG2_FM_DISCARD (1L<<29)
-#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D2 (1L<<30)
-#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D1 (1L<<31)
-
-#define BNX2_RPM_DEBUG3 0x00001990
-#define BNX2_RPM_DEBUG3_AVAIL_MBUF_PTR (0x1ffL<<0)
-#define BNX2_RPM_DEBUG3_RDE_RLUPQ_WR_REQ_INT (1L<<9)
-#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_LAST_INT (1L<<10)
-#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_REQ_INT (1L<<11)
-#define BNX2_RPM_DEBUG3_RDE_RBUF_FREE_REQ (1L<<12)
-#define BNX2_RPM_DEBUG3_RDE_RBUF_ALLOC_REQ (1L<<13)
-#define BNX2_RPM_DEBUG3_DFSM_MBUF_NOTAVAIL (1L<<14)
-#define BNX2_RPM_DEBUG3_RBUF_RDE_SOF_DROP (1L<<15)
-#define BNX2_RPM_DEBUG3_DFIFO_VLD_ENTRY_CT (0xfL<<16)
-#define BNX2_RPM_DEBUG3_RDE_SRC_FIFO_ALMFULL (1L<<21)
-#define BNX2_RPM_DEBUG3_DROP_NXT_VLD (1L<<22)
-#define BNX2_RPM_DEBUG3_DROP_NXT (1L<<23)
-#define BNX2_RPM_DEBUG3_FTQ_FSM (0x3L<<24)
-#define BNX2_RPM_DEBUG3_FTQ_FSM_IDLE (0x0L<<24)
-#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_ACK (0x1L<<24)
-#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_FREE (0x2L<<24)
-#define BNX2_RPM_DEBUG3_MBWRITE_FSM (0x3L<<26)
-#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_SOF (0x0L<<26)
-#define BNX2_RPM_DEBUG3_MBWRITE_FSM_GET_MBUF (0x1L<<26)
-#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DMA_DATA (0x2L<<26)
-#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DATA (0x3L<<26)
-#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_EOF (0x4L<<26)
-#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_MF_ACK (0x5L<<26)
-#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DROP_NXT_VLD (0x6L<<26)
-#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DONE (0x7L<<26)
-#define BNX2_RPM_DEBUG3_MBFREE_FSM (1L<<29)
-#define BNX2_RPM_DEBUG3_MBFREE_FSM_IDLE (0L<<29)
-#define BNX2_RPM_DEBUG3_MBFREE_FSM_WAIT_ACK (1L<<29)
-#define BNX2_RPM_DEBUG3_MBALLOC_FSM (1L<<30)
-#define BNX2_RPM_DEBUG3_MBALLOC_FSM_ET_MBUF (0x0L<<30)
-#define BNX2_RPM_DEBUG3_MBALLOC_FSM_IVE_MBUF (0x1L<<30)
-#define BNX2_RPM_DEBUG3_CCODE_EOF_ERROR (1L<<31)
-
-#define BNX2_RPM_DEBUG4 0x00001994
-#define BNX2_RPM_DEBUG4_DFSM_MBUF_CLUSTER (0x1ffffffL<<0)
-#define BNX2_RPM_DEBUG4_DFIFO_CUR_CCODE (0x7L<<25)
-#define BNX2_RPM_DEBUG4_MBWRITE_FSM (0x7L<<28)
-#define BNX2_RPM_DEBUG4_DFIFO_EMPTY (1L<<31)
-
-#define BNX2_RPM_DEBUG5 0x00001998
-#define BNX2_RPM_DEBUG5_RDROP_WPTR (0x1fL<<0)
-#define BNX2_RPM_DEBUG5_RDROP_ACPI_RPTR (0x1fL<<5)
-#define BNX2_RPM_DEBUG5_RDROP_MC_RPTR (0x1fL<<10)
-#define BNX2_RPM_DEBUG5_RDROP_RC_RPTR (0x1fL<<15)
-#define BNX2_RPM_DEBUG5_RDROP_ACPI_EMPTY (1L<<20)
-#define BNX2_RPM_DEBUG5_RDROP_MC_EMPTY (1L<<21)
-#define BNX2_RPM_DEBUG5_RDROP_AEOF_VEC_AT_RDROP_MC_RPTR (1L<<22)
-#define BNX2_RPM_DEBUG5_HOLDREG_WOL_DROP_INT (1L<<23)
-#define BNX2_RPM_DEBUG5_HOLDREG_DISCARD (1L<<24)
-#define BNX2_RPM_DEBUG5_HOLDREG_MBUF_NOTAVAIL (1L<<25)
-#define BNX2_RPM_DEBUG5_HOLDREG_MC_EMPTY (1L<<26)
-#define BNX2_RPM_DEBUG5_HOLDREG_RC_EMPTY (1L<<27)
-#define BNX2_RPM_DEBUG5_HOLDREG_FC_EMPTY (1L<<28)
-#define BNX2_RPM_DEBUG5_HOLDREG_ACPI_EMPTY (1L<<29)
-#define BNX2_RPM_DEBUG5_HOLDREG_FULL_T (1L<<30)
-#define BNX2_RPM_DEBUG5_HOLDREG_RD (1L<<31)
-
-#define BNX2_RPM_DEBUG6 0x0000199c
-#define BNX2_RPM_DEBUG6_ACPI_VEC (0xffffL<<0)
-#define BNX2_RPM_DEBUG6_VEC (0xffffL<<16)
-
-#define BNX2_RPM_DEBUG7 0x000019a0
-#define BNX2_RPM_DEBUG7_RPM_DBG7_LAST_CRC (0xffffffffL<<0)
-
-#define BNX2_RPM_DEBUG8 0x000019a4
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM (0xfL<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_IDLE (0L<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W1_ADDR (1L<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W2_ADDR (2L<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W3_ADDR (3L<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_WAIT_THBUF (4L<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_DATA (5L<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W0_ADDR (6L<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W1_ADDR (7L<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W2_ADDR (8L<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_ADDR (9L<<0)
-#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_WAIT_THBUF (10L<<0)
-#define BNX2_RPM_DEBUG8_COMPARE_AT_W0 (1L<<4)
-#define BNX2_RPM_DEBUG8_COMPARE_AT_W3_DATA (1L<<5)
-#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_WAIT (1L<<6)
-#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W3 (1L<<7)
-#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W2 (1L<<8)
-#define BNX2_RPM_DEBUG8_EOF_W_LTEQ6_VLDBYTES (1L<<9)
-#define BNX2_RPM_DEBUG8_EOF_W_LTEQ4_VLDBYTES (1L<<10)
-#define BNX2_RPM_DEBUG8_NXT_EOF_W_12_VLDBYTES (1L<<11)
-#define BNX2_RPM_DEBUG8_EOF_DET (1L<<12)
-#define BNX2_RPM_DEBUG8_SOF_DET (1L<<13)
-#define BNX2_RPM_DEBUG8_WAIT_4_SOF (1L<<14)
-#define BNX2_RPM_DEBUG8_ALL_DONE (1L<<15)
-#define BNX2_RPM_DEBUG8_THBUF_ADDR (0x7fL<<16)
-#define BNX2_RPM_DEBUG8_BYTE_CTR (0xffL<<24)
-
-#define BNX2_RPM_DEBUG9 0x000019a8
-#define BNX2_RPM_DEBUG9_OUTFIFO_COUNT (0x7L<<0)
-#define BNX2_RPM_DEBUG9_RDE_ACPI_RDY (1L<<3)
-#define BNX2_RPM_DEBUG9_VLD_RD_ENTRY_CT (0x7L<<4)
-#define BNX2_RPM_DEBUG9_OUTFIFO_OVERRUN_OCCURRED (1L<<28)
-#define BNX2_RPM_DEBUG9_INFIFO_OVERRUN_OCCURRED (1L<<29)
-#define BNX2_RPM_DEBUG9_ACPI_MATCH_INT (1L<<30)
-#define BNX2_RPM_DEBUG9_ACPI_ENABLE_SYN (1L<<31)
-#define BNX2_RPM_DEBUG9_BEMEM_R_XI (0x1fL<<0)
-#define BNX2_RPM_DEBUG9_EO_XI (1L<<5)
-#define BNX2_RPM_DEBUG9_AEOF_DE_XI (1L<<6)
-#define BNX2_RPM_DEBUG9_SO_XI (1L<<7)
-#define BNX2_RPM_DEBUG9_WD64_CT_XI (0x1fL<<8)
-#define BNX2_RPM_DEBUG9_EOF_VLDBYTE_XI (0x7L<<13)
-#define BNX2_RPM_DEBUG9_ACPI_RDE_PAT_ID_XI (0xfL<<16)
-#define BNX2_RPM_DEBUG9_CALCRC_RESULT_XI (0x3ffL<<20)
-#define BNX2_RPM_DEBUG9_DATA_IN_VL_XI (1L<<30)
-#define BNX2_RPM_DEBUG9_CALCRC_BUFFER_VLD_XI (1L<<31)
-
-#define BNX2_RPM_ACPI_DBG_BUF_W00 0x000019c0
-#define BNX2_RPM_ACPI_DBG_BUF_W01 0x000019c4
-#define BNX2_RPM_ACPI_DBG_BUF_W02 0x000019c8
-#define BNX2_RPM_ACPI_DBG_BUF_W03 0x000019cc
-#define BNX2_RPM_ACPI_DBG_BUF_W10 0x000019d0
-#define BNX2_RPM_ACPI_DBG_BUF_W11 0x000019d4
-#define BNX2_RPM_ACPI_DBG_BUF_W12 0x000019d8
-#define BNX2_RPM_ACPI_DBG_BUF_W13 0x000019dc
-#define BNX2_RPM_ACPI_DBG_BUF_W20 0x000019e0
-#define BNX2_RPM_ACPI_DBG_BUF_W21 0x000019e4
-#define BNX2_RPM_ACPI_DBG_BUF_W22 0x000019e8
-#define BNX2_RPM_ACPI_DBG_BUF_W23 0x000019ec
-#define BNX2_RPM_ACPI_DBG_BUF_W30 0x000019f0
-#define BNX2_RPM_ACPI_DBG_BUF_W31 0x000019f4
-#define BNX2_RPM_ACPI_DBG_BUF_W32 0x000019f8
-#define BNX2_RPM_ACPI_DBG_BUF_W33 0x000019fc
-#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL 0x00001a00
-#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_BYTE_ADDRESS (0xffffL<<0)
-#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_DEBUGRD (1L<<28)
-#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_MODE (1L<<29)
-#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_INIT (1L<<30)
-#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_WR (1L<<31)
-
-#define BNX2_RPM_ACPI_PATTERN_CTRL 0x00001a04
-#define BNX2_RPM_ACPI_PATTERN_CTRL_PATTERN_ID (0xfL<<0)
-#define BNX2_RPM_ACPI_PATTERN_CTRL_CRC_SM_CLR (1L<<30)
-#define BNX2_RPM_ACPI_PATTERN_CTRL_WR (1L<<31)
-
-#define BNX2_RPM_ACPI_DATA 0x00001a08
-#define BNX2_RPM_ACPI_DATA_PATTERN_BE (0xffffffffL<<0)
-
-#define BNX2_RPM_ACPI_PATTERN_LEN0 0x00001a0c
-#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN3 (0xffL<<0)
-#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN2 (0xffL<<8)
-#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN1 (0xffL<<16)
-#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN0 (0xffL<<24)
-
-#define BNX2_RPM_ACPI_PATTERN_LEN1 0x00001a10
-#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN7 (0xffL<<0)
-#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN6 (0xffL<<8)
-#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN5 (0xffL<<16)
-#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN4 (0xffL<<24)
-
-#define BNX2_RPM_ACPI_PATTERN_CRC0 0x00001a18
-#define BNX2_RPM_ACPI_PATTERN_CRC0_PATTERN_CRC0 (0xffffffffL<<0)
-
-#define BNX2_RPM_ACPI_PATTERN_CRC1 0x00001a1c
-#define BNX2_RPM_ACPI_PATTERN_CRC1_PATTERN_CRC1 (0xffffffffL<<0)
-
-#define BNX2_RPM_ACPI_PATTERN_CRC2 0x00001a20
-#define BNX2_RPM_ACPI_PATTERN_CRC2_PATTERN_CRC2 (0xffffffffL<<0)
-
-#define BNX2_RPM_ACPI_PATTERN_CRC3 0x00001a24
-#define BNX2_RPM_ACPI_PATTERN_CRC3_PATTERN_CRC3 (0xffffffffL<<0)
-
-#define BNX2_RPM_ACPI_PATTERN_CRC4 0x00001a28
-#define BNX2_RPM_ACPI_PATTERN_CRC4_PATTERN_CRC4 (0xffffffffL<<0)
-
-#define BNX2_RPM_ACPI_PATTERN_CRC5 0x00001a2c
-#define BNX2_RPM_ACPI_PATTERN_CRC5_PATTERN_CRC5 (0xffffffffL<<0)
-
-#define BNX2_RPM_ACPI_PATTERN_CRC6 0x00001a30
-#define BNX2_RPM_ACPI_PATTERN_CRC6_PATTERN_CRC6 (0xffffffffL<<0)
-
-#define BNX2_RPM_ACPI_PATTERN_CRC7 0x00001a34
-#define BNX2_RPM_ACPI_PATTERN_CRC7_PATTERN_CRC7 (0xffffffffL<<0)
-
-
-/*
- * rlup_reg definition
- * offset: 0x2000
- */
-#define BNX2_RLUP_RSS_CONFIG 0x0000201c
-#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_XI (0x3L<<0)
-#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_OFF_XI (0L<<0)
-#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI (1L<<0)
-#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_IP_ONLY_XI (2L<<0)
-#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_RES_XI (3L<<0)
-#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_XI (0x3L<<2)
-#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_OFF_XI (0L<<2)
-#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI (1L<<2)
-#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_IP_ONLY_XI (2L<<2)
-#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_RES_XI (3L<<2)
-
-#define BNX2_RLUP_RSS_COMMAND 0x00002048
-#define BNX2_RLUP_RSS_COMMAND_RSS_IND_TABLE_ADDR (0xfUL<<0)
-#define BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK (0xffUL<<4)
-#define BNX2_RLUP_RSS_COMMAND_WRITE (1UL<<12)
-#define BNX2_RLUP_RSS_COMMAND_READ (1UL<<13)
-#define BNX2_RLUP_RSS_COMMAND_HASH_MASK (0x7UL<<14)
-
-#define BNX2_RLUP_RSS_DATA 0x0000204c
-
-
-/*
- * rbuf_reg definition
- * offset: 0x200000
- */
-#define BNX2_RBUF_COMMAND 0x00200000
-#define BNX2_RBUF_COMMAND_ENABLED (1L<<0)
-#define BNX2_RBUF_COMMAND_FREE_INIT (1L<<1)
-#define BNX2_RBUF_COMMAND_RAM_INIT (1L<<2)
-#define BNX2_RBUF_COMMAND_PKT_OFFSET_OVFL (1L<<3)
-#define BNX2_RBUF_COMMAND_OVER_FREE (1L<<4)
-#define BNX2_RBUF_COMMAND_ALLOC_REQ (1L<<5)
-#define BNX2_RBUF_COMMAND_EN_PRI_CHNGE_TE (1L<<6)
-#define BNX2_RBUF_COMMAND_CU_ISOLATE_XI (1L<<5)
-#define BNX2_RBUF_COMMAND_EN_PRI_CHANGE_XI (1L<<6)
-#define BNX2_RBUF_COMMAND_GRC_ENDIAN_CONV_DIS_XI (1L<<7)
-
-#define BNX2_RBUF_STATUS1 0x00200004
-#define BNX2_RBUF_STATUS1_FREE_COUNT (0x3ffL<<0)
-
-#define BNX2_RBUF_STATUS2 0x00200008
-#define BNX2_RBUF_STATUS2_FREE_TAIL (0x1ffL<<0)
-#define BNX2_RBUF_STATUS2_FREE_HEAD (0x1ffL<<16)
-
-#define BNX2_RBUF_CONFIG 0x0020000c
-#define BNX2_RBUF_CONFIG_XOFF_TRIP (0x3ffL<<0)
-#define BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) \
-	((((mtu) - 1500) * 31 / 1000) + 54)
-#define BNX2_RBUF_CONFIG_XON_TRIP (0x3ffL<<16)
-#define BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) \
-	((((mtu) - 1500) * 39 / 1000) + 66)
-#define BNX2_RBUF_CONFIG_VAL(mtu) \
-	(BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) | \
-	(BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) << 16))
-
-#define BNX2_RBUF_FW_BUF_ALLOC 0x00200010
-#define BNX2_RBUF_FW_BUF_ALLOC_VALUE (0x1ffL<<7)
-#define BNX2_RBUF_FW_BUF_ALLOC_TYPE (1L<<16)
-#define BNX2_RBUF_FW_BUF_ALLOC_ALLOC_REQ (1L<<31)
-
-#define BNX2_RBUF_FW_BUF_FREE 0x00200014
-#define BNX2_RBUF_FW_BUF_FREE_COUNT (0x7fL<<0)
-#define BNX2_RBUF_FW_BUF_FREE_TAIL (0x1ffL<<7)
-#define BNX2_RBUF_FW_BUF_FREE_HEAD (0x1ffL<<16)
-#define BNX2_RBUF_FW_BUF_FREE_TYPE (1L<<25)
-#define BNX2_RBUF_FW_BUF_FREE_FREE_REQ (1L<<31)
-
-#define BNX2_RBUF_FW_BUF_SEL 0x00200018
-#define BNX2_RBUF_FW_BUF_SEL_COUNT (0x7fL<<0)
-#define BNX2_RBUF_FW_BUF_SEL_TAIL (0x1ffL<<7)
-#define BNX2_RBUF_FW_BUF_SEL_HEAD (0x1ffL<<16)
-#define BNX2_RBUF_FW_BUF_SEL_SEL_REQ (1L<<31)
-
-#define BNX2_RBUF_CONFIG2 0x0020001c
-#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP (0x3ffL<<0)
-#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) \
-	((((mtu) - 1500) * 4 / 1000) + 5)
-#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP (0x3ffL<<16)
-#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) \
-	((((mtu) - 1500) * 2 / 100) + 30)
-#define BNX2_RBUF_CONFIG2_VAL(mtu) \
-	(BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) | \
-	(BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) << 16))
-
-#define BNX2_RBUF_CONFIG3 0x00200020
-#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP (0x3ffL<<0)
-#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) \
-	((((mtu) - 1500) * 12 / 1000) + 18)
-#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP (0x3ffL<<16)
-#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) \
-	((((mtu) - 1500) * 2 / 100) + 30)
-#define BNX2_RBUF_CONFIG3_VAL(mtu) \
-	(BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) | \
-	(BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) << 16))
-
-#define BNX2_RBUF_PKT_DATA 0x00208000
-#define BNX2_RBUF_CLIST_DATA 0x00210000
-#define BNX2_RBUF_BUF_DATA 0x00220000
-
-
-/*
- * rv2p_reg definition
- * offset: 0x2800
- */
-#define BNX2_RV2P_COMMAND 0x00002800
-#define BNX2_RV2P_COMMAND_ENABLED (1L<<0)
-#define BNX2_RV2P_COMMAND_PROC1_INTRPT (1L<<1)
-#define BNX2_RV2P_COMMAND_PROC2_INTRPT (1L<<2)
-#define BNX2_RV2P_COMMAND_ABORT0 (1L<<4)
-#define BNX2_RV2P_COMMAND_ABORT1 (1L<<5)
-#define BNX2_RV2P_COMMAND_ABORT2 (1L<<6)
-#define BNX2_RV2P_COMMAND_ABORT3 (1L<<7)
-#define BNX2_RV2P_COMMAND_ABORT4 (1L<<8)
-#define BNX2_RV2P_COMMAND_ABORT5 (1L<<9)
-#define BNX2_RV2P_COMMAND_PROC1_RESET (1L<<16)
-#define BNX2_RV2P_COMMAND_PROC2_RESET (1L<<17)
-#define BNX2_RV2P_COMMAND_CTXIF_RESET (1L<<18)
-
-#define BNX2_RV2P_STATUS 0x00002804
-#define BNX2_RV2P_STATUS_ALWAYS_0 (1L<<0)
-#define BNX2_RV2P_STATUS_RV2P_GEN_STAT0_CNT (1L<<8)
-#define BNX2_RV2P_STATUS_RV2P_GEN_STAT1_CNT (1L<<9)
-#define BNX2_RV2P_STATUS_RV2P_GEN_STAT2_CNT (1L<<10)
-#define BNX2_RV2P_STATUS_RV2P_GEN_STAT3_CNT (1L<<11)
-#define BNX2_RV2P_STATUS_RV2P_GEN_STAT4_CNT (1L<<12)
-#define BNX2_RV2P_STATUS_RV2P_GEN_STAT5_CNT (1L<<13)
-
-#define BNX2_RV2P_CONFIG 0x00002808
-#define BNX2_RV2P_CONFIG_STALL_PROC1 (1L<<0)
-#define BNX2_RV2P_CONFIG_STALL_PROC2 (1L<<1)
-#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT0 (1L<<8)
-#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT1 (1L<<9)
-#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT2 (1L<<10)
-#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT3 (1L<<11)
-#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT4 (1L<<12)
-#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT5 (1L<<13)
-#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT0 (1L<<16)
-#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT1 (1L<<17)
-#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT2 (1L<<18)
-#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT3 (1L<<19)
-#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT4 (1L<<20)
-#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT5 (1L<<21)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE (0xfL<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_256 (0L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_512 (1L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_1K (2L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_2K (3L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_4K (4L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_8K (5L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_16K (6L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_32K (7L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_64K (8L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_128K (9L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_256K (10L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_512K (11L<<24)
-#define BNX2_RV2P_CONFIG_PAGE_SIZE_1M (12L<<24)
-
-#define BNX2_RV2P_GEN_BFR_ADDR_0 0x00002810
-#define BNX2_RV2P_GEN_BFR_ADDR_0_VALUE (0xffffL<<16)
-
-#define BNX2_RV2P_GEN_BFR_ADDR_1 0x00002814
-#define BNX2_RV2P_GEN_BFR_ADDR_1_VALUE (0xffffL<<16)
-
-#define BNX2_RV2P_GEN_BFR_ADDR_2 0x00002818
-#define BNX2_RV2P_GEN_BFR_ADDR_2_VALUE (0xffffL<<16)
-
-#define BNX2_RV2P_GEN_BFR_ADDR_3 0x0000281c
-#define BNX2_RV2P_GEN_BFR_ADDR_3_VALUE (0xffffL<<16)
-
-#define BNX2_RV2P_INSTR_HIGH 0x00002830
-#define BNX2_RV2P_INSTR_HIGH_HIGH (0x1fL<<0)
-
-#define BNX2_RV2P_INSTR_LOW 0x00002834
-#define BNX2_RV2P_INSTR_LOW_LOW (0xffffffffL<<0)
-
-#define BNX2_RV2P_PROC1_ADDR_CMD 0x00002838
-#define BNX2_RV2P_PROC1_ADDR_CMD_ADD (0x3ffL<<0)
-#define BNX2_RV2P_PROC1_ADDR_CMD_RDWR (1L<<31)
-
-#define BNX2_RV2P_PROC2_ADDR_CMD 0x0000283c
-#define BNX2_RV2P_PROC2_ADDR_CMD_ADD (0x3ffL<<0)
-#define BNX2_RV2P_PROC2_ADDR_CMD_RDWR (1L<<31)
-
-#define BNX2_RV2P_PROC1_GRC_DEBUG 0x00002840
-#define BNX2_RV2P_PROC2_GRC_DEBUG 0x00002844
-#define BNX2_RV2P_GRC_PROC_DEBUG 0x00002848
-#define BNX2_RV2P_DEBUG_VECT_PEEK 0x0000284c
-#define BNX2_RV2P_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
-#define BNX2_RV2P_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
-#define BNX2_RV2P_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
-#define BNX2_RV2P_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
-#define BNX2_RV2P_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
-#define BNX2_RV2P_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
-
-#define BNX2_RV2P_MPFE_PFE_CTL 0x00002afc
-#define BNX2_RV2P_MPFE_PFE_CTL_INC_USAGE_CNT (1L<<0)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE (0xfL<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_0 (0L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_1 (1L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_2 (2L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_3 (3L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_4 (4L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_5 (5L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_6 (6L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_7 (7L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_8 (8L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_9 (9L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_10 (10L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_11 (11L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_12 (12L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_13 (13L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_14 (14L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_15 (15L<<4)
-#define BNX2_RV2P_MPFE_PFE_CTL_PFE_COUNT (0xfL<<12)
-#define BNX2_RV2P_MPFE_PFE_CTL_OFFSET (0x1ffL<<16)
-
-#define BNX2_RV2P_RV2PPQ 0x00002b40
-#define BNX2_RV2P_PFTQ_CMD 0x00002b78
-#define BNX2_RV2P_PFTQ_CMD_OFFSET (0x3ffL<<0)
-#define BNX2_RV2P_PFTQ_CMD_WR_TOP (1L<<10)
-#define BNX2_RV2P_PFTQ_CMD_WR_TOP_0 (0L<<10)
-#define BNX2_RV2P_PFTQ_CMD_WR_TOP_1 (1L<<10)
-#define BNX2_RV2P_PFTQ_CMD_SFT_RESET (1L<<25)
-#define BNX2_RV2P_PFTQ_CMD_RD_DATA (1L<<26)
-#define BNX2_RV2P_PFTQ_CMD_ADD_INTERVEN (1L<<27)
-#define BNX2_RV2P_PFTQ_CMD_ADD_DATA (1L<<28)
-#define BNX2_RV2P_PFTQ_CMD_INTERVENE_CLR (1L<<29)
-#define BNX2_RV2P_PFTQ_CMD_POP (1L<<30)
-#define BNX2_RV2P_PFTQ_CMD_BUSY (1L<<31)
-
-#define BNX2_RV2P_PFTQ_CTL 0x00002b7c
-#define BNX2_RV2P_PFTQ_CTL_INTERVENE (1L<<0)
-#define BNX2_RV2P_PFTQ_CTL_OVERFLOW (1L<<1)
-#define BNX2_RV2P_PFTQ_CTL_FORCE_INTERVENE (1L<<2)
-#define BNX2_RV2P_PFTQ_CTL_MAX_DEPTH (0x3ffL<<12)
-#define BNX2_RV2P_PFTQ_CTL_CUR_DEPTH (0x3ffL<<22)
-
-#define BNX2_RV2P_RV2PTQ 0x00002b80
-#define BNX2_RV2P_TFTQ_CMD 0x00002bb8
-#define BNX2_RV2P_TFTQ_CMD_OFFSET (0x3ffL<<0)
-#define BNX2_RV2P_TFTQ_CMD_WR_TOP (1L<<10)
-#define BNX2_RV2P_TFTQ_CMD_WR_TOP_0 (0L<<10)
-#define BNX2_RV2P_TFTQ_CMD_WR_TOP_1 (1L<<10)
-#define BNX2_RV2P_TFTQ_CMD_SFT_RESET (1L<<25)
-#define BNX2_RV2P_TFTQ_CMD_RD_DATA (1L<<26)
-#define BNX2_RV2P_TFTQ_CMD_ADD_INTERVEN (1L<<27)
-#define BNX2_RV2P_TFTQ_CMD_ADD_DATA (1L<<28)
-#define BNX2_RV2P_TFTQ_CMD_INTERVENE_CLR (1L<<29)
-#define BNX2_RV2P_TFTQ_CMD_POP (1L<<30)
-#define BNX2_RV2P_TFTQ_CMD_BUSY (1L<<31)
-
-#define BNX2_RV2P_TFTQ_CTL 0x00002bbc
-#define BNX2_RV2P_TFTQ_CTL_INTERVENE (1L<<0)
-#define BNX2_RV2P_TFTQ_CTL_OVERFLOW (1L<<1)
-#define BNX2_RV2P_TFTQ_CTL_FORCE_INTERVENE (1L<<2)
-#define BNX2_RV2P_TFTQ_CTL_MAX_DEPTH (0x3ffL<<12)
-#define BNX2_RV2P_TFTQ_CTL_CUR_DEPTH (0x3ffL<<22)
-
-#define BNX2_RV2P_RV2PMQ 0x00002bc0
-#define BNX2_RV2P_MFTQ_CMD 0x00002bf8
-#define BNX2_RV2P_MFTQ_CMD_OFFSET (0x3ffL<<0)
-#define BNX2_RV2P_MFTQ_CMD_WR_TOP (1L<<10)
-#define BNX2_RV2P_MFTQ_CMD_WR_TOP_0 (0L<<10)
-#define BNX2_RV2P_MFTQ_CMD_WR_TOP_1 (1L<<10)
-#define BNX2_RV2P_MFTQ_CMD_SFT_RESET (1L<<25)
-#define BNX2_RV2P_MFTQ_CMD_RD_DATA (1L<<26)
-#define BNX2_RV2P_MFTQ_CMD_ADD_INTERVEN (1L<<27)
-#define BNX2_RV2P_MFTQ_CMD_ADD_DATA (1L<<28)
-#define BNX2_RV2P_MFTQ_CMD_INTERVENE_CLR (1L<<29)
-#define BNX2_RV2P_MFTQ_CMD_POP (1L<<30)
-#define BNX2_RV2P_MFTQ_CMD_BUSY (1L<<31)
-
-#define BNX2_RV2P_MFTQ_CTL 0x00002bfc
-#define BNX2_RV2P_MFTQ_CTL_INTERVENE (1L<<0)
-#define BNX2_RV2P_MFTQ_CTL_OVERFLOW (1L<<1)
-#define BNX2_RV2P_MFTQ_CTL_FORCE_INTERVENE (1L<<2)
-#define BNX2_RV2P_MFTQ_CTL_MAX_DEPTH (0x3ffL<<12)
-#define BNX2_RV2P_MFTQ_CTL_CUR_DEPTH (0x3ffL<<22)
-
-
-
-/*
- * mq_reg definition
- * offset: 0x3c00
- */
-#define BNX2_MQ_COMMAND 0x00003c00
-#define BNX2_MQ_COMMAND_ENABLED (1L<<0)
-#define BNX2_MQ_COMMAND_INIT (1L<<1)
-#define BNX2_MQ_COMMAND_OVERFLOW (1L<<4)
-#define BNX2_MQ_COMMAND_WR_ERROR (1L<<5)
-#define BNX2_MQ_COMMAND_RD_ERROR (1L<<6)
-#define BNX2_MQ_COMMAND_IDB_CFG_ERROR (1L<<7)
-#define BNX2_MQ_COMMAND_IDB_OVERFLOW (1L<<10)
-#define BNX2_MQ_COMMAND_NO_BIN_ERROR (1L<<11)
-#define BNX2_MQ_COMMAND_NO_MAP_ERROR (1L<<12)
-
-#define BNX2_MQ_STATUS 0x00003c04
-#define BNX2_MQ_STATUS_CTX_ACCESS_STAT (1L<<16)
-#define BNX2_MQ_STATUS_CTX_ACCESS64_STAT (1L<<17)
-#define BNX2_MQ_STATUS_PCI_STALL_STAT (1L<<18)
-#define BNX2_MQ_STATUS_IDB_OFLOW_STAT (1L<<19)
-
-#define BNX2_MQ_CONFIG 0x00003c08
-#define BNX2_MQ_CONFIG_TX_HIGH_PRI (1L<<0)
-#define BNX2_MQ_CONFIG_HALT_DIS (1L<<1)
-#define BNX2_MQ_CONFIG_BIN_MQ_MODE (1L<<2)
-#define BNX2_MQ_CONFIG_DIS_IDB_DROP (1L<<3)
-#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE (0x7L<<4)
-#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256 (0L<<4)
-#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_512 (1L<<4)
-#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_1K (2L<<4)
-#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_2K (3L<<4)
-#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_4K (4L<<4)
-#define BNX2_MQ_CONFIG_MAX_DEPTH (0x7fL<<8)
-#define BNX2_MQ_CONFIG_CUR_DEPTH (0x7fL<<20)
-
-#define BNX2_MQ_ENQUEUE1 0x00003c0c
-#define BNX2_MQ_ENQUEUE1_OFFSET (0x3fL<<2)
-#define BNX2_MQ_ENQUEUE1_CID (0x3fffL<<8)
-#define BNX2_MQ_ENQUEUE1_BYTE_MASK (0xfL<<24)
-#define BNX2_MQ_ENQUEUE1_KNL_MODE (1L<<28)
-
-#define BNX2_MQ_ENQUEUE2 0x00003c10
-#define BNX2_MQ_BAD_WR_ADDR 0x00003c14
-#define BNX2_MQ_BAD_RD_ADDR 0x00003c18
-#define BNX2_MQ_KNL_BYP_WIND_START 0x00003c1c
-#define BNX2_MQ_KNL_BYP_WIND_START_VALUE (0xfffffL<<12)
-
-#define BNX2_MQ_KNL_WIND_END 0x00003c20
-#define BNX2_MQ_KNL_WIND_END_VALUE (0xffffffL<<8)
-
-#define BNX2_MQ_KNL_WRITE_MASK1 0x00003c24
-#define BNX2_MQ_KNL_TX_MASK1 0x00003c28
-#define BNX2_MQ_KNL_CMD_MASK1 0x00003c2c
-#define BNX2_MQ_KNL_COND_ENQUEUE_MASK1 0x00003c30
-#define BNX2_MQ_KNL_RX_V2P_MASK1 0x00003c34
-#define BNX2_MQ_KNL_WRITE_MASK2 0x00003c38
-#define BNX2_MQ_KNL_TX_MASK2 0x00003c3c
-#define BNX2_MQ_KNL_CMD_MASK2 0x00003c40
-#define BNX2_MQ_KNL_COND_ENQUEUE_MASK2 0x00003c44
-#define BNX2_MQ_KNL_RX_V2P_MASK2 0x00003c48
-#define BNX2_MQ_KNL_BYP_WRITE_MASK1 0x00003c4c
-#define BNX2_MQ_KNL_BYP_TX_MASK1 0x00003c50
-#define BNX2_MQ_KNL_BYP_CMD_MASK1 0x00003c54
-#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK1 0x00003c58
-#define BNX2_MQ_KNL_BYP_RX_V2P_MASK1 0x00003c5c
-#define BNX2_MQ_KNL_BYP_WRITE_MASK2 0x00003c60
-#define BNX2_MQ_KNL_BYP_TX_MASK2 0x00003c64
-#define BNX2_MQ_KNL_BYP_CMD_MASK2 0x00003c68
-#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK2 0x00003c6c
-#define BNX2_MQ_KNL_BYP_RX_V2P_MASK2 0x00003c70
-#define BNX2_MQ_MEM_WR_ADDR 0x00003c74
-#define BNX2_MQ_MEM_WR_ADDR_VALUE (0x3fL<<0)
-
-#define BNX2_MQ_MEM_WR_DATA0 0x00003c78
-#define BNX2_MQ_MEM_WR_DATA0_VALUE (0xffffffffL<<0)
-
-#define BNX2_MQ_MEM_WR_DATA1 0x00003c7c
-#define BNX2_MQ_MEM_WR_DATA1_VALUE (0xffffffffL<<0)
-
-#define BNX2_MQ_MEM_WR_DATA2 0x00003c80
-#define BNX2_MQ_MEM_WR_DATA2_VALUE (0x3fffffffL<<0)
-#define BNX2_MQ_MEM_WR_DATA2_VALUE_XI (0x7fffffffL<<0)
-
-#define BNX2_MQ_MEM_RD_ADDR 0x00003c84
-#define BNX2_MQ_MEM_RD_ADDR_VALUE (0x3fL<<0)
-
-#define BNX2_MQ_MEM_RD_DATA0 0x00003c88
-#define BNX2_MQ_MEM_RD_DATA0_VALUE (0xffffffffL<<0)
-
-#define BNX2_MQ_MEM_RD_DATA1 0x00003c8c
-#define BNX2_MQ_MEM_RD_DATA1_VALUE (0xffffffffL<<0)
-
-#define BNX2_MQ_MEM_RD_DATA2 0x00003c90
-#define BNX2_MQ_MEM_RD_DATA2_VALUE (0x3fffffffL<<0)
-#define BNX2_MQ_MEM_RD_DATA2_VALUE_XI (0x7fffffffL<<0)
-
-#define BNX2_MQ_MAP_L2_3 0x00003d2c
-#define BNX2_MQ_MAP_L2_3_MQ_OFFSET (0xffL<<0)
-#define BNX2_MQ_MAP_L2_3_SZ (0x3L<<8)
-#define BNX2_MQ_MAP_L2_3_CTX_OFFSET (0x2ffL<<10)
-#define BNX2_MQ_MAP_L2_3_BIN_OFFSET (0x7L<<23)
-#define BNX2_MQ_MAP_L2_3_ARM (0x3L<<26)
-#define BNX2_MQ_MAP_L2_3_ENA (0x1L<<31)
-#define BNX2_MQ_MAP_L2_3_DEFAULT 0x82004646
-
-#define BNX2_MQ_MAP_L2_5 0x00003d34
-#define BNX2_MQ_MAP_L2_5_ARM (0x3L<<26)
-
-/*
- * tsch_reg definition
- * offset: 0x4c00
- */
-#define BNX2_TSCH_TSS_CFG 0x00004c1c
-#define BNX2_TSCH_TSS_CFG_TSS_START_CID (0x7ffL<<8)
-#define BNX2_TSCH_TSS_CFG_NUM_OF_TSS_CON (0xfL<<24)
-
-
-
-/*
- * tbdr_reg definition
- * offset: 0x5000
- */
-#define BNX2_TBDR_COMMAND 0x00005000
-#define BNX2_TBDR_COMMAND_ENABLE (1L<<0)
-#define BNX2_TBDR_COMMAND_SOFT_RST (1L<<1)
-#define BNX2_TBDR_COMMAND_MSTR_ABORT (1L<<4)
-
-#define BNX2_TBDR_STATUS 0x00005004
-#define BNX2_TBDR_STATUS_DMA_WAIT (1L<<0)
-#define BNX2_TBDR_STATUS_FTQ_WAIT (1L<<1)
-#define BNX2_TBDR_STATUS_FIFO_OVERFLOW (1L<<2)
-#define BNX2_TBDR_STATUS_FIFO_UNDERFLOW (1L<<3)
-#define BNX2_TBDR_STATUS_SEARCHMISS_ERROR (1L<<4)
-#define BNX2_TBDR_STATUS_FTQ_ENTRY_CNT (1L<<5)
-#define BNX2_TBDR_STATUS_BURST_CNT (1L<<6)
-
-#define BNX2_TBDR_CONFIG 0x00005008
-#define BNX2_TBDR_CONFIG_MAX_BDS (0xffL<<0)
-#define BNX2_TBDR_CONFIG_SWAP_MODE (1L<<8)
-#define BNX2_TBDR_CONFIG_PRIORITY (1L<<9)
-#define BNX2_TBDR_CONFIG_CACHE_NEXT_PAGE_PTRS (1L<<10)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE (0xfL<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_256 (0L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_512 (1L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_1K (2L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_2K (3L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_4K (4L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_8K (5L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_16K (6L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_32K (7L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_64K (8L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_128K (9L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_256K (10L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_512K (11L<<24)
-#define BNX2_TBDR_CONFIG_PAGE_SIZE_1M (12L<<24)
-
-#define BNX2_TBDR_DEBUG_VECT_PEEK 0x0000500c
-#define BNX2_TBDR_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
-#define BNX2_TBDR_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
-#define BNX2_TBDR_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
-#define BNX2_TBDR_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
-#define BNX2_TBDR_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
-#define BNX2_TBDR_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
-
-#define BNX2_TBDR_CKSUM_ERROR_STATUS 0x00005010
-#define BNX2_TBDR_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0)
-#define BNX2_TBDR_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16)
-
-#define BNX2_TBDR_TBDRQ 0x000053c0
-#define BNX2_TBDR_FTQ_CMD 0x000053f8
-#define BNX2_TBDR_FTQ_CMD_OFFSET (0x3ffL<<0)
-#define BNX2_TBDR_FTQ_CMD_WR_TOP (1L<<10)
-#define BNX2_TBDR_FTQ_CMD_WR_TOP_0 (0L<<10)
-#define BNX2_TBDR_FTQ_CMD_WR_TOP_1 (1L<<10)
-#define BNX2_TBDR_FTQ_CMD_SFT_RESET (1L<<25)
-#define BNX2_TBDR_FTQ_CMD_RD_DATA (1L<<26)
-#define BNX2_TBDR_FTQ_CMD_ADD_INTERVEN (1L<<27)
-#define BNX2_TBDR_FTQ_CMD_ADD_DATA (1L<<28)
-#define BNX2_TBDR_FTQ_CMD_INTERVENE_CLR (1L<<29)
-#define BNX2_TBDR_FTQ_CMD_POP (1L<<30)
-#define BNX2_TBDR_FTQ_CMD_BUSY (1L<<31)
-
-#define BNX2_TBDR_FTQ_CTL 0x000053fc
-#define BNX2_TBDR_FTQ_CTL_INTERVENE (1L<<0)
-#define BNX2_TBDR_FTQ_CTL_OVERFLOW (1L<<1)
-#define BNX2_TBDR_FTQ_CTL_FORCE_INTERVENE (1L<<2)
-#define BNX2_TBDR_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
-#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
-
-
-
-/*
- * tdma_reg definition
- * offset: 0x5c00
- */
-#define BNX2_TDMA_COMMAND 0x00005c00
-#define BNX2_TDMA_COMMAND_ENABLED (1L<<0)
-#define BNX2_TDMA_COMMAND_MASTER_ABORT (1L<<4)
-#define BNX2_TDMA_COMMAND_CS16_ERR (1L<<5)
-#define BNX2_TDMA_COMMAND_BAD_L2_LENGTH_ABORT (1L<<7)
-#define BNX2_TDMA_COMMAND_MASK_CS1 (1L<<20)
-#define BNX2_TDMA_COMMAND_MASK_CS2 (1L<<21)
-#define BNX2_TDMA_COMMAND_MASK_CS3 (1L<<22)
-#define BNX2_TDMA_COMMAND_MASK_CS4 (1L<<23)
-#define BNX2_TDMA_COMMAND_FORCE_ILOCK_CKERR (1L<<24)
-#define BNX2_TDMA_COMMAND_OFIFO_CLR (1L<<30)
-#define BNX2_TDMA_COMMAND_IFIFO_CLR (1L<<31)
-
-#define BNX2_TDMA_STATUS 0x00005c04
-#define BNX2_TDMA_STATUS_DMA_WAIT (1L<<0)
-#define BNX2_TDMA_STATUS_PAYLOAD_WAIT (1L<<1)
-#define BNX2_TDMA_STATUS_PATCH_FTQ_WAIT (1L<<2)
-#define BNX2_TDMA_STATUS_LOCK_WAIT (1L<<3)
-#define BNX2_TDMA_STATUS_FTQ_ENTRY_CNT (1L<<16)
-#define BNX2_TDMA_STATUS_BURST_CNT (1L<<17)
-#define BNX2_TDMA_STATUS_MAX_IFIFO_DEPTH (0x3fL<<20)
-#define BNX2_TDMA_STATUS_OFIFO_OVERFLOW (1L<<30)
-#define BNX2_TDMA_STATUS_IFIFO_OVERFLOW (1L<<31)
-
-#define BNX2_TDMA_CONFIG 0x00005c08
-#define BNX2_TDMA_CONFIG_ONE_DMA (1L<<0)
-#define BNX2_TDMA_CONFIG_ONE_RECORD (1L<<1)
-#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN (0x3L<<2)
-#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_0 (0L<<2)
-#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_1 (1L<<2)
-#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_2 (2L<<2)
-#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_3 (3L<<2)
-#define BNX2_TDMA_CONFIG_LIMIT_SZ (0xfL<<4)
-#define BNX2_TDMA_CONFIG_LIMIT_SZ_64 (0L<<4)
-#define BNX2_TDMA_CONFIG_LIMIT_SZ_128 (0x4L<<4)
-#define BNX2_TDMA_CONFIG_LIMIT_SZ_256 (0x6L<<4)
-#define BNX2_TDMA_CONFIG_LIMIT_SZ_512 (0x8L<<4)
-#define BNX2_TDMA_CONFIG_LINE_SZ (0xfL<<8)
-#define BNX2_TDMA_CONFIG_LINE_SZ_64 (0L<<8)
-#define BNX2_TDMA_CONFIG_LINE_SZ_128 (4L<<8)
-#define BNX2_TDMA_CONFIG_LINE_SZ_256 (6L<<8)
-#define BNX2_TDMA_CONFIG_LINE_SZ_512 (8L<<8)
-#define BNX2_TDMA_CONFIG_ALIGN_ENA (1L<<15)
-#define BNX2_TDMA_CONFIG_CHK_L2_BD (1L<<16)
-#define BNX2_TDMA_CONFIG_CMPL_ENTRY (1L<<17)
-#define BNX2_TDMA_CONFIG_OFIFO_CMP (1L<<19)
-#define BNX2_TDMA_CONFIG_OFIFO_CMP_3 (0L<<19)
-#define BNX2_TDMA_CONFIG_OFIFO_CMP_2 (1L<<19)
-#define BNX2_TDMA_CONFIG_FIFO_CMP (0xfL<<20)
-#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_XI (0x7L<<20)
-#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_0_XI (0L<<20)
-#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_4_XI (1L<<20)
-#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_8_XI (2L<<20)
-#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_16_XI (3L<<20)
-#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_32_XI (4L<<20)
-#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_64_XI (5L<<20)
-#define BNX2_TDMA_CONFIG_FIFO_CMP_EN_XI (1L<<23)
-#define BNX2_TDMA_CONFIG_BYTES_OST_XI (0x7L<<24)
-#define BNX2_TDMA_CONFIG_BYTES_OST_512_XI (0L<<24)
-#define BNX2_TDMA_CONFIG_BYTES_OST_1024_XI (1L<<24)
-#define BNX2_TDMA_CONFIG_BYTES_OST_2048_XI (2L<<24)
-#define BNX2_TDMA_CONFIG_BYTES_OST_4096_XI (3L<<24)
-#define BNX2_TDMA_CONFIG_BYTES_OST_8192_XI (4L<<24)
-#define BNX2_TDMA_CONFIG_BYTES_OST_16384_XI (5L<<24)
-#define BNX2_TDMA_CONFIG_HC_BYPASS_XI (1L<<27)
-#define BNX2_TDMA_CONFIG_LCL_MRRS_XI (0x7L<<28)
-#define BNX2_TDMA_CONFIG_LCL_MRRS_128_XI (0L<<28)
-#define BNX2_TDMA_CONFIG_LCL_MRRS_256_XI (1L<<28)
-#define BNX2_TDMA_CONFIG_LCL_MRRS_512_XI (2L<<28)
-#define BNX2_TDMA_CONFIG_LCL_MRRS_1024_XI (3L<<28)
-#define BNX2_TDMA_CONFIG_LCL_MRRS_2048_XI (4L<<28)
-#define BNX2_TDMA_CONFIG_LCL_MRRS_4096_XI (5L<<28)
-#define BNX2_TDMA_CONFIG_LCL_MRRS_EN_XI (1L<<31)
-
-#define BNX2_TDMA_PAYLOAD_PROD 0x00005c0c
-#define BNX2_TDMA_PAYLOAD_PROD_VALUE (0x1fffL<<3)
-
-#define BNX2_TDMA_DBG_WATCHDOG 0x00005c10
-#define BNX2_TDMA_DBG_TRIGGER 0x00005c14
-#define BNX2_TDMA_DMAD_FSM 0x00005c80
-#define BNX2_TDMA_DMAD_FSM_BD_INVLD (1L<<0)
-#define BNX2_TDMA_DMAD_FSM_PUSH (0xfL<<4)
-#define BNX2_TDMA_DMAD_FSM_ARB_TBDC (0x3L<<8)
-#define BNX2_TDMA_DMAD_FSM_ARB_CTX (1L<<12)
-#define BNX2_TDMA_DMAD_FSM_DR_INTF (1L<<16)
-#define BNX2_TDMA_DMAD_FSM_DMAD (0x7L<<20)
-#define BNX2_TDMA_DMAD_FSM_BD (0xfL<<24)
-
-#define BNX2_TDMA_DMAD_STATUS 0x00005c84
-#define BNX2_TDMA_DMAD_STATUS_RHOLD_PUSH_ENTRY (0x3L<<0)
-#define BNX2_TDMA_DMAD_STATUS_RHOLD_DMAD_ENTRY (0x3L<<4)
-#define BNX2_TDMA_DMAD_STATUS_RHOLD_BD_ENTRY (0x3L<<8)
-#define BNX2_TDMA_DMAD_STATUS_IFTQ_ENUM (0xfL<<12)
-
-#define BNX2_TDMA_DR_INTF_FSM 0x00005c88
-#define BNX2_TDMA_DR_INTF_FSM_L2_COMP (0x3L<<0)
-#define BNX2_TDMA_DR_INTF_FSM_TPATQ (0x7L<<4)
-#define BNX2_TDMA_DR_INTF_FSM_TPBUF (0x3L<<8)
-#define BNX2_TDMA_DR_INTF_FSM_DR_BUF (0x7L<<12)
-#define BNX2_TDMA_DR_INTF_FSM_DMAD (0x7L<<16)
-
-#define BNX2_TDMA_DR_INTF_STATUS 0x00005c8c
-#define BNX2_TDMA_DR_INTF_STATUS_HOLE_PHASE (0x7L<<0)
-#define BNX2_TDMA_DR_INTF_STATUS_DATA_AVAIL (0x3L<<4)
-#define BNX2_TDMA_DR_INTF_STATUS_SHIFT_ADDR (0x7L<<8)
-#define BNX2_TDMA_DR_INTF_STATUS_NXT_PNTR (0xfL<<12)
-#define BNX2_TDMA_DR_INTF_STATUS_BYTE_COUNT (0x7L<<16)
-
-#define BNX2_TDMA_PUSH_FSM 0x00005c90
-#define BNX2_TDMA_BD_IF_DEBUG 0x00005c94
-#define BNX2_TDMA_DMAD_IF_DEBUG 0x00005c98
-#define BNX2_TDMA_CTX_IF_DEBUG 0x00005c9c
-#define BNX2_TDMA_TPBUF_IF_DEBUG 0x00005ca0
-#define BNX2_TDMA_DR_IF_DEBUG 0x00005ca4
-#define BNX2_TDMA_TPATQ_IF_DEBUG 0x00005ca8
-#define BNX2_TDMA_TDMA_ILOCK_CKSUM 0x00005cac
-#define BNX2_TDMA_TDMA_ILOCK_CKSUM_CALCULATED (0xffffL<<0)
-#define BNX2_TDMA_TDMA_ILOCK_CKSUM_EXPECTED (0xffffL<<16)
-
-#define BNX2_TDMA_TDMA_PCIE_CKSUM 0x00005cb0
-#define BNX2_TDMA_TDMA_PCIE_CKSUM_CALCULATED (0xffffL<<0)
-#define BNX2_TDMA_TDMA_PCIE_CKSUM_EXPECTED (0xffffL<<16)
-
-#define BNX2_TDMA_TDMAQ 0x00005fc0
-#define BNX2_TDMA_FTQ_CMD 0x00005ff8
-#define BNX2_TDMA_FTQ_CMD_OFFSET (0x3ffL<<0)
-#define BNX2_TDMA_FTQ_CMD_WR_TOP (1L<<10)
-#define BNX2_TDMA_FTQ_CMD_WR_TOP_0 (0L<<10)
-#define BNX2_TDMA_FTQ_CMD_WR_TOP_1 (1L<<10)
-#define BNX2_TDMA_FTQ_CMD_SFT_RESET (1L<<25)
-#define BNX2_TDMA_FTQ_CMD_RD_DATA (1L<<26)
-#define BNX2_TDMA_FTQ_CMD_ADD_INTERVEN (1L<<27)
-#define BNX2_TDMA_FTQ_CMD_ADD_DATA (1L<<28)
-#define BNX2_TDMA_FTQ_CMD_INTERVENE_CLR (1L<<29)
-#define BNX2_TDMA_FTQ_CMD_POP (1L<<30)
-#define BNX2_TDMA_FTQ_CMD_BUSY (1L<<31)
-
-#define BNX2_TDMA_FTQ_CTL 0x00005ffc
-#define BNX2_TDMA_FTQ_CTL_INTERVENE (1L<<0)
-#define BNX2_TDMA_FTQ_CTL_OVERFLOW (1L<<1)
-#define BNX2_TDMA_FTQ_CTL_FORCE_INTERVENE (1L<<2)
-#define BNX2_TDMA_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
-#define BNX2_TDMA_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
-
-
-
-/*
- * hc_reg definition
- * offset: 0x6800
- */
-#define BNX2_HC_COMMAND 0x00006800
-#define BNX2_HC_COMMAND_ENABLE (1L<<0)
-#define BNX2_HC_COMMAND_SKIP_ABORT (1L<<4)
-#define BNX2_HC_COMMAND_COAL_NOW (1L<<16)
-#define BNX2_HC_COMMAND_COAL_NOW_WO_INT (1L<<17)
-#define BNX2_HC_COMMAND_STATS_NOW (1L<<18)
-#define BNX2_HC_COMMAND_FORCE_INT (0x3L<<19)
-#define BNX2_HC_COMMAND_FORCE_INT_NULL (0L<<19)
-#define BNX2_HC_COMMAND_FORCE_INT_HIGH (1L<<19)
-#define BNX2_HC_COMMAND_FORCE_INT_LOW (2L<<19)
-#define BNX2_HC_COMMAND_FORCE_INT_FREE (3L<<19)
-#define BNX2_HC_COMMAND_CLR_STAT_NOW (1L<<21)
-#define BNX2_HC_COMMAND_MAIN_PWR_INT (1L<<22)
-#define BNX2_HC_COMMAND_COAL_ON_NEXT_EVENT (1L<<27)
-
-#define BNX2_HC_STATUS 0x00006804
-#define BNX2_HC_STATUS_MASTER_ABORT (1L<<0)
-#define BNX2_HC_STATUS_PARITY_ERROR_STATE (1L<<1)
-#define BNX2_HC_STATUS_PCI_CLK_CNT_STAT (1L<<16)
-#define BNX2_HC_STATUS_CORE_CLK_CNT_STAT (1L<<17)
-#define BNX2_HC_STATUS_NUM_STATUS_BLOCKS_STAT (1L<<18)
-#define BNX2_HC_STATUS_NUM_INT_GEN_STAT (1L<<19)
-#define BNX2_HC_STATUS_NUM_INT_MBOX_WR_STAT (1L<<20)
-#define BNX2_HC_STATUS_CORE_CLKS_TO_HW_INTACK_STAT (1L<<23)
-#define BNX2_HC_STATUS_CORE_CLKS_TO_SW_INTACK_STAT (1L<<24)
-#define BNX2_HC_STATUS_CORE_CLKS_DURING_SW_INTACK_STAT (1L<<25)
-
-#define BNX2_HC_CONFIG 0x00006808
-#define BNX2_HC_CONFIG_COLLECT_STATS (1L<<0)
-#define BNX2_HC_CONFIG_RX_TMR_MODE (1L<<1)
-#define BNX2_HC_CONFIG_TX_TMR_MODE (1L<<2)
-#define BNX2_HC_CONFIG_COM_TMR_MODE (1L<<3)
-#define BNX2_HC_CONFIG_CMD_TMR_MODE (1L<<4)
-#define BNX2_HC_CONFIG_STATISTIC_PRIORITY (1L<<5)
BNX2_HC_CONFIG_STATUS_PRIORITY (1L<<6) -#define BNX2_HC_CONFIG_STAT_MEM_ADDR (0xffL<<8) -#define BNX2_HC_CONFIG_PER_MODE (1L<<16) -#define BNX2_HC_CONFIG_ONE_SHOT (1L<<17) -#define BNX2_HC_CONFIG_USE_INT_PARAM (1L<<18) -#define BNX2_HC_CONFIG_SET_MASK_AT_RD (1L<<19) -#define BNX2_HC_CONFIG_PER_COLLECT_LIMIT (0xfL<<20) -#define BNX2_HC_CONFIG_SB_ADDR_INC (0x7L<<24) -#define BNX2_HC_CONFIG_SB_ADDR_INC_64B (0L<<24) -#define BNX2_HC_CONFIG_SB_ADDR_INC_128B (1L<<24) -#define BNX2_HC_CONFIG_SB_ADDR_INC_256B (2L<<24) -#define BNX2_HC_CONFIG_SB_ADDR_INC_512B (3L<<24) -#define BNX2_HC_CONFIG_SB_ADDR_INC_1024B (4L<<24) -#define BNX2_HC_CONFIG_SB_ADDR_INC_2048B (5L<<24) -#define BNX2_HC_CONFIG_SB_ADDR_INC_4096B (6L<<24) -#define BNX2_HC_CONFIG_SB_ADDR_INC_8192B (7L<<24) -#define BNX2_HC_CONFIG_GEN_STAT_AVG_INTR (1L<<29) -#define BNX2_HC_CONFIG_UNMASK_ALL (1L<<30) -#define BNX2_HC_CONFIG_TX_SEL (1L<<31) - -#define BNX2_HC_ATTN_BITS_ENABLE 0x0000680c -#define BNX2_HC_STATUS_ADDR_L 0x00006810 -#define BNX2_HC_STATUS_ADDR_H 0x00006814 -#define BNX2_HC_STATISTICS_ADDR_L 0x00006818 -#define BNX2_HC_STATISTICS_ADDR_H 0x0000681c -#define BNX2_HC_TX_QUICK_CONS_TRIP 0x00006820 -#define BNX2_HC_TX_QUICK_CONS_TRIP_VALUE (0xffL<<0) -#define BNX2_HC_TX_QUICK_CONS_TRIP_INT (0xffL<<16) - -#define BNX2_HC_COMP_PROD_TRIP 0x00006824 -#define BNX2_HC_COMP_PROD_TRIP_VALUE (0xffL<<0) -#define BNX2_HC_COMP_PROD_TRIP_INT (0xffL<<16) - -#define BNX2_HC_RX_QUICK_CONS_TRIP 0x00006828 -#define BNX2_HC_RX_QUICK_CONS_TRIP_VALUE (0xffL<<0) -#define BNX2_HC_RX_QUICK_CONS_TRIP_INT (0xffL<<16) - -#define BNX2_HC_RX_TICKS 0x0000682c -#define BNX2_HC_RX_TICKS_VALUE (0x3ffL<<0) -#define BNX2_HC_RX_TICKS_INT (0x3ffL<<16) - -#define BNX2_HC_TX_TICKS 0x00006830 -#define BNX2_HC_TX_TICKS_VALUE (0x3ffL<<0) -#define BNX2_HC_TX_TICKS_INT (0x3ffL<<16) - -#define BNX2_HC_COM_TICKS 0x00006834 -#define BNX2_HC_COM_TICKS_VALUE (0x3ffL<<0) -#define BNX2_HC_COM_TICKS_INT (0x3ffL<<16) - -#define BNX2_HC_CMD_TICKS 0x00006838 -#define BNX2_HC_CMD_TICKS_VALUE (0x3ffL<<0) -#define BNX2_HC_CMD_TICKS_INT (0x3ffL<<16) - -#define BNX2_HC_PERIODIC_TICKS 0x0000683c -#define BNX2_HC_PERIODIC_TICKS_HC_PERIODIC_TICKS (0xffffL<<0) -#define BNX2_HC_PERIODIC_TICKS_HC_INT_PERIODIC_TICKS (0xffffL<<16) - -#define BNX2_HC_STAT_COLLECT_TICKS 0x00006840 -#define BNX2_HC_STAT_COLLECT_TICKS_HC_STAT_COLL_TICKS (0xffL<<4) - -#define BNX2_HC_STATS_TICKS 0x00006844 -#define BNX2_HC_STATS_TICKS_HC_STAT_TICKS (0xffffL<<8) - -#define BNX2_HC_STATS_INTERRUPT_STATUS 0x00006848 -#define BNX2_HC_STATS_INTERRUPT_STATUS_SB_STATUS (0x1ffL<<0) -#define BNX2_HC_STATS_INTERRUPT_STATUS_INT_STATUS (0x1ffL<<16) - -#define BNX2_HC_STAT_MEM_DATA 0x0000684c -#define BNX2_HC_STAT_GEN_SEL_0 0x00006850 -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0 (0x7fL<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT0 (0L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT1 (1L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT2 (2L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT3 (3L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT4 (4L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT5 (5L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT6 (6L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT7 (7L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT8 (8L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT9 (9L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT10 (10L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT11 (11L<<0) -#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT0 (12L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT1 (13L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT2 (14L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT3 (15L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT4 (16L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT5 (17L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT6 (18L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT7 (19L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT0 (20L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT1 (21L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT2 (22L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT3 (23L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT4 (24L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT5 (25L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT6 (26L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT7 (27L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT8 (28L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT9 (29L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT10 (30L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT11 (31L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT0 (32L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT1 (33L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT2 (34L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT3 (35L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT0 (36L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT1 (37L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT2 (38L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT3 (39L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT4 (40L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT5 (41L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT6 (42L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT7 (43L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT0 (44L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT1 (45L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT2 (46L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT3 (47L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT4 (48L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT5 (49L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT6 (50L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT7 (51L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_PCI_CLK_CNT (52L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CORE_CLK_CNT (53L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS (54L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN (55L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR (56L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK (59L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK (60L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK (61L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_CMD_CNT (62L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_SLOT_CNT (63L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_CMD_CNT (64L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_SLOT_CNT (65L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT (66L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT (67L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT (68L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT (69L<<0) -#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT (70L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT (71L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT (72L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT (73L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT (74L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT (75L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT (76L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT (77L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT (78L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT (79L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT (80L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT (81L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT (82L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT (83L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT (84L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_TRANSFERS_CNT (85L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_DELAY_PCI_CLKS_CNT (86L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_TRANSFERS_CNT (87L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_DELAY_PCI_CLKS_CNT (88L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_RETRY_AFTER_DATA_CNT (89L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_TRANSFERS_CNT (90L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_DELAY_PCI_CLKS_CNT (91L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_TRANSFERS_CNT (92L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_DELAY_PCI_CLKS_CNT (93L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_RETRY_AFTER_DATA_CNT (94L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_WR_CNT64 (95L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_RD_CNT64 (96L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_ACC_STALL_CLKS (97L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_LOCK_STALL_CLKS (98L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS_STAT (99L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS64_STAT (100L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_PCI_STALL_STAT (101L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_FTQ_ENTRY_CNT (102L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_BURST_CNT (103L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_FTQ_ENTRY_CNT (104L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_BURST_CNT (105L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_FTQ_ENTRY_CNT (106L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_BURST_CNT (107L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUP_MATCH_CNT (108L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_POLL_PASS_CNT (109L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR1_CNT (110L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR2_CNT (111L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR3_CNT (112L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR4_CNT (113L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR5_CNT (114L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT0 (115L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT1 (116L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT2 (117L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT3 (118L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT4 (119L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT5 (120L<<0) -#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC1_MISS (121L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC2_MISS (122L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_BURST_CNT (127L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1 (0x7fL<<8) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2 (0x7fL<<16) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3 (0x7fL<<24) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_XI (0xffL<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UMP_RX_FRAME_DROP_XI (52L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S0_XI (57L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S1_XI (58L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S2_XI (85L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S3_XI (86L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S4_XI (87L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S5_XI (88L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S6_XI (89L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S7_XI (90L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S8_XI (91L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S9_XI (92L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S10_XI (93L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MQ_IDB_OFLOW_XI (94L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_RD_CNT_XI (123L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_WR_CNT_XI (124L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_HITS_XI (125L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_MISSES_XI (126L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC1_XI (128L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC1_XI (129L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC1_XI (130L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC1_XI (131L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC1_XI (132L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC1_XI (133L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC2_XI (134L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC2_XI (135L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC2_XI (136L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC2_XI (137L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC2_XI (138L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC2_XI (139L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC3_XI (140L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC3_XI (141L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC3_XI (142L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC3_XI (143L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC3_XI (144L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC3_XI (145L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC4_XI (146L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC4_XI (147L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC4_XI (148L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC4_XI (149L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC4_XI (150L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC4_XI (151L<<0) -#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC5_XI (152L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC5_XI (153L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC5_XI (154L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC5_XI (155L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC5_XI (156L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC5_XI (157L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC6_XI (158L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC6_XI (159L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC6_XI (160L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC6_XI (161L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC6_XI (162L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC6_XI (163L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC7_XI (164L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC7_XI (165L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC7_XI (166L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC7_XI (167L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC7_XI (168L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC7_XI (169L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC8_XI (170L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC8_XI (171L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC8_XI (172L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC8_XI (173L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC8_XI (174L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC8_XI (175L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_CMD_CNT_XI (176L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_SLOT_CNT_XI (177L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI (178L<<0) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1_XI (0xffL<<8) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2_XI (0xffL<<16) -#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3_XI (0xffL<<24) - -#define BNX2_HC_STAT_GEN_SEL_1 0x00006854 -#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4 (0x7fL<<0) -#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5 (0x7fL<<8) -#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6 (0x7fL<<16) -#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7 (0x7fL<<24) -#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4_XI (0xffL<<0) -#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5_XI (0xffL<<8) -#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6_XI (0xffL<<16) -#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7_XI (0xffL<<24) - -#define BNX2_HC_STAT_GEN_SEL_2 0x00006858 -#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8 (0x7fL<<0) -#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9 (0x7fL<<8) -#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10 (0x7fL<<16) -#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11 (0x7fL<<24) -#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8_XI (0xffL<<0) -#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9_XI (0xffL<<8) -#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10_XI (0xffL<<16) -#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11_XI (0xffL<<24) - -#define BNX2_HC_STAT_GEN_SEL_3 0x0000685c -#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12 (0x7fL<<0) -#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13 (0x7fL<<8) -#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14 (0x7fL<<16) -#define 
BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15 (0x7fL<<24) -#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12_XI (0xffL<<0) -#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13_XI (0xffL<<8) -#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14_XI (0xffL<<16) -#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15_XI (0xffL<<24) - -#define BNX2_HC_STAT_GEN_STAT0 0x00006888 -#define BNX2_HC_STAT_GEN_STAT1 0x0000688c -#define BNX2_HC_STAT_GEN_STAT2 0x00006890 -#define BNX2_HC_STAT_GEN_STAT3 0x00006894 -#define BNX2_HC_STAT_GEN_STAT4 0x00006898 -#define BNX2_HC_STAT_GEN_STAT5 0x0000689c -#define BNX2_HC_STAT_GEN_STAT6 0x000068a0 -#define BNX2_HC_STAT_GEN_STAT7 0x000068a4 -#define BNX2_HC_STAT_GEN_STAT8 0x000068a8 -#define BNX2_HC_STAT_GEN_STAT9 0x000068ac -#define BNX2_HC_STAT_GEN_STAT10 0x000068b0 -#define BNX2_HC_STAT_GEN_STAT11 0x000068b4 -#define BNX2_HC_STAT_GEN_STAT12 0x000068b8 -#define BNX2_HC_STAT_GEN_STAT13 0x000068bc -#define BNX2_HC_STAT_GEN_STAT14 0x000068c0 -#define BNX2_HC_STAT_GEN_STAT15 0x000068c4 -#define BNX2_HC_STAT_GEN_STAT_AC0 0x000068c8 -#define BNX2_HC_STAT_GEN_STAT_AC1 0x000068cc -#define BNX2_HC_STAT_GEN_STAT_AC2 0x000068d0 -#define BNX2_HC_STAT_GEN_STAT_AC3 0x000068d4 -#define BNX2_HC_STAT_GEN_STAT_AC4 0x000068d8 -#define BNX2_HC_STAT_GEN_STAT_AC5 0x000068dc -#define BNX2_HC_STAT_GEN_STAT_AC6 0x000068e0 -#define BNX2_HC_STAT_GEN_STAT_AC7 0x000068e4 -#define BNX2_HC_STAT_GEN_STAT_AC8 0x000068e8 -#define BNX2_HC_STAT_GEN_STAT_AC9 0x000068ec -#define BNX2_HC_STAT_GEN_STAT_AC10 0x000068f0 -#define BNX2_HC_STAT_GEN_STAT_AC11 0x000068f4 -#define BNX2_HC_STAT_GEN_STAT_AC12 0x000068f8 -#define BNX2_HC_STAT_GEN_STAT_AC13 0x000068fc -#define BNX2_HC_STAT_GEN_STAT_AC14 0x00006900 -#define BNX2_HC_STAT_GEN_STAT_AC15 0x00006904 -#define BNX2_HC_STAT_GEN_STAT_AC 0x000068c8 -#define BNX2_HC_VIS 0x00006908 -#define BNX2_HC_VIS_STAT_BUILD_STATE (0xfL<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_IDLE (0L<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_START (1L<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_REQUEST (2L<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE64 (3L<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE32 (4L<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE_DONE (5L<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_DMA (6L<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_CONTROL (7L<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_LOW (8L<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_HIGH (9L<<0) -#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_DATA (10L<<0) -#define BNX2_HC_VIS_DMA_STAT_STATE (0xfL<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_IDLE (0L<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_PARAM (1L<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_DMA (2L<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP (3L<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_COMP (4L<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_PARAM (5L<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_DMA (6L<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_1 (7L<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_2 (8L<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_WAIT (9L<<8) -#define BNX2_HC_VIS_DMA_STAT_STATE_ABORT (15L<<8) -#define BNX2_HC_VIS_DMA_MSI_STATE (0x7L<<12) -#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE (0x3L<<15) -#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_IDLE (0L<<15) -#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_COUNT (1L<<15) -#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_START (2L<<15) - -#define BNX2_HC_VIS_1 0x0000690c -#define BNX2_HC_VIS_1_HW_INTACK_STATE (1L<<4) -#define BNX2_HC_VIS_1_HW_INTACK_STATE_IDLE (0L<<4) -#define BNX2_HC_VIS_1_HW_INTACK_STATE_COUNT (1L<<4) -#define 
BNX2_HC_VIS_1_SW_INTACK_STATE (1L<<5) -#define BNX2_HC_VIS_1_SW_INTACK_STATE_IDLE (0L<<5) -#define BNX2_HC_VIS_1_SW_INTACK_STATE_COUNT (1L<<5) -#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE (1L<<6) -#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_IDLE (0L<<6) -#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_COUNT (1L<<6) -#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE (1L<<7) -#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_IDLE (0L<<7) -#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_COUNT (1L<<7) -#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE (0xfL<<17) -#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_IDLE (0L<<17) -#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_DMA (1L<<17) -#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_UPDATE (2L<<17) -#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_ASSIGN (3L<<17) -#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_WAIT (4L<<17) -#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_UPDATE (5L<<17) -#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_ASSIGN (6L<<17) -#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_WAIT (7L<<17) -#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE (0x3L<<21) -#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_NORMAL (0L<<21) -#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_CLEAR (1L<<21) -#define BNX2_HC_VIS_1_INT_GEN_STATE (1L<<23) -#define BNX2_HC_VIS_1_INT_GEN_STATE_DLE (0L<<23) -#define BNX2_HC_VIS_1_INT_GEN_STATE_NTERRUPT (1L<<23) -#define BNX2_HC_VIS_1_STAT_CHAN_ID (0x7L<<24) -#define BNX2_HC_VIS_1_INT_B (1L<<27) - -#define BNX2_HC_DEBUG_VECT_PEEK 0x00006910 -#define BNX2_HC_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) -#define BNX2_HC_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) -#define BNX2_HC_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) -#define BNX2_HC_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) -#define BNX2_HC_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) -#define BNX2_HC_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) - -#define BNX2_HC_COALESCE_NOW 0x00006914 -#define BNX2_HC_COALESCE_NOW_COAL_NOW (0x1ffL<<1) -#define BNX2_HC_COALESCE_NOW_COAL_NOW_WO_INT (0x1ffL<<11) -#define BNX2_HC_COALESCE_NOW_COAL_ON_NXT_EVENT (0x1ffL<<21) - -#define BNX2_HC_MSIX_BIT_VECTOR 0x00006918 -#define BNX2_HC_MSIX_BIT_VECTOR_VAL (0x1ffL<<0) - -#define BNX2_HC_SB_CONFIG_1 0x00006a00 -#define BNX2_HC_SB_CONFIG_1_RX_TMR_MODE (1L<<1) -#define BNX2_HC_SB_CONFIG_1_TX_TMR_MODE (1L<<2) -#define BNX2_HC_SB_CONFIG_1_COM_TMR_MODE (1L<<3) -#define BNX2_HC_SB_CONFIG_1_CMD_TMR_MODE (1L<<4) -#define BNX2_HC_SB_CONFIG_1_PER_MODE (1L<<16) -#define BNX2_HC_SB_CONFIG_1_ONE_SHOT (1L<<17) -#define BNX2_HC_SB_CONFIG_1_USE_INT_PARAM (1L<<18) -#define BNX2_HC_SB_CONFIG_1_PER_COLLECT_LIMIT (0xfL<<20) - -#define BNX2_HC_TX_QUICK_CONS_TRIP_1 0x00006a04 -#define BNX2_HC_TX_QUICK_CONS_TRIP_1_VALUE (0xffL<<0) -#define BNX2_HC_TX_QUICK_CONS_TRIP_1_INT (0xffL<<16) - -#define BNX2_HC_COMP_PROD_TRIP_1 0x00006a08 -#define BNX2_HC_COMP_PROD_TRIP_1_VALUE (0xffL<<0) -#define BNX2_HC_COMP_PROD_TRIP_1_INT (0xffL<<16) - -#define BNX2_HC_RX_QUICK_CONS_TRIP_1 0x00006a0c -#define BNX2_HC_RX_QUICK_CONS_TRIP_1_VALUE (0xffL<<0) -#define BNX2_HC_RX_QUICK_CONS_TRIP_1_INT (0xffL<<16) - -#define BNX2_HC_RX_TICKS_1 0x00006a10 -#define BNX2_HC_RX_TICKS_1_VALUE (0x3ffL<<0) -#define BNX2_HC_RX_TICKS_1_INT (0x3ffL<<16) - -#define BNX2_HC_TX_TICKS_1 0x00006a14 -#define BNX2_HC_TX_TICKS_1_VALUE (0x3ffL<<0) -#define BNX2_HC_TX_TICKS_1_INT (0x3ffL<<16) - -#define BNX2_HC_COM_TICKS_1 0x00006a18 -#define BNX2_HC_COM_TICKS_1_VALUE (0x3ffL<<0) -#define BNX2_HC_COM_TICKS_1_INT (0x3ffL<<16) - -#define BNX2_HC_CMD_TICKS_1 0x00006a1c -#define BNX2_HC_CMD_TICKS_1_VALUE (0x3ffL<<0) -#define BNX2_HC_CMD_TICKS_1_INT (0x3ffL<<16) - -#define BNX2_HC_PERIODIC_TICKS_1 0x00006a20 -#define 
BNX2_HC_PERIODIC_TICKS_1_HC_PERIODIC_TICKS (0xffffL<<0) -#define BNX2_HC_PERIODIC_TICKS_1_HC_INT_PERIODIC_TICKS (0xffffL<<16) - -#define BNX2_HC_SB_CONFIG_2 0x00006a24 -#define BNX2_HC_SB_CONFIG_2_RX_TMR_MODE (1L<<1) -#define BNX2_HC_SB_CONFIG_2_TX_TMR_MODE (1L<<2) -#define BNX2_HC_SB_CONFIG_2_COM_TMR_MODE (1L<<3) -#define BNX2_HC_SB_CONFIG_2_CMD_TMR_MODE (1L<<4) -#define BNX2_HC_SB_CONFIG_2_PER_MODE (1L<<16) -#define BNX2_HC_SB_CONFIG_2_ONE_SHOT (1L<<17) -#define BNX2_HC_SB_CONFIG_2_USE_INT_PARAM (1L<<18) -#define BNX2_HC_SB_CONFIG_2_PER_COLLECT_LIMIT (0xfL<<20) - -#define BNX2_HC_TX_QUICK_CONS_TRIP_2 0x00006a28 -#define BNX2_HC_TX_QUICK_CONS_TRIP_2_VALUE (0xffL<<0) -#define BNX2_HC_TX_QUICK_CONS_TRIP_2_INT (0xffL<<16) - -#define BNX2_HC_COMP_PROD_TRIP_2 0x00006a2c -#define BNX2_HC_COMP_PROD_TRIP_2_VALUE (0xffL<<0) -#define BNX2_HC_COMP_PROD_TRIP_2_INT (0xffL<<16) - -#define BNX2_HC_RX_QUICK_CONS_TRIP_2 0x00006a30 -#define BNX2_HC_RX_QUICK_CONS_TRIP_2_VALUE (0xffL<<0) -#define BNX2_HC_RX_QUICK_CONS_TRIP_2_INT (0xffL<<16) - -#define BNX2_HC_RX_TICKS_2 0x00006a34 -#define BNX2_HC_RX_TICKS_2_VALUE (0x3ffL<<0) -#define BNX2_HC_RX_TICKS_2_INT (0x3ffL<<16) - -#define BNX2_HC_TX_TICKS_2 0x00006a38 -#define BNX2_HC_TX_TICKS_2_VALUE (0x3ffL<<0) -#define BNX2_HC_TX_TICKS_2_INT (0x3ffL<<16) - -#define BNX2_HC_COM_TICKS_2 0x00006a3c -#define BNX2_HC_COM_TICKS_2_VALUE (0x3ffL<<0) -#define BNX2_HC_COM_TICKS_2_INT (0x3ffL<<16) - -#define BNX2_HC_CMD_TICKS_2 0x00006a40 -#define BNX2_HC_CMD_TICKS_2_VALUE (0x3ffL<<0) -#define BNX2_HC_CMD_TICKS_2_INT (0x3ffL<<16) - -#define BNX2_HC_PERIODIC_TICKS_2 0x00006a44 -#define BNX2_HC_PERIODIC_TICKS_2_HC_PERIODIC_TICKS (0xffffL<<0) -#define BNX2_HC_PERIODIC_TICKS_2_HC_INT_PERIODIC_TICKS (0xffffL<<16) - -#define BNX2_HC_SB_CONFIG_3 0x00006a48 -#define BNX2_HC_SB_CONFIG_3_RX_TMR_MODE (1L<<1) -#define BNX2_HC_SB_CONFIG_3_TX_TMR_MODE (1L<<2) -#define BNX2_HC_SB_CONFIG_3_COM_TMR_MODE (1L<<3) -#define BNX2_HC_SB_CONFIG_3_CMD_TMR_MODE (1L<<4) -#define BNX2_HC_SB_CONFIG_3_PER_MODE (1L<<16) -#define BNX2_HC_SB_CONFIG_3_ONE_SHOT (1L<<17) -#define BNX2_HC_SB_CONFIG_3_USE_INT_PARAM (1L<<18) -#define BNX2_HC_SB_CONFIG_3_PER_COLLECT_LIMIT (0xfL<<20) - -#define BNX2_HC_TX_QUICK_CONS_TRIP_3 0x00006a4c -#define BNX2_HC_TX_QUICK_CONS_TRIP_3_VALUE (0xffL<<0) -#define BNX2_HC_TX_QUICK_CONS_TRIP_3_INT (0xffL<<16) - -#define BNX2_HC_COMP_PROD_TRIP_3 0x00006a50 -#define BNX2_HC_COMP_PROD_TRIP_3_VALUE (0xffL<<0) -#define BNX2_HC_COMP_PROD_TRIP_3_INT (0xffL<<16) - -#define BNX2_HC_RX_QUICK_CONS_TRIP_3 0x00006a54 -#define BNX2_HC_RX_QUICK_CONS_TRIP_3_VALUE (0xffL<<0) -#define BNX2_HC_RX_QUICK_CONS_TRIP_3_INT (0xffL<<16) - -#define BNX2_HC_RX_TICKS_3 0x00006a58 -#define BNX2_HC_RX_TICKS_3_VALUE (0x3ffL<<0) -#define BNX2_HC_RX_TICKS_3_INT (0x3ffL<<16) - -#define BNX2_HC_TX_TICKS_3 0x00006a5c -#define BNX2_HC_TX_TICKS_3_VALUE (0x3ffL<<0) -#define BNX2_HC_TX_TICKS_3_INT (0x3ffL<<16) - -#define BNX2_HC_COM_TICKS_3 0x00006a60 -#define BNX2_HC_COM_TICKS_3_VALUE (0x3ffL<<0) -#define BNX2_HC_COM_TICKS_3_INT (0x3ffL<<16) - -#define BNX2_HC_CMD_TICKS_3 0x00006a64 -#define BNX2_HC_CMD_TICKS_3_VALUE (0x3ffL<<0) -#define BNX2_HC_CMD_TICKS_3_INT (0x3ffL<<16) - -#define BNX2_HC_PERIODIC_TICKS_3 0x00006a68 -#define BNX2_HC_PERIODIC_TICKS_3_HC_PERIODIC_TICKS (0xffffL<<0) -#define BNX2_HC_PERIODIC_TICKS_3_HC_INT_PERIODIC_TICKS (0xffffL<<16) - -#define BNX2_HC_SB_CONFIG_4 0x00006a6c -#define BNX2_HC_SB_CONFIG_4_RX_TMR_MODE (1L<<1) -#define BNX2_HC_SB_CONFIG_4_TX_TMR_MODE (1L<<2) -#define 
BNX2_HC_SB_CONFIG_4_COM_TMR_MODE (1L<<3) -#define BNX2_HC_SB_CONFIG_4_CMD_TMR_MODE (1L<<4) -#define BNX2_HC_SB_CONFIG_4_PER_MODE (1L<<16) -#define BNX2_HC_SB_CONFIG_4_ONE_SHOT (1L<<17) -#define BNX2_HC_SB_CONFIG_4_USE_INT_PARAM (1L<<18) -#define BNX2_HC_SB_CONFIG_4_PER_COLLECT_LIMIT (0xfL<<20) - -#define BNX2_HC_TX_QUICK_CONS_TRIP_4 0x00006a70 -#define BNX2_HC_TX_QUICK_CONS_TRIP_4_VALUE (0xffL<<0) -#define BNX2_HC_TX_QUICK_CONS_TRIP_4_INT (0xffL<<16) - -#define BNX2_HC_COMP_PROD_TRIP_4 0x00006a74 -#define BNX2_HC_COMP_PROD_TRIP_4_VALUE (0xffL<<0) -#define BNX2_HC_COMP_PROD_TRIP_4_INT (0xffL<<16) - -#define BNX2_HC_RX_QUICK_CONS_TRIP_4 0x00006a78 -#define BNX2_HC_RX_QUICK_CONS_TRIP_4_VALUE (0xffL<<0) -#define BNX2_HC_RX_QUICK_CONS_TRIP_4_INT (0xffL<<16) - -#define BNX2_HC_RX_TICKS_4 0x00006a7c -#define BNX2_HC_RX_TICKS_4_VALUE (0x3ffL<<0) -#define BNX2_HC_RX_TICKS_4_INT (0x3ffL<<16) - -#define BNX2_HC_TX_TICKS_4 0x00006a80 -#define BNX2_HC_TX_TICKS_4_VALUE (0x3ffL<<0) -#define BNX2_HC_TX_TICKS_4_INT (0x3ffL<<16) - -#define BNX2_HC_COM_TICKS_4 0x00006a84 -#define BNX2_HC_COM_TICKS_4_VALUE (0x3ffL<<0) -#define BNX2_HC_COM_TICKS_4_INT (0x3ffL<<16) - -#define BNX2_HC_CMD_TICKS_4 0x00006a88 -#define BNX2_HC_CMD_TICKS_4_VALUE (0x3ffL<<0) -#define BNX2_HC_CMD_TICKS_4_INT (0x3ffL<<16) - -#define BNX2_HC_PERIODIC_TICKS_4 0x00006a8c -#define BNX2_HC_PERIODIC_TICKS_4_HC_PERIODIC_TICKS (0xffffL<<0) -#define BNX2_HC_PERIODIC_TICKS_4_HC_INT_PERIODIC_TICKS (0xffffL<<16) - -#define BNX2_HC_SB_CONFIG_5 0x00006a90 -#define BNX2_HC_SB_CONFIG_5_RX_TMR_MODE (1L<<1) -#define BNX2_HC_SB_CONFIG_5_TX_TMR_MODE (1L<<2) -#define BNX2_HC_SB_CONFIG_5_COM_TMR_MODE (1L<<3) -#define BNX2_HC_SB_CONFIG_5_CMD_TMR_MODE (1L<<4) -#define BNX2_HC_SB_CONFIG_5_PER_MODE (1L<<16) -#define BNX2_HC_SB_CONFIG_5_ONE_SHOT (1L<<17) -#define BNX2_HC_SB_CONFIG_5_USE_INT_PARAM (1L<<18) -#define BNX2_HC_SB_CONFIG_5_PER_COLLECT_LIMIT (0xfL<<20) - -#define BNX2_HC_TX_QUICK_CONS_TRIP_5 0x00006a94 -#define BNX2_HC_TX_QUICK_CONS_TRIP_5_VALUE (0xffL<<0) -#define BNX2_HC_TX_QUICK_CONS_TRIP_5_INT (0xffL<<16) - -#define BNX2_HC_COMP_PROD_TRIP_5 0x00006a98 -#define BNX2_HC_COMP_PROD_TRIP_5_VALUE (0xffL<<0) -#define BNX2_HC_COMP_PROD_TRIP_5_INT (0xffL<<16) - -#define BNX2_HC_RX_QUICK_CONS_TRIP_5 0x00006a9c -#define BNX2_HC_RX_QUICK_CONS_TRIP_5_VALUE (0xffL<<0) -#define BNX2_HC_RX_QUICK_CONS_TRIP_5_INT (0xffL<<16) - -#define BNX2_HC_RX_TICKS_5 0x00006aa0 -#define BNX2_HC_RX_TICKS_5_VALUE (0x3ffL<<0) -#define BNX2_HC_RX_TICKS_5_INT (0x3ffL<<16) - -#define BNX2_HC_TX_TICKS_5 0x00006aa4 -#define BNX2_HC_TX_TICKS_5_VALUE (0x3ffL<<0) -#define BNX2_HC_TX_TICKS_5_INT (0x3ffL<<16) - -#define BNX2_HC_COM_TICKS_5 0x00006aa8 -#define BNX2_HC_COM_TICKS_5_VALUE (0x3ffL<<0) -#define BNX2_HC_COM_TICKS_5_INT (0x3ffL<<16) - -#define BNX2_HC_CMD_TICKS_5 0x00006aac -#define BNX2_HC_CMD_TICKS_5_VALUE (0x3ffL<<0) -#define BNX2_HC_CMD_TICKS_5_INT (0x3ffL<<16) - -#define BNX2_HC_PERIODIC_TICKS_5 0x00006ab0 -#define BNX2_HC_PERIODIC_TICKS_5_HC_PERIODIC_TICKS (0xffffL<<0) -#define BNX2_HC_PERIODIC_TICKS_5_HC_INT_PERIODIC_TICKS (0xffffL<<16) - -#define BNX2_HC_SB_CONFIG_6 0x00006ab4 -#define BNX2_HC_SB_CONFIG_6_RX_TMR_MODE (1L<<1) -#define BNX2_HC_SB_CONFIG_6_TX_TMR_MODE (1L<<2) -#define BNX2_HC_SB_CONFIG_6_COM_TMR_MODE (1L<<3) -#define BNX2_HC_SB_CONFIG_6_CMD_TMR_MODE (1L<<4) -#define BNX2_HC_SB_CONFIG_6_PER_MODE (1L<<16) -#define BNX2_HC_SB_CONFIG_6_ONE_SHOT (1L<<17) -#define BNX2_HC_SB_CONFIG_6_USE_INT_PARAM (1L<<18) -#define BNX2_HC_SB_CONFIG_6_PER_COLLECT_LIMIT (0xfL<<20) - 
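[Editor's note, not part of the patch: the host-coalescing registers above repeat once per status block. Banks 1 through 8 (each BNX2_HC_SB_CONFIG_n plus the trip and ticks registers that follow it) share an identical layout at a fixed stride, and the header derives that stride and the per-register offsets in the BNX2_HC_SB_CONFIG_SIZE and *_OFF macros just after bank 8 below. A minimal sketch of how those macros can be used, assuming the REG_WR() accessor and struct bnx2 from bnx2.c; the function itself is illustrative only:]

	/* Program the RX coalescing ticks for one status-block bank.
	 * Bank 1 starts at BNX2_HC_SB_CONFIG_1; banks 2..8 follow at
	 * BNX2_HC_SB_CONFIG_SIZE-byte intervals with the same layout,
	 * so each register sits at a fixed offset within its bank.
	 */
	static void bnx2_set_rx_coal_ticks(struct bnx2 *bp, int sb_id,
					   u32 ticks, u32 ticks_int)
	{
		u32 base = BNX2_HC_SB_CONFIG_1 +
			   (sb_id - 1) * BNX2_HC_SB_CONFIG_SIZE;

		/* Field positions follow the defines: the VALUE field
		 * occupies bits 9:0 and the INT field bits 25:16.
		 */
		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (ticks_int << 16) | ticks);
	}

[The packing mirrors the non-banked BNX2_HC_RX_TICKS register, whose VALUE and INT fields are defined at bits 9:0 and 25:16 earlier in this header.]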
-#define BNX2_HC_TX_QUICK_CONS_TRIP_6			0x00006ab8
-#define BNX2_HC_TX_QUICK_CONS_TRIP_6_VALUE		(0xffL<<0)
-#define BNX2_HC_TX_QUICK_CONS_TRIP_6_INT		(0xffL<<16)
-
-#define BNX2_HC_COMP_PROD_TRIP_6			0x00006abc
-#define BNX2_HC_COMP_PROD_TRIP_6_VALUE			(0xffL<<0)
-#define BNX2_HC_COMP_PROD_TRIP_6_INT			(0xffL<<16)
-
-#define BNX2_HC_RX_QUICK_CONS_TRIP_6			0x00006ac0
-#define BNX2_HC_RX_QUICK_CONS_TRIP_6_VALUE		(0xffL<<0)
-#define BNX2_HC_RX_QUICK_CONS_TRIP_6_INT		(0xffL<<16)
-
-#define BNX2_HC_RX_TICKS_6				0x00006ac4
-#define BNX2_HC_RX_TICKS_6_VALUE			(0x3ffL<<0)
-#define BNX2_HC_RX_TICKS_6_INT				(0x3ffL<<16)
-
-#define BNX2_HC_TX_TICKS_6				0x00006ac8
-#define BNX2_HC_TX_TICKS_6_VALUE			(0x3ffL<<0)
-#define BNX2_HC_TX_TICKS_6_INT				(0x3ffL<<16)
-
-#define BNX2_HC_COM_TICKS_6				0x00006acc
-#define BNX2_HC_COM_TICKS_6_VALUE			(0x3ffL<<0)
-#define BNX2_HC_COM_TICKS_6_INT				(0x3ffL<<16)
-
-#define BNX2_HC_CMD_TICKS_6				0x00006ad0
-#define BNX2_HC_CMD_TICKS_6_VALUE			(0x3ffL<<0)
-#define BNX2_HC_CMD_TICKS_6_INT				(0x3ffL<<16)
-
-#define BNX2_HC_PERIODIC_TICKS_6			0x00006ad4
-#define BNX2_HC_PERIODIC_TICKS_6_HC_PERIODIC_TICKS	(0xffffL<<0)
-#define BNX2_HC_PERIODIC_TICKS_6_HC_INT_PERIODIC_TICKS	(0xffffL<<16)
-
-#define BNX2_HC_SB_CONFIG_7				0x00006ad8
-#define BNX2_HC_SB_CONFIG_7_RX_TMR_MODE			(1L<<1)
-#define BNX2_HC_SB_CONFIG_7_TX_TMR_MODE			(1L<<2)
-#define BNX2_HC_SB_CONFIG_7_COM_TMR_MODE		(1L<<3)
-#define BNX2_HC_SB_CONFIG_7_CMD_TMR_MODE		(1L<<4)
-#define BNX2_HC_SB_CONFIG_7_PER_MODE			(1L<<16)
-#define BNX2_HC_SB_CONFIG_7_ONE_SHOT			(1L<<17)
-#define BNX2_HC_SB_CONFIG_7_USE_INT_PARAM		(1L<<18)
-#define BNX2_HC_SB_CONFIG_7_PER_COLLECT_LIMIT		(0xfL<<20)
-
-#define BNX2_HC_TX_QUICK_CONS_TRIP_7			0x00006adc
-#define BNX2_HC_TX_QUICK_CONS_TRIP_7_VALUE		(0xffL<<0)
-#define BNX2_HC_TX_QUICK_CONS_TRIP_7_INT		(0xffL<<16)
-
-#define BNX2_HC_COMP_PROD_TRIP_7			0x00006ae0
-#define BNX2_HC_COMP_PROD_TRIP_7_VALUE			(0xffL<<0)
-#define BNX2_HC_COMP_PROD_TRIP_7_INT			(0xffL<<16)
-
-#define BNX2_HC_RX_QUICK_CONS_TRIP_7			0x00006ae4
-#define BNX2_HC_RX_QUICK_CONS_TRIP_7_VALUE		(0xffL<<0)
-#define BNX2_HC_RX_QUICK_CONS_TRIP_7_INT		(0xffL<<16)
-
-#define BNX2_HC_RX_TICKS_7				0x00006ae8
-#define BNX2_HC_RX_TICKS_7_VALUE			(0x3ffL<<0)
-#define BNX2_HC_RX_TICKS_7_INT				(0x3ffL<<16)
-
-#define BNX2_HC_TX_TICKS_7				0x00006aec
-#define BNX2_HC_TX_TICKS_7_VALUE			(0x3ffL<<0)
-#define BNX2_HC_TX_TICKS_7_INT				(0x3ffL<<16)
-
-#define BNX2_HC_COM_TICKS_7				0x00006af0
-#define BNX2_HC_COM_TICKS_7_VALUE			(0x3ffL<<0)
-#define BNX2_HC_COM_TICKS_7_INT				(0x3ffL<<16)
-
-#define BNX2_HC_CMD_TICKS_7				0x00006af4
-#define BNX2_HC_CMD_TICKS_7_VALUE			(0x3ffL<<0)
-#define BNX2_HC_CMD_TICKS_7_INT				(0x3ffL<<16)
-
-#define BNX2_HC_PERIODIC_TICKS_7			0x00006af8
-#define BNX2_HC_PERIODIC_TICKS_7_HC_PERIODIC_TICKS	(0xffffL<<0)
-#define BNX2_HC_PERIODIC_TICKS_7_HC_INT_PERIODIC_TICKS	(0xffffL<<16)
-
-#define BNX2_HC_SB_CONFIG_8				0x00006afc
-#define BNX2_HC_SB_CONFIG_8_RX_TMR_MODE			(1L<<1)
-#define BNX2_HC_SB_CONFIG_8_TX_TMR_MODE			(1L<<2)
-#define BNX2_HC_SB_CONFIG_8_COM_TMR_MODE		(1L<<3)
-#define BNX2_HC_SB_CONFIG_8_CMD_TMR_MODE		(1L<<4)
-#define BNX2_HC_SB_CONFIG_8_PER_MODE			(1L<<16)
-#define BNX2_HC_SB_CONFIG_8_ONE_SHOT			(1L<<17)
-#define BNX2_HC_SB_CONFIG_8_USE_INT_PARAM		(1L<<18)
-#define BNX2_HC_SB_CONFIG_8_PER_COLLECT_LIMIT		(0xfL<<20)
-
-#define BNX2_HC_TX_QUICK_CONS_TRIP_8			0x00006b00
-#define BNX2_HC_TX_QUICK_CONS_TRIP_8_VALUE		(0xffL<<0)
-#define BNX2_HC_TX_QUICK_CONS_TRIP_8_INT		(0xffL<<16)
-
-#define BNX2_HC_COMP_PROD_TRIP_8			0x00006b04
-#define BNX2_HC_COMP_PROD_TRIP_8_VALUE			(0xffL<<0)
-#define BNX2_HC_COMP_PROD_TRIP_8_INT			(0xffL<<16)
-
-#define BNX2_HC_RX_QUICK_CONS_TRIP_8			0x00006b08
-#define BNX2_HC_RX_QUICK_CONS_TRIP_8_VALUE		(0xffL<<0)
-#define BNX2_HC_RX_QUICK_CONS_TRIP_8_INT		(0xffL<<16)
-
-#define BNX2_HC_RX_TICKS_8				0x00006b0c
-#define BNX2_HC_RX_TICKS_8_VALUE			(0x3ffL<<0)
-#define BNX2_HC_RX_TICKS_8_INT				(0x3ffL<<16)
-
-#define BNX2_HC_TX_TICKS_8				0x00006b10
-#define BNX2_HC_TX_TICKS_8_VALUE			(0x3ffL<<0)
-#define BNX2_HC_TX_TICKS_8_INT				(0x3ffL<<16)
-
-#define BNX2_HC_COM_TICKS_8				0x00006b14
-#define BNX2_HC_COM_TICKS_8_VALUE			(0x3ffL<<0)
-#define BNX2_HC_COM_TICKS_8_INT				(0x3ffL<<16)
-
-#define BNX2_HC_CMD_TICKS_8				0x00006b18
-#define BNX2_HC_CMD_TICKS_8_VALUE			(0x3ffL<<0)
-#define BNX2_HC_CMD_TICKS_8_INT				(0x3ffL<<16)
-
-#define BNX2_HC_PERIODIC_TICKS_8			0x00006b1c
-#define BNX2_HC_PERIODIC_TICKS_8_HC_PERIODIC_TICKS	(0xffffL<<0)
-#define BNX2_HC_PERIODIC_TICKS_8_HC_INT_PERIODIC_TICKS	(0xffffL<<16)
-
-#define BNX2_HC_SB_CONFIG_SIZE	(BNX2_HC_SB_CONFIG_2 - BNX2_HC_SB_CONFIG_1)
-#define BNX2_HC_COMP_PROD_TRIP_OFF	(BNX2_HC_COMP_PROD_TRIP_1 - \
-					 BNX2_HC_SB_CONFIG_1)
-#define BNX2_HC_COM_TICKS_OFF	(BNX2_HC_COM_TICKS_1 - BNX2_HC_SB_CONFIG_1)
-#define BNX2_HC_CMD_TICKS_OFF	(BNX2_HC_CMD_TICKS_1 - BNX2_HC_SB_CONFIG_1)
-#define BNX2_HC_TX_QUICK_CONS_TRIP_OFF	(BNX2_HC_TX_QUICK_CONS_TRIP_1 - \
-					 BNX2_HC_SB_CONFIG_1)
-#define BNX2_HC_TX_TICKS_OFF	(BNX2_HC_TX_TICKS_1 - BNX2_HC_SB_CONFIG_1)
-#define BNX2_HC_RX_QUICK_CONS_TRIP_OFF	(BNX2_HC_RX_QUICK_CONS_TRIP_1 - \
-					 BNX2_HC_SB_CONFIG_1)
-#define BNX2_HC_RX_TICKS_OFF	(BNX2_HC_RX_TICKS_1 - BNX2_HC_SB_CONFIG_1)
-
-
-/*
- *  txp_reg definition
- *  offset: 0x40000
- */
-#define BNX2_TXP_CPU_MODE				0x00045000
-#define BNX2_TXP_CPU_MODE_LOCAL_RST			(1L<<0)
-#define BNX2_TXP_CPU_MODE_STEP_ENA			(1L<<1)
-#define BNX2_TXP_CPU_MODE_PAGE_0_DATA_ENA		(1L<<2)
-#define BNX2_TXP_CPU_MODE_PAGE_0_INST_ENA		(1L<<3)
-#define BNX2_TXP_CPU_MODE_MSG_BIT1			(1L<<6)
-#define BNX2_TXP_CPU_MODE_INTERRUPT_ENA			(1L<<7)
-#define BNX2_TXP_CPU_MODE_SOFT_HALT			(1L<<10)
-#define BNX2_TXP_CPU_MODE_BAD_DATA_HALT_ENA		(1L<<11)
-#define BNX2_TXP_CPU_MODE_BAD_INST_HALT_ENA		(1L<<12)
-#define BNX2_TXP_CPU_MODE_FIO_ABORT_HALT_ENA		(1L<<13)
-#define BNX2_TXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	(1L<<15)
-
-#define BNX2_TXP_CPU_STATE				0x00045004
-#define BNX2_TXP_CPU_STATE_BREAKPOINT			(1L<<0)
-#define BNX2_TXP_CPU_STATE_BAD_INST_HALTED		(1L<<2)
-#define BNX2_TXP_CPU_STATE_PAGE_0_DATA_HALTED		(1L<<3)
-#define BNX2_TXP_CPU_STATE_PAGE_0_INST_HALTED		(1L<<4)
-#define BNX2_TXP_CPU_STATE_BAD_DATA_ADDR_HALTED		(1L<<5)
-#define BNX2_TXP_CPU_STATE_BAD_PC_HALTED		(1L<<6)
-#define BNX2_TXP_CPU_STATE_ALIGN_HALTED			(1L<<7)
-#define BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED		(1L<<8)
-#define BNX2_TXP_CPU_STATE_SOFT_HALTED			(1L<<10)
-#define BNX2_TXP_CPU_STATE_SPAD_UNDERFLOW		(1L<<11)
-#define BNX2_TXP_CPU_STATE_INTERRRUPT			(1L<<12)
-#define BNX2_TXP_CPU_STATE_DATA_ACCESS_STALL		(1L<<14)
-#define BNX2_TXP_CPU_STATE_INST_FETCH_STALL		(1L<<15)
-#define BNX2_TXP_CPU_STATE_BLOCKED_READ			(1L<<31)
-
-#define BNX2_TXP_CPU_EVENT_MASK				0x00045008
-#define BNX2_TXP_CPU_EVENT_MASK_BREAKPOINT_MASK		(1L<<0)
-#define BNX2_TXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	(1L<<2)
-#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	(1L<<3)
-#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	(1L<<4)
-#define BNX2_TXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	(1L<<5)
-#define BNX2_TXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	(1L<<6)
-#define BNX2_TXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK	(1L<<7)
-#define BNX2_TXP_CPU_EVENT_MASK_FIO_ABORT_MASK		(1L<<8)
-#define BNX2_TXP_CPU_EVENT_MASK_SOFT_HALTED_MASK	(1L<<10)
-#define BNX2_TXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) -#define BNX2_TXP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) - -#define BNX2_TXP_CPU_PROGRAM_COUNTER 0x0004501c -#define BNX2_TXP_CPU_INSTRUCTION 0x00045020 -#define BNX2_TXP_CPU_DATA_ACCESS 0x00045024 -#define BNX2_TXP_CPU_INTERRUPT_ENABLE 0x00045028 -#define BNX2_TXP_CPU_INTERRUPT_VECTOR 0x0004502c -#define BNX2_TXP_CPU_INTERRUPT_SAVED_PC 0x00045030 -#define BNX2_TXP_CPU_HW_BREAKPOINT 0x00045034 -#define BNX2_TXP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) -#define BNX2_TXP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) - -#define BNX2_TXP_CPU_DEBUG_VECT_PEEK 0x00045038 -#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) -#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) -#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) -#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) -#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) -#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) - -#define BNX2_TXP_CPU_LAST_BRANCH_ADDR 0x00045048 -#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) -#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) -#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) -#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) - -#define BNX2_TXP_CPU_REG_FILE 0x00045200 -#define BNX2_TXP_TXPQ 0x000453c0 -#define BNX2_TXP_FTQ_CMD 0x000453f8 -#define BNX2_TXP_FTQ_CMD_OFFSET (0x3ffL<<0) -#define BNX2_TXP_FTQ_CMD_WR_TOP (1L<<10) -#define BNX2_TXP_FTQ_CMD_WR_TOP_0 (0L<<10) -#define BNX2_TXP_FTQ_CMD_WR_TOP_1 (1L<<10) -#define BNX2_TXP_FTQ_CMD_SFT_RESET (1L<<25) -#define BNX2_TXP_FTQ_CMD_RD_DATA (1L<<26) -#define BNX2_TXP_FTQ_CMD_ADD_INTERVEN (1L<<27) -#define BNX2_TXP_FTQ_CMD_ADD_DATA (1L<<28) -#define BNX2_TXP_FTQ_CMD_INTERVENE_CLR (1L<<29) -#define BNX2_TXP_FTQ_CMD_POP (1L<<30) -#define BNX2_TXP_FTQ_CMD_BUSY (1L<<31) - -#define BNX2_TXP_FTQ_CTL 0x000453fc -#define BNX2_TXP_FTQ_CTL_INTERVENE (1L<<0) -#define BNX2_TXP_FTQ_CTL_OVERFLOW (1L<<1) -#define BNX2_TXP_FTQ_CTL_FORCE_INTERVENE (1L<<2) -#define BNX2_TXP_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) -#define BNX2_TXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) - -#define BNX2_TXP_SCRATCH 0x00060000 - - -/* - * tpat_reg definition - * offset: 0x80000 - */ -#define BNX2_TPAT_CPU_MODE 0x00085000 -#define BNX2_TPAT_CPU_MODE_LOCAL_RST (1L<<0) -#define BNX2_TPAT_CPU_MODE_STEP_ENA (1L<<1) -#define BNX2_TPAT_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) -#define BNX2_TPAT_CPU_MODE_PAGE_0_INST_ENA (1L<<3) -#define BNX2_TPAT_CPU_MODE_MSG_BIT1 (1L<<6) -#define BNX2_TPAT_CPU_MODE_INTERRUPT_ENA (1L<<7) -#define BNX2_TPAT_CPU_MODE_SOFT_HALT (1L<<10) -#define BNX2_TPAT_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) -#define BNX2_TPAT_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) -#define BNX2_TPAT_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) -#define BNX2_TPAT_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) - -#define BNX2_TPAT_CPU_STATE 0x00085004 -#define BNX2_TPAT_CPU_STATE_BREAKPOINT (1L<<0) -#define BNX2_TPAT_CPU_STATE_BAD_INST_HALTED (1L<<2) -#define BNX2_TPAT_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) -#define BNX2_TPAT_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) -#define BNX2_TPAT_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) -#define BNX2_TPAT_CPU_STATE_BAD_PC_HALTED (1L<<6) -#define BNX2_TPAT_CPU_STATE_ALIGN_HALTED (1L<<7) -#define BNX2_TPAT_CPU_STATE_FIO_ABORT_HALTED (1L<<8) -#define BNX2_TPAT_CPU_STATE_SOFT_HALTED (1L<<10) -#define BNX2_TPAT_CPU_STATE_SPAD_UNDERFLOW (1L<<11) -#define BNX2_TPAT_CPU_STATE_INTERRRUPT (1L<<12) -#define BNX2_TPAT_CPU_STATE_DATA_ACCESS_STALL (1L<<14) -#define BNX2_TPAT_CPU_STATE_INST_FETCH_STALL (1L<<15) -#define 
BNX2_TPAT_CPU_STATE_BLOCKED_READ (1L<<31) - -#define BNX2_TPAT_CPU_EVENT_MASK 0x00085008 -#define BNX2_TPAT_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) -#define BNX2_TPAT_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) -#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) -#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) -#define BNX2_TPAT_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) -#define BNX2_TPAT_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) -#define BNX2_TPAT_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) -#define BNX2_TPAT_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) -#define BNX2_TPAT_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) -#define BNX2_TPAT_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) -#define BNX2_TPAT_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) - -#define BNX2_TPAT_CPU_PROGRAM_COUNTER 0x0008501c -#define BNX2_TPAT_CPU_INSTRUCTION 0x00085020 -#define BNX2_TPAT_CPU_DATA_ACCESS 0x00085024 -#define BNX2_TPAT_CPU_INTERRUPT_ENABLE 0x00085028 -#define BNX2_TPAT_CPU_INTERRUPT_VECTOR 0x0008502c -#define BNX2_TPAT_CPU_INTERRUPT_SAVED_PC 0x00085030 -#define BNX2_TPAT_CPU_HW_BREAKPOINT 0x00085034 -#define BNX2_TPAT_CPU_HW_BREAKPOINT_DISABLE (1L<<0) -#define BNX2_TPAT_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) - -#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK 0x00085038 -#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) -#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) -#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) -#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) -#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) -#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) - -#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR 0x00085048 -#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) -#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) -#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) -#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) - -#define BNX2_TPAT_CPU_REG_FILE 0x00085200 -#define BNX2_TPAT_TPATQ 0x000853c0 -#define BNX2_TPAT_FTQ_CMD 0x000853f8 -#define BNX2_TPAT_FTQ_CMD_OFFSET (0x3ffL<<0) -#define BNX2_TPAT_FTQ_CMD_WR_TOP (1L<<10) -#define BNX2_TPAT_FTQ_CMD_WR_TOP_0 (0L<<10) -#define BNX2_TPAT_FTQ_CMD_WR_TOP_1 (1L<<10) -#define BNX2_TPAT_FTQ_CMD_SFT_RESET (1L<<25) -#define BNX2_TPAT_FTQ_CMD_RD_DATA (1L<<26) -#define BNX2_TPAT_FTQ_CMD_ADD_INTERVEN (1L<<27) -#define BNX2_TPAT_FTQ_CMD_ADD_DATA (1L<<28) -#define BNX2_TPAT_FTQ_CMD_INTERVENE_CLR (1L<<29) -#define BNX2_TPAT_FTQ_CMD_POP (1L<<30) -#define BNX2_TPAT_FTQ_CMD_BUSY (1L<<31) - -#define BNX2_TPAT_FTQ_CTL 0x000853fc -#define BNX2_TPAT_FTQ_CTL_INTERVENE (1L<<0) -#define BNX2_TPAT_FTQ_CTL_OVERFLOW (1L<<1) -#define BNX2_TPAT_FTQ_CTL_FORCE_INTERVENE (1L<<2) -#define BNX2_TPAT_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) -#define BNX2_TPAT_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) - -#define BNX2_TPAT_SCRATCH 0x000a0000 - - -/* - * rxp_reg definition - * offset: 0xc0000 - */ -#define BNX2_RXP_CPU_MODE 0x000c5000 -#define BNX2_RXP_CPU_MODE_LOCAL_RST (1L<<0) -#define BNX2_RXP_CPU_MODE_STEP_ENA (1L<<1) -#define BNX2_RXP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) -#define BNX2_RXP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) -#define BNX2_RXP_CPU_MODE_MSG_BIT1 (1L<<6) -#define BNX2_RXP_CPU_MODE_INTERRUPT_ENA (1L<<7) -#define BNX2_RXP_CPU_MODE_SOFT_HALT (1L<<10) -#define BNX2_RXP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) -#define BNX2_RXP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) -#define BNX2_RXP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) -#define BNX2_RXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) - -#define BNX2_RXP_CPU_STATE 0x000c5004 -#define 
BNX2_RXP_CPU_STATE_BREAKPOINT (1L<<0) -#define BNX2_RXP_CPU_STATE_BAD_INST_HALTED (1L<<2) -#define BNX2_RXP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) -#define BNX2_RXP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) -#define BNX2_RXP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) -#define BNX2_RXP_CPU_STATE_BAD_PC_HALTED (1L<<6) -#define BNX2_RXP_CPU_STATE_ALIGN_HALTED (1L<<7) -#define BNX2_RXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) -#define BNX2_RXP_CPU_STATE_SOFT_HALTED (1L<<10) -#define BNX2_RXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) -#define BNX2_RXP_CPU_STATE_INTERRRUPT (1L<<12) -#define BNX2_RXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) -#define BNX2_RXP_CPU_STATE_INST_FETCH_STALL (1L<<15) -#define BNX2_RXP_CPU_STATE_BLOCKED_READ (1L<<31) - -#define BNX2_RXP_CPU_EVENT_MASK 0x000c5008 -#define BNX2_RXP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) -#define BNX2_RXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) -#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) -#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) -#define BNX2_RXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) -#define BNX2_RXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) -#define BNX2_RXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) -#define BNX2_RXP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) -#define BNX2_RXP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) -#define BNX2_RXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) -#define BNX2_RXP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) - -#define BNX2_RXP_CPU_PROGRAM_COUNTER 0x000c501c -#define BNX2_RXP_CPU_INSTRUCTION 0x000c5020 -#define BNX2_RXP_CPU_DATA_ACCESS 0x000c5024 -#define BNX2_RXP_CPU_INTERRUPT_ENABLE 0x000c5028 -#define BNX2_RXP_CPU_INTERRUPT_VECTOR 0x000c502c -#define BNX2_RXP_CPU_INTERRUPT_SAVED_PC 0x000c5030 -#define BNX2_RXP_CPU_HW_BREAKPOINT 0x000c5034 -#define BNX2_RXP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) -#define BNX2_RXP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) - -#define BNX2_RXP_CPU_DEBUG_VECT_PEEK 0x000c5038 -#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) -#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) -#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) -#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) -#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) -#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) - -#define BNX2_RXP_CPU_LAST_BRANCH_ADDR 0x000c5048 -#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) -#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) -#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) -#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) - -#define BNX2_RXP_CPU_REG_FILE 0x000c5200 -#define BNX2_RXP_PFE_PFE_CTL 0x000c537c -#define BNX2_RXP_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE (0xfL<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4) -#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4) -#define 
BNX2_RXP_PFE_PFE_CTL_PFE_COUNT (0xfL<<12) -#define BNX2_RXP_PFE_PFE_CTL_OFFSET (0x1ffL<<16) - -#define BNX2_RXP_RXPCQ 0x000c5380 -#define BNX2_RXP_CFTQ_CMD 0x000c53b8 -#define BNX2_RXP_CFTQ_CMD_OFFSET (0x3ffL<<0) -#define BNX2_RXP_CFTQ_CMD_WR_TOP (1L<<10) -#define BNX2_RXP_CFTQ_CMD_WR_TOP_0 (0L<<10) -#define BNX2_RXP_CFTQ_CMD_WR_TOP_1 (1L<<10) -#define BNX2_RXP_CFTQ_CMD_SFT_RESET (1L<<25) -#define BNX2_RXP_CFTQ_CMD_RD_DATA (1L<<26) -#define BNX2_RXP_CFTQ_CMD_ADD_INTERVEN (1L<<27) -#define BNX2_RXP_CFTQ_CMD_ADD_DATA (1L<<28) -#define BNX2_RXP_CFTQ_CMD_INTERVENE_CLR (1L<<29) -#define BNX2_RXP_CFTQ_CMD_POP (1L<<30) -#define BNX2_RXP_CFTQ_CMD_BUSY (1L<<31) - -#define BNX2_RXP_CFTQ_CTL 0x000c53bc -#define BNX2_RXP_CFTQ_CTL_INTERVENE (1L<<0) -#define BNX2_RXP_CFTQ_CTL_OVERFLOW (1L<<1) -#define BNX2_RXP_CFTQ_CTL_FORCE_INTERVENE (1L<<2) -#define BNX2_RXP_CFTQ_CTL_MAX_DEPTH (0x3ffL<<12) -#define BNX2_RXP_CFTQ_CTL_CUR_DEPTH (0x3ffL<<22) - -#define BNX2_RXP_RXPQ 0x000c53c0 -#define BNX2_RXP_FTQ_CMD 0x000c53f8 -#define BNX2_RXP_FTQ_CMD_OFFSET (0x3ffL<<0) -#define BNX2_RXP_FTQ_CMD_WR_TOP (1L<<10) -#define BNX2_RXP_FTQ_CMD_WR_TOP_0 (0L<<10) -#define BNX2_RXP_FTQ_CMD_WR_TOP_1 (1L<<10) -#define BNX2_RXP_FTQ_CMD_SFT_RESET (1L<<25) -#define BNX2_RXP_FTQ_CMD_RD_DATA (1L<<26) -#define BNX2_RXP_FTQ_CMD_ADD_INTERVEN (1L<<27) -#define BNX2_RXP_FTQ_CMD_ADD_DATA (1L<<28) -#define BNX2_RXP_FTQ_CMD_INTERVENE_CLR (1L<<29) -#define BNX2_RXP_FTQ_CMD_POP (1L<<30) -#define BNX2_RXP_FTQ_CMD_BUSY (1L<<31) - -#define BNX2_RXP_FTQ_CTL 0x000c53fc -#define BNX2_RXP_FTQ_CTL_INTERVENE (1L<<0) -#define BNX2_RXP_FTQ_CTL_OVERFLOW (1L<<1) -#define BNX2_RXP_FTQ_CTL_FORCE_INTERVENE (1L<<2) -#define BNX2_RXP_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) -#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) - -#define BNX2_RXP_SCRATCH 0x000e0000 -#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024 -#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038 -#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c -#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128 - - -/* - * com_reg definition - * offset: 0x100000 - */ -#define BNX2_COM_CKSUM_ERROR_STATUS 0x00100000 -#define BNX2_COM_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) -#define BNX2_COM_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) - -#define BNX2_COM_CPU_MODE 0x00105000 -#define BNX2_COM_CPU_MODE_LOCAL_RST (1L<<0) -#define BNX2_COM_CPU_MODE_STEP_ENA (1L<<1) -#define BNX2_COM_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) -#define BNX2_COM_CPU_MODE_PAGE_0_INST_ENA (1L<<3) -#define BNX2_COM_CPU_MODE_MSG_BIT1 (1L<<6) -#define BNX2_COM_CPU_MODE_INTERRUPT_ENA (1L<<7) -#define BNX2_COM_CPU_MODE_SOFT_HALT (1L<<10) -#define BNX2_COM_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) -#define BNX2_COM_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) -#define BNX2_COM_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) -#define BNX2_COM_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) - -#define BNX2_COM_CPU_STATE 0x00105004 -#define BNX2_COM_CPU_STATE_BREAKPOINT (1L<<0) -#define BNX2_COM_CPU_STATE_BAD_INST_HALTED (1L<<2) -#define BNX2_COM_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) -#define BNX2_COM_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) -#define BNX2_COM_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) -#define BNX2_COM_CPU_STATE_BAD_PC_HALTED (1L<<6) -#define BNX2_COM_CPU_STATE_ALIGN_HALTED (1L<<7) -#define BNX2_COM_CPU_STATE_FIO_ABORT_HALTED (1L<<8) -#define BNX2_COM_CPU_STATE_SOFT_HALTED (1L<<10) -#define BNX2_COM_CPU_STATE_SPAD_UNDERFLOW (1L<<11) -#define BNX2_COM_CPU_STATE_INTERRRUPT (1L<<12) -#define BNX2_COM_CPU_STATE_DATA_ACCESS_STALL (1L<<14) -#define BNX2_COM_CPU_STATE_INST_FETCH_STALL (1L<<15) -#define 
BNX2_COM_CPU_STATE_BLOCKED_READ (1L<<31) - -#define BNX2_COM_CPU_EVENT_MASK 0x00105008 -#define BNX2_COM_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) -#define BNX2_COM_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) -#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) -#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) -#define BNX2_COM_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) -#define BNX2_COM_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) -#define BNX2_COM_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) -#define BNX2_COM_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) -#define BNX2_COM_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) -#define BNX2_COM_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) -#define BNX2_COM_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) - -#define BNX2_COM_CPU_PROGRAM_COUNTER 0x0010501c -#define BNX2_COM_CPU_INSTRUCTION 0x00105020 -#define BNX2_COM_CPU_DATA_ACCESS 0x00105024 -#define BNX2_COM_CPU_INTERRUPT_ENABLE 0x00105028 -#define BNX2_COM_CPU_INTERRUPT_VECTOR 0x0010502c -#define BNX2_COM_CPU_INTERRUPT_SAVED_PC 0x00105030 -#define BNX2_COM_CPU_HW_BREAKPOINT 0x00105034 -#define BNX2_COM_CPU_HW_BREAKPOINT_DISABLE (1L<<0) -#define BNX2_COM_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) - -#define BNX2_COM_CPU_DEBUG_VECT_PEEK 0x00105038 -#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) -#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) -#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) -#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) -#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) -#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) - -#define BNX2_COM_CPU_LAST_BRANCH_ADDR 0x00105048 -#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) -#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) -#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) -#define BNX2_COM_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) - -#define BNX2_COM_CPU_REG_FILE 0x00105200 -#define BNX2_COM_COMTQ_PFE_PFE_CTL 0x001052bc -#define BNX2_COM_COMTQ_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE (0xfL<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_COUNT (0xfL<<12) -#define BNX2_COM_COMTQ_PFE_PFE_CTL_OFFSET (0x1ffL<<16) - -#define BNX2_COM_COMXQ 0x00105340 -#define BNX2_COM_COMXQ_FTQ_CMD 0x00105378 -#define BNX2_COM_COMXQ_FTQ_CMD_OFFSET (0x3ffL<<0) -#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP (1L<<10) -#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_0 (0L<<10) -#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_1 (1L<<10) -#define BNX2_COM_COMXQ_FTQ_CMD_SFT_RESET (1L<<25) -#define BNX2_COM_COMXQ_FTQ_CMD_RD_DATA (1L<<26) -#define BNX2_COM_COMXQ_FTQ_CMD_ADD_INTERVEN (1L<<27) -#define 
BNX2_COM_COMXQ_FTQ_CMD_ADD_DATA (1L<<28) -#define BNX2_COM_COMXQ_FTQ_CMD_INTERVENE_CLR (1L<<29) -#define BNX2_COM_COMXQ_FTQ_CMD_POP (1L<<30) -#define BNX2_COM_COMXQ_FTQ_CMD_BUSY (1L<<31) - -#define BNX2_COM_COMXQ_FTQ_CTL 0x0010537c -#define BNX2_COM_COMXQ_FTQ_CTL_INTERVENE (1L<<0) -#define BNX2_COM_COMXQ_FTQ_CTL_OVERFLOW (1L<<1) -#define BNX2_COM_COMXQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) -#define BNX2_COM_COMXQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) -#define BNX2_COM_COMXQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) - -#define BNX2_COM_COMTQ 0x00105380 -#define BNX2_COM_COMTQ_FTQ_CMD 0x001053b8 -#define BNX2_COM_COMTQ_FTQ_CMD_OFFSET (0x3ffL<<0) -#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP (1L<<10) -#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_0 (0L<<10) -#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_1 (1L<<10) -#define BNX2_COM_COMTQ_FTQ_CMD_SFT_RESET (1L<<25) -#define BNX2_COM_COMTQ_FTQ_CMD_RD_DATA (1L<<26) -#define BNX2_COM_COMTQ_FTQ_CMD_ADD_INTERVEN (1L<<27) -#define BNX2_COM_COMTQ_FTQ_CMD_ADD_DATA (1L<<28) -#define BNX2_COM_COMTQ_FTQ_CMD_INTERVENE_CLR (1L<<29) -#define BNX2_COM_COMTQ_FTQ_CMD_POP (1L<<30) -#define BNX2_COM_COMTQ_FTQ_CMD_BUSY (1L<<31) - -#define BNX2_COM_COMTQ_FTQ_CTL 0x001053bc -#define BNX2_COM_COMTQ_FTQ_CTL_INTERVENE (1L<<0) -#define BNX2_COM_COMTQ_FTQ_CTL_OVERFLOW (1L<<1) -#define BNX2_COM_COMTQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) -#define BNX2_COM_COMTQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) -#define BNX2_COM_COMTQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) - -#define BNX2_COM_COMQ 0x001053c0 -#define BNX2_COM_COMQ_FTQ_CMD 0x001053f8 -#define BNX2_COM_COMQ_FTQ_CMD_OFFSET (0x3ffL<<0) -#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP (1L<<10) -#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_0 (0L<<10) -#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_1 (1L<<10) -#define BNX2_COM_COMQ_FTQ_CMD_SFT_RESET (1L<<25) -#define BNX2_COM_COMQ_FTQ_CMD_RD_DATA (1L<<26) -#define BNX2_COM_COMQ_FTQ_CMD_ADD_INTERVEN (1L<<27) -#define BNX2_COM_COMQ_FTQ_CMD_ADD_DATA (1L<<28) -#define BNX2_COM_COMQ_FTQ_CMD_INTERVENE_CLR (1L<<29) -#define BNX2_COM_COMQ_FTQ_CMD_POP (1L<<30) -#define BNX2_COM_COMQ_FTQ_CMD_BUSY (1L<<31) - -#define BNX2_COM_COMQ_FTQ_CTL 0x001053fc -#define BNX2_COM_COMQ_FTQ_CTL_INTERVENE (1L<<0) -#define BNX2_COM_COMQ_FTQ_CTL_OVERFLOW (1L<<1) -#define BNX2_COM_COMQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) -#define BNX2_COM_COMQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) -#define BNX2_COM_COMQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) - -#define BNX2_COM_SCRATCH 0x00120000 - -#define BNX2_FW_RX_LOW_LATENCY 0x00120058 -#define BNX2_FW_RX_DROP_COUNT 0x00120084 - - -/* - * cp_reg definition - * offset: 0x180000 - */ -#define BNX2_CP_CKSUM_ERROR_STATUS 0x00180000 -#define BNX2_CP_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) -#define BNX2_CP_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) - -#define BNX2_CP_CPU_MODE 0x00185000 -#define BNX2_CP_CPU_MODE_LOCAL_RST (1L<<0) -#define BNX2_CP_CPU_MODE_STEP_ENA (1L<<1) -#define BNX2_CP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) -#define BNX2_CP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) -#define BNX2_CP_CPU_MODE_MSG_BIT1 (1L<<6) -#define BNX2_CP_CPU_MODE_INTERRUPT_ENA (1L<<7) -#define BNX2_CP_CPU_MODE_SOFT_HALT (1L<<10) -#define BNX2_CP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) -#define BNX2_CP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) -#define BNX2_CP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) -#define BNX2_CP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) - -#define BNX2_CP_CPU_STATE 0x00185004 -#define BNX2_CP_CPU_STATE_BREAKPOINT (1L<<0) -#define BNX2_CP_CPU_STATE_BAD_INST_HALTED (1L<<2) -#define BNX2_CP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) -#define BNX2_CP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) -#define 
BNX2_CP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) -#define BNX2_CP_CPU_STATE_BAD_PC_HALTED (1L<<6) -#define BNX2_CP_CPU_STATE_ALIGN_HALTED (1L<<7) -#define BNX2_CP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) -#define BNX2_CP_CPU_STATE_SOFT_HALTED (1L<<10) -#define BNX2_CP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) -#define BNX2_CP_CPU_STATE_INTERRRUPT (1L<<12) -#define BNX2_CP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) -#define BNX2_CP_CPU_STATE_INST_FETCH_STALL (1L<<15) -#define BNX2_CP_CPU_STATE_BLOCKED_READ (1L<<31) - -#define BNX2_CP_CPU_EVENT_MASK 0x00185008 -#define BNX2_CP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) -#define BNX2_CP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) -#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) -#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) -#define BNX2_CP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) -#define BNX2_CP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) -#define BNX2_CP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) -#define BNX2_CP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) -#define BNX2_CP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) -#define BNX2_CP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) -#define BNX2_CP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) - -#define BNX2_CP_CPU_PROGRAM_COUNTER 0x0018501c -#define BNX2_CP_CPU_INSTRUCTION 0x00185020 -#define BNX2_CP_CPU_DATA_ACCESS 0x00185024 -#define BNX2_CP_CPU_INTERRUPT_ENABLE 0x00185028 -#define BNX2_CP_CPU_INTERRUPT_VECTOR 0x0018502c -#define BNX2_CP_CPU_INTERRUPT_SAVED_PC 0x00185030 -#define BNX2_CP_CPU_HW_BREAKPOINT 0x00185034 -#define BNX2_CP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) -#define BNX2_CP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) - -#define BNX2_CP_CPU_DEBUG_VECT_PEEK 0x00185038 -#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) -#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) -#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) -#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) -#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) -#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) - -#define BNX2_CP_CPU_LAST_BRANCH_ADDR 0x00185048 -#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) -#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) -#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) -#define BNX2_CP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) - -#define BNX2_CP_CPU_REG_FILE 0x00185200 -#define BNX2_CP_CPQ_PFE_PFE_CTL 0x001853bc -#define BNX2_CP_CPQ_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE (0xfL<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4) -#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_COUNT (0xfL<<12) -#define BNX2_CP_CPQ_PFE_PFE_CTL_OFFSET (0x1ffL<<16) - -#define BNX2_CP_CPQ 0x001853c0 -#define BNX2_CP_CPQ_FTQ_CMD 0x001853f8 -#define 
BNX2_CP_CPQ_FTQ_CMD_OFFSET (0x3ffL<<0) -#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP (1L<<10) -#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_0 (0L<<10) -#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_1 (1L<<10) -#define BNX2_CP_CPQ_FTQ_CMD_SFT_RESET (1L<<25) -#define BNX2_CP_CPQ_FTQ_CMD_RD_DATA (1L<<26) -#define BNX2_CP_CPQ_FTQ_CMD_ADD_INTERVEN (1L<<27) -#define BNX2_CP_CPQ_FTQ_CMD_ADD_DATA (1L<<28) -#define BNX2_CP_CPQ_FTQ_CMD_INTERVENE_CLR (1L<<29) -#define BNX2_CP_CPQ_FTQ_CMD_POP (1L<<30) -#define BNX2_CP_CPQ_FTQ_CMD_BUSY (1L<<31) - -#define BNX2_CP_CPQ_FTQ_CTL 0x001853fc -#define BNX2_CP_CPQ_FTQ_CTL_INTERVENE (1L<<0) -#define BNX2_CP_CPQ_FTQ_CTL_OVERFLOW (1L<<1) -#define BNX2_CP_CPQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) -#define BNX2_CP_CPQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) -#define BNX2_CP_CPQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) - -#define BNX2_CP_SCRATCH 0x001a0000 - -#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080 - - -/* - * mcp_reg definition - * offset: 0x140000 - */ -#define BNX2_MCP_MCP_CONTROL 0x00140080 -#define BNX2_MCP_MCP_CONTROL_SMBUS_SEL (1L<<30) -#define BNX2_MCP_MCP_CONTROL_MCP_ISOLATE (1L<<31) - -#define BNX2_MCP_MCP_ATTENTION_STATUS 0x00140084 -#define BNX2_MCP_MCP_ATTENTION_STATUS_DRV_DOORBELL (1L<<29) -#define BNX2_MCP_MCP_ATTENTION_STATUS_WATCHDOG_TIMEOUT (1L<<30) -#define BNX2_MCP_MCP_ATTENTION_STATUS_CPU_EVENT (1L<<31) - -#define BNX2_MCP_MCP_HEARTBEAT_CONTROL 0x00140088 -#define BNX2_MCP_MCP_HEARTBEAT_CONTROL_MCP_HEARTBEAT_ENABLE (1L<<31) - -#define BNX2_MCP_MCP_HEARTBEAT_STATUS 0x0014008c -#define BNX2_MCP_MCP_HEARTBEAT_STATUS_MCP_HEARTBEAT_PERIOD (0x7ffL<<0) -#define BNX2_MCP_MCP_HEARTBEAT_STATUS_VALID (1L<<31) - -#define BNX2_MCP_MCP_HEARTBEAT 0x00140090 -#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_COUNT (0x3fffffffL<<0) -#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_INC (1L<<30) -#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_RESET (1L<<31) - -#define BNX2_MCP_WATCHDOG_RESET 0x00140094 -#define BNX2_MCP_WATCHDOG_RESET_WATCHDOG_RESET (1L<<31) - -#define BNX2_MCP_WATCHDOG_CONTROL 0x00140098 -#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_TIMEOUT (0xfffffffL<<0) -#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ATTN (1L<<29) -#define BNX2_MCP_WATCHDOG_CONTROL_MCP_RST_ENABLE (1L<<30) -#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ENABLE (1L<<31) - -#define BNX2_MCP_ACCESS_LOCK 0x0014009c -#define BNX2_MCP_ACCESS_LOCK_LOCK (1L<<31) - -#define BNX2_MCP_TOE_ID 0x001400a0 -#define BNX2_MCP_TOE_ID_FUNCTION_ID (1L<<31) - -#define BNX2_MCP_MAILBOX_CFG 0x001400a4 -#define BNX2_MCP_MAILBOX_CFG_MAILBOX_OFFSET (0x3fffL<<0) -#define BNX2_MCP_MAILBOX_CFG_MAILBOX_SIZE (0xfffL<<20) - -#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC 0x001400a8 -#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_OFFSET (0x3fffL<<0) -#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_SIZE (0xfffL<<20) - -#define BNX2_MCP_MCP_DOORBELL 0x001400ac -#define BNX2_MCP_MCP_DOORBELL_MCP_DOORBELL (1L<<31) - -#define BNX2_MCP_DRIVER_DOORBELL 0x001400b0 -#define BNX2_MCP_DRIVER_DOORBELL_DRIVER_DOORBELL (1L<<31) - -#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC 0x001400b4 -#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC_DRIVER_DOORBELL (1L<<31) - -#define BNX2_MCP_CPU_MODE 0x00145000 -#define BNX2_MCP_CPU_MODE_LOCAL_RST (1L<<0) -#define BNX2_MCP_CPU_MODE_STEP_ENA (1L<<1) -#define BNX2_MCP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) -#define BNX2_MCP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) -#define BNX2_MCP_CPU_MODE_MSG_BIT1 (1L<<6) -#define BNX2_MCP_CPU_MODE_INTERRUPT_ENA (1L<<7) -#define BNX2_MCP_CPU_MODE_SOFT_HALT (1L<<10) -#define BNX2_MCP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) -#define 
BNX2_MCP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) -#define BNX2_MCP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) -#define BNX2_MCP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) - -#define BNX2_MCP_CPU_STATE 0x00145004 -#define BNX2_MCP_CPU_STATE_BREAKPOINT (1L<<0) -#define BNX2_MCP_CPU_STATE_BAD_INST_HALTED (1L<<2) -#define BNX2_MCP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) -#define BNX2_MCP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) -#define BNX2_MCP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) -#define BNX2_MCP_CPU_STATE_BAD_PC_HALTED (1L<<6) -#define BNX2_MCP_CPU_STATE_ALIGN_HALTED (1L<<7) -#define BNX2_MCP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) -#define BNX2_MCP_CPU_STATE_SOFT_HALTED (1L<<10) -#define BNX2_MCP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) -#define BNX2_MCP_CPU_STATE_INTERRRUPT (1L<<12) -#define BNX2_MCP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) -#define BNX2_MCP_CPU_STATE_INST_FETCH_STALL (1L<<15) -#define BNX2_MCP_CPU_STATE_BLOCKED_READ (1L<<31) - -#define BNX2_MCP_CPU_EVENT_MASK 0x00145008 -#define BNX2_MCP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) -#define BNX2_MCP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) -#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) -#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) -#define BNX2_MCP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) -#define BNX2_MCP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) -#define BNX2_MCP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) -#define BNX2_MCP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) -#define BNX2_MCP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) -#define BNX2_MCP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) -#define BNX2_MCP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) - -#define BNX2_MCP_CPU_PROGRAM_COUNTER 0x0014501c -#define BNX2_MCP_CPU_INSTRUCTION 0x00145020 -#define BNX2_MCP_CPU_DATA_ACCESS 0x00145024 -#define BNX2_MCP_CPU_INTERRUPT_ENABLE 0x00145028 -#define BNX2_MCP_CPU_INTERRUPT_VECTOR 0x0014502c -#define BNX2_MCP_CPU_INTERRUPT_SAVED_PC 0x00145030 -#define BNX2_MCP_CPU_HW_BREAKPOINT 0x00145034 -#define BNX2_MCP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) -#define BNX2_MCP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) - -#define BNX2_MCP_CPU_DEBUG_VECT_PEEK 0x00145038 -#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) -#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) -#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) -#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) -#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) -#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) - -#define BNX2_MCP_CPU_LAST_BRANCH_ADDR 0x00145048 -#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) -#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) -#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) -#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) - -#define BNX2_MCP_CPU_REG_FILE 0x00145200 -#define BNX2_MCP_MCPQ 0x001453c0 -#define BNX2_MCP_MCPQ_FTQ_CMD 0x001453f8 -#define BNX2_MCP_MCPQ_FTQ_CMD_OFFSET (0x3ffL<<0) -#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP (1L<<10) -#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_0 (0L<<10) -#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_1 (1L<<10) -#define BNX2_MCP_MCPQ_FTQ_CMD_SFT_RESET (1L<<25) -#define BNX2_MCP_MCPQ_FTQ_CMD_RD_DATA (1L<<26) -#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_INTERVEN (1L<<27) -#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_DATA (1L<<28) -#define BNX2_MCP_MCPQ_FTQ_CMD_INTERVENE_CLR (1L<<29) -#define BNX2_MCP_MCPQ_FTQ_CMD_POP (1L<<30) -#define BNX2_MCP_MCPQ_FTQ_CMD_BUSY (1L<<31) - -#define BNX2_MCP_MCPQ_FTQ_CTL 0x001453fc -#define BNX2_MCP_MCPQ_FTQ_CTL_INTERVENE (1L<<0) -#define 
BNX2_MCP_MCPQ_FTQ_CTL_OVERFLOW (1L<<1) -#define BNX2_MCP_MCPQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) -#define BNX2_MCP_MCPQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) -#define BNX2_MCP_MCPQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) - -#define BNX2_MCP_ROM 0x00150000 -#define BNX2_MCP_SCRATCH 0x00160000 -#define BNX2_MCP_STATE_P1 0x0016f9c8 -#define BNX2_MCP_STATE_P0 0x0016fdc8 -#define BNX2_MCP_STATE_P1_5708 0x001699c8 -#define BNX2_MCP_STATE_P0_5708 0x00169dc8 - -#define BNX2_SHM_HDR_SIGNATURE BNX2_MCP_SCRATCH -#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK 0xffff0000 -#define BNX2_SHM_HDR_SIGNATURE_SIG 0x53530000 -#define BNX2_SHM_HDR_SIGNATURE_VER_MASK 0x000000ff -#define BNX2_SHM_HDR_SIGNATURE_VER_ONE 0x00000001 - -#define BNX2_SHM_HDR_ADDR_0 BNX2_MCP_SCRATCH + 4 -#define BNX2_SHM_HDR_ADDR_1 BNX2_MCP_SCRATCH + 8 - - -#define NUM_MC_HASH_REGISTERS 8 - - -/* PHY_ID1: bits 31-16; PHY_ID2: bits 15-0. */ -#define PHY_BCM5706_PHY_ID 0x00206160 - -#define PHY_ID(id) ((id) & 0xfffffff0) -#define PHY_REV_ID(id) ((id) & 0xf) - -/* 5708 Serdes PHY registers */ - -#define BCM5708S_BMCR_FORCE_2500 0x20 - -#define BCM5708S_UP1 0xb - -#define BCM5708S_UP1_2G5 0x1 - -#define BCM5708S_BLK_ADDR 0x1f - -#define BCM5708S_BLK_ADDR_DIG 0x0000 -#define BCM5708S_BLK_ADDR_DIG3 0x0002 -#define BCM5708S_BLK_ADDR_TX_MISC 0x0005 - -/* Digital Block */ -#define BCM5708S_1000X_CTL1 0x10 - -#define BCM5708S_1000X_CTL1_FIBER_MODE 0x0001 -#define BCM5708S_1000X_CTL1_AUTODET_EN 0x0010 - -#define BCM5708S_1000X_CTL2 0x11 - -#define BCM5708S_1000X_CTL2_PLLEL_DET_EN 0x0001 - -#define BCM5708S_1000X_STAT1 0x14 - -#define BCM5708S_1000X_STAT1_SGMII 0x0001 -#define BCM5708S_1000X_STAT1_LINK 0x0002 -#define BCM5708S_1000X_STAT1_FD 0x0004 -#define BCM5708S_1000X_STAT1_SPEED_MASK 0x0018 -#define BCM5708S_1000X_STAT1_SPEED_10 0x0000 -#define BCM5708S_1000X_STAT1_SPEED_100 0x0008 -#define BCM5708S_1000X_STAT1_SPEED_1G 0x0010 -#define BCM5708S_1000X_STAT1_SPEED_2G5 0x0018 -#define BCM5708S_1000X_STAT1_TX_PAUSE 0x0020 -#define BCM5708S_1000X_STAT1_RX_PAUSE 0x0040 - -/* Digital3 Block */ -#define BCM5708S_DIG_3_0 0x10 - -#define BCM5708S_DIG_3_0_USE_IEEE 0x0001 - -/* Tx/Misc Block */ -#define BCM5708S_TX_ACTL1 0x15 - -#define BCM5708S_TX_ACTL1_DRIVER_VCM 0x30 - -#define BCM5708S_TX_ACTL3 0x17 - -#define MII_BNX2_DSP_RW_PORT 0x15 -#define MII_BNX2_DSP_ADDRESS 0x17 -#define MII_BNX2_DSP_EXPAND_REG 0x0f00 -#define MII_EXPAND_REG1 (MII_BNX2_DSP_EXPAND_REG | 1) -#define MII_EXPAND_REG1_RUDI_C 0x20 -#define MII_EXPAND_SERDES_CTL (MII_BNX2_DSP_EXPAND_REG | 3) - -#define MII_BNX2_MISC_SHADOW 0x1c -#define MISC_SHDW_AN_DBG 0x6800 -#define MISC_SHDW_AN_DBG_NOSYNC 0x0002 -#define MISC_SHDW_AN_DBG_RUDI_INVALID 0x0100 -#define MISC_SHDW_MODE_CTL 0x7c00 -#define MISC_SHDW_MODE_CTL_SIG_DET 0x0010 - -#define MII_BNX2_BLK_ADDR 0x1f -#define MII_BNX2_BLK_ADDR_IEEE0 0x0000 -#define MII_BNX2_BLK_ADDR_GP_STATUS 0x8120 -#define MII_BNX2_GP_TOP_AN_STATUS1 0x1b -#define MII_BNX2_GP_TOP_AN_SPEED_MSK 0x3f00 -#define MII_BNX2_GP_TOP_AN_SPEED_10 0x0000 -#define MII_BNX2_GP_TOP_AN_SPEED_100 0x0100 -#define MII_BNX2_GP_TOP_AN_SPEED_1G 0x0200 -#define MII_BNX2_GP_TOP_AN_SPEED_2_5G 0x0300 -#define MII_BNX2_GP_TOP_AN_SPEED_1GKV 0x0d00 -#define MII_BNX2_GP_TOP_AN_FD 0x8 -#define MII_BNX2_BLK_ADDR_SERDES_DIG 0x8300 -#define MII_BNX2_SERDES_DIG_1000XCTL1 0x10 -#define MII_BNX2_SD_1000XCTL1_FIBER 0x01 -#define MII_BNX2_SD_1000XCTL1_AUTODET 0x10 -#define MII_BNX2_SERDES_DIG_MISC1 0x18 -#define MII_BNX2_SD_MISC1_FORCE_MSK 0xf -#define MII_BNX2_SD_MISC1_FORCE_2_5G 0x0 -#define MII_BNX2_SD_MISC1_FORCE 0x10 
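The BCM5708S_* and MII_BNX2_* registers above sit in banks that must be selected through a block-address register (BCM5708S_BLK_ADDR / MII_BNX2_BLK_ADDR) before the registers inside that bank become visible. A minimal sketch of such a banked access, assuming bnx2_write_phy()/bnx2_read_phy() MDIO helpers that are outside this hunk:

/* Sketch only: select the Digital block, then read 1000X_STAT1 from it.
 * bnx2_write_phy()/bnx2_read_phy() are assumed MDIO accessors, not part
 * of this patch. */
static int bnx2_5708s_read_1000x_stat1(struct bnx2 *bp, u32 *val)
{
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	return bnx2_read_phy(bp, BCM5708S_1000X_STAT1, val);
}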
-#define MII_BNX2_BLK_ADDR_OVER1G 0x8320 -#define MII_BNX2_OVER1G_UP1 0x19 -#define MII_BNX2_BLK_ADDR_BAM_NXTPG 0x8350 -#define MII_BNX2_BAM_NXTPG_CTL 0x10 -#define MII_BNX2_NXTPG_CTL_BAM 0x1 -#define MII_BNX2_NXTPG_CTL_T2 0x2 -#define MII_BNX2_BLK_ADDR_CL73_USERB0 0x8370 -#define MII_BNX2_CL73_BAM_CTL1 0x12 -#define MII_BNX2_CL73_BAM_EN 0x8000 -#define MII_BNX2_CL73_BAM_STA_MGR_EN 0x4000 -#define MII_BNX2_CL73_BAM_NP_AFT_BP_EN 0x2000 -#define MII_BNX2_BLK_ADDR_AER 0xffd0 -#define MII_BNX2_AER_AER 0x1e -#define MII_BNX2_AER_AER_AN_MMD 0x3800 -#define MII_BNX2_BLK_ADDR_COMBO_IEEEB0 0xffe0 - -#define MIN_ETHERNET_PACKET_SIZE 60 -#define MAX_ETHERNET_PACKET_SIZE 1514 -#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014 - -#define BNX2_RX_COPY_THRESH 128 - -#define BNX2_MISC_ENABLE_DEFAULT 0x17ffffff - -#define BNX2_START_UNICAST_ADDRESS_INDEX 4 -#define BNX2_END_UNICAST_ADDRESS_INDEX 7 -#define BNX2_MAX_UNICAST_ADDRESSES (BNX2_END_UNICAST_ADDRESS_INDEX - \ - BNX2_START_UNICAST_ADDRESS_INDEX + 1) - -#define DMA_READ_CHANS 5 -#define DMA_WRITE_CHANS 3 - -/* Use CPU native page size up to 16K for the ring sizes. */ -#if (PAGE_SHIFT > 14) -#define BCM_PAGE_BITS 14 -#else -#define BCM_PAGE_BITS PAGE_SHIFT -#endif -#define BCM_PAGE_SIZE (1 << BCM_PAGE_BITS) - -#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd)) -#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) - -#define MAX_RX_RINGS 8 -#define MAX_RX_PG_RINGS 32 -#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd)) -#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1) -#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS) -#define MAX_TOTAL_RX_PG_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_PG_RINGS) - -#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \ - (MAX_TX_DESC_CNT - 1)) ? \ - (x) + 2 : (x) + 1 - -#define TX_RING_IDX(x) ((x) & MAX_TX_DESC_CNT) - -#define NEXT_RX_BD(x) (((x) & (MAX_RX_DESC_CNT - 1)) == \ - (MAX_RX_DESC_CNT - 1)) ? \ - (x) + 2 : (x) + 1 - -#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx) -#define RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx) - -#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> (BCM_PAGE_BITS - 4)) -#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT) - -/* Context size. 
*/ -#define CTX_SHIFT 7 -#define CTX_SIZE (1 << CTX_SHIFT) -#define CTX_MASK (CTX_SIZE - 1) -#define GET_CID_ADDR(_cid) ((_cid) << CTX_SHIFT) -#define GET_CID(_cid_addr) ((_cid_addr) >> CTX_SHIFT) - -#define PHY_CTX_SHIFT 6 -#define PHY_CTX_SIZE (1 << PHY_CTX_SHIFT) -#define PHY_CTX_MASK (PHY_CTX_SIZE - 1) -#define GET_PCID_ADDR(_pcid) ((_pcid) << PHY_CTX_SHIFT) -#define GET_PCID(_pcid_addr) ((_pcid_addr) >> PHY_CTX_SHIFT) - -#define MB_KERNEL_CTX_SHIFT 8 -#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT) -#define MB_KERNEL_CTX_MASK (MB_KERNEL_CTX_SIZE - 1) -#define MB_GET_CID_ADDR(_cid) (0x10000 + ((_cid) << MB_KERNEL_CTX_SHIFT)) - -#define MAX_CID_CNT 0x4000 -#define MAX_CID_ADDR (GET_CID_ADDR(MAX_CID_CNT)) -#define INVALID_CID_ADDR 0xffffffff - -#define TX_CID 16 -#define TX_TSS_CID 32 -#define RX_CID 0 -#define RX_RSS_CID 4 -#define RX_MAX_RSS_RINGS 7 -#define RX_MAX_RINGS (RX_MAX_RSS_RINGS + 1) -#define TX_MAX_TSS_RINGS 7 -#define TX_MAX_RINGS (TX_MAX_TSS_RINGS + 1) - -#define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID) -#define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID) - -struct sw_bd { - struct sk_buff *skb; - struct l2_fhdr *desc; - DEFINE_DMA_UNMAP_ADDR(mapping); -}; - -struct sw_pg { - struct page *page; - DEFINE_DMA_UNMAP_ADDR(mapping); -}; - -struct sw_tx_bd { - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(mapping); - unsigned short is_gso; - unsigned short nr_frags; -}; - -#define SW_RXBD_RING_SIZE (sizeof(struct sw_bd) * RX_DESC_CNT) -#define SW_RXPG_RING_SIZE (sizeof(struct sw_pg) * RX_DESC_CNT) -#define RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT) -#define SW_TXBD_RING_SIZE (sizeof(struct sw_tx_bd) * TX_DESC_CNT) -#define TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT) - -/* Buffered flash (Atmel: AT45DB011B) specific information */ -#define SEEPROM_PAGE_BITS 2 -#define SEEPROM_PHY_PAGE_SIZE (1 << SEEPROM_PAGE_BITS) -#define SEEPROM_BYTE_ADDR_MASK (SEEPROM_PHY_PAGE_SIZE-1) -#define SEEPROM_PAGE_SIZE 4 -#define SEEPROM_TOTAL_SIZE 65536 - -#define BUFFERED_FLASH_PAGE_BITS 9 -#define BUFFERED_FLASH_PHY_PAGE_SIZE (1 << BUFFERED_FLASH_PAGE_BITS) -#define BUFFERED_FLASH_BYTE_ADDR_MASK (BUFFERED_FLASH_PHY_PAGE_SIZE-1) -#define BUFFERED_FLASH_PAGE_SIZE 264 -#define BUFFERED_FLASH_TOTAL_SIZE 0x21000 - -#define SAIFUN_FLASH_PAGE_BITS 8 -#define SAIFUN_FLASH_PHY_PAGE_SIZE (1 << SAIFUN_FLASH_PAGE_BITS) -#define SAIFUN_FLASH_BYTE_ADDR_MASK (SAIFUN_FLASH_PHY_PAGE_SIZE-1) -#define SAIFUN_FLASH_PAGE_SIZE 256 -#define SAIFUN_FLASH_BASE_TOTAL_SIZE 65536 - -#define ST_MICRO_FLASH_PAGE_BITS 8 -#define ST_MICRO_FLASH_PHY_PAGE_SIZE (1 << ST_MICRO_FLASH_PAGE_BITS) -#define ST_MICRO_FLASH_BYTE_ADDR_MASK (ST_MICRO_FLASH_PHY_PAGE_SIZE-1) -#define ST_MICRO_FLASH_PAGE_SIZE 256 -#define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536 - -#define BCM5709_FLASH_PAGE_BITS 8 -#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS) -#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1) -#define BCM5709_FLASH_PAGE_SIZE 256 - -#define NVRAM_TIMEOUT_COUNT 30000 - - -#define FLASH_STRAP_MASK (BNX2_NVM_CFG1_FLASH_MODE | \ - BNX2_NVM_CFG1_BUFFER_MODE | \ - BNX2_NVM_CFG1_PROTECT_MODE | \ - BNX2_NVM_CFG1_FLASH_SIZE) - -#define FLASH_BACKUP_STRAP_MASK (0xf << 26) - -struct flash_spec { - u32 strapping; - u32 config1; - u32 config2; - u32 config3; - u32 write1; - u32 flags; -#define BNX2_NV_BUFFERED 0x00000001 -#define BNX2_NV_TRANSLATE 0x00000002 -#define BNX2_NV_WREN 0x00000004 - u32 page_bits; - u32 page_size; - u32 addr_mask; - u32 total_size; - u8 *name; -}; - -#define BNX2_MAX_MSIX_HW_VEC 9 
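The NEXT_TX_BD()/TX_RING_IDX() macros above reserve the last descriptor of every page as a chain BD, so the software producer index skips one slot at each page boundary while the ring index wraps. A self-contained sketch of that arithmetic, assuming the 4 KB page case (TX_DESC_CNT = 256, i.e. a 16-byte struct tx_bd):

#include <stdio.h>

/* Standalone copy of the index macros, with TX_DESC_CNT fixed at its
 * assumed 4 KB-page value.  Slot 255 of each page is the chain BD and
 * is never handed out as a data descriptor. */
#define TX_DESC_CNT      256
#define MAX_TX_DESC_CNT  (TX_DESC_CNT - 1)

#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) ==	\
		(MAX_TX_DESC_CNT - 1)) ?		\
		(x) + 2 : (x) + 1
#define TX_RING_IDX(x) ((x) & MAX_TX_DESC_CNT)

int main(void)
{
	unsigned int prod = 253;
	int i;

	for (i = 0; i < 4; i++) {
		printf("sw index %u -> ring slot %u\n", prod, TX_RING_IDX(prod));
		prod = NEXT_TX_BD(prod);
	}
	return 0;	/* slots go 253, 254, 0, 1 -- slot 255 is skipped */
}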
-#define BNX2_MAX_MSIX_VEC 9 -#ifdef BCM_CNIC -#define BNX2_MIN_MSIX_VEC 2 -#else -#define BNX2_MIN_MSIX_VEC 1 -#endif - - -struct bnx2_irq { - irq_handler_t handler; - unsigned int vector; - u8 requested; - char name[IFNAMSIZ + 2]; -}; - -struct bnx2_tx_ring_info { - u32 tx_prod_bseq; - u16 tx_prod; - u32 tx_bidx_addr; - u32 tx_bseq_addr; - - struct tx_bd *tx_desc_ring; - struct sw_tx_bd *tx_buf_ring; - - u16 tx_cons; - u16 hw_tx_cons; - - dma_addr_t tx_desc_mapping; -}; - -struct bnx2_rx_ring_info { - u32 rx_prod_bseq; - u16 rx_prod; - u16 rx_cons; - - u32 rx_bidx_addr; - u32 rx_bseq_addr; - u32 rx_pg_bidx_addr; - - u16 rx_pg_prod; - u16 rx_pg_cons; - - struct sw_bd *rx_buf_ring; - struct rx_bd *rx_desc_ring[MAX_RX_RINGS]; - struct sw_pg *rx_pg_ring; - struct rx_bd *rx_pg_desc_ring[MAX_RX_PG_RINGS]; - - dma_addr_t rx_desc_mapping[MAX_RX_RINGS]; - dma_addr_t rx_pg_desc_mapping[MAX_RX_PG_RINGS]; -}; - -struct bnx2_napi { - struct napi_struct napi ____cacheline_aligned; - struct bnx2 *bp; - union { - struct status_block *msi; - struct status_block_msix *msix; - } status_blk; - u16 *hw_tx_cons_ptr; - u16 *hw_rx_cons_ptr; - u32 last_status_idx; - u32 int_num; - -#ifdef BCM_CNIC - u32 cnic_tag; - int cnic_present; -#endif - - struct bnx2_rx_ring_info rx_ring; - struct bnx2_tx_ring_info tx_ring; -}; - -struct bnx2 { - /* Fields used in the tx and intr/napi performance paths are grouped */ - /* together in the beginning of the structure. */ - void __iomem *regview; - - struct net_device *dev; - struct pci_dev *pdev; - - atomic_t intr_sem; - - u32 flags; -#define BNX2_FLAG_PCIX 0x00000001 -#define BNX2_FLAG_PCI_32BIT 0x00000002 -#define BNX2_FLAG_MSIX_CAP 0x00000004 -#define BNX2_FLAG_NO_WOL 0x00000008 -#define BNX2_FLAG_USING_MSI 0x00000020 -#define BNX2_FLAG_ASF_ENABLE 0x00000040 -#define BNX2_FLAG_MSI_CAP 0x00000080 -#define BNX2_FLAG_ONE_SHOT_MSI 0x00000100 -#define BNX2_FLAG_PCIE 0x00000200 -#define BNX2_FLAG_USING_MSIX 0x00000400 -#define BNX2_FLAG_USING_MSI_OR_MSIX (BNX2_FLAG_USING_MSI | \ - BNX2_FLAG_USING_MSIX) -#define BNX2_FLAG_JUMBO_BROKEN 0x00000800 -#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000 -#define BNX2_FLAG_BROKEN_STATS 0x00002000 -#define BNX2_FLAG_AER_ENABLED 0x00004000 - - struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC]; - - u32 rx_buf_use_size; /* useable size */ - u32 rx_buf_size; /* with alignment */ - u32 rx_copy_thresh; - u32 rx_jumbo_thresh; - u32 rx_max_ring_idx; - u32 rx_max_pg_ring_idx; - - /* TX constants */ - int tx_ring_size; - u32 tx_wake_thresh; - -#ifdef BCM_CNIC - struct cnic_ops __rcu *cnic_ops; - void *cnic_data; -#endif - - /* End of fields used in the performance code paths. */ - - unsigned int current_interval; -#define BNX2_TIMER_INTERVAL HZ -#define BNX2_SERDES_AN_TIMEOUT (HZ / 3) -#define BNX2_SERDES_FORCED_TIMEOUT (HZ / 10) - - struct timer_list timer; - struct work_struct reset_task; - - /* Used to synchronize phy accesses. 
*/ - spinlock_t phy_lock; - spinlock_t indirect_lock; - - u32 phy_flags; -#define BNX2_PHY_FLAG_SERDES 0x00000001 -#define BNX2_PHY_FLAG_CRC_FIX 0x00000002 -#define BNX2_PHY_FLAG_PARALLEL_DETECT 0x00000004 -#define BNX2_PHY_FLAG_2_5G_CAPABLE 0x00000008 -#define BNX2_PHY_FLAG_INT_MODE_MASK 0x00000300 -#define BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING 0x00000100 -#define BNX2_PHY_FLAG_INT_MODE_LINK_READY 0x00000200 -#define BNX2_PHY_FLAG_DIS_EARLY_DAC 0x00000400 -#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800 -#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000 -#define BNX2_PHY_FLAG_NO_PARALLEL 0x00002000 - - u32 mii_bmcr; - u32 mii_bmsr; - u32 mii_bmsr1; - u32 mii_adv; - u32 mii_lpa; - u32 mii_up1; - - u32 chip_id; - /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ -#define CHIP_NUM(bp) (((bp)->chip_id) & 0xffff0000) -#define CHIP_NUM_5706 0x57060000 -#define CHIP_NUM_5708 0x57080000 -#define CHIP_NUM_5709 0x57090000 - -#define CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000) -#define CHIP_REV_Ax 0x00000000 -#define CHIP_REV_Bx 0x00001000 -#define CHIP_REV_Cx 0x00002000 - -#define CHIP_METAL(bp) (((bp)->chip_id) & 0x00000ff0) -#define CHIP_BONDING(bp) (((bp)->chip_id) & 0x0000000f) - -#define CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0) -#define CHIP_ID_5706_A0 0x57060000 -#define CHIP_ID_5706_A1 0x57060010 -#define CHIP_ID_5706_A2 0x57060020 -#define CHIP_ID_5708_A0 0x57080000 -#define CHIP_ID_5708_B0 0x57081000 -#define CHIP_ID_5708_B1 0x57081010 -#define CHIP_ID_5709_A0 0x57090000 -#define CHIP_ID_5709_A1 0x57090010 - -#define CHIP_BOND_ID(bp) (((bp)->chip_id) & 0xf) - -/* A serdes chip will have the first bit of the bond id set. */ -#define CHIP_BOND_ID_SERDES_BIT 0x01 - - u32 phy_addr; - u32 phy_id; - - u16 bus_speed_mhz; - u8 wol; - - u8 pad; - - u16 fw_wr_seq; - u16 fw_drv_pulse_wr_seq; - - int rx_max_ring; - int rx_ring_size; - - int rx_max_pg_ring; - int rx_pg_ring_size; - - u16 tx_quick_cons_trip; - u16 tx_quick_cons_trip_int; - u16 rx_quick_cons_trip; - u16 rx_quick_cons_trip_int; - u16 comp_prod_trip; - u16 comp_prod_trip_int; - u16 tx_ticks; - u16 tx_ticks_int; - u16 com_ticks; - u16 com_ticks_int; - u16 cmd_ticks; - u16 cmd_ticks_int; - u16 rx_ticks; - u16 rx_ticks_int; - - u32 stats_ticks; - - dma_addr_t status_blk_mapping; - - struct statistics_block *stats_blk; - struct statistics_block *temp_stats_blk; - dma_addr_t stats_blk_mapping; - - int ctx_pages; - void *ctx_blk[4]; - dma_addr_t ctx_blk_mapping[4]; - - u32 hc_cmd; - u32 rx_mode; - - u16 req_line_speed; - u8 req_duplex; - - u8 phy_port; - u8 link_up; - - u16 line_speed; - u8 duplex; - u8 flow_ctrl; /* actual flow ctrl settings */ - /* may be different from */ - /* req_flow_ctrl if autoneg */ - u32 advertising; - - u8 req_flow_ctrl; /* flow ctrl advertisement */ - /* settings or forced */ - /* settings */ - u8 autoneg; -#define AUTONEG_SPEED 1 -#define AUTONEG_FLOW_CTRL 2 - - u8 loopback; -#define MAC_LOOPBACK 1 -#define PHY_LOOPBACK 2 - - u8 serdes_an_pending; - - u8 mac_addr[8]; - - u32 shmem_base; - - char fw_version[32]; - - int pm_cap; - int pcix_cap; - - const struct flash_spec *flash_info; - u32 flash_size; - - int status_stats_size; - - struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC]; - int irq_nvecs; - - u8 num_tx_rings; - u8 num_rx_rings; - - u32 leds_save; - u32 idle_chk_status_idx; - -#ifdef BCM_CNIC - struct mutex cnic_lock; - struct cnic_eth_dev cnic_eth_dev; -#endif - - const struct firmware *mips_firmware; - const struct firmware *rv2p_firmware; -}; - -#define REG_RD(bp, offset) \ - readl(bp->regview + offset) - 
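As a worked example of the chip_id layout documented above (chip num in bits 16-31, rev in bits 12-15, metal in 4-11, bond id in 0-3): decoding 0x57081010 yields CHIP_NUM_5708 with a Bx revision, i.e. CHIP_ID_5708_B1. A standalone sketch, with the masks copied from the CHIP_* defines but adapted to take a raw id instead of a struct bnx2 pointer:

#include <stdio.h>

/* Illustration only: the real macros dereference bp->chip_id; these
 * take the 32-bit id directly so the sketch is self-contained. */
#define CHIP_NUM(id)	((id) & 0xffff0000)
#define CHIP_REV(id)	((id) & 0x0000f000)
#define CHIP_ID(id)	((id) & 0xfffffff0)

int main(void)
{
	unsigned int chip_id = 0x57081010;	/* CHIP_ID_5708_B1 */

	printf("num 0x%08x rev 0x%04x id 0x%08x\n",
	       CHIP_NUM(chip_id), CHIP_REV(chip_id), CHIP_ID(chip_id));
	/* -> num 0x57080000 (5708), rev 0x1000 (Bx), id 0x57081010 (B1) */
	return 0;
}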
-#define REG_WR(bp, offset, val) \ - writel(val, bp->regview + offset) - -#define REG_WR16(bp, offset, val) \ - writew(val, bp->regview + offset) - -struct cpu_reg { - u32 mode; - u32 mode_value_halt; - u32 mode_value_sstep; - - u32 state; - u32 state_value_clear; - - u32 gpr0; - u32 evmask; - u32 pc; - u32 inst; - u32 bp; - - u32 spad_base; - - u32 mips_view_base; -}; - -struct bnx2_fw_file_section { - __be32 addr; - __be32 len; - __be32 offset; -}; - -struct bnx2_mips_fw_file_entry { - __be32 start_addr; - struct bnx2_fw_file_section text; - struct bnx2_fw_file_section data; - struct bnx2_fw_file_section rodata; -}; - -struct bnx2_rv2p_fw_file_entry { - struct bnx2_fw_file_section rv2p; - __be32 fixup[8]; -}; - -struct bnx2_mips_fw_file { - struct bnx2_mips_fw_file_entry com; - struct bnx2_mips_fw_file_entry cp; - struct bnx2_mips_fw_file_entry rxp; - struct bnx2_mips_fw_file_entry tpat; - struct bnx2_mips_fw_file_entry txp; -}; - -struct bnx2_rv2p_fw_file { - struct bnx2_rv2p_fw_file_entry proc1; - struct bnx2_rv2p_fw_file_entry proc2; -}; - -#define RV2P_P1_FIXUP_PAGE_SIZE_IDX 0 -#define RV2P_BD_PAGE_SIZE_MSK 0xffff -#define RV2P_BD_PAGE_SIZE ((BCM_PAGE_SIZE / 16) - 1) - -#define RV2P_PROC1 0 -#define RV2P_PROC2 1 - - -/* This value (in milliseconds) determines the frequency of the driver - * issuing the PULSE message code. The firmware monitors this periodic - * pulse to determine when to switch to an OS-absent mode. */ -#define BNX2_DRV_PULSE_PERIOD_MS 250 - -/* This value (in milliseconds) determines how long the driver should - * wait for an acknowledgement from the firmware before timing out. Once - * the firmware has timed out, the driver will assume there is no firmware - * running and there won't be any firmware-driver synchronization during a - * driver reset. 
*/ -#define BNX2_FW_ACK_TIME_OUT_MS 1000 - - -#define BNX2_DRV_RESET_SIGNATURE 0x00000000 -#define BNX2_DRV_RESET_SIGNATURE_MAGIC 0x4841564b /* HAVK */ -//#define DRV_RESET_SIGNATURE_MAGIC 0x47495352 /* RSIG */ - -#define BNX2_DRV_MB 0x00000004 -#define BNX2_DRV_MSG_CODE 0xff000000 -#define BNX2_DRV_MSG_CODE_RESET 0x01000000 -#define BNX2_DRV_MSG_CODE_UNLOAD 0x02000000 -#define BNX2_DRV_MSG_CODE_SHUTDOWN 0x03000000 -#define BNX2_DRV_MSG_CODE_SUSPEND_WOL 0x04000000 -#define BNX2_DRV_MSG_CODE_FW_TIMEOUT 0x05000000 -#define BNX2_DRV_MSG_CODE_PULSE 0x06000000 -#define BNX2_DRV_MSG_CODE_DIAG 0x07000000 -#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000 -#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000 -#define BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE 0x0d000000 -#define BNX2_DRV_MSG_CODE_CMD_SET_LINK 0x10000000 - -#define BNX2_DRV_MSG_DATA 0x00ff0000 -#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000 -#define BNX2_DRV_MSG_DATA_WAIT1 0x00020000 -#define BNX2_DRV_MSG_DATA_WAIT2 0x00030000 -#define BNX2_DRV_MSG_DATA_WAIT3 0x00040000 - -#define BNX2_DRV_MSG_SEQ 0x0000ffff - -#define BNX2_FW_MB 0x00000008 -#define BNX2_FW_MSG_ACK 0x0000ffff -#define BNX2_FW_MSG_STATUS_MASK 0x00ff0000 -#define BNX2_FW_MSG_STATUS_OK 0x00000000 -#define BNX2_FW_MSG_STATUS_FAILURE 0x00ff0000 - -#define BNX2_LINK_STATUS 0x0000000c -#define BNX2_LINK_STATUS_INIT_VALUE 0xffffffff -#define BNX2_LINK_STATUS_LINK_UP 0x1 -#define BNX2_LINK_STATUS_LINK_DOWN 0x0 -#define BNX2_LINK_STATUS_SPEED_MASK 0x1e -#define BNX2_LINK_STATUS_AN_INCOMPLETE (0<<1) -#define BNX2_LINK_STATUS_10HALF (1<<1) -#define BNX2_LINK_STATUS_10FULL (2<<1) -#define BNX2_LINK_STATUS_100HALF (3<<1) -#define BNX2_LINK_STATUS_100BASE_T4 (4<<1) -#define BNX2_LINK_STATUS_100FULL (5<<1) -#define BNX2_LINK_STATUS_1000HALF (6<<1) -#define BNX2_LINK_STATUS_1000FULL (7<<1) -#define BNX2_LINK_STATUS_2500HALF (8<<1) -#define BNX2_LINK_STATUS_2500FULL (9<<1) -#define BNX2_LINK_STATUS_AN_ENABLED (1<<5) -#define BNX2_LINK_STATUS_AN_COMPLETE (1<<6) -#define BNX2_LINK_STATUS_PARALLEL_DET (1<<7) -#define BNX2_LINK_STATUS_RESERVED (1<<8) -#define BNX2_LINK_STATUS_PARTNER_AD_1000FULL (1<<9) -#define BNX2_LINK_STATUS_PARTNER_AD_1000HALF (1<<10) -#define BNX2_LINK_STATUS_PARTNER_AD_100BT4 (1<<11) -#define BNX2_LINK_STATUS_PARTNER_AD_100FULL (1<<12) -#define BNX2_LINK_STATUS_PARTNER_AD_100HALF (1<<13) -#define BNX2_LINK_STATUS_PARTNER_AD_10FULL (1<<14) -#define BNX2_LINK_STATUS_PARTNER_AD_10HALF (1<<15) -#define BNX2_LINK_STATUS_TX_FC_ENABLED (1<<16) -#define BNX2_LINK_STATUS_RX_FC_ENABLED (1<<17) -#define BNX2_LINK_STATUS_PARTNER_SYM_PAUSE_CAP (1<<18) -#define BNX2_LINK_STATUS_PARTNER_ASYM_PAUSE_CAP (1<<19) -#define BNX2_LINK_STATUS_SERDES_LINK (1<<20) -#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21) -#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22) -#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED (1<<31) - -#define BNX2_DRV_PULSE_MB 0x00000010 -#define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff - -/* Indicate to the firmware not to go into the - * OS absent when it is not getting driver pulse. - * This is used for debugging. 
*/ -#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000 - -#define BNX2_DRV_MB_ARG0 0x00000014 -#define BNX2_NETLINK_SET_LINK_SPEED_10HALF (1<<0) -#define BNX2_NETLINK_SET_LINK_SPEED_10FULL (1<<1) -#define BNX2_NETLINK_SET_LINK_SPEED_10 \ - (BNX2_NETLINK_SET_LINK_SPEED_10HALF | \ - BNX2_NETLINK_SET_LINK_SPEED_10FULL) -#define BNX2_NETLINK_SET_LINK_SPEED_100HALF (1<<2) -#define BNX2_NETLINK_SET_LINK_SPEED_100FULL (1<<3) -#define BNX2_NETLINK_SET_LINK_SPEED_100 \ - (BNX2_NETLINK_SET_LINK_SPEED_100HALF | \ - BNX2_NETLINK_SET_LINK_SPEED_100FULL) -#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF (1<<4) -#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL (1<<5) -#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF (1<<6) -#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL (1<<7) -#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF (1<<8) -#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL (1<<9) -#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG (1<<10) -#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE (1<<11) -#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE (1<<12) -#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE (1<<13) -#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED (1<<14) -#define BNX2_NETLINK_SET_LINK_PHY_RESET (1<<15) - -#define BNX2_DEV_INFO_SIGNATURE 0x00000020 -#define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900 -#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00 -#define BNX2_DEV_INFO_FEATURE_CFG_VALID 0x01 -#define BNX2_DEV_INFO_SECONDARY_PORT 0x80 -#define BNX2_DEV_INFO_DRV_ALWAYS_ALIVE 0x40 - -#define BNX2_SHARED_HW_CFG_PART_NUM 0x00000024 - -#define BNX2_SHARED_HW_CFG_POWER_DISSIPATED 0x00000034 -#define BNX2_SHARED_HW_CFG_POWER_STATE_D3_MASK 0xff000000 -#define BNX2_SHARED_HW_CFG_POWER_STATE_D2_MASK 0xff0000 -#define BNX2_SHARED_HW_CFG_POWER_STATE_D1_MASK 0xff00 -#define BNX2_SHARED_HW_CFG_POWER_STATE_D0_MASK 0xff - -#define BNX2_SHARED_HW_CFG POWER_CONSUMED 0x00000038 -#define BNX2_SHARED_HW_CFG_CONFIG 0x0000003c -#define BNX2_SHARED_HW_CFG_DESIGN_NIC 0 -#define BNX2_SHARED_HW_CFG_DESIGN_LOM 0x1 -#define BNX2_SHARED_HW_CFG_PHY_COPPER 0 -#define BNX2_SHARED_HW_CFG_PHY_FIBER 0x2 -#define BNX2_SHARED_HW_CFG_PHY_2_5G 0x20 -#define BNX2_SHARED_HW_CFG_PHY_BACKPLANE 0x40 -#define BNX2_SHARED_HW_CFG_LED_MODE_SHIFT_BITS 8 -#define BNX2_SHARED_HW_CFG_LED_MODE_MASK 0x300 -#define BNX2_SHARED_HW_CFG_LED_MODE_MAC 0 -#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY1 0x100 -#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY2 0x200 -#define BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX 0x8000 - -#define BNX2_SHARED_HW_CFG_CONFIG2 0x00000040 -#define BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK 0x00fff000 - -#define BNX2_DEV_INFO_BC_REV 0x0000004c - -#define BNX2_PORT_HW_CFG_MAC_UPPER 0x00000050 -#define BNX2_PORT_HW_CFG_UPPERMAC_MASK 0xffff - -#define BNX2_PORT_HW_CFG_MAC_LOWER 0x00000054 -#define BNX2_PORT_HW_CFG_CONFIG 0x00000058 -#define BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK 0x0000ffff -#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK 0x001f0000 -#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_AN 0x00000000 -#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G 0x00030000 -#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_2_5G 0x00040000 - -#define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER 0x00000068 -#define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER 0x0000006c -#define BNX2_PORT_HW_CFG_IMD_MAC_B_UPPER 0x00000070 -#define BNX2_PORT_HW_CFG_IMD_MAC_B_LOWER 0x00000074 -#define BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER 0x00000078 -#define BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER 0x0000007c - -#define BNX2_DEV_INFO_PER_PORT_HW_CONFIG2 0x000000b4 - -#define BNX2_DEV_INFO_FORMAT_REV 0x000000c4 -#define BNX2_DEV_INFO_FORMAT_REV_MASK 0xff000000 -#define 
BNX2_DEV_INFO_FORMAT_REV_ID ('A' << 24) - -#define BNX2_SHARED_FEATURE 0x000000c8 -#define BNX2_SHARED_FEATURE_MASK 0xffffffff - -#define BNX2_PORT_FEATURE 0x000000d8 -#define BNX2_PORT2_FEATURE 0x00000014c -#define BNX2_PORT_FEATURE_WOL_ENABLED 0x01000000 -#define BNX2_PORT_FEATURE_MBA_ENABLED 0x02000000 -#define BNX2_PORT_FEATURE_ASF_ENABLED 0x04000000 -#define BNX2_PORT_FEATURE_IMD_ENABLED 0x08000000 -#define BNX2_PORT_FEATURE_BAR1_SIZE_MASK 0xf -#define BNX2_PORT_FEATURE_BAR1_SIZE_DISABLED 0x0 -#define BNX2_PORT_FEATURE_BAR1_SIZE_64K 0x1 -#define BNX2_PORT_FEATURE_BAR1_SIZE_128K 0x2 -#define BNX2_PORT_FEATURE_BAR1_SIZE_256K 0x3 -#define BNX2_PORT_FEATURE_BAR1_SIZE_512K 0x4 -#define BNX2_PORT_FEATURE_BAR1_SIZE_1M 0x5 -#define BNX2_PORT_FEATURE_BAR1_SIZE_2M 0x6 -#define BNX2_PORT_FEATURE_BAR1_SIZE_4M 0x7 -#define BNX2_PORT_FEATURE_BAR1_SIZE_8M 0x8 -#define BNX2_PORT_FEATURE_BAR1_SIZE_16M 0x9 -#define BNX2_PORT_FEATURE_BAR1_SIZE_32M 0xa -#define BNX2_PORT_FEATURE_BAR1_SIZE_64M 0xb -#define BNX2_PORT_FEATURE_BAR1_SIZE_128M 0xc -#define BNX2_PORT_FEATURE_BAR1_SIZE_256M 0xd -#define BNX2_PORT_FEATURE_BAR1_SIZE_512M 0xe -#define BNX2_PORT_FEATURE_BAR1_SIZE_1G 0xf - -#define BNX2_PORT_FEATURE_WOL 0xdc -#define BNX2_PORT2_FEATURE_WOL 0x150 -#define BNX2_PORT_FEATURE_WOL_DEFAULT_SHIFT_BITS 4 -#define BNX2_PORT_FEATURE_WOL_DEFAULT_MASK 0x30 -#define BNX2_PORT_FEATURE_WOL_DEFAULT_DISABLE 0 -#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC 0x10 -#define BNX2_PORT_FEATURE_WOL_DEFAULT_ACPI 0x20 -#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x30 -#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_MASK 0xf -#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_AUTONEG 0 -#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10HALF 1 -#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10FULL 2 -#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100HALF 3 -#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100FULL 4 -#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000HALF 5 -#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000FULL 6 -#define BNX2_PORT_FEATURE_WOL_AUTONEG_ADVERTISE_1000 0x40 -#define BNX2_PORT_FEATURE_WOL_RESERVED_PAUSE_CAP 0x400 -#define BNX2_PORT_FEATURE_WOL_RESERVED_ASYM_PAUSE_CAP 0x800 - -#define BNX2_PORT_FEATURE_MBA 0xe0 -#define BNX2_PORT2_FEATURE_MBA 0x154 -#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT_BITS 0 -#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x3 -#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0 -#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 1 -#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 2 -#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_SHIFT_BITS 2 -#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c -#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_AUTONEG 0 -#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10HALF 0x4 -#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10FULL 0x8 -#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100HALF 0xc -#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100FULL 0x10 -#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000HALF 0x14 -#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000FULL 0x18 -#define BNX2_PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x40 -#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_S 0 -#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x80 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT_BITS 8 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0xff00 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1K 0x100 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x200 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x300 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x400 -#define 
BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x500 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x600 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x700 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x800 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x900 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0xa00 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0xb00 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0xc00 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0xd00 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0xe00 -#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0xf00 -#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT_BITS 16 -#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0xf0000 -#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT_BITS 20 -#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x300000 -#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0 -#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x100000 -#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x200000 -#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x300000 - -#define BNX2_PORT_FEATURE_IMD 0xe4 -#define BNX2_PORT2_FEATURE_IMD 0x158 -#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_DEFAULT 0 -#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_ENABLE 1 - -#define BNX2_PORT_FEATURE_VLAN 0xe8 -#define BNX2_PORT2_FEATURE_VLAN 0x15c -#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff -#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000 - -#define BNX2_MFW_VER_PTR 0x00000014c - -#define BNX2_BC_STATE_RESET_TYPE 0x000001c0 -#define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254 -#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff -#define BNX2_BC_STATE_RESET_TYPE_NONE (BNX2_BC_STATE_RESET_TYPE_SIG | \ - 0x00010000) -#define BNX2_BC_STATE_RESET_TYPE_PCI (BNX2_BC_STATE_RESET_TYPE_SIG | \ - 0x00020000) -#define BNX2_BC_STATE_RESET_TYPE_VAUX (BNX2_BC_STATE_RESET_TYPE_SIG | \ - 0x00030000) -#define BNX2_BC_STATE_RESET_TYPE_DRV_MASK DRV_MSG_CODE -#define BNX2_BC_STATE_RESET_TYPE_DRV_RESET (BNX2_BC_STATE_RESET_TYPE_SIG | \ - DRV_MSG_CODE_RESET) -#define BNX2_BC_STATE_RESET_TYPE_DRV_UNLOAD (BNX2_BC_STATE_RESET_TYPE_SIG | \ - DRV_MSG_CODE_UNLOAD) -#define BNX2_BC_STATE_RESET_TYPE_DRV_SHUTDOWN (BNX2_BC_STATE_RESET_TYPE_SIG | \ - DRV_MSG_CODE_SHUTDOWN) -#define BNX2_BC_STATE_RESET_TYPE_DRV_WOL (BNX2_BC_STATE_RESET_TYPE_SIG | \ - DRV_MSG_CODE_WOL) -#define BNX2_BC_STATE_RESET_TYPE_DRV_DIAG (BNX2_BC_STATE_RESET_TYPE_SIG | \ - DRV_MSG_CODE_DIAG) -#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \ - (msg)) - -#define BNX2_BC_STATE 0x000001c4 -#define BNX2_BC_STATE_ERR_MASK 0x0000ff00 -#define BNX2_BC_STATE_SIGN 0x42530000 -#define BNX2_BC_STATE_SIGN_MASK 0xffff0000 -#define BNX2_BC_STATE_BC1_START (BNX2_BC_STATE_SIGN | 0x1) -#define BNX2_BC_STATE_GET_NVM_CFG1 (BNX2_BC_STATE_SIGN | 0x2) -#define BNX2_BC_STATE_PROG_BAR (BNX2_BC_STATE_SIGN | 0x3) -#define BNX2_BC_STATE_INIT_VID (BNX2_BC_STATE_SIGN | 0x4) -#define BNX2_BC_STATE_GET_NVM_CFG2 (BNX2_BC_STATE_SIGN | 0x5) -#define BNX2_BC_STATE_APPLY_WKARND (BNX2_BC_STATE_SIGN | 0x6) -#define BNX2_BC_STATE_LOAD_BC2 (BNX2_BC_STATE_SIGN | 0x7) -#define BNX2_BC_STATE_GOING_BC2 (BNX2_BC_STATE_SIGN | 0x8) -#define BNX2_BC_STATE_GOING_DIAG (BNX2_BC_STATE_SIGN | 0x9) -#define BNX2_BC_STATE_RT_FINAL_INIT (BNX2_BC_STATE_SIGN | 0x81) -#define BNX2_BC_STATE_RT_WKARND (BNX2_BC_STATE_SIGN | 0x82) -#define BNX2_BC_STATE_RT_DRV_PULSE (BNX2_BC_STATE_SIGN | 0x83) -#define BNX2_BC_STATE_RT_FIOEVTS (BNX2_BC_STATE_SIGN | 0x84) -#define BNX2_BC_STATE_RT_DRV_CMD (BNX2_BC_STATE_SIGN | 0x85) -#define 
BNX2_BC_STATE_RT_LOW_POWER (BNX2_BC_STATE_SIGN | 0x86) -#define BNX2_BC_STATE_RT_SET_WOL (BNX2_BC_STATE_SIGN | 0x87) -#define BNX2_BC_STATE_RT_OTHER_FW (BNX2_BC_STATE_SIGN | 0x88) -#define BNX2_BC_STATE_RT_GOING_D3 (BNX2_BC_STATE_SIGN | 0x89) -#define BNX2_BC_STATE_ERR_BAD_VERSION (BNX2_BC_STATE_SIGN | 0x0100) -#define BNX2_BC_STATE_ERR_BAD_BC2_CRC (BNX2_BC_STATE_SIGN | 0x0200) -#define BNX2_BC_STATE_ERR_BC1_LOOP (BNX2_BC_STATE_SIGN | 0x0300) -#define BNX2_BC_STATE_ERR_UNKNOWN_CMD (BNX2_BC_STATE_SIGN | 0x0400) -#define BNX2_BC_STATE_ERR_DRV_DEAD (BNX2_BC_STATE_SIGN | 0x0500) -#define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600) -#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700) - -#define BNX2_BC_STATE_CONDITION 0x000001c8 -#define BNX2_CONDITION_MFW_RUN_UNKNOWN 0x00000000 -#define BNX2_CONDITION_MFW_RUN_IPMI 0x00002000 -#define BNX2_CONDITION_MFW_RUN_UMP 0x00004000 -#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 -#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 -#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 - -#define BNX2_BC_STATE_DEBUG_CMD 0x1dc -#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 -#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000 -#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff -#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff - -#define BNX2_FW_EVT_CODE_MB 0x354 -#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000 -#define BNX2_FW_EVT_CODE_LINK_EVENT 0x00000001 - -#define BNX2_DRV_ACK_CAP_MB 0x364 -#define BNX2_DRV_ACK_CAP_SIGNATURE 0x35450000 -#define BNX2_CAPABILITY_SIGNATURE_MASK 0xFFFF0000 - -#define BNX2_FW_CAP_MB 0x368 -#define BNX2_FW_CAP_SIGNATURE 0xaa550000 -#define BNX2_FW_ACK_DRV_SIGNATURE 0x52500000 -#define BNX2_FW_CAP_SIGNATURE_MASK 0xffff0000 -#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE 0x00000001 -#define BNX2_FW_CAP_REMOTE_PHY_PRESENT 0x00000002 -#define BNX2_FW_CAP_MFW_CAN_KEEP_VLAN 0x00000008 -#define BNX2_FW_CAP_BC_CAN_KEEP_VLAN 0x00000010 -#define BNX2_FW_CAP_CAN_KEEP_VLAN (BNX2_FW_CAP_BC_CAN_KEEP_VLAN | \ - BNX2_FW_CAP_MFW_CAN_KEEP_VLAN) - -#define BNX2_RPHY_SIGNATURE 0x36c -#define BNX2_RPHY_LOAD_SIGNATURE 0x5a5a5a5a - -#define BNX2_RPHY_FLAGS 0x370 -#define BNX2_RPHY_SERDES_LINK 0x374 -#define BNX2_RPHY_COPPER_LINK 0x378 - -#define BNX2_ISCSI_INITIATOR 0x3dc -#define BNX2_ISCSI_INITIATOR_EN 0x00080000 - -#define BNX2_ISCSI_MAX_CONN 0x3e4 -#define BNX2_ISCSI_MAX_CONN_MASK 0xffff0000 -#define BNX2_ISCSI_MAX_CONN_SHIFT 16 - -#define HOST_VIEW_SHMEM_BASE 0x167c00 - -#define DP_SHMEM_LINE(bp, offset) \ - netdev_err(bp->dev, "DEBUG: %08x: %08x %08x %08x %08x\n", \ - offset, \ - bnx2_shmem_rd(bp, offset), \ - bnx2_shmem_rd(bp, offset + 4), \ - bnx2_shmem_rd(bp, offset + 8), \ - bnx2_shmem_rd(bp, offset + 12)) - -#endif diff --git a/drivers/net/bnx2_fw.h b/drivers/net/bnx2_fw.h deleted file mode 100644 index 940eb91..0000000 --- a/drivers/net/bnx2_fw.h +++ /dev/null @@ -1,88 +0,0 @@ -/* bnx2_fw.h: Broadcom NX2 network driver. - * - * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - */ - -/* Initialized Values for the Completion Processor. 
*/ -static const struct cpu_reg cpu_reg_com = { - .mode = BNX2_COM_CPU_MODE, - .mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT, - .mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA, - .state = BNX2_COM_CPU_STATE, - .state_value_clear = 0xffffff, - .gpr0 = BNX2_COM_CPU_REG_FILE, - .evmask = BNX2_COM_CPU_EVENT_MASK, - .pc = BNX2_COM_CPU_PROGRAM_COUNTER, - .inst = BNX2_COM_CPU_INSTRUCTION, - .bp = BNX2_COM_CPU_HW_BREAKPOINT, - .spad_base = BNX2_COM_SCRATCH, - .mips_view_base = 0x8000000, -}; - -/* Initialized Values the Command Processor. */ -static const struct cpu_reg cpu_reg_cp = { - .mode = BNX2_CP_CPU_MODE, - .mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT, - .mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA, - .state = BNX2_CP_CPU_STATE, - .state_value_clear = 0xffffff, - .gpr0 = BNX2_CP_CPU_REG_FILE, - .evmask = BNX2_CP_CPU_EVENT_MASK, - .pc = BNX2_CP_CPU_PROGRAM_COUNTER, - .inst = BNX2_CP_CPU_INSTRUCTION, - .bp = BNX2_CP_CPU_HW_BREAKPOINT, - .spad_base = BNX2_CP_SCRATCH, - .mips_view_base = 0x8000000, -}; - -/* Initialized Values for the RX Processor. */ -static const struct cpu_reg cpu_reg_rxp = { - .mode = BNX2_RXP_CPU_MODE, - .mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT, - .mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA, - .state = BNX2_RXP_CPU_STATE, - .state_value_clear = 0xffffff, - .gpr0 = BNX2_RXP_CPU_REG_FILE, - .evmask = BNX2_RXP_CPU_EVENT_MASK, - .pc = BNX2_RXP_CPU_PROGRAM_COUNTER, - .inst = BNX2_RXP_CPU_INSTRUCTION, - .bp = BNX2_RXP_CPU_HW_BREAKPOINT, - .spad_base = BNX2_RXP_SCRATCH, - .mips_view_base = 0x8000000, -}; - -/* Initialized Values for the TX Patch-up Processor. */ -static const struct cpu_reg cpu_reg_tpat = { - .mode = BNX2_TPAT_CPU_MODE, - .mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT, - .mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA, - .state = BNX2_TPAT_CPU_STATE, - .state_value_clear = 0xffffff, - .gpr0 = BNX2_TPAT_CPU_REG_FILE, - .evmask = BNX2_TPAT_CPU_EVENT_MASK, - .pc = BNX2_TPAT_CPU_PROGRAM_COUNTER, - .inst = BNX2_TPAT_CPU_INSTRUCTION, - .bp = BNX2_TPAT_CPU_HW_BREAKPOINT, - .spad_base = BNX2_TPAT_SCRATCH, - .mips_view_base = 0x8000000, -}; - -/* Initialized Values for the TX Processor. */ -static const struct cpu_reg cpu_reg_txp = { - .mode = BNX2_TXP_CPU_MODE, - .mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT, - .mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA, - .state = BNX2_TXP_CPU_STATE, - .state_value_clear = 0xffffff, - .gpr0 = BNX2_TXP_CPU_REG_FILE, - .evmask = BNX2_TXP_CPU_EVENT_MASK, - .pc = BNX2_TXP_CPU_PROGRAM_COUNTER, - .inst = BNX2_TXP_CPU_INSTRUCTION, - .bp = BNX2_TXP_CPU_HW_BREAKPOINT, - .spad_base = BNX2_TXP_SCRATCH, - .mips_view_base = 0x8000000, -}; diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile deleted file mode 100644 index 48fbdd4..0000000 --- a/drivers/net/bnx2x/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for Broadcom 10-Gigabit ethernet driver -# - -obj-$(CONFIG_BNX2X) += bnx2x.o - -bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h deleted file mode 100644 index c423504..0000000 --- a/drivers/net/bnx2x/bnx2x.h +++ /dev/null @@ -1,2014 +0,0 @@ -/* bnx2x.h: Broadcom Everest network driver. - * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - * Maintained by: Eilon Greenstein - * Written by: Eliezer Tamir - * Based on code from Michael Chan's bnx2 driver - */ - -#ifndef BNX2X_H -#define BNX2X_H -#include -#include -#include - -/* compilation time flags */ - -/* define this to make the driver freeze on error to allow getting debug info - * (you will need to reboot afterwards) */ -/* #define BNX2X_STOP_ON_ERROR */ - -#define DRV_MODULE_VERSION "1.70.00-0" -#define DRV_MODULE_RELDATE "2011/06/13" -#define BNX2X_BC_VER 0x040200 - -#if defined(CONFIG_DCB) -#define BCM_DCBNL -#endif -#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) -#define BCM_CNIC 1 -#include "../cnic_if.h" -#endif - -#ifdef BCM_CNIC -#define BNX2X_MIN_MSIX_VEC_CNT 3 -#define BNX2X_MSIX_VEC_FP_START 2 -#else -#define BNX2X_MIN_MSIX_VEC_CNT 2 -#define BNX2X_MSIX_VEC_FP_START 1 -#endif - -#include - -#include "bnx2x_reg.h" -#include "bnx2x_fw_defs.h" -#include "bnx2x_hsi.h" -#include "bnx2x_link.h" -#include "bnx2x_sp.h" -#include "bnx2x_dcb.h" -#include "bnx2x_stats.h" - -/* error/debug prints */ - -#define DRV_MODULE_NAME "bnx2x" - -/* for messages that are currently off */ -#define BNX2X_MSG_OFF 0 -#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */ -#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */ -#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */ -#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */ -#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */ -#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */ - -#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */ - -/* regular debug print */ -#define DP(__mask, __fmt, __args...) \ -do { \ - if (bp->msg_enable & (__mask)) \ - printk(DP_LEVEL "[%s:%d(%s)]" __fmt, \ - __func__, __LINE__, \ - bp->dev ? (bp->dev->name) : "?", \ - ##__args); \ -} while (0) - -#define DP_CONT(__mask, __fmt, __args...) \ -do { \ - if (bp->msg_enable & (__mask)) \ - pr_cont(__fmt, ##__args); \ -} while (0) - -/* errors debug print */ -#define BNX2X_DBG_ERR(__fmt, __args...) \ -do { \ - if (netif_msg_probe(bp)) \ - pr_err("[%s:%d(%s)]" __fmt, \ - __func__, __LINE__, \ - bp->dev ? (bp->dev->name) : "?", \ - ##__args); \ -} while (0) - -/* for errors (never masked) */ -#define BNX2X_ERR(__fmt, __args...) \ -do { \ - pr_err("[%s:%d(%s)]" __fmt, \ - __func__, __LINE__, \ - bp->dev ? (bp->dev->name) : "?", \ - ##__args); \ - } while (0) - -#define BNX2X_ERROR(__fmt, __args...) do { \ - pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \ - } while (0) - - -/* before we have a dev->name use dev_info() */ -#define BNX2X_DEV_INFO(__fmt, __args...) 
\ -do { \ - if (netif_msg_probe(bp)) \ - dev_info(&bp->pdev->dev, __fmt, ##__args); \ -} while (0) - -#define BNX2X_MAC_FMT "%pM" -#define BNX2X_MAC_PRN_LIST(mac) (mac) - - -#ifdef BNX2X_STOP_ON_ERROR -void bnx2x_int_disable(struct bnx2x *bp); -#define bnx2x_panic() do { \ - bp->panic = 1; \ - BNX2X_ERR("driver assert\n"); \ - bnx2x_int_disable(bp); \ - bnx2x_panic_dump(bp); \ - } while (0) -#else -#define bnx2x_panic() do { \ - bp->panic = 1; \ - BNX2X_ERR("driver assert\n"); \ - bnx2x_panic_dump(bp); \ - } while (0) -#endif - -#define bnx2x_mc_addr(ha) ((ha)->addr) -#define bnx2x_uc_addr(ha) ((ha)->addr) - -#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) -#define U64_HI(x) (u32)(((u64)(x)) >> 32) -#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) - - -#define REG_ADDR(bp, offset) ((bp->regview) + (offset)) - -#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) -#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) -#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset)) - -#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) -#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) -#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) - -#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset) -#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val) - -#define REG_RD_DMAE(bp, offset, valp, len32) \ - do { \ - bnx2x_read_dmae(bp, offset, len32);\ - memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \ - } while (0) - -#define REG_WR_DMAE(bp, offset, valp, len32) \ - do { \ - memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \ - bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \ - offset, len32); \ - } while (0) - -#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \ - REG_WR_DMAE(bp, offset, valp, len32) - -#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \ - do { \ - memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ - bnx2x_write_big_buf_wb(bp, addr, len32); \ - } while (0) - -#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \ - offsetof(struct shmem_region, field)) -#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) -#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) - -#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \ - offsetof(struct shmem2_region, field)) -#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) -#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) -#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \ - offsetof(struct mf_cfg, field)) -#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \ - offsetof(struct mf2_cfg, field)) - -#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field)) -#define MF_CFG_WR(bp, field, val) REG_WR(bp,\ - MF_CFG_ADDR(bp, field), (val)) -#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field)) - -#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \ - (SHMEM2_RD((bp), size) > \ - offsetof(struct shmem2_region, field))) - -#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) -#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) - -/* SP SB indices */ - -/* General SP events - stats query, cfc delete, etc */ -#define HC_SP_INDEX_ETH_DEF_CONS 3 - -/* EQ completions */ -#define HC_SP_INDEX_EQ_CONS 7 - -/* FCoE L2 connection completions */ -#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6 -#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4 -/* iSCSI L2 */ -#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 -#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 - -/* Special 
clients parameters */ - -/* SB indices */ -/* FCoE L2 */ -#define BNX2X_FCOE_L2_RX_INDEX \ - (&bp->def_status_blk->sp_sb.\ - index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS]) - -#define BNX2X_FCOE_L2_TX_INDEX \ - (&bp->def_status_blk->sp_sb.\ - index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS]) - -/** - * CIDs and CLIDs: - * CLIDs below is a CLID for func 0, then the CLID for other - * functions will be calculated by the formula: - * - * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X - * - */ -/* iSCSI L2 */ -#define BNX2X_ISCSI_ETH_CL_ID_IDX 1 -#define BNX2X_ISCSI_ETH_CID 49 - -/* FCoE L2 */ -#define BNX2X_FCOE_ETH_CL_ID_IDX 2 -#define BNX2X_FCOE_ETH_CID 50 - -/** Additional rings budgeting */ -#ifdef BCM_CNIC -#define CNIC_PRESENT 1 -#define FCOE_PRESENT 1 -#else -#define CNIC_PRESENT 0 -#define FCOE_PRESENT 0 -#endif /* BCM_CNIC */ -#define NON_ETH_CONTEXT_USE (FCOE_PRESENT) - -#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ - AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR - -#define SM_RX_ID 0 -#define SM_TX_ID 1 - -/* defines for multiple tx priority indices */ -#define FIRST_TX_ONLY_COS_INDEX 1 -#define FIRST_TX_COS_INDEX 0 - -/* defines for decodeing the fastpath index and the cos index out of the - * transmission queue index - */ -#define MAX_TXQS_PER_COS FP_SB_MAX_E1x - -#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS) -#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS) - -/* rules for calculating the cids of tx-only connections */ -#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS) -#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS) - -/* fp index inside class of service range */ -#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS) - -/* - * 0..15 eth cos0 - * 16..31 eth cos1 if applicable - * 32..47 eth cos2 If applicable - * fcoe queue follows eth queues (16, 32, 48 depending on cos) - */ -#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos) -#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) - -/* fast path */ -struct sw_rx_bd { - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(mapping); -}; - -struct sw_tx_bd { - struct sk_buff *skb; - u16 first_bd; - u8 flags; -/* Set on the first BD descriptor when there is a split BD */ -#define BNX2X_TSO_SPLIT_BD (1<<0) -}; - -struct sw_rx_page { - struct page *page; - DEFINE_DMA_UNMAP_ADDR(mapping); -}; - -union db_prod { - struct doorbell_set_prod data; - u32 raw; -}; - - -/* MC hsi */ -#define BCM_PAGE_SHIFT 12 -#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) -#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) -#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) - -#define PAGES_PER_SGE_SHIFT 0 -#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) -#define SGE_PAGE_SIZE PAGE_SIZE -#define SGE_PAGE_SHIFT PAGE_SHIFT -#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) - -/* SGE ring related macros */ -#define NUM_RX_SGE_PAGES 2 -#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) -#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) -/* RX_SGE_CNT is promised to be a power of 2 */ -#define RX_SGE_MASK (RX_SGE_CNT - 1) -#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) -#define MAX_RX_SGE (NUM_RX_SGE - 1) -#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ - (MAX_RX_SGE_CNT - 1)) ? 
(x) + 3 : (x) + 1) -#define RX_SGE(x) ((x) & MAX_RX_SGE) - -/* Manipulate a bit vector defined as an array of u64 */ - -/* Number of bits in one sge_mask array element */ -#define BIT_VEC64_ELEM_SZ 64 -#define BIT_VEC64_ELEM_SHIFT 6 -#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) - - -#define __BIT_VEC64_SET_BIT(el, bit) \ - do { \ - el = ((el) | ((u64)0x1 << (bit))); \ - } while (0) - -#define __BIT_VEC64_CLEAR_BIT(el, bit) \ - do { \ - el = ((el) & (~((u64)0x1 << (bit)))); \ - } while (0) - - -#define BIT_VEC64_SET_BIT(vec64, idx) \ - __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ - (idx) & BIT_VEC64_ELEM_MASK) - -#define BIT_VEC64_CLEAR_BIT(vec64, idx) \ - __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ - (idx) & BIT_VEC64_ELEM_MASK) - -#define BIT_VEC64_TEST_BIT(vec64, idx) \ - (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ - ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) - -/* Creates a bitmask of all ones in less significant bits. - idx - index of the most significant bit in the created mask */ -#define BIT_VEC64_ONES_MASK(idx) \ - (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) -#define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0)) - -/*******************************************************/ - - - -/* Number of u64 elements in SGE mask array */ -#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ - BIT_VEC64_ELEM_SZ) -#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) -#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) - -union host_hc_status_block { - /* pointer to fp status block e1x */ - struct host_hc_status_block_e1x *e1x_sb; - /* pointer to fp status block e2 */ - struct host_hc_status_block_e2 *e2_sb; -}; - -struct bnx2x_agg_info { - /* - * First aggregation buffer is an skb, the following - are pages. - * We will preallocate the skbs for each aggregation when - * we open the interface and will replace the BD at the consumer - * with this one when we receive the TPA_START CQE in order to - * keep the Rx BD ring consistent. 
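Two constructs above are worth unpacking. NEXT_SGE_IDX() advances the producer past the last two entries of each ring page, which hold the pointer to the next page rather than real SGEs, and the BIT_VEC64_* helpers treat fp->sge_mask as one flat bitmap, using the upper bits of an index to select a u64 element and the low six bits to select a bit inside it. A minimal standalone sketch of both, assuming the 4K page with 8-byte SGEs implied by BCM_PAGE_SHIFT above (names here are invented for illustration, not driver code):

#include <stdint.h>
#include <stdio.h>

#define SGE_CNT		512		/* 4K page / 8-byte SGE, as RX_SGE_CNT */
#define MAX_SGE_CNT	(SGE_CNT - 2)	/* last two entries link to the next page */
#define SGE_MASK	(SGE_CNT - 1)

static unsigned next_sge_idx(unsigned x)
{
	/* at the last usable entry of a page, hop over the two link entries */
	return ((x & SGE_MASK) == MAX_SGE_CNT - 1) ? x + 3 : x + 1;
}

static void vec64_set(uint64_t *vec, unsigned idx)
{
	vec[idx >> 6] |= (uint64_t)1 << (idx & 63);	/* BIT_VEC64_SET_BIT */
}

static int vec64_test(const uint64_t *vec, unsigned idx)
{
	return (vec[idx >> 6] >> (idx & 63)) & 1;	/* BIT_VEC64_TEST_BIT */
}

int main(void)
{
	uint64_t mask[16] = { 0 };

	printf("508 -> %u\n", next_sge_idx(508));	/* 509 */
	printf("509 -> %u\n", next_sge_idx(509));	/* 512: entries 510/511 skipped */

	vec64_set(mask, 70);				/* element 1, bit 6 */
	printf("bit 70 = %d, bit 71 = %d\n",
	       vec64_test(mask, 70), vec64_test(mask, 71));
	return 0;
}

The same "skip the page-end descriptors" shape recurs in the NEXT_TX_IDX, NEXT_RX_IDX and NEXT_RCQ_IDX macros later in this header, differing only in how many entries each page reserves.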
- */ - struct sw_rx_bd first_buf; - u8 tpa_state; -#define BNX2X_TPA_START 1 -#define BNX2X_TPA_STOP 2 -#define BNX2X_TPA_ERROR 3 - u8 placement_offset; - u16 parsing_flags; - u16 vlan_tag; - u16 len_on_bd; -}; - -#define Q_STATS_OFFSET32(stat_name) \ - (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) - -struct bnx2x_fp_txdata { - - struct sw_tx_bd *tx_buf_ring; - - union eth_tx_bd_types *tx_desc_ring; - dma_addr_t tx_desc_mapping; - - u32 cid; - - union db_prod tx_db; - - u16 tx_pkt_prod; - u16 tx_pkt_cons; - u16 tx_bd_prod; - u16 tx_bd_cons; - - unsigned long tx_pkt; - - __le16 *tx_cons_sb; - - int txq_index; -}; - -struct bnx2x_fastpath { - struct bnx2x *bp; /* parent */ - -#define BNX2X_NAPI_WEIGHT 128 - struct napi_struct napi; - union host_hc_status_block status_blk; - /* chip independed shortcuts into sb structure */ - __le16 *sb_index_values; - __le16 *sb_running_index; - /* chip independed shortcut into rx_prods_offset memory */ - u32 ustorm_rx_prods_offset; - - u32 rx_buf_size; - - dma_addr_t status_blk_mapping; - - u8 max_cos; /* actual number of active tx coses */ - struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; - - struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ - struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ - - struct eth_rx_bd *rx_desc_ring; - dma_addr_t rx_desc_mapping; - - union eth_rx_cqe *rx_comp_ring; - dma_addr_t rx_comp_mapping; - - /* SGE ring */ - struct eth_rx_sge *rx_sge_ring; - dma_addr_t rx_sge_mapping; - - u64 sge_mask[RX_SGE_MASK_LEN]; - - u32 cid; - - __le16 fp_hc_idx; - - u8 index; /* number in fp array */ - u8 cl_id; /* eth client id */ - u8 cl_qzone_id; - u8 fw_sb_id; /* status block number in FW */ - u8 igu_sb_id; /* status block number in HW */ - - u16 rx_bd_prod; - u16 rx_bd_cons; - u16 rx_comp_prod; - u16 rx_comp_cons; - u16 rx_sge_prod; - /* The last maximal completed SGE */ - u16 last_max_sge; - __le16 *rx_cons_sb; - unsigned long rx_pkt, - rx_calls; - - /* TPA related */ - struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; - u8 disable_tpa; -#ifdef BNX2X_STOP_ON_ERROR - u64 tpa_queue_used; -#endif - - struct tstorm_per_queue_stats old_tclient; - struct ustorm_per_queue_stats old_uclient; - struct xstorm_per_queue_stats old_xclient; - struct bnx2x_eth_q_stats eth_q_stats; - - /* The size is calculated using the following: - sizeof name field from netdev structure + - 4 ('-Xx-' string) + - 4 (for the digits and to make it DWORD aligned) */ -#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) - char name[FP_NAME_SIZE]; - - /* MACs object */ - struct bnx2x_vlan_mac_obj mac_obj; - - /* Queue State object */ - struct bnx2x_queue_sp_obj q_obj; - -}; - -#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) - -/* Use 2500 as a mini-jumbo MTU for FCoE */ -#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 - -/* FCoE L2 `fastpath' entry is right after the eth entries */ -#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) -#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) -#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) -#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ - txdata[FIRST_TX_COS_INDEX].var) - - -#define IS_ETH_FP(fp) (fp->index < \ - BNX2X_NUM_ETH_QUEUES(fp->bp)) -#ifdef BCM_CNIC -#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) -#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) -#else -#define IS_FCOE_FP(fp) false -#define IS_FCOE_IDX(idx) false -#endif - - -/* MC hsi */ -#define MAX_FETCH_BD 13 /* HW max BDs per packet */ -#define RX_COPY_THRESH 92 - -#define NUM_TX_RINGS 16 -#define TX_DESC_CNT (BCM_PAGE_SIZE / 
sizeof(union eth_tx_bd_types)) -#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) -#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) -#define MAX_TX_BD (NUM_TX_BD - 1) -#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) -#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ - (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) -#define TX_BD(x) ((x) & MAX_TX_BD) -#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) - -/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ -#define NUM_RX_RINGS 8 -#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) -#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) -#define RX_DESC_MASK (RX_DESC_CNT - 1) -#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) -#define MAX_RX_BD (NUM_RX_BD - 1) -#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) -#define MIN_RX_AVAIL 128 - -#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ - ETH_MIN_RX_CQES_WITH_TPA_E1 : \ - ETH_MIN_RX_CQES_WITH_TPA_E1H_E2) -#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA -#define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL)) -#define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\ - MIN_RX_AVAIL)) - -#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ - (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) -#define RX_BD(x) ((x) & MAX_RX_BD) - -/* - * As long as CQE is X times bigger than BD entry we have to allocate X times - * more pages for CQ ring in order to keep it balanced with BD ring - */ -#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) -#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) -#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) -#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) -#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) -#define MAX_RCQ_BD (NUM_RCQ_BD - 1) -#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) -#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ - (MAX_RCQ_DESC_CNT - 1)) ? 
(x) + 2 : (x) + 1) -#define RCQ_BD(x) ((x) & MAX_RCQ_BD) - - -/* This is needed for determining of last_max */ -#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) -#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) - - -#define BNX2X_SWCID_SHIFT 17 -#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) - -/* used on a CID received from the HW */ -#define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK) -#define CQE_CMD(x) (le32_to_cpu(x) >> \ - COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) - -#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \ - le32_to_cpu((bd)->addr_lo)) -#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) - -#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ -#define BNX2X_DB_SHIFT 7 /* 128 bytes*/ -#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) -#error "Min DB doorbell stride is 8" -#endif -#define DPM_TRIGER_TYPE 0x40 -#define DOORBELL(bp, cid, val) \ - do { \ - writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ - DPM_TRIGER_TYPE); \ - } while (0) - - -/* TX CSUM helpers */ -#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ - skb->csum_offset) -#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \ - skb->csum_offset)) - -#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) - -#define XMIT_PLAIN 0 -#define XMIT_CSUM_V4 0x1 -#define XMIT_CSUM_V6 0x2 -#define XMIT_CSUM_TCP 0x4 -#define XMIT_GSO_V4 0x8 -#define XMIT_GSO_V6 0x10 - -#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6) -#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6) - - -/* stuff added to make the code fit 80Col */ -#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) -#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) -#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) -#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) -#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) - -#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG - -#define BNX2X_IP_CSUM_ERR(cqe) \ - (!((cqe)->fast_path_cqe.status_flags & \ - ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ - ((cqe)->fast_path_cqe.type_error_flags & \ - ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) - -#define BNX2X_L4_CSUM_ERR(cqe) \ - (!((cqe)->fast_path_cqe.status_flags & \ - ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ - ((cqe)->fast_path_cqe.type_error_flags & \ - ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) - -#define BNX2X_RX_CSUM_OK(cqe) \ - (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) - -#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ - (((le16_to_cpu(flags) & \ - PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ - PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \ - == PRS_FLAG_OVERETH_IPV4) -#define BNX2X_RX_SUM_FIX(cqe) \ - BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) - - -#define FP_USB_FUNC_OFF \ - offsetof(struct cstorm_status_block_u, func) -#define FP_CSB_FUNC_OFF \ - offsetof(struct cstorm_status_block_c, func) - -#define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ - /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ -#define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ - /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ -#define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ - /* (HC_INDEX_U_ETH_RX_BD_CONS) */ - -#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ - /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */ - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ -#define 
HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */ - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */ - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ - -#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 - - -#define BNX2X_RX_SB_INDEX \ - (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) - -#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0 - -#define BNX2X_TX_SB_INDEX_COS0 \ - (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]) - -/* end of fast path */ - -/* common */ - -struct bnx2x_common { - - u32 chip_id; -/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ -#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0) - -#define CHIP_NUM(bp) (bp->common.chip_id >> 16) -#define CHIP_NUM_57710 0x164e -#define CHIP_NUM_57711 0x164f -#define CHIP_NUM_57711E 0x1650 -#define CHIP_NUM_57712 0x1662 -#define CHIP_NUM_57712_MF 0x1663 -#define CHIP_NUM_57713 0x1651 -#define CHIP_NUM_57713E 0x1652 -#define CHIP_NUM_57800 0x168a -#define CHIP_NUM_57800_MF 0x16a5 -#define CHIP_NUM_57810 0x168e -#define CHIP_NUM_57810_MF 0x16ae -#define CHIP_NUM_57840 0x168d -#define CHIP_NUM_57840_MF 0x16ab -#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) -#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) -#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) -#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) -#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) -#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) -#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) -#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) -#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) -#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) -#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) -#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ - CHIP_IS_57711E(bp)) -#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ - CHIP_IS_57712_MF(bp)) -#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ - CHIP_IS_57800_MF(bp) || \ - CHIP_IS_57810(bp) || \ - CHIP_IS_57810_MF(bp) || \ - CHIP_IS_57840(bp) || \ - CHIP_IS_57840_MF(bp)) -#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) -#define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) -#define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) - -#define CHIP_REV_SHIFT 12 -#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) -#define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK) -#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) -#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) -/* assume maximum 5 revisions */ -#define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000) -/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */ -#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ - !(CHIP_REV_VAL(bp) & 0x00001000)) -/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ -#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ - (CHIP_REV_VAL(bp) & 0x00001000)) - -#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ - ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) - -#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) -#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) -#define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\ - (CHIP_REV_SHIFT + 1)) \ - << CHIP_REV_SHIFT) -#define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? 
\
-			 CHIP_REV_SIM(bp) :\
-			 CHIP_REV_VAL(bp))
-#define CHIP_IS_E3B0(bp)	(CHIP_IS_E3(bp) && \
-				 (CHIP_REV(bp) == CHIP_REV_Bx))
-#define CHIP_IS_E3A0(bp)	(CHIP_IS_E3(bp) && \
-				 (CHIP_REV(bp) == CHIP_REV_Ax))
-
-	int			flash_size;
-#define BNX2X_NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
-#define BNX2X_NVRAM_TIMEOUT_COUNT		30000
-#define BNX2X_NVRAM_PAGE_SIZE			256
-
-	u32			shmem_base;
-	u32			shmem2_base;
-	u32			mf_cfg_base;
-	u32			mf2_cfg_base;
-
-	u32			hw_config;
-
-	u32			bc_ver;
-
-	u8			int_block;
-#define INT_BLOCK_HC			0
-#define INT_BLOCK_IGU			1
-#define INT_BLOCK_MODE_NORMAL		0
-#define INT_BLOCK_MODE_BW_COMP		2
-#define CHIP_INT_MODE_IS_NBC(bp)		\
-			(!CHIP_IS_E1x(bp) &&	\
-			!((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
-#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
-
-	u8			chip_port_mode;
-#define CHIP_4_PORT_MODE			0x0
-#define CHIP_2_PORT_MODE			0x1
-#define CHIP_PORT_MODE_NONE			0x2
-#define CHIP_MODE(bp)			(bp->common.chip_port_mode)
-#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
-};
-
-/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
-#define BNX2X_IGU_STAS_MSG_VF_CNT 64
-#define BNX2X_IGU_STAS_MSG_PF_CNT 4
-
-/* end of common */
-
-/* port */
-
-struct bnx2x_port {
-	u32			pmf;
-
-	u32			link_config[LINK_CONFIG_SIZE];
-
-	u32			supported[LINK_CONFIG_SIZE];
-/* link settings - missing defines */
-#define SUPPORTED_2500baseX_Full	(1 << 15)
-
-	u32			advertising[LINK_CONFIG_SIZE];
-/* link settings - missing defines */
-#define ADVERTISED_2500baseX_Full	(1 << 15)
-
-	u32			phy_addr;
-
-	/* used to synchronize phy accesses */
-	struct mutex		phy_mutex;
-	int			need_hw_lock;
-
-	u32			port_stx;
-
-	struct nig_stats	old_nig_stats;
-};
-
-/* end of port */
-
-#define STATS_OFFSET32(stat_name) \
-			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
-
-/* slow path */
-
-/* slow path work-queue */
-extern struct workqueue_struct *bnx2x_wq;
-
-#define BNX2X_MAX_NUM_OF_VFS	64
-#define BNX2X_VF_ID_INVALID	0xFF
-
-/*
- * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
- * controlled by the number of fast-path status blocks supported by the
- * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
- * status block represents an independent interrupt context that can
- * serve a regular L2 networking queue. However, special L2 queues such
- * as the FCoE queue do not require a FP-SB, and other components like
- * the CNIC may consume FP-SBs, reducing the number of possible L2 queues.
- *
- * If the maximum number of FP-SBs available is X then:
- * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of
- *    regular L2 queues is Y=X-1
- * b. In MF mode the actual number of L2 queues is Y=(X-1)/MF_factor
- * c. If the FCoE L2 queue is supported, the actual number of L2 queues
- *    is Y+1
- * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
- *    slow-path interrupts) or Y+2 if CNIC is supported (one additional
- *    FP interrupt context for the CNIC).
- * e. The number of HW contexts (CID count) is always X or X+1 if FCoE
- *    L2 queue is supported. The cid for the FCoE L2 queue is always X.
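To make the budgeting above concrete, here is a hedged back-of-the-envelope example assuming an E1x part with X = 16 FP-SBs (FP_SB_MAX_E1x) and both CNIC and FCoE configured, following cases a, d and e of the comment:

#include <stdio.h>

int main(void)
{
	int x = 16;	/* available FP-SBs, FP_SB_MAX_E1x */
	int cnic = 1;	/* BCM_CNIC configured: consumes one FP-SB */
	int fcoe = 1;	/* FCoE L2 queue supported: no FP-SB, but one CID */

	int l2   = x - cnic;		/* case a: regular L2 queues */
	int irqs = l2 + 1 + cnic;	/* case d: + slow-path + CNIC vector */
	int cids = x + fcoe;		/* case e: FCoE CID is always x */

	printf("L2 queues %d, MSI-X vectors %d, HW contexts %d\n",
	       l2, irqs, cids);		/* 15, 17, 17 */
	return 0;
}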
- */ - -/* fast-path interrupt contexts E1x */ -#define FP_SB_MAX_E1x 16 -/* fast-path interrupt contexts E2 */ -#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 - -union cdu_context { - struct eth_context eth; - char pad[1024]; -}; - -/* CDU host DB constants */ -#define CDU_ILT_PAGE_SZ_HW 3 -#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */ -#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) - -#ifdef BCM_CNIC -#define CNIC_ISCSI_CID_MAX 256 -#define CNIC_FCOE_CID_MAX 2048 -#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) -#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) -#endif - -#define QM_ILT_PAGE_SZ_HW 0 -#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ -#define QM_CID_ROUND 1024 - -#ifdef BCM_CNIC -/* TM (timers) host DB constants */ -#define TM_ILT_PAGE_SZ_HW 0 -#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ -/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ -#define TM_CONN_NUM 1024 -#define TM_ILT_SZ (8 * TM_CONN_NUM) -#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) - -/* SRC (Searcher) host DB constants */ -#define SRC_ILT_PAGE_SZ_HW 0 -#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ -#define SRC_HASH_BITS 10 -#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ -#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) -#define SRC_T2_SZ SRC_ILT_SZ -#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) - -#endif - -#define MAX_DMAE_C 8 - -/* DMA memory not used in fastpath */ -struct bnx2x_slowpath { - union { - struct mac_configuration_cmd e1x; - struct eth_classify_rules_ramrod_data e2; - } mac_rdata; - - - union { - struct tstorm_eth_mac_filter_config e1x; - struct eth_filter_rules_ramrod_data e2; - } rx_mode_rdata; - - union { - struct mac_configuration_cmd e1; - struct eth_multicast_rules_ramrod_data e2; - } mcast_rdata; - - struct eth_rss_update_ramrod_data rss_rdata; - - /* Queue State related ramrods are always sent under rtnl_lock */ - union { - struct client_init_ramrod_data init_data; - struct client_update_ramrod_data update_data; - } q_rdata; - - union { - struct function_start_data func_start; - /* pfc configuration for DCBX ramrod */ - struct flow_control_configuration pfc_config; - } func_rdata; - - /* used by dmae command executer */ - struct dmae_command dmae[MAX_DMAE_C]; - - u32 stats_comp; - union mac_stats mac_stats; - struct nig_stats nig_stats; - struct host_port_stats port_stats; - struct host_func_stats func_stats; - struct host_func_stats func_stats_base; - - u32 wb_comp; - u32 wb_data[4]; -}; - -#define bnx2x_sp(bp, var) (&bp->slowpath->var) -#define bnx2x_sp_mapping(bp, var) \ - (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) - - -/* attn group wiring */ -#define MAX_DYNAMIC_ATTN_GRPS 8 - -struct attn_route { - u32 sig[5]; -}; - -struct iro { - u32 base; - u16 m1; - u16 m2; - u16 m3; - u16 size; -}; - -struct hw_context { - union cdu_context *vcxt; - dma_addr_t cxt_mapping; - size_t size; -}; - -/* forward */ -struct bnx2x_ilt; - - -enum bnx2x_recovery_state { - BNX2X_RECOVERY_DONE, - BNX2X_RECOVERY_INIT, - BNX2X_RECOVERY_WAIT, - BNX2X_RECOVERY_FAILED -}; - -/* - * Event queue (EQ or event ring) MC hsi - * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 - */ -#define NUM_EQ_PAGES 1 -#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem)) -#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1) -#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES) -#define EQ_DESC_MASK (NUM_EQ_DESC - 1) -#define 
MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2) - -/* depends on EQ_DESC_CNT_PAGE being a power of 2 */ -#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \ - (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1) - -/* depends on the above and on NUM_EQ_PAGES being a power of 2 */ -#define EQ_DESC(x) ((x) & EQ_DESC_MASK) - -#define BNX2X_EQ_INDEX \ - (&bp->def_status_blk->sp_sb.\ - index_values[HC_SP_INDEX_EQ_CONS]) - -/* This is a data that will be used to create a link report message. - * We will keep the data used for the last link report in order - * to prevent reporting the same link parameters twice. - */ -struct bnx2x_link_report_data { - u16 line_speed; /* Effective line speed */ - unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */ -}; - -enum { - BNX2X_LINK_REPORT_FD, /* Full DUPLEX */ - BNX2X_LINK_REPORT_LINK_DOWN, - BNX2X_LINK_REPORT_RX_FC_ON, - BNX2X_LINK_REPORT_TX_FC_ON, -}; - -enum { - BNX2X_PORT_QUERY_IDX, - BNX2X_PF_QUERY_IDX, - BNX2X_FIRST_QUEUE_QUERY_IDX, -}; - -struct bnx2x_fw_stats_req { - struct stats_query_header hdr; - struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; -}; - -struct bnx2x_fw_stats_data { - struct stats_counter storm_counters; - struct per_port_stats port; - struct per_pf_stats pf; - struct per_queue_stats queue_stats[1]; -}; - -/* Public slow path states */ -enum { - BNX2X_SP_RTNL_SETUP_TC, - BNX2X_SP_RTNL_TX_TIMEOUT, -}; - - -struct bnx2x { - /* Fields used in the tx and intr/napi performance paths - * are grouped together in the beginning of the structure - */ - struct bnx2x_fastpath *fp; - void __iomem *regview; - void __iomem *doorbells; - u16 db_size; - - u8 pf_num; /* absolute PF number */ - u8 pfid; /* per-path PF number */ - int base_fw_ndsb; /**/ -#define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1)) -#define BP_PORT(bp) (bp->pfid & 1) -#define BP_FUNC(bp) (bp->pfid) -#define BP_ABS_FUNC(bp) (bp->pf_num) -#define BP_E1HVN(bp) (bp->pfid >> 1) -#define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ -#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) -#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ - BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) - - struct net_device *dev; - struct pci_dev *pdev; - - const struct iro *iro_arr; -#define IRO (bp->iro_arr) - - enum bnx2x_recovery_state recovery_state; - int is_leader; - struct msix_entry *msix_table; - - int tx_ring_size; - -/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ -#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) -#define ETH_MIN_PACKET_SIZE 60 -#define ETH_MAX_PACKET_SIZE 1500 -#define ETH_MAX_JUMBO_PACKET_SIZE 9600 - - /* Max supported alignment is 256 (8 shift) */ -#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? 
\ - L1_CACHE_SHIFT : 8) - /* FW use 2 Cache lines Alignment for start packet and size */ -#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT) -#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) - - struct host_sp_status_block *def_status_blk; -#define DEF_SB_IGU_ID 16 -#define DEF_SB_ID HC_SP_SB_ID - __le16 def_idx; - __le16 def_att_idx; - u32 attn_state; - struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; - - /* slow path ring */ - struct eth_spe *spq; - dma_addr_t spq_mapping; - u16 spq_prod_idx; - struct eth_spe *spq_prod_bd; - struct eth_spe *spq_last_bd; - __le16 *dsb_sp_prod; - atomic_t cq_spq_left; /* ETH_XXX ramrods credit */ - /* used to synchronize spq accesses */ - spinlock_t spq_lock; - - /* event queue */ - union event_ring_elem *eq_ring; - dma_addr_t eq_mapping; - u16 eq_prod; - u16 eq_cons; - __le16 *eq_cons_sb; - atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ - - - - /* Counter for marking that there is a STAT_QUERY ramrod pending */ - u16 stats_pending; - /* Counter for completed statistics ramrods */ - u16 stats_comp; - - /* End of fields used in the performance code paths */ - - int panic; - int msg_enable; - - u32 flags; -#define PCIX_FLAG (1 << 0) -#define PCI_32BIT_FLAG (1 << 1) -#define ONE_PORT_FLAG (1 << 2) -#define NO_WOL_FLAG (1 << 3) -#define USING_DAC_FLAG (1 << 4) -#define USING_MSIX_FLAG (1 << 5) -#define USING_MSI_FLAG (1 << 6) -#define DISABLE_MSI_FLAG (1 << 7) -#define TPA_ENABLE_FLAG (1 << 8) -#define NO_MCP_FLAG (1 << 9) - -#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) -#define MF_FUNC_DIS (1 << 11) -#define OWN_CNIC_IRQ (1 << 12) -#define NO_ISCSI_OOO_FLAG (1 << 13) -#define NO_ISCSI_FLAG (1 << 14) -#define NO_FCOE_FLAG (1 << 15) - -#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) -#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) -#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) - - int pm_cap; - int mrrs; - - struct delayed_work sp_task; - struct delayed_work sp_rtnl_task; - - struct delayed_work period_task; - struct timer_list timer; - int current_interval; - - u16 fw_seq; - u16 fw_drv_pulse_wr_seq; - u32 func_stx; - - struct link_params link_params; - struct link_vars link_vars; - u32 link_cnt; - struct bnx2x_link_report_data last_reported_link; - - struct mdio_if_info mdio; - - struct bnx2x_common common; - struct bnx2x_port port; - - struct cmng_struct_per_port cmng; - u32 vn_weight_sum; - u32 mf_config[E1HVN_MAX]; - u32 mf2_config[E2_FUNC_MAX]; - u32 path_has_ovlan; /* E3 */ - u16 mf_ov; - u8 mf_mode; -#define IS_MF(bp) (bp->mf_mode != 0) -#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) -#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) - - u8 wol; - - int rx_ring_size; - - u16 tx_quick_cons_trip_int; - u16 tx_quick_cons_trip; - u16 tx_ticks_int; - u16 tx_ticks; - - u16 rx_quick_cons_trip_int; - u16 rx_quick_cons_trip; - u16 rx_ticks_int; - u16 rx_ticks; -/* Maximal coalescing timeout in us */ -#define BNX2X_MAX_COALESCE_TOUT (0xf0*12) - - u32 lin_cnt; - - u16 state; -#define BNX2X_STATE_CLOSED 0 -#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 -#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 -#define BNX2X_STATE_OPEN 0x3000 -#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 -#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 - -#define BNX2X_STATE_DIAG 0xe000 -#define BNX2X_STATE_ERROR 0xf000 - - int multi_mode; -#define BNX2X_MAX_PRIORITY 8 -#define BNX2X_MAX_ENTRIES_PER_PRI 16 -#define BNX2X_MAX_COS 3 -#define BNX2X_MAX_TX_COS 2 - int num_queues; - int disable_tpa; - - u32 rx_mode; -#define BNX2X_RX_MODE_NONE 0 
-#define BNX2X_RX_MODE_NORMAL 1 -#define BNX2X_RX_MODE_ALLMULTI 2 -#define BNX2X_RX_MODE_PROMISC 3 -#define BNX2X_MAX_MULTICAST 64 - - u8 igu_dsb_id; - u8 igu_base_sb; - u8 igu_sb_cnt; - dma_addr_t def_status_blk_mapping; - - struct bnx2x_slowpath *slowpath; - dma_addr_t slowpath_mapping; - - /* Total number of FW statistics requests */ - u8 fw_stats_num; - - /* - * This is a memory buffer that will contain both statistics - * ramrod request and data. - */ - void *fw_stats; - dma_addr_t fw_stats_mapping; - - /* - * FW statistics request shortcut (points at the - * beginning of fw_stats buffer). - */ - struct bnx2x_fw_stats_req *fw_stats_req; - dma_addr_t fw_stats_req_mapping; - int fw_stats_req_sz; - - /* - * FW statistics data shortcut (points at the begining of - * fw_stats buffer + fw_stats_req_sz). - */ - struct bnx2x_fw_stats_data *fw_stats_data; - dma_addr_t fw_stats_data_mapping; - int fw_stats_data_sz; - - struct hw_context context; - - struct bnx2x_ilt *ilt; -#define BP_ILT(bp) ((bp)->ilt) -#define ILT_MAX_LINES 256 -/* - * Maximum supported number of RSS queues: number of IGU SBs minus one that goes - * to CNIC. - */ -#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT) - -/* - * Maximum CID count that might be required by the bnx2x: - * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related) - */ -#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\ - NON_ETH_CONTEXT_USE + CNIC_PRESENT) -#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ - ILT_PAGE_CIDS)) -#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT)) - - int qm_cid_count; - - int dropless_fc; - -#ifdef BCM_CNIC - u32 cnic_flags; -#define BNX2X_CNIC_FLAG_MAC_SET 1 - void *t2; - dma_addr_t t2_mapping; - struct cnic_ops __rcu *cnic_ops; - void *cnic_data; - u32 cnic_tag; - struct cnic_eth_dev cnic_eth_dev; - union host_hc_status_block cnic_sb; - dma_addr_t cnic_sb_mapping; - struct eth_spe *cnic_kwq; - struct eth_spe *cnic_kwq_prod; - struct eth_spe *cnic_kwq_cons; - struct eth_spe *cnic_kwq_last; - u16 cnic_kwq_pending; - u16 cnic_spq_pending; - u8 fip_mac[ETH_ALEN]; - struct mutex cnic_mutex; - struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; - - /* Start index of the "special" (CNIC related) L2 cleints */ - u8 cnic_base_cl_id; -#endif - - int dmae_ready; - /* used to synchronize dmae accesses */ - spinlock_t dmae_lock; - - /* used to protect the FW mail box */ - struct mutex fw_mb_mutex; - - /* used to synchronize stats collecting */ - int stats_state; - - /* used for synchronization of concurrent threads statistics handling */ - spinlock_t stats_lock; - - /* used by dmae command loader */ - struct dmae_command stats_dmae; - int executer_idx; - - u16 stats_counter; - struct bnx2x_eth_stats eth_stats; - - struct z_stream_s *strm; - void *gunzip_buf; - dma_addr_t gunzip_mapping; - int gunzip_outlen; -#define FW_BUF_SIZE 0x8000 -#define GUNZIP_BUF(bp) (bp->gunzip_buf) -#define GUNZIP_PHYS(bp) (bp->gunzip_mapping) -#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen) - - struct raw_op *init_ops; - /* Init blocks offsets inside init_ops */ - u16 *init_ops_offsets; - /* Data blob - has 32 bit granularity */ - u32 *init_data; - u32 init_mode_flags; -#define INIT_MODE_FLAGS(bp) (bp->init_mode_flags) - /* Zipped PRAM blobs - raw data */ - const u8 *tsem_int_table_data; - const u8 *tsem_pram_data; - const u8 *usem_int_table_data; - const u8 *usem_pram_data; - const u8 *xsem_int_table_data; - const u8 *xsem_pram_data; - const u8 *csem_int_table_data; - const u8 
*csem_pram_data; -#define INIT_OPS(bp) (bp->init_ops) -#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets) -#define INIT_DATA(bp) (bp->init_data) -#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data) -#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data) -#define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data) -#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data) -#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data) -#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data) -#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) -#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) - -#define PHY_FW_VER_LEN 20 - char fw_ver[32]; - const struct firmware *firmware; - - /* DCB support on/off */ - u16 dcb_state; -#define BNX2X_DCB_STATE_OFF 0 -#define BNX2X_DCB_STATE_ON 1 - - /* DCBX engine mode */ - int dcbx_enabled; -#define BNX2X_DCBX_ENABLED_OFF 0 -#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1 -#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2 -#define BNX2X_DCBX_ENABLED_INVALID (-1) - - bool dcbx_mode_uset; - - struct bnx2x_config_dcbx_params dcbx_config_params; - struct bnx2x_dcbx_port_params dcbx_port_params; - int dcb_version; - - /* CAM credit pools */ - struct bnx2x_credit_pool_obj macs_pool; - - /* RX_MODE object */ - struct bnx2x_rx_mode_obj rx_mode_obj; - - /* MCAST object */ - struct bnx2x_mcast_obj mcast_obj; - - /* RSS configuration object */ - struct bnx2x_rss_config_obj rss_conf_obj; - - /* Function State controlling object */ - struct bnx2x_func_sp_obj func_obj; - - unsigned long sp_state; - - /* operation indication for the sp_rtnl task */ - unsigned long sp_rtnl_state; - - /* DCBX Negotation results */ - struct dcbx_features dcbx_local_feat; - u32 dcbx_error; - -#ifdef BCM_DCBNL - struct dcbx_features dcbx_remote_feat; - u32 dcbx_remote_flags; -#endif - u32 pending_max; - - /* multiple tx classes of service */ - u8 max_cos; - - /* priority to cos mapping */ - u8 prio_to_cos[8]; -}; - -/* Tx queues may be less or equal to Rx queues */ -extern int num_queues; -#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) -#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) -#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) - -#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) - -#define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp) -/* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */ - -#define RSS_IPV4_CAP_MASK \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY - -#define RSS_IPV4_TCP_CAP_MASK \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY - -#define RSS_IPV6_CAP_MASK \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY - -#define RSS_IPV6_TCP_CAP_MASK \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY - -/* func init flags */ -#define FUNC_FLG_RSS 0x0001 -#define FUNC_FLG_STATS 0x0002 -/* removed FUNC_FLG_UNMATCHED 0x0004 */ -#define FUNC_FLG_TPA 0x0008 -#define FUNC_FLG_SPQ 0x0010 -#define FUNC_FLG_LEADING 0x0020 /* PF only */ - - -struct bnx2x_func_init_params { - /* dma */ - dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ - dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ - - u16 func_flgs; - u16 func_id; /* abs fid */ - u16 pf_id; - u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ -}; - -#define for_each_eth_queue(bp, var) \ - for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) - -#define for_each_nondefault_eth_queue(bp, var) \ - for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) - -#define for_each_queue(bp, var) \ - for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ - if 
(skip_queue(bp, var))	\
-			continue;		\
-		else
-
-/* Skip forwarding FP */
-#define for_each_rx_queue(bp, var) \
-	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
-		if (skip_rx_queue(bp, var))	\
-			continue;		\
-		else
-
-/* Skip OOO FP */
-#define for_each_tx_queue(bp, var) \
-	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
-		if (skip_tx_queue(bp, var))	\
-			continue;		\
-		else
-
-#define for_each_nondefault_queue(bp, var) \
-	for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
-		if (skip_queue(bp, var))	\
-			continue;		\
-		else
-
-#define for_each_cos_in_tx_queue(fp, var) \
-	for ((var) = 0; (var) < (fp)->max_cos; (var)++)
-
-/* skip rx queue
- * if FCOE l2 support is disabled and this is the fcoe L2 queue
- */
-#define skip_rx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
-
-/* skip tx queue
- * if FCOE l2 support is disabled and this is the fcoe L2 queue
- */
-#define skip_tx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
-
-#define skip_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
-
-
-
-
-/**
- * bnx2x_set_mac_one - configure a single MAC address
- *
- * @bp:			driver handle
- * @mac:		MAC to configure
- * @obj:		MAC object handle
- * @set:		if 'true' add a new MAC, otherwise - delete
- * @mac_type:		the type of the MAC to configure (e.g. ETH, UC list)
- * @ramrod_flags:	RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
- *
- * Configures one MAC according to provided parameters or continues the
- * execution of previously scheduled commands if RAMROD_CONT is set in
- * ramrod_flags.
- *
- * Returns zero if the operation has successfully completed, a positive
- * value if the operation has been successfully scheduled and a negative
- * value if a requested operation has failed.
- */
-int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
-		      struct bnx2x_vlan_mac_obj *obj, bool set,
-		      int mac_type, unsigned long *ramrod_flags);
-
-/**
- * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
- *
- * @bp:			driver handle
- * @mac_obj:		MAC object handle
- * @mac_type:		type of the MACs to clear (BNX2X_XXX_MAC)
- * @wait_for_comp:	if 'true' block until completion
- *
- * Deletes all MACs of the specific type (e.g. ETH, UC list).
- *
- * Returns zero if the operation has successfully completed, a positive
- * value if the operation has been successfully scheduled and a negative
- * value if a requested operation has failed.
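A side note on the for_each_*_queue() iterators defined above: each macro ends in a dangling if/else, so the statement written after the macro invocation binds as the else branch, while queues rejected by the skip predicate are stepped over with continue. A self-contained sketch of the same idiom (illustrative names only, not driver code):

#include <stdio.h>

/* the caller's loop body attaches to the trailing 'else' */
#define for_each_even(var, n)			\
	for ((var) = 0; (var) < (n); (var)++)	\
		if ((var) & 1)			\
			continue;		\
		else

int main(void)
{
	int i;

	for_each_even(i, 6)
		printf("%d\n", i);	/* prints 0, 2 and 4 */
	return 0;
}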
- */ -int bnx2x_del_all_macs(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *mac_obj, - int mac_type, bool wait_for_comp); - -/* Init Function API */ -void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); -int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); -int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); -int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); -int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); -void bnx2x_read_mf_cfg(struct bnx2x *bp); - - -/* dmae */ -void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); -void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, - u32 len32); -void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx); -u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type); -u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); -u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, - bool with_comp, u8 comp_type); - - -void bnx2x_calc_fc_adv(struct bnx2x *bp); -int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, - u32 data_hi, u32 data_lo, int cmd_type); -void bnx2x_update_coalesce(struct bnx2x *bp); -int bnx2x_get_cur_phy_idx(struct bnx2x *bp); - -static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, - int wait) -{ - u32 val; - - do { - val = REG_RD(bp, reg); - if (val == expected) - break; - ms -= wait; - msleep(wait); - - } while (ms > 0); - - return val; -} - -#define BNX2X_ILT_ZALLOC(x, y, size) \ - do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x) \ - memset(x, 0, size); \ - } while (0) - -#define BNX2X_ILT_FREE(x, y, size) \ - do { \ - if (x) { \ - dma_free_coherent(&bp->pdev->dev, size, x, y); \ - x = NULL; \ - y = 0; \ - } \ - } while (0) - -#define ILOG2(x) (ilog2((x))) - -#define ILT_NUM_PAGE_ENTRIES (3072) -/* In 57710/11 we use whole table since we have 8 func - * In 57712 we have only 4 func, but use same size per func, then only half of - * the table in use - */ -#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8) - -#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) -/* - * the phys address is shifted right 12 bits and has an added - * 1=valid bit added to the 53rd bit - * then since this is a wide register(TM) - * we split it into two 32 bit writes - */ -#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) -#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) - -/* load/unload mode */ -#define LOAD_NORMAL 0 -#define LOAD_OPEN 1 -#define LOAD_DIAG 2 -#define UNLOAD_NORMAL 0 -#define UNLOAD_CLOSE 1 -#define UNLOAD_RECOVERY 2 - - -/* DMAE command defines */ -#define DMAE_TIMEOUT -1 -#define DMAE_PCI_ERROR -2 /* E2 and onward */ -#define DMAE_NOT_RDY -3 -#define DMAE_PCI_ERR_FLAG 0x80000000 - -#define DMAE_SRC_PCI 0 -#define DMAE_SRC_GRC 1 - -#define DMAE_DST_NONE 0 -#define DMAE_DST_PCI 1 -#define DMAE_DST_GRC 2 - -#define DMAE_COMP_PCI 0 -#define DMAE_COMP_GRC 1 - -/* E2 and onward - PCI error handling in the completion */ - -#define DMAE_COMP_REGULAR 0 -#define DMAE_COM_SET_ERR 1 - -#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \ - DMAE_COMMAND_SRC_SHIFT) -#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \ - DMAE_COMMAND_SRC_SHIFT) - -#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \ - DMAE_COMMAND_DST_SHIFT) -#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \ - DMAE_COMMAND_DST_SHIFT) - -#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \ - DMAE_COMMAND_C_DST_SHIFT) -#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \ - DMAE_COMMAND_C_DST_SHIFT) - -#define 
DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE - -#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT) -#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT) -#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT) -#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT) - -#define DMAE_CMD_PORT_0 0 -#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT - -#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET -#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET -#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT - -#define DMAE_SRC_PF 0 -#define DMAE_SRC_VF 1 - -#define DMAE_DST_PF 0 -#define DMAE_DST_VF 1 - -#define DMAE_C_SRC 0 -#define DMAE_C_DST 1 - -#define DMAE_LEN32_RD_MAX 0x80 -#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) - -#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit - indicates eror */ - -#define MAX_DMAE_C_PER_PORT 8 -#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ - BP_E1HVN(bp)) -#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ - E1HVN_MAX) - -/* PCIE link and speed */ -#define PCICFG_LINK_WIDTH 0x1f00000 -#define PCICFG_LINK_WIDTH_SHIFT 20 -#define PCICFG_LINK_SPEED 0xf0000 -#define PCICFG_LINK_SPEED_SHIFT 16 - - -#define BNX2X_NUM_TESTS 7 - -#define BNX2X_PHY_LOOPBACK 0 -#define BNX2X_MAC_LOOPBACK 1 -#define BNX2X_PHY_LOOPBACK_FAILED 1 -#define BNX2X_MAC_LOOPBACK_FAILED 2 -#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ - BNX2X_PHY_LOOPBACK_FAILED) - - -#define STROM_ASSERT_ARRAY_SIZE 50 - - -/* must be used on a CID before placing it on a HW ring */ -#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ - (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ - (x)) - -#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) -#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) - - -#define BNX2X_BTR 4 -#define MAX_SPQ_PENDING 8 - -/* CMNG constants, as derived from system spec calculations */ -/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ -#define DEF_MIN_RATE 100 -/* resolution of the rate shaping timer - 400 usec */ -#define RS_PERIODIC_TIMEOUT_USEC 400 -/* number of bytes in single QM arbitration cycle - - * coefficient for calculating the fairness timer */ -#define QM_ARB_BYTES 160000 -/* resolution of Min algorithm 1:100 */ -#define MIN_RES 100 -/* how many bytes above threshold for the minimal credit of Min algorithm*/ -#define MIN_ABOVE_THRESH 32768 -/* Fairness algorithm integration time coefficient - - * for calculating the actual Tfair */ -#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) -/* Memory of fairness algorithm . 
2 cycles */ -#define FAIR_MEM 2 - - -#define ATTN_NIG_FOR_FUNC (1L << 8) -#define ATTN_SW_TIMER_4_FUNC (1L << 9) -#define GPIO_2_FUNC (1L << 10) -#define GPIO_3_FUNC (1L << 11) -#define GPIO_4_FUNC (1L << 12) -#define ATTN_GENERAL_ATTN_1 (1L << 13) -#define ATTN_GENERAL_ATTN_2 (1L << 14) -#define ATTN_GENERAL_ATTN_3 (1L << 15) -#define ATTN_GENERAL_ATTN_4 (1L << 13) -#define ATTN_GENERAL_ATTN_5 (1L << 14) -#define ATTN_GENERAL_ATTN_6 (1L << 15) - -#define ATTN_HARD_WIRED_MASK 0xff00 -#define ATTENTION_ID 4 - - -/* stuff added to make the code fit 80Col */ - -#define BNX2X_PMF_LINK_ASSERT \ - GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp)) - -#define BNX2X_MC_ASSERT_BITS \ - (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ - GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ - GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ - GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT)) - -#define BNX2X_MCP_ASSERT \ - GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT) - -#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC) -#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \ - GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \ - GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \ - GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \ - GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ - GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) - -#define HW_INTERRUT_ASSERT_SET_0 \ - (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) -#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) -#define HW_INTERRUT_ASSERT_SET_1 \ - (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) -#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) -#define HW_INTERRUT_ASSERT_SET_2 \ - (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ - AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\ - AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT) -#define HW_PRTY_ASSERT_SET_2 
(AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\ - AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) - -#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) - -#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ - AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) - -#define RSS_FLAGS(bp) \ - (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \ - (bp->multi_mode << \ - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) -#define MULTI_MASK 0x7f - - -#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) -#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) -#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) -#define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func) - -#define DEF_USB_IGU_INDEX_OFF \ - offsetof(struct cstorm_def_status_block_u, igu_index) -#define DEF_CSB_IGU_INDEX_OFF \ - offsetof(struct cstorm_def_status_block_c, igu_index) -#define DEF_XSB_IGU_INDEX_OFF \ - offsetof(struct xstorm_def_status_block, igu_index) -#define DEF_TSB_IGU_INDEX_OFF \ - offsetof(struct tstorm_def_status_block, igu_index) - -#define DEF_USB_SEGMENT_OFF \ - offsetof(struct cstorm_def_status_block_u, segment) -#define DEF_CSB_SEGMENT_OFF \ - offsetof(struct cstorm_def_status_block_c, segment) -#define DEF_XSB_SEGMENT_OFF \ - offsetof(struct xstorm_def_status_block, segment) -#define DEF_TSB_SEGMENT_OFF \ - offsetof(struct tstorm_def_status_block, segment) - -#define BNX2X_SP_DSB_INDEX \ - (&bp->def_status_blk->sp_sb.\ - index_values[HC_SP_INDEX_ETH_DEF_CONS]) - -#define SET_FLAG(value, mask, flag) \ - do {\ - (value) &= ~(mask);\ - (value) |= ((flag) << (mask##_SHIFT));\ - } while (0) - -#define GET_FLAG(value, mask) \ - (((value) & (mask)) >> (mask##_SHIFT)) - -#define GET_FIELD(value, fname) \ - (((value) & (fname##_MASK)) >> (fname##_SHIFT)) - -#define CAM_IS_INVALID(x) \ - (GET_FLAG(x.flags, \ - MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ - (T_ETH_MAC_COMMAND_INVALIDATE)) - -/* Number of u32 elements in MC hash array */ -#define MC_HASH_SIZE 8 -#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ - TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4) - - -#ifndef PXP2_REG_PXP2_INT_STS -#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 -#endif - -#ifndef ETH_MAX_RX_CLIENTS_E2 -#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H -#endif - -#define BNX2X_VPD_LEN 128 -#define VENDOR_ID_LEN 4 - -/* Congestion management fairness mode */ -#define CMNG_FNS_NONE 0 -#define CMNG_FNS_MINMAX 1 - -#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/ -#define HC_SEG_ACCESS_ATTN 4 -#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ - -static const u32 dmae_reg_go_c[] = { - DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, - DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, - DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, - 
DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 -}; - -void bnx2x_set_ethtool_ops(struct net_device *netdev); -void bnx2x_notify_link_changed(struct bnx2x *bp); -#endif /* bnx2x.h */ diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c deleted file mode 100644 index d724a18..0000000 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ /dev/null @@ -1,3571 +0,0 @@ -/* bnx2x_cmn.c: Broadcom Everest network driver. - * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Maintained by: Eilon Greenstein - * Written by: Eliezer Tamir - * Based on code from Michael Chan's bnx2 driver - * UDP CSUM errata workaround by Arik Gendelman - * Slowpath and fastpath rework by Vladislav Zolotarov - * Statistics and Link management by Yitchak Gertner - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "bnx2x_cmn.h" -#include "bnx2x_init.h" -#include "bnx2x_sp.h" - - - -/** - * bnx2x_bz_fp - zero content of the fastpath structure. - * - * @bp: driver handle - * @index: fastpath index to be zeroed - * - * Makes sure the contents of the bp->fp[index].napi is kept - * intact. - */ -static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) -{ - struct bnx2x_fastpath *fp = &bp->fp[index]; - struct napi_struct orig_napi = fp->napi; - /* bzero bnx2x_fastpath contents */ - memset(fp, 0, sizeof(*fp)); - - /* Restore the NAPI object as it has been already initialized */ - fp->napi = orig_napi; - - fp->bp = bp; - fp->index = index; - if (IS_ETH_FP(fp)) - fp->max_cos = bp->max_cos; - else - /* Special queues support only one CoS */ - fp->max_cos = 1; - - /* - * set the tpa flag for each queue. The tpa flag determines the queue - * minimal size so it must be set prior to queue memory allocation - */ - fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); - -#ifdef BCM_CNIC - /* We don't want TPA on FCoE, FWD and OOO L2 rings */ - bnx2x_fcoe(bp, disable_tpa) = 1; -#endif -} - -/** - * bnx2x_move_fp - move content of the fastpath structure. - * - * @bp: driver handle - * @from: source FP index - * @to: destination FP index - * - * Makes sure the contents of the bp->fp[to].napi is kept - * intact. 
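bnx2x_bz_fp() above relies on a save/restore-around-memset() pattern: the NAPI object is the one member that must survive re-initialization, so it is copied aside, the whole structure is zeroed, and the copy is written back. A reduced sketch with invented field names (the real structure is bnx2x_fastpath):

#include <string.h>

struct fastpath {
	int napi_state;	/* stands in for fp->napi: must survive */
	int index;
	int max_cos;
	/* ...dozens of other fields that should be zeroed... */
};

void bz_fp(struct fastpath *fp, int index)
{
	int saved = fp->napi_state;	/* save the long-lived member */

	memset(fp, 0, sizeof(*fp));	/* wipe everything */
	fp->napi_state = saved;		/* put it back */
	fp->index = index;
}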
- */ -static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) -{ - struct bnx2x_fastpath *from_fp = &bp->fp[from]; - struct bnx2x_fastpath *to_fp = &bp->fp[to]; - struct napi_struct orig_napi = to_fp->napi; - /* Move bnx2x_fastpath contents */ - memcpy(to_fp, from_fp, sizeof(*to_fp)); - to_fp->index = to; - - /* Restore the NAPI object as it has been already initialized */ - to_fp->napi = orig_napi; -} - -int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ - -/* free skb in the packet ring at pos idx - * return idx of last bd freed - */ -static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, - u16 idx) -{ - struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; - struct eth_tx_start_bd *tx_start_bd; - struct eth_tx_bd *tx_data_bd; - struct sk_buff *skb = tx_buf->skb; - u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; - int nbd; - - /* prefetch skb end pointer to speedup dev_kfree_skb() */ - prefetch(&skb->end); - - DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", - txdata->txq_index, idx, tx_buf, skb); - - /* unmap first bd */ - DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); - tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; - dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), - BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); - - - nbd = le16_to_cpu(tx_start_bd->nbd) - 1; -#ifdef BNX2X_STOP_ON_ERROR - if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { - BNX2X_ERR("BAD nbd!\n"); - bnx2x_panic(); - } -#endif - new_cons = nbd + tx_buf->first_bd; - - /* Get the next bd */ - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - - /* Skip a parse bd... */ - --nbd; - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - - /* ...and the TSO split header bd since they have no mapping */ - if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { - --nbd; - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - } - - /* now free frags */ - while (nbd > 0) { - - DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); - tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; - dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), - BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); - if (--nbd) - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - } - - /* release skb */ - WARN_ON(!skb); - dev_kfree_skb_any(skb); - tx_buf->first_bd = 0; - tx_buf->skb = NULL; - - return new_cons; -} - -int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) -{ - struct netdev_queue *txq; - u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return -1; -#endif - - txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); - hw_cons = le16_to_cpu(*txdata->tx_cons_sb); - sw_cons = txdata->tx_pkt_cons; - - while (sw_cons != hw_cons) { - u16 pkt_cons; - - pkt_cons = TX_BD(sw_cons); - - DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u " - " pkt_cons %u\n", - txdata->txq_index, hw_cons, sw_cons, pkt_cons); - - bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons); - sw_cons++; - } - - txdata->tx_pkt_cons = sw_cons; - txdata->tx_bd_cons = bd_cons; - - /* Need to make the tx_bd_cons update visible to start_xmit() - * before checking for netif_tx_queue_stopped(). Without the - * memory barrier, there is a small possibility that - * start_xmit() will miss it and cause the queue to be stopped - * forever. - * On the other hand we need an rmb() here to ensure the proper - * ordering of bit testing in the following - * netif_tx_queue_stopped(txq) call. 
- */
-	smp_mb();
-
-	if (unlikely(netif_tx_queue_stopped(txq))) {
-		/* Taking tx_lock() is needed to prevent reenabling the queue
-		 * while it's empty. This could have happened if rx_action() gets
-		 * suspended in bnx2x_tx_int() after the condition before
-		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
-		 *
-		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
-		 * sends some packets consuming the whole queue again->
-		 * stops the queue
-		 */
-
-		__netif_tx_lock(txq, smp_processor_id());
-
-		if ((netif_tx_queue_stopped(txq)) &&
-		    (bp->state == BNX2X_STATE_OPEN) &&
-		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
-			netif_tx_wake_queue(txq);
-
-		__netif_tx_unlock(txq);
-	}
-	return 0;
-}
-
-static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
-					     u16 idx)
-{
-	u16 last_max = fp->last_max_sge;
-
-	if (SUB_S16(idx, last_max) > 0)
-		fp->last_max_sge = idx;
-}
-
-static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
-				  struct eth_fast_path_rx_cqe *fp_cqe)
-{
-	struct bnx2x *bp = fp->bp;
-	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
-				     le16_to_cpu(fp_cqe->len_on_bd)) >>
-		      SGE_PAGE_SHIFT;
-	u16 last_max, last_elem, first_elem;
-	u16 delta = 0;
-	u16 i;
-
-	if (!sge_len)
-		return;
-
-	/* First mark all used pages */
-	for (i = 0; i < sge_len; i++)
-		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
-			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
-
-	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
-	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
-
-	/* Here we assume that the last SGE index is the biggest */
-	prefetch((void *)(fp->sge_mask));
-	bnx2x_update_last_max_sge(fp,
-		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
-
-	last_max = RX_SGE(fp->last_max_sge);
-	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
-	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
-
-	/* If ring is not full */
-	if (last_elem + 1 != first_elem)
-		last_elem++;
-
-	/* Now update the prod */
-	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
-		if (likely(fp->sge_mask[i]))
-			break;
-
-		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
-		delta += BIT_VEC64_ELEM_SZ;
-	}
-
-	if (delta > 0) {
-		fp->rx_sge_prod += delta;
-		/* clear page-end entries */
-		bnx2x_clear_sge_mask_next_elems(fp);
-	}
-
-	DP(NETIF_MSG_RX_STATUS,
-	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
-	   fp->last_max_sge, fp->rx_sge_prod);
-}
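The smp_mb() in bnx2x_tx_int() above pairs with a barrier on the transmit path: each side publishes its own update (the consumer index, or the queue-stopped state) before reading the other's, so at least one side is guaranteed to observe the other's write and the wake-up cannot be lost. A compressed sketch of that ordering using C11 atomics in place of the kernel primitives (illustrative only, not the driver's code):

#include <stdatomic.h>

atomic_int tx_cons;	/* stands in for txdata->tx_bd_cons */
atomic_int stopped;	/* stands in for the netif queue state */

/* completion path: publish the new consumer index *before* testing
 * whether the transmit path stopped the queue; the fence plays the
 * role of the smp_mb() above */
void tx_completion(int new_cons)
{
	atomic_store_explicit(&tx_cons, new_cons, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&stopped, memory_order_relaxed)) {
		/* under the tx lock: re-check ring space, then wake */
	}
}

/* transmit path: stop the queue *before* re-reading the consumer index,
 * mirroring the barrier on the other side */
void tx_ring_full(int last_seen_cons)
{
	atomic_store_explicit(&stopped, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&tx_cons, memory_order_relaxed) !=
	    last_seen_cons) {
		/* completions arrived meanwhile: un-stop the queue */
		atomic_store_explicit(&stopped, 0, memory_order_relaxed);
	}
}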
- */ - - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - /* Move the BD from the consumer to the producer */ - bnx2x_reuse_rx_skb(fp, cons, prod); - tpa_info->tpa_state = BNX2X_TPA_ERROR; - return; - } - - /* move empty skb from pool to prod */ - prod_rx_buf->skb = first_buf->skb; - dma_unmap_addr_set(prod_rx_buf, mapping, mapping); - /* point prod_bd to new skb */ - prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - - /* move partial skb from cons to pool (don't unmap yet) */ - *first_buf = *cons_rx_buf; - - /* mark bin state as START */ - tpa_info->parsing_flags = - le16_to_cpu(cqe->pars_flags.flags); - tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); - tpa_info->tpa_state = BNX2X_TPA_START; - tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); - tpa_info->placement_offset = cqe->placement_offset; - -#ifdef BNX2X_STOP_ON_ERROR - fp->tpa_queue_used |= (1 << queue); -#ifdef _ASM_GENERIC_INT_L64_H - DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", -#else - DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", -#endif - fp->tpa_queue_used); -#endif -} - -/* Timestamp option length allowed for TPA aggregation: - * - * nop nop kind length echo val - */ -#define TPA_TSTAMP_OPT_LEN 12 -/** - * bnx2x_set_lro_mss - calculate the approximate value of the MSS - * - * @bp: driver handle - * @parsing_flags: parsing flags from the START CQE - * @len_on_bd: total length of the first packet for the - * aggregation. - * - * Approximate value of the MSS for this aggregation calculated using - * the first packet of it. - */ -static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, - u16 len_on_bd) -{ - /* - * TPA arrgregation won't have either IP options or TCP options - * other than timestamp or IPv6 extension headers. - */ - u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); - - if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == - PRS_FLAG_OVERETH_IPV6) - hdrs_len += sizeof(struct ipv6hdr); - else /* IPv4 */ - hdrs_len += sizeof(struct iphdr); - - - /* Check if there was a TCP timestamp, if there is it's will - * always be 12 bytes length: nop nop kind length echo val. - * - * Otherwise FW would close the aggregation. - */ - if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) - hdrs_len += TPA_TSTAMP_OPT_LEN; - - return len_on_bd - hdrs_len; -} - -static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 queue, struct sk_buff *skb, - struct eth_end_agg_rx_cqe *cqe, - u16 cqe_idx) -{ - struct sw_rx_page *rx_pg, old_rx_pg; - u32 i, frag_len, frag_size, pages; - int err; - int j; - struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; - u16 len_on_bd = tpa_info->len_on_bd; - - frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; - pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; - - /* This is needed in order to enable forwarding support */ - if (frag_size) - skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, - tpa_info->parsing_flags, len_on_bd); - -#ifdef BNX2X_STOP_ON_ERROR - if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { - BNX2X_ERR("SGL length is too long: %d. 
CQE index is %d\n", - pages, cqe_idx); - BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); - bnx2x_panic(); - return -EINVAL; - } -#endif - - /* Run through the SGL and compose the fragmented skb */ - for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { - u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); - - /* FW gives the indices of the SGE as if the ring is an array - (meaning that "next" element will consume 2 indices) */ - frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); - rx_pg = &fp->rx_page_ring[sge_idx]; - old_rx_pg = *rx_pg; - - /* If we fail to allocate a substitute page, we simply stop - where we are and drop the whole packet */ - err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); - if (unlikely(err)) { - fp->eth_q_stats.rx_skb_alloc_failed++; - return err; - } - - /* Unmap the page as we r going to pass it to the stack */ - dma_unmap_page(&bp->pdev->dev, - dma_unmap_addr(&old_rx_pg, mapping), - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); - - /* Add one frag and update the appropriate fields in the skb */ - skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); - - skb->data_len += frag_len; - skb->truesize += frag_len; - skb->len += frag_len; - - frag_size -= frag_len; - } - - return 0; -} - -static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 queue, struct eth_end_agg_rx_cqe *cqe, - u16 cqe_idx) -{ - struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; - struct sw_rx_bd *rx_buf = &tpa_info->first_buf; - u8 pad = tpa_info->placement_offset; - u16 len = tpa_info->len_on_bd; - struct sk_buff *skb = rx_buf->skb; - /* alloc new skb */ - struct sk_buff *new_skb; - u8 old_tpa_state = tpa_info->tpa_state; - - tpa_info->tpa_state = BNX2X_TPA_STOP; - - /* If we there was an error during the handling of the TPA_START - - * drop this aggregation. - */ - if (old_tpa_state == BNX2X_TPA_ERROR) - goto drop; - - /* Try to allocate the new skb */ - new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); - - /* Unmap skb in the pool anyway, as we are going to change - pool entry status to BNX2X_TPA_STOP even if new skb allocation - fails. */ - dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - fp->rx_buf_size, DMA_FROM_DEVICE); - - if (likely(new_skb)) { - prefetch(skb); - prefetch(((char *)(skb)) + L1_CACHE_BYTES); - -#ifdef BNX2X_STOP_ON_ERROR - if (pad + len > fp->rx_buf_size) { - BNX2X_ERR("skb_put is about to fail... " - "pad %d len %d rx_buf_size %d\n", - pad, len, fp->rx_buf_size); - bnx2x_panic(); - return; - } -#endif - - skb_reserve(skb, pad); - skb_put(skb, len); - - skb->protocol = eth_type_trans(skb, bp->dev); - skb->ip_summed = CHECKSUM_UNNECESSARY; - - if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) { - if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) - __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); - napi_gro_receive(&fp->napi, skb); - } else { - DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" - " - dropping packet!\n"); - dev_kfree_skb_any(skb); - } - - - /* put new skb in bin */ - rx_buf->skb = new_skb; - - return; - } - -drop: - /* drop the packet and keep the buffer in the bin */ - DP(NETIF_MSG_RX_STATUS, - "Failed to allocate or map a new skb - dropping packet!\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; -} - -/* Set Toeplitz hash value in the skb using the value from the - * CQE (calculated by HW). 
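bnx2x_set_skb_rxhash() below reduces to a guarded copy: take the hardware-computed Toeplitz result only when the device flags it valid and the netdev feature is enabled. A compact model with illustrative field names:

#include <stdint.h>

struct cqe { uint32_t flags; uint32_t rss_hash; };
#define HASH_VALID 0x1u

void set_rxhash(int rxhash_enabled, const struct cqe *c, uint32_t *rxhash)
{
	if (rxhash_enabled && (c->flags & HASH_VALID))
		*rxhash = c->rss_hash;   /* le32_to_cpu() in the real driver */
}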
- */ -static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe, - struct sk_buff *skb) -{ - /* Set Toeplitz hash from CQE */ - if ((bp->dev->features & NETIF_F_RXHASH) && - (cqe->fast_path_cqe.status_flags & - ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) - skb->rxhash = - le32_to_cpu(cqe->fast_path_cqe.rss_hash_result); -} - -int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) -{ - struct bnx2x *bp = fp->bp; - u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; - u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; - int rx_pkt = 0; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return 0; -#endif - - /* CQ "next element" is of the size of the regular element, - that's why it's ok here */ - hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); - if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - hw_comp_cons++; - - bd_cons = fp->rx_bd_cons; - bd_prod = fp->rx_bd_prod; - bd_prod_fw = bd_prod; - sw_comp_cons = fp->rx_comp_cons; - sw_comp_prod = fp->rx_comp_prod; - - /* Memory barrier necessary as speculative reads of the rx - * buffer can be ahead of the index in the status block - */ - rmb(); - - DP(NETIF_MSG_RX_STATUS, - "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", - fp->index, hw_comp_cons, sw_comp_cons); - - while (sw_comp_cons != hw_comp_cons) { - struct sw_rx_bd *rx_buf = NULL; - struct sk_buff *skb; - union eth_rx_cqe *cqe; - struct eth_fast_path_rx_cqe *cqe_fp; - u8 cqe_fp_flags; - enum eth_rx_cqe_type cqe_fp_type; - u16 len, pad; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return 0; -#endif - - comp_ring_cons = RCQ_BD(sw_comp_cons); - bd_prod = RX_BD(bd_prod); - bd_cons = RX_BD(bd_cons); - - /* Prefetch the page containing the BD descriptor - at producer's index. It will be needed when new skb is - allocated */ - prefetch((void *)(PAGE_ALIGN((unsigned long) - (&fp->rx_desc_ring[bd_prod])) - - PAGE_SIZE + 1)); - - cqe = &fp->rx_comp_ring[comp_ring_cons]; - cqe_fp = &cqe->fast_path_cqe; - cqe_fp_flags = cqe_fp->type_error_flags; - cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; - - DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" - " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), - cqe_fp_flags, cqe_fp->status_flags, - le32_to_cpu(cqe_fp->rss_hash_result), - le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len)); - - /* is this a slowpath msg? 
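A single completion queue carries several kinds of entries, and the branch that follows dispatches on a type field. A toy model of the dispatch, names illustrative:

enum cqe_type { CQE_SLOWPATH, CQE_TPA_START, CQE_TPA_STOP, CQE_FAST };

void handle_cqe(enum cqe_type t)
{
	switch (t) {
	case CQE_SLOWPATH:  /* ramrod / slow-path event          */ break;
	case CQE_TPA_START: /* open an aggregation bin           */ break;
	case CQE_TPA_STOP:  /* close the bin, build a large skb  */ break;
	case CQE_FAST:      /* ordinary received frame           */ break;
	}
}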
*/ - if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { - bnx2x_sp_event(fp, cqe); - goto next_cqe; - - /* this is an rx packet */ - } else { - rx_buf = &fp->rx_buf_ring[bd_cons]; - skb = rx_buf->skb; - prefetch(skb); - - if (!CQE_TYPE_FAST(cqe_fp_type)) { -#ifdef BNX2X_STOP_ON_ERROR - /* sanity check */ - if (fp->disable_tpa && - (CQE_TYPE_START(cqe_fp_type) || - CQE_TYPE_STOP(cqe_fp_type))) - BNX2X_ERR("START/STOP packet while " - "disable_tpa type %x\n", - CQE_TYPE(cqe_fp_type)); -#endif - - if (CQE_TYPE_START(cqe_fp_type)) { - u16 queue = cqe_fp->queue_index; - DP(NETIF_MSG_RX_STATUS, - "calling tpa_start on queue %d\n", - queue); - - bnx2x_tpa_start(fp, queue, skb, - bd_cons, bd_prod, - cqe_fp); - - /* Set Toeplitz hash for LRO skb */ - bnx2x_set_skb_rxhash(bp, cqe, skb); - - goto next_rx; - - } else { - u16 queue = - cqe->end_agg_cqe.queue_index; - DP(NETIF_MSG_RX_STATUS, - "calling tpa_stop on queue %d\n", - queue); - - bnx2x_tpa_stop(bp, fp, queue, - &cqe->end_agg_cqe, - comp_ring_cons); -#ifdef BNX2X_STOP_ON_ERROR - if (bp->panic) - return 0; -#endif - - bnx2x_update_sge_prod(fp, cqe_fp); - goto next_cqe; - } - } - /* non TPA */ - len = le16_to_cpu(cqe_fp->pkt_len); - pad = cqe_fp->placement_offset; - dma_sync_single_for_cpu(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - pad + RX_COPY_THRESH, - DMA_FROM_DEVICE); - prefetch(((char *)(skb)) + L1_CACHE_BYTES); - - /* is this an error packet? */ - if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { - DP(NETIF_MSG_RX_ERR, - "ERROR flags %x rx packet %u\n", - cqe_fp_flags, sw_comp_cons); - fp->eth_q_stats.rx_err_discard_pkt++; - goto reuse_rx; - } - - /* Since we don't have a jumbo ring - * copy small packets if mtu > 1500 - */ - if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && - (len <= RX_COPY_THRESH)) { - struct sk_buff *new_skb; - - new_skb = netdev_alloc_skb(bp->dev, len + pad); - if (new_skb == NULL) { - DP(NETIF_MSG_RX_ERR, - "ERROR packet dropped " - "because of alloc failure\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; - goto reuse_rx; - } - - /* aligned copy */ - skb_copy_from_linear_data_offset(skb, pad, - new_skb->data + pad, len); - skb_reserve(new_skb, pad); - skb_put(new_skb, len); - - bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); - - skb = new_skb; - - } else - if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - fp->rx_buf_size, - DMA_FROM_DEVICE); - skb_reserve(skb, pad); - skb_put(skb, len); - - } else { - DP(NETIF_MSG_RX_ERR, - "ERROR packet dropped because " - "of alloc failure\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; -reuse_rx: - bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); - goto next_rx; - } - - skb->protocol = eth_type_trans(skb, bp->dev); - - /* Set Toeplitz hash for a none-LRO skb */ - bnx2x_set_skb_rxhash(bp, cqe, skb); - - skb_checksum_none_assert(skb); - - if (bp->dev->features & NETIF_F_RXCSUM) { - - if (likely(BNX2X_RX_CSUM_OK(cqe))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - fp->eth_q_stats.hw_csum_err++; - } - } - - skb_record_rx_queue(skb, fp->index); - - if (le16_to_cpu(cqe_fp->pars_flags.flags) & - PARSING_FLAGS_VLAN) - __vlan_hwaccel_put_tag(skb, - le16_to_cpu(cqe_fp->vlan_tag)); - napi_gro_receive(&fp->napi, skb); - - -next_rx: - rx_buf->skb = NULL; - - bd_cons = NEXT_RX_IDX(bd_cons); - bd_prod = NEXT_RX_IDX(bd_prod); - bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); - rx_pkt++; -next_cqe: - sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); - sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); - - if (rx_pkt == budget) - break; - } /* while */ - - 
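Stripped of the TPA and error handling, the loop above is standard NAPI bookkeeping: advance the software consumer toward the hardware consumer, bounded by the budget. A minimal sketch assuming a power-of-two ring:

#include <stdint.h>

#define RING_SIZE 512u                      /* assumed power of two */
#define NEXT(idx) (((idx) + 1u) & (RING_SIZE - 1u))

unsigned int rx_poll(uint16_t *sw_cons, uint16_t hw_cons, unsigned int budget)
{
	unsigned int done = 0;

	while (*sw_cons != hw_cons && done < budget) {
		/* ...process one completion entry here... */
		*sw_cons = NEXT(*sw_cons);
		done++;
	}
	return done;  /* caller re-enables interrupts only when done < budget */
}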
fp->rx_bd_cons = bd_cons; - fp->rx_bd_prod = bd_prod_fw; - fp->rx_comp_cons = sw_comp_cons; - fp->rx_comp_prod = sw_comp_prod; - - /* Update producers */ - bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, - fp->rx_sge_prod); - - fp->rx_pkt += rx_pkt; - fp->rx_calls++; - - return rx_pkt; -} - -static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) -{ - struct bnx2x_fastpath *fp = fp_cookie; - struct bnx2x *bp = fp->bp; - u8 cos; - - DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB " - "[fp %d fw_sd %d igusb %d]\n", - fp->index, fp->fw_sb_id, fp->igu_sb_id); - bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return IRQ_HANDLED; -#endif - - /* Handle Rx and Tx according to MSI-X vector */ - prefetch(fp->rx_cons_sb); - - for_each_cos_in_tx_queue(fp, cos) - prefetch(fp->txdata[cos].tx_cons_sb); - - prefetch(&fp->sb_running_index[SM_RX_ID]); - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); - - return IRQ_HANDLED; -} - -/* HW Lock for shared dual port PHYs */ -void bnx2x_acquire_phy_lock(struct bnx2x *bp) -{ - mutex_lock(&bp->port.phy_mutex); - - if (bp->port.need_hw_lock) - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); -} - -void bnx2x_release_phy_lock(struct bnx2x *bp) -{ - if (bp->port.need_hw_lock) - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); - - mutex_unlock(&bp->port.phy_mutex); -} - -/* calculates MF speed according to current linespeed and MF configuration */ -u16 bnx2x_get_mf_speed(struct bnx2x *bp) -{ - u16 line_speed = bp->link_vars.line_speed; - if (IS_MF(bp)) { - u16 maxCfg = bnx2x_extract_max_cfg(bp, - bp->mf_config[BP_VN(bp)]); - - /* Calculate the current MAX line speed limit for the MF - * devices - */ - if (IS_MF_SI(bp)) - line_speed = (line_speed * maxCfg) / 100; - else { /* SD mode */ - u16 vn_max_rate = maxCfg * 100; - - if (vn_max_rate < line_speed) - line_speed = vn_max_rate; - } - } - - return line_speed; -} - -/** - * bnx2x_fill_report_data - fill link report data to report - * - * @bp: driver handle - * @data: link state to update - * - * It uses a none-atomic bit operations because is called under the mutex. - */ -static inline void bnx2x_fill_report_data(struct bnx2x *bp, - struct bnx2x_link_report_data *data) -{ - u16 line_speed = bnx2x_get_mf_speed(bp); - - memset(data, 0, sizeof(*data)); - - /* Fill the report data: efective line speed */ - data->line_speed = line_speed; - - /* Link is down */ - if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) - __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, - &data->link_report_flags); - - /* Full DUPLEX */ - if (bp->link_vars.duplex == DUPLEX_FULL) - __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags); - - /* Rx Flow Control is ON */ - if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) - __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags); - - /* Tx Flow Control is ON */ - if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) - __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags); -} - -/** - * bnx2x_link_report - report link status to OS. - * - * @bp: driver handle - * - * Calls the __bnx2x_link_report() under the same locking scheme - * as a link/PHY state managing code to ensure a consistent link - * reporting. - */ - -void bnx2x_link_report(struct bnx2x *bp) -{ - bnx2x_acquire_phy_lock(bp); - __bnx2x_link_report(bp); - bnx2x_release_phy_lock(bp); -} - -/** - * __bnx2x_link_report - report link status to OS. - * - * @bp: driver handle - * - * None atomic inmlementation. 
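The report path compares whole snapshots with memcmp(), which is only sound because the structure is zeroed before any flag is set. A compact model of the snapshot fill, with illustrative bit values:

#include <stdint.h>
#include <string.h>

#define LINK_DOWN (1u << 0)
#define LINK_FD   (1u << 1)
#define RX_FC_ON  (1u << 2)
#define TX_FC_ON  (1u << 3)

struct report { uint16_t speed; uint32_t flags; };

void fill_report(struct report *r, int up, int fd, int rx_fc, int tx_fc,
		 uint16_t speed)
{
	memset(r, 0, sizeof(*r));  /* zero padding so memcmp() is meaningful */
	r->speed = speed;
	if (!up)   r->flags |= LINK_DOWN;
	if (fd)    r->flags |= LINK_FD;
	if (rx_fc) r->flags |= RX_FC_ON;
	if (tx_fc) r->flags |= TX_FC_ON;
}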
- * Should be called under the phy_lock. - */ -void __bnx2x_link_report(struct bnx2x *bp) -{ - struct bnx2x_link_report_data cur_data; - - /* reread mf_cfg */ - if (!CHIP_IS_E1(bp)) - bnx2x_read_mf_cfg(bp); - - /* Read the current link report info */ - bnx2x_fill_report_data(bp, &cur_data); - - /* Don't report link down or exactly the same link status twice */ - if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || - (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, - &bp->last_reported_link.link_report_flags) && - test_bit(BNX2X_LINK_REPORT_LINK_DOWN, - &cur_data.link_report_flags))) - return; - - bp->link_cnt++; - - /* We are going to report a new link parameters now - - * remember the current data for the next time. - */ - memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); - - if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, - &cur_data.link_report_flags)) { - netif_carrier_off(bp->dev); - netdev_err(bp->dev, "NIC Link is Down\n"); - return; - } else { - netif_carrier_on(bp->dev); - netdev_info(bp->dev, "NIC Link is Up, "); - pr_cont("%d Mbps ", cur_data.line_speed); - - if (test_and_clear_bit(BNX2X_LINK_REPORT_FD, - &cur_data.link_report_flags)) - pr_cont("full duplex"); - else - pr_cont("half duplex"); - - /* Handle the FC at the end so that only these flags would be - * possibly set. This way we may easily check if there is no FC - * enabled. - */ - if (cur_data.link_report_flags) { - if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON, - &cur_data.link_report_flags)) { - pr_cont(", receive "); - if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON, - &cur_data.link_report_flags)) - pr_cont("& transmit "); - } else { - pr_cont(", transmit "); - } - pr_cont("flow control ON"); - } - pr_cont("\n"); - } -} - -void bnx2x_init_rx_rings(struct bnx2x *bp) -{ - int func = BP_FUNC(bp); - int max_agg_queues = CHIP_IS_E1(bp) ? 
ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H_E2; - u16 ring_prod; - int i, j; - - /* Allocate TPA resources */ - for_each_rx_queue(bp, j) { - struct bnx2x_fastpath *fp = &bp->fp[j]; - - DP(NETIF_MSG_IFUP, - "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); - - if (!fp->disable_tpa) { - /* Fill the per-aggregtion pool */ - for (i = 0; i < max_agg_queues; i++) { - struct bnx2x_agg_info *tpa_info = - &fp->tpa_info[i]; - struct sw_rx_bd *first_buf = - &tpa_info->first_buf; - - first_buf->skb = netdev_alloc_skb(bp->dev, - fp->rx_buf_size); - if (!first_buf->skb) { - BNX2X_ERR("Failed to allocate TPA " - "skb pool for queue[%d] - " - "disabling TPA on this " - "queue!\n", j); - bnx2x_free_tpa_pool(bp, fp, i); - fp->disable_tpa = 1; - break; - } - dma_unmap_addr_set(first_buf, mapping, 0); - tpa_info->tpa_state = BNX2X_TPA_STOP; - } - - /* "next page" elements initialization */ - bnx2x_set_next_page_sgl(fp); - - /* set SGEs bit mask */ - bnx2x_init_sge_ring_bit_mask(fp); - - /* Allocate SGEs and initialize the ring elements */ - for (i = 0, ring_prod = 0; - i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { - - if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { - BNX2X_ERR("was only able to allocate " - "%d rx sges\n", i); - BNX2X_ERR("disabling TPA for " - "queue[%d]\n", j); - /* Cleanup already allocated elements */ - bnx2x_free_rx_sge_range(bp, fp, - ring_prod); - bnx2x_free_tpa_pool(bp, fp, - max_agg_queues); - fp->disable_tpa = 1; - ring_prod = 0; - break; - } - ring_prod = NEXT_SGE_IDX(ring_prod); - } - - fp->rx_sge_prod = ring_prod; - } - } - - for_each_rx_queue(bp, j) { - struct bnx2x_fastpath *fp = &bp->fp[j]; - - fp->rx_bd_cons = 0; - - /* Activate BD ring */ - /* Warning! - * this will generate an interrupt (to the TSTORM) - * must only be done after chip is initialized - */ - bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, - fp->rx_sge_prod); - - if (j != 0) - continue; - - if (CHIP_IS_E1(bp)) { - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), - U64_LO(fp->rx_comp_mapping)); - REG_WR(bp, BAR_USTRORM_INTMEM + - USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, - U64_HI(fp->rx_comp_mapping)); - } - } -} - -static void bnx2x_free_tx_skbs(struct bnx2x *bp) -{ - int i; - u8 cos; - - for_each_tx_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; - - u16 bd_cons = txdata->tx_bd_cons; - u16 sw_prod = txdata->tx_pkt_prod; - u16 sw_cons = txdata->tx_pkt_cons; - - while (sw_cons != sw_prod) { - bd_cons = bnx2x_free_tx_pkt(bp, txdata, - TX_BD(sw_cons)); - sw_cons++; - } - } - } -} - -static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) -{ - struct bnx2x *bp = fp->bp; - int i; - - /* ring wasn't allocated */ - if (fp->rx_buf_ring == NULL) - return; - - for (i = 0; i < NUM_RX_BD; i++) { - struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; - struct sk_buff *skb = rx_buf->skb; - - if (skb == NULL) - continue; - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - fp->rx_buf_size, DMA_FROM_DEVICE); - - rx_buf->skb = NULL; - dev_kfree_skb(skb); - } -} - -static void bnx2x_free_rx_skbs(struct bnx2x *bp) -{ - int j; - - for_each_rx_queue(bp, j) { - struct bnx2x_fastpath *fp = &bp->fp[j]; - - bnx2x_free_rx_bds(fp); - - if (!fp->disable_tpa) - bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 
- ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H_E2); - } -} - -void bnx2x_free_skbs(struct bnx2x *bp) -{ - bnx2x_free_tx_skbs(bp); - bnx2x_free_rx_skbs(bp); -} - -void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) -{ - /* load old values */ - u32 mf_cfg = bp->mf_config[BP_VN(bp)]; - - if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { - /* leave all but MAX value */ - mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; - - /* set new MAX value */ - mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) - & FUNC_MF_CFG_MAX_BW_MASK; - - bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); - } -} - -/** - * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors - * - * @bp: driver handle - * @nvecs: number of vectors to be released - */ -static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) -{ - int i, offset = 0; - - if (nvecs == offset) - return; - free_irq(bp->msix_table[offset].vector, bp->dev); - DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", - bp->msix_table[offset].vector); - offset++; -#ifdef BCM_CNIC - if (nvecs == offset) - return; - offset++; -#endif - - for_each_eth_queue(bp, i) { - if (nvecs == offset) - return; - DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d " - "irq\n", i, bp->msix_table[offset].vector); - - free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); - } -} - -void bnx2x_free_irq(struct bnx2x *bp) -{ - if (bp->flags & USING_MSIX_FLAG) - bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + - CNIC_PRESENT + 1); - else if (bp->flags & USING_MSI_FLAG) - free_irq(bp->pdev->irq, bp->dev); - else - free_irq(bp->pdev->irq, bp->dev); -} - -int bnx2x_enable_msix(struct bnx2x *bp) -{ - int msix_vec = 0, i, rc, req_cnt; - - bp->msix_table[msix_vec].entry = msix_vec; - DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", - bp->msix_table[0].entry); - msix_vec++; - -#ifdef BCM_CNIC - bp->msix_table[msix_vec].entry = msix_vec; - DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n", - bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); - msix_vec++; -#endif - /* We need separate vectors for ETH queues only (not FCoE) */ - for_each_eth_queue(bp, i) { - bp->msix_table[msix_vec].entry = msix_vec; - DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " - "(fastpath #%u)\n", msix_vec, msix_vec, i); - msix_vec++; - } - - req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; - - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); - - /* - * reconfigure number of tx/rx queues according to available - * MSI-X vectors - */ - if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { - /* how less vectors we will have? 
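The fallback below leans on pci_enable_msix()'s return convention: a positive return is the number of vectors actually available. The shape of the retry, with a stub standing in for the PCI call:

/* stand-in for pci_enable_msix(): 0 = ok, >0 = vectors available, <0 = error */
static int pci_try_msix(int n)
{
	const int hw_avail = 4;    /* pretend the device exposes 4 vectors */
	return n <= hw_avail ? 0 : hw_avail;
}

int setup_vectors(int wanted, int min_needed)
{
	int rc = pci_try_msix(wanted);

	if (rc == 0)
		return wanted;                           /* got everything */
	if (rc >= min_needed && pci_try_msix(rc) == 0)
		return rc;            /* shrunk request worked; drop queues */
	return -1;                    /* fall back to MSI or INTx */
}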
*/ - int diff = req_cnt - rc; - - DP(NETIF_MSG_IFUP, - "Trying to use less MSI-X vectors: %d\n", rc); - - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); - - if (rc) { - DP(NETIF_MSG_IFUP, - "MSI-X is not attainable rc %d\n", rc); - return rc; - } - /* - * decrease number of queues by number of unallocated entries - */ - bp->num_queues -= diff; - - DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", - bp->num_queues); - } else if (rc) { - /* fall to INTx if not enough memory */ - if (rc == -ENOMEM) - bp->flags |= DISABLE_MSI_FLAG; - DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); - return rc; - } - - bp->flags |= USING_MSIX_FLAG; - - return 0; -} - -static int bnx2x_req_msix_irqs(struct bnx2x *bp) -{ - int i, rc, offset = 0; - - rc = request_irq(bp->msix_table[offset++].vector, - bnx2x_msix_sp_int, 0, - bp->dev->name, bp->dev); - if (rc) { - BNX2X_ERR("request sp irq failed\n"); - return -EBUSY; - } - -#ifdef BCM_CNIC - offset++; -#endif - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", - bp->dev->name, i); - - rc = request_irq(bp->msix_table[offset].vector, - bnx2x_msix_fp_int, 0, fp->name, fp); - if (rc) { - BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i, - bp->msix_table[offset].vector, rc); - bnx2x_free_msix_irqs(bp, offset); - return -EBUSY; - } - - offset++; - } - - i = BNX2X_NUM_ETH_QUEUES(bp); - offset = 1 + CNIC_PRESENT; - netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" - " ... fp[%d] %d\n", - bp->msix_table[0].vector, - 0, bp->msix_table[offset].vector, - i - 1, bp->msix_table[offset + i - 1].vector); - - return 0; -} - -int bnx2x_enable_msi(struct bnx2x *bp) -{ - int rc; - - rc = pci_enable_msi(bp->pdev); - if (rc) { - DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); - return -1; - } - bp->flags |= USING_MSI_FLAG; - - return 0; -} - -static int bnx2x_req_irq(struct bnx2x *bp) -{ - unsigned long flags; - int rc; - - if (bp->flags & USING_MSI_FLAG) - flags = 0; - else - flags = IRQF_SHARED; - - rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, - bp->dev->name, bp->dev); - return rc; -} - -static inline int bnx2x_setup_irqs(struct bnx2x *bp) -{ - int rc = 0; - if (bp->flags & USING_MSIX_FLAG) { - rc = bnx2x_req_msix_irqs(bp); - if (rc) - return rc; - } else { - bnx2x_ack_int(bp); - rc = bnx2x_req_irq(bp); - if (rc) { - BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); - return rc; - } - if (bp->flags & USING_MSI_FLAG) { - bp->dev->irq = bp->pdev->irq; - netdev_info(bp->dev, "using MSI IRQ %d\n", - bp->pdev->irq); - } - } - - return 0; -} - -static inline void bnx2x_napi_enable(struct bnx2x *bp) -{ - int i; - - for_each_rx_queue(bp, i) - napi_enable(&bnx2x_fp(bp, i, napi)); -} - -static inline void bnx2x_napi_disable(struct bnx2x *bp) -{ - int i; - - for_each_rx_queue(bp, i) - napi_disable(&bnx2x_fp(bp, i, napi)); -} - -void bnx2x_netif_start(struct bnx2x *bp) -{ - if (netif_running(bp->dev)) { - bnx2x_napi_enable(bp); - bnx2x_int_enable(bp); - if (bp->state == BNX2X_STATE_OPEN) - netif_tx_wake_all_queues(bp->dev); - } -} - -void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) -{ - bnx2x_int_disable_sync(bp, disable_hw); - bnx2x_napi_disable(bp); -} - -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) -{ - struct bnx2x *bp = netdev_priv(dev); -#ifdef BCM_CNIC - if (NO_FCOE(bp)) - return skb_tx_hash(dev, skb); - else { - struct ethhdr *hdr = (struct ethhdr *)skb->data; - u16 ether_type = ntohs(hdr->h_proto); - - /* Skip VLAN tag if present */ - 
if (ether_type == ETH_P_8021Q) { - struct vlan_ethhdr *vhdr = - (struct vlan_ethhdr *)skb->data; - - ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); - } - - /* If ethertype is FCoE or FIP - use FCoE ring */ - if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) - return bnx2x_fcoe_tx(bp, txq_index); - } -#endif - /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring - */ - return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); -} - -void bnx2x_set_num_queues(struct bnx2x *bp) -{ - switch (bp->multi_mode) { - case ETH_RSS_MODE_DISABLED: - bp->num_queues = 1; - break; - case ETH_RSS_MODE_REGULAR: - bp->num_queues = bnx2x_calc_num_queues(bp); - break; - - default: - bp->num_queues = 1; - break; - } - - /* Add special queues */ - bp->num_queues += NON_ETH_CONTEXT_USE; -} - -static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) -{ - int rc, tx, rx; - - tx = MAX_TXQS_PER_COS * bp->max_cos; - rx = BNX2X_NUM_ETH_QUEUES(bp); - -/* account for fcoe queue */ -#ifdef BCM_CNIC - if (!NO_FCOE(bp)) { - rx += FCOE_PRESENT; - tx += FCOE_PRESENT; - } -#endif - - rc = netif_set_real_num_tx_queues(bp->dev, tx); - if (rc) { - BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc); - return rc; - } - rc = netif_set_real_num_rx_queues(bp->dev, rx); - if (rc) { - BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc); - return rc; - } - - DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n", - tx, rx); - - return rc; -} - -static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) -{ - int i; - - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - - /* Always use a mini-jumbo MTU for the FCoE L2 ring */ - if (IS_FCOE_IDX(i)) - /* - * Although there are no IP frames expected to arrive to - * this ring we still want to add an - * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer - * overrun attack. - */ - fp->rx_buf_size = - BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + - BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; - else - fp->rx_buf_size = - bp->dev->mtu + ETH_OVREHEAD + - BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; - } -} - -static inline int bnx2x_init_rss_pf(struct bnx2x *bp) -{ - int i; - u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; - u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); - - /* - * Prepare the inital contents fo the indirection table if RSS is - * enabled - */ - if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { - for (i = 0; i < sizeof(ind_table); i++) - ind_table[i] = - bp->fp->cl_id + (i % num_eth_queues); - } - - /* - * For 57710 and 57711 SEARCHER configuration (rss_keys) is - * per-port, so if explicit configuration is needed , do it only - * for a PMF. - * - * For 57712 and newer on the other hand it's a per-function - * configuration. - */ - return bnx2x_config_rss_pf(bp, ind_table, - bp->port.pmf || !CHIP_IS_E1x(bp)); -} - -int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) -{ - struct bnx2x_config_rss_params params = {0}; - int i; - - /* Although RSS is meaningless when there is a single HW queue we - * still need it enabled in order to have HW Rx hash generated. 
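The indirection-table fill in bnx2x_init_rss_pf() above is a plain round-robin spread of table slots across the ethernet queues, offset by the base client id. The same loop in isolation, table size illustrative:

#include <stdint.h>

#define IND_TABLE_SIZE 128

void fill_ind_table(uint8_t *table, uint8_t base_cl_id, uint8_t num_queues)
{
	for (int i = 0; i < IND_TABLE_SIZE; i++)
		table[i] = base_cl_id + (i % num_queues);
}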
- * - * if (!is_eth_multi(bp)) - * bp->multi_mode = ETH_RSS_MODE_DISABLED; - */ - - params.rss_obj = &bp->rss_conf_obj; - - __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); - - /* RSS mode */ - switch (bp->multi_mode) { - case ETH_RSS_MODE_DISABLED: - __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_REGULAR: - __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_VLAN_PRI: - __set_bit(BNX2X_RSS_MODE_VLAN_PRI, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_E1HOV_PRI: - __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, ¶ms.rss_flags); - break; - case ETH_RSS_MODE_IP_DSCP: - __set_bit(BNX2X_RSS_MODE_IP_DSCP, ¶ms.rss_flags); - break; - default: - BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode); - return -EINVAL; - } - - /* If RSS is enabled */ - if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { - /* RSS configuration */ - __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); - - /* Hash bits */ - params.rss_result_mask = MULTI_MASK; - - memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); - - if (config_hash) { - /* RSS keys */ - for (i = 0; i < sizeof(params.rss_key) / 4; i++) - params.rss_key[i] = random32(); - - __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); - } - } - - return bnx2x_config_rss(bp, ¶ms); -} - -static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) -{ - struct bnx2x_func_state_params func_params = {0}; - - /* Prepare parameters for function state transitions */ - __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); - - func_params.f_obj = &bp->func_obj; - func_params.cmd = BNX2X_F_CMD_HW_INIT; - - func_params.params.hw_init.load_phase = load_code; - - return bnx2x_func_state_change(bp, &func_params); -} - -/* - * Cleans the object that have internal lists without sending - * ramrods. Should be run when interrutps are disabled. - */ -static void bnx2x_squeeze_objects(struct bnx2x *bp) -{ - int rc; - unsigned long ramrod_flags = 0, vlan_mac_flags = 0; - struct bnx2x_mcast_ramrod_params rparam = {0}; - struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; - - /***************** Cleanup MACs' object first *************************/ - - /* Wait for completion of requested */ - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - /* Perform a dry cleanup */ - __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); - - /* Clean ETH primary MAC */ - __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); - rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, - &ramrod_flags); - if (rc != 0) - BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); - - /* Cleanup UC list */ - vlan_mac_flags = 0; - __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); - rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, - &ramrod_flags); - if (rc != 0) - BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc); - - /***************** Now clean mcast object *****************************/ - rparam.mcast_obj = &bp->mcast_obj; - __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); - - /* Add a DEL command... 
*/ - rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); - if (rc < 0) - BNX2X_ERR("Failed to add a new DEL command to a multi-cast " - "object: %d\n", rc); - - /* ...and wait until all pending commands are cleared */ - rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); - while (rc != 0) { - if (rc < 0) { - BNX2X_ERR("Failed to clean multi-cast object: %d\n", - rc); - return; - } - - rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); - } -} - -#ifndef BNX2X_STOP_ON_ERROR -#define LOAD_ERROR_EXIT(bp, label) \ - do { \ - (bp)->state = BNX2X_STATE_ERROR; \ - goto label; \ - } while (0) -#else -#define LOAD_ERROR_EXIT(bp, label) \ - do { \ - (bp)->state = BNX2X_STATE_ERROR; \ - (bp)->panic = 1; \ - return -EBUSY; \ - } while (0) -#endif - -/* must be called with rtnl_lock */ -int bnx2x_nic_load(struct bnx2x *bp, int load_mode) -{ - int port = BP_PORT(bp); - u32 load_code; - int i, rc; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return -EPERM; -#endif - - bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; - - /* Set the initial link reported state to link down */ - bnx2x_acquire_phy_lock(bp); - memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); - __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, - &bp->last_reported_link.link_report_flags); - bnx2x_release_phy_lock(bp); - - /* must be called before memory allocation and HW init */ - bnx2x_ilt_set_info(bp); - - /* - * Zero fastpath structures preserving invariants like napi, which are - * allocated only once, fp index, max_cos, bp pointer. - * Also set fp->disable_tpa. - */ - for_each_queue(bp, i) - bnx2x_bz_fp(bp, i); - - - /* Set the receive queues buffer size */ - bnx2x_set_rx_buf_size(bp); - - if (bnx2x_alloc_mem(bp)) - return -ENOMEM; - - /* As long as bnx2x_alloc_mem() may possibly update - * bp->num_queues, bnx2x_set_real_num_queues() should always - * come after it. - */ - rc = bnx2x_set_real_num_queues(bp); - if (rc) { - BNX2X_ERR("Unable to set real_num_queues\n"); - LOAD_ERROR_EXIT(bp, load_error0); - } - - /* configure multi cos mappings in kernel. - * this configuration may be overriden by a multi class queue discipline - * or by a dcbx negotiation result. 
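With multiple classes of service the netdev exposes max_cos copies of each ethernet queue, so a flat tx queue index must be decomposed back into a (fastpath, cos) pair. A sketch assuming cos-major striping, which is what the TXQ_TO_FP()/TXQ_TO_COS() macros used later in start_xmit express:

#define NUM_ETH 8   /* illustrative queue count */

static inline int txq_to_fp(int txq)  { return txq % NUM_ETH; }
static inline int txq_to_cos(int txq) { return txq / NUM_ETH; }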
- */ - bnx2x_setup_tc(bp->dev, bp->max_cos); - - bnx2x_napi_enable(bp); - - /* Send LOAD_REQUEST command to MCP - * Returns the type of LOAD command: - * if it is the first port to be initialized - * common blocks should be initialized, otherwise - not - */ - if (!BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); - if (!load_code) { - BNX2X_ERR("MCP response failure, aborting\n"); - rc = -EBUSY; - LOAD_ERROR_EXIT(bp, load_error1); - } - if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { - rc = -EBUSY; /* other port in diagnostic mode */ - LOAD_ERROR_EXIT(bp, load_error1); - } - - } else { - int path = BP_PATH(bp); - - DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - load_count[path][0]++; - load_count[path][1 + port]++; - DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - if (load_count[path][0] == 1) - load_code = FW_MSG_CODE_DRV_LOAD_COMMON; - else if (load_count[path][1 + port] == 1) - load_code = FW_MSG_CODE_DRV_LOAD_PORT; - else - load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; - } - - if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || - (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || - (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { - bp->port.pmf = 1; - /* - * We need the barrier to ensure the ordering between the - * writing to bp->port.pmf here and reading it from the - * bnx2x_periodic_task(). - */ - smp_mb(); - queue_delayed_work(bnx2x_wq, &bp->period_task, 0); - } else - bp->port.pmf = 0; - - DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); - - /* Init Function state controlling object */ - bnx2x__init_func_obj(bp); - - /* Initialize HW */ - rc = bnx2x_init_hw(bp, load_code); - if (rc) { - BNX2X_ERR("HW init failed, aborting\n"); - bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - LOAD_ERROR_EXIT(bp, load_error2); - } - - /* Connect to IRQs */ - rc = bnx2x_setup_irqs(bp); - if (rc) { - bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - LOAD_ERROR_EXIT(bp, load_error2); - } - - /* Setup NIC internals and enable interrupts */ - bnx2x_nic_init(bp, load_code); - - /* Init per-function objects */ - bnx2x_init_bp_objs(bp); - - if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || - (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && - (bp->common.shmem2_base)) { - if (SHMEM2_HAS(bp, dcc_support)) - SHMEM2_WR(bp, dcc_support, - (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | - SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); - } - - bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; - rc = bnx2x_func_start(bp); - if (rc) { - BNX2X_ERR("Function start failed!\n"); - bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - LOAD_ERROR_EXIT(bp, load_error3); - } - - /* Send LOAD_DONE command to MCP */ - if (!BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - if (!load_code) { - BNX2X_ERR("MCP response failure, aborting\n"); - rc = -EBUSY; - LOAD_ERROR_EXIT(bp, load_error3); - } - } - - rc = bnx2x_setup_leading(bp); - if (rc) { - BNX2X_ERR("Setup leading failed!\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } - -#ifdef BCM_CNIC - /* Enable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); -#endif - - for_each_nondefault_queue(bp, i) { - rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); - if (rc) - LOAD_ERROR_EXIT(bp, load_error4); - } - - rc = bnx2x_init_rss_pf(bp); - if (rc) - LOAD_ERROR_EXIT(bp, load_error4); - - /* Now when Clients are configured we are ready to work */ - bp->state = 
BNX2X_STATE_OPEN; - - /* Configure a ucast MAC */ - rc = bnx2x_set_eth_mac(bp, true); - if (rc) - LOAD_ERROR_EXIT(bp, load_error4); - - if (bp->pending_max) { - bnx2x_update_max_mf_config(bp, bp->pending_max); - bp->pending_max = 0; - } - - if (bp->port.pmf) - bnx2x_initial_phy_init(bp, load_mode); - - /* Start fast path */ - - /* Initialize Rx filter. */ - netif_addr_lock_bh(bp->dev); - bnx2x_set_rx_mode(bp->dev); - netif_addr_unlock_bh(bp->dev); - - /* Start the Tx */ - switch (load_mode) { - case LOAD_NORMAL: - /* Tx queue should be only reenabled */ - netif_tx_wake_all_queues(bp->dev); - break; - - case LOAD_OPEN: - netif_tx_start_all_queues(bp->dev); - smp_mb__after_clear_bit(); - break; - - case LOAD_DIAG: - bp->state = BNX2X_STATE_DIAG; - break; - - default: - break; - } - - if (!bp->port.pmf) - bnx2x__link_status_update(bp); - - /* start the timer */ - mod_timer(&bp->timer, jiffies + bp->current_interval); - -#ifdef BCM_CNIC - bnx2x_setup_cnic_irq_info(bp); - if (bp->state == BNX2X_STATE_OPEN) - bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); -#endif - bnx2x_inc_load_cnt(bp); - - /* Wait for all pending SP commands to complete */ - if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { - BNX2X_ERR("Timeout waiting for SP elements to complete\n"); - bnx2x_nic_unload(bp, UNLOAD_CLOSE); - return -EBUSY; - } - - bnx2x_dcbx_init(bp); - return 0; - -#ifndef BNX2X_STOP_ON_ERROR -load_error4: -#ifdef BCM_CNIC - /* Disable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); -#endif -load_error3: - bnx2x_int_disable_sync(bp, 1); - - /* Clean queueable objects */ - bnx2x_squeeze_objects(bp); - - /* Free SKBs, SGEs, TPA pool and driver internals */ - bnx2x_free_skbs(bp); - for_each_rx_queue(bp, i) - bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); - - /* Release IRQs */ - bnx2x_free_irq(bp); -load_error2: - if (!BP_NOMCP(bp)) { - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); - } - - bp->port.pmf = 0; -load_error1: - bnx2x_napi_disable(bp); -load_error0: - bnx2x_free_mem(bp); - - return rc; -#endif /* ! BNX2X_STOP_ON_ERROR */ -} - -/* must be called with rtnl_lock */ -int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) -{ - int i; - bool global = false; - - if ((bp->state == BNX2X_STATE_CLOSED) || - (bp->state == BNX2X_STATE_ERROR)) { - /* We can get here if the driver has been unloaded - * during parity error recovery and is either waiting for a - * leader to complete or for other functions to unload and - * then ifdown has been issued. In this case we want to - * unload and let other functions to complete a recovery - * process. - */ - bp->recovery_state = BNX2X_RECOVERY_DONE; - bp->is_leader = 0; - bnx2x_release_leader_lock(bp); - smp_mb(); - - DP(NETIF_MSG_HW, "Releasing a leadership...\n"); - - return -EINVAL; - } - - /* - * It's important to set the bp->state to the value different from - * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() - * may restart the Tx from the NAPI context (see bnx2x_tx_int()). 
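When no management firmware is present, the driver arbitrates COMMON/PORT/FUNCTION initialization itself through the per-path load_count array used earlier in bnx2x_nic_load(). The accounting, modelled standalone:

enum load_level { LEVEL_COMMON, LEVEL_PORT, LEVEL_FUNCTION };

static int load_count[2][3];   /* per path: 0-common, 1-port0, 2-port1 */

enum load_level register_load(int path, int port)
{
	load_count[path][0]++;
	load_count[path][1 + port]++;
	if (load_count[path][0] == 1)
		return LEVEL_COMMON;     /* first function on the path */
	if (load_count[path][1 + port] == 1)
		return LEVEL_PORT;       /* first function on this port */
	return LEVEL_FUNCTION;
}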
- */ - bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; - smp_mb(); - - /* Stop Tx */ - bnx2x_tx_disable(bp); - -#ifdef BCM_CNIC - bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); -#endif - - bp->rx_mode = BNX2X_RX_MODE_NONE; - - del_timer_sync(&bp->timer); - - /* Set ALWAYS_ALIVE bit in shmem */ - bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; - - bnx2x_drv_pulse(bp); - - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - /* Cleanup the chip if needed */ - if (unload_mode != UNLOAD_RECOVERY) - bnx2x_chip_cleanup(bp, unload_mode); - else { - /* Send the UNLOAD_REQUEST to the MCP */ - bnx2x_send_unload_req(bp, unload_mode); - - /* - * Prevent transactions to host from the functions on the - * engine that doesn't reset global blocks in case of global - * attention once gloabl blocks are reset and gates are opened - * (the engine which leader will perform the recovery - * last). - */ - if (!CHIP_IS_E1x(bp)) - bnx2x_pf_disable(bp); - - /* Disable HW interrupts, NAPI */ - bnx2x_netif_stop(bp, 1); - - /* Release IRQs */ - bnx2x_free_irq(bp); - - /* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); - } - - /* - * At this stage no more interrupts will arrive so we may safly clean - * the queueable objects here in case they failed to get cleaned so far. - */ - bnx2x_squeeze_objects(bp); - - /* There should be no more pending SP commands at this stage */ - bp->sp_state = 0; - - bp->port.pmf = 0; - - /* Free SKBs, SGEs, TPA pool and driver internals */ - bnx2x_free_skbs(bp); - for_each_rx_queue(bp, i) - bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); - - bnx2x_free_mem(bp); - - bp->state = BNX2X_STATE_CLOSED; - - /* Check if there are pending parity attentions. If there are - set - * RECOVERY_IN_PROGRESS. - */ - if (bnx2x_chk_parity_attn(bp, &global, false)) { - bnx2x_set_reset_in_progress(bp); - - /* Set RESET_IS_GLOBAL if needed */ - if (global) - bnx2x_set_reset_global(bp); - } - - - /* The last driver must disable a "close the gate" if there is no - * parity attention or "process kill" pending. - */ - if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) - bnx2x_disable_close_the_gate(bp); - - return 0; -} - -int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) -{ - u16 pmcsr; - - /* If there is no power capability, silently succeed */ - if (!bp->pm_cap) { - DP(NETIF_MSG_HW, "No power capability. Breaking.\n"); - return 0; - } - - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); - - switch (state) { - case PCI_D0: - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | - PCI_PM_CTRL_PME_STATUS)); - - if (pmcsr & PCI_PM_CTRL_STATE_MASK) - /* delay required during transition out of D3hot */ - msleep(20); - break; - - case PCI_D3hot: - /* If there are other clients above don't - shut down the power */ - if (atomic_read(&bp->pdev->enable_cnt) != 1) - return 0; - /* Don't shut down the power for emulation and FPGA */ - if (CHIP_REV_IS_SLOW(bp)) - return 0; - - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - pmcsr |= 3; - - if (bp->wol) - pmcsr |= PCI_PM_CTRL_PME_ENABLE; - - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - pmcsr); - - /* No more memory access after this point until - * device is brought back to D0. 
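The D3hot transition above is ordinary PMCSR arithmetic: clear the two power-state bits, select state 3, and arm PME when wake-on-LAN is wanted. In isolation, with mask values from the PCI PM capability layout:

#include <stdint.h>

#define PM_CTRL_STATE_MASK 0x0003u
#define PM_CTRL_PME_ENABLE 0x0100u

uint16_t pmcsr_to_d3hot(uint16_t pmcsr, int wol)
{
	pmcsr &= ~PM_CTRL_STATE_MASK;
	pmcsr |= 3u;                    /* D3hot */
	if (wol)
		pmcsr |= PM_CTRL_PME_ENABLE;
	return pmcsr;
}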
- */ - break; - - default: - return -EINVAL; - } - return 0; -} - -/* - * net_device service functions - */ -int bnx2x_poll(struct napi_struct *napi, int budget) -{ - int work_done = 0; - u8 cos; - struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, - napi); - struct bnx2x *bp = fp->bp; - - while (1) { -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) { - napi_complete(napi); - return 0; - } -#endif - - for_each_cos_in_tx_queue(fp, cos) - if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) - bnx2x_tx_int(bp, &fp->txdata[cos]); - - - if (bnx2x_has_rx_work(fp)) { - work_done += bnx2x_rx_int(fp, budget - work_done); - - /* must not complete if we consumed full budget */ - if (work_done >= budget) - break; - } - - /* Fall out from the NAPI loop if needed */ - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { -#ifdef BCM_CNIC - /* No need to update SB for FCoE L2 ring as long as - * it's connected to the default SB and the SB - * has been updated when NAPI was scheduled. - */ - if (IS_FCOE_FP(fp)) { - napi_complete(napi); - break; - } -#endif - - bnx2x_update_fpsb_idx(fp); - /* bnx2x_has_rx_work() reads the status block, - * thus we need to ensure that status block indices - * have been actually read (bnx2x_update_fpsb_idx) - * prior to this check (bnx2x_has_rx_work) so that - * we won't write the "newer" value of the status block - * to IGU (if there was a DMA right after - * bnx2x_has_rx_work and if there is no rmb, the memory - * reading (bnx2x_update_fpsb_idx) may be postponed - * to right before bnx2x_ack_sb). In this case there - * will never be another interrupt until there is - * another update of the status block, while there - * is still unhandled work. - */ - rmb(); - - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { - napi_complete(napi); - /* Re-enable interrupts */ - DP(NETIF_MSG_HW, - "Update index to %d\n", fp->fp_hc_idx); - bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, - le16_to_cpu(fp->fp_hc_idx), - IGU_INT_ENABLE, 1); - break; - } - } - } - - return work_done; -} - -/* we split the first BD into headers and data BDs - * to ease the pain of our fellow microcode engineers - * we use one mapping for both BDs - * So far this has only been observed to happen - * in Other Operating Systems(TM) - */ -static noinline u16 bnx2x_tx_split(struct bnx2x *bp, - struct bnx2x_fp_txdata *txdata, - struct sw_tx_bd *tx_buf, - struct eth_tx_start_bd **tx_bd, u16 hlen, - u16 bd_prod, int nbd) -{ - struct eth_tx_start_bd *h_tx_bd = *tx_bd; - struct eth_tx_bd *d_tx_bd; - dma_addr_t mapping; - int old_len = le16_to_cpu(h_tx_bd->nbytes); - - /* first fix first BD */ - h_tx_bd->nbd = cpu_to_le16(nbd); - h_tx_bd->nbytes = cpu_to_le16(hlen); - - DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " - "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, - h_tx_bd->addr_lo, h_tx_bd->nbd); - - /* now get a new data BD - * (after the pbd) and fill it */ - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; - - mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), - le32_to_cpu(h_tx_bd->addr_lo)) + hlen; - - d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); - - /* this marks the BD as one that has no individual mapping */ - tx_buf->flags |= BNX2X_TSO_SPLIT_BD; - - DP(NETIF_MSG_TX_QUEUED, - "TSO split data size is %d (%x:%x)\n", - d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); - - /* update tx_bd */ - *tx_bd = (struct eth_tx_start_bd 
*)d_tx_bd; - - return bd_prod; -} - -static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) -{ - if (fix > 0) - csum = (u16) ~csum_fold(csum_sub(csum, - csum_partial(t_header - fix, fix, 0))); - - else if (fix < 0) - csum = (u16) ~csum_fold(csum_add(csum, - csum_partial(t_header, -fix, 0))); - - return swab16(csum); -} - -static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) -{ - u32 rc; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - rc = XMIT_PLAIN; - - else { - if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { - rc = XMIT_CSUM_V6; - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - rc |= XMIT_CSUM_TCP; - - } else { - rc = XMIT_CSUM_V4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - rc |= XMIT_CSUM_TCP; - } - } - - if (skb_is_gso_v6(skb)) - rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6; - else if (skb_is_gso(skb)) - rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP; - - return rc; -} - -#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) -/* check if packet requires linearization (packet is too fragmented) - no need to check fragmentation if page size > 8K (there will be no - violation to FW restrictions) */ -static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, - u32 xmit_type) -{ - int to_copy = 0; - int hlen = 0; - int first_bd_sz = 0; - - /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ - if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { - - if (xmit_type & XMIT_GSO) { - unsigned short lso_mss = skb_shinfo(skb)->gso_size; - /* Check if LSO packet needs to be copied: - 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ - int wnd_size = MAX_FETCH_BD - 3; - /* Number of windows to check */ - int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; - int wnd_idx = 0; - int frag_idx = 0; - u32 wnd_sum = 0; - - /* Headers length */ - hlen = (int)(skb_transport_header(skb) - skb->data) + - tcp_hdrlen(skb); - - /* Amount of data (w/o headers) on linear part of SKB*/ - first_bd_sz = skb_headlen(skb) - hlen; - - wnd_sum = first_bd_sz; - - /* Calculate the first sum - it's special */ - for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) - wnd_sum += - skb_shinfo(skb)->frags[frag_idx].size; - - /* If there was data on linear skb data - check it */ - if (first_bd_sz > 0) { - if (unlikely(wnd_sum < lso_mss)) { - to_copy = 1; - goto exit_lbl; - } - - wnd_sum -= first_bd_sz; - } - - /* Others are easier: run through the frag list and - check all windows */ - for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { - wnd_sum += - skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size; - - if (unlikely(wnd_sum < lso_mss)) { - to_copy = 1; - break; - } - wnd_sum -= - skb_shinfo(skb)->frags[wnd_idx].size; - } - } else { - /* in non-LSO too fragmented packet should always - be linearized */ - to_copy = 1; - } - } - -exit_lbl: - if (unlikely(to_copy)) - DP(NETIF_MSG_TX_QUEUED, - "Linearization IS REQUIRED for %s packet. " - "num_frags %d hlen %d first_bd_sz %d\n", - (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", - skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); - - return to_copy; -} -#endif - -static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, - u32 xmit_type) -{ - *parsing_data |= (skb_shinfo(skb)->gso_size << - ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & - ETH_TX_PARSE_BD_E2_LSO_MSS; - if ((xmit_type & XMIT_GSO_V6) && - (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) - *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; -} - -/** - * bnx2x_set_pbd_gso - update PBD in GSO case. 
- * - * @skb: packet skb - * @pbd: parse BD - * @xmit_type: xmit flags - */ -static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, - struct eth_tx_parse_bd_e1x *pbd, - u32 xmit_type) -{ - pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); - pbd->tcp_flags = pbd_tcp_flags(skb); - - if (xmit_type & XMIT_GSO_V4) { - pbd->ip_id = swab16(ip_hdr(skb)->id); - pbd->tcp_pseudo_csum = - swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0)); - - } else - pbd->tcp_pseudo_csum = - swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0)); - - pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN; -} - -/** - * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length - * - * @bp: driver handle - * @skb: packet skb - * @parsing_data: data to be updated - * @xmit_type: xmit flags - * - * 57712 related - */ -static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, - u32 *parsing_data, u32 xmit_type) -{ - *parsing_data |= - ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; - - if (xmit_type & XMIT_CSUM_TCP) { - *parsing_data |= ((tcp_hdrlen(skb) / 4) << - ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & - ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; - - return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; - } else - /* We support checksum offload for TCP and UDP only. - * No need to pass the UDP header length - it's a constant. - */ - return skb_transport_header(skb) + - sizeof(struct udphdr) - skb->data; -} - -static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, - struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) -{ - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; - - if (xmit_type & XMIT_CSUM_V4) - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IP_CSUM; - else - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IPV6; - - if (!(xmit_type & XMIT_CSUM_TCP)) - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; -} - -/** - * bnx2x_set_pbd_csum - update PBD with checksum and return header length - * - * @bp: driver handle - * @skb: packet skb - * @pbd: parse BD to be updated - * @xmit_type: xmit flags - */ -static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, - struct eth_tx_parse_bd_e1x *pbd, - u32 xmit_type) -{ - u8 hlen = (skb_network_header(skb) - skb->data) >> 1; - - /* for now NS flag is not used in Linux */ - pbd->global_data = - (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << - ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); - - pbd->ip_hlen_w = (skb_transport_header(skb) - - skb_network_header(skb)) >> 1; - - hlen += pbd->ip_hlen_w; - - /* We support checksum offload for TCP and UDP only */ - if (xmit_type & XMIT_CSUM_TCP) - hlen += tcp_hdrlen(skb) / 2; - else - hlen += sizeof(struct udphdr) / 2; - - pbd->total_hlen_w = cpu_to_le16(hlen); - hlen = hlen*2; - - if (xmit_type & XMIT_CSUM_TCP) { - pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); - - } else { - s8 fix = SKB_CS_OFF(skb); /* signed! 
*/ - - DP(NETIF_MSG_TX_QUEUED, - "hlen %d fix %d csum before fix %x\n", - le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); - - /* HW bug: fixup the CSUM */ - pbd->tcp_pseudo_csum = - bnx2x_csum_fix(skb_transport_header(skb), - SKB_CS(skb), fix); - - DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", - pbd->tcp_pseudo_csum); - } - - return hlen; -} - -/* called with netif_tx_lock - * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call - * netif_wake_queue() - */ -netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - - struct bnx2x_fastpath *fp; - struct netdev_queue *txq; - struct bnx2x_fp_txdata *txdata; - struct sw_tx_bd *tx_buf; - struct eth_tx_start_bd *tx_start_bd, *first_bd; - struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; - struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; - struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; - u32 pbd_e2_parsing_data = 0; - u16 pkt_prod, bd_prod; - int nbd, txq_index, fp_index, txdata_index; - dma_addr_t mapping; - u32 xmit_type = bnx2x_xmit_type(bp, skb); - int i; - u8 hlen = 0; - __le16 pkt_size = 0; - struct ethhdr *eth; - u8 mac_type = UNICAST_ADDRESS; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return NETDEV_TX_BUSY; -#endif - - txq_index = skb_get_queue_mapping(skb); - txq = netdev_get_tx_queue(dev, txq_index); - - BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); - - /* decode the fastpath index and the cos index from the txq */ - fp_index = TXQ_TO_FP(txq_index); - txdata_index = TXQ_TO_COS(txq_index); - -#ifdef BCM_CNIC - /* - * Override the above for the FCoE queue: - * - FCoE fp entry is right after the ETH entries. - * - FCoE L2 queue uses bp->txdata[0] only. - */ - if (unlikely(!NO_FCOE(bp) && (txq_index == - bnx2x_fcoe_tx(bp, txq_index)))) { - fp_index = FCOE_IDX; - txdata_index = 0; - } -#endif - - /* enable this debug print to view the transmission queue being used - DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d", - txq_index, fp_index, txdata_index); */ - - /* locate the fastpath and the txdata */ - fp = &bp->fp[fp_index]; - txdata = &fp->txdata[txdata_index]; - - /* enable this debug print to view the tranmission details - DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d" - " tx_data ptr %p fp pointer %p", - txdata->cid, fp_index, txdata_index, txdata, fp); */ - - if (unlikely(bnx2x_tx_avail(bp, txdata) < - (skb_shinfo(skb)->nr_frags + 3))) { - fp->eth_q_stats.driver_xoff++; - netif_tx_stop_queue(txq); - BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); - return NETDEV_TX_BUSY; - } - - DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x " - "protocol(%x,%x) gso type %x xmit_type %x\n", - txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, - ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); - - eth = (struct ethhdr *)skb->data; - - /* set flag according to packet type (UNICAST_ADDRESS is default)*/ - if (unlikely(is_multicast_ether_addr(eth->h_dest))) { - if (is_broadcast_ether_addr(eth->h_dest)) - mac_type = BROADCAST_ADDRESS; - else - mac_type = MULTICAST_ADDRESS; - } - -#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) - /* First, check if we need to linearize the skb (due to FW - restrictions). 
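bnx2x_pkt_req_lin(), defined above and called just below, enforces a firmware rule that every window of consecutive fragments must cover at least one MSS. Its core is a sliding-window sum; a simplified sketch that ignores the special handling of the linear part:

#include <stdbool.h>
#include <stdint.h>

bool needs_linearize(const uint32_t *frag_sz, int nfrags,
		     int wnd, uint32_t mss)
{
	uint32_t sum = 0;

	for (int i = 0; i < nfrags; i++) {
		sum += frag_sz[i];
		if (i >= wnd)
			sum -= frag_sz[i - wnd];   /* slide the window */
		if (i >= wnd - 1 && sum < mss)
			return true;               /* window under one MSS */
	}
	return false;
}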
No need to check fragmentation if page size > 8K - (there will be no violation of FW restrictions) */ - if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { - /* Statistics of linearization */ - bp->lin_cnt++; - if (skb_linearize(skb) != 0) { - DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " - "silently dropping this SKB\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - } -#endif - /* Map skb linear data for DMA */ - mapping = dma_map_single(&bp->pdev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - " - "silently dropping this SKB\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - /* - Please read carefully. First we use one BD which we mark as start, - then we have a parsing info BD (used for TSO or xsum), - and only then we have the rest of the TSO BDs. - (don't forget to mark the last one as last, - and to unmap only AFTER you write to the BD ...) - And above all, all PBD sizes are in words - NOT DWORDS! - */ - - /* get current pkt produced now - advance it just before sending packet - * since mapping of pages may fail and cause packet to be dropped - */ - pkt_prod = txdata->tx_pkt_prod; - bd_prod = TX_BD(txdata->tx_bd_prod); - - /* get a tx_buf and first BD - * tx_start_bd may be changed during SPLIT, - * but first_bd will always stay first - */ - tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; - tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; - first_bd = tx_start_bd; - - tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; - SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, - mac_type); - - /* header nbd */ - SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); - - /* remember the first BD of the packet */ - tx_buf->first_bd = txdata->tx_bd_prod; - tx_buf->skb = skb; - tx_buf->flags = 0; - - DP(NETIF_MSG_TX_QUEUED, - "sending pkt %u @%p next_idx %u bd %u @%p\n", - pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); - - if (vlan_tx_tag_present(skb)) { - tx_start_bd->vlan_or_ethertype = - cpu_to_le16(vlan_tx_tag_get(skb)); - tx_start_bd->bd_flags.as_bitfield |= - (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); - } else - tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); - - /* turn on parsing and get a BD */ - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - - if (xmit_type & XMIT_CSUM) - bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); - - if (!CHIP_IS_E1x(bp)) { - pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; - memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); - /* Set PBD in checksum offload case */ - if (xmit_type & XMIT_CSUM) - hlen = bnx2x_set_pbd_csum_e2(bp, skb, - &pbd_e2_parsing_data, - xmit_type); - if (IS_MF_SI(bp)) { - /* - * fill in the MAC addresses in the PBD - for local - * switching - */ - bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, - &pbd_e2->src_mac_addr_mid, - &pbd_e2->src_mac_addr_lo, - eth->h_source); - bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, - &pbd_e2->dst_mac_addr_mid, - &pbd_e2->dst_mac_addr_lo, - eth->h_dest); - } - } else { - pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; - memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); - /* Set PBD in checksum offload case */ - if (xmit_type & XMIT_CSUM) - hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); - - } - - /* Setup the data pointer of the first BD of the packet */ - tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - 
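-	/* Editor's sketch - not part of the original patch. The two
-	 * assignments above split the 64-bit DMA mapping into the 32-bit
-	 * halves the BD expects. Assuming U64_HI/U64_LO are defined along
-	 * the lines of the helpers in bnx2x.h:
-	 *
-	 *	#define U64_LO(x)	((u32)((u64)(x) & 0xffffffff))
-	 *	#define U64_HI(x)	((u32)(((u64)(x)) >> 32))
-	 *
-	 * a mapping of 0x0000000123456789 would be programmed as
-	 * addr_hi = 0x00000001 and addr_lo = 0x23456789, each converted
-	 * to little endian by cpu_to_le32() as done here.
-	 */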
nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ - tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); - pkt_size = tx_start_bd->nbytes; - - DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" - " nbytes %d flags %x vlan %x\n", - tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, - le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), - tx_start_bd->bd_flags.as_bitfield, - le16_to_cpu(tx_start_bd->vlan_or_ethertype)); - - if (xmit_type & XMIT_GSO) { - - DP(NETIF_MSG_TX_QUEUED, - "TSO packet len %d hlen %d total len %d tso size %d\n", - skb->len, hlen, skb_headlen(skb), - skb_shinfo(skb)->gso_size); - - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; - - if (unlikely(skb_headlen(skb) > hlen)) - bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, - &tx_start_bd, hlen, - bd_prod, ++nbd); - if (!CHIP_IS_E1x(bp)) - bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, - xmit_type); - else - bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); - } - - /* Set the PBD's parsing_data field if not zero - * (for the chips newer than 57711). - */ - if (pbd_e2_parsing_data) - pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); - - tx_data_bd = (struct eth_tx_bd *)tx_start_bd; - - /* Handle fragmented skb */ - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - mapping = dma_map_page(&bp->pdev->dev, frag->page, - frag->page_offset, frag->size, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - - DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " - "dropping packet...\n"); - - /* we need to unmap all buffers already mapped - * for this SKB; - * first_bd->nbd needs to be properly updated - * before the call to bnx2x_free_tx_pkt - */ - first_bd->nbd = cpu_to_le16(nbd); - bnx2x_free_tx_pkt(bp, txdata, - TX_BD(txdata->tx_pkt_prod)); - return NETDEV_TX_OK; - } - - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; - if (total_pkt_bd == NULL) - total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; - - tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - tx_data_bd->nbytes = cpu_to_le16(frag->size); - le16_add_cpu(&pkt_size, frag->size); - nbd++; - - DP(NETIF_MSG_TX_QUEUED, - "frag %d bd @%p addr (%x:%x) nbytes %d\n", - i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, - le16_to_cpu(tx_data_bd->nbytes)); - } - - DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); - - /* update with actual num BDs */ - first_bd->nbd = cpu_to_le16(nbd); - - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - - /* now send a tx doorbell, counting the next BD - * if the packet contains or ends with it - */ - if (TX_BD_POFF(bd_prod) < nbd) - nbd++; - - /* total_pkt_bytes should be set on the first data BD if - * it's not an LSO packet and there is more than one - * data BD. In this case pkt_size is limited by an MTU value. - * However we prefer to set it for an LSO packet (while we don't - * have to) in order to save some CPU cycles in a non-LSO - * case, which we care much more about. 
- */ - if (total_pkt_bd != NULL) - total_pkt_bd->total_pkt_bytes = pkt_size; - - if (pbd_e1x) - DP(NETIF_MSG_TX_QUEUED, - "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" - " tcp_flags %x xsum %x seq %u hlen %u\n", - pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, - pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, - pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, - le16_to_cpu(pbd_e1x->total_hlen_w)); - if (pbd_e2) - DP(NETIF_MSG_TX_QUEUED, - "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", - pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid, - pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi, - pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo, - pbd_e2->parsing_data); - DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); - - txdata->tx_pkt_prod++; - /* - * Make sure that the BD data is updated before updating the producer - * since FW might read the BD right after the producer is updated. - * This is only applicable for weak-ordered memory model archs such - * as IA-64. The following barrier is also mandatory since the FW - * assumes packets must have BDs. - */ - wmb(); - - txdata->tx_db.data.prod += nbd; - barrier(); - - DOORBELL(bp, txdata->cid, txdata->tx_db.raw); - - mmiowb(); - - txdata->tx_bd_prod += nbd; - - if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) { - netif_tx_stop_queue(txq); - - /* paired memory barrier is in bnx2x_tx_int(), we have to keep - * ordering of set_bit() in netif_tx_stop_queue() and read of - * fp->bd_tx_cons */ - smp_mb(); - - fp->eth_q_stats.driver_xoff++; - if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3) - netif_tx_wake_queue(txq); - } - txdata->tx_pkt++; - - return NETDEV_TX_OK; -} - -/** - * bnx2x_setup_tc - routine to configure net_device for multi tc - * - * @dev: net device to configure - * @num_tc: number of traffic classes to enable - * - * callback connected to the ndo_setup_tc function pointer - */ -int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) -{ - int cos, prio, count, offset; - struct bnx2x *bp = netdev_priv(dev); - - /* setup tc must be called under rtnl lock */ - ASSERT_RTNL(); - - /* no traffic classes requested, aborting */ - if (!num_tc) { - netdev_reset_tc(dev); - return 0; - } - - /* requested to support too many traffic classes */ - if (num_tc > bp->max_cos) { - DP(NETIF_MSG_TX_ERR, "support for too many traffic classes" - " requested: %d. 
max supported is %d", - num_tc, bp->max_cos); - return -EINVAL; - } - - /* declare the number of supported traffic classes */ - if (netdev_set_num_tc(dev, num_tc)) { - DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes", - num_tc); - return -EINVAL; - } - - /* configure priority to traffic class mapping */ - for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { - netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); - DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", - prio, bp->prio_to_cos[prio]); - } - - - /* Use this configuration to differentiate tc0 from other COSes - This can be used for ETS or PFC, and saves the effort of setting - up a multi-class queue disc or negotiating DCBX with a switch - netdev_set_prio_tc_map(dev, 0, 0); - DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0); - for (prio = 1; prio < 16; prio++) { - netdev_set_prio_tc_map(dev, prio, 1); - DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1); - } */ - - /* configure traffic class to transmission queue mapping */ - for (cos = 0; cos < bp->max_cos; cos++) { - count = BNX2X_NUM_ETH_QUEUES(bp); - offset = cos * MAX_TXQS_PER_COS; - netdev_set_tc_queue(dev, cos, count, offset); - DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d", - cos, offset, count); - } - - return 0; -} - -/* called with rtnl_lock */ -int bnx2x_change_mac_addr(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - struct bnx2x *bp = netdev_priv(dev); - int rc = 0; - - if (!is_valid_ether_addr((u8 *)(addr->sa_data))) - return -EINVAL; - - if (netif_running(dev)) { - rc = bnx2x_set_eth_mac(bp, false); - if (rc) - return rc; - } - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - - if (netif_running(dev)) - rc = bnx2x_set_eth_mac(bp, true); - - return rc; -} - -static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) -{ - union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); - struct bnx2x_fastpath *fp = &bp->fp[fp_index]; - u8 cos; - - /* Common */ -#ifdef BCM_CNIC - if (IS_FCOE_IDX(fp_index)) { - memset(sb, 0, sizeof(union host_hc_status_block)); - fp->status_blk_mapping = 0; - - } else { -#endif - /* status blocks */ - if (!CHIP_IS_E1x(bp)) - BNX2X_PCI_FREE(sb->e2_sb, - bnx2x_fp(bp, fp_index, - status_blk_mapping), - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_FREE(sb->e1x_sb, - bnx2x_fp(bp, fp_index, - status_blk_mapping), - sizeof(struct host_hc_status_block_e1x)); -#ifdef BCM_CNIC - } -#endif - /* Rx */ - if (!skip_rx_queue(bp, fp_index)) { - bnx2x_free_rx_bds(fp); - - /* fastpath rx rings: rx_buf rx_desc rx_comp */ - BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); - BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), - bnx2x_fp(bp, fp_index, rx_desc_mapping), - sizeof(struct eth_rx_bd) * NUM_RX_BD); - - BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), - bnx2x_fp(bp, fp_index, rx_comp_mapping), - sizeof(struct eth_fast_path_rx_cqe) * - NUM_RCQ_BD); - - /* SGE ring */ - BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); - BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), - bnx2x_fp(bp, fp_index, rx_sge_mapping), - BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); - } - - /* Tx */ - if (!skip_tx_queue(bp, fp_index)) { - /* fastpath tx rings: tx_buf tx_desc */ - for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; - - DP(BNX2X_MSG_SP, - "freeing tx memory of fp %d cos %d cid %d", - fp_index, cos, txdata->cid); - - BNX2X_FREE(txdata->tx_buf_ring); - BNX2X_PCI_FREE(txdata->tx_desc_ring, - txdata->tx_desc_mapping, - sizeof(union eth_tx_bd_types) * 
NUM_TX_BD); - } - } - /* end of fastpath */ -} - -void bnx2x_free_fp_mem(struct bnx2x *bp) -{ - int i; - for_each_queue(bp, i) - bnx2x_free_fp_mem_at(bp, i); -} - -static inline void set_sb_shortcuts(struct bnx2x *bp, int index) -{ - union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); - if (!CHIP_IS_E1x(bp)) { - bnx2x_fp(bp, index, sb_index_values) = - (__le16 *)status_blk.e2_sb->sb.index_values; - bnx2x_fp(bp, index, sb_running_index) = - (__le16 *)status_blk.e2_sb->sb.running_index; - } else { - bnx2x_fp(bp, index, sb_index_values) = - (__le16 *)status_blk.e1x_sb->sb.index_values; - bnx2x_fp(bp, index, sb_running_index) = - (__le16 *)status_blk.e1x_sb->sb.running_index; - } -} - -static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) -{ - union host_hc_status_block *sb; - struct bnx2x_fastpath *fp = &bp->fp[index]; - int ring_size = 0; - u8 cos; - - /* if rx_ring_size specified - use it */ - int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : - MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); - - /* allocate at least number of buffers required by FW */ - rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : - MIN_RX_SIZE_TPA, - rx_ring_size); - - /* Common */ - sb = &bnx2x_fp(bp, index, status_blk); -#ifdef BCM_CNIC - if (!IS_FCOE_IDX(index)) { -#endif - /* status blocks */ - if (!CHIP_IS_E1x(bp)) - BNX2X_PCI_ALLOC(sb->e2_sb, - &bnx2x_fp(bp, index, status_blk_mapping), - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_ALLOC(sb->e1x_sb, - &bnx2x_fp(bp, index, status_blk_mapping), - sizeof(struct host_hc_status_block_e1x)); -#ifdef BCM_CNIC - } -#endif - - /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to - * set shortcuts for it. - */ - if (!IS_FCOE_IDX(index)) - set_sb_shortcuts(bp, index); - - /* Tx */ - if (!skip_tx_queue(bp, index)) { - /* fastpath tx rings: tx_buf tx_desc */ - for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; - - DP(BNX2X_MSG_SP, "allocating tx memory of " - "fp %d cos %d", - index, cos); - - BNX2X_ALLOC(txdata->tx_buf_ring, - sizeof(struct sw_tx_bd) * NUM_TX_BD); - BNX2X_PCI_ALLOC(txdata->tx_desc_ring, - &txdata->tx_desc_mapping, - sizeof(union eth_tx_bd_types) * NUM_TX_BD); - } - } - - /* Rx */ - if (!skip_rx_queue(bp, index)) { - /* fastpath rx rings: rx_buf rx_desc rx_comp */ - BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), - sizeof(struct sw_rx_bd) * NUM_RX_BD); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), - &bnx2x_fp(bp, index, rx_desc_mapping), - sizeof(struct eth_rx_bd) * NUM_RX_BD); - - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring), - &bnx2x_fp(bp, index, rx_comp_mapping), - sizeof(struct eth_fast_path_rx_cqe) * - NUM_RCQ_BD); - - /* SGE ring */ - BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), - sizeof(struct sw_rx_page) * NUM_RX_SGE); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), - &bnx2x_fp(bp, index, rx_sge_mapping), - BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); - /* RX BD ring */ - bnx2x_set_next_page_rx_bd(fp); - - /* CQ ring */ - bnx2x_set_next_page_rx_cq(fp); - - /* BDs */ - ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); - if (ring_size < rx_ring_size) - goto alloc_mem_err; - } - - return 0; - -/* handles low memory cases */ -alloc_mem_err: - BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n", - index, ring_size); - /* FW will drop all packets if queue is not big enough, - * In these cases we disable the queue - * Min size is different for OOO, TPA and non-TPA queues - */ - if (ring_size < (fp->disable_tpa ? 
- MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { - /* release memory allocated for this queue */ - bnx2x_free_fp_mem_at(bp, index); - return -ENOMEM; - } - return 0; -} - -int bnx2x_alloc_fp_mem(struct bnx2x *bp) -{ - int i; - - /** - * 1. Allocate FP for leading - fatal if error - * 2. {CNIC} Allocate FCoE FP - fatal if error - * 3. {CNIC} Allocate OOO + FWD - disable OOO if error - * 4. Allocate RSS - fix number of queues if error - */ - - /* leading */ - if (bnx2x_alloc_fp_mem_at(bp, 0)) - return -ENOMEM; - -#ifdef BCM_CNIC - if (!NO_FCOE(bp)) - /* FCoE */ - if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) - /* we will fail the load process instead of marking - * NO_FCOE_FLAG - */ - return -ENOMEM; -#endif - - /* RSS */ - for_each_nondefault_eth_queue(bp, i) - if (bnx2x_alloc_fp_mem_at(bp, i)) - break; - - /* handle memory failures */ - if (i != BNX2X_NUM_ETH_QUEUES(bp)) { - int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; - - WARN_ON(delta < 0); -#ifdef BCM_CNIC - /** - * move non eth FPs next to last eth FP - * must be done in that order - * FCOE_IDX < FWD_IDX < OOO_IDX - */ - - /* move the FCoE fp even if NO_FCOE_FLAG is on */ - bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); -#endif - bp->num_queues -= delta; - BNX2X_ERR("Adjusted num of queues from %d to %d\n", - bp->num_queues + delta, bp->num_queues); - } - - return 0; -} - -void bnx2x_free_mem_bp(struct bnx2x *bp) -{ - kfree(bp->fp); - kfree(bp->msix_table); - kfree(bp->ilt); -} - -int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) -{ - struct bnx2x_fastpath *fp; - struct msix_entry *tbl; - struct bnx2x_ilt *ilt; - int msix_table_size = 0; - - /* - * The biggest MSI-X table we might need is the maximum number of fast - * path IGU SBs plus the default SB (for the PF). - */ - msix_table_size = bp->igu_sb_cnt + 1; - - /* fp array: RSS plus CNIC related L2 queues */ - fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) * - sizeof(*fp), GFP_KERNEL); - if (!fp) - goto alloc_err; - bp->fp = fp; - - /* msix table */ - tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL); - if (!tbl) - goto alloc_err; - bp->msix_table = tbl; - - /* ilt */ - ilt = kzalloc(sizeof(*ilt), GFP_KERNEL); - if (!ilt) - goto alloc_err; - bp->ilt = ilt; - - return 0; -alloc_err: - bnx2x_free_mem_bp(bp); - return -ENOMEM; - -} - -int bnx2x_reload_if_running(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - - if (unlikely(!netif_running(dev))) - return 0; - - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - return bnx2x_nic_load(bp, LOAD_NORMAL); -} - -int bnx2x_get_cur_phy_idx(struct bnx2x *bp) -{ - u32 sel_phy_idx = 0; - if (bp->link_params.num_phys <= 1) - return INT_PHY; - - if (bp->link_vars.link_up) { - sel_phy_idx = EXT_PHY1; - /* In case link is SERDES, check if the EXT_PHY2 is the one */ - if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && - (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) - sel_phy_idx = EXT_PHY2; - } else { - - switch (bnx2x_phy_selection(&bp->link_params)) { - case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: - case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: - case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: - sel_phy_idx = EXT_PHY1; - break; - case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: - case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: - sel_phy_idx = EXT_PHY2; - break; - } - } - - return sel_phy_idx; - -} -int bnx2x_get_link_cfg_idx(struct bnx2x *bp) -{ - u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); - /* - * The selected active PHY is always after swapping (in case PHY - * swapping is enabled). 
So when swapping is enabled, we need to reverse - * the configuration - */ - - if (bp->link_params.multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED) { - if (sel_phy_idx == EXT_PHY1) - sel_phy_idx = EXT_PHY2; - else if (sel_phy_idx == EXT_PHY2) - sel_phy_idx = EXT_PHY1; - } - return LINK_CONFIG_IDX(sel_phy_idx); -} - -#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) -int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) -{ - struct bnx2x *bp = netdev_priv(dev); - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - - switch (type) { - case NETDEV_FCOE_WWNN: - *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, - cp->fcoe_wwn_node_name_lo); - break; - case NETDEV_FCOE_WWPN: - *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, - cp->fcoe_wwn_port_name_lo); - break; - default: - return -EINVAL; - } - - return 0; -} -#endif - -/* called with rtnl_lock */ -int bnx2x_change_mtu(struct net_device *dev, int new_mtu) -{ - struct bnx2x *bp = netdev_priv(dev); - - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - printk(KERN_ERR "Handling parity error recovery. Try again later\n"); - return -EAGAIN; - } - - if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || - ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) - return -EINVAL; - - /* This does not race with packet allocation - * because the actual alloc size is - * only updated as part of load - */ - dev->mtu = new_mtu; - - return bnx2x_reload_if_running(dev); -} - -u32 bnx2x_fix_features(struct net_device *dev, u32 features) -{ - struct bnx2x *bp = netdev_priv(dev); - - /* TPA requires Rx CSUM offloading */ - if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) - features &= ~NETIF_F_LRO; - - return features; -} - -int bnx2x_set_features(struct net_device *dev, u32 features) -{ - struct bnx2x *bp = netdev_priv(dev); - u32 flags = bp->flags; - bool bnx2x_reload = false; - - if (features & NETIF_F_LRO) - flags |= TPA_ENABLE_FLAG; - else - flags &= ~TPA_ENABLE_FLAG; - - if (features & NETIF_F_LOOPBACK) { - if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { - bp->link_params.loopback_mode = LOOPBACK_BMAC; - bnx2x_reload = true; - } - } else { - if (bp->link_params.loopback_mode != LOOPBACK_NONE) { - bp->link_params.loopback_mode = LOOPBACK_NONE; - bnx2x_reload = true; - } - } - - if (flags ^ bp->flags) { - bp->flags = flags; - bnx2x_reload = true; - } - - if (bnx2x_reload) { - if (bp->recovery_state == BNX2X_RECOVERY_DONE) - return bnx2x_reload_if_running(dev); - /* else: bnx2x_nic_load() will be called at end of recovery */ - } - - return 0; -} - -void bnx2x_tx_timeout(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - -#ifdef BNX2X_STOP_ON_ERROR - if (!bp->panic) - bnx2x_panic(); -#endif - - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - - /* This allows the netif to be shut down gracefully before resetting */ - schedule_delayed_work(&bp->sp_rtnl_task, 0); -} - -int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp; - - if (!dev) { - dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); - return -ENODEV; - } - bp = netdev_priv(dev); - - rtnl_lock(); - - pci_save_state(pdev); - - if (!netif_running(dev)) { - rtnl_unlock(); - return 0; - } - - netif_device_detach(dev); - - bnx2x_nic_unload(bp, UNLOAD_CLOSE); - - bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); - - rtnl_unlock(); - - return 0; -} - -int bnx2x_resume(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); 
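-	/* Editor's note - not part of the original patch. bnx2x_suspend()
-	 * and bnx2x_resume() are assumed to be wired up through the legacy
-	 * PCI power-management hooks in the driver's struct pci_driver in
-	 * bnx2x_main.c, roughly:
-	 *
-	 *	static struct pci_driver bnx2x_pci_driver = {
-	 *		.name    = DRV_MODULE_NAME,
-	 *		.suspend = bnx2x_suspend,
-	 *		.resume  = bnx2x_resume,
-	 *		...
-	 *	};
-	 *
-	 * so by the time they run, pci_get_drvdata() is expected to return
-	 * the net_device registered in bnx2x_init_one().
-	 */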
- struct bnx2x *bp; - int rc; - - if (!dev) { - dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); - return -ENODEV; - } - bp = netdev_priv(dev); - - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - printk(KERN_ERR "Handling parity error recovery. Try again later\n"); - return -EAGAIN; - } - - rtnl_lock(); - - pci_restore_state(pdev); - - if (!netif_running(dev)) { - rtnl_unlock(); - return 0; - } - - bnx2x_set_power_state(bp, PCI_D0); - netif_device_attach(dev); - - /* Since the chip was reset, clear the FW sequence number */ - bp->fw_seq = 0; - rc = bnx2x_nic_load(bp, LOAD_OPEN); - - rtnl_unlock(); - - return rc; -} - - -void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, - u32 cid) -{ - /* ustorm cxt validation */ - cxt->ustorm_ag_context.cdu_usage = - CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), - CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); - /* xcontext validation */ - cxt->xstorm_ag_context.cdu_reserved = - CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), - CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); -} - -static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, - u8 fw_sb_id, u8 sb_index, - u8 ticks) -{ - - u32 addr = BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); - REG_WR8(bp, addr, ticks); - DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", - port, fw_sb_id, sb_index, ticks); -} - -static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, - u16 fw_sb_id, u8 sb_index, - u8 disable) -{ - u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); - u32 addr = BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); - u16 flags = REG_RD16(bp, addr); - /* clear and set */ - flags &= ~HC_INDEX_DATA_HC_ENABLED; - flags |= enable_flag; - REG_WR16(bp, addr, flags); - DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", - port, fw_sb_id, sb_index, disable); -} - -void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, - u8 sb_index, u8 disable, u16 usec) -{ - int port = BP_PORT(bp); - u8 ticks = usec / BNX2X_BTR; - - storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); - - disable = disable ? 1 : (usec ? 0 : 1); - storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); -} diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h deleted file mode 100644 index 223bfee..0000000 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ /dev/null @@ -1,1491 +0,0 @@ -/* bnx2x_cmn.h: Broadcom Everest network driver. - * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - * Maintained by: Eilon Greenstein - * Written by: Eliezer Tamir - * Based on code from Michael Chan's bnx2 driver - * UDP CSUM errata workaround by Arik Gendelman - * Slowpath and fastpath rework by Vladislav Zolotarov - * Statistics and Link management by Yitchak Gertner - * - */ -#ifndef BNX2X_CMN_H -#define BNX2X_CMN_H - -#include <linux/types.h> -#include <linux/pci.h> -#include <linux/netdevice.h> - - -#include "bnx2x.h" - -/* This is used as a replacement for an MCP if it's not present */ -extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ - -extern int num_queues; - -/************************ Macros ********************************/ -#define BNX2X_PCI_FREE(x, y, size) \ - do { \ - if (x) { \ - dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \ - x = NULL; \ - y = 0; \ - } \ - } while (0) - -#define BNX2X_FREE(x) \ - do { \ - if (x) { \ - kfree((void *)x); \ - x = NULL; \ - } \ - } while (0) - -#define BNX2X_PCI_ALLOC(x, y, size) \ - do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - memset((void *)x, 0, size); \ - } while (0) - -#define BNX2X_ALLOC(x, size) \ - do { \ - x = kzalloc(size, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - } while (0) - -/*********************** Interfaces **************************** - * Functions that need to be implemented by each driver version - */ -/* Init */ - -/** - * bnx2x_send_unload_req - request unload mode from the MCP. - * - * @bp: driver handle - * @unload_mode: requested function's unload mode - * - * Return unload mode returned by the MCP: COMMON, PORT or FUNC. - */ -u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); - -/** - * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. - * - * @bp: driver handle - */ -void bnx2x_send_unload_done(struct bnx2x *bp); - -/** - * bnx2x_config_rss_pf - configure RSS parameters. - * - * @bp: driver handle - * @ind_table: indirection table to configure - * @config_hash: re-configure RSS hash keys configuration - */ -int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); - -/** - * bnx2x__init_func_obj - init function object - * - * @bp: driver handle - * - * Initializes the Function Object with the appropriate - * parameters which include a function slow path driver - * interface. - */ -void bnx2x__init_func_obj(struct bnx2x *bp); - -/** - * bnx2x_setup_queue - setup eth queue. - * - * @bp: driver handle - * @fp: pointer to the fastpath structure - * @leading: boolean - * - */ -int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, - bool leading); - -/** - * bnx2x_setup_leading - bring up a leading eth queue. - * - * @bp: driver handle - */ -int bnx2x_setup_leading(struct bnx2x *bp); - -/** - * bnx2x_fw_command - send the MCP a request - * - * @bp: driver handle - * @command: request - * @param: request's parameter - * - * block until there is a reply - */ -u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); - -/** - * bnx2x_initial_phy_init - initialize link parameters structure variables. - * - * @bp: driver handle - * @load_mode: current mode - */ -u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); - -/** - * bnx2x_link_set - configure hw according to link parameters structure. - * - * @bp: driver handle - */ -void bnx2x_link_set(struct bnx2x *bp); - -/** - * bnx2x_link_test - query link status. - * - * @bp: driver handle - * @is_serdes: bool - * - * Returns 0 if link is UP. 
- */ -u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); - -/** - * bnx2x_drv_pulse - write driver pulse to shmem - * - * @bp: driver handle - * - * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox - * in the shmem. - */ -void bnx2x_drv_pulse(struct bnx2x *bp); - -/** - * bnx2x_igu_ack_sb - update IGU with current SB value - * - * @bp: driver handle - * @igu_sb_id: SB id - * @segment: SB segment - * @index: SB index - * @op: SB operation - * @update: is HW update required - */ -void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, - u16 index, u8 op, u8 update); - -/* Disable transactions from chip to host */ -void bnx2x_pf_disable(struct bnx2x *bp); - -/** - * bnx2x__link_status_update - handles link status change. - * - * @bp: driver handle - */ -void bnx2x__link_status_update(struct bnx2x *bp); - -/** - * bnx2x_link_report - report link status to upper layer. - * - * @bp: driver handle - */ -void bnx2x_link_report(struct bnx2x *bp); - -/* Non-atomic version of bnx2x_link_report() */ -void __bnx2x_link_report(struct bnx2x *bp); - -/** - * bnx2x_get_mf_speed - calculate MF speed. - * - * @bp: driver handle - * - * Takes into account current linespeed and MF configuration. - */ -u16 bnx2x_get_mf_speed(struct bnx2x *bp); - -/** - * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler - * - * @irq: irq number - * @dev_instance: private instance - */ -irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); - -/** - * bnx2x_interrupt - non-MSI-X interrupt handler - * - * @irq: irq number - * @dev_instance: private instance - */ -irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); -#ifdef BCM_CNIC - -/** - * bnx2x_cnic_notify - send command to cnic driver - * - * @bp: driver handle - * @cmd: command - */ -int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); - -/** - * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information - * - * @bp: driver handle - */ -void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); -#endif - -/** - * bnx2x_int_enable - enable HW interrupts. - * - * @bp: driver handle - */ -void bnx2x_int_enable(struct bnx2x *bp); - -/** - * bnx2x_int_disable_sync - disable interrupts. - * - * @bp: driver handle - * @disable_hw: true, disable HW interrupts. - * - * This function ensures that no - * ISRs or SP DPCs (sp_task) are running after it returns. - */ -void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); - -/** - * bnx2x_nic_init - init driver internals. - * - * @bp: driver handle - * @load_code: COMMON, PORT or FUNCTION - * - * Initializes: - * - rings - * - status blocks - * - etc. - */ -void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); - -/** - * bnx2x_alloc_mem - allocate driver's memory. - * - * @bp: driver handle - */ -int bnx2x_alloc_mem(struct bnx2x *bp); - -/** - * bnx2x_free_mem - release driver's memory. - * - * @bp: driver handle - */ -void bnx2x_free_mem(struct bnx2x *bp); - -/** - * bnx2x_set_num_queues - set number of queues according to mode. - * - * @bp: driver handle - */ -void bnx2x_set_num_queues(struct bnx2x *bp); - -/** - * bnx2x_chip_cleanup - cleanup chip internals. - * - * @bp: driver handle - * @unload_mode: COMMON, PORT, FUNCTION - * - * - Cleanup MAC configuration. - * - Closes clients. - * - etc. - */ -void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); - -/** - * bnx2x_acquire_hw_lock - acquire HW lock. 
- * - * @bp: driver handle - * @resource: resource bit which was locked - */ -int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); - -/** - * bnx2x_release_hw_lock - release HW lock. - * - * @bp: driver handle - * @resource: resource bit which was locked - */ -int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); - -/** - * bnx2x_release_leader_lock - release recovery leader lock - * - * @bp: driver handle - */ -int bnx2x_release_leader_lock(struct bnx2x *bp); - -/** - * bnx2x_set_eth_mac - configure eth MAC address in the HW - * - * @bp: driver handle - * @set: set or clear - * - * Configures according to the value in netdev->dev_addr. - */ -int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); - -/** - * bnx2x_set_rx_mode - set MAC filtering configurations. - * - * @dev: netdevice - * - * called with netif_tx_lock from dev_mcast.c - * If bp->state is OPEN, should be called with - * netif_addr_lock_bh() - */ -void bnx2x_set_rx_mode(struct net_device *dev); - -/** - * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. - * - * @bp: driver handle - * - * If bp->state is OPEN, should be called with - * netif_addr_lock_bh(). - */ -void bnx2x_set_storm_rx_mode(struct bnx2x *bp); - -/** - * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. - * - * @bp: driver handle - * @cl_id: client id - * @rx_mode_flags: rx mode configuration - * @rx_accept_flags: rx accept configuration - * @tx_accept_flags: tx accept configuration (tx switch) - * @ramrod_flags: ramrod configuration - */ -void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, - unsigned long rx_mode_flags, - unsigned long rx_accept_flags, - unsigned long tx_accept_flags, - unsigned long ramrod_flags); - -/* Parity errors related */ -void bnx2x_inc_load_cnt(struct bnx2x *bp); -u32 bnx2x_dec_load_cnt(struct bnx2x *bp); -bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); -bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); -void bnx2x_set_reset_in_progress(struct bnx2x *bp); -void bnx2x_set_reset_global(struct bnx2x *bp); -void bnx2x_disable_close_the_gate(struct bnx2x *bp); - -/** - * bnx2x_sp_event - handle ramrods completion. - * - * @fp: fastpath handle for the event - * @rr_cqe: eth_rx_cqe - */ -void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); - -/** - * bnx2x_ilt_set_info - prepare ILT configurations. - * - * @bp: driver handle - */ -void bnx2x_ilt_set_info(struct bnx2x *bp); - -/** - * bnx2x_dcbx_init - initialize dcbx protocol. - * - * @bp: driver handle - */ -void bnx2x_dcbx_init(struct bnx2x *bp); - -/** - * bnx2x_set_power_state - set power state to the requested value. - * - * @bp: driver handle - * @state: required state D0 or D3hot - * - * Currently only D0 and D3hot are supported. - */ -int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); - -/** - * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW. 
- * - * @bp: driver handle - * @value: new value - */ -void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); -/* Error handling */ -void bnx2x_panic_dump(struct bnx2x *bp); - -void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); - -/* dev_close main block */ -int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); - -/* dev_open main block */ -int bnx2x_nic_load(struct bnx2x *bp, int load_mode); - -/* hard_xmit callback */ -netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); - -/* setup_tc callback */ -int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); - -/* select_queue callback */ -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); - -/* reload helper */ -int bnx2x_reload_if_running(struct net_device *dev); - -int bnx2x_change_mac_addr(struct net_device *dev, void *p); - -/* NAPI poll Rx part */ -int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); - -void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); - -/* NAPI poll Tx part */ -int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); - -/* suspend/resume callbacks */ -int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); -int bnx2x_resume(struct pci_dev *pdev); - -/* Release IRQ vectors */ -void bnx2x_free_irq(struct bnx2x *bp); - -void bnx2x_free_fp_mem(struct bnx2x *bp); -int bnx2x_alloc_fp_mem(struct bnx2x *bp); -void bnx2x_init_rx_rings(struct bnx2x *bp); -void bnx2x_free_skbs(struct bnx2x *bp); -void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); -void bnx2x_netif_start(struct bnx2x *bp); - -/** - * bnx2x_enable_msix - set msix configuration. - * - * @bp: driver handle - * - * fills msix_table, requests vectors, updates num_queues - * according to number of available vectors. 
- */ -int bnx2x_enable_msix(struct bnx2x *bp); - -/** - * bnx2x_enable_msi - request MSI mode from the OS, update internals accordingly - * - * @bp: driver handle - */ -int bnx2x_enable_msi(struct bnx2x *bp); - -/** - * bnx2x_poll - NAPI callback - * - * @napi: napi structure - * @budget: NAPI budget - * - */ -int bnx2x_poll(struct napi_struct *napi, int budget); - -/** - * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure - * - * @bp: driver handle - */ -int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp); - -/** - * bnx2x_free_mem_bp - release memory outside the main driver structure - * - * @bp: driver handle - */ -void bnx2x_free_mem_bp(struct bnx2x *bp); - -/** - * bnx2x_change_mtu - change MTU netdev callback - * - * @dev: net device - * @new_mtu: requested mtu - * - */ -int bnx2x_change_mtu(struct net_device *dev, int new_mtu); - -#if defined(BCM_CNIC) && (defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)) -/** - * bnx2x_fcoe_get_wwn - return the requested WWN value for this port - * - * @dev: net_device - * @wwn: output buffer - * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port) - * - */ -int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); -#endif -u32 bnx2x_fix_features(struct net_device *dev, u32 features); -int bnx2x_set_features(struct net_device *dev, u32 features); - -/** - * bnx2x_tx_timeout - tx timeout netdev callback - * - * @dev: net device - */ -void bnx2x_tx_timeout(struct net_device *dev); - -/*********************** Inlines **********************************/ -/*********************** Fast path ********************************/ -static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) -{ - barrier(); /* status block is written to by the chip */ - fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; -} - -static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 bd_prod, - u16 rx_comp_prod, u16 rx_sge_prod, u32 start) -{ - struct ustorm_eth_rx_producers rx_prods = {0}; - u32 i; - - /* Update producers */ - rx_prods.bd_prod = bd_prod; - rx_prods.cqe_prod = rx_comp_prod; - rx_prods.sge_prod = rx_sge_prod; - - /* - * Make sure that the BD and SGE data is updated before updating the - * producers since FW might read the BD/SGE right after the producer - * is updated. - * This is only applicable for weak-ordered memory model archs such - * as IA-64. The following barrier is also mandatory since the FW - * assumes BDs must have buffers. 
- */ - wmb(); - - for (i = 0; i < sizeof(rx_prods)/4; i++) - REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]); - - mmiowb(); /* keep prod updates ordered */ - - DP(NETIF_MSG_RX_STATUS, - "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", - fp->index, bd_prod, rx_comp_prod, rx_sge_prod); -} - -static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, - u8 segment, u16 index, u8 op, - u8 update, u32 igu_addr) -{ - struct igu_regular cmd_data = {0}; - - cmd_data.sb_id_and_flags = - ((index << IGU_REGULAR_SB_INDEX_SHIFT) | - (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | - (update << IGU_REGULAR_BUPDATE_SHIFT) | - (op << IGU_REGULAR_ENABLE_INT_SHIFT)); - - DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n", - cmd_data.sb_id_and_flags, igu_addr); - REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); - - /* Make sure that ACK is written */ - mmiowb(); - barrier(); -} - -static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, - u8 idu_sb_id, bool is_Pf) -{ - u32 data, ctl, cnt = 100; - u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; - u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; - u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; - u32 sb_bit = 1 << (idu_sb_id%32); - u32 func_encode = func | - ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT); - u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; - - /* Not supported in BC mode */ - if (CHIP_INT_MODE_IS_BC(bp)) - return; - - data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup - << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | - IGU_REGULAR_CLEANUP_SET | - IGU_REGULAR_BCLEANUP; - - ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | - func_encode << IGU_CTRL_REG_FID_SHIFT | - IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; - - DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", - data, igu_addr_data); - REG_WR(bp, igu_addr_data, data); - mmiowb(); - barrier(); - DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", - ctl, igu_addr_ctl); - REG_WR(bp, igu_addr_ctl, ctl); - mmiowb(); - barrier(); - - /* wait for clean up to finish */ - while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) - msleep(20); - - - if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { - DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: " - "idu_sb_id %d offset %d bit %d (cnt %d)\n", - idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); - } -} - -static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, - u8 storm, u16 index, u8 op, u8 update) -{ - u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + - COMMAND_REG_INT_ACK); - struct igu_ack_register igu_ack; - - igu_ack.status_block_index = index; - igu_ack.sb_id_and_flags = - ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | - (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | - (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | - (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); - - DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", - (*(u32 *)&igu_ack), hc_addr); - REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); - - /* Make sure that ACK is written */ - mmiowb(); - barrier(); -} - -static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, - u16 index, u8 op, u8 update) -{ - if (bp->common.int_block == INT_BLOCK_HC) - bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update); - else { - u8 segment; - - if (CHIP_INT_MODE_IS_BC(bp)) - segment = storm; - else if (igu_sb_id != bp->igu_dsb_id) - segment = IGU_SEG_ACCESS_DEF; - else if (storm == ATTENTION_ID) - segment = IGU_SEG_ACCESS_ATTN; - else - segment = IGU_SEG_ACCESS_DEF; - bnx2x_igu_ack_sb(bp, igu_sb_id, segment, 
index, op, update); - } -} - -static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp) -{ - u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + - COMMAND_REG_SIMD_MASK); - u32 result = REG_RD(bp, hc_addr); - - DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", - result, hc_addr); - - barrier(); - return result; -} - -static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) -{ - u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8); - u32 result = REG_RD(bp, igu_addr); - - DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n", - result, igu_addr); - - barrier(); - return result; -} - -static inline u16 bnx2x_ack_int(struct bnx2x *bp) -{ - barrier(); - if (bp->common.int_block == INT_BLOCK_HC) - return bnx2x_hc_ack_int(bp); - else - return bnx2x_igu_ack_int(bp); -} - -static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata) -{ - /* Tell compiler that consumer and producer can change */ - barrier(); - return txdata->tx_pkt_prod != txdata->tx_pkt_cons; -} - -static inline u16 bnx2x_tx_avail(struct bnx2x *bp, - struct bnx2x_fp_txdata *txdata) -{ - s16 used; - u16 prod; - u16 cons; - - prod = txdata->tx_bd_prod; - cons = txdata->tx_bd_cons; - - /* NUM_TX_RINGS = number of "next-page" entries - It will be used as a threshold */ - used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; - -#ifdef BNX2X_STOP_ON_ERROR - WARN_ON(used < 0); - WARN_ON(used > bp->tx_ring_size); - WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL); -#endif - - return (s16)(bp->tx_ring_size) - used; -} - -static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata) -{ - u16 hw_cons; - - /* Tell compiler that status block fields can change */ - barrier(); - hw_cons = le16_to_cpu(*txdata->tx_cons_sb); - return hw_cons != txdata->tx_pkt_cons; -} - -static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) -{ - u8 cos; - for_each_cos_in_tx_queue(fp, cos) - if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) - return true; - return false; -} - -static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) -{ - u16 rx_cons_sb; - - /* Tell compiler that status block fields can change */ - barrier(); - rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); - if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - rx_cons_sb++; - return (fp->rx_comp_cons != rx_cons_sb); -} - -/** - * bnx2x_tx_disable - disables tx from stack point of view - * - * @bp: driver handle - */ -static inline void bnx2x_tx_disable(struct bnx2x *bp) -{ - netif_tx_disable(bp->dev); - netif_carrier_off(bp->dev); -} - -static inline void bnx2x_free_rx_sge(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) -{ - struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; - struct page *page = sw_buf->page; - struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; - - /* Skip "next page" elements */ - if (!page) - return; - - dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); - __free_pages(page, PAGES_PER_SGE_SHIFT); - - sw_buf->page = NULL; - sge->addr_hi = 0; - sge->addr_lo = 0; -} - -static inline void bnx2x_add_all_napi(struct bnx2x *bp) -{ - int i; - - /* Add NAPI objects */ - for_each_rx_queue(bp, i) - netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), - bnx2x_poll, BNX2X_NAPI_WEIGHT); -} - -static inline void bnx2x_del_all_napi(struct bnx2x *bp) -{ - int i; - - for_each_rx_queue(bp, i) - netif_napi_del(&bnx2x_fp(bp, i, napi)); -} - -static inline void bnx2x_disable_msi(struct bnx2x *bp) -{ - if (bp->flags & USING_MSIX_FLAG) { - pci_disable_msix(bp->pdev); - bp->flags &= 
~USING_MSIX_FLAG; - } else if (bp->flags & USING_MSI_FLAG) { - pci_disable_msi(bp->pdev); - bp->flags &= ~USING_MSI_FLAG; - } -} - -static inline int bnx2x_calc_num_queues(struct bnx2x *bp) -{ - return num_queues ? - min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : - min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp)); -} - -static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) -{ - int i, j; - - for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { - int idx = RX_SGE_CNT * i - 1; - - for (j = 0; j < 2; j++) { - BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); - idx--; - } - } -} - -static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) -{ - /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ - memset(fp->sge_mask, 0xff, - (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64)); - - /* Clear the two last indices in the page to 1: - these are the indices that correspond to the "next" element, - hence will never be indicated and should be removed from - the calculations. */ - bnx2x_clear_sge_mask_next_elems(fp); -} - -static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) -{ - struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); - struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; - struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; - dma_addr_t mapping; - - if (unlikely(page == NULL)) - return -ENOMEM; - - mapping = dma_map_page(&bp->pdev->dev, page, 0, - SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - __free_pages(page, PAGES_PER_SGE_SHIFT); - return -ENOMEM; - } - - sw_buf->page = page; - dma_unmap_addr_set(sw_buf, mapping, mapping); - - sge->addr_hi = cpu_to_le32(U64_HI(mapping)); - sge->addr_lo = cpu_to_le32(U64_LO(mapping)); - - return 0; -} - -static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, - struct bnx2x_fastpath *fp, u16 index) -{ - struct sk_buff *skb; - struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; - struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; - dma_addr_t mapping; - - skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); - if (unlikely(skb == NULL)) - return -ENOMEM; - - mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - dev_kfree_skb_any(skb); - return -ENOMEM; - } - - rx_buf->skb = skb; - dma_unmap_addr_set(rx_buf, mapping, mapping); - - rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - - return 0; -} - -/* note that we are not allocating a new skb, - * we are just moving one from cons to prod - * we are not creating a new mapping, - * so there is no need to check for dma_mapping_error(). - */ -static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, - u16 cons, u16 prod) -{ - struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; - struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; - struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; - struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; - - dma_unmap_addr_set(prod_rx_buf, mapping, - dma_unmap_addr(cons_rx_buf, mapping)); - prod_rx_buf->skb = cons_rx_buf->skb; - *prod_bd = *cons_bd; -} - -/************************* Init ******************************************/ - -/** - * bnx2x_func_start - init function - * - * @bp: driver handle - * - * Must be called before sending CLIENT_SETUP for the first client. 
- */ -static inline int bnx2x_func_start(struct bnx2x *bp) -{ - struct bnx2x_func_state_params func_params = {0}; - struct bnx2x_func_start_params *start_params = - &func_params.params.start; - - /* Prepare parameters for function state transitions */ - __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); - - func_params.f_obj = &bp->func_obj; - func_params.cmd = BNX2X_F_CMD_START; - - /* Function parameters */ - start_params->mf_mode = bp->mf_mode; - start_params->sd_vlan_tag = bp->mf_ov; - if (CHIP_IS_E1x(bp)) - start_params->network_cos_mode = OVERRIDE_COS; - else - start_params->network_cos_mode = STATIC_COS; - - return bnx2x_func_state_change(bp, &func_params); -} - - -/** - * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format - * - * @fw_hi: pointer to upper part - * @fw_mid: pointer to middle part - * @fw_lo: pointer to lower part - * @mac: pointer to MAC address - */ -static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo, - u8 *mac) -{ - ((u8 *)fw_hi)[0] = mac[1]; - ((u8 *)fw_hi)[1] = mac[0]; - ((u8 *)fw_mid)[0] = mac[3]; - ((u8 *)fw_mid)[1] = mac[2]; - ((u8 *)fw_lo)[0] = mac[5]; - ((u8 *)fw_lo)[1] = mac[4]; -} - -static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, - struct bnx2x_fastpath *fp, int last) -{ - int i; - - if (fp->disable_tpa) - return; - - for (i = 0; i < last; i++) - bnx2x_free_rx_sge(bp, fp, i); -} - -static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, - struct bnx2x_fastpath *fp, int last) -{ - int i; - - for (i = 0; i < last; i++) { - struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; - struct sw_rx_bd *first_buf = &tpa_info->first_buf; - struct sk_buff *skb = first_buf->skb; - - if (skb == NULL) { - DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); - continue; - } - if (tpa_info->tpa_state == BNX2X_TPA_START) - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(first_buf, mapping), - fp->rx_buf_size, DMA_FROM_DEVICE); - dev_kfree_skb(skb); - first_buf->skb = NULL; - } -} - -static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) -{ - int i; - - for (i = 1; i <= NUM_TX_RINGS; i++) { - struct eth_tx_next_bd *tx_next_bd = - &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; - - tx_next_bd->addr_hi = - cpu_to_le32(U64_HI(txdata->tx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); - tx_next_bd->addr_lo = - cpu_to_le32(U64_LO(txdata->tx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); - } - - SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); - txdata->tx_db.data.zero_fill1 = 0; - txdata->tx_db.data.prod = 0; - - txdata->tx_pkt_prod = 0; - txdata->tx_pkt_cons = 0; - txdata->tx_bd_prod = 0; - txdata->tx_bd_cons = 0; - txdata->tx_pkt = 0; -} - -static inline void bnx2x_init_tx_rings(struct bnx2x *bp) -{ - int i; - u8 cos; - - for_each_tx_queue(bp, i) - for_each_cos_in_tx_queue(&bp->fp[i], cos) - bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); -} - -static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) -{ - int i; - - for (i = 1; i <= NUM_RX_RINGS; i++) { - struct eth_rx_bd *rx_bd; - - rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; - rx_bd->addr_hi = - cpu_to_le32(U64_HI(fp->rx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); - rx_bd->addr_lo = - cpu_to_le32(U64_LO(fp->rx_desc_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); - } -} - -static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) -{ - int i; - - for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { - struct eth_rx_sge *sge; - - sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; - sge->addr_hi = - 
cpu_to_le32(U64_HI(fp->rx_sge_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); - - sge->addr_lo = - cpu_to_le32(U64_LO(fp->rx_sge_mapping + - BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); - } -} - -static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) -{ - int i; - for (i = 1; i <= NUM_RCQ_RINGS; i++) { - struct eth_rx_cqe_next_page *nextpg; - - nextpg = (struct eth_rx_cqe_next_page *) - &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; - nextpg->addr_hi = - cpu_to_le32(U64_HI(fp->rx_comp_mapping + - BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); - nextpg->addr_lo = - cpu_to_le32(U64_LO(fp->rx_comp_mapping + - BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); - } -} - -/* Returns the number of actually allocated BDs */ -static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, - int rx_ring_size) -{ - struct bnx2x *bp = fp->bp; - u16 ring_prod, cqe_ring_prod; - int i; - - fp->rx_comp_cons = 0; - cqe_ring_prod = ring_prod = 0; - - /* This routine is called only during init so - * fp->eth_q_stats.rx_skb_alloc_failed = 0 - */ - for (i = 0; i < rx_ring_size; i++) { - if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { - fp->eth_q_stats.rx_skb_alloc_failed++; - continue; - } - ring_prod = NEXT_RX_IDX(ring_prod); - cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); - WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed)); - } - - if (fp->eth_q_stats.rx_skb_alloc_failed) - BNX2X_ERR("was only able to allocate " - "%d rx skbs on queue[%d]\n", - (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index); - - fp->rx_bd_prod = ring_prod; - /* Limit the CQE producer by the CQE ring size */ - fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, - cqe_ring_prod); - fp->rx_pkt = fp->rx_calls = 0; - - return i - fp->eth_q_stats.rx_skb_alloc_failed; -} - -/* Statistics IDs are global per chip/path, while Client IDs for E1x are per - * port. - */ -static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp) -{ - if (!CHIP_IS_E1x(fp->bp)) - return fp->cl_id; - else - return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x; -} - -static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, - bnx2x_obj_type obj_type) -{ - struct bnx2x *bp = fp->bp; - - /* Configure classification DBs */ - bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, - BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), - bnx2x_sp_mapping(bp, mac_rdata), - BNX2X_FILTER_MAC_PENDING, - &bp->sp_state, obj_type, - &bp->macs_pool); -} - -/** - * bnx2x_get_path_func_num - get number of active functions - * - * @bp: driver handle - * - * Calculates the number of active (not hidden) functions on the - * current path. - */ -static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) -{ - u8 func_num = 0, i; - - /* 57710 has only one function per port */ - if (CHIP_IS_E1(bp)) - return 1; - - /* Calculate the number of functions enabled on the current - * PATH/PORT. - */ - if (CHIP_REV_IS_SLOW(bp)) { - if (IS_MF(bp)) - func_num = 4; - else - func_num = 2; - } else { - for (i = 0; i < E1H_FUNC_MAX / 2; i++) { - u32 func_config = - MF_CFG_RD(bp, - func_mf_config[BP_PORT(bp) + 2 * i]. - config); - func_num += - ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 
0 : 1); - } - } - - WARN_ON(!func_num); - - return func_num; -} - -static inline void bnx2x_init_bp_objs(struct bnx2x *bp) -{ - /* RX_MODE controlling object */ - bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); - - /* multicast configuration controlling object */ - bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, - BP_FUNC(bp), BP_FUNC(bp), - bnx2x_sp(bp, mcast_rdata), - bnx2x_sp_mapping(bp, mcast_rdata), - BNX2X_FILTER_MCAST_PENDING, &bp->sp_state, - BNX2X_OBJ_TYPE_RX); - - /* Setup CAM credit pools */ - bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), - bnx2x_get_path_func_num(bp)); - - /* RSS configuration object */ - bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, - bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), - bnx2x_sp(bp, rss_rdata), - bnx2x_sp_mapping(bp, rss_rdata), - BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, - BNX2X_OBJ_TYPE_RX); -} - -static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) -{ - if (CHIP_IS_E1x(fp->bp)) - return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; - else - return fp->cl_id; -} - -static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) -{ - struct bnx2x *bp = fp->bp; - - if (!CHIP_IS_E1x(bp)) - return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); - else - return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); -} - -static inline void bnx2x_init_txdata(struct bnx2x *bp, - struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, - __le16 *tx_cons_sb) -{ - txdata->cid = cid; - txdata->txq_index = txq_index; - txdata->tx_cons_sb = tx_cons_sb; - - DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d", - txdata->cid, txdata->txq_index); -} - -#ifdef BCM_CNIC -static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) -{ - return bp->cnic_base_cl_id + cl_idx + - (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; -} - -static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) -{ - - /* the 'first' id is allocated for the cnic */ - return bp->base_fw_ndsb; -} - -static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) -{ - return bp->igu_base_sb; -} - - -static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) -{ - struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); - unsigned long q_type = 0; - - bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, - BNX2X_FCOE_ETH_CL_ID_IDX); - /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than - * 16 ETH clients per function when CNIC is enabled! - * - * Fix it ASAP!!! 
- */ - bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; - bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; - bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; - bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; - - bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), - fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); - - DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)", fp->index); - - /* qZone id equals to FW (per path) client id */ - bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); - /* init shortcut */ - bnx2x_fcoe(bp, ustorm_rx_prods_offset) = - bnx2x_rx_ustorm_prods_offset(fp); - - /* Configure Queue State object */ - __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); - __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); - - /* No multi-CoS for FCoE L2 client */ - BUG_ON(fp->max_cos != 1); - - bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, - BP_FUNC(bp), bnx2x_sp(bp, q_rdata), - bnx2x_sp_mapping(bp, q_rdata), q_type); - - DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " - "igu_sb %d\n", - fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, - fp->igu_sb_id); -} -#endif - -static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, - struct bnx2x_fp_txdata *txdata) -{ - int cnt = 1000; - - while (bnx2x_has_tx_work_unload(txdata)) { - if (!cnt) { - BNX2X_ERR("timeout waiting for queue[%d]: " - "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", - txdata->txq_index, txdata->tx_pkt_prod, - txdata->tx_pkt_cons); -#ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); - return -EBUSY; -#else - break; -#endif - } - cnt--; - usleep_range(1000, 1000); - } - - return 0; -} - -int bnx2x_get_link_cfg_idx(struct bnx2x *bp); - -static inline void __storm_memset_struct(struct bnx2x *bp, - u32 addr, size_t size, u32 *data) -{ - int i; - for (i = 0; i < size/4; i++) - REG_WR(bp, addr + (i * 4), data[i]); -} - -static inline void storm_memset_func_cfg(struct bnx2x *bp, - struct tstorm_eth_function_common_config *tcfg, - u16 abs_fid) -{ - size_t size = sizeof(struct tstorm_eth_function_common_config); - - u32 addr = BAR_TSTRORM_INTMEM + - TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); - - __storm_memset_struct(bp, addr, size, (u32 *)tcfg); -} - -static inline void storm_memset_cmng(struct bnx2x *bp, - struct cmng_struct_per_port *cmng, - u8 port) -{ - size_t size = sizeof(struct cmng_struct_per_port); - - u32 addr = BAR_XSTRORM_INTMEM + - XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); - - __storm_memset_struct(bp, addr, size, (u32 *)cmng); -} - -/** - * bnx2x_wait_sp_comp - wait for the outstanding SP commands. - * - * @bp: driver handle - * @mask: bits that need to be cleared - */ -static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) -{ - int tout = 5000; /* Wait for 5 secs tops */ - - while (tout--) { - smp_mb(); - netif_addr_lock_bh(bp->dev); - if (!(bp->sp_state & mask)) { - netif_addr_unlock_bh(bp->dev); - return true; - } - netif_addr_unlock_bh(bp->dev); - - usleep_range(1000, 1000); - } - - smp_mb(); - - netif_addr_lock_bh(bp->dev); - if (bp->sp_state & mask) { - BNX2X_ERR("Filtering completion timed out. 
sp_state 0x%lx, " - "mask 0x%lx\n", bp->sp_state, mask); - netif_addr_unlock_bh(bp->dev); - return false; - } - netif_addr_unlock_bh(bp->dev); - - return true; -} - -/** - * bnx2x_set_ctx_validation - set CDU context validation values - * - * @bp: driver handle - * @cxt: context of the connection on the host memory - * @cid: SW CID of the connection to be configured - */ -void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, - u32 cid); - -void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, - u8 sb_index, u8 disable, u16 usec); -void bnx2x_acquire_phy_lock(struct bnx2x *bp); -void bnx2x_release_phy_lock(struct bnx2x *bp); - -/** - * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration. - * - * @bp: driver handle - * @mf_cfg: MF configuration - * - */ -static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) -{ - u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - if (!max_cfg) { - BNX2X_ERR("Illegal configuration detected for Max BW - " - "using 100 instead\n"); - max_cfg = 100; - } - return max_cfg; -} - -#endif /* BNX2X_CMN_H */ diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c deleted file mode 100644 index a4ea35f..0000000 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ /dev/null @@ -1,2507 +0,0 @@ -/* bnx2x_dcb.c: Broadcom Everest network driver. - * - * Copyright 2009-2011 Broadcom Corporation - * - * Unless you and Broadcom execute a separate written software license - * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). - * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written - * consent. 
- *
- * Maintained by: Eilon Greenstein
- * Written by: Dmitry Kravkov
- *
- */
-#include <linux/netdevice.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/rtnetlink.h>
-#include <net/dcbnl.h>
-
-#include "bnx2x.h"
-#include "bnx2x_cmn.h"
-#include "bnx2x_dcb.h"
-
-/* forward declarations of dcbx related functions */
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
-static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
-static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
-static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
-                u32 *set_configuration_ets_pg,
-                u32 *pri_pg_tbl);
-static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
-                u32 *pg_pri_orginal_spread,
-                struct pg_help_data *help_data);
-static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
-                struct pg_help_data *help_data,
-                struct dcbx_ets_feature *ets,
-                u32 *pg_pri_orginal_spread);
-static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
-                struct cos_help_data *cos_data,
-                u32 *pg_pri_orginal_spread,
-                struct dcbx_ets_feature *ets);
-static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
-                struct bnx2x_func_tx_start_params*);
-
-/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
-static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
-                u32 addr, u32 len)
-{
-    int i;
-    for (i = 0; i < len; i += 4, buff++)
-        *buff = REG_RD(bp, addr + i);
-}
-
-static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
-                u32 addr, u32 len)
-{
-    int i;
-    for (i = 0; i < len; i += 4, buff++)
-        REG_WR(bp, addr + i, *buff);
-}
-
-static void bnx2x_pfc_set(struct bnx2x *bp)
-{
-    struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
-    u32 pri_bit, val = 0;
-    int i;
-
-    pfc_params.num_of_rx_cos_priority_mask =
-        bp->dcbx_port_params.ets.num_of_cos;
-
-    /* Tx COS configuration */
-    for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
-        /*
-         * We configure only the pauseable bits (non-pauseable aren't
-         * configured at all); this is done to avoid false pauses from
-         * the network
-         */
-        pfc_params.rx_cos_priority_mask[i] =
-            bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
-            & DCBX_PFC_PRI_PAUSE_MASK(bp);
-
-    /*
-     * Rx COS configuration
-     * Changing PFC RX configuration.
- * In RX COS0 will always be configured to lossy and COS1 to lossless - */ - for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) { - pri_bit = 1 << i; - - if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)) - val |= 1 << (i * 4); - } - - pfc_params.pkt_priority_to_cos = val; - - /* RX COS0 */ - pfc_params.llfc_low_priority_classes = 0; - /* RX COS1 */ - pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp); - - /* BRB configuration */ - pfc_params.cos0_pauseable = false; - pfc_params.cos1_pauseable = true; - - bnx2x_acquire_phy_lock(bp); - bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED; - bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params); - bnx2x_release_phy_lock(bp); -} - -static void bnx2x_pfc_clear(struct bnx2x *bp) -{ - struct bnx2x_nig_brb_pfc_port_params nig_params = {0}; - nig_params.pause_enable = 1; -#ifdef BNX2X_SAFC - if (bp->flags & SAFC_TX_FLAG) { - u32 high = 0, low = 0; - int i; - - for (i = 0; i < BNX2X_MAX_PRIORITY; i++) { - if (bp->pri_map[i] == 1) - high |= (1 << i); - if (bp->pri_map[i] == 0) - low |= (1 << i); - } - - nig_params.llfc_low_priority_classes = high; - nig_params.llfc_low_priority_classes = low; - - nig_params.pause_enable = 0; - nig_params.llfc_enable = 1; - nig_params.llfc_out_en = 1; - } -#endif /* BNX2X_SAFC */ - bnx2x_acquire_phy_lock(bp); - bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED; - bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params); - bnx2x_release_phy_lock(bp); -} - -static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp, - struct dcbx_features *features, - u32 error) -{ - u8 i = 0; - DP(NETIF_MSG_LINK, "local_mib.error %x\n", error); - - /* PG */ - DP(NETIF_MSG_LINK, - "local_mib.features.ets.enabled %x\n", features->ets.enabled); - for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) - DP(NETIF_MSG_LINK, - "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i, - DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i)); - for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) - DP(NETIF_MSG_LINK, - "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i, - DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i)); - - /* pfc */ - DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n", - features->pfc.pri_en_bitmap); - DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n", - features->pfc.pfc_caps); - DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n", - features->pfc.enabled); - - DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n", - features->app.default_pri); - DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n", - features->app.tc_supported); - DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n", - features->app.enabled); - for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { - DP(NETIF_MSG_LINK, - "dcbx_features.app.app_pri_tbl[%x].app_id %x\n", - i, features->app.app_pri_tbl[i].app_id); - DP(NETIF_MSG_LINK, - "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n", - i, features->app.app_pri_tbl[i].pri_bitmap); - DP(NETIF_MSG_LINK, - "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n", - i, features->app.app_pri_tbl[i].appBitfield); - } -} - -static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp, - u8 pri_bitmap, - u8 llfc_traf_type) -{ - u32 pri = MAX_PFC_PRIORITIES; - u32 index = MAX_PFC_PRIORITIES - 1; - u32 pri_mask; - u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; - - /* Choose the highest priority */ - while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) { - pri_mask = 1 << index; - if (GET_FLAGS(pri_bitmap, pri_mask)) - pri = index ; - index--; - } - - if (pri < MAX_PFC_PRIORITIES) - 
ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri); -} - -static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, - struct dcbx_app_priority_feature *app, - u32 error) { - u8 index; - u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; - - if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) - DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n"); - - if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH)) - DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n"); - - if (app->enabled && - !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) { - - bp->dcbx_port_params.app.enabled = true; - - for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) - ttp[index] = 0; - - if (app->default_pri < MAX_PFC_PRIORITIES) - ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri; - - for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) { - struct dcbx_app_priority_entry *entry = - app->app_pri_tbl; - - if (GET_FLAGS(entry[index].appBitfield, - DCBX_APP_SF_ETH_TYPE) && - ETH_TYPE_FCOE == entry[index].app_id) - bnx2x_dcbx_get_ap_priority(bp, - entry[index].pri_bitmap, - LLFC_TRAFFIC_TYPE_FCOE); - - if (GET_FLAGS(entry[index].appBitfield, - DCBX_APP_SF_PORT) && - TCP_PORT_ISCSI == entry[index].app_id) - bnx2x_dcbx_get_ap_priority(bp, - entry[index].pri_bitmap, - LLFC_TRAFFIC_TYPE_ISCSI); - } - } else { - DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n"); - bp->dcbx_port_params.app.enabled = false; - for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) - ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY; - } -} - -static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, - struct dcbx_ets_feature *ets, - u32 error) { - int i = 0; - u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0}; - struct pg_help_data pg_help_data; - struct bnx2x_dcbx_cos_params *cos_params = - bp->dcbx_port_params.ets.cos_params; - - memset(&pg_help_data, 0, sizeof(struct pg_help_data)); - - - if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR)) - DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n"); - - - /* Clean up old settings of ets on COS */ - for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) { - cos_params[i].pauseable = false; - cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID; - cos_params[i].bw_tbl = DCBX_INVALID_COS_BW; - cos_params[i].pri_bitmask = 0; - } - - if (bp->dcbx_port_params.app.enabled && - !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) && - ets->enabled) { - DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n"); - bp->dcbx_port_params.ets.enabled = true; - - bnx2x_dcbx_get_ets_pri_pg_tbl(bp, - pg_pri_orginal_spread, - ets->pri_pg_tbl); - - bnx2x_dcbx_get_num_pg_traf_type(bp, - pg_pri_orginal_spread, - &pg_help_data); - - bnx2x_dcbx_fill_cos_params(bp, &pg_help_data, - ets, pg_pri_orginal_spread); - - } else { - DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n"); - bp->dcbx_port_params.ets.enabled = false; - ets->pri_pg_tbl[0] = 0; - - for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++) - DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1); - } -} - -static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, - struct dcbx_pfc_feature *pfc, u32 error) -{ - - if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR)) - DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n"); - - if (bp->dcbx_port_params.app.enabled && - !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) && - pfc->enabled) { - bp->dcbx_port_params.pfc.enabled = true; - bp->dcbx_port_params.pfc.priority_non_pauseable_mask = - ~(pfc->pri_en_bitmap); - } else { - DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n"); - bp->dcbx_port_params.pfc.enabled = false; - 
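The mask handling in bnx2x_dcbx_get_pfc_feature above is the whole of the driver's pauseable bookkeeping: the negotiated pri_en_bitmap carries one bit per 802.1p priority that may be paused, and priority_non_pauseable_mask is simply its bitwise inverse. A minimal standalone sketch of that relationship, using a hypothetical negotiation result rather than real MIB data:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical DCBX result: priorities 3 and 4 are PFC-enabled */
    uint8_t pri_en_bitmap = (1 << 3) | (1 << 4);          /* 0x18 */
    /* every other priority must never be paused */
    uint8_t non_pauseable_mask = (uint8_t)~pri_en_bitmap; /* 0xe7 */

    printf("pri 3 pauseable:     %d\n", !!(pri_en_bitmap & (1 << 3)));
    printf("pri 0 non-pauseable: %d\n", !!(non_pauseable_mask & (1 << 0)));
    return 0;
}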
bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0; - } -} - -/* maps unmapped priorities to to the same COS as L2 */ -static void bnx2x_dcbx_map_nw(struct bnx2x *bp) -{ - int i; - u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */ - u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; - u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW]; - struct bnx2x_dcbx_cos_params *cos_params = - bp->dcbx_port_params.ets.cos_params; - - /* get unmapped priorities by clearing mapped bits */ - for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) - unmapped &= ~(1 << ttp[i]); - - /* find cos for nw prio and extend it with unmapped */ - for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) { - if (cos_params[i].pri_bitmask & nw_prio) { - /* extend the bitmask with unmapped */ - DP(NETIF_MSG_LINK, - "cos %d extended with 0x%08x", i, unmapped); - cos_params[i].pri_bitmask |= unmapped; - break; - } - } -} - -static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp, - struct dcbx_features *features, - u32 error) -{ - bnx2x_dcbx_get_ap_feature(bp, &features->app, error); - - bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error); - - bnx2x_dcbx_get_ets_feature(bp, &features->ets, error); - - bnx2x_dcbx_map_nw(bp); -} - -#define DCBX_LOCAL_MIB_MAX_TRY_READ (100) -static int bnx2x_dcbx_read_mib(struct bnx2x *bp, - u32 *base_mib_addr, - u32 offset, - int read_mib_type) -{ - int max_try_read = 0; - u32 mib_size, prefix_seq_num, suffix_seq_num; - struct lldp_remote_mib *remote_mib ; - struct lldp_local_mib *local_mib; - - - switch (read_mib_type) { - case DCBX_READ_LOCAL_MIB: - mib_size = sizeof(struct lldp_local_mib); - break; - case DCBX_READ_REMOTE_MIB: - mib_size = sizeof(struct lldp_remote_mib); - break; - default: - return 1; /*error*/ - } - - offset += BP_PORT(bp) * mib_size; - - do { - bnx2x_read_data(bp, base_mib_addr, offset, mib_size); - - max_try_read++; - - switch (read_mib_type) { - case DCBX_READ_LOCAL_MIB: - local_mib = (struct lldp_local_mib *) base_mib_addr; - prefix_seq_num = local_mib->prefix_seq_num; - suffix_seq_num = local_mib->suffix_seq_num; - break; - case DCBX_READ_REMOTE_MIB: - remote_mib = (struct lldp_remote_mib *) base_mib_addr; - prefix_seq_num = remote_mib->prefix_seq_num; - suffix_seq_num = remote_mib->suffix_seq_num; - break; - default: - return 1; /*error*/ - } - } while ((prefix_seq_num != suffix_seq_num) && - (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ)); - - if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) { - BNX2X_ERR("MIB could not be read\n"); - return 1; - } - - return 0; -} - -static void bnx2x_pfc_set_pfc(struct bnx2x *bp) -{ - if (bp->dcbx_port_params.pfc.enabled && - !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)) - /* - * 1. Fills up common PFC structures if required - * 2. 
Configure NIG, MAC and BRB via the elink - */ - bnx2x_pfc_set(bp); - else - bnx2x_pfc_clear(bp); -} - -static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) -{ - struct bnx2x_func_state_params func_params = {0}; - - func_params.f_obj = &bp->func_obj; - func_params.cmd = BNX2X_F_CMD_TX_STOP; - - DP(NETIF_MSG_LINK, "STOP TRAFFIC\n"); - return bnx2x_func_state_change(bp, &func_params); -} - -static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) -{ - struct bnx2x_func_state_params func_params = {0}; - struct bnx2x_func_tx_start_params *tx_params = - &func_params.params.tx_start; - - func_params.f_obj = &bp->func_obj; - func_params.cmd = BNX2X_F_CMD_TX_START; - - bnx2x_dcbx_fw_struct(bp, tx_params); - - DP(NETIF_MSG_LINK, "START TRAFFIC\n"); - return bnx2x_func_state_change(bp, &func_params); -} - -static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) -{ - struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); - int rc = 0; - - if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) { - BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos); - return; - } - - /* valid COS entries */ - if (ets->num_of_cos == 1) /* no ETS */ - return; - - /* sanity */ - if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) && - (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) || - ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) && - (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) { - BNX2X_ERR("all COS should have at least bw_limit or strict" - "ets->cos_params[0].strict= %x" - "ets->cos_params[0].bw_tbl= %x" - "ets->cos_params[1].strict= %x" - "ets->cos_params[1].bw_tbl= %x", - ets->cos_params[0].strict, - ets->cos_params[0].bw_tbl, - ets->cos_params[1].strict, - ets->cos_params[1].bw_tbl); - return; - } - /* If we join a group and there is bw_tbl and strict then bw rules */ - if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) && - (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) { - u32 bw_tbl_0 = ets->cos_params[0].bw_tbl; - u32 bw_tbl_1 = ets->cos_params[1].bw_tbl; - /* Do not allow 0-100 configuration - * since PBF does not support it - * force 1-99 instead - */ - if (bw_tbl_0 == 0) { - bw_tbl_0 = 1; - bw_tbl_1 = 99; - } else if (bw_tbl_1 == 0) { - bw_tbl_1 = 1; - bw_tbl_0 = 99; - } - - bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1); - } else { - if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST) - rc = bnx2x_ets_strict(&bp->link_params, 0); - else if (ets->cos_params[1].strict - == BNX2X_DCBX_STRICT_COS_HIGHEST) - rc = bnx2x_ets_strict(&bp->link_params, 1); - if (rc) - BNX2X_ERR("update_ets_params failed\n"); - } -} - -/* - * In E3B0 the configuration may have more than 2 COS. 
- */
-void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
-{
-    struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
-    struct bnx2x_ets_params ets_params = { 0 };
-    u8 i;
-
-    ets_params.num_of_cos = ets->num_of_cos;
-
-    for (i = 0; i < ets->num_of_cos; i++) {
-        /* COS is SP */
-        if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
-            if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
-                BNX2X_ERR("COS can't be not BW and not SP\n");
-                return;
-            }
-
-            ets_params.cos[i].state = bnx2x_cos_state_strict;
-            ets_params.cos[i].params.sp_params.pri =
-                ets->cos_params[i].strict;
-        } else { /* COS is BW */
-            if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
-                BNX2X_ERR("COS can't be not BW and not SP\n");
-                return;
-            }
-            ets_params.cos[i].state = bnx2x_cos_state_bw;
-            ets_params.cos[i].params.bw_params.bw =
-                (u8)ets->cos_params[i].bw_tbl;
-        }
-    }
-
-    /* Configure the ETS in HW */
-    if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
-                &ets_params)) {
-        BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
-        bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
-    }
-}
-
-static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
-{
-    bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
-
-    if (!bp->dcbx_port_params.ets.enabled ||
-        (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
-        return;
-
-    if (CHIP_IS_E3B0(bp))
-        bnx2x_dcbx_update_ets_config(bp);
-    else
-        bnx2x_dcbx_2cos_limit_update_ets_config(bp);
-}
-
-#ifdef BCM_DCBNL
-static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
-{
-    struct lldp_remote_mib remote_mib = {0};
-    u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
-    int rc;
-
-    DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
-       dcbx_remote_mib_offset);
-
-    if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
-        BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
-        return -EINVAL;
-    }
-
-    rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
-                DCBX_READ_REMOTE_MIB);
-
-    if (rc) {
-        BNX2X_ERR("Failed to read remote mib from FW\n");
-        return rc;
-    }
-
-    /* save features and flags */
-    bp->dcbx_remote_feat = remote_mib.features;
-    bp->dcbx_remote_flags = remote_mib.flags;
-    return 0;
-}
-#endif
-
-static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
-{
-    struct lldp_local_mib local_mib = {0};
-    u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
-    int rc;
-
-    DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
-
-    if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
-        BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
-        return -EINVAL;
-    }
-
-    rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
-                DCBX_READ_LOCAL_MIB);
-
-    if (rc) {
-        BNX2X_ERR("Failed to read local mib from FW\n");
-        return rc;
-    }
-
-    /* save features and error */
-    bp->dcbx_local_feat = local_mib.features;
-    bp->dcbx_error = local_mib.error;
-    return 0;
-}
-
-
-#ifdef BCM_DCBNL
-static inline
-u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
-{
-    u8 pri;
-
-    /* Choose the highest priority */
-    for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
-        if (ent->pri_bitmap & (1 << pri))
-            break;
-    return pri;
-}
-
-static inline
-u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
-{
-    return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
-            DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
-                    DCB_APP_IDTYPE_ETHTYPE;
-}
-
-int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
-{
-    int i, err = 0;
-
-    for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
-        struct dcbx_app_priority_entry *ent =
-            &bp->dcbx_local_feat.app.app_pri_tbl[i];
-
-        if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
-            u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
-
-            /* avoid invalid user-priority */
-            if (up) {
-                struct dcb_app app;
-                app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
-                app.protocol = ent->app_id;
-                app.priority = delall ? 0 : up;
-                err = dcb_setapp(bp->dev, &app);
-            }
-        }
-    }
-    return err;
-}
-#endif
-
-static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
-{
-    if (SHMEM2_HAS(bp, drv_flags)) {
-        u32 drv_flags;
-        bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
-        drv_flags = SHMEM2_RD(bp, drv_flags);
-
-        if (set)
-            SET_FLAGS(drv_flags, flags);
-        else
-            RESET_FLAGS(drv_flags, flags);
-
-        SHMEM2_WR(bp, drv_flags, drv_flags);
-        DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
-        bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
-    }
-}
-
-static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
-{
-    u8 prio, cos;
-    for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
-        for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
-            if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
-                & (1 << prio)) {
-                bp->prio_to_cos[prio] = cos;
-                DP(NETIF_MSG_LINK,
-                   "tx_mapping %d --> %d\n", prio, cos);
-            }
-        }
-    }
-
-    /* setup tc must be called under rtnl lock, but we can't take it here
-     * as we are handling an attention on a work queue which must be
-     * flushed at some rtnl-locked contexts (e.g. if down)
-     */
-    if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
-        schedule_delayed_work(&bp->sp_rtnl_task, 0);
-}
-
-void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
-{
-    switch (state) {
-    case BNX2X_DCBX_STATE_NEG_RECEIVED:
-    {
-        DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
-#ifdef BCM_DCBNL
-        /*
-         * Delete app tlvs from dcbnl before reading new
-         * negotiation results
-         */
-        bnx2x_dcbnl_update_applist(bp, true);
-
-        /* Read remote mib if dcbx is in the FW */
-        if (bnx2x_dcbx_read_shmem_remote_mib(bp))
-            return;
-#endif
-        /* Read neg results if dcbx is in the FW */
-        if (bnx2x_dcbx_read_shmem_neg_results(bp))
-            return;
-
-        bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
-                    bp->dcbx_error);
-
-        bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
-                    bp->dcbx_error);
-
-        /* mark DCBX result for PMF migration */
-        bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
-#ifdef BCM_DCBNL
-        /*
-         * Add new app tlvs to dcbnl
-         */
-        bnx2x_dcbnl_update_applist(bp, false);
-#endif
-        bnx2x_dcbx_stop_hw_tx(bp);
-
-        /* reconfigure the netdevice with the results of the new
-         * dcbx negotiation.
- */ - bnx2x_dcbx_update_tc_mapping(bp); - - return; - } - case BNX2X_DCBX_STATE_TX_PAUSED: - DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); - bnx2x_pfc_set_pfc(bp); - - bnx2x_dcbx_update_ets_params(bp); - bnx2x_dcbx_resume_hw_tx(bp); - return; - case BNX2X_DCBX_STATE_TX_RELEASED: - DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); - bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); -#ifdef BCM_DCBNL - /* - * Send a notification for the new negotiated parameters - */ - dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); -#endif - return; - default: - BNX2X_ERR("Unknown DCBX_STATE\n"); - } -} - -#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \ - BP_PORT(bp)*sizeof(struct lldp_admin_mib)) - -static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, - u32 dcbx_lldp_params_offset) -{ - struct lldp_admin_mib admin_mib; - u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0; - u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp); - - /*shortcuts*/ - struct dcbx_features *af = &admin_mib.features; - struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params; - - memset(&admin_mib, 0, sizeof(struct lldp_admin_mib)); - - /* Read the data first */ - bnx2x_read_data(bp, (u32 *)&admin_mib, offset, - sizeof(struct lldp_admin_mib)); - - if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON) - SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED); - else - RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED); - - if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) { - - RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK); - admin_mib.ver_cfg_flags |= - (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) & - DCBX_CEE_VERSION_MASK; - - af->ets.enabled = (u8)dp->admin_ets_enable; - - af->pfc.enabled = (u8)dp->admin_pfc_enable; - - /* FOR IEEE dp->admin_tc_supported_tx_enable */ - if (dp->admin_ets_configuration_tx_enable) - SET_FLAGS(admin_mib.ver_cfg_flags, - DCBX_ETS_CONFIG_TX_ENABLED); - else - RESET_FLAGS(admin_mib.ver_cfg_flags, - DCBX_ETS_CONFIG_TX_ENABLED); - /* For IEEE admin_ets_recommendation_tx_enable */ - if (dp->admin_pfc_tx_enable) - SET_FLAGS(admin_mib.ver_cfg_flags, - DCBX_PFC_CONFIG_TX_ENABLED); - else - RESET_FLAGS(admin_mib.ver_cfg_flags, - DCBX_PFC_CONFIG_TX_ENABLED); - - if (dp->admin_application_priority_tx_enable) - SET_FLAGS(admin_mib.ver_cfg_flags, - DCBX_APP_CONFIG_TX_ENABLED); - else - RESET_FLAGS(admin_mib.ver_cfg_flags, - DCBX_APP_CONFIG_TX_ENABLED); - - if (dp->admin_ets_willing) - SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING); - else - RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING); - /* For IEEE admin_ets_reco_valid */ - if (dp->admin_pfc_willing) - SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING); - else - RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING); - - if (dp->admin_app_priority_willing) - SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING); - else - RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING); - - for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) { - DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i, - (u8)dp->admin_configuration_bw_precentage[i]); - - DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n", - i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i)); - } - - for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) { - DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i, - (u8)dp->admin_configuration_ets_pg[i]); - - DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n", - i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); - } - - /*For IEEE admin_recommendation_bw_precentage - *For IEEE 
admin_recommendation_ets_pg */ - af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; - for (i = 0; i < 4; i++) { - if (dp->admin_priority_app_table[i].valid) { - struct bnx2x_admin_priority_app_table *table = - dp->admin_priority_app_table; - if ((ETH_TYPE_FCOE == table[i].app_id) && - (TRAFFIC_TYPE_ETH == table[i].traffic_type)) - traf_type = FCOE_APP_IDX; - else if ((TCP_PORT_ISCSI == table[i].app_id) && - (TRAFFIC_TYPE_PORT == table[i].traffic_type)) - traf_type = ISCSI_APP_IDX; - else - traf_type = other_traf_type++; - - af->app.app_pri_tbl[traf_type].app_id = - table[i].app_id; - - af->app.app_pri_tbl[traf_type].pri_bitmap = - (u8)(1 << table[i].priority); - - af->app.app_pri_tbl[traf_type].appBitfield = - (DCBX_APP_ENTRY_VALID); - - af->app.app_pri_tbl[traf_type].appBitfield |= - (TRAFFIC_TYPE_ETH == table[i].traffic_type) ? - DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT; - } - } - - af->app.default_pri = (u8)dp->admin_default_priority; - - } - - /* Write the data. */ - bnx2x_write_data(bp, (u32 *)&admin_mib, offset, - sizeof(struct lldp_admin_mib)); - -} - -void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) -{ - if (!CHIP_IS_E1x(bp)) { - bp->dcb_state = dcb_on; - bp->dcbx_enabled = dcbx_enabled; - } else { - bp->dcb_state = false; - bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; - } - DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n", - dcb_on ? "ON" : "OFF", - dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" : - dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" : - dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ? - "on-chip with negotiation" : "invalid"); -} - -void bnx2x_dcbx_init_params(struct bnx2x *bp) -{ - bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */ - bp->dcbx_config_params.admin_ets_willing = 1; - bp->dcbx_config_params.admin_pfc_willing = 1; - bp->dcbx_config_params.overwrite_settings = 1; - bp->dcbx_config_params.admin_ets_enable = 1; - bp->dcbx_config_params.admin_pfc_enable = 1; - bp->dcbx_config_params.admin_tc_supported_tx_enable = 1; - bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; - bp->dcbx_config_params.admin_pfc_tx_enable = 1; - bp->dcbx_config_params.admin_application_priority_tx_enable = 1; - bp->dcbx_config_params.admin_ets_reco_valid = 1; - bp->dcbx_config_params.admin_app_priority_willing = 1; - bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00; - bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50; - bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50; - bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0; - bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0; - bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0; - bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0; - bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0; - bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1; - bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0; - bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0; - bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2; - bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0; - bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0; - bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0; - bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1; - bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2; 
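The app-table code above packs a single 802.1p priority into a one-hot pri_bitmap via 1 << priority, and the defaults below rely on the same convention when they pair the FCoE app entry (priority 3) with an admin_pfc_bitmap of 0x8. A small self-contained check of that encoding; the helper name is hypothetical:

#include <assert.h>
#include <stdint.h>

/* one-hot convention shared by pri_bitmap and admin_pfc_bitmap */
static uint8_t prio_to_bitmap(uint8_t priority)
{
    return (uint8_t)(1u << priority);
}

int main(void)
{
    assert(prio_to_bitmap(3) == 0x8); /* FCoE default priority 3 */
    assert(prio_to_bitmap(0) == 0x1); /* iSCSI/default priority 0 */
    return 0;
}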
- bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7; - bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5; - bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6; - bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7; - bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0; - bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1; - bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2; - bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3; - bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4; - bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5; - bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6; - bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7; - bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */ - bp->dcbx_config_params.admin_priority_app_table[0].valid = 1; - bp->dcbx_config_params.admin_priority_app_table[1].valid = 1; - bp->dcbx_config_params.admin_priority_app_table[2].valid = 0; - bp->dcbx_config_params.admin_priority_app_table[3].valid = 0; - bp->dcbx_config_params.admin_priority_app_table[0].priority = 3; - bp->dcbx_config_params.admin_priority_app_table[1].priority = 0; - bp->dcbx_config_params.admin_priority_app_table[2].priority = 0; - bp->dcbx_config_params.admin_priority_app_table[3].priority = 0; - bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0; - bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1; - bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0; - bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0; - bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906; - bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260; - bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0; - bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0; - bp->dcbx_config_params.admin_default_priority = - bp->dcbx_config_params.admin_priority_app_table[1].priority; -} - -void bnx2x_dcbx_init(struct bnx2x *bp) -{ - u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE; - - if (bp->dcbx_enabled <= 0) - return; - - /* validate: - * chip of good for dcbx version, - * dcb is wanted - * the function is pmf - * shmem2 contains DCBX support fields - */ - DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n", - bp->dcb_state, bp->port.pmf); - - if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && - SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { - dcbx_lldp_params_offset = - SHMEM2_RD(bp, dcbx_lldp_params_offset); - - DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n", - dcbx_lldp_params_offset); - - bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); - - if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { - bnx2x_dcbx_admin_mib_updated_params(bp, - dcbx_lldp_params_offset); - - /* Let HW start negotiation */ - bnx2x_fw_command(bp, - DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); - } - } -} -static void -bnx2x_dcbx_print_cos_params(struct bnx2x *bp, - struct bnx2x_func_tx_start_params *pfc_fw_cfg) -{ - u8 pri = 0; - u8 cos = 0; - - DP(NETIF_MSG_LINK, - "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version); - DP(NETIF_MSG_LINK, - "pdev->params.dcbx_port_params.pfc." - "priority_non_pauseable_mask %x\n", - bp->dcbx_port_params.pfc.priority_non_pauseable_mask); - - for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) { - DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." 
- "cos_params[%d].pri_bitmask %x\n", cos, - bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask); - - DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." - "cos_params[%d].bw_tbl %x\n", cos, - bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); - - DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." - "cos_params[%d].strict %x\n", cos, - bp->dcbx_port_params.ets.cos_params[cos].strict); - - DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." - "cos_params[%d].pauseable %x\n", cos, - bp->dcbx_port_params.ets.cos_params[cos].pauseable); - } - - for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { - DP(NETIF_MSG_LINK, - "pfc_fw_cfg->traffic_type_to_priority_cos[%d]." - "priority %x\n", pri, - pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority); - - DP(NETIF_MSG_LINK, - "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n", - pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos); - } -} - -/* fills help_data according to pg_info */ -static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, - u32 *pg_pri_orginal_spread, - struct pg_help_data *help_data) -{ - bool pg_found = false; - u32 i, traf_type, add_traf_type, add_pg; - u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; - struct pg_entry_help_data *data = help_data->data; /*shotcut*/ - - /* Set to invalid */ - for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) - data[i].pg = DCBX_ILLEGAL_PG; - - for (add_traf_type = 0; - add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) { - pg_found = false; - if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) { - add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]]; - for (traf_type = 0; - traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; - traf_type++) { - if (data[traf_type].pg == add_pg) { - if (!(data[traf_type].pg_priority & - (1 << ttp[add_traf_type]))) - data[traf_type]. - num_of_dif_pri++; - data[traf_type].pg_priority |= - (1 << ttp[add_traf_type]); - pg_found = true; - break; - } - } - if (false == pg_found) { - data[help_data->num_of_pg].pg = add_pg; - data[help_data->num_of_pg].pg_priority = - (1 << ttp[add_traf_type]); - data[help_data->num_of_pg].num_of_dif_pri = 1; - help_data->num_of_pg++; - } - } - DP(NETIF_MSG_LINK, - "add_traf_type %d pg_found %s num_of_pg %d\n", - add_traf_type, (false == pg_found) ? "NO" : "YES", - help_data->num_of_pg); - } -} - -static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp, - struct cos_help_data *cos_data, - u32 pri_join_mask) -{ - /* Only one priority than only one COS */ - cos_data->data[0].pausable = - IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); - cos_data->data[0].pri_join_mask = pri_join_mask; - cos_data->data[0].cos_bw = 100; - cos_data->num_of_cos = 1; -} - -static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp, - struct cos_entry_help_data *data, - u8 pg_bw) -{ - if (data->cos_bw == DCBX_INVALID_COS_BW) - data->cos_bw = pg_bw; - else - data->cos_bw += pg_bw; -} - -static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, - struct cos_help_data *cos_data, - u32 *pg_pri_orginal_spread, - struct dcbx_ets_feature *ets) -{ - u32 pri_tested = 0; - u8 i = 0; - u8 entry = 0; - u8 pg_entry = 0; - u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX; - - cos_data->data[0].pausable = true; - cos_data->data[1].pausable = false; - cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0; - - for (i = 0 ; i < num_of_pri ; i++) { - pri_tested = 1 << bp->dcbx_port_params. 
- app.traffic_type_priority[i]; - - if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) { - cos_data->data[1].pri_join_mask |= pri_tested; - entry = 1; - } else { - cos_data->data[0].pri_join_mask |= pri_tested; - entry = 0; - } - pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params. - app.traffic_type_priority[i]]; - /* There can be only one strict pg */ - if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) - bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry], - DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); - else - /* If we join a group and one is strict - * than the bw rulls */ - cos_data->data[entry].strict = - BNX2X_DCBX_STRICT_COS_HIGHEST; - } - if ((0 == cos_data->data[0].pri_join_mask) && - (0 == cos_data->data[1].pri_join_mask)) - BNX2X_ERR("dcbx error: Both groups must have priorities\n"); -} - - -#ifndef POWER_OF_2 -#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) -#endif - -static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, - struct pg_help_data *pg_help_data, - struct cos_help_data *cos_data, - u32 pri_join_mask, - u8 num_of_dif_pri) -{ - u8 i = 0; - u32 pri_tested = 0; - u32 pri_mask_without_pri = 0; - u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; - /*debug*/ - if (num_of_dif_pri == 1) { - bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); - return; - } - /* single priority group */ - if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) { - /* If there are both pauseable and non-pauseable priorities, - * the pauseable priorities go to the first queue and - * the non-pauseable priorities go to the second queue. - */ - if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { - /* Pauseable */ - cos_data->data[0].pausable = true; - /* Non pauseable.*/ - cos_data->data[1].pausable = false; - - if (2 == num_of_dif_pri) { - cos_data->data[0].cos_bw = 50; - cos_data->data[1].cos_bw = 50; - } - - if (3 == num_of_dif_pri) { - if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp, - pri_join_mask))) { - cos_data->data[0].cos_bw = 33; - cos_data->data[1].cos_bw = 67; - } else { - cos_data->data[0].cos_bw = 67; - cos_data->data[1].cos_bw = 33; - } - } - - } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) { - /* If there are only pauseable priorities, - * then one/two priorities go to the first queue - * and one priority goes to the second queue. - */ - if (2 == num_of_dif_pri) { - cos_data->data[0].cos_bw = 50; - cos_data->data[1].cos_bw = 50; - } else { - cos_data->data[0].cos_bw = 67; - cos_data->data[1].cos_bw = 33; - } - cos_data->data[1].pausable = true; - cos_data->data[0].pausable = true; - /* All priorities except FCOE */ - cos_data->data[0].pri_join_mask = (pri_join_mask & - ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]))); - /* Only FCOE priority.*/ - cos_data->data[1].pri_join_mask = - (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]); - } else - /* If there are only non-pauseable priorities, - * they will all go to the same queue. - */ - bnx2x_dcbx_ets_disabled_entry_data(bp, - cos_data, pri_join_mask); - } else { - /* priority group which is not BW limited (PG#15):*/ - if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { - /* If there are both pauseable and non-pauseable - * priorities, the pauseable priorities go to the first - * queue and the non-pauseable priorities - * go to the second queue. 
- */ - if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) > - DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) { - cos_data->data[0].strict = - BNX2X_DCBX_STRICT_COS_HIGHEST; - cos_data->data[1].strict = - BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( - BNX2X_DCBX_STRICT_COS_HIGHEST); - } else { - cos_data->data[0].strict = - BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( - BNX2X_DCBX_STRICT_COS_HIGHEST); - cos_data->data[1].strict = - BNX2X_DCBX_STRICT_COS_HIGHEST; - } - /* Pauseable */ - cos_data->data[0].pausable = true; - /* Non pause-able.*/ - cos_data->data[1].pausable = false; - } else { - /* If there are only pauseable priorities or - * only non-pauseable,* the lower priorities go - * to the first queue and the higherpriorities go - * to the second queue. - */ - cos_data->data[0].pausable = - cos_data->data[1].pausable = - IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); - - for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) { - pri_tested = 1 << bp->dcbx_port_params. - app.traffic_type_priority[i]; - /* Remove priority tested */ - pri_mask_without_pri = - (pri_join_mask & ((u8)(~pri_tested))); - if (pri_mask_without_pri < pri_tested) - break; - } - - if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX) - BNX2X_ERR("Invalid value for pri_join_mask -" - " could not find a priority\n"); - - cos_data->data[0].pri_join_mask = pri_mask_without_pri; - cos_data->data[1].pri_join_mask = pri_tested; - /* Both queues are strict priority, - * and that with the highest priority - * gets the highest strict priority in the arbiter. - */ - cos_data->data[0].strict = - BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( - BNX2X_DCBX_STRICT_COS_HIGHEST); - cos_data->data[1].strict = - BNX2X_DCBX_STRICT_COS_HIGHEST; - } - } -} - -static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( - struct bnx2x *bp, - struct pg_help_data *pg_help_data, - struct dcbx_ets_feature *ets, - struct cos_help_data *cos_data, - u32 *pg_pri_orginal_spread, - u32 pri_join_mask, - u8 num_of_dif_pri) -{ - u8 i = 0; - u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 }; - - /* If there are both pauseable and non-pauseable priorities, - * the pauseable priorities go to the first queue and - * the non-pauseable priorities go to the second queue. - */ - if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { - if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, - pg_help_data->data[0].pg_priority) || - IS_DCBX_PFC_PRI_MIX_PAUSE(bp, - pg_help_data->data[1].pg_priority)) { - /* If one PG contains both pauseable and - * non-pauseable priorities then ETS is disabled. - */ - bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data, - pg_pri_orginal_spread, ets); - bp->dcbx_port_params.ets.enabled = false; - return; - } - - /* Pauseable */ - cos_data->data[0].pausable = true; - /* Non pauseable. */ - cos_data->data[1].pausable = false; - if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, - pg_help_data->data[0].pg_priority)) { - /* 0 is pauseable */ - cos_data->data[0].pri_join_mask = - pg_help_data->data[0].pg_priority; - pg[0] = pg_help_data->data[0].pg; - cos_data->data[1].pri_join_mask = - pg_help_data->data[1].pg_priority; - pg[1] = pg_help_data->data[1].pg; - } else {/* 1 is pauseable */ - cos_data->data[0].pri_join_mask = - pg_help_data->data[1].pg_priority; - pg[0] = pg_help_data->data[1].pg; - cos_data->data[1].pri_join_mask = - pg_help_data->data[0].pg_priority; - pg[1] = pg_help_data->data[0].pg; - } - } else { - /* If there are only pauseable priorities or - * only non-pauseable, each PG goes to a queue. 
- */ - cos_data->data[0].pausable = cos_data->data[1].pausable = - IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); - cos_data->data[0].pri_join_mask = - pg_help_data->data[0].pg_priority; - pg[0] = pg_help_data->data[0].pg; - cos_data->data[1].pri_join_mask = - pg_help_data->data[1].pg_priority; - pg[1] = pg_help_data->data[1].pg; - } - - /* There can be only one strict pg */ - for (i = 0 ; i < ARRAY_SIZE(pg); i++) { - if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES) - cos_data->data[i].cos_bw = - DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]); - else - cos_data->data[i].strict = - BNX2X_DCBX_STRICT_COS_HIGHEST; - } -} - -static int bnx2x_dcbx_join_pgs( - struct bnx2x *bp, - struct dcbx_ets_feature *ets, - struct pg_help_data *pg_help_data, - u8 required_num_of_pg) -{ - u8 entry_joined = pg_help_data->num_of_pg - 1; - u8 entry_removed = entry_joined + 1; - u8 pg_joined = 0; - - if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data) - <= pg_help_data->num_of_pg) { - - BNX2X_ERR("required_num_of_pg can't be zero\n"); - return -EINVAL; - } - - while (required_num_of_pg < pg_help_data->num_of_pg) { - entry_joined = pg_help_data->num_of_pg - 2; - entry_removed = entry_joined + 1; - /* protect index */ - entry_removed %= ARRAY_SIZE(pg_help_data->data); - - pg_help_data->data[entry_joined].pg_priority |= - pg_help_data->data[entry_removed].pg_priority; - - pg_help_data->data[entry_joined].num_of_dif_pri += - pg_help_data->data[entry_removed].num_of_dif_pri; - - if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG || - pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG) - /* Entries joined strict priority rules */ - pg_help_data->data[entry_joined].pg = - DCBX_STRICT_PRI_PG; - else { - /* Entries can be joined join BW */ - pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl, - pg_help_data->data[entry_joined].pg) + - DCBX_PG_BW_GET(ets->pg_bw_tbl, - pg_help_data->data[entry_removed].pg); - - DCBX_PG_BW_SET(ets->pg_bw_tbl, - pg_help_data->data[entry_joined].pg, pg_joined); - } - /* Joined the entries */ - pg_help_data->num_of_pg--; - } - - return 0; -} - -static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( - struct bnx2x *bp, - struct pg_help_data *pg_help_data, - struct dcbx_ets_feature *ets, - struct cos_help_data *cos_data, - u32 *pg_pri_orginal_spread, - u32 pri_join_mask, - u8 num_of_dif_pri) -{ - u8 i = 0; - u32 pri_tested = 0; - u8 entry = 0; - u8 pg_entry = 0; - bool b_found_strict = false; - u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX; - - cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0; - /* If there are both pauseable and non-pauseable priorities, - * the pauseable priorities go to the first queue and the - * non-pauseable priorities go to the second queue. - */ - if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) - bnx2x_dcbx_separate_pauseable_from_non(bp, - cos_data, pg_pri_orginal_spread, ets); - else { - /* If two BW-limited PG-s were combined to one queue, - * the BW is their sum. - * - * If there are only pauseable priorities or only non-pauseable, - * and there are both BW-limited and non-BW-limited PG-s, - * the BW-limited PG/s go to one queue and the non-BW-limited - * PG/s go to the second queue. - * - * If there are only pauseable priorities or only non-pauseable - * and all are BW limited, then two priorities go to the first - * queue and one priority goes to the second queue. 
- *
- * We will join these two cases:
- * if one is BW limited it will go to the second queue,
- * otherwise the last priority will get it
- */
-
-        cos_data->data[0].pausable = cos_data->data[1].pausable =
-            IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
-
-        for (i = 0 ; i < num_of_pri; i++) {
-            pri_tested = 1 << bp->dcbx_port_params.
-                app.traffic_type_priority[i];
-            pg_entry = (u8)pg_pri_orginal_spread[bp->
-                dcbx_port_params.app.traffic_type_priority[i]];
-
-            if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
-                entry = 0;
-
-                if (i == (num_of_pri-1) &&
-                    false == b_found_strict)
-                    /* last entry is handled separately:
-                     * if no priority is strict, the last
-                     * entry goes to the last queue. */
-                    entry = 1;
-                cos_data->data[entry].pri_join_mask |=
-                    pri_tested;
-                bnx2x_dcbx_add_to_cos_bw(bp,
-                    &cos_data->data[entry],
-                    DCBX_PG_BW_GET(ets->pg_bw_tbl,
-                            pg_entry));
-            } else {
-                b_found_strict = true;
-                cos_data->data[1].pri_join_mask |= pri_tested;
-                /* If we join a group and one is strict
-                 * then the bw rules */
-                cos_data->data[1].strict =
-                    BNX2X_DCBX_STRICT_COS_HIGHEST;
-            }
-        }
-    }
-}
-
-
-static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
-                struct pg_help_data *help_data,
-                struct dcbx_ets_feature *ets,
-                struct cos_help_data *cos_data,
-                u32 *pg_pri_orginal_spread,
-                u32 pri_join_mask,
-                u8 num_of_dif_pri)
-{
-
-    /* default E2 settings */
-    cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
-
-    switch (help_data->num_of_pg) {
-    case 1:
-        bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
-            bp,
-            help_data,
-            cos_data,
-            pri_join_mask,
-            num_of_dif_pri);
-        break;
-    case 2:
-        bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
-            bp,
-            help_data,
-            ets,
-            cos_data,
-            pg_pri_orginal_spread,
-            pri_join_mask,
-            num_of_dif_pri);
-        break;
-
-    case 3:
-        bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
-            bp,
-            help_data,
-            ets,
-            cos_data,
-            pg_pri_orginal_spread,
-            pri_join_mask,
-            num_of_dif_pri);
-        break;
-    default:
-        BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
-        bnx2x_dcbx_ets_disabled_entry_data(bp,
-                cos_data, pri_join_mask);
-    }
-}
-
-static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
-                struct cos_help_data *cos_data,
-                u8 entry,
-                u8 num_spread_of_entries,
-                u8 strict_app_pris)
-{
-    u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
-    u8 num_of_app_pri = MAX_PFC_PRIORITIES;
-    u8 app_pri_bit = 0;
-
-    while (num_spread_of_entries && num_of_app_pri > 0) {
-        app_pri_bit = 1 << (num_of_app_pri - 1);
-        if (app_pri_bit & strict_app_pris) {
-            struct cos_entry_help_data *data = &cos_data->
-                data[entry];
-            num_spread_of_entries--;
-            if (num_spread_of_entries == 0) {
-                /* last entry needed: put all the entries left */
-                data->cos_bw = DCBX_INVALID_COS_BW;
-                data->strict = strict_pri;
-                data->pri_join_mask = strict_app_pris;
-                data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
-                    data->pri_join_mask);
-            } else {
-                strict_app_pris &= ~app_pri_bit;
-
-                data->cos_bw = DCBX_INVALID_COS_BW;
-                data->strict = strict_pri;
-                data->pri_join_mask = app_pri_bit;
-                data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
-                    data->pri_join_mask);
-            }
-
-            strict_pri =
-                BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
-            entry++;
-        }
-
-        num_of_app_pri--;
-    }
-
-    if (num_spread_of_entries)
-        return -EINVAL;
-
-    return 0;
-}
-
-static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
-                struct cos_help_data *cos_data,
-                u8 entry,
-                u8 num_spread_of_entries,
-                u8 strict_app_pris)
-{
-
-    if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
-                num_spread_of_entries,
-                strict_app_pris)) {
-        struct cos_entry_help_data *data =
&cos_data-> - data[entry]; - /* Fill BW entry */ - data->cos_bw = DCBX_INVALID_COS_BW; - data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST; - data->pri_join_mask = strict_app_pris; - data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, - data->pri_join_mask); - return 1; - } - - return num_spread_of_entries; -} - -static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp, - struct pg_help_data *help_data, - struct dcbx_ets_feature *ets, - struct cos_help_data *cos_data, - u32 pri_join_mask) - -{ - u8 need_num_of_entries = 0; - u8 i = 0; - u8 entry = 0; - - /* - * if the number of requested PG-s in CEE is greater than 3 - * then the results are not determined since this is a violation - * of the standard. - */ - if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) { - if (bnx2x_dcbx_join_pgs(bp, ets, help_data, - DCBX_COS_MAX_NUM_E3B0)) { - BNX2X_ERR("Unable to reduce the number of PGs -" - "we will disables ETS\n"); - bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, - pri_join_mask); - return; - } - } - - for (i = 0 ; i < help_data->num_of_pg; i++) { - struct pg_entry_help_data *pg = &help_data->data[i]; - if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) { - struct cos_entry_help_data *data = &cos_data-> - data[entry]; - /* Fill BW entry */ - data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg); - data->strict = BNX2X_DCBX_STRICT_INVALID; - data->pri_join_mask = pg->pg_priority; - data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, - data->pri_join_mask); - - entry++; - } else { - need_num_of_entries = min_t(u8, - (u8)pg->num_of_dif_pri, - (u8)DCBX_COS_MAX_NUM_E3B0 - - help_data->num_of_pg + 1); - /* - * If there are still VOQ-s which have no associated PG, - * then associate these VOQ-s to PG15. These PG-s will - * be used for SP between priorities on PG15. - */ - entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data, - entry, need_num_of_entries, pg->pg_priority); - } - } - - /* the entry will represent the number of COSes used */ - cos_data->num_of_cos = entry; -} -static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, - struct pg_help_data *help_data, - struct dcbx_ets_feature *ets, - u32 *pg_pri_orginal_spread) -{ - struct cos_help_data cos_data; - u8 i = 0; - u32 pri_join_mask = 0; - u8 num_of_dif_pri = 0; - - memset(&cos_data, 0, sizeof(cos_data)); - - /* Validate the pg value */ - for (i = 0; i < help_data->num_of_pg ; i++) { - if (DCBX_STRICT_PRIORITY != help_data->data[i].pg && - DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg) - BNX2X_ERR("Invalid pg[%d] data %x\n", i, - help_data->data[i].pg); - pri_join_mask |= help_data->data[i].pg_priority; - num_of_dif_pri += help_data->data[i].num_of_dif_pri; - } - - /* defaults */ - cos_data.num_of_cos = 1; - for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) { - cos_data.data[i].pri_join_mask = 0; - cos_data.data[i].pausable = false; - cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID; - cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW; - } - - if (CHIP_IS_E3B0(bp)) - bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets, - &cos_data, pri_join_mask); - else /* E2 + E3A0 */ - bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp, - help_data, ets, - &cos_data, - pg_pri_orginal_spread, - pri_join_mask, - num_of_dif_pri); - - for (i = 0; i < cos_data.num_of_cos ; i++) { - struct bnx2x_dcbx_cos_params *p = - &bp->dcbx_port_params.ets.cos_params[i]; - - p->strict = cos_data.data[i].strict; - p->bw_tbl = cos_data.data[i].cos_bw; - p->pri_bitmask = cos_data.data[i].pri_join_mask; - p->pauseable = cos_data.data[i].pausable; - - /* sanity */ - if (p->bw_tbl != DCBX_INVALID_COS_BW 
-static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
-				       struct pg_help_data *help_data,
-				       struct dcbx_ets_feature *ets,
-				       u32 *pg_pri_orginal_spread)
-{
-	struct cos_help_data cos_data;
-	u8 i = 0;
-	u32 pri_join_mask = 0;
-	u8 num_of_dif_pri = 0;
-
-	memset(&cos_data, 0, sizeof(cos_data));
-
-	/* Validate the pg value */
-	for (i = 0; i < help_data->num_of_pg; i++) {
-		if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
-		    DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
-			BNX2X_ERR("Invalid pg[%d] data %x\n", i,
-				  help_data->data[i].pg);
-		pri_join_mask |= help_data->data[i].pg_priority;
-		num_of_dif_pri += help_data->data[i].num_of_dif_pri;
-	}
-
-	/* defaults */
-	cos_data.num_of_cos = 1;
-	for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) {
-		cos_data.data[i].pri_join_mask = 0;
-		cos_data.data[i].pausable = false;
-		cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID;
-		cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
-	}
-
-	if (CHIP_IS_E3B0(bp))
-		bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets,
-					       &cos_data, pri_join_mask);
-	else /* E2 + E3A0 */
-		bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp,
-							  help_data, ets,
-							  &cos_data,
-							  pg_pri_orginal_spread,
-							  pri_join_mask,
-							  num_of_dif_pri);
-
-	for (i = 0; i < cos_data.num_of_cos; i++) {
-		struct bnx2x_dcbx_cos_params *p =
-			&bp->dcbx_port_params.ets.cos_params[i];
-
-		p->strict = cos_data.data[i].strict;
-		p->bw_tbl = cos_data.data[i].cos_bw;
-		p->pri_bitmask = cos_data.data[i].pri_join_mask;
-		p->pauseable = cos_data.data[i].pausable;
-
-		/* sanity */
-		if (p->bw_tbl != DCBX_INVALID_COS_BW ||
-		    p->strict != BNX2X_DCBX_STRICT_INVALID) {
-			if (p->pri_bitmask == 0)
-				BNX2X_ERR("Invalid pri_bitmask for %d\n", i);
-
-			if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) {
-
-				if (p->pauseable &&
-				    DCBX_PFC_PRI_GET_NON_PAUSE(bp,
-						p->pri_bitmask) != 0)
-					BNX2X_ERR("Inconsistent config for "
-						  "pausable COS %d\n", i);
-
-				if (!p->pauseable &&
-				    DCBX_PFC_PRI_GET_PAUSE(bp,
-						p->pri_bitmask) != 0)
-					BNX2X_ERR("Inconsistent config for "
-						  "nonpausable COS %d\n", i);
-			}
-		}
-
-		if (p->pauseable)
-			DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
-			   i, cos_data.data[i].pri_join_mask);
-		else
-			DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
-			   "0x%x\n",
-			   i, cos_data.data[i].pri_join_mask);
-	}
-
-	bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
-}
-
-static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
-					  u32 *set_configuration_ets_pg,
-					  u32 *pri_pg_tbl)
-{
-	int i;
-
-	for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
-		set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
-
-		DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
-		   i, set_configuration_ets_pg[i]);
-	}
-}
-
-static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
-				 struct bnx2x_func_tx_start_params *pfc_fw_cfg)
-{
-	u16 pri_bit = 0;
-	u8 cos = 0, pri = 0;
-	struct priority_cos *tt2cos;
-	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
-
-	memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
-
-	/* to disable DCB - the structure must be zeroed */
-	if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)
-		return;
-
-	/* shortcut */
-	tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
-
-	/* Fw version should be incremented each update */
-	pfc_fw_cfg->dcb_version = ++bp->dcb_version;
-	pfc_fw_cfg->dcb_enabled = 1;
-
-	/* Fill priority parameters */
-	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
-		tt2cos[pri].priority = ttp[pri];
-		pri_bit = 1 << tt2cos[pri].priority;
-
-		/* Fill COS parameters based on COS calculated to
-		 * make it more general for future use */
-		for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
-			if (bp->dcbx_port_params.ets.cos_params[cos].
-			    pri_bitmask & pri_bit)
-				tt2cos[pri].cos = cos;
-	}
-
-	/* we never want the FW to add a 0 vlan tag */
-	pfc_fw_cfg->dont_add_pri_0_en = 1;
-
-	bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
-}
-
-void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
-{
-	/* if we need to synchronize DCBX result from prev PMF
-	 * read it from shmem and update bp accordingly
-	 */
-	if (SHMEM2_HAS(bp, drv_flags) &&
-	    GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
-		/* Read neg results if dcbx is in the FW */
-		if (bnx2x_dcbx_read_shmem_neg_results(bp))
-			return;
-
-		bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
-					  bp->dcbx_error);
-		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
-					 bp->dcbx_error);
-	}
-}
-
-/* DCB netlink */
-#ifdef BCM_DCBNL
-
-#define BNX2X_DCBX_CAPS	(DCB_CAP_DCBX_LLD_MANAGED | \
-			DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
-
-static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
-{
-	/* validate dcbnl call that may change HW state:
-	 * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
-	 */
-	return bp->dcb_state && bp->dcbx_mode_uset;
-}
-
-static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
-	return bp->dcb_state;
-}
-
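bnx2x_dcbx_fw_struct above resolves each traffic type to a COS by testing the traffic type's priority bit against every COS's pri_bitmask; because the scan has no early exit, the last matching COS wins. The same lookup in isolation (a sketch, not the firmware structures):

	#include <stdint.h>

	/* return the index of the last COS whose priority bitmask contains
	 * 'pri', or 0 if no COS claims it - the scan bnx2x_dcbx_fw_struct
	 * performs */
	static int pri_to_cos(const uint32_t *cos_pri_bitmask, int num_of_cos,
			      uint8_t pri)
	{
		uint16_t pri_bit = 1 << pri;
		int cos, found = 0;

		for (cos = 0; cos < num_of_cos; cos++)
			if (cos_pri_bitmask[cos] & pri_bit)
				found = cos;	/* last match wins */
		return found;
	}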
"on" : "off"); - - bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled); - return 0; -} - -static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev, - u8 *perm_addr) -{ - struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n"); - - /* first the HW mac address */ - memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); - -#ifdef BCM_CNIC - /* second SAN address */ - memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len); -#endif -} - -static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, - u8 prio_type, u8 pgid, u8 bw_pct, - u8 up_map) -{ - struct bnx2x *bp = netdev_priv(netdev); - - DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid); - if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) - return; - - /** - * bw_pct ingnored - band-width percentage devision between user - * priorities within the same group is not - * standard and hence not supported - * - * prio_type igonred - priority levels within the same group are not - * standard and hence are not supported. According - * to the standard pgid 15 is dedicated to strict - * prioirty traffic (on the port level). - * - * up_map ignored - */ - - bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid; - bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; -} - -static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev, - int pgid, u8 bw_pct) -{ - struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct); - - if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) - return; - - bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct; - bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; -} - -static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio, - u8 prio_type, u8 pgid, u8 bw_pct, - u8 up_map) -{ - struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n"); -} - -static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev, - int pgid, u8 bw_pct) -{ - struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n"); -} - -static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, - u8 *prio_type, u8 *pgid, u8 *bw_pct, - u8 *up_map) -{ - struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "prio = %d\n", prio); - - /** - * bw_pct ingnored - band-width percentage devision between user - * priorities within the same group is not - * standard and hence not supported - * - * prio_type igonred - priority levels within the same group are not - * standard and hence are not supported. According - * to the standard pgid 15 is dedicated to strict - * prioirty traffic (on the port level). 
-static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
-					u8 *prio_type, u8 *pgid, u8 *bw_pct,
-					u8 *up_map)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	DP(NETIF_MSG_LINK, "prio = %d\n", prio);
-
-	/**
-	 * bw_pct ignored -	bandwidth percentage division between user
-	 *			priorities within the same group is not
-	 *			standard and hence not supported
-	 *
-	 * prio_type ignored -	priority levels within the same group are not
-	 *			standard and hence are not supported. According
-	 *			to the standard pgid 15 is dedicated to strict
-	 *			priority traffic (on the port level).
-	 *
-	 * up_map ignored
-	 */
-	*up_map = *bw_pct = *prio_type = *pgid = 0;
-
-	if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
-		return;
-
-	*pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
-}
-
-static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
-					 int pgid, u8 *bw_pct)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
-
-	*bw_pct = 0;
-
-	if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
-		return;
-
-	*bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
-}
-
-static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
-					u8 *prio_type, u8 *pgid, u8 *bw_pct,
-					u8 *up_map)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
-
-	*prio_type = *pgid = *bw_pct = *up_map = 0;
-}
-
-static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
-					 int pgid, u8 *bw_pct)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
-
-	*bw_pct = 0;
-}
-
-static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
-				    u8 setting)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
-
-	if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
-		return;
-
-	bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
-
-	if (setting)
-		bp->dcbx_config_params.admin_pfc_tx_enable = 1;
-}
-
-static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
-				    u8 *setting)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	DP(NETIF_MSG_LINK, "prio = %d\n", prio);
-
-	*setting = 0;
-
-	if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
-		return;
-
-	*setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
-}
-
-static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	int rc = 0;
-
-	DP(NETIF_MSG_LINK, "SET-ALL\n");
-
-	if (!bnx2x_dcbnl_set_valid(bp))
-		return 1;
-
-	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-		netdev_err(bp->dev, "Handling parity error recovery. "
-				    "Try again later\n");
-		return 1;
-	}
-	if (netif_running(bp->dev)) {
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
-	}
-	DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
-	if (rc)
-		return 1;
-
-	return 0;
-}
-
-static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	u8 rval = 0;
-
-	if (bp->dcb_state) {
-		switch (capid) {
-		case DCB_CAP_ATTR_PG:
-			*cap = true;
-			break;
-		case DCB_CAP_ATTR_PFC:
-			*cap = true;
-			break;
-		case DCB_CAP_ATTR_UP2TC:
-			*cap = false;
-			break;
-		case DCB_CAP_ATTR_PG_TCS:
-			*cap = 0x80;	/* 8 priorities for PGs */
-			break;
-		case DCB_CAP_ATTR_PFC_TCS:
-			*cap = 0x80;	/* 8 priorities for PFC */
-			break;
-		case DCB_CAP_ATTR_GSP:
-			*cap = true;
-			break;
-		case DCB_CAP_ATTR_BCN:
-			*cap = false;
-			break;
-		case DCB_CAP_ATTR_DCBX:
-			*cap = BNX2X_DCBX_CAPS;
-			break;
-		default:
-			rval = -EINVAL;
-			break;
-		}
-	} else
-		rval = -EINVAL;
-
-	DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
-	return rval;
-}
-
-static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
-{
-	struct bnx2x *bp = netdev_priv(netdev);
-	u8 rval = 0;
-
-	DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
-
-	if (bp->dcb_state) {
-		switch (tcid) {
-		case DCB_NUMTCS_ATTR_PG:
-			*num = CHIP_IS_E3B0(bp) ?
DCBX_COS_MAX_NUM_E3B0 : - DCBX_COS_MAX_NUM_E2; - break; - case DCB_NUMTCS_ATTR_PFC: - *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : - DCBX_COS_MAX_NUM_E2; - break; - default: - rval = -EINVAL; - break; - } - } else - rval = -EINVAL; - - return rval; -} - -static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) -{ - struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num); - return -EINVAL; -} - -static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) -{ - struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); - - if (!bp->dcb_state) - return 0; - - return bp->dcbx_local_feat.pfc.enabled; -} - -static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state) -{ - struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off"); - - if (!bnx2x_dcbnl_set_valid(bp)) - return; - - bp->dcbx_config_params.admin_pfc_tx_enable = - bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0); -} - -static void bnx2x_admin_app_set_ent( - struct bnx2x_admin_priority_app_table *app_ent, - u8 idtype, u16 idval, u8 up) -{ - app_ent->valid = 1; - - switch (idtype) { - case DCB_APP_IDTYPE_ETHTYPE: - app_ent->traffic_type = TRAFFIC_TYPE_ETH; - break; - case DCB_APP_IDTYPE_PORTNUM: - app_ent->traffic_type = TRAFFIC_TYPE_PORT; - break; - default: - break; /* never gets here */ - } - app_ent->app_id = idval; - app_ent->priority = up; -} - -static bool bnx2x_admin_app_is_equal( - struct bnx2x_admin_priority_app_table *app_ent, - u8 idtype, u16 idval) -{ - if (!app_ent->valid) - return false; - - switch (idtype) { - case DCB_APP_IDTYPE_ETHTYPE: - if (app_ent->traffic_type != TRAFFIC_TYPE_ETH) - return false; - break; - case DCB_APP_IDTYPE_PORTNUM: - if (app_ent->traffic_type != TRAFFIC_TYPE_PORT) - return false; - break; - default: - return false; - } - if (app_ent->app_id != idval) - return false; - - return true; -} - -static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) -{ - int i, ff; - - /* iterate over the app entries looking for idtype and idval */ - for (i = 0, ff = -1; i < 4; i++) { - struct bnx2x_admin_priority_app_table *app_ent = - &bp->dcbx_config_params.admin_priority_app_table[i]; - if (bnx2x_admin_app_is_equal(app_ent, idtype, idval)) - break; - - if (ff < 0 && !app_ent->valid) - ff = i; - } - if (i < 4) - /* if found overwrite up */ - bp->dcbx_config_params. 
- admin_priority_app_table[i].priority = up; - else if (ff >= 0) - /* not found use first-free */ - bnx2x_admin_app_set_ent( - &bp->dcbx_config_params.admin_priority_app_table[ff], - idtype, idval, up); - else - /* app table is full */ - return -EBUSY; - - /* up configured, if not 0 make sure feature is enabled */ - if (up) - bp->dcbx_config_params.admin_application_priority_tx_enable = 1; - - return 0; -} - -static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, - u16 idval, u8 up) -{ - struct bnx2x *bp = netdev_priv(netdev); - - DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n", - idtype, idval, up); - - if (!bnx2x_dcbnl_set_valid(bp)) - return -EINVAL; - - /* verify idtype */ - switch (idtype) { - case DCB_APP_IDTYPE_ETHTYPE: - case DCB_APP_IDTYPE_PORTNUM: - break; - default: - return -EINVAL; - } - return bnx2x_set_admin_app_up(bp, idtype, idval, up); -} - -static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) -{ - struct bnx2x *bp = netdev_priv(netdev); - u8 state; - - state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE; - - if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF) - state |= DCB_CAP_DCBX_STATIC; - - return state; -} - -static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state) -{ - struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "state = %02x\n", state); - - /* set dcbx mode */ - - if ((state & BNX2X_DCBX_CAPS) != state) { - BNX2X_ERR("Requested DCBX mode %x is beyond advertised " - "capabilities\n", state); - return 1; - } - - if (bp->dcb_state != BNX2X_DCB_STATE_ON) { - BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n"); - return 1; - } - - if (state & DCB_CAP_DCBX_STATIC) - bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF; - else - bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON; - - bp->dcbx_mode_uset = true; - return 0; -} - -static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, - u8 *flags) -{ - struct bnx2x *bp = netdev_priv(netdev); - u8 rval = 0; - - DP(NETIF_MSG_LINK, "featid %d\n", featid); - - if (bp->dcb_state) { - *flags = 0; - switch (featid) { - case DCB_FEATCFG_ATTR_PG: - if (bp->dcbx_local_feat.ets.enabled) - *flags |= DCB_FEATCFG_ENABLE; - if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) - *flags |= DCB_FEATCFG_ERROR; - break; - case DCB_FEATCFG_ATTR_PFC: - if (bp->dcbx_local_feat.pfc.enabled) - *flags |= DCB_FEATCFG_ENABLE; - if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | - DCBX_LOCAL_PFC_MISMATCH)) - *flags |= DCB_FEATCFG_ERROR; - break; - case DCB_FEATCFG_ATTR_APP: - if (bp->dcbx_local_feat.app.enabled) - *flags |= DCB_FEATCFG_ENABLE; - if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | - DCBX_LOCAL_APP_MISMATCH)) - *flags |= DCB_FEATCFG_ERROR; - break; - default: - rval = -EINVAL; - break; - } - } else - rval = -EINVAL; - - return rval; -} - -static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, - u8 flags) -{ - struct bnx2x *bp = netdev_priv(netdev); - u8 rval = 0; - - DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags); - - /* ignore the 'advertise' flag */ - if (bnx2x_dcbnl_set_valid(bp)) { - switch (featid) { - case DCB_FEATCFG_ATTR_PG: - bp->dcbx_config_params.admin_ets_enable = - flags & DCB_FEATCFG_ENABLE ? 1 : 0; - bp->dcbx_config_params.admin_ets_willing = - flags & DCB_FEATCFG_WILLING ? 1 : 0; - break; - case DCB_FEATCFG_ATTR_PFC: - bp->dcbx_config_params.admin_pfc_enable = - flags & DCB_FEATCFG_ENABLE ? 1 : 0; - bp->dcbx_config_params.admin_pfc_willing = - flags & DCB_FEATCFG_WILLING ? 
1 : 0; - break; - case DCB_FEATCFG_ATTR_APP: - /* ignore enable, always enabled */ - bp->dcbx_config_params.admin_app_priority_willing = - flags & DCB_FEATCFG_WILLING ? 1 : 0; - break; - default: - rval = -EINVAL; - break; - } - } else - rval = -EINVAL; - - return rval; -} - -static int bnx2x_peer_appinfo(struct net_device *netdev, - struct dcb_peer_app_info *info, u16* app_count) -{ - int i; - struct bnx2x *bp = netdev_priv(netdev); - - DP(NETIF_MSG_LINK, "APP-INFO\n"); - - info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0; - info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0; - *app_count = 0; - - for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) - if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield & - DCBX_APP_ENTRY_VALID) - (*app_count)++; - return 0; -} - -static int bnx2x_peer_apptable(struct net_device *netdev, - struct dcb_app *table) -{ - int i, j; - struct bnx2x *bp = netdev_priv(netdev); - - DP(NETIF_MSG_LINK, "APP-TABLE\n"); - - for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { - struct dcbx_app_priority_entry *ent = - &bp->dcbx_remote_feat.app.app_pri_tbl[i]; - - if (ent->appBitfield & DCBX_APP_ENTRY_VALID) { - table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent); - table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent); - table[j++].protocol = ent->app_id; - } - } - return 0; -} - -static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg) -{ - int i; - struct bnx2x *bp = netdev_priv(netdev); - - pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0; - - for (i = 0; i < CEE_DCBX_MAX_PGS; i++) { - pg->pg_bw[i] = - DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i); - pg->prio_pg[i] = - DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i); - } - return 0; -} - -static int bnx2x_cee_peer_getpfc(struct net_device *netdev, - struct cee_pfc *pfc) -{ - struct bnx2x *bp = netdev_priv(netdev); - pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps; - pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap; - return 0; -} - -const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = { - .getstate = bnx2x_dcbnl_get_state, - .setstate = bnx2x_dcbnl_set_state, - .getpermhwaddr = bnx2x_dcbnl_get_perm_hw_addr, - .setpgtccfgtx = bnx2x_dcbnl_set_pg_tccfg_tx, - .setpgbwgcfgtx = bnx2x_dcbnl_set_pg_bwgcfg_tx, - .setpgtccfgrx = bnx2x_dcbnl_set_pg_tccfg_rx, - .setpgbwgcfgrx = bnx2x_dcbnl_set_pg_bwgcfg_rx, - .getpgtccfgtx = bnx2x_dcbnl_get_pg_tccfg_tx, - .getpgbwgcfgtx = bnx2x_dcbnl_get_pg_bwgcfg_tx, - .getpgtccfgrx = bnx2x_dcbnl_get_pg_tccfg_rx, - .getpgbwgcfgrx = bnx2x_dcbnl_get_pg_bwgcfg_rx, - .setpfccfg = bnx2x_dcbnl_set_pfc_cfg, - .getpfccfg = bnx2x_dcbnl_get_pfc_cfg, - .setall = bnx2x_dcbnl_set_all, - .getcap = bnx2x_dcbnl_get_cap, - .getnumtcs = bnx2x_dcbnl_get_numtcs, - .setnumtcs = bnx2x_dcbnl_set_numtcs, - .getpfcstate = bnx2x_dcbnl_get_pfc_state, - .setpfcstate = bnx2x_dcbnl_set_pfc_state, - .setapp = bnx2x_dcbnl_set_app_up, - .getdcbx = bnx2x_dcbnl_get_dcbx, - .setdcbx = bnx2x_dcbnl_set_dcbx, - .getfeatcfg = bnx2x_dcbnl_get_featcfg, - .setfeatcfg = bnx2x_dcbnl_set_featcfg, - .peer_getappinfo = bnx2x_peer_appinfo, - .peer_getapptable = bnx2x_peer_apptable, - .cee_peer_getpg = bnx2x_cee_peer_getpg, - .cee_peer_getpfc = bnx2x_cee_peer_getpfc, -}; - -#endif /* BCM_DCBNL */ diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h deleted file mode 100644 index 2c6a3bc..0000000 --- a/drivers/net/bnx2x/bnx2x_dcb.h +++ /dev/null @@ -1,203 +0,0 @@ -/* bnx2x_dcb.h: Broadcom Everest network driver. 
- *
- * Copyright 2009-2011 Broadcom Corporation
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *
- * Maintained by: Eilon Greenstein
- * Written by: Dmitry Kravkov
- *
- */
-#ifndef BNX2X_DCB_H
-#define BNX2X_DCB_H
-
-#include "bnx2x_hsi.h"
-
-#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
-struct bnx2x_dcbx_app_params {
-	u32 enabled;
-	u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
-};
-
-#define DCBX_COS_MAX_NUM_E2	DCBX_E2E3_MAX_NUM_COS
-/* bnx2x currently limits the number of supported COSes to 3, to be extended
- * to 6 */
-#define BNX2X_MAX_COS_SUPPORT	3
-#define DCBX_COS_MAX_NUM_E3B0	BNX2X_MAX_COS_SUPPORT
-#define DCBX_COS_MAX_NUM	BNX2X_MAX_COS_SUPPORT
-
-struct bnx2x_dcbx_cos_params {
-	u32	bw_tbl;
-	u32	pri_bitmask;
-	/*
-	 * strict priority: valid values are 0..5; 0 is highest priority.
-	 * There can't be two COSes with the same priority.
-	 */
-	u8	strict;
-#define BNX2X_DCBX_STRICT_INVALID			DCBX_COS_MAX_NUM
-#define BNX2X_DCBX_STRICT_COS_HIGHEST			0
-#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp)	((sp) + 1)
-	u8	pauseable;
-};
-
-struct bnx2x_dcbx_pg_params {
-	u32 enabled;
-	u8 num_of_cos; /* valid COS entries */
-	struct bnx2x_dcbx_cos_params	cos_params[DCBX_COS_MAX_NUM];
-};
-
-struct bnx2x_dcbx_pfc_params {
-	u32 enabled;
-	u32 priority_non_pauseable_mask;
-};
-
-struct bnx2x_dcbx_port_params {
-	struct bnx2x_dcbx_pfc_params pfc;
-	struct bnx2x_dcbx_pg_params  ets;
-	struct bnx2x_dcbx_app_params app;
-};
-
-#define BNX2X_DCBX_CONFIG_INV_VALUE			(0xFFFFFFFF)
-#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE		0
-#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE		1
-#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID	(BNX2X_DCBX_CONFIG_INV_VALUE)
-#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
-				  (bp)->dcbx_port_params.ets.enabled)
-
-struct bnx2x_config_lldp_params {
-	u32 overwrite_settings;
-	u32 msg_tx_hold;
-	u32 msg_fast_tx;
-	u32 tx_credit_max;
-	u32 msg_tx_interval;
-	u32 tx_fast;
-};
-
-struct bnx2x_admin_priority_app_table {
-	u32 valid;
-	u32 priority;
-#define INVALID_TRAFFIC_TYPE_PRIORITY	(0xFFFFFFFF)
-	u32 traffic_type;
-#define TRAFFIC_TYPE_ETH	0
-#define TRAFFIC_TYPE_PORT	1
-	u32 app_id;
-};
-
-struct bnx2x_config_dcbx_params {
-	u32 overwrite_settings;
-	u32 admin_dcbx_version;
-	u32 admin_ets_enable;
-	u32 admin_pfc_enable;
-	u32 admin_tc_supported_tx_enable;
-	u32 admin_ets_configuration_tx_enable;
-	u32 admin_ets_recommendation_tx_enable;
-	u32 admin_pfc_tx_enable;
-	u32 admin_application_priority_tx_enable;
-	u32 admin_ets_willing;
-	u32 admin_ets_reco_valid;
-	u32 admin_pfc_willing;
-	u32 admin_app_priority_willing;
-	u32 admin_configuration_bw_precentage[8];
-	u32 admin_configuration_ets_pg[8];
-	u32 admin_recommendation_bw_precentage[8];
-	u32 admin_recommendation_ets_pg[8];
-	u32 admin_pfc_bitmap;
-	struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
-	u32 admin_default_priority;
-};
-
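bnx2x_admin_app_set_ent earlier in this patch fills one slot of admin_priority_app_table[]. For illustration, an entry pinning FCoE traffic (ethertype 0x8906, ETH_TYPE_FCOE below) to priority 3 would look like this (example values, not a recommended configuration):

	struct bnx2x_admin_priority_app_table fcoe_ent = {
		.valid		= 1,
		.priority	= 3,			/* 802.1p user priority */
		.traffic_type	= TRAFFIC_TYPE_ETH,	/* keyed by ethertype */
		.app_id		= 0x8906,		/* ETH_TYPE_FCOE */
	};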
-#define GET_FLAGS(flags, bits)		((flags) & (bits))
-#define SET_FLAGS(flags, bits)		((flags) |= (bits))
-#define RESET_FLAGS(flags, bits)	((flags) &= ~(bits))
-
-enum {
-	DCBX_READ_LOCAL_MIB,
-	DCBX_READ_REMOTE_MIB
-};
-
-#define ETH_TYPE_FCOE		(0x8906)
-#define TCP_PORT_ISCSI		(0xCBC)
-
-#define PFC_VALUE_FRAME_SIZE				(512)
-#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed)  \
-				((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
-
-#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD		130
-#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD		170
-
-
-
-struct cos_entry_help_data {
-	u32	pri_join_mask;
-	u32	cos_bw;
-	u8	strict;
-	bool	pausable;
-};
-
-struct cos_help_data {
-	struct cos_entry_help_data	data[DCBX_COS_MAX_NUM];
-	u8				num_of_cos;
-};
-
-#define DCBX_ILLEGAL_PG				(0xFF)
-#define DCBX_PFC_PRI_MASK			(0xFF)
-#define DCBX_STRICT_PRIORITY			(15)
-#define DCBX_INVALID_COS_BW			(0xFFFFFFFF)
-#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp)		\
-			((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
-#define DCBX_PFC_PRI_PAUSE_MASK(bp)		\
-					((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
-#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri)	\
-				((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
-#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri)	\
-			(DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
-#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri)	\
-			(0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
-#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri)	\
-			(pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
-#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
-			((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
-#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri)	\
-			(!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
-			 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
-
-
-struct pg_entry_help_data {
-	u8	num_of_dif_pri;
-	u8	pg;
-	u32	pg_priority;
-};
-
-struct pg_help_data {
-	struct pg_entry_help_data	data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
-	u8				num_of_pg;
-};
-
-/* forward DCB/PFC related declarations */
-struct bnx2x;
-void bnx2x_dcbx_update(struct work_struct *work);
-void bnx2x_dcbx_init_params(struct bnx2x *bp);
-void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
-
-enum {
-	BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
-	BNX2X_DCBX_STATE_TX_PAUSED,
-	BNX2X_DCBX_STATE_TX_RELEASED
-};
-
-void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
-void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
-/* DCB netlink */
-#ifdef BCM_DCBNL
-extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
-int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
-#endif /* BCM_DCBNL */
-
-#endif /* BNX2X_DCB_H */
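The DCBX_PFC_PRI_* macros in bnx2x_dcb.h split a PG's priority bitmask against the port's non-pauseable mask. A worked case, assuming priority_non_pauseable_mask is 0x0f (u8 is the kernel's unsigned 8-bit type):

	u8 non_pause_mask = 0x0f;			/* priorities 0-3 */
	u8 pause_mask     = (u8)~non_pause_mask;	/* 0xf0: priorities 4-7 */
	u8 pg_pri         = 0x33;			/* priorities 0, 1, 4, 5 */
	u8 pause          = pg_pri & pause_mask;	/* 0x30 */
	u8 non_pause      = pg_pri & non_pause_mask;	/* 0x03 */
	/* both parts are non-zero, so IS_DCBX_PFC_PRI_MIX_PAUSE() holds */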
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
deleted file mode 100644
index b983825..0000000
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ /dev/null
@@ -1,1156 +0,0 @@
-/* bnx2x_dump.h: Broadcom Everest network driver.
- *
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- */
-
-
-/* This struct holds a signature to ensure the dump returned from the driver
- * matches the metadata file inserted into grc_dump.tcl
- * The signature is time stamp, diag version and grc_dump version
- */
-
-#ifndef BNX2X_DUMP_H
-#define BNX2X_DUMP_H
-
-
-
-/* definitions */
-#define XSTORM_WAITP_ADDR	0x2b8a80
-#define TSTORM_WAITP_ADDR	0x1b8a80
-#define USTORM_WAITP_ADDR	0x338a80
-#define CSTORM_WAITP_ADDR	0x238a80
-#define TSTORM_CAM_MODE		0x1B1440
-
-#define MAX_TIMER_PENDING	200
-#define TIMER_SCAN_DONT_CARE	0xFF
-#define RI_E1			0x1
-#define RI_E1H			0x2
-#define RI_E2			0x4
-#define RI_E3			0x8
-#define RI_E3B0			0x10
-#define RI_ONLINE		0x100
-#define RI_OFFLINE		0x0
-#define RI_PATH0_DUMP		0x200
-#define RI_PATH1_DUMP		0x400
-
-#define RI_E1_ONLINE		(RI_E1 | RI_ONLINE)
-#define RI_E1H_ONLINE		(RI_E1H | RI_ONLINE)
-#define RI_E1E1H_ONLINE		(RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E2_ONLINE		(RI_E2 | RI_ONLINE)
-#define RI_E1E2_ONLINE		(RI_E1 | RI_E2 | RI_ONLINE)
-#define RI_E1HE2_ONLINE		(RI_E1H | RI_E2 | RI_ONLINE)
-#define RI_E1E1HE2_ONLINE	(RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-#define RI_E3_ONLINE		(RI_E3 | RI_ONLINE)
-#define RI_E1E3_ONLINE		(RI_E1 | RI_E3 | RI_ONLINE)
-#define RI_E1HE3_ONLINE		(RI_E1H | RI_E3 | RI_ONLINE)
-#define RI_E1E1HE3_ONLINE	(RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
-#define RI_E2E3_ONLINE		(RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1E2E3_ONLINE	(RI_E1 | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1HE2E3_ONLINE	(RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1E1HE2E3_ONLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E3B0_ONLINE		(RI_E3B0 | RI_ONLINE)
-#define RI_E1E3B0_ONLINE	(RI_E1 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE3B0_ONLINE	(RI_E1H | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE3B0_ONLINE	(RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE)
-#define RI_E2E3B0_ONLINE	(RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E2E3B0_ONLINE	(RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE2E3B0_ONLINE	(RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE2E3B0_ONLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E3E3B0_ONLINE	(RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E3E3B0_ONLINE	(RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE3E3B0_ONLINE	(RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE3E3B0_ONLINE	(RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E2E3E3B0_ONLINE	(RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E2E3E3B0_ONLINE	(RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE2E3E3B0_ONLINE	(RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE2E3E3B0_ONLINE \
-	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1_OFFLINE		(RI_E1 | RI_OFFLINE)
-#define RI_E1H_OFFLINE		(RI_E1H | RI_OFFLINE)
-#define RI_E1E1H_OFFLINE	(RI_E1 | RI_E1H | RI_OFFLINE)
-#define RI_E2_OFFLINE		(RI_E2 | RI_OFFLINE)
-#define RI_E1E2_OFFLINE		(RI_E1 | RI_E2 | RI_OFFLINE)
-#define RI_E1HE2_OFFLINE	(RI_E1H | RI_E2 | RI_OFFLINE)
-#define RI_E1E1HE2_OFFLINE	(RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE)
-#define RI_E3_OFFLINE		(RI_E3 | RI_OFFLINE)
-#define RI_E1E3_OFFLINE		(RI_E1 | RI_E3 | RI_OFFLINE)
-#define RI_E1HE3_OFFLINE	(RI_E1H | RI_E3 | RI_OFFLINE)
-#define RI_E1E1HE3_OFFLINE	(RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE)
-#define RI_E2E3_OFFLINE		(RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1E2E3_OFFLINE	(RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1HE2E3_OFFLINE	(RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1E1HE2E3_OFFLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E3B0_OFFLINE		(RI_E3B0 | RI_OFFLINE)
-#define RI_E1E3B0_OFFLINE
(RI_E1 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE) -#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE) -#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_E1E1HE2E3E3B0_OFFLINE \ - (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) -#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE -#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE - -#define DBG_DMP_TRACE_BUFFER_SIZE 0x800 -#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \ - ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE) - -struct dump_sign { - u32 time_stamp; - u32 diag_ver; - u32 grc_dump_ver; -}; - -struct dump_hdr { - u32 hdr_size; /* in dwords, excluding this field */ - struct dump_sign dump_sign; - u32 xstorm_waitp; - u32 tstorm_waitp; - u32 ustorm_waitp; - u32 cstorm_waitp; - u16 info; - u8 idle_chk; - u8 reserved; -}; - -struct reg_addr { - u32 addr; - u32 size; - u16 info; -}; - -struct wreg_addr { - u32 addr; - u32 size; - u32 read_regs_count; - const u32 *read_regs; - u16 info; -}; - -static const struct reg_addr reg_addrs[] = { - { 0x2000, 341, RI_ALL_ONLINE }, - { 0x2800, 103, RI_ALL_ONLINE }, - { 0x3000, 287, RI_ALL_ONLINE }, - { 0x3800, 331, RI_ALL_ONLINE }, - { 0x8800, 6, RI_ALL_ONLINE }, - { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x9000, 147, RI_E2E3E3B0_ONLINE }, - { 0x924c, 1, RI_E2_ONLINE }, - { 0x9250, 16, RI_E2E3E3B0_ONLINE }, - { 0x9400, 33, RI_E2E3E3B0_ONLINE }, - { 0x9484, 5, RI_E3E3B0_ONLINE }, - { 0xa000, 27, RI_ALL_ONLINE }, - { 0xa06c, 1, RI_E1E1H_ONLINE }, - { 0xa070, 71, RI_ALL_ONLINE }, - { 0xa18c, 4, RI_E1E1H_ONLINE }, - { 0xa19c, 62, RI_ALL_ONLINE }, - { 0xa294, 2, RI_E1E1H_ONLINE }, - { 0xa29c, 2, RI_ALL_ONLINE }, - { 0xa2a4, 2, RI_E1E1HE2_ONLINE }, - { 0xa2ac, 52, RI_ALL_ONLINE }, - { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3b8, 2, RI_E3E3B0_ONLINE }, - { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa400, 40, RI_ALL_ONLINE }, - { 0xa4a0, 1, RI_E1E1HE2_ONLINE }, - { 0xa4a4, 2, RI_ALL_ONLINE }, - { 0xa4ac, 2, RI_E1E1H_ONLINE }, - { 0xa4b4, 1, RI_E1E1HE2_ONLINE }, - { 0xa4b8, 2, RI_E1E1H_ONLINE }, - { 0xa4c0, 3, RI_ALL_ONLINE }, - { 0xa4cc, 5, RI_E1E1H_ONLINE }, - { 0xa4e0, 3, RI_ALL_ONLINE }, - { 0xa4fc, 2, RI_ALL_ONLINE }, - { 0xa504, 1, RI_E1E1H_ONLINE }, - { 0xa508, 3, RI_ALL_ONLINE }, - { 0xa518, 1, RI_ALL_ONLINE }, - { 0xa520, 1, RI_ALL_ONLINE }, - { 0xa528, 1, RI_ALL_ONLINE }, - { 0xa530, 1, RI_ALL_ONLINE }, - { 0xa538, 1, RI_ALL_ONLINE }, - { 0xa540, 1, RI_ALL_ONLINE }, - { 0xa548, 1, RI_E1E1H_ONLINE }, - { 0xa550, 1, RI_E1E1H_ONLINE }, - { 0xa558, 1, RI_E1E1H_ONLINE }, - { 
0xa560, 1, RI_E1E1H_ONLINE }, - { 0xa568, 1, RI_E1E1H_ONLINE }, - { 0xa570, 1, RI_ALL_ONLINE }, - { 0xa580, 1, RI_ALL_ONLINE }, - { 0xa590, 1, RI_ALL_ONLINE }, - { 0xa5a0, 1, RI_E1E1HE2_ONLINE }, - { 0xa5c0, 1, RI_ALL_ONLINE }, - { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0xa5f8, 1, RI_E1HE2_ONLINE }, - { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE }, - { 0xa620, 6, RI_E2E3E3B0_ONLINE }, - { 0xa638, 20, RI_E2_ONLINE }, - { 0xa688, 42, RI_E2E3E3B0_ONLINE }, - { 0xa730, 1, RI_E2_ONLINE }, - { 0xa734, 2, RI_E2E3E3B0_ONLINE }, - { 0xa73c, 4, RI_E2_ONLINE }, - { 0xa74c, 5, RI_E2E3E3B0_ONLINE }, - { 0xa760, 5, RI_E2_ONLINE }, - { 0xa774, 7, RI_E2E3E3B0_ONLINE }, - { 0xa790, 15, RI_E2_ONLINE }, - { 0xa7cc, 4, RI_E2E3E3B0_ONLINE }, - { 0xa7e0, 6, RI_E3E3B0_ONLINE }, - { 0xa800, 18, RI_E2_ONLINE }, - { 0xa848, 33, RI_E2E3E3B0_ONLINE }, - { 0xa8cc, 2, RI_E3E3B0_ONLINE }, - { 0xa8d4, 4, RI_E2E3E3B0_ONLINE }, - { 0xa8e4, 1, RI_E3E3B0_ONLINE }, - { 0xa8e8, 1, RI_E2E3E3B0_ONLINE }, - { 0xa8f0, 1, RI_E2E3E3B0_ONLINE }, - { 0xa8f8, 30, RI_E3E3B0_ONLINE }, - { 0xa974, 73, RI_E3E3B0_ONLINE }, - { 0xac30, 1, RI_E3E3B0_ONLINE }, - { 0xac40, 1, RI_E3E3B0_ONLINE }, - { 0xac50, 1, RI_E3E3B0_ONLINE }, - { 0xac60, 1, RI_E3B0_ONLINE }, - { 0x10000, 9, RI_ALL_ONLINE }, - { 0x10024, 1, RI_E1E1HE2_ONLINE }, - { 0x10028, 5, RI_ALL_ONLINE }, - { 0x1003c, 6, RI_E1E1HE2_ONLINE }, - { 0x10054, 20, RI_ALL_ONLINE }, - { 0x100a4, 4, RI_E1E1HE2_ONLINE }, - { 0x100b4, 11, RI_ALL_ONLINE }, - { 0x100e0, 4, RI_E1E1HE2_ONLINE }, - { 0x100f0, 8, RI_ALL_ONLINE }, - { 0x10110, 6, RI_E1E1HE2_ONLINE }, - { 0x10128, 110, RI_ALL_ONLINE }, - { 0x102e0, 4, RI_E1E1HE2_ONLINE }, - { 0x102f0, 18, RI_ALL_ONLINE }, - { 0x10338, 20, RI_E1E1HE2_ONLINE }, - { 0x10388, 10, RI_ALL_ONLINE }, - { 0x10400, 6, RI_E1E1HE2_ONLINE }, - { 0x10418, 6, RI_ALL_ONLINE }, - { 0x10430, 10, RI_E1E1HE2_ONLINE }, - { 0x10458, 22, RI_ALL_ONLINE }, - { 0x104b0, 12, RI_E1E1HE2_ONLINE }, - { 0x104e0, 1, RI_ALL_ONLINE }, - { 0x104e8, 2, RI_ALL_ONLINE }, - { 0x104f4, 2, RI_ALL_ONLINE }, - { 0x10500, 146, RI_ALL_ONLINE }, - { 0x10750, 2, RI_E1E1HE2_ONLINE }, - { 0x10760, 2, RI_E1E1HE2_ONLINE }, - { 0x10770, 2, RI_E1E1HE2_ONLINE }, - { 0x10780, 2, RI_E1E1HE2_ONLINE }, - { 0x10790, 2, RI_ALL_ONLINE }, - { 0x107a0, 2, RI_E1E1HE2_ONLINE }, - { 0x107b0, 2, RI_E1E1HE2_ONLINE }, - { 0x107c0, 2, RI_E1E1HE2_ONLINE }, - { 0x107d0, 2, RI_E1E1HE2_ONLINE }, - { 0x107e0, 2, RI_ALL_ONLINE }, - { 0x10880, 2, RI_ALL_ONLINE }, - { 0x10900, 2, RI_ALL_ONLINE }, - { 0x16000, 1, RI_E1HE2_ONLINE }, - { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE }, - { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE }, - { 0x16090, 4, RI_E1HE2E3_ONLINE }, - { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE }, - { 0x160dc, 2, RI_E1HE2_ONLINE }, - { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE }, - { 0x1610c, 2, RI_E1HE2_ONLINE }, - { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE }, - { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE }, - { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x18010, 35, RI_E2E3E3B0_ONLINE }, - { 0x180a4, 2, RI_E2E3E3B0_ONLINE }, - { 0x180c0, 9, RI_E2E3E3B0_ONLINE }, - { 0x180e4, 1, RI_E2E3_ONLINE }, - { 0x180e8, 2, RI_E2E3E3B0_ONLINE }, - { 0x180f0, 1, RI_E2E3_ONLINE }, - { 0x180f4, 79, RI_E2E3E3B0_ONLINE }, - { 0x18230, 1, RI_E2E3_ONLINE }, - { 0x18234, 2, RI_E2E3E3B0_ONLINE }, - { 0x1823c, 1, RI_E2E3_ONLINE }, - { 0x18240, 13, RI_E2E3E3B0_ONLINE }, - { 0x18274, 1, 
RI_E2_ONLINE }, - { 0x18278, 81, RI_E2E3E3B0_ONLINE }, - { 0x18440, 63, RI_E2E3E3B0_ONLINE }, - { 0x18570, 42, RI_E3E3B0_ONLINE }, - { 0x18618, 25, RI_E3B0_ONLINE }, - { 0x18680, 44, RI_E3B0_ONLINE }, - { 0x18748, 12, RI_E3B0_ONLINE }, - { 0x18788, 1, RI_E3B0_ONLINE }, - { 0x1879c, 6, RI_E3B0_ONLINE }, - { 0x187c4, 51, RI_E3B0_ONLINE }, - { 0x18a00, 48, RI_E3B0_ONLINE }, - { 0x20000, 24, RI_ALL_ONLINE }, - { 0x20060, 8, RI_ALL_ONLINE }, - { 0x20080, 94, RI_ALL_ONLINE }, - { 0x201f8, 1, RI_E1E1H_ONLINE }, - { 0x201fc, 1, RI_ALL_ONLINE }, - { 0x20200, 1, RI_E1E1H_ONLINE }, - { 0x20204, 1, RI_ALL_ONLINE }, - { 0x20208, 1, RI_E1E1H_ONLINE }, - { 0x2020c, 39, RI_ALL_ONLINE }, - { 0x202c8, 1, RI_E2E3E3B0_ONLINE }, - { 0x202d8, 4, RI_E2E3E3B0_ONLINE }, - { 0x202f0, 1, RI_E3B0_ONLINE }, - { 0x20400, 2, RI_ALL_ONLINE }, - { 0x2040c, 8, RI_ALL_ONLINE }, - { 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE }, - { 0x20480, 1, RI_ALL_ONLINE }, - { 0x20500, 1, RI_ALL_ONLINE }, - { 0x20600, 1, RI_ALL_ONLINE }, - { 0x28000, 1, RI_ALL_ONLINE }, - { 0x28004, 8191, RI_ALL_OFFLINE }, - { 0x30000, 1, RI_ALL_ONLINE }, - { 0x30004, 16383, RI_ALL_OFFLINE }, - { 0x40000, 98, RI_ALL_ONLINE }, - { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE }, - { 0x401c8, 1, RI_E1H_ONLINE }, - { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x401d4, 2, RI_E2E3E3B0_ONLINE }, - { 0x40200, 4, RI_ALL_ONLINE }, - { 0x40220, 6, RI_E2E3E3B0_ONLINE }, - { 0x40238, 8, RI_E2E3_ONLINE }, - { 0x40258, 4, RI_E2E3E3B0_ONLINE }, - { 0x40268, 2, RI_E3E3B0_ONLINE }, - { 0x40270, 17, RI_E3B0_ONLINE }, - { 0x40400, 43, RI_ALL_ONLINE }, - { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE }, - { 0x404e0, 1, RI_E2E3E3B0_ONLINE }, - { 0x40500, 2, RI_ALL_ONLINE }, - { 0x40510, 2, RI_ALL_ONLINE }, - { 0x40520, 2, RI_ALL_ONLINE }, - { 0x40530, 2, RI_ALL_ONLINE }, - { 0x40540, 2, RI_ALL_ONLINE }, - { 0x40550, 10, RI_E2E3E3B0_ONLINE }, - { 0x40610, 2, RI_E2E3E3B0_ONLINE }, - { 0x42000, 164, RI_ALL_ONLINE }, - { 0x422c0, 4, RI_E2E3E3B0_ONLINE }, - { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE }, - { 0x422e8, 1, RI_E2E3E3B0_ONLINE }, - { 0x42400, 49, RI_ALL_ONLINE }, - { 0x424c8, 38, RI_ALL_ONLINE }, - { 0x42568, 2, RI_ALL_ONLINE }, - { 0x42640, 5, RI_E2E3E3B0_ONLINE }, - { 0x42800, 1, RI_ALL_ONLINE }, - { 0x50000, 1, RI_ALL_ONLINE }, - { 0x50004, 19, RI_ALL_ONLINE }, - { 0x50050, 8, RI_ALL_ONLINE }, - { 0x50070, 88, RI_ALL_ONLINE }, - { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE }, - { 0x50200, 2, RI_ALL_ONLINE }, - { 0x5020c, 7, RI_ALL_ONLINE }, - { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x50240, 1, RI_ALL_ONLINE }, - { 0x50280, 1, RI_ALL_ONLINE }, - { 0x50300, 1, RI_E2E3E3B0_ONLINE }, - { 0x5030c, 1, RI_E2E3E3B0_ONLINE }, - { 0x50318, 1, RI_E2E3E3B0_ONLINE }, - { 0x5031c, 1, RI_E2E3E3B0_ONLINE }, - { 0x50320, 2, RI_E2E3E3B0_ONLINE }, - { 0x50330, 1, RI_E3B0_ONLINE }, - { 0x52000, 1, RI_ALL_ONLINE }, - { 0x54000, 1, RI_ALL_ONLINE }, - { 0x54004, 3327, RI_ALL_OFFLINE }, - { 0x58000, 1, RI_ALL_ONLINE }, - { 0x58004, 8191, RI_E1E1H_OFFLINE }, - { 0x60000, 26, RI_ALL_ONLINE }, - { 0x60068, 8, RI_E1E1H_ONLINE }, - { 0x60088, 12, RI_ALL_ONLINE }, - { 0x600b8, 9, RI_E1E1H_ONLINE }, - { 0x600dc, 1, RI_ALL_ONLINE }, - { 0x600e0, 5, RI_E1E1H_ONLINE }, - { 0x600f4, 1, RI_E1E1HE2_ONLINE }, - { 0x600f8, 1, RI_E1E1H_ONLINE }, - { 0x600fc, 8, RI_ALL_ONLINE }, - { 0x6013c, 24, RI_E1H_ONLINE }, - { 0x6019c, 2, RI_E2E3E3B0_ONLINE }, - { 0x601ac, 18, RI_E2E3E3B0_ONLINE }, - { 0x60200, 1, RI_ALL_ONLINE }, - { 0x60204, 2, RI_ALL_OFFLINE }, - { 0x60210, 13, RI_E2E3E3B0_ONLINE }, - { 0x60244, 16, RI_E3B0_ONLINE }, - { 0x61000, 1, 
RI_ALL_ONLINE }, - { 0x61004, 511, RI_ALL_OFFLINE }, - { 0x61800, 512, RI_E3E3B0_OFFLINE }, - { 0x70000, 8, RI_ALL_ONLINE }, - { 0x70020, 8184, RI_ALL_OFFLINE }, - { 0x78000, 8192, RI_E3E3B0_OFFLINE }, - { 0x85000, 3, RI_ALL_ONLINE }, - { 0x8501c, 7, RI_ALL_ONLINE }, - { 0x85048, 1, RI_ALL_ONLINE }, - { 0x85200, 32, RI_ALL_ONLINE }, - { 0xb0000, 16384, RI_E1H_ONLINE }, - { 0xc1000, 7, RI_ALL_ONLINE }, - { 0xc103c, 2, RI_E2E3E3B0_ONLINE }, - { 0xc1800, 2, RI_ALL_ONLINE }, - { 0xc2000, 164, RI_ALL_ONLINE }, - { 0xc22c0, 5, RI_E2E3E3B0_ONLINE }, - { 0xc22d8, 4, RI_E2E3E3B0_ONLINE }, - { 0xc2400, 49, RI_ALL_ONLINE }, - { 0xc24c8, 38, RI_ALL_ONLINE }, - { 0xc2568, 2, RI_ALL_ONLINE }, - { 0xc2600, 1, RI_ALL_ONLINE }, - { 0xc4000, 165, RI_ALL_ONLINE }, - { 0xc42d8, 2, RI_E2E3E3B0_ONLINE }, - { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE }, - { 0xc42fc, 1, RI_E2E3E3B0_ONLINE }, - { 0xc4400, 51, RI_ALL_ONLINE }, - { 0xc44d0, 38, RI_ALL_ONLINE }, - { 0xc4570, 2, RI_ALL_ONLINE }, - { 0xc4578, 5, RI_E2E3E3B0_ONLINE }, - { 0xc4600, 1, RI_ALL_ONLINE }, - { 0xd0000, 19, RI_ALL_ONLINE }, - { 0xd004c, 8, RI_ALL_ONLINE }, - { 0xd006c, 91, RI_ALL_ONLINE }, - { 0xd01fc, 1, RI_E2E3E3B0_ONLINE }, - { 0xd0200, 2, RI_ALL_ONLINE }, - { 0xd020c, 7, RI_ALL_ONLINE }, - { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE }, - { 0xd0280, 1, RI_ALL_ONLINE }, - { 0xd0300, 1, RI_ALL_ONLINE }, - { 0xd0400, 1, RI_ALL_ONLINE }, - { 0xd0818, 1, RI_E3B0_ONLINE }, - { 0xd4000, 1, RI_ALL_ONLINE }, - { 0xd4004, 2559, RI_ALL_OFFLINE }, - { 0xd8000, 1, RI_ALL_ONLINE }, - { 0xd8004, 8191, RI_ALL_OFFLINE }, - { 0xe0000, 21, RI_ALL_ONLINE }, - { 0xe0054, 8, RI_ALL_ONLINE }, - { 0xe0074, 49, RI_ALL_ONLINE }, - { 0xe0138, 1, RI_E1E1H_ONLINE }, - { 0xe013c, 35, RI_ALL_ONLINE }, - { 0xe01f4, 1, RI_E2_ONLINE }, - { 0xe01f8, 1, RI_E2E3E3B0_ONLINE }, - { 0xe0200, 2, RI_ALL_ONLINE }, - { 0xe020c, 8, RI_ALL_ONLINE }, - { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE }, - { 0xe0280, 1, RI_ALL_ONLINE }, - { 0xe0300, 1, RI_ALL_ONLINE }, - { 0xe0400, 1, RI_E3B0_ONLINE }, - { 0xe1000, 1, RI_ALL_ONLINE }, - { 0xe2000, 1, RI_ALL_ONLINE }, - { 0xe2004, 2047, RI_ALL_OFFLINE }, - { 0xf0000, 1, RI_ALL_ONLINE }, - { 0xf0004, 16383, RI_ALL_OFFLINE }, - { 0x101000, 12, RI_ALL_ONLINE }, - { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x101054, 3, RI_E2E3E3B0_ONLINE }, - { 0x101100, 1, RI_ALL_ONLINE }, - { 0x101800, 8, RI_ALL_ONLINE }, - { 0x102000, 18, RI_ALL_ONLINE }, - { 0x102068, 6, RI_E2E3E3B0_ONLINE }, - { 0x102080, 17, RI_ALL_ONLINE }, - { 0x1020c8, 8, RI_E1H_ONLINE }, - { 0x1020e8, 9, RI_E2E3E3B0_ONLINE }, - { 0x102400, 1, RI_ALL_ONLINE }, - { 0x103000, 26, RI_ALL_ONLINE }, - { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE }, - { 0x1030ac, 2, RI_E2E3E3B0_ONLINE }, - { 0x1030b4, 1, RI_E2_ONLINE }, - { 0x1030b8, 7, RI_E2E3E3B0_ONLINE }, - { 0x1030d8, 8, RI_E2E3E3B0_ONLINE }, - { 0x103400, 1, RI_E2E3E3B0_ONLINE }, - { 0x103404, 135, RI_E2E3E3B0_OFFLINE }, - { 0x103800, 8, RI_ALL_ONLINE }, - { 0x104000, 63, RI_ALL_ONLINE }, - { 0x10411c, 16, RI_E2E3E3B0_ONLINE }, - { 0x104200, 17, RI_ALL_ONLINE }, - { 0x104400, 64, RI_ALL_ONLINE }, - { 0x104500, 192, RI_ALL_OFFLINE }, - { 0x104800, 64, RI_ALL_ONLINE }, - { 0x104900, 192, RI_ALL_OFFLINE }, - { 0x105000, 256, RI_ALL_ONLINE }, - { 0x105400, 768, RI_ALL_OFFLINE }, - { 0x107000, 7, RI_E2E3E3B0_ONLINE }, - { 0x10701c, 1, RI_E3E3B0_ONLINE }, - { 0x108000, 33, RI_E1E1H_ONLINE }, - { 0x1080ac, 5, RI_E1H_ONLINE }, - { 0x108100, 5, RI_E1E1H_ONLINE }, - { 0x108120, 5, RI_E1E1H_ONLINE }, - { 0x108200, 74, RI_E1E1H_ONLINE }, - { 0x108400, 74, RI_E1E1H_ONLINE }, - { 
0x108800, 152, RI_E1E1H_ONLINE }, - { 0x110000, 111, RI_E2E3E3B0_ONLINE }, - { 0x1101dc, 1, RI_E3E3B0_ONLINE }, - { 0x110200, 4, RI_E2E3E3B0_ONLINE }, - { 0x120000, 2, RI_ALL_ONLINE }, - { 0x120008, 4, RI_ALL_ONLINE }, - { 0x120018, 3, RI_ALL_ONLINE }, - { 0x120024, 4, RI_ALL_ONLINE }, - { 0x120034, 3, RI_ALL_ONLINE }, - { 0x120040, 4, RI_ALL_ONLINE }, - { 0x120050, 3, RI_ALL_ONLINE }, - { 0x12005c, 4, RI_ALL_ONLINE }, - { 0x12006c, 3, RI_ALL_ONLINE }, - { 0x120078, 4, RI_ALL_ONLINE }, - { 0x120088, 3, RI_ALL_ONLINE }, - { 0x120094, 4, RI_ALL_ONLINE }, - { 0x1200a4, 3, RI_ALL_ONLINE }, - { 0x1200b0, 4, RI_ALL_ONLINE }, - { 0x1200c0, 3, RI_ALL_ONLINE }, - { 0x1200cc, 4, RI_ALL_ONLINE }, - { 0x1200dc, 3, RI_ALL_ONLINE }, - { 0x1200e8, 4, RI_ALL_ONLINE }, - { 0x1200f8, 3, RI_ALL_ONLINE }, - { 0x120104, 4, RI_ALL_ONLINE }, - { 0x120114, 1, RI_ALL_ONLINE }, - { 0x120118, 22, RI_ALL_ONLINE }, - { 0x120170, 2, RI_E1E1H_ONLINE }, - { 0x120178, 243, RI_ALL_ONLINE }, - { 0x120544, 4, RI_E1E1H_ONLINE }, - { 0x120554, 6, RI_ALL_ONLINE }, - { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE }, - { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE }, - { 0x1205f4, 1, RI_E1HE2_ONLINE }, - { 0x1205f8, 4, RI_E2E3E3B0_ONLINE }, - { 0x120618, 1, RI_E2E3E3B0_ONLINE }, - { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE }, - { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE }, - { 0x120698, 3, RI_E2E3E3B0_ONLINE }, - { 0x1206a4, 1, RI_E2_ONLINE }, - { 0x1206a8, 1, RI_E2E3E3B0_ONLINE }, - { 0x1206b0, 75, RI_E2E3E3B0_ONLINE }, - { 0x1207dc, 1, RI_E2_ONLINE }, - { 0x1207fc, 1, RI_E2E3E3B0_ONLINE }, - { 0x12080c, 65, RI_ALL_ONLINE }, - { 0x120910, 7, RI_E2E3E3B0_ONLINE }, - { 0x120930, 9, RI_E2E3E3B0_ONLINE }, - { 0x12095c, 37, RI_E3E3B0_ONLINE }, - { 0x120a00, 2, RI_E1E1HE2_ONLINE }, - { 0x120b00, 1, RI_E3E3B0_ONLINE }, - { 0x122000, 2, RI_ALL_ONLINE }, - { 0x122008, 2046, RI_E1_OFFLINE }, - { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE }, - { 0x130000, 35, RI_E2E3E3B0_ONLINE }, - { 0x130100, 29, RI_E2E3E3B0_ONLINE }, - { 0x130180, 1, RI_E2E3E3B0_ONLINE }, - { 0x130200, 1, RI_E2E3E3B0_ONLINE }, - { 0x130280, 1, RI_E2E3E3B0_ONLINE }, - { 0x130300, 5, RI_E2E3E3B0_ONLINE }, - { 0x130380, 1, RI_E2E3E3B0_ONLINE }, - { 0x130400, 1, RI_E2E3E3B0_ONLINE }, - { 0x130480, 5, RI_E2E3E3B0_ONLINE }, - { 0x130800, 72, RI_E2E3E3B0_ONLINE }, - { 0x131000, 136, RI_E2E3E3B0_ONLINE }, - { 0x132000, 148, RI_E2E3E3B0_ONLINE }, - { 0x134000, 544, RI_E2E3E3B0_ONLINE }, - { 0x140000, 1, RI_ALL_ONLINE }, - { 0x140004, 9, RI_E1E1HE2E3_ONLINE }, - { 0x140028, 8, RI_ALL_ONLINE }, - { 0x140048, 10, RI_E1E1HE2E3_ONLINE }, - { 0x140070, 1, RI_ALL_ONLINE }, - { 0x140074, 10, RI_E1E1HE2E3_ONLINE }, - { 0x14009c, 1, RI_ALL_ONLINE }, - { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE }, - { 0x1400b4, 7, RI_ALL_ONLINE }, - { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE }, - { 0x1400f8, 2, RI_ALL_ONLINE }, - { 0x140100, 5, RI_E1E1H_ONLINE }, - { 0x140114, 5, RI_E1E1HE2E3_ONLINE }, - { 0x140128, 7, RI_ALL_ONLINE }, - { 0x140144, 9, RI_E1E1HE2E3_ONLINE }, - { 0x140168, 8, RI_ALL_ONLINE }, - { 0x140188, 3, RI_E1E1HE2E3_ONLINE }, - { 0x140194, 13, RI_ALL_ONLINE }, - { 0x140200, 6, RI_E1E1HE2E3_ONLINE }, - { 0x140220, 4, RI_E2E3_ONLINE }, - { 0x140240, 4, RI_E2E3_ONLINE }, - { 0x140260, 4, RI_E2E3_ONLINE }, - { 0x140280, 4, RI_E2E3_ONLINE }, - { 0x1402a0, 4, RI_E2E3_ONLINE }, - { 0x1402c0, 4, RI_E2E3_ONLINE }, - { 0x1402e0, 2, RI_E2E3_ONLINE }, - { 0x1402e8, 2, RI_E2E3E3B0_ONLINE }, - { 0x1402f0, 9, RI_E2E3_ONLINE }, - { 0x140314, 44, RI_E3B0_ONLINE }, 
- { 0x1403d0, 70, RI_E3B0_ONLINE }, - { 0x144000, 4, RI_E1E1H_ONLINE }, - { 0x148000, 4, RI_E1E1H_ONLINE }, - { 0x14c000, 4, RI_E1E1H_ONLINE }, - { 0x150000, 4, RI_E1E1H_ONLINE }, - { 0x154000, 4, RI_E1E1H_ONLINE }, - { 0x158000, 4, RI_E1E1H_ONLINE }, - { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x15c008, 5, RI_E1H_ONLINE }, - { 0x15c020, 8, RI_E2E3E3B0_ONLINE }, - { 0x15c040, 1, RI_E2E3_ONLINE }, - { 0x15c044, 2, RI_E2E3E3B0_ONLINE }, - { 0x15c04c, 8, RI_E2E3_ONLINE }, - { 0x15c06c, 8, RI_E2E3E3B0_ONLINE }, - { 0x15c090, 13, RI_E2E3E3B0_ONLINE }, - { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE }, - { 0x15c128, 2, RI_E2E3_ONLINE }, - { 0x15c130, 8, RI_E2E3E3B0_ONLINE }, - { 0x15c150, 2, RI_E3E3B0_ONLINE }, - { 0x15c158, 2, RI_E3_ONLINE }, - { 0x15c160, 149, RI_E3B0_ONLINE }, - { 0x161000, 7, RI_ALL_ONLINE }, - { 0x16103c, 2, RI_E2E3E3B0_ONLINE }, - { 0x161800, 2, RI_ALL_ONLINE }, - { 0x162000, 54, RI_E3E3B0_ONLINE }, - { 0x162200, 60, RI_E3E3B0_ONLINE }, - { 0x162400, 54, RI_E3E3B0_ONLINE }, - { 0x162600, 60, RI_E3E3B0_ONLINE }, - { 0x162800, 54, RI_E3E3B0_ONLINE }, - { 0x162a00, 60, RI_E3E3B0_ONLINE }, - { 0x162c00, 54, RI_E3E3B0_ONLINE }, - { 0x162e00, 60, RI_E3E3B0_ONLINE }, - { 0x164000, 60, RI_ALL_ONLINE }, - { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x164118, 15, RI_E2E3E3B0_ONLINE }, - { 0x164200, 1, RI_ALL_ONLINE }, - { 0x164208, 1, RI_ALL_ONLINE }, - { 0x164210, 1, RI_ALL_ONLINE }, - { 0x164218, 1, RI_ALL_ONLINE }, - { 0x164220, 1, RI_ALL_ONLINE }, - { 0x164228, 1, RI_ALL_ONLINE }, - { 0x164230, 1, RI_ALL_ONLINE }, - { 0x164238, 1, RI_ALL_ONLINE }, - { 0x164240, 1, RI_ALL_ONLINE }, - { 0x164248, 1, RI_ALL_ONLINE }, - { 0x164250, 1, RI_ALL_ONLINE }, - { 0x164258, 1, RI_ALL_ONLINE }, - { 0x164260, 1, RI_ALL_ONLINE }, - { 0x164270, 2, RI_ALL_ONLINE }, - { 0x164280, 2, RI_ALL_ONLINE }, - { 0x164800, 2, RI_ALL_ONLINE }, - { 0x165000, 2, RI_ALL_ONLINE }, - { 0x166000, 164, RI_ALL_ONLINE }, - { 0x1662cc, 7, RI_E2E3E3B0_ONLINE }, - { 0x166400, 49, RI_ALL_ONLINE }, - { 0x1664c8, 38, RI_ALL_ONLINE }, - { 0x166568, 2, RI_ALL_ONLINE }, - { 0x166570, 5, RI_E2E3E3B0_ONLINE }, - { 0x166800, 1, RI_ALL_ONLINE }, - { 0x168000, 137, RI_ALL_ONLINE }, - { 0x168224, 2, RI_E1E1H_ONLINE }, - { 0x16822c, 29, RI_ALL_ONLINE }, - { 0x1682a0, 12, RI_E1E1H_ONLINE }, - { 0x1682d0, 12, RI_ALL_ONLINE }, - { 0x168300, 2, RI_E1E1H_ONLINE }, - { 0x168308, 68, RI_ALL_ONLINE }, - { 0x168418, 2, RI_E1E1H_ONLINE }, - { 0x168420, 6, RI_ALL_ONLINE }, - { 0x168800, 19, RI_ALL_ONLINE }, - { 0x168900, 1, RI_ALL_ONLINE }, - { 0x168a00, 128, RI_ALL_ONLINE }, - { 0x16a000, 1, RI_ALL_ONLINE }, - { 0x16a004, 1535, RI_ALL_OFFLINE }, - { 0x16c000, 1, RI_ALL_ONLINE }, - { 0x16c004, 1535, RI_ALL_OFFLINE }, - { 0x16e000, 16, RI_E1H_ONLINE }, - { 0x16e040, 8, RI_E2E3E3B0_ONLINE }, - { 0x16e100, 1, RI_E1H_ONLINE }, - { 0x16e200, 2, RI_E1H_ONLINE }, - { 0x16e400, 161, RI_E1H_ONLINE }, - { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x16e68c, 12, RI_E1H_ONLINE }, - { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE }, - { 0x16e6cc, 4, RI_E1H_ONLINE }, - { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE }, - { 0x16e6e8, 5, RI_E2E3_ONLINE }, - { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE }, - { 0x16e768, 17, RI_E2E3E3B0_ONLINE }, - { 0x16e7ac, 12, RI_E3B0_ONLINE }, - { 0x170000, 24, RI_ALL_ONLINE }, - { 0x170060, 4, RI_E1E1H_ONLINE }, - { 0x170070, 65, RI_ALL_ONLINE }, - { 0x170194, 11, RI_E2E3E3B0_ONLINE }, - { 0x1701c4, 1, RI_E2E3E3B0_ONLINE }, - { 0x1701cc, 7, RI_E2E3E3B0_ONLINE }, - { 0x1701e8, 1, RI_E3E3B0_ONLINE }, - { 0x1701ec, 1, RI_E2E3E3B0_ONLINE }, - { 0x1701f4, 1, 
RI_E2E3E3B0_ONLINE }, - { 0x170200, 4, RI_ALL_ONLINE }, - { 0x170214, 1, RI_ALL_ONLINE }, - { 0x170218, 77, RI_E2E3E3B0_ONLINE }, - { 0x170400, 64, RI_E2E3E3B0_ONLINE }, - { 0x178000, 1, RI_ALL_ONLINE }, - { 0x180000, 61, RI_ALL_ONLINE }, - { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x180200, 58, RI_ALL_ONLINE }, - { 0x180340, 4, RI_ALL_ONLINE }, - { 0x180380, 1, RI_E2E3E3B0_ONLINE }, - { 0x180388, 1, RI_E2E3E3B0_ONLINE }, - { 0x180390, 1, RI_E2E3E3B0_ONLINE }, - { 0x180398, 1, RI_E2E3E3B0_ONLINE }, - { 0x1803a0, 5, RI_E2E3E3B0_ONLINE }, - { 0x1803b4, 2, RI_E3E3B0_ONLINE }, - { 0x180400, 1, RI_ALL_ONLINE }, - { 0x180404, 255, RI_E1E1H_OFFLINE }, - { 0x181000, 4, RI_ALL_ONLINE }, - { 0x181010, 1020, RI_ALL_OFFLINE }, - { 0x182000, 4, RI_E3E3B0_ONLINE }, - { 0x1a0000, 1, RI_ALL_ONLINE }, - { 0x1a0004, 5631, RI_ALL_OFFLINE }, - { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x1a8000, 1, RI_ALL_ONLINE }, - { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x1b0000, 1, RI_ALL_ONLINE }, - { 0x1b0004, 15, RI_E1H_OFFLINE }, - { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b0044, 239, RI_E1H_OFFLINE }, - { 0x1b0400, 1, RI_ALL_ONLINE }, - { 0x1b0404, 255, RI_E1H_OFFLINE }, - { 0x1b0800, 1, RI_ALL_ONLINE }, - { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b0c00, 1, RI_ALL_ONLINE }, - { 0x1b1000, 1, RI_ALL_ONLINE }, - { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b1400, 1, RI_ALL_ONLINE }, - { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b1800, 128, RI_ALL_OFFLINE }, - { 0x1b1c00, 128, RI_ALL_OFFLINE }, - { 0x1b2000, 1, RI_ALL_ONLINE }, - { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x1b8000, 1, RI_ALL_ONLINE }, - { 0x1b8040, 1, RI_ALL_ONLINE }, - { 0x1b8080, 1, RI_ALL_ONLINE }, - { 0x1b80c0, 1, RI_ALL_ONLINE }, - { 0x1b8100, 1, RI_ALL_ONLINE }, - { 0x1b8140, 1, RI_ALL_ONLINE }, - { 0x1b8180, 1, RI_ALL_ONLINE }, - { 0x1b81c0, 1, RI_ALL_ONLINE }, - { 0x1b8200, 1, RI_ALL_ONLINE }, - { 0x1b8240, 1, RI_ALL_ONLINE }, - { 0x1b8280, 1, RI_ALL_ONLINE }, - { 0x1b82c0, 1, RI_ALL_ONLINE }, - { 0x1b8300, 1, RI_ALL_ONLINE }, - { 0x1b8340, 1, RI_ALL_ONLINE }, - { 0x1b8380, 1, RI_ALL_ONLINE }, - { 0x1b83c0, 1, RI_ALL_ONLINE }, - { 0x1b8400, 1, RI_ALL_ONLINE }, - { 0x1b8440, 1, RI_ALL_ONLINE }, - { 0x1b8480, 1, RI_ALL_ONLINE }, - { 0x1b84c0, 1, RI_ALL_ONLINE }, - { 0x1b8500, 1, RI_ALL_ONLINE }, - { 0x1b8540, 1, RI_ALL_ONLINE }, - { 0x1b8580, 1, RI_ALL_ONLINE }, - { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x1b8800, 1, RI_ALL_ONLINE }, - { 0x1b8840, 1, RI_ALL_ONLINE }, - { 0x1b8880, 1, RI_ALL_ONLINE }, - { 0x1b88c0, 1, RI_ALL_ONLINE }, - { 0x1b8900, 1, RI_ALL_ONLINE }, - { 0x1b8940, 1, RI_ALL_ONLINE }, - { 0x1b8980, 1, RI_ALL_ONLINE }, - { 0x1b89c0, 1, RI_ALL_ONLINE }, - { 0x1b8a00, 1, RI_ALL_ONLINE }, - { 0x1b8a40, 1, RI_ALL_ONLINE }, - { 0x1b8a80, 1, RI_ALL_ONLINE }, - { 0x1b8ac0, 1, RI_ALL_ONLINE }, - { 0x1b8b00, 1, RI_ALL_ONLINE }, - { 0x1b8b40, 1, RI_ALL_ONLINE }, - { 0x1b8b80, 1, RI_ALL_ONLINE }, - { 0x1b8bc0, 1, RI_ALL_ONLINE }, - { 0x1b8c00, 1, RI_ALL_ONLINE }, - { 0x1b8c40, 1, RI_ALL_ONLINE }, - { 0x1b8c80, 1, RI_ALL_ONLINE }, - { 0x1b8cc0, 1, RI_ALL_ONLINE }, - { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x1b8d00, 1, RI_ALL_ONLINE }, - { 0x1b8d40, 1, RI_ALL_ONLINE }, - { 0x1b8d80, 1, RI_ALL_ONLINE }, - { 0x1b8dc0, 1, RI_ALL_ONLINE }, - { 0x1b8e00, 1, RI_ALL_ONLINE }, - { 0x1b8e40, 1, RI_ALL_ONLINE }, - { 0x1b8e80, 1, RI_ALL_ONLINE }, - { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x1b8ec0, 
1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x1b8fe8, 2, RI_E3E3B0_ONLINE }, - { 0x1b9000, 1, RI_E2E3E3B0_ONLINE }, - { 0x1b9040, 3, RI_E2E3E3B0_ONLINE }, - { 0x1b905c, 1, RI_E3E3B0_ONLINE }, - { 0x1b9064, 1, RI_E3B0_ONLINE }, - { 0x1b9080, 10, RI_E3B0_ONLINE }, - { 0x1b9400, 14, RI_E2E3E3B0_ONLINE }, - { 0x1b943c, 19, RI_E2E3E3B0_ONLINE }, - { 0x1b9490, 10, RI_E2E3E3B0_ONLINE }, - { 0x1c0000, 2, RI_ALL_ONLINE }, - { 0x200000, 65, RI_ALL_ONLINE }, - { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x200200, 58, RI_ALL_ONLINE }, - { 0x200340, 4, RI_ALL_ONLINE }, - { 0x200380, 1, RI_E2E3E3B0_ONLINE }, - { 0x200388, 1, RI_E2E3E3B0_ONLINE }, - { 0x200390, 1, RI_E2E3E3B0_ONLINE }, - { 0x200398, 1, RI_E2E3E3B0_ONLINE }, - { 0x2003a0, 1, RI_E2E3E3B0_ONLINE }, - { 0x2003a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x200400, 1, RI_ALL_ONLINE }, - { 0x200404, 255, RI_E1E1H_OFFLINE }, - { 0x202000, 4, RI_ALL_ONLINE }, - { 0x202010, 2044, RI_ALL_OFFLINE }, - { 0x204000, 4, RI_E3E3B0_ONLINE }, - { 0x220000, 1, RI_ALL_ONLINE }, - { 0x220004, 5631, RI_ALL_OFFLINE }, - { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x228000, 1, RI_ALL_ONLINE }, - { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x230000, 1, RI_ALL_ONLINE }, - { 0x230004, 15, RI_E1H_OFFLINE }, - { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x230044, 239, RI_E1H_OFFLINE }, - { 0x230400, 1, RI_ALL_ONLINE }, - { 0x230404, 255, RI_E1H_OFFLINE }, - { 0x230800, 1, RI_ALL_ONLINE }, - { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x230c00, 1, RI_ALL_ONLINE }, - { 0x231000, 1, RI_ALL_ONLINE }, - { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x231400, 1, RI_ALL_ONLINE }, - { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x231800, 128, RI_ALL_OFFLINE }, - { 0x231c00, 128, RI_ALL_OFFLINE }, - { 0x232000, 1, RI_ALL_ONLINE }, - { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x232404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x238000, 1, RI_ALL_ONLINE }, - { 0x238040, 1, RI_ALL_ONLINE }, - { 0x238080, 1, RI_ALL_ONLINE }, - { 0x2380c0, 1, RI_ALL_ONLINE }, - { 0x238100, 1, RI_ALL_ONLINE }, - { 0x238140, 1, RI_ALL_ONLINE }, - { 0x238180, 1, RI_ALL_ONLINE }, - { 0x2381c0, 1, RI_ALL_ONLINE }, - { 0x238200, 1, RI_ALL_ONLINE }, - { 0x238240, 1, RI_ALL_ONLINE }, - { 0x238280, 1, RI_ALL_ONLINE }, - { 0x2382c0, 1, RI_ALL_ONLINE }, - { 0x238300, 1, RI_ALL_ONLINE }, - { 0x238340, 1, RI_ALL_ONLINE }, - { 0x238380, 1, RI_ALL_ONLINE }, - { 0x2383c0, 1, RI_ALL_ONLINE }, - { 0x238400, 1, RI_ALL_ONLINE }, - { 0x238440, 1, RI_ALL_ONLINE }, - { 0x238480, 1, RI_ALL_ONLINE }, - { 0x2384c0, 1, RI_ALL_ONLINE }, - { 0x238500, 1, RI_ALL_ONLINE }, - { 0x238540, 1, RI_ALL_ONLINE }, - { 0x238580, 1, RI_ALL_ONLINE }, - { 0x2385c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x238800, 1, RI_ALL_ONLINE }, - { 0x238840, 1, RI_ALL_ONLINE }, - { 0x238880, 1, RI_ALL_ONLINE }, - { 0x2388c0, 1, RI_ALL_ONLINE }, - { 0x238900, 1, RI_ALL_ONLINE }, - { 0x238940, 1, RI_ALL_ONLINE }, - { 0x238980, 1, RI_ALL_ONLINE }, - { 0x2389c0, 1, RI_ALL_ONLINE }, - { 0x238a00, 1, RI_ALL_ONLINE }, - { 0x238a40, 1, RI_ALL_ONLINE }, - { 0x238a80, 1, RI_ALL_ONLINE }, - { 0x238ac0, 1, RI_ALL_ONLINE }, - { 0x238b00, 1, RI_ALL_ONLINE }, - { 0x238b40, 1, RI_ALL_ONLINE }, - { 0x238b80, 1, RI_ALL_ONLINE }, - { 0x238bc0, 1, RI_ALL_ONLINE }, - { 0x238c00, 1, 
RI_ALL_ONLINE }, - { 0x238c40, 1, RI_ALL_ONLINE }, - { 0x238c80, 1, RI_ALL_ONLINE }, - { 0x238cc0, 1, RI_ALL_ONLINE }, - { 0x238cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x238d00, 1, RI_ALL_ONLINE }, - { 0x238d40, 1, RI_ALL_ONLINE }, - { 0x238d80, 1, RI_ALL_ONLINE }, - { 0x238dc0, 1, RI_ALL_ONLINE }, - { 0x238e00, 1, RI_ALL_ONLINE }, - { 0x238e40, 1, RI_ALL_ONLINE }, - { 0x238e80, 1, RI_ALL_ONLINE }, - { 0x238e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x238fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x238fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x238fe8, 2, RI_E3E3B0_ONLINE }, - { 0x239000, 1, RI_E2E3E3B0_ONLINE }, - { 0x239040, 3, RI_E2E3E3B0_ONLINE }, - { 0x23905c, 1, RI_E3E3B0_ONLINE }, - { 0x239064, 1, RI_E3B0_ONLINE }, - { 0x239080, 10, RI_E3B0_ONLINE }, - { 0x240000, 2, RI_ALL_ONLINE }, - { 0x280000, 65, RI_ALL_ONLINE }, - { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x280200, 58, RI_ALL_ONLINE }, - { 0x280340, 4, RI_ALL_ONLINE }, - { 0x280380, 1, RI_E2E3E3B0_ONLINE }, - { 0x280388, 1, RI_E2E3E3B0_ONLINE }, - { 0x280390, 1, RI_E2E3E3B0_ONLINE }, - { 0x280398, 1, RI_E2E3E3B0_ONLINE }, - { 0x2803a0, 1, RI_E2E3E3B0_ONLINE }, - { 0x2803a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x280400, 1, RI_ALL_ONLINE }, - { 0x280404, 255, RI_E1E1H_OFFLINE }, - { 0x282000, 4, RI_ALL_ONLINE }, - { 0x282010, 2044, RI_ALL_OFFLINE }, - { 0x284000, 4, RI_E3E3B0_ONLINE }, - { 0x2a0000, 1, RI_ALL_ONLINE }, - { 0x2a0004, 5631, RI_ALL_OFFLINE }, - { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x2a8000, 1, RI_ALL_ONLINE }, - { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x2b0000, 1, RI_ALL_ONLINE }, - { 0x2b0004, 15, RI_E1H_OFFLINE }, - { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b0044, 239, RI_E1H_OFFLINE }, - { 0x2b0400, 1, RI_ALL_ONLINE }, - { 0x2b0404, 255, RI_E1H_OFFLINE }, - { 0x2b0800, 1, RI_ALL_ONLINE }, - { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b0c00, 1, RI_ALL_ONLINE }, - { 0x2b1000, 1, RI_ALL_ONLINE }, - { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b1400, 1, RI_ALL_ONLINE }, - { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b1800, 128, RI_ALL_OFFLINE }, - { 0x2b1c00, 128, RI_ALL_OFFLINE }, - { 0x2b2000, 1, RI_ALL_ONLINE }, - { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x2b8000, 1, RI_ALL_ONLINE }, - { 0x2b8040, 1, RI_ALL_ONLINE }, - { 0x2b8080, 1, RI_ALL_ONLINE }, - { 0x2b80c0, 1, RI_ALL_ONLINE }, - { 0x2b8100, 1, RI_ALL_ONLINE }, - { 0x2b8140, 1, RI_ALL_ONLINE }, - { 0x2b8180, 1, RI_ALL_ONLINE }, - { 0x2b81c0, 1, RI_ALL_ONLINE }, - { 0x2b8200, 1, RI_ALL_ONLINE }, - { 0x2b8240, 1, RI_ALL_ONLINE }, - { 0x2b8280, 1, RI_ALL_ONLINE }, - { 0x2b82c0, 1, RI_ALL_ONLINE }, - { 0x2b8300, 1, RI_ALL_ONLINE }, - { 0x2b8340, 1, RI_ALL_ONLINE }, - { 0x2b8380, 1, RI_ALL_ONLINE }, - { 0x2b83c0, 1, RI_ALL_ONLINE }, - { 0x2b8400, 1, RI_ALL_ONLINE }, - { 0x2b8440, 1, RI_ALL_ONLINE }, - { 0x2b8480, 1, RI_ALL_ONLINE }, - { 0x2b84c0, 1, RI_ALL_ONLINE }, - { 0x2b8500, 1, RI_ALL_ONLINE }, - { 0x2b8540, 1, RI_ALL_ONLINE }, - { 0x2b8580, 1, RI_ALL_ONLINE }, - { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x2b8800, 1, RI_ALL_ONLINE }, - { 0x2b8840, 1, RI_ALL_ONLINE }, - { 0x2b8880, 1, RI_ALL_ONLINE }, - { 0x2b88c0, 1, RI_ALL_ONLINE }, - { 0x2b8900, 1, RI_ALL_ONLINE }, - { 0x2b8940, 1, RI_ALL_ONLINE }, - { 0x2b8980, 1, 
RI_ALL_ONLINE }, - { 0x2b89c0, 1, RI_ALL_ONLINE }, - { 0x2b8a00, 1, RI_ALL_ONLINE }, - { 0x2b8a40, 1, RI_ALL_ONLINE }, - { 0x2b8a80, 1, RI_ALL_ONLINE }, - { 0x2b8ac0, 1, RI_ALL_ONLINE }, - { 0x2b8b00, 1, RI_ALL_ONLINE }, - { 0x2b8b40, 1, RI_ALL_ONLINE }, - { 0x2b8b80, 1, RI_ALL_ONLINE }, - { 0x2b8bc0, 1, RI_ALL_ONLINE }, - { 0x2b8c00, 1, RI_ALL_ONLINE }, - { 0x2b8c40, 1, RI_ALL_ONLINE }, - { 0x2b8c80, 1, RI_ALL_ONLINE }, - { 0x2b8cc0, 1, RI_ALL_ONLINE }, - { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x2b8d00, 1, RI_ALL_ONLINE }, - { 0x2b8d40, 1, RI_ALL_ONLINE }, - { 0x2b8d80, 1, RI_ALL_ONLINE }, - { 0x2b8dc0, 1, RI_ALL_ONLINE }, - { 0x2b8e00, 1, RI_ALL_ONLINE }, - { 0x2b8e40, 1, RI_ALL_ONLINE }, - { 0x2b8e80, 1, RI_ALL_ONLINE }, - { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x2b8fe8, 2, RI_E3E3B0_ONLINE }, - { 0x2b9000, 1, RI_E2E3E3B0_ONLINE }, - { 0x2b9040, 3, RI_E2E3E3B0_ONLINE }, - { 0x2b905c, 1, RI_E3E3B0_ONLINE }, - { 0x2b9064, 1, RI_E3B0_ONLINE }, - { 0x2b9080, 10, RI_E3B0_ONLINE }, - { 0x2b9400, 14, RI_E2E3E3B0_ONLINE }, - { 0x2b943c, 19, RI_E2E3E3B0_ONLINE }, - { 0x2b9490, 10, RI_E2E3E3B0_ONLINE }, - { 0x2c0000, 2, RI_ALL_ONLINE }, - { 0x300000, 65, RI_ALL_ONLINE }, - { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE }, - { 0x300200, 58, RI_ALL_ONLINE }, - { 0x300340, 4, RI_ALL_ONLINE }, - { 0x300380, 1, RI_E2E3E3B0_ONLINE }, - { 0x300388, 1, RI_E2E3E3B0_ONLINE }, - { 0x300390, 1, RI_E2E3E3B0_ONLINE }, - { 0x300398, 1, RI_E2E3E3B0_ONLINE }, - { 0x3003a0, 1, RI_E2E3E3B0_ONLINE }, - { 0x3003a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x300400, 1, RI_ALL_ONLINE }, - { 0x300404, 255, RI_E1E1H_OFFLINE }, - { 0x302000, 4, RI_ALL_ONLINE }, - { 0x302010, 2044, RI_ALL_OFFLINE }, - { 0x304000, 4, RI_E3E3B0_ONLINE }, - { 0x320000, 1, RI_ALL_ONLINE }, - { 0x320004, 5631, RI_ALL_OFFLINE }, - { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE }, - { 0x328000, 1, RI_ALL_ONLINE }, - { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE }, - { 0x330000, 1, RI_ALL_ONLINE }, - { 0x330004, 15, RI_E1H_OFFLINE }, - { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x330044, 239, RI_E1H_OFFLINE }, - { 0x330400, 1, RI_ALL_ONLINE }, - { 0x330404, 255, RI_E1H_OFFLINE }, - { 0x330800, 1, RI_ALL_ONLINE }, - { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x330c00, 1, RI_ALL_ONLINE }, - { 0x331000, 1, RI_ALL_ONLINE }, - { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x331400, 1, RI_ALL_ONLINE }, - { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x331800, 128, RI_ALL_OFFLINE }, - { 0x331c00, 128, RI_ALL_OFFLINE }, - { 0x332000, 1, RI_ALL_ONLINE }, - { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x332404, 5631, RI_E2E3E3B0_OFFLINE }, - { 0x338000, 1, RI_ALL_ONLINE }, - { 0x338040, 1, RI_ALL_ONLINE }, - { 0x338080, 1, RI_ALL_ONLINE }, - { 0x3380c0, 1, RI_ALL_ONLINE }, - { 0x338100, 1, RI_ALL_ONLINE }, - { 0x338140, 1, RI_ALL_ONLINE }, - { 0x338180, 1, RI_ALL_ONLINE }, - { 0x3381c0, 1, RI_ALL_ONLINE }, - { 0x338200, 1, RI_ALL_ONLINE }, - { 0x338240, 1, RI_ALL_ONLINE }, - { 0x338280, 1, RI_ALL_ONLINE }, - { 0x3382c0, 1, RI_ALL_ONLINE }, - { 0x338300, 1, RI_ALL_ONLINE }, - { 0x338340, 1, RI_ALL_ONLINE }, - { 0x338380, 1, RI_ALL_ONLINE }, - { 0x3383c0, 1, RI_ALL_ONLINE }, - { 0x338400, 1, RI_ALL_ONLINE }, - { 0x338440, 1, 
RI_ALL_ONLINE }, - { 0x338480, 1, RI_ALL_ONLINE }, - { 0x3384c0, 1, RI_ALL_ONLINE }, - { 0x338500, 1, RI_ALL_ONLINE }, - { 0x338540, 1, RI_ALL_ONLINE }, - { 0x338580, 1, RI_ALL_ONLINE }, - { 0x3385c0, 19, RI_E2E3E3B0_ONLINE }, - { 0x338800, 1, RI_ALL_ONLINE }, - { 0x338840, 1, RI_ALL_ONLINE }, - { 0x338880, 1, RI_ALL_ONLINE }, - { 0x3388c0, 1, RI_ALL_ONLINE }, - { 0x338900, 1, RI_ALL_ONLINE }, - { 0x338940, 1, RI_ALL_ONLINE }, - { 0x338980, 1, RI_ALL_ONLINE }, - { 0x3389c0, 1, RI_ALL_ONLINE }, - { 0x338a00, 1, RI_ALL_ONLINE }, - { 0x338a40, 1, RI_ALL_ONLINE }, - { 0x338a80, 1, RI_ALL_ONLINE }, - { 0x338ac0, 1, RI_ALL_ONLINE }, - { 0x338b00, 1, RI_ALL_ONLINE }, - { 0x338b40, 1, RI_ALL_ONLINE }, - { 0x338b80, 1, RI_ALL_ONLINE }, - { 0x338bc0, 1, RI_ALL_ONLINE }, - { 0x338c00, 1, RI_ALL_ONLINE }, - { 0x338c40, 1, RI_ALL_ONLINE }, - { 0x338c80, 1, RI_ALL_ONLINE }, - { 0x338cc0, 1, RI_ALL_ONLINE }, - { 0x338cc4, 1, RI_E2E3E3B0_ONLINE }, - { 0x338d00, 1, RI_ALL_ONLINE }, - { 0x338d40, 1, RI_ALL_ONLINE }, - { 0x338d80, 1, RI_ALL_ONLINE }, - { 0x338dc0, 1, RI_ALL_ONLINE }, - { 0x338e00, 1, RI_ALL_ONLINE }, - { 0x338e40, 1, RI_ALL_ONLINE }, - { 0x338e80, 1, RI_ALL_ONLINE }, - { 0x338e84, 1, RI_E2E3E3B0_ONLINE }, - { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE }, - { 0x338fc4, 2, RI_E2E3E3B0_ONLINE }, - { 0x338fd0, 6, RI_E2E3E3B0_ONLINE }, - { 0x338fe8, 2, RI_E3E3B0_ONLINE }, - { 0x339000, 1, RI_E2E3E3B0_ONLINE }, - { 0x339040, 3, RI_E2E3E3B0_ONLINE }, - { 0x33905c, 1, RI_E3E3B0_ONLINE }, - { 0x339064, 1, RI_E3B0_ONLINE }, - { 0x339080, 10, RI_E3B0_ONLINE }, - { 0x340000, 2, RI_ALL_ONLINE }, -}; -#define REGS_COUNT ARRAY_SIZE(reg_addrs) - -static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a }; - -static const u32 page_vals_e2[] = { 0, 128 }; -#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2) - -static const u32 page_write_regs_e2[] = { 328476 }; -#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2) - -static const struct reg_addr page_read_regs_e2[] = { - { 0x58000, 4608, RI_E2_ONLINE } }; -#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2) - -static const u32 page_vals_e3[] = { 0, 128 }; -#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3) - -static const u32 page_write_regs_e3[] = { 328476 }; -#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3) - -static const struct reg_addr page_read_regs_e3[] = { - { 0x58000, 4608, RI_E3E3B0_ONLINE } }; -#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3) - -#endif /* BNX2X_DUMP_H */ diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c deleted file mode 100644 index 2218630..0000000 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ /dev/null @@ -1,2355 +0,0 @@ -/* bnx2x_ethtool.c: Broadcom Everest network driver. - * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - * Maintained by: Eilon Greenstein - * Written by: Eliezer Tamir - * Based on code from Michael Chan's bnx2 driver - * UDP CSUM errata workaround by Arik Gendelman - * Slowpath and fastpath rework by Vladislav Zolotarov - * Statistics and Link management by Yitchak Gertner - * - */ -#include -#include -#include -#include -#include - - -#include "bnx2x.h" -#include "bnx2x_cmn.h" -#include "bnx2x_dump.h" -#include "bnx2x_init.h" -#include "bnx2x_sp.h" - -/* Note: in the format strings below %s is replaced by the queue-name which is - * either its index or 'fcoe' for the fcoe queue. Make sure the format string - * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2 - */ -#define MAX_QUEUE_NAME_LEN 4 -static const struct { - long offset; - int size; - char string[ETH_GSTRING_LEN]; -} bnx2x_q_stats_arr[] = { -/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" }, - { Q_STATS_OFFSET32(total_unicast_packets_received_hi), - 8, "[%s]: rx_ucast_packets" }, - { Q_STATS_OFFSET32(total_multicast_packets_received_hi), - 8, "[%s]: rx_mcast_packets" }, - { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), - 8, "[%s]: rx_bcast_packets" }, - { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" }, - { Q_STATS_OFFSET32(rx_err_discard_pkt), - 4, "[%s]: rx_phy_ip_err_discards"}, - { Q_STATS_OFFSET32(rx_skb_alloc_failed), - 4, "[%s]: rx_skb_alloc_discard" }, - { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, - - { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, -/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), - 8, "[%s]: tx_ucast_packets" }, - { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), - 8, "[%s]: tx_mcast_packets" }, - { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), - 8, "[%s]: tx_bcast_packets" }, - { Q_STATS_OFFSET32(total_tpa_aggregations_hi), - 8, "[%s]: tpa_aggregations" }, - { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), - 8, "[%s]: tpa_aggregated_frames"}, - { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"} -}; - -#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) - -static const struct { - long offset; - int size; - u32 flags; -#define STATS_FLAGS_PORT 1 -#define STATS_FLAGS_FUNC 2 -#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) - char string[ETH_GSTRING_LEN]; -} bnx2x_stats_arr[] = { -/* 1 */ { STATS_OFFSET32(total_bytes_received_hi), - 8, STATS_FLAGS_BOTH, "rx_bytes" }, - { STATS_OFFSET32(error_bytes_received_hi), - 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, - { STATS_OFFSET32(total_unicast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, - { STATS_OFFSET32(total_multicast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, - { STATS_OFFSET32(total_broadcast_packets_received_hi), - 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, - { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), - 8, STATS_FLAGS_PORT, "rx_crc_errors" }, - { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), - 8, STATS_FLAGS_PORT, "rx_align_errors" }, - { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), - 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, - { STATS_OFFSET32(etherstatsoverrsizepkts_hi), - 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, -/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), - 8, STATS_FLAGS_PORT, "rx_fragments" }, - { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), - 8, STATS_FLAGS_PORT, "rx_jabbers" }, - { STATS_OFFSET32(no_buff_discard_hi), - 8, STATS_FLAGS_BOTH, "rx_discards" }, - { 
STATS_OFFSET32(mac_filter_discard), - 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, - { STATS_OFFSET32(mf_tag_discard), - 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, - { STATS_OFFSET32(brb_drop_hi), - 8, STATS_FLAGS_PORT, "rx_brb_discard" }, - { STATS_OFFSET32(brb_truncate_hi), - 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, - { STATS_OFFSET32(pause_frames_received_hi), - 8, STATS_FLAGS_PORT, "rx_pause_frames" }, - { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), - 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, - { STATS_OFFSET32(nig_timer_max), - 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, -/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), - 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, - { STATS_OFFSET32(rx_skb_alloc_failed), - 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, - { STATS_OFFSET32(hw_csum_err), - 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, - - { STATS_OFFSET32(total_bytes_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_bytes" }, - { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), - 8, STATS_FLAGS_PORT, "tx_error_bytes" }, - { STATS_OFFSET32(total_unicast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, - { STATS_OFFSET32(total_multicast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, - { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), - 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, - { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), - 8, STATS_FLAGS_PORT, "tx_mac_errors" }, - { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), - 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, -/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), - 8, STATS_FLAGS_PORT, "tx_single_collisions" }, - { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), - 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, - { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), - 8, STATS_FLAGS_PORT, "tx_deferred" }, - { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), - 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, - { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), - 8, STATS_FLAGS_PORT, "tx_late_collisions" }, - { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), - 8, STATS_FLAGS_PORT, "tx_total_collisions" }, - { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), - 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, - { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), - 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, - { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), - 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, - { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), - 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, -/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), - 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, - { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), - 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, - { STATS_OFFSET32(etherstatspktsover1522octets_hi), - 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, - { STATS_OFFSET32(pause_frames_sent_hi), - 8, STATS_FLAGS_PORT, "tx_pause_frames" }, - { STATS_OFFSET32(total_tpa_aggregations_hi), - 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, - { STATS_OFFSET32(total_tpa_aggregated_frames_hi), - 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, - { STATS_OFFSET32(total_tpa_bytes_hi), - 8, STATS_FLAGS_FUNC, "tpa_bytes"} -}; - -#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) -static int bnx2x_get_port_type(struct bnx2x *bp) -{ - int port_type; - u32 phy_idx 
= bnx2x_get_cur_phy_idx(bp); - switch (bp->link_params.phy[phy_idx].media_type) { - case ETH_PHY_SFP_FIBER: - case ETH_PHY_XFP_FIBER: - case ETH_PHY_KR: - case ETH_PHY_CX4: - port_type = PORT_FIBRE; - break; - case ETH_PHY_DA_TWINAX: - port_type = PORT_DA; - break; - case ETH_PHY_BASE_T: - port_type = PORT_TP; - break; - case ETH_PHY_NOT_PRESENT: - port_type = PORT_NONE; - break; - case ETH_PHY_UNSPECIFIED: - default: - port_type = PORT_OTHER; - break; - } - return port_type; -} - -static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct bnx2x *bp = netdev_priv(dev); - int cfg_idx = bnx2x_get_link_cfg_idx(bp); - - /* Dual Media boards present all available port types */ - cmd->supported = bp->port.supported[cfg_idx] | - (bp->port.supported[cfg_idx ^ 1] & - (SUPPORTED_TP | SUPPORTED_FIBRE)); - cmd->advertising = bp->port.advertising[cfg_idx]; - - if ((bp->state == BNX2X_STATE_OPEN) && - !(bp->flags & MF_FUNC_DIS) && - (bp->link_vars.link_up)) { - ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed); - cmd->duplex = bp->link_vars.duplex; - } else { - ethtool_cmd_speed_set( - cmd, bp->link_params.req_line_speed[cfg_idx]); - cmd->duplex = bp->link_params.req_duplex[cfg_idx]; - } - - if (IS_MF(bp)) - ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); - - cmd->port = bnx2x_get_port_type(bp); - - cmd->phy_address = bp->mdio.prtad; - cmd->transceiver = XCVR_INTERNAL; - - if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) - cmd->autoneg = AUTONEG_ENABLE; - else - cmd->autoneg = AUTONEG_DISABLE; - - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - - DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" - DP_LEVEL " supported 0x%x advertising 0x%x speed %u\n" - DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" - DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); - - return 0; -} - -static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct bnx2x *bp = netdev_priv(dev); - u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; - u32 speed; - - if (IS_MF_SD(bp)) - return 0; - - DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" - " supported 0x%x advertising 0x%x speed %u\n" - " duplex %d port %d phy_address %d transceiver %d\n" - " autoneg %d maxtxpkt %d maxrxpkt %d\n", - cmd->cmd, cmd->supported, cmd->advertising, - ethtool_cmd_speed(cmd), - cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, - cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); - - speed = ethtool_cmd_speed(cmd); - - if (IS_MF_SI(bp)) { - u32 part; - u32 line_speed = bp->link_vars.line_speed; - - /* use 10G if no link detected */ - if (!line_speed) - line_speed = 10000; - - if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) { - BNX2X_DEV_INFO("To set speed BC %X or higher " - "is required, please upgrade BC\n", - REQ_BC_VER_4_SET_MF_BW); - return -EINVAL; - } - - part = (speed * 100) / line_speed; - - if (line_speed < speed || !part) { - BNX2X_DEV_INFO("Speed setting should be in a range " - "from 1%% to 100%% " - "of actual line speed\n"); - return -EINVAL; - } - - if (bp->state != BNX2X_STATE_OPEN) - /* store value for following "load" */ - bp->pending_max = part; - else - bnx2x_update_max_mf_config(bp, part); - - return 0; - } - - cfg_idx = bnx2x_get_link_cfg_idx(bp); - old_multi_phy_config = bp->link_params.multi_phy_config; - switch (cmd->port) { - case PORT_TP: - if 
(bp->port.supported[cfg_idx] & SUPPORTED_TP) - break; /* no port change */ - - if (!(bp->port.supported[0] & SUPPORTED_TP || - bp->port.supported[1] & SUPPORTED_TP)) { - DP(NETIF_MSG_LINK, "Unsupported port type\n"); - return -EINVAL; - } - bp->link_params.multi_phy_config &= - ~PORT_HW_CFG_PHY_SELECTION_MASK; - if (bp->link_params.multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED) - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; - else - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; - break; - case PORT_FIBRE: - if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE) - break; /* no port change */ - - if (!(bp->port.supported[0] & SUPPORTED_FIBRE || - bp->port.supported[1] & SUPPORTED_FIBRE)) { - DP(NETIF_MSG_LINK, "Unsupported port type\n"); - return -EINVAL; - } - bp->link_params.multi_phy_config &= - ~PORT_HW_CFG_PHY_SELECTION_MASK; - if (bp->link_params.multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED) - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; - else - bp->link_params.multi_phy_config |= - PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; - break; - default: - DP(NETIF_MSG_LINK, "Unsupported port type\n"); - return -EINVAL; - } - /* Save new config in case command complete successuly */ - new_multi_phy_config = bp->link_params.multi_phy_config; - /* Get the new cfg_idx */ - cfg_idx = bnx2x_get_link_cfg_idx(bp); - /* Restore old config in case command failed */ - bp->link_params.multi_phy_config = old_multi_phy_config; - DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx); - - if (cmd->autoneg == AUTONEG_ENABLE) { - if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { - DP(NETIF_MSG_LINK, "Autoneg not supported\n"); - return -EINVAL; - } - - /* advertise the requested speed and duplex if supported */ - cmd->advertising &= bp->port.supported[cfg_idx]; - - bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; - bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; - bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | - cmd->advertising); - - } else { /* forced speed */ - /* advertise the requested speed and duplex if supported */ - switch (speed) { - case SPEED_10: - if (cmd->duplex == DUPLEX_FULL) { - if (!(bp->port.supported[cfg_idx] & - SUPPORTED_10baseT_Full)) { - DP(NETIF_MSG_LINK, - "10M full not supported\n"); - return -EINVAL; - } - - advertising = (ADVERTISED_10baseT_Full | - ADVERTISED_TP); - } else { - if (!(bp->port.supported[cfg_idx] & - SUPPORTED_10baseT_Half)) { - DP(NETIF_MSG_LINK, - "10M half not supported\n"); - return -EINVAL; - } - - advertising = (ADVERTISED_10baseT_Half | - ADVERTISED_TP); - } - break; - - case SPEED_100: - if (cmd->duplex == DUPLEX_FULL) { - if (!(bp->port.supported[cfg_idx] & - SUPPORTED_100baseT_Full)) { - DP(NETIF_MSG_LINK, - "100M full not supported\n"); - return -EINVAL; - } - - advertising = (ADVERTISED_100baseT_Full | - ADVERTISED_TP); - } else { - if (!(bp->port.supported[cfg_idx] & - SUPPORTED_100baseT_Half)) { - DP(NETIF_MSG_LINK, - "100M half not supported\n"); - return -EINVAL; - } - - advertising = (ADVERTISED_100baseT_Half | - ADVERTISED_TP); - } - break; - - case SPEED_1000: - if (cmd->duplex != DUPLEX_FULL) { - DP(NETIF_MSG_LINK, "1G half not supported\n"); - return -EINVAL; - } - - if (!(bp->port.supported[cfg_idx] & - SUPPORTED_1000baseT_Full)) { - DP(NETIF_MSG_LINK, "1G full not supported\n"); - return -EINVAL; - } - - advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_TP); - break; - - case SPEED_2500: - if (cmd->duplex != DUPLEX_FULL) { - 
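- /* 2500baseX is defined only as a full-duplex mode; there is no half-duplex variant to fall back to, so reject the request outright. */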
DP(NETIF_MSG_LINK, - "2.5G half not supported\n"); - return -EINVAL; - } - - if (!(bp->port.supported[cfg_idx] - & SUPPORTED_2500baseX_Full)) { - DP(NETIF_MSG_LINK, - "2.5G full not supported\n"); - return -EINVAL; - } - - advertising = (ADVERTISED_2500baseX_Full | - ADVERTISED_TP); - break; - - case SPEED_10000: - if (cmd->duplex != DUPLEX_FULL) { - DP(NETIF_MSG_LINK, "10G half not supported\n"); - return -EINVAL; - } - - if (!(bp->port.supported[cfg_idx] - & SUPPORTED_10000baseT_Full)) { - DP(NETIF_MSG_LINK, "10G full not supported\n"); - return -EINVAL; - } - - advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE); - break; - - default: - DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed); - return -EINVAL; - } - - bp->link_params.req_line_speed[cfg_idx] = speed; - bp->link_params.req_duplex[cfg_idx] = cmd->duplex; - bp->port.advertising[cfg_idx] = advertising; - } - - DP(NETIF_MSG_LINK, "req_line_speed %d\n" - DP_LEVEL " req_duplex %d advertising 0x%x\n", - bp->link_params.req_line_speed[cfg_idx], - bp->link_params.req_duplex[cfg_idx], - bp->port.advertising[cfg_idx]); - - /* Set new config */ - bp->link_params.multi_phy_config = new_multi_phy_config; - if (netif_running(dev)) { - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - bnx2x_link_set(bp); - } - - return 0; -} - -#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) -#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) -#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE) -#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE) -#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE) - -static inline bool bnx2x_is_reg_online(struct bnx2x *bp, - const struct reg_addr *reg_info) -{ - if (CHIP_IS_E1(bp)) - return IS_E1_ONLINE(reg_info->info); - else if (CHIP_IS_E1H(bp)) - return IS_E1H_ONLINE(reg_info->info); - else if (CHIP_IS_E2(bp)) - return IS_E2_ONLINE(reg_info->info); - else if (CHIP_IS_E3A0(bp)) - return IS_E3_ONLINE(reg_info->info); - else if (CHIP_IS_E3B0(bp)) - return IS_E3B0_ONLINE(reg_info->info); - else - return false; -} - -/******* Paged registers info selectors ********/ -static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) -{ - if (CHIP_IS_E2(bp)) - return page_vals_e2; - else if (CHIP_IS_E3(bp)) - return page_vals_e3; - else - return NULL; -} - -static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) -{ - if (CHIP_IS_E2(bp)) - return PAGE_MODE_VALUES_E2; - else if (CHIP_IS_E3(bp)) - return PAGE_MODE_VALUES_E3; - else - return 0; -} - -static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) -{ - if (CHIP_IS_E2(bp)) - return page_write_regs_e2; - else if (CHIP_IS_E3(bp)) - return page_write_regs_e3; - else - return NULL; -} - -static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp) -{ - if (CHIP_IS_E2(bp)) - return PAGE_WRITE_REGS_E2; - else if (CHIP_IS_E3(bp)) - return PAGE_WRITE_REGS_E3; - else - return 0; -} - -static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) -{ - if (CHIP_IS_E2(bp)) - return page_read_regs_e2; - else if (CHIP_IS_E3(bp)) - return page_read_regs_e3; - else - return NULL; -} - -static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp) -{ - if (CHIP_IS_E2(bp)) - return PAGE_READ_REGS_E2; - else if (CHIP_IS_E3(bp)) - return PAGE_READ_REGS_E3; - else - return 0; -} - -static inline int __bnx2x_get_regs_len(struct bnx2x *bp) -{ - int num_pages = __bnx2x_get_page_reg_num(bp); - int page_write_num = __bnx2x_get_page_write_num(bp); - const 
struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp); - int page_read_num = __bnx2x_get_page_read_num(bp); - int regdump_len = 0; - int i, j, k; - - for (i = 0; i < REGS_COUNT; i++) - if (bnx2x_is_reg_online(bp, ®_addrs[i])) - regdump_len += reg_addrs[i].size; - - for (i = 0; i < num_pages; i++) - for (j = 0; j < page_write_num; j++) - for (k = 0; k < page_read_num; k++) - if (bnx2x_is_reg_online(bp, &page_read_addr[k])) - regdump_len += page_read_addr[k].size; - - return regdump_len; -} - -static int bnx2x_get_regs_len(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - int regdump_len = 0; - - regdump_len = __bnx2x_get_regs_len(bp); - regdump_len *= 4; - regdump_len += sizeof(struct dump_hdr); - - return regdump_len; -} - -/** - * bnx2x_read_pages_regs - read "paged" registers - * - * @bp device handle - * @p output buffer - * - * Reads "paged" memories: memories that may only be read by first writing to a - * specific address ("write address") and then reading from a specific address - * ("read address"). There may be more than one write address per "page" and - * more than one read address per write address. - */ -static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) -{ - u32 i, j, k, n; - /* addresses of the paged registers */ - const u32 *page_addr = __bnx2x_get_page_addr_ar(bp); - /* number of paged registers */ - int num_pages = __bnx2x_get_page_reg_num(bp); - /* write addresses */ - const u32 *write_addr = __bnx2x_get_page_write_ar(bp); - /* number of write addresses */ - int write_num = __bnx2x_get_page_write_num(bp); - /* read addresses info */ - const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp); - /* number of read addresses */ - int read_num = __bnx2x_get_page_read_num(bp); - - for (i = 0; i < num_pages; i++) { - for (j = 0; j < write_num; j++) { - REG_WR(bp, write_addr[j], page_addr[i]); - for (k = 0; k < read_num; k++) - if (bnx2x_is_reg_online(bp, &read_addr[k])) - for (n = 0; n < - read_addr[k].size; n++) - *p++ = REG_RD(bp, - read_addr[k].addr + n*4); - } - } -} - -static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) -{ - u32 i, j; - - /* Read the regular registers */ - for (i = 0; i < REGS_COUNT; i++) - if (bnx2x_is_reg_online(bp, ®_addrs[i])) - for (j = 0; j < reg_addrs[i].size; j++) - *p++ = REG_RD(bp, reg_addrs[i].addr + j*4); - - /* Read "paged" registes */ - bnx2x_read_pages_regs(bp, p); -} - -static void bnx2x_get_regs(struct net_device *dev, - struct ethtool_regs *regs, void *_p) -{ - u32 *p = _p; - struct bnx2x *bp = netdev_priv(dev); - struct dump_hdr dump_hdr = {0}; - - regs->version = 0; - memset(p, 0, regs->len); - - if (!netif_running(bp->dev)) - return; - - /* Disable parity attentions as long as following dump may - * cause false alarms by reading never written registers. We - * will re-enable parity attentions right after the dump. - */ - bnx2x_disable_blocks_parity(bp); - - dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; - dump_hdr.dump_sign = dump_sign_all; - dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); - dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); - dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); - dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); - - if (CHIP_IS_E1(bp)) - dump_hdr.info = RI_E1_ONLINE; - else if (CHIP_IS_E1H(bp)) - dump_hdr.info = RI_E1H_ONLINE; - else if (!CHIP_IS_E1x(bp)) - dump_hdr.info = RI_E2_ONLINE | - (BP_PATH(bp) ? 
RI_PATH1_DUMP : RI_PATH0_DUMP); - - memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); - p += dump_hdr.hdr_size + 1; - - /* Actually read the registers */ - __bnx2x_get_regs(bp, p); - - /* Re-enable parity attentions */ - bnx2x_clear_blocks_parity(bp); - bnx2x_enable_blocks_parity(bp); -} - -static void bnx2x_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct bnx2x *bp = netdev_priv(dev); - u8 phy_fw_ver[PHY_FW_VER_LEN]; - - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); - - phy_fw_ver[0] = '\0'; - if (bp->port.pmf) { - bnx2x_acquire_phy_lock(bp); - bnx2x_get_ext_phy_fw_version(&bp->link_params, - (bp->state != BNX2X_STATE_CLOSED), - phy_fw_ver, PHY_FW_VER_LEN); - bnx2x_release_phy_lock(bp); - } - - strncpy(info->fw_version, bp->fw_ver, 32); - snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), - "bc %d.%d.%d%s%s", - (bp->common.bc_ver & 0xff0000) >> 16, - (bp->common.bc_ver & 0xff00) >> 8, - (bp->common.bc_ver & 0xff), - ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); - strcpy(info->bus_info, pci_name(bp->pdev)); - info->n_stats = BNX2X_NUM_STATS; - info->testinfo_len = BNX2X_NUM_TESTS; - info->eedump_len = bp->common.flash_size; - info->regdump_len = bnx2x_get_regs_len(dev); -} - -static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct bnx2x *bp = netdev_priv(dev); - - if (bp->flags & NO_WOL_FLAG) { - wol->supported = 0; - wol->wolopts = 0; - } else { - wol->supported = WAKE_MAGIC; - if (bp->wol) - wol->wolopts = WAKE_MAGIC; - else - wol->wolopts = 0; - } - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct bnx2x *bp = netdev_priv(dev); - - if (wol->wolopts & ~WAKE_MAGIC) - return -EINVAL; - - if (wol->wolopts & WAKE_MAGIC) { - if (bp->flags & NO_WOL_FLAG) - return -EINVAL; - - bp->wol = 1; - } else - bp->wol = 0; - - return 0; -} - -static u32 bnx2x_get_msglevel(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - - return bp->msg_enable; -} - -static void bnx2x_set_msglevel(struct net_device *dev, u32 level) -{ - struct bnx2x *bp = netdev_priv(dev); - - if (capable(CAP_NET_ADMIN)) { - /* dump MCP trace */ - if (level & BNX2X_MSG_MCP) - bnx2x_fw_dump_lvl(bp, KERN_INFO); - bp->msg_enable = level; - } -} - -static int bnx2x_nway_reset(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - - if (!bp->port.pmf) - return 0; - - if (netif_running(dev)) { - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - bnx2x_link_set(bp); - } - - return 0; -} - -static u32 bnx2x_get_link(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - - if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN)) - return 0; - - return bp->link_vars.link_up; -} - -static int bnx2x_get_eeprom_len(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - - return bp->common.flash_size; -} - -static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int count, i; - u32 val = 0; - - /* adjust timeout for emulation/FPGA */ - count = BNX2X_NVRAM_TIMEOUT_COUNT; - if (CHIP_REV_IS_SLOW(bp)) - count *= 100; - - /* request access to nvram interface */ - REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, - (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); - - for (i = 0; i < count*10; i++) { - val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); - if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) - break; - - udelay(5); - } - - if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { - 
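- /* The grant bit never appeared within the polling window, so another agent (the MCP firmware or the other port) still holds the NVRAM arbiter; back off with -EBUSY instead of touching the flash. */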
DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); - return -EBUSY; - } - - return 0; -} - -static int bnx2x_release_nvram_lock(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int count, i; - u32 val = 0; - - /* adjust timeout for emulation/FPGA */ - count = BNX2X_NVRAM_TIMEOUT_COUNT; - if (CHIP_REV_IS_SLOW(bp)) - count *= 100; - - /* relinquish nvram interface */ - REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, - (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); - - for (i = 0; i < count*10; i++) { - val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); - if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) - break; - - udelay(5); - } - - if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { - DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n"); - return -EBUSY; - } - - return 0; -} - -static void bnx2x_enable_nvram_access(struct bnx2x *bp) -{ - u32 val; - - val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); - - /* enable both bits, even on read */ - REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, - (val | MCPR_NVM_ACCESS_ENABLE_EN | - MCPR_NVM_ACCESS_ENABLE_WR_EN)); -} - -static void bnx2x_disable_nvram_access(struct bnx2x *bp) -{ - u32 val; - - val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); - - /* disable both bits, even after read */ - REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, - (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | - MCPR_NVM_ACCESS_ENABLE_WR_EN))); -} - -static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, - u32 cmd_flags) -{ - int count, i, rc; - u32 val; - - /* build the command word */ - cmd_flags |= MCPR_NVM_COMMAND_DOIT; - - /* need to clear DONE bit separately */ - REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); - - /* address of the NVRAM to read from */ - REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, - (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); - - /* issue a read command */ - REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); - - /* adjust timeout for emulation/FPGA */ - count = BNX2X_NVRAM_TIMEOUT_COUNT; - if (CHIP_REV_IS_SLOW(bp)) - count *= 100; - - /* wait for completion */ - *ret_val = 0; - rc = -EBUSY; - for (i = 0; i < count; i++) { - udelay(5); - val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); - - if (val & MCPR_NVM_COMMAND_DONE) { - val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); - /* we read nvram data in cpu order - * but ethtool sees it as an array of bytes - * converting to big-endian will do the work */ - *ret_val = cpu_to_be32(val); - rc = 0; - break; - } - } - - return rc; -} - -static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, - int buf_size) -{ - int rc; - u32 cmd_flags; - __be32 val; - - if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { - DP(BNX2X_MSG_NVM, - "Invalid parameter: offset 0x%x buf_size 0x%x\n", - offset, buf_size); - return -EINVAL; - } - - if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" - " buf_size (0x%x) > flash_size (0x%x)\n", - offset, buf_size, bp->common.flash_size); - return -EINVAL; - } - - /* request access to nvram interface */ - rc = bnx2x_acquire_nvram_lock(bp); - if (rc) - return rc; - - /* enable access to nvram interface */ - bnx2x_enable_nvram_access(bp); - - /* read the first word(s) */ - cmd_flags = MCPR_NVM_COMMAND_FIRST; - while ((buf_size > sizeof(u32)) && (rc == 0)) { - rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); - memcpy(ret_buf, &val, 4); - - /* advance to the next dword */ - offset += sizeof(u32); - ret_buf += sizeof(u32); - buf_size -= sizeof(u32); - cmd_flags = 0; - } - - if (rc == 0) { - cmd_flags |= 
MCPR_NVM_COMMAND_LAST; - rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); - memcpy(ret_buf, &val, 4); - } - - /* disable access to nvram interface */ - bnx2x_disable_nvram_access(bp); - bnx2x_release_nvram_lock(bp); - - return rc; -} - -static int bnx2x_get_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *eebuf) -{ - struct bnx2x *bp = netdev_priv(dev); - int rc; - - if (!netif_running(dev)) - return -EAGAIN; - - DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" - DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", - eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, - eeprom->len, eeprom->len); - - /* parameters already validated in ethtool_get_eeprom */ - - rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); - - return rc; -} - -static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, - u32 cmd_flags) -{ - int count, i, rc; - - /* build the command word */ - cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; - - /* need to clear DONE bit separately */ - REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); - - /* write the data */ - REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val); - - /* address of the NVRAM to write to */ - REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, - (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); - - /* issue the write command */ - REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); - - /* adjust timeout for emulation/FPGA */ - count = BNX2X_NVRAM_TIMEOUT_COUNT; - if (CHIP_REV_IS_SLOW(bp)) - count *= 100; - - /* wait for completion */ - rc = -EBUSY; - for (i = 0; i < count; i++) { - udelay(5); - val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); - if (val & MCPR_NVM_COMMAND_DONE) { - rc = 0; - break; - } - } - - return rc; -} - -#define BYTE_OFFSET(offset) (8 * (offset & 0x03)) - -static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, - int buf_size) -{ - int rc; - u32 cmd_flags; - u32 align_offset; - __be32 val; - - if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" - " buf_size (0x%x) > flash_size (0x%x)\n", - offset, buf_size, bp->common.flash_size); - return -EINVAL; - } - - /* request access to nvram interface */ - rc = bnx2x_acquire_nvram_lock(bp); - if (rc) - return rc; - - /* enable access to nvram interface */ - bnx2x_enable_nvram_access(bp); - - cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); - align_offset = (offset & ~0x03); - rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags); - - if (rc == 0) { - val &= ~(0xff << BYTE_OFFSET(offset)); - val |= (*data_buf << BYTE_OFFSET(offset)); - - /* nvram data is returned as an array of bytes - * convert it back to cpu order */ - val = be32_to_cpu(val); - - rc = bnx2x_nvram_write_dword(bp, align_offset, val, - cmd_flags); - } - - /* disable access to nvram interface */ - bnx2x_disable_nvram_access(bp); - bnx2x_release_nvram_lock(bp); - - return rc; -} - -static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, - int buf_size) -{ - int rc; - u32 cmd_flags; - u32 val; - u32 written_so_far; - - if (buf_size == 1) /* ethtool */ - return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); - - if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { - DP(BNX2X_MSG_NVM, - "Invalid parameter: offset 0x%x buf_size 0x%x\n", - offset, buf_size); - return -EINVAL; - } - - if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" - " buf_size (0x%x) > flash_size (0x%x)\n", - offset, buf_size, bp->common.flash_size); - 
return -EINVAL; - } - - /* request access to nvram interface */ - rc = bnx2x_acquire_nvram_lock(bp); - if (rc) - return rc; - - /* enable access to nvram interface */ - bnx2x_enable_nvram_access(bp); - - written_so_far = 0; - cmd_flags = MCPR_NVM_COMMAND_FIRST; - while ((written_so_far < buf_size) && (rc == 0)) { - if (written_so_far == (buf_size - sizeof(u32))) - cmd_flags |= MCPR_NVM_COMMAND_LAST; - else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0) - cmd_flags |= MCPR_NVM_COMMAND_LAST; - else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0) - cmd_flags |= MCPR_NVM_COMMAND_FIRST; - - memcpy(&val, data_buf, 4); - - rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); - - /* advance to the next dword */ - offset += sizeof(u32); - data_buf += sizeof(u32); - written_so_far += sizeof(u32); - cmd_flags = 0; - } - - /* disable access to nvram interface */ - bnx2x_disable_nvram_access(bp); - bnx2x_release_nvram_lock(bp); - - return rc; -} - -static int bnx2x_set_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *eebuf) -{ - struct bnx2x *bp = netdev_priv(dev); - int port = BP_PORT(bp); - int rc = 0; - u32 ext_phy_config; - if (!netif_running(dev)) - return -EAGAIN; - - DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" - DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", - eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, - eeprom->len, eeprom->len); - - /* parameters already validated in ethtool_set_eeprom */ - - /* PHY eeprom can be accessed only by the PMF */ - if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) && - !bp->port.pmf) - return -EINVAL; - - ext_phy_config = - SHMEM_RD(bp, - dev_info.port_hw_config[port].external_phy_config); - - if (eeprom->magic == 0x50485950) { - /* 'PHYP' (0x50485950): prepare phy for FW upgrade */ - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - bnx2x_acquire_phy_lock(bp); - rc |= bnx2x_link_reset(&bp->link_params, - &bp->link_vars, 0); - if (XGXS_EXT_PHY_TYPE(ext_phy_config) == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_HIGH, port); - bnx2x_release_phy_lock(bp); - bnx2x_link_report(bp); - - } else if (eeprom->magic == 0x50485952) { - /* 'PHYR' (0x50485952): re-init link after FW upgrade */ - if (bp->state == BNX2X_STATE_OPEN) { - bnx2x_acquire_phy_lock(bp); - rc |= bnx2x_link_reset(&bp->link_params, - &bp->link_vars, 1); - - rc |= bnx2x_phy_init(&bp->link_params, - &bp->link_vars); - bnx2x_release_phy_lock(bp); - bnx2x_calc_fc_adv(bp); - } - } else if (eeprom->magic == 0x53985943) { - /* 'PHYC' (0x53985943): PHY FW upgrade completed */ - if (XGXS_EXT_PHY_TYPE(ext_phy_config) == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) { - - /* DSP Remove Download Mode */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_LOW, port); - - bnx2x_acquire_phy_lock(bp); - - bnx2x_sfx7101_sp_sw_reset(bp, - &bp->link_params.phy[EXT_PHY1]); - - /* wait 0.5 sec to allow it to run */ - msleep(500); - bnx2x_ext_phy_hw_reset(bp, port); - msleep(500); - bnx2x_release_phy_lock(bp); - } - } else - rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); - - return rc; -} - -static int bnx2x_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) -{ - struct bnx2x *bp = netdev_priv(dev); - - memset(coal, 0, sizeof(struct ethtool_coalesce)); - - coal->rx_coalesce_usecs = bp->rx_ticks; - coal->tx_coalesce_usecs = bp->tx_ticks; - - return 0; -} - -static int bnx2x_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) -{ - struct bnx2x *bp 
= netdev_priv(dev); - - bp->rx_ticks = (u16)coal->rx_coalesce_usecs; - if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) - bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; - - bp->tx_ticks = (u16)coal->tx_coalesce_usecs; - if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) - bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; - - if (netif_running(dev)) - bnx2x_update_coalesce(bp); - - return 0; -} - -static void bnx2x_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) -{ - struct bnx2x *bp = netdev_priv(dev); - - ering->rx_max_pending = MAX_RX_AVAIL; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = 0; - - if (bp->rx_ring_size) - ering->rx_pending = bp->rx_ring_size; - else - if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) - ering->rx_pending = MAX_RX_AVAIL/bp->num_queues; - else - ering->rx_pending = MAX_RX_AVAIL; - - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = 0; - - ering->tx_max_pending = MAX_TX_AVAIL; - ering->tx_pending = bp->tx_ring_size; -} - -static int bnx2x_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) -{ - struct bnx2x *bp = netdev_priv(dev); - - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - printk(KERN_ERR "Handling parity error recovery. Try again later\n"); - return -EAGAIN; - } - - if ((ering->rx_pending > MAX_RX_AVAIL) || - (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : - MIN_RX_SIZE_TPA)) || - (ering->tx_pending > MAX_TX_AVAIL) || - (ering->tx_pending <= MAX_SKB_FRAGS + 4)) - return -EINVAL; - - bp->rx_ring_size = ering->rx_pending; - bp->tx_ring_size = ering->tx_pending; - - return bnx2x_reload_if_running(dev); -} - -static void bnx2x_get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct bnx2x *bp = netdev_priv(dev); - int cfg_idx = bnx2x_get_link_cfg_idx(bp); - epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] == - BNX2X_FLOW_CTRL_AUTO); - - epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) == - BNX2X_FLOW_CTRL_RX); - epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) == - BNX2X_FLOW_CTRL_TX); - - DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" - DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", - epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); -} - -static int bnx2x_set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct bnx2x *bp = netdev_priv(dev); - u32 cfg_idx = bnx2x_get_link_cfg_idx(bp); - if (IS_MF(bp)) - return 0; - - DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" - DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", - epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); - - bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO; - - if (epause->rx_pause) - bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX; - - if (epause->tx_pause) - bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX; - - if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO) - bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE; - - if (epause->autoneg) { - if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { - DP(NETIF_MSG_LINK, "autoneg not supported\n"); - return -EINVAL; - } - - if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) { - bp->link_params.req_flow_ctrl[cfg_idx] = - BNX2X_FLOW_CTRL_AUTO; - } - } - - DP(NETIF_MSG_LINK, - "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]); - - if (netif_running(dev)) { - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - bnx2x_link_set(bp); - } - - return 0; -} - 
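
The kernel-doc for bnx2x_read_pages_regs() above describes the access
pattern for "paged" memories: a page value is first written to each
page-select ("write") address, and the register contents are then read
back through the read ranges. The following is a minimal standalone
sketch of that pattern, not the driver's code: reg_read()/reg_write()
are hypothetical accessors standing in for the REG_RD()/REG_WR()
macros, the range descriptor is simplified from struct reg_addr, and
the per-chip filtering done by bnx2x_is_reg_online() is omitted.

	#include <stddef.h>
	#include <stdint.h>

	struct read_range {			/* simplified struct reg_addr */
		uint32_t addr;			/* first register of the range */
		uint32_t count;			/* number of 32-bit registers */
	};

	/* For every page value, program all page-select registers, then dump
	 * each register of each read range; returns the advanced output
	 * pointer, mirroring how the driver walks its buffer pointer p.
	 */
	static uint32_t *read_paged(uint32_t *p,
				    const uint32_t *pages, size_t num_pages,
				    const uint32_t *sel_regs, size_t num_sel,
				    const struct read_range *ranges,
				    size_t num_ranges,
				    uint32_t (*reg_read)(uint32_t),
				    void (*reg_write)(uint32_t, uint32_t))
	{
		size_t i, j, k, n;

		for (i = 0; i < num_pages; i++)
			for (j = 0; j < num_sel; j++) {
				reg_write(sel_regs[j], pages[i]); /* select page */
				for (k = 0; k < num_ranges; k++)
					for (n = 0; n < ranges[k].count; n++)
						*p++ = reg_read(ranges[k].addr + n * 4);
			}
		return p;
	}

Per the tables removed above, E2 and E3 have a single page-select
register and two page values (0 and 128), so the one 4608-register
read window at 0x58000 is dumped once per page.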
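The register self-test that follows walks reg_tbl and, for each entry
that applies to the running chip, writes a test pattern under the
register's read/write mask, reads it back, restores the saved original
value, and only then compares; the whole pass is repeated with
0x00000000 and then 0xffffffff so that both stuck-at-one and
stuck-at-zero bits are caught. A minimal sketch of the per-register
step, again with hypothetical reg_read()/reg_write() accessors in
place of REG_RD()/REG_WR():

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	static bool test_one_reg(uint32_t offset, uint32_t mask,
				 uint32_t (*reg_read)(uint32_t),
				 void (*reg_write)(uint32_t, uint32_t))
	{
		static const uint32_t patterns[] = { 0x00000000, 0xffffffff };
		size_t i;

		for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
			uint32_t save_val = reg_read(offset);
			uint32_t val;

			reg_write(offset, patterns[i] & mask);	/* write pattern */
			val = reg_read(offset);
			reg_write(offset, save_val);		/* restore first */

			/* only the writable bits must read back as written */
			if ((val & mask) != (patterns[i] & mask))
				return false;
		}
		return true;
	}

In the driver the tested offset is computed as offset0 + port *
offset1, so each port exercises its own copy of the register, and the
BNX2X_CHIP_MASK_* field gates entries by chip revision.
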
-static const struct { - char string[ETH_GSTRING_LEN]; -} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { - { "register_test (offline)" }, - { "memory_test (offline)" }, - { "loopback_test (offline)" }, - { "nvram_test (online)" }, - { "interrupt_test (online)" }, - { "link_test (online)" }, - { "idle check (online)" } -}; - -enum { - BNX2X_CHIP_E1_OFST = 0, - BNX2X_CHIP_E1H_OFST, - BNX2X_CHIP_E2_OFST, - BNX2X_CHIP_E3_OFST, - BNX2X_CHIP_E3B0_OFST, - BNX2X_CHIP_MAX_OFST -}; - -#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST) -#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST) -#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST) -#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST) -#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST) - -#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1) -#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H) - -static int bnx2x_test_registers(struct bnx2x *bp) -{ - int idx, i, rc = -ENODEV; - u32 wr_val = 0, hw; - int port = BP_PORT(bp); - static const struct { - u32 hw; - u32 offset0; - u32 offset1; - u32 mask; - } reg_tbl[] = { -/* 0 */ { BNX2X_CHIP_MASK_ALL, - BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, - { BNX2X_CHIP_MASK_ALL, - DORQ_REG_DB_ADDR0, 4, 0xffffffff }, - { BNX2X_CHIP_MASK_E1X, - HC_REG_AGG_INT_0, 4, 0x000003ff }, - { BNX2X_CHIP_MASK_ALL, - PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, - { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3, - PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, - { BNX2X_CHIP_MASK_E3B0, - PBF_REG_INIT_CRD_Q0, 4, 0x000007ff }, - { BNX2X_CHIP_MASK_ALL, - PRS_REG_CID_PORT_0, 4, 0x00ffffff }, - { BNX2X_CHIP_MASK_ALL, - PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, - { BNX2X_CHIP_MASK_ALL, - PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, - { BNX2X_CHIP_MASK_ALL, - PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, -/* 10 */ { BNX2X_CHIP_MASK_ALL, - PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, - { BNX2X_CHIP_MASK_ALL, - PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, - { BNX2X_CHIP_MASK_ALL, - QM_REG_CONNNUM_0, 4, 0x000fffff }, - { BNX2X_CHIP_MASK_ALL, - TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, - { BNX2X_CHIP_MASK_ALL, - SRC_REG_KEYRSS0_0, 40, 0xffffffff }, - { BNX2X_CHIP_MASK_ALL, - SRC_REG_KEYRSS0_7, 40, 0xffffffff }, - { BNX2X_CHIP_MASK_ALL, - XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, - { BNX2X_CHIP_MASK_ALL, - XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, - { BNX2X_CHIP_MASK_ALL, - XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, -/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, - NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, - { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, - NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, -/* 30 */ { BNX2X_CHIP_MASK_ALL, - NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, - { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, 
- NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001}, - { BNX2X_CHIP_MASK_ALL, - NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, - { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, - NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, - { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, - NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, - - { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } - }; - - if (!netif_running(bp->dev)) - return rc; - - if (CHIP_IS_E1(bp)) - hw = BNX2X_CHIP_MASK_E1; - else if (CHIP_IS_E1H(bp)) - hw = BNX2X_CHIP_MASK_E1H; - else if (CHIP_IS_E2(bp)) - hw = BNX2X_CHIP_MASK_E2; - else if (CHIP_IS_E3B0(bp)) - hw = BNX2X_CHIP_MASK_E3B0; - else /* e3 A0 */ - hw = BNX2X_CHIP_MASK_E3; - - /* Repeat the test twice: - First by writing 0x00000000, second by writing 0xffffffff */ - for (idx = 0; idx < 2; idx++) { - - switch (idx) { - case 0: - wr_val = 0; - break; - case 1: - wr_val = 0xffffffff; - break; - } - - for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { - u32 offset, mask, save_val, val; - if (!(hw & reg_tbl[i].hw)) - continue; - - offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; - mask = reg_tbl[i].mask; - - save_val = REG_RD(bp, offset); - - REG_WR(bp, offset, wr_val & mask); - - val = REG_RD(bp, offset); - - /* Restore the original register's value */ - REG_WR(bp, offset, save_val); - - /* verify value is as expected */ - if ((val & mask) != (wr_val & mask)) { - DP(NETIF_MSG_HW, - "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", - offset, val, wr_val, mask); - goto test_reg_exit; - } - } - } - - rc = 0; - -test_reg_exit: - return rc; -} - -static int bnx2x_test_memory(struct bnx2x *bp) -{ - int i, j, rc = -ENODEV; - u32 val, index; - static const struct { - u32 offset; - int size; - } mem_tbl[] = { - { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE }, - { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE }, - { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE }, - { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE }, - { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE }, - { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE }, - { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE }, - - { 0xffffffff, 0 } - }; - - static const struct { - char *name; - u32 offset; - u32 hw_mask[BNX2X_CHIP_MAX_OFST]; - } prty_tbl[] = { - { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, - {0x3ffc0, 0, 0, 0} }, - { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, - {0x2, 0x2, 0, 0} }, - { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, - {0, 0, 0, 0} }, - { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, - {0x3ffc0, 0, 0, 0} }, - { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, - {0x3ffc0, 0, 0, 0} }, - { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, - {0x3ffc1, 0, 0, 0} }, - - { NULL, 0xffffffff, {0, 0, 0, 0} } - }; - - if (!netif_running(bp->dev)) - return rc; - - if (CHIP_IS_E1(bp)) - index = BNX2X_CHIP_E1_OFST; - else if (CHIP_IS_E1H(bp)) - index = BNX2X_CHIP_E1H_OFST; - else if (CHIP_IS_E2(bp)) - index = BNX2X_CHIP_E2_OFST; - else /* e3 */ - index = BNX2X_CHIP_E3_OFST; - - /* pre-Check the parity status */ - for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { - val = REG_RD(bp, prty_tbl[i].offset); - if (val & ~(prty_tbl[i].hw_mask[index])) { - DP(NETIF_MSG_HW, - "%s is 0x%x\n", prty_tbl[i].name, val); - goto test_mem_exit; - } - } - - /* Go through all the memories */ - for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) - for (j = 0; j < mem_tbl[i].size; j++) - REG_RD(bp, mem_tbl[i].offset + j*4); - - /* Check the parity status */ - for (i = 0; 
prty_tbl[i].offset != 0xffffffff; i++) { - val = REG_RD(bp, prty_tbl[i].offset); - if (val & ~(prty_tbl[i].hw_mask[index])) { - DP(NETIF_MSG_HW, - "%s is 0x%x\n", prty_tbl[i].name, val); - goto test_mem_exit; - } - } - - rc = 0; - -test_mem_exit: - return rc; -} - -static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) -{ - int cnt = 1400; - - if (link_up) { - while (bnx2x_link_test(bp, is_serdes) && cnt--) - msleep(20); - - if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) - DP(NETIF_MSG_LINK, "Timeout waiting for link up\n"); - } -} - -static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) -{ - unsigned int pkt_size, num_pkts, i; - struct sk_buff *skb; - unsigned char *packet; - struct bnx2x_fastpath *fp_rx = &bp->fp[0]; - struct bnx2x_fastpath *fp_tx = &bp->fp[0]; - struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0]; - u16 tx_start_idx, tx_idx; - u16 rx_start_idx, rx_idx; - u16 pkt_prod, bd_prod, rx_comp_cons; - struct sw_tx_bd *tx_buf; - struct eth_tx_start_bd *tx_start_bd; - struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; - struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; - dma_addr_t mapping; - union eth_rx_cqe *cqe; - u8 cqe_fp_flags, cqe_fp_type; - struct sw_rx_bd *rx_buf; - u16 len; - int rc = -ENODEV; - - /* check the loopback mode */ - switch (loopback_mode) { - case BNX2X_PHY_LOOPBACK: - if (bp->link_params.loopback_mode != LOOPBACK_XGXS) - return -EINVAL; - break; - case BNX2X_MAC_LOOPBACK: - bp->link_params.loopback_mode = CHIP_IS_E3(bp) ? - LOOPBACK_XMAC : LOOPBACK_BMAC; - bnx2x_phy_init(&bp->link_params, &bp->link_vars); - break; - default: - return -EINVAL; - } - - /* prepare the loopback packet */ - pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? - bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); - skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size); - if (!skb) { - rc = -ENOMEM; - goto test_loopback_exit; - } - packet = skb_put(skb, pkt_size); - memcpy(packet, bp->dev->dev_addr, ETH_ALEN); - memset(packet + ETH_ALEN, 0, ETH_ALEN); - memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); - for (i = ETH_HLEN; i < pkt_size; i++) - packet[i] = (unsigned char) (i & 0xff); - mapping = dma_map_single(&bp->pdev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - rc = -ENOMEM; - dev_kfree_skb(skb); - BNX2X_ERR("Unable to map SKB\n"); - goto test_loopback_exit; - } - - /* send the loopback packet */ - num_pkts = 0; - tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb); - rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); - - pkt_prod = txdata->tx_pkt_prod++; - tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; - tx_buf->first_bd = txdata->tx_bd_prod; - tx_buf->skb = skb; - tx_buf->flags = 0; - - bd_prod = TX_BD(txdata->tx_bd_prod); - tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; - tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); - tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ - tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); - tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); - tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; - SET_FLAG(tx_start_bd->general_data, - ETH_TX_START_BD_ETH_ADDR_TYPE, - UNICAST_ADDRESS); - SET_FLAG(tx_start_bd->general_data, - ETH_TX_START_BD_HDR_NBDS, - 1); - - /* turn on parsing and get a BD */ - bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - - pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; - pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; - - memset(pbd_e2, 0, 
sizeof(struct eth_tx_parse_bd_e2)); - memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); - - wmb(); - - txdata->tx_db.data.prod += 2; - barrier(); - DOORBELL(bp, txdata->cid, txdata->tx_db.raw); - - mmiowb(); - barrier(); - - num_pkts++; - txdata->tx_bd_prod += 2; /* start + pbd */ - - udelay(100); - - tx_idx = le16_to_cpu(*txdata->tx_cons_sb); - if (tx_idx != tx_start_idx + num_pkts) - goto test_loopback_exit; - - /* Unlike HC IGU won't generate an interrupt for status block - * updates that have been performed while interrupts were - * disabled. - */ - if (bp->common.int_block == INT_BLOCK_IGU) { - /* Disable local BHes to prevent a dead-lock situation between - * sch_direct_xmit() and bnx2x_run_loopback() (calling - * bnx2x_tx_int()), as both are taking netif_tx_lock(). - */ - local_bh_disable(); - bnx2x_tx_int(bp, txdata); - local_bh_enable(); - } - - rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); - if (rx_idx != rx_start_idx + num_pkts) - goto test_loopback_exit; - - rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons); - cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)]; - cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; - cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; - if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) - goto test_loopback_rx_exit; - - len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); - if (len != pkt_size) - goto test_loopback_rx_exit; - - rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; - dma_sync_single_for_cpu(&bp->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - fp_rx->rx_buf_size, DMA_FROM_DEVICE); - skb = rx_buf->skb; - skb_reserve(skb, cqe->fast_path_cqe.placement_offset); - for (i = ETH_HLEN; i < pkt_size; i++) - if (*(skb->data + i) != (unsigned char) (i & 0xff)) - goto test_loopback_rx_exit; - - rc = 0; - -test_loopback_rx_exit: - - fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons); - fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod); - fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons); - fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod); - - /* Update producers */ - bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod, - fp_rx->rx_sge_prod); - -test_loopback_exit: - bp->link_params.loopback_mode = LOOPBACK_NONE; - - return rc; -} - -static int bnx2x_test_loopback(struct bnx2x *bp) -{ - int rc = 0, res; - - if (BP_NOMCP(bp)) - return rc; - - if (!netif_running(bp->dev)) - return BNX2X_LOOPBACK_FAILED; - - bnx2x_netif_stop(bp, 1); - bnx2x_acquire_phy_lock(bp); - - res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK); - if (res) { - DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); - rc |= BNX2X_PHY_LOOPBACK_FAILED; - } - - res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK); - if (res) { - DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); - rc |= BNX2X_MAC_LOOPBACK_FAILED; - } - - bnx2x_release_phy_lock(bp); - bnx2x_netif_start(bp); - - return rc; -} - -#define CRC32_RESIDUAL 0xdebb20e3 - -static int bnx2x_test_nvram(struct bnx2x *bp) -{ - static const struct { - int offset; - int size; - } nvram_tbl[] = { - { 0, 0x14 }, /* bootstrap */ - { 0x14, 0xec }, /* dir */ - { 0x100, 0x350 }, /* manuf_info */ - { 0x450, 0xf0 }, /* feature_info */ - { 0x640, 0x64 }, /* upgrade_key_info */ - { 0x708, 0x70 }, /* manuf_key_info */ - { 0, 0 } - }; - __be32 buf[0x350 / 4]; - u8 *data = (u8 *)buf; - int i, rc; - u32 magic, crc; - - if (BP_NOMCP(bp)) - return 0; - - rc = bnx2x_nvram_read(bp, 0, data, 4); - if (rc) { - DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); - goto test_nvram_exit; 
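The NVRAM check that follows leans on a standard CRC-32 residue property rather than on a stored expected value: running ether_crc_le() over a region together with its trailing CRC32 collapses to the constant CRC32_RESIDUAL (0xdebb20e3) whenever the region is intact. A minimal standalone sketch of that property, assuming a little-endian host and a local bit-reflected CRC standing in for the kernel's ether_crc_le():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bit-reflected CRC-32 register (poly 0xedb88320, init ~0, no final
 * inversion), the same convention the kernel's ether_crc_le() uses. */
static uint32_t crc32_le_raw(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint8_t region[20];
	uint32_t crc;

	memset(region, 0xab, 16);		/* arbitrary payload */
	crc = ~crc32_le_raw(region, 16);	/* CRC32 as it would be stored */
	memcpy(region + 16, &crc, 4);		/* append it; assumes LE host */

	/* CRC over the payload plus its own CRC is a constant. */
	printf("residual = 0x%08x\n", crc32_le_raw(region, 20));
	return 0;				/* prints residual = 0xdebb20e3 */
}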
- } - - magic = be32_to_cpu(buf[0]); - if (magic != 0x669955aa) { - DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic); - rc = -ENODEV; - goto test_nvram_exit; - } - - for (i = 0; nvram_tbl[i].size; i++) { - - rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data, - nvram_tbl[i].size); - if (rc) { - DP(NETIF_MSG_PROBE, - "nvram_tbl[%d] read data (rc %d)\n", i, rc); - goto test_nvram_exit; - } - - crc = ether_crc_le(nvram_tbl[i].size, data); - if (crc != CRC32_RESIDUAL) { - DP(NETIF_MSG_PROBE, - "nvram_tbl[%d] crc value (0x%08x)\n", i, crc); - rc = -ENODEV; - goto test_nvram_exit; - } - } - -test_nvram_exit: - return rc; -} - -/* Send an EMPTY ramrod on the first queue */ -static int bnx2x_test_intr(struct bnx2x *bp) -{ - struct bnx2x_queue_state_params params = {0}; - - if (!netif_running(bp->dev)) - return -ENODEV; - - params.q_obj = &bp->fp->q_obj; - params.cmd = BNX2X_Q_CMD_EMPTY; - - __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); - - return bnx2x_queue_state_change(bp, ¶ms); -} - -static void bnx2x_self_test(struct net_device *dev, - struct ethtool_test *etest, u64 *buf) -{ - struct bnx2x *bp = netdev_priv(dev); - u8 is_serdes; - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - printk(KERN_ERR "Handling parity error recovery. Try again later\n"); - etest->flags |= ETH_TEST_FL_FAILED; - return; - } - - memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); - - if (!netif_running(dev)) - return; - - /* offline tests are not supported in MF mode */ - if (IS_MF(bp)) - etest->flags &= ~ETH_TEST_FL_OFFLINE; - is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; - - if (etest->flags & ETH_TEST_FL_OFFLINE) { - int port = BP_PORT(bp); - u32 val; - u8 link_up; - - /* save current value of input enable for TX port IF */ - val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); - /* disable input for TX port IF */ - REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); - - link_up = bp->link_vars.link_up; - - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - bnx2x_nic_load(bp, LOAD_DIAG); - /* wait until link state is restored */ - bnx2x_wait_for_link(bp, 1, is_serdes); - - if (bnx2x_test_registers(bp) != 0) { - buf[0] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - if (bnx2x_test_memory(bp) != 0) { - buf[1] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - - buf[2] = bnx2x_test_loopback(bp); - if (buf[2] != 0) - etest->flags |= ETH_TEST_FL_FAILED; - - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - - /* restore input for TX port IF */ - REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); - - bnx2x_nic_load(bp, LOAD_NORMAL); - /* wait until link state is restored */ - bnx2x_wait_for_link(bp, link_up, is_serdes); - } - if (bnx2x_test_nvram(bp) != 0) { - buf[3] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - if (bnx2x_test_intr(bp) != 0) { - buf[4] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - - if (bnx2x_link_test(bp, is_serdes) != 0) { - buf[5] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } - -#ifdef BNX2X_EXTRA_DEBUG - bnx2x_panic_dump(bp); -#endif -} - -#define IS_PORT_STAT(i) \ - ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) -#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) -#define IS_MF_MODE_STAT(bp) \ - (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) - -/* ethtool statistics are displayed for all regular ethernet queues and the - * fcoe L2 queue if not disabled - */ -static inline int bnx2x_num_stat_queues(struct bnx2x *bp) -{ - return BNX2X_NUM_ETH_QUEUES(bp); -} - -static int bnx2x_get_sset_count(struct net_device *dev, int stringset) -{ - struct bnx2x 
*bp = netdev_priv(dev); - int i, num_stats; - - switch (stringset) { - case ETH_SS_STATS: - if (is_multi(bp)) { - num_stats = bnx2x_num_stat_queues(bp) * - BNX2X_NUM_Q_STATS; - if (!IS_MF_MODE_STAT(bp)) - num_stats += BNX2X_NUM_STATS; - } else { - if (IS_MF_MODE_STAT(bp)) { - num_stats = 0; - for (i = 0; i < BNX2X_NUM_STATS; i++) - if (IS_FUNC_STAT(i)) - num_stats++; - } else - num_stats = BNX2X_NUM_STATS; - } - return num_stats; - - case ETH_SS_TEST: - return BNX2X_NUM_TESTS; - - default: - return -EINVAL; - } -} - -static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) -{ - struct bnx2x *bp = netdev_priv(dev); - int i, j, k; - char queue_name[MAX_QUEUE_NAME_LEN+1]; - - switch (stringset) { - case ETH_SS_STATS: - if (is_multi(bp)) { - k = 0; - for_each_eth_queue(bp, i) { - memset(queue_name, 0, sizeof(queue_name)); - sprintf(queue_name, "%d", i); - for (j = 0; j < BNX2X_NUM_Q_STATS; j++) - snprintf(buf + (k + j)*ETH_GSTRING_LEN, - ETH_GSTRING_LEN, - bnx2x_q_stats_arr[j].string, - queue_name); - k += BNX2X_NUM_Q_STATS; - } - if (IS_MF_MODE_STAT(bp)) - break; - for (j = 0; j < BNX2X_NUM_STATS; j++) - strcpy(buf + (k + j)*ETH_GSTRING_LEN, - bnx2x_stats_arr[j].string); - } else { - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) - continue; - strcpy(buf + j*ETH_GSTRING_LEN, - bnx2x_stats_arr[i].string); - j++; - } - } - break; - - case ETH_SS_TEST: - memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); - break; - } -} - -static void bnx2x_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *buf) -{ - struct bnx2x *bp = netdev_priv(dev); - u32 *hw_stats, *offset; - int i, j, k; - - if (is_multi(bp)) { - k = 0; - for_each_eth_queue(bp, i) { - hw_stats = (u32 *)&bp->fp[i].eth_q_stats; - for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { - if (bnx2x_q_stats_arr[j].size == 0) { - /* skip this counter */ - buf[k + j] = 0; - continue; - } - offset = (hw_stats + - bnx2x_q_stats_arr[j].offset); - if (bnx2x_q_stats_arr[j].size == 4) { - /* 4-byte counter */ - buf[k + j] = (u64) *offset; - continue; - } - /* 8-byte counter */ - buf[k + j] = HILO_U64(*offset, *(offset + 1)); - } - k += BNX2X_NUM_Q_STATS; - } - if (IS_MF_MODE_STAT(bp)) - return; - hw_stats = (u32 *)&bp->eth_stats; - for (j = 0; j < BNX2X_NUM_STATS; j++) { - if (bnx2x_stats_arr[j].size == 0) { - /* skip this counter */ - buf[k + j] = 0; - continue; - } - offset = (hw_stats + bnx2x_stats_arr[j].offset); - if (bnx2x_stats_arr[j].size == 4) { - /* 4-byte counter */ - buf[k + j] = (u64) *offset; - continue; - } - /* 8-byte counter */ - buf[k + j] = HILO_U64(*offset, *(offset + 1)); - } - } else { - hw_stats = (u32 *)&bp->eth_stats; - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) - continue; - if (bnx2x_stats_arr[i].size == 0) { - /* skip this counter */ - buf[j] = 0; - j++; - continue; - } - offset = (hw_stats + bnx2x_stats_arr[i].offset); - if (bnx2x_stats_arr[i].size == 4) { - /* 4-byte counter */ - buf[j] = (u64) *offset; - j++; - continue; - } - /* 8-byte counter */ - buf[j] = HILO_U64(*offset, *(offset + 1)); - j++; - } - } -} - -static int bnx2x_set_phys_id(struct net_device *dev, - enum ethtool_phys_id_state state) -{ - struct bnx2x *bp = netdev_priv(dev); - - if (!netif_running(dev)) - return -EAGAIN; - - if (!bp->port.pmf) - return -EOPNOTSUPP; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - return 1; /* cycle on/off once per second */ - - case ETHTOOL_ID_ON: - bnx2x_set_led(&bp->link_params, 
&bp->link_vars, - LED_MODE_ON, SPEED_1000); - break; - - case ETHTOOL_ID_OFF: - bnx2x_set_led(&bp->link_params, &bp->link_vars, - LED_MODE_FRONT_PANEL_OFF, 0); - - break; - - case ETHTOOL_ID_INACTIVE: - bnx2x_set_led(&bp->link_params, &bp->link_vars, - LED_MODE_OPER, - bp->link_vars.line_speed); - } - - return 0; -} - -static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - void *rules __always_unused) -{ - struct bnx2x *bp = netdev_priv(dev); - - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = BNX2X_NUM_ETH_QUEUES(bp); - return 0; - - default: - return -EOPNOTSUPP; - } -} - -static int bnx2x_get_rxfh_indir(struct net_device *dev, - struct ethtool_rxfh_indir *indir) -{ - struct bnx2x *bp = netdev_priv(dev); - size_t copy_size = - min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE); - u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; - size_t i; - - if (bp->multi_mode == ETH_RSS_MODE_DISABLED) - return -EOPNOTSUPP; - - /* Get the current configuration of the RSS indirection table */ - bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); - - /* - * We can't use a memcpy() as an internal storage of an - * indirection table is a u8 array while indir->ring_index - * points to an array of u32. - * - * Indirection table contains the FW Client IDs, so we need to - * align the returned table to the Client ID of the leading RSS - * queue. - */ - for (i = 0; i < copy_size; i++) - indir->ring_index[i] = ind_table[i] - bp->fp->cl_id; - - indir->size = T_ETH_INDIRECTION_TABLE_SIZE; - - return 0; -} - -static int bnx2x_set_rxfh_indir(struct net_device *dev, - const struct ethtool_rxfh_indir *indir) -{ - struct bnx2x *bp = netdev_priv(dev); - size_t i; - u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; - u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); - - if (bp->multi_mode == ETH_RSS_MODE_DISABLED) - return -EOPNOTSUPP; - - /* validate the size */ - if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE) - return -EINVAL; - - for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { - /* validate the indices */ - if (indir->ring_index[i] >= num_eth_queues) - return -EINVAL; - /* - * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() - * as an internal storage of an indirection table is a u8 array - * while indir->ring_index points to an array of u32. 
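A small userspace sketch of the client-id translation described in this comment, with a hypothetical leading client id of 5 standing in for bp->fp->cl_id: the hardware table stores u8 FW client ids while ethtool speaks u32 ring indices, so the get path subtracts the leading id and the set path adds it back:

#include <stdint.h>
#include <stdio.h>

#define TBL_SIZE 128			/* T_ETH_INDIRECTION_TABLE_SIZE */

int main(void)
{
	uint8_t hw_tbl[TBL_SIZE];	/* u8 FW client ids, as in hardware */
	uint32_t ring_index[TBL_SIZE];	/* u32 ring indices, as in ethtool */
	uint8_t leading_cl_id = 5;	/* hypothetical bp->fp->cl_id */
	int num_queues = 8;
	int i;

	for (i = 0; i < TBL_SIZE; i++)	/* "set" direction: index -> cl_id */
		hw_tbl[i] = (uint8_t)(i % num_queues) + leading_cl_id;

	for (i = 0; i < TBL_SIZE; i++)	/* "get" direction: cl_id -> index */
		ring_index[i] = hw_tbl[i] - leading_cl_id;

	printf("entry 9 maps to queue %u\n", ring_index[9]);	/* 1 */
	return 0;
}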
- * - * Indirection table contains the FW Client IDs, so we need to - * align the received table to the Client ID of the leading RSS - * queue - */ - ind_table[i] = indir->ring_index[i] + bp->fp->cl_id; - } - - return bnx2x_config_rss_pf(bp, ind_table, false); -} - -static const struct ethtool_ops bnx2x_ethtool_ops = { - .get_settings = bnx2x_get_settings, - .set_settings = bnx2x_set_settings, - .get_drvinfo = bnx2x_get_drvinfo, - .get_regs_len = bnx2x_get_regs_len, - .get_regs = bnx2x_get_regs, - .get_wol = bnx2x_get_wol, - .set_wol = bnx2x_set_wol, - .get_msglevel = bnx2x_get_msglevel, - .set_msglevel = bnx2x_set_msglevel, - .nway_reset = bnx2x_nway_reset, - .get_link = bnx2x_get_link, - .get_eeprom_len = bnx2x_get_eeprom_len, - .get_eeprom = bnx2x_get_eeprom, - .set_eeprom = bnx2x_set_eeprom, - .get_coalesce = bnx2x_get_coalesce, - .set_coalesce = bnx2x_set_coalesce, - .get_ringparam = bnx2x_get_ringparam, - .set_ringparam = bnx2x_set_ringparam, - .get_pauseparam = bnx2x_get_pauseparam, - .set_pauseparam = bnx2x_set_pauseparam, - .self_test = bnx2x_self_test, - .get_sset_count = bnx2x_get_sset_count, - .get_strings = bnx2x_get_strings, - .set_phys_id = bnx2x_set_phys_id, - .get_ethtool_stats = bnx2x_get_ethtool_stats, - .get_rxnfc = bnx2x_get_rxnfc, - .get_rxfh_indir = bnx2x_get_rxfh_indir, - .set_rxfh_indir = bnx2x_set_rxfh_indir, -}; - -void bnx2x_set_ethtool_ops(struct net_device *netdev) -{ - SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); -} diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h deleted file mode 100644 index 998652a..0000000 --- a/drivers/net/bnx2x/bnx2x_fw_defs.h +++ /dev/null @@ -1,410 +0,0 @@ -/* bnx2x_fw_defs.h: Broadcom Everest network driver. - * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
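Every *_OFFSET macro in the hunk below computes a storm RAM address out of the firmware's IRO table as base plus index times multiplier; a few, such as the event-ring offsets, additionally split a pfId into a pair index ((pfId)>>1) and a parity ((pfId)&1). A sketch under an assumed record layout, with made-up table values:

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of one IRO record: a base offset plus per-index
 * multipliers, mirroring the (IRO[n].base + idx * IRO[n].m1 + ...)
 * pattern used by the macros. */
struct iro {
	uint32_t base;
	uint16_t m1, m2, m3;
	uint16_t size;
};

static const struct iro iro_tbl[] = {
	[153] = { .base = 0x2000, .m1 = 0x80, .m2 = 0x40 },	/* made-up */
};

/* Mirrors the shape of CSTORM_EVENT_RING_DATA_OFFSET(pfId) below. */
static uint32_t event_ring_data_offset(int pf_id)
{
	return iro_tbl[153].base +
	       (pf_id >> 1) * iro_tbl[153].m1 +
	       (pf_id & 1) * iro_tbl[153].m2;
}

int main(void)
{
	printf("pf 3 -> 0x%x\n", (unsigned)event_ring_data_offset(3));
	return 0;	/* 0x2000 + 0x80 + 0x40 = 0x20c0 */
}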
- */ - -#ifndef BNX2X_FW_DEFS_H -#define BNX2X_FW_DEFS_H - -#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base) -#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[147].base + ((assertListEntry) * IRO[147].m1)) -#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ - (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \ - IRO[153].m2)) -#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ - (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \ - IRO[154].m2)) -#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ - (IRO[159].base + ((funcId) * IRO[159].m1)) -#define CSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[149].base + ((funcId) * IRO[149].m1)) -#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) -#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[315].base + ((pfId) * IRO[315].m1)) -#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[316].base + ((pfId) * IRO[316].m1)) -#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ - (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2)) -#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) -#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) -#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ - (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) -#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ - (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2)) -#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ - (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) -#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ - (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) -#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[314].base + ((pfId) * IRO[314].m1)) -#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[306].base + ((pfId) * IRO[306].m1)) -#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[305].base + ((pfId) * IRO[305].m1)) -#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[304].base + ((pfId) * IRO[304].m1)) -#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[151].base + ((funcId) * IRO[151].m1)) -#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ - (IRO[142].base + ((pfId) * IRO[142].m1)) -#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ - (IRO[143].base + ((pfId) * IRO[143].m1)) -#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ - (IRO[141].base + ((pfId) * IRO[141].m1)) -#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size) -#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ - (IRO[144].base + ((pfId) * IRO[144].m1)) -#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size) -#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ - (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2)) -#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ - (IRO[133].base + ((sbId) * IRO[133].m1)) -#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ - (IRO[134].base + ((sbId) * IRO[134].m1)) -#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ - (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2)) -#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ - (IRO[132].base + ((sbId) * IRO[132].m1)) -#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size) -#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ - (IRO[137].base + ((sbId) * IRO[137].m1)) -#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size) -#define 
CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ - (IRO[155].base + ((vfId) * IRO[155].m1)) -#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ - (IRO[156].base + ((vfId) * IRO[156].m1)) -#define CSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[150].base + ((funcId) * IRO[150].m1)) -#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base) -#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ - (IRO[203].base + ((pfId) * IRO[203].m1)) -#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) -#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[101].base + ((assertListEntry) * IRO[101].m1)) -#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base) -#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ - (IRO[108].base) -#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ - (IRO[201].base + ((pfId) * IRO[201].m1)) -#define TSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[103].base + ((funcId) * IRO[103].m1)) -#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[271].base + ((pfId) * IRO[271].m1)) -#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ - (IRO[272].base + ((pfId) * IRO[272].m1)) -#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ - (IRO[273].base + ((pfId) * IRO[273].m1)) -#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ - (IRO[274].base + ((pfId) * IRO[274].m1)) -#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[270].base + ((pfId) * IRO[270].m1)) -#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[269].base + ((pfId) * IRO[269].m1)) -#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[268].base + ((pfId) * IRO[268].m1)) -#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[267].base + ((pfId) * IRO[267].m1)) -#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ - (IRO[276].base + ((pfId) * IRO[276].m1)) -#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[263].base + ((pfId) * IRO[263].m1)) -#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[264].base + ((pfId) * IRO[264].m1)) -#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[265].base + ((pfId) * IRO[265].m1)) -#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[266].base + ((pfId) * IRO[266].m1)) -#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ - (IRO[202].base + ((pfId) * IRO[202].m1)) -#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[105].base + ((funcId) * IRO[105].m1)) -#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ - (IRO[216].base + ((pfId) * IRO[216].m1)) -#define TSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[104].base + ((funcId) * IRO[104].m1)) -#define USTORM_AGG_DATA_OFFSET (IRO[206].base) -#define USTORM_AGG_DATA_SIZE (IRO[206].size) -#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) -#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[176].base + ((assertListEntry) * IRO[176].m1)) -#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \ - (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \ - IRO[205].m2)) -#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ - (IRO[183].base + ((portId) * IRO[183].m1)) -#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ - (IRO[317].base + ((pfId) * IRO[317].m1)) -#define USTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[178].base + ((funcId) * IRO[178].m1)) -#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[281].base + ((pfId) * IRO[281].m1)) -#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[282].base + ((pfId) * IRO[282].m1)) -#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[286].base + ((pfId) * IRO[286].m1)) -#define 
USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ - (IRO[283].base + ((pfId) * IRO[283].m1)) -#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[279].base + ((pfId) * IRO[279].m1)) -#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[278].base + ((pfId) * IRO[278].m1)) -#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[277].base + ((pfId) * IRO[277].m1)) -#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[280].base + ((pfId) * IRO[280].m1)) -#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ - (IRO[284].base + ((pfId) * IRO[284].m1)) -#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[285].base + ((pfId) * IRO[285].m1)) -#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ - (IRO[182].base + ((pfId) * IRO[182].m1)) -#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[180].base + ((funcId) * IRO[180].m1)) -#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ - (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \ - IRO[209].m2)) -#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ - (IRO[210].base + ((qzoneId) * IRO[210].m1)) -#define USTORM_TPA_BTR_OFFSET (IRO[207].base) -#define USTORM_TPA_BTR_SIZE (IRO[207].size) -#define USTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[179].base + ((funcId) * IRO[179].m1)) -#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) -#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) -#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) -#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[50].base + ((assertListEntry) * IRO[50].m1)) -#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \ - (IRO[43].base + ((portId) * IRO[43].m1)) -#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \ - (IRO[45].base + ((pfId) * IRO[45].m1)) -#define XSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[47].base + ((funcId) * IRO[47].m1)) -#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[294].base + ((pfId) * IRO[294].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ - (IRO[297].base + ((pfId) * IRO[297].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ - (IRO[298].base + ((pfId) * IRO[298].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ - (IRO[299].base + ((pfId) * IRO[299].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ - (IRO[300].base + ((pfId) * IRO[300].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ - (IRO[301].base + ((pfId) * IRO[301].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ - (IRO[302].base + ((pfId) * IRO[302].m1)) -#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ - (IRO[303].base + ((pfId) * IRO[303].m1)) -#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[293].base + ((pfId) * IRO[293].m1)) -#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[292].base + ((pfId) * IRO[292].m1)) -#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[291].base + ((pfId) * IRO[291].m1)) -#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[296].base + ((pfId) * IRO[296].m1)) -#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ - (IRO[295].base + ((pfId) * IRO[295].m1)) -#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ - (IRO[290].base + ((pfId) * IRO[290].m1)) -#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[289].base + ((pfId) * IRO[289].m1)) -#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ - (IRO[288].base + ((pfId) * IRO[288].m1)) -#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ - (IRO[287].base + ((pfId) * IRO[287].m1)) -#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ - (IRO[44].base + ((pfId) * IRO[44].m1)) -#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[49].base + 
((funcId) * IRO[49].m1)) -#define XSTORM_SPQ_DATA_OFFSET(funcId) \ - (IRO[32].base + ((funcId) * IRO[32].m1)) -#define XSTORM_SPQ_DATA_SIZE (IRO[32].size) -#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \ - (IRO[30].base + ((funcId) * IRO[30].m1)) -#define XSTORM_SPQ_PROD_OFFSET(funcId) \ - (IRO[31].base + ((funcId) * IRO[31].m1)) -#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ - (IRO[211].base + ((portId) * IRO[211].m1)) -#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ - (IRO[212].base + ((portId) * IRO[212].m1)) -#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ - (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \ - IRO[214].m2)) -#define XSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[48].base + ((funcId) * IRO[48].m1)) -#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 - -/** -* This file defines HSI constants for the ETH flow -*/ -#ifdef _EVEREST_MICROCODE -#include "Microcode\Generated\DataTypes\eth_rx_bd.h" -#include "Microcode\Generated\DataTypes\eth_tx_bd.h" -#include "Microcode\Generated\DataTypes\eth_rx_cqe.h" -#include "Microcode\Generated\DataTypes\eth_rx_sge.h" -#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h" -#endif - - -/* Ethernet Ring parameters */ -#define X_ETH_LOCAL_RING_SIZE 13 -#define FIRST_BD_IN_PKT 0 -#define PARSE_BD_INDEX 1 -#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8)) -#define U_ETH_NUM_OF_SGES_TO_FETCH 8 -#define U_ETH_MAX_SGES_FOR_PACKET 3 - -/* Rx ring params */ -#define U_ETH_LOCAL_BD_RING_SIZE 8 -#define U_ETH_LOCAL_SGE_RING_SIZE 10 -#define U_ETH_SGL_SIZE 8 - /* The fw will padd the buffer with this value, so the IP header \ - will be align to 4 Byte */ -#define IP_HEADER_ALIGNMENT_PADDING 2 - -#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \ - (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1)) - -#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8)) -#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) -#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8)) - -#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) -#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) -#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1) - -#define U_ETH_UNDEFINED_Q 0xFF - -#define T_ETH_INDIRECTION_TABLE_SIZE 128 -#define T_ETH_RSS_KEY 10 -#define ETH_NUM_OF_RSS_ENGINES_E2 72 - -#define FILTER_RULES_COUNT 16 -#define MULTICAST_RULES_COUNT 16 -#define CLASSIFY_RULES_COUNT 16 - -/*The CRC32 seed, that is used for the hash(reduction) multicast address */ -#define ETH_CRC32_HASH_SEED 0x00000000 - -#define ETH_CRC32_HASH_BIT_SIZE (8) -#define ETH_CRC32_HASH_MASK EVAL((1< - * Written by: Vladislav Zolotarov - * Based on the original idea of John Wright . 
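The header declared just below is the on-disk index of the firmware blob: each section record carries a big-endian length and offset into the file. A sketch of how a loader might bounds-check one record; section_valid() is a hypothetical helper, not the driver's actual parser:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl() for the __be32 fields */

/* Userspace mirror of struct bnx2x_fw_file_section below:
 * both fields are stored big-endian in the blob. */
struct fw_file_section {
	uint32_t len;
	uint32_t offset;
};

/* Hypothetical helper: does the section fit inside the blob? */
static int section_valid(const struct fw_file_section *s, size_t blob_len)
{
	uint32_t len = ntohl(s->len);
	uint32_t off = ntohl(s->offset);

	return off <= blob_len && len <= blob_len - off;
}

int main(void)
{
	struct fw_file_section s = { htonl(0x100), htonl(0x40) };

	printf("section %s\n", section_valid(&s, 0x1000) ? "ok" : "bad");
	return 0;
}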
- */ - -#ifndef BNX2X_INIT_FILE_HDR_H -#define BNX2X_INIT_FILE_HDR_H - -struct bnx2x_fw_file_section { - __be32 len; - __be32 offset; -}; - -struct bnx2x_fw_file_hdr { - struct bnx2x_fw_file_section init_ops; - struct bnx2x_fw_file_section init_ops_offsets; - struct bnx2x_fw_file_section init_data; - struct bnx2x_fw_file_section tsem_int_table_data; - struct bnx2x_fw_file_section tsem_pram_data; - struct bnx2x_fw_file_section usem_int_table_data; - struct bnx2x_fw_file_section usem_pram_data; - struct bnx2x_fw_file_section csem_int_table_data; - struct bnx2x_fw_file_section csem_pram_data; - struct bnx2x_fw_file_section xsem_int_table_data; - struct bnx2x_fw_file_section xsem_pram_data; - struct bnx2x_fw_file_section iro_arr; - struct bnx2x_fw_file_section fw_version; -}; - -#endif /* BNX2X_INIT_FILE_HDR_H */ diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h deleted file mode 100644 index dc24de4..0000000 --- a/drivers/net/bnx2x/bnx2x_hsi.h +++ /dev/null @@ -1,5131 +0,0 @@ -/* bnx2x_hsi.h: Broadcom Everest network driver. - * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - */ -#ifndef BNX2X_HSI_H -#define BNX2X_HSI_H - -#include "bnx2x_fw_defs.h" - -#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e - -struct license_key { - u32 reserved[6]; - - u32 max_iscsi_conn; -#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF -#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0 -#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000 -#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16 - - u32 reserved_a; - - u32 max_fcoe_conn; -#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF -#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0 -#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000 -#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16 - - u32 reserved_b[4]; -}; - - -#define PORT_0 0 -#define PORT_1 1 -#define PORT_MAX 2 - -/**************************************************************************** - * Shared HW configuration * - ****************************************************************************/ -#define PIN_CFG_NA 0x00000000 -#define PIN_CFG_GPIO0_P0 0x00000001 -#define PIN_CFG_GPIO1_P0 0x00000002 -#define PIN_CFG_GPIO2_P0 0x00000003 -#define PIN_CFG_GPIO3_P0 0x00000004 -#define PIN_CFG_GPIO0_P1 0x00000005 -#define PIN_CFG_GPIO1_P1 0x00000006 -#define PIN_CFG_GPIO2_P1 0x00000007 -#define PIN_CFG_GPIO3_P1 0x00000008 -#define PIN_CFG_EPIO0 0x00000009 -#define PIN_CFG_EPIO1 0x0000000a -#define PIN_CFG_EPIO2 0x0000000b -#define PIN_CFG_EPIO3 0x0000000c -#define PIN_CFG_EPIO4 0x0000000d -#define PIN_CFG_EPIO5 0x0000000e -#define PIN_CFG_EPIO6 0x0000000f -#define PIN_CFG_EPIO7 0x00000010 -#define PIN_CFG_EPIO8 0x00000011 -#define PIN_CFG_EPIO9 0x00000012 -#define PIN_CFG_EPIO10 0x00000013 -#define PIN_CFG_EPIO11 0x00000014 -#define PIN_CFG_EPIO12 0x00000015 -#define PIN_CFG_EPIO13 0x00000016 -#define PIN_CFG_EPIO14 0x00000017 -#define PIN_CFG_EPIO15 0x00000018 -#define PIN_CFG_EPIO16 0x00000019 -#define PIN_CFG_EPIO17 0x0000001a -#define PIN_CFG_EPIO18 0x0000001b -#define PIN_CFG_EPIO19 0x0000001c -#define PIN_CFG_EPIO20 0x0000001d -#define PIN_CFG_EPIO21 0x0000001e -#define PIN_CFG_EPIO22 0x0000001f -#define PIN_CFG_EPIO23 0x00000020 -#define PIN_CFG_EPIO24 0x00000021 -#define PIN_CFG_EPIO25 0x00000022 -#define PIN_CFG_EPIO26 0x00000023 -#define PIN_CFG_EPIO27 0x00000024 -#define PIN_CFG_EPIO28 0x00000025 -#define PIN_CFG_EPIO29 0x00000026 -#define 
PIN_CFG_EPIO30 0x00000027 -#define PIN_CFG_EPIO31 0x00000028 - -/* EPIO definition */ -#define EPIO_CFG_NA 0x00000000 -#define EPIO_CFG_EPIO0 0x00000001 -#define EPIO_CFG_EPIO1 0x00000002 -#define EPIO_CFG_EPIO2 0x00000003 -#define EPIO_CFG_EPIO3 0x00000004 -#define EPIO_CFG_EPIO4 0x00000005 -#define EPIO_CFG_EPIO5 0x00000006 -#define EPIO_CFG_EPIO6 0x00000007 -#define EPIO_CFG_EPIO7 0x00000008 -#define EPIO_CFG_EPIO8 0x00000009 -#define EPIO_CFG_EPIO9 0x0000000a -#define EPIO_CFG_EPIO10 0x0000000b -#define EPIO_CFG_EPIO11 0x0000000c -#define EPIO_CFG_EPIO12 0x0000000d -#define EPIO_CFG_EPIO13 0x0000000e -#define EPIO_CFG_EPIO14 0x0000000f -#define EPIO_CFG_EPIO15 0x00000010 -#define EPIO_CFG_EPIO16 0x00000011 -#define EPIO_CFG_EPIO17 0x00000012 -#define EPIO_CFG_EPIO18 0x00000013 -#define EPIO_CFG_EPIO19 0x00000014 -#define EPIO_CFG_EPIO20 0x00000015 -#define EPIO_CFG_EPIO21 0x00000016 -#define EPIO_CFG_EPIO22 0x00000017 -#define EPIO_CFG_EPIO23 0x00000018 -#define EPIO_CFG_EPIO24 0x00000019 -#define EPIO_CFG_EPIO25 0x0000001a -#define EPIO_CFG_EPIO26 0x0000001b -#define EPIO_CFG_EPIO27 0x0000001c -#define EPIO_CFG_EPIO28 0x0000001d -#define EPIO_CFG_EPIO29 0x0000001e -#define EPIO_CFG_EPIO30 0x0000001f -#define EPIO_CFG_EPIO31 0x00000020 - - -struct shared_hw_cfg { /* NVRAM Offset */ - /* Up to 16 bytes of NULL-terminated string */ - u8 part_num[16]; /* 0x104 */ - - u32 config; /* 0x114 */ - #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 - #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 - #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 - #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 - #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002 - - #define SHARED_HW_CFG_PORT_SWAP 0x00000004 - - #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 - - #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000 - #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010 - - #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 - #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 - /* Whatever MFW found in NVM - (if multiple found, priority order is: NC-SI, UMP, IPMI) */ - #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 - #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 - #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 - #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 - /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI - (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ - #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 - /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI - (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ - #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 - /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP - (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ - #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 - - #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000 - #define SHARED_HW_CFG_LED_MODE_SHIFT 16 - #define SHARED_HW_CFG_LED_MAC1 0x00000000 - #define SHARED_HW_CFG_LED_PHY1 0x00010000 - #define SHARED_HW_CFG_LED_PHY2 0x00020000 - #define SHARED_HW_CFG_LED_PHY3 0x00030000 - #define SHARED_HW_CFG_LED_MAC2 0x00040000 - #define SHARED_HW_CFG_LED_PHY4 0x00050000 - #define SHARED_HW_CFG_LED_PHY5 0x00060000 - #define SHARED_HW_CFG_LED_PHY6 0x00070000 - #define SHARED_HW_CFG_LED_MAC3 0x00080000 - #define SHARED_HW_CFG_LED_PHY7 0x00090000 - #define SHARED_HW_CFG_LED_PHY9 0x000a0000 - #define SHARED_HW_CFG_LED_PHY11 0x000b0000 - #define SHARED_HW_CFG_LED_MAC4 0x000c0000 - #define SHARED_HW_CFG_LED_PHY8 0x000d0000 - 
#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 - - - #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 - #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 - #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000 - #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000 - #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000 - #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000 - #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000 - #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000 - - #define SHARED_HW_CFG_SRIOV_MASK 0x40000000 - #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000 - #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000 - - #define SHARED_HW_CFG_ATC_MASK 0x80000000 - #define SHARED_HW_CFG_ATC_DISABLED 0x00000000 - #define SHARED_HW_CFG_ATC_ENABLED 0x80000000 - - u32 config2; /* 0x118 */ - /* one time auto detect grace period (in sec) */ - #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff - #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0 - - #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100 - #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000 - - /* The default value for the core clock is 250MHz and it is - achieved by setting the clock change to 4 */ - #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00 - #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9 - - #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000 - #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 - #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 - - #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 - - #define SHARED_HW_CFG_WOL_CAPABLE_MASK 0x00004000 - #define SHARED_HW_CFG_WOL_CAPABLE_DISABLED 0x00000000 - #define SHARED_HW_CFG_WOL_CAPABLE_ENABLED 0x00004000 - - /* Output low when PERST is asserted */ - #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000 - #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000 - #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000 - - #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000 - #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16 - #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000 - #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000 - #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000 - #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000 - - /* The fan failure mechanism is usually related to the PHY type - since the power consumption of the board is determined by the PHY. - Currently, fan is required for most designs with SFX7101, BCM8727 - and BCM8481. If a fan is not required for a board which uses one - of those PHYs, this field should be set to "Disabled". If a fan is - required for a different PHY type, this option should be set to - "Enabled". 
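A pattern worth noting while reading these headers: every field is described by a _MASK/_SHIFT pair, and the per-value constants are stored pre-shifted, so a test is a plain mask-and-compare; the shift is only needed to recover the raw field value. A sketch using the fan-failure field discussed here, with an assumed config2 word:

#include <stdint.h>
#include <stdio.h>

#define SHARED_HW_CFG_FAN_FAILURE_MASK		0x00180000
#define SHARED_HW_CFG_FAN_FAILURE_SHIFT		19
#define SHARED_HW_CFG_FAN_FAILURE_ENABLED	0x00100000

int main(void)
{
	uint32_t config2 = 0x00100000;	/* assumed NVRAM word */

	/* Value constants are pre-shifted, so no shift is needed here. */
	if ((config2 & SHARED_HW_CFG_FAN_FAILURE_MASK) ==
	    SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		puts("fan failure monitoring: enabled");

	/* The SHIFT constant recovers the raw field value when wanted. */
	printf("raw field = %u\n",
	       (config2 & SHARED_HW_CFG_FAN_FAILURE_MASK) >>
	       SHARED_HW_CFG_FAN_FAILURE_SHIFT);	/* prints 2 */
	return 0;
}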
The fan failure indication is expected on SPIO5 */ - #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 - #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 - #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 - #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 - #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 - - /* ASPM Power Management support */ - #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000 - #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21 - #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000 - #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000 - #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000 - #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000 - - /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register - tl_control_0 (register 0x2800) */ - #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000 - #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000 - #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000 - - #define SHARED_HW_CFG_PORT_MODE_MASK 0x01000000 - #define SHARED_HW_CFG_PORT_MODE_2 0x00000000 - #define SHARED_HW_CFG_PORT_MODE_4 0x01000000 - - #define SHARED_HW_CFG_PATH_SWAP_MASK 0x02000000 - #define SHARED_HW_CFG_PATH_SWAP_DISABLED 0x00000000 - #define SHARED_HW_CFG_PATH_SWAP_ENABLED 0x02000000 - - /* Set the MDC/MDIO access for the first external phy */ - #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 - - /* Set the MDC/MDIO access for the second external phy */ - #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 - #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 - - - u32 power_dissipated; /* 0x11c */ - #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000 - #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16 - #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000 - #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000 - #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000 - #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000 - - #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 - #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 - - u32 ump_nc_si_config; /* 0x120 */ - #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 - #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 - #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 - #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 - #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 - #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 - - #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00 - #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8 - - #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000 - #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16 - #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000 - #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000 - - u32 board; /* 0x124 */ - #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F - #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 
0 - #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0 - #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6 - /* Use the PIN_CFG_XXX defines on top */ - #define SHARED_HW_CFG_BOARD_REV_MASK 0x00ff0000 - #define SHARED_HW_CFG_BOARD_REV_SHIFT 16 - - #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0f000000 - #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 - - #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xf0000000 - #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 - - u32 wc_lane_config; /* 0x128 */ - #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF - #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0 - #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b - #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4 - #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b - #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4 - #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF - #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 - #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00 - #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 - - /* TX lane Polarity swap */ - #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000 - #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000 - #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000 - #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000 - /* TX lane Polarity swap */ - #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000 - #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000 - #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000 - #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000 - - /* Selects the port layout of the board */ - #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000 - #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24 - #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000 - #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000 - #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000 - #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000 - #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000 - #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000 -}; - - -/**************************************************************************** - * Port HW configuration * - ****************************************************************************/ -struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ - - u32 pci_id; - #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 - #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff - - u32 pci_sub_id; - #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000 - #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff - - u32 power_dissipated; - #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff - #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 - #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00 - #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 - #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000 - #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 - #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000 - #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 - - u32 power_consumed; - #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff - #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 - #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00 - #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 - #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000 - #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 - #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000 - #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 - - u32 mac_upper; - #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff - #define PORT_HW_CFG_UPPERMAC_SHIFT 0 - u32 mac_lower; - - u32 iscsi_mac_upper; /* Upper 16 
bits are always zeroes */ - u32 iscsi_mac_lower; - - u32 rdma_mac_upper; /* Upper 16 bits are always zeroes */ - u32 rdma_mac_lower; - - u32 serdes_config; - #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff - #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 - - #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xffff0000 - #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 - - - /* Default values: 2P-64, 4P-32 */ - u32 pf_config; /* 0x158 */ - #define PORT_HW_CFG_PF_NUM_VF_MASK 0x0000007F - #define PORT_HW_CFG_PF_NUM_VF_SHIFT 0 - - /* Default values: 17 */ - #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK 0x00007F00 - #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT 8 - - #define PORT_HW_CFG_ENABLE_FLR_MASK 0x00010000 - #define PORT_HW_CFG_FLR_ENABLED 0x00010000 - - u32 vf_config; /* 0x15C */ - #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK 0x0000007F - #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT 0 - - #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000 - #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16 - - u32 mf_pci_id; /* 0x160 */ - #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF - #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0 - - /* Controls the TX laser of the SFP+ module */ - u32 sfp_ctrl; /* 0x164 */ - #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF - #define PORT_HW_CFG_TX_LASER_SHIFT 0 - #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 - #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 - #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 - #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 - #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 - - /* Controls the fault module LED of the SFP+ */ - #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 - #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 - #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 - #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 - #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 - #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 - #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 - - /* The output pin TX_DIS that controls the TX laser of the SFP+ - module. Use the PIN_CFG_XXX defines on top */ - u32 e3_sfp_ctrl; /* 0x168 */ - #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF - #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0 - - /* The output pin for SFPP_TYPE which turns on the Fault module LED */ - #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00 - #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8 - - /* The input pin MOD_ABS that indicates whether an SFP+ module is - present or not. Use the PIN_CFG_XXX defines on top */ - #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000 - #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16 - - /* The output pin PWRDIS_SFP_X which disables the power of the SFP+ - module. Use the PIN_CFG_XXX defines on top */ - #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000 - #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24 - - /* - * The input pin which signals module transmit fault. Use the - * PIN_CFG_XXX defines on top - */ - u32 e3_cmn_pin_cfg; /* 0x16C */ - #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF - #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0 - - /* The output pin which resets the PHY. Use the PIN_CFG_XXX defines on - top */ - #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00 - #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8 - - /* - * The output pin which powers down the PHY.
Use the PIN_CFG_XXX - * defines on top - */ - #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000 - #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16 - - /* The output pin values BSC_SEL which select the I2C for this port - in the I2C Mux */ - #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000 - #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000 - - - /* - * The input pin I_FAULT which indicates that over-current has occurred. - * Use the PIN_CFG_XXX defines on top - */ - u32 e3_cmn_pin_cfg1; /* 0x170 */ - #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF - #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0 - u32 reserved0[7]; /* 0x174 */ - - u32 aeu_int_mask; /* 0x190 */ - - u32 media_type; /* 0x194 */ - #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF - #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0 - - #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00 - #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8 - - #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000 - #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16 - - /* 4 times 16 bits for all 4 lanes. In case an external PHY is present - (not direct mode), those values will not take effect on the 4 XGXS - lanes. For some external PHYs (such as 8706 and 8726) the values - will be used to configure the external PHY; in those cases, not - all 4 values are needed. */ - u16 xgxs_config_rx[4]; /* 0x198 */ - u16 xgxs_config_tx[4]; /* 0x1A0 */ - - /* For storing FCOE mac on shared memory */ - u32 fcoe_fip_mac_upper; - #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff - #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0 - u32 fcoe_fip_mac_lower; - - u32 fcoe_wwn_port_name_upper; - u32 fcoe_wwn_port_name_lower; - - u32 fcoe_wwn_node_name_upper; - u32 fcoe_wwn_node_name_lower; - - u32 Reserved1[49]; /* 0x1C0 */ - - /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default), - 84833 only */ - u32 xgbt_phy_cfg; /* 0x284 */ - #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF - #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0 - - u32 default_cfg; /* 0x288 */ - #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 - #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 - #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 - #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 - #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 - #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 - - #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C - #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 - #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 - #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 - #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 - #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c - - #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 - #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 - #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 - #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 - #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 - #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 - - #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 - #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 - #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 - #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 - #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 - #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 - - /* When the KR link is required to be forced (which is not - KR-compliant), this parameter determines the trigger for it. - When GPIO is selected, a low input will force the speed. Currently - the default speed is 1G. In the future, it may be widened to select the - forced speed with another parameter. Note that when force-1G is - enabled, it overrides option 56: Link Speed option.
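The four GPIOn_CONFIG fields above pack one 2-bit mode per GPIO into default_cfg. A decode sketch with an assumed word:

#include <stdint.h>
#include <stdio.h>

static const char * const gpio_mode[] = { "n/a", "low", "high", "input" };

int main(void)
{
	uint32_t default_cfg = 0x000000c9;	/* assumed NVRAM word */
	int gpio;

	/* GPIOn occupies bits 2n..2n+1, matching the
	 * PORT_HW_CFG_GPIOn_CONFIG_* encodings above. */
	for (gpio = 0; gpio < 4; gpio++)
		printf("GPIO%d: %s\n", gpio,
		       gpio_mode[(default_cfg >> (2 * gpio)) & 3]);
	return 0;	/* low, high, n/a, input */
}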
*/ - #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 - #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 - #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 - #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 - #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 - #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 - #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 - #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 - #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 - #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 - #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 - #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 - /* Enable to determine with which GPIO to reset the external phy */ - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 - #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 - - /* Enable BAM on KR */ - #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 - #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 - #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 - #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 - - /* Enable Common Mode Sense */ - #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 - #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 - #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 - #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 - - /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */ - #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000 - #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22 - #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000 - #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000 - - /* Determine the Serdes electrical interface */ - #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000 - #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24 - #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000 - #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000 - #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000 - #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000 - #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000 - #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000 - - - u32 speed_capability_mask2; /* 0x28C */ - #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF - #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080 - - #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000 - #define 
PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 - #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000 - - - /* In the case where two media types (e.g. copper and fiber) are - present and electrically active at the same time, PHY Selection - will determine which of the two PHYs will be designated as the - Active PHY and used for a connection to the network. */ - u32 multi_phy_config; /* 0x290 */ - #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 - #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 - #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 - #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 - #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 - #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 - #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 - - /* When enabled, all second phy nvram parameters will be swapped - with the first phy parameters */ - #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 - #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 - #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 - #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 - - - /* Address of the second external phy */ - u32 external_phy_config2; /* 0x294 */ - #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF - #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 - - /* The second XGXS external PHY type */ - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 - #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 - - - /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as - 8706, 8726 and 8727) not all 4 values are needed. 
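Both speed-capability words in this structure follow the same packing: the D3 (low-power) speeds sit in the low 16 bits and the D0 (full-power) speeds in the high 16, one bit per speed. A decode sketch over an assumed value:

#include <stdint.h>
#include <stdio.h>

#define SPEED_CAP_D0_SHIFT	16	/* D0 speeds occupy the high half */
#define SPEED_CAP_10M_FULL	0x0001	/* per-speed bits within a half */
#define SPEED_CAP_1G		0x0010
#define SPEED_CAP_10G		0x0040

int main(void)
{
	/* assumed NVRAM word: 1G + 10G advertised in D0, 1G only in D3 */
	uint32_t cap = ((uint32_t)(SPEED_CAP_1G | SPEED_CAP_10G)
			<< SPEED_CAP_D0_SHIFT) | SPEED_CAP_1G;
	uint16_t d0 = cap >> SPEED_CAP_D0_SHIFT;
	uint16_t d3 = cap & 0xffff;

	printf("D0:%s%s%s\n", d0 & SPEED_CAP_10M_FULL ? " 10M" : "",
	       d0 & SPEED_CAP_1G ? " 1G" : "",
	       d0 & SPEED_CAP_10G ? " 10G" : "");
	printf("D3:%s\n", d3 & SPEED_CAP_1G ? " 1G" : " none");
	return 0;
}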
*/ - u16 xgxs_config2_rx[4]; /* 0x296 */ - u16 xgxs_config2_tx[4]; /* 0x2A0 */ - - u32 lane_config; - #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff - #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 - /* AN and forced */ - #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b - /* forced only */ - #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 - /* forced only */ - #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 - /* forced only */ - #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 - #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff - #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 - #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 - #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 - #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000 - #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 - - /* Indicate whether to swap the external phy polarity */ - #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 - #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 - #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 - - - u32 external_phy_config; - #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff - #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 - - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640 0x00000c00 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 - #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 - - #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000 - #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 - - #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 - #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 - #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 - #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000 - #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000 - #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 - - u32 speed_capability_mask; - #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff - #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 - #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 - #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 - #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 - #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 - #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 - #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 - #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 - #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080 - #define 
PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 - - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000 - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000 - #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 - - /* A place to hold the original MAC address as a backup */ - u32 backup_mac_upper; /* 0x2B4 */ - u32 backup_mac_lower; /* 0x2B8 */ - -}; - - -/**************************************************************************** - * Shared Feature configuration * - ****************************************************************************/ -struct shared_feat_cfg { /* NVRAM Offset */ - - u32 config; /* 0x450 */ - #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 - - /* Use NVRAM values instead of HW default values */ - #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \ - 0x00000002 - #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \ - 0x00000000 - #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \ - 0x00000002 - - #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008 - #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000 - #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008 - - #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030 - #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4 - - /* Override the OTP back to single function mode. When using GPIO, - high means only SF, 0 is according to CLP configuration */ - #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 - #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 - #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 - #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 - #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 - #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 - - /* The interval in seconds between sending LLDP packets. 
Set to zero - to disable the feature */ - #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00ff0000 - #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16 - - /* The assigned device type ID for LLDP usage */ - #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xff000000 - #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24 - -}; - - -/**************************************************************************** - * Port Feature configuration * - ****************************************************************************/ -struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ - - u32 config; - #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f - #define PORT_FEATURE_BAR1_SIZE_SHIFT 0 - #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000 - #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001 - #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002 - #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003 - #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004 - #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005 - #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006 - #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007 - #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008 - #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009 - #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a - #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b - #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c - #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d - #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e - #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f - #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0 - #define PORT_FEATURE_BAR2_SIZE_SHIFT 4 - #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000 - #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010 - #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020 - #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030 - #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040 - #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050 - #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060 - #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070 - #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080 - #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090 - #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0 - #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0 - #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0 - #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0 - #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0 - #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0 - - #define PORT_FEAT_CFG_DCBX_MASK 0x00000100 - #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 - #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 - - #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200 - #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9 - #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000 - #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200 - - #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 - #define PORT_FEATURE_EN_SIZE_SHIFT 24 - #define PORT_FEATURE_WOL_ENABLED 0x01000000 - #define PORT_FEATURE_MBA_ENABLED 0x02000000 - #define PORT_FEATURE_MFW_ENABLED 0x04000000 - - /* Advertise expansion ROM even if MBA is disabled */ - #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000 - #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000 - #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000 - - /* Check the optic vendor via i2c against a list of approved modules - in a separate nvram image */ - #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xe0000000 - #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 - #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \ - 0x00000000 - #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \ - 0x20000000 - #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 
0x40000000 - #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 - - u32 wol_config; - /* Default is used when driver sets to "auto" mode */ - #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003 - #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0 - #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000 - #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001 - #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002 - #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003 - #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004 - #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008 - #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 - - u32 mba_config; - #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007 - #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 - #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 - #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 - #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 - #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 - #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004 - #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007 - - #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038 - #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3 - - #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100 - #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200 - #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 - #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800 - #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 - #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 - #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 - #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000 - #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 - #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 - #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 - #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 - #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 - #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 - #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 - #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000 - #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 - #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 - #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000 - #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000 - #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000 - #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000 - #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000 - #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000 - #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000 - #define 
PORT_FEATURE_MBA_LINK_SPEED_20GBPS 0x20000000 - u32 bmc_config; - #define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK 0x00000001 - #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000 - #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001 - - u32 mba_vlan_cfg; - #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff - #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 - #define PORT_FEATURE_MBA_VLAN_EN 0x00010000 - - u32 resource_cfg; - #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001 - #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002 - #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004 - #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008 - #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010 - - u32 smbus_config; - #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe - #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 - - u32 vf_config; - #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000f - #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009 - #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a - #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b - #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c - #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d - #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e - #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f - - u32 link_config; /* Used as HW defaults for the driver */ - #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 - #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 - /* (forced) low speed switch (< 10G) */ - #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000 - /* (forced) high speed switch (>= 10G) */ - #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 - #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 - #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 - - #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000 - #define PORT_FEATURE_LINK_SPEED_SHIFT 16 - #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 - #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000 - #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000 - #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 - #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 - #define PORT_FEATURE_LINK_SPEED_1G 0x00050000 - #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 - #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 - #define PORT_FEATURE_LINK_SPEED_20G 0x00080000 - - #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 - #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 - #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 - #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 - #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 - #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 - #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 - - /* The default for MCP link configuration, - uses the same defines as link_config */ - u32 mfw_wol_link_cfg; - - /* The default for the driver of the second external phy, - uses the same defines as link_config */ - u32 link_config2; /* 0x47C */ - - /* The default for MCP of the second external phy, - uses the same defines as link_config */ - u32 mfw_wol_link_cfg2; /* 0x480 */ - - u32 Reserved2[17]; /* 
0x484 */ - -}; - - -/**************************************************************************** - * Device Information * - ****************************************************************************/ -struct shm_dev_info { /* size */ - - u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ - - struct shared_hw_cfg shared_hw_config; /* 40 */ - - struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ - - struct shared_feat_cfg shared_feature_config; /* 4 */ - - struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ - -}; - - -#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) - #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition." -#endif - -#define FUNC_0 0 -#define FUNC_1 1 -#define FUNC_2 2 -#define FUNC_3 3 -#define FUNC_4 4 -#define FUNC_5 5 -#define FUNC_6 6 -#define FUNC_7 7 -#define E1_FUNC_MAX 2 -#define E1H_FUNC_MAX 8 -#define E2_FUNC_MAX 4 /* per path */ - -#define VN_0 0 -#define VN_1 1 -#define VN_2 2 -#define VN_3 3 -#define E1VN_MAX 1 -#define E1HVN_MAX 4 - -#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */ -/* This value (in milliseconds) determines the frequency of the driver - * issuing the PULSE message code. The firmware monitors this periodic - * pulse to determine when to switch to an OS-absent mode. */ -#define DRV_PULSE_PERIOD_MS 250 - -/* This value (in milliseconds) determines how long the driver should - * wait for an acknowledgement from the firmware before timing out. Once - * the firmware has timed out, the driver will assume there is no firmware - * running and there won't be any firmware-driver synchronization during a - * driver reset. */ -#define FW_ACK_TIME_OUT_MS 5000 - -#define FW_ACK_POLL_TIME_MS 1 - -#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) - -/* LED Blink rate that will achieve ~15.9Hz */ -#define LED_BLINK_RATE_VAL 480 - -/**************************************************************************** - * Driver <-> FW Mailbox * - ****************************************************************************/ -struct drv_port_mb { - - u32 link_status; - /* Driver should update this field on any link change event */ - - #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 - #define LINK_STATUS_LINK_UP 0x00000001 - #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E - #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1) - #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1) - - #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 - #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 - - #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 - #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 - #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 - - #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 - #define 
LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
- #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
- #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
- #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
- #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
- #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
-
- #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
- #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
-
- #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
- #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
-
- #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
- #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
- #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
- #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
- #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
-
- #define LINK_STATUS_SERDES_LINK 0x00100000
-
- #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
- #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
- #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
- #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000
-
- #define LINK_STATUS_PFC_ENABLED 0x20000000
-
- #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
-
- u32 port_stx;
-
- u32 stat_nig_timer;
-
- /* MCP firmware does not use this field */
- u32 ext_phy_fw_version;
-
-};
-
-
-struct drv_func_mb {
-
- u32 drv_mb_header;
- #define DRV_MSG_CODE_MASK 0xffff0000
- #define DRV_MSG_CODE_LOAD_REQ 0x10000000
- #define DRV_MSG_CODE_LOAD_DONE 0x11000000
- #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
- #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
- #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
- #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
- #define DRV_MSG_CODE_DCC_OK 0x30000000
- #define DRV_MSG_CODE_DCC_FAILURE 0x31000000
- #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
- #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
- #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
- #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
- #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
- #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
- #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
- /*
- * The optic module verification command requires bootcode
- * v5.0.6 or later; the specific optic module verification command
- * requires bootcode v5.2.12 or later
- */
- #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
- #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
- #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
- #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
- #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
-
- #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
- #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
-
- #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
-
- #define DRV_MSG_CODE_SET_MF_BW 0xe0000000
- #define REQ_BC_VER_4_SET_MF_BW 0x00060202
- #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
-
- #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000
-
- #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
- #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
- #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
- #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
-
- #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
- u32 drv_mb_param;
- #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
- #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
-
- u32 fw_mb_header;
- #define FW_MSG_CODE_MASK 0xffff0000
- #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
- #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
- #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
- /* Load common chip is supported from bc 6.0.0 */
- #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
- #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
-
- #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
- #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
- #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
- #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
- #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
- #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
- #define FW_MSG_CODE_DCC_DONE 0x30100000
- #define FW_MSG_CODE_LLDP_DONE 0x40100000
- #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
- #define FW_MSG_CODE_DIAG_REFUSE 0x50200000
- #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
- #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
- #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
- #define FW_MSG_CODE_GET_KEY_DONE 0x80100000
- #define FW_MSG_CODE_NO_KEY 0x80f00000
- #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
- #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
- #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
- #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
- #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
- #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
- #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
- #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
- #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
- #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
-
- #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
- #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
-
- #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000
-
- #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
- #define FW_MSG_CODE_LIC_RESPONSE 0xff020000
- #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
- #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
-
- #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
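-
- /*
- * Editorial sketch, not part of the original header: a command/response
- * exchange over this mailbox typically writes a DRV_MSG_CODE_* value
- * tagged with a sequence number, then polls fw_mb_header until the
- * firmware echoes that sequence back. Roughly (drv_mb_seq, rc, polls
- * and func_mb are assumed locals; write32/read32 are assumed shmem
- * accessors):
- *
- *	seq = (++drv_mb_seq) & DRV_MSG_SEQ_NUMBER_MASK;
- *	write32(&func_mb->drv_mb_header, DRV_MSG_CODE_LOAD_REQ | seq);
- *	do {
- *		msleep(FW_ACK_POLL_TIME_MS);
- *		rc = read32(&func_mb->fw_mb_header);
- *	} while ((rc & FW_MSG_SEQ_NUMBER_MASK) != seq && --polls);
- *	resp = rc & FW_MSG_CODE_MASK;
- *
- * and resp is then compared against the FW_MSG_CODE_* values above,
- * e.g. FW_MSG_CODE_DRV_LOAD_PORT.
- */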
-
- u32 fw_mb_param;
-
- u32 drv_pulse_mb;
- #define DRV_PULSE_SEQ_MASK 0x00007fff
- #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
- /*
- * The system time is in the format of
- * (year-2001)*12*32 + month*32 + day.
- */
- #define DRV_PULSE_ALWAYS_ALIVE 0x00008000
- /*
- * Indicates to the firmware not to go into OS-absent
- * mode when it is not getting the driver pulse.
- * This is used for debugging as well as for PXE(MBA).
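- *
- * Editorial sketch, not part of the original header: combined with the
- * masks above, the driver would refresh this mailbox roughly every
- * DRV_PULSE_PERIOD_MS with something like
- *	pulse = (++pulse_seq) & DRV_PULSE_SEQ_MASK;
- *	pulse |= DRV_PULSE_ALWAYS_ALIVE;	(only for debug or PXE/MBA)
- * where pulse_seq is an assumed driver-side counter.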
- */ - - u32 mcp_pulse_mb; - #define MCP_PULSE_SEQ_MASK 0x00007fff - #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 - /* Indicates to the driver not to assert due to lack - * of MCP response */ - #define MCP_EVENT_MASK 0xffff0000 - #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 - - u32 iscsi_boot_signature; - u32 iscsi_boot_block_offset; - - u32 drv_status; - #define DRV_STATUS_PMF 0x00000001 - #define DRV_STATUS_VF_DISABLED 0x00000002 - #define DRV_STATUS_SET_MF_BW 0x00000004 - #define DRV_STATUS_LINK_EVENT 0x00000008 - - #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 - #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 - #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 - #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400 - #define DRV_STATUS_DCC_RESERVED1 0x00000800 - #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 - #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 - - #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 - #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 - - u32 virt_mac_upper; - #define VIRT_MAC_SIGN_MASK 0xffff0000 - #define VIRT_MAC_SIGNATURE 0x564d0000 - u32 virt_mac_lower; - -}; - - -/**************************************************************************** - * Management firmware state * - ****************************************************************************/ -/* Allocate 440 bytes for management firmware */ -#define MGMTFW_STATE_WORD_SIZE 110 - -struct mgmtfw_state { - u32 opaque[MGMTFW_STATE_WORD_SIZE]; -}; - - -/**************************************************************************** - * Multi-Function configuration * - ****************************************************************************/ -struct shared_mf_cfg { - - u32 clp_mb; - #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 - /* set by CLP */ - #define SHARED_MF_CLP_EXIT 0x00000001 - /* set by MCP */ - #define SHARED_MF_CLP_EXIT_DONE 0x00010000 - -}; - -struct port_mf_cfg { - - u32 dynamic_cfg; /* device control channel */ - #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff - #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 - #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK - - u32 reserved[3]; - -}; - -struct func_mf_cfg { - - u32 config; - /* E/R/I/D */ - /* function 0 of each port cannot be hidden */ - #define FUNC_MF_CFG_FUNC_HIDE 0x00000001 - - #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006 - #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000 - #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 - #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 - #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 - #define FUNC_MF_CFG_PROTOCOL_DEFAULT \ - FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA - - #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 - #define FUNC_MF_CFG_FUNC_DELETED 0x00000010 - - /* PRI */ - /* 0 - low priority, 3 - high priority */ - #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 - #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 - #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 - - /* MINBW, MAXBW */ - /* value range - 0..100, increments in 100Mbps */ - #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 - #define FUNC_MF_CFG_MIN_BW_SHIFT 16 - #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 - #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 - #define FUNC_MF_CFG_MAX_BW_SHIFT 24 - #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 - - u32 mac_upper; /* MAC */ - #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff - #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 - #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK - u32 mac_lower; - #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff - - u32 e1hov_tag; /* VNI 
*/ - #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff - #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 - #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK - - u32 reserved[2]; -}; - -/* This structure is not applicable and should not be accessed on 57711 */ -struct func_ext_cfg { - u32 func_cfg; - #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF - #define MACP_FUNC_CFG_FLAGS_SHIFT 0 - #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 - #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 - #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 - #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 - - u32 iscsi_mac_addr_upper; - u32 iscsi_mac_addr_lower; - - u32 fcoe_mac_addr_upper; - u32 fcoe_mac_addr_lower; - - u32 fcoe_wwn_port_name_upper; - u32 fcoe_wwn_port_name_lower; - - u32 fcoe_wwn_node_name_upper; - u32 fcoe_wwn_node_name_lower; - - u32 preserve_data; - #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) - #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) - #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) - #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) - #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4) - #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5) -}; - -struct mf_cfg { - - struct shared_mf_cfg shared_mf_config; /* 0x4 */ - struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */ - /* for all chips, there are 8 mf functions */ - struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */ - /* - * Extended configuration per function - this array does not exist and - * should not be accessed on 57711 - */ - struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/ -}; /* 0x224 */ - -/**************************************************************************** - * Shared Memory Region * - ****************************************************************************/ -struct shmem_region { /* SharedMem Offset (size) */ - - u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ - #define SHR_MEM_FORMAT_REV_MASK 0xff000000 - #define SHR_MEM_FORMAT_REV_ID ('A'<<24) - /* validity bits */ - #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 - #define SHR_MEM_VALIDITY_MB 0x00200000 - #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 - #define SHR_MEM_VALIDITY_RESERVED 0x00000007 - /* One licensing bit should be set */ - #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 - #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 - #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 - #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 - /* Active MFW */ - #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 - #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 - #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040 - #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080 - #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0 - #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 - - struct shm_dev_info dev_info; /* 0x8 (0x438) */ - - struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */ - - /* FW information (for internal FW use) */ - u32 fw_info_fio_offset; /* 0x4a8 (0x4) */ - struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ - - struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ - -#ifdef BMAPI - /* This is a variable length array */ - /* the number of function depends on the chip type */ - struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ -#else - /* the number of function depends on the chip type */ - struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ -#endif /* BMAPI */ - -}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 
= 0x734 */
-
-/****************************************************************************
- * Shared Memory 2 Region *
- ****************************************************************************/
-/* The fw_flr_ack is actually built in the following way: */
-/* 8 bit: PF ack */
-/* 64 bit: VF ack */
-/* 8 bit: iov_dis_ack */
-/* In order to maintain endianness in the mailbox hsi, we want to keep using */
-/* u32. The fw must have the VF right after the PF since this is how it */
-/* accesses arrays (it always expects the VF to reside right after the PF, */
-/* which makes the calculation much easier for it). */
-/* To satisfy both limitations, and to keep the struct small, the code */
-/* will abuse the structure defined here to achieve the actual partition */
-/* above */
-/****************************************************************************/
-struct fw_flr_ack {
- u32 pf_ack;
- u32 vf_ack[1];
- u32 iov_dis_ack;
-};
-
-struct fw_flr_mb {
- u32 aggint;
- u32 opgen_addr;
- struct fw_flr_ack ack;
-};
-
-/**** SUPPORT FOR SHMEM ARRAYS ***
- * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
- * define arrays with storage types smaller than unsigned dwords.
- * The macros below add generic support for SHMEM arrays with numeric elements
- * that can span 2,4,8 or 16 bits. The array underlying type is a 32 bit dword
- * array with individual bit-field elements accessed using shifts and masks.
- *
- */
-
-/* eb is the bitwidth of a single element */
-#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
-#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
-
-/* the bit-position macro allows the user to flip the order of the array's
- * elements on a per byte or word boundary.
- *
- * example: an array with 8 entries each 4 bits wide. This array will fit into
- * a single dword. The diagrams below show the array order of the nibbles.
- *
- * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
- *
- * | | | |
- * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
- * | | | |
- *
- * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
- *
- * | | | |
- * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
- * | | | |
- *
- * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
- *
- * | | | |
- * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
- * | | | |
- */
-#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
- ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
- (((i)%((fb)/(eb))) * (eb)))
-
-#define SHMEM_ARRAY_GET(a, i, eb, fb) \
- ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
- SHMEM_ARRAY_MASK(eb))
-
-#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
-do { \
- a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
- SHMEM_ARRAY_BITPOS(i, eb, fb)); \
- a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
- SHMEM_ARRAY_BITPOS(i, eb, fb)); \
-} while (0)
-
-
-/****START OF DCBX STRUCTURES DECLARATIONS****/
-#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
-#define DCBX_PRI_PG_BITWIDTH 4
-#define DCBX_PRI_PG_FBITS 8
-#define DCBX_PRI_PG_GET(a, i) \
- SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
-#define DCBX_PRI_PG_SET(a, i, val) \
- SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
-#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
-#define DCBX_BW_PG_BITWIDTH 8
-#define DCBX_PG_BW_GET(a, i) \
- SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
-#define DCBX_PG_BW_SET(a, i, val) \
- SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
-#define DCBX_STRICT_PRI_PG 15
-#define DCBX_MAX_APP_PROTOCOL 16
-#define FCOE_APP_IDX 0
-#define ISCSI_APP_IDX 1
-#define PREDEFINED_APP_IDX_MAX 2
-
-
-/* Big/Little endian have the same representation. */
-struct dcbx_ets_feature {
- /*
- * For Admin MIB - is this feature supported by the
- * driver | For Local MIB - should this feature be enabled.
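- *
- * Editorial sketch, not part of the original header: pri_pg_tbl below
- * is a SHMEM array of 4-bit entries, read and written with the helpers
- * above, e.g.
- *	pg = DCBX_PRI_PG_GET(ets->pri_pg_tbl, pri);
- *	DCBX_PRI_PG_SET(ets->pri_pg_tbl, pri, DCBX_STRICT_PRI_PG);
- * where ets is an assumed pointer to this structure. As a worked check,
- * for eb=4/fb=8 element 0 sits at bit position
- * ((32/8 - 1 - 0 % 4) * 8) + ((0 % 2) * 4) = 24, i.e. in the top byte,
- * matching the per-byte flip diagram above.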
- */ - u32 enabled; - u32 pg_bw_tbl[2]; - u32 pri_pg_tbl[1]; -}; - -/* Driver structure in LE */ -struct dcbx_pfc_feature { -#ifdef __BIG_ENDIAN - u8 pri_en_bitmap; - #define DCBX_PFC_PRI_0 0x01 - #define DCBX_PFC_PRI_1 0x02 - #define DCBX_PFC_PRI_2 0x04 - #define DCBX_PFC_PRI_3 0x08 - #define DCBX_PFC_PRI_4 0x10 - #define DCBX_PFC_PRI_5 0x20 - #define DCBX_PFC_PRI_6 0x40 - #define DCBX_PFC_PRI_7 0x80 - u8 pfc_caps; - u8 reserved; - u8 enabled; -#elif defined(__LITTLE_ENDIAN) - u8 enabled; - u8 reserved; - u8 pfc_caps; - u8 pri_en_bitmap; - #define DCBX_PFC_PRI_0 0x01 - #define DCBX_PFC_PRI_1 0x02 - #define DCBX_PFC_PRI_2 0x04 - #define DCBX_PFC_PRI_3 0x08 - #define DCBX_PFC_PRI_4 0x10 - #define DCBX_PFC_PRI_5 0x20 - #define DCBX_PFC_PRI_6 0x40 - #define DCBX_PFC_PRI_7 0x80 -#endif -}; - -struct dcbx_app_priority_entry { -#ifdef __BIG_ENDIAN - u16 app_id; - u8 pri_bitmap; - u8 appBitfield; - #define DCBX_APP_ENTRY_VALID 0x01 - #define DCBX_APP_ENTRY_SF_MASK 0x30 - #define DCBX_APP_ENTRY_SF_SHIFT 4 - #define DCBX_APP_SF_ETH_TYPE 0x10 - #define DCBX_APP_SF_PORT 0x20 -#elif defined(__LITTLE_ENDIAN) - u8 appBitfield; - #define DCBX_APP_ENTRY_VALID 0x01 - #define DCBX_APP_ENTRY_SF_MASK 0x30 - #define DCBX_APP_ENTRY_SF_SHIFT 4 - #define DCBX_APP_SF_ETH_TYPE 0x10 - #define DCBX_APP_SF_PORT 0x20 - u8 pri_bitmap; - u16 app_id; -#endif -}; - - -/* FW structure in BE */ -struct dcbx_app_priority_feature { -#ifdef __BIG_ENDIAN - u8 reserved; - u8 default_pri; - u8 tc_supported; - u8 enabled; -#elif defined(__LITTLE_ENDIAN) - u8 enabled; - u8 tc_supported; - u8 default_pri; - u8 reserved; -#endif - struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; -}; - -/* FW structure in BE */ -struct dcbx_features { - /* PG feature */ - struct dcbx_ets_feature ets; - /* PFC feature */ - struct dcbx_pfc_feature pfc; - /* APP feature */ - struct dcbx_app_priority_feature app; -}; - -/* LLDP protocol parameters */ -/* FW structure in BE */ -struct lldp_params { -#ifdef __BIG_ENDIAN - u8 msg_fast_tx_interval; - u8 msg_tx_hold; - u8 msg_tx_interval; - u8 admin_status; - #define LLDP_TX_ONLY 0x01 - #define LLDP_RX_ONLY 0x02 - #define LLDP_TX_RX 0x03 - #define LLDP_DISABLED 0x04 - u8 reserved1; - u8 tx_fast; - u8 tx_crd_max; - u8 tx_crd; -#elif defined(__LITTLE_ENDIAN) - u8 admin_status; - #define LLDP_TX_ONLY 0x01 - #define LLDP_RX_ONLY 0x02 - #define LLDP_TX_RX 0x03 - #define LLDP_DISABLED 0x04 - u8 msg_tx_interval; - u8 msg_tx_hold; - u8 msg_fast_tx_interval; - u8 tx_crd; - u8 tx_crd_max; - u8 tx_fast; - u8 reserved1; -#endif - #define REM_CHASSIS_ID_STAT_LEN 4 - #define REM_PORT_ID_STAT_LEN 4 - /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ - u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN]; - /* Holds remote Port ID TLV header, subtype and 9B of payload. */ - u32 peer_port_id[REM_PORT_ID_STAT_LEN]; -}; - -struct lldp_dcbx_stat { - #define LOCAL_CHASSIS_ID_STAT_LEN 2 - #define LOCAL_PORT_ID_STAT_LEN 2 - /* Holds local Chassis ID 8B payload of constant subtype 4. */ - u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN]; - /* Holds local Port ID 8B payload of constant subtype 3. */ - u32 local_port_id[LOCAL_PORT_ID_STAT_LEN]; - /* Number of DCBX frames transmitted. */ - u32 num_tx_dcbx_pkts; - /* Number of DCBX frames received. */ - u32 num_rx_dcbx_pkts; -}; - -/* ADMIN MIB - DCBX local machine default configuration. 
*/
-struct lldp_admin_mib {
- u32 ver_cfg_flags;
- #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
- #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
- #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
- #define DCBX_ETS_RECO_TX_ENABLED 0x00000008
- #define DCBX_ETS_RECO_VALID 0x00000010
- #define DCBX_ETS_WILLING 0x00000020
- #define DCBX_PFC_WILLING 0x00000040
- #define DCBX_APP_WILLING 0x00000080
- #define DCBX_VERSION_CEE 0x00000100
- #define DCBX_VERSION_IEEE 0x00000200
- #define DCBX_DCBX_ENABLED 0x00000400
- #define DCBX_CEE_VERSION_MASK 0x0000f000
- #define DCBX_CEE_VERSION_SHIFT 12
- #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
- #define DCBX_CEE_MAX_VERSION_SHIFT 16
- struct dcbx_features features;
-};
-
-/* REMOTE MIB - remote machine DCBX configuration. */
-struct lldp_remote_mib {
- u32 prefix_seq_num;
- u32 flags;
- #define DCBX_ETS_TLV_RX 0x00000001
- #define DCBX_PFC_TLV_RX 0x00000002
- #define DCBX_APP_TLV_RX 0x00000004
- #define DCBX_ETS_RX_ERROR 0x00000010
- #define DCBX_PFC_RX_ERROR 0x00000020
- #define DCBX_APP_RX_ERROR 0x00000040
- #define DCBX_ETS_REM_WILLING 0x00000100
- #define DCBX_PFC_REM_WILLING 0x00000200
- #define DCBX_APP_REM_WILLING 0x00000400
- #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
- #define DCBX_REMOTE_MIB_VALID 0x00002000
- struct dcbx_features features;
- u32 suffix_seq_num;
-};
-
-/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */
-struct lldp_local_mib {
- u32 prefix_seq_num;
- /* Indicates whether there is a mismatch with the negotiation results. */
- u32 error;
- #define DCBX_LOCAL_ETS_ERROR 0x00000001
- #define DCBX_LOCAL_PFC_ERROR 0x00000002
- #define DCBX_LOCAL_APP_ERROR 0x00000004
- #define DCBX_LOCAL_PFC_MISMATCH 0x00000010
- #define DCBX_LOCAL_APP_MISMATCH 0x00000020
- #define DCBX_REMOTE_MIB_ERROR 0x00000040
- struct dcbx_features features;
- u32 suffix_seq_num;
-};
-/***END OF DCBX STRUCTURES DECLARATIONS***/
-
-struct ncsi_oem_fcoe_features {
- u32 fcoe_features1;
- #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
- #define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET 0
-
- #define FCOE_FEATURES1_LOGINS_PER_PORT_MASK 0xFFFF0000
- #define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET 16
-
- u32 fcoe_features2;
- #define FCOE_FEATURES2_EXCHANGES_MASK 0x0000FFFF
- #define FCOE_FEATURES2_EXCHANGES_OFFSET 0
-
- #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK 0xFFFF0000
- #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET 16
-
- u32 fcoe_features3;
- #define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK 0x0000FFFF
- #define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET 0
-
- #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK 0xFFFF0000
- #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET 16
-
- u32 fcoe_features4;
- #define FCOE_FEATURES4_FEATURE_SETTINGS_MASK 0x0000000F
- #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0
-};
-
-struct ncsi_oem_data {
- u32 driver_version[4];
- struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
-};
-
-struct shmem2_region {
-
- u32 size; /* 0x0000 */
-
- u32 dcc_support; /* 0x0004 */
- #define SHMEM_DCC_SUPPORT_NONE 0x00000000
- #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
- #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004
- #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008
- #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
- #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
-
- u32 ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */
- /*
- * For backwards compatibility, if the mf_cfg_addr does not exist
- * (the size field is smaller than 0xc) the mf_cfg resides at the
- * end of struct shmem_region
- */
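- /*
- * Editorial sketch, not part of the original header: a driver honouring
- * the rule above might resolve the mf_cfg base roughly as
- *	if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr) &&
- *	    mf_cfg_addr != SHMEM_MF_CFG_ADDR_NONE)
- *		mf_cfg_base = mf_cfg_addr;
- *	else
- *		mf_cfg_base = shmem_base + sizeof(struct shmem_region);
- * where shmem2_size, shmem_base and mf_cfg_base are assumed locals.
- */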
- u32 mf_cfg_addr; /* 0x0010 */
- #define SHMEM_MF_CFG_ADDR_NONE 0x00000000
-
- struct fw_flr_mb flr_mb; /* 0x0014 */
- u32 dcbx_lldp_params_offset; /* 0x0028 */
- #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
- u32 dcbx_neg_res_offset; /* 0x002c */
- #define SHMEM_DCBX_NEG_RES_NONE 0x00000000
- u32 dcbx_remote_mib_offset; /* 0x0030 */
- #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
- /*
- * The other shmemX_base_addr holds the other path's shmem address,
- * required, for example, in case of common phy init, or for path1 to
- * learn the address of the mcp debug trace, which is located at an
- * offset from path0's shmem
- */
- u32 other_shmem_base_addr; /* 0x0034 */
- u32 other_shmem2_base_addr; /* 0x0038 */
- /*
- * mcp_vf_disabled is set by the MCP to notify the driver of VFs
- * which were disabled/FLRed
- */
- u32 mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */
-
- /*
- * drv_ack_vf_disabled is set by the PF driver to acknowledge the
- * disabled VFs it has handled
- */
- u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
-
- u32 dcbx_lldp_dcbx_stat_offset; /* 0x0064 */
- #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
-
- /*
- * The edebug_driver_if field is used to transfer messages between the
- * edebug app and the driver through shmem2.
- *
- * message format:
- * bits 0-2 - function number / instance of driver to perform request
- * bits 3-5 - op code / is_ack?
- * bits 6-63 - data
- */
- u32 edebug_driver_if[2]; /* 0x0068 */
- #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1
- #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2
- #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3
-
- u32 nvm_retain_bitmap_addr; /* 0x0070 */
-
- u32 reserved1; /* 0x0074 */
-
- u32 reserved2[E2_FUNC_MAX];
-
- u32 reserved3[E2_FUNC_MAX];/* 0x0088 */
- u32 reserved4[E2_FUNC_MAX];/* 0x0098 */
-
- u32 swim_base_addr; /* 0x0108 */
- u32 swim_funcs;
- u32 swim_main_cb;
-
- u32 reserved5[2];
-
- /* generic flags controlled by the driver */
- u32 drv_flags;
- #define DRV_FLAGS_DCB_CONFIGURED 0x1
-
- /* pointer to extended dev_info shared data copied from nvm image */
- u32 extended_dev_info_shared_addr;
- u32 ncsi_oem_data_addr;
-
- u32 ocsd_host_addr;
- u32 ocbb_host_addr;
- u32 ocsd_req_update_interval;
-};
-
-
-struct emac_stats {
- u32 rx_stat_ifhcinoctets;
- u32 rx_stat_ifhcinbadoctets;
- u32 rx_stat_etherstatsfragments;
- u32 rx_stat_ifhcinucastpkts;
- u32 rx_stat_ifhcinmulticastpkts;
- u32 rx_stat_ifhcinbroadcastpkts;
- u32 rx_stat_dot3statsfcserrors;
- u32 rx_stat_dot3statsalignmenterrors;
- u32 rx_stat_dot3statscarriersenseerrors;
- u32 rx_stat_xonpauseframesreceived;
- u32 rx_stat_xoffpauseframesreceived;
- u32 rx_stat_maccontrolframesreceived;
- u32 rx_stat_xoffstateentered;
- u32 rx_stat_dot3statsframestoolong;
- u32 rx_stat_etherstatsjabbers;
- u32 rx_stat_etherstatsundersizepkts;
- u32 rx_stat_etherstatspkts64octets;
- u32 rx_stat_etherstatspkts65octetsto127octets;
- u32 rx_stat_etherstatspkts128octetsto255octets;
- u32 rx_stat_etherstatspkts256octetsto511octets;
- u32 rx_stat_etherstatspkts512octetsto1023octets;
- u32 rx_stat_etherstatspkts1024octetsto1522octets;
- u32 rx_stat_etherstatspktsover1522octets;
-
- u32 rx_stat_falsecarriererrors;
-
- u32 tx_stat_ifhcoutoctets;
- u32 tx_stat_ifhcoutbadoctets;
- u32 tx_stat_etherstatscollisions;
- u32 tx_stat_outxonsent;
- u32 tx_stat_outxoffsent;
- u32 tx_stat_flowcontroldone;
- u32 tx_stat_dot3statssinglecollisionframes;
- u32 tx_stat_dot3statsmultiplecollisionframes;
- u32 tx_stat_dot3statsdeferredtransmissions;
- u32
tx_stat_dot3statsexcessivecollisions; - u32 tx_stat_dot3statslatecollisions; - u32 tx_stat_ifhcoutucastpkts; - u32 tx_stat_ifhcoutmulticastpkts; - u32 tx_stat_ifhcoutbroadcastpkts; - u32 tx_stat_etherstatspkts64octets; - u32 tx_stat_etherstatspkts65octetsto127octets; - u32 tx_stat_etherstatspkts128octetsto255octets; - u32 tx_stat_etherstatspkts256octetsto511octets; - u32 tx_stat_etherstatspkts512octetsto1023octets; - u32 tx_stat_etherstatspkts1024octetsto1522octets; - u32 tx_stat_etherstatspktsover1522octets; - u32 tx_stat_dot3statsinternalmactransmiterrors; -}; - - -struct bmac1_stats { - u32 tx_stat_gtpkt_lo; - u32 tx_stat_gtpkt_hi; - u32 tx_stat_gtxpf_lo; - u32 tx_stat_gtxpf_hi; - u32 tx_stat_gtfcs_lo; - u32 tx_stat_gtfcs_hi; - u32 tx_stat_gtmca_lo; - u32 tx_stat_gtmca_hi; - u32 tx_stat_gtbca_lo; - u32 tx_stat_gtbca_hi; - u32 tx_stat_gtfrg_lo; - u32 tx_stat_gtfrg_hi; - u32 tx_stat_gtovr_lo; - u32 tx_stat_gtovr_hi; - u32 tx_stat_gt64_lo; - u32 tx_stat_gt64_hi; - u32 tx_stat_gt127_lo; - u32 tx_stat_gt127_hi; - u32 tx_stat_gt255_lo; - u32 tx_stat_gt255_hi; - u32 tx_stat_gt511_lo; - u32 tx_stat_gt511_hi; - u32 tx_stat_gt1023_lo; - u32 tx_stat_gt1023_hi; - u32 tx_stat_gt1518_lo; - u32 tx_stat_gt1518_hi; - u32 tx_stat_gt2047_lo; - u32 tx_stat_gt2047_hi; - u32 tx_stat_gt4095_lo; - u32 tx_stat_gt4095_hi; - u32 tx_stat_gt9216_lo; - u32 tx_stat_gt9216_hi; - u32 tx_stat_gt16383_lo; - u32 tx_stat_gt16383_hi; - u32 tx_stat_gtmax_lo; - u32 tx_stat_gtmax_hi; - u32 tx_stat_gtufl_lo; - u32 tx_stat_gtufl_hi; - u32 tx_stat_gterr_lo; - u32 tx_stat_gterr_hi; - u32 tx_stat_gtbyt_lo; - u32 tx_stat_gtbyt_hi; - - u32 rx_stat_gr64_lo; - u32 rx_stat_gr64_hi; - u32 rx_stat_gr127_lo; - u32 rx_stat_gr127_hi; - u32 rx_stat_gr255_lo; - u32 rx_stat_gr255_hi; - u32 rx_stat_gr511_lo; - u32 rx_stat_gr511_hi; - u32 rx_stat_gr1023_lo; - u32 rx_stat_gr1023_hi; - u32 rx_stat_gr1518_lo; - u32 rx_stat_gr1518_hi; - u32 rx_stat_gr2047_lo; - u32 rx_stat_gr2047_hi; - u32 rx_stat_gr4095_lo; - u32 rx_stat_gr4095_hi; - u32 rx_stat_gr9216_lo; - u32 rx_stat_gr9216_hi; - u32 rx_stat_gr16383_lo; - u32 rx_stat_gr16383_hi; - u32 rx_stat_grmax_lo; - u32 rx_stat_grmax_hi; - u32 rx_stat_grpkt_lo; - u32 rx_stat_grpkt_hi; - u32 rx_stat_grfcs_lo; - u32 rx_stat_grfcs_hi; - u32 rx_stat_grmca_lo; - u32 rx_stat_grmca_hi; - u32 rx_stat_grbca_lo; - u32 rx_stat_grbca_hi; - u32 rx_stat_grxcf_lo; - u32 rx_stat_grxcf_hi; - u32 rx_stat_grxpf_lo; - u32 rx_stat_grxpf_hi; - u32 rx_stat_grxuo_lo; - u32 rx_stat_grxuo_hi; - u32 rx_stat_grjbr_lo; - u32 rx_stat_grjbr_hi; - u32 rx_stat_grovr_lo; - u32 rx_stat_grovr_hi; - u32 rx_stat_grflr_lo; - u32 rx_stat_grflr_hi; - u32 rx_stat_grmeg_lo; - u32 rx_stat_grmeg_hi; - u32 rx_stat_grmeb_lo; - u32 rx_stat_grmeb_hi; - u32 rx_stat_grbyt_lo; - u32 rx_stat_grbyt_hi; - u32 rx_stat_grund_lo; - u32 rx_stat_grund_hi; - u32 rx_stat_grfrg_lo; - u32 rx_stat_grfrg_hi; - u32 rx_stat_grerb_lo; - u32 rx_stat_grerb_hi; - u32 rx_stat_grfre_lo; - u32 rx_stat_grfre_hi; - u32 rx_stat_gripj_lo; - u32 rx_stat_gripj_hi; -}; - -struct bmac2_stats { - u32 tx_stat_gtpk_lo; /* gtpok */ - u32 tx_stat_gtpk_hi; /* gtpok */ - u32 tx_stat_gtxpf_lo; /* gtpf */ - u32 tx_stat_gtxpf_hi; /* gtpf */ - u32 tx_stat_gtpp_lo; /* NEW BMAC2 */ - u32 tx_stat_gtpp_hi; /* NEW BMAC2 */ - u32 tx_stat_gtfcs_lo; - u32 tx_stat_gtfcs_hi; - u32 tx_stat_gtuca_lo; /* NEW BMAC2 */ - u32 tx_stat_gtuca_hi; /* NEW BMAC2 */ - u32 tx_stat_gtmca_lo; - u32 tx_stat_gtmca_hi; - u32 tx_stat_gtbca_lo; - u32 tx_stat_gtbca_hi; - u32 tx_stat_gtovr_lo; - u32 tx_stat_gtovr_hi; - u32 
tx_stat_gtfrg_lo;
- u32 tx_stat_gtfrg_hi;
- u32 tx_stat_gtpkt1_lo; /* gtpkt */
- u32 tx_stat_gtpkt1_hi; /* gtpkt */
- u32 tx_stat_gt64_lo;
- u32 tx_stat_gt64_hi;
- u32 tx_stat_gt127_lo;
- u32 tx_stat_gt127_hi;
- u32 tx_stat_gt255_lo;
- u32 tx_stat_gt255_hi;
- u32 tx_stat_gt511_lo;
- u32 tx_stat_gt511_hi;
- u32 tx_stat_gt1023_lo;
- u32 tx_stat_gt1023_hi;
- u32 tx_stat_gt1518_lo;
- u32 tx_stat_gt1518_hi;
- u32 tx_stat_gt2047_lo;
- u32 tx_stat_gt2047_hi;
- u32 tx_stat_gt4095_lo;
- u32 tx_stat_gt4095_hi;
- u32 tx_stat_gt9216_lo;
- u32 tx_stat_gt9216_hi;
- u32 tx_stat_gt16383_lo;
- u32 tx_stat_gt16383_hi;
- u32 tx_stat_gtmax_lo;
- u32 tx_stat_gtmax_hi;
- u32 tx_stat_gtufl_lo;
- u32 tx_stat_gtufl_hi;
- u32 tx_stat_gterr_lo;
- u32 tx_stat_gterr_hi;
- u32 tx_stat_gtbyt_lo;
- u32 tx_stat_gtbyt_hi;
-
- u32 rx_stat_gr64_lo;
- u32 rx_stat_gr64_hi;
- u32 rx_stat_gr127_lo;
- u32 rx_stat_gr127_hi;
- u32 rx_stat_gr255_lo;
- u32 rx_stat_gr255_hi;
- u32 rx_stat_gr511_lo;
- u32 rx_stat_gr511_hi;
- u32 rx_stat_gr1023_lo;
- u32 rx_stat_gr1023_hi;
- u32 rx_stat_gr1518_lo;
- u32 rx_stat_gr1518_hi;
- u32 rx_stat_gr2047_lo;
- u32 rx_stat_gr2047_hi;
- u32 rx_stat_gr4095_lo;
- u32 rx_stat_gr4095_hi;
- u32 rx_stat_gr9216_lo;
- u32 rx_stat_gr9216_hi;
- u32 rx_stat_gr16383_lo;
- u32 rx_stat_gr16383_hi;
- u32 rx_stat_grmax_lo;
- u32 rx_stat_grmax_hi;
- u32 rx_stat_grpkt_lo;
- u32 rx_stat_grpkt_hi;
- u32 rx_stat_grfcs_lo;
- u32 rx_stat_grfcs_hi;
- u32 rx_stat_gruca_lo;
- u32 rx_stat_gruca_hi;
- u32 rx_stat_grmca_lo;
- u32 rx_stat_grmca_hi;
- u32 rx_stat_grbca_lo;
- u32 rx_stat_grbca_hi;
- u32 rx_stat_grxpf_lo; /* grpf */
- u32 rx_stat_grxpf_hi; /* grpf */
- u32 rx_stat_grpp_lo;
- u32 rx_stat_grpp_hi;
- u32 rx_stat_grxuo_lo; /* gruo */
- u32 rx_stat_grxuo_hi; /* gruo */
- u32 rx_stat_grjbr_lo;
- u32 rx_stat_grjbr_hi;
- u32 rx_stat_grovr_lo;
- u32 rx_stat_grovr_hi;
- u32 rx_stat_grxcf_lo; /* grcf */
- u32 rx_stat_grxcf_hi; /* grcf */
- u32 rx_stat_grflr_lo;
- u32 rx_stat_grflr_hi;
- u32 rx_stat_grpok_lo;
- u32 rx_stat_grpok_hi;
- u32 rx_stat_grmeg_lo;
- u32 rx_stat_grmeg_hi;
- u32 rx_stat_grmeb_lo;
- u32 rx_stat_grmeb_hi;
- u32 rx_stat_grbyt_lo;
- u32 rx_stat_grbyt_hi;
- u32 rx_stat_grund_lo;
- u32 rx_stat_grund_hi;
- u32 rx_stat_grfrg_lo;
- u32 rx_stat_grfrg_hi;
- u32 rx_stat_grerb_lo; /* grerrbyt */
- u32 rx_stat_grerb_hi; /* grerrbyt */
- u32 rx_stat_grfre_lo; /* grfrerr */
- u32 rx_stat_grfre_hi; /* grfrerr */
- u32 rx_stat_gripj_lo;
- u32 rx_stat_gripj_hi;
-};
-
-struct mstat_stats {
- struct {
- /* Note that MSTAT on E3 has a bug where this register's contents are
- * actually tx_gtxpok + tx_gtxpf + (possibly) tx_gtxpp
- */
- u32 tx_gtxpok_lo;
- u32 tx_gtxpok_hi;
- u32 tx_gtxpf_lo;
- u32 tx_gtxpf_hi;
- u32 tx_gtxpp_lo;
- u32 tx_gtxpp_hi;
- u32 tx_gtfcs_lo;
- u32 tx_gtfcs_hi;
- u32 tx_gtuca_lo;
- u32 tx_gtuca_hi;
- u32 tx_gtmca_lo;
- u32 tx_gtmca_hi;
- u32 tx_gtgca_lo;
- u32 tx_gtgca_hi;
- u32 tx_gtpkt_lo;
- u32 tx_gtpkt_hi;
- u32 tx_gt64_lo;
- u32 tx_gt64_hi;
- u32 tx_gt127_lo;
- u32 tx_gt127_hi;
- u32 tx_gt255_lo;
- u32 tx_gt255_hi;
- u32 tx_gt511_lo;
- u32 tx_gt511_hi;
- u32 tx_gt1023_lo;
- u32 tx_gt1023_hi;
- u32 tx_gt1518_lo;
- u32 tx_gt1518_hi;
- u32 tx_gt2047_lo;
- u32 tx_gt2047_hi;
- u32 tx_gt4095_lo;
- u32 tx_gt4095_hi;
- u32 tx_gt9216_lo;
- u32 tx_gt9216_hi;
- u32 tx_gt16383_lo;
- u32 tx_gt16383_hi;
- u32 tx_gtufl_lo;
- u32 tx_gtufl_hi;
- u32 tx_gterr_lo;
- u32 tx_gterr_hi;
- u32 tx_gtbyt_lo;
- u32 tx_gtbyt_hi;
- u32 tx_collisions_lo;
- u32 tx_collisions_hi;
- u32 tx_singlecollision_lo;
- u32 tx_singlecollision_hi;
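- /*
- * Editorial note, not part of the original header: each _lo/_hi pair in
- * these stats blocks is one half of a 64-bit counter, combined as
- *	((u64)hi << 32) | lo
- * and, given the E3 bug described above, the true gtxpok count would be
- * recovered by subtracting the combined gtxpf (and, where present,
- * gtxpp) values.
- */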
- u32 tx_multiplecollisions_lo; - u32 tx_multiplecollisions_hi; - u32 tx_deferred_lo; - u32 tx_deferred_hi; - u32 tx_excessivecollisions_lo; - u32 tx_excessivecollisions_hi; - u32 tx_latecollisions_lo; - u32 tx_latecollisions_hi; - } stats_tx; - - struct { - u32 rx_gr64_lo; - u32 rx_gr64_hi; - u32 rx_gr127_lo; - u32 rx_gr127_hi; - u32 rx_gr255_lo; - u32 rx_gr255_hi; - u32 rx_gr511_lo; - u32 rx_gr511_hi; - u32 rx_gr1023_lo; - u32 rx_gr1023_hi; - u32 rx_gr1518_lo; - u32 rx_gr1518_hi; - u32 rx_gr2047_lo; - u32 rx_gr2047_hi; - u32 rx_gr4095_lo; - u32 rx_gr4095_hi; - u32 rx_gr9216_lo; - u32 rx_gr9216_hi; - u32 rx_gr16383_lo; - u32 rx_gr16383_hi; - u32 rx_grpkt_lo; - u32 rx_grpkt_hi; - u32 rx_grfcs_lo; - u32 rx_grfcs_hi; - u32 rx_gruca_lo; - u32 rx_gruca_hi; - u32 rx_grmca_lo; - u32 rx_grmca_hi; - u32 rx_grbca_lo; - u32 rx_grbca_hi; - u32 rx_grxpf_lo; - u32 rx_grxpf_hi; - u32 rx_grxpp_lo; - u32 rx_grxpp_hi; - u32 rx_grxuo_lo; - u32 rx_grxuo_hi; - u32 rx_grovr_lo; - u32 rx_grovr_hi; - u32 rx_grxcf_lo; - u32 rx_grxcf_hi; - u32 rx_grflr_lo; - u32 rx_grflr_hi; - u32 rx_grpok_lo; - u32 rx_grpok_hi; - u32 rx_grbyt_lo; - u32 rx_grbyt_hi; - u32 rx_grund_lo; - u32 rx_grund_hi; - u32 rx_grfrg_lo; - u32 rx_grfrg_hi; - u32 rx_grerb_lo; - u32 rx_grerb_hi; - u32 rx_grfre_lo; - u32 rx_grfre_hi; - - u32 rx_alignmenterrors_lo; - u32 rx_alignmenterrors_hi; - u32 rx_falsecarrier_lo; - u32 rx_falsecarrier_hi; - u32 rx_llfcmsgcnt_lo; - u32 rx_llfcmsgcnt_hi; - } stats_rx; -}; - -union mac_stats { - struct emac_stats emac_stats; - struct bmac1_stats bmac1_stats; - struct bmac2_stats bmac2_stats; - struct mstat_stats mstat_stats; -}; - - -struct mac_stx { - /* in_bad_octets */ - u32 rx_stat_ifhcinbadoctets_hi; - u32 rx_stat_ifhcinbadoctets_lo; - - /* out_bad_octets */ - u32 tx_stat_ifhcoutbadoctets_hi; - u32 tx_stat_ifhcoutbadoctets_lo; - - /* crc_receive_errors */ - u32 rx_stat_dot3statsfcserrors_hi; - u32 rx_stat_dot3statsfcserrors_lo; - /* alignment_errors */ - u32 rx_stat_dot3statsalignmenterrors_hi; - u32 rx_stat_dot3statsalignmenterrors_lo; - /* carrier_sense_errors */ - u32 rx_stat_dot3statscarriersenseerrors_hi; - u32 rx_stat_dot3statscarriersenseerrors_lo; - /* false_carrier_detections */ - u32 rx_stat_falsecarriererrors_hi; - u32 rx_stat_falsecarriererrors_lo; - - /* runt_packets_received */ - u32 rx_stat_etherstatsundersizepkts_hi; - u32 rx_stat_etherstatsundersizepkts_lo; - /* jabber_packets_received */ - u32 rx_stat_dot3statsframestoolong_hi; - u32 rx_stat_dot3statsframestoolong_lo; - - /* error_runt_packets_received */ - u32 rx_stat_etherstatsfragments_hi; - u32 rx_stat_etherstatsfragments_lo; - /* error_jabber_packets_received */ - u32 rx_stat_etherstatsjabbers_hi; - u32 rx_stat_etherstatsjabbers_lo; - - /* control_frames_received */ - u32 rx_stat_maccontrolframesreceived_hi; - u32 rx_stat_maccontrolframesreceived_lo; - u32 rx_stat_mac_xpf_hi; - u32 rx_stat_mac_xpf_lo; - u32 rx_stat_mac_xcf_hi; - u32 rx_stat_mac_xcf_lo; - - /* xoff_state_entered */ - u32 rx_stat_xoffstateentered_hi; - u32 rx_stat_xoffstateentered_lo; - /* pause_xon_frames_received */ - u32 rx_stat_xonpauseframesreceived_hi; - u32 rx_stat_xonpauseframesreceived_lo; - /* pause_xoff_frames_received */ - u32 rx_stat_xoffpauseframesreceived_hi; - u32 rx_stat_xoffpauseframesreceived_lo; - /* pause_xon_frames_transmitted */ - u32 tx_stat_outxonsent_hi; - u32 tx_stat_outxonsent_lo; - /* pause_xoff_frames_transmitted */ - u32 tx_stat_outxoffsent_hi; - u32 tx_stat_outxoffsent_lo; - /* flow_control_done */ - u32 tx_stat_flowcontroldone_hi; - u32 
tx_stat_flowcontroldone_lo; - - /* ether_stats_collisions */ - u32 tx_stat_etherstatscollisions_hi; - u32 tx_stat_etherstatscollisions_lo; - /* single_collision_transmit_frames */ - u32 tx_stat_dot3statssinglecollisionframes_hi; - u32 tx_stat_dot3statssinglecollisionframes_lo; - /* multiple_collision_transmit_frames */ - u32 tx_stat_dot3statsmultiplecollisionframes_hi; - u32 tx_stat_dot3statsmultiplecollisionframes_lo; - /* deferred_transmissions */ - u32 tx_stat_dot3statsdeferredtransmissions_hi; - u32 tx_stat_dot3statsdeferredtransmissions_lo; - /* excessive_collision_frames */ - u32 tx_stat_dot3statsexcessivecollisions_hi; - u32 tx_stat_dot3statsexcessivecollisions_lo; - /* late_collision_frames */ - u32 tx_stat_dot3statslatecollisions_hi; - u32 tx_stat_dot3statslatecollisions_lo; - - /* frames_transmitted_64_bytes */ - u32 tx_stat_etherstatspkts64octets_hi; - u32 tx_stat_etherstatspkts64octets_lo; - /* frames_transmitted_65_127_bytes */ - u32 tx_stat_etherstatspkts65octetsto127octets_hi; - u32 tx_stat_etherstatspkts65octetsto127octets_lo; - /* frames_transmitted_128_255_bytes */ - u32 tx_stat_etherstatspkts128octetsto255octets_hi; - u32 tx_stat_etherstatspkts128octetsto255octets_lo; - /* frames_transmitted_256_511_bytes */ - u32 tx_stat_etherstatspkts256octetsto511octets_hi; - u32 tx_stat_etherstatspkts256octetsto511octets_lo; - /* frames_transmitted_512_1023_bytes */ - u32 tx_stat_etherstatspkts512octetsto1023octets_hi; - u32 tx_stat_etherstatspkts512octetsto1023octets_lo; - /* frames_transmitted_1024_1522_bytes */ - u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; - u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; - /* frames_transmitted_1523_9022_bytes */ - u32 tx_stat_etherstatspktsover1522octets_hi; - u32 tx_stat_etherstatspktsover1522octets_lo; - u32 tx_stat_mac_2047_hi; - u32 tx_stat_mac_2047_lo; - u32 tx_stat_mac_4095_hi; - u32 tx_stat_mac_4095_lo; - u32 tx_stat_mac_9216_hi; - u32 tx_stat_mac_9216_lo; - u32 tx_stat_mac_16383_hi; - u32 tx_stat_mac_16383_lo; - - /* internal_mac_transmit_errors */ - u32 tx_stat_dot3statsinternalmactransmiterrors_hi; - u32 tx_stat_dot3statsinternalmactransmiterrors_lo; - - /* if_out_discards */ - u32 tx_stat_mac_ufl_hi; - u32 tx_stat_mac_ufl_lo; -}; - - -#define MAC_STX_IDX_MAX 2 - -struct host_port_stats { - u32 host_port_stats_start; - - struct mac_stx mac_stx[MAC_STX_IDX_MAX]; - - u32 brb_drop_hi; - u32 brb_drop_lo; - - u32 host_port_stats_end; -}; - - -struct host_func_stats { - u32 host_func_stats_start; - - u32 total_bytes_received_hi; - u32 total_bytes_received_lo; - - u32 total_bytes_transmitted_hi; - u32 total_bytes_transmitted_lo; - - u32 total_unicast_packets_received_hi; - u32 total_unicast_packets_received_lo; - - u32 total_multicast_packets_received_hi; - u32 total_multicast_packets_received_lo; - - u32 total_broadcast_packets_received_hi; - u32 total_broadcast_packets_received_lo; - - u32 total_unicast_packets_transmitted_hi; - u32 total_unicast_packets_transmitted_lo; - - u32 total_multicast_packets_transmitted_hi; - u32 total_multicast_packets_transmitted_lo; - - u32 total_broadcast_packets_transmitted_hi; - u32 total_broadcast_packets_transmitted_lo; - - u32 valid_bytes_received_hi; - u32 valid_bytes_received_lo; - - u32 host_func_stats_end; -}; - -/* VIC definitions */ -#define VICSTATST_UIF_INDEX 2 - -#define BCM_5710_FW_MAJOR_VERSION 7 -#define BCM_5710_FW_MINOR_VERSION 0 -#define BCM_5710_FW_REVISION_VERSION 23 -#define BCM_5710_FW_ENGINEERING_VERSION 0 -#define BCM_5710_FW_COMPILE_FLAGS 1 - - -/* - * attention bits - 
*/ -struct atten_sp_status_block { - __le32 attn_bits; - __le32 attn_bits_ack; - u8 status_block_id; - u8 reserved0; - __le16 attn_bits_index; - __le32 reserved1; -}; - - -/* - * The eth aggregative context of Cstorm - */ -struct cstorm_eth_ag_context { - u32 __reserved0[10]; -}; - - -/* - * dmae command structure - */ -struct dmae_command { - u32 opcode; -#define DMAE_COMMAND_SRC (0x1<<0) -#define DMAE_COMMAND_SRC_SHIFT 0 -#define DMAE_COMMAND_DST (0x3<<1) -#define DMAE_COMMAND_DST_SHIFT 1 -#define DMAE_COMMAND_C_DST (0x1<<3) -#define DMAE_COMMAND_C_DST_SHIFT 3 -#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) -#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 -#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) -#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 -#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) -#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 -#define DMAE_COMMAND_ENDIANITY (0x3<<9) -#define DMAE_COMMAND_ENDIANITY_SHIFT 9 -#define DMAE_COMMAND_PORT (0x1<<11) -#define DMAE_COMMAND_PORT_SHIFT 11 -#define DMAE_COMMAND_CRC_RESET (0x1<<12) -#define DMAE_COMMAND_CRC_RESET_SHIFT 12 -#define DMAE_COMMAND_SRC_RESET (0x1<<13) -#define DMAE_COMMAND_SRC_RESET_SHIFT 13 -#define DMAE_COMMAND_DST_RESET (0x1<<14) -#define DMAE_COMMAND_DST_RESET_SHIFT 14 -#define DMAE_COMMAND_E1HVN (0x3<<15) -#define DMAE_COMMAND_E1HVN_SHIFT 15 -#define DMAE_COMMAND_DST_VN (0x3<<17) -#define DMAE_COMMAND_DST_VN_SHIFT 17 -#define DMAE_COMMAND_C_FUNC (0x1<<19) -#define DMAE_COMMAND_C_FUNC_SHIFT 19 -#define DMAE_COMMAND_ERR_POLICY (0x3<<20) -#define DMAE_COMMAND_ERR_POLICY_SHIFT 20 -#define DMAE_COMMAND_RESERVED0 (0x3FF<<22) -#define DMAE_COMMAND_RESERVED0_SHIFT 22 - u32 src_addr_lo; - u32 src_addr_hi; - u32 dst_addr_lo; - u32 dst_addr_hi; -#if defined(__BIG_ENDIAN) - u16 opcode_iov; -#define DMAE_COMMAND_SRC_VFID (0x3F<<0) -#define DMAE_COMMAND_SRC_VFID_SHIFT 0 -#define DMAE_COMMAND_SRC_VFPF (0x1<<6) -#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 -#define DMAE_COMMAND_RESERVED1 (0x1<<7) -#define DMAE_COMMAND_RESERVED1_SHIFT 7 -#define DMAE_COMMAND_DST_VFID (0x3F<<8) -#define DMAE_COMMAND_DST_VFID_SHIFT 8 -#define DMAE_COMMAND_DST_VFPF (0x1<<14) -#define DMAE_COMMAND_DST_VFPF_SHIFT 14 -#define DMAE_COMMAND_RESERVED2 (0x1<<15) -#define DMAE_COMMAND_RESERVED2_SHIFT 15 - u16 len; -#elif defined(__LITTLE_ENDIAN) - u16 len; - u16 opcode_iov; -#define DMAE_COMMAND_SRC_VFID (0x3F<<0) -#define DMAE_COMMAND_SRC_VFID_SHIFT 0 -#define DMAE_COMMAND_SRC_VFPF (0x1<<6) -#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 -#define DMAE_COMMAND_RESERVED1 (0x1<<7) -#define DMAE_COMMAND_RESERVED1_SHIFT 7 -#define DMAE_COMMAND_DST_VFID (0x3F<<8) -#define DMAE_COMMAND_DST_VFID_SHIFT 8 -#define DMAE_COMMAND_DST_VFPF (0x1<<14) -#define DMAE_COMMAND_DST_VFPF_SHIFT 14 -#define DMAE_COMMAND_RESERVED2 (0x1<<15) -#define DMAE_COMMAND_RESERVED2_SHIFT 15 -#endif - u32 comp_addr_lo; - u32 comp_addr_hi; - u32 comp_val; - u32 crc32; - u32 crc32_c; -#if defined(__BIG_ENDIAN) - u16 crc16_c; - u16 crc16; -#elif defined(__LITTLE_ENDIAN) - u16 crc16; - u16 crc16_c; -#endif -#if defined(__BIG_ENDIAN) - u16 reserved3; - u16 crc_t10; -#elif defined(__LITTLE_ENDIAN) - u16 crc_t10; - u16 reserved3; -#endif -#if defined(__BIG_ENDIAN) - u16 xsum8; - u16 xsum16; -#elif defined(__LITTLE_ENDIAN) - u16 xsum16; - u16 xsum8; -#endif -}; - - -/* - * common data for all protocols - */ -struct doorbell_hdr { - u8 header; -#define DOORBELL_HDR_RX (0x1<<0) -#define DOORBELL_HDR_RX_SHIFT 0 -#define DOORBELL_HDR_DB_TYPE (0x1<<1) -#define DOORBELL_HDR_DB_TYPE_SHIFT 1 -#define DOORBELL_HDR_DPM_SIZE (0x3<<2) 
-#define DOORBELL_HDR_DPM_SIZE_SHIFT 2 -#define DOORBELL_HDR_CONN_TYPE (0xF<<4) -#define DOORBELL_HDR_CONN_TYPE_SHIFT 4 -}; - -/* - * Ethernet doorbell - */ -struct eth_tx_doorbell { -#if defined(__BIG_ENDIAN) - u16 npackets; - u8 params; -#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) -#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 -#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) -#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 -#define ETH_TX_DOORBELL_SPARE (0x1<<7) -#define ETH_TX_DOORBELL_SPARE_SHIFT 7 - struct doorbell_hdr hdr; -#elif defined(__LITTLE_ENDIAN) - struct doorbell_hdr hdr; - u8 params; -#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) -#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 -#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) -#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 -#define ETH_TX_DOORBELL_SPARE (0x1<<7) -#define ETH_TX_DOORBELL_SPARE_SHIFT 7 - u16 npackets; -#endif -}; - - -/* - * 3 lines. status block - */ -struct hc_status_block_e1x { - __le16 index_values[HC_SB_MAX_INDICES_E1X]; - __le16 running_index[HC_SB_MAX_SM]; - __le32 rsrv[11]; -}; - -/* - * host status block - */ -struct host_hc_status_block_e1x { - struct hc_status_block_e1x sb; -}; - - -/* - * 3 lines. status block - */ -struct hc_status_block_e2 { - __le16 index_values[HC_SB_MAX_INDICES_E2]; - __le16 running_index[HC_SB_MAX_SM]; - __le32 reserved[11]; -}; - -/* - * host status block - */ -struct host_hc_status_block_e2 { - struct hc_status_block_e2 sb; -}; - - -/* - * 5 lines. slow-path status block - */ -struct hc_sp_status_block { - __le16 index_values[HC_SP_SB_MAX_INDICES]; - __le16 running_index; - __le16 rsrv; - u32 rsrv1; -}; - -/* - * host status block - */ -struct host_sp_status_block { - struct atten_sp_status_block atten_status_block; - struct hc_sp_status_block sp_sb; -}; - - -/* - * IGU driver acknowledgment register - */ -struct igu_ack_register { -#if defined(__BIG_ENDIAN) - u16 sb_id_and_flags; -#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) -#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0 -#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) -#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5 -#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) -#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8 -#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) -#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9 -#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) -#define IGU_ACK_REGISTER_RESERVED_SHIFT 11 - u16 status_block_index; -#elif defined(__LITTLE_ENDIAN) - u16 status_block_index; - u16 sb_id_and_flags; -#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) -#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0 -#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) -#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5 -#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) -#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8 -#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) -#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9 -#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) -#define IGU_ACK_REGISTER_RESERVED_SHIFT 11 -#endif -}; - - -/* - * IGU driver acknowledgement register - */ -struct igu_backward_compatible { - u32 sb_id_and_flags; -#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0) -#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0 -#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16) -#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16 -#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21) -#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21 -#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24) -#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24 -#define 
IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25) -#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25 -#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27) -#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27 - u32 reserved_2; -}; - - -/* - * IGU driver acknowledgement register - */ -struct igu_regular { - u32 sb_id_and_flags; -#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0) -#define IGU_REGULAR_SB_INDEX_SHIFT 0 -#define IGU_REGULAR_RESERVED0 (0x1<<20) -#define IGU_REGULAR_RESERVED0_SHIFT 20 -#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21) -#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21 -#define IGU_REGULAR_BUPDATE (0x1<<24) -#define IGU_REGULAR_BUPDATE_SHIFT 24 -#define IGU_REGULAR_ENABLE_INT (0x3<<25) -#define IGU_REGULAR_ENABLE_INT_SHIFT 25 -#define IGU_REGULAR_RESERVED_1 (0x1<<27) -#define IGU_REGULAR_RESERVED_1_SHIFT 27 -#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28) -#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28 -#define IGU_REGULAR_CLEANUP_SET (0x1<<30) -#define IGU_REGULAR_CLEANUP_SET_SHIFT 30 -#define IGU_REGULAR_BCLEANUP (0x1<<31) -#define IGU_REGULAR_BCLEANUP_SHIFT 31 - u32 reserved_2; -}; - -/* - * IGU driver acknowledgement register - */ -union igu_consprod_reg { - struct igu_regular regular; - struct igu_backward_compatible backward_compatible; -}; - - -/* - * Igu control commands - */ -enum igu_ctrl_cmd { - IGU_CTRL_CMD_TYPE_RD, - IGU_CTRL_CMD_TYPE_WR, - MAX_IGU_CTRL_CMD -}; - - -/* - * Control register for the IGU command register - */ -struct igu_ctrl_reg { - u32 ctrl_data; -#define IGU_CTRL_REG_ADDRESS (0xFFF<<0) -#define IGU_CTRL_REG_ADDRESS_SHIFT 0 -#define IGU_CTRL_REG_FID (0x7F<<12) -#define IGU_CTRL_REG_FID_SHIFT 12 -#define IGU_CTRL_REG_RESERVED (0x1<<19) -#define IGU_CTRL_REG_RESERVED_SHIFT 19 -#define IGU_CTRL_REG_TYPE (0x1<<20) -#define IGU_CTRL_REG_TYPE_SHIFT 20 -#define IGU_CTRL_REG_UNUSED (0x7FF<<21) -#define IGU_CTRL_REG_UNUSED_SHIFT 21 -}; - - -/* - * Igu interrupt command - */ -enum igu_int_cmd { - IGU_INT_ENABLE, - IGU_INT_DISABLE, - IGU_INT_NOP, - IGU_INT_NOP2, - MAX_IGU_INT_CMD -}; - - -/* - * Igu segments - */ -enum igu_seg_access { - IGU_SEG_ACCESS_NORM, - IGU_SEG_ACCESS_DEF, - IGU_SEG_ACCESS_ATTN, - MAX_IGU_SEG_ACCESS -}; - - -/* - * Parser parsing flags field - */ -struct parsing_flags { - __le16 flags; -#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0) -#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0 -#define PARSING_FLAGS_VLAN (0x1<<1) -#define PARSING_FLAGS_VLAN_SHIFT 1 -#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2) -#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2 -#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3) -#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3 -#define PARSING_FLAGS_IP_OPTIONS (0x1<<5) -#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5 -#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6) -#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6 -#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7) -#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7 -#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9) -#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9 -#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10) -#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10 -#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11) -#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11 -#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12) -#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12 -#define PARSING_FLAGS_LLC_SNAP (0x1<<13) -#define PARSING_FLAGS_LLC_SNAP_SHIFT 13 -#define PARSING_FLAGS_RESERVED0 (0x3<<14) -#define PARSING_FLAGS_RESERVED0_SHIFT 14 -}; - - -/* - * Parsing flags for TCP ACK type - */ 
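
Almost every flags word in this header pairs a MASK define with a matching _SHIFT define (parsing_flags above, dmae_command, the doorbells, the IGU registers). A hedged sketch of the usual extract idiom, using the parsing-flags bits; the helper name is illustrative only:

	/* extract the over-Ethernet protocol field from a CQE's parsing flags;
	 * the result compares against the PRS_FLAG_OVERETH_* values below
	 */
	static inline u16 prs_flags_over_eth(const struct parsing_flags *pf)
	{
		return (le16_to_cpu(pf->flags) &
			PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
		       PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT;
	}
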
-enum prs_flags_ack_type { - PRS_FLAG_PUREACK_PIGGY, - PRS_FLAG_PUREACK_PURE, - MAX_PRS_FLAGS_ACK_TYPE -}; - - -/* - * Parsing flags for Ethernet address type - */ -enum prs_flags_eth_addr_type { - PRS_FLAG_ETHTYPE_NON_UNICAST, - PRS_FLAG_ETHTYPE_UNICAST, - MAX_PRS_FLAGS_ETH_ADDR_TYPE -}; - - -/* - * Parsing flags for over-ethernet protocol - */ -enum prs_flags_over_eth { - PRS_FLAG_OVERETH_UNKNOWN, - PRS_FLAG_OVERETH_IPV4, - PRS_FLAG_OVERETH_IPV6, - PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN, - MAX_PRS_FLAGS_OVER_ETH -}; - - -/* - * Parsing flags for over-IP protocol - */ -enum prs_flags_over_ip { - PRS_FLAG_OVERIP_UNKNOWN, - PRS_FLAG_OVERIP_TCP, - PRS_FLAG_OVERIP_UDP, - MAX_PRS_FLAGS_OVER_IP -}; - - -/* - * SDM operation gen command (generate aggregative interrupt) - */ -struct sdm_op_gen { - __le32 command; -#define SDM_OP_GEN_COMP_PARAM (0x1F<<0) -#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 -#define SDM_OP_GEN_COMP_TYPE (0x7<<5) -#define SDM_OP_GEN_COMP_TYPE_SHIFT 5 -#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8) -#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8 -#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16) -#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16 -#define SDM_OP_GEN_RESERVED (0x7FFF<<17) -#define SDM_OP_GEN_RESERVED_SHIFT 17 -}; - - -/* - * Timers connection context - */ -struct timers_block_context { - u32 __reserved_0; - u32 __reserved_1; - u32 __reserved_2; - u32 flags; -#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) -#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 -#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) -#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 -#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) -#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 -}; - - -/* - * The eth aggregative context of Tstorm - */ -struct tstorm_eth_ag_context { - u32 __reserved0[14]; -}; - - -/* - * The eth aggregative context of Ustorm - */ -struct ustorm_eth_ag_context { - u32 __reserved0; -#if defined(__BIG_ENDIAN) - u8 cdu_usage; - u8 __reserved2; - u16 __reserved1; -#elif defined(__LITTLE_ENDIAN) - u16 __reserved1; - u8 __reserved2; - u8 cdu_usage; -#endif - u32 __reserved3[6]; -}; - - -/* - * The eth aggregative context of Xstorm - */ -struct xstorm_eth_ag_context { - u32 reserved0; -#if defined(__BIG_ENDIAN) - u8 cdu_reserved; - u8 reserved2; - u16 reserved1; -#elif defined(__LITTLE_ENDIAN) - u16 reserved1; - u8 reserved2; - u8 cdu_reserved; -#endif - u32 reserved3[30]; -}; - - -/* - * doorbell message sent to the chip - */ -struct doorbell { -#if defined(__BIG_ENDIAN) - u16 zero_fill2; - u8 zero_fill1; - struct doorbell_hdr header; -#elif defined(__LITTLE_ENDIAN) - struct doorbell_hdr header; - u8 zero_fill1; - u16 zero_fill2; -#endif -}; - - -/* - * doorbell message sent to the chip - */ -struct doorbell_set_prod { -#if defined(__BIG_ENDIAN) - u16 prod; - u8 zero_fill1; - struct doorbell_hdr header; -#elif defined(__LITTLE_ENDIAN) - struct doorbell_hdr header; - u8 zero_fill1; - u16 prod; -#endif -}; - - -struct regpair { - __le32 lo; - __le32 hi; -}; - - -/* - * Classify rule opcodes in E2/E3 - */ -enum classify_rule { - CLASSIFY_RULE_OPCODE_MAC, - CLASSIFY_RULE_OPCODE_VLAN, - CLASSIFY_RULE_OPCODE_PAIR, - MAX_CLASSIFY_RULE -}; - - -/* - * Classify rule types in E2/E3 - */ -enum classify_rule_action_type { - CLASSIFY_RULE_REMOVE, - CLASSIFY_RULE_ADD, - MAX_CLASSIFY_RULE_ACTION_TYPE -}; - - -/* - * client init ramrod data - */ -struct client_init_general_data { - u8 client_id; - u8 statistics_counter_id; - u8 statistics_en_flg; - u8 is_fcoe_flg; - u8 activate_flg; 
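
A note on regpair, defined earlier in this hunk: it carries a 64-bit little-endian address as lo/hi halves, and is how page bases such as bd_page_base, sge_page_base and cqe_page_base below are handed to the firmware. A sketch of filling one from a DMA mapping (the helper name is not from the driver):

	static inline void regpair_set(struct regpair *rp, dma_addr_t mapping)
	{
		rp->lo = cpu_to_le32(lower_32_bits(mapping));
		rp->hi = cpu_to_le32(upper_32_bits(mapping));
	}
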
- u8 sp_client_id; - __le16 mtu; - u8 statistics_zero_flg; - u8 func_id; - u8 cos; - u8 traffic_type; - u32 reserved0; -}; - - -/* - * client init rx data - */ -struct client_init_rx_data { - u8 tpa_en; -#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0) -#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0 -#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) -#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 -#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2) -#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2 - u8 vmqueue_mode_en_flg; - u8 extra_data_over_sgl_en_flg; - u8 cache_line_alignment_log_size; - u8 enable_dynamic_hc; - u8 max_sges_for_packet; - u8 client_qzone_id; - u8 drop_ip_cs_err_flg; - u8 drop_tcp_cs_err_flg; - u8 drop_ttl0_flg; - u8 drop_udp_cs_err_flg; - u8 inner_vlan_removal_enable_flg; - u8 outer_vlan_removal_enable_flg; - u8 status_block_id; - u8 rx_sb_index_number; - u8 reserved0; - u8 max_tpa_queues; - u8 silent_vlan_removal_flg; - __le16 max_bytes_on_bd; - __le16 sge_buff_size; - u8 approx_mcast_engine_id; - u8 rss_engine_id; - struct regpair bd_page_base; - struct regpair sge_page_base; - struct regpair cqe_page_base; - u8 is_leading_rss; - u8 is_approx_mcast; - __le16 max_agg_size; - __le16 state; -#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0) -#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0 -#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1) -#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1 -#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2) -#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2 -#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3) -#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3 -#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4) -#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4 -#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5) -#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5 -#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6) -#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6 -#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7) -#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7 - __le16 cqe_pause_thr_low; - __le16 cqe_pause_thr_high; - __le16 bd_pause_thr_low; - __le16 bd_pause_thr_high; - __le16 sge_pause_thr_low; - __le16 sge_pause_thr_high; - __le16 rx_cos_mask; - __le16 silent_vlan_value; - __le16 silent_vlan_mask; - __le32 reserved6[2]; -}; - -/* - * client init tx data - */ -struct client_init_tx_data { - u8 enforce_security_flg; - u8 tx_status_block_id; - u8 tx_sb_index_number; - u8 tss_leading_client_id; - u8 tx_switching_flg; - u8 anti_spoofing_flg; - __le16 default_vlan; - struct regpair tx_bd_page_base; - __le16 state; -#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0) -#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0 -#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1) -#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1 -#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2) -#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 -#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) -#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 -#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) -#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 - u8 default_vlan_flg; - u8 reserved2; - __le32 reserved3; -}; - -/* - * client init ramrod data - */ -struct client_init_ramrod_data { - struct client_init_general_data general; - struct client_init_rx_data rx; - struct client_init_tx_data tx; -}; - - -/* - * client update ramrod data - */ -struct client_update_ramrod_data { - u8 client_id; - u8 func_id; - u8 
inner_vlan_removal_enable_flg;
-	u8 inner_vlan_removal_change_flg;
-	u8 outer_vlan_removal_enable_flg;
-	u8 outer_vlan_removal_change_flg;
-	u8 anti_spoofing_enable_flg;
-	u8 anti_spoofing_change_flg;
-	u8 activate_flg;
-	u8 activate_change_flg;
-	__le16 default_vlan;
-	u8 default_vlan_enable_flg;
-	u8 default_vlan_change_flg;
-	__le16 silent_vlan_value;
-	__le16 silent_vlan_mask;
-	u8 silent_vlan_removal_flg;
-	u8 silent_vlan_change_flg;
-	__le32 echo;
-};
-
-
-/*
- * The eth storm context of Cstorm
- */
-struct cstorm_eth_st_context {
-	u32 __reserved0[4];
-};
-
-
-struct double_regpair {
-	u32 regpair0_lo;
-	u32 regpair0_hi;
-	u32 regpair1_lo;
-	u32 regpair1_hi;
-};
-
-
-/*
- * Ethernet address types used in ethernet tx BDs
- */
-enum eth_addr_type {
-	UNKNOWN_ADDRESS,
-	UNICAST_ADDRESS,
-	MULTICAST_ADDRESS,
-	BROADCAST_ADDRESS,
-	MAX_ETH_ADDR_TYPE
-};
-
-
-/*
- *
- */
-struct eth_classify_cmd_header {
-	u8 cmd_general_data;
-#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0)
-#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0
-#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1)
-#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1
-#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2)
-#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2
-#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4)
-#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4
-#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5)
-#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5
-	u8 func_id;
-	u8 client_id;
-	u8 reserved1;
-};
-
-
-/*
- * header for eth classification config ramrod
- */
-struct eth_classify_header {
-	u8 rule_cnt;
-	u8 reserved0;
-	__le16 reserved1;
-	__le32 echo;
-};
-
-
-/*
- * Command for adding/removing a MAC classification rule
- */
-struct eth_classify_mac_cmd {
-	struct eth_classify_cmd_header header;
-	__le32 reserved0;
-	__le16 mac_lsb;
-	__le16 mac_mid;
-	__le16 mac_msb;
-	__le16 reserved1;
-};
-
-
-/*
- * Command for adding/removing a MAC-VLAN pair classification rule
- */
-struct eth_classify_pair_cmd {
-	struct eth_classify_cmd_header header;
-	__le32 reserved0;
-	__le16 mac_lsb;
-	__le16 mac_mid;
-	__le16 mac_msb;
-	__le16 vlan;
-};
-
-
-/*
- * Command for adding/removing a VLAN classification rule
- */
-struct eth_classify_vlan_cmd {
-	struct eth_classify_cmd_header header;
-	__le32 reserved0;
-	__le32 reserved1;
-	__le16 reserved2;
-	__le16 vlan;
-};
-
-/*
- * union for eth classification rule
- */
-union eth_classify_rule_cmd {
-	struct eth_classify_mac_cmd mac;
-	struct eth_classify_vlan_cmd vlan;
-	struct eth_classify_pair_cmd pair;
-};
-
-/*
- * parameters for eth classification configuration ramrod
- */
-struct eth_classify_rules_ramrod_data {
-	struct eth_classify_header header;
-	union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
-};
-
-
-/*
- * The data contains the client ID needed by the ramrod
- */
-struct eth_common_ramrod_data {
-	__le32 client_id;
-	__le32 reserved1;
-};
-
-
-/*
- * The eth storm context of Ustorm
- */
-struct ustorm_eth_st_context {
-	u32 reserved0[52];
-};
-
-/*
- * The eth storm context of Tstorm
- */
-struct tstorm_eth_st_context {
-	u32 __reserved0[28];
-};
-
-/*
- * The eth storm context of Xstorm
- */
-struct xstorm_eth_st_context {
-	u32 reserved0[60];
-};
-
-/*
- * Ethernet connection context
- */
-struct eth_context {
-	struct ustorm_eth_st_context ustorm_st_context;
-	struct tstorm_eth_st_context tstorm_st_context;
-	struct xstorm_eth_ag_context xstorm_ag_context;
-	struct tstorm_eth_ag_context tstorm_ag_context;
-	struct cstorm_eth_ag_context cstorm_ag_context;
-	struct
ustorm_eth_ag_context ustorm_ag_context; - struct timers_block_context timers_context; - struct xstorm_eth_st_context xstorm_st_context; - struct cstorm_eth_st_context cstorm_st_context; -}; - - -/* - * union for sgl and raw data. - */ -union eth_sgl_or_raw_data { - __le16 sgl[8]; - u32 raw_data[4]; -}; - -/* - * eth FP end aggregation CQE parameters struct - */ -struct eth_end_agg_rx_cqe { - u8 type_error_flags; -#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0) -#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0 -#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2) -#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2 -#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3) -#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3 - u8 reserved1; - u8 queue_index; - u8 reserved2; - __le32 timestamp_delta; - __le16 num_of_coalesced_segs; - __le16 pkt_len; - u8 pure_ack_count; - u8 reserved3; - __le16 reserved4; - union eth_sgl_or_raw_data sgl_or_raw_data; - __le32 reserved5[8]; -}; - - -/* - * regular eth FP CQE parameters struct - */ -struct eth_fast_path_rx_cqe { - u8 type_error_flags; -#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0) -#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2) -#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2 -#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3) -#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3 -#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4) -#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 -#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) -#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 -#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) -#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 - u8 status_flags; -#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) -#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3) -#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3 -#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4) -#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4 -#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5) -#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5 -#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6) -#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6 -#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) -#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 - u8 queue_index; - u8 placement_offset; - __le32 rss_hash_result; - __le16 vlan_tag; - __le16 pkt_len; - __le16 len_on_bd; - struct parsing_flags pars_flags; - union eth_sgl_or_raw_data sgl_or_raw_data; - __le32 reserved1[8]; -}; - - -/* - * Command for setting classification flags for a client - */ -struct eth_filter_rules_cmd { - u8 cmd_general_data; -#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0) -#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0 -#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1) -#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1 -#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2) -#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2 - u8 func_id; - u8 client_id; - u8 reserved1; - __le16 state; -#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0) -#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0 -#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1) -#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1 -#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2) -#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2 -#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3) -#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3 -#define 
ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4) -#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4 -#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5) -#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5 -#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6) -#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6 -#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7) -#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7 - __le16 reserved3; - struct regpair reserved4; -}; - - -/* - * parameters for eth classification filters ramrod - */ -struct eth_filter_rules_ramrod_data { - struct eth_classify_header header; - struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; -}; - - -/* - * parameters for eth classification configuration ramrod - */ -struct eth_general_rules_ramrod_data { - struct eth_classify_header header; - union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; -}; - - -/* - * The data for Halt ramrod - */ -struct eth_halt_ramrod_data { - __le32 client_id; - __le32 reserved0; -}; - - -/* - * Command for setting multicast classification for a client - */ -struct eth_multicast_rules_cmd { - u8 cmd_general_data; -#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) -#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0 -#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1) -#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1 -#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2) -#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2 -#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3) -#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3 - u8 func_id; - u8 bin_id; - u8 engine_id; - __le32 reserved2; - struct regpair reserved3; -}; - - -/* - * parameters for multicast classification ramrod - */ -struct eth_multicast_rules_ramrod_data { - struct eth_classify_header header; - struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; -}; - - -/* - * Place holder for ramrods protocol specific data - */ -struct ramrod_data { - __le32 data_lo; - __le32 data_hi; -}; - -/* - * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits) - */ -union eth_ramrod_data { - struct ramrod_data general; -}; - - -/* - * RSS toeplitz hash type, as reported in CQE - */ -enum eth_rss_hash_type { - DEFAULT_HASH_TYPE, - IPV4_HASH_TYPE, - TCP_IPV4_HASH_TYPE, - IPV6_HASH_TYPE, - TCP_IPV6_HASH_TYPE, - VLAN_PRI_HASH_TYPE, - E1HOV_PRI_HASH_TYPE, - DSCP_HASH_TYPE, - MAX_ETH_RSS_HASH_TYPE -}; - - -/* - * Ethernet RSS mode - */ -enum eth_rss_mode { - ETH_RSS_MODE_DISABLED, - ETH_RSS_MODE_REGULAR, - ETH_RSS_MODE_VLAN_PRI, - ETH_RSS_MODE_E1HOV_PRI, - ETH_RSS_MODE_IP_DSCP, - MAX_ETH_RSS_MODE -}; - - -/* - * parameters for RSS update ramrod (E2) - */ -struct eth_rss_update_ramrod_data { - u8 rss_engine_id; - u8 capabilities; -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6) -#define 
ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6 -#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7) -#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7 - u8 rss_result_mask; - u8 rss_mode; - __le32 __reserved2; - u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; - __le32 rss_key[T_ETH_RSS_KEY]; - __le32 echo; - __le32 reserved3; -}; - - -/* - * The eth Rx Buffer Descriptor - */ -struct eth_rx_bd { - __le32 addr_lo; - __le32 addr_hi; -}; - - -/* - * Eth Rx Cqe structure- general structure for ramrods - */ -struct common_ramrod_eth_rx_cqe { - u8 ramrod_type; -#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0) -#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 -#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2) -#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2 -#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3) -#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3 - u8 conn_type; - __le16 reserved1; - __le32 conn_and_cmd_data; -#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0) -#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0 -#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) -#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 - struct ramrod_data protocol_data; - __le32 echo; - __le32 reserved2[11]; -}; - -/* - * Rx Last CQE in page (in ETH) - */ -struct eth_rx_cqe_next_page { - __le32 addr_lo; - __le32 addr_hi; - __le32 reserved[14]; -}; - -/* - * union for all eth rx cqe types (fix their sizes) - */ -union eth_rx_cqe { - struct eth_fast_path_rx_cqe fast_path_cqe; - struct common_ramrod_eth_rx_cqe ramrod_cqe; - struct eth_rx_cqe_next_page next_page_cqe; - struct eth_end_agg_rx_cqe end_agg_cqe; -}; - - -/* - * Values for RX ETH CQE type field - */ -enum eth_rx_cqe_type { - RX_ETH_CQE_TYPE_ETH_FASTPATH, - RX_ETH_CQE_TYPE_ETH_RAMROD, - RX_ETH_CQE_TYPE_ETH_START_AGG, - RX_ETH_CQE_TYPE_ETH_STOP_AGG, - MAX_ETH_RX_CQE_TYPE -}; - - -/* - * Type of SGL/Raw field in ETH RX fast path CQE - */ -enum eth_rx_fp_sel { - ETH_FP_CQE_REGULAR, - ETH_FP_CQE_RAW, - MAX_ETH_RX_FP_SEL -}; - - -/* - * The eth Rx SGE Descriptor - */ -struct eth_rx_sge { - __le32 addr_lo; - __le32 addr_hi; -}; - - -/* - * common data for all protocols - */ -struct spe_hdr { - __le32 conn_and_cmd_data; -#define SPE_HDR_CID (0xFFFFFF<<0) -#define SPE_HDR_CID_SHIFT 0 -#define SPE_HDR_CMD_ID (0xFF<<24) -#define SPE_HDR_CMD_ID_SHIFT 24 - __le16 type; -#define SPE_HDR_CONN_TYPE (0xFF<<0) -#define SPE_HDR_CONN_TYPE_SHIFT 0 -#define SPE_HDR_FUNCTION_ID (0xFF<<8) -#define SPE_HDR_FUNCTION_ID_SHIFT 8 - __le16 reserved1; -}; - -/* - * specific data for ethernet slow path element - */ -union eth_specific_data { - u8 protocol_data[8]; - struct regpair client_update_ramrod_data; - struct regpair client_init_ramrod_init_data; - struct eth_halt_ramrod_data halt_ramrod_data; - struct regpair update_data_addr; - struct eth_common_ramrod_data common_ramrod_data; - struct regpair classify_cfg_addr; - struct regpair filter_cfg_addr; - struct regpair mcast_cfg_addr; -}; - -/* - * Ethernet slow path element - */ -struct eth_spe { - struct spe_hdr hdr; - union eth_specific_data data; -}; - - -/* - * Ethernet command ID for slow path elements - */ -enum eth_spqe_cmd_id { - RAMROD_CMD_ID_ETH_UNUSED, - RAMROD_CMD_ID_ETH_CLIENT_SETUP, - RAMROD_CMD_ID_ETH_HALT, - RAMROD_CMD_ID_ETH_FORWARD_SETUP, - RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP, - RAMROD_CMD_ID_ETH_CLIENT_UPDATE, - RAMROD_CMD_ID_ETH_EMPTY, - RAMROD_CMD_ID_ETH_TERMINATE, - RAMROD_CMD_ID_ETH_TPA_UPDATE, - RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES, - RAMROD_CMD_ID_ETH_FILTER_RULES, - RAMROD_CMD_ID_ETH_MULTICAST_RULES, 
- RAMROD_CMD_ID_ETH_RSS_UPDATE, - RAMROD_CMD_ID_ETH_SET_MAC, - MAX_ETH_SPQE_CMD_ID -}; - - -/* - * eth tpa update command - */ -enum eth_tpa_update_command { - TPA_UPDATE_NONE_COMMAND, - TPA_UPDATE_ENABLE_COMMAND, - TPA_UPDATE_DISABLE_COMMAND, - MAX_ETH_TPA_UPDATE_COMMAND -}; - - -/* - * Tx regular BD structure - */ -struct eth_tx_bd { - __le32 addr_lo; - __le32 addr_hi; - __le16 total_pkt_bytes; - __le16 nbytes; - u8 reserved[4]; -}; - - -/* - * structure for easy accessibility to assembler - */ -struct eth_tx_bd_flags { - u8 as_bitfield; -#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) -#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 -#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) -#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 -#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) -#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 -#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) -#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 -#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) -#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 -#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) -#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 -#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) -#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 -}; - -/* - * The eth Tx Buffer Descriptor - */ -struct eth_tx_start_bd { - __le32 addr_lo; - __le32 addr_hi; - __le16 nbd; - __le16 nbytes; - __le16 vlan_or_ethertype; - struct eth_tx_bd_flags bd_flags; - u8 general_data; -#define ETH_TX_START_BD_HDR_NBDS (0xF<<0) -#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 -#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) -#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 -#define ETH_TX_START_BD_RESREVED (0x1<<5) -#define ETH_TX_START_BD_RESREVED_SHIFT 5 -#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) -#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 -}; - -/* - * Tx parsing BD structure for ETH E1/E1h - */ -struct eth_tx_parse_bd_e1x { - u8 global_data; -#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) -#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) -#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 -#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) -#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 -#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) -#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 -#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) -#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 - u8 tcp_flags; -#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) -#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 -#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) -#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 -#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) -#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 -#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) -#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 -#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) -#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 -#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) -#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 -#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) -#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 -#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) -#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 - u8 ip_hlen_w; - s8 reserved; - __le16 total_hlen_w; - __le16 tcp_pseudo_csum; - __le16 lso_mss; - __le16 ip_id; - __le32 tcp_send_seq; -}; - -/* - * Tx parsing BD structure for ETH E2 - */ -struct eth_tx_parse_bd_e2 { - __le16 dst_mac_addr_lo; - __le16 dst_mac_addr_mid; - __le16 dst_mac_addr_hi; - __le16 src_mac_addr_lo; - __le16 src_mac_addr_mid; - __le16 src_mac_addr_hi; - __le32 parsing_data; -#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) -#define 
ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) -#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 -#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) -#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 -#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) -#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 -}; - -/* - * The last BD in the BD memory will hold a pointer to the next BD memory - */ -struct eth_tx_next_bd { - __le32 addr_lo; - __le32 addr_hi; - u8 reserved[8]; -}; - -/* - * union for 4 Bd types - */ -union eth_tx_bd_types { - struct eth_tx_start_bd start_bd; - struct eth_tx_bd reg_bd; - struct eth_tx_parse_bd_e1x parse_bd_e1x; - struct eth_tx_parse_bd_e2 parse_bd_e2; - struct eth_tx_next_bd next_bd; -}; - -/* - * array of 13 bds as appears in the eth xstorm context - */ -struct eth_tx_bds_array { - union eth_tx_bd_types bds[13]; -}; - - -/* - * VLAN mode on TX BDs - */ -enum eth_tx_vlan_type { - X_ETH_NO_VLAN, - X_ETH_OUTBAND_VLAN, - X_ETH_INBAND_VLAN, - X_ETH_FW_ADDED_VLAN, - MAX_ETH_TX_VLAN_TYPE -}; - - -/* - * Ethernet VLAN filtering mode in E1x - */ -enum eth_vlan_filter_mode { - ETH_VLAN_FILTER_ANY_VLAN, - ETH_VLAN_FILTER_SPECIFIC_VLAN, - ETH_VLAN_FILTER_CLASSIFY, - MAX_ETH_VLAN_FILTER_MODE -}; - - -/* - * MAC filtering configuration command header - */ -struct mac_configuration_hdr { - u8 length; - u8 offset; - __le16 client_id; - __le32 echo; -}; - -/* - * MAC address in list for ramrod - */ -struct mac_configuration_entry { - __le16 lsb_mac_addr; - __le16 middle_mac_addr; - __le16 msb_mac_addr; - __le16 vlan_id; - u8 pf_id; - u8 flags; -#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0) -#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0 -#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1) -#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1 -#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2) -#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2 -#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4) -#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4 -#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5) -#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5 -#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) -#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6 - __le16 reserved0; - __le32 clients_bit_vector; -}; - -/* - * MAC filtering configuration command - */ -struct mac_configuration_cmd { - struct mac_configuration_hdr hdr; - struct mac_configuration_entry config_table[64]; -}; - - -/* - * Set-MAC command type (in E1x) - */ -enum set_mac_action_type { - T_ETH_MAC_COMMAND_INVALIDATE, - T_ETH_MAC_COMMAND_SET, - MAX_SET_MAC_ACTION_TYPE -}; - - -/* - * tpa update ramrod data - */ -struct tpa_update_ramrod_data { - u8 update_ipv4; - u8 update_ipv6; - u8 client_id; - u8 max_tpa_queues; - u8 max_sges_for_packet; - u8 complete_on_both_clients; - __le16 reserved1; - __le16 sge_buff_size; - __le16 max_agg_size; - __le32 sge_page_base_lo; - __le32 sge_page_base_hi; - __le16 sge_pause_thr_low; - __le16 sge_pause_thr_high; -}; - - -/* - * approximate-match multicast filtering for E1H per function in Tstorm - */ -struct tstorm_eth_approximate_match_multicast_filtering { - u32 mcast_add_hash_bit_array[8]; -}; - - -/* - * Common configuration parameters per function in Tstorm - */ -struct tstorm_eth_function_common_config { - __le16 config_flags; -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 -#define 
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7 -#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) -#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8 - u8 rss_result_mask; - u8 reserved1; - __le16 vlan_id[2]; -}; - - -/* - * MAC filtering configuration parameters per port in Tstorm - */ -struct tstorm_eth_mac_filter_config { - __le32 ucast_drop_all; - __le32 ucast_accept_all; - __le32 mcast_drop_all; - __le32 mcast_accept_all; - __le32 bcast_accept_all; - __le32 vlan_filter[2]; - __le32 unmatched_unicast; -}; - - -/* - * tx only queue init ramrod data - */ -struct tx_queue_init_ramrod_data { - struct client_init_general_data general; - struct client_init_tx_data tx; -}; - - -/* - * Three RX producers for ETH - */ -struct ustorm_eth_rx_producers { -#if defined(__BIG_ENDIAN) - u16 bd_prod; - u16 cqe_prod; -#elif defined(__LITTLE_ENDIAN) - u16 cqe_prod; - u16 bd_prod; -#endif -#if defined(__BIG_ENDIAN) - u16 reserved; - u16 sge_prod; -#elif defined(__LITTLE_ENDIAN) - u16 sge_prod; - u16 reserved; -#endif -}; - - -/* - * cfc delete event data - */ -struct cfc_del_event_data { - u32 cid; - u32 reserved0; - u32 reserved1; -}; - - -/* - * per-port SAFC demo variables - */ -struct cmng_flags_per_port { - u32 cmng_enables; -#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) -#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0 -#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) -#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1 -#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2) -#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2 -#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3) -#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3 -#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4) -#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4 - u32 __reserved1; -}; - - -/* - * per-port rate shaping variables - */ -struct rate_shaping_vars_per_port { - u32 rs_periodic_timeout; - u32 rs_threshold; -}; - -/* - * per-port fairness variables - */ -struct fairness_vars_per_port { - u32 upper_bound; - u32 fair_threshold; - u32 fairness_timeout; - u32 reserved0; -}; - -/* - * per-port SAFC variables - */ -struct safc_struct_per_port { -#if defined(__BIG_ENDIAN) - u16 __reserved1; - u8 __reserved0; - u8 safc_timeout_usec; -#elif defined(__LITTLE_ENDIAN) - u8 safc_timeout_usec; - u8 __reserved0; - u16 __reserved1; -#endif - u8 cos_to_traffic_types[MAX_COS_NUMBER]; - u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; -}; - -/* - * Per-port congestion management variables - */ -struct cmng_struct_per_port { - struct rate_shaping_vars_per_port rs_vars; - struct fairness_vars_per_port fair_vars; - struct safc_struct_per_port safc_vars; - struct cmng_flags_per_port flags; -}; - - -/* - * Protocol-common command ID for slow path elements - */ -enum common_spqe_cmd_id { - RAMROD_CMD_ID_COMMON_UNUSED, - RAMROD_CMD_ID_COMMON_FUNCTION_START, - 
RAMROD_CMD_ID_COMMON_FUNCTION_STOP, - RAMROD_CMD_ID_COMMON_CFC_DEL, - RAMROD_CMD_ID_COMMON_CFC_DEL_WB, - RAMROD_CMD_ID_COMMON_STAT_QUERY, - RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, - RAMROD_CMD_ID_COMMON_START_TRAFFIC, - RAMROD_CMD_ID_COMMON_RESERVED1, - RAMROD_CMD_ID_COMMON_RESERVED2, - MAX_COMMON_SPQE_CMD_ID -}; - - -/* - * Per-protocol connection types - */ -enum connection_type { - ETH_CONNECTION_TYPE, - TOE_CONNECTION_TYPE, - RDMA_CONNECTION_TYPE, - ISCSI_CONNECTION_TYPE, - FCOE_CONNECTION_TYPE, - RESERVED_CONNECTION_TYPE_0, - RESERVED_CONNECTION_TYPE_1, - RESERVED_CONNECTION_TYPE_2, - NONE_CONNECTION_TYPE, - MAX_CONNECTION_TYPE -}; - - -/* - * Cos modes - */ -enum cos_mode { - OVERRIDE_COS, - STATIC_COS, - FW_WRR, - MAX_COS_MODE -}; - - -/* - * Dynamic HC counters set by the driver - */ -struct hc_dynamic_drv_counter { - u32 val[HC_SB_MAX_DYNAMIC_INDICES]; -}; - -/* - * zone A per-queue data - */ -struct cstorm_queue_zone_data { - struct hc_dynamic_drv_counter hc_dyn_drv_cnt; - struct regpair reserved[2]; -}; - - -/* - * Vf-PF channel data in cstorm ram (non-triggered zone) - */ -struct vf_pf_channel_zone_data { - u32 msg_addr_lo; - u32 msg_addr_hi; -}; - -/* - * zone for VF non-triggered data - */ -struct non_trigger_vf_zone { - struct vf_pf_channel_zone_data vf_pf_channel; -}; - -/* - * Vf-PF channel trigger zone in cstorm ram - */ -struct vf_pf_channel_zone_trigger { - u8 addr_valid; -}; - -/* - * zone that triggers the in-bound interrupt - */ -struct trigger_vf_zone { -#if defined(__BIG_ENDIAN) - u16 reserved1; - u8 reserved0; - struct vf_pf_channel_zone_trigger vf_pf_channel; -#elif defined(__LITTLE_ENDIAN) - struct vf_pf_channel_zone_trigger vf_pf_channel; - u8 reserved0; - u16 reserved1; -#endif - u32 reserved2; -}; - -/* - * zone B per-VF data - */ -struct cstorm_vf_zone_data { - struct non_trigger_vf_zone non_trigger; - struct trigger_vf_zone trigger; -}; - - -/* - * Dynamic host coalescing init parameters, per state machine - */ -struct dynamic_hc_sm_config { - u32 threshold[3]; - u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES]; - u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES]; - u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES]; - u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES]; - u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES]; -}; - -/* - * Dynamic host coalescing init parameters - */ -struct dynamic_hc_config { - struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM]; -}; - - -struct e2_integ_data { -#if defined(__BIG_ENDIAN) - u8 flags; -#define E2_INTEG_DATA_TESTING_EN (0x1<<0) -#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 -#define E2_INTEG_DATA_LB_TX (0x1<<1) -#define E2_INTEG_DATA_LB_TX_SHIFT 1 -#define E2_INTEG_DATA_COS_TX (0x1<<2) -#define E2_INTEG_DATA_COS_TX_SHIFT 2 -#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) -#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 -#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) -#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 -#define E2_INTEG_DATA_RESERVED (0x7<<5) -#define E2_INTEG_DATA_RESERVED_SHIFT 5 - u8 cos; - u8 voq; - u8 pbf_queue; -#elif defined(__LITTLE_ENDIAN) - u8 pbf_queue; - u8 voq; - u8 cos; - u8 flags; -#define E2_INTEG_DATA_TESTING_EN (0x1<<0) -#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 -#define E2_INTEG_DATA_LB_TX (0x1<<1) -#define E2_INTEG_DATA_LB_TX_SHIFT 1 -#define E2_INTEG_DATA_COS_TX (0x1<<2) -#define E2_INTEG_DATA_COS_TX_SHIFT 2 -#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) -#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 -#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) -#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 -#define 
E2_INTEG_DATA_RESERVED (0x7<<5) -#define E2_INTEG_DATA_RESERVED_SHIFT 5 -#endif -#if defined(__BIG_ENDIAN) - u16 reserved3; - u8 reserved2; - u8 ramEn; -#elif defined(__LITTLE_ENDIAN) - u8 ramEn; - u8 reserved2; - u16 reserved3; -#endif -}; - - -/* - * set mac event data - */ -struct eth_event_data { - u32 echo; - u32 reserved0; - u32 reserved1; -}; - - -/* - * pf-vf event data - */ -struct vf_pf_event_data { - u8 vf_id; - u8 reserved0; - u16 reserved1; - u32 msg_addr_lo; - u32 msg_addr_hi; -}; - -/* - * VF FLR event data - */ -struct vf_flr_event_data { - u8 vf_id; - u8 reserved0; - u16 reserved1; - u32 reserved2; - u32 reserved3; -}; - -/* - * malicious VF event data - */ -struct malicious_vf_event_data { - u8 vf_id; - u8 reserved0; - u16 reserved1; - u32 reserved2; - u32 reserved3; -}; - -/* - * union for all event ring message types - */ -union event_data { - struct vf_pf_event_data vf_pf_event; - struct eth_event_data eth_event; - struct cfc_del_event_data cfc_del_event; - struct vf_flr_event_data vf_flr_event; - struct malicious_vf_event_data malicious_vf_event; -}; - - -/* - * per PF event ring data - */ -struct event_ring_data { - struct regpair base_addr; -#if defined(__BIG_ENDIAN) - u8 index_id; - u8 sb_id; - u16 producer; -#elif defined(__LITTLE_ENDIAN) - u16 producer; - u8 sb_id; - u8 index_id; -#endif - u32 reserved0; -}; - - -/* - * event ring message element (each element is 128 bits) - */ -struct event_ring_msg { - u8 opcode; - u8 error; - u16 reserved1; - union event_data data; -}; - -/* - * event ring next page element (128 bits) - */ -struct event_ring_next { - struct regpair addr; - u32 reserved[2]; -}; - -/* - * union for event ring element types (each element is 128 bits) - */ -union event_ring_elem { - struct event_ring_msg message; - struct event_ring_next next_page; -}; - - -/* - * Common event ring opcodes - */ -enum event_ring_opcode { - EVENT_RING_OPCODE_VF_PF_CHANNEL, - EVENT_RING_OPCODE_FUNCTION_START, - EVENT_RING_OPCODE_FUNCTION_STOP, - EVENT_RING_OPCODE_CFC_DEL, - EVENT_RING_OPCODE_CFC_DEL_WB, - EVENT_RING_OPCODE_STAT_QUERY, - EVENT_RING_OPCODE_STOP_TRAFFIC, - EVENT_RING_OPCODE_START_TRAFFIC, - EVENT_RING_OPCODE_VF_FLR, - EVENT_RING_OPCODE_MALICIOUS_VF, - EVENT_RING_OPCODE_FORWARD_SETUP, - EVENT_RING_OPCODE_RSS_UPDATE_RULES, - EVENT_RING_OPCODE_RESERVED1, - EVENT_RING_OPCODE_RESERVED2, - EVENT_RING_OPCODE_SET_MAC, - EVENT_RING_OPCODE_CLASSIFICATION_RULES, - EVENT_RING_OPCODE_FILTERS_RULES, - EVENT_RING_OPCODE_MULTICAST_RULES, - MAX_EVENT_RING_OPCODE -}; - - -/* - * Modes for fairness algorithm - */ -enum fairness_mode { - FAIRNESS_COS_WRR_MODE, - FAIRNESS_COS_ETS_MODE, - MAX_FAIRNESS_MODE -}; - - -/* - * per-vnic fairness variables - */ -struct fairness_vars_per_vn { - u32 cos_credit_delta[MAX_COS_NUMBER]; - u32 vn_credit_delta; - u32 __reserved0; -}; - - -/* - * Priority and cos - */ -struct priority_cos { - u8 priority; - u8 cos; - __le16 reserved1; -}; - -/* - * The data for flow control configuration - */ -struct flow_control_configuration { - struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; - u8 dcb_enabled; - u8 dcb_version; - u8 dont_add_pri_0_en; - u8 reserved1; - __le32 reserved2; -}; - - -/* - * - */ -struct function_start_data { - __le16 function_mode; - __le16 sd_vlan_tag; - u16 reserved; - u8 path_id; - u8 network_cos_mode; -}; - - -/* - * FW version stored in the Xstorm RAM - */ -struct fw_version { -#if defined(__BIG_ENDIAN) - u8 engineering; - u8 revision; - u8 minor; - u8 major; -#elif defined(__LITTLE_ENDIAN) - u8 
major;
-	u8 minor;
-	u8 revision;
-	u8 engineering;
-#endif
-	u32 flags;
-#define FW_VERSION_OPTIMIZED (0x1<<0)
-#define FW_VERSION_OPTIMIZED_SHIFT 0
-#define FW_VERSION_BIG_ENDIEN (0x1<<1)
-#define FW_VERSION_BIG_ENDIEN_SHIFT 1
-#define FW_VERSION_CHIP_VERSION (0x3<<2)
-#define FW_VERSION_CHIP_VERSION_SHIFT 2
-#define __FW_VERSION_RESERVED (0xFFFFFFF<<4)
-#define __FW_VERSION_RESERVED_SHIFT 4
-};
-
-
-/*
- * Dynamic Host-Coalescing - Driver(host) counters
- */
-struct hc_dynamic_sb_drv_counters {
-	u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES];
-};
-
-
-/*
- * 2 bytes. configuration/state parameters for a single protocol index
- */
-struct hc_index_data {
-#if defined(__BIG_ENDIAN)
-	u8 flags;
-#define HC_INDEX_DATA_SM_ID (0x1<<0)
-#define HC_INDEX_DATA_SM_ID_SHIFT 0
-#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
-#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
-#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
-#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
-#define HC_INDEX_DATA_RESERVE (0x1F<<3)
-#define HC_INDEX_DATA_RESERVE_SHIFT 3
-	u8 timeout;
-#elif defined(__LITTLE_ENDIAN)
-	u8 timeout;
-	u8 flags;
-#define HC_INDEX_DATA_SM_ID (0x1<<0)
-#define HC_INDEX_DATA_SM_ID_SHIFT 0
-#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
-#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
-#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
-#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
-#define HC_INDEX_DATA_RESERVE (0x1F<<3)
-#define HC_INDEX_DATA_RESERVE_SHIFT 3
-#endif
-};
-
-
-/*
- * HC state-machine
- */
-struct hc_status_block_sm {
-#if defined(__BIG_ENDIAN)
-	u8 igu_seg_id;
-	u8 igu_sb_id;
-	u8 timer_value;
-	u8 __flags;
-#elif defined(__LITTLE_ENDIAN)
-	u8 __flags;
-	u8 timer_value;
-	u8 igu_sb_id;
-	u8 igu_seg_id;
-#endif
-	u32 time_to_expire;
-};
-
-/*
- * hold PCI identification variables - used in various places in firmware
- */
-struct pci_entity {
-#if defined(__BIG_ENDIAN)
-	u8 vf_valid;
-	u8 vf_id;
-	u8 vnic_id;
-	u8 pf_id;
-#elif defined(__LITTLE_ENDIAN)
-	u8 pf_id;
-	u8 vnic_id;
-	u8 vf_id;
-	u8 vf_valid;
-#endif
-};
-
-/*
- * The fast-path status block meta-data, common to all chips
- */
-struct hc_sb_data {
-	struct regpair host_sb_addr;
-	struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
-	struct pci_entity p_func;
-#if defined(__BIG_ENDIAN)
-	u8 rsrv0;
-	u8 state;
-	u8 dhc_qzone_id;
-	u8 same_igu_sb_1b;
-#elif defined(__LITTLE_ENDIAN)
-	u8 same_igu_sb_1b;
-	u8 dhc_qzone_id;
-	u8 state;
-	u8 rsrv0;
-#endif
-	struct regpair rsrv1[2];
-};
-
-
-/*
- * Segment types for host coalescing
- */
-enum hc_segment {
-	HC_REGULAR_SEGMENT,
-	HC_DEFAULT_SEGMENT,
-	MAX_HC_SEGMENT
-};
-
-
-/*
- * The fast-path status block meta-data
- */
-struct hc_sp_status_block_data {
-	struct regpair host_sb_addr;
-#if defined(__BIG_ENDIAN)
-	u8 rsrv1;
-	u8 state;
-	u8 igu_seg_id;
-	u8 igu_sb_id;
-#elif defined(__LITTLE_ENDIAN)
-	u8 igu_sb_id;
-	u8 igu_seg_id;
-	u8 state;
-	u8 rsrv1;
-#endif
-	struct pci_entity p_func;
-};
-
-
-/*
- * The fast-path status block meta-data
- */
-struct hc_status_block_data_e1x {
-	struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X];
-	struct hc_sb_data common;
-};
-
-
-/*
- * The fast-path status block meta-data
- */
-struct hc_status_block_data_e2 {
-	struct hc_index_data index_data[HC_SB_MAX_INDICES_E2];
-	struct hc_sb_data common;
-};
-
-
-/*
- * IGU block operation modes (in Everest2)
- */
-enum igu_mode {
-	HC_IGU_BC_MODE,
-	HC_IGU_NBC_MODE,
-	MAX_IGU_MODE
-};
-
-
-/*
- * IP versions
- */
-enum ip_ver {
-	IP_V4,
-	IP_V6,
-	MAX_IP_VER
-};
-
-
-/*
- * Multi-function modes
- */
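
The #if defined(__BIG_ENDIAN)/#elif defined(__LITTLE_ENDIAN) blocks used throughout these structures reverse the member order within each aligned 32-bit word, so the word-sized value the chip observes is the same on either host byte order. A sketch with fw_version above, assuming the structure is accessed as whole 32-bit words (the helper is illustrative, not from the driver):

	/* yields the same u32 value on big- and little-endian hosts */
	static inline u32 fw_version_word(const struct fw_version *fw)
	{
		return ((u32)fw->engineering << 24) | ((u32)fw->revision << 16) |
		       ((u32)fw->minor << 8) | fw->major;
	}
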
-enum mf_mode { - SINGLE_FUNCTION, - MULTI_FUNCTION_SD, - MULTI_FUNCTION_SI, - MULTI_FUNCTION_RESERVED, - MAX_MF_MODE -}; - -/* - * Protocol-common statistics collected by the Tstorm (per pf) - */ -struct tstorm_per_pf_stats { - struct regpair rcv_error_bytes; -}; - -/* - * - */ -struct per_pf_stats { - struct tstorm_per_pf_stats tstorm_pf_statistics; -}; - - -/* - * Protocol-common statistics collected by the Tstorm (per port) - */ -struct tstorm_per_port_stats { - __le32 mac_discard; - __le32 mac_filter_discard; - __le32 brb_truncate_discard; - __le32 mf_tag_discard; - __le32 packet_drop; - __le32 reserved; -}; - -/* - * - */ -struct per_port_stats { - struct tstorm_per_port_stats tstorm_port_statistics; -}; - - -/* - * Protocol-common statistics collected by the Tstorm (per client) - */ -struct tstorm_per_queue_stats { - struct regpair rcv_ucast_bytes; - __le32 rcv_ucast_pkts; - __le32 checksum_discard; - struct regpair rcv_bcast_bytes; - __le32 rcv_bcast_pkts; - __le32 pkts_too_big_discard; - struct regpair rcv_mcast_bytes; - __le32 rcv_mcast_pkts; - __le32 ttl0_discard; - __le16 no_buff_discard; - __le16 reserved0; - __le32 reserved1; -}; - -/* - * Protocol-common statistics collected by the Ustorm (per client) - */ -struct ustorm_per_queue_stats { - struct regpair ucast_no_buff_bytes; - struct regpair mcast_no_buff_bytes; - struct regpair bcast_no_buff_bytes; - __le32 ucast_no_buff_pkts; - __le32 mcast_no_buff_pkts; - __le32 bcast_no_buff_pkts; - __le32 coalesced_pkts; - struct regpair coalesced_bytes; - __le32 coalesced_events; - __le32 coalesced_aborts; -}; - -/* - * Protocol-common statistics collected by the Xstorm (per client) - */ -struct xstorm_per_queue_stats { - struct regpair ucast_bytes_sent; - struct regpair mcast_bytes_sent; - struct regpair bcast_bytes_sent; - __le32 ucast_pkts_sent; - __le32 mcast_pkts_sent; - __le32 bcast_pkts_sent; - __le32 error_drop_pkts; -}; - -/* - * - */ -struct per_queue_stats { - struct tstorm_per_queue_stats tstorm_queue_statistics; - struct ustorm_per_queue_stats ustorm_queue_statistics; - struct xstorm_per_queue_stats xstorm_queue_statistics; -}; - - -/* - * FW version stored in first line of pram - */ -struct pram_fw_version { - u8 major; - u8 minor; - u8 revision; - u8 engineering; - u8 flags; -#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0) -#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0 -#define PRAM_FW_VERSION_STORM_ID (0x3<<1) -#define PRAM_FW_VERSION_STORM_ID_SHIFT 1 -#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3) -#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3 -#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4) -#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4 -#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6) -#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6 -}; - - -/* - * Ethernet slow path element - */ -union protocol_common_specific_data { - u8 protocol_data[8]; - struct regpair phy_address; - struct regpair mac_config_addr; -}; - -/* - * The send queue element - */ -struct protocol_common_spe { - struct spe_hdr hdr; - union protocol_common_specific_data data; -}; - - -/* - * a single rate shaping counter. 
can be used as protocol or vnic counter
- */
-struct rate_shaping_counter {
- u32 quota;
-#if defined(__BIG_ENDIAN)
- u16 __reserved0;
- u16 rate;
-#elif defined(__LITTLE_ENDIAN)
- u16 rate;
- u16 __reserved0;
-#endif
-};
-
-
-/*
- * per-vnic rate shaping variables
- */
-struct rate_shaping_vars_per_vn {
- struct rate_shaping_counter vn_counter;
-};
-
-
-/*
- * The send queue element
- */
-struct slow_path_element {
- struct spe_hdr hdr;
- struct regpair protocol_data;
-};
-
-
-/*
- * Protocol-common statistics counter
- */
-struct stats_counter {
- __le16 xstats_counter;
- __le16 reserved0;
- __le32 reserved1;
- __le16 tstats_counter;
- __le16 reserved2;
- __le32 reserved3;
- __le16 ustats_counter;
- __le16 reserved4;
- __le32 reserved5;
- __le16 cstats_counter;
- __le16 reserved6;
- __le32 reserved7;
-};
-
-
-/*
- * statistics query entry
- */
-struct stats_query_entry {
- u8 kind;
- u8 index;
- __le16 funcID;
- __le32 reserved;
- struct regpair address;
-};
-
-/*
- * statistics command
- */
-struct stats_query_cmd_group {
- struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
-};
-
-
-/*
- * statistics command header
- */
-struct stats_query_header {
- u8 cmd_num;
- u8 reserved0;
- __le16 drv_stats_counter;
- __le32 reserved1;
- struct regpair stats_counters_addrs;
-};
-
-
-/*
- * Types of statistics query entry
- */
-enum stats_query_type {
- STATS_TYPE_QUEUE,
- STATS_TYPE_PORT,
- STATS_TYPE_PF,
- STATS_TYPE_TOE,
- STATS_TYPE_FCOE,
- MAX_STATS_QUERY_TYPE
-};
-
-
-/*
- * Indicates the function status block state
- */
-enum status_block_state {
- SB_DISABLED,
- SB_ENABLED,
- SB_CLEANED,
- MAX_STATUS_BLOCK_STATE
-};
-
-
-/*
- * Storm IDs (including attentions for IGU related enums)
- */
-enum storm_id {
- USTORM_ID,
- CSTORM_ID,
- XSTORM_ID,
- TSTORM_ID,
- ATTENTION_ID,
- MAX_STORM_ID
-};
-
-
-/*
- * Traffic types used in ETS and flow control algorithms
- */
-enum traffic_type {
- LLFC_TRAFFIC_TYPE_NW,
- LLFC_TRAFFIC_TYPE_FCOE,
- LLFC_TRAFFIC_TYPE_ISCSI,
- MAX_TRAFFIC_TYPE
-};
-
-
-/*
- * zone A per-queue data
- */
-struct tstorm_queue_zone_data {
- struct regpair reserved[4];
-};
-
-
-/*
- * zone B per-VF data
- */
-struct tstorm_vf_zone_data {
- struct regpair reserved;
-};
-
-
-/*
- * zone A per-queue data
- */
-struct ustorm_queue_zone_data {
- struct ustorm_eth_rx_producers eth_rx_producers;
- struct regpair reserved[3];
-};
-
-
-/*
- * zone B per-VF data
- */
-struct ustorm_vf_zone_data {
- struct regpair reserved;
-};
-
-
-/*
- * data per VF-PF channel
- */
-struct vf_pf_channel_data {
-#if defined(__BIG_ENDIAN)
- u16 reserved0;
- u8 valid;
- u8 state;
-#elif defined(__LITTLE_ENDIAN)
- u8 state;
- u8 valid;
- u16 reserved0;
-#endif
- u32 reserved1;
-};
-
-
-/*
- * State of VF-PF channel
- */
-enum vf_pf_channel_state {
- VF_PF_CHANNEL_STATE_READY,
- VF_PF_CHANNEL_STATE_WAITING_FOR_ACK,
- MAX_VF_PF_CHANNEL_STATE
-};
-
-
-/*
- * zone A per-queue data
- */
-struct xstorm_queue_zone_data {
- struct regpair reserved[4];
-};
-
-
-/*
- * zone B per-VF data
- */
-struct xstorm_vf_zone_data {
- struct regpair reserved;
-};
-
-#endif /* BNX2X_HSI_H */
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
deleted file mode 100644
index 4d748e7..0000000
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ /dev/null
@@ -1,567 +0,0 @@
-/* bnx2x_init.h: Broadcom Everest network driver.
- * Structures and macros needed during the initialization.
- * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Maintained by: Eilon Greenstein - * Written by: Eliezer Tamir - * Modified by: Vladislav Zolotarov - */ - -#ifndef BNX2X_INIT_H -#define BNX2X_INIT_H - -/* Init operation types and structures */ -enum { - OP_RD = 0x1, /* read a single register */ - OP_WR, /* write a single register */ - OP_SW, /* copy a string to the device */ - OP_ZR, /* clear memory */ - OP_ZP, /* unzip then copy with DMAE */ - OP_WR_64, /* write 64 bit pattern */ - OP_WB, /* copy a string using DMAE */ - OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */ - /* Skip the following ops if all of the init modes don't match */ - OP_IF_MODE_OR, - /* Skip the following ops if any of the init modes don't match */ - OP_IF_MODE_AND, - OP_MAX -}; - -enum { - STAGE_START, - STAGE_END, -}; - -/* Returns the index of start or end of a specific block stage in ops array*/ -#define BLOCK_OPS_IDX(block, stage, end) \ - (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) - - -/* structs for the various opcodes */ -struct raw_op { - u32 op:8; - u32 offset:24; - u32 raw_data; -}; - -struct op_read { - u32 op:8; - u32 offset:24; - u32 val; -}; - -struct op_write { - u32 op:8; - u32 offset:24; - u32 val; -}; - -struct op_arr_write { - u32 op:8; - u32 offset:24; -#ifdef __BIG_ENDIAN - u16 data_len; - u16 data_off; -#else /* __LITTLE_ENDIAN */ - u16 data_off; - u16 data_len; -#endif -}; - -struct op_zero { - u32 op:8; - u32 offset:24; - u32 len; -}; - -struct op_if_mode { - u32 op:8; - u32 cmd_offset:24; - u32 mode_bit_map; -}; - - -union init_op { - struct op_read read; - struct op_write write; - struct op_arr_write arr_wr; - struct op_zero zero; - struct raw_op raw; - struct op_if_mode if_mode; -}; - - -/* Init Phases */ -enum { - PHASE_COMMON, - PHASE_PORT0, - PHASE_PORT1, - PHASE_PF0, - PHASE_PF1, - PHASE_PF2, - PHASE_PF3, - PHASE_PF4, - PHASE_PF5, - PHASE_PF6, - PHASE_PF7, - NUM_OF_INIT_PHASES -}; - -/* Init Modes */ -enum { - MODE_ASIC = 0x00000001, - MODE_FPGA = 0x00000002, - MODE_EMUL = 0x00000004, - MODE_E2 = 0x00000008, - MODE_E3 = 0x00000010, - MODE_PORT2 = 0x00000020, - MODE_PORT4 = 0x00000040, - MODE_SF = 0x00000080, - MODE_MF = 0x00000100, - MODE_MF_SD = 0x00000200, - MODE_MF_SI = 0x00000400, - MODE_MF_NIV = 0x00000800, - MODE_E3_A0 = 0x00001000, - MODE_E3_B0 = 0x00002000, - MODE_COS3 = 0x00004000, - MODE_COS6 = 0x00008000, - MODE_LITTLE_ENDIAN = 0x00010000, - MODE_BIG_ENDIAN = 0x00020000, -}; - -/* Init Blocks */ -enum { - BLOCK_ATC, - BLOCK_BRB1, - BLOCK_CCM, - BLOCK_CDU, - BLOCK_CFC, - BLOCK_CSDM, - BLOCK_CSEM, - BLOCK_DBG, - BLOCK_DMAE, - BLOCK_DORQ, - BLOCK_HC, - BLOCK_IGU, - BLOCK_MISC, - BLOCK_NIG, - BLOCK_PBF, - BLOCK_PGLUE_B, - BLOCK_PRS, - BLOCK_PXP2, - BLOCK_PXP, - BLOCK_QM, - BLOCK_SRC, - BLOCK_TCM, - BLOCK_TM, - BLOCK_TSDM, - BLOCK_TSEM, - BLOCK_UCM, - BLOCK_UPB, - BLOCK_USDM, - BLOCK_USEM, - BLOCK_XCM, - BLOCK_XPB, - BLOCK_XSDM, - BLOCK_XSEM, - BLOCK_MISC_AEU, - NUM_OF_INIT_BLOCKS -}; - -/* QM queue numbers */ -#define BNX2X_ETH_Q 0 -#define BNX2X_TOE_Q 3 -#define BNX2X_TOE_ACK_Q 6 -#define BNX2X_ISCSI_Q 9 -#define BNX2X_ISCSI_ACK_Q 11 -#define BNX2X_FCOE_Q 10 - -/* Vnics per mode */ -#define BNX2X_PORT2_MODE_NUM_VNICS 4 -#define BNX2X_PORT4_MODE_NUM_VNICS 2 - -/* COS offset for port1 in E3 B0 4port mode */ -#define BNX2X_E3B0_PORT1_COS_OFFSET 3 - -/* QM Register 
addresses */ -#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\ - (QM_REG_QVOQIDX_0 + 4 * (pf_q_num)) -#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\ - (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5))) -#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\ - (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4)) - -/* extracts the QM queue number for the specified port and vnic */ -#define BNX2X_PF_Q_NUM(q_num, port, vnic)\ - ((((port) << 1) | (vnic)) * 16 + (q_num)) - - -/* Maps the specified queue to the specified COS */ -static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos) -{ - /* find current COS mapping */ - u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4); - - /* check if queue->COS mapping has changed */ - if (curr_cos != new_cos) { - u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS; - u32 reg_addr, reg_bit_map, vnic; - - /* update parameters for 4port mode */ - if (INIT_MODE_FLAGS(bp) & MODE_PORT4) { - num_vnics = BNX2X_PORT4_MODE_NUM_VNICS; - if (BP_PORT(bp)) { - curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET; - new_cos += BNX2X_E3B0_PORT1_COS_OFFSET; - } - } - - /* change queue mapping for each VNIC */ - for (vnic = 0; vnic < num_vnics; vnic++) { - u32 pf_q_num = - BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic); - u32 q_bit_map = 1 << (pf_q_num & 0x1f); - - /* overwrite queue->VOQ mapping */ - REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos); - - /* clear queue bit from current COS bit map */ - reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num); - reg_bit_map = REG_RD(bp, reg_addr); - REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map)); - - /* set queue bit in new COS bit map */ - reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num); - reg_bit_map = REG_RD(bp, reg_addr); - REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); - - /* set/clear queue bit in command-queue bit map - (E2/E3A0 only, valid COS values are 0/1) */ - if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { - reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); - reg_bit_map = REG_RD(bp, reg_addr); - q_bit_map = 1 << (2 * (pf_q_num & 0xf)); - reg_bit_map = new_cos ? 
- (reg_bit_map | q_bit_map) : - (reg_bit_map & (~q_bit_map)); - REG_WR(bp, reg_addr, reg_bit_map); - } - } - } -} - -/* Configures the QM according to the specified per-traffic-type COSes */ -static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode, - struct priority_cos *traffic_cos) -{ - bnx2x_map_q_cos(bp, BNX2X_FCOE_Q, - traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos); - bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q, - traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); - bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q, - traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); - if (mode != STATIC_COS) { - /* required only in backward compatible COS mode */ - bnx2x_map_q_cos(bp, BNX2X_ETH_Q, - traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); - bnx2x_map_q_cos(bp, BNX2X_TOE_Q, - traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); - bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q, - traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); - } -} - - -/* Returns the index of start or end of a specific block stage in ops array*/ -#define BLOCK_OPS_IDX(block, stage, end) \ - (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) - - -#define INITOP_SET 0 /* set the HW directly */ -#define INITOP_CLEAR 1 /* clear the HW directly */ -#define INITOP_INIT 2 /* set the init-value array */ - -/**************************************************************************** -* ILT management -****************************************************************************/ -struct ilt_line { - dma_addr_t page_mapping; - void *page; - u32 size; -}; - -struct ilt_client_info { - u32 page_size; - u16 start; - u16 end; - u16 client_num; - u16 flags; -#define ILT_CLIENT_SKIP_INIT 0x1 -#define ILT_CLIENT_SKIP_MEM 0x2 -}; - -struct bnx2x_ilt { - u32 start_line; - struct ilt_line *lines; - struct ilt_client_info clients[4]; -#define ILT_CLIENT_CDU 0 -#define ILT_CLIENT_QM 1 -#define ILT_CLIENT_SRC 2 -#define ILT_CLIENT_TM 3 -}; - -/**************************************************************************** -* SRC configuration -****************************************************************************/ -struct src_ent { - u8 opaque[56]; - u64 next; -}; - -/**************************************************************************** -* Parity configuration -****************************************************************************/ -#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \ -{ \ - block##_REG_##block##_PRTY_MASK, \ - block##_REG_##block##_PRTY_STS_CLR, \ - en_mask, {m1, m1h, m2, m3}, #block \ -} - -#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \ -{ \ - block##_REG_##block##_PRTY_MASK_0, \ - block##_REG_##block##_PRTY_STS_CLR_0, \ - en_mask, {m1, m1h, m2, m3}, #block"_0" \ -} - -#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \ -{ \ - block##_REG_##block##_PRTY_MASK_1, \ - block##_REG_##block##_PRTY_STS_CLR_1, \ - en_mask, {m1, m1h, m2, m3}, #block"_1" \ -} - -static const struct { - u32 mask_addr; - u32 sts_clr_addr; - u32 en_mask; /* Mask to enable parity attentions */ - struct { - u32 e1; /* 57710 */ - u32 e1h; /* 57711 */ - u32 e2; /* 57712 */ - u32 e3; /* 578xx */ - } reg_mask; /* Register mask (all valid bits) */ - char name[7]; /* Block's longest name is 6 characters long - * (name + suffix) - */ -} bnx2x_blocks_parity_data[] = { - /* bit 19 masked */ - /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */ - /* bit 5,18,20-31 */ - /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */ - /* bit 5 */ - /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */ - /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */ - /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */ - 
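
The BLOCK_PRTY_INFO* macros above lean on token pasting and stringizing: block##_REG_##block##_PRTY_MASK forms the register name and #block the printable one. A standalone sketch of what BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0) roughly expands to; the register values here are stubs, not the real addresses:

#include <stdio.h>

/* Stub addresses; the real ones come from the driver's register file. */
#define HC_REG_HC_PRTY_MASK	0x1000
#define HC_REG_HC_PRTY_STS_CLR	0x1004

struct block_prty_info {
	unsigned int mask_addr, sts_clr_addr, en_mask;
	unsigned int reg_mask[4];	/* e1, e1h, e2, e3 */
	char name[7];
};

/* Roughly what BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0) produces: */
static const struct block_prty_info hc = {
	HC_REG_HC_PRTY_MASK, HC_REG_HC_PRTY_STS_CLR,
	0x7, {0x7, 0x7, 0, 0}, "HC"
};

int main(void)
{
	printf("%s: mask@0x%x sts_clr@0x%x en=0x%x\n",
	       hc.name, hc.mask_addr, hc.sts_clr_addr, hc.en_mask);
	return 0;
}
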
- /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't - * want to handle "system kill" flow at the moment. - */ - BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff, - 0x7ffffff), - BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, - 0xffffffff), - BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff), - BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0), - BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0), - BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff), - BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff), - BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff), - BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1), - BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff), - BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f), - BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3), - BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3), - {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, - GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf, - {0xf, 0xf, 0xf, 0xf}, "UPB"}, - {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, - GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, - {0xf, 0xf, 0xf, 0xf}, "XPB"}, - BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7), - BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f), - BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f), - BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1), - BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf), - BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf), - BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff), - BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff), - BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f), - BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff), - BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff), - BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff), - BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff), - BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), - BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), - BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), - BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff), - BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, - 0xffffffff), - BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f), - BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, - 0xffffffff), - BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f), - BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, - 0xffffffff), - BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f), - BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, - 0xffffffff), - BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f), -}; - - -/* [28] MCP Latched rom_parity - * [29] MCP Latched ump_rx_parity - * [30] MCP Latched ump_tx_parity - * [31] MCP Latched scpad_parity - */ -#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ - (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) - -/* Below registers control the MCP parity attention output. When - * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are - * enabled, when cleared - disabled. 
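
Concretely, enabling or disabling the MCP parity attention is the same read-modify-write applied to each register in that list. A standalone sketch of the pattern; the fake register file and bit mask below are stand-ins for REG_RD/REG_WR and MISC_AEU_ENABLE_MCP_PRTY_BITS:

#include <stdint.h>
#include <stdio.h>

#define MCP_PRTY_BITS 0xf0000000u	/* stand-in for the four latched bits */

static uint32_t regs[4];		/* fake register file */
static uint32_t reg_rd(int a) { return regs[a]; }
static void reg_wr(int a, uint32_t v) { regs[a] = v; }

static void set_mcp_parity(int enable)
{
	/* Same read-modify-write applied to every control register. */
	for (int i = 0; i < 4; i++) {
		uint32_t v = reg_rd(i);

		v = enable ? (v | MCP_PRTY_BITS) : (v & ~MCP_PRTY_BITS);
		reg_wr(i, v);
	}
}

int main(void)
{
	set_mcp_parity(1);
	printf("reg0=%08x\n", (unsigned)reg_rd(0));
	set_mcp_parity(0);
	printf("reg0=%08x\n", (unsigned)reg_rd(0));
	return 0;
}
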
- */ -static const u32 mcp_attn_ctl_regs[] = { - MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, - MISC_REG_AEU_ENABLE4_NIG_0, - MISC_REG_AEU_ENABLE4_PXP_0, - MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, - MISC_REG_AEU_ENABLE4_NIG_1, - MISC_REG_AEU_ENABLE4_PXP_1 -}; - -static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) -{ - int i; - u32 reg_val; - - for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { - reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); - - if (enable) - reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; - else - reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; - - REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); - } -} - -static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx) -{ - if (CHIP_IS_E1(bp)) - return bnx2x_blocks_parity_data[idx].reg_mask.e1; - else if (CHIP_IS_E1H(bp)) - return bnx2x_blocks_parity_data[idx].reg_mask.e1h; - else if (CHIP_IS_E2(bp)) - return bnx2x_blocks_parity_data[idx].reg_mask.e2; - else /* CHIP_IS_E3 */ - return bnx2x_blocks_parity_data[idx].reg_mask.e3; -} - -static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { - u32 dis_mask = bnx2x_parity_reg_mask(bp, i); - - if (dis_mask) { - REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, - dis_mask); - DP(NETIF_MSG_HW, "Setting parity mask " - "for %s to\t\t0x%x\n", - bnx2x_blocks_parity_data[i].name, dis_mask); - } - } - - /* Disable MCP parity attentions */ - bnx2x_set_mcp_parity(bp, false); -} - -/** - * Clear the parity error status registers. - */ -static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) -{ - int i; - u32 reg_val, mcp_aeu_bits = - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY | - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY; - - /* Clear SEM_FAST parities */ - REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); - REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); - REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); - REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); - - for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { - u32 reg_mask = bnx2x_parity_reg_mask(bp, i); - - if (reg_mask) { - reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i]. 
- sts_clr_addr); - if (reg_val & reg_mask) - DP(NETIF_MSG_HW, - "Parity errors in %s: 0x%x\n", - bnx2x_blocks_parity_data[i].name, - reg_val & reg_mask); - } - } - - /* Check if there were parity attentions in MCP */ - reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP); - if (reg_val & mcp_aeu_bits) - DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n", - reg_val & mcp_aeu_bits); - - /* Clear parity attentions in MCP: - * [7] clears Latched rom_parity - * [8] clears Latched ump_rx_parity - * [9] clears Latched ump_tx_parity - * [10] clears Latched scpad_parity (both ports) - */ - REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780); -} - -static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { - u32 reg_mask = bnx2x_parity_reg_mask(bp, i); - - if (reg_mask) - REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, - bnx2x_blocks_parity_data[i].en_mask & reg_mask); - } - - /* Enable MCP parity attentions */ - bnx2x_set_mcp_parity(bp, true); -} - - -#endif /* BNX2X_INIT_H */ - diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h deleted file mode 100644 index 7ec1724..0000000 --- a/drivers/net/bnx2x/bnx2x_init_ops.h +++ /dev/null @@ -1,912 +0,0 @@ -/* bnx2x_init_ops.h: Broadcom Everest network driver. - * Static functions needed during the initialization. - * This file is "included" in bnx2x_main.c. - * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Maintained by: Eilon Greenstein - * Written by: Vladislav Zolotarov - */ - -#ifndef BNX2X_INIT_OPS_H -#define BNX2X_INIT_OPS_H - - -#ifndef BP_ILT -#define BP_ILT(bp) NULL -#endif - -#ifndef BP_FUNC -#define BP_FUNC(bp) 0 -#endif - -#ifndef BP_PORT -#define BP_PORT(bp) 0 -#endif - -#ifndef BNX2X_ILT_FREE -#define BNX2X_ILT_FREE(x, y, sz) -#endif - -#ifndef BNX2X_ILT_ZALLOC -#define BNX2X_ILT_ZALLOC(x, y, sz) -#endif - -#ifndef ILOG2 -#define ILOG2(x) x -#endif - -static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len); -static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); -static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, - dma_addr_t phys_addr, u32 addr, - u32 len); - -static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, - const u32 *data, u32 len) -{ - u32 i; - - for (i = 0; i < len; i++) - REG_WR(bp, addr + i*4, data[i]); -} - -static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, - const u32 *data, u32 len) -{ - u32 i; - - for (i = 0; i < len; i++) - bnx2x_reg_wr_ind(bp, addr + i*4, data[i]); -} - -static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len, - u8 wb) -{ - if (bp->dmae_ready) - bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); - else if (wb) - /* - * Wide bus registers with no dmae need to be written - * using indirect write. - */ - bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); - else - bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); -} - -static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, - u32 len, u8 wb) -{ - u32 buf_len = (((len*4) > FW_BUF_SIZE) ? 
FW_BUF_SIZE : (len*4)); - u32 buf_len32 = buf_len/4; - u32 i; - - memset(GUNZIP_BUF(bp), (u8)fill, buf_len); - - for (i = 0; i < len; i += buf_len32) { - u32 cur_len = min(buf_len32, len - i); - - bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb); - } -} - -static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) -{ - if (bp->dmae_ready) - bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); - else - bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); -} - -static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, - const u32 *data, u32 len64) -{ - u32 buf_len32 = FW_BUF_SIZE/4; - u32 len = len64*2; - u64 data64 = 0; - u32 i; - - /* 64 bit value is in a blob: first low DWORD, then high DWORD */ - data64 = HILO_U64((*(data + 1)), (*data)); - - len64 = min((u32)(FW_BUF_SIZE/8), len64); - for (i = 0; i < len64; i++) { - u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i; - - *pdata = data64; - } - - for (i = 0; i < len; i += buf_len32) { - u32 cur_len = min(buf_len32, len - i); - - bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len); - } -} - -/********************************************************* - There are different blobs for each PRAM section. - In addition, each blob write operation is divided into a few operations - in order to decrease the amount of phys. contiguous buffer needed. - Thus, when we select a blob the address may be with some offset - from the beginning of PRAM section. - The same holds for the INT_TABLE sections. -**********************************************************/ -#define IF_IS_INT_TABLE_ADDR(base, addr) \ - if (((base) <= (addr)) && ((base) + 0x400 >= (addr))) - -#define IF_IS_PRAM_ADDR(base, addr) \ - if (((base) <= (addr)) && ((base) + 0x40000 >= (addr))) - -static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, - const u8 *data) -{ - IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) - data = INIT_TSEM_INT_TABLE_DATA(bp); - else - IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr) - data = INIT_CSEM_INT_TABLE_DATA(bp); - else - IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr) - data = INIT_USEM_INT_TABLE_DATA(bp); - else - IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr) - data = INIT_XSEM_INT_TABLE_DATA(bp); - else - IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr) - data = INIT_TSEM_PRAM_DATA(bp); - else - IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr) - data = INIT_CSEM_PRAM_DATA(bp); - else - IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr) - data = INIT_USEM_PRAM_DATA(bp); - else - IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr) - data = INIT_XSEM_PRAM_DATA(bp); - - return data; -} - -static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, - const u32 *data, u32 len) -{ - if (bp->dmae_ready) - VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); - else - bnx2x_init_ind_wr(bp, addr, data, len); -} - -static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, - u32 val_hi) -{ - u32 wb_write[2]; - - wb_write[0] = val_lo; - wb_write[1] = val_hi; - REG_WR_DMAE_LEN(bp, reg, wb_write, 2); -} -static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, - u32 blob_off) -{ - const u8 *data = NULL; - int rc; - u32 i; - - data = bnx2x_sel_blob(bp, addr, data) + blob_off*4; - - rc = bnx2x_gunzip(bp, data, len); - if (rc) - return; - - /* gunzip_outlen is in dwords */ - len = GUNZIP_OUTLEN(bp); - for (i = 0; i < len; i++) - ((u32 *)GUNZIP_BUF(bp))[i] = - cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]); - - bnx2x_write_big_buf_wb(bp, addr, len); -} - -static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) -{ - u16 op_start = - INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, - STAGE_START)]; - u16 op_end 
= - INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, - STAGE_END)]; - union init_op *op; - u32 op_idx, op_type, addr, len; - const u32 *data, *data_base; - - /* If empty block */ - if (op_start == op_end) - return; - - data_base = INIT_DATA(bp); - - for (op_idx = op_start; op_idx < op_end; op_idx++) { - - op = (union init_op *)&(INIT_OPS(bp)[op_idx]); - /* Get generic data */ - op_type = op->raw.op; - addr = op->raw.offset; - /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and - * OP_WR64 (we assume that op_arr_write and op_write have the - * same structure). - */ - len = op->arr_wr.data_len; - data = data_base + op->arr_wr.data_off; - - switch (op_type) { - case OP_RD: - REG_RD(bp, addr); - break; - case OP_WR: - REG_WR(bp, addr, op->write.val); - break; - case OP_SW: - bnx2x_init_str_wr(bp, addr, data, len); - break; - case OP_WB: - bnx2x_init_wr_wb(bp, addr, data, len); - break; - case OP_ZR: - bnx2x_init_fill(bp, addr, 0, op->zero.len, 0); - break; - case OP_WB_ZR: - bnx2x_init_fill(bp, addr, 0, op->zero.len, 1); - break; - case OP_ZP: - bnx2x_init_wr_zp(bp, addr, len, - op->arr_wr.data_off); - break; - case OP_WR_64: - bnx2x_init_wr_64(bp, addr, data, len); - break; - case OP_IF_MODE_AND: - /* if any of the flags doesn't match, skip the - * conditional block. - */ - if ((INIT_MODE_FLAGS(bp) & - op->if_mode.mode_bit_map) != - op->if_mode.mode_bit_map) - op_idx += op->if_mode.cmd_offset; - break; - case OP_IF_MODE_OR: - /* if all the flags don't match, skip the conditional - * block. - */ - if ((INIT_MODE_FLAGS(bp) & - op->if_mode.mode_bit_map) == 0) - op_idx += op->if_mode.cmd_offset; - break; - default: - /* Should never get here! */ - - break; - } - } -} - - -/**************************************************************************** -* PXP Arbiter -****************************************************************************/ -/* - * This code configures the PCI read/write arbiter - * which implements a weighted round robin - * between the virtual queues in the chip. - * - * The values were derived for each PCI max payload and max request size. - * since max payload and max request size are only known at run time, - * this is done as a separate init stage. 
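
The OP_IF_MODE_AND/OP_IF_MODE_OR cases above implement conditional init: when the device's mode flags fail the test, op_idx advances by cmd_offset and the guarded ops are skipped. A standalone sketch of that control flow, with a deliberately simplified op encoding (the real ops pack cmd_offset into a 24-bit field):

#include <stdint.h>
#include <stdio.h>

enum { OP_WR, OP_IF_MODE_AND, OP_IF_MODE_OR };

struct op { int type; uint32_t arg; };

#define MODE_E2 0x1
#define MODE_E3 0x2

/* For IF_MODE ops: low 16 bits = mode bitmap, high 16 bits = skip count. */
static void run(const struct op *ops, int n, uint32_t mode)
{
	for (int i = 0; i < n; i++) {
		switch (ops[i].type) {
		case OP_WR:
			printf("write %u\n", ops[i].arg);
			break;
		case OP_IF_MODE_AND:	/* skip unless ALL bits match */
			if ((mode & (ops[i].arg & 0xffff)) != (ops[i].arg & 0xffff))
				i += ops[i].arg >> 16;
			break;
		case OP_IF_MODE_OR:	/* skip unless ANY bit matches */
			if ((mode & (ops[i].arg & 0xffff)) == 0)
				i += ops[i].arg >> 16;
			break;
		}
	}
}

int main(void)
{
	const struct op ops[] = {
		{ OP_IF_MODE_AND, (1u << 16) | MODE_E3 }, /* guard next op */
		{ OP_WR, 1 },	/* only on E3 */
		{ OP_WR, 2 },	/* always */
	};

	run(ops, 3, MODE_E2);	/* prints only "write 2" */
	return 0;
}
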
- */ - -#define NUM_WR_Q 13 -#define NUM_RD_Q 29 -#define MAX_RD_ORD 3 -#define MAX_WR_ORD 2 - -/* configuration for one arbiter queue */ -struct arb_line { - int l; - int add; - int ubound; -}; - -/* derived configuration for each read queue for each max request size */ -static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = { -/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, - { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} }, - { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} }, - { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} }, - { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, -/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, -/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, - { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} } -}; - -/* derived configuration for each write queue for each max request size */ -static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = { -/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} }, - { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} }, - { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, - { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, - { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, - { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, - { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} }, - { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, - { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, -/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} }, - { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} }, - { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} }, - { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} } -}; - -/* register addresses for read queues */ -static const struct arb_line read_arb_addr[NUM_RD_Q-1] = { -/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0, - PXP2_REG_RQ_BW_RD_UBOUND0}, - {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, - PXP2_REG_PSWRQ_BW_UB1}, - {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, - PXP2_REG_PSWRQ_BW_UB2}, - {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, - PXP2_REG_PSWRQ_BW_UB3}, - {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4, - PXP2_REG_RQ_BW_RD_UBOUND4}, - {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5, - PXP2_REG_RQ_BW_RD_UBOUND5}, - {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, - PXP2_REG_PSWRQ_BW_UB6}, - {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, - PXP2_REG_PSWRQ_BW_UB7}, - {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, - PXP2_REG_PSWRQ_BW_UB8}, -/* 10 
*/{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, - PXP2_REG_PSWRQ_BW_UB9}, - {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, - PXP2_REG_PSWRQ_BW_UB10}, - {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, - PXP2_REG_PSWRQ_BW_UB11}, - {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12, - PXP2_REG_RQ_BW_RD_UBOUND12}, - {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13, - PXP2_REG_RQ_BW_RD_UBOUND13}, - {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14, - PXP2_REG_RQ_BW_RD_UBOUND14}, - {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15, - PXP2_REG_RQ_BW_RD_UBOUND15}, - {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16, - PXP2_REG_RQ_BW_RD_UBOUND16}, - {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17, - PXP2_REG_RQ_BW_RD_UBOUND17}, - {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18, - PXP2_REG_RQ_BW_RD_UBOUND18}, -/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19, - PXP2_REG_RQ_BW_RD_UBOUND19}, - {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20, - PXP2_REG_RQ_BW_RD_UBOUND20}, - {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22, - PXP2_REG_RQ_BW_RD_UBOUND22}, - {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23, - PXP2_REG_RQ_BW_RD_UBOUND23}, - {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24, - PXP2_REG_RQ_BW_RD_UBOUND24}, - {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25, - PXP2_REG_RQ_BW_RD_UBOUND25}, - {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26, - PXP2_REG_RQ_BW_RD_UBOUND26}, - {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27, - PXP2_REG_RQ_BW_RD_UBOUND27}, - {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, - PXP2_REG_PSWRQ_BW_UB28} -}; - -/* register addresses for write queues */ -static const struct arb_line write_arb_addr[NUM_WR_Q-1] = { -/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, - PXP2_REG_PSWRQ_BW_UB1}, - {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, - PXP2_REG_PSWRQ_BW_UB2}, - {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, - PXP2_REG_PSWRQ_BW_UB3}, - {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, - PXP2_REG_PSWRQ_BW_UB6}, - {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, - PXP2_REG_PSWRQ_BW_UB7}, - {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, - PXP2_REG_PSWRQ_BW_UB8}, - {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, - PXP2_REG_PSWRQ_BW_UB9}, - {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, - PXP2_REG_PSWRQ_BW_UB10}, - {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, - PXP2_REG_PSWRQ_BW_UB11}, -/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, - PXP2_REG_PSWRQ_BW_UB28}, - {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29, - PXP2_REG_RQ_BW_WR_UBOUND29}, - {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30, - PXP2_REG_RQ_BW_WR_UBOUND30} -}; - -static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, - int w_order) -{ - u32 val, i; - - if (r_order > MAX_RD_ORD) { - DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n", - r_order, MAX_RD_ORD); - r_order = MAX_RD_ORD; - } - if (w_order > MAX_WR_ORD) { - DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n", - w_order, MAX_WR_ORD); - w_order = MAX_WR_ORD; - } - if (CHIP_REV_IS_FPGA(bp)) { - DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n"); - w_order = 0; - } - DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order); - - for (i = 0; i < NUM_RD_Q-1; i++) { - REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l); - REG_WR(bp, read_arb_addr[i].add, - read_arb_data[i][r_order].add); - REG_WR(bp, read_arb_addr[i].ubound, - read_arb_data[i][r_order].ubound); - } - - for (i = 0; i < NUM_WR_Q-1; i++) { - if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) || - (write_arb_addr[i].l == 
PXP2_REG_RQ_BW_WR_L30)) {
-
- REG_WR(bp, write_arb_addr[i].l,
- write_arb_data[i][w_order].l);
-
- REG_WR(bp, write_arb_addr[i].add,
- write_arb_data[i][w_order].add);
-
- REG_WR(bp, write_arb_addr[i].ubound,
- write_arb_data[i][w_order].ubound);
- } else {
-
- val = REG_RD(bp, write_arb_addr[i].l);
- REG_WR(bp, write_arb_addr[i].l,
- val | (write_arb_data[i][w_order].l << 10));
-
- val = REG_RD(bp, write_arb_addr[i].add);
- REG_WR(bp, write_arb_addr[i].add,
- val | (write_arb_data[i][w_order].add << 10));
-
- val = REG_RD(bp, write_arb_addr[i].ubound);
- REG_WR(bp, write_arb_addr[i].ubound,
- val | (write_arb_data[i][w_order].ubound << 7));
- }
- }
-
- val = write_arb_data[NUM_WR_Q-1][w_order].add;
- val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
- val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
- REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
-
- val = read_arb_data[NUM_RD_Q-1][r_order].add;
- val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
- val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
- REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
-
- REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
- REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
- REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
- REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
-
- if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
- REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
-
- if (CHIP_IS_E3(bp))
- REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
- else if (CHIP_IS_E2(bp))
- REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
- else
- REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
-
- if (!CHIP_IS_E1(bp)) {
- /* MPS w_order optimal TH presently TH
- * 128 0 0 2
- * 256 1 1 3
- * >=512 2 2 3
- */
- /* DMAE is special */
- if (!CHIP_IS_E1H(bp)) {
- /* E2 can use optimal TH */
- val = w_order;
- REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
- } else {
- val = ((w_order == 0) ? 2 : 3);
- REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
- }
-
- REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
- REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
- REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
- REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
- REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
- }
-
- /* Validate number of tags supported by device */
-#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
- val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
- val &= 0xFF;
- if (val <= 0x20)
- REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
-}
-
-/****************************************************************************
-* ILT management
-****************************************************************************/
-/*
- * This code hides the low-level HW interaction for ILT management and
- * configuration. The API consists of a shadow ILT table which is set by the
- * driver and a set of routines to use it to configure the HW.
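
The shadow-table design keeps ILT bookkeeping in host memory and reduces programming a line to two 32-bit register writes: the page address shifted down by 12, plus a valid flag folded into the high word (the ILT_ADDR1/ILT_ADDR2 macros defined just below). A standalone check of that address math, with a made-up page address:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the driver's ILT_ADDR1/ILT_ADDR2 macros. */
#define ILT_ADDR1(x) ((uint32_t)(((uint64_t)(x) >> 12) & 0xFFFFFFFF))
#define ILT_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)(x) >> 44)))

int main(void)
{
	uint64_t page = 0x0000123456789000ULL;	/* made-up, 4K-aligned */

	/* Prints low=0x23456789 high=0x00100001 (bit 20 = valid). */
	printf("low=0x%08x high=0x%08x\n",
	       (unsigned)ILT_ADDR1(page), (unsigned)ILT_ADDR2(page));
	return 0;
}
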
- * - */ - -/* ILT HW init operations */ - -/* ILT memory management operations */ -#define ILT_MEMOP_ALLOC 0 -#define ILT_MEMOP_FREE 1 - -/* the phys address is shifted right 12 bits and has an added - * 1=valid bit added to the 53rd bit - * then since this is a wide register(TM) - * we split it into two 32 bit writes - */ -#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) -#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) -#define ILT_RANGE(f, l) (((l) << 10) | f) - -static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, - struct ilt_line *line, u32 size, u8 memop) -{ - if (memop == ILT_MEMOP_FREE) { - BNX2X_ILT_FREE(line->page, line->page_mapping, line->size); - return 0; - } - BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size); - if (!line->page) - return -1; - line->size = size; - return 0; -} - - -static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, - u8 memop) -{ - int i, rc; - struct bnx2x_ilt *ilt = BP_ILT(bp); - struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; - - if (!ilt || !ilt->lines) - return -1; - - if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) - return 0; - - for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) { - rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i], - ilt_cli->page_size, memop); - } - return rc; -} - -static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop) -{ - int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop); - if (!rc) - rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop); - if (!rc) - rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); - if (!rc) - rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop); - - return rc; -} - -static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx, - dma_addr_t page_mapping) -{ - u32 reg; - - if (CHIP_IS_E1(bp)) - reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8; - else - reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8; - - bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); -} - -static void bnx2x_ilt_line_init_op(struct bnx2x *bp, - struct bnx2x_ilt *ilt, int idx, u8 initop) -{ - dma_addr_t null_mapping; - int abs_idx = ilt->start_line + idx; - - - switch (initop) { - case INITOP_INIT: - /* set in the init-value array */ - case INITOP_SET: - bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping); - break; - case INITOP_CLEAR: - null_mapping = 0; - bnx2x_ilt_line_wr(bp, abs_idx, null_mapping); - break; - } -} - -static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp, - struct ilt_client_info *ilt_cli, - u32 ilt_start, u8 initop) -{ - u32 start_reg = 0; - u32 end_reg = 0; - - /* The boundary is either SET or INIT, - CLEAR => SET and for now SET ~~ INIT */ - - /* find the appropriate regs */ - if (CHIP_IS_E1(bp)) { - switch (ilt_cli->client_num) { - case ILT_CLIENT_CDU: - start_reg = PXP2_REG_PSWRQ_CDU0_L2P; - break; - case ILT_CLIENT_QM: - start_reg = PXP2_REG_PSWRQ_QM0_L2P; - break; - case ILT_CLIENT_SRC: - start_reg = PXP2_REG_PSWRQ_SRC0_L2P; - break; - case ILT_CLIENT_TM: - start_reg = PXP2_REG_PSWRQ_TM0_L2P; - break; - } - REG_WR(bp, start_reg + BP_FUNC(bp)*4, - ILT_RANGE((ilt_start + ilt_cli->start), - (ilt_start + ilt_cli->end))); - } else { - switch (ilt_cli->client_num) { - case ILT_CLIENT_CDU: - start_reg = PXP2_REG_RQ_CDU_FIRST_ILT; - end_reg = PXP2_REG_RQ_CDU_LAST_ILT; - break; - case ILT_CLIENT_QM: - start_reg = PXP2_REG_RQ_QM_FIRST_ILT; - end_reg = PXP2_REG_RQ_QM_LAST_ILT; - break; - case ILT_CLIENT_SRC: - start_reg = PXP2_REG_RQ_SRC_FIRST_ILT; - end_reg = PXP2_REG_RQ_SRC_LAST_ILT; - break; - case 
ILT_CLIENT_TM: - start_reg = PXP2_REG_RQ_TM_FIRST_ILT; - end_reg = PXP2_REG_RQ_TM_LAST_ILT; - break; - } - REG_WR(bp, start_reg, (ilt_start + ilt_cli->start)); - REG_WR(bp, end_reg, (ilt_start + ilt_cli->end)); - } -} - -static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, - struct bnx2x_ilt *ilt, - struct ilt_client_info *ilt_cli, - u8 initop) -{ - int i; - - if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) - return; - - for (i = ilt_cli->start; i <= ilt_cli->end; i++) - bnx2x_ilt_line_init_op(bp, ilt, i, initop); - - /* init/clear the ILT boundries */ - bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop); -} - -static void bnx2x_ilt_client_init_op(struct bnx2x *bp, - struct ilt_client_info *ilt_cli, u8 initop) -{ - struct bnx2x_ilt *ilt = BP_ILT(bp); - - bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop); -} - -static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp, - int cli_num, u8 initop) -{ - struct bnx2x_ilt *ilt = BP_ILT(bp); - struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; - - bnx2x_ilt_client_init_op(bp, ilt_cli, initop); -} - -static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) -{ - bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop); - bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop); - bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); - bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop); -} - -static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, - u32 psz_reg, u8 initop) -{ - struct bnx2x_ilt *ilt = BP_ILT(bp); - struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; - - if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) - return; - - switch (initop) { - case INITOP_INIT: - /* set in the init-value array */ - case INITOP_SET: - REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12)); - break; - case INITOP_CLEAR: - break; - } -} - -/* - * called during init common stage, ilt clients should be initialized - * prioir to calling this function - */ -static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop) -{ - bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU, - PXP2_REG_RQ_CDU_P_SIZE, initop); - bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM, - PXP2_REG_RQ_QM_P_SIZE, initop); - bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC, - PXP2_REG_RQ_SRC_P_SIZE, initop); - bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM, - PXP2_REG_RQ_TM_P_SIZE, initop); -} - -/**************************************************************************** -* QM initializations -****************************************************************************/ -#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */ -#define QM_INIT_MIN_CID_COUNT 31 -#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT) - -/* called during init port stage */ -static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count, - u8 initop) -{ - int port = BP_PORT(bp); - - if (QM_INIT(qm_cid_count)) { - switch (initop) { - case INITOP_INIT: - /* set in the init-value array */ - case INITOP_SET: - REG_WR(bp, QM_REG_CONNNUM_0 + port*4, - qm_cid_count/16 - 1); - break; - case INITOP_CLEAR: - break; - } - } -} - -static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count) -{ - int i; - u32 wb_data[2]; - - wb_data[0] = wb_data[1] = 0; - - for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) { - REG_WR(bp, QM_REG_BASEADDR + i*4, - qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); - bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, - wb_data, 2); - - if (CHIP_IS_E1H(bp)) { - REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, - qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); - 
bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8, - wb_data, 2); - } - } -} - -/* called during init common stage */ -static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count, - u8 initop) -{ - if (!QM_INIT(qm_cid_count)) - return; - - switch (initop) { - case INITOP_INIT: - /* set in the init-value array */ - case INITOP_SET: - bnx2x_qm_set_ptr_table(bp, qm_cid_count); - break; - case INITOP_CLEAR: - break; - } -} - -/**************************************************************************** -* SRC initializations -****************************************************************************/ -#ifdef BCM_CNIC -/* called during init func stage */ -static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, - dma_addr_t t2_mapping, int src_cid_count) -{ - int i; - int port = BP_PORT(bp); - - /* Initialize T2 */ - for (i = 0; i < src_cid_count-1; i++) - t2[i].next = (u64)(t2_mapping + - (i+1)*sizeof(struct src_ent)); - - /* tell the searcher where the T2 table is */ - REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count); - - bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16, - U64_LO(t2_mapping), U64_HI(t2_mapping)); - - bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16, - U64_LO((u64)t2_mapping + - (src_cid_count-1) * sizeof(struct src_ent)), - U64_HI((u64)t2_mapping + - (src_cid_count-1) * sizeof(struct src_ent))); -} -#endif -#endif /* BNX2X_INIT_OPS_H */ diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c deleted file mode 100644 index d45b155..0000000 --- a/drivers/net/bnx2x/bnx2x_link.c +++ /dev/null @@ -1,12472 +0,0 @@ -/* Copyright 2008-2011 Broadcom Corporation - * - * Unless you and Broadcom execute a separate written software license - * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). - * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written - * consent. 
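
bnx2x_src_init_t2() above builds the searcher's T2 free list in device memory, so the next pointers are bus addresses rather than host pointers: entry i points at the DMA address of entry i+1, and the last entry is left unchained. A standalone sketch of the layout, with a host array standing in for the DMA block and a made-up base address:

#include <stdint.h>
#include <stdio.h>

struct src_ent {
	uint8_t opaque[56];
	uint64_t next;		/* bus address of the next free entry */
};

int main(void)
{
	struct src_ent t2[4];
	uint64_t t2_mapping = 0x8000000ULL;	/* made-up DMA base */
	int n = 4;

	/* Chain entry i to the device-visible address of entry i+1. */
	for (int i = 0; i < n - 1; i++)
		t2[i].next = t2_mapping + (i + 1) * sizeof(struct src_ent);

	printf("t2[0].next=0x%llx\n", (unsigned long long)t2[0].next);
	return 0;
}
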
- * - * Written by Yaniv Rosner - * - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include - -#include "bnx2x.h" -#include "bnx2x_cmn.h" - - -/********************************************************/ -#define ETH_HLEN 14 -/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ -#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) -#define ETH_MIN_PACKET_SIZE 60 -#define ETH_MAX_PACKET_SIZE 1500 -#define ETH_MAX_JUMBO_PACKET_SIZE 9600 -#define MDIO_ACCESS_TIMEOUT 1000 -#define BMAC_CONTROL_RX_ENABLE 2 -#define WC_LANE_MAX 4 -#define I2C_SWITCH_WIDTH 2 -#define I2C_BSC0 0 -#define I2C_BSC1 1 -#define I2C_WA_RETRY_CNT 3 -#define MCPR_IMC_COMMAND_READ_OP 1 -#define MCPR_IMC_COMMAND_WRITE_OP 2 - -/***********************************************************/ -/* Shortcut definitions */ -/***********************************************************/ - -#define NIG_LATCH_BC_ENABLE_MI_INT 0 - -#define NIG_STATUS_EMAC0_MI_INT \ - NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT -#define NIG_STATUS_XGXS0_LINK10G \ - NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G -#define NIG_STATUS_XGXS0_LINK_STATUS \ - NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS -#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \ - NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE -#define NIG_STATUS_SERDES0_LINK_STATUS \ - NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS -#define NIG_MASK_MI_INT \ - NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT -#define NIG_MASK_XGXS0_LINK10G \ - NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G -#define NIG_MASK_XGXS0_LINK_STATUS \ - NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS -#define NIG_MASK_SERDES0_LINK_STATUS \ - NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS - -#define MDIO_AN_CL73_OR_37_COMPLETE \ - (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \ - MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE) - -#define XGXS_RESET_BITS \ - (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \ - MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \ - MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \ - MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \ - MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB) - -#define SERDES_RESET_BITS \ - (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \ - MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \ - MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \ - MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD) - -#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 -#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 -#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM -#define AUTONEG_PARALLEL \ - SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION -#define AUTONEG_SGMII_FIBER_AUTODET \ - SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT -#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY - -#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ - MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE -#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \ - MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE -#define GP_STATUS_SPEED_MASK \ - MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK -#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M -#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M -#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G -#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G -#define GP_STATUS_5G 
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G -#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G -#define GP_STATUS_10G_HIG \ - MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG -#define GP_STATUS_10G_CX4 \ - MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 -#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX -#define GP_STATUS_10G_KX4 \ - MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 -#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR -#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI -#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS -#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI -#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD -#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD -#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD -#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 -#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD -#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD -#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD -#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD -#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD -#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD -#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD -#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD -#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD -#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD -#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD - - - -/* */ -#define SFP_EEPROM_CON_TYPE_ADDR 0x2 - #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 - #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 - - -#define SFP_EEPROM_COMP_CODE_ADDR 0x3 - #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4) - #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5) - #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6) - -#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 - #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 - #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 - -#define SFP_EEPROM_OPTIONS_ADDR 0x40 - #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 -#define SFP_EEPROM_OPTIONS_SIZE 2 - -#define EDC_MODE_LINEAR 0x0022 -#define EDC_MODE_LIMITING 0x0044 -#define EDC_MODE_PASSIVE_DAC 0x0055 - - -/* BRB thresholds for E2*/ -#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170 -#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 - -#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250 -#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 - -#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10 -#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90 - -#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50 -#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250 - -/* BRB thresholds for E3A0 */ -#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290 -#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 - -#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410 -#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 - -#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10 -#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170 - -#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50 -#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410 - - -/* BRB thresholds for E3B0 2 port mode*/ -#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025 -#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 - -#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025 -#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 - -#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10 -#define 
PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025 - -#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50 -#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025 - -/* only for E3B0*/ -#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025 -#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025 - -/* Lossy +Lossless GUARANTIED == GUART */ -#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284 -/* Lossless +Lossless*/ -#define PFC_E3B0_2P_PAUSE_LB_GUART 236 -/* Lossy +Lossy*/ -#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342 - -/* Lossy +Lossless*/ -#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284 -/* Lossless +Lossless*/ -#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236 -/* Lossy +Lossy*/ -#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336 -#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80 - -#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0 -#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0 - -/* BRB thresholds for E3B0 4 port mode */ -#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304 -#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 - -#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384 -#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 - -#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10 -#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304 - -#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50 -#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384 - - -/* only for E3B0*/ -#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304 -#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384 -#define PFC_E3B0_4P_LB_GUART 120 - -#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120 -#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80 - -#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80 -#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120 - -#define DCBX_INVALID_COS (0xFF) - -#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) -#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000) -#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360) -#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720) -#define ETS_E3B0_PBF_MIN_W_VAL (10000) - -#define MAX_PACKET_SIZE (9700) -#define WC_UC_TIMEOUT 100 - -/**********************************************************/ -/* INTERFACE */ -/**********************************************************/ - -#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \ - bnx2x_cl45_write(_bp, _phy, \ - (_phy)->def_md_devad, \ - (_bank + (_addr & 0xf)), \ - _val) - -#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \ - bnx2x_cl45_read(_bp, _phy, \ - (_phy)->def_md_devad, \ - (_bank + (_addr & 0xf)), \ - _val) - -static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) -{ - u32 val = REG_RD(bp, reg); - - val |= bits; - REG_WR(bp, reg, val); - return val; -} - -static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) -{ - u32 val = REG_RD(bp, reg); - - val &= ~bits; - REG_WR(bp, reg, val); - return val; -} - -/******************************************************************/ -/* EPIO/GPIO section */ -/******************************************************************/ -static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en) -{ - u32 epio_mask, gp_oenable; - *en = 0; - /* Sanity check */ - if (epio_pin > 31) { - DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin); - return; - } - - epio_mask = 1 << epio_pin; - /* Set this EPIO to output */ - gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); - REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask); - - *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin; -} -static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en) -{ - u32 
epio_mask, gp_output, gp_oenable; - - /* Sanity check */ - if (epio_pin > 31) { - DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin); - return; - } - DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en); - epio_mask = 1 << epio_pin; - /* Set this EPIO to output */ - gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS); - if (en) - gp_output |= epio_mask; - else - gp_output &= ~epio_mask; - - REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output); - - /* Set the value for this EPIO */ - gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); - REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask); -} - -static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val) -{ - if (pin_cfg == PIN_CFG_NA) - return; - if (pin_cfg >= PIN_CFG_EPIO0) { - bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); - } else { - u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; - u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; - bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port); - } -} - -static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val) -{ - if (pin_cfg == PIN_CFG_NA) - return -EINVAL; - if (pin_cfg >= PIN_CFG_EPIO0) { - bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); - } else { - u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; - u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; - *val = bnx2x_get_gpio(bp, gpio_num, gpio_port); - } - return 0; - -} -/******************************************************************/ -/* ETS section */ -/******************************************************************/ -static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) -{ - /* ETS disabled configuration*/ - struct bnx2x *bp = params->bp; - - DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n"); - - /* - * mapping between entry priority to client number (0,1,2 -debug and - * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) - * 3bits client num. - * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 - * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000 - */ - - REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); - /* - * Bitmap of 5bits length. Each bit specifies whether the entry behaves - * as strict. Bits 0,1,2 - debug and management entries, 3 - - * COS0 entry, 4 - COS1 entry. - * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT - * bit4 bit3 bit2 bit1 bit0 - * MCP and debug are strict - */ - - REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); - /* defines which entries (clients) are subjected to WFQ arbitration */ - REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); - /* - * For strict priority entries defines the number of consecutive - * slots for the highest priority. - */ - REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); - /* - * mapping between the CREDIT_WEIGHT registers and actual client - * numbers - */ - REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); - REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0); - REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0); - - REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0); - REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0); - REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); - /* ETS mode disable */ - REG_WR(bp, PBF_REG_ETS_ENABLED, 0); - /* - * If ETS mode is enabled (there is no strict priority) defines a WFQ - * weight for COS0/COS1. 
- */
-    REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
-    REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
-    /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
-    REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
-    REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
-    /* Defines the number of consecutive slots for the strict priority */
-    REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-}
-/******************************************************************************
-* Description:
-*    Return min_w_val, which is set according to the line speed.
-******************************************************************************/
-static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
-{
-    u32 min_w_val = 0;
-    /* Calculate min_w_val.*/
-    if (vars->link_up) {
-        if (SPEED_20000 == vars->line_speed)
-            min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
-        else
-            min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
-    } else
-        min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
-    /*
-     * If the link isn't up (e.g. a static configuration), treat the
-     * link as 20GBPS.
-     */
-    return min_w_val;
-}
-/******************************************************************************
-* Description:
-*    Derive the credit upper bound from min_w_val.
-******************************************************************************/
-static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
-{
-    const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
-                        MAX_PACKET_SIZE);
-    return credit_upper_bound;
-}
-/******************************************************************************
-* Description:
-*    Set the credit upper bound for NIG.
-******************************************************************************/
-static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
-    const struct link_params *params,
-    const u32 min_w_val)
-{
-    struct bnx2x *bp = params->bp;
-    const u8 port = params->port;
-    const u32 credit_upper_bound =
-        bnx2x_ets_get_credit_upper_bound(min_w_val);
-
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
-        NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
-        NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
-        NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
-        NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
-        NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
-        NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
-
-    if (0 == port) {
-        REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
-            credit_upper_bound);
-        REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
-            credit_upper_bound);
-        REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
-            credit_upper_bound);
-    }
-}
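The credit upper bound computed above is simply max(150 * min_w_val, MAX_PACKET_SIZE). A minimal, self-contained sketch of that arithmetic (plain C, outside the driver; the constants mirror the defines earlier in this file) showing the value for both line-speed cases:

    #include <stdio.h>

    #define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS 1360
    #define ETS_E3B0_NIG_MIN_W_VAL_20GBPS       2720
    #define MAX_PACKET_SIZE                     9700
    #define MAXVAL(a, b)    (((a) > (b)) ? (a) : (b))

    /* Mirrors bnx2x_ets_get_credit_upper_bound() above */
    static unsigned int credit_upper_bound(unsigned int min_w_val)
    {
        return MAXVAL(150 * min_w_val, MAX_PACKET_SIZE);
    }

    int main(void)
    {
        /* 150 * 1360 = 204000, well above MAX_PACKET_SIZE */
        printf("up to 10G: %u\n",
               credit_upper_bound(ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS));
        /* 150 * 2720 = 408000 */
        printf("20G:       %u\n",
               credit_upper_bound(ETS_E3B0_NIG_MIN_W_VAL_20GBPS));
        return 0;
    }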
-/******************************************************************************
-* Description:
-*    Return the NIG ETS registers to their init values, except for
-*    credit_upper_bound, which isn't used in this configuration (no WFQ
-*    is enabled) and will be configured according to the spec.
-******************************************************************************/
-static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
-                    const struct link_vars *vars)
-{
-    struct bnx2x *bp = params->bp;
-    const u8 port = params->port;
-    const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
-    /*
-     * Mapping between entry priority to client number (0,1,2 - debug and
-     * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
-     * COS5) (HIGHEST); 4-bit client num. TODO_ETS - Should be done by
-     * reset value or init tool
-     */
-    if (port) {
-        REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
-        REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
-    } else {
-        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
-        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
-    }
-    /*
-     * For strict priority entries defines the number of consecutive
-     * slots for the highest priority.
-     */
-    /* TODO_ETS - Should be done by reset value or init tool */
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
-           NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-    /*
-     * Mapping between the CREDIT_WEIGHT registers and actual client
-     * numbers
-     */
-    /* TODO_ETS - Should be done by reset value or init tool */
-    if (port) {
-        /* Port 1 has 6 COS */
-        REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
-        REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
-    } else {
-        /* Port 0 has 9 COS */
-        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
-               0x43210876);
-        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
-    }
-
-    /*
-     * Bitmap of 5bits length. Each bit specifies whether the entry behaves
-     * as strict. Bits 0,1,2 - debug and management entries, 3 -
-     * COS0 entry, 4 - COS1 entry.
-     *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
-     *  bit4   bit3   bit2     bit1     bit0
-     * MCP and debug are strict
-     */
-    if (port)
-        REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
-    else
-        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
-    /* Defines which entries (clients) are subjected to WFQ arbitration */
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
-           NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-
-    /*
-     * Please notice that the register addresses are not contiguous, so a
-     * for loop here is not appropriate. In 2 port mode only COS0-5 of
-     * port 0 can be used; DEBUG0, DEBUG1 and MGMT are never used for
-     * WFQ. In 4 port mode only COS0-2 of port 1 can be used; DEBUG0,
-     * DEBUG1 and MGMT are never used for WFQ.
-     */
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
-       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
-       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
-       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
-       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
-       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
-       NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
-    if (0 == port) {
-        REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
-        REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
-        REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
-    }
-
-    bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
-}
-/******************************************************************************
-* Description:
-*    Set the credit upper bound for PBF.
-******************************************************************************/
-static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
-    const struct link_params *params,
-    const u32 min_w_val)
-{
-    struct bnx2x *bp = params->bp;
-    const u32 credit_upper_bound =
-        bnx2x_ets_get_credit_upper_bound(min_w_val);
-    const u8 port = params->port;
-    u32 base_upper_bound = 0;
-    u8 max_cos = 0;
-    u8 i = 0;
-    /*
-     * In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
-     * port mode port1 has COS0-2 that can be used for WFQ.
-     */
-    if (0 == port) {
-        base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
-        max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
-    } else {
-        base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
-        max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
-    }
-
-    for (i = 0; i < max_cos; i++)
-        REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
-}
-
-/******************************************************************************
-* Description:
-*    Return the PBF ETS registers to their init values, except for
-*    credit_upper_bound, which isn't used in this configuration (no WFQ
-*    is enabled) and will be configured according to the spec.
-******************************************************************************/
-static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
-{
-    struct bnx2x *bp = params->bp;
-    const u8 port = params->port;
-    const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
-    u8 i = 0;
-    u32 base_weight = 0;
-    u8 max_cos = 0;
-
-    /*
-     * Mapping between entry priority to client number (0 - COS0
-     * client, 1 - COS1, ... 5 - COS5) (HIGHEST); 3-bit client num.
-     * TODO_ETS - Should be done by reset value or init tool
-     */
-    if (port)
-        /* 0x688 (|011|0 10|00 1|000) */
-        REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
-    else
-        /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
-        REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
-
-    /* TODO_ETS - Should be done by reset value or init tool */
-    if (port)
-        /* 0x688 (|011|0 10|00 1|000) */
-        REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
-    else
-        /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
-        REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
-
-    REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
-           PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
-
-    REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
-           PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
-
-    REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
-           PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
-    /*
-     * In 2 port mode port0 has COS0-5 that can be used for WFQ.
-     * In 4 port mode port1 has COS0-2 that can be used for WFQ.
-     */
-    if (0 == port) {
-        base_weight = PBF_REG_COS0_WEIGHT_P0;
-        max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
-    } else {
-        base_weight = PBF_REG_COS0_WEIGHT_P1;
-        max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
-    }
-
-    for (i = 0; i < max_cos; i++)
-        REG_WR(bp, base_weight + (0x4 * i), 0);
-
-    bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
-}
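The 0x688 and 0x2C688 constants written just above pack one 3-bit client number per arbitration entry, LSB first, exactly as the bit-group comments sketch. A small stand-alone decoder (plain C, not driver code; the per-port field counts follow those comments) makes the identity mapping visible:

    #include <stdio.h>

    /* Unpack 3-bit client-number fields, LSB first, as used for the
     * PBF_REG_ETS_ARB_PRIORITY_CLIENT_Px / ..._CLIENT_CREDIT_MAP_Px
     * values above. */
    static void unpack3(unsigned int val, int fields)
    {
        int i;

        for (i = 0; i < fields; i++)
            printf("entry %d -> client %u\n",
                   i, (val >> (3 * i)) & 0x7);
    }

    int main(void)
    {
        unpack3(0x688, 4);      /* port 1: clients 0,1,2,3 */
        unpack3(0x2C688, 6);    /* port 0: clients 0..5 */
        return 0;
    }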
-/******************************************************************************
-* Description:
-*    E3B0 disable will basically return the values to their init values.
-******************************************************************************/
-static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
-                   const struct link_vars *vars)
-{
-    struct bnx2x *bp = params->bp;
-
-    if (!CHIP_IS_E3B0(bp)) {
-        DP(NETIF_MSG_LINK,
-           "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
-        return -EINVAL;
-    }
-
-    bnx2x_ets_e3b0_nig_disabled(params, vars);
-
-    bnx2x_ets_e3b0_pbf_disabled(params);
-
-    return 0;
-}
-
-/******************************************************************************
-* Description:
-*    Disable will basically return the values to their init values.
-******************************************************************************/
-int bnx2x_ets_disabled(struct link_params *params,
-              struct link_vars *vars)
-{
-    struct bnx2x *bp = params->bp;
-    int bnx2x_status = 0;
-
-    if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
-        bnx2x_ets_e2e3a0_disabled(params);
-    else if (CHIP_IS_E3B0(bp))
-        bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
-    else {
-        DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
-        return -EINVAL;
-    }
-
-    return bnx2x_status;
-}
-
-/******************************************************************************
-* Description:
-*    Set the COS mapping to SP and BW; up to this point none of the COS
-*    entries are set as SP or BW.
-******************************************************************************/
-static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
-                  const struct bnx2x_ets_params *ets_params,
-                  const u8 cos_sp_bitmap,
-                  const u8 cos_bw_bitmap)
-{
-    struct bnx2x *bp = params->bp;
-    const u8 port = params->port;
-    const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
-    const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
-    const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
-    const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
-
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
-        NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
-
-    REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
-        PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
-
-    REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
-        NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
-        nig_cli_subject2wfq_bitmap);
-
-    REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
-        PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
-        pbf_cli_subject2wfq_bitmap);
-
-    return 0;
-}
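The BW (WFQ) path that follows scales each COS's credit weight by its share of the total bandwidth: weight = bw * min_w_val / total_bw. A stand-alone sketch of that proportionality (plain C; the 20G min_w_val and the percentage split are made-up sample inputs):

    #include <stdio.h>

    /* Mirrors the weight arithmetic in bnx2x_ets_e3b0_set_cos_bw()
     * below: each COS gets a credit weight proportional to its share
     * of total_bw. */
    int main(void)
    {
        const unsigned int min_w_val_nig = 2720;    /* 20G case */
        const unsigned int total_bw = 100;
        const unsigned int bw[3] = { 50, 30, 20 };  /* sample per-COS % */
        int i;

        for (i = 0; i < 3; i++)
            printf("COS%d weight = %u\n",
                   i, bw[i] * min_w_val_nig / total_bw);
        return 0;
    }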
-/******************************************************************************
-* Description:
-*    This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
-*    are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
-******************************************************************************/
-static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
-                     const u8 cos_entry,
-                     const u32 min_w_val_nig,
-                     const u32 min_w_val_pbf,
-                     const u16 total_bw,
-                     const u8 bw,
-                     const u8 port)
-{
-    u32 nig_reg_adress_crd_weight = 0;
-    u32 pbf_reg_adress_crd_weight = 0;
-    /* Calculate and set BW for this COS */
-    const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
-    const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
-
-    switch (cos_entry) {
-    case 0:
-        nig_reg_adress_crd_weight =
-            (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
-                 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
-        pbf_reg_adress_crd_weight = (port) ?
-            PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
-        break;
-    case 1:
-        nig_reg_adress_crd_weight = (port) ?
-            NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
-            NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
-        pbf_reg_adress_crd_weight = (port) ?
-            PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
-        break;
-    case 2:
-        nig_reg_adress_crd_weight = (port) ?
-            NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
-            NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
-
-        pbf_reg_adress_crd_weight = (port) ?
-            PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
-        break;
-    case 3:
-        if (port)
-            return -EINVAL;
-        nig_reg_adress_crd_weight =
-            NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
-        pbf_reg_adress_crd_weight =
-            PBF_REG_COS3_WEIGHT_P0;
-        break;
-    case 4:
-        if (port)
-            return -EINVAL;
-        nig_reg_adress_crd_weight =
-            NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
-        pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
-        break;
-    case 5:
-        if (port)
-            return -EINVAL;
-        nig_reg_adress_crd_weight =
-            NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
-        pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
-        break;
-    }
-
-    REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
-
-    REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
-
-    return 0;
-}
-/******************************************************************************
-* Description:
-*    Calculate the total BW. A value of 0 isn't legal.
-******************************************************************************/
-static int bnx2x_ets_e3b0_get_total_bw(
-    const struct link_params *params,
-    const struct bnx2x_ets_params *ets_params,
-    u16 *total_bw)
-{
-    struct bnx2x *bp = params->bp;
-    u8 cos_idx = 0;
-
-    *total_bw = 0;
-    /* Calculate total BW requested */
-    for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
-        if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
-
-            if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
-                DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
-                           "was set to 0\n");
-                return -EINVAL;
-            }
-            *total_bw +=
-                ets_params->cos[cos_idx].params.bw_params.bw;
-        }
-    }
-
-    /* Check total BW is valid */
-    if ((100 != *total_bw) || (0 == *total_bw)) {
-        if (0 == *total_bw) {
-            DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW "
-                       "shouldn't be 0\n");
-            return -EINVAL;
-        }
-        DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW should be "
-                   "100\n");
-        /*
-         * We can handle a case where the BW isn't 100; this can
-         * happen if the TCs are joined.
-         */
-    }
-    return 0;
-}
-
-/******************************************************************************
-* Description:
-*    Invalidate all the sp_pri_to_cos entries.
-******************************************************************************/
-static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
-{
-    u8 pri = 0;
-    for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
-        sp_pri_to_cos[pri] = DCBX_INVALID_COS;
-}
-/******************************************************************************
-* Description:
-*    Validate and record the pri -> cos_entry mapping in sp_pri_to_cos;
-*    the ARB_PRIORITY_CLIENT registers themselves are written later by
-*    bnx2x_ets_e3b0_sp_set_pri_cli_reg().
-******************************************************************************/
-static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
-                        u8 *sp_pri_to_cos, const u8 pri,
-                        const u8 cos_entry)
-{
-    struct bnx2x *bp = params->bp;
-    const u8 port = params->port;
-    const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
-                       DCBX_E3B0_MAX_NUM_COS_PORT0;
-
-    if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
-        DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
-                   "parameter: there can't be two COS's with "
-                   "the same strict pri\n");
-        return -EINVAL;
-    }
-
-    if (pri > max_num_of_cos) {
-        DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
-                   "parameter: illegal strict priority\n");
-        return -EINVAL;
-    }
-
-    sp_pri_to_cos[pri] = cos_entry;
-    return 0;
-}
-
-/******************************************************************************
-* Description:
-*    Returns the correct value according to COS and priority in
-*    the sp_pri_cli register.
-******************************************************************************/
-static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
-                     const u8 pri_set,
-                     const u8 pri_offset,
-                     const u8 entry_size)
-{
-    u64 pri_cli_nig = 0;
-    pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
-                            (pri_set + pri_offset));
-
-    return pri_cli_nig;
-}
-/******************************************************************************
-* Description:
-*    Returns the correct value according to COS and priority in the
-*    sp_pri_cli register for NIG.
-******************************************************************************/
-static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
-{
-    /* MCP, DBG0 and DBG1 always have higher strict priority */
-    const u8 nig_cos_offset = 3;
-    const u8 nig_pri_offset = 3;
-
-    return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
-                         nig_pri_offset, 4);
-}
-/******************************************************************************
-* Description:
-*    Returns the correct value according to COS and priority in the
-*    sp_pri_cli register for PBF.
-******************************************************************************/
-static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
-{
-    const u8 pbf_cos_offset = 0;
-    const u8 pbf_pri_offset = 0;
-
-    return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
-                         pbf_pri_offset, 3);
-}
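For the NIG variant above, the packing works out to (cos + 3) shifted into a 4-bit field at slot (pri_set + 3), with slots 0-2 pre-seeded as 0x210 for MCP/DBG0/DBG1. A small stand-alone check of that encoding (plain C; the two-COS mapping is a made-up example):

    #include <stdio.h>

    /* Mirrors bnx2x_e3b0_sp_get_pri_cli_reg_nig() above */
    static unsigned long long pri_cli_nig(unsigned int cos,
                                          unsigned int pri_set)
    {
        return (unsigned long long)(cos + 3) << (4 * (pri_set + 3));
    }

    int main(void)
    {
        /* COS1 as the highest strict priority, then COS0 */
        unsigned long long reg = 0x210ULL;

        reg |= pri_cli_nig(1, 0);
        reg |= pri_cli_nig(0, 1);
        printf("ARB_PRIORITY_CLIENT2 = 0x%llx\n", reg);  /* 0x34210 */
        return 0;
    }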
-
-/******************************************************************************
-* Description:
-*    Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
-*    according to sp_pri_to_cos (i.e. which COS has the higher priority).
-******************************************************************************/
-static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
-                         u8 *sp_pri_to_cos)
-{
-    struct bnx2x *bp = params->bp;
-    u8 i = 0;
-    const u8 port = params->port;
-    /* MCP, DBG0 and DBG1 always have higher strict priority */
-    u64 pri_cli_nig = 0x210;
-    u32 pri_cli_pbf = 0x0;
-    u8 pri_set = 0;
-    u8 pri_bitmask = 0;
-    const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
-                       DCBX_E3B0_MAX_NUM_COS_PORT0;
-
-    u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
-
-    /* Set all the strict priority entries first */
-    for (i = 0; i < max_num_of_cos; i++) {
-        if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
-            if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
-                DP(NETIF_MSG_LINK,
-                   "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
-                   "invalid cos entry\n");
-                return -EINVAL;
-            }
-
-            pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
-                sp_pri_to_cos[i], pri_set);
-
-            pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
-                sp_pri_to_cos[i], pri_set);
-            pri_bitmask = 1 << sp_pri_to_cos[i];
-            /* COS is used; remove it from the bitmap. */
-            if (0 == (pri_bitmask & cos_bit_to_set)) {
-                DP(NETIF_MSG_LINK,
-                   "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
-                   "invalid: there can't be two COS's with "
-                   "the same strict pri\n");
-                return -EINVAL;
-            }
-            cos_bit_to_set &= ~pri_bitmask;
-            pri_set++;
-        }
-    }
-
-    /* Set all the non-strict priority entries (i == COS) */
-    for (i = 0; i < max_num_of_cos; i++) {
-        pri_bitmask = 1 << i;
-        /* Check if COS was already used for SP */
-        if (pri_bitmask & cos_bit_to_set) {
-            /* COS wasn't used for SP */
-            pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
-                i, pri_set);
-
-            pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
-                i, pri_set);
-            /* COS is used; remove it from the bitmap. */
-            cos_bit_to_set &= ~pri_bitmask;
-            pri_set++;
-        }
-    }
-
-    if (pri_set != max_num_of_cos) {
-        DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
-                   "entries were set\n");
-        return -EINVAL;
-    }
-
-    if (port) {
-        /* Only 6 usable clients */
-        REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
-               (u32)pri_cli_nig);
-
-        REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
-    } else {
-        /* Only 9 usable clients */
-        const u32 pri_cli_nig_lsb = (u32)(pri_cli_nig);
-        const u32 pri_cli_nig_msb = (u32)((pri_cli_nig >> 32) & 0xF);
-
-        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
-               pri_cli_nig_lsb);
-        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
-               pri_cli_nig_msb);
-
-        REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
-    }
-    return 0;
-}
-
-/******************************************************************************
-* Description:
-*    Configure the COS to ETS according to BW and SP settings.
-******************************************************************************/
-int bnx2x_ets_e3b0_config(const struct link_params *params,
-              const struct link_vars *vars,
-              const struct bnx2x_ets_params *ets_params)
-{
-    struct bnx2x *bp = params->bp;
-    int bnx2x_status = 0;
-    const u8 port = params->port;
-    u16 total_bw = 0;
-    const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
-    const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
-    u8 cos_bw_bitmap = 0;
-    u8 cos_sp_bitmap = 0;
-    u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
-    const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
-                       DCBX_E3B0_MAX_NUM_COS_PORT0;
-    u8 cos_entry = 0;
-
-    if (!CHIP_IS_E3B0(bp)) {
-        DP(NETIF_MSG_LINK,
-           "bnx2x_ets_e3b0_config the chip isn't E3B0\n");
-        return -EINVAL;
-    }
-
-    if (ets_params->num_of_cos > max_num_of_cos) {
-        DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
-                   "isn't supported\n");
-        return -EINVAL;
-    }
-
-    /* Prepare sp strict priority parameters */
-    bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
-
-    /* Prepare BW parameters */
-    bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
-                           &total_bw);
-    if (0 != bnx2x_status) {
-        DP(NETIF_MSG_LINK,
-           "bnx2x_ets_E3B0_config get_total_bw failed\n");
-        return -EINVAL;
-    }
-
-    /*
-     * Upper bound is set according to current link speed (min_w_val
-     * should be the same for upper bound and COS credit val).
-     */
-    bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
-    bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
-
-    for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
-        if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
-            cos_bw_bitmap |= (1 << cos_entry);
-            /*
-             * The function also sets the BW in HW (not the
-             * mapping yet).
-             */
-            bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
-                bp, cos_entry, min_w_val_nig, min_w_val_pbf,
-                total_bw,
-                ets_params->cos[cos_entry].params.bw_params.bw,
-                port);
-        } else if (bnx2x_cos_state_strict ==
-            ets_params->cos[cos_entry].state) {
-            cos_sp_bitmap |= (1 << cos_entry);
-
-            bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
-                params,
-                sp_pri_to_cos,
-                ets_params->cos[cos_entry].params.sp_params.pri,
-                cos_entry);
-
-        } else {
-            DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config cos state "
-                       "not valid\n");
-            return -EINVAL;
-        }
-        if (0 != bnx2x_status) {
-            DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw "
-                       "failed\n");
-            return bnx2x_status;
-        }
-    }
-
-    /* Set SP register (which COS has higher priority) */
-    bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
-                             sp_pri_to_cos);
-    if (0 != bnx2x_status) {
-        DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg "
-                   "failed\n");
-        return bnx2x_status;
-    }
-
-    /* Set client mapping of BW and strict */
-    bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
-                          cos_sp_bitmap,
-                          cos_bw_bitmap);
-    if (0 != bnx2x_status) {
-        DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
-        return bnx2x_status;
-    }
-    return 0;
-}
-static void bnx2x_ets_bw_limit_common(const struct link_params *params)
-{
-    /* ETS enabled BW limit configuration */
-    struct bnx2x *bp = params->bp;
-    DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
-    /*
-     * Defines which entries (clients) are subjected to WFQ arbitration:
-     * COS0 0x8
-     * COS1 0x10
-     */
-    REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
-    /*
-     * Mapping between the ARB_CREDIT_WEIGHT registers and actual
-     * client numbers (WEIGHT_0 does not actually have to represent
-     * client 0):
-     *  PRI4    | PRI3    | PRI2    | PRI1    | PRI0
-     *  cos1-001  cos0-000  dbg1-100  dbg0-011  MCP-010
-     */
-    REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
-
-    REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
-           ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-    REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
-           ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-
-    /* ETS mode enabled */
-    REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
-
-    /* Defines the number of consecutive slots for the strict priority */
-    REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-    /*
-     * Bitmap of 5bits length. Each bit specifies whether the entry behaves
-     * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
-     * entry, 4 - COS1 entry.
-     *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
-     *  bit4   bit3   bit2     bit1     bit0
-     * MCP and debug are strict
-     */
-    REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
-
-    /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter. */
-    REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
-           ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-    REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
-           ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
-}
-
-void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
-            const u32 cos1_bw)
-{
-    /* ETS enabled BW limit configuration */
-    struct bnx2x *bp = params->bp;
-    const u32 total_bw = cos0_bw + cos1_bw;
-    u32 cos0_credit_weight = 0;
-    u32 cos1_credit_weight = 0;
-
-    DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
-
-    if ((0 == total_bw) ||
-        (0 == cos0_bw) ||
-        (0 == cos1_bw)) {
-        DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
-        return;
-    }
-
-    cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
-        total_bw;
-    cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
-        total_bw;
-
-    bnx2x_ets_bw_limit_common(params);
-
-    REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
-    REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
-
-    REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
-    REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
-}
-
-int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
-{
-    /* ETS enabled strict configuration */
-    struct bnx2x *bp = params->bp;
-    u32 val = 0;
-
-    DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
-    /*
-     * Bitmap of 5bits length. Each bit specifies whether the entry behaves
-     * as strict. Bits 0,1,2 - debug and management entries,
-     * 3 - COS0 entry, 4 - COS1 entry.
-     *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
-     *  bit4   bit3   bit2     bit1     bit0
-     * MCP and debug are strict
-     */
-    REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
-    /*
-     * For strict priority entries defines the number of consecutive slots
-     * for the highest priority.
-     */
-    REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-    /* ETS mode disable */
-    REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
-    /* Defines the number of consecutive slots for the strict priority */
-    REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
-
-    /* Defines the high priority COS number */
-    REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
-
-    /*
-     * Mapping between entry priority to client number (0,1,2 - debug and
-     * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST);
-     * 3-bit client num.
-     *  PRI4 | PRI3 | PRI2 | PRI1 | PRI0
-     *  dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000  (strict_cos == 0)
-     *  dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000  (strict_cos == 1)
-     */
-    val = (0 == strict_cos) ? 0x2318 : 0x22E0;
-    REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
-
-    return 0;
-}
-/******************************************************************/
-/*            PFC section                                         */
-/******************************************************************/
-
-static void bnx2x_update_pfc_xmac(struct link_params *params,
-                  struct link_vars *vars,
-                  u8 is_lb)
-{
-    struct bnx2x *bp = params->bp;
-    u32 xmac_base;
-    u32 pause_val, pfc0_val, pfc1_val;
-
-    /* XMAC base addr */
-    xmac_base = (params->port) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; - - /* Initialize pause and pfc registers */ - pause_val = 0x18000; - pfc0_val = 0xFFFF8000; - pfc1_val = 0x2; - - /* No PFC support */ - if (!(params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED)) { - - /* - * RX flow control - Process pause frame in receive direction - */ - if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) - pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; - - /* - * TX flow control - Send pause packet when buffer is full - */ - if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) - pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; - } else {/* PFC support */ - pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN | - XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN | - XMAC_PFC_CTRL_HI_REG_RX_PFC_EN | - XMAC_PFC_CTRL_HI_REG_TX_PFC_EN; - } - - /* Write pause and PFC registers */ - REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); - REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); - REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); - - - /* Set MAC address for source TX Pause/PFC frames */ - REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO, - ((params->mac_addr[2] << 24) | - (params->mac_addr[3] << 16) | - (params->mac_addr[4] << 8) | - (params->mac_addr[5]))); - REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI, - ((params->mac_addr[0] << 8) | - (params->mac_addr[1]))); - - udelay(30); -} - - -static void bnx2x_emac_get_pfc_stat(struct link_params *params, - u32 pfc_frames_sent[2], - u32 pfc_frames_received[2]) -{ - /* Read pfc statistic */ - struct bnx2x *bp = params->bp; - u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; - u32 val_xon = 0; - u32 val_xoff = 0; - - DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n"); - - /* PFC received frames */ - val_xoff = REG_RD(bp, emac_base + - EMAC_REG_RX_PFC_STATS_XOFF_RCVD); - val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT; - val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD); - val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT; - - pfc_frames_received[0] = val_xon + val_xoff; - - /* PFC received sent */ - val_xoff = REG_RD(bp, emac_base + - EMAC_REG_RX_PFC_STATS_XOFF_SENT); - val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT; - val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT); - val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT; - - pfc_frames_sent[0] = val_xon + val_xoff; -} - -/* Read pfc statistic*/ -void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, - u32 pfc_frames_sent[2], - u32 pfc_frames_received[2]) -{ - /* Read pfc statistic */ - struct bnx2x *bp = params->bp; - - DP(NETIF_MSG_LINK, "pfc statistic\n"); - - if (!vars->link_up) - return; - - if (MAC_TYPE_EMAC == vars->mac_type) { - DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n"); - bnx2x_emac_get_pfc_stat(params, pfc_frames_sent, - pfc_frames_received); - } -} -/******************************************************************/ -/* MAC/PBF section */ -/******************************************************************/ -static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) -{ - u32 mode, emac_base; - /** - * Set clause 45 mode, slow down the MDIO clock to 2.5MHz - * (a value of 49==0x31) and make sure that the AUTO poll is off - */ - - if (CHIP_IS_E2(bp)) - emac_base = GRCBASE_EMAC0; - else - emac_base = (port) ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; - mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); - mode &= ~(EMAC_MDIO_MODE_AUTO_POLL | - EMAC_MDIO_MODE_CLOCK_CNT); - if (USES_WARPCORE(bp)) - mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); - else - mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); - - mode |= (EMAC_MDIO_MODE_CLAUSE_45); - REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode); - - udelay(40); -} - -static void bnx2x_emac_init(struct link_params *params, - struct link_vars *vars) -{ - /* reset and unreset the emac core */ - struct bnx2x *bp = params->bp; - u8 port = params->port; - u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; - u32 val; - u16 timeout; - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); - udelay(5); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); - - /* init emac - use read-modify-write */ - /* self clear reset */ - val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); - EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); - - timeout = 200; - do { - val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); - DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val); - if (!timeout) { - DP(NETIF_MSG_LINK, "EMAC timeout!\n"); - return; - } - timeout--; - } while (val & EMAC_MODE_RESET); - bnx2x_set_mdio_clk(bp, params->chip_id, port); - /* Set mac address */ - val = ((params->mac_addr[0] << 8) | - params->mac_addr[1]); - EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val); - - val = ((params->mac_addr[2] << 24) | - (params->mac_addr[3] << 16) | - (params->mac_addr[4] << 8) | - params->mac_addr[5]); - EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val); -} - -static void bnx2x_set_xumac_nig(struct link_params *params, - u16 tx_pause_en, - u8 enable) -{ - struct bnx2x *bp = params->bp; - - REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN, - enable); - REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN, - enable); - REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN : - NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); -} - -static void bnx2x_umac_enable(struct link_params *params, - struct link_vars *vars, u8 lb) -{ - u32 val; - u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; - struct bnx2x *bp = params->bp; - /* Reset UMAC */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); - usleep_range(1000, 1000); - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); - - DP(NETIF_MSG_LINK, "enabling UMAC\n"); - - /** - * This register determines on which events the MAC will assert - * error on the i/f to the NIG along w/ EOP. - */ - - /** - * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK + - * params->port*0x14, 0xfffff. 
- */ - /* This register opens the gate for the UMAC despite its name */ - REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); - - val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN | - UMAC_COMMAND_CONFIG_REG_PAD_EN | - UMAC_COMMAND_CONFIG_REG_SW_RESET | - UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK; - switch (vars->line_speed) { - case SPEED_10: - val |= (0<<2); - break; - case SPEED_100: - val |= (1<<2); - break; - case SPEED_1000: - val |= (2<<2); - break; - case SPEED_2500: - val |= (3<<2); - break; - default: - DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n", - vars->line_speed); - break; - } - if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) - val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE; - - if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) - val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE; - - REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); - udelay(50); - - /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ - REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, - ((params->mac_addr[2] << 24) | - (params->mac_addr[3] << 16) | - (params->mac_addr[4] << 8) | - (params->mac_addr[5]))); - REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, - ((params->mac_addr[0] << 8) | - (params->mac_addr[1]))); - - /* Enable RX and TX */ - val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN; - val |= UMAC_COMMAND_CONFIG_REG_TX_ENA | - UMAC_COMMAND_CONFIG_REG_RX_ENA; - REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); - udelay(50); - - /* Remove SW Reset */ - val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET; - - /* Check loopback mode */ - if (lb) - val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; - REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); - - /* - * Maximum Frame Length (RW). Defines a 14-Bit maximum frame - * length used by the MAC receive logic to check frames. - */ - REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); - bnx2x_set_xumac_nig(params, - ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); - vars->mac_type = MAC_TYPE_UMAC; - -} - -static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) -{ - u32 port4mode_ovwr_val; - /* Check 4-port override enabled */ - port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); - if (port4mode_ovwr_val & (1<<0)) { - /* Return 4-port mode override value */ - return ((port4mode_ovwr_val & (1<<1)) == (1<<1)); - } - /* Return 4-port mode from input pin */ - return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN); -} - -/* Define the XMAC mode */ -static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed) -{ - u32 is_port4mode = bnx2x_is_4_port_mode(bp); - - /** - * In 4-port mode, need to set the mode only once, so if XMAC is - * already out of reset, it means the mode has already been set, - * and it must not* reset the XMAC again, since it controls both - * ports of the path - **/ - - if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) & - MISC_REGISTERS_RESET_REG_2_XMAC)) { - DP(NETIF_MSG_LINK, "XMAC already out of reset" - " in 4-port mode\n"); - return; - } - - /* Hard reset */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - MISC_REGISTERS_RESET_REG_2_XMAC); - usleep_range(1000, 1000); - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - MISC_REGISTERS_RESET_REG_2_XMAC); - if (is_port4mode) { - DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n"); - - /* Set the number of ports on the system side to up to 2 */ - REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); - - /* Set the number of ports on the Warp Core to 10G */ - REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); - } else { - /* Set the number of ports on the system side to 1 */ - REG_WR(bp, 
MISC_REG_XMAC_CORE_PORT_MODE, 0); - if (max_speed == SPEED_10000) { - DP(NETIF_MSG_LINK, "Init XMAC to 10G x 1" - " port per path\n"); - /* Set the number of ports on the Warp Core to 10G */ - REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); - } else { - DP(NETIF_MSG_LINK, "Init XMAC to 20G x 2 ports" - " per path\n"); - /* Set the number of ports on the Warp Core to 20G */ - REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1); - } - } - /* Soft reset */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); - usleep_range(1000, 1000); - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); - -} - -static void bnx2x_xmac_disable(struct link_params *params) -{ - u8 port = params->port; - struct bnx2x *bp = params->bp; - u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; - - if (REG_RD(bp, MISC_REG_RESET_REG_2) & - MISC_REGISTERS_RESET_REG_2_XMAC) { - /* - * Send an indication to change the state in the NIG back to XON - * Clearing this bit enables the next set of this bit to get - * rising edge - */ - pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI); - REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, - (pfc_ctrl & ~(1<<1))); - REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, - (pfc_ctrl | (1<<1))); - DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); - REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); - usleep_range(1000, 1000); - bnx2x_set_xumac_nig(params, 0, 0); - REG_WR(bp, xmac_base + XMAC_REG_CTRL, - XMAC_CTRL_REG_SOFT_RESET); - } -} - -static int bnx2x_xmac_enable(struct link_params *params, - struct link_vars *vars, u8 lb) -{ - u32 val, xmac_base; - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "enabling XMAC\n"); - - xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; - - bnx2x_xmac_init(bp, vars->line_speed); - - /* - * This register determines on which events the MAC will assert - * error on the i/f to the NIG along w/ EOP. - */ - - /* - * This register tells the NIG whether to send traffic to UMAC - * or XMAC - */ - REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); - - /* Set Max packet size */ - REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); - - /* CRC append for Tx packets */ - REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800); - - /* update PFC */ - bnx2x_update_pfc_xmac(params, vars, 0); - - /* Enable TX and RX */ - val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; - - /* Check loopback mode */ - if (lb) - val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; - REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); - bnx2x_set_xumac_nig(params, - ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); - - vars->mac_type = MAC_TYPE_XMAC; - - return 0; -} -static int bnx2x_emac_enable(struct link_params *params, - struct link_vars *vars, u8 lb) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0;
-    u32 val;
-
-    DP(NETIF_MSG_LINK, "enabling EMAC\n");
-
-    /* Disable BMAC */
-    REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-           (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
-    /* enable emac and not bmac */
-    REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
-
-    /* ASIC */
-    if (vars->phy_flags & PHY_XGXS_FLAG) {
-        u32 ser_lane = ((params->lane_config &
-                 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-                PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
-
-        DP(NETIF_MSG_LINK, "XGXS\n");
-        /* select the master lanes (out of 0-3) */
-        REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
-        /* select XGXS */
-        REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
-
-    } else { /* SerDes */
-        DP(NETIF_MSG_LINK, "SerDes\n");
-        /* select SerDes */
-        REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
-    }
-
-    bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
-              EMAC_RX_MODE_RESET);
-    bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
-              EMAC_TX_MODE_RESET);
-
-    if (CHIP_REV_IS_SLOW(bp)) {
-        /* config GMII mode */
-        val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
-        EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
-    } else { /* ASIC */
-        /* pause enable/disable */
-        bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
-                   EMAC_RX_MODE_FLOW_EN);
-
-        bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
-                   (EMAC_TX_MODE_EXT_PAUSE_EN |
-                    EMAC_TX_MODE_FLOW_EN));
-        if (!(params->feature_config_flags &
-              FEATURE_CONFIG_PFC_ENABLED)) {
-            if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
-                bnx2x_bits_en(bp, emac_base +
-                          EMAC_REG_EMAC_RX_MODE,
-                          EMAC_RX_MODE_FLOW_EN);
-
-            if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
-                bnx2x_bits_en(bp, emac_base +
-                          EMAC_REG_EMAC_TX_MODE,
-                          (EMAC_TX_MODE_EXT_PAUSE_EN |
-                           EMAC_TX_MODE_FLOW_EN));
-        } else
-            bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
-                      EMAC_TX_MODE_FLOW_EN);
-    }
-
-    /* KEEP_VLAN_TAG, promiscuous */
-    val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
-    val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
-
-    /*
-     * Setting this bit causes MAC control frames (except for pause
-     * frames) to be passed on for processing. This setting has no
-     * effect on the operation of the pause frames. This bit affects
-     * all packets regardless of RX Parser packet sorting logic.
-     * Turn the PFC off to make sure we are in Xon state before
-     * enabling it.
- */ - EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); - if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { - DP(NETIF_MSG_LINK, "PFC is enabled\n"); - /* Enable PFC again */ - EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, - EMAC_REG_RX_PFC_MODE_RX_EN | - EMAC_REG_RX_PFC_MODE_TX_EN | - EMAC_REG_RX_PFC_MODE_PRIORITIES); - - EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM, - ((0x0101 << - EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) | - (0x00ff << - EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT))); - val |= EMAC_RX_MODE_KEEP_MAC_CONTROL; - } - EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val); - - /* Set Loopback */ - val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); - if (lb) - val |= 0x810; - else - val &= ~0x810; - EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); - - /* enable emac */ - REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); - - /* enable emac for jumbo packets */ - EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, - (EMAC_RX_MTU_SIZE_JUMBO_ENA | - (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); - - /* strip CRC */ - REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1); - - /* disable the NIG in/out to the bmac */ - REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); - REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); - REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0); - - /* enable the NIG in/out to the emac */ - REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); - val = 0; - if ((params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED) || - (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) - val = 1; - - REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); - REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); - - REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0); - - vars->mac_type = MAC_TYPE_EMAC; - return 0; -} - -static void bnx2x_update_pfc_bmac1(struct link_params *params, - struct link_vars *vars) -{ - u32 wb_data[2]; - struct bnx2x *bp = params->bp; - u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM : - NIG_REG_INGRESS_BMAC0_MEM; - - u32 val = 0x14; - if ((!(params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED)) && - (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) - /* Enable BigMAC to react on received Pause packets */ - val |= (1<<5); - wb_data[0] = val; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); - - /* tx control */ - val = 0xc0; - if (!(params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED) && - (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) - val |= 0x800000; - wb_data[0] = val; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2); -} - -static void bnx2x_update_pfc_bmac2(struct link_params *params, - struct link_vars *vars, - u8 is_lb) -{ - /* - * Set rx control: Strip CRC and enable BigMAC to relay - * control packets to the system as well - */ - u32 wb_data[2]; - struct bnx2x *bp = params->bp; - u32 bmac_addr = params->port ? 
NIG_REG_INGRESS_BMAC1_MEM :
-                  NIG_REG_INGRESS_BMAC0_MEM;
-    u32 val = 0x14;
-
-    if ((!(params->feature_config_flags &
-          FEATURE_CONFIG_PFC_ENABLED)) &&
-        (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
-        /* Enable BigMAC to react on received Pause packets */
-        val |= (1<<5);
-    wb_data[0] = val;
-    wb_data[1] = 0;
-    REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
-    udelay(30);
-
-    /* Tx control */
-    val = 0xc0;
-    if (!(params->feature_config_flags &
-          FEATURE_CONFIG_PFC_ENABLED) &&
-        (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
-        val |= 0x800000;
-    wb_data[0] = val;
-    wb_data[1] = 0;
-    REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
-
-    if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
-        DP(NETIF_MSG_LINK, "PFC is enabled\n");
-        /* Enable PFC RX & TX & STATS and set 8 COS */
-        wb_data[0] = 0x0;
-        wb_data[0] |= (1<<0);  /* RX */
-        wb_data[0] |= (1<<1);  /* TX */
-        wb_data[0] |= (1<<2);  /* Force initial Xon */
-        wb_data[0] |= (1<<3);  /* 8 cos */
-        wb_data[0] |= (1<<5);  /* STATS */
-        wb_data[1] = 0;
-        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
-                wb_data, 2);
-        /* Clear the force Xon */
-        wb_data[0] &= ~(1<<2);
-    } else {
-        DP(NETIF_MSG_LINK, "PFC is disabled\n");
-        /* Disable PFC RX & TX & STATS and set 8 COS */
-        wb_data[0] = 0x8;
-        wb_data[1] = 0;
-    }
-
-    REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
-
-    /*
-     * Set Time (base unit is 512 bit times) between automatic
-     * re-sending of PP packets and enable automatic re-send of
-     * Per-Priority Packets as long as pp_gen is asserted and
-     * pp_disable is low.
-     */
-    val = 0x8000;
-    if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
-        val |= (1<<16); /* enable automatic re-send */
-
-    wb_data[0] = val;
-    wb_data[1] = 0;
-    REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
-            wb_data, 2);
-
-    /* mac control */
-    val = 0x3; /* Enable RX and TX */
-    if (is_lb) {
-        val |= 0x4; /* Local loopback */
-        DP(NETIF_MSG_LINK, "enable bmac loopback\n");
-    }
-    /* When PFC is enabled, pass pause frames towards the NIG. 
*/ - if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) - val |= ((1<<6)|(1<<5)); - - wb_data[0] = val; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); -} - - -/* PFC BRB internal port configuration params */ -struct bnx2x_pfc_brb_threshold_val { - u32 pause_xoff; - u32 pause_xon; - u32 full_xoff; - u32 full_xon; -}; - -struct bnx2x_pfc_brb_e3b0_val { - u32 full_lb_xoff_th; - u32 full_lb_xon_threshold; - u32 lb_guarantied; - u32 mac_0_class_t_guarantied; - u32 mac_0_class_t_guarantied_hyst; - u32 mac_1_class_t_guarantied; - u32 mac_1_class_t_guarantied_hyst; -}; - -struct bnx2x_pfc_brb_th_val { - struct bnx2x_pfc_brb_threshold_val pauseable_th; - struct bnx2x_pfc_brb_threshold_val non_pauseable_th; -}; -static int bnx2x_pfc_brb_get_config_params( - struct link_params *params, - struct bnx2x_pfc_brb_th_val *config_val) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n"); - if (CHIP_IS_E2(bp)) { - config_val->pauseable_th.pause_xoff = - PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; - config_val->pauseable_th.pause_xon = - PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE; - config_val->pauseable_th.full_xoff = - PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; - config_val->pauseable_th.full_xon = - PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ - config_val->non_pauseable_th.pause_xoff = - PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.pause_xon = - PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xoff = - PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xon = - PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } else if (CHIP_IS_E3A0(bp)) { - config_val->pauseable_th.pause_xoff = - PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; - config_val->pauseable_th.pause_xon = - PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE; - config_val->pauseable_th.full_xoff = - PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; - config_val->pauseable_th.full_xon = - PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ - config_val->non_pauseable_th.pause_xoff = - PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.pause_xon = - PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xoff = - PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xon = - PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } else if (CHIP_IS_E3B0(bp)) { - if (params->phy[INT_PHY].flags & - FLAGS_4_PORT_MODE) { - config_val->pauseable_th.pause_xoff = - PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; - config_val->pauseable_th.pause_xon = - PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE; - config_val->pauseable_th.full_xoff = - PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; - config_val->pauseable_th.full_xon = - PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ - config_val->non_pauseable_th.pause_xoff = - PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.pause_xon = - PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xoff = - PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xon = - PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } else { - config_val->pauseable_th.pause_xoff = - PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; - config_val->pauseable_th.pause_xon = - PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE; - config_val->pauseable_th.full_xoff = - PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; - config_val->pauseable_th.full_xon = - PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; - /* non pause able*/ 
- config_val->non_pauseable_th.pause_xoff = - PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.pause_xon = - PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xoff = - PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; - config_val->non_pauseable_th.full_xon = - PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE; - } - } else - return -EINVAL; - - return 0; -} - - -static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params, - struct bnx2x_pfc_brb_e3b0_val - *e3b0_val, - u32 cos0_pauseable, - u32 cos1_pauseable) -{ - if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) { - e3b0_val->full_lb_xoff_th = - PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR; - e3b0_val->full_lb_xon_threshold = - PFC_E3B0_4P_BRB_FULL_LB_XON_THR; - e3b0_val->lb_guarantied = - PFC_E3B0_4P_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART; - e3b0_val->mac_0_class_t_guarantied_hyst = - PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST; - e3b0_val->mac_1_class_t_guarantied = - PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART; - e3b0_val->mac_1_class_t_guarantied_hyst = - PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST; - } else { - e3b0_val->full_lb_xoff_th = - PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR; - e3b0_val->full_lb_xon_threshold = - PFC_E3B0_2P_BRB_FULL_LB_XON_THR; - e3b0_val->mac_0_class_t_guarantied_hyst = - PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST; - e3b0_val->mac_1_class_t_guarantied = - PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART; - e3b0_val->mac_1_class_t_guarantied_hyst = - PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST; - - if (cos0_pauseable != cos1_pauseable) { - /* nonpauseable= Lossy + pauseable = Lossless*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_MIX_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART; - } else if (cos0_pauseable) { - /* Lossless +Lossless*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART; - } else { - /* Lossy +Lossy*/ - e3b0_val->lb_guarantied = - PFC_E3B0_2P_NON_PAUSE_LB_GUART; - e3b0_val->mac_0_class_t_guarantied = - PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART; - } - } -} -static int bnx2x_update_pfc_brb(struct link_params *params, - struct link_vars *vars, - struct bnx2x_nig_brb_pfc_port_params - *pfc_params) -{ - struct bnx2x *bp = params->bp; - struct bnx2x_pfc_brb_th_val config_val = { {0} }; - struct bnx2x_pfc_brb_threshold_val *reg_th_config = - &config_val.pauseable_th; - struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0}; - int set_pfc = params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED; - int bnx2x_status = 0; - u8 port = params->port; - - /* default - pause configuration */ - reg_th_config = &config_val.pauseable_th; - bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val); - if (0 != bnx2x_status) - return bnx2x_status; - - if (set_pfc && pfc_params) - /* First COS */ - if (!pfc_params->cos0_pauseable) - reg_th_config = &config_val.non_pauseable_th; - /* - * The number of free blocks below which the pause signal to class 0 - * of MAC #n is asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 : - BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , - reg_th_config->pause_xoff); - /* - * The number of free blocks above which the pause signal to class 0 - * of MAC #n is de-asserted. n=0,1 - */ - REG_WR(bp, (port) ? 
BRB1_REG_PAUSE_0_XON_THRESHOLD_1 : - BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon); - /* - * The number of free blocks below which the full signal to class 0 - * of MAC #n is asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 : - BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff); - /* - * The number of free blocks above which the full signal to class 0 - * of MAC #n is de-asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : - BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon); - - if (set_pfc && pfc_params) { - /* Second COS */ - if (pfc_params->cos1_pauseable) - reg_th_config = &config_val.pauseable_th; - else - reg_th_config = &config_val.non_pauseable_th; - /* - * The number of free blocks below which the pause signal to - * class 1 of MAC #n is asserted. n=0,1 - **/ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : - BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, - reg_th_config->pause_xoff); - /* - * The number of free blocks above which the pause signal to - * class 1 of MAC #n is de-asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : - BRB1_REG_PAUSE_1_XON_THRESHOLD_0, - reg_th_config->pause_xon); - /* - * The number of free blocks below which the full signal to - * class 1 of MAC #n is asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : - BRB1_REG_FULL_1_XOFF_THRESHOLD_0, - reg_th_config->full_xoff); - /* - * The number of free blocks above which the full signal to - * class 1 of MAC #n is de-asserted. n=0,1 - */ - REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : - BRB1_REG_FULL_1_XON_THRESHOLD_0, - reg_th_config->full_xon); - - - if (CHIP_IS_E3B0(bp)) { - /*Should be done by init tool */ - /* - * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD - * reset value - * 944 - */ - - /** - * The hysteresis on the guarantied buffer space for the Lb port - * before signaling XON. - **/ - REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80); - - bnx2x_pfc_brb_get_e3b0_config_params( - params, - &e3b0_val, - pfc_params->cos0_pauseable, - pfc_params->cos1_pauseable); - /** - * The number of free blocks below which the full signal to the - * LB port is asserted. - */ - REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, - e3b0_val.full_lb_xoff_th); - /** - * The number of free blocks above which the full signal to the - * LB port is de-asserted. - */ - REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, - e3b0_val.full_lb_xon_threshold); - /** - * The number of blocks guarantied for the MAC #n port. n=0,1 - */ - - /*The number of blocks guarantied for the LB port.*/ - REG_WR(bp, BRB1_REG_LB_GUARANTIED, - e3b0_val.lb_guarantied); - - /** - * The number of blocks guarantied for the MAC #n port. - */ - REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, - 2 * e3b0_val.mac_0_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, - 2 * e3b0_val.mac_1_class_t_guarantied); - /** - * The number of blocks guarantied for class #t in MAC0. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, - e3b0_val.mac_0_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, - e3b0_val.mac_0_class_t_guarantied); - /** - * The hysteresis on the guarantied buffer space for class in - * MAC0. 
t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, - e3b0_val.mac_0_class_t_guarantied_hyst); - REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, - e3b0_val.mac_0_class_t_guarantied_hyst); - - /** - * The number of blocks guarantied for class #t in MAC1.t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, - e3b0_val.mac_1_class_t_guarantied); - REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, - e3b0_val.mac_1_class_t_guarantied); - /** - * The hysteresis on the guarantied buffer space for class #t - * in MAC1. t=0,1 - */ - REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, - e3b0_val.mac_1_class_t_guarantied_hyst); - REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST, - e3b0_val.mac_1_class_t_guarantied_hyst); - - } - - } - - return bnx2x_status; -} - -/****************************************************************************** -* Description: -* This function is needed because NIG ARB_CREDIT_WEIGHT_X are -* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. -******************************************************************************/ -int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, - u8 cos_entry, - u32 priority_mask, u8 port) -{ - u32 nig_reg_rx_priority_mask_add = 0; - - switch (cos_entry) { - case 0: - nig_reg_rx_priority_mask_add = (port) ? - NIG_REG_P1_RX_COS0_PRIORITY_MASK : - NIG_REG_P0_RX_COS0_PRIORITY_MASK; - break; - case 1: - nig_reg_rx_priority_mask_add = (port) ? - NIG_REG_P1_RX_COS1_PRIORITY_MASK : - NIG_REG_P0_RX_COS1_PRIORITY_MASK; - break; - case 2: - nig_reg_rx_priority_mask_add = (port) ? - NIG_REG_P1_RX_COS2_PRIORITY_MASK : - NIG_REG_P0_RX_COS2_PRIORITY_MASK; - break; - case 3: - if (port) - return -EINVAL; - nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK; - break; - case 4: - if (port) - return -EINVAL; - nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK; - break; - case 5: - if (port) - return -EINVAL; - nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK; - break; - } - - REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask); - - return 0; -} -static void bnx2x_update_mng(struct link_params *params, u32 link_status) -{ - struct bnx2x *bp = params->bp; - - REG_WR(bp, params->shmem_base + - offsetof(struct shmem_region, - port_mb[params->port].link_status), link_status); -} - -static void bnx2x_update_pfc_nig(struct link_params *params, - struct link_vars *vars, - struct bnx2x_nig_brb_pfc_port_params *nig_params) -{ - u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0; - u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0; - u32 pkt_priority_to_cos = 0; - struct bnx2x *bp = params->bp; - u8 port = params->port; - - int set_pfc = params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED; - DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); - - /* - * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set - * MAC control frames (that are not pause packets) - * will be forwarded to the XCM. - */ - xcm_mask = REG_RD(bp, - port ? NIG_REG_LLH1_XCM_MASK : - NIG_REG_LLH0_XCM_MASK); - /* - * nig params will override non PFC params, since it's possible to - * do transition from PFC to SAFC - */ - if (set_pfc) { - pause_enable = 0; - llfc_out_en = 0; - llfc_enable = 0; - if (CHIP_IS_E3(bp)) - ppp_enable = 0; - else - ppp_enable = 1; - xcm_mask &= ~(port ? 
NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
- NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
- xcm0_out_en = 0;
- p0_hwpfc_enable = 1;
- } else {
- if (nig_params) {
- llfc_out_en = nig_params->llfc_out_en;
- llfc_enable = nig_params->llfc_enable;
- pause_enable = nig_params->pause_enable;
- } else /* default non-PFC mode - PAUSE */
- pause_enable = 1;
-
- xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
- NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
- xcm0_out_en = 1;
- }
-
- if (CHIP_IS_E3(bp))
- REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
- NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
- REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
- NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
- REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
- NIG_REG_LLFC_ENABLE_0, llfc_enable);
- REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
- NIG_REG_PAUSE_ENABLE_0, pause_enable);
-
- REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
- NIG_REG_PPP_ENABLE_0, ppp_enable);
-
- REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
- NIG_REG_LLH0_XCM_MASK, xcm_mask);
-
- REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
-
- /* output enable for RX_XCM # IF */
- REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
-
- /* HW PFC TX enable */
- REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
-
- if (nig_params) {
- u8 i = 0;
- pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
-
- for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
- bnx2x_pfc_nig_rx_priority_mask(bp, i,
- nig_params->rx_cos_priority_mask[i], port);
-
- REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
- NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
- nig_params->llfc_high_priority_classes);
-
- REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
- NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
- nig_params->llfc_low_priority_classes);
- }
- REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
- NIG_REG_P0_PKT_PRIORITY_TO_COS,
- pkt_priority_to_cos);
-}
-
-int bnx2x_update_pfc(struct link_params *params,
- struct link_vars *vars,
- struct bnx2x_nig_brb_pfc_port_params *pfc_params)
-{
- /*
- * PFC and pause are orthogonal to one another: when PFC is
- * enabled, pause is disabled, and when PFC is disabled, pause is
- * set according to the pause result.
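 *
 * As a rough sketch of that precedence (the helper names below are
 * hypothetical, for illustration only; the real work is done by
 * bnx2x_update_pfc_nig() and bnx2x_update_pfc_brb()):
 *
 *	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
 *		program_per_cos_pfc_thresholds();	- per-COS XOFF/XON
 *	else
 *		program_global_pause(vars->flow_ctrl);	- resolved 802.3x pause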
- */ - u32 val; - struct bnx2x *bp = params->bp; - int bnx2x_status = 0; - u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC); - - if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) - vars->link_status |= LINK_STATUS_PFC_ENABLED; - else - vars->link_status &= ~LINK_STATUS_PFC_ENABLED; - - bnx2x_update_mng(params, vars->link_status); - - /* update NIG params */ - bnx2x_update_pfc_nig(params, vars, pfc_params); - - /* update BRB params */ - bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); - if (0 != bnx2x_status) - return bnx2x_status; - - if (!vars->link_up) - return bnx2x_status; - - DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); - if (CHIP_IS_E3(bp)) - bnx2x_update_pfc_xmac(params, vars, 0); - else { - val = REG_RD(bp, MISC_REG_RESET_REG_2); - if ((val & - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) - == 0) { - DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); - bnx2x_emac_enable(params, vars, 0); - return bnx2x_status; - } - - if (CHIP_IS_E2(bp)) - bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); - else - bnx2x_update_pfc_bmac1(params, vars); - - val = 0; - if ((params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED) || - (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) - val = 1; - REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); - } - return bnx2x_status; -} - - -static int bnx2x_bmac1_enable(struct link_params *params, - struct link_vars *vars, - u8 is_lb) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : - NIG_REG_INGRESS_BMAC0_MEM; - u32 wb_data[2]; - u32 val; - - DP(NETIF_MSG_LINK, "Enabling BigMAC1\n"); - - /* XGXS control */ - wb_data[0] = 0x3c; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, - wb_data, 2); - - /* tx MAC SA */ - wb_data[0] = ((params->mac_addr[2] << 24) | - (params->mac_addr[3] << 16) | - (params->mac_addr[4] << 8) | - params->mac_addr[5]); - wb_data[1] = ((params->mac_addr[0] << 8) | - params->mac_addr[1]); - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); - - /* mac control */ - val = 0x3; - if (is_lb) { - val |= 0x4; - DP(NETIF_MSG_LINK, "enable bmac loopback\n"); - } - wb_data[0] = val; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); - - /* set rx mtu */ - wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); - - bnx2x_update_pfc_bmac1(params, vars); - - /* set tx mtu */ - wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); - - /* set cnt max size */ - wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); - - /* configure safc */ - wb_data[0] = 0x1000200; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, - wb_data, 2); - - return 0; -} - -static int bnx2x_bmac2_enable(struct link_params *params, - struct link_vars *vars, - u8 is_lb) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - u32 bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : - NIG_REG_INGRESS_BMAC0_MEM; - u32 wb_data[2]; - - DP(NETIF_MSG_LINK, "Enabling BigMAC2\n"); - - wb_data[0] = 0; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); - udelay(30); - - /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ - wb_data[0] = 0x3c; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, - wb_data, 2); - - udelay(30); - - /* tx MAC SA */ - wb_data[0] = ((params->mac_addr[2] << 24) | - (params->mac_addr[3] << 16) | - (params->mac_addr[4] << 8) | - params->mac_addr[5]); - wb_data[1] = ((params->mac_addr[0] << 8) | - params->mac_addr[1]); - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, - wb_data, 2); - - udelay(30); - - /* Configure SAFC */ - wb_data[0] = 0x1000200; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, - wb_data, 2); - udelay(30); - - /* set rx mtu */ - wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); - udelay(30); - - /* set tx mtu */ - wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); - udelay(30); - /* set cnt max size */ - wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; - wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); - udelay(30); - bnx2x_update_pfc_bmac2(params, vars, is_lb); - - return 0; -} - -static int bnx2x_bmac_enable(struct link_params *params, - struct link_vars *vars, - u8 is_lb) -{ - int rc = 0; - u8 port = params->port; - struct bnx2x *bp = params->bp; - u32 val; - /* reset and unreset the BigMac */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - msleep(1); - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - - /* enable access for bmac registers */ - REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); - - /* Enable BMAC according to BMAC type*/ - if (CHIP_IS_E2(bp)) - rc = bnx2x_bmac2_enable(params, vars, is_lb); - else - rc = bnx2x_bmac1_enable(params, vars, is_lb); - REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1); - REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0); - REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0); - val = 0; - if ((params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED) || - (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) - val = 1; - REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val); - REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0); - REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0); - REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0); - REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1); - REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1); - - vars->mac_type = MAC_TYPE_BMAC; - return rc; -} - -static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) -{ - u32 bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : - NIG_REG_INGRESS_BMAC0_MEM; - u32 wb_data[2]; - u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); - - /* Only if the bmac is out of reset */ - if (REG_RD(bp, MISC_REG_RESET_REG_2) & - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && - nig_bmac_enable) { - - if (CHIP_IS_E2(bp)) { - /* Clear Rx Enable bit in BMAC_CONTROL register */ - REG_RD_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); - wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; - REG_WR_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); - } else { - /* Clear Rx Enable bit in BMAC_CONTROL register */ - REG_RD_DMAE(bp, bmac_addr + - BIGMAC_REGISTER_BMAC_CONTROL, - wb_data, 2); - wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; - REG_WR_DMAE(bp, bmac_addr + - BIGMAC_REGISTER_BMAC_CONTROL, - wb_data, 2); - } - msleep(1); - } -} - -static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, - u32 line_speed) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - u32 init_crd, crd; - u32 count = 1000; - - /* disable port */ - REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); - - /* wait for init credit */ - init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4); - crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); - DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd); - - while ((init_crd != crd) && count) { - msleep(5); - - crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); - count--; - } - crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); - if (init_crd != crd) { - DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n", - init_crd, crd); - return -EINVAL; - } - - if (flow_ctrl & BNX2X_FLOW_CTRL_RX || - line_speed == SPEED_10 || - line_speed == SPEED_100 || - line_speed == SPEED_1000 || - line_speed == SPEED_2500) { - REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); - /* update threshold */ - REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); - /* update init credit */ - init_crd = 778; /* (800-18-4) */ - - } else { - u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + - ETH_OVREHEAD)/16; - REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); - /* update threshold */ - REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); - /* update init credit */ - switch (line_speed) { - case SPEED_10000: - init_crd = thresh + 553 - 22; - break; - default: - DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", - line_speed); - return -EINVAL; - } - } - REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd); - DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n", - line_speed, init_crd); - - /* probe the credit changes */ - REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1); - msleep(5); - REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0); - - /* enable port */ - REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0); - return 0; -} - -/** - * bnx2x_get_emac_base - retrive emac base address - * - * @bp: driver handle - * @mdc_mdio_access: access type - * @port: port id - * - * This function selects the MDC/MDIO access (through emac0 or - * emac1) depend on the mdc_mdio_access, port, port swapped. Each - * phy has a default access mode, which could also be overridden - * by nvram configuration. 
This parameter, whether this is the - * default phy configuration, or the nvram overrun - * configuration, is passed here as mdc_mdio_access and selects - * the emac_base for the CL45 read/writes operations - */ -static u32 bnx2x_get_emac_base(struct bnx2x *bp, - u32 mdc_mdio_access, u8 port) -{ - u32 emac_base = 0; - switch (mdc_mdio_access) { - case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE: - break; - case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0: - if (REG_RD(bp, NIG_REG_PORT_SWAP)) - emac_base = GRCBASE_EMAC1; - else - emac_base = GRCBASE_EMAC0; - break; - case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1: - if (REG_RD(bp, NIG_REG_PORT_SWAP)) - emac_base = GRCBASE_EMAC0; - else - emac_base = GRCBASE_EMAC1; - break; - case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH: - emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; - break; - case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED: - emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1; - break; - default: - break; - } - return emac_base; - -} - -/******************************************************************/ -/* CL22 access functions */ -/******************************************************************/ -static int bnx2x_cl22_write(struct bnx2x *bp, - struct bnx2x_phy *phy, - u16 reg, u16 val) -{ - u32 tmp, mode; - u8 i; - int rc = 0; - /* Switch to CL22 */ - mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, - mode & ~EMAC_MDIO_MODE_CLAUSE_45); - - /* address */ - tmp = ((phy->addr << 21) | (reg << 16) | val | - EMAC_MDIO_COMM_COMMAND_WRITE_22 | - EMAC_MDIO_COMM_START_BUSY); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); - - for (i = 0; i < 50; i++) { - udelay(10); - - tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); - if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { - udelay(5); - break; - } - } - if (tmp & EMAC_MDIO_COMM_START_BUSY) { - DP(NETIF_MSG_LINK, "write phy register failed\n"); - rc = -EFAULT; - } - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); - return rc; -} - -static int bnx2x_cl22_read(struct bnx2x *bp, - struct bnx2x_phy *phy, - u16 reg, u16 *ret_val) -{ - u32 val, mode; - u16 i; - int rc = 0; - - /* Switch to CL22 */ - mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, - mode & ~EMAC_MDIO_MODE_CLAUSE_45); - - /* address */ - val = ((phy->addr << 21) | (reg << 16) | - EMAC_MDIO_COMM_COMMAND_READ_22 | - EMAC_MDIO_COMM_START_BUSY); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); - - for (i = 0; i < 50; i++) { - udelay(10); - - val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); - if (!(val & EMAC_MDIO_COMM_START_BUSY)) { - *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); - udelay(5); - break; - } - } - if (val & EMAC_MDIO_COMM_START_BUSY) { - DP(NETIF_MSG_LINK, "read phy register failed\n"); - - *ret_val = 0; - rc = -EFAULT; - } - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); - return rc; -} - -/******************************************************************/ -/* CL45 access functions */ -/******************************************************************/ -static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 *ret_val) -{ - u32 val; - u16 i; - int rc = 0; - if (phy->flags & FLAGS_MDC_MDIO_WA_B0) - bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, - EMAC_MDIO_STATUS_10MB); - /* address */ - val = ((phy->addr << 21) | (devad << 16) | reg | - EMAC_MDIO_COMM_COMMAND_ADDRESS | - 
EMAC_MDIO_COMM_START_BUSY); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); - - for (i = 0; i < 50; i++) { - udelay(10); - - val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); - if (!(val & EMAC_MDIO_COMM_START_BUSY)) { - udelay(5); - break; - } - } - if (val & EMAC_MDIO_COMM_START_BUSY) { - DP(NETIF_MSG_LINK, "read phy register failed\n"); - netdev_err(bp->dev, "MDC/MDIO access timeout\n"); - *ret_val = 0; - rc = -EFAULT; - } else { - /* data */ - val = ((phy->addr << 21) | (devad << 16) | - EMAC_MDIO_COMM_COMMAND_READ_45 | - EMAC_MDIO_COMM_START_BUSY); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); - - for (i = 0; i < 50; i++) { - udelay(10); - - val = REG_RD(bp, phy->mdio_ctrl + - EMAC_REG_EMAC_MDIO_COMM); - if (!(val & EMAC_MDIO_COMM_START_BUSY)) { - *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); - break; - } - } - if (val & EMAC_MDIO_COMM_START_BUSY) { - DP(NETIF_MSG_LINK, "read phy register failed\n"); - netdev_err(bp->dev, "MDC/MDIO access timeout\n"); - *ret_val = 0; - rc = -EFAULT; - } - } - /* Work around for E3 A0 */ - if (phy->flags & FLAGS_MDC_MDIO_WA) { - phy->flags ^= FLAGS_DUMMY_READ; - if (phy->flags & FLAGS_DUMMY_READ) { - u16 temp_val; - bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); - } - } - - if (phy->flags & FLAGS_MDC_MDIO_WA_B0) - bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, - EMAC_MDIO_STATUS_10MB); - return rc; -} - -static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 val) -{ - u32 tmp; - u8 i; - int rc = 0; - if (phy->flags & FLAGS_MDC_MDIO_WA_B0) - bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, - EMAC_MDIO_STATUS_10MB); - - /* address */ - - tmp = ((phy->addr << 21) | (devad << 16) | reg | - EMAC_MDIO_COMM_COMMAND_ADDRESS | - EMAC_MDIO_COMM_START_BUSY); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); - - for (i = 0; i < 50; i++) { - udelay(10); - - tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); - if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { - udelay(5); - break; - } - } - if (tmp & EMAC_MDIO_COMM_START_BUSY) { - DP(NETIF_MSG_LINK, "write phy register failed\n"); - netdev_err(bp->dev, "MDC/MDIO access timeout\n"); - rc = -EFAULT; - - } else { - /* data */ - tmp = ((phy->addr << 21) | (devad << 16) | val | - EMAC_MDIO_COMM_COMMAND_WRITE_45 | - EMAC_MDIO_COMM_START_BUSY); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); - - for (i = 0; i < 50; i++) { - udelay(10); - - tmp = REG_RD(bp, phy->mdio_ctrl + - EMAC_REG_EMAC_MDIO_COMM); - if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { - udelay(5); - break; - } - } - if (tmp & EMAC_MDIO_COMM_START_BUSY) { - DP(NETIF_MSG_LINK, "write phy register failed\n"); - netdev_err(bp->dev, "MDC/MDIO access timeout\n"); - rc = -EFAULT; - } - } - /* Work around for E3 A0 */ - if (phy->flags & FLAGS_MDC_MDIO_WA) { - phy->flags ^= FLAGS_DUMMY_READ; - if (phy->flags & FLAGS_DUMMY_READ) { - u16 temp_val; - bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); - } - } - if (phy->flags & FLAGS_MDC_MDIO_WA_B0) - bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, - EMAC_MDIO_STATUS_10MB); - return rc; -} - - -/******************************************************************/ -/* BSC access functions from E3 */ -/******************************************************************/ -static void bnx2x_bsc_module_sel(struct link_params *params) -{ - int idx; - u32 board_cfg, sfp_ctrl; - u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH]; - struct bnx2x *bp = params->bp; - u8 port = params->port; 
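 /*
 * Sketch of the selection done below (a summary of the existing flow,
 * no new behaviour): the two BSC I2C mux select pins are named in the
 * shared board config, their per-port output values come from
 * e3_cmn_pin_cfg, and each pin is then driven via
 *
 *	bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
 */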
- /* Read I2C output PINs */ - board_cfg = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.shared_hw_config.board)); - i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK; - i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >> - SHARED_HW_CFG_E3_I2C_MUX1_SHIFT; - - /* Read I2C output value */ - sfp_ctrl = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].e3_cmn_pin_cfg)); - i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0; - i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0; - DP(NETIF_MSG_LINK, "Setting BSC switch\n"); - for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++) - bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]); -} - -static int bnx2x_bsc_read(struct link_params *params, - struct bnx2x_phy *phy, - u8 sl_devid, - u16 sl_addr, - u8 lc_addr, - u8 xfer_cnt, - u32 *data_array) -{ - u32 val, i; - int rc = 0; - struct bnx2x *bp = params->bp; - - if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) { - DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid); - return -EINVAL; - } - - if (xfer_cnt > 16) { - DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n", - xfer_cnt); - return -EINVAL; - } - bnx2x_bsc_module_sel(params); - - xfer_cnt = 16 - lc_addr; - - /* enable the engine */ - val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); - val |= MCPR_IMC_COMMAND_ENABLE; - REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); - - /* program slave device ID */ - val = (sl_devid << 16) | sl_addr; - REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); - - /* start xfer with 0 byte to update the address pointer ???*/ - val = (MCPR_IMC_COMMAND_ENABLE) | - (MCPR_IMC_COMMAND_WRITE_OP << - MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | - (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); - REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); - - /* poll for completion */ - i = 0; - val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); - while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { - udelay(10); - val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); - if (i++ > 1000) { - DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n", - i); - rc = -EFAULT; - break; - } - } - if (rc == -EFAULT) - return rc; - - /* start xfer with read op */ - val = (MCPR_IMC_COMMAND_ENABLE) | - (MCPR_IMC_COMMAND_READ_OP << - MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | - (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | - (xfer_cnt); - REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); - - /* poll for completion */ - i = 0; - val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); - while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { - udelay(10); - val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); - if (i++ > 1000) { - DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i); - rc = -EFAULT; - break; - } - } - if (rc == -EFAULT) - return rc; - - for (i = (lc_addr >> 2); i < 4; i++) { - data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4)); -#ifdef __BIG_ENDIAN - data_array[i] = ((data_array[i] & 0x000000ff) << 24) | - ((data_array[i] & 0x0000ff00) << 8) | - ((data_array[i] & 0x00ff0000) >> 8) | - ((data_array[i] & 0xff000000) >> 24); -#endif - } - return rc; -} - -static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 or_val) -{ - u16 val; - bnx2x_cl45_read(bp, phy, devad, reg, &val); - bnx2x_cl45_write(bp, phy, devad, reg, val | or_val); -} - -int bnx2x_phy_read(struct link_params *params, u8 phy_addr, - u8 devad, u16 reg, u16 *ret_val) -{ - u8 phy_index; - /* - * Probe 
for the phy according to the given phy_addr, and execute
- * the read request on it
- */
- for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
- if (params->phy[phy_index].addr == phy_addr) {
- return bnx2x_cl45_read(params->bp,
- &params->phy[phy_index], devad,
- reg, ret_val);
- }
- }
- return -EINVAL;
-}
-
-int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
- u8 devad, u16 reg, u16 val)
-{
- u8 phy_index;
- /*
- * Probe for the phy according to the given phy_addr, and execute
- * the write request on it
- */
- for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
- if (params->phy[phy_index].addr == phy_addr) {
- return bnx2x_cl45_write(params->bp,
- &params->phy[phy_index], devad,
- reg, val);
- }
- }
- return -EINVAL;
-}
-static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
- struct link_params *params)
-{
- u8 lane = 0;
- struct bnx2x *bp = params->bp;
- u32 path_swap, path_swap_ovr;
- u8 path, port;
-
- path = BP_PATH(bp);
- port = params->port;
-
- if (bnx2x_is_4_port_mode(bp)) {
- u32 port_swap, port_swap_ovr;
-
- /*figure out path swap value */
- path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
- if (path_swap_ovr & 0x1)
- path_swap = (path_swap_ovr & 0x2);
- else
- path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
-
- if (path_swap)
- path = path ^ 1;
-
- /*figure out port swap value */
- port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
- if (port_swap_ovr & 0x1)
- port_swap = (port_swap_ovr & 0x2);
- else
- port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
-
- if (port_swap)
- port = port ^ 1;
-
- lane = (port<<1) + path;
- } else { /* two port mode - no port swap */
-
- /*figure out path swap value */
- path_swap_ovr =
- REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
- if (path_swap_ovr & 0x1) {
- path_swap = (path_swap_ovr & 0x2);
- } else {
- path_swap =
- REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
- }
- if (path_swap)
- path = path ^ 1;
-
- lane = path << 1 ;
- }
- return lane;
-}
-
-static void bnx2x_set_aer_mmd(struct link_params *params,
- struct bnx2x_phy *phy)
-{
- u32 ser_lane;
- u16 offset, aer_val;
- struct bnx2x *bp = params->bp;
- ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
-
- offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
- (phy->addr + ser_lane) : 0;
-
- if (USES_WARPCORE(bp)) {
- aer_val = bnx2x_get_warpcore_lane(phy, params);
- /*
- * In Dual-lane mode, two lanes are joined together,
- * so in order to configure them, the AER broadcast method is
- * used here.
- * 0x200 is the broadcast address for lanes 0,1
- * 0x201 is the broadcast address for lanes 2,3
- */
- if (phy->flags & FLAGS_WC_DUAL_MODE)
- aer_val = (aer_val >> 1) | 0x200;
- } else if (CHIP_IS_E2(bp))
- aer_val = 0x3800 + offset - 1;
- else
- aer_val = 0x3800 + offset;
- DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
- CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, aer_val);
-
-}
-
-/******************************************************************/
-/* Internal phy section */
-/******************************************************************/
-
-static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
-{
- u32 emac_base = (port) ?
GRCBASE_EMAC1 : GRCBASE_EMAC0; - - /* Set Clause 22 */ - REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1); - REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000); - udelay(500); - REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f); - udelay(500); - /* Set Clause 45 */ - REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0); -} - -static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) -{ - u32 val; - - DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n"); - - val = SERDES_RESET_BITS << (port*16); - - /* reset and unreset the SerDes/XGXS */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); - udelay(500); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); - - bnx2x_set_serdes_access(bp, port); - - REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10, - DEFAULT_PHY_DEV_ADDR); -} - -static void bnx2x_xgxs_deassert(struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u8 port; - u32 val; - DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n"); - port = params->port; - - val = XGXS_RESET_BITS << (port*16); - - /* reset and unreset the SerDes/XGXS */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); - udelay(500); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); - - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0); - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, - params->phy[INT_PHY].def_md_devad); -} - -static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, - struct link_params *params, u16 *ieee_fc) -{ - struct bnx2x *bp = params->bp; - *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; - /** - * resolve pause mode and advertisement Please refer to Table - * 28B-3 of the 802.3ab-1999 spec - */ - - switch (phy->req_flow_ctrl) { - case BNX2X_FLOW_CTRL_AUTO: - if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - else - *ieee_fc |= - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; - break; - - case BNX2X_FLOW_CTRL_TX: - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; - break; - - case BNX2X_FLOW_CTRL_RX: - case BNX2X_FLOW_CTRL_BOTH: - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - break; - - case BNX2X_FLOW_CTRL_NONE: - default: - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; - break; - } - DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); -} - -static void set_phy_vars(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 actual_phy_idx, phy_index, link_cfg_idx; - u8 phy_config_swapped = params->multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED; - for (phy_index = INT_PHY; phy_index < params->num_phys; - phy_index++) { - link_cfg_idx = LINK_CONFIG_IDX(phy_index); - actual_phy_idx = phy_index; - if (phy_config_swapped) { - if (phy_index == EXT_PHY1) - actual_phy_idx = EXT_PHY2; - else if (phy_index == EXT_PHY2) - actual_phy_idx = EXT_PHY1; - } - params->phy[actual_phy_idx].req_flow_ctrl = - params->req_flow_ctrl[link_cfg_idx]; - - params->phy[actual_phy_idx].req_line_speed = - params->req_line_speed[link_cfg_idx]; - - params->phy[actual_phy_idx].speed_cap_mask = - params->speed_cap_mask[link_cfg_idx]; - - params->phy[actual_phy_idx].req_duplex = - params->req_duplex[link_cfg_idx]; - - if (params->req_line_speed[link_cfg_idx] == - SPEED_AUTO_NEG) - vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; - - DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x," - " speed_cap_mask %x\n", - params->phy[actual_phy_idx].req_flow_ctrl, - 
params->phy[actual_phy_idx].req_line_speed, - params->phy[actual_phy_idx].speed_cap_mask); - } -} - -static void bnx2x_ext_phy_set_pause(struct link_params *params, - struct bnx2x_phy *phy, - struct link_vars *vars) -{ - u16 val; - struct bnx2x *bp = params->bp; - /* read modify write pause advertizing */ - bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); - - val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; - - /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ - bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { - val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; - } - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { - val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; - } - DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); -} - -static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) -{ /* LD LP */ - switch (pause_result) { /* ASYM P ASYM P */ - case 0xb: /* 1 0 1 1 */ - vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; - break; - - case 0xe: /* 1 1 1 0 */ - vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; - break; - - case 0x5: /* 0 1 0 1 */ - case 0x7: /* 0 1 1 1 */ - case 0xd: /* 1 1 0 1 */ - case 0xf: /* 1 1 1 1 */ - vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; - break; - - default: - break; - } - if (pause_result & (1<<0)) - vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; - if (pause_result & (1<<1)) - vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; -} - -static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 ld_pause; /* local */ - u16 lp_pause; /* link partner */ - u16 pause_result; - u8 ret = 0; - /* read twice */ - - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - - if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) - vars->flow_ctrl = phy->req_flow_ctrl; - else if (phy->req_line_speed != SPEED_AUTO_NEG) - vars->flow_ctrl = params->req_fc_auto_adv; - else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { - ret = 1; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) { - bnx2x_cl22_read(bp, phy, - 0x4, &ld_pause); - bnx2x_cl22_read(bp, phy, - 0x5, &lp_pause); - } else { - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_ADV_PAUSE, &ld_pause); - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); - } - pause_result = (ld_pause & - MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; - pause_result |= (lp_pause & - MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; - DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", - pause_result); - bnx2x_pause_resolve(vars, pause_result); - } - return ret; -} -/******************************************************************/ -/* Warpcore section */ -/******************************************************************/ -/* The init_internal_warpcore should mirror the xgxs, - * i.e. reset the lane (if needed), set aer for the - * init configuration, and set/clear SGMII flag. Internal - * phy init is done purely in phy_init stage. 
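 *
 * As a rough sketch, the per-interface dispatch implemented by
 * bnx2x_warpcore_config_init() further below is:
 *
 *	if (SGMII interface, or a forced 10/100 speed)
 *		bnx2x_warpcore_set_sgmii_speed(...);
 *	else switch (serdes_net_if) {
 *	case PORT_HW_CFG_NET_SERDES_IF_KR:  KR autoneg or forced 10G KR;
 *	case PORT_HW_CFG_NET_SERDES_IF_XFI: 10G XFI, or SGMII/1G fiber;
 *	case PORT_HW_CFG_NET_SERDES_IF_SFI: 10G SFI or 1G fiber, plus
 *	                                    SFP module detection;
 *	case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
 *	case PORT_HW_CFG_NET_SERDES_IF_KR2: the 20G modes;
 *	}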
- */ -static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) { - u16 val16 = 0, lane, bam37 = 0; - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); - /* Check adding advertisement for 1G KX */ - if (((vars->line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || - (vars->line_speed == SPEED_1000)) { - u16 sd_digital; - val16 |= (1<<5); - - /* Enable CL37 1G Parallel Detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, - (sd_digital | 0x1)); - - DP(NETIF_MSG_LINK, "Advertize 1G\n"); - } - if (((vars->line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || - (vars->line_speed == SPEED_10000)) { - /* Check adding advertisement for 10G KR */ - val16 |= (1<<7); - /* Enable 10G Parallel Detect */ - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_PAR_DET_10G_CTRL, 1); - - DP(NETIF_MSG_LINK, "Advertize 10G\n"); - } - - /* Set Transmit PMD settings */ - lane = bnx2x_get_warpcore_lane(phy, params); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, - ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, - 0x03f0); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL, - 0x03f0); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - 0x383f); - - /* Advertised speeds */ - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); - - /* Enable CL37 BAM */ - if (REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. 
- port_hw_config[params->port].default_cfg)) & - PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1); - DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); - } - - /* Advertise pause */ - bnx2x_ext_phy_set_pause(params, phy, vars); - - /* Enable Autoneg */ - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000); - - /* Over 1G - AN local device user page 1 */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL3_UP1, 0x1f); - - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC7, &val16); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100); -} - -static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 val; - - /* Disable Autoneg */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); - - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_PAR_DET_10G_CTRL, 0); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00); - - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0); - - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL3_UP1, 0x1); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC7, 0xa); - - /* Disable CL36 PCS Tx */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0); - - /* Double Wide Single Data Rate @ pll rate */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF); - - /* Leave cl72 training enable, needed for KR */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, - 0x2); - - /* Leave CL72 enabled */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - val | 0x3800); - - /* Set speed via PMA/PMD register */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); - - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); - - /*Enable encoded forced speed */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); - - /* Turn TX scramble payload only the 64/66 scrambler */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX66_CONTROL, 0x9); - - /* Turn RX scramble payload only the 64/66 scrambler */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, 0xF9); - - /* set and clear loopback to cause a reset to 64/66 decoder */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); - -} - -static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, - struct link_params *params, - u8 is_xfi) -{ - struct bnx2x *bp = params->bp; - u16 misc1_val, tap_val, tx_driver_val, lane, val; - /* Hold rxSeqStart */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000)); - - /* Hold tx_fifo_reset */ - 
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1)); - - /* Disable CL73 AN */ - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); - - /* Disable 100FX Enable and Auto-Detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL1, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA)); - - /* Disable 100FX Idle detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL3, (val | 0x0080)); - - /* Set Block address to Remote PHY & Clear forced_speed[5] */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F)); - - /* Turn off auto-detect & fiber mode */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, - (val & 0xFFEE)); - - /* Set filter_force_link, disable_false_link and parallel_detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, - ((val | 0x0006) & 0xFFFE)); - - /* Set XFI / SFI */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val); - - misc1_val &= ~(0x1f); - - if (is_xfi) { - misc1_val |= 0x5; - tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | - (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | - (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); - tx_driver_val = - ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); - - } else { - misc1_val |= 0x9; - tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | - (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | - (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); - tx_driver_val = - ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); - } - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); - - /* Set Transmit PMD settings */ - lane = bnx2x_get_warpcore_lane(phy, params); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX_FIR_TAP, - tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, - tx_driver_val); - - /* Enable fiber mode, enable and invert sig_det */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd); - - /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080); - - /* 10G XFI Full Duplex */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100); - - /* Release tx_fifo_reset */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - 
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE); - - /* Release rxSeqStart */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF)); -} - -static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp, - struct bnx2x_phy *phy) -{ - DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n"); -} - -static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, - struct bnx2x_phy *phy, - u16 lane) -{ - /* Rx0 anaRxControl1G */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90); - - /* Rx2 anaRxControl1G */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_SCW0, 0xE070); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_SCW1, 0xC0D0); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_SCW2, 0xA0B0); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_SCW3, 0x8090); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0); - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0); - - /* Serdes Digital Misc1 */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008); - - /* Serdes Digital4 Misc3 */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, 0x8088); - - /* Set Transmit PMD settings */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX_FIR_TAP, - ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | - (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | - (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) | - MDIO_WC_REG_TX_FIR_TAP_ENABLE)); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, - ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); -} - -static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, - struct link_params *params, - u8 fiber_mode) -{ - struct bnx2x *bp = params->bp; - u16 val16, digctrl_kx1, digctrl_kx2; - u8 lane; - - lane = bnx2x_get_warpcore_lane(phy, params); - - /* Clear XFI clock comp in non-10G single lane mode. 
*/ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); - - if (phy->req_line_speed == SPEED_AUTO_NEG) { - /* SGMII Autoneg */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, - val16 | 0x1000); - DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n"); - } else { - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - val16 &= 0xcfbf; - switch (phy->req_line_speed) { - case SPEED_10: - break; - case SPEED_100: - val16 |= 0x2000; - break; - case SPEED_1000: - val16 |= 0x0040; - break; - default: - DP(NETIF_MSG_LINK, "Speed not supported: 0x%x" - "\n", phy->req_line_speed); - return; - } - - if (phy->req_duplex == DUPLEX_FULL) - val16 |= 0x0100; - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16); - - DP(NETIF_MSG_LINK, "set SGMII force speed %d\n", - phy->req_line_speed); - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - DP(NETIF_MSG_LINK, " (readback) %x\n", val16); - } - - /* SGMII Slave mode and disable signal detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1); - if (fiber_mode) - digctrl_kx1 = 1; - else - digctrl_kx1 &= 0xff4a; - - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, - digctrl_kx1); - - /* Turn off parallel detect */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, - (digctrl_kx2 & ~(1<<2))); - - /* Re-enable parallel detect */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, - (digctrl_kx2 | (1<<2))); - - /* Enable autodet */ - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, - (digctrl_kx1 | 0x10)); -} - -static void bnx2x_warpcore_reset_lane(struct bnx2x *bp, - struct bnx2x_phy *phy, - u8 reset) -{ - u16 val; - /* Take lane out of reset after configuration is finished */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC6, &val); - if (reset) - val |= 0xC000; - else - val &= 0x3FFF; - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC6, val); - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC6, &val); -} - - - /* Clear SFI/XFI link settings registers */ -static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, - struct link_params *params, - u16 lane) -{ - struct bnx2x *bp = params->bp; - u16 val16; - - /* Set XFI clock comp as default. 
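 * ("default" here means both clock-compensation bits set: the write
 *  below ORs (3<<13) into MDIO_WC_REG_RX66_CONTROL, whereas the SGMII
 *  path above clears the same two bits with ~(3<<13))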
*/ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13)); - - bnx2x_warpcore_reset_lane(bp, phy, 1); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL1, 0x014a); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL3, 0x0800); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, 0x8008); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000); - lane = bnx2x_get_warpcore_lane(phy, params); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX_FIR_TAP, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140); - bnx2x_warpcore_reset_lane(bp, phy, 0); -} - -static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, - u32 chip_id, - u32 shmem_base, u8 port, - u8 *gpio_num, u8 *gpio_port) -{ - u32 cfg_pin; - *gpio_num = 0; - *gpio_port = 0; - if (CHIP_IS_E3(bp)) { - cfg_pin = (REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].e3_sfp_ctrl)) & - PORT_HW_CFG_E3_MOD_ABS_MASK) >> - PORT_HW_CFG_E3_MOD_ABS_SHIFT; - - /* - * Should not happen. This function called upon interrupt - * triggered by GPIO ( since EPIO can only generate interrupts - * to MCP). - * So if this function was called and none of the GPIOs was set, - * it means the shit hit the fan. - */ - if ((cfg_pin < PIN_CFG_GPIO0_P0) || - (cfg_pin > PIN_CFG_GPIO3_P1)) { - DP(NETIF_MSG_LINK, "ERROR: Invalid cfg pin %x for " - "module detect indication\n", - cfg_pin); - return -EINVAL; - } - - *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3; - *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2; - } else { - *gpio_num = MISC_REGISTERS_GPIO_3; - *gpio_port = port; - } - DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port); - return 0; -} - -static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u8 gpio_num, gpio_port; - u32 gpio_val; - if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, - params->shmem_base, params->port, - &gpio_num, &gpio_port) != 0) - return 0; - gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); - - /* Call the handling function in case module is detected */ - if (gpio_val == 0) - return 1; - else - return 0; -} - -static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u32 serdes_net_if; - u8 fiber_mode; - u16 lane = bnx2x_get_warpcore_lane(phy, params); - serdes_net_if = (REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. 
- port_hw_config[params->port].default_cfg)) & - PORT_HW_CFG_NET_SERDES_IF_MASK); - DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, " - "serdes_net_if = 0x%x\n", - vars->line_speed, serdes_net_if); - bnx2x_set_aer_mmd(params, phy); - - vars->phy_flags |= PHY_XGXS_FLAG; - if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || - (phy->req_line_speed && - ((phy->req_line_speed == SPEED_100) || - (phy->req_line_speed == SPEED_10)))) { - vars->phy_flags |= PHY_SGMII_FLAG; - DP(NETIF_MSG_LINK, "Setting SGMII mode\n"); - bnx2x_warpcore_clear_regs(phy, params, lane); - bnx2x_warpcore_set_sgmii_speed(phy, params, 0); - } else { - switch (serdes_net_if) { - case PORT_HW_CFG_NET_SERDES_IF_KR: - /* Enable KR Auto Neg */ - if (params->loopback_mode == LOOPBACK_NONE) - bnx2x_warpcore_enable_AN_KR(phy, params, vars); - else { - DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n"); - bnx2x_warpcore_set_10G_KR(phy, params, vars); - } - break; - - case PORT_HW_CFG_NET_SERDES_IF_XFI: - bnx2x_warpcore_clear_regs(phy, params, lane); - if (vars->line_speed == SPEED_10000) { - DP(NETIF_MSG_LINK, "Setting 10G XFI\n"); - bnx2x_warpcore_set_10G_XFI(phy, params, 1); - } else { - if (SINGLE_MEDIA_DIRECT(params)) { - DP(NETIF_MSG_LINK, "1G Fiber\n"); - fiber_mode = 1; - } else { - DP(NETIF_MSG_LINK, "10/100/1G SGMII\n"); - fiber_mode = 0; - } - bnx2x_warpcore_set_sgmii_speed(phy, - params, - fiber_mode); - } - - break; - - case PORT_HW_CFG_NET_SERDES_IF_SFI: - - bnx2x_warpcore_clear_regs(phy, params, lane); - if (vars->line_speed == SPEED_10000) { - DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); - bnx2x_warpcore_set_10G_XFI(phy, params, 0); - } else if (vars->line_speed == SPEED_1000) { - DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); - bnx2x_warpcore_set_sgmii_speed(phy, params, 1); - } - /* Issue Module detection */ - if (bnx2x_is_sfp_module_plugged(phy, params)) - bnx2x_sfp_module_detection(phy, params); - break; - - case PORT_HW_CFG_NET_SERDES_IF_DXGXS: - if (vars->line_speed != SPEED_20000) { - DP(NETIF_MSG_LINK, "Speed not supported yet\n"); - return; - } - DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n"); - bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane); - /* Issue Module detection */ - - bnx2x_sfp_module_detection(phy, params); - break; - - case PORT_HW_CFG_NET_SERDES_IF_KR2: - if (vars->line_speed != SPEED_20000) { - DP(NETIF_MSG_LINK, "Speed not supported yet\n"); - return; - } - DP(NETIF_MSG_LINK, "Setting 20G KR2\n"); - bnx2x_warpcore_set_20G_KR2(bp, phy); - break; - - default: - DP(NETIF_MSG_LINK, "Unsupported Serdes Net Interface " - "0x%x\n", serdes_net_if); - return; - } - } - - /* Take lane out of reset after configuration is finished */ - bnx2x_warpcore_reset_lane(bp, phy, 0); - DP(NETIF_MSG_LINK, "Exit config init\n"); -} - -static void bnx2x_sfp_e3_set_transmitter(struct link_params *params, - struct bnx2x_phy *phy, - u8 tx_en) -{ - struct bnx2x *bp = params->bp; - u32 cfg_pin; - u8 port = params->port; - - cfg_pin = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].e3_sfp_ctrl)) & - PORT_HW_CFG_TX_LASER_MASK; - /* Set the !tx_en since this pin is DISABLE_TX_LASER */ - DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en); - /* For 20G, the expected pin to be used is 3 pins after the current */ - - bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1); - if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) - bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1); -} - -static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = 
params->bp; - u16 val16; - bnx2x_sfp_e3_set_transmitter(params, phy, 0); - bnx2x_set_mdio_clk(bp, params->chip_id, params->port); - bnx2x_set_aer_mmd(params, phy); - /* Global register */ - bnx2x_warpcore_reset_lane(bp, phy, 1); - - /* Clear loopback settings (if any) */ - /* 10G & 20G */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 & - 0xBFFF); - - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe); - - /* Update those 1-copy registers */ - CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, - MDIO_AER_BLOCK_AER_REG, 0); - /* Enable 1G MDIO (1-copy) */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - val16 & ~0x10); - - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL2, - val16 & 0xff00); - -} - -static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u16 val16; - u32 lane; - DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n", - params->loopback_mode, phy->req_line_speed); - - if (phy->req_line_speed < SPEED_10000) { - /* 10/100/1000 */ - - /* Update those 1-copy registers */ - CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, - MDIO_AER_BLOCK_AER_REG, 0); - /* Enable 1G MDIO (1-copy) */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - val16 | 0x10); - /* Set 1G loopback based on lane (1-copy) */ - lane = bnx2x_get_warpcore_lane(phy, params); - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK1_LANECTRL2, - val16 | (1<<lane)); - - /* Switch back to 4-copy registers */ - bnx2x_set_aer_mmd(params, phy); - } else { - /* 10G & 20G */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 | - 0x4000); - - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1); - } -} - -void bnx2x_link_status_update(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 link_10g_plus; - u8 port = params->port; - u32 sync_offset, media_types; - /* Update PHY configuration */ - set_phy_vars(params, vars); - - vars->link_status = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - port_mb[port].link_status)); - - vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); - vars->phy_flags = PHY_XGXS_FLAG; - if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) - vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; - - if (vars->link_up) { - DP(NETIF_MSG_LINK, "phy link up\n"); - - vars->phy_link_up = 1; - vars->duplex = DUPLEX_FULL; - switch (vars->link_status & - LINK_STATUS_SPEED_AND_DUPLEX_MASK) { - case LINK_10THD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_10TFD: - vars->line_speed = SPEED_10; - break; - - case LINK_100TXHD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_100T4: - case LINK_100TXFD: - vars->line_speed = SPEED_100; - break; - - case LINK_1000THD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_1000TFD: - vars->line_speed = SPEED_1000; - break; - - case LINK_2500THD: - vars->duplex = DUPLEX_HALF; - /* fall thru */ - case LINK_2500TFD: - vars->line_speed = SPEED_2500; - break; - - case LINK_10GTFD: - vars->line_speed = SPEED_10000; - break; - case LINK_20GTFD: - vars->line_speed = SPEED_20000; - break; - default: - break; - } - vars->flow_ctrl = 0; - if (vars->link_status & 
LINK_STATUS_TX_FLOW_CONTROL_ENABLED) - vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX; - - if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED) - vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX; - - if (!vars->flow_ctrl) - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - - if (vars->line_speed && - ((vars->line_speed == SPEED_10) || - (vars->line_speed == SPEED_100))) { - vars->phy_flags |= PHY_SGMII_FLAG; - } else { - vars->phy_flags &= ~PHY_SGMII_FLAG; - } - if (vars->line_speed && - USES_WARPCORE(bp) && - (vars->line_speed == SPEED_1000)) - vars->phy_flags |= PHY_SGMII_FLAG; - /* anything 10 and over uses the bmac */ - link_10g_plus = (vars->line_speed >= SPEED_10000); - - if (link_10g_plus) { - if (USES_WARPCORE(bp)) - vars->mac_type = MAC_TYPE_XMAC; - else - vars->mac_type = MAC_TYPE_BMAC; - } else { - if (USES_WARPCORE(bp)) - vars->mac_type = MAC_TYPE_UMAC; - else - vars->mac_type = MAC_TYPE_EMAC; - } - } else { /* link down */ - DP(NETIF_MSG_LINK, "phy link down\n"); - - vars->phy_link_up = 0; - - vars->line_speed = 0; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - - /* indicate no mac active */ - vars->mac_type = MAC_TYPE_NONE; - if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) - vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; - } - - /* Sync media type */ - sync_offset = params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].media_type); - media_types = REG_RD(bp, sync_offset); - - params->phy[INT_PHY].media_type = - (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >> - PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT; - params->phy[EXT_PHY1].media_type = - (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >> - PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT; - params->phy[EXT_PHY2].media_type = - (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >> - PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT; - DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types); - - /* Sync AEU offset */ - sync_offset = params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].aeu_int_mask); - - vars->aeu_int_mask = REG_RD(bp, sync_offset); - - /* Sync PFC status */ - if (vars->link_status & LINK_STATUS_PFC_ENABLED) - params->feature_config_flags |= - FEATURE_CONFIG_PFC_ENABLED; - else - params->feature_config_flags &= - ~FEATURE_CONFIG_PFC_ENABLED; - - DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", - vars->link_status, vars->phy_link_up, vars->aeu_int_mask); - DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n", - vars->line_speed, vars->duplex, vars->flow_ctrl); -} - - -static void bnx2x_set_master_ln(struct link_params *params, - struct bnx2x_phy *phy) -{ - struct bnx2x *bp = params->bp; - u16 new_master_ln, ser_lane; - ser_lane = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); - - /* set the master_ln for AN */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_TEST_MODE_LANE, - &new_master_ln); - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2 , - MDIO_XGXS_BLOCK2_TEST_MODE_LANE, - (new_master_ln | ser_lane)); -} - -static int bnx2x_reset_unicore(struct link_params *params, - struct bnx2x_phy *phy, - u8 set_serdes) -{ - struct bnx2x *bp = params->bp; - u16 mii_control; - u16 i; - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); - - /* reset the unicore */ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - (mii_control | - 
MDIO_COMBO_IEEO_MII_CONTROL_RESET)); - if (set_serdes) - bnx2x_set_serdes_access(bp, params->port); - - /* wait for the reset to self clear */ - for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { - udelay(5); - - /* the reset erased the previous bank value */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - &mii_control); - - if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { - udelay(5); - return 0; - } - } - - netdev_err(bp->dev, "Warning: PHY was not initialized," - " Port %d\n", - params->port); - DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n"); - return -EINVAL; - -} - -static void bnx2x_set_swap_lanes(struct link_params *params, - struct bnx2x_phy *phy) -{ - struct bnx2x *bp = params->bp; - /* - * Each two bits represents a lane number: - * No swap is 0123 => 0x1b no need to enable the swap - */ - u16 ser_lane, rx_lane_swap, tx_lane_swap; - - ser_lane = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); - rx_lane_swap = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); - tx_lane_swap = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); - - if (rx_lane_swap != 0x1b) { - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_RX_LN_SWAP, - (rx_lane_swap | - MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | - MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); - } else { - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); - } - - if (tx_lane_swap != 0x1b) { - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_TX_LN_SWAP, - (tx_lane_swap | - MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); - } else { - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); - } -} - -static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u16 control2; - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, - &control2); - if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) - control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; - else - control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; - DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n", - phy->speed_cap_mask, control2); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, - control2); - - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && - (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { - DP(NETIF_MSG_LINK, "XGXS\n"); - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); - - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, - &control2); - - - control2 |= - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, - control2); - - /* Disable parallel detection of HiG */ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, - MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | - MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); - } -} - -static void bnx2x_set_autoneg(struct bnx2x_phy *phy, - struct link_params 
*params, - struct link_vars *vars, - u8 enable_cl73) -{ - struct bnx2x *bp = params->bp; - u16 reg_val; - - /* CL37 Autoneg */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); - - /* CL37 Autoneg Enabled */ - if (vars->line_speed == SPEED_AUTO_NEG) - reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; - else /* CL37 Autoneg Disabled */ - reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | - MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); - - /* Enable/Disable Autodetection */ - - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); - reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); - reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; - if (vars->line_speed == SPEED_AUTO_NEG) - reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; - else - reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); - - /* Enable TetonII and BAM autoneg */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_BAM_NEXT_PAGE, - MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, - &reg_val); - if (vars->line_speed == SPEED_AUTO_NEG) { - /* Enable BAM aneg Mode and TetonII aneg Mode */ - reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | - MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); - } else { - /* TetonII and BAM Autoneg Disabled */ - reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | - MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); - } - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_BAM_NEXT_PAGE, - MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, - reg_val); - - if (enable_cl73) { - /* Enable Cl73 FSM status bits */ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_USERB0, - MDIO_CL73_USERB0_CL73_UCTRL, - 0xe); - - /* Enable BAM Station Manager*/ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_USERB0, - MDIO_CL73_USERB0_CL73_BAM_CTRL1, - MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | - MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN | - MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); - - /* Advertise CL73 link speeds */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV2, - &reg_val); - if (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) - reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; - if (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) - reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV2, - reg_val); - - /* CL73 Autoneg Enabled */ - reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; - - } else /* CL73 Autoneg Disabled */ - reg_val = 0; - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); -} - -/* program SerDes, forced speed */ -static void bnx2x_program_serdes(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 reg_val; - - /* program duplex, disable autoneg and sgmii*/ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); - reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | - MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | - MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); - if (phy->req_duplex == DUPLEX_FULL) - reg_val |= 
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); - - /* - * program speed - * - needed only if the speed is greater than 1G (2.5G or 10G) - */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_MISC1, &reg_val); - /* clearing the speed value before setting the right speed */ - DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); - - reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | - MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); - - if (!((vars->line_speed == SPEED_1000) || - (vars->line_speed == SPEED_100) || - (vars->line_speed == SPEED_10))) { - - reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | - MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); - if (vars->line_speed == SPEED_10000) - reg_val |= - MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; - } - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_MISC1, reg_val); - -} - -static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u16 val = 0; - - /* configure the 48 bits for BAM AN */ - - /* set extended capabilities */ - if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) - val |= MDIO_OVER_1G_UP1_2_5G; - if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) - val |= MDIO_OVER_1G_UP1_10G; - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_OVER_1G, - MDIO_OVER_1G_UP1, val); - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_OVER_1G, - MDIO_OVER_1G_UP3, 0x400); -} - -static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy, - struct link_params *params, - u16 ieee_fc) -{ - struct bnx2x *bp = params->bp; - u16 val; - /* for AN, we are always publishing full duplex */ - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV1, &val); - val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; - val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV1, val); -} - -static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, - struct link_params *params, - u8 enable_cl73) -{ - struct bnx2x *bp = params->bp; - u16 mii_control; - - DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n"); - /* Enable and restart BAM/CL37 aneg */ - - if (enable_cl73) { - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - &mii_control); - - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - (mii_control | - MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | - MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); - } else { - - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - &mii_control); - DP(NETIF_MSG_LINK, - "bnx2x_restart_autoneg mii_control before = 0x%x\n", - mii_control); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - (mii_control | - MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | - MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); - } -} - -static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 control1; - - /* in SGMII mode, the unicore is always slave */ - - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - 
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, - &control1); - control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; - /* set sgmii mode (and not fiber) */ - control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, - control1); - - /* if forced speed */ - if (!(vars->line_speed == SPEED_AUTO_NEG)) { - /* set speed, disable autoneg */ - u16 mii_control; - - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - &mii_control); - mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | - MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| - MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); - - switch (vars->line_speed) { - case SPEED_100: - mii_control |= - MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; - break; - case SPEED_1000: - mii_control |= - MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; - break; - case SPEED_10: - /* there is nothing to set for 10M */ - break; - default: - /* invalid speed for SGMII */ - DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", - vars->line_speed); - break; - } - - /* setting the full duplex */ - if (phy->req_duplex == DUPLEX_FULL) - mii_control |= - MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - mii_control); - - } else { /* AN mode */ - /* enable and restart AN */ - bnx2x_restart_autoneg(phy, params, 0); - } -} - - -/* - * link management - */ - -static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u16 pd_10g, status2_1000x; - if (phy->req_line_speed != SPEED_AUTO_NEG) - return 0; - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_STATUS2, - &status2_1000x); - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_STATUS2, - &status2_1000x); - if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { - DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", - params->port); - return 1; - } - - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, - &pd_10g); - - if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { - DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", - params->port); - return 1; - } - return 0; -} - -static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars, - u32 gp_status) -{ - struct bnx2x *bp = params->bp; - u16 ld_pause; /* local driver */ - u16 lp_pause; /* link partner */ - u16 pause_result; - - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - - /* resolve from gp_status in case of AN complete and not sgmii */ - if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) - vars->flow_ctrl = phy->req_flow_ctrl; - else if (phy->req_line_speed != SPEED_AUTO_NEG) - vars->flow_ctrl = params->req_fc_auto_adv; - else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && - (!(vars->phy_flags & PHY_SGMII_FLAG))) { - if (bnx2x_direct_parallel_detect_used(phy, params)) { - vars->flow_ctrl = params->req_fc_auto_adv; - return; - } - if ((gp_status & - (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | - MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) == - (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | - 
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { - - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV1, - &ld_pause); - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_LP_ADV1, - &lp_pause); - pause_result = (ld_pause & - MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) - >> 8; - pause_result |= (lp_pause & - MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) - >> 10; - DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", - pause_result); - } else { - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_ADV, - &ld_pause); - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, - &lp_pause); - pause_result = (ld_pause & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; - pause_result |= (lp_pause & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; - DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", - pause_result); - } - bnx2x_pause_resolve(vars, pause_result); - } - DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); -} - -static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u16 rx_status, ustat_val, cl37_fsm_received; - DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); - /* Step 1: Make sure signal is detected */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_RX0, - MDIO_RX0_RX_STATUS, - &rx_status); - if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != - (MDIO_RX0_RX_STATUS_SIGDET)) { - DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." - "rx_status(0x80b0) = 0x%x\n", rx_status); - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); - return; - } - /* Step 2: Check CL73 state machine */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_USERB0, - MDIO_CL73_USERB0_CL73_USTAT1, - &ustat_val); - if ((ustat_val & - (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | - MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != - (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | - MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) { - DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. " - "ustat_val(0x8371) = 0x%x\n", ustat_val); - return; - } - /* - * Step 3: Check CL37 Message Pages received to indicate LP - * supports only CL37 - */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_REMOTE_PHY, - MDIO_REMOTE_PHY_MISC_RX_STATUS, - &cl37_fsm_received); - if ((cl37_fsm_received & - (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | - MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != - (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | - MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) { - DP(NETIF_MSG_LINK, "No CL37 FSM were received. " - "misc_rx_status(0x8330) = 0x%x\n", - cl37_fsm_received); - return; - } - /* - * The combined cl37/cl73 fsm state information indicating that - * we are connected to a device which does not support cl73, but - * does support cl37 BAM. 
In this case we disable cl73 and - * restart cl37 auto-neg - */ - - /* Disable CL73 */ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - 0); - /* Restart CL37 autoneg */ - bnx2x_restart_autoneg(phy, params, 0); - DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); -} - -static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars, - u32 gp_status) -{ - if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) - vars->link_status |= - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; - - if (bnx2x_direct_parallel_detect_used(phy, params)) - vars->link_status |= - LINK_STATUS_PARALLEL_DETECTION_USED; -} -static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars, - u16 is_link_up, - u16 speed_mask, - u16 is_duplex) -{ - struct bnx2x *bp = params->bp; - if (phy->req_line_speed == SPEED_AUTO_NEG) - vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; - if (is_link_up) { - DP(NETIF_MSG_LINK, "phy link up\n"); - - vars->phy_link_up = 1; - vars->link_status |= LINK_STATUS_LINK_UP; - - switch (speed_mask) { - case GP_STATUS_10M: - vars->line_speed = SPEED_10; - if (vars->duplex == DUPLEX_FULL) - vars->link_status |= LINK_10TFD; - else - vars->link_status |= LINK_10THD; - break; - - case GP_STATUS_100M: - vars->line_speed = SPEED_100; - if (vars->duplex == DUPLEX_FULL) - vars->link_status |= LINK_100TXFD; - else - vars->link_status |= LINK_100TXHD; - break; - - case GP_STATUS_1G: - case GP_STATUS_1G_KX: - vars->line_speed = SPEED_1000; - if (vars->duplex == DUPLEX_FULL) - vars->link_status |= LINK_1000TFD; - else - vars->link_status |= LINK_1000THD; - break; - - case GP_STATUS_2_5G: - vars->line_speed = SPEED_2500; - if (vars->duplex == DUPLEX_FULL) - vars->link_status |= LINK_2500TFD; - else - vars->link_status |= LINK_2500THD; - break; - - case GP_STATUS_5G: - case GP_STATUS_6G: - DP(NETIF_MSG_LINK, - "link speed unsupported gp_status 0x%x\n", - speed_mask); - return -EINVAL; - - case GP_STATUS_10G_KX4: - case GP_STATUS_10G_HIG: - case GP_STATUS_10G_CX4: - case GP_STATUS_10G_KR: - case GP_STATUS_10G_SFI: - case GP_STATUS_10G_XFI: - vars->line_speed = SPEED_10000; - vars->link_status |= LINK_10GTFD; - break; - case GP_STATUS_20G_DXGXS: - vars->line_speed = SPEED_20000; - vars->link_status |= LINK_20GTFD; - break; - default: - DP(NETIF_MSG_LINK, - "link speed unsupported gp_status 0x%x\n", - speed_mask); - return -EINVAL; - } - } else { /* link_down */ - DP(NETIF_MSG_LINK, "phy link down\n"); - - vars->phy_link_up = 0; - - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->mac_type = MAC_TYPE_NONE; - } - DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n", - vars->phy_link_up, vars->line_speed); - return 0; -} - -static int bnx2x_link_settings_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - - struct bnx2x *bp = params->bp; - - u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask; - int rc = 0; - - /* Read gp_status */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_GP_STATUS, - MDIO_GP_STATUS_TOP_AN_STATUS1, - &gp_status); - if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) - duplex = DUPLEX_FULL; - if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) - link_up = 1; - speed_mask = gp_status & GP_STATUS_SPEED_MASK; - DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n", - gp_status, link_up, speed_mask); - rc = bnx2x_get_link_speed_duplex(phy, 
params, vars, link_up, speed_mask, - duplex); - if (rc == -EINVAL) - return rc; - - if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { - if (SINGLE_MEDIA_DIRECT(params)) { - bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); - if (phy->req_line_speed == SPEED_AUTO_NEG) - bnx2x_xgxs_an_resolve(phy, params, vars, - gp_status); - } - } else { /* link_down */ - if ((phy->req_line_speed == SPEED_AUTO_NEG) && - SINGLE_MEDIA_DIRECT(params)) { - /* Check signal is detected */ - bnx2x_check_fallback_to_cl37(phy, params); - } - } - - DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", - vars->duplex, vars->flow_ctrl, vars->link_status); - return rc; -} - -static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - - struct bnx2x *bp = params->bp; - - u8 lane; - u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL; - int rc = 0; - lane = bnx2x_get_warpcore_lane(phy, params); - /* Read gp_status */ - if (phy->req_line_speed > SPEED_10000) { - u16 temp_link_up; - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - 1, &temp_link_up); - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - 1, &link_up); - DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n", - temp_link_up, link_up); - link_up &= (1<<2); - if (link_up) - bnx2x_ext_phy_resolve_fc(phy, params, vars); - } else { - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1); - DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1); - /* Check for either KR or generic link up. */ - gp_status1 = ((gp_status1 >> 8) & 0xf) | - ((gp_status1 >> 12) & 0xf); - link_up = gp_status1 & (1 << lane); - if (link_up && SINGLE_MEDIA_DIRECT(params)) { - u16 pd, gp_status4; - if (phy->req_line_speed == SPEED_AUTO_NEG) { - /* Check Autoneg complete */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_GP2_STATUS_GP_2_4, - &gp_status4); - if (gp_status4 & ((1<<12)<<lane)) - vars->link_status |= - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; - - /* Check parallel detect used */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_PAR_DET_10G_STATUS, - &pd); - if (pd & (1<<15)) - vars->link_status |= - LINK_STATUS_PARALLEL_DETECTION_USED; - } - bnx2x_ext_phy_resolve_fc(phy, params, vars); - } - } - - if (lane < 2) { - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed); - } else { - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed); - } - DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed); - - if ((lane & 1) == 0) - gp_speed <<= 8; - gp_speed &= 0x3f00; - - - rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, - duplex); - - DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", - vars->duplex, vars->flow_ctrl, vars->link_status); - return rc; -} -static void bnx2x_set_gmii_tx_driver(struct link_params *params) -{ - struct bnx2x *bp = params->bp; - struct bnx2x_phy *phy = &params->phy[INT_PHY]; - u16 lp_up2; - u16 tx_driver; - u16 bank; - - /* read precomp */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_OVER_1G, - MDIO_OVER_1G_LP_UP2, &lp_up2); - - /* bits [10:7] at lp_up2, positioned at [15:12] */ - lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> - MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << - MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); - - if (lp_up2 == 0) - return; - - for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; - bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { - CL22_RD_OVER_CL45(bp, phy, - bank, - MDIO_TX0_TX_DRIVER, &tx_driver); - - /* replace tx_driver 
bits [15:12] */ - if (lp_up2 != - (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { - tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; - tx_driver |= lp_up2; - CL22_WR_OVER_CL45(bp, phy, - bank, - MDIO_TX0_TX_DRIVER, tx_driver); - } - } -} - -static int bnx2x_emac_program(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - u16 mode = 0; - - DP(NETIF_MSG_LINK, "setting link speed & duplex\n"); - bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + - EMAC_REG_EMAC_MODE, - (EMAC_MODE_25G_MODE | - EMAC_MODE_PORT_MII_10M | - EMAC_MODE_HALF_DUPLEX)); - switch (vars->line_speed) { - case SPEED_10: - mode |= EMAC_MODE_PORT_MII_10M; - break; - - case SPEED_100: - mode |= EMAC_MODE_PORT_MII; - break; - - case SPEED_1000: - mode |= EMAC_MODE_PORT_GMII; - break; - - case SPEED_2500: - mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII); - break; - - default: - /* 10G not valid for EMAC */ - DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", - vars->line_speed); - return -EINVAL; - } - - if (vars->duplex == DUPLEX_HALF) - mode |= EMAC_MODE_HALF_DUPLEX; - bnx2x_bits_en(bp, - GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, - mode); - - bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); - return 0; -} - -static void bnx2x_set_preemphasis(struct bnx2x_phy *phy, - struct link_params *params) -{ - - u16 bank, i = 0; - struct bnx2x *bp = params->bp; - - for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; - bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { - CL22_WR_OVER_CL45(bp, phy, - bank, - MDIO_RX0_RX_EQ_BOOST, - phy->rx_preemphasis[i]); - } - - for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; - bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { - CL22_WR_OVER_CL45(bp, phy, - bank, - MDIO_TX0_TX_DRIVER, - phy->tx_preemphasis[i]); - } -} - -static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) || - (params->loopback_mode == LOOPBACK_XGXS)); - if (!(vars->phy_flags & PHY_SGMII_FLAG)) { - if (SINGLE_MEDIA_DIRECT(params) && - (params->feature_config_flags & - FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) - bnx2x_set_preemphasis(phy, params); - - /* forced speed requested? 
*/ - if (vars->line_speed != SPEED_AUTO_NEG || - (SINGLE_MEDIA_DIRECT(params) && - params->loopback_mode == LOOPBACK_EXT)) { - DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); - - /* disable autoneg */ - bnx2x_set_autoneg(phy, params, vars, 0); - - /* program speed and duplex */ - bnx2x_program_serdes(phy, params, vars); - - } else { /* AN_mode */ - DP(NETIF_MSG_LINK, "not SGMII, AN\n"); - - /* AN enabled */ - bnx2x_set_brcm_cl37_advertisement(phy, params); - - /* program duplex & pause advertisement (for aneg) */ - bnx2x_set_ieee_aneg_advertisement(phy, params, - vars->ieee_fc); - - /* enable autoneg */ - bnx2x_set_autoneg(phy, params, vars, enable_cl73); - - /* enable and restart AN */ - bnx2x_restart_autoneg(phy, params, enable_cl73); - } - - } else { /* SGMII mode */ - DP(NETIF_MSG_LINK, "SGMII\n"); - - bnx2x_initialize_sgmii_process(phy, params, vars); - } -} - -static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - int rc; - vars->phy_flags |= PHY_XGXS_FLAG; - if ((phy->req_line_speed && - ((phy->req_line_speed == SPEED_100) || - (phy->req_line_speed == SPEED_10))) || - (!phy->req_line_speed && - (phy->speed_cap_mask >= - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && - (phy->speed_cap_mask < - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || - (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD)) - vars->phy_flags |= PHY_SGMII_FLAG; - else - vars->phy_flags &= ~PHY_SGMII_FLAG; - - bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); - bnx2x_set_aer_mmd(params, phy); - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) - bnx2x_set_master_ln(params, phy); - - rc = bnx2x_reset_unicore(params, phy, 0); - /* reset the SerDes and wait for reset bit return low */ - if (rc != 0) - return rc; - - bnx2x_set_aer_mmd(params, phy); - /* setting the masterLn_def again after the reset */ - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { - bnx2x_set_master_ln(params, phy); - bnx2x_set_swap_lanes(params, phy); - } - - return rc; -} - -static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, - struct bnx2x_phy *phy, - struct link_params *params) -{ - u16 cnt, ctrl; - /* Wait for soft reset to get cleared up to 1 sec */ - for (cnt = 0; cnt < 1000; cnt++) { - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) - bnx2x_cl22_read(bp, phy, - MDIO_PMA_REG_CTRL, &ctrl); - else - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, &ctrl); - if (!(ctrl & (1<<15))) - break; - msleep(1); - } - - if (cnt == 1000) - netdev_err(bp->dev, "Warning: PHY was not initialized," - " Port %d\n", - params->port); - DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt); - return cnt; -} - -static void bnx2x_link_int_enable(struct link_params *params) -{ - u8 port = params->port; - u32 mask; - struct bnx2x *bp = params->bp; - - /* Setting the status to report on link up for either XGXS or SerDes */ - if (CHIP_IS_E3(bp)) { - mask = NIG_MASK_XGXS0_LINK_STATUS; - if (!(SINGLE_MEDIA_DIRECT(params))) - mask |= NIG_MASK_MI_INT; - } else if (params->switch_cfg == SWITCH_CFG_10G) { - mask = (NIG_MASK_XGXS0_LINK10G | - NIG_MASK_XGXS0_LINK_STATUS); - DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n"); - if (!(SINGLE_MEDIA_DIRECT(params)) && - params->phy[INT_PHY].type != - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) { - mask |= NIG_MASK_MI_INT; - DP(NETIF_MSG_LINK, "enabled external phy int\n"); - } - - } else { /* SerDes */ - mask = NIG_MASK_SERDES0_LINK_STATUS; - DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n"); - if (!(SINGLE_MEDIA_DIRECT(params)) && - 
params->phy[INT_PHY].type != - PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) { - mask |= NIG_MASK_MI_INT; - DP(NETIF_MSG_LINK, "enabled external phy int\n"); - } - } - bnx2x_bits_en(bp, - NIG_REG_MASK_INTERRUPT_PORT0 + port*4, - mask); - - DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port, - (params->switch_cfg == SWITCH_CFG_10G), - REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); - DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n", - REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), - REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), - REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c)); - DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", - REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), - REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); -} - -static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, - u8 exp_mi_int) -{ - u32 latch_status = 0; - - /* - * Disable the MI INT ( external phy int ) by writing 1 to the - * status register. Link down indication is high-active-signal, - * so in this case we need to write the status to clear the XOR - */ - /* Read Latched signals */ - latch_status = REG_RD(bp, - NIG_REG_LATCH_STATUS_0 + port*8); - DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status); - /* Handle only those with latched-signal=up.*/ - if (exp_mi_int) - bnx2x_bits_en(bp, - NIG_REG_STATUS_INTERRUPT_PORT0 - + port*4, - NIG_STATUS_EMAC0_MI_INT); - else - bnx2x_bits_dis(bp, - NIG_REG_STATUS_INTERRUPT_PORT0 - + port*4, - NIG_STATUS_EMAC0_MI_INT); - - if (latch_status & 1) { - - /* For all latched-signal=up : Re-Arm Latch signals */ - REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, - (latch_status & 0xfffe) | (latch_status & 1)); - } - /* For all latched-signal=up,Write original_signal to status */ -} - -static void bnx2x_link_int_ack(struct link_params *params, - struct link_vars *vars, u8 is_10g_plus) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - u32 mask; - /* - * First reset all status we assume only one line will be - * change at a time - */ - bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, - (NIG_STATUS_XGXS0_LINK10G | - NIG_STATUS_XGXS0_LINK_STATUS | - NIG_STATUS_SERDES0_LINK_STATUS)); - if (vars->phy_link_up) { - if (USES_WARPCORE(bp)) - mask = NIG_STATUS_XGXS0_LINK_STATUS; - else { - if (is_10g_plus) - mask = NIG_STATUS_XGXS0_LINK10G; - else if (params->switch_cfg == SWITCH_CFG_10G) { - /* - * Disable the link interrupt by writing 1 to - * the relevant lane in the status register - */ - u32 ser_lane = - ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); - mask = ((1 << ser_lane) << - NIG_STATUS_XGXS0_LINK_STATUS_SIZE); - } else - mask = NIG_STATUS_SERDES0_LINK_STATUS; - } - DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n", - mask); - bnx2x_bits_en(bp, - NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, - mask); - } -} - -static int bnx2x_format_ver(u32 num, u8 *str, u16 *len) -{ - u8 *str_ptr = str; - u32 mask = 0xf0000000; - u8 shift = 8*4; - u8 digit; - u8 remove_leading_zeros = 1; - if (*len < 10) { - /* Need more than 10chars for this format */ - *str_ptr = '\0'; - (*len)--; - return -EINVAL; - } - while (shift > 0) { - - shift -= 4; - digit = ((num & mask) >> shift); - if (digit == 0 && remove_leading_zeros) { - mask = mask >> 4; - continue; - } else if (digit < 0xa) - *str_ptr = digit + '0'; - else - *str_ptr = digit - 0xa + 'a'; - remove_leading_zeros = 0; - str_ptr++; - (*len)--; - mask = mask >> 4; - if (shift == 
4*4) { - *str_ptr = '.'; - str_ptr++; - (*len)--; - remove_leading_zeros = 1; - } - } - return 0; -} - - -static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) -{ - str[0] = '\0'; - (*len)--; - return 0; -} - -int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, - u8 *version, u16 len) -{ - struct bnx2x *bp; - u32 spirom_ver = 0; - int status = 0; - u8 *ver_p = version; - u16 remain_len = len; - if (version == NULL || params == NULL) - return -EINVAL; - bp = params->bp; - - /* Extract first external phy*/ - version[0] = '\0'; - spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr); - - if (params->phy[EXT_PHY1].format_fw_ver) { - status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver, - ver_p, - &remain_len); - ver_p += (len - remain_len); - } - if ((params->num_phys == MAX_PHYS) && - (params->phy[EXT_PHY2].ver_addr != 0)) { - spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr); - if (params->phy[EXT_PHY2].format_fw_ver) { - *ver_p = '/'; - ver_p++; - remain_len--; - status |= params->phy[EXT_PHY2].format_fw_ver( - spirom_ver, - ver_p, - &remain_len); - ver_p = version + (len - remain_len); - } - } - *ver_p = '\0'; - return status; -} - -static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, - struct link_params *params) -{ - u8 port = params->port; - struct bnx2x *bp = params->bp; - - if (phy->req_line_speed != SPEED_1000) { - u32 md_devad = 0; - - DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); - - if (!CHIP_IS_E3(bp)) { - /* change the uni_phy_addr in the nig */ - md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + - port*0x18)); - - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, - 0x5); - } - - bnx2x_cl45_write(bp, phy, - 5, - (MDIO_REG_BANK_AER_BLOCK + - (MDIO_AER_BLOCK_AER_REG & 0xf)), - 0x2800); - - bnx2x_cl45_write(bp, phy, - 5, - (MDIO_REG_BANK_CL73_IEEEB0 + - (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), - 0x6041); - msleep(200); - /* set aer mmd back */ - bnx2x_set_aer_mmd(params, phy); - - if (!CHIP_IS_E3(bp)) { - /* and md_devad */ - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, - md_devad); - } - } else { - u16 mii_ctrl; - DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); - bnx2x_cl45_read(bp, phy, 5, - (MDIO_REG_BANK_COMBO_IEEE0 + - (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), - &mii_ctrl); - bnx2x_cl45_write(bp, phy, 5, - (MDIO_REG_BANK_COMBO_IEEE0 + - (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), - mii_ctrl | - MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK); - } -} - -int bnx2x_set_led(struct link_params *params, - struct link_vars *vars, u8 mode, u32 speed) -{ - u8 port = params->port; - u16 hw_led_mode = params->hw_led_mode; - int rc = 0; - u8 phy_idx; - u32 tmp; - u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); - DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", - speed, hw_led_mode); - /* In case */ - for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) { - if (params->phy[phy_idx].set_link_led) { - params->phy[phy_idx].set_link_led( - &params->phy[phy_idx], params, mode); - } - } - - switch (mode) { - case LED_MODE_FRONT_PANEL_OFF: - case LED_MODE_OFF: - REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); - REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, - SHARED_HW_CFG_LED_MAC1); - - tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); - EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); - break; - - case LED_MODE_OPER: - /* - * For all other phys, OPER mode is same as ON, so in case - * link is down, do nothing - */ - if (!vars->link_up) - break; - case LED_MODE_ON: - if (((params->phy[EXT_PHY1].type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || - (params->phy[EXT_PHY1].type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && - CHIP_IS_E2(bp) && params->num_phys == 2) { - /* - * This is a work-around for E2+8727 Configurations - */ - if (mode == LED_MODE_ON || - speed == SPEED_10000){ - REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); - REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); - - tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); - EMAC_WR(bp, EMAC_REG_EMAC_LED, - (tmp | EMAC_LED_OVERRIDE)); - /* - * return here without enabling traffic - * LED blink and setting rate in ON mode. - * In oper mode, enabling LED blink - * and setting rate is needed. - */ - if (mode == LED_MODE_ON) - return rc; - } - } else if (SINGLE_MEDIA_DIRECT(params)) { - /* - * This is a work-around for HW issue found when link - * is up in CL73 - */ - REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); - if (CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp) || - (mode == LED_MODE_ON)) - REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); - else - REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, - hw_led_mode); - } else - REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); - - REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); - /* Set blinking rate to ~15.9Hz */ - REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, - LED_BLINK_RATE_VAL); - REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + - port*4, 1); - tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); - EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE))); - - if (CHIP_IS_E1(bp) && - ((speed == SPEED_2500) || - (speed == SPEED_1000) || - (speed == SPEED_100) || - (speed == SPEED_10))) { - /* - * On Everest 1 Ax chip versions for speeds less than - * 10G LED scheme is different - */ - REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 - + port*4, 1); - REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + - port*4, 0); - REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + - port*4, 1); - } - break; - - default: - rc = -EINVAL; - DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n", - mode); - break; - } - return rc; - -} - -/* - * This function comes to reflect the actual link state read DIRECTLY from the - * HW - */ -int bnx2x_test_link(struct link_params *params, struct link_vars *vars, - u8 is_serdes) -{ - struct bnx2x *bp = params->bp; - u16 gp_status = 0, phy_index = 0; - u8 ext_phy_link_up = 0, serdes_phy_type; - struct link_vars temp_vars; - struct bnx2x_phy *int_phy = &params->phy[INT_PHY]; - - if (CHIP_IS_E3(bp)) { - u16 link_up; - if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] - > SPEED_10000) { - /* Check 20G link */ - bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, - 1, &link_up); - 
bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, - 1, &link_up); - link_up &= (1<<2); - } else { - /* Check 10G link and below*/ - u8 lane = bnx2x_get_warpcore_lane(int_phy, params); - bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, - MDIO_WC_REG_GP2_STATUS_GP_2_1, - &gp_status); - gp_status = ((gp_status >> 8) & 0xf) | - ((gp_status >> 12) & 0xf); - link_up = gp_status & (1 << lane); - } - if (!link_up) - return -ESRCH; - } else { - CL22_RD_OVER_CL45(bp, int_phy, - MDIO_REG_BANK_GP_STATUS, - MDIO_GP_STATUS_TOP_AN_STATUS1, - &gp_status); - /* link is up only if both local phy and external phy are up */ - if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) - return -ESRCH; - } - /* In XGXS loopback mode, do not check external PHY */ - if (params->loopback_mode == LOOPBACK_XGXS) - return 0; - - switch (params->num_phys) { - case 1: - /* No external PHY */ - return 0; - case 2: - ext_phy_link_up = params->phy[EXT_PHY1].read_status( - &params->phy[EXT_PHY1], - params, &temp_vars); - break; - case 3: /* Dual Media */ - for (phy_index = EXT_PHY1; phy_index < params->num_phys; - phy_index++) { - serdes_phy_type = ((params->phy[phy_index].media_type == - ETH_PHY_SFP_FIBER) || - (params->phy[phy_index].media_type == - ETH_PHY_XFP_FIBER) || - (params->phy[phy_index].media_type == - ETH_PHY_DA_TWINAX)); - - if (is_serdes != serdes_phy_type) - continue; - if (params->phy[phy_index].read_status) { - ext_phy_link_up |= - params->phy[phy_index].read_status( - &params->phy[phy_index], - params, &temp_vars); - } - } - break; - } - if (ext_phy_link_up) - return 0; - return -ESRCH; -} - -static int bnx2x_link_initialize(struct link_params *params, - struct link_vars *vars) -{ - int rc = 0; - u8 phy_index, non_ext_phy; - struct bnx2x *bp = params->bp; - /* - * In case of external phy existence, the line speed would be the - * line speed linked up by the external phy. In case it is direct - * only, then the line_speed during initialization will be - * equal to the req_line_speed - */ - vars->line_speed = params->phy[INT_PHY].req_line_speed; - - /* - * Initialize the internal phy in case this is a direct board - * (no external phys), or this board has external phy which requires - * to be initialized first. - */ - if (!USES_WARPCORE(bp)) - bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars); - /* init ext phy and enable link state int */ - non_ext_phy = (SINGLE_MEDIA_DIRECT(params) || - (params->loopback_mode == LOOPBACK_XGXS)); - - if (non_ext_phy || - (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) || - (params->loopback_mode == LOOPBACK_EXT_PHY)) { - struct bnx2x_phy *phy = &params->phy[INT_PHY]; - if (vars->line_speed == SPEED_AUTO_NEG && - (CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp))) - bnx2x_set_parallel_detection(phy, params); - if (params->phy[INT_PHY].config_init) - params->phy[INT_PHY].config_init(phy, - params, - vars); - } - - /* Init external phy*/ - if (non_ext_phy) { - if (params->phy[INT_PHY].supported & - SUPPORTED_FIBRE) - vars->link_status |= LINK_STATUS_SERDES_LINK; - } else { - for (phy_index = EXT_PHY1; phy_index < params->num_phys; - phy_index++) { - /* - * No need to initialize second phy in case of first - * phy only selection. In case of second phy, we do - * need to initialize the first phy, since they are - * connected. 
- */ - if (params->phy[phy_index].supported & - SUPPORTED_FIBRE) - vars->link_status |= LINK_STATUS_SERDES_LINK; - - if (phy_index == EXT_PHY2 && - (bnx2x_phy_selection(params) == - PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { - DP(NETIF_MSG_LINK, "Not initializing" - " second phy\n"); - continue; - } - params->phy[phy_index].config_init( - &params->phy[phy_index], - params, vars); - } - } - /* Reset the interrupt indication after phy was initialized */ - bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + - params->port*4, - (NIG_STATUS_XGXS0_LINK10G | - NIG_STATUS_XGXS0_LINK_STATUS | - NIG_STATUS_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); - bnx2x_update_mng(params, vars->link_status); - return rc; -} - -static void bnx2x_int_link_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - /* reset the SerDes/XGXS */ - REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, - (0x1ff << (params->port*16))); -} - -static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u8 gpio_port; - /* HW reset */ - if (CHIP_IS_E2(bp)) - gpio_port = BP_PATH(bp); - else - gpio_port = params->port; - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - gpio_port); - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - gpio_port); - DP(NETIF_MSG_LINK, "reset external PHY\n"); -} - -static int bnx2x_update_link_down(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - - DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); - bnx2x_set_led(params, vars, LED_MODE_OFF, 0); - vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; - /* indicate no mac active */ - vars->mac_type = MAC_TYPE_NONE; - - /* update shared memory */ - vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | - LINK_STATUS_LINK_UP | - LINK_STATUS_PHYSICAL_LINK_FLAG | - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | - LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | - LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | - LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK); - vars->line_speed = 0; - bnx2x_update_mng(params, vars->link_status); - - /* activate nig drain */ - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); - - /* disable emac */ - if (!CHIP_IS_E3(bp)) - REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - - msleep(10); - /* reset BigMac/Xmac */ - if (CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp)) { - bnx2x_bmac_rx_disable(bp, params->port); - REG_WR(bp, GRCBASE_MISC + - MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - } - if (CHIP_IS_E3(bp)) - bnx2x_xmac_disable(params); - - return 0; -} - -static int bnx2x_update_link_up(struct link_params *params, - struct link_vars *vars, - u8 link_10g) -{ - struct bnx2x *bp = params->bp; - u8 port = params->port; - int rc = 0; - - vars->link_status |= (LINK_STATUS_LINK_UP | - LINK_STATUS_PHYSICAL_LINK_FLAG); - vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; - - if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) - vars->link_status |= - LINK_STATUS_TX_FLOW_CONTROL_ENABLED; - - if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) - vars->link_status |= - LINK_STATUS_RX_FLOW_CONTROL_ENABLED; - if (USES_WARPCORE(bp)) { - if (link_10g) { - if (bnx2x_xmac_enable(params, vars, 0) == - -ESRCH) { - DP(NETIF_MSG_LINK, "Found errors on XMAC\n"); - vars->link_up = 0; - vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; - vars->link_status &= ~LINK_STATUS_LINK_UP; - } - } else - bnx2x_umac_enable(params, vars, 0); - bnx2x_set_led(params, vars, - LED_MODE_OPER, 
vars->line_speed); - } - if ((CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp))) { - if (link_10g) { - if (bnx2x_bmac_enable(params, vars, 0) == - -ESRCH) { - DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); - vars->link_up = 0; - vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; - vars->link_status &= ~LINK_STATUS_LINK_UP; - } - - bnx2x_set_led(params, vars, - LED_MODE_OPER, SPEED_10000); - } else { - rc = bnx2x_emac_program(params, vars); - bnx2x_emac_enable(params, vars, 0); - - /* AN complete? */ - if ((vars->link_status & - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) - && (!(vars->phy_flags & PHY_SGMII_FLAG)) && - SINGLE_MEDIA_DIRECT(params)) - bnx2x_set_gmii_tx_driver(params); - } - } - - /* PBF - link up */ - if (CHIP_IS_E1x(bp)) - rc |= bnx2x_pbf_update(params, vars->flow_ctrl, - vars->line_speed); - - /* disable drain */ - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); - - /* update shared memory */ - bnx2x_update_mng(params, vars->link_status); - msleep(20); - return rc; -} -/* - * The bnx2x_link_update function should be called upon link - * interrupt. - * Link is considered up as follows: - * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs - * to be up - * - SINGLE_MEDIA - The link between the 577xx and the external - * phy (XGXS) needs to be up as well as the external link of the - * phy (PHY_EXT1) - * - DUAL_MEDIA - The link between the 577xx and the first - * external phy needs to be up, and at least one of the 2 - * external phy link must be up. - */ -int bnx2x_link_update(struct link_params *params, struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - struct link_vars phy_vars[MAX_PHYS]; - u8 port = params->port; - u8 link_10g_plus, phy_index; - u8 ext_phy_link_up = 0, cur_link_up; - int rc = 0; - u8 is_mi_int = 0; - u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; - u8 active_external_phy = INT_PHY; - vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; - for (phy_index = INT_PHY; phy_index < params->num_phys; - phy_index++) { - phy_vars[phy_index].flow_ctrl = 0; - phy_vars[phy_index].link_status = 0; - phy_vars[phy_index].line_speed = 0; - phy_vars[phy_index].duplex = DUPLEX_FULL; - phy_vars[phy_index].phy_link_up = 0; - phy_vars[phy_index].link_up = 0; - phy_vars[phy_index].fault_detected = 0; - } - - if (USES_WARPCORE(bp)) - bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]); - - DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", - port, (vars->phy_flags & PHY_XGXS_FLAG), - REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); - - is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + - port*0x18) > 0); - DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n", - REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), - is_mi_int, - REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c)); - - DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", - REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), - REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); - - /* disable emac */ - if (!CHIP_IS_E3(bp)) - REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - - /* - * Step 1: - * Check external link change only for external phys, and apply - * priority selection between them in case the link on both phys - * is up. 
Note that instead of the common vars, a temporary
- * vars argument is used since each phy may have different link/
- * speed/duplex result
- */
- for (phy_index = EXT_PHY1; phy_index < params->num_phys;
- phy_index++) {
- struct bnx2x_phy *phy = &params->phy[phy_index];
- if (!phy->read_status)
- continue;
- /* Read link status and params of this ext phy */
- cur_link_up = phy->read_status(phy, params,
- &phy_vars[phy_index]);
- if (cur_link_up) {
- DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
- phy_index);
- } else {
- DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
- phy_index);
- continue;
- }
-
- if (!ext_phy_link_up) {
- ext_phy_link_up = 1;
- active_external_phy = phy_index;
- } else {
- switch (bnx2x_phy_selection(params)) {
- case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
- case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
- /*
- * In this option, the first PHY makes sure to pass the
- * traffic through itself only.
- * It's not clear how to reset the link on the second phy.
- */
- active_external_phy = EXT_PHY1;
- break;
- case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
- /*
- * In this option, the first PHY makes sure to pass the
- * traffic through the second PHY.
- */
- active_external_phy = EXT_PHY2;
- break;
- default:
- /*
- * Link indication on both PHYs with the following cases
- * is invalid:
- * - FIRST_PHY means that second phy wasn't initialized,
- * hence its link is expected to be down
- * - SECOND_PHY means that first phy should not be able
- * to link up by itself (using configuration)
- * - DEFAULT should be overridden during initialization
- */
- DP(NETIF_MSG_LINK, "Invalid link indication"
- " mpc=0x%x. DISABLING LINK !!!\n",
- params->multi_phy_config);
- ext_phy_link_up = 0;
- break;
- }
- }
- }
- prev_line_speed = vars->line_speed;
- /*
- * Step 2:
- * Read the status of the internal phy. In case of
- * DIRECT_SINGLE_MEDIA board, this link is the external link,
- * otherwise this is the link between the 577xx and the first
- * external phy
- */
- if (params->phy[INT_PHY].read_status)
- params->phy[INT_PHY].read_status(
- &params->phy[INT_PHY],
- params, vars);
- /*
- * The INT_PHY flow control resides in the vars. This includes the
- * case where the speed or flow control are not set to AUTO.
- * Otherwise, the active external phy flow control result is set
- * to the vars. The ext_phy_line_speed is needed to check if the
- * speed is different between the internal phy and external phy.
- * This case may be a result of an intermediate link speed change.
- */
- if (active_external_phy > INT_PHY) {
- vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
- /*
- * Link speed is taken from the XGXS. AN and FC result from
- * the external phy.
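 *
 * (A hedged sketch of the priority selection applied in Step 1 above;
 * the helper name is illustrative only:
 *
 *	static u8 sketch_pick_active_ext_phy(u32 selection)
 *	{
 *		switch (selection) {
 *		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
 *		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
 *			return EXT_PHY1;
 *		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
 *			return EXT_PHY2;
 *		default:
 *			return MAX_PHYS;
 *		}
 *	}
 *
 * where the MAX_PHYS return stands in for the "invalid configuration,
 * force link down" branch.)
 *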
- */
- vars->link_status |= phy_vars[active_external_phy].link_status;
-
- /*
- * If the active_external_phy is the first PHY and its link is up,
- * disable TX on the second external PHY.
- */
- if (active_external_phy == EXT_PHY1) {
- if (params->phy[EXT_PHY2].phy_specific_func) {
- DP(NETIF_MSG_LINK, "Disabling TX on"
- " EXT_PHY2\n");
- params->phy[EXT_PHY2].phy_specific_func(
- &params->phy[EXT_PHY2],
- params, DISABLE_TX);
- }
- }
-
- ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
- vars->duplex = phy_vars[active_external_phy].duplex;
- if (params->phy[active_external_phy].supported &
- SUPPORTED_FIBRE)
- vars->link_status |= LINK_STATUS_SERDES_LINK;
- else
- vars->link_status &= ~LINK_STATUS_SERDES_LINK;
- DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
- active_external_phy);
- }
-
- for (phy_index = EXT_PHY1; phy_index < params->num_phys;
- phy_index++) {
- if (params->phy[phy_index].flags &
- FLAGS_REARM_LATCH_SIGNAL) {
- bnx2x_rearm_latch_signal(bp, port,
- phy_index ==
- active_external_phy);
- break;
- }
- }
- DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
- " ext_phy_line_speed = %d\n", vars->flow_ctrl,
- vars->link_status, ext_phy_line_speed);
- /*
- * Upon a link speed change, set the NIG into drain mode. This deals
- * with a possible FIFO glitch due to the clock change when the speed
- * is decreased without a link down indicator.
- */
-
- if (vars->phy_link_up) {
- if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
- (ext_phy_line_speed != vars->line_speed)) {
- DP(NETIF_MSG_LINK, "Internal link speed %d is"
- " different than the external"
- " link speed %d\n", vars->line_speed,
- ext_phy_line_speed);
- vars->phy_link_up = 0;
- } else if (prev_line_speed != vars->line_speed) {
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
- 0);
- msleep(1);
- }
- }
-
- /* anything 10G and over uses the bmac */
- link_10g_plus = (vars->line_speed >= SPEED_10000);
-
- bnx2x_link_int_ack(params, vars, link_10g_plus);
-
- /*
- * In case the external phy link is up while the internal link is
- * down (probably not initialized yet after link initialization), it
- * needs to be initialized.
- * Note that after a link down-up as a result of a cable plug, the
- * xgxs link would probably become up again without the need to
- * initialize it.
- */
- if (!(SINGLE_MEDIA_DIRECT(params))) {
- DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
- " init_preceding = %d\n", ext_phy_link_up,
- vars->phy_link_up,
- params->phy[EXT_PHY1].flags &
- FLAGS_INIT_XGXS_FIRST);
- if (!(params->phy[EXT_PHY1].flags &
- FLAGS_INIT_XGXS_FIRST)
- && ext_phy_link_up && !vars->phy_link_up) {
- vars->line_speed = ext_phy_line_speed;
- if (vars->line_speed < SPEED_1000)
- vars->phy_flags |= PHY_SGMII_FLAG;
- else
- vars->phy_flags &= ~PHY_SGMII_FLAG;
-
- if (params->phy[INT_PHY].config_init)
- params->phy[INT_PHY].config_init(
- &params->phy[INT_PHY], params,
- vars);
- }
- }
- /*
- * Link is up only if both local phy and external phy (in case of
- * non-direct board) are up and no fault detected on active PHY.
- */ - vars->link_up = (vars->phy_link_up && - (ext_phy_link_up || - SINGLE_MEDIA_DIRECT(params)) && - (phy_vars[active_external_phy].fault_detected == 0)); - - if (vars->link_up) - rc = bnx2x_update_link_up(params, vars, link_10g_plus); - else - rc = bnx2x_update_link_down(params, vars); - - return rc; -} - - -/*****************************************************************************/ -/* External Phy section */ -/*****************************************************************************/ -void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) -{ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - msleep(1); - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); -} - -static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, - u32 spirom_ver, u32 ver_addr) -{ - DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n", - (u16)(spirom_ver>>16), (u16)spirom_ver, port); - - if (ver_addr) - REG_WR(bp, ver_addr, spirom_ver); -} - -static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, - struct bnx2x_phy *phy, - u8 port) -{ - u16 fw_ver1, fw_ver2; - - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER1, &fw_ver1); - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, &fw_ver2); - bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), - phy->ver_addr); -} - -static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp, - struct bnx2x_phy *phy, - struct link_vars *vars) -{ - u16 val; - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_STATUS, &val); - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_STATUS, &val); - if (val & (1<<5)) - vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; - if ((val & (1<<0)) == 0) - vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED; -} - -/******************************************************************/ -/* common BCM8073/BCM8727 PHY SECTION */ -/******************************************************************/ -static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - if (phy->req_line_speed == SPEED_10 || - phy->req_line_speed == SPEED_100) { - vars->flow_ctrl = phy->req_flow_ctrl; - return; - } - - if (bnx2x_ext_phy_resolve_fc(phy, params, vars) && - (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) { - u16 pause_result; - u16 ld_pause; /* local */ - u16 lp_pause; /* link partner */ - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_CL37_FC_LD, &ld_pause); - - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_CL37_FC_LP, &lp_pause); - pause_result = (ld_pause & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5; - pause_result |= (lp_pause & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; - - bnx2x_pause_resolve(vars, pause_result); - DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n", - pause_result); - } -} -static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, - struct bnx2x_phy *phy, - u8 port) -{ - u32 count = 0; - u16 fw_ver1, fw_msgout; - int rc = 0; - - /* Boot port from external ROM */ - /* EDC grst */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - 0x0001); - - /* ucode reboot and rst */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - 0x008c); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0001); - - /* Reset internal microprocessor */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - 
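/*
 * The CL37 resolution in bnx2x_8073_resolve_fc() above packs both
 * advertisements into a single nibble: bits 3:2 are the local
 * ASM_DIR/PAUSE bits, bits 1:0 the link partner's. Assuming the
 * *_PAUSE_BOTH mask covers register bits 8:7 (which the >>5 and >>7
 * shifts imply):
 *
 *	pause_result  = (ld_pause & 0x0180) >> 5;
 *	pause_result |= (lp_pause & 0x0180) >> 7;
 *
 * e.g. local PAUSE+ASM_DIR and a partner advertising PAUSE only give
 * pause_result = 0xc | 0x1 = 0xd, which bnx2x_pause_resolve() maps to
 * the final BNX2X_FLOW_CTRL_* value.
 */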
MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); - - /* Release srst bit */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); - - /* Delay 100ms per the PHY specifications */ - msleep(100); - - /* 8073 sometimes taking longer to download */ - do { - count++; - if (count > 300) { - DP(NETIF_MSG_LINK, - "bnx2x_8073_8727_external_rom_boot port %x:" - "Download failed. fw version = 0x%x\n", - port, fw_ver1); - rc = -EINVAL; - break; - } - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER1, &fw_ver1); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); - - msleep(1); - } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || - ((fw_msgout & 0xff) != 0x03 && (phy->type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))); - - /* Clear ser_boot_ctl bit */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0000); - bnx2x_save_bcm_spirom_ver(bp, phy, port); - - DP(NETIF_MSG_LINK, - "bnx2x_8073_8727_external_rom_boot port %x:" - "Download complete. fw version = 0x%x\n", - port, fw_ver1); - - return rc; -} - -/******************************************************************/ -/* BCM8073 PHY SECTION */ -/******************************************************************/ -static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) -{ - /* This is only required for 8073A1, version 102 only */ - u16 val; - - /* Read 8073 HW revision*/ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_CHIP_REV, &val); - - if (val != 1) { - /* No need to workaround in 8073 A1 */ - return 0; - } - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, &val); - - /* SNR should be applied only for version 0x102 */ - if (val != 0x102) - return 0; - - return 1; -} - -static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) -{ - u16 val, cnt, cnt1 ; - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_CHIP_REV, &val); - - if (val > 0) { - /* No need to workaround in 8073 A1 */ - return 0; - } - /* XAUI workaround in 8073 A0: */ - - /* - * After loading the boot ROM and restarting Autoneg, poll - * Dev1, Reg $C820: - */ - - for (cnt = 0; cnt < 1000; cnt++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_SPEED_LINK_STATUS, - &val); - /* - * If bit [14] = 0 or bit [13] = 0, continue on with - * system initialization (XAUI work-around not required, as - * these bits indicate 2.5G or 1G link up). - */ - if (!(val & (1<<14)) || !(val & (1<<13))) { - DP(NETIF_MSG_LINK, "XAUI work-around not required\n"); - return 0; - } else if (!(val & (1<<15))) { - DP(NETIF_MSG_LINK, "bit 15 went off\n"); - /* - * If bit 15 is 0, then poll Dev1, Reg $C841 until it's - * MSB (bit15) goes to 1 (indicating that the XAUI - * workaround has completed), then continue on with - * system initialization. 
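 *
 * The ROM-boot wait earlier in this block is the driver's standard
 * bounded-poll shape: re-read a status register with a small sleep
 * until the exit condition holds or a retry budget runs out. A
 * generic sketch (the helper and its parameters are illustrative):
 *
 *	static int sketch_poll(int (*done)(void *arg), void *arg,
 *			       int retries, unsigned int delay_ms)
 *	{
 *		while (retries--) {
 *			if (done(arg))
 *				return 0;
 *			msleep(delay_ms);
 *		}
 *		return -ETIMEDOUT;
 *	}
 *
 * The 8073 download loop uses about 300 iterations at 1 ms on top of
 * the fixed 100 ms post-reset delay.
 *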
- */ - for (cnt1 = 0; cnt1 < 1000; cnt1++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_XAUI_WA, &val); - if (val & (1<<15)) { - DP(NETIF_MSG_LINK, - "XAUI workaround has completed\n"); - return 0; - } - msleep(3); - } - break; - } - msleep(3); - } - DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n"); - return -EINVAL; -} - -static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy) -{ - /* Force KR or KX */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); -} - -static void bnx2x_8073_set_pause_cl37(struct link_params *params, - struct bnx2x_phy *phy, - struct link_vars *vars) -{ - u16 cl37_val; - struct bnx2x *bp = params->bp; - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val); - - cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ - bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) { - cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; - } - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { - cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; - } - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { - cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - } - DP(NETIF_MSG_LINK, - "Ext phy AN advertize cl37 0x%x\n", cl37_val); - - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val); - msleep(500); -} - -static int bnx2x_8073_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 val = 0, tmp1; - u8 gpio_port; - DP(NETIF_MSG_LINK, "Init 8073\n"); - - if (CHIP_IS_E2(bp)) - gpio_port = BP_PATH(bp); - else - gpio_port = params->port; - /* Restore normal power mode*/ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); - - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); - - /* enable LASI */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); - - bnx2x_8073_set_pause_cl37(params, phy, vars); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); - - DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); - - /* Swap polarity if required - Must be done only in non-1G mode */ - if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { - /* Configure the 8073 to swap _P and _N of the KR lines */ - DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n"); - /* 10G Rx/Tx and 1G Tx signal polarity swap */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, - (val | (3<<9))); - } - - - /* Enable CL37 BAM */ - if (REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. 
- port_hw_config[params->port].default_cfg)) & - PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { - - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8073_BAM, &val); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8073_BAM, val | 1); - DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); - } - if (params->loopback_mode == LOOPBACK_EXT) { - bnx2x_807x_force_10G(bp, phy); - DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); - return 0; - } else { - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002); - } - if (phy->req_line_speed != SPEED_AUTO_NEG) { - if (phy->req_line_speed == SPEED_10000) { - val = (1<<7); - } else if (phy->req_line_speed == SPEED_2500) { - val = (1<<5); - /* - * Note that 2.5G works only when used with 1G - * advertisement - */ - } else - val = (1<<5); - } else { - val = 0; - if (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) - val |= (1<<7); - - /* Note that 2.5G works only when used with 1G advertisement */ - if (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | - PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) - val |= (1<<5); - DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val); - } - - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val); - bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1); - - if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) && - (phy->req_line_speed == SPEED_AUTO_NEG)) || - (phy->req_line_speed == SPEED_2500)) { - u16 phy_ver; - /* Allow 2.5G for A1 and above */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, - &phy_ver); - DP(NETIF_MSG_LINK, "Add 2.5G\n"); - if (phy_ver > 0) - tmp1 |= 1; - else - tmp1 &= 0xfffe; - } else { - DP(NETIF_MSG_LINK, "Disable 2.5G\n"); - tmp1 &= 0xfffe; - } - - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1); - /* Add support for CL37 (passive mode) II */ - - bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, - (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ? - 0x20 : 0x40))); - - /* Add support for CL37 (passive mode) III */ - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); - - /* - * The SNR will improve about 2db by changing BW and FEE main - * tap. 
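 *
 * For reference, the autoneg advertisement word built above uses bit 7
 * for 10G and bit 5 for 1G; 2.5G has no bit of its own and rides on
 * the 1G bit plus MDIO_AN_REG_8073_2_5G. A sketch of the mapping:
 *
 *	static u16 sketch_8073_an_adv(u32 speed_cap_mask)
 *	{
 *		u16 val = 0;
 *
 *		if (speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 *			val |= 1 << 7;
 *		if (speed_cap_mask & (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
 *				      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
 *			val |= 1 << 5;
 *		return val;
 *	}
 *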
Rest commands are executed after link is up - * Change FFE main cursor to 5 in EDC register - */ - if (bnx2x_8073_is_snr_needed(bp, phy)) - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN, - 0xFB0C); - - /* Enable FEC (Forware Error Correction) Request in the AN */ - bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1); - tmp1 |= (1<<15); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1); - - bnx2x_ext_phy_set_pause(params, phy, vars); - - /* Restart autoneg */ - msleep(500); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); - DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n", - ((val & (1<<5)) > 0), ((val & (1<<7)) > 0)); - return 0; -} - -static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 link_up = 0; - u16 val1, val2; - u16 link_status = 0; - u16 an1000_status = 0; - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); - - DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1); - - /* clear the interrupt LASI status register */ - bnx2x_cl45_read(bp, phy, - MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); - bnx2x_cl45_read(bp, phy, - MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1); - DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1); - /* Clear MSG-OUT */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); - - /* Check the LASI */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); - - DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2); - - /* Check the link status */ - bnx2x_cl45_read(bp, phy, - MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); - DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); - link_up = ((val1 & 4) == 4); - DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1); - - if (link_up && - ((phy->req_line_speed != SPEED_10000))) { - if (bnx2x_8073_xaui_wa(bp, phy) != 0) - return 0; - } - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status); - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status); - - /* Check the link status on 1.1.2 */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); - DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x," - "an_link_status=0x%x\n", val2, val1, an1000_status); - - link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); - if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { - /* - * The SNR will improve about 2dbby changing the BW and FEE main - * tap. The 1st write to change FFE main tap is set before - * restart AN. 
Change PLL Bandwidth in EDC register - */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, - 0x26BC); - - /* Change CDR Bandwidth in EDC register */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH, - 0x0333); - } - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, - &link_status); - - /* Bits 0..2 --> speed detected, bits 13..15--> link is down */ - if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { - link_up = 1; - vars->line_speed = SPEED_10000; - DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n", - params->port); - } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) { - link_up = 1; - vars->line_speed = SPEED_2500; - DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n", - params->port); - } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { - link_up = 1; - vars->line_speed = SPEED_1000; - DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n", - params->port); - } else { - link_up = 0; - DP(NETIF_MSG_LINK, "port %x: External link is down\n", - params->port); - } - - if (link_up) { - /* Swap polarity if required */ - if (params->lane_config & - PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { - /* Configure the 8073 to swap P and N of the KR lines */ - bnx2x_cl45_read(bp, phy, - MDIO_XS_DEVAD, - MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); - /* - * Set bit 3 to invert Rx in 1G mode and clear this bit - * when it`s in 10G mode. - */ - if (vars->line_speed == SPEED_1000) { - DP(NETIF_MSG_LINK, "Swapping 1G polarity for" - "the 8073\n"); - val1 |= (1<<3); - } else - val1 &= ~(1<<3); - - bnx2x_cl45_write(bp, phy, - MDIO_XS_DEVAD, - MDIO_XS_REG_8073_RX_CTRL_PCIE, - val1); - } - bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); - bnx2x_8073_resolve_fc(phy, params, vars); - vars->duplex = DUPLEX_FULL; - } - return link_up; -} - -static void bnx2x_8073_link_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u8 gpio_port; - if (CHIP_IS_E2(bp)) - gpio_port = BP_PATH(bp); - else - gpio_port = params->port; - DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", - gpio_port); - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - gpio_port); -} - -/******************************************************************/ -/* BCM8705 PHY SECTION */ -/******************************************************************/ -static int bnx2x_8705_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "init 8705\n"); - /* Restore normal power mode*/ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); - /* HW reset */ - bnx2x_ext_phy_hw_reset(bp, params->port); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); - bnx2x_wait_reset_complete(bp, phy, params); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100); - bnx2x_cl45_write(bp, phy, - MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1); - /* BCM8705 doesn't have microcode, hence the 0 */ - bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0); - return 0; -} - -static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - u8 link_up = 0; - u16 val1, rx_sd; - struct bnx2x 
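/*
 * In MDIO_PMA_REG_8073_SPEED_LINK_STATUS, bits 2:0 flag the detected
 * speed and bits 15:13 the matching link-down indication, so "up at
 * speed N" is bit N set with bit N+13 clear. A sketch of the decode
 * used above:
 *
 *	static int sketch_8073_speed(u16 status)
 *	{
 *		if ((status & (1 << 2)) && !(status & (1 << 15)))
 *			return SPEED_10000;
 *		if ((status & (1 << 1)) && !(status & (1 << 14)))
 *			return SPEED_2500;
 *		if ((status & (1 << 0)) && !(status & (1 << 13)))
 *			return SPEED_1000;
 *		return 0;
 *	}
 */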
*bp = params->bp; - DP(NETIF_MSG_LINK, "read status 8705\n"); - bnx2x_cl45_read(bp, phy, - MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); - DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1); - - bnx2x_cl45_read(bp, phy, - MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); - DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, 0xc809, &val1); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, 0xc809, &val1); - - DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1); - link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0)); - if (link_up) { - vars->line_speed = SPEED_10000; - bnx2x_ext_phy_resolve_fc(phy, params, vars); - } - return link_up; -} - -/******************************************************************/ -/* SFP+ module Section */ -/******************************************************************/ -static void bnx2x_set_disable_pmd_transmit(struct link_params *params, - struct bnx2x_phy *phy, - u8 pmd_dis) -{ - struct bnx2x *bp = params->bp; - /* - * Disable transmitter only for bootcodes which can enable it afterwards - * (for D3 link) - */ - if (pmd_dis) { - if (params->feature_config_flags & - FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) - DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n"); - else { - DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n"); - return; - } - } else - DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n"); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_DISABLE, pmd_dis); -} - -static u8 bnx2x_get_gpio_port(struct link_params *params) -{ - u8 gpio_port; - u32 swap_val, swap_override; - struct bnx2x *bp = params->bp; - if (CHIP_IS_E2(bp)) - gpio_port = BP_PATH(bp); - else - gpio_port = params->port; - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - return gpio_port ^ (swap_val && swap_override); -} - -static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params, - struct bnx2x_phy *phy, - u8 tx_en) -{ - u16 val; - u8 port = params->port; - struct bnx2x *bp = params->bp; - u32 tx_en_mode; - - /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ - tx_en_mode = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].sfp_ctrl)) & - PORT_HW_CFG_TX_LASER_MASK; - DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x " - "mode = %x\n", tx_en, port, tx_en_mode); - switch (tx_en_mode) { - case PORT_HW_CFG_TX_LASER_MDIO: - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - &val); - - if (tx_en) - val &= ~(1<<15); - else - val |= (1<<15); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - val); - break; - case PORT_HW_CFG_TX_LASER_GPIO0: - case PORT_HW_CFG_TX_LASER_GPIO1: - case PORT_HW_CFG_TX_LASER_GPIO2: - case PORT_HW_CFG_TX_LASER_GPIO3: - { - u16 gpio_pin; - u8 gpio_port, gpio_mode; - if (tx_en) - gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH; - else - gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW; - - gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0; - gpio_port = bnx2x_get_gpio_port(params); - bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); - break; - } - default: - DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode); - break; - } -} - -static void bnx2x_sfp_set_transmitter(struct link_params *params, - struct bnx2x_phy *phy, - u8 tx_en) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", 
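/*
 * Note the logical '&&' in bnx2x_get_gpio_port() above: the two strap
 * registers are collapsed to 0 or 1 before the XOR, i.e.
 *
 *	swapped_port = port ^ ((swap_val != 0) && (swap_override != 0));
 *
 * so the port is flipped exactly when both the port-swap strap and its
 * override are non-zero.
 */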
tx_en); - if (CHIP_IS_E3(bp)) - bnx2x_sfp_e3_set_transmitter(params, phy, tx_en); - else - bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en); -} - -static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) -{ - struct bnx2x *bp = params->bp; - u16 val = 0; - u16 i; - if (byte_cnt > 16) { - DP(NETIF_MSG_LINK, "Reading from eeprom is" - " is limited to 0xf\n"); - return -EINVAL; - } - /* Set the read command byte count */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, - (byte_cnt | 0xa000)); - - /* Set the read command address */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, - addr); - - /* Activate read command */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, - 0x2c0f); - - /* Wait up to 500us for command complete status */ - for (i = 0; i < 100; i++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); - if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == - MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) - break; - udelay(5); - } - - if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != - MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { - DP(NETIF_MSG_LINK, - "Got bad status 0x%x when reading from SFP+ EEPROM\n", - (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); - return -EINVAL; - } - - /* Read the buffer */ - for (i = 0; i < byte_cnt; i++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); - o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); - } - - for (i = 0; i < 100; i++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); - if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == - MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) - return 0; - msleep(1); - } - return -EINVAL; -} - -static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, - u16 addr, u8 byte_cnt, - u8 *o_buf) -{ - int rc = 0; - u8 i, j = 0, cnt = 0; - u32 data_array[4]; - u16 addr32; - struct bnx2x *bp = params->bp; - /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:" - " addr %d, cnt %d\n", - addr, byte_cnt);*/ - if (byte_cnt > 16) { - DP(NETIF_MSG_LINK, "Reading from eeprom is" - " is limited to 16 bytes\n"); - return -EINVAL; - } - - /* 4 byte aligned address */ - addr32 = addr & (~0x3); - do { - rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, - data_array); - } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); - - if (rc == 0) { - for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) { - o_buf[j] = *((u8 *)data_array + i); - j++; - } - } - - return rc; -} - -static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) -{ - struct bnx2x *bp = params->bp; - u16 val, i; - - if (byte_cnt > 16) { - DP(NETIF_MSG_LINK, "Reading from eeprom is" - " is limited to 0xf\n"); - return -EINVAL; - } - - /* Need to read from 1.8000 to clear it */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, - &val); - - /* Set the read command byte count */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, - ((byte_cnt < 2) ? 
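/*
 * The 8726 and 8727 EEPROM reads share the same two-wire command
 * sequence; a condensed sketch of the steps coded above and below:
 *
 *	1. write the byte count to *_TWO_WIRE_BYTE_CNT;
 *	2. write the EEPROM address to *_TWO_WIRE_MEM_ADDR;
 *	3. kick the transfer via *_TWO_WIRE_CTRL;
 *	4. poll *_TWO_WIRE_CTRL for *_TWO_WIRE_STATUS_COMPLETE
 *	   (about 100 iterations of udelay(5), i.e. a ~500 us budget);
 *	5. copy *_TWO_WIRE_DATA_BUF[i], masking each 16-bit read down
 *	   to its low byte;
 *	6. poll for *_TWO_WIRE_STATUS_IDLE before the next transaction.
 */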
2 : byte_cnt)); - - /* Set the read command address */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, - addr); - /* Set the destination address */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - 0x8004, - MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); - - /* Activate read command */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, - 0x8002); - /* - * Wait appropriate time for two-wire command to finish before - * polling the status register - */ - msleep(1); - - /* Wait up to 500us for command complete status */ - for (i = 0; i < 100; i++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); - if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == - MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) - break; - udelay(5); - } - - if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != - MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { - DP(NETIF_MSG_LINK, - "Got bad status 0x%x when reading from SFP+ EEPROM\n", - (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); - return -EFAULT; - } - - /* Read the buffer */ - for (i = 0; i < byte_cnt; i++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); - o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); - } - - for (i = 0; i < 100; i++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); - if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == - MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) - return 0; - msleep(1); - } - - return -EINVAL; -} - -int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf) -{ - int rc = -EINVAL; - switch (phy->type) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: - rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: - rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); - break; - } - return rc; -} - -static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, - struct link_params *params, - u16 *edc_mode) -{ - struct bnx2x *bp = params->bp; - u32 sync_offset = 0, phy_idx, media_types; - u8 val, check_limiting_mode = 0; - *edc_mode = EDC_MODE_LIMITING; - - phy->media_type = ETH_PHY_UNSPECIFIED; - /* First check for copper cable */ - if (bnx2x_read_sfp_module_eeprom(phy, - params, - SFP_EEPROM_CON_TYPE_ADDR, - 1, - &val) != 0) { - DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); - return -EINVAL; - } - - switch (val) { - case SFP_EEPROM_CON_TYPE_VAL_COPPER: - { - u8 copper_module_type; - phy->media_type = ETH_PHY_DA_TWINAX; - /* - * Check if its active cable (includes SFP+ module) - * of passive cable - */ - if (bnx2x_read_sfp_module_eeprom(phy, - params, - SFP_EEPROM_FC_TX_TECH_ADDR, - 1, - &copper_module_type) != 0) { - DP(NETIF_MSG_LINK, - "Failed to read copper-cable-type" - " from SFP+ EEPROM\n"); - return -EINVAL; - } - - if (copper_module_type & - SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { - DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); - check_limiting_mode = 1; - } else if (copper_module_type & - SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { - DP(NETIF_MSG_LINK, "Passive Copper" - " cable detected\n"); - *edc_mode = - EDC_MODE_PASSIVE_DAC; - } else { - DP(NETIF_MSG_LINK, 
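/*
 * The warpcore EEPROM path above can only issue 4-byte-aligned BSC
 * reads, so the requested window is widened down to the previous
 * 4-byte boundary and the copy starts at the offset inside it:
 *
 *	addr32 = addr & ~0x3;
 *	shift  = addr - addr32;
 *	o_buf[j++] = ((u8 *)data_array)[shift + i];
 *
 * e.g. a 4-byte read at address 0x16 aligns to 0x14 and copies bytes
 * 2..5 of the fetched block.
 */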
"Unknown copper-cable-" - "type 0x%x !!!\n", copper_module_type); - return -EINVAL; - } - break; - } - case SFP_EEPROM_CON_TYPE_VAL_LC: - phy->media_type = ETH_PHY_SFP_FIBER; - DP(NETIF_MSG_LINK, "Optic module detected\n"); - check_limiting_mode = 1; - break; - default: - DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", - val); - return -EINVAL; - } - sync_offset = params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].media_type); - media_types = REG_RD(bp, sync_offset); - /* Update media type for non-PMF sync */ - for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { - if (&(params->phy[phy_idx]) == phy) { - media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << - (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); - media_types |= ((phy->media_type & - PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << - (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); - break; - } - } - REG_WR(bp, sync_offset, media_types); - if (check_limiting_mode) { - u8 options[SFP_EEPROM_OPTIONS_SIZE]; - if (bnx2x_read_sfp_module_eeprom(phy, - params, - SFP_EEPROM_OPTIONS_ADDR, - SFP_EEPROM_OPTIONS_SIZE, - options) != 0) { - DP(NETIF_MSG_LINK, "Failed to read Option" - " field from module EEPROM\n"); - return -EINVAL; - } - if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK)) - *edc_mode = EDC_MODE_LINEAR; - else - *edc_mode = EDC_MODE_LIMITING; - } - DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); - return 0; -} -/* - * This function read the relevant field from the module (SFP+), and verify it - * is compliant with this board - */ -static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u32 val, cmd; - u32 fw_resp, fw_cmd_param; - char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1]; - char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1]; - phy->flags &= ~FLAGS_SFP_NOT_APPROVED; - val = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. 
- port_feature_config[params->port].config)); - if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == - PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) { - DP(NETIF_MSG_LINK, "NOT enforcing module verification\n"); - return 0; - } - - if (params->feature_config_flags & - FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) { - /* Use specific phy request */ - cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL; - } else if (params->feature_config_flags & - FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) { - /* Use first phy request only in case of non-dual media*/ - if (DUAL_MEDIA(params)) { - DP(NETIF_MSG_LINK, "FW does not support OPT MDL " - "verification\n"); - return -EINVAL; - } - cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL; - } else { - /* No support in OPT MDL detection */ - DP(NETIF_MSG_LINK, "FW does not support OPT MDL " - "verification\n"); - return -EINVAL; - } - - fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl); - fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param); - if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) { - DP(NETIF_MSG_LINK, "Approved module\n"); - return 0; - } - - /* format the warning message */ - if (bnx2x_read_sfp_module_eeprom(phy, - params, - SFP_EEPROM_VENDOR_NAME_ADDR, - SFP_EEPROM_VENDOR_NAME_SIZE, - (u8 *)vendor_name)) - vendor_name[0] = '\0'; - else - vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; - if (bnx2x_read_sfp_module_eeprom(phy, - params, - SFP_EEPROM_PART_NO_ADDR, - SFP_EEPROM_PART_NO_SIZE, - (u8 *)vendor_pn)) - vendor_pn[0] = '\0'; - else - vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; - - netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected," - " Port %d from %s part number %s\n", - params->port, vendor_name, vendor_pn); - phy->flags |= FLAGS_SFP_NOT_APPROVED; - return -EINVAL; -} - -static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, - struct link_params *params) - -{ - u8 val; - struct bnx2x *bp = params->bp; - u16 timeout; - /* - * Initialization time after hot-plug may take up to 300ms for - * some phys type ( e.g. JDSU ) - */ - - for (timeout = 0; timeout < 60; timeout++) { - if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) - == 0) { - DP(NETIF_MSG_LINK, "SFP+ module initialization " - "took %d ms\n", timeout * 5); - return 0; - } - msleep(5); - } - return -EINVAL; -} - -static void bnx2x_8727_power_module(struct bnx2x *bp, - struct bnx2x_phy *phy, - u8 is_power_up) { - /* Make sure GPIOs are not using for LED mode */ - u16 val; - /* - * In the GPIO register, bit 4 is use to determine if the GPIOs are - * operating as INPUT or as OUTPUT. 
Bit 1 is for input, and 0 for - * output - * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 - * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1 - * where the 1st bit is the over-current(only input), and 2nd bit is - * for power( only output ) - * - * In case of NOC feature is disabled and power is up, set GPIO control - * as input to enable listening of over-current indication - */ - if (phy->flags & FLAGS_NOC) - return; - if (is_power_up) - val = (1<<4); - else - /* - * Set GPIO control to OUTPUT, and set the power bit - * to according to the is_power_up - */ - val = (1<<1); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_GPIO_CTRL, - val); -} - -static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp, - struct bnx2x_phy *phy, - u16 edc_mode) -{ - u16 cur_limiting_mode; - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - &cur_limiting_mode); - DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n", - cur_limiting_mode); - - if (edc_mode == EDC_MODE_LIMITING) { - DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n"); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - EDC_MODE_LIMITING); - } else { /* LRM mode ( default )*/ - - DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); - - /* - * Changing to LRM mode takes quite few seconds. So do it only - * if current mode is limiting (default is LRM) - */ - if (cur_limiting_mode != EDC_MODE_LIMITING) - return 0; - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_LRM_MODE, - 0); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - 0x128); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL0, - 0x4008); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_LRM_MODE, - 0xaaaa); - } - return 0; -} - -static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp, - struct bnx2x_phy *phy, - u16 edc_mode) -{ - u16 phy_identifier; - u16 rom_ver2_val; - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - &phy_identifier); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - (phy_identifier & ~(1<<9))); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - &rom_ver2_val); - /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - (phy_identifier | (1<<9))); - - return 0; -} - -static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, - struct link_params *params, - u32 action) -{ - struct bnx2x *bp = params->bp; - - switch (action) { - case DISABLE_TX: - bnx2x_sfp_set_transmitter(params, phy, 0); - break; - case ENABLE_TX: - if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) - bnx2x_sfp_set_transmitter(params, phy, 1); - break; - default: - DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", - action); - return; - } -} - -static void bnx2x_set_e1e2_module_fault_led(struct link_params *params, - u8 gpio_mode) -{ - struct bnx2x *bp = params->bp; - - u32 fault_led_gpio = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].sfp_ctrl)) & - PORT_HW_CFG_FAULT_MODULE_LED_MASK; - switch (fault_led_gpio) { - case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED: - return; - case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0: - case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1: - case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2: - case 
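/*
 * bnx2x_8727_set_limiting_mode() above keeps the EDC mode in the low
 * byte of ROM_VER2 and preserves the high byte, with the update
 * bracketed by clearing and then re-setting bit 9 of PHY_IDENTIFIER:
 *
 *	new_val = (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff);
 *
 * so only the mode byte changes while the version byte is retained.
 */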
PORT_HW_CFG_FAULT_MODULE_LED_GPIO3: - { - u8 gpio_port = bnx2x_get_gpio_port(params); - u16 gpio_pin = fault_led_gpio - - PORT_HW_CFG_FAULT_MODULE_LED_GPIO0; - DP(NETIF_MSG_LINK, "Set fault module-detected led " - "pin %x port %x mode %x\n", - gpio_pin, gpio_port, gpio_mode); - bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); - } - break; - default: - DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n", - fault_led_gpio); - } -} - -static void bnx2x_set_e3_module_fault_led(struct link_params *params, - u8 gpio_mode) -{ - u32 pin_cfg; - u8 port = params->port; - struct bnx2x *bp = params->bp; - pin_cfg = (REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].e3_sfp_ctrl)) & - PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >> - PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT; - DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n", - gpio_mode, pin_cfg); - bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode); -} - -static void bnx2x_set_sfp_module_fault_led(struct link_params *params, - u8 gpio_mode) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); - if (CHIP_IS_E3(bp)) { - /* - * Low ==> if SFP+ module is supported otherwise - * High ==> if SFP+ module is not on the approved vendor list - */ - bnx2x_set_e3_module_fault_led(params, gpio_mode); - } else - bnx2x_set_e1e2_module_fault_led(params, gpio_mode); -} - -static void bnx2x_warpcore_power_module(struct link_params *params, - struct bnx2x_phy *phy, - u8 power) -{ - u32 pin_cfg; - struct bnx2x *bp = params->bp; - - pin_cfg = (REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & - PORT_HW_CFG_E3_PWR_DIS_MASK) >> - PORT_HW_CFG_E3_PWR_DIS_SHIFT; - - if (pin_cfg == PIN_CFG_NA) - return; - DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", - power, pin_cfg); - /* - * Low ==> corresponding SFP+ module is powered - * high ==> the SFP+ module is powered down - */ - bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); -} - -static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - bnx2x_warpcore_power_module(params, phy, 0); -} - -static void bnx2x_power_sfp_module(struct link_params *params, - struct bnx2x_phy *phy, - u8 power) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power); - - switch (phy->type) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: - bnx2x_8727_power_module(params->bp, phy, power); - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: - bnx2x_warpcore_power_module(params, phy, power); - break; - default: - break; - } -} -static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, - struct bnx2x_phy *phy, - u16 edc_mode) -{ - u16 val = 0; - u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; - struct bnx2x *bp = params->bp; - - u8 lane = bnx2x_get_warpcore_lane(phy, params); - /* This is a global register which controls all lanes */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); - val &= ~(0xf << (lane << 2)); - - switch (edc_mode) { - case EDC_MODE_LINEAR: - case EDC_MODE_LIMITING: - mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; - break; - case EDC_MODE_PASSIVE_DAC: - mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; - break; - default: - break; - } - - val |= (mode << (lane << 2)); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val); - /* A 
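 *
 * (The firmware-mode register written just above packs one 4-bit mode
 * per lane, lane n occupying bits 4n+3..4n:
 *
 *	val &= ~(0xf << (lane << 2));
 *	val |= mode << (lane << 2);
 *
 * e.g. lane 2 clears bits 11:8 and writes its mode there; the read
 * below plus the lane reset pulse let the microcode pick it up.)
 *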
must read */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); - - /* Restart microcode to re-read the new mode */ - bnx2x_warpcore_reset_lane(bp, phy, 1); - bnx2x_warpcore_reset_lane(bp, phy, 0); - -} - -static void bnx2x_set_limiting_mode(struct link_params *params, - struct bnx2x_phy *phy, - u16 edc_mode) -{ - switch (phy->type) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode); - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: - bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode); - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: - bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode); - break; - } -} - -int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u16 edc_mode; - int rc = 0; - - u32 val = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_feature_config[params->port].config)); - - DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", - params->port); - /* Power up module */ - bnx2x_power_sfp_module(params, phy, 1); - if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { - DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); - return -EINVAL; - } else if (bnx2x_verify_sfp_module(phy, params) != 0) { - /* check SFP+ module compatibility */ - DP(NETIF_MSG_LINK, "Module verification failed!!\n"); - rc = -EINVAL; - /* Turn on fault module-detected led */ - bnx2x_set_sfp_module_fault_led(params, - MISC_REGISTERS_GPIO_HIGH); - - /* Check if need to power down the SFP+ module */ - if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == - PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) { - DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n"); - bnx2x_power_sfp_module(params, phy, 0); - return rc; - } - } else { - /* Turn off fault module-detected led */ - bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); - } - - /* - * Check and set limiting mode / LRM mode on 8726. 
On 8727 it - * is done automatically - */ - bnx2x_set_limiting_mode(params, phy, edc_mode); - - /* - * Enable transmit for this module if the module is approved, or - * if unapproved modules should also enable the Tx laser - */ - if (rc == 0 || - (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != - PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) - bnx2x_sfp_set_transmitter(params, phy, 1); - else - bnx2x_sfp_set_transmitter(params, phy, 0); - - return rc; -} - -void bnx2x_handle_module_detect_int(struct link_params *params) -{ - struct bnx2x *bp = params->bp; - struct bnx2x_phy *phy; - u32 gpio_val; - u8 gpio_num, gpio_port; - if (CHIP_IS_E3(bp)) - phy = ¶ms->phy[INT_PHY]; - else - phy = ¶ms->phy[EXT_PHY1]; - - if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base, - params->port, &gpio_num, &gpio_port) == - -EINVAL) { - DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n"); - return; - } - - /* Set valid module led off */ - bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); - - /* Get current gpio val reflecting module plugged in / out*/ - gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); - - /* Call the handling function in case module is detected */ - if (gpio_val == 0) { - bnx2x_power_sfp_module(params, phy, 1); - bnx2x_set_gpio_int(bp, gpio_num, - MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, - gpio_port); - if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) - bnx2x_sfp_module_detection(phy, params); - else - DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); - } else { - u32 val = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_feature_config[params->port]. - config)); - bnx2x_set_gpio_int(bp, gpio_num, - MISC_REGISTERS_GPIO_INT_OUTPUT_SET, - gpio_port); - /* - * Module was plugged out. - * Disable transmit for this module - */ - phy->media_type = ETH_PHY_NOT_PRESENT; - if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == - PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) || - CHIP_IS_E3(bp)) - bnx2x_sfp_set_transmitter(params, phy, 0); - } -} - -/******************************************************************/ -/* Used by 8706 and 8727 */ -/******************************************************************/ -static void bnx2x_sfp_mask_fault(struct bnx2x *bp, - struct bnx2x_phy *phy, - u16 alarm_status_offset, - u16 alarm_ctrl_offset) -{ - u16 alarm_status, val; - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, alarm_status_offset, - &alarm_status); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, alarm_status_offset, - &alarm_status); - /* Mask or enable the fault event. 
*/ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val); - if (alarm_status & (1<<0)) - val &= ~(1<<0); - else - val |= (1<<0); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val); -} -/******************************************************************/ -/* common BCM8706/BCM8726 PHY SECTION */ -/******************************************************************/ -static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - u8 link_up = 0; - u16 val1, val2, rx_sd, pcs_status; - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "XGXS 8706/8726\n"); - /* Clear RX Alarm*/ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); - - bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, - MDIO_PMA_LASI_TXCTRL); - - /* clear LASI indication*/ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); - DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); - bnx2x_cl45_read(bp, phy, - MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status); - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); - - DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" - " link_status 0x%x\n", rx_sd, pcs_status, val2); - /* - * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status - * are set, or if the autoneg bit 1 is set - */ - link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); - if (link_up) { - if (val2 & (1<<1)) - vars->line_speed = SPEED_1000; - else - vars->line_speed = SPEED_10000; - bnx2x_ext_phy_resolve_fc(phy, params, vars); - vars->duplex = DUPLEX_FULL; - } - - /* Capture 10G link fault. Read twice to clear stale value. 
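 *
 * For reference, the link decision above reduces to: 10G is up when
 * both PMD signal-detect and PCS status report bit 0, and 1G when the
 * autoneg link-status register reports bit 1:
 *
 *	link_up = (rx_sd & pcs_status & 0x1) || (an_status & (1 << 1));
 *	speed   = (an_status & (1 << 1)) ? SPEED_1000 : SPEED_10000;
 *
 * (an_status is an illustrative name for the MDIO_AN_REG_LINK_STATUS
 * value read into val2.)
 *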
*/ - if (vars->line_speed == SPEED_10000) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_LASI_TXSTAT, &val1); - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_LASI_TXSTAT, &val1); - if (val1 & (1<<0)) - vars->fault_detected = 1; - } - - return link_up; -} - -/******************************************************************/ -/* BCM8706 PHY SECTION */ -/******************************************************************/ -static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - u32 tx_en_mode; - u16 cnt, val, tmp1; - struct bnx2x *bp = params->bp; - - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); - /* HW reset */ - bnx2x_ext_phy_hw_reset(bp, params->port); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); - bnx2x_wait_reset_complete(bp, phy, params); - - /* Wait until fw is loaded */ - for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); - if (val) - break; - msleep(10); - } - DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt); - if ((params->feature_config_flags & - FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { - u8 i; - u16 reg; - for (i = 0; i < 4; i++) { - reg = MDIO_XS_8706_REG_BANK_RX0 + - i*(MDIO_XS_8706_REG_BANK_RX1 - - MDIO_XS_8706_REG_BANK_RX0); - bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val); - /* Clear first 3 bits of the control */ - val &= ~0x7; - /* Set control bits according to configuration */ - val |= (phy->rx_preemphasis[i] & 0x7); - DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706" - " reg 0x%x <-- val 0x%x\n", reg, val); - bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val); - } - } - /* Force speed */ - if (phy->req_line_speed == SPEED_10000) { - DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n"); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_DIGITAL_CTRL, 0x400); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, - 0); - /* Arm LASI for link and Tx fault. 
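 *
 * (The RX equalizer banks programmed earlier in this function sit at a
 * constant register stride, so bank i is addressed as
 *
 *	reg = MDIO_XS_8706_REG_BANK_RX0 +
 *	      i * (MDIO_XS_8706_REG_BANK_RX1 - MDIO_XS_8706_REG_BANK_RX0);
 *
 * and only the low three bits of each bank carry the equalizer value,
 * hence the (val & ~0x7) | (rx_preemphasis[i] & 0x7) update.)
 *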
*/ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3); - } else { - /* Force 1Gbps using autoneg with 1G advertisement */ - - /* Allow CL37 through CL73 */ - DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n"); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); - - /* Enable Full-Duplex advertisement on CL37 */ - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020); - /* Enable CL37 AN */ - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); - /* 1G support */ - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5)); - - /* Enable clause 73 AN */ - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, - 0x0400); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, - 0x0004); - } - bnx2x_save_bcm_spirom_ver(bp, phy, params->port); - - /* - * If TX Laser is controlled by GPIO_0, do not let PHY go into low - * power mode, if TX Laser is disabled - */ - - tx_en_mode = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].sfp_ctrl)) - & PORT_HW_CFG_TX_LASER_MASK; - - if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { - DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1); - tmp1 |= 0x1; - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1); - } - - return 0; -} - -static int bnx2x_8706_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - return bnx2x_8706_8726_read_status(phy, params, vars); -} - -/******************************************************************/ -/* BCM8726 PHY SECTION */ -/******************************************************************/ -static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n"); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001); -} - -static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - /* Need to wait 100ms after reset */ - msleep(100); - - /* Micro controller re-boot */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B); - - /* Set soft reset */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0001); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); - - /* wait for 150ms for microcode load */ - msleep(150); - - /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0000); - - msleep(200); - bnx2x_save_bcm_spirom_ver(bp, phy, params->port); -} - -static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 val1; - u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars); - if (link_up) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, - &val1); - if (val1 & (1<<15)) { - DP(NETIF_MSG_LINK, "Tx is disabled\n"); - link_up = 0; - vars->line_speed = 0; - } - } - return link_up; -} - - -static int 
bnx2x_8726_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); - - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); - bnx2x_wait_reset_complete(bp, phy, params); - - bnx2x_8726_external_rom_boot(phy, params); - - /* - * Need to call module detected on initialization since the module - * detection triggered by actual module insertion might occur before - * driver is loaded, and when driver is loaded, it reset all - * registers, including the transmitter - */ - bnx2x_sfp_module_detection(phy, params); - - if (phy->req_line_speed == SPEED_1000) { - DP(NETIF_MSG_LINK, "Setting 1G force\n"); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, - 0x400); - } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) && - ((phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { - DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); - /* Set Flow control */ - bnx2x_ext_phy_set_pause(params, phy, vars); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); - /* - * Enable RX-ALARM control to receive interrupt for 1G speed - * change - */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, - 0x400); - - } else { /* Default 10G. 
Set only LASI control */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1); - } - - /* Set TX PreEmphasis if needed */ - if ((params->feature_config_flags & - FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { - DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x," - "TX_CTRL2 0x%x\n", - phy->tx_preemphasis[0], - phy->tx_preemphasis[1]); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8726_TX_CTRL1, - phy->tx_preemphasis[0]); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8726_TX_CTRL2, - phy->tx_preemphasis[1]); - } - - return 0; - -} - -static void bnx2x_8726_link_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port); - /* Set serial boot control for external load */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, 0x0001); -} - -/******************************************************************/ -/* BCM8727 PHY SECTION */ -/******************************************************************/ - -static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy, - struct link_params *params, u8 mode) -{ - struct bnx2x *bp = params->bp; - u16 led_mode_bitmask = 0; - u16 gpio_pins_bitmask = 0; - u16 val; - /* Only NOC flavor requires to set the LED specifically */ - if (!(phy->flags & FLAGS_NOC)) - return; - switch (mode) { - case LED_MODE_FRONT_PANEL_OFF: - case LED_MODE_OFF: - led_mode_bitmask = 0; - gpio_pins_bitmask = 0x03; - break; - case LED_MODE_ON: - led_mode_bitmask = 0; - gpio_pins_bitmask = 0x02; - break; - case LED_MODE_OPER: - led_mode_bitmask = 0x60; - gpio_pins_bitmask = 0x11; - break; - } - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_PCS_OPT_CTRL, - &val); - val &= 0xff8f; - val |= led_mode_bitmask; - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_PCS_OPT_CTRL, - val); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_GPIO_CTRL, - &val); - val &= 0xffe0; - val |= gpio_pins_bitmask; - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_GPIO_CTRL, - val); -} -static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, - struct link_params *params) { - u32 swap_val, swap_override; - u8 port; - /* - * The PHY reset is controlled by GPIO 1. Fake the port number - * to cancel the swap done in set_gpio() - */ - struct bnx2x *bp = params->bp; - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - port = (swap_val && swap_override) ^ 1; - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); -} - -static int bnx2x_8727_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - u32 tx_en_mode; - u16 tmp1, val, mod_abs, tmp2; - u16 rx_alarm_ctrl_val; - u16 lasi_ctrl_val; - struct bnx2x *bp = params->bp; - /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ - - bnx2x_wait_reset_complete(bp, phy, params); - rx_alarm_ctrl_val = (1<<2) | (1<<5) ; - /* Should be 0x6 to enable XS on Tx side. 
*/
-	lasi_ctrl_val = 0x0006;
-
-	DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
-	/* enable LASI */
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
-			 rx_alarm_ctrl_val);
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
-			 0);
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
-
-	/*
-	 * Initially configure MOD_ABS to interrupt when a module is
-	 * present (bit 8)
-	 */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-	/*
-	 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
-	 * When the EDC is off it locks onto a reference clock and avoids
-	 * becoming 'lost'
-	 */
-	mod_abs &= ~(1<<8);
-	if (!(phy->flags & FLAGS_NOC))
-		mod_abs &= ~(1<<9);
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
-
-
-	/* Enable/Disable PHY transmitter output */
-	bnx2x_set_disable_pmd_transmit(params, phy, 0);
-
-	/* Make MOD_ABS give interrupt on change */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
-			&val);
-	val |= (1<<12);
-	if (phy->flags & FLAGS_NOC)
-		val |= (3<<5);
-
-	/*
-	 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
-	 * status, which reflects SFP+ module over-current
-	 */
-	if (!(phy->flags & FLAGS_NOC))
-		val &= 0xff8f; /* Reset bits 4-6 */
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
-
-	bnx2x_8727_power_module(bp, phy, 1);
-
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
-
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
-
-	/* Optionally force 1G speed */
-	if (phy->req_line_speed == SPEED_1000) {
-		DP(NETIF_MSG_LINK, "Setting 1G force\n");
-		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
-		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
-		bnx2x_cl45_read(bp, phy,
-				MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
-		DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-		/*
-		 * Power down the XAUI until link is up in case of dual-media
-		 * and 1G
-		 */
-		if (DUAL_MEDIA(params)) {
-			bnx2x_cl45_read(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8727_PCS_GP, &val);
-			val |= (3<<10);
-			bnx2x_cl45_write(bp, phy,
-					 MDIO_PMA_DEVAD,
-					 MDIO_PMA_REG_8727_PCS_GP, val);
-		}
-	} else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
-		   ((phy->speed_cap_mask &
-		     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
-		   ((phy->speed_cap_mask &
-		     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
-		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
-
-		DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
-		bnx2x_cl45_write(bp, phy,
-				 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
-		bnx2x_cl45_write(bp, phy,
-				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
-	} else {
-		/*
-		 * Since the 8727 has only a single reset pin, need to set
-		 * the 10G registers even though they are the default
-		 */
-		bnx2x_cl45_write(bp, phy,
-				 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
-				 0x0020);
-		bnx2x_cl45_write(bp, phy,
-				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
-		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
-		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
-				 0x0008);
-	}
-
-	/*
-	 * Set 2-wire transfer rate of SFP+ module EEPROM
-	 * to 100 kHz, since some DACs (direct attached cables) do
-	 * not work at 400 kHz.
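Related to the two-wire interface tuned here: the module EEPROM itself is read elsewhere in this file through bnx2x_read_sfp_module_eeprom(). A rough usage sketch (the exact signature is assumed from the rest of bnx2x_link.c; the _sketch function is illustrative only), fetching the SFP+ identifier byte at offset 0:

static void bnx2x_sfp_id_sketch(struct bnx2x_phy *phy,
				struct link_params *params)
{
	struct bnx2x *bp = params->bp;
	u8 id = 0;

	/* Offset 0 of the SFP+ EEPROM holds the identifier byte
	 * (0x03 for SFP/SFP+ per SFF-8472). */
	if (bnx2x_read_sfp_module_eeprom(phy, params, 0, 1, &id) == 0)
		DP(NETIF_MSG_LINK, "SFP+ identifier: 0x%x\n", id);
}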
- */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, - 0xa001); - - /* Set TX PreEmphasis if needed */ - if ((params->feature_config_flags & - FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { - DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n", - phy->tx_preemphasis[0], - phy->tx_preemphasis[1]); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1, - phy->tx_preemphasis[0]); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2, - phy->tx_preemphasis[1]); - } - - /* - * If TX Laser is controlled by GPIO_0, do not let PHY go into low - * power mode, if TX Laser is disabled - */ - tx_en_mode = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].sfp_ctrl)) - & PORT_HW_CFG_TX_LASER_MASK; - - if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { - - DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2); - tmp2 |= 0x1000; - tmp2 &= 0xFFEF; - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2); - } - - return 0; -} - -static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u16 mod_abs, rx_alarm_status; - u32 val = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_feature_config[params->port]. - config)); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); - if (mod_abs & (1<<8)) { - - /* Module is absent */ - DP(NETIF_MSG_LINK, "MOD_ABS indication " - "show module is absent\n"); - phy->media_type = ETH_PHY_NOT_PRESENT; - /* - * 1. Set mod_abs to detect next module - * presence event - * 2. Set EDC off by setting OPTXLOS signal input to low - * (bit 9). - * When the EDC is off it locks onto a reference clock and - * avoids becoming 'lost'. - */ - mod_abs &= ~(1<<8); - if (!(phy->flags & FLAGS_NOC)) - mod_abs &= ~(1<<9); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - - /* - * Clear RX alarm since it stays up as long as - * the mod_abs wasn't changed - */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); - - } else { - /* Module is present */ - DP(NETIF_MSG_LINK, "MOD_ABS indication " - "show module is present\n"); - /* - * First disable transmitter, and if the module is ok, the - * module_detection will enable it - * 1. Set mod_abs to detect next module absent event ( bit 8) - * 2. Restore the default polarity of the OPRXLOS signal and - * this signal will then correctly indicate the presence or - * absence of the Rx signal. (bit 9) - */ - mod_abs |= (1<<8); - if (!(phy->flags & FLAGS_NOC)) - mod_abs |= (1<<9); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - - /* - * Clear RX alarm since it stays up as long as the mod_abs - * wasn't changed. 
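Both branches above program the same two bits in opposite directions: bit 8 of the PHY-identifier register selects which MOD_ABS edge raises the next interrupt, and bit 9 controls the OPRXLOS/EDC behaviour on non-NOC boards. Condensed into one illustrative helper (a sketch, not driver API):

static void bnx2x_8727_arm_mod_abs_sketch(struct bnx2x *bp,
					  struct bnx2x_phy *phy,
					  u8 module_present)
{
	u16 mod_abs;

	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
			MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
	if (module_present) {
		/* Watch for removal; restore default OPRXLOS polarity */
		mod_abs |= (1 << 8);
		if (!(phy->flags & FLAGS_NOC))
			mod_abs |= (1 << 9);
	} else {
		/* Watch for insertion; park the EDC on its reference clock */
		mod_abs &= ~(1 << 8);
		if (!(phy->flags & FLAGS_NOC))
			mod_abs &= ~(1 << 9);
	}
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
			 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
}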
This needs to be done before calling the
- * module detection, otherwise it will clear the link update
- * alarm
- */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
-
-
-		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
-		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-			bnx2x_sfp_set_transmitter(params, phy, 0);
-
-		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
-			bnx2x_sfp_module_detection(phy, params);
-		else
-			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
-	}
-
-	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
-	   rx_alarm_status);
-	/* No need to check link status in case of module plugged in/out */
-}
-
-static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
-				 struct link_params *params,
-				 struct link_vars *vars)
-
-{
-	struct bnx2x *bp = params->bp;
-	u8 link_up = 0, oc_port = params->port;
-	u16 link_status = 0;
-	u16 rx_alarm_status, lasi_ctrl, val1;
-
-	/* If PHY is not initialized, do not check link status */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
-			&lasi_ctrl);
-	if (!lasi_ctrl)
-		return 0;
-
-	/* Check the LASI on Rx */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
-			&rx_alarm_status);
-	vars->line_speed = 0;
-	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
-
-	bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
-			     MDIO_PMA_LASI_TXCTRL);
-
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
-
-	DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
-
-	/* Clear MSG-OUT */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
-
-	/*
-	 * If a module is present, check for over-current
-	 */
-	if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
-		/* Check over-current using 8727 GPIO0 input */
-		bnx2x_cl45_read(bp, phy,
-				MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
-				&val1);
-
-		if ((val1 & (1<<8)) == 0) {
-			if (!CHIP_IS_E1x(bp))
-				oc_port = BP_PATH(bp) + (params->port << 1);
-			DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
-			   " on port %d\n", oc_port);
-			netdev_err(bp->dev, "Error: Power fault on Port %d has"
-					    " been detected and the power to "
-					    "that SFP+ module has been removed"
-					    " to prevent failure of the card."
- " Please remove the SFP+ module and" - " restart the system to clear this" - " error.\n", - oc_port); - /* Disable all RX_ALARMs except for mod_abs */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_LASI_RXCTRL, (1<<5)); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, &val1); - /* Wait for module_absent_event */ - val1 |= (1<<8); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, val1); - /* Clear RX alarm */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); - return 0; - } - } /* Over current check */ - - /* When module absent bit is set, check module */ - if (rx_alarm_status & (1<<5)) { - bnx2x_8727_handle_mod_abs(phy, params); - /* Enable all mod_abs and link detection bits */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, - ((1<<5) | (1<<2))); - } - DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n"); - bnx2x_8727_specific_func(phy, params, ENABLE_TX); - /* If transmitter is disabled, ignore false link up indication */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1); - if (val1 & (1<<15)) { - DP(NETIF_MSG_LINK, "Tx is disabled\n"); - return 0; - } - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); - - /* - * Bits 0..2 --> speed detected, - * Bits 13..15--> link is down - */ - if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { - link_up = 1; - vars->line_speed = SPEED_10000; - DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n", - params->port); - } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { - link_up = 1; - vars->line_speed = SPEED_1000; - DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n", - params->port); - } else { - link_up = 0; - DP(NETIF_MSG_LINK, "port %x: External link is down\n", - params->port); - } - - /* Capture 10G link fault. */ - if (vars->line_speed == SPEED_10000) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_LASI_TXSTAT, &val1); - - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_LASI_TXSTAT, &val1); - - if (val1 & (1<<0)) { - vars->fault_detected = 1; - } - } - - if (link_up) { - bnx2x_ext_phy_resolve_fc(phy, params, vars); - vars->duplex = DUPLEX_FULL; - DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex); - } - - if ((DUAL_MEDIA(params)) && - (phy->req_line_speed == SPEED_1000)) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_PCS_GP, &val1); - /* - * In case of dual-media board and 1G, power up the XAUI side, - * otherwise power it down. 
For 10G it is done automatically - */ - if (link_up) - val1 &= ~(3<<10); - else - val1 |= (3<<10); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_PCS_GP, val1); - } - return link_up; -} - -static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - - /* Enable/Disable PHY transmitter output */ - bnx2x_set_disable_pmd_transmit(params, phy, 1); - - /* Disable Transmitter */ - bnx2x_sfp_set_transmitter(params, phy, 0); - /* Clear LASI */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0); - -} - -/******************************************************************/ -/* BCM8481/BCM84823/BCM84833 PHY SECTION */ -/******************************************************************/ -static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, - struct link_params *params) -{ - u16 val, fw_ver1, fw_ver2, cnt; - u8 port; - struct bnx2x *bp = params->bp; - - port = params->port; - - /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/ - /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); - - for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); - if (val & 1) - break; - udelay(5); - } - if (cnt == 100) { - DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n"); - bnx2x_save_spirom_version(bp, port, 0, - phy->ver_addr); - return; - } - - - /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); - for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); - if (val & 1) - break; - udelay(5); - } - if (cnt == 100) { - DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n"); - bnx2x_save_spirom_version(bp, port, 0, - phy->ver_addr); - return; - } - - /* lower 16 bits of the register SPI_FW_STATUS */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); - /* upper 16 bits of register SPI_FW_STATUS */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); - - bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1, - phy->ver_addr); -} - -static void bnx2x_848xx_set_led(struct bnx2x *bp, - struct bnx2x_phy *phy) -{ - u16 val; - - /* PHYC_CTL_LED_CTL */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, &val); - val &= 0xFE00; - val |= 0x0092; - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, val); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x80); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x18); - - /* Select activity source by Tx and Rx, as suggested by PHY AE */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x0006); - - /* Select the closest activity blink rate to that in 10/100/1000 */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_BLINK, - 0); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); - val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ 
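The LED programming above, like many blocks in this file, is a read-modify-write of a single clause-45 register. A small helper in the following shape (hypothetical, shown only to make the mask/set idiom explicit) captures it:

static void bnx2x_cl45_rmw_sketch(struct bnx2x *bp, struct bnx2x_phy *phy,
				  u8 devad, u16 reg, u16 clear, u16 set)
{
	u16 val;

	bnx2x_cl45_read(bp, phy, devad, reg, &val);
	val &= ~clear;	/* drop the field being rewritten */
	val |= set;	/* install the new value */
	bnx2x_cl45_write(bp, phy, devad, reg, val);
}

With it, the LINK_SIGNAL update above would collapse to bnx2x_cl45_rmw_sketch(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, 0x01FF, 0x0092); where 0x01FF is the complement of the 0xFE00 mask kept there.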
- - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); - - /* 'Interrupt Mask' */ - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, - 0xFFFB, 0xFFFD); -} - -static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 autoneg_val, an_1000_val, an_10_100_val; - u16 tmp_req_line_speed; - - tmp_req_line_speed = phy->req_line_speed; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - if (phy->req_line_speed == SPEED_10000) - phy->req_line_speed = SPEED_AUTO_NEG; - - /* - * This phy uses the NIG latch mechanism since link indication - * arrives through its LED4 and not via its LASI signal, so we - * get steady signal instead of clear on read - */ - bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, - 1 << NIG_LATCH_BC_ENABLE_MI_INT); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); - - bnx2x_848xx_set_led(bp, phy); - - /* set 1000 speed advertisement */ - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, - &an_1000_val); - - bnx2x_ext_phy_set_pause(params, phy, vars); - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_LEGACY_AN_ADV, - &an_10_100_val); - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL, - &autoneg_val); - /* Disable forced speed */ - autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13)); - an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8)); - - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || - (phy->req_line_speed == SPEED_1000)) { - an_1000_val |= (1<<8); - autoneg_val |= (1<<9 | 1<<12); - if (phy->req_duplex == DUPLEX_FULL) - an_1000_val |= (1<<9); - DP(NETIF_MSG_LINK, "Advertising 1G\n"); - } else - an_1000_val &= ~((1<<8) | (1<<9)); - - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, - an_1000_val); - - /* set 100 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) && - (phy->supported & - (SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full)))) { - an_10_100_val |= (1<<7); - /* Enable autoneg and restart autoneg for legacy speeds */ - autoneg_val |= (1<<9 | 1<<12); - - if (phy->req_duplex == DUPLEX_FULL) - an_10_100_val |= (1<<8); - DP(NETIF_MSG_LINK, "Advertising 100M\n"); - } - /* set 10 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && - (phy->supported & - (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full)))) { - an_10_100_val |= (1<<5); - autoneg_val |= (1<<9 | 1<<12); - if (phy->req_duplex == DUPLEX_FULL) - an_10_100_val |= (1<<6); - DP(NETIF_MSG_LINK, "Advertising 10M\n"); - } - - /* Only 10/100 are allowed to work in FORCE mode */ - if ((phy->req_line_speed == SPEED_100) && - (phy->supported & - (SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full))) { - autoneg_val |= (1<<13); - /* Enabled AUTO-MDIX when autoneg is disabled */ - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, - (1<<15 | 1<<9 | 7<<0)); - DP(NETIF_MSG_LINK, "Setting 100M force\n"); - } - if ((phy->req_line_speed == SPEED_10) && - (phy->supported & - (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full))) { - /* Enabled AUTO-MDIX when autoneg is disabled */ - bnx2x_cl45_write(bp, 
phy, - MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, - (1<<15 | 1<<9 | 7<<0)); - DP(NETIF_MSG_LINK, "Setting 10M force\n"); - } - - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV, - an_10_100_val); - - if (phy->req_duplex == DUPLEX_FULL) - autoneg_val |= (1<<8); - - /* - * Always write this if this is not 84833. - * For 84833, write it only when it's a forced speed. - */ - if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - ((autoneg_val & (1<<12)) == 0)) - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); - - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || - (phy->req_line_speed == SPEED_10000)) { - DP(NETIF_MSG_LINK, "Advertising 10G\n"); - /* Restart autoneg for 10G*/ - - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, - 0x3200); - } else - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, - 1); - - /* Save spirom version */ - bnx2x_save_848xx_spirom_version(phy, params); - - phy->req_line_speed = tmp_req_line_speed; - - return 0; -} - -static int bnx2x_8481_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - /* Restore normal power mode*/ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); - - /* HW reset */ - bnx2x_ext_phy_hw_reset(bp, params->port); - bnx2x_wait_reset_complete(bp, phy, params); - - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); - return bnx2x_848xx_cmn_config_init(phy, params, vars); -} - - -#define PHY84833_HDSHK_WAIT 300 -static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - u32 idx; - u32 pair_swap; - u16 val; - u16 data; - struct bnx2x *bp = params->bp; - /* Do pair swap */ - - /* Check for configuration. 
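The pair-swap sequence that follows, and the AutogrEEEn setup later on, both drive the 84833 firmware through the same scratch-register mailbox: open it, poll until the firmware accepts commands, write the argument, issue the command, poll for pass/fail, then clear the status. A hypothetical consolidation of that handshake (sketch only; the driver spells each use out inline):

static int bnx2x_84833_fw_cmd_sketch(struct bnx2x *bp, struct bnx2x_phy *phy,
				     u16 cmd, u16 arg)
{
	u32 idx;
	u16 val;

	/* Open the mailbox and wait until firmware is ready */
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_SCRATCH_REG2,
			 PHY84833_CMD_OPEN_OVERRIDE);
	for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
				MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
		if (val == PHY84833_CMD_OPEN_FOR_CMDS)
			break;
		msleep(1);
	}
	if (idx >= PHY84833_HDSHK_WAIT)
		return -EINVAL;

	/* Argument first, then the command itself */
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_SCRATCH_REG4, arg);
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_SCRATCH_REG0, cmd);

	/* Wait for pass/fail, then release the mailbox */
	for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
				MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
		if ((val == PHY84833_CMD_COMPLETE_PASS) ||
		    (val == PHY84833_CMD_COMPLETE_ERROR))
			break;
		msleep(1);
	}
	if ((idx >= PHY84833_HDSHK_WAIT) ||
	    (val == PHY84833_CMD_COMPLETE_ERROR))
		return -EINVAL;
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_SCRATCH_REG2,
			 PHY84833_CMD_CLEAR_COMPLETE);
	return 0;
}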
*/ - pair_swap = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & - PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; - - if (pair_swap == 0) - return 0; - - data = (u16)pair_swap; - - /* Write CMD_OPEN_OVERRIDE to STATUS reg */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, - PHY84833_CMD_OPEN_OVERRIDE); - for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); - if (val == PHY84833_CMD_OPEN_FOR_CMDS) - break; - msleep(1); - } - if (idx >= PHY84833_HDSHK_WAIT) { - DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n"); - return -EINVAL; - } - - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG4, - data); - /* Issue pair swap command */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG0, - PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE); - for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); - if ((val == PHY84833_CMD_COMPLETE_PASS) || - (val == PHY84833_CMD_COMPLETE_ERROR)) - break; - msleep(1); - } - if ((idx >= PHY84833_HDSHK_WAIT) || - (val == PHY84833_CMD_COMPLETE_ERROR)) { - DP(NETIF_MSG_LINK, "Pairswap: override failed.\n"); - return -EINVAL; - } - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, - PHY84833_CMD_CLEAR_COMPLETE); - DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data); - return 0; -} - - -static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, - u32 shmem_base_path[], - u32 chip_id) -{ - u32 reset_pin[2]; - u32 idx; - u8 reset_gpios; - if (CHIP_IS_E3(bp)) { - /* Assume that these will be GPIOs, not EPIOs. */ - for (idx = 0; idx < 2; idx++) { - /* Map config param to register bit. */ - reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + - offsetof(struct shmem_region, - dev_info.port_hw_config[0].e3_cmn_pin_cfg)); - reset_pin[idx] = (reset_pin[idx] & - PORT_HW_CFG_E3_PHY_RESET_MASK) >> - PORT_HW_CFG_E3_PHY_RESET_SHIFT; - reset_pin[idx] -= PIN_CFG_GPIO0_P0; - reset_pin[idx] = (1 << reset_pin[idx]); - } - reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); - } else { - /* E2, look from diff place of shmem. 
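Whichever shmem location is used, the decode step is the same: subtract the base selector and turn the pin index into a one-hot mask. For instance (values assumed for illustration), a config of PIN_CFG_GPIO3_P0 yields 1 << 3. A sketch of the E3-style decode:

static u8 bnx2x_pin_cfg_to_mask_sketch(u32 pin_cfg)
{
	/* PIN_CFG_GPIOn_P0 selectors are consecutive, so the offset from
	 * PIN_CFG_GPIO0_P0 is the GPIO number; shift it into a mask. */
	return (u8)(1 << (pin_cfg - PIN_CFG_GPIO0_P0));
}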
*/ - for (idx = 0; idx < 2; idx++) { - reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + - offsetof(struct shmem_region, - dev_info.port_hw_config[0].default_cfg)); - reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK; - reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0; - reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT; - reset_pin[idx] = (1 << reset_pin[idx]); - } - reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); - } - - return reset_gpios; -} - -static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u8 reset_gpios; - u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base + - offsetof(struct shmem2_region, - other_shmem_base_addr)); - - u32 shmem_base_path[2]; - shmem_base_path[0] = params->shmem_base; - shmem_base_path[1] = other_shmem_base_addr; - - reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, - params->chip_id); - - bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); - udelay(10); - DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n", - reset_gpios); - - return 0; -} - -static int bnx2x_84833_common_init_phy(struct bnx2x *bp, - u32 shmem_base_path[], - u32 chip_id) -{ - u8 reset_gpios; - - reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); - - bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); - udelay(10); - bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); - msleep(800); - DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n", - reset_gpios); - - return 0; -} - -#define PHY84833_CONSTANT_LATENCY 1193 -static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 port, initialize = 1; - u16 val; - u16 temp; - u32 actual_phy_selection, cms_enable, idx; - int rc = 0; - - msleep(1); - - if (!(CHIP_IS_E1(bp))) - port = BP_PATH(bp); - else - port = params->port; - - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, - port); - } else { - /* MDIO reset */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, 0x8000); - /* Bring PHY out of super isolate mode */ - bnx2x_cl45_read(bp, phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); - val &= ~MDIO_84833_SUPER_ISOLATE; - bnx2x_cl45_write(bp, phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); - } - - bnx2x_wait_reset_complete(bp, phy, params); - - /* Wait for GPHY to come out of reset */ - msleep(50); - - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - bnx2x_84833_pair_swap_cfg(phy, params, vars); - - /* - * BCM84823 requires that XGXS links up first @ 10G for normal behavior - */ - temp = vars->line_speed; - vars->line_speed = SPEED_10000; - bnx2x_set_autoneg(¶ms->phy[INT_PHY], params, vars, 0); - bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars); - vars->line_speed = temp; - - /* Set dual-media configuration according to configuration */ - - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_MEDIA, &val); - val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | - MDIO_CTL_REG_84823_MEDIA_LINE_MASK | - MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | - MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK | - MDIO_CTL_REG_84823_MEDIA_FIBER_1G); - - if (CHIP_IS_E3(bp)) { - val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | - MDIO_CTL_REG_84823_MEDIA_LINE_MASK); - } else { - val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI | - 
MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L); - } - - actual_phy_selection = bnx2x_phy_selection(params); - - switch (actual_phy_selection) { - case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: - /* Do nothing. Essentially this is like the priority copper */ - break; - case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: - val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER; - break; - case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: - val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER; - break; - case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: - /* Do nothing here. The first PHY won't be initialized at all */ - break; - case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: - val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN; - initialize = 0; - break; - } - if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000) - val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; - - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_MEDIA, val); - DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", - params->multi_phy_config, val); - - /* AutogrEEEn */ - if (params->feature_config_flags & - FEATURE_CONFIG_AUTOGREEEN_ENABLED) { - /* Ensure that f/w is ready */ - for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); - if (val == PHY84833_CMD_OPEN_FOR_CMDS) - break; - usleep_range(1000, 1000); - } - if (idx >= PHY84833_HDSHK_WAIT) { - DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n"); - return -EINVAL; - } - - /* Select EEE mode */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG3, - 0x2); - - /* Set Idle and Latency */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG4, - PHY84833_CONSTANT_LATENCY + 1); - - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_DATA3_REG, - PHY84833_CONSTANT_LATENCY + 1); - - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_DATA4_REG, - PHY84833_CONSTANT_LATENCY); - - /* Send EEE instruction to command register */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG0, - PHY84833_DIAG_CMD_SET_EEE_MODE); - - /* Ensure that the command has completed */ - for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); - if ((val == PHY84833_CMD_COMPLETE_PASS) || - (val == PHY84833_CMD_COMPLETE_ERROR)) - break; - usleep_range(1000, 1000); - } - if ((idx >= PHY84833_HDSHK_WAIT) || - (val == PHY84833_CMD_COMPLETE_ERROR)) { - DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n"); - return -EINVAL; - } - - /* Reset command handler */ - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_SCRATCH_REG2, - PHY84833_CMD_CLEAR_COMPLETE); - } - - if (initialize) - rc = bnx2x_848xx_cmn_config_init(phy, params, vars); - else - bnx2x_save_848xx_spirom_version(phy, params); - /* 84833 PHY has a better feature and doesn't need to support this. 
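With a consolidated handshake helper like the bnx2x_84833_fw_cmd_sketch() outlined earlier, the AutogrEEEn block just above would reduce to roughly the following; note the EEE flow also programs SCRATCH_REG3 and the DATA3/DATA4 latency registers before issuing the command (sketch only):

static int bnx2x_autogreeen_sketch(struct bnx2x *bp, struct bnx2x_phy *phy)
{
	/* Select EEE mode and program the idle/latency values first */
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_SCRATCH_REG3, 0x2);
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_DATA3_REG,
			 PHY84833_CONSTANT_LATENCY + 1);
	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
			 MDIO_84833_TOP_CFG_DATA4_REG,
			 PHY84833_CONSTANT_LATENCY);
	/* Hypothetical helper from the earlier sketch does the
	 * open/issue/poll/clear dance around the EEE command. */
	return bnx2x_84833_fw_cmd_sketch(bp, phy,
					 PHY84833_DIAG_CMD_SET_EEE_MODE,
					 PHY84833_CONSTANT_LATENCY + 1);
}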
*/ - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { - cms_enable = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].default_cfg)) & - PORT_HW_CFG_ENABLE_CMS_MASK; - - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_USER_CTRL_REG, &val); - if (cms_enable) - val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; - else - val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_USER_CTRL_REG, val); - } - - return rc; -} - -static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 val, val1, val2; - u8 link_up = 0; - - - /* Check 10G-BaseT link status */ - /* Check PMD signal ok */ - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, 0xFFFA, &val1); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, - &val2); - DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); - - /* Check link 10G */ - if (val2 & (1<<11)) { - vars->line_speed = SPEED_10000; - vars->duplex = DUPLEX_FULL; - link_up = 1; - bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); - } else { /* Check Legacy speed link */ - u16 legacy_status, legacy_speed; - - /* Enable expansion register 0x42 (Operation mode status) */ - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42); - - /* Get legacy speed operation status */ - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, - &legacy_status); - - DP(NETIF_MSG_LINK, "Legacy speed status" - " = 0x%x\n", legacy_status); - link_up = ((legacy_status & (1<<11)) == (1<<11)); - if (link_up) { - legacy_speed = (legacy_status & (3<<9)); - if (legacy_speed == (0<<9)) - vars->line_speed = SPEED_10; - else if (legacy_speed == (1<<9)) - vars->line_speed = SPEED_100; - else if (legacy_speed == (2<<9)) - vars->line_speed = SPEED_1000; - else /* Should not happen */ - vars->line_speed = 0; - - if (legacy_status & (1<<8)) - vars->duplex = DUPLEX_FULL; - else - vars->duplex = DUPLEX_HALF; - - DP(NETIF_MSG_LINK, "Link is up in %dMbps," - " is_duplex_full= %d\n", vars->line_speed, - (vars->duplex == DUPLEX_FULL)); - /* Check legacy speed AN resolution */ - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_LEGACY_MII_STATUS, - &val); - if (val & (1<<5)) - vars->link_status |= - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8481_LEGACY_AN_EXPANSION, - &val); - if ((val & (1<<0)) == 0) - vars->link_status |= - LINK_STATUS_PARALLEL_DETECTION_USED; - } - } - if (link_up) { - DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n", - vars->line_speed); - bnx2x_ext_phy_resolve_fc(phy, params, vars); - } - - return link_up; -} - - -static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) -{ - int status = 0; - u32 spirom_ver; - spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F); - status = bnx2x_format_ver(spirom_ver, str, len); - return status; -} - -static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); - bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); -} - -static void bnx2x_8481_link_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - bnx2x_cl45_write(params->bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); - bnx2x_cl45_write(params->bp, phy, - MDIO_PMA_DEVAD, 
MDIO_PMA_REG_CTRL, 1); -} - -static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u8 port; - u16 val16; - - if (!(CHIP_IS_E1(bp))) - port = BP_PATH(bp); - else - port = params->port; - - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - port); - } else { - bnx2x_cl45_read(bp, phy, - MDIO_CTL_DEVAD, - 0x400f, &val16); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, 0x800); - } -} - -static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, - struct link_params *params, u8 mode) -{ - struct bnx2x *bp = params->bp; - u16 val; - u8 port; - - if (!(CHIP_IS_E1(bp))) - port = BP_PATH(bp); - else - port = params->port; - - switch (mode) { - case LED_MODE_OFF: - - DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port); - - if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == - SHARED_HW_CFG_LED_EXTPHY1) { - - /* Set LED masks */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x0); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x0); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x0); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED5_MASK, - 0x0); - - } else { - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x0); - } - break; - case LED_MODE_FRONT_PANEL_OFF: - - DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n", - port); - - if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == - SHARED_HW_CFG_LED_EXTPHY1) { - - /* Set LED masks */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x0); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x0); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x0); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED5_MASK, - 0x20); - - } else { - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x0); - } - break; - case LED_MODE_ON: - - DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port); - - if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == - SHARED_HW_CFG_LED_EXTPHY1) { - /* Set control reg */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, - &val); - val &= 0x8000; - val |= 0x2492; - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, - val); - - /* Set LED masks */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x0); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x20); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x20); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED5_MASK, - 0x0); - } else { - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x20); - } - break; - - case LED_MODE_OPER: - - DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port); - - if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == - SHARED_HW_CFG_LED_EXTPHY1) { - - /* Set control reg */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, - &val); - - if (!((val & - MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) - >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) { - DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n"); - bnx2x_cl45_write(bp, phy, - 
MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, - 0xa492); - } - - /* Set LED masks */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x10); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x80); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x98); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED5_MASK, - 0x40); - - } else { - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x80); - - /* Tell LED3 to blink on source */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, - &val); - val &= ~(7<<6); - val |= (1<<6); /* A83B[8:6]= 1 */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, - val); - } - break; - } - - /* - * This is a workaround for E3+84833 until autoneg - * restart is fixed in f/w - */ - if (CHIP_IS_E3(bp)) { - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_GP2_STATUS_GP_2_1, &val); - } -} - -/******************************************************************/ -/* 54618SE PHY SECTION */ -/******************************************************************/ -static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 port; - u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp; - u32 cfg_pin; - - DP(NETIF_MSG_LINK, "54618SE cfg init\n"); - usleep_range(1000, 1000); - - /* This works with E3 only, no need to check the chip - before determining the port. */ - port = params->port; - - cfg_pin = (REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & - PORT_HW_CFG_E3_PHY_RESET_MASK) >> - PORT_HW_CFG_E3_PHY_RESET_SHIFT; - - /* Drive pin high to bring the GPHY out of reset. */ - bnx2x_set_cfg_pin(bp, cfg_pin, 1); - - /* wait for GPHY to reset */ - msleep(50); - - /* reset phy */ - bnx2x_cl22_write(bp, phy, - MDIO_PMA_REG_CTRL, 0x8000); - bnx2x_wait_reset_complete(bp, phy, params); - - /*wait for GPHY to reset */ - msleep(50); - - /* Configure LED4: set to INTR (0x6). */ - /* Accessing shadow register 0xe. */ - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_SHADOW, - MDIO_REG_GPHY_SHADOW_LED_SEL2); - bnx2x_cl22_read(bp, phy, - MDIO_REG_GPHY_SHADOW, - &temp); - temp &= ~(0xf << 4); - temp |= (0x6 << 4); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_SHADOW, - MDIO_REG_GPHY_SHADOW_WR_ENA | temp); - /* Configure INTR based on link status change. */ - bnx2x_cl22_write(bp, phy, - MDIO_REG_INTR_MASK, - ~MDIO_REG_INTR_MASK_LINK_STATUS); - - /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_SHADOW, - MDIO_REG_GPHY_SHADOW_AUTO_DET_MED); - bnx2x_cl22_read(bp, phy, - MDIO_REG_GPHY_SHADOW, - &temp); - temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD; - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_SHADOW, - MDIO_REG_GPHY_SHADOW_WR_ENA | temp); - - /* Set up fc */ - /* Please refer to Table 28B-3 of 802.3ab-1999 spec. 
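For reference, the outcome of that table, as implemented by the kernel's generic mii_resolve_flowctrl_fdx() helper, is: symmetric pause when both sides advertise PAUSE, and RX-only or TX-only pause when the two sides meet through ASM_DIR. In sketch form, using the standard advertisement bits from <linux/mii.h> (illustrative restatement, not the driver's own code):

#include <linux/mii.h>

enum { FC_SKETCH_TX = 1, FC_SKETCH_RX = 2 };

static u8 resolve_pause_sketch(u16 lcladv, u16 rmtadv)
{
	if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP)
		return FC_SKETCH_TX | FC_SKETCH_RX;	/* symmetric pause */
	if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
		if (lcladv & ADVERTISE_PAUSE_CAP)
			return FC_SKETCH_RX;	/* partner sends, we honour */
		if (rmtadv & ADVERTISE_PAUSE_CAP)
			return FC_SKETCH_TX;	/* we send, partner honours */
	}
	return 0;
}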
*/ - bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); - fc_val = 0; - if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) - fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; - - if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) - fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; - - /* read all advertisement */ - bnx2x_cl22_read(bp, phy, - 0x09, - &an_1000_val); - - bnx2x_cl22_read(bp, phy, - 0x04, - &an_10_100_val); - - bnx2x_cl22_read(bp, phy, - MDIO_PMA_REG_CTRL, - &autoneg_val); - - /* Disable forced speed */ - autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13)); - an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) | - (1<<11)); - - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || - (phy->req_line_speed == SPEED_1000)) { - an_1000_val |= (1<<8); - autoneg_val |= (1<<9 | 1<<12); - if (phy->req_duplex == DUPLEX_FULL) - an_1000_val |= (1<<9); - DP(NETIF_MSG_LINK, "Advertising 1G\n"); - } else - an_1000_val &= ~((1<<8) | (1<<9)); - - bnx2x_cl22_write(bp, phy, - 0x09, - an_1000_val); - bnx2x_cl22_read(bp, phy, - 0x09, - &an_1000_val); - - /* set 100 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { - an_10_100_val |= (1<<7); - /* Enable autoneg and restart autoneg for legacy speeds */ - autoneg_val |= (1<<9 | 1<<12); - - if (phy->req_duplex == DUPLEX_FULL) - an_10_100_val |= (1<<8); - DP(NETIF_MSG_LINK, "Advertising 100M\n"); - } - - /* set 10 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { - an_10_100_val |= (1<<5); - autoneg_val |= (1<<9 | 1<<12); - if (phy->req_duplex == DUPLEX_FULL) - an_10_100_val |= (1<<6); - DP(NETIF_MSG_LINK, "Advertising 10M\n"); - } - - /* Only 10/100 are allowed to work in FORCE mode */ - if (phy->req_line_speed == SPEED_100) { - autoneg_val |= (1<<13); - /* Enabled AUTO-MDIX when autoneg is disabled */ - bnx2x_cl22_write(bp, phy, - 0x18, - (1<<15 | 1<<9 | 7<<0)); - DP(NETIF_MSG_LINK, "Setting 100M force\n"); - } - if (phy->req_line_speed == SPEED_10) { - /* Enabled AUTO-MDIX when autoneg is disabled */ - bnx2x_cl22_write(bp, phy, - 0x18, - (1<<15 | 1<<9 | 7<<0)); - DP(NETIF_MSG_LINK, "Setting 10M force\n"); - } - - /* Check if we should turn on Auto-GrEEEn */ - bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp); - if (temp == MDIO_REG_GPHY_ID_54618SE) { - if (params->feature_config_flags & - FEATURE_CONFIG_AUTOGREEEN_ENABLED) { - temp = 6; - DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); - } else { - temp = 0; - DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n"); - } - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - MDIO_REG_GPHY_EEE_ADV); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, - (0x1 << 14) | MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - temp); - } - - bnx2x_cl22_write(bp, phy, - 0x04, - an_10_100_val | fc_val); - - if (phy->req_duplex == DUPLEX_FULL) - autoneg_val |= (1<<8); - - bnx2x_cl22_write(bp, phy, - MDIO_PMA_REG_CTRL, autoneg_val); - - return 0; -} - -static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy, - struct 
link_params *params, u8 mode) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode); - switch (mode) { - case LED_MODE_FRONT_PANEL_OFF: - case LED_MODE_OFF: - case LED_MODE_OPER: - case LED_MODE_ON: - default: - break; - } - return; -} - -static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u32 cfg_pin; - u8 port; - - /* - * In case of no EPIO routed to reset the GPHY, put it - * in low power mode. - */ - bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); - /* - * This works with E3 only, no need to check the chip - * before determining the port. - */ - port = params->port; - cfg_pin = (REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & - PORT_HW_CFG_E3_PHY_RESET_MASK) >> - PORT_HW_CFG_E3_PHY_RESET_SHIFT; - - /* Drive pin low to put GPHY in reset. */ - bnx2x_set_cfg_pin(bp, cfg_pin, 0); -} - -static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 val; - u8 link_up = 0; - u16 legacy_status, legacy_speed; - - /* Get speed operation status */ - bnx2x_cl22_read(bp, phy, - 0x19, - &legacy_status); - DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status); - - /* Read status to clear the PHY interrupt. */ - bnx2x_cl22_read(bp, phy, - MDIO_REG_INTR_STATUS, - &val); - - link_up = ((legacy_status & (1<<2)) == (1<<2)); - - if (link_up) { - legacy_speed = (legacy_status & (7<<8)); - if (legacy_speed == (7<<8)) { - vars->line_speed = SPEED_1000; - vars->duplex = DUPLEX_FULL; - } else if (legacy_speed == (6<<8)) { - vars->line_speed = SPEED_1000; - vars->duplex = DUPLEX_HALF; - } else if (legacy_speed == (5<<8)) { - vars->line_speed = SPEED_100; - vars->duplex = DUPLEX_FULL; - } - /* Omitting 100Base-T4 for now */ - else if (legacy_speed == (3<<8)) { - vars->line_speed = SPEED_100; - vars->duplex = DUPLEX_HALF; - } else if (legacy_speed == (2<<8)) { - vars->line_speed = SPEED_10; - vars->duplex = DUPLEX_FULL; - } else if (legacy_speed == (1<<8)) { - vars->line_speed = SPEED_10; - vars->duplex = DUPLEX_HALF; - } else /* Should not happen */ - vars->line_speed = 0; - - DP(NETIF_MSG_LINK, "Link is up in %dMbps," - " is_duplex_full= %d\n", vars->line_speed, - (vars->duplex == DUPLEX_FULL)); - - /* Check legacy speed AN resolution */ - bnx2x_cl22_read(bp, phy, - 0x01, - &val); - if (val & (1<<5)) - vars->link_status |= - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; - bnx2x_cl22_read(bp, phy, - 0x06, - &val); - if ((val & (1<<0)) == 0) - vars->link_status |= - LINK_STATUS_PARALLEL_DETECTION_USED; - - DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", - vars->line_speed); - - /* Report whether EEE is resolved. 
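The EEE probe below reaches clause-45 registers through the GPHY's clause-22 indirection pair (MDIO_REG_GPHY_CL45_ADDR_REG / MDIO_REG_GPHY_CL45_DATA_REG, evidently the standard MMD access registers): latch the device address, latch the register number, switch to data function, then read. A condensed sketch of that pattern using the cl22 helpers from this file:

static void bnx2x_cl22_read_mmd_sketch(struct bnx2x *bp,
				       struct bnx2x_phy *phy,
				       u16 devad, u16 reg, u16 *val)
{
	/* Address function: select the MMD (device)... */
	bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_CL45_ADDR_REG, devad);
	/* ...and latch the target register within it */
	bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_CL45_DATA_REG, reg);
	/* Data function (0x1 in the function field): the next data
	 * access reads the selected register */
	bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_CL45_ADDR_REG,
			 (0x1 << 14) | devad);
	bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_CL45_DATA_REG, val);
}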
*/
-	bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
-	if (val == MDIO_REG_GPHY_ID_54618SE) {
-		if (vars->link_status &
-		    LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
-			val = 0;
-		else {
-			bnx2x_cl22_write(bp, phy,
-					 MDIO_REG_GPHY_CL45_ADDR_REG,
-					 MDIO_AN_DEVAD);
-			bnx2x_cl22_write(bp, phy,
-					 MDIO_REG_GPHY_CL45_DATA_REG,
-					 MDIO_REG_GPHY_EEE_RESOLVED);
-			bnx2x_cl22_write(bp, phy,
-					 MDIO_REG_GPHY_CL45_ADDR_REG,
-					 (0x1 << 14) | MDIO_AN_DEVAD);
-			bnx2x_cl22_read(bp, phy,
-					MDIO_REG_GPHY_CL45_DATA_REG,
-					&val);
-		}
-		DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
-	}
-
-	bnx2x_ext_phy_resolve_fc(phy, params, vars);
-	}
-	return link_up;
-}
-
-static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
-					  struct link_params *params)
-{
-	struct bnx2x *bp = params->bp;
-	u16 val;
-	u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
-
-	DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n");
-
-	/* Enable master/slave manual mode and set to master */
-	/* mii write 9 [bits set 11 12] */
-	bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
-
-	/* Force 1G and disable autoneg */
-	/* set val [mii read 0] */
-	/* set val [expr $val & [bits clear 6 12 13]] */
-	/* set val [expr $val | [bits set 6 8]] */
-	/* mii write 0 $val */
-	bnx2x_cl22_read(bp, phy, 0x00, &val);
-	val &= ~((1<<6) | (1<<12) | (1<<13));
-	val |= (1<<6) | (1<<8);
-	bnx2x_cl22_write(bp, phy, 0x00, val);
-
-	/* Set external loopback and Tx using 6dB coding */
-	/* mii write 0x18 7 */
-	/* set val [mii read 0x18] */
-	/* mii write 0x18 [expr $val | [bits set 10 15]] */
-	bnx2x_cl22_write(bp, phy, 0x18, 7);
-	bnx2x_cl22_read(bp, phy, 0x18, &val);
-	bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
-
-	/* This register opens the gate for the UMAC despite its name */
-	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
-
-	/*
-	 * Maximum Frame Length (RW). Defines a 14-bit maximum frame
-	 * length used by the MAC receive logic to check frames.
- */ - REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); -} - -/******************************************************************/ -/* SFX7101 PHY SECTION */ -/******************************************************************/ -static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - /* SFX7101_XGXS_TEST1 */ - bnx2x_cl45_write(bp, phy, - MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); -} - -static int bnx2x_7101_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - u16 fw_ver1, fw_ver2, val; - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n"); - - /* Restore normal power mode*/ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); - /* HW reset */ - bnx2x_ext_phy_hw_reset(bp, params->port); - bnx2x_wait_reset_complete(bp, phy, params); - - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1); - DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n"); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3)); - - bnx2x_ext_phy_set_pause(params, phy, vars); - /* Restart autoneg */ - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val); - val |= 0x200; - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val); - - /* Save spirom version */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1); - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2); - bnx2x_save_spirom_version(bp, params->port, - (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr); - return 0; -} - -static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u8 link_up; - u16 val1, val2; - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); - DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n", - val2, val1); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); - DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", - val2, val1); - link_up = ((val1 & 4) == 4); - /* if link is up print the AN outcome of the SFX7101 PHY */ - if (link_up) { - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, - &val2); - vars->line_speed = SPEED_10000; - vars->duplex = DUPLEX_FULL; - DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n", - val2, (val2 & (1<<14))); - bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); - bnx2x_ext_phy_resolve_fc(phy, params, vars); - } - return link_up; -} - -static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len) -{ - if (*len < 5) - return -EINVAL; - str[0] = (spirom_ver & 0xFF); - str[1] = (spirom_ver & 0xFF00) >> 8; - str[2] = (spirom_ver & 0xFF0000) >> 16; - str[3] = (spirom_ver & 0xFF000000) >> 24; - str[4] = '\0'; - *len -= 5; - return 0; -} - -void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy) -{ - u16 val, cnt; - - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_7101_RESET, &val); - - for (cnt = 0; cnt < 10; cnt++) { - msleep(50); - /* Writes a self-clearing reset */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_7101_RESET, - (val | (1<<15))); - /* Wait for clear */ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_7101_RESET, 
&val); - - if ((val & (1<<15)) == 0) - break; - } -} - -static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy, - struct link_params *params) { - /* Low power mode is controlled by GPIO 2 */ - bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); - /* The PHY reset is controlled by GPIO 1 */ - bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); -} - -static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, - struct link_params *params, u8 mode) -{ - u16 val = 0; - struct bnx2x *bp = params->bp; - switch (mode) { - case LED_MODE_FRONT_PANEL_OFF: - case LED_MODE_OFF: - val = 2; - break; - case LED_MODE_ON: - val = 1; - break; - case LED_MODE_OPER: - val = 0; - break; - } - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_7107_LINK_LED_CNTL, - val); -} - -/******************************************************************/ -/* STATIC PHY DECLARATION */ -/******************************************************************/ - -static struct bnx2x_phy phy_null = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN, - .addr = 0, - .def_md_devad = 0, - .flags = FLAGS_INIT_XGXS_FIRST, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = 0, - .media_type = ETH_PHY_NOT_PRESENT, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)NULL, - .read_status = (read_status_t)NULL, - .link_reset = (link_reset_t)NULL, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL -}; - -static struct bnx2x_phy phy_serdes = { - .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT, - .addr = 0xff, - .def_md_devad = 0, - .flags = 0, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_2500baseX_Full | - SUPPORTED_TP | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_BASE_T, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_xgxs_config_init, - .read_status = (read_status_t)bnx2x_link_settings_status, - .link_reset = (link_reset_t)bnx2x_int_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL -}; - -static struct bnx2x_phy phy_xgxs = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, - .addr = 0xff, - .def_md_devad = 0, - .flags = 0, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_2500baseX_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_CX4, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask 
= 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_xgxs_config_init, - .read_status = (read_status_t)bnx2x_link_settings_status, - .link_reset = (link_reset_t)bnx2x_int_link_reset, - .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback, - .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL -}; -static struct bnx2x_phy phy_warpcore = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, - .addr = 0xff, - .def_md_devad = 0, - .flags = (FLAGS_HW_LOCK_REQUIRED | - FLAGS_TX_ERROR_CHECK), - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_20000baseKR2_Full | - SUPPORTED_20000baseMLD2_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_UNSPECIFIED, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - /* req_duplex = */0, - /* rsrv = */0, - .config_init = (config_init_t)bnx2x_warpcore_config_init, - .read_status = (read_status_t)bnx2x_warpcore_read_status, - .link_reset = (link_reset_t)bnx2x_warpcore_link_reset, - .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback, - .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL -}; - - -static struct bnx2x_phy phy_7101 = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, - .addr = 0xff, - .def_md_devad = 0, - .flags = FLAGS_FAN_FAILURE_DET_REQ, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10000baseT_Full | - SUPPORTED_TP | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_BASE_T, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_7101_config_init, - .read_status = (read_status_t)bnx2x_7101_read_status, - .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, - .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback, - .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver, - .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset, - .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL -}; -static struct bnx2x_phy phy_8073 = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, - .addr = 0xff, - .def_md_devad = 0, - .flags = FLAGS_HW_LOCK_REQUIRED, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10000baseT_Full | - SUPPORTED_2500baseX_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_KR, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_8073_config_init, - .read_status = (read_status_t)bnx2x_8073_read_status, - .link_reset = (link_reset_t)bnx2x_8073_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver 
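Each template ORs together every ethtool SUPPORTED_* bit the device could ever offer; the probe code further below only narrows that set with &= per detected interface, so a template has to start as a superset. The intersection in isolation, with stand-in bit values:

#include <stdio.h>

#define SUP_1000_FULL  (1u << 0)	/* stand-ins for SUPPORTED_* bits */
#define SUP_10000_FULL (1u << 1)
#define SUP_TP         (1u << 2)
#define SUP_FIBRE      (1u << 3)

int main(void)
{
	unsigned int supported = SUP_1000_FULL | SUP_10000_FULL |
				 SUP_TP | SUP_FIBRE;

	/* An SFI-style interface keeps only 1G/10G fibre; &= never adds */
	supported &= (SUP_1000_FULL | SUP_10000_FULL | SUP_FIBRE);
	printf("supported = 0x%x\n", supported);	/* 0xb */
	return 0;
}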
= (format_fw_ver_t)bnx2x_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL -}; -static struct bnx2x_phy phy_8705 = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, - .addr = 0xff, - .def_md_devad = 0, - .flags = FLAGS_INIT_XGXS_FIRST, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_XFP_FIBER, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_8705_config_init, - .read_status = (read_status_t)bnx2x_8705_read_status, - .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL -}; -static struct bnx2x_phy phy_8706 = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, - .addr = 0xff, - .def_md_devad = 0, - .flags = (FLAGS_INIT_XGXS_FIRST | - FLAGS_TX_ERROR_CHECK), - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_SFP_FIBER, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_8706_config_init, - .read_status = (read_status_t)bnx2x_8706_read_status, - .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL -}; - -static struct bnx2x_phy phy_8726 = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, - .addr = 0xff, - .def_md_devad = 0, - .flags = (FLAGS_HW_LOCK_REQUIRED | - FLAGS_INIT_XGXS_FIRST | - FLAGS_TX_ERROR_CHECK), - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_NOT_PRESENT, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_8726_config_init, - .read_status = (read_status_t)bnx2x_8726_read_status, - .link_reset = (link_reset_t)bnx2x_8726_link_reset, - .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL -}; - -static struct bnx2x_phy phy_8727 = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, - .addr = 0xff, - .def_md_devad = 0, - .flags = (FLAGS_FAN_FAILURE_DET_REQ | - FLAGS_TX_ERROR_CHECK), - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10000baseT_Full | - 
SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_NOT_PRESENT, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_8727_config_init, - .read_status = (read_status_t)bnx2x_8727_read_status, - .link_reset = (link_reset_t)bnx2x_8727_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, - .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset, - .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func -}; -static struct bnx2x_phy phy_8481 = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, - .addr = 0xff, - .def_md_devad = 0, - .flags = FLAGS_FAN_FAILURE_DET_REQ | - FLAGS_REARM_LATCH_SIGNAL, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_TP | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_BASE_T, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_8481_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_8481_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL -}; - -static struct bnx2x_phy phy_84823 = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, - .addr = 0xff, - .def_md_devad = 0, - .flags = FLAGS_FAN_FAILURE_DET_REQ | - FLAGS_REARM_LATCH_SIGNAL, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_TP | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_BASE_T, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_848x3_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL -}; - -static struct bnx2x_phy phy_84833 = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, - .addr = 0xff, - .def_md_devad = 0, - .flags = FLAGS_FAN_FAILURE_DET_REQ | - FLAGS_REARM_LATCH_SIGNAL, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_TP | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - 
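The .flags words in these templates pack per-model quirks (fan-failure detection, latch rearm, and so on) into single bits; helpers later in this file test one bit or OR-accumulate it across all PHYs. The idiom in miniature (bit positions hypothetical):

#include <stdio.h>

#define FLG_FAN_FAILURE_DET (1u << 2)	/* hypothetical bit positions */
#define FLG_REARM_LATCH     (1u << 6)

int main(void)
{
	unsigned short phy0_flags = FLG_FAN_FAILURE_DET | FLG_REARM_LATCH;
	unsigned short phy1_flags = 0;
	unsigned short need_fan_det = 0;

	/* OR-accumulate across phys, as bnx2x_fan_failure_det_req does */
	need_fan_det |= (phy0_flags & FLG_FAN_FAILURE_DET);
	need_fan_det |= (phy1_flags & FLG_FAN_FAILURE_DET);
	printf("fan failure detection %s\n",
	       need_fan_det ? "required" : "not required");
	return 0;
}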
.media_type = ETH_PHY_BASE_T, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - .req_duplex = 0, - .rsrv = 0, - .config_init = (config_init_t)bnx2x_848x3_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL -}; - -static struct bnx2x_phy phy_54618se = { - .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE, - .addr = 0xff, - .def_md_devad = 0, - .flags = FLAGS_INIT_XGXS_FIRST, - .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, - .mdio_ctrl = 0, - .supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_TP | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_BASE_T, - .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, - /* req_duplex = */0, - /* rsrv = */0, - .config_init = (config_init_t)bnx2x_54618se_config_init, - .read_status = (read_status_t)bnx2x_54618se_read_status, - .link_reset = (link_reset_t)bnx2x_54618se_link_reset, - .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, - .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)bnx2x_54618se_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL -}; -/*****************************************************************/ -/* */ -/* Populate the phy according. Main function: bnx2x_populate_phy */ -/* */ -/*****************************************************************/ - -static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base, - struct bnx2x_phy *phy, u8 port, - u8 phy_index) -{ - /* Get the 4 lanes xgxs config rx and tx */ - u32 rx = 0, tx = 0, i; - for (i = 0; i < 2; i++) { - /* - * INT_PHY and EXT_PHY1 share the same value location in the - * shmem. 
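bnx2x_populate_preemphasis, which starts here, unpacks two 16-bit per-lane values from each 32-bit shmem word: index i<<1 takes the high half, (i<<1)+1 the low half. The extraction on its own, with a made-up register value:

#include <stdio.h>

int main(void)
{
	unsigned int rx = 0xAAAABBBBu;	/* hypothetical xgxs_config_rx word */
	unsigned short pre[2];

	pre[0] = (rx >> 16) & 0xffff;	/* even lane: high 16 bits */
	pre[1] = rx & 0xffff;		/* odd lane: low 16 bits */
	printf("lane0=0x%04x lane1=0x%04x\n", pre[0], pre[1]);
	return 0;
}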
When num_phys is greater than 1, then this value - * applies only to EXT_PHY1 - */ - if (phy_index == INT_PHY || phy_index == EXT_PHY1) { - rx = REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config_rx[i<<1])); - - tx = REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config_tx[i<<1])); - } else { - rx = REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); - - tx = REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config2_tx[i<<1])); - } - - phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff); - phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff); - - phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff); - phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff); - } -} - -static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base, - u8 phy_index, u8 port) -{ - u32 ext_phy_config = 0; - switch (phy_index) { - case EXT_PHY1: - ext_phy_config = REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].external_phy_config)); - break; - case EXT_PHY2: - ext_phy_config = REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].external_phy_config2)); - break; - default: - DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index); - return -EINVAL; - } - - return ext_phy_config; -} -static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, - struct bnx2x_phy *phy) -{ - u32 phy_addr; - u32 chip_id; - u32 switch_cfg = (REG_RD(bp, shmem_base + - offsetof(struct shmem_region, - dev_info.port_feature_config[port].link_config)) & - PORT_FEATURE_CONNECTED_SWITCH_MASK); - chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16; - DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id); - if (USES_WARPCORE(bp)) { - u32 serdes_net_if; - phy_addr = REG_RD(bp, - MISC_REG_WC0_CTRL_PHY_ADDR); - *phy = phy_warpcore; - if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3) - phy->flags |= FLAGS_4_PORT_MODE; - else - phy->flags &= ~FLAGS_4_PORT_MODE; - /* Check Dual mode */ - serdes_net_if = (REG_RD(bp, shmem_base + - offsetof(struct shmem_region, dev_info.
- port_hw_config[port].default_cfg)) & - PORT_HW_CFG_NET_SERDES_IF_MASK); - /* - * Set the appropriate supported and flags indications per - * interface type of the chip - */ - switch (serdes_net_if) { - case PORT_HW_CFG_NET_SERDES_IF_SGMII: - phy->supported &= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phy->media_type = ETH_PHY_BASE_T; - break; - case PORT_HW_CFG_NET_SERDES_IF_XFI: - phy->media_type = ETH_PHY_XFP_FIBER; - break; - case PORT_HW_CFG_NET_SERDES_IF_SFI: - phy->supported &= (SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phy->media_type = ETH_PHY_SFP_FIBER; - break; - case PORT_HW_CFG_NET_SERDES_IF_KR: - phy->media_type = ETH_PHY_KR; - phy->supported &= (SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; - case PORT_HW_CFG_NET_SERDES_IF_DXGXS: - phy->media_type = ETH_PHY_KR; - phy->flags |= FLAGS_WC_DUAL_MODE; - phy->supported &= (SUPPORTED_20000baseMLD2_Full | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; - case PORT_HW_CFG_NET_SERDES_IF_KR2: - phy->media_type = ETH_PHY_KR; - phy->flags |= FLAGS_WC_DUAL_MODE; - phy->supported &= (SUPPORTED_20000baseKR2_Full | - SUPPORTED_FIBRE | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; - default: - DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n", - serdes_net_if); - break; - } - - /* - * Enable MDC/MDIO work-around for E3 A0 since free running MDC - * was not set as expected. For B0, ECO will be enabled so there - * won't be an issue there - */ - if (CHIP_REV(bp) == CHIP_REV_Ax) - phy->flags |= FLAGS_MDC_MDIO_WA; - else - phy->flags |= FLAGS_MDC_MDIO_WA_B0; - } else { - switch (switch_cfg) { - case SWITCH_CFG_1G: - phy_addr = REG_RD(bp, - NIG_REG_SERDES0_CTRL_PHY_ADDR + - port * 0x10); - *phy = phy_serdes; - break; - case SWITCH_CFG_10G: - phy_addr = REG_RD(bp, - NIG_REG_XGXS0_CTRL_PHY_ADDR + - port * 0x18); - *phy = phy_xgxs; - break; - default: - DP(NETIF_MSG_LINK, "Invalid switch_cfg\n"); - return -EINVAL; - } - } - phy->addr = (u8)phy_addr; - phy->mdio_ctrl = bnx2x_get_emac_base(bp, - SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH, - port); - if (CHIP_IS_E2(bp)) - phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR; - else - phy->def_md_devad = DEFAULT_PHY_DEV_ADDR; - - DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n", - port, phy->addr, phy->mdio_ctrl); - - bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY); - return 0; -} - -static int bnx2x_populate_ext_phy(struct bnx2x *bp, - u8 phy_index, - u32 shmem_base, - u32 shmem2_base, - u8 port, - struct bnx2x_phy *phy) -{ - u32 ext_phy_config, phy_type, config2; - u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH; - ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base, - phy_index, port); - phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); - /* Select the phy type */ - switch (phy_type) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: - mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED; - *phy = phy_8073; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: - *phy = phy_8705; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: - *phy = phy_8706; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; - 
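Each case in the populate switch copies an entire static template into the caller's struct by plain assignment (as in *phy = phy_8726 just below) and then overrides per-port fields; the template itself is never modified. The semantics in miniature (types hypothetical):

#include <stdio.h>

struct tmpl { int addr; unsigned int flags; };

static const struct tmpl phy_tmpl = { .addr = 0xff, .flags = 0x2 };

int main(void)
{
	struct tmpl phy;

	phy = phy_tmpl;		/* struct assignment copies every member */
	phy.addr = 0x1;		/* per-port override; template untouched */
	printf("phy.addr=0x%x, template addr=0x%x\n", phy.addr, phy_tmpl.addr);
	return 0;
}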
*phy = phy_8726; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: - /* BCM8727_NOC => BCM8727 no over current */ - mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; - *phy = phy_8727; - phy->flags |= FLAGS_NOC; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: - mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; - *phy = phy_8727; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: - *phy = phy_8481; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823: - *phy = phy_84823; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: - *phy = phy_84833; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: - *phy = phy_54618se; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: - *phy = phy_7101; - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: - *phy = phy_null; - return -EINVAL; - default: - *phy = phy_null; - return 0; - } - - phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); - bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); - - /* - * The shmem address of the phy version is located on different - * structures. In case this structure is too old, do not set - * the address - */ - config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region, - dev_info.shared_hw_config.config2)); - if (phy_index == EXT_PHY1) { - phy->ver_addr = shmem_base + offsetof(struct shmem_region, - port_mb[port].ext_phy_fw_version); - - /* Check specific mdc mdio settings */ - if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) - mdc_mdio_access = config2 & - SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; - } else { - u32 size = REG_RD(bp, shmem2_base); - - if (size > - offsetof(struct shmem2_region, ext_phy_fw_version2)) { - phy->ver_addr = shmem2_base + - offsetof(struct shmem2_region, - ext_phy_fw_version2[port]); - } - /* Check specific mdc mdio settings */ - if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) - mdc_mdio_access = (config2 & - SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >> - (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT - - SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT); - } - phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); - - /* - * In case mdc/mdio_access of the external phy is different than the - * mdc/mdio access of the XGXS, a HW lock must be taken in each access - * to prevent one port interfere with another port's CL45 operations. - */ - if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH) - phy->flags |= FLAGS_HW_LOCK_REQUIRED; - DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n", - phy_type, port, phy_index); - DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n", - phy->addr, phy->mdio_ctrl); - return 0; -} - -static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base, - u32 shmem2_base, u8 port, struct bnx2x_phy *phy) -{ - int status = 0; - phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN; - if (phy_index == INT_PHY) - return bnx2x_populate_int_phy(bp, shmem_base, port, phy); - status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base, - port, phy); - return status; -} - -static void bnx2x_phy_def_cfg(struct link_params *params, - struct bnx2x_phy *phy, - u8 phy_index) -{ - struct bnx2x *bp = params->bp; - u32 link_config; - /* Populate the default phy configuration for MF mode */ - if (phy_index == EXT_PHY2) { - link_config = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_feature_config[params->port].link_config2)); - phy->speed_cap_mask = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info. 
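The EXT_PHY2 branch above renormalizes the second PHY's MDC/MDIO field by shifting it down by the difference between the two fields' offsets, so the same ACCESS1_* constants match either field. A worked example with made-up shift values:

#include <stdio.h>

#define ACCESS1_SHIFT 2			/* hypothetical field offsets */
#define ACCESS2_SHIFT 6
#define ACCESS2_MASK  (0xfu << ACCESS2_SHIFT)

int main(void)
{
	unsigned int config2 = 1u << ACCESS2_SHIFT;	/* field value 1 */
	unsigned int access = (config2 & ACCESS2_MASK) >>
			      (ACCESS2_SHIFT - ACCESS1_SHIFT);

	/* The value now sits where an ACCESS1 field would: 1 << 2 */
	printf("access = 0x%x\n", access);	/* 0x4 */
	return 0;
}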
- port_hw_config[params->port].speed_capability_mask2)); - } else { - link_config = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_feature_config[params->port].link_config)); - phy->speed_cap_mask = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info. - port_hw_config[params->port].speed_capability_mask)); - } - DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask" - " 0x%x\n", phy_index, link_config, phy->speed_cap_mask); - - phy->req_duplex = DUPLEX_FULL; - switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { - case PORT_FEATURE_LINK_SPEED_10M_HALF: - phy->req_duplex = DUPLEX_HALF; - case PORT_FEATURE_LINK_SPEED_10M_FULL: - phy->req_line_speed = SPEED_10; - break; - case PORT_FEATURE_LINK_SPEED_100M_HALF: - phy->req_duplex = DUPLEX_HALF; - case PORT_FEATURE_LINK_SPEED_100M_FULL: - phy->req_line_speed = SPEED_100; - break; - case PORT_FEATURE_LINK_SPEED_1G: - phy->req_line_speed = SPEED_1000; - break; - case PORT_FEATURE_LINK_SPEED_2_5G: - phy->req_line_speed = SPEED_2500; - break; - case PORT_FEATURE_LINK_SPEED_10G_CX4: - phy->req_line_speed = SPEED_10000; - break; - default: - phy->req_line_speed = SPEED_AUTO_NEG; - break; - } - - switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) { - case PORT_FEATURE_FLOW_CONTROL_AUTO: - phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; - break; - case PORT_FEATURE_FLOW_CONTROL_TX: - phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX; - break; - case PORT_FEATURE_FLOW_CONTROL_RX: - phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX; - break; - case PORT_FEATURE_FLOW_CONTROL_BOTH: - phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH; - break; - default: - phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; - break; - } -} - -u32 bnx2x_phy_selection(struct link_params *params) -{ - u32 phy_config_swapped, prio_cfg; - u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT; - - phy_config_swapped = params->multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED; - - prio_cfg = params->multi_phy_config & - PORT_HW_CFG_PHY_SELECTION_MASK; - - if (phy_config_swapped) { - switch (prio_cfg) { - case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: - return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY; - break; - case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: - return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY; - break; - case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: - return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; - break; - case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: - return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; - break; - } - } else - return_cfg = prio_cfg; - - return return_cfg; -} - - -int bnx2x_phy_probe(struct link_params *params) -{ - u8 phy_index, actual_phy_idx, link_cfg_idx; - u32 phy_config_swapped, sync_offset, media_types; - struct bnx2x *bp = params->bp; - struct bnx2x_phy *phy; - params->num_phys = 0; - DP(NETIF_MSG_LINK, "Begin phy probe\n"); - phy_config_swapped = params->multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED; - - for (phy_index = INT_PHY; phy_index < MAX_PHYS; - phy_index++) { - link_cfg_idx = LINK_CONFIG_IDX(phy_index); - actual_phy_idx = phy_index; - if (phy_config_swapped) { - if (phy_index == EXT_PHY1) - actual_phy_idx = EXT_PHY2; - else if (phy_index == EXT_PHY2) - actual_phy_idx = EXT_PHY1; - } - DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x," - " actual_phy_idx %x\n", phy_config_swapped, - phy_index, actual_phy_idx); - phy = &params->phy[actual_phy_idx];
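When the shmem reports a swapped dual-PHY board, both bnx2x_phy_selection and the probe loop above exchange the roles of EXT_PHY1 and EXT_PHY2 while still iterating in configuration order. The exchange on its own:

#include <stdio.h>

enum { INT_PHY, EXT_PHY1, EXT_PHY2, MAX_PHYS };

int main(void)
{
	int swapped = 1, phy_index;

	for (phy_index = INT_PHY; phy_index < MAX_PHYS; phy_index++) {
		int actual = phy_index;

		if (swapped) {		/* the internal phy never swaps */
			if (phy_index == EXT_PHY1)
				actual = EXT_PHY2;
			else if (phy_index == EXT_PHY2)
				actual = EXT_PHY1;
		}
		printf("config slot %d -> phy array slot %d\n",
		       phy_index, actual);
	}
	return 0;
}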
- if (bnx2x_populate_phy(bp, phy_index, params->shmem_base, - params->shmem2_base, params->port, - phy) != 0) { - params->num_phys = 0; - DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n", - phy_index); - for (phy_index = INT_PHY; - phy_index < MAX_PHYS; - phy_index++) - *phy = phy_null; - return -EINVAL; - } - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) - break; - - sync_offset = params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[params->port].media_type); - media_types = REG_RD(bp, sync_offset); - - /* - * Update media type for non-PMF sync only for the first time - * In case the media type changes afterwards, it will be updated - * using the update_status function - */ - if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << - (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * - actual_phy_idx))) == 0) { - media_types |= ((phy->media_type & - PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << - (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * - actual_phy_idx)); - } - REG_WR(bp, sync_offset, media_types); - - bnx2x_phy_def_cfg(params, phy, phy_index); - params->num_phys++; - } - - DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys); - return 0; -} - -void bnx2x_init_bmac_loopback(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - vars->link_up = 1; - vars->line_speed = SPEED_10000; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->mac_type = MAC_TYPE_BMAC; - - vars->phy_flags = PHY_XGXS_FLAG; - - bnx2x_xgxs_deassert(params); - - /* set bmac loopback */ - bnx2x_bmac_enable(params, vars, 1); - - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); -} - -void bnx2x_init_emac_loopback(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - vars->link_up = 1; - vars->line_speed = SPEED_1000; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->mac_type = MAC_TYPE_EMAC; - - vars->phy_flags = PHY_XGXS_FLAG; - - bnx2x_xgxs_deassert(params); - /* set emac loopback */ - bnx2x_emac_enable(params, vars, 1); - bnx2x_emac_program(params, vars); - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); -} - -void bnx2x_init_xmac_loopback(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - vars->link_up = 1; - if (!params->req_line_speed[0]) - vars->line_speed = SPEED_10000; - else - vars->line_speed = params->req_line_speed[0]; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->mac_type = MAC_TYPE_XMAC; - vars->phy_flags = PHY_XGXS_FLAG; - /* - * Set WC to loopback mode since link is required to provide clock - * to the XMAC in 20G mode - */ - bnx2x_set_aer_mmd(params, &params->phy[0]); - bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0); - params->phy[INT_PHY].config_loopback( - &params->phy[INT_PHY], - params); - - bnx2x_xmac_enable(params, vars, 1); - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); -} - -void bnx2x_init_umac_loopback(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - vars->link_up = 1; - vars->line_speed = SPEED_1000; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->mac_type = MAC_TYPE_UMAC; - vars->phy_flags = PHY_XGXS_FLAG; - bnx2x_umac_enable(params, vars, 1); - - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); -} - -void bnx2x_init_xgxs_loopback(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - vars->link_up = 1; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->duplex = DUPLEX_FULL; - if (params->req_line_speed[0] ==
SPEED_1000) - vars->line_speed = SPEED_1000; - else - vars->line_speed = SPEED_10000; - - if (!USES_WARPCORE(bp)) - bnx2x_xgxs_deassert(params); - bnx2x_link_initialize(params, vars); - - if (params->req_line_speed[0] == SPEED_1000) { - if (USES_WARPCORE(bp)) - bnx2x_umac_enable(params, vars, 0); - else { - bnx2x_emac_program(params, vars); - bnx2x_emac_enable(params, vars, 0); - } - } else { - if (USES_WARPCORE(bp)) - bnx2x_xmac_enable(params, vars, 0); - else - bnx2x_bmac_enable(params, vars, 0); - } - - if (params->loopback_mode == LOOPBACK_XGXS) { - /* set 10G XGXS loopback */ - params->phy[INT_PHY].config_loopback( - &params->phy[INT_PHY], - params); - - } else { - /* set external phy loopback */ - u8 phy_index; - for (phy_index = EXT_PHY1; - phy_index < params->num_phys; phy_index++) { - if (params->phy[phy_index].config_loopback) - params->phy[phy_index].config_loopback( - &params->phy[phy_index], - params); - } - } - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); - - bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); -} - -int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "Phy Initialization started\n"); - DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", - params->req_line_speed[0], params->req_flow_ctrl[0]); - DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n", - params->req_line_speed[1], params->req_flow_ctrl[1]); - vars->link_status = 0; - vars->phy_link_up = 0; - vars->link_up = 0; - vars->line_speed = 0; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->mac_type = MAC_TYPE_NONE; - vars->phy_flags = 0; - - /* disable attentions */ - bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); - - bnx2x_emac_init(params, vars); - - if (params->num_phys == 0) { - DP(NETIF_MSG_LINK, "No phy found for initialization !!\n"); - return -EINVAL; - } - set_phy_vars(params, vars); - - DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); - switch (params->loopback_mode) { - case LOOPBACK_BMAC: - bnx2x_init_bmac_loopback(params, vars); - break; - case LOOPBACK_EMAC: - bnx2x_init_emac_loopback(params, vars); - break; - case LOOPBACK_XMAC: - bnx2x_init_xmac_loopback(params, vars); - break; - case LOOPBACK_UMAC: - bnx2x_init_umac_loopback(params, vars); - break; - case LOOPBACK_XGXS: - case LOOPBACK_EXT_PHY: - bnx2x_init_xgxs_loopback(params, vars); - break; - default: - if (!CHIP_IS_E3(bp)) { - if (params->switch_cfg == SWITCH_CFG_10G) - bnx2x_xgxs_deassert(params); - else - bnx2x_serdes_deassert(bp, params->port); - } - bnx2x_link_initialize(params, vars); - msleep(30); - bnx2x_link_int_enable(params); - break; - } - return 0; -} - -int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, - u8 reset_ext_phy) -{ - struct bnx2x *bp = params->bp; - u8 phy_index, port = params->port, clear_latch_ind = 0; - DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); - /* disable attentions */ - vars->link_status = 0; - bnx2x_update_mng(params, vars->link_status); - bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); - - /* activate nig drain */ - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); - - /* disable nig egress interface */ - if (!CHIP_IS_E3(bp)) { - REG_WR(bp, NIG_REG_BMAC0_OUT_EN
+ port*4, 0); - REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); - } - - /* Stop BigMac rx */ - if (!CHIP_IS_E3(bp)) - bnx2x_bmac_rx_disable(bp, port); - else - bnx2x_xmac_disable(params); - /* disable emac */ - if (!CHIP_IS_E3(bp)) - REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - - msleep(10); - /* The PHY reset is controlled by GPIO 1 - * Hold it low - */ - /* clear link led */ - bnx2x_set_led(params, vars, LED_MODE_OFF, 0); - - if (reset_ext_phy) { - bnx2x_set_mdio_clk(bp, params->chip_id, port); - for (phy_index = EXT_PHY1; phy_index < params->num_phys; - phy_index++) { - if (params->phy[phy_index].link_reset) { - bnx2x_set_aer_mmd(params, - &params->phy[phy_index]); - params->phy[phy_index].link_reset( - &params->phy[phy_index], - params); - } - if (params->phy[phy_index].flags & - FLAGS_REARM_LATCH_SIGNAL) - clear_latch_ind = 1; - } - } - - if (clear_latch_ind) { - /* Clear latching indication */ - bnx2x_rearm_latch_signal(bp, port, 0); - bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4, - 1 << NIG_LATCH_BC_ENABLE_MI_INT); - } - if (params->phy[INT_PHY].link_reset) - params->phy[INT_PHY].link_reset( - &params->phy[INT_PHY], params); - /* reset BigMac */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - - /* disable nig ingress interface */ - if (!CHIP_IS_E3(bp)) { - REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); - REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0); - } - vars->link_up = 0; - vars->phy_flags = 0; - return 0; -} - -/****************************************************************************/ -/* Common function */ -/****************************************************************************/ -static int bnx2x_8073_common_init_phy(struct bnx2x *bp, - u32 shmem_base_path[], - u32 shmem2_base_path[], u8 phy_index, - u32 chip_id) -{ - struct bnx2x_phy phy[PORT_MAX]; - struct bnx2x_phy *phy_blk[PORT_MAX]; - u16 val; - s8 port = 0; - s8 port_of_path = 0; - u32 swap_val, swap_override; - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - port ^= (swap_val && swap_override); - bnx2x_ext_phy_hw_reset(bp, port); - /* PART1 - Reset both phys */ - for (port = PORT_MAX - 1; port >= PORT_0; port--) { - u32 shmem_base, shmem2_base; - /* In E2, same phy is used for port0 of the two paths */ - if (CHIP_IS_E1x(bp)) { - shmem_base = shmem_base_path[0]; - shmem2_base = shmem2_base_path[0]; - port_of_path = port; - } else { - shmem_base = shmem_base_path[port]; - shmem2_base = shmem2_base_path[port]; - port_of_path = 0; - } - - /* Extract the ext phy address for the port */ - if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, - port_of_path, &phy[port]) != - 0) { - DP(NETIF_MSG_LINK, "populate_phy failed\n"); - return -EINVAL; - } - /* disable attentions */ - bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + - port_of_path*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); - - /* Need to take the phy out of low power mode in order - to access its registers */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, - port); - - /* Reset the phy */ - bnx2x_cl45_write(bp, &phy[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, - 1<<15); - } - - /* Add delay of 150ms after reset */ - msleep(150); - - if (phy[PORT_0].addr & 0x1) { - phy_blk[PORT_0] = &(phy[PORT_1]); - phy_blk[PORT_1] = &(phy[PORT_0]); - } else { - phy_blk[PORT_0] = &(phy[PORT_0]); - phy_blk[PORT_1] = &(phy[PORT_1]); - } -
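bnx2x_8073_common_init_phy above computes its reset port with port ^= (swap_val && swap_override): the logical && collapses the two straps to exactly 0 or 1, so the XOR flips the port only when both are set. In isolation:

#include <stdio.h>

int main(void)
{
	unsigned int swap_val = 1, swap_override = 1;
	int port = 0;

	/* && yields 0 or 1, so this flips port only when both straps set */
	port ^= (swap_val && swap_override);
	printf("port = %d\n", port);	/* 1 */
	return 0;
}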
- /* PART2 - Download firmware to both phys */ - for (port = PORT_MAX - 1; port >= PORT_0; port--) { - if (CHIP_IS_E1x(bp)) - port_of_path = port; - else - port_of_path = 0; - - DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", - phy_blk[port]->addr); - if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], - port_of_path)) - return -EINVAL; - - /* Only set bit 10 = 1 (Tx power down) */ - bnx2x_cl45_read(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, &val); - - /* Phase1 of TX_POWER_DOWN reset */ - bnx2x_cl45_write(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, - (val | 1<<10)); - } - - /* - * Toggle Transmitter: Power down and then up with 600ms delay - * between - */ - msleep(600); - - /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ - for (port = PORT_MAX - 1; port >= PORT_0; port--) { - /* Phase2 of POWER_DOWN_RESET */ - /* Release bit 10 (Release Tx power down) */ - bnx2x_cl45_read(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, &val); - - bnx2x_cl45_write(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); - msleep(15); - - /* Read modify write the SPI-ROM version select register */ - bnx2x_cl45_read(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_EDC_FFE_MAIN, &val); - bnx2x_cl45_write(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); - - /* set GPIO2 back to LOW */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - } - return 0; -} -static int bnx2x_8726_common_init_phy(struct bnx2x *bp, - u32 shmem_base_path[], - u32 shmem2_base_path[], u8 phy_index, - u32 chip_id) -{ - u32 val; - s8 port; - struct bnx2x_phy phy; - /* Use port1 because of the static port-swap */ - /* Enable the module detection interrupt */ - val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); - val |= ((1<<MISC_REGISTERS_GPIO_3)| - (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT))); - REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); - - bnx2x_ext_phy_hw_reset(bp, 0); - msleep(5); - for (port = 0; port < PORT_MAX; port++) { - u32 shmem_base, shmem2_base; - - /* In E2, same phy is used for port0 of the two paths */ - if (CHIP_IS_E1x(bp)) { - shmem_base = shmem_base_path[0]; - shmem2_base = shmem2_base_path[0]; - } else { - shmem_base = shmem_base_path[port]; - shmem2_base = shmem2_base_path[port]; - } - /* Extract the ext phy address for the port */ - if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, - port, &phy) != 0) { - DP(NETIF_MSG_LINK, "populate phy failed\n"); - return -EINVAL; - } - - /* Reset phy */ - bnx2x_cl45_write(bp, &phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001); - - /* Set fault module detected LED on */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_HIGH, - port); - } - - return 0; -} - -static int bnx2x_8727_common_init_phy(struct bnx2x *bp, - u32 shmem_base_path[], - u32 shmem2_base_path[], u8 phy_index, - u32 chip_id) -{ - s8 port, port_of_path; - u32 swap_val, swap_override; - struct bnx2x_phy phy[PORT_MAX]; - struct bnx2x_phy *phy_blk[PORT_MAX]; - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - /* Reset is driven through GPIO1 on port1 by default; swap if needed */ - port = 1; - port ^= (swap_val && swap_override); - /* Initiate PHY reset */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - msleep(1); - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); - msleep(5); - - /* PART1 - Reset both phys */ - for (port = PORT_MAX - 1; port >= PORT_0; port--) { - u32 shmem_base, shmem2_base; - - /* In E2, same phy is used for port0 of the two paths */ - if (CHIP_IS_E1x(bp)) { - shmem_base = shmem_base_path[0]; - shmem2_base = shmem2_base_path[0]; - port_of_path = port; - } else { - shmem_base = shmem_base_path[port]; - shmem2_base = shmem2_base_path[port]; - port_of_path = 0; - } - - /* Extract the ext phy address for the port */ - if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, - port_of_path, &phy[port]) != - 0) { - DP(NETIF_MSG_LINK, "populate phy failed\n"); - return -EINVAL; - } - /* disable attentions */ - bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + - port_of_path*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); - - - /* Reset the phy */ - bnx2x_cl45_write(bp, &phy[port], - MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); - } - - /* Add delay of 150ms after reset */ - msleep(150); - if (phy[PORT_0].addr & 0x1) { - phy_blk[PORT_0] = &(phy[PORT_1]); - phy_blk[PORT_1] = &(phy[PORT_0]); - } else { - phy_blk[PORT_0] = &(phy[PORT_0]); - phy_blk[PORT_1] = &(phy[PORT_1]); - } - /* PART2 - Download firmware to both phys */ - for (port = PORT_MAX - 1; port >= PORT_0; port--) { - if (CHIP_IS_E1x(bp)) - port_of_path = port; - else - port_of_path = 0; - DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", - phy_blk[port]->addr); - if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], - port_of_path)) - return -EINVAL; - /* Disable PHY transmitter output */ - bnx2x_cl45_write(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_DISABLE, 1); - - } - return 0; -} - -static
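The three-part 8073 sequence above is a classic read-modify-write toggle over MDIO: phase 1 sets bit 10 (Tx power down) before the firmware load, phase 2 clears it again afterwards. The bit manipulation by itself:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x0123;	/* pretend MDIO register content */

	val |= (1 << 10);		/* phase 1: force Tx power down */
	printf("powered down: 0x%04x\n", val);	/* 0x0523 */

	val &= ~(1 << 10);		/* phase 2: release Tx power down */
	printf("released:     0x%04x\n", val);	/* 0x0123 */
	return 0;
}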
int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], - u32 shmem2_base_path[], u8 phy_index, - u32 ext_phy_type, u32 chip_id) -{ - int rc = 0; - - switch (ext_phy_type) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: - rc = bnx2x_8073_common_init_phy(bp, shmem_base_path, - shmem2_base_path, - phy_index, chip_id); - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: - rc = bnx2x_8727_common_init_phy(bp, shmem_base_path, - shmem2_base_path, - phy_index, chip_id); - break; - - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - /* - * GPIO1 affects both ports, so there's need to pull - * it for single port alone - */ - rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, - shmem2_base_path, - phy_index, chip_id); - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: - /* - * GPIO3's are linked, and so both need to be toggled - * to obtain required 2us pulse. - */ - rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id); - break; - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: - rc = -EINVAL; - break; - default: - DP(NETIF_MSG_LINK, - "ext_phy 0x%x common init not required\n", - ext_phy_type); - break; - } - - if (rc != 0) - netdev_err(bp->dev, "Warning: PHY was not initialized," - " Port %d\n", - 0); - return rc; -} - -int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], - u32 shmem2_base_path[], u32 chip_id) -{ - int rc = 0; - u32 phy_ver, val; - u8 phy_index = 0; - u32 ext_phy_type, ext_phy_config; - bnx2x_set_mdio_clk(bp, chip_id, PORT_0); - bnx2x_set_mdio_clk(bp, chip_id, PORT_1); - DP(NETIF_MSG_LINK, "Begin common phy init\n"); - if (CHIP_IS_E3(bp)) { - /* Enable EPIO */ - val = REG_RD(bp, MISC_REG_GEN_PURP_HWG); - REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1); - } - /* Check if common init was already done */ - phy_ver = REG_RD(bp, shmem_base_path[0] + - offsetof(struct shmem_region, - port_mb[PORT_0].ext_phy_fw_version)); - if (phy_ver) { - DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n", - phy_ver); - return 0; - } - - /* Read the ext_phy_type for arbitrary port(0) */ - for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; - phy_index++) { - ext_phy_config = bnx2x_get_ext_phy_config(bp, - shmem_base_path[0], - phy_index, 0); - ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); - rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path, - shmem2_base_path, - phy_index, ext_phy_type, - chip_id); - } - return rc; -} - -static void bnx2x_check_over_curr(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u32 cfg_pin; - u8 port = params->port; - u32 pin_val; - - cfg_pin = (REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) & - PORT_HW_CFG_E3_OVER_CURRENT_MASK) >> - PORT_HW_CFG_E3_OVER_CURRENT_SHIFT; - - /* Ignore check if no external input PIN available */ - if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0) - return; - - if (!pin_val) { - if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) { - netdev_err(bp->dev, "Error: Power fault on Port %d has" - " been detected and the power to " - "that SFP+ module has been removed" - " to prevent failure of the card." 
- " Please remove the SFP+ module and" - " restart the system to clear this" - " error.\n", - params->port); - vars->phy_flags |= PHY_OVER_CURRENT_FLAG; - } - } else - vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; -} - -static void bnx2x_analyze_link_error(struct link_params *params, - struct link_vars *vars, u32 lss_status) -{ - struct bnx2x *bp = params->bp; - /* Compare new value with previous value */ - u8 led_mode; - u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; - - if ((lss_status ^ half_open_conn) == 0) - return; - - /* If values differ */ - DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, - half_open_conn, lss_status); - - /* - * a. Update shmem->link_status accordingly - * b. Update link_vars->link_up - */ - if (lss_status) { - DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n"); - vars->link_status &= ~LINK_STATUS_LINK_UP; - vars->link_up = 0; - vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; - /* - * Set LED mode to off since the PHY doesn't know about these - * errors - */ - led_mode = LED_MODE_OFF; - } else { - DP(NETIF_MSG_LINK, "Remote Fault cleared\n"); - vars->link_status |= LINK_STATUS_LINK_UP; - vars->link_up = 1; - vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; - led_mode = LED_MODE_OPER; - } - /* Update the LED according to the link state */ - bnx2x_set_led(params, vars, led_mode, SPEED_10000); - - /* Update link status in the shared memory */ - bnx2x_update_mng(params, vars->link_status); - - /* C. Trigger General Attention */ - vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; - bnx2x_notify_link_changed(bp); -} - -/****************************************************************************** -* Description: -* This function checks for half opened connection change indication. -* When such change occurs, it calls the bnx2x_analyze_link_error -* to check if Remote Fault is set or cleared. Reception of remote fault -* status message in the MAC indicates that the peer's MAC has detected -* a fault, for example, due to break in the TX side of fiber. -* -******************************************************************************/ -static void bnx2x_check_half_open_conn(struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u32 lss_status = 0; - u32 mac_base; - /* In case link status is physically up @ 10G do */ - if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) - return; - - if (CHIP_IS_E3(bp) && - (REG_RD(bp, MISC_REG_RESET_REG_2) & - (MISC_REGISTERS_RESET_REG_2_XMAC))) { - /* Check E3 XMAC */ - /* - * Note that link speed cannot be queried here, since it may be - * zero while link is down. In case UMAC is active, LSS will - * simply not be set - */ - mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; - - /* Clear stick bits (Requires rising edge) */ - REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); - REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, - XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | - XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); - if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) - lss_status = 1; - - bnx2x_analyze_link_error(params, vars, lss_status); - } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { - /* Check E1X / E2 BMAC */ - u32 lss_status_reg; - u32 wb_data[2]; - mac_base = params->port ? 
NIG_REG_INGRESS_BMAC1_MEM : - NIG_REG_INGRESS_BMAC0_MEM; - /* Read BIGMAC_REGISTER_RX_LSS_STATUS */ - if (CHIP_IS_E2(bp)) - lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT; - else - lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS; - - REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); - lss_status = (wb_data[0] > 0); - - bnx2x_analyze_link_error(params, vars, lss_status); - } -} - -void bnx2x_period_func(struct link_params *params, struct link_vars *vars) -{ - struct bnx2x *bp; - u16 phy_idx; - if (!params) { - DP(NETIF_MSG_LINK, "Uninitialized params !\n"); - return; - } - bp = params->bp; - - for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { - if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { - bnx2x_set_aer_mmd(params, &params->phy[phy_idx]); - bnx2x_check_half_open_conn(params, vars); - break; - } - } - - if (CHIP_IS_E3(bp)) - bnx2x_check_over_curr(params, vars); -} - -u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base) -{ - u8 phy_index; - struct bnx2x_phy phy; - for (phy_index = INT_PHY; phy_index < MAX_PHYS; - phy_index++) { - if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, - 0, &phy) != 0) { - DP(NETIF_MSG_LINK, "populate phy failed\n"); - return 0; - } - - if (phy.flags & FLAGS_HW_LOCK_REQUIRED) - return 1; - } - return 0; -} - -u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, - u32 shmem_base, - u32 shmem2_base, - u8 port) -{ - u8 phy_index, fan_failure_det_req = 0; - struct bnx2x_phy phy; - for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; - phy_index++) { - if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, - port, &phy) - != 0) { - DP(NETIF_MSG_LINK, "populate phy failed\n"); - return 0; - } - fan_failure_det_req |= (phy.flags & - FLAGS_FAN_FAILURE_DET_REQ); - } - return fan_failure_det_req; -} - -void bnx2x_hw_reset_phy(struct link_params *params) -{ - u8 phy_index; - struct bnx2x *bp = params->bp; - bnx2x_update_mng(params, 0); - bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); - - for (phy_index = INT_PHY; phy_index < MAX_PHYS; - phy_index++) { - if (params->phy[phy_index].hw_reset) { - params->phy[phy_index].hw_reset( - &params->phy[phy_index], - params); - params->phy[phy_index] = phy_null; - } - } -} - -void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, - u32 chip_id, u32 shmem_base, u32 shmem2_base, - u8 port) -{ - u8 gpio_num = 0xff, gpio_port = 0xff, phy_index; - u32 val; - u32 offset, aeu_mask, swap_val, swap_override, sync_offset; - if (CHIP_IS_E3(bp)) { - if (bnx2x_get_mod_abs_int_cfg(bp, chip_id, - shmem_base, - port, - &gpio_num, - &gpio_port) != 0) - return; - } else { - struct bnx2x_phy phy; - for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; - phy_index++) { - if (bnx2x_populate_phy(bp, phy_index, shmem_base, - shmem2_base, port, &phy) - != 0) { - DP(NETIF_MSG_LINK, "populate phy failed\n"); - return; - } - if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) { - gpio_num = MISC_REGISTERS_GPIO_3; - gpio_port = port; - break; - } - } - } - - if (gpio_num == 0xff) - return; - - /* Set GPIO3 to trigger SFP+ module insertion/removal */ - bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port); - - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - gpio_port ^= (swap_val && swap_override); - - vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 << - (gpio_num + (gpio_port << 2)); - - sync_offset =
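The AEU mask above is built from the rule that each port owns four consecutive GPIO event bits, so the bit index is gpio_num + 4 * gpio_port; the same arithmetic selects the enable bit in MISC_REG_GPIO_EVENT_EN further down. A worked example:

#include <stdio.h>

int main(void)
{
	unsigned int gpio_num = 3, gpio_port = 1;
	unsigned int bit = gpio_num + (gpio_port << 2);	/* 3 + 4 = 7 */

	printf("event bit %u, mask 0x%x\n", bit, 1u << bit);	/* 7, 0x80 */
	return 0;
}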
shmem_base + - offsetof(struct shmem_region, - dev_info.port_hw_config[port].aeu_int_mask); - REG_WR(bp, sync_offset, vars->aeu_int_mask); - - DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n", - gpio_num, gpio_port, vars->aeu_int_mask); - - if (port == 0) - offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; - else - offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; - - /* Open appropriate AEU for interrupts */ - aeu_mask = REG_RD(bp, offset); - aeu_mask |= vars->aeu_int_mask; - REG_WR(bp, offset, aeu_mask); - - /* Enable the GPIO to trigger interrupt */ - val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); - val |= 1 << (gpio_num + (gpio_port << 2)); - REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); -} diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h deleted file mode 100644 index c12db6d..0000000 --- a/drivers/net/bnx2x/bnx2x_link.h +++ /dev/null @@ -1,493 +0,0 @@ -/* Copyright 2008-2011 Broadcom Corporation - * - * Unless you and Broadcom execute a separate written software license - * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). - * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written - * consent. - * - * Written by Yaniv Rosner - * - */ - -#ifndef BNX2X_LINK_H -#define BNX2X_LINK_H - - - -/***********************************************************/ -/* Defines */ -/***********************************************************/ -#define DEFAULT_PHY_DEV_ADDR 3 -#define E2_DEFAULT_PHY_DEV_ADDR 5 - - - -#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO -#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX -#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX -#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH -#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE - -#define NET_SERDES_IF_XFI 1 -#define NET_SERDES_IF_SFI 2 -#define NET_SERDES_IF_KR 3 -#define NET_SERDES_IF_DXGXS 4 - -#define SPEED_AUTO_NEG 0 -#define SPEED_20000 20000 - -#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 -#define SFP_EEPROM_VENDOR_NAME_SIZE 16 -#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 -#define SFP_EEPROM_VENDOR_OUI_SIZE 3 -#define SFP_EEPROM_PART_NO_ADDR 0x28 -#define SFP_EEPROM_PART_NO_SIZE 16 -#define SFP_EEPROM_REVISION_ADDR 0x38 -#define SFP_EEPROM_REVISION_SIZE 4 -#define SFP_EEPROM_SERIAL_ADDR 0x44 -#define SFP_EEPROM_SERIAL_SIZE 16 -#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ -#define SFP_EEPROM_DATE_SIZE 6 -#define PWR_FLT_ERR_MSG_LEN 250 - -#define XGXS_EXT_PHY_TYPE(ext_phy_config) \ - ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) -#define XGXS_EXT_PHY_ADDR(ext_phy_config) \ - (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \ - PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT) -#define SERDES_EXT_PHY_TYPE(ext_phy_config) \ - ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) - -/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */ -#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1) -/* Single Media board contains single external phy */ -#define SINGLE_MEDIA(params) (params->num_phys == 2) -/* Dual Media board contains two external phy with different media */ -#define DUAL_MEDIA(params) (params->num_phys == 3) - -#define FW_PARAM_PHY_ADDR_MASK 0x000000FF -#define 
FW_PARAM_PHY_TYPE_MASK 0x0000FF00 -#define FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000 -#define FW_PARAM_MDIO_CTRL_OFFSET 16 -#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \ - FW_PARAM_PHY_ADDR_MASK) -#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \ - FW_PARAM_PHY_TYPE_MASK) -#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \ - FW_PARAM_MDIO_CTRL_MASK) >> \ - FW_PARAM_MDIO_CTRL_OFFSET) -#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ - (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) - - -#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170 -#define PFC_BRB_FULL_LB_XON_THRESHOLD 250 - -#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b)) -/***********************************************************/ -/* Structs */ -/***********************************************************/ -#define INT_PHY 0 -#define EXT_PHY1 1 -#define EXT_PHY2 2 -#define MAX_PHYS 3 - -/* Same configuration is shared between the XGXS and the first external phy */ -#define LINK_CONFIG_SIZE (MAX_PHYS - 1) -#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \ - 0 : (_phy_idx - 1)) -/***********************************************************/ -/* bnx2x_phy struct */ -/* Defines the required arguments and function per phy */ -/***********************************************************/ -struct link_vars; -struct link_params; -struct bnx2x_phy; - -typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars); -typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars); -typedef void (*link_reset_t)(struct bnx2x_phy *phy, - struct link_params *params); -typedef void (*config_loopback_t)(struct bnx2x_phy *phy, - struct link_params *params); -typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len); -typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params); -typedef void (*set_link_led_t)(struct bnx2x_phy *phy, - struct link_params *params, u8 mode); -typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy, - struct link_params *params, u32 action); - -struct bnx2x_phy { - u32 type; - - /* Loaded during init */ - u8 addr; - u8 def_md_devad; - u16 flags; - /* Require HW lock */ -#define FLAGS_HW_LOCK_REQUIRED (1<<0) - /* No Over-Current detection */ -#define FLAGS_NOC (1<<1) - /* Fan failure detection required */ -#define FLAGS_FAN_FAILURE_DET_REQ (1<<2) - /* Initialize first the XGXS and only then the phy itself */ -#define FLAGS_INIT_XGXS_FIRST (1<<3) -#define FLAGS_WC_DUAL_MODE (1<<4) -#define FLAGS_4_PORT_MODE (1<<5) -#define FLAGS_REARM_LATCH_SIGNAL (1<<6) -#define FLAGS_SFP_NOT_APPROVED (1<<7) -#define FLAGS_MDC_MDIO_WA (1<<8) -#define FLAGS_DUMMY_READ (1<<9) -#define FLAGS_MDC_MDIO_WA_B0 (1<<10) -#define FLAGS_TX_ERROR_CHECK (1<<12) - - /* preemphasis values for the rx side */ - u16 rx_preemphasis[4]; - - /* preemphasis values for the tx side */ - u16 tx_preemphasis[4]; - - /* EMAC address for access MDIO */ - u32 mdio_ctrl; - - u32 supported; - - u32 media_type; -#define ETH_PHY_UNSPECIFIED 0x0 -#define ETH_PHY_SFP_FIBER 0x1 -#define ETH_PHY_XFP_FIBER 0x2 -#define ETH_PHY_DA_TWINAX 0x3 -#define ETH_PHY_BASE_T 0x4 -#define ETH_PHY_KR 0xf0 -#define ETH_PHY_CX4 0xf1 -#define ETH_PHY_NOT_PRESENT 0xff - - /* The address in which version is located*/ - u32 ver_addr; - - u16 req_flow_ctrl; - - u16 req_line_speed; - - u32 speed_cap_mask; - - u16 req_duplex; - u16 rsrv; - /* Called per phy/port init, and it configures LASI, speed, autoneg, - duplex, flow control negotiation, etc. 
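FW_PARAM_SET above packs three fields into one u32: the PHY address in bits 0-7, the PHY type in bits 8-15 and the MDIO control base in the upper 16 bits, with the companion FW_PARAM_* masks extracting them again. A round trip:

#include <stdio.h>

#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_SET(addr, type, mdio) \
	((addr) | (type) | ((mdio) << FW_PARAM_MDIO_CTRL_OFFSET))
#define FW_PARAM_PHY_ADDR(p)  ((p) & 0x000000FF)
#define FW_PARAM_MDIO_CTRL(p) (((p) & 0xFFFF0000) >> FW_PARAM_MDIO_CTRL_OFFSET)

int main(void)
{
	unsigned int p = FW_PARAM_SET(0x01u, 0x0100u, 0xabcdu);

	printf("param=0x%08x addr=0x%x mdio=0x%x\n",
	       p, FW_PARAM_PHY_ADDR(p), FW_PARAM_MDIO_CTRL(p));
	/* param=0xabcd0101 addr=0x1 mdio=0xabcd */
	return 0;
}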
*/ - config_init_t config_init; - - /* Called due to interrupt. It determines the link, speed */ - read_status_t read_status; - - /* Called when driver is unloading. Should reset the phy */ - link_reset_t link_reset; - - /* Set the loopback configuration for the phy */ - config_loopback_t config_loopback; - - /* Format the given raw number into str up to len */ - format_fw_ver_t format_fw_ver; - - /* Reset the phy (both ports) */ - hw_reset_t hw_reset; - - /* Set link led mode (on/off/oper)*/ - set_link_led_t set_link_led; - - /* PHY Specific tasks */ - phy_specific_func_t phy_specific_func; -#define DISABLE_TX 1 -#define ENABLE_TX 2 -}; - -/* Inputs parameters to the CLC */ -struct link_params { - - u8 port; - - /* Default / User Configuration */ - u8 loopback_mode; -#define LOOPBACK_NONE 0 -#define LOOPBACK_EMAC 1 -#define LOOPBACK_BMAC 2 -#define LOOPBACK_XGXS 3 -#define LOOPBACK_EXT_PHY 4 -#define LOOPBACK_EXT 5 -#define LOOPBACK_UMAC 6 -#define LOOPBACK_XMAC 7 - - /* Device parameters */ - u8 mac_addr[6]; - - u16 req_duplex[LINK_CONFIG_SIZE]; - u16 req_flow_ctrl[LINK_CONFIG_SIZE]; - - u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */ - - /* shmem parameters */ - u32 shmem_base; - u32 shmem2_base; - u32 speed_cap_mask[LINK_CONFIG_SIZE]; - u32 switch_cfg; -#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH -#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH -#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT - - u32 lane_config; - - /* Phy register parameter */ - u32 chip_id; - - /* features */ - u32 feature_config_flags; -#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) -#define FEATURE_CONFIG_PFC_ENABLED (1<<1) -#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) -#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) -#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) -#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) - /* Will be populated during common init */ - struct bnx2x_phy phy[MAX_PHYS]; - - /* Will be populated during common init */ - u8 num_phys; - - u8 rsrv; - u16 hw_led_mode; /* part of the hw_config read from the shmem */ - u32 multi_phy_config; - - /* Device pointer passed to all callback functions */ - struct bnx2x *bp; - u16 req_fc_auto_adv; /* Should be set to TX / BOTH when - req_flow_ctrl is set to AUTO */ -}; - -/* Output parameters */ -struct link_vars { - u8 phy_flags; -#define PHY_XGXS_FLAG (1<<0) -#define PHY_SGMII_FLAG (1<<1) -#define PHY_PHYSICAL_LINK_FLAG (1<<2) -#define PHY_HALF_OPEN_CONN_FLAG (1<<3) -#define PHY_OVER_CURRENT_FLAG (1<<4) - - u8 mac_type; -#define MAC_TYPE_NONE 0 -#define MAC_TYPE_EMAC 1 -#define MAC_TYPE_BMAC 2 -#define MAC_TYPE_UMAC 3 -#define MAC_TYPE_XMAC 4 - - u8 phy_link_up; /* internal phy link indication */ - u8 link_up; - - u16 line_speed; - u16 duplex; - - u16 flow_ctrl; - u16 ieee_fc; - - /* The same definitions as the shmem parameter */ - u32 link_status; - u8 fault_detected; - u8 rsrv1; - u16 periodic_flags; -#define PERIODIC_FLAGS_LINK_EVENT 0x0001 - - u32 aeu_int_mask; -}; - -/***********************************************************/ -/* Functions */ -/***********************************************************/ -int bnx2x_phy_init(struct link_params *params, struct link_vars *vars); - -/* Reset the link. 
Should be called when driver or interface goes down - Before calling phy firmware upgrade, the reset_ext_phy should be set - to 0 */ -int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, - u8 reset_ext_phy); - -/* bnx2x_link_update should be called upon link interrupt */ -int bnx2x_link_update(struct link_params *params, struct link_vars *vars); - -/* use the following phy functions to read/write from external_phy - In order to use it to read/write internal phy registers, use - DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as - the register */ -int bnx2x_phy_read(struct link_params *params, u8 phy_addr, - u8 devad, u16 reg, u16 *ret_val); - -int bnx2x_phy_write(struct link_params *params, u8 phy_addr, - u8 devad, u16 reg, u16 val); - -/* Reads the link_status from the shmem, - and update the link vars accordingly */ -void bnx2x_link_status_update(struct link_params *input, - struct link_vars *output); -/* returns string representing the fw_version of the external phy */ -int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, - u8 *version, u16 len); - -/* Set/Unset the led - Basically, the CLC takes care of the led for the link, but in case one needs - to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to - blink the led, and LED_MODE_OFF to set the led off.*/ -int bnx2x_set_led(struct link_params *params, - struct link_vars *vars, u8 mode, u32 speed); -#define LED_MODE_OFF 0 -#define LED_MODE_ON 1 -#define LED_MODE_OPER 2 -#define LED_MODE_FRONT_PANEL_OFF 3 - -/* bnx2x_handle_module_detect_int should be called upon module detection - interrupt */ -void bnx2x_handle_module_detect_int(struct link_params *params); - -/* Get the actual link status. In case it returns 0, link is up, - otherwise link is down*/ -int bnx2x_test_link(struct link_params *params, struct link_vars *vars, - u8 is_serdes); - -/* One-time initialization for external phy after power up */ -int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], - u32 shmem2_base_path[], u32 chip_id); - -/* Reset the external PHY using GPIO */ -void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); - -/* Reset the external of SFX7101 */ -void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); - -/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ -int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf); - -void bnx2x_hw_reset_phy(struct link_params *params); - -/* Checks if HW lock is required for this phy/board type */ -u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, - u32 shmem2_base); - -/* Check swap bit and adjust PHY order */ -u32 bnx2x_phy_selection(struct link_params *params); - -/* Probe the phys on board, and populate them in "params" */ -int bnx2x_phy_probe(struct link_params *params); - -/* Checks if fan failure detection is required on one of the phys on board */ -u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, - u32 shmem2_base, u8 port); - - - -/* DCBX structs */ - -/* Number of maximum COS per chip */ -#define DCBX_E2E3_MAX_NUM_COS (2) -#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6) -#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3) -#define DCBX_E3B0_MAX_NUM_COS ( \ - MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \ - DCBX_E3B0_MAX_NUM_COS_PORT1)) - -#define DCBX_MAX_NUM_COS ( \ - MAXVAL(DCBX_E3B0_MAX_NUM_COS, \ - DCBX_E2E3_MAX_NUM_COS)) - -/* PFC port configuration params */ -struct bnx2x_nig_brb_pfc_port_params { - /* NIG */ - u32 
pause_enable; - u32 llfc_out_en; - u32 llfc_enable; - u32 pkt_priority_to_cos; - u8 num_of_rx_cos_priority_mask; - u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS]; - u32 llfc_high_priority_classes; - u32 llfc_low_priority_classes; - /* BRB */ - u32 cos0_pauseable; - u32 cos1_pauseable; -}; - - -/* ETS port configuration params */ -struct bnx2x_ets_bw_params { - u8 bw; -}; - -struct bnx2x_ets_sp_params { - /** - * valid values are 0 - 5. 0 is highest strict priority. - * There can't be two COS's with the same pri. - */ - u8 pri; -}; - -enum bnx2x_cos_state { - bnx2x_cos_state_strict = 0, - bnx2x_cos_state_bw = 1, -}; - -struct bnx2x_ets_cos_params { - enum bnx2x_cos_state state ; - union { - struct bnx2x_ets_bw_params bw_params; - struct bnx2x_ets_sp_params sp_params; - } params; -}; - -struct bnx2x_ets_params { - u8 num_of_cos; /* Number of valid COS entries*/ - struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS]; -}; - -/** - * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB - * when link is already up - */ -int bnx2x_update_pfc(struct link_params *params, - struct link_vars *vars, - struct bnx2x_nig_brb_pfc_port_params *pfc_params); - - -/* Used to configure the ETS to disable */ -int bnx2x_ets_disabled(struct link_params *params, - struct link_vars *vars); - -/* Used to configure the ETS to BW limited */ -void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, - const u32 cos1_bw); - -/* Used to configure the ETS to strict */ -int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); - - -/* Configure the COS to ETS according to BW and SP settings.*/ -int bnx2x_ets_e3b0_config(const struct link_params *params, - const struct link_vars *vars, - const struct bnx2x_ets_params *ets_params); -/* Read pfc statistic*/ -void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, - u32 pfc_frames_sent[2], - u32 pfc_frames_received[2]); -void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, - u32 chip_id, u32 shmem_base, u32 shmem2_base, - u8 port); - -int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, - struct link_params *params); - -void bnx2x_period_func(struct link_params *params, struct link_vars *vars); - -#endif /* BNX2X_LINK_H */ diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c deleted file mode 100644 index 1507091..0000000 --- a/drivers/net/bnx2x/bnx2x_main.c +++ /dev/null @@ -1,11531 +0,0 @@ -/* bnx2x_main.c: Broadcom Everest network driver. - * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - * Maintained by: Eilon Greenstein - * Written by: Eliezer Tamir - * Based on code from Michael Chan's bnx2 driver - * UDP CSUM errata workaround by Arik Gendelman - * Slowpath and fastpath rework by Vladislav Zolotarov - * Statistics and Link management by Yitchak Gertner - * - */ - -#include -#include -#include -#include /* for dev_info() */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "bnx2x.h" -#include "bnx2x_init.h" -#include "bnx2x_init_ops.h" -#include "bnx2x_cmn.h" -#include "bnx2x_dcb.h" -#include "bnx2x_sp.h" - -#include -#include "bnx2x_fw_file_hdr.h" -/* FW files */ -#define FW_FILE_VERSION \ - __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ - __stringify(BCM_5710_FW_MINOR_VERSION) "." \ - __stringify(BCM_5710_FW_REVISION_VERSION) "." \ - __stringify(BCM_5710_FW_ENGINEERING_VERSION) -#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" -#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" -#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" - -/* Time in jiffies before concluding the transmitter is hung */ -#define TX_TIMEOUT (5*HZ) - -static char version[] __devinitdata = - "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " - DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; - -MODULE_AUTHOR("Eliezer Tamir"); -MODULE_DESCRIPTION("Broadcom NetXtreme II " - "BCM57710/57711/57711E/" - "57712/57712_MF/57800/57800_MF/57810/57810_MF/" - "57840/57840_MF Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); -MODULE_FIRMWARE(FW_FILE_NAME_E1); -MODULE_FIRMWARE(FW_FILE_NAME_E1H); -MODULE_FIRMWARE(FW_FILE_NAME_E2); - -static int multi_mode = 1; -module_param(multi_mode, int, 0); -MODULE_PARM_DESC(multi_mode, " Multi queue mode " - "(0 Disable; 1 Enable (default))"); - -int num_queues; -module_param(num_queues, int, 0); -MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" - " (default is as a number of CPUs)"); - -static int disable_tpa; -module_param(disable_tpa, int, 0); -MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); - -#define INT_MODE_INTx 1 -#define INT_MODE_MSI 2 -static int int_mode; -module_param(int_mode, int, 0); -MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " - "(1 INT#x; 2 MSI)"); - -static int dropless_fc; -module_param(dropless_fc, int, 0); -MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); - -static int poll; -module_param(poll, int, 0); -MODULE_PARM_DESC(poll, " Use polling (for debug)"); - -static int mrrs = -1; -module_param(mrrs, int, 0); -MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); - -static int debug; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, " Default debug msglevel"); - - - -struct workqueue_struct *bnx2x_wq; - -enum bnx2x_board_type { - BCM57710 = 0, - BCM57711, - BCM57711E, - BCM57712, - BCM57712_MF, - BCM57800, - BCM57800_MF, - BCM57810, - BCM57810_MF, - BCM57840, - BCM57840_MF -}; - -/* indexed by board_type, above */ -static struct { - char *name; -} board_info[] __devinitdata = { - { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, - { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, - { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, - { "Broadcom NetXtreme II 
BCM57712 10 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, - { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, - { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, - { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, - { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " - "Ethernet Multi Function"} -}; - -#ifndef PCI_DEVICE_ID_NX2_57710 -#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710 -#endif -#ifndef PCI_DEVICE_ID_NX2_57711 -#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711 -#endif -#ifndef PCI_DEVICE_ID_NX2_57711E -#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E -#endif -#ifndef PCI_DEVICE_ID_NX2_57712 -#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712 -#endif -#ifndef PCI_DEVICE_ID_NX2_57712_MF -#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF -#endif -#ifndef PCI_DEVICE_ID_NX2_57800 -#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 -#endif -#ifndef PCI_DEVICE_ID_NX2_57800_MF -#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF -#endif -#ifndef PCI_DEVICE_ID_NX2_57810 -#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 -#endif -#ifndef PCI_DEVICE_ID_NX2_57810_MF -#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF -#endif -#ifndef PCI_DEVICE_ID_NX2_57840 -#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840 -#endif -#ifndef PCI_DEVICE_ID_NX2_57840_MF -#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF -#endif -static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, - { 0 } -}; - -MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); - -/**************************************************************************** -* General service functions -****************************************************************************/ - -static inline void __storm_memset_dma_mapping(struct bnx2x *bp, - u32 addr, dma_addr_t mapping) -{ - REG_WR(bp, addr, U64_LO(mapping)); - REG_WR(bp, addr + 4, U64_HI(mapping)); -} - -static inline void storm_memset_spq_addr(struct bnx2x *bp, - dma_addr_t mapping, u16 abs_fid) -{ - u32 addr = XSEM_REG_FAST_MEMORY + - XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); - - __storm_memset_dma_mapping(bp, addr, mapping); -} - -static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, - u16 pf_id) -{ - REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), - pf_id); - REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), - pf_id); - REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), - pf_id); - REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), - pf_id); -} - -static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, - u8 enable) -{ - REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), - 
enable); - REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), - enable); - REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), - enable); - REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), - enable); -} - -static inline void storm_memset_eq_data(struct bnx2x *bp, - struct event_ring_data *eq_data, - u16 pfid) -{ - size_t size = sizeof(struct event_ring_data); - - u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid); - - __storm_memset_struct(bp, addr, size, (u32 *)eq_data); -} - -static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, - u16 pfid) -{ - u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); - REG_WR16(bp, addr, eq_prod); -} - -/* used only at init - * locking is done by mcp - */ -static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) -{ - pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); - pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); - pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, - PCICFG_VENDOR_ID_OFFSET); -} - -static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) -{ - u32 val; - - pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); - pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); - pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, - PCICFG_VENDOR_ID_OFFSET); - - return val; -} - -#define DMAE_DP_SRC_GRC "grc src_addr [%08x]" -#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]" -#define DMAE_DP_DST_GRC "grc dst_addr [%08x]" -#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" -#define DMAE_DP_DST_NONE "dst_addr [none]" - -static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, - int msglvl) -{ - u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; - - switch (dmae->opcode & DMAE_COMMAND_DST) { - case DMAE_CMD_DST_PCI: - if (src_type == DMAE_CMD_SRC_PCI) - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, - dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - else - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%08x], len [%d*4], dst [%x:%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_lo >> 2, - dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - break; - case DMAE_CMD_DST_GRC: - if (src_type == DMAE_CMD_SRC_PCI) - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, - dmae->len, dmae->dst_addr_lo >> 2, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - else - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%08x], len [%d*4], dst [%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_lo >> 2, - dmae->len, dmae->dst_addr_lo >> 2, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - break; - default: - if (src_type == DMAE_CMD_SRC_PCI) - DP(msglvl, "DMAE: opcode 0x%08x\n" - DP_LEVEL "src_addr [%x:%08x] len [%d * 4] " - "dst_addr [none]\n" - DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, - dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - else - DP(msglvl, "DMAE: opcode 0x%08x\n" - DP_LEVEL "src_addr [%08x] len [%d * 4] " - "dst_addr [none]\n" - DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n", - 
dmae->opcode, dmae->src_addr_lo >> 2, - dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - break; - } - -} - -/* copy command into DMAE command memory and set DMAE command go */ -void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) -{ - u32 cmd_offset; - int i; - - cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx); - for (i = 0; i < (sizeof(struct dmae_command)/4); i++) { - REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i)); - - DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n", - idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); - } - REG_WR(bp, dmae_reg_go_c[idx], 1); -} - -u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type) -{ - return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | - DMAE_CMD_C_ENABLE); -} - -u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode) -{ - return opcode & ~DMAE_CMD_SRC_RESET; -} - -u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, - bool with_comp, u8 comp_type) -{ - u32 opcode = 0; - - opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) | - (dst_type << DMAE_COMMAND_DST_SHIFT)); - - opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); - - opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); - opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | - (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); - opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); - -#ifdef __BIG_ENDIAN - opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; -#else - opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; -#endif - if (with_comp) - opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type); - return opcode; -} - -static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, - struct dmae_command *dmae, - u8 src_type, u8 dst_type) -{ - memset(dmae, 0, sizeof(struct dmae_command)); - - /* set the opcode */ - dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type, - true, DMAE_COMP_PCI); - - /* fill in the completion parameters */ - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); - dmae->comp_val = DMAE_COMP_VAL; -} - -/* issue a dmae command over the init-channel and wait for completion */ -static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, - struct dmae_command *dmae) -{ - u32 *wb_comp = bnx2x_sp(bp, wb_comp); - int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; - int rc = 0; - - DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n", - bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], - bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); - - /* - * Lock the dmae channel. Disable BHs to prevent a deadlock - * as long as this code is called both from syscall context and - * from ndo_set_rx_mode() flow that may be called from BH.
- */ - spin_lock_bh(&bp->dmae_lock); - - /* reset completion */ - *wb_comp = 0; - - /* post the command on the channel used for initializations */ - bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); - - /* wait for completion */ - udelay(5); - while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { - DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); - - if (!cnt) { - BNX2X_ERR("DMAE timeout!\n"); - rc = DMAE_TIMEOUT; - goto unlock; - } - cnt--; - udelay(50); - } - if (*wb_comp & DMAE_PCI_ERR_FLAG) { - BNX2X_ERR("DMAE PCI error!\n"); - rc = DMAE_PCI_ERROR; - } - - DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n", - bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], - bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); - -unlock: - spin_unlock_bh(&bp->dmae_lock); - return rc; -} - -void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, - u32 len32) -{ - struct dmae_command dmae; - - if (!bp->dmae_ready) { - u32 *data = bnx2x_sp(bp, wb_data[0]); - - DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)" - " using indirect\n", dst_addr, len32); - bnx2x_init_ind_wr(bp, dst_addr, data, len32); - return; - } - - /* set opcode and fixed command fields */ - bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); - - /* fill in addresses and len */ - dmae.src_addr_lo = U64_LO(dma_addr); - dmae.src_addr_hi = U64_HI(dma_addr); - dmae.dst_addr_lo = dst_addr >> 2; - dmae.dst_addr_hi = 0; - dmae.len = len32; - - bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF); - - /* issue the command and wait for completion */ - bnx2x_issue_dmae_with_comp(bp, &dmae); -} - -void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) -{ - struct dmae_command dmae; - - if (!bp->dmae_ready) { - u32 *data = bnx2x_sp(bp, wb_data[0]); - int i; - - DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)" - " using indirect\n", src_addr, len32); - for (i = 0; i < len32; i++) - data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); - return; - } - - /* set opcode and fixed command fields */ - bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); - - /* fill in addresses and len */ - dmae.src_addr_lo = src_addr >> 2; - dmae.src_addr_hi = 0; - dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); - dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); - dmae.len = len32; - - bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF); - - /* issue the command and wait for completion */ - bnx2x_issue_dmae_with_comp(bp, &dmae); -} - -static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, - u32 addr, u32 len) -{ - int dmae_wr_max = DMAE_LEN32_WR_MAX(bp); - int offset = 0; - - while (len > dmae_wr_max) { - bnx2x_write_dmae(bp, phys_addr + offset, - addr + offset, dmae_wr_max); - offset += dmae_wr_max * 4; - len -= dmae_wr_max; - } - - bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); -} - -/* used only for slowpath so not inlined */ -static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo) -{ - u32 wb_write[2]; - - wb_write[0] = val_hi; - wb_write[1] = val_lo; - REG_WR_DMAE(bp, reg, wb_write, 2); -} - -#ifdef USE_WB_RD -static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg) -{ - u32 wb_data[2]; - - REG_RD_DMAE(bp, reg, wb_data, 2); - - return HILO_U64(wb_data[0], wb_data[1]); -} -#endif - -static int bnx2x_mc_assert(struct bnx2x *bp) -{ - char last_idx; - int i, rc = 0; - u32 row0, row1, row2, row3; - - /* XSTORM */ - last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - 
BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x" - " 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } - - /* TSTORM */ - last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x" - " 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } - - /* CSTORM */ - last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x" - " 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } - - /* USTORM */ - last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x" - " 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } - - return rc; -} - -void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) -{ - u32 addr, val; - u32 mark, offset; - __be32 data[9]; - int word; - u32 trace_shmem_base; - if (BP_NOMCP(bp)) { - BNX2X_ERR("NO MCP - can not dump\n"); - return; - } - netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", - (bp->common.bc_ver & 0xff0000) >> 16, - (bp->common.bc_ver & 0xff00) >> 8, - (bp->common.bc_ver & 0xff)); - - val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); - if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) - printk("%s" "MCP PC at 0x%x\n", lvl, val); - - if (BP_PATH(bp) == 0) - trace_shmem_base = bp->common.shmem_base; - else - 
trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); - addr = trace_shmem_base - 0x0800 + 4; - mark = REG_RD(bp, addr); - mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) - + ((mark + 0x3) & ~0x3) - 0x08000000; - printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); - - printk("%s", lvl); - for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { - for (word = 0; word < 8; word++) - data[word] = htonl(REG_RD(bp, offset + 4*word)); - data[8] = 0x0; - pr_cont("%s", (char *)data); - } - for (offset = addr + 4; offset <= mark; offset += 0x8*4) { - for (word = 0; word < 8; word++) - data[word] = htonl(REG_RD(bp, offset + 4*word)); - data[8] = 0x0; - pr_cont("%s", (char *)data); - } - printk("%s" "end of fw dump\n", lvl); -} - -static inline void bnx2x_fw_dump(struct bnx2x *bp) -{ - bnx2x_fw_dump_lvl(bp, KERN_ERR); -} - -void bnx2x_panic_dump(struct bnx2x *bp) -{ - int i; - u16 j; - struct hc_sp_status_block_data sp_sb_data; - int func = BP_FUNC(bp); -#ifdef BNX2X_STOP_ON_ERROR - u16 start = 0, end = 0; - u8 cos; -#endif - - bp->stats_state = STATS_STATE_DISABLED; - DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); - - BNX2X_ERR("begin crash dump -----------------\n"); - - /* Indices */ - /* Common */ - BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)" - " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", - bp->def_idx, bp->def_att_idx, bp->attn_state, - bp->spq_prod_idx, bp->stats_counter); - BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", - bp->def_status_blk->atten_status_block.attn_bits, - bp->def_status_blk->atten_status_block.attn_bits_ack, - bp->def_status_blk->atten_status_block.status_block_id, - bp->def_status_blk->atten_status_block.attn_bits_index); - BNX2X_ERR(" def ("); - for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) - pr_cont("0x%x%s", - bp->def_status_blk->sp_sb.index_values[i], - (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); - - for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) - *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + - i*sizeof(u32)); - - pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) " - "pf_id(0x%x) vnic_id(0x%x) " - "vf_id(0x%x) vf_valid (0x%x) " - "state(0x%x)\n", - sp_sb_data.igu_sb_id, - sp_sb_data.igu_seg_id, - sp_sb_data.p_func.pf_id, - sp_sb_data.p_func.vnic_id, - sp_sb_data.p_func.vf_id, - sp_sb_data.p_func.vf_valid, - sp_sb_data.state); - - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - int loop; - struct hc_status_block_data_e2 sb_data_e2; - struct hc_status_block_data_e1x sb_data_e1x; - struct hc_status_block_sm *hc_sm_p = - CHIP_IS_E1x(bp) ? - sb_data_e1x.common.state_machine : - sb_data_e2.common.state_machine; - struct hc_index_data *hc_index_p = - CHIP_IS_E1x(bp) ? 
- sb_data_e1x.index_data : - sb_data_e2.index_data; - u8 data_size, cos; - u32 *sb_data_p; - struct bnx2x_fp_txdata txdata; - - /* Rx */ - BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)" - " rx_comp_prod(0x%x)" - " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", - i, fp->rx_bd_prod, fp->rx_bd_cons, - fp->rx_comp_prod, - fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); - BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)" - " fp_hc_idx(0x%x)\n", - fp->rx_sge_prod, fp->last_max_sge, - le16_to_cpu(fp->fp_hc_idx)); - - /* Tx */ - for_each_cos_in_tx_queue(fp, cos) - { - txdata = fp->txdata[cos]; - BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)" - " tx_bd_prod(0x%x) tx_bd_cons(0x%x)" - " *tx_cons_sb(0x%x)\n", - i, txdata.tx_pkt_prod, - txdata.tx_pkt_cons, txdata.tx_bd_prod, - txdata.tx_bd_cons, - le16_to_cpu(*txdata.tx_cons_sb)); - } - - loop = CHIP_IS_E1x(bp) ? - HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; - - /* host sb data */ - -#ifdef BCM_CNIC - if (IS_FCOE_FP(fp)) - continue; -#endif - BNX2X_ERR(" run indexes ("); - for (j = 0; j < HC_SB_MAX_SM; j++) - pr_cont("0x%x%s", - fp->sb_running_index[j], - (j == HC_SB_MAX_SM - 1) ? ")" : " "); - - BNX2X_ERR(" indexes ("); - for (j = 0; j < loop; j++) - pr_cont("0x%x%s", - fp->sb_index_values[j], - (j == loop - 1) ? ")" : " "); - /* fw sb data */ - data_size = CHIP_IS_E1x(bp) ? - sizeof(struct hc_status_block_data_e1x) : - sizeof(struct hc_status_block_data_e2); - data_size /= sizeof(u32); - sb_data_p = CHIP_IS_E1x(bp) ? - (u32 *)&sb_data_e1x : - (u32 *)&sb_data_e2; - /* copy sb data in here */ - for (j = 0; j < data_size; j++) - *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + - j * sizeof(u32)); - - if (!CHIP_IS_E1x(bp)) { - pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " - "vnic_id(0x%x) same_igu_sb_1b(0x%x) " - "state(0x%x)\n", - sb_data_e2.common.p_func.pf_id, - sb_data_e2.common.p_func.vf_id, - sb_data_e2.common.p_func.vf_valid, - sb_data_e2.common.p_func.vnic_id, - sb_data_e2.common.same_igu_sb_1b, - sb_data_e2.common.state); - } else { - pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " - "vnic_id(0x%x) same_igu_sb_1b(0x%x) " - "state(0x%x)\n", - sb_data_e1x.common.p_func.pf_id, - sb_data_e1x.common.p_func.vf_id, - sb_data_e1x.common.p_func.vf_valid, - sb_data_e1x.common.p_func.vnic_id, - sb_data_e1x.common.same_igu_sb_1b, - sb_data_e1x.common.state); - } - - /* SB_SMs data */ - for (j = 0; j < HC_SB_MAX_SM; j++) { - pr_cont("SM[%d] __flags (0x%x) " - "igu_sb_id (0x%x) igu_seg_id(0x%x) " - "time_to_expire (0x%x) " - "timer_value(0x%x)\n", j, - hc_sm_p[j].__flags, - hc_sm_p[j].igu_sb_id, - hc_sm_p[j].igu_seg_id, - hc_sm_p[j].time_to_expire, - hc_sm_p[j].timer_value); - } - - /* Indices data */ - for (j = 0; j < loop; j++) { - pr_cont("INDEX[%d] flags (0x%x) " - "timeout (0x%x)\n", j, - hc_index_p[j].flags, - hc_index_p[j].timeout); - } - } - -#ifdef BNX2X_STOP_ON_ERROR - /* Rings */ - /* Rx */ - for_each_rx_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - - start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); - end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); - for (j = start; j != end; j = RX_BD(j + 1)) { - u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; - struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; - - BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", - i, j, rx_bd[1], rx_bd[0], sw_bd->skb); - } - - start = RX_SGE(fp->rx_sge_prod); - end = RX_SGE(fp->last_max_sge); - for (j = start; j != end; j = RX_SGE(j + 1)) { - u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; - struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; - - BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n", - i, j, rx_sge[1], rx_sge[0], sw_page->page); - } - - start = RCQ_BD(fp->rx_comp_cons - 10); - end = RCQ_BD(fp->rx_comp_cons + 503); - for (j = start; j != end; j = RCQ_BD(j + 1)) { - u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; - - BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n", - i, j, cqe[0], cqe[1], cqe[2], cqe[3]); - } - } - - /* Tx */ - for_each_tx_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; - - start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); - end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); - for (j = start; j != end; j = TX_BD(j + 1)) { - struct sw_tx_bd *sw_bd = - &txdata->tx_buf_ring[j]; - - BNX2X_ERR("fp%d: txdata %d, " - "packet[%x]=[%p,%x]\n", - i, cos, j, sw_bd->skb, - sw_bd->first_bd); - } - - start = TX_BD(txdata->tx_bd_cons - 10); - end = TX_BD(txdata->tx_bd_cons + 254); - for (j = start; j != end; j = TX_BD(j + 1)) { - u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; - - BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=" - "[%x:%x:%x:%x]\n", - i, cos, j, tx_bd[0], tx_bd[1], - tx_bd[2], tx_bd[3]); - } - } - } -#endif - bnx2x_fw_dump(bp); - bnx2x_mc_assert(bp); - BNX2X_ERR("end crash dump -----------------\n"); -} - -/* - * FLR Support for E2 - * - * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW - * initialization. - */ -#define FLR_WAIT_USEC 10000 /* 10 milliseconds */ -#define FLR_WAIT_INTERAVAL 50 /* usec */ -#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */ - -struct pbf_pN_buf_regs { - int pN; - u32 init_crd; - u32 crd; - u32 crd_freed; -}; - -struct pbf_pN_cmd_regs { - int pN; - u32 lines_occup; - u32 lines_freed; -}; - -static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, - struct pbf_pN_buf_regs *regs, - u32 poll_count) -{ - u32 init_crd, crd, crd_start, crd_freed, crd_freed_start; - u32 cur_cnt = poll_count; - - crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); - crd = crd_start = REG_RD(bp, regs->crd); - init_crd = REG_RD(bp, regs->init_crd); - - DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); - DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); - DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); - - while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < - (init_crd - crd_start))) { - if (cur_cnt--) { - udelay(FLR_WAIT_INTERAVAL); - crd = REG_RD(bp, regs->crd); - crd_freed = REG_RD(bp, regs->crd_freed); - } else { - DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", - regs->pN); - DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", - regs->pN, crd); - DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", - regs->pN, crd_freed); - break; - } - } - DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", - poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); -} - -static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, - struct pbf_pN_cmd_regs *regs, - u32 poll_count) -{ - u32 occup, to_free, freed, freed_start; - u32 cur_cnt = poll_count; - - occup = to_free = REG_RD(bp, regs->lines_occup); - freed = freed_start = REG_RD(bp, regs->lines_freed); - - DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); - DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); - - while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { - if (cur_cnt--) { - udelay(FLR_WAIT_INTERAVAL); - occup = REG_RD(bp, regs->lines_occup); - freed = REG_RD(bp, regs->lines_freed); - } else { - DP(BNX2X_MSG_SP, "PBF 
cmd queue[%d] timed out\n", - regs->pN); - DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", - regs->pN, occup); - DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", - regs->pN, freed); - break; - } - } - DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", - poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); -} - -static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, - u32 expected, u32 poll_count) -{ - u32 cur_cnt = poll_count; - u32 val; - - while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) - udelay(FLR_WAIT_INTERAVAL); - - return val; -} - -static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, - char *msg, u32 poll_cnt) -{ - u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); - if (val != 0) { - BNX2X_ERR("%s usage count=%d\n", msg, val); - return 1; - } - return 0; -} - -static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) -{ - /* adjust polling timeout */ - if (CHIP_REV_IS_EMUL(bp)) - return FLR_POLL_CNT * 2000; - - if (CHIP_REV_IS_FPGA(bp)) - return FLR_POLL_CNT * 120; - - return FLR_POLL_CNT; -} - -static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) -{ - struct pbf_pN_cmd_regs cmd_regs[] = { - {0, (CHIP_IS_E3B0(bp)) ? - PBF_REG_TQ_OCCUPANCY_Q0 : - PBF_REG_P0_TQ_OCCUPANCY, - (CHIP_IS_E3B0(bp)) ? - PBF_REG_TQ_LINES_FREED_CNT_Q0 : - PBF_REG_P0_TQ_LINES_FREED_CNT}, - {1, (CHIP_IS_E3B0(bp)) ? - PBF_REG_TQ_OCCUPANCY_Q1 : - PBF_REG_P1_TQ_OCCUPANCY, - (CHIP_IS_E3B0(bp)) ? - PBF_REG_TQ_LINES_FREED_CNT_Q1 : - PBF_REG_P1_TQ_LINES_FREED_CNT}, - {4, (CHIP_IS_E3B0(bp)) ? - PBF_REG_TQ_OCCUPANCY_LB_Q : - PBF_REG_P4_TQ_OCCUPANCY, - (CHIP_IS_E3B0(bp)) ? - PBF_REG_TQ_LINES_FREED_CNT_LB_Q : - PBF_REG_P4_TQ_LINES_FREED_CNT} - }; - - struct pbf_pN_buf_regs buf_regs[] = { - {0, (CHIP_IS_E3B0(bp)) ? - PBF_REG_INIT_CRD_Q0 : - PBF_REG_P0_INIT_CRD , - (CHIP_IS_E3B0(bp)) ? - PBF_REG_CREDIT_Q0 : - PBF_REG_P0_CREDIT, - (CHIP_IS_E3B0(bp)) ? - PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : - PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, - {1, (CHIP_IS_E3B0(bp)) ? - PBF_REG_INIT_CRD_Q1 : - PBF_REG_P1_INIT_CRD, - (CHIP_IS_E3B0(bp)) ? - PBF_REG_CREDIT_Q1 : - PBF_REG_P1_CREDIT, - (CHIP_IS_E3B0(bp)) ? - PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : - PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, - {4, (CHIP_IS_E3B0(bp)) ? - PBF_REG_INIT_CRD_LB_Q : - PBF_REG_P4_INIT_CRD, - (CHIP_IS_E3B0(bp)) ? - PBF_REG_CREDIT_LB_Q : - PBF_REG_P4_CREDIT, - (CHIP_IS_E3B0(bp)) ? 
- PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : - PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, - }; - - int i; - - /* Verify the command queues are flushed P0, P1, P4 */ - for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) - bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); - - - /* Verify the transmission buffers are flushed P0, P1, P4 */ - for (i = 0; i < ARRAY_SIZE(buf_regs); i++) - bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); -} - -#define OP_GEN_PARAM(param) \ - (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) - -#define OP_GEN_TYPE(type) \ - (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) - -#define OP_GEN_AGG_VECT(index) \ - (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) - - -static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, - u32 poll_cnt) -{ - struct sdm_op_gen op_gen = {0}; - - u32 comp_addr = BAR_CSTRORM_INTMEM + - CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); - int ret = 0; - - if (REG_RD(bp, comp_addr)) { - BNX2X_ERR("Cleanup complete is not 0\n"); - return 1; - } - - op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); - op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); - op_gen.command |= OP_GEN_AGG_VECT(clnup_func); - op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; - - DP(BNX2X_MSG_SP, "FW Final cleanup\n"); - REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command); - - if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { - BNX2X_ERR("FW final cleanup did not succeed\n"); - ret = 1; - } - /* Zero completion for next FLR */ - REG_WR(bp, comp_addr, 0); - - return ret; -} - -static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) -{ - int pos; - u16 status; - - pos = pci_pcie_cap(dev); - if (!pos) - return false; - - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); - return status & PCI_EXP_DEVSTA_TRPND; -} - -/* PF FLR specific routines -*/ -static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) -{ - - /* wait for CFC PF usage-counter to zero (includes all the VFs) */ - if (bnx2x_flr_clnup_poll_hw_counter(bp, - CFC_REG_NUM_LCIDS_INSIDE_PF, - "CFC PF usage counter timed out", - poll_cnt)) - return 1; - - - /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ - if (bnx2x_flr_clnup_poll_hw_counter(bp, - DORQ_REG_PF_USAGE_CNT, - "DQ PF usage counter timed out", - poll_cnt)) - return 1; - - /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ - if (bnx2x_flr_clnup_poll_hw_counter(bp, - QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), - "QM PF usage counter timed out", - poll_cnt)) - return 1; - - /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ - if (bnx2x_flr_clnup_poll_hw_counter(bp, - TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), - "Timers VNIC usage counter timed out", - poll_cnt)) - return 1; - if (bnx2x_flr_clnup_poll_hw_counter(bp, - TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), - "Timers NUM_SCANS usage counter timed out", - poll_cnt)) - return 1; - - /* Wait DMAE PF usage counter to zero */ - if (bnx2x_flr_clnup_poll_hw_counter(bp, - dmae_reg_go_c[INIT_DMAE_C(bp)], - "DMAE command register timed out", - poll_cnt)) - return 1; - - return 0; -} - -static void bnx2x_hw_enable_status(struct bnx2x *bp) -{ - u32 val; - - val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); - DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); - - val = REG_RD(bp, PBF_REG_DISABLE_PF); - DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); - - val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); - DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); - - val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); - DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); - - val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); - DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); - - val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); - DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); - - val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); - DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); - - val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); - DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", - val); -} - -static int bnx2x_pf_flr_clnup(struct bnx2x *bp) -{ - u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); - - DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); - - /* Re-enable PF target read access */ - REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - - /* Poll HW usage counters */ - if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) - return -EBUSY; - - /* Zero the igu 'trailing edge' and 'leading edge' */ - - /* Send the FW cleanup command */ - if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) - return -EBUSY; - - /* ATC cleanup */ - - /* Verify TX hw is flushed */ - bnx2x_tx_hw_flushed(bp, poll_cnt); - - /* Wait 100ms (not adjusted according to platform) */ - msleep(100); - - /* Verify no pending pci transactions */ - if (bnx2x_is_pcie_pending(bp->pdev)) - BNX2X_ERR("PCIE Transactions still pending\n"); - - /* Debug */ - bnx2x_hw_enable_status(bp); - - /* - * Master enable - Due to WB DMAE writes performed before this - * register is re-initialized as part of the regular function init - */ - REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); - - return 0; -} - -static void bnx2x_hc_int_enable(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; - u32 val = REG_RD(bp, addr); - int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; - int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; - - if (msix) { - val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | - HC_CONFIG_0_REG_INT_LINE_EN_0); - val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | - HC_CONFIG_0_REG_ATTN_BIT_EN_0); - } else if (msi) { - val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; - val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | - HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | - HC_CONFIG_0_REG_ATTN_BIT_EN_0); - } else { - val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | - HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | - HC_CONFIG_0_REG_INT_LINE_EN_0 | - HC_CONFIG_0_REG_ATTN_BIT_EN_0); - - if (!CHIP_IS_E1(bp)) { - DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", - val, port, addr); - - REG_WR(bp, addr, val); - - val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; - } - } - - if (CHIP_IS_E1(bp)) - REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); - - DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", - val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); - - REG_WR(bp, addr, val); - /* - * Ensure that HC_CONFIG is written before leading/trailing edge config - */ - mmiowb(); - barrier(); - - if (!CHIP_IS_E1(bp)) { - /* init leading/trailing edge */ - if (IS_MF(bp)) { - val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); - if (bp->port.pmf) - /* enable nig and gpio3 attention */ - val |= 0x1100; - } else - val = 0xffff; - - REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); - REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); - } - - /* Make sure that interrupts are indeed enabled from here on */ - mmiowb(); -} - -static void bnx2x_igu_int_enable(struct bnx2x *bp) -{ - u32 val; - int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; - int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; - - val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); - - if (msix) { - val &= ~(IGU_PF_CONF_INT_LINE_EN | - IGU_PF_CONF_SINGLE_ISR_EN); - val |= (IGU_PF_CONF_FUNC_EN | - IGU_PF_CONF_MSI_MSIX_EN | - IGU_PF_CONF_ATTN_BIT_EN); - } else if (msi) { - val &= ~IGU_PF_CONF_INT_LINE_EN; - val |= (IGU_PF_CONF_FUNC_EN | - IGU_PF_CONF_MSI_MSIX_EN | - IGU_PF_CONF_ATTN_BIT_EN | - IGU_PF_CONF_SINGLE_ISR_EN); - } else { - val &= ~IGU_PF_CONF_MSI_MSIX_EN; - val |= (IGU_PF_CONF_FUNC_EN | - IGU_PF_CONF_INT_LINE_EN | - IGU_PF_CONF_ATTN_BIT_EN | - IGU_PF_CONF_SINGLE_ISR_EN); - } - - DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n", - val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); - - REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); - - barrier(); - - /* init leading/trailing edge */ - if (IS_MF(bp)) { - val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); - if (bp->port.pmf) - /* enable nig and gpio3 attention */ - val |= 0x1100; - } else - val = 0xffff; - - REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); - REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); - - /* Make sure that interrupts are indeed enabled from here on */ - mmiowb(); -} - -void bnx2x_int_enable(struct bnx2x *bp) -{ - if (bp->common.int_block == INT_BLOCK_HC) - bnx2x_hc_int_enable(bp); - else - bnx2x_igu_int_enable(bp); -} - -static void bnx2x_hc_int_disable(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; - u32 val = REG_RD(bp, addr); - - /* - * in E1 we must use only PCI configuration space to disable - * MSI/MSIX capability - * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block - */ - if (CHIP_IS_E1(bp)) { - /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on - * Use mask register to prevent from HC sending interrupts - * after we exit the function - */ - REG_WR(bp, HC_REG_INT_MASK + port*4, 0); - - val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | - HC_CONFIG_0_REG_INT_LINE_EN_0 | - HC_CONFIG_0_REG_ATTN_BIT_EN_0); - } else - val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | - HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | - HC_CONFIG_0_REG_INT_LINE_EN_0 | - HC_CONFIG_0_REG_ATTN_BIT_EN_0); - - DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", - val, port, addr); - - /* flush all outstanding writes */ - mmiowb(); - - REG_WR(bp, addr, val); - if (REG_RD(bp, addr) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); -} - -static void bnx2x_igu_int_disable(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); - - val &= ~(IGU_PF_CONF_MSI_MSIX_EN | - IGU_PF_CONF_INT_LINE_EN | - IGU_PF_CONF_ATTN_BIT_EN); - - DP(NETIF_MSG_INTR, "write %x to IGU\n", val); - - /* flush all outstanding writes */ - mmiowb(); - - REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); - if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); -} - -void bnx2x_int_disable(struct bnx2x *bp) -{ - if (bp->common.int_block == INT_BLOCK_HC) - bnx2x_hc_int_disable(bp); - else - bnx2x_igu_int_disable(bp); -} - -void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) -{ - int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; - int i, offset; - - if (disable_hw) - /* prevent the HW from sending interrupts */ - bnx2x_int_disable(bp); - - /* make sure all ISRs are done */ - if (msix) { - synchronize_irq(bp->msix_table[0].vector); - offset = 1; -#ifdef BCM_CNIC - offset++; -#endif - for_each_eth_queue(bp, i) - synchronize_irq(bp->msix_table[offset++].vector); - } else - synchronize_irq(bp->pdev->irq); - - /* make sure sp_task is not running */ - cancel_delayed_work(&bp->sp_task); - cancel_delayed_work(&bp->period_task); - flush_workqueue(bnx2x_wq); -} - -/* fast path */ - -/* - * General service functions - */ - -/* Return true if the lock was successfully acquired */ -static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) -{ - u32 lock_status; - u32 resource_bit = (1 << resource); - int func = BP_FUNC(bp); - u32 hw_lock_control_reg; - - DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource); - - /* Validating that the resource is within range */ - if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - DP(NETIF_MSG_HW, - "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", - resource, HW_LOCK_MAX_RESOURCE_VALUE); - return false; - } - - if (func <= 5) - hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); - else - hw_lock_control_reg = - (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); - - /* Try to acquire the lock */ - REG_WR(bp, hw_lock_control_reg + 4, resource_bit); - lock_status = REG_RD(bp, hw_lock_control_reg); - if (lock_status & resource_bit) - return true; - - DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource); - return false; -} - -/** - * bnx2x_get_leader_lock_resource - get the recovery leader resource id - * - * @bp: driver handle - * - * Returns the recovery leader resource id according to the engine this function - * belongs to. Currently only 2 engines are supported. - */ -static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp) -{ - if (BP_PATH(bp)) - return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; - else - return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; -} - -/** - * bnx2x_trylock_leader_lock - try to acquire a leader lock. - * - * @bp: driver handle - * - * Tries to acquire a leader lock for the current engine. - */ -static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp) -{ - return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); -} - -#ifdef BCM_CNIC -static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); -#endif - -void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) -{ - struct bnx2x *bp = fp->bp; - int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); - int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); - enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; - struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj; - - DP(BNX2X_MSG_SP, - "fp %d cid %d got ramrod #%d state is %x type is %d\n", - fp->index, cid, command, bp->state, - rr_cqe->ramrod_cqe.ramrod_type); - - switch (command) { - case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): - DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid); - drv_cmd = BNX2X_Q_CMD_UPDATE; - break; - - case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): - DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid); - drv_cmd = BNX2X_Q_CMD_SETUP; - break; - - case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): - DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid); - drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; - break; - - case (RAMROD_CMD_ID_ETH_HALT): - DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid); - drv_cmd = BNX2X_Q_CMD_HALT; - break; - - case (RAMROD_CMD_ID_ETH_TERMINATE): - DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid); - drv_cmd = BNX2X_Q_CMD_TERMINATE; - break; - - case (RAMROD_CMD_ID_ETH_EMPTY): - DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid); - drv_cmd = BNX2X_Q_CMD_EMPTY; - break; - - default: - BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", - command, fp->index); - return; - } - - if ((drv_cmd != BNX2X_Q_CMD_MAX) && - q_obj->complete_cmd(bp, q_obj, drv_cmd)) - /* q_obj->complete_cmd() failure means that this was - * an unexpected completion. - * - * In this case we don't want to increase the bp->spq_left - * because apparently we haven't sent this command in the first - * place. - */ -#ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); -#else - return; -#endif - - smp_mb__before_atomic_inc(); - atomic_inc(&bp->cq_spq_left); - /* push the change in bp->spq_left towards the memory */ - smp_mb__after_atomic_inc(); - - DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); - - return; -} - -void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod) -{ - u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset; - - bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod, - start); -} - -irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) -{ - struct bnx2x *bp = netdev_priv(dev_instance); - u16 status = bnx2x_ack_int(bp); - u16 mask; - int i; - u8 cos; - - /* Return here if interrupt is shared and it's not for us */ - if (unlikely(status == 0)) { - DP(NETIF_MSG_INTR, "not our interrupt!\n"); - return IRQ_NONE; - } - DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return IRQ_HANDLED; -#endif - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - - mask = 0x2 << (fp->index + CNIC_PRESENT); - if (status & mask) { - /* Handle Rx or Tx according to SB id */ - prefetch(fp->rx_cons_sb); - for_each_cos_in_tx_queue(fp, cos) - prefetch(fp->txdata[cos].tx_cons_sb); - prefetch(&fp->sb_running_index[SM_RX_ID]); - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); - status &= ~mask; - } - } - -#ifdef BCM_CNIC - mask = 0x2; - if (status & (mask | 0x1)) { - struct cnic_ops *c_ops = NULL; - - if (likely(bp->state == BNX2X_STATE_OPEN)) { - rcu_read_lock(); - c_ops = rcu_dereference(bp->cnic_ops); - if (c_ops) - c_ops->cnic_handler(bp->cnic_data, NULL); - rcu_read_unlock(); - } - - status &= ~mask; - } -#endif - - if (unlikely(status & 0x1)) { - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); - - status &= ~0x1; - if (!status) - return IRQ_HANDLED; - } - - if (unlikely(status)) - DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", - status); - - return IRQ_HANDLED; -} - -/* Link */ - -/* - * General service functions - */ - -int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) -{ - u32 lock_status; - u32 resource_bit = (1 << resource); - int func = BP_FUNC(bp); - u32 hw_lock_control_reg; - int cnt; - - /* Validating that the resource is within range */ - if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - DP(NETIF_MSG_HW, - "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", - resource, HW_LOCK_MAX_RESOURCE_VALUE); - return -EINVAL; - } - - if (func <= 5) { - hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); - } else { - hw_lock_control_reg = - (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); - } - - /* Validating that the resource is not already taken */ - lock_status = REG_RD(bp, hw_lock_control_reg); - if (lock_status & resource_bit) { - DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", - lock_status, resource_bit); - return -EEXIST; - } - - /* Try for 5 second every 5ms */ - for (cnt = 0; cnt < 1000; cnt++) { - /* Try to acquire the lock */ - REG_WR(bp, hw_lock_control_reg + 4, resource_bit); - lock_status = REG_RD(bp, hw_lock_control_reg); - if (lock_status & resource_bit) - return 0; - - msleep(5); - } - DP(NETIF_MSG_HW, "Timeout\n"); - return -EAGAIN; -} - -int bnx2x_release_leader_lock(struct bnx2x *bp) -{ - return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); -} - -int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) -{ - u32 lock_status; - u32 resource_bit = (1 << resource); - int func = BP_FUNC(bp); - u32 hw_lock_control_reg; - - DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource); - - /* Validating that the resource is within range */ - if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - DP(NETIF_MSG_HW, - "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", - resource, HW_LOCK_MAX_RESOURCE_VALUE); - return -EINVAL; - } - - if (func <= 5) { - hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); - } else { - hw_lock_control_reg = - (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); - } - - /* Validating that the resource is currently taken */ - lock_status = REG_RD(bp, hw_lock_control_reg); - if (!(lock_status & resource_bit)) { - DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", - lock_status, resource_bit); - return -EFAULT; - } - - REG_WR(bp, hw_lock_control_reg, resource_bit); - return 0; -} - - -int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) -{ - /* The GPIO should be swapped if swap register is set and active */ - int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && - REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; - int gpio_shift = gpio_num + - (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); - u32 gpio_mask = (1 << gpio_shift); - u32 gpio_reg; - int value; - - if (gpio_num > MISC_REGISTERS_GPIO_3) { - BNX2X_ERR("Invalid GPIO %d\n", gpio_num); - return -EINVAL; - } - - /* read GPIO value */ - gpio_reg = REG_RD(bp, MISC_REG_GPIO); - - /* get the requested pin value */ - if ((gpio_reg & gpio_mask) == gpio_mask) - value = 1; - else - value = 0; - - DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); - - return value; -} - -int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) -{ - /* The GPIO should be swapped if swap register is set and active */ - int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && - REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; - int gpio_shift = gpio_num + - (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); - u32 gpio_mask = (1 << gpio_shift); - u32 gpio_reg; - - if (gpio_num > MISC_REGISTERS_GPIO_3) { - BNX2X_ERR("Invalid GPIO %d\n", gpio_num); - return -EINVAL; - } - - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); - /* read GPIO and mask except the float bits */ - gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); - - switch (mode) { - case MISC_REGISTERS_GPIO_OUTPUT_LOW: - DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n", - gpio_num, gpio_shift); - /* clear FLOAT and set CLR */ - gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); - gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); - break; - - case MISC_REGISTERS_GPIO_OUTPUT_HIGH: - DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n", - gpio_num, gpio_shift); - /* clear FLOAT and set SET */ - gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); - gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); - break; - - case MISC_REGISTERS_GPIO_INPUT_HI_Z: - DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", - gpio_num, gpio_shift); - /* set FLOAT */ - gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); - break; - - default: - break; - } - - REG_WR(bp, MISC_REG_GPIO, gpio_reg); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); - - return 0; -} - -int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) -{ - u32 gpio_reg = 0; - int rc = 0; - - /* Any port swapping should be handled by caller. */ - - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); - /* read GPIO and mask except the float bits */ - gpio_reg = REG_RD(bp, MISC_REG_GPIO); - gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); - gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); - gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); - - switch (mode) { - case MISC_REGISTERS_GPIO_OUTPUT_LOW: - DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); - /* set CLR */ - gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); - break; - - case MISC_REGISTERS_GPIO_OUTPUT_HIGH: - DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); - /* set SET */ - gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); - break; - - case MISC_REGISTERS_GPIO_INPUT_HI_Z: - DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); - /* set FLOAT */ - gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); - break; - - default: - BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); - rc = -EINVAL; - break; - } - - if (rc == 0) - REG_WR(bp, MISC_REG_GPIO, gpio_reg); - - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); - - return rc; -} - -int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) -{ - /* The GPIO should be swapped if swap register is set and active */ - int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && - REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; - int gpio_shift = gpio_num + - (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); - u32 gpio_mask = (1 << gpio_shift); - u32 gpio_reg; - - if (gpio_num > MISC_REGISTERS_GPIO_3) { - BNX2X_ERR("Invalid GPIO %d\n", gpio_num); - return -EINVAL; - } - - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); - /* read GPIO int */ - gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); - - switch (mode) { - case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: - DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> " - "output low\n", gpio_num, gpio_shift); - /* clear SET and set CLR */ - gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); - gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); - break; - - case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: - DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> " - "output high\n", gpio_num, gpio_shift); - /* clear CLR and set SET */ - gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); - gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); - break; - - default: - break; - } - - REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); - - return 0; -} - -static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) -{ - u32 spio_mask = (1 << spio_num); - u32 spio_reg; - - if ((spio_num < MISC_REGISTERS_SPIO_4) || - (spio_num > MISC_REGISTERS_SPIO_7)) { - BNX2X_ERR("Invalid SPIO %d\n", spio_num); - return -EINVAL; - } - - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); - /* read SPIO and mask except the float bits */ - spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); - - switch (mode) { - case MISC_REGISTERS_SPIO_OUTPUT_LOW: - DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); - /* clear FLOAT and set CLR */ - spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); - spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); - break; - - case MISC_REGISTERS_SPIO_OUTPUT_HIGH: - DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); - /* clear FLOAT and set SET */ - spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); - spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); - break; - - case MISC_REGISTERS_SPIO_INPUT_HI_Z: - DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num); - /* set FLOAT */ - spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); - break; - - default: - break; - } - - REG_WR(bp, MISC_REG_SPIO, spio_reg); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); - - return 0; -} - -void bnx2x_calc_fc_adv(struct bnx2x *bp) -{ - u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); - switch (bp->link_vars.ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { - case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: - bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | - ADVERTISED_Pause); - break; - - case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: - bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | - ADVERTISED_Pause); - break; - - case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: - bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; - break; - - default: - bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | - ADVERTISED_Pause); - break; - } -} - -u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) -{ - if (!BP_NOMCP(bp)) { - u8 rc; - int cfx_idx = bnx2x_get_link_cfg_idx(bp); - u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; - /* - * Initialize link parameters structure variables - * It is recommended to turn off RX FC for jumbo frames - * for better performance - */ - if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) - bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; - 
else - bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; - - bnx2x_acquire_phy_lock(bp); - - if (load_mode == LOAD_DIAG) { - struct link_params *lp = &bp->link_params; - lp->loopback_mode = LOOPBACK_XGXS; - /* do PHY loopback at 10G speed, if possible */ - if (lp->req_line_speed[cfx_idx] < SPEED_10000) { - if (lp->speed_cap_mask[cfx_idx] & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) - lp->req_line_speed[cfx_idx] = - SPEED_10000; - else - lp->req_line_speed[cfx_idx] = - SPEED_1000; - } - } - - rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); - - bnx2x_release_phy_lock(bp); - - bnx2x_calc_fc_adv(bp); - - if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { - bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); - bnx2x_link_report(bp); - } else - queue_delayed_work(bnx2x_wq, &bp->period_task, 0); - bp->link_params.req_line_speed[cfx_idx] = req_line_speed; - return rc; - } - BNX2X_ERR("Bootcode is missing - can not initialize link\n"); - return -EINVAL; -} - -void bnx2x_link_set(struct bnx2x *bp) -{ - if (!BP_NOMCP(bp)) { - bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); - bnx2x_phy_init(&bp->link_params, &bp->link_vars); - bnx2x_release_phy_lock(bp); - - bnx2x_calc_fc_adv(bp); - } else - BNX2X_ERR("Bootcode is missing - can not set link\n"); -} - -static void bnx2x__link_reset(struct bnx2x *bp) -{ - if (!BP_NOMCP(bp)) { - bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); - bnx2x_release_phy_lock(bp); - } else - BNX2X_ERR("Bootcode is missing - can not reset link\n"); -} - -u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) -{ - u8 rc = 0; - - if (!BP_NOMCP(bp)) { - bnx2x_acquire_phy_lock(bp); - rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, - is_serdes); - bnx2x_release_phy_lock(bp); - } else - BNX2X_ERR("Bootcode is missing - can not test link\n"); - - return rc; -} - -static void bnx2x_init_port_minmax(struct bnx2x *bp) -{ - u32 r_param = bp->link_vars.line_speed / 8; - u32 fair_periodic_timeout_usec; - u32 t_fair; - - memset(&(bp->cmng.rs_vars), 0, - sizeof(struct rate_shaping_vars_per_port)); - memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port)); - - /* 100 usec in SDM ticks = 25 since each tick is 4 usec */ - bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4; - - /* this is the threshold below which no timer arming will occur - 1.25 coefficient is for the threshold to be a little bigger - than the real time, to compensate for timer in-accuracy */ - bp->cmng.rs_vars.rs_threshold = - (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4; - - /* resolution of fairness timer */ - fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; - /* for 10G it is 1000usec. for 1G it is 10000usec. */ - t_fair = T_FAIR_COEF / bp->link_vars.line_speed; - - /* this is the threshold below which we won't arm the timer anymore */ - bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES; - - /* we multiply by 1e3/8 to get bytes/msec. - We don't want the credits to pass a credit - of the t_fair*FAIR_MEM (algorithm resolution) */ - bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM; - /* since each tick is 4 usec */ - bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4; -} - -/* Calculates the sum of vn_min_rates. - It's needed for further normalizing of the min_rates. - Returns: - sum of vn_min_rates. - or - 0 - if all the min_rates are 0. - In the later case fainess algorithm should be deactivated. - If not all min_rates are zero then those that are zeroes will be set to 1. 
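   Illustrative example (an assumption, not from the original source):
   for configured per-vn min rates {0, a, b, 0} the two zero entries
   are raised to DEF_MIN_RATE, so every non-hidden vn keeps a nonzero
   fairness weight and vn_weight_sum = a + b + 2 * DEF_MIN_RATE.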
- */ -static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) -{ - int all_zero = 1; - int vn; - - bp->vn_weight_sum = 0; - for (vn = VN_0; vn < E1HVN_MAX; vn++) { - u32 vn_cfg = bp->mf_config[vn]; - u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT) * 100; - - /* Skip hidden vns */ - if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) - continue; - - /* If min rate is zero - set it to 1 */ - if (!vn_min_rate) - vn_min_rate = DEF_MIN_RATE; - else - all_zero = 0; - - bp->vn_weight_sum += vn_min_rate; - } - - /* if ETS or all min rates are zeros - disable fairness */ - if (BNX2X_IS_ETS_ENABLED(bp)) { - bp->cmng.flags.cmng_enables &= - ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; - DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); - } else if (all_zero) { - bp->cmng.flags.cmng_enables &= - ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; - DP(NETIF_MSG_IFUP, "All MIN values are zeroes" - " fairness will be disabled\n"); - } else - bp->cmng.flags.cmng_enables |= - CMNG_FLAGS_PER_PORT_FAIRNESS_VN; -} - -static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) -{ - struct rate_shaping_vars_per_vn m_rs_vn; - struct fairness_vars_per_vn m_fair_vn; - u32 vn_cfg = bp->mf_config[vn]; - int func = 2*vn + BP_PORT(bp); - u16 vn_min_rate, vn_max_rate; - int i; - - /* If function is hidden - set min and max to zeroes */ - if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { - vn_min_rate = 0; - vn_max_rate = 0; - - } else { - u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); - - vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT) * 100; - /* If fairness is enabled (not all min rates are zeroes) and - if current min rate is zero - set it to 1. - This is a requirement of the algorithm. */ - if (bp->vn_weight_sum && (vn_min_rate == 0)) - vn_min_rate = DEF_MIN_RATE; - - if (IS_MF_SI(bp)) - /* maxCfg in percents of linkspeed */ - vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; - else - /* maxCfg is absolute in 100Mb units */ - vn_max_rate = maxCfg * 100; - } - - DP(NETIF_MSG_IFUP, - "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", - func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); - - memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); - memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn)); - - /* global vn counter - maximal Mbps for this vn */ - m_rs_vn.vn_counter.rate = vn_max_rate; - - /* quota - number of bytes transmitted in this period */ - m_rs_vn.vn_counter.quota = - (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8; - - if (bp->vn_weight_sum) { - /* credit for each period of the fairness algorithm: - number of bytes in T_FAIR (the vn share the port rate). 
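	   (Illustratively: a vn holding half of vn_weight_sum earns
	   half of the per-period fairness credit, subject to the
	   fair_threshold floor applied below.)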
- vn_weight_sum should not be larger than 10000, thus - T_FAIR_COEF / (8 * vn_weight_sum) will always be greater - than zero */ - m_fair_vn.vn_credit_delta = - max_t(u32, (vn_min_rate * (T_FAIR_COEF / - (8 * bp->vn_weight_sum))), - (bp->cmng.fair_vars.fair_threshold + - MIN_ABOVE_THRESH)); - DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", - m_fair_vn.vn_credit_delta); - } - - /* Store it to internal memory */ - for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4, - ((u32 *)(&m_rs_vn))[i]); - - for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + - XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, - ((u32 *)(&m_fair_vn))[i]); -} - -static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) -{ - if (CHIP_REV_IS_SLOW(bp)) - return CMNG_FNS_NONE; - if (IS_MF(bp)) - return CMNG_FNS_MINMAX; - - return CMNG_FNS_NONE; -} - -void bnx2x_read_mf_cfg(struct bnx2x *bp) -{ - int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); - - if (BP_NOMCP(bp)) - return; /* what should be the default bvalue in this case */ - - /* For 2 port configuration the absolute function number formula - * is: - * abs_func = 2 * vn + BP_PORT + BP_PATH - * - * and there are 4 functions per port - * - * For 4 port configuration it is - * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH - * - * and there are 2 functions per port - */ - for (vn = VN_0; vn < E1HVN_MAX; vn++) { - int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); - - if (func >= E1H_FUNC_MAX) - break; - - bp->mf_config[vn] = - MF_CFG_RD(bp, func_mf_config[func].config); - } -} - -static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) -{ - - if (cmng_type == CMNG_FNS_MINMAX) { - int vn; - - /* clear cmng_enables */ - bp->cmng.flags.cmng_enables = 0; - - /* read mf conf from shmem */ - if (read_cfg) - bnx2x_read_mf_cfg(bp); - - /* Init rate shaping and fairness contexts */ - bnx2x_init_port_minmax(bp); - - /* vn_weight_sum and enable fairness if not 0 */ - bnx2x_calc_vn_weight_sum(bp); - - /* calculate and set min-max rate for each vn */ - if (bp->port.pmf) - for (vn = VN_0; vn < E1HVN_MAX; vn++) - bnx2x_init_vn_minmax(bp, vn); - - /* always enable rate shaping and fairness */ - bp->cmng.flags.cmng_enables |= - CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; - if (!bp->vn_weight_sum) - DP(NETIF_MSG_IFUP, "All MIN values are zeroes" - " fairness will be disabled\n"); - return; - } - - /* rate shaping and fairness are disabled */ - DP(NETIF_MSG_IFUP, - "rate shaping and fairness are disabled\n"); -} - -static inline void bnx2x_link_sync_notify(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int func; - int vn; - - /* Set the attention towards other drivers on the same port */ - for (vn = VN_0; vn < E1HVN_MAX; vn++) { - if (vn == BP_E1HVN(bp)) - continue; - - func = ((vn << 1) | port); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + - (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); - } -} - -/* This function is called upon link interrupt */ -static void bnx2x_link_attn(struct bnx2x *bp) -{ - /* Make sure that we are synced with the current statistics */ - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - bnx2x_link_update(&bp->link_params, &bp->link_vars); - - if (bp->link_vars.link_up) { - - /* dropless flow control */ - if (!CHIP_IS_E1(bp) && bp->dropless_fc) { - int port = BP_PORT(bp); - u32 pause_enabled = 0; - - if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) - pause_enabled = 1; - - REG_WR(bp, BAR_USTRORM_INTMEM + - 
USTORM_ETH_PAUSE_ENABLED_OFFSET(port), - pause_enabled); - } - - if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { - struct host_port_stats *pstats; - - pstats = bnx2x_sp(bp, port_stats); - /* reset old mac stats */ - memset(&(pstats->mac_stx[0]), 0, - sizeof(struct mac_stx)); - } - if (bp->state == BNX2X_STATE_OPEN) - bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); - } - - if (bp->link_vars.link_up && bp->link_vars.line_speed) { - int cmng_fns = bnx2x_get_cmng_fns_mode(bp); - - if (cmng_fns != CMNG_FNS_NONE) { - bnx2x_cmng_fns_init(bp, false, cmng_fns); - storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); - } else - /* rate shaping and fairness are disabled */ - DP(NETIF_MSG_IFUP, - "single function mode without fairness\n"); - } - - __bnx2x_link_report(bp); - - if (IS_MF(bp)) - bnx2x_link_sync_notify(bp); -} - -void bnx2x__link_status_update(struct bnx2x *bp) -{ - if (bp->state != BNX2X_STATE_OPEN) - return; - - bnx2x_link_status_update(&bp->link_params, &bp->link_vars); - - if (bp->link_vars.link_up) - bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); - else - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - /* indicate link status */ - bnx2x_link_report(bp); -} - -static void bnx2x_pmf_update(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - u32 val; - - bp->port.pmf = 1; - DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); - - /* - * We need the mb() to ensure the ordering between the writing to - * bp->port.pmf here and reading it from the bnx2x_periodic_task(). - */ - smp_mb(); - - /* queue a periodic task */ - queue_delayed_work(bnx2x_wq, &bp->period_task, 0); - - bnx2x_dcbx_pmf_update(bp); - - /* enable nig attention */ - val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); - if (bp->common.int_block == INT_BLOCK_HC) { - REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); - REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); - } else if (!CHIP_IS_E1x(bp)) { - REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); - REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); - } - - bnx2x_stats_handle(bp, STATS_EVENT_PMF); -} - -/* end of Link */ - -/* slow path */ - -/* - * General service functions - */ - -/* send the MCP a request, block until there is a reply */ -u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) -{ - int mb_idx = BP_FW_MB_IDX(bp); - u32 seq; - u32 rc = 0; - u32 cnt = 1; - u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; - - mutex_lock(&bp->fw_mb_mutex); - seq = ++bp->fw_seq; - SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); - SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); - - DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", - (command | seq), param); - - do { - /* let the FW do it's magic ... */ - msleep(delay); - - rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header); - - /* Give the FW up to 5 second (500*10ms) */ - } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); - - DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", - cnt*delay, rc, seq); - - /* is this a reply to our command? */ - if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) - rc &= FW_MSG_CODE_MASK; - else { - /* FW BUG! 
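(no reply carrying our sequence number arrived within the polling
   window above)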
*/ - BNX2X_ERR("FW failed to respond!\n"); - bnx2x_fw_dump(bp); - rc = 0; - } - mutex_unlock(&bp->fw_mb_mutex); - - return rc; -} - -static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp) -{ -#ifdef BCM_CNIC - /* Statistics are not supported for CNIC Clients at the moment */ - if (IS_FCOE_FP(fp)) - return false; -#endif - return true; -} - -void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) -{ - if (CHIP_IS_E1x(bp)) { - struct tstorm_eth_function_common_config tcfg = {0}; - - storm_memset_func_cfg(bp, &tcfg, p->func_id); - } - - /* Enable the function in the FW */ - storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); - storm_memset_func_en(bp, p->func_id, 1); - - /* spq */ - if (p->func_flgs & FUNC_FLG_SPQ) { - storm_memset_spq_addr(bp, p->spq_map, p->func_id); - REG_WR(bp, XSEM_REG_FAST_MEMORY + - XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); - } -} - -/** - * bnx2x_get_tx_only_flags - Return common flags - * - * @bp device handle - * @fp queue handle - * @zero_stats TRUE if statistics zeroing is needed - * - * Return the flags that are common for the Tx-only and not normal connections. - */ -static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, - struct bnx2x_fastpath *fp, - bool zero_stats) -{ - unsigned long flags = 0; - - /* PF driver will always initialize the Queue to an ACTIVE state */ - __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); - - /* tx only connections collect statistics (on the same index as the - * parent connection). The statistics are zeroed when the parent - * connection is initialized. - */ - if (stat_counter_valid(bp, fp)) { - __set_bit(BNX2X_Q_FLG_STATS, &flags); - if (zero_stats) - __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); - } - - return flags; -} - -static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, - struct bnx2x_fastpath *fp, - bool leading) -{ - unsigned long flags = 0; - - /* calculate other queue flags */ - if (IS_MF_SD(bp)) - __set_bit(BNX2X_Q_FLG_OV, &flags); - - if (IS_FCOE_FP(fp)) - __set_bit(BNX2X_Q_FLG_FCOE, &flags); - - if (!fp->disable_tpa) { - __set_bit(BNX2X_Q_FLG_TPA, &flags); - __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); - } - - if (leading) { - __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); - __set_bit(BNX2X_Q_FLG_MCAST, &flags); - } - - /* Always set HW VLAN stripping */ - __set_bit(BNX2X_Q_FLG_VLAN, &flags); - - - return flags | bnx2x_get_common_flags(bp, fp, true); -} - -static void bnx2x_pf_q_prep_general(struct bnx2x *bp, - struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, - u8 cos) -{ - gen_init->stat_id = bnx2x_stats_id(fp); - gen_init->spcl_id = fp->cl_id; - - /* Always use mini-jumbo MTU for FCoE L2 ring */ - if (IS_FCOE_FP(fp)) - gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; - else - gen_init->mtu = bp->dev->mtu; - - gen_init->cos = cos; -} - -static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, - struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, - struct bnx2x_rxq_setup_params *rxq_init) -{ - u8 max_sge = 0; - u16 sge_sz = 0; - u16 tpa_agg_size = 0; - - if (!fp->disable_tpa) { - pause->sge_th_hi = 250; - pause->sge_th_lo = 150; - tpa_agg_size = min_t(u32, - (min_t(u32, 8, MAX_SKB_FRAGS) * - SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); - max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> - SGE_PAGE_SHIFT; - max_sge = ((max_sge + PAGES_PER_SGE - 1) & - (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; - sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE, - 0xffff); - } - - /* pause - not for e1 */ - if (!CHIP_IS_E1(bp)) { - pause->bd_th_hi = 350; - pause->bd_th_lo = 250; 
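/*
 * Illustrative sketch (an assumption, not part of the original patch):
 * each *_th_hi/*_th_lo pair acts as a hysteresis watermark for
 * dropless flow control, along the lines of:
 *
 *	if (free_entries < th_lo)
 *		assert_pause();		(running out of buffers: XOFF)
 *	else if (free_entries > th_hi)
 *		deassert_pause();	(recovered headroom: XON)
 *
 * The gap between the two values keeps pause frames from toggling on
 * every descriptor.
 */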
- pause->rcq_th_hi = 350; - pause->rcq_th_lo = 250; - - pause->pri_map = 1; - } - - /* rxq setup */ - rxq_init->dscr_map = fp->rx_desc_mapping; - rxq_init->sge_map = fp->rx_sge_mapping; - rxq_init->rcq_map = fp->rx_comp_mapping; - rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; - - /* This should be a maximum number of data bytes that may be - * placed on the BD (not including paddings). - */ - rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN - - IP_HEADER_ALIGNMENT_PADDING; - - rxq_init->cl_qzone_id = fp->cl_qzone_id; - rxq_init->tpa_agg_sz = tpa_agg_size; - rxq_init->sge_buf_sz = sge_sz; - rxq_init->max_sges_pkt = max_sge; - rxq_init->rss_engine_id = BP_FUNC(bp); - - /* Maximum number or simultaneous TPA aggregation for this Queue. - * - * For PF Clients it should be the maximum avaliable number. - * VF driver(s) may want to define it to a smaller value. - */ - rxq_init->max_tpa_queues = - (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H_E2); - - rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; - rxq_init->fw_sb_id = fp->fw_sb_id; - - if (IS_FCOE_FP(fp)) - rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; - else - rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; -} - -static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, - struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, - u8 cos) -{ - txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping; - txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; - txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; - txq_init->fw_sb_id = fp->fw_sb_id; - - /* - * set the tss leading client id for TX classfication == - * leading RSS client id - */ - txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); - - if (IS_FCOE_FP(fp)) { - txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; - txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; - } -} - -static void bnx2x_pf_init(struct bnx2x *bp) -{ - struct bnx2x_func_init_params func_init = {0}; - struct event_ring_data eq_data = { {0} }; - u16 flags; - - if (!CHIP_IS_E1x(bp)) { - /* reset IGU PF statistics: MSIX + ATTN */ - /* PF */ - REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + - BNX2X_IGU_STAS_MSG_VF_CNT*4 + - (CHIP_MODE_IS_4_PORT(bp) ? - BP_FUNC(bp) : BP_VN(bp))*4, 0); - /* ATTN */ - REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + - BNX2X_IGU_STAS_MSG_VF_CNT*4 + - BNX2X_IGU_STAS_MSG_PF_CNT*4 + - (CHIP_MODE_IS_4_PORT(bp) ? - BP_FUNC(bp) : BP_VN(bp))*4, 0); - } - - /* function setup flags */ - flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); - - /* This flag is relevant for E1x only. - * E2 doesn't have a TPA configuration in a function level. - */ - flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; - - func_init.func_flgs = flags; - func_init.pf_id = BP_FUNC(bp); - func_init.func_id = BP_FUNC(bp); - func_init.spq_map = bp->spq_mapping; - func_init.spq_prod = bp->spq_prod_idx; - - bnx2x_func_init(bp, &func_init); - - memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); - - /* - * Congestion management values depend on the link rate - * There is no active link so initial link rate is set to 10 Gbps. - * When the link comes up The congestion management values are - * re-calculated according to the actual link rate. 
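 * (The SPEED_10000 assignment below is only this placeholder; when the
 * link comes up, bnx2x_link_attn() re-runs bnx2x_cmng_fns_init() with
 * the real line rate.)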
- */ - bp->link_vars.line_speed = SPEED_10000; - bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); - - /* Only the PMF sets the HW */ - if (bp->port.pmf) - storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); - - /* init Event Queue */ - eq_data.base_addr.hi = U64_HI(bp->eq_mapping); - eq_data.base_addr.lo = U64_LO(bp->eq_mapping); - eq_data.producer = bp->eq_prod; - eq_data.index_id = HC_SP_INDEX_EQ_CONS; - eq_data.sb_id = DEF_SB_ID; - storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); -} - - -static void bnx2x_e1h_disable(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - - bnx2x_tx_disable(bp); - - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); -} - -static void bnx2x_e1h_enable(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); - - /* Tx queue should be only reenabled */ - netif_tx_wake_all_queues(bp->dev); - - /* - * Should not call netif_carrier_on since it will be called if the link - * is up when checking for link state - */ -} - -/* called due to MCP event (on pmf): - * reread new bandwidth configuration - * configure FW - * notify others function about the change - */ -static inline void bnx2x_config_mf_bw(struct bnx2x *bp) -{ - if (bp->link_vars.link_up) { - bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); - bnx2x_link_sync_notify(bp); - } - storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); -} - -static inline void bnx2x_set_mf_bw(struct bnx2x *bp) -{ - bnx2x_config_mf_bw(bp); - bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); -} - -static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) -{ - DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); - - if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { - - /* - * This is the only place besides the function initialization - * where the bp->flags can change so it is done without any - * locks - */ - if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { - DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); - bp->flags |= MF_FUNC_DIS; - - bnx2x_e1h_disable(bp); - } else { - DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); - bp->flags &= ~MF_FUNC_DIS; - - bnx2x_e1h_enable(bp); - } - dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; - } - if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { - bnx2x_config_mf_bw(bp); - dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; - } - - /* Report results to MCP */ - if (dcc_event) - bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); - else - bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); -} - -/* must be called under the spq lock */ -static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) -{ - struct eth_spe *next_spe = bp->spq_prod_bd; - - if (bp->spq_prod_bd == bp->spq_last_bd) { - bp->spq_prod_bd = bp->spq; - bp->spq_prod_idx = 0; - DP(NETIF_MSG_TIMER, "end of spq\n"); - } else { - bp->spq_prod_bd++; - bp->spq_prod_idx++; - } - return next_spe; -} - -/* must be called under the spq lock */ -static inline void bnx2x_sp_prod_update(struct bnx2x *bp) -{ - int func = BP_FUNC(bp); - - /* - * Make sure that BD data is updated before writing the producer: - * BD data is written to the memory, the producer is read from the - * memory, thus we need a full memory barrier to ensure the ordering. 
- */ - mb(); - - REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), - bp->spq_prod_idx); - mmiowb(); -} - -/** - * bnx2x_is_contextless_ramrod - check if the current command ends on EQ - * - * @cmd: command to check - * @cmd_type: command type - */ -static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) -{ - if ((cmd_type == NONE_CONNECTION_TYPE) || - (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || - (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || - (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || - (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || - (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || - (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) - return true; - else - return false; - -} - - -/** - * bnx2x_sp_post - place a single command on an SP ring - * - * @bp: driver handle - * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) - * @cid: SW CID the command is related to - * @data_hi: command private data address (high 32 bits) - * @data_lo: command private data address (low 32 bits) - * @cmd_type: command type (e.g. NONE, ETH) - * - * SP data is handled as if it's always an address pair, thus data fields are - * not swapped to little endian in upper functions. Instead this function swaps - * data as if it's two u32 fields. - */ -int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, - u32 data_hi, u32 data_lo, int cmd_type) -{ - struct eth_spe *spe; - u16 type; - bool common = bnx2x_is_contextless_ramrod(command, cmd_type); - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return -EIO; -#endif - - spin_lock_bh(&bp->spq_lock); - - if (common) { - if (!atomic_read(&bp->eq_spq_left)) { - BNX2X_ERR("BUG! EQ ring full!\n"); - spin_unlock_bh(&bp->spq_lock); - bnx2x_panic(); - return -EBUSY; - } - } else if (!atomic_read(&bp->cq_spq_left)) { - BNX2X_ERR("BUG! SPQ ring full!\n"); - spin_unlock_bh(&bp->spq_lock); - bnx2x_panic(); - return -EBUSY; - } - - spe = bnx2x_sp_get_next(bp); - - /* CID needs port number to be encoded int it */ - spe->hdr.conn_and_cmd_data = - cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | - HW_CID(bp, cid)); - - type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; - - type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & - SPE_HDR_FUNCTION_ID); - - spe->hdr.type = cpu_to_le16(type); - - spe->data.update_data_addr.hi = cpu_to_le32(data_hi); - spe->data.update_data_addr.lo = cpu_to_le32(data_lo); - - /* - * It's ok if the actual decrement is issued towards the memory - * somewhere between the spin_lock and spin_unlock. Thus no - * more explict memory barrier is needed. - */ - if (common) - atomic_dec(&bp->eq_spq_left); - else - atomic_dec(&bp->cq_spq_left); - - - DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, - "SPQE[%x] (%x:%x) (cmd, common?) 
(%d,%d) hw_cid %x data (%x:%x) " - "type(0x%x) left (CQ, EQ) (%x,%x)\n", - bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), - (u32)(U64_LO(bp->spq_mapping) + - (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, - HW_CID(bp, cid), data_hi, data_lo, type, - atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); - - bnx2x_sp_prod_update(bp); - spin_unlock_bh(&bp->spq_lock); - return 0; -} - -/* acquire split MCP access lock register */ -static int bnx2x_acquire_alr(struct bnx2x *bp) -{ - u32 j, val; - int rc = 0; - - might_sleep(); - for (j = 0; j < 1000; j++) { - val = (1UL << 31); - REG_WR(bp, GRCBASE_MCP + 0x9c, val); - val = REG_RD(bp, GRCBASE_MCP + 0x9c); - if (val & (1L << 31)) - break; - - msleep(5); - } - if (!(val & (1L << 31))) { - BNX2X_ERR("Cannot acquire MCP access lock register\n"); - rc = -EBUSY; - } - - return rc; -} - -/* release split MCP access lock register */ -static void bnx2x_release_alr(struct bnx2x *bp) -{ - REG_WR(bp, GRCBASE_MCP + 0x9c, 0); -} - -#define BNX2X_DEF_SB_ATT_IDX 0x0001 -#define BNX2X_DEF_SB_IDX 0x0002 - -static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) -{ - struct host_sp_status_block *def_sb = bp->def_status_blk; - u16 rc = 0; - - barrier(); /* status block is written to by the chip */ - if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { - bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; - rc |= BNX2X_DEF_SB_ATT_IDX; - } - - if (bp->def_idx != def_sb->sp_sb.running_index) { - bp->def_idx = def_sb->sp_sb.running_index; - rc |= BNX2X_DEF_SB_IDX; - } - - /* Do not reorder: indecies reading should complete before handling */ - barrier(); - return rc; -} - -/* - * slow path service functions - */ - -static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) -{ - int port = BP_PORT(bp); - u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0; - u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : - NIG_REG_MASK_INTERRUPT_PORT0; - u32 aeu_mask; - u32 nig_mask = 0; - u32 reg_addr; - - if (bp->attn_state & asserted) - BNX2X_ERR("IGU ERROR\n"); - - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - aeu_mask = REG_RD(bp, aeu_addr); - - DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", - aeu_mask, asserted); - aeu_mask &= ~(asserted & 0x3ff); - DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); - - REG_WR(bp, aeu_addr, aeu_mask); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - - DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); - bp->attn_state |= asserted; - DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); - - if (asserted & ATTN_HARD_WIRED_MASK) { - if (asserted & ATTN_NIG_FOR_FUNC) { - - bnx2x_acquire_phy_lock(bp); - - /* save nig interrupt mask */ - nig_mask = REG_RD(bp, nig_int_mask_addr); - - /* If nig_mask is not set, no need to call the update - * function. - */ - if (nig_mask) { - REG_WR(bp, nig_int_mask_addr, 0); - - bnx2x_link_attn(bp); - } - - /* handle unicore attn? 
*/ - } - if (asserted & ATTN_SW_TIMER_4_FUNC) - DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); - - if (asserted & GPIO_2_FUNC) - DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); - - if (asserted & GPIO_3_FUNC) - DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); - - if (asserted & GPIO_4_FUNC) - DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); - - if (port == 0) { - if (asserted & ATTN_GENERAL_ATTN_1) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); - } - if (asserted & ATTN_GENERAL_ATTN_2) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); - } - if (asserted & ATTN_GENERAL_ATTN_3) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); - } - } else { - if (asserted & ATTN_GENERAL_ATTN_4) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); - } - if (asserted & ATTN_GENERAL_ATTN_5) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); - } - if (asserted & ATTN_GENERAL_ATTN_6) { - DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); - } - } - - } /* if hardwired */ - - if (bp->common.int_block == INT_BLOCK_HC) - reg_addr = (HC_REG_COMMAND_REG + port*32 + - COMMAND_REG_ATTN_BITS_SET); - else - reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); - - DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, - (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); - REG_WR(bp, reg_addr, asserted); - - /* now set back the mask */ - if (asserted & ATTN_NIG_FOR_FUNC) { - REG_WR(bp, nig_int_mask_addr, nig_mask); - bnx2x_release_phy_lock(bp); - } -} - -static inline void bnx2x_fan_failure(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - u32 ext_phy_config; - /* mark the failure */ - ext_phy_config = - SHMEM_RD(bp, - dev_info.port_hw_config[port].external_phy_config); - - ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; - ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; - SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, - ext_phy_config); - - /* log the failure */ - netdev_err(bp->dev, "Fan Failure on Network Controller has caused" - " the driver to shutdown the card to prevent permanent" - " damage. Please contact OEM Support for assistance\n"); -} - -static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) -{ - int port = BP_PORT(bp); - int reg_offset; - u32 val; - - reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : - MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); - - if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { - - val = REG_RD(bp, reg_offset); - val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; - REG_WR(bp, reg_offset, val); - - BNX2X_ERR("SPIO5 hw attention\n"); - - /* Fan failure attention */ - bnx2x_hw_reset_phy(&bp->link_params); - bnx2x_fan_failure(bp); - } - - if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { - bnx2x_acquire_phy_lock(bp); - bnx2x_handle_module_detect_int(&bp->link_params); - bnx2x_release_phy_lock(bp); - } - - if (attn & HW_INTERRUT_ASSERT_SET_0) { - - val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); - REG_WR(bp, reg_offset, val); - - BNX2X_ERR("FATAL HW block attention set0 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); - bnx2x_panic(); - } -} - -static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) -{ - u32 val; - - if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { - - val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); - BNX2X_ERR("DB hw attention 0x%x\n", val); - /* DORQ discard attention */ - if (val & 0x2) - BNX2X_ERR("FATAL error from DORQ\n"); - } - - if (attn & HW_INTERRUT_ASSERT_SET_1) { - - int port = BP_PORT(bp); - int reg_offset; - - reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : - MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); - - val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); - REG_WR(bp, reg_offset, val); - - BNX2X_ERR("FATAL HW block attention set1 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); - bnx2x_panic(); - } -} - -static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) -{ - u32 val; - - if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { - - val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); - BNX2X_ERR("CFC hw attention 0x%x\n", val); - /* CFC error attention */ - if (val & 0x2) - BNX2X_ERR("FATAL error from CFC\n"); - } - - if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { - val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); - BNX2X_ERR("PXP hw attention-0 0x%x\n", val); - /* RQ_USDMDP_FIFO_OVERFLOW */ - if (val & 0x18000) - BNX2X_ERR("FATAL error from PXP\n"); - - if (!CHIP_IS_E1x(bp)) { - val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); - BNX2X_ERR("PXP hw attention-1 0x%x\n", val); - } - } - - if (attn & HW_INTERRUT_ASSERT_SET_2) { - - int port = BP_PORT(bp); - int reg_offset; - - reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : - MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); - - val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); - REG_WR(bp, reg_offset, val); - - BNX2X_ERR("FATAL HW block attention set2 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); - bnx2x_panic(); - } -} - -static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) -{ - u32 val; - - if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { - - if (attn & BNX2X_PMF_LINK_ASSERT) { - int func = BP_FUNC(bp); - - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); - bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, - func_mf_config[BP_ABS_FUNC(bp)].config); - val = SHMEM_RD(bp, - func_mb[BP_FW_MB_IDX(bp)].drv_status); - if (val & DRV_STATUS_DCC_EVENT_MASK) - bnx2x_dcc_event(bp, - (val & DRV_STATUS_DCC_EVENT_MASK)); - - if (val & DRV_STATUS_SET_MF_BW) - bnx2x_set_mf_bw(bp); - - if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) - bnx2x_pmf_update(bp); - - if (bp->port.pmf && - (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && - bp->dcbx_enabled > 0) - /* start dcbx state machine */ - bnx2x_dcbx_set_params(bp, - BNX2X_DCBX_STATE_NEG_RECEIVED); - if (bp->link_vars.periodic_flags & - PERIODIC_FLAGS_LINK_EVENT) { - /* sync with link */ - bnx2x_acquire_phy_lock(bp); - bp->link_vars.periodic_flags &= - ~PERIODIC_FLAGS_LINK_EVENT; - bnx2x_release_phy_lock(bp); - if (IS_MF(bp)) - bnx2x_link_sync_notify(bp); - bnx2x_link_report(bp); - } - /* Always call it here: bnx2x_link_report() will - * prevent the link indication duplication. - */ - bnx2x__link_status_update(bp); - } else if (attn & BNX2X_MC_ASSERT_BITS) { - - BNX2X_ERR("MC assert!\n"); - bnx2x_mc_assert(bp); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); - bnx2x_panic(); - - } else if (attn & BNX2X_MCP_ASSERT) { - - BNX2X_ERR("MCP assert!\n"); - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); - bnx2x_fw_dump(bp); - - } else - BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); - } - - if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { - BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); - if (attn & BNX2X_GRC_TIMEOUT) { - val = CHIP_IS_E1(bp) ? 0 : - REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); - BNX2X_ERR("GRC time-out 0x%08x\n", val); - } - if (attn & BNX2X_GRC_RSV) { - val = CHIP_IS_E1(bp) ? 0 : - REG_RD(bp, MISC_REG_GRC_RSV_ATTN); - BNX2X_ERR("GRC reserved 0x%08x\n", val); - } - REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); - } -} - -/* - * Bits map: - * 0-7 - Engine0 load counter. - * 8-15 - Engine1 load counter. - * 16 - Engine0 RESET_IN_PROGRESS bit. - * 17 - Engine1 RESET_IN_PROGRESS bit. - * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function - * on the engine - * 19 - Engine1 ONE_IS_LOADED. - * 20 - Chip reset flow bit. When set none-leader must wait for both engines - * leader to complete (check for both RESET_IN_PROGRESS bits and not for - * just the one belonging to its engine). - * - */ -#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 - -#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff -#define BNX2X_PATH0_LOAD_CNT_SHIFT 0 -#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 -#define BNX2X_PATH1_LOAD_CNT_SHIFT 8 -#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 -#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 -#define BNX2X_GLOBAL_RESET_BIT 0x00040000 - -/* - * Set the GLOBAL_RESET bit. 
- * - * Should be run under rtnl lock - */ -void bnx2x_set_reset_global(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); - barrier(); - mmiowb(); -} - -/* - * Clear the GLOBAL_RESET bit. - * - * Should be run under rtnl lock - */ -static inline void bnx2x_clear_reset_global(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); - barrier(); - mmiowb(); -} - -/* - * Checks the GLOBAL_RESET bit. - * - * should be run under rtnl lock - */ -static inline bool bnx2x_reset_is_global(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - - DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); - return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; -} - -/* - * Clear RESET_IN_PROGRESS bit for the current engine. - * - * Should be run under rtnl lock - */ -static inline void bnx2x_set_reset_done(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - u32 bit = BP_PATH(bp) ? - BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; - - /* Clear the bit */ - val &= ~bit; - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); - barrier(); - mmiowb(); -} - -/* - * Set RESET_IN_PROGRESS for the current engine. - * - * should be run under rtnl lock - */ -void bnx2x_set_reset_in_progress(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - u32 bit = BP_PATH(bp) ? - BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; - - /* Set the bit */ - val |= bit; - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); - barrier(); - mmiowb(); -} - -/* - * Checks the RESET_IN_PROGRESS bit for the given engine. - * should be run under rtnl lock - */ -bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) -{ - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - u32 bit = engine ? - BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; - - /* return false if bit is set */ - return (val & bit) ? false : true; -} - -/* - * Increment the load counter for the current engine. - * - * should be run under rtnl lock - */ -void bnx2x_inc_load_cnt(struct bnx2x *bp) -{ - u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : - BNX2X_PATH0_LOAD_CNT_MASK; - u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : - BNX2X_PATH0_LOAD_CNT_SHIFT; - - DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); - - /* get the current counter value */ - val1 = (val & mask) >> shift; - - /* increment... */ - val1++; - - /* clear the old value */ - val &= ~mask; - - /* set the new one */ - val |= ((val1 << shift) & mask); - - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); - barrier(); - mmiowb(); -} - -/** - * bnx2x_dec_load_cnt - decrement the load counter - * - * @bp: driver handle - * - * Should be run under rtnl lock. - * Decrements the load counter for the current engine. Returns - * the new counter value. - */ -u32 bnx2x_dec_load_cnt(struct bnx2x *bp) -{ - u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : - BNX2X_PATH0_LOAD_CNT_MASK; - u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : - BNX2X_PATH0_LOAD_CNT_SHIFT; - - DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); - - /* get the current counter value */ - val1 = (val & mask) >> shift; - - /* decrement... 
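   e.g. (illustrative) with a register value of 0x0302 on path 1, whose
   counter lives in bits 8-15:
	val1 = (0x0302 & 0xff00) >> 8 = 3, decremented to 2;
	new val = (0x0302 & ~0xff00) | (2 << 8) = 0x0202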
*/ - val1--; - - /* clear the old value */ - val &= ~mask; - - /* set the new one */ - val |= ((val1 << shift) & mask); - - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); - barrier(); - mmiowb(); - - return val1; -} - -/* - * Read the load counter for the current engine. - * - * should be run under rtnl lock - */ -static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine) -{ - u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : - BNX2X_PATH0_LOAD_CNT_MASK); - u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : - BNX2X_PATH0_LOAD_CNT_SHIFT); - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - - DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val); - - val = (val & mask) >> shift; - - DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val); - - return val; -} - -/* - * Reset the load counter for the current engine. - * - * should be run under rtnl lock - */ -static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : - BNX2X_PATH0_LOAD_CNT_MASK); - - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); -} - -static inline void _print_next_block(int idx, const char *blk) -{ - if (idx) - pr_cont(", "); - pr_cont("%s", blk); -} - -static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, - bool print) -{ - int i = 0; - u32 cur_bit = 0; - for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); - if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "BRB"); - break; - case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "PARSER"); - break; - case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "TSDM"); - break; - case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: - if (print) - _print_next_block(par_num++, - "SEARCHER"); - break; - case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "TCM"); - break; - case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "TSEMI"); - break; - case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "XPB"); - break; - } - - /* Clear the bit */ - sig &= ~cur_bit; - } - } - - return par_num; -} - -static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, - bool *global, bool print) -{ - int i = 0; - u32 cur_bit = 0; - for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); - if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "PBF"); - break; - case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "QM"); - break; - case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "TM"); - break; - case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "XSDM"); - break; - case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "XCM"); - break; - case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "XSEMI"); - break; - case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: - if (print) - _print_next_block(par_num++, - "DOORBELLQ"); - break; - case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "NIG"); - break; - case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: - if (print) - _print_next_block(par_num++, - "VAUX PCI CORE"); - 
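/*
 * Illustrative note (an assumption, not original code): parities that
 * set *global = true, like the VAUX PCI core one reported just above,
 * cannot be cleared by resetting a single function; recovery must
 * escalate to a chip-wide reset. The enclosing scan is the usual
 * walk-and-clear over set bits, equivalent to:
 *
 *	while (sig) {
 *		u32 bit = sig & -sig;	(isolate lowest set bit)
 *		report(bit);		(report() is a stand-in)
 *		sig &= ~bit;		(mark as handled)
 *	}
 */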
*global = true; - break; - case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "DEBUG"); - break; - case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "USDM"); - break; - case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "UCM"); - break; - case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "USEMI"); - break; - case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "UPB"); - break; - case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "CSDM"); - break; - case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "CCM"); - break; - } - - /* Clear the bit */ - sig &= ~cur_bit; - } - } - - return par_num; -} - -static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, - bool print) -{ - int i = 0; - u32 cur_bit = 0; - for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); - if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "CSEMI"); - break; - case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "PXP"); - break; - case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: - if (print) - _print_next_block(par_num++, - "PXPPCICLOCKCLIENT"); - break; - case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "CFC"); - break; - case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "CDU"); - break; - case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "DMAE"); - break; - case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "IGU"); - break; - case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "MISC"); - break; - } - - /* Clear the bit */ - sig &= ~cur_bit; - } - } - - return par_num; -} - -static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, - bool *global, bool print) -{ - int i = 0; - u32 cur_bit = 0; - for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); - if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: - if (print) - _print_next_block(par_num++, "MCP ROM"); - *global = true; - break; - case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: - if (print) - _print_next_block(par_num++, - "MCP UMP RX"); - *global = true; - break; - case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: - if (print) - _print_next_block(par_num++, - "MCP UMP TX"); - *global = true; - break; - case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: - if (print) - _print_next_block(par_num++, - "MCP SCPAD"); - *global = true; - break; - } - - /* Clear the bit */ - sig &= ~cur_bit; - } - } - - return par_num; -} - -static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, - bool print) -{ - int i = 0; - u32 cur_bit = 0; - for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); - if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "PGLUE_B"); - break; - case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: - if (print) - _print_next_block(par_num++, "ATC"); - break; - } - - /* Clear the bit */ - sig &= ~cur_bit; - } - } - - return par_num; -} - -static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, - u32 *sig) -{ - 
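/*
 * Illustrative note (an assumption): sig[0..4] mirror the five
 * MISC_REG_AEU_AFTER_INVERT_*_FUNC_x registers read by the caller, and
 * each HW_PRTY_ASSERT_SET_n mask keeps only the bits of that register
 * that can signal a parity error. Conceptually the check below is:
 *
 *	bool parity = false;
 *	for (i = 0; i < 5; i++)
 *		parity |= (sig[i] & prty_mask[i]) != 0;
 *
 * (prty_mask[] is a hypothetical stand-in for the per-register
 * HW_PRTY_ASSERT_SET_n constants.)
 */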
if ((sig[0] & HW_PRTY_ASSERT_SET_0) || - (sig[1] & HW_PRTY_ASSERT_SET_1) || - (sig[2] & HW_PRTY_ASSERT_SET_2) || - (sig[3] & HW_PRTY_ASSERT_SET_3) || - (sig[4] & HW_PRTY_ASSERT_SET_4)) { - int par_num = 0; - DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: " - "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x " - "[4]:0x%08x\n", - sig[0] & HW_PRTY_ASSERT_SET_0, - sig[1] & HW_PRTY_ASSERT_SET_1, - sig[2] & HW_PRTY_ASSERT_SET_2, - sig[3] & HW_PRTY_ASSERT_SET_3, - sig[4] & HW_PRTY_ASSERT_SET_4); - if (print) - netdev_err(bp->dev, - "Parity errors detected in blocks: "); - par_num = bnx2x_check_blocks_with_parity0( - sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); - par_num = bnx2x_check_blocks_with_parity1( - sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity2( - sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); - par_num = bnx2x_check_blocks_with_parity3( - sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity4( - sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); - - if (print) - pr_cont("\n"); - - return true; - } else - return false; -} - -/** - * bnx2x_chk_parity_attn - checks for parity attentions. - * - * @bp: driver handle - * @global: true if there was a global attention - * @print: show parity attention in syslog - */ -bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) -{ - struct attn_route attn = { {0} }; - int port = BP_PORT(bp); - - attn.sig[0] = REG_RD(bp, - MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + - port*4); - attn.sig[1] = REG_RD(bp, - MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + - port*4); - attn.sig[2] = REG_RD(bp, - MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + - port*4); - attn.sig[3] = REG_RD(bp, - MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + - port*4); - - if (!CHIP_IS_E1x(bp)) - attn.sig[4] = REG_RD(bp, - MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + - port*4); - - return bnx2x_parity_attn(bp, global, print, attn.sig); -} - - -static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) -{ - u32 val; - if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { - - val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); - BNX2X_ERR("PGLUE hw attention 0x%x\n", val); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "ADDRESS_ERROR\n"); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "INCORRECT_RCV_BEHAVIOR\n"); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "WAS_ERROR_ATTN\n"); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "VF_LENGTH_VIOLATION_ATTN\n"); - if (val & - PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "VF_GRC_SPACE_VIOLATION_ATTN\n"); - if (val & - PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "VF_MSIX_BAR_VIOLATION_ATTN\n"); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "TCPL_ERROR_ATTN\n"); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "TCPL_IN_TWO_RCBS_ATTN\n"); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" - "CSSNOOP_FIFO_OVERFLOW\n"); - } - if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { - val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); - BNX2X_ERR("ATC hw attention 
0x%x\n", val); - if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) - BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); - if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) - BNX2X_ERR("ATC_ATC_INT_STS_REG" - "_ATC_TCPL_TO_NOT_PEND\n"); - if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) - BNX2X_ERR("ATC_ATC_INT_STS_REG_" - "ATC_GPA_MULTIPLE_HITS\n"); - if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) - BNX2X_ERR("ATC_ATC_INT_STS_REG_" - "ATC_RCPL_TO_EMPTY_CNT\n"); - if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) - BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); - if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) - BNX2X_ERR("ATC_ATC_INT_STS_REG_" - "ATC_IREQ_LESS_THAN_STU\n"); - } - - if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | - AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { - BNX2X_ERR("FATAL parity attention set4 0x%x\n", - (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | - AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); - } - -} - -static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) -{ - struct attn_route attn, *group_mask; - int port = BP_PORT(bp); - int index; - u32 reg_addr; - u32 val; - u32 aeu_mask; - bool global = false; - - /* need to take HW lock because MCP or other port might also - try to handle this event */ - bnx2x_acquire_alr(bp); - - if (bnx2x_chk_parity_attn(bp, &global, true)) { -#ifndef BNX2X_STOP_ON_ERROR - bp->recovery_state = BNX2X_RECOVERY_INIT; - schedule_delayed_work(&bp->sp_rtnl_task, 0); - /* Disable HW interrupts */ - bnx2x_int_disable(bp); - /* In case of parity errors don't handle attentions so that - * other function would "see" parity errors. - */ -#else - bnx2x_panic(); -#endif - bnx2x_release_alr(bp); - return; - } - - attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); - attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); - attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); - attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); - if (!CHIP_IS_E1x(bp)) - attn.sig[4] = - REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); - else - attn.sig[4] = 0; - - DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", - attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); - - for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { - if (deasserted & (1 << index)) { - group_mask = &bp->attn_group[index]; - - DP(NETIF_MSG_HW, "group[%d]: %08x %08x " - "%08x %08x %08x\n", - index, - group_mask->sig[0], group_mask->sig[1], - group_mask->sig[2], group_mask->sig[3], - group_mask->sig[4]); - - bnx2x_attn_int_deasserted4(bp, - attn.sig[4] & group_mask->sig[4]); - bnx2x_attn_int_deasserted3(bp, - attn.sig[3] & group_mask->sig[3]); - bnx2x_attn_int_deasserted1(bp, - attn.sig[1] & group_mask->sig[1]); - bnx2x_attn_int_deasserted2(bp, - attn.sig[2] & group_mask->sig[2]); - bnx2x_attn_int_deasserted0(bp, - attn.sig[0] & group_mask->sig[0]); - } - } - - bnx2x_release_alr(bp); - - if (bp->common.int_block == INT_BLOCK_HC) - reg_addr = (HC_REG_COMMAND_REG + port*32 + - COMMAND_REG_ATTN_BITS_CLR); - else - reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); - - val = ~deasserted; - DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, - (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); - REG_WR(bp, reg_addr, val); - - if (~bp->attn_state & deasserted) - BNX2X_ERR("IGU ERROR\n"); - - reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0; - - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - aeu_mask = REG_RD(bp, reg_addr); - - DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", - aeu_mask, deasserted); - aeu_mask |= (deasserted & 0x3ff); - DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); - - REG_WR(bp, reg_addr, aeu_mask); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); - - DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); - bp->attn_state &= ~deasserted; - DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); -} - -static void bnx2x_attn_int(struct bnx2x *bp) -{ - /* read local copy of bits */ - u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. - attn_bits); - u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. - attn_bits_ack); - u32 attn_state = bp->attn_state; - - /* look for changed bits */ - u32 asserted = attn_bits & ~attn_ack & ~attn_state; - u32 deasserted = ~attn_bits & attn_ack & attn_state; - - DP(NETIF_MSG_HW, - "attn_bits %x attn_ack %x asserted %x deasserted %x\n", - attn_bits, attn_ack, asserted, deasserted); - - if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) - BNX2X_ERR("BAD attention state\n"); - - /* handle bits that were raised */ - if (asserted) - bnx2x_attn_int_asserted(bp, asserted); - - if (deasserted) - bnx2x_attn_int_deasserted(bp, deasserted); -} - -void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, - u16 index, u8 op, u8 update) -{ - u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; - - bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, - igu_addr); -} - -static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) -{ - /* No memory barriers */ - storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); - mmiowb(); /* keep prod updates ordered */ -} - -#ifdef BCM_CNIC -static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, - union event_ring_elem *elem) -{ - u8 err = elem->message.error; - - if (!bp->cnic_eth_dev.starting_cid || - (cid < bp->cnic_eth_dev.starting_cid && - cid != bp->cnic_eth_dev.iscsi_l2_cid)) - return 1; - - DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); - - if (unlikely(err)) { - - BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", - cid); - bnx2x_panic_dump(bp); - } - bnx2x_cnic_cfc_comp(bp, cid, err); - return 0; -} -#endif - -static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) -{ - struct bnx2x_mcast_ramrod_params rparam; - int rc; - - memset(&rparam, 0, sizeof(rparam)); - - rparam.mcast_obj = &bp->mcast_obj; - - netif_addr_lock_bh(bp->dev); - - /* Clear pending state for the last command */ - bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); - - /* If there are pending mcast commands - send them */ - if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { - rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); - if (rc < 0) - BNX2X_ERR("Failed to send pending mcast commands: %d\n", - rc); - } - - netif_addr_unlock_bh(bp->dev); -} - -static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, - union event_ring_elem *elem) -{ - unsigned long ramrod_flags = 0; - int rc = 0; - u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; - struct bnx2x_vlan_mac_obj *vlan_mac_obj; - - /* Always push next commands out, don't wait here */ - __set_bit(RAMROD_CONT, &ramrod_flags); - - switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { - case BNX2X_FILTER_MAC_PENDING: -#ifdef BCM_CNIC - if (cid == BNX2X_ISCSI_ETH_CID) - 
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
- else
-#endif
- vlan_mac_obj = &bp->fp[cid].mac_obj;
-
- break;
- case BNX2X_FILTER_MCAST_PENDING:
- /* This is only relevant for 57710 where multicast MACs are
- * configured as unicast MACs using the same ramrod.
- */
- bnx2x_handle_mcast_eqe(bp);
- return;
- default:
- BNX2X_ERR("Unsupported classification command: %d\n",
- elem->message.data.eth_event.echo);
- return;
- }
-
- rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
-
- if (rc < 0)
- BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
- else if (rc > 0)
- DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
-
-}
-
-#ifdef BCM_CNIC
-static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
-#endif
-
-static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
-{
- netif_addr_lock_bh(bp->dev);
-
- clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
-
- /* Send rx_mode command again if was requested */
- if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
- bnx2x_set_storm_rx_mode(bp);
-#ifdef BCM_CNIC
- else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
- &bp->sp_state))
- bnx2x_set_iscsi_eth_rx_mode(bp, true);
- else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
- &bp->sp_state))
- bnx2x_set_iscsi_eth_rx_mode(bp, false);
-#endif
-
- netif_addr_unlock_bh(bp->dev);
-}
-
-static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
- struct bnx2x *bp, u32 cid)
-{
- DP(BNX2X_MSG_SP, "retrieving fp from cid %d", cid);
-#ifdef BCM_CNIC
- if (cid == BNX2X_FCOE_ETH_CID)
- return &bnx2x_fcoe(bp, q_obj);
- else
-#endif
- return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
-}
-
-static void bnx2x_eq_int(struct bnx2x *bp)
-{
- u16 hw_cons, sw_cons, sw_prod;
- union event_ring_elem *elem;
- u32 cid;
- u8 opcode;
- int spqe_cnt = 0;
- struct bnx2x_queue_sp_obj *q_obj;
- struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
- struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
-
- hw_cons = le16_to_cpu(*bp->eq_cons_sb);
-
- /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
- * When we get the next-page element we need to adjust so the loop
- * condition below will be met. The next element is the size of a
- * regular element and hence incrementing by 1
- */
- if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
- hw_cons++;
-
- /* This function may never run in parallel with itself for a
- * specific bp, thus there is no need in "paired" read memory
- * barrier here. 
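- * (In practice the EQ has a single consumer: bnx2x_eq_int() is invoked
- * only from the sp_task workqueue for a given bp, so sampling hw_cons
- * once per pass is sufficient.)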
- */ - sw_cons = bp->eq_cons; - sw_prod = bp->eq_prod; - - DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", - hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); - - for (; sw_cons != hw_cons; - sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { - - - elem = &bp->eq_ring[EQ_DESC(sw_cons)]; - - cid = SW_CID(elem->message.data.cfc_del_event.cid); - opcode = elem->message.opcode; - - - /* handle eq element */ - switch (opcode) { - case EVENT_RING_OPCODE_STAT_QUERY: - DP(NETIF_MSG_TIMER, "got statistics comp event %d\n", - bp->stats_comp++); - /* nothing to do with stats comp */ - goto next_spqe; - - case EVENT_RING_OPCODE_CFC_DEL: - /* handle according to cid range */ - /* - * we may want to verify here that the bp state is - * HALTING - */ - DP(BNX2X_MSG_SP, - "got delete ramrod for MULTI[%d]\n", cid); -#ifdef BCM_CNIC - if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) - goto next_spqe; -#endif - q_obj = bnx2x_cid_to_q_obj(bp, cid); - - if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) - break; - - - - goto next_spqe; - - case EVENT_RING_OPCODE_STOP_TRAFFIC: - DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n"); - if (f_obj->complete_cmd(bp, f_obj, - BNX2X_F_CMD_TX_STOP)) - break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); - goto next_spqe; - - case EVENT_RING_OPCODE_START_TRAFFIC: - DP(BNX2X_MSG_SP, "got START TRAFFIC\n"); - if (f_obj->complete_cmd(bp, f_obj, - BNX2X_F_CMD_TX_START)) - break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); - goto next_spqe; - case EVENT_RING_OPCODE_FUNCTION_START: - DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n"); - if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) - break; - - goto next_spqe; - - case EVENT_RING_OPCODE_FUNCTION_STOP: - DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n"); - if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) - break; - - goto next_spqe; - } - - switch (opcode | bp->state) { - case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | - BNX2X_STATE_OPEN): - case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | - BNX2X_STATE_OPENING_WAIT4_PORT): - cid = elem->message.data.eth_event.echo & - BNX2X_SWCID_MASK; - DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", - cid); - rss_raw->clear_pending(rss_raw); - break; - - case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): - case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): - case (EVENT_RING_OPCODE_SET_MAC | - BNX2X_STATE_CLOSING_WAIT4_HALT): - case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | - BNX2X_STATE_OPEN): - case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | - BNX2X_STATE_DIAG): - case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | - BNX2X_STATE_CLOSING_WAIT4_HALT): - DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); - bnx2x_handle_classification_eqe(bp, elem); - break; - - case (EVENT_RING_OPCODE_MULTICAST_RULES | - BNX2X_STATE_OPEN): - case (EVENT_RING_OPCODE_MULTICAST_RULES | - BNX2X_STATE_DIAG): - case (EVENT_RING_OPCODE_MULTICAST_RULES | - BNX2X_STATE_CLOSING_WAIT4_HALT): - DP(BNX2X_MSG_SP, "got mcast ramrod\n"); - bnx2x_handle_mcast_eqe(bp); - break; - - case (EVENT_RING_OPCODE_FILTERS_RULES | - BNX2X_STATE_OPEN): - case (EVENT_RING_OPCODE_FILTERS_RULES | - BNX2X_STATE_DIAG): - case (EVENT_RING_OPCODE_FILTERS_RULES | - BNX2X_STATE_CLOSING_WAIT4_HALT): - DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); - bnx2x_handle_rx_mode_eqe(bp); - break; - default: - /* unknown event log error and continue */ - BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", - elem->message.opcode, bp->state); - } -next_spqe: - spqe_cnt++; - } /* for */ - - smp_mb__before_atomic_inc(); - atomic_add(spqe_cnt, &bp->eq_spq_left); - - bp->eq_cons = sw_cons; - bp->eq_prod = sw_prod; - /* Make sure that above mem writes were issued towards the memory */ - smp_wmb(); - - /* update producer */ - bnx2x_update_eq_prod(bp, bp->eq_prod); -} - -static void bnx2x_sp_task(struct work_struct *work) -{ - struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); - u16 status; - - status = bnx2x_update_dsb_idx(bp); -/* if (status == 0) */ -/* BNX2X_ERR("spurious slowpath interrupt!\n"); */ - - DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status); - - /* HW attentions */ - if (status & BNX2X_DEF_SB_ATT_IDX) { - bnx2x_attn_int(bp); - status &= ~BNX2X_DEF_SB_ATT_IDX; - } - - /* SP events: STAT_QUERY and others */ - if (status & BNX2X_DEF_SB_IDX) { -#ifdef BCM_CNIC - struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); - - if ((!NO_FCOE(bp)) && - (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { - /* - * Prevent local bottom-halves from running as - * we are going to change the local NAPI list. - */ - local_bh_disable(); - napi_schedule(&bnx2x_fcoe(bp, napi)); - local_bh_enable(); - } -#endif - /* Handle EQ completions */ - bnx2x_eq_int(bp); - - bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, - le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); - - status &= ~BNX2X_DEF_SB_IDX; - } - - if (unlikely(status)) - DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", - status); - - bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, - le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); -} - -irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) -{ - struct net_device *dev = dev_instance; - struct bnx2x *bp = netdev_priv(dev); - - bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, - IGU_INT_DISABLE, 0); - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return IRQ_HANDLED; -#endif - -#ifdef BCM_CNIC - { - struct cnic_ops *c_ops; - - rcu_read_lock(); - c_ops = rcu_dereference(bp->cnic_ops); - if (c_ops) - c_ops->cnic_handler(bp->cnic_data, NULL); - rcu_read_unlock(); - } -#endif - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); - - return IRQ_HANDLED; -} - -/* end of slow path */ - - -void bnx2x_drv_pulse(struct bnx2x *bp) -{ - SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, - bp->fw_drv_pulse_wr_seq); -} - - -static void bnx2x_timer(unsigned long data) -{ - u8 cos; - struct bnx2x *bp = (struct bnx2x *) data; - - if (!netif_running(bp->dev)) - return; - - if (poll) { - struct bnx2x_fastpath *fp = &bp->fp[0]; - - for_each_cos_in_tx_queue(fp, cos) - bnx2x_tx_int(bp, &fp->txdata[cos]); - bnx2x_rx_int(fp, 1000); - } - - if (!BP_NOMCP(bp)) { - int mb_idx = BP_FW_MB_IDX(bp); - u32 drv_pulse; - u32 mcp_pulse; - - ++bp->fw_drv_pulse_wr_seq; - bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; - /* TBD - add SYSTEM_TIME */ - drv_pulse = bp->fw_drv_pulse_wr_seq; - bnx2x_drv_pulse(bp); - - mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & - MCP_PULSE_SEQ_MASK); - /* The delta between driver pulse and mcp response - * should be 1 (before mcp response) or 0 (after mcp response) - */ - if ((drv_pulse != mcp_pulse) && - (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { - /* someone lost a heartbeat... 
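- * (i.e. mcp_pulse is expected to equal drv_pulse, or to lag it by
- * exactly one modulo MCP_PULSE_SEQ_MASK; any other delta means the
- * MCP and driver heartbeats have drifted apart 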
*/ - BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", - drv_pulse, mcp_pulse); - } - } - - if (bp->state == BNX2X_STATE_OPEN) - bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); - - mod_timer(&bp->timer, jiffies + bp->current_interval); -} - -/* end of Statistics */ - -/* nic init */ - -/* - * nic init service functions - */ - -static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) -{ - u32 i; - if (!(len%4) && !(addr%4)) - for (i = 0; i < len; i += 4) - REG_WR(bp, addr + i, fill); - else - for (i = 0; i < len; i++) - REG_WR8(bp, addr + i, fill); - -} - -/* helper: writes FP SP data to FW - data_size in dwords */ -static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp, - int fw_sb_id, - u32 *sb_data_p, - u32 data_size) -{ - int index; - for (index = 0; index < data_size; index++) - REG_WR(bp, BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + - sizeof(u32)*index, - *(sb_data_p + index)); -} - -static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) -{ - u32 *sb_data_p; - u32 data_size = 0; - struct hc_status_block_data_e2 sb_data_e2; - struct hc_status_block_data_e1x sb_data_e1x; - - /* disable the function first */ - if (!CHIP_IS_E1x(bp)) { - memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); - sb_data_e2.common.state = SB_DISABLED; - sb_data_e2.common.p_func.vf_valid = false; - sb_data_p = (u32 *)&sb_data_e2; - data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); - } else { - memset(&sb_data_e1x, 0, - sizeof(struct hc_status_block_data_e1x)); - sb_data_e1x.common.state = SB_DISABLED; - sb_data_e1x.common.p_func.vf_valid = false; - sb_data_p = (u32 *)&sb_data_e1x; - data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); - } - bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); - - bnx2x_fill(bp, BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, - CSTORM_STATUS_BLOCK_SIZE); - bnx2x_fill(bp, BAR_CSTRORM_INTMEM + - CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, - CSTORM_SYNC_BLOCK_SIZE); -} - -/* helper: writes SP SB data to FW */ -static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp, - struct hc_sp_status_block_data *sp_sb_data) -{ - int func = BP_FUNC(bp); - int i; - for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) - REG_WR(bp, BAR_CSTRORM_INTMEM + - CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + - i*sizeof(u32), - *((u32 *)sp_sb_data + i)); -} - -static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) -{ - int func = BP_FUNC(bp); - struct hc_sp_status_block_data sp_sb_data; - memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); - - sp_sb_data.state = SB_DISABLED; - sp_sb_data.p_func.vf_valid = false; - - bnx2x_wr_sp_sb_data(bp, &sp_sb_data); - - bnx2x_fill(bp, BAR_CSTRORM_INTMEM + - CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, - CSTORM_SP_STATUS_BLOCK_SIZE); - bnx2x_fill(bp, BAR_CSTRORM_INTMEM + - CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, - CSTORM_SP_SYNC_BLOCK_SIZE); - -} - - -static inline -void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, - int igu_sb_id, int igu_seg_id) -{ - hc_sm->igu_sb_id = igu_sb_id; - hc_sm->igu_seg_id = igu_seg_id; - hc_sm->timer_value = 0xFF; - hc_sm->time_to_expire = 0xFFFFFFFF; -} - -static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, - u8 vf_valid, int fw_sb_id, int igu_sb_id) -{ - int igu_seg_id; - - struct hc_status_block_data_e2 sb_data_e2; - struct hc_status_block_data_e1x sb_data_e1x; - struct hc_status_block_sm *hc_sm_p; - int data_size; - u32 *sb_data_p; - - if (CHIP_INT_MODE_IS_BC(bp)) - 
igu_seg_id = HC_SEG_ACCESS_NORM;
- else
- igu_seg_id = IGU_SEG_ACCESS_NORM;
-
- bnx2x_zero_fp_sb(bp, fw_sb_id);
-
- if (!CHIP_IS_E1x(bp)) {
- memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
- sb_data_e2.common.state = SB_ENABLED;
- sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
- sb_data_e2.common.p_func.vf_id = vfid;
- sb_data_e2.common.p_func.vf_valid = vf_valid;
- sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
- sb_data_e2.common.same_igu_sb_1b = true;
- sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
- sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
- hc_sm_p = sb_data_e2.common.state_machine;
- sb_data_p = (u32 *)&sb_data_e2;
- data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
- } else {
- memset(&sb_data_e1x, 0,
- sizeof(struct hc_status_block_data_e1x));
- sb_data_e1x.common.state = SB_ENABLED;
- sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
- sb_data_e1x.common.p_func.vf_id = 0xff;
- sb_data_e1x.common.p_func.vf_valid = false;
- sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
- sb_data_e1x.common.same_igu_sb_1b = true;
- sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
- sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
- hc_sm_p = sb_data_e1x.common.state_machine;
- sb_data_p = (u32 *)&sb_data_e1x;
- data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
- }
-
- bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
- igu_sb_id, igu_seg_id);
- bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
- igu_sb_id, igu_seg_id);
-
- DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
-
- /* write indices to HW */
- bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
-}
-
-static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
- u16 tx_usec, u16 rx_usec)
-{
- bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
- false, rx_usec);
- bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
- HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
- tx_usec);
- bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
- HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
- tx_usec);
- bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
- HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
- tx_usec);
-}
-
-static void bnx2x_init_def_sb(struct bnx2x *bp)
-{
- struct host_sp_status_block *def_sb = bp->def_status_blk;
- dma_addr_t mapping = bp->def_status_blk_mapping;
- int igu_sp_sb_index;
- int igu_seg_id;
- int port = BP_PORT(bp);
- int func = BP_FUNC(bp);
- int reg_offset;
- u64 section;
- int index;
- struct hc_sp_status_block_data sp_sb_data;
- memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
-
- if (CHIP_INT_MODE_IS_BC(bp)) {
- igu_sp_sb_index = DEF_SB_IGU_ID;
- igu_seg_id = HC_SEG_ACCESS_DEF;
- } else {
- igu_sp_sb_index = bp->igu_dsb_id;
- igu_seg_id = IGU_SEG_ACCESS_DEF;
- }
-
- /* ATTN */
- section = ((u64)mapping) + offsetof(struct host_sp_status_block,
- atten_status_block);
- def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
-
- bp->attn_state = 0;
-
- reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
- MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
- for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
- int sindex;
- /* take care of sig[0]..sig[4] */
- for (sindex = 0; sindex < 4; sindex++)
- bp->attn_group[index].sig[sindex] =
- REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
-
- if (!CHIP_IS_E1x(bp))
- /*
- * enable5 is separate from the rest of the registers,
- * and therefore the address skip is 4
- * and not 16 between the different groups
- */
- bp->attn_group[index].sig[4] = REG_RD(bp,
- reg_offset + 0x10 + 0x4*index);
- else
- bp->attn_group[index].sig[4] = 0;
- }
-
- if (bp->common.int_block == INT_BLOCK_HC) {
- reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
- HC_REG_ATTN_MSG0_ADDR_L);
-
- REG_WR(bp, reg_offset, U64_LO(section));
- REG_WR(bp, reg_offset + 4, U64_HI(section));
- } else if (!CHIP_IS_E1x(bp)) {
- REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
- REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
- }
-
- section = ((u64)mapping) + offsetof(struct host_sp_status_block,
- sp_sb);
-
- bnx2x_zero_sp_sb(bp);
-
- sp_sb_data.state = SB_ENABLED;
- sp_sb_data.host_sb_addr.lo = U64_LO(section);
- sp_sb_data.host_sb_addr.hi = U64_HI(section);
- sp_sb_data.igu_sb_id = igu_sp_sb_index;
- sp_sb_data.igu_seg_id = igu_seg_id;
- sp_sb_data.p_func.pf_id = func;
- sp_sb_data.p_func.vnic_id = BP_VN(bp);
- sp_sb_data.p_func.vf_id = 0xff;
-
- bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
-
- bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
-}
-
-void bnx2x_update_coalesce(struct bnx2x *bp)
-{
- int i;
-
- for_each_eth_queue(bp, i)
- bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
- bp->tx_ticks, bp->rx_ticks);
-}
-
-static void bnx2x_init_sp_ring(struct bnx2x *bp)
-{
- spin_lock_init(&bp->spq_lock);
- atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
-
- bp->spq_prod_idx = 0;
- bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
- bp->spq_prod_bd = bp->spq;
- bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
-}
-
-static void bnx2x_init_eq_ring(struct bnx2x *bp)
-{
- int i;
- for (i = 1; i <= NUM_EQ_PAGES; i++) {
- union event_ring_elem *elem =
- &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
-
- elem->next_page.addr.hi =
- cpu_to_le32(U64_HI(bp->eq_mapping +
- BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
- elem->next_page.addr.lo =
- cpu_to_le32(U64_LO(bp->eq_mapping +
- BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
- }
- bp->eq_cons = 0;
- bp->eq_prod = NUM_EQ_DESC;
- bp->eq_cons_sb = BNX2X_EQ_INDEX;
- /* we want a warning message before it gets rough... 
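- * hence the initial EQ credit is set one below the smaller of the SPQ
- * headroom (MAX_SP_DESC_CNT - MAX_SPQ_PENDING) and the EQ ring size 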
*/
- atomic_set(&bp->eq_spq_left,
- min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
-}
-
-
-/* called with netif_addr_lock_bh() */
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags)
-{
- struct bnx2x_rx_mode_ramrod_params ramrod_param;
- int rc;
-
- memset(&ramrod_param, 0, sizeof(ramrod_param));
-
- /* Prepare ramrod parameters */
- ramrod_param.cid = 0;
- ramrod_param.cl_id = cl_id;
- ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
- ramrod_param.func_id = BP_FUNC(bp);
-
- ramrod_param.pstate = &bp->sp_state;
- ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
-
- ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
- ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
-
- set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
-
- ramrod_param.ramrod_flags = ramrod_flags;
- ramrod_param.rx_mode_flags = rx_mode_flags;
-
- ramrod_param.rx_accept_flags = rx_accept_flags;
- ramrod_param.tx_accept_flags = tx_accept_flags;
-
- rc = bnx2x_config_rx_mode(bp, &ramrod_param);
- if (rc < 0) {
- BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
- return;
- }
-}
-
-/* called with netif_addr_lock_bh() */
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
-{
- unsigned long rx_mode_flags = 0, ramrod_flags = 0;
- unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
-
-#ifdef BCM_CNIC
- if (!NO_FCOE(bp))
-
- /* Configure rx_mode of FCoE Queue */
- __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
-#endif
-
- switch (bp->rx_mode) {
- case BNX2X_RX_MODE_NONE:
- /*
- * 'drop all' supersedes any accept flags that may have been
- * passed to the function.
- */
- break;
- case BNX2X_RX_MODE_NORMAL:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
-
- /* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
-
- break;
- case BNX2X_RX_MODE_ALLMULTI:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
-
- /* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
-
- break;
- case BNX2X_RX_MODE_PROMISC:
- /* According to definition of SI mode, iface in promisc mode
- * should receive matched and unmatched (in resolution of port)
- * unicast packets. 
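- * ("Matched" unicast is traffic whose DA hits this function's
- * classification rules; the BNX2X_ACCEPT_UNMATCHED flag below
- * additionally admits unicast frames that miss them.)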
- */
- __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
-
- /* internal switching mode */
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
-
- if (IS_MF_SI(bp))
- __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
- else
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
-
- break;
- default:
- BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
- return;
- }
-
- if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
- }
-
- __set_bit(RAMROD_RX, &ramrod_flags);
- __set_bit(RAMROD_TX, &ramrod_flags);
-
- bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
- tx_accept_flags, ramrod_flags);
-}
-
-static void bnx2x_init_internal_common(struct bnx2x *bp)
-{
- int i;
-
- if (IS_MF_SI(bp))
- /*
- * In switch independent mode, the TSTORM needs to accept
- * packets that failed classification, since approximate match
- * mac addresses aren't written to NIG LLH
- */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
- else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
- /* Zero this manually as its initialization is
- currently missing in the initTool */
- for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_AGG_DATA_OFFSET + i * 4, 0);
- if (!CHIP_IS_E1x(bp)) {
- REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
- CHIP_INT_MODE_IS_BC(bp) ?
- HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
- }
-}
-
-static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
-{
- switch (load_code) {
- case FW_MSG_CODE_DRV_LOAD_COMMON:
- case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
- bnx2x_init_internal_common(bp);
- /* no break */
-
- case FW_MSG_CODE_DRV_LOAD_PORT:
- /* nothing to do */
- /* no break */
-
- case FW_MSG_CODE_DRV_LOAD_FUNCTION:
- /* internal memory per function is
- initialized inside bnx2x_pf_init */
- break;
-
- default:
- BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
- break;
- }
-}
-
-static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
-{
- return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
-}
-
-static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
-{
- return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
-}
-
-static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
-{
- if (CHIP_IS_E1x(fp->bp))
- return BP_L_ID(fp->bp) + fp->index;
- else /* We want Client ID to be the same as IGU SB ID for 57712 */
- return bnx2x_fp_igu_sb_id(fp);
-}
-
-static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
-{
- struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
- u8 cos;
- unsigned long q_type = 0;
- u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
-
- fp->cid = fp_idx;
- fp->cl_id = bnx2x_fp_cl_id(fp);
- fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
- fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
- /* qZone id equals the FW (per path) client id */
- fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
-
- /* init shortcut */
- fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
- /* Setup SB indices */
- fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
-
- /* Configure Queue State object */
- __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
- __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
-
- BUG_ON(fp->max_cos > 
BNX2X_MULTI_TX_COS); - - /* init tx data */ - for_each_cos_in_tx_queue(fp, cos) { - bnx2x_init_txdata(bp, &fp->txdata[cos], - CID_COS_TO_TX_ONLY_CID(fp->cid, cos), - FP_COS_TO_TXQ(fp, cos), - BNX2X_TX_SB_INDEX_BASE + cos); - cids[cos] = fp->txdata[cos].cid; - } - - bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos, - BP_FUNC(bp), bnx2x_sp(bp, q_rdata), - bnx2x_sp_mapping(bp, q_rdata), q_type); - - /** - * Configure classification DBs: Always enable Tx switching - */ - bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); - - DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " - "cl_id %d fw_sb %d igu_sb %d\n", - fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, - fp->igu_sb_id); - bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, - fp->fw_sb_id, fp->igu_sb_id); - - bnx2x_update_fpsb_idx(fp); -} - -void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) -{ - int i; - - for_each_eth_queue(bp, i) - bnx2x_init_eth_fp(bp, i); -#ifdef BCM_CNIC - if (!NO_FCOE(bp)) - bnx2x_init_fcoe_fp(bp); - - bnx2x_init_sb(bp, bp->cnic_sb_mapping, - BNX2X_VF_ID_INVALID, false, - bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); - -#endif - - /* Initialize MOD_ABS interrupts */ - bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, - bp->common.shmem_base, bp->common.shmem2_base, - BP_PORT(bp)); - /* ensure status block indices were read */ - rmb(); - - bnx2x_init_def_sb(bp); - bnx2x_update_dsb_idx(bp); - bnx2x_init_rx_rings(bp); - bnx2x_init_tx_rings(bp); - bnx2x_init_sp_ring(bp); - bnx2x_init_eq_ring(bp); - bnx2x_init_internal(bp, load_code); - bnx2x_pf_init(bp); - bnx2x_stats_init(bp); - - /* flush all before enabling interrupts */ - mb(); - mmiowb(); - - bnx2x_int_enable(bp); - - /* Check for SPIO5 */ - bnx2x_attn_int_deasserted0(bp, - REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & - AEU_INPUTS_ATTN_BITS_SPIO5); -} - -/* end of nic init */ - -/* - * gzip service functions - */ - -static int bnx2x_gunzip_init(struct bnx2x *bp) -{ - bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, - &bp->gunzip_mapping, GFP_KERNEL); - if (bp->gunzip_buf == NULL) - goto gunzip_nomem1; - - bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); - if (bp->strm == NULL) - goto gunzip_nomem2; - - bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); - if (bp->strm->workspace == NULL) - goto gunzip_nomem3; - - return 0; - -gunzip_nomem3: - kfree(bp->strm); - bp->strm = NULL; - -gunzip_nomem2: - dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, - bp->gunzip_mapping); - bp->gunzip_buf = NULL; - -gunzip_nomem1: - netdev_err(bp->dev, "Cannot allocate firmware buffer for" - " un-compression\n"); - return -ENOMEM; -} - -static void bnx2x_gunzip_end(struct bnx2x *bp) -{ - if (bp->strm) { - vfree(bp->strm->workspace); - kfree(bp->strm); - bp->strm = NULL; - } - - if (bp->gunzip_buf) { - dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, - bp->gunzip_mapping); - bp->gunzip_buf = NULL; - } -} - -static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) -{ - int n, rc; - - /* check gzip header */ - if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { - BNX2X_ERR("Bad gzip header\n"); - return -EINVAL; - } - - n = 10; - -#define FNAME 0x8 - - if (zbuf[3] & FNAME) - while ((zbuf[n++] != 0) && (n < len)); - - bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; - bp->strm->avail_in = len - n; - bp->strm->next_out = bp->gunzip_buf; - bp->strm->avail_out = FW_BUF_SIZE; - - rc = 
zlib_inflateInit2(bp->strm, -MAX_WBITS);
- if (rc != Z_OK)
- return rc;
-
- rc = zlib_inflate(bp->strm, Z_FINISH);
- if ((rc != Z_OK) && (rc != Z_STREAM_END))
- netdev_err(bp->dev, "Firmware decompression error: %s\n",
- bp->strm->msg);
-
- bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
- if (bp->gunzip_outlen & 0x3)
- netdev_err(bp->dev, "Firmware decompression error:"
- " gunzip_outlen (%d) not aligned\n",
- bp->gunzip_outlen);
- bp->gunzip_outlen >>= 2;
-
- zlib_inflateEnd(bp->strm);
-
- if (rc == Z_STREAM_END)
- return 0;
-
- return rc;
-}
-
-/* nic load/unload */
-
-/*
- * General service functions
- */
-
-/* send a NIG loopback debug packet */
-static void bnx2x_lb_pckt(struct bnx2x *bp)
-{
- u32 wb_write[3];
-
- /* Ethernet source and destination addresses */
- wb_write[0] = 0x55555555;
- wb_write[1] = 0x55555555;
- wb_write[2] = 0x20; /* SOP */
- REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
-
- /* NON-IP protocol */
- wb_write[0] = 0x09000000;
- wb_write[1] = 0x55555555;
- wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
- REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
-}
-
-/* some of the internal memories
- * are not directly readable from the driver;
- * to test them we send debug packets
- */
-static int bnx2x_int_mem_test(struct bnx2x *bp)
-{
- int factor;
- int count, i;
- u32 val = 0;
-
- if (CHIP_REV_IS_FPGA(bp))
- factor = 120;
- else if (CHIP_REV_IS_EMUL(bp))
- factor = 200;
- else
- factor = 1;
-
- /* Disable inputs of parser neighbor blocks */
- REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
- REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
- REG_WR(bp, CFC_REG_DEBUG0, 0x1);
- REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
-
- /* Write 0 to parser credits for CFC search request */
- REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
-
- /* send Ethernet packet */
- bnx2x_lb_pckt(bp);
-
- /* TODO do I reset NIG statistic? 
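- * (the poll loop below expects NIG_REG_STAT2_BRB_OCTET to show 0x10
- * octets for the single debug frame just sent, so a stale count would
- * surface as a timeout 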
*/ - /* Wait until NIG register shows 1 packet of size 0x10 */ - count = 1000 * factor; - while (count) { - - bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); - val = *bnx2x_sp(bp, wb_data[0]); - if (val == 0x10) - break; - - msleep(10); - count--; - } - if (val != 0x10) { - BNX2X_ERR("NIG timeout val = 0x%x\n", val); - return -1; - } - - /* Wait until PRS register shows 1 packet */ - count = 1000 * factor; - while (count) { - val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); - if (val == 1) - break; - - msleep(10); - count--; - } - if (val != 0x1) { - BNX2X_ERR("PRS timeout val = 0x%x\n", val); - return -2; - } - - /* Reset and init BRB, PRS */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); - msleep(50); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); - msleep(50); - bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); - - DP(NETIF_MSG_HW, "part2\n"); - - /* Disable inputs of parser neighbor blocks */ - REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); - REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); - REG_WR(bp, CFC_REG_DEBUG0, 0x1); - REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); - - /* Write 0 to parser credits for CFC search request */ - REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); - - /* send 10 Ethernet packets */ - for (i = 0; i < 10; i++) - bnx2x_lb_pckt(bp); - - /* Wait until NIG register shows 10 + 1 - packets of size 11*0x10 = 0xb0 */ - count = 1000 * factor; - while (count) { - - bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); - val = *bnx2x_sp(bp, wb_data[0]); - if (val == 0xb0) - break; - - msleep(10); - count--; - } - if (val != 0xb0) { - BNX2X_ERR("NIG timeout val = 0x%x\n", val); - return -3; - } - - /* Wait until PRS register shows 2 packets */ - val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); - if (val != 2) - BNX2X_ERR("PRS timeout val = 0x%x\n", val); - - /* Write 1 to parser credits for CFC search request */ - REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); - - /* Wait until PRS register shows 3 packets */ - msleep(10 * factor); - /* Wait until NIG register shows 1 packet of size 0x10 */ - val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); - if (val != 3) - BNX2X_ERR("PRS timeout val = 0x%x\n", val); - - /* clear NIG EOP FIFO */ - for (i = 0; i < 11; i++) - REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); - val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); - if (val != 1) { - BNX2X_ERR("clear of NIG failed\n"); - return -4; - } - - /* Reset and init BRB, PRS, NIG */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); - msleep(50); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); - msleep(50); - bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); -#ifndef BCM_CNIC - /* set NIC mode */ - REG_WR(bp, PRS_REG_NIC_MODE, 1); -#endif - - /* Enable inputs of parser neighbor blocks */ - REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); - REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); - REG_WR(bp, CFC_REG_DEBUG0, 0x0); - REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); - - DP(NETIF_MSG_HW, "done\n"); - - return 0; /* OK */ -} - -static void bnx2x_enable_blocks_attention(struct bnx2x *bp) -{ - REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); - if (!CHIP_IS_E1x(bp)) - REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); - else - REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); - REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); - REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); - /* - * mask read length error interrupts in brb for parser - * (parsing unit and 'checksum and crc' unit) - * these errors are legal (PU reads fixed length and CAC can 
cause - * read length error on truncated packets) - */ - REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); - REG_WR(bp, QM_REG_QM_INT_MASK, 0); - REG_WR(bp, TM_REG_TM_INT_MASK, 0); - REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); - REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); - REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); -/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ -/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ - REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); - REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); - REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); -/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ -/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ - REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); - REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); - REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); - REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); -/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ -/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ - - if (CHIP_REV_IS_FPGA(bp)) - REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); - else if (!CHIP_IS_E1x(bp)) - REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, - (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF - | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT - | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN - | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED - | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED)); - else - REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); - REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); - REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); - REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); -/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ - - if (!CHIP_IS_E1x(bp)) - /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ - REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); - - REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); - REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); -/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ - REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ -} - -static void bnx2x_reset_common(struct bnx2x *bp) -{ - u32 val = 0x1400; - - /* reset_common */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - 0xd3ffff7f); - - if (CHIP_IS_E3(bp)) { - val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; - val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; - } - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); -} - -static void bnx2x_setup_dmae(struct bnx2x *bp) -{ - bp->dmae_ready = 0; - spin_lock_init(&bp->dmae_lock); -} - -static void bnx2x_init_pxp(struct bnx2x *bp) -{ - u16 devctl; - int r_order, w_order; - - pci_read_config_word(bp->pdev, - pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl); - DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); - w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); - if (bp->mrrs == -1) - r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); - else { - DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); - r_order = bp->mrrs; - } - - bnx2x_init_pxp_arb(bp, r_order, w_order); -} - -static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) -{ - int is_required; - u32 val; - int port; - - if (BP_NOMCP(bp)) - return; - - is_required = 0; - val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & - SHARED_HW_CFG_FAN_FAILURE_MASK; - - if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) - is_required = 1; - - /* - * The fan failure mechanism is usually related to the PHY type since - * the power consumption of the board is affected by the PHY. Currently, - * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 
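- * The per-PHY requirement is queried from the link code below via
- * bnx2x_fan_failure_det_req(), once for each port. 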
- */ - else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) - for (port = PORT_0; port < PORT_MAX; port++) { - is_required |= - bnx2x_fan_failure_det_req( - bp, - bp->common.shmem_base, - bp->common.shmem2_base, - port); - } - - DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); - - if (is_required == 0) - return; - - /* Fan failure is indicated by SPIO 5 */ - bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, - MISC_REGISTERS_SPIO_INPUT_HI_Z); - - /* set to active low mode */ - val = REG_RD(bp, MISC_REG_SPIO_INT); - val |= ((1 << MISC_REGISTERS_SPIO_5) << - MISC_REGISTERS_SPIO_INT_OLD_SET_POS); - REG_WR(bp, MISC_REG_SPIO_INT, val); - - /* enable interrupt to signal the IGU */ - val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); - val |= (1 << MISC_REGISTERS_SPIO_5); - REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); -} - -static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num) -{ - u32 offset = 0; - - if (CHIP_IS_E1(bp)) - return; - if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX)) - return; - - switch (BP_ABS_FUNC(bp)) { - case 0: - offset = PXP2_REG_PGL_PRETEND_FUNC_F0; - break; - case 1: - offset = PXP2_REG_PGL_PRETEND_FUNC_F1; - break; - case 2: - offset = PXP2_REG_PGL_PRETEND_FUNC_F2; - break; - case 3: - offset = PXP2_REG_PGL_PRETEND_FUNC_F3; - break; - case 4: - offset = PXP2_REG_PGL_PRETEND_FUNC_F4; - break; - case 5: - offset = PXP2_REG_PGL_PRETEND_FUNC_F5; - break; - case 6: - offset = PXP2_REG_PGL_PRETEND_FUNC_F6; - break; - case 7: - offset = PXP2_REG_PGL_PRETEND_FUNC_F7; - break; - default: - return; - } - - REG_WR(bp, offset, pretend_func_num); - REG_RD(bp, offset); - DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num); -} - -void bnx2x_pf_disable(struct bnx2x *bp) -{ - u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); - val &= ~IGU_PF_CONF_FUNC_EN; - - REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); - REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); - REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); -} - -static inline void bnx2x__common_init_phy(struct bnx2x *bp) -{ - u32 shmem_base[2], shmem2_base[2]; - shmem_base[0] = bp->common.shmem_base; - shmem2_base[0] = bp->common.shmem2_base; - if (!CHIP_IS_E1x(bp)) { - shmem_base[1] = - SHMEM2_RD(bp, other_shmem_base_addr); - shmem2_base[1] = - SHMEM2_RD(bp, other_shmem2_base_addr); - } - bnx2x_acquire_phy_lock(bp); - bnx2x_common_init_phy(bp, shmem_base, shmem2_base, - bp->common.chip_id); - bnx2x_release_phy_lock(bp); -} - -/** - * bnx2x_init_hw_common - initialize the HW at the COMMON phase. - * - * @bp: driver handle - */ -static int bnx2x_init_hw_common(struct bnx2x *bp) -{ - u32 val; - - DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); - - bnx2x_reset_common(bp); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); - - val = 0xfffc; - if (CHIP_IS_E3(bp)) { - val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; - val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; - } - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); - - bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); - - if (!CHIP_IS_E1x(bp)) { - u8 abs_func_id; - - /** - * 4-port mode or 2-port mode we need to turn of master-enable - * for everyone, after that, turn it back on for self. 
- * so, we disregard multi-function or not, and always disable
- * for all functions on the given path, this means 0,2,4,6 for
- * path 0 and 1,3,5,7 for path 1
- */
- for (abs_func_id = BP_PATH(bp);
- abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
- if (abs_func_id == BP_ABS_FUNC(bp)) {
- REG_WR(bp,
- PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
- 1);
- continue;
- }
-
- bnx2x_pretend_func(bp, abs_func_id);
- /* clear pf enable */
- bnx2x_pf_disable(bp);
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
- }
- }
-
- bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
- if (CHIP_IS_E1(bp)) {
- /* enable HW interrupt from PXP on USDM overflow
- bit 16 on INT_MASK_0 */
- REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
- }
-
- bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
- bnx2x_init_pxp(bp);
-
-#ifdef __BIG_ENDIAN
- REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
- /* make sure this value is 0 */
- REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
-
-/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
- REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
-#endif
-
- bnx2x_ilt_init_page_size(bp, INITOP_SET);
-
- if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
- REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
-
- /* let the HW do its magic ... */
- msleep(100);
- /* finish PXP init */
- val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
- if (val != 1) {
- BNX2X_ERR("PXP2 CFG failed\n");
- return -EBUSY;
- }
- val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
- if (val != 1) {
- BNX2X_ERR("PXP2 RD_INIT failed\n");
- return -EBUSY;
- }
-
- /* Timers bug workaround E2 only. We need to set the entire ILT to
- * have entries with value "0" and valid bit on.
- * This needs to be done by the first PF that is loaded in a path
- * (i.e. common phase)
- */
- if (!CHIP_IS_E1x(bp)) {
-/* In E2 there is a bug in the timers block that can cause function 6 / 7
- * (i.e. vnic3) to start even if it is marked as "scan-off".
- * This occurs when a different function (func2,3) is being marked
- * as "scan-off". Real-life scenario for example: if a driver is being
- * load-unloaded while func6,7 are down. This will cause the timer to access
- * the ilt, translate to a logical address and send a request to read/write.
- * Since the ilt for the function that is down is not valid, this will cause
- * a translation error which is unrecoverable.
- * The Workaround is intended to make sure that when this happens nothing fatal
- * will occur. The workaround:
- * 1. First PF driver which loads on a path will:
- * a. After taking the chip out of reset, by using pretend,
- * it will write "0" to the following registers of
- * the other vnics.
- * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
- * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
- * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
- * And for itself it will write '1' to
- * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
- * dmae-operations (writing to pram for example.)
- * note: can be done for only function 6,7 but cleaner this
- * way.
- * b. Write zero+valid to the entire ILT.
- * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
- * VNIC3 (of that port). The range allocated will be the
- * entire ILT. This is needed to prevent ILT range error.
- * 2. Any PF driver load flow:
- * a. 
ILT update with the physical addresses of the allocated
- * logical pages.
- * b. Wait 20msec. - note that this timeout is needed to make
- * sure there are no requests in one of the PXP internal
- * queues with "old" ILT addresses.
- * c. PF enable in the PGLC.
- * d. Clear the was_error of the PF in the PGLC. (could have
- * occurred while driver was down)
- * e. PF enable in the CFC (WEAK + STRONG)
- * f. Timers scan enable
- * 3. PF driver unload flow:
- * a. Clear the Timers scan_en.
- * b. Polling for scan_on=0 for that PF.
- * c. Clear the PF enable bit in the PXP.
- * d. Clear the PF enable in the CFC (WEAK + STRONG)
- * e. Write zero+valid to all ILT entries (The valid bit must
- * stay set)
- * f. If this is VNIC 3 of a port then also init
- * first_timers_ilt_entry to zero and last_timers_ilt_entry
- * to the last entry in the ILT.
- *
- * Notes:
- * Currently the PF error in the PGLC is non-recoverable.
- * In the future there will be a recovery routine for this error.
- * Currently attention is masked.
- * Having an MCP lock on the load/unload process does not guarantee that
- * there is no Timer disable during Func6/7 enable. This is because the
- * Timers scan is currently being cleared by the MCP on FLR.
- * Step 2.d can be done only for PF6/7 and the driver can also check if
- * there is error before clearing it. But the flow above is simpler and
- * more general.
- * All ILT entries are written by zero+valid and not just PF6/7
- * ILT entries since in the future the ILT entries allocation for
- * PF-s might be dynamic.
- */
- struct ilt_client_info ilt_cli;
- struct bnx2x_ilt ilt;
- memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
- memset(&ilt, 0, sizeof(struct bnx2x_ilt));
-
- /* initialize dummy TM client */
- ilt_cli.start = 0;
- ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
- ilt_cli.client_num = ILT_CLIENT_TM;
-
- /* Step 1: set zeroes to all ilt page entries with valid bit on
- * Step 2: set the timers first/last ilt entry to point
- * to the entire range to prevent ILT range error for 3rd/4th
- * vnic (this code assumes existence of the vnic)
- *
- * both steps performed by call to bnx2x_ilt_client_init_op()
- * with dummy TM client
- *
- * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
- * and its counterpart are split registers
- */
- bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
- bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
- REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
- REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
- REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
- }
-
-
- REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
- REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
-
- if (!CHIP_IS_E1x(bp)) {
- int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
- (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
- bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
-
- bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
-
- /* let the HW do its magic ... 
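- * (ATC_REG_ATC_INIT_DONE is polled in 200ms steps; the emulation/FPGA
- * factors above stretch the overall timeout budget accordingly 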
*/ - do { - msleep(200); - val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); - } while (factor-- && (val != 1)); - - if (val != 1) { - BNX2X_ERR("ATC_INIT failed\n"); - return -EBUSY; - } - } - - bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); - - /* clean the DMAE memory */ - bp->dmae_ready = 1; - bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); - - bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); - - bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); - - bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); - - bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); - - bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); - bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); - bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); - bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); - - bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); - - - /* QM queues pointers table */ - bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); - - /* soft reset pulse */ - REG_WR(bp, QM_REG_SOFT_RESET, 1); - REG_WR(bp, QM_REG_SOFT_RESET, 0); - -#ifdef BCM_CNIC - bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); -#endif - - bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); - REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); - if (!CHIP_REV_IS_SLOW(bp)) - /* enable hw interrupt from doorbell Q */ - REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); - - bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); - - bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); - REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); - - if (!CHIP_IS_E1(bp)) - REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); - - if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) - /* Bit-map indicating which L2 hdrs may appear - * after the basic Ethernet header - */ - REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, - bp->path_has_ovlan ? 7 : 6); - - bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); - - if (!CHIP_IS_E1x(bp)) { - /* reset VFC memories */ - REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, - VFC_MEMORIES_RST_REG_CAM_RST | - VFC_MEMORIES_RST_REG_RAM_RST); - REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, - VFC_MEMORIES_RST_REG_CAM_RST | - VFC_MEMORIES_RST_REG_RAM_RST); - - msleep(20); - } - - bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); - - /* sync semi rtc */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - 0x80000000); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, - 0x80000000); - - bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); - - if (!CHIP_IS_E1x(bp)) - REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, - bp->path_has_ovlan ? 
7 : 6); - - REG_WR(bp, SRC_REG_SOFT_RST, 1); - - bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); - -#ifdef BCM_CNIC - REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); - REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); - REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); - REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); - REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); - REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); - REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); - REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); - REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); - REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); -#endif - REG_WR(bp, SRC_REG_SOFT_RST, 0); - - if (sizeof(union cdu_context) != 1024) - /* we currently assume that a context is 1024 bytes */ - dev_alert(&bp->pdev->dev, "please adjust the size " - "of cdu_context(%ld)\n", - (long)sizeof(union cdu_context)); - - bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); - val = (4 << 24) + (0 << 12) + 1024; - REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); - - bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); - REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); - /* enable context validation interrupt from CFC */ - REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); - - /* set the thresholds to prevent CFC/CDU race */ - REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); - - bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); - - if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) - REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); - - bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); - bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); - - /* Reset PCIE errors for debug */ - REG_WR(bp, 0x2814, 0xffffffff); - REG_WR(bp, 0x3820, 0xffffffff); - - if (!CHIP_IS_E1x(bp)) { - REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, - (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | - PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); - REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, - (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | - PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | - PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); - REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, - (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | - PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | - PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); - } - - bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); - if (!CHIP_IS_E1(bp)) { - /* in E3 this done in per-port section */ - if (!CHIP_IS_E3(bp)) - REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); - } - if (CHIP_IS_E1H(bp)) - /* not applicable for E2 (and above ...) 
*/ - REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); - - if (CHIP_REV_IS_SLOW(bp)) - msleep(200); - - /* finish CFC init */ - val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); - if (val != 1) { - BNX2X_ERR("CFC LL_INIT failed\n"); - return -EBUSY; - } - val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); - if (val != 1) { - BNX2X_ERR("CFC AC_INIT failed\n"); - return -EBUSY; - } - val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); - if (val != 1) { - BNX2X_ERR("CFC CAM_INIT failed\n"); - return -EBUSY; - } - REG_WR(bp, CFC_REG_DEBUG0, 0); - - if (CHIP_IS_E1(bp)) { - /* read NIG statistic - to see if this is our first up since powerup */ - bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); - val = *bnx2x_sp(bp, wb_data[0]); - - /* do internal memory self test */ - if ((val == 0) && bnx2x_int_mem_test(bp)) { - BNX2X_ERR("internal mem self test failed\n"); - return -EBUSY; - } - } - - bnx2x_setup_fan_failure_detection(bp); - - /* clear PXP2 attentions */ - REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); - - bnx2x_enable_blocks_attention(bp); - bnx2x_enable_blocks_parity(bp); - - if (!BP_NOMCP(bp)) { - if (CHIP_IS_E1x(bp)) - bnx2x__common_init_phy(bp); - } else - BNX2X_ERR("Bootcode is missing - can not initialize link\n"); - - return 0; -} - -/** - * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. - * - * @bp: driver handle - */ -static int bnx2x_init_hw_common_chip(struct bnx2x *bp) -{ - int rc = bnx2x_init_hw_common(bp); - - if (rc) - return rc; - - /* In E2 2-PORT mode, same ext phy is used for the two paths */ - if (!BP_NOMCP(bp)) - bnx2x__common_init_phy(bp); - - return 0; -} - -static int bnx2x_init_hw_port(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; - u32 low, high; - u32 val; - - bnx2x__link_reset(bp); - - DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); - - REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); - - bnx2x_init_block(bp, BLOCK_MISC, init_phase); - bnx2x_init_block(bp, BLOCK_PXP, init_phase); - bnx2x_init_block(bp, BLOCK_PXP2, init_phase); - - /* Timers bug workaround: disables the pf_master bit in pglue at - * common phase, we need to enable it here before any dmae access are - * attempted. Therefore we manually added the enable-master to the - * port phase (it also happens in the function phase) - */ - if (!CHIP_IS_E1x(bp)) - REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); - - bnx2x_init_block(bp, BLOCK_ATC, init_phase); - bnx2x_init_block(bp, BLOCK_DMAE, init_phase); - bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); - bnx2x_init_block(bp, BLOCK_QM, init_phase); - - bnx2x_init_block(bp, BLOCK_TCM, init_phase); - bnx2x_init_block(bp, BLOCK_UCM, init_phase); - bnx2x_init_block(bp, BLOCK_CCM, init_phase); - bnx2x_init_block(bp, BLOCK_XCM, init_phase); - - /* QM cid (connection) count */ - bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); - -#ifdef BCM_CNIC - bnx2x_init_block(bp, BLOCK_TM, init_phase); - REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); - REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); -#endif - - bnx2x_init_block(bp, BLOCK_DORQ, init_phase); - - if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { - bnx2x_init_block(bp, BLOCK_BRB1, init_phase); - - if (IS_MF(bp)) - low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); - else if (bp->dev->mtu > 4096) { - if (bp->flags & ONE_PORT_FLAG) - low = 160; - else { - val = bp->dev->mtu; - /* (24*1024 + val*4)/256 */ - low = 96 + (val/64) + - ((val % 64) ? 1 : 0); - } - } else - low = ((bp->flags & ONE_PORT_FLAG) ? 
80 : 160); - high = low + 56; /* 14*1024/256 */ - REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); - REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); - } - - if (CHIP_MODE_IS_4_PORT(bp)) - REG_WR(bp, (BP_PORT(bp) ? - BRB1_REG_MAC_GUARANTIED_1 : - BRB1_REG_MAC_GUARANTIED_0), 40); - - - bnx2x_init_block(bp, BLOCK_PRS, init_phase); - if (CHIP_IS_E3B0(bp)) - /* Ovlan exists only if we are in multi-function + - * switch-dependent mode, in switch-independent there - * is no ovlan headers - */ - REG_WR(bp, BP_PORT(bp) ? - PRS_REG_HDRS_AFTER_BASIC_PORT_1 : - PRS_REG_HDRS_AFTER_BASIC_PORT_0, - (bp->path_has_ovlan ? 7 : 6)); - - bnx2x_init_block(bp, BLOCK_TSDM, init_phase); - bnx2x_init_block(bp, BLOCK_CSDM, init_phase); - bnx2x_init_block(bp, BLOCK_USDM, init_phase); - bnx2x_init_block(bp, BLOCK_XSDM, init_phase); - - bnx2x_init_block(bp, BLOCK_TSEM, init_phase); - bnx2x_init_block(bp, BLOCK_USEM, init_phase); - bnx2x_init_block(bp, BLOCK_CSEM, init_phase); - bnx2x_init_block(bp, BLOCK_XSEM, init_phase); - - bnx2x_init_block(bp, BLOCK_UPB, init_phase); - bnx2x_init_block(bp, BLOCK_XPB, init_phase); - - bnx2x_init_block(bp, BLOCK_PBF, init_phase); - - if (CHIP_IS_E1x(bp)) { - /* configure PBF to work without PAUSE mtu 9000 */ - REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); - - /* update threshold */ - REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); - /* update init credit */ - REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); - - /* probe changes */ - REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); - udelay(50); - REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); - } - -#ifdef BCM_CNIC - bnx2x_init_block(bp, BLOCK_SRC, init_phase); -#endif - bnx2x_init_block(bp, BLOCK_CDU, init_phase); - bnx2x_init_block(bp, BLOCK_CFC, init_phase); - - if (CHIP_IS_E1(bp)) { - REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); - REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); - } - bnx2x_init_block(bp, BLOCK_HC, init_phase); - - bnx2x_init_block(bp, BLOCK_IGU, init_phase); - - bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); - /* init aeu_mask_attn_func_0/1: - * - SF mode: bits 3-7 are masked. only bits 0-2 are in use - * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF - * bits 4-7 are used for "per vn group attention" */ - val = IS_MF(bp) ? 0xF7 : 0x7; - /* Enable DCBX attention for all but E1 */ - val |= CHIP_IS_E1(bp) ? 0 : 0x10; - REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); - - bnx2x_init_block(bp, BLOCK_NIG, init_phase); - - if (!CHIP_IS_E1x(bp)) { - /* Bit-map indicating which L2 hdrs may appear after the - * basic Ethernet header - */ - REG_WR(bp, BP_PORT(bp) ? - NIG_REG_P1_HDRS_AFTER_BASIC : - NIG_REG_P0_HDRS_AFTER_BASIC, - IS_MF_SD(bp) ? 7 : 6); - - if (CHIP_IS_E3(bp)) - REG_WR(bp, BP_PORT(bp) ? - NIG_REG_LLH1_MF_MODE : - NIG_REG_LLH_MF_MODE, IS_MF(bp)); - } - if (!CHIP_IS_E3(bp)) - REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); - - if (!CHIP_IS_E1(bp)) { - /* 0x2 disable mf_ov, 0x1 enable */ - REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, - (IS_MF_SD(bp) ? 0x1 : 0x2)); - - if (!CHIP_IS_E1x(bp)) { - val = 0; - switch (bp->mf_mode) { - case MULTI_FUNCTION_SD: - val = 1; - break; - case MULTI_FUNCTION_SI: - val = 2; - break; - } - - REG_WR(bp, (BP_PORT(bp) ? 
NIG_REG_LLH1_CLS_TYPE : - NIG_REG_LLH0_CLS_TYPE), val); - } - { - REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); - REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); - REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); - } - } - - - /* If SPIO5 is set to generate interrupts, enable it for this port */ - val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); - if (val & (1 << MISC_REGISTERS_SPIO_5)) { - u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : - MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); - val = REG_RD(bp, reg_addr); - val |= AEU_INPUTS_ATTN_BITS_SPIO5; - REG_WR(bp, reg_addr, val); - } - - return 0; -} - -static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) -{ - int reg; - - if (CHIP_IS_E1(bp)) - reg = PXP2_REG_RQ_ONCHIP_AT + index*8; - else - reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; - - bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); -} - -static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) -{ - bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); -} - -static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) -{ - u32 i, base = FUNC_ILT_BASE(func); - for (i = base; i < base + ILT_PER_FUNC; i++) - bnx2x_ilt_wr(bp, i, 0); -} - -static int bnx2x_init_hw_func(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int func = BP_FUNC(bp); - int init_phase = PHASE_PF0 + func; - struct bnx2x_ilt *ilt = BP_ILT(bp); - u16 cdu_ilt_start; - u32 addr, val; - u32 main_mem_base, main_mem_size, main_mem_prty_clr; - int i, main_mem_width; - - DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); - - /* FLR cleanup - hmmm */ - if (!CHIP_IS_E1x(bp)) - bnx2x_pf_flr_clnup(bp); - - /* set MSI reconfigure capability */ - if (bp->common.int_block == INT_BLOCK_HC) { - addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); - val = REG_RD(bp, addr); - val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; - REG_WR(bp, addr, val); - } - - bnx2x_init_block(bp, BLOCK_PXP, init_phase); - bnx2x_init_block(bp, BLOCK_PXP2, init_phase); - - ilt = BP_ILT(bp); - cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; - - for (i = 0; i < L2_ILT_LINES(bp); i++) { - ilt->lines[cdu_ilt_start + i].page = - bp->context.vcxt + (ILT_PAGE_CIDS * i); - ilt->lines[cdu_ilt_start + i].page_mapping = - bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i); - /* cdu ilt pages are allocated manually so there's no need to - set the size */ - } - bnx2x_ilt_init_op(bp, INITOP_SET); - -#ifdef BCM_CNIC - bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); - - /* T1 hash bits value determines the T1 number of entries */ - REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); -#endif - -#ifndef BCM_CNIC - /* set NIC mode */ - REG_WR(bp, PRS_REG_NIC_MODE, 1); -#endif /* BCM_CNIC */ - - if (!CHIP_IS_E1x(bp)) { - u32 pf_conf = IGU_PF_CONF_FUNC_EN; - - /* Turn on a single ISR mode in IGU if driver is going to use - * INT#x or MSI - */ - if (!(bp->flags & USING_MSIX_FLAG)) - pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; - /* - * Timers workaround bug: function init part. 
- * Need to wait 20msec after initializing ILT, - * needed to make sure there are no requests in - * one of the PXP internal queues with "old" ILT addresses - */ - msleep(20); - /* - * Master enable - Due to WB DMAE writes performed before this - * register is re-initialized as part of the regular function - * init - */ - REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); - /* Enable the function in IGU */ - REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); - } - - bp->dmae_ready = 1; - - bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); - - if (!CHIP_IS_E1x(bp)) - REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); - - bnx2x_init_block(bp, BLOCK_ATC, init_phase); - bnx2x_init_block(bp, BLOCK_DMAE, init_phase); - bnx2x_init_block(bp, BLOCK_NIG, init_phase); - bnx2x_init_block(bp, BLOCK_SRC, init_phase); - bnx2x_init_block(bp, BLOCK_MISC, init_phase); - bnx2x_init_block(bp, BLOCK_TCM, init_phase); - bnx2x_init_block(bp, BLOCK_UCM, init_phase); - bnx2x_init_block(bp, BLOCK_CCM, init_phase); - bnx2x_init_block(bp, BLOCK_XCM, init_phase); - bnx2x_init_block(bp, BLOCK_TSEM, init_phase); - bnx2x_init_block(bp, BLOCK_USEM, init_phase); - bnx2x_init_block(bp, BLOCK_CSEM, init_phase); - bnx2x_init_block(bp, BLOCK_XSEM, init_phase); - - if (!CHIP_IS_E1x(bp)) - REG_WR(bp, QM_REG_PF_EN, 1); - - if (!CHIP_IS_E1x(bp)) { - REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); - REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); - REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); - REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); - } - bnx2x_init_block(bp, BLOCK_QM, init_phase); - - bnx2x_init_block(bp, BLOCK_TM, init_phase); - bnx2x_init_block(bp, BLOCK_DORQ, init_phase); - bnx2x_init_block(bp, BLOCK_BRB1, init_phase); - bnx2x_init_block(bp, BLOCK_PRS, init_phase); - bnx2x_init_block(bp, BLOCK_TSDM, init_phase); - bnx2x_init_block(bp, BLOCK_CSDM, init_phase); - bnx2x_init_block(bp, BLOCK_USDM, init_phase); - bnx2x_init_block(bp, BLOCK_XSDM, init_phase); - bnx2x_init_block(bp, BLOCK_UPB, init_phase); - bnx2x_init_block(bp, BLOCK_XPB, init_phase); - bnx2x_init_block(bp, BLOCK_PBF, init_phase); - if (!CHIP_IS_E1x(bp)) - REG_WR(bp, PBF_REG_DISABLE_PF, 0); - - bnx2x_init_block(bp, BLOCK_CDU, init_phase); - - bnx2x_init_block(bp, BLOCK_CFC, init_phase); - - if (!CHIP_IS_E1x(bp)) - REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); - - if (IS_MF(bp)) { - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); - REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); - } - - bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); - - /* HC init per function */ - if (bp->common.int_block == INT_BLOCK_HC) { - if (CHIP_IS_E1H(bp)) { - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); - - REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); - REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); - } - bnx2x_init_block(bp, BLOCK_HC, init_phase); - - } else { - int num_segs, sb_idx, prod_offset; - - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); - - if (!CHIP_IS_E1x(bp)) { - REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); - REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); - } - - bnx2x_init_block(bp, BLOCK_IGU, init_phase); - - if (!CHIP_IS_E1x(bp)) { - int dsb_idx = 0; - /** - * Producer memory: - * E2 mode: address 0-135 match to the mapping memory; - * 136 - PF0 default prod; 137 - PF1 default prod; - * 138 - PF2 default prod; 139 - PF3 default prod; - * 140 - PF0 attn prod; 141 - PF1 attn prod; - * 142 - PF2 attn prod; 143 - PF3 attn prod; - * 144-147 reserved. 
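- * (editor's note: each producer slot above is one 32-bit word, so in E2 mode the PF1 attention producer, index 141, would sit at IGU_REG_PROD_CONS_MEMORY + 141 * 4; the clearing loops further down compute the same addresses generically from prod_offset.)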
- * - * E1.5 mode - In backward compatible mode; - * for non default SB; each even line in the memory - * holds the U producer and each odd line hold - * the C producer. The first 128 producers are for - * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 - * producers are for the DSB for each PF. - * Each PF has five segments: (the order inside each - * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; - * 132-135 C prods; 136-139 X prods; 140-143 T prods; - * 144-147 attn prods; - */ - /* non-default-status-blocks */ - num_segs = CHIP_INT_MODE_IS_BC(bp) ? - IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; - for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { - prod_offset = (bp->igu_base_sb + sb_idx) * - num_segs; - - for (i = 0; i < num_segs; i++) { - addr = IGU_REG_PROD_CONS_MEMORY + - (prod_offset + i) * 4; - REG_WR(bp, addr, 0); - } - /* send consumer update with value 0 */ - bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, - USTORM_ID, 0, IGU_INT_NOP, 1); - bnx2x_igu_clear_sb(bp, - bp->igu_base_sb + sb_idx); - } - - /* default-status-blocks */ - num_segs = CHIP_INT_MODE_IS_BC(bp) ? - IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; - - if (CHIP_MODE_IS_4_PORT(bp)) - dsb_idx = BP_FUNC(bp); - else - dsb_idx = BP_E1HVN(bp); - - prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? - IGU_BC_BASE_DSB_PROD + dsb_idx : - IGU_NORM_BASE_DSB_PROD + dsb_idx); - - for (i = 0; i < (num_segs * E1HVN_MAX); - i += E1HVN_MAX) { - addr = IGU_REG_PROD_CONS_MEMORY + - (prod_offset + i)*4; - REG_WR(bp, addr, 0); - } - /* send consumer update with 0 */ - if (CHIP_INT_MODE_IS_BC(bp)) { - bnx2x_ack_sb(bp, bp->igu_dsb_id, - USTORM_ID, 0, IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, bp->igu_dsb_id, - CSTORM_ID, 0, IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, bp->igu_dsb_id, - XSTORM_ID, 0, IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, bp->igu_dsb_id, - TSTORM_ID, 0, IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, bp->igu_dsb_id, - ATTENTION_ID, 0, IGU_INT_NOP, 1); - } else { - bnx2x_ack_sb(bp, bp->igu_dsb_id, - USTORM_ID, 0, IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, bp->igu_dsb_id, - ATTENTION_ID, 0, IGU_INT_NOP, 1); - } - bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); - - /* !!! these should become driver const once - rf-tool supports split-68 const */ - REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); - REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); - REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); - REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); - REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); - REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); - } - } - - /* Reset PCIE errors for debug */ - REG_WR(bp, 0x2114, 0xffffffff); - REG_WR(bp, 0x2120, 0xffffffff); - - if (CHIP_IS_E1x(bp)) { - main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ - main_mem_base = HC_REG_MAIN_MEMORY + - BP_PORT(bp) * (main_mem_size * 4); - main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; - main_mem_width = 8; - - val = REG_RD(bp, main_mem_prty_clr); - if (val) - DP(BNX2X_MSG_MCP, "Hmmm... 
Parity errors in HC " - "block during " - "function init (0x%x)!\n", val); - - /* Clear "false" parity errors in MSI-X table */ - for (i = main_mem_base; - i < main_mem_base + main_mem_size * 4; - i += main_mem_width) { - bnx2x_read_dmae(bp, i, main_mem_width / 4); - bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), - i, main_mem_width / 4); - } - /* Clear HC parity attention */ - REG_RD(bp, main_mem_prty_clr); - } - -#ifdef BNX2X_STOP_ON_ERROR - /* Enable STORMs SP logging */ - REG_WR8(bp, BAR_USTRORM_INTMEM + - USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); - REG_WR8(bp, BAR_TSTRORM_INTMEM + - TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); - REG_WR8(bp, BAR_CSTRORM_INTMEM + - CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); - REG_WR8(bp, BAR_XSTRORM_INTMEM + - XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); -#endif - - bnx2x_phy_probe(&bp->link_params); - - return 0; -} - - -void bnx2x_free_mem(struct bnx2x *bp) -{ - /* fastpath */ - bnx2x_free_fp_mem(bp); - /* end of fastpath */ - - BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, - sizeof(struct host_sp_status_block)); - - BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, - bp->fw_stats_data_sz + bp->fw_stats_req_sz); - - BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, - sizeof(struct bnx2x_slowpath)); - - BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping, - bp->context.size); - - bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); - - BNX2X_FREE(bp->ilt->lines); - -#ifdef BCM_CNIC - if (!CHIP_IS_E1x(bp)) - BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, - sizeof(struct host_hc_status_block_e1x)); - - BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); -#endif - - BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); - - BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, - BCM_PAGE_SIZE * NUM_EQ_PAGES); -} - -static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) -{ - int num_groups; - - /* number of eth_queues */ - u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp); - - /* Total number of FW statistics requests = - * 1 for port stats + 1 for PF stats + num_eth_queues */ - bp->fw_stats_num = 2 + num_queue_stats; - - - /* Request is built from stats_query_header and an array of - * stats_query_cmd_group, each of which contains - * STATS_QUERY_CMD_COUNT rules. The real number of requests is - * configured in the stats_query_header. - */ - num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT + - (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0); - - bp->fw_stats_req_sz = sizeof(struct stats_query_header) + - num_groups * sizeof(struct stats_query_cmd_group); - - /* Data for statistics requests + stats_counter - * - * stats_counter holds per-STORM counters that are incremented - * when STORM has finished with the current request.
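- * - * (editor's worked example, assuming for illustration that - * STATS_QUERY_CMD_COUNT is 16: with 8 eth queues, fw_stats_num = 2 + 8 = 10, - * so num_groups = 10 / 16 rounded up = 1 command group in the request.)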
- */ - bp->fw_stats_data_sz = sizeof(struct per_port_stats) + - sizeof(struct per_pf_stats) + - sizeof(struct per_queue_stats) * num_queue_stats + - sizeof(struct stats_counter); - - BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, - bp->fw_stats_data_sz + bp->fw_stats_req_sz); - - /* Set shortcuts */ - bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; - bp->fw_stats_req_mapping = bp->fw_stats_mapping; - - bp->fw_stats_data = (struct bnx2x_fw_stats_data *) - ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); - - bp->fw_stats_data_mapping = bp->fw_stats_mapping + - bp->fw_stats_req_sz; - return 0; - -alloc_mem_err: - BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, - bp->fw_stats_data_sz + bp->fw_stats_req_sz); - return -ENOMEM; -} - - -int bnx2x_alloc_mem(struct bnx2x *bp) -{ -#ifdef BCM_CNIC - if (!CHIP_IS_E1x(bp)) - /* size = the status block + ramrod buffers */ - BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, - sizeof(struct host_hc_status_block_e1x)); - - /* allocate searcher T2 table */ - BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); -#endif - - - BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, - sizeof(struct host_sp_status_block)); - - BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, - sizeof(struct bnx2x_slowpath)); - - /* Allocate memory for FW statistics */ - if (bnx2x_alloc_fw_stats_mem(bp)) - goto alloc_mem_err; - - bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); - - BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, - bp->context.size); - - BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); - - if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) - goto alloc_mem_err; - - /* Slow path ring */ - BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); - - /* EQ */ - BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, - BCM_PAGE_SIZE * NUM_EQ_PAGES); - - - /* fastpath */ - /* needs to be done at the end, since it is self-adjusting to the amount - * of memory available for the RSS queues - */ - if (bnx2x_alloc_fp_mem(bp)) - goto alloc_mem_err; - return 0; - -alloc_mem_err: - bnx2x_free_mem(bp); - return -ENOMEM; -} - -/* - * Init service functions - */ - -int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, - struct bnx2x_vlan_mac_obj *obj, bool set, - int mac_type, unsigned long *ramrod_flags) -{ - int rc; - struct bnx2x_vlan_mac_ramrod_params ramrod_param; - - memset(&ramrod_param, 0, sizeof(ramrod_param)); - - /* Fill general parameters */ - ramrod_param.vlan_mac_obj = obj; - ramrod_param.ramrod_flags = *ramrod_flags; - - /* Fill a user request section if needed */ - if (!test_bit(RAMROD_CONT, ramrod_flags)) { - memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); - - __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); - - /* Set the command: ADD or DEL */ - if (set) - ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; - else - ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; - } - - rc = bnx2x_config_vlan_mac(bp, &ramrod_param); - if (rc < 0) - BNX2X_ERR("%s MAC failed\n", (set ?
"Set" : "Del")); - return rc; -} - -int bnx2x_del_all_macs(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *mac_obj, - int mac_type, bool wait_for_comp) -{ - int rc; - unsigned long ramrod_flags = 0, vlan_mac_flags = 0; - - /* Wait for completion of requested */ - if (wait_for_comp) - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - - /* Set the mac type of addresses we want to clear */ - __set_bit(mac_type, &vlan_mac_flags); - - rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); - if (rc < 0) - BNX2X_ERR("Failed to delete MACs: %d\n", rc); - - return rc; -} - -int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) -{ - unsigned long ramrod_flags = 0; - - DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); - - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - /* Eth MAC is set on RSS leading client (fp[0]) */ - return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, - BNX2X_ETH_MAC, &ramrod_flags); -} - -int bnx2x_setup_leading(struct bnx2x *bp) -{ - return bnx2x_setup_queue(bp, &bp->fp[0], 1); -} - -/** - * bnx2x_set_int_mode - configure interrupt mode - * - * @bp: driver handle - * - * In case of MSI-X it will also try to enable MSI-X. - */ -static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) -{ - switch (int_mode) { - case INT_MODE_MSI: - bnx2x_enable_msi(bp); - /* falling through... */ - case INT_MODE_INTx: - bp->num_queues = 1 + NON_ETH_CONTEXT_USE; - DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); - break; - default: - /* Set number of queues according to bp->multi_mode value */ - bnx2x_set_num_queues(bp); - - DP(NETIF_MSG_IFUP, "set number of queues to %d\n", - bp->num_queues); - - /* if we can't use MSI-X we only need one fp, - * so try to enable MSI-X with the requested number of fp's - * and fallback to MSI or legacy INTx with one fp - */ - if (bnx2x_enable_msix(bp)) { - /* failed to enable MSI-X */ - if (bp->multi_mode) - DP(NETIF_MSG_IFUP, - "Multi requested but failed to " - "enable MSI-X (%d), " - "set number of queues to %d\n", - bp->num_queues, - 1 + NON_ETH_CONTEXT_USE); - bp->num_queues = 1 + NON_ETH_CONTEXT_USE; - - /* Try to enable MSI */ - if (!(bp->flags & DISABLE_MSI_FLAG)) - bnx2x_enable_msi(bp); - } - break; - } -} - -/* must be called prioir to any HW initializations */ -static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) -{ - return L2_ILT_LINES(bp); -} - -void bnx2x_ilt_set_info(struct bnx2x *bp) -{ - struct ilt_client_info *ilt_client; - struct bnx2x_ilt *ilt = BP_ILT(bp); - u16 line = 0; - - ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); - DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); - - /* CDU */ - ilt_client = &ilt->clients[ILT_CLIENT_CDU]; - ilt_client->client_num = ILT_CLIENT_CDU; - ilt_client->page_size = CDU_ILT_PAGE_SZ; - ilt_client->flags = ILT_CLIENT_SKIP_MEM; - ilt_client->start = line; - line += bnx2x_cid_ilt_lines(bp); -#ifdef BCM_CNIC - line += CNIC_ILT_LINES; -#endif - ilt_client->end = line - 1; - - DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, " - "flags 0x%x, hw psz %d\n", - ilt_client->start, - ilt_client->end, - ilt_client->page_size, - ilt_client->flags, - ilog2(ilt_client->page_size >> 12)); - - /* QM */ - if (QM_INIT(bp->qm_cid_count)) { - ilt_client = &ilt->clients[ILT_CLIENT_QM]; - ilt_client->client_num = ILT_CLIENT_QM; - ilt_client->page_size = QM_ILT_PAGE_SZ; - ilt_client->flags = 0; - ilt_client->start = line; - - /* 4 bytes for each cid */ - line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, - QM_ILT_PAGE_SZ); - - ilt_client->end = line - 1; - - DP(BNX2X_MSG_SP, 
"ilt client[QM]: start %d, end %d, psz 0x%x, " - "flags 0x%x, hw psz %d\n", - ilt_client->start, - ilt_client->end, - ilt_client->page_size, - ilt_client->flags, - ilog2(ilt_client->page_size >> 12)); - - } - /* SRC */ - ilt_client = &ilt->clients[ILT_CLIENT_SRC]; -#ifdef BCM_CNIC - ilt_client->client_num = ILT_CLIENT_SRC; - ilt_client->page_size = SRC_ILT_PAGE_SZ; - ilt_client->flags = 0; - ilt_client->start = line; - line += SRC_ILT_LINES; - ilt_client->end = line - 1; - - DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, " - "flags 0x%x, hw psz %d\n", - ilt_client->start, - ilt_client->end, - ilt_client->page_size, - ilt_client->flags, - ilog2(ilt_client->page_size >> 12)); - -#else - ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); -#endif - - /* TM */ - ilt_client = &ilt->clients[ILT_CLIENT_TM]; -#ifdef BCM_CNIC - ilt_client->client_num = ILT_CLIENT_TM; - ilt_client->page_size = TM_ILT_PAGE_SZ; - ilt_client->flags = 0; - ilt_client->start = line; - line += TM_ILT_LINES; - ilt_client->end = line - 1; - - DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, " - "flags 0x%x, hw psz %d\n", - ilt_client->start, - ilt_client->end, - ilt_client->page_size, - ilt_client->flags, - ilog2(ilt_client->page_size >> 12)); - -#else - ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); -#endif - BUG_ON(line > ILT_MAX_LINES); -} - -/** - * bnx2x_pf_q_prep_init - prepare INIT transition parameters - * - * @bp: driver handle - * @fp: pointer to fastpath - * @init_params: pointer to parameters structure - * - * parameters configured: - * - HC configuration - * - Queue's CDU context - */ -static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, - struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) -{ - - u8 cos; - /* FCoE Queue uses Default SB, thus has no HC capabilities */ - if (!IS_FCOE_FP(fp)) { - __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); - __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); - - /* If HC is supporterd, enable host coalescing in the transition - * to INIT state. - */ - __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); - __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); - - /* HC rate */ - init_params->rx.hc_rate = bp->rx_ticks ? - (1000000 / bp->rx_ticks) : 0; - init_params->tx.hc_rate = bp->tx_ticks ? - (1000000 / bp->tx_ticks) : 0; - - /* FW SB ID */ - init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = - fp->fw_sb_id; - - /* - * CQ index among the SB indices: FCoE clients uses the default - * SB, therefore it's different. 
- */ - init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; - init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; - } - - /* set maximum number of COSs supported by this queue */ - init_params->max_cos = fp->max_cos; - - DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d", - fp->index, init_params->max_cos); - - /* set the context pointers queue object */ - for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) - init_params->cxts[cos] = - &bp->context.vcxt[fp->txdata[cos].cid].eth; -} - -int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, - struct bnx2x_queue_state_params *q_params, - struct bnx2x_queue_setup_tx_only_params *tx_only_params, - int tx_index, bool leading) -{ - memset(tx_only_params, 0, sizeof(*tx_only_params)); - - /* Set the command */ - q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; - - /* Set tx-only QUEUE flags: don't zero statistics */ - tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); - - /* choose the index of the cid to send the slow path on */ - tx_only_params->cid_index = tx_index; - - /* Set general TX_ONLY_SETUP parameters */ - bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); - - /* Set Tx TX_ONLY_SETUP parameters */ - bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); - - DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:" - "cos %d, primary cid %d, cid %d, " - "client id %d, sp-client id %d, flags %lx", - tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], - q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, - tx_only_params->gen_params.spcl_id, tx_only_params->flags); - - /* send the ramrod */ - return bnx2x_queue_state_change(bp, q_params); -} - - -/** - * bnx2x_setup_queue - setup queue - * - * @bp: driver handle - * @fp: pointer to fastpath - * @leading: is leading - * - * This function performs 2 steps in a Queue state machine - * actually: 1) RESET->INIT 2) INIT->SETUP - */ - -int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, - bool leading) -{ - struct bnx2x_queue_state_params q_params = {0}; - struct bnx2x_queue_setup_params *setup_params = - &q_params.params.setup; - struct bnx2x_queue_setup_tx_only_params *tx_only_params = - &q_params.params.tx_only; - int rc; - u8 tx_index; - - DP(BNX2X_MSG_SP, "setting up queue %d", fp->index); - - /* reset IGU state skip FCoE L2 queue */ - if (!IS_FCOE_FP(fp)) - bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, - IGU_INT_ENABLE, 0); - - q_params.q_obj = &fp->q_obj; - /* We want to wait for completion in this context */ - __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - - /* Prepare the INIT parameters */ - bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); - - /* Set the command */ - q_params.cmd = BNX2X_Q_CMD_INIT; - - /* Change the state to INIT */ - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) { - BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); - return rc; - } - - DP(BNX2X_MSG_SP, "init complete"); - - - /* Now move the Queue to the SETUP state... 
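- * (editor's sketch of the full chain this function drives, per queue: - * RESET -> INIT -> SETUP -> SETUP_TX_ONLY x (max_cos - 1), - * where each arrow is one bnx2x_queue_state_change() call whose completion - * is awaited via RAMROD_COMP_WAIT.)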
*/ - memset(setup_params, 0, sizeof(*setup_params)); - - /* Set QUEUE flags */ - setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); - - /* Set general SETUP parameters */ - bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, - FIRST_TX_COS_INDEX); - - bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, - &setup_params->rxq_params); - - bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, - FIRST_TX_COS_INDEX); - - /* Set the command */ - q_params.cmd = BNX2X_Q_CMD_SETUP; - - /* Change the state to SETUP */ - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) { - BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); - return rc; - } - - /* loop through the relevant tx-only indices */ - for (tx_index = FIRST_TX_ONLY_COS_INDEX; - tx_index < fp->max_cos; - tx_index++) { - - /* prepare and send tx-only ramrod */ - rc = bnx2x_setup_tx_only(bp, fp, &q_params, - tx_only_params, tx_index, leading); - if (rc) { - BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n", - fp->index, tx_index); - return rc; - } - } - - return rc; -} - -static int bnx2x_stop_queue(struct bnx2x *bp, int index) -{ - struct bnx2x_fastpath *fp = &bp->fp[index]; - struct bnx2x_fp_txdata *txdata; - struct bnx2x_queue_state_params q_params = {0}; - int rc, tx_index; - - DP(BNX2X_MSG_SP, "stopping queue %d cid %d", index, fp->cid); - - q_params.q_obj = &fp->q_obj; - /* We want to wait for completion in this context */ - __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - - - /* close tx-only connections */ - for (tx_index = FIRST_TX_ONLY_COS_INDEX; - tx_index < fp->max_cos; - tx_index++) { - - /* ascertain this is a normal queue */ - txdata = &fp->txdata[tx_index]; - - DP(BNX2X_MSG_SP, "stopping tx-only queue %d", - txdata->txq_index); - - /* send halt terminate on tx-only connection */ - q_params.cmd = BNX2X_Q_CMD_TERMINATE; - memset(&q_params.params.terminate, 0, - sizeof(q_params.params.terminate)); - q_params.params.terminate.cid_index = tx_index; - - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) - return rc; - - /* send cfc del ramrod on tx-only connection */ - q_params.cmd = BNX2X_Q_CMD_CFC_DEL; - memset(&q_params.params.cfc_del, 0, - sizeof(q_params.params.cfc_del)); - q_params.params.cfc_del.cid_index = tx_index; - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) - return rc; - } - /* Stop the primary connection: */ - /* ...halt the connection */ - q_params.cmd = BNX2X_Q_CMD_HALT; - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) - return rc; - - /* ...terminate the connection */ - q_params.cmd = BNX2X_Q_CMD_TERMINATE; - memset(&q_params.params.terminate, 0, - sizeof(q_params.params.terminate)); - q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) - return rc; - /* ...delete cfc entry */ - q_params.cmd = BNX2X_Q_CMD_CFC_DEL; - memset(&q_params.params.cfc_del, 0, - sizeof(q_params.params.cfc_del)); - q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; - return bnx2x_queue_state_change(bp, &q_params); -} - - -static void bnx2x_reset_func(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int func = BP_FUNC(bp); - int i; - - /* Disable the function in the FW */ - REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); - REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); - REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); - REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); - - /* FP SBs */ - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; -
REG_WR8(bp, BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), - SB_DISABLED); - } - -#ifdef BCM_CNIC - /* CNIC SB */ - REG_WR8(bp, BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), - SB_DISABLED); -#endif - /* SP SB */ - REG_WR8(bp, BAR_CSTRORM_INTMEM + - CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), - SB_DISABLED); - - for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) - REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), - 0); - - /* Configure IGU */ - if (bp->common.int_block == INT_BLOCK_HC) { - REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); - REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); - } else { - REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); - REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); - } - -#ifdef BCM_CNIC - /* Disable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); - /* - * Wait for at least 10ms and up to 2 seconds for the timers scan to - * complete - */ - for (i = 0; i < 200; i++) { - msleep(10); - if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) - break; - } -#endif - /* Clear ILT */ - bnx2x_clear_func_ilt(bp, func); - - /* Timers workaround bug for E2: if this is vnic-3, - * we need to set the entire ilt range for these timers. - */ - if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { - struct ilt_client_info ilt_cli; - /* use dummy TM client */ - memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); - ilt_cli.start = 0; - ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; - ilt_cli.client_num = ILT_CLIENT_TM; - - bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); - } - - /* this assumes that reset_port() was called before reset_func() */ - if (!CHIP_IS_E1x(bp)) - bnx2x_pf_disable(bp); - - bp->dmae_ready = 0; -} - -static void bnx2x_reset_port(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - u32 val; - - /* Reset physical Link */ - bnx2x__link_reset(bp); - - REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); - - /* Do not rcv packets to BRB */ - REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); - /* Do not direct rcv packets that are not for MCP to the BRB */ - REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : - NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); - - /* Configure AEU */ - REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); - - msleep(100); - /* Check for BRB port occupancy */ - val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); - if (val) - DP(NETIF_MSG_IFDOWN, - "BRB1 is not empty, %d blocks are occupied\n", val); - - /* TODO: Close Doorbell port? */ -} - -static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) -{ - struct bnx2x_func_state_params func_params = {0}; - - /* Prepare parameters for function state transitions */ - __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); - - func_params.f_obj = &bp->func_obj; - func_params.cmd = BNX2X_F_CMD_HW_RESET; - - func_params.params.hw_init.load_phase = load_code; - - return bnx2x_func_state_change(bp, &func_params); -} - -static inline int bnx2x_func_stop(struct bnx2x *bp) -{ - struct bnx2x_func_state_params func_params = {0}; - int rc; - - /* Prepare parameters for function state transitions */ - __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); - func_params.f_obj = &bp->func_obj; - func_params.cmd = BNX2X_F_CMD_STOP; - - /* - * Try to stop the function the 'good way'. If it fails (in case - * of a parity error during bnx2x_chip_cleanup()) and we are - * not in a debug mode, perform a state transaction in order to - * enable further HW_RESET transaction.
- */ - rc = bnx2x_func_state_change(bp, &func_params); - if (rc) { -#ifdef BNX2X_STOP_ON_ERROR - return rc; -#else - BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry " - "transaction\n"); - __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); - return bnx2x_func_state_change(bp, &func_params); -#endif - } - - return 0; -} - -/** - * bnx2x_send_unload_req - request unload mode from the MCP. - * - * @bp: driver handle - * @unload_mode: requested function's unload mode - * - * Return unload mode returned by the MCP: COMMON, PORT or FUNC. - */ -u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) -{ - u32 reset_code = 0; - int port = BP_PORT(bp); - - /* Select the UNLOAD request mode */ - if (unload_mode == UNLOAD_NORMAL) - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - - else if (bp->flags & NO_WOL_FLAG) - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; - - else if (bp->wol) { - u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; - u8 *mac_addr = bp->dev->dev_addr; - u32 val; - /* The mac address is written to entries 1-4 to - preserve entry 0 which is used by the PMF */ - u8 entry = (BP_E1HVN(bp) + 1)*8; - - val = (mac_addr[0] << 8) | mac_addr[1]; - EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); - - val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | - (mac_addr[4] << 8) | mac_addr[5]; - EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); - - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; - - } else - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - - /* Send the request to the MCP */ - if (!BP_NOMCP(bp)) - reset_code = bnx2x_fw_command(bp, reset_code, 0); - else { - int path = BP_PATH(bp); - - DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] " - "%d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - load_count[path][0]--; - load_count[path][1 + port]--; - DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] " - "%d, %d, %d\n", - path, load_count[path][0], load_count[path][1], - load_count[path][2]); - if (load_count[path][0] == 0) - reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; - else if (load_count[path][1 + port] == 0) - reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; - else - reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; - } - - return reset_code; -} - -/** - * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. - * - * @bp: driver handle - */ -void bnx2x_send_unload_done(struct bnx2x *bp) -{ - /* Report UNLOAD_DONE to MCP */ - if (!BP_NOMCP(bp)) - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); -} - -static inline int bnx2x_func_wait_started(struct bnx2x *bp) -{ - int tout = 50; - int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; - - if (!bp->port.pmf) - return 0; - - /* - * (assumption: No Attention from MCP at this stage) - * PMF probably in the middle of TXdisable/enable transaction - * 1. Sync ISR for default SB - * 2. Sync SP queue - this guarantees us that attention handling started - * 3. Wait until the TXdisable/enable transaction completes - * - * 1+2 guarantee that if DCBx attention was scheduled it already changed - * pending bit of transaction from STARTED-->TX_STOPPED, if we already - * received completion for the transaction the state is TX_STOPPED. - * State will return to STARTED after completion of TX_STOPPED-->STARTED - * transaction.
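- * - * (editor's note: the wait below is bounded; tout = 50 polls of msleep(20) - * each, i.e. roughly one second for the function to return to - * BNX2X_F_STATE_STARTED before the forced recovery path is taken.)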
- */ - - /* make sure default SB ISR is done */ - if (msix) - synchronize_irq(bp->msix_table[0].vector); - else - synchronize_irq(bp->pdev->irq); - - flush_workqueue(bnx2x_wq); - - while (bnx2x_func_get_state(bp, &bp->func_obj) != - BNX2X_F_STATE_STARTED && tout--) - msleep(20); - - if (bnx2x_func_get_state(bp, &bp->func_obj) != - BNX2X_F_STATE_STARTED) { -#ifdef BNX2X_STOP_ON_ERROR - return -EBUSY; -#else - /* - * Failed to complete the transaction in a "good way" - * Force both transactions with CLR bit - */ - struct bnx2x_func_state_params func_params = {0}; - - DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! " - "Forcing STARTED-->TX_STOPPED-->STARTED\n"); - - func_params.f_obj = &bp->func_obj; - __set_bit(RAMROD_DRV_CLR_ONLY, - &func_params.ramrod_flags); - - /* STARTED-->TX_STOPPED */ - func_params.cmd = BNX2X_F_CMD_TX_STOP; - bnx2x_func_state_change(bp, &func_params); - - /* TX_STOPPED-->STARTED */ - func_params.cmd = BNX2X_F_CMD_TX_START; - return bnx2x_func_state_change(bp, &func_params); -#endif - } - - return 0; -} - -void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) -{ - int port = BP_PORT(bp); - int i, rc = 0; - u8 cos; - struct bnx2x_mcast_ramrod_params rparam = {0}; - u32 reset_code; - - /* Wait until tx fastpath tasks complete */ - for_each_tx_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - - for_each_cos_in_tx_queue(fp, cos) - rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]); -#ifdef BNX2X_STOP_ON_ERROR - if (rc) - return; -#endif - } - - /* Give HW time to discard old tx messages */ - usleep_range(1000, 1000); - - /* Clean all ETH MACs */ - rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); - if (rc < 0) - BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); - - /* Clean up UC list */ - rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, - true); - if (rc < 0) - BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: " - "%d\n", rc); - - /* Disable LLH */ - if (!CHIP_IS_E1(bp)) - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); - - /* Set "drop all" (stop Rx). - * We need to take a netif_addr_lock() here in order to prevent - * a race between the completion code and this code. - */ - netif_addr_lock_bh(bp->dev); - /* Schedule the rx_mode command */ - if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) - set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); - else - bnx2x_set_storm_rx_mode(bp); - - /* Cleanup multicast configuration */ - rparam.mcast_obj = &bp->mcast_obj; - rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); - if (rc < 0) - BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); - - netif_addr_unlock_bh(bp->dev); - - - - /* - * Send the UNLOAD_REQUEST to the MCP. This will return if - * this function should perform FUNC, PORT or COMMON HW - * reset. - */ - reset_code = bnx2x_send_unload_req(bp, unload_mode); - - /* - * (assumption: No Attention from MCP at this stage) - * PMF probably in the middle of TXdisable/enable transaction - */ - rc = bnx2x_func_wait_started(bp); - if (rc) { - BNX2X_ERR("bnx2x_func_wait_started failed\n"); -#ifdef BNX2X_STOP_ON_ERROR - return; -#endif - } - - /* Close multi and leading connections - * Completions for ramrods are collected in a synchronous way - */ - for_each_queue(bp, i) - if (bnx2x_stop_queue(bp, i)) -#ifdef BNX2X_STOP_ON_ERROR - return; -#else - goto unload_error; -#endif - /* If SP settings didn't get completed so far - something - * very wrong has happened. - */ - if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) - BNX2X_ERR("Hmmm...
Common slow path ramrods got stuck!\n"); - -#ifndef BNX2X_STOP_ON_ERROR -unload_error: -#endif - rc = bnx2x_func_stop(bp); - if (rc) { - BNX2X_ERR("Function stop failed!\n"); -#ifdef BNX2X_STOP_ON_ERROR - return; -#endif - } - - /* Disable HW interrupts, NAPI */ - bnx2x_netif_stop(bp, 1); - - /* Release IRQs */ - bnx2x_free_irq(bp); - - /* Reset the chip */ - rc = bnx2x_reset_hw(bp, reset_code); - if (rc) - BNX2X_ERR("HW_RESET failed\n"); - - - /* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); -} - -void bnx2x_disable_close_the_gate(struct bnx2x *bp) -{ - u32 val; - - DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n"); - - if (CHIP_IS_E1(bp)) { - int port = BP_PORT(bp); - u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0; - - val = REG_RD(bp, addr); - val &= ~(0x300); - REG_WR(bp, addr, val); - } else { - val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); - val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | - MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); - REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); - } -} - -/* Close gates #2, #3 and #4: */ -static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) -{ - u32 val; - - /* Gates #2 and #4a are closed/opened for "not E1" only */ - if (!CHIP_IS_E1(bp)) { - /* #4 */ - REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); - /* #2 */ - REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); - } - - /* #3 */ - if (CHIP_IS_E1x(bp)) { - /* Prevent interrupts from HC on both ports */ - val = REG_RD(bp, HC_REG_CONFIG_1); - REG_WR(bp, HC_REG_CONFIG_1, - (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : - (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); - - val = REG_RD(bp, HC_REG_CONFIG_0); - REG_WR(bp, HC_REG_CONFIG_0, - (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : - (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); - } else { - /* Prevent incoming interrupts in IGU */ - val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); - - REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, - (!close) ? - (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : - (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); - } - - DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n", - close ? "closing" : "opening"); - mmiowb(); -} - -#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ - -static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) -{ - /* Do some magic... */ - u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); - *magic_val = val & SHARED_MF_CLP_MAGIC; - MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); -} - -/** - * bnx2x_clp_reset_done - restore the value of the `magic' bit. - * - * @bp: driver handle - * @magic_val: old value of the `magic' bit. - */ -static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) -{ - /* Restore the `magic' bit value... */ - u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); - MF_CFG_WR(bp, shared_mf_config.clp_mb, - (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); -} - -/** - * bnx2x_reset_mcp_prep - prepare for MCP reset. - * - * @bp: driver handle - * @magic_val: old value of 'magic' bit. - * - * Takes care of CLP configurations.
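- * - * (editor's sketch of the intended pairing, as the process-kill flow further - * down uses it: - * u32 magic; - * bnx2x_reset_mcp_prep(bp, &magic); - * ... chip reset ... - * bnx2x_reset_mcp_comp(bp, magic); - * where the _comp half re-reads shmem and restores the saved bit.)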
- */ -static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) -{ - u32 shmem; - u32 validity_offset; - - DP(NETIF_MSG_HW, "Starting\n"); - - /* Set `magic' bit in order to save MF config */ - if (!CHIP_IS_E1(bp)) - bnx2x_clp_reset_prep(bp, magic_val); - - /* Get shmem offset */ - shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); - validity_offset = offsetof(struct shmem_region, validity_map[0]); - - /* Clear validity map flags */ - if (shmem > 0) - REG_WR(bp, shmem + validity_offset, 0); -} - -#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ -#define MCP_ONE_TIMEOUT 100 /* 100 ms */ - -/** - * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT - * - * @bp: driver handle - */ -static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) -{ - /* special handling for emulation and FPGA, - wait 10 times longer */ - if (CHIP_REV_IS_SLOW(bp)) - msleep(MCP_ONE_TIMEOUT*10); - else - msleep(MCP_ONE_TIMEOUT); -} - -/* - * initializes bp->common.shmem_base and waits for validity signature to appear - */ -static int bnx2x_init_shmem(struct bnx2x *bp) -{ - int cnt = 0; - u32 val = 0; - - do { - bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); - if (bp->common.shmem_base) { - val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); - if (val & SHR_MEM_VALIDITY_MB) - return 0; - } - - bnx2x_mcp_wait_one(bp); - - } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); - - BNX2X_ERR("BAD MCP validity signature\n"); - - return -ENODEV; -} - -static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) -{ - int rc = bnx2x_init_shmem(bp); - - /* Restore the `magic' bit value */ - if (!CHIP_IS_E1(bp)) - bnx2x_clp_reset_done(bp, magic_val); - - return rc; -} - -static void bnx2x_pxp_prep(struct bnx2x *bp) -{ - if (!CHIP_IS_E1(bp)) { - REG_WR(bp, PXP2_REG_RD_START_INIT, 0); - REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); - mmiowb(); - } -} - -/* - * Reset the whole chip except for: - * - PCIE core - * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by - * one reset bit) - * - IGU - * - MISC (including AEU) - * - GRC - * - RBCN, RBCP - */ -static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) -{ - u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; - u32 global_bits2, stay_reset2; - - /* - * Bits that have to be set in reset_mask2 if we want to reset 'global' - * (per chip) blocks. - */ - global_bits2 = - MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | - MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; - - /* Don't reset the following blocks */ - not_reset_mask1 = - MISC_REGISTERS_RESET_REG_1_RST_HC | - MISC_REGISTERS_RESET_REG_1_RST_PXPV | - MISC_REGISTERS_RESET_REG_1_RST_PXP; - - not_reset_mask2 = - MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | - MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | - MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | - MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | - MISC_REGISTERS_RESET_REG_2_RST_RBCN | - MISC_REGISTERS_RESET_REG_2_RST_GRC | - MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | - MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | - MISC_REGISTERS_RESET_REG_2_RST_ATC | - MISC_REGISTERS_RESET_REG_2_PGLC; - - /* - * Keep the following blocks in reset: - * - all xxMACs are handled by the bnx2x_link code. 
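- * (editor's note, inferred from the stay_reset2 handling below: writing a - * mask to a RESET_REG_x_CLEAR register drops those blocks into reset while - * writing it to RESET_REG_x_SET releases them, so the sequence resets - * everything outside the not_reset masks and then releases all of it - * except stay_reset2.)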
- */ - stay_reset2 = - MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | - MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | - MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | - MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | - MISC_REGISTERS_RESET_REG_2_UMAC0 | - MISC_REGISTERS_RESET_REG_2_UMAC1 | - MISC_REGISTERS_RESET_REG_2_XMAC | - MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; - - /* Full reset masks according to the chip */ - reset_mask1 = 0xffffffff; - - if (CHIP_IS_E1(bp)) - reset_mask2 = 0xffff; - else if (CHIP_IS_E1H(bp)) - reset_mask2 = 0x1ffff; - else if (CHIP_IS_E2(bp)) - reset_mask2 = 0xfffff; - else /* CHIP_IS_E3 */ - reset_mask2 = 0x3ffffff; - - /* Don't reset global blocks unless we need to */ - if (!global) - reset_mask2 &= ~global_bits2; - - /* - * In case of attention in the QM, we need to reset PXP - * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM - * because otherwise QM reset would release 'close the gates' shortly - * before resetting the PXP, then the PSWRQ would send a write - * request to PGLUE. Then when PXP is reset, PGLUE would try to - * read the payload data from PSWWR, but PSWWR would not - * respond. The write queue in PGLUE would get stuck, dmae commands - * would not return. Therefore it's important to reset the second - * reset register (containing the - * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the - * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM - * bit). - */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - reset_mask2 & (~not_reset_mask2)); - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - reset_mask1 & (~not_reset_mask1)); - - barrier(); - mmiowb(); - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - reset_mask2 & (~stay_reset2)); - - barrier(); - mmiowb(); - - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); - mmiowb(); -} - -/** - * bnx2x_er_poll_igu_vq - poll for pending writes bit. - * - * @bp: driver handle - * - * It should get cleared in no more than 1s. Returns 0 if - * pending writes bit gets cleared.
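- * - * (editor's note: the implementation below polls IGU_REG_PENDING_BITS_STATUS - * up to cnt = 1000 times with ~1ms sleeps, which is where the one-second - * bound above comes from.)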
- */
-static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
-{
-	int cnt = 1000;
-	u32 pend_bits = 0;
-
-	do {
-		pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
-
-		if (pend_bits == 0)
-			break;
-
-		usleep_range(1000, 1000);
-	} while (cnt-- > 0);
-
-	if (cnt <= 0) {
-		BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
-			  pend_bits);
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int bnx2x_process_kill(struct bnx2x *bp, bool global)
-{
-	int cnt = 1000;
-	u32 val = 0;
-	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
-
-
-	/* Empty the Tetris buffer, wait for 1s */
-	do {
-		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
-		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
-		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
-		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
-		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
-		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
-		    ((port_is_idle_0 & 0x1) == 0x1) &&
-		    ((port_is_idle_1 & 0x1) == 0x1) &&
-		    (pgl_exp_rom2 == 0xffffffff))
-			break;
-		usleep_range(1000, 1000);
-	} while (cnt-- > 0);
-
-	if (cnt <= 0) {
-		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
-		   " are still outstanding read requests after 1s!\n");
-		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
-		   " port_is_idle_0=0x%08x,"
-		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
-		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
-		   pgl_exp_rom2);
-		return -EAGAIN;
-	}
-
-	barrier();
-
-	/* Close gates #2, #3 and #4 */
-	bnx2x_set_234_gates(bp, true);
-
-	/* Poll for IGU VQs for 57712 and newer chips */
-	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
-		return -EAGAIN;
-
-
-	/* TBD: Indicate that "process kill" is in progress to MCP */
-
-	/* Clear "unprepared" bit */
-	REG_WR(bp, MISC_REG_UNPREPARED, 0);
-	barrier();
-
-	/* Make sure all is written to the chip before the reset */
-	mmiowb();
-
-	/* Wait for 1ms to empty GLUE and PCI-E core queues,
-	 * PSWHST, GRC and PSWRD Tetris buffer.
-	 */
-	usleep_range(1000, 1000);
-
-	/* Prepare for chip reset: */
-	/* MCP */
-	if (global)
-		bnx2x_reset_mcp_prep(bp, &val);
-
-	/* PXP */
-	bnx2x_pxp_prep(bp);
-	barrier();
-
-	/* reset the chip */
-	bnx2x_process_kill_chip_reset(bp, global);
-	barrier();
-
-	/* Recover after reset: */
-	/* MCP */
-	if (global && bnx2x_reset_mcp_comp(bp, val))
-		return -EAGAIN;
-
-	/* TBD: Add resetting the NO_MCP mode DB here */
-
-	/* PXP */
-	bnx2x_pxp_prep(bp);
-
-	/* Open the gates #2, #3 and #4 */
-	bnx2x_set_234_gates(bp, false);
-
-	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
-	 * reset state, re-enable attentions. */
-
-	return 0;
-}
-
-int bnx2x_leader_reset(struct bnx2x *bp)
-{
-	int rc = 0;
-	bool global = bnx2x_reset_is_global(bp);
-
-	/* Try to recover after the failure */
-	if (bnx2x_process_kill(bp, global)) {
-		netdev_err(bp->dev, "Something bad happened on engine %d!\n",
-			   BP_PATH(bp));
-		rc = -EAGAIN;
-		goto exit_leader_reset;
-	}
-
-	/*
-	 * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
-	 * state.
-	 */
-	bnx2x_set_reset_done(bp);
-	if (global)
-		bnx2x_clear_reset_global(bp);
-
-exit_leader_reset:
-	bp->is_leader = 0;
-	bnx2x_release_leader_lock(bp);
-	smp_mb();
-	return rc;
-}
-
-static inline void bnx2x_recovery_failed(struct bnx2x *bp)
-{
-	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
-
-	/* Disconnect this device */
-	netif_device_detach(bp->dev);
-
-	/*
-	 * Block ifup for all functions on this engine until "process kill"
-	 * or power cycle.
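/*
 * [Editor's sketch, not part of the patch. bnx2x_leader_reset() above
 * implements "whoever wins the trylock drives recovery, then releases
 * leadership". A compressed single-process illustration; the lock and
 * reset primitives are stand-ins for the HW lock and process-kill flow:]
 */
#include <stdbool.h>
#include <stdio.h>

static bool leader_lock_taken;

static bool trylock_leader(void)	/* HW resource lock in the driver */
{
	if (leader_lock_taken)
		return false;
	return leader_lock_taken = true;
}

static int process_kill(bool global)
{
	printf("resetting chip, global=%d\n", global);
	return 0;	/* 0 on success, as in the driver */
}

static int leader_reset(bool global)
{
	int rc = process_kill(global) ? -1 : 0;

	leader_lock_taken = false;	/* bnx2x_release_leader_lock() */
	return rc;
}

int main(void)
{
	if (trylock_leader())
		printf("leader_reset() = %d\n", leader_reset(true));
	else
		printf("not leader; waiting for recovery\n");
	return 0;
}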
- */
-	bnx2x_set_reset_in_progress(bp);
-
-	/* Shut down the power */
-	bnx2x_set_power_state(bp, PCI_D3hot);
-
-	bp->recovery_state = BNX2X_RECOVERY_FAILED;
-
-	smp_mb();
-}
-
-/*
- * Assumption: runs under rtnl lock. This together with the fact
- * that it's called only from bnx2x_sp_rtnl() ensure that it
- * will never be called when netif_running(bp->dev) is false.
- */
-static void bnx2x_parity_recover(struct bnx2x *bp)
-{
-	bool global = false;
-
-	DP(NETIF_MSG_HW, "Handling parity\n");
-	while (1) {
-		switch (bp->recovery_state) {
-		case BNX2X_RECOVERY_INIT:
-			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
-			bnx2x_chk_parity_attn(bp, &global, false);
-
-			/* Try to get a LEADER_LOCK HW lock */
-			if (bnx2x_trylock_leader_lock(bp)) {
-				bnx2x_set_reset_in_progress(bp);
-				/*
-				 * Check if there is a global attention and if
-				 * there was a global attention, set the global
-				 * reset bit.
-				 */
-
-				if (global)
-					bnx2x_set_reset_global(bp);
-
-				bp->is_leader = 1;
-			}
-
-			/* Stop the driver */
-			/* If interface has been removed - break */
-			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
-				return;
-
-			bp->recovery_state = BNX2X_RECOVERY_WAIT;
-
-			/*
-			 * Reset MCP command sequence number and MCP mail box
-			 * sequence as we are going to reset the MCP.
-			 */
-			if (global) {
-				bp->fw_seq = 0;
-				bp->fw_drv_pulse_wr_seq = 0;
-			}
-
-			/* Ensure "is_leader", MCP command sequence and
-			 * "recovery_state" update values are seen on other
-			 * CPUs.
-			 */
-			smp_mb();
-			break;
-
-		case BNX2X_RECOVERY_WAIT:
-			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
-			if (bp->is_leader) {
-				int other_engine = BP_PATH(bp) ? 0 : 1;
-				u32 other_load_counter =
-					bnx2x_get_load_cnt(bp, other_engine);
-				u32 load_counter =
-					bnx2x_get_load_cnt(bp, BP_PATH(bp));
-				global = bnx2x_reset_is_global(bp);
-
-				/*
-				 * In case of a parity in a global block, let
-				 * the first leader that performs a
-				 * leader_reset() reset the global blocks in
-				 * order to clear global attentions. Otherwise
-				 * the gates will remain closed for that
-				 * engine.
-				 */
-				if (load_counter ||
-				    (global && other_load_counter)) {
-					/* Wait until all other functions get
-					 * down.
-					 */
-					schedule_delayed_work(&bp->sp_rtnl_task,
-							      HZ/10);
-					return;
-				} else {
-					/* If all other functions got down -
-					 * try to bring the chip back to
-					 * normal. In any case it's an exit
-					 * point for a leader.
-					 */
-					if (bnx2x_leader_reset(bp)) {
-						bnx2x_recovery_failed(bp);
-						return;
-					}
-
-					/* If we are here, it means that the
-					 * leader has succeeded and doesn't
-					 * want to be a leader any more. Try
-					 * to continue as a non-leader.
-					 */
-					break;
-				}
-			} else { /* non-leader */
-				if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
-					/* Try to get a LEADER_LOCK HW lock,
-					 * since a former leader may have
-					 * been unloaded by the user or may
-					 * have released leadership for
-					 * another reason.
-					 */
-					if (bnx2x_trylock_leader_lock(bp)) {
-						/* I'm a leader now! Restart a
-						 * switch case.
-						 */
-						bp->is_leader = 1;
-						break;
-					}
-
-					schedule_delayed_work(&bp->sp_rtnl_task,
-							      HZ/10);
-					return;
-
-				} else {
-					/*
-					 * If there was a global attention, wait
-					 * for it to be cleared.
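/*
 * [Editor's sketch, not part of the patch. The recovery handler above is
 * a while(1)+switch state machine: each case either advances the state and
 * loops again, or returns so the delayed work can re-invoke it later.
 * Skeleton form with abbreviated stand-in states:]
 */
#include <stdio.h>

enum rec_state { REC_INIT, REC_WAIT, REC_DONE };

static void parity_recover(void)
{
	enum rec_state state = REC_INIT;

	while (1) {
		switch (state) {
		case REC_INIT:
			printf("INIT: stop traffic, try to become leader\n");
			state = REC_WAIT;
			break;		/* loop again in the new state */
		case REC_WAIT:
			printf("WAIT: peers are down, reset the chip\n");
			state = REC_DONE;
			break;
		default:
			return;		/* DONE (or unknown): exit */
		}
	}
}

int main(void)
{
	parity_recover();
	return 0;
}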
- */
-					if (bnx2x_reset_is_global(bp)) {
-						schedule_delayed_work(
-							&bp->sp_rtnl_task,
-							HZ/10);
-						return;
-					}
-
-					if (bnx2x_nic_load(bp, LOAD_NORMAL))
-						bnx2x_recovery_failed(bp);
-					else {
-						bp->recovery_state =
-							BNX2X_RECOVERY_DONE;
-						smp_mb();
-					}
-
-					return;
-				}
-			}
-		default:
-			return;
-		}
-	}
-}
-
-/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
- * scheduled on a general queue in order to prevent a deadlock.
- */
-static void bnx2x_sp_rtnl_task(struct work_struct *work)
-{
-	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
-
-	rtnl_lock();
-
-	if (!netif_running(bp->dev))
-		goto sp_rtnl_exit;
-
-	/* if stop on error is defined no recovery flows should be executed */
-#ifdef BNX2X_STOP_ON_ERROR
-	BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined, "
-		  "so reset not done to allow debug dump;\n"
-		  "you will need to reboot when done\n");
-	goto sp_rtnl_not_reset;
-#endif
-
-	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
-		/*
-		 * Clear all pending SP commands as we are going to reset the
-		 * function anyway.
-		 */
-		bp->sp_rtnl_state = 0;
-		smp_mb();
-
-		bnx2x_parity_recover(bp);
-
-		goto sp_rtnl_exit;
-	}
-
-	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
-		/*
-		 * Clear all pending SP commands as we are going to reset the
-		 * function anyway.
-		 */
-		bp->sp_rtnl_state = 0;
-		smp_mb();
-
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		bnx2x_nic_load(bp, LOAD_NORMAL);
-
-		goto sp_rtnl_exit;
-	}
-#ifdef BNX2X_STOP_ON_ERROR
-sp_rtnl_not_reset:
-#endif
-	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
-		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
-
-sp_rtnl_exit:
-	rtnl_unlock();
-}
-
-/* end of nic load/unload */
-
-static void bnx2x_period_task(struct work_struct *work)
-{
-	struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
-
-	if (!netif_running(bp->dev))
-		goto period_task_exit;
-
-	if (CHIP_REV_IS_SLOW(bp)) {
-		BNX2X_ERR("period task called on emulation, ignoring\n");
-		goto period_task_exit;
-	}
-
-	bnx2x_acquire_phy_lock(bp);
-	/*
-	 * The barrier is needed to ensure the ordering between the writing to
-	 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
-	 * the reading here.
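/*
 * [Editor's sketch, not part of the patch. bnx2x_period_task() above only
 * re-queues itself while the port is PMF, so the periodic work dies out on
 * its own once the condition clears. A plain-C analogue, with sleep(1)
 * standing in for queue_delayed_work(..., 1*HZ):]
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool still_pmf(int tick)
{
	return tick < 3;	/* pretend PMF is lost after 3 ticks */
}

int main(void)
{
	int tick = 0;

	while (still_pmf(tick)) {		/* if (bp->port.pmf) */
		printf("periodic link work, tick %d\n", tick);
		sleep(1);			/* re-queue in 1 sec */
		tick++;
	}
	printf("no longer PMF, task not re-armed\n");
	return 0;
}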
- */ - smp_mb(); - if (bp->port.pmf) { - bnx2x_period_func(&bp->link_params, &bp->link_vars); - - /* Re-queue task in 1 sec */ - queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); - } - - bnx2x_release_phy_lock(bp); -period_task_exit: - return; -} - -/* - * Init service functions - */ - -static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) -{ - u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; - u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; - return base + (BP_ABS_FUNC(bp)) * stride; -} - -static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp) -{ - u32 reg = bnx2x_get_pretend_reg(bp); - - /* Flush all outstanding writes */ - mmiowb(); - - /* Pretend to be function 0 */ - REG_WR(bp, reg, 0); - REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */ - - /* From now we are in the "like-E1" mode */ - bnx2x_int_disable(bp); - - /* Flush all outstanding writes */ - mmiowb(); - - /* Restore the original function */ - REG_WR(bp, reg, BP_ABS_FUNC(bp)); - REG_RD(bp, reg); -} - -static inline void bnx2x_undi_int_disable(struct bnx2x *bp) -{ - if (CHIP_IS_E1(bp)) - bnx2x_int_disable(bp); - else - bnx2x_undi_int_disable_e1h(bp); -} - -static void __devinit bnx2x_undi_unload(struct bnx2x *bp) -{ - u32 val; - - /* Check if there is any driver already loaded */ - val = REG_RD(bp, MISC_REG_UNPREPARED); - if (val == 0x1) { - /* Check if it is the UNDI driver - * UNDI driver initializes CID offset for normal bell to 0x7 - */ - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); - val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); - if (val == 0x7) { - u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - /* save our pf_num */ - int orig_pf_num = bp->pf_num; - int port; - u32 swap_en, swap_val, value; - - /* clear the UNDI indication */ - REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); - - BNX2X_DEV_INFO("UNDI is active! reset device\n"); - - /* try unload UNDI on port 0 */ - bp->pf_num = 0; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - reset_code = bnx2x_fw_command(bp, reset_code, 0); - - /* if UNDI is loaded on the other port */ - if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { - - /* send "DONE" for previous unload */ - bnx2x_fw_command(bp, - DRV_MSG_CODE_UNLOAD_DONE, 0); - - /* unload UNDI on port 1 */ - bp->pf_num = 1; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - - bnx2x_fw_command(bp, reset_code, 0); - } - - /* now it's safe to release the lock */ - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); - - bnx2x_undi_int_disable(bp); - port = BP_PORT(bp); - - /* close input traffic and wait for it */ - /* Do not rcv packets to BRB */ - REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK : - NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); - /* Do not direct rcv packets that are not for MCP to - * the BRB */ - REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : - NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); - /* clear AEU */ - REG_WR(bp, (port ? 
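/*
 * [Editor's sketch, not part of the patch. bnx2x_get_pretend_reg() above
 * derives a per-function register address as base + func * stride, where
 * the stride is simply the distance between two adjacent per-function
 * registers. Addresses below are made-up stand-ins:]
 */
#include <stdio.h>

#define PRETEND_FUNC_F0 0x1000u		/* stand-in register addresses */
#define PRETEND_FUNC_F1 0x1008u

static unsigned int pretend_reg(unsigned int func)
{
	unsigned int base   = PRETEND_FUNC_F0;
	unsigned int stride = PRETEND_FUNC_F1 - PRETEND_FUNC_F0;

	return base + func * stride;
}

int main(void)
{
	for (unsigned int f = 0; f < 4; f++)
		printf("func %u -> reg 0x%04x\n", f, pretend_reg(f));
	return 0;
}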
MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); - msleep(10); - - /* save NIG port swap info */ - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - /* reset device */ - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - 0xd3ffffff); - - value = 0x1400; - if (CHIP_IS_E3(bp)) { - value |= MISC_REGISTERS_RESET_REG_2_MSTAT0; - value |= MISC_REGISTERS_RESET_REG_2_MSTAT1; - } - - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - value); - - /* take the NIG out of reset and restore swap values */ - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, - MISC_REGISTERS_RESET_REG_1_RST_NIG); - REG_WR(bp, NIG_REG_PORT_SWAP, swap_val); - REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en); - - /* send unload done to the MCP */ - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); - - /* restore our func and fw_seq */ - bp->pf_num = orig_pf_num; - bp->fw_seq = - (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - } else - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); - } -} - -static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) -{ - u32 val, val2, val3, val4, id; - u16 pmc; - - /* Get the chip revision id and number. */ - /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ - val = REG_RD(bp, MISC_REG_CHIP_NUM); - id = ((val & 0xffff) << 16); - val = REG_RD(bp, MISC_REG_CHIP_REV); - id |= ((val & 0xf) << 12); - val = REG_RD(bp, MISC_REG_CHIP_METAL); - id |= ((val & 0xff) << 4); - val = REG_RD(bp, MISC_REG_BOND_ID); - id |= (val & 0xf); - bp->common.chip_id = id; - - /* Set doorbell size */ - bp->db_size = (1 << BNX2X_DB_SHIFT); - - if (!CHIP_IS_E1x(bp)) { - val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); - if ((val & 1) == 0) - val = REG_RD(bp, MISC_REG_PORT4MODE_EN); - else - val = (val >> 1) & 1; - BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : - "2_PORT_MODE"); - bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : - CHIP_2_PORT_MODE; - - if (CHIP_MODE_IS_4_PORT(bp)) - bp->pfid = (bp->pf_num >> 1); /* 0..3 */ - else - bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ - } else { - bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ - bp->pfid = bp->pf_num; /* 0..7 */ - } - - bp->link_params.chip_id = bp->common.chip_id; - BNX2X_DEV_INFO("chip ID is 0x%x\n", id); - - val = (REG_RD(bp, 0x2874) & 0x55); - if ((bp->common.chip_id & 0x1) || - (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { - bp->flags |= ONE_PORT_FLAG; - BNX2X_DEV_INFO("single port device\n"); - } - - val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); - bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << - (val & MCPR_NVM_CFG4_FLASH_SIZE)); - BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", - bp->common.flash_size, bp->common.flash_size); - - bnx2x_init_shmem(bp); - - - - bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
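/*
 * [Editor's sketch, not part of the patch. The chip id assembled above
 * packs four register fields as num:16-31, rev:12-15, metal:4-11,
 * bond_id:0-3. Packing and unpacking in isolation (values illustrative):]
 */
#include <stdio.h>

static unsigned int pack_chip_id(unsigned int num, unsigned int rev,
				 unsigned int metal, unsigned int bond)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}

int main(void)
{
	unsigned int id = pack_chip_id(0x164e, 0x1, 0x00, 0x0);

	printf("chip_id=0x%08x num=0x%x rev=0x%x metal=0x%x bond=0x%x\n",
	       id, id >> 16, (id >> 12) & 0xf, (id >> 4) & 0xff, id & 0xf);
	return 0;
}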
- MISC_REG_GENERIC_CR_1 : - MISC_REG_GENERIC_CR_0)); - - bp->link_params.shmem_base = bp->common.shmem_base; - bp->link_params.shmem2_base = bp->common.shmem2_base; - BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", - bp->common.shmem_base, bp->common.shmem2_base); - - if (!bp->common.shmem_base) { - BNX2X_DEV_INFO("MCP not active\n"); - bp->flags |= NO_MCP_FLAG; - return; - } - - bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); - BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); - - bp->link_params.hw_led_mode = ((bp->common.hw_config & - SHARED_HW_CFG_LED_MODE_MASK) >> - SHARED_HW_CFG_LED_MODE_SHIFT); - - bp->link_params.feature_config_flags = 0; - val = SHMEM_RD(bp, dev_info.shared_feature_config.config); - if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) - bp->link_params.feature_config_flags |= - FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; - else - bp->link_params.feature_config_flags &= - ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; - - val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; - bp->common.bc_ver = val; - BNX2X_DEV_INFO("bc_ver %X\n", val); - if (val < BNX2X_BC_VER) { - /* for now only warn - * later we might need to enforce this */ - BNX2X_ERR("This driver needs bc_ver %X but found %X, " - "please upgrade BC\n", BNX2X_BC_VER, val); - } - bp->link_params.feature_config_flags |= - (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? - FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; - - bp->link_params.feature_config_flags |= - (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? - FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; - - bp->link_params.feature_config_flags |= - (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? - FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; - - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); - bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; - - BNX2X_DEV_INFO("%sWoL capable\n", - (bp->flags & NO_WOL_FLAG) ? "not " : ""); - - val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); - val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); - val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); - val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); - - dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", - val, val2, val3, val4); -} - -#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) -#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) - -static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) -{ - int pfid = BP_FUNC(bp); - int vn = BP_E1HVN(bp); - int igu_sb_id; - u32 val; - u8 fid, igu_sb_cnt = 0; - - bp->igu_base_sb = 0xff; - if (CHIP_INT_MODE_IS_BC(bp)) { - igu_sb_cnt = bp->igu_sb_cnt; - bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * - FP_SB_MAX_E1x; - - bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + - (CHIP_MODE_IS_4_PORT(bp) ? 
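/*
 * [Editor's sketch, not part of the patch. The bootcode feature flags set
 * above follow one pattern: a flag is present iff bc_ver meets a
 * per-feature minimum. The thresholds and flag bits here are stand-ins,
 * not the real REQ_BC_VER_* / FEATURE_CONFIG_* values:]
 */
#include <stdio.h>

#define FEAT_OPT_MDL_VRFY (1u << 0)
#define FEAT_SFP_TX_DIS   (1u << 1)

static unsigned int bc_feature_flags(unsigned int bc_ver)
{
	unsigned int flags = 0;

	flags |= (bc_ver >= 0x0504) ? FEAT_OPT_MDL_VRFY : 0;
	flags |= (bc_ver >= 0x0607) ? FEAT_SFP_TX_DIS : 0;
	return flags;
}

int main(void)
{
	printf("bc 0x0600 -> flags 0x%x\n", bc_feature_flags(0x0600));
	printf("bc 0x0700 -> flags 0x%x\n", bc_feature_flags(0x0700));
	return 0;
}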
pfid : vn);
-
-		return;
-	}
-
-	/* IGU in normal mode - read CAM */
-	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
-	     igu_sb_id++) {
-		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
-		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
-			continue;
-		fid = IGU_FID(val);
-		if ((fid & IGU_FID_ENCODE_IS_PF)) {
-			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
-				continue;
-			if (IGU_VEC(val) == 0)
-				/* default status block */
-				bp->igu_dsb_id = igu_sb_id;
-			else {
-				if (bp->igu_base_sb == 0xff)
-					bp->igu_base_sb = igu_sb_id;
-				igu_sb_cnt++;
-			}
-		}
-	}
-
-#ifdef CONFIG_PCI_MSI
-	/*
-	 * It's expected that the number of CAM entries for this function is
-	 * equal to the number evaluated based on the MSI-X table size. We
-	 * want a harsh warning if these values are different!
-	 */
-	WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
-#endif
-
-	if (igu_sb_cnt == 0)
-		BNX2X_ERR("CAM configuration error\n");
-}
-
-static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
-						    u32 switch_cfg)
-{
-	int cfg_size = 0, idx, port = BP_PORT(bp);
-
-	/* Aggregation of supported attributes of all external phys */
-	bp->port.supported[0] = 0;
-	bp->port.supported[1] = 0;
-	switch (bp->link_params.num_phys) {
-	case 1:
-		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
-		cfg_size = 1;
-		break;
-	case 2:
-		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
-		cfg_size = 1;
-		break;
-	case 3:
-		if (bp->link_params.multi_phy_config &
-		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
-			bp->port.supported[1] =
-				bp->link_params.phy[EXT_PHY1].supported;
-			bp->port.supported[0] =
-				bp->link_params.phy[EXT_PHY2].supported;
-		} else {
-			bp->port.supported[0] =
-				bp->link_params.phy[EXT_PHY1].supported;
-			bp->port.supported[1] =
-				bp->link_params.phy[EXT_PHY2].supported;
-		}
-		cfg_size = 2;
-		break;
-	}
-
-	if (!(bp->port.supported[0] || bp->port.supported[1])) {
-		BNX2X_ERR("NVRAM config error. BAD phy config."
- "PHY1 config 0x%x, PHY2 config 0x%x\n", - SHMEM_RD(bp, - dev_info.port_hw_config[port].external_phy_config), - SHMEM_RD(bp, - dev_info.port_hw_config[port].external_phy_config2)); - return; - } - - if (CHIP_IS_E3(bp)) - bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); - else { - switch (switch_cfg) { - case SWITCH_CFG_1G: - bp->port.phy_addr = REG_RD( - bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); - break; - case SWITCH_CFG_10G: - bp->port.phy_addr = REG_RD( - bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); - break; - default: - BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", - bp->port.link_config[0]); - return; - } - } - BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); - /* mask what we support according to speed_cap_mask per configuration */ - for (idx = 0; idx < cfg_size; idx++) { - if (!(bp->link_params.speed_cap_mask[idx] & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) - bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; - - if (!(bp->link_params.speed_cap_mask[idx] & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) - bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; - - if (!(bp->link_params.speed_cap_mask[idx] & - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) - bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; - - if (!(bp->link_params.speed_cap_mask[idx] & - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) - bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; - - if (!(bp->link_params.speed_cap_mask[idx] & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) - bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); - - if (!(bp->link_params.speed_cap_mask[idx] & - PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) - bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; - - if (!(bp->link_params.speed_cap_mask[idx] & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) - bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; - - } - - BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], - bp->port.supported[1]); -} - -static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) -{ - u32 link_config, idx, cfg_size = 0; - bp->port.advertising[0] = 0; - bp->port.advertising[1] = 0; - switch (bp->link_params.num_phys) { - case 1: - case 2: - cfg_size = 1; - break; - case 3: - cfg_size = 2; - break; - } - for (idx = 0; idx < cfg_size; idx++) { - bp->link_params.req_duplex[idx] = DUPLEX_FULL; - link_config = bp->port.link_config[idx]; - switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { - case PORT_FEATURE_LINK_SPEED_AUTO: - if (bp->port.supported[idx] & SUPPORTED_Autoneg) { - bp->link_params.req_line_speed[idx] = - SPEED_AUTO_NEG; - bp->port.advertising[idx] |= - bp->port.supported[idx]; - } else { - /* force 10G, no AN */ - bp->link_params.req_line_speed[idx] = - SPEED_10000; - bp->port.advertising[idx] |= - (ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE); - continue; - } - break; - - case PORT_FEATURE_LINK_SPEED_10M_FULL: - if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { - bp->link_params.req_line_speed[idx] = - SPEED_10; - bp->port.advertising[idx] |= - (ADVERTISED_10baseT_Full | - ADVERTISED_TP); - } else { - BNX2X_ERR("NVRAM config error. 
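/*
 * [Editor's sketch, not part of the patch. The loop above clears each
 * SUPPORTED_* bit whose counterpart is missing from speed_cap_mask. The
 * same logic in table-driven form; the bit values are stand-ins for the
 * real PORT_HW_CFG_SPEED_CAPABILITY_* / SUPPORTED_* constants:]
 */
#include <stdio.h>

static const struct { unsigned int cap_bit, sup_bit; } speed_map[] = {
	{ 1u << 0, 1u << 0 },	/* 10M half */
	{ 1u << 1, 1u << 1 },	/* 10M full */
	{ 1u << 4, 1u << 4 },	/* 1G       */
	{ 1u << 6, 1u << 6 },	/* 10G      */
};

static unsigned int filter_supported(unsigned int supported,
				     unsigned int cap_mask)
{
	for (unsigned int i = 0;
	     i < sizeof(speed_map) / sizeof(speed_map[0]); i++)
		if (!(cap_mask & speed_map[i].cap_bit))
			supported &= ~speed_map[i].sup_bit;
	return supported;
}

int main(void)
{
	/* capability mask allows only 10G: everything else is stripped */
	printf("supported -> 0x%x\n", filter_supported(0x53u, 1u << 6));
	return 0;
}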
" - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - link_config, - bp->link_params.speed_cap_mask[idx]); - return; - } - break; - - case PORT_FEATURE_LINK_SPEED_10M_HALF: - if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { - bp->link_params.req_line_speed[idx] = - SPEED_10; - bp->link_params.req_duplex[idx] = - DUPLEX_HALF; - bp->port.advertising[idx] |= - (ADVERTISED_10baseT_Half | - ADVERTISED_TP); - } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - link_config, - bp->link_params.speed_cap_mask[idx]); - return; - } - break; - - case PORT_FEATURE_LINK_SPEED_100M_FULL: - if (bp->port.supported[idx] & - SUPPORTED_100baseT_Full) { - bp->link_params.req_line_speed[idx] = - SPEED_100; - bp->port.advertising[idx] |= - (ADVERTISED_100baseT_Full | - ADVERTISED_TP); - } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - link_config, - bp->link_params.speed_cap_mask[idx]); - return; - } - break; - - case PORT_FEATURE_LINK_SPEED_100M_HALF: - if (bp->port.supported[idx] & - SUPPORTED_100baseT_Half) { - bp->link_params.req_line_speed[idx] = - SPEED_100; - bp->link_params.req_duplex[idx] = - DUPLEX_HALF; - bp->port.advertising[idx] |= - (ADVERTISED_100baseT_Half | - ADVERTISED_TP); - } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - link_config, - bp->link_params.speed_cap_mask[idx]); - return; - } - break; - - case PORT_FEATURE_LINK_SPEED_1G: - if (bp->port.supported[idx] & - SUPPORTED_1000baseT_Full) { - bp->link_params.req_line_speed[idx] = - SPEED_1000; - bp->port.advertising[idx] |= - (ADVERTISED_1000baseT_Full | - ADVERTISED_TP); - } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - link_config, - bp->link_params.speed_cap_mask[idx]); - return; - } - break; - - case PORT_FEATURE_LINK_SPEED_2_5G: - if (bp->port.supported[idx] & - SUPPORTED_2500baseX_Full) { - bp->link_params.req_line_speed[idx] = - SPEED_2500; - bp->port.advertising[idx] |= - (ADVERTISED_2500baseX_Full | - ADVERTISED_TP); - } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - link_config, - bp->link_params.speed_cap_mask[idx]); - return; - } - break; - - case PORT_FEATURE_LINK_SPEED_10G_CX4: - if (bp->port.supported[idx] & - SUPPORTED_10000baseT_Full) { - bp->link_params.req_line_speed[idx] = - SPEED_10000; - bp->port.advertising[idx] |= - (ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE); - } else { - BNX2X_ERR("NVRAM config error. " - "Invalid link_config 0x%x" - " speed_cap_mask 0x%x\n", - link_config, - bp->link_params.speed_cap_mask[idx]); - return; - } - break; - case PORT_FEATURE_LINK_SPEED_20G: - bp->link_params.req_line_speed[idx] = SPEED_20000; - - break; - default: - BNX2X_ERR("NVRAM config error. 
" - "BAD link speed link_config 0x%x\n", - link_config); - bp->link_params.req_line_speed[idx] = - SPEED_AUTO_NEG; - bp->port.advertising[idx] = - bp->port.supported[idx]; - break; - } - - bp->link_params.req_flow_ctrl[idx] = (link_config & - PORT_FEATURE_FLOW_CONTROL_MASK); - if ((bp->link_params.req_flow_ctrl[idx] == - BNX2X_FLOW_CTRL_AUTO) && - !(bp->port.supported[idx] & SUPPORTED_Autoneg)) { - bp->link_params.req_flow_ctrl[idx] = - BNX2X_FLOW_CTRL_NONE; - } - - BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl" - " 0x%x advertising 0x%x\n", - bp->link_params.req_line_speed[idx], - bp->link_params.req_duplex[idx], - bp->link_params.req_flow_ctrl[idx], - bp->port.advertising[idx]); - } -} - -static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) -{ - mac_hi = cpu_to_be16(mac_hi); - mac_lo = cpu_to_be32(mac_lo); - memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); - memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); -} - -static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - u32 config; - u32 ext_phy_type, ext_phy_config; - - bp->link_params.bp = bp; - bp->link_params.port = port; - - bp->link_params.lane_config = - SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); - - bp->link_params.speed_cap_mask[0] = - SHMEM_RD(bp, - dev_info.port_hw_config[port].speed_capability_mask); - bp->link_params.speed_cap_mask[1] = - SHMEM_RD(bp, - dev_info.port_hw_config[port].speed_capability_mask2); - bp->port.link_config[0] = - SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); - - bp->port.link_config[1] = - SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); - - bp->link_params.multi_phy_config = - SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); - /* If the device is capable of WoL, set the default state according - * to the HW - */ - config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); - bp->wol = (!(bp->flags & NO_WOL_FLAG) && - (config & PORT_FEATURE_WOL_ENABLED)); - - BNX2X_DEV_INFO("lane_config 0x%08x " - "speed_cap_mask0 0x%08x link_config0 0x%08x\n", - bp->link_params.lane_config, - bp->link_params.speed_cap_mask[0], - bp->port.link_config[0]); - - bp->link_params.switch_cfg = (bp->port.link_config[0] & - PORT_FEATURE_CONNECTED_SWITCH_MASK); - bnx2x_phy_probe(&bp->link_params); - bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); - - bnx2x_link_settings_requested(bp); - - /* - * If connected directly, work with the internal PHY, otherwise, work - * with the external PHY - */ - ext_phy_config = - SHMEM_RD(bp, - dev_info.port_hw_config[port].external_phy_config); - ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); - if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) - bp->mdio.prtad = bp->port.phy_addr; - - else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && - (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) - bp->mdio.prtad = - XGXS_EXT_PHY_ADDR(ext_phy_config); - - /* - * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) - * In MF mode, it is set to cover self test cases - */ - if (IS_MF(bp)) - bp->port.need_hw_lock = 1; - else - bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, - bp->common.shmem_base, - bp->common.shmem2_base); -} - -#ifdef BCM_CNIC -static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) -{ - int port = BP_PORT(bp); - int func = BP_ABS_FUNC(bp); - - u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, - drv_lic_key[port].max_iscsi_conn); - u32 max_fcoe_conn = 
FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
-				drv_lic_key[port].max_fcoe_conn);
-
-	/* Get the number of maximum allowed iSCSI and FCoE connections */
-	bp->cnic_eth_dev.max_iscsi_conn =
-		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
-		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
-
-	bp->cnic_eth_dev.max_fcoe_conn =
-		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
-		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
-
-	/* Read the WWN: */
-	if (!IS_MF(bp)) {
-		/* Port info */
-		bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
-			SHMEM_RD(bp,
-				dev_info.port_hw_config[port].
-				fcoe_wwn_port_name_upper);
-		bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
-			SHMEM_RD(bp,
-				dev_info.port_hw_config[port].
-				fcoe_wwn_port_name_lower);
-
-		/* Node info */
-		bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
-			SHMEM_RD(bp,
-				dev_info.port_hw_config[port].
-				fcoe_wwn_node_name_upper);
-		bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
-			SHMEM_RD(bp,
-				dev_info.port_hw_config[port].
-				fcoe_wwn_node_name_lower);
-	} else if (!IS_MF_SD(bp)) {
-		u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
-
-		/*
-		 * Read the WWN info only if the FCoE feature is enabled for
-		 * this function.
-		 */
-		if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
-			/* Port info */
-			bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
-				MF_CFG_RD(bp, func_ext_config[func].
-					  fcoe_wwn_port_name_upper);
-			bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
-				MF_CFG_RD(bp, func_ext_config[func].
-					  fcoe_wwn_port_name_lower);
-
-			/* Node info */
-			bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
-				MF_CFG_RD(bp, func_ext_config[func].
-					  fcoe_wwn_node_name_upper);
-			bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
-				MF_CFG_RD(bp, func_ext_config[func].
-					  fcoe_wwn_node_name_lower);
-		}
-	}
-
-	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
-		       bp->cnic_eth_dev.max_iscsi_conn,
-		       bp->cnic_eth_dev.max_fcoe_conn);
-
-	/*
-	 * If the maximum allowed number of connections is zero,
-	 * disable the feature.
-	 */
-	if (!bp->cnic_eth_dev.max_iscsi_conn)
-		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
-	if (!bp->cnic_eth_dev.max_fcoe_conn)
-		bp->flags |= NO_FCOE_FLAG;
-}
-#endif
-
-static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
-{
-	u32 val, val2;
-	int func = BP_ABS_FUNC(bp);
-	int port = BP_PORT(bp);
-#ifdef BCM_CNIC
-	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
-	u8 *fip_mac = bp->fip_mac;
-#endif
-
-	/* Zero primary MAC configuration */
-	memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
-	if (BP_NOMCP(bp)) {
-		BNX2X_ERROR("warning: random MAC workaround active\n");
-		random_ether_addr(bp->dev->dev_addr);
-	} else if (IS_MF(bp)) {
-		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
-		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
-		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
-		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
-			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
-
-#ifdef BCM_CNIC
-		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
-		 * MAC is missing, the corresponding feature should be
-		 * disabled.
-		 */
-		if (IS_MF_SI(bp)) {
-			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
-			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
-				val2 = MF_CFG_RD(bp, func_ext_config[func].
-						 iscsi_mac_addr_upper);
-				val = MF_CFG_RD(bp, func_ext_config[func].
-						iscsi_mac_addr_lower);
-				bnx2x_set_mac_buf(iscsi_mac, val, val2);
-				BNX2X_DEV_INFO("Read iSCSI MAC: "
-					       BNX2X_MAC_FMT"\n",
-					       BNX2X_MAC_PRN_LIST(iscsi_mac));
-			} else
-				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
-			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
-				val2 = MF_CFG_RD(bp, func_ext_config[func].
- fcoe_mac_addr_upper); - val = MF_CFG_RD(bp, func_ext_config[func]. - fcoe_mac_addr_lower); - bnx2x_set_mac_buf(fip_mac, val, val2); - BNX2X_DEV_INFO("Read FCoE L2 MAC to " - BNX2X_MAC_FMT"\n", - BNX2X_MAC_PRN_LIST(fip_mac)); - - } else - bp->flags |= NO_FCOE_FLAG; - } -#endif - } else { - /* in SF read MACs from port configuration */ - val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); - val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); - bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); - -#ifdef BCM_CNIC - val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. - iscsi_mac_upper); - val = SHMEM_RD(bp, dev_info.port_hw_config[port]. - iscsi_mac_lower); - bnx2x_set_mac_buf(iscsi_mac, val, val2); - - val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. - fcoe_fip_mac_upper); - val = SHMEM_RD(bp, dev_info.port_hw_config[port]. - fcoe_fip_mac_lower); - bnx2x_set_mac_buf(fip_mac, val, val2); -#endif - } - - memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); - memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); - -#ifdef BCM_CNIC - /* Set the FCoE MAC in MF_SD mode */ - if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp)) - memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); - - /* Disable iSCSI if MAC configuration is - * invalid. - */ - if (!is_valid_ether_addr(iscsi_mac)) { - bp->flags |= NO_ISCSI_FLAG; - memset(iscsi_mac, 0, ETH_ALEN); - } - - /* Disable FCoE if MAC configuration is - * invalid. - */ - if (!is_valid_ether_addr(fip_mac)) { - bp->flags |= NO_FCOE_FLAG; - memset(bp->fip_mac, 0, ETH_ALEN); - } -#endif - - if (!is_valid_ether_addr(bp->dev->dev_addr)) - dev_err(&bp->pdev->dev, - "bad Ethernet MAC address configuration: " - BNX2X_MAC_FMT", change it manually before bringing up " - "the appropriate network interface\n", - BNX2X_MAC_PRN_LIST(bp->dev->dev_addr)); -} - -static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) -{ - int /*abs*/func = BP_ABS_FUNC(bp); - int vn; - u32 val = 0; - int rc = 0; - - bnx2x_get_common_hwinfo(bp); - - /* - * initialize IGU parameters - */ - if (CHIP_IS_E1x(bp)) { - bp->common.int_block = INT_BLOCK_HC; - - bp->igu_dsb_id = DEF_SB_IGU_ID; - bp->igu_base_sb = 0; - } else { - bp->common.int_block = INT_BLOCK_IGU; - val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); - - if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { - int tout = 5000; - - BNX2X_DEV_INFO("FORCING Normal Mode\n"); - - val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); - REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); - REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); - - while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { - tout--; - usleep_range(1000, 1000); - } - - if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { - dev_err(&bp->pdev->dev, - "FORCING Normal Mode failed!!!\n"); - return -EPERM; - } - } - - if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { - BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); - bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; - } else - BNX2X_DEV_INFO("IGU Normal Mode\n"); - - bnx2x_get_igu_cam_info(bp); - - } - - /* - * set base FW non-default (fast path) status block id, this value is - * used to initialize the fw_sb_id saved on the fp/queue structure to - * determine the id used by the FW. - */ - if (CHIP_IS_E1x(bp)) - bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); - else /* - * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of - * the same queue are indicated on the same IGU SB). So we prefer - * FW and IGU SBs to be the same value. 
- */
-		bp->base_fw_ndsb = bp->igu_base_sb;
-
-	BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
-		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
-		       bp->igu_sb_cnt, bp->base_fw_ndsb);
-
-	/*
-	 * Initialize MF configuration
-	 */
-
-	bp->mf_ov = 0;
-	bp->mf_mode = 0;
-	vn = BP_E1HVN(bp);
-
-	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
-		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
-			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
-			       (u32)offsetof(struct shmem2_region, mf_cfg_addr));
-
-		if (SHMEM2_HAS(bp, mf_cfg_addr))
-			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
-		else
-			bp->common.mf_cfg_base = bp->common.shmem_base +
-				offsetof(struct shmem_region, func_mb) +
-				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
-		/*
-		 * get mf configuration:
-		 * 1. existence of MF configuration
-		 * 2. MAC address must be legal (check only upper bytes)
-		 *    for Switch-Independent mode;
-		 *    OVLAN must be legal for Switch-Dependent mode
-		 * 3. SF_MODE configures specific MF mode
-		 */
-		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
-			/* get mf configuration */
-			val = SHMEM_RD(bp,
-				       dev_info.shared_feature_config.config);
-			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
-
-			switch (val) {
-			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
-				val = MF_CFG_RD(bp, func_mf_config[func].
-						mac_upper);
-				/* check for legal mac (upper bytes) */
-				if (val != 0xffff) {
-					bp->mf_mode = MULTI_FUNCTION_SI;
-					bp->mf_config[vn] = MF_CFG_RD(bp,
-						   func_mf_config[func].config);
-				} else
-					BNX2X_DEV_INFO("illegal MAC address "
-						       "for SI\n");
-				break;
-			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
-				/* get OV configuration */
-				val = MF_CFG_RD(bp,
-					func_mf_config[FUNC_0].e1hov_tag);
-				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
-
-				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
-					bp->mf_mode = MULTI_FUNCTION_SD;
-					bp->mf_config[vn] = MF_CFG_RD(bp,
-						func_mf_config[func].config);
-				} else
-					BNX2X_DEV_INFO("illegal OV for SD\n");
-				break;
-			default:
-				/* Unknown configuration: reset mf_config */
-				bp->mf_config[vn] = 0;
-				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
-			}
-		}
-
-		BNX2X_DEV_INFO("%s function mode\n",
-			       IS_MF(bp) ?
"multi" : "single"); - - switch (bp->mf_mode) { - case MULTI_FUNCTION_SD: - val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & - FUNC_MF_CFG_E1HOV_TAG_MASK; - if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { - bp->mf_ov = val; - bp->path_has_ovlan = true; - - BNX2X_DEV_INFO("MF OV for func %d is %d " - "(0x%04x)\n", func, bp->mf_ov, - bp->mf_ov); - } else { - dev_err(&bp->pdev->dev, - "No valid MF OV for func %d, " - "aborting\n", func); - return -EPERM; - } - break; - case MULTI_FUNCTION_SI: - BNX2X_DEV_INFO("func %d is in MF " - "switch-independent mode\n", func); - break; - default: - if (vn) { - dev_err(&bp->pdev->dev, - "VN %d is in a single function mode, " - "aborting\n", vn); - return -EPERM; - } - break; - } - - /* check if other port on the path needs ovlan: - * Since MF configuration is shared between ports - * Possible mixed modes are only - * {SF, SI} {SF, SD} {SD, SF} {SI, SF} - */ - if (CHIP_MODE_IS_4_PORT(bp) && - !bp->path_has_ovlan && - !IS_MF(bp) && - bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { - u8 other_port = !BP_PORT(bp); - u8 other_func = BP_PATH(bp) + 2*other_port; - val = MF_CFG_RD(bp, - func_mf_config[other_func].e1hov_tag); - if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) - bp->path_has_ovlan = true; - } - } - - /* adjust igu_sb_cnt to MF for E1x */ - if (CHIP_IS_E1x(bp) && IS_MF(bp)) - bp->igu_sb_cnt /= E1HVN_MAX; - - /* port info */ - bnx2x_get_port_hwinfo(bp); - - if (!BP_NOMCP(bp)) { - bp->fw_seq = - (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); - } - - /* Get MAC addresses */ - bnx2x_get_mac_hwinfo(bp); - -#ifdef BCM_CNIC - bnx2x_get_cnic_info(bp); -#endif - - /* Get current FW pulse sequence */ - if (!BP_NOMCP(bp)) { - int mb_idx = BP_FW_MB_IDX(bp); - - bp->fw_drv_pulse_wr_seq = - (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) & - DRV_PULSE_SEQ_MASK); - BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); - } - - return rc; -} - -static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) -{ - int cnt, i, block_end, rodi; - char vpd_data[BNX2X_VPD_LEN+1]; - char str_id_reg[VENDOR_ID_LEN+1]; - char str_id_cap[VENDOR_ID_LEN+1]; - u8 len; - - cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data); - memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); - - if (cnt < BNX2X_VPD_LEN) - goto out_not_found; - - i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN, - PCI_VPD_LRDT_RO_DATA); - if (i < 0) - goto out_not_found; - - - block_end = i + PCI_VPD_LRDT_TAG_SIZE + - pci_vpd_lrdt_size(&vpd_data[i]); - - i += PCI_VPD_LRDT_TAG_SIZE; - - if (block_end > BNX2X_VPD_LEN) - goto out_not_found; - - rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, - PCI_VPD_RO_KEYWORD_MFR_ID); - if (rodi < 0) - goto out_not_found; - - len = pci_vpd_info_field_size(&vpd_data[rodi]); - - if (len != VENDOR_ID_LEN) - goto out_not_found; - - rodi += PCI_VPD_INFO_FLD_HDR_SIZE; - - /* vendor specific info */ - snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); - snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); - if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || - !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { - - rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, - PCI_VPD_RO_KEYWORD_VENDOR0); - if (rodi >= 0) { - len = pci_vpd_info_field_size(&vpd_data[rodi]); - - rodi += PCI_VPD_INFO_FLD_HDR_SIZE; - - if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { - memcpy(bp->fw_ver, &vpd_data[rodi], len); - bp->fw_ver[len] = ' '; - } - } - return; - } 
-out_not_found: - return; -} - -static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) -{ - u32 flags = 0; - - if (CHIP_REV_IS_FPGA(bp)) - SET_FLAGS(flags, MODE_FPGA); - else if (CHIP_REV_IS_EMUL(bp)) - SET_FLAGS(flags, MODE_EMUL); - else - SET_FLAGS(flags, MODE_ASIC); - - if (CHIP_MODE_IS_4_PORT(bp)) - SET_FLAGS(flags, MODE_PORT4); - else - SET_FLAGS(flags, MODE_PORT2); - - if (CHIP_IS_E2(bp)) - SET_FLAGS(flags, MODE_E2); - else if (CHIP_IS_E3(bp)) { - SET_FLAGS(flags, MODE_E3); - if (CHIP_REV(bp) == CHIP_REV_Ax) - SET_FLAGS(flags, MODE_E3_A0); - else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ - SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); - } - - if (IS_MF(bp)) { - SET_FLAGS(flags, MODE_MF); - switch (bp->mf_mode) { - case MULTI_FUNCTION_SD: - SET_FLAGS(flags, MODE_MF_SD); - break; - case MULTI_FUNCTION_SI: - SET_FLAGS(flags, MODE_MF_SI); - break; - } - } else - SET_FLAGS(flags, MODE_SF); - -#if defined(__LITTLE_ENDIAN) - SET_FLAGS(flags, MODE_LITTLE_ENDIAN); -#else /*(__BIG_ENDIAN)*/ - SET_FLAGS(flags, MODE_BIG_ENDIAN); -#endif - INIT_MODE_FLAGS(bp) = flags; -} - -static int __devinit bnx2x_init_bp(struct bnx2x *bp) -{ - int func; - int timer_interval; - int rc; - - mutex_init(&bp->port.phy_mutex); - mutex_init(&bp->fw_mb_mutex); - spin_lock_init(&bp->stats_lock); -#ifdef BCM_CNIC - mutex_init(&bp->cnic_mutex); -#endif - - INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); - INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); - INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); - rc = bnx2x_get_hwinfo(bp); - if (rc) - return rc; - - bnx2x_set_modes_bitmap(bp); - - rc = bnx2x_alloc_mem_bp(bp); - if (rc) - return rc; - - bnx2x_read_fwinfo(bp); - - func = BP_FUNC(bp); - - /* need to reset chip if undi was active */ - if (!BP_NOMCP(bp)) - bnx2x_undi_unload(bp); - - if (CHIP_REV_IS_FPGA(bp)) - dev_err(&bp->pdev->dev, "FPGA detected\n"); - - if (BP_NOMCP(bp) && (func == 0)) - dev_err(&bp->pdev->dev, "MCP disabled, " - "must load devices in order!\n"); - - bp->multi_mode = multi_mode; - - /* Set TPA flags */ - if (disable_tpa) { - bp->flags &= ~TPA_ENABLE_FLAG; - bp->dev->features &= ~NETIF_F_LRO; - } else { - bp->flags |= TPA_ENABLE_FLAG; - bp->dev->features |= NETIF_F_LRO; - } - bp->disable_tpa = disable_tpa; - - if (CHIP_IS_E1(bp)) - bp->dropless_fc = 0; - else - bp->dropless_fc = dropless_fc; - - bp->mrrs = mrrs; - - bp->tx_ring_size = MAX_TX_AVAIL; - - /* make sure that the numbers are in the right granularity */ - bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; - bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; - - timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); - bp->current_interval = (poll ? 
poll : timer_interval);
-
-	init_timer(&bp->timer);
-	bp->timer.expires = jiffies + bp->current_interval;
-	bp->timer.data = (unsigned long) bp;
-	bp->timer.function = bnx2x_timer;
-
-	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
-	bnx2x_dcbx_init_params(bp);
-
-#ifdef BCM_CNIC
-	if (CHIP_IS_E1x(bp))
-		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
-	else
-		bp->cnic_base_cl_id = FP_SB_MAX_E2;
-#endif
-
-	/* multiple tx priority */
-	if (CHIP_IS_E1x(bp))
-		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
-	if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
-		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
-	if (CHIP_IS_E3B0(bp))
-		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
-
-	return rc;
-}
-
-
-/****************************************************************************
-* General service functions
-****************************************************************************/
-
-/*
- * net_device service functions
- */
-
-/* called with rtnl_lock */
-static int bnx2x_open(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	bool global = false;
-	int other_engine = BP_PATH(bp) ? 0 : 1;
-	u32 other_load_counter, load_counter;
-
-	netif_carrier_off(dev);
-
-	bnx2x_set_power_state(bp, PCI_D0);
-
-	other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
-	load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
-
-	/*
-	 * If a parity error had happened during the unload, then attentions
-	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
-	 * want the first function loaded on the current engine to
-	 * complete the recovery.
-	 */
-	if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
-	    bnx2x_chk_parity_attn(bp, &global, true))
-		do {
-			/*
-			 * If there are attentions and they are in global
-			 * blocks, set the GLOBAL_RESET bit regardless of
-			 * whether it will be this function that will
-			 * complete the recovery or not.
-			 */
-			if (global)
-				bnx2x_set_reset_global(bp);
-
-			/*
-			 * Only the first function on the current engine should
-			 * try to recover in open. In case of attentions in
-			 * global blocks only the first in the chip should try
-			 * to recover.
-			 */
-			if ((!load_counter &&
-			     (!global || !other_load_counter)) &&
-			    bnx2x_trylock_leader_lock(bp) &&
-			    !bnx2x_leader_reset(bp)) {
-				netdev_info(bp->dev, "Recovered in open\n");
-				break;
-			}
-
-			/* recovery has failed... */
-			bnx2x_set_power_state(bp, PCI_D3hot);
-			bp->recovery_state = BNX2X_RECOVERY_FAILED;
-
-			netdev_err(bp->dev, "Recovery flow hasn't been properly"
-				   " completed yet. Try again later."
-				   " If you still see this message after a"
-				   " few retries, then a power cycle is"
-				   " required.\n");
-
-			return -EAGAIN;
-		} while (0);
-
-	bp->recovery_state = BNX2X_RECOVERY_DONE;
-	return bnx2x_nic_load(bp, LOAD_OPEN);
-}
-
-/* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-
-	/* Unload the driver, release IRQs */
-	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
-
-	/* Power off */
-	bnx2x_set_power_state(bp, PCI_D3hot);
-
-	return 0;
-}
-
-static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
-					 struct bnx2x_mcast_ramrod_params *p)
-{
-	int mc_count = netdev_mc_count(bp->dev);
-	struct bnx2x_mcast_list_elem *mc_mac =
-		kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
-	struct netdev_hw_addr *ha;
-
-	if (!mc_mac)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&p->mcast_list);
-
-	netdev_for_each_mc_addr(ha, bp->dev) {
-		mc_mac->mac = bnx2x_mc_addr(ha);
-		list_add_tail(&mc_mac->link, &p->mcast_list);
-		mc_mac++;
-	}
-
-	p->mcast_list_len = mc_count;
-
-	return 0;
-}
-
-static inline void bnx2x_free_mcast_macs_list(
-	struct bnx2x_mcast_ramrod_params *p)
-{
-	struct bnx2x_mcast_list_elem *mc_mac =
-		list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
-				 link);
-
-	WARN_ON(!mc_mac);
-	kfree(mc_mac);
-}
-
-/**
- * bnx2x_set_uc_list - configure a new unicast MAC list.
- *
- * @bp: driver handle
- *
- * We will use zero (0) as a MAC type for these MACs.
- */
-static inline int bnx2x_set_uc_list(struct bnx2x *bp)
-{
-	int rc;
-	struct net_device *dev = bp->dev;
-	struct netdev_hw_addr *ha;
-	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
-	unsigned long ramrod_flags = 0;
-
-	/* First, schedule a clean-up of the old configuration */
-	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
-	if (rc < 0) {
-		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
-		return rc;
-	}
-
-	netdev_for_each_uc_addr(ha, dev) {
-		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
-				       BNX2X_UC_LIST_MAC, &ramrod_flags);
-		if (rc < 0) {
-			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
-				  rc);
-			return rc;
-		}
-	}
-
-	/* Execute the pending commands */
-	__set_bit(RAMROD_CONT, &ramrod_flags);
-	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
-				 BNX2X_UC_LIST_MAC, &ramrod_flags);
-}
-
-static inline int bnx2x_set_mc_list(struct bnx2x *bp)
-{
-	struct net_device *dev = bp->dev;
-	struct bnx2x_mcast_ramrod_params rparam = {0};
-	int rc = 0;
-
-	rparam.mcast_obj = &bp->mcast_obj;
-
-	/* first, clear all configured multicast MACs */
-	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
-	if (rc < 0) {
-		BNX2X_ERR("Failed to clear multicast "
-			  "configuration: %d\n", rc);
-		return rc;
-	}
-
-	/* then, configure the new MAC list */
-	if (netdev_mc_count(dev)) {
-		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
-		if (rc) {
-			BNX2X_ERR("Failed to create multicast MACs "
-				  "list: %d\n", rc);
-			return rc;
-		}
-
-		/* Now add the new MACs */
-		rc = bnx2x_config_mcast(bp, &rparam,
-					BNX2X_MCAST_CMD_ADD);
-		if (rc < 0)
-			BNX2X_ERR("Failed to set a new multicast "
-				  "configuration: %d\n", rc);
-
-		bnx2x_free_mcast_macs_list(&rparam);
-	}
-
-	return rc;
-}
-
-
-/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
-void bnx2x_set_rx_mode(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
-
-	if (bp->state != BNX2X_STATE_OPEN) {
-		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
-		return;
-	}
-
-	DP(NETIF_MSG_IFUP, "dev->flags = %x\n",
bp->dev->flags); - - if (dev->flags & IFF_PROMISC) - rx_mode = BNX2X_RX_MODE_PROMISC; - else if ((dev->flags & IFF_ALLMULTI) || - ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && - CHIP_IS_E1(bp))) - rx_mode = BNX2X_RX_MODE_ALLMULTI; - else { - /* some multicasts */ - if (bnx2x_set_mc_list(bp) < 0) - rx_mode = BNX2X_RX_MODE_ALLMULTI; - - if (bnx2x_set_uc_list(bp) < 0) - rx_mode = BNX2X_RX_MODE_PROMISC; - } - - bp->rx_mode = rx_mode; - - /* Schedule the rx_mode command */ - if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { - set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); - return; - } - - bnx2x_set_storm_rx_mode(bp); -} - -/* called with rtnl_lock */ -static int bnx2x_mdio_read(struct net_device *netdev, int prtad, - int devad, u16 addr) -{ - struct bnx2x *bp = netdev_priv(netdev); - u16 value; - int rc; - - DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", - prtad, devad, addr); - - /* The HW expects different devad if CL22 is used */ - devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; - - bnx2x_acquire_phy_lock(bp); - rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); - bnx2x_release_phy_lock(bp); - DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); - - if (!rc) - rc = value; - return rc; -} - -/* called with rtnl_lock */ -static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, - u16 addr, u16 value) -{ - struct bnx2x *bp = netdev_priv(netdev); - int rc; - - DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x," - " value 0x%x\n", prtad, devad, addr, value); - - /* The HW expects different devad if CL22 is used */ - devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; - - bnx2x_acquire_phy_lock(bp); - rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); - bnx2x_release_phy_lock(bp); - return rc; -} - -/* called with rtnl_lock */ -static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct bnx2x *bp = netdev_priv(dev); - struct mii_ioctl_data *mdio = if_mii(ifr); - - DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", - mdio->phy_id, mdio->reg_num, mdio->val_in); - - if (!netif_running(dev)) - return -EAGAIN; - - return mdio_mii_ioctl(&bp->mdio, mdio, cmd); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void poll_bnx2x(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - - disable_irq(bp->pdev->irq); - bnx2x_interrupt(bp->pdev->irq, dev); - enable_irq(bp->pdev->irq); -} -#endif - -static const struct net_device_ops bnx2x_netdev_ops = { - .ndo_open = bnx2x_open, - .ndo_stop = bnx2x_close, - .ndo_start_xmit = bnx2x_start_xmit, - .ndo_select_queue = bnx2x_select_queue, - .ndo_set_rx_mode = bnx2x_set_rx_mode, - .ndo_set_mac_address = bnx2x_change_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = bnx2x_ioctl, - .ndo_change_mtu = bnx2x_change_mtu, - .ndo_fix_features = bnx2x_fix_features, - .ndo_set_features = bnx2x_set_features, - .ndo_tx_timeout = bnx2x_tx_timeout, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = poll_bnx2x, -#endif - .ndo_setup_tc = bnx2x_setup_tc, - -#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) - .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, -#endif -}; - -static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) -{ - struct device *dev = &bp->pdev->dev; - - if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { - bp->flags |= USING_DAC_FLAG; - if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { - dev_err(dev, "dma_set_coherent_mask failed, " - 
"aborting\n"); - return -EIO; - } - } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { - dev_err(dev, "System does not support DMA, aborting\n"); - return -EIO; - } - - return 0; -} - -static int __devinit bnx2x_init_dev(struct pci_dev *pdev, - struct net_device *dev, - unsigned long board_type) -{ - struct bnx2x *bp; - int rc; - - SET_NETDEV_DEV(dev, &pdev->dev); - bp = netdev_priv(dev); - - bp->dev = dev; - bp->pdev = pdev; - bp->flags = 0; - bp->pf_num = PCI_FUNC(pdev->devfn); - - rc = pci_enable_device(pdev); - if (rc) { - dev_err(&bp->pdev->dev, - "Cannot enable PCI device, aborting\n"); - goto err_out; - } - - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - dev_err(&bp->pdev->dev, - "Cannot find PCI device base address, aborting\n"); - rc = -ENODEV; - goto err_out_disable; - } - - if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { - dev_err(&bp->pdev->dev, "Cannot find second PCI device" - " base address, aborting\n"); - rc = -ENODEV; - goto err_out_disable; - } - - if (atomic_read(&pdev->enable_cnt) == 1) { - rc = pci_request_regions(pdev, DRV_MODULE_NAME); - if (rc) { - dev_err(&bp->pdev->dev, - "Cannot obtain PCI resources, aborting\n"); - goto err_out_disable; - } - - pci_set_master(pdev); - pci_save_state(pdev); - } - - bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (bp->pm_cap == 0) { - dev_err(&bp->pdev->dev, - "Cannot find power management capability, aborting\n"); - rc = -EIO; - goto err_out_release; - } - - if (!pci_is_pcie(pdev)) { - dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); - rc = -EIO; - goto err_out_release; - } - - rc = bnx2x_set_coherency_mask(bp); - if (rc) - goto err_out_release; - - dev->mem_start = pci_resource_start(pdev, 0); - dev->base_addr = dev->mem_start; - dev->mem_end = pci_resource_end(pdev, 0); - - dev->irq = pdev->irq; - - bp->regview = pci_ioremap_bar(pdev, 0); - if (!bp->regview) { - dev_err(&bp->pdev->dev, - "Cannot map register space, aborting\n"); - rc = -ENOMEM; - goto err_out_release; - } - - bnx2x_set_power_state(bp, PCI_D0); - - /* clean indirect addresses */ - pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, - PCICFG_VENDOR_ID_OFFSET); - REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); - - /* - * Enable internal target-read (in case we are probed after PF FLR). - * Must be done prior to any BAR read access. 
Only for 57712 and up - */ - if (board_type != BCM57710 && - board_type != BCM57711 && - board_type != BCM57711E) - REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - - /* Reset the load counter */ - bnx2x_clear_load_cnt(bp); - - dev->watchdog_timeo = TX_TIMEOUT; - - dev->netdev_ops = &bnx2x_netdev_ops; - bnx2x_set_ethtool_ops(dev); - - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | - NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX; - - dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; - - dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX; - if (bp->flags & USING_DAC_FLAG) - dev->features |= NETIF_F_HIGHDMA; - - /* Add Loopback capability to the device */ - dev->hw_features |= NETIF_F_LOOPBACK; - -#ifdef BCM_DCBNL - dev->dcbnl_ops = &bnx2x_dcbnl_ops; -#endif - - /* get_port_hwinfo() will set prtad and mmds properly */ - bp->mdio.prtad = MDIO_PRTAD_NONE; - bp->mdio.mmds = 0; - bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; - bp->mdio.dev = dev; - bp->mdio.mdio_read = bnx2x_mdio_read; - bp->mdio.mdio_write = bnx2x_mdio_write; - - return 0; - -err_out_release: - if (atomic_read(&pdev->enable_cnt) == 1) - pci_release_regions(pdev); - -err_out_disable: - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - -err_out: - return rc; -} - -static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp, - int *width, int *speed) -{ - u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); - - *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; - - /* return value of 1=2.5GHz 2=5GHz */ - *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; -} - -static int bnx2x_check_firmware(struct bnx2x *bp) -{ - const struct firmware *firmware = bp->firmware; - struct bnx2x_fw_file_hdr *fw_hdr; - struct bnx2x_fw_file_section *sections; - u32 offset, len, num_ops; - u16 *ops_offsets; - int i; - const u8 *fw_ver; - - if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) - return -EINVAL; - - fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; - sections = (struct bnx2x_fw_file_section *)fw_hdr; - - /* Make sure none of the offsets and sizes make us read beyond - * the end of the firmware data */ - for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) { - offset = be32_to_cpu(sections[i].offset); - len = be32_to_cpu(sections[i].len); - if (offset + len > firmware->size) { - dev_err(&bp->pdev->dev, - "Section %d length is out of bounds\n", i); - return -EINVAL; - } - } - - /* Likewise for the init_ops offsets */ - offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); - ops_offsets = (u16 *)(firmware->data + offset); - num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); - - for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { - if (be16_to_cpu(ops_offsets[i]) > num_ops) { - dev_err(&bp->pdev->dev, - "Section offset %d is out of bounds\n", i); - return -EINVAL; - } - } - - /* Check FW version */ - offset = be32_to_cpu(fw_hdr->fw_version.offset); - fw_ver = firmware->data + offset; - if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) || - (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || - (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || - (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { - dev_err(&bp->pdev->dev, - "Bad FW version:%d.%d.%d.%d. 
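/*
 * [Editor's sketch, not part of the patch. bnx2x_check_firmware() above
 * validates every (offset, len) section descriptor against the blob size
 * before using it. Standalone form; note the comparison is written as a
 * subtraction so that offset + len cannot wrap around:]
 */
#include <stdio.h>
#include <stdint.h>

struct section { uint32_t offset, len; };

static int check_sections(const struct section *s, unsigned int n,
			  uint32_t fw_size)
{
	for (unsigned int i = 0; i < n; i++) {
		if (s[i].offset > fw_size ||
		    s[i].len > fw_size - s[i].offset) {
			fprintf(stderr, "section %u out of bounds\n", i);
			return -1;	/* the driver returns -EINVAL */
		}
	}
	return 0;
}

int main(void)
{
	struct section ok  = { 0x10, 0x20 };
	struct section bad = { 0xfffffff0u, 0x20 };	/* would wrap */

	printf("%d %d\n", check_sections(&ok, 1, 0x100),
	       check_sections(&bad, 1, 0x100));
	return 0;
}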
Should be %d.%d.%d.%d\n", - fw_ver[0], fw_ver[1], fw_ver[2], - fw_ver[3], BCM_5710_FW_MAJOR_VERSION, - BCM_5710_FW_MINOR_VERSION, - BCM_5710_FW_REVISION_VERSION, - BCM_5710_FW_ENGINEERING_VERSION); - return -EINVAL; - } - - return 0; -} - -static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) -{ - const __be32 *source = (const __be32 *)_source; - u32 *target = (u32 *)_target; - u32 i; - - for (i = 0; i < n/4; i++) - target[i] = be32_to_cpu(source[i]); -} - -/* - Ops array is stored in the following format: - {op(8bit), offset(24bit, big endian), data(32bit, big endian)} - */ -static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) -{ - const __be32 *source = (const __be32 *)_source; - struct raw_op *target = (struct raw_op *)_target; - u32 i, j, tmp; - - for (i = 0, j = 0; i < n/8; i++, j += 2) { - tmp = be32_to_cpu(source[j]); - target[i].op = (tmp >> 24) & 0xff; - target[i].offset = tmp & 0xffffff; - target[i].raw_data = be32_to_cpu(source[j + 1]); - } -} - -/** - * IRO array is stored in the following format: - * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } - */ -static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) -{ - const __be32 *source = (const __be32 *)_source; - struct iro *target = (struct iro *)_target; - u32 i, j, tmp; - - for (i = 0, j = 0; i < n/sizeof(struct iro); i++) { - target[i].base = be32_to_cpu(source[j]); - j++; - tmp = be32_to_cpu(source[j]); - target[i].m1 = (tmp >> 16) & 0xffff; - target[i].m2 = tmp & 0xffff; - j++; - tmp = be32_to_cpu(source[j]); - target[i].m3 = (tmp >> 16) & 0xffff; - target[i].size = tmp & 0xffff; - j++; - } -} - -static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) -{ - const __be16 *source = (const __be16 *)_source; - u16 *target = (u16 *)_target; - u32 i; - - for (i = 0; i < n/2; i++) - target[i] = be16_to_cpu(source[i]); -} - -#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \ -do { \ - u32 len = be32_to_cpu(fw_hdr->arr.len); \ - bp->arr = kmalloc(len, GFP_KERNEL); \ - if (!bp->arr) { \ - pr_err("Failed to allocate %d bytes for "#arr"\n", len); \ - goto lbl; \ - } \ - func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ - (u8 *)bp->arr, len); \ -} while (0) - -int bnx2x_init_firmware(struct bnx2x *bp) -{ - const char *fw_file_name; - struct bnx2x_fw_file_hdr *fw_hdr; - int rc; - - if (CHIP_IS_E1(bp)) - fw_file_name = FW_FILE_NAME_E1; - else if (CHIP_IS_E1H(bp)) - fw_file_name = FW_FILE_NAME_E1H; - else if (!CHIP_IS_E1x(bp)) - fw_file_name = FW_FILE_NAME_E2; - else { - BNX2X_ERR("Unsupported chip revision\n"); - return -EINVAL; - } - - BNX2X_DEV_INFO("Loading %s\n", fw_file_name); - - rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); - if (rc) { - BNX2X_ERR("Can't load firmware file %s\n", fw_file_name); - goto request_firmware_exit; - } - - rc = bnx2x_check_firmware(bp); - if (rc) { - BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); - goto request_firmware_exit; - } - - fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; - - /* Initialize the pointers to the init arrays */ - /* Blob */ - BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n); - - /* Opcodes */ - BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops); - - /* Offsets */ - BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, - be16_to_cpu_n); - - /* STORMs firmware */ - INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + - be32_to_cpu(fw_hdr->tsem_int_table_data.offset); - INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + - 
be32_to_cpu(fw_hdr->tsem_pram_data.offset); - INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + - be32_to_cpu(fw_hdr->usem_int_table_data.offset); - INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + - be32_to_cpu(fw_hdr->usem_pram_data.offset); - INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + - be32_to_cpu(fw_hdr->xsem_int_table_data.offset); - INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + - be32_to_cpu(fw_hdr->xsem_pram_data.offset); - INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + - be32_to_cpu(fw_hdr->csem_int_table_data.offset); - INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + - be32_to_cpu(fw_hdr->csem_pram_data.offset); - /* IRO */ - BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); - - return 0; - -iro_alloc_err: - kfree(bp->init_ops_offsets); -init_offsets_alloc_err: - kfree(bp->init_ops); -init_ops_alloc_err: - kfree(bp->init_data); -request_firmware_exit: - release_firmware(bp->firmware); - - return rc; -} - -static void bnx2x_release_firmware(struct bnx2x *bp) -{ - kfree(bp->init_ops_offsets); - kfree(bp->init_ops); - kfree(bp->init_data); - release_firmware(bp->firmware); -} - - -static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { - .init_hw_cmn_chip = bnx2x_init_hw_common_chip, - .init_hw_cmn = bnx2x_init_hw_common, - .init_hw_port = bnx2x_init_hw_port, - .init_hw_func = bnx2x_init_hw_func, - - .reset_hw_cmn = bnx2x_reset_common, - .reset_hw_port = bnx2x_reset_port, - .reset_hw_func = bnx2x_reset_func, - - .gunzip_init = bnx2x_gunzip_init, - .gunzip_end = bnx2x_gunzip_end, - - .init_fw = bnx2x_init_firmware, - .release_fw = bnx2x_release_firmware, -}; - -void bnx2x__init_func_obj(struct bnx2x *bp) -{ - /* Prepare DMAE related driver resources */ - bnx2x_setup_dmae(bp); - - bnx2x_init_func_obj(bp, &bp->func_obj, - bnx2x_sp(bp, func_rdata), - bnx2x_sp_mapping(bp, func_rdata), - &bnx2x_func_sp_drv); -} - -/* must be called after sriov-enable */ -static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp) -{ - int cid_count = BNX2X_L2_CID_COUNT(bp); - -#ifdef BCM_CNIC - cid_count += CNIC_CID_MAX; -#endif - return roundup(cid_count, QM_CID_ROUND); -} - -/** - * bnx2x_get_num_none_def_sbs - return the number of none default SBs - * - * @dev: pci device - * - */ -static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev) -{ - int pos; - u16 control; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - - /* - * If MSI-X is not supported - return number of SBs needed to support - * one fast path queue: one FP queue + SB for CNIC - */ - if (!pos) - return 1 + CNIC_PRESENT; - - /* - * The value in the PCI configuration space is the index of the last - * entry, namely one less than the actual size of the table, which is - * exactly what we want to return from this function: number of all SBs - * without the default SB. - */ - pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); - return control & PCI_MSIX_FLAGS_QSIZE; -} - -static int __devinit bnx2x_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *dev = NULL; - struct bnx2x *bp; - int pcie_width, pcie_speed; - int rc, max_non_def_sbs; - int rx_count, tx_count, rss_count; - /* - * An estimated maximum supported CoS number according to the chip - * version. - * We will try to roughly estimate the maximum number of CoSes this chip - * may support in order to minimize the memory allocated for Tx - * netdev_queue's. 
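/*
 * The unwind ladder above (iro_alloc_err -> init_offsets_alloc_err ->
 * init_ops_alloc_err -> request_firmware_exit) frees strictly in reverse
 * order of allocation; each label names the first resource that must be
 * undone. The same shape reduced to two buffers, all names illustrative:
 */
static int foo_alloc_two(struct foo *f)
{
	f->a = kmalloc(FOO_A_LEN, GFP_KERNEL);
	if (!f->a)
		return -ENOMEM;

	f->b = kmalloc(FOO_B_LEN, GFP_KERNEL);
	if (!f->b)
		goto err_free_a;

	return 0;

err_free_a:
	kfree(f->a);
	return -ENOMEM;
}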
This number will be accurately calculated during the - * initialization of bp->max_cos based on the chip versions AND chip - * revision in the bnx2x_init_bp(). - */ - u8 max_cos_est = 0; - - switch (ent->driver_data) { - case BCM57710: - case BCM57711: - case BCM57711E: - max_cos_est = BNX2X_MULTI_TX_COS_E1X; - break; - - case BCM57712: - case BCM57712_MF: - max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0; - break; - - case BCM57800: - case BCM57800_MF: - case BCM57810: - case BCM57810_MF: - case BCM57840: - case BCM57840_MF: - max_cos_est = BNX2X_MULTI_TX_COS_E3B0; - break; - - default: - pr_err("Unknown board_type (%ld), aborting\n", - ent->driver_data); - return -ENODEV; - } - - max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); - - /* !!! FIXME !!! - * Do not allow the maximum SB count to grow above 16 - * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48. - * We will use the FP_SB_MAX_E1x macro for this matter. - */ - max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs); - - WARN_ON(!max_non_def_sbs); - - /* Maximum number of RSS queues: one IGU SB goes to CNIC */ - rss_count = max_non_def_sbs - CNIC_PRESENT; - - /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ - rx_count = rss_count + FCOE_PRESENT; - - /* - * Maximum number of netdev Tx queues: - * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 - */ - tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT; - - /* dev zeroed in init_etherdev */ - dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); - if (!dev) { - dev_err(&pdev->dev, "Cannot allocate net device\n"); - return -ENOMEM; - } - - bp = netdev_priv(dev); - - DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n", - tx_count, rx_count); - - bp->igu_sb_cnt = max_non_def_sbs; - bp->msg_enable = debug; - pci_set_drvdata(pdev, dev); - - rc = bnx2x_init_dev(pdev, dev, ent->driver_data); - if (rc < 0) { - free_netdev(dev); - return rc; - } - - DP(NETIF_MSG_DRV, "max_non_def_sbs %d", max_non_def_sbs); - - rc = bnx2x_init_bp(bp); - if (rc) - goto init_one_exit; - - /* - * Map doorbels here as we need the real value of bp->max_cos which - * is initialized in bnx2x_init_bp(). - */ - bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), - min_t(u64, BNX2X_DB_SIZE(bp), - pci_resource_len(pdev, 2))); - if (!bp->doorbells) { - dev_err(&bp->pdev->dev, - "Cannot map doorbell space, aborting\n"); - rc = -ENOMEM; - goto init_one_exit; - } - - /* calc qm_cid_count */ - bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); - -#ifdef BCM_CNIC - /* disable FCOE L2 queue for E1x and E3*/ - if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp)) - bp->flags |= NO_FCOE_FLAG; - -#endif - - /* Configure interrupt mode: try to enable MSI-X/MSI if - * needed, set bp->num_queues appropriately. - */ - bnx2x_set_int_mode(bp); - - /* Add all NAPI objects */ - bnx2x_add_all_napi(bp); - - rc = register_netdev(dev); - if (rc) { - dev_err(&pdev->dev, "Cannot register net device\n"); - goto init_one_exit; - } - -#ifdef BCM_CNIC - if (!NO_FCOE(bp)) { - /* Add storage MAC address */ - rtnl_lock(); - dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); - rtnl_unlock(); - } -#endif - - bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); - - netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," - " IRQ %d, ", board_info[ent->driver_data].name, - (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), - pcie_width, - ((!CHIP_IS_E2(bp) && pcie_speed == 2) || - (CHIP_IS_E2(bp) && pcie_speed == 1)) ? 
- "5GHz (Gen2)" : "2.5GHz", - dev->base_addr, bp->pdev->irq); - pr_cont("node addr %pM\n", dev->dev_addr); - - return 0; - -init_one_exit: - if (bp->regview) - iounmap(bp->regview); - - if (bp->doorbells) - iounmap(bp->doorbells); - - free_netdev(dev); - - if (atomic_read(&pdev->enable_cnt) == 1) - pci_release_regions(pdev); - - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - - return rc; -} - -static void __devexit bnx2x_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp; - - if (!dev) { - dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); - return; - } - bp = netdev_priv(dev); - -#ifdef BCM_CNIC - /* Delete storage MAC address */ - if (!NO_FCOE(bp)) { - rtnl_lock(); - dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); - rtnl_unlock(); - } -#endif - -#ifdef BCM_DCBNL - /* Delete app tlvs from dcbnl */ - bnx2x_dcbnl_update_applist(bp, true); -#endif - - unregister_netdev(dev); - - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - - /* Power on: we can't let PCI layer write to us while we are in D3 */ - bnx2x_set_power_state(bp, PCI_D0); - - /* Disable MSI/MSI-X */ - bnx2x_disable_msi(bp); - - /* Power off */ - bnx2x_set_power_state(bp, PCI_D3hot); - - /* Make sure RESET task is not scheduled before continuing */ - cancel_delayed_work_sync(&bp->sp_rtnl_task); - - if (bp->regview) - iounmap(bp->regview); - - if (bp->doorbells) - iounmap(bp->doorbells); - - bnx2x_free_mem_bp(bp); - - free_netdev(dev); - - if (atomic_read(&pdev->enable_cnt) == 1) - pci_release_regions(pdev); - - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); -} - -static int bnx2x_eeh_nic_unload(struct bnx2x *bp) -{ - int i; - - bp->state = BNX2X_STATE_ERROR; - - bp->rx_mode = BNX2X_RX_MODE_NONE; - -#ifdef BCM_CNIC - bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); -#endif - /* Stop Tx */ - bnx2x_tx_disable(bp); - - bnx2x_netif_stop(bp, 0); - - del_timer_sync(&bp->timer); - - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - /* Release IRQs */ - bnx2x_free_irq(bp); - - /* Free SKBs, SGEs, TPA pool and driver internals */ - bnx2x_free_skbs(bp); - - for_each_rx_queue(bp, i) - bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); - - bnx2x_free_mem(bp); - - bp->state = BNX2X_STATE_CLOSED; - - netif_carrier_off(bp->dev); - - return 0; -} - -static void bnx2x_eeh_recover(struct bnx2x *bp) -{ - u32 val; - - mutex_init(&bp->port.phy_mutex); - - bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); - bp->link_params.shmem_base = bp->common.shmem_base; - BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base); - - if (!bp->common.shmem_base || - (bp->common.shmem_base < 0xA0000) || - (bp->common.shmem_base >= 0xC0000)) { - BNX2X_DEV_INFO("MCP not active\n"); - bp->flags |= NO_MCP_FLAG; - return; - } - - val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); - if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - BNX2X_ERR("BAD MCP validity signature\n"); - - if (!BP_NOMCP(bp)) { - bp->fw_seq = - (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); - } -} - -/** - * bnx2x_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state - * - * This function is called after a PCI bus error affecting - * this device has been detected. 
- */ -static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp = netdev_priv(dev); - - rtnl_lock(); - - netif_device_detach(dev); - - if (state == pci_channel_io_perm_failure) { - rtnl_unlock(); - return PCI_ERS_RESULT_DISCONNECT; - } - - if (netif_running(dev)) - bnx2x_eeh_nic_unload(bp); - - pci_disable_device(pdev); - - rtnl_unlock(); - - /* Request a slot reset */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * bnx2x_io_slot_reset - called after the PCI bus has been reset - * @pdev: Pointer to PCI device - * - * Restart the card from scratch, as if from a cold-boot. - */ -static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp = netdev_priv(dev); - - rtnl_lock(); - - if (pci_enable_device(pdev)) { - dev_err(&pdev->dev, - "Cannot re-enable PCI device after reset\n"); - rtnl_unlock(); - return PCI_ERS_RESULT_DISCONNECT; - } - - pci_set_master(pdev); - pci_restore_state(pdev); - - if (netif_running(dev)) - bnx2x_set_power_state(bp, PCI_D0); - - rtnl_unlock(); - - return PCI_ERS_RESULT_RECOVERED; -} - -/** - * bnx2x_io_resume - called when traffic can start flowing again - * @pdev: Pointer to PCI device - * - * This callback is called when the error recovery driver tells us that - * its OK to resume normal operation. - */ -static void bnx2x_io_resume(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp = netdev_priv(dev); - - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - netdev_err(bp->dev, "Handling parity error recovery. " - "Try again later\n"); - return; - } - - rtnl_lock(); - - bnx2x_eeh_recover(bp); - - if (netif_running(dev)) - bnx2x_nic_load(bp, LOAD_NORMAL); - - netif_device_attach(dev); - - rtnl_unlock(); -} - -static struct pci_error_handlers bnx2x_err_handler = { - .error_detected = bnx2x_io_error_detected, - .slot_reset = bnx2x_io_slot_reset, - .resume = bnx2x_io_resume, -}; - -static struct pci_driver bnx2x_pci_driver = { - .name = DRV_MODULE_NAME, - .id_table = bnx2x_pci_tbl, - .probe = bnx2x_init_one, - .remove = __devexit_p(bnx2x_remove_one), - .suspend = bnx2x_suspend, - .resume = bnx2x_resume, - .err_handler = &bnx2x_err_handler, -}; - -static int __init bnx2x_init(void) -{ - int ret; - - pr_info("%s", version); - - bnx2x_wq = create_singlethread_workqueue("bnx2x"); - if (bnx2x_wq == NULL) { - pr_err("Cannot create workqueue\n"); - return -ENOMEM; - } - - ret = pci_register_driver(&bnx2x_pci_driver); - if (ret) { - pr_err("Cannot register driver\n"); - destroy_workqueue(bnx2x_wq); - } - return ret; -} - -static void __exit bnx2x_cleanup(void) -{ - pci_unregister_driver(&bnx2x_pci_driver); - - destroy_workqueue(bnx2x_wq); -} - -void bnx2x_notify_link_changed(struct bnx2x *bp) -{ - REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); -} - -module_init(bnx2x_init); -module_exit(bnx2x_cleanup); - -#ifdef BCM_CNIC -/** - * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). - * - * @bp: driver handle - * @set: set or clear the CAM entry - * - * This function will wait until the ramdord completion returns. - * Return 0 if success, -ENODEV if ramrod doesn't return. 
- */ -static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) -{ - unsigned long ramrod_flags = 0; - - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, - &bp->iscsi_l2_mac_obj, true, - BNX2X_ISCSI_ETH_MAC, &ramrod_flags); -} - -/* count denotes the number of new completions we have seen */ -static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) -{ - struct eth_spe *spe; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return; -#endif - - spin_lock_bh(&bp->spq_lock); - BUG_ON(bp->cnic_spq_pending < count); - bp->cnic_spq_pending -= count; - - - for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { - u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) - & SPE_HDR_CONN_TYPE) >> - SPE_HDR_CONN_TYPE_SHIFT; - u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) - >> SPE_HDR_CMD_ID_SHIFT) & 0xff; - - /* Set validation for iSCSI L2 client before sending SETUP - * ramrod - */ - if (type == ETH_CONNECTION_TYPE) { - if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) - bnx2x_set_ctx_validation(bp, &bp->context. - vcxt[BNX2X_ISCSI_ETH_CID].eth, - BNX2X_ISCSI_ETH_CID); - } - - /* - * There may be not more than 8 L2, not more than 8 L5 SPEs - * and in the air. We also check that number of outstanding - * COMMON ramrods is not more than the EQ and SPQ can - * accommodate. - */ - if (type == ETH_CONNECTION_TYPE) { - if (!atomic_read(&bp->cq_spq_left)) - break; - else - atomic_dec(&bp->cq_spq_left); - } else if (type == NONE_CONNECTION_TYPE) { - if (!atomic_read(&bp->eq_spq_left)) - break; - else - atomic_dec(&bp->eq_spq_left); - } else if ((type == ISCSI_CONNECTION_TYPE) || - (type == FCOE_CONNECTION_TYPE)) { - if (bp->cnic_spq_pending >= - bp->cnic_eth_dev.max_kwqe_pending) - break; - else - bp->cnic_spq_pending++; - } else { - BNX2X_ERR("Unknown SPE type: %d\n", type); - bnx2x_panic(); - break; - } - - spe = bnx2x_sp_get_next(bp); - *spe = *bp->cnic_kwq_cons; - - DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n", - bp->cnic_spq_pending, bp->cnic_kwq_pending, count); - - if (bp->cnic_kwq_cons == bp->cnic_kwq_last) - bp->cnic_kwq_cons = bp->cnic_kwq; - else - bp->cnic_kwq_cons++; - } - bnx2x_sp_prod_update(bp); - spin_unlock_bh(&bp->spq_lock); -} - -static int bnx2x_cnic_sp_queue(struct net_device *dev, - struct kwqe_16 *kwqes[], u32 count) -{ - struct bnx2x *bp = netdev_priv(dev); - int i; - -#ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) - return -EIO; -#endif - - spin_lock_bh(&bp->spq_lock); - - for (i = 0; i < count; i++) { - struct eth_spe *spe = (struct eth_spe *)kwqes[i]; - - if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) - break; - - *bp->cnic_kwq_prod = *spe; - - bp->cnic_kwq_pending++; - - DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", - spe->hdr.conn_and_cmd_data, spe->hdr.type, - spe->data.update_data_addr.hi, - spe->data.update_data_addr.lo, - bp->cnic_kwq_pending); - - if (bp->cnic_kwq_prod == bp->cnic_kwq_last) - bp->cnic_kwq_prod = bp->cnic_kwq; - else - bp->cnic_kwq_prod++; - } - - spin_unlock_bh(&bp->spq_lock); - - if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) - bnx2x_cnic_sp_post(bp, 0); - - return i; -} - -static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) -{ - struct cnic_ops *c_ops; - int rc = 0; - - mutex_lock(&bp->cnic_mutex); - c_ops = rcu_dereference_protected(bp->cnic_ops, - lockdep_is_held(&bp->cnic_mutex)); - if (c_ops) - rc = c_ops->cnic_ctl(bp->cnic_data, ctl); - mutex_unlock(&bp->cnic_mutex); - - return rc; -} - -static int 
bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) -{ - struct cnic_ops *c_ops; - int rc = 0; - - rcu_read_lock(); - c_ops = rcu_dereference(bp->cnic_ops); - if (c_ops) - rc = c_ops->cnic_ctl(bp->cnic_data, ctl); - rcu_read_unlock(); - - return rc; -} - -/* - * for commands that have no data - */ -int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) -{ - struct cnic_ctl_info ctl = {0}; - - ctl.cmd = cmd; - - return bnx2x_cnic_ctl_send(bp, &ctl); -} - -static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) -{ - struct cnic_ctl_info ctl = {0}; - - /* first we tell CNIC and only then we count this as a completion */ - ctl.cmd = CNIC_CTL_COMPLETION_CMD; - ctl.data.comp.cid = cid; - ctl.data.comp.error = err; - - bnx2x_cnic_ctl_send_bh(bp, &ctl); - bnx2x_cnic_sp_post(bp, 0); -} - - -/* Called with netif_addr_lock_bh() taken. - * Sets an rx_mode config for an iSCSI ETH client. - * Doesn't block. - * Completion should be checked outside. - */ -static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) -{ - unsigned long accept_flags = 0, ramrod_flags = 0; - u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); - int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; - - if (start) { - /* Start accepting on iSCSI L2 ring. Accept all multicasts - * because it's the only way for UIO Queue to accept - * multicasts (in non-promiscuous mode only one Queue per - * function will receive multicast packets (leading in our - * case). - */ - __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); - __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); - __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); - - /* Clear STOP_PENDING bit if START is requested */ - clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); - - sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; - } else - /* Clear START_PENDING bit if STOP is requested */ - clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); - - if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) - set_bit(sched_state, &bp->sp_state); - else { - __set_bit(RAMROD_RX, &ramrod_flags); - bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, - ramrod_flags); - } -} - - -static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) -{ - struct bnx2x *bp = netdev_priv(dev); - int rc = 0; - - switch (ctl->cmd) { - case DRV_CTL_CTXTBL_WR_CMD: { - u32 index = ctl->data.io.offset; - dma_addr_t addr = ctl->data.io.dma_addr; - - bnx2x_ilt_wr(bp, index, addr); - break; - } - - case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: { - int count = ctl->data.credit.credit_count; - - bnx2x_cnic_sp_post(bp, count); - break; - } - - /* rtnl_lock is held. 
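/*
 * Note the locking split between the two senders above: the process-context
 * variant serializes on cnic_mutex (hence rcu_dereference_protected() with
 * the lockdep annotation), while the _bh variant runs lockless inside
 * rcu_read_lock()/rcu_read_unlock(). The matching publish and unpublish
 * sites are the rcu_assign_pointer() calls in bnx2x_register_cnic() and
 * bnx2x_unregister_cnic(), the latter followed by synchronize_rcu() before
 * the kwq ring is freed.
 */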
*/ - case DRV_CTL_START_L2_CMD: { - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - unsigned long sp_bits = 0; - - /* Configure the iSCSI classification object */ - bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, - cp->iscsi_l2_client_id, - cp->iscsi_l2_cid, BP_FUNC(bp), - bnx2x_sp(bp, mac_rdata), - bnx2x_sp_mapping(bp, mac_rdata), - BNX2X_FILTER_MAC_PENDING, - &bp->sp_state, BNX2X_OBJ_TYPE_RX, - &bp->macs_pool); - - /* Set iSCSI MAC address */ - rc = bnx2x_set_iscsi_eth_mac_addr(bp); - if (rc) - break; - - mmiowb(); - barrier(); - - /* Start accepting on iSCSI L2 ring */ - - netif_addr_lock_bh(dev); - bnx2x_set_iscsi_eth_rx_mode(bp, true); - netif_addr_unlock_bh(dev); - - /* bits to wait on */ - __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); - __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); - - if (!bnx2x_wait_sp_comp(bp, sp_bits)) - BNX2X_ERR("rx_mode completion timed out!\n"); - - break; - } - - /* rtnl_lock is held. */ - case DRV_CTL_STOP_L2_CMD: { - unsigned long sp_bits = 0; - - /* Stop accepting on iSCSI L2 ring */ - netif_addr_lock_bh(dev); - bnx2x_set_iscsi_eth_rx_mode(bp, false); - netif_addr_unlock_bh(dev); - - /* bits to wait on */ - __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); - __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); - - if (!bnx2x_wait_sp_comp(bp, sp_bits)) - BNX2X_ERR("rx_mode completion timed out!\n"); - - mmiowb(); - barrier(); - - /* Unset iSCSI L2 MAC */ - rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, - BNX2X_ISCSI_ETH_MAC, true); - break; - } - case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { - int count = ctl->data.credit.credit_count; - - smp_mb__before_atomic_inc(); - atomic_add(count, &bp->cq_spq_left); - smp_mb__after_atomic_inc(); - break; - } - - default: - BNX2X_ERR("unknown command %x\n", ctl->cmd); - rc = -EINVAL; - } - - return rc; -} - -void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) -{ - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - - if (bp->flags & USING_MSIX_FLAG) { - cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; - cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; - cp->irq_arr[0].vector = bp->msix_table[1].vector; - } else { - cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; - cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; - } - if (!CHIP_IS_E1x(bp)) - cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; - else - cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; - - cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); - cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); - cp->irq_arr[1].status_blk = bp->def_status_blk; - cp->irq_arr[1].status_blk_num = DEF_SB_ID; - cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; - - cp->num_irq = 2; -} - -static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, - void *data) -{ - struct bnx2x *bp = netdev_priv(dev); - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - - if (ops == NULL) - return -EINVAL; - - bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!bp->cnic_kwq) - return -ENOMEM; - - bp->cnic_kwq_cons = bp->cnic_kwq; - bp->cnic_kwq_prod = bp->cnic_kwq; - bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; - - bp->cnic_spq_pending = 0; - bp->cnic_kwq_pending = 0; - - bp->cnic_data = data; - - cp->num_irq = 0; - cp->drv_state |= CNIC_DRV_STATE_REGD; - cp->iro_arr = bp->iro_arr; - - bnx2x_setup_cnic_irq_info(bp); - - rcu_assign_pointer(bp->cnic_ops, ops); - - return 0; -} - -static int bnx2x_unregister_cnic(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - - mutex_lock(&bp->cnic_mutex); - 
cp->drv_state = 0; - rcu_assign_pointer(bp->cnic_ops, NULL); - mutex_unlock(&bp->cnic_mutex); - synchronize_rcu(); - kfree(bp->cnic_kwq); - bp->cnic_kwq = NULL; - - return 0; -} - -struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - - /* If both iSCSI and FCoE are disabled - return NULL in - * order to indicate CNIC that it should not try to work - * with this device. - */ - if (NO_ISCSI(bp) && NO_FCOE(bp)) - return NULL; - - cp->drv_owner = THIS_MODULE; - cp->chip_id = CHIP_ID(bp); - cp->pdev = bp->pdev; - cp->io_base = bp->regview; - cp->io_base2 = bp->doorbells; - cp->max_kwqe_pending = 8; - cp->ctx_blk_size = CDU_ILT_PAGE_SZ; - cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + - bnx2x_cid_ilt_lines(bp); - cp->ctx_tbl_len = CNIC_ILT_LINES; - cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; - cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; - cp->drv_ctl = bnx2x_drv_ctl; - cp->drv_register_cnic = bnx2x_register_cnic; - cp->drv_unregister_cnic = bnx2x_unregister_cnic; - cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; - cp->iscsi_l2_client_id = - bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); - cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; - - if (NO_ISCSI_OOO(bp)) - cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; - - if (NO_ISCSI(bp)) - cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; - - if (NO_FCOE(bp)) - cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; - - DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " - "starting cid %d\n", - cp->ctx_blk_size, - cp->ctx_tbl_offset, - cp->ctx_tbl_len, - cp->starting_cid); - return cp; -} -EXPORT_SYMBOL(bnx2x_cnic_probe); - -#endif /* BCM_CNIC */ - diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h deleted file mode 100644 index 27b5ecb..0000000 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ /dev/null @@ -1,7146 +0,0 @@ -/* bnx2x_reg.h: Broadcom Everest network driver. - * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * The registers description starts with the register Access type followed - * by size in bits. For example [RW 32]. 
The access types are: - * R - Read only - * RC - Clear on read - * RW - Read/Write - * ST - Statistics register (clear on read) - * W - Write only - * WB - Wide bus register - the size is over 32 bits and it should be - * read/write in consecutive 32 bits accesses - * WR - Write Clear (write 1 to clear the bit) - * - */ -#ifndef BNX2X_REG_H -#define BNX2X_REG_H - -#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) -#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) -#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5) -#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3) -#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4) -#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1) -/* [RW 1] Initiate the ATC array - reset all the valid bits */ -#define ATC_REG_ATC_INIT_ARRAY 0x1100b8 -/* [R 1] ATC initalization done */ -#define ATC_REG_ATC_INIT_DONE 0x1100bc -/* [RC 6] Interrupt register #0 read clear */ -#define ATC_REG_ATC_INT_STS_CLR 0x1101c0 -/* [RW 5] Parity mask register #0 read/write */ -#define ATC_REG_ATC_PRTY_MASK 0x1101d8 -/* [RC 5] Parity register #0 read clear */ -#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0 -/* [RW 19] Interrupt mask register #0 read/write */ -#define BRB1_REG_BRB1_INT_MASK 0x60128 -/* [R 19] Interrupt register #0 read */ -#define BRB1_REG_BRB1_INT_STS 0x6011c -/* [RW 4] Parity mask register #0 read/write */ -#define BRB1_REG_BRB1_PRTY_MASK 0x60138 -/* [R 4] Parity register #0 read */ -#define BRB1_REG_BRB1_PRTY_STS 0x6012c -/* [RC 4] Parity register #0 read clear */ -#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130 -/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At - * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address - * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - - * following reset the first rbc access to this reg must be write; there can - * be no more rbc writes after the first one; there can be any number of rbc - * read following the first write; rbc access not following these rules will - * result in hang condition. */ -#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200 -/* [RW 10] The number of free blocks below which the full signal to class 0 - * is asserted */ -#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0 -#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230 -/* [RW 11] The number of free blocks above which the full signal to class 0 - * is de-asserted */ -#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4 -#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234 -/* [RW 11] The number of free blocks below which the full signal to class 1 - * is asserted */ -#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8 -#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238 -/* [RW 11] The number of free blocks above which the full signal to class 1 - * is de-asserted */ -#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc -#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c -/* [RW 11] The number of free blocks below which the full signal to the LB - * port is asserted */ -#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0 -/* [RW 10] The number of free blocks above which the full signal to the LB - * port is de-asserted */ -#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4 -/* [RW 10] The number of free blocks above which the High_llfc signal to - interface #n is de-asserted. */ -#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c -/* [RW 10] The number of free blocks below which the High_llfc signal to - interface #n is asserted. 
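/*
 * These access-type tags map directly onto driver accesses; for instance an
 * [RC] (clear-on-read) status register is drained by a single read.
 * Illustrative use with the driver's REG_RD() accessor:
 */
u32 brb1_prty = REG_RD(bp, BRB1_REG_BRB1_PRTY_STS_CLR);	/* read clears bits */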
*/ -#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c -/* [RW 11] The number of blocks guarantied for the LB port */ -#define BRB1_REG_LB_GUARANTIED 0x601ec -/* [RW 11] The hysteresis on the guarantied buffer space for the Lb port - * before signaling XON. */ -#define BRB1_REG_LB_GUARANTIED_HYST 0x60264 -/* [RW 24] LL RAM data. */ -#define BRB1_REG_LL_RAM 0x61000 -/* [RW 10] The number of free blocks above which the Low_llfc signal to - interface #n is de-asserted. */ -#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c -/* [RW 10] The number of free blocks below which the Low_llfc signal to - interface #n is asserted. */ -#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c -/* [RW 11] The number of blocks guarantied for class 0 in MAC 0. The - * register is applicable only when per_class_guaranty_mode is set. */ -#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244 -/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC - * 1 before signaling XON. The register is applicable only when - * per_class_guaranty_mode is set. */ -#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254 -/* [RW 11] The number of blocks guarantied for class 1 in MAC 0. The - * register is applicable only when per_class_guaranty_mode is set. */ -#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248 -/* [RW 11] The hysteresis on the guarantied buffer space for class 1in MAC 0 - * before signaling XON. The register is applicable only when - * per_class_guaranty_mode is set. */ -#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258 -/* [RW 11] The number of blocks guarantied for class 0in MAC1.The register - * is applicable only when per_class_guaranty_mode is set. */ -#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c -/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC - * 1 before signaling XON. The register is applicable only when - * per_class_guaranty_mode is set. */ -#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c -/* [RW 11] The number of blocks guarantied for class 1 in MAC 1. The - * register is applicable only when per_class_guaranty_mode is set. */ -#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250 -/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC - * 1 before signaling XON. The register is applicable only when - * per_class_guaranty_mode is set. */ -#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260 -/* [RW 11] The number of blocks guarantied for the MAC port. The register is - * applicable only when per_class_guaranty_mode is reset. */ -#define BRB1_REG_MAC_GUARANTIED_0 0x601e8 -#define BRB1_REG_MAC_GUARANTIED_1 0x60240 -/* [R 24] The number of full blocks. */ -#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090 -/* [ST 32] The number of cycles that the write_full signal towards MAC #0 - was asserted. */ -#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8 -#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc -#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8 -/* [ST 32] The number of cycles that the pause signal towards MAC #0 was - asserted. 
*/ -#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8 -#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc -/* [RW 10] The number of free blocks below which the pause signal to class 0 - * is asserted */ -#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0 -#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220 -/* [RW 11] The number of free blocks above which the pause signal to class 0 - * is de-asserted */ -#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4 -#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224 -/* [RW 11] The number of free blocks below which the pause signal to class 1 - * is asserted */ -#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8 -#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228 -/* [RW 11] The number of free blocks above which the pause signal to class 1 - * is de-asserted */ -#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc -#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c -/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */ -#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 -#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c -/* [RW 10] Write client 0: Assert pause threshold. */ -#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 -#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c -/* [R 24] The number of full blocks occupied by port. */ -#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 -/* [RW 1] Reset the design by software. */ -#define BRB1_REG_SOFT_RESET 0x600dc -/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */ -#define CCM_REG_CAM_OCCUP 0xd0188 -/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define CCM_REG_CCM_CFC_IFEN 0xd003c -/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is - disregarded; valid is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define CCM_REG_CCM_CQM_IFEN 0xd000c -/* [RW 1] If set the Q index; received from the QM is inserted to event ID. - Otherwise 0 is inserted. */ -#define CCM_REG_CCM_CQM_USE_Q 0xd00c0 -/* [RW 11] Interrupt mask register #0 read/write */ -#define CCM_REG_CCM_INT_MASK 0xd01e4 -/* [R 11] Interrupt register #0 read */ -#define CCM_REG_CCM_INT_STS 0xd01d8 -/* [RW 27] Parity mask register #0 read/write */ -#define CCM_REG_CCM_PRTY_MASK 0xd01f4 -/* [R 27] Parity register #0 read */ -#define CCM_REG_CCM_PRTY_STS 0xd01e8 -/* [RC 27] Parity register #0 read clear */ -#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec -/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS - REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). - Is used to determine the number of the AG context REG-pairs written back; - when the input message Reg1WbFlg isn't set. */ -#define CCM_REG_CCM_REG0_SZ 0xd00c4 -/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is - disregarded; valid is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define CCM_REG_CCM_STORM0_IFEN 0xd0004 -/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is - disregarded; valid is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define CCM_REG_CCM_STORM1_IFEN 0xd0008 -/* [RW 1] CDU AG read Interface enable. If 0 - the request input is - disregarded; valid output is deasserted; all other signals are treated as - usual; if 1 - normal activity. */ -#define CCM_REG_CDU_AG_RD_IFEN 0xd0030 -/* [RW 1] CDU AG write Interface enable. 
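/*
 * The XOFF/XON threshold pairs above implement hysteresis per class: the
 * full (or pause) signal asserts when the free-block count drops below the
 * *_XOFF_THRESHOLD_* value and de-asserts only once it rises back above the
 * matching *_XON_THRESHOLD_* value, so XON is programmed above XOFF.
 */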
If 0 - the request and valid input - are disregarded; all other signals are treated as usual; if 1 - normal - activity. */ -#define CCM_REG_CDU_AG_WR_IFEN 0xd002c -/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is - disregarded; valid output is deasserted; all other signals are treated as - usual; if 1 - normal activity. */ -#define CCM_REG_CDU_SM_RD_IFEN 0xd0038 -/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid - input is disregarded; all other signals are treated as usual; if 1 - - normal activity. */ -#define CCM_REG_CDU_SM_WR_IFEN 0xd0034 -/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes - the initial credit value; read returns the current value of the credit - counter. Must be initialized to 1 at start-up. */ -#define CCM_REG_CFC_INIT_CRD 0xd0204 -/* [RW 2] Auxiliary counter flag Q number 1. */ -#define CCM_REG_CNT_AUX1_Q 0xd00c8 -/* [RW 2] Auxiliary counter flag Q number 2. */ -#define CCM_REG_CNT_AUX2_Q 0xd00cc -/* [RW 28] The CM header value for QM request (primary). */ -#define CCM_REG_CQM_CCM_HDR_P 0xd008c -/* [RW 28] The CM header value for QM request (secondary). */ -#define CCM_REG_CQM_CCM_HDR_S 0xd0090 -/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define CCM_REG_CQM_CCM_IFEN 0xd0014 -/* [RW 6] QM output initial credit. Max credit available - 32. Write writes - the initial credit value; read returns the current value of the credit - counter. Must be initialized to 32 at start-up. */ -#define CCM_REG_CQM_INIT_CRD 0xd020c -/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 - stands for weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define CCM_REG_CQM_P_WEIGHT 0xd00b8 -/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 - stands for weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define CCM_REG_CQM_S_WEIGHT 0xd00bc -/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define CCM_REG_CSDM_IFEN 0xd0018 -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the SDM interface is detected. */ -#define CCM_REG_CSDM_LENGTH_MIS 0xd0170 -/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define CCM_REG_CSDM_WEIGHT 0xd00b4 -/* [RW 28] The CM header for QM formatting in case of an error in the QM - inputs. */ -#define CCM_REG_ERR_CCM_HDR 0xd0094 -/* [RW 8] The Event ID in case the input message ErrorFlg is set. */ -#define CCM_REG_ERR_EVNT_ID 0xd0098 -/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write - writes the initial credit value; read returns the current value of the - credit counter. Must be initialized to 64 at start-up. */ -#define CCM_REG_FIC0_INIT_CRD 0xd0210 -/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write - writes the initial credit value; read returns the current value of the - credit counter. Must be initialized to 64 at start-up. 
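/*
 * Weight encoding for the CCM WRR arbiters, per the comments above: the
 * value 0 selects weight 8 (most prioritised), 1 selects weight 1 (least),
 * 2 selects weight 2, and so on. Giving the primary QM input the largest
 * share would look like (illustrative):
 */
REG_WR(bp, CCM_REG_CQM_P_WEIGHT, 0);	/* 0 == weight 8, highest priority */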
*/ -#define CCM_REG_FIC1_INIT_CRD 0xd0214 -/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 - - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr; - ~ccm_registers_gr_ld0_pr.gr_ld0_pr and - ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and - outputs to STORM: aggregation; load FIC0; load FIC1 and store. */ -#define CCM_REG_GR_ARB_TYPE 0xd015c -/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the - highest priority is 3. It is supposed; that the Store channel priority is - the compliment to 4 of the rest priorities - Aggregation channel; Load - (FIC0) channel and Load (FIC1). */ -#define CCM_REG_GR_LD0_PR 0xd0164 -/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the - highest priority is 3. It is supposed; that the Store channel priority is - the compliment to 4 of the rest priorities - Aggregation channel; Load - (FIC0) channel and Load (FIC1). */ -#define CCM_REG_GR_LD1_PR 0xd0168 -/* [RW 2] General flags index. */ -#define CCM_REG_INV_DONE_Q 0xd0108 -/* [RW 4] The number of double REG-pairs(128 bits); loaded from the STORM - context and sent to STORM; for a specific connection type. The double - REG-pairs are used in order to align to STORM context row size of 128 - bits. The offset of these data in the STORM context is always 0. Index - _(0..15) stands for the connection type (one of 16). */ -#define CCM_REG_N_SM_CTX_LD_0 0xd004c -#define CCM_REG_N_SM_CTX_LD_1 0xd0050 -#define CCM_REG_N_SM_CTX_LD_2 0xd0054 -#define CCM_REG_N_SM_CTX_LD_3 0xd0058 -#define CCM_REG_N_SM_CTX_LD_4 0xd005c -/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define CCM_REG_PBF_IFEN 0xd0028 -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the pbf interface is detected. */ -#define CCM_REG_PBF_LENGTH_MIS 0xd0180 -/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define CCM_REG_PBF_WEIGHT 0xd00ac -#define CCM_REG_PHYS_QNUM1_0 0xd0134 -#define CCM_REG_PHYS_QNUM1_1 0xd0138 -#define CCM_REG_PHYS_QNUM2_0 0xd013c -#define CCM_REG_PHYS_QNUM2_1 0xd0140 -#define CCM_REG_PHYS_QNUM3_0 0xd0144 -#define CCM_REG_PHYS_QNUM3_1 0xd0148 -#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114 -#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118 -#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c -#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120 -#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124 -#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128 -#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c -#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130 -/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define CCM_REG_STORM_CCM_IFEN 0xd0010 -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the STORM interface is detected. */ -#define CCM_REG_STORM_LENGTH_MIS 0xd016c -/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin) - mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for - weight 1(least prioritised); 2 stands for weight 2 (more prioritised); - tc. */ -#define CCM_REG_STORM_WEIGHT 0xd009c -/* [RW 1] Input tsem Interface enable. 
If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define CCM_REG_TSEM_IFEN 0xd001c -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the tsem interface is detected. */ -#define CCM_REG_TSEM_LENGTH_MIS 0xd0174 -/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define CCM_REG_TSEM_WEIGHT 0xd00a0 -/* [RW 1] Input usem Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define CCM_REG_USEM_IFEN 0xd0024 -/* [RC 1] Set when message length mismatch (relative to last indication) at - the usem interface is detected. */ -#define CCM_REG_USEM_LENGTH_MIS 0xd017c -/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define CCM_REG_USEM_WEIGHT 0xd00a8 -/* [RW 1] Input xsem Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define CCM_REG_XSEM_IFEN 0xd0020 -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the xsem interface is detected. */ -#define CCM_REG_XSEM_LENGTH_MIS 0xd0178 -/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define CCM_REG_XSEM_WEIGHT 0xd00a4 -/* [RW 19] Indirect access to the descriptor table of the XX protection - mechanism. The fields are: [5:0] - message length; [12:6] - message - pointer; 18:13] - next pointer. */ -#define CCM_REG_XX_DESCR_TABLE 0xd0300 -#define CCM_REG_XX_DESCR_TABLE_SIZE 24 -/* [R 7] Used to read the value of XX protection Free counter. */ -#define CCM_REG_XX_FREE 0xd0184 -/* [RW 6] Initial value for the credit counter; responsible for fulfilling - of the Input Stage XX protection buffer by the XX protection pending - messages. Max credit available - 127. Write writes the initial credit - value; read returns the current value of the credit counter. Must be - initialized to maximum XX protected message size - 2 at start-up. */ -#define CCM_REG_XX_INIT_CRD 0xd0220 -/* [RW 7] The maximum number of pending messages; which may be stored in XX - protection. At read the ~ccm_registers_xx_free.xx_free counter is read. - At write comprises the start value of the ~ccm_registers_xx_free.xx_free - counter. */ -#define CCM_REG_XX_MSG_NUM 0xd0224 -/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ -#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044 -/* [RW 18] Indirect access to the XX table of the XX protection mechanism. - The fields are: [5:0] - tail pointer; 11:6] - Link List size; 17:12] - - header pointer. 
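/*
 * Decoding one XX-protection descriptor per the layout above ([5:0] message
 * length, [12:6] message pointer, [18:13] next pointer); a sketch assuming
 * a plain 32-bit read of the indirectly-accessed table:
 */
u32 entry    = REG_RD(bp, CCM_REG_XX_DESCR_TABLE);
u32 msg_len  = entry & 0x3f;		/* [5:0]   */
u32 msg_ptr  = (entry >> 6) & 0x7f;	/* [12:6]  */
u32 next_ptr = (entry >> 13) & 0x3f;	/* [18:13] */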
*/ -#define CCM_REG_XX_TABLE 0xd0280 -#define CDU_REG_CDU_CHK_MASK0 0x101000 -#define CDU_REG_CDU_CHK_MASK1 0x101004 -#define CDU_REG_CDU_CONTROL0 0x101008 -#define CDU_REG_CDU_DEBUG 0x101010 -#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020 -/* [RW 7] Interrupt mask register #0 read/write */ -#define CDU_REG_CDU_INT_MASK 0x10103c -/* [R 7] Interrupt register #0 read */ -#define CDU_REG_CDU_INT_STS 0x101030 -/* [RW 5] Parity mask register #0 read/write */ -#define CDU_REG_CDU_PRTY_MASK 0x10104c -/* [R 5] Parity register #0 read */ -#define CDU_REG_CDU_PRTY_STS 0x101040 -/* [RC 5] Parity register #0 read clear */ -#define CDU_REG_CDU_PRTY_STS_CLR 0x101044 -/* [RC 32] logging of error data in case of a CDU load error: - {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; - ype_error; ctual_active; ctual_compressed_context}; */ -#define CDU_REG_ERROR_DATA 0x101014 -/* [WB 216] L1TT ram access. each entry has the following format : - {mrege_regions[7:0]; ffset12[5:0]...offset0[5:0]; - ength12[5:0]...length0[5:0]; d12[3:0]...id0[3:0]} */ -#define CDU_REG_L1TT 0x101800 -/* [WB 24] MATT ram access. each entry has the following - format:{RegionLength[11:0]; egionOffset[11:0]} */ -#define CDU_REG_MATT 0x101100 -/* [RW 1] when this bit is set the CDU operates in e1hmf mode */ -#define CDU_REG_MF_MODE 0x101050 -/* [R 1] indication the initializing the activity counter by the hardware - was done. */ -#define CFC_REG_AC_INIT_DONE 0x104078 -/* [RW 13] activity counter ram access */ -#define CFC_REG_ACTIVITY_COUNTER 0x104400 -#define CFC_REG_ACTIVITY_COUNTER_SIZE 256 -/* [R 1] indication the initializing the cams by the hardware was done. */ -#define CFC_REG_CAM_INIT_DONE 0x10407c -/* [RW 2] Interrupt mask register #0 read/write */ -#define CFC_REG_CFC_INT_MASK 0x104108 -/* [R 2] Interrupt register #0 read */ -#define CFC_REG_CFC_INT_STS 0x1040fc -/* [RC 2] Interrupt register #0 read clear */ -#define CFC_REG_CFC_INT_STS_CLR 0x104100 -/* [RW 4] Parity mask register #0 read/write */ -#define CFC_REG_CFC_PRTY_MASK 0x104118 -/* [R 4] Parity register #0 read */ -#define CFC_REG_CFC_PRTY_STS 0x10410c -/* [RC 4] Parity register #0 read clear */ -#define CFC_REG_CFC_PRTY_STS_CLR 0x104110 -/* [RW 21] CID cam access (21:1 - Data; alid - 0) */ -#define CFC_REG_CID_CAM 0x104800 -#define CFC_REG_CONTROL0 0x104028 -#define CFC_REG_DEBUG0 0x104050 -/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error - vector) whether the cfc should be disabled upon it */ -#define CFC_REG_DISABLE_ON_ERROR 0x104044 -/* [RC 14] CFC error vector. when the CFC detects an internal error it will - set one of these bits. the bit description can be found in CFC - specifications */ -#define CFC_REG_ERROR_VECTOR 0x10403c -/* [WB 93] LCID info ram access */ -#define CFC_REG_INFO_RAM 0x105000 -#define CFC_REG_INFO_RAM_SIZE 1024 -#define CFC_REG_INIT_REG 0x10404c -#define CFC_REG_INTERFACES 0x104058 -/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this - field allows changing the priorities of the weighted-round-robin arbiter - which selects which CFC load client should be served next */ -#define CFC_REG_LCREQ_WEIGHTS 0x104084 -/* [RW 16] Link List ram access; data = {prev_lcid; ext_lcid} */ -#define CFC_REG_LINK_LIST 0x104c00 -#define CFC_REG_LINK_LIST_SIZE 256 -/* [R 1] indication the initializing the link list by the hardware was done. 
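/*
 * The [R 1] *_INIT_DONE flags are polled after reset before the block is
 * touched; a simplified wait, assuming the real init path bounds it with a
 * timeout:
 */
while (!REG_RD(bp, CFC_REG_CAM_INIT_DONE))
	usleep_range(100, 200);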
*/ -#define CFC_REG_LL_INIT_DONE 0x104074 -/* [R 9] Number of allocated LCIDs which are at empty state */ -#define CFC_REG_NUM_LCIDS_ALLOC 0x104020 -/* [R 9] Number of Arriving LCIDs in Link List Block */ -#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 -#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120 -/* [R 9] Number of Leaving LCIDs in Link List Block */ -#define CFC_REG_NUM_LCIDS_LEAVING 0x104018 -#define CFC_REG_WEAK_ENABLE_PF 0x104124 -/* [RW 8] The event id for aggregated interrupt 0 */ -#define CSDM_REG_AGG_INT_EVENT_0 0xc2038 -#define CSDM_REG_AGG_INT_EVENT_10 0xc2060 -#define CSDM_REG_AGG_INT_EVENT_11 0xc2064 -#define CSDM_REG_AGG_INT_EVENT_12 0xc2068 -#define CSDM_REG_AGG_INT_EVENT_13 0xc206c -#define CSDM_REG_AGG_INT_EVENT_14 0xc2070 -#define CSDM_REG_AGG_INT_EVENT_15 0xc2074 -#define CSDM_REG_AGG_INT_EVENT_16 0xc2078 -#define CSDM_REG_AGG_INT_EVENT_2 0xc2040 -#define CSDM_REG_AGG_INT_EVENT_3 0xc2044 -#define CSDM_REG_AGG_INT_EVENT_4 0xc2048 -#define CSDM_REG_AGG_INT_EVENT_5 0xc204c -#define CSDM_REG_AGG_INT_EVENT_6 0xc2050 -#define CSDM_REG_AGG_INT_EVENT_7 0xc2054 -#define CSDM_REG_AGG_INT_EVENT_8 0xc2058 -#define CSDM_REG_AGG_INT_EVENT_9 0xc205c -/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) - or auto-mask-mode (1) */ -#define CSDM_REG_AGG_INT_MODE_10 0xc21e0 -#define CSDM_REG_AGG_INT_MODE_11 0xc21e4 -#define CSDM_REG_AGG_INT_MODE_12 0xc21e8 -#define CSDM_REG_AGG_INT_MODE_13 0xc21ec -#define CSDM_REG_AGG_INT_MODE_14 0xc21f0 -#define CSDM_REG_AGG_INT_MODE_15 0xc21f4 -#define CSDM_REG_AGG_INT_MODE_16 0xc21f8 -#define CSDM_REG_AGG_INT_MODE_6 0xc21d0 -#define CSDM_REG_AGG_INT_MODE_7 0xc21d4 -#define CSDM_REG_AGG_INT_MODE_8 0xc21d8 -#define CSDM_REG_AGG_INT_MODE_9 0xc21dc -/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ -#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008 -/* [RW 16] The maximum value of the completion counter #0 */ -#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c -/* [RW 16] The maximum value of the completion counter #1 */ -#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020 -/* [RW 16] The maximum value of the completion counter #2 */ -#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024 -/* [RW 16] The maximum value of the completion counter #3 */ -#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028 -/* [RW 13] The start address in the internal RAM for the completion - counters. */ -#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c -/* [RW 32] Interrupt mask register #0 read/write */ -#define CSDM_REG_CSDM_INT_MASK_0 0xc229c -#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac -/* [R 32] Interrupt register #0 read */ -#define CSDM_REG_CSDM_INT_STS_0 0xc2290 -#define CSDM_REG_CSDM_INT_STS_1 0xc22a0 -/* [RW 11] Parity mask register #0 read/write */ -#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc -/* [R 11] Parity register #0 read */ -#define CSDM_REG_CSDM_PRTY_STS 0xc22b0 -/* [RC 11] Parity register #0 read clear */ -#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4 -#define CSDM_REG_ENABLE_IN1 0xc2238 -#define CSDM_REG_ENABLE_IN2 0xc223c -#define CSDM_REG_ENABLE_OUT1 0xc2240 -#define CSDM_REG_ENABLE_OUT2 0xc2244 -/* [RW 4] The initial number of messages that can be sent to the pxp control - interface without receiving any ACK. 
*/ -#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc -/* [ST 32] The number of ACK after placement messages received */ -#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c -/* [ST 32] The number of packet end messages received from the parser */ -#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274 -/* [ST 32] The number of requests received from the pxp async if */ -#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278 -/* [ST 32] The number of commands received in queue 0 */ -#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248 -/* [ST 32] The number of commands received in queue 10 */ -#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c -/* [ST 32] The number of commands received in queue 11 */ -#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270 -/* [ST 32] The number of commands received in queue 1 */ -#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c -/* [ST 32] The number of commands received in queue 3 */ -#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250 -/* [ST 32] The number of commands received in queue 4 */ -#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254 -/* [ST 32] The number of commands received in queue 5 */ -#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258 -/* [ST 32] The number of commands received in queue 6 */ -#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c -/* [ST 32] The number of commands received in queue 7 */ -#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260 -/* [ST 32] The number of commands received in queue 8 */ -#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264 -/* [ST 32] The number of commands received in queue 9 */ -#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268 -/* [RW 13] The start address in the internal RAM for queue counters */ -#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010 -/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ -#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548 -/* [R 1] parser fifo empty in sdm_sync block */ -#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550 -/* [R 1] parser serial fifo empty in sdm_sync block */ -#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558 -/* [RW 32] Tick for timer counter. Applicable only when - ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */ -#define CSDM_REG_TIMER_TICK 0xc2000 -/* [RW 5] The number of time_slots in the arbitration cycle */ -#define CSEM_REG_ARB_CYCLE_SIZE 0x200034 -/* [RW 3] The source that is associated with arbitration element 0. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2 */ -#define CSEM_REG_ARB_ELEMENT0 0x200020 -/* [RW 3] The source that is associated with arbitration element 1. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~csem_registers_arb_element0.arb_element0 */ -#define CSEM_REG_ARB_ELEMENT1 0x200024 -/* [RW 3] The source that is associated with arbitration element 2. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~csem_registers_arb_element0.arb_element0 - and ~csem_registers_arb_element1.arb_element1 */ -#define CSEM_REG_ARB_ELEMENT2 0x200028 -/* [RW 3] The source that is associated with arbitration element 3. 
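/*
 * [ST] registers are statistics counters that clear on read, so sampling
 * one both fetches and resets the count (illustrative):
 */
u32 q0_cmds = REG_RD(bp, CSDM_REG_NUM_OF_Q0_CMD);	/* restarts from zero */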
Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2.Could - not be equal to register ~csem_registers_arb_element0.arb_element0 and - ~csem_registers_arb_element1.arb_element1 and - ~csem_registers_arb_element2.arb_element2 */ -#define CSEM_REG_ARB_ELEMENT3 0x20002c -/* [RW 3] The source that is associated with arbitration element 4. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~csem_registers_arb_element0.arb_element0 - and ~csem_registers_arb_element1.arb_element1 and - ~csem_registers_arb_element2.arb_element2 and - ~csem_registers_arb_element3.arb_element3 */ -#define CSEM_REG_ARB_ELEMENT4 0x200030 -/* [RW 32] Interrupt mask register #0 read/write */ -#define CSEM_REG_CSEM_INT_MASK_0 0x200110 -#define CSEM_REG_CSEM_INT_MASK_1 0x200120 -/* [R 32] Interrupt register #0 read */ -#define CSEM_REG_CSEM_INT_STS_0 0x200104 -#define CSEM_REG_CSEM_INT_STS_1 0x200114 -/* [RW 32] Parity mask register #0 read/write */ -#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130 -#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140 -/* [R 32] Parity register #0 read */ -#define CSEM_REG_CSEM_PRTY_STS_0 0x200124 -#define CSEM_REG_CSEM_PRTY_STS_1 0x200134 -/* [RC 32] Parity register #0 read clear */ -#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128 -#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138 -#define CSEM_REG_ENABLE_IN 0x2000a4 -#define CSEM_REG_ENABLE_OUT 0x2000a8 -/* [RW 32] This address space contains all registers and memories that are - placed in SEM_FAST block. The SEM_FAST registers are described in - appendix B. In order to access the sem_fast registers the base address - ~fast_memory.fast_memory should be added to eachsem_fast register offset. */ -#define CSEM_REG_FAST_MEMORY 0x220000 -/* [RW 1] Disables input messages from FIC0 May be updated during run_time - by the microcode */ -#define CSEM_REG_FIC0_DISABLE 0x200224 -/* [RW 1] Disables input messages from FIC1 May be updated during run_time - by the microcode */ -#define CSEM_REG_FIC1_DISABLE 0x200234 -/* [RW 15] Interrupt table Read and write access to it is not possible in - the middle of the work */ -#define CSEM_REG_INT_TABLE 0x200400 -/* [ST 24] Statistics register. The number of messages that entered through - FIC0 */ -#define CSEM_REG_MSG_NUM_FIC0 0x200000 -/* [ST 24] Statistics register. The number of messages that entered through - FIC1 */ -#define CSEM_REG_MSG_NUM_FIC1 0x200004 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC0 */ -#define CSEM_REG_MSG_NUM_FOC0 0x200008 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC1 */ -#define CSEM_REG_MSG_NUM_FOC1 0x20000c -/* [ST 24] Statistics register. The number of messages that were sent to - FOC2 */ -#define CSEM_REG_MSG_NUM_FOC2 0x200010 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC3 */ -#define CSEM_REG_MSG_NUM_FOC3 0x200014 -/* [RW 1] Disables input messages from the passive buffer May be updated - during run_time by the microcode */ -#define CSEM_REG_PAS_DISABLE 0x20024c -/* [WB 128] Debug only. Passive buffer memory */ -#define CSEM_REG_PASSIVE_BUFFER 0x202000 -/* [WB 46] pram memory. B45 is parity; b[44:0] - data. 
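The five CSEM arbitration-element registers above sit at consecutive dwords (0x200020 through 0x200030), and their comments require every element's source to differ from all lower-numbered elements. A hedged sketch of programming them with that constraint checked up front, assuming the driver's REG_RD()/REG_WR() accessors and struct bnx2x *bp handle; the helper name is hypothetical:

#define CSEM_REG_ARB_ELEMENT0 0x200020	/* ELEMENT1..4 follow at +4 each */

/* Hypothetical helper: program the five arbitration sources (0 = foc0;
 * 1 = fic1; 2..4 = sleeping threads, priority 0..2) after checking the
 * documented "must differ from every lower element" constraint. */
static int csem_set_arb_elements(struct bnx2x *bp, const u8 src[5])
{
	int i, j;

	for (i = 0; i < 5; i++)
		for (j = 0; j < i; j++)
			if (src[i] == src[j])
				return -EINVAL;	/* violates the constraint */
	for (i = 0; i < 5; i++)
		REG_WR(bp, CSEM_REG_ARB_ELEMENT0 + 4 * i, src[i]);
	return 0;
}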
*/ -#define CSEM_REG_PRAM 0x240000 -/* [R 16] Valid sleeping threads indication have bit per thread */ -#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c -/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ -#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0 -/* [RW 16] List of free threads . There is a bit per thread. */ -#define CSEM_REG_THREADS_LIST 0x2002e4 -/* [RW 3] The arbitration scheme of time_slot 0 */ -#define CSEM_REG_TS_0_AS 0x200038 -/* [RW 3] The arbitration scheme of time_slot 10 */ -#define CSEM_REG_TS_10_AS 0x200060 -/* [RW 3] The arbitration scheme of time_slot 11 */ -#define CSEM_REG_TS_11_AS 0x200064 -/* [RW 3] The arbitration scheme of time_slot 12 */ -#define CSEM_REG_TS_12_AS 0x200068 -/* [RW 3] The arbitration scheme of time_slot 13 */ -#define CSEM_REG_TS_13_AS 0x20006c -/* [RW 3] The arbitration scheme of time_slot 14 */ -#define CSEM_REG_TS_14_AS 0x200070 -/* [RW 3] The arbitration scheme of time_slot 15 */ -#define CSEM_REG_TS_15_AS 0x200074 -/* [RW 3] The arbitration scheme of time_slot 16 */ -#define CSEM_REG_TS_16_AS 0x200078 -/* [RW 3] The arbitration scheme of time_slot 17 */ -#define CSEM_REG_TS_17_AS 0x20007c -/* [RW 3] The arbitration scheme of time_slot 18 */ -#define CSEM_REG_TS_18_AS 0x200080 -/* [RW 3] The arbitration scheme of time_slot 1 */ -#define CSEM_REG_TS_1_AS 0x20003c -/* [RW 3] The arbitration scheme of time_slot 2 */ -#define CSEM_REG_TS_2_AS 0x200040 -/* [RW 3] The arbitration scheme of time_slot 3 */ -#define CSEM_REG_TS_3_AS 0x200044 -/* [RW 3] The arbitration scheme of time_slot 4 */ -#define CSEM_REG_TS_4_AS 0x200048 -/* [RW 3] The arbitration scheme of time_slot 5 */ -#define CSEM_REG_TS_5_AS 0x20004c -/* [RW 3] The arbitration scheme of time_slot 6 */ -#define CSEM_REG_TS_6_AS 0x200050 -/* [RW 3] The arbitration scheme of time_slot 7 */ -#define CSEM_REG_TS_7_AS 0x200054 -/* [RW 3] The arbitration scheme of time_slot 8 */ -#define CSEM_REG_TS_8_AS 0x200058 -/* [RW 3] The arbitration scheme of time_slot 9 */ -#define CSEM_REG_TS_9_AS 0x20005c -/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 - * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ -#define CSEM_REG_VFPF_ERR_NUM 0x200380 -/* [RW 1] Parity mask register #0 read/write */ -#define DBG_REG_DBG_PRTY_MASK 0xc0a8 -/* [R 1] Parity register #0 read */ -#define DBG_REG_DBG_PRTY_STS 0xc09c -/* [RC 1] Parity register #0 read clear */ -#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0 -/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The - * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; - * 4.Completion function=0; 5.Error handling=0 */ -#define DMAE_REG_BACKWARD_COMP_EN 0x10207c -/* [RW 32] Commands memory. The address to command X; row Y is to calculated - as 14*X+Y. */ -#define DMAE_REG_CMD_MEM 0x102400 -#define DMAE_REG_CMD_MEM_SIZE 224 -/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c - initial value is all ones. */ -#define DMAE_REG_CRC16C_INIT 0x10201c -/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the - CRC-16 T10 initial value is all ones. */ -#define DMAE_REG_CRC16T10_INIT 0x102020 -/* [RW 2] Interrupt mask register #0 read/write */ -#define DMAE_REG_DMAE_INT_MASK 0x102054 -/* [RW 4] Parity mask register #0 read/write */ -#define DMAE_REG_DMAE_PRTY_MASK 0x102064 -/* [R 4] Parity register #0 read */ -#define DMAE_REG_DMAE_PRTY_STS 0x102058 -/* [RC 4] Parity register #0 read clear */ -#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c -/* [RW 1] Command 0 go. 
*/
-#define DMAE_REG_GO_C0 0x102080
-/* [RW 1] Command 1 go. */
-#define DMAE_REG_GO_C1 0x102084
-/* [RW 1] Command 10 go. */
-#define DMAE_REG_GO_C10 0x102088
-/* [RW 1] Command 11 go. */
-#define DMAE_REG_GO_C11 0x10208c
-/* [RW 1] Command 12 go. */
-#define DMAE_REG_GO_C12 0x102090
-/* [RW 1] Command 13 go. */
-#define DMAE_REG_GO_C13 0x102094
-/* [RW 1] Command 14 go. */
-#define DMAE_REG_GO_C14 0x102098
-/* [RW 1] Command 15 go. */
-#define DMAE_REG_GO_C15 0x10209c
-/* [RW 1] Command 2 go. */
-#define DMAE_REG_GO_C2 0x1020a0
-/* [RW 1] Command 3 go. */
-#define DMAE_REG_GO_C3 0x1020a4
-/* [RW 1] Command 4 go. */
-#define DMAE_REG_GO_C4 0x1020a8
-/* [RW 1] Command 5 go. */
-#define DMAE_REG_GO_C5 0x1020ac
-/* [RW 1] Command 6 go. */
-#define DMAE_REG_GO_C6 0x1020b0
-/* [RW 1] Command 7 go. */
-#define DMAE_REG_GO_C7 0x1020b4
-/* [RW 1] Command 8 go. */
-#define DMAE_REG_GO_C8 0x1020b8
-/* [RW 1] Command 9 go. */
-#define DMAE_REG_GO_C9 0x1020bc
-/* [RW 1] DMAE GRC Interface (Target; master) enable. If 0 - the acknowledge
- input is disregarded; valid is deasserted; all other signals are treated
- as usual; if 1 - normal activity. */
-#define DMAE_REG_GRC_IFEN 0x102008
-/* [RW 1] DMAE PCI Interface (Request; read; write) enable. If 0 - the
- acknowledge input is disregarded; valid is deasserted; full is asserted;
- all other signals are treated as usual; if 1 - normal activity. */
-#define DMAE_REG_PCI_IFEN 0x102004
-/* [RW 4] DMAE-PCI Request Interface initial credit. Write writes the
- initial value to the credit counter; related to the address. Read returns
- the current value of the counter. */
-#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0
-/* [RW 8] Aggregation command. */
-#define DORQ_REG_AGG_CMD0 0x170060
-/* [RW 8] Aggregation command. */
-#define DORQ_REG_AGG_CMD1 0x170064
-/* [RW 8] Aggregation command. */
-#define DORQ_REG_AGG_CMD2 0x170068
-/* [RW 8] Aggregation command. */
-#define DORQ_REG_AGG_CMD3 0x17006c
-/* [RW 28] UCM Header. */
-#define DORQ_REG_CMHEAD_RX 0x170050
-/* [RW 32] Doorbell address for RBC doorbells (function 0). */
-#define DORQ_REG_DB_ADDR0 0x17008c
-/* [RW 5] Interrupt mask register #0 read/write */
-#define DORQ_REG_DORQ_INT_MASK 0x170180
-/* [R 5] Interrupt register #0 read */
-#define DORQ_REG_DORQ_INT_STS 0x170174
-/* [RC 5] Interrupt register #0 read clear */
-#define DORQ_REG_DORQ_INT_STS_CLR 0x170178
-/* [RW 2] Parity mask register #0 read/write */
-#define DORQ_REG_DORQ_PRTY_MASK 0x170190
-/* [R 2] Parity register #0 read */
-#define DORQ_REG_DORQ_PRTY_STS 0x170184
-/* [RC 2] Parity register #0 read clear */
-#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
-/* [RW 8] The address to write the DPM CID to STORM. */
-#define DORQ_REG_DPM_CID_ADDR 0x170044
-/* [RW 5] The DPM mode CID extraction offset. */
-#define DORQ_REG_DPM_CID_OFST 0x170030
-/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
-#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c
-/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
-#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078
-/* [R 13] Current value of the DQ FIFO fill level according to following
- pointer. The range is 0 - 256 FIFO rows; where each row stands for the
- doorbell. */
-#define DORQ_REG_DQ_FILL_LVLF 0x1700a4
-/* [R 1] DQ FIFO full status. Is set; when FIFO filling level is more or
- equal to full threshold; reset on full clear. */
-#define DORQ_REG_DQ_FULL_ST 0x1700c0
-/* [RW 28] The value sent to CM header in the case of CFC load error.
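Taken together, DMAE_REG_CMD_MEM and the GO registers form the DMAE submission path: command X occupies 14 dwords starting at word 14*X (hence DMAE_REG_CMD_MEM_SIZE = 16 * 14 = 224), and writing 1 to the command's GO register starts it. Because the GO defines above are sorted alphabetically by name, their addresses are not monotonic in the command index, so an explicit lookup table is the safest way to kick command idx. An illustrative sketch, assuming the driver's REG_WR() accessor and struct bnx2x *bp; the function name is hypothetical:

#define DMAE_REG_CMD_MEM 0x102400

/* GO register per command index, built from the defines above
 * (note C10..C15 sort between C1 and C2). */
static const u32 dmae_reg_go_c[] = {
	0x102080, 0x102084, 0x1020a0, 0x1020a4,	/* GO_C0..C3   */
	0x1020a8, 0x1020ac, 0x1020b0, 0x1020b4,	/* GO_C4..C7   */
	0x1020b8, 0x1020bc, 0x102088, 0x10208c,	/* GO_C8..C11  */
	0x102090, 0x102094, 0x102098, 0x10209c	/* GO_C12..C15 */
};

/* Hypothetical helper: load the 14 command dwords at word address
 * 14*idx + y (byte offset (14*idx + y) * 4), then set the GO bit. */
static void dmae_post_and_go(struct bnx2x *bp, int idx, const u32 cmd[14])
{
	int y;

	for (y = 0; y < 14; y++)
		REG_WR(bp, DMAE_REG_CMD_MEM + (idx * 14 + y) * 4, cmd[y]);
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}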
*/ -#define DORQ_REG_ERR_CMHEAD 0x170058 -#define DORQ_REG_IF_EN 0x170004 -#define DORQ_REG_MODE_ACT 0x170008 -/* [RW 5] The normal mode CID extraction offset. */ -#define DORQ_REG_NORM_CID_OFST 0x17002c -/* [RW 28] TCM Header when only TCP context is loaded. */ -#define DORQ_REG_NORM_CMHEAD_TX 0x17004c -/* [RW 3] The number of simultaneous outstanding requests to Context Fetch - Interface. */ -#define DORQ_REG_OUTST_REQ 0x17003c -#define DORQ_REG_PF_USAGE_CNT 0x1701d0 -#define DORQ_REG_REGN 0x170038 -/* [R 4] Current value of response A counter credit. Initial credit is - configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd - register. */ -#define DORQ_REG_RSPA_CRD_CNT 0x1700ac -/* [R 4] Current value of response B counter credit. Initial credit is - configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd - register. */ -#define DORQ_REG_RSPB_CRD_CNT 0x1700b0 -/* [RW 4] The initial credit at the Doorbell Response Interface. The write - writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The - read reads this written value. */ -#define DORQ_REG_RSP_INIT_CRD 0x170048 -/* [RW 4] Initial activity counter value on the load request; when the - shortcut is done. */ -#define DORQ_REG_SHRT_ACT_CNT 0x170070 -/* [RW 28] TCM Header when both ULP and TCP context is loaded. */ -#define DORQ_REG_SHRT_CMHEAD 0x170054 -#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4) -#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0) -#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3) -#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7) -#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2) -#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) -#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0) -#define HC_REG_AGG_INT_0 0x108050 -#define HC_REG_AGG_INT_1 0x108054 -#define HC_REG_ATTN_BIT 0x108120 -#define HC_REG_ATTN_IDX 0x108100 -#define HC_REG_ATTN_MSG0_ADDR_L 0x108018 -#define HC_REG_ATTN_MSG1_ADDR_L 0x108020 -#define HC_REG_ATTN_NUM_P0 0x108038 -#define HC_REG_ATTN_NUM_P1 0x10803c -#define HC_REG_COMMAND_REG 0x108180 -#define HC_REG_CONFIG_0 0x108000 -#define HC_REG_CONFIG_1 0x108004 -#define HC_REG_FUNC_NUM_P0 0x1080ac -#define HC_REG_FUNC_NUM_P1 0x1080b0 -/* [RW 3] Parity mask register #0 read/write */ -#define HC_REG_HC_PRTY_MASK 0x1080a0 -/* [R 3] Parity register #0 read */ -#define HC_REG_HC_PRTY_STS 0x108094 -/* [RC 3] Parity register #0 read clear */ -#define HC_REG_HC_PRTY_STS_CLR 0x108098 -#define HC_REG_INT_MASK 0x108108 -#define HC_REG_LEADING_EDGE_0 0x108040 -#define HC_REG_LEADING_EDGE_1 0x108048 -#define HC_REG_MAIN_MEMORY 0x108800 -#define HC_REG_MAIN_MEMORY_SIZE 152 -#define HC_REG_P0_PROD_CONS 0x108200 -#define HC_REG_P1_PROD_CONS 0x108400 -#define HC_REG_PBA_COMMAND 0x108140 -#define HC_REG_PCI_CONFIG_0 0x108010 -#define HC_REG_PCI_CONFIG_1 0x108014 -#define HC_REG_STATISTIC_COUNTERS 0x109000 -#define HC_REG_TRAILING_EDGE_0 0x108044 -#define HC_REG_TRAILING_EDGE_1 0x10804c -#define HC_REG_UC_RAM_ADDR_0 0x108028 -#define HC_REG_UC_RAM_ADDR_1 0x108030 -#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068 -#define HC_REG_VQID_0 0x108008 -#define HC_REG_VQID_1 0x10800c -#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1) -#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0) -#define IGU_REG_ATTENTION_ACK_BITS 0x130108 -/* [R 4] Debug: attn_fsm */ -#define IGU_REG_ATTN_FSM 0x130054 -#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c -#define IGU_REG_ATTN_MSG_ADDR_L 0x130120 -/* [R 4] Debug: [3] - attention write done message is pending (0-no pending; - * 1-pending). 
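The HC_CONFIG_0_REG_* masks above name individual enable bits inside HC_REG_CONFIG_0, so interrupt-mode changes are naturally read-modify-write. A hedged sketch of switching port 0 from INTA to MSI/MSI-X delivery, assuming REG_RD()/REG_WR(); the helper is hypothetical, and real initialization code sets more bits than this:

#define HC_REG_CONFIG_0				0x108000
#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0	(0x1<<2)
#define HC_CONFIG_0_REG_INT_LINE_EN_0		(0x1<<3)
#define HC_CONFIG_0_REG_ATTN_BIT_EN_0		(0x1<<4)

/* Hypothetical helper: enable MSI/MSI-X plus attention delivery and
 * disable the INTA line for port 0 via read-modify-write. */
static void hc_enable_msix_p0(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, HC_REG_CONFIG_0);

	val |= HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
	       HC_CONFIG_0_REG_ATTN_BIT_EN_0;
	val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
	REG_WR(bp, HC_REG_CONFIG_0, val);
}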
[2:0] = PFID. Pending means attention message was sent; but
- * write done wasn't received. */
-#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
-#define IGU_REG_BLOCK_CONFIGURATION 0x130000
-#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
-#define IGU_REG_COMMAND_REG_CTRL 0x13012c
-/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
- * is clear. The bits in this register are set and clear via the producer
- * command. Data valid only in addresses 0-4. all the rest are zero. */
-#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
-/* [R 5] Debug: ctrl_fsm */
-#define IGU_REG_CTRL_FSM 0x130064
-/* [R 1] data available for error memory. If this bit is clear do not read
- * from error_handling_memory. */
-#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
-/* [RW 11] Parity mask register #0 read/write */
-#define IGU_REG_IGU_PRTY_MASK 0x1300a8
-/* [R 11] Parity register #0 read */
-#define IGU_REG_IGU_PRTY_STS 0x13009c
-/* [RC 11] Parity register #0 read clear */
-#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
-/* [R 4] Debug: int_handle_fsm */
-#define IGU_REG_INT_HANDLE_FSM 0x130050
-#define IGU_REG_LEADING_EDGE_LATCH 0x130134
-/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
- * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
- * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
-#define IGU_REG_MAPPING_MEMORY 0x131000
-#define IGU_REG_MAPPING_MEMORY_SIZE 136
-#define IGU_REG_PBA_STATUS_LSB 0x130138
-#define IGU_REG_PBA_STATUS_MSB 0x13013c
-#define IGU_REG_PCI_PF_MSI_EN 0x130140
-#define IGU_REG_PCI_PF_MSIX_EN 0x130144
-#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
-/* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no
- * pending; 1 = pending. Pending means interrupt was asserted; and write
- * done was not received. Data valid only in addresses 0-4. all the rest are
- * zero. */
-#define IGU_REG_PENDING_BITS_STATUS 0x130300
-#define IGU_REG_PF_CONFIGURATION 0x130154
-/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
- * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
- * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
- * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
- * - In backward compatible mode; for non default SB; each even line in the
- * memory holds the U producer and each odd line hold the C producer. The
- * first 128 producer are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
- * last 20 producers are for the DSB for each PF. each PF has five segments
- * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
- * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
-#define IGU_REG_PROD_CONS_MEMORY 0x132000
-/* [R 3] Debug: pxp_arb_fsm */
-#define IGU_REG_PXP_ARB_FSM 0x130068
-/* [RW 6] Write one for each bit will reset the appropriate memory. When the
- * memory reset finished the appropriate bit will be clear.
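The IGU mapping-CAM comment above fully specifies the 14-bit entry layout, which makes entry composition mechanical. Hypothetical helpers, sketched purely from that documented bit layout (u8/u16 as in linux/types.h):

/* Compose a mapping-CAM entry for a PF: [0] valid, [6:1] vector,
 * [9:7] PF number, [13] = 1 for PF. */
static u16 igu_cam_entry_pf(u8 pf_num, u8 vector)
{
	return 0x1 | ((vector & 0x3f) << 1) |
	       ((pf_num & 0x7) << 7) | (0x1 << 13);
}

/* Same for a VF: [12:7] VF number, [13] = 0. */
static u16 igu_cam_entry_vf(u8 vf_num, u8 vector)
{
	return 0x1 | ((vector & 0x3f) << 1) | ((vf_num & 0x3f) << 7);
}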
Bit 0 - mapping - * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3 - * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */ -#define IGU_REG_RESET_MEMORIES 0x130158 -/* [R 4] Debug: sb_ctrl_fsm */ -#define IGU_REG_SB_CTRL_FSM 0x13004c -#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c -#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160 -#define IGU_REG_SB_MASK_LSB 0x130164 -#define IGU_REG_SB_MASK_MSB 0x130168 -/* [RW 16] Number of command that were dropped without causing an interrupt - * due to: read access for WO BAR address; or write access for RO BAR - * address or any access for reserved address or PCI function error is set - * and address is not MSIX; PBA or cleanup */ -#define IGU_REG_SILENT_DROP 0x13016c -/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 - - * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per - * PF; 68-71 number of ATTN messages per PF */ -#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800 -/* [RW 32] Number of cycles the timer mask masking the IGU interrupt when a - * timer mask command arrives. Value must be bigger than 100. */ -#define IGU_REG_TIMER_MASKING_VALUE 0x13003c -#define IGU_REG_TRAILING_EDGE_LATCH 0x130104 -#define IGU_REG_VF_CONFIGURATION 0x130170 -/* [WB_R 32] Each bit represent write done pending bits status for that SB - * (MSI/MSIX message was sent and write done was not received yet). 0 = - * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */ -#define IGU_REG_WRITE_DONE_PENDING 0x130480 -#define MCP_A_REG_MCPR_SCRATCH 0x3a0000 -#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c -#define MCP_REG_MCPR_GP_INPUTS 0x800c0 -#define MCP_REG_MCPR_GP_OENABLE 0x800c8 -#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4 -#define MCP_REG_MCPR_IMC_COMMAND 0x85900 -#define MCP_REG_MCPR_IMC_DATAREG0 0x85920 -#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904 -#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c -#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424 -#define MCP_REG_MCPR_NVM_ADDR 0x8640c -#define MCP_REG_MCPR_NVM_CFG4 0x8642c -#define MCP_REG_MCPR_NVM_COMMAND 0x86400 -#define MCP_REG_MCPR_NVM_READ 0x86410 -#define MCP_REG_MCPR_NVM_SW_ARB 0x86420 -#define MCP_REG_MCPR_NVM_WRITE 0x86408 -#define MCP_REG_MCPR_SCRATCH 0xa0000 -#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1) -#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0) -/* [R 32] read first 32 bit after inversion of function 0. mapped as - follows: [0] NIG attention for function0; [1] NIG attention for - function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; - [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9] - GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE - glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0; - [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] - MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB - Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw - interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity - error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw - interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF - Parity error; [31] PBF Hw interrupt; */ -#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c -#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430 -/* [R 32] read first 32 bit after inversion of mcp. 
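Per the IGU_REG_RESET_MEMORIES description spanning this hunk, software writes a mask of the memories to reset and the hardware clears each bit once that memory's reset completes, which implies a write-then-poll pattern. A sketch under those assumptions, using REG_RD()/REG_WR() and udelay() from linux/delay.h; the helper name and timeout are hypothetical:

#define IGU_REG_RESET_MEMORIES 0x130158

/* Hypothetical helper: reset the selected IGU memories (bit 0 = mapping
 * ... bit 5 = statistics) and wait for the bits to self-clear. */
static int igu_reset_memories(struct bnx2x *bp, u32 mask)
{
	int tries;

	REG_WR(bp, IGU_REG_RESET_MEMORIES, mask);
	for (tries = 0; tries < 1000; tries++) {
		if (!(REG_RD(bp, IGU_REG_RESET_MEMORIES) & mask))
			return 0;
		udelay(5);
	}
	return -EBUSY;	/* reset did not complete */
}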
mapped as follows: [0] - NIG attention for function0; [1] NIG attention for function1; [2] GPIO1 - mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; - [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10] - PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event - function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP - Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for - mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19] - BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC - Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw - interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI - Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw - interrupt; */ -#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434 -/* [R 32] read second 32 bit after inversion of function 0. mapped as - follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM - Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw - interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity - error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw - interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] - NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; - [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw - interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM - Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI - Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM - Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw - interrupt; */ -#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438 -#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c -/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0] - PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error; - [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; - [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] - XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12] - DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity - error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux - PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt; - [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error; - [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt; - [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error; - [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */ -#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440 -/* [R 32] read third 32 bit after inversion of function 0. 
mapped as - follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity - error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5] - PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw - interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity - error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) - Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] - pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] - MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] - SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW - timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 - func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General - attn1; */ -#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444 -#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448 -/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0] - CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP - Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient - Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity - error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw - interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14] - MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17] - Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW - timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3 - func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1 - func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW - timers attn_4 func1; [30] General attn0; [31] General attn1; */ -#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c -/* [R 32] read fourth 32 bit after inversion of function 0. mapped as - follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] - General attn5; [4] General attn6; [5] General attn7; [6] General attn8; - [7] General attn9; [8] General attn10; [9] General attn11; [10] General - attn12; [11] General attn13; [12] General attn14; [13] General attn15; - [14] General attn16; [15] General attn17; [16] General attn18; [17] - General attn19; [18] General attn20; [19] General attn21; [20] Main power - interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN - Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC - Latched timeout attention; [27] GRC Latched reserved access attention; - [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP - Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ -#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450 -#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454 -/* [R 32] read fourth 32 bit after inversion of mcp. 
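All four AEU after-invert vectors follow the same convention: status bits come in pairs, the even bit reporting a block's parity error and the odd bit its HW interrupt (vector 2 above is paired across all 32 bits), which makes a table-driven decoder natural. A hedged sketch for vector 2 of function 0, assuming REG_RD() and pr_err(); the table and helper are hypothetical:

#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438

/* Block names for AEU vector 2, one per parity/HW-interrupt bit pair,
 * in the order given by the register description above. */
static const char * const aeu2_block[] = {
	"PBClient", "QM", "Timers", "XSDM", "XCM", "XSEMI", "DoorbellQ",
	"NIG", "Vaux PCI core", "Debug", "USDM", "UCM", "USEMI", "UPB",
	"CSDM", "CCM"
};

static void aeu2_report(struct bnx2x *bp)
{
	u32 sts = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0);
	int i;

	for (i = 0; i < 16; i++) {
		if (sts & (1 << (2 * i)))
			pr_err("%s parity error\n", aeu2_block[i]);
		if (sts & (1 << (2 * i + 1)))
			pr_err("%s HW interrupt\n", aeu2_block[i]);
	}
}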
mapped as follows: [0] - General attn2; [1] General attn3; [2] General attn4; [3] General attn5; - [4] General attn6; [5] General attn7; [6] General attn8; [7] General - attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11] - General attn13; [12] General attn14; [13] General attn15; [14] General - attn16; [15] General attn17; [16] General attn18; [17] General attn19; - [18] General attn20; [19] General attn21; [20] Main power interrupt; [21] - RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24] - RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout - attention; [27] GRC Latched reserved access attention; [28] MCP Latched - rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched - ump_tx_parity; [31] MCP Latched scpad_parity; */ -#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458 -/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as - * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC - * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] - * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */ -#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700 -/* [W 14] write to this register results with the clear of the latched - signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in - d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP - latch; one in d5 clears GRC Latched timeout attention; one in d6 clears - GRC Latched reserved access attention; one in d7 clears Latched - rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears - Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both - ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears - pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; read - from this register return zero */ -#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c -/* [RW 32] first 32b for enabling the output for function 0 output0. mapped - as follows: [0] NIG attention for function0; [1] NIG attention for - function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function - 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] - GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event - function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP - Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] - SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X - indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; - [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] - SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] - TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] - TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ -#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c -#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c -#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c -#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c -#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc -#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc -#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc -/* [RW 32] first 32b for enabling the output for function 1 output0. 
mapped - as follows: [0] NIG attention for function0; [1] NIG attention for - function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function - 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] - GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event - function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP - Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] - SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X - indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; - [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] - SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] - TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] - TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ -#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c -#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c -#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c -#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c -#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c -#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c -#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c -/* [RW 32] first 32b for enabling the output for close the gate nig. mapped - as follows: [0] NIG attention for function0; [1] NIG attention for - function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function - 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] - GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event - function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP - Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] - SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X - indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; - [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] - SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] - TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] - TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ -#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec -#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c -/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped - as follows: [0] NIG attention for function0; [1] NIG attention for - function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function - 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] - GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event - function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP - Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] - SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X - indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; - [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] - SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] - TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] - TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ -#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc -#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c -/* [RW 32] second 32b for enabling the output for function 0 output0. 
mapped - as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM - Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw - interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity - error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw - interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] - NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; - [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw - interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM - Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI - Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM - Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw - interrupt; */ -#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070 -#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080 -/* [RW 32] second 32b for enabling the output for function 1 output0. mapped - as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM - Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw - interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity - error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw - interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] - NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; - [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw - interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM - Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI - Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM - Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw - interrupt; */ -#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110 -#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120 -/* [RW 32] second 32b for enabling the output for close the gate nig. mapped - as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM - Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw - interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity - error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw - interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] - NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; - [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw - interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM - Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI - Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM - Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw - interrupt; */ -#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0 -#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190 -/* [RW 32] second 32b for enabling the output for close the gate pxp. 
mapped - as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM - Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw - interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity - error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw - interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] - NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; - [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw - interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM - Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI - Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM - Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw - interrupt; */ -#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100 -#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0 -/* [RW 32] third 32b for enabling the output for function 0 output0. mapped - as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP - Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; - [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw - interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity - error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) - Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] - pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] - MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] - SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW - timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 - func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General - attn1; */ -#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074 -#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084 -/* [RW 32] third 32b for enabling the output for function 1 output0. mapped - as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP - Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; - [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw - interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity - error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) - Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] - pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] - MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] - SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW - timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 - func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General - attn1; */ -#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114 -#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124 -/* [RW 32] third 32b for enabling the output for close the gate nig. 
mapped - as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP - Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; - [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw - interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity - error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) - Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] - pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] - MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] - SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW - timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 - func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General - attn1; */ -#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4 -#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194 -/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped - as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP - Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; - [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw - interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity - error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) - Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] - pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] - MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] - SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW - timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 - func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General - attn1; */ -#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104 -#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4 -/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped - as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] - General attn5; [4] General attn6; [5] General attn7; [6] General attn8; - [7] General attn9; [8] General attn10; [9] General attn11; [10] General - attn12; [11] General attn13; [12] General attn14; [13] General attn15; - [14] General attn16; [15] General attn17; [16] General attn18; [17] - General attn19; [18] General attn20; [19] General attn21; [20] Main power - interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN - Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC - Latched timeout attention; [27] GRC Latched reserved access attention; - [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP - Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ -#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078 -#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098 -#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8 -#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8 -#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8 -#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8 -/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped - as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] - General attn5; [4] General attn6; [5] General attn7; [6] General attn8; - [7] General attn9; [8] General attn10; [9] General attn11; [10] General - attn12; [11] General attn13; [12] General attn14; [13] General attn15; - [14] General attn16; [15] General attn17; [16] General attn18; [17] - General attn19; [18] General attn20; [19] General 
attn21; [20] Main power - interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN - Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC - Latched timeout attention; [27] GRC Latched reserved access attention; - [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP - Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ -#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118 -#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138 -#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158 -#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168 -#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178 -#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188 -/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped - as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] - General attn5; [4] General attn6; [5] General attn7; [6] General attn8; - [7] General attn9; [8] General attn10; [9] General attn11; [10] General - attn12; [11] General attn13; [12] General attn14; [13] General attn15; - [14] General attn16; [15] General attn17; [16] General attn18; [17] - General attn19; [18] General attn20; [19] General attn21; [20] Main power - interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN - Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC - Latched timeout attention; [27] GRC Latched reserved access attention; - [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP - Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ -#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8 -#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198 -/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped - as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] - General attn5; [4] General attn6; [5] General attn7; [6] General attn8; - [7] General attn9; [8] General attn10; [9] General attn11; [10] General - attn12; [11] General attn13; [12] General attn14; [13] General attn15; - [14] General attn16; [15] General attn17; [16] General attn18; [17] - General attn19; [18] General attn20; [19] General attn21; [20] Main power - interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN - Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC - Latched timeout attention; [27] GRC Latched reserved access attention; - [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP - Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ -#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 -#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 -/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu - 128 bit vector */ -#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 -#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004 -#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028 -#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c -#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030 -#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008 -#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c -#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010 -#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014 -#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018 -#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c -#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020 -#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024 -#define MISC_REG_AEU_GENERAL_MASK 0xa61c -/* [RW 32] first 32b for inverting the input for function 0; for each bit: - 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for - function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp; - [4] GPIO3 mcp; [5] GPIO4 mcp; [6] 
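The MISC_REG_AEU_GENERAL_ATTN_n registers above occupy consecutive dwords (0xa000 + 4*n, n = 0..12), and per their description attention 0 drives bit 94 of the 128-bit AEU vector (the "General attn0" position, bit 30 of the third 32-bit map), so attention n lands on AEU bit 94 + n. A one-line hypothetical helper, assuming the driver's REG_WR() accessor:

#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000

/* Raise (up = 1) or clear (up = 0) software general attention n (0..12);
 * this sets/clears AEU vector bit 94 + n. */
static void misc_set_general_attn(struct bnx2x *bp, int n, int up)
{
	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + 4 * n, up);
}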
GPIO1 function 1; [7] GPIO2 function 1;
- [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
- function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
- Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
- SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication
- for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS
- Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw
- interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM
- Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI
- Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
-#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c
-#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c
-/* [RW 32] second 32b for inverting the input for function 0; for each bit:
- 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity
- error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw
- interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM
- Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw
- interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
- DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
- error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
- PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
- [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
- [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
- [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
- [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
-#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230
-#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240
-/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
- [9:8] = reserved. Zero = mask; one = unmask */
-#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060
-#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064
-/* [RW 1] If set a system kill occurred */
-#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610
-/* [RW 32] Represent the status of the input vector to the AEU when a system
- kill occurred. The register is reset in por reset. Mapped as follows: [0]
- NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
- mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
- [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
- PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
- function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
- Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
- mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
- BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
- Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
- interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
- Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
- interrupt; */
-#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600
-#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604
-#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608
-#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c
-/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
- Port. */
-#define MISC_REG_BOND_ID 0xa400
-/* [R 8] These bits indicate the metal revision of the chip.
This value - starts at 0x00 for each all-layer tape-out and increments by one for each - tape-out. */ -#define MISC_REG_CHIP_METAL 0xa404 -/* [R 16] These bits indicate the part number for the chip. */ -#define MISC_REG_CHIP_NUM 0xa408 -/* [R 4] These bits indicate the base revision of the chip. This value - starts at 0x0 for the A0 tape-out and increments by one for each - all-layer tape-out. */ -#define MISC_REG_CHIP_REV 0xa40c -/* [RW 32] The following driver registers(1...16) represent 16 drivers and - 32 clients. Each client can be controlled by one driver only. One in each - bit represent that this driver control the appropriate client (Ex: bit 5 - is set means this driver control client number 5). addr1 = set; addr0 = - clear; read from both addresses will give the same result = status. write - to address 1 will set a request to control all the clients that their - appropriate bit (in the write command) is set. if the client is free (the - appropriate bit in all the other drivers is clear) one will be written to - that driver register; if the client isn't free the bit will remain zero. - if the appropriate bit is set (the driver request to gain control on a - client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW - interrupt will be asserted). write to address 0 will set a request to - free all the clients that their appropriate bit (in the write command) is - set. if the appropriate bit is clear (the driver request to free a client - it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will - be asserted). */ -#define MISC_REG_DRIVER_CONTROL_1 0xa510 -#define MISC_REG_DRIVER_CONTROL_7 0xa3c8 -/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0 - only. */ -#define MISC_REG_E1HMF_MODE 0xa5f8 -/* [R 1] Status of four port mode path swap input pin. */ -#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c -/* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 - - the path_swap output is equal to 4 port mode path swap input pin; if it - is 1 - the path_swap output is equal to bit[1] of this register; [1] - - Overwrite value. If bit[0] of this register is 1 this is the value that - receives the path_swap output. Reset on Hard reset. */ -#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738 -/* [R 1] Status of 4 port mode port swap input pin. */ -#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754 -/* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 - - the port_swap output is equal to 4 port mode port swap input pin; if it - is 1 - the port_swap output is equal to bit[1] of this register; [1] - - Overwrite value. If bit[0] of this register is 1 this is the value that - receives the port_swap output. Reset on Hard reset. */ -#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734 -/* [RW 32] Debug only: spare RW register reset by core reset */ -#define MISC_REG_GENERIC_CR_0 0xa460 -#define MISC_REG_GENERIC_CR_1 0xa464 -/* [RW 32] Debug only: spare RW register reset by por reset */ -#define MISC_REG_GENERIC_POR_1 0xa474 -/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to - use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO - can not be configured as an output. Each output has its output enable in - the MCP register space; but this bit needs to be set to make use of that. - Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When - set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON. - When set to 0 - vTMON is in reset. 
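The MISC_REG_DRIVER_CONTROL_1 comment above defines a simple hardware arbiter: writing a client bit to the register's set address (addr 1, i.e. reg + 4) requests ownership, reading back tells you whether the request succeeded, and writing the bit to addr 0 releases it. A hedged sketch of that protocol, assuming REG_RD()/REG_WR(); the helper names are hypothetical, and real code would also retry on contention:

#define MISC_REG_DRIVER_CONTROL_1 0xa510

/* Try to take HW resource 'resource_bit' for this driver: write the bit
 * to the set address (reg + 4), then read the status; the bit reads back
 * as 1 only if the client was free and is now ours. */
static bool drv_ctl_acquire(struct bnx2x *bp, u32 resource_bit)
{
	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + 4, resource_bit);
	return REG_RD(bp, MISC_REG_DRIVER_CONTROL_1) & resource_bit;
}

/* Release the resource: write the bit to the clear address (reg + 0). */
static void drv_ctl_release(struct bnx2x *bp, u32 resource_bit)
{
	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1, resource_bit);
}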
Bit[6]: setting this bit will change
- the i/o to an output and will drive the TimeSync output. Bit[31:7]:
- spare. Global register. Reset by hard reset. */
-#define MISC_REG_GEN_PURP_HWG 0xa9a0
-/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
- these bits is written as a '1'; the corresponding GPIO bit will turn off
- its drivers and become an input. This is the reset state of all GPIO
- pins. The read value of these bits will be a '1' if that last command
- (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
- [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
- as a '1'; the corresponding GPIO bit will drive low. The read value of
- these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
- this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET port
- 0; When any of these bits is written as a '1'; the corresponding GPIO
- bit will drive high (if it has that capability). The read value of these
- bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
- bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0;
- RO; These bits indicate the read value of each of the eight GPIO pins.
- This is the result value of the pin; not the drive value. Writing these
- bits will have no effect. */
-#define MISC_REG_GPIO 0xa490
-/* [RW 8] These bits enable the GPIO_INTs to signal events to the
- IGU/MCP; according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2]
- p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2;
- [7] p1_gpio_3; */
-#define MISC_REG_GPIO_EVENT_EN 0xa2bc
-/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a
- '1' to these bits clears the corresponding bit in the #OLD_VALUE register.
- This will acknowledge an interrupt on the falling edge of corresponding
- GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16] OLD_SET port0;
- Writing a '1' to these bits sets the corresponding bit in the #OLD_VALUE
- register. This will acknowledge an interrupt on the rising edge of
- corresponding GPIO input (reset value 0). [15-12] OLD_VALUE port1; [11-8]
- OLD_VALUE port0; RO; These bits indicate the old value of the GPIO input
- value. When the ~INT_STATE bit is set; this bit indicates the OLD value
- of the pin such that if ~INT_STATE is set and this bit is '0'; then the
- interrupt is due to a low to high edge. If ~INT_STATE is set and this bit
- is '1'; then the interrupt is due to a high to low edge (reset value 0).
- [7-4] INT_STATE port1; [3-0] INT_STATE RO port0; These bits indicate the
- current GPIO interrupt state for each GPIO pin. This bit is cleared when
- the appropriate #OLD_SET or #OLD_CLR command bit is written. This bit is
- set when the GPIO input does not match the current value in #OLD_VALUE
- (reset value 0). */
-#define MISC_REG_GPIO_INT 0xa494
-/* [R 28] this field holds the last information that caused reserved
- attention. bits [19:0] - address; [22:20] function; [23] reserved;
- [27:24] the master that caused the attention - according to the following
- encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
- dbu; 8 = dmae */
-#define MISC_REG_GRC_RSV_ATTN 0xa3c0
-/* [R 28] this field holds the last information that caused timeout
- attention.
bits [19:0] - address; [22:20] function; [23] reserved;
- [27:24] the master that caused the attention - according to the following
- encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
- dbu; 8 = dmae */
-#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
-/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
- access that does not finish within
- ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
- cleared; this timeout is disabled. If this timeout occurs; the GRC shall
- assert its attention output. */
-#define MISC_REG_GRC_TIMEOUT_EN 0xa280
-/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of
- the bits is: [2:0] OAC (reset value 001) CML output buffer bias control;
- 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl
- (reset value 001) Charge pump current control; 111 for 720u; 011 for
- 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00)
- Global bias control; When bit 7 is high bias current will be 10 0gh; When
- bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8]
- Pll_observe (reset value 010) Bits to control observability. bit 10 is
- for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl
- (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V
- and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning
- sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted
- internally). [14] reserved (reset value 0) Reset for VCO sequencer is
- connected to RESET input directly. [15] capRetry_en (reset value 0)
- enable retry on cap search failure (inverted). [16] freqMonitor_e (reset
- value 0) bit to continuously monitor vco freq (inverted). [17]
- freqDetRestart_en (reset value 0) bit to enable restart when not freq
- locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable
- retry on freq det failure (inverted). [19] pllForceFdone_en (reset value
- 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20]
- pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass
- (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value
- 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0)
- bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to
- enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force
- capPass. [26] capRestart (reset value 0) bit to force cap sequencer to
- restart. [27] capSelectM_en (reset value 0) bit to enable cap select
- register bits. */
-#define MISC_REG_LCPLL_CTRL_1 0xa2a4
-#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
-/* [RW 4] Interrupt mask register #0 read/write */
-#define MISC_REG_MISC_INT_MASK 0xa388
-/* [RW 1] Parity mask register #0 read/write */
-#define MISC_REG_MISC_PRTY_MASK 0xa398
-/* [R 1] Parity register #0 read */
-#define MISC_REG_MISC_PRTY_STS 0xa38c
-/* [RC 1] Parity register #0 read clear */
-#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
-#define MISC_REG_NIG_WOL_P0 0xa270
-#define MISC_REG_NIG_WOL_P1 0xa274
-/* [R 1] If set; indicates that the pcie_rst_b was asserted without perst
- assertion */
-#define MISC_REG_PCIE_HOT_RESET 0xa618
-/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x071d2911.
-/* [R 1] Status of 4 port mode enable input pin. */ -#define MISC_REG_PORT4MODE_EN 0xa750 -/* [RW 2] 4 port mode enable overwrite. [0] - Overwrite control; if it is 0 - - * the port4mode_en output is equal to the 4 port mode input pin; if it is 1 - - * the port4mode_en output is equal to bit[1] of this register; [1] - - * Overwrite value. If bit[0] of this register is 1 this is the value that - * receives the port4mode_en output. */ -#define MISC_REG_PORT4MODE_EN_OVWR 0xa720 -/* [RW 32] reset reg#2; write/read one = the specific block is out of reset; - write/read zero = the specific block is in reset; addr 0-wr - the write - value will be written to the register; addr 1-set - one will be written - to all the bits that have the value of one in the data written (bits that - have the value of zero will not be changed); addr 2-clear - zero will be - written to all the bits that have the value of one in the data written - (bits that have the value of zero will not be changed); addr 3-ignore; - reads are ignored from all addr except addr 00; inside order of the bits is: - [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc; - [5] rst_mcp_n_reset_reg_hard_core; [6] rst_mcp_n_hard_core_rst_b; [7] - rst_mcp_n_reset_cmn_cpu; [8] rst_mcp_n_reset_cmn_core; [9] rst_rbcn; - [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13] - Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; [16] - rst_pxp_rq_rd_wr; [31:17] reserved */ -#define MISC_REG_RESET_REG_2 0xa590 -/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is - shared with the driver resides */ -#define MISC_REG_SHARED_MEM_ADDR 0xa2b4 -/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1'; - the corresponding SPIO bit will turn off its drivers and become an - input. This is the reset state of all SPIO pins. The read value of these - bits will be a '1' if the last command (#SET; #CLR; or #FLOAT) for this - bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits - is written as a '1'; the corresponding SPIO bit will drive low. The read - value of these bits will be a '1' if the last command (#SET; #CLR; or - #FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of - these bits is written as a '1'; the corresponding SPIO bit will drive - high (if it has that capability). The read value of these bits will be a - '1' if the last command (#SET; #CLR; or #FLOAT) for this bit was a #SET. - (reset value 0). [7-0] VALUE RO; These bits indicate the read value of - each of the eight SPIO pins. This is the result value of the pin; not the - drive value. Writing these bits will have no effect. Each 8 bits field - is divided as follows: [0] VAUX Enable; when pulsed low; enables supply - from VAUX. (This is an output pin only; the FLOAT field is not applicable - for this pin); [1] VAUX Disable; when pulsed low; disables supply from - VAUX. (This is an output pin only; FLOAT field is not applicable for this - pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to - select VAUX supply. (This is an output pin only; it is not controlled by - the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT - field is not applicable for this pin; only the VALUE field is relevant - - it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6] - Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP - device ID select; read by UMP firmware. */ -#define MISC_REG_SPIO 0xa4fc
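MISC_REG_RESET_REG_2 above uses the familiar set/clear aliasing scheme: the register is mapped at four consecutive word addresses (write, set, clear, ignore), so individual reset bits can be flipped without a read-modify-write. Assuming 32-bit word addressing, so that "addr 1-set" and "addr 2-clear" become byte offsets +4 and +8, a sketch of releasing and re-asserting the EMAC0 reset might read:

	/* Write-1-to-set alias (+4): take EMAC0 (bit 2) out of reset;
	 * per the comment above, one = out of reset. */
	REG_WR(bp, MISC_REG_RESET_REG_2 + 4, 1 << 2);
	/* Write-1-to-clear alias (+8): put EMAC0 back into reset. */
	REG_WR(bp, MISC_REG_RESET_REG_2 + 8, 1 << 2);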
-/* [RW 8] These bits enable the SPIO_INTs to signal an event to the IGU/MCP - according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5; - [7:6] reserved */ -#define MISC_REG_SPIO_EVENT_EN 0xa2b8 -/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the - corresponding bit in the #OLD_VALUE register. This will acknowledge an - interrupt on the falling edge of the corresponding SPIO input (reset value - 0). [23-16] OLD_SET Writing a '1' to these bits sets the corresponding bit - in the #OLD_VALUE register. This will acknowledge an interrupt on the - rising edge of the corresponding SPIO input (reset value 0). [15-8] OLD_VALUE - RO; These bits indicate the old value of the SPIO input value. When the - ~INT_STATE bit is set; this bit indicates the OLD value of the pin such - that if ~INT_STATE is set and this bit is '0'; then the interrupt is due - to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the - interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE - RO; These bits indicate the current SPIO interrupt state for each SPIO - pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR - command bit is written. This bit is set when the SPIO input does not - match the current value in #OLD_VALUE (reset value 0). */ -#define MISC_REG_SPIO_INT 0xa500 -/* [RW 32] reload value for counter 4 if reload; the value will be reloaded if - the counter reached zero and the reload bit - (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ -#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc -/* [RW 32] the value of the counter for sw timers 1-8. there are 8 addresses - in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 - - timer 8 */ -#define MISC_REG_SW_TIMER_VAL 0xa5c0 -/* [R 1] Status of two port mode path swap input pin. */ -#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758 -/* [RW 2] 2 port swap overwrite. [0] - Overwrite control; if it is 0 - the - path_swap output is equal to the 2 port mode path swap input pin; if it is 1 - - the path_swap output is equal to bit[1] of this register; [1] - - Overwrite value. If bit[0] of this register is 1 this is the value that - receives the path_swap output. Reset on Hard reset. */ -#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c
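Servicing an SPIO interrupt with the register just described is a two-step edge acknowledge: read INT_STATE, inspect OLD_VALUE to learn the edge direction, then write the matching OLD_SET or OLD_CLR bit. An illustrative sketch for spio_5, with bit positions taken from the comment ([7-0] INT_STATE, [15-8] OLD_VALUE, [23-16] OLD_SET, [31-24] OLD_CLR):

	u32 spio_int = REG_RD(bp, MISC_REG_SPIO_INT);

	if (spio_int & (1 << 5)) {		/* INT_STATE set for spio_5 */
		if (spio_int & (1 << (8 + 5)))
			/* OLD_VALUE is '1': high-to-low edge; ack via OLD_CLR */
			REG_WR(bp, MISC_REG_SPIO_INT, 1 << (24 + 5));
		else
			/* OLD_VALUE is '0': low-to-high edge; ack via OLD_SET */
			REG_WR(bp, MISC_REG_SPIO_INT, 1 << (16 + 5));
	}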
-/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are - loaded; 0 - prepare; 1 - unprepare */ -#define MISC_REG_UNPREPARED 0xa424 -/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or - * not it is the recipient of the message on the MDIO interface. The value - * is compared to the value on ctrl_md_devad. Drives output - * misc_xgxs0_phy_addr. Global register. */ -#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc -/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system - side. This should be less than or equal to phy_port_mode; if some of the - ports are not used. This enables reduction of frequency on the core side. - This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 - - Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap - input for the XMAC_MP core; and should be changed only while reset is - held low. Reset on Hard reset. */ -#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964 -/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp - Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode; - 01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the - XMAC_MP core; and should be changed only while reset is held low. Reset - on Hard reset. */ -#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960 -/* [RW 32] 1 [47] Packet Size = 64 Writes to this register write bits 31:0. - * Reads from this register will clear bits 31:0. */ -#define MSTAT_REG_RX_STAT_GR64_LO 0x200 -/* [RW 32] 1 [00] Tx Good Packet Count Writes to this register write bits - * 31:0. Reads from this register will clear bits 31:0. */ -#define MSTAT_REG_TX_STAT_GTXPOK_LO 0
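Both MSTAT counters above are read-to-clear: a read returns the count accumulated since the previous read and zeroes bits 31:0, so a driver accumulates into software counters instead of treating the register as monotonic. A sketch under the assumption that MSTAT offsets are relative to a per-port block base (the small values 0x200 and 0 suggest block-relative addressing; mstat_base and the stats accumulator are hypothetical names):

	/* Read returns the delta since the last read and clears the counter. */
	u32 delta = REG_RD(bp, mstat_base + MSTAT_REG_RX_STAT_GR64_LO);

	stats->rx_pkts_64 += delta;	/* accumulate in software */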
-#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0) -#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1) -#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) -#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) -#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) -#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0) -#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0) -#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) -#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9) -#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15) -#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18) -/* [RW 1] Input enable for RX_BMAC0 IF */ -#define NIG_REG_BMAC0_IN_EN 0x100ac -/* [RW 1] output enable for TX_BMAC0 IF */ -#define NIG_REG_BMAC0_OUT_EN 0x100e0 -/* [RW 1] output enable for TX BMAC pause port 0 IF */ -#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110 -/* [RW 1] output enable for RX_BMAC0_REGS IF */ -#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8 -/* [RW 1] output enable for RX BRB1 port0 IF */ -#define NIG_REG_BRB0_OUT_EN 0x100f8 -/* [RW 1] Input enable for TX BRB1 pause port 0 IF */ -#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4 -/* [RW 1] output enable for RX BRB1 port1 IF */ -#define NIG_REG_BRB1_OUT_EN 0x100fc -/* [RW 1] Input enable for TX BRB1 pause port 1 IF */ -#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8 -/* [RW 1] output enable for RX BRB1 LP IF */ -#define NIG_REG_BRB_LB_OUT_EN 0x10100 -/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64] - error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush; - [73:72] vnic_num; [81:74] sideband_info */ -#define NIG_REG_DEBUG_PACKET_LB 0x10800 -/* [RW 1] Input enable for TX Debug packet */ -#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc -/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all - packets from PBF are not forwarded to the MAC and are just deleted from the - FIFO. The first packet may be deleted from the middle; and the last packet - will always be deleted till the end. */ -#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060 -/* [RW 1] Output enable to EMAC0 */ -#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120 -/* [RW 1] MAC configuration for packets of port0. If 1 - all packet output - goes to emac for port0; otherwise to bmac for port0 */ -#define NIG_REG_EGRESS_EMAC0_PORT 0x10058 -/* [RW 1] Input enable for TX PBF user packet port0 IF */ -#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc -/* [RW 1] Input enable for TX PBF user packet port1 IF */ -#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0 -/* [RW 1] Input enable for TX UMP management packet port0 IF */ -#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4 -/* [RW 1] Input enable for RX_EMAC0 IF */ -#define NIG_REG_EMAC0_IN_EN 0x100a4 -/* [RW 1] output enable for TX EMAC pause port 0 IF */ -#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118 -/* [R 1] status from emac0. This bit is set when MDINT from either the - EXT_MDINT pin or from the Copper PHY is driven low. This condition must - be cleared in the attached PHY device that is driving the MDINT pin. */ -#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494 -/* [WB 48] This address space contains BMAC0 registers. The BMAC registers - are described in appendix A. In order to access the BMAC0 registers; the - base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be - added to each BMAC register offset */ -#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00
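Because the BMAC registers sit behind this windowed memory rather than being individually named, access is always base-plus-offset, and the registers are wide (WB), so a wide-bus write helper is the natural accessor. An illustrative sketch, with REG_WR_DMAE() assumed as the driver's wide-bus helper and both the BMAC-internal offset name and the value being illustrative only:

	u32 wb_data[2] = { 0x1, 0x0 };	/* illustrative 64-bit value */

	/* The BMAC-internal offset is added to the port's NIG window base. */
	REG_WR_DMAE(bp, NIG_REG_INGRESS_BMAC0_MEM + BIGMAC_REGISTER_BMAC_CONTROL,
		    wb_data, 2);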
-/* [WB 48] This address space contains BMAC1 registers. The BMAC registers - are described in appendix A. In order to access the BMAC1 registers; the - base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be - added to each BMAC register offset */ -#define NIG_REG_INGRESS_BMAC1_MEM 0x11000 -/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */ -#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0 -/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data - packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */ -#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4 -/* [RW 27] 0 - must be active for Everest A0; 1 - for Everest B0 when latch - logic for interrupts must be used. Enable per bit of interrupt of - ~latch_status.latch_status */ -#define NIG_REG_LATCH_BC_0 0x16210 -/* [RW 27] Latch for each interrupt from Unicore. b[0] - status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete; - b[2] status_emac0_misc_cfg_change; b[3] status_emac0_misc_link_status; - b[4] status_emac0_misc_link_change; b[5] status_emac0_misc_attn; - b[6] status_serdes0_mac_crs; b[7] status_serdes0_autoneg_complete; - b[8] status_serdes0_fiber_rxact; b[9] status_serdes0_link_status; - b[10] status_serdes0_mr_page_rx; b[11] status_serdes0_cl73_an_complete; - b[12] status_serdes0_cl73_mr_page_rx; b[13] status_serdes0_rx_sigdet; - b[14] status_xgxs0_remotemdioreq; b[15] status_xgxs0_link10g; - b[16] status_xgxs0_autoneg_complete; b[17] status_xgxs0_fiber_rxact; - b[21:18] status_xgxs0_link_status; b[22] status_xgxs0_mr_page_rx; - b[23] status_xgxs0_cl73_an_complete; b[24] status_xgxs0_cl73_mr_page_rx; - b[25] status_xgxs0_rx_sigdet; b[26] status_xgxs0_mac_crs */ -#define NIG_REG_LATCH_STATUS_0 0x18000 -/* [RW 1] led 10g for port 0 */ -#define NIG_REG_LED_10G_P0 0x10320 -/* [RW 1] led 10g for port 1 */ -#define NIG_REG_LED_10G_P1 0x10324 -/* [RW 1] Port0: This bit is set to enable the use of the - ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field - defined below. If this bit is cleared; then the blink rate will be about - 8Hz. */ -#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318 -/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for - Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field - is reset to 0x080; giving a default blink frequency of approximately 8Hz. */ -#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310 -/* [RW 1] Port0: If set along with the - ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0 - bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED - bit; the Traffic LED will blink with the blink rate specified in - ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and - ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0 - fields. */ -#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308 -/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The - Traffic LED will then be controlled via bit - ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 and bit - ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */ -#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8 -/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit; - turns on the Traffic LED.
If the led_control_blink_traffic_p0 bit is also - set; the LED will blink with blink rate specified in - ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and - ~nig_regsters_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0 - fields. */ -#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300 -/* [RW 4] led mode for port0: 0 MAC; 1-3 PHY1; 4 MAC2; 5-7 PHY4; 8-MAC3; - 9-11PHY7; 12 MAC4; 13-15 PHY10; */ -#define NIG_REG_LED_MODE_P0 0x102f0 -/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1- - tsdm enable; b2- usdm enable */ -#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070 -#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074 -/* [RW 1] SAFC enable for port0. This register may get 1 only when - ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same - port */ -#define NIG_REG_LLFC_ENABLE_0 0x16208 -#define NIG_REG_LLFC_ENABLE_1 0x1620c -/* [RW 16] classes are high-priority for port0 */ -#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058 -#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c -/* [RW 16] classes are low-priority for port0 */ -#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060 -#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064 -/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */ -#define NIG_REG_LLFC_OUT_EN_0 0x160c8 -#define NIG_REG_LLFC_OUT_EN_1 0x160cc -#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c -#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154 -#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244 -#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048 -/* [RW 1] send to BRB1 if no match on any of RMP rules. */ -#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c -/* [RW 2] Determine the classification participants. 0: no classification.1: - classification upon VLAN id. 2: classification upon MAC address. 3: - classification upon both VLAN id & MAC addr. */ -#define NIG_REG_LLH0_CLS_TYPE 0x16080 -/* [RW 32] cm header for llh0 */ -#define NIG_REG_LLH0_CM_HEADER 0x1007c -#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc -#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0 -/* [RW 16] destination TCP address 1. The LLH will look for this address in - all incoming packets. */ -#define NIG_REG_LLH0_DEST_TCP_0 0x10220 -/* [RW 16] destination UDP address 1 The LLH will look for this address in - all incoming packets. */ -#define NIG_REG_LLH0_DEST_UDP_0 0x10214 -#define NIG_REG_LLH0_ERROR_MASK 0x1008c -/* [RW 8] event id for llh0 */ -#define NIG_REG_LLH0_EVENT_ID 0x10084 -#define NIG_REG_LLH0_FUNC_EN 0x160fc -#define NIG_REG_LLH0_FUNC_MEM 0x16180 -#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140 -#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100 -/* [RW 1] Determine the IP version to look for in - ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */ -#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208 -/* [RW 1] t bit for llh0 */ -#define NIG_REG_LLH0_T_BIT 0x10074 -/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */ -#define NIG_REG_LLH0_VLAN_ID_0 0x1022c -/* [RW 8] init credit counter for port0 in LLH */ -#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554 -#define NIG_REG_LLH0_XCM_MASK 0x10130 -#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248 -/* [RW 1] send to BRB1 if no match on any of RMP rules. */ -#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc -/* [RW 2] Determine the classification participants. 0: no classification.1: - classification upon VLAN id. 2: classification upon MAC address. 3: - classification upon both VLAN id & MAC addr. 
*/ -#define NIG_REG_LLH1_CLS_TYPE 0x16084 -/* [RW 32] cm header for llh1 */ -#define NIG_REG_LLH1_CM_HEADER 0x10080 -#define NIG_REG_LLH1_ERROR_MASK 0x10090 -/* [RW 8] event id for llh1 */ -#define NIG_REG_LLH1_EVENT_ID 0x10088 -#define NIG_REG_LLH1_FUNC_MEM 0x161c0 -#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 -#define NIG_REG_LLH1_FUNC_MEM_SIZE 16 -/* [RW 1] When this bit is set; the LLH will classify the packet before - * sending it to the BRB or calculating WoL on it. This bit controls port 1 - * only. The legacy llh_multi_function_mode bit controls port 0. */ -#define NIG_REG_LLH1_MF_MODE 0x18614 -/* [RW 8] init credit counter for port1 in LLH */ -#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 -#define NIG_REG_LLH1_XCM_MASK 0x10134 -/* [RW 1] When this bit is set; the LLH will expect all packets to be with - e1hov */ -#define NIG_REG_LLH_E1HOV_MODE 0x160d8 -/* [RW 1] When this bit is set; the LLH will classify the packet before - sending it to the BRB or calculating WoL on it. */ -#define NIG_REG_LLH_MF_MODE 0x16024 -#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330 -#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334 -/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */ -#define NIG_REG_NIG_EMAC0_EN 0x1003c -/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */ -#define NIG_REG_NIG_EMAC1_EN 0x10040 -/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the - EMAC0 to strip the CRC from the ingress packets. */ -#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044 -/* [R 32] Interrupt register #0 read */ -#define NIG_REG_NIG_INT_STS_0 0x103b0 -#define NIG_REG_NIG_INT_STS_1 0x103c0 -/* [R 32] Legacy E1 and E1H location for parity error mask register. */ -#define NIG_REG_NIG_PRTY_MASK 0x103dc -/* [RW 32] Parity mask register #0 read/write */ -#define NIG_REG_NIG_PRTY_MASK_0 0x183c8 -#define NIG_REG_NIG_PRTY_MASK_1 0x183d8 -/* [R 32] Legacy E1 and E1H location for parity error status register. */ -#define NIG_REG_NIG_PRTY_STS 0x103d0 -/* [R 32] Parity register #0 read */ -#define NIG_REG_NIG_PRTY_STS_0 0x183bc -#define NIG_REG_NIG_PRTY_STS_1 0x183cc -/* [R 32] Legacy E1 and E1H location for parity error status clear register. */ -#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4 -/* [RC 32] Parity register #0 read clear */ -#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0 -#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0 -#define MCPR_IMC_COMMAND_ENABLE (1L<<31) -#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16 -#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28 -#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8 -/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic - * Ethernet header. */ -#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038 -/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in - * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be - * disabled when this bit is set. */ -#define NIG_REG_P0_HWPFC_ENABLE 0x18078 -#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 -#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440 -/* [RW 1] Input enable for RX MAC interface. */ -#define NIG_REG_P0_MAC_IN_EN 0x185ac -/* [RW 1] Output enable for TX MAC interface */ -#define NIG_REG_P0_MAC_OUT_EN 0x185b0 -/* [RW 1] Output enable for TX PAUSE signal to the MAC. */ -#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4 -/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for - * future expansion) each priorty is to be mapped to. Bits 3:0 specify the - * COS for priority 0. Bits 31:28 specify the COS for priority 7. 
The 3-bit - * priority field is extracted from the outer-most VLAN in receive packet. - * Only COS 0 and COS 1 are supported in E2. */ -#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054 -/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A - * priority is mapped to COS 0 when the corresponding mask bit is 1. More - * than one bit may be set; allowing multiple priorities to be mapped to one - * COS. */ -#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058 -/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A - * priority is mapped to COS 1 when the corresponding mask bit is 1. More - * than one bit may be set; allowing multiple priorities to be mapped to one - * COS. */ -#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c -/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A - * priority is mapped to COS 2 when the corresponding mask bit is 1. More - * than one bit may be set; allowing multiple priorities to be mapped to one - * COS. */ -#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0 -/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A - * priority is mapped to COS 3 when the corresponding mask bit is 1. More - * than one bit may be set; allowing multiple priorities to be mapped to one - * COS. */ -#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4 -/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A - * priority is mapped to COS 4 when the corresponding mask bit is 1. More - * than one bit may be set; allowing multiple priorities to be mapped to one - * COS. */ -#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8 -/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A - * priority is mapped to COS 5 when the corresponding mask bit is 1. More - * than one bit may be set; allowing multiple priorities to be mapped to one - * COS. */ -#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc -/* [R 1] RX FIFO for receiving data from MAC is empty. */ -/* [RW 15] Specify which of the credit registers the client is to be mapped - * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For - * clients that are not subject to WFQ credit blocking - their - * specifications here are not used. */ -#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0 -/* [RW 32] Specify which of the credit registers the client is to be mapped - * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are - * for client 0; bits [35:32] are for client 8. For clients that are not - * subject to WFQ credit blocking - their specifications here are not used. - * This is a new register (with 2_) added in E3 B0 to accommodate the 9 - * input clients to ETS arbiter. The reset default is set for management and - * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to - * use credit registers 0-5 respectively (0x543210876). Note that credit - * registers can not be shared between clients. */ -#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688 -/* [RW 4] Specify which of the credit registers the client is to be mapped - * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are - * for client 0; bits [35:32] are for client 8. For clients that are not - * subject to WFQ credit blocking - their specifications here are not used. - * This is a new register (with 2_) added in E3 B0 to accommodate the 9 - * input clients to ETS arbiter. 
The reset default is set for management and - * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to - * use credit registers 0-5 respectively (0x543210876). Note that credit - * registers can not be shared between clients. */ -#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c -/* [RW 5] Specify whether the client competes directly in the strict - * priority arbiter. The bits are mapped according to client ID (client IDs - * are defined in tx_arb_priority_client). Default value is set to enable - * strict priorities for clients 0-2 -- management and debug traffic. */ -#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8 -/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The - * bits are mapped according to client ID (client IDs are defined in - * tx_arb_priority_client). Default value is 0 for not using WFQ credit - * blocking. */ -#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec -/* [RW 32] Specify the upper bound that credit register 0 is allowed to - * reach. */ -#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c -#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110 -#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114 -#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118 -#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c -#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0 -#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4 -#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8 -#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac -/* [RW 32] Specify the weight (in bytes) to be added to credit register 0 - * when it is time to increment. */ -#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8 -#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc -#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100 -#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104 -#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108 -#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690 -#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694 -#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698 -#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c -/* [RW 12] Specify the number of strict priority arbitration slots between - * two round-robin arbitration slots to avoid starvation. A value of 0 means - * no strict priority cycles - the strict priority with anti-starvation - * arbiter becomes a round-robin arbiter. */ -#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4 -/* [RW 15] Specify the client number to be assigned to each priority of the - * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0] - * are for priority 0 client; bits [14:12] are for priority 4 client. The - * clients are assigned the following IDs: 0-management; 1-debug traffic - * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 - * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000) - * for management at priority 0; debug traffic at priorities 1 and 2; COS0 - * traffic at priority 3; and COS1 traffic at priority 4. */ -#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4 -/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic - * Ethernet header. */ -#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c -#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 -#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 -/* [RW 32] Specify the client number to be assigned to each priority of the - * strict priority arbiter. This register specifies bits 31:0 of the 36-bit - * value. Priority 0 is the highest priority. 
Bits [3:0] are for priority 0 - * client; bits [35-32] are for priority 8 client. The clients are assigned - * the following IDs: 0-management; 1-debug traffic from this port; 2-debug - * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; - * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is - * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to - * accommodate the 9 input clients to ETS arbiter. */ -#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680 -/* [RW 4] Specify the client number to be assigned to each priority of the - * strict priority arbiter. This register specifies bits 35:32 of the 36-bit - * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 - * client; bits [35-32] are for priority 8 client. The clients are assigned - * the following IDs: 0-management; 1-debug traffic from this port; 2-debug - * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; - * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is - * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to - * accommodate the 9 input clients to ETS arbiter. */ -#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684 -#define NIG_REG_P1_MAC_IN_EN 0x185c0 -/* [RW 1] Output enable for TX MAC interface */ -#define NIG_REG_P1_MAC_OUT_EN 0x185c4 -/* [RW 1] Output enable for TX PAUSE signal to the MAC. */ -#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8 -/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for - * future expansion) each priorty is to be mapped to. Bits 3:0 specify the - * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit - * priority field is extracted from the outer-most VLAN in receive packet. - * Only COS 0 and COS 1 are supported in E2. */ -#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8 -/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A - * priority is mapped to COS 0 when the corresponding mask bit is 1. More - * than one bit may be set; allowing multiple priorities to be mapped to one - * COS. */ -#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac -/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A - * priority is mapped to COS 1 when the corresponding mask bit is 1. More - * than one bit may be set; allowing multiple priorities to be mapped to one - * COS. */ -#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0 -/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A - * priority is mapped to COS 2 when the corresponding mask bit is 1. More - * than one bit may be set; allowing multiple priorities to be mapped to one - * COS. */ -#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8 -/* [R 1] RX FIFO for receiving data from MAC is empty. */ -#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c -/* [R 1] TLLH FIFO is empty. */ -#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338 -/* [RW 32] Specify which of the credit registers the client is to be mapped - * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are - * for client 0; bits [35:32] are for client 8. For clients that are not - * subject to WFQ credit blocking - their specifications here are not used. - * This is a new register (with 2_) added in E3 B0 to accommodate the 9 - * input clients to ETS arbiter. The reset default is set for management and - * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to - * use credit registers 0-5 respectively (0x543210876). 
Note that credit - * registers can not be shared between clients. Note also that there are - * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only - * credit registers 0-5 are valid. This register should be configured - * appropriately before enabling WFQ. */ -#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8 -/* [RW 4] Specify which of the credit registers the client is to be mapped - * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are - * for client 0; bits [35:32] are for client 8. For clients that are not - * subject to WFQ credit blocking - their specifications here are not used. - * This is a new register (with 2_) added in E3 B0 to accommodate the 9 - * input clients to ETS arbiter. The reset default is set for management and - * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to - * use credit registers 0-5 respectively (0x543210876). Note that credit - * registers can not be shared between clients. Note also that there are - * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only - * credit registers 0-5 are valid. This register should be configured - * appropriately before enabling WFQ. */ -#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec -/* [RW 9] Specify whether the client competes directly in the strict - * priority arbiter. The bits are mapped according to client ID (client IDs - * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic - * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 - * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. - * Default value is set to enable strict priorities for all clients. */ -#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234 -/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The - * bits are mapped according to client ID (client IDs are defined in - * tx_arb_priority_client2): 0-management; 1-debug traffic from this port; - * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 - * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is - * 0 for not using WFQ credit blocking. */ -#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238 -#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258 -#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c -#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260 -#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264 -#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268 -#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4 -/* [RW 32] Specify the weight (in bytes) to be added to credit register 0 - * when it is time to increment. */ -#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244 -#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248 -#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c -#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250 -#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254 -#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0 -/* [RW 12] Specify the number of strict priority arbitration slots between - two round-robin arbitration slots to avoid starvation. A value of 0 means - no strict priority cycles - the strict priority with anti-starvation - arbiter becomes a round-robin arbiter. */ -#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240 -/* [RW 32] Specify the client number to be assigned to each priority of the - strict priority arbiter. This register specifies bits 31:0 of the 36-bit - value. Priority 0 is the highest priority. 
Bits [3:0] are for priority 0 - client; bits [35-32] are for priority 8 client. The clients are assigned - the following IDs: 0-management; 1-debug traffic from this port; 2-debug - traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; - 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is - set to 0x345678021. This is a new register (with 2_) added in E3 B0 to - accommodate the 9 input clients to ETS arbiter. Note that this register - is the same as the one for port 0, except that port 1 only has COS 0-2 - traffic. There is no traffic for COS 3-5 of port 1. */ -#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0 -/* [RW 4] Specify the client number to be assigned to each priority of the - strict priority arbiter. This register specifies bits 35:32 of the 36-bit - value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 - client; bits [35-32] are for priority 8 client. The clients are assigned - the following IDs: 0-management; 1-debug traffic from this port; 2-debug - traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; - 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is - set to 0x345678021. This is a new register (with 2_) added in E3 B0 to - accommodate the 9 input clients to ETS arbiter. Note that this register - is the same as the one for port 0, except that port 1 only has COS 0-2 - traffic. There is no traffic for COS 3-5 of port 1. */ -#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4 -/* [R 1] TX FIFO for transmitting data to MAC is empty. */ -#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594 -/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets - forwarded to the host. */ -#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8 -/* [RW 32] Specify the upper bound that credit register 0 is allowed to - * reach. */ -/* [RW 1] Pause enable for port0. This register may get 1 only when - ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same - port */ -#define NIG_REG_PAUSE_ENABLE_0 0x160c0 -#define NIG_REG_PAUSE_ENABLE_1 0x160c4 -/* [RW 1] Input enable for RX PBF LP IF */ -#define NIG_REG_PBF_LB_IN_EN 0x100b4 -/* [RW 1] Value of this register will be transmitted to port swap when - ~nig_registers_strap_override.strap_override =1 */ -#define NIG_REG_PORT_SWAP 0x10394 -/* [RW 1] PPP enable for port0. 
This register may get 1 only when - * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable = 0 for the - * same port */ -#define NIG_REG_PPP_ENABLE_0 0x160b0 -#define NIG_REG_PPP_ENABLE_1 0x160b4 -/* [RW 1] output enable for RX parser descriptor IF */ -#define NIG_REG_PRS_EOP_OUT_EN 0x10104 -/* [RW 1] Input enable for RX parser request IF */ -#define NIG_REG_PRS_REQ_IN_EN 0x100b8 -/* [RW 5] control to serdes - CL45 DEVAD */ -#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370 -/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */ -#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c -/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */ -#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374 -/* [R 1] status from serdes0 that inputs to interrupt logic of link status */ -#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578 -/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure - for port0 */ -#define NIG_REG_STAT0_BRB_DISCARD 0x105f0 -/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure - for port0 */ -#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8 -/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are - between 1024 and 1522 bytes for port0 */ -#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750 -/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are - 1523 bytes and above for port0 */ -#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760 -/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure - for port1 */ -#define NIG_REG_STAT1_BRB_DISCARD 0x10628 -/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are - between 1024 and 1522 bytes for port1 */ -#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0 -/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are - 1523 bytes and above for port1 */ -#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0 -/* [WB_R 64] Rx statistics : User octets received for LP */ -#define NIG_REG_STAT2_BRB_OCTET 0x107e0 -#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328 -#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c -/* [RW 1] port swap mux selection. If this register equals 0 then port - swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then - port swap is equal to ~nig_registers_port_swap.port_swap */ -#define NIG_REG_STRAP_OVERRIDE 0x10398 -/* [RW 1] output enable for RX_XCM0 IF */ -#define NIG_REG_XCM0_OUT_EN 0x100f0 -/* [RW 1] output enable for RX_XCM1 IF */ -#define NIG_REG_XCM1_OUT_EN 0x100f4 -/* [RW 1] control to xgxs - remote PHY in-band MDIO */ -#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348 -/* [RW 5] control to xgxs - CL45 DEVAD */ -#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c -/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */ -#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338 -/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */ -#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340 -/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g.
*/ -#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680 -/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */ -#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684 -/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */ -#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8 -/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */ -#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0 -#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0) -#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9) -#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15) -#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18) -#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18 -/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */ -#define PBF_REG_COS0_UPPER_BOUND 0x15c05c -/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter - * of port 0. */ -#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc -/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter - * of port 1. */ -#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4 -/* [RW 31] The weight of COS0 in the ETS command arbiter. */ -#define PBF_REG_COS0_WEIGHT 0x15c054 -/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */ -#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8 -/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */ -#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0 -/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */ -#define PBF_REG_COS1_UPPER_BOUND 0x15c060 -/* [RW 31] The weight of COS1 in the ETS command arbiter. */ -#define PBF_REG_COS1_WEIGHT 0x15c058 -/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */ -#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac -/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */ -#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4 -/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */ -#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0 -/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */ -#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8 -/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */ -#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4 -/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */ -#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8 -/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */ -#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc -/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte - * lines. */ -#define PBF_REG_CREDIT_LB_Q 0x140338 -/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte - * lines. */ -#define PBF_REG_CREDIT_Q0 0x14033c -/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte - * lines. */ -#define PBF_REG_CREDIT_Q1 0x140340 -/* [RW 1] Disable processing further tasks from port 0 (after ending the - current task in process). */ -#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c -/* [RW 1] Disable processing further tasks from port 1 (after ending the - current task in process). */ -#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060 -/* [RW 1] Disable processing further tasks from port 4 (after ending the - current task in process). */ -#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c -#define PBF_REG_DISABLE_PF 0x1402e8 -/* [RW 18] For port 0: For each client that is subject to WFQ (the - * corresponding bit is 1); indicates to which of the credit registers this - * client is mapped. 
For clients which are not credit blocked; their mapping - * is don't care. */ -#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288 -/* [RW 9] For port 1: For each client that is subject to WFQ (the - * corresponding bit is 1); indicates to which of the credit registers this - * client is mapped. For clients which are not credit blocked; their mapping - * is don't care. */ -#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c -/* [RW 6] For port 0: Bit per client to indicate if the client competes in - * the strict priority arbiter directly (corresponding bit = 1); or first - * goes to the RR arbiter (corresponding bit = 0); and then competes in the - * lowest priority in the strict-priority arbiter. */ -#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278 -/* [RW 3] For port 1: Bit per client to indicate if the client competes in - * the strict priority arbiter directly (corresponding bit = 1); or first - * goes to the RR arbiter (corresponding bit = 0); and then competes in the - * lowest priority in the strict-priority arbiter. */ -#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c -/* [RW 6] For port 0: Bit per client to indicate if the client is subject to - * WFQ credit blocking (corresponding bit = 1). */ -#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280 -/* [RW 3] For port 1: Bit per client to indicate if the client is subject to - * WFQ credit blocking (corresponding bit = 1). */ -#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284 -/* [RW 16] For port 0: The number of strict priority arbitration slots - * between 2 RR arbitration slots. A value of 0 means no strict priority - * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR - * arbiter. */ -#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0 -/* [RW 16] For port 1: The number of strict priority arbitration slots - * between 2 RR arbitration slots. A value of 0 means no strict priority - * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR - * arbiter. */ -#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4 -/* [RW 18] For port 0: Indicates which client is connected to each priority - * in the strict-priority arbiter. Priority 0 is the highest priority, and - * priority 5 is the lowest; the RR output is connected to the lowest - * priority (this is not configurable). */ -#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270 -/* [RW 9] For port 1: Indicates which client is connected to each priority - * in the strict-priority arbiter. Priority 0 is the highest priority, and - * priority 5 is the lowest; the RR output is connected to the lowest - * priority (this is not configurable). */ -#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274 -/* [RW 1] Indicates that ETS is performed between the COSes in the command - * arbiter. If reset; strict priority w/ anti-starvation will be performed - * w/o WFQ. */ -#define PBF_REG_ETS_ENABLED 0x15c050 -/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic - * Ethernet header. */ -#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8 -/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ -#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8 -/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest - * priority in the command arbiter. */ -#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c -#define PBF_REG_IF_ENABLE_REG 0x140044 -/* [RW 1] Init bit. When set the initial credits are copied to the credit - registers (except the port credits). Should be set and then reset after - the configuration of the block has ended. */ -#define PBF_REG_INIT 0x140000
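The init bit just defined, and the per-port init bits defined just below, are pulsed by software rather than self-clearing: the credits are programmed first, then the init bit is set and explicitly reset to latch them into the live credit registers. An illustrative sketch for port 0, where init_crd stands for a value computed elsewhere:

	/* Program the initial credit, then pulse the port-0 init bit so it
	 * is copied into the live credit register. */
	REG_WR(bp, PBF_REG_P0_INIT_CRD, init_crd);
	REG_WR(bp, PBF_REG_INIT_P0, 1);
	REG_WR(bp, PBF_REG_INIT_P0, 0);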
-/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte - * lines. */ -#define PBF_REG_INIT_CRD_LB_Q 0x15c248 -/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte - * lines. */ -#define PBF_REG_INIT_CRD_Q0 0x15c230 -/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte - * lines. */ -#define PBF_REG_INIT_CRD_Q1 0x15c234 -/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is - copied to the credit register. Should be set and then reset after the - configuration of the port has ended. */ -#define PBF_REG_INIT_P0 0x140004 -/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is - copied to the credit register. Should be set and then reset after the - configuration of the port has ended. */ -#define PBF_REG_INIT_P1 0x140008 -/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is - copied to the credit register. Should be set and then reset after the - configuration of the port has ended. */ -#define PBF_REG_INIT_P4 0x14000c -/* [R 32] Cyclic counter for the amount of credits in 16 bytes lines added for - * the LB queue. Reset upon init. */ -#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354 -/* [R 32] Cyclic counter for the amount of credits in 16 bytes lines added for - * queue 0. Reset upon init. */ -#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358 -/* [R 32] Cyclic counter for the amount of credits in 16 bytes lines added for - * queue 1. Reset upon init. */ -#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c -/* [RW 1] Enable for mac interface 0. */ -#define PBF_REG_MAC_IF0_ENABLE 0x140030 -/* [RW 1] Enable for mac interface 1. */ -#define PBF_REG_MAC_IF1_ENABLE 0x140034 -/* [RW 1] Enable for the loopback interface. */ -#define PBF_REG_MAC_LB_ENABLE 0x140040 -/* [RW 6] Bit-map indicating which headers must appear in the packet */ -#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4 -/* [RW 16] The number of strict priority arbitration slots between 2 RR - * arbitration slots. A value of 0 means no strict priority cycles; i.e. the - * strict-priority w/ anti-starvation arbiter is a RR arbiter. */ -#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064 -/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause - is not supported. */ -#define PBF_REG_P0_ARB_THRSH 0x1400e4 -/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */ -#define PBF_REG_P0_CREDIT 0x140200 -/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte - lines. */ -#define PBF_REG_P0_INIT_CRD 0x1400d0 -/* [R 32] Cyclic counter for the amount of credits in 16 bytes lines added for - * port 0. Reset upon init. */ -#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308 -/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */ -#define PBF_REG_P0_PAUSE_ENABLE 0x140014 -/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */ -#define PBF_REG_P0_TASK_CNT 0x140204 -/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines - * freed from the task queue of port 0. Reset upon init. */ -#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0 -/* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */ -#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc -/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port - * buffers in 16 byte lines. */ -#define PBF_REG_P1_CREDIT 0x140208 -/* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port - * buffers in 16 byte lines.
*/ -#define PBF_REG_P1_INIT_CRD 0x1400d4 -/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for - * port 1. Reset upon init. */ -#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c -/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */ -#define PBF_REG_P1_TASK_CNT 0x14020c -/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines - * freed from the task queue of port 1. Reset upon init. */ -#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4 -/* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */ -#define PBF_REG_P1_TQ_OCCUPANCY 0x140300 -/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */ -#define PBF_REG_P4_CREDIT 0x140210 -/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte - lines. */ -#define PBF_REG_P4_INIT_CRD 0x1400e0 -/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for - * port 4. Reset upon init. */ -#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310 -/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */ -#define PBF_REG_P4_TASK_CNT 0x140214 -/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines - * freed from the task queue of port 4. Reset upon init. */ -#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8 -/* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */ -#define PBF_REG_P4_TQ_OCCUPANCY 0x140304 -/* [RW 5] Interrupt mask register #0 read/write */ -#define PBF_REG_PBF_INT_MASK 0x1401d4 -/* [R 5] Interrupt register #0 read */ -#define PBF_REG_PBF_INT_STS 0x1401c8 -/* [RW 20] Parity mask register #0 read/write */ -#define PBF_REG_PBF_PRTY_MASK 0x1401e4 -/* [RC 20] Parity register #0 read clear */ -#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc -/* [RW 16] The Ethernet type value for L2 tag 0 */ -#define PBF_REG_TAG_ETHERTYPE_0 0x15c090 -/* [RW 4] The length of the info field for L2 tag 0. The length is between - * 2B and 14B; in 2B granularity */ -#define PBF_REG_TAG_LEN_0 0x15c09c -/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task - * queue. Reset upon init. */ -#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c -/* [R 32] Cyclic counter for number of 8 byte lines freed from the task - * queue 0. Reset upon init. */ -#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390 -/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1. - * Reset upon init. */ -#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394 -/* [R 13] Number of 8 bytes lines occupied in the task queue of the LB - * queue. */ -#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8 -/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */ -#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac -/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. 
*/ -#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0 -#define PB_REG_CONTROL 0 -/* [RW 2] Interrupt mask register #0 read/write */ -#define PB_REG_PB_INT_MASK 0x28 -/* [R 2] Interrupt register #0 read */ -#define PB_REG_PB_INT_STS 0x1c -/* [RW 4] Parity mask register #0 read/write */ -#define PB_REG_PB_PRTY_MASK 0x38 -/* [R 4] Parity register #0 read */ -#define PB_REG_PB_PRTY_STS 0x2c -/* [RC 4] Parity register #0 read clear */ -#define PB_REG_PB_PRTY_STS_CLR 0x30 -#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) -#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) -#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) -#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6) -#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7) -#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4) -#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3) -#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5) -#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2) -/* [R 8] Config space A attention dirty bits. Each bit indicates that the - * corresponding PF generates config space A attention. Set by PXP. Reset by - * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits - * from both paths. */ -#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010 -/* [R 8] Config space B attention dirty bits. Each bit indicates that the - * corresponding PF generates config space B attention. Set by PXP. Reset by - * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits - * from both paths. */ -#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014 -/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1 - * - enable. */ -#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194 -/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask; - * its[8:0]-address. Bits [1:0] must be zero (DW resolution address). */ -#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c -/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1 - * - enable. */ -#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c -/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */ -#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100 -/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */ -#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108 -/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */ -#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110 -/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ -#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac -/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates - * that the FLR register of the corresponding PF was set. Set by PXP. Reset - * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits - * from both paths. */ -#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028 -/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1 - * to a bit in this register in order to clear the corresponding bit in - * flr_request_pf_7_0 register. Note: register contains bits from both - * paths. */ -#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418 -/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit - * indicates that the FLR register of the corresponding VF was set. Set by - * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */ -#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024 -/* [R 32] FLR request attention dirty bits for VFs 0 to 31. 
Each bit - * indicates that the FLR register of the corresponding VF was set. Set by - * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */ -#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018 -/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit - * indicates that the FLR register of the corresponding VF was set. Set by - * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */ -#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c -/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit - * indicates that the FLR register of the corresponding VF was set. Set by - * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */ -#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020 -/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit - * 0 - Target memory read arrived with a correctable error. Bit 1 - Target - * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW - * arrived with a correctable error. Bit 3 - Configuration RW arrived with - * an uncorrectable error. Bit 4 - Completion with Configuration Request - * Retry Status. Bit 5 - Expansion ROM access received with a write request. - * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and - * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010; - * and pcie_rx_last not asserted. */ -#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068 -#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c -#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 -#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 -#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 -/* [R 9] Interrupt register #0 read */ -#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 -/* [RC 9] Interrupt register #0 read clear */ -#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c -/* [RW 2] Parity mask register #0 read/write */ -#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4 -/* [R 2] Parity register #0 read */ -#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8 -/* [RC 2] Parity register #0 read clear */ -#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac -/* [R 13] Details of first request received with error. [2:0] - PFID. [3] - - * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion - * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 - - * completer abort. 3 - Illegal value for this field. [12] valid - indicates - * if there was a completion error since the last time this register was - * cleared. */ -#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080 -/* [R 18] Details of first ATS Translation Completion request received with - * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code - - * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 - - * unsupported request. 2 - completer abort. 3 - Illegal value for this - * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a - * completion error since the last time this register was cleared. */ -#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084 -/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to - * a bit in this register in order to clear the corresponding bit in - * shadow_bme_pf_7_0 register. MCP should never use this unless a - * work-around is needed. Note: register contains bits from both paths. */ -#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458 -/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the - * VF enable register of the corresponding PF is written to 0 and was - * previously 1. Set by PXP. 
Reset by MCP writing 1 to - * sr_iov_disabled_request_clr. Note: register contains bits from both - * paths. */ -#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030 -/* [R 32] Indicates the status of tags 32-63. 0 - tags is used - read - * completion did not return yet. 1 - tag is unused. Same functionality as - * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */ -#define PGLUE_B_REG_TAGS_63_32 0x9244 -/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1 - * - enable. */ -#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170 -/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */ -#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4 -/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */ -#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc -/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */ -#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4 -/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ -#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0 -/* [R 32] Address [31:0] of first read request not submitted due to error */ -#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098 -/* [R 32] Address [63:32] of first read request not submitted due to error */ -#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c -/* [R 31] Details of first read request not submitted due to error. [4:0] - * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request. - * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] - - * VFID. */ -#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0 -/* [R 26] Details of first read request not submitted due to error. [15:0] - * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type - - * [21] - Indicates was_error was set; [22] - Indicates BME was cleared; - * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent - * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid - - * indicates if there was a request not submitted due to error since the - * last time this register was cleared. */ -#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4 -/* [R 32] Address [31:0] of first write request not submitted due to error */ -#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088 -/* [R 32] Address [63:32] of first write request not submitted due to error */ -#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c -/* [R 31] Details of first write request not submitted due to error. [4:0] - * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] - * - VFID. */ -#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090 -/* [R 26] Details of first write request not submitted due to error. [15:0] - * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type - - * [21] - Indicates was_error was set; [22] - Indicates BME was cleared; - * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent - * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid - - * indicates if there was a request not submitted due to error since the - * last time this register was cleared. */ -#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094 -/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask; - * its[4:0]-address relative to start_offset_a. Bits [1:0] can have any - * value (Byte resolution address). 
*/ -#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128 -#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c -#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130 -#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134 -#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138 -#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c -#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140 -/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1 - * - enable. */ -#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c -/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1 - * - enable. */ -#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180 -/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1 - * - enable. */ -#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184 -/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */ -#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8 -/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */ -#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0 -/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */ -#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8 -/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ -#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4 -/* [R 26] Details of first target VF request accessing VF GRC space that - * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write. - * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a - * request accessing VF GRC space that failed permission check since the - * last time this register was cleared. Permission checks are: function - * permission; R/W permission; address range permission. */ -#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234 -/* [R 31] Details of first target VF request with length violation (too many - * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address). - * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30] - * valid - indicates if there was a request with length violation since the - * last time this register was cleared. Length violations: length of more - * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and - * length is more than 1 DW. */ -#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230 -/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates - * that there was a completion with uncorrectable error for the - * corresponding PF. Set by PXP. Reset by MCP writing 1 to - * was_error_pf_7_0_clr. */ -#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c -/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1 - * to a bit in this register in order to clear the corresponding bit in - * was_error_pf_7_0 register. */ -#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470 -/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit - * indicates that there was a completion with uncorrectable error for the - * corresponding VF. Set by PXP. Reset by MCP writing 1 to - * was_error_vf_127_96_clr. */ -#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078 -/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP - * writes 1 to a bit in this register in order to clear the corresponding - * bit in was_error_vf_127_96 register. */ -#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
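Every flr_request and was_error bank in this block pairs a read-only dirty-bit register with a write-only *_CLR register: PXP sets a dirty bit, and it is cleared by writing 1 to the same bit position in the paired clear register (the comments assign that duty to the MCP). A minimal sketch of the handshake, assuming the driver's REG_RD()/REG_WR() accessors and a hypothetical helper name that is not part of this patch:

    /* Sample the PF was_error dirty bits and acknowledge whatever is set.
     * Writing 1 clears the corresponding dirty bit; 0 bits are untouched. */
    static void ack_was_error_pf(struct bnx2x *bp)
    {
            u32 dirty = REG_RD(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0);

            if (dirty)
                    REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, dirty);
    }

The VF banks below follow the same pattern, 32 VFs per register.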
-/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit - * indicates that there was a completion with uncorrectable error for the - * corresponding VF. Set by PXP. Reset by MCP writing 1 to - * was_error_vf_31_0_clr. */ -#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c -/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes - * 1 to a bit in this register in order to clear the corresponding bit in - * was_error_vf_31_0 register. */ -#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478 -/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit - * indicates that there was a completion with uncorrectable error for the - * corresponding VF. Set by PXP. Reset by MCP writing 1 to - * was_error_vf_63_32_clr. */ -#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070 -/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes - * 1 to a bit in this register in order to clear the corresponding bit in - * was_error_vf_63_32 register. */ -#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c -/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit - * indicates that there was a completion with uncorrectable error for the - * corresponding VF. Set by PXP. Reset by MCP writing 1 to - * was_error_vf_95_64_clr. */ -#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074 -/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes - * 1 to a bit in this register in order to clear the corresponding bit in - * was_error_vf_95_64 register. */ -#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480 -/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1 - * - enable. */ -#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188 -/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */ -#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec -/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */ -#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4 -/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */ -#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc -/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ -#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8 -#define PRS_REG_A_PRSU_20 0x40134 -/* [R 8] debug only: CFC load request current credit. Transaction based. */ -#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 -/* [R 8] debug only: CFC search request current credit. Transaction based. */ -#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168 -/* [RW 6] The initial credit for the search message to the CFC interface. - Credit is transaction based. */ -#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c -/* [RW 24] CID for port 0 if no match */ -#define PRS_REG_CID_PORT_0 0x400fc -/* [RW 32] The CM header for flush message where 'load existed' bit in CFC - load response is reset and packet type is 0. Used in packet start message - to TCM. */ -#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc -#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0 -#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4 -#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8 -#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec -#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0 -/* [RW 32] The CM header for flush message where 'load existed' bit in CFC - load response is set and packet type is 0. Used in packet start message - to TCM. */ -#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc -#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0 -#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4 -#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8 -#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc -#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0 -/* [RW 32] The CM header for a match and packet type 1 for loopback port. - Used in packet start message to TCM.
*/ -#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c -#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0 -#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4 -#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8 -/* [RW 32] The CM header for a match and packet type 0. Used in packet start - message to TCM. */ -#define PRS_REG_CM_HDR_TYPE_0 0x40078 -#define PRS_REG_CM_HDR_TYPE_1 0x4007c -#define PRS_REG_CM_HDR_TYPE_2 0x40080 -#define PRS_REG_CM_HDR_TYPE_3 0x40084 -#define PRS_REG_CM_HDR_TYPE_4 0x40088 -/* [RW 32] The CM header in case there was not a match on the connection */ -#define PRS_REG_CM_NO_MATCH_HDR 0x400b8 -/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */ -#define PRS_REG_E1HOV_MODE 0x401c8 -/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet - start message to TCM. */ -#define PRS_REG_EVENT_ID_1 0x40054 -#define PRS_REG_EVENT_ID_2 0x40058 -#define PRS_REG_EVENT_ID_3 0x4005c -/* [RW 16] The Ethernet type value for FCoE */ -#define PRS_REG_FCOE_TYPE 0x401d0 -/* [RW 8] Context region for flush packet with packet type 0. Used in CFC - load request message. */ -#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004 -#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008 -#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c -#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010 -#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014 -#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018 -#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c -#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020 -/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic - * Ethernet header. */ -#define PRS_REG_HDRS_AFTER_BASIC 0x40238 -/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic - * Ethernet header for port 0 packets. */ -#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270 -#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290 -/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ -#define PRS_REG_HDRS_AFTER_TAG_0 0x40248 -/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for - * port 0 packets */ -#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280 -#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0 -/* [RW 4] The increment value to send in the CFC load request message */ -#define PRS_REG_INC_VALUE 0x40048 -/* [RW 6] Bit-map indicating which headers must appear in the packet */ -#define PRS_REG_MUST_HAVE_HDRS 0x40254 -/* [RW 6] Bit-map indicating which headers must appear in the packet for - * port 0 packets */ -#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c -#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac -#define PRS_REG_NIC_MODE 0x40138 -/* [RW 8] The 8-bit event ID for cases where there is no match on the - connection. Used in packet start message to TCM. */ -#define PRS_REG_NO_MATCH_EVENT_ID 0x40070 -/* [ST 24] The number of input CFC flush packets */ -#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128 -/* [ST 32] The number of cycles the Parser halted its operation since it - could not allocate the next serial number */ -#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130 -/* [ST 24] The number of input packets */ -#define PRS_REG_NUM_OF_PACKETS 0x40124 -/* [ST 24] The number of input transparent flush packets */ -#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c -/* [RW 8] Context region for received Ethernet packet with a match and - packet type 0. 
Used in CFC load request message */ -#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028 -#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c -#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030 -#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034 -#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038 -#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c -#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040 -#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044 -/* [R 2] debug only: Number of pending requests for CAC on port 0. */ -#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174 -/* [R 2] debug only: Number of pending requests for header parsing. */ -#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170 -/* [R 1] Interrupt register #0 read */ -#define PRS_REG_PRS_INT_STS 0x40188 -/* [RW 8] Parity mask register #0 read/write */ -#define PRS_REG_PRS_PRTY_MASK 0x401a4 -/* [R 8] Parity register #0 read */ -#define PRS_REG_PRS_PRTY_STS 0x40198 -/* [RC 8] Parity register #0 read clear */ -#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c -/* [RW 8] Context region for pure acknowledge packets. Used in CFC load - request message */ -#define PRS_REG_PURE_REGIONS 0x40024 -/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this - serial number was released by SDM but cannot be used because a previous - serial number was not released. */ -#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154 -/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this - serial number was released by SDM but cannot be used because a previous - serial number was not released. */ -#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158 -/* [R 4] debug only: SRC current credit. Transaction based. */ -#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c -/* [RW 16] The Ethernet type value for L2 tag 0 */ -#define PRS_REG_TAG_ETHERTYPE_0 0x401d4 -/* [RW 4] The length of the info field for L2 tag 0. The length is between - * 2B and 14B; in 2B granularity */ -#define PRS_REG_TAG_LEN_0 0x4022c -/* [R 8] debug only: TCM current credit. Cycle based. */ -#define PRS_REG_TCM_CURRENT_CREDIT 0x40160 -/* [R 8] debug only: TSDM current credit. Transaction based. */ -#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c -#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19) -#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20) -#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22) -#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23) -#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24) -#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7) -#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7) -/* [R 6] Debug only: Number of used entries in the data FIFO */ -#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c -/* [R 7] Debug only: Number of used entries in the header FIFO */ -#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 -#define PXP2_REG_PGL_ADDR_88_F0 0x120534 -#define PXP2_REG_PGL_ADDR_8C_F0 0x120538 -#define PXP2_REG_PGL_ADDR_90_F0 0x12053c -#define PXP2_REG_PGL_ADDR_94_F0 0x120540 -#define PXP2_REG_PGL_CONTROL0 0x120490 -#define PXP2_REG_PGL_CONTROL1 0x120514 -#define PXP2_REG_PGL_DEBUG 0x120520 -/* [RW 32] third dword data of expansion rom request. this register is - special. reading from it provides a vector of outstanding read requests.
if - a bit is zero it means that a read request on the corresponding tag did - not finish yet (not all completions have arrived for it) */ -#define PXP2_REG_PGL_EXP_ROM2 0x120808 -/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask; - its[15:0]-address */ -#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4 -#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8 -#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc -#define PXP2_REG_PGL_INT_CSDM_3 0x120500 -#define PXP2_REG_PGL_INT_CSDM_4 0x120504 -#define PXP2_REG_PGL_INT_CSDM_5 0x120508 -#define PXP2_REG_PGL_INT_CSDM_6 0x12050c -#define PXP2_REG_PGL_INT_CSDM_7 0x120510 -/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask; - its[15:0]-address */ -#define PXP2_REG_PGL_INT_TSDM_0 0x120494 -#define PXP2_REG_PGL_INT_TSDM_1 0x120498 -#define PXP2_REG_PGL_INT_TSDM_2 0x12049c -#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0 -#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4 -#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8 -#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac -#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0 -/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask; - its[15:0]-address */ -#define PXP2_REG_PGL_INT_USDM_0 0x1204b4 -#define PXP2_REG_PGL_INT_USDM_1 0x1204b8 -#define PXP2_REG_PGL_INT_USDM_2 0x1204bc -#define PXP2_REG_PGL_INT_USDM_3 0x1204c0 -#define PXP2_REG_PGL_INT_USDM_4 0x1204c4 -#define PXP2_REG_PGL_INT_USDM_5 0x1204c8 -#define PXP2_REG_PGL_INT_USDM_6 0x1204cc -#define PXP2_REG_PGL_INT_USDM_7 0x1204d0 -/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask; - its[15:0]-address */ -#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4 -#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8 -#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc -#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0 -#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4 -#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8 -#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec -#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0 -/* [RW 3] this field allows one function to pretend being another function - when accessing any BAR mapped resource within the device. the value of - the field is the number of the function that will be accessed - effectively. after software writes to this field it must read it back in - order to know that the new value has taken effect */ -#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674 -#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678 -#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c -#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680 -#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684 -#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688 -#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c -#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690
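The pretend-function registers above imply a strict write-then-read-back ordering: the read is what guarantees the new function ID has taken effect before any dependent BAR access is issued. A minimal sketch of that sequence, assuming the driver's REG_WR()/REG_RD() accessors and BP_FUNC() macro, with a hypothetical helper name that is not part of this hunk; the 4-byte per-function stride follows from the F0-F7 addresses listed above:

    /* Make subsequent host accesses be issued on behalf of pretend_func_num;
     * the read-back is required before relying on the new value. */
    static void bnx2x_pretend(struct bnx2x *bp, u16 pretend_func_num)
    {
            u32 pretend_reg = PXP2_REG_PGL_PRETEND_FUNC_F0 + 4 * BP_FUNC(bp);

            REG_WR(bp, pretend_reg, pretend_func_num);
            REG_RD(bp, pretend_reg);        /* flush the write */
    }

Writing the function's own number back through the same register would end the pretend window.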
-/* [R 1] this bit indicates that a read request was blocked because - bus_master_en was deasserted */ -#define PXP2_REG_PGL_READ_BLOCKED 0x120568 -#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8 -/* [R 18] debug only */ -#define PXP2_REG_PGL_TXW_CDTS 0x12052c -/* [R 1] this bit indicates that a write request was blocked because - bus_master_en was deasserted */ -#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564 -#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0 -#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4 -#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8 -#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4 -#define PXP2_REG_PSWRQ_BW_ADD28 0x120228 -#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8 -#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4 -#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8 -#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc -#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0 -#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c -#define PXP2_REG_PSWRQ_BW_L1 0x1202b0 -#define PXP2_REG_PSWRQ_BW_L10 0x1202d4 -#define PXP2_REG_PSWRQ_BW_L11 0x1202d8 -#define PXP2_REG_PSWRQ_BW_L2 0x1202b4 -#define PXP2_REG_PSWRQ_BW_L28 0x120318 -#define PXP2_REG_PSWRQ_BW_L3 0x1202b8 -#define PXP2_REG_PSWRQ_BW_L6 0x1202c4 -#define PXP2_REG_PSWRQ_BW_L7 0x1202c8 -#define PXP2_REG_PSWRQ_BW_L8 0x1202cc -#define PXP2_REG_PSWRQ_BW_L9 0x1202d0 -#define PXP2_REG_PSWRQ_BW_RD 0x120324 -#define PXP2_REG_PSWRQ_BW_UB1 0x120238 -#define PXP2_REG_PSWRQ_BW_UB10 0x12025c -#define PXP2_REG_PSWRQ_BW_UB11 0x120260 -#define PXP2_REG_PSWRQ_BW_UB2 0x12023c -#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0 -#define PXP2_REG_PSWRQ_BW_UB3 0x120240 -#define PXP2_REG_PSWRQ_BW_UB6 0x12024c -#define PXP2_REG_PSWRQ_BW_UB7 0x120250 -#define PXP2_REG_PSWRQ_BW_UB8 0x120254 -#define PXP2_REG_PSWRQ_BW_UB9 0x120258 -#define PXP2_REG_PSWRQ_BW_WR 0x120328 -#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000 -#define PXP2_REG_PSWRQ_QM0_L2P 0x120038 -#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054 -#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c -#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0 -/* [RW 32] Interrupt mask register #0 read/write */ -#define PXP2_REG_PXP2_INT_MASK_0 0x120578 -/* [R 32] Interrupt register #0 read */ -#define PXP2_REG_PXP2_INT_STS_0 0x12056c -#define PXP2_REG_PXP2_INT_STS_1 0x120608 -/* [RC 32] Interrupt register #0 read clear */ -#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570 -/* [RW 32] Parity mask register #0 read/write */ -#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588 -#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598 -/* [R 32] Parity register #0 read */ -#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c -#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c -/* [RC 32] Parity register #0 read clear */ -#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580 -#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590 -/* [R 1] Debug only: The 'almost full' indication from each fifo (gives - indication about backpressure) */ -#define PXP2_REG_RD_ALMOST_FULL_0 0x120424 -/* [R 8] Debug only: The blocks counter - number of unused block ids */ -#define PXP2_REG_RD_BLK_CNT 0x120418 -/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer. - Must be bigger than 6. Normally should not be changed.
*/ -#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c -/* [RW 2] CDU byte swapping mode configuration for master read requests */ -#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404 -/* [RW 1] When '1'; inputs to the PSWRD block are ignored */ -#define PXP2_REG_RD_DISABLE_INPUTS 0x120374 -/* [R 1] PSWRD internal memories initialization is done */ -#define PXP2_REG_RD_INIT_DONE 0x120370 -/* [RW 8] The maximum number of blocks in Tetris Buffer that can be - allocated for vq10 */ -#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0 -/* [RW 8] The maximum number of blocks in Tetris Buffer that can be - allocated for vq11 */ -#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4 -/* [RW 8] The maximum number of blocks in Tetris Buffer that can be - allocated for vq17 */ -#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc -/* [RW 8] The maximum number of blocks in Tetris Buffer that can be - allocated for vq18 */ -#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0 -/* [RW 8] The maximum number of blocks in Tetris Buffer that can be - allocated for vq19 */ -#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4 -/* [RW 8] The maximum number of blocks in Tetris Buffer that can be - allocated for vq22 */ -#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0 -/* [RW 8] The maximum number of blocks in Tetris Buffer that can be - allocated for vq25 */ -#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc -/* [RW 8] The maximum number of blocks in Tetris Buffer that can be - allocated for vq6 */ -#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390 -/* [RW 8] The maximum number of blocks in Tetris Buffer that can be - allocated for vq9 */ -#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c -/* [RW 2] PBF byte swapping mode configuration for master read requests */ -#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4 -/* [R 1] Debug only: Indication if delivery ports are idle */ -#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c -#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420 -/* [RW 2] QM byte swapping mode configuration for master read requests */ -#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8 -/* [R 7] Debug only: The SR counter - number of unused sub request ids */ -#define PXP2_REG_RD_SR_CNT 0x120414 -/* [RW 2] SRC byte swapping mode configuration for master read requests */ -#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400 -/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must - be bigger than 1. Normally should not be changed. 
*/ -#define PXP2_REG_RD_SR_NUM_CFG 0x120408 -/* [RW 1] Signals the PSWRD block to start initializing internal memories */ -#define PXP2_REG_RD_START_INIT 0x12036c -/* [RW 2] TM byte swapping mode configuration for master read requests */ -#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc -/* [RW 10] Bandwidth addition to VQ0 write requests */ -#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc -/* [RW 10] Bandwidth addition to VQ12 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec -/* [RW 10] Bandwidth addition to VQ13 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0 -/* [RW 10] Bandwidth addition to VQ14 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4 -/* [RW 10] Bandwidth addition to VQ15 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8 -/* [RW 10] Bandwidth addition to VQ16 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc -/* [RW 10] Bandwidth addition to VQ17 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD17 0x120200 -/* [RW 10] Bandwidth addition to VQ18 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD18 0x120204 -/* [RW 10] Bandwidth addition to VQ19 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD19 0x120208 -/* [RW 10] Bandwidth addition to VQ20 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c -/* [RW 10] Bandwidth addition to VQ22 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD22 0x120210 -/* [RW 10] Bandwidth addition to VQ23 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD23 0x120214 -/* [RW 10] Bandwidth addition to VQ24 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD24 0x120218 -/* [RW 10] Bandwidth addition to VQ25 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c -/* [RW 10] Bandwidth addition to VQ26 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD26 0x120220 -/* [RW 10] Bandwidth addition to VQ27 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD27 0x120224 -/* [RW 10] Bandwidth addition to VQ4 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc -/* [RW 10] Bandwidth addition to VQ5 read requests */ -#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0 -/* [RW 10] Bandwidth Typical L for VQ0 Read requests */ -#define PXP2_REG_RQ_BW_RD_L0 0x1202ac -/* [RW 10] Bandwidth Typical L for VQ12 Read requests */ -#define PXP2_REG_RQ_BW_RD_L12 0x1202dc -/* [RW 10] Bandwidth Typical L for VQ13 Read requests */ -#define PXP2_REG_RQ_BW_RD_L13 0x1202e0 -/* [RW 10] Bandwidth Typical L for VQ14 Read requests */ -#define PXP2_REG_RQ_BW_RD_L14 0x1202e4 -/* [RW 10] Bandwidth Typical L for VQ15 Read requests */ -#define PXP2_REG_RQ_BW_RD_L15 0x1202e8 -/* [RW 10] Bandwidth Typical L for VQ16 Read requests */ -#define PXP2_REG_RQ_BW_RD_L16 0x1202ec -/* [RW 10] Bandwidth Typical L for VQ17 Read requests */ -#define PXP2_REG_RQ_BW_RD_L17 0x1202f0 -/* [RW 10] Bandwidth Typical L for VQ18 Read requests */ -#define PXP2_REG_RQ_BW_RD_L18 0x1202f4 -/* [RW 10] Bandwidth Typical L for VQ19 Read requests */ -#define PXP2_REG_RQ_BW_RD_L19 0x1202f8 -/* [RW 10] Bandwidth Typical L for VQ20 Read requests */ -#define PXP2_REG_RQ_BW_RD_L20 0x1202fc -/* [RW 10] Bandwidth Typical L for VQ22 Read requests */ -#define PXP2_REG_RQ_BW_RD_L22 0x120300 -/* [RW 10] Bandwidth Typical L for VQ23 Read requests */ -#define PXP2_REG_RQ_BW_RD_L23 0x120304 -/* [RW 10] Bandwidth Typical L for VQ24 Read requests */ -#define PXP2_REG_RQ_BW_RD_L24 0x120308 -/* [RW 10] Bandwidth Typical L for VQ25 Read requests */ -#define PXP2_REG_RQ_BW_RD_L25 0x12030c -/* [RW 10] Bandwidth Typical L for VQ26 Read requests */ -#define PXP2_REG_RQ_BW_RD_L26 0x120310 -/* [RW 10] Bandwidth Typical L for VQ27 Read 
requests */ -#define PXP2_REG_RQ_BW_RD_L27 0x120314 -/* [RW 10] Bandwidth Typical L for VQ4 Read requests */ -#define PXP2_REG_RQ_BW_RD_L4 0x1202bc -/* [RW 10] Bandwidth Typical L for VQ5 Read- currently not used */ -#define PXP2_REG_RQ_BW_RD_L5 0x1202c0 -/* [RW 7] Bandwidth upper bound for VQ0 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234 -/* [RW 7] Bandwidth upper bound for VQ12 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264 -/* [RW 7] Bandwidth upper bound for VQ13 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268 -/* [RW 7] Bandwidth upper bound for VQ14 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c -/* [RW 7] Bandwidth upper bound for VQ15 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270 -/* [RW 7] Bandwidth upper bound for VQ16 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274 -/* [RW 7] Bandwidth upper bound for VQ17 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278 -/* [RW 7] Bandwidth upper bound for VQ18 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c -/* [RW 7] Bandwidth upper bound for VQ19 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280 -/* [RW 7] Bandwidth upper bound for VQ20 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284 -/* [RW 7] Bandwidth upper bound for VQ22 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288 -/* [RW 7] Bandwidth upper bound for VQ23 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c -/* [RW 7] Bandwidth upper bound for VQ24 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290 -/* [RW 7] Bandwidth upper bound for VQ25 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294 -/* [RW 7] Bandwidth upper bound for VQ26 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298 -/* [RW 7] Bandwidth upper bound for VQ27 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c -/* [RW 7] Bandwidth upper bound for VQ4 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244 -/* [RW 7] Bandwidth upper bound for VQ5 read requests */ -#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248 -/* [RW 10] Bandwidth addition to VQ29 write requests */ -#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c -/* [RW 10] Bandwidth addition to VQ30 write requests */ -#define PXP2_REG_RQ_BW_WR_ADD30 0x120230 -/* [RW 10] Bandwidth Typical L for VQ29 Write requests */ -#define PXP2_REG_RQ_BW_WR_L29 0x12031c -/* [RW 10] Bandwidth Typical L for VQ30 Write requests */ -#define PXP2_REG_RQ_BW_WR_L30 0x120320 -/* [RW 7] Bandwidth upper bound for VQ29 */ -#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4 -/* [RW 7] Bandwidth upper bound for VQ30 */ -#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8 -/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */ -#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008 -/* [RW 2] Endian mode for cdu */ -#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0 -#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c -#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620 -/* [RW 3] page size in L2P table for CDU module; -4k; -8k; -16k; -32k; -64k; - -128k */ -#define PXP2_REG_RQ_CDU_P_SIZE 0x120018 -/* [R 1] 1' indicates that the requester has finished its internal - configuration */ -#define PXP2_REG_RQ_CFG_DONE 0x1201b4 -/* [RW 2] Endian mode for debug */ -#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4 -/* [RW 1] When '1'; requests will enter input buffers but wont get out - towards the glue */ -#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330 -/* [RW 4] Determines alignment of write SRs when a request is split into - * several 
SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B - * aligned. 4 - 512B aligned. */ -#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0 -/* [RW 4] Determines alignment of read SRs when a request is split into - * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B - * aligned. 4 - 512B aligned. */ -#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c -/* [RW 1] when set the new alignment method (E2) will be applied; when reset - * the original alignment method (E1 E1H) will be applied */ -#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930 -/* [RW 1] If 1 ILT failure will not result in ELT access; An interrupt will - be asserted */ -#define PXP2_REG_RQ_ELT_DISABLE 0x12066c -/* [RW 2] Endian mode for hc */ -#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8 -/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for back - compatibility needs; Note that different registers are used per mode */ -#define PXP2_REG_RQ_ILT_MODE 0x1205b4 -/* [WB 53] Onchip address table */ -#define PXP2_REG_RQ_ONCHIP_AT 0x122000 -/* [WB 53] Onchip address table - B0 */ -#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000 -/* [RW 13] Pending read limiter threshold; in Dwords */ -#define PXP2_REG_RQ_PDR_LIMIT 0x12033c -/* [RW 2] Endian mode for qm */ -#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194 -#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634 -#define PXP2_REG_RQ_QM_LAST_ILT 0x120638 -/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; - -128k */ -#define PXP2_REG_RQ_QM_P_SIZE 0x120050 -/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */ -#define PXP2_REG_RQ_RBC_DONE 0x1201b0 -/* [RW 3] Max burst size field for read requests port 0; 000 - 128B; - 001:256B; 010: 512B; 011:1K; 100:2K; 101:4K */ -#define PXP2_REG_RQ_RD_MBS0 0x120160 -/* [RW 3] Max burst size field for read requests port 1; 000 - 128B; - 001:256B; 010: 512B; 011:1K; 100:2K; 101:4K */ -#define PXP2_REG_RQ_RD_MBS1 0x120168 -/* [RW 2] Endian mode for src */ -#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c -#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c -#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640 -/* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k; - -128k */ -#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c -/* [RW 2] Endian mode for tm */ -#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198 -#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644 -#define PXP2_REG_RQ_TM_LAST_ILT 0x120648 -/* [RW 3] page size in L2P table for TM module; -4k; -8k; -16k; -32k; -64k; - -128k */ -#define PXP2_REG_RQ_TM_P_SIZE 0x120034 -/* [R 5] Number of entries in the ufifo; this fifo has l2p completions */ -#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c -/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */ -#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094 -/* [R 8] Number of entries occupied by vq 0 in pswrq memory */ -#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810 -/* [R 8] Number of entries occupied by vq 10 in pswrq memory */ -#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818 -/* [R 8] Number of entries occupied by vq 11 in pswrq memory */ -#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820 -/* [R 8] Number of entries occupied by vq 12 in pswrq memory */ -#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828 -/* [R 8] Number of entries occupied by vq 13 in pswrq memory */ -#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830 -/* [R 8] Number of entries occupied by vq 14 in pswrq memory */ -#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838 -/* [R 8] Number of entries occupied by vq 15 in pswrq memory */ -#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840 -/* [R 8] Number of entries
occupied by vq 16 in pswrq memory */ -#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848 -/* [R 8] Number of entries occupied by vq 17 in pswrq memory */ -#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850 -/* [R 8] Number of entries occupied by vq 18 in pswrq memory */ -#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858 -/* [R 8] Number of entries occupied by vq 19 in pswrq memory */ -#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860 -/* [R 8] Number of entries occupied by vq 1 in pswrq memory */ -#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868 -/* [R 8] Number of entries occupied by vq 20 in pswrq memory */ -#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870 -/* [R 8] Number of entries occupied by vq 21 in pswrq memory */ -#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878 -/* [R 8] Number of entries occupied by vq 22 in pswrq memory */ -#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880 -/* [R 8] Number of entries occupied by vq 23 in pswrq memory */ -#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888 -/* [R 8] Number of entries occupied by vq 24 in pswrq memory */ -#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890 -/* [R 8] Number of entries occupied by vq 25 in pswrq memory */ -#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898 -/* [R 8] Number of entries occupied by vq 26 in pswrq memory */ -#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0 -/* [R 8] Number of entries occupied by vq 27 in pswrq memory */ -#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8 -/* [R 8] Number of entries occupied by vq 28 in pswrq memory */ -#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0 -/* [R 8] Number of entries occupied by vq 29 in pswrq memory */ -#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8 -/* [R 8] Number of entries occupied by vq 2 in pswrq memory */ -#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0 -/* [R 8] Number of entries occupied by vq 30 in pswrq memory */ -#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8 -/* [R 8] Number of entries occupied by vq 31 in pswrq memory */ -#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0 -/* [R 8] Number of entries occupied by vq 3 in pswrq memory */ -#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8 -/* [R 8] Number of entries occupied by vq 4 in pswrq memory */ -#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0 -/* [R 8] Number of entries occupied by vq 5 in pswrq memory */ -#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8 -/* [R 8] Number of entries occupied by vq 6 in pswrq memory */ -#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0 -/* [R 8] Number of entries occupied by vq 7 in pswrq memory */ -#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8 -/* [R 8] Number of entries occupied by vq 8 in pswrq memory */ -#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900 -/* [R 8] Number of entries occupied by vq 9 in pswrq memory */ -#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908 -/* [RW 3] Max burst size field for write requests port 0; 000 - 128B; - 001:256B; 010: 512B; */ -#define PXP2_REG_RQ_WR_MBS0 0x12015c -/* [RW 3] Max burst size field for write requests port 1; 000 - 128B; - 001:256B; 010: 512B; */ -#define PXP2_REG_RQ_WR_MBS1 0x120164 -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the - buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_CDU_MPS 0x1205f0 -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the - buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_CSDM_MPS 0x1205d0 -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the - buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_DBG_MPS 0x1205e8 -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the -
buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_DMAE_MPS 0x1205ec -/* [RW 10] if the number of entries in the dmae fifo is higher than this - threshold then the has_payload indication will be asserted; the default - value should be greater than the write MBS size */ -#define PXP2_REG_WR_DMAE_TH 0x120368 -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the - buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_HC_MPS 0x1205c8 -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the - buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_QM_MPS 0x1205dc -/* [RW 1] 0 - working in A0 mode; 1 - working in B0 mode */ -#define PXP2_REG_WR_REV_MODE 0x120670 -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the - buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_SRC_MPS 0x1205e4 -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the - buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_TM_MPS 0x1205e0 -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the - buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_TSDM_MPS 0x1205d4 -/* [RW 10] if the number of entries in the usdmdp fifo is higher than this - threshold then the has_payload indication will be asserted; the default - value should be greater than the write MBS size */ -#define PXP2_REG_WR_USDMDP_TH 0x120348 -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the - buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_USDM_MPS 0x1205cc -/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the - buffer reaches this number has_payload will be asserted */ -#define PXP2_REG_WR_XSDM_MPS 0x1205d8 -/* [R 1] debug only: Indication if PSWHST arbiter is idle */ -#define PXP_REG_HST_ARB_IS_IDLE 0x103004 -/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means - this client is waiting for the arbiter. */ -#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008 -/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue - block. Should be used to close the gates. */ -#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4 -/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit - should be updated according to 'hst_discard_doorbells' register when the - state machine is idle */ -#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0 -/* [RW 1] When 1; new internal writes arriving to the block are discarded. - Should be used to close the gates. */ -#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8 -/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1' - means this PSWHST is discarding inputs from this client. Each bit should - be updated according to 'hst_discard_internal_writes' register when the - state machine is idle. */ -#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
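The two discard controls above are the "close the gates" knobs, and each is shadowed by a read-only status register that only reflects the control once the internal state machine is idle. A minimal sketch of a close-and-verify sequence, assuming the driver's REG_RD()/REG_WR() accessors inside kernel code where udelay() is available; the polling budget is arbitrary, the helper name is hypothetical, and treating the full 6-bit internal-writes mask (0x3f) as "all clients discarding" is an assumption the comments imply but do not state:

    /* Close the PSWHST gates, then poll the status shadows until the
     * discard mode has actually been latched by the state machine. */
    static int bnx2x_close_pswhst_gates(struct bnx2x *bp)
    {
            int cnt;

            REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, 1);
            REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, 1);

            for (cnt = 0; cnt < 1000; cnt++) {
                    if (REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS_STATUS) == 1 &&
                        REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS) == 0x3f)
                            return 0;
                    udelay(10);     /* hypothetical settle time */
            }
            return -EBUSY;
    }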
-/* [WB 160] Used for initialization of the inbound interrupts memory */ -#define PXP_REG_HST_INBOUND_INT 0x103800 -/* [RW 32] Interrupt mask register #0 read/write */ -#define PXP_REG_PXP_INT_MASK_0 0x103074 -#define PXP_REG_PXP_INT_MASK_1 0x103084 -/* [R 32] Interrupt register #0 read */ -#define PXP_REG_PXP_INT_STS_0 0x103068 -#define PXP_REG_PXP_INT_STS_1 0x103078 -/* [RC 32] Interrupt register #0 read clear */ -#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c -#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c -/* [RW 27] Parity mask register #0 read/write */ -#define PXP_REG_PXP_PRTY_MASK 0x103094 -/* [R 26] Parity register #0 read */ -#define PXP_REG_PXP_PRTY_STS 0x103088 -/* [RC 27] Parity register #0 read clear */ -#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c -/* [RW 4] The activity counter initial increment value sent in the load - request */ -#define QM_REG_ACTCTRINITVAL_0 0x168040 -#define QM_REG_ACTCTRINITVAL_1 0x168044 -#define QM_REG_ACTCTRINITVAL_2 0x168048 -#define QM_REG_ACTCTRINITVAL_3 0x16804c -/* [RW 32] The base logical address (in bytes) of each physical queue. The - index I represents the physical queue number. The 12 lsbs are ignored and - considered zero so practically there are only 20 bits in this register; - queues 63-0 */ -#define QM_REG_BASEADDR 0x168900 -/* [RW 32] The base logical address (in bytes) of each physical queue. The - index I represents the physical queue number. The 12 lsbs are ignored and - considered zero so practically there are only 20 bits in this register; - queues 127-64 */ -#define QM_REG_BASEADDR_EXT_A 0x16e100 -/* [RW 16] The byte credit cost for each task. This value is for both ports */ -#define QM_REG_BYTECRDCOST 0x168234 -/* [RW 16] The initial byte credit value for both ports. */ -#define QM_REG_BYTECRDINITVAL 0x168238 -/* [RW 32] A bit per physical queue. If the bit is cleared then the physical - queue uses port 0 else it uses port 1; queues 31-0 */ -#define QM_REG_BYTECRDPORT_LSB 0x168228 -/* [RW 32] A bit per physical queue. If the bit is cleared then the physical - queue uses port 0 else it uses port 1; queues 95-64 */ -#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520 -/* [RW 32] A bit per physical queue. If the bit is cleared then the physical - queue uses port 0 else it uses port 1; queues 63-32 */ -#define QM_REG_BYTECRDPORT_MSB 0x168224 -/* [RW 32] A bit per physical queue. If the bit is cleared then the physical - queue uses port 0 else it uses port 1; queues 127-96 */ -#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c -/* [RW 16] The byte credit value that if above the QM is considered almost - full */ -#define QM_REG_BYTECREDITAFULLTHR 0x168094 -/* [RW 4] The initial credit for interface */ -#define QM_REG_CMINITCRD_0 0x1680cc -#define QM_REG_BYTECRDCMDQ_0 0x16e6e8 -#define QM_REG_CMINITCRD_1 0x1680d0 -#define QM_REG_CMINITCRD_2 0x1680d4 -#define QM_REG_CMINITCRD_3 0x1680d8 -#define QM_REG_CMINITCRD_4 0x1680dc -#define QM_REG_CMINITCRD_5 0x1680e0 -#define QM_REG_CMINITCRD_6 0x1680e4 -#define QM_REG_CMINITCRD_7 0x1680e8 -/* [RW 8] A mask bit per CM interface.
If this bit is 0 then this interface - is masked */ -#define QM_REG_CMINTEN 0x1680ec -/* [RW 12] A bit vector which indicates which one of the queues are tied to - interface 0 */ -#define QM_REG_CMINTVOQMASK_0 0x1681f4 -#define QM_REG_CMINTVOQMASK_1 0x1681f8 -#define QM_REG_CMINTVOQMASK_2 0x1681fc -#define QM_REG_CMINTVOQMASK_3 0x168200 -#define QM_REG_CMINTVOQMASK_4 0x168204 -#define QM_REG_CMINTVOQMASK_5 0x168208 -#define QM_REG_CMINTVOQMASK_6 0x16820c -#define QM_REG_CMINTVOQMASK_7 0x168210 -/* [RW 20] The number of connections divided by 16 which dictates the size - of each queue which belongs to even function number. */ -#define QM_REG_CONNNUM_0 0x168020 -/* [R 6] Keep the fill level of the fifo from write client 4 */ -#define QM_REG_CQM_WRC_FIFOLVL 0x168018 -/* [RW 8] The context regions sent in the CFC load request */ -#define QM_REG_CTXREG_0 0x168030 -#define QM_REG_CTXREG_1 0x168034 -#define QM_REG_CTXREG_2 0x168038 -#define QM_REG_CTXREG_3 0x16803c -/* [RW 12] The VOQ mask used to select the VOQs which needs to be full for - bypass enable */ -#define QM_REG_ENBYPVOQMASK 0x16823c -/* [RW 32] A bit mask per each physical queue. If a bit is set then the - physical queue uses the byte credit; queues 31-0 */ -#define QM_REG_ENBYTECRD_LSB 0x168220 -/* [RW 32] A bit mask per each physical queue. If a bit is set then the - physical queue uses the byte credit; queues 95-64 */ -#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518 -/* [RW 32] A bit mask per each physical queue. If a bit is set then the - physical queue uses the byte credit; queues 63-32 */ -#define QM_REG_ENBYTECRD_MSB 0x16821c -/* [RW 32] A bit mask per each physical queue. If a bit is set then the - physical queue uses the byte credit; queues 127-96 */ -#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514 -/* [RW 4] If cleared then the secondary interface will not be served by the - RR arbiter */ -#define QM_REG_ENSEC 0x1680f0 -/* [RW 32] NA */ -#define QM_REG_FUNCNUMSEL_LSB 0x168230 -/* [RW 32] NA */ -#define QM_REG_FUNCNUMSEL_MSB 0x16822c -/* [RW 32] A mask register to mask the Almost empty signals which will not - be use for the almost empty indication to the HW block; queues 31:0 */ -#define QM_REG_HWAEMPTYMASK_LSB 0x168218 -/* [RW 32] A mask register to mask the Almost empty signals which will not - be use for the almost empty indication to the HW block; queues 95-64 */ -#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510 -/* [RW 32] A mask register to mask the Almost empty signals which will not - be use for the almost empty indication to the HW block; queues 63:32 */ -#define QM_REG_HWAEMPTYMASK_MSB 0x168214 -/* [RW 32] A mask register to mask the Almost empty signals which will not - be use for the almost empty indication to the HW block; queues 127-96 */ -#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c -/* [RW 4] The number of outstanding request to CFC */ -#define QM_REG_OUTLDREQ 0x168804 -/* [RC 1] A flag to indicate that overflow error occurred in one of the - queues. 
*/ -#define QM_REG_OVFERROR 0x16805c -/* [RC 7] the Q where the overflow occurs */ -#define QM_REG_OVFQNUM 0x168058 -/* [R 16] Pause state for physical queues 15-0 */ -#define QM_REG_PAUSESTATE0 0x168410 -/* [R 16] Pause state for physical queues 31-16 */ -#define QM_REG_PAUSESTATE1 0x168414 -/* [R 16] Pause state for physical queues 47-32 */ -#define QM_REG_PAUSESTATE2 0x16e684 -/* [R 16] Pause state for physical queues 63-48 */ -#define QM_REG_PAUSESTATE3 0x16e688 -/* [R 16] Pause state for physical queues 79-64 */ -#define QM_REG_PAUSESTATE4 0x16e68c -/* [R 16] Pause state for physical queues 95-80 */ -#define QM_REG_PAUSESTATE5 0x16e690 -/* [R 16] Pause state for physical queues 111-96 */ -#define QM_REG_PAUSESTATE6 0x16e694 -/* [R 16] Pause state for physical queues 127-112 */ -#define QM_REG_PAUSESTATE7 0x16e698 -/* [RW 2] The PCI attributes field used in the PCI request. */ -#define QM_REG_PCIREQAT 0x168054 -#define QM_REG_PF_EN 0x16e70c -/* [R 24] The number of tasks stored in the QM for the PF. only even - * functions are valid in E2 (odd I registers will be hard wired to 0) */ -#define QM_REG_PF_USG_CNT_0 0x16e040 -/* [R 16] NOT USED */ -#define QM_REG_PORT0BYTECRD 0x168300 -/* [R 16] The byte credit of port 1 */ -#define QM_REG_PORT1BYTECRD 0x168304 -/* [RW 3] pci function number of queues 15-0 */ -#define QM_REG_PQ2PCIFUNC_0 0x16e6bc -#define QM_REG_PQ2PCIFUNC_1 0x16e6c0 -#define QM_REG_PQ2PCIFUNC_2 0x16e6c4 -#define QM_REG_PQ2PCIFUNC_3 0x16e6c8 -#define QM_REG_PQ2PCIFUNC_4 0x16e6cc -#define QM_REG_PQ2PCIFUNC_5 0x16e6d0 -#define QM_REG_PQ2PCIFUNC_6 0x16e6d4 -#define QM_REG_PQ2PCIFUNC_7 0x16e6d8 -/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follows: - ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read - bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */ -#define QM_REG_PTRTBL 0x168a00 -/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as follows: - ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read - bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */ -#define QM_REG_PTRTBL_EXT_A 0x16e200 -/* [RW 2] Interrupt mask register #0 read/write */ -#define QM_REG_QM_INT_MASK 0x168444 -/* [R 2] Interrupt register #0 read */ -#define QM_REG_QM_INT_STS 0x168438 -/* [RW 12] Parity mask register #0 read/write */ -#define QM_REG_QM_PRTY_MASK 0x168454 -/* [R 12] Parity register #0 read */ -#define QM_REG_QM_PRTY_STS 0x168448 -/* [RC 12] Parity register #0 read clear */ -#define QM_REG_QM_PRTY_STS_CLR 0x16844c -/* [R 32] Current queues in pipeline: Queues from 32 to 63 */ -#define QM_REG_QSTATUS_HIGH 0x16802c -/* [R 32] Current queues in pipeline: Queues from 96 to 127 */ -#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408 -/* [R 32] Current queues in pipeline: Queues from 0 to 31 */ -#define QM_REG_QSTATUS_LOW 0x168028 -/* [R 32] Current queues in pipeline: Queues from 64 to 95 */ -#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404 -/* [R 24] The number of tasks queued for each queue; queues 63-0 */ -#define QM_REG_QTASKCTR_0 0x168308 -/* [R 24] The number of tasks queued for each queue; queues 127-64 */ -#define QM_REG_QTASKCTR_EXT_A_0 0x16e584 -/* [RW 4] Queue tied to VOQ */ -#define QM_REG_QVOQIDX_0 0x1680f4 -#define QM_REG_QVOQIDX_10 0x16811c -#define QM_REG_QVOQIDX_100 0x16e49c -#define QM_REG_QVOQIDX_101 0x16e4a0 -#define QM_REG_QVOQIDX_102 0x16e4a4 -#define QM_REG_QVOQIDX_103 0x16e4a8 -#define QM_REG_QVOQIDX_104 0x16e4ac -#define QM_REG_QVOQIDX_105 0x16e4b0 -#define QM_REG_QVOQIDX_106 0x16e4b4 -#define QM_REG_QVOQIDX_107 0x16e4b8 -#define QM_REG_QVOQIDX_108 0x16e4bc -#define QM_REG_QVOQIDX_109 0x16e4c0 -#define QM_REG_QVOQIDX_11 0x168120 -#define QM_REG_QVOQIDX_110 0x16e4c4 -#define QM_REG_QVOQIDX_111 0x16e4c8 -#define QM_REG_QVOQIDX_112 0x16e4cc -#define QM_REG_QVOQIDX_113 0x16e4d0 -#define QM_REG_QVOQIDX_114 0x16e4d4 -#define QM_REG_QVOQIDX_115 0x16e4d8 -#define QM_REG_QVOQIDX_116 0x16e4dc -#define QM_REG_QVOQIDX_117 0x16e4e0 -#define QM_REG_QVOQIDX_118 0x16e4e4 -#define QM_REG_QVOQIDX_119 0x16e4e8 -#define QM_REG_QVOQIDX_12 0x168124 -#define QM_REG_QVOQIDX_120 0x16e4ec -#define QM_REG_QVOQIDX_121 0x16e4f0 -#define QM_REG_QVOQIDX_122 0x16e4f4 -#define QM_REG_QVOQIDX_123 0x16e4f8 -#define QM_REG_QVOQIDX_124 0x16e4fc -#define QM_REG_QVOQIDX_125 0x16e500 -#define QM_REG_QVOQIDX_126 0x16e504 -#define QM_REG_QVOQIDX_127 0x16e508 -#define QM_REG_QVOQIDX_13 0x168128 -#define QM_REG_QVOQIDX_14 0x16812c -#define QM_REG_QVOQIDX_15 0x168130 -#define QM_REG_QVOQIDX_16 0x168134 -#define QM_REG_QVOQIDX_17 0x168138 -#define QM_REG_QVOQIDX_21 0x168148 -#define QM_REG_QVOQIDX_22 0x16814c -#define QM_REG_QVOQIDX_23 0x168150 -#define QM_REG_QVOQIDX_24 0x168154 -#define QM_REG_QVOQIDX_25 0x168158 -#define QM_REG_QVOQIDX_26 0x16815c -#define QM_REG_QVOQIDX_27 0x168160 -#define QM_REG_QVOQIDX_28 0x168164 -#define QM_REG_QVOQIDX_29 0x168168 -#define QM_REG_QVOQIDX_30 0x16816c -#define QM_REG_QVOQIDX_31 0x168170 -#define QM_REG_QVOQIDX_32 0x168174 -#define QM_REG_QVOQIDX_33 0x168178 -#define QM_REG_QVOQIDX_34 0x16817c -#define QM_REG_QVOQIDX_35 0x168180 -#define QM_REG_QVOQIDX_36 0x168184 -#define QM_REG_QVOQIDX_37 0x168188 -#define QM_REG_QVOQIDX_38 0x16818c -#define QM_REG_QVOQIDX_39 0x168190 -#define QM_REG_QVOQIDX_40 0x168194 -#define QM_REG_QVOQIDX_41 0x168198 -#define QM_REG_QVOQIDX_42 0x16819c -#define QM_REG_QVOQIDX_43 0x1681a0 -#define QM_REG_QVOQIDX_44 0x1681a4 -#define QM_REG_QVOQIDX_45 0x1681a8 -#define QM_REG_QVOQIDX_46 0x1681ac -#define QM_REG_QVOQIDX_47 0x1681b0 -#define QM_REG_QVOQIDX_48 0x1681b4 -#define QM_REG_QVOQIDX_49 0x1681b8 -#define QM_REG_QVOQIDX_5 0x168108 -#define QM_REG_QVOQIDX_50 0x1681bc -#define QM_REG_QVOQIDX_51 0x1681c0 -#define QM_REG_QVOQIDX_52 0x1681c4 -#define QM_REG_QVOQIDX_53 0x1681c8 -#define QM_REG_QVOQIDX_54 0x1681cc -#define QM_REG_QVOQIDX_55 0x1681d0 -#define QM_REG_QVOQIDX_56 0x1681d4 -#define QM_REG_QVOQIDX_57 0x1681d8 -#define QM_REG_QVOQIDX_58 0x1681dc -#define QM_REG_QVOQIDX_59 0x1681e0 -#define QM_REG_QVOQIDX_6 0x16810c -#define QM_REG_QVOQIDX_60 0x1681e4 -#define QM_REG_QVOQIDX_61 0x1681e8 -#define QM_REG_QVOQIDX_62 0x1681ec -#define QM_REG_QVOQIDX_63 0x1681f0 -#define QM_REG_QVOQIDX_64 0x16e40c -#define QM_REG_QVOQIDX_65 0x16e410 -#define QM_REG_QVOQIDX_69 0x16e420 -#define QM_REG_QVOQIDX_7 0x168110 -#define QM_REG_QVOQIDX_70 0x16e424 -#define QM_REG_QVOQIDX_71 0x16e428 -#define QM_REG_QVOQIDX_72 0x16e42c -#define QM_REG_QVOQIDX_73 0x16e430 -#define QM_REG_QVOQIDX_74 0x16e434 -#define QM_REG_QVOQIDX_75 0x16e438 -#define QM_REG_QVOQIDX_76 0x16e43c -#define QM_REG_QVOQIDX_77 0x16e440 -#define QM_REG_QVOQIDX_78 0x16e444 -#define QM_REG_QVOQIDX_79 0x16e448 -#define QM_REG_QVOQIDX_8 0x168114 -#define QM_REG_QVOQIDX_80 0x16e44c -#define QM_REG_QVOQIDX_81 0x16e450 -#define QM_REG_QVOQIDX_85 0x16e460 -#define QM_REG_QVOQIDX_86 0x16e464 -#define QM_REG_QVOQIDX_87 0x16e468 -#define QM_REG_QVOQIDX_88 0x16e46c -#define QM_REG_QVOQIDX_89 0x16e470 -#define QM_REG_QVOQIDX_9 0x168118 -#define QM_REG_QVOQIDX_90 0x16e474 -#define QM_REG_QVOQIDX_91 0x16e478 -#define QM_REG_QVOQIDX_92 0x16e47c -#define QM_REG_QVOQIDX_93 0x16e480 -#define QM_REG_QVOQIDX_94 0x16e484 -#define QM_REG_QVOQIDX_95 0x16e488 -#define QM_REG_QVOQIDX_96 0x16e48c -#define QM_REG_QVOQIDX_97 0x16e490 -#define QM_REG_QVOQIDX_98 0x16e494 -#define QM_REG_QVOQIDX_99 0x16e498 -/* [RW 1] Initialization bit command */ -#define QM_REG_SOFT_RESET 0x168428 -/* [RW 8] The credit cost per every task in the QM. A value per each VOQ */ -#define QM_REG_TASKCRDCOST_0 0x16809c -#define QM_REG_TASKCRDCOST_1 0x1680a0 -#define QM_REG_TASKCRDCOST_2 0x1680a4 -#define QM_REG_TASKCRDCOST_4 0x1680ac -#define QM_REG_TASKCRDCOST_5 0x1680b0 -/* [R 6] Keep the fill level of the fifo from write client 3 */ -#define QM_REG_TQM_WRC_FIFOLVL 0x168010 -/* [R 6] Keep the fill level of the fifo from write client 2 */ -#define QM_REG_UQM_WRC_FIFOLVL 0x168008 -/* [RC 32] Credit update error register */ -#define QM_REG_VOQCRDERRREG 0x168408 -/* [R 16] The credit value for each VOQ */ -#define QM_REG_VOQCREDIT_0 0x1682d0 -#define QM_REG_VOQCREDIT_1 0x1682d4 -#define QM_REG_VOQCREDIT_4 0x1682e0 -/* [RW 16] The credit value that if above the QM is considered almost full */ -#define QM_REG_VOQCREDITAFULLTHR 0x168090 -/* [RW 16] The init and maximum credit for each VoQ */ -#define QM_REG_VOQINITCREDIT_0 0x168060 -#define QM_REG_VOQINITCREDIT_1 0x168064 -#define QM_REG_VOQINITCREDIT_2 0x168068 -#define QM_REG_VOQINITCREDIT_4 0x168070 -#define QM_REG_VOQINITCREDIT_5 0x168074 -/* [RW 1] The port to which each VOQ belongs */ -#define QM_REG_VOQPORT_0 0x1682a0 -#define QM_REG_VOQPORT_1 0x1682a4 -#define QM_REG_VOQPORT_2 0x1682a8 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_0_LSB 0x168240 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524 -/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ -#define QM_REG_VOQQMASK_0_MSB 0x168244 -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_10_LSB 0x168290 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574 -/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ -#define QM_REG_VOQQMASK_10_MSB 0x168294 -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_11_LSB 0x168298 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c -/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ -#define QM_REG_VOQQMASK_11_MSB 0x16829c -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_1_LSB 0x168248 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c -/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ -#define QM_REG_VOQQMASK_1_MSB 0x16824c -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_2_LSB 0x168250 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534 -/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ -#define QM_REG_VOQQMASK_2_MSB 0x168254 -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_3_LSB 0x168258 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_4_LSB 0x168260 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544 -/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ -#define QM_REG_VOQQMASK_4_MSB 0x168264 -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_5_LSB 0x168268 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c -/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ -#define QM_REG_VOQQMASK_5_MSB 0x16826c -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_6_LSB 0x168270 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554 -/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ -#define QM_REG_VOQQMASK_6_MSB 0x168274 -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_7_LSB 0x168278 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c -/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ -#define QM_REG_VOQQMASK_7_MSB 0x16827c -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_8_LSB 0x168280 -/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ -#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564 -/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ -#define QM_REG_VOQQMASK_8_MSB 0x168284 -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568 -/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ -#define QM_REG_VOQQMASK_9_LSB 0x168288 -/* [RW 32] The physical queue number associated with each VOQ; queues
95-64 */ -#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c -/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ -#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570 -/* [RW 32] Wrr weights */ -#define QM_REG_WRRWEIGHTS_0 0x16880c -#define QM_REG_WRRWEIGHTS_1 0x168810 -#define QM_REG_WRRWEIGHTS_10 0x168814 -#define QM_REG_WRRWEIGHTS_11 0x168818 -#define QM_REG_WRRWEIGHTS_12 0x16881c -#define QM_REG_WRRWEIGHTS_13 0x168820 -#define QM_REG_WRRWEIGHTS_14 0x168824 -#define QM_REG_WRRWEIGHTS_15 0x168828 -#define QM_REG_WRRWEIGHTS_16 0x16e000 -#define QM_REG_WRRWEIGHTS_17 0x16e004 -#define QM_REG_WRRWEIGHTS_18 0x16e008 -#define QM_REG_WRRWEIGHTS_19 0x16e00c -#define QM_REG_WRRWEIGHTS_2 0x16882c -#define QM_REG_WRRWEIGHTS_20 0x16e010 -#define QM_REG_WRRWEIGHTS_21 0x16e014 -#define QM_REG_WRRWEIGHTS_22 0x16e018 -#define QM_REG_WRRWEIGHTS_23 0x16e01c -#define QM_REG_WRRWEIGHTS_24 0x16e020 -#define QM_REG_WRRWEIGHTS_25 0x16e024 -#define QM_REG_WRRWEIGHTS_26 0x16e028 -#define QM_REG_WRRWEIGHTS_27 0x16e02c -#define QM_REG_WRRWEIGHTS_28 0x16e030 -#define QM_REG_WRRWEIGHTS_29 0x16e034 -#define QM_REG_WRRWEIGHTS_3 0x168830 -#define QM_REG_WRRWEIGHTS_30 0x16e038 -#define QM_REG_WRRWEIGHTS_31 0x16e03c -#define QM_REG_WRRWEIGHTS_4 0x168834 -#define QM_REG_WRRWEIGHTS_5 0x168838 -#define QM_REG_WRRWEIGHTS_6 0x16883c -#define QM_REG_WRRWEIGHTS_7 0x168840 -#define QM_REG_WRRWEIGHTS_8 0x168844 -#define QM_REG_WRRWEIGHTS_9 0x168848 -/* [R 6] Keep the fill level of the fifo from write client 1 */ -#define QM_REG_XQM_WRC_FIFOLVL 0x168000 -/* [W 1] reset to parity interrupt */ -#define SEM_FAST_REG_PARITY_RST 0x18840 -#define SRC_REG_COUNTFREE0 0x40500 -/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two - ports. If set the searcher support 8 functions. */ -#define SRC_REG_E1HMF_ENABLE 0x404cc -#define SRC_REG_FIRSTFREE0 0x40510 -#define SRC_REG_KEYRSS0_0 0x40408 -#define SRC_REG_KEYRSS0_7 0x40424 -#define SRC_REG_KEYRSS1_9 0x40454 -#define SRC_REG_KEYSEARCH_0 0x40458 -#define SRC_REG_KEYSEARCH_1 0x4045c -#define SRC_REG_KEYSEARCH_2 0x40460 -#define SRC_REG_KEYSEARCH_3 0x40464 -#define SRC_REG_KEYSEARCH_4 0x40468 -#define SRC_REG_KEYSEARCH_5 0x4046c -#define SRC_REG_KEYSEARCH_6 0x40470 -#define SRC_REG_KEYSEARCH_7 0x40474 -#define SRC_REG_KEYSEARCH_8 0x40478 -#define SRC_REG_KEYSEARCH_9 0x4047c -#define SRC_REG_LASTFREE0 0x40530 -#define SRC_REG_NUMBER_HASH_BITS0 0x40400 -/* [RW 1] Reset internal state machines. */ -#define SRC_REG_SOFT_RST 0x4049c -/* [R 3] Interrupt register #0 read */ -#define SRC_REG_SRC_INT_STS 0x404ac -/* [RW 3] Parity mask register #0 read/write */ -#define SRC_REG_SRC_PRTY_MASK 0x404c8 -/* [R 3] Parity register #0 read */ -#define SRC_REG_SRC_PRTY_STS 0x404bc -/* [RC 3] Parity register #0 read clear */ -#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0 -/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ -#define TCM_REG_CAM_OCCUP 0x5017c -/* [RW 1] CDU AG read Interface enable. If 0 - the request input is - disregarded; valid output is deasserted; all other signals are treated as - usual; if 1 - normal activity. */ -#define TCM_REG_CDU_AG_RD_IFEN 0x50034 -/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input - are disregarded; all other signals are treated as usual; if 1 - normal - activity. */ -#define TCM_REG_CDU_AG_WR_IFEN 0x50030 -/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is - disregarded; valid output is deasserted; all other signals are treated as - usual; if 1 - normal activity. 
*/ -#define TCM_REG_CDU_SM_RD_IFEN 0x5003c -/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid - input is disregarded; all other signals are treated as usual; if 1 - - normal activity. */ -#define TCM_REG_CDU_SM_WR_IFEN 0x50038 -/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes - the initial credit value; read returns the current value of the credit - counter. Must be initialized to 1 at start-up. */ -#define TCM_REG_CFC_INIT_CRD 0x50204 -/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define TCM_REG_CP_WEIGHT 0x500c0 -/* [RW 1] Input csem Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define TCM_REG_CSEM_IFEN 0x5002c -/* [RC 1] Message length mismatch (relative to last indication) at the In#9 - interface. */ -#define TCM_REG_CSEM_LENGTH_MIS 0x50174 -/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define TCM_REG_CSEM_WEIGHT 0x500bc -/* [RW 8] The Event ID in case of ErrorFlg is set in the input message. */ -#define TCM_REG_ERR_EVNT_ID 0x500a0 -/* [RW 28] The CM erroneous header for QM and Timers formatting. */ -#define TCM_REG_ERR_TCM_HDR 0x5009c -/* [RW 8] The Event ID for Timers expiration. */ -#define TCM_REG_EXPR_EVNT_ID 0x500a4 -/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write - writes the initial credit value; read returns the current value of the - credit counter. Must be initialized to 64 at start-up. */ -#define TCM_REG_FIC0_INIT_CRD 0x5020c -/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write - writes the initial credit value; read returns the current value of the - credit counter. Must be initialized to 64 at start-up. */ -#define TCM_REG_FIC1_INIT_CRD 0x50210 -/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 - - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr; - ~tcm_registers_gr_ld0_pr.gr_ld0_pr and - ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */ -#define TCM_REG_GR_ARB_TYPE 0x50114 -/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the - highest priority is 3. It is supposed that the Store channel is the - compliment of the other 3 groups. */ -#define TCM_REG_GR_LD0_PR 0x5011c -/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the - highest priority is 3. It is supposed that the Store channel is the - compliment of the other 3 groups. */ -#define TCM_REG_GR_LD1_PR 0x50120 -/* [RW 4] The number of double REG-pairs; loaded from the STORM context and - sent to STORM; for a specific connection type. The double REG-pairs are - used to align to STORM context row size of 128 bits. The offset of these - data in the STORM context is always 0. Index _i stands for the connection - type (one of 16). */ -#define TCM_REG_N_SM_CTX_LD_0 0x50050 -#define TCM_REG_N_SM_CTX_LD_1 0x50054 -#define TCM_REG_N_SM_CTX_LD_2 0x50058 -#define TCM_REG_N_SM_CTX_LD_3 0x5005c -#define TCM_REG_N_SM_CTX_LD_4 0x50060 -#define TCM_REG_N_SM_CTX_LD_5 0x50064 -/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. 
*/ -#define TCM_REG_PBF_IFEN 0x50024 -/* [RC 1] Message length mismatch (relative to last indication) at the In#7 - interface. */ -#define TCM_REG_PBF_LENGTH_MIS 0x5016c -/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define TCM_REG_PBF_WEIGHT 0x500b4 -#define TCM_REG_PHYS_QNUM0_0 0x500e0 -#define TCM_REG_PHYS_QNUM0_1 0x500e4 -#define TCM_REG_PHYS_QNUM1_0 0x500e8 -#define TCM_REG_PHYS_QNUM1_1 0x500ec -#define TCM_REG_PHYS_QNUM2_0 0x500f0 -#define TCM_REG_PHYS_QNUM2_1 0x500f4 -#define TCM_REG_PHYS_QNUM3_0 0x500f8 -#define TCM_REG_PHYS_QNUM3_1 0x500fc -/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define TCM_REG_PRS_IFEN 0x50020 -/* [RC 1] Message length mismatch (relative to last indication) at the In#6 - interface. */ -#define TCM_REG_PRS_LENGTH_MIS 0x50168 -/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define TCM_REG_PRS_WEIGHT 0x500b0 -/* [RW 8] The Event ID for Timers formatting in case of stop done. */ -#define TCM_REG_STOP_EVNT_ID 0x500a8 -/* [RC 1] Message length mismatch (relative to last indication) at the STORM - interface. */ -#define TCM_REG_STORM_LENGTH_MIS 0x50160 -/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define TCM_REG_STORM_TCM_IFEN 0x50010 -/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define TCM_REG_STORM_WEIGHT 0x500ac -/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define TCM_REG_TCM_CFC_IFEN 0x50040 -/* [RW 11] Interrupt mask register #0 read/write */ -#define TCM_REG_TCM_INT_MASK 0x501dc -/* [R 11] Interrupt register #0 read */ -#define TCM_REG_TCM_INT_STS 0x501d0 -/* [RW 27] Parity mask register #0 read/write */ -#define TCM_REG_TCM_PRTY_MASK 0x501ec -/* [R 27] Parity register #0 read */ -#define TCM_REG_TCM_PRTY_STS 0x501e0 -/* [RC 27] Parity register #0 read clear */ -#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4 -/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS - REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). - Is used to determine the number of the AG context REG-pairs written back; - when the input message Reg1WbFlg isn't set. */ -#define TCM_REG_TCM_REG0_SZ 0x500d8 -/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is - disregarded; valid is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define TCM_REG_TCM_STORM0_IFEN 0x50004 -/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is - disregarded; valid is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define TCM_REG_TCM_STORM1_IFEN 0x50008 -/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is - disregarded; valid is deasserted; all other signals are treated as usual; - if 1 - normal activity. 
*/ -#define TCM_REG_TCM_TQM_IFEN 0x5000c -/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ -#define TCM_REG_TCM_TQM_USE_Q 0x500d4 -/* [RW 28] The CM header for Timers expiration command. */ -#define TCM_REG_TM_TCM_HDR 0x50098 -/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define TCM_REG_TM_TCM_IFEN 0x5001c -/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define TCM_REG_TM_WEIGHT 0x500d0 -/* [RW 6] QM output initial credit. Max credit available - 32.Write writes - the initial credit value; read returns the current value of the credit - counter. Must be initialized to 32 at start-up. */ -#define TCM_REG_TQM_INIT_CRD 0x5021c -/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 - stands for weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define TCM_REG_TQM_P_WEIGHT 0x500c8 -/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 - stands for weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define TCM_REG_TQM_S_WEIGHT 0x500cc -/* [RW 28] The CM header value for QM request (primary). */ -#define TCM_REG_TQM_TCM_HDR_P 0x50090 -/* [RW 28] The CM header value for QM request (secondary). */ -#define TCM_REG_TQM_TCM_HDR_S 0x50094 -/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define TCM_REG_TQM_TCM_IFEN 0x50014 -/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define TCM_REG_TSDM_IFEN 0x50018 -/* [RC 1] Message length mismatch (relative to last indication) at the SDM - interface. */ -#define TCM_REG_TSDM_LENGTH_MIS 0x50164 -/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define TCM_REG_TSDM_WEIGHT 0x500c4 -/* [RW 1] Input usem Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define TCM_REG_USEM_IFEN 0x50028 -/* [RC 1] Message length mismatch (relative to last indication) at the In#8 - interface. */ -#define TCM_REG_USEM_LENGTH_MIS 0x50170 -/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define TCM_REG_USEM_WEIGHT 0x500b8 -/* [RW 21] Indirect access to the descriptor table of the XX protection - mechanism. The fields are: [5:0] - length of the message; 15:6] - message - pointer; 20:16] - next pointer. */ -#define TCM_REG_XX_DESCR_TABLE 0x50280 -#define TCM_REG_XX_DESCR_TABLE_SIZE 29 -/* [R 6] Use to read the value of XX protection Free counter. */ -#define TCM_REG_XX_FREE 0x50178 -/* [RW 6] Initial value for the credit counter; responsible for fulfilling - of the Input Stage XX protection buffer by the XX protection pending - messages. 
Max credit available - 127.Write writes the initial credit - value; read returns the current value of the credit counter. Must be - initialized to 19 at start-up. */ -#define TCM_REG_XX_INIT_CRD 0x50220 -/* [RW 6] Maximum link list size (messages locked) per connection in the XX - protection. */ -#define TCM_REG_XX_MAX_LL_SZ 0x50044 -/* [RW 6] The maximum number of pending messages; which may be stored in XX - protection. ~tcm_registers_xx_free.xx_free is read on read. */ -#define TCM_REG_XX_MSG_NUM 0x50224 -/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ -#define TCM_REG_XX_OVFL_EVNT_ID 0x50048 -/* [RW 16] Indirect access to the XX table of the XX protection mechanism. - The fields are:[4:0] - tail pointer; [10:5] - Link List size; 15:11] - - header pointer. */ -#define TCM_REG_XX_TABLE 0x50240 -/* [RW 4] Load value for cfc ac credit cnt. */ -#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208 -/* [RW 4] Load value for cfc cld credit cnt. */ -#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210 -/* [RW 8] Client0 context region. */ -#define TM_REG_CL0_CONT_REGION 0x164030 -/* [RW 8] Client1 context region. */ -#define TM_REG_CL1_CONT_REGION 0x164034 -/* [RW 8] Client2 context region. */ -#define TM_REG_CL2_CONT_REGION 0x164038 -/* [RW 2] Client in High priority client number. */ -#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024 -/* [RW 4] Load value for clout0 cred cnt. */ -#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220 -/* [RW 4] Load value for clout1 cred cnt. */ -#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228 -/* [RW 4] Load value for clout2 cred cnt. */ -#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230 -/* [RW 1] Enable client0 input. */ -#define TM_REG_EN_CL0_INPUT 0x164008 -/* [RW 1] Enable client1 input. */ -#define TM_REG_EN_CL1_INPUT 0x16400c -/* [RW 1] Enable client2 input. */ -#define TM_REG_EN_CL2_INPUT 0x164010 -#define TM_REG_EN_LINEAR0_TIMER 0x164014 -/* [RW 1] Enable real time counter. */ -#define TM_REG_EN_REAL_TIME_CNT 0x1640d8 -/* [RW 1] Enable for Timers state machines. */ -#define TM_REG_EN_TIMERS 0x164000 -/* [RW 4] Load value for expiration credit cnt. CFC max number of - outstanding load requests for timers (expiration) context loading. */ -#define TM_REG_EXP_CRDCNT_VAL 0x164238 -/* [RW 32] Linear0 logic address. */ -#define TM_REG_LIN0_LOGIC_ADDR 0x164240 -/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */ -#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048 -/* [ST 16] Linear0 Number of scans counter. */ -#define TM_REG_LIN0_NUM_SCANS 0x1640a0 -/* [WB 64] Linear0 phy address. */ -#define TM_REG_LIN0_PHY_ADDR 0x164270 -/* [RW 1] Linear0 physical address valid. */ -#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248 -#define TM_REG_LIN0_SCAN_ON 0x1640d0 -/* [RW 24] Linear0 array scan timeout. */ -#define TM_REG_LIN0_SCAN_TIME 0x16403c -#define TM_REG_LIN0_VNIC_UC 0x164128 -/* [RW 32] Linear1 logic address. */ -#define TM_REG_LIN1_LOGIC_ADDR 0x164250 -/* [WB 64] Linear1 phy address. */ -#define TM_REG_LIN1_PHY_ADDR 0x164280 -/* [RW 1] Linear1 physical address valid. */ -#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258 -/* [RW 6] Linear timer set_clear fifo threshold. */ -#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070 -/* [RW 2] Load value for pci arbiter credit cnt. */ -#define TM_REG_PCIARB_CRDCNT_VAL 0x164260 -/* [RW 20] The amount of hardware cycles for each timer tick. */ -#define TM_REG_TIMER_TICK_SIZE 0x16401c -/* [RW 8] Timers Context region. 
*/ -#define TM_REG_TM_CONTEXT_REGION 0x164044 -/* [RW 1] Interrupt mask register #0 read/write */ -#define TM_REG_TM_INT_MASK 0x1640fc -/* [R 1] Interrupt register #0 read */ -#define TM_REG_TM_INT_STS 0x1640f0 -/* [RW 7] Parity mask register #0 read/write */ -#define TM_REG_TM_PRTY_MASK 0x16410c -/* [RC 7] Parity register #0 read clear */ -#define TM_REG_TM_PRTY_STS_CLR 0x164104 -/* [RW 8] The event id for aggregated interrupt 0 */ -#define TSDM_REG_AGG_INT_EVENT_0 0x42038 -#define TSDM_REG_AGG_INT_EVENT_1 0x4203c -#define TSDM_REG_AGG_INT_EVENT_2 0x42040 -#define TSDM_REG_AGG_INT_EVENT_3 0x42044 -#define TSDM_REG_AGG_INT_EVENT_4 0x42048 -/* [RW 1] The T bit for aggregated interrupt 0 */ -#define TSDM_REG_AGG_INT_T_0 0x420b8 -#define TSDM_REG_AGG_INT_T_1 0x420bc -/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ -#define TSDM_REG_CFC_RSP_START_ADDR 0x42008 -/* [RW 16] The maximum value of the completion counter #0 */ -#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c -/* [RW 16] The maximum value of the completion counter #1 */ -#define TSDM_REG_CMP_COUNTER_MAX1 0x42020 -/* [RW 16] The maximum value of the completion counter #2 */ -#define TSDM_REG_CMP_COUNTER_MAX2 0x42024 -/* [RW 16] The maximum value of the completion counter #3 */ -#define TSDM_REG_CMP_COUNTER_MAX3 0x42028 -/* [RW 13] The start address in the internal RAM for the completion - counters. */ -#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c -#define TSDM_REG_ENABLE_IN1 0x42238 -#define TSDM_REG_ENABLE_IN2 0x4223c -#define TSDM_REG_ENABLE_OUT1 0x42240 -#define TSDM_REG_ENABLE_OUT2 0x42244 -/* [RW 4] The initial number of messages that can be sent to the pxp control - interface without receiving any ACK. */ -#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc -/* [ST 32] The number of ACK after placement messages received */ -#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c -/* [ST 32] The number of packet end messages received from the parser */ -#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274 -/* [ST 32] The number of requests received from the pxp async if */ -#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278 -/* [ST 32] The number of commands received in queue 0 */ -#define TSDM_REG_NUM_OF_Q0_CMD 0x42248 -/* [ST 32] The number of commands received in queue 10 */ -#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c -/* [ST 32] The number of commands received in queue 11 */ -#define TSDM_REG_NUM_OF_Q11_CMD 0x42270 -/* [ST 32] The number of commands received in queue 1 */ -#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c -/* [ST 32] The number of commands received in queue 3 */ -#define TSDM_REG_NUM_OF_Q3_CMD 0x42250 -/* [ST 32] The number of commands received in queue 4 */ -#define TSDM_REG_NUM_OF_Q4_CMD 0x42254 -/* [ST 32] The number of commands received in queue 5 */ -#define TSDM_REG_NUM_OF_Q5_CMD 0x42258 -/* [ST 32] The number of commands received in queue 6 */ -#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c -/* [ST 32] The number of commands received in queue 7 */ -#define TSDM_REG_NUM_OF_Q7_CMD 0x42260 -/* [ST 32] The number of commands received in queue 8 */ -#define TSDM_REG_NUM_OF_Q8_CMD 0x42264 -/* [ST 32] The number of commands received in queue 9 */ -#define TSDM_REG_NUM_OF_Q9_CMD 0x42268 -/* [RW 13] The start address in the internal RAM for the packet end message */ -#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014 -/* [RW 13] The start address in the internal RAM for queue counters */ -#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010 -/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ -#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 
0x42548 -/* [R 1] parser fifo empty in sdm_sync block */ -#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550 -/* [R 1] parser serial fifo empty in sdm_sync block */ -#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558 -/* [RW 32] Tick for timer counter. Applicable only when - ~tsdm_registers_timer_tick_enable.timer_tick_enable =1 */ -#define TSDM_REG_TIMER_TICK 0x42000 -/* [RW 32] Interrupt mask register #0 read/write */ -#define TSDM_REG_TSDM_INT_MASK_0 0x4229c -#define TSDM_REG_TSDM_INT_MASK_1 0x422ac -/* [R 32] Interrupt register #0 read */ -#define TSDM_REG_TSDM_INT_STS_0 0x42290 -#define TSDM_REG_TSDM_INT_STS_1 0x422a0 -/* [RW 11] Parity mask register #0 read/write */ -#define TSDM_REG_TSDM_PRTY_MASK 0x422bc -/* [R 11] Parity register #0 read */ -#define TSDM_REG_TSDM_PRTY_STS 0x422b0 -/* [RC 11] Parity register #0 read clear */ -#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4 -/* [RW 5] The number of time_slots in the arbitration cycle */ -#define TSEM_REG_ARB_CYCLE_SIZE 0x180034 -/* [RW 3] The source that is associated with arbitration element 0. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2 */ -#define TSEM_REG_ARB_ELEMENT0 0x180020 -/* [RW 3] The source that is associated with arbitration element 1. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */ -#define TSEM_REG_ARB_ELEMENT1 0x180024 -/* [RW 3] The source that is associated with arbitration element 2. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~tsem_registers_arb_element0.arb_element0 - and ~tsem_registers_arb_element1.arb_element1 */ -#define TSEM_REG_ARB_ELEMENT2 0x180028 -/* [RW 3] The source that is associated with arbitration element 3. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2.Could - not be equal to register ~tsem_registers_arb_element0.arb_element0 and - ~tsem_registers_arb_element1.arb_element1 and - ~tsem_registers_arb_element2.arb_element2 */ -#define TSEM_REG_ARB_ELEMENT3 0x18002c -/* [RW 3] The source that is associated with arbitration element 4. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~tsem_registers_arb_element0.arb_element0 - and ~tsem_registers_arb_element1.arb_element1 and - ~tsem_registers_arb_element2.arb_element2 and - ~tsem_registers_arb_element3.arb_element3 */ -#define TSEM_REG_ARB_ELEMENT4 0x180030 -#define TSEM_REG_ENABLE_IN 0x1800a4 -#define TSEM_REG_ENABLE_OUT 0x1800a8 -/* [RW 32] This address space contains all registers and memories that are - placed in SEM_FAST block. The SEM_FAST registers are described in - appendix B. In order to access the sem_fast registers the base address - ~fast_memory.fast_memory should be added to eachsem_fast register offset. 
*/ -#define TSEM_REG_FAST_MEMORY 0x1a0000 -/* [RW 1] Disables input messages from FIC0 May be updated during run_time - by the microcode */ -#define TSEM_REG_FIC0_DISABLE 0x180224 -/* [RW 1] Disables input messages from FIC1 May be updated during run_time - by the microcode */ -#define TSEM_REG_FIC1_DISABLE 0x180234 -/* [RW 15] Interrupt table Read and write access to it is not possible in - the middle of the work */ -#define TSEM_REG_INT_TABLE 0x180400 -/* [ST 24] Statistics register. The number of messages that entered through - FIC0 */ -#define TSEM_REG_MSG_NUM_FIC0 0x180000 -/* [ST 24] Statistics register. The number of messages that entered through - FIC1 */ -#define TSEM_REG_MSG_NUM_FIC1 0x180004 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC0 */ -#define TSEM_REG_MSG_NUM_FOC0 0x180008 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC1 */ -#define TSEM_REG_MSG_NUM_FOC1 0x18000c -/* [ST 24] Statistics register. The number of messages that were sent to - FOC2 */ -#define TSEM_REG_MSG_NUM_FOC2 0x180010 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC3 */ -#define TSEM_REG_MSG_NUM_FOC3 0x180014 -/* [RW 1] Disables input messages from the passive buffer May be updated - during run_time by the microcode */ -#define TSEM_REG_PAS_DISABLE 0x18024c -/* [WB 128] Debug only. Passive buffer memory */ -#define TSEM_REG_PASSIVE_BUFFER 0x181000 -/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */ -#define TSEM_REG_PRAM 0x1c0000 -/* [R 8] Valid sleeping threads indication have bit per thread */ -#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c -/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ -#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 -/* [RW 8] List of free threads . There is a bit per thread. 
*/ -#define TSEM_REG_THREADS_LIST 0x1802e4 -/* [RC 32] Parity register #0 read clear */ -#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118 -#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128 -/* [RW 3] The arbitration scheme of time_slot 0 */ -#define TSEM_REG_TS_0_AS 0x180038 -/* [RW 3] The arbitration scheme of time_slot 10 */ -#define TSEM_REG_TS_10_AS 0x180060 -/* [RW 3] The arbitration scheme of time_slot 11 */ -#define TSEM_REG_TS_11_AS 0x180064 -/* [RW 3] The arbitration scheme of time_slot 12 */ -#define TSEM_REG_TS_12_AS 0x180068 -/* [RW 3] The arbitration scheme of time_slot 13 */ -#define TSEM_REG_TS_13_AS 0x18006c -/* [RW 3] The arbitration scheme of time_slot 14 */ -#define TSEM_REG_TS_14_AS 0x180070 -/* [RW 3] The arbitration scheme of time_slot 15 */ -#define TSEM_REG_TS_15_AS 0x180074 -/* [RW 3] The arbitration scheme of time_slot 16 */ -#define TSEM_REG_TS_16_AS 0x180078 -/* [RW 3] The arbitration scheme of time_slot 17 */ -#define TSEM_REG_TS_17_AS 0x18007c -/* [RW 3] The arbitration scheme of time_slot 18 */ -#define TSEM_REG_TS_18_AS 0x180080 -/* [RW 3] The arbitration scheme of time_slot 1 */ -#define TSEM_REG_TS_1_AS 0x18003c -/* [RW 3] The arbitration scheme of time_slot 2 */ -#define TSEM_REG_TS_2_AS 0x180040 -/* [RW 3] The arbitration scheme of time_slot 3 */ -#define TSEM_REG_TS_3_AS 0x180044 -/* [RW 3] The arbitration scheme of time_slot 4 */ -#define TSEM_REG_TS_4_AS 0x180048 -/* [RW 3] The arbitration scheme of time_slot 5 */ -#define TSEM_REG_TS_5_AS 0x18004c -/* [RW 3] The arbitration scheme of time_slot 6 */ -#define TSEM_REG_TS_6_AS 0x180050 -/* [RW 3] The arbitration scheme of time_slot 7 */ -#define TSEM_REG_TS_7_AS 0x180054 -/* [RW 3] The arbitration scheme of time_slot 8 */ -#define TSEM_REG_TS_8_AS 0x180058 -/* [RW 3] The arbitration scheme of time_slot 9 */ -#define TSEM_REG_TS_9_AS 0x18005c -/* [RW 32] Interrupt mask register #0 read/write */ -#define TSEM_REG_TSEM_INT_MASK_0 0x180100 -#define TSEM_REG_TSEM_INT_MASK_1 0x180110 -/* [R 32] Interrupt register #0 read */ -#define TSEM_REG_TSEM_INT_STS_0 0x1800f4 -#define TSEM_REG_TSEM_INT_STS_1 0x180104 -/* [RW 32] Parity mask register #0 read/write */ -#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120 -#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130 -/* [R 32] Parity register #0 read */ -#define TSEM_REG_TSEM_PRTY_STS_0 0x180114 -#define TSEM_REG_TSEM_PRTY_STS_1 0x180124 -/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 - * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ -#define TSEM_REG_VFPF_ERR_NUM 0x180380 -/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits - * [10:8] of the address should be the offset within the accessed LCID - * context; the bits [7:0] are the accessed LCID.Example: to write to REG10 - * LCID100. The RBC address should be 12'ha64. */ -#define UCM_REG_AG_CTX 0xe2000 -/* [R 5] Used to read the XX protection CAM occupancy counter. */ -#define UCM_REG_CAM_OCCUP 0xe0170 -/* [RW 1] CDU AG read Interface enable. If 0 - the request input is - disregarded; valid output is deasserted; all other signals are treated as - usual; if 1 - normal activity. */ -#define UCM_REG_CDU_AG_RD_IFEN 0xe0038 -/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input - are disregarded; all other signals are treated as usual; if 1 - normal - activity. */ -#define UCM_REG_CDU_AG_WR_IFEN 0xe0034 -/* [RW 1] CDU STORM read Interface enable. 
If 0 - the request input is - disregarded; valid output is deasserted; all other signals are treated as - usual; if 1 - normal activity. */ -#define UCM_REG_CDU_SM_RD_IFEN 0xe0040 -/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid - input is disregarded; all other signals are treated as usual; if 1 - - normal activity. */ -#define UCM_REG_CDU_SM_WR_IFEN 0xe003c -/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes - the initial credit value; read returns the current value of the credit - counter. Must be initialized to 1 at start-up. */ -#define UCM_REG_CFC_INIT_CRD 0xe0204 -/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define UCM_REG_CP_WEIGHT 0xe00c4 -/* [RW 1] Input csem Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define UCM_REG_CSEM_IFEN 0xe0028 -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the csem interface is detected. */ -#define UCM_REG_CSEM_LENGTH_MIS 0xe0160 -/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define UCM_REG_CSEM_WEIGHT 0xe00b8 -/* [RW 1] Input dorq Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define UCM_REG_DORQ_IFEN 0xe0030 -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the dorq interface is detected. */ -#define UCM_REG_DORQ_LENGTH_MIS 0xe0168 -/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define UCM_REG_DORQ_WEIGHT 0xe00c0 -/* [RW 8] The Event ID in case ErrorFlg input message bit is set. */ -#define UCM_REG_ERR_EVNT_ID 0xe00a4 -/* [RW 28] The CM erroneous header for QM and Timers formatting. */ -#define UCM_REG_ERR_UCM_HDR 0xe00a0 -/* [RW 8] The Event ID for Timers expiration. */ -#define UCM_REG_EXPR_EVNT_ID 0xe00a8 -/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write - writes the initial credit value; read returns the current value of the - credit counter. Must be initialized to 64 at start-up. */ -#define UCM_REG_FIC0_INIT_CRD 0xe020c -/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write - writes the initial credit value; read returns the current value of the - credit counter. Must be initialized to 64 at start-up. */ -#define UCM_REG_FIC1_INIT_CRD 0xe0210 -/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 - - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr; - ~ucm_registers_gr_ld0_pr.gr_ld0_pr and - ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */ -#define UCM_REG_GR_ARB_TYPE 0xe0144 -/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the - highest priority is 3. It is supposed that the Store channel group is - compliment to the others. */ -#define UCM_REG_GR_LD0_PR 0xe014c -/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the - highest priority is 3. It is supposed that the Store channel group is - compliment to the others. 
*/ -#define UCM_REG_GR_LD1_PR 0xe0150 -/* [RW 2] The queue index for invalidate counter flag decision. */ -#define UCM_REG_INV_CFLG_Q 0xe00e4 -/* [RW 5] The number of double REG-pairs; loaded from the STORM context and - sent to STORM; for a specific connection type. the double REG-pairs are - used in order to align to STORM context row size of 128 bits. The offset - of these data in the STORM context is always 0. Index _i stands for the - connection type (one of 16). */ -#define UCM_REG_N_SM_CTX_LD_0 0xe0054 -#define UCM_REG_N_SM_CTX_LD_1 0xe0058 -#define UCM_REG_N_SM_CTX_LD_2 0xe005c -#define UCM_REG_N_SM_CTX_LD_3 0xe0060 -#define UCM_REG_N_SM_CTX_LD_4 0xe0064 -#define UCM_REG_N_SM_CTX_LD_5 0xe0068 -#define UCM_REG_PHYS_QNUM0_0 0xe0110 -#define UCM_REG_PHYS_QNUM0_1 0xe0114 -#define UCM_REG_PHYS_QNUM1_0 0xe0118 -#define UCM_REG_PHYS_QNUM1_1 0xe011c -#define UCM_REG_PHYS_QNUM2_0 0xe0120 -#define UCM_REG_PHYS_QNUM2_1 0xe0124 -#define UCM_REG_PHYS_QNUM3_0 0xe0128 -#define UCM_REG_PHYS_QNUM3_1 0xe012c -/* [RW 8] The Event ID for Timers formatting in case of stop done. */ -#define UCM_REG_STOP_EVNT_ID 0xe00ac -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the STORM interface is detected. */ -#define UCM_REG_STORM_LENGTH_MIS 0xe0154 -/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define UCM_REG_STORM_UCM_IFEN 0xe0010 -/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define UCM_REG_STORM_WEIGHT 0xe00b0 -/* [RW 4] Timers output initial credit. Max credit available - 15.Write - writes the initial credit value; read returns the current value of the - credit counter. Must be initialized to 4 at start-up. */ -#define UCM_REG_TM_INIT_CRD 0xe021c -/* [RW 28] The CM header for Timers expiration command. */ -#define UCM_REG_TM_UCM_HDR 0xe009c -/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define UCM_REG_TM_UCM_IFEN 0xe001c -/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define UCM_REG_TM_WEIGHT 0xe00d4 -/* [RW 1] Input tsem Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define UCM_REG_TSEM_IFEN 0xe0024 -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the tsem interface is detected. */ -#define UCM_REG_TSEM_LENGTH_MIS 0xe015c -/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define UCM_REG_TSEM_WEIGHT 0xe00b4 -/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. 
*/ -#define UCM_REG_UCM_CFC_IFEN 0xe0044 -/* [RW 11] Interrupt mask register #0 read/write */ -#define UCM_REG_UCM_INT_MASK 0xe01d4 -/* [R 11] Interrupt register #0 read */ -#define UCM_REG_UCM_INT_STS 0xe01c8 -/* [RW 27] Parity mask register #0 read/write */ -#define UCM_REG_UCM_PRTY_MASK 0xe01e4 -/* [R 27] Parity register #0 read */ -#define UCM_REG_UCM_PRTY_STS 0xe01d8 -/* [RC 27] Parity register #0 read clear */ -#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc -/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS - REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). - Is used to determine the number of the AG context REG-pairs written back; - when the Reg1WbFlg isn't set. */ -#define UCM_REG_UCM_REG0_SZ 0xe00dc -/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is - disregarded; valid is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define UCM_REG_UCM_STORM0_IFEN 0xe0004 -/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is - disregarded; valid is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define UCM_REG_UCM_STORM1_IFEN 0xe0008 -/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define UCM_REG_UCM_TM_IFEN 0xe0020 -/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is - disregarded; valid is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define UCM_REG_UCM_UQM_IFEN 0xe000c -/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ -#define UCM_REG_UCM_UQM_USE_Q 0xe00d8 -/* [RW 6] QM output initial credit. Max credit available - 32.Write writes - the initial credit value; read returns the current value of the credit - counter. Must be initialized to 32 at start-up. */ -#define UCM_REG_UQM_INIT_CRD 0xe0220 -/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 - stands for weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define UCM_REG_UQM_P_WEIGHT 0xe00cc -/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 - stands for weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define UCM_REG_UQM_S_WEIGHT 0xe00d0 -/* [RW 28] The CM header value for QM request (primary). */ -#define UCM_REG_UQM_UCM_HDR_P 0xe0094 -/* [RW 28] The CM header value for QM request (secondary). */ -#define UCM_REG_UQM_UCM_HDR_S 0xe0098 -/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define UCM_REG_UQM_UCM_IFEN 0xe0014 -/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; - acknowledge output is deasserted; all other signals are treated as usual; - if 1 - normal activity. */ -#define UCM_REG_USDM_IFEN 0xe0018 -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the SDM interface is detected. */ -#define UCM_REG_USDM_LENGTH_MIS 0xe0158 -/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define UCM_REG_USDM_WEIGHT 0xe00c8 -/* [RW 1] Input xsem Interface enable. 
If 0 - the valid input is - disregarded; acknowledge output is deasserted; all other signals are - treated as usual; if 1 - normal activity. */ -#define UCM_REG_XSEM_IFEN 0xe002c -/* [RC 1] Set when the message length mismatch (relative to last indication) - at the xsem interface isdetected. */ -#define UCM_REG_XSEM_LENGTH_MIS 0xe0164 -/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for - weight 8 (the most prioritised); 1 stands for weight 1(least - prioritised); 2 stands for weight 2; tc. */ -#define UCM_REG_XSEM_WEIGHT 0xe00bc -/* [RW 20] Indirect access to the descriptor table of the XX protection - mechanism. The fields are:[5:0] - message length; 14:6] - message - pointer; 19:15] - next pointer. */ -#define UCM_REG_XX_DESCR_TABLE 0xe0280 -#define UCM_REG_XX_DESCR_TABLE_SIZE 27 -/* [R 6] Use to read the XX protection Free counter. */ -#define UCM_REG_XX_FREE 0xe016c -/* [RW 6] Initial value for the credit counter; responsible for fulfilling - of the Input Stage XX protection buffer by the XX protection pending - messages. Write writes the initial credit value; read returns the current - value of the credit counter. Must be initialized to 12 at start-up. */ -#define UCM_REG_XX_INIT_CRD 0xe0224 -/* [RW 6] The maximum number of pending messages; which may be stored in XX - protection. ~ucm_registers_xx_free.xx_free read on read. */ -#define UCM_REG_XX_MSG_NUM 0xe0228 -/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ -#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c -/* [RW 16] Indirect access to the XX table of the XX protection mechanism. - The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - - header pointer. */ -#define UCM_REG_XX_TABLE 0xe0300 -#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28) -#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) -#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) -#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5) -#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8) -#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4) -#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1) -#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) -#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0) -#define UMAC_REG_COMMAND_CONFIG 0x8 -/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers - * to bit 17 of the MAC address etc. */ -#define UMAC_REG_MAC_ADDR0 0xc -/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1 - * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */ -#define UMAC_REG_MAC_ADDR1 0x10 -/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive - * logic to check frames. 
*/ -#define UMAC_REG_MAXFR 0x14 -/* [RW 8] The event id for aggregated interrupt 0 */ -#define USDM_REG_AGG_INT_EVENT_0 0xc4038 -#define USDM_REG_AGG_INT_EVENT_1 0xc403c -#define USDM_REG_AGG_INT_EVENT_2 0xc4040 -#define USDM_REG_AGG_INT_EVENT_4 0xc4048 -#define USDM_REG_AGG_INT_EVENT_5 0xc404c -#define USDM_REG_AGG_INT_EVENT_6 0xc4050 -/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) - or auto-mask-mode (1) */ -#define USDM_REG_AGG_INT_MODE_0 0xc41b8 -#define USDM_REG_AGG_INT_MODE_1 0xc41bc -#define USDM_REG_AGG_INT_MODE_4 0xc41c8 -#define USDM_REG_AGG_INT_MODE_5 0xc41cc -#define USDM_REG_AGG_INT_MODE_6 0xc41d0 -/* [RW 1] The T bit for aggregated interrupt 5 */ -#define USDM_REG_AGG_INT_T_5 0xc40cc -#define USDM_REG_AGG_INT_T_6 0xc40d0 -/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ -#define USDM_REG_CFC_RSP_START_ADDR 0xc4008 -/* [RW 16] The maximum value of the completion counter #0 */ -#define USDM_REG_CMP_COUNTER_MAX0 0xc401c -/* [RW 16] The maximum value of the completion counter #1 */ -#define USDM_REG_CMP_COUNTER_MAX1 0xc4020 -/* [RW 16] The maximum value of the completion counter #2 */ -#define USDM_REG_CMP_COUNTER_MAX2 0xc4024 -/* [RW 16] The maximum value of the completion counter #3 */ -#define USDM_REG_CMP_COUNTER_MAX3 0xc4028 -/* [RW 13] The start address in the internal RAM for the completion - counters. */ -#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c -#define USDM_REG_ENABLE_IN1 0xc4238 -#define USDM_REG_ENABLE_IN2 0xc423c -#define USDM_REG_ENABLE_OUT1 0xc4240 -#define USDM_REG_ENABLE_OUT2 0xc4244 -/* [RW 4] The initial number of messages that can be sent to the pxp control - interface without receiving any ACK. */ -#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0 -/* [ST 32] The number of ACK after placement messages received */ -#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280 -/* [ST 32] The number of packet end messages received from the parser */ -#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278 -/* [ST 32] The number of requests received from the pxp async if */ -#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c -/* [ST 32] The number of commands received in queue 0 */ -#define USDM_REG_NUM_OF_Q0_CMD 0xc4248 -/* [ST 32] The number of commands received in queue 10 */ -#define USDM_REG_NUM_OF_Q10_CMD 0xc4270 -/* [ST 32] The number of commands received in queue 11 */ -#define USDM_REG_NUM_OF_Q11_CMD 0xc4274 -/* [ST 32] The number of commands received in queue 1 */ -#define USDM_REG_NUM_OF_Q1_CMD 0xc424c -/* [ST 32] The number of commands received in queue 2 */ -#define USDM_REG_NUM_OF_Q2_CMD 0xc4250 -/* [ST 32] The number of commands received in queue 3 */ -#define USDM_REG_NUM_OF_Q3_CMD 0xc4254 -/* [ST 32] The number of commands received in queue 4 */ -#define USDM_REG_NUM_OF_Q4_CMD 0xc4258 -/* [ST 32] The number of commands received in queue 5 */ -#define USDM_REG_NUM_OF_Q5_CMD 0xc425c -/* [ST 32] The number of commands received in queue 6 */ -#define USDM_REG_NUM_OF_Q6_CMD 0xc4260 -/* [ST 32] The number of commands received in queue 7 */ -#define USDM_REG_NUM_OF_Q7_CMD 0xc4264 -/* [ST 32] The number of commands received in queue 8 */ -#define USDM_REG_NUM_OF_Q8_CMD 0xc4268 -/* [ST 32] The number of commands received in queue 9 */ -#define USDM_REG_NUM_OF_Q9_CMD 0xc426c -/* [RW 13] The start address in the internal RAM for the packet end message */ -#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014 -/* [RW 13] The start address in the internal RAM for queue counters */ -#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010 -/* [R 1] 
pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ -#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550 -/* [R 1] parser fifo empty in sdm_sync block */ -#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558 -/* [R 1] parser serial fifo empty in sdm_sync block */ -#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560 -/* [RW 32] Tick for timer counter. Applicable only when - ~usdm_registers_timer_tick_enable.timer_tick_enable =1 */ -#define USDM_REG_TIMER_TICK 0xc4000 -/* [RW 32] Interrupt mask register #0 read/write */ -#define USDM_REG_USDM_INT_MASK_0 0xc42a0 -#define USDM_REG_USDM_INT_MASK_1 0xc42b0 -/* [R 32] Interrupt register #0 read */ -#define USDM_REG_USDM_INT_STS_0 0xc4294 -#define USDM_REG_USDM_INT_STS_1 0xc42a4 -/* [RW 11] Parity mask register #0 read/write */ -#define USDM_REG_USDM_PRTY_MASK 0xc42c0 -/* [R 11] Parity register #0 read */ -#define USDM_REG_USDM_PRTY_STS 0xc42b4 -/* [RC 11] Parity register #0 read clear */ -#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8 -/* [RW 5] The number of time_slots in the arbitration cycle */ -#define USEM_REG_ARB_CYCLE_SIZE 0x300034 -/* [RW 3] The source that is associated with arbitration element 0. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2 */ -#define USEM_REG_ARB_ELEMENT0 0x300020 -/* [RW 3] The source that is associated with arbitration element 1. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~usem_registers_arb_element0.arb_element0 */ -#define USEM_REG_ARB_ELEMENT1 0x300024 -/* [RW 3] The source that is associated with arbitration element 2. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~usem_registers_arb_element0.arb_element0 - and ~usem_registers_arb_element1.arb_element1 */ -#define USEM_REG_ARB_ELEMENT2 0x300028 -/* [RW 3] The source that is associated with arbitration element 3. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2.Could - not be equal to register ~usem_registers_arb_element0.arb_element0 and - ~usem_registers_arb_element1.arb_element1 and - ~usem_registers_arb_element2.arb_element2 */ -#define USEM_REG_ARB_ELEMENT3 0x30002c -/* [RW 3] The source that is associated with arbitration element 4. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~usem_registers_arb_element0.arb_element0 - and ~usem_registers_arb_element1.arb_element1 and - ~usem_registers_arb_element2.arb_element2 and - ~usem_registers_arb_element3.arb_element3 */ -#define USEM_REG_ARB_ELEMENT4 0x300030 -#define USEM_REG_ENABLE_IN 0x3000a4 -#define USEM_REG_ENABLE_OUT 0x3000a8 -/* [RW 32] This address space contains all registers and memories that are - placed in SEM_FAST block. The SEM_FAST registers are described in - appendix B. In order to access the sem_fast registers the base address - ~fast_memory.fast_memory should be added to eachsem_fast register offset. 
*/ -#define USEM_REG_FAST_MEMORY 0x320000 -/* [RW 1] Disables input messages from FIC0 May be updated during run_time - by the microcode */ -#define USEM_REG_FIC0_DISABLE 0x300224 -/* [RW 1] Disables input messages from FIC1 May be updated during run_time - by the microcode */ -#define USEM_REG_FIC1_DISABLE 0x300234 -/* [RW 15] Interrupt table Read and write access to it is not possible in - the middle of the work */ -#define USEM_REG_INT_TABLE 0x300400 -/* [ST 24] Statistics register. The number of messages that entered through - FIC0 */ -#define USEM_REG_MSG_NUM_FIC0 0x300000 -/* [ST 24] Statistics register. The number of messages that entered through - FIC1 */ -#define USEM_REG_MSG_NUM_FIC1 0x300004 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC0 */ -#define USEM_REG_MSG_NUM_FOC0 0x300008 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC1 */ -#define USEM_REG_MSG_NUM_FOC1 0x30000c -/* [ST 24] Statistics register. The number of messages that were sent to - FOC2 */ -#define USEM_REG_MSG_NUM_FOC2 0x300010 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC3 */ -#define USEM_REG_MSG_NUM_FOC3 0x300014 -/* [RW 1] Disables input messages from the passive buffer May be updated - during run_time by the microcode */ -#define USEM_REG_PAS_DISABLE 0x30024c -/* [WB 128] Debug only. Passive buffer memory */ -#define USEM_REG_PASSIVE_BUFFER 0x302000 -/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */ -#define USEM_REG_PRAM 0x340000 -/* [R 16] Valid sleeping threads indication have bit per thread */ -#define USEM_REG_SLEEP_THREADS_VALID 0x30026c -/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ -#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0 -/* [RW 16] List of free threads . There is a bit per thread. 
*/ -#define USEM_REG_THREADS_LIST 0x3002e4 -/* [RW 3] The arbitration scheme of time_slot 0 */ -#define USEM_REG_TS_0_AS 0x300038 -/* [RW 3] The arbitration scheme of time_slot 10 */ -#define USEM_REG_TS_10_AS 0x300060 -/* [RW 3] The arbitration scheme of time_slot 11 */ -#define USEM_REG_TS_11_AS 0x300064 -/* [RW 3] The arbitration scheme of time_slot 12 */ -#define USEM_REG_TS_12_AS 0x300068 -/* [RW 3] The arbitration scheme of time_slot 13 */ -#define USEM_REG_TS_13_AS 0x30006c -/* [RW 3] The arbitration scheme of time_slot 14 */ -#define USEM_REG_TS_14_AS 0x300070 -/* [RW 3] The arbitration scheme of time_slot 15 */ -#define USEM_REG_TS_15_AS 0x300074 -/* [RW 3] The arbitration scheme of time_slot 16 */ -#define USEM_REG_TS_16_AS 0x300078 -/* [RW 3] The arbitration scheme of time_slot 17 */ -#define USEM_REG_TS_17_AS 0x30007c -/* [RW 3] The arbitration scheme of time_slot 18 */ -#define USEM_REG_TS_18_AS 0x300080 -/* [RW 3] The arbitration scheme of time_slot 1 */ -#define USEM_REG_TS_1_AS 0x30003c -/* [RW 3] The arbitration scheme of time_slot 2 */ -#define USEM_REG_TS_2_AS 0x300040 -/* [RW 3] The arbitration scheme of time_slot 3 */ -#define USEM_REG_TS_3_AS 0x300044 -/* [RW 3] The arbitration scheme of time_slot 4 */ -#define USEM_REG_TS_4_AS 0x300048 -/* [RW 3] The arbitration scheme of time_slot 5 */ -#define USEM_REG_TS_5_AS 0x30004c -/* [RW 3] The arbitration scheme of time_slot 6 */ -#define USEM_REG_TS_6_AS 0x300050 -/* [RW 3] The arbitration scheme of time_slot 7 */ -#define USEM_REG_TS_7_AS 0x300054 -/* [RW 3] The arbitration scheme of time_slot 8 */ -#define USEM_REG_TS_8_AS 0x300058 -/* [RW 3] The arbitration scheme of time_slot 9 */ -#define USEM_REG_TS_9_AS 0x30005c -/* [RW 32] Interrupt mask register #0 read/write */ -#define USEM_REG_USEM_INT_MASK_0 0x300110 -#define USEM_REG_USEM_INT_MASK_1 0x300120 -/* [R 32] Interrupt register #0 read */ -#define USEM_REG_USEM_INT_STS_0 0x300104 -#define USEM_REG_USEM_INT_STS_1 0x300114 -/* [RW 32] Parity mask register #0 read/write */ -#define USEM_REG_USEM_PRTY_MASK_0 0x300130 -#define USEM_REG_USEM_PRTY_MASK_1 0x300140 -/* [R 32] Parity register #0 read */ -#define USEM_REG_USEM_PRTY_STS_0 0x300124 -#define USEM_REG_USEM_PRTY_STS_1 0x300134 -/* [RC 32] Parity register #0 read clear */ -#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128 -#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138 -/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 - * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ -#define USEM_REG_VFPF_ERR_NUM 0x300380 -#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0) -#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1) -#define VFC_REG_MEMORIES_RST 0x1943c -/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits - * [12:8] of the address should be the offset within the accessed LCID - * context; the bits [7:0] are the accessed LCID.Example: to write to REG10 - * LCID100. The RBC address should be 13'ha64. */ -#define XCM_REG_AG_CTX 0x28000 -/* [RW 2] The queue index for registration on Aux1 counter flag. */ -#define XCM_REG_AUX1_Q 0x20134 -/* [RW 2] Per each decision rule the queue index to register to. */ -#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0 -/* [R 5] Used to read the XX protection CAM occupancy counter. */ -#define XCM_REG_CAM_OCCUP 0x20244 -/* [RW 1] CDU AG read Interface enable. If 0 - the request input is - disregarded; valid output is deasserted; all other signals are treated as - usual; if 1 - normal activity. 
*/
-#define XCM_REG_CDU_AG_RD_IFEN 0x20044
-/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
- are disregarded; all other signals are treated as usual; if 1 - normal
- activity. */
-#define XCM_REG_CDU_AG_WR_IFEN 0x20040
-/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
- disregarded; valid output is deasserted; all other signals are treated as
- usual; if 1 - normal activity. */
-#define XCM_REG_CDU_SM_RD_IFEN 0x2004c
-/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
- input is disregarded; all other signals are treated as usual; if 1 -
- normal activity. */
-#define XCM_REG_CDU_SM_WR_IFEN 0x20048
-/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
- the initial credit value; read returns the current value of the credit
- counter. Must be initialized to 1 at start-up. */
-#define XCM_REG_CFC_INIT_CRD 0x20404
-/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
- weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_CP_WEIGHT 0x200dc
-/* [RW 1] Input csem Interface enable. If 0 - the valid input is
- disregarded; acknowledge output is deasserted; all other signals are
- treated as usual; if 1 - normal activity. */
-#define XCM_REG_CSEM_IFEN 0x20028
-/* [RC 1] Set at message length mismatch (relative to last indication) at
- the csem interface. */
-#define XCM_REG_CSEM_LENGTH_MIS 0x20228
-/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
- weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_CSEM_WEIGHT 0x200c4
-/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
- disregarded; acknowledge output is deasserted; all other signals are
- treated as usual; if 1 - normal activity. */
-#define XCM_REG_DORQ_IFEN 0x20030
-/* [RC 1] Set at message length mismatch (relative to last indication) at
- the dorq interface. */
-#define XCM_REG_DORQ_LENGTH_MIS 0x20230
-/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
- weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_DORQ_WEIGHT 0x200cc
-/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
-#define XCM_REG_ERR_EVNT_ID 0x200b0
-/* [RW 28] The CM erroneous header for QM and Timers formatting. */
-#define XCM_REG_ERR_XCM_HDR 0x200ac
-/* [RW 8] The Event ID for Timers expiration. */
-#define XCM_REG_EXPR_EVNT_ID 0x200b4
-/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
- writes the initial credit value; read returns the current value of the
- credit counter. Must be initialized to 64 at start-up. */
-#define XCM_REG_FIC0_INIT_CRD 0x2040c
-/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
- writes the initial credit value; read returns the current value of the
- credit counter. Must be initialized to 64 at start-up. */
-#define XCM_REG_FIC1_INIT_CRD 0x20410
-#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118
-#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c
-#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108
-#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c
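The XCM_REG_AG_CTX window above encodes its target in a 13-bit index: bits [7:0] select the LCID and bits [12:8] the REG offset within that LCID's context, so REG10 of LCID100 gives (10 << 8) | 100 = 0xa64, matching the 13'ha64 example in the comment. A minimal sketch of the composition; REG_WR() is the driver's 32-bit GRC write helper, and scaling the 13-bit index by 4 to a byte offset is an assumption, not taken from this header:

/* Illustrative only: compose the XCM_REG_AG_CTX indirect index.
 * The idx * 4 byte-offset scaling is an assumption. */
static void xcm_ag_ctx_write(struct bnx2x *bp, u8 lcid, u8 reg_offset, u32 val)
{
	u32 idx = ((u32)(reg_offset & 0x1f) << 8) | lcid; /* 0xa64 for REG10/LCID100 */

	REG_WR(bp, XCM_REG_AG_CTX + idx * 4, val);
}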
-/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin;
- 1 - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr;
- ~xcm_registers_gr_ld0_pr.gr_ld0_pr and
- ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */
-#define XCM_REG_GR_ARB_TYPE 0x2020c
-/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
- highest priority is 3. It is supposed that the Channel group is the
- complement of the other 3 groups. */
-#define XCM_REG_GR_LD0_PR 0x20214
-/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
- highest priority is 3. It is supposed that the Channel group is the
- complement of the other 3 groups. */
-#define XCM_REG_GR_LD1_PR 0x20218
-/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
- disregarded; acknowledge output is deasserted; all other signals are
- treated as usual; if 1 - normal activity. */
-#define XCM_REG_NIG0_IFEN 0x20038
-/* [RC 1] Set at message length mismatch (relative to last indication) at
- the nig0 interface. */
-#define XCM_REG_NIG0_LENGTH_MIS 0x20238
-/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
- weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_NIG0_WEIGHT 0x200d4
-/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
- disregarded; acknowledge output is deasserted; all other signals are
- treated as usual; if 1 - normal activity. */
-#define XCM_REG_NIG1_IFEN 0x2003c
-/* [RC 1] Set at message length mismatch (relative to last indication) at
- the nig1 interface. */
-#define XCM_REG_NIG1_LENGTH_MIS 0x2023c
-/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
- sent to STORM; for a specific connection type. The double REG-pairs are
- used in order to align to STORM context row size of 128 bits. The offset
- of these data in the STORM context is always 0. Index _i stands for the
- connection type (one of 16). */
-#define XCM_REG_N_SM_CTX_LD_0 0x20060
-#define XCM_REG_N_SM_CTX_LD_1 0x20064
-#define XCM_REG_N_SM_CTX_LD_2 0x20068
-#define XCM_REG_N_SM_CTX_LD_3 0x2006c
-#define XCM_REG_N_SM_CTX_LD_4 0x20070
-#define XCM_REG_N_SM_CTX_LD_5 0x20074
-/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
- acknowledge output is deasserted; all other signals are treated as usual;
- if 1 - normal activity. */
-#define XCM_REG_PBF_IFEN 0x20034
-/* [RC 1] Set at message length mismatch (relative to last indication) at
- the pbf interface. */
-#define XCM_REG_PBF_LENGTH_MIS 0x20234
-/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
- weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_PBF_WEIGHT 0x200d0
-#define XCM_REG_PHYS_QNUM3_0 0x20100
-#define XCM_REG_PHYS_QNUM3_1 0x20104
-/* [RW 8] The Event ID for Timers formatting in case of stop done. */
-#define XCM_REG_STOP_EVNT_ID 0x200b8
-/* [RC 1] Set at message length mismatch (relative to last indication) at
- the STORM interface. */
-#define XCM_REG_STORM_LENGTH_MIS 0x2021c
-/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
- weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_STORM_WEIGHT 0x200bc
-/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
- disregarded; acknowledge output is deasserted; all other signals are
- treated as usual; if 1 - normal activity. */
-#define XCM_REG_STORM_XCM_IFEN 0x20010
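The recurring *_WEIGHT comments describe a non-obvious 3-bit encoding: 0 selects the maximum weight of 8, while any non-zero value is used as-is. A tiny decoding helper, for illustration only:

/* Effective WRR weight for the 3-bit *_WEIGHT fields above:
 * 0 -> 8 (most prioritised), otherwise the field value itself. */
static inline unsigned int xcm_wrr_weight(unsigned int field)
{
	return field ? field : 8;
}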
-/* [RW 4] Timers output initial credit. Max credit available - 15. Write
- writes the initial credit value; read returns the current value of the
- credit counter. Must be initialized to 4 at start-up. */
-#define XCM_REG_TM_INIT_CRD 0x2041c
-/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
- weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_TM_WEIGHT 0x200ec
-/* [RW 28] The CM header for Timers expiration command. */
-#define XCM_REG_TM_XCM_HDR 0x200a8
-/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
- disregarded; acknowledge output is deasserted; all other signals are
- treated as usual; if 1 - normal activity. */
-#define XCM_REG_TM_XCM_IFEN 0x2001c
-/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
- disregarded; acknowledge output is deasserted; all other signals are
- treated as usual; if 1 - normal activity. */
-#define XCM_REG_TSEM_IFEN 0x20024
-/* [RC 1] Set at message length mismatch (relative to last indication) at
- the tsem interface. */
-#define XCM_REG_TSEM_LENGTH_MIS 0x20224
-/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
- weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_TSEM_WEIGHT 0x200c0
-/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */
-#define XCM_REG_UNA_GT_NXT_Q 0x20120
-/* [RW 1] Input usem Interface enable. If 0 - the valid input is
- disregarded; acknowledge output is deasserted; all other signals are
- treated as usual; if 1 - normal activity. */
-#define XCM_REG_USEM_IFEN 0x2002c
-/* [RC 1] Message length mismatch (relative to last indication) at the usem
- interface. */
-#define XCM_REG_USEM_LENGTH_MIS 0x2022c
-/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
- weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_USEM_WEIGHT 0x200c8
-#define XCM_REG_WU_DA_CNT_CMD00 0x201d4
-#define XCM_REG_WU_DA_CNT_CMD01 0x201d8
-#define XCM_REG_WU_DA_CNT_CMD10 0x201dc
-#define XCM_REG_WU_DA_CNT_CMD11 0x201e0
-#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4
-#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8
-#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec
-#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0
-#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4
-#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8
-#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc
-#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0
-/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
- acknowledge output is deasserted; all other signals are treated as usual;
- if 1 - normal activity. */
-#define XCM_REG_XCM_CFC_IFEN 0x20050
-/* [RW 14] Interrupt mask register #0 read/write */
-#define XCM_REG_XCM_INT_MASK 0x202b4
-/* [R 14] Interrupt register #0 read */
-#define XCM_REG_XCM_INT_STS 0x202a8
-/* [RW 30] Parity mask register #0 read/write */
-#define XCM_REG_XCM_PRTY_MASK 0x202c4
-/* [R 30] Parity register #0 read */
-#define XCM_REG_XCM_PRTY_STS 0x202b8
-/* [RC 30] Parity register #0 read clear */
-#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc
-
-/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
- REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
- Is used to determine the number of the AG context REG-pairs written back;
- when the Reg1WbFlg isn't set. */
-#define XCM_REG_XCM_REG0_SZ 0x200f4
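Note that XCM_REG_XCM_REG0_SZ above holds the most-significant REG-pair index rather than a count, i.e. region size minus one. A sketch, using the driver's REG_WR() GRC write helper; the wrapper function itself is hypothetical:

/* Illustrative only: program AG context region 0 size; a region of
 * six REG-pairs is written as 5, per the comment above. */
static void xcm_set_reg0_size(struct bnx2x *bp, u32 n_reg_pairs)
{
	REG_WR(bp, XCM_REG_XCM_REG0_SZ, n_reg_pairs - 1);
}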
-/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
- disregarded; valid is deasserted; all other signals are treated as usual;
- if 1 - normal activity. */
-#define XCM_REG_XCM_STORM0_IFEN 0x20004
-/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
- disregarded; valid is deasserted; all other signals are treated as usual;
- if 1 - normal activity. */
-#define XCM_REG_XCM_STORM1_IFEN 0x20008
-/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
- disregarded; acknowledge output is deasserted; all other signals are
- treated as usual; if 1 - normal activity. */
-#define XCM_REG_XCM_TM_IFEN 0x20020
-/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
- disregarded; valid is deasserted; all other signals are treated as usual;
- if 1 - normal activity. */
-#define XCM_REG_XCM_XQM_IFEN 0x2000c
-/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */
-#define XCM_REG_XCM_XQM_USE_Q 0x200f0
-/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */
-#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc
-/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
- the initial credit value; read returns the current value of the credit
- counter. Must be initialized to 32 at start-up. */
-#define XCM_REG_XQM_INIT_CRD 0x20420
-/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
- stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_XQM_P_WEIGHT 0x200e4
-/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
- stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_XQM_S_WEIGHT 0x200e8
-/* [RW 28] The CM header value for QM request (primary). */
-#define XCM_REG_XQM_XCM_HDR_P 0x200a0
-/* [RW 28] The CM header value for QM request (secondary). */
-#define XCM_REG_XQM_XCM_HDR_S 0x200a4
-/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
- acknowledge output is deasserted; all other signals are treated as usual;
- if 1 - normal activity. */
-#define XCM_REG_XQM_XCM_IFEN 0x20014
-/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
- acknowledge output is deasserted; all other signals are treated as usual;
- if 1 - normal activity. */
-#define XCM_REG_XSDM_IFEN 0x20018
-/* [RC 1] Set at message length mismatch (relative to last indication) at
- the SDM interface. */
-#define XCM_REG_XSDM_LENGTH_MIS 0x20220
-/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
- weight 8 (the most prioritised); 1 stands for weight 1 (least
- prioritised); 2 stands for weight 2; etc. */
-#define XCM_REG_XSDM_WEIGHT 0x200e0
-/* [RW 17] Indirect access to the descriptor table of the XX protection
- mechanism. The fields are: [5:0] - message length; [11:6] - message
- pointer; [16:12] - next pointer. */
-#define XCM_REG_XX_DESCR_TABLE 0x20480
-#define XCM_REG_XX_DESCR_TABLE_SIZE 32
-/* [R 6] Used to read the XX protection Free counter. */
-#define XCM_REG_XX_FREE 0x20240
-/* [RW 6] Initial value for the credit counter; responsible for fulfilling
- of the Input Stage XX protection buffer by the XX protection pending
- messages. Max credit available - 3. Write writes the initial credit value;
- read returns the current value of the credit counter. Must be initialized
- to 2 at start-up. */
-#define XCM_REG_XX_INIT_CRD 0x20424
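Each XCM_REG_XX_DESCR_TABLE entry packs three fields into its 17 bits as described above. A decoding sketch, illustrative only (the struct and helper names are hypothetical):

/* Unpack one XX protection descriptor: [5:0] message length,
 * [11:6] message pointer, [16:12] next pointer. */
struct xcm_xx_descr {
	u8 msg_len;	/* bits [5:0] */
	u8 msg_ptr;	/* bits [11:6] */
	u8 next_ptr;	/* bits [16:12] */
};

static struct xcm_xx_descr xcm_xx_descr_unpack(u32 raw)
{
	struct xcm_xx_descr d = {
		.msg_len  = raw & 0x3f,
		.msg_ptr  = (raw >> 6) & 0x3f,
		.next_ptr = (raw >> 12) & 0x1f,
	};

	return d;
}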
-/* [RW 6] The maximum number of pending messages; which may be stored in XX
- protection. ~xcm_registers_xx_free.xx_free read on read. */
-#define XCM_REG_XX_MSG_NUM 0x20428
-/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
-#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
-#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
-#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
-#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3)
-#define XMAC_CTRL_REG_RX_EN (0x1<<1)
-#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
-#define XMAC_CTRL_REG_TX_EN (0x1<<0)
-#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
-#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
-#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0)
-#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3)
-#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4)
-#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5)
-#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60
-#define XMAC_REG_CTRL 0
-/* [RW 16] Upper 16 bits of the 48-bit ctrl_sa register. Used as the SA in
- * PAUSE/PFC packets transmitted by the MAC */
-#define XMAC_REG_CTRL_SA_HI 0x2c
-/* [RW 32] Lower 32 bits of the 48-bit ctrl_sa register. Used as the SA in
- * PAUSE/PFC packets transmitted by the MAC */
-#define XMAC_REG_CTRL_SA_LO 0x28
-#define XMAC_REG_PAUSE_CTRL 0x68
-#define XMAC_REG_PFC_CTRL 0x70
-#define XMAC_REG_PFC_CTRL_HI 0x74
-#define XMAC_REG_RX_LSS_STATUS 0x58
-/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
- * CRC in strip mode */
-#define XMAC_REG_RX_MAX_SIZE 0x40
-#define XMAC_REG_TX_CTRL 0x20
-/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
- The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] -
- header pointer. */
-#define XCM_REG_XX_TABLE 0x20500
-/* [RW 8] The event id for aggregated interrupt 0 */
-#define XSDM_REG_AGG_INT_EVENT_0 0x166038
-#define XSDM_REG_AGG_INT_EVENT_1 0x16603c
-#define XSDM_REG_AGG_INT_EVENT_10 0x166060
-#define XSDM_REG_AGG_INT_EVENT_11 0x166064
-#define XSDM_REG_AGG_INT_EVENT_12 0x166068
-#define XSDM_REG_AGG_INT_EVENT_13 0x16606c
-#define XSDM_REG_AGG_INT_EVENT_14 0x166070
-#define XSDM_REG_AGG_INT_EVENT_2 0x166040
-#define XSDM_REG_AGG_INT_EVENT_3 0x166044
-#define XSDM_REG_AGG_INT_EVENT_4 0x166048
-#define XSDM_REG_AGG_INT_EVENT_5 0x16604c
-#define XSDM_REG_AGG_INT_EVENT_6 0x166050
-#define XSDM_REG_AGG_INT_EVENT_7 0x166054
-#define XSDM_REG_AGG_INT_EVENT_8 0x166058
-#define XSDM_REG_AGG_INT_EVENT_9 0x16605c
-/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
- or auto-mask-mode (1) */
-#define XSDM_REG_AGG_INT_MODE_0 0x1661b8
-#define XSDM_REG_AGG_INT_MODE_1 0x1661bc
-/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
-#define XSDM_REG_CFC_RSP_START_ADDR 0x166008
-/* [RW 16] The maximum value of the completion counter #0 */
-#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c
-/* [RW 16] The maximum value of the completion counter #1 */
-#define XSDM_REG_CMP_COUNTER_MAX1 0x166020
-/* [RW 16] The maximum value of the completion counter #2 */
-#define XSDM_REG_CMP_COUNTER_MAX2 0x166024
-/* [RW 16] The maximum value of the completion counter #3 */
-#define XSDM_REG_CMP_COUNTER_MAX3 0x166028
-/* [RW 13] The start address in the internal RAM for the completion
- counters. */
-#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c
-#define XSDM_REG_ENABLE_IN1 0x166238
-#define XSDM_REG_ENABLE_IN2 0x16623c
-#define XSDM_REG_ENABLE_OUT1 0x166240
-#define XSDM_REG_ENABLE_OUT2 0x166244
-/* [RW 4] The initial number of messages that can be sent to the pxp control
- interface without receiving any ACK.
*/ -#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc -/* [ST 32] The number of ACK after placement messages received */ -#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c -/* [ST 32] The number of packet end messages received from the parser */ -#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274 -/* [ST 32] The number of requests received from the pxp async if */ -#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278 -/* [ST 32] The number of commands received in queue 0 */ -#define XSDM_REG_NUM_OF_Q0_CMD 0x166248 -/* [ST 32] The number of commands received in queue 10 */ -#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c -/* [ST 32] The number of commands received in queue 11 */ -#define XSDM_REG_NUM_OF_Q11_CMD 0x166270 -/* [ST 32] The number of commands received in queue 1 */ -#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c -/* [ST 32] The number of commands received in queue 3 */ -#define XSDM_REG_NUM_OF_Q3_CMD 0x166250 -/* [ST 32] The number of commands received in queue 4 */ -#define XSDM_REG_NUM_OF_Q4_CMD 0x166254 -/* [ST 32] The number of commands received in queue 5 */ -#define XSDM_REG_NUM_OF_Q5_CMD 0x166258 -/* [ST 32] The number of commands received in queue 6 */ -#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c -/* [ST 32] The number of commands received in queue 7 */ -#define XSDM_REG_NUM_OF_Q7_CMD 0x166260 -/* [ST 32] The number of commands received in queue 8 */ -#define XSDM_REG_NUM_OF_Q8_CMD 0x166264 -/* [ST 32] The number of commands received in queue 9 */ -#define XSDM_REG_NUM_OF_Q9_CMD 0x166268 -/* [RW 13] The start address in the internal RAM for queue counters */ -#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010 -/* [W 17] Generate an operation after completion; bit-16 is - * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and - * bits 4:0 are the T124Param[4:0] */ -#define XSDM_REG_OPERATION_GEN 0x1664c4 -/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ -#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548 -/* [R 1] parser fifo empty in sdm_sync block */ -#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550 -/* [R 1] parser serial fifo empty in sdm_sync block */ -#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558 -/* [RW 32] Tick for timer counter. Applicable only when - ~xsdm_registers_timer_tick_enable.timer_tick_enable =1 */ -#define XSDM_REG_TIMER_TICK 0x166000 -/* [RW 32] Interrupt mask register #0 read/write */ -#define XSDM_REG_XSDM_INT_MASK_0 0x16629c -#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac -/* [R 32] Interrupt register #0 read */ -#define XSDM_REG_XSDM_INT_STS_0 0x166290 -#define XSDM_REG_XSDM_INT_STS_1 0x1662a0 -/* [RW 11] Parity mask register #0 read/write */ -#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc -/* [R 11] Parity register #0 read */ -#define XSDM_REG_XSDM_PRTY_STS 0x1662b0 -/* [RC 11] Parity register #0 read clear */ -#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4 -/* [RW 5] The number of time_slots in the arbitration cycle */ -#define XSEM_REG_ARB_CYCLE_SIZE 0x280034 -/* [RW 3] The source that is associated with arbitration element 0. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2 */ -#define XSEM_REG_ARB_ELEMENT0 0x280020 -/* [RW 3] The source that is associated with arbitration element 1. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. 
- Could not be equal to register ~xsem_registers_arb_element0.arb_element0 */ -#define XSEM_REG_ARB_ELEMENT1 0x280024 -/* [RW 3] The source that is associated with arbitration element 2. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~xsem_registers_arb_element0.arb_element0 - and ~xsem_registers_arb_element1.arb_element1 */ -#define XSEM_REG_ARB_ELEMENT2 0x280028 -/* [RW 3] The source that is associated with arbitration element 3. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2.Could - not be equal to register ~xsem_registers_arb_element0.arb_element0 and - ~xsem_registers_arb_element1.arb_element1 and - ~xsem_registers_arb_element2.arb_element2 */ -#define XSEM_REG_ARB_ELEMENT3 0x28002c -/* [RW 3] The source that is associated with arbitration element 4. Source - decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- - sleeping thread with priority 1; 4- sleeping thread with priority 2. - Could not be equal to register ~xsem_registers_arb_element0.arb_element0 - and ~xsem_registers_arb_element1.arb_element1 and - ~xsem_registers_arb_element2.arb_element2 and - ~xsem_registers_arb_element3.arb_element3 */ -#define XSEM_REG_ARB_ELEMENT4 0x280030 -#define XSEM_REG_ENABLE_IN 0x2800a4 -#define XSEM_REG_ENABLE_OUT 0x2800a8 -/* [RW 32] This address space contains all registers and memories that are - placed in SEM_FAST block. The SEM_FAST registers are described in - appendix B. In order to access the sem_fast registers the base address - ~fast_memory.fast_memory should be added to eachsem_fast register offset. */ -#define XSEM_REG_FAST_MEMORY 0x2a0000 -/* [RW 1] Disables input messages from FIC0 May be updated during run_time - by the microcode */ -#define XSEM_REG_FIC0_DISABLE 0x280224 -/* [RW 1] Disables input messages from FIC1 May be updated during run_time - by the microcode */ -#define XSEM_REG_FIC1_DISABLE 0x280234 -/* [RW 15] Interrupt table Read and write access to it is not possible in - the middle of the work */ -#define XSEM_REG_INT_TABLE 0x280400 -/* [ST 24] Statistics register. The number of messages that entered through - FIC0 */ -#define XSEM_REG_MSG_NUM_FIC0 0x280000 -/* [ST 24] Statistics register. The number of messages that entered through - FIC1 */ -#define XSEM_REG_MSG_NUM_FIC1 0x280004 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC0 */ -#define XSEM_REG_MSG_NUM_FOC0 0x280008 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC1 */ -#define XSEM_REG_MSG_NUM_FOC1 0x28000c -/* [ST 24] Statistics register. The number of messages that were sent to - FOC2 */ -#define XSEM_REG_MSG_NUM_FOC2 0x280010 -/* [ST 24] Statistics register. The number of messages that were sent to - FOC3 */ -#define XSEM_REG_MSG_NUM_FOC3 0x280014 -/* [RW 1] Disables input messages from the passive buffer May be updated - during run_time by the microcode */ -#define XSEM_REG_PAS_DISABLE 0x28024c -/* [WB 128] Debug only. Passive buffer memory */ -#define XSEM_REG_PASSIVE_BUFFER 0x282000 -/* [WB 46] pram memory. B45 is parity; b[44:0] - data. 
*/ -#define XSEM_REG_PRAM 0x2c0000 -/* [R 16] Valid sleeping threads indication have bit per thread */ -#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c -/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ -#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0 -/* [RW 16] List of free threads . There is a bit per thread. */ -#define XSEM_REG_THREADS_LIST 0x2802e4 -/* [RW 3] The arbitration scheme of time_slot 0 */ -#define XSEM_REG_TS_0_AS 0x280038 -/* [RW 3] The arbitration scheme of time_slot 10 */ -#define XSEM_REG_TS_10_AS 0x280060 -/* [RW 3] The arbitration scheme of time_slot 11 */ -#define XSEM_REG_TS_11_AS 0x280064 -/* [RW 3] The arbitration scheme of time_slot 12 */ -#define XSEM_REG_TS_12_AS 0x280068 -/* [RW 3] The arbitration scheme of time_slot 13 */ -#define XSEM_REG_TS_13_AS 0x28006c -/* [RW 3] The arbitration scheme of time_slot 14 */ -#define XSEM_REG_TS_14_AS 0x280070 -/* [RW 3] The arbitration scheme of time_slot 15 */ -#define XSEM_REG_TS_15_AS 0x280074 -/* [RW 3] The arbitration scheme of time_slot 16 */ -#define XSEM_REG_TS_16_AS 0x280078 -/* [RW 3] The arbitration scheme of time_slot 17 */ -#define XSEM_REG_TS_17_AS 0x28007c -/* [RW 3] The arbitration scheme of time_slot 18 */ -#define XSEM_REG_TS_18_AS 0x280080 -/* [RW 3] The arbitration scheme of time_slot 1 */ -#define XSEM_REG_TS_1_AS 0x28003c -/* [RW 3] The arbitration scheme of time_slot 2 */ -#define XSEM_REG_TS_2_AS 0x280040 -/* [RW 3] The arbitration scheme of time_slot 3 */ -#define XSEM_REG_TS_3_AS 0x280044 -/* [RW 3] The arbitration scheme of time_slot 4 */ -#define XSEM_REG_TS_4_AS 0x280048 -/* [RW 3] The arbitration scheme of time_slot 5 */ -#define XSEM_REG_TS_5_AS 0x28004c -/* [RW 3] The arbitration scheme of time_slot 6 */ -#define XSEM_REG_TS_6_AS 0x280050 -/* [RW 3] The arbitration scheme of time_slot 7 */ -#define XSEM_REG_TS_7_AS 0x280054 -/* [RW 3] The arbitration scheme of time_slot 8 */ -#define XSEM_REG_TS_8_AS 0x280058 -/* [RW 3] The arbitration scheme of time_slot 9 */ -#define XSEM_REG_TS_9_AS 0x28005c -/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 - * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. 
*/ -#define XSEM_REG_VFPF_ERR_NUM 0x280380 -/* [RW 32] Interrupt mask register #0 read/write */ -#define XSEM_REG_XSEM_INT_MASK_0 0x280110 -#define XSEM_REG_XSEM_INT_MASK_1 0x280120 -/* [R 32] Interrupt register #0 read */ -#define XSEM_REG_XSEM_INT_STS_0 0x280104 -#define XSEM_REG_XSEM_INT_STS_1 0x280114 -/* [RW 32] Parity mask register #0 read/write */ -#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130 -#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140 -/* [R 32] Parity register #0 read */ -#define XSEM_REG_XSEM_PRTY_STS_0 0x280124 -#define XSEM_REG_XSEM_PRTY_STS_1 0x280134 -/* [RC 32] Parity register #0 read clear */ -#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128 -#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138 -#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) -#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) -#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) -#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0) -#define MCPR_NVM_COMMAND_DOIT (1L<<4) -#define MCPR_NVM_COMMAND_DONE (1L<<3) -#define MCPR_NVM_COMMAND_FIRST (1L<<7) -#define MCPR_NVM_COMMAND_LAST (1L<<8) -#define MCPR_NVM_COMMAND_WR (1L<<5) -#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9) -#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5) -#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1) -#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3) -#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3) -#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3) -#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3) -#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3) -#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3) -#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3) -#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3) -#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3) -#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3) -#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3) -#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3) -#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3) -#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3) -#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3) -#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3) -#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3) -#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3) -#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3) -#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3) -#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3) -#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3) -#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3) -#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3) -#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3) -#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3) -#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3) -#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3) -#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3) -#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3) -#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3) -#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3) -#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3) -#define EMAC_LED_1000MB_OVERRIDE (1L<<1) -#define EMAC_LED_100MB_OVERRIDE (1L<<2) -#define EMAC_LED_10MB_OVERRIDE (1L<<3) -#define EMAC_LED_2500MB_OVERRIDE (1L<<12) -#define EMAC_LED_OVERRIDE (1L<<0) -#define EMAC_LED_TRAFFIC (1L<<6) -#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26) -#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26) -#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26) -#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26) -#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26) -#define EMAC_MDIO_COMM_DATA (0xffffL<<0) -#define EMAC_MDIO_COMM_START_BUSY (1L<<29) -#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) -#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) -#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16) 
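A clause-22 PHY register read can be sketched from the EMAC_MDIO_COMM_* fields above together with the EMAC_REG_EMAC_MDIO_COMM offset defined just below. REG_RD()/REG_WR() are the driver's GRC accessors; the PHY-address and register-address positions (bits 21 and 16) are assumptions, not taken from this header:

/* Illustrative only: issue a clause-22 MDIO read and poll for
 * completion. Bit positions 21/16 for phy_addr/reg are assumed. */
static int emac_mdio_read22(struct bnx2x *bp, u32 emac_base,
			    u8 phy_addr, u8 reg, u16 *val)
{
	u32 tmp = ((u32)phy_addr << 21) | ((u32)reg << 16) |
		  EMAC_MDIO_COMM_COMMAND_READ_22 |
		  EMAC_MDIO_COMM_START_BUSY;
	int i;

	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, tmp);

	for (i = 0; i < 50; i++) {
		udelay(10);
		tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
			*val = tmp & EMAC_MDIO_COMM_DATA;
			return 0;
		}
	}

	return -EBUSY;	/* PHY never cleared the busy bit */
}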
-#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 -#define EMAC_MDIO_STATUS_10MB (1L<<1) -#define EMAC_MODE_25G_MODE (1L<<5) -#define EMAC_MODE_HALF_DUPLEX (1L<<1) -#define EMAC_MODE_PORT_GMII (2L<<2) -#define EMAC_MODE_PORT_MII (1L<<2) -#define EMAC_MODE_PORT_MII_10M (3L<<2) -#define EMAC_MODE_RESET (1L<<0) -#define EMAC_REG_EMAC_LED 0xc -#define EMAC_REG_EMAC_MAC_MATCH 0x10 -#define EMAC_REG_EMAC_MDIO_COMM 0xac -#define EMAC_REG_EMAC_MDIO_MODE 0xb4 -#define EMAC_REG_EMAC_MDIO_STATUS 0xb0 -#define EMAC_REG_EMAC_MODE 0x0 -#define EMAC_REG_EMAC_RX_MODE 0xc8 -#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c -#define EMAC_REG_EMAC_RX_STAT_AC 0x180 -#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4 -#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23 -#define EMAC_REG_EMAC_TX_MODE 0xbc -#define EMAC_REG_EMAC_TX_STAT_AC 0x280 -#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22 -#define EMAC_REG_RX_PFC_MODE 0x320 -#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2) -#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1) -#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0) -#define EMAC_REG_RX_PFC_PARAM 0x324 -#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0 -#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16 -#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328 -#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0) -#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330 -#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0) -#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c -#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0) -#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334 -#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0) -#define EMAC_RX_MODE_FLOW_EN (1L<<2) -#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3) -#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10) -#define EMAC_RX_MODE_PROMISCUOUS (1L<<8) -#define EMAC_RX_MODE_RESET (1L<<0) -#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) -#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) -#define EMAC_TX_MODE_FLOW_EN (1L<<4) -#define EMAC_TX_MODE_RESET (1L<<0) -#define MISC_REGISTERS_GPIO_0 0 -#define MISC_REGISTERS_GPIO_1 1 -#define MISC_REGISTERS_GPIO_2 2 -#define MISC_REGISTERS_GPIO_3 3 -#define MISC_REGISTERS_GPIO_CLR_POS 16 -#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24) -#define MISC_REGISTERS_GPIO_FLOAT_POS 24 -#define MISC_REGISTERS_GPIO_HIGH 1 -#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2 -#define MISC_REGISTERS_GPIO_INT_CLR_POS 24 -#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0 -#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1 -#define MISC_REGISTERS_GPIO_INT_SET_POS 16 -#define MISC_REGISTERS_GPIO_LOW 0 -#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1 -#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0 -#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 -#define MISC_REGISTERS_GPIO_SET_POS 8 -#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 -#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29) -#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) -#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26) -#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) -#define MISC_REGISTERS_RESET_REG_1_SET 0x584 -#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 -#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24) -#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25) -#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19) -#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17) -#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) -#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1) -#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2) -#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) -#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3) -#define 
MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15) -#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4) -#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6) -#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8) -#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7) -#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5) -#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13) -#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11) -#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13) -#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9) -#define MISC_REGISTERS_RESET_REG_2_SET 0x594 -#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20) -#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21) -#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22) -#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23) -#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 -#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) -#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2) -#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3) -#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0) -#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5) -#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6) -#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7) -#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4) -#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8) -#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4 -#define MISC_REGISTERS_SPIO_4 4 -#define MISC_REGISTERS_SPIO_5 5 -#define MISC_REGISTERS_SPIO_7 7 -#define MISC_REGISTERS_SPIO_CLR_POS 16 -#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24) -#define MISC_REGISTERS_SPIO_FLOAT_POS 24 -#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2 -#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16 -#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 -#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 -#define MISC_REGISTERS_SPIO_SET_POS 8 -#define HW_LOCK_DRV_FLAGS 10 -#define HW_LOCK_MAX_RESOURCE_VALUE 31 -#define HW_LOCK_RESOURCE_GPIO 1 -#define HW_LOCK_RESOURCE_MDIO 0 -#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 -#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 -#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 -#define HW_LOCK_RESOURCE_SPIO 2 -#define HW_LOCK_RESOURCE_UNDI 5 -#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) -#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) -#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) -#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31) -#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30) -#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9) -#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8) -#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7) -#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6) -#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29) -#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28) -#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1) -#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0) -#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18) -#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11) -#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10) -#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13) -#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12) -#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2) -#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR 
(0x1<<12) -#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28) -#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31) -#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29) -#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30) -#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15) -#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14) -#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14) -#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20) -#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31) -#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30) -#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0) -#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2) -#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3) -#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5) -#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4) -#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3) -#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2) -#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3) -#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2) -#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22) -#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15) -#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27) -#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26) -#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5) -#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4) -#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25) -#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24) -#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29) -#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28) -#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23) -#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22) -#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27) -#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26) -#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21) -#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20) -#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25) -#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24) -#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16) -#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9) -#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8) -#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7) -#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6) -#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11) -#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10) - -#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5) -#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9) - -#define RESERVED_GENERAL_ATTENTION_BIT_0 0 - -#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0 -#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000 - -#define RESERVED_GENERAL_ATTENTION_BIT_6 6 -#define RESERVED_GENERAL_ATTENTION_BIT_7 7 -#define RESERVED_GENERAL_ATTENTION_BIT_8 8 -#define RESERVED_GENERAL_ATTENTION_BIT_9 9 -#define RESERVED_GENERAL_ATTENTION_BIT_10 10 -#define RESERVED_GENERAL_ATTENTION_BIT_11 11 -#define RESERVED_GENERAL_ATTENTION_BIT_12 12 -#define RESERVED_GENERAL_ATTENTION_BIT_13 13 -#define RESERVED_GENERAL_ATTENTION_BIT_14 14 -#define RESERVED_GENERAL_ATTENTION_BIT_15 15 -#define RESERVED_GENERAL_ATTENTION_BIT_16 16 -#define RESERVED_GENERAL_ATTENTION_BIT_17 17 -#define RESERVED_GENERAL_ATTENTION_BIT_18 18 -#define RESERVED_GENERAL_ATTENTION_BIT_19 19 -#define RESERVED_GENERAL_ATTENTION_BIT_20 20 
-#define RESERVED_GENERAL_ATTENTION_BIT_21 21 - -/* storm asserts attention bits */ -#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7 -#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8 -#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9 -#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10 - -/* mcp error attention bit */ -#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11 - -/*E1H NIG status sync attention mapped to group 4-7*/ -#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12 -#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13 -#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14 -#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15 -#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16 -#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17 -#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18 -#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19 - - -#define LATCHED_ATTN_RBCR 23 -#define LATCHED_ATTN_RBCT 24 -#define LATCHED_ATTN_RBCN 25 -#define LATCHED_ATTN_RBCU 26 -#define LATCHED_ATTN_RBCP 27 -#define LATCHED_ATTN_TIMEOUT_GRC 28 -#define LATCHED_ATTN_RSVD_GRC 29 -#define LATCHED_ATTN_ROM_PARITY_MCP 30 -#define LATCHED_ATTN_UM_RX_PARITY_MCP 31 -#define LATCHED_ATTN_UM_TX_PARITY_MCP 32 -#define LATCHED_ATTN_SCPAD_PARITY_MCP 33 - -#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32) -#define GENERAL_ATTEN_OFFSET(atten_name)\ - (1UL << ((94 + atten_name) % 32)) -/* - * This file defines GRC base address for every block. - * This file is included by chipsim, asm microcode and cpp microcode. - * These values are used in Design.xml on regBase attribute - * Use the base with the generated offsets of specific registers. 
- */ - -#define GRCBASE_PXPCS 0x000000 -#define GRCBASE_PCICONFIG 0x002000 -#define GRCBASE_PCIREG 0x002400 -#define GRCBASE_EMAC0 0x008000 -#define GRCBASE_EMAC1 0x008400 -#define GRCBASE_DBU 0x008800 -#define GRCBASE_MISC 0x00A000 -#define GRCBASE_DBG 0x00C000 -#define GRCBASE_NIG 0x010000 -#define GRCBASE_XCM 0x020000 -#define GRCBASE_PRS 0x040000 -#define GRCBASE_SRCH 0x040400 -#define GRCBASE_TSDM 0x042000 -#define GRCBASE_TCM 0x050000 -#define GRCBASE_BRB1 0x060000 -#define GRCBASE_MCP 0x080000 -#define GRCBASE_UPB 0x0C1000 -#define GRCBASE_CSDM 0x0C2000 -#define GRCBASE_USDM 0x0C4000 -#define GRCBASE_CCM 0x0D0000 -#define GRCBASE_UCM 0x0E0000 -#define GRCBASE_CDU 0x101000 -#define GRCBASE_DMAE 0x102000 -#define GRCBASE_PXP 0x103000 -#define GRCBASE_CFC 0x104000 -#define GRCBASE_HC 0x108000 -#define GRCBASE_PXP2 0x120000 -#define GRCBASE_PBF 0x140000 -#define GRCBASE_UMAC0 0x160000 -#define GRCBASE_UMAC1 0x160400 -#define GRCBASE_XPB 0x161000 -#define GRCBASE_MSTAT0 0x162000 -#define GRCBASE_MSTAT1 0x162800 -#define GRCBASE_XMAC0 0x163000 -#define GRCBASE_XMAC1 0x163800 -#define GRCBASE_TIMERS 0x164000 -#define GRCBASE_XSDM 0x166000 -#define GRCBASE_QM 0x168000 -#define GRCBASE_DQ 0x170000 -#define GRCBASE_TSEM 0x180000 -#define GRCBASE_CSEM 0x200000 -#define GRCBASE_XSEM 0x280000 -#define GRCBASE_USEM 0x300000 -#define GRCBASE_MISC_AEU GRCBASE_MISC - - -/* offset of configuration space in the pci core register */ -#define PCICFG_OFFSET 0x2000 -#define PCICFG_VENDOR_ID_OFFSET 0x00 -#define PCICFG_DEVICE_ID_OFFSET 0x02 -#define PCICFG_COMMAND_OFFSET 0x04 -#define PCICFG_COMMAND_IO_SPACE (1<<0) -#define PCICFG_COMMAND_MEM_SPACE (1<<1) -#define PCICFG_COMMAND_BUS_MASTER (1<<2) -#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3) -#define PCICFG_COMMAND_MWI_CYCLES (1<<4) -#define PCICFG_COMMAND_VGA_SNOOP (1<<5) -#define PCICFG_COMMAND_PERR_ENA (1<<6) -#define PCICFG_COMMAND_STEPPING (1<<7) -#define PCICFG_COMMAND_SERR_ENA (1<<8) -#define PCICFG_COMMAND_FAST_B2B (1<<9) -#define PCICFG_COMMAND_INT_DISABLE (1<<10) -#define PCICFG_COMMAND_RESERVED (0x1f<<11) -#define PCICFG_STATUS_OFFSET 0x06 -#define PCICFG_REVESION_ID_OFFSET 0x08 -#define PCICFG_CACHE_LINE_SIZE 0x0c -#define PCICFG_LATENCY_TIMER 0x0d -#define PCICFG_BAR_1_LOW 0x10 -#define PCICFG_BAR_1_HIGH 0x14 -#define PCICFG_BAR_2_LOW 0x18 -#define PCICFG_BAR_2_HIGH 0x1c -#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c -#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e -#define PCICFG_INT_LINE 0x3c -#define PCICFG_INT_PIN 0x3d -#define PCICFG_PM_CAPABILITY 0x48 -#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16) -#define PCICFG_PM_CAPABILITY_CLOCK (1<<19) -#define PCICFG_PM_CAPABILITY_RESERVED (1<<20) -#define PCICFG_PM_CAPABILITY_DSI (1<<21) -#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22) -#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25) -#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26) -#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27) -#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28) -#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29) -#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30) -#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31) -#define PCICFG_PM_CSR_OFFSET 0x4c -#define PCICFG_PM_CSR_STATE (0x3<<0) -#define PCICFG_PM_CSR_PME_ENABLE (1<<8) -#define PCICFG_PM_CSR_PME_STATUS (1<<15) -#define PCICFG_MSI_CAP_ID_OFFSET 0x58 -#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16) -#define PCICFG_MSI_CONTROL_MCAP (0x7<<17) -#define PCICFG_MSI_CONTROL_MENA (0x7<<20) -#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23) -#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE 
(0x1<<24) -#define PCICFG_GRC_ADDRESS 0x78 -#define PCICFG_GRC_DATA 0x80 -#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0 -#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16) -#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27) -#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30) -#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31) - -#define PCICFG_DEVICE_CONTROL 0xb4 -#define PCICFG_DEVICE_STATUS 0xb6 -#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0) -#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1) -#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2) -#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3) -#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4) -#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5) -#define PCICFG_LINK_CONTROL 0xbc - - -#define BAR_USTRORM_INTMEM 0x400000 -#define BAR_CSTRORM_INTMEM 0x410000 -#define BAR_XSTRORM_INTMEM 0x420000 -#define BAR_TSTRORM_INTMEM 0x430000 - -/* for accessing the IGU in case of status block ACK */ -#define BAR_IGU_INTMEM 0x440000 - -#define BAR_DOORBELL_OFFSET 0x800000 - -#define BAR_ME_REGISTER 0x450000 - -/* config_2 offset */ -#define GRC_CONFIG_2_SIZE_REG 0x408 -#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) -#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) -#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) -#define PCI_CONFIG_2_BAR1_64ENA (1L<<4) -#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) -#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) -#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) -#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) -#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) -#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) -#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) - -/* config_3 offset */ -#define GRC_CONFIG_3_SIZE_REG 0x40c -#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) -#define PCI_CONFIG_3_FORCE_PME (1L<<24) -#define PCI_CONFIG_3_PME_STATUS (1L<<25) -#define PCI_CONFIG_3_PME_ENABLE (1L<<26) -#define PCI_CONFIG_3_PM_STATE (0x3L<<27) -#define PCI_CONFIG_3_VAUX_PRESET (1L<<30) -#define PCI_CONFIG_3_PCI_POWER (1L<<31) - -#define GRC_BAR2_CONFIG 0x4e0 -#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) -#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) 
-#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) -#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) -#define PCI_CONFIG_2_BAR2_64ENA (1L<<4) - -#define PCI_PM_DATA_A 0x410 -#define PCI_PM_DATA_B 0x414 -#define PCI_ID_VAL1 0x434 -#define PCI_ID_VAL2 0x438 - -#define PXPCS_TL_CONTROL_5 0x814 -#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/ -#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/ -#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/ -#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/ -#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/ -#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/ -#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/ -#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/ -#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/ -#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/ -#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/ -#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/ -#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/ - - -#define PXPCS_TL_FUNC345_STAT 0x854 -#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\ - (1 << 28) /* Unsupported Request Error Status in function4, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\ - (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\ - (1 << 26) /* Malformed TLP Status Status in function 4, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\ - (1 << 25) /* Receiver Overflow Status Status in function 4, if \ - set, generate pcie_err_attn output when this error is seen.. WC \ - */ -#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\ - (1 << 24) /* Unexpected Completion Status Status in function 4, \ - if set, generate pcie_err_attn output when this error is seen. 
WC \ - */ -#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\ - (1 << 23) /* Receive UR Statusin function 4. If set, generate \ - pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\ - (1 << 22) /* Completer Timeout Status Status in function 4, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\ - (1 << 21) /* Flow Control Protocol Error Status Status in \ - function 4, if set, generate pcie_err_attn output when this error \ - is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\ - (1 << 20) /* Poisoned Error Status Status in function 4, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\ - (1 << 18) /* Unsupported Request Error Status in function3, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\ - (1 << 17) /* ECRC Error TLP Status Status in function 3, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\ - (1 << 16) /* Malformed TLP Status Status in function 3, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\ - (1 << 15) /* Receiver Overflow Status Status in function 3, if \ - set, generate pcie_err_attn output when this error is seen.. WC \ - */ -#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\ - (1 << 14) /* Unexpected Completion Status Status in function 3, \ - if set, generate pcie_err_attn output when this error is seen. WC \ - */ -#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\ - (1 << 13) /* Receive UR Statusin function 3. If set, generate \ - pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\ - (1 << 12) /* Completer Timeout Status Status in function 3, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\ - (1 << 11) /* Flow Control Protocol Error Status Status in \ - function 3, if set, generate pcie_err_attn output when this error \ - is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\ - (1 << 10) /* Poisoned Error Status Status in function 3, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\ - (1 << 8) /* Unsupported Request Error Status for Function 2, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\ - (1 << 7) /* ECRC Error TLP Status Status for Function 2, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\ - (1 << 6) /* Malformed TLP Status Status for Function 2, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\ - (1 << 5) /* Receiver Overflow Status Status for Function 2, if \ - set, generate pcie_err_attn output when this error is seen.. WC \ - */ -#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\ - (1 << 4) /* Unexpected Completion Status Status for Function 2, \ - if set, generate pcie_err_attn output when this error is seen. WC \ - */ -#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\ - (1 << 3) /* Receive UR Statusfor Function 2. 
If set, generate \ - pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\ - (1 << 2) /* Completer Timeout Status Status for Function 2, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\ - (1 << 1) /* Flow Control Protocol Error Status Status for \ - Function 2, if set, generate pcie_err_attn output when this error \ - is seen. WC */ -#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\ - (1 << 0) /* Poisoned Error Status Status for Function 2, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ - - -#define PXPCS_TL_FUNC678_STAT 0x85C -#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\ - (1 << 28) /* Unsupported Request Error Status in function7, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\ - (1 << 27) /* ECRC Error TLP Status Status in function 7, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\ - (1 << 26) /* Malformed TLP Status Status in function 7, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\ - (1 << 25) /* Receiver Overflow Status Status in function 7, if \ - set, generate pcie_err_attn output when this error is seen.. WC \ - */ -#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\ - (1 << 24) /* Unexpected Completion Status Status in function 7, \ - if set, generate pcie_err_attn output when this error is seen. WC \ - */ -#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\ - (1 << 23) /* Receive UR Statusin function 7. If set, generate \ - pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\ - (1 << 22) /* Completer Timeout Status Status in function 7, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\ - (1 << 21) /* Flow Control Protocol Error Status Status in \ - function 7, if set, generate pcie_err_attn output when this error \ - is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\ - (1 << 20) /* Poisoned Error Status Status in function 7, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\ - (1 << 18) /* Unsupported Request Error Status in function6, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\ - (1 << 17) /* ECRC Error TLP Status Status in function 6, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\ - (1 << 16) /* Malformed TLP Status Status in function 6, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\ - (1 << 15) /* Receiver Overflow Status Status in function 6, if \ - set, generate pcie_err_attn output when this error is seen.. WC \ - */ -#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\ - (1 << 14) /* Unexpected Completion Status Status in function 6, \ - if set, generate pcie_err_attn output when this error is seen. WC \ - */ -#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\ - (1 << 13) /* Receive UR Statusin function 6. If set, generate \ - pcie_err_attn output when this error is seen. 
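
The FUNCxxx_STAT registers above follow a regular layout: each PCI function owns a 10-bit group of error bits (functions 2-4 in PXPCS_TL_FUNC345_STAT at bit offsets 0, 10 and 20; functions 5-7 likewise in PXPCS_TL_FUNC678_STAT), with the same meaning at the same position inside every group. A sketch of a table-free decode exploiting that regularity (the helper name is invented for illustration):

	#include <stdint.h>

	#define FUNC_ERR_GROUP_WIDTH	10
	#define FUNC_ERR_GROUP_MASK	0x3ffU

	/* Return the 10-bit error group of PCI function 'func' (2..7).
	 * Bit 0 of the result is ERR_PSND_TLP and bit 9 is
	 * PRI_SIG_TARGET_ABORT, mirroring the defines above. */
	static uint32_t pcie_func_err_bits(uint32_t stat345, uint32_t stat678,
					   int func)
	{
		uint32_t reg  = (func <= 4) ? stat345 : stat678;
		int	 base = (func <= 4) ? 2 : 5;

		return (reg >> ((func - base) * FUNC_ERR_GROUP_WIDTH)) &
		       FUNC_ERR_GROUP_MASK;
	}
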
WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\ - (1 << 12) /* Completer Timeout Status Status in function 6, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\ - (1 << 11) /* Flow Control Protocol Error Status Status in \ - function 6, if set, generate pcie_err_attn output when this error \ - is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\ - (1 << 10) /* Poisoned Error Status Status in function 6, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\ - (1 << 8) /* Unsupported Request Error Status for Function 5, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\ - (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\ - (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \ - generate pcie_err_attn output when this error is seen.. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\ - (1 << 5) /* Receiver Overflow Status Status for Function 5, if \ - set, generate pcie_err_attn output when this error is seen.. WC \ - */ -#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\ - (1 << 4) /* Unexpected Completion Status Status for Function 5, \ - if set, generate pcie_err_attn output when this error is seen. WC \ - */ -#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\ - (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \ - pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\ - (1 << 2) /* Completer Timeout Status Status for Function 5, if \ - set, generate pcie_err_attn output when this error is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\ - (1 << 1) /* Flow Control Protocol Error Status Status for \ - Function 5, if set, generate pcie_err_attn output when this error \ - is seen. WC */ -#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\ - (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \ - generate pcie_err_attn output when this error is seen.. 
WC */ - - -#define BAR_USTRORM_INTMEM 0x400000 -#define BAR_CSTRORM_INTMEM 0x410000 -#define BAR_XSTRORM_INTMEM 0x420000 -#define BAR_TSTRORM_INTMEM 0x430000 - -/* for accessing the IGU in case of status block ACK */ -#define BAR_IGU_INTMEM 0x440000 - -#define BAR_DOORBELL_OFFSET 0x800000 - -#define BAR_ME_REGISTER 0x450000 -#define ME_REG_PF_NUM_SHIFT 0 -#define ME_REG_PF_NUM\ - (7L<> 1; - } - - /* split the crc into 8 bits */ - for (i = 0; i < 8; i++) { - C[i] = crc & 1; - crc = crc >> 1; - } - - NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^ - D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^ - C[6] ^ C[7]; - NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^ - D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^ - D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ - C[6]; - NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^ - D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^ - C[0] ^ C[1] ^ C[4] ^ C[5]; - NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^ - D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^ - C[1] ^ C[2] ^ C[5] ^ C[6]; - NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^ - D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^ - C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7]; - NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^ - D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^ - C[3] ^ C[4] ^ C[7]; - NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^ - D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ - C[5]; - NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^ - D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ - C[6]; - - crc_res = 0; - for (i = 0; i < 8; i++) - crc_res |= (NewCRC[i] << i); - - return crc_res; -} - - -#endif /* BNX2X_REG_H */ diff --git a/drivers/net/bnx2x/bnx2x_sp.c b/drivers/net/bnx2x/bnx2x_sp.c deleted file mode 100644 index df52f11..0000000 --- a/drivers/net/bnx2x/bnx2x_sp.c +++ /dev/null @@ -1,5692 +0,0 @@ -/* bnx2x_sp.c: Broadcom Everest network driver. - * - * Copyright 2011 Broadcom Corporation - * - * Unless you and Broadcom execute a separate written software license - * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). - * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written - * consent. 
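
The D[]/C[]/NewCRC[] expressions near the end of bnx2x_reg.h above are one fully unrolled pass of a bit-serial CRC-8 over a 32-bit word: each output bit XORs together exactly the data and seed bits that a shift-register implementation would have folded in. The loop form below shows the same computation structure; it uses the common polynomial 0x07 purely as an illustration, since the hardware fixes its own polynomial (encoded in the XOR network, not restated here):

	#include <stdint.h>

	/* Bit-serial CRC-8 over one 32-bit word, LSB first: the
	 * shift-register equivalent of an unrolled XOR network.
	 * Polynomial 0x07 (x^8 + x^2 + x + 1), reflected to 0xe0,
	 * is an example choice only. */
	static uint8_t crc8_word(uint32_t data, uint8_t crc)
	{
		int i;

		for (i = 0; i < 32; i++) {
			uint8_t fb = (uint8_t)(((data >> i) ^ crc) & 1);

			crc >>= 1;
			if (fb)
				crc ^= 0xe0;
		}
		return crc;
	}
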
- * - * Maintained by: Eilon Greenstein - * Written by: Vladislav Zolotarov - * - */ -#include -#include -#include -#include -#include -#include "bnx2x.h" -#include "bnx2x_cmn.h" -#include "bnx2x_sp.h" - -#define BNX2X_MAX_EMUL_MULTI 16 - -/**** Exe Queue interfaces ****/ - -/** - * bnx2x_exe_queue_init - init the Exe Queue object - * - * @o: poiter to the object - * @exe_len: length - * @owner: poiter to the owner - * @validate: validate function pointer - * @optimize: optimize function pointer - * @exec: execute function pointer - * @get: get function pointer - */ -static inline void bnx2x_exe_queue_init(struct bnx2x *bp, - struct bnx2x_exe_queue_obj *o, - int exe_len, - union bnx2x_qable_obj *owner, - exe_q_validate validate, - exe_q_optimize optimize, - exe_q_execute exec, - exe_q_get get) -{ - memset(o, 0, sizeof(*o)); - - INIT_LIST_HEAD(&o->exe_queue); - INIT_LIST_HEAD(&o->pending_comp); - - spin_lock_init(&o->lock); - - o->exe_chunk_len = exe_len; - o->owner = owner; - - /* Owner specific callbacks */ - o->validate = validate; - o->optimize = optimize; - o->execute = exec; - o->get = get; - - DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk " - "length of %d\n", exe_len); -} - -static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, - struct bnx2x_exeq_elem *elem) -{ - DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n"); - kfree(elem); -} - -static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o) -{ - struct bnx2x_exeq_elem *elem; - int cnt = 0; - - spin_lock_bh(&o->lock); - - list_for_each_entry(elem, &o->exe_queue, link) - cnt++; - - spin_unlock_bh(&o->lock); - - return cnt; -} - -/** - * bnx2x_exe_queue_add - add a new element to the execution queue - * - * @bp: driver handle - * @o: queue - * @cmd: new command to add - * @restore: true - do not optimize the command - * - * If the element is optimized or is illegal, frees it. - */ -static inline int bnx2x_exe_queue_add(struct bnx2x *bp, - struct bnx2x_exe_queue_obj *o, - struct bnx2x_exeq_elem *elem, - bool restore) -{ - int rc; - - spin_lock_bh(&o->lock); - - if (!restore) { - /* Try to cancel this element queue */ - rc = o->optimize(bp, o->owner, elem); - if (rc) - goto free_and_exit; - - /* Check if this request is ok */ - rc = o->validate(bp, o->owner, elem); - if (rc) { - BNX2X_ERR("Preamble failed: %d\n", rc); - goto free_and_exit; - } - } - - /* If so, add it to the execution queue */ - list_add_tail(&elem->link, &o->exe_queue); - - spin_unlock_bh(&o->lock); - - return 0; - -free_and_exit: - bnx2x_exe_queue_free_elem(bp, elem); - - spin_unlock_bh(&o->lock); - - return rc; - -} - -static inline void __bnx2x_exe_queue_reset_pending( - struct bnx2x *bp, - struct bnx2x_exe_queue_obj *o) -{ - struct bnx2x_exeq_elem *elem; - - while (!list_empty(&o->pending_comp)) { - elem = list_first_entry(&o->pending_comp, - struct bnx2x_exeq_elem, link); - - list_del(&elem->link); - bnx2x_exe_queue_free_elem(bp, elem); - } -} - -static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, - struct bnx2x_exe_queue_obj *o) -{ - - spin_lock_bh(&o->lock); - - __bnx2x_exe_queue_reset_pending(bp, o); - - spin_unlock_bh(&o->lock); - -} - -/** - * bnx2x_exe_queue_step - execute one execution chunk atomically - * - * @bp: driver handle - * @o: queue - * @ramrod_flags: flags - * - * (Atomicy is ensured using the exe_queue->lock). 
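
Taken together, the add path above gives the queue a simple contract: a new command is first offered to optimize(), which may cancel it against an opposite pending command, then to validate(), which may reject it, and only then linked to the tail; on either failure the element is freed before returning. A condensed, self-contained restatement of that contract (the bnx2x types and locking are deliberately elided):

	#include <stdbool.h>
	#include <stdlib.h>

	struct exe_elem { struct exe_elem *next; };

	struct exe_queue {
		struct exe_elem *head, *tail;
		/* Owner-supplied hooks, as in bnx2x_exe_queue_init(). */
		int (*optimize)(struct exe_queue *q, struct exe_elem *e);
		int (*validate)(struct exe_queue *q, struct exe_elem *e);
	};

	static int exe_queue_offer(struct exe_queue *q, struct exe_elem *e,
				   bool restore)
	{
		int rc = 0;

		if (!restore) {
			rc = q->optimize(q, e);
			if (!rc)
				rc = q->validate(q, e);
		}
		if (rc) {
			free(e);	/* rejected or optimized away */
			return rc;
		}
		e->next = NULL;		/* accepted: append to the tail */
		if (q->tail)
			q->tail->next = e;
		else
			q->head = e;
		q->tail = e;
		return 0;
	}
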
- */ -static inline int bnx2x_exe_queue_step(struct bnx2x *bp, - struct bnx2x_exe_queue_obj *o, - unsigned long *ramrod_flags) -{ - struct bnx2x_exeq_elem *elem, spacer; - int cur_len = 0, rc; - - memset(&spacer, 0, sizeof(spacer)); - - spin_lock_bh(&o->lock); - - /* - * Next step should not be performed until the current is finished, - * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to - * properly clear object internals without sending any command to the FW - * which also implies there won't be any completion to clear the - * 'pending' list. - */ - if (!list_empty(&o->pending_comp)) { - if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { - DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " - "resetting pending_comp\n"); - __bnx2x_exe_queue_reset_pending(bp, o); - } else { - spin_unlock_bh(&o->lock); - return 1; - } - } - - /* - * Run through the pending commands list and create a next - * execution chunk. - */ - while (!list_empty(&o->exe_queue)) { - elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem, - link); - WARN_ON(!elem->cmd_len); - - if (cur_len + elem->cmd_len <= o->exe_chunk_len) { - cur_len += elem->cmd_len; - /* - * Prevent from both lists being empty when moving an - * element. This will allow the call of - * bnx2x_exe_queue_empty() without locking. - */ - list_add_tail(&spacer.link, &o->pending_comp); - mb(); - list_del(&elem->link); - list_add_tail(&elem->link, &o->pending_comp); - list_del(&spacer.link); - } else - break; - } - - /* Sanity check */ - if (!cur_len) { - spin_unlock_bh(&o->lock); - return 0; - } - - rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); - if (rc < 0) - /* - * In case of an error return the commands back to the queue - * and reset the pending_comp. - */ - list_splice_init(&o->pending_comp, &o->exe_queue); - else if (!rc) - /* - * If zero is returned, means there are no outstanding pending - * completions and we may dismiss the pending list. - */ - __bnx2x_exe_queue_reset_pending(bp, o); - - spin_unlock_bh(&o->lock); - return rc; -} - -static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o) -{ - bool empty = list_empty(&o->exe_queue); - - /* Don't reorder!!! 
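 * The pairing is with the spacer element in bnx2x_exe_queue_step(): the
 * spacer is linked into pending_comp before the element leaves exe_queue,
 * so a reader that finds exe_queue empty is guaranteed to still find
 * pending_comp non-empty while a command is in flight between the lists.
 * The barrier below keeps the two list_empty() reads in program order,
 * which is what makes this lock-free emptiness check safe.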
*/ - mb(); - - return empty && list_empty(&o->pending_comp); -} - -static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem( - struct bnx2x *bp) -{ - DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n"); - return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC); -} - -/************************ raw_obj functions ***********************************/ -static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o) -{ - return !!test_bit(o->state, o->pstate); -} - -static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) -{ - smp_mb__before_clear_bit(); - clear_bit(o->state, o->pstate); - smp_mb__after_clear_bit(); -} - -static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) -{ - smp_mb__before_clear_bit(); - set_bit(o->state, o->pstate); - smp_mb__after_clear_bit(); -} - -/** - * bnx2x_state_wait - wait until the given bit(state) is cleared - * - * @bp: device handle - * @state: state which is to be cleared - * @state_p: state buffer - * - */ -static inline int bnx2x_state_wait(struct bnx2x *bp, int state, - unsigned long *pstate) -{ - /* can take a while if any port is running */ - int cnt = 5000; - - - if (CHIP_REV_IS_EMUL(bp)) - cnt *= 20; - - DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state); - - might_sleep(); - while (cnt--) { - if (!test_bit(state, pstate)) { -#ifdef BNX2X_STOP_ON_ERROR - DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt); -#endif - return 0; - } - - usleep_range(1000, 1000); - - if (bp->panic) - return -EIO; - } - - /* timeout! */ - BNX2X_ERR("timeout waiting for state %d\n", state); -#ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); -#endif - - return -EBUSY; -} - -static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw) -{ - return bnx2x_state_wait(bp, raw->state, raw->pstate); -} - -/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ -/* credit handling callbacks */ -static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset) -{ - struct bnx2x_credit_pool_obj *mp = o->macs_pool; - - WARN_ON(!mp); - - return mp->get_entry(mp, offset); -} - -static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o) -{ - struct bnx2x_credit_pool_obj *mp = o->macs_pool; - - WARN_ON(!mp); - - return mp->get(mp, 1); -} - -static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset) -{ - struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - - WARN_ON(!vp); - - return vp->get_entry(vp, offset); -} - -static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) -{ - struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - - WARN_ON(!vp); - - return vp->get(vp, 1); -} - -static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) -{ - struct bnx2x_credit_pool_obj *mp = o->macs_pool; - struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - - if (!mp->get(mp, 1)) - return false; - - if (!vp->get(vp, 1)) { - mp->put(mp, 1); - return false; - } - - return true; -} - -static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) -{ - struct bnx2x_credit_pool_obj *mp = o->macs_pool; - - return mp->put_entry(mp, offset); -} - -static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o) -{ - struct bnx2x_credit_pool_obj *mp = o->macs_pool; - - return mp->put(mp, 1); -} - -static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset) -{ - struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - - return vp->put_entry(vp, offset); -} - -static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) -{ - struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - - 
return vp->put(vp, 1); -} - -static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) -{ - struct bnx2x_credit_pool_obj *mp = o->macs_pool; - struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - - if (!mp->put(mp, 1)) - return false; - - if (!vp->put(vp, 1)) { - mp->get(mp, 1); - return false; - } - - return true; -} - -/* check_add() callbacks */ -static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - - if (!is_valid_ether_addr(data->mac.mac)) - return -EINVAL; - - /* Check if a requested MAC already exists */ - list_for_each_entry(pos, &o->head, link) - if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) - return -EEXIST; - - return 0; -} - -static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - - list_for_each_entry(pos, &o->head, link) - if (data->vlan.vlan == pos->u.vlan.vlan) - return -EEXIST; - - return 0; -} - -static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - - list_for_each_entry(pos, &o->head, link) - if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && - (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, - ETH_ALEN))) - return -EEXIST; - - return 0; -} - - -/* check_del() callbacks */ -static struct bnx2x_vlan_mac_registry_elem * - bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - - list_for_each_entry(pos, &o->head, link) - if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) - return pos; - - return NULL; -} - -static struct bnx2x_vlan_mac_registry_elem * - bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - - list_for_each_entry(pos, &o->head, link) - if (data->vlan.vlan == pos->u.vlan.vlan) - return pos; - - return NULL; -} - -static struct bnx2x_vlan_mac_registry_elem * - bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - - list_for_each_entry(pos, &o->head, link) - if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && - (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, - ETH_ALEN))) - return pos; - - return NULL; -} - -/* check_move() callback */ -static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o, - struct bnx2x_vlan_mac_obj *dst_o, - union bnx2x_classification_ramrod_data *data) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - int rc; - - /* Check if we can delete the requested configuration from the first - * object. - */ - pos = src_o->check_del(src_o, data); - - /* check if configuration can be added */ - rc = dst_o->check_add(dst_o, data); - - /* If this classification can not be added (is already set) - * or can't be deleted - return an error. 
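 * In other words, a MOVE is feasible only when check_del() on the source
 * finds the entry (pos != NULL) and check_add() on the destination
 * accepts it (rc == 0); the test below simply folds the two conditions
 * together.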
- */ - if (rc || !pos) - return false; - - return true; -} - -static bool bnx2x_check_move_always_err( - struct bnx2x_vlan_mac_obj *src_o, - struct bnx2x_vlan_mac_obj *dst_o, - union bnx2x_classification_ramrod_data *data) -{ - return false; -} - - -static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) -{ - struct bnx2x_raw_obj *raw = &o->raw; - u8 rx_tx_flag = 0; - - if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || - (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) - rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; - - if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || - (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) - rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; - - return rx_tx_flag; -} - -/* LLH CAM line allocations */ -enum { - LLH_CAM_ISCSI_ETH_LINE = 0, - LLH_CAM_ETH_LINE, - LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 -}; - -static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, - bool add, unsigned char *dev_addr, int index) -{ - u32 wb_data[2]; - u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : - NIG_REG_LLH0_FUNC_MEM; - - if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE) - return; - - DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", - (add ? "ADD" : "DELETE"), index); - - if (add) { - /* LLH_FUNC_MEM is a u64 WB register */ - reg_offset += 8*index; - - wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | - (dev_addr[4] << 8) | dev_addr[5]); - wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); - - REG_WR_DMAE(bp, reg_offset, wb_data, 2); - } - - REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : - NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); -} - -/** - * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod - * - * @bp: device handle - * @o: queue for which we want to configure this rule - * @add: if true the command is an ADD command, DEL otherwise - * @opcode: CLASSIFY_RULE_OPCODE_XXX - * @hdr: pointer to a header to setup - * - */ -static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, bool add, int opcode, - struct eth_classify_cmd_header *hdr) -{ - struct bnx2x_raw_obj *raw = &o->raw; - - hdr->client_id = raw->cl_id; - hdr->func_id = raw->func_id; - - /* Rx or/and Tx (internal switching) configuration ? */ - hdr->cmd_general_data |= - bnx2x_vlan_mac_get_rx_tx_flag(o); - - if (add) - hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; - - hdr->cmd_general_data |= - (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); -} - -/** - * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header - * - * @cid: connection id - * @type: BNX2X_FILTER_XXX_PENDING - * @hdr: poiter to header to setup - * @rule_cnt: - * - * currently we always configure one rule and echo field to contain a CID and an - * opcode type. - */ -static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, - struct eth_classify_header *hdr, int rule_cnt) -{ - hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT); - hdr->rule_cnt = (u8)rule_cnt; -} - - -/* hw_config() callbacks */ -static void bnx2x_set_one_mac_e2(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, int rule_idx, - int cam_offset) -{ - struct bnx2x_raw_obj *raw = &o->raw; - struct eth_classify_rules_ramrod_data *data = - (struct eth_classify_rules_ramrod_data *)(raw->rdata); - int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; - union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? 
true : false; - unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; - u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; - - /* - * Set LLH CAM entry: currently only iSCSI and ETH macs are - * relevant. In addition, current implementation is tuned for a - * single ETH MAC. - * - * When multiple unicast ETH MACs PF configuration in switch - * independent mode is required (NetQ, multiple netdev MACs, - * etc.), consider better utilisation of 8 per function MAC - * entries in the LLH register. There is also - * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the - * total number of CAM entries to 16. - * - * Currently we won't configure NIG for MACs other than a primary ETH - * MAC and iSCSI L2 MAC. - * - * If this MAC is moving from one Queue to another, no need to change - * NIG configuration. - */ - if (cmd != BNX2X_VLAN_MAC_MOVE) { - if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags)) - bnx2x_set_mac_in_nig(bp, add, mac, - LLH_CAM_ISCSI_ETH_LINE); - else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags)) - bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE); - } - - /* Reset the ramrod data buffer for the first rule */ - if (rule_idx == 0) - memset(data, 0, sizeof(*data)); - - /* Setup a command header */ - bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC, - &rule_entry->mac.header); - - DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for " - "Queue %d\n", (add ? "add" : "delete"), - BNX2X_MAC_PRN_LIST(mac), raw->cl_id); - - /* Set a MAC itself */ - bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, - &rule_entry->mac.mac_mid, - &rule_entry->mac.mac_lsb, mac); - - /* MOVE: Add a rule that will add this MAC to the target Queue */ - if (cmd == BNX2X_VLAN_MAC_MOVE) { - rule_entry++; - rule_cnt++; - - /* Setup ramrod data */ - bnx2x_vlan_mac_set_cmd_hdr_e2(bp, - elem->cmd_data.vlan_mac.target_obj, - true, CLASSIFY_RULE_OPCODE_MAC, - &rule_entry->mac.header); - - /* Set a MAC itself */ - bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, - &rule_entry->mac.mac_mid, - &rule_entry->mac.mac_lsb, mac); - } - - /* Set the ramrod data header */ - /* TODO: take this to the higher level in order to prevent multiple - writing */ - bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, - rule_cnt); -} - -/** - * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod - * - * @bp: device handle - * @o: queue - * @type: - * @cam_offset: offset in cam memory - * @hdr: pointer to a header to setup - * - * E1/E1H - */ -static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, - struct mac_configuration_hdr *hdr) -{ - struct bnx2x_raw_obj *r = &o->raw; - - hdr->length = 1; - hdr->offset = (u8)cam_offset; - hdr->client_id = 0xff; - hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT)); -} - -static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, - u16 vlan_id, struct mac_configuration_entry *cfg_entry) -{ - struct bnx2x_raw_obj *r = &o->raw; - u32 cl_bit_vec = (1 << r->cl_id); - - cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); - cfg_entry->pf_id = r->func_id; - cfg_entry->vlan_id = cpu_to_le16(vlan_id); - - if (add) { - SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, - T_ETH_MAC_COMMAND_SET); - SET_FLAG(cfg_entry->flags, - MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); - - /* Set a MAC in a ramrod data */ - bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr, 
- &cfg_entry->middle_mac_addr, - &cfg_entry->lsb_mac_addr, mac); - } else - SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, - T_ETH_MAC_COMMAND_INVALIDATE); -} - -static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, - u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) -{ - struct mac_configuration_entry *cfg_entry = &config->config_table[0]; - struct bnx2x_raw_obj *raw = &o->raw; - - bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, - &config->hdr); - bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, - cfg_entry); - - DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n", - (add ? "setting" : "clearing"), - BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset); -} - -/** - * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data - * - * @bp: device handle - * @o: bnx2x_vlan_mac_obj - * @elem: bnx2x_exeq_elem - * @rule_idx: rule_idx - * @cam_offset: cam_offset - */ -static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, int rule_idx, - int cam_offset) -{ - struct bnx2x_raw_obj *raw = &o->raw; - struct mac_configuration_cmd *config = - (struct mac_configuration_cmd *)(raw->rdata); - /* - * 57710 and 57711 do not support MOVE command, - * so it's either ADD or DEL - */ - bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? - true : false; - - /* Reset the ramrod data buffer */ - memset(config, 0, sizeof(*config)); - - bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING, - cam_offset, add, - elem->cmd_data.vlan_mac.u.mac.mac, 0, - ETH_VLAN_FILTER_ANY_VLAN, config); -} - -static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, int rule_idx, - int cam_offset) -{ - struct bnx2x_raw_obj *raw = &o->raw; - struct eth_classify_rules_ramrod_data *data = - (struct eth_classify_rules_ramrod_data *)(raw->rdata); - int rule_cnt = rule_idx + 1; - union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; - int cmd = elem->cmd_data.vlan_mac.cmd; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; - u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; - - /* Reset the ramrod data buffer for the first rule */ - if (rule_idx == 0) - memset(data, 0, sizeof(*data)); - - /* Set a rule header */ - bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, - &rule_entry->vlan.header); - - DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? 
"add" : "delete"), - vlan); - - /* Set a VLAN itself */ - rule_entry->vlan.vlan = cpu_to_le16(vlan); - - /* MOVE: Add a rule that will add this MAC to the target Queue */ - if (cmd == BNX2X_VLAN_MAC_MOVE) { - rule_entry++; - rule_cnt++; - - /* Setup ramrod data */ - bnx2x_vlan_mac_set_cmd_hdr_e2(bp, - elem->cmd_data.vlan_mac.target_obj, - true, CLASSIFY_RULE_OPCODE_VLAN, - &rule_entry->vlan.header); - - /* Set a VLAN itself */ - rule_entry->vlan.vlan = cpu_to_le16(vlan); - } - - /* Set the ramrod data header */ - /* TODO: take this to the higher level in order to prevent multiple - writing */ - bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, - rule_cnt); -} - -static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, - int rule_idx, int cam_offset) -{ - struct bnx2x_raw_obj *raw = &o->raw; - struct eth_classify_rules_ramrod_data *data = - (struct eth_classify_rules_ramrod_data *)(raw->rdata); - int rule_cnt = rule_idx + 1; - union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; - int cmd = elem->cmd_data.vlan_mac.cmd; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; - u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; - u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; - - - /* Reset the ramrod data buffer for the first rule */ - if (rule_idx == 0) - memset(data, 0, sizeof(*data)); - - /* Set a rule header */ - bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, - &rule_entry->pair.header); - - /* Set VLAN and MAC themselvs */ - rule_entry->pair.vlan = cpu_to_le16(vlan); - bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, - &rule_entry->pair.mac_mid, - &rule_entry->pair.mac_lsb, mac); - - /* MOVE: Add a rule that will add this MAC to the target Queue */ - if (cmd == BNX2X_VLAN_MAC_MOVE) { - rule_entry++; - rule_cnt++; - - /* Setup ramrod data */ - bnx2x_vlan_mac_set_cmd_hdr_e2(bp, - elem->cmd_data.vlan_mac.target_obj, - true, CLASSIFY_RULE_OPCODE_PAIR, - &rule_entry->pair.header); - - /* Set a VLAN itself */ - rule_entry->pair.vlan = cpu_to_le16(vlan); - bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, - &rule_entry->pair.mac_mid, - &rule_entry->pair.mac_lsb, mac); - } - - /* Set the ramrod data header */ - /* TODO: take this to the higher level in order to prevent multiple - writing */ - bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, - rule_cnt); -} - -/** - * bnx2x_set_one_vlan_mac_e1h - - * - * @bp: device handle - * @o: bnx2x_vlan_mac_obj - * @elem: bnx2x_exeq_elem - * @rule_idx: rule_idx - * @cam_offset: cam_offset - */ -static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, - int rule_idx, int cam_offset) -{ - struct bnx2x_raw_obj *raw = &o->raw; - struct mac_configuration_cmd *config = - (struct mac_configuration_cmd *)(raw->rdata); - /* - * 57710 and 57711 do not support MOVE command, - * so it's either ADD or DEL - */ - bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 
- true : false; - - /* Reset the ramrod data buffer */ - memset(config, 0, sizeof(*config)); - - bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, - cam_offset, add, - elem->cmd_data.vlan_mac.u.vlan_mac.mac, - elem->cmd_data.vlan_mac.u.vlan_mac.vlan, - ETH_VLAN_FILTER_CLASSIFY, config); -} - -#define list_next_entry(pos, member) \ - list_entry((pos)->member.next, typeof(*(pos)), member) - -/** - * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element - * - * @bp: device handle - * @p: command parameters - * @ppos: pointer to the cooky - * - * reconfigure next MAC/VLAN/VLAN-MAC element from the - * previously configured elements list. - * - * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken - * into an account - * - * pointer to the cooky - that should be given back in the next call to make - * function handle the next element. If *ppos is set to NULL it will restart the - * iterator. If returned *ppos == NULL this means that the last element has been - * handled. - * - */ -static int bnx2x_vlan_mac_restore(struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p, - struct bnx2x_vlan_mac_registry_elem **ppos) -{ - struct bnx2x_vlan_mac_registry_elem *pos; - struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; - - /* If list is empty - there is nothing to do here */ - if (list_empty(&o->head)) { - *ppos = NULL; - return 0; - } - - /* make a step... */ - if (*ppos == NULL) - *ppos = list_first_entry(&o->head, - struct bnx2x_vlan_mac_registry_elem, - link); - else - *ppos = list_next_entry(*ppos, link); - - pos = *ppos; - - /* If it's the last step - return NULL */ - if (list_is_last(&pos->link, &o->head)) - *ppos = NULL; - - /* Prepare a 'user_req' */ - memcpy(&p->user_req.u, &pos->u, sizeof(pos->u)); - - /* Set the command */ - p->user_req.cmd = BNX2X_VLAN_MAC_ADD; - - /* Set vlan_mac_flags */ - p->user_req.vlan_mac_flags = pos->vlan_mac_flags; - - /* Set a restore bit */ - __set_bit(RAMROD_RESTORE, &p->ramrod_flags); - - return bnx2x_config_vlan_mac(bp, p); -} - -/* - * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a - * pointer to an element with a specific criteria and NULL if such an element - * hasn't been found. 
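 * Each helper compares both the classification data and the command type,
 * so an ADD and a DEL of the same address are distinct queue entries;
 * the validate and optimize callbacks below rely on that distinction when
 * they probe the queue with a query element whose command is swapped.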
- */ -static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac( - struct bnx2x_exe_queue_obj *o, - struct bnx2x_exeq_elem *elem) -{ - struct bnx2x_exeq_elem *pos; - struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; - - /* Check pending for execution commands */ - list_for_each_entry(pos, &o->exe_queue, link) - if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data, - sizeof(*data)) && - (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) - return pos; - - return NULL; -} - -static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( - struct bnx2x_exe_queue_obj *o, - struct bnx2x_exeq_elem *elem) -{ - struct bnx2x_exeq_elem *pos; - struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; - - /* Check pending for execution commands */ - list_for_each_entry(pos, &o->exe_queue, link) - if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data, - sizeof(*data)) && - (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) - return pos; - - return NULL; -} - -static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( - struct bnx2x_exe_queue_obj *o, - struct bnx2x_exeq_elem *elem) -{ - struct bnx2x_exeq_elem *pos; - struct bnx2x_vlan_mac_ramrod_data *data = - &elem->cmd_data.vlan_mac.u.vlan_mac; - - /* Check pending for execution commands */ - list_for_each_entry(pos, &o->exe_queue, link) - if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, - sizeof(*data)) && - (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) - return pos; - - return NULL; -} - -/** - * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed - * - * @bp: device handle - * @qo: bnx2x_qable_obj - * @elem: bnx2x_exeq_elem - * - * Checks that the requested configuration can be added. If yes and if - * requested, consume CAM credit. - * - * The 'validate' is run after the 'optimize'. - * - */ -static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, - union bnx2x_qable_obj *qo, - struct bnx2x_exeq_elem *elem) -{ - struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; - struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; - int rc; - - /* Check the registry */ - rc = o->check_add(o, &elem->cmd_data.vlan_mac.u); - if (rc) { - DP(BNX2X_MSG_SP, "ADD command is not allowed considering " - "current registry state\n"); - return rc; - } - - /* - * Check if there is a pending ADD command for this - * MAC/VLAN/VLAN-MAC. Return an error if there is. - */ - if (exeq->get(exeq, elem)) { - DP(BNX2X_MSG_SP, "There is a pending ADD command already\n"); - return -EEXIST; - } - - /* - * TODO: Check the pending MOVE from other objects where this - * object is a destination object. - */ - - /* Consume the credit if not requested not to */ - if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, - &elem->cmd_data.vlan_mac.vlan_mac_flags) || - o->get_credit(o))) - return -EINVAL; - - return 0; -} - -/** - * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed - * - * @bp: device handle - * @qo: quable object to check - * @elem: element that needs to be deleted - * - * Checks that the requested configuration can be deleted. If yes and if - * requested, returns a CAM credit. - * - * The 'validate' is run after the 'optimize'. 
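 * The credit test at the end of this function reads as a double
 * negative; by De Morgan it is equivalent to:
 *
 *	bool consume = !test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
 *				 &elem->cmd_data.vlan_mac.vlan_mac_flags);
 *	if (consume && !o->get_credit(o))
 *		return -EINVAL;
 *
 * i.e. fail only when a credit must be consumed and none is available.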
- */ -static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, - union bnx2x_qable_obj *qo, - struct bnx2x_exeq_elem *elem) -{ - struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; - struct bnx2x_vlan_mac_registry_elem *pos; - struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; - struct bnx2x_exeq_elem query_elem; - - /* If this classification can not be deleted (doesn't exist) - * - return a BNX2X_EXIST. - */ - pos = o->check_del(o, &elem->cmd_data.vlan_mac.u); - if (!pos) { - DP(BNX2X_MSG_SP, "DEL command is not allowed considering " - "current registry state\n"); - return -EEXIST; - } - - /* - * Check if there are pending DEL or MOVE commands for this - * MAC/VLAN/VLAN-MAC. Return an error if so. - */ - memcpy(&query_elem, elem, sizeof(query_elem)); - - /* Check for MOVE commands */ - query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE; - if (exeq->get(exeq, &query_elem)) { - BNX2X_ERR("There is a pending MOVE command already\n"); - return -EINVAL; - } - - /* Check for DEL commands */ - if (exeq->get(exeq, elem)) { - DP(BNX2X_MSG_SP, "There is a pending DEL command already\n"); - return -EEXIST; - } - - /* Return the credit to the credit pool if not requested not to */ - if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, - &elem->cmd_data.vlan_mac.vlan_mac_flags) || - o->put_credit(o))) { - BNX2X_ERR("Failed to return a credit\n"); - return -EINVAL; - } - - return 0; -} - -/** - * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed - * - * @bp: device handle - * @qo: quable object to check (source) - * @elem: element that needs to be moved - * - * Checks that the requested configuration can be moved. If yes and if - * requested, returns a CAM credit. - * - * The 'validate' is run after the 'optimize'. - */ -static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, - union bnx2x_qable_obj *qo, - struct bnx2x_exeq_elem *elem) -{ - struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac; - struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; - struct bnx2x_exeq_elem query_elem; - struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; - struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; - - /* - * Check if we can perform this operation based on the current registry - * state. - */ - if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) { - DP(BNX2X_MSG_SP, "MOVE command is not allowed considering " - "current registry state\n"); - return -EINVAL; - } - - /* - * Check if there is an already pending DEL or MOVE command for the - * source object or ADD command for a destination object. Return an - * error if so. 
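 * Three queues are consulted below: a pending DEL or MOVE on the source
 * and a pending ADD on the destination each abort the MOVE. On success,
 * unless the DONT_CONSUME flags say otherwise, one CAM credit is taken
 * from the destination object's pool and one is returned to the source's.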
- */ - memcpy(&query_elem, elem, sizeof(query_elem)); - - /* Check DEL on source */ - query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; - if (src_exeq->get(src_exeq, &query_elem)) { - BNX2X_ERR("There is a pending DEL command on the source " - "queue already\n"); - return -EINVAL; - } - - /* Check MOVE on source */ - if (src_exeq->get(src_exeq, elem)) { - DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n"); - return -EEXIST; - } - - /* Check ADD on destination */ - query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; - if (dest_exeq->get(dest_exeq, &query_elem)) { - BNX2X_ERR("There is a pending ADD command on the " - "destination queue already\n"); - return -EINVAL; - } - - /* Consume the credit if not requested not to */ - if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, - &elem->cmd_data.vlan_mac.vlan_mac_flags) || - dest_o->get_credit(dest_o))) - return -EINVAL; - - if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, - &elem->cmd_data.vlan_mac.vlan_mac_flags) || - src_o->put_credit(src_o))) { - /* return the credit taken from dest... */ - dest_o->put_credit(dest_o); - return -EINVAL; - } - - return 0; -} - -static int bnx2x_validate_vlan_mac(struct bnx2x *bp, - union bnx2x_qable_obj *qo, - struct bnx2x_exeq_elem *elem) -{ - switch (elem->cmd_data.vlan_mac.cmd) { - case BNX2X_VLAN_MAC_ADD: - return bnx2x_validate_vlan_mac_add(bp, qo, elem); - case BNX2X_VLAN_MAC_DEL: - return bnx2x_validate_vlan_mac_del(bp, qo, elem); - case BNX2X_VLAN_MAC_MOVE: - return bnx2x_validate_vlan_mac_move(bp, qo, elem); - default: - return -EINVAL; - } -} - -/** - * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. - * - * @bp: device handle - * @o: bnx2x_vlan_mac_obj - * - */ -static int bnx2x_wait_vlan_mac(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o) -{ - int cnt = 5000, rc; - struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; - struct bnx2x_raw_obj *raw = &o->raw; - - while (cnt--) { - /* Wait for the current command to complete */ - rc = raw->wait_comp(bp, raw); - if (rc) - return rc; - - /* Wait until there are no pending commands */ - if (!bnx2x_exe_queue_empty(exeq)) - usleep_range(1000, 1000); - else - return 0; - } - - return -EBUSY; -} - -/** - * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod - * - * @bp: device handle - * @o: bnx2x_vlan_mac_obj - * @cqe: - * @cont: if true schedule next execution chunk - * - */ -static int bnx2x_complete_vlan_mac(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - union event_ring_elem *cqe, - unsigned long *ramrod_flags) -{ - struct bnx2x_raw_obj *r = &o->raw; - int rc; - - /* Reset pending list */ - bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); - - /* Clear pending */ - r->clear_pending(r); - - /* If ramrod failed this is most likely a SW bug */ - if (cqe->message.error) - return -EINVAL; - - /* Run the next bulk of pending commands if requeted */ - if (test_bit(RAMROD_CONT, ramrod_flags)) { - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); - if (rc < 0) - return rc; - } - - /* If there is more work to do return PENDING */ - if (!bnx2x_exe_queue_empty(&o->exe_queue)) - return 1; - - return 0; -} - -/** - * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands. 
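 * (A new DEL cancels a still-pending ADD of the same entry and vice
 * versa: the queued element is removed, the new request is dropped, and
 * the CAM credit the cancelled command took or returned is balanced
 * back, so no ramrod is sent for either.)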
- * - * @bp: device handle - * @o: bnx2x_qable_obj - * @elem: bnx2x_exeq_elem - */ -static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, - union bnx2x_qable_obj *qo, - struct bnx2x_exeq_elem *elem) -{ - struct bnx2x_exeq_elem query, *pos; - struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; - struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; - - memcpy(&query, elem, sizeof(query)); - - switch (elem->cmd_data.vlan_mac.cmd) { - case BNX2X_VLAN_MAC_ADD: - query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; - break; - case BNX2X_VLAN_MAC_DEL: - query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; - break; - default: - /* Don't handle anything other than ADD or DEL */ - return 0; - } - - /* If we found the appropriate element - delete it */ - pos = exeq->get(exeq, &query); - if (pos) { - - /* Return the credit of the optimized command */ - if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, - &pos->cmd_data.vlan_mac.vlan_mac_flags)) { - if ((query.cmd_data.vlan_mac.cmd == - BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) { - BNX2X_ERR("Failed to return the credit for the " - "optimized ADD command\n"); - return -EINVAL; - } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ - BNX2X_ERR("Failed to recover the credit from " - "the optimized DEL command\n"); - return -EINVAL; - } - } - - DP(BNX2X_MSG_SP, "Optimizing %s command\n", - (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? - "ADD" : "DEL"); - - list_del(&pos->link); - bnx2x_exe_queue_free_elem(bp, pos); - return 1; - } - - return 0; -} - -/** - * bnx2x_vlan_mac_get_registry_elem - prepare a registry element - * - * @bp: device handle - * @o: - * @elem: - * @restore: - * @re: - * - * prepare a registry element according to the current command request. - */ -static inline int bnx2x_vlan_mac_get_registry_elem( - struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, - bool restore, - struct bnx2x_vlan_mac_registry_elem **re) -{ - int cmd = elem->cmd_data.vlan_mac.cmd; - struct bnx2x_vlan_mac_registry_elem *reg_elem; - - /* Allocate a new registry element if needed. */ - if (!restore && - ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { - reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC); - if (!reg_elem) - return -ENOMEM; - - /* Get a new CAM offset */ - if (!o->get_cam_offset(o, ®_elem->cam_offset)) { - /* - * This shell never happen, because we have checked the - * CAM availiability in the 'validate'. - */ - WARN_ON(1); - kfree(reg_elem); - return -EINVAL; - } - - DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset); - - /* Set a VLAN-MAC data */ - memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, - sizeof(reg_elem->u)); - - /* Copy the flags (needed for DEL and RESTORE flows) */ - reg_elem->vlan_mac_flags = - elem->cmd_data.vlan_mac.vlan_mac_flags; - } else /* DEL, RESTORE */ - reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); - - *re = reg_elem; - return 0; -} - -/** - * bnx2x_execute_vlan_mac - execute vlan mac command - * - * @bp: device handle - * @qo: - * @exe_chunk: - * @ramrod_flags: - * - * go and send a ramrod! 
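 * With RAMROD_DRV_CLR_ONLY set no ramrod is posted at all: the registry
 * is updated as if the firmware had already completed every command,
 * which lets a driver-only cleanup run without live firmware. Note also
 * that a MOVE occupies two rule slots in the ramrod data (a delete on
 * the source plus an add on the destination), so it counts twice
 * against the chunk length.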
- */ -static int bnx2x_execute_vlan_mac(struct bnx2x *bp, - union bnx2x_qable_obj *qo, - struct list_head *exe_chunk, - unsigned long *ramrod_flags) -{ - struct bnx2x_exeq_elem *elem; - struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; - struct bnx2x_raw_obj *r = &o->raw; - int rc, idx = 0; - bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); - bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); - struct bnx2x_vlan_mac_registry_elem *reg_elem; - int cmd; - - /* - * If DRIVER_ONLY execution is requested, cleanup a registry - * and exit. Otherwise send a ramrod to FW. - */ - if (!drv_only) { - WARN_ON(r->check_pending(r)); - - /* Set pending */ - r->set_pending(r); - - /* Fill tha ramrod data */ - list_for_each_entry(elem, exe_chunk, link) { - cmd = elem->cmd_data.vlan_mac.cmd; - /* - * We will add to the target object in MOVE command, so - * change the object for a CAM search. - */ - if (cmd == BNX2X_VLAN_MAC_MOVE) - cam_obj = elem->cmd_data.vlan_mac.target_obj; - else - cam_obj = o; - - rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj, - elem, restore, - ®_elem); - if (rc) - goto error_exit; - - WARN_ON(!reg_elem); - - /* Push a new entry into the registry */ - if (!restore && - ((cmd == BNX2X_VLAN_MAC_ADD) || - (cmd == BNX2X_VLAN_MAC_MOVE))) - list_add(®_elem->link, &cam_obj->head); - - /* Configure a single command in a ramrod data buffer */ - o->set_one_rule(bp, o, elem, idx, - reg_elem->cam_offset); - - /* MOVE command consumes 2 entries in the ramrod data */ - if (cmd == BNX2X_VLAN_MAC_MOVE) - idx += 2; - else - idx++; - } - - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). 
- */ - - rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, - U64_HI(r->rdata_mapping), - U64_LO(r->rdata_mapping), - ETH_CONNECTION_TYPE); - if (rc) - goto error_exit; - } - - /* Now, when we are done with the ramrod - clean up the registry */ - list_for_each_entry(elem, exe_chunk, link) { - cmd = elem->cmd_data.vlan_mac.cmd; - if ((cmd == BNX2X_VLAN_MAC_DEL) || - (cmd == BNX2X_VLAN_MAC_MOVE)) { - reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); - - WARN_ON(!reg_elem); - - o->put_cam_offset(o, reg_elem->cam_offset); - list_del(®_elem->link); - kfree(reg_elem); - } - } - - if (!drv_only) - return 1; - else - return 0; - -error_exit: - r->clear_pending(r); - - /* Cleanup a registry in case of a failure */ - list_for_each_entry(elem, exe_chunk, link) { - cmd = elem->cmd_data.vlan_mac.cmd; - - if (cmd == BNX2X_VLAN_MAC_MOVE) - cam_obj = elem->cmd_data.vlan_mac.target_obj; - else - cam_obj = o; - - /* Delete all newly added above entries */ - if (!restore && - ((cmd == BNX2X_VLAN_MAC_ADD) || - (cmd == BNX2X_VLAN_MAC_MOVE))) { - reg_elem = o->check_del(cam_obj, - &elem->cmd_data.vlan_mac.u); - if (reg_elem) { - list_del(®_elem->link); - kfree(reg_elem); - } - } - } - - return rc; -} - -static inline int bnx2x_vlan_mac_push_new_cmd( - struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p) -{ - struct bnx2x_exeq_elem *elem; - struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; - bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags); - - /* Allocate the execution queue element */ - elem = bnx2x_exe_queue_alloc_elem(bp); - if (!elem) - return -ENOMEM; - - /* Set the command 'length' */ - switch (p->user_req.cmd) { - case BNX2X_VLAN_MAC_MOVE: - elem->cmd_len = 2; - break; - default: - elem->cmd_len = 1; - } - - /* Fill the object specific info */ - memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); - - /* Try to add a new command to the pending list */ - return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore); -} - -/** - * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. - * - * @bp: device handle - * @p: - * - */ -int bnx2x_config_vlan_mac( - struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p) -{ - int rc = 0; - struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; - unsigned long *ramrod_flags = &p->ramrod_flags; - bool cont = test_bit(RAMROD_CONT, ramrod_flags); - struct bnx2x_raw_obj *raw = &o->raw; - - /* - * Add new elements to the execution list for commands that require it. - */ - if (!cont) { - rc = bnx2x_vlan_mac_push_new_cmd(bp, p); - if (rc) - return rc; - } - - /* - * If nothing will be executed further in this iteration we want to - * return PENDING if there are pending commands - */ - if (!bnx2x_exe_queue_empty(&o->exe_queue)) - rc = 1; - - if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { - DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " - "clearing a pending bit.\n"); - raw->clear_pending(raw); - } - - /* Execute commands if required */ - if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || - test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); - if (rc < 0) - return rc; - } - - /* - * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set - * then user want to wait until the last command is done. - */ - if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { - /* - * Wait maximum for the current exe_queue length iterations plus - * one (for the current pending command). 
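 * A caller that wants fully synchronous behaviour therefore just sets
 * RAMROD_COMP_WAIT and issues a single call; a sketch (not a quote of
 * driver code):
 *
 *	struct bnx2x_vlan_mac_ramrod_params p = {0};
 *
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);  /* returns once the ADD is done */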
- */ - int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; - - while (!bnx2x_exe_queue_empty(&o->exe_queue) && - max_iterations--) { - - /* Wait for the current command to complete */ - rc = raw->wait_comp(bp, raw); - if (rc) - return rc; - - /* Make a next step */ - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, - ramrod_flags); - if (rc < 0) - return rc; - } - - return 0; - } - - return rc; -} - - - -/** - * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec - * - * @bp: device handle - * @o: - * @vlan_mac_flags: - * @ramrod_flags: execution flags to be used for this deletion - * - * if the last operation has completed successfully and there are no - * moreelements left, positive value if the last operation has completed - * successfully and there are more previously configured elements, negative - * value is current operation has failed. - */ -static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - unsigned long *vlan_mac_flags, - unsigned long *ramrod_flags) -{ - struct bnx2x_vlan_mac_registry_elem *pos = NULL; - int rc = 0; - struct bnx2x_vlan_mac_ramrod_params p; - struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; - struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; - - /* Clear pending commands first */ - - spin_lock_bh(&exeq->lock); - - list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { - if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == - *vlan_mac_flags) - list_del(&exeq_pos->link); - } - - spin_unlock_bh(&exeq->lock); - - /* Prepare a command request */ - memset(&p, 0, sizeof(p)); - p.vlan_mac_obj = o; - p.ramrod_flags = *ramrod_flags; - p.user_req.cmd = BNX2X_VLAN_MAC_DEL; - - /* - * Add all but the last VLAN-MAC to the execution queue without actually - * execution anything. 
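 * (RAMROD_COMP_WAIT, RAMROD_EXEC and RAMROD_CONT are cleared first, so
 * every bnx2x_config_vlan_mac() call in the loop below only queues a
 * DEL; the final call, made with RAMROD_CONT set, then flushes the
 * whole batch in chunk-sized steps without queueing anything new.)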
- */ - __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); - __clear_bit(RAMROD_EXEC, &p.ramrod_flags); - __clear_bit(RAMROD_CONT, &p.ramrod_flags); - - list_for_each_entry(pos, &o->head, link) { - if (pos->vlan_mac_flags == *vlan_mac_flags) { - p.user_req.vlan_mac_flags = pos->vlan_mac_flags; - memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); - rc = bnx2x_config_vlan_mac(bp, &p); - if (rc < 0) { - BNX2X_ERR("Failed to add a new DEL command\n"); - return rc; - } - } - } - - p.ramrod_flags = *ramrod_flags; - __set_bit(RAMROD_CONT, &p.ramrod_flags); - - return bnx2x_config_vlan_mac(bp, &p); -} - -static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id, - u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state, - unsigned long *pstate, bnx2x_obj_type type) -{ - raw->func_id = func_id; - raw->cid = cid; - raw->cl_id = cl_id; - raw->rdata = rdata; - raw->rdata_mapping = rdata_mapping; - raw->state = state; - raw->pstate = pstate; - raw->obj_type = type; - raw->check_pending = bnx2x_raw_check_pending; - raw->clear_pending = bnx2x_raw_clear_pending; - raw->set_pending = bnx2x_raw_set_pending; - raw->wait_comp = bnx2x_raw_wait; -} - -static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, - u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, - int state, unsigned long *pstate, bnx2x_obj_type type, - struct bnx2x_credit_pool_obj *macs_pool, - struct bnx2x_credit_pool_obj *vlans_pool) -{ - INIT_LIST_HEAD(&o->head); - - o->macs_pool = macs_pool; - o->vlans_pool = vlans_pool; - - o->delete_all = bnx2x_vlan_mac_del_all; - o->restore = bnx2x_vlan_mac_restore; - o->complete = bnx2x_complete_vlan_mac; - o->wait = bnx2x_wait_vlan_mac; - - bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, - state, pstate, type); -} - - -void bnx2x_init_mac_obj(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *mac_obj, - u8 cl_id, u32 cid, u8 func_id, void *rdata, - dma_addr_t rdata_mapping, int state, - unsigned long *pstate, bnx2x_obj_type type, - struct bnx2x_credit_pool_obj *macs_pool) -{ - union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj; - - bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, - rdata_mapping, state, pstate, type, - macs_pool, NULL); - - /* CAM credit pool handling */ - mac_obj->get_credit = bnx2x_get_credit_mac; - mac_obj->put_credit = bnx2x_put_credit_mac; - mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; - mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; - - if (CHIP_IS_E1x(bp)) { - mac_obj->set_one_rule = bnx2x_set_one_mac_e1x; - mac_obj->check_del = bnx2x_check_mac_del; - mac_obj->check_add = bnx2x_check_mac_add; - mac_obj->check_move = bnx2x_check_move_always_err; - mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; - - /* Exe Queue */ - bnx2x_exe_queue_init(bp, - &mac_obj->exe_queue, 1, qable_obj, - bnx2x_validate_vlan_mac, - bnx2x_optimize_vlan_mac, - bnx2x_execute_vlan_mac, - bnx2x_exeq_get_mac); - } else { - mac_obj->set_one_rule = bnx2x_set_one_mac_e2; - mac_obj->check_del = bnx2x_check_mac_del; - mac_obj->check_add = bnx2x_check_mac_add; - mac_obj->check_move = bnx2x_check_move; - mac_obj->ramrod_cmd = - RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; - - /* Exe Queue */ - bnx2x_exe_queue_init(bp, - &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, - qable_obj, bnx2x_validate_vlan_mac, - bnx2x_optimize_vlan_mac, - bnx2x_execute_vlan_mac, - bnx2x_exeq_get_mac); - } -} - -void bnx2x_init_vlan_obj(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *vlan_obj, - u8 cl_id, u32 cid, u8 func_id, void 
*rdata, - dma_addr_t rdata_mapping, int state, - unsigned long *pstate, bnx2x_obj_type type, - struct bnx2x_credit_pool_obj *vlans_pool) -{ - union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj; - - bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata, - rdata_mapping, state, pstate, type, NULL, - vlans_pool); - - vlan_obj->get_credit = bnx2x_get_credit_vlan; - vlan_obj->put_credit = bnx2x_put_credit_vlan; - vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan; - vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan; - - if (CHIP_IS_E1x(bp)) { - BNX2X_ERR("Do not support chips others than E2 and newer\n"); - BUG(); - } else { - vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2; - vlan_obj->check_del = bnx2x_check_vlan_del; - vlan_obj->check_add = bnx2x_check_vlan_add; - vlan_obj->check_move = bnx2x_check_move; - vlan_obj->ramrod_cmd = - RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; - - /* Exe Queue */ - bnx2x_exe_queue_init(bp, - &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, - qable_obj, bnx2x_validate_vlan_mac, - bnx2x_optimize_vlan_mac, - bnx2x_execute_vlan_mac, - bnx2x_exeq_get_vlan); - } -} - -void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *vlan_mac_obj, - u8 cl_id, u32 cid, u8 func_id, void *rdata, - dma_addr_t rdata_mapping, int state, - unsigned long *pstate, bnx2x_obj_type type, - struct bnx2x_credit_pool_obj *macs_pool, - struct bnx2x_credit_pool_obj *vlans_pool) -{ - union bnx2x_qable_obj *qable_obj = - (union bnx2x_qable_obj *)vlan_mac_obj; - - bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, - rdata_mapping, state, pstate, type, - macs_pool, vlans_pool); - - /* CAM pool handling */ - vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; - vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; - /* - * CAM offset is relevant for 57710 and 57711 chips only which have a - * single CAM for both MACs and VLAN-MAC pairs. So the offset - * will be taken from MACs' pool object only. 
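The del_all flow above is easier to see in isolation: every matching element becomes a DEL command queued with the execute/continue flags cleared, and only the final call sets the continue flag so the whole batch is flushed in one go. A minimal userspace sketch of that queue-then-execute split (the queue type and names here are simplified stand-ins, not the driver's exe-queue API):

#include <stdio.h>

#define MAX_CMDS 8

struct cmd_queue {
	int cmds[MAX_CMDS];
	int len;
};

/* Phase 1: enqueue a command without executing the queue. */
static void queue_cmd(struct cmd_queue *q, int cmd)
{
	if (q->len < MAX_CMDS)
		q->cmds[q->len++] = cmd;
}

/* Phase 2: execute everything that was queued, in FIFO order. */
static void execute_all(struct cmd_queue *q)
{
	for (int i = 0; i < q->len; i++)
		printf("executing DEL for element %d\n", q->cmds[i]);
	q->len = 0;
}

int main(void)
{
	struct cmd_queue q = { .len = 0 };

	/* Queue everything while triggering nothing, then flush in
	 * one shot -- mirroring the cleared-EXEC/set-CONT split. */
	for (int elem = 0; elem < 3; elem++)
		queue_cmd(&q, elem);
	execute_all(&q);
	return 0;
}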
- */ - vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; - vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; - - if (CHIP_IS_E1(bp)) { - BNX2X_ERR("Do not support chips others than E2\n"); - BUG(); - } else if (CHIP_IS_E1H(bp)) { - vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; - vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; - vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; - vlan_mac_obj->check_move = bnx2x_check_move_always_err; - vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; - - /* Exe Queue */ - bnx2x_exe_queue_init(bp, - &vlan_mac_obj->exe_queue, 1, qable_obj, - bnx2x_validate_vlan_mac, - bnx2x_optimize_vlan_mac, - bnx2x_execute_vlan_mac, - bnx2x_exeq_get_vlan_mac); - } else { - vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; - vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; - vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; - vlan_mac_obj->check_move = bnx2x_check_move; - vlan_mac_obj->ramrod_cmd = - RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; - - /* Exe Queue */ - bnx2x_exe_queue_init(bp, - &vlan_mac_obj->exe_queue, - CLASSIFY_RULES_COUNT, - qable_obj, bnx2x_validate_vlan_mac, - bnx2x_optimize_vlan_mac, - bnx2x_execute_vlan_mac, - bnx2x_exeq_get_vlan_mac); - } - -} - -/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ -static inline void __storm_memset_mac_filters(struct bnx2x *bp, - struct tstorm_eth_mac_filter_config *mac_filters, - u16 pf_id) -{ - size_t size = sizeof(struct tstorm_eth_mac_filter_config); - - u32 addr = BAR_TSTRORM_INTMEM + - TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); - - __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); -} - -static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, - struct bnx2x_rx_mode_ramrod_params *p) -{ - /* update the bp MAC filter structure */ - u32 mask = (1 << p->cl_id); - - struct tstorm_eth_mac_filter_config *mac_filters = - (struct tstorm_eth_mac_filter_config *)p->rdata; - - /* initial seeting is drop-all */ - u8 drop_all_ucast = 1, drop_all_mcast = 1; - u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; - u8 unmatched_unicast = 0; - - /* In e1x there we only take into account rx acceot flag since tx switching - * isn't enabled. */ - if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) - /* accept matched ucast */ - drop_all_ucast = 0; - - if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags)) - /* accept matched mcast */ - drop_all_mcast = 0; - - if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { - /* accept all mcast */ - drop_all_ucast = 0; - accp_all_ucast = 1; - } - if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { - /* accept all mcast */ - drop_all_mcast = 0; - accp_all_mcast = 1; - } - if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags)) - /* accept (all) bcast */ - accp_all_bcast = 1; - if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags)) - /* accept unmatched unicasts */ - unmatched_unicast = 1; - - mac_filters->ucast_drop_all = drop_all_ucast ? - mac_filters->ucast_drop_all | mask : - mac_filters->ucast_drop_all & ~mask; - - mac_filters->mcast_drop_all = drop_all_mcast ? - mac_filters->mcast_drop_all | mask : - mac_filters->mcast_drop_all & ~mask; - - mac_filters->ucast_accept_all = accp_all_ucast ? - mac_filters->ucast_accept_all | mask : - mac_filters->ucast_accept_all & ~mask; - - mac_filters->mcast_accept_all = accp_all_mcast ? - mac_filters->mcast_accept_all | mask : - mac_filters->mcast_accept_all & ~mask; - - mac_filters->bcast_accept_all = accp_all_bcast ? 
- mac_filters->bcast_accept_all | mask : - mac_filters->bcast_accept_all & ~mask; - - mac_filters->unmatched_unicast = unmatched_unicast ? - mac_filters->unmatched_unicast | mask : - mac_filters->unmatched_unicast & ~mask; - - DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" - "accp_mcast 0x%x\naccp_bcast 0x%x\n", - mac_filters->ucast_drop_all, - mac_filters->mcast_drop_all, - mac_filters->ucast_accept_all, - mac_filters->mcast_accept_all, - mac_filters->bcast_accept_all); - - /* write the MAC filter structure*/ - __storm_memset_mac_filters(bp, mac_filters, p->func_id); - - /* The operation is completed */ - clear_bit(p->state, p->pstate); - smp_mb__after_clear_bit(); - - return 0; -} - -/* Setup ramrod data */ -static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, - struct eth_classify_header *hdr, - u8 rule_cnt) -{ - hdr->echo = cid; - hdr->rule_cnt = rule_cnt; -} - -static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, - unsigned long accept_flags, - struct eth_filter_rules_cmd *cmd, - bool clear_accept_all) -{ - u16 state; - - /* start with 'drop-all' */ - state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | - ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; - - if (accept_flags) { - if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags)) - state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; - - if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags)) - state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; - - if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) { - state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; - state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; - } - - if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) { - state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; - state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; - } - if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags)) - state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; - - if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) { - state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; - state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; - } - if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags)) - state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; - } - - /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ - if (clear_accept_all) { - state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; - state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; - state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; - state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; - } - - cmd->state = cpu_to_le16(state); - -} - -static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, - struct bnx2x_rx_mode_ramrod_params *p) -{ - struct eth_filter_rules_ramrod_data *data = p->rdata; - int rc; - u8 rule_idx = 0; - - /* Reset the ramrod data buffer */ - memset(data, 0, sizeof(*data)); - - /* Setup ramrod data */ - - /* Tx (internal switching) */ - if (test_bit(RAMROD_TX, &p->ramrod_flags)) { - data->rules[rule_idx].client_id = p->cl_id; - data->rules[rule_idx].func_id = p->func_id; - - data->rules[rule_idx].cmd_general_data = - ETH_FILTER_RULES_CMD_TX_CMD; - - bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, - &(data->rules[rule_idx++]), false); - } - - /* Rx */ - if (test_bit(RAMROD_RX, &p->ramrod_flags)) { - data->rules[rule_idx].client_id = p->cl_id; - data->rules[rule_idx].func_id = p->func_id; - - data->rules[rule_idx].cmd_general_data = - ETH_FILTER_RULES_CMD_RX_CMD; - - bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, - &(data->rules[rule_idx++]), false); - } - - - /* - * If FCoE Queue configuration has been requested configure the Rx and - * internal switching modes 
for this queue in separate rules. - * - * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: - * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. - */ - if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { - /* Tx (internal switching) */ - if (test_bit(RAMROD_TX, &p->ramrod_flags)) { - data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); - data->rules[rule_idx].func_id = p->func_id; - - data->rules[rule_idx].cmd_general_data = - ETH_FILTER_RULES_CMD_TX_CMD; - - bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, - &(data->rules[rule_idx++]), - true); - } - - /* Rx */ - if (test_bit(RAMROD_RX, &p->ramrod_flags)) { - data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); - data->rules[rule_idx].func_id = p->func_id; - - data->rules[rule_idx].cmd_general_data = - ETH_FILTER_RULES_CMD_RX_CMD; - - bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, - &(data->rules[rule_idx++]), - true); - } - } - - /* - * Set the ramrod header (most importantly - number of rules to - * configure). - */ - bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); - - DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, " - "tx_accept_flags 0x%lx\n", - data->header.rule_cnt, p->rx_accept_flags, - p->tx_accept_flags); - - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). - */ - - /* Send a ramrod */ - rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid, - U64_HI(p->rdata_mapping), - U64_LO(p->rdata_mapping), - ETH_CONNECTION_TYPE); - if (rc) - return rc; - - /* Ramrod completion is pending */ - return 1; -} - -static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp, - struct bnx2x_rx_mode_ramrod_params *p) -{ - return bnx2x_state_wait(bp, p->state, p->pstate); -} - -static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp, - struct bnx2x_rx_mode_ramrod_params *p) -{ - /* Do nothing */ - return 0; -} - -int bnx2x_config_rx_mode(struct bnx2x *bp, - struct bnx2x_rx_mode_ramrod_params *p) -{ - int rc; - - /* Configure the new classification in the chip */ - rc = p->rx_mode_obj->config_rx_mode(bp, p); - if (rc < 0) - return rc; - - /* Wait for a ramrod completion if was requested */ - if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { - rc = p->rx_mode_obj->wait_comp(bp, p); - if (rc) - return rc; - } - - return rc; -} - -void bnx2x_init_rx_mode_obj(struct bnx2x *bp, - struct bnx2x_rx_mode_obj *o) -{ - if (CHIP_IS_E1x(bp)) { - o->wait_comp = bnx2x_empty_rx_mode_wait; - o->config_rx_mode = bnx2x_set_rx_mode_e1x; - } else { - o->wait_comp = bnx2x_wait_rx_mode_comp_e2; - o->config_rx_mode = bnx2x_set_rx_mode_e2; - } -} - -/********************* Multicast verbs: SET, CLEAR ****************************/ -static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac) -{ - return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff; -} - -struct bnx2x_mcast_mac_elem { - struct list_head link; - u8 mac[ETH_ALEN]; - u8 pad[2]; /* For a natural alignment of the following buffer */ -}; - -struct bnx2x_pending_mcast_cmd { - struct list_head link; - int type; /* BNX2X_MCAST_CMD_X */ - union { - struct list_head macs_head; - u32 macs_num; /* Needed for DEL command */ - int next_bin; /* Needed for RESTORE flow with aprox match */ - } data; - - bool done; /* set to true, when the command has been handled, - * practically used in 57712 handling only, where one pending - * 
command may be handled in a few operations. As long as for - * other chips every operation handling is completed in a - * single ramrod, there is no need to utilize this field. - */ -}; - -static int bnx2x_mcast_wait(struct bnx2x *bp, - struct bnx2x_mcast_obj *o) -{ - if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) || - o->raw.wait_comp(bp, &o->raw)) - return -EBUSY; - - return 0; -} - -static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, - struct bnx2x_mcast_ramrod_params *p, - int cmd) -{ - int total_sz; - struct bnx2x_pending_mcast_cmd *new_cmd; - struct bnx2x_mcast_mac_elem *cur_mac = NULL; - struct bnx2x_mcast_list_elem *pos; - int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ? - p->mcast_list_len : 0); - - /* If the command is empty ("handle pending commands only"), break */ - if (!p->mcast_list_len) - return 0; - - total_sz = sizeof(*new_cmd) + - macs_list_len * sizeof(struct bnx2x_mcast_mac_elem); - - /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ - new_cmd = kzalloc(total_sz, GFP_ATOMIC); - - if (!new_cmd) - return -ENOMEM; - - DP(BNX2X_MSG_SP, "About to enqueue a new %d command. " - "macs_list_len=%d\n", cmd, macs_list_len); - - INIT_LIST_HEAD(&new_cmd->data.macs_head); - - new_cmd->type = cmd; - new_cmd->done = false; - - switch (cmd) { - case BNX2X_MCAST_CMD_ADD: - cur_mac = (struct bnx2x_mcast_mac_elem *) - ((u8 *)new_cmd + sizeof(*new_cmd)); - - /* Push the MACs of the current command into the pendig command - * MACs list: FIFO - */ - list_for_each_entry(pos, &p->mcast_list, link) { - memcpy(cur_mac->mac, pos->mac, ETH_ALEN); - list_add_tail(&cur_mac->link, &new_cmd->data.macs_head); - cur_mac++; - } - - break; - - case BNX2X_MCAST_CMD_DEL: - new_cmd->data.macs_num = p->mcast_list_len; - break; - - case BNX2X_MCAST_CMD_RESTORE: - new_cmd->data.next_bin = 0; - break; - - default: - BNX2X_ERR("Unknown command: %d\n", cmd); - return -EINVAL; - } - - /* Push the new pending command to the tail of the pending list: FIFO */ - list_add_tail(&new_cmd->link, &o->pending_cmds_head); - - o->set_sched(o); - - return 1; -} - -/** - * bnx2x_mcast_get_next_bin - get the next set bin (index) - * - * @o: - * @last: index to start looking from (including) - * - * returns the next found (set) bin or a negative value if none is found. - */ -static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last) -{ - int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; - - for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) { - if (o->registry.aprox_match.vec[i]) - for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { - int cur_bit = j + BIT_VEC64_ELEM_SZ * i; - if (BIT_VEC64_TEST_BIT(o->registry.aprox_match. 
- vec, cur_bit)) { - return cur_bit; - } - } - inner_start = 0; - } - - /* None found */ - return -1; -} - -/** - * bnx2x_mcast_clear_first_bin - find the first set bin and clear it - * - * @o: - * - * returns the index of the found bin or -1 if none is found - */ -static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o) -{ - int cur_bit = bnx2x_mcast_get_next_bin(o, 0); - - if (cur_bit >= 0) - BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); - - return cur_bit; -} - -static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) -{ - struct bnx2x_raw_obj *raw = &o->raw; - u8 rx_tx_flag = 0; - - if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || - (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) - rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; - - if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || - (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) - rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; - - return rx_tx_flag; -} - -static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, int idx, - union bnx2x_mcast_config_data *cfg_data, - int cmd) -{ - struct bnx2x_raw_obj *r = &o->raw; - struct eth_multicast_rules_ramrod_data *data = - (struct eth_multicast_rules_ramrod_data *)(r->rdata); - u8 func_id = r->func_id; - u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); - int bin; - - if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) - rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; - - data->rules[idx].cmd_general_data |= rx_tx_add_flag; - - /* Get a bin and update a bins' vector */ - switch (cmd) { - case BNX2X_MCAST_CMD_ADD: - bin = bnx2x_mcast_bin_from_mac(cfg_data->mac); - BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); - break; - - case BNX2X_MCAST_CMD_DEL: - /* If there were no more bins to clear - * (bnx2x_mcast_clear_first_bin() returns -1) then we would - * clear any (0xff) bin. - * See bnx2x_mcast_validate_e2() for explanation when it may - * happen. - */ - bin = bnx2x_mcast_clear_first_bin(o); - break; - - case BNX2X_MCAST_CMD_RESTORE: - bin = cfg_data->bin; - break; - - default: - BNX2X_ERR("Unknown command: %d\n", cmd); - return; - } - - DP(BNX2X_MSG_SP, "%s bin %d\n", - ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? - "Setting" : "Clearing"), bin); - - data->rules[idx].bin_id = (u8)bin; - data->rules[idx].func_id = func_id; - data->rules[idx].engine_id = o->engine_id; -} - -/** - * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry - * - * @bp: device handle - * @o: - * @start_bin: index in the registry to start from (including) - * @rdata_idx: index in the ramrod data to start from - * - * returns last handled bin index or -1 if all bins have been handled - */ -static inline int bnx2x_mcast_handle_restore_cmd_e2( - struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, - int *rdata_idx) -{ - int cur_bin, cnt = *rdata_idx; - union bnx2x_mcast_config_data cfg_data = {0}; - - /* go through the registry and configure the bins from it */ - for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; - cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) { - - cfg_data.bin = (u8)cur_bin; - o->set_one_rule(bp, o, cnt, &cfg_data, - BNX2X_MCAST_CMD_RESTORE); - - cnt++; - - DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin); - - /* Break if we reached the maximum number - * of rules. 
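bnx2x_mcast_get_next_bin() above is a plain scan of a 64-bit-word vector that can resume from an arbitrary index. The same scan, self-contained (vector size and helpers are illustrative, not the driver's BIT_VEC64 macros):

#include <stdint.h>
#include <stdio.h>

#define ELEM_SZ 64
#define VEC_SZ  4	/* 256 bins, as an example */

/* Return the next set bit at or after 'last', or -1 if none. */
static int next_set_bin(const uint64_t *vec, int last)
{
	int inner = last % ELEM_SZ;

	for (int i = last / ELEM_SZ; i < VEC_SZ; i++) {
		if (vec[i])
			for (int j = inner; j < ELEM_SZ; j++)
				if (vec[i] & (1ULL << j))
					return i * ELEM_SZ + j;
		inner = 0;	/* only the first word starts mid-way */
	}
	return -1;
}

int main(void)
{
	uint64_t vec[VEC_SZ] = { 0 };

	vec[0] |= 1ULL << 5;
	vec[2] |= 1ULL << 17;	/* bin 145 */

	for (int b = next_set_bin(vec, 0); b >= 0;
	     b = next_set_bin(vec, b + 1))
		printf("bin %d is set\n", b);
	return 0;
}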
- */ - if (cnt >= o->max_cmd_len) - break; - } - - *rdata_idx = cnt; - - return cur_bin; -} - -static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, - int *line_idx) -{ - struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; - int cnt = *line_idx; - union bnx2x_mcast_config_data cfg_data = {0}; - - list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, - link) { - - cfg_data.mac = &pmac_pos->mac[0]; - o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); - - cnt++; - - DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT - " mcast MAC\n", - BNX2X_MAC_PRN_LIST(pmac_pos->mac)); - - list_del(&pmac_pos->link); - - /* Break if we reached the maximum number - * of rules. - */ - if (cnt >= o->max_cmd_len) - break; - } - - *line_idx = cnt; - - /* if no more MACs to configure - we are done */ - if (list_empty(&cmd_pos->data.macs_head)) - cmd_pos->done = true; -} - -static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, - int *line_idx) -{ - int cnt = *line_idx; - - while (cmd_pos->data.macs_num) { - o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); - - cnt++; - - cmd_pos->data.macs_num--; - - DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n", - cmd_pos->data.macs_num, cnt); - - /* Break if we reached the maximum - * number of rules. - */ - if (cnt >= o->max_cmd_len) - break; - } - - *line_idx = cnt; - - /* If we cleared all bins - we are done */ - if (!cmd_pos->data.macs_num) - cmd_pos->done = true; -} - -static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, - int *line_idx) -{ - cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, - line_idx); - - if (cmd_pos->data.next_bin < 0) - /* If o->set_restore returned -1 we are done */ - cmd_pos->done = true; - else - /* Start from the next bin next time */ - cmd_pos->data.next_bin++; -} - -static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p) -{ - struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n; - int cnt = 0; - struct bnx2x_mcast_obj *o = p->mcast_obj; - - list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head, - link) { - switch (cmd_pos->type) { - case BNX2X_MCAST_CMD_ADD: - bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); - break; - - case BNX2X_MCAST_CMD_DEL: - bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt); - break; - - case BNX2X_MCAST_CMD_RESTORE: - bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos, - &cnt); - break; - - default: - BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); - return -EINVAL; - } - - /* If the command has been completed - remove it from the list - * and free the memory - */ - if (cmd_pos->done) { - list_del(&cmd_pos->link); - kfree(cmd_pos); - } - - /* Break if we reached the maximum number of rules */ - if (cnt >= o->max_cmd_len) - break; - } - - return cnt; -} - -static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, - int *line_idx) -{ - struct bnx2x_mcast_list_elem *mlist_pos; - union bnx2x_mcast_config_data cfg_data = {0}; - int cnt = *line_idx; - - list_for_each_entry(mlist_pos, &p->mcast_list, link) { - cfg_data.mac = mlist_pos->mac; - o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); - - cnt++; - - DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT - " mcast MAC\n", - 
BNX2X_MAC_PRN_LIST(mlist_pos->mac)); - } - - *line_idx = cnt; -} - -static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, - int *line_idx) -{ - int cnt = *line_idx, i; - - for (i = 0; i < p->mcast_list_len; i++) { - o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL); - - cnt++; - - DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n", - p->mcast_list_len - i - 1); - } - - *line_idx = cnt; -} - -/** - * bnx2x_mcast_handle_current_cmd - - * - * @bp: device handle - * @p: - * @cmd: - * @start_cnt: first line in the ramrod data that may be used - * - * This function is called iff there is enough place for the current command in - * the ramrod data. - * Returns number of lines filled in the ramrod data in total. - */ -static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, int cmd, - int start_cnt) -{ - struct bnx2x_mcast_obj *o = p->mcast_obj; - int cnt = start_cnt; - - DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); - - switch (cmd) { - case BNX2X_MCAST_CMD_ADD: - bnx2x_mcast_hdl_add(bp, o, p, &cnt); - break; - - case BNX2X_MCAST_CMD_DEL: - bnx2x_mcast_hdl_del(bp, o, p, &cnt); - break; - - case BNX2X_MCAST_CMD_RESTORE: - o->hdl_restore(bp, o, 0, &cnt); - break; - - default: - BNX2X_ERR("Unknown command: %d\n", cmd); - return -EINVAL; - } - - /* The current command has been handled */ - p->mcast_list_len = 0; - - return cnt; -} - -static int bnx2x_mcast_validate_e2(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - int cmd) -{ - struct bnx2x_mcast_obj *o = p->mcast_obj; - int reg_sz = o->get_registry_size(o); - - switch (cmd) { - /* DEL command deletes all currently configured MACs */ - case BNX2X_MCAST_CMD_DEL: - o->set_registry_size(o, 0); - /* Don't break */ - - /* RESTORE command will restore the entire multicast configuration */ - case BNX2X_MCAST_CMD_RESTORE: - /* Here we set the approximate amount of work to do, which in - * fact may be only less as some MACs in postponed ADD - * command(s) scheduled before this command may fall into - * the same bin and the actual number of bins set in the - * registry would be less than we estimated here. See - * bnx2x_mcast_set_one_rule_e2() for further details. - */ - p->mcast_list_len = reg_sz; - break; - - case BNX2X_MCAST_CMD_ADD: - case BNX2X_MCAST_CMD_CONT: - /* Here we assume that all new MACs will fall into new bins. - * However we will correct the real registry size after we - * handle all pending commands. 
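The optimistic estimate described above is later corrected by bnx2x_mcast_refresh_registry_e2(), which recounts the set bins with Kernighan's clear-the-lowest-bit trick, so the recount costs one iteration per set bin rather than one per bit. A standalone sketch of both halves, with simplified sizes and names:

#include <stdint.h>
#include <stdio.h>

/* Optimistic sizing, as in the validate step: assume every newly
 * added MAC lands in its own bin. */
static int estimate_bins(int cur_bins, int new_macs)
{
	return cur_bins + new_macs;
}

/* Exact recount, as in the refresh step: elem &= elem - 1 clears
 * the lowest set bit each round. */
static int count_bins(const uint64_t *vec, int words)
{
	int cnt = 0;

	for (int i = 0; i < words; i++)
		for (uint64_t elem = vec[i]; elem; cnt++)
			elem &= elem - 1;
	return cnt;
}

int main(void)
{
	uint64_t vec[2] = { 0x11ULL, 0x1ULL };	/* 3 bins really set */

	/* Four MACs were added, but two collided into one bin. */
	printf("estimated %d, actual %d\n",
	       estimate_bins(0, 4), count_bins(vec, 2));
	return 0;
}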
- */ - o->set_registry_size(o, reg_sz + p->mcast_list_len); - break; - - default: - BNX2X_ERR("Unknown command: %d\n", cmd); - return -EINVAL; - - } - - /* Increase the total number of MACs pending to be configured */ - o->total_pending_num += p->mcast_list_len; - - return 0; -} - -static void bnx2x_mcast_revert_e2(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - int old_num_bins) -{ - struct bnx2x_mcast_obj *o = p->mcast_obj; - - o->set_registry_size(o, old_num_bins); - o->total_pending_num -= p->mcast_list_len; -} - -/** - * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values - * - * @bp: device handle - * @p: - * @len: number of rules to handle - */ -static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - u8 len) -{ - struct bnx2x_raw_obj *r = &p->mcast_obj->raw; - struct eth_multicast_rules_ramrod_data *data = - (struct eth_multicast_rules_ramrod_data *)(r->rdata); - - data->header.echo = ((r->cid & BNX2X_SWCID_MASK) | - (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); - data->header.rule_cnt = len; -} - -/** - * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins - * - * @bp: device handle - * @o: - * - * Recalculate the actual number of set bins in the registry using Brian - * Kernighan's algorithm: it's execution complexity is as a number of set bins. - * - * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1(). - */ -static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, - struct bnx2x_mcast_obj *o) -{ - int i, cnt = 0; - u64 elem; - - for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) { - elem = o->registry.aprox_match.vec[i]; - for (; elem; cnt++) - elem &= elem - 1; - } - - o->set_registry_size(o, cnt); - - return 0; -} - -static int bnx2x_mcast_setup_e2(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - int cmd) -{ - struct bnx2x_raw_obj *raw = &p->mcast_obj->raw; - struct bnx2x_mcast_obj *o = p->mcast_obj; - struct eth_multicast_rules_ramrod_data *data = - (struct eth_multicast_rules_ramrod_data *)(raw->rdata); - int cnt = 0, rc; - - /* Reset the ramrod data buffer */ - memset(data, 0, sizeof(*data)); - - cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p); - - /* If there are no more pending commands - clear SCHEDULED state */ - if (list_empty(&o->pending_cmds_head)) - o->clear_sched(o); - - /* The below may be true iff there was enough room in ramrod - * data for all pending commands and for the current - * command. Otherwise the current command would have been added - * to the pending commands and p->mcast_list_len would have been - * zeroed. - */ - if (p->mcast_list_len > 0) - cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt); - - /* We've pulled out some MACs - update the total number of - * outstanding. - */ - o->total_pending_num -= cnt; - - /* send a ramrod */ - WARN_ON(o->total_pending_num < 0); - WARN_ON(cnt > o->max_cmd_len); - - bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt); - - /* Update a registry size if there are no more pending operations. - * - * We don't want to change the value of the registry size if there are - * pending operations because we want it to always be equal to the - * exact or the approximate number (see bnx2x_mcast_validate_e2()) of - * set bins after the last requested operation in order to properly - * evaluate the size of the next DEL/RESTORE operation. - * - * Note that we update the registry itself during command(s) handling - * - see bnx2x_mcast_set_one_rule_e2(). 
That's because for 57712 we - * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but - * with a limited amount of update commands (per MAC/bin) and we don't - * know in this scope what the actual state of bins configuration is - * going to be after this ramrod. - */ - if (!o->total_pending_num) - bnx2x_mcast_refresh_registry_e2(bp, o); - - /* - * If CLEAR_ONLY was requested - don't send a ramrod and clear - * RAMROD_PENDING status immediately. - */ - if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { - raw->clear_pending(raw); - return 0; - } else { - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). - */ - - /* Send a ramrod */ - rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES, - raw->cid, U64_HI(raw->rdata_mapping), - U64_LO(raw->rdata_mapping), - ETH_CONNECTION_TYPE); - if (rc) - return rc; - - /* Ramrod completion is pending */ - return 1; - } -} - -static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - int cmd) -{ - /* Mark, that there is a work to do */ - if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) - p->mcast_list_len = 1; - - return 0; -} - -static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - int old_num_bins) -{ - /* Do nothing */ -} - -#define BNX2X_57711_SET_MC_FILTER(filter, bit) \ -do { \ - (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ -} while (0) - -static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, - struct bnx2x_mcast_ramrod_params *p, - u32 *mc_filter) -{ - struct bnx2x_mcast_list_elem *mlist_pos; - int bit; - - list_for_each_entry(mlist_pos, &p->mcast_list, link) { - bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac); - BNX2X_57711_SET_MC_FILTER(mc_filter, bit); - - DP(BNX2X_MSG_SP, "About to configure " - BNX2X_MAC_FMT" mcast MAC, bin %d\n", - BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit); - - /* bookkeeping... */ - BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, - bit); - } -} - -static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, - u32 *mc_filter) -{ - int bit; - - for (bit = bnx2x_mcast_get_next_bin(o, 0); - bit >= 0; - bit = bnx2x_mcast_get_next_bin(o, bit + 1)) { - BNX2X_57711_SET_MC_FILTER(mc_filter, bit); - DP(BNX2X_MSG_SP, "About to set bin %d\n", bit); - } -} - -/* On 57711 we write the multicast MACs' aproximate match - * table by directly into the TSTORM's internal RAM. So we don't - * really need to handle any tricks to make it work. - */ -static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - int cmd) -{ - int i; - struct bnx2x_mcast_obj *o = p->mcast_obj; - struct bnx2x_raw_obj *r = &o->raw; - - /* If CLEAR_ONLY has been requested - clear the registry - * and clear a pending bit. - */ - if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { - u32 mc_filter[MC_HASH_SIZE] = {0}; - - /* Set the multicast filter bits before writing it into - * the internal memory. 
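The 57711 path above reduces each multicast MAC to a bin number and sets the matching bit in a small array of 32-bit filter words. A userspace sketch of that mechanism; the hash below is a deliberate stand-in for the driver's crc32c-based binning, since any function mapping a MAC to 0..255 illustrates the table update:

#include <stdint.h>
#include <stdio.h>

#define MC_HASH_SIZE 8	/* 8 x 32 bits = 256 bins */

/* Stand-in hash: NOT the driver's crc32c, just something that
 * spreads MACs across 256 bins for the example. */
static uint8_t mac_to_bin(const uint8_t mac[6])
{
	uint8_t bin = 0;

	for (int i = 0; i < 6; i++)
		bin = (uint8_t)(bin * 31 + mac[i]);
	return bin;
}

/* Same shape as the SET_MC_FILTER macro: word index from the high
 * bits, bit mask from the low five bits. */
static void set_mc_filter(uint32_t *filter, int bit)
{
	filter[bit >> 5] |= 1u << (bit & 0x1f);
}

int main(void)
{
	uint32_t mc_filter[MC_HASH_SIZE] = { 0 };
	uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	int bin = mac_to_bin(mac);

	set_mc_filter(mc_filter, bin);
	printf("bin %d -> word %d, mask 0x%08x\n",
	       bin, bin >> 5, 1u << (bin & 0x1f));
	return 0;
}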
- */ - switch (cmd) { - case BNX2X_MCAST_CMD_ADD: - bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter); - break; - - case BNX2X_MCAST_CMD_DEL: - DP(BNX2X_MSG_SP, "Invalidating multicast " - "MACs configuration\n"); - - /* clear the registry */ - memset(o->registry.aprox_match.vec, 0, - sizeof(o->registry.aprox_match.vec)); - break; - - case BNX2X_MCAST_CMD_RESTORE: - bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter); - break; - - default: - BNX2X_ERR("Unknown command: %d\n", cmd); - return -EINVAL; - } - - /* Set the mcast filter in the internal memory */ - for (i = 0; i < MC_HASH_SIZE; i++) - REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]); - } else - /* clear the registry */ - memset(o->registry.aprox_match.vec, 0, - sizeof(o->registry.aprox_match.vec)); - - /* We are done */ - r->clear_pending(r); - - return 0; -} - -static int bnx2x_mcast_validate_e1(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - int cmd) -{ - struct bnx2x_mcast_obj *o = p->mcast_obj; - int reg_sz = o->get_registry_size(o); - - switch (cmd) { - /* DEL command deletes all currently configured MACs */ - case BNX2X_MCAST_CMD_DEL: - o->set_registry_size(o, 0); - /* Don't break */ - - /* RESTORE command will restore the entire multicast configuration */ - case BNX2X_MCAST_CMD_RESTORE: - p->mcast_list_len = reg_sz; - DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n", - cmd, p->mcast_list_len); - break; - - case BNX2X_MCAST_CMD_ADD: - case BNX2X_MCAST_CMD_CONT: - /* Multicast MACs on 57710 are configured as unicast MACs and - * there is only a limited number of CAM entries for that - * matter. - */ - if (p->mcast_list_len > o->max_cmd_len) { - BNX2X_ERR("Can't configure more than %d multicast MACs" - "on 57710\n", o->max_cmd_len); - return -EINVAL; - } - /* Every configured MAC should be cleared if DEL command is - * called. Only the last ADD command is relevant as long as - * every ADD commands overrides the previous configuration. - */ - DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); - if (p->mcast_list_len > 0) - o->set_registry_size(o, p->mcast_list_len); - - break; - - default: - BNX2X_ERR("Unknown command: %d\n", cmd); - return -EINVAL; - - } - - /* We want to ensure that commands are executed one by one for 57710. - * Therefore each none-empty command will consume o->max_cmd_len. - */ - if (p->mcast_list_len) - o->total_pending_num += o->max_cmd_len; - - return 0; -} - -static void bnx2x_mcast_revert_e1(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - int old_num_macs) -{ - struct bnx2x_mcast_obj *o = p->mcast_obj; - - o->set_registry_size(o, old_num_macs); - - /* If current command hasn't been handled yet and we are - * here means that it's meant to be dropped and we have to - * update the number of outstandling MACs accordingly. 
- */ - if (p->mcast_list_len) - o->total_pending_num -= o->max_cmd_len; -} - -static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, int idx, - union bnx2x_mcast_config_data *cfg_data, - int cmd) -{ - struct bnx2x_raw_obj *r = &o->raw; - struct mac_configuration_cmd *data = - (struct mac_configuration_cmd *)(r->rdata); - - /* copy mac */ - if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) { - bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, - &data->config_table[idx].middle_mac_addr, - &data->config_table[idx].lsb_mac_addr, - cfg_data->mac); - - data->config_table[idx].vlan_id = 0; - data->config_table[idx].pf_id = r->func_id; - data->config_table[idx].clients_bit_vector = - cpu_to_le32(1 << r->cl_id); - - SET_FLAG(data->config_table[idx].flags, - MAC_CONFIGURATION_ENTRY_ACTION_TYPE, - T_ETH_MAC_COMMAND_SET); - } -} - -/** - * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd - * - * @bp: device handle - * @p: - * @len: number of rules to handle - */ -static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - u8 len) -{ - struct bnx2x_raw_obj *r = &p->mcast_obj->raw; - struct mac_configuration_cmd *data = - (struct mac_configuration_cmd *)(r->rdata); - - u8 offset = (CHIP_REV_IS_SLOW(bp) ? - BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) : - BNX2X_MAX_MULTICAST*(1 + r->func_id)); - - data->hdr.offset = offset; - data->hdr.client_id = 0xff; - data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) | - (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); - data->hdr.length = len; -} - -/** - * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710 - * - * @bp: device handle - * @o: - * @start_idx: index in the registry to start from - * @rdata_idx: index in the ramrod data to start from - * - * restore command for 57710 is like all other commands - always a stand alone - * command - start_idx and rdata_idx will always be 0. This function will always - * succeed. - * returns -1 to comply with 57712 variant. - */ -static inline int bnx2x_mcast_handle_restore_cmd_e1( - struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, - int *rdata_idx) -{ - struct bnx2x_mcast_mac_elem *elem; - int i = 0; - union bnx2x_mcast_config_data cfg_data = {0}; - - /* go through the registry and configure the MACs from it. 
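Unlike the 57712 path, which drains as many queued commands as fit into one ramrod, the 57710 handler above consumes only the head of the pending FIFO per pass. A compact sketch of that one-command-per-ramrod discipline (the fixed-size FIFO is an illustrative stand-in for the driver's list_head queue):

#include <stdio.h>

struct fifo {
	int cmds[8];
	int head, tail;
};

/* Handle exactly one pending command, as the e1 path does. */
static int handle_one(struct fifo *f)
{
	if (f->head == f->tail)
		return 0;	/* nothing pending */
	printf("handling command %d\n", f->cmds[f->head]);
	f->head++;
	return 1;
}

int main(void)
{
	struct fifo f = { .cmds = { 10, 11 }, .head = 0, .tail = 2 };

	while (handle_one(&f))
		;	/* one command per "ramrod" */
	return 0;
}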
*/ - list_for_each_entry(elem, &o->registry.exact_match.macs, link) { - cfg_data.mac = &elem->mac[0]; - o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); - - i++; - - DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT - " mcast MAC\n", - BNX2X_MAC_PRN_LIST(cfg_data.mac)); - } - - *rdata_idx = i; - - return -1; -} - - -static inline int bnx2x_mcast_handle_pending_cmds_e1( - struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) -{ - struct bnx2x_pending_mcast_cmd *cmd_pos; - struct bnx2x_mcast_mac_elem *pmac_pos; - struct bnx2x_mcast_obj *o = p->mcast_obj; - union bnx2x_mcast_config_data cfg_data = {0}; - int cnt = 0; - - - /* If nothing to be done - return */ - if (list_empty(&o->pending_cmds_head)) - return 0; - - /* Handle the first command */ - cmd_pos = list_first_entry(&o->pending_cmds_head, - struct bnx2x_pending_mcast_cmd, link); - - switch (cmd_pos->type) { - case BNX2X_MCAST_CMD_ADD: - list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) { - cfg_data.mac = &pmac_pos->mac[0]; - o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); - - cnt++; - - DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT - " mcast MAC\n", - BNX2X_MAC_PRN_LIST(pmac_pos->mac)); - } - break; - - case BNX2X_MCAST_CMD_DEL: - cnt = cmd_pos->data.macs_num; - DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt); - break; - - case BNX2X_MCAST_CMD_RESTORE: - o->hdl_restore(bp, o, 0, &cnt); - break; - - default: - BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); - return -EINVAL; - } - - list_del(&cmd_pos->link); - kfree(cmd_pos); - - return cnt; -} - -/** - * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr(). - * - * @fw_hi: - * @fw_mid: - * @fw_lo: - * @mac: - */ -static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, - __le16 *fw_lo, u8 *mac) -{ - mac[1] = ((u8 *)fw_hi)[0]; - mac[0] = ((u8 *)fw_hi)[1]; - mac[3] = ((u8 *)fw_mid)[0]; - mac[2] = ((u8 *)fw_mid)[1]; - mac[5] = ((u8 *)fw_lo)[0]; - mac[4] = ((u8 *)fw_lo)[1]; -} - -/** - * bnx2x_mcast_refresh_registry_e1 - - * - * @bp: device handle - * @cnt: - * - * Check the ramrod data first entry flag to see if it's a DELETE or ADD command - * and update the registry correspondingly: if ADD - allocate a memory and add - * the entries to the registry (list), if DELETE - clear the registry and free - * the memory. 
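bnx2x_get_fw_mac_addr() above undoes a per-halfword byte swizzle: the firmware carries the MAC as three 16-bit fields whose byte order differs from the plain array layout. A round-trip sketch of the same idea; the exact ordering here is illustrative, since the driver's layout additionally depends on the firmware's little-endian 16-bit fields:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a MAC into three 16-bit words, high byte first per word. */
static void mac_to_fw(const uint8_t mac[6], uint16_t fw[3])
{
	for (int i = 0; i < 3; i++)
		fw[i] = (uint16_t)((mac[2 * i] << 8) | mac[2 * i + 1]);
}

/* Unpack again -- the inverse swizzle, as the helper above does. */
static void fw_to_mac(const uint16_t fw[3], uint8_t mac[6])
{
	for (int i = 0; i < 3; i++) {
		mac[2 * i]     = (uint8_t)(fw[i] >> 8);
		mac[2 * i + 1] = (uint8_t)(fw[i] & 0xff);
	}
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc }, out[6];
	uint16_t fw[3];

	mac_to_fw(mac, fw);
	fw_to_mac(fw, out);
	printf("round trip %s\n", memcmp(mac, out, 6) ? "failed" : "ok");
	return 0;
}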
- */ -static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, - struct bnx2x_mcast_obj *o) -{ - struct bnx2x_raw_obj *raw = &o->raw; - struct bnx2x_mcast_mac_elem *elem; - struct mac_configuration_cmd *data = - (struct mac_configuration_cmd *)(raw->rdata); - - /* If first entry contains a SET bit - the command was ADD, - * otherwise - DEL_ALL - */ - if (GET_FLAG(data->config_table[0].flags, - MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { - int i, len = data->hdr.length; - - /* Break if it was a RESTORE command */ - if (!list_empty(&o->registry.exact_match.macs)) - return 0; - - elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC); - if (!elem) { - BNX2X_ERR("Failed to allocate registry memory\n"); - return -ENOMEM; - } - - for (i = 0; i < len; i++, elem++) { - bnx2x_get_fw_mac_addr( - &data->config_table[i].msb_mac_addr, - &data->config_table[i].middle_mac_addr, - &data->config_table[i].lsb_mac_addr, - elem->mac); - DP(BNX2X_MSG_SP, "Adding registry entry for [" - BNX2X_MAC_FMT"]\n", - BNX2X_MAC_PRN_LIST(elem->mac)); - list_add_tail(&elem->link, - &o->registry.exact_match.macs); - } - } else { - elem = list_first_entry(&o->registry.exact_match.macs, - struct bnx2x_mcast_mac_elem, link); - DP(BNX2X_MSG_SP, "Deleting a registry\n"); - kfree(elem); - INIT_LIST_HEAD(&o->registry.exact_match.macs); - } - - return 0; -} - -static int bnx2x_mcast_setup_e1(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - int cmd) -{ - struct bnx2x_mcast_obj *o = p->mcast_obj; - struct bnx2x_raw_obj *raw = &o->raw; - struct mac_configuration_cmd *data = - (struct mac_configuration_cmd *)(raw->rdata); - int cnt = 0, i, rc; - - /* Reset the ramrod data buffer */ - memset(data, 0, sizeof(*data)); - - /* First set all entries as invalid */ - for (i = 0; i < o->max_cmd_len ; i++) - SET_FLAG(data->config_table[i].flags, - MAC_CONFIGURATION_ENTRY_ACTION_TYPE, - T_ETH_MAC_COMMAND_INVALIDATE); - - /* Handle pending commands first */ - cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); - - /* If there are no more pending commands - clear SCHEDULED state */ - if (list_empty(&o->pending_cmds_head)) - o->clear_sched(o); - - /* The below may be true iff there were no pending commands */ - if (!cnt) - cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); - - /* For 57710 every command has o->max_cmd_len length to ensure that - * commands are done one at a time. - */ - o->total_pending_num -= o->max_cmd_len; - - /* send a ramrod */ - - WARN_ON(cnt > o->max_cmd_len); - - /* Set ramrod header (in particular, a number of entries to update) */ - bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); - - /* update a registry: we need the registry contents to be always up - * to date in order to be able to execute a RESTORE opcode. Here - * we use the fact that for 57710 we sent one command at a time - * hence we may take the registry update out of the command handling - * and do it in a simpler way here. - */ - rc = bnx2x_mcast_refresh_registry_e1(bp, o); - if (rc) - return rc; - - /* - * If CLEAR_ONLY was requested - don't send a ramrod and clear - * RAMROD_PENDING status immediately. - */ - if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { - raw->clear_pending(raw); - return 0; - } else { - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). 
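A subtle point in the registry refresh described above: all entries are allocated as one contiguous block, each element is then linked into the list individually, and the whole registry is later released by freeing the first entry alone. A sketch of why that single free is correct, assuming the same single-allocation layout:

#include <stdlib.h>

struct elem {
	struct elem *next;
	unsigned char mac[6];
};

int main(void)
{
	int len = 4;
	struct elem *head = NULL, **tail = &head;

	/* One allocation backs every registry entry... */
	struct elem *block = calloc(len, sizeof(*block));
	if (!block)
		return 1;

	for (int i = 0; i < len; i++) {
		block[i].mac[5] = (unsigned char)i;
		*tail = &block[i];
		tail = &block[i].next;
	}

	/* ...so freeing the first element releases them all, exactly
	 * because the list nodes are slots of a single array. */
	free(head);
	return 0;
}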
- */ - - /* Send a ramrod */ - rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, - U64_HI(raw->rdata_mapping), - U64_LO(raw->rdata_mapping), - ETH_CONNECTION_TYPE); - if (rc) - return rc; - - /* Ramrod completion is pending */ - return 1; - } - -} - -static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) -{ - return o->registry.exact_match.num_macs_set; -} - -static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o) -{ - return o->registry.aprox_match.num_bins_set; -} - -static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o, - int n) -{ - o->registry.exact_match.num_macs_set = n; -} - -static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, - int n) -{ - o->registry.aprox_match.num_bins_set = n; -} - -int bnx2x_config_mcast(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, - int cmd) -{ - struct bnx2x_mcast_obj *o = p->mcast_obj; - struct bnx2x_raw_obj *r = &o->raw; - int rc = 0, old_reg_size; - - /* This is needed to recover number of currently configured mcast macs - * in case of failure. - */ - old_reg_size = o->get_registry_size(o); - - /* Do some calculations and checks */ - rc = o->validate(bp, p, cmd); - if (rc) - return rc; - - /* Return if there is no work to do */ - if ((!p->mcast_list_len) && (!o->check_sched(o))) - return 0; - - DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d " - "o->max_cmd_len=%d\n", o->total_pending_num, - p->mcast_list_len, o->max_cmd_len); - - /* Enqueue the current command to the pending list if we can't complete - * it in the current iteration - */ - if (r->check_pending(r) || - ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { - rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); - if (rc < 0) - goto error_exit1; - - /* As long as the current command is in a command list we - * don't need to handle it separately. 
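The config flow above snapshots the old registry size up front so that any failure can unwind through the error labels and restore the bookkeeping. A minimal sketch of that save/validate/send/revert shape, with trivial stand-ins for the validate and send steps:

#include <stdio.h>

static int do_validate(void)    { return 0; }
static int do_send(int send_ok) { return send_ok ? 0 : -1; }

/* Remember the old state first; on error, unwind via a label. */
static int configure(int *reg_size, int new_size, int send_ok)
{
	int old_size = *reg_size, rc;

	rc = do_validate();
	if (rc)
		return rc;

	*reg_size = new_size;

	rc = do_send(send_ok);
	if (rc)
		goto revert;

	return 0;

revert:
	*reg_size = old_size;	/* put the bookkeeping back */
	return rc;
}

int main(void)
{
	int reg = 3;

	configure(&reg, 10, 0);
	printf("after failed send, registry size back to %d\n", reg);
	return 0;
}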
- */ - p->mcast_list_len = 0; - } - - if (!r->check_pending(r)) { - - /* Set 'pending' state */ - r->set_pending(r); - - /* Configure the new classification in the chip */ - rc = o->config_mcast(bp, p, cmd); - if (rc < 0) - goto error_exit2; - - /* Wait for a ramrod completion if was requested */ - if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) - rc = o->wait_comp(bp, o); - } - - return rc; - -error_exit2: - r->clear_pending(r); - -error_exit1: - o->revert(bp, p, old_reg_size); - - return rc; -} - -static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) -{ - smp_mb__before_clear_bit(); - clear_bit(o->sched_state, o->raw.pstate); - smp_mb__after_clear_bit(); -} - -static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) -{ - smp_mb__before_clear_bit(); - set_bit(o->sched_state, o->raw.pstate); - smp_mb__after_clear_bit(); -} - -static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) -{ - return !!test_bit(o->sched_state, o->raw.pstate); -} - -static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o) -{ - return o->raw.check_pending(&o->raw) || o->check_sched(o); -} - -void bnx2x_init_mcast_obj(struct bnx2x *bp, - struct bnx2x_mcast_obj *mcast_obj, - u8 mcast_cl_id, u32 mcast_cid, u8 func_id, - u8 engine_id, void *rdata, dma_addr_t rdata_mapping, - int state, unsigned long *pstate, bnx2x_obj_type type) -{ - memset(mcast_obj, 0, sizeof(*mcast_obj)); - - bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, - rdata, rdata_mapping, state, pstate, type); - - mcast_obj->engine_id = engine_id; - - INIT_LIST_HEAD(&mcast_obj->pending_cmds_head); - - mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED; - mcast_obj->check_sched = bnx2x_mcast_check_sched; - mcast_obj->set_sched = bnx2x_mcast_set_sched; - mcast_obj->clear_sched = bnx2x_mcast_clear_sched; - - if (CHIP_IS_E1(bp)) { - mcast_obj->config_mcast = bnx2x_mcast_setup_e1; - mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; - mcast_obj->hdl_restore = - bnx2x_mcast_handle_restore_cmd_e1; - mcast_obj->check_pending = bnx2x_mcast_check_pending; - - if (CHIP_REV_IS_SLOW(bp)) - mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI; - else - mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST; - - mcast_obj->wait_comp = bnx2x_mcast_wait; - mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1; - mcast_obj->validate = bnx2x_mcast_validate_e1; - mcast_obj->revert = bnx2x_mcast_revert_e1; - mcast_obj->get_registry_size = - bnx2x_mcast_get_registry_size_exact; - mcast_obj->set_registry_size = - bnx2x_mcast_set_registry_size_exact; - - /* 57710 is the only chip that uses the exact match for mcast - * at the moment. - */ - INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs); - - } else if (CHIP_IS_E1H(bp)) { - mcast_obj->config_mcast = bnx2x_mcast_setup_e1h; - mcast_obj->enqueue_cmd = NULL; - mcast_obj->hdl_restore = NULL; - mcast_obj->check_pending = bnx2x_mcast_check_pending; - - /* 57711 doesn't send a ramrod, so it has unlimited credit - * for one command. 
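The object-init code above wires a different set of callbacks per chip generation, so every later caller stays chip-agnostic. A sketch of that dispatch-by-revision pattern with a struct of function pointers; the chip names echo the driver, but the selected values are illustrative only:

#include <stdio.h>

enum chip { CHIP_E1, CHIP_E1H, CHIP_E2 };

struct mcast_ops {
	const char *name;
	int (*setup)(void);
	int max_cmd_len;
};

static int setup_exact(void) { puts("exact-match CAM path");   return 0; }
static int setup_aprox(void) { puts("approximate-match path"); return 0; }

/* Pick the ops once at init time, as the mcast_obj wiring does. */
static struct mcast_ops pick_ops(enum chip c)
{
	switch (c) {
	case CHIP_E1:  return (struct mcast_ops){ "e1",  setup_exact, 64 };
	case CHIP_E1H: return (struct mcast_ops){ "e1h", setup_aprox, -1 };
	default:       return (struct mcast_ops){ "e2",  setup_aprox, 16 };
	}
}

int main(void)
{
	struct mcast_ops ops = pick_ops(CHIP_E1H);

	printf("%s: max_cmd_len=%d\n", ops.name, ops.max_cmd_len);
	ops.setup();
	return 0;
}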
- */ - mcast_obj->max_cmd_len = -1; - mcast_obj->wait_comp = bnx2x_mcast_wait; - mcast_obj->set_one_rule = NULL; - mcast_obj->validate = bnx2x_mcast_validate_e1h; - mcast_obj->revert = bnx2x_mcast_revert_e1h; - mcast_obj->get_registry_size = - bnx2x_mcast_get_registry_size_aprox; - mcast_obj->set_registry_size = - bnx2x_mcast_set_registry_size_aprox; - } else { - mcast_obj->config_mcast = bnx2x_mcast_setup_e2; - mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; - mcast_obj->hdl_restore = - bnx2x_mcast_handle_restore_cmd_e2; - mcast_obj->check_pending = bnx2x_mcast_check_pending; - /* TODO: There should be a proper HSI define for this number!!! - */ - mcast_obj->max_cmd_len = 16; - mcast_obj->wait_comp = bnx2x_mcast_wait; - mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2; - mcast_obj->validate = bnx2x_mcast_validate_e2; - mcast_obj->revert = bnx2x_mcast_revert_e2; - mcast_obj->get_registry_size = - bnx2x_mcast_get_registry_size_aprox; - mcast_obj->set_registry_size = - bnx2x_mcast_set_registry_size_aprox; - } -} - -/*************************** Credit handling **********************************/ - -/** - * atomic_add_ifless - add if the result is less than a given value. - * - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...if (v + a) is less than u. - * - * returns true if (v + a) was less than u, and false otherwise. - * - */ -static inline bool __atomic_add_ifless(atomic_t *v, int a, int u) -{ - int c, old; - - c = atomic_read(v); - for (;;) { - if (unlikely(c + a >= u)) - return false; - - old = atomic_cmpxchg((v), c, c + a); - if (likely(old == c)) - break; - c = old; - } - - return true; -} - -/** - * atomic_dec_ifmoe - dec if the result is more or equal than a given value. - * - * @v: pointer of type atomic_t - * @a: the amount to dec from v... - * @u: ...if (v - a) is more or equal than u. - * - * returns true if (v - a) was more or equal than u, and false - * otherwise. - */ -static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u) -{ - int c, old; - - c = atomic_read(v); - for (;;) { - if (unlikely(c - a < u)) - return false; - - old = atomic_cmpxchg((v), c, c - a); - if (likely(old == c)) - break; - c = old; - } - - return true; -} - -static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt) -{ - bool rc; - - smp_mb(); - rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); - smp_mb(); - - return rc; -} - -static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt) -{ - bool rc; - - smp_mb(); - - /* Don't let to refill if credit + cnt > pool_sz */ - rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); - - smp_mb(); - - return rc; -} - -static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o) -{ - int cur_credit; - - smp_mb(); - cur_credit = atomic_read(&o->credit); - - return cur_credit; -} - -static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, - int cnt) -{ - return true; -} - - -static bool bnx2x_credit_pool_get_entry( - struct bnx2x_credit_pool_obj *o, - int *offset) -{ - int idx, vec, i; - - *offset = -1; - - /* Find "internal cam-offset" then add to base for this object... */ - for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) { - - /* Skip the current vector if there are no free entries in it */ - if (!o->pool_mirror[vec]) - continue; - - /* If we've got here we are going to find a free entry */ - for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0; - i < BIT_VEC64_ELEM_SZ; idx++, i++) - - if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { - /* Got one!! 
*/ - BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); - *offset = o->base_pool_offset + idx; - return true; - } - } - - return false; -} - -static bool bnx2x_credit_pool_put_entry( - struct bnx2x_credit_pool_obj *o, - int offset) -{ - if (offset < o->base_pool_offset) - return false; - - offset -= o->base_pool_offset; - - if (offset >= o->pool_sz) - return false; - - /* Return the entry to the pool */ - BIT_VEC64_SET_BIT(o->pool_mirror, offset); - - return true; -} - -static bool bnx2x_credit_pool_put_entry_always_true( - struct bnx2x_credit_pool_obj *o, - int offset) -{ - return true; -} - -static bool bnx2x_credit_pool_get_entry_always_true( - struct bnx2x_credit_pool_obj *o, - int *offset) -{ - *offset = -1; - return true; -} -/** - * bnx2x_init_credit_pool - initialize credit pool internals. - * - * @p: - * @base: Base entry in the CAM to use. - * @credit: pool size. - * - * If base is negative no CAM entries handling will be performed. - * If credit is negative pool operations will always succeed (unlimited pool). - * - */ -static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, - int base, int credit) -{ - /* Zero the object first */ - memset(p, 0, sizeof(*p)); - - /* Set the table to all 1s */ - memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); - - /* Init a pool as full */ - atomic_set(&p->credit, credit); - - /* The total poll size */ - p->pool_sz = credit; - - p->base_pool_offset = base; - - /* Commit the change */ - smp_mb(); - - p->check = bnx2x_credit_pool_check; - - /* if pool credit is negative - disable the checks */ - if (credit >= 0) { - p->put = bnx2x_credit_pool_put; - p->get = bnx2x_credit_pool_get; - p->put_entry = bnx2x_credit_pool_put_entry; - p->get_entry = bnx2x_credit_pool_get_entry; - } else { - p->put = bnx2x_credit_pool_always_true; - p->get = bnx2x_credit_pool_always_true; - p->put_entry = bnx2x_credit_pool_put_entry_always_true; - p->get_entry = bnx2x_credit_pool_get_entry_always_true; - } - - /* If base is negative - disable entries handling */ - if (base < 0) { - p->put_entry = bnx2x_credit_pool_put_entry_always_true; - p->get_entry = bnx2x_credit_pool_get_entry_always_true; - } -} - -void bnx2x_init_mac_credit_pool(struct bnx2x *bp, - struct bnx2x_credit_pool_obj *p, u8 func_id, - u8 func_num) -{ -/* TODO: this will be defined in consts as well... */ -#define BNX2X_CAM_SIZE_EMUL 5 - - int cam_sz; - - if (CHIP_IS_E1(bp)) { - /* In E1, Multicast is saved in cam... */ - if (!CHIP_REV_IS_SLOW(bp)) - cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST; - else - cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI; - - bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); - - } else if (CHIP_IS_E1H(bp)) { - /* CAM credit is equaly divided between all active functions - * on the PORT!. - */ - if ((func_num > 0)) { - if (!CHIP_REV_IS_SLOW(bp)) - cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); - else - cam_sz = BNX2X_CAM_SIZE_EMUL; - bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); - } else { - /* this should never happen! Block MAC operations. */ - bnx2x_init_credit_pool(p, 0, 0); - } - - } else { - - /* - * CAM credit is equaly divided between all active functions - * on the PATH. - */ - if ((func_num > 0)) { - if (!CHIP_REV_IS_SLOW(bp)) - cam_sz = (MAX_MAC_CREDIT_E2 / func_num); - else - cam_sz = BNX2X_CAM_SIZE_EMUL; - - /* - * No need for CAM entries handling for 57712 and - * newer. - */ - bnx2x_init_credit_pool(p, -1, cam_sz); - } else { - /* this should never happen! Block MAC operations. 
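The credit helpers above (__atomic_add_ifless / __atomic_dec_ifmoe) are compare-exchange retry loops: read the counter, check the bound, and retry if another CPU raced in between. The same loop expressed with C11 atomics instead of the kernel's atomic_cmpxchg(), as a standalone sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add 'a' to *v only if the result stays below 'u'. */
static bool add_ifless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c + a < u) {
		/* On failure the CAS reloads c, and the bound is
		 * re-checked before the next attempt. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int credit = 5;

	/* Refill by 2 with a pool size of 7 (bound pool_sz + 1 = 8),
	 * as the pool-put path above does. */
	printf("refill ok: %d\n", add_ifless(&credit, 2, 8));	/* 1 */
	printf("refill ok: %d\n", add_ifless(&credit, 2, 8));	/* 0 */
	return 0;
}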
*/ - bnx2x_init_credit_pool(p, 0, 0); - } - - } -} - -void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, - struct bnx2x_credit_pool_obj *p, - u8 func_id, - u8 func_num) -{ - if (CHIP_IS_E1x(bp)) { - /* - * There is no VLAN credit in HW on 57710 and 57711 only - * MAC / MAC-VLAN can be set - */ - bnx2x_init_credit_pool(p, 0, -1); - } else { - /* - * CAM credit is equaly divided between all active functions - * on the PATH. - */ - if (func_num > 0) { - int credit = MAX_VLAN_CREDIT_E2 / func_num; - bnx2x_init_credit_pool(p, func_id * credit, credit); - } else - /* this should never happen! Block VLAN operations. */ - bnx2x_init_credit_pool(p, 0, 0); - } -} - -/****************** RSS Configuration ******************/ -/** - * bnx2x_debug_print_ind_table - prints the indirection table configuration. - * - * @bp: driver hanlde - * @p: pointer to rss configuration - * - * Prints it when NETIF_MSG_IFUP debug level is configured. - */ -static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, - struct bnx2x_config_rss_params *p) -{ - int i; - - DP(BNX2X_MSG_SP, "Setting indirection table to:\n"); - DP(BNX2X_MSG_SP, "0x0000: "); - for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { - DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]); - - /* Print 4 bytes in a line */ - if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && - (((i + 1) & 0x3) == 0)) { - DP_CONT(BNX2X_MSG_SP, "\n"); - DP(BNX2X_MSG_SP, "0x%04x: ", i + 1); - } - } - - DP_CONT(BNX2X_MSG_SP, "\n"); -} - -/** - * bnx2x_setup_rss - configure RSS - * - * @bp: device handle - * @p: rss configuration - * - * sends on UPDATE ramrod for that matter. - */ -static int bnx2x_setup_rss(struct bnx2x *bp, - struct bnx2x_config_rss_params *p) -{ - struct bnx2x_rss_config_obj *o = p->rss_obj; - struct bnx2x_raw_obj *r = &o->raw; - struct eth_rss_update_ramrod_data *data = - (struct eth_rss_update_ramrod_data *)(r->rdata); - u8 rss_mode = 0; - int rc; - - memset(data, 0, sizeof(*data)); - - DP(BNX2X_MSG_SP, "Configuring RSS\n"); - - /* Set an echo field */ - data->echo = (r->cid & BNX2X_SWCID_MASK) | - (r->state << BNX2X_SWCID_SHIFT); - - /* RSS mode */ - if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_DISABLED; - else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_REGULAR; - else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_VLAN_PRI; - else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_E1HOV_PRI; - else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags)) - rss_mode = ETH_RSS_MODE_IP_DSCP; - - data->rss_mode = rss_mode; - - DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode); - - /* RSS capabilities */ - if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; - - if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; - - if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; - - if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; - - /* Hashing mask */ - data->rss_result_mask = p->rss_result_mask; - - /* RSS engine ID */ - data->rss_engine_id = o->engine_id; - - DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id); - - /* Indirection table */ - memcpy(data->indirection_table, p->ind_table, - T_ETH_INDIRECTION_TABLE_SIZE); - - /* Remember the last configuration */ - 
memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); - - /* Print the indirection table */ - if (netif_msg_ifup(bp)) - bnx2x_debug_print_ind_table(bp, p); - - /* RSS keys */ - if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { - memcpy(&data->rss_key[0], &p->rss_key[0], - sizeof(data->rss_key)); - data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; - } - - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). - */ - - /* Send a ramrod */ - rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid, - U64_HI(r->rdata_mapping), - U64_LO(r->rdata_mapping), - ETH_CONNECTION_TYPE); - - if (rc < 0) - return rc; - - return 1; -} - -void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, - u8 *ind_table) -{ - memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); -} - -int bnx2x_config_rss(struct bnx2x *bp, - struct bnx2x_config_rss_params *p) -{ - int rc; - struct bnx2x_rss_config_obj *o = p->rss_obj; - struct bnx2x_raw_obj *r = &o->raw; - - /* Do nothing if only driver cleanup was requested */ - if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) - return 0; - - r->set_pending(r); - - rc = o->config_rss(bp, p); - if (rc < 0) { - r->clear_pending(r); - return rc; - } - - if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) - rc = r->wait_comp(bp, r); - - return rc; -} - - -void bnx2x_init_rss_config_obj(struct bnx2x *bp, - struct bnx2x_rss_config_obj *rss_obj, - u8 cl_id, u32 cid, u8 func_id, u8 engine_id, - void *rdata, dma_addr_t rdata_mapping, - int state, unsigned long *pstate, - bnx2x_obj_type type) -{ - bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, - rdata_mapping, state, pstate, type); - - rss_obj->engine_id = engine_id; - rss_obj->config_rss = bnx2x_setup_rss; -} - -/********************** Queue state object ***********************************/ - -/** - * bnx2x_queue_state_change - perform Queue state change transition - * - * @bp: device handle - * @params: parameters to perform the transition - * - * returns 0 in case of successfully completed transition, negative error - * code in case of failure, positive (EBUSY) value if there is a completion - * to that is still pending (possible only if RAMROD_COMP_WAIT is - * not set in params->ramrod_flags for asynchronous commands). 
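
A short caller-side sketch of the return convention just described (illustrative only; q_obj ownership and error handling trimmed):

        struct bnx2x_queue_state_params q_params = {0};
        int rc;

        q_params.q_obj = q_obj;                 /* hypothetical queue object */
        q_params.cmd = BNX2X_Q_CMD_HALT;
        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

        rc = bnx2x_queue_state_change(bp, &q_params);
        if (rc < 0)
                return rc;      /* illegal transition or ramrod error */
        /* rc == 0: with RAMROD_COMP_WAIT set, the completion has arrived */
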
- * - */
-int bnx2x_queue_state_change(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_sp_obj *o = params->q_obj; - int rc, pending_bit; - unsigned long *pending = &o->pending; -
- /* Check that the requested transition is legal */ - if (o->check_transition(bp, o, params)) - return -EINVAL; -
- /* Set "pending" bit */ - pending_bit = o->set_pending(o, params); -
- /* Don't send a command if only driver cleanup was requested */ - if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) - o->complete_cmd(bp, o, pending_bit); - else { - /* Send a ramrod */ - rc = o->send_cmd(bp, params); - if (rc) { - o->next_state = BNX2X_Q_STATE_MAX; - clear_bit(pending_bit, pending); - smp_mb__after_clear_bit(); - return rc; - } -
- if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) { - rc = o->wait_comp(bp, o, pending_bit); - if (rc) - return rc; -
- return 0; - } - } -
- return !!test_bit(pending_bit, pending); -} - -
-static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, - struct bnx2x_queue_state_params *params) -{ - enum bnx2x_queue_cmd cmd = params->cmd, bit; -
- /* ACTIVATE and DEACTIVATE commands are implemented on top of the - * UPDATE command. - */ - if ((cmd == BNX2X_Q_CMD_ACTIVATE) || - (cmd == BNX2X_Q_CMD_DEACTIVATE)) - bit = BNX2X_Q_CMD_UPDATE; - else - bit = cmd; -
- set_bit(bit, &obj->pending); - return bit; -} -
-static int bnx2x_queue_wait_comp(struct bnx2x *bp, - struct bnx2x_queue_sp_obj *o, - enum bnx2x_queue_cmd cmd) -{ - return bnx2x_state_wait(bp, cmd, &o->pending); -} -
-/** - * bnx2x_queue_comp_cmd - complete the state change command. - * - * @bp: device handle - * @o: queue state object - * @cmd: command that has completed - * - * Checks that the arrived completion is expected. - */
-static int bnx2x_queue_comp_cmd(struct bnx2x *bp, - struct bnx2x_queue_sp_obj *o, - enum bnx2x_queue_cmd cmd) -{ - unsigned long cur_pending = o->pending; -
- if (!test_and_clear_bit(cmd, &cur_pending)) { - BNX2X_ERR("Bad MC reply %d for queue %d in state %d " - "pending 0x%lx, next_state %d\n", cmd, - o->cids[BNX2X_PRIMARY_CID_INDEX], - o->state, cur_pending, o->next_state); - return -EINVAL; - } -
- if (o->next_tx_only >= o->max_cos) - /* >= because tx only must always be smaller than cos since the - * primary connection supports COS 0 - */ - BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", - o->next_tx_only, o->max_cos); -
- DP(BNX2X_MSG_SP, "Completing command %d for queue %d, " - "setting state to %d\n", cmd, - o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); -
- if (o->next_tx_only) /* print num tx-only if any exist */ - DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d", - o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); -
- o->state = o->next_state; - o->num_tx_only = o->next_tx_only; - o->next_state = BNX2X_Q_STATE_MAX; -
- /* It's important that o->state and o->next_state are - * updated before o->pending.
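
The wmb()/clear_bit() pair that follows is one half of a small publish protocol; the reader side (cf. bnx2x_state_wait()) looks roughly like this sketch (illustrative; the real wait is bounded and sleeps):

        /* Once the pending bit is observed clear, the barrier pairing
         * below guarantees the new o->state is visible to this CPU. */
        while (test_bit(cmd, &o->pending))
                cpu_relax();
        current_state = o->state;
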
- */ - wmb(); - - clear_bit(cmd, &o->pending); - smp_mb__after_clear_bit(); - - return 0; -} - -static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, - struct bnx2x_queue_state_params *cmd_params, - struct client_init_ramrod_data *data) -{ - struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; - - /* Rx data */ - - /* IPv6 TPA supported for E2 and above only */ - data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) * - CLIENT_INIT_RX_DATA_TPA_EN_IPV6; -} - -static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, - struct bnx2x_queue_sp_obj *o, - struct bnx2x_general_setup_params *params, - struct client_init_general_data *gen_data, - unsigned long *flags) -{ - gen_data->client_id = o->cl_id; - - if (test_bit(BNX2X_Q_FLG_STATS, flags)) { - gen_data->statistics_counter_id = - params->stat_id; - gen_data->statistics_en_flg = 1; - gen_data->statistics_zero_flg = - test_bit(BNX2X_Q_FLG_ZERO_STATS, flags); - } else - gen_data->statistics_counter_id = - DISABLE_STATISTIC_COUNTER_ID_VALUE; - - gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags); - gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags); - gen_data->sp_client_id = params->spcl_id; - gen_data->mtu = cpu_to_le16(params->mtu); - gen_data->func_id = o->func_id; - - - gen_data->cos = params->cos; - - gen_data->traffic_type = - test_bit(BNX2X_Q_FLG_FCOE, flags) ? - LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; - - DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d", - gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); -} - -static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, - struct bnx2x_txq_setup_params *params, - struct client_init_tx_data *tx_data, - unsigned long *flags) -{ - tx_data->enforce_security_flg = - test_bit(BNX2X_Q_FLG_TX_SEC, flags); - tx_data->default_vlan = - cpu_to_le16(params->default_vlan); - tx_data->default_vlan_flg = - test_bit(BNX2X_Q_FLG_DEF_VLAN, flags); - tx_data->tx_switching_flg = - test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); - tx_data->anti_spoofing_flg = - test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); - tx_data->tx_status_block_id = params->fw_sb_id; - tx_data->tx_sb_index_number = params->sb_cq_index; - tx_data->tss_leading_client_id = params->tss_leading_cl_id; - - tx_data->tx_bd_page_base.lo = - cpu_to_le32(U64_LO(params->dscr_map)); - tx_data->tx_bd_page_base.hi = - cpu_to_le32(U64_HI(params->dscr_map)); - - /* Don't configure any Tx switching mode during queue SETUP */ - tx_data->state = 0; -} - -static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o, - struct rxq_pause_params *params, - struct client_init_rx_data *rx_data) -{ - /* flow control data */ - rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo); - rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi); - rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo); - rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi); - rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo); - rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi); - rx_data->rx_cos_mask = cpu_to_le16(params->pri_map); -} - -static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, - struct bnx2x_rxq_setup_params *params, - struct client_init_rx_data *rx_data, - unsigned long *flags) -{ - /* Rx data */ - rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) * - CLIENT_INIT_RX_DATA_TPA_EN_IPV4; - rx_data->vmqueue_mode_en_flg = 0; - - rx_data->cache_line_alignment_log_size = - params->cache_line_log; - rx_data->enable_dynamic_hc = - 
test_bit(BNX2X_Q_FLG_DHC, flags); - rx_data->max_sges_for_packet = params->max_sges_pkt; - rx_data->client_qzone_id = params->cl_qzone_id; - rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz); - - /* Always start in DROP_ALL mode */ - rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | - CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); - - /* We don't set drop flags */ - rx_data->drop_ip_cs_err_flg = 0; - rx_data->drop_tcp_cs_err_flg = 0; - rx_data->drop_ttl0_flg = 0; - rx_data->drop_udp_cs_err_flg = 0; - rx_data->inner_vlan_removal_enable_flg = - test_bit(BNX2X_Q_FLG_VLAN, flags); - rx_data->outer_vlan_removal_enable_flg = - test_bit(BNX2X_Q_FLG_OV, flags); - rx_data->status_block_id = params->fw_sb_id; - rx_data->rx_sb_index_number = params->sb_cq_index; - rx_data->max_tpa_queues = params->max_tpa_queues; - rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz); - rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz); - rx_data->bd_page_base.lo = - cpu_to_le32(U64_LO(params->dscr_map)); - rx_data->bd_page_base.hi = - cpu_to_le32(U64_HI(params->dscr_map)); - rx_data->sge_page_base.lo = - cpu_to_le32(U64_LO(params->sge_map)); - rx_data->sge_page_base.hi = - cpu_to_le32(U64_HI(params->sge_map)); - rx_data->cqe_page_base.lo = - cpu_to_le32(U64_LO(params->rcq_map)); - rx_data->cqe_page_base.hi = - cpu_to_le32(U64_HI(params->rcq_map)); - rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags); - - if (test_bit(BNX2X_Q_FLG_MCAST, flags)) { - rx_data->approx_mcast_engine_id = o->func_id; - rx_data->is_approx_mcast = 1; - } - - rx_data->rss_engine_id = params->rss_engine_id; - - /* silent vlan removal */ - rx_data->silent_vlan_removal_flg = - test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags); - rx_data->silent_vlan_value = - cpu_to_le16(params->silent_removal_value); - rx_data->silent_vlan_mask = - cpu_to_le16(params->silent_removal_mask); - -} - -/* initialize the general, tx and rx parts of a queue object */ -static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, - struct bnx2x_queue_state_params *cmd_params, - struct client_init_ramrod_data *data) -{ - bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, - &cmd_params->params.setup.gen_params, - &data->general, - &cmd_params->params.setup.flags); - - bnx2x_q_fill_init_tx_data(cmd_params->q_obj, - &cmd_params->params.setup.txq_params, - &data->tx, - &cmd_params->params.setup.flags); - - bnx2x_q_fill_init_rx_data(cmd_params->q_obj, - &cmd_params->params.setup.rxq_params, - &data->rx, - &cmd_params->params.setup.flags); - - bnx2x_q_fill_init_pause_data(cmd_params->q_obj, - &cmd_params->params.setup.pause_params, - &data->rx); -} - -/* initialize the general and tx parts of a tx-only queue object */ -static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, - struct bnx2x_queue_state_params *cmd_params, - struct tx_queue_init_ramrod_data *data) -{ - bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, - &cmd_params->params.tx_only.gen_params, - &data->general, - &cmd_params->params.tx_only.flags); - - bnx2x_q_fill_init_tx_data(cmd_params->q_obj, - &cmd_params->params.tx_only.txq_params, - &data->tx, - &cmd_params->params.tx_only.flags); - - DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",cmd_params->q_obj->cids[0], - data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi); -} - -/** - * bnx2x_q_init - init HW/FW queue - * - * @bp: device handle - * @params: - * - * HW/FW initial Queue configuration: - * - HC: Rx and Tx - * - CDU context validation - * - */ -static inline int bnx2x_q_init(struct bnx2x *bp, - struct 
bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_sp_obj *o = params->q_obj; - struct bnx2x_queue_init_params *init = ¶ms->params.init; - u16 hc_usec; - u8 cos; - - /* Tx HC configuration */ - if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) && - test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) { - hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; - - bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id, - init->tx.sb_cq_index, - !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags), - hc_usec); - } - - /* Rx HC configuration */ - if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) && - test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) { - hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; - - bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id, - init->rx.sb_cq_index, - !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags), - hc_usec); - } - - /* Set CDU context validation values */ - for (cos = 0; cos < o->max_cos; cos++) { - DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d", - o->cids[cos], cos); - DP(BNX2X_MSG_SP, "context pointer %p", init->cxts[cos]); - bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]); - } - - /* As no ramrod is sent, complete the command immediately */ - o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); - - mmiowb(); - smp_mb(); - - return 0; -} - -static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_sp_obj *o = params->q_obj; - struct client_init_ramrod_data *rdata = - (struct client_init_ramrod_data *)o->rdata; - dma_addr_t data_mapping = o->rdata_mapping; - int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; - - /* Clear the ramrod data */ - memset(rdata, 0, sizeof(*rdata)); - - /* Fill the ramrod data */ - bnx2x_q_fill_setup_data_cmn(bp, params, rdata); - - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). - */ - - return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], - U64_HI(data_mapping), - U64_LO(data_mapping), ETH_CONNECTION_TYPE); -} - -static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_sp_obj *o = params->q_obj; - struct client_init_ramrod_data *rdata = - (struct client_init_ramrod_data *)o->rdata; - dma_addr_t data_mapping = o->rdata_mapping; - int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; - - /* Clear the ramrod data */ - memset(rdata, 0, sizeof(*rdata)); - - /* Fill the ramrod data */ - bnx2x_q_fill_setup_data_cmn(bp, params, rdata); - bnx2x_q_fill_setup_data_e2(bp, params, rdata); - - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). 
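
The hc_rate fields consumed in bnx2x_q_init above are interrupt-per-second targets; the conversion to a coalescing period in microseconds is simply the reciprocal. Worked example (mirrors the expression in the code):

        /* hc_rate = 50000 ints/s -> hc_usec = 1000000 / 50000 = 20 us;
         * hc_rate = 0 disables the update (hc_usec = 0). */
        u16 hc_usec = hc_rate ? 1000000 / hc_rate : 0;
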
- */ -
- return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], - U64_HI(data_mapping), - U64_LO(data_mapping), ETH_CONNECTION_TYPE); -} -
-static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_sp_obj *o = params->q_obj; - struct tx_queue_init_ramrod_data *rdata = - (struct tx_queue_init_ramrod_data *)o->rdata; - dma_addr_t data_mapping = o->rdata_mapping; - int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; - struct bnx2x_queue_setup_tx_only_params *tx_only_params = - &params->params.tx_only; - u8 cid_index = tx_only_params->cid_index; - -
- if (cid_index >= o->max_cos) { - BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", - o->cl_id, cid_index); - return -EINVAL; - } -
- DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d", - tx_only_params->gen_params.cos, - tx_only_params->gen_params.spcl_id); -
- /* Clear the ramrod data */ - memset(rdata, 0, sizeof(*rdata)); -
- /* Fill the ramrod data */ - bnx2x_q_fill_setup_tx_only(bp, params, rdata); -
- DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, " - "sp-client id %d, cos %d", - o->cids[cid_index], - rdata->general.client_id, - rdata->general.sp_client_id, rdata->general.cos); -
- /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). - */ -
- return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], - U64_HI(data_mapping), - U64_LO(data_mapping), ETH_CONNECTION_TYPE); -} -
-static void bnx2x_q_fill_update_data(struct bnx2x *bp, - struct bnx2x_queue_sp_obj *obj, - struct bnx2x_queue_update_params *params, - struct client_update_ramrod_data *data) -{ - /* Client ID of the client to update */ - data->client_id = obj->cl_id; -
- /* Function ID of the client to update */ - data->func_id = obj->func_id; -
- /* Default VLAN value */ - data->default_vlan = cpu_to_le16(params->def_vlan); -
- /* Inner VLAN stripping */ - data->inner_vlan_removal_enable_flg = - test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags); - data->inner_vlan_removal_change_flg = - test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, - &params->update_flags); -
- /* Outer VLAN stripping */ - data->outer_vlan_removal_enable_flg = - test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags); - data->outer_vlan_removal_change_flg = - test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, - &params->update_flags); -
- /* Drop packets that have source MAC that doesn't belong to this - * Queue.
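
Each updatable attribute in this ramrod travels as a pair: a value flag plus a *_change_flg gate, so firmware applies only fields explicitly marked as changed. A minimal sketch of the receiving convention (hypothetical helper, not driver or firmware code):

        /* The new value is honoured only when its change flag is set;
         * untouched pairs leave the current state as-is. */
        static void apply_flag_pair(u8 *cur, u8 value_flg, u8 change_flg)
        {
                if (change_flg)
                        *cur = value_flg;
        }
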
- */
- data->anti_spoofing_enable_flg = - test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags); - data->anti_spoofing_change_flg = - test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags); -
- /* Activate/Deactivate */ - data->activate_flg = - test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags); - data->activate_change_flg = - test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags); -
- /* Enable default VLAN */ - data->default_vlan_enable_flg = - test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags); - data->default_vlan_change_flg = - test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, - &params->update_flags); -
- /* silent vlan removal */ - data->silent_vlan_change_flg = - test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, - &params->update_flags); - data->silent_vlan_removal_flg = - test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags); - data->silent_vlan_value = cpu_to_le16(params->silent_removal_value); - data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); -} -
-static inline int bnx2x_q_send_update(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_sp_obj *o = params->q_obj; - struct client_update_ramrod_data *rdata = - (struct client_update_ramrod_data *)o->rdata; - dma_addr_t data_mapping = o->rdata_mapping; - struct bnx2x_queue_update_params *update_params = - &params->params.update; - u8 cid_index = update_params->cid_index; -
- if (cid_index >= o->max_cos) { - BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", - o->cl_id, cid_index); - return -EINVAL; - } - -
- /* Clear the ramrod data */ - memset(rdata, 0, sizeof(*rdata)); -
- /* Fill the ramrod data */ - bnx2x_q_fill_update_data(bp, o, update_params, rdata); -
- /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). - */ -
- return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, - o->cids[cid_index], U64_HI(data_mapping), - U64_LO(data_mapping), ETH_CONNECTION_TYPE); -} -
-/** - * bnx2x_q_send_deactivate - send DEACTIVATE command - * - * @bp: device handle - * @params: command parameters - * - * Implemented using the UPDATE command. - */
-static inline int bnx2x_q_send_deactivate(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_update_params *update = &params->params.update; -
- memset(update, 0, sizeof(*update)); -
- __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); -
- return bnx2x_q_send_update(bp, params); -} -
-/** - * bnx2x_q_send_activate - send ACTIVATE command - * - * @bp: device handle - * @params: command parameters - * - * Implemented using the UPDATE command. - */
-static inline int bnx2x_q_send_activate(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_update_params *update = &params->params.update; -
- memset(update, 0, sizeof(*update)); -
- __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags); - __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); -
- return bnx2x_q_send_update(bp, params); -} -
-static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - /* TODO: Not implemented yet.
*/ - return -1; -} - -static inline int bnx2x_q_send_halt(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_sp_obj *o = params->q_obj; - - return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, - o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id, - ETH_CONNECTION_TYPE); -} - -static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_sp_obj *o = params->q_obj; - u8 cid_idx = params->params.cfc_del.cid_index; - - if (cid_idx >= o->max_cos) { - BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", - o->cl_id, cid_idx); - return -EINVAL; - } - - return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, - o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE); -} - -static inline int bnx2x_q_send_terminate(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_sp_obj *o = params->q_obj; - u8 cid_index = params->params.terminate.cid_index; - - if (cid_index >= o->max_cos) { - BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", - o->cl_id, cid_index); - return -EINVAL; - } - - return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, - o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE); -} - -static inline int bnx2x_q_send_empty(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - struct bnx2x_queue_sp_obj *o = params->q_obj; - - return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, - o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0, - ETH_CONNECTION_TYPE); -} - -static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - switch (params->cmd) { - case BNX2X_Q_CMD_INIT: - return bnx2x_q_init(bp, params); - case BNX2X_Q_CMD_SETUP_TX_ONLY: - return bnx2x_q_send_setup_tx_only(bp, params); - case BNX2X_Q_CMD_DEACTIVATE: - return bnx2x_q_send_deactivate(bp, params); - case BNX2X_Q_CMD_ACTIVATE: - return bnx2x_q_send_activate(bp, params); - case BNX2X_Q_CMD_UPDATE: - return bnx2x_q_send_update(bp, params); - case BNX2X_Q_CMD_UPDATE_TPA: - return bnx2x_q_send_update_tpa(bp, params); - case BNX2X_Q_CMD_HALT: - return bnx2x_q_send_halt(bp, params); - case BNX2X_Q_CMD_CFC_DEL: - return bnx2x_q_send_cfc_del(bp, params); - case BNX2X_Q_CMD_TERMINATE: - return bnx2x_q_send_terminate(bp, params); - case BNX2X_Q_CMD_EMPTY: - return bnx2x_q_send_empty(bp, params); - default: - BNX2X_ERR("Unknown command: %d\n", params->cmd); - return -EINVAL; - } -} - -static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - switch (params->cmd) { - case BNX2X_Q_CMD_SETUP: - return bnx2x_q_send_setup_e1x(bp, params); - case BNX2X_Q_CMD_INIT: - case BNX2X_Q_CMD_SETUP_TX_ONLY: - case BNX2X_Q_CMD_DEACTIVATE: - case BNX2X_Q_CMD_ACTIVATE: - case BNX2X_Q_CMD_UPDATE: - case BNX2X_Q_CMD_UPDATE_TPA: - case BNX2X_Q_CMD_HALT: - case BNX2X_Q_CMD_CFC_DEL: - case BNX2X_Q_CMD_TERMINATE: - case BNX2X_Q_CMD_EMPTY: - return bnx2x_queue_send_cmd_cmn(bp, params); - default: - BNX2X_ERR("Unknown command: %d\n", params->cmd); - return -EINVAL; - } -} - -static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, - struct bnx2x_queue_state_params *params) -{ - switch (params->cmd) { - case BNX2X_Q_CMD_SETUP: - return bnx2x_q_send_setup_e2(bp, params); - case BNX2X_Q_CMD_INIT: - case BNX2X_Q_CMD_SETUP_TX_ONLY: - case BNX2X_Q_CMD_DEACTIVATE: - case BNX2X_Q_CMD_ACTIVATE: - case BNX2X_Q_CMD_UPDATE: - case BNX2X_Q_CMD_UPDATE_TPA: - case BNX2X_Q_CMD_HALT: - case BNX2X_Q_CMD_CFC_DEL: - case BNX2X_Q_CMD_TERMINATE: - case BNX2X_Q_CMD_EMPTY: - return 
bnx2x_queue_send_cmd_cmn(bp, params); - default: - BNX2X_ERR("Unknown command: %d\n", params->cmd); - return -EINVAL; - } -} -
-/** - * bnx2x_queue_chk_transition - check state machine of a regular Queue - * - * @bp: device handle - * @o: queue state object - * @params: command parameters - * - * (not Forwarding) - * It both checks if the requested command is legal in a current - * state and, if it's legal, sets a `next_state' in the object - * that will be used in the completion flow to set the `state' - * of the object. - * - * returns 0 if a requested command is a legal transition, - * -EINVAL otherwise. - */
-static int bnx2x_queue_chk_transition(struct bnx2x *bp, - struct bnx2x_queue_sp_obj *o, - struct bnx2x_queue_state_params *params) -{ - enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX; - enum bnx2x_queue_cmd cmd = params->cmd; - struct bnx2x_queue_update_params *update_params = - &params->params.update; - u8 next_tx_only = o->num_tx_only; -
- /* - * Forget all pending for completion commands if a driver only state - * transition has been requested. - */ - if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { - o->pending = 0; - o->next_state = BNX2X_Q_STATE_MAX; - } -
- /* - * Don't allow a next state transition if we are in the middle of - * the previous one. - */ - if (o->pending) - return -EBUSY; -
- switch (state) { - case BNX2X_Q_STATE_RESET: - if (cmd == BNX2X_Q_CMD_INIT) - next_state = BNX2X_Q_STATE_INITIALIZED; -
- break; - case BNX2X_Q_STATE_INITIALIZED: - if (cmd == BNX2X_Q_CMD_SETUP) { - if (test_bit(BNX2X_Q_FLG_ACTIVE, - &params->params.setup.flags)) - next_state = BNX2X_Q_STATE_ACTIVE; - else - next_state = BNX2X_Q_STATE_INACTIVE; - } -
- break; - case BNX2X_Q_STATE_ACTIVE: - if (cmd == BNX2X_Q_CMD_DEACTIVATE) - next_state = BNX2X_Q_STATE_INACTIVE; -
- else if ((cmd == BNX2X_Q_CMD_EMPTY) || - (cmd == BNX2X_Q_CMD_UPDATE_TPA)) - next_state = BNX2X_Q_STATE_ACTIVE; -
- else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { - next_state = BNX2X_Q_STATE_MULTI_COS; - next_tx_only = 1; - } -
- else if (cmd == BNX2X_Q_CMD_HALT) - next_state = BNX2X_Q_STATE_STOPPED; -
- else if (cmd == BNX2X_Q_CMD_UPDATE) { - /* If "active" state change is requested, update the - * state accordingly. - */ - if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, - &update_params->update_flags) && - !test_bit(BNX2X_Q_UPDATE_ACTIVATE, - &update_params->update_flags)) - next_state = BNX2X_Q_STATE_INACTIVE; - else - next_state = BNX2X_Q_STATE_ACTIVE; - } -
- break; - case BNX2X_Q_STATE_MULTI_COS: - if (cmd == BNX2X_Q_CMD_TERMINATE) - next_state = BNX2X_Q_STATE_MCOS_TERMINATED; -
- else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { - next_state = BNX2X_Q_STATE_MULTI_COS; - next_tx_only = o->num_tx_only + 1; - } -
- else if ((cmd == BNX2X_Q_CMD_EMPTY) || - (cmd == BNX2X_Q_CMD_UPDATE_TPA)) - next_state = BNX2X_Q_STATE_MULTI_COS; -
- else if (cmd == BNX2X_Q_CMD_UPDATE) { - /* If "active" state change is requested, update the - * state accordingly.
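
The switch in bnx2x_queue_chk_transition spells out a transition relation; condensed as data, the simple single-CoS happy path is the sketch below (illustrative only; as the code shows, SETUP's target really depends on BNX2X_Q_FLG_ACTIVE, and UPDATE/EMPTY mostly self-loop):

        static const struct {
                enum bnx2x_q_state from;
                enum bnx2x_queue_cmd cmd;
                enum bnx2x_q_state to;
        } q_edges[] = {
                { BNX2X_Q_STATE_RESET,       BNX2X_Q_CMD_INIT,      BNX2X_Q_STATE_INITIALIZED },
                { BNX2X_Q_STATE_INITIALIZED, BNX2X_Q_CMD_SETUP,     BNX2X_Q_STATE_ACTIVE },
                { BNX2X_Q_STATE_ACTIVE,      BNX2X_Q_CMD_HALT,      BNX2X_Q_STATE_STOPPED },
                { BNX2X_Q_STATE_STOPPED,     BNX2X_Q_CMD_TERMINATE, BNX2X_Q_STATE_TERMINATED },
                { BNX2X_Q_STATE_TERMINATED,  BNX2X_Q_CMD_CFC_DEL,   BNX2X_Q_STATE_RESET },
        };
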
- */ - if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, - &update_params->update_flags) && - !test_bit(BNX2X_Q_UPDATE_ACTIVATE, - &update_params->update_flags)) - next_state = BNX2X_Q_STATE_INACTIVE; - else - next_state = BNX2X_Q_STATE_MULTI_COS; - } - - break; - case BNX2X_Q_STATE_MCOS_TERMINATED: - if (cmd == BNX2X_Q_CMD_CFC_DEL) { - next_tx_only = o->num_tx_only - 1; - if (next_tx_only == 0) - next_state = BNX2X_Q_STATE_ACTIVE; - else - next_state = BNX2X_Q_STATE_MULTI_COS; - } - - break; - case BNX2X_Q_STATE_INACTIVE: - if (cmd == BNX2X_Q_CMD_ACTIVATE) - next_state = BNX2X_Q_STATE_ACTIVE; - - else if ((cmd == BNX2X_Q_CMD_EMPTY) || - (cmd == BNX2X_Q_CMD_UPDATE_TPA)) - next_state = BNX2X_Q_STATE_INACTIVE; - - else if (cmd == BNX2X_Q_CMD_HALT) - next_state = BNX2X_Q_STATE_STOPPED; - - else if (cmd == BNX2X_Q_CMD_UPDATE) { - /* If "active" state change is requested, update the - * state accordingly. - */ - if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, - &update_params->update_flags) && - test_bit(BNX2X_Q_UPDATE_ACTIVATE, - &update_params->update_flags)){ - if (o->num_tx_only == 0) - next_state = BNX2X_Q_STATE_ACTIVE; - else /* tx only queues exist for this queue */ - next_state = BNX2X_Q_STATE_MULTI_COS; - } else - next_state = BNX2X_Q_STATE_INACTIVE; - } - - break; - case BNX2X_Q_STATE_STOPPED: - if (cmd == BNX2X_Q_CMD_TERMINATE) - next_state = BNX2X_Q_STATE_TERMINATED; - - break; - case BNX2X_Q_STATE_TERMINATED: - if (cmd == BNX2X_Q_CMD_CFC_DEL) - next_state = BNX2X_Q_STATE_RESET; - - break; - default: - BNX2X_ERR("Illegal state: %d\n", state); - } - - /* Transition is assured */ - if (next_state != BNX2X_Q_STATE_MAX) { - DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n", - state, cmd, next_state); - o->next_state = next_state; - o->next_tx_only = next_tx_only; - return 0; - } - - DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd); - - return -EINVAL; -} - -void bnx2x_init_queue_obj(struct bnx2x *bp, - struct bnx2x_queue_sp_obj *obj, - u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id, - void *rdata, - dma_addr_t rdata_mapping, unsigned long type) -{ - memset(obj, 0, sizeof(*obj)); - - /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */ - BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt); - - memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); - obj->max_cos = cid_cnt; - obj->cl_id = cl_id; - obj->func_id = func_id; - obj->rdata = rdata; - obj->rdata_mapping = rdata_mapping; - obj->type = type; - obj->next_state = BNX2X_Q_STATE_MAX; - - if (CHIP_IS_E1x(bp)) - obj->send_cmd = bnx2x_queue_send_cmd_e1x; - else - obj->send_cmd = bnx2x_queue_send_cmd_e2; - - obj->check_transition = bnx2x_queue_chk_transition; - - obj->complete_cmd = bnx2x_queue_comp_cmd; - obj->wait_comp = bnx2x_queue_wait_comp; - obj->set_pending = bnx2x_queue_set_pending; -} - -void bnx2x_queue_set_cos_cid(struct bnx2x *bp, - struct bnx2x_queue_sp_obj *obj, - u32 cid, u8 index) -{ - obj->cids[index] = cid; -} - -/********************** Function state object *********************************/ -enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, - struct bnx2x_func_sp_obj *o) -{ - /* in the middle of transaction - return INVALID state */ - if (o->pending) - return BNX2X_F_STATE_MAX; - - /* - * unsure the order of reading of o->pending and o->state - * o->pending should be read first - */ - rmb(); - - return o->state; -} - -static int bnx2x_func_wait_comp(struct bnx2x *bp, - struct bnx2x_func_sp_obj *o, - enum bnx2x_func_cmd cmd) -{ - return bnx2x_state_wait(bp, cmd, &o->pending); -} - -/** - * 
bnx2x_func_state_change_comp - complete the state machine transition - * - * @bp: device handle - * @o: - * @cmd: - * - * Called on state change transition. Completes the state - * machine transition only - no HW interaction. - */ -static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, - struct bnx2x_func_sp_obj *o, - enum bnx2x_func_cmd cmd) -{ - unsigned long cur_pending = o->pending; - - if (!test_and_clear_bit(cmd, &cur_pending)) { - BNX2X_ERR("Bad MC reply %d for func %d in state %d " - "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp), - o->state, cur_pending, o->next_state); - return -EINVAL; - } - - DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to " - "%d\n", cmd, BP_FUNC(bp), o->next_state); - - o->state = o->next_state; - o->next_state = BNX2X_F_STATE_MAX; - - /* It's important that o->state and o->next_state are - * updated before o->pending. - */ - wmb(); - - clear_bit(cmd, &o->pending); - smp_mb__after_clear_bit(); - - return 0; -} - -/** - * bnx2x_func_comp_cmd - complete the state change command - * - * @bp: device handle - * @o: - * @cmd: - * - * Checks that the arrived completion is expected. - */ -static int bnx2x_func_comp_cmd(struct bnx2x *bp, - struct bnx2x_func_sp_obj *o, - enum bnx2x_func_cmd cmd) -{ - /* Complete the state machine part first, check if it's a - * legal completion. - */ - int rc = bnx2x_func_state_change_comp(bp, o, cmd); - return rc; -} - -/** - * bnx2x_func_chk_transition - perform function state machine transition - * - * @bp: device handle - * @o: - * @params: - * - * It both checks if the requested command is legal in a current - * state and, if it's legal, sets a `next_state' in the object - * that will be used in the completion flow to set the `state' - * of the object. - * - * returns 0 if a requested command is a legal transition, - * -EINVAL otherwise. - */ -static int bnx2x_func_chk_transition(struct bnx2x *bp, - struct bnx2x_func_sp_obj *o, - struct bnx2x_func_state_params *params) -{ - enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; - enum bnx2x_func_cmd cmd = params->cmd; - - /* - * Forget all pending for completion commands if a driver only state - * transition has been requested. - */ - if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { - o->pending = 0; - o->next_state = BNX2X_F_STATE_MAX; - } - - /* - * Don't allow a next state transition if we are in the middle of - * the previous one. 
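
Condensed from the switch that follows, the function state machine is small; as a sketch:

        /* RESET       --HW_INIT--->  INITIALIZED
         * INITIALIZED --START----->  STARTED     --STOP------>  INITIALIZED
         * INITIALIZED --HW_RESET-->  RESET
         * STARTED     --TX_STOP--->  TX_STOPPED  --TX_START-->  STARTED */
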
- */ - if (o->pending) - return -EBUSY; - - switch (state) { - case BNX2X_F_STATE_RESET: - if (cmd == BNX2X_F_CMD_HW_INIT) - next_state = BNX2X_F_STATE_INITIALIZED; - - break; - case BNX2X_F_STATE_INITIALIZED: - if (cmd == BNX2X_F_CMD_START) - next_state = BNX2X_F_STATE_STARTED; - - else if (cmd == BNX2X_F_CMD_HW_RESET) - next_state = BNX2X_F_STATE_RESET; - - break; - case BNX2X_F_STATE_STARTED: - if (cmd == BNX2X_F_CMD_STOP) - next_state = BNX2X_F_STATE_INITIALIZED; - else if (cmd == BNX2X_F_CMD_TX_STOP) - next_state = BNX2X_F_STATE_TX_STOPPED; - - break; - case BNX2X_F_STATE_TX_STOPPED: - if (cmd == BNX2X_F_CMD_TX_START) - next_state = BNX2X_F_STATE_STARTED; - - break; - default: - BNX2X_ERR("Unknown state: %d\n", state); - } - - /* Transition is assured */ - if (next_state != BNX2X_F_STATE_MAX) { - DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n", - state, cmd, next_state); - o->next_state = next_state; - return 0; - } - - DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n", - state, cmd); - - return -EINVAL; -} - -/** - * bnx2x_func_init_func - performs HW init at function stage - * - * @bp: device handle - * @drv: - * - * Init HW when the current phase is - * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only - * HW blocks. - */ -static inline int bnx2x_func_init_func(struct bnx2x *bp, - const struct bnx2x_func_sp_drv_ops *drv) -{ - return drv->init_hw_func(bp); -} - -/** - * bnx2x_func_init_port - performs HW init at port stage - * - * @bp: device handle - * @drv: - * - * Init HW when the current phase is - * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and - * FUNCTION-only HW blocks. - * - */ -static inline int bnx2x_func_init_port(struct bnx2x *bp, - const struct bnx2x_func_sp_drv_ops *drv) -{ - int rc = drv->init_hw_port(bp); - if (rc) - return rc; - - return bnx2x_func_init_func(bp, drv); -} - -/** - * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage - * - * @bp: device handle - * @drv: - * - * Init HW when the current phase is - * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, - * PORT-only and FUNCTION-only HW blocks. - */ -static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp, - const struct bnx2x_func_sp_drv_ops *drv) -{ - int rc = drv->init_hw_cmn_chip(bp); - if (rc) - return rc; - - return bnx2x_func_init_port(bp, drv); -} - -/** - * bnx2x_func_init_cmn - performs HW init at common stage - * - * @bp: device handle - * @drv: - * - * Init HW when the current phase is - * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON, - * PORT-only and FUNCTION-only HW blocks. - */ -static inline int bnx2x_func_init_cmn(struct bnx2x *bp, - const struct bnx2x_func_sp_drv_ops *drv) -{ - int rc = drv->init_hw_cmn(bp); - if (rc) - return rc; - - return bnx2x_func_init_port(bp, drv); -} - -static int bnx2x_func_hw_init(struct bnx2x *bp, - struct bnx2x_func_state_params *params) -{ - u32 load_code = params->params.hw_init.load_phase; - struct bnx2x_func_sp_obj *o = params->f_obj; - const struct bnx2x_func_sp_drv_ops *drv = o->drv; - int rc = 0; - - DP(BNX2X_MSG_SP, "function %d load_code %x\n", - BP_ABS_FUNC(bp), load_code); - - /* Prepare buffers for unzipping the FW */ - rc = drv->gunzip_init(bp); - if (rc) - return rc; - - /* Prepare FW */ - rc = drv->init_fw(bp); - if (rc) { - BNX2X_ERR("Error loading firmware\n"); - goto fw_init_err; - } - - /* Handle the beginning of COMMON_XXX pases separatelly... 
*/
- switch (load_code) { - case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: - rc = bnx2x_func_init_cmn_chip(bp, drv); - if (rc) - goto init_hw_err; -
- break; - case FW_MSG_CODE_DRV_LOAD_COMMON: - rc = bnx2x_func_init_cmn(bp, drv); - if (rc) - goto init_hw_err; -
- break; - case FW_MSG_CODE_DRV_LOAD_PORT: - rc = bnx2x_func_init_port(bp, drv); - if (rc) - goto init_hw_err; -
- break; - case FW_MSG_CODE_DRV_LOAD_FUNCTION: - rc = bnx2x_func_init_func(bp, drv); - if (rc) - goto init_hw_err; -
- break; - default: - BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); - rc = -EINVAL; - } -
-init_hw_err: - drv->release_fw(bp); -
-fw_init_err: - drv->gunzip_end(bp); -
- /* In case of success, complete the command immediately: no ramrods - * have been sent. - */ - if (!rc) - o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT); -
- return rc; -} -
-/** - * bnx2x_func_reset_func - reset HW at function stage - * - * @bp: device handle - * @drv: driver-specific callbacks - * - * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only - * FUNCTION-only HW blocks. - */
-static inline void bnx2x_func_reset_func(struct bnx2x *bp, - const struct bnx2x_func_sp_drv_ops *drv) -{ - drv->reset_hw_func(bp); -} -
-/** - * bnx2x_func_reset_port - reset HW at port stage - * - * @bp: device handle - * @drv: driver-specific callbacks - * - * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset - * FUNCTION-only and PORT-only HW blocks. - * - * !!!IMPORTANT!!! - * - * It's important to call reset_port before reset_func() as the last thing - * reset_func does is pf_disable() thus disabling PGLUE_B, which - * makes any DMAE transactions impossible. - */
-static inline void bnx2x_func_reset_port(struct bnx2x *bp, - const struct bnx2x_func_sp_drv_ops *drv) -{ - drv->reset_hw_port(bp); - bnx2x_func_reset_func(bp, drv); -} -
-/** - * bnx2x_func_reset_cmn - reset HW at common stage - * - * @bp: device handle - * @drv: driver-specific callbacks - * - * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and - * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, - * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. - */
-static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, - const struct bnx2x_func_sp_drv_ops *drv) -{ - bnx2x_func_reset_port(bp, drv); - drv->reset_hw_cmn(bp); -} - -
-static inline int bnx2x_func_hw_reset(struct bnx2x *bp, - struct bnx2x_func_state_params *params) -{ - u32 reset_phase = params->params.hw_reset.reset_phase; - struct bnx2x_func_sp_obj *o = params->f_obj; - const struct bnx2x_func_sp_drv_ops *drv = o->drv; -
- DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp), - reset_phase); -
- switch (reset_phase) { - case FW_MSG_CODE_DRV_UNLOAD_COMMON: - bnx2x_func_reset_cmn(bp, drv); - break; - case FW_MSG_CODE_DRV_UNLOAD_PORT: - bnx2x_func_reset_port(bp, drv); - break; - case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: - bnx2x_func_reset_func(bp, drv); - break; - default: - BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n", - reset_phase); - break; - } -
- /* Complete the command immediately: no ramrods have been sent.
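
The load phases above nest: each broader phase runs its own init and then chains to the narrower helpers, so the effective call chains are (sketch, condensed from the helpers above):

        /* LOAD_FUNCTION:    init_hw_func()
         * LOAD_PORT:        init_hw_port() -> init_hw_func()
         * LOAD_COMMON:      init_hw_cmn()  -> init_hw_port() -> init_hw_func()
         * LOAD_COMMON_CHIP: init_hw_cmn_chip() -> init_hw_port() -> init_hw_func() */
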
*/ - o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); - - return 0; -} - -static inline int bnx2x_func_send_start(struct bnx2x *bp, - struct bnx2x_func_state_params *params) -{ - struct bnx2x_func_sp_obj *o = params->f_obj; - struct function_start_data *rdata = - (struct function_start_data *)o->rdata; - dma_addr_t data_mapping = o->rdata_mapping; - struct bnx2x_func_start_params *start_params = ¶ms->params.start; - - memset(rdata, 0, sizeof(*rdata)); - - /* Fill the ramrod data with provided parameters */ - rdata->function_mode = cpu_to_le16(start_params->mf_mode); - rdata->sd_vlan_tag = start_params->sd_vlan_tag; - rdata->path_id = BP_PATH(bp); - rdata->network_cos_mode = start_params->network_cos_mode; - - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). - */ - - return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, - U64_HI(data_mapping), - U64_LO(data_mapping), NONE_CONNECTION_TYPE); -} - -static inline int bnx2x_func_send_stop(struct bnx2x *bp, - struct bnx2x_func_state_params *params) -{ - return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, - NONE_CONNECTION_TYPE); -} - -static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp, - struct bnx2x_func_state_params *params) -{ - return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0, - NONE_CONNECTION_TYPE); -} -static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, - struct bnx2x_func_state_params *params) -{ - struct bnx2x_func_sp_obj *o = params->f_obj; - struct flow_control_configuration *rdata = - (struct flow_control_configuration *)o->rdata; - dma_addr_t data_mapping = o->rdata_mapping; - struct bnx2x_func_tx_start_params *tx_start_params = - ¶ms->params.tx_start; - int i; - - memset(rdata, 0, sizeof(*rdata)); - - rdata->dcb_enabled = tx_start_params->dcb_enabled; - rdata->dcb_version = tx_start_params->dcb_version; - rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en; - - for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) - rdata->traffic_type_to_priority_cos[i] = - tx_start_params->traffic_type_to_priority_cos[i]; - - return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, - U64_HI(data_mapping), - U64_LO(data_mapping), NONE_CONNECTION_TYPE); -} - -static int bnx2x_func_send_cmd(struct bnx2x *bp, - struct bnx2x_func_state_params *params) -{ - switch (params->cmd) { - case BNX2X_F_CMD_HW_INIT: - return bnx2x_func_hw_init(bp, params); - case BNX2X_F_CMD_START: - return bnx2x_func_send_start(bp, params); - case BNX2X_F_CMD_STOP: - return bnx2x_func_send_stop(bp, params); - case BNX2X_F_CMD_HW_RESET: - return bnx2x_func_hw_reset(bp, params); - case BNX2X_F_CMD_TX_STOP: - return bnx2x_func_send_tx_stop(bp, params); - case BNX2X_F_CMD_TX_START: - return bnx2x_func_send_tx_start(bp, params); - default: - BNX2X_ERR("Unknown command: %d\n", params->cmd); - return -EINVAL; - } -} - -void bnx2x_init_func_obj(struct bnx2x *bp, - struct bnx2x_func_sp_obj *obj, - void *rdata, dma_addr_t rdata_mapping, - struct bnx2x_func_sp_drv_ops *drv_iface) -{ - memset(obj, 0, sizeof(*obj)); - - mutex_init(&obj->one_pending_mutex); - - obj->rdata = rdata; - obj->rdata_mapping = rdata_mapping; - - obj->send_cmd = bnx2x_func_send_cmd; - obj->check_transition = bnx2x_func_chk_transition; - obj->complete_cmd = bnx2x_func_comp_cmd; - obj->wait_comp 
= bnx2x_func_wait_comp; -
- obj->drv = drv_iface; -} -
-/** - * bnx2x_func_state_change - perform Function state change transition - * - * @bp: device handle - * @params: parameters to perform the transition - * - * returns 0 in case of successfully completed transition, - * negative error code in case of failure, positive - * (EBUSY) value if there is a completion that is - * still pending (possible only if RAMROD_COMP_WAIT is - * not set in params->ramrod_flags for asynchronous - * commands). - */
-int bnx2x_func_state_change(struct bnx2x *bp, - struct bnx2x_func_state_params *params) -{ - struct bnx2x_func_sp_obj *o = params->f_obj; - int rc; - enum bnx2x_func_cmd cmd = params->cmd; - unsigned long *pending = &o->pending; -
- mutex_lock(&o->one_pending_mutex); -
- /* Check that the requested transition is legal */ - if (o->check_transition(bp, o, params)) { - mutex_unlock(&o->one_pending_mutex); - return -EINVAL; - } -
- /* Set "pending" bit */ - set_bit(cmd, pending); -
- /* Don't send a command if only driver cleanup was requested */ - if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { - bnx2x_func_state_change_comp(bp, o, cmd); - mutex_unlock(&o->one_pending_mutex); - } else { - /* Send a ramrod */ - rc = o->send_cmd(bp, params); -
- mutex_unlock(&o->one_pending_mutex); -
- if (rc) { - o->next_state = BNX2X_F_STATE_MAX; - clear_bit(cmd, pending); - smp_mb__after_clear_bit(); - return rc; - } -
- if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) { - rc = o->wait_comp(bp, o, cmd); - if (rc) - return rc; -
- return 0; - } - } -
- return !!test_bit(cmd, pending); -}
diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h deleted file mode 100644 index 9a517c2..0000000 --- a/drivers/net/bnx2x/bnx2x_sp.h +++ /dev/null @@ -1,1297 +0,0 @@
-/* bnx2x_sp.h: Broadcom Everest network driver. - * - * Copyright 2011 Broadcom Corporation - * - * Unless you and Broadcom execute a separate written software license - * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). - * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written - * consent. - * - * Maintained by: Eilon Greenstein - * Written by: Vladislav Zolotarov - * - */
-#ifndef BNX2X_SP_VERBS
-#define BNX2X_SP_VERBS -
-struct bnx2x;
-struct eth_context; -
-/* Bits representing general command's configuration */
-enum { - RAMROD_TX, - RAMROD_RX, - /* Wait until all pending commands complete */ - RAMROD_COMP_WAIT, - /* Don't send a ramrod, only update a registry */ - RAMROD_DRV_CLR_ONLY, - /* Configure HW according to the current object state */ - RAMROD_RESTORE, - /* Execute the next command now */ - RAMROD_EXEC, - /* - * Don't add a new command and continue execution of postponed - * commands. If not set, a new command will be added to the - * pending commands list.
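
These bits are combined in the ramrod_flags word of the various *_ramrod_params structures; for example (illustrative):

        unsigned long flags = 0;

        __set_bit(RAMROD_COMP_WAIT, &flags);    /* make the call synchronous */
        /* or: __set_bit(RAMROD_DRV_CLR_ONLY, &flags) to update only the
         * driver's registry without posting a ramrod */
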
- */ - RAMROD_CONT, -}; - -typedef enum { - BNX2X_OBJ_TYPE_RX, - BNX2X_OBJ_TYPE_TX, - BNX2X_OBJ_TYPE_RX_TX, -} bnx2x_obj_type; - -/* Filtering states */ -enum { - BNX2X_FILTER_MAC_PENDING, - BNX2X_FILTER_VLAN_PENDING, - BNX2X_FILTER_VLAN_MAC_PENDING, - BNX2X_FILTER_RX_MODE_PENDING, - BNX2X_FILTER_RX_MODE_SCHED, - BNX2X_FILTER_ISCSI_ETH_START_SCHED, - BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, - BNX2X_FILTER_FCOE_ETH_START_SCHED, - BNX2X_FILTER_FCOE_ETH_STOP_SCHED, - BNX2X_FILTER_MCAST_PENDING, - BNX2X_FILTER_MCAST_SCHED, - BNX2X_FILTER_RSS_CONF_PENDING, -}; - -struct bnx2x_raw_obj { - u8 func_id; - - /* Queue params */ - u8 cl_id; - u32 cid; - - /* Ramrod data buffer params */ - void *rdata; - dma_addr_t rdata_mapping; - - /* Ramrod state params */ - int state; /* "ramrod is pending" state bit */ - unsigned long *pstate; /* pointer to state buffer */ - - bnx2x_obj_type obj_type; - - int (*wait_comp)(struct bnx2x *bp, - struct bnx2x_raw_obj *o); - - bool (*check_pending)(struct bnx2x_raw_obj *o); - void (*clear_pending)(struct bnx2x_raw_obj *o); - void (*set_pending)(struct bnx2x_raw_obj *o); -}; - -/************************* VLAN-MAC commands related parameters ***************/ -struct bnx2x_mac_ramrod_data { - u8 mac[ETH_ALEN]; -}; - -struct bnx2x_vlan_ramrod_data { - u16 vlan; -}; - -struct bnx2x_vlan_mac_ramrod_data { - u8 mac[ETH_ALEN]; - u16 vlan; -}; - -union bnx2x_classification_ramrod_data { - struct bnx2x_mac_ramrod_data mac; - struct bnx2x_vlan_ramrod_data vlan; - struct bnx2x_vlan_mac_ramrod_data vlan_mac; -}; - -/* VLAN_MAC commands */ -enum bnx2x_vlan_mac_cmd { - BNX2X_VLAN_MAC_ADD, - BNX2X_VLAN_MAC_DEL, - BNX2X_VLAN_MAC_MOVE, -}; - -struct bnx2x_vlan_mac_data { - /* Requested command: BNX2X_VLAN_MAC_XX */ - enum bnx2x_vlan_mac_cmd cmd; - /* - * used to contain the data related vlan_mac_flags bits from - * ramrod parameters. - */ - unsigned long vlan_mac_flags; - - /* Needed for MOVE command */ - struct bnx2x_vlan_mac_obj *target_obj; - - union bnx2x_classification_ramrod_data u; -}; - -/*************************** Exe Queue obj ************************************/ -union bnx2x_exe_queue_cmd_data { - struct bnx2x_vlan_mac_data vlan_mac; - - struct { - /* TODO */ - } mcast; -}; - -struct bnx2x_exeq_elem { - struct list_head link; - - /* Length of this element in the exe_chunk. */ - int cmd_len; - - union bnx2x_exe_queue_cmd_data cmd_data; -}; - -union bnx2x_qable_obj; - -union bnx2x_exeq_comp_elem { - union event_ring_elem *elem; -}; - -struct bnx2x_exe_queue_obj; - -typedef int (*exe_q_validate)(struct bnx2x *bp, - union bnx2x_qable_obj *o, - struct bnx2x_exeq_elem *elem); - -/** - * @return positive is entry was optimized, 0 - if not, negative - * in case of an error. - */ -typedef int (*exe_q_optimize)(struct bnx2x *bp, - union bnx2x_qable_obj *o, - struct bnx2x_exeq_elem *elem); -typedef int (*exe_q_execute)(struct bnx2x *bp, - union bnx2x_qable_obj *o, - struct list_head *exe_chunk, - unsigned long *ramrod_flags); -typedef struct bnx2x_exeq_elem * - (*exe_q_get)(struct bnx2x_exe_queue_obj *o, - struct bnx2x_exeq_elem *elem); - -struct bnx2x_exe_queue_obj { - /* - * Commands pending for an execution. - */ - struct list_head exe_queue; - - /* - * Commands pending for an completion. 
- */ - struct list_head pending_comp; - - spinlock_t lock; - - /* Maximum length of commands' list for one execution */ - int exe_chunk_len; - - union bnx2x_qable_obj *owner; - - /****** Virtual functions ******/ - /** - * Called before commands execution for commands that are really - * going to be executed (after 'optimize'). - * - * Must run under exe_queue->lock - */ - exe_q_validate validate; - - - /** - * This will try to cancel the current pending commands list - * considering the new command. - * - * Must run under exe_queue->lock - */ - exe_q_optimize optimize; - - /** - * Run the next commands chunk (owner specific). - */ - exe_q_execute execute; - - /** - * Return the exe_queue element containing the specific command - * if any. Otherwise return NULL. - */ - exe_q_get get; -}; -/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ -/* - * Element in the VLAN_MAC registry list having all currenty configured - * rules. - */ -struct bnx2x_vlan_mac_registry_elem { - struct list_head link; - - /* - * Used to store the cam offset used for the mac/vlan/vlan-mac. - * Relevant for 57710 and 57711 only. VLANs and MACs share the - * same CAM for these chips. - */ - int cam_offset; - - /* Needed for DEL and RESTORE flows */ - unsigned long vlan_mac_flags; - - union bnx2x_classification_ramrod_data u; -}; - -/* Bits representing VLAN_MAC commands specific flags */ -enum { - BNX2X_UC_LIST_MAC, - BNX2X_ETH_MAC, - BNX2X_ISCSI_ETH_MAC, - BNX2X_NETQ_ETH_MAC, - BNX2X_DONT_CONSUME_CAM_CREDIT, - BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, -}; - -struct bnx2x_vlan_mac_ramrod_params { - /* Object to run the command from */ - struct bnx2x_vlan_mac_obj *vlan_mac_obj; - - /* General command flags: COMP_WAIT, etc. */ - unsigned long ramrod_flags; - - /* Command specific configuration request */ - struct bnx2x_vlan_mac_data user_req; -}; - -struct bnx2x_vlan_mac_obj { - struct bnx2x_raw_obj raw; - - /* Bookkeeping list: will prevent the addition of already existing - * entries. - */ - struct list_head head; - - /* TODO: Add it's initialization in the init functions */ - struct bnx2x_exe_queue_obj exe_queue; - - /* MACs credit pool */ - struct bnx2x_credit_pool_obj *macs_pool; - - /* VLANs credit pool */ - struct bnx2x_credit_pool_obj *vlans_pool; - - /* RAMROD command to be used */ - int ramrod_cmd; - - /** - * Checks if ADD-ramrod with the given params may be performed. - * - * @return zero if the element may be added - */ - - int (*check_add)(struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data); - - /** - * Checks if DEL-ramrod with the given params may be performed. - * - * @return true if the element may be deleted - */ - struct bnx2x_vlan_mac_registry_elem * - (*check_del)(struct bnx2x_vlan_mac_obj *o, - union bnx2x_classification_ramrod_data *data); - - /** - * Checks if DEL-ramrod with the given params may be performed. - * - * @return true if the element may be deleted - */ - bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o, - struct bnx2x_vlan_mac_obj *dst_o, - union bnx2x_classification_ramrod_data *data); - - /** - * Update the relevant credit object(s) (consume/return - * correspondingly). - */ - bool (*get_credit)(struct bnx2x_vlan_mac_obj *o); - bool (*put_credit)(struct bnx2x_vlan_mac_obj *o); - bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset); - bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset); - - /** - * Configures one rule in the ramrod data buffer. 
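
Taken together, the execution-queue callbacks declared above form a small pipeline; roughly (sketch, reconstructed from the declarations):

        /* add:      validate(elem)   reject an illegal request up front
         *           optimize(elem)   may cancel a pending opposite command
         *           append elem to exe_queue
         * run:      move a chunk (<= exe_chunk_len) onto pending_comp,
         *           execute(chunk)   posts the actual ramrod(s)
         * complete: flush pending_comp, then the next chunk may run */
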
- */
- void (*set_one_rule)(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - struct bnx2x_exeq_elem *elem, int rule_idx, - int cam_offset); -
- /** - * Delete all configured elements having the given - * vlan_mac_flags specification. Assumes no pending for - * execution commands. Will schedule all currently - * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags - * specification for deletion and will use the given - * ramrod_flags for the last DEL operation. - * - * @param bp - * @param o - * @param ramrod_flags RAMROD_XX flags - * - * @return 0 if the last operation has completed successfully - * and there are no more elements left, positive value - * if there are pending for completion commands, - * negative value in case of failure. - */ - int (*delete_all)(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, - unsigned long *vlan_mac_flags, - unsigned long *ramrod_flags); -
- /** - * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously - * configured elements list. - * - * @param bp - * @param p Command parameters (RAMROD_COMP_WAIT bit in - * ramrod_flags is only taken into account) - * @param ppos a pointer to the cookie that should be given back in the - * next call to make the function handle the next element. If - * *ppos is set to NULL it will restart the iterator. - * If returned *ppos == NULL this means that the last - * element has been handled. - * - * @return int - */ - int (*restore)(struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p, - struct bnx2x_vlan_mac_registry_elem **ppos); -
- /** - * Should be called on a completion arrival. - * - * @param bp - * @param o - * @param cqe Completion element we are handling - * @param ramrod_flags if RAMROD_CONT is set the next bulk of - * pending commands will be executed. - * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE - * may also be set if needed. - * - * @return 0 if there are neither pending nor waiting for - * completion commands. Positive value if there are - * pending for execution or for completion commands. - * Negative value in case of an error (including an - * error in the cqe). - */ - int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, - union event_ring_elem *cqe, - unsigned long *ramrod_flags); -
- /** - * Wait for completion of all commands. Don't schedule new ones, - * just wait. It assumes that the completion code will schedule - * for new commands. - */ - int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); -}; -
-/** RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ -
-/* RX_MODE ramrod special flags: set in rx_mode_flags field in - * a bnx2x_rx_mode_ramrod_params. - */
-enum { - BNX2X_RX_MODE_FCOE_ETH, - BNX2X_RX_MODE_ISCSI_ETH, -}; -
-enum { - BNX2X_ACCEPT_UNICAST, - BNX2X_ACCEPT_MULTICAST, - BNX2X_ACCEPT_ALL_UNICAST, - BNX2X_ACCEPT_ALL_MULTICAST, - BNX2X_ACCEPT_BROADCAST, - BNX2X_ACCEPT_UNMATCHED, - BNX2X_ACCEPT_ANY_VLAN -}; -
-struct bnx2x_rx_mode_ramrod_params { - struct bnx2x_rx_mode_obj *rx_mode_obj; - unsigned long *pstate; - int state; - u8 cl_id; - u32 cid; - u8 func_id; - unsigned long ramrod_flags; - unsigned long rx_mode_flags; -
- /* - * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to - * a tstorm_eth_mac_filter_config (e1x).
- */ - void *rdata; - dma_addr_t rdata_mapping; - - /* Rx mode settings */ - unsigned long rx_accept_flags; - - /* internal switching settings */ - unsigned long tx_accept_flags; -}; - -struct bnx2x_rx_mode_obj { - int (*config_rx_mode)(struct bnx2x *bp, - struct bnx2x_rx_mode_ramrod_params *p); - - int (*wait_comp)(struct bnx2x *bp, - struct bnx2x_rx_mode_ramrod_params *p); -}; - -/********************** Set multicast group ***********************************/ - -struct bnx2x_mcast_list_elem { - struct list_head link; - u8 *mac; -}; - -union bnx2x_mcast_config_data { - u8 *mac; - u8 bin; /* used in a RESTORE flow */ -}; - -struct bnx2x_mcast_ramrod_params { - struct bnx2x_mcast_obj *mcast_obj; - - /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */ - unsigned long ramrod_flags; - - struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */ - /** TODO: - * - rename it to macs_num. - * - Add a new command type for handling pending commands - * (remove "zero semantics"). - * - * Length of mcast_list. If zero and ADD_CONT command - post - * pending commands. - */ - int mcast_list_len; -}; - -enum { - BNX2X_MCAST_CMD_ADD, - BNX2X_MCAST_CMD_CONT, - BNX2X_MCAST_CMD_DEL, - BNX2X_MCAST_CMD_RESTORE, -}; - -struct bnx2x_mcast_obj { - struct bnx2x_raw_obj raw; - - union { - struct { - #define BNX2X_MCAST_BINS_NUM 256 - #define BNX2X_MCAST_VEC_SZ (BNX2X_MCAST_BINS_NUM / 64) - u64 vec[BNX2X_MCAST_VEC_SZ]; - - /** Number of BINs to clear. Should be updated - * immediately when a command arrives in order to - * properly create DEL commands. - */ - int num_bins_set; - } aprox_match; - - struct { - struct list_head macs; - int num_macs_set; - } exact_match; - } registry; - - /* Pending commands */ - struct list_head pending_cmds_head; - - /* A state that is set in raw.pstate, when there are pending commands */ - int sched_state; - - /* Maximal number of mcast MACs configured in one command */ - int max_cmd_len; - - /* Total number of currently pending MACs to configure: both - * in the pending commands list and in the current command. - */ - int total_pending_num; - - u8 engine_id; - - /** - * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above) - */ - int (*config_mcast)(struct bnx2x *bp, - struct bnx2x_mcast_ramrod_params *p, int cmd); - - /** - * Fills the ramrod data during the RESTORE flow. - * - * @param bp - * @param o - * @param start_idx Registry index to start from - * @param rdata_idx Index in the ramrod data to start from - * - * @return -1 if we handled the whole registry or index of the last - * handled registry element. - */ - int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, - int start_bin, int *rdata_idx); - - int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, - struct bnx2x_mcast_ramrod_params *p, int cmd); - - void (*set_one_rule)(struct bnx2x *bp, - struct bnx2x_mcast_obj *o, int idx, - union bnx2x_mcast_config_data *cfg_data, int cmd); - - /** Checks if there are more mcast MACs to be set or a previous - * command is still pending. - */ - bool (*check_pending)(struct bnx2x_mcast_obj *o); - - /** - * Set/Clear/Check SCHEDULED state of the object - */ - void (*set_sched)(struct bnx2x_mcast_obj *o); - void (*clear_sched)(struct bnx2x_mcast_obj *o); - bool (*check_sched)(struct bnx2x_mcast_obj *o); - - /* Wait until all pending commands complete */ - int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o); - - /** - * Handle the internal object counters needed for proper - * commands handling. 
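[Editor's note: the aprox_match registry above folds multicast MACs into 256 "bins" tracked by four u64 words (BNX2X_MCAST_VEC_SZ = 256 / 64). A self-contained sketch of the bin bookkeeping follows; the helper names are illustrative, and the MAC-to-bin hash itself (a CRC in the driver proper) is outside this hunk.]

	static inline void mcast_bin_set(u64 *vec, unsigned int bin)
	{
		vec[bin >> 6] |= 1ULL << (bin & 0x3f);	/* mark bin in use */
	}

	static inline bool mcast_bin_test(const u64 *vec, unsigned int bin)
	{
		return (vec[bin >> 6] >> (bin & 0x3f)) & 1;
	}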
Checks that the provided parameters are feasible.
-	 */
-	int (*validate)(struct bnx2x *bp,
-			struct bnx2x_mcast_ramrod_params *p, int cmd);
-
-	/**
-	 * Restore the values of internal counters in case of a failure.
-	 */
-	void (*revert)(struct bnx2x *bp,
-		       struct bnx2x_mcast_ramrod_params *p,
-		       int old_num_bins);
-
-	int (*get_registry_size)(struct bnx2x_mcast_obj *o);
-	void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
-};
-
-/*************************** Credit handling **********************************/
-struct bnx2x_credit_pool_obj {
-
-	/* Current amount of credit in the pool */
-	atomic_t credit;
-
-	/* Maximum allowed credit. put() will check against it. */
-	int pool_sz;
-
-	/*
-	 * Allocate a pool table statically.
-	 *
-	 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
-	 *
-	 * The set bit in the table will mean that the entry is available.
-	 */
-#define BNX2X_POOL_VEC_SIZE	(MAX_MAC_CREDIT_E2 / 64)
-	u64 pool_mirror[BNX2X_POOL_VEC_SIZE];
-
-	/* Base pool offset (initialized differently) */
-	int base_pool_offset;
-
-	/**
-	 * Get the next free pool entry.
-	 *
-	 * @return true if there was a free entry in the pool
-	 */
-	bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
-
-	/**
-	 * Return the entry back to the pool.
-	 *
-	 * @return true if entry is legal and has been successfully
-	 *         returned to the pool.
-	 */
-	bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
-
-	/**
-	 * Get the requested amount of credit from the pool.
-	 *
-	 * @param cnt Amount of requested credit
-	 * @return true if the operation is successful
-	 */
-	bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
-
-	/**
-	 * Returns the credit to the pool.
-	 *
-	 * @param cnt Amount of credit to return
-	 * @return true if the operation is successful
-	 */
-	bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
-
-	/**
-	 * Reads the current amount of credit.
-	 */
-	int (*check)(struct bnx2x_credit_pool_obj *o);
-};
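[Editor's note: one plausible lock-free shape for the get()/put() callbacks above, consistent with the field comments; this is a sketch, not the driver's actual implementation. Credit is taken optimistically and rolled back on underflow, and put() honours the pool_sz ceiling.]

	static bool example_credit_get(struct bnx2x_credit_pool_obj *o, int cnt)
	{
		if (atomic_sub_return(cnt, &o->credit) >= 0)
			return true;
		atomic_add(cnt, &o->credit);	/* roll back: not enough credit */
		return false;
	}

	static bool example_credit_put(struct bnx2x_credit_pool_obj *o, int cnt)
	{
		if (atomic_add_return(cnt, &o->credit) > o->pool_sz) {
			atomic_sub(cnt, &o->credit);	/* over-return */
			return false;
		}
		return true;
	}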
-
-/*************************** RSS configuration ********************************/
-enum {
-	/* RSS_MODE bits are mutually exclusive */
-	BNX2X_RSS_MODE_DISABLED,
-	BNX2X_RSS_MODE_REGULAR,
-	BNX2X_RSS_MODE_VLAN_PRI,
-	BNX2X_RSS_MODE_E1HOV_PRI,
-	BNX2X_RSS_MODE_IP_DSCP,
-
-	BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
-
-	BNX2X_RSS_IPV4,
-	BNX2X_RSS_IPV4_TCP,
-	BNX2X_RSS_IPV6,
-	BNX2X_RSS_IPV6_TCP,
-};
-
-struct bnx2x_config_rss_params {
-	struct bnx2x_rss_config_obj *rss_obj;
-
-	/* may have RAMROD_COMP_WAIT set only */
-	unsigned long ramrod_flags;
-
-	/* BNX2X_RSS_X bits */
-	unsigned long rss_flags;
-
-	/* Number of hash bits to take into account */
-	u8 rss_result_mask;
-
-	/* Indirection table */
-	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
-
-	/* RSS hash values */
-	u32 rss_key[10];
-
-	/* valid only iff BNX2X_RSS_UPDATE_TOE is set */
-	u16 toe_rss_bitmap;
-};
-
-struct bnx2x_rss_config_obj {
-	struct bnx2x_raw_obj raw;
-
-	/* RSS engine to use */
-	u8 engine_id;
-
-	/* Last configured indirection table */
-	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
-
-	int (*config_rss)(struct bnx2x *bp,
-			  struct bnx2x_config_rss_params *p);
-};
-
-/*********************** Queue state update ***********************************/
-
-/* UPDATE command options */
-enum {
-	BNX2X_Q_UPDATE_IN_VLAN_REM,
-	BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
-	BNX2X_Q_UPDATE_OUT_VLAN_REM,
-	BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
-	BNX2X_Q_UPDATE_ANTI_SPOOF,
-	BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
-	BNX2X_Q_UPDATE_ACTIVATE,
-	BNX2X_Q_UPDATE_ACTIVATE_CHNG,
-	BNX2X_Q_UPDATE_DEF_VLAN_EN,
-	BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
-	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
-	BNX2X_Q_UPDATE_SILENT_VLAN_REM
-};
-
-/* Allowed Queue states */
-enum bnx2x_q_state {
-	BNX2X_Q_STATE_RESET,
-	BNX2X_Q_STATE_INITIALIZED,
-	BNX2X_Q_STATE_ACTIVE,
-	BNX2X_Q_STATE_MULTI_COS,
-	BNX2X_Q_STATE_MCOS_TERMINATED,
-	BNX2X_Q_STATE_INACTIVE,
-	BNX2X_Q_STATE_STOPPED,
-	BNX2X_Q_STATE_TERMINATED,
-	BNX2X_Q_STATE_FLRED,
-	BNX2X_Q_STATE_MAX,
-};
-
-/* Allowed commands */
-enum bnx2x_queue_cmd {
-	BNX2X_Q_CMD_INIT,
-	BNX2X_Q_CMD_SETUP,
-	BNX2X_Q_CMD_SETUP_TX_ONLY,
-	BNX2X_Q_CMD_DEACTIVATE,
-	BNX2X_Q_CMD_ACTIVATE,
-	BNX2X_Q_CMD_UPDATE,
-	BNX2X_Q_CMD_UPDATE_TPA,
-	BNX2X_Q_CMD_HALT,
-	BNX2X_Q_CMD_CFC_DEL,
-	BNX2X_Q_CMD_TERMINATE,
-	BNX2X_Q_CMD_EMPTY,
-	BNX2X_Q_CMD_MAX,
-};
-
-/* queue SETUP + INIT flags */
-enum {
-	BNX2X_Q_FLG_TPA,
-	BNX2X_Q_FLG_TPA_IPV6,
-	BNX2X_Q_FLG_STATS,
-	BNX2X_Q_FLG_ZERO_STATS,
-	BNX2X_Q_FLG_ACTIVE,
-	BNX2X_Q_FLG_OV,
-	BNX2X_Q_FLG_VLAN,
-	BNX2X_Q_FLG_COS,
-	BNX2X_Q_FLG_HC,
-	BNX2X_Q_FLG_HC_EN,
-	BNX2X_Q_FLG_DHC,
-	BNX2X_Q_FLG_FCOE,
-	BNX2X_Q_FLG_LEADING_RSS,
-	BNX2X_Q_FLG_MCAST,
-	BNX2X_Q_FLG_DEF_VLAN,
-	BNX2X_Q_FLG_TX_SWITCH,
-	BNX2X_Q_FLG_TX_SEC,
-	BNX2X_Q_FLG_ANTI_SPOOF,
-	BNX2X_Q_FLG_SILENT_VLAN_REM
-};
-
-/* Queue type options: queue type may be a combination of below. */
-enum bnx2x_q_type {
-	/** TODO: Consider moving both these flags into the init()
-	 *  ramrod params.
-	 */
-	BNX2X_Q_TYPE_HAS_RX,
-	BNX2X_Q_TYPE_HAS_TX,
-};
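[Editor's note: a sketch of filling struct bnx2x_config_rss_params (defined above) for a plain round-robin spread of Rx queues, submitted through bnx2x_config_rss(), declared near the end of this header. The 7-bit hash width (implying a 128-entry table for T_ETH_INDIRECTION_TABLE_SIZE) and the names rss_conf_obj, base_cl_id and num_queues are assumptions for illustration.]

	struct bnx2x_config_rss_params params = {NULL};
	int i;

	params.rss_obj = &bp->rss_conf_obj;	/* hypothetical location */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	params.rss_result_mask = 7;	/* assumed: 2^7 table entries */

	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
		params.ind_table[i] = base_cl_id + (i % num_queues);

	rc = bnx2x_config_rss(bp, &params);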
-
-#define BNX2X_PRIMARY_CID_INDEX		0
-#define BNX2X_MULTI_TX_COS_E1X		1
-#define BNX2X_MULTI_TX_COS_E2_E3A0	2
-#define BNX2X_MULTI_TX_COS_E3B0		3
-#define BNX2X_MULTI_TX_COS		BNX2X_MULTI_TX_COS_E3B0
-
-
-struct bnx2x_queue_init_params {
-	struct {
-		unsigned long	flags;
-		u16		hc_rate;
-		u8		fw_sb_id;
-		u8		sb_cq_index;
-	} tx;
-
-	struct {
-		unsigned long	flags;
-		u16		hc_rate;
-		u8		fw_sb_id;
-		u8		sb_cq_index;
-	} rx;
-
-	/* CID context in the host memory */
-	struct eth_context *cxts[BNX2X_MULTI_TX_COS];
-
-	/* maximum number of cos supported by hardware */
-	u8 max_cos;
-};
-
-struct bnx2x_queue_terminate_params {
-	/* index within the tx_only cids of this queue object */
-	u8 cid_index;
-};
-
-struct bnx2x_queue_cfc_del_params {
-	/* index within the tx_only cids of this queue object */
-	u8 cid_index;
-};
-
-struct bnx2x_queue_update_params {
-	unsigned long	update_flags; /* BNX2X_Q_UPDATE_XX bits */
-	u16		def_vlan;
-	u16		silent_removal_value;
-	u16		silent_removal_mask;
-	/* index within the tx_only cids of this queue object */
-	u8		cid_index;
-};
-
-struct rxq_pause_params {
-	u16		bd_th_lo;
-	u16		bd_th_hi;
-	u16		rcq_th_lo;
-	u16		rcq_th_hi;
-	u16		sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
-	u16		sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
-	u16		pri_map;
-};
-
-/* general */
-struct bnx2x_general_setup_params {
-	/* valid iff BNX2X_Q_FLG_STATS */
-	u8		stat_id;
-
-	u8		spcl_id;
-	u16		mtu;
-	u8		cos;
-};
-
-struct bnx2x_rxq_setup_params {
-	/* dma */
-	dma_addr_t	dscr_map;
-	dma_addr_t	sge_map;
-	dma_addr_t	rcq_map;
-	dma_addr_t	rcq_np_map;
-
-	u16		drop_flags;
-	u16		buf_sz;
-	u8		fw_sb_id;
-	u8		cl_qzone_id;
-
-	/* valid iff BNX2X_Q_FLG_TPA */
-	u16		tpa_agg_sz;
-	u16		sge_buf_sz;
-	u8		max_sges_pkt;
-	u8		max_tpa_queues;
-	u8		rss_engine_id;
-
-	u8		cache_line_log;
-
-	u8		sb_cq_index;
-
-	/* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
-	u16		silent_removal_value;
-	u16		silent_removal_mask;
-};
-
-struct bnx2x_txq_setup_params {
-	/* dma */
-	dma_addr_t	dscr_map;
-
-	u8		fw_sb_id;
-	u8		sb_cq_index;
-	u8		cos;		/* valid iff BNX2X_Q_FLG_COS */
-	u16		traffic_type;
-	/* equal to the leading rss client id, used for TX classification */
-	u8		tss_leading_cl_id;
-
-	/* valid iff BNX2X_Q_FLG_DEF_VLAN */
-	u16		default_vlan;
-};
-
-struct bnx2x_queue_setup_params {
-	struct bnx2x_general_setup_params gen_params;
-	struct bnx2x_txq_setup_params txq_params;
-	struct bnx2x_rxq_setup_params rxq_params;
-	struct rxq_pause_params pause_params;
-	unsigned long flags;
-};
-
-struct bnx2x_queue_setup_tx_only_params {
-	struct bnx2x_general_setup_params	gen_params;
-	struct bnx2x_txq_setup_params		txq_params;
-	unsigned long				flags;
-	/* index within the tx_only cids of this queue object */
-	u8					cid_index;
-};
-
-struct bnx2x_queue_state_params {
-	struct bnx2x_queue_sp_obj *q_obj;
-
-	/* Current command */
-	enum bnx2x_queue_cmd cmd;
-
-	/* may have RAMROD_COMP_WAIT set only */
-	unsigned long ramrod_flags;
-
-	/* Params according to the current command */
-	union {
-		struct bnx2x_queue_update_params	update;
-		struct bnx2x_queue_setup_params		setup;
-		struct bnx2x_queue_init_params		init;
-		struct bnx2x_queue_setup_tx_only_params	tx_only;
-		struct bnx2x_queue_terminate_params	terminate;
-		struct bnx2x_queue_cfc_del_params	cfc_del;
-	} params;
-};
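[Editor's note: the entry point that consumes the structure above is bnx2x_queue_state_change(), declared near the end of this header. A sketch of one transition, re-activating a queue through the UPDATE command; the fp->q_obj location is an assumption.]

	struct bnx2x_queue_state_params qp = {NULL};

	qp.q_obj = &fp->q_obj;			/* hypothetical location */
	qp.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(RAMROD_COMP_WAIT, &qp.ramrod_flags);
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &qp.params.update.update_flags);
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &qp.params.update.update_flags);

	rc = bnx2x_queue_state_change(bp, &qp);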
-
-struct bnx2x_queue_sp_obj {
-	u32		cids[BNX2X_MULTI_TX_COS];
-	u8		cl_id;
-	u8		func_id;
-
-	/*
-	 * number of traffic classes supported by queue.
-	 * The primary connection of the queue supports the first traffic
-	 * class. Any further traffic class is supported by a tx-only
-	 * connection.
-	 *
-	 * Therefore max_cos is also a number of valid entries in the cids
-	 * array.
-	 */
-	u8 max_cos;
-	u8 num_tx_only, next_tx_only;
-
-	enum bnx2x_q_state state, next_state;
-
-	/* bits from enum bnx2x_q_type */
-	unsigned long	type;
-
-	/* BNX2X_Q_CMD_XX bits. This object implements a "one
-	 * pending" paradigm, but for debug and tracing purposes it's
-	 * more convenient to have different bits for different
-	 * commands.
-	 */
-	unsigned long	pending;
-
-	/* Buffer to use as ramrod data and its mapping */
-	void		*rdata;
-	dma_addr_t	rdata_mapping;
-
-	/**
-	 * Performs one state change according to the given parameters.
-	 *
-	 * @return 0 in case of success and negative value otherwise.
-	 */
-	int (*send_cmd)(struct bnx2x *bp,
-			struct bnx2x_queue_state_params *params);
-
-	/**
-	 * Sets the pending bit according to the requested transition.
-	 */
-	int (*set_pending)(struct bnx2x_queue_sp_obj *o,
-			   struct bnx2x_queue_state_params *params);
-
-	/**
-	 * Checks that the requested state transition is legal.
-	 */
-	int (*check_transition)(struct bnx2x *bp,
-				struct bnx2x_queue_sp_obj *o,
-				struct bnx2x_queue_state_params *params);
-
-	/**
-	 * Completes the pending command.
-	 */
-	int (*complete_cmd)(struct bnx2x *bp,
-			    struct bnx2x_queue_sp_obj *o,
-			    enum bnx2x_queue_cmd cmd);
-
-	int (*wait_comp)(struct bnx2x *bp,
-			 struct bnx2x_queue_sp_obj *o,
-			 enum bnx2x_queue_cmd cmd);
-};
-
-/********************** Function state update *********************************/
-/* Allowed Function states */
-enum bnx2x_func_state {
-	BNX2X_F_STATE_RESET,
-	BNX2X_F_STATE_INITIALIZED,
-	BNX2X_F_STATE_STARTED,
-	BNX2X_F_STATE_TX_STOPPED,
-	BNX2X_F_STATE_MAX,
-};
-
-/* Allowed Function commands */
-enum bnx2x_func_cmd {
-	BNX2X_F_CMD_HW_INIT,
-	BNX2X_F_CMD_START,
-	BNX2X_F_CMD_STOP,
-	BNX2X_F_CMD_HW_RESET,
-	BNX2X_F_CMD_TX_STOP,
-	BNX2X_F_CMD_TX_START,
-	BNX2X_F_CMD_MAX,
-};
-
-struct bnx2x_func_hw_init_params {
-	/* A load phase returned by MCP.
-	 *
-	 * May be:
-	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
-	 *		FW_MSG_CODE_DRV_LOAD_COMMON
-	 *		FW_MSG_CODE_DRV_LOAD_PORT
-	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
-	 */
-	u32 load_phase;
-};
-
-struct bnx2x_func_hw_reset_params {
-	/* A load phase returned by MCP.
-	 *
-	 * May be:
-	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
-	 *		FW_MSG_CODE_DRV_LOAD_COMMON
-	 *		FW_MSG_CODE_DRV_LOAD_PORT
-	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
-	 */
-	u32 reset_phase;
-};
-
-struct bnx2x_func_start_params {
-	/* Multi Function mode:
-	 *	- Single Function
-	 *	- Switch Dependent
-	 *	- Switch Independent
-	 */
-	u16 mf_mode;
-
-	/* Switch Dependent mode outer VLAN tag */
-	u16 sd_vlan_tag;
-
-	/* Function cos mode */
-	u8 network_cos_mode;
-};
-
-struct bnx2x_func_tx_start_params {
-	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
-	u8 dcb_enabled;
-	u8 dcb_version;
-	u8 dont_add_pri_0_en;
-};
-
-struct bnx2x_func_state_params {
-	struct bnx2x_func_sp_obj *f_obj;
-
-	/* Current command */
-	enum bnx2x_func_cmd cmd;
-
-	/* may have RAMROD_COMP_WAIT set only */
-	unsigned long ramrod_flags;
-
-	/* Params according to the current command */
-	union {
-		struct bnx2x_func_hw_init_params hw_init;
-		struct bnx2x_func_hw_reset_params hw_reset;
-		struct bnx2x_func_start_params start;
-		struct bnx2x_func_tx_start_params tx_start;
-	} params;
-};
-
-struct bnx2x_func_sp_drv_ops {
-	/* Init tool + runtime initialization:
-	 *	- Common Chip
-	 *	- Common (per Path)
-	 *	- Port
-	 *	- Function phases
-	 */
-	int (*init_hw_cmn_chip)(struct bnx2x *bp);
-	int (*init_hw_cmn)(struct bnx2x *bp);
-	int (*init_hw_port)(struct bnx2x *bp);
-	int (*init_hw_func)(struct bnx2x *bp);
-
-	/* Reset Function HW: Common, Port, Function phases. */
-	void (*reset_hw_cmn)(struct bnx2x *bp);
-	void (*reset_hw_port)(struct bnx2x *bp);
-	void (*reset_hw_func)(struct bnx2x *bp);
-
-	/* Init/Free GUNZIP resources */
-	int (*gunzip_init)(struct bnx2x *bp);
-	void (*gunzip_end)(struct bnx2x *bp);
-
-	/* Prepare/Release FW resources */
-	int (*init_fw)(struct bnx2x *bp);
-	void (*release_fw)(struct bnx2x *bp);
-};
-
-struct bnx2x_func_sp_obj {
-	enum bnx2x_func_state	state, next_state;
-
-	/* BNX2X_FUNC_CMD_XX bits. This object implements a "one
-	 * pending" paradigm, but for debug and tracing purposes it's
-	 * more convenient to have different bits for different
-	 * commands.
-	 */
-	unsigned long		pending;
-
-	/* Buffer to use as ramrod data and its mapping */
-	void			*rdata;
-	dma_addr_t		rdata_mapping;
-
-	/* this mutex validates that when the pending flag is taken,
-	 * the next ramrod to be sent will be the one that set the
-	 * pending bit
-	 */
-	struct mutex		one_pending_mutex;
-
-	/* Driver interface */
-	struct bnx2x_func_sp_drv_ops	*drv;
-
-	/**
-	 * Performs one state change according to the given parameters.
-	 *
-	 * @return 0 in case of success and negative value otherwise.
-	 */
-	int (*send_cmd)(struct bnx2x *bp,
-			struct bnx2x_func_state_params *params);
-
-	/**
-	 * Checks that the requested state transition is legal.
-	 */
-	int (*check_transition)(struct bnx2x *bp,
-				struct bnx2x_func_sp_obj *o,
-				struct bnx2x_func_state_params *params);
-
-	/**
-	 * Completes the pending command.
-	 */
-	int (*complete_cmd)(struct bnx2x *bp,
-			    struct bnx2x_func_sp_obj *o,
-			    enum bnx2x_func_cmd cmd);
-
-	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
-			 enum bnx2x_func_cmd cmd);
-};
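[Editor's note: a sketch of driving this object through bnx2x_func_state_change() (declared just below) to move the function from INITIALIZED to STARTED. The bp->func_obj location and the mf_mode encoding (0 taken to mean single-function mode here) are assumptions.]

	struct bnx2x_func_state_params fsp = {NULL};

	fsp.f_obj = &bp->func_obj;		/* hypothetical location */
	fsp.cmd = BNX2X_F_CMD_START;
	__set_bit(RAMROD_COMP_WAIT, &fsp.ramrod_flags);
	fsp.params.start.mf_mode = 0;		/* assumed: single function */
	fsp.params.start.sd_vlan_tag = 0;
	fsp.params.start.network_cos_mode = 0;

	rc = bnx2x_func_state_change(bp, &fsp);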
-
-/********************** Interfaces ********************************************/
-/* Queueable objects set */
-union bnx2x_qable_obj {
-	struct bnx2x_vlan_mac_obj vlan_mac;
-};
-/************** Function state update *********/
-void bnx2x_init_func_obj(struct bnx2x *bp,
-			 struct bnx2x_func_sp_obj *obj,
-			 void *rdata, dma_addr_t rdata_mapping,
-			 struct bnx2x_func_sp_drv_ops *drv_iface);
-
-int bnx2x_func_state_change(struct bnx2x *bp,
-			    struct bnx2x_func_state_params *params);
-
-enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
-					   struct bnx2x_func_sp_obj *o);
-/******************* Queue State **************/
-void bnx2x_init_queue_obj(struct bnx2x *bp,
-			  struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
-			  u8 cid_cnt, u8 func_id, void *rdata,
-			  dma_addr_t rdata_mapping, unsigned long type);
-
-int bnx2x_queue_state_change(struct bnx2x *bp,
-			     struct bnx2x_queue_state_params *params);
-
-/********************* VLAN-MAC ****************/
-void bnx2x_init_mac_obj(struct bnx2x *bp,
-			struct bnx2x_vlan_mac_obj *mac_obj,
-			u8 cl_id, u32 cid, u8 func_id, void *rdata,
-			dma_addr_t rdata_mapping, int state,
-			unsigned long *pstate, bnx2x_obj_type type,
-			struct bnx2x_credit_pool_obj *macs_pool);
-
-void bnx2x_init_vlan_obj(struct bnx2x *bp,
-			 struct bnx2x_vlan_mac_obj *vlan_obj,
-			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
-			 dma_addr_t rdata_mapping, int state,
-			 unsigned long *pstate, bnx2x_obj_type type,
-			 struct bnx2x_credit_pool_obj *vlans_pool);
-
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
-			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
-			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
-			     dma_addr_t rdata_mapping, int state,
-			     unsigned long *pstate, bnx2x_obj_type type,
-			     struct bnx2x_credit_pool_obj *macs_pool,
-			     struct bnx2x_credit_pool_obj *vlans_pool);
-
-int bnx2x_config_vlan_mac(struct bnx2x *bp,
-			  struct bnx2x_vlan_mac_ramrod_params *p);
-
-int bnx2x_vlan_mac_move(struct bnx2x *bp,
-			struct bnx2x_vlan_mac_ramrod_params *p,
-			struct bnx2x_vlan_mac_obj *dest_o);
-
-/********************* RX MODE ****************/
-
-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
-			    struct bnx2x_rx_mode_obj *o);
-
-/**
- * Send an RX_MODE ramrod according to the provided parameters.
- *
- * @param bp
- * @param p Command parameters
- *
- * @return 0 - if the operation was successful and there are no pending
- *         completions, positive number - if there are pending completions,
- *         negative - if there were errors
- */
-int bnx2x_config_rx_mode(struct bnx2x *bp,
-			 struct bnx2x_rx_mode_ramrod_params *p);
-
-/****************** MULTICASTS ****************/
-
-void bnx2x_init_mcast_obj(struct bnx2x *bp,
-			  struct bnx2x_mcast_obj *mcast_obj,
-			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
-			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
-			  int state, unsigned long *pstate,
-			  bnx2x_obj_type type);
-
-/**
- * Configure multicast MACs list. May configure a new list
- * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
- * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
- * configuration, continue to execute the pending commands
- * (BNX2X_MCAST_CMD_CONT).
- *
- * If the previous command is still pending or if the number of MACs to
- * configure is more than the maximum number of MACs in one command,
- * the current command will be enqueued to the tail of the
- * pending commands list.
- *
- * @param bp
- * @param p
- * @param cmd command to execute: BNX2X_MCAST_CMD_X
- *
- * @return 0 if the operation was successful and there are no pending
- *         completions, negative if there were errors, positive if there
- *         are pending completions.
- */
-int bnx2x_config_mcast(struct bnx2x *bp,
-		       struct bnx2x_mcast_ramrod_params *p, int cmd);
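[Editor's note: a sketch of an ADD through bnx2x_config_mcast() as just declared. The bp->mcast_obj location is an assumption; the list elements only carry MAC pointers, and stack storage is assumed sufficient here because RAMROD_COMP_WAIT makes the call wait for completion.]

	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct bnx2x_mcast_list_elem elems[2];
	static u8 macs[2][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
	};
	int i, rc;

	INIT_LIST_HEAD(&rparam.mcast_list);
	for (i = 0; i < 2; i++) {
		elems[i].mac = macs[i];
		list_add_tail(&elems[i].link, &rparam.mcast_list);
	}
	rparam.mcast_obj = &bp->mcast_obj;	/* hypothetical location */
	rparam.mcast_list_len = 2;
	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);

	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);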
-
-/****************** CREDIT POOL ****************/
-void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
-				struct bnx2x_credit_pool_obj *p, u8 func_id,
-				u8 func_num);
-void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
-				 struct bnx2x_credit_pool_obj *p, u8 func_id,
-				 u8 func_num);
-
-
-/****************** RSS CONFIGURATION ****************/
-void bnx2x_init_rss_config_obj(struct bnx2x *bp,
-			       struct bnx2x_rss_config_obj *rss_obj,
-			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
-			       void *rdata, dma_addr_t rdata_mapping,
-			       int state, unsigned long *pstate,
-			       bnx2x_obj_type type);
-
-/**
- * Updates RSS configuration according to provided parameters.
- *
- * @param bp
- * @param p
- *
- * @return 0 in case of success
- */
-int bnx2x_config_rss(struct bnx2x *bp,
-		     struct bnx2x_config_rss_params *p);
-
-/**
- * Return the current ind_table configuration.
- *
- * @param rss_obj
- * @param ind_table buffer to fill with the current indirection
- *                  table content. Should be at least
- *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
- */
-void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
-			     u8 *ind_table);
-
-#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
deleted file mode 100644
index 771f680..0000000
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ /dev/null
@@ -1,1598 +0,0 @@
-/* bnx2x_stats.c: Broadcom Everest network driver.
- *
- * Copyright (c) 2007-2011 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- *
- * Maintained by: Eilon Greenstein
- * Written by: Eliezer Tamir
- * Based on code from Michael Chan's bnx2 driver
- * UDP CSUM errata workaround by Arik Gendelman
- * Slowpath and fastpath rework by Vladislav Zolotarov
- * Statistics and Link management by Yitchak Gertner
- *
- */
-#include "bnx2x_stats.h"
-#include "bnx2x_cmn.h"
-
-
-/* Statistics */
-
-/*
- * General service functions
- */
-
-static inline long bnx2x_hilo(u32 *hiref)
-{
-	u32 lo = *(hiref + 1);
-#if (BITS_PER_LONG == 64)
-	u32 hi = *hiref;
-
-	return HILO_U64(hi, lo);
-#else
-	return lo;
-#endif
-}
-
-/*
- * Init service functions
- */
-
-/* Post the next statistics ramrod. Protect it with the spin in
- * order to ensure the strict order between statistics ramrods
- * (each ramrod has a sequence number passed in a
- * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
- * sent in order).
- */ -static void bnx2x_storm_stats_post(struct bnx2x *bp) -{ - if (!bp->stats_pending) { - int rc; - - spin_lock_bh(&bp->stats_lock); - - if (bp->stats_pending) { - spin_unlock_bh(&bp->stats_lock); - return; - } - - bp->fw_stats_req->hdr.drv_stats_counter = - cpu_to_le16(bp->stats_counter++); - - DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n", - bp->fw_stats_req->hdr.drv_stats_counter); - - - - /* send FW stats ramrod */ - rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, - U64_HI(bp->fw_stats_req_mapping), - U64_LO(bp->fw_stats_req_mapping), - NONE_CONNECTION_TYPE); - if (rc == 0) - bp->stats_pending = 1; - - spin_unlock_bh(&bp->stats_lock); - } -} - -static void bnx2x_hw_stats_post(struct bnx2x *bp) -{ - struct dmae_command *dmae = &bp->stats_dmae; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - *stats_comp = DMAE_COMP_VAL; - if (CHIP_REV_IS_SLOW(bp)) - return; - - /* loader */ - if (bp->executer_idx) { - int loader_idx = PMF_DMAE_C(bp); - u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, - true, DMAE_COMP_GRC); - opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); - - memset(dmae, 0, sizeof(struct dmae_command)); - dmae->opcode = opcode; - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); - dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + - sizeof(struct dmae_command) * - (loader_idx + 1)) >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct dmae_command) >> 2; - if (CHIP_IS_E1(bp)) - dmae->len--; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - *stats_comp = 0; - bnx2x_post_dmae(bp, dmae, loader_idx); - - } else if (bp->func_stx) { - *stats_comp = 0; - bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); - } -} - -static int bnx2x_stats_comp(struct bnx2x *bp) -{ - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - int cnt = 10; - - might_sleep(); - while (*stats_comp != DMAE_COMP_VAL) { - if (!cnt) { - BNX2X_ERR("timeout waiting for stats finished\n"); - break; - } - cnt--; - usleep_range(1000, 1000); - } - return 1; -} - -/* - * Statistics service functions - */ - -static void bnx2x_stats_pmf_update(struct bnx2x *bp) -{ - struct dmae_command *dmae; - u32 opcode; - int loader_idx = PMF_DMAE_C(bp); - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - /* sanity */ - if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) { - BNX2X_ERR("BUG!\n"); - return; - } - - bp->executer_idx = 0; - - opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0); - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); - dmae->src_addr_lo = bp->port.port_stx >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); - dmae->len = DMAE_LEN32_RD_MAX; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); - dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + - DMAE_LEN32_RD_MAX * 4); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + - DMAE_LEN32_RD_MAX * 4); - dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = 
U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); -} - -static void bnx2x_port_stats_init(struct bnx2x *bp) -{ - struct dmae_command *dmae; - int port = BP_PORT(bp); - u32 opcode; - int loader_idx = PMF_DMAE_C(bp); - u32 mac_addr; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - /* sanity */ - if (!bp->link_vars.link_up || !bp->port.pmf) { - BNX2X_ERR("BUG!\n"); - return; - } - - bp->executer_idx = 0; - - /* MCP */ - opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, - true, DMAE_COMP_GRC); - - if (bp->port.port_stx) { - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); - dmae->dst_addr_lo = bp->port.port_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_port_stats) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - } - - if (bp->func_stx) { - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); - dmae->dst_addr_lo = bp->func_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_func_stats) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - } - - /* MAC */ - opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, - true, DMAE_COMP_GRC); - - /* EMAC is special */ - if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { - mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); - - /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (mac_addr + - EMAC_REG_EMAC_RX_STAT_AC) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); - dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - /* EMAC_REG_EMAC_RX_STAT_AC_28 */ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (mac_addr + - EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct emac_stats, rx_stat_falsecarriererrors)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct emac_stats, rx_stat_falsecarriererrors)); - dmae->len = 1; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (mac_addr + - EMAC_REG_EMAC_TX_STAT_AC) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); - dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - } else { - u32 tx_src_addr_lo, rx_src_addr_lo; - u16 rx_len, tx_len; - - /* configure the params according 
to MAC type */ - switch (bp->link_vars.mac_type) { - case MAC_TYPE_BMAC: - mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : - NIG_REG_INGRESS_BMAC0_MEM); - - /* BIGMAC_REGISTER_TX_STAT_GTPKT .. - BIGMAC_REGISTER_TX_STAT_GTBYT */ - if (CHIP_IS_E1x(bp)) { - tx_src_addr_lo = (mac_addr + - BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; - tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - - BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; - rx_src_addr_lo = (mac_addr + - BIGMAC_REGISTER_RX_STAT_GR64) >> 2; - rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - - BIGMAC_REGISTER_RX_STAT_GR64) >> 2; - } else { - tx_src_addr_lo = (mac_addr + - BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; - tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - - BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; - rx_src_addr_lo = (mac_addr + - BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; - rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - - BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; - } - break; - - case MAC_TYPE_UMAC: /* handled by MSTAT */ - case MAC_TYPE_XMAC: /* handled by MSTAT */ - default: - mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0; - tx_src_addr_lo = (mac_addr + - MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2; - rx_src_addr_lo = (mac_addr + - MSTAT_REG_RX_STAT_GR64_LO) >> 2; - tx_len = sizeof(bp->slowpath-> - mac_stats.mstat_stats.stats_tx) >> 2; - rx_len = sizeof(bp->slowpath-> - mac_stats.mstat_stats.stats_rx) >> 2; - break; - } - - /* TX stats */ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = tx_src_addr_lo; - dmae->src_addr_hi = 0; - dmae->len = tx_len; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - /* RX stats */ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_hi = 0; - dmae->src_addr_lo = rx_src_addr_lo; - dmae->dst_addr_lo = - U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); - dmae->dst_addr_hi = - U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); - dmae->len = rx_len; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - } - - /* NIG */ - if (!CHIP_IS_E3(bp)) { - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : - NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt0_lo)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt0_lo)); - dmae->len = (2*sizeof(u32)) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_EGRESS_MAC_PKT1 : - NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt1_lo)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt1_lo)); - dmae->len = (2*sizeof(u32)) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - } - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, - true, DMAE_COMP_PCI); - dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : - NIG_REG_STAT0_BRB_DISCARD) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); - dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; - - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; -} - -static void bnx2x_func_stats_init(struct bnx2x *bp) -{ - struct dmae_command *dmae = &bp->stats_dmae; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - /* sanity */ - if (!bp->func_stx) { - BNX2X_ERR("BUG!\n"); - return; - } - - bp->executer_idx = 0; - memset(dmae, 0, sizeof(struct dmae_command)); - - dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, - true, DMAE_COMP_PCI); - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); - dmae->dst_addr_lo = bp->func_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_func_stats) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; -} - -static void bnx2x_stats_start(struct bnx2x *bp) -{ - if (bp->port.pmf) - bnx2x_port_stats_init(bp); - - else if (bp->func_stx) - bnx2x_func_stats_init(bp); - - bnx2x_hw_stats_post(bp); - bnx2x_storm_stats_post(bp); -} - -static void bnx2x_stats_pmf_start(struct bnx2x *bp) -{ - bnx2x_stats_comp(bp); - bnx2x_stats_pmf_update(bp); - bnx2x_stats_start(bp); -} - -static void bnx2x_stats_restart(struct bnx2x *bp) -{ - bnx2x_stats_comp(bp); - bnx2x_stats_start(bp); -} - -static void bnx2x_bmac_stats_update(struct bnx2x *bp) -{ - struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; - struct { - u32 lo; - u32 hi; - } diff; - - if (CHIP_IS_E1x(bp)) { - struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats); - - /* the macros below will use "bmac1_stats" type */ - UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); - UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); - UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); - UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); - UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); - UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); - UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); - UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); - UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); - - UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); - UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); - UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); - UPDATE_STAT64(tx_stat_gt127, - tx_stat_etherstatspkts65octetsto127octets); 
- UPDATE_STAT64(tx_stat_gt255, - tx_stat_etherstatspkts128octetsto255octets); - UPDATE_STAT64(tx_stat_gt511, - tx_stat_etherstatspkts256octetsto511octets); - UPDATE_STAT64(tx_stat_gt1023, - tx_stat_etherstatspkts512octetsto1023octets); - UPDATE_STAT64(tx_stat_gt1518, - tx_stat_etherstatspkts1024octetsto1522octets); - UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); - UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); - UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); - UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); - UPDATE_STAT64(tx_stat_gterr, - tx_stat_dot3statsinternalmactransmiterrors); - UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); - - } else { - struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); - - /* the macros below will use "bmac2_stats" type */ - UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); - UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); - UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); - UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); - UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); - UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); - UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); - UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); - UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); - UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); - UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); - UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); - UPDATE_STAT64(tx_stat_gt127, - tx_stat_etherstatspkts65octetsto127octets); - UPDATE_STAT64(tx_stat_gt255, - tx_stat_etherstatspkts128octetsto255octets); - UPDATE_STAT64(tx_stat_gt511, - tx_stat_etherstatspkts256octetsto511octets); - UPDATE_STAT64(tx_stat_gt1023, - tx_stat_etherstatspkts512octetsto1023octets); - UPDATE_STAT64(tx_stat_gt1518, - tx_stat_etherstatspkts1024octetsto1522octets); - UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); - UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); - UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); - UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); - UPDATE_STAT64(tx_stat_gterr, - tx_stat_dot3statsinternalmactransmiterrors); - UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); - } - - estats->pause_frames_received_hi = - pstats->mac_stx[1].rx_stat_mac_xpf_hi; - estats->pause_frames_received_lo = - pstats->mac_stx[1].rx_stat_mac_xpf_lo; - - estats->pause_frames_sent_hi = - pstats->mac_stx[1].tx_stat_outxoffsent_hi; - estats->pause_frames_sent_lo = - pstats->mac_stx[1].tx_stat_outxoffsent_lo; -} - -static void bnx2x_mstat_stats_update(struct bnx2x *bp) -{ - struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; - - struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); - - ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); - ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); - ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); - ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); - ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); - ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); - ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); - ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); - ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); - ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); - - - ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); - ADD_STAT64(stats_tx.tx_gt127, - tx_stat_etherstatspkts65octetsto127octets); - 
ADD_STAT64(stats_tx.tx_gt255, - tx_stat_etherstatspkts128octetsto255octets); - ADD_STAT64(stats_tx.tx_gt511, - tx_stat_etherstatspkts256octetsto511octets); - ADD_STAT64(stats_tx.tx_gt1023, - tx_stat_etherstatspkts512octetsto1023octets); - ADD_STAT64(stats_tx.tx_gt1518, - tx_stat_etherstatspkts1024octetsto1522octets); - ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); - - ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); - ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); - ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); - - ADD_STAT64(stats_tx.tx_gterr, - tx_stat_dot3statsinternalmactransmiterrors); - ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); - - ADD_64(estats->etherstatspkts1024octetsto1522octets_hi, - new->stats_tx.tx_gt1518_hi, - estats->etherstatspkts1024octetsto1522octets_lo, - new->stats_tx.tx_gt1518_lo); - - ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt2047_hi, - estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt2047_lo); - - ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt4095_hi, - estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt4095_lo); - - ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt9216_hi, - estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt9216_lo); - - - ADD_64(estats->etherstatspktsover1522octets_hi, - new->stats_tx.tx_gt16383_hi, - estats->etherstatspktsover1522octets_lo, - new->stats_tx.tx_gt16383_lo); - - estats->pause_frames_received_hi = - pstats->mac_stx[1].rx_stat_mac_xpf_hi; - estats->pause_frames_received_lo = - pstats->mac_stx[1].rx_stat_mac_xpf_lo; - - estats->pause_frames_sent_hi = - pstats->mac_stx[1].tx_stat_outxoffsent_hi; - estats->pause_frames_sent_lo = - pstats->mac_stx[1].tx_stat_outxoffsent_lo; -} - -static void bnx2x_emac_stats_update(struct bnx2x *bp) -{ - struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); - struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; - - UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); - UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); - UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); - UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); - UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); - UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); - UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); - UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); - UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); - UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); - UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); - UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); - UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); - UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); - UPDATE_EXTEND_STAT(tx_stat_outxonsent); - UPDATE_EXTEND_STAT(tx_stat_outxoffsent); - UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); - UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); - UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); - UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); - UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); - UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); - UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); 
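[Editor's note: the UPDATE_STAT64/ADD_STAT64/UPDATE_EXTEND_STAT helpers used throughout these functions are defined in bnx2x_stats.h, which is not part of this patch. They all build on the same idea of 64-bit counters kept as explicit {hi, lo} u32 pairs; a self-contained sketch of the carry step, with an illustrative name:]

	static inline void example_add_extend_64(u32 *hi, u32 *lo, u32 delta)
	{
		u32 old_lo = *lo;

		*lo += delta;
		if (*lo < old_lo)
			(*hi)++;	/* low word wrapped: carry into hi */
	}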
- UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); - UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); - UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); - - estats->pause_frames_received_hi = - pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; - estats->pause_frames_received_lo = - pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; - ADD_64(estats->pause_frames_received_hi, - pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, - estats->pause_frames_received_lo, - pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); - - estats->pause_frames_sent_hi = - pstats->mac_stx[1].tx_stat_outxonsent_hi; - estats->pause_frames_sent_lo = - pstats->mac_stx[1].tx_stat_outxonsent_lo; - ADD_64(estats->pause_frames_sent_hi, - pstats->mac_stx[1].tx_stat_outxoffsent_hi, - estats->pause_frames_sent_lo, - pstats->mac_stx[1].tx_stat_outxoffsent_lo); -} - -static int bnx2x_hw_stats_update(struct bnx2x *bp) -{ - struct nig_stats *new = bnx2x_sp(bp, nig_stats); - struct nig_stats *old = &(bp->port.old_nig_stats); - struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; - struct { - u32 lo; - u32 hi; - } diff; - - switch (bp->link_vars.mac_type) { - case MAC_TYPE_BMAC: - bnx2x_bmac_stats_update(bp); - break; - - case MAC_TYPE_EMAC: - bnx2x_emac_stats_update(bp); - break; - - case MAC_TYPE_UMAC: - case MAC_TYPE_XMAC: - bnx2x_mstat_stats_update(bp); - break; - - case MAC_TYPE_NONE: /* unreached */ - BNX2X_ERR("stats updated by DMAE but no MAC active\n"); - return -1; - - default: /* unreached */ - BNX2X_ERR("Unknown MAC type\n"); - } - - ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, - new->brb_discard - old->brb_discard); - ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, - new->brb_truncate - old->brb_truncate); - - if (!CHIP_IS_E3(bp)) { - UPDATE_STAT64_NIG(egress_mac_pkt0, - etherstatspkts1024octetsto1522octets); - UPDATE_STAT64_NIG(egress_mac_pkt1, - etherstatspktsover1522octets); - } - - memcpy(old, new, sizeof(struct nig_stats)); - - memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), - sizeof(struct mac_stx)); - estats->brb_drop_hi = pstats->brb_drop_hi; - estats->brb_drop_lo = pstats->brb_drop_lo; - - pstats->host_port_stats_start = ++pstats->host_port_stats_end; - - if (!BP_NOMCP(bp)) { - u32 nig_timer_max = - SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); - if (nig_timer_max != estats->nig_timer_max) { - estats->nig_timer_max = nig_timer_max; - BNX2X_ERR("NIG timer max (%u)\n", - estats->nig_timer_max); - } - } - - return 0; -} - -static int bnx2x_storm_stats_update(struct bnx2x *bp) -{ - struct tstorm_per_port_stats *tport = - &bp->fw_stats_data->port.tstorm_port_statistics; - struct tstorm_per_pf_stats *tfunc = - &bp->fw_stats_data->pf.tstorm_pf_statistics; - struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); - struct bnx2x_eth_stats *estats = &bp->eth_stats; - struct stats_counter *counters = &bp->fw_stats_data->storm_counters; - int i; - u16 cur_stats_counter; - - /* Make sure we use the value of the counter - * used for sending the last stats ramrod. - */ - spin_lock_bh(&bp->stats_lock); - cur_stats_counter = bp->stats_counter - 1; - spin_unlock_bh(&bp->stats_lock); - - /* are storm stats valid? 
*/ - if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by xstorm" - " xstorm counter (0x%x) != stats_counter (0x%x)\n", - le16_to_cpu(counters->xstats_counter), bp->stats_counter); - return -EAGAIN; - } - - if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by ustorm" - " ustorm counter (0x%x) != stats_counter (0x%x)\n", - le16_to_cpu(counters->ustats_counter), bp->stats_counter); - return -EAGAIN; - } - - if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by cstorm" - " cstorm counter (0x%x) != stats_counter (0x%x)\n", - le16_to_cpu(counters->cstats_counter), bp->stats_counter); - return -EAGAIN; - } - - if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "stats not updated by tstorm" - " tstorm counter (0x%x) != stats_counter (0x%x)\n", - le16_to_cpu(counters->tstats_counter), bp->stats_counter); - return -EAGAIN; - } - - memcpy(&(fstats->total_bytes_received_hi), - &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), - sizeof(struct host_func_stats) - 2*sizeof(u32)); - estats->error_bytes_received_hi = 0; - estats->error_bytes_received_lo = 0; - estats->etherstatsoverrsizepkts_hi = 0; - estats->etherstatsoverrsizepkts_lo = 0; - estats->no_buff_discard_hi = 0; - estats->no_buff_discard_lo = 0; - estats->total_tpa_aggregations_hi = 0; - estats->total_tpa_aggregations_lo = 0; - estats->total_tpa_aggregated_frames_hi = 0; - estats->total_tpa_aggregated_frames_lo = 0; - estats->total_tpa_bytes_hi = 0; - estats->total_tpa_bytes_lo = 0; - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - struct tstorm_per_queue_stats *tclient = - &bp->fw_stats_data->queue_stats[i]. - tstorm_queue_statistics; - struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; - struct ustorm_per_queue_stats *uclient = - &bp->fw_stats_data->queue_stats[i]. - ustorm_queue_statistics; - struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; - struct xstorm_per_queue_stats *xclient = - &bp->fw_stats_data->queue_stats[i]. 
- xstorm_queue_statistics; - struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - u32 diff; - - DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, " - "bcast_sent 0x%x mcast_sent 0x%x\n", - i, xclient->ucast_pkts_sent, - xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); - - DP(BNX2X_MSG_STATS, "---------------\n"); - - qstats->total_broadcast_bytes_received_hi = - le32_to_cpu(tclient->rcv_bcast_bytes.hi); - qstats->total_broadcast_bytes_received_lo = - le32_to_cpu(tclient->rcv_bcast_bytes.lo); - - qstats->total_multicast_bytes_received_hi = - le32_to_cpu(tclient->rcv_mcast_bytes.hi); - qstats->total_multicast_bytes_received_lo = - le32_to_cpu(tclient->rcv_mcast_bytes.lo); - - qstats->total_unicast_bytes_received_hi = - le32_to_cpu(tclient->rcv_ucast_bytes.hi); - qstats->total_unicast_bytes_received_lo = - le32_to_cpu(tclient->rcv_ucast_bytes.lo); - - /* - * sum to total_bytes_received all - * unicast/multicast/broadcast - */ - qstats->total_bytes_received_hi = - qstats->total_broadcast_bytes_received_hi; - qstats->total_bytes_received_lo = - qstats->total_broadcast_bytes_received_lo; - - ADD_64(qstats->total_bytes_received_hi, - qstats->total_multicast_bytes_received_hi, - qstats->total_bytes_received_lo, - qstats->total_multicast_bytes_received_lo); - - ADD_64(qstats->total_bytes_received_hi, - qstats->total_unicast_bytes_received_hi, - qstats->total_bytes_received_lo, - qstats->total_unicast_bytes_received_lo); - - qstats->valid_bytes_received_hi = - qstats->total_bytes_received_hi; - qstats->valid_bytes_received_lo = - qstats->total_bytes_received_lo; - - - UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, - total_unicast_packets_received); - UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, - total_multicast_packets_received); - UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, - total_broadcast_packets_received); - UPDATE_EXTEND_TSTAT(pkts_too_big_discard, - etherstatsoverrsizepkts); - UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); - - SUB_EXTEND_USTAT(ucast_no_buff_pkts, - total_unicast_packets_received); - SUB_EXTEND_USTAT(mcast_no_buff_pkts, - total_multicast_packets_received); - SUB_EXTEND_USTAT(bcast_no_buff_pkts, - total_broadcast_packets_received); - UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard); - UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); - UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); - - qstats->total_broadcast_bytes_transmitted_hi = - le32_to_cpu(xclient->bcast_bytes_sent.hi); - qstats->total_broadcast_bytes_transmitted_lo = - le32_to_cpu(xclient->bcast_bytes_sent.lo); - - qstats->total_multicast_bytes_transmitted_hi = - le32_to_cpu(xclient->mcast_bytes_sent.hi); - qstats->total_multicast_bytes_transmitted_lo = - le32_to_cpu(xclient->mcast_bytes_sent.lo); - - qstats->total_unicast_bytes_transmitted_hi = - le32_to_cpu(xclient->ucast_bytes_sent.hi); - qstats->total_unicast_bytes_transmitted_lo = - le32_to_cpu(xclient->ucast_bytes_sent.lo); - /* - * sum to total_bytes_transmitted all - * unicast/multicast/broadcast - */ - qstats->total_bytes_transmitted_hi = - qstats->total_unicast_bytes_transmitted_hi; - qstats->total_bytes_transmitted_lo = - qstats->total_unicast_bytes_transmitted_lo; - - ADD_64(qstats->total_bytes_transmitted_hi, - qstats->total_broadcast_bytes_transmitted_hi, - qstats->total_bytes_transmitted_lo, - qstats->total_broadcast_bytes_transmitted_lo); - - ADD_64(qstats->total_bytes_transmitted_hi, - qstats->total_multicast_bytes_transmitted_hi, - qstats->total_bytes_transmitted_lo, - 
qstats->total_multicast_bytes_transmitted_lo); - - UPDATE_EXTEND_XSTAT(ucast_pkts_sent, - total_unicast_packets_transmitted); - UPDATE_EXTEND_XSTAT(mcast_pkts_sent, - total_multicast_packets_transmitted); - UPDATE_EXTEND_XSTAT(bcast_pkts_sent, - total_broadcast_packets_transmitted); - - UPDATE_EXTEND_TSTAT(checksum_discard, - total_packets_received_checksum_discarded); - UPDATE_EXTEND_TSTAT(ttl0_discard, - total_packets_received_ttl0_discarded); - - UPDATE_EXTEND_XSTAT(error_drop_pkts, - total_transmitted_dropped_packets_error); - - /* TPA aggregations completed */ - UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations); - /* Number of network frames aggregated by TPA */ - UPDATE_EXTEND_USTAT(coalesced_pkts, - total_tpa_aggregated_frames); - /* Total number of bytes in completed TPA aggregations */ - qstats->total_tpa_bytes_lo = - le32_to_cpu(uclient->coalesced_bytes.lo); - qstats->total_tpa_bytes_hi = - le32_to_cpu(uclient->coalesced_bytes.hi); - - /* TPA stats per-function */ - ADD_64(estats->total_tpa_aggregations_hi, - qstats->total_tpa_aggregations_hi, - estats->total_tpa_aggregations_lo, - qstats->total_tpa_aggregations_lo); - ADD_64(estats->total_tpa_aggregated_frames_hi, - qstats->total_tpa_aggregated_frames_hi, - estats->total_tpa_aggregated_frames_lo, - qstats->total_tpa_aggregated_frames_lo); - ADD_64(estats->total_tpa_bytes_hi, - qstats->total_tpa_bytes_hi, - estats->total_tpa_bytes_lo, - qstats->total_tpa_bytes_lo); - - ADD_64(fstats->total_bytes_received_hi, - qstats->total_bytes_received_hi, - fstats->total_bytes_received_lo, - qstats->total_bytes_received_lo); - ADD_64(fstats->total_bytes_transmitted_hi, - qstats->total_bytes_transmitted_hi, - fstats->total_bytes_transmitted_lo, - qstats->total_bytes_transmitted_lo); - ADD_64(fstats->total_unicast_packets_received_hi, - qstats->total_unicast_packets_received_hi, - fstats->total_unicast_packets_received_lo, - qstats->total_unicast_packets_received_lo); - ADD_64(fstats->total_multicast_packets_received_hi, - qstats->total_multicast_packets_received_hi, - fstats->total_multicast_packets_received_lo, - qstats->total_multicast_packets_received_lo); - ADD_64(fstats->total_broadcast_packets_received_hi, - qstats->total_broadcast_packets_received_hi, - fstats->total_broadcast_packets_received_lo, - qstats->total_broadcast_packets_received_lo); - ADD_64(fstats->total_unicast_packets_transmitted_hi, - qstats->total_unicast_packets_transmitted_hi, - fstats->total_unicast_packets_transmitted_lo, - qstats->total_unicast_packets_transmitted_lo); - ADD_64(fstats->total_multicast_packets_transmitted_hi, - qstats->total_multicast_packets_transmitted_hi, - fstats->total_multicast_packets_transmitted_lo, - qstats->total_multicast_packets_transmitted_lo); - ADD_64(fstats->total_broadcast_packets_transmitted_hi, - qstats->total_broadcast_packets_transmitted_hi, - fstats->total_broadcast_packets_transmitted_lo, - qstats->total_broadcast_packets_transmitted_lo); - ADD_64(fstats->valid_bytes_received_hi, - qstats->valid_bytes_received_hi, - fstats->valid_bytes_received_lo, - qstats->valid_bytes_received_lo); - - ADD_64(estats->etherstatsoverrsizepkts_hi, - qstats->etherstatsoverrsizepkts_hi, - estats->etherstatsoverrsizepkts_lo, - qstats->etherstatsoverrsizepkts_lo); - ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi, - estats->no_buff_discard_lo, qstats->no_buff_discard_lo); - } - - ADD_64(fstats->total_bytes_received_hi, - estats->rx_stat_ifhcinbadoctets_hi, - fstats->total_bytes_received_lo, - 
estats->rx_stat_ifhcinbadoctets_lo); - - ADD_64(fstats->total_bytes_received_hi, - tfunc->rcv_error_bytes.hi, - fstats->total_bytes_received_lo, - tfunc->rcv_error_bytes.lo); - - memcpy(estats, &(fstats->total_bytes_received_hi), - sizeof(struct host_func_stats) - 2*sizeof(u32)); - - ADD_64(estats->error_bytes_received_hi, - tfunc->rcv_error_bytes.hi, - estats->error_bytes_received_lo, - tfunc->rcv_error_bytes.lo); - - ADD_64(estats->etherstatsoverrsizepkts_hi, - estats->rx_stat_dot3statsframestoolong_hi, - estats->etherstatsoverrsizepkts_lo, - estats->rx_stat_dot3statsframestoolong_lo); - ADD_64(estats->error_bytes_received_hi, - estats->rx_stat_ifhcinbadoctets_hi, - estats->error_bytes_received_lo, - estats->rx_stat_ifhcinbadoctets_lo); - - if (bp->port.pmf) { - estats->mac_filter_discard = - le32_to_cpu(tport->mac_filter_discard); - estats->mf_tag_discard = - le32_to_cpu(tport->mf_tag_discard); - estats->brb_truncate_discard = - le32_to_cpu(tport->brb_truncate_discard); - estats->mac_discard = le32_to_cpu(tport->mac_discard); - } - - fstats->host_func_stats_start = ++fstats->host_func_stats_end; - - bp->stats_pending = 0; - - return 0; -} - -static void bnx2x_net_stats_update(struct bnx2x *bp) -{ - struct bnx2x_eth_stats *estats = &bp->eth_stats; - struct net_device_stats *nstats = &bp->dev->stats; - unsigned long tmp; - int i; - - nstats->rx_packets = - bnx2x_hilo(&estats->total_unicast_packets_received_hi) + - bnx2x_hilo(&estats->total_multicast_packets_received_hi) + - bnx2x_hilo(&estats->total_broadcast_packets_received_hi); - - nstats->tx_packets = - bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) + - bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) + - bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi); - - nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); - - nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); - - tmp = estats->mac_discard; - for_each_rx_queue(bp, i) - tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); - nstats->rx_dropped = tmp; - - nstats->tx_dropped = 0; - - nstats->multicast = - bnx2x_hilo(&estats->total_multicast_packets_received_hi); - - nstats->collisions = - bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi); - - nstats->rx_length_errors = - bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) + - bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi); - nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) + - bnx2x_hilo(&estats->brb_truncate_hi); - nstats->rx_crc_errors = - bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi); - nstats->rx_frame_errors = - bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); - nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); - nstats->rx_missed_errors = 0; - - nstats->rx_errors = nstats->rx_length_errors + - nstats->rx_over_errors + - nstats->rx_crc_errors + - nstats->rx_frame_errors + - nstats->rx_fifo_errors + - nstats->rx_missed_errors; - - nstats->tx_aborted_errors = - bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) + - bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi); - nstats->tx_carrier_errors = - bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi); - nstats->tx_fifo_errors = 0; - nstats->tx_heartbeat_errors = 0; - nstats->tx_window_errors = 0; - - nstats->tx_errors = nstats->tx_aborted_errors + - nstats->tx_carrier_errors + - bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi); -} - -static void bnx2x_drv_stats_update(struct bnx2x *bp) -{ - struct bnx2x_eth_stats *estats = 
&bp->eth_stats; - int i; - - estats->driver_xoff = 0; - estats->rx_err_discard_pkt = 0; - estats->rx_skb_alloc_failed = 0; - estats->hw_csum_err = 0; - for_each_queue(bp, i) { - struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; - - estats->driver_xoff += qstats->driver_xoff; - estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt; - estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed; - estats->hw_csum_err += qstats->hw_csum_err; - } -} - -static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp) -{ - u32 val; - - if (SHMEM2_HAS(bp, edebug_driver_if[1])) { - val = SHMEM2_RD(bp, edebug_driver_if[1]); - - if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) - return true; - } - - return false; -} - -static void bnx2x_stats_update(struct bnx2x *bp) -{ - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - if (bnx2x_edebug_stats_stopped(bp)) - return; - - if (*stats_comp != DMAE_COMP_VAL) - return; - - if (bp->port.pmf) - bnx2x_hw_stats_update(bp); - - if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { - BNX2X_ERR("storm stats were not updated for 3 times\n"); - bnx2x_panic(); - return; - } - - bnx2x_net_stats_update(bp); - bnx2x_drv_stats_update(bp); - - if (netif_msg_timer(bp)) { - struct bnx2x_eth_stats *estats = &bp->eth_stats; - int i, cos; - - netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", - estats->brb_drop_lo, estats->brb_truncate_lo); - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - - printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)" - " rx pkt(%lu) rx calls(%lu %lu)\n", - fp->name, (le16_to_cpu(*fp->rx_cons_sb) - - fp->rx_comp_cons), - le16_to_cpu(*fp->rx_cons_sb), - bnx2x_hilo(&qstats-> - total_unicast_packets_received_hi), - fp->rx_calls, fp->rx_pkt); - } - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - struct bnx2x_fp_txdata *txdata; - struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - struct netdev_queue *txq; - - printk(KERN_DEBUG "%s: tx pkt(%lu) (Xoff events %u)", - fp->name, bnx2x_hilo( - &qstats->total_unicast_packets_transmitted_hi), - qstats->driver_xoff); - - for_each_cos_in_tx_queue(fp, cos) { - txdata = &fp->txdata[cos]; - txq = netdev_get_tx_queue(bp->dev, - FP_COS_TO_TXQ(fp, cos)); - - printk(KERN_DEBUG "%d: tx avail(%4u)" - " *tx_cons_sb(%u)" - " tx calls (%lu)" - " %s\n", - cos, - bnx2x_tx_avail(bp, txdata), - le16_to_cpu(*txdata->tx_cons_sb), - txdata->tx_pkt, - (netif_tx_queue_stopped(txq) ? 
- "Xoff" : "Xon") - ); - } - } - } - - bnx2x_hw_stats_post(bp); - bnx2x_storm_stats_post(bp); -} - -static void bnx2x_port_stats_stop(struct bnx2x *bp) -{ - struct dmae_command *dmae; - u32 opcode; - int loader_idx = PMF_DMAE_C(bp); - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - bp->executer_idx = 0; - - opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0); - - if (bp->port.port_stx) { - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - if (bp->func_stx) - dmae->opcode = bnx2x_dmae_opcode_add_comp( - opcode, DMAE_COMP_GRC); - else - dmae->opcode = bnx2x_dmae_opcode_add_comp( - opcode, DMAE_COMP_PCI); - - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); - dmae->dst_addr_lo = bp->port.port_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_port_stats) >> 2; - if (bp->func_stx) { - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - } else { - dmae->comp_addr_lo = - U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = - U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; - } - } - - if (bp->func_stx) { - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = - bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); - dmae->dst_addr_lo = bp->func_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_func_stats) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; - } -} - -static void bnx2x_stats_stop(struct bnx2x *bp) -{ - int update = 0; - - bnx2x_stats_comp(bp); - - if (bp->port.pmf) - update = (bnx2x_hw_stats_update(bp) == 0); - - update |= (bnx2x_storm_stats_update(bp) == 0); - - if (update) { - bnx2x_net_stats_update(bp); - - if (bp->port.pmf) - bnx2x_port_stats_stop(bp); - - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); - } -} - -static void bnx2x_stats_do_nothing(struct bnx2x *bp) -{ -} - -static const struct { - void (*action)(struct bnx2x *bp); - enum bnx2x_stats_state next_state; -} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { -/* state event */ -{ -/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED}, -/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED}, -/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}, -/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED} -}, -{ -/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED}, -/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED}, -/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED}, -/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED} -} -}; - -void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) -{ - enum bnx2x_stats_state state; - if (unlikely(bp->panic)) - return; - bnx2x_stats_stm[bp->stats_state][event].action(bp); - spin_lock_bh(&bp->stats_lock); - state = bp->stats_state; - bp->stats_state = bnx2x_stats_stm[state][event].next_state; - spin_unlock_bh(&bp->stats_lock); - - if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) - DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", - state, event, bp->stats_state); -} - -static void bnx2x_port_stats_base_init(struct bnx2x *bp) -{ - struct dmae_command *dmae; - u32 *stats_comp = bnx2x_sp(bp, 
stats_comp); - - /* sanity */ - if (!bp->port.pmf || !bp->port.port_stx) { - BNX2X_ERR("BUG!\n"); - return; - } - - bp->executer_idx = 0; - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, - true, DMAE_COMP_PCI); - dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); - dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); - dmae->dst_addr_lo = bp->port.port_stx >> 2; - dmae->dst_addr_hi = 0; - dmae->len = sizeof(struct host_port_stats) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); -} - -static void bnx2x_func_stats_base_init(struct bnx2x *bp) -{ - int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; - u32 func_stx; - - /* sanity */ - if (!bp->port.pmf || !bp->func_stx) { - BNX2X_ERR("BUG!\n"); - return; - } - - /* save our func_stx */ - func_stx = bp->func_stx; - - for (vn = VN_0; vn < vn_max; vn++) { - int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; - - bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); - bnx2x_func_stats_init(bp); - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); - } - - /* restore our func_stx */ - bp->func_stx = func_stx; -} - -static void bnx2x_func_stats_base_update(struct bnx2x *bp) -{ - struct dmae_command *dmae = &bp->stats_dmae; - u32 *stats_comp = bnx2x_sp(bp, stats_comp); - - /* sanity */ - if (!bp->func_stx) { - BNX2X_ERR("BUG!\n"); - return; - } - - bp->executer_idx = 0; - memset(dmae, 0, sizeof(struct dmae_command)); - - dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, - true, DMAE_COMP_PCI); - dmae->src_addr_lo = bp->func_stx >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base)); - dmae->len = sizeof(struct host_func_stats) >> 2; - dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); - dmae->comp_val = DMAE_COMP_VAL; - - *stats_comp = 0; - bnx2x_hw_stats_post(bp); - bnx2x_stats_comp(bp); -} - -/** - * This function will prepare the statistics ramrod data so that - * we will only have to increment the statistics counter and - * send the ramrod each time we have to. - * - * @param bp - */ -static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) -{ - int i; - struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr; - - dma_addr_t cur_data_offset; - struct stats_query_entry *cur_query_entry; - - stats_hdr->cmd_num = bp->fw_stats_num; - stats_hdr->drv_stats_counter = 0; - - /* storm_counters struct contains the counters of completed - * statistics requests per storm which are incremented by FW - * each time it completes handling a statistics ramrod. We will - * check these counters in the timer handler and discard a - * (statistics) ramrod completion. - */ - cur_data_offset = bp->fw_stats_data_mapping + - offsetof(struct bnx2x_fw_stats_data, storm_counters); - - stats_hdr->stats_counters_addrs.hi = - cpu_to_le32(U64_HI(cur_data_offset)); - stats_hdr->stats_counters_addrs.lo = - cpu_to_le32(U64_LO(cur_data_offset)); - - /* prepare for the first stats ramrod (will be completed with - * the counters equal to zero) - init counters to something different.
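[Editor's aside: the bnx2x_stats_stm table in the hunk above drives every statistics transition from a single (state, event) lookup instead of nested switches. A minimal userspace sketch of the same table-driven pattern follows; the states, events, and action names are invented for the sketch and are not the driver's.]

#include <stdio.h>

enum state { ST_DISABLED, ST_ENABLED, ST_MAX };
enum event { EV_PMF, EV_LINK_UP, EV_UPDATE, EV_STOP, EV_MAX };

static void do_nothing(void) { }
static void do_start(void)   { puts("start"); }
static void do_update(void)  { puts("update"); }
static void do_stop(void)    { puts("stop"); }

/* Each (state, event) cell names an action and the next state. */
static const struct {
        void (*action)(void);
        enum state next_state;
} stm[ST_MAX][EV_MAX] = {
        [ST_DISABLED] = {
                [EV_PMF]     = { do_nothing, ST_DISABLED },
                [EV_LINK_UP] = { do_start,   ST_ENABLED },
                [EV_UPDATE]  = { do_nothing, ST_DISABLED },
                [EV_STOP]    = { do_nothing, ST_DISABLED },
        },
        [ST_ENABLED] = {
                [EV_PMF]     = { do_start,   ST_ENABLED },
                [EV_LINK_UP] = { do_start,   ST_ENABLED },
                [EV_UPDATE]  = { do_update,  ST_ENABLED },
                [EV_STOP]    = { do_stop,    ST_DISABLED },
        },
};

static enum state handle(enum state s, enum event e)
{
        stm[s][e].action();
        return stm[s][e].next_state;
}

int main(void)
{
        enum state s = ST_DISABLED;

        s = handle(s, EV_LINK_UP);      /* start, -> ENABLED */
        s = handle(s, EV_UPDATE);       /* update, stays ENABLED */
        s = handle(s, EV_STOP);         /* stop, -> DISABLED */
        return s == ST_DISABLED ? 0 : 1;
}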
- */ - memset(&bp->fw_stats_data->storm_counters, 0xff, - sizeof(struct stats_counter)); - - /**** Port FW statistics data ****/ - cur_data_offset = bp->fw_stats_data_mapping + - offsetof(struct bnx2x_fw_stats_data, port); - - cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; - - cur_query_entry->kind = STATS_TYPE_PORT; - /* For port query index is a DONT CARE */ - cur_query_entry->index = BP_PORT(bp); - /* For port query funcID is a DONT CARE */ - cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); - cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); - cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); - - /**** PF FW statistics data ****/ - cur_data_offset = bp->fw_stats_data_mapping + - offsetof(struct bnx2x_fw_stats_data, pf); - - cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; - - cur_query_entry->kind = STATS_TYPE_PF; - /* For PF query index is a DONT CARE */ - cur_query_entry->index = BP_PORT(bp); - cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); - cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); - cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); - - /**** Clients' queries ****/ - cur_data_offset = bp->fw_stats_data_mapping + - offsetof(struct bnx2x_fw_stats_data, queue_stats); - - for_each_eth_queue(bp, i) { - cur_query_entry = - &bp->fw_stats_req-> - query[BNX2X_FIRST_QUEUE_QUERY_IDX + i]; - - cur_query_entry->kind = STATS_TYPE_QUEUE; - cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); - cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); - cur_query_entry->address.hi = - cpu_to_le32(U64_HI(cur_data_offset)); - cur_query_entry->address.lo = - cpu_to_le32(U64_LO(cur_data_offset)); - - cur_data_offset += sizeof(struct per_queue_stats); - } -} - -void bnx2x_stats_init(struct bnx2x *bp) -{ - int /*abs*/port = BP_PORT(bp); - int mb_idx = BP_FW_MB_IDX(bp); - int i; - - bp->stats_pending = 0; - bp->executer_idx = 0; - bp->stats_counter = 0; - - /* port and func stats for management */ - if (!BP_NOMCP(bp)) { - bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); - bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); - - } else { - bp->port.port_stx = 0; - bp->func_stx = 0; - } - DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", - bp->port.port_stx, bp->func_stx); - - port = BP_PORT(bp); - /* port stats */ - memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); - bp->port.old_nig_stats.brb_discard = - REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); - bp->port.old_nig_stats.brb_truncate = - REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); - if (!CHIP_IS_E3(bp)) { - REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, - &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); - REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, - &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); - } - - /* function stats */ - for_each_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - - memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); - memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); - memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); - memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); - } - - /* Prepare statistics ramrod data */ - bnx2x_prep_fw_stats_req(bp); - - memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); - memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); - - bp->stats_state = STATS_STATE_DISABLED; - - if (bp->port.pmf) { - if (bp->port.port_stx) - bnx2x_port_stats_base_init(bp); - - if (bp->func_stx) - 
bnx2x_func_stats_base_init(bp); - - } else if (bp->func_stx) - bnx2x_func_stats_base_update(bp); -} diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h deleted file mode 100644 index 5d8ce2f..0000000 --- a/drivers/net/bnx2x/bnx2x_stats.h +++ /dev/null @@ -1,381 +0,0 @@ -/* bnx2x_stats.h: Broadcom Everest network driver. - * - * Copyright (c) 2007-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Maintained by: Eilon Greenstein - * Written by: Eliezer Tamir - * Based on code from Michael Chan's bnx2 driver - * UDP CSUM errata workaround by Arik Gendelman - * Slowpath and fastpath rework by Vladislav Zolotarov - * Statistics and Link management by Yitchak Gertner - * - */ -#ifndef BNX2X_STATS_H -#define BNX2X_STATS_H - -#include - -struct nig_stats { - u32 brb_discard; - u32 brb_packet; - u32 brb_truncate; - u32 flow_ctrl_discard; - u32 flow_ctrl_octets; - u32 flow_ctrl_packet; - u32 mng_discard; - u32 mng_octet_inp; - u32 mng_octet_out; - u32 mng_packet_inp; - u32 mng_packet_out; - u32 pbf_octets; - u32 pbf_packet; - u32 safc_inp; - u32 egress_mac_pkt0_lo; - u32 egress_mac_pkt0_hi; - u32 egress_mac_pkt1_lo; - u32 egress_mac_pkt1_hi; -}; - - -enum bnx2x_stats_event { - STATS_EVENT_PMF = 0, - STATS_EVENT_LINK_UP, - STATS_EVENT_UPDATE, - STATS_EVENT_STOP, - STATS_EVENT_MAX -}; - -enum bnx2x_stats_state { - STATS_STATE_DISABLED = 0, - STATS_STATE_ENABLED, - STATS_STATE_MAX -}; - -struct bnx2x_eth_stats { - u32 total_bytes_received_hi; - u32 total_bytes_received_lo; - u32 total_bytes_transmitted_hi; - u32 total_bytes_transmitted_lo; - u32 total_unicast_packets_received_hi; - u32 total_unicast_packets_received_lo; - u32 total_multicast_packets_received_hi; - u32 total_multicast_packets_received_lo; - u32 total_broadcast_packets_received_hi; - u32 total_broadcast_packets_received_lo; - u32 total_unicast_packets_transmitted_hi; - u32 total_unicast_packets_transmitted_lo; - u32 total_multicast_packets_transmitted_hi; - u32 total_multicast_packets_transmitted_lo; - u32 total_broadcast_packets_transmitted_hi; - u32 total_broadcast_packets_transmitted_lo; - u32 valid_bytes_received_hi; - u32 valid_bytes_received_lo; - - u32 error_bytes_received_hi; - u32 error_bytes_received_lo; - u32 etherstatsoverrsizepkts_hi; - u32 etherstatsoverrsizepkts_lo; - u32 no_buff_discard_hi; - u32 no_buff_discard_lo; - - u32 rx_stat_ifhcinbadoctets_hi; - u32 rx_stat_ifhcinbadoctets_lo; - u32 tx_stat_ifhcoutbadoctets_hi; - u32 tx_stat_ifhcoutbadoctets_lo; - u32 rx_stat_dot3statsfcserrors_hi; - u32 rx_stat_dot3statsfcserrors_lo; - u32 rx_stat_dot3statsalignmenterrors_hi; - u32 rx_stat_dot3statsalignmenterrors_lo; - u32 rx_stat_dot3statscarriersenseerrors_hi; - u32 rx_stat_dot3statscarriersenseerrors_lo; - u32 rx_stat_falsecarriererrors_hi; - u32 rx_stat_falsecarriererrors_lo; - u32 rx_stat_etherstatsundersizepkts_hi; - u32 rx_stat_etherstatsundersizepkts_lo; - u32 rx_stat_dot3statsframestoolong_hi; - u32 rx_stat_dot3statsframestoolong_lo; - u32 rx_stat_etherstatsfragments_hi; - u32 rx_stat_etherstatsfragments_lo; - u32 rx_stat_etherstatsjabbers_hi; - u32 rx_stat_etherstatsjabbers_lo; - u32 rx_stat_maccontrolframesreceived_hi; - u32 rx_stat_maccontrolframesreceived_lo; - u32 rx_stat_bmac_xpf_hi; - u32 rx_stat_bmac_xpf_lo; - u32 rx_stat_bmac_xcf_hi; - u32 rx_stat_bmac_xcf_lo; - u32 rx_stat_xoffstateentered_hi; - u32 
rx_stat_xoffstateentered_lo; - u32 rx_stat_xonpauseframesreceived_hi; - u32 rx_stat_xonpauseframesreceived_lo; - u32 rx_stat_xoffpauseframesreceived_hi; - u32 rx_stat_xoffpauseframesreceived_lo; - u32 tx_stat_outxonsent_hi; - u32 tx_stat_outxonsent_lo; - u32 tx_stat_outxoffsent_hi; - u32 tx_stat_outxoffsent_lo; - u32 tx_stat_flowcontroldone_hi; - u32 tx_stat_flowcontroldone_lo; - u32 tx_stat_etherstatscollisions_hi; - u32 tx_stat_etherstatscollisions_lo; - u32 tx_stat_dot3statssinglecollisionframes_hi; - u32 tx_stat_dot3statssinglecollisionframes_lo; - u32 tx_stat_dot3statsmultiplecollisionframes_hi; - u32 tx_stat_dot3statsmultiplecollisionframes_lo; - u32 tx_stat_dot3statsdeferredtransmissions_hi; - u32 tx_stat_dot3statsdeferredtransmissions_lo; - u32 tx_stat_dot3statsexcessivecollisions_hi; - u32 tx_stat_dot3statsexcessivecollisions_lo; - u32 tx_stat_dot3statslatecollisions_hi; - u32 tx_stat_dot3statslatecollisions_lo; - u32 tx_stat_etherstatspkts64octets_hi; - u32 tx_stat_etherstatspkts64octets_lo; - u32 tx_stat_etherstatspkts65octetsto127octets_hi; - u32 tx_stat_etherstatspkts65octetsto127octets_lo; - u32 tx_stat_etherstatspkts128octetsto255octets_hi; - u32 tx_stat_etherstatspkts128octetsto255octets_lo; - u32 tx_stat_etherstatspkts256octetsto511octets_hi; - u32 tx_stat_etherstatspkts256octetsto511octets_lo; - u32 tx_stat_etherstatspkts512octetsto1023octets_hi; - u32 tx_stat_etherstatspkts512octetsto1023octets_lo; - u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; - u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; - u32 tx_stat_etherstatspktsover1522octets_hi; - u32 tx_stat_etherstatspktsover1522octets_lo; - u32 tx_stat_bmac_2047_hi; - u32 tx_stat_bmac_2047_lo; - u32 tx_stat_bmac_4095_hi; - u32 tx_stat_bmac_4095_lo; - u32 tx_stat_bmac_9216_hi; - u32 tx_stat_bmac_9216_lo; - u32 tx_stat_bmac_16383_hi; - u32 tx_stat_bmac_16383_lo; - u32 tx_stat_dot3statsinternalmactransmiterrors_hi; - u32 tx_stat_dot3statsinternalmactransmiterrors_lo; - u32 tx_stat_bmac_ufl_hi; - u32 tx_stat_bmac_ufl_lo; - - u32 pause_frames_received_hi; - u32 pause_frames_received_lo; - u32 pause_frames_sent_hi; - u32 pause_frames_sent_lo; - - u32 etherstatspkts1024octetsto1522octets_hi; - u32 etherstatspkts1024octetsto1522octets_lo; - u32 etherstatspktsover1522octets_hi; - u32 etherstatspktsover1522octets_lo; - - u32 brb_drop_hi; - u32 brb_drop_lo; - u32 brb_truncate_hi; - u32 brb_truncate_lo; - - u32 mac_filter_discard; - u32 mf_tag_discard; - u32 brb_truncate_discard; - u32 mac_discard; - - u32 driver_xoff; - u32 rx_err_discard_pkt; - u32 rx_skb_alloc_failed; - u32 hw_csum_err; - - u32 nig_timer_max; - - /* TPA */ - u32 total_tpa_aggregations_hi; - u32 total_tpa_aggregations_lo; - u32 total_tpa_aggregated_frames_hi; - u32 total_tpa_aggregated_frames_lo; - u32 total_tpa_bytes_hi; - u32 total_tpa_bytes_lo; -}; - - -struct bnx2x_eth_q_stats { - u32 total_unicast_bytes_received_hi; - u32 total_unicast_bytes_received_lo; - u32 total_broadcast_bytes_received_hi; - u32 total_broadcast_bytes_received_lo; - u32 total_multicast_bytes_received_hi; - u32 total_multicast_bytes_received_lo; - u32 total_bytes_received_hi; - u32 total_bytes_received_lo; - u32 total_unicast_bytes_transmitted_hi; - u32 total_unicast_bytes_transmitted_lo; - u32 total_broadcast_bytes_transmitted_hi; - u32 total_broadcast_bytes_transmitted_lo; - u32 total_multicast_bytes_transmitted_hi; - u32 total_multicast_bytes_transmitted_lo; - u32 total_bytes_transmitted_hi; - u32 total_bytes_transmitted_lo; - u32 total_unicast_packets_received_hi; - u32 
total_unicast_packets_received_lo; - u32 total_multicast_packets_received_hi; - u32 total_multicast_packets_received_lo; - u32 total_broadcast_packets_received_hi; - u32 total_broadcast_packets_received_lo; - u32 total_unicast_packets_transmitted_hi; - u32 total_unicast_packets_transmitted_lo; - u32 total_multicast_packets_transmitted_hi; - u32 total_multicast_packets_transmitted_lo; - u32 total_broadcast_packets_transmitted_hi; - u32 total_broadcast_packets_transmitted_lo; - u32 valid_bytes_received_hi; - u32 valid_bytes_received_lo; - - u32 etherstatsoverrsizepkts_hi; - u32 etherstatsoverrsizepkts_lo; - u32 no_buff_discard_hi; - u32 no_buff_discard_lo; - - u32 driver_xoff; - u32 rx_err_discard_pkt; - u32 rx_skb_alloc_failed; - u32 hw_csum_err; - - u32 total_packets_received_checksum_discarded_hi; - u32 total_packets_received_checksum_discarded_lo; - u32 total_packets_received_ttl0_discarded_hi; - u32 total_packets_received_ttl0_discarded_lo; - u32 total_transmitted_dropped_packets_error_hi; - u32 total_transmitted_dropped_packets_error_lo; - - /* TPA */ - u32 total_tpa_aggregations_hi; - u32 total_tpa_aggregations_lo; - u32 total_tpa_aggregated_frames_hi; - u32 total_tpa_aggregated_frames_lo; - u32 total_tpa_bytes_hi; - u32 total_tpa_bytes_lo; -}; - -/**************************************************************************** -* Macros -****************************************************************************/ - -/* sum[hi:lo] += add[hi:lo] */ -#define ADD_64(s_hi, a_hi, s_lo, a_lo) \ - do { \ - s_lo += a_lo; \ - s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ - } while (0) - -/* difference = minuend - subtrahend */ -#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ - do { \ - if (m_lo < s_lo) { \ - /* underflow */ \ - d_hi = m_hi - s_hi; \ - if (d_hi > 0) { \ - /* we can 'loan' 1 */ \ - d_hi--; \ - d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ - } else { \ - /* m_hi <= s_hi */ \ - d_hi = 0; \ - d_lo = 0; \ - } \ - } else { \ - /* m_lo >= s_lo */ \ - if (m_hi < s_hi) { \ - d_hi = 0; \ - d_lo = 0; \ - } else { \ - /* m_hi >= s_hi */ \ - d_hi = m_hi - s_hi; \ - d_lo = m_lo - s_lo; \ - } \ - } \ - } while (0) - -#define UPDATE_STAT64(s, t) \ - do { \ - DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ - diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ - pstats->mac_stx[0].t##_hi = new->s##_hi; \ - pstats->mac_stx[0].t##_lo = new->s##_lo; \ - ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ - pstats->mac_stx[1].t##_lo, diff.lo); \ - } while (0) - -#define UPDATE_STAT64_NIG(s, t) \ - do { \ - DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ - diff.lo, new->s##_lo, old->s##_lo); \ - ADD_64(estats->t##_hi, diff.hi, \ - estats->t##_lo, diff.lo); \ - } while (0) - -/* sum[hi:lo] += add */ -#define ADD_EXTEND_64(s_hi, s_lo, a) \ - do { \ - s_lo += a; \ - s_hi += (s_lo < a) ? 
1 : 0; \ - } while (0) - -#define ADD_STAT64(diff, t) \ - do { \ - ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \ - pstats->mac_stx[1].t##_lo, new->diff##_lo); \ - } while (0) - -#define UPDATE_EXTEND_STAT(s) \ - do { \ - ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ - pstats->mac_stx[1].s##_lo, \ - new->s); \ - } while (0) - -#define UPDATE_EXTEND_TSTAT(s, t) \ - do { \ - diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ - old_tclient->s = tclient->s; \ - ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ - } while (0) - -#define UPDATE_EXTEND_USTAT(s, t) \ - do { \ - diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ - old_uclient->s = uclient->s; \ - ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ - } while (0) - -#define UPDATE_EXTEND_XSTAT(s, t) \ - do { \ - diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ - old_xclient->s = xclient->s; \ - ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ - } while (0) - -/* minuend -= subtrahend */ -#define SUB_64(m_hi, s_hi, m_lo, s_lo) \ - do { \ - DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ - } while (0) - -/* minuend[hi:lo] -= subtrahend */ -#define SUB_EXTEND_64(m_hi, m_lo, s) \ - do { \ - SUB_64(m_hi, 0, m_lo, s); \ - } while (0) - -#define SUB_EXTEND_USTAT(s, t) \ - do { \ - diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ - SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ - } while (0) - - -/* forward */ -struct bnx2x; - -void bnx2x_stats_init(struct bnx2x *bp); - -void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); - -#endif /* BNX2X_STATS_H */ diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c deleted file mode 100644 index 94a2e54..0000000 --- a/drivers/net/cnic.c +++ /dev/null @@ -1,5487 +0,0 @@ -/* cnic.c: Broadcom CNIC core network driver. - * - * Copyright (c) 2006-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
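[Editor's aside: the ADD_64()/ADD_EXTEND_64() macros in the bnx2x_stats.h hunk above keep every counter as a hi/lo pair of u32s and propagate the carry by hand, because the firmware exposes 32-bit halves; the UPDATE_EXTEND_* macros likewise rely on unsigned 32-bit subtraction to get a correct delta even when a firmware counter wraps. A self-contained sketch of the carry rule, checked against native 64-bit arithmetic; the function name is mine, not the driver's.]

#include <assert.h>
#include <stdint.h>

/* sum[hi:lo] += add[hi:lo], carrying into hi when lo wraps: the
 * logic of the ADD_64() macro above, written as a function. */
static void add_64(uint32_t *s_hi, uint32_t *s_lo, uint32_t a_hi, uint32_t a_lo)
{
        *s_lo += a_lo;                          /* may wrap modulo 2^32 */
        *s_hi += a_hi + (*s_lo < a_lo ? 1 : 0); /* wrapped => carry */
}

int main(void)
{
        uint32_t hi = 0x1, lo = 0xfffffff0;
        uint64_t ref = (((uint64_t)hi << 32) | lo) + 0x20;

        add_64(&hi, &lo, 0, 0x20);              /* lo wraps, carry into hi */
        assert((((uint64_t)hi << 32) | lo) == ref);
        assert(hi == 0x2 && lo == 0x10);
        return 0;
}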
- * - * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com) - * Modified and maintained by: Michael Chan - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) -#define BCM_VLAN 1 -#endif -#include -#include -#include -#include -#include -#include -#include - -#include "cnic_if.h" -#include "bnx2.h" -#include "bnx2x/bnx2x_reg.h" -#include "bnx2x/bnx2x_fw_defs.h" -#include "bnx2x/bnx2x_hsi.h" -#include "../scsi/bnx2i/57xx_iscsi_constants.h" -#include "../scsi/bnx2i/57xx_iscsi_hsi.h" -#include "cnic.h" -#include "cnic_defs.h" - -#define DRV_MODULE_NAME "cnic" - -static char version[] __devinitdata = - "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; - -MODULE_AUTHOR("Michael Chan and John(Zongxi) " - "Chen (zongxi@broadcom.com"); -MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(CNIC_MODULE_VERSION); - -/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */ -static LIST_HEAD(cnic_dev_list); -static LIST_HEAD(cnic_udev_list); -static DEFINE_RWLOCK(cnic_dev_lock); -static DEFINE_MUTEX(cnic_lock); - -static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; - -/* helper function, assuming cnic_lock is held */ -static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type) -{ - return rcu_dereference_protected(cnic_ulp_tbl[type], - lockdep_is_held(&cnic_lock)); -} - -static int cnic_service_bnx2(void *, void *); -static int cnic_service_bnx2x(void *, void *); -static int cnic_ctl(void *, struct cnic_ctl_info *); - -static struct cnic_ops cnic_bnx2_ops = { - .cnic_owner = THIS_MODULE, - .cnic_handler = cnic_service_bnx2, - .cnic_ctl = cnic_ctl, -}; - -static struct cnic_ops cnic_bnx2x_ops = { - .cnic_owner = THIS_MODULE, - .cnic_handler = cnic_service_bnx2x, - .cnic_ctl = cnic_ctl, -}; - -static struct workqueue_struct *cnic_wq; - -static void cnic_shutdown_rings(struct cnic_dev *); -static void cnic_init_rings(struct cnic_dev *); -static int cnic_cm_set_pg(struct cnic_sock *); - -static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) -{ - struct cnic_uio_dev *udev = uinfo->priv; - struct cnic_dev *dev; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (udev->uio_dev != -1) - return -EBUSY; - - rtnl_lock(); - dev = udev->dev; - - if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) { - rtnl_unlock(); - return -ENODEV; - } - - udev->uio_dev = iminor(inode); - - cnic_shutdown_rings(dev); - cnic_init_rings(dev); - rtnl_unlock(); - - return 0; -} - -static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) -{ - struct cnic_uio_dev *udev = uinfo->priv; - - udev->uio_dev = -1; - return 0; -} - -static inline void cnic_hold(struct cnic_dev *dev) -{ - atomic_inc(&dev->ref_count); -} - -static inline void cnic_put(struct cnic_dev *dev) -{ - atomic_dec(&dev->ref_count); -} - -static inline void csk_hold(struct cnic_sock *csk) -{ - atomic_inc(&csk->ref_count); -} - -static inline void csk_put(struct cnic_sock *csk) -{ - atomic_dec(&csk->ref_count); -} - -static struct cnic_dev *cnic_from_netdev(struct net_device *netdev) -{ - struct cnic_dev *cdev; - - read_lock(&cnic_dev_lock); - list_for_each_entry(cdev, &cnic_dev_list, list) { - if (netdev == cdev->netdev) { - cnic_hold(cdev); - 
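[Editor's aside: cnic_from_netdev() just above shows the lookup discipline used throughout this file: take a reference with cnic_hold() while the read lock is still held, so the device cannot be torn down between the lookup and the caller's use of it; the caller later drops the reference with cnic_put(). A rough userspace analogue using pthreads and C11 atomics; the types and names here are invented for the sketch.]

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct obj { atomic_int ref_count; int id; };

static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct obj *table[16];

static struct obj *lookup_and_hold(int id)
{
        struct obj *o = NULL;

        pthread_rwlock_rdlock(&tbl_lock);
        for (size_t i = 0; i < 16; i++) {
                if (table[i] && table[i]->id == id) {
                        o = table[i];
                        /* grab the reference before dropping the lock */
                        atomic_fetch_add(&o->ref_count, 1);
                        break;
                }
        }
        pthread_rwlock_unlock(&tbl_lock);
        return o;       /* caller must drop the reference when done */
}

int main(void)
{
        struct obj dev = { .id = 7 };

        table[0] = &dev;
        return lookup_and_hold(7) == &dev ? 0 : 1;
}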
read_unlock(&cnic_dev_lock); - return cdev; - } - } - read_unlock(&cnic_dev_lock); - return NULL; -} - -static inline void ulp_get(struct cnic_ulp_ops *ulp_ops) -{ - atomic_inc(&ulp_ops->ref_count); -} - -static inline void ulp_put(struct cnic_ulp_ops *ulp_ops) -{ - atomic_dec(&ulp_ops->ref_count); -} - -static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct drv_ctl_info info; - struct drv_ctl_io *io = &info.data.io; - - info.cmd = DRV_CTL_CTX_WR_CMD; - io->cid_addr = cid_addr; - io->offset = off; - io->data = val; - ethdev->drv_ctl(dev->netdev, &info); -} - -static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct drv_ctl_info info; - struct drv_ctl_io *io = &info.data.io; - - info.cmd = DRV_CTL_CTXTBL_WR_CMD; - io->offset = off; - io->dma_addr = addr; - ethdev->drv_ctl(dev->netdev, &info); -} - -static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct drv_ctl_info info; - struct drv_ctl_l2_ring *ring = &info.data.ring; - - if (start) - info.cmd = DRV_CTL_START_L2_CMD; - else - info.cmd = DRV_CTL_STOP_L2_CMD; - - ring->cid = cid; - ring->client_id = cl_id; - ethdev->drv_ctl(dev->netdev, &info); -} - -static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct drv_ctl_info info; - struct drv_ctl_io *io = &info.data.io; - - info.cmd = DRV_CTL_IO_WR_CMD; - io->offset = off; - io->data = val; - ethdev->drv_ctl(dev->netdev, &info); -} - -static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct drv_ctl_info info; - struct drv_ctl_io *io = &info.data.io; - - info.cmd = DRV_CTL_IO_RD_CMD; - io->offset = off; - ethdev->drv_ctl(dev->netdev, &info); - return io->data; -} - -static int cnic_in_use(struct cnic_sock *csk) -{ - return test_bit(SK_F_INUSE, &csk->flags); -} - -static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct drv_ctl_info info; - - info.cmd = cmd; - info.data.credit.credit_count = count; - ethdev->drv_ctl(dev->netdev, &info); -} - -static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid) -{ - u32 i; - - for (i = 0; i < cp->max_cid_space; i++) { - if (cp->ctx_tbl[i].cid == cid) { - *l5_cid = i; - return 0; - } - } - return -EINVAL; -} - -static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, - struct cnic_sock *csk) -{ - struct iscsi_path path_req; - char *buf = NULL; - u16 len = 0; - u32 msg_type = ISCSI_KEVENT_IF_DOWN; - struct cnic_ulp_ops *ulp_ops; - struct cnic_uio_dev *udev = cp->udev; - int rc = 0, retry = 0; - - if (!udev || udev->uio_dev == -1) - return -ENODEV; - - if (csk) { - len = sizeof(path_req); - buf = (char *) &path_req; - memset(&path_req, 0, len); - - msg_type = ISCSI_KEVENT_PATH_REQ; - path_req.handle = (u64) csk->l5_cid; - if (test_bit(SK_F_IPV6, &csk->flags)) { - memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0], - sizeof(struct in6_addr)); - path_req.ip_addr_len = 16; - } else { - memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0], - sizeof(struct in_addr)); - path_req.ip_addr_len = 
4; - } - path_req.vlan_id = csk->vlan_id; - path_req.pmtu = csk->mtu; - } - - while (retry < 3) { - rc = 0; - rcu_read_lock(); - ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); - if (ulp_ops) - rc = ulp_ops->iscsi_nl_send_msg( - cp->ulp_handle[CNIC_ULP_ISCSI], - msg_type, buf, len); - rcu_read_unlock(); - if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ) - break; - - msleep(100); - retry++; - } - return rc; -} - -static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8); - -static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, - char *buf, u16 len) -{ - int rc = -EINVAL; - - switch (msg_type) { - case ISCSI_UEVENT_PATH_UPDATE: { - struct cnic_local *cp; - u32 l5_cid; - struct cnic_sock *csk; - struct iscsi_path *path_resp; - - if (len < sizeof(*path_resp)) - break; - - path_resp = (struct iscsi_path *) buf; - cp = dev->cnic_priv; - l5_cid = (u32) path_resp->handle; - if (l5_cid >= MAX_CM_SK_TBL_SZ) - break; - - rcu_read_lock(); - if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) { - rc = -ENODEV; - rcu_read_unlock(); - break; - } - csk = &cp->csk_tbl[l5_cid]; - csk_hold(csk); - if (cnic_in_use(csk) && - test_bit(SK_F_CONNECT_START, &csk->flags)) { - - memcpy(csk->ha, path_resp->mac_addr, 6); - if (test_bit(SK_F_IPV6, &csk->flags)) - memcpy(&csk->src_ip[0], &path_resp->src.v6_addr, - sizeof(struct in6_addr)); - else - memcpy(&csk->src_ip[0], &path_resp->src.v4_addr, - sizeof(struct in_addr)); - - if (is_valid_ether_addr(csk->ha)) { - cnic_cm_set_pg(csk); - } else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) && - !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { - - cnic_cm_upcall(cp, csk, - L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); - clear_bit(SK_F_CONNECT_START, &csk->flags); - } - } - csk_put(csk); - rcu_read_unlock(); - rc = 0; - } - } - - return rc; -} - -static int cnic_offld_prep(struct cnic_sock *csk) -{ - if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) - return 0; - - if (!test_bit(SK_F_CONNECT_START, &csk->flags)) { - clear_bit(SK_F_OFFLD_SCHED, &csk->flags); - return 0; - } - - return 1; -} - -static int cnic_close_prep(struct cnic_sock *csk) -{ - clear_bit(SK_F_CONNECT_START, &csk->flags); - smp_mb__after_clear_bit(); - - if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { - while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) - msleep(1); - - return 1; - } - return 0; -} - -static int cnic_abort_prep(struct cnic_sock *csk) -{ - clear_bit(SK_F_CONNECT_START, &csk->flags); - smp_mb__after_clear_bit(); - - while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) - msleep(1); - - if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { - csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; - return 1; - } - - return 0; -} - -int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) -{ - struct cnic_dev *dev; - - if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { - pr_err("%s: Bad type %d\n", __func__, ulp_type); - return -EINVAL; - } - mutex_lock(&cnic_lock); - if (cnic_ulp_tbl_prot(ulp_type)) { - pr_err("%s: Type %d has already been registered\n", - __func__, ulp_type); - mutex_unlock(&cnic_lock); - return -EBUSY; - } - - read_lock(&cnic_dev_lock); - list_for_each_entry(dev, &cnic_dev_list, list) { - struct cnic_local *cp = dev->cnic_priv; - - clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]); - } - read_unlock(&cnic_dev_lock); - - atomic_set(&ulp_ops->ref_count, 0); - rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops); - mutex_unlock(&cnic_lock); - - /* Prevent race conditions with netdev_event */ - rtnl_lock(); - 
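[Editor's aside: cnic_register_driver() above publishes ulp_ops with rcu_assign_pointer() only after the structure is fully initialized, so lockless readers doing rcu_dereference() observe either NULL or a complete object. Outside the kernel, the closest analogue is a release store paired with an acquire load; a sketch with invented names follows. Note that rcu_dereference() actually needs only the weaker consume ordering; acquire is the safe userspace stand-in.]

#include <stdatomic.h>
#include <stddef.h>

struct ops { void (*init)(void); int type; };

static _Atomic(struct ops *) ops_tbl[4];

/* Publisher: fill in the struct first, then release-store the pointer. */
static void publish_ops(int type, struct ops *o)
{
        atomic_store_explicit(&ops_tbl[type], o, memory_order_release);
}

/* Reader: acquire-load; sees either NULL or a fully initialized struct. */
static struct ops *get_ops(int type)
{
        return atomic_load_explicit(&ops_tbl[type], memory_order_acquire);
}

int main(void)
{
        static struct ops iscsi_ops = { .type = 1 };

        publish_ops(1, &iscsi_ops);
        return get_ops(1) == &iscsi_ops ? 0 : 1;
}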
list_for_each_entry(dev, &cnic_dev_list, list) { - struct cnic_local *cp = dev->cnic_priv; - - if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type])) - ulp_ops->cnic_init(dev); - } - rtnl_unlock(); - - return 0; -} - -int cnic_unregister_driver(int ulp_type) -{ - struct cnic_dev *dev; - struct cnic_ulp_ops *ulp_ops; - int i = 0; - - if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { - pr_err("%s: Bad type %d\n", __func__, ulp_type); - return -EINVAL; - } - mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl_prot(ulp_type); - if (!ulp_ops) { - pr_err("%s: Type %d has not been registered\n", - __func__, ulp_type); - goto out_unlock; - } - read_lock(&cnic_dev_lock); - list_for_each_entry(dev, &cnic_dev_list, list) { - struct cnic_local *cp = dev->cnic_priv; - - if (rcu_dereference(cp->ulp_ops[ulp_type])) { - pr_err("%s: Type %d still has devices registered\n", - __func__, ulp_type); - read_unlock(&cnic_dev_lock); - goto out_unlock; - } - } - read_unlock(&cnic_dev_lock); - - rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); - - mutex_unlock(&cnic_lock); - synchronize_rcu(); - while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) { - msleep(100); - i++; - } - - if (atomic_read(&ulp_ops->ref_count) != 0) - netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n"); - return 0; - -out_unlock: - mutex_unlock(&cnic_lock); - return -EINVAL; -} - -static int cnic_start_hw(struct cnic_dev *); -static void cnic_stop_hw(struct cnic_dev *); - -static int cnic_register_device(struct cnic_dev *dev, int ulp_type, - void *ulp_ctx) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_ulp_ops *ulp_ops; - - if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { - pr_err("%s: Bad type %d\n", __func__, ulp_type); - return -EINVAL; - } - mutex_lock(&cnic_lock); - if (cnic_ulp_tbl_prot(ulp_type) == NULL) { - pr_err("%s: Driver with type %d has not been registered\n", - __func__, ulp_type); - mutex_unlock(&cnic_lock); - return -EAGAIN; - } - if (rcu_dereference(cp->ulp_ops[ulp_type])) { - pr_err("%s: Type %d has already been registered to this device\n", - __func__, ulp_type); - mutex_unlock(&cnic_lock); - return -EBUSY; - } - - clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); - cp->ulp_handle[ulp_type] = ulp_ctx; - ulp_ops = cnic_ulp_tbl_prot(ulp_type); - rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); - cnic_hold(dev); - - if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) - if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type])) - ulp_ops->cnic_start(cp->ulp_handle[ulp_type]); - - mutex_unlock(&cnic_lock); - - return 0; - -} -EXPORT_SYMBOL(cnic_register_driver); - -static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) -{ - struct cnic_local *cp = dev->cnic_priv; - int i = 0; - - if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { - pr_err("%s: Bad type %d\n", __func__, ulp_type); - return -EINVAL; - } - mutex_lock(&cnic_lock); - if (rcu_dereference(cp->ulp_ops[ulp_type])) { - rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL); - cnic_put(dev); - } else { - pr_err("%s: device not registered to this ulp type %d\n", - __func__, ulp_type); - mutex_unlock(&cnic_lock); - return -EINVAL; - } - mutex_unlock(&cnic_lock); - - if (ulp_type == CNIC_ULP_ISCSI) - cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); - - synchronize_rcu(); - - while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) && - i < 20) { - msleep(100); - i++; - } - if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) - netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n"); - - 
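[Editor's aside: both unregister paths above use the same bounded drain: poll a reference count or flag up to 20 times with a 100 ms sleep (about two seconds total) and warn rather than block forever if callers still hold references. A minimal sketch of that shape; the helper name is mine.]

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

/* Poll until the count drains or ~2s (20 x 100ms) elapse. */
static bool drain_refs(atomic_int *refs)
{
        const struct timespec tick = { 0, 100 * 1000 * 1000 };

        for (int i = 0; i < 20 && atomic_load(refs) != 0; i++)
                nanosleep(&tick, NULL);
        return atomic_load(refs) == 0;  /* false: caller should warn */
}

int main(void)
{
        atomic_int refs = 0;

        return drain_refs(&refs) ? 0 : 1;
}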
return 0; -} -EXPORT_SYMBOL(cnic_unregister_driver); - -static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id, - u32 next) -{ - id_tbl->start = start_id; - id_tbl->max = size; - id_tbl->next = next; - spin_lock_init(&id_tbl->lock); - id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL); - if (!id_tbl->table) - return -ENOMEM; - - return 0; -} - -static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl) -{ - kfree(id_tbl->table); - id_tbl->table = NULL; -} - -static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id) -{ - int ret = -1; - - id -= id_tbl->start; - if (id >= id_tbl->max) - return ret; - - spin_lock(&id_tbl->lock); - if (!test_bit(id, id_tbl->table)) { - set_bit(id, id_tbl->table); - ret = 0; - } - spin_unlock(&id_tbl->lock); - return ret; -} - -/* Returns -1 if not successful */ -static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl) -{ - u32 id; - - spin_lock(&id_tbl->lock); - id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); - if (id >= id_tbl->max) { - id = -1; - if (id_tbl->next != 0) { - id = find_first_zero_bit(id_tbl->table, id_tbl->next); - if (id >= id_tbl->next) - id = -1; - } - } - - if (id < id_tbl->max) { - set_bit(id, id_tbl->table); - id_tbl->next = (id + 1) & (id_tbl->max - 1); - id += id_tbl->start; - } - - spin_unlock(&id_tbl->lock); - - return id; -} - -static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id) -{ - if (id == -1) - return; - - id -= id_tbl->start; - if (id >= id_tbl->max) - return; - - clear_bit(id, id_tbl->table); -} - -static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) -{ - int i; - - if (!dma->pg_arr) - return; - - for (i = 0; i < dma->num_pages; i++) { - if (dma->pg_arr[i]) { - dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE, - dma->pg_arr[i], dma->pg_map_arr[i]); - dma->pg_arr[i] = NULL; - } - } - if (dma->pgtbl) { - dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size, - dma->pgtbl, dma->pgtbl_map); - dma->pgtbl = NULL; - } - kfree(dma->pg_arr); - dma->pg_arr = NULL; - dma->num_pages = 0; -} - -static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) -{ - int i; - __le32 *page_table = (__le32 *) dma->pgtbl; - - for (i = 0; i < dma->num_pages; i++) { - /* Each entry needs to be in big endian format. */ - *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); - page_table++; - *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); - page_table++; - } -} - -static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) -{ - int i; - __le32 *page_table = (__le32 *) dma->pgtbl; - - for (i = 0; i < dma->num_pages; i++) { - /* Each entry needs to be in little endian format. 
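[Editor's aside: cnic_alloc_new_id() earlier in this hunk is a small circular bitmap allocator: scan forward from the last position, wrap around to the start once, and remember where to resume, so IDs are handed out round-robin instead of always reusing the lowest free one. The same logic over a plain 64-bit word; the structure and names are invented, and the driver itself uses the kernel bitmap helpers under a spinlock.]

#include <assert.h>
#include <stdint.h>

#define MAX_IDS 64

struct id_tbl { uint64_t bits; unsigned int next; };

static int alloc_id(struct id_tbl *t)
{
        for (unsigned int n = 0; n < MAX_IDS; n++) {
                unsigned int id = (t->next + n) % MAX_IDS;

                if (!(t->bits & (1ULL << id))) {
                        t->bits |= 1ULL << id;
                        t->next = (id + 1) % MAX_IDS;   /* resume here */
                        return (int)id;
                }
        }
        return -1;      /* table full */
}

int main(void)
{
        struct id_tbl t = { 0, 0 };

        assert(alloc_id(&t) == 0);
        assert(alloc_id(&t) == 1);
        t.bits &= ~1ULL;                /* free id 0 */
        assert(alloc_id(&t) == 2);      /* round-robin: resumes at 2 */
        return 0;
}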
*/ - *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); - page_table++; - *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); - page_table++; - } -} - -static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, - int pages, int use_pg_tbl) -{ - int i, size; - struct cnic_local *cp = dev->cnic_priv; - - size = pages * (sizeof(void *) + sizeof(dma_addr_t)); - dma->pg_arr = kzalloc(size, GFP_ATOMIC); - if (dma->pg_arr == NULL) - return -ENOMEM; - - dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages); - dma->num_pages = pages; - - for (i = 0; i < pages; i++) { - dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, - BCM_PAGE_SIZE, - &dma->pg_map_arr[i], - GFP_ATOMIC); - if (dma->pg_arr[i] == NULL) - goto error; - } - if (!use_pg_tbl) - return 0; - - dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) & - ~(BCM_PAGE_SIZE - 1); - dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, - &dma->pgtbl_map, GFP_ATOMIC); - if (dma->pgtbl == NULL) - goto error; - - cp->setup_pgtbl(dev, dma); - - return 0; - -error: - cnic_free_dma(dev, dma); - return -ENOMEM; -} - -static void cnic_free_context(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - int i; - - for (i = 0; i < cp->ctx_blks; i++) { - if (cp->ctx_arr[i].ctx) { - dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size, - cp->ctx_arr[i].ctx, - cp->ctx_arr[i].mapping); - cp->ctx_arr[i].ctx = NULL; - } - } -} - -static void __cnic_free_uio(struct cnic_uio_dev *udev) -{ - uio_unregister_device(&udev->cnic_uinfo); - - if (udev->l2_buf) { - dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, - udev->l2_buf, udev->l2_buf_map); - udev->l2_buf = NULL; - } - - if (udev->l2_ring) { - dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, - udev->l2_ring, udev->l2_ring_map); - udev->l2_ring = NULL; - } - - pci_dev_put(udev->pdev); - kfree(udev); -} - -static void cnic_free_uio(struct cnic_uio_dev *udev) -{ - if (!udev) - return; - - write_lock(&cnic_dev_lock); - list_del_init(&udev->list); - write_unlock(&cnic_dev_lock); - __cnic_free_uio(udev); -} - -static void cnic_free_resc(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_uio_dev *udev = cp->udev; - - if (udev) { - udev->dev = NULL; - cp->udev = NULL; - } - - cnic_free_context(dev); - kfree(cp->ctx_arr); - cp->ctx_arr = NULL; - cp->ctx_blks = 0; - - cnic_free_dma(dev, &cp->gbl_buf_info); - cnic_free_dma(dev, &cp->kwq_info); - cnic_free_dma(dev, &cp->kwq_16_data_info); - cnic_free_dma(dev, &cp->kcq2.dma); - cnic_free_dma(dev, &cp->kcq1.dma); - kfree(cp->iscsi_tbl); - cp->iscsi_tbl = NULL; - kfree(cp->ctx_tbl); - cp->ctx_tbl = NULL; - - cnic_free_id_tbl(&cp->fcoe_cid_tbl); - cnic_free_id_tbl(&cp->cid_tbl); -} - -static int cnic_alloc_context(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - - if (CHIP_NUM(cp) == CHIP_NUM_5709) { - int i, k, arr_size; - - cp->ctx_blk_size = BCM_PAGE_SIZE; - cp->cids_per_blk = BCM_PAGE_SIZE / 128; - arr_size = BNX2_MAX_CID / cp->cids_per_blk * - sizeof(struct cnic_ctx); - cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); - if (cp->ctx_arr == NULL) - return -ENOMEM; - - k = 0; - for (i = 0; i < 2; i++) { - u32 j, reg, off, lo, hi; - - if (i == 0) - off = BNX2_PG_CTX_MAP; - else - off = BNX2_ISCSI_CTX_MAP; - - reg = cnic_reg_rd_ind(dev, off); - lo = reg >> 16; - hi = reg & 0xffff; - for (j = lo; j < hi; j += cp->cids_per_blk, k++) - cp->ctx_arr[k].cid = j; - } - - cp->ctx_blks = k; - if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) { - cp->ctx_blks = 0; - 
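[Editor's aside: cnic_alloc_dma() above sizes the page table with the classic power-of-two round-up, ((pages * 8) + BCM_PAGE_SIZE - 1) & ~(BCM_PAGE_SIZE - 1). The idiom in isolation, with a few checks; the page-size constant is assumed for the sketch, and the trick only works for power-of-two alignments.]

#include <assert.h>
#include <stddef.h>

#define BCM_PAGE_SIZE 4096u     /* assumed page size for the sketch */

/* Round n up to the next multiple of a power-of-two alignment. */
static size_t align_up(size_t n, size_t align)
{
        return (n + align - 1) & ~(align - 1);
}

int main(void)
{
        assert(align_up(1, BCM_PAGE_SIZE) == BCM_PAGE_SIZE);
        assert(align_up(BCM_PAGE_SIZE, BCM_PAGE_SIZE) == BCM_PAGE_SIZE);
        assert(align_up(BCM_PAGE_SIZE + 1, BCM_PAGE_SIZE) == 2 * BCM_PAGE_SIZE);
        return 0;
}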
return -ENOMEM; - } - - for (i = 0; i < cp->ctx_blks; i++) { - cp->ctx_arr[i].ctx = - dma_alloc_coherent(&dev->pcidev->dev, - BCM_PAGE_SIZE, - &cp->ctx_arr[i].mapping, - GFP_KERNEL); - if (cp->ctx_arr[i].ctx == NULL) - return -ENOMEM; - } - } - return 0; -} - -static u16 cnic_bnx2_next_idx(u16 idx) -{ - return idx + 1; -} - -static u16 cnic_bnx2_hw_idx(u16 idx) -{ - return idx; -} - -static u16 cnic_bnx2x_next_idx(u16 idx) -{ - idx++; - if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) - idx++; - - return idx; -} - -static u16 cnic_bnx2x_hw_idx(u16 idx) -{ - if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) - idx++; - return idx; -} - -static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info, - bool use_pg_tbl) -{ - int err, i, use_page_tbl = 0; - struct kcqe **kcq; - - if (use_pg_tbl) - use_page_tbl = 1; - - err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl); - if (err) - return err; - - kcq = (struct kcqe **) info->dma.pg_arr; - info->kcq = kcq; - - info->next_idx = cnic_bnx2_next_idx; - info->hw_idx = cnic_bnx2_hw_idx; - if (use_pg_tbl) - return 0; - - info->next_idx = cnic_bnx2x_next_idx; - info->hw_idx = cnic_bnx2x_hw_idx; - - for (i = 0; i < KCQ_PAGE_CNT; i++) { - struct bnx2x_bd_chain_next *next = - (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT]; - int j = i + 1; - - if (j >= KCQ_PAGE_CNT) - j = 0; - next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32; - next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff; - } - return 0; -} - -static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_uio_dev *udev; - - read_lock(&cnic_dev_lock); - list_for_each_entry(udev, &cnic_udev_list, list) { - if (udev->pdev == dev->pcidev) { - udev->dev = dev; - cp->udev = udev; - read_unlock(&cnic_dev_lock); - return 0; - } - } - read_unlock(&cnic_dev_lock); - - udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); - if (!udev) - return -ENOMEM; - - udev->uio_dev = -1; - - udev->dev = dev; - udev->pdev = dev->pcidev; - udev->l2_ring_size = pages * BCM_PAGE_SIZE; - udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, - &udev->l2_ring_map, - GFP_KERNEL | __GFP_COMP); - if (!udev->l2_ring) - goto err_udev; - - udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; - udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); - udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, - &udev->l2_buf_map, - GFP_KERNEL | __GFP_COMP); - if (!udev->l2_buf) - goto err_dma; - - write_lock(&cnic_dev_lock); - list_add(&udev->list, &cnic_udev_list); - write_unlock(&cnic_dev_lock); - - pci_dev_get(udev->pdev); - - cp->udev = udev; - - return 0; - err_dma: - dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, - udev->l2_ring, udev->l2_ring_map); - err_udev: - kfree(udev); - return -ENOMEM; -} - -static int cnic_init_uio(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_uio_dev *udev = cp->udev; - struct uio_info *uinfo; - int ret = 0; - - if (!udev) - return -ENOMEM; - - uinfo = &udev->cnic_uinfo; - - uinfo->mem[0].addr = dev->netdev->base_addr; - uinfo->mem[0].internal_addr = dev->regview; - uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; - uinfo->mem[0].memtype = UIO_MEM_PHYS; - - if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { - uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & - PAGE_MASK; - if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) - uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; - else - uinfo->mem[1].size = 
BNX2_SBLK_MSIX_ALIGN_SIZE; - - uinfo->name = "bnx2_cnic"; - } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { - uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & - PAGE_MASK; - uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); - - uinfo->name = "bnx2x_cnic"; - } - - uinfo->mem[1].memtype = UIO_MEM_LOGICAL; - - uinfo->mem[2].addr = (unsigned long) udev->l2_ring; - uinfo->mem[2].size = udev->l2_ring_size; - uinfo->mem[2].memtype = UIO_MEM_LOGICAL; - - uinfo->mem[3].addr = (unsigned long) udev->l2_buf; - uinfo->mem[3].size = udev->l2_buf_size; - uinfo->mem[3].memtype = UIO_MEM_LOGICAL; - - uinfo->version = CNIC_MODULE_VERSION; - uinfo->irq = UIO_IRQ_CUSTOM; - - uinfo->open = cnic_uio_open; - uinfo->release = cnic_uio_close; - - if (udev->uio_dev == -1) { - if (!uinfo->priv) { - uinfo->priv = udev; - - ret = uio_register_device(&udev->pdev->dev, uinfo); - } - } else { - cnic_init_rings(dev); - } - - return ret; -} - -static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - int ret; - - ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1); - if (ret) - goto error; - cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; - - ret = cnic_alloc_kcq(dev, &cp->kcq1, true); - if (ret) - goto error; - - ret = cnic_alloc_context(dev); - if (ret) - goto error; - - ret = cnic_alloc_uio_rings(dev, 2); - if (ret) - goto error; - - ret = cnic_init_uio(dev); - if (ret) - goto error; - - return 0; - -error: - cnic_free_resc(dev); - return ret; -} - -static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - int ctx_blk_size = cp->ethdev->ctx_blk_size; - int total_mem, blks, i; - - total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space; - blks = total_mem / ctx_blk_size; - if (total_mem % ctx_blk_size) - blks++; - - if (blks > cp->ethdev->ctx_tbl_len) - return -ENOMEM; - - cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL); - if (cp->ctx_arr == NULL) - return -ENOMEM; - - cp->ctx_blks = blks; - cp->ctx_blk_size = ctx_blk_size; - if (!BNX2X_CHIP_IS_57710(cp->chip_id)) - cp->ctx_align = 0; - else - cp->ctx_align = ctx_blk_size; - - cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE; - - for (i = 0; i < blks; i++) { - cp->ctx_arr[i].ctx = - dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size, - &cp->ctx_arr[i].mapping, - GFP_KERNEL); - if (cp->ctx_arr[i].ctx == NULL) - return -ENOMEM; - - if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) { - if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { - cnic_free_context(dev); - cp->ctx_blk_size += cp->ctx_align; - i = -1; - continue; - } - } - } - return 0; -} - -static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - u32 start_cid = ethdev->starting_cid; - int i, j, n, ret, pages; - struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; - - cp->iro_arr = ethdev->iro_arr; - - cp->max_cid_space = MAX_ISCSI_TBL_SZ; - cp->iscsi_start_cid = start_cid; - cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; - - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { - cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS; - cp->fcoe_init_cid = ethdev->fcoe_init_cid; - if (!cp->fcoe_init_cid) - cp->fcoe_init_cid = 0x10; - } - - cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, - GFP_KERNEL); - if (!cp->iscsi_tbl) - goto error; - - cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * - cp->max_cid_space, GFP_KERNEL); - if (!cp->ctx_tbl) - goto error; - - 
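[Editor's aside: cnic_alloc_bnx2x_resc() above follows the kernel's single-exit error convention: every allocation failure jumps to one error label, and the label frees whatever subset was already set up (here via cnic_free_resc(), which tolerates half-initialized state). The shape in miniature, with invented names.]

#include <stdlib.h>

/* Allocate two resources; on any failure, unwind at a single label. */
static int setup(void **a, void **b)
{
        *a = malloc(64);
        if (!*a)
                goto error;

        *b = malloc(64);
        if (!*b)
                goto error;

        return 0;

error:
        /* free(NULL) is a no-op, so this is safe however far we got */
        free(*a);
        *a = NULL;
        return -1;
}

int main(void)
{
        void *a, *b;
        int ret = setup(&a, &b);

        if (ret == 0) {
                free(a);
                free(b);
        }
        return ret;
}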
for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { - cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i]; - cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; - } - - for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) - cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; - - pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / - PAGE_SIZE; - - ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); - if (ret) - return -ENOMEM; - - n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; - for (i = 0, j = 0; i < cp->max_cid_space; i++) { - long off = CNIC_KWQ16_DATA_SIZE * (i % n); - - cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; - cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] + - off; - - if ((i % n) == (n - 1)) - j++; - } - - ret = cnic_alloc_kcq(dev, &cp->kcq1, false); - if (ret) - goto error; - - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { - ret = cnic_alloc_kcq(dev, &cp->kcq2, true); - if (ret) - goto error; - } - - pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; - ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); - if (ret) - goto error; - - ret = cnic_alloc_bnx2x_context(dev); - if (ret) - goto error; - - cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; - - cp->l2_rx_ring_size = 15; - - ret = cnic_alloc_uio_rings(dev, 4); - if (ret) - goto error; - - ret = cnic_init_uio(dev); - if (ret) - goto error; - - return 0; - -error: - cnic_free_resc(dev); - return -ENOMEM; -} - -static inline u32 cnic_kwq_avail(struct cnic_local *cp) -{ - return cp->max_kwq_idx - - ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx); -} - -static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], - u32 num_wqes) -{ - struct cnic_local *cp = dev->cnic_priv; - struct kwqe *prod_qe; - u16 prod, sw_prod, i; - - if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) - return -EAGAIN; /* bnx2 is down */ - - spin_lock_bh(&cp->cnic_ulp_lock); - if (num_wqes > cnic_kwq_avail(cp) && - !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) { - spin_unlock_bh(&cp->cnic_ulp_lock); - return -EAGAIN; - } - - clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); - - prod = cp->kwq_prod_idx; - sw_prod = prod & MAX_KWQ_IDX; - for (i = 0; i < num_wqes; i++) { - prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)]; - memcpy(prod_qe, wqes[i], sizeof(struct kwqe)); - prod++; - sw_prod = prod & MAX_KWQ_IDX; - } - cp->kwq_prod_idx = prod; - - CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx); - - spin_unlock_bh(&cp->cnic_ulp_lock); - return 0; -} - -static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid, - union l5cm_specific_data *l5_data) -{ - struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; - dma_addr_t map; - - map = ctx->kwqe_data_mapping; - l5_data->phy_address.lo = (u64) map & 0xffffffff; - l5_data->phy_address.hi = (u64) map >> 32; - return ctx->kwqe_data; -} - -static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, - u32 type, union l5cm_specific_data *l5_data) -{ - struct cnic_local *cp = dev->cnic_priv; - struct l5cm_spe kwqe; - struct kwqe_16 *kwq[1]; - u16 type_16; - int ret; - - kwqe.hdr.conn_and_cmd_data = - cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | - BNX2X_HW_CID(cp, cid))); - - type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; - type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & - SPE_HDR_FUNCTION_ID; - - kwqe.hdr.type = cpu_to_le16(type_16); - kwqe.hdr.reserved1 = 0; - kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); - kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); - - kwq[0] = (struct kwqe_16 *) 
&kwqe; - - spin_lock_bh(&cp->cnic_ulp_lock); - ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1); - spin_unlock_bh(&cp->cnic_ulp_lock); - - if (ret == 1) - return 0; - - return -EBUSY; -} - -static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, - struct kcqe *cqes[], u32 num_cqes) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_ulp_ops *ulp_ops; - - rcu_read_lock(); - ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); - if (likely(ulp_ops)) { - ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], - cqes, num_cqes); - } - rcu_read_unlock(); -} - -static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct cnic_local *cp = dev->cnic_priv; - struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; - int hq_bds, pages; - u32 pfid = cp->pfid; - - cp->num_iscsi_tasks = req1->num_tasks_per_conn; - cp->num_ccells = req1->num_ccells_per_conn; - cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE * - cp->num_iscsi_tasks; - cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * - BNX2X_ISCSI_R2TQE_SIZE; - cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; - pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; - hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); - cp->num_cqs = req1->num_cqs; - - if (!dev->max_iscsi_conn) - return 0; - - /* init Tstorm RAM */ - CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), - req1->rq_num_wqes); - CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); - CNIC_WR8(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); - CNIC_WR16(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), - req1->num_tasks_per_conn); - - /* init Ustorm RAM */ - CNIC_WR16(dev, BAR_USTRORM_INTMEM + - USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), - req1->rq_buffer_size); - CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); - CNIC_WR8(dev, BAR_USTRORM_INTMEM + - USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); - CNIC_WR16(dev, BAR_USTRORM_INTMEM + - USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), - req1->num_tasks_per_conn); - CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid), - req1->rq_num_wqes); - CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid), - req1->cq_num_wqes); - CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid), - cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); - - /* init Xstorm RAM */ - CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); - CNIC_WR16(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), - req1->num_tasks_per_conn); - CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), - hq_bds); - CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid), - req1->num_tasks_per_conn); - CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid), - cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); - - /* init Cstorm RAM */ - CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), - PAGE_SIZE); - CNIC_WR8(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); - CNIC_WR16(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), - req1->num_tasks_per_conn); - CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid), - req1->cq_num_wqes); - CNIC_WR16(dev, 
BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), - hq_bds); - - return 0; -} - -static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; - struct cnic_local *cp = dev->cnic_priv; - u32 pfid = cp->pfid; - struct iscsi_kcqe kcqe; - struct kcqe *cqes[1]; - - memset(&kcqe, 0, sizeof(kcqe)); - if (!dev->max_iscsi_conn) { - kcqe.completion_status = - ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED; - goto done; - } - - CNIC_WR(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]); - CNIC_WR(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4, - req2->error_bit_map[1]); - - CNIC_WR16(dev, BAR_USTRORM_INTMEM + - USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn); - CNIC_WR(dev, BAR_USTRORM_INTMEM + - USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]); - CNIC_WR(dev, BAR_USTRORM_INTMEM + - USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4, - req2->error_bit_map[1]); - - CNIC_WR16(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn); - - kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; - -done: - kcqe.op_code = ISCSI_KCQE_OPCODE_INIT; - cqes[0] = (struct kcqe *) &kcqe; - cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); - - return 0; -} - -static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; - - if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) { - struct cnic_iscsi *iscsi = ctx->proto.iscsi; - - cnic_free_dma(dev, &iscsi->hq_info); - cnic_free_dma(dev, &iscsi->r2tq_info); - cnic_free_dma(dev, &iscsi->task_array_info); - cnic_free_id(&cp->cid_tbl, ctx->cid); - } else { - cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid); - } - - ctx->cid = 0; -} - -static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) -{ - u32 cid; - int ret, pages; - struct cnic_local *cp = dev->cnic_priv; - struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; - struct cnic_iscsi *iscsi = ctx->proto.iscsi; - - if (ctx->ulp_proto_id == CNIC_ULP_FCOE) { - cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl); - if (cid == -1) { - ret = -ENOMEM; - goto error; - } - ctx->cid = cid; - return 0; - } - - cid = cnic_alloc_new_id(&cp->cid_tbl); - if (cid == -1) { - ret = -ENOMEM; - goto error; - } - - ctx->cid = cid; - pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; - - ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); - if (ret) - goto error; - - pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; - ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); - if (ret) - goto error; - - pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; - ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); - if (ret) - goto error; - - return 0; - -error: - cnic_free_bnx2x_conn_resc(dev, l5_cid); - return ret; -} - -static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init, - struct regpair *ctx_addr) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk; - int off = (cid - ethdev->starting_cid) % cp->cids_per_blk; - unsigned long align_off = 0; - dma_addr_t ctx_map; - void *ctx; - - if (cp->ctx_align) { - unsigned long mask = cp->ctx_align - 1; - - if (cp->ctx_arr[blk].mapping & mask) - align_off = cp->ctx_align - - (cp->ctx_arr[blk].mapping & mask); - } - ctx_map = cp->ctx_arr[blk].mapping + align_off + - (off * 
BNX2X_CONTEXT_MEM_SIZE); - ctx = cp->ctx_arr[blk].ctx + align_off + - (off * BNX2X_CONTEXT_MEM_SIZE); - if (init) - memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE); - - ctx_addr->lo = ctx_map & 0xffffffff; - ctx_addr->hi = (u64) ctx_map >> 32; - return ctx; -} - -static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], - u32 num) -{ - struct cnic_local *cp = dev->cnic_priv; - struct iscsi_kwqe_conn_offload1 *req1 = - (struct iscsi_kwqe_conn_offload1 *) wqes[0]; - struct iscsi_kwqe_conn_offload2 *req2 = - (struct iscsi_kwqe_conn_offload2 *) wqes[1]; - struct iscsi_kwqe_conn_offload3 *req3; - struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; - struct cnic_iscsi *iscsi = ctx->proto.iscsi; - u32 cid = ctx->cid; - u32 hw_cid = BNX2X_HW_CID(cp, cid); - struct iscsi_context *ictx; - struct regpair context_addr; - int i, j, n = 2, n_max; - u8 port = CNIC_PORT(cp); - - ctx->ctx_flags = 0; - if (!req2->num_additional_wqes) - return -EINVAL; - - n_max = req2->num_additional_wqes + 2; - - ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr); - if (ictx == NULL) - return -ENOMEM; - - req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; - - ictx->xstorm_ag_context.hq_prod = 1; - - ictx->xstorm_st_context.iscsi.first_burst_length = - ISCSI_DEF_FIRST_BURST_LEN; - ictx->xstorm_st_context.iscsi.max_send_pdu_length = - ISCSI_DEF_MAX_RECV_SEG_LEN; - ictx->xstorm_st_context.iscsi.sq_pbl_base.lo = - req1->sq_page_table_addr_lo; - ictx->xstorm_st_context.iscsi.sq_pbl_base.hi = - req1->sq_page_table_addr_hi; - ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi; - ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo; - ictx->xstorm_st_context.iscsi.hq_pbl_base.lo = - iscsi->hq_info.pgtbl_map & 0xffffffff; - ictx->xstorm_st_context.iscsi.hq_pbl_base.hi = - (u64) iscsi->hq_info.pgtbl_map >> 32; - ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo = - iscsi->hq_info.pgtbl[0]; - ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi = - iscsi->hq_info.pgtbl[1]; - ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo = - iscsi->r2tq_info.pgtbl_map & 0xffffffff; - ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi = - (u64) iscsi->r2tq_info.pgtbl_map >> 32; - ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo = - iscsi->r2tq_info.pgtbl[0]; - ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi = - iscsi->r2tq_info.pgtbl[1]; - ictx->xstorm_st_context.iscsi.task_pbl_base.lo = - iscsi->task_array_info.pgtbl_map & 0xffffffff; - ictx->xstorm_st_context.iscsi.task_pbl_base.hi = - (u64) iscsi->task_array_info.pgtbl_map >> 32; - ictx->xstorm_st_context.iscsi.task_pbl_cache_idx = - BNX2X_ISCSI_PBL_NOT_CACHED; - ictx->xstorm_st_context.iscsi.flags.flags |= - XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA; - ictx->xstorm_st_context.iscsi.flags.flags |= - XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; - ictx->xstorm_st_context.common.ethernet.reserved_vlan_type = - ETH_P_8021Q; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && - cp->port_mode == CHIP_2_PORT_MODE) { - - port = 0; - } - ictx->xstorm_st_context.common.flags = - 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT; - ictx->xstorm_st_context.common.flags = - port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT; - - ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; - /* TSTORM requires the base address of RQ DB & not PTE */ - ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = - req2->rq_page_table_addr_lo & PAGE_MASK; - ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = - req2->rq_page_table_addr_hi; - 
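/*
 * A minimal standalone sketch of the alignment math cnic_get_bnx2x_ctx()
 * applies to the context block mapping above; cnic_ctx_align_off() is a
 * hypothetical name, not a helper in this file, and "align" is assumed
 * to be a power of two (as cp->ctx_align is).
 */
static unsigned long cnic_ctx_align_off(unsigned long mapping,
					unsigned long align)
{
	unsigned long mask = align - 1;

	if (!(mapping & mask))		/* mapping already aligned */
		return 0;

	/* pad up to the next align boundary */
	return align - (mapping & mask);
}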
ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; - ictx->tstorm_st_context.tcp.cwnd = 0x5A8; - ictx->tstorm_st_context.tcp.flags2 |= - TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; - ictx->tstorm_st_context.tcp.ooo_support_mode = - TCP_TSTORM_OOO_DROP_AND_PROC_ACK; - - ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; - - ictx->ustorm_st_context.ring.rq.pbl_base.lo = - req2->rq_page_table_addr_lo; - ictx->ustorm_st_context.ring.rq.pbl_base.hi = - req2->rq_page_table_addr_hi; - ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi; - ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo; - ictx->ustorm_st_context.ring.r2tq.pbl_base.lo = - iscsi->r2tq_info.pgtbl_map & 0xffffffff; - ictx->ustorm_st_context.ring.r2tq.pbl_base.hi = - (u64) iscsi->r2tq_info.pgtbl_map >> 32; - ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo = - iscsi->r2tq_info.pgtbl[0]; - ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi = - iscsi->r2tq_info.pgtbl[1]; - ictx->ustorm_st_context.ring.cq_pbl_base.lo = - req1->cq_page_table_addr_lo; - ictx->ustorm_st_context.ring.cq_pbl_base.hi = - req1->cq_page_table_addr_hi; - ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN; - ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi; - ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo; - ictx->ustorm_st_context.task_pbe_cache_index = - BNX2X_ISCSI_PBL_NOT_CACHED; - ictx->ustorm_st_context.task_pdu_cache_index = - BNX2X_ISCSI_PDU_HEADER_NOT_CACHED; - - for (i = 1, j = 1; i < cp->num_cqs; i++, j++) { - if (j == 3) { - if (n >= n_max) - break; - req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; - j = 0; - } - ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN; - ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo = - req3->qp_first_pte[j].hi; - ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi = - req3->qp_first_pte[j].lo; - } - - ictx->ustorm_st_context.task_pbl_base.lo = - iscsi->task_array_info.pgtbl_map & 0xffffffff; - ictx->ustorm_st_context.task_pbl_base.hi = - (u64) iscsi->task_array_info.pgtbl_map >> 32; - ictx->ustorm_st_context.tce_phy_addr.lo = - iscsi->task_array_info.pgtbl[0]; - ictx->ustorm_st_context.tce_phy_addr.hi = - iscsi->task_array_info.pgtbl[1]; - ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; - ictx->ustorm_st_context.num_cqs = cp->num_cqs; - ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN; - ictx->ustorm_st_context.negotiated_rx_and_flags |= - ISCSI_DEF_MAX_BURST_LEN; - ictx->ustorm_st_context.negotiated_rx |= - ISCSI_DEFAULT_MAX_OUTSTANDING_R2T << - USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT; - - ictx->cstorm_st_context.hq_pbl_base.lo = - iscsi->hq_info.pgtbl_map & 0xffffffff; - ictx->cstorm_st_context.hq_pbl_base.hi = - (u64) iscsi->hq_info.pgtbl_map >> 32; - ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0]; - ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1]; - ictx->cstorm_st_context.task_pbl_base.lo = - iscsi->task_array_info.pgtbl_map & 0xffffffff; - ictx->cstorm_st_context.task_pbl_base.hi = - (u64) iscsi->task_array_info.pgtbl_map >> 32; - /* CSTORM and USTORM initialization is different, CSTORM requires - * CQ DB base & not PTE addr */ - ictx->cstorm_st_context.cq_db_base.lo = - req1->cq_page_table_addr_lo & PAGE_MASK; - ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; - ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; - ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; - for 
(i = 0; i < cp->num_cqs; i++) { - ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] = - ISCSI_INITIAL_SN; - ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] = - ISCSI_INITIAL_SN; - } - - ictx->xstorm_ag_context.cdu_reserved = - CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, - ISCSI_CONNECTION_TYPE); - ictx->ustorm_ag_context.cdu_usage = - CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, - ISCSI_CONNECTION_TYPE); - return 0; - -} - -static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], - u32 num, int *work) -{ - struct iscsi_kwqe_conn_offload1 *req1; - struct iscsi_kwqe_conn_offload2 *req2; - struct cnic_local *cp = dev->cnic_priv; - struct cnic_context *ctx; - struct iscsi_kcqe kcqe; - struct kcqe *cqes[1]; - u32 l5_cid; - int ret = 0; - - if (num < 2) { - *work = num; - return -EINVAL; - } - - req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; - req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1]; - if ((num - 2) < req2->num_additional_wqes) { - *work = num; - return -EINVAL; - } - *work = 2 + req2->num_additional_wqes; - - l5_cid = req1->iscsi_conn_id; - if (l5_cid >= MAX_ISCSI_TBL_SZ) - return -EINVAL; - - memset(&kcqe, 0, sizeof(kcqe)); - kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN; - kcqe.iscsi_conn_id = l5_cid; - kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; - - ctx = &cp->ctx_tbl[l5_cid]; - if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) { - kcqe.completion_status = - ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY; - goto done; - } - - if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { - atomic_dec(&cp->iscsi_conn); - goto done; - } - ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); - if (ret) { - atomic_dec(&cp->iscsi_conn); - ret = 0; - goto done; - } - ret = cnic_setup_bnx2x_ctx(dev, wqes, num); - if (ret < 0) { - cnic_free_bnx2x_conn_resc(dev, l5_cid); - atomic_dec(&cp->iscsi_conn); - goto done; - } - - kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; - kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid); - -done: - cqes[0] = (struct kcqe *) &kcqe; - cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); - return ret; -} - - -static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct cnic_local *cp = dev->cnic_priv; - struct iscsi_kwqe_conn_update *req = - (struct iscsi_kwqe_conn_update *) kwqe; - void *data; - union l5cm_specific_data l5_data; - u32 l5_cid, cid = BNX2X_SW_CID(req->context_id); - int ret; - - if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0) - return -EINVAL; - - data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); - if (!data) - return -ENOMEM; - - memcpy(data, kwqe, sizeof(struct kwqe)); - - ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN, - req->context_id, ISCSI_CONNECTION_TYPE, &l5_data); - return ret; -} - -static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; - union l5cm_specific_data l5_data; - int ret; - u32 hw_cid; - - init_waitqueue_head(&ctx->waitq); - ctx->wait_cond = 0; - memset(&l5_data, 0, sizeof(l5_data)); - hw_cid = BNX2X_HW_CID(cp, ctx->cid); - - ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, - hw_cid, NONE_CONNECTION_TYPE, &l5_data); - - if (ret == 0) { - wait_event(ctx->waitq, ctx->wait_cond); - if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags))) - return -EBUSY; - } - - return ret; -} - -static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 
-{ - struct cnic_local *cp = dev->cnic_priv; - struct iscsi_kwqe_conn_destroy *req = - (struct iscsi_kwqe_conn_destroy *) kwqe; - u32 l5_cid = req->reserved0; - struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; - int ret = 0; - struct iscsi_kcqe kcqe; - struct kcqe *cqes[1]; - - if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) - goto skip_cfc_delete; - - if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { - unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies; - - if (delta > (2 * HZ)) - delta = 0; - - set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); - queue_delayed_work(cnic_wq, &cp->delete_task, delta); - goto destroy_reply; - } - - ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid); - -skip_cfc_delete: - cnic_free_bnx2x_conn_resc(dev, l5_cid); - - if (!ret) { - atomic_dec(&cp->iscsi_conn); - clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); - } - -destroy_reply: - memset(&kcqe, 0, sizeof(kcqe)); - kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; - kcqe.iscsi_conn_id = l5_cid; - kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; - kcqe.iscsi_conn_context_id = req->context_id; - - cqes[0] = (struct kcqe *) &kcqe; - cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); - - return ret; -} - -static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, - struct l4_kwq_connect_req1 *kwqe1, - struct l4_kwq_connect_req3 *kwqe3, - struct l5cm_active_conn_buffer *conn_buf) -{ - struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf; - struct l5cm_xstorm_conn_buffer *xstorm_buf = - &conn_buf->xstorm_conn_buffer; - struct l5cm_tstorm_conn_buffer *tstorm_buf = - &conn_buf->tstorm_conn_buffer; - struct regpair context_addr; - u32 cid = BNX2X_SW_CID(kwqe1->cid); - struct in6_addr src_ip, dst_ip; - int i; - u32 *addrp; - - addrp = (u32 *) &conn_addr->local_ip_addr; - for (i = 0; i < 4; i++, addrp++) - src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); - - addrp = (u32 *) &conn_addr->remote_ip_addr; - for (i = 0; i < 4; i++, addrp++) - dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); - - cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr); - - xstorm_buf->context_addr.hi = context_addr.hi; - xstorm_buf->context_addr.lo = context_addr.lo; - xstorm_buf->mss = 0xffff; - xstorm_buf->rcv_buf = kwqe3->rcv_buf; - if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE) - xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE; - xstorm_buf->pseudo_header_checksum = - swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); - - if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK)) - tstorm_buf->params |= - L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE; - if (kwqe3->ka_timeout) { - tstorm_buf->ka_enable = 1; - tstorm_buf->ka_timeout = kwqe3->ka_timeout; - tstorm_buf->ka_interval = kwqe3->ka_interval; - tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; - } - tstorm_buf->max_rt_time = 0xffffffff; -} - -static void cnic_init_bnx2x_mac(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 pfid = cp->pfid; - u8 *mac = dev->mac_addr; - - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]); - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]); - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]); - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]); - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]); - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), 
mac[5]); - - CNIC_WR8(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]); - CNIC_WR8(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, - mac[4]); - CNIC_WR8(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]); - CNIC_WR8(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, - mac[2]); - CNIC_WR8(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]); - CNIC_WR8(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, - mac[0]); -} - -static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) -{ - struct cnic_local *cp = dev->cnic_priv; - u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; - u16 tstorm_flags = 0; - - if (tcp_ts) { - xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; - tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; - } - - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); - - CNIC_WR16(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); -} - -static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], - u32 num, int *work) -{ - struct cnic_local *cp = dev->cnic_priv; - struct l4_kwq_connect_req1 *kwqe1 = - (struct l4_kwq_connect_req1 *) wqes[0]; - struct l4_kwq_connect_req3 *kwqe3; - struct l5cm_active_conn_buffer *conn_buf; - struct l5cm_conn_addr_params *conn_addr; - union l5cm_specific_data l5_data; - u32 l5_cid = kwqe1->pg_cid; - struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; - struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; - int ret; - - if (num < 2) { - *work = num; - return -EINVAL; - } - - if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) - *work = 3; - else - *work = 2; - - if (num < *work) { - *work = num; - return -EINVAL; - } - - if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { - netdev_err(dev->netdev, "conn_buf size too big\n"); - return -ENOMEM; - } - conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); - if (!conn_buf) - return -ENOMEM; - - memset(conn_buf, 0, sizeof(*conn_buf)); - - conn_addr = &conn_buf->conn_addr_buf; - conn_addr->remote_addr_0 = csk->ha[0]; - conn_addr->remote_addr_1 = csk->ha[1]; - conn_addr->remote_addr_2 = csk->ha[2]; - conn_addr->remote_addr_3 = csk->ha[3]; - conn_addr->remote_addr_4 = csk->ha[4]; - conn_addr->remote_addr_5 = csk->ha[5]; - - if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { - struct l4_kwq_connect_req2 *kwqe2 = - (struct l4_kwq_connect_req2 *) wqes[1]; - - conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; - conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; - conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; - - conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; - conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; - conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; - conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; - } - kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; - - conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; - conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; - conn_addr->local_tcp_port = kwqe1->src_port; - conn_addr->remote_tcp_port = kwqe1->dst_port; - - conn_addr->pmtu = kwqe3->pmtu; - cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); - - CNIC_WR16(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); - - cnic_bnx2x_set_tcp_timestamp(dev, - 
kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); - - ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, - kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); - if (!ret) - set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); - - return ret; -} - -static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; - union l5cm_specific_data l5_data; - int ret; - - memset(&l5_data, 0, sizeof(l5_data)); - ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, - req->cid, ISCSI_CONNECTION_TYPE, &l5_data); - return ret; -} - -static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; - union l5cm_specific_data l5_data; - int ret; - - memset(&l5_data, 0, sizeof(l5_data)); - ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, - req->cid, ISCSI_CONNECTION_TYPE, &l5_data); - return ret; -} -static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; - struct l4_kcq kcqe; - struct kcqe *cqes[1]; - - memset(&kcqe, 0, sizeof(kcqe)); - kcqe.pg_host_opaque = req->host_opaque; - kcqe.pg_cid = req->host_opaque; - kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; - cqes[0] = (struct kcqe *) &kcqe; - cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); - return 0; -} - -static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; - struct l4_kcq kcqe; - struct kcqe *cqes[1]; - - memset(&kcqe, 0, sizeof(kcqe)); - kcqe.pg_host_opaque = req->pg_host_opaque; - kcqe.pg_cid = req->pg_cid; - kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; - cqes[0] = (struct kcqe *) &kcqe; - cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); - return 0; -} - -static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct fcoe_kwqe_stat *req; - struct fcoe_stat_ramrod_params *fcoe_stat; - union l5cm_specific_data l5_data; - struct cnic_local *cp = dev->cnic_priv; - int ret; - u32 cid; - - req = (struct fcoe_kwqe_stat *) kwqe; - cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); - - fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); - if (!fcoe_stat) - return -ENOMEM; - - memset(fcoe_stat, 0, sizeof(*fcoe_stat)); - memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req)); - - ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid, - FCOE_CONNECTION_TYPE, &l5_data); - return ret; -} - -static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], - u32 num, int *work) -{ - int ret; - struct cnic_local *cp = dev->cnic_priv; - u32 cid; - struct fcoe_init_ramrod_params *fcoe_init; - struct fcoe_kwqe_init1 *req1; - struct fcoe_kwqe_init2 *req2; - struct fcoe_kwqe_init3 *req3; - union l5cm_specific_data l5_data; - - if (num < 3) { - *work = num; - return -EINVAL; - } - req1 = (struct fcoe_kwqe_init1 *) wqes[0]; - req2 = (struct fcoe_kwqe_init2 *) wqes[1]; - req3 = (struct fcoe_kwqe_init3 *) wqes[2]; - if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) { - *work = 1; - return -EINVAL; - } - if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) { - *work = 2; - return -EINVAL; - } - - if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) { - netdev_err(dev->netdev, "fcoe_init size too big\n"); - return -ENOMEM; - } - fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); - if (!fcoe_init) - return -ENOMEM; - - memset(fcoe_init, 0, sizeof(*fcoe_init)); - 
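/*
 * A minimal sketch of the fixed-slot carving behind
 * cnic_get_kwqe_16_data(): cnic_alloc_bnx2x_resc() earlier in this file
 * places slot i in page i / n at byte offset (i % n) * slot_size, where
 * n slots fit per page. kwq16_slot() is a hypothetical name, assuming
 * slot_size divides the page size evenly.
 */
static void *kwq16_slot(void **pg_arr, unsigned long page_size,
			unsigned long slot_size, unsigned long i)
{
	unsigned long n = page_size / slot_size;	/* slots per page */

	return (char *) pg_arr[i / n] + slot_size * (i % n);
}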
memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1)); - memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2)); - memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3)); - fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff; - fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32; - fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages; - - fcoe_init->sb_num = cp->status_blk_num; - fcoe_init->eq_prod = MAX_KCQ_IDX; - fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; - cp->kcq2.sw_prod_idx = 0; - - cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); - ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, - FCOE_CONNECTION_TYPE, &l5_data); - *work = 3; - return ret; -} - -static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], - u32 num, int *work) -{ - int ret = 0; - u32 cid = -1, l5_cid; - struct cnic_local *cp = dev->cnic_priv; - struct fcoe_kwqe_conn_offload1 *req1; - struct fcoe_kwqe_conn_offload2 *req2; - struct fcoe_kwqe_conn_offload3 *req3; - struct fcoe_kwqe_conn_offload4 *req4; - struct fcoe_conn_offload_ramrod_params *fcoe_offload; - struct cnic_context *ctx; - struct fcoe_context *fctx; - struct regpair ctx_addr; - union l5cm_specific_data l5_data; - struct fcoe_kcqe kcqe; - struct kcqe *cqes[1]; - - if (num < 4) { - *work = num; - return -EINVAL; - } - req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0]; - req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1]; - req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2]; - req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3]; - - *work = 4; - - l5_cid = req1->fcoe_conn_id; - if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS) - goto err_reply; - - l5_cid += BNX2X_FCOE_L5_CID_BASE; - - ctx = &cp->ctx_tbl[l5_cid]; - if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) - goto err_reply; - - ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); - if (ret) { - ret = 0; - goto err_reply; - } - cid = ctx->cid; - - fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); - if (fctx) { - u32 hw_cid = BNX2X_HW_CID(cp, cid); - u32 val; - - val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, - FCOE_CONNECTION_TYPE); - fctx->xstorm_ag_context.cdu_reserved = val; - val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, - FCOE_CONNECTION_TYPE); - fctx->ustorm_ag_context.cdu_usage = val; - } - if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) { - netdev_err(dev->netdev, "fcoe_offload size too big\n"); - goto err_reply; - } - fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); - if (!fcoe_offload) - goto err_reply; - - memset(fcoe_offload, 0, sizeof(*fcoe_offload)); - memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1)); - memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2)); - memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); - memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); - - cid = BNX2X_HW_CID(cp, cid); - ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, - FCOE_CONNECTION_TYPE, &l5_data); - if (!ret) - set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); - - return ret; - -err_reply: - if (cid != -1) - cnic_free_bnx2x_conn_resc(dev, l5_cid); - - memset(&kcqe, 0, sizeof(kcqe)); - kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN; - kcqe.fcoe_conn_id = req1->fcoe_conn_id; - kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; - - cqes[0] = (struct kcqe *) &kcqe; - cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); - return ret; -} - -static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct fcoe_kwqe_conn_enable_disable *req; - struct 
fcoe_conn_enable_disable_ramrod_params *fcoe_enable; - union l5cm_specific_data l5_data; - int ret; - u32 cid, l5_cid; - struct cnic_local *cp = dev->cnic_priv; - - req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; - cid = req->context_id; - l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE; - - if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) { - netdev_err(dev->netdev, "fcoe_enable size too big\n"); - return -ENOMEM; - } - fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); - if (!fcoe_enable) - return -ENOMEM; - - memset(fcoe_enable, 0, sizeof(*fcoe_enable)); - memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req)); - ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid, - FCOE_CONNECTION_TYPE, &l5_data); - return ret; -} - -static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct fcoe_kwqe_conn_enable_disable *req; - struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable; - union l5cm_specific_data l5_data; - int ret; - u32 cid, l5_cid; - struct cnic_local *cp = dev->cnic_priv; - - req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; - cid = req->context_id; - l5_cid = req->conn_id; - if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS) - return -EINVAL; - - l5_cid += BNX2X_FCOE_L5_CID_BASE; - - if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) { - netdev_err(dev->netdev, "fcoe_disable size too big\n"); - return -ENOMEM; - } - fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); - if (!fcoe_disable) - return -ENOMEM; - - memset(fcoe_disable, 0, sizeof(*fcoe_disable)); - memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req)); - ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid, - FCOE_CONNECTION_TYPE, &l5_data); - return ret; -} - -static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe) -{ - struct fcoe_kwqe_conn_destroy *req; - union l5cm_specific_data l5_data; - int ret; - u32 cid, l5_cid; - struct cnic_local *cp = dev->cnic_priv; - struct cnic_context *ctx; - struct fcoe_kcqe kcqe; - struct kcqe *cqes[1]; - - req = (struct fcoe_kwqe_conn_destroy *) kwqe; - cid = req->context_id; - l5_cid = req->conn_id; - if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS) - return -EINVAL; - - l5_cid += BNX2X_FCOE_L5_CID_BASE; - - ctx = &cp->ctx_tbl[l5_cid]; - - init_waitqueue_head(&ctx->waitq); - ctx->wait_cond = 0; - - memset(&l5_data, 0, sizeof(l5_data)); - ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid, - FCOE_CONNECTION_TYPE, &l5_data); - if (ret == 0) { - wait_event(ctx->waitq, ctx->wait_cond); - set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); - queue_delayed_work(cnic_wq, &cp->delete_task, - msecs_to_jiffies(2000)); - } - - memset(&kcqe, 0, sizeof(kcqe)); - kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN; - kcqe.fcoe_conn_id = req->conn_id; - kcqe.fcoe_conn_context_id = cid; - - cqes[0] = (struct kcqe *) &kcqe; - cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); - return ret; -} - -static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 i; - - for (i = start_cid; i < cp->max_cid_space; i++) { - struct cnic_context *ctx = &cp->ctx_tbl[i]; - int j; - - while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) - msleep(10); - - for (j = 0; j < 5; j++) { - if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) - break; - msleep(20); - } - - if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) - netdev_warn(dev->netdev, "CID %x not deleted\n", - ctx->cid); - } -} - -static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, 
struct kwqe *kwqe) -{ - struct fcoe_kwqe_destroy *req; - union l5cm_specific_data l5_data; - struct cnic_local *cp = dev->cnic_priv; - int ret; - u32 cid; - - cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); - - req = (struct fcoe_kwqe_destroy *) kwqe; - cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); - - memset(&l5_data, 0, sizeof(l5_data)); - ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, - FCOE_CONNECTION_TYPE, &l5_data); - return ret; -} - -static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, - struct kwqe *wqes[], u32 num_wqes) -{ - int i, work, ret; - u32 opcode; - struct kwqe *kwqe; - - if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) - return -EAGAIN; /* bnx2 is down */ - - for (i = 0; i < num_wqes; ) { - kwqe = wqes[i]; - opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); - work = 1; - - switch (opcode) { - case ISCSI_KWQE_OPCODE_INIT1: - ret = cnic_bnx2x_iscsi_init1(dev, kwqe); - break; - case ISCSI_KWQE_OPCODE_INIT2: - ret = cnic_bnx2x_iscsi_init2(dev, kwqe); - break; - case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: - ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], - num_wqes - i, &work); - break; - case ISCSI_KWQE_OPCODE_UPDATE_CONN: - ret = cnic_bnx2x_iscsi_update(dev, kwqe); - break; - case ISCSI_KWQE_OPCODE_DESTROY_CONN: - ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); - break; - case L4_KWQE_OPCODE_VALUE_CONNECT1: - ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, - &work); - break; - case L4_KWQE_OPCODE_VALUE_CLOSE: - ret = cnic_bnx2x_close(dev, kwqe); - break; - case L4_KWQE_OPCODE_VALUE_RESET: - ret = cnic_bnx2x_reset(dev, kwqe); - break; - case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: - ret = cnic_bnx2x_offload_pg(dev, kwqe); - break; - case L4_KWQE_OPCODE_VALUE_UPDATE_PG: - ret = cnic_bnx2x_update_pg(dev, kwqe); - break; - case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: - ret = 0; - break; - default: - ret = 0; - netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", - opcode); - break; - } - if (ret < 0) - netdev_err(dev->netdev, "KWQE(0x%x) failed\n", - opcode); - i += work; - } - return 0; -} - -static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, - struct kwqe *wqes[], u32 num_wqes) -{ - struct cnic_local *cp = dev->cnic_priv; - int i, work, ret; - u32 opcode; - struct kwqe *kwqe; - - if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) - return -EAGAIN; /* bnx2 is down */ - - if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) - return -EINVAL; - - for (i = 0; i < num_wqes; ) { - kwqe = wqes[i]; - opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); - work = 1; - - switch (opcode) { - case FCOE_KWQE_OPCODE_INIT1: - ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i], - num_wqes - i, &work); - break; - case FCOE_KWQE_OPCODE_OFFLOAD_CONN1: - ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i], - num_wqes - i, &work); - break; - case FCOE_KWQE_OPCODE_ENABLE_CONN: - ret = cnic_bnx2x_fcoe_enable(dev, kwqe); - break; - case FCOE_KWQE_OPCODE_DISABLE_CONN: - ret = cnic_bnx2x_fcoe_disable(dev, kwqe); - break; - case FCOE_KWQE_OPCODE_DESTROY_CONN: - ret = cnic_bnx2x_fcoe_destroy(dev, kwqe); - break; - case FCOE_KWQE_OPCODE_DESTROY: - ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe); - break; - case FCOE_KWQE_OPCODE_STAT: - ret = cnic_bnx2x_fcoe_stat(dev, kwqe); - break; - default: - ret = 0; - netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", - opcode); - break; - } - if (ret < 0) - netdev_err(dev->netdev, "KWQE(0x%x) failed\n", - opcode); - i += work; - } - return 0; -} - -static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], - u32 num_wqes) -{ - int ret = -EINVAL; - u32 layer_code; - - if 
(!test_bit(CNIC_F_CNIC_UP, &dev->flags)) - return -EAGAIN; /* bnx2x is down */ - - if (!num_wqes) - return 0; - - layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK; - switch (layer_code) { - case KWQE_FLAGS_LAYER_MASK_L5_ISCSI: - case KWQE_FLAGS_LAYER_MASK_L4: - case KWQE_FLAGS_LAYER_MASK_L2: - ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes); - break; - - case KWQE_FLAGS_LAYER_MASK_L5_FCOE: - ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes); - break; - } - return ret; -} - -static inline u32 cnic_get_kcqe_layer_mask(u32 opflag) -{ - if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN)) - return KCQE_FLAGS_LAYER_MASK_L4; - - return opflag & KCQE_FLAGS_LAYER_MASK; -} - -static void service_kcqes(struct cnic_dev *dev, int num_cqes) -{ - struct cnic_local *cp = dev->cnic_priv; - int i, j, comp = 0; - - i = 0; - j = 1; - while (num_cqes) { - struct cnic_ulp_ops *ulp_ops; - int ulp_type; - u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; - u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag); - - if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) - comp++; - - while (j < num_cqes) { - u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; - - if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer) - break; - - if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) - comp++; - j++; - } - - if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) - ulp_type = CNIC_ULP_RDMA; - else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) - ulp_type = CNIC_ULP_ISCSI; - else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE) - ulp_type = CNIC_ULP_FCOE; - else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) - ulp_type = CNIC_ULP_L4; - else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) - goto end; - else { - netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", - kcqe_op_flag); - goto end; - } - - rcu_read_lock(); - ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); - if (likely(ulp_ops)) { - ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], - cp->completed_kcq + i, j); - } - rcu_read_unlock(); -end: - num_cqes -= j; - i += j; - j = 1; - } - if (unlikely(comp)) - cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp); -} - -static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) -{ - struct cnic_local *cp = dev->cnic_priv; - u16 i, ri, hw_prod, last; - struct kcqe *kcqe; - int kcqe_cnt = 0, last_cnt = 0; - - i = ri = last = info->sw_prod_idx; - ri &= MAX_KCQ_IDX; - hw_prod = *info->hw_prod_idx_ptr; - hw_prod = info->hw_idx(hw_prod); - - while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { - kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; - cp->completed_kcq[kcqe_cnt++] = kcqe; - i = info->next_idx(i); - ri = i & MAX_KCQ_IDX; - if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { - last_cnt = kcqe_cnt; - last = i; - } - } - - info->sw_prod_idx = last; - return last_cnt; -} - -static int cnic_l2_completion(struct cnic_local *cp) -{ - u16 hw_cons, sw_cons; - struct cnic_uio_dev *udev = cp->udev; - union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) - (udev->l2_ring + (2 * BCM_PAGE_SIZE)); - u32 cmd; - int comp = 0; - - if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) - return 0; - - hw_cons = *cp->rx_cons_ptr; - if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) - hw_cons++; - - sw_cons = cp->rx_cons; - while (sw_cons != hw_cons) { - u8 cqe_fp_flags; - - cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; - cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; - if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { - cmd = 
le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); - cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; - if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || - cmd == RAMROD_CMD_ID_ETH_HALT) - comp++; - } - sw_cons = BNX2X_NEXT_RCQE(sw_cons); - } - return comp; -} - -static void cnic_chk_pkt_rings(struct cnic_local *cp) -{ - u16 rx_cons, tx_cons; - int comp = 0; - - if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) - return; - - rx_cons = *cp->rx_cons_ptr; - tx_cons = *cp->tx_cons_ptr; - if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { - if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) - comp = cnic_l2_completion(cp); - - cp->tx_cons = tx_cons; - cp->rx_cons = rx_cons; - - if (cp->udev) - uio_event_notify(&cp->udev->cnic_uinfo); - } - if (comp) - clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); -} - -static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; - int kcqe_cnt; - - /* status block index must be read before reading other fields */ - rmb(); - cp->kwq_con_idx = *cp->kwq_con_idx_ptr; - - while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { - - service_kcqes(dev, kcqe_cnt); - - /* Tell compiler that status_blk fields can change. */ - barrier(); - status_idx = (u16) *cp->kcq1.status_idx_ptr; - /* status block index must be read first */ - rmb(); - cp->kwq_con_idx = *cp->kwq_con_idx_ptr; - } - - CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); - - cnic_chk_pkt_rings(cp); - - return status_idx; -} - -static int cnic_service_bnx2(void *data, void *status_blk) -{ - struct cnic_dev *dev = data; - - if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) { - struct status_block *sblk = status_blk; - - return sblk->status_idx; - } - - return cnic_service_bnx2_queues(dev); -} - -static void cnic_service_bnx2_msix(unsigned long data) -{ - struct cnic_dev *dev = (struct cnic_dev *) data; - struct cnic_local *cp = dev->cnic_priv; - - cp->last_status_idx = cnic_service_bnx2_queues(dev); - - CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); -} - -static void cnic_doirq(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - - if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { - u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; - - prefetch(cp->status_blk.gen); - prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); - - tasklet_schedule(&cp->cnic_irq_task); - } -} - -static irqreturn_t cnic_irq(int irq, void *dev_instance) -{ - struct cnic_dev *dev = dev_instance; - struct cnic_local *cp = dev->cnic_priv; - - if (cp->ack_int) - cp->ack_int(dev); - - cnic_doirq(dev); - - return IRQ_HANDLED; -} - -static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, - u16 index, u8 op, u8 update) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + - COMMAND_REG_INT_ACK); - struct igu_ack_register igu_ack; - - igu_ack.status_block_index = index; - igu_ack.sb_id_and_flags = - ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | - (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | - (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | - (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); - - CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); -} - -static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment, - u16 index, u8 op, u8 update) -{ - struct igu_regular cmd_data; - u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; - - 
cmd_data.sb_id_and_flags = - (index << IGU_REGULAR_SB_INDEX_SHIFT) | - (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | - (update << IGU_REGULAR_BUPDATE_SHIFT) | - (op << IGU_REGULAR_ENABLE_INT_SHIFT); - - - CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags); -} - -static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - - cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0, - IGU_INT_DISABLE, 0); -} - -static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - - cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, - IGU_INT_DISABLE, 0); -} - -static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) -{ - u32 last_status = *info->status_idx_ptr; - int kcqe_cnt; - - /* status block index must be read before reading the KCQ */ - rmb(); - while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { - - service_kcqes(dev, kcqe_cnt); - - /* Tell compiler that sblk fields can change. */ - barrier(); - - last_status = *info->status_idx_ptr; - /* status block index must be read before reading the KCQ */ - rmb(); - } - return last_status; -} - -static void cnic_service_bnx2x_bh(unsigned long data) -{ - struct cnic_dev *dev = (struct cnic_dev *) data; - struct cnic_local *cp = dev->cnic_priv; - u32 status_idx, new_status_idx; - - if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) - return; - - while (1) { - status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); - - CNIC_WR16(dev, cp->kcq1.io_addr, - cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); - - if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { - cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, - status_idx, IGU_INT_ENABLE, 1); - break; - } - - new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); - - if (new_status_idx != status_idx) - continue; - - CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + - MAX_KCQ_IDX); - - cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, - status_idx, IGU_INT_ENABLE, 1); - - break; - } -} - -static int cnic_service_bnx2x(void *data, void *status_blk) -{ - struct cnic_dev *dev = data; - struct cnic_local *cp = dev->cnic_priv; - - if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) - cnic_doirq(dev); - - cnic_chk_pkt_rings(cp); - - return 0; -} - -static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type) -{ - struct cnic_ulp_ops *ulp_ops; - - if (if_type == CNIC_ULP_ISCSI) - cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); - - mutex_lock(&cnic_lock); - ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], - lockdep_is_held(&cnic_lock)); - if (!ulp_ops) { - mutex_unlock(&cnic_lock); - return; - } - set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); - mutex_unlock(&cnic_lock); - - if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) - ulp_ops->cnic_stop(cp->ulp_handle[if_type]); - - clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); -} - -static void cnic_ulp_stop(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - int if_type; - - for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) - cnic_ulp_stop_one(cp, if_type); -} - -static void cnic_ulp_start(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - int if_type; - - for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { - struct cnic_ulp_ops *ulp_ops; - - mutex_lock(&cnic_lock); - ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], - lockdep_is_held(&cnic_lock)); - if (!ulp_ops || !ulp_ops->cnic_start) { - mutex_unlock(&cnic_lock); - continue; - } - 
set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); - mutex_unlock(&cnic_lock); - - if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) - ulp_ops->cnic_start(cp->ulp_handle[if_type]); - - clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); - } -} - -static int cnic_ctl(void *data, struct cnic_ctl_info *info) -{ - struct cnic_dev *dev = data; - - switch (info->cmd) { - case CNIC_CTL_STOP_CMD: - cnic_hold(dev); - - cnic_ulp_stop(dev); - cnic_stop_hw(dev); - - cnic_put(dev); - break; - case CNIC_CTL_START_CMD: - cnic_hold(dev); - - if (!cnic_start_hw(dev)) - cnic_ulp_start(dev); - - cnic_put(dev); - break; - case CNIC_CTL_STOP_ISCSI_CMD: { - struct cnic_local *cp = dev->cnic_priv; - set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags); - queue_delayed_work(cnic_wq, &cp->delete_task, 0); - break; - } - case CNIC_CTL_COMPLETION_CMD: { - struct cnic_ctl_completion *comp = &info->data.comp; - u32 cid = BNX2X_SW_CID(comp->cid); - u32 l5_cid; - struct cnic_local *cp = dev->cnic_priv; - - if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { - struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; - - if (unlikely(comp->error)) { - set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags); - netdev_err(dev->netdev, - "CID %x CFC delete comp error %x\n", - cid, comp->error); - } - - ctx->wait_cond = 1; - wake_up(&ctx->waitq); - } - break; - } - default: - return -EINVAL; - } - return 0; -} - -static void cnic_ulp_init(struct cnic_dev *dev) -{ - int i; - struct cnic_local *cp = dev->cnic_priv; - - for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { - struct cnic_ulp_ops *ulp_ops; - - mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl_prot(i); - if (!ulp_ops || !ulp_ops->cnic_init) { - mutex_unlock(&cnic_lock); - continue; - } - ulp_get(ulp_ops); - mutex_unlock(&cnic_lock); - - if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) - ulp_ops->cnic_init(dev); - - ulp_put(ulp_ops); - } -} - -static void cnic_ulp_exit(struct cnic_dev *dev) -{ - int i; - struct cnic_local *cp = dev->cnic_priv; - - for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { - struct cnic_ulp_ops *ulp_ops; - - mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl_prot(i); - if (!ulp_ops || !ulp_ops->cnic_exit) { - mutex_unlock(&cnic_lock); - continue; - } - ulp_get(ulp_ops); - mutex_unlock(&cnic_lock); - - if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) - ulp_ops->cnic_exit(dev); - - ulp_put(ulp_ops); - } -} - -static int cnic_cm_offload_pg(struct cnic_sock *csk) -{ - struct cnic_dev *dev = csk->dev; - struct l4_kwq_offload_pg *l4kwqe; - struct kwqe *wqes[1]; - - l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; - memset(l4kwqe, 0, sizeof(*l4kwqe)); - wqes[0] = (struct kwqe *) l4kwqe; - - l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; - l4kwqe->flags = - L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; - l4kwqe->l2hdr_nbytes = ETH_HLEN; - - l4kwqe->da0 = csk->ha[0]; - l4kwqe->da1 = csk->ha[1]; - l4kwqe->da2 = csk->ha[2]; - l4kwqe->da3 = csk->ha[3]; - l4kwqe->da4 = csk->ha[4]; - l4kwqe->da5 = csk->ha[5]; - - l4kwqe->sa0 = dev->mac_addr[0]; - l4kwqe->sa1 = dev->mac_addr[1]; - l4kwqe->sa2 = dev->mac_addr[2]; - l4kwqe->sa3 = dev->mac_addr[3]; - l4kwqe->sa4 = dev->mac_addr[4]; - l4kwqe->sa5 = dev->mac_addr[5]; - - l4kwqe->etype = ETH_P_IP; - l4kwqe->ipid_start = DEF_IPID_START; - l4kwqe->host_opaque = csk->l5_cid; - - if (csk->vlan_id) { - l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; - l4kwqe->vlan_tag = csk->vlan_id; - l4kwqe->l2hdr_nbytes += 4; - } - - return dev->submit_kwqes(dev, wqes, 1); -} - -static int cnic_cm_update_pg(struct cnic_sock 
*csk) -{ - struct cnic_dev *dev = csk->dev; - struct l4_kwq_update_pg *l4kwqe; - struct kwqe *wqes[1]; - - l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; - memset(l4kwqe, 0, sizeof(*l4kwqe)); - wqes[0] = (struct kwqe *) l4kwqe; - - l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; - l4kwqe->flags = - L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; - l4kwqe->pg_cid = csk->pg_cid; - - l4kwqe->da0 = csk->ha[0]; - l4kwqe->da1 = csk->ha[1]; - l4kwqe->da2 = csk->ha[2]; - l4kwqe->da3 = csk->ha[3]; - l4kwqe->da4 = csk->ha[4]; - l4kwqe->da5 = csk->ha[5]; - - l4kwqe->pg_host_opaque = csk->l5_cid; - l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; - - return dev->submit_kwqes(dev, wqes, 1); -} - -static int cnic_cm_upload_pg(struct cnic_sock *csk) -{ - struct cnic_dev *dev = csk->dev; - struct l4_kwq_upload *l4kwqe; - struct kwqe *wqes[1]; - - l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; - memset(l4kwqe, 0, sizeof(*l4kwqe)); - wqes[0] = (struct kwqe *) l4kwqe; - - l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; - l4kwqe->flags = - L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; - l4kwqe->cid = csk->pg_cid; - - return dev->submit_kwqes(dev, wqes, 1); -} - -static int cnic_cm_conn_req(struct cnic_sock *csk) -{ - struct cnic_dev *dev = csk->dev; - struct l4_kwq_connect_req1 *l4kwqe1; - struct l4_kwq_connect_req2 *l4kwqe2; - struct l4_kwq_connect_req3 *l4kwqe3; - struct kwqe *wqes[3]; - u8 tcp_flags = 0; - int num_wqes = 2; - - l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; - l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; - l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; - memset(l4kwqe1, 0, sizeof(*l4kwqe1)); - memset(l4kwqe2, 0, sizeof(*l4kwqe2)); - memset(l4kwqe3, 0, sizeof(*l4kwqe3)); - - l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; - l4kwqe3->flags = - L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; - l4kwqe3->ka_timeout = csk->ka_timeout; - l4kwqe3->ka_interval = csk->ka_interval; - l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; - l4kwqe3->tos = csk->tos; - l4kwqe3->ttl = csk->ttl; - l4kwqe3->snd_seq_scale = csk->snd_seq_scale; - l4kwqe3->pmtu = csk->mtu; - l4kwqe3->rcv_buf = csk->rcv_buf; - l4kwqe3->snd_buf = csk->snd_buf; - l4kwqe3->seed = csk->seed; - - wqes[0] = (struct kwqe *) l4kwqe1; - if (test_bit(SK_F_IPV6, &csk->flags)) { - wqes[1] = (struct kwqe *) l4kwqe2; - wqes[2] = (struct kwqe *) l4kwqe3; - num_wqes = 3; - - l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; - l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; - l4kwqe2->flags = - L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | - L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; - l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); - l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); - l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); - l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); - l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); - l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); - l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - - sizeof(struct tcphdr); - } else { - wqes[1] = (struct kwqe *) l4kwqe3; - l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - - sizeof(struct tcphdr); - } - - l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; - l4kwqe1->flags = - (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | - L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; - l4kwqe1->cid = csk->cid; - l4kwqe1->pg_cid = csk->pg_cid; - l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); - l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); - l4kwqe1->src_port = be16_to_cpu(csk->src_port); - 
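/*
 * A minimal sketch of the MSS derivation used by cnic_cm_conn_req()
 * above: advertise the path MTU minus the fixed IP and TCP header
 * sizes. conn_mss() is a hypothetical name; 20 and 40 match
 * sizeof(struct iphdr) and sizeof(struct ipv6hdr), and the final 20 is
 * sizeof(struct tcphdr).
 */
static unsigned int conn_mss(unsigned int pmtu, int is_v6)
{
	unsigned int ip_hdr = is_v6 ? 40 : 20;

	return pmtu - ip_hdr - 20;
}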
l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); - if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) - tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; - if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) - tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; - if (csk->tcp_flags & SK_TCP_NAGLE) - tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; - if (csk->tcp_flags & SK_TCP_TIMESTAMP) - tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; - if (csk->tcp_flags & SK_TCP_SACK) - tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; - if (csk->tcp_flags & SK_TCP_SEG_SCALING) - tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; - - l4kwqe1->tcp_flags = tcp_flags; - - return dev->submit_kwqes(dev, wqes, num_wqes); -} - -static int cnic_cm_close_req(struct cnic_sock *csk) -{ - struct cnic_dev *dev = csk->dev; - struct l4_kwq_close_req *l4kwqe; - struct kwqe *wqes[1]; - - l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; - memset(l4kwqe, 0, sizeof(*l4kwqe)); - wqes[0] = (struct kwqe *) l4kwqe; - - l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; - l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; - l4kwqe->cid = csk->cid; - - return dev->submit_kwqes(dev, wqes, 1); -} - -static int cnic_cm_abort_req(struct cnic_sock *csk) -{ - struct cnic_dev *dev = csk->dev; - struct l4_kwq_reset_req *l4kwqe; - struct kwqe *wqes[1]; - - l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; - memset(l4kwqe, 0, sizeof(*l4kwqe)); - wqes[0] = (struct kwqe *) l4kwqe; - - l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; - l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; - l4kwqe->cid = csk->cid; - - return dev->submit_kwqes(dev, wqes, 1); -} - -static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, - u32 l5_cid, struct cnic_sock **csk, void *context) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_sock *csk1; - - if (l5_cid >= MAX_CM_SK_TBL_SZ) - return -EINVAL; - - if (cp->ctx_tbl) { - struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; - - if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) - return -EAGAIN; - } - - csk1 = &cp->csk_tbl[l5_cid]; - if (atomic_read(&csk1->ref_count)) - return -EAGAIN; - - if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) - return -EBUSY; - - csk1->dev = dev; - csk1->cid = cid; - csk1->l5_cid = l5_cid; - csk1->ulp_type = ulp_type; - csk1->context = context; - - csk1->ka_timeout = DEF_KA_TIMEOUT; - csk1->ka_interval = DEF_KA_INTERVAL; - csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; - csk1->tos = DEF_TOS; - csk1->ttl = DEF_TTL; - csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; - csk1->rcv_buf = DEF_RCV_BUF; - csk1->snd_buf = DEF_SND_BUF; - csk1->seed = DEF_SEED; - - *csk = csk1; - return 0; -} - -static void cnic_cm_cleanup(struct cnic_sock *csk) -{ - if (csk->src_port) { - struct cnic_dev *dev = csk->dev; - struct cnic_local *cp = dev->cnic_priv; - - cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port)); - csk->src_port = 0; - } -} - -static void cnic_close_conn(struct cnic_sock *csk) -{ - if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { - cnic_cm_upload_pg(csk); - clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); - } - cnic_cm_cleanup(csk); -} - -static int cnic_cm_destroy(struct cnic_sock *csk) -{ - if (!cnic_in_use(csk)) - return -EINVAL; - - csk_hold(csk); - clear_bit(SK_F_INUSE, &csk->flags); - smp_mb__after_clear_bit(); - while (atomic_read(&csk->ref_count) != 1) - msleep(1); - cnic_cm_cleanup(csk); - - csk->flags = 0; - csk_put(csk); - return 0; -} - -static inline u16 cnic_get_vlan(struct net_device *dev, - struct net_device **vlan_dev) -{ - if (dev->priv_flags & IFF_802_1Q_VLAN) { - 
*vlan_dev = vlan_dev_real_dev(dev); - return vlan_dev_vlan_id(dev); - } - *vlan_dev = dev; - return 0; -} - -static int cnic_get_v4_route(struct sockaddr_in *dst_addr, - struct dst_entry **dst) -{ -#if defined(CONFIG_INET) - struct rtable *rt; - - rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0); - if (!IS_ERR(rt)) { - *dst = &rt->dst; - return 0; - } - return PTR_ERR(rt); -#else - return -ENETUNREACH; -#endif -} - -static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, - struct dst_entry **dst) -{ -#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) - struct flowi6 fl6; - - memset(&fl6, 0, sizeof(fl6)); - ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr); - if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) - fl6.flowi6_oif = dst_addr->sin6_scope_id; - - *dst = ip6_route_output(&init_net, NULL, &fl6); - if (*dst) - return 0; -#endif - - return -ENETUNREACH; -} - -static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, - int ulp_type) -{ - struct cnic_dev *dev = NULL; - struct dst_entry *dst; - struct net_device *netdev = NULL; - int err = -ENETUNREACH; - - if (dst_addr->sin_family == AF_INET) - err = cnic_get_v4_route(dst_addr, &dst); - else if (dst_addr->sin_family == AF_INET6) { - struct sockaddr_in6 *dst_addr6 = - (struct sockaddr_in6 *) dst_addr; - - err = cnic_get_v6_route(dst_addr6, &dst); - } else - return NULL; - - if (err) - return NULL; - - if (!dst->dev) - goto done; - - cnic_get_vlan(dst->dev, &netdev); - - dev = cnic_from_netdev(netdev); - -done: - dst_release(dst); - if (dev) - cnic_put(dev); - return dev; -} - -static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) -{ - struct cnic_dev *dev = csk->dev; - struct cnic_local *cp = dev->cnic_priv; - - return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); -} - -static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) -{ - struct cnic_dev *dev = csk->dev; - struct cnic_local *cp = dev->cnic_priv; - int is_v6, rc = 0; - struct dst_entry *dst = NULL; - struct net_device *realdev; - __be16 local_port; - u32 port_id; - - if (saddr->local.v6.sin6_family == AF_INET6 && - saddr->remote.v6.sin6_family == AF_INET6) - is_v6 = 1; - else if (saddr->local.v4.sin_family == AF_INET && - saddr->remote.v4.sin_family == AF_INET) - is_v6 = 0; - else - return -EINVAL; - - clear_bit(SK_F_IPV6, &csk->flags); - - if (is_v6) { - set_bit(SK_F_IPV6, &csk->flags); - cnic_get_v6_route(&saddr->remote.v6, &dst); - - memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, - sizeof(struct in6_addr)); - csk->dst_port = saddr->remote.v6.sin6_port; - local_port = saddr->local.v6.sin6_port; - - } else { - cnic_get_v4_route(&saddr->remote.v4, &dst); - - csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; - csk->dst_port = saddr->remote.v4.sin_port; - local_port = saddr->local.v4.sin_port; - } - - csk->vlan_id = 0; - csk->mtu = dev->netdev->mtu; - if (dst && dst->dev) { - u16 vlan = cnic_get_vlan(dst->dev, &realdev); - if (realdev == dev->netdev) { - csk->vlan_id = vlan; - csk->mtu = dst_mtu(dst); - } - } - - port_id = be16_to_cpu(local_port); - if (port_id >= CNIC_LOCAL_PORT_MIN && - port_id < CNIC_LOCAL_PORT_MAX) { - if (cnic_alloc_id(&cp->csk_port_tbl, port_id)) - port_id = 0; - } else - port_id = 0; - - if (!port_id) { - port_id = cnic_alloc_new_id(&cp->csk_port_tbl); - if (port_id == -1) { - rc = -ENOMEM; - goto err_out; - } - local_port = cpu_to_be16(port_id); - } - csk->src_port = local_port; - -err_out: - dst_release(dst); - return rc; -} - -static 
void cnic_init_csk_state(struct cnic_sock *csk) -{ - csk->state = 0; - clear_bit(SK_F_OFFLD_SCHED, &csk->flags); - clear_bit(SK_F_CLOSING, &csk->flags); -} - -static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) -{ - struct cnic_local *cp = csk->dev->cnic_priv; - int err = 0; - - if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) - return -EOPNOTSUPP; - - if (!cnic_in_use(csk)) - return -EINVAL; - - if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) - return -EINVAL; - - cnic_init_csk_state(csk); - - err = cnic_get_route(csk, saddr); - if (err) - goto err_out; - - err = cnic_resolve_addr(csk, saddr); - if (!err) - return 0; - -err_out: - clear_bit(SK_F_CONNECT_START, &csk->flags); - return err; -} - -static int cnic_cm_abort(struct cnic_sock *csk) -{ - struct cnic_local *cp = csk->dev->cnic_priv; - u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; - - if (!cnic_in_use(csk)) - return -EINVAL; - - if (cnic_abort_prep(csk)) - return cnic_cm_abort_req(csk); - - /* Getting here means that we haven't started connect, or - * connect was not successful. - */ - - cp->close_conn(csk, opcode); - if (csk->state != opcode) - return -EALREADY; - - return 0; -} - -static int cnic_cm_close(struct cnic_sock *csk) -{ - if (!cnic_in_use(csk)) - return -EINVAL; - - if (cnic_close_prep(csk)) { - csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; - return cnic_cm_close_req(csk); - } else { - return -EALREADY; - } - return 0; -} - -static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, - u8 opcode) -{ - struct cnic_ulp_ops *ulp_ops; - int ulp_type = csk->ulp_type; - - rcu_read_lock(); - ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); - if (ulp_ops) { - if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) - ulp_ops->cm_connect_complete(csk); - else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) - ulp_ops->cm_close_complete(csk); - else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) - ulp_ops->cm_remote_abort(csk); - else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) - ulp_ops->cm_abort_complete(csk); - else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) - ulp_ops->cm_remote_close(csk); - } - rcu_read_unlock(); -} - -static int cnic_cm_set_pg(struct cnic_sock *csk) -{ - if (cnic_offld_prep(csk)) { - if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) - cnic_cm_update_pg(csk); - else - cnic_cm_offload_pg(csk); - } - return 0; -} - -static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 l5_cid = kcqe->pg_host_opaque; - u8 opcode = kcqe->op_code; - struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; - - csk_hold(csk); - if (!cnic_in_use(csk)) - goto done; - - if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { - clear_bit(SK_F_OFFLD_SCHED, &csk->flags); - goto done; - } - /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ - if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { - clear_bit(SK_F_OFFLD_SCHED, &csk->flags); - cnic_cm_upcall(cp, csk, - L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); - goto done; - } - - csk->pg_cid = kcqe->pg_cid; - set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); - cnic_cm_conn_req(csk); - -done: - csk_put(csk); -} - -static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe) -{ - struct cnic_local *cp = dev->cnic_priv; - struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe; - u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE; - struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; - - ctx->timestamp = jiffies; - 
ctx->wait_cond = 1;
- wake_up(&ctx->waitq);
-}
-
-static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
-{
- struct cnic_local *cp = dev->cnic_priv;
- struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
- u8 opcode = l4kcqe->op_code;
- u32 l5_cid;
- struct cnic_sock *csk;
-
- if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
- cnic_process_fcoe_term_conn(dev, kcqe);
- return;
- }
- if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
- opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
- cnic_cm_process_offld_pg(dev, l4kcqe);
- return;
- }
-
- l5_cid = l4kcqe->conn_id;
- if (opcode & 0x80)
- l5_cid = l4kcqe->cid;
- if (l5_cid >= MAX_CM_SK_TBL_SZ)
- return;
-
- csk = &cp->csk_tbl[l5_cid];
- csk_hold(csk);
-
- if (!cnic_in_use(csk)) {
- csk_put(csk);
- return;
- }
-
- switch (opcode) {
- case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
- if (l4kcqe->status != 0) {
- clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
- cnic_cm_upcall(cp, csk,
- L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
- }
- break;
- case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
- if (l4kcqe->status == 0)
- set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
-
- smp_mb__before_clear_bit();
- clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
- cnic_cm_upcall(cp, csk, opcode);
- break;
-
- case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
- case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
- case L4_KCQE_OPCODE_VALUE_RESET_COMP:
- case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
- case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
- cp->close_conn(csk, opcode);
- break;
-
- case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
- /* after we already sent CLOSE_REQ */
- if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
- !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
- csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
- cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
- else
- cnic_cm_upcall(cp, csk, opcode);
- break;
- }
- csk_put(csk);
-}
-
-static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
-{
- struct cnic_dev *dev = data;
- int i;
-
- for (i = 0; i < num; i++)
- cnic_cm_process_kcqe(dev, kcqe[i]);
-}
-
-static struct cnic_ulp_ops cm_ulp_ops = {
- .indicate_kcqes = cnic_cm_indicate_kcqe,
-};
-
-static void cnic_cm_free_mem(struct cnic_dev *dev)
-{
- struct cnic_local *cp = dev->cnic_priv;
-
- kfree(cp->csk_tbl);
- cp->csk_tbl = NULL;
- cnic_free_id_tbl(&cp->csk_port_tbl);
-}
-
-static int cnic_cm_alloc_mem(struct cnic_dev *dev)
-{
- struct cnic_local *cp = dev->cnic_priv;
- u32 port_id;
-
- cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
- GFP_KERNEL);
- if (!cp->csk_tbl)
- return -ENOMEM;
-
- port_id = random32();
- port_id %= CNIC_LOCAL_PORT_RANGE;
- if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
- CNIC_LOCAL_PORT_MIN, port_id)) {
- cnic_cm_free_mem(dev);
- return -ENOMEM;
- }
- return 0;
-}
-
-static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
-{
- if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
- /* Unsolicited RESET_COMP or RESET_RECEIVED */
- opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
- csk->state = opcode;
- }
-
- /* 1. If event opcode matches the expected event in csk->state
- * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
- * event
- * 3. If the expected event is 0, meaning the connection was never
- * established, we accept the opcode from cm_abort.
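- * In all three cases the transition to the closing state happens at
- * most once: test_and_set_bit(SK_F_CLOSING) below rejects a second,
- * concurrent completion for the same socket.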
- */ - if (opcode == csk->state || csk->state == 0 || - csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP || - csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) { - if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { - if (csk->state == 0) - csk->state = opcode; - return 1; - } - } - return 0; -} - -static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) -{ - struct cnic_dev *dev = csk->dev; - struct cnic_local *cp = dev->cnic_priv; - - if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) { - cnic_cm_upcall(cp, csk, opcode); - return; - } - - clear_bit(SK_F_CONNECT_START, &csk->flags); - cnic_close_conn(csk); - csk->state = opcode; - cnic_cm_upcall(cp, csk, opcode); -} - -static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) -{ -} - -static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) -{ - u32 seed; - - seed = random32(); - cnic_ctx_wr(dev, 45, 0, seed); - return 0; -} - -static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) -{ - struct cnic_dev *dev = csk->dev; - struct cnic_local *cp = dev->cnic_priv; - struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; - union l5cm_specific_data l5_data; - u32 cmd = 0; - int close_complete = 0; - - switch (opcode) { - case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: - case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: - case L4_KCQE_OPCODE_VALUE_RESET_COMP: - if (cnic_ready_to_close(csk, opcode)) { - if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) - cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; - else - close_complete = 1; - } - break; - case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: - cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; - break; - case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: - close_complete = 1; - break; - } - if (cmd) { - memset(&l5_data, 0, sizeof(l5_data)); - - cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, - &l5_data); - } else if (close_complete) { - ctx->timestamp = jiffies; - cnic_close_conn(csk); - cnic_cm_upcall(cp, csk, csk->state); - } -} - -static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - - if (!cp->ctx_tbl) - return; - - if (!netif_running(dev->netdev)) - return; - - cnic_bnx2x_delete_wait(dev, 0); - - cancel_delayed_work(&cp->delete_task); - flush_workqueue(cnic_wq); - - if (atomic_read(&cp->iscsi_conn) != 0) - netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n", - atomic_read(&cp->iscsi_conn)); -} - -static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 pfid = cp->pfid; - u32 port = CNIC_PORT(cp); - - cnic_init_bnx2x_mac(dev); - cnic_bnx2x_set_tcp_timestamp(dev, 1); - - CNIC_WR16(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); - - CNIC_WR(dev, BAR_XSTRORM_INTMEM + - XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1); - CNIC_WR(dev, BAR_XSTRORM_INTMEM + - XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port), - DEF_MAX_DA_COUNT); - - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL); - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS); - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2); - CNIC_WR(dev, BAR_XSTRORM_INTMEM + - XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER); - - CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid), - DEF_MAX_CWND); - return 0; -} - -static void cnic_delete_task(struct work_struct *work) -{ - struct cnic_local *cp; - struct cnic_dev *dev; - u32 i; - int need_resched = 0; - - cp = container_of(work, struct 
cnic_local, delete_task.work); - dev = cp->dev; - - if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) { - struct drv_ctl_info info; - - cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI); - - info.cmd = DRV_CTL_ISCSI_STOPPED_CMD; - cp->ethdev->drv_ctl(dev->netdev, &info); - } - - for (i = 0; i < cp->max_cid_space; i++) { - struct cnic_context *ctx = &cp->ctx_tbl[i]; - int err; - - if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || - !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) - continue; - - if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { - need_resched = 1; - continue; - } - - if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) - continue; - - err = cnic_bnx2x_destroy_ramrod(dev, i); - - cnic_free_bnx2x_conn_resc(dev, i); - if (!err) { - if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) - atomic_dec(&cp->iscsi_conn); - - clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); - } - } - - if (need_resched) - queue_delayed_work(cnic_wq, &cp->delete_task, - msecs_to_jiffies(10)); - -} - -static int cnic_cm_open(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - int err; - - err = cnic_cm_alloc_mem(dev); - if (err) - return err; - - err = cp->start_cm(dev); - - if (err) - goto err_out; - - INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); - - dev->cm_create = cnic_cm_create; - dev->cm_destroy = cnic_cm_destroy; - dev->cm_connect = cnic_cm_connect; - dev->cm_abort = cnic_cm_abort; - dev->cm_close = cnic_cm_close; - dev->cm_select_dev = cnic_cm_select_dev; - - cp->ulp_handle[CNIC_ULP_L4] = dev; - rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); - return 0; - -err_out: - cnic_cm_free_mem(dev); - return err; -} - -static int cnic_cm_shutdown(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - int i; - - cp->stop_cm(dev); - - if (!cp->csk_tbl) - return 0; - - for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { - struct cnic_sock *csk = &cp->csk_tbl[i]; - - clear_bit(SK_F_INUSE, &csk->flags); - cnic_cm_cleanup(csk); - } - cnic_cm_free_mem(dev); - - return 0; -} - -static void cnic_init_context(struct cnic_dev *dev, u32 cid) -{ - u32 cid_addr; - int i; - - cid_addr = GET_CID_ADDR(cid); - - for (i = 0; i < CTX_SIZE; i += 4) - cnic_ctx_wr(dev, cid_addr, i, 0); -} - -static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) -{ - struct cnic_local *cp = dev->cnic_priv; - int ret = 0, i; - u32 valid_bit = valid ? 
BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; - - if (CHIP_NUM(cp) != CHIP_NUM_5709) - return 0; - - for (i = 0; i < cp->ctx_blks; i++) { - int j; - u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; - u32 val; - - memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); - - CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, - (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); - CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, - (u64) cp->ctx_arr[i].mapping >> 32); - CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | - BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); - for (j = 0; j < 10; j++) { - - val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); - if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) - break; - udelay(5); - } - if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { - ret = -EBUSY; - break; - } - } - return ret; -} - -static void cnic_free_irq(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - - if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { - cp->disable_int_sync(dev); - tasklet_kill(&cp->cnic_irq_task); - free_irq(ethdev->irq_arr[0].vector, dev); - } -} - -static int cnic_request_irq(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - int err; - - err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); - if (err) - tasklet_disable(&cp->cnic_irq_task); - - return err; -} - -static int cnic_init_bnx2_irq(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - - if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { - int err, i = 0; - int sblk_num = cp->status_blk_num; - u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + - BNX2_HC_SB_CONFIG_1; - - CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); - - CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); - CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); - CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); - - cp->last_status_idx = cp->status_blk.bnx2->status_idx; - tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, - (unsigned long) dev); - err = cnic_request_irq(dev); - if (err) - return err; - - while (cp->status_blk.bnx2->status_completion_producer_index && - i < 10) { - CNIC_WR(dev, BNX2_HC_COALESCE_NOW, - 1 << (11 + sblk_num)); - udelay(10); - i++; - barrier(); - } - if (cp->status_blk.bnx2->status_completion_producer_index) { - cnic_free_irq(dev); - goto failed; - } - - } else { - struct status_block *sblk = cp->status_blk.gen; - u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); - int i = 0; - - while (sblk->status_completion_producer_index && i < 10) { - CNIC_WR(dev, BNX2_HC_COMMAND, - hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); - udelay(10); - i++; - barrier(); - } - if (sblk->status_completion_producer_index) - goto failed; - - } - return 0; - -failed: - netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); - return -EBUSY; -} - -static void cnic_enable_bnx2_int(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - - if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) - return; - - CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); -} - -static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - - if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) - return; - - CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, 
cp->int_num | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT); - CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); - synchronize_irq(ethdev->irq_arr[0].vector); -} - -static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct cnic_uio_dev *udev = cp->udev; - u32 cid_addr, tx_cid, sb_id; - u32 val, offset0, offset1, offset2, offset3; - int i; - struct tx_bd *txbd; - dma_addr_t buf_map, ring_map = udev->l2_ring_map; - struct status_block *s_blk = cp->status_blk.gen; - - sb_id = cp->status_blk_num; - tx_cid = 20; - cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; - if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { - struct status_block_msix *sblk = cp->status_blk.bnx2; - - tx_cid = TX_TSS_CID + sb_id - 1; - CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | - (TX_TSS_CID << 7)); - cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; - } - cp->tx_cons = *cp->tx_cons_ptr; - - cid_addr = GET_CID_ADDR(tx_cid); - if (CHIP_NUM(cp) == CHIP_NUM_5709) { - u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; - - for (i = 0; i < PHY_CTX_SIZE; i += 4) - cnic_ctx_wr(dev, cid_addr2, i, 0); - - offset0 = BNX2_L2CTX_TYPE_XI; - offset1 = BNX2_L2CTX_CMD_TYPE_XI; - offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; - offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; - } else { - cnic_init_context(dev, tx_cid); - cnic_init_context(dev, tx_cid + 1); - - offset0 = BNX2_L2CTX_TYPE; - offset1 = BNX2_L2CTX_CMD_TYPE; - offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; - offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; - } - val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; - cnic_ctx_wr(dev, cid_addr, offset0, val); - - val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); - cnic_ctx_wr(dev, cid_addr, offset1, val); - - txbd = udev->l2_ring; - - buf_map = udev->l2_buf_map; - for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { - txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; - txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; - } - val = (u64) ring_map >> 32; - cnic_ctx_wr(dev, cid_addr, offset2, val); - txbd->tx_bd_haddr_hi = val; - - val = (u64) ring_map & 0xffffffff; - cnic_ctx_wr(dev, cid_addr, offset3, val); - txbd->tx_bd_haddr_lo = val; -} - -static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct cnic_uio_dev *udev = cp->udev; - u32 cid_addr, sb_id, val, coal_reg, coal_val; - int i; - struct rx_bd *rxbd; - struct status_block *s_blk = cp->status_blk.gen; - dma_addr_t ring_map = udev->l2_ring_map; - - sb_id = cp->status_blk_num; - cnic_init_context(dev, 2); - cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; - coal_reg = BNX2_HC_COMMAND; - coal_val = CNIC_RD(dev, coal_reg); - if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { - struct status_block_msix *sblk = cp->status_blk.bnx2; - - cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; - coal_reg = BNX2_HC_COALESCE_NOW; - coal_val = 1 << (11 + sb_id); - } - i = 0; - while (!(*cp->rx_cons_ptr != 0) && i < 10) { - CNIC_WR(dev, coal_reg, coal_val); - udelay(10); - i++; - barrier(); - } - cp->rx_cons = *cp->rx_cons_ptr; - - cid_addr = GET_CID_ADDR(2); - val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | - BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); - cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); - - if (sb_id == 0) - val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; - else - val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); - cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); - - rxbd = udev->l2_ring + BCM_PAGE_SIZE; - for (i = 
0; i < MAX_RX_DESC_CNT; i++, rxbd++) { - dma_addr_t buf_map; - int n = (i % cp->l2_rx_ring_size) + 1; - - buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); - rxbd->rx_bd_len = cp->l2_single_buf_size; - rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; - rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; - rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; - } - val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; - cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); - rxbd->rx_bd_haddr_hi = val; - - val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; - cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); - rxbd->rx_bd_haddr_lo = val; - - val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); - cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); -} - -static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) -{ - struct kwqe *wqes[1], l2kwqe; - - memset(&l2kwqe, 0, sizeof(l2kwqe)); - wqes[0] = &l2kwqe; - l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) | - (L2_KWQE_OPCODE_VALUE_FLUSH << - KWQE_OPCODE_SHIFT) | 2; - dev->submit_kwqes(dev, wqes, 1); -} - -static void cnic_set_bnx2_mac(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 val; - - val = cp->func << 2; - - cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); - - val = cnic_reg_rd_ind(dev, cp->shmem_base + - BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); - dev->mac_addr[0] = (u8) (val >> 8); - dev->mac_addr[1] = (u8) val; - - CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); - - val = cnic_reg_rd_ind(dev, cp->shmem_base + - BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); - dev->mac_addr[2] = (u8) (val >> 24); - dev->mac_addr[3] = (u8) (val >> 16); - dev->mac_addr[4] = (u8) (val >> 8); - dev->mac_addr[5] = (u8) val; - - CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); - - val = 4 | BNX2_RPM_SORT_USER2_BC_EN; - if (CHIP_NUM(cp) != CHIP_NUM_5709) - val |= BNX2_RPM_SORT_USER2_PROM_VLAN; - - CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); - CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); - CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); -} - -static int cnic_start_bnx2_hw(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct status_block *sblk = cp->status_blk.gen; - u32 val, kcq_cid_addr, kwq_cid_addr; - int err; - - cnic_set_bnx2_mac(dev); - - val = CNIC_RD(dev, BNX2_MQ_CONFIG); - val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; - if (BCM_PAGE_BITS > 12) - val |= (12 - 8) << 4; - else - val |= (BCM_PAGE_BITS - 8) << 4; - - CNIC_WR(dev, BNX2_MQ_CONFIG, val); - - CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8); - CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220); - CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220); - - err = cnic_setup_5709_context(dev, 1); - if (err) - return err; - - cnic_init_context(dev, KWQ_CID); - cnic_init_context(dev, KCQ_CID); - - kwq_cid_addr = GET_CID_ADDR(KWQ_CID); - cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; - - cp->max_kwq_idx = MAX_KWQ_IDX; - cp->kwq_prod_idx = 0; - cp->kwq_con_idx = 0; - set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); - - if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708) - cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; - else - cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index; - - /* Initialize the kernel work queue context. 
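- * The KWQ is the host-to-chip kernel work queue; the writes below
- * program its page size, entries per page, and the host address of
- * its page table into the chip's per-queue context.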
*/
- val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
- cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
-
- val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
- cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
-
- val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
- cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
-
- val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
- cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
-
- val = (u32) cp->kwq_info.pgtbl_map;
- cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
-
- kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
- cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
-
- cp->kcq1.sw_prod_idx = 0;
- cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &sblk->status_completion_producer_index;
-
- cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
-
- /* Initialize the kernel complete queue context. */
- val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
- cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
-
- val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
- cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
-
- val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
- cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
-
- val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
- cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
-
- val = (u32) cp->kcq1.dma.pgtbl_map;
- cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
-
- cp->int_num = 0;
- if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
- struct status_block_msix *msblk = cp->status_blk.bnx2;
- u32 sb_id = cp->status_blk_num;
- u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
-
- cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &msblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
- cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
- cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
- cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
- cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
- }
-
- /* Enable Command Scheduler notification when we write to the
- * host producer index of the kernel contexts. */
- CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
-
- /* Enable Command Scheduler notification when we write to either
- * the Send Queue or Receive Queue producer indexes of the kernel
- * bypass contexts. */
- CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
- CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
-
- /* Notify COM when the driver posts an application buffer. */
- CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
-
- /* Set the CP and COM doorbells. These two processors poll the
- * doorbell for a non-zero value before running. This must be done
- * after setting up the kernel queue contexts.
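- * Writing 1 to the CP and COM scratchpad locations below releases
- * both processors; cnic_stop_bnx2_hw() writes the same locations
- * back to 0 to halt them again on shutdown.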
*/ - cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1); - cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1); - - cnic_init_bnx2_tx_ring(dev); - cnic_init_bnx2_rx_ring(dev); - - err = cnic_init_bnx2_irq(dev); - if (err) { - netdev_err(dev->netdev, "cnic_init_irq failed\n"); - cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); - cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); - return err; - } - - return 0; -} - -static void cnic_setup_bnx2x_context(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - u32 start_offset = ethdev->ctx_tbl_offset; - int i; - - for (i = 0; i < cp->ctx_blks; i++) { - struct cnic_ctx *ctx = &cp->ctx_arr[i]; - dma_addr_t map = ctx->mapping; - - if (cp->ctx_align) { - unsigned long mask = cp->ctx_align - 1; - - map = (map + mask) & ~mask; - } - - cnic_ctx_tbl_wr(dev, start_offset + i, map); - } -} - -static int cnic_init_bnx2x_irq(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - int err = 0; - - tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, - (unsigned long) dev); - if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) - err = cnic_request_irq(dev); - - return err; -} - -static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev, - u16 sb_id, u8 sb_index, - u8 disable) -{ - - u32 addr = BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + - offsetof(struct hc_status_block_data_e1x, index_data) + - sizeof(struct hc_index_data)*sb_index + - offsetof(struct hc_index_data, flags); - u16 flags = CNIC_RD16(dev, addr); - /* clear and set */ - flags &= ~HC_INDEX_DATA_HC_ENABLED; - flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) & - HC_INDEX_DATA_HC_ENABLED); - CNIC_WR16(dev, addr, flags); -} - -static void cnic_enable_bnx2x_int(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - u8 sb_id = cp->status_blk_num; - - CNIC_WR8(dev, BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + - offsetof(struct hc_status_block_data_e1x, index_data) + - sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS + - offsetof(struct hc_index_data, timeout), 64 / 4); - cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0); -} - -static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) -{ -} - -static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, - struct client_init_ramrod_data *data) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_uio_dev *udev = cp->udev; - union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; - dma_addr_t buf_map, ring_map = udev->l2_ring_map; - struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; - int i; - u32 cli = cp->ethdev->iscsi_l2_client_id; - u32 val; - - memset(txbd, 0, BCM_PAGE_SIZE); - - buf_map = udev->l2_buf_map; - for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { - struct eth_tx_start_bd *start_bd = &txbd->start_bd; - struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); - - start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); - start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); - reg_bd->addr_hi = start_bd->addr_hi; - reg_bd->addr_lo = start_bd->addr_lo + 0x10; - start_bd->nbytes = cpu_to_le16(0x10); - start_bd->nbd = cpu_to_le16(3); - start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; - start_bd->general_data = (UNICAST_ADDRESS << - ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); - start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); - - } - - val = (u64) ring_map >> 32; - txbd->next_bd.addr_hi = 
cpu_to_le32(val); - - data->tx.tx_bd_page_base.hi = cpu_to_le32(val); - - val = (u64) ring_map & 0xffffffff; - txbd->next_bd.addr_lo = cpu_to_le32(val); - - data->tx.tx_bd_page_base.lo = cpu_to_le32(val); - - /* Other ramrod params */ - data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS; - data->tx.tx_status_block_id = BNX2X_DEF_SB_ID; - - /* reset xstorm per client statistics */ - if (cli < MAX_STAT_COUNTER_ID) { - data->general.statistics_zero_flg = 1; - data->general.statistics_en_flg = 1; - data->general.statistics_counter_id = cli; - } - - cp->tx_cons_ptr = - &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS]; -} - -static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, - struct client_init_ramrod_data *data) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_uio_dev *udev = cp->udev; - struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + - BCM_PAGE_SIZE); - struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) - (udev->l2_ring + (2 * BCM_PAGE_SIZE)); - struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; - int i; - u32 cli = cp->ethdev->iscsi_l2_client_id; - int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); - u32 val; - dma_addr_t ring_map = udev->l2_ring_map; - - /* General data */ - data->general.client_id = cli; - data->general.activate_flg = 1; - data->general.sp_client_id = cli; - data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); - data->general.func_id = cp->pfid; - - for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { - dma_addr_t buf_map; - int n = (i % cp->l2_rx_ring_size) + 1; - - buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); - rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); - rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); - } - - val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; - rxbd->addr_hi = cpu_to_le32(val); - data->rx.bd_page_base.hi = cpu_to_le32(val); - - val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; - rxbd->addr_lo = cpu_to_le32(val); - data->rx.bd_page_base.lo = cpu_to_le32(val); - - rxcqe += BNX2X_MAX_RCQ_DESC_CNT; - val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32; - rxcqe->addr_hi = cpu_to_le32(val); - data->rx.cqe_page_base.hi = cpu_to_le32(val); - - val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; - rxcqe->addr_lo = cpu_to_le32(val); - data->rx.cqe_page_base.lo = cpu_to_le32(val); - - /* Other ramrod params */ - data->rx.client_qzone_id = cl_qzone_id; - data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS; - data->rx.status_block_id = BNX2X_DEF_SB_ID; - - data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT; - - data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size); - data->rx.outer_vlan_removal_enable_flg = 1; - data->rx.silent_vlan_removal_flg = 1; - data->rx.silent_vlan_value = 0; - data->rx.silent_vlan_mask = 0xffff; - - cp->rx_cons_ptr = - &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; - cp->rx_cons = *cp->rx_cons_ptr; -} - -static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 pfid = cp->pfid; - - cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); - cp->kcq1.sw_prod_idx = 0; - - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { - struct host_hc_status_block_e2 *sb = cp->status_blk.gen; - - cp->kcq1.hw_prod_idx_ptr = - &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; - cp->kcq1.status_idx_ptr = - &sb->sb.running_index[SM_RX_ID]; - } else { - struct host_hc_status_block_e1x *sb = cp->status_blk.gen; - - cp->kcq1.hw_prod_idx_ptr = - 
&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; - cp->kcq1.status_idx_ptr = - &sb->sb.running_index[SM_RX_ID]; - } - - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { - struct host_hc_status_block_e2 *sb = cp->status_blk.gen; - - cp->kcq2.io_addr = BAR_USTRORM_INTMEM + - USTORM_FCOE_EQ_PROD_OFFSET(pfid); - cp->kcq2.sw_prod_idx = 0; - cp->kcq2.hw_prod_idx_ptr = - &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]; - cp->kcq2.status_idx_ptr = - &sb->sb.running_index[SM_RX_ID]; - } -} - -static int cnic_start_bnx2x_hw(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - int func = CNIC_FUNC(cp), ret; - u32 pfid; - - cp->port_mode = CHIP_PORT_MODE_NONE; - - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { - u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); - - if (!(val & 1)) - val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN); - else - val = (val >> 1) & 1; - - if (val) { - cp->port_mode = CHIP_4_PORT_MODE; - cp->pfid = func >> 1; - } else { - cp->port_mode = CHIP_2_PORT_MODE; - cp->pfid = func & 0x6; - } - } else { - cp->pfid = func; - } - pfid = cp->pfid; - - ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, - cp->iscsi_start_cid, 0); - - if (ret) - return -ENOMEM; - - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { - ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, - BNX2X_FCOE_NUM_CONNECTIONS, - cp->fcoe_start_cid, 0); - - if (ret) - return -ENOMEM; - } - - cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2; - - cnic_init_bnx2x_kcq(dev); - - /* Only 1 EQ */ - CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); - CNIC_WR(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0); - CNIC_WR(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0), - cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); - CNIC_WR(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4, - (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); - CNIC_WR(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0), - cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); - CNIC_WR(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4, - (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); - CNIC_WR8(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1); - CNIC_WR16(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); - CNIC_WR8(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), - HC_INDEX_ISCSI_EQ_CONS); - - CNIC_WR(dev, BAR_USTRORM_INTMEM + - USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid), - cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); - CNIC_WR(dev, BAR_USTRORM_INTMEM + - USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4, - (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); - - CNIC_WR(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF); - - cnic_setup_bnx2x_context(dev); - - ret = cnic_init_bnx2x_irq(dev); - if (ret) - return ret; - - return 0; -} - -static void cnic_init_rings(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_uio_dev *udev = cp->udev; - - if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) - return; - - if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { - cnic_init_bnx2_tx_ring(dev); - cnic_init_bnx2_rx_ring(dev); - set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); - } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { - u32 cli = cp->ethdev->iscsi_l2_client_id; - u32 cid = cp->ethdev->iscsi_l2_cid; - u32 cl_qzone_id; - struct client_init_ramrod_data *data; - union 
l5cm_specific_data l5_data; - struct ustorm_eth_rx_producers rx_prods = {0}; - u32 off, i, *cid_ptr; - - rx_prods.bd_prod = 0; - rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; - barrier(); - - cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); - - off = BAR_USTRORM_INTMEM + - (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? - USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : - USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); - - for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) - CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); - - set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); - - data = udev->l2_buf; - cid_ptr = udev->l2_buf + 12; - - memset(data, 0, sizeof(*data)); - - cnic_init_bnx2x_tx_ring(dev, data); - cnic_init_bnx2x_rx_ring(dev, data); - - l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; - l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; - - set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); - - cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, - cid, ETH_CONNECTION_TYPE, &l5_data); - - i = 0; - while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && - ++i < 10) - msleep(1); - - if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) - netdev_err(dev->netdev, - "iSCSI CLIENT_SETUP did not complete\n"); - cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); - cnic_ring_ctl(dev, cid, cli, 1); - *cid_ptr = cid; - } -} - -static void cnic_shutdown_rings(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_uio_dev *udev = cp->udev; - void *rx_ring; - - if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) - return; - - if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { - cnic_shutdown_bnx2_rx_ring(dev); - } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { - u32 cli = cp->ethdev->iscsi_l2_client_id; - u32 cid = cp->ethdev->iscsi_l2_cid; - union l5cm_specific_data l5_data; - int i; - - cnic_ring_ctl(dev, cid, cli, 0); - - set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); - - l5_data.phy_address.lo = cli; - l5_data.phy_address.hi = 0; - cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, - cid, ETH_CONNECTION_TYPE, &l5_data); - i = 0; - while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && - ++i < 10) - msleep(1); - - if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) - netdev_err(dev->netdev, - "iSCSI CLIENT_HALT did not complete\n"); - cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); - - memset(&l5_data, 0, sizeof(l5_data)); - cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, - cid, NONE_CONNECTION_TYPE, &l5_data); - msleep(10); - } - clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); - rx_ring = udev->l2_ring + BCM_PAGE_SIZE; - memset(rx_ring, 0, BCM_PAGE_SIZE); -} - -static int cnic_register_netdev(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - int err; - - if (!ethdev) - return -ENODEV; - - if (ethdev->drv_state & CNIC_DRV_STATE_REGD) - return 0; - - err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); - if (err) - netdev_err(dev->netdev, "register_cnic failed\n"); - - return err; -} - -static void cnic_unregister_netdev(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - - if (!ethdev) - return; - - ethdev->drv_unregister_cnic(dev->netdev); -} - -static int cnic_start_hw(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - int err; - - if (test_bit(CNIC_F_CNIC_UP, 
&dev->flags)) - return -EALREADY; - - dev->regview = ethdev->io_base; - pci_dev_get(dev->pcidev); - cp->func = PCI_FUNC(dev->pcidev->devfn); - cp->status_blk.gen = ethdev->irq_arr[0].status_blk; - cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; - - err = cp->alloc_resc(dev); - if (err) { - netdev_err(dev->netdev, "allocate resource failure\n"); - goto err1; - } - - err = cp->start_hw(dev); - if (err) - goto err1; - - err = cnic_cm_open(dev); - if (err) - goto err1; - - set_bit(CNIC_F_CNIC_UP, &dev->flags); - - cp->enable_int(dev); - - return 0; - -err1: - cp->free_resc(dev); - pci_dev_put(dev->pcidev); - return err; -} - -static void cnic_stop_bnx2_hw(struct cnic_dev *dev) -{ - cnic_disable_bnx2_int_sync(dev); - - cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); - cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); - - cnic_init_context(dev, KWQ_CID); - cnic_init_context(dev, KCQ_CID); - - cnic_setup_5709_context(dev, 0); - cnic_free_irq(dev); - - cnic_free_resc(dev); -} - - -static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - - cnic_free_irq(dev); - *cp->kcq1.hw_prod_idx_ptr = 0; - CNIC_WR(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); - CNIC_WR16(dev, cp->kcq1.io_addr, 0); - cnic_free_resc(dev); -} - -static void cnic_stop_hw(struct cnic_dev *dev) -{ - if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { - struct cnic_local *cp = dev->cnic_priv; - int i = 0; - - /* Need to wait for the ring shutdown event to complete - * before clearing the CNIC_UP flag. - */ - while (cp->udev->uio_dev != -1 && i < 15) { - msleep(100); - i++; - } - cnic_shutdown_rings(dev); - clear_bit(CNIC_F_CNIC_UP, &dev->flags); - rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); - synchronize_rcu(); - cnic_cm_shutdown(dev); - cp->stop_hw(dev); - pci_dev_put(dev->pcidev); - } -} - -static void cnic_free_dev(struct cnic_dev *dev) -{ - int i = 0; - - while ((atomic_read(&dev->ref_count) != 0) && i < 10) { - msleep(100); - i++; - } - if (atomic_read(&dev->ref_count) != 0) - netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n"); - - netdev_info(dev->netdev, "Removed CNIC device\n"); - dev_put(dev->netdev); - kfree(dev); -} - -static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, - struct pci_dev *pdev) -{ - struct cnic_dev *cdev; - struct cnic_local *cp; - int alloc_size; - - alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); - - cdev = kzalloc(alloc_size , GFP_KERNEL); - if (cdev == NULL) { - netdev_err(dev, "allocate dev struct failure\n"); - return NULL; - } - - cdev->netdev = dev; - cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); - cdev->register_device = cnic_register_device; - cdev->unregister_device = cnic_unregister_device; - cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; - - cp = cdev->cnic_priv; - cp->dev = cdev; - cp->l2_single_buf_size = 0x400; - cp->l2_rx_ring_size = 3; - - spin_lock_init(&cp->cnic_ulp_lock); - - netdev_info(dev, "Added CNIC device\n"); - - return cdev; -} - -static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) -{ - struct pci_dev *pdev; - struct cnic_dev *cdev; - struct cnic_local *cp; - struct cnic_eth_dev *ethdev = NULL; - struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; - - probe = symbol_get(bnx2_cnic_probe); - if (probe) { - ethdev = (*probe)(dev); - symbol_put(bnx2_cnic_probe); - } - if (!ethdev) - return NULL; - - pdev = ethdev->pdev; - if (!pdev) - return NULL; - - dev_hold(dev); - pci_dev_get(pdev); - if ((pdev->device == 
PCI_DEVICE_ID_NX2_5709 || - pdev->device == PCI_DEVICE_ID_NX2_5709S) && - (pdev->revision < 0x10)) { - pci_dev_put(pdev); - goto cnic_err; - } - pci_dev_put(pdev); - - cdev = cnic_alloc_dev(dev, pdev); - if (cdev == NULL) - goto cnic_err; - - set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); - cdev->submit_kwqes = cnic_submit_bnx2_kwqes; - - cp = cdev->cnic_priv; - cp->ethdev = ethdev; - cdev->pcidev = pdev; - cp->chip_id = ethdev->chip_id; - - cdev->max_iscsi_conn = ethdev->max_iscsi_conn; - - cp->cnic_ops = &cnic_bnx2_ops; - cp->start_hw = cnic_start_bnx2_hw; - cp->stop_hw = cnic_stop_bnx2_hw; - cp->setup_pgtbl = cnic_setup_page_tbl; - cp->alloc_resc = cnic_alloc_bnx2_resc; - cp->free_resc = cnic_free_resc; - cp->start_cm = cnic_cm_init_bnx2_hw; - cp->stop_cm = cnic_cm_stop_bnx2_hw; - cp->enable_int = cnic_enable_bnx2_int; - cp->disable_int_sync = cnic_disable_bnx2_int_sync; - cp->close_conn = cnic_close_bnx2_conn; - return cdev; - -cnic_err: - dev_put(dev); - return NULL; -} - -static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) -{ - struct pci_dev *pdev; - struct cnic_dev *cdev; - struct cnic_local *cp; - struct cnic_eth_dev *ethdev = NULL; - struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; - - probe = symbol_get(bnx2x_cnic_probe); - if (probe) { - ethdev = (*probe)(dev); - symbol_put(bnx2x_cnic_probe); - } - if (!ethdev) - return NULL; - - pdev = ethdev->pdev; - if (!pdev) - return NULL; - - dev_hold(dev); - cdev = cnic_alloc_dev(dev, pdev); - if (cdev == NULL) { - dev_put(dev); - return NULL; - } - - set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); - cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; - - cp = cdev->cnic_priv; - cp->ethdev = ethdev; - cdev->pcidev = pdev; - cp->chip_id = ethdev->chip_id; - - if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) - cdev->max_iscsi_conn = ethdev->max_iscsi_conn; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && - !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) - cdev->max_fcoe_conn = ethdev->max_fcoe_conn; - - memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6); - - cp->cnic_ops = &cnic_bnx2x_ops; - cp->start_hw = cnic_start_bnx2x_hw; - cp->stop_hw = cnic_stop_bnx2x_hw; - cp->setup_pgtbl = cnic_setup_page_tbl_le; - cp->alloc_resc = cnic_alloc_bnx2x_resc; - cp->free_resc = cnic_free_resc; - cp->start_cm = cnic_cm_init_bnx2x_hw; - cp->stop_cm = cnic_cm_stop_bnx2x_hw; - cp->enable_int = cnic_enable_bnx2x_int; - cp->disable_int_sync = cnic_disable_bnx2x_int_sync; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) - cp->ack_int = cnic_ack_bnx2x_e2_msix; - else - cp->ack_int = cnic_ack_bnx2x_msix; - cp->close_conn = cnic_close_bnx2x_conn; - return cdev; -} - -static struct cnic_dev *is_cnic_dev(struct net_device *dev) -{ - struct ethtool_drvinfo drvinfo; - struct cnic_dev *cdev = NULL; - - if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { - memset(&drvinfo, 0, sizeof(drvinfo)); - dev->ethtool_ops->get_drvinfo(dev, &drvinfo); - - if (!strcmp(drvinfo.driver, "bnx2")) - cdev = init_bnx2_cnic(dev); - if (!strcmp(drvinfo.driver, "bnx2x")) - cdev = init_bnx2x_cnic(dev); - if (cdev) { - write_lock(&cnic_dev_lock); - list_add(&cdev->list, &cnic_dev_list); - write_unlock(&cnic_dev_lock); - } - } - return cdev; -} - -static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, - u16 vlan_id) -{ - int if_type; - - rcu_read_lock(); - for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { - struct cnic_ulp_ops *ulp_ops; - void *ctx; - - ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); - if (!ulp_ops || !ulp_ops->indicate_netevent) - continue; - - ctx = 
cp->ulp_handle[if_type]; - - ulp_ops->indicate_netevent(ctx, event, vlan_id); - } - rcu_read_unlock(); -} - -/** - * netdev event handler - */ -static int cnic_netdev_event(struct notifier_block *this, unsigned long event, - void *ptr) -{ - struct net_device *netdev = ptr; - struct cnic_dev *dev; - int new_dev = 0; - - dev = cnic_from_netdev(netdev); - - if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) { - /* Check for the hot-plug device */ - dev = is_cnic_dev(netdev); - if (dev) { - new_dev = 1; - cnic_hold(dev); - } - } - if (dev) { - struct cnic_local *cp = dev->cnic_priv; - - if (new_dev) - cnic_ulp_init(dev); - else if (event == NETDEV_UNREGISTER) - cnic_ulp_exit(dev); - - if (event == NETDEV_UP || (new_dev && netif_running(netdev))) { - if (cnic_register_netdev(dev) != 0) { - cnic_put(dev); - goto done; - } - if (!cnic_start_hw(dev)) - cnic_ulp_start(dev); - } - - cnic_rcv_netevent(cp, event, 0); - - if (event == NETDEV_GOING_DOWN) { - cnic_ulp_stop(dev); - cnic_stop_hw(dev); - cnic_unregister_netdev(dev); - } else if (event == NETDEV_UNREGISTER) { - write_lock(&cnic_dev_lock); - list_del_init(&dev->list); - write_unlock(&cnic_dev_lock); - - cnic_put(dev); - cnic_free_dev(dev); - goto done; - } - cnic_put(dev); - } else { - struct net_device *realdev; - u16 vid; - - vid = cnic_get_vlan(netdev, &realdev); - if (realdev) { - dev = cnic_from_netdev(realdev); - if (dev) { - vid |= VLAN_TAG_PRESENT; - cnic_rcv_netevent(dev->cnic_priv, event, vid); - cnic_put(dev); - } - } - } -done: - return NOTIFY_DONE; -} - -static struct notifier_block cnic_netdev_notifier = { - .notifier_call = cnic_netdev_event -}; - -static void cnic_release(void) -{ - struct cnic_dev *dev; - struct cnic_uio_dev *udev; - - while (!list_empty(&cnic_dev_list)) { - dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); - if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { - cnic_ulp_stop(dev); - cnic_stop_hw(dev); - } - - cnic_ulp_exit(dev); - cnic_unregister_netdev(dev); - list_del_init(&dev->list); - cnic_free_dev(dev); - } - while (!list_empty(&cnic_udev_list)) { - udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, - list); - cnic_free_uio(udev); - } -} - -static int __init cnic_init(void) -{ - int rc = 0; - - pr_info("%s", version); - - rc = register_netdevice_notifier(&cnic_netdev_notifier); - if (rc) { - cnic_release(); - return rc; - } - - cnic_wq = create_singlethread_workqueue("cnic_wq"); - if (!cnic_wq) { - cnic_release(); - unregister_netdevice_notifier(&cnic_netdev_notifier); - return -ENOMEM; - } - - return 0; -} - -static void __exit cnic_exit(void) -{ - unregister_netdevice_notifier(&cnic_netdev_notifier); - cnic_release(); - destroy_workqueue(cnic_wq); -} - -module_init(cnic_init); -module_exit(cnic_exit); diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h deleted file mode 100644 index 7a2928f..0000000 --- a/drivers/net/cnic.h +++ /dev/null @@ -1,478 +0,0 @@ -/* cnic.h: Broadcom CNIC core network driver. - * - * Copyright (c) 2006-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - */ - - -#ifndef CNIC_H -#define CNIC_H - -#define HC_INDEX_ISCSI_EQ_CONS 6 - -#define HC_INDEX_FCOE_EQ_CONS 3 - -#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 -#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 - -#define KWQ_PAGE_CNT 4 -#define KCQ_PAGE_CNT 16 - -#define KWQ_CID 24 -#define KCQ_CID 25 - -/* - * krnlq_context definition - */ -#define L5_KRNLQ_FLAGS 0x00000000 -#define L5_KRNLQ_SIZE 0x00000000 -#define L5_KRNLQ_TYPE 0x00000000 -#define KRNLQ_FLAGS_PG_SZ (0xf<<0) -#define KRNLQ_FLAGS_PG_SZ_256 (0<<0) -#define KRNLQ_FLAGS_PG_SZ_512 (1<<0) -#define KRNLQ_FLAGS_PG_SZ_1K (2<<0) -#define KRNLQ_FLAGS_PG_SZ_2K (3<<0) -#define KRNLQ_FLAGS_PG_SZ_4K (4<<0) -#define KRNLQ_FLAGS_PG_SZ_8K (5<<0) -#define KRNLQ_FLAGS_PG_SZ_16K (6<<0) -#define KRNLQ_FLAGS_PG_SZ_32K (7<<0) -#define KRNLQ_FLAGS_PG_SZ_64K (8<<0) -#define KRNLQ_FLAGS_PG_SZ_128K (9<<0) -#define KRNLQ_FLAGS_PG_SZ_256K (10<<0) -#define KRNLQ_FLAGS_PG_SZ_512K (11<<0) -#define KRNLQ_FLAGS_PG_SZ_1M (12<<0) -#define KRNLQ_FLAGS_PG_SZ_2M (13<<0) -#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15) -#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16) -#define KRNLQ_TYPE_TYPE (0xf<<28) -#define KRNLQ_TYPE_TYPE_EMPTY (0<<28) -#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28) - -#define L5_KRNLQ_HOST_QIDX 0x00000004 -#define L5_KRNLQ_HOST_FW_QIDX 0x00000008 -#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c -#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c -#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010 -#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014 -#define L5_KRNLQ_PGTBL_PGIDX 0x00000018 -#define L5_KRNLQ_NX_PG_QIDX 0x00000018 -#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c -#define L5_KRNLQ_QIDX_INCR 0x0000001c -#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020 -#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024 - -#define BNX2_PG_CTX_MAP 0x1a0034 -#define BNX2_ISCSI_CTX_MAP 0x1a0074 - -#define MAX_COMPLETED_KCQE 64 - -#define MAX_CNIC_L5_CONTEXT 256 - -#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT - -#define MAX_ISCSI_TBL_SZ 256 - -#define CNIC_LOCAL_PORT_MIN 60000 -#define CNIC_LOCAL_PORT_MAX 61024 -#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN) - -#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe)) -#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe)) -#define MAX_KWQE_CNT (KWQE_CNT - 1) -#define MAX_KCQE_CNT (KCQE_CNT - 1) - -#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1) -#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1) - -#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5)) -#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT) - -#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5)) -#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT) - -#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \ - (MAX_KCQE_CNT - 1)) ? 
\ - (x) + 2 : (x) + 1 - -#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp) -#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp) -#define BNX2X_KWQ_DATA(cp, x) \ - &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)] - -#define DEF_IPID_START 0x8000 - -#define DEF_KA_TIMEOUT 10000 -#define DEF_KA_INTERVAL 300000 -#define DEF_KA_MAX_PROBE_COUNT 3 -#define DEF_TOS 0 -#define DEF_TTL 0xfe -#define DEF_SND_SEQ_SCALE 0 -#define DEF_RCV_BUF 0xffff -#define DEF_SND_BUF 0xffff -#define DEF_SEED 0 -#define DEF_MAX_RT_TIME 500 -#define DEF_MAX_DA_COUNT 2 -#define DEF_SWS_TIMER 1000 -#define DEF_MAX_CWND 0xffff - -struct cnic_ctx { - u32 cid; - void *ctx; - dma_addr_t mapping; -}; - -#define BNX2_MAX_CID 0x2000 - -struct cnic_dma { - int num_pages; - void **pg_arr; - dma_addr_t *pg_map_arr; - int pgtbl_size; - u32 *pgtbl; - dma_addr_t pgtbl_map; -}; - -struct cnic_id_tbl { - spinlock_t lock; - u32 start; - u32 max; - u32 next; - unsigned long *table; -}; - -#define CNIC_KWQ16_DATA_SIZE 128 - -struct kwqe_16_data { - u8 data[CNIC_KWQ16_DATA_SIZE]; -}; - -struct cnic_iscsi { - struct cnic_dma task_array_info; - struct cnic_dma r2tq_info; - struct cnic_dma hq_info; -}; - -struct cnic_context { - u32 cid; - struct kwqe_16_data *kwqe_data; - dma_addr_t kwqe_data_mapping; - wait_queue_head_t waitq; - int wait_cond; - unsigned long timestamp; - unsigned long ctx_flags; -#define CTX_FL_OFFLD_START 0 -#define CTX_FL_DELETE_WAIT 1 -#define CTX_FL_CID_ERROR 2 - u8 ulp_proto_id; - union { - struct cnic_iscsi *iscsi; - } proto; -}; - -struct kcq_info { - struct cnic_dma dma; - struct kcqe **kcq; - - u16 *hw_prod_idx_ptr; - u16 sw_prod_idx; - u16 *status_idx_ptr; - u32 io_addr; - - u16 (*next_idx)(u16); - u16 (*hw_idx)(u16); -}; - -struct iro { - u32 base; - u16 m1; - u16 m2; - u16 m3; - u16 size; -}; - -struct cnic_uio_dev { - struct uio_info cnic_uinfo; - u32 uio_dev; - - int l2_ring_size; - void *l2_ring; - dma_addr_t l2_ring_map; - - int l2_buf_size; - void *l2_buf; - dma_addr_t l2_buf_map; - - struct cnic_dev *dev; - struct pci_dev *pdev; - struct list_head list; -}; - -struct cnic_local { - - spinlock_t cnic_ulp_lock; - void *ulp_handle[MAX_CNIC_ULP_TYPE]; - unsigned long ulp_flags[MAX_CNIC_ULP_TYPE]; -#define ULP_F_INIT 0 -#define ULP_F_START 1 -#define ULP_F_CALL_PENDING 2 - struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE]; - - unsigned long cnic_local_flags; -#define CNIC_LCL_FL_KWQ_INIT 0x0 -#define CNIC_LCL_FL_L2_WAIT 0x1 -#define CNIC_LCL_FL_RINGS_INITED 0x2 -#define CNIC_LCL_FL_STOP_ISCSI 0x4 - - struct cnic_dev *dev; - - struct cnic_eth_dev *ethdev; - - struct cnic_uio_dev *udev; - - int l2_rx_ring_size; - int l2_single_buf_size; - - u16 *rx_cons_ptr; - u16 *tx_cons_ptr; - u16 rx_cons; - u16 tx_cons; - - const struct iro *iro_arr; -#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr) - - struct cnic_dma kwq_info; - struct kwqe **kwq; - - struct cnic_dma kwq_16_data_info; - - u16 max_kwq_idx; - - u16 kwq_prod_idx; - u32 kwq_io_addr; - - u16 *kwq_con_idx_ptr; - u16 kwq_con_idx; - - struct kcq_info kcq1; - struct kcq_info kcq2; - - union { - void *gen; - struct status_block_msix *bnx2; - struct host_hc_status_block_e1x *bnx2x_e1x; - /* index values - which counter to update */ - #define SM_RX_ID 0 - #define SM_TX_ID 1 - } status_blk; - - struct host_sp_status_block *bnx2x_def_status_blk; - - u32 status_blk_num; - u32 bnx2x_igu_sb_id; - u32 int_num; - u32 last_status_idx; - struct tasklet_struct cnic_irq_task; - - struct kcqe 
*completed_kcq[MAX_COMPLETED_KCQE]; - - struct cnic_sock *csk_tbl; - struct cnic_id_tbl csk_port_tbl; - - struct cnic_dma gbl_buf_info; - - struct cnic_iscsi *iscsi_tbl; - struct cnic_context *ctx_tbl; - struct cnic_id_tbl cid_tbl; - atomic_t iscsi_conn; - u32 iscsi_start_cid; - - u32 fcoe_init_cid; - u32 fcoe_start_cid; - struct cnic_id_tbl fcoe_cid_tbl; - - u32 max_cid_space; - - /* per connection parameters */ - int num_iscsi_tasks; - int num_ccells; - int task_array_size; - int r2tq_size; - int hq_size; - int num_cqs; - - struct delayed_work delete_task; - - struct cnic_ctx *ctx_arr; - int ctx_blks; - int ctx_blk_size; - unsigned long ctx_align; - int cids_per_blk; - - u32 chip_id; - int func; - u32 pfid; - u8 port_mode; -#define CHIP_4_PORT_MODE 0 -#define CHIP_2_PORT_MODE 1 -#define CHIP_PORT_MODE_NONE 2 - - u32 shmem_base; - - struct cnic_ops *cnic_ops; - int (*start_hw)(struct cnic_dev *); - void (*stop_hw)(struct cnic_dev *); - void (*setup_pgtbl)(struct cnic_dev *, - struct cnic_dma *); - int (*alloc_resc)(struct cnic_dev *); - void (*free_resc)(struct cnic_dev *); - int (*start_cm)(struct cnic_dev *); - void (*stop_cm)(struct cnic_dev *); - void (*enable_int)(struct cnic_dev *); - void (*disable_int_sync)(struct cnic_dev *); - void (*ack_int)(struct cnic_dev *); - void (*close_conn)(struct cnic_sock *, u32 opcode); -}; - -struct bnx2x_bd_chain_next { - u32 addr_lo; - u32 addr_hi; - u8 reserved[8]; -}; - -#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) - -#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN) -#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT) - -#define CDU_REGION_NUMBER_XCM_AG 2 -#define CDU_REGION_NUMBER_UCM_AG 4 - -#define CDU_VALID_DATA(_cid, _region, _type) \ - (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf))) - -#define CDU_CRC8(_cid, _region, _type) \ - (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff)) - -#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \ - (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f)) - -#define BNX2X_CONTEXT_MEM_SIZE 1024 -#define BNX2X_FCOE_CID 16 - -#define BNX2X_ISCSI_START_CID 18 -#define BNX2X_ISCSI_NUM_CONNECTIONS 128 -#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128 -#define BNX2X_ISCSI_MAX_PENDING_R2TS 4 -#define BNX2X_ISCSI_R2TQE_SIZE 8 -#define BNX2X_ISCSI_HQ_BD_SIZE 64 -#define BNX2X_ISCSI_GLB_BUF_SIZE 64 -#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff -#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff - -#define BNX2X_FCOE_NUM_CONNECTIONS 128 - -#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ - -#define BNX2X_CHIP_NUM_57710 0x164e -#define BNX2X_CHIP_NUM_57711 0x164f -#define BNX2X_CHIP_NUM_57711E 0x1650 -#define BNX2X_CHIP_NUM_57712 0x1662 -#define BNX2X_CHIP_NUM_57712E 0x1663 -#define BNX2X_CHIP_NUM_57713 0x1651 -#define BNX2X_CHIP_NUM_57713E 0x1652 -#define BNX2X_CHIP_NUM_57800 0x168a -#define BNX2X_CHIP_NUM_57810 0x168e -#define BNX2X_CHIP_NUM_57840 0x168d - -#define BNX2X_CHIP_NUM(x) (x >> 16) -#define BNX2X_CHIP_IS_57710(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710) -#define BNX2X_CHIP_IS_57711(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711) -#define BNX2X_CHIP_IS_57711E(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E) -#define BNX2X_CHIP_IS_E1H(x) \ - (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x)) -#define BNX2X_CHIP_IS_57712(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712) -#define BNX2X_CHIP_IS_57712E(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E) -#define BNX2X_CHIP_IS_57713(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713) -#define BNX2X_CHIP_IS_57713E(x) \ - 
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E) -#define BNX2X_CHIP_IS_57800(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800) -#define BNX2X_CHIP_IS_57810(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810) -#define BNX2X_CHIP_IS_57840(x) \ - (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840) -#define BNX2X_CHIP_IS_E2(x) \ - (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \ - BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x)) -#define BNX2X_CHIP_IS_E3(x) \ - (BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \ - BNX2X_CHIP_IS_57840(x)) -#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x)) - -#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id) - -#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) -#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2) -#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) -#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1) - -#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \ - (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \ - ((x) + 2) : ((x) + 1) - -#define BNX2X_DEF_SB_ID HC_SP_SB_ID - -#define BNX2X_SHMEM_MF_BLK_OFFSET 0x7e4 - -#define BNX2X_SHMEM_ADDR(base, field) (base + \ - offsetof(struct shmem_region, field)) - -#define BNX2X_SHMEM2_ADDR(base, field) (base + \ - offsetof(struct shmem2_region, field)) - -#define BNX2X_SHMEM2_HAS(base, field) \ - ((base) && \ - (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \ - offsetof(struct shmem2_region, field))) - -#define BNX2X_MF_CFG_ADDR(base, field) \ - ((base) + offsetof(struct mf_cfg, field)) - -#ifndef ETH_MAX_RX_CLIENTS_E2 -#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H -#endif - -#define CNIC_PORT(cp) ((cp)->pfid & 1) -#define CNIC_FUNC(cp) ((cp)->func) -#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \ - 0 : (CNIC_FUNC(cp) & 1)) -#define CNIC_E1HVN(cp) ((cp)->pfid >> 1) - -#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \ - (CNIC_E1HVN(cp) << 17) | (x)) - -#define BNX2X_SW_CID(x) (x & 0x1ffff) - -#define BNX2X_CL_QZONE_ID(cp, cli) \ - (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \ - cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H)) - -#ifndef MAX_STAT_COUNTER_ID -#define MAX_STAT_COUNTER_ID \ - (BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \ - ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\ - MAX_STAT_COUNTER_ID_E1)) -#endif - -#endif - diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h deleted file mode 100644 index e47d210..0000000 --- a/drivers/net/cnic_defs.h +++ /dev/null @@ -1,5484 +0,0 @@ - -/* cnic.c: Broadcom CNIC core network driver. - * - * Copyright (c) 2006-2009 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - */ - -#ifndef CNIC_DEFS_H -#define CNIC_DEFS_H - -/* KWQ (kernel work queue) request op codes */ -#define L2_KWQE_OPCODE_VALUE_FLUSH (4) -#define L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE (8) - -#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50) -#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51) -#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52) -#define L4_KWQE_OPCODE_VALUE_RESET (53) -#define L4_KWQE_OPCODE_VALUE_CLOSE (54) -#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60) -#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61) - -#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1) -#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9) -#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14) - -#define L5CM_RAMROD_CMD_ID_BASE (0x80) -#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3) -#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12) -#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13) -#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14) -#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15) - -#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10) -#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11) -#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12) -#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15) -#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16) -#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17) -#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18) -#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) -#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21) - -#define FCOE_RAMROD_CMD_ID_INIT_FUNC (FCOE_KCQE_OPCODE_INIT_FUNC) -#define FCOE_RAMROD_CMD_ID_DESTROY_FUNC (FCOE_KCQE_OPCODE_DESTROY_FUNC) -#define FCOE_RAMROD_CMD_ID_STAT_FUNC (FCOE_KCQE_OPCODE_STAT_FUNC) -#define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN (FCOE_KCQE_OPCODE_OFFLOAD_CONN) -#define FCOE_RAMROD_CMD_ID_ENABLE_CONN (FCOE_KCQE_OPCODE_ENABLE_CONN) -#define FCOE_RAMROD_CMD_ID_DISABLE_CONN (FCOE_KCQE_OPCODE_DISABLE_CONN) -#define FCOE_RAMROD_CMD_ID_DESTROY_CONN (FCOE_KCQE_OPCODE_DESTROY_CONN) -#define FCOE_RAMROD_CMD_ID_TERMINATE_CONN (0x81) - -#define FCOE_KWQE_OPCODE_INIT1 (0) -#define FCOE_KWQE_OPCODE_INIT2 (1) -#define FCOE_KWQE_OPCODE_INIT3 (2) -#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3) -#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4) -#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5) -#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6) -#define FCOE_KWQE_OPCODE_ENABLE_CONN (7) -#define FCOE_KWQE_OPCODE_DISABLE_CONN (8) -#define FCOE_KWQE_OPCODE_DESTROY_CONN (9) -#define FCOE_KWQE_OPCODE_DESTROY (10) -#define FCOE_KWQE_OPCODE_STAT (11) - -#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3) - -/* KCQ (kernel completion queue) response op codes */ -#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53) -#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54) -#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55) -#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56) -#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57) -#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58) -#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61) - -#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1) -#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9) -#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14) - -/* KCQ (kernel completion queue) completion status */ -#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) -#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) - -#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83) -#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89) - -#define L4_KCQE_OPCODE_VALUE_OOO_EVENT_NOTIFICATION (0xa0) -#define L4_KCQE_OPCODE_VALUE_OOO_FLUSH (0xa1) - -#define L4_LAYER_CODE (4) -#define L2_LAYER_CODE (2) - -/* - * L4 KCQ CQE - */ -struct l4_kcq 
{ - u32 cid; - u32 pg_cid; - u32 conn_id; - u32 pg_host_opaque; -#if defined(__BIG_ENDIAN) - u16 status; - u16 reserved1; -#elif defined(__LITTLE_ENDIAN) - u16 reserved1; - u16 status; -#endif - u32 reserved2[2]; -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KCQ_RESERVED3 (0x7<<0) -#define L4_KCQ_RESERVED3_SHIFT 0 -#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */ -#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3 -#define L4_KCQ_LAYER_CODE (0x7<<4) -#define L4_KCQ_LAYER_CODE_SHIFT 4 -#define L4_KCQ_RESERVED4 (0x1<<7) -#define L4_KCQ_RESERVED4_SHIFT 7 - u8 op_code; - u16 qe_self_seq; -#elif defined(__LITTLE_ENDIAN) - u16 qe_self_seq; - u8 op_code; - u8 flags; -#define L4_KCQ_RESERVED3 (0xF<<0) -#define L4_KCQ_RESERVED3_SHIFT 0 -#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */ -#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3 -#define L4_KCQ_LAYER_CODE (0x7<<4) -#define L4_KCQ_LAYER_CODE_SHIFT 4 -#define L4_KCQ_RESERVED4 (0x1<<7) -#define L4_KCQ_RESERVED4_SHIFT 7 -#endif -}; - - -/* - * L4 KCQ CQE PG upload - */ -struct l4_kcq_upload_pg { - u32 pg_cid; -#if defined(__BIG_ENDIAN) - u16 pg_status; - u16 pg_ipid_count; -#elif defined(__LITTLE_ENDIAN) - u16 pg_ipid_count; - u16 pg_status; -#endif - u32 reserved1[5]; -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0) -#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0 -#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4) -#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4 -#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7) -#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7 - u8 op_code; - u16 qe_self_seq; -#elif defined(__LITTLE_ENDIAN) - u16 qe_self_seq; - u8 op_code; - u8 flags; -#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0) -#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0 -#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4) -#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4 -#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7) -#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7 -#endif -}; - - -/* - * Gracefully close the connection request - */ -struct l4_kwq_close_req { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0) -#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0 -#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4) -#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0) -#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0 -#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4) -#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 cid; - u32 reserved2[6]; -}; - - -/* - * The first request to be passed in order to establish connection in option2 - */ -struct l4_kwq_connect_req1 { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u8 reserved0; - u8 conn_flags; -#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0) -#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1) -#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1 -#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2) -#define 
L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2 -#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3) -#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3 -#elif defined(__LITTLE_ENDIAN) - u8 conn_flags; -#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0) -#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1) -#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1 -#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2) -#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2 -#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3) -#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3 - u8 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 cid; - u32 pg_cid; - u32 src_ip; - u32 dst_ip; -#if defined(__BIG_ENDIAN) - u16 dst_port; - u16 src_port; -#elif defined(__LITTLE_ENDIAN) - u16 src_port; - u16 dst_port; -#endif -#if defined(__BIG_ENDIAN) - u8 rsrv1[3]; - u8 tcp_flags; -#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0) -#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1) -#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1 -#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2) -#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2 -#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3) -#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3 -#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4) -#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4 -#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5) -#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5 -#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6) -#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6 -#elif defined(__LITTLE_ENDIAN) - u8 tcp_flags; -#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0) -#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1) -#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1 -#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2) -#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2 -#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3) -#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3 -#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4) -#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4 -#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5) -#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5 -#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6) -#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6 - u8 rsrv1[3]; -#endif - u32 rsrv2; -}; - - -/* - * The second ( optional )request to be passed in order to establish - * connection in option2 - for IPv6 only - */ -struct l4_kwq_connect_req2 { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u8 reserved0; - u8 rsrv; -#elif defined(__LITTLE_ENDIAN) - u8 rsrv; - u8 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 reserved2; - u32 
src_ip_v6_2; - u32 src_ip_v6_3; - u32 src_ip_v6_4; - u32 dst_ip_v6_2; - u32 dst_ip_v6_3; - u32 dst_ip_v6_4; -}; - - -/* - * The third ( and last )request to be passed in order to establish - * connection in option2 - */ -struct l4_kwq_connect_req3 { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 ka_timeout; - u32 ka_interval ; -#if defined(__BIG_ENDIAN) - u8 snd_seq_scale; - u8 ttl; - u8 tos; - u8 ka_max_probe_count; -#elif defined(__LITTLE_ENDIAN) - u8 ka_max_probe_count; - u8 tos; - u8 ttl; - u8 snd_seq_scale; -#endif -#if defined(__BIG_ENDIAN) - u16 pmtu; - u16 mss; -#elif defined(__LITTLE_ENDIAN) - u16 mss; - u16 pmtu; -#endif - u32 rcv_buf; - u32 snd_buf; - u32 seed; -}; - - -/* - * a KWQE request to offload a PG connection - */ -struct l4_kwq_offload_pg { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0) -#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0 -#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4) -#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4 -#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0) -#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0 -#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4) -#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4 -#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7 -#endif -#if defined(__BIG_ENDIAN) - u8 l2hdr_nbytes; - u8 pg_flags; -#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0) -#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0 -#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1) -#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1 -#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2) -#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2 - u8 da0; - u8 da1; -#elif defined(__LITTLE_ENDIAN) - u8 da1; - u8 da0; - u8 pg_flags; -#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0) -#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0 -#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1) -#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1 -#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2) -#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2 - u8 l2hdr_nbytes; -#endif -#if defined(__BIG_ENDIAN) - u8 da2; - u8 da3; - u8 da4; - u8 da5; -#elif defined(__LITTLE_ENDIAN) - u8 da5; - u8 da4; - u8 da3; - u8 da2; -#endif -#if defined(__BIG_ENDIAN) - u8 sa0; - u8 sa1; - u8 sa2; - u8 sa3; -#elif defined(__LITTLE_ENDIAN) - u8 sa3; - u8 sa2; - u8 sa1; - u8 sa0; -#endif -#if defined(__BIG_ENDIAN) - u8 sa4; - u8 sa5; - u16 etype; -#elif defined(__LITTLE_ENDIAN) - u16 etype; - u8 sa5; - u8 sa4; -#endif -#if defined(__BIG_ENDIAN) - u16 vlan_tag; - u16 ipid_start; -#elif defined(__LITTLE_ENDIAN) - u16 ipid_start; - u16 vlan_tag; -#endif -#if defined(__BIG_ENDIAN) - u16 
ipid_count; - u16 reserved3; -#elif defined(__LITTLE_ENDIAN) - u16 reserved3; - u16 ipid_count; -#endif - u32 host_opaque; -}; - - -/* - * Abortively close the connection request - */ -struct l4_kwq_reset_req { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0) -#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0 -#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4) -#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4 -#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0) -#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0 -#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4) -#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4 -#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 cid; - u32 reserved2[6]; -}; - - -/* - * a KWQE request to update a PG connection - */ -struct l4_kwq_update_pg { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0) -#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0 -#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4) -#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4 -#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7 - u8 opcode; - u16 oper16; -#elif defined(__LITTLE_ENDIAN) - u16 oper16; - u8 opcode; - u8 flags; -#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0) -#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0 -#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4) -#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4 -#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 pg_cid; - u32 pg_host_opaque; -#if defined(__BIG_ENDIAN) - u8 pg_valids; -#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0) -#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0 -#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1) -#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1 -#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2) -#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 - u8 pg_unused_a; - u16 pg_ipid_count; -#elif defined(__LITTLE_ENDIAN) - u16 pg_ipid_count; - u8 pg_unused_a; - u8 pg_valids; -#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0) -#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0 -#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1) -#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1 -#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2) -#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 -#endif -#if defined(__BIG_ENDIAN) - u16 reserverd3; - u8 da0; - u8 da1; -#elif defined(__LITTLE_ENDIAN) - u8 da1; - u8 da0; - u16 reserverd3; -#endif -#if defined(__BIG_ENDIAN) - u8 da2; - u8 da3; - u8 da4; - u8 da5; -#elif defined(__LITTLE_ENDIAN) - u8 da5; - u8 da4; - u8 da3; - u8 da2; -#endif - u32 reserved4; - u32 reserved5; -}; - - -/* - * a KWQE request to upload a PG or L4 context - */ -struct l4_kwq_upload { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0) -#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0 -#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4) -#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4 -#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7 - u8 opcode; - u16 oper16; -#elif defined(__LITTLE_ENDIAN) - u16 oper16; - u8 opcode; - u8 flags; -#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0) -#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0 -#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4) -#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4 -#define 
L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 cid; - u32 reserved2[6]; -}; - -/* - * bnx2x structures - */ - -/* - * The iscsi aggregative context of Cstorm - */ -struct cstorm_iscsi_ag_context { - u32 agg_vars1; -#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0) -#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0 -#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8) -#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8 -#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9) -#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9 -#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10) -#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10 -#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11) -#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11 -#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12) -#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12 -#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13) -#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13 -#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<14) -#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 14 -#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16) -#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16 -#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18) -#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18 -#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<19) -#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 19 -#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN (0x1<<20) -#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN_SHIFT 20 -#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<21) -#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 21 -#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<22) -#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 22 -#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23) -#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23 -#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26) -#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26 -#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28) -#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28 -#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30) -#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30 -#if defined(__BIG_ENDIAN) - u8 __aux1_th; - u8 __aux1_val; - u16 __agg_vars2; -#elif defined(__LITTLE_ENDIAN) - u16 __agg_vars2; - u8 __aux1_val; - u8 __aux1_th; -#endif - u32 rel_seq; - u32 rel_seq_th; -#if defined(__BIG_ENDIAN) - u16 hq_cons; - u16 hq_prod; -#elif defined(__LITTLE_ENDIAN) - u16 hq_prod; - u16 hq_cons; -#endif -#if defined(__BIG_ENDIAN) - u8 __reserved62; - u8 __reserved61; - u8 __reserved60; - u8 __reserved59; -#elif defined(__LITTLE_ENDIAN) - u8 __reserved59; - u8 __reserved60; - u8 __reserved61; - u8 __reserved62; -#endif -#if defined(__BIG_ENDIAN) - u16 __reserved64; - u16 cq_u_prod; -#elif defined(__LITTLE_ENDIAN) - u16 cq_u_prod; - u16 __reserved64; -#endif - u32 __cq_u_prod1; -#if defined(__BIG_ENDIAN) - u16 __agg_vars3; - u16 cq_u_pend; -#elif defined(__LITTLE_ENDIAN) - u16 cq_u_pend; - u16 __agg_vars3; -#endif -#if defined(__BIG_ENDIAN) - u16 __aux2_th; - u16 aux2_val; -#elif defined(__LITTLE_ENDIAN) - u16 aux2_val; - u16 __aux2_th; -#endif -}; - -/* - * The fcoe extra aggregative context section of Tstorm - */ -struct tstorm_fcoe_extra_ag_context_section { - u32 __agg_val1; -#if defined(__BIG_ENDIAN) - u8 __tcp_agg_vars2; - u8 __agg_val3; - u16 __agg_val2; -#elif 
defined(__LITTLE_ENDIAN) - u16 __agg_val2; - u8 __agg_val3; - u8 __tcp_agg_vars2; -#endif -#if defined(__BIG_ENDIAN) - u16 __agg_val5; - u8 __agg_val6; - u8 __tcp_agg_vars3; -#elif defined(__LITTLE_ENDIAN) - u8 __tcp_agg_vars3; - u8 __agg_val6; - u16 __agg_val5; -#endif - u32 __lcq_prod; - u32 rtt_seq; - u32 rtt_time; - u32 __reserved66; - u32 wnd_right_edge; - u32 tcp_agg_vars1; -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8 -#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9) -#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18 -#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19) -#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19 -#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20) -#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20 -#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21) -#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21 -#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22) -#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24 -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28) -#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28 - u32 snd_max; - u32 __lcq_cons; - u32 __reserved2; -}; - -/* - * The fcoe aggregative context of Tstorm - */ -struct tstorm_fcoe_ag_context { -#if defined(__BIG_ENDIAN) - u16 ulp_credit; - u8 agg_vars1; -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define 
TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4) -#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4 -#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6) -#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6 -#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7) -#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7 - u8 state; -#elif defined(__LITTLE_ENDIAN) - u8 state; - u8 agg_vars1; -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4) -#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4 -#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6) -#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6 -#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7) -#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7 - u16 ulp_credit; -#endif -#if defined(__BIG_ENDIAN) - u16 __agg_val4; - u16 agg_vars2; -#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0) -#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0 -#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1) -#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1 -#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2) -#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2 -#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4) -#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4 -#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6) -#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6 -#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8) -#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8 -#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10) -#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10 -#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11) -#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11 -#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12) -#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 -#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13) -#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 -#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14) -#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 -#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15) -#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 -#elif defined(__LITTLE_ENDIAN) - u16 agg_vars2; -#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0) -#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0 -#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1) -#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1 -#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2) -#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2 -#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4) -#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4 -#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6) -#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6 -#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8) -#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8 
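[Every field in these aggregative-context words is declared as a macro pair: FIELD (the mask, pre-shifted into position) and FIELD_SHIFT (the bit offset). A minimal sketch of how such pairs are consumed, assuming kernel u16 from <linux/types.h>; the AG_GET()/AG_SET() helpers below are illustrative and not part of the driver -- only the __TSTORM_FCOE_AG_CONTEXT_* values come from the definitions in this hunk:]

/* Read a field: mask it out, then shift it down to bit 0. */
#define AG_GET(val, FIELD)	(((val) & (FIELD)) >> FIELD##_SHIFT)
/* Write a field: clear its bits, then OR in the new value, masked. */
#define AG_SET(val, FIELD, v) \
	(((val) & ~(FIELD)) | (((v) << FIELD##_SHIFT) & (FIELD)))

static inline void tstorm_fcoe_flags_example(u16 agg_vars2)
{
	/* extract the two-bit AUX7 completion-flag field (bits 9:8) */
	u16 aux7_cf = AG_GET(agg_vars2, __TSTORM_FCOE_AG_CONTEXT_AUX7_CF);

	/* set the queue-0 flush enable bit (bit 11), leaving the rest intact */
	agg_vars2 = AG_SET(agg_vars2,
			   __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN, 1);
	(void)aux7_cf;
}

[Note that FIELD##_SHIFT token-pastes the macro name before expansion, so passing __TSTORM_FCOE_AG_CONTEXT_AUX7_CF resolves to the matching *_SHIFT constant defined alongside it.]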
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10) -#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10 -#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11) -#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11 -#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12) -#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 -#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13) -#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 -#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14) -#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 -#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15) -#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 - u16 __agg_val4; -#endif - struct tstorm_fcoe_extra_ag_context_section __extra_section; -}; - - - -/* - * The tcp aggregative context section of Tstorm - */ -struct tstorm_tcp_tcp_ag_context_section { - u32 __agg_val1; -#if defined(__BIG_ENDIAN) - u8 __tcp_agg_vars2; - u8 __agg_val3; - u16 __agg_val2; -#elif defined(__LITTLE_ENDIAN) - u16 __agg_val2; - u8 __agg_val3; - u8 __tcp_agg_vars2; -#endif -#if defined(__BIG_ENDIAN) - u16 __agg_val5; - u8 __agg_val6; - u8 __tcp_agg_vars3; -#elif defined(__LITTLE_ENDIAN) - u8 __tcp_agg_vars3; - u8 __agg_val6; - u16 __agg_val5; -#endif - u32 snd_nxt; - u32 rtt_seq; - u32 rtt_time; - u32 __reserved66; - u32 wnd_right_edge; - u32 tcp_agg_vars1; -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18 -#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19) -#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19 -#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20) -#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20 -#define 
__TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21) -#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21 -#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22) -#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24 -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28) -#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28 - u32 snd_max; - u32 snd_una; - u32 __reserved2; -}; - -/* - * The iscsi aggregative context of Tstorm - */ -struct tstorm_iscsi_ag_context { -#if defined(__BIG_ENDIAN) - u16 ulp_credit; - u8 agg_vars1; -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4) -#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4 -#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) -#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 -#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7) -#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7 - u8 state; -#elif defined(__LITTLE_ENDIAN) - u8 state; - u8 agg_vars1; -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4) -#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4 -#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) -#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 -#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7) -#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7 - u16 ulp_credit; -#endif -#if defined(__BIG_ENDIAN) - u16 __agg_val4; - u16 agg_vars2; -#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0) -#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0 -#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1) -#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1 -#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2) -#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2 -#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4) -#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4 -#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) -#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 -#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) -#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 -#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) -#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 -#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11) -#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11 -#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12) -#define 
__TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12 -#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13) -#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13 -#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) -#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 -#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) -#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 -#elif defined(__LITTLE_ENDIAN) - u16 agg_vars2; -#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0) -#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0 -#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1) -#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1 -#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2) -#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2 -#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4) -#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4 -#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) -#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 -#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) -#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 -#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) -#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 -#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11) -#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11 -#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12) -#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12 -#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13) -#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13 -#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) -#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 -#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) -#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 - u16 __agg_val4; -#endif - struct tstorm_tcp_tcp_ag_context_section tcp; -}; - - - -/* - * The fcoe aggregative context of Ustorm - */ -struct ustorm_fcoe_ag_context { -#if defined(__BIG_ENDIAN) - u8 __aux_counter_flags; - u8 agg_vars2; -#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) -#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 -#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) -#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 -#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) -#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 -#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) -#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 - u8 agg_vars1; -#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) -#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 -#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) -#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 - u8 state; -#elif defined(__LITTLE_ENDIAN) - u8 state; - u8 agg_vars1; -#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define 
USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) -#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 -#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) -#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 - u8 agg_vars2; -#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) -#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 -#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) -#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 -#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) -#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 -#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) -#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 - u8 __aux_counter_flags; -#endif -#if defined(__BIG_ENDIAN) - u8 cdu_usage; - u8 agg_misc2; - u16 pbf_tx_seq_ack; -#elif defined(__LITTLE_ENDIAN) - u16 pbf_tx_seq_ack; - u8 agg_misc2; - u8 cdu_usage; -#endif - u32 agg_misc4; -#if defined(__BIG_ENDIAN) - u8 agg_val3_th; - u8 agg_val3; - u16 agg_misc3; -#elif defined(__LITTLE_ENDIAN) - u16 agg_misc3; - u8 agg_val3; - u8 agg_val3_th; -#endif - u32 expired_task_id; - u32 agg_misc4_th; -#if defined(__BIG_ENDIAN) - u16 cq_prod; - u16 cq_cons; -#elif defined(__LITTLE_ENDIAN) - u16 cq_cons; - u16 cq_prod; -#endif -#if defined(__BIG_ENDIAN) - u16 __reserved2; - u8 decision_rules; -#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) -#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 -#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) -#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 -#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) -#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 -#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) -#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 - u8 decision_rule_enable_bits; -#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) -#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 -#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) -#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 -#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) -#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 -#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) -#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 -#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) -#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 -#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) -#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 -#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) -#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 -#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) -#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 -#elif defined(__LITTLE_ENDIAN) - u8 decision_rule_enable_bits; -#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) -#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 -#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) -#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 -#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) -#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 -#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) -#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 -#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) -#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 -#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) -#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 -#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) -#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 -#define 
__USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) -#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 - u8 decision_rules; -#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) -#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 -#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) -#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 -#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) -#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 -#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) -#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 - u16 __reserved2; -#endif -}; - - -/* - * The iscsi aggregative context of Ustorm - */ -struct ustorm_iscsi_ag_context { -#if defined(__BIG_ENDIAN) - u8 __aux_counter_flags; - u8 agg_vars2; -#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0) -#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0 -#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2) -#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2 -#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) -#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 -#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) -#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 - u8 agg_vars1; -#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4) -#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4 -#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6) -#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6 - u8 state; -#elif defined(__LITTLE_ENDIAN) - u8 state; - u8 agg_vars1; -#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4) -#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4 -#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6) -#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6 - u8 agg_vars2; -#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0) -#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0 -#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2) -#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2 -#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) -#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 -#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) -#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 - u8 __aux_counter_flags; -#endif -#if defined(__BIG_ENDIAN) - u8 cdu_usage; - u8 agg_misc2; - u16 __cq_local_comp_itt_val; -#elif defined(__LITTLE_ENDIAN) - u16 __cq_local_comp_itt_val; - u8 agg_misc2; - u8 cdu_usage; -#endif - u32 agg_misc4; -#if defined(__BIG_ENDIAN) - u8 agg_val3_th; - u8 agg_val3; - u16 agg_misc3; -#elif defined(__LITTLE_ENDIAN) - u16 agg_misc3; - u8 agg_val3; - u8 agg_val3_th; -#endif - u32 agg_val1; - u32 agg_misc4_th; -#if defined(__BIG_ENDIAN) - u16 agg_val2_th; - u16 agg_val2; -#elif defined(__LITTLE_ENDIAN) - 
u16 agg_val2; - u16 agg_val2_th; -#endif -#if defined(__BIG_ENDIAN) - u16 __reserved2; - u8 decision_rules; -#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0) -#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 -#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) -#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 -#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) -#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 -#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) -#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 - u8 decision_rule_enable_bits; -#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0) -#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0 -#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) -#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 -#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2) -#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2 -#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3) -#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 -#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4) -#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4 -#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5) -#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5 -#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6) -#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 -#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) -#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 -#elif defined(__LITTLE_ENDIAN) - u8 decision_rule_enable_bits; -#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0) -#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0 -#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) -#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 -#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2) -#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2 -#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3) -#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 -#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4) -#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4 -#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5) -#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5 -#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6) -#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 -#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) -#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 - u8 decision_rules; -#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0) -#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 -#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) -#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 -#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) -#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 -#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) -#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 - u16 __reserved2; -#endif -}; - - -/* - * The fcoe aggregative context section of Xstorm - */ -struct xstorm_fcoe_extra_ag_context_section { -#if defined(__BIG_ENDIAN) - u8 tcp_agg_vars1; -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 -#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) -#define 
XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7 - u8 __reserved_da_cnt; - u16 __mtu; -#elif defined(__LITTLE_ENDIAN) - u16 __mtu; - u8 __reserved_da_cnt; - u8 tcp_agg_vars1; -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 -#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) -#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7 -#endif - u32 snd_nxt; - u32 tx_wnd; - u32 __reserved55; - u32 local_adv_wnd; -#if defined(__BIG_ENDIAN) - u8 __agg_val8_th; - u8 __tx_dest; - u16 tcp_agg_vars2; -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6 -#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) -#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 tcp_agg_vars2; -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0 -#define 
__XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6 -#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) -#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) -#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 - u8 __tx_dest; - u8 __agg_val8_th; -#endif - u32 __sq_base_addr_lo; - u32 __sq_base_addr_hi; - u32 __xfrq_base_addr_lo; - u32 __xfrq_base_addr_hi; -#if defined(__BIG_ENDIAN) - u16 __xfrq_cons; - u16 __xfrq_prod; -#elif defined(__LITTLE_ENDIAN) - u16 __xfrq_prod; - u16 __xfrq_cons; -#endif -#if defined(__BIG_ENDIAN) - u8 __tcp_agg_vars5; - u8 __tcp_agg_vars4; - u8 __tcp_agg_vars3; - u8 __reserved_force_pure_ack_cnt; -#elif defined(__LITTLE_ENDIAN) - u8 __reserved_force_pure_ack_cnt; - u8 __tcp_agg_vars3; - u8 __tcp_agg_vars4; - u8 __tcp_agg_vars5; -#endif - u32 __tcp_agg_vars6; -#if defined(__BIG_ENDIAN) - u16 __agg_misc6; - u16 __tcp_agg_vars7; -#elif defined(__LITTLE_ENDIAN) - u16 __tcp_agg_vars7; - u16 __agg_misc6; -#endif - u32 __agg_val10; - u32 __agg_val10_th; -#if defined(__BIG_ENDIAN) - u16 __reserved3; - u8 __reserved2; - u8 __da_only_cnt; -#elif defined(__LITTLE_ENDIAN) - u8 __da_only_cnt; - u8 __reserved2; - u16 __reserved3; -#endif -}; - -/* - * The fcoe aggregative context of Xstorm - */ -struct xstorm_fcoe_ag_context { -#if defined(__BIG_ENDIAN) - u16 agg_val1; - u8 agg_vars1; -#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2) -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2 -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3) -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3 -#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) -#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 -#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5) -#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5 -#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) 
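/*
 * Editorial sketch, not part of the patch: every flags byte in these
 * aggregative contexts comes with a MASK/SHIFT #define pair, as above. A
 * field is read by masking and shifting down, and written by clearing the
 * masked bits and or-ing the shifted value back in. The two helper macros
 * below are hypothetical illustrations of that convention; the usage line
 * refers to the NAGLE_EN field defined just above.
 */
#define AG_CTX_GET(var, field) \
	(((var) & field) >> field##_SHIFT)
#define AG_CTX_SET(var, field, val) \
	((var) = ((var) & ~(field)) | (((val) << field##_SHIFT) & field))

/* e.g., with a struct xstorm_fcoe_ag_context *ctx:
 *	AG_CTX_SET(ctx->agg_vars1, XSTORM_FCOE_AG_CONTEXT_NAGLE_EN, 1);
 */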
-#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7) -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7 - u8 __state; -#elif defined(__LITTLE_ENDIAN) - u8 __state; - u8 agg_vars1; -#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2) -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2 -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3) -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3 -#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) -#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 -#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5) -#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5 -#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) -#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7) -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7 - u16 agg_val1; -#endif -#if defined(__BIG_ENDIAN) - u8 cdu_reserved; - u8 __agg_vars4; - u8 agg_vars3; -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 -#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6) -#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6 - u8 agg_vars2; -#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0) -#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0 -#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) -#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 -#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3) -#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3 -#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4) -#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4 -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5) -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5 -#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) -#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 -#elif defined(__LITTLE_ENDIAN) - u8 agg_vars2; -#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0) -#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0 -#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) -#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 -#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3) -#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3 -#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4) -#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4 -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5) -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5 -#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) -#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 - u8 agg_vars3; -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 -#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6) -#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6 - u8 __agg_vars4; - u8 cdu_reserved; -#endif - u32 more_to_send; -#if defined(__BIG_ENDIAN) - u16 agg_vars5; -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0) -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0 -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 -#define 
__XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14) -#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14 - u16 sq_cons; -#elif defined(__LITTLE_ENDIAN) - u16 sq_cons; - u16 agg_vars5; -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0) -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0 -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) -#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 -#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14) -#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14 -#endif - struct xstorm_fcoe_extra_ag_context_section __extra_section; -#if defined(__BIG_ENDIAN) - u16 agg_vars7; -#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) -#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 -#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3) -#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3 -#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4) -#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4 -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6) -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6 -#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8) -#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8 -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10) -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10 -#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11) -#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 -#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12) -#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12 -#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13) -#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13 -#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14) -#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14 -#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15) -#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15 - u8 agg_val3_th; - u8 agg_vars6; -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0) -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0 -#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3) -#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3 -#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6) -#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6 -#elif defined(__LITTLE_ENDIAN) - u8 agg_vars6; -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0) -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0 -#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3) -#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3 -#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6) -#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6 - u8 agg_val3_th; - u16 agg_vars7; -#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) -#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 -#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3) -#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3 -#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4) -#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4 -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6) -#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6 -#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8) -#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8 -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10) -#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10 -#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11) -#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 -#define 
__XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12) -#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12 -#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13) -#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13 -#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14) -#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14 -#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15) -#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15 -#endif -#if defined(__BIG_ENDIAN) - u16 __agg_val11_th; - u16 __agg_val11; -#elif defined(__LITTLE_ENDIAN) - u16 __agg_val11; - u16 __agg_val11_th; -#endif -#if defined(__BIG_ENDIAN) - u8 __reserved1; - u8 __agg_val6_th; - u16 __agg_val9; -#elif defined(__LITTLE_ENDIAN) - u16 __agg_val9; - u8 __agg_val6_th; - u8 __reserved1; -#endif -#if defined(__BIG_ENDIAN) - u16 confq_cons; - u16 confq_prod; -#elif defined(__LITTLE_ENDIAN) - u16 confq_prod; - u16 confq_cons; -#endif - u32 agg_vars8; -#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) -#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2_SHIFT 0 -#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24) -#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24 -#if defined(__BIG_ENDIAN) - u16 agg_misc0; - u16 sq_prod; -#elif defined(__LITTLE_ENDIAN) - u16 sq_prod; - u16 agg_misc0; -#endif -#if defined(__BIG_ENDIAN) - u8 agg_val3; - u8 agg_val6; - u8 agg_val5_th; - u8 agg_val5; -#elif defined(__LITTLE_ENDIAN) - u8 agg_val5; - u8 agg_val5_th; - u8 agg_val6; - u8 agg_val3; -#endif -#if defined(__BIG_ENDIAN) - u16 __agg_misc1; - u16 agg_limit1; -#elif defined(__LITTLE_ENDIAN) - u16 agg_limit1; - u16 __agg_misc1; -#endif - u32 completion_seq; - u32 confq_pbl_base_lo; - u32 confq_pbl_base_hi; -}; - - - -/* - * The tcp aggregative context section of Xstorm - */ -struct xstorm_tcp_tcp_ag_context_section { -#if defined(__BIG_ENDIAN) - u8 tcp_agg_vars1; -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7 - u8 __da_cnt; - u16 mss; -#elif defined(__LITTLE_ENDIAN) - u16 mss; - u8 __da_cnt; - u8 tcp_agg_vars1; -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7 -#endif - u32 snd_nxt; - u32 tx_wnd; - u32 snd_una; - u32 local_adv_wnd; -#if defined(__BIG_ENDIAN) - u8 __agg_val8_th; - u8 __tx_dest; - u16 tcp_agg_vars2; -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG 
(0x1<<0) -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5) -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6 -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 tcp_agg_vars2; -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0) -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5) -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6 -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 - u8 __tx_dest; - u8 
__agg_val8_th; -#endif - u32 ack_to_far_end; - u32 rto_timer; - u32 ka_timer; - u32 ts_to_echo; -#if defined(__BIG_ENDIAN) - u16 __agg_val7_th; - u16 __agg_val7; -#elif defined(__LITTLE_ENDIAN) - u16 __agg_val7; - u16 __agg_val7_th; -#endif -#if defined(__BIG_ENDIAN) - u8 __tcp_agg_vars5; - u8 __tcp_agg_vars4; - u8 __tcp_agg_vars3; - u8 __force_pure_ack_cnt; -#elif defined(__LITTLE_ENDIAN) - u8 __force_pure_ack_cnt; - u8 __tcp_agg_vars3; - u8 __tcp_agg_vars4; - u8 __tcp_agg_vars5; -#endif - u32 tcp_agg_vars6; -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN (0x1<<1) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN_SHIFT 1 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24 -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26) -#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30 -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31) -#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31 -#if defined(__BIG_ENDIAN) - u16 __agg_misc6; - u16 __tcp_agg_vars7; -#elif defined(__LITTLE_ENDIAN) - u16 
__tcp_agg_vars7; - u16 __agg_misc6; -#endif - u32 __agg_val10; - u32 __agg_val10_th; -#if defined(__BIG_ENDIAN) - u16 __reserved3; - u8 __reserved2; - u8 __da_only_cnt; -#elif defined(__LITTLE_ENDIAN) - u8 __da_only_cnt; - u8 __reserved2; - u16 __reserved3; -#endif -}; - -/* - * The iscsi aggregative context of Xstorm - */ -struct xstorm_iscsi_ag_context { -#if defined(__BIG_ENDIAN) - u16 agg_val1; - u8 agg_vars1; -#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) -#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 -#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5) -#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5 -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 -#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) -#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 - u8 state; -#elif defined(__LITTLE_ENDIAN) - u8 state; - u8 agg_vars1; -#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) -#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 -#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5) -#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5 -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 -#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) -#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 - u16 agg_val1; -#endif -#if defined(__BIG_ENDIAN) - u8 cdu_reserved; - u8 __agg_vars4; - u8 agg_vars3; -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 -#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) -#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 - u8 agg_vars2; -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4 -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5 -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 -#elif defined(__LITTLE_ENDIAN) - u8 agg_vars2; -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) -#define 
__XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4 -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5 -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) -#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 - u8 agg_vars3; -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 -#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) -#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 - u8 __agg_vars4; - u8 cdu_reserved; -#endif - u32 more_to_send; -#if defined(__BIG_ENDIAN) - u16 agg_vars5; -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0 -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14 - u16 sq_cons; -#elif defined(__LITTLE_ENDIAN) - u16 sq_cons; - u16 agg_vars5; -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0 -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) -#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14 -#endif - struct xstorm_tcp_tcp_ag_context_section tcp; -#if defined(__BIG_ENDIAN) - u16 agg_vars7; -#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) -#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 -#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) -#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 -#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) -#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8 -#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) -#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 -#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) -#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 - u8 agg_val3_th; - u8 agg_vars6; -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0 -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3 -#define 
XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6 -#elif defined(__LITTLE_ENDIAN) - u8 agg_vars6; -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0 -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3 -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6 - u8 agg_val3_th; - u16 agg_vars7; -#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) -#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 -#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) -#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) -#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 -#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) -#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8 -#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) -#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 -#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) -#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 -#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) -#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 -#endif -#if defined(__BIG_ENDIAN) - u16 __agg_val11_th; - u16 __gen_data; -#elif defined(__LITTLE_ENDIAN) - u16 __gen_data; - u16 __agg_val11_th; -#endif -#if defined(__BIG_ENDIAN) - u8 __reserved1; - u8 __agg_val6_th; - u16 __agg_val9; -#elif defined(__LITTLE_ENDIAN) - u16 __agg_val9; - u8 __agg_val6_th; - u8 __reserved1; -#endif -#if defined(__BIG_ENDIAN) - u16 hq_prod; - u16 hq_cons; -#elif defined(__LITTLE_ENDIAN) - u16 hq_cons; - u16 hq_prod; -#endif - u32 agg_vars8; -#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) -#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0 -#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24) -#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24 -#if defined(__BIG_ENDIAN) - u16 r2tq_prod; - u16 sq_prod; -#elif defined(__LITTLE_ENDIAN) - u16 sq_prod; - u16 r2tq_prod; -#endif -#if defined(__BIG_ENDIAN) - u8 agg_val3; - u8 agg_val6; - u8 agg_val5_th; - u8 agg_val5; -#elif defined(__LITTLE_ENDIAN) - u8 agg_val5; - u8 agg_val5_th; - u8 agg_val6; - u8 agg_val3; -#endif -#if defined(__BIG_ENDIAN) - u16 __agg_misc1; - u16 agg_limit1; -#elif defined(__LITTLE_ENDIAN) - u16 agg_limit1; - u16 __agg_misc1; -#endif - u32 hq_cons_tcp_seq; - u32 exp_stat_sn; - u32 rst_seq_num; -}; - - -/* - * The L5cm aggregative context of XStorm - */ -struct xstorm_l5cm_ag_context { -#if defined(__BIG_ENDIAN) - u16 agg_val1; - u8 agg_vars1; -#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 
(0x1<<3) -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) -#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 -#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5) -#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5 -#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) -#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 -#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) -#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 - u8 state; -#elif defined(__LITTLE_ENDIAN) - u8 state; - u8 agg_vars1; -#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) -#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) -#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 -#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) -#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 -#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5) -#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5 -#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) -#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 -#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) -#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 - u16 agg_val1; -#endif -#if defined(__BIG_ENDIAN) - u8 cdu_reserved; - u8 __agg_vars4; - u8 agg_vars3; -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 -#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) -#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 - u8 agg_vars2; -#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0) -#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0 -#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) -#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 -#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3) -#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3 -#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4) -#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4 -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5 -#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7) -#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7 -#elif defined(__LITTLE_ENDIAN) - u8 agg_vars2; -#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0) -#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0 -#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) -#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 -#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3) -#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3 -#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4) -#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4 -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5 -#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7) -#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7 - u8 agg_vars3; -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 -#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) -#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 - u8 __agg_vars4; - u8 cdu_reserved; -#endif - u32 more_to_send; -#if defined(__BIG_ENDIAN) - u16 agg_vars5; -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0) 
-#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0 -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14 - u16 agg_val4_th; -#elif defined(__LITTLE_ENDIAN) - u16 agg_val4_th; - u16 agg_vars5; -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0 -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) -#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14 -#endif - struct xstorm_tcp_tcp_ag_context_section tcp; -#if defined(__BIG_ENDIAN) - u16 agg_vars7; -#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) -#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 -#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3) -#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3 -#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) -#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6 -#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8) -#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8 -#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) -#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 -#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11) -#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 -#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12) -#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12 -#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13) -#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13 -#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14) -#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14 -#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) -#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 - u8 agg_val3_th; - u8 agg_vars6; -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0 -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3 -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6 -#elif defined(__LITTLE_ENDIAN) - u8 agg_vars6; -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0 -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3 -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6) -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6 - u8 agg_val3_th; - u16 agg_vars7; -#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) -#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 -#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3) -#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3 -#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) -#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 -#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6) -#define 
XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6 -#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8) -#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8 -#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) -#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 -#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11) -#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 -#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12) -#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12 -#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13) -#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13 -#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14) -#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14 -#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) -#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 -#endif -#if defined(__BIG_ENDIAN) - u16 __agg_val11_th; - u16 __gen_data; -#elif defined(__LITTLE_ENDIAN) - u16 __gen_data; - u16 __agg_val11_th; -#endif -#if defined(__BIG_ENDIAN) - u8 __reserved1; - u8 __agg_val6_th; - u16 __agg_val9; -#elif defined(__LITTLE_ENDIAN) - u16 __agg_val9; - u8 __agg_val6_th; - u8 __reserved1; -#endif -#if defined(__BIG_ENDIAN) - u16 agg_val2_th; - u16 agg_val2; -#elif defined(__LITTLE_ENDIAN) - u16 agg_val2; - u16 agg_val2_th; -#endif - u32 agg_vars8; -#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) -#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2_SHIFT 0 -#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3 (0xFF<<24) -#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3_SHIFT 24 -#if defined(__BIG_ENDIAN) - u16 agg_misc0; - u16 agg_val4; -#elif defined(__LITTLE_ENDIAN) - u16 agg_val4; - u16 agg_misc0; -#endif -#if defined(__BIG_ENDIAN) - u8 agg_val3; - u8 agg_val6; - u8 agg_val5_th; - u8 agg_val5; -#elif defined(__LITTLE_ENDIAN) - u8 agg_val5; - u8 agg_val5_th; - u8 agg_val6; - u8 agg_val3; -#endif -#if defined(__BIG_ENDIAN) - u16 __agg_misc1; - u16 agg_limit1; -#elif defined(__LITTLE_ENDIAN) - u16 agg_limit1; - u16 __agg_misc1; -#endif - u32 completion_seq; - u32 agg_misc4; - u32 rst_seq_num; -}; - -/* - * ABTS info $$KEEP_ENDIANNESS$$ - */ -struct fcoe_abts_info { - __le16 aborted_task_id; - __le16 reserved0; - __le32 reserved1; -}; - - -/* - * Fixed size structure in order to plant it in Union structure - * $$KEEP_ENDIANNESS$$ - */ -struct fcoe_abts_rsp_union { - u8 r_ctl; - u8 rsrv[3]; - __le32 abts_rsp_payload[7]; -}; - - -/* - * 4 regs size $$KEEP_ENDIANNESS$$ - */ -struct fcoe_bd_ctx { - __le32 buf_addr_hi; - __le32 buf_addr_lo; - __le16 buf_len; - __le16 rsrv0; - __le16 flags; - __le16 rsrv1; -}; - - -/* - * FCoE cached sges context $$KEEP_ENDIANNESS$$ - */ -struct fcoe_cached_sge_ctx { - struct regpair cur_buf_addr; - __le16 cur_buf_rem; - __le16 second_buf_rem; - struct regpair second_buf_addr; -}; - - -/* - * Cleanup info $$KEEP_ENDIANNESS$$ - */ -struct fcoe_cleanup_info { - __le16 cleaned_task_id; - __le16 rolled_tx_seq_cnt; - __le32 rolled_tx_data_offset; -}; - - -/* - * Fcp RSP flags $$KEEP_ENDIANNESS$$ - */ -struct fcoe_fcp_rsp_flags { - u8 flags; -#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) -#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 -#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) -#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 -#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) -#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 -#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) -#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 -#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4) -#define 
FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 -#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5) -#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 -}; - -/* - * Fcp RSP payload $$KEEP_ENDIANNESS$$ - */ -struct fcoe_fcp_rsp_payload { - struct regpair reserved0; - __le32 fcp_resid; - u8 scsi_status_code; - struct fcoe_fcp_rsp_flags fcp_flags; - __le16 retry_delay_timer; - __le32 fcp_rsp_len; - __le32 fcp_sns_len; -}; - -/* - * Fixed size structure in order to plant it in Union structure - * $$KEEP_ENDIANNESS$$ - */ -struct fcoe_fcp_rsp_union { - struct fcoe_fcp_rsp_payload payload; - struct regpair reserved0; -}; - -/* - * FC header $$KEEP_ENDIANNESS$$ - */ -struct fcoe_fc_hdr { - u8 s_id[3]; - u8 cs_ctl; - u8 d_id[3]; - u8 r_ctl; - __le16 seq_cnt; - u8 df_ctl; - u8 seq_id; - u8 f_ctl[3]; - u8 type; - __le32 parameters; - __le16 rx_id; - __le16 ox_id; -}; - -/* - * FC header union $$KEEP_ENDIANNESS$$ - */ -struct fcoe_mp_rsp_union { - struct fcoe_fc_hdr fc_hdr; - __le32 mp_payload_len; - __le32 rsrv; -}; - -/* - * Completion information $$KEEP_ENDIANNESS$$ - */ -union fcoe_comp_flow_info { - struct fcoe_fcp_rsp_union fcp_rsp; - struct fcoe_abts_rsp_union abts_rsp; - struct fcoe_mp_rsp_union mp_rsp; - __le32 opaque[8]; -}; - - -/* - * External ABTS info $$KEEP_ENDIANNESS$$ - */ -struct fcoe_ext_abts_info { - __le32 rsrv0[6]; - struct fcoe_abts_info ctx; -}; - - -/* - * External cleanup info $$KEEP_ENDIANNESS$$ - */ -struct fcoe_ext_cleanup_info { - __le32 rsrv0[6]; - struct fcoe_cleanup_info ctx; -}; - - -/* - * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$ - */ -struct fcoe_fw_tx_seq_ctx { - __le32 data_offset; - __le16 seq_cnt; - __le16 rsrv0; -}; - -/* - * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$ - */ -struct fcoe_ext_fw_tx_seq_ctx { - __le32 rsrv0[6]; - struct fcoe_fw_tx_seq_ctx ctx; -}; - - -/* - * FCoE multiple sges context $$KEEP_ENDIANNESS$$ - */ -struct fcoe_mul_sges_ctx { - struct regpair cur_sge_addr; - __le16 cur_sge_off; - u8 cur_sge_idx; - u8 sgl_size; -}; - -/* - * FCoE external multiple sges context $$KEEP_ENDIANNESS$$ - */ -struct fcoe_ext_mul_sges_ctx { - struct fcoe_mul_sges_ctx mul_sgl; - struct regpair rsrv0; -}; - - -/* - * FCP CMD payload $$KEEP_ENDIANNESS$$ - */ -struct fcoe_fcp_cmd_payload { - __le32 opaque[8]; -}; - - - - - -/* - * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$ - */ -struct fcoe_fcp_xfr_rdy_payload { - __le32 burst_len; - __le32 data_ro; -}; - - -/* - * FC frame $$KEEP_ENDIANNESS$$ - */ -struct fcoe_fc_frame { - struct fcoe_fc_hdr fc_hdr; - __le32 reserved0[2]; -}; - - - - -/* - * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$ - */ -union fcoe_kcqe_params { - __le32 reserved0[4]; -}; - -/* - * FCoE KCQ CQE $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kcqe { - __le32 fcoe_conn_id; - __le32 completion_status; - __le32 fcoe_conn_context_id; - union fcoe_kcqe_params params; - __le16 qe_self_seq; - u8 op_code; - u8 flags; -#define FCOE_KCQE_RESERVED0 (0x7<<0) -#define FCOE_KCQE_RESERVED0_SHIFT 0 -#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) -#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 -#define FCOE_KCQE_LAYER_CODE (0x7<<4) -#define FCOE_KCQE_LAYER_CODE_SHIFT 4 -#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) -#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 -}; - - - -/* - * FCoE KWQE header $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_header { - u8 op_code; - u8 flags; -#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) -#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 -#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) -#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 -#define 
FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) -#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 -}; - -/* - * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_init1 { - __le16 num_tasks; - struct fcoe_kwqe_header hdr; - __le32 task_list_pbl_addr_lo; - __le32 task_list_pbl_addr_hi; - __le32 dummy_buffer_addr_lo; - __le32 dummy_buffer_addr_hi; - __le16 sq_num_wqes; - __le16 rq_num_wqes; - __le16 rq_buffer_log_size; - __le16 cq_num_wqes; - __le16 mtu; - u8 num_sessions_log; - u8 flags; -#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) -#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 -#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) -#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 -#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) -#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 -}; - -/* - * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_init2 { - u8 hsi_major_version; - u8 hsi_minor_version; - struct fcoe_kwqe_header hdr; - __le32 hash_tbl_pbl_addr_lo; - __le32 hash_tbl_pbl_addr_hi; - __le32 t2_hash_tbl_addr_lo; - __le32 t2_hash_tbl_addr_hi; - __le32 t2_ptr_hash_tbl_addr_lo; - __le32 t2_ptr_hash_tbl_addr_hi; - __le32 free_list_count; -}; - -/* - * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_init3 { - __le16 reserved0; - struct fcoe_kwqe_header hdr; - __le32 error_bit_map_lo; - __le32 error_bit_map_hi; - u8 perf_config; - u8 reserved21[3]; - __le32 reserved2[4]; -}; - -/* - * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_conn_offload1 { - __le16 fcoe_conn_id; - struct fcoe_kwqe_header hdr; - __le32 sq_addr_lo; - __le32 sq_addr_hi; - __le32 rq_pbl_addr_lo; - __le32 rq_pbl_addr_hi; - __le32 rq_first_pbe_addr_lo; - __le32 rq_first_pbe_addr_hi; - __le16 rq_prod; - __le16 reserved0; -}; - -/* - * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_conn_offload2 { - __le16 tx_max_fc_pay_len; - struct fcoe_kwqe_header hdr; - __le32 cq_addr_lo; - __le32 cq_addr_hi; - __le32 xferq_addr_lo; - __le32 xferq_addr_hi; - __le32 conn_db_addr_lo; - __le32 conn_db_addr_hi; - __le32 reserved1; -}; - -/* - * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_conn_offload3 { - __le16 vlan_tag; -#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) -#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 -#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) -#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 -#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) -#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 - struct fcoe_kwqe_header hdr; - u8 s_id[3]; - u8 tx_max_conc_seqs_c3; - u8 d_id[3]; - u8 flags; -#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) -#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 -#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) -#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 -#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) -#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 -#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) -#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 -#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) -#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 -#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) -#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 -#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) -#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 -#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) -#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 - __le32 reserved; - __le32 
confq_first_pbe_addr_lo; - __le32 confq_first_pbe_addr_hi; - __le16 tx_total_conc_seqs; - __le16 rx_max_fc_pay_len; - __le16 rx_total_conc_seqs; - u8 rx_max_conc_seqs_c3; - u8 rx_open_seqs_exch_c3; -}; - -/* - * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_conn_offload4 { - u8 e_d_tov_timer_val; - u8 reserved2; - struct fcoe_kwqe_header hdr; - u8 src_mac_addr_lo[2]; - u8 src_mac_addr_mid[2]; - u8 src_mac_addr_hi[2]; - u8 dst_mac_addr_hi[2]; - u8 dst_mac_addr_lo[2]; - u8 dst_mac_addr_mid[2]; - __le32 lcq_addr_lo; - __le32 lcq_addr_hi; - __le32 confq_pbl_base_addr_lo; - __le32 confq_pbl_base_addr_hi; -}; - -/* - * FCoE connection enable request $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_conn_enable_disable { - __le16 reserved0; - struct fcoe_kwqe_header hdr; - u8 src_mac_addr_lo[2]; - u8 src_mac_addr_mid[2]; - u8 src_mac_addr_hi[2]; - u16 vlan_tag; -#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) -#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 -#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) -#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 -#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) -#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 - u8 dst_mac_addr_lo[2]; - u8 dst_mac_addr_mid[2]; - u8 dst_mac_addr_hi[2]; - __le16 reserved1; - u8 s_id[3]; - u8 vlan_flag; - u8 d_id[3]; - u8 reserved3; - __le32 context_id; - __le32 conn_id; - __le32 reserved4; -}; - -/* - * FCoE connection destroy request $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_conn_destroy { - __le16 reserved0; - struct fcoe_kwqe_header hdr; - __le32 context_id; - __le32 conn_id; - __le32 reserved1[5]; -}; - -/* - * FCoe destroy request $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_destroy { - __le16 reserved0; - struct fcoe_kwqe_header hdr; - __le32 reserved1[7]; -}; - -/* - * FCoe statistics request $$KEEP_ENDIANNESS$$ - */ -struct fcoe_kwqe_stat { - __le16 reserved0; - struct fcoe_kwqe_header hdr; - __le32 stat_params_addr_lo; - __le32 stat_params_addr_hi; - __le32 reserved1[5]; -}; - -/* - * FCoE KWQ WQE $$KEEP_ENDIANNESS$$ - */ -union fcoe_kwqe { - struct fcoe_kwqe_init1 init1; - struct fcoe_kwqe_init2 init2; - struct fcoe_kwqe_init3 init3; - struct fcoe_kwqe_conn_offload1 conn_offload1; - struct fcoe_kwqe_conn_offload2 conn_offload2; - struct fcoe_kwqe_conn_offload3 conn_offload3; - struct fcoe_kwqe_conn_offload4 conn_offload4; - struct fcoe_kwqe_conn_enable_disable conn_enable_disable; - struct fcoe_kwqe_conn_destroy conn_destroy; - struct fcoe_kwqe_destroy destroy; - struct fcoe_kwqe_stat statistics; -}; - - - - - - - - - - - - - - - - -/* - * TX SGL context $$KEEP_ENDIANNESS$$ - */ -union fcoe_sgl_union_ctx { - struct fcoe_cached_sge_ctx cached_sge; - struct fcoe_ext_mul_sges_ctx sgl; - __le32 opaque[5]; -}; - -/* - * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$ - */ -struct fcoe_read_flow_info { - union fcoe_sgl_union_ctx sgl_ctx; - __le32 rsrv0[3]; -}; - - -/* - * Fcoe stat context $$KEEP_ENDIANNESS$$ - */ -struct fcoe_s_stat_ctx { - u8 flags; -#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) -#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0 -#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1) -#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1 -#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2) -#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2 -#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3) -#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3 -#define FCOE_S_STAT_CTX_P_RJT (0x1<<4) -#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4 -#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5) -#define 
FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5 -#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6) -#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 -}; - -/* - * Fcoe rx seq context $$KEEP_ENDIANNESS$$ - */ -struct fcoe_rx_seq_ctx { - u8 seq_id; - struct fcoe_s_stat_ctx s_stat; - __le16 seq_cnt; - __le32 low_exp_ro; - __le32 high_exp_ro; -}; - - -/* - * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$ - */ -union fcoe_rx_wr_union_ctx { - struct fcoe_read_flow_info read_info; - union fcoe_comp_flow_info comp_info; - __le32 opaque[8]; -}; - - - -/* - * FCoE SQ element $$KEEP_ENDIANNESS$$ - */ -struct fcoe_sqe { - __le16 wqe; -#define FCOE_SQE_TASK_ID (0x7FFF<<0) -#define FCOE_SQE_TASK_ID_SHIFT 0 -#define FCOE_SQE_TOGGLE_BIT (0x1<<15) -#define FCOE_SQE_TOGGLE_BIT_SHIFT 15 -}; - - - -/* - * 14 regs $$KEEP_ENDIANNESS$$ - */ -struct fcoe_tce_tx_only { - union fcoe_sgl_union_ctx sgl_ctx; - __le32 rsrv0; -}; - -/* - * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$ - */ -union fcoe_tx_wr_rx_rd_union_ctx { - struct fcoe_fc_frame tx_frame; - struct fcoe_fcp_cmd_payload fcp_cmd; - struct fcoe_ext_cleanup_info cleanup; - struct fcoe_ext_abts_info abts; - struct fcoe_ext_fw_tx_seq_ctx tx_seq; - __le32 opaque[8]; -}; - -/* - * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$ - */ -struct fcoe_tce_tx_wr_rx_rd_const { - u8 init_flags; -#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0) -#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0 -#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3) -#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3 -#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4) -#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4 -#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5) -#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5 -#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7) -#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7 - u8 tx_flags; -#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0) -#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0 -#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1) -#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1 -#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5) -#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5 -#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6) -#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6 -#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7) -#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7 - __le16 rsrv3; - __le32 verify_tx_seq; -}; - -/* - * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$ - */ -struct fcoe_tce_tx_wr_rx_rd { - union fcoe_tx_wr_rx_rd_union_ctx union_ctx; - struct fcoe_tce_tx_wr_rx_rd_const const_ctx; -}; - -/* - * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$ - */ -struct fcoe_tce_rx_wr_tx_rd_const { - __le32 data_2_trns; - __le32 init_flags; -#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0) -#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0 -#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24) -#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24 -}; - -/* - * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$ - */ -struct fcoe_tce_rx_wr_tx_rd_var { - __le16 rx_flags; -#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0) -#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0 -#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4) -#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4 -#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7) -#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7 -#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8) -#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8 -#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME 
(0x1<<12) -#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12 -#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13) -#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13 -#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14) -#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14 -#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15) -#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15 - __le16 rx_id; - struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy; -}; - -/* - * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$ - */ -struct fcoe_tce_rx_wr_tx_rd { - struct fcoe_tce_rx_wr_tx_rd_const const_ctx; - struct fcoe_tce_rx_wr_tx_rd_var var_ctx; -}; - -/* - * tce_rx_only $$KEEP_ENDIANNESS$$ - */ -struct fcoe_tce_rx_only { - struct fcoe_rx_seq_ctx rx_seq_ctx; - union fcoe_rx_wr_union_ctx union_ctx; -}; - -/* - * task_ctx_entry $$KEEP_ENDIANNESS$$ - */ -struct fcoe_task_ctx_entry { - struct fcoe_tce_tx_only txwr_only; - struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; - struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; - struct fcoe_tce_rx_only rxwr_only; -}; - - - - - - - - - - -/* - * FCoE XFRQ element $$KEEP_ENDIANNESS$$ - */ -struct fcoe_xfrqe { - __le16 wqe; -#define FCOE_XFRQE_TASK_ID (0x7FFF<<0) -#define FCOE_XFRQE_TASK_ID_SHIFT 0 -#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) -#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15 -}; - - -/* - * Cached SGEs $$KEEP_ENDIANNESS$$ - */ -struct common_fcoe_sgl { - struct fcoe_bd_ctx sge[3]; -}; - - -/* - * FCoE SQ\XFRQ element - */ -struct fcoe_cached_wqe { - struct fcoe_sqe sqe; - struct fcoe_xfrqe xfrqe; -}; - - -/* - * FCoE connection enable\disable params passed by driver to FW in FCoE enable - * ramrod $$KEEP_ENDIANNESS$$ - */ -struct fcoe_conn_enable_disable_ramrod_params { - struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe; -}; - - -/* - * FCoE connection offload params passed by driver to FW in FCoE offload ramrod - * $$KEEP_ENDIANNESS$$ - */ -struct fcoe_conn_offload_ramrod_params { - struct fcoe_kwqe_conn_offload1 offload_kwqe1; - struct fcoe_kwqe_conn_offload2 offload_kwqe2; - struct fcoe_kwqe_conn_offload3 offload_kwqe3; - struct fcoe_kwqe_conn_offload4 offload_kwqe4; -}; - - -struct ustorm_fcoe_mng_ctx { -#if defined(__BIG_ENDIAN) - u8 mid_seq_proc_flag; - u8 tce_in_cam_flag; - u8 tce_on_ior_flag; - u8 en_cached_tce_flag; -#elif defined(__LITTLE_ENDIAN) - u8 en_cached_tce_flag; - u8 tce_on_ior_flag; - u8 tce_in_cam_flag; - u8 mid_seq_proc_flag; -#endif -#if defined(__BIG_ENDIAN) - u8 tce_cam_addr; - u8 cached_conn_flag; - u16 rsrv0; -#elif defined(__LITTLE_ENDIAN) - u16 rsrv0; - u8 cached_conn_flag; - u8 tce_cam_addr; -#endif -#if defined(__BIG_ENDIAN) - u16 dma_tce_ram_addr; - u16 tce_ram_addr; -#elif defined(__LITTLE_ENDIAN) - u16 tce_ram_addr; - u16 dma_tce_ram_addr; -#endif -#if defined(__BIG_ENDIAN) - u16 ox_id; - u16 wr_done_seq; -#elif defined(__LITTLE_ENDIAN) - u16 wr_done_seq; - u16 ox_id; -#endif - struct regpair task_addr; -}; - -/* - * Parameters initialized during offloaded according to FLOGI/PLOGI/PRLI and - * used in FCoE context section - */ -struct ustorm_fcoe_params { -#if defined(__BIG_ENDIAN) - u16 fcoe_conn_id; - u16 flags; -#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0) -#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0 -#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1) -#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1 -#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2) -#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2 -#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3) -#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3 -#define 
USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4) -#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4 -#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5) -#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5 -#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6) -#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6 -#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7) -#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7 -#elif defined(__LITTLE_ENDIAN) - u16 flags; -#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0) -#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0 -#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1) -#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1 -#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2) -#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2 -#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3) -#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3 -#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4) -#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4 -#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5) -#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5 -#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6) -#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6 -#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7) -#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7 - u16 fcoe_conn_id; -#endif -#if defined(__BIG_ENDIAN) - u8 hc_csdm_byte_en; - u8 func_id; - u8 port_id; - u8 vnic_id; -#elif defined(__LITTLE_ENDIAN) - u8 vnic_id; - u8 port_id; - u8 func_id; - u8 hc_csdm_byte_en; -#endif -#if defined(__BIG_ENDIAN) - u16 rx_total_conc_seqs; - u16 rx_max_fc_pay_len; -#elif defined(__LITTLE_ENDIAN) - u16 rx_max_fc_pay_len; - u16 rx_total_conc_seqs; -#endif -#if defined(__BIG_ENDIAN) - u8 task_pbe_idx_off; - u8 task_in_page_log_size; - u16 rx_max_conc_seqs; -#elif defined(__LITTLE_ENDIAN) - u16 rx_max_conc_seqs; - u8 task_in_page_log_size; - u8 task_pbe_idx_off; -#endif -}; - -/* - * FCoE 16-bits index structure - */ -struct fcoe_idx16_fields { - u16 fields; -#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0) -#define FCOE_IDX16_FIELDS_IDX_SHIFT 0 -#define FCOE_IDX16_FIELDS_MSB (0x1<<15) -#define FCOE_IDX16_FIELDS_MSB_SHIFT 15 -}; - -/* - * FCoE 16-bits index union - */ -union fcoe_idx16_field_union { - struct fcoe_idx16_fields fields; - u16 val; -}; - -/* - * Parameters required for placement according to SGL - */ -struct ustorm_fcoe_data_place_mng { -#if defined(__BIG_ENDIAN) - u16 sge_off; - u8 num_sges; - u8 sge_idx; -#elif defined(__LITTLE_ENDIAN) - u8 sge_idx; - u8 num_sges; - u16 sge_off; -#endif -}; - -/* - * Parameters required for placement according to SGL - */ -struct ustorm_fcoe_data_place { - struct ustorm_fcoe_data_place_mng cached_mng; - struct fcoe_bd_ctx cached_sge[2]; -}; - -/* - * TX processing shall write and RX processing shall read from this section - */ -union fcoe_u_tce_tx_wr_rx_rd_union { - struct fcoe_abts_info abts; - struct fcoe_cleanup_info cleanup; - struct fcoe_fw_tx_seq_ctx tx_seq_ctx; - u32 opaque[2]; -}; - -/* - * TX processing shall write and RX processing shall read from this section - */ -struct fcoe_u_tce_tx_wr_rx_rd { - union fcoe_u_tce_tx_wr_rx_rd_union union_ctx; - struct fcoe_tce_tx_wr_rx_rd_const const_ctx; -}; - -struct ustorm_fcoe_tce { - struct fcoe_u_tce_tx_wr_rx_rd txwr_rxrd; - struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; - struct fcoe_tce_rx_only rxwr; -}; - -struct ustorm_fcoe_cache_ctx { - u32 rsrv0; - struct ustorm_fcoe_data_place data_place; - struct ustorm_fcoe_tce tce; -}; - -/* - * Ustorm FCoE Storm Context - */ -struct ustorm_fcoe_st_context { - struct ustorm_fcoe_mng_ctx mng_ctx; - struct 
ustorm_fcoe_params fcoe_params; - struct regpair cq_base_addr; - struct regpair rq_pbl_base; - struct regpair rq_cur_page_addr; - struct regpair confq_pbl_base_addr; - struct regpair conn_db_base; - struct regpair xfrq_base_addr; - struct regpair lcq_base_addr; -#if defined(__BIG_ENDIAN) - union fcoe_idx16_field_union rq_cons; - union fcoe_idx16_field_union rq_prod; -#elif defined(__LITTLE_ENDIAN) - union fcoe_idx16_field_union rq_prod; - union fcoe_idx16_field_union rq_cons; -#endif -#if defined(__BIG_ENDIAN) - u16 xfrq_prod; - u16 cq_cons; -#elif defined(__LITTLE_ENDIAN) - u16 cq_cons; - u16 xfrq_prod; -#endif -#if defined(__BIG_ENDIAN) - u16 lcq_cons; - u16 hc_cram_address; -#elif defined(__LITTLE_ENDIAN) - u16 hc_cram_address; - u16 lcq_cons; -#endif -#if defined(__BIG_ENDIAN) - u16 sq_xfrq_lcq_confq_size; - u16 confq_prod; -#elif defined(__LITTLE_ENDIAN) - u16 confq_prod; - u16 sq_xfrq_lcq_confq_size; -#endif -#if defined(__BIG_ENDIAN) - u8 hc_csdm_agg_int; - u8 rsrv2; - u8 available_rqes; - u8 sp_q_flush_cnt; -#elif defined(__LITTLE_ENDIAN) - u8 sp_q_flush_cnt; - u8 available_rqes; - u8 rsrv2; - u8 hc_csdm_agg_int; -#endif -#if defined(__BIG_ENDIAN) - u16 num_pend_tasks; - u16 pbf_ack_ram_addr; -#elif defined(__LITTLE_ENDIAN) - u16 pbf_ack_ram_addr; - u16 num_pend_tasks; -#endif - struct ustorm_fcoe_cache_ctx cache_ctx; -}; - -/* - * The FCoE non-aggregative context of Tstorm - */ -struct tstorm_fcoe_st_context { - struct regpair reserved0; - struct regpair reserved1; -}; - -/* - * Ethernet context section - */ -struct xstorm_fcoe_eth_context_section { -#if defined(__BIG_ENDIAN) - u8 remote_addr_4; - u8 remote_addr_5; - u8 local_addr_0; - u8 local_addr_1; -#elif defined(__LITTLE_ENDIAN) - u8 local_addr_1; - u8 local_addr_0; - u8 remote_addr_5; - u8 remote_addr_4; -#endif -#if defined(__BIG_ENDIAN) - u8 remote_addr_0; - u8 remote_addr_1; - u8 remote_addr_2; - u8 remote_addr_3; -#elif defined(__LITTLE_ENDIAN) - u8 remote_addr_3; - u8 remote_addr_2; - u8 remote_addr_1; - u8 remote_addr_0; -#endif -#if defined(__BIG_ENDIAN) - u16 reserved_vlan_type; - u16 params; -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12) -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12 -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 -#elif defined(__LITTLE_ENDIAN) - u16 params; -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12) -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12 -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) -#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 - u16 reserved_vlan_type; -#endif -#if defined(__BIG_ENDIAN) - u8 local_addr_2; - u8 local_addr_3; - u8 local_addr_4; - u8 local_addr_5; -#elif defined(__LITTLE_ENDIAN) - u8 local_addr_5; - u8 local_addr_4; - u8 local_addr_3; - u8 local_addr_2; -#endif -}; - -/* - * Flags used in FCoE context section - 1 byte - */ -struct xstorm_fcoe_context_flags { - u8 flags; -#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q (0x3<<0) -#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0 -#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2) -#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2 -#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ (0x1<<3) -#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ_SHIFT 3 -#define 
XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4) -#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4 -#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5) -#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5 -#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6) -#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6 -#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN (0x1<<7) -#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN_SHIFT 7 -}; - -struct xstorm_fcoe_tce { - struct fcoe_tce_tx_only txwr; - struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; -}; - -/* - * FCP_DATA parameters required for transmission - */ -struct xstorm_fcoe_fcp_data { - u32 io_rem; -#if defined(__BIG_ENDIAN) - u16 cached_sge_off; - u8 cached_num_sges; - u8 cached_sge_idx; -#elif defined(__LITTLE_ENDIAN) - u8 cached_sge_idx; - u8 cached_num_sges; - u16 cached_sge_off; -#endif - u32 buf_addr_hi_0; - u32 buf_addr_lo_0; -#if defined(__BIG_ENDIAN) - u16 num_of_pending_tasks; - u16 buf_len_0; -#elif defined(__LITTLE_ENDIAN) - u16 buf_len_0; - u16 num_of_pending_tasks; -#endif - u32 buf_addr_hi_1; - u32 buf_addr_lo_1; -#if defined(__BIG_ENDIAN) - u16 task_pbe_idx_off; - u16 buf_len_1; -#elif defined(__LITTLE_ENDIAN) - u16 buf_len_1; - u16 task_pbe_idx_off; -#endif - u32 buf_addr_hi_2; - u32 buf_addr_lo_2; -#if defined(__BIG_ENDIAN) - u16 ox_id; - u16 buf_len_2; -#elif defined(__LITTLE_ENDIAN) - u16 buf_len_2; - u16 ox_id; -#endif -}; - -/* - * vlan configuration - */ -struct xstorm_fcoe_vlan_conf { - u8 vlan_conf; -#define XSTORM_FCOE_VLAN_CONF_PRIORITY (0x7<<0) -#define XSTORM_FCOE_VLAN_CONF_PRIORITY_SHIFT 0 -#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG (0x1<<3) -#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG_SHIFT 3 -#define XSTORM_FCOE_VLAN_CONF_RESERVED (0xF<<4) -#define XSTORM_FCOE_VLAN_CONF_RESERVED_SHIFT 4 -}; - -/* - * FCoE 16-bits vlan structure - */ -struct fcoe_vlan_fields { - u16 fields; -#define FCOE_VLAN_FIELDS_VID (0xFFF<<0) -#define FCOE_VLAN_FIELDS_VID_SHIFT 0 -#define FCOE_VLAN_FIELDS_CLI (0x1<<12) -#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 -#define FCOE_VLAN_FIELDS_PRI (0x7<<13) -#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 -}; - -/* - * FCoE 16-bits vlan union - */ -union fcoe_vlan_field_union { - struct fcoe_vlan_fields fields; - u16 val; -}; - -/* - * FCoE 16-bits vlan, vif union - */ -union fcoe_vlan_vif_field_union { - union fcoe_vlan_field_union vlan; - u16 vif; -}; - -/* - * FCoE context section - */ -struct xstorm_fcoe_context_section { -#if defined(__BIG_ENDIAN) - u8 cs_ctl; - u8 s_id[3]; -#elif defined(__LITTLE_ENDIAN) - u8 s_id[3]; - u8 cs_ctl; -#endif -#if defined(__BIG_ENDIAN) - u8 rctl; - u8 d_id[3]; -#elif defined(__LITTLE_ENDIAN) - u8 d_id[3]; - u8 rctl; -#endif -#if defined(__BIG_ENDIAN) - u16 sq_xfrq_lcq_confq_size; - u16 tx_max_fc_pay_len; -#elif defined(__LITTLE_ENDIAN) - u16 tx_max_fc_pay_len; - u16 sq_xfrq_lcq_confq_size; -#endif - u32 lcq_prod; -#if defined(__BIG_ENDIAN) - u8 port_id; - u8 func_id; - u8 seq_id; - struct xstorm_fcoe_context_flags tx_flags; -#elif defined(__LITTLE_ENDIAN) - struct xstorm_fcoe_context_flags tx_flags; - u8 seq_id; - u8 func_id; - u8 port_id; -#endif -#if defined(__BIG_ENDIAN) - u16 mtu; - u8 func_mode; - u8 vnic_id; -#elif defined(__LITTLE_ENDIAN) - u8 vnic_id; - u8 func_mode; - u16 mtu; -#endif - struct regpair confq_curr_page_addr; - struct fcoe_cached_wqe cached_wqe[8]; - struct regpair lcq_base_addr; - struct xstorm_fcoe_tce tce; - struct xstorm_fcoe_fcp_data fcp_data; -#if defined(__BIG_ENDIAN) - u8 tx_max_conc_seqs_c3; - u8 vlan_flag; - u8 dcb_val; - u8 
data_pb_cmd_size; -#elif defined(__LITTLE_ENDIAN) - u8 data_pb_cmd_size; - u8 dcb_val; - u8 vlan_flag; - u8 tx_max_conc_seqs_c3; -#endif -#if defined(__BIG_ENDIAN) - u16 fcoe_tx_stat_params_ram_addr; - u16 fcoe_tx_fc_seq_ram_addr; -#elif defined(__LITTLE_ENDIAN) - u16 fcoe_tx_fc_seq_ram_addr; - u16 fcoe_tx_stat_params_ram_addr; -#endif -#if defined(__BIG_ENDIAN) - u8 fcp_cmd_line_credit; - u8 eth_hdr_size; - u16 pbf_addr; -#elif defined(__LITTLE_ENDIAN) - u16 pbf_addr; - u8 eth_hdr_size; - u8 fcp_cmd_line_credit; -#endif -#if defined(__BIG_ENDIAN) - union fcoe_vlan_vif_field_union multi_func_val; - u8 page_log_size; - struct xstorm_fcoe_vlan_conf orig_vlan_conf; -#elif defined(__LITTLE_ENDIAN) - struct xstorm_fcoe_vlan_conf orig_vlan_conf; - u8 page_log_size; - union fcoe_vlan_vif_field_union multi_func_val; -#endif -#if defined(__BIG_ENDIAN) - u16 fcp_cmd_frame_size; - u16 pbf_addr_ff; -#elif defined(__LITTLE_ENDIAN) - u16 pbf_addr_ff; - u16 fcp_cmd_frame_size; -#endif -#if defined(__BIG_ENDIAN) - u8 vlan_num; - u8 cos; - u8 cache_xfrq_cons; - u8 cache_sq_cons; -#elif defined(__LITTLE_ENDIAN) - u8 cache_sq_cons; - u8 cache_xfrq_cons; - u8 cos; - u8 vlan_num; -#endif - u32 verify_tx_seq; -}; - -/* - * Xstorm FCoE Storm Context - */ -struct xstorm_fcoe_st_context { - struct xstorm_fcoe_eth_context_section eth; - struct xstorm_fcoe_context_section fcoe; -}; - -/* - * Fcoe connection context - */ -struct fcoe_context { - struct ustorm_fcoe_st_context ustorm_st_context; - struct tstorm_fcoe_st_context tstorm_st_context; - struct xstorm_fcoe_ag_context xstorm_ag_context; - struct tstorm_fcoe_ag_context tstorm_ag_context; - struct ustorm_fcoe_ag_context ustorm_ag_context; - struct timers_block_context timers_context; - struct xstorm_fcoe_st_context xstorm_st_context; -}; - -/* - * FCoE init params passed by driver to FW in FCoE init ramrod - * $$KEEP_ENDIANNESS$$ - */ -struct fcoe_init_ramrod_params { - struct fcoe_kwqe_init1 init_kwqe1; - struct fcoe_kwqe_init2 init_kwqe2; - struct fcoe_kwqe_init3 init_kwqe3; - struct regpair eq_pbl_base; - __le32 eq_pbl_size; - __le32 reserved2; - __le16 eq_prod; - __le16 sb_num; - u8 sb_id; - u8 reserved0; - __le16 reserved1; -}; - -/* - * FCoE statistics params buffer passed by driver to FW in FCoE statistics - * ramrod $$KEEP_ENDIANNESS$$ - */ -struct fcoe_stat_ramrod_params { - struct fcoe_kwqe_stat stat_kwqe; -}; - -/* - * CQ DB CQ producer and pending completion counter - */ -struct iscsi_cq_db_prod_pnd_cmpltn_cnt { -#if defined(__BIG_ENDIAN) - u16 cntr; - u16 prod; -#elif defined(__LITTLE_ENDIAN) - u16 prod; - u16 cntr; -#endif -}; - -/* - * CQ DB pending completion ITT array - */ -struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr { - struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8]; -}; - -/* - * Cstorm CQ sequence to notify array, updated by driver - */ -struct iscsi_cq_db_sqn_2_notify_arr { - u16 sqn[8]; -}; - -/* - * Cstorm iSCSI Storm Context - */ -struct cstorm_iscsi_st_context { - struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr; - struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr; - struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr; - struct regpair hq_pbl_base; - struct regpair hq_curr_pbe; - struct regpair task_pbl_base; - struct regpair cq_db_base; -#if defined(__BIG_ENDIAN) - u16 hq_bd_itt; - u16 iscsi_conn_id; -#elif defined(__LITTLE_ENDIAN) - u16 iscsi_conn_id; - u16 hq_bd_itt; -#endif - u32 hq_bd_data_segment_len; - u32 hq_bd_buffer_offset; -#if defined(__BIG_ENDIAN) - u8 rsrv; - u8 cq_proc_en_bit_map; - 
u8 cq_pend_comp_itt_valid_bit_map; - u8 hq_bd_opcode; -#elif defined(__LITTLE_ENDIAN) - u8 hq_bd_opcode; - u8 cq_pend_comp_itt_valid_bit_map; - u8 cq_proc_en_bit_map; - u8 rsrv; -#endif - u32 hq_tcp_seq; -#if defined(__BIG_ENDIAN) - u16 flags; -#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0) -#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0 -#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1) -#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1 -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2) -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2 -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3) -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3 -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4) -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4 -#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5) -#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5 - u16 hq_cons; -#elif defined(__LITTLE_ENDIAN) - u16 hq_cons; - u16 flags; -#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0) -#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0 -#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1) -#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1 -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2) -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2 -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3) -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3 -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4) -#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4 -#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5) -#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5 -#endif - struct regpair rsrv1; -}; - - -/* - * SCSI read/write SQ WQE - */ -struct iscsi_cmd_pdu_hdr_little_endian { -#if defined(__BIG_ENDIAN) - u8 opcode; - u8 op_attr; -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0 -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3 -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5 -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6 -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 - u16 rsrv0; -#elif defined(__LITTLE_ENDIAN) - u16 rsrv0; - u8 op_attr; -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0 -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3 -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5 -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6 -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 - u8 opcode; -#endif - u32 data_fields; -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) -#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 - struct regpair lun; - u32 itt; - u32 expected_data_transfer_length; - u32 cmd_sn; - u32 
exp_stat_sn; - u32 scsi_command_block[4]; -}; - - -/* - * Buffer per connection, used in Tstorm - */ -struct iscsi_conn_buf { - struct regpair reserved[8]; -}; - - -/* - * iSCSI context region, used only in iSCSI - */ -struct ustorm_iscsi_rq_db { - struct regpair pbl_base; - struct regpair curr_pbe; -}; - -/* - * iSCSI context region, used only in iSCSI - */ -struct ustorm_iscsi_r2tq_db { - struct regpair pbl_base; - struct regpair curr_pbe; -}; - -/* - * iSCSI context region, used only in iSCSI - */ -struct ustorm_iscsi_cq_db { -#if defined(__BIG_ENDIAN) - u16 cq_sn; - u16 prod; -#elif defined(__LITTLE_ENDIAN) - u16 prod; - u16 cq_sn; -#endif - struct regpair curr_pbe; -}; - -/* - * iSCSI context region, used only in iSCSI - */ -struct rings_db { - struct ustorm_iscsi_rq_db rq; - struct ustorm_iscsi_r2tq_db r2tq; - struct ustorm_iscsi_cq_db cq[8]; -#if defined(__BIG_ENDIAN) - u16 rq_prod; - u16 r2tq_prod; -#elif defined(__LITTLE_ENDIAN) - u16 r2tq_prod; - u16 rq_prod; -#endif - struct regpair cq_pbl_base; -}; - -/* - * iSCSI context region, used only in iSCSI - */ -struct ustorm_iscsi_placement_db { - u32 sgl_base_lo; - u32 sgl_base_hi; - u32 local_sge_0_address_hi; - u32 local_sge_0_address_lo; -#if defined(__BIG_ENDIAN) - u16 curr_sge_offset; - u16 local_sge_0_size; -#elif defined(__LITTLE_ENDIAN) - u16 local_sge_0_size; - u16 curr_sge_offset; -#endif - u32 local_sge_1_address_hi; - u32 local_sge_1_address_lo; -#if defined(__BIG_ENDIAN) - u8 exp_padding_2b; - u8 nal_len_3b; - u16 local_sge_1_size; -#elif defined(__LITTLE_ENDIAN) - u16 local_sge_1_size; - u8 nal_len_3b; - u8 exp_padding_2b; -#endif -#if defined(__BIG_ENDIAN) - u8 sgl_size; - u8 local_sge_index_2b; - u16 reserved7; -#elif defined(__LITTLE_ENDIAN) - u16 reserved7; - u8 local_sge_index_2b; - u8 sgl_size; -#endif - u32 rem_pdu; - u32 place_db_bitfield_1; -#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0) -#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0 -#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24) -#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24 - u32 place_db_bitfield_2; -#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0) -#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0 -#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24) -#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24 - u32 nal; -#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0) -#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0 -#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0xFF<<24) -#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 24 -}; - -/* - * Ustorm iSCSI Storm Context - */ -struct ustorm_iscsi_st_context { - u32 exp_stat_sn; - u32 exp_data_sn; - struct rings_db ring; - struct regpair task_pbl_base; - struct regpair tce_phy_addr; - struct ustorm_iscsi_placement_db place_db; - u32 reserved8; - u32 rem_rcv_len; -#if defined(__BIG_ENDIAN) - u16 hdr_itt; - u16 iscsi_conn_id; -#elif defined(__LITTLE_ENDIAN) - u16 iscsi_conn_id; - u16 hdr_itt; -#endif - u32 nal_bytes; -#if defined(__BIG_ENDIAN) - u8 hdr_second_byte_union; - u8 bitfield_0; -#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0) -#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 -#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) -#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 -#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2) -#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2 -#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3) -#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3 - u8 
task_pdu_cache_index; - u8 task_pbe_cache_index; -#elif defined(__LITTLE_ENDIAN) - u8 task_pbe_cache_index; - u8 task_pdu_cache_index; - u8 bitfield_0; -#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0) -#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 -#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) -#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 -#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2) -#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2 -#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3) -#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3 - u8 hdr_second_byte_union; -#endif -#if defined(__BIG_ENDIAN) - u16 reserved3; - u8 reserved2; - u8 acDecrement; -#elif defined(__LITTLE_ENDIAN) - u8 acDecrement; - u8 reserved2; - u16 reserved3; -#endif - u32 task_stat; -#if defined(__BIG_ENDIAN) - u8 hdr_opcode; - u8 num_cqs; - u16 reserved5; -#elif defined(__LITTLE_ENDIAN) - u16 reserved5; - u8 num_cqs; - u8 hdr_opcode; -#endif - u32 negotiated_rx; -#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0) -#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0 -#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24) -#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24 - u32 negotiated_rx_and_flags; -#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0) -#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0 -#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24) -#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24 -#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25) -#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25 -#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26) -#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26 -#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27) -#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27 -#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28) -#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28 -#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29) -#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29 -#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31) -#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31 -}; - -/* - * TCP context region, shared in TOE, RDMA and ISCSI - */ -struct tstorm_tcp_st_context_section { - u32 flags1; -#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0) -#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0 -#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24) -#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24 -#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25) -#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25 -#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26) -#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26 -#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27) -#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27 -#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28) -#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28 -#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29) -#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29 -#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30) -#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30 -#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31) -#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31 - u32 flags2; -#define 
TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0) -#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0 -#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24) -#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24 -#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25) -#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25 -#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26) -#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26 -#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27) -#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27 -#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28) -#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28 -#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29) -#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29 -#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30) -#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30 -#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31) -#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31 -#if defined(__BIG_ENDIAN) - u16 mss; - u8 tcp_sm_state; - u8 rto_exp; -#elif defined(__LITTLE_ENDIAN) - u8 rto_exp; - u8 tcp_sm_state; - u16 mss; -#endif - u32 rcv_nxt; - u32 timestamp_recent; - u32 timestamp_recent_time; - u32 cwnd; - u32 ss_thresh; - u32 cwnd_accum; - u32 prev_seg_seq; - u32 expected_rel_seq; - u32 recover; -#if defined(__BIG_ENDIAN) - u8 retransmit_count; - u8 ka_max_probe_count; - u8 persist_probe_count; - u8 ka_probe_count; -#elif defined(__LITTLE_ENDIAN) - u8 ka_probe_count; - u8 persist_probe_count; - u8 ka_max_probe_count; - u8 retransmit_count; -#endif -#if defined(__BIG_ENDIAN) - u8 statistics_counter_id; - u8 ooo_support_mode; - u8 snd_wnd_scale; - u8 dup_ack_count; -#elif defined(__LITTLE_ENDIAN) - u8 dup_ack_count; - u8 snd_wnd_scale; - u8 ooo_support_mode; - u8 statistics_counter_id; -#endif - u32 retransmit_start_time; - u32 ka_timeout; - u32 ka_interval; - u32 isle_start_seq; - u32 isle_end_seq; -#if defined(__BIG_ENDIAN) - u16 second_isle_address; - u16 recent_seg_wnd; -#elif defined(__LITTLE_ENDIAN) - u16 recent_seg_wnd; - u16 second_isle_address; -#endif -#if defined(__BIG_ENDIAN) - u8 max_isles_ever_happened; - u8 isles_number; - u16 last_isle_address; -#elif defined(__LITTLE_ENDIAN) - u16 last_isle_address; - u8 isles_number; - u8 max_isles_ever_happened; -#endif - u32 max_rt_time; -#if defined(__BIG_ENDIAN) - u16 lsb_mac_address; - u16 vlan_id; -#elif defined(__LITTLE_ENDIAN) - u16 vlan_id; - u16 lsb_mac_address; -#endif -#if defined(__BIG_ENDIAN) - u16 msb_mac_address; - u16 mid_mac_address; -#elif defined(__LITTLE_ENDIAN) - u16 mid_mac_address; - u16 msb_mac_address; -#endif - u32 rightmost_received_seq; -}; - -/* - * Termination variables - */ -struct iscsi_term_vars { - u8 BitMap; -#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0) -#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0 -#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4) -#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4 -#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5) -#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5 -#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6) -#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6 -#define ISCSI_TERM_VARS_RSRV (0x1<<7) -#define ISCSI_TERM_VARS_RSRV_SHIFT 7 -}; - -/* - * iSCSI context region, used only in iSCSI - */ -struct tstorm_iscsi_st_context_section { - u32 nalPayload; - u32 b2nh; -#if 
defined(__BIG_ENDIAN) - u16 rq_cons; - u8 flags; -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7 - u8 hdr_bytes_2_fetch; -#elif defined(__LITTLE_ENDIAN) - u8 hdr_bytes_2_fetch; - u8 flags; -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5 -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7) -#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7 - u16 rq_cons; -#endif - struct regpair rq_db_phy_addr; -#if defined(__BIG_ENDIAN) - struct iscsi_term_vars term_vars; - u8 rsrv1; - u16 iscsi_conn_id; -#elif defined(__LITTLE_ENDIAN) - u16 iscsi_conn_id; - u8 rsrv1; - struct iscsi_term_vars term_vars; -#endif - u32 process_nxt; -}; - -/* - * The iSCSI non-aggregative context of Tstorm - */ -struct tstorm_iscsi_st_context { - struct tstorm_tcp_st_context_section tcp; - struct tstorm_iscsi_st_context_section iscsi; -}; - -/* - * Ethernet context section, shared in TOE, RDMA and ISCSI - */ -struct xstorm_eth_context_section { -#if defined(__BIG_ENDIAN) - u8 remote_addr_4; - u8 remote_addr_5; - u8 local_addr_0; - u8 local_addr_1; -#elif defined(__LITTLE_ENDIAN) - u8 local_addr_1; - u8 local_addr_0; - u8 remote_addr_5; - u8 remote_addr_4; -#endif -#if defined(__BIG_ENDIAN) - u8 remote_addr_0; - u8 remote_addr_1; - u8 remote_addr_2; - u8 remote_addr_3; -#elif defined(__LITTLE_ENDIAN) - u8 remote_addr_3; - u8 remote_addr_2; - u8 remote_addr_1; - u8 remote_addr_0; -#endif -#if defined(__BIG_ENDIAN) - u16 reserved_vlan_type; - u16 params; -#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) -#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 -#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12) -#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12 -#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) -#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 -#elif defined(__LITTLE_ENDIAN) - u16 params; -#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) -#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 -#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12) -#define 
XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12 -#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) -#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 - u16 reserved_vlan_type; -#endif -#if defined(__BIG_ENDIAN) - u8 local_addr_2; - u8 local_addr_3; - u8 local_addr_4; - u8 local_addr_5; -#elif defined(__LITTLE_ENDIAN) - u8 local_addr_5; - u8 local_addr_4; - u8 local_addr_3; - u8 local_addr_2; -#endif -}; - -/* - * IpV4 context section, shared in TOE, RDMA and ISCSI - */ -struct xstorm_ip_v4_context_section { -#if defined(__BIG_ENDIAN) - u16 __pbf_hdr_cmd_rsvd_id; - u16 __pbf_hdr_cmd_rsvd_flags_offset; -#elif defined(__LITTLE_ENDIAN) - u16 __pbf_hdr_cmd_rsvd_flags_offset; - u16 __pbf_hdr_cmd_rsvd_id; -#endif -#if defined(__BIG_ENDIAN) - u8 __pbf_hdr_cmd_rsvd_ver_ihl; - u8 tos; - u16 __pbf_hdr_cmd_rsvd_length; -#elif defined(__LITTLE_ENDIAN) - u16 __pbf_hdr_cmd_rsvd_length; - u8 tos; - u8 __pbf_hdr_cmd_rsvd_ver_ihl; -#endif - u32 ip_local_addr; -#if defined(__BIG_ENDIAN) - u8 ttl; - u8 __pbf_hdr_cmd_rsvd_protocol; - u16 __pbf_hdr_cmd_rsvd_csum; -#elif defined(__LITTLE_ENDIAN) - u16 __pbf_hdr_cmd_rsvd_csum; - u8 __pbf_hdr_cmd_rsvd_protocol; - u8 ttl; -#endif - u32 __pbf_hdr_cmd_rsvd_1; - u32 ip_remote_addr; -}; - -/* - * context section, shared in TOE, RDMA and ISCSI - */ -struct xstorm_padded_ip_v4_context_section { - struct xstorm_ip_v4_context_section ip_v4; - u32 reserved1[4]; -}; - -/* - * IpV6 context section, shared in TOE, RDMA and ISCSI - */ -struct xstorm_ip_v6_context_section { -#if defined(__BIG_ENDIAN) - u16 pbf_hdr_cmd_rsvd_payload_len; - u8 pbf_hdr_cmd_rsvd_nxt_hdr; - u8 hop_limit; -#elif defined(__LITTLE_ENDIAN) - u8 hop_limit; - u8 pbf_hdr_cmd_rsvd_nxt_hdr; - u16 pbf_hdr_cmd_rsvd_payload_len; -#endif - u32 priority_flow_label; -#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL (0xFFFFF<<0) -#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT 0 -#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS (0xFF<<20) -#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT 20 -#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER (0xF<<28) -#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER_SHIFT 28 - u32 ip_local_addr_lo_hi; - u32 ip_local_addr_lo_lo; - u32 ip_local_addr_hi_hi; - u32 ip_local_addr_hi_lo; - u32 ip_remote_addr_lo_hi; - u32 ip_remote_addr_lo_lo; - u32 ip_remote_addr_hi_hi; - u32 ip_remote_addr_hi_lo; -}; - -union xstorm_ip_context_section_types { - struct xstorm_padded_ip_v4_context_section padded_ip_v4; - struct xstorm_ip_v6_context_section ip_v6; -}; - -/* - * TCP context section, shared in TOE, RDMA and ISCSI - */ -struct xstorm_tcp_context_section { - u32 snd_max; -#if defined(__BIG_ENDIAN) - u16 remote_port; - u16 local_port; -#elif defined(__LITTLE_ENDIAN) - u16 local_port; - u16 remote_port; -#endif -#if defined(__BIG_ENDIAN) - u8 original_nagle_1b; - u8 ts_enabled; - u16 tcp_params; -#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0) -#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0 -#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8) -#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8 -#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9) -#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 -#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) -#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 -#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11) -#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11 -#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) -#define 
XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 -#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) -#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13 -#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14) -#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 tcp_params; -#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0) -#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0 -#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8) -#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8 -#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9) -#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 -#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) -#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 -#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11) -#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11 -#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) -#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 -#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) -#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13 -#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14) -#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14 - u8 ts_enabled; - u8 original_nagle_1b; -#endif -#if defined(__BIG_ENDIAN) - u16 pseudo_csum; - u16 window_scaling_factor; -#elif defined(__LITTLE_ENDIAN) - u16 window_scaling_factor; - u16 pseudo_csum; -#endif -#if defined(__BIG_ENDIAN) - u16 reserved2; - u8 statistics_counter_id; - u8 statistics_params; -#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0) -#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0 -#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1) -#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 -#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2) -#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2 -#elif defined(__LITTLE_ENDIAN) - u8 statistics_params; -#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0) -#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0 -#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1) -#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 -#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2) -#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2 - u8 statistics_counter_id; - u16 reserved2; -#endif - u32 ts_time_diff; - u32 __next_timer_expir; -}; - -/* - * Common context section, shared in TOE, RDMA and ISCSI - */ -struct xstorm_common_context_section { - struct xstorm_eth_context_section ethernet; - union xstorm_ip_context_section_types ip_union; - struct xstorm_tcp_context_section tcp; -#if defined(__BIG_ENDIAN) - u8 __dcb_val; - u8 flags; -#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0) -#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0 -#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1) -#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1 -#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4) -#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4 -#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5) -#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5 - u8 reserved; - u8 ip_version_1b; -#elif defined(__LITTLE_ENDIAN) - u8 ip_version_1b; - u8 reserved; - u8 flags; -#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0) -#define 
XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0 -#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1) -#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1 -#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4) -#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4 -#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5) -#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5 - u8 __dcb_val; -#endif -}; - -/* - * Flags used in ISCSI context section - */ -struct xstorm_iscsi_context_flags { - u8 flags; -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA (0x1<<0) -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA_SHIFT 0 -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T (0x1<<1) -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T_SHIFT 1 -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST (0x1<<2) -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST_SHIFT 2 -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST (0x1<<3) -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST_SHIFT 3 -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN (0x1<<4) -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN_SHIFT 4 -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ (0x1<<5) -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ_SHIFT 5 -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT (0x1<<6) -#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT_SHIFT 6 -#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4 (0x1<<7) -#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4_SHIFT 7 -}; - -struct iscsi_task_context_entry_x { - u32 data_out_buffer_offset; - u32 itt; - u32 data_sn; -}; - -struct iscsi_task_context_entry_xuc_x_write_only { - u32 tx_r2t_sn; -}; - -struct iscsi_task_context_entry_xuc_xu_write_both { - u32 sgl_base_lo; - u32 sgl_base_hi; -#if defined(__BIG_ENDIAN) - u8 sgl_size; - u8 sge_index; - u16 sge_offset; -#elif defined(__LITTLE_ENDIAN) - u16 sge_offset; - u8 sge_index; - u8 sgl_size; -#endif -}; - -/* - * iSCSI context section - */ -struct xstorm_iscsi_context_section { - u32 first_burst_length; - u32 max_send_pdu_length; - struct regpair sq_pbl_base; - struct regpair sq_curr_pbe; - struct regpair hq_pbl_base; - struct regpair hq_curr_pbe_base; - struct regpair r2tq_pbl_base; - struct regpair r2tq_curr_pbe_base; - struct regpair task_pbl_base; -#if defined(__BIG_ENDIAN) - u16 data_out_count; - struct xstorm_iscsi_context_flags flags; - u8 task_pbl_cache_idx; -#elif defined(__LITTLE_ENDIAN) - u8 task_pbl_cache_idx; - struct xstorm_iscsi_context_flags flags; - u16 data_out_count; -#endif - u32 seq_more_2_send; - u32 pdu_more_2_send; - struct iscsi_task_context_entry_x temp_tce_x; - struct iscsi_task_context_entry_xuc_x_write_only temp_tce_x_wr; - struct iscsi_task_context_entry_xuc_xu_write_both temp_tce_xu_wr; - struct regpair lun; - u32 exp_data_transfer_len_ttt; - u32 pdu_data_2_rxmit; - u32 rxmit_bytes_2_dr; -#if defined(__BIG_ENDIAN) - u16 rxmit_sge_offset; - u16 hq_rxmit_cons; -#elif defined(__LITTLE_ENDIAN) - u16 hq_rxmit_cons; - u16 rxmit_sge_offset; -#endif -#if defined(__BIG_ENDIAN) - u16 r2tq_cons; - u8 rxmit_flags; -#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3) -#define 
XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7 - u8 rxmit_sge_idx; -#elif defined(__LITTLE_ENDIAN) - u8 rxmit_sge_idx; - u8 rxmit_flags; -#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5 -#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7) -#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7 - u16 r2tq_cons; -#endif - u32 hq_rxmit_tcp_seq; -}; - -/* - * Xstorm iSCSI Storm Context - */ -struct xstorm_iscsi_st_context { - struct xstorm_common_context_section common; - struct xstorm_iscsi_context_section iscsi; -}; - -/* - * Iscsi connection context - */ -struct iscsi_context { - struct ustorm_iscsi_st_context ustorm_st_context; - struct tstorm_iscsi_st_context tstorm_st_context; - struct xstorm_iscsi_ag_context xstorm_ag_context; - struct tstorm_iscsi_ag_context tstorm_ag_context; - struct cstorm_iscsi_ag_context cstorm_ag_context; - struct ustorm_iscsi_ag_context ustorm_ag_context; - struct timers_block_context timers_context; - struct regpair upb_context; - struct xstorm_iscsi_st_context xstorm_st_context; - struct regpair xpb_context; - struct cstorm_iscsi_st_context cstorm_st_context; -}; - - -/* - * PDU header of an iSCSI DATA-OUT - */ -struct iscsi_data_pdu_hdr_little_endian { -#if defined(__BIG_ENDIAN) - u8 opcode; - u8 op_attr; -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 - u16 rsrv0; -#elif defined(__LITTLE_ENDIAN) - u16 rsrv0; - u8 op_attr; -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 - u8 opcode; -#endif - u32 data_fields; -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) -#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 - struct regpair lun; - u32 itt; - u32 ttt; - u32 rsrv2; - u32 exp_stat_sn; - u32 rsrv3; - u32 data_sn; - u32 buffer_offset; - u32 rsrv4; -}; - - -/* - * PDU header of an iSCSI login request - */ -struct iscsi_login_req_hdr_little_endian { -#if defined(__BIG_ENDIAN) - u8 opcode; - u8 op_attr; -#define 
ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2 -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4 -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7 - u8 version_max; - u8 version_min; -#elif defined(__LITTLE_ENDIAN) - u8 version_min; - u8 version_max; - u8 op_attr; -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2 -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4 -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7 - u8 opcode; -#endif - u32 data_fields; -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) -#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 - u32 isid_lo; -#if defined(__BIG_ENDIAN) - u16 isid_hi; - u16 tsih; -#elif defined(__LITTLE_ENDIAN) - u16 tsih; - u16 isid_hi; -#endif - u32 itt; -#if defined(__BIG_ENDIAN) - u16 cid; - u16 rsrv1; -#elif defined(__LITTLE_ENDIAN) - u16 rsrv1; - u16 cid; -#endif - u32 cmd_sn; - u32 exp_stat_sn; - u32 rsrv2[4]; -}; - -/* - * PDU header of an iSCSI logout request - */ -struct iscsi_logout_req_hdr_little_endian { -#if defined(__BIG_ENDIAN) - u8 opcode; - u8 op_attr; -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0) -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0 -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 - u16 rsrv0; -#elif defined(__LITTLE_ENDIAN) - u16 rsrv0; - u8 op_attr; -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0) -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0 -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 - u8 opcode; -#endif - u32 data_fields; -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) -#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 - u32 rsrv2[2]; - u32 itt; -#if defined(__BIG_ENDIAN) - u16 cid; - u16 rsrv1; -#elif defined(__LITTLE_ENDIAN) - u16 rsrv1; - u16 cid; -#endif - u32 cmd_sn; - u32 exp_stat_sn; - u32 rsrv3[4]; -}; - -/* - * PDU header of an iSCSI TMF request - */ -struct iscsi_tmf_req_hdr_little_endian { -#if defined(__BIG_ENDIAN) - u8 opcode; - u8 op_attr; -#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0) -#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0 -#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) -#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 
- u16 rsrv0;
-#elif defined(__LITTLE_ENDIAN)
- u16 rsrv0;
- u8 op_attr;
-#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
-#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
-#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
-#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
- u8 opcode;
-#endif
- u32 data_fields;
-#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
-#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
-#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
-#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
- struct regpair lun;
- u32 itt;
- u32 referenced_task_tag;
- u32 cmd_sn;
- u32 exp_stat_sn;
- u32 ref_cmd_sn;
- u32 exp_data_sn;
- u32 rsrv2[2];
-};
-
-/*
- * PDU header of an iSCSI Text request
- */
-struct iscsi_text_req_hdr_little_endian {
-#if defined(__BIG_ENDIAN)
- u8 opcode;
- u8 op_attr;
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
- u16 rsrv0;
-#elif defined(__LITTLE_ENDIAN)
- u16 rsrv0;
- u8 op_attr;
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
- u8 opcode;
-#endif
- u32 data_fields;
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
-#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
- struct regpair lun;
- u32 itt;
- u32 ttt;
- u32 cmd_sn;
- u32 exp_stat_sn;
- u32 rsrv3[4];
-};
-
-/*
- * PDU header of an iSCSI Nop-Out
- */
-struct iscsi_nop_out_hdr_little_endian {
-#if defined(__BIG_ENDIAN)
- u8 opcode;
- u8 op_attr;
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
- u16 rsrv0;
-#elif defined(__LITTLE_ENDIAN)
- u16 rsrv0;
- u8 op_attr;
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
- u8 opcode;
-#endif
- u32 data_fields;
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
-#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
- struct regpair lun;
- u32 itt;
- u32 ttt;
- u32 cmd_sn;
- u32 exp_stat_sn;
- u32 rsrv3[4];
-};
-
-/*
- * iscsi pdu headers in little endian form.
- */ -union iscsi_pdu_headers_little_endian { - u32 fullHeaderSize[12]; - struct iscsi_cmd_pdu_hdr_little_endian command_pdu_hdr; - struct iscsi_data_pdu_hdr_little_endian data_out_pdu_hdr; - struct iscsi_login_req_hdr_little_endian login_req_pdu_hdr; - struct iscsi_logout_req_hdr_little_endian logout_req_pdu_hdr; - struct iscsi_tmf_req_hdr_little_endian tmf_req_pdu_hdr; - struct iscsi_text_req_hdr_little_endian text_req_pdu_hdr; - struct iscsi_nop_out_hdr_little_endian nop_out_pdu_hdr; -}; - -struct iscsi_hq_bd { - union iscsi_pdu_headers_little_endian pdu_header; -#if defined(__BIG_ENDIAN) - u16 reserved1; - u16 lcl_cmp_flg; -#elif defined(__LITTLE_ENDIAN) - u16 lcl_cmp_flg; - u16 reserved1; -#endif - u32 sgl_base_lo; - u32 sgl_base_hi; -#if defined(__BIG_ENDIAN) - u8 sgl_size; - u8 sge_index; - u16 sge_offset; -#elif defined(__LITTLE_ENDIAN) - u16 sge_offset; - u8 sge_index; - u8 sgl_size; -#endif -}; - - -/* - * CQE data for L2 OOO connection $$KEEP_ENDIANNESS$$ - */ -struct iscsi_l2_ooo_data { - __le32 iscsi_cid; - u8 drop_isle; - u8 drop_size; - u8 ooo_opcode; - u8 ooo_isle; - u8 reserved[8]; -}; - - - - - - -struct iscsi_task_context_entry_xuc_c_write_only { - u32 total_data_acked; -}; - -struct iscsi_task_context_r2t_table_entry { - u32 ttt; - u32 desired_data_len; -}; - -struct iscsi_task_context_entry_xuc_u_write_only { - u32 exp_r2t_sn; - struct iscsi_task_context_r2t_table_entry r2t_table[4]; -#if defined(__BIG_ENDIAN) - u16 data_in_count; - u8 cq_id; - u8 valid_1b; -#elif defined(__LITTLE_ENDIAN) - u8 valid_1b; - u8 cq_id; - u16 data_in_count; -#endif -}; - -struct iscsi_task_context_entry_xuc { - struct iscsi_task_context_entry_xuc_c_write_only write_c; - u32 exp_data_transfer_len; - struct iscsi_task_context_entry_xuc_x_write_only write_x; - u32 lun_lo; - struct iscsi_task_context_entry_xuc_xu_write_both write_xu; - u32 lun_hi; - struct iscsi_task_context_entry_xuc_u_write_only write_u; -}; - -struct iscsi_task_context_entry_u { - u32 exp_r2t_buff_offset; - u32 rem_rcv_len; - u32 exp_data_sn; -}; - -struct iscsi_task_context_entry { - struct iscsi_task_context_entry_x tce_x; -#if defined(__BIG_ENDIAN) - u16 data_out_count; - u16 rsrv0; -#elif defined(__LITTLE_ENDIAN) - u16 rsrv0; - u16 data_out_count; -#endif - struct iscsi_task_context_entry_xuc tce_xuc; - struct iscsi_task_context_entry_u tce_u; - u32 rsrv1[7]; -}; - - - - - - - - -struct iscsi_task_context_entry_xuc_x_init_only { - struct regpair lun; - u32 exp_data_transfer_len; -}; - - - - - - - - - - - - - - - - - -/* - * ipv6 structure - */ -struct ip_v6_addr { - u32 ip_addr_lo_lo; - u32 ip_addr_lo_hi; - u32 ip_addr_hi_lo; - u32 ip_addr_hi_hi; -}; - - - -/* - * l5cm- connection identification params - */ -struct l5cm_conn_addr_params { - u32 pmtu; -#if defined(__BIG_ENDIAN) - u8 remote_addr_3; - u8 remote_addr_2; - u8 remote_addr_1; - u8 remote_addr_0; -#elif defined(__LITTLE_ENDIAN) - u8 remote_addr_0; - u8 remote_addr_1; - u8 remote_addr_2; - u8 remote_addr_3; -#endif -#if defined(__BIG_ENDIAN) - u16 params; -#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0) -#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0 -#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1) -#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1 - u8 remote_addr_5; - u8 remote_addr_4; -#elif defined(__LITTLE_ENDIAN) - u8 remote_addr_4; - u8 remote_addr_5; - u16 params; -#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0) -#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0 -#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1) -#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1 
-#endif - struct ip_v6_addr local_ip_addr; - struct ip_v6_addr remote_ip_addr; - u32 ipv6_flow_label_20b; - u32 reserved1; -#if defined(__BIG_ENDIAN) - u16 remote_tcp_port; - u16 local_tcp_port; -#elif defined(__LITTLE_ENDIAN) - u16 local_tcp_port; - u16 remote_tcp_port; -#endif -}; - -/* - * l5cm-xstorm connection buffer - */ -struct l5cm_xstorm_conn_buffer { -#if defined(__BIG_ENDIAN) - u16 rsrv1; - u16 params; -#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0) -#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0 -#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) -#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1 -#elif defined(__LITTLE_ENDIAN) - u16 params; -#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0) -#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0 -#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) -#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1 - u16 rsrv1; -#endif -#if defined(__BIG_ENDIAN) - u16 mss; - u16 pseudo_header_checksum; -#elif defined(__LITTLE_ENDIAN) - u16 pseudo_header_checksum; - u16 mss; -#endif - u32 rcv_buf; - u32 rsrv2; - struct regpair context_addr; -}; - -/* - * l5cm-tstorm connection buffer - */ -struct l5cm_tstorm_conn_buffer { - u32 rsrv1[2]; -#if defined(__BIG_ENDIAN) - u16 params; -#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0) -#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0 -#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) -#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1 - u8 ka_max_probe_count; - u8 ka_enable; -#elif defined(__LITTLE_ENDIAN) - u8 ka_enable; - u8 ka_max_probe_count; - u16 params; -#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0) -#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0 -#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) -#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1 -#endif - u32 ka_timeout; - u32 ka_interval; - u32 max_rt_time; -}; - -/* - * l5cm connection buffer for active side - */ -struct l5cm_active_conn_buffer { - struct l5cm_conn_addr_params conn_addr_buf; - struct l5cm_xstorm_conn_buffer xstorm_conn_buffer; - struct l5cm_tstorm_conn_buffer tstorm_conn_buffer; -}; - - - -/* - * The l5cm opaque buffer passed in add new connection ramrod passive side - */ -struct l5cm_hash_input_string { - u32 __opaque1; -#if defined(__BIG_ENDIAN) - u16 __opaque3; - u16 __opaque2; -#elif defined(__LITTLE_ENDIAN) - u16 __opaque2; - u16 __opaque3; -#endif - struct ip_v6_addr __opaque4; - struct ip_v6_addr __opaque5; - u32 __opaque6; - u32 __opaque7[5]; -}; - - -/* - * syn cookie component - */ -struct l5cm_syn_cookie_comp { - u32 __opaque; -}; - -/* - * data related to listeners of a TCP port - */ -struct l5cm_port_listener_data { - u8 params; -#define L5CM_PORT_LISTENER_DATA_ENABLE (0x1<<0) -#define L5CM_PORT_LISTENER_DATA_ENABLE_SHIFT 0 -#define L5CM_PORT_LISTENER_DATA_IP_INDEX (0xF<<1) -#define L5CM_PORT_LISTENER_DATA_IP_INDEX_SHIFT 1 -#define L5CM_PORT_LISTENER_DATA_NET_FILTER (0x1<<5) -#define L5CM_PORT_LISTENER_DATA_NET_FILTER_SHIFT 5 -#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE (0x1<<6) -#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE_SHIFT 6 -#define L5CM_PORT_LISTENER_DATA_MPA_MODE (0x1<<7) -#define L5CM_PORT_LISTENER_DATA_MPA_MODE_SHIFT 7 -}; - -/* - * Opaque structure passed from U to X when final ack arrives - */ -struct l5cm_opaque_buf { - u32 __opaque1; - u32 __opaque2; - u32 __opaque3; - u32 __opaque4; - struct l5cm_syn_cookie_comp __opaque5; -#if defined(__BIG_ENDIAN) - u16 rsrv2; - u8 rsrv; - struct l5cm_port_listener_data __opaque6; -#elif defined(__LITTLE_ENDIAN) - struct 
l5cm_port_listener_data __opaque6; - u8 rsrv; - u16 rsrv2; -#endif -}; - - -/* - * l5cm slow path element - */ -struct l5cm_packet_size { - u32 size; - u32 rsrv; -}; - - -/* - * The final-ack union structure in PCS entry after final ack arrived - */ -struct l5cm_pcse_ack { - struct l5cm_xstorm_conn_buffer tx_socket_params; - struct l5cm_opaque_buf opaque_buf; - struct l5cm_tstorm_conn_buffer rx_socket_params; -}; - - -/* - * The syn union structure in PCS entry after syn arrived - */ -struct l5cm_pcse_syn { - struct l5cm_opaque_buf opaque_buf; - u32 rsrv[12]; -}; - - -/* - * pcs entry data for passive connections - */ -struct l5cm_pcs_attributes { -#if defined(__BIG_ENDIAN) - u16 pcs_id; - u8 status; - u8 flags; -#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0) -#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0 -#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1) -#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1 -#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2) -#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2 -#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3) -#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3 -#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4) -#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4 -#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5) -#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5 -#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6) -#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6 -#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7) -#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7 -#elif defined(__LITTLE_ENDIAN) - u8 flags; -#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0) -#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0 -#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1) -#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1 -#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2) -#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2 -#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3) -#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3 -#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4) -#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4 -#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5) -#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5 -#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6) -#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6 -#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7) -#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7 - u8 status; - u16 pcs_id; -#endif -}; - - -union l5cm_seg_params { - struct l5cm_pcse_syn syn_seg_params; - struct l5cm_pcse_ack ack_seg_params; -}; - -/* - * pcs entry data for passive connections - */ -struct l5cm_pcs_hdr { - struct l5cm_hash_input_string hash_input_string; - struct l5cm_conn_addr_params conn_addr_buf; - u32 cid; - u32 hash_result; - union l5cm_seg_params seg_params; - struct l5cm_pcs_attributes att; -#if defined(__BIG_ENDIAN) - u16 rsrv; - u16 rx_seg_size; -#elif defined(__LITTLE_ENDIAN) - u16 rx_seg_size; - u16 rsrv; -#endif -}; - -/* - * pcs entry for passive connections - */ -struct l5cm_pcs_entry { - struct l5cm_pcs_hdr hdr; - u8 rx_segment[1516]; -}; - - - - -/* - * l5cm connection parameters - */ -union l5cm_reduce_param_union { - u32 opaque1; - u32 opaque2; -}; - -/* - * l5cm connection parameters - */ -struct l5cm_reduce_conn { - union l5cm_reduce_param_union opaque1; - u32 opaque2; -}; - -/* - * l5cm slow path element - */ -union l5cm_specific_data { - u8 protocol_data[8]; - struct regpair phy_address; - struct l5cm_packet_size packet_size; - struct l5cm_reduce_conn reduced_conn; -}; - -/* - * l5 slow path 
element - */ -struct l5cm_spe { - struct spe_hdr hdr; - union l5cm_specific_data data; -}; - - - - -/* - * Termination variables - */ -struct l5cm_term_vars { - u8 BitMap; -#define L5CM_TERM_VARS_TCP_STATE (0xF<<0) -#define L5CM_TERM_VARS_TCP_STATE_SHIFT 0 -#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4) -#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4 -#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5) -#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5 -#define L5CM_TERM_VARS_TERM_ON_CHIP (0x1<<6) -#define L5CM_TERM_VARS_TERM_ON_CHIP_SHIFT 6 -#define L5CM_TERM_VARS_RSRV (0x1<<7) -#define L5CM_TERM_VARS_RSRV_SHIFT 7 -}; - - - - -/* - * Tstorm Tcp flags - */ -struct tstorm_l5cm_tcp_flags { - u16 flags; -#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0) -#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0 -#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12) -#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12 -#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13) -#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13 -#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14) -#define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14 -}; - - -/* - * Xstorm Tcp flags - */ -struct xstorm_l5cm_tcp_flags { - u8 flags; -#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED (0x1<<0) -#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED_SHIFT 0 -#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<1) -#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 1 -#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN (0x1<<2) -#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN_SHIFT 2 -#define XSTORM_L5CM_TCP_FLAGS_RSRV (0x1F<<3) -#define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3 -}; - - - -/* - * Out-of-order states - */ -enum tcp_ooo_event { - TCP_EVENT_ADD_PEN = 0, - TCP_EVENT_ADD_NEW_ISLE = 1, - TCP_EVENT_ADD_ISLE_RIGHT = 2, - TCP_EVENT_ADD_ISLE_LEFT = 3, - TCP_EVENT_JOIN = 4, - TCP_EVENT_NOP = 5, - MAX_TCP_OOO_EVENT -}; - - -/* - * OOO support modes - */ -enum tcp_tstorm_ooo { - TCP_TSTORM_OOO_DROP_AND_PROC_ACK = 0, - TCP_TSTORM_OOO_SEND_PURE_ACK = 1, - TCP_TSTORM_OOO_SUPPORTED = 2, - MAX_TCP_TSTORM_OOO -}; - - - - - - - - - -#endif /* __5710_HSI_CNIC_LE__ */ diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h deleted file mode 100644 index 79443e0..0000000 --- a/drivers/net/cnic_if.h +++ /dev/null @@ -1,340 +0,0 @@ -/* cnic_if.h: Broadcom CNIC core network driver. - * - * Copyright (c) 2006-2011 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - */ - - -#ifndef CNIC_IF_H -#define CNIC_IF_H - -#define CNIC_MODULE_VERSION "2.5.7" -#define CNIC_MODULE_RELDATE "July 20, 2011" - -#define CNIC_ULP_RDMA 0 -#define CNIC_ULP_ISCSI 1 -#define CNIC_ULP_FCOE 2 -#define CNIC_ULP_L4 3 -#define MAX_CNIC_ULP_TYPE_EXT 3 -#define MAX_CNIC_ULP_TYPE 4 - -struct kwqe { - u32 kwqe_op_flag; - -#define KWQE_QID_SHIFT 8 -#define KWQE_OPCODE_MASK 0x00ff0000 -#define KWQE_OPCODE_SHIFT 16 -#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT) -#define KWQE_LAYER_MASK 0x70000000 -#define KWQE_LAYER_SHIFT 28 -#define KWQE_FLAGS_LAYER_MASK_L2 (2<<28) -#define KWQE_FLAGS_LAYER_MASK_L3 (3<<28) -#define KWQE_FLAGS_LAYER_MASK_L4 (4<<28) -#define KWQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28) -#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28) -#define KWQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28) - - u32 kwqe_info0; - u32 kwqe_info1; - u32 kwqe_info2; - u32 kwqe_info3; - u32 kwqe_info4; - u32 kwqe_info5; - u32 kwqe_info6; -}; - -struct kwqe_16 { - u32 kwqe_info0; - u32 kwqe_info1; - u32 kwqe_info2; - u32 kwqe_info3; -}; - -struct kcqe { - u32 kcqe_info0; - u32 kcqe_info1; - u32 kcqe_info2; - u32 kcqe_info3; - u32 kcqe_info4; - u32 kcqe_info5; - u32 kcqe_info6; - u32 kcqe_op_flag; - #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */ - #define KCQE_FLAGS_LAYER_MASK (0x7<<28) - #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28) - #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28) - #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28) - #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28) - #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28) - #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28) - #define KCQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28) - #define KCQE_FLAGS_NEXT (1<<31) - #define KCQE_FLAGS_OPCODE_MASK (0xff<<16) - #define KCQE_FLAGS_OPCODE_SHIFT (16) - #define KCQE_OPCODE(op) \ - (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT) -}; - -#define MAX_CNIC_CTL_DATA 64 -#define MAX_DRV_CTL_DATA 64 - -#define CNIC_CTL_STOP_CMD 1 -#define CNIC_CTL_START_CMD 2 -#define CNIC_CTL_COMPLETION_CMD 3 -#define CNIC_CTL_STOP_ISCSI_CMD 4 - -#define DRV_CTL_IO_WR_CMD 0x101 -#define DRV_CTL_IO_RD_CMD 0x102 -#define DRV_CTL_CTX_WR_CMD 0x103 -#define DRV_CTL_CTXTBL_WR_CMD 0x104 -#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD 0x105 -#define DRV_CTL_START_L2_CMD 0x106 -#define DRV_CTL_STOP_L2_CMD 0x107 -#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c -#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d - -struct cnic_ctl_completion { - u32 cid; - u8 opcode; - u8 error; -}; - -struct cnic_ctl_info { - int cmd; - union { - struct cnic_ctl_completion comp; - char bytes[MAX_CNIC_CTL_DATA]; - } data; -}; - -struct drv_ctl_spq_credit { - u32 credit_count; -}; - -struct drv_ctl_io { - u32 cid_addr; - u32 offset; - u32 data; - dma_addr_t dma_addr; -}; - -struct drv_ctl_l2_ring { - u32 client_id; - u32 cid; -}; - -struct drv_ctl_info { - int cmd; - union { - struct drv_ctl_spq_credit credit; - struct drv_ctl_io io; - struct drv_ctl_l2_ring ring; - char bytes[MAX_DRV_CTL_DATA]; - } data; -}; - -struct cnic_ops { - struct module *cnic_owner; - /* Calls to these functions are protected by RCU. When - * unregistering, we wait for any calls to complete before - * continuing. 
- */ - int (*cnic_handler)(void *, void *); - int (*cnic_ctl)(void *, struct cnic_ctl_info *); -}; - -#define MAX_CNIC_VEC 8 - -struct cnic_irq { - unsigned int vector; - void *status_blk; - u32 status_blk_num; - u32 status_blk_num2; - u32 irq_flags; -#define CNIC_IRQ_FL_MSIX 0x00000001 -}; - -struct cnic_eth_dev { - struct module *drv_owner; - u32 drv_state; -#define CNIC_DRV_STATE_REGD 0x00000001 -#define CNIC_DRV_STATE_USING_MSIX 0x00000002 -#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004 -#define CNIC_DRV_STATE_NO_ISCSI 0x00000008 -#define CNIC_DRV_STATE_NO_FCOE 0x00000010 - u32 chip_id; - u32 max_kwqe_pending; - struct pci_dev *pdev; - void __iomem *io_base; - void __iomem *io_base2; - const void *iro_arr; - - u32 ctx_tbl_offset; - u32 ctx_tbl_len; - int ctx_blk_size; - u32 starting_cid; - u32 max_iscsi_conn; - u32 max_fcoe_conn; - u32 max_rdma_conn; - u32 fcoe_init_cid; - u32 fcoe_wwn_port_name_hi; - u32 fcoe_wwn_port_name_lo; - u32 fcoe_wwn_node_name_hi; - u32 fcoe_wwn_node_name_lo; - - u16 iscsi_l2_client_id; - u16 iscsi_l2_cid; - u8 iscsi_mac[ETH_ALEN]; - - int num_irq; - struct cnic_irq irq_arr[MAX_CNIC_VEC]; - int (*drv_register_cnic)(struct net_device *, - struct cnic_ops *, void *); - int (*drv_unregister_cnic)(struct net_device *); - int (*drv_submit_kwqes_32)(struct net_device *, - struct kwqe *[], u32); - int (*drv_submit_kwqes_16)(struct net_device *, - struct kwqe_16 *[], u32); - int (*drv_ctl)(struct net_device *, struct drv_ctl_info *); - unsigned long reserved1[2]; -}; - -struct cnic_sockaddr { - union { - struct sockaddr_in v4; - struct sockaddr_in6 v6; - } local; - union { - struct sockaddr_in v4; - struct sockaddr_in6 v6; - } remote; -}; - -struct cnic_sock { - struct cnic_dev *dev; - void *context; - u32 src_ip[4]; - u32 dst_ip[4]; - u16 src_port; - u16 dst_port; - u16 vlan_id; - unsigned char old_ha[6]; - unsigned char ha[6]; - u32 mtu; - u32 cid; - u32 l5_cid; - u32 pg_cid; - int ulp_type; - - u32 ka_timeout; - u32 ka_interval; - u8 ka_max_probe_count; - u8 tos; - u8 ttl; - u8 snd_seq_scale; - u32 rcv_buf; - u32 snd_buf; - u32 seed; - - unsigned long tcp_flags; -#define SK_TCP_NO_DELAY_ACK 0x1 -#define SK_TCP_KEEP_ALIVE 0x2 -#define SK_TCP_NAGLE 0x4 -#define SK_TCP_TIMESTAMP 0x8 -#define SK_TCP_SACK 0x10 -#define SK_TCP_SEG_SCALING 0x20 - unsigned long flags; -#define SK_F_INUSE 0 -#define SK_F_OFFLD_COMPLETE 1 -#define SK_F_OFFLD_SCHED 2 -#define SK_F_PG_OFFLD_COMPLETE 3 -#define SK_F_CONNECT_START 4 -#define SK_F_IPV6 5 -#define SK_F_CLOSING 7 - - atomic_t ref_count; - u32 state; - struct kwqe kwqe1; - struct kwqe kwqe2; - struct kwqe kwqe3; -}; - -struct cnic_dev { - struct net_device *netdev; - struct pci_dev *pcidev; - void __iomem *regview; - struct list_head list; - - int (*register_device)(struct cnic_dev *dev, int ulp_type, - void *ulp_ctx); - int (*unregister_device)(struct cnic_dev *dev, int ulp_type); - int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[], - u32 num_wqes); - int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[], - u32 num_wqes); - - int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **, - void *); - int (*cm_destroy)(struct cnic_sock *); - int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *); - int (*cm_abort)(struct cnic_sock *); - int (*cm_close)(struct cnic_sock *); - struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type); - int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type, - char *data, u16 data_size); - unsigned long flags; -#define CNIC_F_CNIC_UP 1 
-#define CNIC_F_BNX2_CLASS	3
-#define CNIC_F_BNX2X_CLASS	4
-	atomic_t ref_count;
-	u8 mac_addr[6];
-
-	int max_iscsi_conn;
-	int max_fcoe_conn;
-	int max_rdma_conn;
-
-	void *cnic_priv;
-};
-
-#define CNIC_WR(dev, off, val)		writel(val, dev->regview + off)
-#define CNIC_WR16(dev, off, val)	writew(val, dev->regview + off)
-#define CNIC_WR8(dev, off, val)		writeb(val, dev->regview + off)
-#define CNIC_RD(dev, off)		readl(dev->regview + off)
-#define CNIC_RD16(dev, off)		readw(dev->regview + off)
-
-struct cnic_ulp_ops {
-	/* Calls to these functions are protected by RCU. When
-	 * unregistering, we wait for any calls to complete before
-	 * continuing.
-	 */
-
-	void (*cnic_init)(struct cnic_dev *dev);
-	void (*cnic_exit)(struct cnic_dev *dev);
-	void (*cnic_start)(void *ulp_ctx);
-	void (*cnic_stop)(void *ulp_ctx);
-	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
-				u32 num_cqes);
-	void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid);
-	void (*cm_connect_complete)(struct cnic_sock *);
-	void (*cm_close_complete)(struct cnic_sock *);
-	void (*cm_abort_complete)(struct cnic_sock *);
-	void (*cm_remote_close)(struct cnic_sock *);
-	void (*cm_remote_abort)(struct cnic_sock *);
-	int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
-				  char *data, u16 data_size);
-	struct module *owner;
-	atomic_t ref_count;
-};
-
-extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
-
-extern int cnic_unregister_driver(int ulp_type);
-
-extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
-extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
-
-#endif
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 56ed5ec..772fb49 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -14,5 +14,6 @@ if ETHERNET
 source "drivers/net/ethernet/3com/Kconfig"
 source "drivers/net/ethernet/8390/Kconfig"
 source "drivers/net/ethernet/amd/Kconfig"
+source "drivers/net/ethernet/broadcom/Kconfig"
 
 endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index fc82588..3ef49a2 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_NET_VENDOR_3COM) += 3com/
 obj-$(CONFIG_NET_VENDOR_8390) += 8390/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
+obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
new file mode 100644
index 0000000..8986e57
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -0,0 +1,119 @@
+#
+# Broadcom device configuration
+#
+
+config NET_VENDOR_BROADCOM
+	bool "Broadcom devices"
+	depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
+		   SIBYTE_SB1xxx_SOC
+	---help---
+	  If you have a network (Ethernet) chipset belonging to this class,
+	  say Y.
+
+	  Note that the answer to this question does not directly affect
+	  the kernel: saying N will just cause the configurator to skip all
+	  the questions regarding Broadcom chipsets. If you say Y, you will
+	  be asked for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_BROADCOM
+
+config B44
+	tristate "Broadcom 440x/47xx ethernet support"
+	depends on SSB_POSSIBLE && HAS_DMA
+	select SSB
+	select MII
+	---help---
+	  If you have a network (Ethernet) controller of this type, say Y
+	  or M and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called b44.
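+
+	  Note that this driver attaches to the chip through the SSB bus
+	  layer rather than through PCI directly; on PCI systems the SSB
+	  PCI-host support below is auto-selected.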
+ +# Auto-select SSB PCI-HOST support, if possible +config B44_PCI_AUTOSELECT + bool + depends on B44 && SSB_PCIHOST_POSSIBLE + select SSB_PCIHOST + default y + +# Auto-select SSB PCICORE driver, if possible +config B44_PCICORE_AUTOSELECT + bool + depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE + select SSB_DRIVER_PCICORE + default y + +config B44_PCI + bool + depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT + default y + +config BCM63XX_ENET + tristate "Broadcom 63xx internal mac support" + depends on BCM63XX + select MII + select PHYLIB + help + This driver supports the ethernet MACs in the Broadcom 63xx + MIPS chipset family (BCM63XX). + +config BNX2 + tristate "Broadcom NetXtremeII support" + depends on PCI + select CRC32 + select FW_LOADER + ---help--- + This driver supports Broadcom NetXtremeII gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called bnx2. This is recommended. + +config CNIC + tristate "Broadcom CNIC support" + depends on PCI + select BNX2 + select UIO + ---help--- + This driver supports offload features of Broadcom NetXtremeII + gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called cnic. This is recommended. + +config SB1250_MAC + tristate "SB1250 Gigabit Ethernet support" + depends on SIBYTE_SB1xxx_SOC + select PHYLIB + ---help--- + This driver supports Gigabit Ethernet interfaces based on the + Broadcom SiByte family of System-On-a-Chip parts. They include + the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455 + and BCM1480 chips. + + To compile this driver as a module, choose M here: the module + will be called sb1250-mac. + +config TIGON3 + tristate "Broadcom Tigon3 support" + depends on PCI + select PHYLIB + ---help--- + This driver supports Broadcom Tigon3 based gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called tg3. This is recommended. + +config BNX2X + tristate "Broadcom NetXtremeII 10Gb support" + depends on PCI + select FW_LOADER + select ZLIB_INFLATE + select LIBCRC32C + select MDIO + ---help--- + This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards. + To compile this driver as a module, choose M here: the module + will be called bnx2x. This is recommended. + +endif # NET_VENDOR_BROADCOM diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile new file mode 100644 index 0000000..b789605 --- /dev/null +++ b/drivers/net/ethernet/broadcom/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the Broadcom network device drivers. +# + +obj-$(CONFIG_B44) += b44.o +obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o +obj-$(CONFIG_BNX2) += bnx2.o +obj-$(CONFIG_CNIC) += cnic.o +obj-$(CONFIG_BNX2X) += bnx2x/ +obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o +obj-$(CONFIG_TIGON3) += tg3.o diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c new file mode 100644 index 0000000..41ea84e --- /dev/null +++ b/drivers/net/ethernet/broadcom/b44.c @@ -0,0 +1,2374 @@ +/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver. + * + * Copyright (C) 2002 David S. Miller (davem@redhat.com) + * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi) + * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org) + * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org) + * Copyright (C) 2006 Broadcom Corporation. + * Copyright (C) 2007 Michael Buesch + * + * Distribute under GPL. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +#include "b44.h" + +#define DRV_MODULE_NAME "b44" +#define DRV_MODULE_VERSION "2.0" +#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver" + +#define B44_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ +#define B44_TX_TIMEOUT (5 * HZ) + +/* hardware minimum and maximum for a single frame's data payload */ +#define B44_MIN_MTU 60 +#define B44_MAX_MTU 1500 + +#define B44_RX_RING_SIZE 512 +#define B44_DEF_RX_RING_PENDING 200 +#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \ + B44_RX_RING_SIZE) +#define B44_TX_RING_SIZE 512 +#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1) +#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \ + B44_TX_RING_SIZE) + +#define TX_RING_GAP(BP) \ + (B44_TX_RING_SIZE - (BP)->tx_pending) +#define TX_BUFFS_AVAIL(BP) \ + (((BP)->tx_cons <= (BP)->tx_prod) ? \ + (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \ + (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP)) +#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1)) + +#define RX_PKT_OFFSET (RX_HEADER_LEN + 2) +#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET) + +/* minimum number of free TX descriptors required to wake up TX process */ +#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4) + +/* b44 internal pattern match filter info */ +#define B44_PATTERN_BASE 0x400 +#define B44_PATTERN_SIZE 0x80 +#define B44_PMASK_BASE 0x600 +#define B44_PMASK_SIZE 0x10 +#define B44_MAX_PATTERNS 16 +#define B44_ETHIPV6UDP_HLEN 62 +#define B44_ETHIPV4UDP_HLEN 42 + +MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller"); +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */ +module_param(b44_debug, int, 0); +MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); + + +#ifdef CONFIG_B44_PCI +static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = { + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, + { 0 } /* terminate list with empty entry */ +}; +MODULE_DEVICE_TABLE(pci, b44_pci_tbl); + +static struct pci_driver b44_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = b44_pci_tbl, +}; +#endif /* CONFIG_B44_PCI */ + +static const struct ssb_device_id b44_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV), + SSB_DEVTABLE_END +}; +MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl); + +static void b44_halt(struct b44 *); +static void b44_init_rings(struct b44 *); + +#define B44_FULL_RESET 1 +#define B44_FULL_RESET_SKIP_PHY 2 +#define B44_PARTIAL_RESET 3 +#define B44_CHIP_RESET_FULL 4 +#define B44_CHIP_RESET_PARTIAL 5 + +static void b44_init_hw(struct b44 *, int); + +static int dma_desc_sync_size; +static int instance; + +static const char b44_gstrings[][ETH_GSTRING_LEN] = { +#define _B44(x...) 
# x, +B44_STAT_REG_DECLARE +#undef _B44 +}; + +static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) +{ + dma_sync_single_for_device(sdev->dma_dev, dma_base + offset, + dma_desc_sync_size, dir); +} + +static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) +{ + dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset, + dma_desc_sync_size, dir); +} + +static inline unsigned long br32(const struct b44 *bp, unsigned long reg) +{ + return ssb_read32(bp->sdev, reg); +} + +static inline void bw32(const struct b44 *bp, + unsigned long reg, unsigned long val) +{ + ssb_write32(bp->sdev, reg, val); +} + +static int b44_wait_bit(struct b44 *bp, unsigned long reg, + u32 bit, unsigned long timeout, const int clear) +{ + unsigned long i; + + for (i = 0; i < timeout; i++) { + u32 val = br32(bp, reg); + + if (clear && !(val & bit)) + break; + if (!clear && (val & bit)) + break; + udelay(10); + } + if (i == timeout) { + if (net_ratelimit()) + netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n", + bit, reg, clear ? "clear" : "set"); + + return -ENODEV; + } + return 0; +} + +static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index) +{ + u32 val; + + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ | + (index << CAM_CTRL_INDEX_SHIFT))); + + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); + + val = br32(bp, B44_CAM_DATA_LO); + + data[2] = (val >> 24) & 0xFF; + data[3] = (val >> 16) & 0xFF; + data[4] = (val >> 8) & 0xFF; + data[5] = (val >> 0) & 0xFF; + + val = br32(bp, B44_CAM_DATA_HI); + + data[0] = (val >> 8) & 0xFF; + data[1] = (val >> 0) & 0xFF; +} + +static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) +{ + u32 val; + + val = ((u32) data[2]) << 24; + val |= ((u32) data[3]) << 16; + val |= ((u32) data[4]) << 8; + val |= ((u32) data[5]) << 0; + bw32(bp, B44_CAM_DATA_LO, val); + val = (CAM_DATA_HI_VALID | + (((u32) data[0]) << 8) | + (((u32) data[1]) << 0)); + bw32(bp, B44_CAM_DATA_HI, val); + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE | + (index << CAM_CTRL_INDEX_SHIFT))); + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); +} + +static inline void __b44_disable_ints(struct b44 *bp) +{ + bw32(bp, B44_IMASK, 0); +} + +static void b44_disable_ints(struct b44 *bp) +{ + __b44_disable_ints(bp); + + /* Flush posted writes. 
*/ + br32(bp, B44_IMASK); +} + +static void b44_enable_ints(struct b44 *bp) +{ + bw32(bp, B44_IMASK, bp->imask); +} + +static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val) +{ + int err; + + bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); + bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | + (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) | + (phy_addr << MDIO_DATA_PMD_SHIFT) | + (reg << MDIO_DATA_RA_SHIFT) | + (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT))); + err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); + *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA; + + return err; +} + +static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val) +{ + bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); + bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | + (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) | + (phy_addr << MDIO_DATA_PMD_SHIFT) | + (reg << MDIO_DATA_RA_SHIFT) | + (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) | + (val & MDIO_DATA_DATA))); + return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); +} + +static inline int b44_readphy(struct b44 *bp, int reg, u32 *val) +{ + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + + return __b44_readphy(bp, bp->phy_addr, reg, val); +} + +static inline int b44_writephy(struct b44 *bp, int reg, u32 val) +{ + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + + return __b44_writephy(bp, bp->phy_addr, reg, val); +} + +/* miilib interface */ +static int b44_mii_read(struct net_device *dev, int phy_id, int location) +{ + u32 val; + struct b44 *bp = netdev_priv(dev); + int rc = __b44_readphy(bp, phy_id, location, &val); + if (rc) + return 0xffffffff; + return val; +} + +static void b44_mii_write(struct net_device *dev, int phy_id, int location, + int val) +{ + struct b44 *bp = netdev_priv(dev); + __b44_writephy(bp, phy_id, location, val); +} + +static int b44_phy_reset(struct b44 *bp) +{ + u32 val; + int err; + + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + err = b44_writephy(bp, MII_BMCR, BMCR_RESET); + if (err) + return err; + udelay(100); + err = b44_readphy(bp, MII_BMCR, &val); + if (!err) { + if (val & BMCR_RESET) { + netdev_err(bp->dev, "PHY Reset would not complete\n"); + err = -ENODEV; + } + } + + return err; +} + +static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags) +{ + u32 val; + + bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE); + bp->flags |= pause_flags; + + val = br32(bp, B44_RXCONFIG); + if (pause_flags & B44_FLAG_RX_PAUSE) + val |= RXCONFIG_FLOW; + else + val &= ~RXCONFIG_FLOW; + bw32(bp, B44_RXCONFIG, val); + + val = br32(bp, B44_MAC_FLOW); + if (pause_flags & B44_FLAG_TX_PAUSE) + val |= (MAC_FLOW_PAUSE_ENAB | + (0xc0 & MAC_FLOW_RX_HI_WATER)); + else + val &= ~MAC_FLOW_PAUSE_ENAB; + bw32(bp, B44_MAC_FLOW, val); +} + +static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) +{ + u32 pause_enab = 0; + + /* The driver supports only rx pause by default because + the b44 mac tx pause mechanism generates excessive + pause frames. + Use ethtool to turn on b44 tx pause if necessary. 
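+	   (e.g. "ethtool -A ethX tx on", where ethX stands for whatever
+	   name this interface registered under).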
+ */ + if ((local & ADVERTISE_PAUSE_CAP) && + (local & ADVERTISE_PAUSE_ASYM)){ + if ((remote & LPA_PAUSE_ASYM) && + !(remote & LPA_PAUSE_CAP)) + pause_enab |= B44_FLAG_RX_PAUSE; + } + + __b44_set_flow_ctrl(bp, pause_enab); +} + +#ifdef CONFIG_BCM47XX +#include +static void b44_wap54g10_workaround(struct b44 *bp) +{ + char buf[20]; + u32 val; + int err; + + /* + * workaround for bad hardware design in Linksys WAP54G v1.0 + * see https://dev.openwrt.org/ticket/146 + * check and reset bit "isolate" + */ + if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0) + return; + if (simple_strtoul(buf, NULL, 0) == 2) { + err = __b44_readphy(bp, 0, MII_BMCR, &val); + if (err) + goto error; + if (!(val & BMCR_ISOLATE)) + return; + val &= ~BMCR_ISOLATE; + err = __b44_writephy(bp, 0, MII_BMCR, val); + if (err) + goto error; + } + return; +error: + pr_warning("PHY: cannot reset MII transceiver isolate bit\n"); +} +#else +static inline void b44_wap54g10_workaround(struct b44 *bp) +{ +} +#endif + +static int b44_setup_phy(struct b44 *bp) +{ + u32 val; + int err; + + b44_wap54g10_workaround(bp); + + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) + goto out; + if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, + val & MII_ALEDCTRL_ALLMSK)) != 0) + goto out; + if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0) + goto out; + if ((err = b44_writephy(bp, B44_MII_TLEDCTRL, + val | MII_TLEDCTRL_ENABLE)) != 0) + goto out; + + if (!(bp->flags & B44_FLAG_FORCE_LINK)) { + u32 adv = ADVERTISE_CSMA; + + if (bp->flags & B44_FLAG_ADV_10HALF) + adv |= ADVERTISE_10HALF; + if (bp->flags & B44_FLAG_ADV_10FULL) + adv |= ADVERTISE_10FULL; + if (bp->flags & B44_FLAG_ADV_100HALF) + adv |= ADVERTISE_100HALF; + if (bp->flags & B44_FLAG_ADV_100FULL) + adv |= ADVERTISE_100FULL; + + if (bp->flags & B44_FLAG_PAUSE_AUTO) + adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0) + goto out; + if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE | + BMCR_ANRESTART))) != 0) + goto out; + } else { + u32 bmcr; + + if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0) + goto out; + bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100); + if (bp->flags & B44_FLAG_100_BASE_T) + bmcr |= BMCR_SPEED100; + if (bp->flags & B44_FLAG_FULL_DUPLEX) + bmcr |= BMCR_FULLDPLX; + if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0) + goto out; + + /* Since we will not be negotiating there is no safe way + * to determine if the link partner supports flow control + * or not. So just disable it completely in this case. + */ + b44_set_flow_ctrl(bp, 0, 0); + } + +out: + return err; +} + +static void b44_stats_update(struct b44 *bp) +{ + unsigned long reg; + u32 *val; + + val = &bp->hw_stats.tx_good_octets; + for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) { + *val++ += br32(bp, reg); + } + + /* Pad */ + reg += 8*4UL; + + for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) { + *val++ += br32(bp, reg); + } +} + +static void b44_link_report(struct b44 *bp) +{ + if (!netif_carrier_ok(bp->dev)) { + netdev_info(bp->dev, "Link is down\n"); + } else { + netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n", + (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10, + (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half"); + + netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n", + (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off", + (bp->flags & B44_FLAG_RX_PAUSE) ? 
"on" : "off"); + } +} + +static void b44_check_phy(struct b44 *bp) +{ + u32 bmsr, aux; + + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { + bp->flags |= B44_FLAG_100_BASE_T; + bp->flags |= B44_FLAG_FULL_DUPLEX; + if (!netif_carrier_ok(bp->dev)) { + u32 val = br32(bp, B44_TX_CTRL); + val |= TX_CTRL_DUPLEX; + bw32(bp, B44_TX_CTRL, val); + netif_carrier_on(bp->dev); + b44_link_report(bp); + } + return; + } + + if (!b44_readphy(bp, MII_BMSR, &bmsr) && + !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && + (bmsr != 0xffff)) { + if (aux & MII_AUXCTRL_SPEED) + bp->flags |= B44_FLAG_100_BASE_T; + else + bp->flags &= ~B44_FLAG_100_BASE_T; + if (aux & MII_AUXCTRL_DUPLEX) + bp->flags |= B44_FLAG_FULL_DUPLEX; + else + bp->flags &= ~B44_FLAG_FULL_DUPLEX; + + if (!netif_carrier_ok(bp->dev) && + (bmsr & BMSR_LSTATUS)) { + u32 val = br32(bp, B44_TX_CTRL); + u32 local_adv, remote_adv; + + if (bp->flags & B44_FLAG_FULL_DUPLEX) + val |= TX_CTRL_DUPLEX; + else + val &= ~TX_CTRL_DUPLEX; + bw32(bp, B44_TX_CTRL, val); + + if (!(bp->flags & B44_FLAG_FORCE_LINK) && + !b44_readphy(bp, MII_ADVERTISE, &local_adv) && + !b44_readphy(bp, MII_LPA, &remote_adv)) + b44_set_flow_ctrl(bp, local_adv, remote_adv); + + /* Link now up */ + netif_carrier_on(bp->dev); + b44_link_report(bp); + } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) { + /* Link now down */ + netif_carrier_off(bp->dev); + b44_link_report(bp); + } + + if (bmsr & BMSR_RFAULT) + netdev_warn(bp->dev, "Remote fault detected in PHY\n"); + if (bmsr & BMSR_JCD) + netdev_warn(bp->dev, "Jabber detected in PHY\n"); + } +} + +static void b44_timer(unsigned long __opaque) +{ + struct b44 *bp = (struct b44 *) __opaque; + + spin_lock_irq(&bp->lock); + + b44_check_phy(bp); + + b44_stats_update(bp); + + spin_unlock_irq(&bp->lock); + + mod_timer(&bp->timer, round_jiffies(jiffies + HZ)); +} + +static void b44_tx(struct b44 *bp) +{ + u32 cur, cons; + + cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK; + cur /= sizeof(struct dma_desc); + + /* XXX needs updating when NETIF_F_SG is supported */ + for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) { + struct ring_info *rp = &bp->tx_buffers[cons]; + struct sk_buff *skb = rp->skb; + + BUG_ON(skb == NULL); + + dma_unmap_single(bp->sdev->dma_dev, + rp->mapping, + skb->len, + DMA_TO_DEVICE); + rp->skb = NULL; + dev_kfree_skb(skb); + } + + bp->tx_cons = cons; + if (netif_queue_stopped(bp->dev) && + TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) + netif_wake_queue(bp->dev); + + bw32(bp, B44_GPTIMER, 0); +} + +/* Works like this. This chip writes a 'struct rx_header" 30 bytes + * before the DMA address you give it. So we allocate 30 more bytes + * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then + * point the chip at 30 bytes past where the rx_header will go. 
+ */ +static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) +{ + struct dma_desc *dp; + struct ring_info *src_map, *map; + struct rx_header *rh; + struct sk_buff *skb; + dma_addr_t mapping; + int dest_idx; + u32 ctrl; + + src_map = NULL; + if (src_idx >= 0) + src_map = &bp->rx_buffers[src_idx]; + dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); + map = &bp->rx_buffers[dest_idx]; + skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ); + if (skb == NULL) + return -ENOMEM; + + mapping = dma_map_single(bp->sdev->dma_dev, skb->data, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + + /* Hardware bug work-around, the chip is unable to do PCI DMA + to/from anything above 1GB :-( */ + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || + mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { + /* Sigh... */ + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, + RX_PKT_BUF_SZ, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); + if (skb == NULL) + return -ENOMEM; + mapping = dma_map_single(bp->sdev->dma_dev, skb->data, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || + mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + return -ENOMEM; + } + bp->force_copybreak = 1; + } + + rh = (struct rx_header *) skb->data; + + rh->len = 0; + rh->flags = 0; + + map->skb = skb; + map->mapping = mapping; + + if (src_map != NULL) + src_map->skb = NULL; + + ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ); + if (dest_idx == (B44_RX_RING_SIZE - 1)) + ctrl |= DESC_CTRL_EOT; + + dp = &bp->rx_ring[dest_idx]; + dp->ctrl = cpu_to_le32(ctrl); + dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset); + + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, + dest_idx * sizeof(*dp), + DMA_BIDIRECTIONAL); + + return RX_PKT_BUF_SZ; +} + +static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) +{ + struct dma_desc *src_desc, *dest_desc; + struct ring_info *src_map, *dest_map; + struct rx_header *rh; + int dest_idx; + __le32 ctrl; + + dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); + dest_desc = &bp->rx_ring[dest_idx]; + dest_map = &bp->rx_buffers[dest_idx]; + src_desc = &bp->rx_ring[src_idx]; + src_map = &bp->rx_buffers[src_idx]; + + dest_map->skb = src_map->skb; + rh = (struct rx_header *) src_map->skb->data; + rh->len = 0; + rh->flags = 0; + dest_map->mapping = src_map->mapping; + + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma, + src_idx * sizeof(*src_desc), + DMA_BIDIRECTIONAL); + + ctrl = src_desc->ctrl; + if (dest_idx == (B44_RX_RING_SIZE - 1)) + ctrl |= cpu_to_le32(DESC_CTRL_EOT); + else + ctrl &= cpu_to_le32(~DESC_CTRL_EOT); + + dest_desc->ctrl = ctrl; + dest_desc->addr = src_desc->addr; + + src_map->skb = NULL; + + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, + dest_idx * sizeof(*dest_desc), + DMA_BIDIRECTIONAL); + + dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); +} + +static int b44_rx(struct b44 *bp, int budget) +{ + int received; + u32 cons, prod; + + received = 0; + prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK; + prod /= sizeof(struct dma_desc); + cons = bp->rx_cons; + + while (cons 
!= prod && budget > 0) { + struct ring_info *rp = &bp->rx_buffers[cons]; + struct sk_buff *skb = rp->skb; + dma_addr_t map = rp->mapping; + struct rx_header *rh; + u16 len; + + dma_sync_single_for_cpu(bp->sdev->dma_dev, map, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + rh = (struct rx_header *) skb->data; + len = le16_to_cpu(rh->len); + if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) || + (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) { + drop_it: + b44_recycle_rx(bp, cons, bp->rx_prod); + drop_it_no_recycle: + bp->dev->stats.rx_dropped++; + goto next_pkt; + } + + if (len == 0) { + int i = 0; + + do { + udelay(2); + barrier(); + len = le16_to_cpu(rh->len); + } while (len == 0 && i++ < 5); + if (len == 0) + goto drop_it; + } + + /* Omit CRC. */ + len -= 4; + + if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) { + int skb_size; + skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); + if (skb_size < 0) + goto drop_it; + dma_unmap_single(bp->sdev->dma_dev, map, + skb_size, DMA_FROM_DEVICE); + /* Leave out rx_header */ + skb_put(skb, len + RX_PKT_OFFSET); + skb_pull(skb, RX_PKT_OFFSET); + } else { + struct sk_buff *copy_skb; + + b44_recycle_rx(bp, cons, bp->rx_prod); + copy_skb = netdev_alloc_skb(bp->dev, len + 2); + if (copy_skb == NULL) + goto drop_it_no_recycle; + + skb_reserve(copy_skb, 2); + skb_put(copy_skb, len); + /* DMA sync done above, copy just the actual packet */ + skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET, + copy_skb->data, len); + skb = copy_skb; + } + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, bp->dev); + netif_receive_skb(skb); + received++; + budget--; + next_pkt: + bp->rx_prod = (bp->rx_prod + 1) & + (B44_RX_RING_SIZE - 1); + cons = (cons + 1) & (B44_RX_RING_SIZE - 1); + } + + bp->rx_cons = cons; + bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc)); + + return received; +} + +static int b44_poll(struct napi_struct *napi, int budget) +{ + struct b44 *bp = container_of(napi, struct b44, napi); + int work_done; + unsigned long flags; + + spin_lock_irqsave(&bp->lock, flags); + + if (bp->istat & (ISTAT_TX | ISTAT_TO)) { + /* spin_lock(&bp->tx_lock); */ + b44_tx(bp); + /* spin_unlock(&bp->tx_lock); */ + } + if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */ + bp->istat &= ~ISTAT_RFO; + b44_disable_ints(bp); + ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */ + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); + netif_wake_queue(bp->dev); + } + + spin_unlock_irqrestore(&bp->lock, flags); + + work_done = 0; + if (bp->istat & ISTAT_RX) + work_done += b44_rx(bp, budget); + + if (bp->istat & ISTAT_ERRORS) { + spin_lock_irqsave(&bp->lock, flags); + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); + netif_wake_queue(bp->dev); + spin_unlock_irqrestore(&bp->lock, flags); + work_done = 0; + } + + if (work_done < budget) { + napi_complete(napi); + b44_enable_ints(bp); + } + + return work_done; +} + +static irqreturn_t b44_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct b44 *bp = netdev_priv(dev); + u32 istat, imask; + int handled = 0; + + spin_lock(&bp->lock); + + istat = br32(bp, B44_ISTAT); + imask = br32(bp, B44_IMASK); + + /* The interrupt mask register controls which interrupt bits + * will actually raise an interrupt to the CPU when set by hw/firmware, + * but doesn't mask off the bits. 
+ */ + istat &= imask; + if (istat) { + handled = 1; + + if (unlikely(!netif_running(dev))) { + netdev_info(dev, "late interrupt\n"); + goto irq_ack; + } + + if (napi_schedule_prep(&bp->napi)) { + /* NOTE: These writes are posted by the readback of + * the ISTAT register below. + */ + bp->istat = istat; + __b44_disable_ints(bp); + __napi_schedule(&bp->napi); + } + +irq_ack: + bw32(bp, B44_ISTAT, istat); + br32(bp, B44_ISTAT); + } + spin_unlock(&bp->lock); + return IRQ_RETVAL(handled); +} + +static void b44_tx_timeout(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + + netdev_err(dev, "transmit timed out, resetting\n"); + + spin_lock_irq(&bp->lock); + + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + netif_wake_queue(dev); +} + +static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + int rc = NETDEV_TX_OK; + dma_addr_t mapping; + u32 len, entry, ctrl; + unsigned long flags; + + len = skb->len; + spin_lock_irqsave(&bp->lock, flags); + + /* This is a hard error, log it. */ + if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { + netif_stop_queue(dev); + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_out; + } + + mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { + struct sk_buff *bounce_skb; + + /* Chip can't handle DMA to/from >1GB, use bounce buffer */ + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, len, + DMA_TO_DEVICE); + + bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA); + if (!bounce_skb) + goto err_out; + + mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data, + len, DMA_TO_DEVICE); + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, + len, DMA_TO_DEVICE); + dev_kfree_skb_any(bounce_skb); + goto err_out; + } + + skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len); + dev_kfree_skb_any(skb); + skb = bounce_skb; + } + + entry = bp->tx_prod; + bp->tx_buffers[entry].skb = skb; + bp->tx_buffers[entry].mapping = mapping; + + ctrl = (len & DESC_CTRL_LEN); + ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF; + if (entry == (B44_TX_RING_SIZE - 1)) + ctrl |= DESC_CTRL_EOT; + + bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); + bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); + + if (bp->flags & B44_FLAG_TX_RING_HACK) + b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma, + entry * sizeof(bp->tx_ring[0]), + DMA_TO_DEVICE); + + entry = NEXT_TX(entry); + + bp->tx_prod = entry; + + wmb(); + + bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); + if (bp->flags & B44_FLAG_BUGGY_TXPTR) + bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); + if (bp->flags & B44_FLAG_REORDER_BUG) + br32(bp, B44_DMATX_PTR); + + if (TX_BUFFS_AVAIL(bp) < 1) + netif_stop_queue(dev); + +out_unlock: + spin_unlock_irqrestore(&bp->lock, flags); + + return rc; + +err_out: + rc = NETDEV_TX_BUSY; + goto out_unlock; +} + +static int b44_change_mtu(struct net_device *dev, int new_mtu) +{ + struct b44 *bp = netdev_priv(dev); + + if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU) + return -EINVAL; + + if (!netif_running(dev)) { + /* We'll just catch it later when the + * device is up'd. 
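+		 * (b44_open() -> b44_init_hw() reprograms B44_RXMAXLEN and
+		 * B44_TXMAXLEN from dev->mtu, so just recording the new
+		 * value here is enough.)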
+ */ + dev->mtu = new_mtu; + return 0; + } + + spin_lock_irq(&bp->lock); + b44_halt(bp); + dev->mtu = new_mtu; + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + return 0; +} + +/* Free up pending packets in all rx/tx rings. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. bp->lock is not held and we are not + * in an interrupt context and thus may sleep. + */ +static void b44_free_rings(struct b44 *bp) +{ + struct ring_info *rp; + int i; + + for (i = 0; i < B44_RX_RING_SIZE; i++) { + rp = &bp->rx_buffers[i]; + + if (rp->skb == NULL) + continue; + dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + dev_kfree_skb_any(rp->skb); + rp->skb = NULL; + } + + /* XXX needs changes once NETIF_F_SG is set... */ + for (i = 0; i < B44_TX_RING_SIZE; i++) { + rp = &bp->tx_buffers[i]; + + if (rp->skb == NULL) + continue; + dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len, + DMA_TO_DEVICE); + dev_kfree_skb_any(rp->skb); + rp->skb = NULL; + } +} + +/* Initialize tx/rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. + */ +static void b44_init_rings(struct b44 *bp) +{ + int i; + + b44_free_rings(bp); + + memset(bp->rx_ring, 0, B44_RX_RING_BYTES); + memset(bp->tx_ring, 0, B44_TX_RING_BYTES); + + if (bp->flags & B44_FLAG_RX_RING_HACK) + dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, DMA_BIDIRECTIONAL); + + if (bp->flags & B44_FLAG_TX_RING_HACK) + dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, DMA_TO_DEVICE); + + for (i = 0; i < bp->rx_pending; i++) { + if (b44_alloc_rx_skb(bp, -1, i) < 0) + break; + } +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. + */ +static void b44_free_consistent(struct b44 *bp) +{ + kfree(bp->rx_buffers); + bp->rx_buffers = NULL; + kfree(bp->tx_buffers); + bp->tx_buffers = NULL; + if (bp->rx_ring) { + if (bp->flags & B44_FLAG_RX_RING_HACK) { + dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, DMA_BIDIRECTIONAL); + kfree(bp->rx_ring); + } else + dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, + bp->rx_ring, bp->rx_ring_dma); + bp->rx_ring = NULL; + bp->flags &= ~B44_FLAG_RX_RING_HACK; + } + if (bp->tx_ring) { + if (bp->flags & B44_FLAG_TX_RING_HACK) { + dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, DMA_TO_DEVICE); + kfree(bp->tx_ring); + } else + dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, + bp->tx_ring, bp->tx_ring_dma); + bp->tx_ring = NULL; + bp->flags &= ~B44_FLAG_TX_RING_HACK; + } +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. Can sleep. 
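+ * The gfp argument is passed straight through to the buffer and
+ * descriptor-ring allocations below; b44_open() calls this with
+ * GFP_KERNEL.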
+ */ +static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) +{ + int size; + + size = B44_RX_RING_SIZE * sizeof(struct ring_info); + bp->rx_buffers = kzalloc(size, gfp); + if (!bp->rx_buffers) + goto out_err; + + size = B44_TX_RING_SIZE * sizeof(struct ring_info); + bp->tx_buffers = kzalloc(size, gfp); + if (!bp->tx_buffers) + goto out_err; + + size = DMA_TABLE_BYTES; + bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, + &bp->rx_ring_dma, gfp); + if (!bp->rx_ring) { + /* Allocation may have failed due to pci_alloc_consistent + insisting on use of GFP_DMA, which is more restrictive + than necessary... */ + struct dma_desc *rx_ring; + dma_addr_t rx_ring_dma; + + rx_ring = kzalloc(size, gfp); + if (!rx_ring) + goto out_err; + + rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) || + rx_ring_dma + size > DMA_BIT_MASK(30)) { + kfree(rx_ring); + goto out_err; + } + + bp->rx_ring = rx_ring; + bp->rx_ring_dma = rx_ring_dma; + bp->flags |= B44_FLAG_RX_RING_HACK; + } + + bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, + &bp->tx_ring_dma, gfp); + if (!bp->tx_ring) { + /* Allocation may have failed due to ssb_dma_alloc_consistent + insisting on use of GFP_DMA, which is more restrictive + than necessary... */ + struct dma_desc *tx_ring; + dma_addr_t tx_ring_dma; + + tx_ring = kzalloc(size, gfp); + if (!tx_ring) + goto out_err; + + tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); + + if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) || + tx_ring_dma + size > DMA_BIT_MASK(30)) { + kfree(tx_ring); + goto out_err; + } + + bp->tx_ring = tx_ring; + bp->tx_ring_dma = tx_ring_dma; + bp->flags |= B44_FLAG_TX_RING_HACK; + } + + return 0; + +out_err: + b44_free_consistent(bp); + return -ENOMEM; +} + +/* bp->lock is held. */ +static void b44_clear_stats(struct b44 *bp) +{ + unsigned long reg; + + bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); + for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) + br32(bp, reg); + for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) + br32(bp, reg); +} + +/* bp->lock is held. */ +static void b44_chip_reset(struct b44 *bp, int reset_kind) +{ + struct ssb_device *sdev = bp->sdev; + bool was_enabled; + + was_enabled = ssb_device_is_enabled(bp->sdev); + + ssb_device_enable(bp->sdev, 0); + ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev); + + if (was_enabled) { + bw32(bp, B44_RCV_LAZY, 0); + bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); + b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1); + bw32(bp, B44_DMATX_CTRL, 0); + bp->tx_prod = bp->tx_cons = 0; + if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) { + b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE, + 100, 0); + } + bw32(bp, B44_DMARX_CTRL, 0); + bp->rx_prod = bp->rx_cons = 0; + } + + b44_clear_stats(bp); + + /* + * Don't enable PHY if we are doing a partial reset + * we are probably going to power down + */ + if (reset_kind == B44_CHIP_RESET_PARTIAL) + return; + + switch (sdev->bus->bustype) { + case SSB_BUSTYPE_SSB: + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | + (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus), + B44_MDC_RATIO) + & MDIO_CTRL_MAXF_MASK))); + break; + case SSB_BUSTYPE_PCI: + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | + (0x0d & MDIO_CTRL_MAXF_MASK))); + break; + case SSB_BUSTYPE_PCMCIA: + case SSB_BUSTYPE_SDIO: + WARN_ON(1); /* A device with this bus does not exist. 
*/ + break; + } + + br32(bp, B44_MDIO_CTRL); + + if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { + bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL); + br32(bp, B44_ENET_CTRL); + bp->flags &= ~B44_FLAG_INTERNAL_PHY; + } else { + u32 val = br32(bp, B44_DEVCTRL); + + if (val & DEVCTRL_EPR) { + bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR)); + br32(bp, B44_DEVCTRL); + udelay(100); + } + bp->flags |= B44_FLAG_INTERNAL_PHY; + } +} + +/* bp->lock is held. */ +static void b44_halt(struct b44 *bp) +{ + b44_disable_ints(bp); + /* reset PHY */ + b44_phy_reset(bp); + /* power down PHY */ + netdev_info(bp->dev, "powering down PHY\n"); + bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN); + /* now reset the chip, but without enabling the MAC&PHY + * part of it. This has to be done _after_ we shut down the PHY */ + b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); +} + +/* bp->lock is held. */ +static void __b44_set_mac_addr(struct b44 *bp) +{ + bw32(bp, B44_CAM_CTRL, 0); + if (!(bp->dev->flags & IFF_PROMISC)) { + u32 val; + + __b44_cam_write(bp, bp->dev->dev_addr, 0); + val = br32(bp, B44_CAM_CTRL); + bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); + } +} + +static int b44_set_mac_addr(struct net_device *dev, void *p) +{ + struct b44 *bp = netdev_priv(dev); + struct sockaddr *addr = p; + u32 val; + + if (netif_running(dev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + spin_lock_irq(&bp->lock); + + val = br32(bp, B44_RXCONFIG); + if (!(val & RXCONFIG_CAM_ABSENT)) + __b44_set_mac_addr(bp); + + spin_unlock_irq(&bp->lock); + + return 0; +} + +/* Called at device open time to get the chip ready for + * packet processing. Invoked with bp->lock held. + */ +static void __b44_set_rx_mode(struct net_device *); +static void b44_init_hw(struct b44 *bp, int reset_kind) +{ + u32 val; + + b44_chip_reset(bp, B44_CHIP_RESET_FULL); + if (reset_kind == B44_FULL_RESET) { + b44_phy_reset(bp); + b44_setup_phy(bp); + } + + /* Enable CRC32, set proper LED modes and power on PHY */ + bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL); + bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT)); + + /* This sets the MAC address too. 
*/ + __b44_set_rx_mode(bp->dev); + + /* MTU + eth header + possible VLAN tag + struct rx_header */ + bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); + bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); + + bw32(bp, B44_TX_WMARK, 56); /* XXX magic */ + if (reset_kind == B44_PARTIAL_RESET) { + bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | + (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))); + } else { + bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE); + bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset); + bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | + (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))); + bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset); + + bw32(bp, B44_DMARX_PTR, bp->rx_pending); + bp->rx_prod = bp->rx_pending; + + bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); + } + + val = br32(bp, B44_ENET_CTRL); + bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE)); +} + +static int b44_open(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + int err; + + err = b44_alloc_consistent(bp, GFP_KERNEL); + if (err) + goto out; + + napi_enable(&bp->napi); + + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + + b44_check_phy(bp); + + err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); + if (unlikely(err < 0)) { + napi_disable(&bp->napi); + b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); + b44_free_rings(bp); + b44_free_consistent(bp); + goto out; + } + + init_timer(&bp->timer); + bp->timer.expires = jiffies + HZ; + bp->timer.data = (unsigned long) bp; + bp->timer.function = b44_timer; + add_timer(&bp->timer); + + b44_enable_ints(bp); + netif_start_queue(dev); +out: + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void b44_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + b44_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset) +{ + u32 i; + u32 *pattern = (u32 *) pp; + + for (i = 0; i < bytes; i += sizeof(u32)) { + bw32(bp, B44_FILT_ADDR, table_offset + i); + bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]); + } +} + +static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) +{ + int magicsync = 6; + int k, j, len = offset; + int ethaddr_bytes = ETH_ALEN; + + memset(ppattern + offset, 0xff, magicsync); + for (j = 0; j < magicsync; j++) + set_bit(len++, (unsigned long *) pmask); + + for (j = 0; j < B44_MAX_PATTERNS; j++) { + if ((B44_PATTERN_SIZE - len) >= ETH_ALEN) + ethaddr_bytes = ETH_ALEN; + else + ethaddr_bytes = B44_PATTERN_SIZE - len; + if (ethaddr_bytes <=0) + break; + for (k = 0; k< ethaddr_bytes; k++) { + ppattern[offset + magicsync + + (j * ETH_ALEN) + k] = macaddr[k]; + set_bit(len++, (unsigned long *) pmask); + } + } + return len - 1; +} + +/* Setup magic packet patterns in the b44 WOL + * pattern matching filter. 
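+ * A wake-on-LAN magic packet is six 0xff sync bytes followed by the
+ * MAC address repeated 16 times; b44_magic_pattern() above writes
+ * that pattern (and a mask of which bytes must match) at a given
+ * header offset, so the same frame is caught whether it arrives as
+ * raw ethernet, IPv4/UDP or IPv6/UDP.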
+ */ +static void b44_setup_pseudo_magicp(struct b44 *bp) +{ + u32 val; + int plen0, plen1, plen2; + u8 *pwol_pattern; + u8 pwol_mask[B44_PMASK_SIZE]; + + pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL); + if (!pwol_pattern) { + pr_err("Memory not available for WOL\n"); + return; + } + + /* IPv4 magic packet pattern - pattern 0. */ + memset(pwol_mask, 0, B44_PMASK_SIZE); + plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, + B44_ETHIPV4UDP_HLEN); + + bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE); + bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE); + + /* Raw Ethernet II magic packet pattern - pattern 1 */ + memset(pwol_pattern, 0, B44_PATTERN_SIZE); + memset(pwol_mask, 0, B44_PMASK_SIZE); + plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, + ETH_HLEN); + + bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, + B44_PATTERN_BASE + B44_PATTERN_SIZE); + bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, + B44_PMASK_BASE + B44_PMASK_SIZE); + + /* IPv6 magic packet pattern - pattern 2 */ + memset(pwol_pattern, 0, B44_PATTERN_SIZE); + memset(pwol_mask, 0, B44_PMASK_SIZE); + plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, + B44_ETHIPV6UDP_HLEN); + + bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, + B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE); + bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, + B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE); + + kfree(pwol_pattern); + + /* set these patterns' lengths: one less than each real length */ + val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE; + bw32(bp, B44_WKUP_LEN, val); + + /* enable wakeup pattern matching */ + val = br32(bp, B44_DEVCTRL); + bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE); +} + +#ifdef CONFIG_B44_PCI +static void b44_setup_wol_pci(struct b44 *bp) +{ + u16 val; + + if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) { + bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE); + pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val); + pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE); + } +} +#else +static inline void b44_setup_wol_pci(struct b44 *bp) { } +#endif /* CONFIG_B44_PCI */ + +static void b44_setup_wol(struct b44 *bp) +{ + u32 val; + + bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI); + + if (bp->flags & B44_FLAG_B0_ANDLATER) { + + bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE); + + val = bp->dev->dev_addr[2] << 24 | + bp->dev->dev_addr[3] << 16 | + bp->dev->dev_addr[4] << 8 | + bp->dev->dev_addr[5]; + bw32(bp, B44_ADDR_LO, val); + + val = bp->dev->dev_addr[0] << 8 | + bp->dev->dev_addr[1]; + bw32(bp, B44_ADDR_HI, val); + + val = br32(bp, B44_DEVCTRL); + bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE); + + } else { + b44_setup_pseudo_magicp(bp); + } + b44_setup_wol_pci(bp); +} + +static int b44_close(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + + netif_stop_queue(dev); + + napi_disable(&bp->napi); + + del_timer_sync(&bp->timer); + + spin_lock_irq(&bp->lock); + + b44_halt(bp); + b44_free_rings(bp); + netif_carrier_off(dev); + + spin_unlock_irq(&bp->lock); + + free_irq(dev->irq, dev); + + if (bp->flags & B44_FLAG_WOL_ENABLE) { + b44_init_hw(bp, B44_PARTIAL_RESET); + b44_setup_wol(bp); + } + + b44_free_consistent(bp); + + return 0; +} + +static struct net_device_stats *b44_get_stats(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + struct net_device_stats *nstat = &dev->stats; + struct b44_hw_stats *hwstat = &bp->hw_stats; + + /* Convert HW stats 
into netdevice stats. */ + nstat->rx_packets = hwstat->rx_pkts; + nstat->tx_packets = hwstat->tx_pkts; + nstat->rx_bytes = hwstat->rx_octets; + nstat->tx_bytes = hwstat->tx_octets; + nstat->tx_errors = (hwstat->tx_jabber_pkts + + hwstat->tx_oversize_pkts + + hwstat->tx_underruns + + hwstat->tx_excessive_cols + + hwstat->tx_late_cols); + /* multicast counts received multicasts, so use the RX-side MIB stat */ + nstat->multicast = hwstat->rx_multicast_pkts; + nstat->collisions = hwstat->tx_total_cols; + + nstat->rx_length_errors = (hwstat->rx_oversize_pkts + + hwstat->rx_undersize); + nstat->rx_over_errors = hwstat->rx_missed_pkts; + nstat->rx_frame_errors = hwstat->rx_align_errs; + nstat->rx_crc_errors = hwstat->rx_crc_errs; + nstat->rx_errors = (hwstat->rx_jabber_pkts + + hwstat->rx_oversize_pkts + + hwstat->rx_missed_pkts + + hwstat->rx_crc_align_errs + + hwstat->rx_undersize + + hwstat->rx_crc_errs + + hwstat->rx_align_errs + + hwstat->rx_symbol_errs); + + nstat->tx_aborted_errors = hwstat->tx_underruns; +#if 0 + /* Carrier lost counter seems to be broken for some devices */ + nstat->tx_carrier_errors = hwstat->tx_carrier_lost; +#endif + + return nstat; +} + +static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) +{ + struct netdev_hw_addr *ha; + int i, num_ents; + + num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE); + i = 0; + netdev_for_each_mc_addr(ha, dev) { + if (i == num_ents) + break; + __b44_cam_write(bp, ha->addr, i++ + 1); + } + return i+1; +} + +static void __b44_set_rx_mode(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + u32 val; + + val = br32(bp, B44_RXCONFIG); + val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); + if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) { + val |= RXCONFIG_PROMISC; + bw32(bp, B44_RXCONFIG, val); + } else { + unsigned char zero[6] = {0, 0, 0, 0, 0, 0}; + int i = 1; + + __b44_set_mac_addr(bp); + + if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE)) + val |= RXCONFIG_ALLMULTI; + else + i = __b44_load_mcast(bp, dev); + + for (; i < 64; i++) + __b44_cam_write(bp, zero, i); + + bw32(bp, B44_RXCONFIG, val); + val = br32(bp, B44_CAM_CTRL); + bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); + } +} + +static void b44_set_rx_mode(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + + spin_lock_irq(&bp->lock); + __b44_set_rx_mode(dev); + spin_unlock_irq(&bp->lock); +} + +static u32 b44_get_msglevel(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + return bp->msg_enable; +} + +static void b44_set_msglevel(struct net_device *dev, u32 value) +{ + struct b44 *bp = netdev_priv(dev); + bp->msg_enable = value; +} + +static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct b44 *bp = netdev_priv(dev); + struct ssb_bus *bus = bp->sdev->bus; + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info)); + break; + case SSB_BUSTYPE_SSB: + strlcpy(info->bus_info, "SSB", sizeof(info->bus_info)); + break; + case SSB_BUSTYPE_PCMCIA: + case SSB_BUSTYPE_SDIO: + WARN_ON(1); /* A device with this bus does not exist. 
*/ + break; + } +} + +static int b44_nway_reset(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + u32 bmcr; + int r; + + spin_lock_irq(&bp->lock); + b44_readphy(bp, MII_BMCR, &bmcr); + b44_readphy(bp, MII_BMCR, &bmcr); + r = -EINVAL; + if (bmcr & BMCR_ANENABLE) { + b44_writephy(bp, MII_BMCR, + bmcr | BMCR_ANRESTART); + r = 0; + } + spin_unlock_irq(&bp->lock); + + return r; +} + +static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct b44 *bp = netdev_priv(dev); + + cmd->supported = (SUPPORTED_Autoneg); + cmd->supported |= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_MII); + + cmd->advertising = 0; + if (bp->flags & B44_FLAG_ADV_10HALF) + cmd->advertising |= ADVERTISED_10baseT_Half; + if (bp->flags & B44_FLAG_ADV_10FULL) + cmd->advertising |= ADVERTISED_10baseT_Full; + if (bp->flags & B44_FLAG_ADV_100HALF) + cmd->advertising |= ADVERTISED_100baseT_Half; + if (bp->flags & B44_FLAG_ADV_100FULL) + cmd->advertising |= ADVERTISED_100baseT_Full; + cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ? + SPEED_100 : SPEED_10)); + cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? + DUPLEX_FULL : DUPLEX_HALF; + cmd->port = 0; + cmd->phy_address = bp->phy_addr; + cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ? + XCVR_INTERNAL : XCVR_EXTERNAL; + cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ? + AUTONEG_DISABLE : AUTONEG_ENABLE; + if (cmd->autoneg == AUTONEG_ENABLE) + cmd->advertising |= ADVERTISED_Autoneg; + if (!netif_running(dev)){ + ethtool_cmd_speed_set(cmd, 0); + cmd->duplex = 0xff; + } + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct b44 *bp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + + /* We do not support gigabit. 
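+ * (The 4400 MAC is a 10/100 part, so any 1000baseT advertisement is + * rejected with -EINVAL below.)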
*/ + if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->advertising & + (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) + return -EINVAL; + } else if ((speed != SPEED_100 && + speed != SPEED_10) || + (cmd->duplex != DUPLEX_HALF && + cmd->duplex != DUPLEX_FULL)) { + return -EINVAL; + } + + spin_lock_irq(&bp->lock); + + if (cmd->autoneg == AUTONEG_ENABLE) { + bp->flags &= ~(B44_FLAG_FORCE_LINK | + B44_FLAG_100_BASE_T | + B44_FLAG_FULL_DUPLEX | + B44_FLAG_ADV_10HALF | + B44_FLAG_ADV_10FULL | + B44_FLAG_ADV_100HALF | + B44_FLAG_ADV_100FULL); + if (cmd->advertising == 0) { + bp->flags |= (B44_FLAG_ADV_10HALF | + B44_FLAG_ADV_10FULL | + B44_FLAG_ADV_100HALF | + B44_FLAG_ADV_100FULL); + } else { + if (cmd->advertising & ADVERTISED_10baseT_Half) + bp->flags |= B44_FLAG_ADV_10HALF; + if (cmd->advertising & ADVERTISED_10baseT_Full) + bp->flags |= B44_FLAG_ADV_10FULL; + if (cmd->advertising & ADVERTISED_100baseT_Half) + bp->flags |= B44_FLAG_ADV_100HALF; + if (cmd->advertising & ADVERTISED_100baseT_Full) + bp->flags |= B44_FLAG_ADV_100FULL; + } + } else { + bp->flags |= B44_FLAG_FORCE_LINK; + bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX); + if (speed == SPEED_100) + bp->flags |= B44_FLAG_100_BASE_T; + if (cmd->duplex == DUPLEX_FULL) + bp->flags |= B44_FLAG_FULL_DUPLEX; + } + + if (netif_running(dev)) + b44_setup_phy(bp); + + spin_unlock_irq(&bp->lock); + + return 0; +} + +static void b44_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct b44 *bp = netdev_priv(dev); + + ering->rx_max_pending = B44_RX_RING_SIZE - 1; + ering->rx_pending = bp->rx_pending; + + /* XXX ethtool lacks a tx_max_pending, oops... */ +} + +static int b44_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct b44 *bp = netdev_priv(dev); + + if ((ering->rx_pending > B44_RX_RING_SIZE - 1) || + (ering->rx_mini_pending != 0) || + (ering->rx_jumbo_pending != 0) || + (ering->tx_pending > B44_TX_RING_SIZE - 1)) + return -EINVAL; + + spin_lock_irq(&bp->lock); + + bp->rx_pending = ering->rx_pending; + bp->tx_pending = ering->tx_pending; + + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + netif_wake_queue(bp->dev); + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + return 0; +} + +static void b44_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct b44 *bp = netdev_priv(dev); + + epause->autoneg = + (bp->flags & B44_FLAG_PAUSE_AUTO) != 0; + epause->rx_pause = + (bp->flags & B44_FLAG_RX_PAUSE) != 0; + epause->tx_pause = + (bp->flags & B44_FLAG_TX_PAUSE) != 0; +} + +static int b44_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct b44 *bp = netdev_priv(dev); + + spin_lock_irq(&bp->lock); + if (epause->autoneg) + bp->flags |= B44_FLAG_PAUSE_AUTO; + else + bp->flags &= ~B44_FLAG_PAUSE_AUTO; + if (epause->rx_pause) + bp->flags |= B44_FLAG_RX_PAUSE; + else + bp->flags &= ~B44_FLAG_RX_PAUSE; + if (epause->tx_pause) + bp->flags |= B44_FLAG_TX_PAUSE; + else + bp->flags &= ~B44_FLAG_TX_PAUSE; + if (bp->flags & B44_FLAG_PAUSE_AUTO) { + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + } else { + __b44_set_flow_ctrl(bp, bp->flags); + } + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + return 0; +} + +static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, *b44_gstrings, sizeof(b44_gstrings)); + break; + } +} + +static int b44_get_sset_count(struct 
net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(b44_gstrings); + default: + return -EOPNOTSUPP; + } +} + +static void b44_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct b44 *bp = netdev_priv(dev); + u32 *val = &bp->hw_stats.tx_good_octets; + u32 i; + + spin_lock_irq(&bp->lock); + + b44_stats_update(bp); + + for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) + *data++ = *val++; + + spin_unlock_irq(&bp->lock); +} + +static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct b44 *bp = netdev_priv(dev); + + wol->supported = WAKE_MAGIC; + if (bp->flags & B44_FLAG_WOL_ENABLE) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct b44 *bp = netdev_priv(dev); + + spin_lock_irq(&bp->lock); + if (wol->wolopts & WAKE_MAGIC) + bp->flags |= B44_FLAG_WOL_ENABLE; + else + bp->flags &= ~B44_FLAG_WOL_ENABLE; + spin_unlock_irq(&bp->lock); + + return 0; +} + +static const struct ethtool_ops b44_ethtool_ops = { + .get_drvinfo = b44_get_drvinfo, + .get_settings = b44_get_settings, + .set_settings = b44_set_settings, + .nway_reset = b44_nway_reset, + .get_link = ethtool_op_get_link, + .get_wol = b44_get_wol, + .set_wol = b44_set_wol, + .get_ringparam = b44_get_ringparam, + .set_ringparam = b44_set_ringparam, + .get_pauseparam = b44_get_pauseparam, + .set_pauseparam = b44_set_pauseparam, + .get_msglevel = b44_get_msglevel, + .set_msglevel = b44_set_msglevel, + .get_strings = b44_get_strings, + .get_sset_count = b44_get_sset_count, + .get_ethtool_stats = b44_get_ethtool_stats, +}; + +static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct b44 *bp = netdev_priv(dev); + int err = -EINVAL; + + if (!netif_running(dev)) + goto out; + + spin_lock_irq(&bp->lock); + err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL); + spin_unlock_irq(&bp->lock); +out: + return err; +} + +static int __devinit b44_get_invariants(struct b44 *bp) +{ + struct ssb_device *sdev = bp->sdev; + int err = 0; + u8 *addr; + + bp->dma_offset = ssb_dma_translation(sdev); + + if (sdev->bus->bustype == SSB_BUSTYPE_SSB && + instance > 1) { + addr = sdev->bus->sprom.et1mac; + bp->phy_addr = sdev->bus->sprom.et1phyaddr; + } else { + addr = sdev->bus->sprom.et0mac; + bp->phy_addr = sdev->bus->sprom.et0phyaddr; + } + /* Some ROMs have buggy PHY addresses with the high + * bits set (sign extension?). Truncate them to a + * valid PHY address. */ + bp->phy_addr &= 0x1F; + + memcpy(bp->dev->dev_addr, addr, 6); + + if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ + pr_err("Invalid MAC address found in EEPROM\n"); + return -EINVAL; + } + + memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); + + bp->imask = IMASK_DEF; + + /* XXX - really required? 
+ bp->flags |= B44_FLAG_BUGGY_TXPTR; + */ + + if (bp->sdev->id.revision >= 7) + bp->flags |= B44_FLAG_B0_ANDLATER; + + return err; +} + +static const struct net_device_ops b44_netdev_ops = { + .ndo_open = b44_open, + .ndo_stop = b44_close, + .ndo_start_xmit = b44_start_xmit, + .ndo_get_stats = b44_get_stats, + .ndo_set_multicast_list = b44_set_rx_mode, + .ndo_set_mac_address = b44_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = b44_ioctl, + .ndo_tx_timeout = b44_tx_timeout, + .ndo_change_mtu = b44_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = b44_poll_controller, +#endif +}; + +static int __devinit b44_init_one(struct ssb_device *sdev, + const struct ssb_device_id *ent) +{ + struct net_device *dev; + struct b44 *bp; + int err; + + instance++; + + pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION); + + dev = alloc_etherdev(sizeof(*bp)); + if (!dev) { + dev_err(sdev->dev, "Etherdev alloc failed, aborting\n"); + err = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(dev, sdev->dev); + + /* No interesting netdevice features in this card... */ + dev->features |= 0; + + bp = netdev_priv(dev); + bp->sdev = sdev; + bp->dev = dev; + bp->force_copybreak = 0; + + bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); + + spin_lock_init(&bp->lock); + + bp->rx_pending = B44_DEF_RX_RING_PENDING; + bp->tx_pending = B44_DEF_TX_RING_PENDING; + + dev->netdev_ops = &b44_netdev_ops; + netif_napi_add(dev, &bp->napi, b44_poll, 64); + dev->watchdog_timeo = B44_TX_TIMEOUT; + dev->irq = sdev->irq; + SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); + + err = ssb_bus_powerup(sdev->bus, 0); + if (err) { + dev_err(sdev->dev, + "Failed to powerup the bus\n"); + goto err_out_free_dev; + } + + if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) || + dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) { + dev_err(sdev->dev, + "Required 30BIT DMA mask unsupported by the system\n"); + goto err_out_powerdown; + } + + err = b44_get_invariants(bp); + if (err) { + dev_err(sdev->dev, + "Problem fetching invariants of chip, aborting\n"); + goto err_out_powerdown; + } + + bp->mii_if.dev = dev; + bp->mii_if.mdio_read = b44_mii_read; + bp->mii_if.mdio_write = b44_mii_write; + bp->mii_if.phy_id = bp->phy_addr; + bp->mii_if.phy_id_mask = 0x1f; + bp->mii_if.reg_num_mask = 0x1f; + + /* By default, advertise all speed/duplex settings. */ + bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL | + B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL); + + /* By default, auto-negotiate PAUSE. */ + bp->flags |= B44_FLAG_PAUSE_AUTO; + + err = register_netdev(dev); + if (err) { + dev_err(sdev->dev, "Cannot register net device, aborting\n"); + goto err_out_powerdown; + } + + netif_carrier_off(dev); + + ssb_set_drvdata(sdev, dev); + + /* Chip reset provides power to the b44 MAC & PCI cores, which + * is necessary for MAC register access. 
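+ * A full reset also powers the PHY, so the b44_phy_reset() probe just + * below can tell whether a PHY is present at all.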
+ */ + b44_chip_reset(bp, B44_CHIP_RESET_FULL); + + /* do a phy reset to test if there is an active phy */ + if (b44_phy_reset(bp) < 0) + bp->phy_addr = B44_PHY_ADDR_NO_PHY; + + netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr); + + return 0; + +err_out_powerdown: + ssb_bus_may_powerdown(sdev->bus); + +err_out_free_dev: + free_netdev(dev); + +out: + return err; +} + +static void __devexit b44_remove_one(struct ssb_device *sdev) +{ + struct net_device *dev = ssb_get_drvdata(sdev); + + unregister_netdev(dev); + ssb_device_disable(sdev, 0); + ssb_bus_may_powerdown(sdev->bus); + free_netdev(dev); + ssb_pcihost_set_power_state(sdev, PCI_D3hot); + ssb_set_drvdata(sdev, NULL); +} + +static int b44_suspend(struct ssb_device *sdev, pm_message_t state) +{ + struct net_device *dev = ssb_get_drvdata(sdev); + struct b44 *bp = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + del_timer_sync(&bp->timer); + + spin_lock_irq(&bp->lock); + + b44_halt(bp); + netif_carrier_off(bp->dev); + netif_device_detach(bp->dev); + b44_free_rings(bp); + + spin_unlock_irq(&bp->lock); + + free_irq(dev->irq, dev); + if (bp->flags & B44_FLAG_WOL_ENABLE) { + b44_init_hw(bp, B44_PARTIAL_RESET); + b44_setup_wol(bp); + } + + ssb_pcihost_set_power_state(sdev, PCI_D3hot); + return 0; +} + +static int b44_resume(struct ssb_device *sdev) +{ + struct net_device *dev = ssb_get_drvdata(sdev); + struct b44 *bp = netdev_priv(dev); + int rc = 0; + + rc = ssb_bus_powerup(sdev->bus, 0); + if (rc) { + dev_err(sdev->dev, + "Failed to powerup the bus\n"); + return rc; + } + + if (!netif_running(dev)) + return 0; + + spin_lock_irq(&bp->lock); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + spin_unlock_irq(&bp->lock); + + /* + * As a shared interrupt, the handler can be called immediately. To be + * able to check the interrupt status the hardware must already be + * powered back on (b44_init_hw). 
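+ * (With IRQF_SHARED, another device on the same line can fire the + * handler as soon as request_irq() returns.)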
+ */ + rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); + if (rc) { + netdev_err(dev, "request_irq failed\n"); + spin_lock_irq(&bp->lock); + b44_halt(bp); + b44_free_rings(bp); + spin_unlock_irq(&bp->lock); + return rc; + } + + netif_device_attach(bp->dev); + + b44_enable_ints(bp); + netif_wake_queue(dev); + + mod_timer(&bp->timer, jiffies + 1); + + return 0; +} + +static struct ssb_driver b44_ssb_driver = { + .name = DRV_MODULE_NAME, + .id_table = b44_ssb_tbl, + .probe = b44_init_one, + .remove = __devexit_p(b44_remove_one), + .suspend = b44_suspend, + .resume = b44_resume, +}; + +static inline int __init b44_pci_init(void) +{ + int err = 0; +#ifdef CONFIG_B44_PCI + err = ssb_pcihost_register(&b44_pci_driver); +#endif + return err; +} + +static inline void __exit b44_pci_exit(void) +{ +#ifdef CONFIG_B44_PCI + ssb_pcihost_unregister(&b44_pci_driver); +#endif +} + +static int __init b44_init(void) +{ + unsigned int dma_desc_align_size = dma_get_cache_alignment(); + int err; + + /* Set up parameters for syncing RX/TX DMA descriptors */ + dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); + + err = b44_pci_init(); + if (err) + return err; + err = ssb_driver_register(&b44_ssb_driver); + if (err) + b44_pci_exit(); + return err; +} + +static void __exit b44_cleanup(void) +{ + ssb_driver_unregister(&b44_ssb_driver); + b44_pci_exit(); +} + +module_init(b44_init); +module_exit(b44_cleanup); + diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h new file mode 100644 index 0000000..e1905a4 --- /dev/null +++ b/drivers/net/ethernet/broadcom/b44.h @@ -0,0 +1,401 @@ +#ifndef _B44_H +#define _B44_H + +/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */ +#define B44_DEVCTRL 0x0000UL /* Device Control */ +#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */ +#define DEVCTRL_PFE 0x00000080 /* Pattern Filtering Enable */ +#define DEVCTRL_IPP 0x00000400 /* Internal EPHY Present */ +#define DEVCTRL_EPR 0x00008000 /* EPHY Reset */ +#define DEVCTRL_PME 0x00001000 /* PHY Mode Enable */ +#define DEVCTRL_PMCE 0x00002000 /* PHY Mode Clocks Enable */ +#define DEVCTRL_PADDR 0x0007c000 /* PHY Address */ +#define DEVCTRL_PADDR_SHIFT 18 +#define B44_BIST_STAT 0x000CUL /* Built-In Self-Test Status */ +#define B44_WKUP_LEN 0x0010UL /* Wakeup Length */ +#define WKUP_LEN_P0_MASK 0x0000007f /* Pattern 0 */ +#define WKUP_LEN_D0 0x00000080 +#define WKUP_LEN_P1_MASK 0x00007f00 /* Pattern 1 */ +#define WKUP_LEN_P1_SHIFT 8 +#define WKUP_LEN_D1 0x00008000 +#define WKUP_LEN_P2_MASK 0x007f0000 /* Pattern 2 */ +#define WKUP_LEN_P2_SHIFT 16 +#define WKUP_LEN_D2 0x00000000 +#define WKUP_LEN_P3_MASK 0x7f000000 /* Pattern 3 */ +#define WKUP_LEN_P3_SHIFT 24 +#define WKUP_LEN_D3 0x80000000 +#define WKUP_LEN_DISABLE 0x80808080 +#define WKUP_LEN_ENABLE_TWO 0x80800000 +#define WKUP_LEN_ENABLE_THREE 0x80000000 +#define B44_ISTAT 0x0020UL /* Interrupt Status */ +#define ISTAT_LS 0x00000020 /* Link Change (B0 only) */ +#define ISTAT_PME 0x00000040 /* Power Management Event */ +#define ISTAT_TO 0x00000080 /* General Purpose Timeout */ +#define ISTAT_DSCE 0x00000400 /* Descriptor Error */ +#define ISTAT_DATAE 0x00000800 /* Data Error */ +#define ISTAT_DPE 0x00001000 /* Descr. Protocol Error */ +#define ISTAT_RDU 0x00002000 /* Receive Descr. 
Underflow */ +#define ISTAT_RFO 0x00004000 /* Receive FIFO Overflow */ +#define ISTAT_TFU 0x00008000 /* Transmit FIFO Underflow */ +#define ISTAT_RX 0x00010000 /* RX Interrupt */ +#define ISTAT_TX 0x01000000 /* TX Interrupt */ +#define ISTAT_EMAC 0x04000000 /* EMAC Interrupt */ +#define ISTAT_MII_WRITE 0x08000000 /* MII Write Interrupt */ +#define ISTAT_MII_READ 0x10000000 /* MII Read Interrupt */ +#define ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU) +#define B44_IMASK 0x0024UL /* Interrupt Mask */ +#define IMASK_DEF (ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX) +#define B44_GPTIMER 0x0028UL /* General Purpose Timer */ +#define B44_ADDR_LO 0x0088UL /* ENET Address Lo (B0 only) */ +#define B44_ADDR_HI 0x008CUL /* ENET Address Hi (B0 only) */ +#define B44_FILT_ADDR 0x0090UL /* ENET Filter Address */ +#define B44_FILT_DATA 0x0094UL /* ENET Filter Data */ +#define B44_TXBURST 0x00A0UL /* TX Max Burst Length */ +#define B44_RXBURST 0x00A4UL /* RX Max Burst Length */ +#define B44_MAC_CTRL 0x00A8UL /* MAC Control */ +#define MAC_CTRL_CRC32_ENAB 0x00000001 /* CRC32 Generation Enable */ +#define MAC_CTRL_PHY_PDOWN 0x00000004 /* Onchip EPHY Powerdown */ +#define MAC_CTRL_PHY_EDET 0x00000008 /* Onchip EPHY Energy Detected */ +#define MAC_CTRL_PHY_LEDCTRL 0x000000e0 /* Onchip EPHY LED Control */ +#define MAC_CTRL_PHY_LEDCTRL_SHIFT 5 +#define B44_MAC_FLOW 0x00ACUL /* MAC Flow Control */ +#define MAC_FLOW_RX_HI_WATER 0x000000ff /* Receive FIFO HI Water Mark */ +#define MAC_FLOW_PAUSE_ENAB 0x00008000 /* Enable Pause Frame Generation */ +#define B44_RCV_LAZY 0x0100UL /* Lazy Interrupt Control */ +#define RCV_LAZY_TO_MASK 0x00ffffff /* Timeout */ +#define RCV_LAZY_FC_MASK 0xff000000 /* Frame Count */ +#define RCV_LAZY_FC_SHIFT 24 +#define B44_DMATX_CTRL 0x0200UL /* DMA TX Control */ +#define DMATX_CTRL_ENABLE 0x00000001 /* Enable */ +#define DMATX_CTRL_SUSPEND 0x00000002 /* Suspend Request */ +#define DMATX_CTRL_LPBACK 0x00000004 /* Loopback Enable */ +#define DMATX_CTRL_FAIRPRIOR 0x00000008 /* Fair Priority */ +#define DMATX_CTRL_FLUSH 0x00000010 /* Flush Request */ +#define B44_DMATX_ADDR 0x0204UL /* DMA TX Descriptor Ring Address */ +#define B44_DMATX_PTR 0x0208UL /* DMA TX Last Posted Descriptor */ +#define B44_DMATX_STAT 0x020CUL /* DMA TX Current Active Desc. + Status */ +#define DMATX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ +#define DMATX_STAT_SMASK 0x0000f000 /* State Mask */ +#define DMATX_STAT_SDISABLED 0x00000000 /* State Disabled */ +#define DMATX_STAT_SACTIVE 0x00001000 /* State Active */ +#define DMATX_STAT_SIDLE 0x00002000 /* State Idle Wait */ +#define DMATX_STAT_SSTOPPED 0x00003000 /* State Stopped */ +#define DMATX_STAT_SSUSP 0x00004000 /* State Suspend Pending */ +#define DMATX_STAT_EMASK 0x000f0000 /* Error Mask */ +#define DMATX_STAT_ENONE 0x00000000 /* Error None */ +#define DMATX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */ +#define DMATX_STAT_EDFU 0x00020000 /* Error Data FIFO Underrun */ +#define DMATX_STAT_EBEBR 0x00030000 /* Error Bus Error on Buffer Read */ +#define DMATX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. 
Access */ +#define DMATX_STAT_FLUSHED 0x00100000 /* Flushed */ +#define B44_DMARX_CTRL 0x0210UL /* DMA RX Control */ +#define DMARX_CTRL_ENABLE 0x00000001 /* Enable */ +#define DMARX_CTRL_ROMASK 0x000000fe /* Receive Offset Mask */ +#define DMARX_CTRL_ROSHIFT 1 /* Receive Offset Shift */ +#define B44_DMARX_ADDR 0x0214UL /* DMA RX Descriptor Ring Address */ +#define B44_DMARX_PTR 0x0218UL /* DMA RX Last Posted Descriptor */ +#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */ +#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ +#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */ +#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */ +#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */ +#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */ +#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */ +#define DMARX_STAT_EMASK 0x000f0000 /* Error Mask */ +#define DMARX_STAT_ENONE 0x00000000 /* Error None */ +#define DMARX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */ +#define DMARX_STAT_EDFO 0x00020000 /* Error Data FIFO Overflow */ +#define DMARX_STAT_EBEBW 0x00030000 /* Error Bus Error on Buffer Write */ +#define DMARX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */ +#define B44_DMAFIFO_AD 0x0220UL /* DMA FIFO Diag Address */ +#define DMAFIFO_AD_OMASK 0x0000ffff /* Offset Mask */ +#define DMAFIFO_AD_SMASK 0x000f0000 /* Select Mask */ +#define DMAFIFO_AD_SXDD 0x00000000 /* Select Transmit DMA Data */ +#define DMAFIFO_AD_SXDP 0x00010000 /* Select Transmit DMA Pointers */ +#define DMAFIFO_AD_SRDD 0x00040000 /* Select Receive DMA Data */ +#define DMAFIFO_AD_SRDP 0x00050000 /* Select Receive DMA Pointers */ +#define DMAFIFO_AD_SXFD 0x00080000 /* Select Transmit FIFO Data */ +#define DMAFIFO_AD_SXFP 0x00090000 /* Select Transmit FIFO Pointers */ +#define DMAFIFO_AD_SRFD 0x000c0000 /* Select Receive FIFO Data */ +#define DMAFIFO_AD_SRFP 0x000c0000 /* Select Receive FIFO Pointers */ +#define B44_DMAFIFO_LO 0x0224UL /* DMA FIFO Diag Low Data */ +#define B44_DMAFIFO_HI 0x0228UL /* DMA FIFO Diag High Data */ +#define B44_RXCONFIG 0x0400UL /* EMAC RX Config */ +#define RXCONFIG_DBCAST 0x00000001 /* Disable Broadcast */ +#define RXCONFIG_ALLMULTI 0x00000002 /* Accept All Multicast */ +#define RXCONFIG_NORX_WHILE_TX 0x00000004 /* Receive Disable While Transmitting */ +#define RXCONFIG_PROMISC 0x00000008 /* Promiscuous Enable */ +#define RXCONFIG_LPBACK 0x00000010 /* Loopback Enable */ +#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */ +#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */ +#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */ +#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */ +#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */ +#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */ +#define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */ +#define MDIO_CTRL_MAXF_MASK 0x0000007f /* MDC Frequency */ +#define MDIO_CTRL_PREAMBLE 0x00000080 /* MII Preamble Enable */ +#define B44_MDIO_DATA 0x0414UL /* EMAC MDIO Data */ +#define MDIO_DATA_DATA 0x0000ffff /* R/W Data */ +#define MDIO_DATA_TA_MASK 0x00030000 /* Turnaround Value */ +#define MDIO_DATA_TA_SHIFT 16 +#define MDIO_TA_VALID 2 +#define MDIO_DATA_RA_MASK 0x007c0000 /* Register Address */ +#define MDIO_DATA_RA_SHIFT 18 +#define MDIO_DATA_PMD_MASK 0x0f800000 /* Physical Media Device */ +#define MDIO_DATA_PMD_SHIFT 23 +#define MDIO_DATA_OP_MASK 0x30000000 /* Opcode */ +#define MDIO_DATA_OP_SHIFT 28 +#define 
MDIO_OP_WRITE 1 +#define MDIO_OP_READ 2 +#define MDIO_DATA_SB_MASK 0xc0000000 /* Start Bits */ +#define MDIO_DATA_SB_SHIFT 30 +#define MDIO_DATA_SB_START 0x40000000 /* Start Of Frame */ +#define B44_EMAC_IMASK 0x0418UL /* EMAC Interrupt Mask */ +#define B44_EMAC_ISTAT 0x041CUL /* EMAC Interrupt Status */ +#define EMAC_INT_MII 0x00000001 /* MII MDIO Interrupt */ +#define EMAC_INT_MIB 0x00000002 /* MIB Interrupt */ +#define EMAC_INT_FLOW 0x00000003 /* Flow Control Interrupt */ +#define B44_CAM_DATA_LO 0x0420UL /* EMAC CAM Data Low */ +#define B44_CAM_DATA_HI 0x0424UL /* EMAC CAM Data High */ +#define CAM_DATA_HI_VALID 0x00010000 /* Valid Bit */ +#define B44_CAM_CTRL 0x0428UL /* EMAC CAM Control */ +#define CAM_CTRL_ENABLE 0x00000001 /* CAM Enable */ +#define CAM_CTRL_MSEL 0x00000002 /* Mask Select */ +#define CAM_CTRL_READ 0x00000004 /* Read */ +#define CAM_CTRL_WRITE 0x00000008 /* Write */ +#define CAM_CTRL_INDEX_MASK 0x003f0000 /* Index Mask */ +#define CAM_CTRL_INDEX_SHIFT 16 +#define CAM_CTRL_BUSY 0x80000000 /* CAM Busy */ +#define B44_ENET_CTRL 0x042CUL /* EMAC ENET Control */ +#define ENET_CTRL_ENABLE 0x00000001 /* EMAC Enable */ +#define ENET_CTRL_DISABLE 0x00000002 /* EMAC Disable */ +#define ENET_CTRL_SRST 0x00000004 /* EMAC Soft Reset */ +#define ENET_CTRL_EPSEL 0x00000008 /* External PHY Select */ +#define B44_TX_CTRL 0x0430UL /* EMAC TX Control */ +#define TX_CTRL_DUPLEX 0x00000001 /* Full Duplex */ +#define TX_CTRL_FMODE 0x00000002 /* Flow Mode */ +#define TX_CTRL_SBENAB 0x00000004 /* Single Backoff Enable */ +#define TX_CTRL_SMALL_SLOT 0x00000008 /* Small Slottime */ +#define B44_TX_WMARK 0x0434UL /* EMAC TX Watermark */ +#define B44_MIB_CTRL 0x0438UL /* EMAC MIB Control */ +#define MIB_CTRL_CLR_ON_READ 0x00000001 /* Autoclear on Read */ +#define B44_TX_GOOD_O 0x0500UL /* MIB TX Good Octets */ +#define B44_TX_GOOD_P 0x0504UL /* MIB TX Good Packets */ +#define B44_TX_O 0x0508UL /* MIB TX Octets */ +#define B44_TX_P 0x050CUL /* MIB TX Packets */ +#define B44_TX_BCAST 0x0510UL /* MIB TX Broadcast Packets */ +#define B44_TX_MCAST 0x0514UL /* MIB TX Multicast Packets */ +#define B44_TX_64 0x0518UL /* MIB TX <= 64 byte Packets */ +#define B44_TX_65_127 0x051CUL /* MIB TX 65 to 127 byte Packets */ +#define B44_TX_128_255 0x0520UL /* MIB TX 128 to 255 byte Packets */ +#define B44_TX_256_511 0x0524UL /* MIB TX 256 to 511 byte Packets */ +#define B44_TX_512_1023 0x0528UL /* MIB TX 512 to 1023 byte Packets */ +#define B44_TX_1024_MAX 0x052CUL /* MIB TX 1024 to max byte Packets */ +#define B44_TX_JABBER 0x0530UL /* MIB TX Jabber Packets */ +#define B44_TX_OSIZE 0x0534UL /* MIB TX Oversize Packets */ +#define B44_TX_FRAG 0x0538UL /* MIB TX Fragment Packets */ +#define B44_TX_URUNS 0x053CUL /* MIB TX Underruns */ +#define B44_TX_TCOLS 0x0540UL /* MIB TX Total Collisions */ +#define B44_TX_SCOLS 0x0544UL /* MIB TX Single Collisions */ +#define B44_TX_MCOLS 0x0548UL /* MIB TX Multiple Collisions */ +#define B44_TX_ECOLS 0x054CUL /* MIB TX Excessive Collisions */ +#define B44_TX_LCOLS 0x0550UL /* MIB TX Late Collisions */ +#define B44_TX_DEFERED 0x0554UL /* MIB TX Deferred Packets */ +#define B44_TX_CLOST 0x0558UL /* MIB TX Carrier Lost */ +#define B44_TX_PAUSE 0x055CUL /* MIB TX Pause Packets */ +#define B44_RX_GOOD_O 0x0580UL /* MIB RX Good Octets */ +#define B44_RX_GOOD_P 0x0584UL /* MIB RX Good Packets */ +#define B44_RX_O 0x0588UL /* MIB RX Octets */ +#define B44_RX_P 0x058CUL /* MIB RX Packets */ +#define B44_RX_BCAST 0x0590UL /* MIB RX Broadcast Packets */ +#define B44_RX_MCAST 0x0594UL /* 
MIB RX Multicast Packets */ +#define B44_RX_64 0x0598UL /* MIB RX <= 64 byte Packets */ +#define B44_RX_65_127 0x059CUL /* MIB RX 65 to 127 byte Packets */ +#define B44_RX_128_255 0x05A0UL /* MIB RX 128 to 255 byte Packets */ +#define B44_RX_256_511 0x05A4UL /* MIB RX 256 to 511 byte Packets */ +#define B44_RX_512_1023 0x05A8UL /* MIB RX 512 to 1023 byte Packets */ +#define B44_RX_1024_MAX 0x05ACUL /* MIB RX 1024 to max byte Packets */ +#define B44_RX_JABBER 0x05B0UL /* MIB RX Jabber Packets */ +#define B44_RX_OSIZE 0x05B4UL /* MIB RX Oversize Packets */ +#define B44_RX_FRAG 0x05B8UL /* MIB RX Fragment Packets */ +#define B44_RX_MISS 0x05BCUL /* MIB RX Missed Packets */ +#define B44_RX_CRCA 0x05C0UL /* MIB RX CRC Align Errors */ +#define B44_RX_USIZE 0x05C4UL /* MIB RX Undersize Packets */ +#define B44_RX_CRC 0x05C8UL /* MIB RX CRC Errors */ +#define B44_RX_ALIGN 0x05CCUL /* MIB RX Align Errors */ +#define B44_RX_SYM 0x05D0UL /* MIB RX Symbol Errors */ +#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */ +#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */ + +/* 4400 PHY registers */ +#define B44_MII_AUXCTRL 24 /* Auxiliary Control */ +#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */ +#define MII_AUXCTRL_SPEED 0x0002 /* 1=100Mbps, 0=10Mbps */ +#define MII_AUXCTRL_FORCED 0x0004 /* Forced 10/100 */ +#define B44_MII_ALEDCTRL 26 /* Activity LED */ +#define MII_ALEDCTRL_ALLMSK 0x7fff +#define B44_MII_TLEDCTRL 27 /* Traffic Meter LED */ +#define MII_TLEDCTRL_ENABLE 0x0040 + +struct dma_desc { + __le32 ctrl; + __le32 addr; +}; + +/* There are only 12 bits in the DMA engine for descriptor offsetting + * so the table must be aligned on a boundary of this. + */ +#define DMA_TABLE_BYTES 4096 + +#define DESC_CTRL_LEN 0x00001fff +#define DESC_CTRL_CMASK 0x0ff00000 /* Core specific bits */ +#define DESC_CTRL_EOT 0x10000000 /* End of Table */ +#define DESC_CTRL_IOC 0x20000000 /* Interrupt On Completion */ +#define DESC_CTRL_EOF 0x40000000 /* End of Frame */ +#define DESC_CTRL_SOF 0x80000000 /* Start of Frame */ + +#define RX_COPY_THRESHOLD 256 + +struct rx_header { + __le16 len; + __le16 flags; + __le16 pad[12]; +}; +#define RX_HEADER_LEN 28 + +#define RX_FLAG_OFIFO 0x00000001 /* FIFO Overflow */ +#define RX_FLAG_CRCERR 0x00000002 /* CRC Error */ +#define RX_FLAG_SERR 0x00000004 /* Receive Symbol Error */ +#define RX_FLAG_ODD 0x00000008 /* Frame has odd number of nibbles */ +#define RX_FLAG_LARGE 0x00000010 /* Frame is > RX MAX Length */ +#define RX_FLAG_MCAST 0x00000020 /* Dest is Multicast Address */ +#define RX_FLAG_BCAST 0x00000040 /* Dest is Broadcast Address */ +#define RX_FLAG_MISS 0x00000080 /* Received due to promisc mode */ +#define RX_FLAG_LAST 0x00000800 /* Last buffer in frame */ +#define RX_FLAG_ERRORS (RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO) + +struct ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; + +#define B44_MCAST_TABLE_SIZE 32 +#define B44_PHY_ADDR_NO_PHY 30 +#define B44_MDC_RATIO 5000000 + +#define B44_STAT_REG_DECLARE \ + _B44(tx_good_octets) \ + _B44(tx_good_pkts) \ + _B44(tx_octets) \ + _B44(tx_pkts) \ + _B44(tx_broadcast_pkts) \ + _B44(tx_multicast_pkts) \ + _B44(tx_len_64) \ + _B44(tx_len_65_to_127) \ + _B44(tx_len_128_to_255) \ + _B44(tx_len_256_to_511) \ + _B44(tx_len_512_to_1023) \ + _B44(tx_len_1024_to_max) \ + _B44(tx_jabber_pkts) \ + _B44(tx_oversize_pkts) \ + _B44(tx_fragment_pkts) \ + _B44(tx_underruns) \ + _B44(tx_total_cols) \ + _B44(tx_single_cols) \ + _B44(tx_multiple_cols) \ + _B44(tx_excessive_cols) \ + 
_B44(tx_late_cols) \ + _B44(tx_defered) \ + _B44(tx_carrier_lost) \ + _B44(tx_pause_pkts) \ + _B44(rx_good_octets) \ + _B44(rx_good_pkts) \ + _B44(rx_octets) \ + _B44(rx_pkts) \ + _B44(rx_broadcast_pkts) \ + _B44(rx_multicast_pkts) \ + _B44(rx_len_64) \ + _B44(rx_len_65_to_127) \ + _B44(rx_len_128_to_255) \ + _B44(rx_len_256_to_511) \ + _B44(rx_len_512_to_1023) \ + _B44(rx_len_1024_to_max) \ + _B44(rx_jabber_pkts) \ + _B44(rx_oversize_pkts) \ + _B44(rx_fragment_pkts) \ + _B44(rx_missed_pkts) \ + _B44(rx_crc_align_errs) \ + _B44(rx_undersize) \ + _B44(rx_crc_errs) \ + _B44(rx_align_errs) \ + _B44(rx_symbol_errs) \ + _B44(rx_pause_pkts) \ + _B44(rx_nonpause_pkts) + +/* SW copy of device statistics, kept up to date by periodic timer + * which probes HW values. Check b44_stats_update if you mess with + * the layout + */ +struct b44_hw_stats { +#define _B44(x) u32 x; +B44_STAT_REG_DECLARE +#undef _B44 +}; + +struct ssb_device; + +struct b44 { + spinlock_t lock; + + u32 imask, istat; + + struct dma_desc *rx_ring, *tx_ring; + + u32 tx_prod, tx_cons; + u32 rx_prod, rx_cons; + + struct ring_info *rx_buffers; + struct ring_info *tx_buffers; + + struct napi_struct napi; + + u32 dma_offset; + u32 flags; +#define B44_FLAG_B0_ANDLATER 0x00000001 +#define B44_FLAG_BUGGY_TXPTR 0x00000002 +#define B44_FLAG_REORDER_BUG 0x00000004 +#define B44_FLAG_PAUSE_AUTO 0x00008000 +#define B44_FLAG_FULL_DUPLEX 0x00010000 +#define B44_FLAG_100_BASE_T 0x00020000 +#define B44_FLAG_TX_PAUSE 0x00040000 +#define B44_FLAG_RX_PAUSE 0x00080000 +#define B44_FLAG_FORCE_LINK 0x00100000 +#define B44_FLAG_ADV_10HALF 0x01000000 +#define B44_FLAG_ADV_10FULL 0x02000000 +#define B44_FLAG_ADV_100HALF 0x04000000 +#define B44_FLAG_ADV_100FULL 0x08000000 +#define B44_FLAG_INTERNAL_PHY 0x10000000 +#define B44_FLAG_RX_RING_HACK 0x20000000 +#define B44_FLAG_TX_RING_HACK 0x40000000 +#define B44_FLAG_WOL_ENABLE 0x80000000 + + u32 msg_enable; + + struct timer_list timer; + + struct b44_hw_stats hw_stats; + + struct ssb_device *sdev; + struct net_device *dev; + + dma_addr_t rx_ring_dma, tx_ring_dma; + + u32 rx_pending; + u32 tx_pending; + u8 phy_addr; + u8 force_copybreak; + struct mii_if_info mii_if; +}; + +#endif /* _B44_H */ diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c new file mode 100644 index 0000000..1d9b985 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -0,0 +1,1966 @@ +/* + * Driver for BCM963xx builtin Ethernet mac + * + * Copyright (C) 2008 Maxime Bizon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/etherdevice.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/crc32.h> +#include <linux/err.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/if_vlan.h> + +#include <bcm63xx_dev_enet.h> +#include "bcm63xx_enet.h" + +static char bcm_enet_driver_name[] = "bcm63xx_enet"; +static char bcm_enet_driver_version[] = "1.0"; + +static int copybreak __read_mostly = 128; +module_param(copybreak, int, 0); +MODULE_PARM_DESC(copybreak, "Receive copy threshold"); + +/* io memory shared between all devices */ +static void __iomem *bcm_enet_shared_base; + +/* + * io helpers to access mac registers + */ +static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(priv->base + off); +} + +static inline void enet_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, priv->base + off); +} + +/* + * io helpers to access shared registers + */ +static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(bcm_enet_shared_base + off); +} + +static inline void enet_dma_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, bcm_enet_shared_base + off); +} + +/* + * write given data into mii register and wait for transfer to end + * with timeout (average measured transfer time is 25us) + */ +static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) +{ + int limit; + + /* make sure mii interrupt status is cleared */ + enet_writel(priv, ENET_IR_MII, ENET_IR_REG); + + enet_writel(priv, data, ENET_MIIDATA_REG); + wmb(); + + /* busy wait on mii interrupt bit, with timeout */ + limit = 1000; + do { + if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) + break; + udelay(1); + } while (limit-- > 0); + + return (limit < 0) ? 1 : 0; +} + +/* + * MII internal read callback + */ +static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id, + int regnum) +{ + u32 tmp, val; + + tmp = regnum << ENET_MIIDATA_REG_SHIFT; + tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; + tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; + tmp |= ENET_MIIDATA_OP_READ_MASK; + + if (do_mdio_op(priv, tmp)) + return -1; + + val = enet_readl(priv, ENET_MIIDATA_REG); + val &= 0xffff; + return val; +} + +/* + * MII internal write callback + */ +static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id, + int regnum, u16 value) +{ + u32 tmp; + + tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT; + tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; + tmp |= regnum << ENET_MIIDATA_REG_SHIFT; + tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; + tmp |= ENET_MIIDATA_OP_WRITE_MASK; + + (void)do_mdio_op(priv, tmp); + return 0; +} + +/* + * MII read callback from phylib + */ +static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id, + int regnum) +{ + return bcm_enet_mdio_read(bus->priv, mii_id, regnum); +} + +/* + * MII write callback from phylib + */ +static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); +} + +/* + * MII read callback from mii core + */ +static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id, + int regnum) +{ + return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum); +} + +/* + * MII write callback from mii core + */ +static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id, + int regnum, int value) +{ + bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value); +} + +/* + * refill rx queue + */ +static int bcm_enet_refill_rx(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + while (priv->rx_desc_count < 
priv->rx_ring_size) { + struct bcm_enet_desc *desc; + struct sk_buff *skb; + dma_addr_t p; + int desc_idx; + u32 len_stat; + + desc_idx = priv->rx_dirty_desc; + desc = &priv->rx_desc_cpu[desc_idx]; + + if (!priv->rx_skb[desc_idx]) { + skb = netdev_alloc_skb(dev, priv->rx_skb_size); + if (!skb) + break; + priv->rx_skb[desc_idx] = skb; + + p = dma_map_single(&priv->pdev->dev, skb->data, + priv->rx_skb_size, + DMA_FROM_DEVICE); + desc->address = p; + } + + len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; + len_stat |= DMADESC_OWNER_MASK; + if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { + len_stat |= DMADESC_WRAP_MASK; + priv->rx_dirty_desc = 0; + } else { + priv->rx_dirty_desc++; + } + wmb(); + desc->len_stat = len_stat; + + priv->rx_desc_count++; + + /* tell dma engine we allocated one buffer */ + enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + } + + /* If rx ring is still empty, set a timer to try allocating + * again at a later time. */ + if (priv->rx_desc_count == 0 && netif_running(dev)) { + dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); + priv->rx_timeout.expires = jiffies + HZ; + add_timer(&priv->rx_timeout); + } + + return 0; +} + +/* + * timer callback to defer refill rx queue in case we're OOM + */ +static void bcm_enet_refill_rx_timer(unsigned long data) +{ + struct net_device *dev; + struct bcm_enet_priv *priv; + + dev = (struct net_device *)data; + priv = netdev_priv(dev); + + spin_lock(&priv->rx_lock); + bcm_enet_refill_rx((struct net_device *)data); + spin_unlock(&priv->rx_lock); +} + +/* + * extract packet from rx queue + */ +static int bcm_enet_receive_queue(struct net_device *dev, int budget) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int processed; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + processed = 0; + + /* don't scan ring further than number of refilled + * descriptor */ + if (budget > priv->rx_desc_count) + budget = priv->rx_desc_count; + + do { + struct bcm_enet_desc *desc; + struct sk_buff *skb; + int desc_idx; + u32 len_stat; + unsigned int len; + + desc_idx = priv->rx_curr_desc; + desc = &priv->rx_desc_cpu[desc_idx]; + + /* make sure we actually read the descriptor status at + * each loop */ + rmb(); + + len_stat = desc->len_stat; + + /* break if dma ownership belongs to hw */ + if (len_stat & DMADESC_OWNER_MASK) + break; + + processed++; + priv->rx_curr_desc++; + if (priv->rx_curr_desc == priv->rx_ring_size) + priv->rx_curr_desc = 0; + priv->rx_desc_count--; + + /* if the packet does not have start of packet _and_ + * end of packet flag set, then just recycle it */ + if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { + dev->stats.rx_dropped++; + continue; + } + + /* recycle packet if it's marked as bad */ + if (unlikely(len_stat & DMADESC_ERR_MASK)) { + dev->stats.rx_errors++; + + if (len_stat & DMADESC_OVSIZE_MASK) + dev->stats.rx_length_errors++; + if (len_stat & DMADESC_CRC_MASK) + dev->stats.rx_crc_errors++; + if (len_stat & DMADESC_UNDER_MASK) + dev->stats.rx_frame_errors++; + if (len_stat & DMADESC_OV_MASK) + dev->stats.rx_fifo_errors++; + continue; + } + + /* valid packet */ + skb = priv->rx_skb[desc_idx]; + len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT; + /* don't include FCS */ + len -= 4; + + if (len < copybreak) { + struct sk_buff *nskb; + + nskb = netdev_alloc_skb_ip_align(dev, len); + if (!nskb) { + /* forget packet, just rearm desc */ + dev->stats.rx_dropped++; + continue; + } + + dma_sync_single_for_cpu(kdev, desc->address, + len, DMA_FROM_DEVICE); + 
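/* copybreak: copy the frame out while the CPU owns the buffer, so the + * original skb and its DMA mapping stay on the ring for reuse */ +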
memcpy(nskb->data, skb->data, len); + dma_sync_single_for_device(kdev, desc->address, + len, DMA_FROM_DEVICE); + skb = nskb; + } else { + dma_unmap_single(&priv->pdev->dev, desc->address, + priv->rx_skb_size, DMA_FROM_DEVICE); + priv->rx_skb[desc_idx] = NULL; + } + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + netif_receive_skb(skb); + + } while (--budget > 0); + + if (processed || !priv->rx_desc_count) { + bcm_enet_refill_rx(dev); + + /* kick rx dma */ + enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, + ENETDMA_CHANCFG_REG(priv->rx_chan)); + } + + return processed; +} + + +/* + * try to or force reclaim of transmitted buffers + */ +static int bcm_enet_tx_reclaim(struct net_device *dev, int force) +{ + struct bcm_enet_priv *priv; + int released; + + priv = netdev_priv(dev); + released = 0; + + while (priv->tx_desc_count < priv->tx_ring_size) { + struct bcm_enet_desc *desc; + struct sk_buff *skb; + + /* We run in a bh and fight against start_xmit, which + * is called with bh disabled */ + spin_lock(&priv->tx_lock); + + desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; + + if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { + spin_unlock(&priv->tx_lock); + break; + } + + /* ensure other field of the descriptor were not read + * before we checked ownership */ + rmb(); + + skb = priv->tx_skb[priv->tx_dirty_desc]; + priv->tx_skb[priv->tx_dirty_desc] = NULL; + dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, + DMA_TO_DEVICE); + + priv->tx_dirty_desc++; + if (priv->tx_dirty_desc == priv->tx_ring_size) + priv->tx_dirty_desc = 0; + priv->tx_desc_count++; + + spin_unlock(&priv->tx_lock); + + if (desc->len_stat & DMADESC_UNDER_MASK) + dev->stats.tx_errors++; + + dev_kfree_skb(skb); + released++; + } + + if (netif_queue_stopped(dev) && released) + netif_wake_queue(dev); + + return released; +} + +/* + * poll func, called by network core + */ +static int bcm_enet_poll(struct napi_struct *napi, int budget) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + int tx_work_done, rx_work_done; + + priv = container_of(napi, struct bcm_enet_priv, napi); + dev = priv->net_dev; + + /* ack interrupts */ + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IR_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IR_REG(priv->tx_chan)); + + /* reclaim sent skb */ + tx_work_done = bcm_enet_tx_reclaim(dev, 0); + + spin_lock(&priv->rx_lock); + rx_work_done = bcm_enet_receive_queue(dev, budget); + spin_unlock(&priv->rx_lock); + + if (rx_work_done >= budget || tx_work_done > 0) { + /* rx/tx queue is not yet empty/clean */ + return rx_work_done; + } + + /* no more packet in rx/tx queue, remove device from poll + * queue */ + napi_complete(napi); + + /* restore rx/tx interrupt */ + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IRMASK_REG(priv->tx_chan)); + + return rx_work_done; +} + +/* + * mac interrupt handler + */ +static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id) +{ + struct net_device *dev; + struct bcm_enet_priv *priv; + u32 stat; + + dev = dev_id; + priv = netdev_priv(dev); + + stat = enet_readl(priv, ENET_IR_REG); + if (!(stat & ENET_IR_MIB)) + return IRQ_NONE; + + /* clear & mask interrupt */ + enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); + enet_writel(priv, 0, ENET_IRMASK_REG); + + /* read mib registers in workqueue */ + schedule_work(&priv->mib_update_task); + + return 
IRQ_HANDLED; +} + +/* + * rx/tx dma interrupt handler + */ +static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) +{ + struct net_device *dev; + struct bcm_enet_priv *priv; + + dev = dev_id; + priv = netdev_priv(dev); + + /* mask rx/tx interrupts */ + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + + napi_schedule(&priv->napi); + + return IRQ_HANDLED; +} + +/* + * tx request callback + */ +static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct bcm_enet_desc *desc; + u32 len_stat; + int ret; + + priv = netdev_priv(dev); + + /* lock against tx reclaim */ + spin_lock(&priv->tx_lock); + + /* make sure the tx hw queue is not full, should not happen + * since we stop queue before it's the case */ + if (unlikely(!priv->tx_desc_count)) { + netif_stop_queue(dev); + dev_err(&priv->pdev->dev, "xmit called with no tx desc " + "available?\n"); + ret = NETDEV_TX_BUSY; + goto out_unlock; + } + + /* point to the next available desc */ + desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; + priv->tx_skb[priv->tx_curr_desc] = skb; + + /* fill descriptor */ + desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + + len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; + len_stat |= DMADESC_ESOP_MASK | + DMADESC_APPEND_CRC | + DMADESC_OWNER_MASK; + + priv->tx_curr_desc++; + if (priv->tx_curr_desc == priv->tx_ring_size) { + priv->tx_curr_desc = 0; + len_stat |= DMADESC_WRAP_MASK; + } + priv->tx_desc_count--; + + /* dma might be already polling, make sure we update desc + * fields in correct order */ + wmb(); + desc->len_stat = len_stat; + wmb(); + + /* kick tx dma */ + enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, + ENETDMA_CHANCFG_REG(priv->tx_chan)); + + /* stop queue if no more desc available */ + if (!priv->tx_desc_count) + netif_stop_queue(dev); + + dev->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + ret = NETDEV_TX_OK; + +out_unlock: + spin_unlock(&priv->tx_lock); + return ret; +} + +/* + * Change the interface's mac address. 
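+ * The address is kept in perfect match slot 0; slots 1-3 stay free for + * the multicast filter in bcm_enet_set_multicast_list().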
+ */ +static int bcm_enet_set_mac_address(struct net_device *dev, void *p) +{ + struct bcm_enet_priv *priv; + struct sockaddr *addr = p; + u32 val; + + priv = netdev_priv(dev); + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + /* use perfect match register 0 to store my mac address */ + val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | + (dev->dev_addr[4] << 8) | dev->dev_addr[5]; + enet_writel(priv, val, ENET_PML_REG(0)); + + val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]); + val |= ENET_PMH_DATAVALID_MASK; + enet_writel(priv, val, ENET_PMH_REG(0)); + + return 0; +} + +/* + * Change rx mode (promiscuous/allmulti) and update multicast list + */ +static void bcm_enet_set_multicast_list(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct netdev_hw_addr *ha; + u32 val; + int i; + + priv = netdev_priv(dev); + + val = enet_readl(priv, ENET_RXCFG_REG); + + if (dev->flags & IFF_PROMISC) + val |= ENET_RXCFG_PROMISC_MASK; + else + val &= ~ENET_RXCFG_PROMISC_MASK; + + /* only 3 perfect match registers left, first one is used for + * own mac address */ + if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3) + val |= ENET_RXCFG_ALLMCAST_MASK; + else + val &= ~ENET_RXCFG_ALLMCAST_MASK; + + /* no need to set perfect match registers if we catch all + * multicast */ + if (val & ENET_RXCFG_ALLMCAST_MASK) { + enet_writel(priv, val, ENET_RXCFG_REG); + return; + } + + i = 0; + netdev_for_each_mc_addr(ha, dev) { + u8 *dmi_addr; + u32 tmp; + + if (i == 3) + break; + /* update perfect match registers */ + dmi_addr = ha->addr; + tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | + (dmi_addr[4] << 8) | dmi_addr[5]; + enet_writel(priv, tmp, ENET_PML_REG(i + 1)); + + tmp = (dmi_addr[0] << 8 | dmi_addr[1]); + tmp |= ENET_PMH_DATAVALID_MASK; + enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1)); + } + + for (; i < 3; i++) { + enet_writel(priv, 0, ENET_PML_REG(i + 1)); + enet_writel(priv, 0, ENET_PMH_REG(i + 1)); + } + + enet_writel(priv, val, ENET_RXCFG_REG); +} + +/* + * set mac duplex parameters + */ +static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex) +{ + u32 val; + + val = enet_readl(priv, ENET_TXCTL_REG); + if (fullduplex) + val |= ENET_TXCTL_FD_MASK; + else + val &= ~ENET_TXCTL_FD_MASK; + enet_writel(priv, val, ENET_TXCTL_REG); +} + +/* + * set mac flow control parameters + */ +static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) +{ + u32 val; + + /* rx flow control (pause frame handling) */ + val = enet_readl(priv, ENET_RXCFG_REG); + if (rx_en) + val |= ENET_RXCFG_ENFLOW_MASK; + else + val &= ~ENET_RXCFG_ENFLOW_MASK; + enet_writel(priv, val, ENET_RXCFG_REG); + + /* tx flow control (pause frame generation) */ + val = enet_dma_readl(priv, ENETDMA_CFG_REG); + if (tx_en) + val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); + else + val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); + enet_dma_writel(priv, val, ENETDMA_CFG_REG); +} + +/* + * link changed callback (from phylib) + */ +static void bcm_enet_adjust_phy_link(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct phy_device *phydev; + int status_changed; + + priv = netdev_priv(dev); + phydev = priv->phydev; + status_changed = 0; + + if (priv->old_link != phydev->link) { + status_changed = 1; + priv->old_link = phydev->link; + } + + /* reflect duplex change in mac configuration */ + if (phydev->link && phydev->duplex != priv->old_duplex) { + bcm_enet_set_duplex(priv, + (phydev->duplex == DUPLEX_FULL) ? 
1 : 0); + status_changed = 1; + priv->old_duplex = phydev->duplex; + } + + /* enable flow control if remote end advertises it (trust phylib to + * check that duplex is full) */ + if (phydev->link && phydev->pause != priv->old_pause) { + int rx_pause_en, tx_pause_en; + + if (phydev->pause) { + /* pause was advertised by lpa and us */ + rx_pause_en = 1; + tx_pause_en = 1; + } else if (!priv->pause_auto) { + /* pause setting overridden by user */ + rx_pause_en = priv->pause_rx; + tx_pause_en = priv->pause_tx; + } else { + rx_pause_en = 0; + tx_pause_en = 0; + } + + bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en); + status_changed = 1; + priv->old_pause = phydev->pause; + } + + if (status_changed) { + pr_info("%s: link %s", dev->name, phydev->link ? + "UP" : "DOWN"); + if (phydev->link) + pr_cont(" - %d/%s - flow control %s", phydev->speed, + DUPLEX_FULL == phydev->duplex ? "full" : "half", + phydev->pause == 1 ? "rx&tx" : "off"); + + pr_cont("\n"); + } +} + +/* + * link changed callback (if phylib is not used) + */ +static void bcm_enet_adjust_link(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + bcm_enet_set_duplex(priv, priv->force_duplex_full); + bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx); + netif_carrier_on(dev); + + pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n", + dev->name, + priv->force_speed_100 ? 100 : 10, + priv->force_duplex_full ? "full" : "half", + priv->pause_rx ? "rx" : "off", + priv->pause_tx ? "tx" : "off"); +} + +/* + * open callback, allocate dma rings & buffers and start rx operation + */ +static int bcm_enet_open(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct sockaddr addr; + struct device *kdev; + struct phy_device *phydev; + int i, ret; + unsigned int size; + char phy_id[MII_BUS_ID_SIZE + 3]; + void *p; + u32 val; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + if (priv->has_phy) { + /* connect to PHY */ + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + priv->mac_id ? 
"1" : "0", priv->phy_id); + + phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0, + PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + dev_err(kdev, "could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_MII); + phydev->advertising = phydev->supported; + + if (priv->pause_auto && priv->pause_rx && priv->pause_tx) + phydev->advertising |= SUPPORTED_Pause; + else + phydev->advertising &= ~SUPPORTED_Pause; + + dev_info(kdev, "attached PHY at address %d [%s]\n", + phydev->addr, phydev->drv->name); + + priv->old_link = 0; + priv->old_duplex = -1; + priv->old_pause = -1; + priv->phydev = phydev; + } + + /* mask all interrupts and request them */ + enet_writel(priv, 0, ENET_IRMASK_REG); + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + + ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); + if (ret) + goto out_phy_disconnect; + + ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED, + dev->name, dev); + if (ret) + goto out_freeirq; + + ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, + IRQF_DISABLED, dev->name, dev); + if (ret) + goto out_freeirq_rx; + + /* initialize perfect match registers */ + for (i = 0; i < 4; i++) { + enet_writel(priv, 0, ENET_PML_REG(i)); + enet_writel(priv, 0, ENET_PMH_REG(i)); + } + + /* write device mac address */ + memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); + bcm_enet_set_mac_address(dev, &addr); + + /* allocate rx dma ring */ + size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate rx ring %u\n", size); + ret = -ENOMEM; + goto out_freeirq_tx; + } + + memset(p, 0, size); + priv->rx_desc_alloc_size = size; + priv->rx_desc_cpu = p; + + /* allocate tx dma ring */ + size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate tx ring\n"); + ret = -ENOMEM; + goto out_free_rx_ring; + } + + memset(p, 0, size); + priv->tx_desc_alloc_size = size; + priv->tx_desc_cpu = p; + + priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, + GFP_KERNEL); + if (!priv->tx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_ring; + } + + priv->tx_desc_count = priv->tx_ring_size; + priv->tx_dirty_desc = 0; + priv->tx_curr_desc = 0; + spin_lock_init(&priv->tx_lock); + + /* init & fill rx ring with skbs */ + priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, + GFP_KERNEL); + if (!priv->rx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_skb; + } + + priv->rx_desc_count = 0; + priv->rx_dirty_desc = 0; + priv->rx_curr_desc = 0; + + /* initialize flow control buffer allocation */ + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + + if (bcm_enet_refill_rx(dev)) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out; + } + + /* write rx & tx ring addresses */ + enet_dma_writel(priv, priv->rx_desc_dma, + ENETDMA_RSTART_REG(priv->rx_chan)); + enet_dma_writel(priv, priv->tx_desc_dma, + ENETDMA_RSTART_REG(priv->tx_chan)); + + /* clear 
remaining state ram for rx & tx channel */ + enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan)); + enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan)); + enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan)); + + /* set max rx/tx length */ + enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); + enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); + + /* set dma maximum burst len */ + enet_dma_writel(priv, BCMENET_DMA_MAXBURST, + ENETDMA_MAXBURST_REG(priv->rx_chan)); + enet_dma_writel(priv, BCMENET_DMA_MAXBURST, + ENETDMA_MAXBURST_REG(priv->tx_chan)); + + /* set correct transmit fifo watermark */ + enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); + + /* set flow control low/high threshold to 1/3 / 2/3 */ + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + + /* all set, enable mac and interrupts, start dma engine and + * kick rx dma channel */ + wmb(); + val = enet_readl(priv, ENET_CTL_REG); + val |= ENET_CTL_ENABLE_MASK; + enet_writel(priv, val, ENET_CTL_REG); + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, + ENETDMA_CHANCFG_REG(priv->rx_chan)); + + /* watch "mib counters about to overflow" interrupt */ + enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); + enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); + + /* watch "packet transferred" interrupt in rx and tx */ + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IR_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IR_REG(priv->tx_chan)); + + /* make sure we enable napi before rx interrupt */ + napi_enable(&priv->napi); + + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IRMASK_REG(priv->tx_chan)); + + if (priv->has_phy) + phy_start(priv->phydev); + else + bcm_enet_adjust_link(dev); + + netif_start_queue(dev); + return 0; + +out: + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + kfree(priv->rx_skb); + +out_free_tx_skb: + kfree(priv->tx_skb); + +out_free_tx_ring: + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + +out_free_rx_ring: + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + +out_freeirq_tx: + free_irq(priv->irq_tx, dev); + +out_freeirq_rx: + free_irq(priv->irq_rx, dev); + +out_freeirq: + free_irq(dev->irq, dev); + +out_phy_disconnect: + phy_disconnect(priv->phydev); + + return ret; +} + +/* + * disable mac + */ +static void bcm_enet_disable_mac(struct bcm_enet_priv *priv) +{ + int limit; + u32 val; + + val = enet_readl(priv, ENET_CTL_REG); + val |= ENET_CTL_DISABLE_MASK; + enet_writel(priv, val, ENET_CTL_REG); + + limit = 1000; + do { + u32 val; + + val = enet_readl(priv, ENET_CTL_REG); + if (!(val & ENET_CTL_DISABLE_MASK)) + break; + udelay(1); + } while (limit--); +} + +/* + * disable dma in given channel + */ +static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) +{ + int 
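+	/* bounded busy-wait, same idiom as bcm_enet_disable_mac()
+	 * above: request the stop, then poll until the hardware clears
+	 * the enable bit, giving up after ~1000 iterations of udelay(1)
+	 * instead of spinning forever on a wedged channel */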
limit; + + enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan)); + + limit = 1000; + do { + u32 val; + + val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan)); + if (!(val & ENETDMA_CHANCFG_EN_MASK)) + break; + udelay(1); + } while (limit--); +} + +/* + * stop callback + */ +static int bcm_enet_stop(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int i; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + netif_stop_queue(dev); + napi_disable(&priv->napi); + if (priv->has_phy) + phy_stop(priv->phydev); + del_timer_sync(&priv->rx_timeout); + + /* mask all interrupts */ + enet_writel(priv, 0, ENET_IRMASK_REG); + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + + /* make sure no mib update is scheduled */ + cancel_work_sync(&priv->mib_update_task); + + /* disable dma & mac */ + bcm_enet_disable_dma(priv, priv->tx_chan); + bcm_enet_disable_dma(priv, priv->rx_chan); + bcm_enet_disable_mac(priv); + + /* force reclaim of all tx buffers */ + bcm_enet_tx_reclaim(dev, 1); + + /* free the rx skb ring */ + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + + /* free remaining allocated memory */ + kfree(priv->rx_skb); + kfree(priv->tx_skb); + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + free_irq(priv->irq_tx, dev); + free_irq(priv->irq_rx, dev); + free_irq(dev->irq, dev); + + /* release phy */ + if (priv->has_phy) { + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + return 0; +} + +/* + * ethtool callbacks + */ +struct bcm_enet_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; + int mib_reg; +}; + +#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \ + offsetof(struct bcm_enet_priv, m) +#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \ + offsetof(struct net_device_stats, m) + +static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = { + { "rx_packets", DEV_STAT(rx_packets), -1 }, + { "tx_packets", DEV_STAT(tx_packets), -1 }, + { "rx_bytes", DEV_STAT(rx_bytes), -1 }, + { "tx_bytes", DEV_STAT(tx_bytes), -1 }, + { "rx_errors", DEV_STAT(rx_errors), -1 }, + { "tx_errors", DEV_STAT(tx_errors), -1 }, + { "rx_dropped", DEV_STAT(rx_dropped), -1 }, + { "tx_dropped", DEV_STAT(tx_dropped), -1 }, + + { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS}, + { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS }, + { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST }, + { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT }, + { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 }, + { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 }, + { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 }, + { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 }, + { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 }, + { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX }, + { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB }, + { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR }, + { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG }, + { "rx_dropped", GEN_STAT(mib.rx_drop), 
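+	/* note: GEN_STAT()/DEV_STAT() each expand to two initializers
+	 * (sizeof_stat, stat_offset), so every row here fills all four
+	 * fields of struct bcm_enet_stats; mib_reg == -1 marks software
+	 * counters read from net_device_stats rather than from a
+	 * hardware MIB register */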
ETH_MIB_RX_DROP }, + { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN }, + { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND }, + { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC }, + { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN }, + { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM }, + { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE }, + { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL }, + + { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS }, + { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS }, + { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST }, + { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT }, + { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 }, + { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 }, + { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 }, + { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 }, + { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023}, + { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX }, + { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB }, + { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR }, + { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG }, + { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN }, + { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL }, + { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL }, + { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL }, + { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL }, + { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE }, + { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF }, + { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS }, + { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE }, + +}; + +#define BCM_ENET_STATS_LEN \ + (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats)) + +static const u32 unused_mib_regs[] = { + ETH_MIB_TX_ALL_OCTETS, + ETH_MIB_TX_ALL_PKTS, + ETH_MIB_RX_ALL_OCTETS, + ETH_MIB_RX_ALL_PKTS, +}; + + +static void bcm_enet_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, bcm_enet_driver_name, 32); + strncpy(drvinfo->version, bcm_enet_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, "bcm63xx", 32); + drvinfo->n_stats = BCM_ENET_STATS_LEN; +} + +static int bcm_enet_get_sset_count(struct net_device *netdev, + int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCM_ENET_STATS_LEN; + default: + return -EINVAL; + } +} + +static void bcm_enet_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCM_ENET_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcm_enet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static void update_mib_counters(struct bcm_enet_priv *priv) +{ + int i; + + for (i = 0; i < BCM_ENET_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + u32 val; + char *p; + + s = &bcm_enet_gstrings_stats[i]; + if (s->mib_reg == -1) + continue; + + val = enet_readl(priv, ENET_MIB_REG(s->mib_reg)); + p = (char *)priv + s->stat_offset; + + if (s->sizeof_stat == sizeof(u64)) + *(u64 *)p += val; + else + *(u32 *)p += val; + } + + /* also empty unused mib counters to make sure mib counter + * overflow interrupt is cleared */ + for (i = 0; 
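+	/* the MIB block is set to clear-on-read in
+	 * bcm_enet_hw_preinit(), so each enet_readl() both samples and
+	 * zeroes a counter; that is also why the sums above accumulate
+	 * into wider software copies (u64 for the octet counters) */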
i < ARRAY_SIZE(unused_mib_regs); i++) + (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i])); +} + +static void bcm_enet_update_mib_counters_defer(struct work_struct *t) +{ + struct bcm_enet_priv *priv; + + priv = container_of(t, struct bcm_enet_priv, mib_update_task); + mutex_lock(&priv->mib_update_lock); + update_mib_counters(priv); + mutex_unlock(&priv->mib_update_lock); + + /* reenable mib interrupt */ + if (netif_running(priv->net_dev)) + enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); +} + +static void bcm_enet_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcm_enet_priv *priv; + int i; + + priv = netdev_priv(netdev); + + mutex_lock(&priv->mib_update_lock); + update_mib_counters(priv); + + for (i = 0; i < BCM_ENET_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + char *p; + + s = &bcm_enet_gstrings_stats[i]; + if (s->mib_reg == -1) + p = (char *)&netdev->stats; + else + p = (char *)priv; + p += s->stat_offset; + data[i] = (s->sizeof_stat == sizeof(u64)) ? + *(u64 *)p : *(u32 *)p; + } + mutex_unlock(&priv->mib_update_lock); +} + +static int bcm_enet_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + cmd->maxrxpkt = 0; + cmd->maxtxpkt = 0; + + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return phy_ethtool_gset(priv->phydev, cmd); + } else { + cmd->autoneg = 0; + ethtool_cmd_speed_set(cmd, ((priv->force_speed_100) + ? SPEED_100 : SPEED_10)); + cmd->duplex = (priv->force_duplex_full) ? + DUPLEX_FULL : DUPLEX_HALF; + cmd->supported = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + cmd->advertising = 0; + cmd->port = PORT_MII; + cmd->transceiver = XCVR_EXTERNAL; + } + return 0; +} + +static int bcm_enet_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return phy_ethtool_sset(priv->phydev, cmd); + } else { + + if (cmd->autoneg || + (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) || + cmd->port != PORT_MII) + return -EINVAL; + + priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0; + priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 
1 : 0;
+
+		if (netif_running(dev))
+			bcm_enet_adjust_link(dev);
+		return 0;
+	}
+}
+
+static void bcm_enet_get_ringparam(struct net_device *dev,
+				   struct ethtool_ringparam *ering)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	/* rx/tx ring is actually only limited by memory */
+	ering->rx_max_pending = 8192;
+	ering->tx_max_pending = 8192;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+	ering->rx_pending = priv->rx_ring_size;
+	ering->tx_pending = priv->tx_ring_size;
+}
+
+static int bcm_enet_set_ringparam(struct net_device *dev,
+				  struct ethtool_ringparam *ering)
+{
+	struct bcm_enet_priv *priv;
+	int was_running;
+
+	priv = netdev_priv(dev);
+
+	was_running = 0;
+	if (netif_running(dev)) {
+		bcm_enet_stop(dev);
+		was_running = 1;
+	}
+
+	priv->rx_ring_size = ering->rx_pending;
+	priv->tx_ring_size = ering->tx_pending;
+
+	if (was_running) {
+		int err;
+
+		err = bcm_enet_open(dev);
+		if (err)
+			dev_close(dev);
+		else
+			bcm_enet_set_multicast_list(dev);
+	}
+	return 0;
+}
+
+static void bcm_enet_get_pauseparam(struct net_device *dev,
+				    struct ethtool_pauseparam *ecmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	ecmd->autoneg = priv->pause_auto;
+	ecmd->rx_pause = priv->pause_rx;
+	ecmd->tx_pause = priv->pause_tx;
+}
+
+static int bcm_enet_set_pauseparam(struct net_device *dev,
+				   struct ethtool_pauseparam *ecmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	if (priv->has_phy) {
+		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
+			/* asymmetric pause mode not supported; it is
+			 * actually possible, but the integrated PHY has
+			 * a read-only asym_pause bit */
+			return -EINVAL;
+		}
+	} else {
+		/* no pause autoneg on direct mii connection */
+		if (ecmd->autoneg)
+			return -EINVAL;
+	}
+
+	priv->pause_auto = ecmd->autoneg;
+	priv->pause_rx = ecmd->rx_pause;
+	priv->pause_tx = ecmd->tx_pause;
+
+	return 0;
+}
+
+static struct ethtool_ops bcm_enet_ethtool_ops = {
+	.get_strings		= bcm_enet_get_strings,
+	.get_sset_count		= bcm_enet_get_sset_count,
+	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
+	.get_settings		= bcm_enet_get_settings,
+	.set_settings		= bcm_enet_set_settings,
+	.get_drvinfo		= bcm_enet_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_ringparam		= bcm_enet_get_ringparam,
+	.set_ringparam		= bcm_enet_set_ringparam,
+	.get_pauseparam		= bcm_enet_get_pauseparam,
+	.set_pauseparam		= bcm_enet_set_pauseparam,
+};
+
+static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	if (priv->has_phy) {
+		if (!priv->phydev)
+			return -ENODEV;
+		return phy_mii_ioctl(priv->phydev, rq, cmd);
+	} else {
+		struct mii_if_info mii;
+
+		mii.dev = dev;
+		mii.mdio_read = bcm_enet_mdio_read_mii;
+		mii.mdio_write = bcm_enet_mdio_write_mii;
+		mii.phy_id = 0;
+		mii.phy_id_mask = 0x3f;
+		mii.reg_num_mask = 0x1f;
+		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
+	}
+}
+
+/*
+ * calculate actual hardware mtu
+ */
+static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
+{
+	int actual_mtu;
+
+	actual_mtu = mtu;
+
+	/* add ethernet header + vlan tag size */
+	actual_mtu += VLAN_ETH_HLEN;
+
+	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
+		return -EINVAL;
+
+	/*
+	 * setup maximum size before we get overflow mark in
+	 * descriptor; note that this will not prevent reception of
+	 * big frames, they will be split into multiple buffers
+	 * anyway
+	 */
+	priv->hw_mtu = actual_mtu;
+
+	/* e.g. for the default mtu of 1500: actual_mtu = 1518 with the
+	 * vlan-tagged header, and rx_skb_size below becomes
+	 * ALIGN(1518 + 4, 64) = 1536 */
+	/*
+	 * align rx buffer size to dma burst len, and account for the
+	 * FCS since
+ * it's appended + */ + priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, + BCMENET_DMA_MAXBURST * 4); + return 0; +} + +/* + * adjust mtu, can't be called while device is running + */ +static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + + if (netif_running(dev)) + return -EBUSY; + + ret = compute_hw_mtu(netdev_priv(dev), new_mtu); + if (ret) + return ret; + dev->mtu = new_mtu; + return 0; +} + +/* + * preinit hardware to allow mii operation while device is down + */ +static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv) +{ + u32 val; + int limit; + + /* make sure mac is disabled */ + bcm_enet_disable_mac(priv); + + /* soft reset mac */ + val = ENET_CTL_SRESET_MASK; + enet_writel(priv, val, ENET_CTL_REG); + wmb(); + + limit = 1000; + do { + val = enet_readl(priv, ENET_CTL_REG); + if (!(val & ENET_CTL_SRESET_MASK)) + break; + udelay(1); + } while (limit--); + + /* select correct mii interface */ + val = enet_readl(priv, ENET_CTL_REG); + if (priv->use_external_mii) + val |= ENET_CTL_EPHYSEL_MASK; + else + val &= ~ENET_CTL_EPHYSEL_MASK; + enet_writel(priv, val, ENET_CTL_REG); + + /* turn on mdc clock */ + enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) | + ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG); + + /* set mib counters to self-clear when read */ + val = enet_readl(priv, ENET_MIBCTL_REG); + val |= ENET_MIBCTL_RDCLEAR_MASK; + enet_writel(priv, val, ENET_MIBCTL_REG); +} + +static const struct net_device_ops bcm_enet_ops = { + .ndo_open = bcm_enet_open, + .ndo_stop = bcm_enet_stop, + .ndo_start_xmit = bcm_enet_start_xmit, + .ndo_set_mac_address = bcm_enet_set_mac_address, + .ndo_set_multicast_list = bcm_enet_set_multicast_list, + .ndo_do_ioctl = bcm_enet_ioctl, + .ndo_change_mtu = bcm_enet_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcm_enet_netpoll, +#endif +}; + +/* + * allocate netdevice, request register memory and register device. + */ +static int __devinit bcm_enet_probe(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct bcm63xx_enet_platform_data *pd; + struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; + struct mii_bus *bus; + const char *clk_name; + unsigned int iomem_size; + int i, ret; + + /* stop if shared driver failed, assume driver->probe will be + * called in the same order we register devices (correct ?) 
*/ + if (!bcm_enet_shared_base) + return -ENODEV; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); + if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx) + return -ENODEV; + + ret = 0; + dev = alloc_etherdev(sizeof(*priv)); + if (!dev) + return -ENOMEM; + priv = netdev_priv(dev); + + ret = compute_hw_mtu(priv, dev->mtu); + if (ret) + goto out; + + iomem_size = resource_size(res_mem); + if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) { + ret = -EBUSY; + goto out; + } + + priv->base = ioremap(res_mem->start, iomem_size); + if (priv->base == NULL) { + ret = -ENOMEM; + goto out_release_mem; + } + dev->irq = priv->irq = res_irq->start; + priv->irq_rx = res_irq_rx->start; + priv->irq_tx = res_irq_tx->start; + priv->mac_id = pdev->id; + + /* get rx & tx dma channel id for this mac */ + if (priv->mac_id == 0) { + priv->rx_chan = 0; + priv->tx_chan = 1; + clk_name = "enet0"; + } else { + priv->rx_chan = 2; + priv->tx_chan = 3; + clk_name = "enet1"; + } + + priv->mac_clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(priv->mac_clk)) { + ret = PTR_ERR(priv->mac_clk); + goto out_unmap; + } + clk_enable(priv->mac_clk); + + /* initialize default and fetch platform data */ + priv->rx_ring_size = BCMENET_DEF_RX_DESC; + priv->tx_ring_size = BCMENET_DEF_TX_DESC; + + pd = pdev->dev.platform_data; + if (pd) { + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + priv->has_phy = pd->has_phy; + priv->phy_id = pd->phy_id; + priv->has_phy_interrupt = pd->has_phy_interrupt; + priv->phy_interrupt = pd->phy_interrupt; + priv->use_external_mii = !pd->use_internal_phy; + priv->pause_auto = pd->pause_auto; + priv->pause_rx = pd->pause_rx; + priv->pause_tx = pd->pause_tx; + priv->force_duplex_full = pd->force_duplex_full; + priv->force_speed_100 = pd->force_speed_100; + } + + if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { + /* using internal PHY, enable clock */ + priv->phy_clk = clk_get(&pdev->dev, "ephy"); + if (IS_ERR(priv->phy_clk)) { + ret = PTR_ERR(priv->phy_clk); + priv->phy_clk = NULL; + goto out_put_clk_mac; + } + clk_enable(priv->phy_clk); + } + + /* do minimal hardware init to be able to probe mii bus */ + bcm_enet_hw_preinit(priv); + + /* MII bus registration */ + if (priv->has_phy) { + + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) { + ret = -ENOMEM; + goto out_uninit_hw; + } + + bus = priv->mii_bus; + bus->name = "bcm63xx_enet MII bus"; + bus->parent = &pdev->dev; + bus->priv = priv; + bus->read = bcm_enet_mdio_read_phylib; + bus->write = bcm_enet_mdio_write_phylib; + sprintf(bus->id, "%d", priv->mac_id); + + /* only probe bus where we think the PHY is, because + * the mdio read operation return 0 instead of 0xffff + * if a slave is not present on hw */ + bus->phy_mask = ~(1 << priv->phy_id); + + bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!bus->irq) { + ret = -ENOMEM; + goto out_free_mdio; + } + + if (priv->has_phy_interrupt) + bus->irq[priv->phy_id] = priv->phy_interrupt; + else + bus->irq[priv->phy_id] = PHY_POLL; + + ret = mdiobus_register(bus); + if (ret) { + dev_err(&pdev->dev, "unable to register mdio bus\n"); + goto out_free_mdio; + } + } else { + + /* run platform code to initialize PHY device */ + if (pd->mii_config && + pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, + bcm_enet_mdio_write_mii)) { + dev_err(&pdev->dev, "unable to 
configure mdio bus\n"); + goto out_uninit_hw; + } + } + + spin_lock_init(&priv->rx_lock); + + /* init rx timeout (used for oom) */ + init_timer(&priv->rx_timeout); + priv->rx_timeout.function = bcm_enet_refill_rx_timer; + priv->rx_timeout.data = (unsigned long)dev; + + /* init the mib update lock&work */ + mutex_init(&priv->mib_update_lock); + INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); + + /* zero mib counters */ + for (i = 0; i < ENET_MIB_REG_COUNT; i++) + enet_writel(priv, 0, ENET_MIB_REG(i)); + + /* register netdevice */ + dev->netdev_ops = &bcm_enet_ops; + netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); + + SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops); + SET_NETDEV_DEV(dev, &pdev->dev); + + ret = register_netdev(dev); + if (ret) + goto out_unregister_mdio; + + netif_carrier_off(dev); + platform_set_drvdata(pdev, dev); + priv->pdev = pdev; + priv->net_dev = dev; + + return 0; + +out_unregister_mdio: + if (priv->mii_bus) { + mdiobus_unregister(priv->mii_bus); + kfree(priv->mii_bus->irq); + } + +out_free_mdio: + if (priv->mii_bus) + mdiobus_free(priv->mii_bus); + +out_uninit_hw: + /* turn off mdc clock */ + enet_writel(priv, 0, ENET_MIISC_REG); + if (priv->phy_clk) { + clk_disable(priv->phy_clk); + clk_put(priv->phy_clk); + } + +out_put_clk_mac: + clk_disable(priv->mac_clk); + clk_put(priv->mac_clk); + +out_unmap: + iounmap(priv->base); + +out_release_mem: + release_mem_region(res_mem->start, iomem_size); +out: + free_netdev(dev); + return ret; +} + + +/* + * exit func, stops hardware and unregisters netdevice + */ +static int __devexit bcm_enet_remove(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct resource *res; + + /* stop netdevice */ + dev = platform_get_drvdata(pdev); + priv = netdev_priv(dev); + unregister_netdev(dev); + + /* turn off mdc clock */ + enet_writel(priv, 0, ENET_MIISC_REG); + + if (priv->has_phy) { + mdiobus_unregister(priv->mii_bus); + kfree(priv->mii_bus->irq); + mdiobus_free(priv->mii_bus); + } else { + struct bcm63xx_enet_platform_data *pd; + + pd = pdev->dev.platform_data; + if (pd && pd->mii_config) + pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, + bcm_enet_mdio_write_mii); + } + + /* release device resources */ + iounmap(priv->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + + /* disable hw block clocks */ + if (priv->phy_clk) { + clk_disable(priv->phy_clk); + clk_put(priv->phy_clk); + } + clk_disable(priv->mac_clk); + clk_put(priv->mac_clk); + + platform_set_drvdata(pdev, NULL); + free_netdev(dev); + return 0; +} + +struct platform_driver bcm63xx_enet_driver = { + .probe = bcm_enet_probe, + .remove = __devexit_p(bcm_enet_remove), + .driver = { + .name = "bcm63xx_enet", + .owner = THIS_MODULE, + }, +}; + +/* + * reserve & remap memory space shared between all macs + */ +static int __devinit bcm_enet_shared_probe(struct platform_device *pdev) +{ + struct resource *res; + unsigned int iomem_size; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + iomem_size = resource_size(res); + if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma")) + return -EBUSY; + + bcm_enet_shared_base = ioremap(res->start, iomem_size); + if (!bcm_enet_shared_base) { + release_mem_region(res->start, iomem_size); + return -ENOMEM; + } + return 0; +} + +static int __devexit bcm_enet_shared_remove(struct platform_device *pdev) +{ + struct resource *res; + + iounmap(bcm_enet_shared_base); + 
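+	/*
+	 * bcm_enet_shared_base is the ioremap()ed DMA register window
+	 * shared by both MACs; bcm_enet_probe() returns -ENODEV while
+	 * it is still NULL, which is why bcm_enet_init() below has to
+	 * register the shared driver before the per-MAC driver.
+	 */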
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + return 0; +} + +/* + * this "shared" driver is needed because both macs share a single + * address space + */ +struct platform_driver bcm63xx_enet_shared_driver = { + .probe = bcm_enet_shared_probe, + .remove = __devexit_p(bcm_enet_shared_remove), + .driver = { + .name = "bcm63xx_enet_shared", + .owner = THIS_MODULE, + }, +}; + +/* + * entry point + */ +static int __init bcm_enet_init(void) +{ + int ret; + + ret = platform_driver_register(&bcm63xx_enet_shared_driver); + if (ret) + return ret; + + ret = platform_driver_register(&bcm63xx_enet_driver); + if (ret) + platform_driver_unregister(&bcm63xx_enet_shared_driver); + + return ret; +} + +static void __exit bcm_enet_exit(void) +{ + platform_driver_unregister(&bcm63xx_enet_driver); + platform_driver_unregister(&bcm63xx_enet_shared_driver); +} + + +module_init(bcm_enet_init); +module_exit(bcm_enet_exit); + +MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver"); +MODULE_AUTHOR("Maxime Bizon "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h new file mode 100644 index 0000000..0e3048b --- /dev/null +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h @@ -0,0 +1,302 @@ +#ifndef BCM63XX_ENET_H_ +#define BCM63XX_ENET_H_ + +#include +#include +#include +#include +#include + +#include +#include +#include + +/* default number of descriptor */ +#define BCMENET_DEF_RX_DESC 64 +#define BCMENET_DEF_TX_DESC 32 + +/* maximum burst len for dma (4 bytes unit) */ +#define BCMENET_DMA_MAXBURST 16 + +/* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value + * must be low enough so that a DMA transfer of above burst length can + * not overflow the fifo */ +#define BCMENET_TX_FIFO_TRESH 32 + +/* + * hardware maximum rx/tx packet size including FCS, max mtu is + * actually 2047, but if we set max rx size register to 2047 we won't + * get overflow information if packet size is 2048 or above + */ +#define BCMENET_MAX_MTU 2046 + +/* + * rx/tx dma descriptor + */ +struct bcm_enet_desc { + u32 len_stat; + u32 address; +}; + +#define DMADESC_LENGTH_SHIFT 16 +#define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT) +#define DMADESC_OWNER_MASK (1 << 15) +#define DMADESC_EOP_MASK (1 << 14) +#define DMADESC_SOP_MASK (1 << 13) +#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK) +#define DMADESC_WRAP_MASK (1 << 12) + +#define DMADESC_UNDER_MASK (1 << 9) +#define DMADESC_APPEND_CRC (1 << 8) +#define DMADESC_OVSIZE_MASK (1 << 4) +#define DMADESC_RXER_MASK (1 << 2) +#define DMADESC_CRC_MASK (1 << 1) +#define DMADESC_OV_MASK (1 << 0) +#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \ + DMADESC_OVSIZE_MASK | \ + DMADESC_RXER_MASK | \ + DMADESC_CRC_MASK | \ + DMADESC_OV_MASK) + + +/* + * MIB Counters register definitions +*/ +#define ETH_MIB_TX_GD_OCTETS 0 +#define ETH_MIB_TX_GD_PKTS 1 +#define ETH_MIB_TX_ALL_OCTETS 2 +#define ETH_MIB_TX_ALL_PKTS 3 +#define ETH_MIB_TX_BRDCAST 4 +#define ETH_MIB_TX_MULT 5 +#define ETH_MIB_TX_64 6 +#define ETH_MIB_TX_65_127 7 +#define ETH_MIB_TX_128_255 8 +#define ETH_MIB_TX_256_511 9 +#define ETH_MIB_TX_512_1023 10 +#define ETH_MIB_TX_1024_MAX 11 +#define ETH_MIB_TX_JAB 12 +#define ETH_MIB_TX_OVR 13 +#define ETH_MIB_TX_FRAG 14 +#define ETH_MIB_TX_UNDERRUN 15 +#define ETH_MIB_TX_COL 16 +#define ETH_MIB_TX_1_COL 17 +#define ETH_MIB_TX_M_COL 18 +#define ETH_MIB_TX_EX_COL 19 +#define ETH_MIB_TX_LATE 20 +#define ETH_MIB_TX_DEF 21 
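+/*
+ * these values are word indices into the MIB counter block rather
+ * than byte offsets: the driver reads them through
+ * enet_readl(priv, ENET_MIB_REG(x)), and the rx bank deliberately
+ * starts 32 indices after the tx bank
+ */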
+#define ETH_MIB_TX_CRS			22
+#define ETH_MIB_TX_PAUSE		23
+
+#define ETH_MIB_RX_GD_OCTETS		32
+#define ETH_MIB_RX_GD_PKTS		33
+#define ETH_MIB_RX_ALL_OCTETS		34
+#define ETH_MIB_RX_ALL_PKTS		35
+#define ETH_MIB_RX_BRDCAST		36
+#define ETH_MIB_RX_MULT			37
+#define ETH_MIB_RX_64			38
+#define ETH_MIB_RX_65_127		39
+#define ETH_MIB_RX_128_255		40
+#define ETH_MIB_RX_256_511		41
+#define ETH_MIB_RX_512_1023		42
+#define ETH_MIB_RX_1024_MAX		43
+#define ETH_MIB_RX_JAB			44
+#define ETH_MIB_RX_OVR			45
+#define ETH_MIB_RX_FRAG			46
+#define ETH_MIB_RX_DROP			47
+#define ETH_MIB_RX_CRC_ALIGN		48
+#define ETH_MIB_RX_UND			49
+#define ETH_MIB_RX_CRC			50
+#define ETH_MIB_RX_ALIGN		51
+#define ETH_MIB_RX_SYM			52
+#define ETH_MIB_RX_PAUSE		53
+#define ETH_MIB_RX_CNTRL		54
+
+
+struct bcm_enet_mib_counters {
+	u64 tx_gd_octets;
+	u32 tx_gd_pkts;
+	u32 tx_all_octets;
+	u32 tx_all_pkts;
+	u32 tx_brdcast;
+	u32 tx_mult;
+	u32 tx_64;
+	u32 tx_65_127;
+	u32 tx_128_255;
+	u32 tx_256_511;
+	u32 tx_512_1023;
+	u32 tx_1024_max;
+	u32 tx_jab;
+	u32 tx_ovr;
+	u32 tx_frag;
+	u32 tx_underrun;
+	u32 tx_col;
+	u32 tx_1_col;
+	u32 tx_m_col;
+	u32 tx_ex_col;
+	u32 tx_late;
+	u32 tx_def;
+	u32 tx_crs;
+	u32 tx_pause;
+	u64 rx_gd_octets;
+	u32 rx_gd_pkts;
+	u32 rx_all_octets;
+	u32 rx_all_pkts;
+	u32 rx_brdcast;
+	u32 rx_mult;
+	u32 rx_64;
+	u32 rx_65_127;
+	u32 rx_128_255;
+	u32 rx_256_511;
+	u32 rx_512_1023;
+	u32 rx_1024_max;
+	u32 rx_jab;
+	u32 rx_ovr;
+	u32 rx_frag;
+	u32 rx_drop;
+	u32 rx_crc_align;
+	u32 rx_und;
+	u32 rx_crc;
+	u32 rx_align;
+	u32 rx_sym;
+	u32 rx_pause;
+	u32 rx_cntrl;
+};
+
+
+struct bcm_enet_priv {
+
+	/* mac id (from platform device id) */
+	int mac_id;
+
+	/* base remapped address of device */
+	void __iomem *base;
+
+	/* mac irq, rx_dma irq, tx_dma irq */
+	int irq;
+	int irq_rx;
+	int irq_tx;
+
+	/* hw view of rx & tx dma ring */
+	dma_addr_t rx_desc_dma;
+	dma_addr_t tx_desc_dma;
+
+	/* allocated size (in bytes) for rx & tx dma ring */
+	unsigned int rx_desc_alloc_size;
+	unsigned int tx_desc_alloc_size;
+
+
+	struct napi_struct napi;
+
+	/* dma channel id for rx */
+	int rx_chan;
+
+	/* number of dma desc in rx ring */
+	int rx_ring_size;
+
+	/* cpu view of rx dma ring */
+	struct bcm_enet_desc *rx_desc_cpu;
+
+	/* current number of armed descriptors given to hardware for rx */
+	int rx_desc_count;
+
+	/* next rx descriptor to fetch from hardware */
+	int rx_curr_desc;
+
+	/* next dirty rx descriptor to refill */
+	int rx_dirty_desc;
+
+	/* size of allocated rx skbs */
+	unsigned int rx_skb_size;
+
+	/* list of skb given to hw for rx */
+	struct sk_buff **rx_skb;
+
+	/* used when rx skb allocation failed, so we defer rx queue
+	 * refill */
+	struct timer_list rx_timeout;
+
+	/* lock rx_timeout against rx normal operation */
+	spinlock_t rx_lock;
+
+
+	/* dma channel id for tx */
+	int tx_chan;
+
+	/* number of dma desc in tx ring */
+	int tx_ring_size;
+
+	/* cpu view of tx dma ring */
+	struct bcm_enet_desc *tx_desc_cpu;
+
+	/* number of available descriptors for tx */
+	int tx_desc_count;
+
+	/* next tx descriptor available */
+	int tx_curr_desc;
+
+	/* next dirty tx descriptor to reclaim */
+	int tx_dirty_desc;
+
+	/* list of skb given to hw for tx */
+	struct sk_buff **tx_skb;
+
+	/* lock used by tx reclaim and xmit */
+	spinlock_t tx_lock;
+
+
+	/* set if internal phy is ignored and external mii interface
+	 * is selected */
+	int use_external_mii;
+
+	/* set if a phy is connected, phy address must be known,
+	 * probing is not possible */
+	int has_phy;
+	int phy_id;
+
+	/* set if connected phy has an associated irq
*/ + int has_phy_interrupt; + int phy_interrupt; + + /* used when a phy is connected (phylib used) */ + struct mii_bus *mii_bus; + struct phy_device *phydev; + int old_link; + int old_duplex; + int old_pause; + + /* used when no phy is connected */ + int force_speed_100; + int force_duplex_full; + + /* pause parameters */ + int pause_auto; + int pause_rx; + int pause_tx; + + /* stats */ + struct bcm_enet_mib_counters mib; + + /* after mib interrupt, mib registers update is done in this + * work queue */ + struct work_struct mib_update_task; + + /* lock mib update between userspace request and workqueue */ + struct mutex mib_update_lock; + + /* mac clock */ + struct clk *mac_clk; + + /* phy clock if internal phy is used */ + struct clk *phy_clk; + + /* network device reference */ + struct net_device *net_dev; + + /* platform device reference */ + struct platform_device *pdev; + + /* maximum hardware transmit/receive size */ + unsigned int hw_mtu; +}; + +#endif /* ! BCM63XX_ENET_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c new file mode 100644 index 0000000..4b2b570 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -0,0 +1,8607 @@ +/* bnx2.c: Broadcom NX2 network driver. + * + * Copyright (c) 2004-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Michael Chan (mchan@broadcom.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) +#define BCM_CNIC 1 +#include "cnic_if.h" +#endif +#include "bnx2.h" +#include "bnx2_fw.h" + +#define DRV_MODULE_NAME "bnx2" +#define DRV_MODULE_VERSION "2.1.11" +#define DRV_MODULE_RELDATE "July 20, 2011" +#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw" +#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" +#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw" +#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw" +#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw" + +#define RUN_AT(x) (jiffies + (x)) + +/* Time in jiffies before concluding the transmitter is hung. 
*/ +#define TX_TIMEOUT (5*HZ) + +static char version[] __devinitdata = + "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Michael Chan "); +MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_FIRMWARE(FW_MIPS_FILE_06); +MODULE_FIRMWARE(FW_RV2P_FILE_06); +MODULE_FIRMWARE(FW_MIPS_FILE_09); +MODULE_FIRMWARE(FW_RV2P_FILE_09); +MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax); + +static int disable_msi = 0; + +module_param(disable_msi, int, 0); +MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); + +typedef enum { + BCM5706 = 0, + NC370T, + NC370I, + BCM5706S, + NC370F, + BCM5708, + BCM5708S, + BCM5709, + BCM5709S, + BCM5716, + BCM5716S, +} board_t; + +/* indexed by board_t, above */ +static struct { + char *name; +} board_info[] __devinitdata = { + { "Broadcom NetXtreme II BCM5706 1000Base-T" }, + { "HP NC370T Multifunction Gigabit Server Adapter" }, + { "HP NC370i Multifunction Gigabit Server Adapter" }, + { "Broadcom NetXtreme II BCM5706 1000Base-SX" }, + { "HP NC370F Multifunction Gigabit Server Adapter" }, + { "Broadcom NetXtreme II BCM5708 1000Base-T" }, + { "Broadcom NetXtreme II BCM5708 1000Base-SX" }, + { "Broadcom NetXtreme II BCM5709 1000Base-T" }, + { "Broadcom NetXtreme II BCM5709 1000Base-SX" }, + { "Broadcom NetXtreme II BCM5716 1000Base-T" }, + { "Broadcom NetXtreme II BCM5716 1000Base-SX" }, + }; + +static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = { + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, + PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, + PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, + PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S }, + { PCI_VENDOR_ID_BROADCOM, 0x163b, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 }, + { PCI_VENDOR_ID_BROADCOM, 0x163c, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S }, + { 0, } +}; + +static const struct flash_spec flash_table[] = +{ +#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE) +#define NONBUFFERED_FLAGS (BNX2_NV_WREN) + /* Slow EEPROM */ + {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, + BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, + SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, + "EEPROM - slow"}, + /* Expansion entry 0001 */ + {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 0001"}, + /* Saifun SA25F010 (non-buffered flash) */ + /* strap, cfg1, & write1 need updates */ + {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, + "Non-buffered flash (128kB)"}, + /* Saifun SA25F020 (non-buffered flash) */ + /* 
strap, cfg1, & write1 need updates */ + {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, + "Non-buffered flash (256kB)"}, + /* Expansion entry 0100 */ + {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 0100"}, + /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ + {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, + ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, + "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, + /* Entry 0110: ST M45PE20 (non-buffered flash)*/ + {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, + ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, + "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, + /* Saifun SA25F005 (non-buffered flash) */ + /* strap, cfg1, & write1 need updates */ + {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, + "Non-buffered flash (64kB)"}, + /* Fast EEPROM */ + {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, + BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, + SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, + "EEPROM - fast"}, + /* Expansion entry 1001 */ + {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 1001"}, + /* Expansion entry 1010 */ + {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 1010"}, + /* ATMEL AT45DB011B (buffered flash) */ + {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, + "Buffered flash (128kB)"}, + /* Expansion entry 1100 */ + {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 1100"}, + /* Expansion entry 1101 */ + {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 1101"}, + /* Ateml Expansion entry 1110 */ + {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLASH_BYTE_ADDR_MASK, 0, + "Entry 1110 (Atmel)"}, + /* ATMEL AT45DB021B (buffered flash) */ + {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, + "Buffered flash (256kB)"}, +}; + +static const struct flash_spec flash_5709 = { + .flags = BNX2_NV_BUFFERED, + .page_bits = BCM5709_FLASH_PAGE_BITS, + .page_size = BCM5709_FLASH_PAGE_SIZE, + .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, + .total_size = BUFFERED_FLASH_TOTAL_SIZE*2, + .name = "5709 Buffered flash (256kB)", +}; + +MODULE_DEVICE_TABLE(pci, 
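+/*
+ * the PCI core walks bnx2_pci_tbl in order, so the HP NC370x rows
+ * with specific subsystem ids must stay ahead of the PCI_ANY_ID
+ * catch-all rows for the same device ids, or the HP board names in
+ * board_info[] would never be selected
+ */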
bnx2_pci_tbl); + +static void bnx2_init_napi(struct bnx2 *bp); +static void bnx2_del_napi(struct bnx2 *bp); + +static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) +{ + u32 diff; + + /* Tell compiler to fetch tx_prod and tx_cons from memory. */ + barrier(); + + /* The ring uses 256 indices for 255 entries, one of them + * needs to be skipped. + */ + diff = txr->tx_prod - txr->tx_cons; + if (unlikely(diff >= TX_DESC_CNT)) { + diff &= 0xffff; + if (diff == TX_DESC_CNT) + diff = MAX_TX_DESC_CNT; + } + return bp->tx_ring_size - diff; +} + +static u32 +bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) +{ + u32 val; + + spin_lock_bh(&bp->indirect_lock); + REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); + val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW); + spin_unlock_bh(&bp->indirect_lock); + return val; +} + +static void +bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) +{ + spin_lock_bh(&bp->indirect_lock); + REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); + REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val); + spin_unlock_bh(&bp->indirect_lock); +} + +static void +bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val) +{ + bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val); +} + +static u32 +bnx2_shmem_rd(struct bnx2 *bp, u32 offset) +{ + return bnx2_reg_rd_ind(bp, bp->shmem_base + offset); +} + +static void +bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) +{ + offset += cid_addr; + spin_lock_bh(&bp->indirect_lock); + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + int i; + + REG_WR(bp, BNX2_CTX_CTX_DATA, val); + REG_WR(bp, BNX2_CTX_CTX_CTRL, + offset | BNX2_CTX_CTX_CTRL_WRITE_REQ); + for (i = 0; i < 5; i++) { + val = REG_RD(bp, BNX2_CTX_CTX_CTRL); + if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0) + break; + udelay(5); + } + } else { + REG_WR(bp, BNX2_CTX_DATA_ADR, offset); + REG_WR(bp, BNX2_CTX_DATA, val); + } + spin_unlock_bh(&bp->indirect_lock); +} + +#ifdef BCM_CNIC +static int +bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info) +{ + struct bnx2 *bp = netdev_priv(dev); + struct drv_ctl_io *io = &info->data.io; + + switch (info->cmd) { + case DRV_CTL_IO_WR_CMD: + bnx2_reg_wr_ind(bp, io->offset, io->data); + break; + case DRV_CTL_IO_RD_CMD: + io->data = bnx2_reg_rd_ind(bp, io->offset); + break; + case DRV_CTL_CTX_WR_CMD: + bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data); + break; + default: + return -EINVAL; + } + return 0; +} + +static void bnx2_setup_cnic_irq_info(struct bnx2 *bp) +{ + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + int sb_id; + + if (bp->flags & BNX2_FLAG_USING_MSIX) { + cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; + bnapi->cnic_present = 0; + sb_id = bp->irq_nvecs; + cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; + } else { + cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; + bnapi->cnic_tag = bnapi->last_status_idx; + bnapi->cnic_present = 1; + sb_id = 0; + cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; + } + + cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector; + cp->irq_arr[0].status_blk = (void *) + ((unsigned long) bnapi->status_blk.msi + + (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id)); + cp->irq_arr[0].status_blk_num = sb_id; + cp->num_irq = 1; +} + +static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops, + void *data) +{ + struct bnx2 *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (ops == NULL) + return -EINVAL; + + if (cp->drv_state & CNIC_DRV_STATE_REGD) + return -EBUSY; + + if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN)) + return -ENODEV; + + 
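+	/* cnic_ops is published with rcu_assign_pointer() below and
+	 * cleared under cnic_lock followed by synchronize_rcu() in
+	 * bnx2_unregister_cnic(), so readers such as bnx2_cnic_stop()
+	 * must hold cnic_lock (or an RCU read section) before
+	 * dereferencing it */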
bp->cnic_data = data; + rcu_assign_pointer(bp->cnic_ops, ops); + + cp->num_irq = 0; + cp->drv_state = CNIC_DRV_STATE_REGD; + + bnx2_setup_cnic_irq_info(bp); + + return 0; +} + +static int bnx2_unregister_cnic(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + mutex_lock(&bp->cnic_lock); + cp->drv_state = 0; + bnapi->cnic_present = 0; + rcu_assign_pointer(bp->cnic_ops, NULL); + mutex_unlock(&bp->cnic_lock); + synchronize_rcu(); + return 0; +} + +struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (!cp->max_iscsi_conn) + return NULL; + + cp->drv_owner = THIS_MODULE; + cp->chip_id = bp->chip_id; + cp->pdev = bp->pdev; + cp->io_base = bp->regview; + cp->drv_ctl = bnx2_drv_ctl; + cp->drv_register_cnic = bnx2_register_cnic; + cp->drv_unregister_cnic = bnx2_unregister_cnic; + + return cp; +} +EXPORT_SYMBOL(bnx2_cnic_probe); + +static void +bnx2_cnic_stop(struct bnx2 *bp) +{ + struct cnic_ops *c_ops; + struct cnic_ctl_info info; + + mutex_lock(&bp->cnic_lock); + c_ops = rcu_dereference_protected(bp->cnic_ops, + lockdep_is_held(&bp->cnic_lock)); + if (c_ops) { + info.cmd = CNIC_CTL_STOP_CMD; + c_ops->cnic_ctl(bp->cnic_data, &info); + } + mutex_unlock(&bp->cnic_lock); +} + +static void +bnx2_cnic_start(struct bnx2 *bp) +{ + struct cnic_ops *c_ops; + struct cnic_ctl_info info; + + mutex_lock(&bp->cnic_lock); + c_ops = rcu_dereference_protected(bp->cnic_ops, + lockdep_is_held(&bp->cnic_lock)); + if (c_ops) { + if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + + bnapi->cnic_tag = bnapi->last_status_idx; + } + info.cmd = CNIC_CTL_START_CMD; + c_ops->cnic_ctl(bp->cnic_data, &info); + } + mutex_unlock(&bp->cnic_lock); +} + +#else + +static void +bnx2_cnic_stop(struct bnx2 *bp) +{ +} + +static void +bnx2_cnic_start(struct bnx2 *bp) +{ +} + +#endif + +static int +bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) +{ + u32 val1; + int i, ret; + + if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { + val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; + + REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + REG_RD(bp, BNX2_EMAC_MDIO_MODE); + + udelay(40); + } + + val1 = (bp->phy_addr << 21) | (reg << 16) | + BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT | + BNX2_EMAC_MDIO_COMM_START_BUSY; + REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); + + for (i = 0; i < 50; i++) { + udelay(10); + + val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); + if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + + val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); + val1 &= BNX2_EMAC_MDIO_COMM_DATA; + + break; + } + } + + if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) { + *val = 0x0; + ret = -EBUSY; + } + else { + *val = val1; + ret = 0; + } + + if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { + val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; + + REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + REG_RD(bp, BNX2_EMAC_MDIO_MODE); + + udelay(40); + } + + return ret; +} + +static int +bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val) +{ + u32 val1; + int i, ret; + + if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { + val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; + + REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + REG_RD(bp, BNX2_EMAC_MDIO_MODE); + + udelay(40); + } + + val1 = (bp->phy_addr << 21) 
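+	/* the MDIO command word packs the PHY address into bits 25:21
+	 * and the register number into bits 20:16, with the 16-bit
+	 * write data in the low half -- mirroring the read path in
+	 * bnx2_read_phy() above */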
| (reg << 16) | val | + BNX2_EMAC_MDIO_COMM_COMMAND_WRITE | + BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT; + REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); + + for (i = 0; i < 50; i++) { + udelay(10); + + val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); + if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + + if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) + ret = -EBUSY; + else + ret = 0; + + if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { + val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; + + REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + REG_RD(bp, BNX2_EMAC_MDIO_MODE); + + udelay(40); + } + + return ret; +} + +static void +bnx2_disable_int(struct bnx2 *bp) +{ + int i; + struct bnx2_napi *bnapi; + + for (i = 0; i < bp->irq_nvecs; i++) { + bnapi = &bp->bnx2_napi[i]; + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + } + REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); +} + +static void +bnx2_enable_int(struct bnx2 *bp) +{ + int i; + struct bnx2_napi *bnapi; + + for (i = 0; i < bp->irq_nvecs; i++) { + bnapi = &bp->bnx2_napi[i]; + + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT | + bnapi->last_status_idx); + + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); + } + REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); +} + +static void +bnx2_disable_int_sync(struct bnx2 *bp) +{ + int i; + + atomic_inc(&bp->intr_sem); + if (!netif_running(bp->dev)) + return; + + bnx2_disable_int(bp); + for (i = 0; i < bp->irq_nvecs; i++) + synchronize_irq(bp->irq_tbl[i].vector); +} + +static void +bnx2_napi_disable(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->irq_nvecs; i++) + napi_disable(&bp->bnx2_napi[i].napi); +} + +static void +bnx2_napi_enable(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->irq_nvecs; i++) + napi_enable(&bp->bnx2_napi[i].napi); +} + +static void +bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic) +{ + if (stop_cnic) + bnx2_cnic_stop(bp); + if (netif_running(bp->dev)) { + bnx2_napi_disable(bp); + netif_tx_disable(bp->dev); + } + bnx2_disable_int_sync(bp); + netif_carrier_off(bp->dev); /* prevent tx timeout */ +} + +static void +bnx2_netif_start(struct bnx2 *bp, bool start_cnic) +{ + if (atomic_dec_and_test(&bp->intr_sem)) { + if (netif_running(bp->dev)) { + netif_tx_wake_all_queues(bp->dev); + spin_lock_bh(&bp->phy_lock); + if (bp->link_up) + netif_carrier_on(bp->dev); + spin_unlock_bh(&bp->phy_lock); + bnx2_napi_enable(bp); + bnx2_enable_int(bp); + if (start_cnic) + bnx2_cnic_start(bp); + } + } +} + +static void +bnx2_free_tx_mem(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_tx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + + if (txr->tx_desc_ring) { + dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE, + txr->tx_desc_ring, + txr->tx_desc_mapping); + txr->tx_desc_ring = NULL; + } + kfree(txr->tx_buf_ring); + txr->tx_buf_ring = NULL; + } +} + +static void +bnx2_free_rx_mem(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_rx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + int j; + + for (j = 0; j < bp->rx_max_ring; j++) { + if (rxr->rx_desc_ring[j]) + dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE, + rxr->rx_desc_ring[j], + rxr->rx_desc_mapping[j]); + rxr->rx_desc_ring[j] = 
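+			/* descriptor rings are dma_alloc_coherent()
+			 * memory because the chip DMAs them, while the
+			 * rx_buf_ring/rx_pg_ring shadow arrays are
+			 * plain vzalloc() memory only the CPU touches,
+			 * hence the dma_free_coherent()/vfree()
+			 * pairing here */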
NULL; + } + vfree(rxr->rx_buf_ring); + rxr->rx_buf_ring = NULL; + + for (j = 0; j < bp->rx_max_pg_ring; j++) { + if (rxr->rx_pg_desc_ring[j]) + dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE, + rxr->rx_pg_desc_ring[j], + rxr->rx_pg_desc_mapping[j]); + rxr->rx_pg_desc_ring[j] = NULL; + } + vfree(rxr->rx_pg_ring); + rxr->rx_pg_ring = NULL; + } +} + +static int +bnx2_alloc_tx_mem(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_tx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + + txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL); + if (txr->tx_buf_ring == NULL) + return -ENOMEM; + + txr->tx_desc_ring = + dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE, + &txr->tx_desc_mapping, GFP_KERNEL); + if (txr->tx_desc_ring == NULL) + return -ENOMEM; + } + return 0; +} + +static int +bnx2_alloc_rx_mem(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_rx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + int j; + + rxr->rx_buf_ring = + vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); + if (rxr->rx_buf_ring == NULL) + return -ENOMEM; + + for (j = 0; j < bp->rx_max_ring; j++) { + rxr->rx_desc_ring[j] = + dma_alloc_coherent(&bp->pdev->dev, + RXBD_RING_SIZE, + &rxr->rx_desc_mapping[j], + GFP_KERNEL); + if (rxr->rx_desc_ring[j] == NULL) + return -ENOMEM; + + } + + if (bp->rx_pg_ring_size) { + rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE * + bp->rx_max_pg_ring); + if (rxr->rx_pg_ring == NULL) + return -ENOMEM; + + } + + for (j = 0; j < bp->rx_max_pg_ring; j++) { + rxr->rx_pg_desc_ring[j] = + dma_alloc_coherent(&bp->pdev->dev, + RXBD_RING_SIZE, + &rxr->rx_pg_desc_mapping[j], + GFP_KERNEL); + if (rxr->rx_pg_desc_ring[j] == NULL) + return -ENOMEM; + + } + } + return 0; +} + +static void +bnx2_free_mem(struct bnx2 *bp) +{ + int i; + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + + bnx2_free_tx_mem(bp); + bnx2_free_rx_mem(bp); + + for (i = 0; i < bp->ctx_pages; i++) { + if (bp->ctx_blk[i]) { + dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE, + bp->ctx_blk[i], + bp->ctx_blk_mapping[i]); + bp->ctx_blk[i] = NULL; + } + } + if (bnapi->status_blk.msi) { + dma_free_coherent(&bp->pdev->dev, bp->status_stats_size, + bnapi->status_blk.msi, + bp->status_blk_mapping); + bnapi->status_blk.msi = NULL; + bp->stats_blk = NULL; + } +} + +static int +bnx2_alloc_mem(struct bnx2 *bp) +{ + int i, status_blk_size, err; + struct bnx2_napi *bnapi; + void *status_blk; + + /* Combine status and statistics blocks into one allocation. 
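+ * The single DMA-coherent buffer is laid out with the status block
+ * area first (one block, or BNX2_MAX_MSIX_HW_VEC blocks spaced
+ * BNX2_SBLK_MSIX_ALIGN_SIZE apart when the chip is MSI-X capable)
+ * and the statistics block right after it at offset status_blk_size;
+ * stats_blk and stats_blk_mapping below simply point past the
+ * status area.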
*/ + status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block)); + if (bp->flags & BNX2_FLAG_MSIX_CAP) + status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC * + BNX2_SBLK_MSIX_ALIGN_SIZE); + bp->status_stats_size = status_blk_size + + sizeof(struct statistics_block); + + status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, + &bp->status_blk_mapping, GFP_KERNEL); + if (status_blk == NULL) + goto alloc_mem_err; + + memset(status_blk, 0, bp->status_stats_size); + + bnapi = &bp->bnx2_napi[0]; + bnapi->status_blk.msi = status_blk; + bnapi->hw_tx_cons_ptr = + &bnapi->status_blk.msi->status_tx_quick_consumer_index0; + bnapi->hw_rx_cons_ptr = + &bnapi->status_blk.msi->status_rx_quick_consumer_index0; + if (bp->flags & BNX2_FLAG_MSIX_CAP) { + for (i = 1; i < bp->irq_nvecs; i++) { + struct status_block_msix *sblk; + + bnapi = &bp->bnx2_napi[i]; + + sblk = (void *) (status_blk + + BNX2_SBLK_MSIX_ALIGN_SIZE * i); + bnapi->status_blk.msix = sblk; + bnapi->hw_tx_cons_ptr = + &sblk->status_tx_quick_consumer_index; + bnapi->hw_rx_cons_ptr = + &sblk->status_rx_quick_consumer_index; + bnapi->int_num = i << 24; + } + } + + bp->stats_blk = status_blk + status_blk_size; + + bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE; + if (bp->ctx_pages == 0) + bp->ctx_pages = 1; + for (i = 0; i < bp->ctx_pages; i++) { + bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev, + BCM_PAGE_SIZE, + &bp->ctx_blk_mapping[i], + GFP_KERNEL); + if (bp->ctx_blk[i] == NULL) + goto alloc_mem_err; + } + } + + err = bnx2_alloc_rx_mem(bp); + if (err) + goto alloc_mem_err; + + err = bnx2_alloc_tx_mem(bp); + if (err) + goto alloc_mem_err; + + return 0; + +alloc_mem_err: + bnx2_free_mem(bp); + return -ENOMEM; +} + +static void +bnx2_report_fw_link(struct bnx2 *bp) +{ + u32 fw_link_status = 0; + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return; + + if (bp->link_up) { + u32 bmsr; + + switch (bp->line_speed) { + case SPEED_10: + if (bp->duplex == DUPLEX_HALF) + fw_link_status = BNX2_LINK_STATUS_10HALF; + else + fw_link_status = BNX2_LINK_STATUS_10FULL; + break; + case SPEED_100: + if (bp->duplex == DUPLEX_HALF) + fw_link_status = BNX2_LINK_STATUS_100HALF; + else + fw_link_status = BNX2_LINK_STATUS_100FULL; + break; + case SPEED_1000: + if (bp->duplex == DUPLEX_HALF) + fw_link_status = BNX2_LINK_STATUS_1000HALF; + else + fw_link_status = BNX2_LINK_STATUS_1000FULL; + break; + case SPEED_2500: + if (bp->duplex == DUPLEX_HALF) + fw_link_status = BNX2_LINK_STATUS_2500HALF; + else + fw_link_status = BNX2_LINK_STATUS_2500FULL; + break; + } + + fw_link_status |= BNX2_LINK_STATUS_LINK_UP; + + if (bp->autoneg) { + fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED; + + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + + if (!(bmsr & BMSR_ANEGCOMPLETE) || + bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) + fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET; + else + fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE; + } + } + else + fw_link_status = BNX2_LINK_STATUS_LINK_DOWN; + + bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status); +} + +static char * +bnx2_xceiver_str(struct bnx2 *bp) +{ + return (bp->phy_port == PORT_FIBRE) ? "SerDes" : + ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? 
"Remote Copper" : + "Copper"); +} + +static void +bnx2_report_link(struct bnx2 *bp) +{ + if (bp->link_up) { + netif_carrier_on(bp->dev); + netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex", + bnx2_xceiver_str(bp), + bp->line_speed, + bp->duplex == DUPLEX_FULL ? "full" : "half"); + + if (bp->flow_ctrl) { + if (bp->flow_ctrl & FLOW_CTRL_RX) { + pr_cont(", receive "); + if (bp->flow_ctrl & FLOW_CTRL_TX) + pr_cont("& transmit "); + } + else { + pr_cont(", transmit "); + } + pr_cont("flow control ON"); + } + pr_cont("\n"); + } else { + netif_carrier_off(bp->dev); + netdev_err(bp->dev, "NIC %s Link is Down\n", + bnx2_xceiver_str(bp)); + } + + bnx2_report_fw_link(bp); +} + +static void +bnx2_resolve_flow_ctrl(struct bnx2 *bp) +{ + u32 local_adv, remote_adv; + + bp->flow_ctrl = 0; + if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != + (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { + + if (bp->duplex == DUPLEX_FULL) { + bp->flow_ctrl = bp->req_flow_ctrl; + } + return; + } + + if (bp->duplex != DUPLEX_FULL) { + return; + } + + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (CHIP_NUM(bp) == CHIP_NUM_5708)) { + u32 val; + + bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); + if (val & BCM5708S_1000X_STAT1_TX_PAUSE) + bp->flow_ctrl |= FLOW_CTRL_TX; + if (val & BCM5708S_1000X_STAT1_RX_PAUSE) + bp->flow_ctrl |= FLOW_CTRL_RX; + return; + } + + bnx2_read_phy(bp, bp->mii_adv, &local_adv); + bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + u32 new_local_adv = 0; + u32 new_remote_adv = 0; + + if (local_adv & ADVERTISE_1000XPAUSE) + new_local_adv |= ADVERTISE_PAUSE_CAP; + if (local_adv & ADVERTISE_1000XPSE_ASYM) + new_local_adv |= ADVERTISE_PAUSE_ASYM; + if (remote_adv & ADVERTISE_1000XPAUSE) + new_remote_adv |= ADVERTISE_PAUSE_CAP; + if (remote_adv & ADVERTISE_1000XPSE_ASYM) + new_remote_adv |= ADVERTISE_PAUSE_ASYM; + + local_adv = new_local_adv; + remote_adv = new_remote_adv; + } + + /* See Table 28B-3 of 802.3ab-1999 spec. 
*/ + if (local_adv & ADVERTISE_PAUSE_CAP) { + if(local_adv & ADVERTISE_PAUSE_ASYM) { + if (remote_adv & ADVERTISE_PAUSE_CAP) { + bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + else if (remote_adv & ADVERTISE_PAUSE_ASYM) { + bp->flow_ctrl = FLOW_CTRL_RX; + } + } + else { + if (remote_adv & ADVERTISE_PAUSE_CAP) { + bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + } + } + else if (local_adv & ADVERTISE_PAUSE_ASYM) { + if ((remote_adv & ADVERTISE_PAUSE_CAP) && + (remote_adv & ADVERTISE_PAUSE_ASYM)) { + + bp->flow_ctrl = FLOW_CTRL_TX; + } + } +} + +static int +bnx2_5709s_linkup(struct bnx2 *bp) +{ + u32 val, speed; + + bp->link_up = 1; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS); + bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val); + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + + if ((bp->autoneg & AUTONEG_SPEED) == 0) { + bp->line_speed = bp->req_line_speed; + bp->duplex = bp->req_duplex; + return 0; + } + speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK; + switch (speed) { + case MII_BNX2_GP_TOP_AN_SPEED_10: + bp->line_speed = SPEED_10; + break; + case MII_BNX2_GP_TOP_AN_SPEED_100: + bp->line_speed = SPEED_100; + break; + case MII_BNX2_GP_TOP_AN_SPEED_1G: + case MII_BNX2_GP_TOP_AN_SPEED_1GKV: + bp->line_speed = SPEED_1000; + break; + case MII_BNX2_GP_TOP_AN_SPEED_2_5G: + bp->line_speed = SPEED_2500; + break; + } + if (val & MII_BNX2_GP_TOP_AN_FD) + bp->duplex = DUPLEX_FULL; + else + bp->duplex = DUPLEX_HALF; + return 0; +} + +static int +bnx2_5708s_linkup(struct bnx2 *bp) +{ + u32 val; + + bp->link_up = 1; + bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); + switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) { + case BCM5708S_1000X_STAT1_SPEED_10: + bp->line_speed = SPEED_10; + break; + case BCM5708S_1000X_STAT1_SPEED_100: + bp->line_speed = SPEED_100; + break; + case BCM5708S_1000X_STAT1_SPEED_1G: + bp->line_speed = SPEED_1000; + break; + case BCM5708S_1000X_STAT1_SPEED_2G5: + bp->line_speed = SPEED_2500; + break; + } + if (val & BCM5708S_1000X_STAT1_FD) + bp->duplex = DUPLEX_FULL; + else + bp->duplex = DUPLEX_HALF; + + return 0; +} + +static int +bnx2_5706s_linkup(struct bnx2 *bp) +{ + u32 bmcr, local_adv, remote_adv, common; + + bp->link_up = 1; + bp->line_speed = SPEED_1000; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (bmcr & BMCR_FULLDPLX) { + bp->duplex = DUPLEX_FULL; + } + else { + bp->duplex = DUPLEX_HALF; + } + + if (!(bmcr & BMCR_ANENABLE)) { + return 0; + } + + bnx2_read_phy(bp, bp->mii_adv, &local_adv); + bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); + + common = local_adv & remote_adv; + if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) { + + if (common & ADVERTISE_1000XFULL) { + bp->duplex = DUPLEX_FULL; + } + else { + bp->duplex = DUPLEX_HALF; + } + } + + return 0; +} + +static int +bnx2_copper_linkup(struct bnx2 *bp) +{ + u32 bmcr; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (bmcr & BMCR_ANENABLE) { + u32 local_adv, remote_adv, common; + + bnx2_read_phy(bp, MII_CTRL1000, &local_adv); + bnx2_read_phy(bp, MII_STAT1000, &remote_adv); + + common = local_adv & (remote_adv >> 2); + if (common & ADVERTISE_1000FULL) { + bp->line_speed = SPEED_1000; + bp->duplex = DUPLEX_FULL; + } + else if (common & ADVERTISE_1000HALF) { + bp->line_speed = SPEED_1000; + bp->duplex = DUPLEX_HALF; + } + else { + bnx2_read_phy(bp, bp->mii_adv, &local_adv); + bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); + + common = local_adv & remote_adv; + if (common & ADVERTISE_100FULL) { + bp->line_speed = SPEED_100; + bp->duplex = DUPLEX_FULL; + } + 
else if (common & ADVERTISE_100HALF) { + bp->line_speed = SPEED_100; + bp->duplex = DUPLEX_HALF; + } + else if (common & ADVERTISE_10FULL) { + bp->line_speed = SPEED_10; + bp->duplex = DUPLEX_FULL; + } + else if (common & ADVERTISE_10HALF) { + bp->line_speed = SPEED_10; + bp->duplex = DUPLEX_HALF; + } + else { + bp->line_speed = 0; + bp->link_up = 0; + } + } + } + else { + if (bmcr & BMCR_SPEED100) { + bp->line_speed = SPEED_100; + } + else { + bp->line_speed = SPEED_10; + } + if (bmcr & BMCR_FULLDPLX) { + bp->duplex = DUPLEX_FULL; + } + else { + bp->duplex = DUPLEX_HALF; + } + } + + return 0; +} + +static void +bnx2_init_rx_context(struct bnx2 *bp, u32 cid) +{ + u32 val, rx_cid_addr = GET_CID_ADDR(cid); + + val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; + val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; + val |= 0x02 << 8; + + if (bp->flow_ctrl & FLOW_CTRL_TX) + val |= BNX2_L2CTX_FLOW_CTRL_ENABLE; + + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val); +} + +static void +bnx2_init_all_rx_contexts(struct bnx2 *bp) +{ + int i; + u32 cid; + + for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) { + if (i == 1) + cid = RX_RSS_CID; + bnx2_init_rx_context(bp, cid); + } +} + +static void +bnx2_set_mac_link(struct bnx2 *bp) +{ + u32 val; + + REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620); + if (bp->link_up && (bp->line_speed == SPEED_1000) && + (bp->duplex == DUPLEX_HALF)) { + REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff); + } + + /* Configure the EMAC mode register. */ + val = REG_RD(bp, BNX2_EMAC_MODE); + + val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | + BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | + BNX2_EMAC_MODE_25G_MODE); + + if (bp->link_up) { + switch (bp->line_speed) { + case SPEED_10: + if (CHIP_NUM(bp) != CHIP_NUM_5706) { + val |= BNX2_EMAC_MODE_PORT_MII_10M; + break; + } + /* fall through */ + case SPEED_100: + val |= BNX2_EMAC_MODE_PORT_MII; + break; + case SPEED_2500: + val |= BNX2_EMAC_MODE_25G_MODE; + /* fall through */ + case SPEED_1000: + val |= BNX2_EMAC_MODE_PORT_GMII; + break; + } + } + else { + val |= BNX2_EMAC_MODE_PORT_GMII; + } + + /* Set the MAC to operate in the appropriate duplex mode. */ + if (bp->duplex == DUPLEX_HALF) + val |= BNX2_EMAC_MODE_HALF_DUPLEX; + REG_WR(bp, BNX2_EMAC_MODE, val); + + /* Enable/disable rx PAUSE. */ + bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN; + + if (bp->flow_ctrl & FLOW_CTRL_RX) + bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN; + REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode); + + /* Enable/disable tx PAUSE. */ + val = REG_RD(bp, BNX2_EMAC_TX_MODE); + val &= ~BNX2_EMAC_TX_MODE_FLOW_EN; + + if (bp->flow_ctrl & FLOW_CTRL_TX) + val |= BNX2_EMAC_TX_MODE_FLOW_EN; + REG_WR(bp, BNX2_EMAC_TX_MODE, val); + + /* Acknowledge the interrupt. 
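+ * Writing the LINK_CHANGE bit back to BNX2_EMAC_STATUS clears the
+ * latched link-change attention so that a subsequent link event
+ * can be signalled again.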
*/ + REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE); + + bnx2_init_all_rx_contexts(bp); +} + +static void +bnx2_enable_bmsr1(struct bnx2 *bp) +{ + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (CHIP_NUM(bp) == CHIP_NUM_5709)) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_GP_STATUS); +} + +static void +bnx2_disable_bmsr1(struct bnx2 *bp) +{ + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (CHIP_NUM(bp) == CHIP_NUM_5709)) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); +} + +static int +bnx2_test_and_enable_2g5(struct bnx2 *bp) +{ + u32 up1; + int ret = 1; + + if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) + return 0; + + if (bp->autoneg & AUTONEG_SPEED) + bp->advertising |= ADVERTISED_2500baseX_Full; + + if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); + + bnx2_read_phy(bp, bp->mii_up1, &up1); + if (!(up1 & BCM5708S_UP1_2G5)) { + up1 |= BCM5708S_UP1_2G5; + bnx2_write_phy(bp, bp->mii_up1, up1); + ret = 0; + } + + if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + + return ret; +} + +static int +bnx2_test_and_disable_2g5(struct bnx2 *bp) +{ + u32 up1; + int ret = 0; + + if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) + return 0; + + if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); + + bnx2_read_phy(bp, bp->mii_up1, &up1); + if (up1 & BCM5708S_UP1_2G5) { + up1 &= ~BCM5708S_UP1_2G5; + bnx2_write_phy(bp, bp->mii_up1, up1); + ret = 1; + } + + if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + + return ret; +} + +static void +bnx2_enable_forced_2g5(struct bnx2 *bp) +{ + u32 uninitialized_var(bmcr); + int err; + + if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) + return; + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + u32 val; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_SERDES_DIG); + if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) { + val &= ~MII_BNX2_SD_MISC1_FORCE_MSK; + val |= MII_BNX2_SD_MISC1_FORCE | + MII_BNX2_SD_MISC1_FORCE_2_5G; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); + } + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (!err) + bmcr |= BCM5708S_BMCR_FORCE_2500; + } else { + return; + } + + if (err) + return; + + if (bp->autoneg & AUTONEG_SPEED) { + bmcr &= ~BMCR_ANENABLE; + if (bp->req_duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + bnx2_write_phy(bp, bp->mii_bmcr, bmcr); +} + +static void +bnx2_disable_forced_2g5(struct bnx2 *bp) +{ + u32 uninitialized_var(bmcr); + int err; + + if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) + return; + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + u32 val; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_SERDES_DIG); + if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) { + val &= ~MII_BNX2_SD_MISC1_FORCE; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); + } + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (!err) + bmcr &= ~BCM5708S_BMCR_FORCE_2500; + } else { + return; + } + + if (err) + return; + + if (bp->autoneg & AUTONEG_SPEED) + bmcr |= 
BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART; + bnx2_write_phy(bp, bp->mii_bmcr, bmcr); +} + +static void +bnx2_5706s_force_link_dn(struct bnx2 *bp, int start) +{ + u32 val; + + bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL); + bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val); + if (start) + bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f); + else + bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0); +} + +static int +bnx2_set_link(struct bnx2 *bp) +{ + u32 bmsr; + u8 link_up; + + if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) { + bp->link_up = 1; + return 0; + } + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return 0; + + link_up = bp->link_up; + + bnx2_enable_bmsr1(bp); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_disable_bmsr1(bp); + + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (CHIP_NUM(bp) == CHIP_NUM_5706)) { + u32 val, an_dbg; + + if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) { + bnx2_5706s_force_link_dn(bp, 0); + bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN; + } + val = REG_RD(bp, BNX2_EMAC_STATUS); + + bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); + + if ((val & BNX2_EMAC_STATUS_LINK) && + !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC)) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + + if (bmsr & BMSR_LSTATUS) { + bp->link_up = 1; + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + if (CHIP_NUM(bp) == CHIP_NUM_5706) + bnx2_5706s_linkup(bp); + else if (CHIP_NUM(bp) == CHIP_NUM_5708) + bnx2_5708s_linkup(bp); + else if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_5709s_linkup(bp); + } + else { + bnx2_copper_linkup(bp); + } + bnx2_resolve_flow_ctrl(bp); + } + else { + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (bp->autoneg & AUTONEG_SPEED)) + bnx2_disable_forced_2g5(bp); + + if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) { + u32 bmcr; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + bmcr |= BMCR_ANENABLE; + bnx2_write_phy(bp, bp->mii_bmcr, bmcr); + + bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; + } + bp->link_up = 0; + } + + if (bp->link_up != link_up) { + bnx2_report_link(bp); + } + + bnx2_set_mac_link(bp); + + return 0; +} + +static int +bnx2_reset_phy(struct bnx2 *bp) +{ + int i; + u32 reg; + + bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET); + +#define PHY_RESET_MAX_WAIT 100 + for (i = 0; i < PHY_RESET_MAX_WAIT; i++) { + udelay(10); + + bnx2_read_phy(bp, bp->mii_bmcr, ®); + if (!(reg & BMCR_RESET)) { + udelay(20); + break; + } + } + if (i == PHY_RESET_MAX_WAIT) { + return -EBUSY; + } + return 0; +} + +static u32 +bnx2_phy_get_pause_adv(struct bnx2 *bp) +{ + u32 adv = 0; + + if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) == + (FLOW_CTRL_RX | FLOW_CTRL_TX)) { + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + adv = ADVERTISE_1000XPAUSE; + } + else { + adv = ADVERTISE_PAUSE_CAP; + } + } + else if (bp->req_flow_ctrl & FLOW_CTRL_TX) { + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + adv = ADVERTISE_1000XPSE_ASYM; + } + else { + adv = ADVERTISE_PAUSE_ASYM; + } + } + else if (bp->req_flow_ctrl & FLOW_CTRL_RX) { + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; + } + else { + adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + } + } + return adv; +} + +static int bnx2_fw_sync(struct bnx2 *, u32, int, int); + +static int +bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) +__releases(&bp->phy_lock) 
+__acquires(&bp->phy_lock) +{ + u32 speed_arg = 0, pause_adv; + + pause_adv = bnx2_phy_get_pause_adv(bp); + + if (bp->autoneg & AUTONEG_SPEED) { + speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG; + if (bp->advertising & ADVERTISED_10baseT_Half) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF; + if (bp->advertising & ADVERTISED_10baseT_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL; + if (bp->advertising & ADVERTISED_100baseT_Half) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF; + if (bp->advertising & ADVERTISED_100baseT_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL; + if (bp->advertising & ADVERTISED_1000baseT_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL; + if (bp->advertising & ADVERTISED_2500baseX_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; + } else { + if (bp->req_line_speed == SPEED_2500) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; + else if (bp->req_line_speed == SPEED_1000) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL; + else if (bp->req_line_speed == SPEED_100) { + if (bp->req_duplex == DUPLEX_FULL) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL; + else + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF; + } else if (bp->req_line_speed == SPEED_10) { + if (bp->req_duplex == DUPLEX_FULL) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL; + else + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF; + } + } + + if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP)) + speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE; + if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM)) + speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE; + + if (port == PORT_TP) + speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE | + BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED; + + bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg); + + spin_unlock_bh(&bp->phy_lock); + bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0); + spin_lock_bh(&bp->phy_lock); + + return 0; +} + +static int +bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) +{ + u32 adv, bmcr; + u32 new_adv = 0; + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return bnx2_setup_remote_phy(bp, port); + + if (!(bp->autoneg & AUTONEG_SPEED)) { + u32 new_bmcr; + int force_link_down = 0; + + if (bp->req_line_speed == SPEED_2500) { + if (!bnx2_test_and_enable_2g5(bp)) + force_link_down = 1; + } else if (bp->req_line_speed == SPEED_1000) { + if (bnx2_test_and_disable_2g5(bp)) + force_link_down = 1; + } + bnx2_read_phy(bp, bp->mii_adv, &adv); + adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF); + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + new_bmcr = bmcr & ~BMCR_ANENABLE; + new_bmcr |= BMCR_SPEED1000; + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (bp->req_line_speed == SPEED_2500) + bnx2_enable_forced_2g5(bp); + else if (bp->req_line_speed == SPEED_1000) { + bnx2_disable_forced_2g5(bp); + new_bmcr &= ~0x2000; + } + + } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { + if (bp->req_line_speed == SPEED_2500) + new_bmcr |= BCM5708S_BMCR_FORCE_2500; + else + new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500; + } + + if (bp->req_duplex == DUPLEX_FULL) { + adv |= ADVERTISE_1000XFULL; + new_bmcr |= BMCR_FULLDPLX; + } + else { + adv |= ADVERTISE_1000XHALF; + new_bmcr &= ~BMCR_FULLDPLX; + } + if ((new_bmcr != bmcr) || (force_link_down)) { + /* Force a link down visible on the other side */ + if (bp->link_up) { + bnx2_write_phy(bp, bp->mii_adv, adv & + ~(ADVERTISE_1000XFULL | + ADVERTISE_1000XHALF)); + bnx2_write_phy(bp, bp->mii_bmcr, bmcr | + 
BMCR_ANRESTART | BMCR_ANENABLE); + + bp->link_up = 0; + netif_carrier_off(bp->dev); + bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); + bnx2_report_link(bp); + } + bnx2_write_phy(bp, bp->mii_adv, adv); + bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); + } else { + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); + } + return 0; + } + + bnx2_test_and_enable_2g5(bp); + + if (bp->advertising & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000XFULL; + + new_adv |= bnx2_phy_get_pause_adv(bp); + + bnx2_read_phy(bp, bp->mii_adv, &adv); + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + bp->serdes_an_pending = 0; + if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) { + /* Force a link down visible on the other side */ + if (bp->link_up) { + bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); + spin_unlock_bh(&bp->phy_lock); + msleep(20); + spin_lock_bh(&bp->phy_lock); + } + + bnx2_write_phy(bp, bp->mii_adv, new_adv); + bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | + BMCR_ANENABLE); + /* Speed up link-up time when the link partner + * does not autonegotiate, which is very common + * in blade servers. Some blade servers use + * IPMI for keyboard input and it's important + * to minimize link disruptions. Autonegotiation involves + * exchanging base pages plus 3 next pages and + * normally completes in about 120 msec. + */ + bp->current_interval = BNX2_SERDES_AN_TIMEOUT; + bp->serdes_an_pending = 1; + mod_timer(&bp->timer, jiffies + bp->current_interval); + } else { + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); + } + + return 0; +} + +#define ETHTOOL_ALL_FIBRE_SPEED \ + (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \ + (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\ + (ADVERTISED_1000baseT_Full) + +#define ETHTOOL_ALL_COPPER_SPEED \ + (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ + ADVERTISED_1000baseT_Full) + +#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA) + +#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL) + +static void +bnx2_set_default_remote_link(struct bnx2 *bp) +{ + u32 link; + + if (bp->phy_port == PORT_TP) + link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK); + else + link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK); + + if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) { + bp->req_line_speed = 0; + bp->autoneg |= AUTONEG_SPEED; + bp->advertising = ADVERTISED_Autoneg; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF) + bp->advertising |= ADVERTISED_10baseT_Half; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL) + bp->advertising |= ADVERTISED_10baseT_Full; + if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF) + bp->advertising |= ADVERTISED_100baseT_Half; + if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL) + bp->advertising |= ADVERTISED_100baseT_Full; + if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL) + bp->advertising |= ADVERTISED_1000baseT_Full; + if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) + bp->advertising |= ADVERTISED_2500baseX_Full; + } else { + bp->autoneg = 0; + bp->advertising = 0; + bp->req_duplex = DUPLEX_FULL; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10) { + bp->req_line_speed = SPEED_10; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF) + bp->req_duplex = DUPLEX_HALF; + } + if (link & BNX2_NETLINK_SET_LINK_SPEED_100) { + bp->req_line_speed = SPEED_100; + if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF) + bp->req_duplex = DUPLEX_HALF; + } + if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL) + 
bp->req_line_speed = SPEED_1000; + if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) + bp->req_line_speed = SPEED_2500; + } +} + +static void +bnx2_set_default_link(struct bnx2 *bp) +{ + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { + bnx2_set_default_remote_link(bp); + return; + } + + bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; + bp->req_line_speed = 0; + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + u32 reg; + + bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; + + reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG); + reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK; + if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) { + bp->autoneg = 0; + bp->req_line_speed = bp->line_speed = SPEED_1000; + bp->req_duplex = DUPLEX_FULL; + } + } else + bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; +} + +static void +bnx2_send_heart_beat(struct bnx2 *bp) +{ + u32 msg; + u32 addr; + + spin_lock(&bp->indirect_lock); + msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK); + addr = bp->shmem_base + BNX2_DRV_PULSE_MB; + REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr); + REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg); + spin_unlock(&bp->indirect_lock); +} + +static void +bnx2_remote_phy_event(struct bnx2 *bp) +{ + u32 msg; + u8 link_up = bp->link_up; + u8 old_port; + + msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS); + + if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED) + bnx2_send_heart_beat(bp); + + msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED; + + if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN) + bp->link_up = 0; + else { + u32 speed; + + bp->link_up = 1; + speed = msg & BNX2_LINK_STATUS_SPEED_MASK; + bp->duplex = DUPLEX_FULL; + switch (speed) { + case BNX2_LINK_STATUS_10HALF: + bp->duplex = DUPLEX_HALF; + case BNX2_LINK_STATUS_10FULL: + bp->line_speed = SPEED_10; + break; + case BNX2_LINK_STATUS_100HALF: + bp->duplex = DUPLEX_HALF; + case BNX2_LINK_STATUS_100BASE_T4: + case BNX2_LINK_STATUS_100FULL: + bp->line_speed = SPEED_100; + break; + case BNX2_LINK_STATUS_1000HALF: + bp->duplex = DUPLEX_HALF; + case BNX2_LINK_STATUS_1000FULL: + bp->line_speed = SPEED_1000; + break; + case BNX2_LINK_STATUS_2500HALF: + bp->duplex = DUPLEX_HALF; + case BNX2_LINK_STATUS_2500FULL: + bp->line_speed = SPEED_2500; + break; + default: + bp->line_speed = 0; + break; + } + + bp->flow_ctrl = 0; + if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != + (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { + if (bp->duplex == DUPLEX_FULL) + bp->flow_ctrl = bp->req_flow_ctrl; + } else { + if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED) + bp->flow_ctrl |= FLOW_CTRL_TX; + if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED) + bp->flow_ctrl |= FLOW_CTRL_RX; + } + + old_port = bp->phy_port; + if (msg & BNX2_LINK_STATUS_SERDES_LINK) + bp->phy_port = PORT_FIBRE; + else + bp->phy_port = PORT_TP; + + if (old_port != bp->phy_port) + bnx2_set_default_link(bp); + + } + if (bp->link_up != link_up) + bnx2_report_link(bp); + + bnx2_set_mac_link(bp); +} + +static int +bnx2_set_remote_link(struct bnx2 *bp) +{ + u32 evt_code; + + evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB); + switch (evt_code) { + case BNX2_FW_EVT_CODE_LINK_EVENT: + bnx2_remote_phy_event(bp); + break; + case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT: + default: + bnx2_send_heart_beat(bp); + break; + } + return 0; +} + +static int +bnx2_setup_copper_phy(struct bnx2 *bp) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) +{ + u32 bmcr; + u32 new_bmcr; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + if (bp->autoneg & AUTONEG_SPEED) { + u32 adv_reg, 
adv1000_reg; + u32 new_adv_reg = 0; + u32 new_adv1000_reg = 0; + + bnx2_read_phy(bp, bp->mii_adv, &adv_reg); + adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + + bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg); + adv1000_reg &= PHY_ALL_1000_SPEED; + + if (bp->advertising & ADVERTISED_10baseT_Half) + new_adv_reg |= ADVERTISE_10HALF; + if (bp->advertising & ADVERTISED_10baseT_Full) + new_adv_reg |= ADVERTISE_10FULL; + if (bp->advertising & ADVERTISED_100baseT_Half) + new_adv_reg |= ADVERTISE_100HALF; + if (bp->advertising & ADVERTISED_100baseT_Full) + new_adv_reg |= ADVERTISE_100FULL; + if (bp->advertising & ADVERTISED_1000baseT_Full) + new_adv1000_reg |= ADVERTISE_1000FULL; + + new_adv_reg |= ADVERTISE_CSMA; + + new_adv_reg |= bnx2_phy_get_pause_adv(bp); + + if ((adv1000_reg != new_adv1000_reg) || + (adv_reg != new_adv_reg) || + ((bmcr & BMCR_ANENABLE) == 0)) { + + bnx2_write_phy(bp, bp->mii_adv, new_adv_reg); + bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg); + bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART | + BMCR_ANENABLE); + } + else if (bp->link_up) { + /* Flow ctrl may have changed from auto to forced */ + /* or vice-versa. */ + + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); + } + return 0; + } + + new_bmcr = 0; + if (bp->req_line_speed == SPEED_100) { + new_bmcr |= BMCR_SPEED100; + } + if (bp->req_duplex == DUPLEX_FULL) { + new_bmcr |= BMCR_FULLDPLX; + } + if (new_bmcr != bmcr) { + u32 bmsr; + + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + + if (bmsr & BMSR_LSTATUS) { + /* Force link down */ + bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); + spin_unlock_bh(&bp->phy_lock); + msleep(50); + spin_lock_bh(&bp->phy_lock); + + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + } + + bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); + + /* Normally, the new speed is set up after the link has + * gone down and up again. In some cases, link will not go + * down so we need to set up the new speed here. 
+ */ + if (bmsr & BMSR_LSTATUS) { + bp->line_speed = bp->req_line_speed; + bp->duplex = bp->req_duplex; + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); + } + } else { + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); + } + return 0; +} + +static int +bnx2_setup_phy(struct bnx2 *bp, u8 port) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) +{ + if (bp->loopback == MAC_LOOPBACK) + return 0; + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + return bnx2_setup_serdes_phy(bp, port); + } + else { + return bnx2_setup_copper_phy(bp); + } +} + +static int +bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy) +{ + u32 val; + + bp->mii_bmcr = MII_BMCR + 0x10; + bp->mii_bmsr = MII_BMSR + 0x10; + bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1; + bp->mii_adv = MII_ADVERTISE + 0x10; + bp->mii_lpa = MII_LPA + 0x10; + bp->mii_up1 = MII_BNX2_OVER1G_UP1; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER); + bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + if (reset_phy) + bnx2_reset_phy(bp); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG); + + bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val); + val &= ~MII_BNX2_SD_1000XCTL1_AUTODET; + val |= MII_BNX2_SD_1000XCTL1_FIBER; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); + bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val); + if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) + val |= BCM5708S_UP1_2G5; + else + val &= ~BCM5708S_UP1_2G5; + bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG); + bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val); + val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM; + bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0); + + val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN | + MII_BNX2_CL73_BAM_NP_AFT_BP_EN; + bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + + return 0; +} + +static int +bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy) +{ + u32 val; + + if (reset_phy) + bnx2_reset_phy(bp); + + bp->mii_up1 = BCM5708S_UP1; + + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3); + bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE); + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); + + bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val); + val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN; + bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val); + + bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val); + val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN; + bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val); + + if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) { + bnx2_read_phy(bp, BCM5708S_UP1, &val); + val |= BCM5708S_UP1_2G5; + bnx2_write_phy(bp, BCM5708S_UP1, val); + } + + if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || + (CHIP_ID(bp) == CHIP_ID_5708_B0) || + (CHIP_ID(bp) == CHIP_ID_5708_B1)) { + /* increase tx signal amplitude */ + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, + BCM5708S_BLK_ADDR_TX_MISC); + bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val); + val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM; + bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val); + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); + } + + val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) & + BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK; + + if 
(val) { + u32 is_backplane; + + is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG); + if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) { + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, + BCM5708S_BLK_ADDR_TX_MISC); + bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val); + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, + BCM5708S_BLK_ADDR_DIG); + } + } + return 0; +} + +static int +bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy) +{ + if (reset_phy) + bnx2_reset_phy(bp); + + bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; + + if (CHIP_NUM(bp) == CHIP_NUM_5706) + REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300); + + if (bp->dev->mtu > 1500) { + u32 val; + + /* Set extended packet length bit */ + bnx2_write_phy(bp, 0x18, 0x7); + bnx2_read_phy(bp, 0x18, &val); + bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000); + + bnx2_write_phy(bp, 0x1c, 0x6c00); + bnx2_read_phy(bp, 0x1c, &val); + bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02); + } + else { + u32 val; + + bnx2_write_phy(bp, 0x18, 0x7); + bnx2_read_phy(bp, 0x18, &val); + bnx2_write_phy(bp, 0x18, val & ~0x4007); + + bnx2_write_phy(bp, 0x1c, 0x6c00); + bnx2_read_phy(bp, 0x1c, &val); + bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00); + } + + return 0; +} + +static int +bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy) +{ + u32 val; + + if (reset_phy) + bnx2_reset_phy(bp); + + if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) { + bnx2_write_phy(bp, 0x18, 0x0c00); + bnx2_write_phy(bp, 0x17, 0x000a); + bnx2_write_phy(bp, 0x15, 0x310b); + bnx2_write_phy(bp, 0x17, 0x201f); + bnx2_write_phy(bp, 0x15, 0x9506); + bnx2_write_phy(bp, 0x17, 0x401f); + bnx2_write_phy(bp, 0x15, 0x14e2); + bnx2_write_phy(bp, 0x18, 0x0400); + } + + if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) { + bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, + MII_BNX2_DSP_EXPAND_REG | 0x8); + bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val); + val &= ~(1 << 8); + bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val); + } + + if (bp->dev->mtu > 1500) { + /* Set extended packet length bit */ + bnx2_write_phy(bp, 0x18, 0x7); + bnx2_read_phy(bp, 0x18, &val); + bnx2_write_phy(bp, 0x18, val | 0x4000); + + bnx2_read_phy(bp, 0x10, &val); + bnx2_write_phy(bp, 0x10, val | 0x1); + } + else { + bnx2_write_phy(bp, 0x18, 0x7); + bnx2_read_phy(bp, 0x18, &val); + bnx2_write_phy(bp, 0x18, val & ~0x4007); + + bnx2_read_phy(bp, 0x10, &val); + bnx2_write_phy(bp, 0x10, val & ~0x1); + } + + /* ethernet@wirespeed */ + bnx2_write_phy(bp, 0x18, 0x7007); + bnx2_read_phy(bp, 0x18, &val); + bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4)); + return 0; +} + + +static int +bnx2_init_phy(struct bnx2 *bp, int reset_phy) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) +{ + u32 val; + int rc = 0; + + bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK; + bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY; + + bp->mii_bmcr = MII_BMCR; + bp->mii_bmsr = MII_BMSR; + bp->mii_bmsr1 = MII_BMSR; + bp->mii_adv = MII_ADVERTISE; + bp->mii_lpa = MII_LPA; + + REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + goto setup_phy; + + bnx2_read_phy(bp, MII_PHYSID1, &val); + bp->phy_id = val << 16; + bnx2_read_phy(bp, MII_PHYSID2, &val); + bp->phy_id |= val & 0xffff; + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + if (CHIP_NUM(bp) == CHIP_NUM_5706) + rc = bnx2_init_5706s_phy(bp, reset_phy); + else if (CHIP_NUM(bp) == CHIP_NUM_5708) + rc = bnx2_init_5708s_phy(bp, reset_phy); + else if (CHIP_NUM(bp) == CHIP_NUM_5709) + rc = bnx2_init_5709s_phy(bp, reset_phy); + } + else { + rc = 
bnx2_init_copper_phy(bp, reset_phy); + } + +setup_phy: + if (!rc) + rc = bnx2_setup_phy(bp, bp->phy_port); + + return rc; +} + +static int +bnx2_set_mac_loopback(struct bnx2 *bp) +{ + u32 mac_mode; + + mac_mode = REG_RD(bp, BNX2_EMAC_MODE); + mac_mode &= ~BNX2_EMAC_MODE_PORT; + mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK; + REG_WR(bp, BNX2_EMAC_MODE, mac_mode); + bp->link_up = 1; + return 0; +} + +static int bnx2_test_link(struct bnx2 *); + +static int +bnx2_set_phy_loopback(struct bnx2 *bp) +{ + u32 mac_mode; + int rc, i; + + spin_lock_bh(&bp->phy_lock); + rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX | + BMCR_SPEED1000); + spin_unlock_bh(&bp->phy_lock); + if (rc) + return rc; + + for (i = 0; i < 10; i++) { + if (bnx2_test_link(bp) == 0) + break; + msleep(100); + } + + mac_mode = REG_RD(bp, BNX2_EMAC_MODE); + mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | + BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | + BNX2_EMAC_MODE_25G_MODE); + + mac_mode |= BNX2_EMAC_MODE_PORT_GMII; + REG_WR(bp, BNX2_EMAC_MODE, mac_mode); + bp->link_up = 1; + return 0; +} + +static void +bnx2_dump_mcp_state(struct bnx2 *bp) +{ + struct net_device *dev = bp->dev; + u32 mcp_p0, mcp_p1; + + netdev_err(dev, "<--- start MCP states dump --->\n"); + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + mcp_p0 = BNX2_MCP_STATE_P0; + mcp_p1 = BNX2_MCP_STATE_P1; + } else { + mcp_p0 = BNX2_MCP_STATE_P0_5708; + mcp_p1 = BNX2_MCP_STATE_P1_5708; + } + netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n", + bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1)); + netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n", + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE), + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE), + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK)); + netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n", + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER), + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER), + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION)); + netdev_err(dev, "DEBUG: shmem states:\n"); + netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]", + bnx2_shmem_rd(bp, BNX2_DRV_MB), + bnx2_shmem_rd(bp, BNX2_FW_MB), + bnx2_shmem_rd(bp, BNX2_LINK_STATUS)); + pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB)); + netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]", + bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE), + bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE)); + pr_cont(" condition[%08x]\n", + bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION)); + DP_SHMEM_LINE(bp, 0x3cc); + DP_SHMEM_LINE(bp, 0x3dc); + DP_SHMEM_LINE(bp, 0x3ec); + netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc)); + netdev_err(dev, "<--- end MCP states dump --->\n"); +} + +static int +bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent) +{ + int i; + u32 val; + + bp->fw_wr_seq++; + msg_data |= bp->fw_wr_seq; + + bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); + + if (!ack) + return 0; + + /* wait for an acknowledgement. */ + for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) { + msleep(10); + + val = bnx2_shmem_rd(bp, BNX2_FW_MB); + + if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ)) + break; + } + if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0) + return 0; + + /* If we timed out, inform the firmware that this is the case. 
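+ * The command code in the mailbox word is replaced with
+ * BNX2_DRV_MSG_CODE_FW_TIMEOUT (the sequence number is kept) so
+ * the bootcode can tell that the driver gave up waiting on this
+ * command.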
*/ + if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) { + msg_data &= ~BNX2_DRV_MSG_CODE; + msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT; + + bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); + if (!silent) { + pr_err("fw sync timeout, reset code = %x\n", msg_data); + bnx2_dump_mcp_state(bp); + } + + return -EBUSY; + } + + if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK) + return -EIO; + + return 0; +} + +static int +bnx2_init_5709_context(struct bnx2 *bp) +{ + int i, ret = 0; + u32 val; + + val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12); + val |= (BCM_PAGE_BITS - 8) << 16; + REG_WR(bp, BNX2_CTX_COMMAND, val); + for (i = 0; i < 10; i++) { + val = REG_RD(bp, BNX2_CTX_COMMAND); + if (!(val & BNX2_CTX_COMMAND_MEM_INIT)) + break; + udelay(2); + } + if (val & BNX2_CTX_COMMAND_MEM_INIT) + return -EBUSY; + + for (i = 0; i < bp->ctx_pages; i++) { + int j; + + if (bp->ctx_blk[i]) + memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE); + else + return -ENOMEM; + + REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, + (bp->ctx_blk_mapping[i] & 0xffffffff) | + BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID); + REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1, + (u64) bp->ctx_blk_mapping[i] >> 32); + REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i | + BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); + for (j = 0; j < 10; j++) { + + val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL); + if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) + break; + udelay(5); + } + if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { + ret = -EBUSY; + break; + } + } + return ret; +} + +static void +bnx2_init_context(struct bnx2 *bp) +{ + u32 vcid; + + vcid = 96; + while (vcid) { + u32 vcid_addr, pcid_addr, offset; + int i; + + vcid--; + + if (CHIP_ID(bp) == CHIP_ID_5706_A0) { + u32 new_vcid; + + vcid_addr = GET_PCID_ADDR(vcid); + if (vcid & 0x8) { + new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7); + } + else { + new_vcid = vcid; + } + pcid_addr = GET_PCID_ADDR(new_vcid); + } + else { + vcid_addr = GET_CID_ADDR(vcid); + pcid_addr = vcid_addr; + } + + for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) { + vcid_addr += (i << PHY_CTX_SHIFT); + pcid_addr += (i << PHY_CTX_SHIFT); + + REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr); + REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); + + /* Zero out the context. */ + for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) + bnx2_ctx_wr(bp, vcid_addr, offset, 0); + } + } +} + +static int +bnx2_alloc_bad_rbuf(struct bnx2 *bp) +{ + u16 *good_mbuf; + u32 good_mbuf_cnt; + u32 val; + + good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); + if (good_mbuf == NULL) { + pr_err("Failed to allocate memory in %s\n", __func__); + return -ENOMEM; + } + + REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE); + + good_mbuf_cnt = 0; + + /* Allocate a bunch of mbufs and save the good ones in an array. */ + val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1); + while (val & BNX2_RBUF_STATUS1_FREE_COUNT) { + bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND, + BNX2_RBUF_COMMAND_ALLOC_REQ); + + val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC); + + val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE; + + /* The addresses with Bit 9 set are bad memory blocks. */ + if (!(val & (1 << 9))) { + good_mbuf[good_mbuf_cnt] = (u16) val; + good_mbuf_cnt++; + } + + val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1); + } + + /* Free the good ones back to the mbuf pool thus discarding + * all the bad ones. 
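+ * Only the buffers that passed the bit-9 test above are returned;
+ * the bad blocks are simply never freed back, which takes them
+ * out of circulation for good.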
*/ + while (good_mbuf_cnt) { + good_mbuf_cnt--; + + val = good_mbuf[good_mbuf_cnt]; + val = (val << 9) | val | 1; + + bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val); + } + kfree(good_mbuf); + return 0; +} + +static void +bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos) +{ + u32 val; + + val = (mac_addr[0] << 8) | mac_addr[1]; + + REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val); + + val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val); +} + +static inline int +bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) +{ + dma_addr_t mapping; + struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; + struct rx_bd *rxbd = + &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)]; + struct page *page = alloc_page(gfp); + + if (!page) + return -ENOMEM; + mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { + __free_page(page); + return -EIO; + } + + rx_pg->page = page; + dma_unmap_addr_set(rx_pg, mapping, mapping); + rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; + rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; + return 0; +} + +static void +bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) +{ + struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; + struct page *page = rx_pg->page; + + if (!page) + return; + + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), + PAGE_SIZE, PCI_DMA_FROMDEVICE); + + __free_page(page); + rx_pg->page = NULL; +} + +static inline int +bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) +{ + struct sk_buff *skb; + struct sw_bd *rx_buf = &rxr->rx_buf_ring[index]; + dma_addr_t mapping; + struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; + unsigned long align; + + skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp); + if (skb == NULL) { + return -ENOMEM; + } + + if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1)))) + skb_reserve(skb, BNX2_RX_ALIGN - align); + + mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { + dev_kfree_skb(skb); + return -EIO; + } + + rx_buf->skb = skb; + rx_buf->desc = (struct l2_fhdr *) skb->data; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; + rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; + + rxr->rx_prod_bseq += bp->rx_buf_use_size; + + return 0; +} + +static int +bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event) +{ + struct status_block *sblk = bnapi->status_blk.msi; + u32 new_link_state, old_link_state; + int is_set = 1; + + new_link_state = sblk->status_attn_bits & event; + old_link_state = sblk->status_attn_bits_ack & event; + if (new_link_state != old_link_state) { + if (new_link_state) + REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event); + else + REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event); + } else + is_set = 0; + + return is_set; +} + +static void +bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi) +{ + spin_lock(&bp->phy_lock); + + if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) + bnx2_set_link(bp); + if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT)) + bnx2_set_remote_link(bp); + + spin_unlock(&bp->phy_lock); + +} + +static inline u16 +bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi) +{ + u16 cons; + + /* Tell 
compiler that status block fields can change. */ + barrier(); + cons = *bnapi->hw_tx_cons_ptr; + barrier(); + if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)) + cons++; + return cons; +} + +static int +bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) +{ + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + u16 hw_cons, sw_cons, sw_ring_cons; + int tx_pkt = 0, index; + struct netdev_queue *txq; + + index = (bnapi - bp->bnx2_napi); + txq = netdev_get_tx_queue(bp->dev, index); + + hw_cons = bnx2_get_hw_tx_cons(bnapi); + sw_cons = txr->tx_cons; + + while (sw_cons != hw_cons) { + struct sw_tx_bd *tx_buf; + struct sk_buff *skb; + int i, last; + + sw_ring_cons = TX_RING_IDX(sw_cons); + + tx_buf = &txr->tx_buf_ring[sw_ring_cons]; + skb = tx_buf->skb; + + /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */ + prefetch(&skb->end); + + /* partial BD completions possible with TSO packets */ + if (tx_buf->is_gso) { + u16 last_idx, last_ring_idx; + + last_idx = sw_cons + tx_buf->nr_frags + 1; + last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1; + if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) { + last_idx++; + } + if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) { + break; + } + } + + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), PCI_DMA_TODEVICE); + + tx_buf->skb = NULL; + last = tx_buf->nr_frags; + + for (i = 0; i < last; i++) { + sw_cons = NEXT_TX_BD(sw_cons); + + dma_unmap_page(&bp->pdev->dev, + dma_unmap_addr( + &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], + mapping), + skb_shinfo(skb)->frags[i].size, + PCI_DMA_TODEVICE); + } + + sw_cons = NEXT_TX_BD(sw_cons); + + dev_kfree_skb(skb); + tx_pkt++; + if (tx_pkt == budget) + break; + + if (hw_cons == sw_cons) + hw_cons = bnx2_get_hw_tx_cons(bnapi); + } + + txr->hw_tx_cons = hw_cons; + txr->tx_cons = sw_cons; + + /* Need to make the tx_cons update visible to bnx2_start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that bnx2_start_xmit() + * will miss it and cause the queue to be stopped forever. + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq)) && + (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) { + __netif_tx_lock(txq, smp_processor_id()); + if ((netif_tx_queue_stopped(txq)) && + (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } + + return tx_pkt; +} + +static void +bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, + struct sk_buff *skb, int count) +{ + struct sw_pg *cons_rx_pg, *prod_rx_pg; + struct rx_bd *cons_bd, *prod_bd; + int i; + u16 hw_prod, prod; + u16 cons = rxr->rx_pg_cons; + + cons_rx_pg = &rxr->rx_pg_ring[cons]; + + /* The caller was unable to allocate a new page to replace the + * last one in the frags array, so we need to recycle that page + * and then free the skb. 
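+ * That is why the last frag's page is pulled back out of
+ * skb_shinfo() below before the skb itself is freed.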
+ */ + if (skb) { + struct page *page; + struct skb_shared_info *shinfo; + + shinfo = skb_shinfo(skb); + shinfo->nr_frags--; + page = shinfo->frags[shinfo->nr_frags].page; + shinfo->frags[shinfo->nr_frags].page = NULL; + + cons_rx_pg->page = page; + dev_kfree_skb(skb); + } + + hw_prod = rxr->rx_pg_prod; + + for (i = 0; i < count; i++) { + prod = RX_PG_RING_IDX(hw_prod); + + prod_rx_pg = &rxr->rx_pg_ring[prod]; + cons_rx_pg = &rxr->rx_pg_ring[cons]; + cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)]; + prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + + if (prod != cons) { + prod_rx_pg->page = cons_rx_pg->page; + cons_rx_pg->page = NULL; + dma_unmap_addr_set(prod_rx_pg, mapping, + dma_unmap_addr(cons_rx_pg, mapping)); + + prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; + prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; + + } + cons = RX_PG_RING_IDX(NEXT_RX_BD(cons)); + hw_prod = NEXT_RX_BD(hw_prod); + } + rxr->rx_pg_prod = hw_prod; + rxr->rx_pg_cons = cons; +} + +static inline void +bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, + struct sk_buff *skb, u16 cons, u16 prod) +{ + struct sw_bd *cons_rx_buf, *prod_rx_buf; + struct rx_bd *cons_bd, *prod_bd; + + cons_rx_buf = &rxr->rx_buf_ring[cons]; + prod_rx_buf = &rxr->rx_buf_ring[prod]; + + dma_sync_single_for_device(&bp->pdev->dev, + dma_unmap_addr(cons_rx_buf, mapping), + BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); + + rxr->rx_prod_bseq += bp->rx_buf_use_size; + + prod_rx_buf->skb = skb; + prod_rx_buf->desc = (struct l2_fhdr *) skb->data; + + if (cons == prod) + return; + + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); + + cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; + prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; + prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; +} + +static int +bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, + unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr, + u32 ring_idx) +{ + int err; + u16 prod = ring_idx & 0xffff; + + err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod); + if (hdr_len) { + unsigned int raw_len = len + 4; + int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT; + + bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); + } + return err; + } + + skb_reserve(skb, BNX2_RX_OFFSET); + dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + + if (hdr_len == 0) { + skb_put(skb, len); + return 0; + } else { + unsigned int i, frag_len, frag_size, pages; + struct sw_pg *rx_pg; + u16 pg_cons = rxr->rx_pg_cons; + u16 pg_prod = rxr->rx_pg_prod; + + frag_size = len + 4 - hdr_len; + pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT; + skb_put(skb, hdr_len); + + for (i = 0; i < pages; i++) { + dma_addr_t mapping_old; + + frag_len = min(frag_size, (unsigned int) PAGE_SIZE); + if (unlikely(frag_len <= 4)) { + unsigned int tail = 4 - frag_len; + + rxr->rx_pg_cons = pg_cons; + rxr->rx_pg_prod = pg_prod; + bnx2_reuse_rx_skb_pages(bp, rxr, NULL, + pages - i); + skb->len -= tail; + if (i == 0) { + skb->tail -= tail; + } else { + skb_frag_t *frag = + &skb_shinfo(skb)->frags[i - 1]; + frag->size -= tail; + skb->data_len -= tail; + skb->truesize -= tail; + } + return 0; + } + rx_pg = &rxr->rx_pg_ring[pg_cons]; + + /* Don't unmap yet. 
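+ * The old DMA mapping must stay valid until we know whether a
+ * replacement page can be allocated.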
If we're unable to allocate a new + * page, we need to recycle the page and the DMA addr. + */ + mapping_old = dma_unmap_addr(rx_pg, mapping); + if (i == pages - 1) + frag_len -= 4; + + skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len); + rx_pg->page = NULL; + + err = bnx2_alloc_rx_page(bp, rxr, + RX_PG_RING_IDX(pg_prod), + GFP_ATOMIC); + if (unlikely(err)) { + rxr->rx_pg_cons = pg_cons; + rxr->rx_pg_prod = pg_prod; + bnx2_reuse_rx_skb_pages(bp, rxr, skb, + pages - i); + return err; + } + + dma_unmap_page(&bp->pdev->dev, mapping_old, + PAGE_SIZE, PCI_DMA_FROMDEVICE); + + frag_size -= frag_len; + skb->data_len += frag_len; + skb->truesize += frag_len; + skb->len += frag_len; + + pg_prod = NEXT_RX_BD(pg_prod); + pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons)); + } + rxr->rx_pg_prod = pg_prod; + rxr->rx_pg_cons = pg_cons; + } + return 0; +} + +static inline u16 +bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi) +{ + u16 cons; + + /* Tell compiler that status block fields can change. */ + barrier(); + cons = *bnapi->hw_rx_cons_ptr; + barrier(); + if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)) + cons++; + return cons; +} + +static int +bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) +{ + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod; + struct l2_fhdr *rx_hdr; + int rx_pkt = 0, pg_ring_used = 0; + + hw_cons = bnx2_get_hw_rx_cons(bnapi); + sw_cons = rxr->rx_cons; + sw_prod = rxr->rx_prod; + + /* Memory barrier necessary as speculative reads of the rx + * buffer can be ahead of the index in the status block + */ + rmb(); + while (sw_cons != hw_cons) { + unsigned int len, hdr_len; + u32 status; + struct sw_bd *rx_buf, *next_rx_buf; + struct sk_buff *skb; + dma_addr_t dma_addr; + + sw_ring_cons = RX_RING_IDX(sw_cons); + sw_ring_prod = RX_RING_IDX(sw_prod); + + rx_buf = &rxr->rx_buf_ring[sw_ring_cons]; + skb = rx_buf->skb; + prefetchw(skb); + + next_rx_buf = + &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))]; + prefetch(next_rx_buf->desc); + + rx_buf->skb = NULL; + + dma_addr = dma_unmap_addr(rx_buf, mapping); + + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, + BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, + PCI_DMA_FROMDEVICE); + + rx_hdr = rx_buf->desc; + len = rx_hdr->l2_fhdr_pkt_len; + status = rx_hdr->l2_fhdr_status; + + hdr_len = 0; + if (status & L2_FHDR_STATUS_SPLIT) { + hdr_len = rx_hdr->l2_fhdr_ip_xsum; + pg_ring_used = 1; + } else if (len > bp->rx_jumbo_thresh) { + hdr_len = bp->rx_jumbo_thresh; + pg_ring_used = 1; + } + + if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC | + L2_FHDR_ERRORS_PHY_DECODE | + L2_FHDR_ERRORS_ALIGNMENT | + L2_FHDR_ERRORS_TOO_SHORT | + L2_FHDR_ERRORS_GIANT_FRAME))) { + + bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, + sw_ring_prod); + if (pg_ring_used) { + int pages; + + pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT; + + bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); + } + goto next_rx; + } + + len -= 4; + + if (len <= bp->rx_copy_thresh) { + struct sk_buff *new_skb; + + new_skb = netdev_alloc_skb(bp->dev, len + 6); + if (new_skb == NULL) { + bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, + sw_ring_prod); + goto next_rx; + } + + /* aligned copy */ + skb_copy_from_linear_data_offset(skb, + BNX2_RX_OFFSET - 6, + new_skb->data, len + 6); + skb_reserve(new_skb, 6); + skb_put(new_skb, len); + + bnx2_reuse_rx_skb(bp, rxr, skb, + sw_ring_cons, sw_ring_prod); + + skb = new_skb; + } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len, + dma_addr, (sw_ring_cons << 16) | 
sw_ring_prod))) + goto next_rx; + + if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && + !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag); + + skb->protocol = eth_type_trans(skb, bp->dev); + + if ((len > (bp->dev->mtu + ETH_HLEN)) && + (ntohs(skb->protocol) != 0x8100)) { + + dev_kfree_skb(skb); + goto next_rx; + + } + + skb_checksum_none_assert(skb); + if ((bp->dev->features & NETIF_F_RXCSUM) && + (status & (L2_FHDR_STATUS_TCP_SEGMENT | + L2_FHDR_STATUS_UDP_DATAGRAM))) { + + if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM | + L2_FHDR_ERRORS_UDP_XSUM)) == 0)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + if ((bp->dev->features & NETIF_F_RXHASH) && + ((status & L2_FHDR_STATUS_USE_RXHASH) == + L2_FHDR_STATUS_USE_RXHASH)) + skb->rxhash = rx_hdr->l2_fhdr_hash; + + skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]); + napi_gro_receive(&bnapi->napi, skb); + rx_pkt++; + +next_rx: + sw_cons = NEXT_RX_BD(sw_cons); + sw_prod = NEXT_RX_BD(sw_prod); + + if ((rx_pkt == budget)) + break; + + /* Refresh hw_cons to see if there is new work */ + if (sw_cons == hw_cons) { + hw_cons = bnx2_get_hw_rx_cons(bnapi); + rmb(); + } + } + rxr->rx_cons = sw_cons; + rxr->rx_prod = sw_prod; + + if (pg_ring_used) + REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); + + REG_WR16(bp, rxr->rx_bidx_addr, sw_prod); + + REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); + + mmiowb(); + + return rx_pkt; + +} + +/* MSI ISR - The only difference between this and the INTx ISR + * is that the MSI interrupt is always serviced. + */ +static irqreturn_t +bnx2_msi(int irq, void *dev_instance) +{ + struct bnx2_napi *bnapi = dev_instance; + struct bnx2 *bp = bnapi->bp; + + prefetch(bnapi->status_blk.msi); + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + + /* Return here if interrupt is disabled. */ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) + return IRQ_HANDLED; + + napi_schedule(&bnapi->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t +bnx2_msi_1shot(int irq, void *dev_instance) +{ + struct bnx2_napi *bnapi = dev_instance; + struct bnx2 *bp = bnapi->bp; + + prefetch(bnapi->status_blk.msi); + + /* Return here if interrupt is disabled. */ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) + return IRQ_HANDLED; + + napi_schedule(&bnapi->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t +bnx2_interrupt(int irq, void *dev_instance) +{ + struct bnx2_napi *bnapi = dev_instance; + struct bnx2 *bp = bnapi->bp; + struct status_block *sblk = bnapi->status_blk.msi; + + /* When using INTx, it is possible for the interrupt to arrive + * at the CPU before the status block posted prior to the + * interrupt. Reading a register will flush the status block. + * When using MSI, the MSI message will always complete after + * the status block write. + */ + if ((sblk->status_idx == bnapi->last_status_idx) && + (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & + BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) + return IRQ_NONE; + + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + + /* Read back to deassert IRQ immediately to avoid too many + * spurious interrupts. + */ + REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); + + /* Return here if interrupt is shared and is disabled. 
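+ * Returning IRQ_HANDLED here instead of IRQ_NONE matters on a
+ * shared line: the write above did ack and mask our source, so the
+ * interrupt was genuinely ours even though no NAPI work is queued.
+ *
+ * Condensed INTx flow (same steps as this handler):
+ *
+ *	status_idx unchanged and INTA deasserted  -> IRQ_NONE
+ *	write INT_ACK_CMD (ack + mask), read it back to flush posting
+ *	intr_sem held (reset in progress)         -> IRQ_HANDLED
+ *	napi_schedule_prep() ok -> save status_idx, __napi_schedule()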
*/ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) + return IRQ_HANDLED; + + if (napi_schedule_prep(&bnapi->napi)) { + bnapi->last_status_idx = sblk->status_idx; + __napi_schedule(&bnapi->napi); + } + + return IRQ_HANDLED; +} + +static inline int +bnx2_has_fast_work(struct bnx2_napi *bnapi) +{ + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + + if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) || + (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)) + return 1; + return 0; +} + +#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \ + STATUS_ATTN_BITS_TIMER_ABORT) + +static inline int +bnx2_has_work(struct bnx2_napi *bnapi) +{ + struct status_block *sblk = bnapi->status_blk.msi; + + if (bnx2_has_fast_work(bnapi)) + return 1; + +#ifdef BCM_CNIC + if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx)) + return 1; +#endif + + if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) != + (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS)) + return 1; + + return 0; +} + +static void +bnx2_chk_missed_msi(struct bnx2 *bp) +{ + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + u32 msi_ctrl; + + if (bnx2_has_work(bnapi)) { + msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL); + if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE)) + return; + + if (bnapi->last_status_idx == bp->idle_chk_status_idx) { + REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl & + ~BNX2_PCICFG_MSI_CONTROL_ENABLE); + REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl); + bnx2_msi(bp->irq_tbl[0].vector, bnapi); + } + } + + bp->idle_chk_status_idx = bnapi->last_status_idx; +} + +#ifdef BCM_CNIC +static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi) +{ + struct cnic_ops *c_ops; + + if (!bnapi->cnic_present) + return; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data, + bnapi->status_blk.msi); + rcu_read_unlock(); +} +#endif + +static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) +{ + struct status_block *sblk = bnapi->status_blk.msi; + u32 status_attn_bits = sblk->status_attn_bits; + u32 status_attn_bits_ack = sblk->status_attn_bits_ack; + + if ((status_attn_bits & STATUS_ATTN_EVENTS) != + (status_attn_bits_ack & STATUS_ATTN_EVENTS)) { + + bnx2_phy_int(bp, bnapi); + + /* This is needed to take care of transient status + * during link changes. + */ + REG_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_RD(bp, BNX2_HC_COMMAND); + } +} + +static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi, + int work_done, int budget) +{ + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + + if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons) + bnx2_tx_int(bp, bnapi, 0); + + if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) + work_done += bnx2_rx_int(bp, bnapi, budget - work_done); + + return work_done; +} + +static int bnx2_poll_msix(struct napi_struct *napi, int budget) +{ + struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi); + struct bnx2 *bp = bnapi->bp; + int work_done = 0; + struct status_block_msix *sblk = bnapi->status_blk.msix; + + while (1) { + work_done = bnx2_poll_work(bp, bnapi, work_done, budget); + if (unlikely(work_done >= budget)) + break; + + bnapi->last_status_idx = sblk->status_idx; + /* status idx must be read before checking for more work. 
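+ *
+ * The index sampled here is what the ACK below reports to the
+ * chip; sampling it after the emptiness check could acknowledge
+ * events that were never polled.  The pattern is:
+ *
+ *	bnapi->last_status_idx = sblk->status_idx;
+ *	rmb();
+ *	if (!bnx2_has_fast_work(bnapi)) {
+ *		napi_complete(napi);
+ *		ack with bnapi->last_status_idx;
+ *	}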
*/ + rmb(); + if (likely(!bnx2_has_fast_work(bnapi))) { + + napi_complete(napi); + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); + break; + } + } + return work_done; +} + +static int bnx2_poll(struct napi_struct *napi, int budget) +{ + struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi); + struct bnx2 *bp = bnapi->bp; + int work_done = 0; + struct status_block *sblk = bnapi->status_blk.msi; + + while (1) { + bnx2_poll_link(bp, bnapi); + + work_done = bnx2_poll_work(bp, bnapi, work_done, budget); + +#ifdef BCM_CNIC + bnx2_poll_cnic(bp, bnapi); +#endif + + /* bnapi->last_status_idx is used below to tell the hw how + * much work has been processed, so we must read it before + * checking for more work. + */ + bnapi->last_status_idx = sblk->status_idx; + + if (unlikely(work_done >= budget)) + break; + + rmb(); + if (likely(!bnx2_has_work(bnapi))) { + napi_complete(napi); + if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); + break; + } + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT | + bnapi->last_status_idx); + + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); + break; + } + } + + return work_done; +} + +/* Called with rtnl_lock from vlan functions and also netif_tx_lock + * from set_multicast. + */ +static void +bnx2_set_rx_mode(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + u32 rx_mode, sort_mode; + struct netdev_hw_addr *ha; + int i; + + if (!netif_running(dev)) + return; + + spin_lock_bh(&bp->phy_lock); + + rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | + BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); + sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; + if (!(dev->features & NETIF_F_HW_VLAN_RX) && + (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) + rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; + if (dev->flags & IFF_PROMISC) { + /* Promiscuous mode. */ + rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; + sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | + BNX2_RPM_SORT_USER0_PROM_VLAN; + } + else if (dev->flags & IFF_ALLMULTI) { + for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { + REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + 0xffffffff); + } + sort_mode |= BNX2_RPM_SORT_USER0_MC_EN; + } + else { + /* Accept one or more multicast(s). 
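+ *
+ * The filter is a 256-bit hash spread across NUM_MC_HASH_REGISTERS
+ * 32-bit registers; each address selects one bit via the
+ * little-endian CRC of the MAC, as in the loop below:
+ *
+ *	crc = ether_crc_le(ETH_ALEN, ha->addr);
+ *	bit = crc & 0xff;              256 hash buckets
+ *	regidx = (bit & 0xe0) >> 5;    which of the 8 registers
+ *	bit &= 0x1f;                   which bit within it
+ *	mc_filter[regidx] |= (1 << bit);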
*/ + u32 mc_filter[NUM_MC_HASH_REGISTERS]; + u32 regidx; + u32 bit; + u32 crc; + + memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); + + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); + bit = crc & 0xff; + regidx = (bit & 0xe0) >> 5; + bit &= 0x1f; + mc_filter[regidx] |= (1 << bit); + } + + for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { + REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + mc_filter[i]); + } + + sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; + } + + if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) { + rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; + sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | + BNX2_RPM_SORT_USER0_PROM_VLAN; + } else if (!(dev->flags & IFF_PROMISC)) { + /* Add all entries into to the match filter list */ + i = 0; + netdev_for_each_uc_addr(ha, dev) { + bnx2_set_mac_addr(bp, ha->addr, + i + BNX2_START_UNICAST_ADDRESS_INDEX); + sort_mode |= (1 << + (i + BNX2_START_UNICAST_ADDRESS_INDEX)); + i++; + } + + } + + if (rx_mode != bp->rx_mode) { + bp->rx_mode = rx_mode; + REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode); + } + + REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); + REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); + REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); + + spin_unlock_bh(&bp->phy_lock); +} + +static int __devinit +check_fw_section(const struct firmware *fw, + const struct bnx2_fw_file_section *section, + u32 alignment, bool non_empty) +{ + u32 offset = be32_to_cpu(section->offset); + u32 len = be32_to_cpu(section->len); + + if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3) + return -EINVAL; + if ((non_empty && len == 0) || len > fw->size - offset || + len & (alignment - 1)) + return -EINVAL; + return 0; +} + +static int __devinit +check_mips_fw_entry(const struct firmware *fw, + const struct bnx2_mips_fw_file_entry *entry) +{ + if (check_fw_section(fw, &entry->text, 4, true) || + check_fw_section(fw, &entry->data, 4, false) || + check_fw_section(fw, &entry->rodata, 4, false)) + return -EINVAL; + return 0; +} + +static int __devinit +bnx2_request_firmware(struct bnx2 *bp) +{ + const char *mips_fw_file, *rv2p_fw_file; + const struct bnx2_mips_fw_file *mips_fw; + const struct bnx2_rv2p_fw_file *rv2p_fw; + int rc; + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + mips_fw_file = FW_MIPS_FILE_09; + if ((CHIP_ID(bp) == CHIP_ID_5709_A0) || + (CHIP_ID(bp) == CHIP_ID_5709_A1)) + rv2p_fw_file = FW_RV2P_FILE_09_Ax; + else + rv2p_fw_file = FW_RV2P_FILE_09; + } else { + mips_fw_file = FW_MIPS_FILE_06; + rv2p_fw_file = FW_RV2P_FILE_06; + } + + rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev); + if (rc) { + pr_err("Can't load firmware file \"%s\"\n", mips_fw_file); + return rc; + } + + rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev); + if (rc) { + pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file); + return rc; + } + mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; + rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; + if (bp->mips_firmware->size < sizeof(*mips_fw) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) { + pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file); + return -EINVAL; + } + if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) || + check_fw_section(bp->rv2p_firmware, 
&rv2p_fw->proc1.rv2p, 8, true) || + check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) { + pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file); + return -EINVAL; + } + + return 0; +} + +static u32 +rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code) +{ + switch (idx) { + case RV2P_P1_FIXUP_PAGE_SIZE_IDX: + rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK; + rv2p_code |= RV2P_BD_PAGE_SIZE; + break; + } + return rv2p_code; +} + +static int +load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc, + const struct bnx2_rv2p_fw_file_entry *fw_entry) +{ + u32 rv2p_code_len, file_offset; + __be32 *rv2p_code; + int i; + u32 val, cmd, addr; + + rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len); + file_offset = be32_to_cpu(fw_entry->rv2p.offset); + + rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); + + if (rv2p_proc == RV2P_PROC1) { + cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR; + addr = BNX2_RV2P_PROC1_ADDR_CMD; + } else { + cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR; + addr = BNX2_RV2P_PROC2_ADDR_CMD; + } + + for (i = 0; i < rv2p_code_len; i += 8) { + REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code)); + rv2p_code++; + REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code)); + rv2p_code++; + + val = (i / 8) | cmd; + REG_WR(bp, addr, val); + } + + rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); + for (i = 0; i < 8; i++) { + u32 loc, code; + + loc = be32_to_cpu(fw_entry->fixup[i]); + if (loc && ((loc * 4) < rv2p_code_len)) { + code = be32_to_cpu(*(rv2p_code + loc - 1)); + REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code); + code = be32_to_cpu(*(rv2p_code + loc)); + code = rv2p_fw_fixup(rv2p_proc, i, loc, code); + REG_WR(bp, BNX2_RV2P_INSTR_LOW, code); + + val = (loc / 2) | cmd; + REG_WR(bp, addr, val); + } + } + + /* Reset the processor, un-stall is done later. */ + if (rv2p_proc == RV2P_PROC1) { + REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET); + } + else { + REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET); + } + + return 0; +} + +static int +load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, + const struct bnx2_mips_fw_file_entry *fw_entry) +{ + u32 addr, len, file_offset; + __be32 *data; + u32 offset; + u32 val; + + /* Halt the CPU. */ + val = bnx2_reg_rd_ind(bp, cpu_reg->mode); + val |= cpu_reg->mode_value_halt; + bnx2_reg_wr_ind(bp, cpu_reg->mode, val); + bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); + + /* Load the Text area. */ + addr = be32_to_cpu(fw_entry->text.addr); + len = be32_to_cpu(fw_entry->text.len); + file_offset = be32_to_cpu(fw_entry->text.offset); + data = (__be32 *)(bp->mips_firmware->data + file_offset); + + offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base); + if (len) { + int j; + + for (j = 0; j < (len / 4); j++, offset += 4) + bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); + } + + /* Load the Data area. */ + addr = be32_to_cpu(fw_entry->data.addr); + len = be32_to_cpu(fw_entry->data.len); + file_offset = be32_to_cpu(fw_entry->data.offset); + data = (__be32 *)(bp->mips_firmware->data + file_offset); + + offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base); + if (len) { + int j; + + for (j = 0; j < (len / 4); j++, offset += 4) + bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); + } + + /* Load the Read-Only area. 
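+ *
+ * All three sections (text, data, rodata) are copied with the same
+ * pattern: translate the address from the CPU's MIPS view into the
+ * scratchpad window, then push big-endian words through the
+ * indirect register interface:
+ *
+ *	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
+ *	for (j = 0; j < len / 4; j++, offset += 4)
+ *		bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));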
*/ + addr = be32_to_cpu(fw_entry->rodata.addr); + len = be32_to_cpu(fw_entry->rodata.len); + file_offset = be32_to_cpu(fw_entry->rodata.offset); + data = (__be32 *)(bp->mips_firmware->data + file_offset); + + offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base); + if (len) { + int j; + + for (j = 0; j < (len / 4); j++, offset += 4) + bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); + } + + /* Clear the pre-fetch instruction. */ + bnx2_reg_wr_ind(bp, cpu_reg->inst, 0); + + val = be32_to_cpu(fw_entry->start_addr); + bnx2_reg_wr_ind(bp, cpu_reg->pc, val); + + /* Start the CPU. */ + val = bnx2_reg_rd_ind(bp, cpu_reg->mode); + val &= ~cpu_reg->mode_value_halt; + bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); + bnx2_reg_wr_ind(bp, cpu_reg->mode, val); + + return 0; +} + +static int +bnx2_init_cpus(struct bnx2 *bp) +{ + const struct bnx2_mips_fw_file *mips_fw = + (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; + const struct bnx2_rv2p_fw_file *rv2p_fw = + (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; + int rc; + + /* Initialize the RV2P processor. */ + load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1); + load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2); + + /* Initialize the RX Processor. */ + rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp); + if (rc) + goto init_cpu_err; + + /* Initialize the TX Processor. */ + rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp); + if (rc) + goto init_cpu_err; + + /* Initialize the TX Patch-up Processor. */ + rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat); + if (rc) + goto init_cpu_err; + + /* Initialize the Completion Processor. */ + rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com); + if (rc) + goto init_cpu_err; + + /* Initialize the Command Processor. */ + rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp); + +init_cpu_err: + return rc; +} + +static int +bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) +{ + u16 pmcsr; + + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); + + switch (state) { + case PCI_D0: { + u32 val; + + pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, + (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | + PCI_PM_CTRL_PME_STATUS); + + if (pmcsr & PCI_PM_CTRL_STATE_MASK) + /* delay required during transition out of D3hot */ + msleep(20); + + val = REG_RD(bp, BNX2_EMAC_MODE); + val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; + val &= ~BNX2_EMAC_MODE_MPKT; + REG_WR(bp, BNX2_EMAC_MODE, val); + + val = REG_RD(bp, BNX2_RPM_CONFIG); + val &= ~BNX2_RPM_CONFIG_ACPI_ENA; + REG_WR(bp, BNX2_RPM_CONFIG, val); + break; + } + case PCI_D3hot: { + int i; + u32 val, wol_msg; + + if (bp->wol) { + u32 advertising; + u8 autoneg; + + autoneg = bp->autoneg; + advertising = bp->advertising; + + if (bp->phy_port == PORT_TP) { + bp->autoneg = AUTONEG_SPEED; + bp->advertising = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_Autoneg; + } + + spin_lock_bh(&bp->phy_lock); + bnx2_setup_phy(bp, bp->phy_port); + spin_unlock_bh(&bp->phy_lock); + + bp->autoneg = autoneg; + bp->advertising = advertising; + + bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); + + val = REG_RD(bp, BNX2_EMAC_MODE); + + /* Enable port mode. 
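+ *
+ * The mode mirrors the active PHY attachment:
+ *
+ *	PORT_TP   -> BNX2_EMAC_MODE_PORT_MII  (copper)
+ *	otherwise -> BNX2_EMAC_MODE_PORT_GMII, plus
+ *	             BNX2_EMAC_MODE_25G_MODE when the link is at
+ *	             SPEED_2500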
*/ + val &= ~BNX2_EMAC_MODE_PORT; + val |= BNX2_EMAC_MODE_MPKT_RCVD | + BNX2_EMAC_MODE_ACPI_RCVD | + BNX2_EMAC_MODE_MPKT; + if (bp->phy_port == PORT_TP) + val |= BNX2_EMAC_MODE_PORT_MII; + else { + val |= BNX2_EMAC_MODE_PORT_GMII; + if (bp->line_speed == SPEED_2500) + val |= BNX2_EMAC_MODE_25G_MODE; + } + + REG_WR(bp, BNX2_EMAC_MODE, val); + + /* receive all multicast */ + for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { + REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + 0xffffffff); + } + REG_WR(bp, BNX2_EMAC_RX_MODE, + BNX2_EMAC_RX_MODE_SORT_MODE); + + val = 1 | BNX2_RPM_SORT_USER0_BC_EN | + BNX2_RPM_SORT_USER0_MC_EN; + REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); + REG_WR(bp, BNX2_RPM_SORT_USER0, val); + REG_WR(bp, BNX2_RPM_SORT_USER0, val | + BNX2_RPM_SORT_USER0_ENA); + + /* Need to enable EMAC and RPM for WOL. */ + REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); + + val = REG_RD(bp, BNX2_RPM_CONFIG); + val &= ~BNX2_RPM_CONFIG_ACPI_ENA; + REG_WR(bp, BNX2_RPM_CONFIG, val); + + wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; + } + else { + wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; + } + + if (!(bp->flags & BNX2_FLAG_NO_WOL)) + bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, + 1, 0); + + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || + (CHIP_ID(bp) == CHIP_ID_5706_A1)) { + + if (bp->wol) + pmcsr |= 3; + } + else { + pmcsr |= 3; + } + if (bp->wol) { + pmcsr |= PCI_PM_CTRL_PME_ENABLE; + } + pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, + pmcsr); + + /* No more memory access after this point until + * device is brought back to D0. + */ + udelay(50); + break; + } + default: + return -EINVAL; + } + return 0; +} + +static int +bnx2_acquire_nvram_lock(struct bnx2 *bp) +{ + u32 val; + int j; + + /* Request access to the flash interface. */ + REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2); + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + val = REG_RD(bp, BNX2_NVM_SW_ARB); + if (val & BNX2_NVM_SW_ARB_ARB_ARB2) + break; + + udelay(5); + } + + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + + return 0; +} + +static int +bnx2_release_nvram_lock(struct bnx2 *bp) +{ + int j; + u32 val; + + /* Relinquish nvram interface. */ + REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2); + + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + val = REG_RD(bp, BNX2_NVM_SW_ARB); + if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2)) + break; + + udelay(5); + } + + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + + return 0; +} + + +static int +bnx2_enable_nvram_write(struct bnx2 *bp) +{ + u32 val; + + val = REG_RD(bp, BNX2_MISC_CFG); + REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); + + if (bp->flash_info->flags & BNX2_NV_WREN) { + int j; + + REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + REG_WR(bp, BNX2_NVM_COMMAND, + BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT); + + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + udelay(5); + + val = REG_RD(bp, BNX2_NVM_COMMAND); + if (val & BNX2_NVM_COMMAND_DONE) + break; + } + + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + } + return 0; +} + +static void +bnx2_disable_nvram_write(struct bnx2 *bp) +{ + u32 val; + + val = REG_RD(bp, BNX2_MISC_CFG); + REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN); +} + + +static void +bnx2_enable_nvram_access(struct bnx2 *bp) +{ + u32 val; + + val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); + /* Enable both bits, even on read. 
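+ *
+ * Every flash operation in this file uses the same bracket:
+ *
+ *	bnx2_acquire_nvram_lock(bp);      hardware arbitration
+ *	bnx2_enable_nvram_access(bp);
+ *	... bnx2_nvram_read_dword() / bnx2_nvram_write_dword() ...
+ *	bnx2_disable_nvram_access(bp);
+ *	bnx2_release_nvram_lock(bp);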
*/ + REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, + val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN); +} + +static void +bnx2_disable_nvram_access(struct bnx2 *bp) +{ + u32 val; + + val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); + /* Disable both bits, even after read. */ + REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, + val & ~(BNX2_NVM_ACCESS_ENABLE_EN | + BNX2_NVM_ACCESS_ENABLE_WR_EN)); +} + +static int +bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) +{ + u32 cmd; + int j; + + if (bp->flash_info->flags & BNX2_NV_BUFFERED) + /* Buffered flash, no erase needed */ + return 0; + + /* Build an erase command */ + cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR | + BNX2_NVM_COMMAND_DOIT; + + /* Need to clear DONE bit separately. */ + REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + + /* Address of the NVRAM to read from. */ + REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); + + /* Issue an erase command. */ + REG_WR(bp, BNX2_NVM_COMMAND, cmd); + + /* Wait for completion. */ + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + u32 val; + + udelay(5); + + val = REG_RD(bp, BNX2_NVM_COMMAND); + if (val & BNX2_NVM_COMMAND_DONE) + break; + } + + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + + return 0; +} + +static int +bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) +{ + u32 cmd; + int j; + + /* Build the command word. */ + cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags; + + /* Calculate an offset of a buffered flash, not needed for 5709. */ + if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { + offset = ((offset / bp->flash_info->page_size) << + bp->flash_info->page_bits) + + (offset % bp->flash_info->page_size); + } + + /* Need to clear DONE bit separately. */ + REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + + /* Address of the NVRAM to read from. */ + REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); + + /* Issue a read command. */ + REG_WR(bp, BNX2_NVM_COMMAND, cmd); + + /* Wait for completion. */ + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + u32 val; + + udelay(5); + + val = REG_RD(bp, BNX2_NVM_COMMAND); + if (val & BNX2_NVM_COMMAND_DONE) { + __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ)); + memcpy(ret_val, &v, 4); + break; + } + } + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + + return 0; +} + + +static int +bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) +{ + u32 cmd; + __be32 val32; + int j; + + /* Build the command word. */ + cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags; + + /* Calculate an offset of a buffered flash, not needed for 5709. */ + if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { + offset = ((offset / bp->flash_info->page_size) << + bp->flash_info->page_bits) + + (offset % bp->flash_info->page_size); + } + + /* Need to clear DONE bit separately. */ + REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + + memcpy(&val32, val, 4); + + /* Write the data. */ + REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32)); + + /* Address of the NVRAM to write to. */ + REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); + + /* Issue the write command. */ + REG_WR(bp, BNX2_NVM_COMMAND, cmd); + + /* Wait for completion. 
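+ *
+ * (Same completion idiom as the erase and read commands above:
+ * poll the DONE bit in 5 usec steps, a worst case wait of
+ * NVRAM_TIMEOUT_COUNT * 5 usec before returning -EBUSY.)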
*/ + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + udelay(5); + + if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE) + break; + } + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + + return 0; +} + +static int +bnx2_init_nvram(struct bnx2 *bp) +{ + u32 val; + int j, entry_count, rc = 0; + const struct flash_spec *flash; + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + bp->flash_info = &flash_5709; + goto get_flash_size; + } + + /* Determine the selected interface. */ + val = REG_RD(bp, BNX2_NVM_CFG1); + + entry_count = ARRAY_SIZE(flash_table); + + if (val & 0x40000000) { + + /* Flash interface has been reconfigured */ + for (j = 0, flash = &flash_table[0]; j < entry_count; + j++, flash++) { + if ((val & FLASH_BACKUP_STRAP_MASK) == + (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { + bp->flash_info = flash; + break; + } + } + } + else { + u32 mask; + /* Not yet been reconfigured */ + + if (val & (1 << 23)) + mask = FLASH_BACKUP_STRAP_MASK; + else + mask = FLASH_STRAP_MASK; + + for (j = 0, flash = &flash_table[0]; j < entry_count; + j++, flash++) { + + if ((val & mask) == (flash->strapping & mask)) { + bp->flash_info = flash; + + /* Request access to the flash interface. */ + if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) + return rc; + + /* Enable access to flash interface */ + bnx2_enable_nvram_access(bp); + + /* Reconfigure the flash interface */ + REG_WR(bp, BNX2_NVM_CFG1, flash->config1); + REG_WR(bp, BNX2_NVM_CFG2, flash->config2); + REG_WR(bp, BNX2_NVM_CFG3, flash->config3); + REG_WR(bp, BNX2_NVM_WRITE1, flash->write1); + + /* Disable access to flash interface */ + bnx2_disable_nvram_access(bp); + bnx2_release_nvram_lock(bp); + + break; + } + } + } /* if (val & 0x40000000) */ + + if (j == entry_count) { + bp->flash_info = NULL; + pr_alert("Unknown flash/EEPROM type\n"); + return -ENODEV; + } + +get_flash_size: + val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2); + val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK; + if (val) + bp->flash_size = val; + else + bp->flash_size = bp->flash_info->total_size; + + return rc; +} + +static int +bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf, + int buf_size) +{ + int rc = 0; + u32 cmd_flags, offset32, len32, extra; + + if (buf_size == 0) + return 0; + + /* Request access to the flash interface. */ + if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) + return rc; + + /* Enable access to flash interface */ + bnx2_enable_nvram_access(bp); + + len32 = buf_size; + offset32 = offset; + extra = 0; + + cmd_flags = 0; + + if (offset32 & 3) { + u8 buf[4]; + u32 pre_len; + + offset32 &= ~3; + pre_len = 4 - (offset & 3); + + if (pre_len >= len32) { + pre_len = len32; + cmd_flags = BNX2_NVM_COMMAND_FIRST | + BNX2_NVM_COMMAND_LAST; + } + else { + cmd_flags = BNX2_NVM_COMMAND_FIRST; + } + + rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); + + if (rc) + return rc; + + memcpy(ret_buf, buf + (offset & 3), pre_len); + + offset32 += 4; + ret_buf += pre_len; + len32 -= pre_len; + } + if (len32 & 3) { + extra = 4 - (len32 & 3); + len32 = (len32 + 4) & ~3; + } + + if (len32 == 4) { + u8 buf[4]; + + if (cmd_flags) + cmd_flags = BNX2_NVM_COMMAND_LAST; + else + cmd_flags = BNX2_NVM_COMMAND_FIRST | + BNX2_NVM_COMMAND_LAST; + + rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); + + memcpy(ret_buf, buf, 4 - extra); + } + else if (len32 > 0) { + u8 buf[4]; + + /* Read the first word. */ + if (cmd_flags) + cmd_flags = 0; + else + cmd_flags = BNX2_NVM_COMMAND_FIRST; + + rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags); + + /* Advance to the next dword. 
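+ *
+ * Overall the read decomposes into aligned dword accesses:
+ *
+ *	head:  offset32 = offset & ~3, keeping pre_len = 4 - (offset & 3)
+ *	       bytes out of the first dword
+ *	body:  whole dwords copied straight into ret_buf (this loop)
+ *	tail:  one final dword, dropping the last 'extra' pad bytes
+ *
+ * with BNX2_NVM_COMMAND_FIRST / _LAST bracketing the burst.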
*/ + offset32 += 4; + ret_buf += 4; + len32 -= 4; + + while (len32 > 4 && rc == 0) { + rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0); + + /* Advance to the next dword. */ + offset32 += 4; + ret_buf += 4; + len32 -= 4; + } + + if (rc) + return rc; + + cmd_flags = BNX2_NVM_COMMAND_LAST; + rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); + + memcpy(ret_buf, buf, 4 - extra); + } + + /* Disable access to flash interface */ + bnx2_disable_nvram_access(bp); + + bnx2_release_nvram_lock(bp); + + return rc; +} + +static int +bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, + int buf_size) +{ + u32 written, offset32, len32; + u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL; + int rc = 0; + int align_start, align_end; + + buf = data_buf; + offset32 = offset; + len32 = buf_size; + align_start = align_end = 0; + + if ((align_start = (offset32 & 3))) { + offset32 &= ~3; + len32 += align_start; + if (len32 < 4) + len32 = 4; + if ((rc = bnx2_nvram_read(bp, offset32, start, 4))) + return rc; + } + + if (len32 & 3) { + align_end = 4 - (len32 & 3); + len32 += align_end; + if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4))) + return rc; + } + + if (align_start || align_end) { + align_buf = kmalloc(len32, GFP_KERNEL); + if (align_buf == NULL) + return -ENOMEM; + if (align_start) { + memcpy(align_buf, start, 4); + } + if (align_end) { + memcpy(align_buf + len32 - 4, end, 4); + } + memcpy(align_buf + align_start, data_buf, buf_size); + buf = align_buf; + } + + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { + flash_buffer = kmalloc(264, GFP_KERNEL); + if (flash_buffer == NULL) { + rc = -ENOMEM; + goto nvram_write_end; + } + } + + written = 0; + while ((written < len32) && (rc == 0)) { + u32 page_start, page_end, data_start, data_end; + u32 addr, cmd_flags; + int i; + + /* Find the page_start addr */ + page_start = offset32 + written; + page_start -= (page_start % bp->flash_info->page_size); + /* Find the page_end addr */ + page_end = page_start + bp->flash_info->page_size; + /* Find the data_start addr */ + data_start = (written == 0) ? offset32 : page_start; + /* Find the data_end addr */ + data_end = (page_end > offset32 + len32) ? + (offset32 + len32) : page_end; + + /* Request access to the flash interface. 
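+ *
+ * Each pass of this loop rewrites exactly one flash page; with page
+ * size P the window computed above is:
+ *
+ *	page_start = (offset32 + written) rounded down to P
+ *	page_end   = page_start + P
+ *	data_start = offset32 on the first page, else page_start
+ *	data_end   = min(page_end, offset32 + len32)
+ *
+ * On non-buffered parts the old bytes in [page_start, data_start)
+ * and [data_end, page_end) are restored from the copy saved in
+ * flash_buffer.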
*/ + if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) + goto nvram_write_end; + + /* Enable access to flash interface */ + bnx2_enable_nvram_access(bp); + + cmd_flags = BNX2_NVM_COMMAND_FIRST; + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { + int j; + + /* Read the whole page into the buffer + * (non-buffer flash only) */ + for (j = 0; j < bp->flash_info->page_size; j += 4) { + if (j == (bp->flash_info->page_size - 4)) { + cmd_flags |= BNX2_NVM_COMMAND_LAST; + } + rc = bnx2_nvram_read_dword(bp, + page_start + j, + &flash_buffer[j], + cmd_flags); + + if (rc) + goto nvram_write_end; + + cmd_flags = 0; + } + } + + /* Enable writes to flash interface (unlock write-protect) */ + if ((rc = bnx2_enable_nvram_write(bp)) != 0) + goto nvram_write_end; + + /* Loop to write back the buffer data from page_start to + * data_start */ + i = 0; + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { + /* Erase the page */ + if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0) + goto nvram_write_end; + + /* Re-enable the write again for the actual write */ + bnx2_enable_nvram_write(bp); + + for (addr = page_start; addr < data_start; + addr += 4, i += 4) { + + rc = bnx2_nvram_write_dword(bp, addr, + &flash_buffer[i], cmd_flags); + + if (rc != 0) + goto nvram_write_end; + + cmd_flags = 0; + } + } + + /* Loop to write the new data from data_start to data_end */ + for (addr = data_start; addr < data_end; addr += 4, i += 4) { + if ((addr == page_end - 4) || + ((bp->flash_info->flags & BNX2_NV_BUFFERED) && + (addr == data_end - 4))) { + + cmd_flags |= BNX2_NVM_COMMAND_LAST; + } + rc = bnx2_nvram_write_dword(bp, addr, buf, + cmd_flags); + + if (rc != 0) + goto nvram_write_end; + + cmd_flags = 0; + buf += 4; + } + + /* Loop to write back the buffer data from data_end + * to page_end */ + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { + for (addr = data_end; addr < page_end; + addr += 4, i += 4) { + + if (addr == page_end-4) { + cmd_flags = BNX2_NVM_COMMAND_LAST; + } + rc = bnx2_nvram_write_dword(bp, addr, + &flash_buffer[i], cmd_flags); + + if (rc != 0) + goto nvram_write_end; + + cmd_flags = 0; + } + } + + /* Disable writes to flash interface (lock write-protect) */ + bnx2_disable_nvram_write(bp); + + /* Disable access to flash interface */ + bnx2_disable_nvram_access(bp); + bnx2_release_nvram_lock(bp); + + /* Increment written */ + written += data_end - data_start; + } + +nvram_write_end: + kfree(flash_buffer); + kfree(align_buf); + return rc; +} + +static void +bnx2_init_fw_cap(struct bnx2 *bp) +{ + u32 val, sig = 0; + + bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP; + bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN; + + if (!(bp->flags & BNX2_FLAG_ASF_ENABLE)) + bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN; + + val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB); + if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE) + return; + + if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) { + bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN; + sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN; + } + + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) { + u32 link; + + bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP; + + link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS); + if (link & BNX2_LINK_STATUS_SERDES_LINK) + bp->phy_port = PORT_FIBRE; + else + bp->phy_port = PORT_TP; + + sig |= BNX2_DRV_ACK_CAP_SIGNATURE | + BNX2_FW_CAP_REMOTE_PHY_CAPABLE; + } + + if (netif_running(bp->dev) && sig) + bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig); +} + +static void +bnx2_setup_msix_tbl(struct bnx2 
*bp) +{ + REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN); + + REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR); + REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); +} + +static int +bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) +{ + u32 val; + int i, rc = 0; + u8 old_port; + + /* Wait for the current PCI transaction to complete before + * issuing a reset. */ + if ((CHIP_NUM(bp) == CHIP_NUM_5706) || + (CHIP_NUM(bp) == CHIP_NUM_5708)) { + REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, + BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | + BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | + BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | + BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); + val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); + udelay(5); + } else { /* 5709 */ + val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); + val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; + REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); + val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); + + for (i = 0; i < 100; i++) { + msleep(1); + val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL); + if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND)) + break; + } + } + + /* Wait for the firmware to tell us it is ok to issue a reset. */ + bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); + + /* Deposit a driver reset signature so the firmware knows that + * this is a soft reset. */ + bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE, + BNX2_DRV_RESET_SIGNATURE_MAGIC); + + /* Do a dummy read to force the chip to complete all current transaction + * before we issue a reset. */ + val = REG_RD(bp, BNX2_MISC_ID); + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET); + REG_RD(bp, BNX2_MISC_COMMAND); + udelay(5); + + val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | + BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; + + REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); + + } else { + val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | + BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | + BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; + + /* Chip reset. */ + REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); + + /* Reading back any register after chip reset will hang the + * bus on 5706 A0 and A1. The msleep below provides plenty + * of margin for write posting. + */ + if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || + (CHIP_ID(bp) == CHIP_ID_5706_A1)) + msleep(20); + + /* Reset takes approximate 30 usec */ + for (i = 0; i < 10; i++) { + val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG); + if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | + BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) + break; + udelay(10); + } + + if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | + BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { + pr_err("Chip reset did not complete\n"); + return -EBUSY; + } + } + + /* Make sure byte swapping is properly configured. */ + val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0); + if (val != 0x01020304) { + pr_err("Chip not in correct endian mode\n"); + return -ENODEV; + } + + /* Wait for the firmware to finish its initialization. */ + rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0); + if (rc) + return rc; + + spin_lock_bh(&bp->phy_lock); + old_port = bp->phy_port; + bnx2_init_fw_cap(bp); + if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) && + old_port != bp->phy_port) + bnx2_set_default_remote_link(bp); + spin_unlock_bh(&bp->phy_lock); + + if (CHIP_ID(bp) == CHIP_ID_5706_A0) { + /* Adjust the voltage regular to two steps lower. The default + * of this register is 0x0000000e. 
*/ + REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa); + + /* Remove bad rbuf memory from the free pool. */ + rc = bnx2_alloc_bad_rbuf(bp); + } + + if (bp->flags & BNX2_FLAG_USING_MSIX) { + bnx2_setup_msix_tbl(bp); + /* Prevent MSIX table reads and write from timing out */ + REG_WR(bp, BNX2_MISC_ECO_HW_CTL, + BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN); + } + + return rc; +} + +static int +bnx2_init_chip(struct bnx2 *bp) +{ + u32 val, mtu; + int rc, i; + + /* Make sure the interrupt is not active. */ + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + + val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP | + BNX2_DMA_CONFIG_DATA_WORD_SWAP | +#ifdef __BIG_ENDIAN + BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | +#endif + BNX2_DMA_CONFIG_CNTL_WORD_SWAP | + DMA_READ_CHANS << 12 | + DMA_WRITE_CHANS << 16; + + val |= (0x2 << 20) | (1 << 11); + + if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133)) + val |= (1 << 23); + + if ((CHIP_NUM(bp) == CHIP_NUM_5706) && + (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX)) + val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA; + + REG_WR(bp, BNX2_DMA_CONFIG, val); + + if (CHIP_ID(bp) == CHIP_ID_5706_A0) { + val = REG_RD(bp, BNX2_TDMA_CONFIG); + val |= BNX2_TDMA_CONFIG_ONE_DMA; + REG_WR(bp, BNX2_TDMA_CONFIG, val); + } + + if (bp->flags & BNX2_FLAG_PCIX) { + u16 val16; + + pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, + &val16); + pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, + val16 & ~PCI_X_CMD_ERO); + } + + REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | + BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | + BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); + + /* Initialize context mapping and zero out the quick contexts. The + * context block must have already been enabled. */ + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + rc = bnx2_init_5709_context(bp); + if (rc) + return rc; + } else + bnx2_init_context(bp); + + if ((rc = bnx2_init_cpus(bp)) != 0) + return rc; + + bnx2_init_nvram(bp); + + bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); + + val = REG_RD(bp, BNX2_MQ_CONFIG); + val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; + val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + val |= BNX2_MQ_CONFIG_BIN_MQ_MODE; + if (CHIP_REV(bp) == CHIP_REV_Ax) + val |= BNX2_MQ_CONFIG_HALT_DIS; + } + + REG_WR(bp, BNX2_MQ_CONFIG, val); + + val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); + REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val); + REG_WR(bp, BNX2_MQ_KNL_WIND_END, val); + + val = (BCM_PAGE_BITS - 8) << 24; + REG_WR(bp, BNX2_RV2P_CONFIG, val); + + /* Configure page size. */ + val = REG_RD(bp, BNX2_TBDR_CONFIG); + val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE; + val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; + REG_WR(bp, BNX2_TBDR_CONFIG, val); + + val = bp->mac_addr[0] + + (bp->mac_addr[1] << 8) + + (bp->mac_addr[2] << 16) + + bp->mac_addr[3] + + (bp->mac_addr[4] << 8) + + (bp->mac_addr[5] << 16); + REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); + + /* Program the MTU. Also include 4 bytes for CRC32. 
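+ *
+ * e.g. the default mtu of 1500 programs 1500 + ETH_HLEN (14) +
+ * ETH_FCS_LEN (4) = 1518; anything above MAX_ETHERNET_PACKET_SIZE
+ * + 4 (the standard 1518-byte frame) also sets
+ * BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA.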
*/ + mtu = bp->dev->mtu; + val = mtu + ETH_HLEN + ETH_FCS_LEN; + if (val > (MAX_ETHERNET_PACKET_SIZE + 4)) + val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA; + REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); + + if (mtu < 1500) + mtu = 1500; + + bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu)); + bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu)); + bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu)); + + memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size); + for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) + bp->bnx2_napi[i].last_status_idx = 0; + + bp->idle_chk_status_idx = 0xffff; + + bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; + + /* Set up how to generate a link change interrupt. */ + REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); + + REG_WR(bp, BNX2_HC_STATUS_ADDR_L, + (u64) bp->status_blk_mapping & 0xffffffff); + REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32); + + REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L, + (u64) bp->stats_blk_mapping & 0xffffffff); + REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H, + (u64) bp->stats_blk_mapping >> 32); + + REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, + (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); + + REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP, + (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); + + REG_WR(bp, BNX2_HC_COMP_PROD_TRIP, + (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip); + + REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks); + + REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks); + + REG_WR(bp, BNX2_HC_COM_TICKS, + (bp->com_ticks_int << 16) | bp->com_ticks); + + REG_WR(bp, BNX2_HC_CMD_TICKS, + (bp->cmd_ticks_int << 16) | bp->cmd_ticks); + + if (bp->flags & BNX2_FLAG_BROKEN_STATS) + REG_WR(bp, BNX2_HC_STATS_TICKS, 0); + else + REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); + REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ + + if (CHIP_ID(bp) == CHIP_ID_5706_A1) + val = BNX2_HC_CONFIG_COLLECT_STATS; + else { + val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE | + BNX2_HC_CONFIG_COLLECT_STATS; + } + + if (bp->flags & BNX2_FLAG_USING_MSIX) { + REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR, + BNX2_HC_MSIX_BIT_VECTOR_VAL); + + val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B; + } + + if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI) + val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM; + + REG_WR(bp, BNX2_HC_CONFIG, val); + + if (bp->rx_ticks < 25) + bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1); + else + bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0); + + for (i = 1; i < bp->irq_nvecs; i++) { + u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) + + BNX2_HC_SB_CONFIG_1; + + REG_WR(bp, base, + BNX2_HC_SB_CONFIG_1_TX_TMR_MODE | + BNX2_HC_SB_CONFIG_1_RX_TMR_MODE | + BNX2_HC_SB_CONFIG_1_ONE_SHOT); + + REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, + (bp->tx_quick_cons_trip_int << 16) | + bp->tx_quick_cons_trip); + + REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF, + (bp->tx_ticks_int << 16) | bp->tx_ticks); + + REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF, + (bp->rx_quick_cons_trip_int << 16) | + bp->rx_quick_cons_trip); + + REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF, + (bp->rx_ticks_int << 16) | bp->rx_ticks); + } + + /* Clear internal stats counters. */ + REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); + + REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); + + /* Initialize the receive filter. 
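+ *
+ * This also completes the staged driver/firmware handshake visible
+ * in this file:
+ *
+ *	BNX2_DRV_MSG_DATA_WAIT0  before the chip reset is issued
+ *	BNX2_DRV_MSG_DATA_WAIT1  after reset, firmware init complete
+ *	BNX2_DRV_MSG_DATA_WAIT2  below, once rings and filters are set
+ *	BNX2_DRV_MSG_DATA_WAIT3  at suspend, with the WOL/NO_WOL code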
*/ + bnx2_set_rx_mode(bp->dev); + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); + val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; + REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); + } + rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, + 1, 0); + + REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); + REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); + + udelay(20); + + bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND); + + return rc; +} + +static void +bnx2_clear_ring_states(struct bnx2 *bp) +{ + struct bnx2_napi *bnapi; + struct bnx2_tx_ring_info *txr; + struct bnx2_rx_ring_info *rxr; + int i; + + for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { + bnapi = &bp->bnx2_napi[i]; + txr = &bnapi->tx_ring; + rxr = &bnapi->rx_ring; + + txr->tx_cons = 0; + txr->hw_tx_cons = 0; + rxr->rx_prod_bseq = 0; + rxr->rx_prod = 0; + rxr->rx_cons = 0; + rxr->rx_pg_prod = 0; + rxr->rx_pg_cons = 0; + } +} + +static void +bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr) +{ + u32 val, offset0, offset1, offset2, offset3; + u32 cid_addr = GET_CID_ADDR(cid); + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + offset0 = BNX2_L2CTX_TYPE_XI; + offset1 = BNX2_L2CTX_CMD_TYPE_XI; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; + } else { + offset0 = BNX2_L2CTX_TYPE; + offset1 = BNX2_L2CTX_CMD_TYPE; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; + } + val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; + bnx2_ctx_wr(bp, cid_addr, offset0, val); + + val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); + bnx2_ctx_wr(bp, cid_addr, offset1, val); + + val = (u64) txr->tx_desc_mapping >> 32; + bnx2_ctx_wr(bp, cid_addr, offset2, val); + + val = (u64) txr->tx_desc_mapping & 0xffffffff; + bnx2_ctx_wr(bp, cid_addr, offset3, val); +} + +static void +bnx2_init_tx_ring(struct bnx2 *bp, int ring_num) +{ + struct tx_bd *txbd; + u32 cid = TX_CID; + struct bnx2_napi *bnapi; + struct bnx2_tx_ring_info *txr; + + bnapi = &bp->bnx2_napi[ring_num]; + txr = &bnapi->tx_ring; + + if (ring_num == 0) + cid = TX_CID; + else + cid = TX_TSS_CID + ring_num - 1; + + bp->tx_wake_thresh = bp->tx_ring_size / 2; + + txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT]; + + txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32; + txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff; + + txr->tx_prod = 0; + txr->tx_prod_bseq = 0; + + txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX; + txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ; + + bnx2_init_tx_context(bp, cid, txr); +} + +static void +bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size, + int num_rings) +{ + int i; + struct rx_bd *rxbd; + + for (i = 0; i < num_rings; i++) { + int j; + + rxbd = &rx_ring[i][0]; + for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) { + rxbd->rx_bd_len = buf_size; + rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; + } + if (i == (num_rings - 1)) + j = 0; + else + j = i + 1; + rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32; + rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff; + } +} + +static void +bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) +{ + int i; + u16 prod, ring_prod; + u32 cid, rx_cid_addr, val; + struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num]; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + + if (ring_num == 0) + cid = RX_CID; + else + cid = RX_RSS_CID + ring_num - 1; + + rx_cid_addr = GET_CID_ADDR(cid); + + bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping, + 
bp->rx_buf_use_size, bp->rx_max_ring); + + bnx2_init_rx_context(bp, cid); + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + val = REG_RD(bp, BNX2_MQ_MAP_L2_5); + REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM); + } + + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0); + if (bp->rx_pg_ring_size) { + bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring, + rxr->rx_pg_desc_mapping, + PAGE_SIZE, bp->rx_max_pg_ring); + val = (bp->rx_buf_use_size << 16) | PAGE_SIZE; + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val); + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY, + BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num); + + val = (u64) rxr->rx_pg_desc_mapping[0] >> 32; + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val); + + val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff; + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val); + + if (CHIP_NUM(bp) == CHIP_NUM_5709) + REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT); + } + + val = (u64) rxr->rx_desc_mapping[0] >> 32; + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); + + val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff; + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); + + ring_prod = prod = rxr->rx_pg_prod; + for (i = 0; i < bp->rx_pg_ring_size; i++) { + if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) { + netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n", + ring_num, i, bp->rx_pg_ring_size); + break; + } + prod = NEXT_RX_BD(prod); + ring_prod = RX_PG_RING_IDX(prod); + } + rxr->rx_pg_prod = prod; + + ring_prod = prod = rxr->rx_prod; + for (i = 0; i < bp->rx_ring_size; i++) { + if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) { + netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", + ring_num, i, bp->rx_ring_size); + break; + } + prod = NEXT_RX_BD(prod); + ring_prod = RX_RING_IDX(prod); + } + rxr->rx_prod = prod; + + rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX; + rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ; + rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX; + + REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); + REG_WR16(bp, rxr->rx_bidx_addr, prod); + + REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); +} + +static void +bnx2_init_all_rings(struct bnx2 *bp) +{ + int i; + u32 val; + + bnx2_clear_ring_states(bp); + + REG_WR(bp, BNX2_TSCH_TSS_CFG, 0); + for (i = 0; i < bp->num_tx_rings; i++) + bnx2_init_tx_ring(bp, i); + + if (bp->num_tx_rings > 1) + REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) | + (TX_TSS_CID << 7)); + + REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0); + bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0); + + for (i = 0; i < bp->num_rx_rings; i++) + bnx2_init_rx_ring(bp, i); + + if (bp->num_rx_rings > 1) { + u32 tbl_32 = 0; + + for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) { + int shift = (i % 8) << 2; + + tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift; + if ((i % 8) == 7) { + REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32); + REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) | + BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK | + BNX2_RLUP_RSS_COMMAND_WRITE | + BNX2_RLUP_RSS_COMMAND_HASH_MASK); + tbl_32 = 0; + } + } + + val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI | + BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI; + + REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val); + + } +} + +static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size) +{ + u32 max, num_rings = 1; + + while (ring_size > MAX_RX_DESC_CNT) { + ring_size -= MAX_RX_DESC_CNT; + num_rings++; + } + /* round to next 
power of 2 */ + max = max_size; + while ((max & num_rings) == 0) + max >>= 1; + + if (num_rings != max) + max <<= 1; + + return max; +} + +static void +bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) +{ + u32 rx_size, rx_space, jumbo_size; + + /* 8 for CRC and VLAN */ + rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8; + + rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD + + sizeof(struct skb_shared_info); + + bp->rx_copy_thresh = BNX2_RX_COPY_THRESH; + bp->rx_pg_ring_size = 0; + bp->rx_max_pg_ring = 0; + bp->rx_max_pg_ring_idx = 0; + if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) { + int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; + + jumbo_size = size * pages; + if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT) + jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT; + + bp->rx_pg_ring_size = jumbo_size; + bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size, + MAX_RX_PG_RINGS); + bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1; + rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET; + bp->rx_copy_thresh = 0; + } + + bp->rx_buf_use_size = rx_size; + /* hw alignment */ + bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN; + bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; + bp->rx_ring_size = size; + bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS); + bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1; +} + +static void +bnx2_free_tx_skbs(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_tx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + int j; + + if (txr->tx_buf_ring == NULL) + continue; + + for (j = 0; j < TX_DESC_CNT; ) { + struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; + struct sk_buff *skb = tx_buf->skb; + int k, last; + + if (skb == NULL) { + j++; + continue; + } + + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + tx_buf->skb = NULL; + + last = tx_buf->nr_frags; + j++; + for (k = 0; k < last; k++, j++) { + tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; + dma_unmap_page(&bp->pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_shinfo(skb)->frags[k].size, + PCI_DMA_TODEVICE); + } + dev_kfree_skb(skb); + } + } +} + +static void +bnx2_free_rx_skbs(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_rx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + int j; + + if (rxr->rx_buf_ring == NULL) + return; + + for (j = 0; j < bp->rx_max_ring_idx; j++) { + struct sw_bd *rx_buf = &rxr->rx_buf_ring[j]; + struct sk_buff *skb = rx_buf->skb; + + if (skb == NULL) + continue; + + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + + rx_buf->skb = NULL; + + dev_kfree_skb(skb); + } + for (j = 0; j < bp->rx_max_pg_ring_idx; j++) + bnx2_free_rx_page(bp, rxr, j); + } +} + +static void +bnx2_free_skbs(struct bnx2 *bp) +{ + bnx2_free_tx_skbs(bp); + bnx2_free_rx_skbs(bp); +} + +static int +bnx2_reset_nic(struct bnx2 *bp, u32 reset_code) +{ + int rc; + + rc = bnx2_reset_chip(bp, reset_code); + bnx2_free_skbs(bp); + if (rc) + return rc; + + if ((rc = bnx2_init_chip(bp)) != 0) + return rc; + + bnx2_init_all_rings(bp); + return 0; +} + +static int +bnx2_init_nic(struct bnx2 *bp, int reset_phy) +{ + int rc; + + if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0) + return rc; + + spin_lock_bh(&bp->phy_lock); + bnx2_init_phy(bp, reset_phy); + bnx2_set_link(bp); + if (bp->phy_flags & 
BNX2_PHY_FLAG_REMOTE_PHY_CAP) + bnx2_remote_phy_event(bp); + spin_unlock_bh(&bp->phy_lock); + return 0; +} + +static int +bnx2_shutdown_chip(struct bnx2 *bp) +{ + u32 reset_code; + + if (bp->flags & BNX2_FLAG_NO_WOL) + reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; + else if (bp->wol) + reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; + else + reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; + + return bnx2_reset_chip(bp, reset_code); +} + +static int +bnx2_test_registers(struct bnx2 *bp) +{ + int ret; + int i, is_5709; + static const struct { + u16 offset; + u16 flags; +#define BNX2_FL_NOT_5709 1 + u32 rw_mask; + u32 ro_mask; + } reg_tbl[] = { + { 0x006c, 0, 0x00000000, 0x0000003f }, + { 0x0090, 0, 0xffffffff, 0x00000000 }, + { 0x0094, 0, 0x00000000, 0x00000000 }, + + { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 }, + { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff }, + { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 }, + { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 }, + { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff }, + { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + + { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, + { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, + { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, + { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, + + { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 }, + { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 }, + { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 }, + + { 0x1000, 0, 0x00000000, 0x00000001 }, + { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 }, + + { 0x1408, 0, 0x01c00800, 0x00000000 }, + { 0x149c, 0, 0x8000ffff, 0x00000000 }, + { 0x14a8, 0, 0x00000000, 0x000001ff }, + { 0x14ac, 0, 0x0fffffff, 0x10000000 }, + { 0x14b0, 0, 0x00000002, 0x00000001 }, + { 0x14b8, 0, 0x00000000, 0x00000000 }, + { 0x14c0, 0, 0x00000000, 0x00000009 }, + { 0x14c4, 0, 0x00003fff, 0x00000000 }, + { 0x14cc, 0, 0x00000000, 0x00000001 }, + { 0x14d0, 0, 0xffffffff, 0x00000000 }, + + { 0x1800, 0, 0x00000000, 0x00000001 }, + { 0x1804, 0, 0x00000000, 0x00000003 }, + + { 0x2800, 0, 0x00000000, 0x00000001 }, + { 0x2804, 0, 0x00000000, 0x00003f01 }, + { 0x2808, 0, 0x0f3f3f03, 0x00000000 }, + { 0x2810, 0, 0xffff0000, 0x00000000 }, + { 0x2814, 0, 0xffff0000, 0x00000000 }, + { 0x2818, 0, 0xffff0000, 0x00000000 }, + { 0x281c, 0, 0xffff0000, 0x00000000 }, + { 0x2834, 0, 0xffffffff, 0x00000000 }, + { 0x2840, 0, 0x00000000, 0xffffffff }, + { 0x2844, 0, 0x00000000, 0xffffffff }, + { 0x2848, 0, 0xffffffff, 0x00000000 }, + { 0x284c, 0, 0xf800f800, 0x07ff07ff }, + + { 0x2c00, 0, 0x00000000, 0x00000011 }, + { 0x2c04, 0, 0x00000000, 0x00030007 }, + + { 0x3c00, 0, 0x00000000, 0x00000001 }, + { 0x3c04, 0, 0x00000000, 0x00070000 }, + { 0x3c08, 0, 0x00007f71, 0x07f00000 }, + { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 }, + { 0x3c10, 0, 0xffffffff, 0x00000000 }, + { 0x3c14, 0, 0x00000000, 0xffffffff }, + { 0x3c18, 0, 0x00000000, 0xffffffff }, + { 0x3c1c, 0, 0xfffff000, 0x00000000 }, + { 0x3c20, 0, 0xffffff00, 0x00000000 }, + + { 0x5004, 0, 0x00000000, 0x0000007f }, + { 0x5008, 0, 0x0f0007ff, 0x00000000 }, + + { 0x5c00, 0, 0x00000000, 0x00000001 }, + { 0x5c04, 0, 0x00000000, 0x0003000f }, + { 0x5c08, 0, 0x00000003, 0x00000000 }, + { 0x5c0c, 0, 
0x0000fff8, 0x00000000 }, + { 0x5c10, 0, 0x00000000, 0xffffffff }, + { 0x5c80, 0, 0x00000000, 0x0f7113f1 }, + { 0x5c84, 0, 0x00000000, 0x0000f333 }, + { 0x5c88, 0, 0x00000000, 0x00077373 }, + { 0x5c8c, 0, 0x00000000, 0x0007f737 }, + + { 0x6808, 0, 0x0000ff7f, 0x00000000 }, + { 0x680c, 0, 0xffffffff, 0x00000000 }, + { 0x6810, 0, 0xffffffff, 0x00000000 }, + { 0x6814, 0, 0xffffffff, 0x00000000 }, + { 0x6818, 0, 0xffffffff, 0x00000000 }, + { 0x681c, 0, 0xffffffff, 0x00000000 }, + { 0x6820, 0, 0x00ff00ff, 0x00000000 }, + { 0x6824, 0, 0x00ff00ff, 0x00000000 }, + { 0x6828, 0, 0x00ff00ff, 0x00000000 }, + { 0x682c, 0, 0x03ff03ff, 0x00000000 }, + { 0x6830, 0, 0x03ff03ff, 0x00000000 }, + { 0x6834, 0, 0x03ff03ff, 0x00000000 }, + { 0x6838, 0, 0x03ff03ff, 0x00000000 }, + { 0x683c, 0, 0x0000ffff, 0x00000000 }, + { 0x6840, 0, 0x00000ff0, 0x00000000 }, + { 0x6844, 0, 0x00ffff00, 0x00000000 }, + { 0x684c, 0, 0xffffffff, 0x00000000 }, + { 0x6850, 0, 0x7f7f7f7f, 0x00000000 }, + { 0x6854, 0, 0x7f7f7f7f, 0x00000000 }, + { 0x6858, 0, 0x7f7f7f7f, 0x00000000 }, + { 0x685c, 0, 0x7f7f7f7f, 0x00000000 }, + { 0x6908, 0, 0x00000000, 0x0001ff0f }, + { 0x690c, 0, 0x00000000, 0x0ffe00f0 }, + + { 0xffff, 0, 0x00000000, 0x00000000 }, + }; + + ret = 0; + is_5709 = 0; + if (CHIP_NUM(bp) == CHIP_NUM_5709) + is_5709 = 1; + + for (i = 0; reg_tbl[i].offset != 0xffff; i++) { + u32 offset, rw_mask, ro_mask, save_val, val; + u16 flags = reg_tbl[i].flags; + + if (is_5709 && (flags & BNX2_FL_NOT_5709)) + continue; + + offset = (u32) reg_tbl[i].offset; + rw_mask = reg_tbl[i].rw_mask; + ro_mask = reg_tbl[i].ro_mask; + + save_val = readl(bp->regview + offset); + + writel(0, bp->regview + offset); + + val = readl(bp->regview + offset); + if ((val & rw_mask) != 0) { + goto reg_test_err; + } + + if ((val & ro_mask) != (save_val & ro_mask)) { + goto reg_test_err; + } + + writel(0xffffffff, bp->regview + offset); + + val = readl(bp->regview + offset); + if ((val & rw_mask) != rw_mask) { + goto reg_test_err; + } + + if ((val & ro_mask) != (save_val & ro_mask)) { + goto reg_test_err; + } + + writel(save_val, bp->regview + offset); + continue; + +reg_test_err: + writel(save_val, bp->regview + offset); + ret = -ENODEV; + break; + } + return ret; +} + +static int +bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) +{ + static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555, + 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa }; + int i; + + for (i = 0; i < sizeof(test_pattern) / 4; i++) { + u32 offset; + + for (offset = 0; offset < size; offset += 4) { + + bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]); + + if (bnx2_reg_rd_ind(bp, start + offset) != + test_pattern[i]) { + return -ENODEV; + } + } + } + return 0; +} + +static int +bnx2_test_memory(struct bnx2 *bp) +{ + int ret = 0; + int i; + static struct mem_entry { + u32 offset; + u32 len; + } mem_tbl_5706[] = { + { 0x60000, 0x4000 }, + { 0xa0000, 0x3000 }, + { 0xe0000, 0x4000 }, + { 0x120000, 0x4000 }, + { 0x1a0000, 0x4000 }, + { 0x160000, 0x4000 }, + { 0xffffffff, 0 }, + }, + mem_tbl_5709[] = { + { 0x60000, 0x4000 }, + { 0xa0000, 0x3000 }, + { 0xe0000, 0x4000 }, + { 0x120000, 0x4000 }, + { 0x1a0000, 0x4000 }, + { 0xffffffff, 0 }, + }; + struct mem_entry *mem_tbl; + + if (CHIP_NUM(bp) == CHIP_NUM_5709) + mem_tbl = mem_tbl_5709; + else + mem_tbl = mem_tbl_5706; + + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { + if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset, + mem_tbl[i].len)) != 0) { + return ret; + } + } + + return ret; +} + +#define BNX2_MAC_LOOPBACK 0 +#define 
BNX2_PHY_LOOPBACK 1 + +static int +bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) +{ + unsigned int pkt_size, num_pkts, i; + struct sk_buff *skb, *rx_skb; + unsigned char *packet; + u16 rx_start_idx, rx_idx; + dma_addr_t map; + struct tx_bd *txbd; + struct sw_bd *rx_buf; + struct l2_fhdr *rx_hdr; + int ret = -ENODEV; + struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi; + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + + tx_napi = bnapi; + + txr = &tx_napi->tx_ring; + rxr = &bnapi->rx_ring; + if (loopback_mode == BNX2_MAC_LOOPBACK) { + bp->loopback = MAC_LOOPBACK; + bnx2_set_mac_loopback(bp); + } + else if (loopback_mode == BNX2_PHY_LOOPBACK) { + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return 0; + + bp->loopback = PHY_LOOPBACK; + bnx2_set_phy_loopback(bp); + } + else + return -EINVAL; + + pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4); + skb = netdev_alloc_skb(bp->dev, pkt_size); + if (!skb) + return -ENOMEM; + packet = skb_put(skb, pkt_size); + memcpy(packet, bp->dev->dev_addr, 6); + memset(packet + 6, 0x0, 8); + for (i = 14; i < pkt_size; i++) + packet[i] = (unsigned char) (i & 0xff); + + map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, + PCI_DMA_TODEVICE); + if (dma_mapping_error(&bp->pdev->dev, map)) { + dev_kfree_skb(skb); + return -EIO; + } + + REG_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + + REG_RD(bp, BNX2_HC_COMMAND); + + udelay(5); + rx_start_idx = bnx2_get_hw_rx_cons(bnapi); + + num_pkts = 0; + + txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)]; + + txbd->tx_bd_haddr_hi = (u64) map >> 32; + txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff; + txbd->tx_bd_mss_nbytes = pkt_size; + txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END; + + num_pkts++; + txr->tx_prod = NEXT_TX_BD(txr->tx_prod); + txr->tx_prod_bseq += pkt_size; + + REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod); + REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); + + udelay(100); + + REG_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + + REG_RD(bp, BNX2_HC_COMMAND); + + udelay(5); + + dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + + if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod) + goto loopback_test_done; + + rx_idx = bnx2_get_hw_rx_cons(bnapi); + if (rx_idx != rx_start_idx + num_pkts) { + goto loopback_test_done; + } + + rx_buf = &rxr->rx_buf_ring[rx_start_idx]; + rx_skb = rx_buf->skb; + + rx_hdr = rx_buf->desc; + skb_reserve(rx_skb, BNX2_RX_OFFSET); + + dma_sync_single_for_cpu(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_size, PCI_DMA_FROMDEVICE); + + if (rx_hdr->l2_fhdr_status & + (L2_FHDR_ERRORS_BAD_CRC | + L2_FHDR_ERRORS_PHY_DECODE | + L2_FHDR_ERRORS_ALIGNMENT | + L2_FHDR_ERRORS_TOO_SHORT | + L2_FHDR_ERRORS_GIANT_FRAME)) { + + goto loopback_test_done; + } + + if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) { + goto loopback_test_done; + } + + for (i = 14; i < pkt_size; i++) { + if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) { + goto loopback_test_done; + } + } + + ret = 0; + +loopback_test_done: + bp->loopback = 0; + return ret; +} + +#define BNX2_MAC_LOOPBACK_FAILED 1 +#define BNX2_PHY_LOOPBACK_FAILED 2 +#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \ + BNX2_PHY_LOOPBACK_FAILED) + +static int +bnx2_test_loopback(struct bnx2 *bp) +{ + int rc = 0; + + if (!netif_running(bp->dev)) + return BNX2_LOOPBACK_FAILED; + + bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); + 
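+	/*
+	 * Each pass below re-initializes the PHY under phy_lock and then runs
+	 * bnx2_run_loopback() above, which transmits a single test frame:
+	 * bytes 0-5 carry the device's own MAC address (presumably so the
+	 * looped-back frame passes the unicast RX filter), bytes 6-13 are
+	 * zero, and every payload byte i from 14 on holds (i & 0xff), which
+	 * the receive side re-checks byte for byte.
+	 */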
spin_lock_bh(&bp->phy_lock); + bnx2_init_phy(bp, 1); + spin_unlock_bh(&bp->phy_lock); + if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK)) + rc |= BNX2_MAC_LOOPBACK_FAILED; + if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK)) + rc |= BNX2_PHY_LOOPBACK_FAILED; + return rc; +} + +#define NVRAM_SIZE 0x200 +#define CRC32_RESIDUAL 0xdebb20e3 + +static int +bnx2_test_nvram(struct bnx2 *bp) +{ + __be32 buf[NVRAM_SIZE / 4]; + u8 *data = (u8 *) buf; + int rc = 0; + u32 magic, csum; + + if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0) + goto test_nvram_done; + + magic = be32_to_cpu(buf[0]); + if (magic != 0x669955aa) { + rc = -ENODEV; + goto test_nvram_done; + } + + if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0) + goto test_nvram_done; + + csum = ether_crc_le(0x100, data); + if (csum != CRC32_RESIDUAL) { + rc = -ENODEV; + goto test_nvram_done; + } + + csum = ether_crc_le(0x100, data + 0x100); + if (csum != CRC32_RESIDUAL) { + rc = -ENODEV; + } + +test_nvram_done: + return rc; +} + +static int +bnx2_test_link(struct bnx2 *bp) +{ + u32 bmsr; + + if (!netif_running(bp->dev)) + return -ENODEV; + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { + if (bp->link_up) + return 0; + return -ENODEV; + } + spin_lock_bh(&bp->phy_lock); + bnx2_enable_bmsr1(bp); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_disable_bmsr1(bp); + spin_unlock_bh(&bp->phy_lock); + + if (bmsr & BMSR_LSTATUS) { + return 0; + } + return -ENODEV; +} + +static int +bnx2_test_intr(struct bnx2 *bp) +{ + int i; + u16 status_idx; + + if (!netif_running(bp->dev)) + return -ENODEV; + + status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; + + /* This register is not touched during run-time. */ + REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); + REG_RD(bp, BNX2_HC_COMMAND); + + for (i = 0; i < 10; i++) { + if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != + status_idx) { + + break; + } + + msleep_interruptible(10); + } + if (i < 10) + return 0; + + return -ENODEV; +} + +/* Determining link for parallel detection. 
*/ +static int +bnx2_5706_serdes_has_link(struct bnx2 *bp) +{ + u32 mode_ctl, an_dbg, exp; + + if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL) + return 0; + + bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl); + + if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET)) + return 0; + + bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); + + if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID)) + return 0; + + bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1); + bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp); + bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp); + + if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */ + return 0; + + return 1; +} + +static void +bnx2_5706_serdes_timer(struct bnx2 *bp) +{ + int check_link = 1; + + spin_lock(&bp->phy_lock); + if (bp->serdes_an_pending) { + bp->serdes_an_pending--; + check_link = 0; + } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { + u32 bmcr; + + bp->current_interval = BNX2_TIMER_INTERVAL; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + if (bmcr & BMCR_ANENABLE) { + if (bnx2_5706_serdes_has_link(bp)) { + bmcr &= ~BMCR_ANENABLE; + bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; + bnx2_write_phy(bp, bp->mii_bmcr, bmcr); + bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT; + } + } + } + else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) && + (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) { + u32 phy2; + + bnx2_write_phy(bp, 0x17, 0x0f01); + bnx2_read_phy(bp, 0x15, &phy2); + if (phy2 & 0x20) { + u32 bmcr; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + bmcr |= BMCR_ANENABLE; + bnx2_write_phy(bp, bp->mii_bmcr, bmcr); + + bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; + } + } else + bp->current_interval = BNX2_TIMER_INTERVAL; + + if (check_link) { + u32 val; + + bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); + + if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) { + if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) { + bnx2_5706s_force_link_dn(bp, 1); + bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN; + } else + bnx2_set_link(bp); + } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC)) + bnx2_set_link(bp); + } + spin_unlock(&bp->phy_lock); +} + +static void +bnx2_5708_serdes_timer(struct bnx2 *bp) +{ + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return; + + if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) { + bp->serdes_an_pending = 0; + return; + } + + spin_lock(&bp->phy_lock); + if (bp->serdes_an_pending) + bp->serdes_an_pending--; + else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { + u32 bmcr; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (bmcr & BMCR_ANENABLE) { + bnx2_enable_forced_2g5(bp); + bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT; + } else { + bnx2_disable_forced_2g5(bp); + bp->serdes_an_pending = 2; + bp->current_interval = BNX2_TIMER_INTERVAL; + } + + } else + bp->current_interval = BNX2_TIMER_INTERVAL; + + spin_unlock(&bp->phy_lock); +} + +static void +bnx2_timer(unsigned long data) +{ + struct bnx2 *bp = (struct bnx2 *) data; + + if (!netif_running(bp->dev)) + return; + + if (atomic_read(&bp->intr_sem) != 0) + goto bnx2_restart_timer; + + if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) == + BNX2_FLAG_USING_MSI) + bnx2_chk_missed_msi(bp); + + bnx2_send_heart_beat(bp); + 
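+	/*
+	 * BNX2_FW_RX_DROP_COUNT appears to be kept by the firmware and is
+	 * only reachable through an indirect register read, so it is sampled
+	 * once per timer tick here; bnx2_get_stats64() later folds
+	 * stat_FwRxDrop into rx_missed_errors.
+	 */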
+ bp->stats_blk->stat_FwRxDrop = + bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT); + + /* workaround occasional corrupted counters */ + if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks) + REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | + BNX2_HC_COMMAND_STATS_NOW); + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + if (CHIP_NUM(bp) == CHIP_NUM_5706) + bnx2_5706_serdes_timer(bp); + else + bnx2_5708_serdes_timer(bp); + } + +bnx2_restart_timer: + mod_timer(&bp->timer, jiffies + bp->current_interval); +} + +static int +bnx2_request_irq(struct bnx2 *bp) +{ + unsigned long flags; + struct bnx2_irq *irq; + int rc = 0, i; + + if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX) + flags = 0; + else + flags = IRQF_SHARED; + + for (i = 0; i < bp->irq_nvecs; i++) { + irq = &bp->irq_tbl[i]; + rc = request_irq(irq->vector, irq->handler, flags, irq->name, + &bp->bnx2_napi[i]); + if (rc) + break; + irq->requested = 1; + } + return rc; +} + +static void +__bnx2_free_irq(struct bnx2 *bp) +{ + struct bnx2_irq *irq; + int i; + + for (i = 0; i < bp->irq_nvecs; i++) { + irq = &bp->irq_tbl[i]; + if (irq->requested) + free_irq(irq->vector, &bp->bnx2_napi[i]); + irq->requested = 0; + } +} + +static void +bnx2_free_irq(struct bnx2 *bp) +{ + + __bnx2_free_irq(bp); + if (bp->flags & BNX2_FLAG_USING_MSI) + pci_disable_msi(bp->pdev); + else if (bp->flags & BNX2_FLAG_USING_MSIX) + pci_disable_msix(bp->pdev); + + bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI); +} + +static void +bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) +{ + int i, total_vecs, rc; + struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC]; + struct net_device *dev = bp->dev; + const int len = sizeof(bp->irq_tbl[0].name); + + bnx2_setup_msix_tbl(bp); + REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1); + REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); + REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); + + /* Need to flush the previous three writes to ensure MSI-X + * is setup properly */ + REG_RD(bp, BNX2_PCI_MSIX_CONTROL); + + for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + total_vecs = msix_vecs; +#ifdef BCM_CNIC + total_vecs++; +#endif + rc = -ENOSPC; + while (total_vecs >= BNX2_MIN_MSIX_VEC) { + rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs); + if (rc <= 0) + break; + if (rc > 0) + total_vecs = rc; + } + + if (rc != 0) + return; + + msix_vecs = total_vecs; +#ifdef BCM_CNIC + msix_vecs--; +#endif + bp->irq_nvecs = msix_vecs; + bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI; + for (i = 0; i < total_vecs; i++) { + bp->irq_tbl[i].vector = msix_ent[i].vector; + snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i); + bp->irq_tbl[i].handler = bnx2_msi_1shot; + } +} + +static int +bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) +{ + int cpus = num_online_cpus(); + int msix_vecs = min(cpus + 1, RX_MAX_RINGS); + + bp->irq_tbl[0].handler = bnx2_interrupt; + strcpy(bp->irq_tbl[0].name, bp->dev->name); + bp->irq_nvecs = 1; + bp->irq_tbl[0].vector = bp->pdev->irq; + + if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi) + bnx2_enable_msix(bp, msix_vecs); + + if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi && + !(bp->flags & BNX2_FLAG_USING_MSIX)) { + if (pci_enable_msi(bp->pdev) == 0) { + bp->flags |= BNX2_FLAG_USING_MSI; + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + bp->flags |= BNX2_FLAG_ONE_SHOT_MSI; + bp->irq_tbl[0].handler = bnx2_msi_1shot; + } else + bp->irq_tbl[0].handler = bnx2_msi; + + bp->irq_tbl[0].vector = bp->pdev->irq; + } + } + 
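+	/*
+	 * rounddown_pow_of_two() keeps the TX ring count a power of two (for
+	 * example, 5 IRQ vectors yield 4 TX rings but 5 RX rings); the value
+	 * programmed into BNX2_TSCH_TSS_CFG at ring-init time is
+	 * num_tx_rings - 1, consistent with the chip treating it as a
+	 * queue-select mask.
+	 */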
+ bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs); + netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings); + + bp->num_rx_rings = bp->irq_nvecs; + return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings); +} + +/* Called with rtnl_lock */ +static int +bnx2_open(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + int rc; + + netif_carrier_off(dev); + + bnx2_set_power_state(bp, PCI_D0); + bnx2_disable_int(bp); + + rc = bnx2_setup_int_mode(bp, disable_msi); + if (rc) + goto open_err; + bnx2_init_napi(bp); + bnx2_napi_enable(bp); + rc = bnx2_alloc_mem(bp); + if (rc) + goto open_err; + + rc = bnx2_request_irq(bp); + if (rc) + goto open_err; + + rc = bnx2_init_nic(bp, 1); + if (rc) + goto open_err; + + mod_timer(&bp->timer, jiffies + bp->current_interval); + + atomic_set(&bp->intr_sem, 0); + + memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block)); + + bnx2_enable_int(bp); + + if (bp->flags & BNX2_FLAG_USING_MSI) { + /* Test MSI to make sure it is working + * If MSI test fails, go back to INTx mode + */ + if (bnx2_test_intr(bp) != 0) { + netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n"); + + bnx2_disable_int(bp); + bnx2_free_irq(bp); + + bnx2_setup_int_mode(bp, 1); + + rc = bnx2_init_nic(bp, 0); + + if (!rc) + rc = bnx2_request_irq(bp); + + if (rc) { + del_timer_sync(&bp->timer); + goto open_err; + } + bnx2_enable_int(bp); + } + } + if (bp->flags & BNX2_FLAG_USING_MSI) + netdev_info(dev, "using MSI\n"); + else if (bp->flags & BNX2_FLAG_USING_MSIX) + netdev_info(dev, "using MSIX\n"); + + netif_tx_start_all_queues(dev); + + return 0; + +open_err: + bnx2_napi_disable(bp); + bnx2_free_skbs(bp); + bnx2_free_irq(bp); + bnx2_free_mem(bp); + bnx2_del_napi(bp); + return rc; +} + +static void +bnx2_reset_task(struct work_struct *work) +{ + struct bnx2 *bp = container_of(work, struct bnx2, reset_task); + int rc; + + rtnl_lock(); + if (!netif_running(bp->dev)) { + rtnl_unlock(); + return; + } + + bnx2_netif_stop(bp, true); + + rc = bnx2_init_nic(bp, 1); + if (rc) { + netdev_err(bp->dev, "failed to reset NIC, closing\n"); + bnx2_napi_enable(bp); + dev_close(bp->dev); + rtnl_unlock(); + return; + } + + atomic_set(&bp->intr_sem, 1); + bnx2_netif_start(bp, true); + rtnl_unlock(); +} + +static void +bnx2_dump_state(struct bnx2 *bp) +{ + struct net_device *dev = bp->dev; + u32 val1, val2; + + pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1); + netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n", + atomic_read(&bp->intr_sem), val1); + pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1); + pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2); + netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2); + netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n", + REG_RD(bp, BNX2_EMAC_TX_STATUS), + REG_RD(bp, BNX2_EMAC_RX_STATUS)); + netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n", + REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); + netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n", + REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); + if (bp->flags & BNX2_FLAG_USING_MSIX) + netdev_err(dev, "DEBUG: PBA[%08x]\n", + REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE)); +} + +static void +bnx2_tx_timeout(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + + bnx2_dump_state(bp); + bnx2_dump_mcp_state(bp); + + /* This allows the netif to be shutdown gracefully before resetting */ + 
schedule_work(&bp->reset_task); +} + +/* Called with netif_tx_lock. + * bnx2_tx_int() runs without netif_tx_lock unless it needs to call + * netif_wake_queue(). + */ +static netdev_tx_t +bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + dma_addr_t mapping; + struct tx_bd *txbd; + struct sw_tx_bd *tx_buf; + u32 len, vlan_tag_flags, last_frag, mss; + u16 prod, ring_prod; + int i; + struct bnx2_napi *bnapi; + struct bnx2_tx_ring_info *txr; + struct netdev_queue *txq; + + /* Determine which tx ring we will be placed on */ + i = skb_get_queue_mapping(skb); + bnapi = &bp->bnx2_napi[i]; + txr = &bnapi->tx_ring; + txq = netdev_get_tx_queue(dev, i); + + if (unlikely(bnx2_tx_avail(bp, txr) < + (skb_shinfo(skb)->nr_frags + 1))) { + netif_tx_stop_queue(txq); + netdev_err(dev, "BUG! Tx ring full when queue awake!\n"); + + return NETDEV_TX_BUSY; + } + len = skb_headlen(skb); + prod = txr->tx_prod; + ring_prod = TX_RING_IDX(prod); + + vlan_tag_flags = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; + } + + if (vlan_tx_tag_present(skb)) { + vlan_tag_flags |= + (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); + } + + if ((mss = skb_shinfo(skb)->gso_size)) { + u32 tcp_opt_len; + struct iphdr *iph; + + vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; + + tcp_opt_len = tcp_optlen(skb); + + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { + u32 tcp_off = skb_transport_offset(skb) - + sizeof(struct ipv6hdr) - ETH_HLEN; + + vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) | + TX_BD_FLAGS_SW_FLAGS; + if (likely(tcp_off == 0)) + vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK; + else { + tcp_off >>= 3; + vlan_tag_flags |= ((tcp_off & 0x3) << + TX_BD_FLAGS_TCP6_OFF0_SHL) | + ((tcp_off & 0x10) << + TX_BD_FLAGS_TCP6_OFF4_SHL); + mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL; + } + } else { + iph = ip_hdr(skb); + if (tcp_opt_len || (iph->ihl > 5)) { + vlan_tag_flags |= ((iph->ihl - 5) + + (tcp_opt_len >> 2)) << 8; + } + } + } else + mss = 0; + + mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + tx_buf = &txr->tx_buf_ring[ring_prod]; + tx_buf->skb = skb; + dma_unmap_addr_set(tx_buf, mapping, mapping); + + txbd = &txr->tx_desc_ring[ring_prod]; + + txbd->tx_bd_haddr_hi = (u64) mapping >> 32; + txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; + txbd->tx_bd_mss_nbytes = len | (mss << 16); + txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START; + + last_frag = skb_shinfo(skb)->nr_frags; + tx_buf->nr_frags = last_frag; + tx_buf->is_gso = skb_is_gso(skb); + + for (i = 0; i < last_frag; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + prod = NEXT_TX_BD(prod); + ring_prod = TX_RING_IDX(prod); + txbd = &txr->tx_desc_ring[ring_prod]; + + len = frag->size; + mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset, + len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) + goto dma_error; + dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, + mapping); + + txbd->tx_bd_haddr_hi = (u64) mapping >> 32; + txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; + txbd->tx_bd_mss_nbytes = len | (mss << 16); + txbd->tx_bd_vlan_tag_flags = vlan_tag_flags; + + } + txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END; + + prod = NEXT_TX_BD(prod); + txr->tx_prod_bseq += skb->len; + + REG_WR16(bp, txr->tx_bidx_addr, prod); + REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); + + 
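+	/*
+	 * The two register writes above (the producer index and the running
+	 * byte count) act as the doorbell telling the chip that new BDs are
+	 * ready; mmiowb() orders these posted MMIO writes against other CPUs
+	 * before the caller drops netif_tx_lock.
+	 */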
mmiowb(); + + txr->tx_prod = prod; + + if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * tx index in bnx2_tx_avail() below, because in + * bnx2_tx_int(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); + if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh) + netif_tx_wake_queue(txq); + } + + return NETDEV_TX_OK; +dma_error: + /* save value of frag that failed */ + last_frag = i; + + /* start back at beginning and unmap skb */ + prod = txr->tx_prod; + ring_prod = TX_RING_IDX(prod); + tx_buf = &txr->tx_buf_ring[ring_prod]; + tx_buf->skb = NULL; + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), PCI_DMA_TODEVICE); + + /* unmap remaining mapped pages */ + for (i = 0; i < last_frag; i++) { + prod = NEXT_TX_BD(prod); + ring_prod = TX_RING_IDX(prod); + tx_buf = &txr->tx_buf_ring[ring_prod]; + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_shinfo(skb)->frags[i].size, + PCI_DMA_TODEVICE); + } + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +/* Called with rtnl_lock */ +static int +bnx2_close(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + + bnx2_disable_int_sync(bp); + bnx2_napi_disable(bp); + del_timer_sync(&bp->timer); + bnx2_shutdown_chip(bp); + bnx2_free_irq(bp); + bnx2_free_skbs(bp); + bnx2_free_mem(bp); + bnx2_del_napi(bp); + bp->link_up = 0; + netif_carrier_off(bp->dev); + bnx2_set_power_state(bp, PCI_D3hot); + return 0; +} + +static void +bnx2_save_stats(struct bnx2 *bp) +{ + u32 *hw_stats = (u32 *) bp->stats_blk; + u32 *temp_stats = (u32 *) bp->temp_stats_blk; + int i; + + /* The 1st 10 counters are 64-bit counters */ + for (i = 0; i < 20; i += 2) { + u32 hi; + u64 lo; + + hi = temp_stats[i] + hw_stats[i]; + lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1]; + if (lo > 0xffffffff) + hi++; + temp_stats[i] = hi; + temp_stats[i + 1] = lo & 0xffffffff; + } + + for ( ; i < sizeof(struct statistics_block) / 4; i++) + temp_stats[i] += hw_stats[i]; +} + +#define GET_64BIT_NET_STATS64(ctr) \ + (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo)) + +#define GET_64BIT_NET_STATS(ctr) \ + GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \ + GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr) + +#define GET_32BIT_NET_STATS(ctr) \ + (unsigned long) (bp->stats_blk->ctr + \ + bp->temp_stats_blk->ctr) + +static struct rtnl_link_stats64 * +bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (bp->stats_blk == NULL) + return net_stats; + + net_stats->rx_packets = + GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) + + GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) + + GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts); + + net_stats->tx_packets = + GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) + + GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) + + GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts); + + net_stats->rx_bytes = + GET_64BIT_NET_STATS(stat_IfHCInOctets); + + net_stats->tx_bytes = + GET_64BIT_NET_STATS(stat_IfHCOutOctets); + + net_stats->multicast = + GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts); + + net_stats->collisions = + GET_32BIT_NET_STATS(stat_EtherStatsCollisions); + + net_stats->rx_length_errors = + GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) + + GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts); + + net_stats->rx_over_errors = + GET_32BIT_NET_STATS(stat_IfInFTQDiscards) + + GET_32BIT_NET_STATS(stat_IfInMBUFDiscards); + + 
net_stats->rx_frame_errors = + GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors); + + net_stats->rx_crc_errors = + GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors); + + net_stats->rx_errors = net_stats->rx_length_errors + + net_stats->rx_over_errors + net_stats->rx_frame_errors + + net_stats->rx_crc_errors; + + net_stats->tx_aborted_errors = + GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) + + GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions); + + if ((CHIP_NUM(bp) == CHIP_NUM_5706) || + (CHIP_ID(bp) == CHIP_ID_5708_A0)) + net_stats->tx_carrier_errors = 0; + else { + net_stats->tx_carrier_errors = + GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors); + } + + net_stats->tx_errors = + GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) + + net_stats->tx_aborted_errors + + net_stats->tx_carrier_errors; + + net_stats->rx_missed_errors = + GET_32BIT_NET_STATS(stat_IfInFTQDiscards) + + GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) + + GET_32BIT_NET_STATS(stat_FwRxDrop); + + return net_stats; +} + +/* All ethtool functions called with rtnl_lock */ + +static int +bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnx2 *bp = netdev_priv(dev); + int support_serdes = 0, support_copper = 0; + + cmd->supported = SUPPORTED_Autoneg; + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { + support_serdes = 1; + support_copper = 1; + } else if (bp->phy_port == PORT_FIBRE) + support_serdes = 1; + else + support_copper = 1; + + if (support_serdes) { + cmd->supported |= SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE; + if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) + cmd->supported |= SUPPORTED_2500baseX_Full; + + } + if (support_copper) { + cmd->supported |= SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_TP; + + } + + spin_lock_bh(&bp->phy_lock); + cmd->port = bp->phy_port; + cmd->advertising = bp->advertising; + + if (bp->autoneg & AUTONEG_SPEED) { + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->autoneg = AUTONEG_DISABLE; + } + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, bp->line_speed); + cmd->duplex = bp->duplex; + } + else { + ethtool_cmd_speed_set(cmd, -1); + cmd->duplex = -1; + } + spin_unlock_bh(&bp->phy_lock); + + cmd->transceiver = XCVR_INTERNAL; + cmd->phy_address = bp->phy_addr; + + return 0; +} + +static int +bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnx2 *bp = netdev_priv(dev); + u8 autoneg = bp->autoneg; + u8 req_duplex = bp->req_duplex; + u16 req_line_speed = bp->req_line_speed; + u32 advertising = bp->advertising; + int err = -EINVAL; + + spin_lock_bh(&bp->phy_lock); + + if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE) + goto err_out_unlock; + + if (cmd->port != bp->phy_port && + !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)) + goto err_out_unlock; + + /* If device is down, we can store the settings only if the user + * is setting the currently active port. 
+ */ + if (!netif_running(dev) && cmd->port != bp->phy_port) + goto err_out_unlock; + + if (cmd->autoneg == AUTONEG_ENABLE) { + autoneg |= AUTONEG_SPEED; + + advertising = cmd->advertising; + if (cmd->port == PORT_TP) { + advertising &= ETHTOOL_ALL_COPPER_SPEED; + if (!advertising) + advertising = ETHTOOL_ALL_COPPER_SPEED; + } else { + advertising &= ETHTOOL_ALL_FIBRE_SPEED; + if (!advertising) + advertising = ETHTOOL_ALL_FIBRE_SPEED; + } + advertising |= ADVERTISED_Autoneg; + } + else { + u32 speed = ethtool_cmd_speed(cmd); + if (cmd->port == PORT_FIBRE) { + if ((speed != SPEED_1000 && + speed != SPEED_2500) || + (cmd->duplex != DUPLEX_FULL)) + goto err_out_unlock; + + if (speed == SPEED_2500 && + !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) + goto err_out_unlock; + } else if (speed == SPEED_1000 || speed == SPEED_2500) + goto err_out_unlock; + + autoneg &= ~AUTONEG_SPEED; + req_line_speed = speed; + req_duplex = cmd->duplex; + advertising = 0; + } + + bp->autoneg = autoneg; + bp->advertising = advertising; + bp->req_line_speed = req_line_speed; + bp->req_duplex = req_duplex; + + err = 0; + /* If device is down, the new settings will be picked up when it is + * brought up. + */ + if (netif_running(dev)) + err = bnx2_setup_phy(bp, cmd->port); + +err_out_unlock: + spin_unlock_bh(&bp->phy_lock); + + return err; +} + +static void +bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct bnx2 *bp = netdev_priv(dev); + + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + strcpy(info->bus_info, pci_name(bp->pdev)); + strcpy(info->fw_version, bp->fw_version); +} + +#define BNX2_REGDUMP_LEN (32 * 1024) + +static int +bnx2_get_regs_len(struct net_device *dev) +{ + return BNX2_REGDUMP_LEN; +} + +static void +bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) +{ + u32 *p = _p, i, offset; + u8 *orig_p = _p; + struct bnx2 *bp = netdev_priv(dev); + static const u32 reg_boundaries[] = { + 0x0000, 0x0098, 0x0400, 0x045c, + 0x0800, 0x0880, 0x0c00, 0x0c10, + 0x0c30, 0x0d08, 0x1000, 0x101c, + 0x1040, 0x1048, 0x1080, 0x10a4, + 0x1400, 0x1490, 0x1498, 0x14f0, + 0x1500, 0x155c, 0x1580, 0x15dc, + 0x1600, 0x1658, 0x1680, 0x16d8, + 0x1800, 0x1820, 0x1840, 0x1854, + 0x1880, 0x1894, 0x1900, 0x1984, + 0x1c00, 0x1c0c, 0x1c40, 0x1c54, + 0x1c80, 0x1c94, 0x1d00, 0x1d84, + 0x2000, 0x2030, 0x23c0, 0x2400, + 0x2800, 0x2820, 0x2830, 0x2850, + 0x2b40, 0x2c10, 0x2fc0, 0x3058, + 0x3c00, 0x3c94, 0x4000, 0x4010, + 0x4080, 0x4090, 0x43c0, 0x4458, + 0x4c00, 0x4c18, 0x4c40, 0x4c54, + 0x4fc0, 0x5010, 0x53c0, 0x5444, + 0x5c00, 0x5c18, 0x5c80, 0x5c90, + 0x5fc0, 0x6000, 0x6400, 0x6428, + 0x6800, 0x6848, 0x684c, 0x6860, + 0x6888, 0x6910, 0x8000 + }; + + regs->version = 0; + + memset(p, 0, BNX2_REGDUMP_LEN); + + if (!netif_running(bp->dev)) + return; + + i = 0; + offset = reg_boundaries[0]; + p += offset; + while (offset < BNX2_REGDUMP_LEN) { + *p++ = REG_RD(bp, offset); + offset += 4; + if (offset == reg_boundaries[i + 1]) { + offset = reg_boundaries[i + 2]; + p = (u32 *) (orig_p + offset); + i += 2; + } + } +} + +static void +bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (bp->flags & BNX2_FLAG_NO_WOL) { + wol->supported = 0; + wol->wolopts = 0; + } + else { + wol->supported = WAKE_MAGIC; + if (bp->wol) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + } + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int +bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo 
*wol) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGIC) { + if (bp->flags & BNX2_FLAG_NO_WOL) + return -EINVAL; + + bp->wol = 1; + } + else { + bp->wol = 0; + } + return 0; +} + +static int +bnx2_nway_reset(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + u32 bmcr; + + if (!netif_running(dev)) + return -EAGAIN; + + if (!(bp->autoneg & AUTONEG_SPEED)) { + return -EINVAL; + } + + spin_lock_bh(&bp->phy_lock); + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { + int rc; + + rc = bnx2_setup_remote_phy(bp, bp->phy_port); + spin_unlock_bh(&bp->phy_lock); + return rc; + } + + /* Force a link down visible on the other side */ + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); + spin_unlock_bh(&bp->phy_lock); + + msleep(20); + + spin_lock_bh(&bp->phy_lock); + + bp->current_interval = BNX2_SERDES_AN_TIMEOUT; + bp->serdes_an_pending = 1; + mod_timer(&bp->timer, jiffies + bp->current_interval); + } + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + bmcr &= ~BMCR_LOOPBACK; + bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); + + spin_unlock_bh(&bp->phy_lock); + + return 0; +} + +static u32 +bnx2_get_link(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + + return bp->link_up; +} + +static int +bnx2_get_eeprom_len(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (bp->flash_info == NULL) + return 0; + + return (int) bp->flash_size; +} + +static int +bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *eebuf) +{ + struct bnx2 *bp = netdev_priv(dev); + int rc; + + if (!netif_running(dev)) + return -EAGAIN; + + /* parameters already validated in ethtool_get_eeprom */ + + rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); + + return rc; +} + +static int +bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *eebuf) +{ + struct bnx2 *bp = netdev_priv(dev); + int rc; + + if (!netif_running(dev)) + return -EAGAIN; + + /* parameters already validated in ethtool_set_eeprom */ + + rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); + + return rc; +} + +static int +bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +{ + struct bnx2 *bp = netdev_priv(dev); + + memset(coal, 0, sizeof(struct ethtool_coalesce)); + + coal->rx_coalesce_usecs = bp->rx_ticks; + coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip; + coal->rx_coalesce_usecs_irq = bp->rx_ticks_int; + coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int; + + coal->tx_coalesce_usecs = bp->tx_ticks; + coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip; + coal->tx_coalesce_usecs_irq = bp->tx_ticks_int; + coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int; + + coal->stats_block_coalesce_usecs = bp->stats_ticks; + + return 0; +} + +static int +bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +{ + struct bnx2 *bp = netdev_priv(dev); + + bp->rx_ticks = (u16) coal->rx_coalesce_usecs; + if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff; + + bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; + if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff; + + bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq; + if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff; + + bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq; + if (bp->rx_quick_cons_trip_int > 0xff) + bp->rx_quick_cons_trip_int = 0xff; 
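+	/*
+	 * The clamps bound each parameter to what is presumably the width of
+	 * the corresponding HC register field: tick values to 10 bits (0x3ff)
+	 * and frame-count trip points to 8 bits (0xff); the TX side below is
+	 * limited the same way.
+	 */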
+ + bp->tx_ticks = (u16) coal->tx_coalesce_usecs; + if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff; + + bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames; + if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff; + + bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq; + if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff; + + bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq; + if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = + 0xff; + + bp->stats_ticks = coal->stats_block_coalesce_usecs; + if (bp->flags & BNX2_FLAG_BROKEN_STATS) { + if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC) + bp->stats_ticks = USEC_PER_SEC; + } + if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS) + bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS; + bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; + + if (netif_running(bp->dev)) { + bnx2_netif_stop(bp, true); + bnx2_init_nic(bp, 0); + bnx2_netif_start(bp, true); + } + + return 0; +} + +static void +bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct bnx2 *bp = netdev_priv(dev); + + ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT; + + ering->rx_pending = bp->rx_ring_size; + ering->rx_mini_pending = 0; + ering->rx_jumbo_pending = bp->rx_pg_ring_size; + + ering->tx_max_pending = MAX_TX_DESC_CNT; + ering->tx_pending = bp->tx_ring_size; +} + +static int +bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) +{ + if (netif_running(bp->dev)) { + /* Reset will erase chipset stats; save them */ + bnx2_save_stats(bp); + + bnx2_netif_stop(bp, true); + bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); + __bnx2_free_irq(bp); + bnx2_free_skbs(bp); + bnx2_free_mem(bp); + } + + bnx2_set_rx_ring_size(bp, rx); + bp->tx_ring_size = tx; + + if (netif_running(bp->dev)) { + int rc; + + rc = bnx2_alloc_mem(bp); + if (!rc) + rc = bnx2_request_irq(bp); + + if (!rc) + rc = bnx2_init_nic(bp, 0); + + if (rc) { + bnx2_napi_enable(bp); + dev_close(bp->dev); + return rc; + } +#ifdef BCM_CNIC + mutex_lock(&bp->cnic_lock); + /* Let cnic know about the new status block. 
*/ + if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) + bnx2_setup_cnic_irq_info(bp); + mutex_unlock(&bp->cnic_lock); +#endif + bnx2_netif_start(bp, true); + } + return 0; +} + +static int +bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct bnx2 *bp = netdev_priv(dev); + int rc; + + if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) || + (ering->tx_pending > MAX_TX_DESC_CNT) || + (ering->tx_pending <= MAX_SKB_FRAGS)) { + + return -EINVAL; + } + rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending); + return rc; +} + +static void +bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + struct bnx2 *bp = netdev_priv(dev); + + epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0); + epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0); + epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0); +} + +static int +bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + struct bnx2 *bp = netdev_priv(dev); + + bp->req_flow_ctrl = 0; + if (epause->rx_pause) + bp->req_flow_ctrl |= FLOW_CTRL_RX; + if (epause->tx_pause) + bp->req_flow_ctrl |= FLOW_CTRL_TX; + + if (epause->autoneg) { + bp->autoneg |= AUTONEG_FLOW_CTRL; + } + else { + bp->autoneg &= ~AUTONEG_FLOW_CTRL; + } + + if (netif_running(dev)) { + spin_lock_bh(&bp->phy_lock); + bnx2_setup_phy(bp, bp->phy_port); + spin_unlock_bh(&bp->phy_lock); + } + + return 0; +} + +static struct { + char string[ETH_GSTRING_LEN]; +} bnx2_stats_str_arr[] = { + { "rx_bytes" }, + { "rx_error_bytes" }, + { "tx_bytes" }, + { "tx_error_bytes" }, + { "rx_ucast_packets" }, + { "rx_mcast_packets" }, + { "rx_bcast_packets" }, + { "tx_ucast_packets" }, + { "tx_mcast_packets" }, + { "tx_bcast_packets" }, + { "tx_mac_errors" }, + { "tx_carrier_errors" }, + { "rx_crc_errors" }, + { "rx_align_errors" }, + { "tx_single_collisions" }, + { "tx_multi_collisions" }, + { "tx_deferred" }, + { "tx_excess_collisions" }, + { "tx_late_collisions" }, + { "tx_total_collisions" }, + { "rx_fragments" }, + { "rx_jabbers" }, + { "rx_undersize_packets" }, + { "rx_oversize_packets" }, + { "rx_64_byte_packets" }, + { "rx_65_to_127_byte_packets" }, + { "rx_128_to_255_byte_packets" }, + { "rx_256_to_511_byte_packets" }, + { "rx_512_to_1023_byte_packets" }, + { "rx_1024_to_1522_byte_packets" }, + { "rx_1523_to_9022_byte_packets" }, + { "tx_64_byte_packets" }, + { "tx_65_to_127_byte_packets" }, + { "tx_128_to_255_byte_packets" }, + { "tx_256_to_511_byte_packets" }, + { "tx_512_to_1023_byte_packets" }, + { "tx_1024_to_1522_byte_packets" }, + { "tx_1523_to_9022_byte_packets" }, + { "rx_xon_frames" }, + { "rx_xoff_frames" }, + { "tx_xon_frames" }, + { "tx_xoff_frames" }, + { "rx_mac_ctrl_frames" }, + { "rx_filtered_packets" }, + { "rx_ftq_discards" }, + { "rx_discards" }, + { "rx_fw_discards" }, +}; + +#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\ + sizeof(bnx2_stats_str_arr[0])) + +#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) + +static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { + STATS_OFFSET32(stat_IfHCInOctets_hi), + STATS_OFFSET32(stat_IfHCInBadOctets_hi), + STATS_OFFSET32(stat_IfHCOutOctets_hi), + STATS_OFFSET32(stat_IfHCOutBadOctets_hi), + STATS_OFFSET32(stat_IfHCInUcastPkts_hi), + STATS_OFFSET32(stat_IfHCInMulticastPkts_hi), + STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi), + STATS_OFFSET32(stat_IfHCOutUcastPkts_hi), + STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi), + STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi), + 
STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors), + STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), + STATS_OFFSET32(stat_Dot3StatsFCSErrors), + STATS_OFFSET32(stat_Dot3StatsAlignmentErrors), + STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames), + STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames), + STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), + STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions), + STATS_OFFSET32(stat_Dot3StatsLateCollisions), + STATS_OFFSET32(stat_EtherStatsCollisions), + STATS_OFFSET32(stat_EtherStatsFragments), + STATS_OFFSET32(stat_EtherStatsJabbers), + STATS_OFFSET32(stat_EtherStatsUndersizePkts), + STATS_OFFSET32(stat_EtherStatsOverrsizePkts), + STATS_OFFSET32(stat_EtherStatsPktsRx64Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx64Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets), + STATS_OFFSET32(stat_XonPauseFramesReceived), + STATS_OFFSET32(stat_XoffPauseFramesReceived), + STATS_OFFSET32(stat_OutXonSent), + STATS_OFFSET32(stat_OutXoffSent), + STATS_OFFSET32(stat_MacControlFramesReceived), + STATS_OFFSET32(stat_IfInFramesL2FilterDiscards), + STATS_OFFSET32(stat_IfInFTQDiscards), + STATS_OFFSET32(stat_IfInMBUFDiscards), + STATS_OFFSET32(stat_FwRxDrop), +}; + +/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are + * skipped because of errata. 
+ */ +static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = { + 8,0,8,8,8,8,8,8,8,8, + 4,0,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4, +}; + +static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = { + 8,0,8,8,8,8,8,8,8,8, + 4,4,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4, +}; + +#define BNX2_NUM_TESTS 6 + +static struct { + char string[ETH_GSTRING_LEN]; +} bnx2_tests_str_arr[BNX2_NUM_TESTS] = { + { "register_test (offline)" }, + { "memory_test (offline)" }, + { "loopback_test (offline)" }, + { "nvram_test (online)" }, + { "interrupt_test (online)" }, + { "link_test (online)" }, +}; + +static int +bnx2_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return BNX2_NUM_TESTS; + case ETH_SS_STATS: + return BNX2_NUM_STATS; + default: + return -EOPNOTSUPP; + } +} + +static void +bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) +{ + struct bnx2 *bp = netdev_priv(dev); + + bnx2_set_power_state(bp, PCI_D0); + + memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS); + if (etest->flags & ETH_TEST_FL_OFFLINE) { + int i; + + bnx2_netif_stop(bp, true); + bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); + bnx2_free_skbs(bp); + + if (bnx2_test_registers(bp) != 0) { + buf[0] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (bnx2_test_memory(bp) != 0) { + buf[1] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if ((buf[2] = bnx2_test_loopback(bp)) != 0) + etest->flags |= ETH_TEST_FL_FAILED; + + if (!netif_running(bp->dev)) + bnx2_shutdown_chip(bp); + else { + bnx2_init_nic(bp, 1); + bnx2_netif_start(bp, true); + } + + /* wait for link up */ + for (i = 0; i < 7; i++) { + if (bp->link_up) + break; + msleep_interruptible(1000); + } + } + + if (bnx2_test_nvram(bp) != 0) { + buf[3] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (bnx2_test_intr(bp) != 0) { + buf[4] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (bnx2_test_link(bp) != 0) { + buf[5] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + + } + if (!netif_running(bp->dev)) + bnx2_set_power_state(bp, PCI_D3hot); +} + +static void +bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, bnx2_stats_str_arr, + sizeof(bnx2_stats_str_arr)); + break; + case ETH_SS_TEST: + memcpy(buf, bnx2_tests_str_arr, + sizeof(bnx2_tests_str_arr)); + break; + } +} + +static void +bnx2_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *buf) +{ + struct bnx2 *bp = netdev_priv(dev); + int i; + u32 *hw_stats = (u32 *) bp->stats_blk; + u32 *temp_stats = (u32 *) bp->temp_stats_blk; + u8 *stats_len_arr = NULL; + + if (hw_stats == NULL) { + memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); + return; + } + + if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || + (CHIP_ID(bp) == CHIP_ID_5706_A1) || + (CHIP_ID(bp) == CHIP_ID_5706_A2) || + (CHIP_ID(bp) == CHIP_ID_5708_A0)) + stats_len_arr = bnx2_5706_stats_len_arr; + else + stats_len_arr = bnx2_5708_stats_len_arr; + + for (i = 0; i < BNX2_NUM_STATS; i++) { + unsigned long offset; + + if (stats_len_arr[i] == 0) { + /* skip this counter */ + buf[i] = 0; + continue; + } + + offset = bnx2_stats_offset_arr[i]; + if (stats_len_arr[i] == 4) { + /* 4-byte counter */ + buf[i] = (u64) *(hw_stats + offset) + + *(temp_stats + offset); + continue; + } + /* 8-byte counter */ + buf[i] = (((u64) *(hw_stats + offset)) << 32) + + *(hw_stats + offset + 1) + + (((u64) *(temp_stats + offset)) << 32) + + *(temp_stats + offset + 1); + } +} + +static 
int +bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) +{ + struct bnx2 *bp = netdev_priv(dev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + bnx2_set_power_state(bp, PCI_D0); + + bp->leds_save = REG_RD(bp, BNX2_MISC_CFG); + REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | + BNX2_EMAC_LED_1000MB_OVERRIDE | + BNX2_EMAC_LED_100MB_OVERRIDE | + BNX2_EMAC_LED_10MB_OVERRIDE | + BNX2_EMAC_LED_TRAFFIC_OVERRIDE | + BNX2_EMAC_LED_TRAFFIC); + break; + + case ETHTOOL_ID_OFF: + REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); + break; + + case ETHTOOL_ID_INACTIVE: + REG_WR(bp, BNX2_EMAC_LED, 0); + REG_WR(bp, BNX2_MISC_CFG, bp->leds_save); + + if (!netif_running(dev)) + bnx2_set_power_state(bp, PCI_D3hot); + break; + } + + return 0; +} + +static u32 +bnx2_fix_features(struct net_device *dev, u32 features) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) + features |= NETIF_F_HW_VLAN_RX; + + return features; +} + +static int +bnx2_set_features(struct net_device *dev, u32 features) +{ + struct bnx2 *bp = netdev_priv(dev); + + /* TSO with VLAN tag won't work with current firmware */ + if (features & NETIF_F_HW_VLAN_TX) + dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO); + else + dev->vlan_features &= ~NETIF_F_ALL_TSO; + + if ((!!(features & NETIF_F_HW_VLAN_RX) != + !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) && + netif_running(dev)) { + bnx2_netif_stop(bp, false); + dev->features = features; + bnx2_set_rx_mode(dev); + bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); + bnx2_netif_start(bp, false); + return 1; + } + + return 0; +} + +static const struct ethtool_ops bnx2_ethtool_ops = { + .get_settings = bnx2_get_settings, + .set_settings = bnx2_set_settings, + .get_drvinfo = bnx2_get_drvinfo, + .get_regs_len = bnx2_get_regs_len, + .get_regs = bnx2_get_regs, + .get_wol = bnx2_get_wol, + .set_wol = bnx2_set_wol, + .nway_reset = bnx2_nway_reset, + .get_link = bnx2_get_link, + .get_eeprom_len = bnx2_get_eeprom_len, + .get_eeprom = bnx2_get_eeprom, + .set_eeprom = bnx2_set_eeprom, + .get_coalesce = bnx2_get_coalesce, + .set_coalesce = bnx2_set_coalesce, + .get_ringparam = bnx2_get_ringparam, + .set_ringparam = bnx2_set_ringparam, + .get_pauseparam = bnx2_get_pauseparam, + .set_pauseparam = bnx2_set_pauseparam, + .self_test = bnx2_self_test, + .get_strings = bnx2_get_strings, + .set_phys_id = bnx2_set_phys_id, + .get_ethtool_stats = bnx2_get_ethtool_stats, + .get_sset_count = bnx2_get_sset_count, +}; + +/* Called with rtnl_lock */ +static int +bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct bnx2 *bp = netdev_priv(dev); + int err; + + switch(cmd) { + case SIOCGMIIPHY: + data->phy_id = bp->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u32 mii_regval; + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return -EOPNOTSUPP; + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&bp->phy_lock); + err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval); + spin_unlock_bh(&bp->phy_lock); + + data->val_out = mii_regval; + + return err; + } + + case SIOCSMIIREG: + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return -EOPNOTSUPP; + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&bp->phy_lock); + err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in); + spin_unlock_bh(&bp->phy_lock); 
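+		/*
+		 * Both MII paths above mask reg_num with 0x1f: clause-22
+		 * MDIO can address at most 32 registers per PHY.
+		 */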
+ + return err; + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; +} + +/* Called with rtnl_lock */ +static int +bnx2_change_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct bnx2 *bp = netdev_priv(dev); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + if (netif_running(dev)) + bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); + + return 0; +} + +/* Called with rtnl_lock */ +static int +bnx2_change_mtu(struct net_device *dev, int new_mtu) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || + ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE)) + return -EINVAL; + + dev->mtu = new_mtu; + return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void +poll_bnx2(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + int i; + + for (i = 0; i < bp->irq_nvecs; i++) { + struct bnx2_irq *irq = &bp->irq_tbl[i]; + + disable_irq(irq->vector); + irq->handler(irq->vector, &bp->bnx2_napi[i]); + enable_irq(irq->vector); + } +} +#endif + +static void __devinit +bnx2_get_5709_media(struct bnx2 *bp) +{ + u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); + u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID; + u32 strap; + + if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) + return; + else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { + bp->phy_flags |= BNX2_PHY_FLAG_SERDES; + return; + } + + if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) + strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; + else + strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; + + if (PCI_FUNC(bp->pdev->devfn) == 0) { + switch (strap) { + case 0x4: + case 0x5: + case 0x6: + bp->phy_flags |= BNX2_PHY_FLAG_SERDES; + return; + } + } else { + switch (strap) { + case 0x1: + case 0x2: + case 0x4: + bp->phy_flags |= BNX2_PHY_FLAG_SERDES; + return; + } + } +} + +static void __devinit +bnx2_get_pci_speed(struct bnx2 *bp) +{ + u32 reg; + + reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS); + if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) { + u32 clkreg; + + bp->flags |= BNX2_FLAG_PCIX; + + clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); + + clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; + switch (clkreg) { + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: + bp->bus_speed_mhz = 133; + break; + + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: + bp->bus_speed_mhz = 100; + break; + + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: + bp->bus_speed_mhz = 66; + break; + + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: + bp->bus_speed_mhz = 50; + break; + + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: + bp->bus_speed_mhz = 33; + break; + } + } + else { + if (reg & BNX2_PCICFG_MISC_STATUS_M66EN) + bp->bus_speed_mhz = 66; + else + bp->bus_speed_mhz = 33; + } + + if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET) + bp->flags |= BNX2_FLAG_PCI_32BIT; + +} + +static void __devinit +bnx2_read_vpd_fw_ver(struct bnx2 *bp) +{ + int rc, i, j; + u8 *data; + unsigned int block_end, rosize, len; + +#define BNX2_VPD_NVRAM_OFFSET 0x300 +#define 
BNX2_VPD_LEN 128 +#define BNX2_MAX_VER_SLEN 30 + + data = kmalloc(256, GFP_KERNEL); + if (!data) + return; + + rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN, + BNX2_VPD_LEN); + if (rc) + goto vpd_done; + + for (i = 0; i < BNX2_VPD_LEN; i += 4) { + data[i] = data[i + BNX2_VPD_LEN + 3]; + data[i + 1] = data[i + BNX2_VPD_LEN + 2]; + data[i + 2] = data[i + BNX2_VPD_LEN + 1]; + data[i + 3] = data[i + BNX2_VPD_LEN]; + } + + i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto vpd_done; + + rosize = pci_vpd_lrdt_size(&data[i]); + i += PCI_VPD_LRDT_TAG_SIZE; + block_end = i + rosize; + + if (block_end > BNX2_VPD_LEN) + goto vpd_done; + + j = pci_vpd_find_info_keyword(data, i, rosize, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (j < 0) + goto vpd_done; + + len = pci_vpd_info_field_size(&data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end || len != 4 || + memcmp(&data[j], "1028", 4)) + goto vpd_done; + + j = pci_vpd_find_info_keyword(data, i, rosize, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (j < 0) + goto vpd_done; + + len = pci_vpd_info_field_size(&data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end || len > BNX2_MAX_VER_SLEN) + goto vpd_done; + + memcpy(bp->fw_version, &data[j], len); + bp->fw_version[len] = ' '; + +vpd_done: + kfree(data); +} + +static int __devinit +bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) +{ + struct bnx2 *bp; + unsigned long mem_len; + int rc, i, j; + u32 reg; + u64 dma_mask, persist_dma_mask; + int err; + + SET_NETDEV_DEV(dev, &pdev->dev); + bp = netdev_priv(dev); + + bp->flags = 0; + bp->phy_flags = 0; + + bp->temp_stats_blk = + kzalloc(sizeof(struct statistics_block), GFP_KERNEL); + + if (bp->temp_stats_blk == NULL) { + rc = -ENOMEM; + goto err_out; + } + + /* enable device (incl. PCI PM wakeup), and bus-mastering */ + rc = pci_enable_device(pdev); + if (rc) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + goto err_out; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, + "Cannot find PCI device base address, aborting\n"); + rc = -ENODEV; + goto err_out_disable; + } + + rc = pci_request_regions(pdev, DRV_MODULE_NAME); + if (rc) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable; + } + + pci_set_master(pdev); + + bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (bp->pm_cap == 0) { + dev_err(&pdev->dev, + "Cannot find power management capability, aborting\n"); + rc = -EIO; + goto err_out_release; + } + + bp->dev = dev; + bp->pdev = pdev; + + spin_lock_init(&bp->phy_lock); + spin_lock_init(&bp->indirect_lock); +#ifdef BCM_CNIC + mutex_init(&bp->cnic_lock); +#endif + INIT_WORK(&bp->reset_task, bnx2_reset_task); + + dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); + mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1); + dev->mem_end = dev->mem_start + mem_len; + dev->irq = pdev->irq; + + bp->regview = ioremap_nocache(dev->base_addr, mem_len); + + if (!bp->regview) { + dev_err(&pdev->dev, "Cannot map register space, aborting\n"); + rc = -ENOMEM; + goto err_out_release; + } + + bnx2_set_power_state(bp, PCI_D0); + + /* Configure byte swap and enable write to the reg_window registers. 
+ * Rely on CPU to do target byte swapping on big endian systems + * The chip's target access swapping will not swap all accesses + */ + REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, + BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | + BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); + + bp->chip_id = REG_RD(bp, BNX2_MISC_ID); + + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (!pci_is_pcie(pdev)) { + dev_err(&pdev->dev, "Not PCIE, aborting\n"); + rc = -EIO; + goto err_out_unmap; + } + bp->flags |= BNX2_FLAG_PCIE; + if (CHIP_REV(bp) == CHIP_REV_Ax) + bp->flags |= BNX2_FLAG_JUMBO_BROKEN; + + /* AER (Advanced Error Reporting) hooks */ + err = pci_enable_pcie_error_reporting(pdev); + if (!err) + bp->flags |= BNX2_FLAG_AER_ENABLED; + + } else { + bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); + if (bp->pcix_cap == 0) { + dev_err(&pdev->dev, + "Cannot find PCIX capability, aborting\n"); + rc = -EIO; + goto err_out_unmap; + } + bp->flags |= BNX2_FLAG_BROKEN_STATS; + } + + if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) { + if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) + bp->flags |= BNX2_FLAG_MSIX_CAP; + } + + if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) { + if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) + bp->flags |= BNX2_FLAG_MSI_CAP; + } + + /* 5708 cannot support DMA addresses > 40-bit. */ + if (CHIP_NUM(bp) == CHIP_NUM_5708) + persist_dma_mask = dma_mask = DMA_BIT_MASK(40); + else + persist_dma_mask = dma_mask = DMA_BIT_MASK(64); + + /* Configure DMA attributes. */ + if (pci_set_dma_mask(pdev, dma_mask) == 0) { + dev->features |= NETIF_F_HIGHDMA; + rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask); + if (rc) { + dev_err(&pdev->dev, + "pci_set_consistent_dma_mask failed, aborting\n"); + goto err_out_unmap; + } + } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + dev_err(&pdev->dev, "System does not support DMA, aborting\n"); + goto err_out_unmap; + } + + if (!(bp->flags & BNX2_FLAG_PCIE)) + bnx2_get_pci_speed(bp); + + /* 5706A0 may falsely detect SERR and PERR. */ + if (CHIP_ID(bp) == CHIP_ID_5706_A0) { + reg = REG_RD(bp, PCI_COMMAND); + reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); + REG_WR(bp, PCI_COMMAND, reg); + } + else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) && + !(bp->flags & BNX2_FLAG_PCIX)) { + + dev_err(&pdev->dev, + "5706 A1 can only be used in a PCIX bus, aborting\n"); + goto err_out_unmap; + } + + bnx2_init_nvram(bp); + + reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE); + + if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) == + BNX2_SHM_HDR_SIGNATURE_SIG) { + u32 off = PCI_FUNC(pdev->devfn) << 2; + + bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off); + } else + bp->shmem_base = HOST_VIEW_SHMEM_BASE; + + /* Get the permanent MAC address. First we need to make sure the + * firmware is actually running. 
+ */ + reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE); + + if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) != + BNX2_DEV_INFO_SIGNATURE_MAGIC) { + dev_err(&pdev->dev, "Firmware not running, aborting\n"); + rc = -ENODEV; + goto err_out_unmap; + } + + bnx2_read_vpd_fw_ver(bp); + + j = strlen(bp->fw_version); + reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV); + for (i = 0; i < 3 && j < 24; i++) { + u8 num, k, skip0; + + if (i == 0) { + bp->fw_version[j++] = 'b'; + bp->fw_version[j++] = 'c'; + bp->fw_version[j++] = ' '; + } + num = (u8) (reg >> (24 - (i * 8))); + for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { + if (num >= k || !skip0 || k == 1) { + bp->fw_version[j++] = (num / k) + '0'; + skip0 = 0; + } + } + if (i != 2) + bp->fw_version[j++] = '.'; + } + reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); + if (reg & BNX2_PORT_FEATURE_WOL_ENABLED) + bp->wol = 1; + + if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) { + bp->flags |= BNX2_FLAG_ASF_ENABLE; + + for (i = 0; i < 30; i++) { + reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); + if (reg & BNX2_CONDITION_MFW_RUN_MASK) + break; + msleep(10); + } + } + reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); + reg &= BNX2_CONDITION_MFW_RUN_MASK; + if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN && + reg != BNX2_CONDITION_MFW_RUN_NONE) { + u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR); + + if (j < 32) + bp->fw_version[j++] = ' '; + for (i = 0; i < 3 && j < 28; i++) { + reg = bnx2_reg_rd_ind(bp, addr + i * 4); + reg = be32_to_cpu(reg); + memcpy(&bp->fw_version[j], &reg, 4); + j += 4; + } + } + + reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER); + bp->mac_addr[0] = (u8) (reg >> 8); + bp->mac_addr[1] = (u8) reg; + + reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER); + bp->mac_addr[2] = (u8) (reg >> 24); + bp->mac_addr[3] = (u8) (reg >> 16); + bp->mac_addr[4] = (u8) (reg >> 8); + bp->mac_addr[5] = (u8) reg; + + bp->tx_ring_size = MAX_TX_DESC_CNT; + bnx2_set_rx_ring_size(bp, 255); + + bp->tx_quick_cons_trip_int = 2; + bp->tx_quick_cons_trip = 20; + bp->tx_ticks_int = 18; + bp->tx_ticks = 80; + + bp->rx_quick_cons_trip_int = 2; + bp->rx_quick_cons_trip = 12; + bp->rx_ticks_int = 18; + bp->rx_ticks = 18; + + bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS; + + bp->current_interval = BNX2_TIMER_INTERVAL; + + bp->phy_addr = 1; + + /* Disable WOL support if we are running on a SERDES chip. */ + if (CHIP_NUM(bp) == CHIP_NUM_5709) + bnx2_get_5709_media(bp); + else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) + bp->phy_flags |= BNX2_PHY_FLAG_SERDES; + + bp->phy_port = PORT_TP; + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + bp->phy_port = PORT_FIBRE; + reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG); + if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) { + bp->flags |= BNX2_FLAG_NO_WOL; + bp->wol = 0; + } + if (CHIP_NUM(bp) == CHIP_NUM_5706) { + /* Don't do parallel detect on this board because of + * some board problems. The link will not go down + * if we do parallel detect.
+ */ + if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP && + pdev->subsystem_device == 0x310c) + bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL; + } else { + bp->phy_addr = 2; + if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) + bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE; + } + } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || + CHIP_NUM(bp) == CHIP_NUM_5708) + bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX; + else if (CHIP_NUM(bp) == CHIP_NUM_5709 && + (CHIP_REV(bp) == CHIP_REV_Ax || + CHIP_REV(bp) == CHIP_REV_Bx)) + bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC; + + bnx2_init_fw_cap(bp); + + if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || + (CHIP_ID(bp) == CHIP_ID_5708_B0) || + (CHIP_ID(bp) == CHIP_ID_5708_B1) || + !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) { + bp->flags |= BNX2_FLAG_NO_WOL; + bp->wol = 0; + } + + if (CHIP_ID(bp) == CHIP_ID_5706_A0) { + bp->tx_quick_cons_trip_int = + bp->tx_quick_cons_trip; + bp->tx_ticks_int = bp->tx_ticks; + bp->rx_quick_cons_trip_int = + bp->rx_quick_cons_trip; + bp->rx_ticks_int = bp->rx_ticks; + bp->comp_prod_trip_int = bp->comp_prod_trip; + bp->com_ticks_int = bp->com_ticks; + bp->cmd_ticks_int = bp->cmd_ticks; + } + + /* Disable MSI on 5706 if AMD 8132 bridge is found. + * + * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes + * with byte enables disabled on the unused 32-bit word. This is legal + * but causes problems on the AMD 8132 which will eventually stop + * responding after a while. + * + * AMD believes this incompatibility is unique to the 5706, and + * prefers to locally disable MSI rather than globally disabling it. + */ + if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) { + struct pci_dev *amd_8132 = NULL; + + while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_AMD_8132_BRIDGE, + amd_8132))) { + + if (amd_8132->revision >= 0x10 && + amd_8132->revision <= 0x13) { + disable_msi = 1; + pci_dev_put(amd_8132); + break; + } + } + } + + bnx2_set_default_link(bp); + bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; + + init_timer(&bp->timer); + bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL); + bp->timer.data = (unsigned long) bp; + bp->timer.function = bnx2_timer; + +#ifdef BCM_CNIC + if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN) + bp->cnic_eth_dev.max_iscsi_conn = + (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) & + BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT; +#endif + pci_save_state(pdev); + + return 0; + +err_out_unmap: + if (bp->flags & BNX2_FLAG_AER_ENABLED) { + pci_disable_pcie_error_reporting(pdev); + bp->flags &= ~BNX2_FLAG_AER_ENABLED; + } + + if (bp->regview) { + iounmap(bp->regview); + bp->regview = NULL; + } + +err_out_release: + pci_release_regions(pdev); + +err_out_disable: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + +err_out: + return rc; +} + +static char * __devinit +bnx2_bus_string(struct bnx2 *bp, char *str) +{ + char *s = str; + + if (bp->flags & BNX2_FLAG_PCIE) { + s += sprintf(s, "PCI Express"); + } else { + s += sprintf(s, "PCI"); + if (bp->flags & BNX2_FLAG_PCIX) + s += sprintf(s, "-X"); + if (bp->flags & BNX2_FLAG_PCI_32BIT) + s += sprintf(s, " 32-bit"); + else + s += sprintf(s, " 64-bit"); + s += sprintf(s, " %dMHz", bp->bus_speed_mhz); + } + return str; +} + +static void +bnx2_del_napi(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->irq_nvecs; i++) + netif_napi_del(&bp->bnx2_napi[i].napi); +} + +static void +bnx2_init_napi(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->irq_nvecs; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + 
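[bnx2_init_napi(), continuing below, attaches one NAPI context per interrupt vector — bnx2_poll for vector 0, bnx2_poll_msix for the rest — each with a weight (budget) of 64. The contract netif_napi_add() expects from such a callback looks roughly like the following generic sketch; reap_rx() is a hypothetical helper, not a bnx2 function:

#include <linux/netdevice.h>

/* hypothetical helper: process up to 'budget' completed descriptors
 * and return how many were handled */
static int reap_rx(struct napi_struct *napi, int budget);

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = reap_rx(napi, budget);

	if (work_done < budget) {
		/* all pending work drained: leave polling mode and
		 * allow this vector's interrupt to fire again */
		napi_complete(napi);
		/* ...re-enable/ack the hardware interrupt here... */
	}
	return work_done;	/* must never exceed 'budget' */
}
]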
int (*poll)(struct napi_struct *, int); + + if (i == 0) + poll = bnx2_poll; + else + poll = bnx2_poll_msix; + + netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64); + bnapi->bp = bp; + } +} + +static const struct net_device_ops bnx2_netdev_ops = { + .ndo_open = bnx2_open, + .ndo_start_xmit = bnx2_start_xmit, + .ndo_stop = bnx2_close, + .ndo_get_stats64 = bnx2_get_stats64, + .ndo_set_rx_mode = bnx2_set_rx_mode, + .ndo_do_ioctl = bnx2_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = bnx2_change_mac_addr, + .ndo_change_mtu = bnx2_change_mtu, + .ndo_fix_features = bnx2_fix_features, + .ndo_set_features = bnx2_set_features, + .ndo_tx_timeout = bnx2_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_bnx2, +#endif +}; + +static int __devinit +bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int version_printed = 0; + struct net_device *dev = NULL; + struct bnx2 *bp; + int rc; + char str[40]; + + if (version_printed++ == 0) + pr_info("%s", version); + + /* dev zeroed in init_etherdev */ + dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS); + + if (!dev) + return -ENOMEM; + + rc = bnx2_init_board(pdev, dev); + if (rc < 0) { + free_netdev(dev); + return rc; + } + + dev->netdev_ops = &bnx2_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + dev->ethtool_ops = &bnx2_ethtool_ops; + + bp = netdev_priv(dev); + + pci_set_drvdata(pdev, dev); + + rc = bnx2_request_firmware(bp); + if (rc) + goto error; + + memcpy(dev->dev_addr, bp->mac_addr, 6); + memcpy(dev->perm_addr, bp->mac_addr, 6); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO_ECN | + NETIF_F_RXHASH | NETIF_F_RXCSUM; + + if (CHIP_NUM(bp) == CHIP_NUM_5709) + dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + + dev->vlan_features = dev->hw_features; + dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->features |= dev->hw_features; + + if ((rc = register_netdev(dev))) { + dev_err(&pdev->dev, "Cannot register net device\n"); + goto error; + } + + netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n", + board_info[ent->driver_data].name, + ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', + ((CHIP_ID(bp) & 0x0ff0) >> 4), + bnx2_bus_string(bp, str), + dev->base_addr, + bp->pdev->irq, dev->dev_addr); + + return 0; + +error: + if (bp->mips_firmware) + release_firmware(bp->mips_firmware); + if (bp->rv2p_firmware) + release_firmware(bp->rv2p_firmware); + + if (bp->regview) + iounmap(bp->regview); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(dev); + return rc; +} + +static void __devexit +bnx2_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + + unregister_netdev(dev); + + del_timer_sync(&bp->timer); + cancel_work_sync(&bp->reset_task); + + if (bp->mips_firmware) + release_firmware(bp->mips_firmware); + if (bp->rv2p_firmware) + release_firmware(bp->rv2p_firmware); + + if (bp->regview) + iounmap(bp->regview); + + kfree(bp->temp_stats_blk); + + if (bp->flags & BNX2_FLAG_AER_ENABLED) { + pci_disable_pcie_error_reporting(pdev); + bp->flags &= ~BNX2_FLAG_AER_ENABLED; + } + + free_netdev(dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int +bnx2_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + + /* PCI register 4 needs to be saved whether 
netif_running() or not. + * MSI address and data need to be saved if using MSI and + * netif_running(). + */ + pci_save_state(pdev); + if (!netif_running(dev)) + return 0; + + cancel_work_sync(&bp->reset_task); + bnx2_netif_stop(bp, true); + netif_device_detach(dev); + del_timer_sync(&bp->timer); + bnx2_shutdown_chip(bp); + bnx2_free_skbs(bp); + bnx2_set_power_state(bp, pci_choose_state(pdev, state)); + return 0; +} + +static int +bnx2_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + + pci_restore_state(pdev); + if (!netif_running(dev)) + return 0; + + bnx2_set_power_state(bp, PCI_D0); + netif_device_attach(dev); + bnx2_init_nic(bp, 1); + bnx2_netif_start(bp, true); + return 0; +} + +/** + * bnx2_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + + rtnl_lock(); + netif_device_detach(dev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(dev)) { + bnx2_netif_stop(bp, true); + del_timer_sync(&bp->timer); + bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); + } + + pci_disable_device(pdev); + rtnl_unlock(); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * bnx2_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + pci_ers_result_t result; + int err; + + rtnl_lock(); + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (netif_running(dev)) { + bnx2_set_power_state(bp, PCI_D0); + bnx2_init_nic(bp, 1); + } + result = PCI_ERS_RESULT_RECOVERED; + } + rtnl_unlock(); + + if (!(bp->flags & BNX2_FLAG_AER_ENABLED)) + return result; + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_err(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", + err); /* non-fatal, continue */ + } + + return result; +} + +/** + * bnx2_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation.
+ */ +static void bnx2_io_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + + rtnl_lock(); + if (netif_running(dev)) + bnx2_netif_start(bp, true); + + netif_device_attach(dev); + rtnl_unlock(); +} + +static struct pci_error_handlers bnx2_err_handler = { + .error_detected = bnx2_io_error_detected, + .slot_reset = bnx2_io_slot_reset, + .resume = bnx2_io_resume, +}; + +static struct pci_driver bnx2_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = bnx2_pci_tbl, + .probe = bnx2_init_one, + .remove = __devexit_p(bnx2_remove_one), + .suspend = bnx2_suspend, + .resume = bnx2_resume, + .err_handler = &bnx2_err_handler, +}; + +static int __init bnx2_init(void) +{ + return pci_register_driver(&bnx2_pci_driver); +} + +static void __exit bnx2_cleanup(void) +{ + pci_unregister_driver(&bnx2_pci_driver); +} + +module_init(bnx2_init); +module_exit(bnx2_cleanup); + + + diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h new file mode 100644 index 0000000..fc50d42 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -0,0 +1,7388 @@ +/* bnx2.h: Broadcom NX2 network driver. + * + * Copyright (c) 2004-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Michael Chan (mchan@broadcom.com) + */ + + +#ifndef BNX2_H +#define BNX2_H + +/* Hardware data structures and register definitions automatically + * generated from RTL code. Do not modify. + */ + +/* + * tx_bd definition + */ +struct tx_bd { + u32 tx_bd_haddr_hi; + u32 tx_bd_haddr_lo; + u32 tx_bd_mss_nbytes; + #define TX_BD_TCP6_OFF2_SHL (14) + u32 tx_bd_vlan_tag_flags; + #define TX_BD_FLAGS_CONN_FAULT (1<<0) + #define TX_BD_FLAGS_TCP6_OFF0_MSK (3<<1) + #define TX_BD_FLAGS_TCP6_OFF0_SHL (1) + #define TX_BD_FLAGS_TCP_UDP_CKSUM (1<<1) + #define TX_BD_FLAGS_IP_CKSUM (1<<2) + #define TX_BD_FLAGS_VLAN_TAG (1<<3) + #define TX_BD_FLAGS_COAL_NOW (1<<4) + #define TX_BD_FLAGS_DONT_GEN_CRC (1<<5) + #define TX_BD_FLAGS_END (1<<6) + #define TX_BD_FLAGS_START (1<<7) + #define TX_BD_FLAGS_SW_OPTION_WORD (0x1f<<8) + #define TX_BD_FLAGS_TCP6_OFF4_SHL (12) + #define TX_BD_FLAGS_SW_FLAGS (1<<13) + #define TX_BD_FLAGS_SW_SNAP (1<<14) + #define TX_BD_FLAGS_SW_LSO (1<<15) + +}; + + +/* + * rx_bd definition + */ +struct rx_bd { + u32 rx_bd_haddr_hi; + u32 rx_bd_haddr_lo; + u32 rx_bd_len; + u32 rx_bd_flags; + #define RX_BD_FLAGS_NOPUSH (1<<0) + #define RX_BD_FLAGS_DUMMY (1<<1) + #define RX_BD_FLAGS_END (1<<2) + #define RX_BD_FLAGS_START (1<<3) + +}; + +#define BNX2_RX_ALIGN 16 + +/* + * status_block definition + */ +struct status_block { + u32 status_attn_bits; + #define STATUS_ATTN_BITS_LINK_STATE (1L<<0) + #define STATUS_ATTN_BITS_TX_SCHEDULER_ABORT (1L<<1) + #define STATUS_ATTN_BITS_TX_BD_READ_ABORT (1L<<2) + #define STATUS_ATTN_BITS_TX_BD_CACHE_ABORT (1L<<3) + #define STATUS_ATTN_BITS_TX_PROCESSOR_ABORT (1L<<4) + #define STATUS_ATTN_BITS_TX_DMA_ABORT (1L<<5) + #define STATUS_ATTN_BITS_TX_PATCHUP_ABORT (1L<<6) + #define STATUS_ATTN_BITS_TX_ASSEMBLER_ABORT (1L<<7) + #define STATUS_ATTN_BITS_RX_PARSER_MAC_ABORT (1L<<8) + #define STATUS_ATTN_BITS_RX_PARSER_CATCHUP_ABORT (1L<<9) + #define STATUS_ATTN_BITS_RX_MBUF_ABORT (1L<<10) + #define STATUS_ATTN_BITS_RX_LOOKUP_ABORT (1L<<11) + #define STATUS_ATTN_BITS_RX_PROCESSOR_ABORT (1L<<12) + #define STATUS_ATTN_BITS_RX_V2P_ABORT (1L<<13) + #define 
STATUS_ATTN_BITS_RX_BD_CACHE_ABORT (1L<<14) + #define STATUS_ATTN_BITS_RX_DMA_ABORT (1L<<15) + #define STATUS_ATTN_BITS_COMPLETION_ABORT (1L<<16) + #define STATUS_ATTN_BITS_HOST_COALESCE_ABORT (1L<<17) + #define STATUS_ATTN_BITS_MAILBOX_QUEUE_ABORT (1L<<18) + #define STATUS_ATTN_BITS_CONTEXT_ABORT (1L<<19) + #define STATUS_ATTN_BITS_CMD_SCHEDULER_ABORT (1L<<20) + #define STATUS_ATTN_BITS_CMD_PROCESSOR_ABORT (1L<<21) + #define STATUS_ATTN_BITS_MGMT_PROCESSOR_ABORT (1L<<22) + #define STATUS_ATTN_BITS_MAC_ABORT (1L<<23) + #define STATUS_ATTN_BITS_TIMER_ABORT (1L<<24) + #define STATUS_ATTN_BITS_DMAE_ABORT (1L<<25) + #define STATUS_ATTN_BITS_FLSH_ABORT (1L<<26) + #define STATUS_ATTN_BITS_GRC_ABORT (1L<<27) + #define STATUS_ATTN_BITS_EPB_ERROR (1L<<30) + #define STATUS_ATTN_BITS_PARITY_ERROR (1L<<31) + + u32 status_attn_bits_ack; +#if defined(__BIG_ENDIAN) + u16 status_tx_quick_consumer_index0; + u16 status_tx_quick_consumer_index1; + u16 status_tx_quick_consumer_index2; + u16 status_tx_quick_consumer_index3; + u16 status_rx_quick_consumer_index0; + u16 status_rx_quick_consumer_index1; + u16 status_rx_quick_consumer_index2; + u16 status_rx_quick_consumer_index3; + u16 status_rx_quick_consumer_index4; + u16 status_rx_quick_consumer_index5; + u16 status_rx_quick_consumer_index6; + u16 status_rx_quick_consumer_index7; + u16 status_rx_quick_consumer_index8; + u16 status_rx_quick_consumer_index9; + u16 status_rx_quick_consumer_index10; + u16 status_rx_quick_consumer_index11; + u16 status_rx_quick_consumer_index12; + u16 status_rx_quick_consumer_index13; + u16 status_rx_quick_consumer_index14; + u16 status_rx_quick_consumer_index15; + u16 status_completion_producer_index; + u16 status_cmd_consumer_index; + u16 status_idx; + u8 status_unused; + u8 status_blk_num; +#elif defined(__LITTLE_ENDIAN) + u16 status_tx_quick_consumer_index1; + u16 status_tx_quick_consumer_index0; + u16 status_tx_quick_consumer_index3; + u16 status_tx_quick_consumer_index2; + u16 status_rx_quick_consumer_index1; + u16 status_rx_quick_consumer_index0; + u16 status_rx_quick_consumer_index3; + u16 status_rx_quick_consumer_index2; + u16 status_rx_quick_consumer_index5; + u16 status_rx_quick_consumer_index4; + u16 status_rx_quick_consumer_index7; + u16 status_rx_quick_consumer_index6; + u16 status_rx_quick_consumer_index9; + u16 status_rx_quick_consumer_index8; + u16 status_rx_quick_consumer_index11; + u16 status_rx_quick_consumer_index10; + u16 status_rx_quick_consumer_index13; + u16 status_rx_quick_consumer_index12; + u16 status_rx_quick_consumer_index15; + u16 status_rx_quick_consumer_index14; + u16 status_cmd_consumer_index; + u16 status_completion_producer_index; + u8 status_blk_num; + u8 status_unused; + u16 status_idx; +#endif +}; + +/* + * status_block definition + */ +struct status_block_msix { +#if defined(__BIG_ENDIAN) + u16 status_tx_quick_consumer_index; + u16 status_rx_quick_consumer_index; + u16 status_completion_producer_index; + u16 status_cmd_consumer_index; + u32 status_unused; + u16 status_idx; + u8 status_unused2; + u8 status_blk_num; +#elif defined(__LITTLE_ENDIAN) + u16 status_rx_quick_consumer_index; + u16 status_tx_quick_consumer_index; + u16 status_cmd_consumer_index; + u16 status_completion_producer_index; + u32 status_unused; + u8 status_blk_num; + u8 status_unused2; + u16 status_idx; +#endif +}; + +#define BNX2_SBLK_MSIX_ALIGN_SIZE 128 + + +/* + * statistics_block definition + */ +struct statistics_block { + u32 stat_IfHCInOctets_hi; + u32 stat_IfHCInOctets_lo; + u32 stat_IfHCInBadOctets_hi; + u32 
stat_IfHCInBadOctets_lo; + u32 stat_IfHCOutOctets_hi; + u32 stat_IfHCOutOctets_lo; + u32 stat_IfHCOutBadOctets_hi; + u32 stat_IfHCOutBadOctets_lo; + u32 stat_IfHCInUcastPkts_hi; + u32 stat_IfHCInUcastPkts_lo; + u32 stat_IfHCInMulticastPkts_hi; + u32 stat_IfHCInMulticastPkts_lo; + u32 stat_IfHCInBroadcastPkts_hi; + u32 stat_IfHCInBroadcastPkts_lo; + u32 stat_IfHCOutUcastPkts_hi; + u32 stat_IfHCOutUcastPkts_lo; + u32 stat_IfHCOutMulticastPkts_hi; + u32 stat_IfHCOutMulticastPkts_lo; + u32 stat_IfHCOutBroadcastPkts_hi; + u32 stat_IfHCOutBroadcastPkts_lo; + u32 stat_emac_tx_stat_dot3statsinternalmactransmiterrors; + u32 stat_Dot3StatsCarrierSenseErrors; + u32 stat_Dot3StatsFCSErrors; + u32 stat_Dot3StatsAlignmentErrors; + u32 stat_Dot3StatsSingleCollisionFrames; + u32 stat_Dot3StatsMultipleCollisionFrames; + u32 stat_Dot3StatsDeferredTransmissions; + u32 stat_Dot3StatsExcessiveCollisions; + u32 stat_Dot3StatsLateCollisions; + u32 stat_EtherStatsCollisions; + u32 stat_EtherStatsFragments; + u32 stat_EtherStatsJabbers; + u32 stat_EtherStatsUndersizePkts; + u32 stat_EtherStatsOverrsizePkts; + u32 stat_EtherStatsPktsRx64Octets; + u32 stat_EtherStatsPktsRx65Octetsto127Octets; + u32 stat_EtherStatsPktsRx128Octetsto255Octets; + u32 stat_EtherStatsPktsRx256Octetsto511Octets; + u32 stat_EtherStatsPktsRx512Octetsto1023Octets; + u32 stat_EtherStatsPktsRx1024Octetsto1522Octets; + u32 stat_EtherStatsPktsRx1523Octetsto9022Octets; + u32 stat_EtherStatsPktsTx64Octets; + u32 stat_EtherStatsPktsTx65Octetsto127Octets; + u32 stat_EtherStatsPktsTx128Octetsto255Octets; + u32 stat_EtherStatsPktsTx256Octetsto511Octets; + u32 stat_EtherStatsPktsTx512Octetsto1023Octets; + u32 stat_EtherStatsPktsTx1024Octetsto1522Octets; + u32 stat_EtherStatsPktsTx1523Octetsto9022Octets; + u32 stat_XonPauseFramesReceived; + u32 stat_XoffPauseFramesReceived; + u32 stat_OutXonSent; + u32 stat_OutXoffSent; + u32 stat_FlowControlDone; + u32 stat_MacControlFramesReceived; + u32 stat_XoffStateEntered; + u32 stat_IfInFramesL2FilterDiscards; + u32 stat_IfInRuleCheckerDiscards; + u32 stat_IfInFTQDiscards; + u32 stat_IfInMBUFDiscards; + u32 stat_IfInRuleCheckerP4Hit; + u32 stat_CatchupInRuleCheckerDiscards; + u32 stat_CatchupInFTQDiscards; + u32 stat_CatchupInMBUFDiscards; + u32 stat_CatchupInRuleCheckerP4Hit; + u32 stat_GenStat00; + u32 stat_GenStat01; + u32 stat_GenStat02; + u32 stat_GenStat03; + u32 stat_GenStat04; + u32 stat_GenStat05; + u32 stat_GenStat06; + u32 stat_GenStat07; + u32 stat_GenStat08; + u32 stat_GenStat09; + u32 stat_GenStat10; + u32 stat_GenStat11; + u32 stat_GenStat12; + u32 stat_GenStat13; + u32 stat_GenStat14; + u32 stat_GenStat15; + u32 stat_FwRxDrop; +}; + + +/* + * l2_fhdr definition + */ +struct l2_fhdr { + u32 l2_fhdr_status; + #define L2_FHDR_STATUS_RULE_CLASS (0x7<<0) + #define L2_FHDR_STATUS_RULE_P2 (1<<3) + #define L2_FHDR_STATUS_RULE_P3 (1<<4) + #define L2_FHDR_STATUS_RULE_P4 (1<<5) + #define L2_FHDR_STATUS_L2_VLAN_TAG (1<<6) + #define L2_FHDR_STATUS_L2_LLC_SNAP (1<<7) + #define L2_FHDR_STATUS_RSS_HASH (1<<8) + #define L2_FHDR_STATUS_IP_DATAGRAM (1<<13) + #define L2_FHDR_STATUS_TCP_SEGMENT (1<<14) + #define L2_FHDR_STATUS_UDP_DATAGRAM (1<<15) + + #define L2_FHDR_STATUS_SPLIT (1<<16) + #define L2_FHDR_ERRORS_BAD_CRC (1<<17) + #define L2_FHDR_ERRORS_PHY_DECODE (1<<18) + #define L2_FHDR_ERRORS_ALIGNMENT (1<<19) + #define L2_FHDR_ERRORS_TOO_SHORT (1<<20) + #define L2_FHDR_ERRORS_GIANT_FRAME (1<<21) + #define L2_FHDR_ERRORS_TCP_XSUM (1<<28) + #define L2_FHDR_ERRORS_UDP_XSUM (1<<31) + + #define L2_FHDR_STATUS_USE_RXHASH 
\ + (L2_FHDR_STATUS_TCP_SEGMENT | L2_FHDR_STATUS_RSS_HASH) + + u32 l2_fhdr_hash; +#if defined(__BIG_ENDIAN) + u16 l2_fhdr_pkt_len; + u16 l2_fhdr_vlan_tag; + u16 l2_fhdr_ip_xsum; + u16 l2_fhdr_tcp_udp_xsum; +#elif defined(__LITTLE_ENDIAN) + u16 l2_fhdr_vlan_tag; + u16 l2_fhdr_pkt_len; + u16 l2_fhdr_tcp_udp_xsum; + u16 l2_fhdr_ip_xsum; +#endif +}; + +#define BNX2_RX_OFFSET (sizeof(struct l2_fhdr) + 2) + +/* + * l2_context definition + */ +#define BNX2_L2CTX_TYPE 0x00000000 +#define BNX2_L2CTX_TYPE_SIZE_L2 ((0xc0/0x20)<<16) +#define BNX2_L2CTX_TYPE_TYPE (0xf<<28) +#define BNX2_L2CTX_TYPE_TYPE_EMPTY (0<<28) +#define BNX2_L2CTX_TYPE_TYPE_L2 (1<<28) + +#define BNX2_L2CTX_TX_HOST_BIDX 0x00000088 +#define BNX2_L2CTX_EST_NBD 0x00000088 +#define BNX2_L2CTX_CMD_TYPE 0x00000088 +#define BNX2_L2CTX_CMD_TYPE_TYPE (0xf<<24) +#define BNX2_L2CTX_CMD_TYPE_TYPE_L2 (0<<24) +#define BNX2_L2CTX_CMD_TYPE_TYPE_TCP (1<<24) + +#define BNX2_L2CTX_TX_HOST_BSEQ 0x00000090 +#define BNX2_L2CTX_TSCH_BSEQ 0x00000094 +#define BNX2_L2CTX_TBDR_BSEQ 0x00000098 +#define BNX2_L2CTX_TBDR_BOFF 0x0000009c +#define BNX2_L2CTX_TBDR_BIDX 0x0000009c +#define BNX2_L2CTX_TBDR_BHADDR_HI 0x000000a0 +#define BNX2_L2CTX_TBDR_BHADDR_LO 0x000000a4 +#define BNX2_L2CTX_TXP_BOFF 0x000000a8 +#define BNX2_L2CTX_TXP_BIDX 0x000000a8 +#define BNX2_L2CTX_TXP_BSEQ 0x000000ac + +#define BNX2_L2CTX_TYPE_XI 0x00000080 +#define BNX2_L2CTX_CMD_TYPE_XI 0x00000240 +#define BNX2_L2CTX_TBDR_BHADDR_HI_XI 0x00000258 +#define BNX2_L2CTX_TBDR_BHADDR_LO_XI 0x0000025c + +/* + * l2_bd_chain_context definition + */ +#define BNX2_L2CTX_BD_PRE_READ 0x00000000 +#define BNX2_L2CTX_CTX_SIZE 0x00000000 +#define BNX2_L2CTX_CTX_TYPE 0x00000000 +#define BNX2_L2CTX_FLOW_CTRL_ENABLE 0x000000ff +#define BNX2_L2CTX_CTX_TYPE_SIZE_L2 ((0x20/20)<<16) +#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE (0xf<<28) +#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_UNDEFINED (0<<28) +#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) + +#define BNX2_L2CTX_HOST_BDIDX 0x00000004 +#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT 16 +#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT 24 +#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id) \ + (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0) +#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id) \ + (((sb_id) > 0) ? 
(((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0) +#define BNX2_L2CTX_HOST_BSEQ 0x00000008 +#define BNX2_L2CTX_NX_BSEQ 0x0000000c +#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 +#define BNX2_L2CTX_NX_BDHADDR_LO 0x00000014 +#define BNX2_L2CTX_NX_BDIDX 0x00000018 + +#define BNX2_L2CTX_HOST_PG_BDIDX 0x00000044 +#define BNX2_L2CTX_PG_BUF_SIZE 0x00000048 +#define BNX2_L2CTX_RBDC_KEY 0x0000004c +#define BNX2_L2CTX_RBDC_JUMBO_KEY 0x3ffe +#define BNX2_L2CTX_NX_PG_BDHADDR_HI 0x00000050 +#define BNX2_L2CTX_NX_PG_BDHADDR_LO 0x00000054 + +/* + * pci_config_l definition + * offset: 0000 + */ +#define BNX2_PCICFG_MSI_CONTROL 0x00000058 +#define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16) + +#define BNX2_PCICFG_MISC_CONFIG 0x00000068 +#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2) +#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3) +#define BNX2_PCICFG_MISC_CONFIG_RESERVED1 (1L<<4) +#define BNX2_PCICFG_MISC_CONFIG_CLOCK_CTL_ENA (1L<<5) +#define BNX2_PCICFG_MISC_CONFIG_TARGET_GRC_WORD_SWAP (1L<<6) +#define BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA (1L<<7) +#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ (1L<<8) +#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY (1L<<9) +#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN1_SWAP_EN (1L<<10) +#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN2_SWAP_EN (1L<<11) +#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN3_SWAP_EN (1L<<12) +#define BNX2_PCICFG_MISC_CONFIG_ASIC_METAL_REV (0xffL<<16) +#define BNX2_PCICFG_MISC_CONFIG_ASIC_BASE_REV (0xfL<<24) +#define BNX2_PCICFG_MISC_CONFIG_ASIC_ID (0xfL<<28) + +#define BNX2_PCICFG_MISC_STATUS 0x0000006c +#define BNX2_PCICFG_MISC_STATUS_INTA_VALUE (1L<<0) +#define BNX2_PCICFG_MISC_STATUS_32BIT_DET (1L<<1) +#define BNX2_PCICFG_MISC_STATUS_M66EN (1L<<2) +#define BNX2_PCICFG_MISC_STATUS_PCIX_DET (1L<<3) +#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED (0x3L<<4) +#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_66 (0L<<4) +#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_100 (1L<<4) +#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_133 (2L<<4) +#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_PCI_MODE (3L<<4) +#define BNX2_PCICFG_MISC_STATUS_BAD_MEM_WRITE_BE (1L<<8) + +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS 0x00000070 +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET (0xfL<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ (0L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ (1L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ (2L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ (3L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ (4L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ (5L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ (6L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ (7L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW (0xfL<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE (1L<<6) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT (1L<<7) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC (0x7L<<8) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF (0L<<8) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12 (1L<<8) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6 (2L<<8) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62 (4L<<8) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_MIN_POWER (1L<<11) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED (0xfL<<12) +#define 
BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100 (0L<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80 (1L<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50 (2L<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40 (4L<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25 (8L<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP (1L<<16) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_17 (1L<<17) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_18 (1L<<18) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_19 (1L<<19) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED (0xfffL<<20) + +#define BNX2_PCICFG_REG_WINDOW_ADDRESS 0x00000078 +#define BNX2_PCICFG_REG_WINDOW_ADDRESS_VAL (0xfffffL<<2) + +#define BNX2_PCICFG_REG_WINDOW 0x00000080 +#define BNX2_PCICFG_INT_ACK_CMD 0x00000084 +#define BNX2_PCICFG_INT_ACK_CMD_INDEX (0xffffL<<0) +#define BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID (1L<<16) +#define BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM (1L<<17) +#define BNX2_PCICFG_INT_ACK_CMD_MASK_INT (1L<<18) +#define BNX2_PCICFG_INT_ACK_CMD_INTERRUPT_NUM (0xfL<<24) +#define BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT 24 + +#define BNX2_PCICFG_STATUS_BIT_SET_CMD 0x00000088 +#define BNX2_PCICFG_STATUS_BIT_CLEAR_CMD 0x0000008c +#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090 +#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094 + +#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4 +#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16) + +/* + * pci_reg definition + * offset: 0x400 + */ +#define BNX2_PCI_GRC_WINDOW_ADDR 0x00000400 +#define BNX2_PCI_GRC_WINDOW_ADDR_VALUE (0x1ffL<<13) +#define BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN (1L<<31) + +#define BNX2_PCI_GRC_WINDOW2_BASE 0xc000 +#define BNX2_PCI_GRC_WINDOW3_BASE 0xe000 + +#define BNX2_PCI_CONFIG_1 0x00000404 +#define BNX2_PCI_CONFIG_1_RESERVED0 (0xffL<<0) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY (0x7L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_OFF (0L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_16 (1L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_32 (2L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_64 (3L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_128 (4L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_256 (5L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_512 (6L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_1024 (7L<<8) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY (0x7L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_OFF (0L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_16 (1L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_32 (2L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_64 (3L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_128 (4L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_256 (5L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_512 (6L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_1024 (7L<<11) +#define BNX2_PCI_CONFIG_1_RESERVED1 (0x3ffffL<<14) + +#define BNX2_PCI_CONFIG_2 0x00000408 +#define BNX2_PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_32M 
(10L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_64ENA (1L<<4) +#define BNX2_PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) +#define BNX2_PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) +#define BNX2_PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1K (1L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2K (2L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4K (3L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8K (4L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16K (5L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_32K (6L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_64K (7L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_128K (8L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_256K (9L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_512K (10L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1M (11L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2M (12L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4M (13L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8M (14L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16M (15L<<8) +#define BNX2_PCI_CONFIG_2_MAX_SPLIT_LIMIT (0x1fL<<16) +#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT (0x3L<<21) +#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_512 (0L<<21) +#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_1K (1L<<21) +#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_2K (2L<<21) +#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_4K (3L<<21) +#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_MSTR (1L<<23) +#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_TGT (1L<<24) +#define BNX2_PCI_CONFIG_2_KEEP_REQ_ASSERT (1L<<25) +#define BNX2_PCI_CONFIG_2_RESERVED0 (0x3fL<<26) +#define BNX2_PCI_CONFIG_2_BAR_PREFETCH_XI (1L<<16) +#define BNX2_PCI_CONFIG_2_RESERVED0_XI (0x7fffL<<17) + +#define BNX2_PCI_CONFIG_3 0x0000040c +#define BNX2_PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) +#define BNX2_PCI_CONFIG_3_REG_STICKY_BYTE (0xffL<<8) +#define BNX2_PCI_CONFIG_3_FORCE_PME (1L<<24) +#define BNX2_PCI_CONFIG_3_PME_STATUS (1L<<25) +#define BNX2_PCI_CONFIG_3_PME_ENABLE (1L<<26) +#define BNX2_PCI_CONFIG_3_PM_STATE (0x3L<<27) +#define BNX2_PCI_CONFIG_3_VAUX_PRESET (1L<<30) +#define BNX2_PCI_CONFIG_3_PCI_POWER (1L<<31) + +#define BNX2_PCI_PM_DATA_A 0x00000410 +#define BNX2_PCI_PM_DATA_A_PM_DATA_0_PRG (0xffL<<0) +#define BNX2_PCI_PM_DATA_A_PM_DATA_1_PRG (0xffL<<8) +#define BNX2_PCI_PM_DATA_A_PM_DATA_2_PRG (0xffL<<16) +#define BNX2_PCI_PM_DATA_A_PM_DATA_3_PRG (0xffL<<24) + +#define BNX2_PCI_PM_DATA_B 0x00000414 +#define BNX2_PCI_PM_DATA_B_PM_DATA_4_PRG (0xffL<<0) +#define BNX2_PCI_PM_DATA_B_PM_DATA_5_PRG (0xffL<<8) +#define BNX2_PCI_PM_DATA_B_PM_DATA_6_PRG (0xffL<<16) +#define BNX2_PCI_PM_DATA_B_PM_DATA_7_PRG (0xffL<<24) + +#define BNX2_PCI_SWAP_DIAG0 0x00000418 +#define BNX2_PCI_SWAP_DIAG1 0x0000041c +#define BNX2_PCI_EXP_ROM_ADDR 0x00000420 +#define BNX2_PCI_EXP_ROM_ADDR_ADDRESS (0x3fffffL<<2) +#define BNX2_PCI_EXP_ROM_ADDR_REQ (1L<<31) + +#define BNX2_PCI_EXP_ROM_DATA 0x00000424 +#define BNX2_PCI_VPD_INTF 0x00000428 +#define BNX2_PCI_VPD_INTF_INTF_REQ (1L<<0) + +#define BNX2_PCI_VPD_ADDR_FLAG 0x0000042c +#define BNX2_PCI_VPD_ADDR_FLAG_MSK 0x0000ffff +#define BNX2_PCI_VPD_ADDR_FLAG_SL 0L +#define BNX2_PCI_VPD_ADDR_FLAG_ADDRESS (0x1fffL<<2) +#define BNX2_PCI_VPD_ADDR_FLAG_WR (1L<<15) + +#define BNX2_PCI_VPD_DATA 0x00000430 +#define 
BNX2_PCI_ID_VAL1 0x00000434 +#define BNX2_PCI_ID_VAL1_DEVICE_ID (0xffffL<<0) +#define BNX2_PCI_ID_VAL1_VENDOR_ID (0xffffL<<16) + +#define BNX2_PCI_ID_VAL2 0x00000438 +#define BNX2_PCI_ID_VAL2_SUBSYSTEM_VENDOR_ID (0xffffL<<0) +#define BNX2_PCI_ID_VAL2_SUBSYSTEM_ID (0xffffL<<16) + +#define BNX2_PCI_ID_VAL3 0x0000043c +#define BNX2_PCI_ID_VAL3_CLASS_CODE (0xffffffL<<0) +#define BNX2_PCI_ID_VAL3_REVISION_ID (0xffL<<24) + +#define BNX2_PCI_ID_VAL4 0x00000440 +#define BNX2_PCI_ID_VAL4_CAP_ENA (0xfL<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_0 (0L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_1 (1L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_2 (2L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_3 (3L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_4 (4L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_5 (5L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_6 (6L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_7 (7L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_8 (8L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_9 (9L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_10 (10L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_11 (11L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_12 (12L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_13 (13L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_14 (14L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_15 (15L<<0) +#define BNX2_PCI_ID_VAL4_RESERVED0 (0x3L<<4) +#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG (0x3L<<6) +#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_0 (0L<<6) +#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_1 (1L<<6) +#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_2 (2L<<6) +#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_3 (3L<<6) +#define BNX2_PCI_ID_VAL4_MSI_PV_MASK_CAP (1L<<8) +#define BNX2_PCI_ID_VAL4_MSI_LIMIT (0x7L<<9) +#define BNX2_PCI_ID_VAL4_MULTI_MSG_CAP (0x7L<<12) +#define BNX2_PCI_ID_VAL4_MSI_ENABLE (1L<<15) +#define BNX2_PCI_ID_VAL4_MAX_64_ADVERTIZE (1L<<16) +#define BNX2_PCI_ID_VAL4_MAX_133_ADVERTIZE (1L<<17) +#define BNX2_PCI_ID_VAL4_RESERVED2 (0x7L<<18) +#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B21 (0x3L<<21) +#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B21 (0x3L<<23) +#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B0 (1L<<25) +#define BNX2_PCI_ID_VAL4_MAX_MEM_READ_SIZE_B10 (0x3L<<26) +#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B0 (1L<<28) +#define BNX2_PCI_ID_VAL4_RESERVED3 (0x7L<<29) +#define BNX2_PCI_ID_VAL4_RESERVED3_XI (0xffffL<<16) + +#define BNX2_PCI_ID_VAL5 0x00000444 +#define BNX2_PCI_ID_VAL5_D1_SUPPORT (1L<<0) +#define BNX2_PCI_ID_VAL5_D2_SUPPORT (1L<<1) +#define BNX2_PCI_ID_VAL5_PME_IN_D0 (1L<<2) +#define BNX2_PCI_ID_VAL5_PME_IN_D1 (1L<<3) +#define BNX2_PCI_ID_VAL5_PME_IN_D2 (1L<<4) +#define BNX2_PCI_ID_VAL5_PME_IN_D3_HOT (1L<<5) +#define BNX2_PCI_ID_VAL5_RESERVED0_TE (0x3ffffffL<<6) +#define BNX2_PCI_ID_VAL5_PM_VERSION_XI (0x7L<<6) +#define BNX2_PCI_ID_VAL5_NO_SOFT_RESET_XI (1L<<9) +#define BNX2_PCI_ID_VAL5_RESERVED0_XI (0x3fffffL<<10) + +#define BNX2_PCI_PCIX_EXTENDED_STATUS 0x00000448 +#define BNX2_PCI_PCIX_EXTENDED_STATUS_NO_SNOOP (1L<<8) +#define BNX2_PCI_PCIX_EXTENDED_STATUS_LONG_BURST (1L<<9) +#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_CLASS (0xfL<<16) +#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_IDX (0xffL<<24) + +#define BNX2_PCI_ID_VAL6 0x0000044c +#define BNX2_PCI_ID_VAL6_MAX_LAT (0xffL<<0) +#define BNX2_PCI_ID_VAL6_MIN_GNT (0xffL<<8) +#define BNX2_PCI_ID_VAL6_BIST (0xffL<<16) +#define BNX2_PCI_ID_VAL6_RESERVED0 (0xffL<<24) + +#define BNX2_PCI_MSI_DATA 0x00000450 +#define BNX2_PCI_MSI_DATA_MSI_DATA (0xffffL<<0) + +#define BNX2_PCI_MSI_ADDR_H 0x00000454 +#define BNX2_PCI_MSI_ADDR_L 0x00000458 +#define BNX2_PCI_MSI_ADDR_L_VAL (0x3fffffffL<<2) + +#define BNX2_PCI_CFG_ACCESS_CMD 0x0000045c 
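[These PCICFG definitions also cover the chip's indirection machinery: BNX2_PCICFG_REG_WINDOW_ADDRESS plus BNX2_PCICFG_REG_WINDOW form a classic address/data window, which is what bnx2_reg_rd_ind()/bnx2_reg_wr_ind(), defined earlier in bnx2.c, use to reach chip addresses that are not directly mapped. The read side is roughly:

static u32 example_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	/* the two-step address/data sequence must not be interleaved
	 * with another indirect access, hence the lock */
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
]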
+#define BNX2_PCI_CFG_ACCESS_CMD_ADR (0x3fL<<2) +#define BNX2_PCI_CFG_ACCESS_CMD_RD_REQ (1L<<27) +#define BNX2_PCI_CFG_ACCESS_CMD_WR_REQ (0xfL<<28) + +#define BNX2_PCI_CFG_ACCESS_DATA 0x00000460 +#define BNX2_PCI_MSI_MASK 0x00000464 +#define BNX2_PCI_MSI_MASK_MSI_MASK (0xffffffffL<<0) + +#define BNX2_PCI_MSI_PEND 0x00000468 +#define BNX2_PCI_MSI_PEND_MSI_PEND (0xffffffffL<<0) + +#define BNX2_PCI_PM_DATA_C 0x0000046c +#define BNX2_PCI_PM_DATA_C_PM_DATA_8_PRG (0xffL<<0) +#define BNX2_PCI_PM_DATA_C_RESERVED0 (0xffffffL<<8) + +#define BNX2_PCI_MSIX_CONTROL 0x000004c0 +#define BNX2_PCI_MSIX_CONTROL_MSIX_TBL_SIZ (0x7ffL<<0) +#define BNX2_PCI_MSIX_CONTROL_RESERVED0 (0x1fffffL<<11) + +#define BNX2_PCI_MSIX_TBL_OFF_BIR 0x000004c4 +#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_BIR (0x7L<<0) +#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_OFF (0x1fffffffL<<3) + +#define BNX2_PCI_MSIX_PBA_OFF_BIT 0x000004c8 +#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_BIR (0x7L<<0) +#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_OFF (0x1fffffffL<<3) + +#define BNX2_PCI_PCIE_CAPABILITY 0x000004d0 +#define BNX2_PCI_PCIE_CAPABILITY_INTERRUPT_MSG_NUM (0x1fL<<0) +#define BNX2_PCI_PCIE_CAPABILITY_COMPLY_PCIE_1_1 (1L<<5) + +#define BNX2_PCI_DEVICE_CAPABILITY 0x000004d4 +#define BNX2_PCI_DEVICE_CAPABILITY_MAX_PL_SIZ_SUPPORTED (0x7L<<0) +#define BNX2_PCI_DEVICE_CAPABILITY_EXTENDED_TAG_SUPPORT (1L<<5) +#define BNX2_PCI_DEVICE_CAPABILITY_L0S_ACCEPTABLE_LATENCY (0x7L<<6) +#define BNX2_PCI_DEVICE_CAPABILITY_L1_ACCEPTABLE_LATENCY (0x7L<<9) +#define BNX2_PCI_DEVICE_CAPABILITY_ROLE_BASED_ERR_RPT (1L<<15) + +#define BNX2_PCI_LINK_CAPABILITY 0x000004dc +#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED (0xfL<<0) +#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0001 (1L<<0) +#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0010 (1L<<0) +#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_WIDTH (0x1fL<<4) +#define BNX2_PCI_LINK_CAPABILITY_CLK_POWER_MGMT (1L<<9) +#define BNX2_PCI_LINK_CAPABILITY_ASPM_SUPPORT (0x3L<<10) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT (0x7L<<12) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_101 (5L<<12) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_110 (6L<<12) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT (0x7L<<15) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_001 (1L<<15) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_010 (2L<<15) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT (0x7L<<18) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_101 (5L<<18) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_110 (6L<<18) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT (0x7L<<21) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_001 (1L<<21) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_010 (2L<<21) +#define BNX2_PCI_LINK_CAPABILITY_PORT_NUM (0xffL<<24) + +#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2 0x000004e4 +#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_RANGE_SUPP (0xfL<<0) +#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_DISABL_SUPP (1L<<4) +#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_RESERVED (0x7ffffffL<<5) + +#define BNX2_PCI_PCIE_LINK_CAPABILITY_2 0x000004e8 +#define BNX2_PCI_PCIE_LINK_CAPABILITY_2_RESERVED (0xffffffffL<<0) + +#define BNX2_PCI_GRC_WINDOW1_ADDR 0x00000610 +#define BNX2_PCI_GRC_WINDOW1_ADDR_VALUE (0x1ffL<<13) + +#define BNX2_PCI_GRC_WINDOW2_ADDR 0x00000614 +#define BNX2_PCI_GRC_WINDOW2_ADDR_VALUE (0x1ffL<<13) + +#define BNX2_PCI_GRC_WINDOW3_ADDR 0x00000618 +#define BNX2_PCI_GRC_WINDOW3_ADDR_VALUE (0x1ffL<<13) + +#define BNX2_MSIX_TABLE_ADDR 0x318000 +#define BNX2_MSIX_PBA_ADDR 0x31c000 + +/* + * 
misc_reg definition + * offset: 0x800 + */ +#define BNX2_MISC_COMMAND 0x00000800 +#define BNX2_MISC_COMMAND_ENABLE_ALL (1L<<0) +#define BNX2_MISC_COMMAND_DISABLE_ALL (1L<<1) +#define BNX2_MISC_COMMAND_SW_RESET (1L<<4) +#define BNX2_MISC_COMMAND_POR_RESET (1L<<5) +#define BNX2_MISC_COMMAND_HD_RESET (1L<<6) +#define BNX2_MISC_COMMAND_CMN_SW_RESET (1L<<7) +#define BNX2_MISC_COMMAND_PAR_ERROR (1L<<8) +#define BNX2_MISC_COMMAND_CS16_ERR (1L<<9) +#define BNX2_MISC_COMMAND_CS16_ERR_LOC (0xfL<<12) +#define BNX2_MISC_COMMAND_PAR_ERR_RAM (0x7fL<<16) +#define BNX2_MISC_COMMAND_POWERDOWN_EVENT (1L<<23) +#define BNX2_MISC_COMMAND_SW_SHUTDOWN (1L<<24) +#define BNX2_MISC_COMMAND_SHUTDOWN_EN (1L<<25) +#define BNX2_MISC_COMMAND_DINTEG_ATTN_EN (1L<<26) +#define BNX2_MISC_COMMAND_PCIE_LINK_IN_L23 (1L<<27) +#define BNX2_MISC_COMMAND_PCIE_DIS (1L<<28) + +#define BNX2_MISC_CFG 0x00000804 +#define BNX2_MISC_CFG_GRC_TMOUT (1L<<0) +#define BNX2_MISC_CFG_NVM_WR_EN (0x3L<<1) +#define BNX2_MISC_CFG_NVM_WR_EN_PROTECT (0L<<1) +#define BNX2_MISC_CFG_NVM_WR_EN_PCI (1L<<1) +#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW (2L<<1) +#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW2 (3L<<1) +#define BNX2_MISC_CFG_BIST_EN (1L<<3) +#define BNX2_MISC_CFG_CK25_OUT_ALT_SRC (1L<<4) +#define BNX2_MISC_CFG_RESERVED5_TE (1L<<5) +#define BNX2_MISC_CFG_RESERVED6_TE (1L<<6) +#define BNX2_MISC_CFG_CLK_CTL_OVERRIDE (1L<<7) +#define BNX2_MISC_CFG_LEDMODE (0x7L<<8) +#define BNX2_MISC_CFG_LEDMODE_MAC (0L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY1_TE (1L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY2_TE (2L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY3_TE (3L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY4_TE (4L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY5_TE (5L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY6_TE (6L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY7_TE (7L<<8) +#define BNX2_MISC_CFG_MCP_GRC_TMOUT_TE (1L<<11) +#define BNX2_MISC_CFG_DBU_GRC_TMOUT_TE (1L<<12) +#define BNX2_MISC_CFG_LEDMODE_XI (0xfL<<8) +#define BNX2_MISC_CFG_LEDMODE_MAC_XI (0L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY1_XI (1L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY2_XI (2L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY3_XI (3L<<8) +#define BNX2_MISC_CFG_LEDMODE_MAC2_XI (4L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY4_XI (5L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY5_XI (6L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY6_XI (7L<<8) +#define BNX2_MISC_CFG_LEDMODE_MAC3_XI (8L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY7_XI (9L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY8_XI (10L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY9_XI (11L<<8) +#define BNX2_MISC_CFG_LEDMODE_MAC4_XI (12L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY10_XI (13L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY11_XI (14L<<8) +#define BNX2_MISC_CFG_LEDMODE_UNUSED_XI (15L<<8) +#define BNX2_MISC_CFG_PORT_SELECT_XI (1L<<13) +#define BNX2_MISC_CFG_PARITY_MODE_XI (1L<<14) + +#define BNX2_MISC_ID 0x00000808 +#define BNX2_MISC_ID_BOND_ID (0xfL<<0) +#define BNX2_MISC_ID_BOND_ID_X (0L<<0) +#define BNX2_MISC_ID_BOND_ID_C (3L<<0) +#define BNX2_MISC_ID_BOND_ID_S (12L<<0) +#define BNX2_MISC_ID_CHIP_METAL (0xffL<<4) +#define BNX2_MISC_ID_CHIP_REV (0xfL<<12) +#define BNX2_MISC_ID_CHIP_NUM (0xffffL<<16) + +#define BNX2_MISC_ENABLE_STATUS_BITS 0x0000080c +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_SCHEDULER_ENABLE (1L<<0) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_READ_ENABLE (1L<<1) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_CACHE_ENABLE (1L<<2) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PROCESSOR_ENABLE (1L<<3) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_DMA_ENABLE (1L<<4) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PATCHUP_ENABLE (1L<<5) 
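[The generated constants follow one convention throughout this header: each register offset is paired with field masks already shifted into position, so a field is recovered by masking and shifting down. bnx2_init_board() above latches BNX2_MISC_ID into bp->chip_id, and the CHIP_NUM()/CHIP_REV()/CHIP_ID() helpers defined later in this header are thin wrappers over exactly this kind of decode. An illustrative fragment (assumes a function with struct bnx2 *bp in scope):

	u32 chip_id = REG_RD(bp, BNX2_MISC_ID);
	/* bits 31:16 carry the chip number, e.g. 0x5709 on a BCM5709 */
	u32 chip_num = (chip_id & BNX2_MISC_ID_CHIP_NUM) >> 16;
	/* bits 15:12 carry the major revision: 0 = Ax, 1 = Bx, ... */
	u32 chip_rev = (chip_id & BNX2_MISC_ID_CHIP_REV) >> 12;
]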
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_HEADER_Q_ENABLE (1L<<7) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_ASSEMBLER_ENABLE (1L<<8) +#define BNX2_MISC_ENABLE_STATUS_BITS_EMAC_ENABLE (1L<<9) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_MAC_ENABLE (1L<<10) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_MBUF_ENABLE (1L<<12) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_LOOKUP_ENABLE (1L<<13) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PROCESSOR_ENABLE (1L<<14) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE (1L<<15) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_BD_CACHE_ENABLE (1L<<16) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_DMA_ENABLE (1L<<17) +#define BNX2_MISC_ENABLE_STATUS_BITS_COMPLETION_ENABLE (1L<<18) +#define BNX2_MISC_ENABLE_STATUS_BITS_HOST_COALESCE_ENABLE (1L<<19) +#define BNX2_MISC_ENABLE_STATUS_BITS_MAILBOX_QUEUE_ENABLE (1L<<20) +#define BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE (1L<<21) +#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_SCHEDULER_ENABLE (1L<<22) +#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_PROCESSOR_ENABLE (1L<<23) +#define BNX2_MISC_ENABLE_STATUS_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) +#define BNX2_MISC_ENABLE_STATUS_BITS_TIMER_ENABLE (1L<<25) +#define BNX2_MISC_ENABLE_STATUS_BITS_DMA_ENGINE_ENABLE (1L<<26) +#define BNX2_MISC_ENABLE_STATUS_BITS_UMP_ENABLE (1L<<27) +#define BNX2_MISC_ENABLE_STATUS_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) +#define BNX2_MISC_ENABLE_STATUS_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) + +#define BNX2_MISC_ENABLE_SET_BITS 0x00000810 +#define BNX2_MISC_ENABLE_SET_BITS_TX_SCHEDULER_ENABLE (1L<<0) +#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_READ_ENABLE (1L<<1) +#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_CACHE_ENABLE (1L<<2) +#define BNX2_MISC_ENABLE_SET_BITS_TX_PROCESSOR_ENABLE (1L<<3) +#define BNX2_MISC_ENABLE_SET_BITS_TX_DMA_ENABLE (1L<<4) +#define BNX2_MISC_ENABLE_SET_BITS_TX_PATCHUP_ENABLE (1L<<5) +#define BNX2_MISC_ENABLE_SET_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) +#define BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE (1L<<7) +#define BNX2_MISC_ENABLE_SET_BITS_TX_ASSEMBLER_ENABLE (1L<<8) +#define BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE (1L<<9) +#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE (1L<<10) +#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) +#define BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE (1L<<12) +#define BNX2_MISC_ENABLE_SET_BITS_RX_LOOKUP_ENABLE (1L<<13) +#define BNX2_MISC_ENABLE_SET_BITS_RX_PROCESSOR_ENABLE (1L<<14) +#define BNX2_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE (1L<<15) +#define BNX2_MISC_ENABLE_SET_BITS_RX_BD_CACHE_ENABLE (1L<<16) +#define BNX2_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE (1L<<17) +#define BNX2_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE (1L<<18) +#define BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE (1L<<19) +#define BNX2_MISC_ENABLE_SET_BITS_MAILBOX_QUEUE_ENABLE (1L<<20) +#define BNX2_MISC_ENABLE_SET_BITS_CONTEXT_ENABLE (1L<<21) +#define BNX2_MISC_ENABLE_SET_BITS_CMD_SCHEDULER_ENABLE (1L<<22) +#define BNX2_MISC_ENABLE_SET_BITS_CMD_PROCESSOR_ENABLE (1L<<23) +#define BNX2_MISC_ENABLE_SET_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) +#define BNX2_MISC_ENABLE_SET_BITS_TIMER_ENABLE (1L<<25) +#define BNX2_MISC_ENABLE_SET_BITS_DMA_ENGINE_ENABLE (1L<<26) +#define BNX2_MISC_ENABLE_SET_BITS_UMP_ENABLE (1L<<27) +#define BNX2_MISC_ENABLE_SET_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) +#define BNX2_MISC_ENABLE_SET_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) + +#define BNX2_MISC_ENABLE_CLR_BITS 0x00000814 +#define 
BNX2_MISC_ENABLE_CLR_BITS_TX_SCHEDULER_ENABLE (1L<<0) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_READ_ENABLE (1L<<1) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_CACHE_ENABLE (1L<<2) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_PROCESSOR_ENABLE (1L<<3) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE (1L<<4) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_PATCHUP_ENABLE (1L<<5) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_HEADER_Q_ENABLE (1L<<7) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_ASSEMBLER_ENABLE (1L<<8) +#define BNX2_MISC_ENABLE_CLR_BITS_EMAC_ENABLE (1L<<9) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_MAC_ENABLE (1L<<10) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_MBUF_ENABLE (1L<<12) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_LOOKUP_ENABLE (1L<<13) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_PROCESSOR_ENABLE (1L<<14) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_V2P_ENABLE (1L<<15) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_BD_CACHE_ENABLE (1L<<16) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE (1L<<17) +#define BNX2_MISC_ENABLE_CLR_BITS_COMPLETION_ENABLE (1L<<18) +#define BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE (1L<<19) +#define BNX2_MISC_ENABLE_CLR_BITS_MAILBOX_QUEUE_ENABLE (1L<<20) +#define BNX2_MISC_ENABLE_CLR_BITS_CONTEXT_ENABLE (1L<<21) +#define BNX2_MISC_ENABLE_CLR_BITS_CMD_SCHEDULER_ENABLE (1L<<22) +#define BNX2_MISC_ENABLE_CLR_BITS_CMD_PROCESSOR_ENABLE (1L<<23) +#define BNX2_MISC_ENABLE_CLR_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) +#define BNX2_MISC_ENABLE_CLR_BITS_TIMER_ENABLE (1L<<25) +#define BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE (1L<<26) +#define BNX2_MISC_ENABLE_CLR_BITS_UMP_ENABLE (1L<<27) +#define BNX2_MISC_ENABLE_CLR_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) +#define BNX2_MISC_ENABLE_CLR_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) + +#define BNX2_MISC_CLOCK_CONTROL_BITS 0x00000818 +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET (0xfL<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ (0L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ (1L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ (2L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ (3L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ (4L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ (5L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ (6L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ (7L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW (0xfL<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE (1L<<6) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT (1L<<7) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC (0x7L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF (0L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12 (1L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6 (2L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62 (4L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED0_XI (0x7L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_MIN_POWER (1L<<11) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED (0xfL<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100 (0L<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80 (1L<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50 (2L<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40 (4L<<12) +#define 
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25 (8L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED1_XI (0xfL<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP (1L<<16)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_17_TE (1L<<17)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_18_TE (1L<<18)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_19_TE (1L<<19)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_TE (0xfffL<<20)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_MGMT_XI (1L<<17)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED2_XI (0x3fL<<18)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_VCO_XI (0x7L<<24)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED3_XI (1L<<27)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_XI (0xfL<<28)
+
+#define BNX2_MISC_SPIO 0x0000081c
+#define BNX2_MISC_SPIO_VALUE (0xffL<<0)
+#define BNX2_MISC_SPIO_SET (0xffL<<8)
+#define BNX2_MISC_SPIO_CLR (0xffL<<16)
+#define BNX2_MISC_SPIO_FLOAT (0xffL<<24)
+
+#define BNX2_MISC_SPIO_INT 0x00000820
+#define BNX2_MISC_SPIO_INT_INT_STATE_TE (0xfL<<0)
+#define BNX2_MISC_SPIO_INT_OLD_VALUE_TE (0xfL<<8)
+#define BNX2_MISC_SPIO_INT_OLD_SET_TE (0xfL<<16)
+#define BNX2_MISC_SPIO_INT_OLD_CLR_TE (0xfL<<24)
+#define BNX2_MISC_SPIO_INT_INT_STATE_XI (0xffL<<0)
+#define BNX2_MISC_SPIO_INT_OLD_VALUE_XI (0xffL<<8)
+#define BNX2_MISC_SPIO_INT_OLD_SET_XI (0xffL<<16)
+#define BNX2_MISC_SPIO_INT_OLD_CLR_XI (0xffL<<24)
+
+#define BNX2_MISC_CONFIG_LFSR 0x00000824
+#define BNX2_MISC_CONFIG_LFSR_DIV (0xffffL<<0)
+
+#define BNX2_MISC_LFSR_MASK_BITS 0x00000828
+#define BNX2_MISC_LFSR_MASK_BITS_TX_SCHEDULER_ENABLE (1L<<0)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_READ_ENABLE (1L<<1)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_CACHE_ENABLE (1L<<2)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PROCESSOR_ENABLE (1L<<3)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_DMA_ENABLE (1L<<4)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PATCHUP_ENABLE (1L<<5)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_HEADER_Q_ENABLE (1L<<7)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_ASSEMBLER_ENABLE (1L<<8)
+#define BNX2_MISC_LFSR_MASK_BITS_EMAC_ENABLE (1L<<9)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_MAC_ENABLE (1L<<10)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_MBUF_ENABLE (1L<<12)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_LOOKUP_ENABLE (1L<<13)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PROCESSOR_ENABLE (1L<<14)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_V2P_ENABLE (1L<<15)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_BD_CACHE_ENABLE (1L<<16)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_DMA_ENABLE (1L<<17)
+#define BNX2_MISC_LFSR_MASK_BITS_COMPLETION_ENABLE (1L<<18)
+#define BNX2_MISC_LFSR_MASK_BITS_HOST_COALESCE_ENABLE (1L<<19)
+#define BNX2_MISC_LFSR_MASK_BITS_MAILBOX_QUEUE_ENABLE (1L<<20)
+#define BNX2_MISC_LFSR_MASK_BITS_CONTEXT_ENABLE (1L<<21)
+#define BNX2_MISC_LFSR_MASK_BITS_CMD_SCHEDULER_ENABLE (1L<<22)
+#define BNX2_MISC_LFSR_MASK_BITS_CMD_PROCESSOR_ENABLE (1L<<23)
+#define BNX2_MISC_LFSR_MASK_BITS_MGMT_PROCESSOR_ENABLE (1L<<24)
+#define BNX2_MISC_LFSR_MASK_BITS_TIMER_ENABLE (1L<<25)
+#define BNX2_MISC_LFSR_MASK_BITS_DMA_ENGINE_ENABLE (1L<<26)
+#define BNX2_MISC_LFSR_MASK_BITS_UMP_ENABLE (1L<<27)
+#define BNX2_MISC_LFSR_MASK_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28)
+#define BNX2_MISC_LFSR_MASK_BITS_RSVD_FUTURE_ENABLE (0x7L<<29)
+
+#define BNX2_MISC_ARB_REQ0 0x0000082c
+#define BNX2_MISC_ARB_REQ1 0x00000830
+#define BNX2_MISC_ARB_REQ2 0x00000834
+#define BNX2_MISC_ARB_REQ3 0x00000838
+#define BNX2_MISC_ARB_REQ4 0x0000083c
+#define BNX2_MISC_ARB_FREE0 0x00000840
+#define BNX2_MISC_ARB_FREE1 0x00000844
+#define BNX2_MISC_ARB_FREE2 0x00000848
+#define BNX2_MISC_ARB_FREE3 0x0000084c
+#define BNX2_MISC_ARB_FREE4 0x00000850
+#define BNX2_MISC_ARB_REQ_STATUS0 0x00000854
+#define BNX2_MISC_ARB_REQ_STATUS1 0x00000858
+#define BNX2_MISC_ARB_REQ_STATUS2 0x0000085c
+#define BNX2_MISC_ARB_REQ_STATUS3 0x00000860
+#define BNX2_MISC_ARB_REQ_STATUS4 0x00000864
+#define BNX2_MISC_ARB_GNT0 0x00000868
+#define BNX2_MISC_ARB_GNT0_0 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT0_1 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT0_2 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT0_3 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT0_4 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT0_5 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT0_6 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT0_7 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT1 0x0000086c
+#define BNX2_MISC_ARB_GNT1_8 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT1_9 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT1_10 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT1_11 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT1_12 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT1_13 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT1_14 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT1_15 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT2 0x00000870
+#define BNX2_MISC_ARB_GNT2_16 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT2_17 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT2_18 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT2_19 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT2_20 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT2_21 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT2_22 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT2_23 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT3 0x00000874
+#define BNX2_MISC_ARB_GNT3_24 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT3_25 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT3_26 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT3_27 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT3_28 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT3_29 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT3_30 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT3_31 (0x7L<<28)
+
+#define BNX2_MISC_RESERVED1 0x00000878
+#define BNX2_MISC_RESERVED1_MISC_RESERVED1_VALUE (0x3fL<<0)
+
+#define BNX2_MISC_RESERVED2 0x0000087c
+#define BNX2_MISC_RESERVED2_PCIE_DIS (1L<<0)
+#define BNX2_MISC_RESERVED2_LINK_IN_L23 (1L<<1)
+
+#define BNX2_MISC_SM_ASF_CONTROL 0x00000880
+#define BNX2_MISC_SM_ASF_CONTROL_ASF_RST (1L<<0)
+#define BNX2_MISC_SM_ASF_CONTROL_TSC_EN (1L<<1)
+#define BNX2_MISC_SM_ASF_CONTROL_WG_TO (1L<<2)
+#define BNX2_MISC_SM_ASF_CONTROL_HB_TO (1L<<3)
+#define BNX2_MISC_SM_ASF_CONTROL_PA_TO (1L<<4)
+#define BNX2_MISC_SM_ASF_CONTROL_PL_TO (1L<<5)
+#define BNX2_MISC_SM_ASF_CONTROL_RT_TO (1L<<6)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EVENT (1L<<7)
+#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_EN (1L<<8)
+#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_PULSE (1L<<9)
+#define BNX2_MISC_SM_ASF_CONTROL_RES (0x3L<<10)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EN (1L<<12)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_BB_EN (1L<<13)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_NO_ADDR_FILT (1L<<14)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_AUTOREAD (1L<<15)
+#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR1 (0x7fL<<16)
+#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR2 (0x7fL<<23)
+#define BNX2_MISC_SM_ASF_CONTROL_EN_NIC_SMB_ADDR_0 (1L<<30)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EARLY_ATTN (1L<<31)
+
+#define BNX2_MISC_SMB_IN 0x00000884
+#define BNX2_MISC_SMB_IN_DAT_IN (0xffL<<0)
+#define BNX2_MISC_SMB_IN_RDY (1L<<8)
+#define BNX2_MISC_SMB_IN_DONE (1L<<9)
+#define BNX2_MISC_SMB_IN_FIRSTBYTE (1L<<10)
+#define BNX2_MISC_SMB_IN_STATUS (0x7L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_OK (0x0L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_PEC (0x1L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_OFLOW (0x2L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_STOP (0x3L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_TIMEOUT (0x4L<<11)
+
+#define BNX2_MISC_SMB_OUT 0x00000888
+#define BNX2_MISC_SMB_OUT_DAT_OUT (0xffL<<0)
+#define BNX2_MISC_SMB_OUT_RDY (1L<<8)
+#define BNX2_MISC_SMB_OUT_START (1L<<9)
+#define BNX2_MISC_SMB_OUT_LAST (1L<<10)
+#define BNX2_MISC_SMB_OUT_ACC_TYPE (1L<<11)
+#define BNX2_MISC_SMB_OUT_ENB_PEC (1L<<12)
+#define BNX2_MISC_SMB_OUT_GET_RX_LEN (1L<<13)
+#define BNX2_MISC_SMB_OUT_SMB_READ_LEN (0x3fL<<14)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS (0xfL<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_OK (0L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_NACK (1L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_UFLOW (2L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_STOP (3L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_TIMEOUT (4L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_LOST (5L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_BADACK (6L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_NACK (9L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_LOST (0xdL<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_SLAVEMODE (1L<<24)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_EN (1L<<25)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_IN (1L<<26)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_EN (1L<<27)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_IN (1L<<28)
+
+#define BNX2_MISC_SMB_WATCHDOG 0x0000088c
+#define BNX2_MISC_SMB_WATCHDOG_WATCHDOG (0xffffL<<0)
+
+#define BNX2_MISC_SMB_HEARTBEAT 0x00000890
+#define BNX2_MISC_SMB_HEARTBEAT_HEARTBEAT (0xffffL<<0)
+
+#define BNX2_MISC_SMB_POLL_ASF 0x00000894
+#define BNX2_MISC_SMB_POLL_ASF_POLL_ASF (0xffffL<<0)
+
+#define BNX2_MISC_SMB_POLL_LEGACY 0x00000898
+#define BNX2_MISC_SMB_POLL_LEGACY_POLL_LEGACY (0xffffL<<0)
+
+#define BNX2_MISC_SMB_RETRAN 0x0000089c
+#define BNX2_MISC_SMB_RETRAN_RETRAN (0xffL<<0)
+
+#define BNX2_MISC_SMB_TIMESTAMP 0x000008a0
+#define BNX2_MISC_SMB_TIMESTAMP_TIMESTAMP (0xffffffffL<<0)
+
+#define BNX2_MISC_PERR_ENA0 0x000008a4
+#define BNX2_MISC_PERR_ENA0_COM_MISC_CTXC (1L<<0)
+#define BNX2_MISC_PERR_ENA0_COM_MISC_REGF (1L<<1)
+#define BNX2_MISC_PERR_ENA0_COM_MISC_SCPAD (1L<<2)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_CTXC (1L<<3)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_REGF (1L<<4)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_SCPAD (1L<<5)
+#define BNX2_MISC_PERR_ENA0_CS_MISC_TMEM (1L<<6)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM0 (1L<<7)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM1 (1L<<8)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM2 (1L<<9)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM3 (1L<<10)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM4 (1L<<11)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM5 (1L<<12)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_PGTBL (1L<<13)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR0 (1L<<14)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR1 (1L<<15)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR2 (1L<<16)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR3 (1L<<17)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR4 (1L<<18)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW0 (1L<<19)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW1 (1L<<20)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW2 (1L<<21)
+#define BNX2_MISC_PERR_ENA0_HC_MISC_DMA (1L<<22)
+#define BNX2_MISC_PERR_ENA0_MCP_MISC_REGF (1L<<23)
+#define BNX2_MISC_PERR_ENA0_MCP_MISC_SCPAD (1L<<24)
+#define BNX2_MISC_PERR_ENA0_MQ_MISC_CTX (1L<<25)
+#define BNX2_MISC_PERR_ENA0_RBDC_MISC (1L<<26)
+#define BNX2_MISC_PERR_ENA0_RBUF_MISC_MB (1L<<27)
+#define BNX2_MISC_PERR_ENA0_RBUF_MISC_PTR (1L<<28)
+#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPC (1L<<29)
+#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPM (1L<<30)
+#define BNX2_MISC_PERR_ENA0_RV2P_MISC_CB0REGS (1L<<31)
+#define BNX2_MISC_PERR_ENA0_COM_DMAE_PERR_EN_XI (1L<<0)
+#define BNX2_MISC_PERR_ENA0_CP_DMAE_PERR_EN_XI (1L<<1)
+#define BNX2_MISC_PERR_ENA0_RPM_ACPIBEMEM_PERR_EN_XI (1L<<2)
+#define BNX2_MISC_PERR_ENA0_CTX_USAGE_CNT_PERR_EN_XI (1L<<3)
+#define BNX2_MISC_PERR_ENA0_CTX_PGTBL_PERR_EN_XI (1L<<4)
+#define BNX2_MISC_PERR_ENA0_CTX_CACHE_PERR_EN_XI (1L<<5)
+#define BNX2_MISC_PERR_ENA0_CTX_MIRROR_PERR_EN_XI (1L<<6)
+#define BNX2_MISC_PERR_ENA0_COM_CTXC_PERR_EN_XI (1L<<7)
+#define BNX2_MISC_PERR_ENA0_COM_SCPAD_PERR_EN_XI (1L<<8)
+#define BNX2_MISC_PERR_ENA0_CP_CTXC_PERR_EN_XI (1L<<9)
+#define BNX2_MISC_PERR_ENA0_CP_SCPAD_PERR_EN_XI (1L<<10)
+#define BNX2_MISC_PERR_ENA0_RXP_RBUFC_PERR_EN_XI (1L<<11)
+#define BNX2_MISC_PERR_ENA0_RXP_CTXC_PERR_EN_XI (1L<<12)
+#define BNX2_MISC_PERR_ENA0_RXP_SCPAD_PERR_EN_XI (1L<<13)
+#define BNX2_MISC_PERR_ENA0_TPAT_SCPAD_PERR_EN_XI (1L<<14)
+#define BNX2_MISC_PERR_ENA0_TXP_CTXC_PERR_EN_XI (1L<<15)
+#define BNX2_MISC_PERR_ENA0_TXP_SCPAD_PERR_EN_XI (1L<<16)
+#define BNX2_MISC_PERR_ENA0_CS_TMEM_PERR_EN_XI (1L<<17)
+#define BNX2_MISC_PERR_ENA0_MQ_CTX_PERR_EN_XI (1L<<18)
+#define BNX2_MISC_PERR_ENA0_RPM_DFIFOMEM_PERR_EN_XI (1L<<19)
+#define BNX2_MISC_PERR_ENA0_RPC_DFIFOMEM_PERR_EN_XI (1L<<20)
+#define BNX2_MISC_PERR_ENA0_RBUF_PTRMEM_PERR_EN_XI (1L<<21)
+#define BNX2_MISC_PERR_ENA0_RBUF_DATAMEM_PERR_EN_XI (1L<<22)
+#define BNX2_MISC_PERR_ENA0_RV2P_P2IRAM_PERR_EN_XI (1L<<23)
+#define BNX2_MISC_PERR_ENA0_RV2P_P1IRAM_PERR_EN_XI (1L<<24)
+#define BNX2_MISC_PERR_ENA0_RV2P_CB1REGS_PERR_EN_XI (1L<<25)
+#define BNX2_MISC_PERR_ENA0_RV2P_CB0REGS_PERR_EN_XI (1L<<26)
+#define BNX2_MISC_PERR_ENA0_TPBUF_PERR_EN_XI (1L<<27)
+#define BNX2_MISC_PERR_ENA0_THBUF_PERR_EN_XI (1L<<28)
+#define BNX2_MISC_PERR_ENA0_TDMA_PERR_EN_XI (1L<<29)
+#define BNX2_MISC_PERR_ENA0_TBDC_PERR_EN_XI (1L<<30)
+#define BNX2_MISC_PERR_ENA0_TSCH_LR_PERR_EN_XI (1L<<31)
+
+#define BNX2_MISC_PERR_ENA1 0x000008a8
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_CB1REGS (1L<<0)
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P1IRAM (1L<<1)
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P2IRAM (1L<<2)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_CTXC (1L<<3)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_REGF (1L<<4)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_SCPAD (1L<<5)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_RBUFC (1L<<6)
+#define BNX2_MISC_PERR_ENA1_TBDC_MISC (1L<<7)
+#define BNX2_MISC_PERR_ENA1_TDMA_MISC (1L<<8)
+#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB0 (1L<<9)
+#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB1 (1L<<10)
+#define BNX2_MISC_PERR_ENA1_TPAT_MISC_REGF (1L<<11)
+#define BNX2_MISC_PERR_ENA1_TPAT_MISC_SCPAD (1L<<12)
+#define BNX2_MISC_PERR_ENA1_TPBUF_MISC_MB (1L<<13)
+#define BNX2_MISC_PERR_ENA1_TSCH_MISC_LR (1L<<14)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_CTXC (1L<<15)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_REGF (1L<<16)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_SCPAD (1L<<17)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIORX (1L<<18)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIOTX (1L<<19)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_RX (1L<<20)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_TX (1L<<21)
+#define BNX2_MISC_PERR_ENA1_RDMAQ_MISC (1L<<22)
+#define BNX2_MISC_PERR_ENA1_CSQ_MISC (1L<<23)
+#define BNX2_MISC_PERR_ENA1_CPQ_MISC (1L<<24)
+#define BNX2_MISC_PERR_ENA1_MCPQ_MISC (1L<<25)
+#define BNX2_MISC_PERR_ENA1_RV2PMQ_MISC (1L<<26)
+#define BNX2_MISC_PERR_ENA1_RV2PPQ_MISC (1L<<27)
+#define BNX2_MISC_PERR_ENA1_RV2PTQ_MISC (1L<<28)
+#define BNX2_MISC_PERR_ENA1_RXPQ_MISC (1L<<29)
+#define BNX2_MISC_PERR_ENA1_RXPCQ_MISC (1L<<30)
+#define BNX2_MISC_PERR_ENA1_RLUPQ_MISC (1L<<31)
+#define BNX2_MISC_PERR_ENA1_RBDC_PERR_EN_XI (1L<<0)
+#define BNX2_MISC_PERR_ENA1_RDMA_DFIFO_PERR_EN_XI (1L<<2)
+#define BNX2_MISC_PERR_ENA1_HC_STATS_PERR_EN_XI (1L<<3)
+#define BNX2_MISC_PERR_ENA1_HC_MSIX_PERR_EN_XI (1L<<4)
+#define BNX2_MISC_PERR_ENA1_HC_PRODUCSTB_PERR_EN_XI (1L<<5)
+#define BNX2_MISC_PERR_ENA1_HC_CONSUMSTB_PERR_EN_XI (1L<<6)
+#define BNX2_MISC_PERR_ENA1_TPATQ_PERR_EN_XI (1L<<7)
+#define BNX2_MISC_PERR_ENA1_MCPQ_PERR_EN_XI (1L<<8)
+#define BNX2_MISC_PERR_ENA1_TDMAQ_PERR_EN_XI (1L<<9)
+#define BNX2_MISC_PERR_ENA1_TXPQ_PERR_EN_XI (1L<<10)
+#define BNX2_MISC_PERR_ENA1_COMTQ_PERR_EN_XI (1L<<11)
+#define BNX2_MISC_PERR_ENA1_COMQ_PERR_EN_XI (1L<<12)
+#define BNX2_MISC_PERR_ENA1_RLUPQ_PERR_EN_XI (1L<<13)
+#define BNX2_MISC_PERR_ENA1_RXPQ_PERR_EN_XI (1L<<14)
+#define BNX2_MISC_PERR_ENA1_RV2PPQ_PERR_EN_XI (1L<<15)
+#define BNX2_MISC_PERR_ENA1_RDMAQ_PERR_EN_XI (1L<<16)
+#define BNX2_MISC_PERR_ENA1_TASQ_PERR_EN_XI (1L<<17)
+#define BNX2_MISC_PERR_ENA1_TBDRQ_PERR_EN_XI (1L<<18)
+#define BNX2_MISC_PERR_ENA1_TSCHQ_PERR_EN_XI (1L<<19)
+#define BNX2_MISC_PERR_ENA1_COMXQ_PERR_EN_XI (1L<<20)
+#define BNX2_MISC_PERR_ENA1_RXPCQ_PERR_EN_XI (1L<<21)
+#define BNX2_MISC_PERR_ENA1_RV2PTQ_PERR_EN_XI (1L<<22)
+#define BNX2_MISC_PERR_ENA1_RV2PMQ_PERR_EN_XI (1L<<23)
+#define BNX2_MISC_PERR_ENA1_CPQ_PERR_EN_XI (1L<<24)
+#define BNX2_MISC_PERR_ENA1_CSQ_PERR_EN_XI (1L<<25)
+#define BNX2_MISC_PERR_ENA1_RLUP_CID_PERR_EN_XI (1L<<26)
+#define BNX2_MISC_PERR_ENA1_RV2PCS_TMEM_PERR_EN_XI (1L<<27)
+#define BNX2_MISC_PERR_ENA1_RV2PCSQ_PERR_EN_XI (1L<<28)
+#define BNX2_MISC_PERR_ENA1_MQ_IDX_PERR_EN_XI (1L<<29)
+
+#define BNX2_MISC_PERR_ENA2 0x000008ac
+#define BNX2_MISC_PERR_ENA2_COMQ_MISC (1L<<0)
+#define BNX2_MISC_PERR_ENA2_COMXQ_MISC (1L<<1)
+#define BNX2_MISC_PERR_ENA2_COMTQ_MISC (1L<<2)
+#define BNX2_MISC_PERR_ENA2_TSCHQ_MISC (1L<<3)
+#define BNX2_MISC_PERR_ENA2_TBDRQ_MISC (1L<<4)
+#define BNX2_MISC_PERR_ENA2_TXPQ_MISC (1L<<5)
+#define BNX2_MISC_PERR_ENA2_TDMAQ_MISC (1L<<6)
+#define BNX2_MISC_PERR_ENA2_TPATQ_MISC (1L<<7)
+#define BNX2_MISC_PERR_ENA2_TASQ_MISC (1L<<8)
+#define BNX2_MISC_PERR_ENA2_TGT_FIFO_PERR_EN_XI (1L<<0)
+#define BNX2_MISC_PERR_ENA2_UMP_TX_PERR_EN_XI (1L<<1)
+#define BNX2_MISC_PERR_ENA2_UMP_RX_PERR_EN_XI (1L<<2)
+#define BNX2_MISC_PERR_ENA2_MCP_ROM_PERR_EN_XI (1L<<3)
+#define BNX2_MISC_PERR_ENA2_MCP_SCPAD_PERR_EN_XI (1L<<4)
+#define BNX2_MISC_PERR_ENA2_HB_MEM_PERR_EN_XI (1L<<5)
+#define BNX2_MISC_PERR_ENA2_PCIE_REPLAY_PERR_EN_XI (1L<<6)
+
+#define BNX2_MISC_DEBUG_VECTOR_SEL 0x000008b0
+#define BNX2_MISC_DEBUG_VECTOR_SEL_0 (0xfffL<<0)
+#define BNX2_MISC_DEBUG_VECTOR_SEL_1 (0xfffL<<12)
+#define BNX2_MISC_DEBUG_VECTOR_SEL_1_XI (0xfffL<<15)
+
+#define BNX2_MISC_VREG_CONTROL 0x000008b4
+#define BNX2_MISC_VREG_CONTROL_1_2 (0xfL<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_XI (0xfL<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS14_XI (0L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS12_XI (1L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS10_XI (2L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS8_XI (3L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS6_XI (4L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS4_XI (5L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS2_XI (6L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_NOM_XI (7L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS2_XI (8L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS4_XI (9L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS6_XI (10L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS8_XI (11L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS10_XI (12L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS12_XI (13L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS14_XI (14L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS16_XI (15L<<0)
+#define BNX2_MISC_VREG_CONTROL_2_5 (0xfL<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS14 (0L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS12 (1L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS10 (2L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS8 (3L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS6 (4L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS4 (5L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS2 (6L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_NOM (7L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS2 (8L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS4 (9L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS6 (10L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS8 (11L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS10 (12L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS12 (13L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS14 (14L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS16 (15L<<4)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT (0xfL<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS14 (0L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS12 (1L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS10 (2L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS8 (3L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS6 (4L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS4 (5L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS2 (6L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_NOM (7L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS2 (8L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS4 (9L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS6 (10L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS8 (11L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS10 (12L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS12 (13L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS14 (14L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS16 (15L<<8)
+
+#define BNX2_MISC_FINAL_CLK_CTL_VAL 0x000008b8
+#define BNX2_MISC_FINAL_CLK_CTL_VAL_MISC_FINAL_CLK_CTL_VAL (0x3ffffffL<<6)
+
+#define BNX2_MISC_GP_HW_CTL0 0x000008bc
+#define BNX2_MISC_GP_HW_CTL0_TX_DRIVE (1L<<0)
+#define BNX2_MISC_GP_HW_CTL0_RMII_MODE (1L<<1)
+#define BNX2_MISC_GP_HW_CTL0_RMII_CRSDV_SEL (1L<<2)
+#define BNX2_MISC_GP_HW_CTL0_RVMII_MODE (1L<<3)
+#define BNX2_MISC_GP_HW_CTL0_FLASH_SAMP_SCLK_NEGEDGE_TE (1L<<4)
+#define BNX2_MISC_GP_HW_CTL0_HIDDEN_REVISION_ID_TE (1L<<5)
+#define BNX2_MISC_GP_HW_CTL0_HC_CNTL_TMOUT_CTR_RST_TE (1L<<6)
+#define BNX2_MISC_GP_HW_CTL0_RESERVED1_XI (0x7L<<4)
+#define BNX2_MISC_GP_HW_CTL0_ENA_CORE_RST_ON_MAIN_PWR_GOING_AWAY (1L<<7)
+#define BNX2_MISC_GP_HW_CTL0_ENA_SEL_VAUX_B_IN_L2_TE (1L<<8)
+#define BNX2_MISC_GP_HW_CTL0_GRC_BNK_FREE_FIX_TE (1L<<9)
+#define BNX2_MISC_GP_HW_CTL0_LED_ACT_SEL_TE (1L<<10)
+#define BNX2_MISC_GP_HW_CTL0_RESERVED2_XI (0x7L<<8)
+#define BNX2_MISC_GP_HW_CTL0_UP1_DEF0 (1L<<11)
+#define BNX2_MISC_GP_HW_CTL0_FIBER_MODE_DIS_DEF (1L<<12)
+#define BNX2_MISC_GP_HW_CTL0_FORCE2500_DEF (1L<<13)
+#define BNX2_MISC_GP_HW_CTL0_AUTODETECT_DIS_DEF (1L<<14)
+#define BNX2_MISC_GP_HW_CTL0_PARALLEL_DETECT_DEF (1L<<15)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI (0xfL<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_3MA (0L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P5MA (1L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P0MA (3L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P5MA (5L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P0MA (7L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_PWRDN (15L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE2DIS (1L<<20)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE1DIS (1L<<21)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT (0x3L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M6P (0L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M0P (1L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P0P (2L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P6P (3L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT (0x3L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M6P (0L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M0P (1L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P0P (2L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P6P (3L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ (0x3L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_240UA (0L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_160UA (1L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_400UA (2L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_320UA (3L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ (0x3L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_240UA (0L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_160UA (1L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_400UA (2L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_320UA (3L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ (0x3L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P57 (0L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P45 (1L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P62 (2L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P66 (3L<<30)
+
+#define BNX2_MISC_GP_HW_CTL1 0x000008c0
+#define BNX2_MISC_GP_HW_CTL1_1_ATTN_BTN_PRSNT_TE (1L<<0)
+#define BNX2_MISC_GP_HW_CTL1_1_ATTN_IND_PRSNT_TE (1L<<1)
+#define BNX2_MISC_GP_HW_CTL1_1_PWR_IND_PRSNT_TE (1L<<2)
+#define BNX2_MISC_GP_HW_CTL1_0_PCIE_LOOPBACK_TE (1L<<3)
+#define BNX2_MISC_GP_HW_CTL1_RESERVED_SOFT_XI (0xffffL<<0)
+#define BNX2_MISC_GP_HW_CTL1_RESERVED_HARD_XI (0xffffL<<16)
+
+#define BNX2_MISC_NEW_HW_CTL 0x000008c4
+#define BNX2_MISC_NEW_HW_CTL_MAIN_POR_BYPASS (1L<<0)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_ENABLE (1L<<1)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL0 (1L<<2)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL1 (1L<<3)
+#define BNX2_MISC_NEW_HW_CTL_RESERVED_SHARED (0xfffL<<4)
+#define BNX2_MISC_NEW_HW_CTL_RESERVED_SPLIT (0xffffL<<16)
+
+#define BNX2_MISC_NEW_CORE_CTL 0x000008c8
+#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_SUCCESS (1L<<0)
+#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_REQ (1L<<1)
+#define BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE (1L<<16)
+#define BNX2_MISC_NEW_CORE_CTL_RESERVED_CMN (0x3fffL<<2)
+#define BNX2_MISC_NEW_CORE_CTL_RESERVED_TC (0xffffL<<16)
+
+#define BNX2_MISC_ECO_HW_CTL 0x000008cc
+#define BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN (1L<<0)
+#define BNX2_MISC_ECO_HW_CTL_RESERVED_SOFT (0x7fffL<<1)
+#define BNX2_MISC_ECO_HW_CTL_RESERVED_HARD (0xffffL<<16)
+
+#define BNX2_MISC_ECO_CORE_CTL 0x000008d0
+#define BNX2_MISC_ECO_CORE_CTL_RESERVED_SOFT (0xffffL<<0)
+#define BNX2_MISC_ECO_CORE_CTL_RESERVED_HARD (0xffffL<<16)
+
+#define BNX2_MISC_PPIO 0x000008d4
+#define BNX2_MISC_PPIO_VALUE (0xfL<<0)
+#define BNX2_MISC_PPIO_SET (0xfL<<8)
+#define BNX2_MISC_PPIO_CLR (0xfL<<16)
+#define BNX2_MISC_PPIO_FLOAT (0xfL<<24)
+
+#define BNX2_MISC_PPIO_INT 0x000008d8
+#define BNX2_MISC_PPIO_INT_INT_STATE (0xfL<<0)
+#define BNX2_MISC_PPIO_INT_OLD_VALUE (0xfL<<8)
+#define BNX2_MISC_PPIO_INT_OLD_SET (0xfL<<16)
+#define BNX2_MISC_PPIO_INT_OLD_CLR (0xfL<<24)
+
+#define BNX2_MISC_RESET_NUMS 0x000008dc
+#define BNX2_MISC_RESET_NUMS_NUM_HARD_RESETS (0x7L<<0)
+#define BNX2_MISC_RESET_NUMS_NUM_PCIE_RESETS (0x7L<<4)
+#define BNX2_MISC_RESET_NUMS_NUM_PERSTB_RESETS (0x7L<<8)
+#define BNX2_MISC_RESET_NUMS_NUM_CMN_RESETS (0x7L<<12)
+#define BNX2_MISC_RESET_NUMS_NUM_PORT_RESETS (0x7L<<16)
+
+#define BNX2_MISC_CS16_ERR 0x000008e0
+#define BNX2_MISC_CS16_ERR_ENA_PCI (1L<<0)
+#define BNX2_MISC_CS16_ERR_ENA_RDMA (1L<<1)
+#define BNX2_MISC_CS16_ERR_ENA_TDMA (1L<<2)
+#define BNX2_MISC_CS16_ERR_ENA_EMAC (1L<<3)
+#define BNX2_MISC_CS16_ERR_ENA_CTX (1L<<4)
+#define BNX2_MISC_CS16_ERR_ENA_TBDR (1L<<5)
+#define BNX2_MISC_CS16_ERR_ENA_RBDC (1L<<6)
+#define BNX2_MISC_CS16_ERR_ENA_COM (1L<<7)
+#define BNX2_MISC_CS16_ERR_ENA_CP (1L<<8)
+#define BNX2_MISC_CS16_ERR_STA_PCI (1L<<16)
+#define BNX2_MISC_CS16_ERR_STA_RDMA (1L<<17)
+#define BNX2_MISC_CS16_ERR_STA_TDMA (1L<<18)
+#define BNX2_MISC_CS16_ERR_STA_EMAC (1L<<19)
+#define BNX2_MISC_CS16_ERR_STA_CTX (1L<<20)
+#define BNX2_MISC_CS16_ERR_STA_TBDR (1L<<21)
+#define BNX2_MISC_CS16_ERR_STA_RBDC (1L<<22)
+#define BNX2_MISC_CS16_ERR_STA_COM (1L<<23)
+#define BNX2_MISC_CS16_ERR_STA_CP (1L<<24)
+
+#define BNX2_MISC_SPIO_EVENT 0x000008e4
+#define BNX2_MISC_SPIO_EVENT_ENABLE (0xffL<<0)
+
+#define BNX2_MISC_PPIO_EVENT 0x000008e8
+#define BNX2_MISC_PPIO_EVENT_ENABLE (0xfL<<0)
+
+#define BNX2_MISC_DUAL_MEDIA_CTRL 0x000008ec
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID (0xffL<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_X (0L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C (3L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S (12L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP (0x7L<<8)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP_PIN (1L<<11)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_SIGDET (1L<<12)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_SIGDET (1L<<13)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_SIGDET (1L<<14)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_SIGDET (1L<<15)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_LCPLL_RST (1L<<16)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_RST (1L<<17)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_RST (1L<<18)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_RST (1L<<19)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_RST (1L<<20)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL (0x7L<<21)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP (1L<<24)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE (1L<<25)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ (0xfL<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER1_IDDQ (1L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER0_IDDQ (2L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY1_IDDQ (4L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY0_IDDQ (8L<<26)
+
+#define BNX2_MISC_OTP_CMD1 0x000008f0
+#define BNX2_MISC_OTP_CMD1_FMODE (0x7L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_IDLE (0L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_WRITE (1L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_INIT (2L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_SET (3L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RST (4L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_VERIFY (5L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED0 (6L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED1 (7L<<0)
+#define BNX2_MISC_OTP_CMD1_USEPINS (1L<<8)
+#define BNX2_MISC_OTP_CMD1_PROGSEL (1L<<9)
+#define BNX2_MISC_OTP_CMD1_PROGSTART (1L<<10)
+#define BNX2_MISC_OTP_CMD1_PCOUNT (0x7L<<16)
+#define BNX2_MISC_OTP_CMD1_PBYP (1L<<19)
+#define BNX2_MISC_OTP_CMD1_VSEL (0xfL<<20)
+#define BNX2_MISC_OTP_CMD1_TM (0x7L<<27)
+#define BNX2_MISC_OTP_CMD1_SADBYP (1L<<30)
+#define BNX2_MISC_OTP_CMD1_DEBUG (1L<<31)
+
+#define BNX2_MISC_OTP_CMD2 0x000008f4
+#define BNX2_MISC_OTP_CMD2_OTP_ROM_ADDR (0x3ffL<<0)
+#define BNX2_MISC_OTP_CMD2_DOSEL (0x7fL<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_0 (0L<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_1 (1L<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_127 (127L<<16)
+
+#define BNX2_MISC_OTP_STATUS 0x000008f8
+#define BNX2_MISC_OTP_STATUS_DATA (0xffL<<0)
+#define BNX2_MISC_OTP_STATUS_VALID (1L<<8)
+#define BNX2_MISC_OTP_STATUS_BUSY (1L<<9)
+#define BNX2_MISC_OTP_STATUS_BUSYSM (1L<<10)
+#define BNX2_MISC_OTP_STATUS_DONE (1L<<11)
+
+#define BNX2_MISC_OTP_SHIFT1_CMD 0x000008fc
+#define BNX2_MISC_OTP_SHIFT1_CMD_RESET_MODE_N (1L<<0)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_DONE (1L<<1)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_START (1L<<2)
+#define BNX2_MISC_OTP_SHIFT1_CMD_LOAD_DATA (1L<<3)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_SELECT (0x1fL<<8)
+
+#define BNX2_MISC_OTP_SHIFT1_DATA 0x00000900
+#define BNX2_MISC_OTP_SHIFT2_CMD 0x00000904
+#define BNX2_MISC_OTP_SHIFT2_CMD_RESET_MODE_N (1L<<0)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_DONE (1L<<1)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_START (1L<<2)
+#define BNX2_MISC_OTP_SHIFT2_CMD_LOAD_DATA (1L<<3)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_SELECT (0x1fL<<8)
+
+#define BNX2_MISC_OTP_SHIFT2_DATA 0x00000908
+#define BNX2_MISC_BIST_CS0 0x0000090c
+#define BNX2_MISC_BIST_CS0_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS0_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS0_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS0_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS0_MBIST_GO (1L<<9)
+#define BNX2_MISC_BIST_CS0_BIST_OVERRIDE (1L<<31)
+
+#define BNX2_MISC_BIST_MEMSTATUS0 0x00000910
+#define BNX2_MISC_BIST_CS1 0x00000914
+#define BNX2_MISC_BIST_CS1_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS1_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS1_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS1_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS1_MBIST_GO (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS1 0x00000918
+#define BNX2_MISC_BIST_CS2 0x0000091c
+#define BNX2_MISC_BIST_CS2_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS2_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS2_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS2_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS2_MBIST_GO (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS2 0x00000920
+#define BNX2_MISC_BIST_CS3 0x00000924
+#define BNX2_MISC_BIST_CS3_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS3_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS3_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS3_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS3_MBIST_GO (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS3 0x00000928
+#define BNX2_MISC_BIST_CS4 0x0000092c
+#define BNX2_MISC_BIST_CS4_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS4_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS4_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS4_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS4_MBIST_GO (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS4 0x00000930
+#define BNX2_MISC_BIST_CS5 0x00000934
+#define BNX2_MISC_BIST_CS5_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS5_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS5_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS5_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS5_MBIST_GO (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS5 0x00000938
+#define BNX2_MISC_MEM_TM0 0x0000093c
+#define BNX2_MISC_MEM_TM0_PCIE_REPLAY_TM (0xfL<<0)
+#define BNX2_MISC_MEM_TM0_MCP_SCPAD (0xfL<<8)
+#define BNX2_MISC_MEM_TM0_UMP_TM (0xffL<<16)
+#define BNX2_MISC_MEM_TM0_HB_MEM_TM (0xfL<<24)
+
+#define BNX2_MISC_USPLL_CTRL 0x00000940
+#define BNX2_MISC_USPLL_CTRL_PH_DET_DIS (1L<<0)
+#define BNX2_MISC_USPLL_CTRL_FREQ_DET_DIS (1L<<1)
+#define BNX2_MISC_USPLL_CTRL_LCPX (0x3fL<<2)
+#define BNX2_MISC_USPLL_CTRL_RX (0x3L<<8)
+#define BNX2_MISC_USPLL_CTRL_VC_EN (1L<<10)
+#define BNX2_MISC_USPLL_CTRL_VCO_MG (0x3L<<11)
+#define BNX2_MISC_USPLL_CTRL_KVCO_XF (0x7L<<13)
+#define BNX2_MISC_USPLL_CTRL_KVCO_XS (0x7L<<16)
+#define BNX2_MISC_USPLL_CTRL_TESTD_EN (1L<<19)
+#define BNX2_MISC_USPLL_CTRL_TESTD_SEL (0x7L<<20)
+#define BNX2_MISC_USPLL_CTRL_TESTA_EN (1L<<23)
+#define BNX2_MISC_USPLL_CTRL_TESTA_SEL (0x3L<<24)
+#define BNX2_MISC_USPLL_CTRL_ATTEN_FREF (1L<<26)
+#define BNX2_MISC_USPLL_CTRL_DIGITAL_RST (1L<<27)
+#define BNX2_MISC_USPLL_CTRL_ANALOG_RST (1L<<28)
+#define BNX2_MISC_USPLL_CTRL_LOCK (1L<<29)
+
+#define BNX2_MISC_PERR_STATUS0 0x00000944
+#define BNX2_MISC_PERR_STATUS0_COM_DMAE_PERR (1L<<0)
+#define BNX2_MISC_PERR_STATUS0_CP_DMAE_PERR (1L<<1)
+#define BNX2_MISC_PERR_STATUS0_RPM_ACPIBEMEM_PERR (1L<<2)
+#define BNX2_MISC_PERR_STATUS0_CTX_USAGE_CNT_PERR (1L<<3)
+#define BNX2_MISC_PERR_STATUS0_CTX_PGTBL_PERR (1L<<4)
+#define BNX2_MISC_PERR_STATUS0_CTX_CACHE_PERR (1L<<5)
+#define BNX2_MISC_PERR_STATUS0_CTX_MIRROR_PERR (1L<<6)
+#define BNX2_MISC_PERR_STATUS0_COM_CTXC_PERR (1L<<7)
+#define BNX2_MISC_PERR_STATUS0_COM_SCPAD_PERR (1L<<8)
+#define BNX2_MISC_PERR_STATUS0_CP_CTXC_PERR (1L<<9)
+#define BNX2_MISC_PERR_STATUS0_CP_SCPAD_PERR (1L<<10)
+#define BNX2_MISC_PERR_STATUS0_RXP_RBUFC_PERR (1L<<11)
+#define BNX2_MISC_PERR_STATUS0_RXP_CTXC_PERR (1L<<12)
+#define BNX2_MISC_PERR_STATUS0_RXP_SCPAD_PERR (1L<<13)
+#define BNX2_MISC_PERR_STATUS0_TPAT_SCPAD_PERR (1L<<14)
+#define BNX2_MISC_PERR_STATUS0_TXP_CTXC_PERR (1L<<15)
+#define BNX2_MISC_PERR_STATUS0_TXP_SCPAD_PERR (1L<<16)
+#define BNX2_MISC_PERR_STATUS0_CS_TMEM_PERR (1L<<17)
+#define BNX2_MISC_PERR_STATUS0_MQ_CTX_PERR (1L<<18)
+#define BNX2_MISC_PERR_STATUS0_RPM_DFIFOMEM_PERR (1L<<19)
+#define BNX2_MISC_PERR_STATUS0_RPC_DFIFOMEM_PERR (1L<<20)
+#define BNX2_MISC_PERR_STATUS0_RBUF_PTRMEM_PERR (1L<<21)
+#define BNX2_MISC_PERR_STATUS0_RBUF_DATAMEM_PERR (1L<<22)
+#define BNX2_MISC_PERR_STATUS0_RV2P_P2IRAM_PERR (1L<<23)
+#define BNX2_MISC_PERR_STATUS0_RV2P_P1IRAM_PERR (1L<<24)
+#define BNX2_MISC_PERR_STATUS0_RV2P_CB1REGS_PERR (1L<<25)
+#define BNX2_MISC_PERR_STATUS0_RV2P_CB0REGS_PERR (1L<<26)
+#define BNX2_MISC_PERR_STATUS0_TPBUF_PERR (1L<<27)
+#define BNX2_MISC_PERR_STATUS0_THBUF_PERR (1L<<28)
+#define BNX2_MISC_PERR_STATUS0_TDMA_PERR (1L<<29)
+#define BNX2_MISC_PERR_STATUS0_TBDC_PERR (1L<<30)
+#define BNX2_MISC_PERR_STATUS0_TSCH_LR_PERR (1L<<31)
+
+#define BNX2_MISC_PERR_STATUS1 0x00000948
+#define BNX2_MISC_PERR_STATUS1_RBDC_PERR (1L<<0)
+#define BNX2_MISC_PERR_STATUS1_RDMA_DFIFO_PERR (1L<<2)
+#define BNX2_MISC_PERR_STATUS1_HC_STATS_PERR (1L<<3)
+#define BNX2_MISC_PERR_STATUS1_HC_MSIX_PERR (1L<<4)
+#define BNX2_MISC_PERR_STATUS1_HC_PRODUCSTB_PERR (1L<<5)
+#define BNX2_MISC_PERR_STATUS1_HC_CONSUMSTB_PERR (1L<<6)
+#define BNX2_MISC_PERR_STATUS1_TPATQ_PERR (1L<<7)
+#define BNX2_MISC_PERR_STATUS1_MCPQ_PERR (1L<<8)
+#define BNX2_MISC_PERR_STATUS1_TDMAQ_PERR (1L<<9)
+#define BNX2_MISC_PERR_STATUS1_TXPQ_PERR (1L<<10)
+#define BNX2_MISC_PERR_STATUS1_COMTQ_PERR (1L<<11)
+#define BNX2_MISC_PERR_STATUS1_COMQ_PERR (1L<<12)
+#define BNX2_MISC_PERR_STATUS1_RLUPQ_PERR (1L<<13)
+#define BNX2_MISC_PERR_STATUS1_RXPQ_PERR (1L<<14)
+#define BNX2_MISC_PERR_STATUS1_RV2PPQ_PERR (1L<<15)
+#define BNX2_MISC_PERR_STATUS1_RDMAQ_PERR (1L<<16)
+#define BNX2_MISC_PERR_STATUS1_TASQ_PERR (1L<<17)
+#define BNX2_MISC_PERR_STATUS1_TBDRQ_PERR (1L<<18)
+#define BNX2_MISC_PERR_STATUS1_TSCHQ_PERR (1L<<19)
+#define BNX2_MISC_PERR_STATUS1_COMXQ_PERR (1L<<20)
+#define BNX2_MISC_PERR_STATUS1_RXPCQ_PERR (1L<<21)
+#define BNX2_MISC_PERR_STATUS1_RV2PTQ_PERR (1L<<22)
+#define BNX2_MISC_PERR_STATUS1_RV2PMQ_PERR (1L<<23)
+#define BNX2_MISC_PERR_STATUS1_CPQ_PERR (1L<<24)
+#define BNX2_MISC_PERR_STATUS1_CSQ_PERR (1L<<25)
+#define BNX2_MISC_PERR_STATUS1_RLUP_CID_PERR (1L<<26)
+#define BNX2_MISC_PERR_STATUS1_RV2PCS_TMEM_PERR (1L<<27)
+#define BNX2_MISC_PERR_STATUS1_RV2PCSQ_PERR (1L<<28)
+#define BNX2_MISC_PERR_STATUS1_MQ_IDX_PERR (1L<<29)
+
+#define BNX2_MISC_PERR_STATUS2 0x0000094c
+#define BNX2_MISC_PERR_STATUS2_TGT_FIFO_PERR (1L<<0)
+#define BNX2_MISC_PERR_STATUS2_UMP_TX_PERR (1L<<1)
+#define BNX2_MISC_PERR_STATUS2_UMP_RX_PERR (1L<<2)
+#define BNX2_MISC_PERR_STATUS2_MCP_ROM_PERR (1L<<3)
+#define BNX2_MISC_PERR_STATUS2_MCP_SCPAD_PERR (1L<<4)
+#define BNX2_MISC_PERR_STATUS2_HB_MEM_PERR (1L<<5)
+#define BNX2_MISC_PERR_STATUS2_PCIE_REPLAY_PERR (1L<<6)
+
+#define BNX2_MISC_LCPLL_CTRL0 0x00000950
+#define BNX2_MISC_LCPLL_CTRL0_OAC (0x7L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_NEGTWENTY (0L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_ZERO (1L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_TWENTY (3L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_FORTY (7L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL (0x7L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_360 (0L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_480 (1L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_600 (3L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_720 (7L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_BIAS_CTRL (0x3L<<6)
+#define BNX2_MISC_LCPLL_CTRL0_PLL_OBSERVE (0x7L<<8)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL (0x3L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_0 (0L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_1 (1L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_2 (2L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_PLLSEQSTART (1L<<13)
+#define BNX2_MISC_LCPLL_CTRL0_RESERVED (1L<<14)
+#define BNX2_MISC_LCPLL_CTRL0_CAPRETRY_EN (1L<<15)
+#define BNX2_MISC_LCPLL_CTRL0_FREQMONITOR_EN (1L<<16)
+#define BNX2_MISC_LCPLL_CTRL0_FREQDETRESTART_EN (1L<<17)
+#define BNX2_MISC_LCPLL_CTRL0_FREQDETRETRY_EN (1L<<18)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE_EN (1L<<19)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE (1L<<20)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFPASS (1L<<21)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE_EN (1L<<22)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE (1L<<23)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS_EN (1L<<24)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS (1L<<25)
+#define BNX2_MISC_LCPLL_CTRL0_CAPRESTART (1L<<26)
+#define BNX2_MISC_LCPLL_CTRL0_CAPSELECTM_EN (1L<<27)
+
+#define BNX2_MISC_LCPLL_CTRL1 0x00000954
+#define BNX2_MISC_LCPLL_CTRL1_CAPSELECTM (0x1fL<<0)
+#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN_EN (1L<<5)
+#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN (1L<<6)
+#define BNX2_MISC_LCPLL_CTRL1_SLOWDN_XOR (1L<<7)
+
+#define BNX2_MISC_LCPLL_STATUS 0x00000958
+#define BNX2_MISC_LCPLL_STATUS_FREQDONE_SM (1L<<0)
+#define BNX2_MISC_LCPLL_STATUS_FREQPASS_SM (1L<<1)
+#define BNX2_MISC_LCPLL_STATUS_PLLSEQDONE (1L<<2)
+#define BNX2_MISC_LCPLL_STATUS_PLLSEQPASS (1L<<3)
+#define BNX2_MISC_LCPLL_STATUS_PLLSTATE (0x7L<<4)
+#define BNX2_MISC_LCPLL_STATUS_CAPSTATE (0x7L<<7)
+#define BNX2_MISC_LCPLL_STATUS_CAPSELECT (0x1fL<<10)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR (1L<<15)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_0 (0L<<15)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_1 (1L<<15)
+
+#define BNX2_MISC_OSCFUNDS_CTRL 0x0000095c
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON (1L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_OFF (0L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_ON (1L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM (0x3L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_0 (0L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_1 (1L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_2 (2L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_3 (3L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ (0x3L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_0 (0L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_1 (1L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_2 (2L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_3 (3L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ (0x3L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_0 (0L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_1 (1L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_2 (2L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_3 (3L<<10)
+
+
+/*
+ * nvm_reg definition
+ * offset: 0x6400
+ */
+#define BNX2_NVM_COMMAND 0x00006400
+#define BNX2_NVM_COMMAND_RST (1L<<0)
+#define BNX2_NVM_COMMAND_DONE (1L<<3)
+#define BNX2_NVM_COMMAND_DOIT (1L<<4)
+#define BNX2_NVM_COMMAND_WR (1L<<5)
+#define BNX2_NVM_COMMAND_ERASE (1L<<6)
+#define BNX2_NVM_COMMAND_FIRST (1L<<7)
+#define BNX2_NVM_COMMAND_LAST (1L<<8)
+#define BNX2_NVM_COMMAND_WREN (1L<<16)
+#define BNX2_NVM_COMMAND_WRDI (1L<<17)
+#define BNX2_NVM_COMMAND_EWSR (1L<<18)
+#define BNX2_NVM_COMMAND_WRSR (1L<<19)
+#define BNX2_NVM_COMMAND_RD_ID (1L<<20)
+#define BNX2_NVM_COMMAND_RD_STATUS (1L<<21)
+#define BNX2_NVM_COMMAND_MODE_256 (1L<<22)
+
+#define BNX2_NVM_STATUS 0x00006404
+#define BNX2_NVM_STATUS_PI_FSM_STATE (0xfL<<0)
+#define BNX2_NVM_STATUS_EE_FSM_STATE (0xfL<<4)
+#define BNX2_NVM_STATUS_EQ_FSM_STATE (0xfL<<8)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_XI (0x1fL<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_IDLE_XI (0L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD0_XI (1L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD1_XI (2L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH0_XI (3L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH1_XI (4L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ADDR0_XI (5L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA0_XI (6L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA1_XI (7L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA2_XI (8L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA0_XI (9L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA1_XI (10L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA2_XI (11L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID0_XI (12L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID1_XI (13L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID2_XI (14L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID3_XI (15L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID4_XI (16L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CHECK_BUSY0_XI (17L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ST_WREN_XI (18L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WAIT_XI (19L<<0)
+
+#define BNX2_NVM_WRITE 0x00006408
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE (0xffffffffL<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_BIT_BANG (0L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EECLK (1L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EEDATA (2L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK (4L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B (8L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO (16L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI (32L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI_XI (1L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO_XI (2L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B_XI (4L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK_XI (8L<<0)
+
+#define BNX2_NVM_ADDR 0x0000640c
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_BIT_BANG (0L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EECLK (1L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EEDATA (2L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK (4L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B (8L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO (16L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI (32L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI_XI (1L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO_XI (2L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B_XI (4L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK_XI (8L<<0)
+
+#define BNX2_NVM_READ 0x00006410
+#define BNX2_NVM_READ_NVM_READ_VALUE (0xffffffffL<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_BIT_BANG (0L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_EECLK (1L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_EEDATA (2L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK (4L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B (8L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SO (16L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SI (32L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SI_XI (1L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SO_XI (2L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B_XI (4L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK_XI (8L<<0)
+
+#define BNX2_NVM_CFG1 0x00006414
+#define BNX2_NVM_CFG1_FLASH_MODE (1L<<0)
+#define BNX2_NVM_CFG1_BUFFER_MODE (1L<<1)
+#define BNX2_NVM_CFG1_PASS_MODE (1L<<2)
+#define BNX2_NVM_CFG1_BITBANG_MODE (1L<<3)
+#define BNX2_NVM_CFG1_STATUS_BIT (0x7L<<4)
+#define BNX2_NVM_CFG1_STATUS_BIT_FLASH_RDY (0L<<4)
+#define BNX2_NVM_CFG1_STATUS_BIT_BUFFER_RDY (7L<<4)
+#define BNX2_NVM_CFG1_SPI_CLK_DIV (0xfL<<7)
+#define BNX2_NVM_CFG1_SEE_CLK_DIV (0x7ffL<<11)
+#define BNX2_NVM_CFG1_STRAP_CONTROL_0 (1L<<23)
+#define BNX2_NVM_CFG1_PROTECT_MODE (1L<<24)
+#define BNX2_NVM_CFG1_FLASH_SIZE (1L<<25)
+#define BNX2_NVM_CFG1_FW_USTRAP_1 (1L<<26)
+#define BNX2_NVM_CFG1_FW_USTRAP_0 (1L<<27)
+#define BNX2_NVM_CFG1_FW_USTRAP_2 (1L<<28)
+#define BNX2_NVM_CFG1_FW_USTRAP_3 (1L<<29)
+#define BNX2_NVM_CFG1_FW_FLASH_TYPE_EN (1L<<30)
+#define BNX2_NVM_CFG1_COMPAT_BYPASSS (1L<<31)
+
+#define BNX2_NVM_CFG2 0x00006418
+#define BNX2_NVM_CFG2_ERASE_CMD (0xffL<<0)
+#define BNX2_NVM_CFG2_DUMMY (0xffL<<8)
+#define BNX2_NVM_CFG2_STATUS_CMD (0xffL<<16)
+#define BNX2_NVM_CFG2_READ_ID (0xffL<<24)
+
+#define BNX2_NVM_CFG3 0x0000641c
+#define BNX2_NVM_CFG3_BUFFER_RD_CMD (0xffL<<0)
+#define BNX2_NVM_CFG3_WRITE_CMD (0xffL<<8)
+#define BNX2_NVM_CFG3_BUFFER_WRITE_CMD (0xffL<<16)
+#define BNX2_NVM_CFG3_READ_CMD (0xffL<<24)
+
+#define BNX2_NVM_SW_ARB 0x00006420
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET0 (1L<<0)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET2 (1L<<2)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET3 (1L<<3)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR0 (1L<<4)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR2 (1L<<6)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR3 (1L<<7)
+#define BNX2_NVM_SW_ARB_ARB_ARB0 (1L<<8)
+#define BNX2_NVM_SW_ARB_ARB_ARB1 (1L<<9)
+#define BNX2_NVM_SW_ARB_ARB_ARB2 (1L<<10)
+#define BNX2_NVM_SW_ARB_ARB_ARB3 (1L<<11)
+#define BNX2_NVM_SW_ARB_REQ0 (1L<<12)
+#define BNX2_NVM_SW_ARB_REQ1 (1L<<13)
+#define BNX2_NVM_SW_ARB_REQ2 (1L<<14)
+#define BNX2_NVM_SW_ARB_REQ3 (1L<<15)
+
+#define BNX2_NVM_ACCESS_ENABLE 0x00006424
+#define BNX2_NVM_ACCESS_ENABLE_EN (1L<<0)
+#define BNX2_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
+
+#define BNX2_NVM_WRITE1 0x00006428
+#define BNX2_NVM_WRITE1_WREN_CMD (0xffL<<0)
+#define BNX2_NVM_WRITE1_WRDI_CMD (0xffL<<8)
+#define BNX2_NVM_WRITE1_SR_DATA (0xffL<<16)
+
+#define BNX2_NVM_CFG4 0x0000642c
+#define BNX2_NVM_CFG4_FLASH_SIZE (0x7L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_1MBIT (0L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_2MBIT (1L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_4MBIT (2L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_8MBIT (3L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_16MBIT (4L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_32MBIT (5L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_64MBIT (6L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_128MBIT (7L<<0)
+#define BNX2_NVM_CFG4_FLASH_VENDOR (1L<<3)
+#define BNX2_NVM_CFG4_FLASH_VENDOR_ST (0L<<3)
+#define BNX2_NVM_CFG4_FLASH_VENDOR_ATMEL (1L<<3)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC (0x3L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT8 (0L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT9 (1L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT10 (2L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT11 (3L<<4)
+#define BNX2_NVM_CFG4_STATUS_BIT_POLARITY (1L<<6)
+#define BNX2_NVM_CFG4_RESERVED (0x1ffffffL<<7)
+
+#define BNX2_NVM_RECONFIG 0x00006430
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE (0xfL<<0)
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ST (0L<<0)
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ATMEL (1L<<0)
+#define BNX2_NVM_RECONFIG_RECONFIG_STRAP_VALUE (0xfL<<4)
+#define BNX2_NVM_RECONFIG_RESERVED (0x7fffffL<<8)
+#define BNX2_NVM_RECONFIG_RECONFIG_DONE (1L<<31)
+
+
+
+/*
+ * dma_reg definition
+ * offset: 0xc00
+ */
+#define BNX2_DMA_COMMAND 0x00000c00
+#define BNX2_DMA_COMMAND_ENABLE (1L<<0)
+
+#define BNX2_DMA_STATUS 0x00000c04
+#define BNX2_DMA_STATUS_PAR_ERROR_STATE (1L<<0)
+#define BNX2_DMA_STATUS_READ_TRANSFERS_STAT (1L<<16)
+#define BNX2_DMA_STATUS_READ_DELAY_PCI_CLKS_STAT (1L<<17)
+#define BNX2_DMA_STATUS_BIG_READ_TRANSFERS_STAT (1L<<18)
+#define BNX2_DMA_STATUS_BIG_READ_DELAY_PCI_CLKS_STAT (1L<<19)
+#define BNX2_DMA_STATUS_BIG_READ_RETRY_AFTER_DATA_STAT (1L<<20)
+#define BNX2_DMA_STATUS_WRITE_TRANSFERS_STAT (1L<<21)
+#define BNX2_DMA_STATUS_WRITE_DELAY_PCI_CLKS_STAT (1L<<22)
+#define BNX2_DMA_STATUS_BIG_WRITE_TRANSFERS_STAT (1L<<23)
+#define BNX2_DMA_STATUS_BIG_WRITE_DELAY_PCI_CLKS_STAT (1L<<24)
+#define BNX2_DMA_STATUS_BIG_WRITE_RETRY_AFTER_DATA_STAT (1L<<25)
+#define BNX2_DMA_STATUS_GLOBAL_ERR_XI (1L<<0)
+#define BNX2_DMA_STATUS_BME_XI (1L<<4)
+
+#define BNX2_DMA_CONFIG 0x00000c08
+#define BNX2_DMA_CONFIG_DATA_BYTE_SWAP (1L<<0)
+#define BNX2_DMA_CONFIG_DATA_WORD_SWAP (1L<<1)
+#define BNX2_DMA_CONFIG_CNTL_BYTE_SWAP (1L<<4)
+#define BNX2_DMA_CONFIG_CNTL_WORD_SWAP (1L<<5)
+#define BNX2_DMA_CONFIG_ONE_DMA (1L<<6)
+#define BNX2_DMA_CONFIG_CNTL_TWO_DMA (1L<<7)
+#define BNX2_DMA_CONFIG_CNTL_FPGA_MODE (1L<<8)
+#define BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA (1L<<10)
+#define BNX2_DMA_CONFIG_CNTL_PCI_COMP_DLY (1L<<11)
+#define BNX2_DMA_CONFIG_NO_RCHANS_IN_USE (0xfL<<12)
+#define BNX2_DMA_CONFIG_NO_WCHANS_IN_USE (0xfL<<16)
+#define BNX2_DMA_CONFIG_PCI_CLK_CMP_BITS (0x7L<<20)
+#define BNX2_DMA_CONFIG_PCI_FAST_CLK_CMP (1L<<23)
+#define BNX2_DMA_CONFIG_BIG_SIZE (0xfL<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_NONE (0x0L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_64 (0x1L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_128 (0x2L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_256 (0x4L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_512 (0x8L<<24)
+#define BNX2_DMA_CONFIG_DAT_WBSWAP_MODE_XI (0x3L<<0)
+#define BNX2_DMA_CONFIG_CTL_WBSWAP_MODE_XI (0x3L<<4)
+#define BNX2_DMA_CONFIG_MAX_PL_XI (0x7L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_128B_XI (0L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_256B_XI (1L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_512B_XI (2L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_EN_XI (1L<<15)
+#define BNX2_DMA_CONFIG_MAX_RRS_XI (0x7L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_128B_XI (0L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_256B_XI (1L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_512B_XI (2L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_1024B_XI (3L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_2048B_XI (4L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_4096B_XI (5L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_EN_XI (1L<<19)
+#define BNX2_DMA_CONFIG_NO_64SWAP_EN_XI (1L<<31)
+
+#define BNX2_DMA_BLACKOUT 0x00000c0c
+#define BNX2_DMA_BLACKOUT_RD_RETRY_BLACKOUT (0xffL<<0)
+#define BNX2_DMA_BLACKOUT_2ND_RD_RETRY_BLACKOUT (0xffL<<8)
+#define BNX2_DMA_BLACKOUT_WR_RETRY_BLACKOUT (0xffL<<16)
+
+#define BNX2_DMA_READ_MASTER_SETTING_0 0x00000c10
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_NO_SNOOP (1L<<0)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_RELAX_ORDER (1L<<1)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PRIORITY (1L<<2)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_TRAFFIC_CLASS (0x7L<<4)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PARAM_EN (1L<<7)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_NO_SNOOP (1L<<8)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_RELAX_ORDER (1L<<9)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PRIORITY (1L<<10)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_TRAFFIC_CLASS (0x7L<<12)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PARAM_EN (1L<<15)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_NO_SNOOP (1L<<16)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_RELAX_ORDER (1L<<17)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PRIORITY (1L<<18)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_TRAFFIC_CLASS (0x7L<<20)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PARAM_EN (1L<<23)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_NO_SNOOP (1L<<24)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_RELAX_ORDER (1L<<25)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PRIORITY (1L<<26)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_TRAFFIC_CLASS (0x7L<<28)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PARAM_EN (1L<<31)
+
+#define BNX2_DMA_READ_MASTER_SETTING_1 0x00000c14
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_NO_SNOOP (1L<<0)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_RELAX_ORDER (1L<<1)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PRIORITY (1L<<2)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_TRAFFIC_CLASS (0x7L<<4)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PARAM_EN (1L<<7)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_NO_SNOOP (1L<<8)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_RELAX_ORDER (1L<<9)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PRIORITY (1L<<10)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_TRAFFIC_CLASS (0x7L<<12)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PARAM_EN (1L<<15)
+
+#define BNX2_DMA_WRITE_MASTER_SETTING_0 0x00000c18
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_NO_SNOOP (1L<<0)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_RELAX_ORDER (1L<<1)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PRIORITY (1L<<2)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_CS_VLD (1L<<3)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_TRAFFIC_CLASS (0x7L<<4)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PARAM_EN (1L<<7)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_NO_SNOOP (1L<<8)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_RELAX_ORDER (1L<<9)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PRIORITY (1L<<10)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_CS_VLD (1L<<11)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_TRAFFIC_CLASS (0x7L<<12)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PARAM_EN (1L<<15)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_NO_SNOOP (1L<<24)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_RELAX_ORDER (1L<<25)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PRIORITY (1L<<26)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_CS_VLD (1L<<27)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_TRAFFIC_CLASS (0x7L<<28)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PARAM_EN (1L<<31)
+
+#define BNX2_DMA_WRITE_MASTER_SETTING_1 0x00000c1c
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_NO_SNOOP (1L<<0)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_RELAX_ORDER (1L<<1)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PRIORITY (1L<<2)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_CS_VLD (1L<<3)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_TRAFFIC_CLASS (0x7L<<4)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PARAM_EN (1L<<7)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_NO_SNOOP (1L<<8)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_RELAX_ORDER (1L<<9)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PRIORITY (1L<<10)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_CS_VLD (1L<<11)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_TRAFFIC_CLASS (0x7L<<12)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PARAM_EN (1L<<15)
+
+#define BNX2_DMA_ARBITER 0x00000c20
+#define BNX2_DMA_ARBITER_NUM_READS (0x7L<<0)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE (1L<<4)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE_STRICT (0L<<4)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE_RND_RBN (1L<<4)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE (0x3L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_STRICT (0L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_RND_RBN (1L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_WGT_RND_RBN (2L<<5)
+#define BNX2_DMA_ARBITER_ALT_MODE_EN (1L<<8)
+#define BNX2_DMA_ARBITER_RR_MODE (1L<<9)
+#define BNX2_DMA_ARBITER_TIMER_MODE (1L<<10)
+#define BNX2_DMA_ARBITER_OUSTD_READ_REQ (0xfL<<12)
+
+#define BNX2_DMA_ARB_TIMERS 0x00000c24
+#define BNX2_DMA_ARB_TIMERS_RD_DRR_WAIT_TIME (0xffL<<0)
+#define BNX2_DMA_ARB_TIMERS_TM_MIN_TIMEOUT (0xffL<<12)
+#define BNX2_DMA_ARB_TIMERS_TM_MAX_TIMEOUT (0xfffL<<20)
+
+#define BNX2_DMA_DEBUG_VECT_PEEK 0x00000c2c
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_DMA_TAG_RAM_00 0x00000c30
+#define BNX2_DMA_TAG_RAM_00_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_00_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_00_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_00_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_00_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_01 0x00000c34
+#define BNX2_DMA_TAG_RAM_01_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_01_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_01_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_01_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_01_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_02 0x00000c38
+#define BNX2_DMA_TAG_RAM_02_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_02_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_02_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_02_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_02_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_03 0x00000c3c
+#define BNX2_DMA_TAG_RAM_03_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_03_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_03_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_03_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_03_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_04 0x00000c40
+#define BNX2_DMA_TAG_RAM_04_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_04_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_04_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_04_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_04_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_05 0x00000c44
+#define BNX2_DMA_TAG_RAM_05_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_05_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_05_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_05_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_05_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_06 0x00000c48
+#define BNX2_DMA_TAG_RAM_06_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_06_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_06_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_06_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_06_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_07 0x00000c4c
+#define BNX2_DMA_TAG_RAM_07_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_07_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_07_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_07_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_07_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_08 0x00000c50
+#define BNX2_DMA_TAG_RAM_08_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_08_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_08_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_08_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_08_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_09 0x00000c54
+#define BNX2_DMA_TAG_RAM_09_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_09_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_09_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_09_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_09_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_10 0x00000c58
+#define BNX2_DMA_TAG_RAM_10_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_10_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_10_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_10_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_10_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_11 0x00000c5c
+#define BNX2_DMA_TAG_RAM_11_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_11_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_11_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_11_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_11_VALID (1L<<10)
+
+#define BNX2_DMA_RCHAN_STAT_22 0x00000c60
+#define BNX2_DMA_RCHAN_STAT_30 0x00000c64
+#define BNX2_DMA_RCHAN_STAT_31 0x00000c68
+#define BNX2_DMA_RCHAN_STAT_32 0x00000c6c
+#define BNX2_DMA_RCHAN_STAT_40 0x00000c70
+#define BNX2_DMA_RCHAN_STAT_41 0x00000c74
+#define BNX2_DMA_RCHAN_STAT_42 0x00000c78
+#define BNX2_DMA_RCHAN_STAT_50 0x00000c7c
+#define BNX2_DMA_RCHAN_STAT_51 0x00000c80
+#define BNX2_DMA_RCHAN_STAT_52 0x00000c84
+#define BNX2_DMA_RCHAN_STAT_60 0x00000c88
+#define BNX2_DMA_RCHAN_STAT_61 0x00000c8c
+#define BNX2_DMA_RCHAN_STAT_62 0x00000c90
+#define BNX2_DMA_RCHAN_STAT_70 0x00000c94
+#define BNX2_DMA_RCHAN_STAT_71 0x00000c98
+#define BNX2_DMA_RCHAN_STAT_72 0x00000c9c
+#define BNX2_DMA_WCHAN_STAT_00 0x00000ca0
+#define BNX2_DMA_WCHAN_STAT_00_WCHAN_STA_HOST_ADDR_LOW (0xffffffffL<<0)
+
+#define BNX2_DMA_WCHAN_STAT_01 0x00000ca4
+#define BNX2_DMA_WCHAN_STAT_01_WCHAN_STA_HOST_ADDR_HIGH (0xffffffffL<<0)
+
+#define BNX2_DMA_WCHAN_STAT_02 0x00000ca8
+#define BNX2_DMA_WCHAN_STAT_02_LENGTH (0xffffL<<0)
+#define BNX2_DMA_WCHAN_STAT_02_WORD_SWAP (1L<<16)
+#define BNX2_DMA_WCHAN_STAT_02_BYTE_SWAP (1L<<17)
+#define BNX2_DMA_WCHAN_STAT_02_PRIORITY_LVL (1L<<18)
+
+#define BNX2_DMA_WCHAN_STAT_10 0x00000cac
+#define BNX2_DMA_WCHAN_STAT_11 0x00000cb0
+#define BNX2_DMA_WCHAN_STAT_12 0x00000cb4
+#define BNX2_DMA_WCHAN_STAT_20 0x00000cb8
+#define BNX2_DMA_WCHAN_STAT_21 0x00000cbc
+#define BNX2_DMA_WCHAN_STAT_22 0x00000cc0
+#define BNX2_DMA_WCHAN_STAT_30 0x00000cc4
+#define BNX2_DMA_WCHAN_STAT_31 0x00000cc8
+#define BNX2_DMA_WCHAN_STAT_32 0x00000ccc
+#define BNX2_DMA_WCHAN_STAT_40 0x00000cd0
+#define BNX2_DMA_WCHAN_STAT_41 0x00000cd4
+#define BNX2_DMA_WCHAN_STAT_42 0x00000cd8
+#define BNX2_DMA_WCHAN_STAT_50 0x00000cdc
+#define BNX2_DMA_WCHAN_STAT_51 0x00000ce0
+#define BNX2_DMA_WCHAN_STAT_52 0x00000ce4
+#define BNX2_DMA_WCHAN_STAT_60 0x00000ce8
+#define BNX2_DMA_WCHAN_STAT_61 0x00000cec
+#define BNX2_DMA_WCHAN_STAT_62 0x00000cf0
+#define BNX2_DMA_WCHAN_STAT_70 0x00000cf4
+#define BNX2_DMA_WCHAN_STAT_71 0x00000cf8
+#define BNX2_DMA_WCHAN_STAT_72 0x00000cfc
+#define BNX2_DMA_ARB_STAT_00 0x00000d00
+#define BNX2_DMA_ARB_STAT_00_MASTER (0xffffL<<0)
+#define BNX2_DMA_ARB_STAT_00_MASTER_ENC (0xffL<<16)
+#define BNX2_DMA_ARB_STAT_00_CUR_BINMSTR (0xffL<<24)
+
+#define BNX2_DMA_ARB_STAT_01 0x00000d04
+#define BNX2_DMA_ARB_STAT_01_LPR_RPTR (0xfL<<0)
+#define BNX2_DMA_ARB_STAT_01_LPR_WPTR (0xfL<<4)
+#define BNX2_DMA_ARB_STAT_01_LPB_RPTR (0xfL<<8)
+#define BNX2_DMA_ARB_STAT_01_LPB_WPTR (0xfL<<12)
+#define BNX2_DMA_ARB_STAT_01_HPR_RPTR (0xfL<<16)
+#define BNX2_DMA_ARB_STAT_01_HPR_WPTR (0xfL<<20)
+#define BNX2_DMA_ARB_STAT_01_HPB_RPTR (0xfL<<24)
+#define BNX2_DMA_ARB_STAT_01_HPB_WPTR (0xfL<<28)
+
+#define BNX2_DMA_FUSE_CTRL0_CMD 0x00000f00
+#define BNX2_DMA_FUSE_CTRL0_CMD_PWRUP_DONE (1L<<0)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT_DONE (1L<<1)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT (1L<<2)
+#define BNX2_DMA_FUSE_CTRL0_CMD_LOAD (1L<<3)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SEL (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL0_DATA 0x00000f04
+#define BNX2_DMA_FUSE_CTRL1_CMD 0x00000f08
+#define BNX2_DMA_FUSE_CTRL1_CMD_PWRUP_DONE (1L<<0)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT_DONE (1L<<1)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT (1L<<2)
+#define BNX2_DMA_FUSE_CTRL1_CMD_LOAD (1L<<3)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SEL (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL1_DATA 0x00000f0c
+#define BNX2_DMA_FUSE_CTRL2_CMD 0x00000f10
+#define BNX2_DMA_FUSE_CTRL2_CMD_PWRUP_DONE (1L<<0)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT_DONE (1L<<1)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT (1L<<2)
+#define BNX2_DMA_FUSE_CTRL2_CMD_LOAD (1L<<3)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SEL (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL2_DATA 0x00000f14
+
+
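Every multi-bit field in these register blocks is published as a pre-shifted mask (for example, BNX2_DMA_ARBITER_NUM_READS is (0x7L<<0)), so a field update is an ordinary read-modify-write against the full mask. A minimal illustrative sketch, not part of the patch itself, assuming the REG_RD()/REG_WR() accessors that bnx2.c wraps around readl()/writel(); the helper name is hypothetical:

/* Illustrative only: set the DMA arbiter's outstanding-read count
 * using the pre-shifted field mask defined above.
 */
static void bnx2_set_arb_num_reads(struct bnx2 *bp, u32 num_reads)
{
        u32 val = REG_RD(bp, BNX2_DMA_ARBITER);

        val &= ~BNX2_DMA_ARBITER_NUM_READS;     /* clear the 3-bit field */
        val |= num_reads & BNX2_DMA_ARBITER_NUM_READS;  /* field starts at bit 0 */
        REG_WR(bp, BNX2_DMA_ARBITER, val);
}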
+/*
+ * context_reg definition
+ * offset: 0x1000
+ */
+#define BNX2_CTX_COMMAND 0x00001000
+#define BNX2_CTX_COMMAND_ENABLED (1L<<0)
+#define BNX2_CTX_COMMAND_DISABLE_USAGE_CNT (1L<<1)
+#define BNX2_CTX_COMMAND_DISABLE_PLRU (1L<<2)
+#define BNX2_CTX_COMMAND_DISABLE_COMBINE_READ (1L<<3)
+#define BNX2_CTX_COMMAND_FLUSH_AHEAD (0x1fL<<8)
+#define BNX2_CTX_COMMAND_MEM_INIT (1L<<13)
+#define BNX2_CTX_COMMAND_PAGE_SIZE (0xfL<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_256 (0L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_512 (1L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_1K (2L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_2K (3L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_4K (4L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_8K (5L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_16K (6L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_32K (7L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_64K (8L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_128K (9L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_256K (10L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_512K (11L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_1M (12L<<16)
+
+#define BNX2_CTX_STATUS 0x00001004
+#define BNX2_CTX_STATUS_LOCK_WAIT (1L<<0)
+#define BNX2_CTX_STATUS_READ_STAT (1L<<16)
+#define BNX2_CTX_STATUS_WRITE_STAT (1L<<17)
+#define BNX2_CTX_STATUS_ACC_STALL_STAT (1L<<18)
+#define BNX2_CTX_STATUS_LOCK_STALL_STAT (1L<<19)
+#define BNX2_CTX_STATUS_EXT_READ_STAT (1L<<20)
+#define BNX2_CTX_STATUS_EXT_WRITE_STAT (1L<<21)
+#define BNX2_CTX_STATUS_MISS_STAT (1L<<22)
+#define BNX2_CTX_STATUS_HIT_STAT (1L<<23)
+#define BNX2_CTX_STATUS_DEAD_LOCK (1L<<24)
+#define BNX2_CTX_STATUS_USAGE_CNT_ERR (1L<<25)
+#define BNX2_CTX_STATUS_INVALID_PAGE (1L<<26)
+
+#define BNX2_CTX_VIRT_ADDR 0x00001008
+#define BNX2_CTX_VIRT_ADDR_VIRT_ADDR (0x7fffL<<6)
+
+#define BNX2_CTX_PAGE_TBL 0x0000100c
+#define BNX2_CTX_PAGE_TBL_PAGE_TBL (0x3fffL<<6)
+
+#define BNX2_CTX_DATA_ADR 0x00001010
+#define BNX2_CTX_DATA_ADR_DATA_ADR (0x7ffffL<<2)
+
+#define BNX2_CTX_DATA 0x00001014
+#define BNX2_CTX_LOCK 0x00001018
+#define BNX2_CTX_LOCK_TYPE (0x7L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_VOID (0x0L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_PROTOCOL (0x1L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TX (0x2L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TIMER (0x4L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_COMPLETE (0x7L<<0)
+#define BNX2_CTX_LOCK_TYPE_VOID_XI (0L<<0)
+#define BNX2_CTX_LOCK_TYPE_PROTOCOL_XI (1L<<0)
+#define BNX2_CTX_LOCK_TYPE_TX_XI (2L<<0)
+#define BNX2_CTX_LOCK_TYPE_TIMER_XI (4L<<0)
+#define BNX2_CTX_LOCK_TYPE_COMPLETE_XI (7L<<0)
+#define BNX2_CTX_LOCK_CID_VALUE (0x3fffL<<7)
+#define BNX2_CTX_LOCK_GRANTED (1L<<26)
+#define BNX2_CTX_LOCK_MODE (0x7L<<27)
+#define BNX2_CTX_LOCK_MODE_UNLOCK (0x0L<<27)
+#define BNX2_CTX_LOCK_MODE_IMMEDIATE (0x1L<<27)
+#define BNX2_CTX_LOCK_MODE_SURE (0x2L<<27)
+#define BNX2_CTX_LOCK_STATUS (1L<<30)
+#define BNX2_CTX_LOCK_REQ (1L<<31)
+
+#define BNX2_CTX_CTX_CTRL 0x0000101c
+#define BNX2_CTX_CTX_CTRL_CTX_ADDR (0x7ffffL<<2)
+#define BNX2_CTX_CTX_CTRL_MOD_USAGE_CNT (0x3L<<21)
+#define BNX2_CTX_CTX_CTRL_NO_RAM_ACC (1L<<23)
+#define BNX2_CTX_CTX_CTRL_PREFETCH_SIZE (0x3L<<24)
+#define BNX2_CTX_CTX_CTRL_ATTR (1L<<26)
+#define BNX2_CTX_CTX_CTRL_WRITE_REQ (1L<<30)
+#define BNX2_CTX_CTX_CTRL_READ_REQ (1L<<31)
+
+#define BNX2_CTX_CTX_DATA 0x00001020
+#define BNX2_CTX_ACCESS_STATUS 0x00001040
+#define BNX2_CTX_ACCESS_STATUS_MASTERENCODED (0xfL<<0)
+#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYSM (0x3L<<10)
+#define BNX2_CTX_ACCESS_STATUS_PAGETABLEINITSM (0x3L<<12)
+#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYINITSM (0x3L<<14)
+#define BNX2_CTX_ACCESS_STATUS_QUALIFIED_REQUEST (0x7ffL<<17)
+#define BNX2_CTX_ACCESS_STATUS_CAMMASTERENCODED_XI (0x1fL<<0)
+#define BNX2_CTX_ACCESS_STATUS_CACHEMASTERENCODED_XI (0x1fL<<5)
+#define BNX2_CTX_ACCESS_STATUS_REQUEST_XI (0x3fffffL<<10)
+
+#define BNX2_CTX_DBG_LOCK_STATUS 0x00001044
+#define BNX2_CTX_DBG_LOCK_STATUS_SM (0x3ffL<<0)
+#define BNX2_CTX_DBG_LOCK_STATUS_MATCH (0x3ffL<<22)
+
+#define BNX2_CTX_CACHE_CTRL_STATUS 0x00001048
+#define BNX2_CTX_CACHE_CTRL_STATUS_RFIFO_OVERFLOW (1L<<0)
+#define BNX2_CTX_CACHE_CTRL_STATUS_INVALID_READ_COMP (1L<<1)
+#define BNX2_CTX_CACHE_CTRL_STATUS_FLUSH_START (1L<<6)
+#define BNX2_CTX_CACHE_CTRL_STATUS_FREE_ENTRY_CNT (0x3fL<<7)
+#define BNX2_CTX_CACHE_CTRL_STATUS_CACHE_ENTRY_NEEDED (0x3fL<<13)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN0_ACTIVE (1L<<19)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN1_ACTIVE (1L<<20)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN2_ACTIVE (1L<<21)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN3_ACTIVE (1L<<22)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN4_ACTIVE (1L<<23)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN5_ACTIVE (1L<<24)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN6_ACTIVE (1L<<25)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN7_ACTIVE (1L<<26)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN8_ACTIVE (1L<<27)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN9_ACTIVE (1L<<28)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN10_ACTIVE (1L<<29)
+
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS 0x0000104c
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_DWC (0x7L<<0)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_WFIFOC (0x7L<<3)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RTAGC (0x7L<<6)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RFIFOC (0x7L<<9)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_INVALID_BLK_ADDR (0x7fffL<<16)
+
+#define BNX2_CTX_CACHE_STATUS 0x00001050
+#define BNX2_CTX_CACHE_STATUS_HELD_ENTRIES (0x3ffL<<0)
+#define BNX2_CTX_CACHE_STATUS_MAX_HELD_ENTRIES (0x3ffL<<16)
+
+#define BNX2_CTX_DMA_STATUS 0x00001054
+#define BNX2_CTX_DMA_STATUS_RD_CHAN0_STATUS (0x3L<<0)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN1_STATUS (0x3L<<2)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN2_STATUS (0x3L<<4)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN3_STATUS (0x3L<<6)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN4_STATUS (0x3L<<8)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN5_STATUS (0x3L<<10)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN6_STATUS (0x3L<<12)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN7_STATUS (0x3L<<14)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN8_STATUS (0x3L<<16)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN9_STATUS (0x3L<<18)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN10_STATUS (0x3L<<20)
+
+#define BNX2_CTX_REP_STATUS 0x00001058
+#define BNX2_CTX_REP_STATUS_ERROR_ENTRY (0x3ffL<<0)
+#define BNX2_CTX_REP_STATUS_ERROR_CLIENT_ID (0x1fL<<10)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MAX_ERR (1L<<16)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MIN_ERR (1L<<17)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MISS_ERR (1L<<18)
+
+#define BNX2_CTX_CKSUM_ERROR_STATUS 0x0000105c
+#define BNX2_CTX_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0)
+#define BNX2_CTX_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16)
+
+#define BNX2_CTX_CHNL_LOCK_STATUS_0 0x00001080
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_CID (0x3fffL<<0)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE (0x3L<<14)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE (1L<<16)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE_XI (1L<<14)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE_XI (0x7L<<15)
+
+#define BNX2_CTX_CHNL_LOCK_STATUS_1 0x00001084
+#define BNX2_CTX_CHNL_LOCK_STATUS_2 0x00001088
+#define BNX2_CTX_CHNL_LOCK_STATUS_3 0x0000108c
+#define BNX2_CTX_CHNL_LOCK_STATUS_4 0x00001090
+#define BNX2_CTX_CHNL_LOCK_STATUS_5 0x00001094
+#define BNX2_CTX_CHNL_LOCK_STATUS_6 0x00001098
+#define BNX2_CTX_CHNL_LOCK_STATUS_7 0x0000109c
+#define BNX2_CTX_CHNL_LOCK_STATUS_8 0x000010a0
+#define BNX2_CTX_CHNL_LOCK_STATUS_9 0x000010a4
+
+#define BNX2_CTX_CACHE_DATA 0x000010c4
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL 0x000010c8
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_PAGE_TBL_ADDR (0x1ffL<<0)
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ (1L<<30)
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_READ_REQ (1L<<31)
+
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0 0x000010cc
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID (1L<<0)
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALUE (0xffffffL<<8)
+
+#define BNX2_CTX_HOST_PAGE_TBL_DATA1 0x000010d0
+#define BNX2_CTX_CAM_CTRL 0x000010d4
+#define BNX2_CTX_CAM_CTRL_CAM_ADDR (0x3ffL<<0)
+#define BNX2_CTX_CAM_CTRL_RESET (1L<<27)
+#define BNX2_CTX_CAM_CTRL_INVALIDATE (1L<<28)
+#define BNX2_CTX_CAM_CTRL_SEARCH (1L<<29)
+#define BNX2_CTX_CAM_CTRL_WRITE_REQ (1L<<30)
+#define BNX2_CTX_CAM_CTRL_READ_REQ (1L<<31)
+
+
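The BNX2_CTX_HOST_PAGE_TBL_* registers form an indirect write port: the entry data goes into DATA0/DATA1, then the table index plus WRITE_REQ is written to CTRL and the bit is polled until the hardware clears it. A hedged sketch of that sequence, modelled on how bnx2.c programs the 5709 context page table (helper name and retry counts are illustrative):

/* Illustrative sketch: write one host page-table entry and wait for
 * the hardware to acknowledge by clearing WRITE_REQ.
 */
static int bnx2_write_ctx_page_entry(struct bnx2 *bp, u32 idx, u64 addr)
{
        int i;

        REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
               (u32) (addr & 0xffffffff) | BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
        REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1, (u32) (addr >> 32));
        REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL,
               idx | BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);

        for (i = 0; i < 10; i++) {
                if (!(REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL) &
                      BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                        return 0;       /* entry accepted */
                udelay(5);
        }
        return -EBUSY;
}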
+/*
+ * emac_reg definition
+ * offset: 0x1400
+ */
+#define BNX2_EMAC_MODE 0x00001400
+#define BNX2_EMAC_MODE_RESET (1L<<0)
+#define BNX2_EMAC_MODE_HALF_DUPLEX (1L<<1)
+#define BNX2_EMAC_MODE_PORT (0x3L<<2)
+#define BNX2_EMAC_MODE_PORT_NONE (0L<<2)
+#define BNX2_EMAC_MODE_PORT_MII (1L<<2)
+#define BNX2_EMAC_MODE_PORT_GMII (2L<<2)
+#define BNX2_EMAC_MODE_PORT_MII_10M (3L<<2)
+#define BNX2_EMAC_MODE_MAC_LOOP (1L<<4)
+#define BNX2_EMAC_MODE_25G_MODE (1L<<5)
+#define BNX2_EMAC_MODE_TAGGED_MAC_CTL (1L<<7)
+#define BNX2_EMAC_MODE_TX_BURST (1L<<8)
+#define BNX2_EMAC_MODE_MAX_DEFER_DROP_ENA (1L<<9)
+#define BNX2_EMAC_MODE_EXT_LINK_POL (1L<<10)
+#define BNX2_EMAC_MODE_FORCE_LINK (1L<<11)
+#define BNX2_EMAC_MODE_SERDES_MODE (1L<<12)
+#define BNX2_EMAC_MODE_BOND_OVRD (1L<<13)
+#define BNX2_EMAC_MODE_MPKT (1L<<18)
+#define BNX2_EMAC_MODE_MPKT_RCVD (1L<<19)
+#define BNX2_EMAC_MODE_ACPI_RCVD (1L<<20)
+
+#define BNX2_EMAC_STATUS 0x00001404
+#define BNX2_EMAC_STATUS_LINK (1L<<11)
+#define BNX2_EMAC_STATUS_LINK_CHANGE (1L<<12)
+#define BNX2_EMAC_STATUS_SERDES_AUTONEG_COMPLETE (1L<<13)
+#define BNX2_EMAC_STATUS_SERDES_AUTONEG_CHANGE (1L<<14)
+#define BNX2_EMAC_STATUS_SERDES_NXT_PG_CHANGE (1L<<16)
+#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0 (1L<<17)
+#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0_CHANGE (1L<<18)
+#define BNX2_EMAC_STATUS_MI_COMPLETE (1L<<22)
+#define BNX2_EMAC_STATUS_MI_INT (1L<<23)
+#define BNX2_EMAC_STATUS_AP_ERROR (1L<<24)
+#define BNX2_EMAC_STATUS_PARITY_ERROR_STATE (1L<<31)
+
+#define BNX2_EMAC_ATTENTION_ENA 0x00001408
+#define BNX2_EMAC_ATTENTION_ENA_LINK (1L<<11)
+#define BNX2_EMAC_ATTENTION_ENA_AUTONEG_CHANGE (1L<<14)
+#define BNX2_EMAC_ATTENTION_ENA_NXT_PG_CHANGE (1L<<16)
+#define BNX2_EMAC_ATTENTION_ENA_SERDES_RX_CONFIG_IS_0_CHANGE (1L<<18)
+#define BNX2_EMAC_ATTENTION_ENA_MI_COMPLETE (1L<<22)
+#define BNX2_EMAC_ATTENTION_ENA_MI_INT (1L<<23)
+#define BNX2_EMAC_ATTENTION_ENA_AP_ERROR (1L<<24)
+
+#define BNX2_EMAC_LED 0x0000140c
+#define BNX2_EMAC_LED_OVERRIDE (1L<<0)
+#define BNX2_EMAC_LED_1000MB_OVERRIDE (1L<<1)
+#define BNX2_EMAC_LED_100MB_OVERRIDE (1L<<2)
+#define BNX2_EMAC_LED_10MB_OVERRIDE (1L<<3)
+#define BNX2_EMAC_LED_TRAFFIC_OVERRIDE (1L<<4)
+#define BNX2_EMAC_LED_BLNK_TRAFFIC (1L<<5)
+#define BNX2_EMAC_LED_TRAFFIC (1L<<6)
+#define BNX2_EMAC_LED_1000MB (1L<<7)
+#define BNX2_EMAC_LED_100MB (1L<<8)
+#define BNX2_EMAC_LED_10MB (1L<<9)
+#define BNX2_EMAC_LED_TRAFFIC_STAT (1L<<10)
+#define BNX2_EMAC_LED_2500MB (1L<<11)
+#define BNX2_EMAC_LED_2500MB_OVERRIDE (1L<<12)
+#define BNX2_EMAC_LED_ACTIVITY_SEL (0x3L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_0 (0L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_1 (1L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_2 (2L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_3 (3L<<17)
+#define BNX2_EMAC_LED_BLNK_RATE (0xfffL<<19)
+#define BNX2_EMAC_LED_BLNK_RATE_ENA (1L<<31)
+
+#define BNX2_EMAC_MAC_MATCH0 0x00001410
+#define BNX2_EMAC_MAC_MATCH1 0x00001414
+#define BNX2_EMAC_MAC_MATCH2 0x00001418
+#define BNX2_EMAC_MAC_MATCH3 0x0000141c
+#define BNX2_EMAC_MAC_MATCH4 0x00001420
+#define BNX2_EMAC_MAC_MATCH5 0x00001424
+#define BNX2_EMAC_MAC_MATCH6 0x00001428
+#define BNX2_EMAC_MAC_MATCH7 0x0000142c
+#define BNX2_EMAC_MAC_MATCH8 0x00001430
+#define BNX2_EMAC_MAC_MATCH9 0x00001434
+#define BNX2_EMAC_MAC_MATCH10 0x00001438
+#define BNX2_EMAC_MAC_MATCH11 0x0000143c
+#define BNX2_EMAC_MAC_MATCH12 0x00001440
+#define BNX2_EMAC_MAC_MATCH13 0x00001444
+#define BNX2_EMAC_MAC_MATCH14 0x00001448
+#define BNX2_EMAC_MAC_MATCH15 0x0000144c
+#define BNX2_EMAC_MAC_MATCH16 0x00001450
+#define BNX2_EMAC_MAC_MATCH17 0x00001454
+#define BNX2_EMAC_MAC_MATCH18 0x00001458
+#define BNX2_EMAC_MAC_MATCH19 0x0000145c
+#define BNX2_EMAC_MAC_MATCH20 0x00001460
+#define BNX2_EMAC_MAC_MATCH21 0x00001464
+#define BNX2_EMAC_MAC_MATCH22 0x00001468
+#define BNX2_EMAC_MAC_MATCH23 0x0000146c
+#define BNX2_EMAC_MAC_MATCH24 0x00001470
+#define BNX2_EMAC_MAC_MATCH25 0x00001474
+#define BNX2_EMAC_MAC_MATCH26 0x00001478
+#define BNX2_EMAC_MAC_MATCH27 0x0000147c
+#define BNX2_EMAC_MAC_MATCH28 0x00001480
+#define BNX2_EMAC_MAC_MATCH29 0x00001484
+#define BNX2_EMAC_MAC_MATCH30 0x00001488
+#define BNX2_EMAC_MAC_MATCH31 0x0000148c
+#define BNX2_EMAC_BACKOFF_SEED 0x00001498
+#define BNX2_EMAC_BACKOFF_SEED_EMAC_BACKOFF_SEED (0x3ffL<<0)
+
+#define BNX2_EMAC_RX_MTU_SIZE 0x0000149c
+#define BNX2_EMAC_RX_MTU_SIZE_MTU_SIZE (0xffffL<<0)
+#define BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
+
+#define BNX2_EMAC_SERDES_CNTL 0x000014a4
+#define BNX2_EMAC_SERDES_CNTL_RXR (0x7L<<0)
+#define BNX2_EMAC_SERDES_CNTL_RXG (0x3L<<3)
+#define BNX2_EMAC_SERDES_CNTL_RXCKSEL (1L<<6)
+#define BNX2_EMAC_SERDES_CNTL_TXBIAS (0x7L<<7)
+#define BNX2_EMAC_SERDES_CNTL_BGMAX (1L<<10)
+#define BNX2_EMAC_SERDES_CNTL_BGMIN (1L<<11)
+#define BNX2_EMAC_SERDES_CNTL_TXMODE (1L<<12)
+#define BNX2_EMAC_SERDES_CNTL_TXEDGE (1L<<13)
+#define BNX2_EMAC_SERDES_CNTL_SERDES_MODE (1L<<14)
+#define BNX2_EMAC_SERDES_CNTL_PLLTEST (1L<<15)
+#define BNX2_EMAC_SERDES_CNTL_CDET_EN (1L<<16)
+#define BNX2_EMAC_SERDES_CNTL_TBI_LBK (1L<<17)
+#define BNX2_EMAC_SERDES_CNTL_REMOTE_LBK (1L<<18)
+#define BNX2_EMAC_SERDES_CNTL_REV_PHASE (1L<<19)
+#define BNX2_EMAC_SERDES_CNTL_REGCTL12 (0x3L<<20)
+#define BNX2_EMAC_SERDES_CNTL_REGCTL25 (0x3L<<22)
+
+#define BNX2_EMAC_SERDES_STATUS 0x000014a8
+#define BNX2_EMAC_SERDES_STATUS_RX_STAT (0xffL<<0)
+#define BNX2_EMAC_SERDES_STATUS_COMMA_DET (1L<<8)
+
+#define BNX2_EMAC_MDIO_COMM 0x000014ac
+#define BNX2_EMAC_MDIO_COMM_DATA (0xffffL<<0)
+#define BNX2_EMAC_MDIO_COMM_REG_ADDR (0x1fL<<16)
+#define BNX2_EMAC_MDIO_COMM_PHY_ADDR (0x1fL<<21)
+#define BNX2_EMAC_MDIO_COMM_COMMAND (0x3L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_0 (0L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_22_XI (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_45_XI (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_22_XI (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_INC_45_XI (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_3 (3L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
+#define BNX2_EMAC_MDIO_COMM_FAIL (1L<<28)
+#define BNX2_EMAC_MDIO_COMM_START_BUSY (1L<<29)
+#define BNX2_EMAC_MDIO_COMM_DISEXT (1L<<30)
+
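BNX2_EMAC_MDIO_COMM packs a whole MII transaction into one register: PHY address, register address, command, and a START_BUSY bit that doubles as the completion flag. A hedged sketch of a clause-22 read built from these masks (the driver's real bnx2_read_phy() wraps a similar sequence with auto-poll handling; the helper here is illustrative):

/* Illustrative sketch: issue a clause-22 MDIO read and poll for
 * completion.  Returns the 16-bit register value or a negative errno.
 */
static int bnx2_mdio_read(struct bnx2 *bp, u32 phy, u32 reg)
{
        u32 val = (phy << 21) | (reg << 16) |
                  BNX2_EMAC_MDIO_COMM_COMMAND_READ |
                  BNX2_EMAC_MDIO_COMM_DISEXT |
                  BNX2_EMAC_MDIO_COMM_START_BUSY;
        int i;

        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);
                val = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (val & BNX2_EMAC_MDIO_COMM_START_BUSY)
                        continue;       /* transaction still in flight */
                if (val & BNX2_EMAC_MDIO_COMM_FAIL)
                        return -EIO;
                return val & BNX2_EMAC_MDIO_COMM_DATA;
        }
        return -EBUSY;
}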
+#define BNX2_EMAC_MDIO_STATUS 0x000014b0
+#define BNX2_EMAC_MDIO_STATUS_LINK (1L<<0)
+#define BNX2_EMAC_MDIO_STATUS_10MB (1L<<1)
+
+#define BNX2_EMAC_MDIO_MODE 0x000014b4
+#define BNX2_EMAC_MDIO_MODE_SHORT_PREAMBLE (1L<<1)
+#define BNX2_EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
+#define BNX2_EMAC_MDIO_MODE_BIT_BANG (1L<<8)
+#define BNX2_EMAC_MDIO_MODE_MDIO (1L<<9)
+#define BNX2_EMAC_MDIO_MODE_MDIO_OE (1L<<10)
+#define BNX2_EMAC_MDIO_MODE_MDC (1L<<11)
+#define BNX2_EMAC_MDIO_MODE_MDINT (1L<<12)
+#define BNX2_EMAC_MDIO_MODE_EXT_MDINT (1L<<13)
+#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT (0x1fL<<16)
+#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT_XI (0x3fL<<16)
+#define BNX2_EMAC_MDIO_MODE_CLAUSE_45_XI (1L<<31)
+
+#define BNX2_EMAC_MDIO_AUTO_STATUS 0x000014b8
+#define BNX2_EMAC_MDIO_AUTO_STATUS_AUTO_ERR (1L<<0)
+
+#define BNX2_EMAC_TX_MODE 0x000014bc
+#define BNX2_EMAC_TX_MODE_RESET (1L<<0)
+#define BNX2_EMAC_TX_MODE_CS16_TEST (1L<<2)
+#define BNX2_EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+#define BNX2_EMAC_TX_MODE_FLOW_EN (1L<<4)
+#define BNX2_EMAC_TX_MODE_BIG_BACKOFF (1L<<5)
+#define BNX2_EMAC_TX_MODE_LONG_PAUSE (1L<<6)
+#define BNX2_EMAC_TX_MODE_LINK_AWARE (1L<<7)
+
+#define BNX2_EMAC_TX_STATUS 0x000014c0
+#define BNX2_EMAC_TX_STATUS_XOFFED (1L<<0)
+#define BNX2_EMAC_TX_STATUS_XOFF_SENT (1L<<1)
+#define BNX2_EMAC_TX_STATUS_XON_SENT (1L<<2)
+#define BNX2_EMAC_TX_STATUS_LINK_UP (1L<<3)
+#define BNX2_EMAC_TX_STATUS_UNDERRUN (1L<<4)
+#define BNX2_EMAC_TX_STATUS_CS16_ERROR (1L<<5)
+
+#define BNX2_EMAC_TX_LENGTHS 0x000014c4
+#define BNX2_EMAC_TX_LENGTHS_SLOT (0xffL<<0)
+#define BNX2_EMAC_TX_LENGTHS_IPG (0xfL<<8)
+#define BNX2_EMAC_TX_LENGTHS_IPG_CRS (0x3L<<12)
+
+#define BNX2_EMAC_RX_MODE 0x000014c8
+#define BNX2_EMAC_RX_MODE_RESET (1L<<0)
+#define BNX2_EMAC_RX_MODE_FLOW_EN (1L<<2)
+#define BNX2_EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
+#define BNX2_EMAC_RX_MODE_KEEP_PAUSE (1L<<4)
+#define BNX2_EMAC_RX_MODE_ACCEPT_OVERSIZE (1L<<5)
+#define BNX2_EMAC_RX_MODE_ACCEPT_RUNTS (1L<<6)
+#define BNX2_EMAC_RX_MODE_LLC_CHK (1L<<7)
+#define BNX2_EMAC_RX_MODE_PROMISCUOUS (1L<<8)
+#define BNX2_EMAC_RX_MODE_NO_CRC_CHK (1L<<9)
+#define BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
+#define BNX2_EMAC_RX_MODE_FILT_BROADCAST (1L<<11)
+#define BNX2_EMAC_RX_MODE_SORT_MODE (1L<<12)
+
+#define BNX2_EMAC_RX_STATUS 0x000014cc
+#define BNX2_EMAC_RX_STATUS_FFED (1L<<0)
+#define BNX2_EMAC_RX_STATUS_FF_RECEIVED (1L<<1)
+#define BNX2_EMAC_RX_STATUS_N_RECEIVED (1L<<2)
+
+#define BNX2_EMAC_MULTICAST_HASH0 0x000014d0
+#define BNX2_EMAC_MULTICAST_HASH1 0x000014d4
+#define BNX2_EMAC_MULTICAST_HASH2 0x000014d8
+#define BNX2_EMAC_MULTICAST_HASH3 0x000014dc
+#define BNX2_EMAC_MULTICAST_HASH4 0x000014e0
+#define BNX2_EMAC_MULTICAST_HASH5 0x000014e4
+#define BNX2_EMAC_MULTICAST_HASH6 0x000014e8
+#define BNX2_EMAC_MULTICAST_HASH7 0x000014ec
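The eight MULTICAST_HASH registers form a 256-bit hash filter: each multicast address selects one bit, and the RX_MODE sort settings decide what happens on a match. A hedged sketch of the bin computation (the bit-selection details follow what bnx2.c does in its rx-mode path, but treat them as illustrative):

/* Illustrative sketch: set the hash-filter bit for one multicast
 * address.  mc_filter[] shadows BNX2_EMAC_MULTICAST_HASH0..7.
 */
static void bnx2_hash_mc_addr(u32 *mc_filter, const u8 *addr)
{
        u32 crc = ether_crc_le(ETH_ALEN, addr);
        u32 bit = crc & 0xff;                   /* low 8 bits pick 1 of 256 */
        u32 regidx = (bit & 0xe0) >> 5;         /* which HASHn register */

        mc_filter[regidx] |= 1 << (bit & 0x1f); /* which bit within it */
}

Each shadow word would then be flushed with something like REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (regidx * 4), mc_filter[regidx]).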
+#define BNX2_EMAC_CKSUM_ERROR_STATUS 0x000014f0
+#define BNX2_EMAC_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0)
+#define BNX2_EMAC_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16)
+
+#define BNX2_EMAC_RX_STAT_IFHCINOCTETS 0x00001500
+#define BNX2_EMAC_RX_STAT_IFHCINBADOCTETS 0x00001504
+#define BNX2_EMAC_RX_STAT_ETHERSTATSFRAGMENTS 0x00001508
+#define BNX2_EMAC_RX_STAT_IFHCINUCASTPKTS 0x0000150c
+#define BNX2_EMAC_RX_STAT_IFHCINMULTICASTPKTS 0x00001510
+#define BNX2_EMAC_RX_STAT_IFHCINBROADCASTPKTS 0x00001514
+#define BNX2_EMAC_RX_STAT_DOT3STATSFCSERRORS 0x00001518
+#define BNX2_EMAC_RX_STAT_DOT3STATSALIGNMENTERRORS 0x0000151c
+#define BNX2_EMAC_RX_STAT_DOT3STATSCARRIERSENSEERRORS 0x00001520
+#define BNX2_EMAC_RX_STAT_XONPAUSEFRAMESRECEIVED 0x00001524
+#define BNX2_EMAC_RX_STAT_XOFFPAUSEFRAMESRECEIVED 0x00001528
+#define BNX2_EMAC_RX_STAT_MACCONTROLFRAMESRECEIVED 0x0000152c
+#define BNX2_EMAC_RX_STAT_XOFFSTATEENTERED 0x00001530
+#define BNX2_EMAC_RX_STAT_DOT3STATSFRAMESTOOLONG 0x00001534
+#define BNX2_EMAC_RX_STAT_ETHERSTATSJABBERS 0x00001538
+#define BNX2_EMAC_RX_STAT_ETHERSTATSUNDERSIZEPKTS 0x0000153c
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS64OCTETS 0x00001540
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS 0x00001544
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS 0x00001548
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS 0x0000154c
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS 0x00001550
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS 0x00001554
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTSOVER1522OCTETS 0x00001558
+#define BNX2_EMAC_RXMAC_DEBUG0 0x0000155c
+#define BNX2_EMAC_RXMAC_DEBUG1 0x00001560
+#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_NE_BYTE_COUNT (1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_OUT_RANGE (1L<<1)
+#define BNX2_EMAC_RXMAC_DEBUG1_BAD_CRC (1L<<2)
+#define BNX2_EMAC_RXMAC_DEBUG1_RX_ERROR (1L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG1_ALIGN_ERROR (1L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG1_LAST_DATA (1L<<5)
+#define BNX2_EMAC_RXMAC_DEBUG1_ODD_BYTE_START (1L<<6)
+#define BNX2_EMAC_RXMAC_DEBUG1_BYTE_COUNT (0xffffL<<7)
+#define BNX2_EMAC_RXMAC_DEBUG1_SLOT_TIME (0xffL<<23)
+
+#define BNX2_EMAC_RXMAC_DEBUG2 0x00001564
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_IDLE (0x0L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SFD (0x1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DATA (0x2L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SKEEP (0x3L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_EXT (0x4L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DROP (0x5L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SDROP (0x6L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_FC (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE (0xfL<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_IDLE (0x0L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA0 (0x1L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA1 (0x2L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA2 (0x3L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA3 (0x4L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_ABORT (0x5L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_WAIT (0x6L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_STATUS (0x7L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_LAST (0x8L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_BYTE_IN (0xffL<<7)
+#define BNX2_EMAC_RXMAC_DEBUG2_FALSEC (1L<<15)
+#define BNX2_EMAC_RXMAC_DEBUG2_TAGGED (1L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE (1L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_IDLE (0L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_PAUSED (1L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_SE_COUNTER (0xfL<<19)
+#define BNX2_EMAC_RXMAC_DEBUG2_QUANTA (0x1fL<<23)
+
+#define BNX2_EMAC_RXMAC_DEBUG3 0x00001568
+#define BNX2_EMAC_RXMAC_DEBUG3_PAUSE_CTR (0xffffL<<0)
+#define BNX2_EMAC_RXMAC_DEBUG3_TMP_PAUSE_CTR (0xffffL<<16)
+
+#define BNX2_EMAC_RXMAC_DEBUG4 0x0000156c
+#define BNX2_EMAC_RXMAC_DEBUG4_TYPE_FIELD (0xffffL<<0)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE (0x3fL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_IDLE (0x0L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC2 (0x1L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC3 (0x2L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UNI (0x3L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC3 (0x5L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA1 (0x6L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC2 (0x7L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA2 (0x7L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA3 (0x8L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC2 (0x9L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC3 (0xaL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT1 (0xeL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT2 (0xfL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MCHECK (0x10L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC (0x11L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC2 (0x12L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC3 (0x13L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA1 (0x14L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA2 (0x15L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA3 (0x16L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BTYPE (0x17L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC (0x18L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PTYPE (0x19L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_CMD (0x1aL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MAC (0x1bL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_LATCH (0x1cL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XOFF (0x1dL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XON (0x1eL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PAUSED (0x1fL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_NPAUSED (0x20L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TTYPE (0x21L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TVAL (0x22L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA1 (0x23L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA2 (0x24L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA3 (0x25L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTYPE (0x26L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTTYPE (0x27L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTVAL (0x28L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MTYPE (0x29L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_DROP (0x2aL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_DROP_PKT (1L<<22)
+#define BNX2_EMAC_RXMAC_DEBUG4_SLOT_FILLED (1L<<23)
+#define BNX2_EMAC_RXMAC_DEBUG4_FALSE_CARRIER (1L<<24)
+#define BNX2_EMAC_RXMAC_DEBUG4_LAST_DATA (1L<<25)
+#define BNX2_EMAC_RXMAC_DEBUG4_SFD_FOUND (1L<<26)
+#define BNX2_EMAC_RXMAC_DEBUG4_ADVANCE (1L<<27)
+#define BNX2_EMAC_RXMAC_DEBUG4_START (1L<<28)
+
+#define BNX2_EMAC_RXMAC_DEBUG5 0x00001570
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_IDLE (0L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_EOF (1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_STAT (2L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4FCRC (3L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4RDE (4L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4ALL (5L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_1WD_WAIT_STAT (6L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1 (0x7L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_VDW (0x0L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_STAT (0x1L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_AEOF (0x2L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_NEOF (0x3L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SOF (0x4L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SAEOF (0x6L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SNEOF (0x7L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_EOF_DETECTED (1L<<7)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF0 (0x7L<<8)
+#define BNX2_EMAC_RXMAC_DEBUG5_RPM_IDI_FIFO_FULL (1L<<11)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_CCODE (1L<<12)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_DATA (1L<<13)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_STAT (1L<<14)
+#define BNX2_EMAC_RXMAC_DEBUG5_CLR_STAT (1L<<15)
+#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_CCODE (0x3L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_ACCEPT (1L<<19)
+#define BNX2_EMAC_RXMAC_DEBUG5_FMLEN (0xfffL<<20)
+
+#define BNX2_EMAC_RX_STAT_FALSECARRIERERRORS 0x00001574
+#define BNX2_EMAC_RX_STAT_AC0 0x00001580
+#define BNX2_EMAC_RX_STAT_AC1 0x00001584
+#define BNX2_EMAC_RX_STAT_AC2 0x00001588
+#define BNX2_EMAC_RX_STAT_AC3 0x0000158c
+#define BNX2_EMAC_RX_STAT_AC4 0x00001590
+#define BNX2_EMAC_RX_STAT_AC5 0x00001594
+#define BNX2_EMAC_RX_STAT_AC6 0x00001598
+#define BNX2_EMAC_RX_STAT_AC7 0x0000159c
+#define BNX2_EMAC_RX_STAT_AC8 0x000015a0
+#define BNX2_EMAC_RX_STAT_AC9 0x000015a4
+#define BNX2_EMAC_RX_STAT_AC10 0x000015a8
+#define BNX2_EMAC_RX_STAT_AC11 0x000015ac
+#define BNX2_EMAC_RX_STAT_AC12 0x000015b0
+#define BNX2_EMAC_RX_STAT_AC13 0x000015b4
+#define BNX2_EMAC_RX_STAT_AC14 0x000015b8
+#define BNX2_EMAC_RX_STAT_AC15 0x000015bc
+#define BNX2_EMAC_RX_STAT_AC16 0x000015c0
+#define BNX2_EMAC_RX_STAT_AC17 0x000015c4
+#define BNX2_EMAC_RX_STAT_AC18 0x000015c8
+#define BNX2_EMAC_RX_STAT_AC19 0x000015cc
+#define BNX2_EMAC_RX_STAT_AC20 0x000015d0
+#define BNX2_EMAC_RX_STAT_AC21 0x000015d4
+#define BNX2_EMAC_RX_STAT_AC22 0x000015d8
+#define BNX2_EMAC_RXMAC_SUC_DBG_OVERRUNVEC 0x000015dc
+#define BNX2_EMAC_RX_STAT_AC_28 0x000015f4
+#define BNX2_EMAC_TX_STAT_IFHCOUTOCTETS 0x00001600
+#define BNX2_EMAC_TX_STAT_IFHCOUTBADOCTETS 0x00001604
+#define BNX2_EMAC_TX_STAT_ETHERSTATSCOLLISIONS 0x00001608
+#define BNX2_EMAC_TX_STAT_OUTXONSENT 0x0000160c
+#define BNX2_EMAC_TX_STAT_OUTXOFFSENT 0x00001610
+#define BNX2_EMAC_TX_STAT_FLOWCONTROLDONE 0x00001614
+#define BNX2_EMAC_TX_STAT_DOT3STATSSINGLECOLLISIONFRAMES 0x00001618
+#define BNX2_EMAC_TX_STAT_DOT3STATSMULTIPLECOLLISIONFRAMES 0x0000161c
+#define BNX2_EMAC_TX_STAT_DOT3STATSDEFERREDTRANSMISSIONS 0x00001620
+#define BNX2_EMAC_TX_STAT_DOT3STATSEXCESSIVECOLLISIONS 0x00001624
+#define BNX2_EMAC_TX_STAT_DOT3STATSLATECOLLISIONS 0x00001628
+#define BNX2_EMAC_TX_STAT_IFHCOUTUCASTPKTS 0x0000162c
+#define BNX2_EMAC_TX_STAT_IFHCOUTMULTICASTPKTS 0x00001630
+#define BNX2_EMAC_TX_STAT_IFHCOUTBROADCASTPKTS 0x00001634
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS64OCTETS 0x00001638
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS 0x0000163c
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS 0x00001640
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS 0x00001644
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS 0x00001648
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS 0x0000164c
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTSOVER1522OCTETS 0x00001650
+#define BNX2_EMAC_TX_STAT_DOT3STATSINTERNALMACTRANSMITERRORS 0x00001654
+#define BNX2_EMAC_TXMAC_DEBUG0 0x00001658
+#define BNX2_EMAC_TXMAC_DEBUG1 0x0000165c
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE (0xfL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_IDLE (0x0L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_START0 (0x1L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA0 (0x4L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA1 (0x5L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA2 (0x6L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA3 (0x7L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT0 (0x8L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT1 (0x9L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_CRS_ENABLE (1L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG1_BAD_CRC (1L<<5)
+#define BNX2_EMAC_TXMAC_DEBUG1_SE_COUNTER (0xfL<<6)
+#define BNX2_EMAC_TXMAC_DEBUG1_SEND_PAUSE (1L<<10)
+#define BNX2_EMAC_TXMAC_DEBUG1_LATE_COLLISION (1L<<11)
+#define BNX2_EMAC_TXMAC_DEBUG1_MAX_DEFER (1L<<12)
+#define BNX2_EMAC_TXMAC_DEBUG1_DEFERRED (1L<<13)
+#define BNX2_EMAC_TXMAC_DEBUG1_ONE_BYTE (1L<<14)
+#define BNX2_EMAC_TXMAC_DEBUG1_IPG_TIME (0xfL<<15)
+#define BNX2_EMAC_TXMAC_DEBUG1_SLOT_TIME (0xffL<<19)
+
+#define BNX2_EMAC_TXMAC_DEBUG2 0x00001660
+#define BNX2_EMAC_TXMAC_DEBUG2_BACK_OFF (0x3ffL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG2_BYTE_COUNT (0xffffL<<10)
+#define BNX2_EMAC_TXMAC_DEBUG2_COL_COUNT (0x1fL<<26)
+#define BNX2_EMAC_TXMAC_DEBUG2_COL_BIT (1L<<31)
+
+#define BNX2_EMAC_TXMAC_DEBUG3 0x00001664
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE (0xfL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_IDLE (0x0L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE1 (0x1L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE2 (0x2L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SFD (0x3L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_DATA (0x4L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC1 (0x5L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC2 (0x6L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EXT (0x7L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATB (0x8L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATG (0x9L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_JAM (0xaL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EJAM (0xbL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BJAM (0xcL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SWAIT (0xdL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BACKOFF (0xeL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE (0x7L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_IDLE (0x0L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_WAIT (0x1L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_UNI (0x2L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_MC (0x3L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC2 (0x4L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC3 (0x5L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC (0x6L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_CRS_DONE (1L<<7)
+#define BNX2_EMAC_TXMAC_DEBUG3_XOFF (1L<<8)
+#define BNX2_EMAC_TXMAC_DEBUG3_SE_COUNTER (0xfL<<9)
+#define BNX2_EMAC_TXMAC_DEBUG3_QUANTA_COUNTER (0x1fL<<13)
+
+#define BNX2_EMAC_TXMAC_DEBUG4 0x00001668
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_COUNTER (0xffffL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE (0xfL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_IDLE (0x0L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA1 (0x2L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA2 (0x3L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC3 (0x4L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC2 (0x5L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA3 (0x6L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC1 (0x7L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC1 (0x8L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC2 (0x9L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TIME (0xaL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TYPE (0xcL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_WAIT (0xdL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CMD (0xeL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_STATS0_VALID (1L<<20)
+#define BNX2_EMAC_TXMAC_DEBUG4_APPEND_CRC (1L<<21)
+#define BNX2_EMAC_TXMAC_DEBUG4_SLOT_FILLED (1L<<22)
+#define BNX2_EMAC_TXMAC_DEBUG4_MAX_DEFER (1L<<23)
+#define BNX2_EMAC_TXMAC_DEBUG4_SEND_EXTEND (1L<<24)
+#define BNX2_EMAC_TXMAC_DEBUG4_SEND_PADDING (1L<<25)
+#define BNX2_EMAC_TXMAC_DEBUG4_EOF_LOC (1L<<26)
+#define BNX2_EMAC_TXMAC_DEBUG4_COLLIDING (1L<<27)
+#define BNX2_EMAC_TXMAC_DEBUG4_COL_IN (1L<<28)
+#define BNX2_EMAC_TXMAC_DEBUG4_BURSTING (1L<<29)
+#define BNX2_EMAC_TXMAC_DEBUG4_ADVANCE (1L<<30)
+#define BNX2_EMAC_TXMAC_DEBUG4_GO (1L<<31)
+
+#define BNX2_EMAC_TX_STAT_AC0 0x00001680
+#define BNX2_EMAC_TX_STAT_AC1 0x00001684
+#define BNX2_EMAC_TX_STAT_AC2 0x00001688
+#define BNX2_EMAC_TX_STAT_AC3 0x0000168c
+#define BNX2_EMAC_TX_STAT_AC4 0x00001690
+#define BNX2_EMAC_TX_STAT_AC5 0x00001694
+#define BNX2_EMAC_TX_STAT_AC6 0x00001698
+#define BNX2_EMAC_TX_STAT_AC7 0x0000169c
+#define BNX2_EMAC_TX_STAT_AC8 0x000016a0
+#define BNX2_EMAC_TX_STAT_AC9 0x000016a4
+#define BNX2_EMAC_TX_STAT_AC10 0x000016a8
+#define BNX2_EMAC_TX_STAT_AC11 0x000016ac
+#define BNX2_EMAC_TX_STAT_AC12 0x000016b0
+#define BNX2_EMAC_TX_STAT_AC13 0x000016b4
+#define BNX2_EMAC_TX_STAT_AC14 0x000016b8
+#define BNX2_EMAC_TX_STAT_AC15 0x000016bc
+#define BNX2_EMAC_TX_STAT_AC16 0x000016c0
+#define BNX2_EMAC_TX_STAT_AC17 0x000016c4
+#define BNX2_EMAC_TX_STAT_AC18 0x000016c8
+#define BNX2_EMAC_TX_STAT_AC19 0x000016cc
+#define BNX2_EMAC_TX_STAT_AC20 0x000016d0
+#define BNX2_EMAC_TXMAC_SUC_DBG_OVERRUNVEC 0x000016d8
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL 0x000016fc
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_INC (0x7fL<<0)
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_NUM (0x7fL<<16)
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_RATE_LIMITER_EN (1L<<31)
+
+
+/*
+ * rpm_reg definition
+ * offset: 0x1800
+ */
+#define BNX2_RPM_COMMAND 0x00001800
+#define BNX2_RPM_COMMAND_ENABLED (1L<<0)
+#define BNX2_RPM_COMMAND_OVERRUN_ABORT (1L<<4)
+
+#define BNX2_RPM_STATUS 0x00001804
+#define BNX2_RPM_STATUS_MBUF_WAIT (1L<<0)
+#define BNX2_RPM_STATUS_FREE_WAIT (1L<<1)
+
+#define BNX2_RPM_CONFIG 0x00001808
+#define BNX2_RPM_CONFIG_NO_PSD_HDR_CKSUM (1L<<0)
+#define BNX2_RPM_CONFIG_ACPI_ENA (1L<<1)
+#define BNX2_RPM_CONFIG_ACPI_KEEP (1L<<2)
+#define BNX2_RPM_CONFIG_MP_KEEP (1L<<3)
+#define BNX2_RPM_CONFIG_SORT_VECT_VAL (0xfL<<4)
+#define BNX2_RPM_CONFIG_DISABLE_WOL_ASSERT (1L<<30)
+#define BNX2_RPM_CONFIG_IGNORE_VLAN (1L<<31)
+
+#define BNX2_RPM_MGMT_PKT_CTRL 0x0000180c
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_SORT (0xfL<<0)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_RULE (0xfL<<4)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_DISCARD_EN (1L<<30)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_EN (1L<<31)
+
+#define BNX2_RPM_VLAN_MATCH0 0x00001810
+#define BNX2_RPM_VLAN_MATCH0_RPM_VLAN_MTCH0_VALUE (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH1 0x00001814
+#define BNX2_RPM_VLAN_MATCH1_RPM_VLAN_MTCH1_VALUE (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH2 0x00001818
+#define BNX2_RPM_VLAN_MATCH2_RPM_VLAN_MTCH2_VALUE (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH3 0x0000181c
+#define BNX2_RPM_VLAN_MATCH3_RPM_VLAN_MTCH3_VALUE (0xfffL<<0)
+
+#define BNX2_RPM_SORT_USER0 0x00001820
+#define BNX2_RPM_SORT_USER0_PM_EN (0xffffL<<0)
+#define BNX2_RPM_SORT_USER0_BC_EN (1L<<16)
+#define BNX2_RPM_SORT_USER0_MC_EN (1L<<17)
+#define BNX2_RPM_SORT_USER0_MC_HSH_EN (1L<<18)
+#define BNX2_RPM_SORT_USER0_PROM_EN (1L<<19)
+#define BNX2_RPM_SORT_USER0_VLAN_EN (0xfL<<20)
+#define BNX2_RPM_SORT_USER0_PROM_VLAN (1L<<24)
+#define BNX2_RPM_SORT_USER0_VLAN_NOTMATCH (1L<<25)
+#define BNX2_RPM_SORT_USER0_ENA (1L<<31)
+
+#define BNX2_RPM_SORT_USER1 0x00001824
+#define BNX2_RPM_SORT_USER1_PM_EN (0xffffL<<0)
+#define BNX2_RPM_SORT_USER1_BC_EN (1L<<16)
+#define BNX2_RPM_SORT_USER1_MC_EN (1L<<17)
+#define BNX2_RPM_SORT_USER1_MC_HSH_EN (1L<<18)
+#define BNX2_RPM_SORT_USER1_PROM_EN (1L<<19)
+#define BNX2_RPM_SORT_USER1_VLAN_EN (0xfL<<20)
+#define BNX2_RPM_SORT_USER1_PROM_VLAN (1L<<24)
+#define BNX2_RPM_SORT_USER1_ENA (1L<<31)
+
+#define BNX2_RPM_SORT_USER2 0x00001828
+#define BNX2_RPM_SORT_USER2_PM_EN (0xffffL<<0)
+#define BNX2_RPM_SORT_USER2_BC_EN (1L<<16)
+#define BNX2_RPM_SORT_USER2_MC_EN (1L<<17)
+#define BNX2_RPM_SORT_USER2_MC_HSH_EN (1L<<18)
+#define BNX2_RPM_SORT_USER2_PROM_EN (1L<<19)
+#define BNX2_RPM_SORT_USER2_VLAN_EN (0xfL<<20)
+#define BNX2_RPM_SORT_USER2_PROM_VLAN (1L<<24)
+#define BNX2_RPM_SORT_USER2_ENA (1L<<31)
+
+#define BNX2_RPM_SORT_USER3 0x0000182c
+#define BNX2_RPM_SORT_USER3_PM_EN (0xffffL<<0)
+#define BNX2_RPM_SORT_USER3_BC_EN (1L<<16)
+#define BNX2_RPM_SORT_USER3_MC_EN (1L<<17)
+#define BNX2_RPM_SORT_USER3_MC_HSH_EN (1L<<18)
+#define BNX2_RPM_SORT_USER3_PROM_EN (1L<<19)
+#define BNX2_RPM_SORT_USER3_VLAN_EN (0xfL<<20)
+#define BNX2_RPM_SORT_USER3_PROM_VLAN (1L<<24)
+#define BNX2_RPM_SORT_USER3_ENA (1L<<31)
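Receive sorting is split between BNX2_EMAC_RX_MODE (what the MAC accepts) and the RPM_SORT_USERn rules (where accepted frames go); the ENA bit should only be raised once the rest of the rule is composed. A minimal sketch of enabling broadcast plus the first perfect-match entry on rule 0, hedged against the two-step write that bnx2_set_rx_mode() performs:

/* Illustrative sketch: program RPM sort rule 0 for perfect-match
 * entry 0 plus broadcast, then arm the rule.
 */
static void bnx2_enable_sort_user0(struct bnx2 *bp)
{
        u32 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;  /* PM entry 0 + bcast */

        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
}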
+
+#define BNX2_RPM_STAT_L2_FILTER_DISCARDS 0x00001840
+#define BNX2_RPM_STAT_RULE_CHECKER_DISCARDS 0x00001844
+#define BNX2_RPM_STAT_IFINFTQDISCARDS 0x00001848
+#define BNX2_RPM_STAT_IFINMBUFDISCARD 0x0000184c
+#define BNX2_RPM_STAT_RULE_CHECKER_P4_HIT 0x00001850
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0 0x00001854
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1 0x00001858
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2 0x0000185c
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3 0x00001860
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4 0x00001864
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5 0x00001868
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6 0x0000186c
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7 0x00001870
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_STAT_AC0 0x00001880
+#define BNX2_RPM_STAT_AC1 0x00001884
+#define BNX2_RPM_STAT_AC2 0x00001888
+#define BNX2_RPM_STAT_AC3 0x0000188c
+#define BNX2_RPM_STAT_AC4 0x00001890
+#define BNX2_RPM_RC_CNTL_16 0x000018e0
+#define BNX2_RPM_RC_CNTL_16_OFFSET (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_16_CLASS (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_16_PRIORITY (1L<<11)
+#define BNX2_RPM_RC_CNTL_16_P4 (1L<<12)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_START (0L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_IP (1L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP (2L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_UDP (3L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_DATA (4L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP_UDP (5L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_ICMPV6 (6L<<13)
+#define BNX2_RPM_RC_CNTL_16_COMP (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_EQUAL (0L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_NEQUAL (1L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_GREATER (2L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_LESS (3L<<16)
+#define BNX2_RPM_RC_CNTL_16_MAP (1L<<18)
+#define BNX2_RPM_RC_CNTL_16_SBIT (1L<<19)
+#define BNX2_RPM_RC_CNTL_16_CMDSEL (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_16_DISCARD (1L<<25)
+#define BNX2_RPM_RC_CNTL_16_MASK (1L<<26)
+#define BNX2_RPM_RC_CNTL_16_P1 (1L<<27)
+#define BNX2_RPM_RC_CNTL_16_P2 (1L<<28)
+#define BNX2_RPM_RC_CNTL_16_P3 (1L<<29)
+#define BNX2_RPM_RC_CNTL_16_NBIT (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_16 0x000018e4
+#define BNX2_RPM_RC_VALUE_MASK_16_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_16_MASK (0xffffL<<16)
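Each rule-checker slot pairs a CNTL register (frame offset, header type, comparison operator, action bits) with a VALUE_MASK register holding the 16-bit compare value and mask. A hedged sketch of programming one entry; the register pairing follows the defines above, but the policy (discard TCP frames whose 16-bit word at a given offset equals a given value) and the helper are purely illustrative:

/* Illustrative sketch: program rule-checker slot 16 to discard TCP
 * frames whose 16-bit word at 'offset' equals 'value'.
 */
static void bnx2_set_rc_rule16(struct bnx2 *bp, u32 offset, u16 value)
{
        REG_WR(bp, BNX2_RPM_RC_VALUE_MASK_16,
               (0xffffU << 16) | value);        /* compare all 16 bits */
        REG_WR(bp, BNX2_RPM_RC_CNTL_16,
               (offset & BNX2_RPM_RC_CNTL_16_OFFSET) |
               BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP |
               BNX2_RPM_RC_CNTL_16_COMP_EQUAL |
               BNX2_RPM_RC_CNTL_16_DISCARD);
}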
+
+#define BNX2_RPM_RC_CNTL_17 0x000018e8
+#define BNX2_RPM_RC_CNTL_17_OFFSET (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_17_CLASS (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_17_PRIORITY (1L<<11)
+#define BNX2_RPM_RC_CNTL_17_P4 (1L<<12)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_START (0L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_IP (1L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP (2L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_UDP (3L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_DATA (4L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP_UDP (5L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_ICMPV6 (6L<<13)
+#define BNX2_RPM_RC_CNTL_17_COMP (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_EQUAL (0L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_NEQUAL (1L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_GREATER (2L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_LESS (3L<<16)
+#define BNX2_RPM_RC_CNTL_17_MAP (1L<<18)
+#define BNX2_RPM_RC_CNTL_17_SBIT (1L<<19)
+#define BNX2_RPM_RC_CNTL_17_CMDSEL (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_17_DISCARD (1L<<25)
+#define BNX2_RPM_RC_CNTL_17_MASK (1L<<26)
+#define BNX2_RPM_RC_CNTL_17_P1 (1L<<27)
+#define BNX2_RPM_RC_CNTL_17_P2 (1L<<28)
+#define BNX2_RPM_RC_CNTL_17_P3 (1L<<29)
+#define BNX2_RPM_RC_CNTL_17_NBIT (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_17 0x000018ec
+#define BNX2_RPM_RC_VALUE_MASK_17_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_17_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_18 0x000018f0
+#define BNX2_RPM_RC_CNTL_18_OFFSET (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_18_CLASS (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_18_PRIORITY (1L<<11)
+#define BNX2_RPM_RC_CNTL_18_P4 (1L<<12)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_START (0L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_IP (1L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP (2L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_UDP (3L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_DATA (4L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP_UDP (5L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_ICMPV6 (6L<<13)
+#define BNX2_RPM_RC_CNTL_18_COMP (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_EQUAL (0L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_NEQUAL (1L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_GREATER (2L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_LESS (3L<<16)
+#define BNX2_RPM_RC_CNTL_18_MAP (1L<<18)
+#define BNX2_RPM_RC_CNTL_18_SBIT (1L<<19)
+#define BNX2_RPM_RC_CNTL_18_CMDSEL (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_18_DISCARD (1L<<25)
+#define BNX2_RPM_RC_CNTL_18_MASK (1L<<26)
+#define BNX2_RPM_RC_CNTL_18_P1 (1L<<27)
+#define BNX2_RPM_RC_CNTL_18_P2 (1L<<28)
+#define BNX2_RPM_RC_CNTL_18_P3 (1L<<29)
+#define BNX2_RPM_RC_CNTL_18_NBIT (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_18 0x000018f4
+#define BNX2_RPM_RC_VALUE_MASK_18_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_18_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_19 0x000018f8
+#define BNX2_RPM_RC_CNTL_19_OFFSET (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_19_CLASS (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_19_PRIORITY (1L<<11)
+#define BNX2_RPM_RC_CNTL_19_P4 (1L<<12)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_START (0L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_IP (1L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP (2L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_UDP (3L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_DATA (4L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP_UDP (5L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_ICMPV6 (6L<<13)
+#define BNX2_RPM_RC_CNTL_19_COMP (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_EQUAL (0L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_NEQUAL (1L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_GREATER (2L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_LESS (3L<<16)
+#define BNX2_RPM_RC_CNTL_19_MAP (1L<<18)
+#define BNX2_RPM_RC_CNTL_19_SBIT (1L<<19)
+#define BNX2_RPM_RC_CNTL_19_CMDSEL (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_19_DISCARD (1L<<25)
+#define BNX2_RPM_RC_CNTL_19_MASK (1L<<26)
+#define BNX2_RPM_RC_CNTL_19_P1 (1L<<27)
+#define BNX2_RPM_RC_CNTL_19_P2 (1L<<28)
+#define BNX2_RPM_RC_CNTL_19_P3 (1L<<29)
+#define BNX2_RPM_RC_CNTL_19_NBIT (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_19 0x000018fc
+#define BNX2_RPM_RC_VALUE_MASK_19_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_19_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_0 0x00001900
+#define BNX2_RPM_RC_CNTL_0_OFFSET (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_0_CLASS (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_0_PRIORITY (1L<<11)
+#define BNX2_RPM_RC_CNTL_0_P4 (1L<<12)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_START (0L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_IP (1L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP (2L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_UDP (3L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_DATA (4L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP_UDP (5L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_ICMPV6 (6L<<13)
+#define BNX2_RPM_RC_CNTL_0_COMP (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_EQUAL (0L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_NEQUAL (1L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_GREATER (2L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_LESS (3L<<16)
+#define BNX2_RPM_RC_CNTL_0_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_0_SBIT (1L<<19)
+#define BNX2_RPM_RC_CNTL_0_CMDSEL (0xfL<<20)
+#define BNX2_RPM_RC_CNTL_0_MAP (1L<<24)
+#define BNX2_RPM_RC_CNTL_0_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_0_DISCARD (1L<<25)
+#define BNX2_RPM_RC_CNTL_0_MASK (1L<<26)
+#define BNX2_RPM_RC_CNTL_0_P1 (1L<<27)
+#define BNX2_RPM_RC_CNTL_0_P2 (1L<<28)
+#define BNX2_RPM_RC_CNTL_0_P3 (1L<<29)
+#define BNX2_RPM_RC_CNTL_0_NBIT (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_0 0x00001904
+#define BNX2_RPM_RC_VALUE_MASK_0_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_0_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_1 0x00001908
+#define BNX2_RPM_RC_CNTL_1_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_1_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_1_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_1_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_1_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_1_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_1_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_1_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_1_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_1_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_1_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_1_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_1_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_1_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_1_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_1_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_1 0x0000190c
+#define BNX2_RPM_RC_VALUE_MASK_1_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_1_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_2 0x00001910
+#define BNX2_RPM_RC_CNTL_2_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_2_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_2_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_2_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_2_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_2_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_2_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_2_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_2_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_2_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_2_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_2_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_2_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_2_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_2_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_2_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_2 0x00001914
+#define BNX2_RPM_RC_VALUE_MASK_2_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_2_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_3 0x00001918
+#define BNX2_RPM_RC_CNTL_3_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_3_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_3_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_3_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_3_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_3_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_3_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_3_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_3_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_3_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_3_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_3_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_3_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_3_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_3_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_3_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_3 0x0000191c
0x0000191c +#define BNX2_RPM_RC_VALUE_MASK_3_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_3_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_4 0x00001920 +#define BNX2_RPM_RC_CNTL_4_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_4_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_4_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_4_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_4_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_4_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_4_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_4_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_4_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_4_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_4_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_4_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_4_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_4_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_4_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_4_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_4_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_4_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_4_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_4_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_4 0x00001924 +#define BNX2_RPM_RC_VALUE_MASK_4_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_4_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_5 0x00001928 +#define BNX2_RPM_RC_CNTL_5_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_5_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_5_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_5_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_5_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_5_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_5_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_5_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_5_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_5_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_5_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_5_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_5_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_5_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_5_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_5_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_5_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_5_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_5_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_5_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_5 0x0000192c +#define BNX2_RPM_RC_VALUE_MASK_5_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_5_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_6 0x00001930 +#define BNX2_RPM_RC_CNTL_6_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_6_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_6_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_6_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_6_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_6_P4_XI (1L<<12) 
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_6_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_6_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_6_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_6_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_6_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_6_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_6_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_6_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_6_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_6_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_6_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_6_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_6_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_6_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_6 0x00001934 +#define BNX2_RPM_RC_VALUE_MASK_6_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_6_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_7 0x00001938 +#define BNX2_RPM_RC_CNTL_7_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_7_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_7_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_7_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_7_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_7_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_7_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_7_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_7_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_7_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_7_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_7_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_7_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_7_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_7_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_7_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_7_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_7_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_7_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_7_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_7 0x0000193c +#define BNX2_RPM_RC_VALUE_MASK_7_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_7_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_8 0x00001940 +#define BNX2_RPM_RC_CNTL_8_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_8_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_8_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_8_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_8_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_8_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_ICMPV6_XI (6L<<13) 
+#define BNX2_RPM_RC_CNTL_8_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_8_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_8_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_8_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_8_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_8_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_8_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_8_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_8_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_8_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_8_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_8_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_8_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_8_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_8 0x00001944 +#define BNX2_RPM_RC_VALUE_MASK_8_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_8_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_9 0x00001948 +#define BNX2_RPM_RC_CNTL_9_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_9_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_9_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_9_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_9_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_9_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_9_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_9_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_9_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_9_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_9_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_9_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_9_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_9_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_9_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_9_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_9_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_9_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_9_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_9_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_9 0x0000194c +#define BNX2_RPM_RC_VALUE_MASK_9_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_9_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_10 0x00001950 +#define BNX2_RPM_RC_CNTL_10_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_10_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_10_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_10_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_10_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_10_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_10_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_10_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_10_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_10_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_10_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_10_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_10_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_10_CMDSEL_XI (0x1fL<<20) +#define 
BNX2_RPM_RC_CNTL_10_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_10_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_10_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_10_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_10_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_10_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_10 0x00001954 +#define BNX2_RPM_RC_VALUE_MASK_10_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_10_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_11 0x00001958 +#define BNX2_RPM_RC_CNTL_11_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_11_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_11_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_11_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_11_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_11_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_11_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_11_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_11_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_11_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_11_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_11_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_11_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_11_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_11_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_11_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_11_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_11_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_11_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_11_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_11 0x0000195c +#define BNX2_RPM_RC_VALUE_MASK_11_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_11_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_12 0x00001960 +#define BNX2_RPM_RC_CNTL_12_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_12_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_12_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_12_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_12_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_12_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_12_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_12_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_12_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_12_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_12_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_12_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_12_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_12_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_12_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_12_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_12_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_12_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_12_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_12_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_12 0x00001964 +#define BNX2_RPM_RC_VALUE_MASK_12_VALUE (0xffffL<<0) 
+#define BNX2_RPM_RC_VALUE_MASK_12_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_13 0x00001968 +#define BNX2_RPM_RC_CNTL_13_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_13_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_13_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_13_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_13_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_13_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_13_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_13_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_13_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_13_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_13_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_13_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_13_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_13_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_13_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_13_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_13_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_13_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_13_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_13_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_13 0x0000196c +#define BNX2_RPM_RC_VALUE_MASK_13_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_13_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_14 0x00001970 +#define BNX2_RPM_RC_CNTL_14_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_14_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_14_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_14_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_14_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_14_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_14_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_14_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_14_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_14_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_14_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_14_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_14_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_14_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_14_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_14_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_14_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_14_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_14_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_14_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_14 0x00001974 +#define BNX2_RPM_RC_VALUE_MASK_14_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_14_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_15 0x00001978 +#define BNX2_RPM_RC_CNTL_15_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_15_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_15_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_15_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_15_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_15_P4_XI 
(1L<<12) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_15_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_15_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_15_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_15_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_15_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_15_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_15_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_15_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_15_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_15_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_15_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_15_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_15_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_15_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_15 0x0000197c +#define BNX2_RPM_RC_VALUE_MASK_15_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_15_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CONFIG 0x00001980 +#define BNX2_RPM_RC_CONFIG_RULE_ENABLE (0xffffL<<0) +#define BNX2_RPM_RC_CONFIG_RULE_ENABLE_XI (0xfffffL<<0) +#define BNX2_RPM_RC_CONFIG_DEF_CLASS (0x7L<<24) +#define BNX2_RPM_RC_CONFIG_KNUM_OVERWRITE (1L<<31) + +#define BNX2_RPM_DEBUG0 0x00001984 +#define BNX2_RPM_DEBUG0_FM_BCNT (0xffffL<<0) +#define BNX2_RPM_DEBUG0_T_DATA_OFST_VLD (1L<<16) +#define BNX2_RPM_DEBUG0_T_UDP_OFST_VLD (1L<<17) +#define BNX2_RPM_DEBUG0_T_TCP_OFST_VLD (1L<<18) +#define BNX2_RPM_DEBUG0_T_IP_OFST_VLD (1L<<19) +#define BNX2_RPM_DEBUG0_IP_MORE_FRGMT (1L<<20) +#define BNX2_RPM_DEBUG0_T_IP_NO_TCP_UDP_HDR (1L<<21) +#define BNX2_RPM_DEBUG0_LLC_SNAP (1L<<22) +#define BNX2_RPM_DEBUG0_FM_STARTED (1L<<23) +#define BNX2_RPM_DEBUG0_DONE (1L<<24) +#define BNX2_RPM_DEBUG0_WAIT_4_DONE (1L<<25) +#define BNX2_RPM_DEBUG0_USE_TPBUF_CKSUM (1L<<26) +#define BNX2_RPM_DEBUG0_RX_NO_PSD_HDR_CKSUM (1L<<27) +#define BNX2_RPM_DEBUG0_IGNORE_VLAN (1L<<28) +#define BNX2_RPM_DEBUG0_RP_ENA_ACTIVE (1L<<31) + +#define BNX2_RPM_DEBUG1 0x00001988 +#define BNX2_RPM_DEBUG1_FSM_CUR_ST (0xffffL<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IDLE (0L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_ALL (1L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IPLLC (2L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_IP (4L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IP (8L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP_START (16L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP (32L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_TCP (64L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_UDP (128L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_AH (256L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP (512L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP_PAYLOAD (1024L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_DATA (2048L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRY (0x2000L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRYOUT (0x4000L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_LATCH_RESULT (0x8000L<<0) +#define BNX2_RPM_DEBUG1_HDR_BCNT (0x7ffL<<16) +#define BNX2_RPM_DEBUG1_UNKNOWN_ETYPE_D (1L<<28) +#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D2 (1L<<29) +#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D1 (1L<<30) +#define BNX2_RPM_DEBUG1_EOF_0XTRA_WD (1L<<31) + +#define BNX2_RPM_DEBUG2 0x0000198c +#define 
BNX2_RPM_DEBUG2_CMD_HIT_VEC (0xffffL<<0) +#define BNX2_RPM_DEBUG2_IP_BCNT (0xffL<<16) +#define BNX2_RPM_DEBUG2_THIS_CMD_M4 (1L<<24) +#define BNX2_RPM_DEBUG2_THIS_CMD_M3 (1L<<25) +#define BNX2_RPM_DEBUG2_THIS_CMD_M2 (1L<<26) +#define BNX2_RPM_DEBUG2_THIS_CMD_M1 (1L<<27) +#define BNX2_RPM_DEBUG2_IPIPE_EMPTY (1L<<28) +#define BNX2_RPM_DEBUG2_FM_DISCARD (1L<<29) +#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D2 (1L<<30) +#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D1 (1L<<31) + +#define BNX2_RPM_DEBUG3 0x00001990 +#define BNX2_RPM_DEBUG3_AVAIL_MBUF_PTR (0x1ffL<<0) +#define BNX2_RPM_DEBUG3_RDE_RLUPQ_WR_REQ_INT (1L<<9) +#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_LAST_INT (1L<<10) +#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_REQ_INT (1L<<11) +#define BNX2_RPM_DEBUG3_RDE_RBUF_FREE_REQ (1L<<12) +#define BNX2_RPM_DEBUG3_RDE_RBUF_ALLOC_REQ (1L<<13) +#define BNX2_RPM_DEBUG3_DFSM_MBUF_NOTAVAIL (1L<<14) +#define BNX2_RPM_DEBUG3_RBUF_RDE_SOF_DROP (1L<<15) +#define BNX2_RPM_DEBUG3_DFIFO_VLD_ENTRY_CT (0xfL<<16) +#define BNX2_RPM_DEBUG3_RDE_SRC_FIFO_ALMFULL (1L<<21) +#define BNX2_RPM_DEBUG3_DROP_NXT_VLD (1L<<22) +#define BNX2_RPM_DEBUG3_DROP_NXT (1L<<23) +#define BNX2_RPM_DEBUG3_FTQ_FSM (0x3L<<24) +#define BNX2_RPM_DEBUG3_FTQ_FSM_IDLE (0x0L<<24) +#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_ACK (0x1L<<24) +#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_FREE (0x2L<<24) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM (0x3L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_SOF (0x0L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_GET_MBUF (0x1L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DMA_DATA (0x2L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DATA (0x3L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_EOF (0x4L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_MF_ACK (0x5L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DROP_NXT_VLD (0x6L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DONE (0x7L<<26) +#define BNX2_RPM_DEBUG3_MBFREE_FSM (1L<<29) +#define BNX2_RPM_DEBUG3_MBFREE_FSM_IDLE (0L<<29) +#define BNX2_RPM_DEBUG3_MBFREE_FSM_WAIT_ACK (1L<<29) +#define BNX2_RPM_DEBUG3_MBALLOC_FSM (1L<<30) +#define BNX2_RPM_DEBUG3_MBALLOC_FSM_ET_MBUF (0x0L<<30) +#define BNX2_RPM_DEBUG3_MBALLOC_FSM_IVE_MBUF (0x1L<<30) +#define BNX2_RPM_DEBUG3_CCODE_EOF_ERROR (1L<<31) + +#define BNX2_RPM_DEBUG4 0x00001994 +#define BNX2_RPM_DEBUG4_DFSM_MBUF_CLUSTER (0x1ffffffL<<0) +#define BNX2_RPM_DEBUG4_DFIFO_CUR_CCODE (0x7L<<25) +#define BNX2_RPM_DEBUG4_MBWRITE_FSM (0x7L<<28) +#define BNX2_RPM_DEBUG4_DFIFO_EMPTY (1L<<31) + +#define BNX2_RPM_DEBUG5 0x00001998 +#define BNX2_RPM_DEBUG5_RDROP_WPTR (0x1fL<<0) +#define BNX2_RPM_DEBUG5_RDROP_ACPI_RPTR (0x1fL<<5) +#define BNX2_RPM_DEBUG5_RDROP_MC_RPTR (0x1fL<<10) +#define BNX2_RPM_DEBUG5_RDROP_RC_RPTR (0x1fL<<15) +#define BNX2_RPM_DEBUG5_RDROP_ACPI_EMPTY (1L<<20) +#define BNX2_RPM_DEBUG5_RDROP_MC_EMPTY (1L<<21) +#define BNX2_RPM_DEBUG5_RDROP_AEOF_VEC_AT_RDROP_MC_RPTR (1L<<22) +#define BNX2_RPM_DEBUG5_HOLDREG_WOL_DROP_INT (1L<<23) +#define BNX2_RPM_DEBUG5_HOLDREG_DISCARD (1L<<24) +#define BNX2_RPM_DEBUG5_HOLDREG_MBUF_NOTAVAIL (1L<<25) +#define BNX2_RPM_DEBUG5_HOLDREG_MC_EMPTY (1L<<26) +#define BNX2_RPM_DEBUG5_HOLDREG_RC_EMPTY (1L<<27) +#define BNX2_RPM_DEBUG5_HOLDREG_FC_EMPTY (1L<<28) +#define BNX2_RPM_DEBUG5_HOLDREG_ACPI_EMPTY (1L<<29) +#define BNX2_RPM_DEBUG5_HOLDREG_FULL_T (1L<<30) +#define BNX2_RPM_DEBUG5_HOLDREG_RD (1L<<31) + +#define BNX2_RPM_DEBUG6 0x0000199c +#define BNX2_RPM_DEBUG6_ACPI_VEC (0xffffL<<0) +#define BNX2_RPM_DEBUG6_VEC (0xffffL<<16) + +#define BNX2_RPM_DEBUG7 0x000019a0 +#define BNX2_RPM_DEBUG7_RPM_DBG7_LAST_CRC (0xffffffffL<<0) + +#define 
BNX2_RPM_DEBUG8 0x000019a4 +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM (0xfL<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_IDLE (0L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W1_ADDR (1L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W2_ADDR (2L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W3_ADDR (3L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_WAIT_THBUF (4L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_DATA (5L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W0_ADDR (6L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W1_ADDR (7L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W2_ADDR (8L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_ADDR (9L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_WAIT_THBUF (10L<<0) +#define BNX2_RPM_DEBUG8_COMPARE_AT_W0 (1L<<4) +#define BNX2_RPM_DEBUG8_COMPARE_AT_W3_DATA (1L<<5) +#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_WAIT (1L<<6) +#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W3 (1L<<7) +#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W2 (1L<<8) +#define BNX2_RPM_DEBUG8_EOF_W_LTEQ6_VLDBYTES (1L<<9) +#define BNX2_RPM_DEBUG8_EOF_W_LTEQ4_VLDBYTES (1L<<10) +#define BNX2_RPM_DEBUG8_NXT_EOF_W_12_VLDBYTES (1L<<11) +#define BNX2_RPM_DEBUG8_EOF_DET (1L<<12) +#define BNX2_RPM_DEBUG8_SOF_DET (1L<<13) +#define BNX2_RPM_DEBUG8_WAIT_4_SOF (1L<<14) +#define BNX2_RPM_DEBUG8_ALL_DONE (1L<<15) +#define BNX2_RPM_DEBUG8_THBUF_ADDR (0x7fL<<16) +#define BNX2_RPM_DEBUG8_BYTE_CTR (0xffL<<24) + +#define BNX2_RPM_DEBUG9 0x000019a8 +#define BNX2_RPM_DEBUG9_OUTFIFO_COUNT (0x7L<<0) +#define BNX2_RPM_DEBUG9_RDE_ACPI_RDY (1L<<3) +#define BNX2_RPM_DEBUG9_VLD_RD_ENTRY_CT (0x7L<<4) +#define BNX2_RPM_DEBUG9_OUTFIFO_OVERRUN_OCCURRED (1L<<28) +#define BNX2_RPM_DEBUG9_INFIFO_OVERRUN_OCCURRED (1L<<29) +#define BNX2_RPM_DEBUG9_ACPI_MATCH_INT (1L<<30) +#define BNX2_RPM_DEBUG9_ACPI_ENABLE_SYN (1L<<31) +#define BNX2_RPM_DEBUG9_BEMEM_R_XI (0x1fL<<0) +#define BNX2_RPM_DEBUG9_EO_XI (1L<<5) +#define BNX2_RPM_DEBUG9_AEOF_DE_XI (1L<<6) +#define BNX2_RPM_DEBUG9_SO_XI (1L<<7) +#define BNX2_RPM_DEBUG9_WD64_CT_XI (0x1fL<<8) +#define BNX2_RPM_DEBUG9_EOF_VLDBYTE_XI (0x7L<<13) +#define BNX2_RPM_DEBUG9_ACPI_RDE_PAT_ID_XI (0xfL<<16) +#define BNX2_RPM_DEBUG9_CALCRC_RESULT_XI (0x3ffL<<20) +#define BNX2_RPM_DEBUG9_DATA_IN_VL_XI (1L<<30) +#define BNX2_RPM_DEBUG9_CALCRC_BUFFER_VLD_XI (1L<<31) + +#define BNX2_RPM_ACPI_DBG_BUF_W00 0x000019c0 +#define BNX2_RPM_ACPI_DBG_BUF_W01 0x000019c4 +#define BNX2_RPM_ACPI_DBG_BUF_W02 0x000019c8 +#define BNX2_RPM_ACPI_DBG_BUF_W03 0x000019cc +#define BNX2_RPM_ACPI_DBG_BUF_W10 0x000019d0 +#define BNX2_RPM_ACPI_DBG_BUF_W11 0x000019d4 +#define BNX2_RPM_ACPI_DBG_BUF_W12 0x000019d8 +#define BNX2_RPM_ACPI_DBG_BUF_W13 0x000019dc +#define BNX2_RPM_ACPI_DBG_BUF_W20 0x000019e0 +#define BNX2_RPM_ACPI_DBG_BUF_W21 0x000019e4 +#define BNX2_RPM_ACPI_DBG_BUF_W22 0x000019e8 +#define BNX2_RPM_ACPI_DBG_BUF_W23 0x000019ec +#define BNX2_RPM_ACPI_DBG_BUF_W30 0x000019f0 +#define BNX2_RPM_ACPI_DBG_BUF_W31 0x000019f4 +#define BNX2_RPM_ACPI_DBG_BUF_W32 0x000019f8 +#define BNX2_RPM_ACPI_DBG_BUF_W33 0x000019fc +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL 0x00001a00 +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_BYTE_ADDRESS (0xffffL<<0) +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_DEBUGRD (1L<<28) +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_MODE (1L<<29) +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_INIT (1L<<30) +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_WR (1L<<31) + +#define BNX2_RPM_ACPI_PATTERN_CTRL 0x00001a04 +#define BNX2_RPM_ACPI_PATTERN_CTRL_PATTERN_ID (0xfL<<0) +#define BNX2_RPM_ACPI_PATTERN_CTRL_CRC_SM_CLR (1L<<30) +#define BNX2_RPM_ACPI_PATTERN_CTRL_WR (1L<<31) + +#define BNX2_RPM_ACPI_DATA 
0x00001a08 +#define BNX2_RPM_ACPI_DATA_PATTERN_BE (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_LEN0 0x00001a0c +#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN3 (0xffL<<0) +#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN2 (0xffL<<8) +#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN1 (0xffL<<16) +#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN0 (0xffL<<24) + +#define BNX2_RPM_ACPI_PATTERN_LEN1 0x00001a10 +#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN7 (0xffL<<0) +#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN6 (0xffL<<8) +#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN5 (0xffL<<16) +#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN4 (0xffL<<24) + +#define BNX2_RPM_ACPI_PATTERN_CRC0 0x00001a18 +#define BNX2_RPM_ACPI_PATTERN_CRC0_PATTERN_CRC0 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC1 0x00001a1c +#define BNX2_RPM_ACPI_PATTERN_CRC1_PATTERN_CRC1 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC2 0x00001a20 +#define BNX2_RPM_ACPI_PATTERN_CRC2_PATTERN_CRC2 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC3 0x00001a24 +#define BNX2_RPM_ACPI_PATTERN_CRC3_PATTERN_CRC3 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC4 0x00001a28 +#define BNX2_RPM_ACPI_PATTERN_CRC4_PATTERN_CRC4 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC5 0x00001a2c +#define BNX2_RPM_ACPI_PATTERN_CRC5_PATTERN_CRC5 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC6 0x00001a30 +#define BNX2_RPM_ACPI_PATTERN_CRC6_PATTERN_CRC6 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC7 0x00001a34 +#define BNX2_RPM_ACPI_PATTERN_CRC7_PATTERN_CRC7 (0xffffffffL<<0) + + +/* + * rlup_reg definition + * offset: 0x2000 + */ +#define BNX2_RLUP_RSS_CONFIG 0x0000201c +#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_XI (0x3L<<0) +#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_OFF_XI (0L<<0) +#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI (1L<<0) +#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_IP_ONLY_XI (2L<<0) +#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_RES_XI (3L<<0) +#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_XI (0x3L<<2) +#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_OFF_XI (0L<<2) +#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI (1L<<2) +#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_IP_ONLY_XI (2L<<2) +#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_RES_XI (3L<<2) + +#define BNX2_RLUP_RSS_COMMAND 0x00002048 +#define BNX2_RLUP_RSS_COMMAND_RSS_IND_TABLE_ADDR (0xfUL<<0) +#define BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK (0xffUL<<4) +#define BNX2_RLUP_RSS_COMMAND_WRITE (1UL<<12) +#define BNX2_RLUP_RSS_COMMAND_READ (1UL<<13) +#define BNX2_RLUP_RSS_COMMAND_HASH_MASK (0x7UL<<14) + +#define BNX2_RLUP_RSS_DATA 0x0000204c + + +/* + * rbuf_reg definition + * offset: 0x200000 + */ +#define BNX2_RBUF_COMMAND 0x00200000 +#define BNX2_RBUF_COMMAND_ENABLED (1L<<0) +#define BNX2_RBUF_COMMAND_FREE_INIT (1L<<1) +#define BNX2_RBUF_COMMAND_RAM_INIT (1L<<2) +#define BNX2_RBUF_COMMAND_PKT_OFFSET_OVFL (1L<<3) +#define BNX2_RBUF_COMMAND_OVER_FREE (1L<<4) +#define BNX2_RBUF_COMMAND_ALLOC_REQ (1L<<5) +#define BNX2_RBUF_COMMAND_EN_PRI_CHNGE_TE (1L<<6) +#define BNX2_RBUF_COMMAND_CU_ISOLATE_XI (1L<<5) +#define BNX2_RBUF_COMMAND_EN_PRI_CHANGE_XI (1L<<6) +#define BNX2_RBUF_COMMAND_GRC_ENDIAN_CONV_DIS_XI (1L<<7) + +#define BNX2_RBUF_STATUS1 0x00200004 +#define BNX2_RBUF_STATUS1_FREE_COUNT (0x3ffL<<0) + +#define BNX2_RBUF_STATUS2 0x00200008 +#define BNX2_RBUF_STATUS2_FREE_TAIL (0x1ffL<<0) +#define BNX2_RBUF_STATUS2_FREE_HEAD (0x1ffL<<16) + +#define BNX2_RBUF_CONFIG 0x0020000c +#define BNX2_RBUF_CONFIG_XOFF_TRIP (0x3ffL<<0) +#define BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) \ + 
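[Editor's note: the RSS command register above packs an indirection-table slice address (bits 3:0), a per-entry write mask (bits 11:4), and WRITE/READ strobes into one word, with the payload staged in BNX2_RLUP_RSS_DATA. A minimal sketch of how a driver might drive it, assuming a writel()-style mapping of the GRC register space; the helper name and parameters are invented for illustration and are not part of this patch.]

	/* Hypothetical helper: program one 8-entry slice of the RSS
	 * indirection table.  'data' holds the packed entries and must be
	 * loaded into BNX2_RLUP_RSS_DATA before the command is issued. */
	static void bnx2_rss_write_slice(void __iomem *regs, u32 slice, u32 data)
	{
		u32 cmd = (slice & BNX2_RLUP_RSS_COMMAND_RSS_IND_TABLE_ADDR) |
			  BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |	/* update all 8 entries */
			  BNX2_RLUP_RSS_COMMAND_WRITE;

		writel(data, regs + BNX2_RLUP_RSS_DATA);
		writel(cmd, regs + BNX2_RLUP_RSS_COMMAND);
	}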
+
+
+/*
+ * rbuf_reg definition
+ * offset: 0x200000
+ */
+#define BNX2_RBUF_COMMAND	0x00200000
+#define BNX2_RBUF_COMMAND_ENABLED	(1L<<0)
+#define BNX2_RBUF_COMMAND_FREE_INIT	(1L<<1)
+#define BNX2_RBUF_COMMAND_RAM_INIT	(1L<<2)
+#define BNX2_RBUF_COMMAND_PKT_OFFSET_OVFL	(1L<<3)
+#define BNX2_RBUF_COMMAND_OVER_FREE	(1L<<4)
+#define BNX2_RBUF_COMMAND_ALLOC_REQ	(1L<<5)
+#define BNX2_RBUF_COMMAND_EN_PRI_CHNGE_TE	(1L<<6)
+#define BNX2_RBUF_COMMAND_CU_ISOLATE_XI	(1L<<5)
+#define BNX2_RBUF_COMMAND_EN_PRI_CHANGE_XI	(1L<<6)
+#define BNX2_RBUF_COMMAND_GRC_ENDIAN_CONV_DIS_XI	(1L<<7)
+
+#define BNX2_RBUF_STATUS1	0x00200004
+#define BNX2_RBUF_STATUS1_FREE_COUNT	(0x3ffL<<0)
+
+#define BNX2_RBUF_STATUS2	0x00200008
+#define BNX2_RBUF_STATUS2_FREE_TAIL	(0x1ffL<<0)
+#define BNX2_RBUF_STATUS2_FREE_HEAD	(0x1ffL<<16)
+
+#define BNX2_RBUF_CONFIG	0x0020000c
+#define BNX2_RBUF_CONFIG_XOFF_TRIP	(0x3ffL<<0)
+#define BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu)	\
+	((((mtu) - 1500) * 31 / 1000) + 54)
+#define BNX2_RBUF_CONFIG_XON_TRIP	(0x3ffL<<16)
+#define BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu)	\
+	((((mtu) - 1500) * 39 / 1000) + 66)
+#define BNX2_RBUF_CONFIG_VAL(mtu)	\
+	(BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) |	\
+	 (BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) << 16))
+
+#define BNX2_RBUF_FW_BUF_ALLOC	0x00200010
+#define BNX2_RBUF_FW_BUF_ALLOC_VALUE	(0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_ALLOC_TYPE	(1L<<16)
+#define BNX2_RBUF_FW_BUF_ALLOC_ALLOC_REQ	(1L<<31)
+
+#define BNX2_RBUF_FW_BUF_FREE	0x00200014
+#define BNX2_RBUF_FW_BUF_FREE_COUNT	(0x7fL<<0)
+#define BNX2_RBUF_FW_BUF_FREE_TAIL	(0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_FREE_HEAD	(0x1ffL<<16)
+#define BNX2_RBUF_FW_BUF_FREE_TYPE	(1L<<25)
+#define BNX2_RBUF_FW_BUF_FREE_FREE_REQ	(1L<<31)
+
+#define BNX2_RBUF_FW_BUF_SEL	0x00200018
+#define BNX2_RBUF_FW_BUF_SEL_COUNT	(0x7fL<<0)
+#define BNX2_RBUF_FW_BUF_SEL_TAIL	(0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_SEL_HEAD	(0x1ffL<<16)
+#define BNX2_RBUF_FW_BUF_SEL_SEL_REQ	(1L<<31)
+
+#define BNX2_RBUF_CONFIG2	0x0020001c
+#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP	(0x3ffL<<0)
+#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu)	\
+	((((mtu) - 1500) * 4 / 1000) + 5)
+#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP	(0x3ffL<<16)
+#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu)	\
+	((((mtu) - 1500) * 2 / 100) + 30)
+#define BNX2_RBUF_CONFIG2_VAL(mtu)	\
+	(BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) |	\
+	 (BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) << 16))
+
+#define BNX2_RBUF_CONFIG3	0x00200020
+#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP	(0x3ffL<<0)
+#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu)	\
+	((((mtu) - 1500) * 12 / 1000) + 18)
+#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP	(0x3ffL<<16)
+#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu)	\
+	((((mtu) - 1500) * 2 / 100) + 30)
+#define BNX2_RBUF_CONFIG3_VAL(mtu)	\
+	(BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) |	\
+	 (BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) << 16))
+
+#define BNX2_RBUF_PKT_DATA	0x00208000
+#define BNX2_RBUF_CLIST_DATA	0x00210000
+#define BNX2_RBUF_BUF_DATA	0x00220000
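[Editor's note: as a worked example of the flow-control trip arithmetic in the macros above (editorial, not part of the patch), a 9000-byte jumbo MTU gives

	XOFF trip: ((9000 - 1500) * 31 / 1000) + 54 = 232 + 54 = 286	(0x11e)
	XON trip:  ((9000 - 1500) * 39 / 1000) + 66 = 292 + 66 = 358	(0x166)

so BNX2_RBUF_CONFIG_VAL(9000) packs the pair as 0x0166011e, while the default 1500-byte MTU reduces to the base values 54 and 66.]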
+
+
+/*
+ * rv2p_reg definition
+ * offset: 0x2800
+ */
+#define BNX2_RV2P_COMMAND	0x00002800
+#define BNX2_RV2P_COMMAND_ENABLED	(1L<<0)
+#define BNX2_RV2P_COMMAND_PROC1_INTRPT	(1L<<1)
+#define BNX2_RV2P_COMMAND_PROC2_INTRPT	(1L<<2)
+#define BNX2_RV2P_COMMAND_ABORT0	(1L<<4)
+#define BNX2_RV2P_COMMAND_ABORT1	(1L<<5)
+#define BNX2_RV2P_COMMAND_ABORT2	(1L<<6)
+#define BNX2_RV2P_COMMAND_ABORT3	(1L<<7)
+#define BNX2_RV2P_COMMAND_ABORT4	(1L<<8)
+#define BNX2_RV2P_COMMAND_ABORT5	(1L<<9)
+#define BNX2_RV2P_COMMAND_PROC1_RESET	(1L<<16)
+#define BNX2_RV2P_COMMAND_PROC2_RESET	(1L<<17)
+#define BNX2_RV2P_COMMAND_CTXIF_RESET	(1L<<18)
+
+#define BNX2_RV2P_STATUS	0x00002804
+#define BNX2_RV2P_STATUS_ALWAYS_0	(1L<<0)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT0_CNT	(1L<<8)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT1_CNT	(1L<<9)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT2_CNT	(1L<<10)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT3_CNT	(1L<<11)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT4_CNT	(1L<<12)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT5_CNT	(1L<<13)
+
+#define BNX2_RV2P_CONFIG	0x00002808
+#define BNX2_RV2P_CONFIG_STALL_PROC1	(1L<<0)
+#define BNX2_RV2P_CONFIG_STALL_PROC2	(1L<<1)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT0	(1L<<8)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT1	(1L<<9)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT2	(1L<<10)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT3	(1L<<11)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT4	(1L<<12)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT5	(1L<<13)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT0	(1L<<16)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT1	(1L<<17)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT2	(1L<<18)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT3	(1L<<19)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT4	(1L<<20)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT5	(1L<<21)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE	(0xfL<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_256	(0L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_512	(1L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_1K	(2L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_2K	(3L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_4K	(4L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_8K	(5L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_16K	(6L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_32K	(7L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_64K	(8L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_128K	(9L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_256K	(10L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_512K	(11L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_1M	(12L<<24)
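[Editor's note: the page-size encodings above advance by one per doubling from 256 bytes, i.e. encoding = log2(size) - 8 (256 -> 0, 512 -> 1, ..., 1M -> 12). A sketch of turning a power-of-two size into the field, assuming the kernel's ilog2() from <linux/log2.h>; the helper name is invented and not part of the patch.]

	/* Hypothetical: map a power-of-two buffer size (256B..1M) to the
	 * BNX2_RV2P_CONFIG_PAGE_SIZE field (bits 27:24). */
	static u32 bnx2_rv2p_page_size_field(u32 size)
	{
		return (u32)(ilog2(size) - 8) << 24;
	}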
+
+#define BNX2_RV2P_GEN_BFR_ADDR_0	0x00002810
+#define BNX2_RV2P_GEN_BFR_ADDR_0_VALUE	(0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_1	0x00002814
+#define BNX2_RV2P_GEN_BFR_ADDR_1_VALUE	(0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_2	0x00002818
+#define BNX2_RV2P_GEN_BFR_ADDR_2_VALUE	(0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_3	0x0000281c
+#define BNX2_RV2P_GEN_BFR_ADDR_3_VALUE	(0xffffL<<16)
+
+#define BNX2_RV2P_INSTR_HIGH	0x00002830
+#define BNX2_RV2P_INSTR_HIGH_HIGH	(0x1fL<<0)
+
+#define BNX2_RV2P_INSTR_LOW	0x00002834
+#define BNX2_RV2P_INSTR_LOW_LOW	(0xffffffffL<<0)
+
+#define BNX2_RV2P_PROC1_ADDR_CMD	0x00002838
+#define BNX2_RV2P_PROC1_ADDR_CMD_ADD	(0x3ffL<<0)
+#define BNX2_RV2P_PROC1_ADDR_CMD_RDWR	(1L<<31)
+
+#define BNX2_RV2P_PROC2_ADDR_CMD	0x0000283c
+#define BNX2_RV2P_PROC2_ADDR_CMD_ADD	(0x3ffL<<0)
+#define BNX2_RV2P_PROC2_ADDR_CMD_RDWR	(1L<<31)
+
+#define BNX2_RV2P_PROC1_GRC_DEBUG	0x00002840
+#define BNX2_RV2P_PROC2_GRC_DEBUG	0x00002844
+#define BNX2_RV2P_GRC_PROC_DEBUG	0x00002848
+#define BNX2_RV2P_DEBUG_VECT_PEEK	0x0000284c
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_VALUE	(0x7ffL<<0)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_PEEK_EN	(1L<<11)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_SEL	(0xfL<<12)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_VALUE	(0x7ffL<<16)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_PEEK_EN	(1L<<27)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_SEL	(0xfL<<28)
+
+#define BNX2_RV2P_MPFE_PFE_CTL	0x00002afc
+#define BNX2_RV2P_MPFE_PFE_CTL_INC_USAGE_CNT	(1L<<0)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE	(0xfL<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_0	(0L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_1	(1L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_2	(2L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_3	(3L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_4	(4L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_5	(5L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_6	(6L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_7	(7L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_8	(8L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_9	(9L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_10	(10L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_11	(11L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_12	(12L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_13	(13L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_14	(14L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_15	(15L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_COUNT	(0xfL<<12)
+#define BNX2_RV2P_MPFE_PFE_CTL_OFFSET	(0x1ffL<<16)
+
+#define BNX2_RV2P_RV2PPQ	0x00002b40
+#define BNX2_RV2P_PFTQ_CMD	0x00002b78
+#define BNX2_RV2P_PFTQ_CMD_OFFSET	(0x3ffL<<0)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP	(1L<<10)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP_0	(0L<<10)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP_1	(1L<<10)
+#define BNX2_RV2P_PFTQ_CMD_SFT_RESET	(1L<<25)
+#define BNX2_RV2P_PFTQ_CMD_RD_DATA	(1L<<26)
+#define BNX2_RV2P_PFTQ_CMD_ADD_INTERVEN	(1L<<27)
+#define BNX2_RV2P_PFTQ_CMD_ADD_DATA	(1L<<28)
+#define BNX2_RV2P_PFTQ_CMD_INTERVENE_CLR	(1L<<29)
+#define BNX2_RV2P_PFTQ_CMD_POP	(1L<<30)
+#define BNX2_RV2P_PFTQ_CMD_BUSY	(1L<<31)
+
+#define BNX2_RV2P_PFTQ_CTL	0x00002b7c
+#define BNX2_RV2P_PFTQ_CTL_INTERVENE	(1L<<0)
+#define BNX2_RV2P_PFTQ_CTL_OVERFLOW	(1L<<1)
+#define BNX2_RV2P_PFTQ_CTL_FORCE_INTERVENE	(1L<<2)
+#define BNX2_RV2P_PFTQ_CTL_MAX_DEPTH	(0x3ffL<<12)
+#define BNX2_RV2P_PFTQ_CTL_CUR_DEPTH	(0x3ffL<<22)
+
+#define BNX2_RV2P_RV2PTQ	0x00002b80
+#define BNX2_RV2P_TFTQ_CMD	0x00002bb8
+#define BNX2_RV2P_TFTQ_CMD_OFFSET	(0x3ffL<<0)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP	(1L<<10)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP_0	(0L<<10)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP_1	(1L<<10)
+#define BNX2_RV2P_TFTQ_CMD_SFT_RESET	(1L<<25)
+#define BNX2_RV2P_TFTQ_CMD_RD_DATA	(1L<<26)
+#define BNX2_RV2P_TFTQ_CMD_ADD_INTERVEN	(1L<<27)
+#define BNX2_RV2P_TFTQ_CMD_ADD_DATA	(1L<<28)
+#define BNX2_RV2P_TFTQ_CMD_INTERVENE_CLR	(1L<<29)
+#define BNX2_RV2P_TFTQ_CMD_POP	(1L<<30)
+#define BNX2_RV2P_TFTQ_CMD_BUSY	(1L<<31)
+
+#define BNX2_RV2P_TFTQ_CTL	0x00002bbc
+#define BNX2_RV2P_TFTQ_CTL_INTERVENE	(1L<<0)
+#define BNX2_RV2P_TFTQ_CTL_OVERFLOW	(1L<<1)
+#define BNX2_RV2P_TFTQ_CTL_FORCE_INTERVENE	(1L<<2)
+#define BNX2_RV2P_TFTQ_CTL_MAX_DEPTH	(0x3ffL<<12)
+#define BNX2_RV2P_TFTQ_CTL_CUR_DEPTH	(0x3ffL<<22)
+
+#define BNX2_RV2P_RV2PMQ	0x00002bc0
+#define BNX2_RV2P_MFTQ_CMD	0x00002bf8
+#define BNX2_RV2P_MFTQ_CMD_OFFSET	(0x3ffL<<0)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP	(1L<<10)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP_0	(0L<<10)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP_1	(1L<<10)
+#define BNX2_RV2P_MFTQ_CMD_SFT_RESET	(1L<<25)
+#define BNX2_RV2P_MFTQ_CMD_RD_DATA	(1L<<26)
+#define BNX2_RV2P_MFTQ_CMD_ADD_INTERVEN	(1L<<27)
+#define BNX2_RV2P_MFTQ_CMD_ADD_DATA	(1L<<28)
+#define BNX2_RV2P_MFTQ_CMD_INTERVENE_CLR	(1L<<29)
+#define BNX2_RV2P_MFTQ_CMD_POP	(1L<<30)
+#define BNX2_RV2P_MFTQ_CMD_BUSY	(1L<<31)
+
+#define BNX2_RV2P_MFTQ_CTL	0x00002bfc
+#define BNX2_RV2P_MFTQ_CTL_INTERVENE	(1L<<0)
+#define BNX2_RV2P_MFTQ_CTL_OVERFLOW	(1L<<1)
+#define BNX2_RV2P_MFTQ_CTL_FORCE_INTERVENE	(1L<<2)
+#define BNX2_RV2P_MFTQ_CTL_MAX_DEPTH	(0x3ffL<<12)
+#define BNX2_RV2P_MFTQ_CTL_CUR_DEPTH	(0x3ffL<<22)
+
+
+
+/*
+ * mq_reg definition
+ * offset: 0x3c00
+ */
+#define BNX2_MQ_COMMAND	0x00003c00
+#define BNX2_MQ_COMMAND_ENABLED	(1L<<0)
+#define BNX2_MQ_COMMAND_INIT	(1L<<1)
+#define BNX2_MQ_COMMAND_OVERFLOW	(1L<<4)
+#define BNX2_MQ_COMMAND_WR_ERROR	(1L<<5)
+#define BNX2_MQ_COMMAND_RD_ERROR	(1L<<6)
+#define BNX2_MQ_COMMAND_IDB_CFG_ERROR	(1L<<7)
+#define BNX2_MQ_COMMAND_IDB_OVERFLOW	(1L<<10)
+#define BNX2_MQ_COMMAND_NO_BIN_ERROR	(1L<<11)
+#define BNX2_MQ_COMMAND_NO_MAP_ERROR	(1L<<12)
+
+#define BNX2_MQ_STATUS	0x00003c04
+#define BNX2_MQ_STATUS_CTX_ACCESS_STAT	(1L<<16)
+#define BNX2_MQ_STATUS_CTX_ACCESS64_STAT	(1L<<17)
+#define BNX2_MQ_STATUS_PCI_STALL_STAT	(1L<<18)
+#define BNX2_MQ_STATUS_IDB_OFLOW_STAT	(1L<<19)
+
+#define BNX2_MQ_CONFIG	0x00003c08
+#define BNX2_MQ_CONFIG_TX_HIGH_PRI	(1L<<0)
+#define BNX2_MQ_CONFIG_HALT_DIS	(1L<<1)
+#define BNX2_MQ_CONFIG_BIN_MQ_MODE	(1L<<2)
+#define BNX2_MQ_CONFIG_DIS_IDB_DROP	(1L<<3)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE	(0x7L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256	(0L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_512	(1L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_1K	(2L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_2K	(3L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_4K	(4L<<4)
+#define BNX2_MQ_CONFIG_MAX_DEPTH	(0x7fL<<8)
+#define BNX2_MQ_CONFIG_CUR_DEPTH	(0x7fL<<20)
+
+#define BNX2_MQ_ENQUEUE1	0x00003c0c
+#define BNX2_MQ_ENQUEUE1_OFFSET	(0x3fL<<2)
+#define BNX2_MQ_ENQUEUE1_CID	(0x3fffL<<8)
+#define BNX2_MQ_ENQUEUE1_BYTE_MASK	(0xfL<<24)
+#define BNX2_MQ_ENQUEUE1_KNL_MODE	(1L<<28)
+
+#define BNX2_MQ_ENQUEUE2	0x00003c10
+#define BNX2_MQ_BAD_WR_ADDR	0x00003c14
+#define BNX2_MQ_BAD_RD_ADDR	0x00003c18
+#define BNX2_MQ_KNL_BYP_WIND_START	0x00003c1c
+#define BNX2_MQ_KNL_BYP_WIND_START_VALUE	(0xfffffL<<12)
+
+#define BNX2_MQ_KNL_WIND_END	0x00003c20
+#define BNX2_MQ_KNL_WIND_END_VALUE	(0xffffffL<<8)
+
+#define BNX2_MQ_KNL_WRITE_MASK1	0x00003c24
+#define BNX2_MQ_KNL_TX_MASK1	0x00003c28
+#define BNX2_MQ_KNL_CMD_MASK1	0x00003c2c
+#define BNX2_MQ_KNL_COND_ENQUEUE_MASK1	0x00003c30
+#define BNX2_MQ_KNL_RX_V2P_MASK1	0x00003c34
+#define BNX2_MQ_KNL_WRITE_MASK2	0x00003c38
+#define BNX2_MQ_KNL_TX_MASK2	0x00003c3c
+#define BNX2_MQ_KNL_CMD_MASK2	0x00003c40
+#define BNX2_MQ_KNL_COND_ENQUEUE_MASK2	0x00003c44
+#define BNX2_MQ_KNL_RX_V2P_MASK2	0x00003c48
+#define BNX2_MQ_KNL_BYP_WRITE_MASK1	0x00003c4c
+#define BNX2_MQ_KNL_BYP_TX_MASK1	0x00003c50
+#define BNX2_MQ_KNL_BYP_CMD_MASK1	0x00003c54
+#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK1	0x00003c58
+#define BNX2_MQ_KNL_BYP_RX_V2P_MASK1	0x00003c5c
+#define BNX2_MQ_KNL_BYP_WRITE_MASK2	0x00003c60
+#define BNX2_MQ_KNL_BYP_TX_MASK2	0x00003c64
+#define BNX2_MQ_KNL_BYP_CMD_MASK2	0x00003c68
+#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK2	0x00003c6c
+#define BNX2_MQ_KNL_BYP_RX_V2P_MASK2	0x00003c70
+#define BNX2_MQ_MEM_WR_ADDR	0x00003c74
+#define BNX2_MQ_MEM_WR_ADDR_VALUE	(0x3fL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA0	0x00003c78
+#define BNX2_MQ_MEM_WR_DATA0_VALUE	(0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA1	0x00003c7c
+#define BNX2_MQ_MEM_WR_DATA1_VALUE	(0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA2	0x00003c80
+#define BNX2_MQ_MEM_WR_DATA2_VALUE	(0x3fffffffL<<0)
+#define BNX2_MQ_MEM_WR_DATA2_VALUE_XI	(0x7fffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_ADDR	0x00003c84
+#define BNX2_MQ_MEM_RD_ADDR_VALUE	(0x3fL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA0	0x00003c88
+#define BNX2_MQ_MEM_RD_DATA0_VALUE	(0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA1	0x00003c8c
+#define BNX2_MQ_MEM_RD_DATA1_VALUE	(0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA2	0x00003c90
+#define BNX2_MQ_MEM_RD_DATA2_VALUE	(0x3fffffffL<<0)
+#define BNX2_MQ_MEM_RD_DATA2_VALUE_XI	(0x7fffffffL<<0)
+
+#define BNX2_MQ_MAP_L2_3	0x00003d2c
+#define BNX2_MQ_MAP_L2_3_MQ_OFFSET	(0xffL<<0)
+#define BNX2_MQ_MAP_L2_3_SZ	(0x3L<<8)
+#define BNX2_MQ_MAP_L2_3_CTX_OFFSET	(0x2ffL<<10)
+#define BNX2_MQ_MAP_L2_3_BIN_OFFSET	(0x7L<<23)
+#define BNX2_MQ_MAP_L2_3_ARM	(0x3L<<26)
+#define BNX2_MQ_MAP_L2_3_ENA	(0x1L<<31)
+#define BNX2_MQ_MAP_L2_3_DEFAULT	0x82004646
+
+#define BNX2_MQ_MAP_L2_5	0x00003d34
+#define BNX2_MQ_MAP_L2_5_ARM	(0x3L<<26)
+
+/*
+ * tsch_reg definition
+ * offset: 0x4c00
+ */
+#define BNX2_TSCH_TSS_CFG	0x00004c1c
+#define BNX2_TSCH_TSS_CFG_TSS_START_CID	(0x7ffL<<8)
+#define BNX2_TSCH_TSS_CFG_NUM_OF_TSS_CON	(0xfL<<24)
+
+
+
+/*
+ * tbdr_reg definition
+ * offset: 0x5000
+ */
+#define BNX2_TBDR_COMMAND	0x00005000
+#define BNX2_TBDR_COMMAND_ENABLE	(1L<<0)
+#define BNX2_TBDR_COMMAND_SOFT_RST	(1L<<1)
+#define BNX2_TBDR_COMMAND_MSTR_ABORT	(1L<<4)
+
+#define BNX2_TBDR_STATUS	0x00005004
+#define BNX2_TBDR_STATUS_DMA_WAIT	(1L<<0)
+#define BNX2_TBDR_STATUS_FTQ_WAIT	(1L<<1)
+#define BNX2_TBDR_STATUS_FIFO_OVERFLOW	(1L<<2)
+#define BNX2_TBDR_STATUS_FIFO_UNDERFLOW	(1L<<3)
+#define BNX2_TBDR_STATUS_SEARCHMISS_ERROR	(1L<<4)
+#define BNX2_TBDR_STATUS_FTQ_ENTRY_CNT	(1L<<5)
+#define BNX2_TBDR_STATUS_BURST_CNT	(1L<<6)
+
+#define BNX2_TBDR_CONFIG	0x00005008
+#define BNX2_TBDR_CONFIG_MAX_BDS	(0xffL<<0)
+#define BNX2_TBDR_CONFIG_SWAP_MODE	(1L<<8)
+#define BNX2_TBDR_CONFIG_PRIORITY	(1L<<9)
+#define BNX2_TBDR_CONFIG_CACHE_NEXT_PAGE_PTRS	(1L<<10)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE	(0xfL<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_256	(0L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_512	(1L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_1K	(2L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_2K	(3L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_4K	(4L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_8K	(5L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_16K	(6L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_32K	(7L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_64K	(8L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_128K	(9L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_256K	(10L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_512K	(11L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_1M	(12L<<24)
+
+#define BNX2_TBDR_DEBUG_VECT_PEEK	0x0000500c
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_VALUE	(0x7ffL<<0)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_PEEK_EN	(1L<<11)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_SEL	(0xfL<<12)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_VALUE	(0x7ffL<<16)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_PEEK_EN	(1L<<27)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_SEL	(0xfL<<28)
+
+#define BNX2_TBDR_CKSUM_ERROR_STATUS	0x00005010
+#define BNX2_TBDR_CKSUM_ERROR_STATUS_CALCULATED	(0xffffL<<0)
+#define BNX2_TBDR_CKSUM_ERROR_STATUS_EXPECTED	(0xffffL<<16)
+
+#define BNX2_TBDR_TBDRQ	0x000053c0
+#define BNX2_TBDR_FTQ_CMD	0x000053f8
+#define BNX2_TBDR_FTQ_CMD_OFFSET	(0x3ffL<<0)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP	(1L<<10)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP_0	(0L<<10)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP_1	(1L<<10)
+#define BNX2_TBDR_FTQ_CMD_SFT_RESET	(1L<<25)
+#define BNX2_TBDR_FTQ_CMD_RD_DATA	(1L<<26)
+#define BNX2_TBDR_FTQ_CMD_ADD_INTERVEN	(1L<<27)
+#define BNX2_TBDR_FTQ_CMD_ADD_DATA	(1L<<28)
+#define BNX2_TBDR_FTQ_CMD_INTERVENE_CLR	(1L<<29)
+#define BNX2_TBDR_FTQ_CMD_POP	(1L<<30)
+#define BNX2_TBDR_FTQ_CMD_BUSY	(1L<<31)
+
+#define BNX2_TBDR_FTQ_CTL	0x000053fc
+#define BNX2_TBDR_FTQ_CTL_INTERVENE	(1L<<0)
+#define BNX2_TBDR_FTQ_CTL_OVERFLOW	(1L<<1)
+#define BNX2_TBDR_FTQ_CTL_FORCE_INTERVENE	(1L<<2)
+#define BNX2_TBDR_FTQ_CTL_MAX_DEPTH	(0x3ffL<<12)
+#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH	(0x3ffL<<22)
+
+
+
+/*
+ * tdma_reg definition
+ * offset: 0x5c00
+ */
+#define BNX2_TDMA_COMMAND	0x00005c00
+#define BNX2_TDMA_COMMAND_ENABLED	(1L<<0)
+#define BNX2_TDMA_COMMAND_MASTER_ABORT	(1L<<4)
+#define BNX2_TDMA_COMMAND_CS16_ERR	(1L<<5)
+#define BNX2_TDMA_COMMAND_BAD_L2_LENGTH_ABORT	(1L<<7)
+#define BNX2_TDMA_COMMAND_MASK_CS1	(1L<<20)
+#define BNX2_TDMA_COMMAND_MASK_CS2	(1L<<21)
+#define BNX2_TDMA_COMMAND_MASK_CS3	(1L<<22)
+#define BNX2_TDMA_COMMAND_MASK_CS4	(1L<<23)
+#define BNX2_TDMA_COMMAND_FORCE_ILOCK_CKERR	(1L<<24)
+#define BNX2_TDMA_COMMAND_OFIFO_CLR	(1L<<30)
+#define BNX2_TDMA_COMMAND_IFIFO_CLR	(1L<<31)
+
+#define BNX2_TDMA_STATUS	0x00005c04
+#define BNX2_TDMA_STATUS_DMA_WAIT	(1L<<0)
+#define BNX2_TDMA_STATUS_PAYLOAD_WAIT	(1L<<1)
+#define BNX2_TDMA_STATUS_PATCH_FTQ_WAIT	(1L<<2)
+#define BNX2_TDMA_STATUS_LOCK_WAIT	(1L<<3)
+#define BNX2_TDMA_STATUS_FTQ_ENTRY_CNT	(1L<<16)
+#define BNX2_TDMA_STATUS_BURST_CNT	(1L<<17)
+#define BNX2_TDMA_STATUS_MAX_IFIFO_DEPTH	(0x3fL<<20)
+#define BNX2_TDMA_STATUS_OFIFO_OVERFLOW	(1L<<30)
+#define BNX2_TDMA_STATUS_IFIFO_OVERFLOW	(1L<<31)
+
+#define BNX2_TDMA_CONFIG	0x00005c08
+#define BNX2_TDMA_CONFIG_ONE_DMA	(1L<<0)
+#define BNX2_TDMA_CONFIG_ONE_RECORD	(1L<<1)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN	(0x3L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_0	(0L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_1	(1L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_2	(2L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_3	(3L<<2)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ	(0xfL<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_64	(0L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_128	(0x4L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_256	(0x6L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_512	(0x8L<<4)
+#define BNX2_TDMA_CONFIG_LINE_SZ	(0xfL<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_64	(0L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_128	(4L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_256	(6L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_512	(8L<<8)
+#define BNX2_TDMA_CONFIG_ALIGN_ENA	(1L<<15)
+#define BNX2_TDMA_CONFIG_CHK_L2_BD	(1L<<16)
+#define BNX2_TDMA_CONFIG_CMPL_ENTRY	(1L<<17)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP	(1L<<19)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP_3	(0L<<19)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP_2	(1L<<19)
+#define BNX2_TDMA_CONFIG_FIFO_CMP	(0xfL<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_XI	(0x7L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_0_XI	(0L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_4_XI	(1L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_8_XI	(2L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_16_XI	(3L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_32_XI	(4L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_64_XI	(5L<<20)
+#define BNX2_TDMA_CONFIG_FIFO_CMP_EN_XI	(1L<<23)
+#define BNX2_TDMA_CONFIG_BYTES_OST_XI	(0x7L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_512_XI	(0L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_1024_XI	(1L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_2048_XI	(2L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_4096_XI	(3L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_8192_XI	(4L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_16384_XI	(5L<<24)
+#define BNX2_TDMA_CONFIG_HC_BYPASS_XI	(1L<<27)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_XI	(0x7L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_128_XI	(0L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_256_XI	(1L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_512_XI	(2L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_1024_XI	(3L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_2048_XI	(4L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_4096_XI	(5L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_EN_XI	(1L<<31)
+
+#define BNX2_TDMA_PAYLOAD_PROD	0x00005c0c
+#define BNX2_TDMA_PAYLOAD_PROD_VALUE	(0x1fffL<<3)
+
+#define BNX2_TDMA_DBG_WATCHDOG	0x00005c10
+#define BNX2_TDMA_DBG_TRIGGER	0x00005c14
+#define BNX2_TDMA_DMAD_FSM	0x00005c80
+#define BNX2_TDMA_DMAD_FSM_BD_INVLD	(1L<<0)
+#define BNX2_TDMA_DMAD_FSM_PUSH	(0xfL<<4)
+#define BNX2_TDMA_DMAD_FSM_ARB_TBDC	(0x3L<<8)
+#define BNX2_TDMA_DMAD_FSM_ARB_CTX	(1L<<12)
+#define BNX2_TDMA_DMAD_FSM_DR_INTF	(1L<<16)
+#define BNX2_TDMA_DMAD_FSM_DMAD	(0x7L<<20)
+#define BNX2_TDMA_DMAD_FSM_BD	(0xfL<<24)
+
+#define BNX2_TDMA_DMAD_STATUS	0x00005c84
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_PUSH_ENTRY	(0x3L<<0)
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_DMAD_ENTRY	(0x3L<<4)
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_BD_ENTRY	(0x3L<<8)
+#define BNX2_TDMA_DMAD_STATUS_IFTQ_ENUM	(0xfL<<12)
+
+#define BNX2_TDMA_DR_INTF_FSM	0x00005c88
+#define BNX2_TDMA_DR_INTF_FSM_L2_COMP	(0x3L<<0)
+#define BNX2_TDMA_DR_INTF_FSM_TPATQ	(0x7L<<4)
+#define BNX2_TDMA_DR_INTF_FSM_TPBUF	(0x3L<<8)
+#define BNX2_TDMA_DR_INTF_FSM_DR_BUF	(0x7L<<12)
+#define BNX2_TDMA_DR_INTF_FSM_DMAD	(0x7L<<16)
+
+#define BNX2_TDMA_DR_INTF_STATUS	0x00005c8c
+#define BNX2_TDMA_DR_INTF_STATUS_HOLE_PHASE	(0x7L<<0)
+#define BNX2_TDMA_DR_INTF_STATUS_DATA_AVAIL	(0x3L<<4)
+#define BNX2_TDMA_DR_INTF_STATUS_SHIFT_ADDR	(0x7L<<8)
+#define BNX2_TDMA_DR_INTF_STATUS_NXT_PNTR	(0xfL<<12)
+#define BNX2_TDMA_DR_INTF_STATUS_BYTE_COUNT	(0x7L<<16)
+
+#define BNX2_TDMA_PUSH_FSM	0x00005c90
+#define BNX2_TDMA_BD_IF_DEBUG	0x00005c94
+#define BNX2_TDMA_DMAD_IF_DEBUG	0x00005c98
+#define BNX2_TDMA_CTX_IF_DEBUG	0x00005c9c
+#define BNX2_TDMA_TPBUF_IF_DEBUG	0x00005ca0
+#define BNX2_TDMA_DR_IF_DEBUG	0x00005ca4
+#define BNX2_TDMA_TPATQ_IF_DEBUG	0x00005ca8
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM	0x00005cac
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM_CALCULATED	(0xffffL<<0)
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM_EXPECTED	(0xffffL<<16)
+
+#define BNX2_TDMA_TDMA_PCIE_CKSUM	0x00005cb0
+#define BNX2_TDMA_TDMA_PCIE_CKSUM_CALCULATED	(0xffffL<<0)
+#define BNX2_TDMA_TDMA_PCIE_CKSUM_EXPECTED	(0xffffL<<16)
+
+#define BNX2_TDMA_TDMAQ	0x00005fc0
+#define BNX2_TDMA_FTQ_CMD	0x00005ff8
+#define BNX2_TDMA_FTQ_CMD_OFFSET	(0x3ffL<<0)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP	(1L<<10)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP_0	(0L<<10)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP_1	(1L<<10)
+#define BNX2_TDMA_FTQ_CMD_SFT_RESET	(1L<<25)
+#define BNX2_TDMA_FTQ_CMD_RD_DATA	(1L<<26)
+#define BNX2_TDMA_FTQ_CMD_ADD_INTERVEN	(1L<<27)
+#define BNX2_TDMA_FTQ_CMD_ADD_DATA	(1L<<28)
+#define BNX2_TDMA_FTQ_CMD_INTERVENE_CLR	(1L<<29)
+#define BNX2_TDMA_FTQ_CMD_POP	(1L<<30)
+#define BNX2_TDMA_FTQ_CMD_BUSY	(1L<<31)
+
+#define BNX2_TDMA_FTQ_CTL	0x00005ffc
+#define BNX2_TDMA_FTQ_CTL_INTERVENE	(1L<<0)
+#define BNX2_TDMA_FTQ_CTL_OVERFLOW	(1L<<1)
+#define BNX2_TDMA_FTQ_CTL_FORCE_INTERVENE	(1L<<2)
+#define BNX2_TDMA_FTQ_CTL_MAX_DEPTH	(0x3ffL<<12)
+#define BNX2_TDMA_FTQ_CTL_CUR_DEPTH	(0x3ffL<<22)
+
+
+
+/*
+ * hc_reg definition
+ * offset: 0x6800
+ */
+#define BNX2_HC_COMMAND	0x00006800
+#define BNX2_HC_COMMAND_ENABLE	(1L<<0)
+#define BNX2_HC_COMMAND_SKIP_ABORT	(1L<<4)
+#define BNX2_HC_COMMAND_COAL_NOW	(1L<<16)
+#define BNX2_HC_COMMAND_COAL_NOW_WO_INT	(1L<<17)
+#define BNX2_HC_COMMAND_STATS_NOW	(1L<<18)
+#define BNX2_HC_COMMAND_FORCE_INT	(0x3L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_NULL	(0L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_HIGH	(1L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_LOW	(2L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_FREE	(3L<<19)
+#define BNX2_HC_COMMAND_CLR_STAT_NOW	(1L<<21)
+#define BNX2_HC_COMMAND_MAIN_PWR_INT	(1L<<22)
+#define BNX2_HC_COMMAND_COAL_ON_NEXT_EVENT	(1L<<27)
+
+#define BNX2_HC_STATUS	0x00006804
+#define BNX2_HC_STATUS_MASTER_ABORT	(1L<<0)
+#define BNX2_HC_STATUS_PARITY_ERROR_STATE	(1L<<1)
+#define BNX2_HC_STATUS_PCI_CLK_CNT_STAT	(1L<<16)
+#define BNX2_HC_STATUS_CORE_CLK_CNT_STAT	(1L<<17)
+#define BNX2_HC_STATUS_NUM_STATUS_BLOCKS_STAT	(1L<<18)
+#define BNX2_HC_STATUS_NUM_INT_GEN_STAT	(1L<<19)
+#define BNX2_HC_STATUS_NUM_INT_MBOX_WR_STAT	(1L<<20)
+#define BNX2_HC_STATUS_CORE_CLKS_TO_HW_INTACK_STAT	(1L<<23)
+#define BNX2_HC_STATUS_CORE_CLKS_TO_SW_INTACK_STAT	(1L<<24)
+#define BNX2_HC_STATUS_CORE_CLKS_DURING_SW_INTACK_STAT	(1L<<25)
+
+#define BNX2_HC_CONFIG	0x00006808
+#define BNX2_HC_CONFIG_COLLECT_STATS	(1L<<0)
+#define BNX2_HC_CONFIG_RX_TMR_MODE	(1L<<1)
+#define BNX2_HC_CONFIG_TX_TMR_MODE	(1L<<2)
+#define BNX2_HC_CONFIG_COM_TMR_MODE	(1L<<3)
+#define BNX2_HC_CONFIG_CMD_TMR_MODE	(1L<<4)
+#define BNX2_HC_CONFIG_STATISTIC_PRIORITY	(1L<<5)
+#define BNX2_HC_CONFIG_STATUS_PRIORITY	(1L<<6)
+#define BNX2_HC_CONFIG_STAT_MEM_ADDR	(0xffL<<8)
+#define BNX2_HC_CONFIG_PER_MODE	(1L<<16)
+#define BNX2_HC_CONFIG_ONE_SHOT	(1L<<17)
+#define BNX2_HC_CONFIG_USE_INT_PARAM	(1L<<18)
+#define BNX2_HC_CONFIG_SET_MASK_AT_RD	(1L<<19)
+#define BNX2_HC_CONFIG_PER_COLLECT_LIMIT	(0xfL<<20)
+#define BNX2_HC_CONFIG_SB_ADDR_INC	(0x7L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_64B	(0L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_128B	(1L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_256B	(2L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_512B	(3L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_1024B	(4L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_2048B	(5L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_4096B	(6L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_8192B	(7L<<24)
+#define BNX2_HC_CONFIG_GEN_STAT_AVG_INTR	(1L<<29)
+#define BNX2_HC_CONFIG_UNMASK_ALL	(1L<<30)
+#define BNX2_HC_CONFIG_TX_SEL	(1L<<31)
+
+#define BNX2_HC_ATTN_BITS_ENABLE	0x0000680c
+#define BNX2_HC_STATUS_ADDR_L	0x00006810
+#define BNX2_HC_STATUS_ADDR_H	0x00006814
+#define BNX2_HC_STATISTICS_ADDR_L	0x00006818
+#define BNX2_HC_STATISTICS_ADDR_H	0x0000681c
+#define BNX2_HC_TX_QUICK_CONS_TRIP	0x00006820
+#define BNX2_HC_TX_QUICK_CONS_TRIP_VALUE	(0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_INT	(0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP	0x00006824
+#define BNX2_HC_COMP_PROD_TRIP_VALUE	(0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_INT	(0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP	0x00006828
+#define BNX2_HC_RX_QUICK_CONS_TRIP_VALUE	(0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_INT	(0xffL<<16)
+
+#define BNX2_HC_RX_TICKS	0x0000682c
+#define BNX2_HC_RX_TICKS_VALUE	(0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_INT	(0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS	0x00006830
+#define BNX2_HC_TX_TICKS_VALUE	(0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_INT	(0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS	0x00006834
+#define BNX2_HC_COM_TICKS_VALUE	(0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_INT	(0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS	0x00006838
+#define BNX2_HC_CMD_TICKS_VALUE	(0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_INT	(0x3ffL<<16)
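[Editor's note: each of the *_TICKS host-coalescing registers above pairs a normal tick value (bits 9:0) with an interrupt-time value (bits 25:16). A one-line sketch of the packing implied by those masks; the variable names are invented for illustration.]

	/* e.g. for BNX2_HC_RX_TICKS; both fields are 10 bits wide */
	u32 rx_ticks_reg = (normal_ticks & 0x3ff) | ((int_ticks & 0x3ff) << 16);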
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT0 (12L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT1 (13L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT2 (14L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT3 (15L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT4 (16L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT5 (17L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT6 (18L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT7 (19L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT0 (20L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT1 (21L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT2 (22L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT3 (23L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT4 (24L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT5 (25L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT6 (26L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT7 (27L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT8 (28L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT9 (29L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT10 (30L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT11 (31L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT0 (32L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT1 (33L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT2 (34L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT3 (35L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT0 (36L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT1 (37L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT2 (38L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT3 (39L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT4 (40L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT5 (41L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT6 (42L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT7 (43L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT0 (44L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT1 (45L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT2 (46L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT3 (47L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT4 (48L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT5 (49L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT6 (50L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT7 (51L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_PCI_CLK_CNT (52L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CORE_CLK_CNT (53L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS (54L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN (55L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR (56L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK (59L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK (60L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK (61L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_CMD_CNT (62L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_SLOT_CNT (63L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_CMD_CNT (64L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_SLOT_CNT (65L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT (66L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT (67L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT (68L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT (69L<<0) +#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT (70L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT (71L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT (72L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT (73L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT (74L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT (75L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT (76L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT (77L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT (78L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT (79L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT (80L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT (81L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT (82L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT (83L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT (84L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_TRANSFERS_CNT (85L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_DELAY_PCI_CLKS_CNT (86L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_TRANSFERS_CNT (87L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_DELAY_PCI_CLKS_CNT (88L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_RETRY_AFTER_DATA_CNT (89L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_TRANSFERS_CNT (90L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_DELAY_PCI_CLKS_CNT (91L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_TRANSFERS_CNT (92L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_DELAY_PCI_CLKS_CNT (93L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_RETRY_AFTER_DATA_CNT (94L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_WR_CNT64 (95L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_RD_CNT64 (96L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_ACC_STALL_CLKS (97L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_LOCK_STALL_CLKS (98L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS_STAT (99L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS64_STAT (100L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_PCI_STALL_STAT (101L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_FTQ_ENTRY_CNT (102L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_BURST_CNT (103L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_FTQ_ENTRY_CNT (104L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_BURST_CNT (105L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_FTQ_ENTRY_CNT (106L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_BURST_CNT (107L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUP_MATCH_CNT (108L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_POLL_PASS_CNT (109L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR1_CNT (110L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR2_CNT (111L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR3_CNT (112L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR4_CNT (113L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR5_CNT (114L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT0 (115L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT1 (116L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT2 (117L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT3 (118L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT4 (119L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT5 (120L<<0) +#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC1_MISS (121L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC2_MISS (122L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_BURST_CNT (127L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1 (0x7fL<<8) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2 (0x7fL<<16) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3 (0x7fL<<24) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_XI (0xffL<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UMP_RX_FRAME_DROP_XI (52L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S0_XI (57L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S1_XI (58L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S2_XI (85L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S3_XI (86L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S4_XI (87L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S5_XI (88L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S6_XI (89L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S7_XI (90L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S8_XI (91L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S9_XI (92L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S10_XI (93L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MQ_IDB_OFLOW_XI (94L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_RD_CNT_XI (123L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_WR_CNT_XI (124L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_HITS_XI (125L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_MISSES_XI (126L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC1_XI (128L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC1_XI (129L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC1_XI (130L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC1_XI (131L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC1_XI (132L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC1_XI (133L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC2_XI (134L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC2_XI (135L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC2_XI (136L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC2_XI (137L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC2_XI (138L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC2_XI (139L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC3_XI (140L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC3_XI (141L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC3_XI (142L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC3_XI (143L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC3_XI (144L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC3_XI (145L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC4_XI (146L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC4_XI (147L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC4_XI (148L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC4_XI (149L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC4_XI (150L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC4_XI (151L<<0) +#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC5_XI (152L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC5_XI (153L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC5_XI (154L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC5_XI (155L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC5_XI (156L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC5_XI (157L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC6_XI (158L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC6_XI (159L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC6_XI (160L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC6_XI (161L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC6_XI (162L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC6_XI (163L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC7_XI (164L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC7_XI (165L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC7_XI (166L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC7_XI (167L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC7_XI (168L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC7_XI (169L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC8_XI (170L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC8_XI (171L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC8_XI (172L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC8_XI (173L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC8_XI (174L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC8_XI (175L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_CMD_CNT_XI (176L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_SLOT_CNT_XI (177L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI (178L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1_XI (0xffL<<8) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2_XI (0xffL<<16) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3_XI (0xffL<<24) + +#define BNX2_HC_STAT_GEN_SEL_1 0x00006854 +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4 (0x7fL<<0) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5 (0x7fL<<8) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6 (0x7fL<<16) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7 (0x7fL<<24) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4_XI (0xffL<<0) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5_XI (0xffL<<8) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6_XI (0xffL<<16) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7_XI (0xffL<<24) + +#define BNX2_HC_STAT_GEN_SEL_2 0x00006858 +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8 (0x7fL<<0) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9 (0x7fL<<8) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10 (0x7fL<<16) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11 (0x7fL<<24) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8_XI (0xffL<<0) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9_XI (0xffL<<8) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10_XI (0xffL<<16) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11_XI (0xffL<<24) + +#define BNX2_HC_STAT_GEN_SEL_3 0x0000685c +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12 (0x7fL<<0) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13 (0x7fL<<8) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14 (0x7fL<<16) +#define 
BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15 (0x7fL<<24) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12_XI (0xffL<<0) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13_XI (0xffL<<8) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14_XI (0xffL<<16) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15_XI (0xffL<<24) + +#define BNX2_HC_STAT_GEN_STAT0 0x00006888 +#define BNX2_HC_STAT_GEN_STAT1 0x0000688c +#define BNX2_HC_STAT_GEN_STAT2 0x00006890 +#define BNX2_HC_STAT_GEN_STAT3 0x00006894 +#define BNX2_HC_STAT_GEN_STAT4 0x00006898 +#define BNX2_HC_STAT_GEN_STAT5 0x0000689c +#define BNX2_HC_STAT_GEN_STAT6 0x000068a0 +#define BNX2_HC_STAT_GEN_STAT7 0x000068a4 +#define BNX2_HC_STAT_GEN_STAT8 0x000068a8 +#define BNX2_HC_STAT_GEN_STAT9 0x000068ac +#define BNX2_HC_STAT_GEN_STAT10 0x000068b0 +#define BNX2_HC_STAT_GEN_STAT11 0x000068b4 +#define BNX2_HC_STAT_GEN_STAT12 0x000068b8 +#define BNX2_HC_STAT_GEN_STAT13 0x000068bc +#define BNX2_HC_STAT_GEN_STAT14 0x000068c0 +#define BNX2_HC_STAT_GEN_STAT15 0x000068c4 +#define BNX2_HC_STAT_GEN_STAT_AC0 0x000068c8 +#define BNX2_HC_STAT_GEN_STAT_AC1 0x000068cc +#define BNX2_HC_STAT_GEN_STAT_AC2 0x000068d0 +#define BNX2_HC_STAT_GEN_STAT_AC3 0x000068d4 +#define BNX2_HC_STAT_GEN_STAT_AC4 0x000068d8 +#define BNX2_HC_STAT_GEN_STAT_AC5 0x000068dc +#define BNX2_HC_STAT_GEN_STAT_AC6 0x000068e0 +#define BNX2_HC_STAT_GEN_STAT_AC7 0x000068e4 +#define BNX2_HC_STAT_GEN_STAT_AC8 0x000068e8 +#define BNX2_HC_STAT_GEN_STAT_AC9 0x000068ec +#define BNX2_HC_STAT_GEN_STAT_AC10 0x000068f0 +#define BNX2_HC_STAT_GEN_STAT_AC11 0x000068f4 +#define BNX2_HC_STAT_GEN_STAT_AC12 0x000068f8 +#define BNX2_HC_STAT_GEN_STAT_AC13 0x000068fc +#define BNX2_HC_STAT_GEN_STAT_AC14 0x00006900 +#define BNX2_HC_STAT_GEN_STAT_AC15 0x00006904 +#define BNX2_HC_STAT_GEN_STAT_AC 0x000068c8 +#define BNX2_HC_VIS 0x00006908 +#define BNX2_HC_VIS_STAT_BUILD_STATE (0xfL<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_IDLE (0L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_START (1L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_REQUEST (2L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE64 (3L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE32 (4L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE_DONE (5L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_DMA (6L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_CONTROL (7L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_LOW (8L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_HIGH (9L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_DATA (10L<<0) +#define BNX2_HC_VIS_DMA_STAT_STATE (0xfL<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_IDLE (0L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_PARAM (1L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_DMA (2L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP (3L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_COMP (4L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_PARAM (5L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_DMA (6L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_1 (7L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_2 (8L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_WAIT (9L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_ABORT (15L<<8) +#define BNX2_HC_VIS_DMA_MSI_STATE (0x7L<<12) +#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE (0x3L<<15) +#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_IDLE (0L<<15) +#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_COUNT (1L<<15) +#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_START (2L<<15) + +#define BNX2_HC_VIS_1 0x0000690c +#define BNX2_HC_VIS_1_HW_INTACK_STATE (1L<<4) +#define BNX2_HC_VIS_1_HW_INTACK_STATE_IDLE (0L<<4) +#define BNX2_HC_VIS_1_HW_INTACK_STATE_COUNT (1L<<4) +#define 
BNX2_HC_VIS_1_SW_INTACK_STATE (1L<<5) +#define BNX2_HC_VIS_1_SW_INTACK_STATE_IDLE (0L<<5) +#define BNX2_HC_VIS_1_SW_INTACK_STATE_COUNT (1L<<5) +#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE (1L<<6) +#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_IDLE (0L<<6) +#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_COUNT (1L<<6) +#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE (1L<<7) +#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_IDLE (0L<<7) +#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_COUNT (1L<<7) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE (0xfL<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_IDLE (0L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_DMA (1L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_UPDATE (2L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_ASSIGN (3L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_WAIT (4L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_UPDATE (5L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_ASSIGN (6L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_WAIT (7L<<17) +#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE (0x3L<<21) +#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_NORMAL (0L<<21) +#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_CLEAR (1L<<21) +#define BNX2_HC_VIS_1_INT_GEN_STATE (1L<<23) +#define BNX2_HC_VIS_1_INT_GEN_STATE_DLE (0L<<23) +#define BNX2_HC_VIS_1_INT_GEN_STATE_NTERRUPT (1L<<23) +#define BNX2_HC_VIS_1_STAT_CHAN_ID (0x7L<<24) +#define BNX2_HC_VIS_1_INT_B (1L<<27) + +#define BNX2_HC_DEBUG_VECT_PEEK 0x00006910 +#define BNX2_HC_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_HC_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_HC_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_HC_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_HC_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_HC_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_HC_COALESCE_NOW 0x00006914 +#define BNX2_HC_COALESCE_NOW_COAL_NOW (0x1ffL<<1) +#define BNX2_HC_COALESCE_NOW_COAL_NOW_WO_INT (0x1ffL<<11) +#define BNX2_HC_COALESCE_NOW_COAL_ON_NXT_EVENT (0x1ffL<<21) + +#define BNX2_HC_MSIX_BIT_VECTOR 0x00006918 +#define BNX2_HC_MSIX_BIT_VECTOR_VAL (0x1ffL<<0) + +#define BNX2_HC_SB_CONFIG_1 0x00006a00 +#define BNX2_HC_SB_CONFIG_1_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_1_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_1_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_1_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_1_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_1_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_1_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_1_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_1 0x00006a04 +#define BNX2_HC_TX_QUICK_CONS_TRIP_1_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_1_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_1 0x00006a08 +#define BNX2_HC_COMP_PROD_TRIP_1_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_1_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_1 0x00006a0c +#define BNX2_HC_RX_QUICK_CONS_TRIP_1_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_1_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_1 0x00006a10 +#define BNX2_HC_RX_TICKS_1_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_1_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_1 0x00006a14 +#define BNX2_HC_TX_TICKS_1_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_1_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_1 0x00006a18 +#define BNX2_HC_COM_TICKS_1_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_1_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_1 0x00006a1c +#define BNX2_HC_CMD_TICKS_1_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_1_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_1 0x00006a20 +#define 
BNX2_HC_PERIODIC_TICKS_1_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_1_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_2 0x00006a24 +#define BNX2_HC_SB_CONFIG_2_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_2_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_2_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_2_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_2_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_2_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_2_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_2_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_2 0x00006a28 +#define BNX2_HC_TX_QUICK_CONS_TRIP_2_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_2_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_2 0x00006a2c +#define BNX2_HC_COMP_PROD_TRIP_2_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_2_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_2 0x00006a30 +#define BNX2_HC_RX_QUICK_CONS_TRIP_2_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_2_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_2 0x00006a34 +#define BNX2_HC_RX_TICKS_2_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_2_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_2 0x00006a38 +#define BNX2_HC_TX_TICKS_2_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_2_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_2 0x00006a3c +#define BNX2_HC_COM_TICKS_2_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_2_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_2 0x00006a40 +#define BNX2_HC_CMD_TICKS_2_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_2_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_2 0x00006a44 +#define BNX2_HC_PERIODIC_TICKS_2_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_2_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_3 0x00006a48 +#define BNX2_HC_SB_CONFIG_3_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_3_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_3_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_3_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_3_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_3_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_3_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_3_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_3 0x00006a4c +#define BNX2_HC_TX_QUICK_CONS_TRIP_3_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_3_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_3 0x00006a50 +#define BNX2_HC_COMP_PROD_TRIP_3_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_3_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_3 0x00006a54 +#define BNX2_HC_RX_QUICK_CONS_TRIP_3_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_3_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_3 0x00006a58 +#define BNX2_HC_RX_TICKS_3_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_3_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_3 0x00006a5c +#define BNX2_HC_TX_TICKS_3_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_3_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_3 0x00006a60 +#define BNX2_HC_COM_TICKS_3_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_3_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_3 0x00006a64 +#define BNX2_HC_CMD_TICKS_3_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_3_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_3 0x00006a68 +#define BNX2_HC_PERIODIC_TICKS_3_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_3_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_4 0x00006a6c +#define BNX2_HC_SB_CONFIG_4_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_4_TX_TMR_MODE (1L<<2) +#define 
BNX2_HC_SB_CONFIG_4_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_4_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_4_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_4_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_4_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_4_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_4 0x00006a70 +#define BNX2_HC_TX_QUICK_CONS_TRIP_4_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_4_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_4 0x00006a74 +#define BNX2_HC_COMP_PROD_TRIP_4_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_4_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_4 0x00006a78 +#define BNX2_HC_RX_QUICK_CONS_TRIP_4_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_4_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_4 0x00006a7c +#define BNX2_HC_RX_TICKS_4_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_4_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_4 0x00006a80 +#define BNX2_HC_TX_TICKS_4_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_4_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_4 0x00006a84 +#define BNX2_HC_COM_TICKS_4_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_4_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_4 0x00006a88 +#define BNX2_HC_CMD_TICKS_4_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_4_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_4 0x00006a8c +#define BNX2_HC_PERIODIC_TICKS_4_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_4_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_5 0x00006a90 +#define BNX2_HC_SB_CONFIG_5_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_5_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_5_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_5_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_5_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_5_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_5_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_5_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_5 0x00006a94 +#define BNX2_HC_TX_QUICK_CONS_TRIP_5_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_5_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_5 0x00006a98 +#define BNX2_HC_COMP_PROD_TRIP_5_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_5_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_5 0x00006a9c +#define BNX2_HC_RX_QUICK_CONS_TRIP_5_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_5_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_5 0x00006aa0 +#define BNX2_HC_RX_TICKS_5_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_5_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_5 0x00006aa4 +#define BNX2_HC_TX_TICKS_5_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_5_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_5 0x00006aa8 +#define BNX2_HC_COM_TICKS_5_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_5_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_5 0x00006aac +#define BNX2_HC_CMD_TICKS_5_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_5_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_5 0x00006ab0 +#define BNX2_HC_PERIODIC_TICKS_5_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_5_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_6 0x00006ab4 +#define BNX2_HC_SB_CONFIG_6_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_6_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_6_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_6_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_6_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_6_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_6_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_6_PER_COLLECT_LIMIT (0xfL<<20) + 
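/*
 * Illustrative sketch, not part of the original patch: the numbered
 * status-block register banks above (BNX2_HC_SB_CONFIG_1..8 and their
 * *_TRIP/*_TICKS companions) repeat at a fixed stride, so per-vector
 * coalescing parameters can be programmed generically with the
 * BNX2_HC_SB_CONFIG_SIZE and *_OFF helpers defined at the end of this
 * block.  REG_WR() is assumed to be bnx2.c's usual memory-mapped
 * register write accessor; bnx2_set_rx_ticks() itself is hypothetical.
 */
static void bnx2_set_rx_ticks(struct bnx2 *bp, int sb_id,
			      u16 ticks, u16 ticks_int)
{
	u32 off;

	if (sb_id == 0)		/* default status block at 0x682c */
		off = BNX2_HC_RX_TICKS;
	else			/* banks 1..8 repeat from BNX2_HC_SB_CONFIG_1 */
		off = BNX2_HC_SB_CONFIG_1 +
		      (sb_id - 1) * BNX2_HC_SB_CONFIG_SIZE +
		      BNX2_HC_RX_TICKS_OFF;

	/* low 10 bits: tick threshold; bits 16..25: threshold during interrupt */
	REG_WR(bp, off, ((u32) ticks_int << 16) | ticks);
}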
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6 0x00006ab8 +#define BNX2_HC_TX_QUICK_CONS_TRIP_6_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_6_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_6 0x00006abc +#define BNX2_HC_COMP_PROD_TRIP_6_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_6_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_6 0x00006ac0 +#define BNX2_HC_RX_QUICK_CONS_TRIP_6_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_6_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_6 0x00006ac4 +#define BNX2_HC_RX_TICKS_6_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_6_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_6 0x00006ac8 +#define BNX2_HC_TX_TICKS_6_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_6_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_6 0x00006acc +#define BNX2_HC_COM_TICKS_6_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_6_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_6 0x00006ad0 +#define BNX2_HC_CMD_TICKS_6_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_6_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_6 0x00006ad4 +#define BNX2_HC_PERIODIC_TICKS_6_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_6_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_7 0x00006ad8 +#define BNX2_HC_SB_CONFIG_7_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_7_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_7_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_7_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_7_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_7_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_7_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_7_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_7 0x00006adc +#define BNX2_HC_TX_QUICK_CONS_TRIP_7_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_7_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_7 0x00006ae0 +#define BNX2_HC_COMP_PROD_TRIP_7_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_7_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_7 0x00006ae4 +#define BNX2_HC_RX_QUICK_CONS_TRIP_7_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_7_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_7 0x00006ae8 +#define BNX2_HC_RX_TICKS_7_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_7_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_7 0x00006aec +#define BNX2_HC_TX_TICKS_7_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_7_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_7 0x00006af0 +#define BNX2_HC_COM_TICKS_7_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_7_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_7 0x00006af4 +#define BNX2_HC_CMD_TICKS_7_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_7_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_7 0x00006af8 +#define BNX2_HC_PERIODIC_TICKS_7_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_7_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_8 0x00006afc +#define BNX2_HC_SB_CONFIG_8_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_8_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_8_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_8_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_8_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_8_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_8_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_8_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_8 0x00006b00 +#define BNX2_HC_TX_QUICK_CONS_TRIP_8_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_8_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_8 0x00006b04 +#define BNX2_HC_COMP_PROD_TRIP_8_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_8_INT 
(0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_8 0x00006b08 +#define BNX2_HC_RX_QUICK_CONS_TRIP_8_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_8_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_8 0x00006b0c +#define BNX2_HC_RX_TICKS_8_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_8_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_8 0x00006b10 +#define BNX2_HC_TX_TICKS_8_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_8_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_8 0x00006b14 +#define BNX2_HC_COM_TICKS_8_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_8_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_8 0x00006b18 +#define BNX2_HC_CMD_TICKS_8_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_8_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_8 0x00006b1c +#define BNX2_HC_PERIODIC_TICKS_8_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_8_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_SIZE (BNX2_HC_SB_CONFIG_2 - BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_COMP_PROD_TRIP_OFF (BNX2_HC_COMP_PROD_TRIP_1 - \ + BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_COM_TICKS_OFF (BNX2_HC_COM_TICKS_1 - BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_CMD_TICKS_OFF (BNX2_HC_CMD_TICKS_1 - BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_TX_QUICK_CONS_TRIP_OFF (BNX2_HC_TX_QUICK_CONS_TRIP_1 - \ + BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_TX_TICKS_OFF (BNX2_HC_TX_TICKS_1 - BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_RX_QUICK_CONS_TRIP_OFF (BNX2_HC_RX_QUICK_CONS_TRIP_1 - \ + BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_RX_TICKS_OFF (BNX2_HC_RX_TICKS_1 - BNX2_HC_SB_CONFIG_1) + + +/* + * txp_reg definition + * offset: 0x40000 + */ +#define BNX2_TXP_CPU_MODE 0x00045000 +#define BNX2_TXP_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_TXP_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_TXP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_TXP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_TXP_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_TXP_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_TXP_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_TXP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_TXP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_TXP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_TXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_TXP_CPU_STATE 0x00045004 +#define BNX2_TXP_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_TXP_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_TXP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_TXP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_TXP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_TXP_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_TXP_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_TXP_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_TXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_TXP_CPU_STATE_INTERRRUPT (1L<<12) +#define BNX2_TXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_TXP_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define BNX2_TXP_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_TXP_CPU_EVENT_MASK 0x00045008 +#define BNX2_TXP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_TXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_TXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_TXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_TXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_TXP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_TXP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) 
+#define BNX2_TXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_TXP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_TXP_CPU_PROGRAM_COUNTER 0x0004501c +#define BNX2_TXP_CPU_INSTRUCTION 0x00045020 +#define BNX2_TXP_CPU_DATA_ACCESS 0x00045024 +#define BNX2_TXP_CPU_INTERRUPT_ENABLE 0x00045028 +#define BNX2_TXP_CPU_INTERRUPT_VECTOR 0x0004502c +#define BNX2_TXP_CPU_INTERRUPT_SAVED_PC 0x00045030 +#define BNX2_TXP_CPU_HW_BREAKPOINT 0x00045034 +#define BNX2_TXP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_TXP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK 0x00045038 +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_TXP_CPU_LAST_BRANCH_ADDR 0x00045048 +#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_TXP_CPU_REG_FILE 0x00045200 +#define BNX2_TXP_TXPQ 0x000453c0 +#define BNX2_TXP_FTQ_CMD 0x000453f8 +#define BNX2_TXP_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_TXP_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_TXP_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_TXP_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_TXP_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_TXP_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_TXP_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_TXP_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_TXP_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_TXP_FTQ_CMD_POP (1L<<30) +#define BNX2_TXP_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_TXP_FTQ_CTL 0x000453fc +#define BNX2_TXP_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_TXP_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_TXP_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_TXP_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_TXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_TXP_SCRATCH 0x00060000 + + +/* + * tpat_reg definition + * offset: 0x80000 + */ +#define BNX2_TPAT_CPU_MODE 0x00085000 +#define BNX2_TPAT_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_TPAT_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_TPAT_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_TPAT_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_TPAT_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_TPAT_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_TPAT_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_TPAT_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_TPAT_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_TPAT_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_TPAT_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_TPAT_CPU_STATE 0x00085004 +#define BNX2_TPAT_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_TPAT_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_TPAT_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_TPAT_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_TPAT_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_TPAT_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_TPAT_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_TPAT_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_TPAT_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_TPAT_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_TPAT_CPU_STATE_INTERRRUPT (1L<<12) +#define BNX2_TPAT_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_TPAT_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define 
BNX2_TPAT_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_TPAT_CPU_EVENT_MASK 0x00085008 +#define BNX2_TPAT_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_TPAT_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_TPAT_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_TPAT_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_TPAT_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_TPAT_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_TPAT_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define BNX2_TPAT_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_TPAT_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_TPAT_CPU_PROGRAM_COUNTER 0x0008501c +#define BNX2_TPAT_CPU_INSTRUCTION 0x00085020 +#define BNX2_TPAT_CPU_DATA_ACCESS 0x00085024 +#define BNX2_TPAT_CPU_INTERRUPT_ENABLE 0x00085028 +#define BNX2_TPAT_CPU_INTERRUPT_VECTOR 0x0008502c +#define BNX2_TPAT_CPU_INTERRUPT_SAVED_PC 0x00085030 +#define BNX2_TPAT_CPU_HW_BREAKPOINT 0x00085034 +#define BNX2_TPAT_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_TPAT_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK 0x00085038 +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR 0x00085048 +#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_TPAT_CPU_REG_FILE 0x00085200 +#define BNX2_TPAT_TPATQ 0x000853c0 +#define BNX2_TPAT_FTQ_CMD 0x000853f8 +#define BNX2_TPAT_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_TPAT_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_TPAT_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_TPAT_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_TPAT_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_TPAT_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_TPAT_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_TPAT_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_TPAT_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_TPAT_FTQ_CMD_POP (1L<<30) +#define BNX2_TPAT_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_TPAT_FTQ_CTL 0x000853fc +#define BNX2_TPAT_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_TPAT_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_TPAT_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_TPAT_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_TPAT_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_TPAT_SCRATCH 0x000a0000 + + +/* + * rxp_reg definition + * offset: 0xc0000 + */ +#define BNX2_RXP_CPU_MODE 0x000c5000 +#define BNX2_RXP_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_RXP_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_RXP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_RXP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_RXP_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_RXP_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_RXP_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_RXP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_RXP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_RXP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_RXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_RXP_CPU_STATE 0x000c5004 +#define 
BNX2_RXP_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_RXP_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_RXP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_RXP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_RXP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_RXP_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_RXP_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_RXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_RXP_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_RXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_RXP_CPU_STATE_INTERRRUPT (1L<<12) +#define BNX2_RXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_RXP_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define BNX2_RXP_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_RXP_CPU_EVENT_MASK 0x000c5008 +#define BNX2_RXP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_RXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_RXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_RXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_RXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_RXP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_RXP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define BNX2_RXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_RXP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_RXP_CPU_PROGRAM_COUNTER 0x000c501c +#define BNX2_RXP_CPU_INSTRUCTION 0x000c5020 +#define BNX2_RXP_CPU_DATA_ACCESS 0x000c5024 +#define BNX2_RXP_CPU_INTERRUPT_ENABLE 0x000c5028 +#define BNX2_RXP_CPU_INTERRUPT_VECTOR 0x000c502c +#define BNX2_RXP_CPU_INTERRUPT_SAVED_PC 0x000c5030 +#define BNX2_RXP_CPU_HW_BREAKPOINT 0x000c5034 +#define BNX2_RXP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_RXP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK 0x000c5038 +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_RXP_CPU_LAST_BRANCH_ADDR 0x000c5048 +#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_RXP_CPU_REG_FILE 0x000c5200 +#define BNX2_RXP_PFE_PFE_CTL 0x000c537c +#define BNX2_RXP_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE (0xfL<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4) +#define 
BNX2_RXP_PFE_PFE_CTL_PFE_COUNT (0xfL<<12) +#define BNX2_RXP_PFE_PFE_CTL_OFFSET (0x1ffL<<16) + +#define BNX2_RXP_RXPCQ 0x000c5380 +#define BNX2_RXP_CFTQ_CMD 0x000c53b8 +#define BNX2_RXP_CFTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_RXP_CFTQ_CMD_WR_TOP (1L<<10) +#define BNX2_RXP_CFTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_RXP_CFTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_RXP_CFTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_RXP_CFTQ_CMD_RD_DATA (1L<<26) +#define BNX2_RXP_CFTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_RXP_CFTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_RXP_CFTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_RXP_CFTQ_CMD_POP (1L<<30) +#define BNX2_RXP_CFTQ_CMD_BUSY (1L<<31) + +#define BNX2_RXP_CFTQ_CTL 0x000c53bc +#define BNX2_RXP_CFTQ_CTL_INTERVENE (1L<<0) +#define BNX2_RXP_CFTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_RXP_CFTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_RXP_CFTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_RXP_CFTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_RXP_RXPQ 0x000c53c0 +#define BNX2_RXP_FTQ_CMD 0x000c53f8 +#define BNX2_RXP_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_RXP_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_RXP_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_RXP_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_RXP_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_RXP_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_RXP_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_RXP_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_RXP_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_RXP_FTQ_CMD_POP (1L<<30) +#define BNX2_RXP_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_RXP_FTQ_CTL 0x000c53fc +#define BNX2_RXP_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_RXP_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_RXP_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_RXP_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_RXP_SCRATCH 0x000e0000 +#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024 +#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038 +#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c +#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128 + + +/* + * com_reg definition + * offset: 0x100000 + */ +#define BNX2_COM_CKSUM_ERROR_STATUS 0x00100000 +#define BNX2_COM_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) +#define BNX2_COM_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) + +#define BNX2_COM_CPU_MODE 0x00105000 +#define BNX2_COM_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_COM_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_COM_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_COM_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_COM_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_COM_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_COM_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_COM_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_COM_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_COM_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_COM_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_COM_CPU_STATE 0x00105004 +#define BNX2_COM_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_COM_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_COM_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_COM_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_COM_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_COM_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_COM_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_COM_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_COM_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_COM_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_COM_CPU_STATE_INTERRRUPT (1L<<12) +#define BNX2_COM_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_COM_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define 
BNX2_COM_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_COM_CPU_EVENT_MASK 0x00105008 +#define BNX2_COM_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_COM_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_COM_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_COM_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_COM_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_COM_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_COM_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define BNX2_COM_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_COM_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_COM_CPU_PROGRAM_COUNTER 0x0010501c +#define BNX2_COM_CPU_INSTRUCTION 0x00105020 +#define BNX2_COM_CPU_DATA_ACCESS 0x00105024 +#define BNX2_COM_CPU_INTERRUPT_ENABLE 0x00105028 +#define BNX2_COM_CPU_INTERRUPT_VECTOR 0x0010502c +#define BNX2_COM_CPU_INTERRUPT_SAVED_PC 0x00105030 +#define BNX2_COM_CPU_HW_BREAKPOINT 0x00105034 +#define BNX2_COM_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_COM_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_COM_CPU_DEBUG_VECT_PEEK 0x00105038 +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_COM_CPU_LAST_BRANCH_ADDR 0x00105048 +#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_COM_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_COM_CPU_REG_FILE 0x00105200 +#define BNX2_COM_COMTQ_PFE_PFE_CTL 0x001052bc +#define BNX2_COM_COMTQ_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE (0xfL<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_COUNT (0xfL<<12) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_OFFSET (0x1ffL<<16) + +#define BNX2_COM_COMXQ 0x00105340 +#define BNX2_COM_COMXQ_FTQ_CMD 0x00105378 +#define BNX2_COM_COMXQ_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_COM_COMXQ_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_COM_COMXQ_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_COM_COMXQ_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define 
BNX2_COM_COMXQ_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_COM_COMXQ_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_COM_COMXQ_FTQ_CMD_POP (1L<<30) +#define BNX2_COM_COMXQ_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_COM_COMXQ_FTQ_CTL 0x0010537c +#define BNX2_COM_COMXQ_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_COM_COMXQ_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_COM_COMXQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_COM_COMXQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_COM_COMXQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_COM_COMTQ 0x00105380 +#define BNX2_COM_COMTQ_FTQ_CMD 0x001053b8 +#define BNX2_COM_COMTQ_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_COM_COMTQ_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_COM_COMTQ_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_COM_COMTQ_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_COM_COMTQ_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_COM_COMTQ_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_COM_COMTQ_FTQ_CMD_POP (1L<<30) +#define BNX2_COM_COMTQ_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_COM_COMTQ_FTQ_CTL 0x001053bc +#define BNX2_COM_COMTQ_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_COM_COMTQ_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_COM_COMTQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_COM_COMTQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_COM_COMTQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_COM_COMQ 0x001053c0 +#define BNX2_COM_COMQ_FTQ_CMD 0x001053f8 +#define BNX2_COM_COMQ_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_COM_COMQ_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_COM_COMQ_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_COM_COMQ_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_COM_COMQ_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_COM_COMQ_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_COM_COMQ_FTQ_CMD_POP (1L<<30) +#define BNX2_COM_COMQ_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_COM_COMQ_FTQ_CTL 0x001053fc +#define BNX2_COM_COMQ_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_COM_COMQ_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_COM_COMQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_COM_COMQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_COM_COMQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_COM_SCRATCH 0x00120000 + +#define BNX2_FW_RX_LOW_LATENCY 0x00120058 +#define BNX2_FW_RX_DROP_COUNT 0x00120084 + + +/* + * cp_reg definition + * offset: 0x180000 + */ +#define BNX2_CP_CKSUM_ERROR_STATUS 0x00180000 +#define BNX2_CP_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) +#define BNX2_CP_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) + +#define BNX2_CP_CPU_MODE 0x00185000 +#define BNX2_CP_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_CP_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_CP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_CP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_CP_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_CP_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_CP_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_CP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_CP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_CP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_CP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_CP_CPU_STATE 0x00185004 +#define BNX2_CP_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_CP_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_CP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_CP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define 
BNX2_CP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_CP_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_CP_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_CP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_CP_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_CP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_CP_CPU_STATE_INTERRRUPT (1L<<12) +#define BNX2_CP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_CP_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define BNX2_CP_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_CP_CPU_EVENT_MASK 0x00185008 +#define BNX2_CP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_CP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_CP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_CP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_CP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_CP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_CP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define BNX2_CP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_CP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_CP_CPU_PROGRAM_COUNTER 0x0018501c +#define BNX2_CP_CPU_INSTRUCTION 0x00185020 +#define BNX2_CP_CPU_DATA_ACCESS 0x00185024 +#define BNX2_CP_CPU_INTERRUPT_ENABLE 0x00185028 +#define BNX2_CP_CPU_INTERRUPT_VECTOR 0x0018502c +#define BNX2_CP_CPU_INTERRUPT_SAVED_PC 0x00185030 +#define BNX2_CP_CPU_HW_BREAKPOINT 0x00185034 +#define BNX2_CP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_CP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_CP_CPU_DEBUG_VECT_PEEK 0x00185038 +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_CP_CPU_LAST_BRANCH_ADDR 0x00185048 +#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_CP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_CP_CPU_REG_FILE 0x00185200 +#define BNX2_CP_CPQ_PFE_PFE_CTL 0x001853bc +#define BNX2_CP_CPQ_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE (0xfL<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_COUNT (0xfL<<12) +#define BNX2_CP_CPQ_PFE_PFE_CTL_OFFSET (0x1ffL<<16) + +#define BNX2_CP_CPQ 0x001853c0 +#define BNX2_CP_CPQ_FTQ_CMD 0x001853f8 +#define 
BNX2_CP_CPQ_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_CP_CPQ_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_CP_CPQ_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_CP_CPQ_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_CP_CPQ_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_CP_CPQ_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_CP_CPQ_FTQ_CMD_POP (1L<<30) +#define BNX2_CP_CPQ_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_CP_CPQ_FTQ_CTL 0x001853fc +#define BNX2_CP_CPQ_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_CP_CPQ_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_CP_CPQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_CP_CPQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_CP_CPQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_CP_SCRATCH 0x001a0000 + +#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080 + + +/* + * mcp_reg definition + * offset: 0x140000 + */ +#define BNX2_MCP_MCP_CONTROL 0x00140080 +#define BNX2_MCP_MCP_CONTROL_SMBUS_SEL (1L<<30) +#define BNX2_MCP_MCP_CONTROL_MCP_ISOLATE (1L<<31) + +#define BNX2_MCP_MCP_ATTENTION_STATUS 0x00140084 +#define BNX2_MCP_MCP_ATTENTION_STATUS_DRV_DOORBELL (1L<<29) +#define BNX2_MCP_MCP_ATTENTION_STATUS_WATCHDOG_TIMEOUT (1L<<30) +#define BNX2_MCP_MCP_ATTENTION_STATUS_CPU_EVENT (1L<<31) + +#define BNX2_MCP_MCP_HEARTBEAT_CONTROL 0x00140088 +#define BNX2_MCP_MCP_HEARTBEAT_CONTROL_MCP_HEARTBEAT_ENABLE (1L<<31) + +#define BNX2_MCP_MCP_HEARTBEAT_STATUS 0x0014008c +#define BNX2_MCP_MCP_HEARTBEAT_STATUS_MCP_HEARTBEAT_PERIOD (0x7ffL<<0) +#define BNX2_MCP_MCP_HEARTBEAT_STATUS_VALID (1L<<31) + +#define BNX2_MCP_MCP_HEARTBEAT 0x00140090 +#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_COUNT (0x3fffffffL<<0) +#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_INC (1L<<30) +#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_RESET (1L<<31) + +#define BNX2_MCP_WATCHDOG_RESET 0x00140094 +#define BNX2_MCP_WATCHDOG_RESET_WATCHDOG_RESET (1L<<31) + +#define BNX2_MCP_WATCHDOG_CONTROL 0x00140098 +#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_TIMEOUT (0xfffffffL<<0) +#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ATTN (1L<<29) +#define BNX2_MCP_WATCHDOG_CONTROL_MCP_RST_ENABLE (1L<<30) +#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ENABLE (1L<<31) + +#define BNX2_MCP_ACCESS_LOCK 0x0014009c +#define BNX2_MCP_ACCESS_LOCK_LOCK (1L<<31) + +#define BNX2_MCP_TOE_ID 0x001400a0 +#define BNX2_MCP_TOE_ID_FUNCTION_ID (1L<<31) + +#define BNX2_MCP_MAILBOX_CFG 0x001400a4 +#define BNX2_MCP_MAILBOX_CFG_MAILBOX_OFFSET (0x3fffL<<0) +#define BNX2_MCP_MAILBOX_CFG_MAILBOX_SIZE (0xfffL<<20) + +#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC 0x001400a8 +#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_OFFSET (0x3fffL<<0) +#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_SIZE (0xfffL<<20) + +#define BNX2_MCP_MCP_DOORBELL 0x001400ac +#define BNX2_MCP_MCP_DOORBELL_MCP_DOORBELL (1L<<31) + +#define BNX2_MCP_DRIVER_DOORBELL 0x001400b0 +#define BNX2_MCP_DRIVER_DOORBELL_DRIVER_DOORBELL (1L<<31) + +#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC 0x001400b4 +#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC_DRIVER_DOORBELL (1L<<31) + +#define BNX2_MCP_CPU_MODE 0x00145000 +#define BNX2_MCP_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_MCP_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_MCP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_MCP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_MCP_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_MCP_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_MCP_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_MCP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define 
BNX2_MCP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_MCP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_MCP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_MCP_CPU_STATE 0x00145004 +#define BNX2_MCP_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_MCP_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_MCP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_MCP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_MCP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_MCP_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_MCP_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_MCP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_MCP_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_MCP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_MCP_CPU_STATE_INTERRRUPT (1L<<12) +#define BNX2_MCP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_MCP_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define BNX2_MCP_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_MCP_CPU_EVENT_MASK 0x00145008 +#define BNX2_MCP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_MCP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_MCP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_MCP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_MCP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_MCP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_MCP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define BNX2_MCP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_MCP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_MCP_CPU_PROGRAM_COUNTER 0x0014501c +#define BNX2_MCP_CPU_INSTRUCTION 0x00145020 +#define BNX2_MCP_CPU_DATA_ACCESS 0x00145024 +#define BNX2_MCP_CPU_INTERRUPT_ENABLE 0x00145028 +#define BNX2_MCP_CPU_INTERRUPT_VECTOR 0x0014502c +#define BNX2_MCP_CPU_INTERRUPT_SAVED_PC 0x00145030 +#define BNX2_MCP_CPU_HW_BREAKPOINT 0x00145034 +#define BNX2_MCP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_MCP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK 0x00145038 +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_MCP_CPU_LAST_BRANCH_ADDR 0x00145048 +#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_MCP_CPU_REG_FILE 0x00145200 +#define BNX2_MCP_MCPQ 0x001453c0 +#define BNX2_MCP_MCPQ_FTQ_CMD 0x001453f8 +#define BNX2_MCP_MCPQ_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_MCP_MCPQ_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_MCP_MCPQ_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_MCP_MCPQ_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_MCP_MCPQ_FTQ_CMD_POP (1L<<30) +#define BNX2_MCP_MCPQ_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_MCP_MCPQ_FTQ_CTL 0x001453fc +#define BNX2_MCP_MCPQ_FTQ_CTL_INTERVENE (1L<<0) +#define 
BNX2_MCP_MCPQ_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_MCP_MCPQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_MCP_MCPQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_MCP_MCPQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_MCP_ROM 0x00150000 +#define BNX2_MCP_SCRATCH 0x00160000 +#define BNX2_MCP_STATE_P1 0x0016f9c8 +#define BNX2_MCP_STATE_P0 0x0016fdc8 +#define BNX2_MCP_STATE_P1_5708 0x001699c8 +#define BNX2_MCP_STATE_P0_5708 0x00169dc8 + +#define BNX2_SHM_HDR_SIGNATURE BNX2_MCP_SCRATCH +#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK 0xffff0000 +#define BNX2_SHM_HDR_SIGNATURE_SIG 0x53530000 +#define BNX2_SHM_HDR_SIGNATURE_VER_MASK 0x000000ff +#define BNX2_SHM_HDR_SIGNATURE_VER_ONE 0x00000001 + +#define BNX2_SHM_HDR_ADDR_0 BNX2_MCP_SCRATCH + 4 +#define BNX2_SHM_HDR_ADDR_1 BNX2_MCP_SCRATCH + 8 + + +#define NUM_MC_HASH_REGISTERS 8 + + +/* PHY_ID1: bits 31-16; PHY_ID2: bits 15-0. */ +#define PHY_BCM5706_PHY_ID 0x00206160 + +#define PHY_ID(id) ((id) & 0xfffffff0) +#define PHY_REV_ID(id) ((id) & 0xf) + +/* 5708 Serdes PHY registers */ + +#define BCM5708S_BMCR_FORCE_2500 0x20 + +#define BCM5708S_UP1 0xb + +#define BCM5708S_UP1_2G5 0x1 + +#define BCM5708S_BLK_ADDR 0x1f + +#define BCM5708S_BLK_ADDR_DIG 0x0000 +#define BCM5708S_BLK_ADDR_DIG3 0x0002 +#define BCM5708S_BLK_ADDR_TX_MISC 0x0005 + +/* Digital Block */ +#define BCM5708S_1000X_CTL1 0x10 + +#define BCM5708S_1000X_CTL1_FIBER_MODE 0x0001 +#define BCM5708S_1000X_CTL1_AUTODET_EN 0x0010 + +#define BCM5708S_1000X_CTL2 0x11 + +#define BCM5708S_1000X_CTL2_PLLEL_DET_EN 0x0001 + +#define BCM5708S_1000X_STAT1 0x14 + +#define BCM5708S_1000X_STAT1_SGMII 0x0001 +#define BCM5708S_1000X_STAT1_LINK 0x0002 +#define BCM5708S_1000X_STAT1_FD 0x0004 +#define BCM5708S_1000X_STAT1_SPEED_MASK 0x0018 +#define BCM5708S_1000X_STAT1_SPEED_10 0x0000 +#define BCM5708S_1000X_STAT1_SPEED_100 0x0008 +#define BCM5708S_1000X_STAT1_SPEED_1G 0x0010 +#define BCM5708S_1000X_STAT1_SPEED_2G5 0x0018 +#define BCM5708S_1000X_STAT1_TX_PAUSE 0x0020 +#define BCM5708S_1000X_STAT1_RX_PAUSE 0x0040 + +/* Digital3 Block */ +#define BCM5708S_DIG_3_0 0x10 + +#define BCM5708S_DIG_3_0_USE_IEEE 0x0001 + +/* Tx/Misc Block */ +#define BCM5708S_TX_ACTL1 0x15 + +#define BCM5708S_TX_ACTL1_DRIVER_VCM 0x30 + +#define BCM5708S_TX_ACTL3 0x17 + +#define MII_BNX2_DSP_RW_PORT 0x15 +#define MII_BNX2_DSP_ADDRESS 0x17 +#define MII_BNX2_DSP_EXPAND_REG 0x0f00 +#define MII_EXPAND_REG1 (MII_BNX2_DSP_EXPAND_REG | 1) +#define MII_EXPAND_REG1_RUDI_C 0x20 +#define MII_EXPAND_SERDES_CTL (MII_BNX2_DSP_EXPAND_REG | 3) + +#define MII_BNX2_MISC_SHADOW 0x1c +#define MISC_SHDW_AN_DBG 0x6800 +#define MISC_SHDW_AN_DBG_NOSYNC 0x0002 +#define MISC_SHDW_AN_DBG_RUDI_INVALID 0x0100 +#define MISC_SHDW_MODE_CTL 0x7c00 +#define MISC_SHDW_MODE_CTL_SIG_DET 0x0010 + +#define MII_BNX2_BLK_ADDR 0x1f +#define MII_BNX2_BLK_ADDR_IEEE0 0x0000 +#define MII_BNX2_BLK_ADDR_GP_STATUS 0x8120 +#define MII_BNX2_GP_TOP_AN_STATUS1 0x1b +#define MII_BNX2_GP_TOP_AN_SPEED_MSK 0x3f00 +#define MII_BNX2_GP_TOP_AN_SPEED_10 0x0000 +#define MII_BNX2_GP_TOP_AN_SPEED_100 0x0100 +#define MII_BNX2_GP_TOP_AN_SPEED_1G 0x0200 +#define MII_BNX2_GP_TOP_AN_SPEED_2_5G 0x0300 +#define MII_BNX2_GP_TOP_AN_SPEED_1GKV 0x0d00 +#define MII_BNX2_GP_TOP_AN_FD 0x8 +#define MII_BNX2_BLK_ADDR_SERDES_DIG 0x8300 +#define MII_BNX2_SERDES_DIG_1000XCTL1 0x10 +#define MII_BNX2_SD_1000XCTL1_FIBER 0x01 +#define MII_BNX2_SD_1000XCTL1_AUTODET 0x10 +#define MII_BNX2_SERDES_DIG_MISC1 0x18 +#define MII_BNX2_SD_MISC1_FORCE_MSK 0xf +#define MII_BNX2_SD_MISC1_FORCE_2_5G 0x0 +#define MII_BNX2_SD_MISC1_FORCE 0x10 
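The serdes PHY registers above sit behind a block-address window: a block base such as MII_BNX2_BLK_ADDR_SERDES_DIG is first written to MII register 0x1f (MII_BNX2_BLK_ADDR), and only then do the small register numbers such as MII_BNX2_SERDES_DIG_MISC1 refer to that block. A minimal sketch of the pattern follows, separate from the patch itself; the mdio_ops callbacks are hypothetical stand-ins for the driver's real PHY read/write helpers, which are not part of this hunk.

/* Sketch only: forces the SerDes to 2.5G using the block-address
 * scheme implied by the defines above. The mdio_ops callbacks are
 * hypothetical; the real driver uses its own PHY accessors. */
#include <stdint.h>

#define MII_BNX2_BLK_ADDR		0x1f
#define MII_BNX2_BLK_ADDR_SERDES_DIG	0x8300
#define MII_BNX2_SERDES_DIG_MISC1	0x18
#define MII_BNX2_SD_MISC1_FORCE_MSK	0xf
#define MII_BNX2_SD_MISC1_FORCE_2_5G	0x0
#define MII_BNX2_SD_MISC1_FORCE		0x10

struct mdio_ops {
	uint16_t (*read)(void *ctx, uint8_t reg);
	void (*write)(void *ctx, uint8_t reg, uint16_t val);
};

static void serdes_force_2_5g(const struct mdio_ops *ops, void *ctx)
{
	uint16_t val;

	/* Select the SERDES_DIG block, then read-modify-write MISC1. */
	ops->write(ctx, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
	val = ops->read(ctx, MII_BNX2_SERDES_DIG_MISC1);
	val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
	val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
	ops->write(ctx, MII_BNX2_SERDES_DIG_MISC1, val);
}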
+#define MII_BNX2_BLK_ADDR_OVER1G 0x8320 +#define MII_BNX2_OVER1G_UP1 0x19 +#define MII_BNX2_BLK_ADDR_BAM_NXTPG 0x8350 +#define MII_BNX2_BAM_NXTPG_CTL 0x10 +#define MII_BNX2_NXTPG_CTL_BAM 0x1 +#define MII_BNX2_NXTPG_CTL_T2 0x2 +#define MII_BNX2_BLK_ADDR_CL73_USERB0 0x8370 +#define MII_BNX2_CL73_BAM_CTL1 0x12 +#define MII_BNX2_CL73_BAM_EN 0x8000 +#define MII_BNX2_CL73_BAM_STA_MGR_EN 0x4000 +#define MII_BNX2_CL73_BAM_NP_AFT_BP_EN 0x2000 +#define MII_BNX2_BLK_ADDR_AER 0xffd0 +#define MII_BNX2_AER_AER 0x1e +#define MII_BNX2_AER_AER_AN_MMD 0x3800 +#define MII_BNX2_BLK_ADDR_COMBO_IEEEB0 0xffe0 + +#define MIN_ETHERNET_PACKET_SIZE 60 +#define MAX_ETHERNET_PACKET_SIZE 1514 +#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014 + +#define BNX2_RX_COPY_THRESH 128 + +#define BNX2_MISC_ENABLE_DEFAULT 0x17ffffff + +#define BNX2_START_UNICAST_ADDRESS_INDEX 4 +#define BNX2_END_UNICAST_ADDRESS_INDEX 7 +#define BNX2_MAX_UNICAST_ADDRESSES (BNX2_END_UNICAST_ADDRESS_INDEX - \ + BNX2_START_UNICAST_ADDRESS_INDEX + 1) + +#define DMA_READ_CHANS 5 +#define DMA_WRITE_CHANS 3 + +/* Use CPU native page size up to 16K for the ring sizes. */ +#if (PAGE_SHIFT > 14) +#define BCM_PAGE_BITS 14 +#else +#define BCM_PAGE_BITS PAGE_SHIFT +#endif +#define BCM_PAGE_SIZE (1 << BCM_PAGE_BITS) + +#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd)) +#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) + +#define MAX_RX_RINGS 8 +#define MAX_RX_PG_RINGS 32 +#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd)) +#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1) +#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS) +#define MAX_TOTAL_RX_PG_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_PG_RINGS) + +#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \ + (MAX_TX_DESC_CNT - 1)) ? \ + (x) + 2 : (x) + 1 + +#define TX_RING_IDX(x) ((x) & MAX_TX_DESC_CNT) + +#define NEXT_RX_BD(x) (((x) & (MAX_RX_DESC_CNT - 1)) == \ + (MAX_RX_DESC_CNT - 1)) ? \ + (x) + 2 : (x) + 1 + +#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx) +#define RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx) + +#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> (BCM_PAGE_BITS - 4)) +#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT) + +/* Context size. 
*/ +#define CTX_SHIFT 7 +#define CTX_SIZE (1 << CTX_SHIFT) +#define CTX_MASK (CTX_SIZE - 1) +#define GET_CID_ADDR(_cid) ((_cid) << CTX_SHIFT) +#define GET_CID(_cid_addr) ((_cid_addr) >> CTX_SHIFT) + +#define PHY_CTX_SHIFT 6 +#define PHY_CTX_SIZE (1 << PHY_CTX_SHIFT) +#define PHY_CTX_MASK (PHY_CTX_SIZE - 1) +#define GET_PCID_ADDR(_pcid) ((_pcid) << PHY_CTX_SHIFT) +#define GET_PCID(_pcid_addr) ((_pcid_addr) >> PHY_CTX_SHIFT) + +#define MB_KERNEL_CTX_SHIFT 8 +#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT) +#define MB_KERNEL_CTX_MASK (MB_KERNEL_CTX_SIZE - 1) +#define MB_GET_CID_ADDR(_cid) (0x10000 + ((_cid) << MB_KERNEL_CTX_SHIFT)) + +#define MAX_CID_CNT 0x4000 +#define MAX_CID_ADDR (GET_CID_ADDR(MAX_CID_CNT)) +#define INVALID_CID_ADDR 0xffffffff + +#define TX_CID 16 +#define TX_TSS_CID 32 +#define RX_CID 0 +#define RX_RSS_CID 4 +#define RX_MAX_RSS_RINGS 7 +#define RX_MAX_RINGS (RX_MAX_RSS_RINGS + 1) +#define TX_MAX_TSS_RINGS 7 +#define TX_MAX_RINGS (TX_MAX_TSS_RINGS + 1) + +#define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID) +#define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID) + +struct sw_bd { + struct sk_buff *skb; + struct l2_fhdr *desc; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +struct sw_pg { + struct page *page; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +struct sw_tx_bd { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); + unsigned short is_gso; + unsigned short nr_frags; +}; + +#define SW_RXBD_RING_SIZE (sizeof(struct sw_bd) * RX_DESC_CNT) +#define SW_RXPG_RING_SIZE (sizeof(struct sw_pg) * RX_DESC_CNT) +#define RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT) +#define SW_TXBD_RING_SIZE (sizeof(struct sw_tx_bd) * TX_DESC_CNT) +#define TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT) + +/* Buffered flash (Atmel: AT45DB011B) specific information */ +#define SEEPROM_PAGE_BITS 2 +#define SEEPROM_PHY_PAGE_SIZE (1 << SEEPROM_PAGE_BITS) +#define SEEPROM_BYTE_ADDR_MASK (SEEPROM_PHY_PAGE_SIZE-1) +#define SEEPROM_PAGE_SIZE 4 +#define SEEPROM_TOTAL_SIZE 65536 + +#define BUFFERED_FLASH_PAGE_BITS 9 +#define BUFFERED_FLASH_PHY_PAGE_SIZE (1 << BUFFERED_FLASH_PAGE_BITS) +#define BUFFERED_FLASH_BYTE_ADDR_MASK (BUFFERED_FLASH_PHY_PAGE_SIZE-1) +#define BUFFERED_FLASH_PAGE_SIZE 264 +#define BUFFERED_FLASH_TOTAL_SIZE 0x21000 + +#define SAIFUN_FLASH_PAGE_BITS 8 +#define SAIFUN_FLASH_PHY_PAGE_SIZE (1 << SAIFUN_FLASH_PAGE_BITS) +#define SAIFUN_FLASH_BYTE_ADDR_MASK (SAIFUN_FLASH_PHY_PAGE_SIZE-1) +#define SAIFUN_FLASH_PAGE_SIZE 256 +#define SAIFUN_FLASH_BASE_TOTAL_SIZE 65536 + +#define ST_MICRO_FLASH_PAGE_BITS 8 +#define ST_MICRO_FLASH_PHY_PAGE_SIZE (1 << ST_MICRO_FLASH_PAGE_BITS) +#define ST_MICRO_FLASH_BYTE_ADDR_MASK (ST_MICRO_FLASH_PHY_PAGE_SIZE-1) +#define ST_MICRO_FLASH_PAGE_SIZE 256 +#define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536 + +#define BCM5709_FLASH_PAGE_BITS 8 +#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS) +#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1) +#define BCM5709_FLASH_PAGE_SIZE 256 + +#define NVRAM_TIMEOUT_COUNT 30000 + + +#define FLASH_STRAP_MASK (BNX2_NVM_CFG1_FLASH_MODE | \ + BNX2_NVM_CFG1_BUFFER_MODE | \ + BNX2_NVM_CFG1_PROTECT_MODE | \ + BNX2_NVM_CFG1_FLASH_SIZE) + +#define FLASH_BACKUP_STRAP_MASK (0xf << 26) + +struct flash_spec { + u32 strapping; + u32 config1; + u32 config2; + u32 config3; + u32 write1; + u32 flags; +#define BNX2_NV_BUFFERED 0x00000001 +#define BNX2_NV_TRANSLATE 0x00000002 +#define BNX2_NV_WREN 0x00000004 + u32 page_bits; + u32 page_size; + u32 addr_mask; + u32 total_size; + u8 *name; +}; + +#define BNX2_MAX_MSIX_HW_VEC 9 
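The ring and context macros in this hunk are pure shift/mask arithmetic, so their effect can be checked in isolation. Below is a self-contained sketch that re-states a few of them with fixed parameters and prints the results; the 4 KiB BCM_PAGE_SIZE and the 16-byte tx_bd size are assumptions made for the example, since neither PAGE_SHIFT nor struct tx_bd is defined in this hunk.

/* Sketch only: re-states the bnx2 TX ring-index and CID macros with
 * assumed page/descriptor sizes so the arithmetic can be verified. */
#include <stdio.h>

#define BCM_PAGE_SIZE		4096	/* assumes PAGE_SHIFT == 12 */
#define TX_DESC_CNT		(BCM_PAGE_SIZE / 16)	/* assumes 16-byte tx_bd */
#define MAX_TX_DESC_CNT		(TX_DESC_CNT - 1)
#define NEXT_TX_BD(x)		((((x) & (MAX_TX_DESC_CNT - 1)) ==	\
				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)

#define CTX_SHIFT		7
#define GET_CID_ADDR(cid)	((cid) << CTX_SHIFT)
#define MB_KERNEL_CTX_SHIFT	8
#define MB_GET_CID_ADDR(cid)	(0x10000 + ((cid) << MB_KERNEL_CTX_SHIFT))
#define TX_CID			16

int main(void)
{
	/* The last BD of each page is used to chain to the next page,
	 * so the producer index skips it: 253 -> 254, but 254 -> 256. */
	printf("NEXT_TX_BD(253) = %d\n", NEXT_TX_BD(253));	/* 254 */
	printf("NEXT_TX_BD(254) = %d\n", NEXT_TX_BD(254));	/* 256 */

	/* Context-memory vs. mailbox address of the TX CID. */
	printf("GET_CID_ADDR(TX_CID)    = 0x%x\n", GET_CID_ADDR(TX_CID));	/* 0x800 */
	printf("MB_GET_CID_ADDR(TX_CID) = 0x%x\n", MB_GET_CID_ADDR(TX_CID));	/* 0x11000, i.e. MB_TX_CID_ADDR */
	return 0;
}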
+#define BNX2_MAX_MSIX_VEC 9 +#ifdef BCM_CNIC +#define BNX2_MIN_MSIX_VEC 2 +#else +#define BNX2_MIN_MSIX_VEC 1 +#endif + + +struct bnx2_irq { + irq_handler_t handler; + unsigned int vector; + u8 requested; + char name[IFNAMSIZ + 2]; +}; + +struct bnx2_tx_ring_info { + u32 tx_prod_bseq; + u16 tx_prod; + u32 tx_bidx_addr; + u32 tx_bseq_addr; + + struct tx_bd *tx_desc_ring; + struct sw_tx_bd *tx_buf_ring; + + u16 tx_cons; + u16 hw_tx_cons; + + dma_addr_t tx_desc_mapping; +}; + +struct bnx2_rx_ring_info { + u32 rx_prod_bseq; + u16 rx_prod; + u16 rx_cons; + + u32 rx_bidx_addr; + u32 rx_bseq_addr; + u32 rx_pg_bidx_addr; + + u16 rx_pg_prod; + u16 rx_pg_cons; + + struct sw_bd *rx_buf_ring; + struct rx_bd *rx_desc_ring[MAX_RX_RINGS]; + struct sw_pg *rx_pg_ring; + struct rx_bd *rx_pg_desc_ring[MAX_RX_PG_RINGS]; + + dma_addr_t rx_desc_mapping[MAX_RX_RINGS]; + dma_addr_t rx_pg_desc_mapping[MAX_RX_PG_RINGS]; +}; + +struct bnx2_napi { + struct napi_struct napi ____cacheline_aligned; + struct bnx2 *bp; + union { + struct status_block *msi; + struct status_block_msix *msix; + } status_blk; + u16 *hw_tx_cons_ptr; + u16 *hw_rx_cons_ptr; + u32 last_status_idx; + u32 int_num; + +#ifdef BCM_CNIC + u32 cnic_tag; + int cnic_present; +#endif + + struct bnx2_rx_ring_info rx_ring; + struct bnx2_tx_ring_info tx_ring; +}; + +struct bnx2 { + /* Fields used in the tx and intr/napi performance paths are grouped */ + /* together in the beginning of the structure. */ + void __iomem *regview; + + struct net_device *dev; + struct pci_dev *pdev; + + atomic_t intr_sem; + + u32 flags; +#define BNX2_FLAG_PCIX 0x00000001 +#define BNX2_FLAG_PCI_32BIT 0x00000002 +#define BNX2_FLAG_MSIX_CAP 0x00000004 +#define BNX2_FLAG_NO_WOL 0x00000008 +#define BNX2_FLAG_USING_MSI 0x00000020 +#define BNX2_FLAG_ASF_ENABLE 0x00000040 +#define BNX2_FLAG_MSI_CAP 0x00000080 +#define BNX2_FLAG_ONE_SHOT_MSI 0x00000100 +#define BNX2_FLAG_PCIE 0x00000200 +#define BNX2_FLAG_USING_MSIX 0x00000400 +#define BNX2_FLAG_USING_MSI_OR_MSIX (BNX2_FLAG_USING_MSI | \ + BNX2_FLAG_USING_MSIX) +#define BNX2_FLAG_JUMBO_BROKEN 0x00000800 +#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000 +#define BNX2_FLAG_BROKEN_STATS 0x00002000 +#define BNX2_FLAG_AER_ENABLED 0x00004000 + + struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC]; + + u32 rx_buf_use_size; /* useable size */ + u32 rx_buf_size; /* with alignment */ + u32 rx_copy_thresh; + u32 rx_jumbo_thresh; + u32 rx_max_ring_idx; + u32 rx_max_pg_ring_idx; + + /* TX constants */ + int tx_ring_size; + u32 tx_wake_thresh; + +#ifdef BCM_CNIC + struct cnic_ops __rcu *cnic_ops; + void *cnic_data; +#endif + + /* End of fields used in the performance code paths. */ + + unsigned int current_interval; +#define BNX2_TIMER_INTERVAL HZ +#define BNX2_SERDES_AN_TIMEOUT (HZ / 3) +#define BNX2_SERDES_FORCED_TIMEOUT (HZ / 10) + + struct timer_list timer; + struct work_struct reset_task; + + /* Used to synchronize phy accesses. 
*/ + spinlock_t phy_lock; + spinlock_t indirect_lock; + + u32 phy_flags; +#define BNX2_PHY_FLAG_SERDES 0x00000001 +#define BNX2_PHY_FLAG_CRC_FIX 0x00000002 +#define BNX2_PHY_FLAG_PARALLEL_DETECT 0x00000004 +#define BNX2_PHY_FLAG_2_5G_CAPABLE 0x00000008 +#define BNX2_PHY_FLAG_INT_MODE_MASK 0x00000300 +#define BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING 0x00000100 +#define BNX2_PHY_FLAG_INT_MODE_LINK_READY 0x00000200 +#define BNX2_PHY_FLAG_DIS_EARLY_DAC 0x00000400 +#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800 +#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000 +#define BNX2_PHY_FLAG_NO_PARALLEL 0x00002000 + + u32 mii_bmcr; + u32 mii_bmsr; + u32 mii_bmsr1; + u32 mii_adv; + u32 mii_lpa; + u32 mii_up1; + + u32 chip_id; + /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ +#define CHIP_NUM(bp) (((bp)->chip_id) & 0xffff0000) +#define CHIP_NUM_5706 0x57060000 +#define CHIP_NUM_5708 0x57080000 +#define CHIP_NUM_5709 0x57090000 + +#define CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000) +#define CHIP_REV_Ax 0x00000000 +#define CHIP_REV_Bx 0x00001000 +#define CHIP_REV_Cx 0x00002000 + +#define CHIP_METAL(bp) (((bp)->chip_id) & 0x00000ff0) +#define CHIP_BONDING(bp) (((bp)->chip_id) & 0x0000000f) + +#define CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0) +#define CHIP_ID_5706_A0 0x57060000 +#define CHIP_ID_5706_A1 0x57060010 +#define CHIP_ID_5706_A2 0x57060020 +#define CHIP_ID_5708_A0 0x57080000 +#define CHIP_ID_5708_B0 0x57081000 +#define CHIP_ID_5708_B1 0x57081010 +#define CHIP_ID_5709_A0 0x57090000 +#define CHIP_ID_5709_A1 0x57090010 + +#define CHIP_BOND_ID(bp) (((bp)->chip_id) & 0xf) + +/* A serdes chip will have the first bit of the bond id set. */ +#define CHIP_BOND_ID_SERDES_BIT 0x01 + + u32 phy_addr; + u32 phy_id; + + u16 bus_speed_mhz; + u8 wol; + + u8 pad; + + u16 fw_wr_seq; + u16 fw_drv_pulse_wr_seq; + + int rx_max_ring; + int rx_ring_size; + + int rx_max_pg_ring; + int rx_pg_ring_size; + + u16 tx_quick_cons_trip; + u16 tx_quick_cons_trip_int; + u16 rx_quick_cons_trip; + u16 rx_quick_cons_trip_int; + u16 comp_prod_trip; + u16 comp_prod_trip_int; + u16 tx_ticks; + u16 tx_ticks_int; + u16 com_ticks; + u16 com_ticks_int; + u16 cmd_ticks; + u16 cmd_ticks_int; + u16 rx_ticks; + u16 rx_ticks_int; + + u32 stats_ticks; + + dma_addr_t status_blk_mapping; + + struct statistics_block *stats_blk; + struct statistics_block *temp_stats_blk; + dma_addr_t stats_blk_mapping; + + int ctx_pages; + void *ctx_blk[4]; + dma_addr_t ctx_blk_mapping[4]; + + u32 hc_cmd; + u32 rx_mode; + + u16 req_line_speed; + u8 req_duplex; + + u8 phy_port; + u8 link_up; + + u16 line_speed; + u8 duplex; + u8 flow_ctrl; /* actual flow ctrl settings */ + /* may be different from */ + /* req_flow_ctrl if autoneg */ + u32 advertising; + + u8 req_flow_ctrl; /* flow ctrl advertisement */ + /* settings or forced */ + /* settings */ + u8 autoneg; +#define AUTONEG_SPEED 1 +#define AUTONEG_FLOW_CTRL 2 + + u8 loopback; +#define MAC_LOOPBACK 1 +#define PHY_LOOPBACK 2 + + u8 serdes_an_pending; + + u8 mac_addr[8]; + + u32 shmem_base; + + char fw_version[32]; + + int pm_cap; + int pcix_cap; + + const struct flash_spec *flash_info; + u32 flash_size; + + int status_stats_size; + + struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC]; + int irq_nvecs; + + u8 num_tx_rings; + u8 num_rx_rings; + + u32 leds_save; + u32 idle_chk_status_idx; + +#ifdef BCM_CNIC + struct mutex cnic_lock; + struct cnic_eth_dev cnic_eth_dev; +#endif + + const struct firmware *mips_firmware; + const struct firmware *rv2p_firmware; +}; + +#define REG_RD(bp, offset) \ + readl(bp->regview + offset) + 
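The chip_id word decoded by the CHIP_* macros inside struct bnx2 above packs four fields (chip num in bits 16-31, rev in 12-15, metal in 4-11, bond id in 0-3). As a quick check of that layout, here is a self-contained sketch; it applies the same masks to a raw value rather than going through the bp pointer the driver macros expect.

/* Sketch only: decodes a bnx2 chip_id with the masks from the CHIP_*
 * macros, taking a plain value instead of the driver's bp pointer. */
#include <stdio.h>

#define CHIP_NUM(id)		((id) & 0xffff0000)
#define CHIP_REV(id)		((id) & 0x0000f000)
#define CHIP_METAL(id)		((id) & 0x00000ff0)
#define CHIP_BOND_ID(id)	((id) & 0xf)
#define CHIP_BOND_ID_SERDES_BIT	0x01

int main(void)
{
	unsigned int chip_id = 0x57090010;	/* CHIP_ID_5709_A1 */

	printf("num 0x%x rev 0x%x metal 0x%x bond 0x%x serdes %d\n",
	       CHIP_NUM(chip_id), CHIP_REV(chip_id), CHIP_METAL(chip_id),
	       CHIP_BOND_ID(chip_id),
	       !!(CHIP_BOND_ID(chip_id) & CHIP_BOND_ID_SERDES_BIT));
	/* Prints: num 0x57090000 rev 0x0 metal 0x10 bond 0x0 serdes 0 */
	return 0;
}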
+#define REG_WR(bp, offset, val) \ + writel(val, bp->regview + offset) + +#define REG_WR16(bp, offset, val) \ + writew(val, bp->regview + offset) + +struct cpu_reg { + u32 mode; + u32 mode_value_halt; + u32 mode_value_sstep; + + u32 state; + u32 state_value_clear; + + u32 gpr0; + u32 evmask; + u32 pc; + u32 inst; + u32 bp; + + u32 spad_base; + + u32 mips_view_base; +}; + +struct bnx2_fw_file_section { + __be32 addr; + __be32 len; + __be32 offset; +}; + +struct bnx2_mips_fw_file_entry { + __be32 start_addr; + struct bnx2_fw_file_section text; + struct bnx2_fw_file_section data; + struct bnx2_fw_file_section rodata; +}; + +struct bnx2_rv2p_fw_file_entry { + struct bnx2_fw_file_section rv2p; + __be32 fixup[8]; +}; + +struct bnx2_mips_fw_file { + struct bnx2_mips_fw_file_entry com; + struct bnx2_mips_fw_file_entry cp; + struct bnx2_mips_fw_file_entry rxp; + struct bnx2_mips_fw_file_entry tpat; + struct bnx2_mips_fw_file_entry txp; +}; + +struct bnx2_rv2p_fw_file { + struct bnx2_rv2p_fw_file_entry proc1; + struct bnx2_rv2p_fw_file_entry proc2; +}; + +#define RV2P_P1_FIXUP_PAGE_SIZE_IDX 0 +#define RV2P_BD_PAGE_SIZE_MSK 0xffff +#define RV2P_BD_PAGE_SIZE ((BCM_PAGE_SIZE / 16) - 1) + +#define RV2P_PROC1 0 +#define RV2P_PROC2 1 + + +/* This value (in milliseconds) determines the frequency of the driver + * issuing the PULSE message code. The firmware monitors this periodic + * pulse to determine when to switch to an OS-absent mode. */ +#define BNX2_DRV_PULSE_PERIOD_MS 250 + +/* This value (in milliseconds) determines how long the driver should + * wait for an acknowledgement from the firmware before timing out. Once + * the firmware has timed out, the driver will assume there is no firmware + * running and there won't be any firmware-driver synchronization during a + * driver reset. 
*/ +#define BNX2_FW_ACK_TIME_OUT_MS 1000 + + +#define BNX2_DRV_RESET_SIGNATURE 0x00000000 +#define BNX2_DRV_RESET_SIGNATURE_MAGIC 0x4841564b /* HAVK */ +//#define DRV_RESET_SIGNATURE_MAGIC 0x47495352 /* RSIG */ + +#define BNX2_DRV_MB 0x00000004 +#define BNX2_DRV_MSG_CODE 0xff000000 +#define BNX2_DRV_MSG_CODE_RESET 0x01000000 +#define BNX2_DRV_MSG_CODE_UNLOAD 0x02000000 +#define BNX2_DRV_MSG_CODE_SHUTDOWN 0x03000000 +#define BNX2_DRV_MSG_CODE_SUSPEND_WOL 0x04000000 +#define BNX2_DRV_MSG_CODE_FW_TIMEOUT 0x05000000 +#define BNX2_DRV_MSG_CODE_PULSE 0x06000000 +#define BNX2_DRV_MSG_CODE_DIAG 0x07000000 +#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000 +#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000 +#define BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE 0x0d000000 +#define BNX2_DRV_MSG_CODE_CMD_SET_LINK 0x10000000 + +#define BNX2_DRV_MSG_DATA 0x00ff0000 +#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000 +#define BNX2_DRV_MSG_DATA_WAIT1 0x00020000 +#define BNX2_DRV_MSG_DATA_WAIT2 0x00030000 +#define BNX2_DRV_MSG_DATA_WAIT3 0x00040000 + +#define BNX2_DRV_MSG_SEQ 0x0000ffff + +#define BNX2_FW_MB 0x00000008 +#define BNX2_FW_MSG_ACK 0x0000ffff +#define BNX2_FW_MSG_STATUS_MASK 0x00ff0000 +#define BNX2_FW_MSG_STATUS_OK 0x00000000 +#define BNX2_FW_MSG_STATUS_FAILURE 0x00ff0000 + +#define BNX2_LINK_STATUS 0x0000000c +#define BNX2_LINK_STATUS_INIT_VALUE 0xffffffff +#define BNX2_LINK_STATUS_LINK_UP 0x1 +#define BNX2_LINK_STATUS_LINK_DOWN 0x0 +#define BNX2_LINK_STATUS_SPEED_MASK 0x1e +#define BNX2_LINK_STATUS_AN_INCOMPLETE (0<<1) +#define BNX2_LINK_STATUS_10HALF (1<<1) +#define BNX2_LINK_STATUS_10FULL (2<<1) +#define BNX2_LINK_STATUS_100HALF (3<<1) +#define BNX2_LINK_STATUS_100BASE_T4 (4<<1) +#define BNX2_LINK_STATUS_100FULL (5<<1) +#define BNX2_LINK_STATUS_1000HALF (6<<1) +#define BNX2_LINK_STATUS_1000FULL (7<<1) +#define BNX2_LINK_STATUS_2500HALF (8<<1) +#define BNX2_LINK_STATUS_2500FULL (9<<1) +#define BNX2_LINK_STATUS_AN_ENABLED (1<<5) +#define BNX2_LINK_STATUS_AN_COMPLETE (1<<6) +#define BNX2_LINK_STATUS_PARALLEL_DET (1<<7) +#define BNX2_LINK_STATUS_RESERVED (1<<8) +#define BNX2_LINK_STATUS_PARTNER_AD_1000FULL (1<<9) +#define BNX2_LINK_STATUS_PARTNER_AD_1000HALF (1<<10) +#define BNX2_LINK_STATUS_PARTNER_AD_100BT4 (1<<11) +#define BNX2_LINK_STATUS_PARTNER_AD_100FULL (1<<12) +#define BNX2_LINK_STATUS_PARTNER_AD_100HALF (1<<13) +#define BNX2_LINK_STATUS_PARTNER_AD_10FULL (1<<14) +#define BNX2_LINK_STATUS_PARTNER_AD_10HALF (1<<15) +#define BNX2_LINK_STATUS_TX_FC_ENABLED (1<<16) +#define BNX2_LINK_STATUS_RX_FC_ENABLED (1<<17) +#define BNX2_LINK_STATUS_PARTNER_SYM_PAUSE_CAP (1<<18) +#define BNX2_LINK_STATUS_PARTNER_ASYM_PAUSE_CAP (1<<19) +#define BNX2_LINK_STATUS_SERDES_LINK (1<<20) +#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21) +#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22) +#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED (1<<31) + +#define BNX2_DRV_PULSE_MB 0x00000010 +#define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff + +/* Indicate to the firmware not to go into the + * OS-absent mode when it is not getting the driver pulse. + * This is used for debugging. 
*/ +#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000 + +#define BNX2_DRV_MB_ARG0 0x00000014 +#define BNX2_NETLINK_SET_LINK_SPEED_10HALF (1<<0) +#define BNX2_NETLINK_SET_LINK_SPEED_10FULL (1<<1) +#define BNX2_NETLINK_SET_LINK_SPEED_10 \ + (BNX2_NETLINK_SET_LINK_SPEED_10HALF | \ + BNX2_NETLINK_SET_LINK_SPEED_10FULL) +#define BNX2_NETLINK_SET_LINK_SPEED_100HALF (1<<2) +#define BNX2_NETLINK_SET_LINK_SPEED_100FULL (1<<3) +#define BNX2_NETLINK_SET_LINK_SPEED_100 \ + (BNX2_NETLINK_SET_LINK_SPEED_100HALF | \ + BNX2_NETLINK_SET_LINK_SPEED_100FULL) +#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF (1<<4) +#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL (1<<5) +#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF (1<<6) +#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL (1<<7) +#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF (1<<8) +#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL (1<<9) +#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG (1<<10) +#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE (1<<11) +#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE (1<<12) +#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE (1<<13) +#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED (1<<14) +#define BNX2_NETLINK_SET_LINK_PHY_RESET (1<<15) + +#define BNX2_DEV_INFO_SIGNATURE 0x00000020 +#define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900 +#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00 +#define BNX2_DEV_INFO_FEATURE_CFG_VALID 0x01 +#define BNX2_DEV_INFO_SECONDARY_PORT 0x80 +#define BNX2_DEV_INFO_DRV_ALWAYS_ALIVE 0x40 + +#define BNX2_SHARED_HW_CFG_PART_NUM 0x00000024 + +#define BNX2_SHARED_HW_CFG_POWER_DISSIPATED 0x00000034 +#define BNX2_SHARED_HW_CFG_POWER_STATE_D3_MASK 0xff000000 +#define BNX2_SHARED_HW_CFG_POWER_STATE_D2_MASK 0xff0000 +#define BNX2_SHARED_HW_CFG_POWER_STATE_D1_MASK 0xff00 +#define BNX2_SHARED_HW_CFG_POWER_STATE_D0_MASK 0xff + +#define BNX2_SHARED_HW_CFG POWER_CONSUMED 0x00000038 +#define BNX2_SHARED_HW_CFG_CONFIG 0x0000003c +#define BNX2_SHARED_HW_CFG_DESIGN_NIC 0 +#define BNX2_SHARED_HW_CFG_DESIGN_LOM 0x1 +#define BNX2_SHARED_HW_CFG_PHY_COPPER 0 +#define BNX2_SHARED_HW_CFG_PHY_FIBER 0x2 +#define BNX2_SHARED_HW_CFG_PHY_2_5G 0x20 +#define BNX2_SHARED_HW_CFG_PHY_BACKPLANE 0x40 +#define BNX2_SHARED_HW_CFG_LED_MODE_SHIFT_BITS 8 +#define BNX2_SHARED_HW_CFG_LED_MODE_MASK 0x300 +#define BNX2_SHARED_HW_CFG_LED_MODE_MAC 0 +#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY1 0x100 +#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY2 0x200 +#define BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX 0x8000 + +#define BNX2_SHARED_HW_CFG_CONFIG2 0x00000040 +#define BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK 0x00fff000 + +#define BNX2_DEV_INFO_BC_REV 0x0000004c + +#define BNX2_PORT_HW_CFG_MAC_UPPER 0x00000050 +#define BNX2_PORT_HW_CFG_UPPERMAC_MASK 0xffff + +#define BNX2_PORT_HW_CFG_MAC_LOWER 0x00000054 +#define BNX2_PORT_HW_CFG_CONFIG 0x00000058 +#define BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK 0x0000ffff +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK 0x001f0000 +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_AN 0x00000000 +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G 0x00030000 +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_2_5G 0x00040000 + +#define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER 0x00000068 +#define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER 0x0000006c +#define BNX2_PORT_HW_CFG_IMD_MAC_B_UPPER 0x00000070 +#define BNX2_PORT_HW_CFG_IMD_MAC_B_LOWER 0x00000074 +#define BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER 0x00000078 +#define BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER 0x0000007c + +#define BNX2_DEV_INFO_PER_PORT_HW_CONFIG2 0x000000b4 + +#define BNX2_DEV_INFO_FORMAT_REV 0x000000c4 +#define BNX2_DEV_INFO_FORMAT_REV_MASK 0xff000000 +#define 
BNX2_DEV_INFO_FORMAT_REV_ID ('A' << 24) + +#define BNX2_SHARED_FEATURE 0x000000c8 +#define BNX2_SHARED_FEATURE_MASK 0xffffffff + +#define BNX2_PORT_FEATURE 0x000000d8 +#define BNX2_PORT2_FEATURE 0x00000014c +#define BNX2_PORT_FEATURE_WOL_ENABLED 0x01000000 +#define BNX2_PORT_FEATURE_MBA_ENABLED 0x02000000 +#define BNX2_PORT_FEATURE_ASF_ENABLED 0x04000000 +#define BNX2_PORT_FEATURE_IMD_ENABLED 0x08000000 +#define BNX2_PORT_FEATURE_BAR1_SIZE_MASK 0xf +#define BNX2_PORT_FEATURE_BAR1_SIZE_DISABLED 0x0 +#define BNX2_PORT_FEATURE_BAR1_SIZE_64K 0x1 +#define BNX2_PORT_FEATURE_BAR1_SIZE_128K 0x2 +#define BNX2_PORT_FEATURE_BAR1_SIZE_256K 0x3 +#define BNX2_PORT_FEATURE_BAR1_SIZE_512K 0x4 +#define BNX2_PORT_FEATURE_BAR1_SIZE_1M 0x5 +#define BNX2_PORT_FEATURE_BAR1_SIZE_2M 0x6 +#define BNX2_PORT_FEATURE_BAR1_SIZE_4M 0x7 +#define BNX2_PORT_FEATURE_BAR1_SIZE_8M 0x8 +#define BNX2_PORT_FEATURE_BAR1_SIZE_16M 0x9 +#define BNX2_PORT_FEATURE_BAR1_SIZE_32M 0xa +#define BNX2_PORT_FEATURE_BAR1_SIZE_64M 0xb +#define BNX2_PORT_FEATURE_BAR1_SIZE_128M 0xc +#define BNX2_PORT_FEATURE_BAR1_SIZE_256M 0xd +#define BNX2_PORT_FEATURE_BAR1_SIZE_512M 0xe +#define BNX2_PORT_FEATURE_BAR1_SIZE_1G 0xf + +#define BNX2_PORT_FEATURE_WOL 0xdc +#define BNX2_PORT2_FEATURE_WOL 0x150 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_SHIFT_BITS 4 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_MASK 0x30 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_DISABLE 0 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC 0x10 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_ACPI 0x20 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x30 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_MASK 0xf +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_AUTONEG 0 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10HALF 1 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10FULL 2 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100HALF 3 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100FULL 4 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000HALF 5 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000FULL 6 +#define BNX2_PORT_FEATURE_WOL_AUTONEG_ADVERTISE_1000 0x40 +#define BNX2_PORT_FEATURE_WOL_RESERVED_PAUSE_CAP 0x400 +#define BNX2_PORT_FEATURE_WOL_RESERVED_ASYM_PAUSE_CAP 0x800 + +#define BNX2_PORT_FEATURE_MBA 0xe0 +#define BNX2_PORT2_FEATURE_MBA 0x154 +#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT_BITS 0 +#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x3 +#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0 +#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 1 +#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 2 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_SHIFT_BITS 2 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_AUTONEG 0 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10HALF 0x4 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10FULL 0x8 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100HALF 0xc +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100FULL 0x10 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000HALF 0x14 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000FULL 0x18 +#define BNX2_PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x40 +#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_S 0 +#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x80 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT_BITS 8 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0xff00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1K 0x100 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x200 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x300 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x400 +#define 
BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x500 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x600 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x700 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x800 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x900 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0xa00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0xb00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0xc00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0xd00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0xe00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0xf00 +#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT_BITS 16 +#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0xf0000 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT_BITS 20 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x300000 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x100000 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x200000 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x300000 + +#define BNX2_PORT_FEATURE_IMD 0xe4 +#define BNX2_PORT2_FEATURE_IMD 0x158 +#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_DEFAULT 0 +#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_ENABLE 1 + +#define BNX2_PORT_FEATURE_VLAN 0xe8 +#define BNX2_PORT2_FEATURE_VLAN 0x15c +#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff +#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000 + +#define BNX2_MFW_VER_PTR 0x00000014c + +#define BNX2_BC_STATE_RESET_TYPE 0x000001c0 +#define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254 +#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff +#define BNX2_BC_STATE_RESET_TYPE_NONE (BNX2_BC_STATE_RESET_TYPE_SIG | \ + 0x00010000) +#define BNX2_BC_STATE_RESET_TYPE_PCI (BNX2_BC_STATE_RESET_TYPE_SIG | \ + 0x00020000) +#define BNX2_BC_STATE_RESET_TYPE_VAUX (BNX2_BC_STATE_RESET_TYPE_SIG | \ + 0x00030000) +#define BNX2_BC_STATE_RESET_TYPE_DRV_MASK DRV_MSG_CODE +#define BNX2_BC_STATE_RESET_TYPE_DRV_RESET (BNX2_BC_STATE_RESET_TYPE_SIG | \ + DRV_MSG_CODE_RESET) +#define BNX2_BC_STATE_RESET_TYPE_DRV_UNLOAD (BNX2_BC_STATE_RESET_TYPE_SIG | \ + DRV_MSG_CODE_UNLOAD) +#define BNX2_BC_STATE_RESET_TYPE_DRV_SHUTDOWN (BNX2_BC_STATE_RESET_TYPE_SIG | \ + DRV_MSG_CODE_SHUTDOWN) +#define BNX2_BC_STATE_RESET_TYPE_DRV_WOL (BNX2_BC_STATE_RESET_TYPE_SIG | \ + DRV_MSG_CODE_WOL) +#define BNX2_BC_STATE_RESET_TYPE_DRV_DIAG (BNX2_BC_STATE_RESET_TYPE_SIG | \ + DRV_MSG_CODE_DIAG) +#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \ + (msg)) + +#define BNX2_BC_STATE 0x000001c4 +#define BNX2_BC_STATE_ERR_MASK 0x0000ff00 +#define BNX2_BC_STATE_SIGN 0x42530000 +#define BNX2_BC_STATE_SIGN_MASK 0xffff0000 +#define BNX2_BC_STATE_BC1_START (BNX2_BC_STATE_SIGN | 0x1) +#define BNX2_BC_STATE_GET_NVM_CFG1 (BNX2_BC_STATE_SIGN | 0x2) +#define BNX2_BC_STATE_PROG_BAR (BNX2_BC_STATE_SIGN | 0x3) +#define BNX2_BC_STATE_INIT_VID (BNX2_BC_STATE_SIGN | 0x4) +#define BNX2_BC_STATE_GET_NVM_CFG2 (BNX2_BC_STATE_SIGN | 0x5) +#define BNX2_BC_STATE_APPLY_WKARND (BNX2_BC_STATE_SIGN | 0x6) +#define BNX2_BC_STATE_LOAD_BC2 (BNX2_BC_STATE_SIGN | 0x7) +#define BNX2_BC_STATE_GOING_BC2 (BNX2_BC_STATE_SIGN | 0x8) +#define BNX2_BC_STATE_GOING_DIAG (BNX2_BC_STATE_SIGN | 0x9) +#define BNX2_BC_STATE_RT_FINAL_INIT (BNX2_BC_STATE_SIGN | 0x81) +#define BNX2_BC_STATE_RT_WKARND (BNX2_BC_STATE_SIGN | 0x82) +#define BNX2_BC_STATE_RT_DRV_PULSE (BNX2_BC_STATE_SIGN | 0x83) +#define BNX2_BC_STATE_RT_FIOEVTS (BNX2_BC_STATE_SIGN | 0x84) +#define BNX2_BC_STATE_RT_DRV_CMD (BNX2_BC_STATE_SIGN | 0x85) +#define 
BNX2_BC_STATE_RT_LOW_POWER (BNX2_BC_STATE_SIGN | 0x86) +#define BNX2_BC_STATE_RT_SET_WOL (BNX2_BC_STATE_SIGN | 0x87) +#define BNX2_BC_STATE_RT_OTHER_FW (BNX2_BC_STATE_SIGN | 0x88) +#define BNX2_BC_STATE_RT_GOING_D3 (BNX2_BC_STATE_SIGN | 0x89) +#define BNX2_BC_STATE_ERR_BAD_VERSION (BNX2_BC_STATE_SIGN | 0x0100) +#define BNX2_BC_STATE_ERR_BAD_BC2_CRC (BNX2_BC_STATE_SIGN | 0x0200) +#define BNX2_BC_STATE_ERR_BC1_LOOP (BNX2_BC_STATE_SIGN | 0x0300) +#define BNX2_BC_STATE_ERR_UNKNOWN_CMD (BNX2_BC_STATE_SIGN | 0x0400) +#define BNX2_BC_STATE_ERR_DRV_DEAD (BNX2_BC_STATE_SIGN | 0x0500) +#define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600) +#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700) + +#define BNX2_BC_STATE_CONDITION 0x000001c8 +#define BNX2_CONDITION_MFW_RUN_UNKNOWN 0x00000000 +#define BNX2_CONDITION_MFW_RUN_IPMI 0x00002000 +#define BNX2_CONDITION_MFW_RUN_UMP 0x00004000 +#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 +#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 +#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 + +#define BNX2_BC_STATE_DEBUG_CMD 0x1dc +#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 +#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000 +#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff +#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff + +#define BNX2_FW_EVT_CODE_MB 0x354 +#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000 +#define BNX2_FW_EVT_CODE_LINK_EVENT 0x00000001 + +#define BNX2_DRV_ACK_CAP_MB 0x364 +#define BNX2_DRV_ACK_CAP_SIGNATURE 0x35450000 +#define BNX2_CAPABILITY_SIGNATURE_MASK 0xFFFF0000 + +#define BNX2_FW_CAP_MB 0x368 +#define BNX2_FW_CAP_SIGNATURE 0xaa550000 +#define BNX2_FW_ACK_DRV_SIGNATURE 0x52500000 +#define BNX2_FW_CAP_SIGNATURE_MASK 0xffff0000 +#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE 0x00000001 +#define BNX2_FW_CAP_REMOTE_PHY_PRESENT 0x00000002 +#define BNX2_FW_CAP_MFW_CAN_KEEP_VLAN 0x00000008 +#define BNX2_FW_CAP_BC_CAN_KEEP_VLAN 0x00000010 +#define BNX2_FW_CAP_CAN_KEEP_VLAN (BNX2_FW_CAP_BC_CAN_KEEP_VLAN | \ + BNX2_FW_CAP_MFW_CAN_KEEP_VLAN) + +#define BNX2_RPHY_SIGNATURE 0x36c +#define BNX2_RPHY_LOAD_SIGNATURE 0x5a5a5a5a + +#define BNX2_RPHY_FLAGS 0x370 +#define BNX2_RPHY_SERDES_LINK 0x374 +#define BNX2_RPHY_COPPER_LINK 0x378 + +#define BNX2_ISCSI_INITIATOR 0x3dc +#define BNX2_ISCSI_INITIATOR_EN 0x00080000 + +#define BNX2_ISCSI_MAX_CONN 0x3e4 +#define BNX2_ISCSI_MAX_CONN_MASK 0xffff0000 +#define BNX2_ISCSI_MAX_CONN_SHIFT 16 + +#define HOST_VIEW_SHMEM_BASE 0x167c00 + +#define DP_SHMEM_LINE(bp, offset) \ + netdev_err(bp->dev, "DEBUG: %08x: %08x %08x %08x %08x\n", \ + offset, \ + bnx2_shmem_rd(bp, offset), \ + bnx2_shmem_rd(bp, offset + 4), \ + bnx2_shmem_rd(bp, offset + 8), \ + bnx2_shmem_rd(bp, offset + 12)) + +#endif diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h new file mode 100644 index 0000000..940eb91 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2_fw.h @@ -0,0 +1,88 @@ +/* bnx2_fw.h: Broadcom NX2 network driver. + * + * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +/* Initialized Values for the Completion Processor. 
*/ +static const struct cpu_reg cpu_reg_com = { + .mode = BNX2_COM_CPU_MODE, + .mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT, + .mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA, + .state = BNX2_COM_CPU_STATE, + .state_value_clear = 0xffffff, + .gpr0 = BNX2_COM_CPU_REG_FILE, + .evmask = BNX2_COM_CPU_EVENT_MASK, + .pc = BNX2_COM_CPU_PROGRAM_COUNTER, + .inst = BNX2_COM_CPU_INSTRUCTION, + .bp = BNX2_COM_CPU_HW_BREAKPOINT, + .spad_base = BNX2_COM_SCRATCH, + .mips_view_base = 0x8000000, +}; + +/* Initialized Values for the Command Processor. */ +static const struct cpu_reg cpu_reg_cp = { + .mode = BNX2_CP_CPU_MODE, + .mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT, + .mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA, + .state = BNX2_CP_CPU_STATE, + .state_value_clear = 0xffffff, + .gpr0 = BNX2_CP_CPU_REG_FILE, + .evmask = BNX2_CP_CPU_EVENT_MASK, + .pc = BNX2_CP_CPU_PROGRAM_COUNTER, + .inst = BNX2_CP_CPU_INSTRUCTION, + .bp = BNX2_CP_CPU_HW_BREAKPOINT, + .spad_base = BNX2_CP_SCRATCH, + .mips_view_base = 0x8000000, +}; + +/* Initialized Values for the RX Processor. */ +static const struct cpu_reg cpu_reg_rxp = { + .mode = BNX2_RXP_CPU_MODE, + .mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT, + .mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA, + .state = BNX2_RXP_CPU_STATE, + .state_value_clear = 0xffffff, + .gpr0 = BNX2_RXP_CPU_REG_FILE, + .evmask = BNX2_RXP_CPU_EVENT_MASK, + .pc = BNX2_RXP_CPU_PROGRAM_COUNTER, + .inst = BNX2_RXP_CPU_INSTRUCTION, + .bp = BNX2_RXP_CPU_HW_BREAKPOINT, + .spad_base = BNX2_RXP_SCRATCH, + .mips_view_base = 0x8000000, +}; + +/* Initialized Values for the TX Patch-up Processor. */ +static const struct cpu_reg cpu_reg_tpat = { + .mode = BNX2_TPAT_CPU_MODE, + .mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT, + .mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA, + .state = BNX2_TPAT_CPU_STATE, + .state_value_clear = 0xffffff, + .gpr0 = BNX2_TPAT_CPU_REG_FILE, + .evmask = BNX2_TPAT_CPU_EVENT_MASK, + .pc = BNX2_TPAT_CPU_PROGRAM_COUNTER, + .inst = BNX2_TPAT_CPU_INSTRUCTION, + .bp = BNX2_TPAT_CPU_HW_BREAKPOINT, + .spad_base = BNX2_TPAT_SCRATCH, + .mips_view_base = 0x8000000, +}; + +/* Initialized Values for the TX Processor. */ +static const struct cpu_reg cpu_reg_txp = { + .mode = BNX2_TXP_CPU_MODE, + .mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT, + .mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA, + .state = BNX2_TXP_CPU_STATE, + .state_value_clear = 0xffffff, + .gpr0 = BNX2_TXP_CPU_REG_FILE, + .evmask = BNX2_TXP_CPU_EVENT_MASK, + .pc = BNX2_TXP_CPU_PROGRAM_COUNTER, + .inst = BNX2_TXP_CPU_INSTRUCTION, + .bp = BNX2_TXP_CPU_HW_BREAKPOINT, + .spad_base = BNX2_TXP_SCRATCH, + .mips_view_base = 0x8000000, +}; diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile new file mode 100644 index 0000000..48fbdd4 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for Broadcom 10-Gigabit Ethernet driver +# + +obj-$(CONFIG_BNX2X) += bnx2x.o + +bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h new file mode 100644 index 0000000..c423504 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -0,0 +1,2014 @@ +/* bnx2x.h: Broadcom Everest network driver. 
+ * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + */ + +#ifndef BNX2X_H +#define BNX2X_H +#include +#include +#include + +/* compilation time flags */ + +/* define this to make the driver freeze on error to allow getting debug info + * (you will need to reboot afterwards) */ +/* #define BNX2X_STOP_ON_ERROR */ + +#define DRV_MODULE_VERSION "1.70.00-0" +#define DRV_MODULE_RELDATE "2011/06/13" +#define BNX2X_BC_VER 0x040200 + +#if defined(CONFIG_DCB) +#define BCM_DCBNL +#endif +#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) +#define BCM_CNIC 1 +#include "../cnic_if.h" +#endif + +#ifdef BCM_CNIC +#define BNX2X_MIN_MSIX_VEC_CNT 3 +#define BNX2X_MSIX_VEC_FP_START 2 +#else +#define BNX2X_MIN_MSIX_VEC_CNT 2 +#define BNX2X_MSIX_VEC_FP_START 1 +#endif + +#include + +#include "bnx2x_reg.h" +#include "bnx2x_fw_defs.h" +#include "bnx2x_hsi.h" +#include "bnx2x_link.h" +#include "bnx2x_sp.h" +#include "bnx2x_dcb.h" +#include "bnx2x_stats.h" + +/* error/debug prints */ + +#define DRV_MODULE_NAME "bnx2x" + +/* for messages that are currently off */ +#define BNX2X_MSG_OFF 0 +#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */ +#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */ +#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */ + +#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */ + +/* regular debug print */ +#define DP(__mask, __fmt, __args...) \ +do { \ + if (bp->msg_enable & (__mask)) \ + printk(DP_LEVEL "[%s:%d(%s)]" __fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__args); \ +} while (0) + +#define DP_CONT(__mask, __fmt, __args...) \ +do { \ + if (bp->msg_enable & (__mask)) \ + pr_cont(__fmt, ##__args); \ +} while (0) + +/* errors debug print */ +#define BNX2X_DBG_ERR(__fmt, __args...) \ +do { \ + if (netif_msg_probe(bp)) \ + pr_err("[%s:%d(%s)]" __fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__args); \ +} while (0) + +/* for errors (never masked) */ +#define BNX2X_ERR(__fmt, __args...) \ +do { \ + pr_err("[%s:%d(%s)]" __fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__args); \ + } while (0) + +#define BNX2X_ERROR(__fmt, __args...) do { \ + pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \ + } while (0) + + +/* before we have a dev->name use dev_info() */ +#define BNX2X_DEV_INFO(__fmt, __args...) 
\ +do { \ + if (netif_msg_probe(bp)) \ + dev_info(&bp->pdev->dev, __fmt, ##__args); \ +} while (0) + +#define BNX2X_MAC_FMT "%pM" +#define BNX2X_MAC_PRN_LIST(mac) (mac) + + +#ifdef BNX2X_STOP_ON_ERROR +void bnx2x_int_disable(struct bnx2x *bp); +#define bnx2x_panic() do { \ + bp->panic = 1; \ + BNX2X_ERR("driver assert\n"); \ + bnx2x_int_disable(bp); \ + bnx2x_panic_dump(bp); \ + } while (0) +#else +#define bnx2x_panic() do { \ + bp->panic = 1; \ + BNX2X_ERR("driver assert\n"); \ + bnx2x_panic_dump(bp); \ + } while (0) +#endif + +#define bnx2x_mc_addr(ha) ((ha)->addr) +#define bnx2x_uc_addr(ha) ((ha)->addr) + +#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) +#define U64_HI(x) (u32)(((u64)(x)) >> 32) +#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) + + +#define REG_ADDR(bp, offset) ((bp->regview) + (offset)) + +#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) +#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) +#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset)) + +#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) +#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) +#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) + +#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset) +#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val) + +#define REG_RD_DMAE(bp, offset, valp, len32) \ + do { \ + bnx2x_read_dmae(bp, offset, len32);\ + memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \ + } while (0) + +#define REG_WR_DMAE(bp, offset, valp, len32) \ + do { \ + memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \ + bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \ + offset, len32); \ + } while (0) + +#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \ + REG_WR_DMAE(bp, offset, valp, len32) + +#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \ + do { \ + memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ + bnx2x_write_big_buf_wb(bp, addr, len32); \ + } while (0) + +#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \ + offsetof(struct shmem_region, field)) +#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) +#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) + +#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \ + offsetof(struct shmem2_region, field)) +#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) +#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) +#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \ + offsetof(struct mf_cfg, field)) +#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \ + offsetof(struct mf2_cfg, field)) + +#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field)) +#define MF_CFG_WR(bp, field, val) REG_WR(bp,\ + MF_CFG_ADDR(bp, field), (val)) +#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field)) + +#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \ + (SHMEM2_RD((bp), size) > \ + offsetof(struct shmem2_region, field))) + +#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) +#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) + +/* SP SB indices */ + +/* General SP events - stats query, cfc delete, etc */ +#define HC_SP_INDEX_ETH_DEF_CONS 3 + +/* EQ completions */ +#define HC_SP_INDEX_EQ_CONS 7 + +/* FCoE L2 connection completions */ +#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6 +#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4 +/* iSCSI L2 */ +#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 +#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 + +/* Special 
clients parameters */ + +/* SB indices */ +/* FCoE L2 */ +#define BNX2X_FCOE_L2_RX_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS]) + +#define BNX2X_FCOE_L2_TX_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS]) + +/** + * CIDs and CLIDs: + * The CLIDs below are for func 0; the CLID for other + * functions is calculated by the formula: + * + * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X + * + */ +/* iSCSI L2 */ +#define BNX2X_ISCSI_ETH_CL_ID_IDX 1 +#define BNX2X_ISCSI_ETH_CID 49 + +/* FCoE L2 */ +#define BNX2X_FCOE_ETH_CL_ID_IDX 2 +#define BNX2X_FCOE_ETH_CID 50 + +/** Additional rings budgeting */ +#ifdef BCM_CNIC +#define CNIC_PRESENT 1 +#define FCOE_PRESENT 1 +#else +#define CNIC_PRESENT 0 +#define FCOE_PRESENT 0 +#endif /* BCM_CNIC */ +#define NON_ETH_CONTEXT_USE (FCOE_PRESENT) + +#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR + +#define SM_RX_ID 0 +#define SM_TX_ID 1 + +/* defines for multiple tx priority indices */ +#define FIRST_TX_ONLY_COS_INDEX 1 +#define FIRST_TX_COS_INDEX 0 + +/* defines for decoding the fastpath index and the cos index out of the + * transmission queue index + */ +#define MAX_TXQS_PER_COS FP_SB_MAX_E1x + +#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS) +#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS) + +/* rules for calculating the cids of tx-only connections */ +#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS) +#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS) + +/* fp index inside class of service range */ +#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS) + +/* + * 0..15 eth cos0 + * 16..31 eth cos1 if applicable + * 32..47 eth cos2 if applicable + * fcoe queue follows eth queues (16, 32, 48 depending on cos) + */ +#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos) +#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) + +/* fast path */ +struct sw_rx_bd { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +struct sw_tx_bd { + struct sk_buff *skb; + u16 first_bd; + u8 flags; +/* Set on the first BD descriptor when there is a split BD */ +#define BNX2X_TSO_SPLIT_BD (1<<0) +}; + +struct sw_rx_page { + struct page *page; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +union db_prod { + struct doorbell_set_prod data; + u32 raw; +}; + + +/* MC hsi */ +#define BCM_PAGE_SHIFT 12 +#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) +#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) +#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) + +#define PAGES_PER_SGE_SHIFT 0 +#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) +#define SGE_PAGE_SIZE PAGE_SIZE +#define SGE_PAGE_SHIFT PAGE_SHIFT +#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) + +/* SGE ring related macros */ +#define NUM_RX_SGE_PAGES 2 +#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) +#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) +/* RX_SGE_CNT is promised to be a power of 2 */ +#define RX_SGE_MASK (RX_SGE_CNT - 1) +#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) +#define MAX_RX_SGE (NUM_RX_SGE - 1) +#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ + (MAX_RX_SGE_CNT - 1)) ? 
(x) + 3 : (x) + 1) +#define RX_SGE(x) ((x) & MAX_RX_SGE) + +/* Manipulate a bit vector defined as an array of u64 */ + +/* Number of bits in one sge_mask array element */ +#define BIT_VEC64_ELEM_SZ 64 +#define BIT_VEC64_ELEM_SHIFT 6 +#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) + + +#define __BIT_VEC64_SET_BIT(el, bit) \ + do { \ + el = ((el) | ((u64)0x1 << (bit))); \ + } while (0) + +#define __BIT_VEC64_CLEAR_BIT(el, bit) \ + do { \ + el = ((el) & (~((u64)0x1 << (bit)))); \ + } while (0) + + +#define BIT_VEC64_SET_BIT(vec64, idx) \ + __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + +#define BIT_VEC64_CLEAR_BIT(vec64, idx) \ + __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + +#define BIT_VEC64_TEST_BIT(vec64, idx) \ + (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ + ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) + +/* Creates a bitmask of all ones in less significant bits. + idx - index of the most significant bit in the created mask */ +#define BIT_VEC64_ONES_MASK(idx) \ + (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) +#define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0)) + +/*******************************************************/ + + + +/* Number of u64 elements in SGE mask array */ +#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ + BIT_VEC64_ELEM_SZ) +#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) +#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) + +union host_hc_status_block { + /* pointer to fp status block e1x */ + struct host_hc_status_block_e1x *e1x_sb; + /* pointer to fp status block e2 */ + struct host_hc_status_block_e2 *e2_sb; +}; + +struct bnx2x_agg_info { + /* + * First aggregation buffer is an skb, the following - are pages. + * We will preallocate the skbs for each aggregation when + * we open the interface and will replace the BD at the consumer + * with this one when we receive the TPA_START CQE in order to + * keep the Rx BD ring consistent. 
+ */ + struct sw_rx_bd first_buf; + u8 tpa_state; +#define BNX2X_TPA_START 1 +#define BNX2X_TPA_STOP 2 +#define BNX2X_TPA_ERROR 3 + u8 placement_offset; + u16 parsing_flags; + u16 vlan_tag; + u16 len_on_bd; +}; + +#define Q_STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) + +struct bnx2x_fp_txdata { + + struct sw_tx_bd *tx_buf_ring; + + union eth_tx_bd_types *tx_desc_ring; + dma_addr_t tx_desc_mapping; + + u32 cid; + + union db_prod tx_db; + + u16 tx_pkt_prod; + u16 tx_pkt_cons; + u16 tx_bd_prod; + u16 tx_bd_cons; + + unsigned long tx_pkt; + + __le16 *tx_cons_sb; + + int txq_index; +}; + +struct bnx2x_fastpath { + struct bnx2x *bp; /* parent */ + +#define BNX2X_NAPI_WEIGHT 128 + struct napi_struct napi; + union host_hc_status_block status_blk; + /* chip-independent shortcuts into sb structure */ + __le16 *sb_index_values; + __le16 *sb_running_index; + /* chip-independent shortcut into rx_prods_offset memory */ + u32 ustorm_rx_prods_offset; + + u32 rx_buf_size; + + dma_addr_t status_blk_mapping; + + u8 max_cos; /* actual number of active tx coses */ + struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; + + struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ + struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ + + struct eth_rx_bd *rx_desc_ring; + dma_addr_t rx_desc_mapping; + + union eth_rx_cqe *rx_comp_ring; + dma_addr_t rx_comp_mapping; + + /* SGE ring */ + struct eth_rx_sge *rx_sge_ring; + dma_addr_t rx_sge_mapping; + + u64 sge_mask[RX_SGE_MASK_LEN]; + + u32 cid; + + __le16 fp_hc_idx; + + u8 index; /* number in fp array */ + u8 cl_id; /* eth client id */ + u8 cl_qzone_id; + u8 fw_sb_id; /* status block number in FW */ + u8 igu_sb_id; /* status block number in HW */ + + u16 rx_bd_prod; + u16 rx_bd_cons; + u16 rx_comp_prod; + u16 rx_comp_cons; + u16 rx_sge_prod; + /* The last maximal completed SGE */ + u16 last_max_sge; + __le16 *rx_cons_sb; + unsigned long rx_pkt, + rx_calls; + + /* TPA related */ + struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; + u8 disable_tpa; +#ifdef BNX2X_STOP_ON_ERROR + u64 tpa_queue_used; +#endif + + struct tstorm_per_queue_stats old_tclient; + struct ustorm_per_queue_stats old_uclient; + struct xstorm_per_queue_stats old_xclient; + struct bnx2x_eth_q_stats eth_q_stats; + + /* The size is calculated using the following: + sizeof name field from netdev structure + + 4 ('-Xx-' string) + + 4 (for the digits and to make it DWORD aligned) */ +#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) + char name[FP_NAME_SIZE]; + + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* Queue State object */ + struct bnx2x_queue_sp_obj q_obj; + +}; + +#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) + +/* Use 2500 as a mini-jumbo MTU for FCoE */ +#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 + +/* FCoE L2 `fastpath' entry is right after the eth entries */ +#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) +#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) +#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) +#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ + txdata[FIRST_TX_COS_INDEX].var) + + +#define IS_ETH_FP(fp) (fp->index < \ + BNX2X_NUM_ETH_QUEUES(fp->bp)) +#ifdef BCM_CNIC +#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) +#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) +#else +#define IS_FCOE_FP(fp) false +#define IS_FCOE_IDX(idx) false +#endif + + +/* MC hsi */ +#define MAX_FETCH_BD 13 /* HW max BDs per packet */ +#define RX_COPY_THRESH 92 + +#define NUM_TX_RINGS 16 +#define TX_DESC_CNT (BCM_PAGE_SIZE / 
sizeof(union eth_tx_bd_types)) +#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) +#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) +#define MAX_TX_BD (NUM_TX_BD - 1) +#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) +#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ + (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) +#define TX_BD(x) ((x) & MAX_TX_BD) +#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) + +/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ +#define NUM_RX_RINGS 8 +#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) +#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) +#define RX_DESC_MASK (RX_DESC_CNT - 1) +#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) +#define MAX_RX_BD (NUM_RX_BD - 1) +#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) +#define MIN_RX_AVAIL 128 + +#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ + ETH_MIN_RX_CQES_WITH_TPA_E1 : \ + ETH_MIN_RX_CQES_WITH_TPA_E1H_E2) +#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA +#define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL)) +#define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\ + MIN_RX_AVAIL)) + +#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ + (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) +#define RX_BD(x) ((x) & MAX_RX_BD) + +/* + * As long as CQE is X times bigger than BD entry we have to allocate X times + * more pages for CQ ring in order to keep it balanced with BD ring + */ +#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) +#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) +#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) +#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) +#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) +#define MAX_RCQ_BD (NUM_RCQ_BD - 1) +#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) +#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ + (MAX_RCQ_DESC_CNT - 1)) ? 
(x) + 2 : (x) + 1) +#define RCQ_BD(x) ((x) & MAX_RCQ_BD) + + +/* This is needed for determining of last_max */ +#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) +#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) + + +#define BNX2X_SWCID_SHIFT 17 +#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) + +/* used on a CID received from the HW */ +#define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK) +#define CQE_CMD(x) (le32_to_cpu(x) >> \ + COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) + +#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \ + le32_to_cpu((bd)->addr_lo)) +#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) + +#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ +#define BNX2X_DB_SHIFT 7 /* 128 bytes*/ +#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) +#error "Min DB doorbell stride is 8" +#endif +#define DPM_TRIGER_TYPE 0x40 +#define DOORBELL(bp, cid, val) \ + do { \ + writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ + DPM_TRIGER_TYPE); \ + } while (0) + + +/* TX CSUM helpers */ +#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ + skb->csum_offset) +#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \ + skb->csum_offset)) + +#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) + +#define XMIT_PLAIN 0 +#define XMIT_CSUM_V4 0x1 +#define XMIT_CSUM_V6 0x2 +#define XMIT_CSUM_TCP 0x4 +#define XMIT_GSO_V4 0x8 +#define XMIT_GSO_V6 0x10 + +#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6) +#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6) + + +/* stuff added to make the code fit 80Col */ +#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) +#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) +#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) +#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) +#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) + +#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG + +#define BNX2X_IP_CSUM_ERR(cqe) \ + (!((cqe)->fast_path_cqe.status_flags & \ + ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ + ((cqe)->fast_path_cqe.type_error_flags & \ + ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) + +#define BNX2X_L4_CSUM_ERR(cqe) \ + (!((cqe)->fast_path_cqe.status_flags & \ + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ + ((cqe)->fast_path_cqe.type_error_flags & \ + ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) + +#define BNX2X_RX_CSUM_OK(cqe) \ + (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) + +#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ + (((le16_to_cpu(flags) & \ + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \ + == PRS_FLAG_OVERETH_IPV4) +#define BNX2X_RX_SUM_FIX(cqe) \ + BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) + + +#define FP_USB_FUNC_OFF \ + offsetof(struct cstorm_status_block_u, func) +#define FP_CSB_FUNC_OFF \ + offsetof(struct cstorm_status_block_c, func) + +#define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ + /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ +#define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ + /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ +#define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ + /* (HC_INDEX_U_ETH_RX_BD_CONS) */ + +#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ + /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ +#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */ + /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ +#define 
HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */ + /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ +#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */ + /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ + +#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 + + +#define BNX2X_RX_SB_INDEX \ + (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) + +#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0 + +#define BNX2X_TX_SB_INDEX_COS0 \ + (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]) + +/* end of fast path */ + +/* common */ + +struct bnx2x_common { + + u32 chip_id; +/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ +#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0) + +#define CHIP_NUM(bp) (bp->common.chip_id >> 16) +#define CHIP_NUM_57710 0x164e +#define CHIP_NUM_57711 0x164f +#define CHIP_NUM_57711E 0x1650 +#define CHIP_NUM_57712 0x1662 +#define CHIP_NUM_57712_MF 0x1663 +#define CHIP_NUM_57713 0x1651 +#define CHIP_NUM_57713E 0x1652 +#define CHIP_NUM_57800 0x168a +#define CHIP_NUM_57800_MF 0x16a5 +#define CHIP_NUM_57810 0x168e +#define CHIP_NUM_57810_MF 0x16ae +#define CHIP_NUM_57840 0x168d +#define CHIP_NUM_57840_MF 0x16ab +#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) +#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) +#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) +#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) +#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) +#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) +#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) +#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) +#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) +#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) +#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) +#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ + CHIP_IS_57711E(bp)) +#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ + CHIP_IS_57712_MF(bp)) +#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ + CHIP_IS_57800_MF(bp) || \ + CHIP_IS_57810(bp) || \ + CHIP_IS_57810_MF(bp) || \ + CHIP_IS_57840(bp) || \ + CHIP_IS_57840_MF(bp)) +#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) +#define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) +#define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) + +#define CHIP_REV_SHIFT 12 +#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) +#define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK) +#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) +#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) +/* assume maximum 5 revisions */ +#define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000) +/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */ +#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ + !(CHIP_REV_VAL(bp) & 0x00001000)) +/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ +#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ + (CHIP_REV_VAL(bp) & 0x00001000)) + +#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ + ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) + +#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) +#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) +#define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\ + (CHIP_REV_SHIFT + 1)) \ + << CHIP_REV_SHIFT) +#define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? 
\ + CHIP_REV_SIM(bp) :\ + CHIP_REV_VAL(bp)) +#define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \ + (CHIP_REV(bp) == CHIP_REV_Bx)) +#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ + (CHIP_REV(bp) == CHIP_REV_Ax)) + + int flash_size; +#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ +#define BNX2X_NVRAM_TIMEOUT_COUNT 30000 +#define BNX2X_NVRAM_PAGE_SIZE 256 + + u32 shmem_base; + u32 shmem2_base; + u32 mf_cfg_base; + u32 mf2_cfg_base; + + u32 hw_config; + + u32 bc_ver; + + u8 int_block; +#define INT_BLOCK_HC 0 +#define INT_BLOCK_IGU 1 +#define INT_BLOCK_MODE_NORMAL 0 +#define INT_BLOCK_MODE_BW_COMP 2 +#define CHIP_INT_MODE_IS_NBC(bp) \ + (!CHIP_IS_E1x(bp) && \ + !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) +#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) + + u8 chip_port_mode; +#define CHIP_4_PORT_MODE 0x0 +#define CHIP_2_PORT_MODE 0x1 +#define CHIP_PORT_MODE_NONE 0x2 +#define CHIP_MODE(bp) (bp->common.chip_port_mode) +#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE) +}; + +/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */ +#define BNX2X_IGU_STAS_MSG_VF_CNT 64 +#define BNX2X_IGU_STAS_MSG_PF_CNT 4 + +/* end of common */ + +/* port */ + +struct bnx2x_port { + u32 pmf; + + u32 link_config[LINK_CONFIG_SIZE]; + + u32 supported[LINK_CONFIG_SIZE]; +/* link settings - missing defines */ +#define SUPPORTED_2500baseX_Full (1 << 15) + + u32 advertising[LINK_CONFIG_SIZE]; +/* link settings - missing defines */ +#define ADVERTISED_2500baseX_Full (1 << 15) + + u32 phy_addr; + + /* used to synchronize phy accesses */ + struct mutex phy_mutex; + int need_hw_lock; + + u32 port_stx; + + struct nig_stats old_nig_stats; +}; + +/* end of port */ + +#define STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_stats, stat_name) / 4) + +/* slow path */ + +/* slow path work-queue */ +extern struct workqueue_struct *bnx2x_wq; + +#define BNX2X_MAX_NUM_OF_VFS 64 +#define BNX2X_VF_ID_INVALID 0xFF + +/* + * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is + * control by the number of fast-path status blocks supported by the + * device (HW/FW). Each fast-path status block (FP-SB) aka non-default + * status block represents an independent interrupts context that can + * serve a regular L2 networking queue. However special L2 queues such + * as the FCoE queue do not require a FP-SB and other components like + * the CNIC may consume FP-SB reducing the number of possible L2 queues + * + * If the maximum number of FP-SB available is X then: + * a. If CNIC is supported it consumes 1 FP-SB thus the max number of + * regular L2 queues is Y=X-1 + * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor) + * c. If the FCoE L2 queue is supported the actual number of L2 queues + * is Y+1 + * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for + * slow-path interrupts) or Y+2 if CNIC is supported (one additional + * FP interrupt context for the CNIC). + * e. The number of HW context (CID count) is always X or X+1 if FCoE + * L2 queue is supported. the cid for the FCoE L2 queue is always X. 
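+ *
+ * A worked example (X = 16 is assumed purely for illustration): with
+ * CNIC enabled this leaves Y = 15 regular L2 queues; adding the FCoE
+ * L2 queue gives 16 L2 queues in total, served by 15 + 1 + 1 = 17
+ * MSIX vectors (regular queues + slow-path + CNIC) and 17 HW CIDs,
+ * with CID 16 (i.e. X) used by the FCoE L2 queue.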
+ */ + +/* fast-path interrupt contexts E1x */ +#define FP_SB_MAX_E1x 16 +/* fast-path interrupt contexts E2 */ +#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 + +union cdu_context { + struct eth_context eth; + char pad[1024]; +}; + +/* CDU host DB constants */ +#define CDU_ILT_PAGE_SZ_HW 3 +#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */ +#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) + +#ifdef BCM_CNIC +#define CNIC_ISCSI_CID_MAX 256 +#define CNIC_FCOE_CID_MAX 2048 +#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) +#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) +#endif + +#define QM_ILT_PAGE_SZ_HW 0 +#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ +#define QM_CID_ROUND 1024 + +#ifdef BCM_CNIC +/* TM (timers) host DB constants */ +#define TM_ILT_PAGE_SZ_HW 0 +#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ +/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ +#define TM_CONN_NUM 1024 +#define TM_ILT_SZ (8 * TM_CONN_NUM) +#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) + +/* SRC (Searcher) host DB constants */ +#define SRC_ILT_PAGE_SZ_HW 0 +#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ +#define SRC_HASH_BITS 10 +#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ +#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) +#define SRC_T2_SZ SRC_ILT_SZ +#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) + +#endif + +#define MAX_DMAE_C 8 + +/* DMA memory not used in fastpath */ +struct bnx2x_slowpath { + union { + struct mac_configuration_cmd e1x; + struct eth_classify_rules_ramrod_data e2; + } mac_rdata; + + + union { + struct tstorm_eth_mac_filter_config e1x; + struct eth_filter_rules_ramrod_data e2; + } rx_mode_rdata; + + union { + struct mac_configuration_cmd e1; + struct eth_multicast_rules_ramrod_data e2; + } mcast_rdata; + + struct eth_rss_update_ramrod_data rss_rdata; + + /* Queue State related ramrods are always sent under rtnl_lock */ + union { + struct client_init_ramrod_data init_data; + struct client_update_ramrod_data update_data; + } q_rdata; + + union { + struct function_start_data func_start; + /* pfc configuration for DCBX ramrod */ + struct flow_control_configuration pfc_config; + } func_rdata; + + /* used by dmae command executer */ + struct dmae_command dmae[MAX_DMAE_C]; + + u32 stats_comp; + union mac_stats mac_stats; + struct nig_stats nig_stats; + struct host_port_stats port_stats; + struct host_func_stats func_stats; + struct host_func_stats func_stats_base; + + u32 wb_comp; + u32 wb_data[4]; +}; + +#define bnx2x_sp(bp, var) (&bp->slowpath->var) +#define bnx2x_sp_mapping(bp, var) \ + (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) + + +/* attn group wiring */ +#define MAX_DYNAMIC_ATTN_GRPS 8 + +struct attn_route { + u32 sig[5]; +}; + +struct iro { + u32 base; + u16 m1; + u16 m2; + u16 m3; + u16 size; +}; + +struct hw_context { + union cdu_context *vcxt; + dma_addr_t cxt_mapping; + size_t size; +}; + +/* forward */ +struct bnx2x_ilt; + + +enum bnx2x_recovery_state { + BNX2X_RECOVERY_DONE, + BNX2X_RECOVERY_INIT, + BNX2X_RECOVERY_WAIT, + BNX2X_RECOVERY_FAILED +}; + +/* + * Event queue (EQ or event ring) MC hsi + * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 + */ +#define NUM_EQ_PAGES 1 +#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem)) +#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1) +#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES) +#define EQ_DESC_MASK (NUM_EQ_DESC - 1) +#define 
MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2) + +/* depends on EQ_DESC_CNT_PAGE being a power of 2 */ +#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \ + (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1) + +/* depends on the above and on NUM_EQ_PAGES being a power of 2 */ +#define EQ_DESC(x) ((x) & EQ_DESC_MASK) + +#define BNX2X_EQ_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_EQ_CONS]) + +/* This is a data that will be used to create a link report message. + * We will keep the data used for the last link report in order + * to prevent reporting the same link parameters twice. + */ +struct bnx2x_link_report_data { + u16 line_speed; /* Effective line speed */ + unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */ +}; + +enum { + BNX2X_LINK_REPORT_FD, /* Full DUPLEX */ + BNX2X_LINK_REPORT_LINK_DOWN, + BNX2X_LINK_REPORT_RX_FC_ON, + BNX2X_LINK_REPORT_TX_FC_ON, +}; + +enum { + BNX2X_PORT_QUERY_IDX, + BNX2X_PF_QUERY_IDX, + BNX2X_FIRST_QUEUE_QUERY_IDX, +}; + +struct bnx2x_fw_stats_req { + struct stats_query_header hdr; + struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; +}; + +struct bnx2x_fw_stats_data { + struct stats_counter storm_counters; + struct per_port_stats port; + struct per_pf_stats pf; + struct per_queue_stats queue_stats[1]; +}; + +/* Public slow path states */ +enum { + BNX2X_SP_RTNL_SETUP_TC, + BNX2X_SP_RTNL_TX_TIMEOUT, +}; + + +struct bnx2x { + /* Fields used in the tx and intr/napi performance paths + * are grouped together in the beginning of the structure + */ + struct bnx2x_fastpath *fp; + void __iomem *regview; + void __iomem *doorbells; + u16 db_size; + + u8 pf_num; /* absolute PF number */ + u8 pfid; /* per-path PF number */ + int base_fw_ndsb; /**/ +#define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1)) +#define BP_PORT(bp) (bp->pfid & 1) +#define BP_FUNC(bp) (bp->pfid) +#define BP_ABS_FUNC(bp) (bp->pf_num) +#define BP_E1HVN(bp) (bp->pfid >> 1) +#define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ +#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) +#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ + BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) + + struct net_device *dev; + struct pci_dev *pdev; + + const struct iro *iro_arr; +#define IRO (bp->iro_arr) + + enum bnx2x_recovery_state recovery_state; + int is_leader; + struct msix_entry *msix_table; + + int tx_ring_size; + +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) +#define ETH_MIN_PACKET_SIZE 60 +#define ETH_MAX_PACKET_SIZE 1500 +#define ETH_MAX_JUMBO_PACKET_SIZE 9600 + + /* Max supported alignment is 256 (8 shift) */ +#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? 
\ + L1_CACHE_SHIFT : 8) + /* FW use 2 Cache lines Alignment for start packet and size */ +#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT) +#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) + + struct host_sp_status_block *def_status_blk; +#define DEF_SB_IGU_ID 16 +#define DEF_SB_ID HC_SP_SB_ID + __le16 def_idx; + __le16 def_att_idx; + u32 attn_state; + struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; + + /* slow path ring */ + struct eth_spe *spq; + dma_addr_t spq_mapping; + u16 spq_prod_idx; + struct eth_spe *spq_prod_bd; + struct eth_spe *spq_last_bd; + __le16 *dsb_sp_prod; + atomic_t cq_spq_left; /* ETH_XXX ramrods credit */ + /* used to synchronize spq accesses */ + spinlock_t spq_lock; + + /* event queue */ + union event_ring_elem *eq_ring; + dma_addr_t eq_mapping; + u16 eq_prod; + u16 eq_cons; + __le16 *eq_cons_sb; + atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ + + + + /* Counter for marking that there is a STAT_QUERY ramrod pending */ + u16 stats_pending; + /* Counter for completed statistics ramrods */ + u16 stats_comp; + + /* End of fields used in the performance code paths */ + + int panic; + int msg_enable; + + u32 flags; +#define PCIX_FLAG (1 << 0) +#define PCI_32BIT_FLAG (1 << 1) +#define ONE_PORT_FLAG (1 << 2) +#define NO_WOL_FLAG (1 << 3) +#define USING_DAC_FLAG (1 << 4) +#define USING_MSIX_FLAG (1 << 5) +#define USING_MSI_FLAG (1 << 6) +#define DISABLE_MSI_FLAG (1 << 7) +#define TPA_ENABLE_FLAG (1 << 8) +#define NO_MCP_FLAG (1 << 9) + +#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) +#define MF_FUNC_DIS (1 << 11) +#define OWN_CNIC_IRQ (1 << 12) +#define NO_ISCSI_OOO_FLAG (1 << 13) +#define NO_ISCSI_FLAG (1 << 14) +#define NO_FCOE_FLAG (1 << 15) + +#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) +#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) +#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) + + int pm_cap; + int mrrs; + + struct delayed_work sp_task; + struct delayed_work sp_rtnl_task; + + struct delayed_work period_task; + struct timer_list timer; + int current_interval; + + u16 fw_seq; + u16 fw_drv_pulse_wr_seq; + u32 func_stx; + + struct link_params link_params; + struct link_vars link_vars; + u32 link_cnt; + struct bnx2x_link_report_data last_reported_link; + + struct mdio_if_info mdio; + + struct bnx2x_common common; + struct bnx2x_port port; + + struct cmng_struct_per_port cmng; + u32 vn_weight_sum; + u32 mf_config[E1HVN_MAX]; + u32 mf2_config[E2_FUNC_MAX]; + u32 path_has_ovlan; /* E3 */ + u16 mf_ov; + u8 mf_mode; +#define IS_MF(bp) (bp->mf_mode != 0) +#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) +#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) + + u8 wol; + + int rx_ring_size; + + u16 tx_quick_cons_trip_int; + u16 tx_quick_cons_trip; + u16 tx_ticks_int; + u16 tx_ticks; + + u16 rx_quick_cons_trip_int; + u16 rx_quick_cons_trip; + u16 rx_ticks_int; + u16 rx_ticks; +/* Maximal coalescing timeout in us */ +#define BNX2X_MAX_COALESCE_TOUT (0xf0*12) + + u32 lin_cnt; + + u16 state; +#define BNX2X_STATE_CLOSED 0 +#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 +#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 +#define BNX2X_STATE_OPEN 0x3000 +#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 +#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 + +#define BNX2X_STATE_DIAG 0xe000 +#define BNX2X_STATE_ERROR 0xf000 + + int multi_mode; +#define BNX2X_MAX_PRIORITY 8 +#define BNX2X_MAX_ENTRIES_PER_PRI 16 +#define BNX2X_MAX_COS 3 +#define BNX2X_MAX_TX_COS 2 + int num_queues; + int disable_tpa; + + u32 rx_mode; +#define BNX2X_RX_MODE_NONE 0 
+#define BNX2X_RX_MODE_NORMAL 1 +#define BNX2X_RX_MODE_ALLMULTI 2 +#define BNX2X_RX_MODE_PROMISC 3 +#define BNX2X_MAX_MULTICAST 64 + + u8 igu_dsb_id; + u8 igu_base_sb; + u8 igu_sb_cnt; + dma_addr_t def_status_blk_mapping; + + struct bnx2x_slowpath *slowpath; + dma_addr_t slowpath_mapping; + + /* Total number of FW statistics requests */ + u8 fw_stats_num; + + /* + * This is a memory buffer that will contain both statistics + * ramrod request and data. + */ + void *fw_stats; + dma_addr_t fw_stats_mapping; + + /* + * FW statistics request shortcut (points at the + * beginning of fw_stats buffer). + */ + struct bnx2x_fw_stats_req *fw_stats_req; + dma_addr_t fw_stats_req_mapping; + int fw_stats_req_sz; + + /* + * FW statistics data shortcut (points at the begining of + * fw_stats buffer + fw_stats_req_sz). + */ + struct bnx2x_fw_stats_data *fw_stats_data; + dma_addr_t fw_stats_data_mapping; + int fw_stats_data_sz; + + struct hw_context context; + + struct bnx2x_ilt *ilt; +#define BP_ILT(bp) ((bp)->ilt) +#define ILT_MAX_LINES 256 +/* + * Maximum supported number of RSS queues: number of IGU SBs minus one that goes + * to CNIC. + */ +#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT) + +/* + * Maximum CID count that might be required by the bnx2x: + * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related) + */ +#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\ + NON_ETH_CONTEXT_USE + CNIC_PRESENT) +#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ + ILT_PAGE_CIDS)) +#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT)) + + int qm_cid_count; + + int dropless_fc; + +#ifdef BCM_CNIC + u32 cnic_flags; +#define BNX2X_CNIC_FLAG_MAC_SET 1 + void *t2; + dma_addr_t t2_mapping; + struct cnic_ops __rcu *cnic_ops; + void *cnic_data; + u32 cnic_tag; + struct cnic_eth_dev cnic_eth_dev; + union host_hc_status_block cnic_sb; + dma_addr_t cnic_sb_mapping; + struct eth_spe *cnic_kwq; + struct eth_spe *cnic_kwq_prod; + struct eth_spe *cnic_kwq_cons; + struct eth_spe *cnic_kwq_last; + u16 cnic_kwq_pending; + u16 cnic_spq_pending; + u8 fip_mac[ETH_ALEN]; + struct mutex cnic_mutex; + struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; + + /* Start index of the "special" (CNIC related) L2 cleints */ + u8 cnic_base_cl_id; +#endif + + int dmae_ready; + /* used to synchronize dmae accesses */ + spinlock_t dmae_lock; + + /* used to protect the FW mail box */ + struct mutex fw_mb_mutex; + + /* used to synchronize stats collecting */ + int stats_state; + + /* used for synchronization of concurrent threads statistics handling */ + spinlock_t stats_lock; + + /* used by dmae command loader */ + struct dmae_command stats_dmae; + int executer_idx; + + u16 stats_counter; + struct bnx2x_eth_stats eth_stats; + + struct z_stream_s *strm; + void *gunzip_buf; + dma_addr_t gunzip_mapping; + int gunzip_outlen; +#define FW_BUF_SIZE 0x8000 +#define GUNZIP_BUF(bp) (bp->gunzip_buf) +#define GUNZIP_PHYS(bp) (bp->gunzip_mapping) +#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen) + + struct raw_op *init_ops; + /* Init blocks offsets inside init_ops */ + u16 *init_ops_offsets; + /* Data blob - has 32 bit granularity */ + u32 *init_data; + u32 init_mode_flags; +#define INIT_MODE_FLAGS(bp) (bp->init_mode_flags) + /* Zipped PRAM blobs - raw data */ + const u8 *tsem_int_table_data; + const u8 *tsem_pram_data; + const u8 *usem_int_table_data; + const u8 *usem_pram_data; + const u8 *xsem_int_table_data; + const u8 *xsem_pram_data; + const u8 *csem_int_table_data; + const u8 
*csem_pram_data; +#define INIT_OPS(bp) (bp->init_ops) +#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets) +#define INIT_DATA(bp) (bp->init_data) +#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data) +#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data) +#define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data) +#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data) +#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data) +#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data) +#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) +#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) + +#define PHY_FW_VER_LEN 20 + char fw_ver[32]; + const struct firmware *firmware; + + /* DCB support on/off */ + u16 dcb_state; +#define BNX2X_DCB_STATE_OFF 0 +#define BNX2X_DCB_STATE_ON 1 + + /* DCBX engine mode */ + int dcbx_enabled; +#define BNX2X_DCBX_ENABLED_OFF 0 +#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1 +#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2 +#define BNX2X_DCBX_ENABLED_INVALID (-1) + + bool dcbx_mode_uset; + + struct bnx2x_config_dcbx_params dcbx_config_params; + struct bnx2x_dcbx_port_params dcbx_port_params; + int dcb_version; + + /* CAM credit pools */ + struct bnx2x_credit_pool_obj macs_pool; + + /* RX_MODE object */ + struct bnx2x_rx_mode_obj rx_mode_obj; + + /* MCAST object */ + struct bnx2x_mcast_obj mcast_obj; + + /* RSS configuration object */ + struct bnx2x_rss_config_obj rss_conf_obj; + + /* Function State controlling object */ + struct bnx2x_func_sp_obj func_obj; + + unsigned long sp_state; + + /* operation indication for the sp_rtnl task */ + unsigned long sp_rtnl_state; + + /* DCBX Negotation results */ + struct dcbx_features dcbx_local_feat; + u32 dcbx_error; + +#ifdef BCM_DCBNL + struct dcbx_features dcbx_remote_feat; + u32 dcbx_remote_flags; +#endif + u32 pending_max; + + /* multiple tx classes of service */ + u8 max_cos; + + /* priority to cos mapping */ + u8 prio_to_cos[8]; +}; + +/* Tx queues may be less or equal to Rx queues */ +extern int num_queues; +#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) +#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) +#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) + +#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) + +#define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp) +/* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */ + +#define RSS_IPV4_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY + +#define RSS_IPV4_TCP_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY + +#define RSS_IPV6_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY + +#define RSS_IPV6_TCP_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY + +/* func init flags */ +#define FUNC_FLG_RSS 0x0001 +#define FUNC_FLG_STATS 0x0002 +/* removed FUNC_FLG_UNMATCHED 0x0004 */ +#define FUNC_FLG_TPA 0x0008 +#define FUNC_FLG_SPQ 0x0010 +#define FUNC_FLG_LEADING 0x0020 /* PF only */ + + +struct bnx2x_func_init_params { + /* dma */ + dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ + dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ + + u16 func_flgs; + u16 func_id; /* abs fid */ + u16 pf_id; + u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ +}; + +#define for_each_eth_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) + +#define for_each_nondefault_eth_queue(bp, var) \ + for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) + +#define for_each_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if 
(skip_queue(bp, var))	\
+			continue;		\
+		else
+
+/* Skip forwarding FP */
+#define for_each_rx_queue(bp, var) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_rx_queue(bp, var))	\
+			continue;		\
+		else
+
+/* Skip OOO FP */
+#define for_each_tx_queue(bp, var) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_tx_queue(bp, var))	\
+			continue;		\
+		else
+
+#define for_each_nondefault_queue(bp, var) \
+	for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_queue(bp, var))	\
+			continue;		\
+		else
+
+#define for_each_cos_in_tx_queue(fp, var) \
+	for ((var) = 0; (var) < (fp)->max_cos; (var)++)
+
+/* skip rx queue
+ * if FCoE L2 support is disabled and this is the FCoE L2 queue
+ */
+#define skip_rx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+/* skip tx queue
+ * if FCoE L2 support is disabled and this is the FCoE L2 queue
+ */
+#define skip_tx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+#define skip_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+/**
+ * bnx2x_set_mac_one - configure a single MAC address
+ *
+ * @bp:			driver handle
+ * @mac:		MAC to configure
+ * @obj:		MAC object handle
+ * @set:		if 'true' add a new MAC, otherwise - delete
+ * @mac_type:		the type of the MAC to configure (e.g. ETH, UC list)
+ * @ramrod_flags:	RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
+ *
+ * Configures one MAC according to the provided parameters, or continues
+ * the execution of previously scheduled commands if RAMROD_CONT is set
+ * in ramrod_flags.
+ *
+ * Returns zero if the operation completed successfully, a positive
+ * value if it was successfully scheduled, and a negative value if it
+ * failed.
+ */
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+		      struct bnx2x_vlan_mac_obj *obj, bool set,
+		      int mac_type, unsigned long *ramrod_flags);
+
+/**
+ * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
+ *
+ * @bp:			driver handle
+ * @mac_obj:		MAC object handle
+ * @mac_type:		type of the MACs to clear (BNX2X_XXX_MAC)
+ * @wait_for_comp:	if 'true' block until completion
+ *
+ * Deletes all MACs of the specific type (e.g. ETH, UC list).
+ *
+ * Returns zero if the operation completed successfully, a positive
+ * value if it was successfully scheduled, and a negative value if it
+ * failed.
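+ *
+ * A hypothetical call (BNX2X_ETH_MAC stands for one of the
+ * BNX2X_XXX_MAC values and is assumed here, not defined by this
+ * excerpt):
+ *
+ *	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj,
+ *				BNX2X_ETH_MAC, true);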
+ */ +int bnx2x_del_all_macs(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + int mac_type, bool wait_for_comp); + +/* Init Function API */ +void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); +void bnx2x_read_mf_cfg(struct bnx2x *bp); + + +/* dmae */ +void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); +void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, + u32 len32); +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx); +u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type); +u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); +u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, + bool with_comp, u8 comp_type); + + +void bnx2x_calc_fc_adv(struct bnx2x *bp); +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, + u32 data_hi, u32 data_lo, int cmd_type); +void bnx2x_update_coalesce(struct bnx2x *bp); +int bnx2x_get_cur_phy_idx(struct bnx2x *bp); + +static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, + int wait) +{ + u32 val; + + do { + val = REG_RD(bp, reg); + if (val == expected) + break; + ms -= wait; + msleep(wait); + + } while (ms > 0); + + return val; +} + +#define BNX2X_ILT_ZALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) \ + memset(x, 0, size); \ + } while (0) + +#define BNX2X_ILT_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + +#define ILOG2(x) (ilog2((x))) + +#define ILT_NUM_PAGE_ENTRIES (3072) +/* In 57710/11 we use whole table since we have 8 func + * In 57712 we have only 4 func, but use same size per func, then only half of + * the table in use + */ +#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8) + +#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) +/* + * the phys address is shifted right 12 bits and has an added + * 1=valid bit added to the 53rd bit + * then since this is a wide register(TM) + * we split it into two 32 bit writes + */ +#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) +#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) + +/* load/unload mode */ +#define LOAD_NORMAL 0 +#define LOAD_OPEN 1 +#define LOAD_DIAG 2 +#define UNLOAD_NORMAL 0 +#define UNLOAD_CLOSE 1 +#define UNLOAD_RECOVERY 2 + + +/* DMAE command defines */ +#define DMAE_TIMEOUT -1 +#define DMAE_PCI_ERROR -2 /* E2 and onward */ +#define DMAE_NOT_RDY -3 +#define DMAE_PCI_ERR_FLAG 0x80000000 + +#define DMAE_SRC_PCI 0 +#define DMAE_SRC_GRC 1 + +#define DMAE_DST_NONE 0 +#define DMAE_DST_PCI 1 +#define DMAE_DST_GRC 2 + +#define DMAE_COMP_PCI 0 +#define DMAE_COMP_GRC 1 + +/* E2 and onward - PCI error handling in the completion */ + +#define DMAE_COMP_REGULAR 0 +#define DMAE_COM_SET_ERR 1 + +#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \ + DMAE_COMMAND_SRC_SHIFT) +#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \ + DMAE_COMMAND_SRC_SHIFT) + +#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \ + DMAE_COMMAND_DST_SHIFT) +#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \ + DMAE_COMMAND_DST_SHIFT) + +#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \ + DMAE_COMMAND_C_DST_SHIFT) +#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \ + DMAE_COMMAND_C_DST_SHIFT) + +#define 
DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE + +#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT) + +#define DMAE_CMD_PORT_0 0 +#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT + +#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET +#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET +#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT + +#define DMAE_SRC_PF 0 +#define DMAE_SRC_VF 1 + +#define DMAE_DST_PF 0 +#define DMAE_DST_VF 1 + +#define DMAE_C_SRC 0 +#define DMAE_C_DST 1 + +#define DMAE_LEN32_RD_MAX 0x80 +#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) + +#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit + indicates eror */ + +#define MAX_DMAE_C_PER_PORT 8 +#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ + BP_E1HVN(bp)) +#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ + E1HVN_MAX) + +/* PCIE link and speed */ +#define PCICFG_LINK_WIDTH 0x1f00000 +#define PCICFG_LINK_WIDTH_SHIFT 20 +#define PCICFG_LINK_SPEED 0xf0000 +#define PCICFG_LINK_SPEED_SHIFT 16 + + +#define BNX2X_NUM_TESTS 7 + +#define BNX2X_PHY_LOOPBACK 0 +#define BNX2X_MAC_LOOPBACK 1 +#define BNX2X_PHY_LOOPBACK_FAILED 1 +#define BNX2X_MAC_LOOPBACK_FAILED 2 +#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ + BNX2X_PHY_LOOPBACK_FAILED) + + +#define STROM_ASSERT_ARRAY_SIZE 50 + + +/* must be used on a CID before placing it on a HW ring */ +#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ + (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ + (x)) + +#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) +#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) + + +#define BNX2X_BTR 4 +#define MAX_SPQ_PENDING 8 + +/* CMNG constants, as derived from system spec calculations */ +/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ +#define DEF_MIN_RATE 100 +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 +/* number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer */ +#define QM_ARB_BYTES 160000 +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 +/* how many bytes above threshold for the minimal credit of Min algorithm*/ +#define MIN_ABOVE_THRESH 32768 +/* Fairness algorithm integration time coefficient - + * for calculating the actual Tfair */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) +/* Memory of fairness algorithm . 
2 cycles */ +#define FAIR_MEM 2 + + +#define ATTN_NIG_FOR_FUNC (1L << 8) +#define ATTN_SW_TIMER_4_FUNC (1L << 9) +#define GPIO_2_FUNC (1L << 10) +#define GPIO_3_FUNC (1L << 11) +#define GPIO_4_FUNC (1L << 12) +#define ATTN_GENERAL_ATTN_1 (1L << 13) +#define ATTN_GENERAL_ATTN_2 (1L << 14) +#define ATTN_GENERAL_ATTN_3 (1L << 15) +#define ATTN_GENERAL_ATTN_4 (1L << 13) +#define ATTN_GENERAL_ATTN_5 (1L << 14) +#define ATTN_GENERAL_ATTN_6 (1L << 15) + +#define ATTN_HARD_WIRED_MASK 0xff00 +#define ATTENTION_ID 4 + + +/* stuff added to make the code fit 80Col */ + +#define BNX2X_PMF_LINK_ASSERT \ + GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp)) + +#define BNX2X_MC_ASSERT_BITS \ + (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT)) + +#define BNX2X_MCP_ASSERT \ + GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT) + +#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC) +#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) + +#define HW_INTERRUT_ASSERT_SET_0 \ + (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) +#define HW_INTERRUT_ASSERT_SET_1 \ + (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) +#define HW_INTERRUT_ASSERT_SET_2 \ + (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_2 
(AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) + +#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + +#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) + +#define RSS_FLAGS(bp) \ + (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \ + (bp->multi_mode << \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) +#define MULTI_MASK 0x7f + + +#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) +#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) +#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) +#define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func) + +#define DEF_USB_IGU_INDEX_OFF \ + offsetof(struct cstorm_def_status_block_u, igu_index) +#define DEF_CSB_IGU_INDEX_OFF \ + offsetof(struct cstorm_def_status_block_c, igu_index) +#define DEF_XSB_IGU_INDEX_OFF \ + offsetof(struct xstorm_def_status_block, igu_index) +#define DEF_TSB_IGU_INDEX_OFF \ + offsetof(struct tstorm_def_status_block, igu_index) + +#define DEF_USB_SEGMENT_OFF \ + offsetof(struct cstorm_def_status_block_u, segment) +#define DEF_CSB_SEGMENT_OFF \ + offsetof(struct cstorm_def_status_block_c, segment) +#define DEF_XSB_SEGMENT_OFF \ + offsetof(struct xstorm_def_status_block, segment) +#define DEF_TSB_SEGMENT_OFF \ + offsetof(struct tstorm_def_status_block, segment) + +#define BNX2X_SP_DSB_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_DEF_CONS]) + +#define SET_FLAG(value, mask, flag) \ + do {\ + (value) &= ~(mask);\ + (value) |= ((flag) << (mask##_SHIFT));\ + } while (0) + +#define GET_FLAG(value, mask) \ + (((value) & (mask)) >> (mask##_SHIFT)) + +#define GET_FIELD(value, fname) \ + (((value) & (fname##_MASK)) >> (fname##_SHIFT)) + +#define CAM_IS_INVALID(x) \ + (GET_FLAG(x.flags, \ + MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ + (T_ETH_MAC_COMMAND_INVALIDATE)) + +/* Number of u32 elements in MC hash array */ +#define MC_HASH_SIZE 8 +#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ + TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4) + + +#ifndef PXP2_REG_PXP2_INT_STS +#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 +#endif + +#ifndef ETH_MAX_RX_CLIENTS_E2 +#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H +#endif + +#define BNX2X_VPD_LEN 128 +#define VENDOR_ID_LEN 4 + +/* Congestion management fairness mode */ +#define CMNG_FNS_NONE 0 +#define CMNG_FNS_MINMAX 1 + +#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/ +#define HC_SEG_ACCESS_ATTN 4 +#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ + +static const u32 dmae_reg_go_c[] = { + DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, + DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, + DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, + 
DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
+void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_notify_link_changed(struct bnx2x *bp);
+#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
new file mode 100644
index 0000000..d724a18
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,3571 @@
+/* bnx2x_cmn.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/firmware.h>
+#include <linux/prefetch.h>
+#include "bnx2x_cmn.h"
+#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
+
+
+
+/**
+ * bnx2x_bz_fp - zero the contents of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @index:	fastpath index to be zeroed
+ *
+ * Makes sure the contents of bp->fp[index].napi are kept
+ * intact.
+ */
+static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct napi_struct orig_napi = fp->napi;
+	/* bzero bnx2x_fastpath contents */
+	memset(fp, 0, sizeof(*fp));
+
+	/* Restore the NAPI object as it has been already initialized */
+	fp->napi = orig_napi;
+
+	fp->bp = bp;
+	fp->index = index;
+	if (IS_ETH_FP(fp))
+		fp->max_cos = bp->max_cos;
+	else
+		/* Special queues support only one CoS */
+		fp->max_cos = 1;
+
+	/*
+	 * set the tpa flag for each queue. The tpa flag determines the queue
+	 * minimal size so it must be set prior to queue memory allocation
+	 */
+	fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
+
+#ifdef BCM_CNIC
+	/* We don't want TPA on FCoE, FWD and OOO L2 rings */
+	bnx2x_fcoe(bp, disable_tpa) = 1;
+#endif
+}
+
+/**
+ * bnx2x_move_fp - move the contents of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @from:	source FP index
+ * @to:		destination FP index
+ *
+ * Makes sure the contents of bp->fp[to].napi are kept
+ * intact.
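+ * (As in bnx2x_bz_fp() above, the destination NAPI object has already
+ * been initialized, so it is saved before the copy and restored
+ * afterwards rather than being overwritten.)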
+ */ +static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) +{ + struct bnx2x_fastpath *from_fp = &bp->fp[from]; + struct bnx2x_fastpath *to_fp = &bp->fp[to]; + struct napi_struct orig_napi = to_fp->napi; + /* Move bnx2x_fastpath contents */ + memcpy(to_fp, from_fp, sizeof(*to_fp)); + to_fp->index = to; + + /* Restore the NAPI object as it has been already initialized */ + to_fp->napi = orig_napi; +} + +int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ + +/* free skb in the packet ring at pos idx + * return idx of last bd freed + */ +static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, + u16 idx) +{ + struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; + struct eth_tx_start_bd *tx_start_bd; + struct eth_tx_bd *tx_data_bd; + struct sk_buff *skb = tx_buf->skb; + u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; + int nbd; + + /* prefetch skb end pointer to speedup dev_kfree_skb() */ + prefetch(&skb->end); + + DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", + txdata->txq_index, idx, tx_buf, skb); + + /* unmap first bd */ + DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); + tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; + dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), + BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); + + + nbd = le16_to_cpu(tx_start_bd->nbd) - 1; +#ifdef BNX2X_STOP_ON_ERROR + if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { + BNX2X_ERR("BAD nbd!\n"); + bnx2x_panic(); + } +#endif + new_cons = nbd + tx_buf->first_bd; + + /* Get the next bd */ + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + + /* Skip a parse bd... */ + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + + /* ...and the TSO split header bd since they have no mapping */ + if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + + /* now free frags */ + while (nbd > 0) { + + DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; + dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + if (--nbd) + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + + /* release skb */ + WARN_ON(!skb); + dev_kfree_skb_any(skb); + tx_buf->first_bd = 0; + tx_buf->skb = NULL; + + return new_cons; +} + +int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) +{ + struct netdev_queue *txq; + u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -1; +#endif + + txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); + hw_cons = le16_to_cpu(*txdata->tx_cons_sb); + sw_cons = txdata->tx_pkt_cons; + + while (sw_cons != hw_cons) { + u16 pkt_cons; + + pkt_cons = TX_BD(sw_cons); + + DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u " + " pkt_cons %u\n", + txdata->txq_index, hw_cons, sw_cons, pkt_cons); + + bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons); + sw_cons++; + } + + txdata->tx_pkt_cons = sw_cons; + txdata->tx_bd_cons = bd_cons; + + /* Need to make the tx_bd_cons update visible to start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that + * start_xmit() will miss it and cause the queue to be stopped + * forever. + * On the other hand we need an rmb() here to ensure the proper + * ordering of bit testing in the following + * netif_tx_queue_stopped(txq) call. 
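+ *
+ * Roughly, the interleaving being prevented is (an illustrative
+ * ordering, not driver code): start_xmit() fills the ring and stops
+ * the queue based on a stale tx_bd_cons, while this path reads a stale
+ * "queue not stopped" state and skips the wake-up - after which nobody
+ * ever wakes the queue. The barrier here is presumably paired with a
+ * matching one on the start_xmit() side.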
+ */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq))) { + /* Taking tx_lock() is needed to prevent reenabling the queue + * while it's empty. This could have happen if rx_action() gets + * suspended in bnx2x_tx_int() after the condition before + * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): + * + * stops the queue->sees fresh tx_bd_cons->releases the queue-> + * sends some packets consuming the whole queue again-> + * stops the queue + */ + + __netif_tx_lock(txq, smp_processor_id()); + + if ((netif_tx_queue_stopped(txq)) && + (bp->state == BNX2X_STATE_OPEN) && + (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)) + netif_tx_wake_queue(txq); + + __netif_tx_unlock(txq); + } + return 0; +} + +static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, + u16 idx) +{ + u16 last_max = fp->last_max_sge; + + if (SUB_S16(idx, last_max) > 0) + fp->last_max_sge = idx; +} + +static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, + struct eth_fast_path_rx_cqe *fp_cqe) +{ + struct bnx2x *bp = fp->bp; + u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - + le16_to_cpu(fp_cqe->len_on_bd)) >> + SGE_PAGE_SHIFT; + u16 last_max, last_elem, first_elem; + u16 delta = 0; + u16 i; + + if (!sge_len) + return; + + /* First mark all used pages */ + for (i = 0; i < sge_len; i++) + BIT_VEC64_CLEAR_BIT(fp->sge_mask, + RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i]))); + + DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", + sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); + + /* Here we assume that the last SGE index is the biggest */ + prefetch((void *)(fp->sge_mask)); + bnx2x_update_last_max_sge(fp, + le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); + + last_max = RX_SGE(fp->last_max_sge); + last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; + first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; + + /* If ring is not full */ + if (last_elem + 1 != first_elem) + last_elem++; + + /* Now update the prod */ + for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) { + if (likely(fp->sge_mask[i])) + break; + + fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; + delta += BIT_VEC64_ELEM_SZ; + } + + if (delta > 0) { + fp->rx_sge_prod += delta; + /* clear page-end entries */ + bnx2x_clear_sge_mask_next_elems(fp); + } + + DP(NETIF_MSG_RX_STATUS, + "fp->last_max_sge = %d fp->rx_sge_prod = %d\n", + fp->last_max_sge, fp->rx_sge_prod); +} + +static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, + struct sk_buff *skb, u16 cons, u16 prod, + struct eth_fast_path_rx_cqe *cqe) +{ + struct bnx2x *bp = fp->bp; + struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; + struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; + struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + dma_addr_t mapping; + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; + + /* print error if current state != stop */ + if (tpa_info->tpa_state != BNX2X_TPA_STOP) + BNX2X_ERR("start of bin not in stop [%d]\n", queue); + + /* Try to map an empty skb from the aggregation info */ + mapping = dma_map_single(&bp->pdev->dev, + first_buf->skb->data, + fp->rx_buf_size, DMA_FROM_DEVICE); + /* + * ...if it fails - move the skb from the consumer to the producer + * and set the current aggregation state as ERROR to drop it + * when TPA_STOP arrives. 
+ */ + + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + /* Move the BD from the consumer to the producer */ + bnx2x_reuse_rx_skb(fp, cons, prod); + tpa_info->tpa_state = BNX2X_TPA_ERROR; + return; + } + + /* move empty skb from pool to prod */ + prod_rx_buf->skb = first_buf->skb; + dma_unmap_addr_set(prod_rx_buf, mapping, mapping); + /* point prod_bd to new skb */ + prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + + /* move partial skb from cons to pool (don't unmap yet) */ + *first_buf = *cons_rx_buf; + + /* mark bin state as START */ + tpa_info->parsing_flags = + le16_to_cpu(cqe->pars_flags.flags); + tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); + tpa_info->tpa_state = BNX2X_TPA_START; + tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); + tpa_info->placement_offset = cqe->placement_offset; + +#ifdef BNX2X_STOP_ON_ERROR + fp->tpa_queue_used |= (1 << queue); +#ifdef _ASM_GENERIC_INT_L64_H + DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", +#else + DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", +#endif + fp->tpa_queue_used); +#endif +} + +/* Timestamp option length allowed for TPA aggregation: + * + * nop nop kind length echo val + */ +#define TPA_TSTAMP_OPT_LEN 12 +/** + * bnx2x_set_lro_mss - calculate the approximate value of the MSS + * + * @bp: driver handle + * @parsing_flags: parsing flags from the START CQE + * @len_on_bd: total length of the first packet for the + * aggregation. + * + * Approximate value of the MSS for this aggregation calculated using + * the first packet of it. + */ +static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, + u16 len_on_bd) +{ + /* + * TPA arrgregation won't have either IP options or TCP options + * other than timestamp or IPv6 extension headers. + */ + u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); + + if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == + PRS_FLAG_OVERETH_IPV6) + hdrs_len += sizeof(struct ipv6hdr); + else /* IPv4 */ + hdrs_len += sizeof(struct iphdr); + + + /* Check if there was a TCP timestamp, if there is it's will + * always be 12 bytes length: nop nop kind length echo val. + * + * Otherwise FW would close the aggregation. + */ + if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) + hdrs_len += TPA_TSTAMP_OPT_LEN; + + return len_on_bd - hdrs_len; +} + +static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 queue, struct sk_buff *skb, + struct eth_end_agg_rx_cqe *cqe, + u16 cqe_idx) +{ + struct sw_rx_page *rx_pg, old_rx_pg; + u32 i, frag_len, frag_size, pages; + int err; + int j; + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + u16 len_on_bd = tpa_info->len_on_bd; + + frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; + pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; + + /* This is needed in order to enable forwarding support */ + if (frag_size) + skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, + tpa_info->parsing_flags, len_on_bd); + +#ifdef BNX2X_STOP_ON_ERROR + if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { + BNX2X_ERR("SGL length is too long: %d. 
CQE index is %d\n", + pages, cqe_idx); + BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); + bnx2x_panic(); + return -EINVAL; + } +#endif + + /* Run through the SGL and compose the fragmented skb */ + for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { + u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); + + /* FW gives the indices of the SGE as if the ring is an array + (meaning that "next" element will consume 2 indices) */ + frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); + rx_pg = &fp->rx_page_ring[sge_idx]; + old_rx_pg = *rx_pg; + + /* If we fail to allocate a substitute page, we simply stop + where we are and drop the whole packet */ + err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); + if (unlikely(err)) { + fp->eth_q_stats.rx_skb_alloc_failed++; + return err; + } + + /* Unmap the page as we r going to pass it to the stack */ + dma_unmap_page(&bp->pdev->dev, + dma_unmap_addr(&old_rx_pg, mapping), + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + + /* Add one frag and update the appropriate fields in the skb */ + skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); + + skb->data_len += frag_len; + skb->truesize += frag_len; + skb->len += frag_len; + + frag_size -= frag_len; + } + + return 0; +} + +static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 queue, struct eth_end_agg_rx_cqe *cqe, + u16 cqe_idx) +{ + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + struct sw_rx_bd *rx_buf = &tpa_info->first_buf; + u8 pad = tpa_info->placement_offset; + u16 len = tpa_info->len_on_bd; + struct sk_buff *skb = rx_buf->skb; + /* alloc new skb */ + struct sk_buff *new_skb; + u8 old_tpa_state = tpa_info->tpa_state; + + tpa_info->tpa_state = BNX2X_TPA_STOP; + + /* If we there was an error during the handling of the TPA_START - + * drop this aggregation. + */ + if (old_tpa_state == BNX2X_TPA_ERROR) + goto drop; + + /* Try to allocate the new skb */ + new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); + + /* Unmap skb in the pool anyway, as we are going to change + pool entry status to BNX2X_TPA_STOP even if new skb allocation + fails. */ + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + + if (likely(new_skb)) { + prefetch(skb); + prefetch(((char *)(skb)) + L1_CACHE_BYTES); + +#ifdef BNX2X_STOP_ON_ERROR + if (pad + len > fp->rx_buf_size) { + BNX2X_ERR("skb_put is about to fail... " + "pad %d len %d rx_buf_size %d\n", + pad, len, fp->rx_buf_size); + bnx2x_panic(); + return; + } +#endif + + skb_reserve(skb, pad); + skb_put(skb, len); + + skb->protocol = eth_type_trans(skb, bp->dev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) { + if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) + __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); + napi_gro_receive(&fp->napi, skb); + } else { + DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" + " - dropping packet!\n"); + dev_kfree_skb_any(skb); + } + + + /* put new skb in bin */ + rx_buf->skb = new_skb; + + return; + } + +drop: + /* drop the packet and keep the buffer in the bin */ + DP(NETIF_MSG_RX_STATUS, + "Failed to allocate or map a new skb - dropping packet!\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; +} + +/* Set Toeplitz hash value in the skb using the value from the + * CQE (calculated by HW). 
+ */ +static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe, + struct sk_buff *skb) +{ + /* Set Toeplitz hash from CQE */ + if ((bp->dev->features & NETIF_F_RXHASH) && + (cqe->fast_path_cqe.status_flags & + ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) + skb->rxhash = + le32_to_cpu(cqe->fast_path_cqe.rss_hash_result); +} + +int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) +{ + struct bnx2x *bp = fp->bp; + u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; + u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; + int rx_pkt = 0; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return 0; +#endif + + /* CQ "next element" is of the size of the regular element, + that's why it's ok here */ + hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); + if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) + hw_comp_cons++; + + bd_cons = fp->rx_bd_cons; + bd_prod = fp->rx_bd_prod; + bd_prod_fw = bd_prod; + sw_comp_cons = fp->rx_comp_cons; + sw_comp_prod = fp->rx_comp_prod; + + /* Memory barrier necessary as speculative reads of the rx + * buffer can be ahead of the index in the status block + */ + rmb(); + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", + fp->index, hw_comp_cons, sw_comp_cons); + + while (sw_comp_cons != hw_comp_cons) { + struct sw_rx_bd *rx_buf = NULL; + struct sk_buff *skb; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; + u8 cqe_fp_flags; + enum eth_rx_cqe_type cqe_fp_type; + u16 len, pad; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return 0; +#endif + + comp_ring_cons = RCQ_BD(sw_comp_cons); + bd_prod = RX_BD(bd_prod); + bd_cons = RX_BD(bd_cons); + + /* Prefetch the page containing the BD descriptor + at producer's index. It will be needed when new skb is + allocated */ + prefetch((void *)(PAGE_ALIGN((unsigned long) + (&fp->rx_desc_ring[bd_prod])) - + PAGE_SIZE + 1)); + + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; + cqe_fp_flags = cqe_fp->type_error_flags; + cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; + + DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" + " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), + cqe_fp_flags, cqe_fp->status_flags, + le32_to_cpu(cqe_fp->rss_hash_result), + le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len)); + + /* is this a slowpath msg? 
*/ + if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { + bnx2x_sp_event(fp, cqe); + goto next_cqe; + + /* this is an rx packet */ + } else { + rx_buf = &fp->rx_buf_ring[bd_cons]; + skb = rx_buf->skb; + prefetch(skb); + + if (!CQE_TYPE_FAST(cqe_fp_type)) { +#ifdef BNX2X_STOP_ON_ERROR + /* sanity check */ + if (fp->disable_tpa && + (CQE_TYPE_START(cqe_fp_type) || + CQE_TYPE_STOP(cqe_fp_type))) + BNX2X_ERR("START/STOP packet while " + "disable_tpa type %x\n", + CQE_TYPE(cqe_fp_type)); +#endif + + if (CQE_TYPE_START(cqe_fp_type)) { + u16 queue = cqe_fp->queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_start on queue %d\n", + queue); + + bnx2x_tpa_start(fp, queue, skb, + bd_cons, bd_prod, + cqe_fp); + + /* Set Toeplitz hash for LRO skb */ + bnx2x_set_skb_rxhash(bp, cqe, skb); + + goto next_rx; + + } else { + u16 queue = + cqe->end_agg_cqe.queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_stop on queue %d\n", + queue); + + bnx2x_tpa_stop(bp, fp, queue, + &cqe->end_agg_cqe, + comp_ring_cons); +#ifdef BNX2X_STOP_ON_ERROR + if (bp->panic) + return 0; +#endif + + bnx2x_update_sge_prod(fp, cqe_fp); + goto next_cqe; + } + } + /* non TPA */ + len = le16_to_cpu(cqe_fp->pkt_len); + pad = cqe_fp->placement_offset; + dma_sync_single_for_cpu(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + pad + RX_COPY_THRESH, + DMA_FROM_DEVICE); + prefetch(((char *)(skb)) + L1_CACHE_BYTES); + + /* is this an error packet? */ + if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { + DP(NETIF_MSG_RX_ERR, + "ERROR flags %x rx packet %u\n", + cqe_fp_flags, sw_comp_cons); + fp->eth_q_stats.rx_err_discard_pkt++; + goto reuse_rx; + } + + /* Since we don't have a jumbo ring + * copy small packets if mtu > 1500 + */ + if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && + (len <= RX_COPY_THRESH)) { + struct sk_buff *new_skb; + + new_skb = netdev_alloc_skb(bp->dev, len + pad); + if (new_skb == NULL) { + DP(NETIF_MSG_RX_ERR, + "ERROR packet dropped " + "because of alloc failure\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; + goto reuse_rx; + } + + /* aligned copy */ + skb_copy_from_linear_data_offset(skb, pad, + new_skb->data + pad, len); + skb_reserve(new_skb, pad); + skb_put(new_skb, len); + + bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); + + skb = new_skb; + + } else + if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + fp->rx_buf_size, + DMA_FROM_DEVICE); + skb_reserve(skb, pad); + skb_put(skb, len); + + } else { + DP(NETIF_MSG_RX_ERR, + "ERROR packet dropped because " + "of alloc failure\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; +reuse_rx: + bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); + goto next_rx; + } + + skb->protocol = eth_type_trans(skb, bp->dev); + + /* Set Toeplitz hash for a none-LRO skb */ + bnx2x_set_skb_rxhash(bp, cqe, skb); + + skb_checksum_none_assert(skb); + + if (bp->dev->features & NETIF_F_RXCSUM) { + + if (likely(BNX2X_RX_CSUM_OK(cqe))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + fp->eth_q_stats.hw_csum_err++; + } + } + + skb_record_rx_queue(skb, fp->index); + + if (le16_to_cpu(cqe_fp->pars_flags.flags) & + PARSING_FLAGS_VLAN) + __vlan_hwaccel_put_tag(skb, + le16_to_cpu(cqe_fp->vlan_tag)); + napi_gro_receive(&fp->napi, skb); + + +next_rx: + rx_buf->skb = NULL; + + bd_cons = NEXT_RX_IDX(bd_cons); + bd_prod = NEXT_RX_IDX(bd_prod); + bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); + rx_pkt++; +next_cqe: + sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); + sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); + + if (rx_pkt == budget) + break; + } /* while */ + + 
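+
+ /* A brief sketch of the bookkeeping below (nothing beyond the code
+ * above is assumed): bd_cons/bd_prod walk the Rx BD ring while
+ * sw_comp_cons/sw_comp_prod walk the completion queue, advanced via
+ * the page-aware NEXT_RX_IDX()/NEXT_RCQ_IDX() helpers. If the budget
+ * was exhausted (rx_pkt == budget) the loop exits early and NAPI
+ * polls again; either way the HW producers are updated only once per
+ * poll, below, rather than per packet.
+ */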
+ fp->rx_bd_cons = bd_cons;
+ fp->rx_bd_prod = bd_prod_fw;
+ fp->rx_comp_cons = sw_comp_cons;
+ fp->rx_comp_prod = sw_comp_prod;
+
+ /* Update producers */
+ bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
+ fp->rx_sge_prod);
+
+ fp->rx_pkt += rx_pkt;
+ fp->rx_calls++;
+
+ return rx_pkt;
+}
+
+static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct bnx2x_fastpath *fp = fp_cookie;
+ struct bnx2x *bp = fp->bp;
+ u8 cos;
+
+ DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
+ "[fp %d fw_sd %d igusb %d]\n",
+ fp->index, fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+ /* Handle Rx and Tx according to MSI-X vector */
+ prefetch(fp->rx_cons_sb);
+
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata[cos].tx_cons_sb);
+
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
+ napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+
+ return IRQ_HANDLED;
+}
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp)
+{
+ mutex_lock(&bp->port.phy_mutex);
+
+ if (bp->port.need_hw_lock)
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+}
+
+void bnx2x_release_phy_lock(struct bnx2x *bp)
+{
+ if (bp->port.need_hw_lock)
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+
+ mutex_unlock(&bp->port.phy_mutex);
+}
+
+/* calculates MF speed according to current line speed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+ u16 line_speed = bp->link_vars.line_speed;
+ if (IS_MF(bp)) {
+ u16 maxCfg = bnx2x_extract_max_cfg(bp,
+ bp->mf_config[BP_VN(bp)]);
+
+ /* Calculate the current MAX line speed limit for the MF
+ * devices
+ */
+ if (IS_MF_SI(bp))
+ line_speed = (line_speed * maxCfg) / 100;
+ else { /* SD mode */
+ u16 vn_max_rate = maxCfg * 100;
+
+ if (vn_max_rate < line_speed)
+ line_speed = vn_max_rate;
+ }
+ }
+
+ return line_speed;
+}
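+
+/* A worked example of the two branches above (a sketch; the maxCfg
+ * values are hypothetical):
+ *
+ *   SI mode: line_speed = 10000, maxCfg = 30 -> 10000 * 30 / 100 = 3000
+ *   SD mode: maxCfg = 25 -> vn_max_rate = 2500, so a 10000 Mbps link
+ *            is reported as min(10000, 2500) = 2500
+ *
+ * i.e. in SI mode maxCfg acts as a percentage of the physical link,
+ * while in SD mode it is a rate in 100 Mbps units.
+ */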
+
+/**
+ * bnx2x_fill_report_data - fill link report data to report
+ *
+ * @bp: driver handle
+ * @data: link state to update
+ *
+ * It uses non-atomic bit operations because it is called under the mutex.
+ */
+static inline void bnx2x_fill_report_data(struct bnx2x *bp,
+ struct bnx2x_link_report_data *data)
+{
+ u16 line_speed = bnx2x_get_mf_speed(bp);
+
+ memset(data, 0, sizeof(*data));
+
+ /* Fill the report data: effective line speed */
+ data->line_speed = line_speed;
+
+ /* Link is down */
+ if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &data->link_report_flags);
+
+ /* Full DUPLEX */
+ if (bp->link_vars.duplex == DUPLEX_FULL)
+ __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
+
+ /* Rx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
+
+ /* Tx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
+}
+
+/**
+ * bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Calls the __bnx2x_link_report() under the same locking scheme
+ * as a link/PHY state managing code to ensure a consistent link
+ * reporting.
+ */
+
+void bnx2x_link_report(struct bnx2x *bp)
+{
+ bnx2x_acquire_phy_lock(bp);
+ __bnx2x_link_report(bp);
+ bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * __bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Non-atomic implementation.
+ * Should be called under the phy_lock.
+ */
+void __bnx2x_link_report(struct bnx2x *bp)
+{
+ struct bnx2x_link_report_data cur_data;
+
+ /* reread mf_cfg */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_read_mf_cfg(bp);
+
+ /* Read the current link report info */
+ bnx2x_fill_report_data(bp, &cur_data);
+
+ /* Don't report link down or exactly the same link status twice */
+ if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
+ (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->last_reported_link.link_report_flags) &&
+ test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)))
+ return;
+
+ bp->link_cnt++;
+
+ /* We are going to report new link parameters now -
+ * remember the current data for the next time.
+ */
+ memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
+
+ if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)) {
+ netif_carrier_off(bp->dev);
+ netdev_err(bp->dev, "NIC Link is Down\n");
+ return;
+ } else {
+ netif_carrier_on(bp->dev);
+ netdev_info(bp->dev, "NIC Link is Up, ");
+ pr_cont("%d Mbps ", cur_data.line_speed);
+
+ if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
+ &cur_data.link_report_flags))
+ pr_cont("full duplex");
+ else
+ pr_cont("half duplex");
+
+ /* Handle the FC at the end so that only these flags would be
+ * possibly set. This way we may easily check if there is no FC
+ * enabled.
+ */
+ if (cur_data.link_report_flags) {
+ if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &cur_data.link_report_flags)) {
+ pr_cont(", receive ");
+ if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &cur_data.link_report_flags))
+ pr_cont("& transmit ");
+ } else {
+ pr_cont(", transmit ");
+ }
+ pr_cont("flow control ON");
+ }
+ pr_cont("\n");
+ }
+}
+
+void bnx2x_init_rx_rings(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+ int max_agg_queues = CHIP_IS_E1(bp) ?
ETH_MAX_AGGREGATION_QUEUES_E1 : + ETH_MAX_AGGREGATION_QUEUES_E1H_E2; + u16 ring_prod; + int i, j; + + /* Allocate TPA resources */ + for_each_rx_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + DP(NETIF_MSG_IFUP, + "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); + + if (!fp->disable_tpa) { + /* Fill the per-aggregtion pool */ + for (i = 0; i < max_agg_queues; i++) { + struct bnx2x_agg_info *tpa_info = + &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = + &tpa_info->first_buf; + + first_buf->skb = netdev_alloc_skb(bp->dev, + fp->rx_buf_size); + if (!first_buf->skb) { + BNX2X_ERR("Failed to allocate TPA " + "skb pool for queue[%d] - " + "disabling TPA on this " + "queue!\n", j); + bnx2x_free_tpa_pool(bp, fp, i); + fp->disable_tpa = 1; + break; + } + dma_unmap_addr_set(first_buf, mapping, 0); + tpa_info->tpa_state = BNX2X_TPA_STOP; + } + + /* "next page" elements initialization */ + bnx2x_set_next_page_sgl(fp); + + /* set SGEs bit mask */ + bnx2x_init_sge_ring_bit_mask(fp); + + /* Allocate SGEs and initialize the ring elements */ + for (i = 0, ring_prod = 0; + i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { + + if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { + BNX2X_ERR("was only able to allocate " + "%d rx sges\n", i); + BNX2X_ERR("disabling TPA for " + "queue[%d]\n", j); + /* Cleanup already allocated elements */ + bnx2x_free_rx_sge_range(bp, fp, + ring_prod); + bnx2x_free_tpa_pool(bp, fp, + max_agg_queues); + fp->disable_tpa = 1; + ring_prod = 0; + break; + } + ring_prod = NEXT_SGE_IDX(ring_prod); + } + + fp->rx_sge_prod = ring_prod; + } + } + + for_each_rx_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + fp->rx_bd_cons = 0; + + /* Activate BD ring */ + /* Warning! + * this will generate an interrupt (to the TSTORM) + * must only be done after chip is initialized + */ + bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, + fp->rx_sge_prod); + + if (j != 0) + continue; + + if (CHIP_IS_E1(bp)) { + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), + U64_LO(fp->rx_comp_mapping)); + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, + U64_HI(fp->rx_comp_mapping)); + } + } +} + +static void bnx2x_free_tx_skbs(struct bnx2x *bp) +{ + int i; + u8 cos; + + for_each_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + + u16 bd_cons = txdata->tx_bd_cons; + u16 sw_prod = txdata->tx_pkt_prod; + u16 sw_cons = txdata->tx_pkt_cons; + + while (sw_cons != sw_prod) { + bd_cons = bnx2x_free_tx_pkt(bp, txdata, + TX_BD(sw_cons)); + sw_cons++; + } + } + } +} + +static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) +{ + struct bnx2x *bp = fp->bp; + int i; + + /* ring wasn't allocated */ + if (fp->rx_buf_ring == NULL) + return; + + for (i = 0; i < NUM_RX_BD; i++) { + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; + struct sk_buff *skb = rx_buf->skb; + + if (skb == NULL) + continue; + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + + rx_buf->skb = NULL; + dev_kfree_skb(skb); + } +} + +static void bnx2x_free_rx_skbs(struct bnx2x *bp) +{ + int j; + + for_each_rx_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + bnx2x_free_rx_bds(fp); + + if (!fp->disable_tpa) + bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 
+ ETH_MAX_AGGREGATION_QUEUES_E1 : + ETH_MAX_AGGREGATION_QUEUES_E1H_E2); + } +} + +void bnx2x_free_skbs(struct bnx2x *bp) +{ + bnx2x_free_tx_skbs(bp); + bnx2x_free_rx_skbs(bp); +} + +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) +{ + /* load old values */ + u32 mf_cfg = bp->mf_config[BP_VN(bp)]; + + if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { + /* leave all but MAX value */ + mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; + + /* set new MAX value */ + mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) + & FUNC_MF_CFG_MAX_BW_MASK; + + bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); + } +} + +/** + * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors + * + * @bp: driver handle + * @nvecs: number of vectors to be released + */ +static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) +{ + int i, offset = 0; + + if (nvecs == offset) + return; + free_irq(bp->msix_table[offset].vector, bp->dev); + DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", + bp->msix_table[offset].vector); + offset++; +#ifdef BCM_CNIC + if (nvecs == offset) + return; + offset++; +#endif + + for_each_eth_queue(bp, i) { + if (nvecs == offset) + return; + DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d " + "irq\n", i, bp->msix_table[offset].vector); + + free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); + } +} + +void bnx2x_free_irq(struct bnx2x *bp) +{ + if (bp->flags & USING_MSIX_FLAG) + bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + + CNIC_PRESENT + 1); + else if (bp->flags & USING_MSI_FLAG) + free_irq(bp->pdev->irq, bp->dev); + else + free_irq(bp->pdev->irq, bp->dev); +} + +int bnx2x_enable_msix(struct bnx2x *bp) +{ + int msix_vec = 0, i, rc, req_cnt; + + bp->msix_table[msix_vec].entry = msix_vec; + DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", + bp->msix_table[0].entry); + msix_vec++; + +#ifdef BCM_CNIC + bp->msix_table[msix_vec].entry = msix_vec; + DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n", + bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); + msix_vec++; +#endif + /* We need separate vectors for ETH queues only (not FCoE) */ + for_each_eth_queue(bp, i) { + bp->msix_table[msix_vec].entry = msix_vec; + DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " + "(fastpath #%u)\n", msix_vec, msix_vec, i); + msix_vec++; + } + + req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; + + rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); + + /* + * reconfigure number of tx/rx queues according to available + * MSI-X vectors + */ + if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { + /* how less vectors we will have? 
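+ *
+ * As a rough example (hypothetical numbers): with 8 ETH queues,
+ * CNIC and the slowpath vector, req_cnt is 10; if pci_enable_msix()
+ * returns 8, diff is 2 and num_queues is reduced by 2 before
+ * retrying with the 8 vectors that are actually available.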
*/ + int diff = req_cnt - rc; + + DP(NETIF_MSG_IFUP, + "Trying to use less MSI-X vectors: %d\n", rc); + + rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); + + if (rc) { + DP(NETIF_MSG_IFUP, + "MSI-X is not attainable rc %d\n", rc); + return rc; + } + /* + * decrease number of queues by number of unallocated entries + */ + bp->num_queues -= diff; + + DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", + bp->num_queues); + } else if (rc) { + /* fall to INTx if not enough memory */ + if (rc == -ENOMEM) + bp->flags |= DISABLE_MSI_FLAG; + DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); + return rc; + } + + bp->flags |= USING_MSIX_FLAG; + + return 0; +} + +static int bnx2x_req_msix_irqs(struct bnx2x *bp) +{ + int i, rc, offset = 0; + + rc = request_irq(bp->msix_table[offset++].vector, + bnx2x_msix_sp_int, 0, + bp->dev->name, bp->dev); + if (rc) { + BNX2X_ERR("request sp irq failed\n"); + return -EBUSY; + } + +#ifdef BCM_CNIC + offset++; +#endif + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", + bp->dev->name, i); + + rc = request_irq(bp->msix_table[offset].vector, + bnx2x_msix_fp_int, 0, fp->name, fp); + if (rc) { + BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i, + bp->msix_table[offset].vector, rc); + bnx2x_free_msix_irqs(bp, offset); + return -EBUSY; + } + + offset++; + } + + i = BNX2X_NUM_ETH_QUEUES(bp); + offset = 1 + CNIC_PRESENT; + netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" + " ... fp[%d] %d\n", + bp->msix_table[0].vector, + 0, bp->msix_table[offset].vector, + i - 1, bp->msix_table[offset + i - 1].vector); + + return 0; +} + +int bnx2x_enable_msi(struct bnx2x *bp) +{ + int rc; + + rc = pci_enable_msi(bp->pdev); + if (rc) { + DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); + return -1; + } + bp->flags |= USING_MSI_FLAG; + + return 0; +} + +static int bnx2x_req_irq(struct bnx2x *bp) +{ + unsigned long flags; + int rc; + + if (bp->flags & USING_MSI_FLAG) + flags = 0; + else + flags = IRQF_SHARED; + + rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, + bp->dev->name, bp->dev); + return rc; +} + +static inline int bnx2x_setup_irqs(struct bnx2x *bp) +{ + int rc = 0; + if (bp->flags & USING_MSIX_FLAG) { + rc = bnx2x_req_msix_irqs(bp); + if (rc) + return rc; + } else { + bnx2x_ack_int(bp); + rc = bnx2x_req_irq(bp); + if (rc) { + BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); + return rc; + } + if (bp->flags & USING_MSI_FLAG) { + bp->dev->irq = bp->pdev->irq; + netdev_info(bp->dev, "using MSI IRQ %d\n", + bp->pdev->irq); + } + } + + return 0; +} + +static inline void bnx2x_napi_enable(struct bnx2x *bp) +{ + int i; + + for_each_rx_queue(bp, i) + napi_enable(&bnx2x_fp(bp, i, napi)); +} + +static inline void bnx2x_napi_disable(struct bnx2x *bp) +{ + int i; + + for_each_rx_queue(bp, i) + napi_disable(&bnx2x_fp(bp, i, napi)); +} + +void bnx2x_netif_start(struct bnx2x *bp) +{ + if (netif_running(bp->dev)) { + bnx2x_napi_enable(bp); + bnx2x_int_enable(bp); + if (bp->state == BNX2X_STATE_OPEN) + netif_tx_wake_all_queues(bp->dev); + } +} + +void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) +{ + bnx2x_int_disable_sync(bp, disable_hw); + bnx2x_napi_disable(bp); +} + +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + struct bnx2x *bp = netdev_priv(dev); +#ifdef BCM_CNIC + if (NO_FCOE(bp)) + return skb_tx_hash(dev, skb); + else { + struct ethhdr *hdr = (struct ethhdr *)skb->data; + u16 ether_type = ntohs(hdr->h_proto); + + /* Skip VLAN tag if present */ + 
+ if (ether_type == ETH_P_8021Q) {
+ struct vlan_ethhdr *vhdr =
+ (struct vlan_ethhdr *)skb->data;
+
+ ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+ }
+
+ /* If ethertype is FCoE or FIP - use FCoE ring */
+ if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
+ return bnx2x_fcoe_tx(bp, txq_index);
+ }
+#endif
+ /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
+ */
+ return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
+}
+
+void bnx2x_set_num_queues(struct bnx2x *bp)
+{
+ switch (bp->multi_mode) {
+ case ETH_RSS_MODE_DISABLED:
+ bp->num_queues = 1;
+ break;
+ case ETH_RSS_MODE_REGULAR:
+ bp->num_queues = bnx2x_calc_num_queues(bp);
+ break;
+
+ default:
+ bp->num_queues = 1;
+ break;
+ }
+
+ /* Add special queues */
+ bp->num_queues += NON_ETH_CONTEXT_USE;
+}
+
+static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+{
+ int rc, tx, rx;
+
+ tx = MAX_TXQS_PER_COS * bp->max_cos;
+ rx = BNX2X_NUM_ETH_QUEUES(bp);
+
+/* account for fcoe queue */
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ rx += FCOE_PRESENT;
+ tx += FCOE_PRESENT;
+ }
+#endif
+
+ rc = netif_set_real_num_tx_queues(bp->dev, tx);
+ if (rc) {
+ BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
+ return rc;
+ }
+ rc = netif_set_real_num_rx_queues(bp->dev, rx);
+ if (rc) {
+ BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
+ return rc;
+ }
+
+ DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
+ tx, rx);
+
+ return rc;
+}
+
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Always use a mini-jumbo MTU for the FCoE L2 ring */
+ if (IS_FCOE_IDX(i))
+ /*
+ * Although there are no IP frames expected to arrive on
+ * this ring we still want to add an
+ * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+ * overrun attack.
+ */
+ fp->rx_buf_size =
+ BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+ BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ else
+ fp->rx_buf_size =
+ bp->dev->mtu + ETH_OVREHEAD +
+ BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ }
+}
+
+static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
+{
+ int i;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /*
+ * Prepare the initial contents of the indirection table if RSS is
+ * enabled
+ */
+ if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+ for (i = 0; i < sizeof(ind_table); i++)
+ ind_table[i] =
+ bp->fp->cl_id + (i % num_eth_queues);
+ }
+
+ /*
+ * For 57710 and 57711 SEARCHER configuration (rss_keys) is
+ * per-port, so if explicit configuration is needed, do it only
+ * for a PMF.
+ *
+ * For 57712 and newer on the other hand it's a per-function
+ * configuration.
+ */
+ return bnx2x_config_rss_pf(bp, ind_table,
+ bp->port.pmf || !CHIP_IS_E1x(bp));
+}
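+
+/* A worked example of the indirection table fill above (a sketch
+ * assuming a hypothetical first ETH client ID of 1 and
+ * num_eth_queues == 4):
+ *
+ *   ind_table[] = { 1, 2, 3, 4, 1, 2, 3, 4, ... }
+ *
+ * i.e. the RSS hash buckets are round-robined over the client IDs of
+ * the enabled ETH Rx queues.
+ */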
+
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
+{
+ struct bnx2x_config_rss_params params = {0};
+ int i;
+
+ /* Although RSS is meaningless when there is a single HW queue we
+ * still need it enabled in order to have HW Rx hash generated.
+ *
+ * if (!is_eth_multi(bp))
+ * bp->multi_mode = ETH_RSS_MODE_DISABLED;
+ */
+
+ params.rss_obj = &bp->rss_conf_obj;
+
+ __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+ /* RSS mode */
+ switch (bp->multi_mode) {
+ case ETH_RSS_MODE_DISABLED:
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_REGULAR:
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_VLAN_PRI:
+ __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_E1HOV_PRI:
+ __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_IP_DSCP:
+ __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
+ break;
+ default:
+ BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
+ return -EINVAL;
+ }
+
+ /* If RSS is enabled */
+ if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+ /* RSS configuration */
+ __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+
+ /* Hash bits */
+ params.rss_result_mask = MULTI_MASK;
+
+ memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+
+ if (config_hash) {
+ /* RSS keys */
+ for (i = 0; i < sizeof(params.rss_key) / 4; i++)
+ params.rss_key[i] = random32();
+
+ __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
+ }
+ }
+
+ return bnx2x_config_rss(bp, &params);
+}
+
+static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+{
+ struct bnx2x_func_state_params func_params = {0};
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_HW_INIT;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+/*
+ * Cleans the objects that have internal lists without sending
+ * ramrods. Should be run when interrupts are disabled.
+ */
+static void bnx2x_squeeze_objects(struct bnx2x *bp)
+{
+ int rc;
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+
+ /***************** Cleanup MACs' object first *************************/
+
+ /* Wait for completion of requested commands */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ /* Perform a dry cleanup */
+ __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+
+ /* Clean ETH primary MAC */
+ __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0)
+ BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
+
+ /* Cleanup UC list */
+ vlan_mac_flags = 0;
+ __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0)
+ BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
+
+ /***************** Now clean mcast object *****************************/
+ rparam.mcast_obj = &bp->mcast_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+
+ /* Add a DEL command...
*/ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc < 0) + BNX2X_ERR("Failed to add a new DEL command to a multi-cast " + "object: %d\n", rc); + + /* ...and wait until all pending commands are cleared */ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + while (rc != 0) { + if (rc < 0) { + BNX2X_ERR("Failed to clean multi-cast object: %d\n", + rc); + return; + } + + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + } +} + +#ifndef BNX2X_STOP_ON_ERROR +#define LOAD_ERROR_EXIT(bp, label) \ + do { \ + (bp)->state = BNX2X_STATE_ERROR; \ + goto label; \ + } while (0) +#else +#define LOAD_ERROR_EXIT(bp, label) \ + do { \ + (bp)->state = BNX2X_STATE_ERROR; \ + (bp)->panic = 1; \ + return -EBUSY; \ + } while (0) +#endif + +/* must be called with rtnl_lock */ +int bnx2x_nic_load(struct bnx2x *bp, int load_mode) +{ + int port = BP_PORT(bp); + u32 load_code; + int i, rc; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -EPERM; +#endif + + bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; + + /* Set the initial link reported state to link down */ + bnx2x_acquire_phy_lock(bp); + memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); + __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &bp->last_reported_link.link_report_flags); + bnx2x_release_phy_lock(bp); + + /* must be called before memory allocation and HW init */ + bnx2x_ilt_set_info(bp); + + /* + * Zero fastpath structures preserving invariants like napi, which are + * allocated only once, fp index, max_cos, bp pointer. + * Also set fp->disable_tpa. + */ + for_each_queue(bp, i) + bnx2x_bz_fp(bp, i); + + + /* Set the receive queues buffer size */ + bnx2x_set_rx_buf_size(bp); + + if (bnx2x_alloc_mem(bp)) + return -ENOMEM; + + /* As long as bnx2x_alloc_mem() may possibly update + * bp->num_queues, bnx2x_set_real_num_queues() should always + * come after it. + */ + rc = bnx2x_set_real_num_queues(bp); + if (rc) { + BNX2X_ERR("Unable to set real_num_queues\n"); + LOAD_ERROR_EXIT(bp, load_error0); + } + + /* configure multi cos mappings in kernel. + * this configuration may be overriden by a multi class queue discipline + * or by a dcbx negotiation result. 
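+ *
+ * As a rough sketch (hypothetical numbers, see bnx2x_setup_tc()
+ * below): with max_cos == 3 the stack is told about 3 traffic
+ * classes, each backed by its own group of ETH tx queues, so the
+ * skb priority selects the class and the tx hash selects the queue
+ * within it.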
+ */ + bnx2x_setup_tc(bp->dev, bp->max_cos); + + bnx2x_napi_enable(bp); + + /* Send LOAD_REQUEST command to MCP + * Returns the type of LOAD command: + * if it is the first port to be initialized + * common blocks should be initialized, otherwise - not + */ + if (!BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + LOAD_ERROR_EXIT(bp, load_error1); + } + if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { + rc = -EBUSY; /* other port in diagnostic mode */ + LOAD_ERROR_EXIT(bp, load_error1); + } + + } else { + int path = BP_PATH(bp); + + DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + load_count[path][0]++; + load_count[path][1 + port]++; + DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + if (load_count[path][0] == 1) + load_code = FW_MSG_CODE_DRV_LOAD_COMMON; + else if (load_count[path][1 + port] == 1) + load_code = FW_MSG_CODE_DRV_LOAD_PORT; + else + load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; + } + + if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || + (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { + bp->port.pmf = 1; + /* + * We need the barrier to ensure the ordering between the + * writing to bp->port.pmf here and reading it from the + * bnx2x_periodic_task(). + */ + smp_mb(); + queue_delayed_work(bnx2x_wq, &bp->period_task, 0); + } else + bp->port.pmf = 0; + + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); + + /* Init Function state controlling object */ + bnx2x__init_func_obj(bp); + + /* Initialize HW */ + rc = bnx2x_init_hw(bp, load_code); + if (rc) { + BNX2X_ERR("HW init failed, aborting\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error2); + } + + /* Connect to IRQs */ + rc = bnx2x_setup_irqs(bp); + if (rc) { + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error2); + } + + /* Setup NIC internals and enable interrupts */ + bnx2x_nic_init(bp, load_code); + + /* Init per-function objects */ + bnx2x_init_bp_objs(bp); + + if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && + (bp->common.shmem2_base)) { + if (SHMEM2_HAS(bp, dcc_support)) + SHMEM2_WR(bp, dcc_support, + (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | + SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); + } + + bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; + rc = bnx2x_func_start(bp); + if (rc) { + BNX2X_ERR("Function start failed!\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error3); + } + + /* Send LOAD_DONE command to MCP */ + if (!BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + LOAD_ERROR_EXIT(bp, load_error3); + } + } + + rc = bnx2x_setup_leading(bp); + if (rc) { + BNX2X_ERR("Setup leading failed!\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } + +#ifdef BCM_CNIC + /* Enable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); +#endif + + for_each_nondefault_queue(bp, i) { + rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); + if (rc) + LOAD_ERROR_EXIT(bp, load_error4); + } + + rc = bnx2x_init_rss_pf(bp); + if (rc) + LOAD_ERROR_EXIT(bp, load_error4); + + /* Now when Clients are configured we are ready to work */ + bp->state = 
BNX2X_STATE_OPEN; + + /* Configure a ucast MAC */ + rc = bnx2x_set_eth_mac(bp, true); + if (rc) + LOAD_ERROR_EXIT(bp, load_error4); + + if (bp->pending_max) { + bnx2x_update_max_mf_config(bp, bp->pending_max); + bp->pending_max = 0; + } + + if (bp->port.pmf) + bnx2x_initial_phy_init(bp, load_mode); + + /* Start fast path */ + + /* Initialize Rx filter. */ + netif_addr_lock_bh(bp->dev); + bnx2x_set_rx_mode(bp->dev); + netif_addr_unlock_bh(bp->dev); + + /* Start the Tx */ + switch (load_mode) { + case LOAD_NORMAL: + /* Tx queue should be only reenabled */ + netif_tx_wake_all_queues(bp->dev); + break; + + case LOAD_OPEN: + netif_tx_start_all_queues(bp->dev); + smp_mb__after_clear_bit(); + break; + + case LOAD_DIAG: + bp->state = BNX2X_STATE_DIAG; + break; + + default: + break; + } + + if (!bp->port.pmf) + bnx2x__link_status_update(bp); + + /* start the timer */ + mod_timer(&bp->timer, jiffies + bp->current_interval); + +#ifdef BCM_CNIC + bnx2x_setup_cnic_irq_info(bp); + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); +#endif + bnx2x_inc_load_cnt(bp); + + /* Wait for all pending SP commands to complete */ + if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { + BNX2X_ERR("Timeout waiting for SP elements to complete\n"); + bnx2x_nic_unload(bp, UNLOAD_CLOSE); + return -EBUSY; + } + + bnx2x_dcbx_init(bp); + return 0; + +#ifndef BNX2X_STOP_ON_ERROR +load_error4: +#ifdef BCM_CNIC + /* Disable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); +#endif +load_error3: + bnx2x_int_disable_sync(bp, 1); + + /* Clean queueable objects */ + bnx2x_squeeze_objects(bp); + + /* Free SKBs, SGEs, TPA pool and driver internals */ + bnx2x_free_skbs(bp); + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + + /* Release IRQs */ + bnx2x_free_irq(bp); +load_error2: + if (!BP_NOMCP(bp)) { + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + } + + bp->port.pmf = 0; +load_error1: + bnx2x_napi_disable(bp); +load_error0: + bnx2x_free_mem(bp); + + return rc; +#endif /* ! BNX2X_STOP_ON_ERROR */ +} + +/* must be called with rtnl_lock */ +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) +{ + int i; + bool global = false; + + if ((bp->state == BNX2X_STATE_CLOSED) || + (bp->state == BNX2X_STATE_ERROR)) { + /* We can get here if the driver has been unloaded + * during parity error recovery and is either waiting for a + * leader to complete or for other functions to unload and + * then ifdown has been issued. In this case we want to + * unload and let other functions to complete a recovery + * process. + */ + bp->recovery_state = BNX2X_RECOVERY_DONE; + bp->is_leader = 0; + bnx2x_release_leader_lock(bp); + smp_mb(); + + DP(NETIF_MSG_HW, "Releasing a leadership...\n"); + + return -EINVAL; + } + + /* + * It's important to set the bp->state to the value different from + * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() + * may restart the Tx from the NAPI context (see bnx2x_tx_int()). 
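+ *
+ * Note that this pairs with the wake-up path in bnx2x_tx_int(),
+ * which re-checks bp->state == BNX2X_STATE_OPEN under
+ * __netif_tx_lock() before calling netif_tx_wake_queue(); the
+ * smp_mb() below orders the state write against that check.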
+ */
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+ smp_mb();
+
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
+
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+ del_timer_sync(&bp->timer);
+
+ /* Set ALWAYS_ALIVE bit in shmem */
+ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+
+ bnx2x_drv_pulse(bp);
+
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* Cleanup the chip if needed */
+ if (unload_mode != UNLOAD_RECOVERY)
+ bnx2x_chip_cleanup(bp, unload_mode);
+ else {
+ /* Send the UNLOAD_REQUEST to the MCP */
+ bnx2x_send_unload_req(bp, unload_mode);
+
+ /*
+ * Prevent transactions to the host from the functions on the
+ * engine that doesn't reset global blocks in case of a global
+ * attention once global blocks are reset and gates are opened
+ * (the engine whose leader will perform the recovery last).
+ */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_disable(bp);
+
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp);
+ }
+
+ /*
+ * At this stage no more interrupts will arrive so we may safely clean
+ * the queueable objects here in case they failed to get cleaned so far.
+ */
+ bnx2x_squeeze_objects(bp);
+
+ /* There should be no more pending SP commands at this stage */
+ bp->sp_state = 0;
+
+ bp->port.pmf = 0;
+
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+
+ /* Check if there are pending parity attentions. If there are - set
+ * RECOVERY_IN_PROGRESS.
+ */
+ if (bnx2x_chk_parity_attn(bp, &global, false)) {
+ bnx2x_set_reset_in_progress(bp);
+
+ /* Set RESET_IS_GLOBAL if needed */
+ if (global)
+ bnx2x_set_reset_global(bp);
+ }
+
+ /* The last driver must disable a "close the gate" if there is no
+ * parity attention or "process kill" pending.
+ */
+ if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ bnx2x_disable_close_the_gate(bp);
+
+ return 0;
+}
+
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
+{
+ u16 pmcsr;
+
+ /* If there is no power capability, silently succeed */
+ if (!bp->pm_cap) {
+ DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
+ return 0;
+ }
+
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ switch (state) {
+ case PCI_D0:
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+ PCI_PM_CTRL_PME_STATUS));
+
+ if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+ /* delay required during transition out of D3hot */
+ msleep(20);
+ break;
+
+ case PCI_D3hot:
+ /* If there are other clients above don't
+ shut down the power */
+ if (atomic_read(&bp->pdev->enable_cnt) != 1)
+ return 0;
+ /* Don't shut down the power for emulation and FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ return 0;
+
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= 3;
+
+ if (bp->wol)
+ pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pmcsr);
+
+ /* No more memory access after this point until
+ * device is brought back to D0.
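+ *
+ * As a sketch of the register usage above (assuming the standard
+ * PCI PM register layout): PCI_PM_CTRL_STATE_MASK covers the two
+ * low PowerState bits of PMCSR, so "pmcsr |= 3" requests D3hot,
+ * while clearing those bits (the PCI_D0 branch) requests D0.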
+ */ + break; + + default: + return -EINVAL; + } + return 0; +} + +/* + * net_device service functions + */ +int bnx2x_poll(struct napi_struct *napi, int budget) +{ + int work_done = 0; + u8 cos; + struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, + napi); + struct bnx2x *bp = fp->bp; + + while (1) { +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) { + napi_complete(napi); + return 0; + } +#endif + + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) + bnx2x_tx_int(bp, &fp->txdata[cos]); + + + if (bnx2x_has_rx_work(fp)) { + work_done += bnx2x_rx_int(fp, budget - work_done); + + /* must not complete if we consumed full budget */ + if (work_done >= budget) + break; + } + + /* Fall out from the NAPI loop if needed */ + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { +#ifdef BCM_CNIC + /* No need to update SB for FCoE L2 ring as long as + * it's connected to the default SB and the SB + * has been updated when NAPI was scheduled. + */ + if (IS_FCOE_FP(fp)) { + napi_complete(napi); + break; + } +#endif + + bnx2x_update_fpsb_idx(fp); + /* bnx2x_has_rx_work() reads the status block, + * thus we need to ensure that status block indices + * have been actually read (bnx2x_update_fpsb_idx) + * prior to this check (bnx2x_has_rx_work) so that + * we won't write the "newer" value of the status block + * to IGU (if there was a DMA right after + * bnx2x_has_rx_work and if there is no rmb, the memory + * reading (bnx2x_update_fpsb_idx) may be postponed + * to right before bnx2x_ack_sb). In this case there + * will never be another interrupt until there is + * another update of the status block, while there + * is still unhandled work. + */ + rmb(); + + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + napi_complete(napi); + /* Re-enable interrupts */ + DP(NETIF_MSG_HW, + "Update index to %d\n", fp->fp_hc_idx); + bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, + le16_to_cpu(fp->fp_hc_idx), + IGU_INT_ENABLE, 1); + break; + } + } + } + + return work_done; +} + +/* we split the first BD into headers and data BDs + * to ease the pain of our fellow microcode engineers + * we use one mapping for both BDs + * So far this has only been observed to happen + * in Other Operating Systems(TM) + */ +static noinline u16 bnx2x_tx_split(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, + struct sw_tx_bd *tx_buf, + struct eth_tx_start_bd **tx_bd, u16 hlen, + u16 bd_prod, int nbd) +{ + struct eth_tx_start_bd *h_tx_bd = *tx_bd; + struct eth_tx_bd *d_tx_bd; + dma_addr_t mapping; + int old_len = le16_to_cpu(h_tx_bd->nbytes); + + /* first fix first BD */ + h_tx_bd->nbd = cpu_to_le16(nbd); + h_tx_bd->nbytes = cpu_to_le16(hlen); + + DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " + "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, + h_tx_bd->addr_lo, h_tx_bd->nbd); + + /* now get a new data BD + * (after the pbd) and fill it */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; + + mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), + le32_to_cpu(h_tx_bd->addr_lo)) + hlen; + + d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); + + /* this marks the BD as one that has no individual mapping */ + tx_buf->flags |= BNX2X_TSO_SPLIT_BD; + + DP(NETIF_MSG_TX_QUEUED, + "TSO split data size is %d (%x:%x)\n", + d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); + + /* update tx_bd */ + *tx_bd = (struct eth_tx_start_bd 
*)d_tx_bd; + + return bd_prod; +} + +static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) +{ + if (fix > 0) + csum = (u16) ~csum_fold(csum_sub(csum, + csum_partial(t_header - fix, fix, 0))); + + else if (fix < 0) + csum = (u16) ~csum_fold(csum_add(csum, + csum_partial(t_header, -fix, 0))); + + return swab16(csum); +} + +static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) +{ + u32 rc; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + rc = XMIT_PLAIN; + + else { + if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { + rc = XMIT_CSUM_V6; + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; + + } else { + rc = XMIT_CSUM_V4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; + } + } + + if (skb_is_gso_v6(skb)) + rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6; + else if (skb_is_gso(skb)) + rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP; + + return rc; +} + +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) +/* check if packet requires linearization (packet is too fragmented) + no need to check fragmentation if page size > 8K (there will be no + violation to FW restrictions) */ +static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, + u32 xmit_type) +{ + int to_copy = 0; + int hlen = 0; + int first_bd_sz = 0; + + /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ + if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { + + if (xmit_type & XMIT_GSO) { + unsigned short lso_mss = skb_shinfo(skb)->gso_size; + /* Check if LSO packet needs to be copied: + 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ + int wnd_size = MAX_FETCH_BD - 3; + /* Number of windows to check */ + int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; + int wnd_idx = 0; + int frag_idx = 0; + u32 wnd_sum = 0; + + /* Headers length */ + hlen = (int)(skb_transport_header(skb) - skb->data) + + tcp_hdrlen(skb); + + /* Amount of data (w/o headers) on linear part of SKB*/ + first_bd_sz = skb_headlen(skb) - hlen; + + wnd_sum = first_bd_sz; + + /* Calculate the first sum - it's special */ + for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) + wnd_sum += + skb_shinfo(skb)->frags[frag_idx].size; + + /* If there was data on linear skb data - check it */ + if (first_bd_sz > 0) { + if (unlikely(wnd_sum < lso_mss)) { + to_copy = 1; + goto exit_lbl; + } + + wnd_sum -= first_bd_sz; + } + + /* Others are easier: run through the frag list and + check all windows */ + for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { + wnd_sum += + skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size; + + if (unlikely(wnd_sum < lso_mss)) { + to_copy = 1; + break; + } + wnd_sum -= + skb_shinfo(skb)->frags[wnd_idx].size; + } + } else { + /* in non-LSO too fragmented packet should always + be linearized */ + to_copy = 1; + } + } + +exit_lbl: + if (unlikely(to_copy)) + DP(NETIF_MSG_TX_QUEUED, + "Linearization IS REQUIRED for %s packet. " + "num_frags %d hlen %d first_bd_sz %d\n", + (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", + skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); + + return to_copy; +} +#endif + +static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, + u32 xmit_type) +{ + *parsing_data |= (skb_shinfo(skb)->gso_size << + ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & + ETH_TX_PARSE_BD_E2_LSO_MSS; + if ((xmit_type & XMIT_GSO_V6) && + (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) + *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; +} + +/** + * bnx2x_set_pbd_gso - update PBD in GSO case. 
+ * + * @skb: packet skb + * @pbd: parse BD + * @xmit_type: xmit flags + */ +static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) +{ + pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); + pbd->tcp_flags = pbd_tcp_flags(skb); + + if (xmit_type & XMIT_GSO_V4) { + pbd->ip_id = swab16(ip_hdr(skb)->id); + pbd->tcp_pseudo_csum = + swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + + } else + pbd->tcp_pseudo_csum = + swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + + pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN; +} + +/** + * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length + * + * @bp: driver handle + * @skb: packet skb + * @parsing_data: data to be updated + * @xmit_type: xmit flags + * + * 57712 related + */ +static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, + u32 *parsing_data, u32 xmit_type) +{ + *parsing_data |= + ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; + + if (xmit_type & XMIT_CSUM_TCP) { + *parsing_data |= ((tcp_hdrlen(skb) / 4) << + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; + + return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; + } else + /* We support checksum offload for TCP and UDP only. + * No need to pass the UDP header length - it's a constant. + */ + return skb_transport_header(skb) + + sizeof(struct udphdr) - skb->data; +} + +static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) +{ + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; + + if (xmit_type & XMIT_CSUM_V4) + tx_start_bd->bd_flags.as_bitfield |= + ETH_TX_BD_FLAGS_IP_CSUM; + else + tx_start_bd->bd_flags.as_bitfield |= + ETH_TX_BD_FLAGS_IPV6; + + if (!(xmit_type & XMIT_CSUM_TCP)) + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; +} + +/** + * bnx2x_set_pbd_csum - update PBD with checksum and return header length + * + * @bp: driver handle + * @skb: packet skb + * @pbd: parse BD to be updated + * @xmit_type: xmit flags + */ +static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) +{ + u8 hlen = (skb_network_header(skb) - skb->data) >> 1; + + /* for now NS flag is not used in Linux */ + pbd->global_data = + (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << + ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); + + pbd->ip_hlen_w = (skb_transport_header(skb) - + skb_network_header(skb)) >> 1; + + hlen += pbd->ip_hlen_w; + + /* We support checksum offload for TCP and UDP only */ + if (xmit_type & XMIT_CSUM_TCP) + hlen += tcp_hdrlen(skb) / 2; + else + hlen += sizeof(struct udphdr) / 2; + + pbd->total_hlen_w = cpu_to_le16(hlen); + hlen = hlen*2; + + if (xmit_type & XMIT_CSUM_TCP) { + pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); + + } else { + s8 fix = SKB_CS_OFF(skb); /* signed! 
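+ *
+ * A sketch of what the fixup below does (assuming SKB_CS_OFF() is
+ * the signed distance between the checksum start the stack used and
+ * the transport header the HW expects): bnx2x_csum_fix() above folds
+ * csum_partial() over those "fix" bytes into the checksum, adding or
+ * subtracting depending on the sign, before swab16() for the HW.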
*/ + + DP(NETIF_MSG_TX_QUEUED, + "hlen %d fix %d csum before fix %x\n", + le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); + + /* HW bug: fixup the CSUM */ + pbd->tcp_pseudo_csum = + bnx2x_csum_fix(skb_transport_header(skb), + SKB_CS(skb), fix); + + DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", + pbd->tcp_pseudo_csum); + } + + return hlen; +} + +/* called with netif_tx_lock + * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call + * netif_wake_queue() + */ +netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + struct bnx2x_fastpath *fp; + struct netdev_queue *txq; + struct bnx2x_fp_txdata *txdata; + struct sw_tx_bd *tx_buf; + struct eth_tx_start_bd *tx_start_bd, *first_bd; + struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; + struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; + struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; + u32 pbd_e2_parsing_data = 0; + u16 pkt_prod, bd_prod; + int nbd, txq_index, fp_index, txdata_index; + dma_addr_t mapping; + u32 xmit_type = bnx2x_xmit_type(bp, skb); + int i; + u8 hlen = 0; + __le16 pkt_size = 0; + struct ethhdr *eth; + u8 mac_type = UNICAST_ADDRESS; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return NETDEV_TX_BUSY; +#endif + + txq_index = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, txq_index); + + BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); + + /* decode the fastpath index and the cos index from the txq */ + fp_index = TXQ_TO_FP(txq_index); + txdata_index = TXQ_TO_COS(txq_index); + +#ifdef BCM_CNIC + /* + * Override the above for the FCoE queue: + * - FCoE fp entry is right after the ETH entries. + * - FCoE L2 queue uses bp->txdata[0] only. + */ + if (unlikely(!NO_FCOE(bp) && (txq_index == + bnx2x_fcoe_tx(bp, txq_index)))) { + fp_index = FCOE_IDX; + txdata_index = 0; + } +#endif + + /* enable this debug print to view the transmission queue being used + DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d", + txq_index, fp_index, txdata_index); */ + + /* locate the fastpath and the txdata */ + fp = &bp->fp[fp_index]; + txdata = &fp->txdata[txdata_index]; + + /* enable this debug print to view the tranmission details + DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d" + " tx_data ptr %p fp pointer %p", + txdata->cid, fp_index, txdata_index, txdata, fp); */ + + if (unlikely(bnx2x_tx_avail(bp, txdata) < + (skb_shinfo(skb)->nr_frags + 3))) { + fp->eth_q_stats.driver_xoff++; + netif_tx_stop_queue(txq); + BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x " + "protocol(%x,%x) gso type %x xmit_type %x\n", + txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, + ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); + + eth = (struct ethhdr *)skb->data; + + /* set flag according to packet type (UNICAST_ADDRESS is default)*/ + if (unlikely(is_multicast_ether_addr(eth->h_dest))) { + if (is_broadcast_ether_addr(eth->h_dest)) + mac_type = BROADCAST_ADDRESS; + else + mac_type = MULTICAST_ADDRESS; + } + +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) + /* First, check if we need to linearize the skb (due to FW + restrictions). 
No need to check fragmentation if page size > 8K + (there will be no violation to FW restrictions) */ + if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { + /* Statistics of linearization */ + bp->lin_cnt++; + if (skb_linearize(skb) != 0) { + DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " + "silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } +#endif + /* Map skb linear data for DMA */ + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - " + "silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* + Please read carefully. First we use one BD which we mark as start, + then we have a parsing info BD (used for TSO or xsum), + and only then we have the rest of the TSO BDs. + (don't forget to mark the last one as last, + and to unmap only AFTER you write to the BD ...) + And above all, all pdb sizes are in words - NOT DWORDS! + */ + + /* get current pkt produced now - advance it just before sending packet + * since mapping of pages may fail and cause packet to be dropped + */ + pkt_prod = txdata->tx_pkt_prod; + bd_prod = TX_BD(txdata->tx_bd_prod); + + /* get a tx_buf and first BD + * tx_start_bd may be changed during SPLIT, + * but first_bd will always stay first + */ + tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; + tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; + first_bd = tx_start_bd; + + tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, + mac_type); + + /* header nbd */ + SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); + + /* remember the first BD of the packet */ + tx_buf->first_bd = txdata->tx_bd_prod; + tx_buf->skb = skb; + tx_buf->flags = 0; + + DP(NETIF_MSG_TX_QUEUED, + "sending pkt %u @%p next_idx %u bd %u @%p\n", + pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); + + if (vlan_tx_tag_present(skb)) { + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(vlan_tx_tag_get(skb)); + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + } else + tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + + /* turn on parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + if (xmit_type & XMIT_CSUM) + bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); + + if (!CHIP_IS_E1x(bp)) { + pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; + memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); + /* Set PBD in checksum offload case */ + if (xmit_type & XMIT_CSUM) + hlen = bnx2x_set_pbd_csum_e2(bp, skb, + &pbd_e2_parsing_data, + xmit_type); + if (IS_MF_SI(bp)) { + /* + * fill in the MAC addresses in the PBD - for local + * switching + */ + bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, + &pbd_e2->src_mac_addr_mid, + &pbd_e2->src_mac_addr_lo, + eth->h_source); + bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, + &pbd_e2->dst_mac_addr_mid, + &pbd_e2->dst_mac_addr_lo, + eth->h_dest); + } + } else { + pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; + memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); + /* Set PBD in checksum offload case */ + if (xmit_type & XMIT_CSUM) + hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); + + } + + /* Setup the data pointer of the first BD of the packet */ + tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + 
nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ + tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); + pkt_size = tx_start_bd->nbytes; + + DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" + " nbytes %d flags %x vlan %x\n", + tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, + le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), + tx_start_bd->bd_flags.as_bitfield, + le16_to_cpu(tx_start_bd->vlan_or_ethertype)); + + if (xmit_type & XMIT_GSO) { + + DP(NETIF_MSG_TX_QUEUED, + "TSO packet len %d hlen %d total len %d tso size %d\n", + skb->len, hlen, skb_headlen(skb), + skb_shinfo(skb)->gso_size); + + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; + + if (unlikely(skb_headlen(skb) > hlen)) + bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, + &tx_start_bd, hlen, + bd_prod, ++nbd); + if (!CHIP_IS_E1x(bp)) + bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, + xmit_type); + else + bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); + } + + /* Set the PBD's parsing_data field if not zero + * (for the chips newer than 57711). + */ + if (pbd_e2_parsing_data) + pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); + + tx_data_bd = (struct eth_tx_bd *)tx_start_bd; + + /* Handle fragmented skb */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + mapping = dma_map_page(&bp->pdev->dev, frag->page, + frag->page_offset, frag->size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + + DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " + "dropping packet...\n"); + + /* we need to unmap all buffers already mapped + * for this SKB; + * first_bd->nbd needs to be properly updated + * before the call to bnx2x_free_tx_pkt + */ + first_bd->nbd = cpu_to_le16(nbd); + bnx2x_free_tx_pkt(bp, txdata, + TX_BD(txdata->tx_pkt_prod)); + return NETDEV_TX_OK; + } + + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; + if (total_pkt_bd == NULL) + total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; + + tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + tx_data_bd->nbytes = cpu_to_le16(frag->size); + le16_add_cpu(&pkt_size, frag->size); + nbd++; + + DP(NETIF_MSG_TX_QUEUED, + "frag %d bd @%p addr (%x:%x) nbytes %d\n", + i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, + le16_to_cpu(tx_data_bd->nbytes)); + } + + DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); + + /* update with actual num BDs */ + first_bd->nbd = cpu_to_le16(nbd); + + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + /* now send a tx doorbell, counting the next BD + * if the packet contains or ends with it + */ + if (TX_BD_POFF(bd_prod) < nbd) + nbd++; + + /* total_pkt_bytes should be set on the first data BD if + * it's not an LSO packet and there is more than one + * data BD. In this case pkt_size is limited by an MTU value. + * However we prefer to set it for an LSO packet (while we don't + * have to) in order to save some CPU cycles in a non-LSO + * case, which we care much more about.
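+ * For example, a 1400-byte non-LSO packet spread over a linear part + * and two fragments ends up with total_pkt_bytes = 1400 on its first + * data BD, while an LSO packet may legitimately exceed the MTU there.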
+ */ + if (total_pkt_bd != NULL) + total_pkt_bd->total_pkt_bytes = pkt_size; + + if (pbd_e1x) + DP(NETIF_MSG_TX_QUEUED, + "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" + " tcp_flags %x xsum %x seq %u hlen %u\n", + pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, + pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, + pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, + le16_to_cpu(pbd_e1x->total_hlen_w)); + if (pbd_e2) + DP(NETIF_MSG_TX_QUEUED, + "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", + pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid, + pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi, + pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo, + pbd_e2->parsing_data); + DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); + + txdata->tx_pkt_prod++; + /* + * Make sure that the BD data is updated before updating the producer + * since FW might read the BD right after the producer is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW + * assumes packets must have BDs. + */ + wmb(); + + txdata->tx_db.data.prod += nbd; + barrier(); + + DOORBELL(bp, txdata->cid, txdata->tx_db.raw); + + mmiowb(); + + txdata->tx_bd_prod += nbd; + + if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) { + netif_tx_stop_queue(txq); + + /* paired memory barrier is in bnx2x_tx_int(), we have to keep + * ordering of set_bit() in netif_tx_stop_queue() and read of + * fp->bd_tx_cons */ + smp_mb(); + + fp->eth_q_stats.driver_xoff++; + if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3) + netif_tx_wake_queue(txq); + } + txdata->tx_pkt++; + + return NETDEV_TX_OK; +} + +/** + * bnx2x_setup_tc - routine to configure net_device for multi tc + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + * + * callback connected to the ndo_setup_tc function pointer + */ +int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) +{ + int cos, prio, count, offset; + struct bnx2x *bp = netdev_priv(dev); + + /* setup tc must be called under rtnl lock */ + ASSERT_RTNL(); + + /* no traffic classes requested, aborting */ + if (!num_tc) { + netdev_reset_tc(dev); + return 0; + } + + /* requested to support too many traffic classes */ + if (num_tc > bp->max_cos) { + DP(NETIF_MSG_TX_ERR, "support for too many traffic classes" + " requested: %d. 
max supported is %d", + num_tc, bp->max_cos); + return -EINVAL; + } + + /* declare amount of supported traffic classes */ + if (netdev_set_num_tc(dev, num_tc)) { + DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes", + num_tc); + return -EINVAL; + } + + /* configure priority to traffic class mapping */ + for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { + netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", + prio, bp->prio_to_cos[prio]); + } + + + /* Use this configuration to diffrentiate tc0 from other COSes + This can be used for ets or pfc, and save the effort of setting + up a multio class queue disc or negotiating DCBX with a switch + netdev_set_prio_tc_map(dev, 0, 0); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0); + for (prio = 1; prio < 16; prio++) { + netdev_set_prio_tc_map(dev, prio, 1); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1); + } */ + + /* configure traffic class to transmission queue mapping */ + for (cos = 0; cos < bp->max_cos; cos++) { + count = BNX2X_NUM_ETH_QUEUES(bp); + offset = cos * MAX_TXQS_PER_COS; + netdev_set_tc_queue(dev, cos, count, offset); + DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d", + cos, offset, count); + } + + return 0; +} + +/* called with rtnl_lock */ +int bnx2x_change_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct bnx2x *bp = netdev_priv(dev); + int rc = 0; + + if (!is_valid_ether_addr((u8 *)(addr->sa_data))) + return -EINVAL; + + if (netif_running(dev)) { + rc = bnx2x_set_eth_mac(bp, false); + if (rc) + return rc; + } + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + if (netif_running(dev)) + rc = bnx2x_set_eth_mac(bp, true); + + return rc; +} + +static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) +{ + union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); + struct bnx2x_fastpath *fp = &bp->fp[fp_index]; + u8 cos; + + /* Common */ +#ifdef BCM_CNIC + if (IS_FCOE_IDX(fp_index)) { + memset(sb, 0, sizeof(union host_hc_status_block)); + fp->status_blk_mapping = 0; + + } else { +#endif + /* status blocks */ + if (!CHIP_IS_E1x(bp)) + BNX2X_PCI_FREE(sb->e2_sb, + bnx2x_fp(bp, fp_index, + status_blk_mapping), + sizeof(struct host_hc_status_block_e2)); + else + BNX2X_PCI_FREE(sb->e1x_sb, + bnx2x_fp(bp, fp_index, + status_blk_mapping), + sizeof(struct host_hc_status_block_e1x)); +#ifdef BCM_CNIC + } +#endif + /* Rx */ + if (!skip_rx_queue(bp, fp_index)) { + bnx2x_free_rx_bds(fp); + + /* fastpath rx rings: rx_buf rx_desc rx_comp */ + BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); + BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), + bnx2x_fp(bp, fp_index, rx_desc_mapping), + sizeof(struct eth_rx_bd) * NUM_RX_BD); + + BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), + bnx2x_fp(bp, fp_index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * + NUM_RCQ_BD); + + /* SGE ring */ + BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); + BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), + bnx2x_fp(bp, fp_index, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + } + + /* Tx */ + if (!skip_tx_queue(bp, fp_index)) { + /* fastpath tx rings: tx_buf tx_desc */ + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + + DP(BNX2X_MSG_SP, + "freeing tx memory of fp %d cos %d cid %d", + fp_index, cos, txdata->cid); + + BNX2X_FREE(txdata->tx_buf_ring); + BNX2X_PCI_FREE(txdata->tx_desc_ring, + txdata->tx_desc_mapping, + sizeof(union eth_tx_bd_types) * 
NUM_TX_BD); + } + } + /* end of fastpath */ +} + +void bnx2x_free_fp_mem(struct bnx2x *bp) +{ + int i; + for_each_queue(bp, i) + bnx2x_free_fp_mem_at(bp, i); +} + +static inline void set_sb_shortcuts(struct bnx2x *bp, int index) +{ + union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); + if (!CHIP_IS_E1x(bp)) { + bnx2x_fp(bp, index, sb_index_values) = + (__le16 *)status_blk.e2_sb->sb.index_values; + bnx2x_fp(bp, index, sb_running_index) = + (__le16 *)status_blk.e2_sb->sb.running_index; + } else { + bnx2x_fp(bp, index, sb_index_values) = + (__le16 *)status_blk.e1x_sb->sb.index_values; + bnx2x_fp(bp, index, sb_running_index) = + (__le16 *)status_blk.e1x_sb->sb.running_index; + } +} + +static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) +{ + union host_hc_status_block *sb; + struct bnx2x_fastpath *fp = &bp->fp[index]; + int ring_size = 0; + u8 cos; + + /* if rx_ring_size specified - use it */ + int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : + MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); + + /* allocate at least the number of buffers required by FW */ + rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : + MIN_RX_SIZE_TPA, + rx_ring_size); + + /* Common */ + sb = &bnx2x_fp(bp, index, status_blk); +#ifdef BCM_CNIC + if (!IS_FCOE_IDX(index)) { +#endif + /* status blocks */ + if (!CHIP_IS_E1x(bp)) + BNX2X_PCI_ALLOC(sb->e2_sb, + &bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e2)); + else + BNX2X_PCI_ALLOC(sb->e1x_sb, + &bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e1x)); +#ifdef BCM_CNIC + } +#endif + + /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to + * set shortcuts for it. + */ + if (!IS_FCOE_IDX(index)) + set_sb_shortcuts(bp, index); + + /* Tx */ + if (!skip_tx_queue(bp, index)) { + /* fastpath tx rings: tx_buf tx_desc */ + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + + DP(BNX2X_MSG_SP, "allocating tx memory of " + "fp %d cos %d", + index, cos); + + BNX2X_ALLOC(txdata->tx_buf_ring, + sizeof(struct sw_tx_bd) * NUM_TX_BD); + BNX2X_PCI_ALLOC(txdata->tx_desc_ring, + &txdata->tx_desc_mapping, + sizeof(union eth_tx_bd_types) * NUM_TX_BD); + } + } + + /* Rx */ + if (!skip_rx_queue(bp, index)) { + /* fastpath rx rings: rx_buf rx_desc rx_comp */ + BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), + sizeof(struct sw_rx_bd) * NUM_RX_BD); + BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), + &bnx2x_fp(bp, index, rx_desc_mapping), + sizeof(struct eth_rx_bd) * NUM_RX_BD); + + BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring), + &bnx2x_fp(bp, index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * + NUM_RCQ_BD); + + /* SGE ring */ + BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), + sizeof(struct sw_rx_page) * NUM_RX_SGE); + BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), + &bnx2x_fp(bp, index, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + /* RX BD ring */ + bnx2x_set_next_page_rx_bd(fp); + + /* CQ ring */ + bnx2x_set_next_page_rx_cq(fp); + + /* BDs */ + ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); + if (ring_size < rx_ring_size) + goto alloc_mem_err; + } + + return 0; + +/* handles low memory cases */ +alloc_mem_err: + BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n", + index, ring_size); + /* FW will drop all packets if the queue is not big enough, + * so in these cases we disable the queue. + * Min size is different for OOO, TPA and non-TPA queues + */ + if (ring_size < (fp->disable_tpa ? 
+ MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { + /* release memory allocated for this queue */ + bnx2x_free_fp_mem_at(bp, index); + return -ENOMEM; + } + return 0; +} + +int bnx2x_alloc_fp_mem(struct bnx2x *bp) +{ + int i; + + /** + * 1. Allocate FP for leading - fatal if error + * 2. {CNIC} Allocate FCoE FP - fatal if error + * 3. {CNIC} Allocate OOO + FWD - disable OOO if error + * 4. Allocate RSS - fix number of queues if error + */ + + /* leading */ + if (bnx2x_alloc_fp_mem_at(bp, 0)) + return -ENOMEM; + +#ifdef BCM_CNIC + if (!NO_FCOE(bp)) + /* FCoE */ + if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) + /* we will fail the load process instead of + * marking NO_FCOE_FLAG + */ + return -ENOMEM; +#endif + + /* RSS */ + for_each_nondefault_eth_queue(bp, i) + if (bnx2x_alloc_fp_mem_at(bp, i)) + break; + + /* handle memory failures */ + if (i != BNX2X_NUM_ETH_QUEUES(bp)) { + int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; + + WARN_ON(delta < 0); +#ifdef BCM_CNIC + /** + * move non eth FPs next to last eth FP + * must be done in that order + * FCOE_IDX < FWD_IDX < OOO_IDX + */ + + /* move FCoE fp even if NO_FCOE_FLAG is on */ + bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); +#endif + bp->num_queues -= delta; + BNX2X_ERR("Adjusted num of queues from %d to %d\n", + bp->num_queues + delta, bp->num_queues); + } + + return 0; +} + +void bnx2x_free_mem_bp(struct bnx2x *bp) +{ + kfree(bp->fp); + kfree(bp->msix_table); + kfree(bp->ilt); +} + +int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) +{ + struct bnx2x_fastpath *fp; + struct msix_entry *tbl; + struct bnx2x_ilt *ilt; + int msix_table_size = 0; + + /* + * The biggest MSI-X table we might need is the maximum number of fast + * path IGU SBs plus the default SB (for PF). + */ + msix_table_size = bp->igu_sb_cnt + 1; + + /* fp array: RSS plus CNIC related L2 queues */ + fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) * + sizeof(*fp), GFP_KERNEL); + if (!fp) + goto alloc_err; + bp->fp = fp; + + /* msix table */ + tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL); + if (!tbl) + goto alloc_err; + bp->msix_table = tbl; + + /* ilt */ + ilt = kzalloc(sizeof(*ilt), GFP_KERNEL); + if (!ilt) + goto alloc_err; + bp->ilt = ilt; + + return 0; +alloc_err: + bnx2x_free_mem_bp(bp); + return -ENOMEM; + +} + +int bnx2x_reload_if_running(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (unlikely(!netif_running(dev))) + return 0; + + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + return bnx2x_nic_load(bp, LOAD_NORMAL); +} + +int bnx2x_get_cur_phy_idx(struct bnx2x *bp) +{ + u32 sel_phy_idx = 0; + if (bp->link_params.num_phys <= 1) + return INT_PHY; + + if (bp->link_vars.link_up) { + sel_phy_idx = EXT_PHY1; + /* In case link is SERDES, check if the EXT_PHY2 is the one */ + if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && + (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) + sel_phy_idx = EXT_PHY2; + } else { + + switch (bnx2x_phy_selection(&bp->link_params)) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + sel_phy_idx = EXT_PHY1; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + sel_phy_idx = EXT_PHY2; + break; + } + } + + return sel_phy_idx; + +} +int bnx2x_get_link_cfg_idx(struct bnx2x *bp) +{ + u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); + /* + * The selected active PHY is always after swapping (in case PHY + * swapping is enabled).
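For example, with swapping enabled the driver + * may report EXT_PHY1 as the active PHY while its parameters actually + * live in the EXT_PHY2 configuration slot. + *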
So when swapping is enabled, we need to reverse + * the configuration + */ + + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) { + if (sel_phy_idx == EXT_PHY1) + sel_phy_idx = EXT_PHY2; + else if (sel_phy_idx == EXT_PHY2) + sel_phy_idx = EXT_PHY1; + } + return LINK_CONFIG_IDX(sel_phy_idx); +} + +#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) +int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + switch (type) { + case NETDEV_FCOE_WWNN: + *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, + cp->fcoe_wwn_node_name_lo); + break; + case NETDEV_FCOE_WWPN: + *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, + cp->fcoe_wwn_port_name_lo); + break; + default: + return -EINVAL; + } + + return 0; +} +#endif + +/* called with rtnl_lock */ +int bnx2x_change_mtu(struct net_device *dev, int new_mtu) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + printk(KERN_ERR "Handling parity error recovery. Try again later\n"); + return -EAGAIN; + } + + if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || + ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) + return -EINVAL; + + /* This does not race with packet allocation + * because the actual alloc size is + * only updated as part of load + */ + dev->mtu = new_mtu; + + return bnx2x_reload_if_running(dev); +} + +u32 bnx2x_fix_features(struct net_device *dev, u32 features) +{ + struct bnx2x *bp = netdev_priv(dev); + + /* TPA requires Rx CSUM offloading */ + if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) + features &= ~NETIF_F_LRO; + + return features; +} + +int bnx2x_set_features(struct net_device *dev, u32 features) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 flags = bp->flags; + bool bnx2x_reload = false; + + if (features & NETIF_F_LRO) + flags |= TPA_ENABLE_FLAG; + else + flags &= ~TPA_ENABLE_FLAG; + + if (features & NETIF_F_LOOPBACK) { + if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { + bp->link_params.loopback_mode = LOOPBACK_BMAC; + bnx2x_reload = true; + } + } else { + if (bp->link_params.loopback_mode != LOOPBACK_NONE) { + bp->link_params.loopback_mode = LOOPBACK_NONE; + bnx2x_reload = true; + } + } + + if (flags ^ bp->flags) { + bp->flags = flags; + bnx2x_reload = true; + } + + if (bnx2x_reload) { + if (bp->recovery_state == BNX2X_RECOVERY_DONE) + return bnx2x_reload_if_running(dev); + /* else: bnx2x_nic_load() will be called at end of recovery */ + } + + return 0; +} + +void bnx2x_tx_timeout(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + +#ifdef BNX2X_STOP_ON_ERROR + if (!bp->panic) + bnx2x_panic(); +#endif + + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + + /* This allows the netif to be shutdown gracefully before resetting */ + schedule_delayed_work(&bp->sp_rtnl_task, 0); +} + +int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return -ENODEV; + } + bp = netdev_priv(dev); + + rtnl_lock(); + + pci_save_state(pdev); + + if (!netif_running(dev)) { + rtnl_unlock(); + return 0; + } + + netif_device_detach(dev); + + bnx2x_nic_unload(bp, UNLOAD_CLOSE); + + bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); + + rtnl_unlock(); + + return 0; +} + +int bnx2x_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); 
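+ /* Resume mirrors bnx2x_suspend() above: restore PCI state, return + * to D0, reattach the netdev and reload the NIC with LOAD_OPEN. + */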
+ struct bnx2x *bp; + int rc; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return -ENODEV; + } + bp = netdev_priv(dev); + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + printk(KERN_ERR "Handling parity error recovery. Try again later\n"); + return -EAGAIN; + } + + rtnl_lock(); + + pci_restore_state(pdev); + + if (!netif_running(dev)) { + rtnl_unlock(); + return 0; + } + + bnx2x_set_power_state(bp, PCI_D0); + netif_device_attach(dev); + + /* Since the chip was reset, clear the FW sequence number */ + bp->fw_seq = 0; + rc = bnx2x_nic_load(bp, LOAD_OPEN); + + rtnl_unlock(); + + return rc; +} + + +void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, + u32 cid) +{ + /* ustorm cxt validation */ + cxt->ustorm_ag_context.cdu_usage = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), + CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); + /* xcontext validation */ + cxt->xstorm_ag_context.cdu_reserved = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), + CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); +} + +static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, + u8 fw_sb_id, u8 sb_index, + u8 ticks) +{ + + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); + REG_WR8(bp, addr, ticks); + DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", + port, fw_sb_id, sb_index, ticks); +} + +static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, + u16 fw_sb_id, u8 sb_index, + u8 disable) +{ + u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); + u16 flags = REG_RD16(bp, addr); + /* clear and set */ + flags &= ~HC_INDEX_DATA_HC_ENABLED; + flags |= enable_flag; + REG_WR16(bp, addr, flags); + DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", + port, fw_sb_id, sb_index, disable); +} + +void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, + u8 sb_index, u8 disable, u16 usec) +{ + int port = BP_PORT(bp); + u8 ticks = usec / BNX2X_BTR; + + storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); + + disable = disable ? 1 : (usec ? 0 : 1); + storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h new file mode 100644 index 0000000..223bfee --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -0,0 +1,1491 @@ +/* bnx2x_cmn.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ +#ifndef BNX2X_CMN_H +#define BNX2X_CMN_H + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/netdevice.h> + + +#include "bnx2x.h" + +/* This is used as a replacement for an MCP if it's not present */ +extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ + +extern int num_queues; + +/************************ Macros ********************************/ +#define BNX2X_PCI_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + +#define BNX2X_FREE(x) \ + do { \ + if (x) { \ + kfree((void *)x); \ + x = NULL; \ + } \ + } while (0) + +#define BNX2X_PCI_ALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + memset((void *)x, 0, size); \ + } while (0) + +#define BNX2X_ALLOC(x, size) \ + do { \ + x = kzalloc(size, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + } while (0) + +/*********************** Interfaces **************************** + * Functions that need to be implemented by each driver version + */ +/* Init */ + +/** + * bnx2x_send_unload_req - request unload mode from the MCP. + * + * @bp: driver handle + * @unload_mode: requested function's unload mode + * + * Return unload mode returned by the MCP: COMMON, PORT or FUNC. + */ +u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); + +/** + * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. + * + * @bp: driver handle + */ +void bnx2x_send_unload_done(struct bnx2x *bp); + +/** + * bnx2x_config_rss_pf - configure RSS parameters. + * + * @bp: driver handle + * @ind_table: indirection table to configure + * @config_hash: re-configure RSS hash keys configuration + */ +int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); + +/** + * bnx2x__init_func_obj - init function object + * + * @bp: driver handle + * + * Initializes the Function Object with the appropriate + * parameters which include a function slow path driver + * interface. + */ +void bnx2x__init_func_obj(struct bnx2x *bp); + +/** + * bnx2x_setup_queue - setup eth queue. + * + * @bp: driver handle + * @fp: pointer to the fastpath structure + * @leading: boolean + * + */ +int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool leading); + +/** + * bnx2x_setup_leading - bring up a leading eth queue. + * + * @bp: driver handle + */ +int bnx2x_setup_leading(struct bnx2x *bp); + +/** + * bnx2x_fw_command - send the MCP a request + * + * @bp: driver handle + * @command: request + * @param: request's parameter + * + * block until there is a reply + */ +u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); + +/** + * bnx2x_initial_phy_init - initialize link parameters structure variables. + * + * @bp: driver handle + * @load_mode: current mode + */ +u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); + +/** + * bnx2x_link_set - configure hw according to link parameters structure. + * + * @bp: driver handle + */ +void bnx2x_link_set(struct bnx2x *bp); + +/** + * bnx2x_link_test - query link status. + * + * @bp: driver handle + * @is_serdes: bool + * + * Returns 0 if link is UP.
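+ * (used, for example, by the ethtool self-test path to check that + * link came back up after a test)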
+ */ +u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); + +/** + * bnx2x_drv_pulse - write driver pulse to shmem + * + * @bp: driver handle + * + * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox + * in the shmem. + */ +void bnx2x_drv_pulse(struct bnx2x *bp); + +/** + * bnx2x_igu_ack_sb - update IGU with current SB value + * + * @bp: driver handle + * @igu_sb_id: SB id + * @segment: SB segment + * @index: SB index + * @op: SB operation + * @update: is HW update required + */ +void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update); + +/* Disable transactions from chip to host */ +void bnx2x_pf_disable(struct bnx2x *bp); + +/** + * bnx2x__link_status_update - handles link status change. + * + * @bp: driver handle + */ +void bnx2x__link_status_update(struct bnx2x *bp); + +/** + * bnx2x_link_report - report link status to upper layer. + * + * @bp: driver handle + */ +void bnx2x_link_report(struct bnx2x *bp); + +/* Non-atomic version of bnx2x_link_report() */ +void __bnx2x_link_report(struct bnx2x *bp); + +/** + * bnx2x_get_mf_speed - calculate MF speed. + * + * @bp: driver handle + * + * Takes into account current linespeed and MF configuration. + */ +u16 bnx2x_get_mf_speed(struct bnx2x *bp); + +/** + * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler + * + * @irq: irq number + * @dev_instance: private instance + */ +irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); + +/** + * bnx2x_interrupt - non MSI-X interrupt handler + * + * @irq: irq number + * @dev_instance: private instance + */ +irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); +#ifdef BCM_CNIC + +/** + * bnx2x_cnic_notify - send command to cnic driver + * + * @bp: driver handle + * @cmd: command + */ +int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); + +/** + * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information + * + * @bp: driver handle + */ +void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); +#endif + +/** + * bnx2x_int_enable - enable HW interrupts. + * + * @bp: driver handle + */ +void bnx2x_int_enable(struct bnx2x *bp); + +/** + * bnx2x_int_disable_sync - disable interrupts. + * + * @bp: driver handle + * @disable_hw: true, disable HW interrupts. + * + * This function ensures that no + * ISRs or SP DPCs (sp_task) are running after it returns. + */ +void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); + +/** + * bnx2x_nic_init - init driver internals. + * + * @bp: driver handle + * @load_code: COMMON, PORT or FUNCTION + * + * Initializes: + * - rings + * - status blocks + * - etc. + */ +void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); + +/** + * bnx2x_alloc_mem - allocate driver's memory. + * + * @bp: driver handle + */ +int bnx2x_alloc_mem(struct bnx2x *bp); + +/** + * bnx2x_free_mem - release driver's memory. + * + * @bp: driver handle + */ +void bnx2x_free_mem(struct bnx2x *bp); + +/** + * bnx2x_set_num_queues - set number of queues according to mode. + * + * @bp: driver handle + */ +void bnx2x_set_num_queues(struct bnx2x *bp); + +/** + * bnx2x_chip_cleanup - cleanup chip internals. + * + * @bp: driver handle + * @unload_mode: COMMON, PORT, FUNCTION + * + * - Cleanup MAC configuration. + * - Closes clients. + * - etc. + */ +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); + +/** + * bnx2x_acquire_hw_lock - acquire HW lock.
+ * + * @bp: driver handle + * @resource: resource bit which was locked + */ +int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); + +/** + * bnx2x_release_hw_lock - release HW lock. + * + * @bp: driver handle + * @resource: resource bit which was locked + */ +int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); + +/** + * bnx2x_release_leader_lock - release recovery leader lock + * + * @bp: driver handle + */ +int bnx2x_release_leader_lock(struct bnx2x *bp); + +/** + * bnx2x_set_eth_mac - configure eth MAC address in the HW + * + * @bp: driver handle + * @set: set or clear + * + * Configures according to the value in netdev->dev_addr. + */ +int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); + +/** + * bnx2x_set_rx_mode - set MAC filtering configurations. + * + * @dev: netdevice + * + * called with netif_tx_lock from dev_mcast.c + * If bp->state is OPEN, should be called with + * netif_addr_lock_bh() + */ +void bnx2x_set_rx_mode(struct net_device *dev); + +/** + * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. + * + * @bp: driver handle + * + * If bp->state is OPEN, should be called with + * netif_addr_lock_bh(). + */ +void bnx2x_set_storm_rx_mode(struct bnx2x *bp); + +/** + * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. + * + * @bp: driver handle + * @cl_id: client id + * @rx_mode_flags: rx mode configuration + * @rx_accept_flags: rx accept configuration + * @tx_accept_flags: tx accept configuration (tx switch) + * @ramrod_flags: ramrod configuration + */ +void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags); + +/* Parity errors related */ +void bnx2x_inc_load_cnt(struct bnx2x *bp); +u32 bnx2x_dec_load_cnt(struct bnx2x *bp); +bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); +bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); +void bnx2x_set_reset_in_progress(struct bnx2x *bp); +void bnx2x_set_reset_global(struct bnx2x *bp); +void bnx2x_disable_close_the_gate(struct bnx2x *bp); + +/** + * bnx2x_sp_event - handle ramrods completion. + * + * @fp: fastpath handle for the event + * @rr_cqe: eth_rx_cqe + */ +void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); + +/** + * bnx2x_ilt_set_info - prepare ILT configurations. + * + * @bp: driver handle + */ +void bnx2x_ilt_set_info(struct bnx2x *bp); + +/** + * bnx2x_dcbx_init - initialize dcbx protocol. + * + * @bp: driver handle + */ +void bnx2x_dcbx_init(struct bnx2x *bp); + +/** + * bnx2x_set_power_state - set power state to the requested value. + * + * @bp: driver handle + * @state: required state D0 or D3hot + * + * Currently only D0 and D3hot are supported. + */ +int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); + +/** + * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW. 
+ * + * @bp: driver handle + * @value: new value + */ +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); +/* Error handling */ +void bnx2x_panic_dump(struct bnx2x *bp); + +void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); + +/* dev_close main block */ +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); + +/* dev_open main block */ +int bnx2x_nic_load(struct bnx2x *bp, int load_mode); + +/* hard_xmit callback */ +netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); + +/* setup_tc callback */ +int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); + +/* select_queue callback */ +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); + +/* reload helper */ +int bnx2x_reload_if_running(struct net_device *dev); + +int bnx2x_change_mac_addr(struct net_device *dev, void *p); + +/* NAPI poll Rx part */ +int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); + +void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); + +/* NAPI poll Tx part */ +int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); + +/* suspend/resume callbacks */ +int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); +int bnx2x_resume(struct pci_dev *pdev); + +/* Release IRQ vectors */ +void bnx2x_free_irq(struct bnx2x *bp); + +void bnx2x_free_fp_mem(struct bnx2x *bp); +int bnx2x_alloc_fp_mem(struct bnx2x *bp); +void bnx2x_init_rx_rings(struct bnx2x *bp); +void bnx2x_free_skbs(struct bnx2x *bp); +void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); +void bnx2x_netif_start(struct bnx2x *bp); + +/** + * bnx2x_enable_msix - set msix configuration. + * + * @bp: driver handle + * + * fills msix_table, requests vectors, updates num_queues + * according to number of available vectors. 
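+ * (e.g. if the platform grants fewer vectors than the number of + * fast-path queues requested, num_queues is reduced to match the + * vectors actually available)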
+ */ +int bnx2x_enable_msix(struct bnx2x *bp); + +/** + * bnx2x_enable_msi - request msi mode from OS, updates internals accordingly + * + * @bp: driver handle + */ +int bnx2x_enable_msi(struct bnx2x *bp); + +/** + * bnx2x_poll - NAPI callback + * + * @napi: napi structure + * @budget: + * + */ +int bnx2x_poll(struct napi_struct *napi, int budget); + +/** + * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure + * + * @bp: driver handle + */ +int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp); + +/** + * bnx2x_free_mem_bp - release memory outside the main driver structure + * + * @bp: driver handle + */ +void bnx2x_free_mem_bp(struct bnx2x *bp); + +/** + * bnx2x_change_mtu - change mtu netdev callback + * + * @dev: net device + * @new_mtu: requested mtu + * + */ +int bnx2x_change_mtu(struct net_device *dev, int new_mtu); + +#if defined(BCM_CNIC) && (defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)) +/** + * bnx2x_fcoe_get_wwn - return the requested WWN value for this port + * + * @dev: net_device + * @wwn: output buffer + * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port) + * + */ +int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); +#endif +u32 bnx2x_fix_features(struct net_device *dev, u32 features); +int bnx2x_set_features(struct net_device *dev, u32 features); + +/** + * bnx2x_tx_timeout - tx timeout netdev callback + * + * @dev: net device + */ +void bnx2x_tx_timeout(struct net_device *dev); + +/*********************** Inlines **********************************/ +/*********************** Fast path ********************************/ +static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) +{ + barrier(); /* status block is written to by the chip */ + fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; +} + +static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 bd_prod, + u16 rx_comp_prod, u16 rx_sge_prod, u32 start) +{ + struct ustorm_eth_rx_producers rx_prods = {0}; + u32 i; + + /* Update producers */ + rx_prods.bd_prod = bd_prod; + rx_prods.cqe_prod = rx_comp_prod; + rx_prods.sge_prod = rx_sge_prod; + + /* + * Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW + * assumes BDs must have buffers.
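+ * Concretely: if the producer update became visible to the chip + * before the BD/SGE writes above, the FW could fetch a descriptor + * whose buffer address has not yet reached memory; wmb() rules out + * that reordering.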
+ */ + wmb(); + + for (i = 0; i < sizeof(rx_prods)/4; i++) + REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]); + + mmiowb(); /* keep prod updates ordered */ + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", + fp->index, bd_prod, rx_comp_prod, rx_sge_prod); +} + +static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, + u8 segment, u16 index, u8 op, + u8 update, u32 igu_addr) +{ + struct igu_regular cmd_data = {0}; + + cmd_data.sb_id_and_flags = + ((index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT)); + + DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n", + cmd_data.sb_id_and_flags, igu_addr); + REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); +} + +static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, + u8 idu_sb_id, bool is_Pf) +{ + u32 data, ctl, cnt = 100; + u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; + u32 sb_bit = 1 << (idu_sb_id%32); + u32 func_encode = func | + ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT); + u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; + + /* Not supported in BC mode */ + if (CHIP_INT_MODE_IS_BC(bp)) + return; + + data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup + << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | + IGU_REGULAR_CLEANUP_SET | + IGU_REGULAR_BCLEANUP; + + ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | + func_encode << IGU_CTRL_REG_FID_SHIFT | + IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + data, igu_addr_data); + REG_WR(bp, igu_addr_data, data); + mmiowb(); + barrier(); + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + ctl, igu_addr_ctl); + REG_WR(bp, igu_addr_ctl, ctl); + mmiowb(); + barrier(); + + /* wait for clean up to finish */ + while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) + msleep(20); + + + if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { + DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: " + "idu_sb_id %d offset %d bit %d (cnt %d)\n", + idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); + } +} + +static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, + u8 storm, u16 index, u8 op, u8 update) +{ + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_INT_ACK); + struct igu_ack_register igu_ack; + + igu_ack.status_block_index = index; + igu_ack.sb_id_and_flags = + ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | + (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | + (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | + (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + + DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", + (*(u32 *)&igu_ack), hc_addr); + REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); +} + +static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, + u16 index, u8 op, u8 update) +{ + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update); + else { + u8 segment; + + if (CHIP_INT_MODE_IS_BC(bp)) + segment = storm; + else if (igu_sb_id != bp->igu_dsb_id) + segment = IGU_SEG_ACCESS_DEF; + else if (storm == ATTENTION_ID) + segment = IGU_SEG_ACCESS_ATTN; + else + segment = IGU_SEG_ACCESS_DEF; + bnx2x_igu_ack_sb(bp, igu_sb_id, segment, 
index, op, update); + } +} + +static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp) +{ + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_SIMD_MASK); + u32 result = REG_RD(bp, hc_addr); + + DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", + result, hc_addr); + + barrier(); + return result; +} + +static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) +{ + u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8); + u32 result = REG_RD(bp, igu_addr); + + DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n", + result, igu_addr); + + barrier(); + return result; +} + +static inline u16 bnx2x_ack_int(struct bnx2x *bp) +{ + barrier(); + if (bp->common.int_block == INT_BLOCK_HC) + return bnx2x_hc_ack_int(bp); + else + return bnx2x_igu_ack_int(bp); +} + +static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata) +{ + /* Tell compiler that consumer and producer can change */ + barrier(); + return txdata->tx_pkt_prod != txdata->tx_pkt_cons; +} + +static inline u16 bnx2x_tx_avail(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata) +{ + s16 used; + u16 prod; + u16 cons; + + prod = txdata->tx_bd_prod; + cons = txdata->tx_bd_cons; + + /* NUM_TX_RINGS = number of "next-page" entries + It will be used as a threshold */ + used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; + +#ifdef BNX2X_STOP_ON_ERROR + WARN_ON(used < 0); + WARN_ON(used > bp->tx_ring_size); + WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL); +#endif + + return (s16)(bp->tx_ring_size) - used; +} + +static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata) +{ + u16 hw_cons; + + /* Tell compiler that status block fields can change */ + barrier(); + hw_cons = le16_to_cpu(*txdata->tx_cons_sb); + return hw_cons != txdata->tx_pkt_cons; +} + +static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) +{ + u8 cos; + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) + return true; + return false; +} + +static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) +{ + u16 rx_cons_sb; + + /* Tell compiler that status block fields can change */ + barrier(); + rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); + if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) + rx_cons_sb++; + return (fp->rx_comp_cons != rx_cons_sb); +} + +/** + * bnx2x_tx_disable - disables tx from stack point of view + * + * @bp: driver handle + */ +static inline void bnx2x_tx_disable(struct bnx2x *bp) +{ + netif_tx_disable(bp->dev); + netif_carrier_off(bp->dev); +} + +static inline void bnx2x_free_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct page *page = sw_buf->page; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + + /* Skip "next page" elements */ + if (!page) + return; + + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + __free_pages(page, PAGES_PER_SGE_SHIFT); + + sw_buf->page = NULL; + sge->addr_hi = 0; + sge->addr_lo = 0; +} + +static inline void bnx2x_add_all_napi(struct bnx2x *bp) +{ + int i; + + /* Add NAPI objects */ + for_each_rx_queue(bp, i) + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, BNX2X_NAPI_WEIGHT); +} + +static inline void bnx2x_del_all_napi(struct bnx2x *bp) +{ + int i; + + for_each_rx_queue(bp, i) + netif_napi_del(&bnx2x_fp(bp, i, napi)); +} + +static inline void bnx2x_disable_msi(struct bnx2x *bp) +{ + if (bp->flags & USING_MSIX_FLAG) { + pci_disable_msix(bp->pdev); + bp->flags &= 
~USING_MSIX_FLAG; + } else if (bp->flags & USING_MSI_FLAG) { + pci_disable_msi(bp->pdev); + bp->flags &= ~USING_MSI_FLAG; + } +} + +static inline int bnx2x_calc_num_queues(struct bnx2x *bp) +{ + return num_queues ? + min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : + min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp)); +} + +static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) +{ + int i, j; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + int idx = RX_SGE_CNT * i - 1; + + for (j = 0; j < 2; j++) { + BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); + idx--; + } + } +} + +static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) +{ + /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ + memset(fp->sge_mask, 0xff, + (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64)); + + /* Clear the two last indices in the page to 1: + these are the indices that correspond to the "next" element, + hence will never be indicated and should be removed from + the calculations. */ + bnx2x_clear_sge_mask_next_elems(fp); +} + +static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + dma_addr_t mapping; + + if (unlikely(page == NULL)) + return -ENOMEM; + + mapping = dma_map_page(&bp->pdev->dev, page, 0, + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + __free_pages(page, PAGES_PER_SGE_SHIFT); + return -ENOMEM; + } + + sw_buf->page = page; + dma_unmap_addr_set(sw_buf, mapping, mapping); + + sge->addr_hi = cpu_to_le32(U64_HI(mapping)); + sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; +} + +static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct sk_buff *skb; + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; + struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; + dma_addr_t mapping; + + skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); + if (unlikely(skb == NULL)) + return -ENOMEM; + + mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + dev_kfree_skb_any(skb); + return -ENOMEM; + } + + rx_buf->skb = skb; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; +} + +/* note that we are not allocating a new skb, + * we are just moving one from cons to prod + * we are not creating a new mapping, + * so there is no need to check for dma_mapping_error(). + */ +static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, + u16 cons, u16 prod) +{ + struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; + struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; + struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; + struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); + prod_rx_buf->skb = cons_rx_buf->skb; + *prod_bd = *cons_bd; +} + +/************************* Init ******************************************/ + +/** + * bnx2x_func_start - init function + * + * @bp: driver handle + * + * Must be called before sending CLIENT_SETUP for the first client. 
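+ * (the START ramrod prepared below carries mf_mode, sd_vlan_tag and + * the network CoS mode to the FW)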
+ */ +static inline int bnx2x_func_start(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_start_params *start_params = + &func_params.params.start; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_START; + + /* Function parameters */ + start_params->mf_mode = bp->mf_mode; + start_params->sd_vlan_tag = bp->mf_ov; + if (CHIP_IS_E1x(bp)) + start_params->network_cos_mode = OVERRIDE_COS; + else + start_params->network_cos_mode = STATIC_COS; + + return bnx2x_func_state_change(bp, &func_params); +} + + +/** + * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format + * + * @fw_hi: pointer to upper part + * @fw_mid: pointer to middle part + * @fw_lo: pointer to lower part + * @mac: pointer to MAC address + */ +static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo, + u8 *mac) +{ + ((u8 *)fw_hi)[0] = mac[1]; + ((u8 *)fw_hi)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lo)[0] = mac[5]; + ((u8 *)fw_lo)[1] = mac[4]; +} + +static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) +{ + int i; + + if (fp->disable_tpa) + return; + + for (i = 0; i < last; i++) + bnx2x_free_rx_sge(bp, fp, i); +} + +static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) +{ + int i; + + for (i = 0; i < last; i++) { + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; + struct sk_buff *skb = first_buf->skb; + + if (skb == NULL) { + DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); + continue; + } + if (tpa_info->tpa_state == BNX2X_TPA_START) + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(first_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + dev_kfree_skb(skb); + first_buf->skb = NULL; + } +} + +static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) +{ + int i; + + for (i = 1; i <= NUM_TX_RINGS; i++) { + struct eth_tx_next_bd *tx_next_bd = + &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; + + tx_next_bd->addr_hi = + cpu_to_le32(U64_HI(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + tx_next_bd->addr_lo = + cpu_to_le32(U64_LO(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + } + + SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); + txdata->tx_db.data.zero_fill1 = 0; + txdata->tx_db.data.prod = 0; + + txdata->tx_pkt_prod = 0; + txdata->tx_pkt_cons = 0; + txdata->tx_bd_prod = 0; + txdata->tx_bd_cons = 0; + txdata->tx_pkt = 0; +} + +static inline void bnx2x_init_tx_rings(struct bnx2x *bp) +{ + int i; + u8 cos; + + for_each_tx_queue(bp, i) + for_each_cos_in_tx_queue(&bp->fp[i], cos) + bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); +} + +static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RX_RINGS; i++) { + struct eth_rx_bd *rx_bd; + + rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; + rx_bd->addr_hi = + cpu_to_le32(U64_HI(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + rx_bd->addr_lo = + cpu_to_le32(U64_LO(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + } +} + +static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + struct eth_rx_sge *sge; + + sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; + sge->addr_hi = + 
cpu_to_le32(U64_HI(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + + sge->addr_lo = + cpu_to_le32(U64_LO(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + } +} + +static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) +{ + int i; + for (i = 1; i <= NUM_RCQ_RINGS; i++) { + struct eth_rx_cqe_next_page *nextpg; + + nextpg = (struct eth_rx_cqe_next_page *) + &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; + nextpg->addr_hi = + cpu_to_le32(U64_HI(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + nextpg->addr_lo = + cpu_to_le32(U64_LO(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + } +} + +/* Returns the number of actually allocated BDs */ +static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, + int rx_ring_size) +{ + struct bnx2x *bp = fp->bp; + u16 ring_prod, cqe_ring_prod; + int i; + + fp->rx_comp_cons = 0; + cqe_ring_prod = ring_prod = 0; + + /* This routine is called only during fp init so + * fp->eth_q_stats.rx_skb_alloc_failed = 0 + */ + for (i = 0; i < rx_ring_size; i++) { + if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { + fp->eth_q_stats.rx_skb_alloc_failed++; + continue; + } + ring_prod = NEXT_RX_IDX(ring_prod); + cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); + WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed)); + } + + if (fp->eth_q_stats.rx_skb_alloc_failed) + BNX2X_ERR("was only able to allocate " + "%d rx skbs on queue[%d]\n", + (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index); + + fp->rx_bd_prod = ring_prod; + /* Limit the CQE producer by the CQE ring size */ + fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, + cqe_ring_prod); + fp->rx_pkt = fp->rx_calls = 0; + + return i - fp->eth_q_stats.rx_skb_alloc_failed; +} + +/* Statistics IDs are global per chip/path, while Client IDs for E1x are per + * port. + */ +static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp) +{ + if (!CHIP_IS_E1x(fp->bp)) + return fp->cl_id; + else + return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x; +} + +static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, + bnx2x_obj_type obj_type) +{ + struct bnx2x *bp = fp->bp; + + /* Configure classification DBs */ + bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, + BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), + bnx2x_sp_mapping(bp, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &bp->sp_state, obj_type, + &bp->macs_pool); +} + +/** + * bnx2x_get_path_func_num - get number of active functions + * + * @bp: driver handle + * + * Calculates the number of active (not hidden) functions on the + * current path. + */ +static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) +{ + u8 func_num = 0, i; + + /* 57710 has only one function per port */ + if (CHIP_IS_E1(bp)) + return 1; + + /* Calculate the number of functions enabled on the current + * PATH/PORT. + */ + if (CHIP_REV_IS_SLOW(bp)) { + if (IS_MF(bp)) + func_num = 4; + else + func_num = 2; + } else { + for (i = 0; i < E1H_FUNC_MAX / 2; i++) { + u32 func_config = + MF_CFG_RD(bp, + func_mf_config[BP_PORT(bp) + 2 * i]. + config); + func_num += + ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 
0 : 1); + } + } + + WARN_ON(!func_num); + + return func_num; +} + +static inline void bnx2x_init_bp_objs(struct bnx2x *bp) +{ + /* RX_MODE controlling object */ + bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); + + /* multicast configuration controlling object */ + bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, + BP_FUNC(bp), BP_FUNC(bp), + bnx2x_sp(bp, mcast_rdata), + bnx2x_sp_mapping(bp, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, &bp->sp_state, + BNX2X_OBJ_TYPE_RX); + + /* Setup CAM credit pools */ + bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), + bnx2x_get_path_func_num(bp)); + + /* RSS configuration object */ + bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, + bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), + bnx2x_sp(bp, rss_rdata), + bnx2x_sp_mapping(bp, rss_rdata), + BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, + BNX2X_OBJ_TYPE_RX); +} + +static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) +{ + if (CHIP_IS_E1x(fp->bp)) + return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; + else + return fp->cl_id; +} + +static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) +{ + struct bnx2x *bp = fp->bp; + + if (!CHIP_IS_E1x(bp)) + return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); + else + return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); +} + +static inline void bnx2x_init_txdata(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, + __le16 *tx_cons_sb) +{ + txdata->cid = cid; + txdata->txq_index = txq_index; + txdata->tx_cons_sb = tx_cons_sb; + + DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d", + txdata->cid, txdata->txq_index); +} + +#ifdef BCM_CNIC +static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) +{ + return bp->cnic_base_cl_id + cl_idx + + (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; +} + +static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) +{ + + /* the 'first' id is allocated for the cnic */ + return bp->base_fw_ndsb; +} + +static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) +{ + return bp->igu_base_sb; +} + + +static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) +{ + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + unsigned long q_type = 0; + + bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, + BNX2X_FCOE_ETH_CL_ID_IDX); + /** Current BNX2X_FCOE_ETH_CID definition implies not more than + * 16 ETH clients per function when CNIC is enabled! + * + * Fix it ASAP!!!
+ */ + bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; + bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; + bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; + bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; + + bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), + fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); + + DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)", fp->index); + + /* qZone id equals to FW (per path) client id */ + bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); + /* init shortcut */ + bnx2x_fcoe(bp, ustorm_rx_prods_offset) = + bnx2x_rx_ustorm_prods_offset(fp); + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + /* No multi-CoS for FCoE L2 client */ + BUG_ON(fp->max_cos != 1); + + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " + "igu_sb %d\n", + fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); +} +#endif + +static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata) +{ + int cnt = 1000; + + while (bnx2x_has_tx_work_unload(txdata)) { + if (!cnt) { + BNX2X_ERR("timeout waiting for queue[%d]: " + "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", + txdata->txq_index, txdata->tx_pkt_prod, + txdata->tx_pkt_cons); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); + return -EBUSY; +#else + break; +#endif + } + cnt--; + usleep_range(1000, 1000); + } + + return 0; +} + +int bnx2x_get_link_cfg_idx(struct bnx2x *bp); + +static inline void __storm_memset_struct(struct bnx2x *bp, + u32 addr, size_t size, u32 *data) +{ + int i; + for (i = 0; i < size/4; i++) + REG_WR(bp, addr + (i * 4), data[i]); +} + +static inline void storm_memset_func_cfg(struct bnx2x *bp, + struct tstorm_eth_function_common_config *tcfg, + u16 abs_fid) +{ + size_t size = sizeof(struct tstorm_eth_function_common_config); + + u32 addr = BAR_TSTRORM_INTMEM + + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); + + __storm_memset_struct(bp, addr, size, (u32 *)tcfg); +} + +static inline void storm_memset_cmng(struct bnx2x *bp, + struct cmng_struct_per_port *cmng, + u8 port) +{ + size_t size = sizeof(struct cmng_struct_per_port); + + u32 addr = BAR_XSTRORM_INTMEM + + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); + + __storm_memset_struct(bp, addr, size, (u32 *)cmng); +} + +/** + * bnx2x_wait_sp_comp - wait for the outstanding SP commands. + * + * @bp: driver handle + * @mask: bits that need to be cleared + */ +static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) +{ + int tout = 5000; /* Wait for 5 secs tops */ + + while (tout--) { + smp_mb(); + netif_addr_lock_bh(bp->dev); + if (!(bp->sp_state & mask)) { + netif_addr_unlock_bh(bp->dev); + return true; + } + netif_addr_unlock_bh(bp->dev); + + usleep_range(1000, 1000); + } + + smp_mb(); + + netif_addr_lock_bh(bp->dev); + if (bp->sp_state & mask) { + BNX2X_ERR("Filtering completion timed out. 
sp_state 0x%lx, " + "mask 0x%lx\n", bp->sp_state, mask); + netif_addr_unlock_bh(bp->dev); + return false; + } + netif_addr_unlock_bh(bp->dev); + + return true; +} + +/** + * bnx2x_set_ctx_validation - set CDU context validation values + * + * @bp: driver handle + * @cxt: context of the connection on the host memory + * @cid: SW CID of the connection to be configured + */ +void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, + u32 cid); + +void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, + u8 sb_index, u8 disable, u16 usec); +void bnx2x_acquire_phy_lock(struct bnx2x *bp); +void bnx2x_release_phy_lock(struct bnx2x *bp); + +/** + * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration. + * + * @bp: driver handle + * @mf_cfg: MF configuration + * + */ +static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) +{ + u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (!max_cfg) { + BNX2X_ERR("Illegal configuration detected for Max BW - " + "using 100 instead\n"); + max_cfg = 100; + } + return max_cfg; +} + +#endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c new file mode 100644 index 0000000..a4ea35f --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -0,0 +1,2507 @@ +/* bnx2x_dcb.c: Broadcom Everest network driver. + * + * Copyright 2009-2011 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Maintained by: Eilon Greenstein + * Written by: Dmitry Kravkov + * + */ +#include +#include +#include +#include +#include + +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include "bnx2x_dcb.h" + +/* forward declarations of dcbx related functions */ +static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); +static void bnx2x_pfc_set_pfc(struct bnx2x *bp); +static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); +static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); +static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, + u32 *set_configuration_ets_pg, + u32 *pri_pg_tbl); +static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, + u32 *pg_pri_orginal_spread, + struct pg_help_data *help_data); +static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + u32 *pg_pri_orginal_spread); +static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + struct dcbx_ets_feature *ets); +static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, + struct bnx2x_func_tx_start_params*); + +/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */ +static void bnx2x_read_data(struct bnx2x *bp, u32 *buff, + u32 addr, u32 len) +{ + int i; + for (i = 0; i < len; i += 4, buff++) + *buff = REG_RD(bp, addr + i); +} + +static void bnx2x_write_data(struct bnx2x *bp, u32 *buff, + u32 addr, u32 len) +{ + int i; + for (i = 0; i < len; i += 4, buff++) + REG_WR(bp, addr + i, *buff); +} + +static void bnx2x_pfc_set(struct bnx2x *bp) +{ + struct bnx2x_nig_brb_pfc_port_params pfc_params = {0}; + u32 pri_bit, val = 0; + int i; + + pfc_params.num_of_rx_cos_priority_mask = + bp->dcbx_port_params.ets.num_of_cos; + + /* Tx COS configuration */ + for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++) + /* + * We configure only the pauseable bits (non pauseable aren't + * configured at all) it's done to avoid false pauses from + * network + */ + pfc_params.rx_cos_priority_mask[i] = + bp->dcbx_port_params.ets.cos_params[i].pri_bitmask + & DCBX_PFC_PRI_PAUSE_MASK(bp); + + /* + * Rx COS configuration + * Changing PFC RX configuration . 
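+	 * (pkt_priority_to_cos below packs a 4-bit COS value per
+	 * priority: the loop writes 1, i.e. COS1, into the nibble of
+	 * every pauseable priority and leaves COS0 for the rest.)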
+ * In RX COS0 will always be configured to lossy and COS1 to lossless + */ + for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) { + pri_bit = 1 << i; + + if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)) + val |= 1 << (i * 4); + } + + pfc_params.pkt_priority_to_cos = val; + + /* RX COS0 */ + pfc_params.llfc_low_priority_classes = 0; + /* RX COS1 */ + pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp); + + /* BRB configuration */ + pfc_params.cos0_pauseable = false; + pfc_params.cos1_pauseable = true; + + bnx2x_acquire_phy_lock(bp); + bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED; + bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params); + bnx2x_release_phy_lock(bp); +} + +static void bnx2x_pfc_clear(struct bnx2x *bp) +{ + struct bnx2x_nig_brb_pfc_port_params nig_params = {0}; + nig_params.pause_enable = 1; +#ifdef BNX2X_SAFC + if (bp->flags & SAFC_TX_FLAG) { + u32 high = 0, low = 0; + int i; + + for (i = 0; i < BNX2X_MAX_PRIORITY; i++) { + if (bp->pri_map[i] == 1) + high |= (1 << i); + if (bp->pri_map[i] == 0) + low |= (1 << i); + } + + nig_params.llfc_low_priority_classes = high; + nig_params.llfc_low_priority_classes = low; + + nig_params.pause_enable = 0; + nig_params.llfc_enable = 1; + nig_params.llfc_out_en = 1; + } +#endif /* BNX2X_SAFC */ + bnx2x_acquire_phy_lock(bp); + bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED; + bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params); + bnx2x_release_phy_lock(bp); +} + +static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp, + struct dcbx_features *features, + u32 error) +{ + u8 i = 0; + DP(NETIF_MSG_LINK, "local_mib.error %x\n", error); + + /* PG */ + DP(NETIF_MSG_LINK, + "local_mib.features.ets.enabled %x\n", features->ets.enabled); + for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) + DP(NETIF_MSG_LINK, + "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i, + DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i)); + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) + DP(NETIF_MSG_LINK, + "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i, + DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i)); + + /* pfc */ + DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n", + features->pfc.pri_en_bitmap); + DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n", + features->pfc.pfc_caps); + DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n", + features->pfc.enabled); + + DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n", + features->app.default_pri); + DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n", + features->app.tc_supported); + DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n", + features->app.enabled); + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + DP(NETIF_MSG_LINK, + "dcbx_features.app.app_pri_tbl[%x].app_id %x\n", + i, features->app.app_pri_tbl[i].app_id); + DP(NETIF_MSG_LINK, + "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n", + i, features->app.app_pri_tbl[i].pri_bitmap); + DP(NETIF_MSG_LINK, + "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n", + i, features->app.app_pri_tbl[i].appBitfield); + } +} + +static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp, + u8 pri_bitmap, + u8 llfc_traf_type) +{ + u32 pri = MAX_PFC_PRIORITIES; + u32 index = MAX_PFC_PRIORITIES - 1; + u32 pri_mask; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + + /* Choose the highest priority */ + while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) { + pri_mask = 1 << index; + if (GET_FLAGS(pri_bitmap, pri_mask)) + pri = index ; + index--; + } + + if (pri < MAX_PFC_PRIORITIES) + 
ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri); +} + +static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, + struct dcbx_app_priority_feature *app, + u32 error) { + u8 index; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + + if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) + DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n"); + + if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH)) + DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n"); + + if (app->enabled && + !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) { + + bp->dcbx_port_params.app.enabled = true; + + for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) + ttp[index] = 0; + + if (app->default_pri < MAX_PFC_PRIORITIES) + ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri; + + for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) { + struct dcbx_app_priority_entry *entry = + app->app_pri_tbl; + + if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_ETH_TYPE) && + ETH_TYPE_FCOE == entry[index].app_id) + bnx2x_dcbx_get_ap_priority(bp, + entry[index].pri_bitmap, + LLFC_TRAFFIC_TYPE_FCOE); + + if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_PORT) && + TCP_PORT_ISCSI == entry[index].app_id) + bnx2x_dcbx_get_ap_priority(bp, + entry[index].pri_bitmap, + LLFC_TRAFFIC_TYPE_ISCSI); + } + } else { + DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n"); + bp->dcbx_port_params.app.enabled = false; + for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) + ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY; + } +} + +static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, + struct dcbx_ets_feature *ets, + u32 error) { + int i = 0; + u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0}; + struct pg_help_data pg_help_data; + struct bnx2x_dcbx_cos_params *cos_params = + bp->dcbx_port_params.ets.cos_params; + + memset(&pg_help_data, 0, sizeof(struct pg_help_data)); + + + if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR)) + DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n"); + + + /* Clean up old settings of ets on COS */ + for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) { + cos_params[i].pauseable = false; + cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID; + cos_params[i].bw_tbl = DCBX_INVALID_COS_BW; + cos_params[i].pri_bitmask = 0; + } + + if (bp->dcbx_port_params.app.enabled && + !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) && + ets->enabled) { + DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n"); + bp->dcbx_port_params.ets.enabled = true; + + bnx2x_dcbx_get_ets_pri_pg_tbl(bp, + pg_pri_orginal_spread, + ets->pri_pg_tbl); + + bnx2x_dcbx_get_num_pg_traf_type(bp, + pg_pri_orginal_spread, + &pg_help_data); + + bnx2x_dcbx_fill_cos_params(bp, &pg_help_data, + ets, pg_pri_orginal_spread); + + } else { + DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n"); + bp->dcbx_port_params.ets.enabled = false; + ets->pri_pg_tbl[0] = 0; + + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++) + DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1); + } +} + +static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, + struct dcbx_pfc_feature *pfc, u32 error) +{ + + if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR)) + DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n"); + + if (bp->dcbx_port_params.app.enabled && + !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) && + pfc->enabled) { + bp->dcbx_port_params.pfc.enabled = true; + bp->dcbx_port_params.pfc.priority_non_pauseable_mask = + ~(pfc->pri_en_bitmap); + } else { + DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n"); + bp->dcbx_port_params.pfc.enabled = false; + 
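+		/* PFC is off, so nothing is flow-controlled: clear the
+		 * non-pauseable mask rather than deriving it from
+		 * pri_en_bitmap as the enabled branch above does.
+		 */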
bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0; + } +} + +/* maps unmapped priorities to to the same COS as L2 */ +static void bnx2x_dcbx_map_nw(struct bnx2x *bp) +{ + int i; + u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */ + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW]; + struct bnx2x_dcbx_cos_params *cos_params = + bp->dcbx_port_params.ets.cos_params; + + /* get unmapped priorities by clearing mapped bits */ + for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) + unmapped &= ~(1 << ttp[i]); + + /* find cos for nw prio and extend it with unmapped */ + for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) { + if (cos_params[i].pri_bitmask & nw_prio) { + /* extend the bitmask with unmapped */ + DP(NETIF_MSG_LINK, + "cos %d extended with 0x%08x", i, unmapped); + cos_params[i].pri_bitmask |= unmapped; + break; + } + } +} + +static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp, + struct dcbx_features *features, + u32 error) +{ + bnx2x_dcbx_get_ap_feature(bp, &features->app, error); + + bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error); + + bnx2x_dcbx_get_ets_feature(bp, &features->ets, error); + + bnx2x_dcbx_map_nw(bp); +} + +#define DCBX_LOCAL_MIB_MAX_TRY_READ (100) +static int bnx2x_dcbx_read_mib(struct bnx2x *bp, + u32 *base_mib_addr, + u32 offset, + int read_mib_type) +{ + int max_try_read = 0; + u32 mib_size, prefix_seq_num, suffix_seq_num; + struct lldp_remote_mib *remote_mib ; + struct lldp_local_mib *local_mib; + + + switch (read_mib_type) { + case DCBX_READ_LOCAL_MIB: + mib_size = sizeof(struct lldp_local_mib); + break; + case DCBX_READ_REMOTE_MIB: + mib_size = sizeof(struct lldp_remote_mib); + break; + default: + return 1; /*error*/ + } + + offset += BP_PORT(bp) * mib_size; + + do { + bnx2x_read_data(bp, base_mib_addr, offset, mib_size); + + max_try_read++; + + switch (read_mib_type) { + case DCBX_READ_LOCAL_MIB: + local_mib = (struct lldp_local_mib *) base_mib_addr; + prefix_seq_num = local_mib->prefix_seq_num; + suffix_seq_num = local_mib->suffix_seq_num; + break; + case DCBX_READ_REMOTE_MIB: + remote_mib = (struct lldp_remote_mib *) base_mib_addr; + prefix_seq_num = remote_mib->prefix_seq_num; + suffix_seq_num = remote_mib->suffix_seq_num; + break; + default: + return 1; /*error*/ + } + } while ((prefix_seq_num != suffix_seq_num) && + (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ)); + + if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) { + BNX2X_ERR("MIB could not be read\n"); + return 1; + } + + return 0; +} + +static void bnx2x_pfc_set_pfc(struct bnx2x *bp) +{ + if (bp->dcbx_port_params.pfc.enabled && + !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)) + /* + * 1. Fills up common PFC structures if required + * 2. 
Configure NIG, MAC and BRB via the elink + */ + bnx2x_pfc_set(bp); + else + bnx2x_pfc_clear(bp); +} + +static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {0}; + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_TX_STOP; + + DP(NETIF_MSG_LINK, "STOP TRAFFIC\n"); + return bnx2x_func_state_change(bp, &func_params); +} + +static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_tx_start_params *tx_params = + &func_params.params.tx_start; + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_TX_START; + + bnx2x_dcbx_fw_struct(bp, tx_params); + + DP(NETIF_MSG_LINK, "START TRAFFIC\n"); + return bnx2x_func_state_change(bp, &func_params); +} + +static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) +{ + struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); + int rc = 0; + + if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) { + BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos); + return; + } + + /* valid COS entries */ + if (ets->num_of_cos == 1) /* no ETS */ + return; + + /* sanity */ + if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) && + (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) || + ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) && + (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) { + BNX2X_ERR("all COS should have at least bw_limit or strict" + "ets->cos_params[0].strict= %x" + "ets->cos_params[0].bw_tbl= %x" + "ets->cos_params[1].strict= %x" + "ets->cos_params[1].bw_tbl= %x", + ets->cos_params[0].strict, + ets->cos_params[0].bw_tbl, + ets->cos_params[1].strict, + ets->cos_params[1].bw_tbl); + return; + } + /* If we join a group and there is bw_tbl and strict then bw rules */ + if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) && + (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) { + u32 bw_tbl_0 = ets->cos_params[0].bw_tbl; + u32 bw_tbl_1 = ets->cos_params[1].bw_tbl; + /* Do not allow 0-100 configuration + * since PBF does not support it + * force 1-99 instead + */ + if (bw_tbl_0 == 0) { + bw_tbl_0 = 1; + bw_tbl_1 = 99; + } else if (bw_tbl_1 == 0) { + bw_tbl_1 = 1; + bw_tbl_0 = 99; + } + + bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1); + } else { + if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST) + rc = bnx2x_ets_strict(&bp->link_params, 0); + else if (ets->cos_params[1].strict + == BNX2X_DCBX_STRICT_COS_HIGHEST) + rc = bnx2x_ets_strict(&bp->link_params, 1); + if (rc) + BNX2X_ERR("update_ets_params failed\n"); + } +} + +/* + * In E3B0 the configuration may have more than 2 COS. 
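+ * E2 and E3A0 are limited to two COSes, which is what the 2cos_limit
+ * variant above enforces through DCBX_COS_MAX_NUM_E2; the dispatch in
+ * bnx2x_dcbx_update_ets_params() below picks between the two paths by
+ * chip type.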
+ */
+void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+{
+	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+	struct bnx2x_ets_params ets_params = { 0 };
+	u8 i;
+
+	ets_params.num_of_cos = ets->num_of_cos;
+
+	for (i = 0; i < ets->num_of_cos; i++) {
+		/* COS is SP */
+		if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
+			if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS can't be both BW and SP\n");
+				return;
+			}
+
+			ets_params.cos[i].state = bnx2x_cos_state_strict;
+			ets_params.cos[i].params.sp_params.pri =
+				ets->cos_params[i].strict;
+		} else { /* COS is BW */
+			if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS can't be not BW and not SP\n");
+				return;
+			}
+			ets_params.cos[i].state = bnx2x_cos_state_bw;
+			ets_params.cos[i].params.bw_params.bw =
+				(u8)ets->cos_params[i].bw_tbl;
+		}
+	}
+
+	/* Configure the ETS in HW */
+	if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
+				  &ets_params)) {
+		BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
+		bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+	}
+}
+
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+	bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+
+	if (!bp->dcbx_port_params.ets.enabled ||
+	    (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+		return;
+
+	if (CHIP_IS_E3B0(bp))
+		bnx2x_dcbx_update_ets_config(bp);
+	else
+		bnx2x_dcbx_2cos_limit_update_ets_config(bp);
+}
+
+#ifdef BCM_DCBNL
+static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
+{
+	struct lldp_remote_mib remote_mib = {0};
+	u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
+	int rc;
+
+	DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
+	   dcbx_remote_mib_offset);
+
+	if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
+		BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
+		return -EINVAL;
+	}
+
+	rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
+				 DCBX_READ_REMOTE_MIB);
+
+	if (rc) {
+		BNX2X_ERR("Failed to read remote mib from FW\n");
+		return rc;
+	}
+
+	/* save features and flags */
+	bp->dcbx_remote_feat = remote_mib.features;
+	bp->dcbx_remote_flags = remote_mib.flags;
+	return 0;
+}
+#endif
+
+static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
+{
+	struct lldp_local_mib local_mib = {0};
+	u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
+	int rc;
+
+	DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
+
+	if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
+		BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
+		return -EINVAL;
+	}
+
+	rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
+				 DCBX_READ_LOCAL_MIB);
+
+	if (rc) {
+		BNX2X_ERR("Failed to read local mib from FW\n");
+		return rc;
+	}
+
+	/* save features and error */
+	bp->dcbx_local_feat = local_mib.features;
+	bp->dcbx_error = local_mib.error;
+	return 0;
+}
+
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+	u8 pri;
+
+	/* Choose the highest priority */
+	for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+		if (ent->pri_bitmap & (1 << pri))
+			break;
+	return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+	return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+		DCBX_APP_SF_PORT) ?
DCB_APP_IDTYPE_PORTNUM : + DCB_APP_IDTYPE_ETHTYPE; +} + +int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) +{ + int i, err = 0; + + for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) { + struct dcbx_app_priority_entry *ent = + &bp->dcbx_local_feat.app.app_pri_tbl[i]; + + if (ent->appBitfield & DCBX_APP_ENTRY_VALID) { + u8 up = bnx2x_dcbx_dcbnl_app_up(ent); + + /* avoid invalid user-priority */ + if (up) { + struct dcb_app app; + app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent); + app.protocol = ent->app_id; + app.priority = delall ? 0 : up; + err = dcb_setapp(bp->dev, &app); + } + } + } + return err; +} +#endif + +static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) +{ + if (SHMEM2_HAS(bp, drv_flags)) { + u32 drv_flags; + bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS); + drv_flags = SHMEM2_RD(bp, drv_flags); + + if (set) + SET_FLAGS(drv_flags, flags); + else + RESET_FLAGS(drv_flags, flags); + + SHMEM2_WR(bp, drv_flags, drv_flags); + DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); + bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); + } +} + +static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) +{ + u8 prio, cos; + for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) { + for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { + if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask + & (1 << prio)) { + bp->prio_to_cos[prio] = cos; + DP(NETIF_MSG_LINK, + "tx_mapping %d --> %d\n", prio, cos); + } + } + } + + /* setup tc must be called under rtnl lock, but we can't take it here + * as we are handling an attetntion on a work queue which must be + * flushed at some rtnl-locked contexts (e.g. if down) + */ + if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) + schedule_delayed_work(&bp->sp_rtnl_task, 0); +} + +void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) +{ + switch (state) { + case BNX2X_DCBX_STATE_NEG_RECEIVED: + { + DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); +#ifdef BCM_DCBNL + /** + * Delete app tlvs from dcbnl before reading new + * negotiation results + */ + bnx2x_dcbnl_update_applist(bp, true); + + /* Read rmeote mib if dcbx is in the FW */ + if (bnx2x_dcbx_read_shmem_remote_mib(bp)) + return; +#endif + /* Read neg results if dcbx is in the FW */ + if (bnx2x_dcbx_read_shmem_neg_results(bp)) + return; + + bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + + bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + + /* mark DCBX result for PMF migration */ + bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1); +#ifdef BCM_DCBNL + /** + * Add new app tlvs to dcbnl + */ + bnx2x_dcbnl_update_applist(bp, false); +#endif + bnx2x_dcbx_stop_hw_tx(bp); + + /* reconfigure the netdevice with the results of the new + * dcbx negotiation. 
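+		 * bnx2x_dcbx_update_tc_mapping() only records the new
+		 * prio-to-CoS table and defers the actual tc setup to
+		 * the rtnl-locked sp_rtnl worker.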
+ */ + bnx2x_dcbx_update_tc_mapping(bp); + + return; + } + case BNX2X_DCBX_STATE_TX_PAUSED: + DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); + bnx2x_pfc_set_pfc(bp); + + bnx2x_dcbx_update_ets_params(bp); + bnx2x_dcbx_resume_hw_tx(bp); + return; + case BNX2X_DCBX_STATE_TX_RELEASED: + DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); +#ifdef BCM_DCBNL + /* + * Send a notification for the new negotiated parameters + */ + dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); +#endif + return; + default: + BNX2X_ERR("Unknown DCBX_STATE\n"); + } +} + +#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \ + BP_PORT(bp)*sizeof(struct lldp_admin_mib)) + +static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, + u32 dcbx_lldp_params_offset) +{ + struct lldp_admin_mib admin_mib; + u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0; + u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp); + + /*shortcuts*/ + struct dcbx_features *af = &admin_mib.features; + struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params; + + memset(&admin_mib, 0, sizeof(struct lldp_admin_mib)); + + /* Read the data first */ + bnx2x_read_data(bp, (u32 *)&admin_mib, offset, + sizeof(struct lldp_admin_mib)); + + if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED); + + if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) { + + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK); + admin_mib.ver_cfg_flags |= + (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) & + DCBX_CEE_VERSION_MASK; + + af->ets.enabled = (u8)dp->admin_ets_enable; + + af->pfc.enabled = (u8)dp->admin_pfc_enable; + + /* FOR IEEE dp->admin_tc_supported_tx_enable */ + if (dp->admin_ets_configuration_tx_enable) + SET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_ETS_CONFIG_TX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_ETS_CONFIG_TX_ENABLED); + /* For IEEE admin_ets_recommendation_tx_enable */ + if (dp->admin_pfc_tx_enable) + SET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_PFC_CONFIG_TX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_PFC_CONFIG_TX_ENABLED); + + if (dp->admin_application_priority_tx_enable) + SET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_APP_CONFIG_TX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_APP_CONFIG_TX_ENABLED); + + if (dp->admin_ets_willing) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING); + /* For IEEE admin_ets_reco_valid */ + if (dp->admin_pfc_willing) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING); + + if (dp->admin_app_priority_willing) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING); + + for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) { + DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i, + (u8)dp->admin_configuration_bw_precentage[i]); + + DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n", + i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i)); + } + + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) { + DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i, + (u8)dp->admin_configuration_ets_pg[i]); + + DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n", + i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); + } + + /*For IEEE admin_recommendation_bw_precentage + *For IEEE 
admin_recommendation_ets_pg */ + af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; + for (i = 0; i < 4; i++) { + if (dp->admin_priority_app_table[i].valid) { + struct bnx2x_admin_priority_app_table *table = + dp->admin_priority_app_table; + if ((ETH_TYPE_FCOE == table[i].app_id) && + (TRAFFIC_TYPE_ETH == table[i].traffic_type)) + traf_type = FCOE_APP_IDX; + else if ((TCP_PORT_ISCSI == table[i].app_id) && + (TRAFFIC_TYPE_PORT == table[i].traffic_type)) + traf_type = ISCSI_APP_IDX; + else + traf_type = other_traf_type++; + + af->app.app_pri_tbl[traf_type].app_id = + table[i].app_id; + + af->app.app_pri_tbl[traf_type].pri_bitmap = + (u8)(1 << table[i].priority); + + af->app.app_pri_tbl[traf_type].appBitfield = + (DCBX_APP_ENTRY_VALID); + + af->app.app_pri_tbl[traf_type].appBitfield |= + (TRAFFIC_TYPE_ETH == table[i].traffic_type) ? + DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT; + } + } + + af->app.default_pri = (u8)dp->admin_default_priority; + + } + + /* Write the data. */ + bnx2x_write_data(bp, (u32 *)&admin_mib, offset, + sizeof(struct lldp_admin_mib)); + +} + +void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) +{ + if (!CHIP_IS_E1x(bp)) { + bp->dcb_state = dcb_on; + bp->dcbx_enabled = dcbx_enabled; + } else { + bp->dcb_state = false; + bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; + } + DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n", + dcb_on ? "ON" : "OFF", + dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" : + dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" : + dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ? + "on-chip with negotiation" : "invalid"); +} + +void bnx2x_dcbx_init_params(struct bnx2x *bp) +{ + bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */ + bp->dcbx_config_params.admin_ets_willing = 1; + bp->dcbx_config_params.admin_pfc_willing = 1; + bp->dcbx_config_params.overwrite_settings = 1; + bp->dcbx_config_params.admin_ets_enable = 1; + bp->dcbx_config_params.admin_pfc_enable = 1; + bp->dcbx_config_params.admin_tc_supported_tx_enable = 1; + bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; + bp->dcbx_config_params.admin_pfc_tx_enable = 1; + bp->dcbx_config_params.admin_application_priority_tx_enable = 1; + bp->dcbx_config_params.admin_ets_reco_valid = 1; + bp->dcbx_config_params.admin_app_priority_willing = 1; + bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00; + bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50; + bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50; + bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1; + bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2; + bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1; + bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2; 
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7; + bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5; + bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6; + bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7; + bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0; + bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1; + bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2; + bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3; + bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4; + bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5; + bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6; + bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7; + bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */ + bp->dcbx_config_params.admin_priority_app_table[0].valid = 1; + bp->dcbx_config_params.admin_priority_app_table[1].valid = 1; + bp->dcbx_config_params.admin_priority_app_table[2].valid = 0; + bp->dcbx_config_params.admin_priority_app_table[3].valid = 0; + bp->dcbx_config_params.admin_priority_app_table[0].priority = 3; + bp->dcbx_config_params.admin_priority_app_table[1].priority = 0; + bp->dcbx_config_params.admin_priority_app_table[2].priority = 0; + bp->dcbx_config_params.admin_priority_app_table[3].priority = 0; + bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0; + bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1; + bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0; + bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0; + bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906; + bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260; + bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0; + bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0; + bp->dcbx_config_params.admin_default_priority = + bp->dcbx_config_params.admin_priority_app_table[1].priority; +} + +void bnx2x_dcbx_init(struct bnx2x *bp) +{ + u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE; + + if (bp->dcbx_enabled <= 0) + return; + + /* validate: + * chip of good for dcbx version, + * dcb is wanted + * the function is pmf + * shmem2 contains DCBX support fields + */ + DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n", + bp->dcb_state, bp->port.pmf); + + if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && + SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { + dcbx_lldp_params_offset = + SHMEM2_RD(bp, dcbx_lldp_params_offset); + + DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n", + dcbx_lldp_params_offset); + + bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); + + if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { + bnx2x_dcbx_admin_mib_updated_params(bp, + dcbx_lldp_params_offset); + + /* Let HW start negotiation */ + bnx2x_fw_command(bp, + DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); + } + } +} +static void +bnx2x_dcbx_print_cos_params(struct bnx2x *bp, + struct bnx2x_func_tx_start_params *pfc_fw_cfg) +{ + u8 pri = 0; + u8 cos = 0; + + DP(NETIF_MSG_LINK, + "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version); + DP(NETIF_MSG_LINK, + "pdev->params.dcbx_port_params.pfc." + "priority_non_pauseable_mask %x\n", + bp->dcbx_port_params.pfc.priority_non_pauseable_mask); + + for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) { + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." 
+ "cos_params[%d].pri_bitmask %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask); + + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." + "cos_params[%d].bw_tbl %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); + + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." + "cos_params[%d].strict %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].strict); + + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." + "cos_params[%d].pauseable %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].pauseable); + } + + for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { + DP(NETIF_MSG_LINK, + "pfc_fw_cfg->traffic_type_to_priority_cos[%d]." + "priority %x\n", pri, + pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority); + + DP(NETIF_MSG_LINK, + "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n", + pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos); + } +} + +/* fills help_data according to pg_info */ +static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, + u32 *pg_pri_orginal_spread, + struct pg_help_data *help_data) +{ + bool pg_found = false; + u32 i, traf_type, add_traf_type, add_pg; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + struct pg_entry_help_data *data = help_data->data; /*shotcut*/ + + /* Set to invalid */ + for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) + data[i].pg = DCBX_ILLEGAL_PG; + + for (add_traf_type = 0; + add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) { + pg_found = false; + if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) { + add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]]; + for (traf_type = 0; + traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; + traf_type++) { + if (data[traf_type].pg == add_pg) { + if (!(data[traf_type].pg_priority & + (1 << ttp[add_traf_type]))) + data[traf_type]. + num_of_dif_pri++; + data[traf_type].pg_priority |= + (1 << ttp[add_traf_type]); + pg_found = true; + break; + } + } + if (false == pg_found) { + data[help_data->num_of_pg].pg = add_pg; + data[help_data->num_of_pg].pg_priority = + (1 << ttp[add_traf_type]); + data[help_data->num_of_pg].num_of_dif_pri = 1; + help_data->num_of_pg++; + } + } + DP(NETIF_MSG_LINK, + "add_traf_type %d pg_found %s num_of_pg %d\n", + add_traf_type, (false == pg_found) ? "NO" : "YES", + help_data->num_of_pg); + } +} + +static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp, + struct cos_help_data *cos_data, + u32 pri_join_mask) +{ + /* Only one priority than only one COS */ + cos_data->data[0].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + cos_data->data[0].pri_join_mask = pri_join_mask; + cos_data->data[0].cos_bw = 100; + cos_data->num_of_cos = 1; +} + +static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp, + struct cos_entry_help_data *data, + u8 pg_bw) +{ + if (data->cos_bw == DCBX_INVALID_COS_BW) + data->cos_bw = pg_bw; + else + data->cos_bw += pg_bw; +} + +static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + struct dcbx_ets_feature *ets) +{ + u32 pri_tested = 0; + u8 i = 0; + u8 entry = 0; + u8 pg_entry = 0; + u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX; + + cos_data->data[0].pausable = true; + cos_data->data[1].pausable = false; + cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0; + + for (i = 0 ; i < num_of_pri ; i++) { + pri_tested = 1 << bp->dcbx_port_params. 
+ app.traffic_type_priority[i]; + + if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) { + cos_data->data[1].pri_join_mask |= pri_tested; + entry = 1; + } else { + cos_data->data[0].pri_join_mask |= pri_tested; + entry = 0; + } + pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params. + app.traffic_type_priority[i]]; + /* There can be only one strict pg */ + if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) + bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry], + DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); + else + /* If we join a group and one is strict + * than the bw rulls */ + cos_data->data[entry].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + if ((0 == cos_data->data[0].pri_join_mask) && + (0 == cos_data->data[1].pri_join_mask)) + BNX2X_ERR("dcbx error: Both groups must have priorities\n"); +} + + +#ifndef POWER_OF_2 +#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) +#endif + +static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, + struct pg_help_data *pg_help_data, + struct cos_help_data *cos_data, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + u8 i = 0; + u32 pri_tested = 0; + u32 pri_mask_without_pri = 0; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + /*debug*/ + if (num_of_dif_pri == 1) { + bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); + return; + } + /* single priority group */ + if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) { + /* If there are both pauseable and non-pauseable priorities, + * the pauseable priorities go to the first queue and + * the non-pauseable priorities go to the second queue. + */ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { + /* Pauseable */ + cos_data->data[0].pausable = true; + /* Non pauseable.*/ + cos_data->data[1].pausable = false; + + if (2 == num_of_dif_pri) { + cos_data->data[0].cos_bw = 50; + cos_data->data[1].cos_bw = 50; + } + + if (3 == num_of_dif_pri) { + if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp, + pri_join_mask))) { + cos_data->data[0].cos_bw = 33; + cos_data->data[1].cos_bw = 67; + } else { + cos_data->data[0].cos_bw = 67; + cos_data->data[1].cos_bw = 33; + } + } + + } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) { + /* If there are only pauseable priorities, + * then one/two priorities go to the first queue + * and one priority goes to the second queue. + */ + if (2 == num_of_dif_pri) { + cos_data->data[0].cos_bw = 50; + cos_data->data[1].cos_bw = 50; + } else { + cos_data->data[0].cos_bw = 67; + cos_data->data[1].cos_bw = 33; + } + cos_data->data[1].pausable = true; + cos_data->data[0].pausable = true; + /* All priorities except FCOE */ + cos_data->data[0].pri_join_mask = (pri_join_mask & + ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]))); + /* Only FCOE priority.*/ + cos_data->data[1].pri_join_mask = + (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]); + } else + /* If there are only non-pauseable priorities, + * they will all go to the same queue. + */ + bnx2x_dcbx_ets_disabled_entry_data(bp, + cos_data, pri_join_mask); + } else { + /* priority group which is not BW limited (PG#15):*/ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { + /* If there are both pauseable and non-pauseable + * priorities, the pauseable priorities go to the first + * queue and the non-pauseable priorities + * go to the second queue. 
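+			 * The group whose priority mask compares higher
+			 * (i.e. holds the higher-numbered priorities) is
+			 * given the top strict level by the code below.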
+ */ + if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) > + DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) { + cos_data->data[0].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( + BNX2X_DCBX_STRICT_COS_HIGHEST); + } else { + cos_data->data[0].strict = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( + BNX2X_DCBX_STRICT_COS_HIGHEST); + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + /* Pauseable */ + cos_data->data[0].pausable = true; + /* Non pause-able.*/ + cos_data->data[1].pausable = false; + } else { + /* If there are only pauseable priorities or + * only non-pauseable,* the lower priorities go + * to the first queue and the higherpriorities go + * to the second queue. + */ + cos_data->data[0].pausable = + cos_data->data[1].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + + for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) { + pri_tested = 1 << bp->dcbx_port_params. + app.traffic_type_priority[i]; + /* Remove priority tested */ + pri_mask_without_pri = + (pri_join_mask & ((u8)(~pri_tested))); + if (pri_mask_without_pri < pri_tested) + break; + } + + if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX) + BNX2X_ERR("Invalid value for pri_join_mask -" + " could not find a priority\n"); + + cos_data->data[0].pri_join_mask = pri_mask_without_pri; + cos_data->data[1].pri_join_mask = pri_tested; + /* Both queues are strict priority, + * and that with the highest priority + * gets the highest strict priority in the arbiter. + */ + cos_data->data[0].strict = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( + BNX2X_DCBX_STRICT_COS_HIGHEST); + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + } +} + +static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( + struct bnx2x *bp, + struct pg_help_data *pg_help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + u8 i = 0; + u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 }; + + /* If there are both pauseable and non-pauseable priorities, + * the pauseable priorities go to the first queue and + * the non-pauseable priorities go to the second queue. + */ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, + pg_help_data->data[0].pg_priority) || + IS_DCBX_PFC_PRI_MIX_PAUSE(bp, + pg_help_data->data[1].pg_priority)) { + /* If one PG contains both pauseable and + * non-pauseable priorities then ETS is disabled. + */ + bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data, + pg_pri_orginal_spread, ets); + bp->dcbx_port_params.ets.enabled = false; + return; + } + + /* Pauseable */ + cos_data->data[0].pausable = true; + /* Non pauseable. */ + cos_data->data[1].pausable = false; + if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, + pg_help_data->data[0].pg_priority)) { + /* 0 is pauseable */ + cos_data->data[0].pri_join_mask = + pg_help_data->data[0].pg_priority; + pg[0] = pg_help_data->data[0].pg; + cos_data->data[1].pri_join_mask = + pg_help_data->data[1].pg_priority; + pg[1] = pg_help_data->data[1].pg; + } else {/* 1 is pauseable */ + cos_data->data[0].pri_join_mask = + pg_help_data->data[1].pg_priority; + pg[0] = pg_help_data->data[1].pg; + cos_data->data[1].pri_join_mask = + pg_help_data->data[0].pg_priority; + pg[1] = pg_help_data->data[0].pg; + } + } else { + /* If there are only pauseable priorities or + * only non-pauseable, each PG goes to a queue. 
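+		 * The assignments below keep the order reported in
+		 * pg_help_data: PG[0] feeds queue 0 and PG[1] queue 1.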
+ */ + cos_data->data[0].pausable = cos_data->data[1].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + cos_data->data[0].pri_join_mask = + pg_help_data->data[0].pg_priority; + pg[0] = pg_help_data->data[0].pg; + cos_data->data[1].pri_join_mask = + pg_help_data->data[1].pg_priority; + pg[1] = pg_help_data->data[1].pg; + } + + /* There can be only one strict pg */ + for (i = 0 ; i < ARRAY_SIZE(pg); i++) { + if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES) + cos_data->data[i].cos_bw = + DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]); + else + cos_data->data[i].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } +} + +static int bnx2x_dcbx_join_pgs( + struct bnx2x *bp, + struct dcbx_ets_feature *ets, + struct pg_help_data *pg_help_data, + u8 required_num_of_pg) +{ + u8 entry_joined = pg_help_data->num_of_pg - 1; + u8 entry_removed = entry_joined + 1; + u8 pg_joined = 0; + + if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data) + <= pg_help_data->num_of_pg) { + + BNX2X_ERR("required_num_of_pg can't be zero\n"); + return -EINVAL; + } + + while (required_num_of_pg < pg_help_data->num_of_pg) { + entry_joined = pg_help_data->num_of_pg - 2; + entry_removed = entry_joined + 1; + /* protect index */ + entry_removed %= ARRAY_SIZE(pg_help_data->data); + + pg_help_data->data[entry_joined].pg_priority |= + pg_help_data->data[entry_removed].pg_priority; + + pg_help_data->data[entry_joined].num_of_dif_pri += + pg_help_data->data[entry_removed].num_of_dif_pri; + + if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG || + pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG) + /* Entries joined strict priority rules */ + pg_help_data->data[entry_joined].pg = + DCBX_STRICT_PRI_PG; + else { + /* Entries can be joined join BW */ + pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl, + pg_help_data->data[entry_joined].pg) + + DCBX_PG_BW_GET(ets->pg_bw_tbl, + pg_help_data->data[entry_removed].pg); + + DCBX_PG_BW_SET(ets->pg_bw_tbl, + pg_help_data->data[entry_joined].pg, pg_joined); + } + /* Joined the entries */ + pg_help_data->num_of_pg--; + } + + return 0; +} + +static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( + struct bnx2x *bp, + struct pg_help_data *pg_help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + u8 i = 0; + u32 pri_tested = 0; + u8 entry = 0; + u8 pg_entry = 0; + bool b_found_strict = false; + u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX; + + cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0; + /* If there are both pauseable and non-pauseable priorities, + * the pauseable priorities go to the first queue and the + * non-pauseable priorities go to the second queue. + */ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) + bnx2x_dcbx_separate_pauseable_from_non(bp, + cos_data, pg_pri_orginal_spread, ets); + else { + /* If two BW-limited PG-s were combined to one queue, + * the BW is their sum. + * + * If there are only pauseable priorities or only non-pauseable, + * and there are both BW-limited and non-BW-limited PG-s, + * the BW-limited PG/s go to one queue and the non-BW-limited + * PG/s go to the second queue. + * + * If there are only pauseable priorities or only non-pauseable + * and all are BW limited, then two priorities go to the first + * queue and one priority goes to the second queue. 
+		 *
+		 * We join these two cases:
+		 * if one is BW limited it goes to the second queue,
+		 * otherwise the last priority gets it.
+		 */
+
+		cos_data->data[0].pausable = cos_data->data[1].pausable =
+			IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+		for (i = 0 ; i < num_of_pri; i++) {
+			pri_tested = 1 << bp->dcbx_port_params.
+				app.traffic_type_priority[i];
+			pg_entry = (u8)pg_pri_orginal_spread[bp->
+				dcbx_port_params.app.traffic_type_priority[i]];
+
+			if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+				entry = 0;
+
+				if (i == (num_of_pri-1) &&
+				    false == b_found_strict)
+					/* The last entry is handled
+					 * separately: if no priority is
+					 * strict, then the last entry goes
+					 * to the last queue. */
+					entry = 1;
+				cos_data->data[entry].pri_join_mask |=
+					pri_tested;
+				bnx2x_dcbx_add_to_cos_bw(bp,
+					&cos_data->data[entry],
+					DCBX_PG_BW_GET(ets->pg_bw_tbl,
+						       pg_entry));
+			} else {
+				b_found_strict = true;
+				cos_data->data[1].pri_join_mask |= pri_tested;
+				/* If we join a group and one is strict,
+				 * then the bw rules */
+				cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
+			}
+		}
+	}
+}
+
+
+static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
+				       struct pg_help_data *help_data,
+				       struct dcbx_ets_feature *ets,
+				       struct cos_help_data *cos_data,
+				       u32 *pg_pri_orginal_spread,
+				       u32 pri_join_mask,
+				       u8 num_of_dif_pri)
+{
+
+	/* default E2 settings */
+	cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
+
+	switch (help_data->num_of_pg) {
+	case 1:
+		bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
+					       bp,
+					       help_data,
+					       cos_data,
+					       pri_join_mask,
+					       num_of_dif_pri);
+		break;
+	case 2:
+		bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+					       bp,
+					       help_data,
+					       ets,
+					       cos_data,
+					       pg_pri_orginal_spread,
+					       pri_join_mask,
+					       num_of_dif_pri);
+		break;
+
+	case 3:
+		bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+					       bp,
+					       help_data,
+					       ets,
+					       cos_data,
+					       pg_pri_orginal_spread,
+					       pri_join_mask,
+					       num_of_dif_pri);
+		break;
+	default:
+		BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
+		bnx2x_dcbx_ets_disabled_entry_data(bp,
+						   cos_data, pri_join_mask);
+	}
+}
+
+static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
+					struct cos_help_data *cos_data,
+					u8 entry,
+					u8 num_spread_of_entries,
+					u8 strict_app_pris)
+{
+	u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
+	u8 num_of_app_pri = MAX_PFC_PRIORITIES;
+	u8 app_pri_bit = 0;
+
+	while (num_spread_of_entries && num_of_app_pri > 0) {
+		app_pri_bit = 1 << (num_of_app_pri - 1);
+		if (app_pri_bit & strict_app_pris) {
+			struct cos_entry_help_data *data = &cos_data->
+				data[entry];
+			num_spread_of_entries--;
+			if (num_spread_of_entries == 0) {
+				/* last entry needed: put all the
+				 * remaining priorities in it */
+				data->cos_bw = DCBX_INVALID_COS_BW;
+				data->strict = strict_pri;
+				data->pri_join_mask = strict_app_pris;
+				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+					data->pri_join_mask);
+			} else {
+				strict_app_pris &= ~app_pri_bit;
+
+				data->cos_bw = DCBX_INVALID_COS_BW;
+				data->strict = strict_pri;
+				data->pri_join_mask = app_pri_bit;
+				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+					data->pri_join_mask);
+			}
+
+			strict_pri =
+			    BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
+			entry++;
+		}
+
+		num_of_app_pri--;
+	}
+
+	if (num_spread_of_entries)
+		return -EINVAL;
+
+	return 0;
+}
+
+static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
+					 struct cos_help_data *cos_data,
+					 u8 entry,
+					 u8 num_spread_of_entries,
+					 u8 strict_app_pris)
+{
+
+	if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
+					 num_spread_of_entries,
+					 strict_app_pris)) {
+		struct cos_entry_help_data *data =
&cos_data-> + data[entry]; + /* Fill BW entry */ + data->cos_bw = DCBX_INVALID_COS_BW; + data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST; + data->pri_join_mask = strict_app_pris; + data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, + data->pri_join_mask); + return 1; + } + + return num_spread_of_entries; +} + +static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 pri_join_mask) + +{ + u8 need_num_of_entries = 0; + u8 i = 0; + u8 entry = 0; + + /* + * if the number of requested PG-s in CEE is greater than 3 + * then the results are not determined since this is a violation + * of the standard. + */ + if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) { + if (bnx2x_dcbx_join_pgs(bp, ets, help_data, + DCBX_COS_MAX_NUM_E3B0)) { + BNX2X_ERR("Unable to reduce the number of PGs -" + "we will disables ETS\n"); + bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, + pri_join_mask); + return; + } + } + + for (i = 0 ; i < help_data->num_of_pg; i++) { + struct pg_entry_help_data *pg = &help_data->data[i]; + if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) { + struct cos_entry_help_data *data = &cos_data-> + data[entry]; + /* Fill BW entry */ + data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg); + data->strict = BNX2X_DCBX_STRICT_INVALID; + data->pri_join_mask = pg->pg_priority; + data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, + data->pri_join_mask); + + entry++; + } else { + need_num_of_entries = min_t(u8, + (u8)pg->num_of_dif_pri, + (u8)DCBX_COS_MAX_NUM_E3B0 - + help_data->num_of_pg + 1); + /* + * If there are still VOQ-s which have no associated PG, + * then associate these VOQ-s to PG15. These PG-s will + * be used for SP between priorities on PG15. + */ + entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data, + entry, need_num_of_entries, pg->pg_priority); + } + } + + /* the entry will represent the number of COSes used */ + cos_data->num_of_cos = entry; +} +static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + u32 *pg_pri_orginal_spread) +{ + struct cos_help_data cos_data; + u8 i = 0; + u32 pri_join_mask = 0; + u8 num_of_dif_pri = 0; + + memset(&cos_data, 0, sizeof(cos_data)); + + /* Validate the pg value */ + for (i = 0; i < help_data->num_of_pg ; i++) { + if (DCBX_STRICT_PRIORITY != help_data->data[i].pg && + DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg) + BNX2X_ERR("Invalid pg[%d] data %x\n", i, + help_data->data[i].pg); + pri_join_mask |= help_data->data[i].pg_priority; + num_of_dif_pri += help_data->data[i].num_of_dif_pri; + } + + /* defaults */ + cos_data.num_of_cos = 1; + for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) { + cos_data.data[i].pri_join_mask = 0; + cos_data.data[i].pausable = false; + cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID; + cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW; + } + + if (CHIP_IS_E3B0(bp)) + bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets, + &cos_data, pri_join_mask); + else /* E2 + E3A0 */ + bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp, + help_data, ets, + &cos_data, + pg_pri_orginal_spread, + pri_join_mask, + num_of_dif_pri); + + for (i = 0; i < cos_data.num_of_cos ; i++) { + struct bnx2x_dcbx_cos_params *p = + &bp->dcbx_port_params.ets.cos_params[i]; + + p->strict = cos_data.data[i].strict; + p->bw_tbl = cos_data.data[i].cos_bw; + p->pri_bitmask = cos_data.data[i].pri_join_mask; + p->pauseable = cos_data.data[i].pausable; + + /* sanity */ + if (p->bw_tbl != DCBX_INVALID_COS_BW 
|| + p->strict != BNX2X_DCBX_STRICT_INVALID) { + if (p->pri_bitmask == 0) + BNX2X_ERR("Invalid pri_bitmask for %d\n", i); + + if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) { + + if (p->pauseable && + DCBX_PFC_PRI_GET_NON_PAUSE(bp, + p->pri_bitmask) != 0) + BNX2X_ERR("Inconsistent config for " + "pausable COS %d\n", i); + + if (!p->pauseable && + DCBX_PFC_PRI_GET_PAUSE(bp, + p->pri_bitmask) != 0) + BNX2X_ERR("Inconsistent config for " + "nonpausable COS %d\n", i); + } + } + + if (p->pauseable) + DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n", + i, cos_data.data[i].pri_join_mask); + else + DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask " + "0x%x\n", + i, cos_data.data[i].pri_join_mask); + } + + bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; +} + +static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, + u32 *set_configuration_ets_pg, + u32 *pri_pg_tbl) +{ + int i; + + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) { + set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i); + + DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n", + i, set_configuration_ets_pg[i]); + } +} + +static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, + struct bnx2x_func_tx_start_params *pfc_fw_cfg) +{ + u16 pri_bit = 0; + u8 cos = 0, pri = 0; + struct priority_cos *tt2cos; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + + memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg)); + + /* to disable DCB - the structure must be zeroed */ + if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) + return; + + /*shortcut*/ + tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos; + + /* Fw version should be incremented each update */ + pfc_fw_cfg->dcb_version = ++bp->dcb_version; + pfc_fw_cfg->dcb_enabled = 1; + + /* Fill priority parameters */ + for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { + tt2cos[pri].priority = ttp[pri]; + pri_bit = 1 << tt2cos[pri].priority; + + /* Fill COS parameters based on COS calculated to + * make it more general for future use */ + for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) + if (bp->dcbx_port_params.ets.cos_params[cos]. + pri_bitmask & pri_bit) + tt2cos[pri].cos = cos; + } + + /* we never want the FW to add a 0 vlan tag */ + pfc_fw_cfg->dont_add_pri_0_en = 1; + + bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); +} + +void bnx2x_dcbx_pmf_update(struct bnx2x *bp) +{ + /* if we need to syncronize DCBX result from prev PMF + * read it from shmem and update bp accordingly + */ + if (SHMEM2_HAS(bp, drv_flags) && + GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) { + /* Read neg results if dcbx is in the FW */ + if (bnx2x_dcbx_read_shmem_neg_results(bp)) + return; + + bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + } +} + +/* DCB netlink */ +#ifdef BCM_DCBNL + +#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \ + DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC) + +static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp) +{ + /* validate dcbnl call that may change HW state: + * DCB is on and DCBX mode was SUCCESSFULLY set by the user. + */ + return bp->dcb_state && bp->dcbx_mode_uset; +} + +static u8 bnx2x_dcbnl_get_state(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state); + return bp->dcb_state; +} + +static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "state = %s\n", state ? 
"on" : "off"); + + bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled); + return 0; +} + +static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev, + u8 *perm_addr) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n"); + + /* first the HW mac address */ + memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); + +#ifdef BCM_CNIC + /* second SAN address */ + memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len); +#endif +} + +static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, + u8 prio_type, u8 pgid, u8 bw_pct, + u8 up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + + DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid); + if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) + return; + + /** + * bw_pct ingnored - band-width percentage devision between user + * priorities within the same group is not + * standard and hence not supported + * + * prio_type igonred - priority levels within the same group are not + * standard and hence are not supported. According + * to the standard pgid 15 is dedicated to strict + * prioirty traffic (on the port level). + * + * up_map ignored + */ + + bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid; + bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; +} + +static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct); + + if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) + return; + + bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct; + bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; +} + +static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio, + u8 prio_type, u8 pgid, u8 bw_pct, + u8 up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n"); +} + +static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n"); +} + +static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "prio = %d\n", prio); + + /** + * bw_pct ingnored - band-width percentage devision between user + * priorities within the same group is not + * standard and hence not supported + * + * prio_type igonred - priority levels within the same group are not + * standard and hence are not supported. According + * to the standard pgid 15 is dedicated to strict + * prioirty traffic (on the port level). 
+	 *
+	 * up_map ignored
+	 */
+	*up_map = *bw_pct = *prio_type = *pgid = 0;
+
+	if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+		return;
+
+	*pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
+					 int pgid, u8 *bw_pct)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
+
+	*bw_pct = 0;
+
+	if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+		return;
+
+	*bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
+					u8 *prio_type, u8 *pgid, u8 *bw_pct,
+					u8 *up_map)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+
+	*prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
+					 int pgid, u8 *bw_pct)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+
+	*bw_pct = 0;
+}
+
+static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+				    u8 setting)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
+
+	if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
+		return;
+
+	bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
+
+	if (setting)
+		bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+				    u8 *setting)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+
+	*setting = 0;
+
+	if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
+		return;
+
+	*setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
+}
+
+static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	int rc = 0;
+
+	DP(NETIF_MSG_LINK, "SET-ALL\n");
+
+	if (!bnx2x_dcbnl_set_valid(bp))
+		return 1;
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		netdev_err(bp->dev, "Handling parity error recovery. "
+				    "Try again later\n");
+		return 1;
+	}
+	if (netif_running(bp->dev)) {
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	}
+	DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
+	if (rc)
+		return 1;
+
+	return 0;
+}
+
+static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 rval = 0;
+
+	if (bp->dcb_state) {
+		switch (capid) {
+		case DCB_CAP_ATTR_PG:
+			*cap = true;
+			break;
+		case DCB_CAP_ATTR_PFC:
+			*cap = true;
+			break;
+		case DCB_CAP_ATTR_UP2TC:
+			*cap = false;
+			break;
+		case DCB_CAP_ATTR_PG_TCS:
+			*cap = 0x80;	/* 8 priorities for PGs */
+			break;
+		case DCB_CAP_ATTR_PFC_TCS:
+			*cap = 0x80;	/* 8 priorities for PFC */
+			break;
+		case DCB_CAP_ATTR_GSP:
+			*cap = true;
+			break;
+		case DCB_CAP_ATTR_BCN:
+			*cap = false;
+			break;
+		case DCB_CAP_ATTR_DCBX:
+			*cap = BNX2X_DCBX_CAPS;
+			break;
+		default:
+			rval = -EINVAL;
+			break;
+		}
+	} else
+		rval = -EINVAL;
+
+	DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
+	return rval;
+}
+
+static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 rval = 0;
+
+	DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
+
+	if (bp->dcb_state) {
+		switch (tcid) {
+		case DCB_NUMTCS_ATTR_PG:
+			*num = CHIP_IS_E3B0(bp) ?
DCBX_COS_MAX_NUM_E3B0 : + DCBX_COS_MAX_NUM_E2; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : + DCBX_COS_MAX_NUM_E2; + break; + default: + rval = -EINVAL; + break; + } + } else + rval = -EINVAL; + + return rval; +} + +static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num); + return -EINVAL; +} + +static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); + + if (!bp->dcb_state) + return 0; + + return bp->dcbx_local_feat.pfc.enabled; +} + +static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off"); + + if (!bnx2x_dcbnl_set_valid(bp)) + return; + + bp->dcbx_config_params.admin_pfc_tx_enable = + bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0); +} + +static void bnx2x_admin_app_set_ent( + struct bnx2x_admin_priority_app_table *app_ent, + u8 idtype, u16 idval, u8 up) +{ + app_ent->valid = 1; + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: + app_ent->traffic_type = TRAFFIC_TYPE_ETH; + break; + case DCB_APP_IDTYPE_PORTNUM: + app_ent->traffic_type = TRAFFIC_TYPE_PORT; + break; + default: + break; /* never gets here */ + } + app_ent->app_id = idval; + app_ent->priority = up; +} + +static bool bnx2x_admin_app_is_equal( + struct bnx2x_admin_priority_app_table *app_ent, + u8 idtype, u16 idval) +{ + if (!app_ent->valid) + return false; + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: + if (app_ent->traffic_type != TRAFFIC_TYPE_ETH) + return false; + break; + case DCB_APP_IDTYPE_PORTNUM: + if (app_ent->traffic_type != TRAFFIC_TYPE_PORT) + return false; + break; + default: + return false; + } + if (app_ent->app_id != idval) + return false; + + return true; +} + +static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) +{ + int i, ff; + + /* iterate over the app entries looking for idtype and idval */ + for (i = 0, ff = -1; i < 4; i++) { + struct bnx2x_admin_priority_app_table *app_ent = + &bp->dcbx_config_params.admin_priority_app_table[i]; + if (bnx2x_admin_app_is_equal(app_ent, idtype, idval)) + break; + + if (ff < 0 && !app_ent->valid) + ff = i; + } + if (i < 4) + /* if found overwrite up */ + bp->dcbx_config_params. 
+ admin_priority_app_table[i].priority = up; + else if (ff >= 0) + /* not found use first-free */ + bnx2x_admin_app_set_ent( + &bp->dcbx_config_params.admin_priority_app_table[ff], + idtype, idval, up); + else + /* app table is full */ + return -EBUSY; + + /* up configured, if not 0 make sure feature is enabled */ + if (up) + bp->dcbx_config_params.admin_application_priority_tx_enable = 1; + + return 0; +} + +static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, + u16 idval, u8 up) +{ + struct bnx2x *bp = netdev_priv(netdev); + + DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n", + idtype, idval, up); + + if (!bnx2x_dcbnl_set_valid(bp)) + return -EINVAL; + + /* verify idtype */ + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + return -EINVAL; + } + return bnx2x_set_admin_app_up(bp, idtype, idval, up); +} + +static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 state; + + state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE; + + if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF) + state |= DCB_CAP_DCBX_STATIC; + + return state; +} + +static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(NETIF_MSG_LINK, "state = %02x\n", state); + + /* set dcbx mode */ + + if ((state & BNX2X_DCBX_CAPS) != state) { + BNX2X_ERR("Requested DCBX mode %x is beyond advertised " + "capabilities\n", state); + return 1; + } + + if (bp->dcb_state != BNX2X_DCB_STATE_ON) { + BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n"); + return 1; + } + + if (state & DCB_CAP_DCBX_STATIC) + bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF; + else + bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON; + + bp->dcbx_mode_uset = true; + return 0; +} + +static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, + u8 *flags) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 rval = 0; + + DP(NETIF_MSG_LINK, "featid %d\n", featid); + + if (bp->dcb_state) { + *flags = 0; + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + if (bp->dcbx_local_feat.ets.enabled) + *flags |= DCB_FEATCFG_ENABLE; + if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) + *flags |= DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_PFC: + if (bp->dcbx_local_feat.pfc.enabled) + *flags |= DCB_FEATCFG_ENABLE; + if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | + DCBX_LOCAL_PFC_MISMATCH)) + *flags |= DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_APP: + if (bp->dcbx_local_feat.app.enabled) + *flags |= DCB_FEATCFG_ENABLE; + if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | + DCBX_LOCAL_APP_MISMATCH)) + *flags |= DCB_FEATCFG_ERROR; + break; + default: + rval = -EINVAL; + break; + } + } else + rval = -EINVAL; + + return rval; +} + +static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, + u8 flags) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 rval = 0; + + DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags); + + /* ignore the 'advertise' flag */ + if (bnx2x_dcbnl_set_valid(bp)) { + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + bp->dcbx_config_params.admin_ets_enable = + flags & DCB_FEATCFG_ENABLE ? 1 : 0; + bp->dcbx_config_params.admin_ets_willing = + flags & DCB_FEATCFG_WILLING ? 1 : 0; + break; + case DCB_FEATCFG_ATTR_PFC: + bp->dcbx_config_params.admin_pfc_enable = + flags & DCB_FEATCFG_ENABLE ? 1 : 0; + bp->dcbx_config_params.admin_pfc_willing = + flags & DCB_FEATCFG_WILLING ? 
1 : 0;
+			break;
+		case DCB_FEATCFG_ATTR_APP:
+			/* ignore enable, always enabled */
+			bp->dcbx_config_params.admin_app_priority_willing =
+				flags & DCB_FEATCFG_WILLING ? 1 : 0;
+			break;
+		default:
+			rval = -EINVAL;
+			break;
+		}
+	} else
+		rval = -EINVAL;
+
+	return rval;
+}
+
+static int bnx2x_peer_appinfo(struct net_device *netdev,
+			      struct dcb_peer_app_info *info, u16 *app_count)
+{
+	int i;
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(NETIF_MSG_LINK, "APP-INFO\n");
+
+	info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
+	info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
+	*app_count = 0;
+
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+		if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield &
+		    DCBX_APP_ENTRY_VALID)
+			(*app_count)++;
+	return 0;
+}
+
+static int bnx2x_peer_apptable(struct net_device *netdev,
+			       struct dcb_app *table)
+{
+	int i, j;
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(NETIF_MSG_LINK, "APP-TABLE\n");
+
+	for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+		struct dcbx_app_priority_entry *ent =
+			&bp->dcbx_remote_feat.app.app_pri_tbl[i];
+
+		if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+			table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+			table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent);
+			table[j++].protocol = ent->app_id;
+		}
+	}
+	return 0;
+}
+
+static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg)
+{
+	int i;
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0;
+
+	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
+		pg->pg_bw[i] =
+			DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i);
+		pg->prio_pg[i] =
+			DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i);
+	}
+	return 0;
+}
+
+static int bnx2x_cee_peer_getpfc(struct net_device *netdev,
+				 struct cee_pfc *pfc)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps;
+	pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap;
+	return 0;
+}
+
+const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
+	.getstate		= bnx2x_dcbnl_get_state,
+	.setstate		= bnx2x_dcbnl_set_state,
+	.getpermhwaddr		= bnx2x_dcbnl_get_perm_hw_addr,
+	.setpgtccfgtx		= bnx2x_dcbnl_set_pg_tccfg_tx,
+	.setpgbwgcfgtx		= bnx2x_dcbnl_set_pg_bwgcfg_tx,
+	.setpgtccfgrx		= bnx2x_dcbnl_set_pg_tccfg_rx,
+	.setpgbwgcfgrx		= bnx2x_dcbnl_set_pg_bwgcfg_rx,
+	.getpgtccfgtx		= bnx2x_dcbnl_get_pg_tccfg_tx,
+	.getpgbwgcfgtx		= bnx2x_dcbnl_get_pg_bwgcfg_tx,
+	.getpgtccfgrx		= bnx2x_dcbnl_get_pg_tccfg_rx,
+	.getpgbwgcfgrx		= bnx2x_dcbnl_get_pg_bwgcfg_rx,
+	.setpfccfg		= bnx2x_dcbnl_set_pfc_cfg,
+	.getpfccfg		= bnx2x_dcbnl_get_pfc_cfg,
+	.setall			= bnx2x_dcbnl_set_all,
+	.getcap			= bnx2x_dcbnl_get_cap,
+	.getnumtcs		= bnx2x_dcbnl_get_numtcs,
+	.setnumtcs		= bnx2x_dcbnl_set_numtcs,
+	.getpfcstate		= bnx2x_dcbnl_get_pfc_state,
+	.setpfcstate		= bnx2x_dcbnl_set_pfc_state,
+	.setapp			= bnx2x_dcbnl_set_app_up,
+	.getdcbx		= bnx2x_dcbnl_get_dcbx,
+	.setdcbx		= bnx2x_dcbnl_set_dcbx,
+	.getfeatcfg		= bnx2x_dcbnl_get_featcfg,
+	.setfeatcfg		= bnx2x_dcbnl_set_featcfg,
+	.peer_getappinfo	= bnx2x_peer_appinfo,
+	.peer_getapptable	= bnx2x_peer_apptable,
+	.cee_peer_getpg		= bnx2x_cee_peer_getpg,
+	.cee_peer_getpfc	= bnx2x_cee_peer_getpfc,
+};
+
+#endif /* BCM_DCBNL */
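The ops table above is only reachable once the stack can find it: a driver
publishes it through the net_device's dcbnl_ops field, which net/dcb/dcbnl.c
consumes via RTNETLINK. A minimal sketch of that wiring, assuming a
hypothetical helper called from the driver's probe path (example_attach_dcbnl
is not a function from this patch):

	/* Sketch only: how a driver publishes dcbnl callbacks. */
	#include <linux/netdevice.h>
	#include <net/dcbnl.h>

	extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;

	static void example_attach_dcbnl(struct net_device *dev)
	{
	#ifdef BCM_DCBNL
		/* makes the callbacks visible to the DCB netlink layer */
		dev->dcbnl_ops = &bnx2x_dcbnl_ops;
	#endif
	}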
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
new file mode 100644
index 0000000..2c6a3bc
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -0,0 +1,203 @@
+/* bnx2x_dcb.h: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein
+ * Written by: Dmitry Kravkov
+ *
+ */
+#ifndef BNX2X_DCB_H
+#define BNX2X_DCB_H
+
+#include "bnx2x_hsi.h"
+
+#define LLFC_DRIVER_TRAFFIC_TYPE_MAX	3 /* NW, iSCSI, FCoE */
+struct bnx2x_dcbx_app_params {
+	u32 enabled;
+	u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+};
+
+#define DCBX_COS_MAX_NUM_E2	DCBX_E2E3_MAX_NUM_COS
+/* bnx2x currently limits the number of supported COSes to 3
+ * (to be extended to 6) */
+#define BNX2X_MAX_COS_SUPPORT	3
+#define DCBX_COS_MAX_NUM_E3B0	BNX2X_MAX_COS_SUPPORT
+#define DCBX_COS_MAX_NUM	BNX2X_MAX_COS_SUPPORT
+
+struct bnx2x_dcbx_cos_params {
+	u32	bw_tbl;
+	u32	pri_bitmask;
+	/*
+	 * strict priority: valid values are 0..5; 0 is highest priority.
+	 * There can't be two COSes with the same priority.
+	 */
+	u8	strict;
+#define BNX2X_DCBX_STRICT_INVALID			DCBX_COS_MAX_NUM
+#define BNX2X_DCBX_STRICT_COS_HIGHEST			0
+#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp)	((sp) + 1)
+	u8	pauseable;
+};
+
+struct bnx2x_dcbx_pg_params {
+	u32 enabled;
+	u8 num_of_cos; /* valid COS entries */
+	struct bnx2x_dcbx_cos_params	cos_params[DCBX_COS_MAX_NUM];
+};
+
+struct bnx2x_dcbx_pfc_params {
+	u32 enabled;
+	u32 priority_non_pauseable_mask;
+};
+
+struct bnx2x_dcbx_port_params {
+	struct bnx2x_dcbx_pfc_params pfc;
+	struct bnx2x_dcbx_pg_params  ets;
+	struct bnx2x_dcbx_app_params app;
+};
+
+#define BNX2X_DCBX_CONFIG_INV_VALUE			(0xFFFFFFFF)
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE		0
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE		1
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID	(BNX2X_DCBX_CONFIG_INV_VALUE)
+#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
+				  (bp)->dcbx_port_params.ets.enabled)
+
+struct bnx2x_config_lldp_params {
+	u32 overwrite_settings;
+	u32 msg_tx_hold;
+	u32 msg_fast_tx;
+	u32 tx_credit_max;
+	u32 msg_tx_interval;
+	u32 tx_fast;
+};
+
+struct bnx2x_admin_priority_app_table {
+	u32 valid;
+	u32 priority;
+#define INVALID_TRAFFIC_TYPE_PRIORITY	(0xFFFFFFFF)
+	u32 traffic_type;
+#define TRAFFIC_TYPE_ETH		0
+#define TRAFFIC_TYPE_PORT		1
+	u32 app_id;
+};
+
+struct bnx2x_config_dcbx_params {
+	u32 overwrite_settings;
+	u32 admin_dcbx_version;
+	u32 admin_ets_enable;
+	u32 admin_pfc_enable;
+	u32 admin_tc_supported_tx_enable;
+	u32 admin_ets_configuration_tx_enable;
+	u32 admin_ets_recommendation_tx_enable;
+	u32 admin_pfc_tx_enable;
+	u32 admin_application_priority_tx_enable;
+	u32 admin_ets_willing;
+	u32 admin_ets_reco_valid;
+	u32 admin_pfc_willing;
+	u32 admin_app_priority_willing;
+	u32 admin_configuration_bw_precentage[8];
+	u32 admin_configuration_ets_pg[8];
+	u32 admin_recommendation_bw_precentage[8];
+	u32 admin_recommendation_ets_pg[8];
+	u32 admin_pfc_bitmap;
+	struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+	u32 admin_default_priority;
+};
+
+#define GET_FLAGS(flags, bits)		((flags) & (bits))
+#define SET_FLAGS(flags, bits)		((flags) |= (bits))
+#define RESET_FLAGS(flags, bits)	((flags) &= ~(bits))
+
+enum {
+	DCBX_READ_LOCAL_MIB,
+	DCBX_READ_REMOTE_MIB
+};
+
+#define ETH_TYPE_FCOE		(0x8906)
+#define TCP_PORT_ISCSI		(0xCBC)
+
+#define PFC_VALUE_FRAME_SIZE				(512)
+#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed)	\
+				((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
+
+#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD		130
+#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD		170
+
+
+
+struct cos_entry_help_data {
+	u32	pri_join_mask;
+	u32	cos_bw;
+	u8	strict;
+	bool	pausable;
+};
+
+struct cos_help_data {
+	struct cos_entry_help_data	data[DCBX_COS_MAX_NUM];
+	u8				num_of_cos;
+};
+
+#define DCBX_ILLEGAL_PG				(0xFF)
+#define DCBX_PFC_PRI_MASK			(0xFF)
+#define DCBX_STRICT_PRIORITY			(15)
+#define DCBX_INVALID_COS_BW			(0xFFFFFFFF)
+#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp)		\
+			((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
+#define DCBX_PFC_PRI_PAUSE_MASK(bp)		\
+					((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
+#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri)	\
+				((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
+#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri)	\
+			(DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
+#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri)	\
+			(0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
+#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri)	\
+			((pg_pri) == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
+			((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri)	\
+			(!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
+			   IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
+
+
+struct pg_entry_help_data {
+	u8	num_of_dif_pri;
+	u8	pg;
+	u32	pg_priority;
+};
+
+struct pg_help_data {
+	struct pg_entry_help_data	data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+	u8				num_of_pg;
+};
+
+/* forward DCB/PFC related declarations */
+struct bnx2x;
+void bnx2x_dcbx_update(struct work_struct *work);
+void bnx2x_dcbx_init_params(struct bnx2x *bp);
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
+
+enum {
+	BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
+	BNX2X_DCBX_STATE_TX_PAUSED,
+	BNX2X_DCBX_STATE_TX_RELEASED
+};
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
+/* DCB netlink */
+#ifdef BCM_DCBNL
+extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
+#endif /* BCM_DCBNL */
+
+#endif /* BNX2X_DCB_H */
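The DCBX_PFC_PRI_*PAUSE macros above split an 8-bit priority mask against the
chip's non-pauseable mask; a priority set is "mixed" when it has bits on both
sides of that split. A small standalone sketch of the partition, with a
hypothetical mask passed in directly instead of the bp-based lookup the
driver's macros use:

	/* Standalone sketch of the PFC pause/non-pause partition. */
	#include <stdio.h>

	/* like DCBX_PFC_PRI_GET_PAUSE: bits outside the non-pause mask */
	static unsigned int get_pause(unsigned int non_pause_mask,
				      unsigned int pri)
	{
		return pri & (0xffu & ~non_pause_mask);
	}

	/* like DCBX_PFC_PRI_GET_NON_PAUSE: bits inside the mask */
	static unsigned int get_non_pause(unsigned int non_pause_mask,
					  unsigned int pri)
	{
		return pri & non_pause_mask;
	}

	int main(void)
	{
		unsigned int non_pause_mask = 0x0c; /* hypothetical: prios 2,3 */
		unsigned int pri = 0x0f;	    /* prios 0..3 requested */

		/* mixed: neither "only pause" nor "only non-pause" */
		printf("pause part 0x%x, non-pause part 0x%x\n",
		       get_pause(non_pause_mask, pri),
		       get_non_pause(non_pause_mask, pri));
		return 0;
	}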
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
new file mode 100644
index 0000000..b983825
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -0,0 +1,1156 @@
+/* bnx2x_dump.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ */
+
+
+/* This struct holds a signature to ensure the dump returned from the driver
+ * matches the metadata file inserted into grc_dump.tcl.
+ * The signature is the time stamp, diag version and grc_dump version.
+ */
+
+#ifndef BNX2X_DUMP_H
+#define BNX2X_DUMP_H
+
+
+
+/* definitions */
+#define XSTORM_WAITP_ADDR	0x2b8a80
+#define TSTORM_WAITP_ADDR	0x1b8a80
+#define USTORM_WAITP_ADDR	0x338a80
+#define CSTORM_WAITP_ADDR	0x238a80
+#define TSTORM_CAM_MODE		0x1B1440
+
+#define MAX_TIMER_PENDING	200
+#define TIMER_SCAN_DONT_CARE	0xFF
+#define RI_E1			0x1
+#define RI_E1H			0x2
+#define RI_E2			0x4
+#define RI_E3			0x8
+#define RI_E3B0			0x10
+#define RI_ONLINE		0x100
+#define RI_OFFLINE		0x0
+#define RI_PATH0_DUMP		0x200
+#define RI_PATH1_DUMP		0x400
+
+#define RI_E1_ONLINE		(RI_E1 | RI_ONLINE)
+#define RI_E1H_ONLINE		(RI_E1H | RI_ONLINE)
+#define RI_E1E1H_ONLINE		(RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E2_ONLINE		(RI_E2 | RI_ONLINE)
+#define RI_E1E2_ONLINE		(RI_E1 | RI_E2 | RI_ONLINE)
+#define RI_E1HE2_ONLINE		(RI_E1H | RI_E2 | RI_ONLINE)
+#define RI_E1E1HE2_ONLINE	(RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+#define RI_E3_ONLINE		(RI_E3 | RI_ONLINE)
+#define RI_E1E3_ONLINE		(RI_E1 | RI_E3 | RI_ONLINE)
+#define RI_E1HE3_ONLINE		(RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E1E1HE3_ONLINE	(RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E2E3_ONLINE		(RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1E2E3_ONLINE	(RI_E1 | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1HE2E3_ONLINE	(RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1E1HE2E3_ONLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E3B0_ONLINE		(RI_E3B0 | RI_ONLINE)
+#define RI_E1E3B0_ONLINE	(RI_E1 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE3B0_ONLINE	(RI_E1H | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE3B0_ONLINE	(RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE)
+#define RI_E2E3B0_ONLINE	(RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E2E3B0_ONLINE	(RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE2E3B0_ONLINE	(RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE2E3B0_ONLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E3E3B0_ONLINE	(RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E3E3B0_ONLINE	(RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE3E3B0_ONLINE	(RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE3E3B0_ONLINE	(RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E2E3E3B0_ONLINE	(RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E2E3E3B0_ONLINE	(RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE2E3E3B0_ONLINE	(RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE2E3E3B0_ONLINE \
+	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1_OFFLINE		(RI_E1 | RI_OFFLINE)
+#define RI_E1H_OFFLINE		(RI_E1H | RI_OFFLINE)
+#define RI_E1E1H_OFFLINE	(RI_E1 | RI_E1H | RI_OFFLINE)
+#define RI_E2_OFFLINE		(RI_E2 | RI_OFFLINE)
+#define RI_E1E2_OFFLINE		(RI_E1 | RI_E2 | RI_OFFLINE)
+#define RI_E1HE2_OFFLINE	(RI_E1H | RI_E2 | RI_OFFLINE)
+#define RI_E1E1HE2_OFFLINE	(RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE)
+#define RI_E3_OFFLINE		(RI_E3 | RI_OFFLINE)
+#define RI_E1E3_OFFLINE		(RI_E1 | RI_E3 | RI_OFFLINE)
+#define RI_E1HE3_OFFLINE	(RI_E1H | RI_E3 | RI_OFFLINE)
+#define RI_E1E1HE3_OFFLINE	(RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE)
+#define RI_E2E3_OFFLINE		(RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1E2E3_OFFLINE	(RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1HE2E3_OFFLINE	(RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1E1HE2E3_OFFLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E3B0_OFFLINE		(RI_E3B0 | RI_OFFLINE)
+#define RI_E1E3B0_OFFLINE
(RI_E1 | RI_E3B0 | RI_OFFLINE) +#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE) +#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE) +#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE) +#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE) +#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E1HE2E3E3B0_OFFLINE \ + (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE +#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE + +#define DBG_DMP_TRACE_BUFFER_SIZE 0x800 +#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \ + ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE) + +struct dump_sign { + u32 time_stamp; + u32 diag_ver; + u32 grc_dump_ver; +}; + +struct dump_hdr { + u32 hdr_size; /* in dwords, excluding this field */ + struct dump_sign dump_sign; + u32 xstorm_waitp; + u32 tstorm_waitp; + u32 ustorm_waitp; + u32 cstorm_waitp; + u16 info; + u8 idle_chk; + u8 reserved; +}; + +struct reg_addr { + u32 addr; + u32 size; + u16 info; +}; + +struct wreg_addr { + u32 addr; + u32 size; + u32 read_regs_count; + const u32 *read_regs; + u16 info; +}; + +static const struct reg_addr reg_addrs[] = { + { 0x2000, 341, RI_ALL_ONLINE }, + { 0x2800, 103, RI_ALL_ONLINE }, + { 0x3000, 287, RI_ALL_ONLINE }, + { 0x3800, 331, RI_ALL_ONLINE }, + { 0x8800, 6, RI_ALL_ONLINE }, + { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x9000, 147, RI_E2E3E3B0_ONLINE }, + { 0x924c, 1, RI_E2_ONLINE }, + { 0x9250, 16, RI_E2E3E3B0_ONLINE }, + { 0x9400, 33, RI_E2E3E3B0_ONLINE }, + { 0x9484, 5, RI_E3E3B0_ONLINE }, + { 0xa000, 27, RI_ALL_ONLINE }, + { 0xa06c, 1, RI_E1E1H_ONLINE }, + { 0xa070, 71, RI_ALL_ONLINE }, + { 0xa18c, 4, RI_E1E1H_ONLINE }, + { 0xa19c, 62, RI_ALL_ONLINE }, + { 0xa294, 2, RI_E1E1H_ONLINE }, + { 0xa29c, 2, RI_ALL_ONLINE }, + { 0xa2a4, 2, RI_E1E1HE2_ONLINE }, + { 0xa2ac, 52, RI_ALL_ONLINE }, + { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3b8, 2, RI_E3E3B0_ONLINE }, + { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa400, 40, RI_ALL_ONLINE }, + { 0xa4a0, 1, RI_E1E1HE2_ONLINE }, + { 0xa4a4, 2, RI_ALL_ONLINE }, + { 0xa4ac, 2, RI_E1E1H_ONLINE }, + { 0xa4b4, 1, RI_E1E1HE2_ONLINE }, + { 0xa4b8, 2, RI_E1E1H_ONLINE }, + { 0xa4c0, 3, RI_ALL_ONLINE }, + { 0xa4cc, 5, RI_E1E1H_ONLINE }, + { 0xa4e0, 3, RI_ALL_ONLINE }, + { 0xa4fc, 2, RI_ALL_ONLINE }, + { 0xa504, 1, RI_E1E1H_ONLINE }, + { 0xa508, 3, RI_ALL_ONLINE }, + { 0xa518, 1, RI_ALL_ONLINE }, + { 0xa520, 1, RI_ALL_ONLINE }, + { 0xa528, 1, RI_ALL_ONLINE }, + { 0xa530, 1, RI_ALL_ONLINE }, + { 0xa538, 1, RI_ALL_ONLINE }, + { 0xa540, 1, RI_ALL_ONLINE }, + { 0xa548, 1, RI_E1E1H_ONLINE }, + { 0xa550, 1, RI_E1E1H_ONLINE }, + { 0xa558, 1, RI_E1E1H_ONLINE }, + { 
0xa560, 1, RI_E1E1H_ONLINE }, + { 0xa568, 1, RI_E1E1H_ONLINE }, + { 0xa570, 1, RI_ALL_ONLINE }, + { 0xa580, 1, RI_ALL_ONLINE }, + { 0xa590, 1, RI_ALL_ONLINE }, + { 0xa5a0, 1, RI_E1E1HE2_ONLINE }, + { 0xa5c0, 1, RI_ALL_ONLINE }, + { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa5f8, 1, RI_E1HE2_ONLINE }, + { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE }, + { 0xa620, 6, RI_E2E3E3B0_ONLINE }, + { 0xa638, 20, RI_E2_ONLINE }, + { 0xa688, 42, RI_E2E3E3B0_ONLINE }, + { 0xa730, 1, RI_E2_ONLINE }, + { 0xa734, 2, RI_E2E3E3B0_ONLINE }, + { 0xa73c, 4, RI_E2_ONLINE }, + { 0xa74c, 5, RI_E2E3E3B0_ONLINE }, + { 0xa760, 5, RI_E2_ONLINE }, + { 0xa774, 7, RI_E2E3E3B0_ONLINE }, + { 0xa790, 15, RI_E2_ONLINE }, + { 0xa7cc, 4, RI_E2E3E3B0_ONLINE }, + { 0xa7e0, 6, RI_E3E3B0_ONLINE }, + { 0xa800, 18, RI_E2_ONLINE }, + { 0xa848, 33, RI_E2E3E3B0_ONLINE }, + { 0xa8cc, 2, RI_E3E3B0_ONLINE }, + { 0xa8d4, 4, RI_E2E3E3B0_ONLINE }, + { 0xa8e4, 1, RI_E3E3B0_ONLINE }, + { 0xa8e8, 1, RI_E2E3E3B0_ONLINE }, + { 0xa8f0, 1, RI_E2E3E3B0_ONLINE }, + { 0xa8f8, 30, RI_E3E3B0_ONLINE }, + { 0xa974, 73, RI_E3E3B0_ONLINE }, + { 0xac30, 1, RI_E3E3B0_ONLINE }, + { 0xac40, 1, RI_E3E3B0_ONLINE }, + { 0xac50, 1, RI_E3E3B0_ONLINE }, + { 0xac60, 1, RI_E3B0_ONLINE }, + { 0x10000, 9, RI_ALL_ONLINE }, + { 0x10024, 1, RI_E1E1HE2_ONLINE }, + { 0x10028, 5, RI_ALL_ONLINE }, + { 0x1003c, 6, RI_E1E1HE2_ONLINE }, + { 0x10054, 20, RI_ALL_ONLINE }, + { 0x100a4, 4, RI_E1E1HE2_ONLINE }, + { 0x100b4, 11, RI_ALL_ONLINE }, + { 0x100e0, 4, RI_E1E1HE2_ONLINE }, + { 0x100f0, 8, RI_ALL_ONLINE }, + { 0x10110, 6, RI_E1E1HE2_ONLINE }, + { 0x10128, 110, RI_ALL_ONLINE }, + { 0x102e0, 4, RI_E1E1HE2_ONLINE }, + { 0x102f0, 18, RI_ALL_ONLINE }, + { 0x10338, 20, RI_E1E1HE2_ONLINE }, + { 0x10388, 10, RI_ALL_ONLINE }, + { 0x10400, 6, RI_E1E1HE2_ONLINE }, + { 0x10418, 6, RI_ALL_ONLINE }, + { 0x10430, 10, RI_E1E1HE2_ONLINE }, + { 0x10458, 22, RI_ALL_ONLINE }, + { 0x104b0, 12, RI_E1E1HE2_ONLINE }, + { 0x104e0, 1, RI_ALL_ONLINE }, + { 0x104e8, 2, RI_ALL_ONLINE }, + { 0x104f4, 2, RI_ALL_ONLINE }, + { 0x10500, 146, RI_ALL_ONLINE }, + { 0x10750, 2, RI_E1E1HE2_ONLINE }, + { 0x10760, 2, RI_E1E1HE2_ONLINE }, + { 0x10770, 2, RI_E1E1HE2_ONLINE }, + { 0x10780, 2, RI_E1E1HE2_ONLINE }, + { 0x10790, 2, RI_ALL_ONLINE }, + { 0x107a0, 2, RI_E1E1HE2_ONLINE }, + { 0x107b0, 2, RI_E1E1HE2_ONLINE }, + { 0x107c0, 2, RI_E1E1HE2_ONLINE }, + { 0x107d0, 2, RI_E1E1HE2_ONLINE }, + { 0x107e0, 2, RI_ALL_ONLINE }, + { 0x10880, 2, RI_ALL_ONLINE }, + { 0x10900, 2, RI_ALL_ONLINE }, + { 0x16000, 1, RI_E1HE2_ONLINE }, + { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE }, + { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE }, + { 0x16090, 4, RI_E1HE2E3_ONLINE }, + { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE }, + { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE }, + { 0x160dc, 2, RI_E1HE2_ONLINE }, + { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE }, + { 0x1610c, 2, RI_E1HE2_ONLINE }, + { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE }, + { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE }, + { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE }, + { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x18010, 35, RI_E2E3E3B0_ONLINE }, + { 0x180a4, 2, RI_E2E3E3B0_ONLINE }, + { 0x180c0, 9, RI_E2E3E3B0_ONLINE }, + { 0x180e4, 1, RI_E2E3_ONLINE }, + { 0x180e8, 2, RI_E2E3E3B0_ONLINE }, + { 0x180f0, 1, RI_E2E3_ONLINE }, + { 0x180f4, 79, RI_E2E3E3B0_ONLINE }, + { 0x18230, 1, RI_E2E3_ONLINE }, + { 0x18234, 2, RI_E2E3E3B0_ONLINE }, + { 0x1823c, 1, RI_E2E3_ONLINE }, + { 0x18240, 13, RI_E2E3E3B0_ONLINE }, + { 0x18274, 1, 
RI_E2_ONLINE }, + { 0x18278, 81, RI_E2E3E3B0_ONLINE }, + { 0x18440, 63, RI_E2E3E3B0_ONLINE }, + { 0x18570, 42, RI_E3E3B0_ONLINE }, + { 0x18618, 25, RI_E3B0_ONLINE }, + { 0x18680, 44, RI_E3B0_ONLINE }, + { 0x18748, 12, RI_E3B0_ONLINE }, + { 0x18788, 1, RI_E3B0_ONLINE }, + { 0x1879c, 6, RI_E3B0_ONLINE }, + { 0x187c4, 51, RI_E3B0_ONLINE }, + { 0x18a00, 48, RI_E3B0_ONLINE }, + { 0x20000, 24, RI_ALL_ONLINE }, + { 0x20060, 8, RI_ALL_ONLINE }, + { 0x20080, 94, RI_ALL_ONLINE }, + { 0x201f8, 1, RI_E1E1H_ONLINE }, + { 0x201fc, 1, RI_ALL_ONLINE }, + { 0x20200, 1, RI_E1E1H_ONLINE }, + { 0x20204, 1, RI_ALL_ONLINE }, + { 0x20208, 1, RI_E1E1H_ONLINE }, + { 0x2020c, 39, RI_ALL_ONLINE }, + { 0x202c8, 1, RI_E2E3E3B0_ONLINE }, + { 0x202d8, 4, RI_E2E3E3B0_ONLINE }, + { 0x202f0, 1, RI_E3B0_ONLINE }, + { 0x20400, 2, RI_ALL_ONLINE }, + { 0x2040c, 8, RI_ALL_ONLINE }, + { 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE }, + { 0x20480, 1, RI_ALL_ONLINE }, + { 0x20500, 1, RI_ALL_ONLINE }, + { 0x20600, 1, RI_ALL_ONLINE }, + { 0x28000, 1, RI_ALL_ONLINE }, + { 0x28004, 8191, RI_ALL_OFFLINE }, + { 0x30000, 1, RI_ALL_ONLINE }, + { 0x30004, 16383, RI_ALL_OFFLINE }, + { 0x40000, 98, RI_ALL_ONLINE }, + { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE }, + { 0x401c8, 1, RI_E1H_ONLINE }, + { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x401d4, 2, RI_E2E3E3B0_ONLINE }, + { 0x40200, 4, RI_ALL_ONLINE }, + { 0x40220, 6, RI_E2E3E3B0_ONLINE }, + { 0x40238, 8, RI_E2E3_ONLINE }, + { 0x40258, 4, RI_E2E3E3B0_ONLINE }, + { 0x40268, 2, RI_E3E3B0_ONLINE }, + { 0x40270, 17, RI_E3B0_ONLINE }, + { 0x40400, 43, RI_ALL_ONLINE }, + { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE }, + { 0x404e0, 1, RI_E2E3E3B0_ONLINE }, + { 0x40500, 2, RI_ALL_ONLINE }, + { 0x40510, 2, RI_ALL_ONLINE }, + { 0x40520, 2, RI_ALL_ONLINE }, + { 0x40530, 2, RI_ALL_ONLINE }, + { 0x40540, 2, RI_ALL_ONLINE }, + { 0x40550, 10, RI_E2E3E3B0_ONLINE }, + { 0x40610, 2, RI_E2E3E3B0_ONLINE }, + { 0x42000, 164, RI_ALL_ONLINE }, + { 0x422c0, 4, RI_E2E3E3B0_ONLINE }, + { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE }, + { 0x422e8, 1, RI_E2E3E3B0_ONLINE }, + { 0x42400, 49, RI_ALL_ONLINE }, + { 0x424c8, 38, RI_ALL_ONLINE }, + { 0x42568, 2, RI_ALL_ONLINE }, + { 0x42640, 5, RI_E2E3E3B0_ONLINE }, + { 0x42800, 1, RI_ALL_ONLINE }, + { 0x50000, 1, RI_ALL_ONLINE }, + { 0x50004, 19, RI_ALL_ONLINE }, + { 0x50050, 8, RI_ALL_ONLINE }, + { 0x50070, 88, RI_ALL_ONLINE }, + { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE }, + { 0x50200, 2, RI_ALL_ONLINE }, + { 0x5020c, 7, RI_ALL_ONLINE }, + { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE }, + { 0x50240, 1, RI_ALL_ONLINE }, + { 0x50280, 1, RI_ALL_ONLINE }, + { 0x50300, 1, RI_E2E3E3B0_ONLINE }, + { 0x5030c, 1, RI_E2E3E3B0_ONLINE }, + { 0x50318, 1, RI_E2E3E3B0_ONLINE }, + { 0x5031c, 1, RI_E2E3E3B0_ONLINE }, + { 0x50320, 2, RI_E2E3E3B0_ONLINE }, + { 0x50330, 1, RI_E3B0_ONLINE }, + { 0x52000, 1, RI_ALL_ONLINE }, + { 0x54000, 1, RI_ALL_ONLINE }, + { 0x54004, 3327, RI_ALL_OFFLINE }, + { 0x58000, 1, RI_ALL_ONLINE }, + { 0x58004, 8191, RI_E1E1H_OFFLINE }, + { 0x60000, 26, RI_ALL_ONLINE }, + { 0x60068, 8, RI_E1E1H_ONLINE }, + { 0x60088, 12, RI_ALL_ONLINE }, + { 0x600b8, 9, RI_E1E1H_ONLINE }, + { 0x600dc, 1, RI_ALL_ONLINE }, + { 0x600e0, 5, RI_E1E1H_ONLINE }, + { 0x600f4, 1, RI_E1E1HE2_ONLINE }, + { 0x600f8, 1, RI_E1E1H_ONLINE }, + { 0x600fc, 8, RI_ALL_ONLINE }, + { 0x6013c, 24, RI_E1H_ONLINE }, + { 0x6019c, 2, RI_E2E3E3B0_ONLINE }, + { 0x601ac, 18, RI_E2E3E3B0_ONLINE }, + { 0x60200, 1, RI_ALL_ONLINE }, + { 0x60204, 2, RI_ALL_OFFLINE }, + { 0x60210, 13, RI_E2E3E3B0_ONLINE }, + { 0x60244, 16, RI_E3B0_ONLINE }, + { 0x61000, 1, 
RI_ALL_ONLINE }, + { 0x61004, 511, RI_ALL_OFFLINE }, + { 0x61800, 512, RI_E3E3B0_OFFLINE }, + { 0x70000, 8, RI_ALL_ONLINE }, + { 0x70020, 8184, RI_ALL_OFFLINE }, + { 0x78000, 8192, RI_E3E3B0_OFFLINE }, + { 0x85000, 3, RI_ALL_ONLINE }, + { 0x8501c, 7, RI_ALL_ONLINE }, + { 0x85048, 1, RI_ALL_ONLINE }, + { 0x85200, 32, RI_ALL_ONLINE }, + { 0xb0000, 16384, RI_E1H_ONLINE }, + { 0xc1000, 7, RI_ALL_ONLINE }, + { 0xc103c, 2, RI_E2E3E3B0_ONLINE }, + { 0xc1800, 2, RI_ALL_ONLINE }, + { 0xc2000, 164, RI_ALL_ONLINE }, + { 0xc22c0, 5, RI_E2E3E3B0_ONLINE }, + { 0xc22d8, 4, RI_E2E3E3B0_ONLINE }, + { 0xc2400, 49, RI_ALL_ONLINE }, + { 0xc24c8, 38, RI_ALL_ONLINE }, + { 0xc2568, 2, RI_ALL_ONLINE }, + { 0xc2600, 1, RI_ALL_ONLINE }, + { 0xc4000, 165, RI_ALL_ONLINE }, + { 0xc42d8, 2, RI_E2E3E3B0_ONLINE }, + { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE }, + { 0xc42fc, 1, RI_E2E3E3B0_ONLINE }, + { 0xc4400, 51, RI_ALL_ONLINE }, + { 0xc44d0, 38, RI_ALL_ONLINE }, + { 0xc4570, 2, RI_ALL_ONLINE }, + { 0xc4578, 5, RI_E2E3E3B0_ONLINE }, + { 0xc4600, 1, RI_ALL_ONLINE }, + { 0xd0000, 19, RI_ALL_ONLINE }, + { 0xd004c, 8, RI_ALL_ONLINE }, + { 0xd006c, 91, RI_ALL_ONLINE }, + { 0xd01fc, 1, RI_E2E3E3B0_ONLINE }, + { 0xd0200, 2, RI_ALL_ONLINE }, + { 0xd020c, 7, RI_ALL_ONLINE }, + { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE }, + { 0xd0280, 1, RI_ALL_ONLINE }, + { 0xd0300, 1, RI_ALL_ONLINE }, + { 0xd0400, 1, RI_ALL_ONLINE }, + { 0xd0818, 1, RI_E3B0_ONLINE }, + { 0xd4000, 1, RI_ALL_ONLINE }, + { 0xd4004, 2559, RI_ALL_OFFLINE }, + { 0xd8000, 1, RI_ALL_ONLINE }, + { 0xd8004, 8191, RI_ALL_OFFLINE }, + { 0xe0000, 21, RI_ALL_ONLINE }, + { 0xe0054, 8, RI_ALL_ONLINE }, + { 0xe0074, 49, RI_ALL_ONLINE }, + { 0xe0138, 1, RI_E1E1H_ONLINE }, + { 0xe013c, 35, RI_ALL_ONLINE }, + { 0xe01f4, 1, RI_E2_ONLINE }, + { 0xe01f8, 1, RI_E2E3E3B0_ONLINE }, + { 0xe0200, 2, RI_ALL_ONLINE }, + { 0xe020c, 8, RI_ALL_ONLINE }, + { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE }, + { 0xe0280, 1, RI_ALL_ONLINE }, + { 0xe0300, 1, RI_ALL_ONLINE }, + { 0xe0400, 1, RI_E3B0_ONLINE }, + { 0xe1000, 1, RI_ALL_ONLINE }, + { 0xe2000, 1, RI_ALL_ONLINE }, + { 0xe2004, 2047, RI_ALL_OFFLINE }, + { 0xf0000, 1, RI_ALL_ONLINE }, + { 0xf0004, 16383, RI_ALL_OFFLINE }, + { 0x101000, 12, RI_ALL_ONLINE }, + { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x101054, 3, RI_E2E3E3B0_ONLINE }, + { 0x101100, 1, RI_ALL_ONLINE }, + { 0x101800, 8, RI_ALL_ONLINE }, + { 0x102000, 18, RI_ALL_ONLINE }, + { 0x102068, 6, RI_E2E3E3B0_ONLINE }, + { 0x102080, 17, RI_ALL_ONLINE }, + { 0x1020c8, 8, RI_E1H_ONLINE }, + { 0x1020e8, 9, RI_E2E3E3B0_ONLINE }, + { 0x102400, 1, RI_ALL_ONLINE }, + { 0x103000, 26, RI_ALL_ONLINE }, + { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE }, + { 0x1030ac, 2, RI_E2E3E3B0_ONLINE }, + { 0x1030b4, 1, RI_E2_ONLINE }, + { 0x1030b8, 7, RI_E2E3E3B0_ONLINE }, + { 0x1030d8, 8, RI_E2E3E3B0_ONLINE }, + { 0x103400, 1, RI_E2E3E3B0_ONLINE }, + { 0x103404, 135, RI_E2E3E3B0_OFFLINE }, + { 0x103800, 8, RI_ALL_ONLINE }, + { 0x104000, 63, RI_ALL_ONLINE }, + { 0x10411c, 16, RI_E2E3E3B0_ONLINE }, + { 0x104200, 17, RI_ALL_ONLINE }, + { 0x104400, 64, RI_ALL_ONLINE }, + { 0x104500, 192, RI_ALL_OFFLINE }, + { 0x104800, 64, RI_ALL_ONLINE }, + { 0x104900, 192, RI_ALL_OFFLINE }, + { 0x105000, 256, RI_ALL_ONLINE }, + { 0x105400, 768, RI_ALL_OFFLINE }, + { 0x107000, 7, RI_E2E3E3B0_ONLINE }, + { 0x10701c, 1, RI_E3E3B0_ONLINE }, + { 0x108000, 33, RI_E1E1H_ONLINE }, + { 0x1080ac, 5, RI_E1H_ONLINE }, + { 0x108100, 5, RI_E1E1H_ONLINE }, + { 0x108120, 5, RI_E1E1H_ONLINE }, + { 0x108200, 74, RI_E1E1H_ONLINE }, + { 0x108400, 74, RI_E1E1H_ONLINE }, + { 
0x108800, 152, RI_E1E1H_ONLINE }, + { 0x110000, 111, RI_E2E3E3B0_ONLINE }, + { 0x1101dc, 1, RI_E3E3B0_ONLINE }, + { 0x110200, 4, RI_E2E3E3B0_ONLINE }, + { 0x120000, 2, RI_ALL_ONLINE }, + { 0x120008, 4, RI_ALL_ONLINE }, + { 0x120018, 3, RI_ALL_ONLINE }, + { 0x120024, 4, RI_ALL_ONLINE }, + { 0x120034, 3, RI_ALL_ONLINE }, + { 0x120040, 4, RI_ALL_ONLINE }, + { 0x120050, 3, RI_ALL_ONLINE }, + { 0x12005c, 4, RI_ALL_ONLINE }, + { 0x12006c, 3, RI_ALL_ONLINE }, + { 0x120078, 4, RI_ALL_ONLINE }, + { 0x120088, 3, RI_ALL_ONLINE }, + { 0x120094, 4, RI_ALL_ONLINE }, + { 0x1200a4, 3, RI_ALL_ONLINE }, + { 0x1200b0, 4, RI_ALL_ONLINE }, + { 0x1200c0, 3, RI_ALL_ONLINE }, + { 0x1200cc, 4, RI_ALL_ONLINE }, + { 0x1200dc, 3, RI_ALL_ONLINE }, + { 0x1200e8, 4, RI_ALL_ONLINE }, + { 0x1200f8, 3, RI_ALL_ONLINE }, + { 0x120104, 4, RI_ALL_ONLINE }, + { 0x120114, 1, RI_ALL_ONLINE }, + { 0x120118, 22, RI_ALL_ONLINE }, + { 0x120170, 2, RI_E1E1H_ONLINE }, + { 0x120178, 243, RI_ALL_ONLINE }, + { 0x120544, 4, RI_E1E1H_ONLINE }, + { 0x120554, 6, RI_ALL_ONLINE }, + { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE }, + { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE }, + { 0x1205f4, 1, RI_E1HE2_ONLINE }, + { 0x1205f8, 4, RI_E2E3E3B0_ONLINE }, + { 0x120618, 1, RI_E2E3E3B0_ONLINE }, + { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE }, + { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE }, + { 0x120698, 3, RI_E2E3E3B0_ONLINE }, + { 0x1206a4, 1, RI_E2_ONLINE }, + { 0x1206a8, 1, RI_E2E3E3B0_ONLINE }, + { 0x1206b0, 75, RI_E2E3E3B0_ONLINE }, + { 0x1207dc, 1, RI_E2_ONLINE }, + { 0x1207fc, 1, RI_E2E3E3B0_ONLINE }, + { 0x12080c, 65, RI_ALL_ONLINE }, + { 0x120910, 7, RI_E2E3E3B0_ONLINE }, + { 0x120930, 9, RI_E2E3E3B0_ONLINE }, + { 0x12095c, 37, RI_E3E3B0_ONLINE }, + { 0x120a00, 2, RI_E1E1HE2_ONLINE }, + { 0x120b00, 1, RI_E3E3B0_ONLINE }, + { 0x122000, 2, RI_ALL_ONLINE }, + { 0x122008, 2046, RI_E1_OFFLINE }, + { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE }, + { 0x130000, 35, RI_E2E3E3B0_ONLINE }, + { 0x130100, 29, RI_E2E3E3B0_ONLINE }, + { 0x130180, 1, RI_E2E3E3B0_ONLINE }, + { 0x130200, 1, RI_E2E3E3B0_ONLINE }, + { 0x130280, 1, RI_E2E3E3B0_ONLINE }, + { 0x130300, 5, RI_E2E3E3B0_ONLINE }, + { 0x130380, 1, RI_E2E3E3B0_ONLINE }, + { 0x130400, 1, RI_E2E3E3B0_ONLINE }, + { 0x130480, 5, RI_E2E3E3B0_ONLINE }, + { 0x130800, 72, RI_E2E3E3B0_ONLINE }, + { 0x131000, 136, RI_E2E3E3B0_ONLINE }, + { 0x132000, 148, RI_E2E3E3B0_ONLINE }, + { 0x134000, 544, RI_E2E3E3B0_ONLINE }, + { 0x140000, 1, RI_ALL_ONLINE }, + { 0x140004, 9, RI_E1E1HE2E3_ONLINE }, + { 0x140028, 8, RI_ALL_ONLINE }, + { 0x140048, 10, RI_E1E1HE2E3_ONLINE }, + { 0x140070, 1, RI_ALL_ONLINE }, + { 0x140074, 10, RI_E1E1HE2E3_ONLINE }, + { 0x14009c, 1, RI_ALL_ONLINE }, + { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE }, + { 0x1400b4, 7, RI_ALL_ONLINE }, + { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE }, + { 0x1400f8, 2, RI_ALL_ONLINE }, + { 0x140100, 5, RI_E1E1H_ONLINE }, + { 0x140114, 5, RI_E1E1HE2E3_ONLINE }, + { 0x140128, 7, RI_ALL_ONLINE }, + { 0x140144, 9, RI_E1E1HE2E3_ONLINE }, + { 0x140168, 8, RI_ALL_ONLINE }, + { 0x140188, 3, RI_E1E1HE2E3_ONLINE }, + { 0x140194, 13, RI_ALL_ONLINE }, + { 0x140200, 6, RI_E1E1HE2E3_ONLINE }, + { 0x140220, 4, RI_E2E3_ONLINE }, + { 0x140240, 4, RI_E2E3_ONLINE }, + { 0x140260, 4, RI_E2E3_ONLINE }, + { 0x140280, 4, RI_E2E3_ONLINE }, + { 0x1402a0, 4, RI_E2E3_ONLINE }, + { 0x1402c0, 4, RI_E2E3_ONLINE }, + { 0x1402e0, 2, RI_E2E3_ONLINE }, + { 0x1402e8, 2, RI_E2E3E3B0_ONLINE }, + { 0x1402f0, 9, RI_E2E3_ONLINE }, + { 0x140314, 44, RI_E3B0_ONLINE }, 
+ { 0x1403d0, 70, RI_E3B0_ONLINE }, + { 0x144000, 4, RI_E1E1H_ONLINE }, + { 0x148000, 4, RI_E1E1H_ONLINE }, + { 0x14c000, 4, RI_E1E1H_ONLINE }, + { 0x150000, 4, RI_E1E1H_ONLINE }, + { 0x154000, 4, RI_E1E1H_ONLINE }, + { 0x158000, 4, RI_E1E1H_ONLINE }, + { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x15c008, 5, RI_E1H_ONLINE }, + { 0x15c020, 8, RI_E2E3E3B0_ONLINE }, + { 0x15c040, 1, RI_E2E3_ONLINE }, + { 0x15c044, 2, RI_E2E3E3B0_ONLINE }, + { 0x15c04c, 8, RI_E2E3_ONLINE }, + { 0x15c06c, 8, RI_E2E3E3B0_ONLINE }, + { 0x15c090, 13, RI_E2E3E3B0_ONLINE }, + { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE }, + { 0x15c128, 2, RI_E2E3_ONLINE }, + { 0x15c130, 8, RI_E2E3E3B0_ONLINE }, + { 0x15c150, 2, RI_E3E3B0_ONLINE }, + { 0x15c158, 2, RI_E3_ONLINE }, + { 0x15c160, 149, RI_E3B0_ONLINE }, + { 0x161000, 7, RI_ALL_ONLINE }, + { 0x16103c, 2, RI_E2E3E3B0_ONLINE }, + { 0x161800, 2, RI_ALL_ONLINE }, + { 0x162000, 54, RI_E3E3B0_ONLINE }, + { 0x162200, 60, RI_E3E3B0_ONLINE }, + { 0x162400, 54, RI_E3E3B0_ONLINE }, + { 0x162600, 60, RI_E3E3B0_ONLINE }, + { 0x162800, 54, RI_E3E3B0_ONLINE }, + { 0x162a00, 60, RI_E3E3B0_ONLINE }, + { 0x162c00, 54, RI_E3E3B0_ONLINE }, + { 0x162e00, 60, RI_E3E3B0_ONLINE }, + { 0x164000, 60, RI_ALL_ONLINE }, + { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x164118, 15, RI_E2E3E3B0_ONLINE }, + { 0x164200, 1, RI_ALL_ONLINE }, + { 0x164208, 1, RI_ALL_ONLINE }, + { 0x164210, 1, RI_ALL_ONLINE }, + { 0x164218, 1, RI_ALL_ONLINE }, + { 0x164220, 1, RI_ALL_ONLINE }, + { 0x164228, 1, RI_ALL_ONLINE }, + { 0x164230, 1, RI_ALL_ONLINE }, + { 0x164238, 1, RI_ALL_ONLINE }, + { 0x164240, 1, RI_ALL_ONLINE }, + { 0x164248, 1, RI_ALL_ONLINE }, + { 0x164250, 1, RI_ALL_ONLINE }, + { 0x164258, 1, RI_ALL_ONLINE }, + { 0x164260, 1, RI_ALL_ONLINE }, + { 0x164270, 2, RI_ALL_ONLINE }, + { 0x164280, 2, RI_ALL_ONLINE }, + { 0x164800, 2, RI_ALL_ONLINE }, + { 0x165000, 2, RI_ALL_ONLINE }, + { 0x166000, 164, RI_ALL_ONLINE }, + { 0x1662cc, 7, RI_E2E3E3B0_ONLINE }, + { 0x166400, 49, RI_ALL_ONLINE }, + { 0x1664c8, 38, RI_ALL_ONLINE }, + { 0x166568, 2, RI_ALL_ONLINE }, + { 0x166570, 5, RI_E2E3E3B0_ONLINE }, + { 0x166800, 1, RI_ALL_ONLINE }, + { 0x168000, 137, RI_ALL_ONLINE }, + { 0x168224, 2, RI_E1E1H_ONLINE }, + { 0x16822c, 29, RI_ALL_ONLINE }, + { 0x1682a0, 12, RI_E1E1H_ONLINE }, + { 0x1682d0, 12, RI_ALL_ONLINE }, + { 0x168300, 2, RI_E1E1H_ONLINE }, + { 0x168308, 68, RI_ALL_ONLINE }, + { 0x168418, 2, RI_E1E1H_ONLINE }, + { 0x168420, 6, RI_ALL_ONLINE }, + { 0x168800, 19, RI_ALL_ONLINE }, + { 0x168900, 1, RI_ALL_ONLINE }, + { 0x168a00, 128, RI_ALL_ONLINE }, + { 0x16a000, 1, RI_ALL_ONLINE }, + { 0x16a004, 1535, RI_ALL_OFFLINE }, + { 0x16c000, 1, RI_ALL_ONLINE }, + { 0x16c004, 1535, RI_ALL_OFFLINE }, + { 0x16e000, 16, RI_E1H_ONLINE }, + { 0x16e040, 8, RI_E2E3E3B0_ONLINE }, + { 0x16e100, 1, RI_E1H_ONLINE }, + { 0x16e200, 2, RI_E1H_ONLINE }, + { 0x16e400, 161, RI_E1H_ONLINE }, + { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x16e68c, 12, RI_E1H_ONLINE }, + { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE }, + { 0x16e6cc, 4, RI_E1H_ONLINE }, + { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE }, + { 0x16e6e8, 5, RI_E2E3_ONLINE }, + { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE }, + { 0x16e768, 17, RI_E2E3E3B0_ONLINE }, + { 0x16e7ac, 12, RI_E3B0_ONLINE }, + { 0x170000, 24, RI_ALL_ONLINE }, + { 0x170060, 4, RI_E1E1H_ONLINE }, + { 0x170070, 65, RI_ALL_ONLINE }, + { 0x170194, 11, RI_E2E3E3B0_ONLINE }, + { 0x1701c4, 1, RI_E2E3E3B0_ONLINE }, + { 0x1701cc, 7, RI_E2E3E3B0_ONLINE }, + { 0x1701e8, 1, RI_E3E3B0_ONLINE }, + { 0x1701ec, 1, RI_E2E3E3B0_ONLINE }, + { 0x1701f4, 1, 
RI_E2E3E3B0_ONLINE }, + { 0x170200, 4, RI_ALL_ONLINE }, + { 0x170214, 1, RI_ALL_ONLINE }, + { 0x170218, 77, RI_E2E3E3B0_ONLINE }, + { 0x170400, 64, RI_E2E3E3B0_ONLINE }, + { 0x178000, 1, RI_ALL_ONLINE }, + { 0x180000, 61, RI_ALL_ONLINE }, + { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x180200, 58, RI_ALL_ONLINE }, + { 0x180340, 4, RI_ALL_ONLINE }, + { 0x180380, 1, RI_E2E3E3B0_ONLINE }, + { 0x180388, 1, RI_E2E3E3B0_ONLINE }, + { 0x180390, 1, RI_E2E3E3B0_ONLINE }, + { 0x180398, 1, RI_E2E3E3B0_ONLINE }, + { 0x1803a0, 5, RI_E2E3E3B0_ONLINE }, + { 0x1803b4, 2, RI_E3E3B0_ONLINE }, + { 0x180400, 1, RI_ALL_ONLINE }, + { 0x180404, 255, RI_E1E1H_OFFLINE }, + { 0x181000, 4, RI_ALL_ONLINE }, + { 0x181010, 1020, RI_ALL_OFFLINE }, + { 0x182000, 4, RI_E3E3B0_ONLINE }, + { 0x1a0000, 1, RI_ALL_ONLINE }, + { 0x1a0004, 5631, RI_ALL_OFFLINE }, + { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE }, + { 0x1a8000, 1, RI_ALL_ONLINE }, + { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE }, + { 0x1b0000, 1, RI_ALL_ONLINE }, + { 0x1b0004, 15, RI_E1H_OFFLINE }, + { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b0044, 239, RI_E1H_OFFLINE }, + { 0x1b0400, 1, RI_ALL_ONLINE }, + { 0x1b0404, 255, RI_E1H_OFFLINE }, + { 0x1b0800, 1, RI_ALL_ONLINE }, + { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b0c00, 1, RI_ALL_ONLINE }, + { 0x1b1000, 1, RI_ALL_ONLINE }, + { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b1400, 1, RI_ALL_ONLINE }, + { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b1800, 128, RI_ALL_OFFLINE }, + { 0x1b1c00, 128, RI_ALL_OFFLINE }, + { 0x1b2000, 1, RI_ALL_ONLINE }, + { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE }, + { 0x1b8000, 1, RI_ALL_ONLINE }, + { 0x1b8040, 1, RI_ALL_ONLINE }, + { 0x1b8080, 1, RI_ALL_ONLINE }, + { 0x1b80c0, 1, RI_ALL_ONLINE }, + { 0x1b8100, 1, RI_ALL_ONLINE }, + { 0x1b8140, 1, RI_ALL_ONLINE }, + { 0x1b8180, 1, RI_ALL_ONLINE }, + { 0x1b81c0, 1, RI_ALL_ONLINE }, + { 0x1b8200, 1, RI_ALL_ONLINE }, + { 0x1b8240, 1, RI_ALL_ONLINE }, + { 0x1b8280, 1, RI_ALL_ONLINE }, + { 0x1b82c0, 1, RI_ALL_ONLINE }, + { 0x1b8300, 1, RI_ALL_ONLINE }, + { 0x1b8340, 1, RI_ALL_ONLINE }, + { 0x1b8380, 1, RI_ALL_ONLINE }, + { 0x1b83c0, 1, RI_ALL_ONLINE }, + { 0x1b8400, 1, RI_ALL_ONLINE }, + { 0x1b8440, 1, RI_ALL_ONLINE }, + { 0x1b8480, 1, RI_ALL_ONLINE }, + { 0x1b84c0, 1, RI_ALL_ONLINE }, + { 0x1b8500, 1, RI_ALL_ONLINE }, + { 0x1b8540, 1, RI_ALL_ONLINE }, + { 0x1b8580, 1, RI_ALL_ONLINE }, + { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE }, + { 0x1b8800, 1, RI_ALL_ONLINE }, + { 0x1b8840, 1, RI_ALL_ONLINE }, + { 0x1b8880, 1, RI_ALL_ONLINE }, + { 0x1b88c0, 1, RI_ALL_ONLINE }, + { 0x1b8900, 1, RI_ALL_ONLINE }, + { 0x1b8940, 1, RI_ALL_ONLINE }, + { 0x1b8980, 1, RI_ALL_ONLINE }, + { 0x1b89c0, 1, RI_ALL_ONLINE }, + { 0x1b8a00, 1, RI_ALL_ONLINE }, + { 0x1b8a40, 1, RI_ALL_ONLINE }, + { 0x1b8a80, 1, RI_ALL_ONLINE }, + { 0x1b8ac0, 1, RI_ALL_ONLINE }, + { 0x1b8b00, 1, RI_ALL_ONLINE }, + { 0x1b8b40, 1, RI_ALL_ONLINE }, + { 0x1b8b80, 1, RI_ALL_ONLINE }, + { 0x1b8bc0, 1, RI_ALL_ONLINE }, + { 0x1b8c00, 1, RI_ALL_ONLINE }, + { 0x1b8c40, 1, RI_ALL_ONLINE }, + { 0x1b8c80, 1, RI_ALL_ONLINE }, + { 0x1b8cc0, 1, RI_ALL_ONLINE }, + { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE }, + { 0x1b8d00, 1, RI_ALL_ONLINE }, + { 0x1b8d40, 1, RI_ALL_ONLINE }, + { 0x1b8d80, 1, RI_ALL_ONLINE }, + { 0x1b8dc0, 1, RI_ALL_ONLINE }, + { 0x1b8e00, 1, RI_ALL_ONLINE }, + { 0x1b8e40, 1, RI_ALL_ONLINE }, + { 0x1b8e80, 1, RI_ALL_ONLINE }, + { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE }, + { 0x1b8ec0, 
1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE }, + { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE }, + { 0x1b8fe8, 2, RI_E3E3B0_ONLINE }, + { 0x1b9000, 1, RI_E2E3E3B0_ONLINE }, + { 0x1b9040, 3, RI_E2E3E3B0_ONLINE }, + { 0x1b905c, 1, RI_E3E3B0_ONLINE }, + { 0x1b9064, 1, RI_E3B0_ONLINE }, + { 0x1b9080, 10, RI_E3B0_ONLINE }, + { 0x1b9400, 14, RI_E2E3E3B0_ONLINE }, + { 0x1b943c, 19, RI_E2E3E3B0_ONLINE }, + { 0x1b9490, 10, RI_E2E3E3B0_ONLINE }, + { 0x1c0000, 2, RI_ALL_ONLINE }, + { 0x200000, 65, RI_ALL_ONLINE }, + { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x200200, 58, RI_ALL_ONLINE }, + { 0x200340, 4, RI_ALL_ONLINE }, + { 0x200380, 1, RI_E2E3E3B0_ONLINE }, + { 0x200388, 1, RI_E2E3E3B0_ONLINE }, + { 0x200390, 1, RI_E2E3E3B0_ONLINE }, + { 0x200398, 1, RI_E2E3E3B0_ONLINE }, + { 0x2003a0, 1, RI_E2E3E3B0_ONLINE }, + { 0x2003a8, 2, RI_E2E3E3B0_ONLINE }, + { 0x200400, 1, RI_ALL_ONLINE }, + { 0x200404, 255, RI_E1E1H_OFFLINE }, + { 0x202000, 4, RI_ALL_ONLINE }, + { 0x202010, 2044, RI_ALL_OFFLINE }, + { 0x204000, 4, RI_E3E3B0_ONLINE }, + { 0x220000, 1, RI_ALL_ONLINE }, + { 0x220004, 5631, RI_ALL_OFFLINE }, + { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE }, + { 0x228000, 1, RI_ALL_ONLINE }, + { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE }, + { 0x230000, 1, RI_ALL_ONLINE }, + { 0x230004, 15, RI_E1H_OFFLINE }, + { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x230044, 239, RI_E1H_OFFLINE }, + { 0x230400, 1, RI_ALL_ONLINE }, + { 0x230404, 255, RI_E1H_OFFLINE }, + { 0x230800, 1, RI_ALL_ONLINE }, + { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x230c00, 1, RI_ALL_ONLINE }, + { 0x231000, 1, RI_ALL_ONLINE }, + { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x231400, 1, RI_ALL_ONLINE }, + { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x231800, 128, RI_ALL_OFFLINE }, + { 0x231c00, 128, RI_ALL_OFFLINE }, + { 0x232000, 1, RI_ALL_ONLINE }, + { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x232404, 5631, RI_E2E3E3B0_OFFLINE }, + { 0x238000, 1, RI_ALL_ONLINE }, + { 0x238040, 1, RI_ALL_ONLINE }, + { 0x238080, 1, RI_ALL_ONLINE }, + { 0x2380c0, 1, RI_ALL_ONLINE }, + { 0x238100, 1, RI_ALL_ONLINE }, + { 0x238140, 1, RI_ALL_ONLINE }, + { 0x238180, 1, RI_ALL_ONLINE }, + { 0x2381c0, 1, RI_ALL_ONLINE }, + { 0x238200, 1, RI_ALL_ONLINE }, + { 0x238240, 1, RI_ALL_ONLINE }, + { 0x238280, 1, RI_ALL_ONLINE }, + { 0x2382c0, 1, RI_ALL_ONLINE }, + { 0x238300, 1, RI_ALL_ONLINE }, + { 0x238340, 1, RI_ALL_ONLINE }, + { 0x238380, 1, RI_ALL_ONLINE }, + { 0x2383c0, 1, RI_ALL_ONLINE }, + { 0x238400, 1, RI_ALL_ONLINE }, + { 0x238440, 1, RI_ALL_ONLINE }, + { 0x238480, 1, RI_ALL_ONLINE }, + { 0x2384c0, 1, RI_ALL_ONLINE }, + { 0x238500, 1, RI_ALL_ONLINE }, + { 0x238540, 1, RI_ALL_ONLINE }, + { 0x238580, 1, RI_ALL_ONLINE }, + { 0x2385c0, 19, RI_E2E3E3B0_ONLINE }, + { 0x238800, 1, RI_ALL_ONLINE }, + { 0x238840, 1, RI_ALL_ONLINE }, + { 0x238880, 1, RI_ALL_ONLINE }, + { 0x2388c0, 1, RI_ALL_ONLINE }, + { 0x238900, 1, RI_ALL_ONLINE }, + { 0x238940, 1, RI_ALL_ONLINE }, + { 0x238980, 1, RI_ALL_ONLINE }, + { 0x2389c0, 1, RI_ALL_ONLINE }, + { 0x238a00, 1, RI_ALL_ONLINE }, + { 0x238a40, 1, RI_ALL_ONLINE }, + { 0x238a80, 1, RI_ALL_ONLINE }, + { 0x238ac0, 1, RI_ALL_ONLINE }, + { 0x238b00, 1, RI_ALL_ONLINE }, + { 0x238b40, 1, RI_ALL_ONLINE }, + { 0x238b80, 1, RI_ALL_ONLINE }, + { 0x238bc0, 1, RI_ALL_ONLINE }, + { 0x238c00, 1, 
RI_ALL_ONLINE }, + { 0x238c40, 1, RI_ALL_ONLINE }, + { 0x238c80, 1, RI_ALL_ONLINE }, + { 0x238cc0, 1, RI_ALL_ONLINE }, + { 0x238cc4, 1, RI_E2E3E3B0_ONLINE }, + { 0x238d00, 1, RI_ALL_ONLINE }, + { 0x238d40, 1, RI_ALL_ONLINE }, + { 0x238d80, 1, RI_ALL_ONLINE }, + { 0x238dc0, 1, RI_ALL_ONLINE }, + { 0x238e00, 1, RI_ALL_ONLINE }, + { 0x238e40, 1, RI_ALL_ONLINE }, + { 0x238e80, 1, RI_ALL_ONLINE }, + { 0x238e84, 1, RI_E2E3E3B0_ONLINE }, + { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x238fc4, 2, RI_E2E3E3B0_ONLINE }, + { 0x238fd0, 6, RI_E2E3E3B0_ONLINE }, + { 0x238fe8, 2, RI_E3E3B0_ONLINE }, + { 0x239000, 1, RI_E2E3E3B0_ONLINE }, + { 0x239040, 3, RI_E2E3E3B0_ONLINE }, + { 0x23905c, 1, RI_E3E3B0_ONLINE }, + { 0x239064, 1, RI_E3B0_ONLINE }, + { 0x239080, 10, RI_E3B0_ONLINE }, + { 0x240000, 2, RI_ALL_ONLINE }, + { 0x280000, 65, RI_ALL_ONLINE }, + { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x280200, 58, RI_ALL_ONLINE }, + { 0x280340, 4, RI_ALL_ONLINE }, + { 0x280380, 1, RI_E2E3E3B0_ONLINE }, + { 0x280388, 1, RI_E2E3E3B0_ONLINE }, + { 0x280390, 1, RI_E2E3E3B0_ONLINE }, + { 0x280398, 1, RI_E2E3E3B0_ONLINE }, + { 0x2803a0, 1, RI_E2E3E3B0_ONLINE }, + { 0x2803a8, 2, RI_E2E3E3B0_ONLINE }, + { 0x280400, 1, RI_ALL_ONLINE }, + { 0x280404, 255, RI_E1E1H_OFFLINE }, + { 0x282000, 4, RI_ALL_ONLINE }, + { 0x282010, 2044, RI_ALL_OFFLINE }, + { 0x284000, 4, RI_E3E3B0_ONLINE }, + { 0x2a0000, 1, RI_ALL_ONLINE }, + { 0x2a0004, 5631, RI_ALL_OFFLINE }, + { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE }, + { 0x2a8000, 1, RI_ALL_ONLINE }, + { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE }, + { 0x2b0000, 1, RI_ALL_ONLINE }, + { 0x2b0004, 15, RI_E1H_OFFLINE }, + { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b0044, 239, RI_E1H_OFFLINE }, + { 0x2b0400, 1, RI_ALL_ONLINE }, + { 0x2b0404, 255, RI_E1H_OFFLINE }, + { 0x2b0800, 1, RI_ALL_ONLINE }, + { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b0c00, 1, RI_ALL_ONLINE }, + { 0x2b1000, 1, RI_ALL_ONLINE }, + { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b1400, 1, RI_ALL_ONLINE }, + { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b1800, 128, RI_ALL_OFFLINE }, + { 0x2b1c00, 128, RI_ALL_OFFLINE }, + { 0x2b2000, 1, RI_ALL_ONLINE }, + { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE }, + { 0x2b8000, 1, RI_ALL_ONLINE }, + { 0x2b8040, 1, RI_ALL_ONLINE }, + { 0x2b8080, 1, RI_ALL_ONLINE }, + { 0x2b80c0, 1, RI_ALL_ONLINE }, + { 0x2b8100, 1, RI_ALL_ONLINE }, + { 0x2b8140, 1, RI_ALL_ONLINE }, + { 0x2b8180, 1, RI_ALL_ONLINE }, + { 0x2b81c0, 1, RI_ALL_ONLINE }, + { 0x2b8200, 1, RI_ALL_ONLINE }, + { 0x2b8240, 1, RI_ALL_ONLINE }, + { 0x2b8280, 1, RI_ALL_ONLINE }, + { 0x2b82c0, 1, RI_ALL_ONLINE }, + { 0x2b8300, 1, RI_ALL_ONLINE }, + { 0x2b8340, 1, RI_ALL_ONLINE }, + { 0x2b8380, 1, RI_ALL_ONLINE }, + { 0x2b83c0, 1, RI_ALL_ONLINE }, + { 0x2b8400, 1, RI_ALL_ONLINE }, + { 0x2b8440, 1, RI_ALL_ONLINE }, + { 0x2b8480, 1, RI_ALL_ONLINE }, + { 0x2b84c0, 1, RI_ALL_ONLINE }, + { 0x2b8500, 1, RI_ALL_ONLINE }, + { 0x2b8540, 1, RI_ALL_ONLINE }, + { 0x2b8580, 1, RI_ALL_ONLINE }, + { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE }, + { 0x2b8800, 1, RI_ALL_ONLINE }, + { 0x2b8840, 1, RI_ALL_ONLINE }, + { 0x2b8880, 1, RI_ALL_ONLINE }, + { 0x2b88c0, 1, RI_ALL_ONLINE }, + { 0x2b8900, 1, RI_ALL_ONLINE }, + { 0x2b8940, 1, RI_ALL_ONLINE }, + { 0x2b8980, 1, 
RI_ALL_ONLINE }, + { 0x2b89c0, 1, RI_ALL_ONLINE }, + { 0x2b8a00, 1, RI_ALL_ONLINE }, + { 0x2b8a40, 1, RI_ALL_ONLINE }, + { 0x2b8a80, 1, RI_ALL_ONLINE }, + { 0x2b8ac0, 1, RI_ALL_ONLINE }, + { 0x2b8b00, 1, RI_ALL_ONLINE }, + { 0x2b8b40, 1, RI_ALL_ONLINE }, + { 0x2b8b80, 1, RI_ALL_ONLINE }, + { 0x2b8bc0, 1, RI_ALL_ONLINE }, + { 0x2b8c00, 1, RI_ALL_ONLINE }, + { 0x2b8c40, 1, RI_ALL_ONLINE }, + { 0x2b8c80, 1, RI_ALL_ONLINE }, + { 0x2b8cc0, 1, RI_ALL_ONLINE }, + { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE }, + { 0x2b8d00, 1, RI_ALL_ONLINE }, + { 0x2b8d40, 1, RI_ALL_ONLINE }, + { 0x2b8d80, 1, RI_ALL_ONLINE }, + { 0x2b8dc0, 1, RI_ALL_ONLINE }, + { 0x2b8e00, 1, RI_ALL_ONLINE }, + { 0x2b8e40, 1, RI_ALL_ONLINE }, + { 0x2b8e80, 1, RI_ALL_ONLINE }, + { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE }, + { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE }, + { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE }, + { 0x2b8fe8, 2, RI_E3E3B0_ONLINE }, + { 0x2b9000, 1, RI_E2E3E3B0_ONLINE }, + { 0x2b9040, 3, RI_E2E3E3B0_ONLINE }, + { 0x2b905c, 1, RI_E3E3B0_ONLINE }, + { 0x2b9064, 1, RI_E3B0_ONLINE }, + { 0x2b9080, 10, RI_E3B0_ONLINE }, + { 0x2b9400, 14, RI_E2E3E3B0_ONLINE }, + { 0x2b943c, 19, RI_E2E3E3B0_ONLINE }, + { 0x2b9490, 10, RI_E2E3E3B0_ONLINE }, + { 0x2c0000, 2, RI_ALL_ONLINE }, + { 0x300000, 65, RI_ALL_ONLINE }, + { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x300200, 58, RI_ALL_ONLINE }, + { 0x300340, 4, RI_ALL_ONLINE }, + { 0x300380, 1, RI_E2E3E3B0_ONLINE }, + { 0x300388, 1, RI_E2E3E3B0_ONLINE }, + { 0x300390, 1, RI_E2E3E3B0_ONLINE }, + { 0x300398, 1, RI_E2E3E3B0_ONLINE }, + { 0x3003a0, 1, RI_E2E3E3B0_ONLINE }, + { 0x3003a8, 2, RI_E2E3E3B0_ONLINE }, + { 0x300400, 1, RI_ALL_ONLINE }, + { 0x300404, 255, RI_E1E1H_OFFLINE }, + { 0x302000, 4, RI_ALL_ONLINE }, + { 0x302010, 2044, RI_ALL_OFFLINE }, + { 0x304000, 4, RI_E3E3B0_ONLINE }, + { 0x320000, 1, RI_ALL_ONLINE }, + { 0x320004, 5631, RI_ALL_OFFLINE }, + { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE }, + { 0x328000, 1, RI_ALL_ONLINE }, + { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE }, + { 0x330000, 1, RI_ALL_ONLINE }, + { 0x330004, 15, RI_E1H_OFFLINE }, + { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x330044, 239, RI_E1H_OFFLINE }, + { 0x330400, 1, RI_ALL_ONLINE }, + { 0x330404, 255, RI_E1H_OFFLINE }, + { 0x330800, 1, RI_ALL_ONLINE }, + { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x330c00, 1, RI_ALL_ONLINE }, + { 0x331000, 1, RI_ALL_ONLINE }, + { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x331400, 1, RI_ALL_ONLINE }, + { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x331800, 128, RI_ALL_OFFLINE }, + { 0x331c00, 128, RI_ALL_OFFLINE }, + { 0x332000, 1, RI_ALL_ONLINE }, + { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x332404, 5631, RI_E2E3E3B0_OFFLINE }, + { 0x338000, 1, RI_ALL_ONLINE }, + { 0x338040, 1, RI_ALL_ONLINE }, + { 0x338080, 1, RI_ALL_ONLINE }, + { 0x3380c0, 1, RI_ALL_ONLINE }, + { 0x338100, 1, RI_ALL_ONLINE }, + { 0x338140, 1, RI_ALL_ONLINE }, + { 0x338180, 1, RI_ALL_ONLINE }, + { 0x3381c0, 1, RI_ALL_ONLINE }, + { 0x338200, 1, RI_ALL_ONLINE }, + { 0x338240, 1, RI_ALL_ONLINE }, + { 0x338280, 1, RI_ALL_ONLINE }, + { 0x3382c0, 1, RI_ALL_ONLINE }, + { 0x338300, 1, RI_ALL_ONLINE }, + { 0x338340, 1, RI_ALL_ONLINE }, + { 0x338380, 1, RI_ALL_ONLINE }, + { 0x3383c0, 1, RI_ALL_ONLINE }, + { 0x338400, 1, RI_ALL_ONLINE }, + { 0x338440, 1, 
RI_ALL_ONLINE }, + { 0x338480, 1, RI_ALL_ONLINE }, + { 0x3384c0, 1, RI_ALL_ONLINE }, + { 0x338500, 1, RI_ALL_ONLINE }, + { 0x338540, 1, RI_ALL_ONLINE }, + { 0x338580, 1, RI_ALL_ONLINE }, + { 0x3385c0, 19, RI_E2E3E3B0_ONLINE }, + { 0x338800, 1, RI_ALL_ONLINE }, + { 0x338840, 1, RI_ALL_ONLINE }, + { 0x338880, 1, RI_ALL_ONLINE }, + { 0x3388c0, 1, RI_ALL_ONLINE }, + { 0x338900, 1, RI_ALL_ONLINE }, + { 0x338940, 1, RI_ALL_ONLINE }, + { 0x338980, 1, RI_ALL_ONLINE }, + { 0x3389c0, 1, RI_ALL_ONLINE }, + { 0x338a00, 1, RI_ALL_ONLINE }, + { 0x338a40, 1, RI_ALL_ONLINE }, + { 0x338a80, 1, RI_ALL_ONLINE }, + { 0x338ac0, 1, RI_ALL_ONLINE }, + { 0x338b00, 1, RI_ALL_ONLINE }, + { 0x338b40, 1, RI_ALL_ONLINE }, + { 0x338b80, 1, RI_ALL_ONLINE }, + { 0x338bc0, 1, RI_ALL_ONLINE }, + { 0x338c00, 1, RI_ALL_ONLINE }, + { 0x338c40, 1, RI_ALL_ONLINE }, + { 0x338c80, 1, RI_ALL_ONLINE }, + { 0x338cc0, 1, RI_ALL_ONLINE }, + { 0x338cc4, 1, RI_E2E3E3B0_ONLINE }, + { 0x338d00, 1, RI_ALL_ONLINE }, + { 0x338d40, 1, RI_ALL_ONLINE }, + { 0x338d80, 1, RI_ALL_ONLINE }, + { 0x338dc0, 1, RI_ALL_ONLINE }, + { 0x338e00, 1, RI_ALL_ONLINE }, + { 0x338e40, 1, RI_ALL_ONLINE }, + { 0x338e80, 1, RI_ALL_ONLINE }, + { 0x338e84, 1, RI_E2E3E3B0_ONLINE }, + { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x338fc4, 2, RI_E2E3E3B0_ONLINE }, + { 0x338fd0, 6, RI_E2E3E3B0_ONLINE }, + { 0x338fe8, 2, RI_E3E3B0_ONLINE }, + { 0x339000, 1, RI_E2E3E3B0_ONLINE }, + { 0x339040, 3, RI_E2E3E3B0_ONLINE }, + { 0x33905c, 1, RI_E3E3B0_ONLINE }, + { 0x339064, 1, RI_E3B0_ONLINE }, + { 0x339080, 10, RI_E3B0_ONLINE }, + { 0x340000, 2, RI_ALL_ONLINE }, +}; +#define REGS_COUNT ARRAY_SIZE(reg_addrs) + +static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a }; + +static const u32 page_vals_e2[] = { 0, 128 }; +#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2) + +static const u32 page_write_regs_e2[] = { 328476 }; +#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2) + +static const struct reg_addr page_read_regs_e2[] = { + { 0x58000, 4608, RI_E2_ONLINE } }; +#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2) + +static const u32 page_vals_e3[] = { 0, 128 }; +#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3) + +static const u32 page_write_regs_e3[] = { 328476 }; +#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3) + +static const struct reg_addr page_read_regs_e3[] = { + { 0x58000, 4608, RI_E3E3B0_ONLINE } }; +#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3) + +#endif /* BNX2X_DUMP_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c new file mode 100644 index 0000000..2218630 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -0,0 +1,2355 @@ +/* bnx2x_ethtool.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation.
+ * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/crc32.h> + + +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include "bnx2x_dump.h" +#include "bnx2x_init.h" +#include "bnx2x_sp.h" + +/* Note: in the format strings below %s is replaced by the queue-name which is + * either its index or 'fcoe' for the fcoe queue. Make sure the format string + * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2 + */ +#define MAX_QUEUE_NAME_LEN 4 +static const struct { + long offset; + int size; + char string[ETH_GSTRING_LEN]; +} bnx2x_q_stats_arr[] = { +/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" }, + { Q_STATS_OFFSET32(total_unicast_packets_received_hi), + 8, "[%s]: rx_ucast_packets" }, + { Q_STATS_OFFSET32(total_multicast_packets_received_hi), + 8, "[%s]: rx_mcast_packets" }, + { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), + 8, "[%s]: rx_bcast_packets" }, + { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" }, + { Q_STATS_OFFSET32(rx_err_discard_pkt), + 4, "[%s]: rx_phy_ip_err_discards"}, + { Q_STATS_OFFSET32(rx_skb_alloc_failed), + 4, "[%s]: rx_skb_alloc_discard" }, + { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, + + { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, +/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), + 8, "[%s]: tx_ucast_packets" }, + { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), + 8, "[%s]: tx_mcast_packets" }, + { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), + 8, "[%s]: tx_bcast_packets" }, + { Q_STATS_OFFSET32(total_tpa_aggregations_hi), + 8, "[%s]: tpa_aggregations" }, + { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), + 8, "[%s]: tpa_aggregated_frames"}, + { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"} +}; + +#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) + +static const struct { + long offset; + int size; + u32 flags; +#define STATS_FLAGS_PORT 1 +#define STATS_FLAGS_FUNC 2 +#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) + char string[ETH_GSTRING_LEN]; +} bnx2x_stats_arr[] = { +/* 1 */ { STATS_OFFSET32(total_bytes_received_hi), + 8, STATS_FLAGS_BOTH, "rx_bytes" }, + { STATS_OFFSET32(error_bytes_received_hi), + 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, + { STATS_OFFSET32(total_unicast_packets_received_hi), + 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, + { STATS_OFFSET32(total_multicast_packets_received_hi), + 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, + { STATS_OFFSET32(total_broadcast_packets_received_hi), + 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, + { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), + 8, STATS_FLAGS_PORT, "rx_crc_errors" }, + { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), + 8, STATS_FLAGS_PORT, "rx_align_errors" }, + { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), + 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, + { STATS_OFFSET32(etherstatsoverrsizepkts_hi), + 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, +/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), + 8, STATS_FLAGS_PORT, "rx_fragments" }, + { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), + 8, STATS_FLAGS_PORT, "rx_jabbers" }, + { STATS_OFFSET32(no_buff_discard_hi), + 8, STATS_FLAGS_BOTH, "rx_discards" }, + {
STATS_OFFSET32(mac_filter_discard), + 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, + { STATS_OFFSET32(mf_tag_discard), + 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, + { STATS_OFFSET32(brb_drop_hi), + 8, STATS_FLAGS_PORT, "rx_brb_discard" }, + { STATS_OFFSET32(brb_truncate_hi), + 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, + { STATS_OFFSET32(pause_frames_received_hi), + 8, STATS_FLAGS_PORT, "rx_pause_frames" }, + { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), + 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, + { STATS_OFFSET32(nig_timer_max), + 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, +/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), + 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, + { STATS_OFFSET32(rx_skb_alloc_failed), + 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, + { STATS_OFFSET32(hw_csum_err), + 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, + + { STATS_OFFSET32(total_bytes_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_bytes" }, + { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), + 8, STATS_FLAGS_PORT, "tx_error_bytes" }, + { STATS_OFFSET32(total_unicast_packets_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, + { STATS_OFFSET32(total_multicast_packets_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, + { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, + { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), + 8, STATS_FLAGS_PORT, "tx_mac_errors" }, + { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), + 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, +/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), + 8, STATS_FLAGS_PORT, "tx_single_collisions" }, + { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), + 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, + { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), + 8, STATS_FLAGS_PORT, "tx_deferred" }, + { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), + 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, + { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), + 8, STATS_FLAGS_PORT, "tx_late_collisions" }, + { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), + 8, STATS_FLAGS_PORT, "tx_total_collisions" }, + { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), + 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, + { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), + 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, + { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), + 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, + { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), + 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, +/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), + 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, + { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), + 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, + { STATS_OFFSET32(etherstatspktsover1522octets_hi), + 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, + { STATS_OFFSET32(pause_frames_sent_hi), + 8, STATS_FLAGS_PORT, "tx_pause_frames" }, + { STATS_OFFSET32(total_tpa_aggregations_hi), + 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, + { STATS_OFFSET32(total_tpa_aggregated_frames_hi), + 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, + { STATS_OFFSET32(total_tpa_bytes_hi), + 8, STATS_FLAGS_FUNC, "tpa_bytes"} +}; + +#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) +static int bnx2x_get_port_type(struct bnx2x *bp) +{ + int port_type; + u32 phy_idx 
= bnx2x_get_cur_phy_idx(bp); + switch (bp->link_params.phy[phy_idx].media_type) { + case ETH_PHY_SFP_FIBER: + case ETH_PHY_XFP_FIBER: + case ETH_PHY_KR: + case ETH_PHY_CX4: + port_type = PORT_FIBRE; + break; + case ETH_PHY_DA_TWINAX: + port_type = PORT_DA; + break; + case ETH_PHY_BASE_T: + port_type = PORT_TP; + break; + case ETH_PHY_NOT_PRESENT: + port_type = PORT_NONE; + break; + case ETH_PHY_UNSPECIFIED: + default: + port_type = PORT_OTHER; + break; + } + return port_type; +} + +static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnx2x *bp = netdev_priv(dev); + int cfg_idx = bnx2x_get_link_cfg_idx(bp); + + /* Dual Media boards present all available port types */ + cmd->supported = bp->port.supported[cfg_idx] | + (bp->port.supported[cfg_idx ^ 1] & + (SUPPORTED_TP | SUPPORTED_FIBRE)); + cmd->advertising = bp->port.advertising[cfg_idx]; + + if ((bp->state == BNX2X_STATE_OPEN) && + !(bp->flags & MF_FUNC_DIS) && + (bp->link_vars.link_up)) { + ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed); + cmd->duplex = bp->link_vars.duplex; + } else { + ethtool_cmd_speed_set( + cmd, bp->link_params.req_line_speed[cfg_idx]); + cmd->duplex = bp->link_params.req_duplex[cfg_idx]; + } + + if (IS_MF(bp)) + ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); + + cmd->port = bnx2x_get_port_type(bp); + + cmd->phy_address = bp->mdio.prtad; + cmd->transceiver = XCVR_INTERNAL; + + if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) + cmd->autoneg = AUTONEG_ENABLE; + else + cmd->autoneg = AUTONEG_DISABLE; + + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + + DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" + DP_LEVEL " supported 0x%x advertising 0x%x speed %u\n" + DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" + DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", + cmd->cmd, cmd->supported, cmd->advertising, + ethtool_cmd_speed(cmd), + cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, + cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + + return 0; +} + +static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; + u32 speed; + + if (IS_MF_SD(bp)) + return 0; + + DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" + " supported 0x%x advertising 0x%x speed %u\n" + " duplex %d port %d phy_address %d transceiver %d\n" + " autoneg %d maxtxpkt %d maxrxpkt %d\n", + cmd->cmd, cmd->supported, cmd->advertising, + ethtool_cmd_speed(cmd), + cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, + cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + + speed = ethtool_cmd_speed(cmd); + + if (IS_MF_SI(bp)) { + u32 part; + u32 line_speed = bp->link_vars.line_speed; + + /* use 10G if no link detected */ + if (!line_speed) + line_speed = 10000; + + if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) { + BNX2X_DEV_INFO("To set speed BC %X or higher " + "is required, please upgrade BC\n", + REQ_BC_VER_4_SET_MF_BW); + return -EINVAL; + } + + part = (speed * 100) / line_speed; + + if (line_speed < speed || !part) { + BNX2X_DEV_INFO("Speed setting should be in a range " + "from 1%% to 100%% " + "of actual line speed\n"); + return -EINVAL; + } + + if (bp->state != BNX2X_STATE_OPEN) + /* store value for following "load" */ + bp->pending_max = part; + else + bnx2x_update_max_mf_config(bp, part); + + return 0; + } + + cfg_idx = bnx2x_get_link_cfg_idx(bp); + old_multi_phy_config = bp->link_params.multi_phy_config; + switch (cmd->port) { + case PORT_TP: + if 
(bp->port.supported[cfg_idx] & SUPPORTED_TP) + break; /* no port change */ + + if (!(bp->port.supported[0] & SUPPORTED_TP || + bp->port.supported[1] & SUPPORTED_TP)) { + DP(NETIF_MSG_LINK, "Unsupported port type\n"); + return -EINVAL; + } + bp->link_params.multi_phy_config &= + ~PORT_HW_CFG_PHY_SELECTION_MASK; + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + else + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + break; + case PORT_FIBRE: + if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE) + break; /* no port change */ + + if (!(bp->port.supported[0] & SUPPORTED_FIBRE || + bp->port.supported[1] & SUPPORTED_FIBRE)) { + DP(NETIF_MSG_LINK, "Unsupported port type\n"); + return -EINVAL; + } + bp->link_params.multi_phy_config &= + ~PORT_HW_CFG_PHY_SELECTION_MASK; + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + else + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + break; + default: + DP(NETIF_MSG_LINK, "Unsupported port type\n"); + return -EINVAL; + } + /* Save new config in case command complete successuly */ + new_multi_phy_config = bp->link_params.multi_phy_config; + /* Get the new cfg_idx */ + cfg_idx = bnx2x_get_link_cfg_idx(bp); + /* Restore old config in case command failed */ + bp->link_params.multi_phy_config = old_multi_phy_config; + DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx); + + if (cmd->autoneg == AUTONEG_ENABLE) { + if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { + DP(NETIF_MSG_LINK, "Autoneg not supported\n"); + return -EINVAL; + } + + /* advertise the requested speed and duplex if supported */ + cmd->advertising &= bp->port.supported[cfg_idx]; + + bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; + bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; + bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | + cmd->advertising); + + } else { /* forced speed */ + /* advertise the requested speed and duplex if supported */ + switch (speed) { + case SPEED_10: + if (cmd->duplex == DUPLEX_FULL) { + if (!(bp->port.supported[cfg_idx] & + SUPPORTED_10baseT_Full)) { + DP(NETIF_MSG_LINK, + "10M full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_10baseT_Full | + ADVERTISED_TP); + } else { + if (!(bp->port.supported[cfg_idx] & + SUPPORTED_10baseT_Half)) { + DP(NETIF_MSG_LINK, + "10M half not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_TP); + } + break; + + case SPEED_100: + if (cmd->duplex == DUPLEX_FULL) { + if (!(bp->port.supported[cfg_idx] & + SUPPORTED_100baseT_Full)) { + DP(NETIF_MSG_LINK, + "100M full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_100baseT_Full | + ADVERTISED_TP); + } else { + if (!(bp->port.supported[cfg_idx] & + SUPPORTED_100baseT_Half)) { + DP(NETIF_MSG_LINK, + "100M half not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_TP); + } + break; + + case SPEED_1000: + if (cmd->duplex != DUPLEX_FULL) { + DP(NETIF_MSG_LINK, "1G half not supported\n"); + return -EINVAL; + } + + if (!(bp->port.supported[cfg_idx] & + SUPPORTED_1000baseT_Full)) { + DP(NETIF_MSG_LINK, "1G full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); + break; + + case SPEED_2500: + if (cmd->duplex != DUPLEX_FULL) { + 
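/*
 * Editor's note, not part of the original patch: as with the 1G case
 * above and the 10G case below, 2.5G operation is full-duplex only on
 * this hardware, so a half-duplex request is rejected here before any
 * advertising bits are computed.
 */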
DP(NETIF_MSG_LINK, + "2.5G half not supported\n"); + return -EINVAL; + } + + if (!(bp->port.supported[cfg_idx] + & SUPPORTED_2500baseX_Full)) { + DP(NETIF_MSG_LINK, + "2.5G full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_2500baseX_Full | + ADVERTISED_TP); + break; + + case SPEED_10000: + if (cmd->duplex != DUPLEX_FULL) { + DP(NETIF_MSG_LINK, "10G half not supported\n"); + return -EINVAL; + } + + if (!(bp->port.supported[cfg_idx] + & SUPPORTED_10000baseT_Full)) { + DP(NETIF_MSG_LINK, "10G full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + break; + + default: + DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed); + return -EINVAL; + } + + bp->link_params.req_line_speed[cfg_idx] = speed; + bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->port.advertising[cfg_idx] = advertising; + } + + DP(NETIF_MSG_LINK, "req_line_speed %d\n" + DP_LEVEL " req_duplex %d advertising 0x%x\n", + bp->link_params.req_line_speed[cfg_idx], + bp->link_params.req_duplex[cfg_idx], + bp->port.advertising[cfg_idx]); + + /* Set new config */ + bp->link_params.multi_phy_config = new_multi_phy_config; + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_link_set(bp); + } + + return 0; +} + +#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) +#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) +#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE) +#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE) +#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE) + +static inline bool bnx2x_is_reg_online(struct bnx2x *bp, + const struct reg_addr *reg_info) +{ + if (CHIP_IS_E1(bp)) + return IS_E1_ONLINE(reg_info->info); + else if (CHIP_IS_E1H(bp)) + return IS_E1H_ONLINE(reg_info->info); + else if (CHIP_IS_E2(bp)) + return IS_E2_ONLINE(reg_info->info); + else if (CHIP_IS_E3A0(bp)) + return IS_E3_ONLINE(reg_info->info); + else if (CHIP_IS_E3B0(bp)) + return IS_E3B0_ONLINE(reg_info->info); + else + return false; +} + +/******* Paged registers info selectors ********/ +static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return page_vals_e2; + else if (CHIP_IS_E3(bp)) + return page_vals_e3; + else + return NULL; +} + +static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return PAGE_MODE_VALUES_E2; + else if (CHIP_IS_E3(bp)) + return PAGE_MODE_VALUES_E3; + else + return 0; +} + +static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return page_write_regs_e2; + else if (CHIP_IS_E3(bp)) + return page_write_regs_e3; + else + return NULL; +} + +static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return PAGE_WRITE_REGS_E2; + else if (CHIP_IS_E3(bp)) + return PAGE_WRITE_REGS_E3; + else + return 0; +} + +static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return page_read_regs_e2; + else if (CHIP_IS_E3(bp)) + return page_read_regs_e3; + else + return NULL; +} + +static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return PAGE_READ_REGS_E2; + else if (CHIP_IS_E3(bp)) + return PAGE_READ_REGS_E3; + else + return 0; +} + +static inline int __bnx2x_get_regs_len(struct bnx2x *bp) +{ + int num_pages = __bnx2x_get_page_reg_num(bp); + int page_write_num = __bnx2x_get_page_write_num(bp); + const 
struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp); + int page_read_num = __bnx2x_get_page_read_num(bp); + int regdump_len = 0; + int i, j, k; + + for (i = 0; i < REGS_COUNT; i++) + if (bnx2x_is_reg_online(bp, &reg_addrs[i])) + regdump_len += reg_addrs[i].size; + + for (i = 0; i < num_pages; i++) + for (j = 0; j < page_write_num; j++) + for (k = 0; k < page_read_num; k++) + if (bnx2x_is_reg_online(bp, &page_read_addr[k])) + regdump_len += page_read_addr[k].size; + + return regdump_len; +} + +static int bnx2x_get_regs_len(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + int regdump_len = 0; + + regdump_len = __bnx2x_get_regs_len(bp); + regdump_len *= 4; + regdump_len += sizeof(struct dump_hdr); + + return regdump_len; +} + +/** + * bnx2x_read_pages_regs - read "paged" registers + * + * @bp device handle + * @p output buffer + * + * Reads "paged" memories: memories that may only be read by first writing to a + * specific address ("write address") and then reading from a specific address + * ("read address"). There may be more than one write address per "page" and + * more than one read address per write address. + */ +static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) +{ + u32 i, j, k, n; + /* addresses of the paged registers */ + const u32 *page_addr = __bnx2x_get_page_addr_ar(bp); + /* number of paged registers */ + int num_pages = __bnx2x_get_page_reg_num(bp); + /* write addresses */ + const u32 *write_addr = __bnx2x_get_page_write_ar(bp); + /* number of write addresses */ + int write_num = __bnx2x_get_page_write_num(bp); + /* read addresses info */ + const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp); + /* number of read addresses */ + int read_num = __bnx2x_get_page_read_num(bp); + + for (i = 0; i < num_pages; i++) { + for (j = 0; j < write_num; j++) { + REG_WR(bp, write_addr[j], page_addr[i]); + for (k = 0; k < read_num; k++) + if (bnx2x_is_reg_online(bp, &read_addr[k])) + for (n = 0; n < + read_addr[k].size; n++) + *p++ = REG_RD(bp, + read_addr[k].addr + n*4); + } + } +} + +static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) +{ + u32 i, j; + + /* Read the regular registers */ + for (i = 0; i < REGS_COUNT; i++) + if (bnx2x_is_reg_online(bp, &reg_addrs[i])) + for (j = 0; j < reg_addrs[i].size; j++) + *p++ = REG_RD(bp, reg_addrs[i].addr + j*4); + + /* Read "paged" registers */ + bnx2x_read_pages_regs(bp, p); +} + +static void bnx2x_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *_p) +{ + u32 *p = _p; + struct bnx2x *bp = netdev_priv(dev); + struct dump_hdr dump_hdr = {0}; + + regs->version = 0; + memset(p, 0, regs->len); + + if (!netif_running(bp->dev)) + return; + + /* Disable parity attentions as long as following dump may + * cause false alarms by reading never written registers. We + * will re-enable parity attentions right after the dump. + */ + bnx2x_disable_blocks_parity(bp); + + dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; + dump_hdr.dump_sign = dump_sign_all; + dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); + dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); + dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); + dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); + + if (CHIP_IS_E1(bp)) + dump_hdr.info = RI_E1_ONLINE; + else if (CHIP_IS_E1H(bp)) + dump_hdr.info = RI_E1H_ONLINE; + else if (!CHIP_IS_E1x(bp)) + dump_hdr.info = RI_E2_ONLINE | + (BP_PATH(bp) ?
RI_PATH1_DUMP : RI_PATH0_DUMP); + + memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); + p += dump_hdr.hdr_size + 1; + + /* Actually read the registers */ + __bnx2x_get_regs(bp, p); + + /* Re-enable parity attentions */ + bnx2x_clear_blocks_parity(bp); + bnx2x_enable_blocks_parity(bp); +} + +static void bnx2x_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct bnx2x *bp = netdev_priv(dev); + u8 phy_fw_ver[PHY_FW_VER_LEN]; + + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + + phy_fw_ver[0] = '\0'; + if (bp->port.pmf) { + bnx2x_acquire_phy_lock(bp); + bnx2x_get_ext_phy_fw_version(&bp->link_params, + (bp->state != BNX2X_STATE_CLOSED), + phy_fw_ver, PHY_FW_VER_LEN); + bnx2x_release_phy_lock(bp); + } + + strncpy(info->fw_version, bp->fw_ver, 32); + snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), + "bc %d.%d.%d%s%s", + (bp->common.bc_ver & 0xff0000) >> 16, + (bp->common.bc_ver & 0xff00) >> 8, + (bp->common.bc_ver & 0xff), + ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); + strcpy(info->bus_info, pci_name(bp->pdev)); + info->n_stats = BNX2X_NUM_STATS; + info->testinfo_len = BNX2X_NUM_TESTS; + info->eedump_len = bp->common.flash_size; + info->regdump_len = bnx2x_get_regs_len(dev); +} + +static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->flags & NO_WOL_FLAG) { + wol->supported = 0; + wol->wolopts = 0; + } else { + wol->supported = WAKE_MAGIC; + if (bp->wol) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + } + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGIC) { + if (bp->flags & NO_WOL_FLAG) + return -EINVAL; + + bp->wol = 1; + } else + bp->wol = 0; + + return 0; +} + +static u32 bnx2x_get_msglevel(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + return bp->msg_enable; +} + +static void bnx2x_set_msglevel(struct net_device *dev, u32 level) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (capable(CAP_NET_ADMIN)) { + /* dump MCP trace */ + if (level & BNX2X_MSG_MCP) + bnx2x_fw_dump_lvl(bp, KERN_INFO); + bp->msg_enable = level; + } +} + +static int bnx2x_nway_reset(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (!bp->port.pmf) + return 0; + + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_link_set(bp); + } + + return 0; +} + +static u32 bnx2x_get_link(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN)) + return 0; + + return bp->link_vars.link_up; +} + +static int bnx2x_get_eeprom_len(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + return bp->common.flash_size; +} + +static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int count, i; + u32 val = 0; + + /* adjust timeout for emulation/FPGA */ + count = BNX2X_NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* request access to nvram interface */ + REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); + + for (i = 0; i < count*10; i++) { + val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); + if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) + break; + + udelay(5); + } + + if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { + 
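/*
 * Editor's note, not part of the original patch: reaching this point
 * means the MCP never granted this port's arbitration bit within
 * roughly BNX2X_NVRAM_TIMEOUT_COUNT * 10 polls of 5 us each (the
 * count is further scaled by 100 on slow emulation/FPGA platforms,
 * per the CHIP_REV_IS_SLOW() check above).
 */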
DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); + return -EBUSY; + } + + return 0; +} + +static int bnx2x_release_nvram_lock(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int count, i; + u32 val = 0; + + /* adjust timeout for emulation/FPGA */ + count = BNX2X_NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* relinquish nvram interface */ + REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); + + for (i = 0; i < count*10; i++) { + val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); + if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) + break; + + udelay(5); + } + + if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { + DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n"); + return -EBUSY; + } + + return 0; +} + +static void bnx2x_enable_nvram_access(struct bnx2x *bp) +{ + u32 val; + + val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); + + /* enable both bits, even on read */ + REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, + (val | MCPR_NVM_ACCESS_ENABLE_EN | + MCPR_NVM_ACCESS_ENABLE_WR_EN)); +} + +static void bnx2x_disable_nvram_access(struct bnx2x *bp) +{ + u32 val; + + val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); + + /* disable both bits, even after read */ + REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, + (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | + MCPR_NVM_ACCESS_ENABLE_WR_EN))); +} + +static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, + u32 cmd_flags) +{ + int count, i, rc; + u32 val; + + /* build the command word */ + cmd_flags |= MCPR_NVM_COMMAND_DOIT; + + /* need to clear DONE bit separately */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); + + /* address of the NVRAM to read from */ + REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, + (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); + + /* issue a read command */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); + + /* adjust timeout for emulation/FPGA */ + count = BNX2X_NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* wait for completion */ + *ret_val = 0; + rc = -EBUSY; + for (i = 0; i < count; i++) { + udelay(5); + val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); + + if (val & MCPR_NVM_COMMAND_DONE) { + val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); + /* we read nvram data in cpu order + * but ethtool sees it as an array of bytes + * converting to big-endian will do the work */ + *ret_val = cpu_to_be32(val); + rc = 0; + break; + } + } + + return rc; +} + +static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, + int buf_size) +{ + int rc; + u32 cmd_flags; + __be32 val; + + if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { + DP(BNX2X_MSG_NVM, + "Invalid parameter: offset 0x%x buf_size 0x%x\n", + offset, buf_size); + return -EINVAL; + } + + if (offset + buf_size > bp->common.flash_size) { + DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" + " buf_size (0x%x) > flash_size (0x%x)\n", + offset, buf_size, bp->common.flash_size); + return -EINVAL; + } + + /* request access to nvram interface */ + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + + /* enable access to nvram interface */ + bnx2x_enable_nvram_access(bp); + + /* read the first word(s) */ + cmd_flags = MCPR_NVM_COMMAND_FIRST; + while ((buf_size > sizeof(u32)) && (rc == 0)) { + rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); + memcpy(ret_buf, &val, 4); + + /* advance to the next dword */ + offset += sizeof(u32); + ret_buf += sizeof(u32); + buf_size -= sizeof(u32); + cmd_flags = 0; + } + + if (rc == 0) { + cmd_flags |= 
MCPR_NVM_COMMAND_LAST; + rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); + memcpy(ret_buf, &val, 4); + } + + /* disable access to nvram interface */ + bnx2x_disable_nvram_access(bp); + bnx2x_release_nvram_lock(bp); + + return rc; +} + +static int bnx2x_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *eebuf) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc; + + if (!netif_running(dev)) + return -EAGAIN; + + DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" + DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", + eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, + eeprom->len, eeprom->len); + + /* parameters already validated in ethtool_get_eeprom */ + + rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); + + return rc; +} + +static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, + u32 cmd_flags) +{ + int count, i, rc; + + /* build the command word */ + cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; + + /* need to clear DONE bit separately */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); + + /* write the data */ + REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val); + + /* address of the NVRAM to write to */ + REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, + (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); + + /* issue the write command */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); + + /* adjust timeout for emulation/FPGA */ + count = BNX2X_NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* wait for completion */ + rc = -EBUSY; + for (i = 0; i < count; i++) { + udelay(5); + val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); + if (val & MCPR_NVM_COMMAND_DONE) { + rc = 0; + break; + } + } + + return rc; +} + +#define BYTE_OFFSET(offset) (8 * (offset & 0x03)) + +static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, + int buf_size) +{ + int rc; + u32 cmd_flags; + u32 align_offset; + __be32 val; + + if (offset + buf_size > bp->common.flash_size) { + DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" + " buf_size (0x%x) > flash_size (0x%x)\n", + offset, buf_size, bp->common.flash_size); + return -EINVAL; + } + + /* request access to nvram interface */ + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + + /* enable access to nvram interface */ + bnx2x_enable_nvram_access(bp); + + cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); + align_offset = (offset & ~0x03); + rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags); + + if (rc == 0) { + val &= ~(0xff << BYTE_OFFSET(offset)); + val |= (*data_buf << BYTE_OFFSET(offset)); + + /* nvram data is returned as an array of bytes + * convert it back to cpu order */ + val = be32_to_cpu(val); + + rc = bnx2x_nvram_write_dword(bp, align_offset, val, + cmd_flags); + } + + /* disable access to nvram interface */ + bnx2x_disable_nvram_access(bp); + bnx2x_release_nvram_lock(bp); + + return rc; +} + +static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, + int buf_size) +{ + int rc; + u32 cmd_flags; + u32 val; + u32 written_so_far; + + if (buf_size == 1) /* ethtool */ + return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); + + if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { + DP(BNX2X_MSG_NVM, + "Invalid parameter: offset 0x%x buf_size 0x%x\n", + offset, buf_size); + return -EINVAL; + } + + if (offset + buf_size > bp->common.flash_size) { + DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" + " buf_size (0x%x) > flash_size (0x%x)\n", + offset, buf_size, bp->common.flash_size); + 
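/*
 * Editor's note, not part of the original patch: the single-byte path
 * above (bnx2x_nvram_write1) is a read-modify-write of the aligned
 * dword. BYTE_OFFSET(offset) = 8 * (offset & 0x03) picks the byte
 * lane: e.g. offset 0x1236 -> lane 2 -> shift 16, so the old byte is
 * cleared with ~(0xff << 16) and the new byte ORed in before the
 * dword is written back.
 */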
return -EINVAL; + } + + /* request access to nvram interface */ + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + + /* enable access to nvram interface */ + bnx2x_enable_nvram_access(bp); + + written_so_far = 0; + cmd_flags = MCPR_NVM_COMMAND_FIRST; + while ((written_so_far < buf_size) && (rc == 0)) { + if (written_so_far == (buf_size - sizeof(u32))) + cmd_flags |= MCPR_NVM_COMMAND_LAST; + else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0) + cmd_flags |= MCPR_NVM_COMMAND_LAST; + else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0) + cmd_flags |= MCPR_NVM_COMMAND_FIRST; + + memcpy(&val, data_buf, 4); + + rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); + + /* advance to the next dword */ + offset += sizeof(u32); + data_buf += sizeof(u32); + written_so_far += sizeof(u32); + cmd_flags = 0; + } + + /* disable access to nvram interface */ + bnx2x_disable_nvram_access(bp); + bnx2x_release_nvram_lock(bp); + + return rc; +} + +static int bnx2x_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *eebuf) +{ + struct bnx2x *bp = netdev_priv(dev); + int port = BP_PORT(bp); + int rc = 0; + u32 ext_phy_config; + if (!netif_running(dev)) + return -EAGAIN; + + DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" + DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", + eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, + eeprom->len, eeprom->len); + + /* parameters already validated in ethtool_set_eeprom */ + + /* PHY eeprom can be accessed only by the PMF */ + if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) && + !bp->port.pmf) + return -EINVAL; + + ext_phy_config = + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config); + + if (eeprom->magic == 0x50485950) { + /* 'PHYP' (0x50485950): prepare phy for FW upgrade */ + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + bnx2x_acquire_phy_lock(bp); + rc |= bnx2x_link_reset(&bp->link_params, + &bp->link_vars, 0); + if (XGXS_EXT_PHY_TYPE(ext_phy_config) == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_HIGH, port); + bnx2x_release_phy_lock(bp); + bnx2x_link_report(bp); + + } else if (eeprom->magic == 0x50485952) { + /* 'PHYR' (0x50485952): re-init link after FW upgrade */ + if (bp->state == BNX2X_STATE_OPEN) { + bnx2x_acquire_phy_lock(bp); + rc |= bnx2x_link_reset(&bp->link_params, + &bp->link_vars, 1); + + rc |= bnx2x_phy_init(&bp->link_params, + &bp->link_vars); + bnx2x_release_phy_lock(bp); + bnx2x_calc_fc_adv(bp); + } + } else if (eeprom->magic == 0x53985943) { + /* 'PHYC' (0x53985943): PHY FW upgrade completed */ + if (XGXS_EXT_PHY_TYPE(ext_phy_config) == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) { + + /* DSP Remove Download Mode */ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_LOW, port); + + bnx2x_acquire_phy_lock(bp); + + bnx2x_sfx7101_sp_sw_reset(bp, + &bp->link_params.phy[EXT_PHY1]); + + /* wait 0.5 sec to allow it to run */ + msleep(500); + bnx2x_ext_phy_hw_reset(bp, port); + msleep(500); + bnx2x_release_phy_lock(bp); + } + } else + rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); + + return rc; +} + +static int bnx2x_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct bnx2x *bp = netdev_priv(dev); + + memset(coal, 0, sizeof(struct ethtool_coalesce)); + + coal->rx_coalesce_usecs = bp->rx_ticks; + coal->tx_coalesce_usecs = bp->tx_ticks; + + return 0; +} + +static int bnx2x_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct bnx2x *bp 
= netdev_priv(dev); + + bp->rx_ticks = (u16)coal->rx_coalesce_usecs; + if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) + bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; + + bp->tx_ticks = (u16)coal->tx_coalesce_usecs; + if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) + bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; + + if (netif_running(dev)) + bnx2x_update_coalesce(bp); + + return 0; +} + +static void bnx2x_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bnx2x *bp = netdev_priv(dev); + + ering->rx_max_pending = MAX_RX_AVAIL; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; + + if (bp->rx_ring_size) + ering->rx_pending = bp->rx_ring_size; + else + if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) + ering->rx_pending = MAX_RX_AVAIL/bp->num_queues; + else + ering->rx_pending = MAX_RX_AVAIL; + + ering->rx_mini_pending = 0; + ering->rx_jumbo_pending = 0; + + ering->tx_max_pending = MAX_TX_AVAIL; + ering->tx_pending = bp->tx_ring_size; +} + +static int bnx2x_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + printk(KERN_ERR "Handling parity error recovery. Try again later\n"); + return -EAGAIN; + } + + if ((ering->rx_pending > MAX_RX_AVAIL) || + (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : + MIN_RX_SIZE_TPA)) || + (ering->tx_pending > MAX_TX_AVAIL) || + (ering->tx_pending <= MAX_SKB_FRAGS + 4)) + return -EINVAL; + + bp->rx_ring_size = ering->rx_pending; + bp->tx_ring_size = ering->tx_pending; + + return bnx2x_reload_if_running(dev); +} + +static void bnx2x_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bnx2x *bp = netdev_priv(dev); + int cfg_idx = bnx2x_get_link_cfg_idx(bp); + epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] == + BNX2X_FLOW_CTRL_AUTO); + + epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) == + BNX2X_FLOW_CTRL_RX); + epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) == + BNX2X_FLOW_CTRL_TX); + + DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" + DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", + epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); +} + +static int bnx2x_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 cfg_idx = bnx2x_get_link_cfg_idx(bp); + if (IS_MF(bp)) + return 0; + + DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" + DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", + epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); + + bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO; + + if (epause->rx_pause) + bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX; + + if (epause->tx_pause) + bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX; + + if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO) + bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE; + + if (epause->autoneg) { + if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { + DP(NETIF_MSG_LINK, "autoneg not supported\n"); + return -EINVAL; + } + + if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) { + bp->link_params.req_flow_ctrl[cfg_idx] = + BNX2X_FLOW_CTRL_AUTO; + } + } + + DP(NETIF_MSG_LINK, + "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]); + + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_link_set(bp); + } + + return 0; +} + 
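/*
 * Editorial sketch, not part of the original patch: the self-test code
 * below gates each register-test entry by a chip mask and addresses it
 * as offset0 + port * offset1, touching only the bits in 'mask'. A
 * minimal model of the write/verify step that bnx2x_test_registers()
 * performs per entry (the helper name here is hypothetical):
 */
static int example_reg_walk_one(struct bnx2x *bp, u32 offset, u32 mask,
				u32 wr_val)
{
	u32 save_val = REG_RD(bp, offset);	/* remember original value */
	u32 val;

	REG_WR(bp, offset, wr_val & mask);	/* write the test pattern */
	val = REG_RD(bp, offset);		/* read it back */
	REG_WR(bp, offset, save_val);		/* always restore */

	/* only the writable bits are expected to stick */
	return ((val & mask) == (wr_val & mask)) ? 0 : -ENODEV;
}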
+static const struct { + char string[ETH_GSTRING_LEN]; +} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { + { "register_test (offline)" }, + { "memory_test (offline)" }, + { "loopback_test (offline)" }, + { "nvram_test (online)" }, + { "interrupt_test (online)" }, + { "link_test (online)" }, + { "idle check (online)" } +}; + +enum { + BNX2X_CHIP_E1_OFST = 0, + BNX2X_CHIP_E1H_OFST, + BNX2X_CHIP_E2_OFST, + BNX2X_CHIP_E3_OFST, + BNX2X_CHIP_E3B0_OFST, + BNX2X_CHIP_MAX_OFST +}; + +#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST) +#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST) +#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST) +#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST) +#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST) + +#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1) +#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H) + +static int bnx2x_test_registers(struct bnx2x *bp) +{ + int idx, i, rc = -ENODEV; + u32 wr_val = 0, hw; + int port = BP_PORT(bp); + static const struct { + u32 hw; + u32 offset0; + u32 offset1; + u32 mask; + } reg_tbl[] = { +/* 0 */ { BNX2X_CHIP_MASK_ALL, + BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, + { BNX2X_CHIP_MASK_ALL, + DORQ_REG_DB_ADDR0, 4, 0xffffffff }, + { BNX2X_CHIP_MASK_E1X, + HC_REG_AGG_INT_0, 4, 0x000003ff }, + { BNX2X_CHIP_MASK_ALL, + PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3, + PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, + { BNX2X_CHIP_MASK_E3B0, + PBF_REG_INIT_CRD_Q0, 4, 0x000007ff }, + { BNX2X_CHIP_MASK_ALL, + PRS_REG_CID_PORT_0, 4, 0x00ffffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, +/* 10 */ { BNX2X_CHIP_MASK_ALL, + PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, + { BNX2X_CHIP_MASK_ALL, + QM_REG_CONNNUM_0, 4, 0x000fffff }, + { BNX2X_CHIP_MASK_ALL, + TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, + { BNX2X_CHIP_MASK_ALL, + SRC_REG_KEYRSS0_0, 40, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + SRC_REG_KEYRSS0_7, 40, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, + { BNX2X_CHIP_MASK_ALL, + XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, +/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, +/* 30 */ { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, 
+ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001}, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, + + { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } + }; + + if (!netif_running(bp->dev)) + return rc; + + if (CHIP_IS_E1(bp)) + hw = BNX2X_CHIP_MASK_E1; + else if (CHIP_IS_E1H(bp)) + hw = BNX2X_CHIP_MASK_E1H; + else if (CHIP_IS_E2(bp)) + hw = BNX2X_CHIP_MASK_E2; + else if (CHIP_IS_E3B0(bp)) + hw = BNX2X_CHIP_MASK_E3B0; + else /* e3 A0 */ + hw = BNX2X_CHIP_MASK_E3; + + /* Repeat the test twice: + First by writing 0x00000000, second by writing 0xffffffff */ + for (idx = 0; idx < 2; idx++) { + + switch (idx) { + case 0: + wr_val = 0; + break; + case 1: + wr_val = 0xffffffff; + break; + } + + for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { + u32 offset, mask, save_val, val; + if (!(hw & reg_tbl[i].hw)) + continue; + + offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; + mask = reg_tbl[i].mask; + + save_val = REG_RD(bp, offset); + + REG_WR(bp, offset, wr_val & mask); + + val = REG_RD(bp, offset); + + /* Restore the original register's value */ + REG_WR(bp, offset, save_val); + + /* verify value is as expected */ + if ((val & mask) != (wr_val & mask)) { + DP(NETIF_MSG_HW, + "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", + offset, val, wr_val, mask); + goto test_reg_exit; + } + } + } + + rc = 0; + +test_reg_exit: + return rc; +} + +static int bnx2x_test_memory(struct bnx2x *bp) +{ + int i, j, rc = -ENODEV; + u32 val, index; + static const struct { + u32 offset; + int size; + } mem_tbl[] = { + { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE }, + { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE }, + { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE }, + { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE }, + { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE }, + { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE }, + { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE }, + + { 0xffffffff, 0 } + }; + + static const struct { + char *name; + u32 offset; + u32 hw_mask[BNX2X_CHIP_MAX_OFST]; + } prty_tbl[] = { + { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, + {0x3ffc0, 0, 0, 0} }, + { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, + {0x2, 0x2, 0, 0} }, + { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, + {0, 0, 0, 0} }, + { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, + {0x3ffc0, 0, 0, 0} }, + { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, + {0x3ffc0, 0, 0, 0} }, + { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, + {0x3ffc1, 0, 0, 0} }, + + { NULL, 0xffffffff, {0, 0, 0, 0} } + }; + + if (!netif_running(bp->dev)) + return rc; + + if (CHIP_IS_E1(bp)) + index = BNX2X_CHIP_E1_OFST; + else if (CHIP_IS_E1H(bp)) + index = BNX2X_CHIP_E1H_OFST; + else if (CHIP_IS_E2(bp)) + index = BNX2X_CHIP_E2_OFST; + else /* e3 */ + index = BNX2X_CHIP_E3_OFST; + + /* pre-Check the parity status */ + for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { + val = REG_RD(bp, prty_tbl[i].offset); + if (val & ~(prty_tbl[i].hw_mask[index])) { + DP(NETIF_MSG_HW, + "%s is 0x%x\n", prty_tbl[i].name, val); + goto test_mem_exit; + } + } + + /* Go through all the memories */ + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) + for (j = 0; j < mem_tbl[i].size; j++) + REG_RD(bp, mem_tbl[i].offset + j*4); + + /* Check the parity status */ + for (i = 0; 
prty_tbl[i].offset != 0xffffffff; i++) { + val = REG_RD(bp, prty_tbl[i].offset); + if (val & ~(prty_tbl[i].hw_mask[index])) { + DP(NETIF_MSG_HW, + "%s is 0x%x\n", prty_tbl[i].name, val); + goto test_mem_exit; + } + } + + rc = 0; + +test_mem_exit: + return rc; +} + +static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) +{ + int cnt = 1400; + + if (link_up) { + while (bnx2x_link_test(bp, is_serdes) && cnt--) + msleep(20); + + if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) + DP(NETIF_MSG_LINK, "Timeout waiting for link up\n"); + } +} + +static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) +{ + unsigned int pkt_size, num_pkts, i; + struct sk_buff *skb; + unsigned char *packet; + struct bnx2x_fastpath *fp_rx = &bp->fp[0]; + struct bnx2x_fastpath *fp_tx = &bp->fp[0]; + struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0]; + u16 tx_start_idx, tx_idx; + u16 rx_start_idx, rx_idx; + u16 pkt_prod, bd_prod, rx_comp_cons; + struct sw_tx_bd *tx_buf; + struct eth_tx_start_bd *tx_start_bd; + struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; + struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; + dma_addr_t mapping; + union eth_rx_cqe *cqe; + u8 cqe_fp_flags, cqe_fp_type; + struct sw_rx_bd *rx_buf; + u16 len; + int rc = -ENODEV; + + /* check the loopback mode */ + switch (loopback_mode) { + case BNX2X_PHY_LOOPBACK: + if (bp->link_params.loopback_mode != LOOPBACK_XGXS) + return -EINVAL; + break; + case BNX2X_MAC_LOOPBACK: + bp->link_params.loopback_mode = CHIP_IS_E3(bp) ? + LOOPBACK_XMAC : LOOPBACK_BMAC; + bnx2x_phy_init(&bp->link_params, &bp->link_vars); + break; + default: + return -EINVAL; + } + + /* prepare the loopback packet */ + pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? + bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); + skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size); + if (!skb) { + rc = -ENOMEM; + goto test_loopback_exit; + } + packet = skb_put(skb, pkt_size); + memcpy(packet, bp->dev->dev_addr, ETH_ALEN); + memset(packet + ETH_ALEN, 0, ETH_ALEN); + memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); + for (i = ETH_HLEN; i < pkt_size; i++) + packet[i] = (unsigned char) (i & 0xff); + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + rc = -ENOMEM; + dev_kfree_skb(skb); + BNX2X_ERR("Unable to map SKB\n"); + goto test_loopback_exit; + } + + /* send the loopback packet */ + num_pkts = 0; + tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb); + rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); + + pkt_prod = txdata->tx_pkt_prod++; + tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; + tx_buf->first_bd = txdata->tx_bd_prod; + tx_buf->skb = skb; + tx_buf->flags = 0; + + bd_prod = TX_BD(txdata->tx_bd_prod); + tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; + tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ + tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); + tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_ETH_ADDR_TYPE, + UNICAST_ADDRESS); + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_HDR_NBDS, + 1); + + /* turn on parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; + pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; + + memset(pbd_e2, 0, 
sizeof(struct eth_tx_parse_bd_e2)); + memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); + + wmb(); + + txdata->tx_db.data.prod += 2; + barrier(); + DOORBELL(bp, txdata->cid, txdata->tx_db.raw); + + mmiowb(); + barrier(); + + num_pkts++; + txdata->tx_bd_prod += 2; /* start + pbd */ + + udelay(100); + + tx_idx = le16_to_cpu(*txdata->tx_cons_sb); + if (tx_idx != tx_start_idx + num_pkts) + goto test_loopback_exit; + + /* Unlike HC IGU won't generate an interrupt for status block + * updates that have been performed while interrupts were + * disabled. + */ + if (bp->common.int_block == INT_BLOCK_IGU) { + /* Disable local BHes to prevent a dead-lock situation between + * sch_direct_xmit() and bnx2x_run_loopback() (calling + * bnx2x_tx_int()), as both are taking netif_tx_lock(). + */ + local_bh_disable(); + bnx2x_tx_int(bp, txdata); + local_bh_enable(); + } + + rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); + if (rx_idx != rx_start_idx + num_pkts) + goto test_loopback_exit; + + rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons); + cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)]; + cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; + cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; + if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) + goto test_loopback_rx_exit; + + len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); + if (len != pkt_size) + goto test_loopback_rx_exit; + + rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; + dma_sync_single_for_cpu(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + fp_rx->rx_buf_size, DMA_FROM_DEVICE); + skb = rx_buf->skb; + skb_reserve(skb, cqe->fast_path_cqe.placement_offset); + for (i = ETH_HLEN; i < pkt_size; i++) + if (*(skb->data + i) != (unsigned char) (i & 0xff)) + goto test_loopback_rx_exit; + + rc = 0; + +test_loopback_rx_exit: + + fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons); + fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod); + fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons); + fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod); + + /* Update producers */ + bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod, + fp_rx->rx_sge_prod); + +test_loopback_exit: + bp->link_params.loopback_mode = LOOPBACK_NONE; + + return rc; +} + +static int bnx2x_test_loopback(struct bnx2x *bp) +{ + int rc = 0, res; + + if (BP_NOMCP(bp)) + return rc; + + if (!netif_running(bp->dev)) + return BNX2X_LOOPBACK_FAILED; + + bnx2x_netif_stop(bp, 1); + bnx2x_acquire_phy_lock(bp); + + res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK); + if (res) { + DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); + rc |= BNX2X_PHY_LOOPBACK_FAILED; + } + + res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK); + if (res) { + DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); + rc |= BNX2X_MAC_LOOPBACK_FAILED; + } + + bnx2x_release_phy_lock(bp); + bnx2x_netif_start(bp); + + return rc; +} + +#define CRC32_RESIDUAL 0xdebb20e3 + +static int bnx2x_test_nvram(struct bnx2x *bp) +{ + static const struct { + int offset; + int size; + } nvram_tbl[] = { + { 0, 0x14 }, /* bootstrap */ + { 0x14, 0xec }, /* dir */ + { 0x100, 0x350 }, /* manuf_info */ + { 0x450, 0xf0 }, /* feature_info */ + { 0x640, 0x64 }, /* upgrade_key_info */ + { 0x708, 0x70 }, /* manuf_key_info */ + { 0, 0 } + }; + __be32 buf[0x350 / 4]; + u8 *data = (u8 *)buf; + int i, rc; + u32 magic, crc; + + if (BP_NOMCP(bp)) + return 0; + + rc = bnx2x_nvram_read(bp, 0, data, 4); + if (rc) { + DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); + goto test_nvram_exit; 
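/*
 * Editor's note, not part of the original patch: each region in the
 * nvram_tbl above carries a trailing CRC32, so the loop below can
 * verify integrity by checking that ether_crc_le() over data plus
 * stored CRC equals the fixed residual CRC32_RESIDUAL (0xdebb20e3,
 * the one's complement of the standard CRC-32 residue 0x2144df1c).
 */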
+ } + + magic = be32_to_cpu(buf[0]); + if (magic != 0x669955aa) { + DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic); + rc = -ENODEV; + goto test_nvram_exit; + } + + for (i = 0; nvram_tbl[i].size; i++) { + + rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data, + nvram_tbl[i].size); + if (rc) { + DP(NETIF_MSG_PROBE, + "nvram_tbl[%d] read data (rc %d)\n", i, rc); + goto test_nvram_exit; + } + + crc = ether_crc_le(nvram_tbl[i].size, data); + if (crc != CRC32_RESIDUAL) { + DP(NETIF_MSG_PROBE, + "nvram_tbl[%d] crc value (0x%08x)\n", i, crc); + rc = -ENODEV; + goto test_nvram_exit; + } + } + +test_nvram_exit: + return rc; +} + +/* Send an EMPTY ramrod on the first queue */ +static int bnx2x_test_intr(struct bnx2x *bp) +{ + struct bnx2x_queue_state_params params = {0}; + + if (!netif_running(bp->dev)) + return -ENODEV; + + params.q_obj = &bp->fp->q_obj; + params.cmd = BNX2X_Q_CMD_EMPTY; + + __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); + + return bnx2x_queue_state_change(bp, &params); +} + +static void bnx2x_self_test(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + struct bnx2x *bp = netdev_priv(dev); + u8 is_serdes; + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + printk(KERN_ERR "Handling parity error recovery. Try again later\n"); + etest->flags |= ETH_TEST_FL_FAILED; + return; + } + + memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); + + if (!netif_running(dev)) + return; + + /* offline tests are not supported in MF mode */ + if (IS_MF(bp)) + etest->flags &= ~ETH_TEST_FL_OFFLINE; + is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; + + if (etest->flags & ETH_TEST_FL_OFFLINE) { + int port = BP_PORT(bp); + u32 val; + u8 link_up; + + /* save current value of input enable for TX port IF */ + val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); + /* disable input for TX port IF */ + REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); + + link_up = bp->link_vars.link_up; + + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_load(bp, LOAD_DIAG); + /* wait until link state is restored */ + bnx2x_wait_for_link(bp, 1, is_serdes); + + if (bnx2x_test_registers(bp) != 0) { + buf[0] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (bnx2x_test_memory(bp) != 0) { + buf[1] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + buf[2] = bnx2x_test_loopback(bp); + if (buf[2] != 0) + etest->flags |= ETH_TEST_FL_FAILED; + + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + + /* restore input for TX port IF */ + REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); + + bnx2x_nic_load(bp, LOAD_NORMAL); + /* wait until link state is restored */ + bnx2x_wait_for_link(bp, link_up, is_serdes); + } + if (bnx2x_test_nvram(bp) != 0) { + buf[3] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (bnx2x_test_intr(bp) != 0) { + buf[4] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (bnx2x_link_test(bp, is_serdes) != 0) { + buf[5] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + +#ifdef BNX2X_EXTRA_DEBUG + bnx2x_panic_dump(bp); +#endif +} + +#define IS_PORT_STAT(i) \ + ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) +#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) +#define IS_MF_MODE_STAT(bp) \ + (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) + +/* ethtool statistics are displayed for all regular ethernet queues and the + * fcoe L2 queue if not disabled + */ +static inline int bnx2x_num_stat_queues(struct bnx2x *bp) +{ + return BNX2X_NUM_ETH_QUEUES(bp); +} + +static int bnx2x_get_sset_count(struct net_device *dev, int stringset) +{ + struct bnx2x
*bp = netdev_priv(dev); + int i, num_stats; + + switch (stringset) { + case ETH_SS_STATS: + if (is_multi(bp)) { + num_stats = bnx2x_num_stat_queues(bp) * + BNX2X_NUM_Q_STATS; + if (!IS_MF_MODE_STAT(bp)) + num_stats += BNX2X_NUM_STATS; + } else { + if (IS_MF_MODE_STAT(bp)) { + num_stats = 0; + for (i = 0; i < BNX2X_NUM_STATS; i++) + if (IS_FUNC_STAT(i)) + num_stats++; + } else + num_stats = BNX2X_NUM_STATS; + } + return num_stats; + + case ETH_SS_TEST: + return BNX2X_NUM_TESTS; + + default: + return -EINVAL; + } +} + +static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + struct bnx2x *bp = netdev_priv(dev); + int i, j, k; + char queue_name[MAX_QUEUE_NAME_LEN+1]; + + switch (stringset) { + case ETH_SS_STATS: + if (is_multi(bp)) { + k = 0; + for_each_eth_queue(bp, i) { + memset(queue_name, 0, sizeof(queue_name)); + sprintf(queue_name, "%d", i); + for (j = 0; j < BNX2X_NUM_Q_STATS; j++) + snprintf(buf + (k + j)*ETH_GSTRING_LEN, + ETH_GSTRING_LEN, + bnx2x_q_stats_arr[j].string, + queue_name); + k += BNX2X_NUM_Q_STATS; + } + if (IS_MF_MODE_STAT(bp)) + break; + for (j = 0; j < BNX2X_NUM_STATS; j++) + strcpy(buf + (k + j)*ETH_GSTRING_LEN, + bnx2x_stats_arr[j].string); + } else { + for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { + if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) + continue; + strcpy(buf + j*ETH_GSTRING_LEN, + bnx2x_stats_arr[i].string); + j++; + } + } + break; + + case ETH_SS_TEST: + memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); + break; + } +} + +static void bnx2x_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *buf) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 *hw_stats, *offset; + int i, j, k; + + if (is_multi(bp)) { + k = 0; + for_each_eth_queue(bp, i) { + hw_stats = (u32 *)&bp->fp[i].eth_q_stats; + for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { + if (bnx2x_q_stats_arr[j].size == 0) { + /* skip this counter */ + buf[k + j] = 0; + continue; + } + offset = (hw_stats + + bnx2x_q_stats_arr[j].offset); + if (bnx2x_q_stats_arr[j].size == 4) { + /* 4-byte counter */ + buf[k + j] = (u64) *offset; + continue; + } + /* 8-byte counter */ + buf[k + j] = HILO_U64(*offset, *(offset + 1)); + } + k += BNX2X_NUM_Q_STATS; + } + if (IS_MF_MODE_STAT(bp)) + return; + hw_stats = (u32 *)&bp->eth_stats; + for (j = 0; j < BNX2X_NUM_STATS; j++) { + if (bnx2x_stats_arr[j].size == 0) { + /* skip this counter */ + buf[k + j] = 0; + continue; + } + offset = (hw_stats + bnx2x_stats_arr[j].offset); + if (bnx2x_stats_arr[j].size == 4) { + /* 4-byte counter */ + buf[k + j] = (u64) *offset; + continue; + } + /* 8-byte counter */ + buf[k + j] = HILO_U64(*offset, *(offset + 1)); + } + } else { + hw_stats = (u32 *)&bp->eth_stats; + for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { + if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) + continue; + if (bnx2x_stats_arr[i].size == 0) { + /* skip this counter */ + buf[j] = 0; + j++; + continue; + } + offset = (hw_stats + bnx2x_stats_arr[i].offset); + if (bnx2x_stats_arr[i].size == 4) { + /* 4-byte counter */ + buf[j] = (u64) *offset; + j++; + continue; + } + /* 8-byte counter */ + buf[j] = HILO_U64(*offset, *(offset + 1)); + j++; + } + } +} + +static int bnx2x_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + + if (!bp->port.pmf) + return -EOPNOTSUPP; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + bnx2x_set_led(&bp->link_params, 
&bp->link_vars, + LED_MODE_ON, SPEED_1000); + break; + + case ETHTOOL_ID_OFF: + bnx2x_set_led(&bp->link_params, &bp->link_vars, + LED_MODE_FRONT_PANEL_OFF, 0); + + break; + + case ETHTOOL_ID_INACTIVE: + bnx2x_set_led(&bp->link_params, &bp->link_vars, + LED_MODE_OPER, + bp->link_vars.line_speed); + } + + return 0; +} + +static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + void *rules __always_unused) +{ + struct bnx2x *bp = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = BNX2X_NUM_ETH_QUEUES(bp); + return 0; + + default: + return -EOPNOTSUPP; + } +} + +static int bnx2x_get_rxfh_indir(struct net_device *dev, + struct ethtool_rxfh_indir *indir) +{ + struct bnx2x *bp = netdev_priv(dev); + size_t copy_size = + min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE); + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; + size_t i; + + if (bp->multi_mode == ETH_RSS_MODE_DISABLED) + return -EOPNOTSUPP; + + /* Get the current configuration of the RSS indirection table */ + bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); + + /* + * We can't use a memcpy() because the internal storage of the + * indirection table is a u8 array while indir->ring_index + * points to an array of u32. + * + * The indirection table contains the FW Client IDs, so we need to + * align the returned table to the Client ID of the leading RSS + * queue. + */ + for (i = 0; i < copy_size; i++) + indir->ring_index[i] = ind_table[i] - bp->fp->cl_id; + + indir->size = T_ETH_INDIRECTION_TABLE_SIZE; + + return 0; +} + +static int bnx2x_set_rxfh_indir(struct net_device *dev, + const struct ethtool_rxfh_indir *indir) +{ + struct bnx2x *bp = netdev_priv(dev); + size_t i; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; + u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); + + if (bp->multi_mode == ETH_RSS_MODE_DISABLED) + return -EOPNOTSUPP; + + /* validate the size */ + if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE) + return -EINVAL; + + for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { + /* validate the indices */ + if (indir->ring_index[i] >= num_eth_queues) + return -EINVAL; + /* + * As in bnx2x_get_rxfh_indir(): we can't use a memcpy() because + * the internal storage of the indirection table is a u8 array + * while indir->ring_index points to an array of u32.
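+ * + * For illustration of the Client ID alignment described below + * (hypothetical values): with bp->fp->cl_id == 17, a user-supplied + * ring_index[i] of 2 is stored internally as 2 + 17 = 19, and + * bnx2x_get_rxfh_indir() above hands it back to ethtool as + * 19 - 17 = 2.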
+ * + * Indirection table contains the FW Client IDs, so we need to + * align the received table to the Client ID of the leading RSS + * queue + */ + ind_table[i] = indir->ring_index[i] + bp->fp->cl_id; + } + + return bnx2x_config_rss_pf(bp, ind_table, false); +} + +static const struct ethtool_ops bnx2x_ethtool_ops = { + .get_settings = bnx2x_get_settings, + .set_settings = bnx2x_set_settings, + .get_drvinfo = bnx2x_get_drvinfo, + .get_regs_len = bnx2x_get_regs_len, + .get_regs = bnx2x_get_regs, + .get_wol = bnx2x_get_wol, + .set_wol = bnx2x_set_wol, + .get_msglevel = bnx2x_get_msglevel, + .set_msglevel = bnx2x_set_msglevel, + .nway_reset = bnx2x_nway_reset, + .get_link = bnx2x_get_link, + .get_eeprom_len = bnx2x_get_eeprom_len, + .get_eeprom = bnx2x_get_eeprom, + .set_eeprom = bnx2x_set_eeprom, + .get_coalesce = bnx2x_get_coalesce, + .set_coalesce = bnx2x_set_coalesce, + .get_ringparam = bnx2x_get_ringparam, + .set_ringparam = bnx2x_set_ringparam, + .get_pauseparam = bnx2x_get_pauseparam, + .set_pauseparam = bnx2x_set_pauseparam, + .self_test = bnx2x_self_test, + .get_sset_count = bnx2x_get_sset_count, + .get_strings = bnx2x_get_strings, + .set_phys_id = bnx2x_set_phys_id, + .get_ethtool_stats = bnx2x_get_ethtool_stats, + .get_rxnfc = bnx2x_get_rxnfc, + .get_rxfh_indir = bnx2x_get_rxfh_indir, + .set_rxfh_indir = bnx2x_set_rxfh_indir, +}; + +void bnx2x_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h new file mode 100644 index 0000000..998652a --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -0,0 +1,410 @@ +/* bnx2x_fw_defs.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
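+ * + * The *_OFFSET macros below compute per-STORM memory offsets + * indirectly: each IRO[n] entry supplies a base plus per-index + * strides m1/m2, e.g. IRO[n].base + ((pfId) * IRO[n].m1); the IRO + * array itself is loaded from the firmware file (see the iro_arr + * section in bnx2x_fw_file_hdr.h further down).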
+ */ + +#ifndef BNX2X_FW_DEFS_H +#define BNX2X_FW_DEFS_H + +#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base) +#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[147].base + ((assertListEntry) * IRO[147].m1)) +#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ + (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \ + IRO[153].m2)) +#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ + (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \ + IRO[154].m2)) +#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ + (IRO[159].base + ((funcId) * IRO[159].m1)) +#define CSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[149].base + ((funcId) * IRO[149].m1)) +#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) +#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ + (IRO[315].base + ((pfId) * IRO[315].m1)) +#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[316].base + ((pfId) * IRO[316].m1)) +#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ + (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2)) +#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ + (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) +#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ + (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2)) +#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ + (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) +#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ + (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) +#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ + (IRO[314].base + ((pfId) * IRO[314].m1)) +#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[306].base + ((pfId) * IRO[306].m1)) +#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[305].base + ((pfId) * IRO[305].m1)) +#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[304].base + ((pfId) * IRO[304].m1)) +#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[151].base + ((funcId) * IRO[151].m1)) +#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ + (IRO[142].base + ((pfId) * IRO[142].m1)) +#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ + (IRO[143].base + ((pfId) * IRO[143].m1)) +#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ + (IRO[141].base + ((pfId) * IRO[141].m1)) +#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size) +#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ + (IRO[144].base + ((pfId) * IRO[144].m1)) +#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size) +#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ + (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2)) +#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ + (IRO[133].base + ((sbId) * IRO[133].m1)) +#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ + (IRO[134].base + ((sbId) * IRO[134].m1)) +#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ + (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2)) +#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ + (IRO[132].base + ((sbId) * IRO[132].m1)) +#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size) +#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ + (IRO[137].base + ((sbId) * IRO[137].m1)) +#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size) +#define 
CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ + (IRO[155].base + ((vfId) * IRO[155].m1)) +#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ + (IRO[156].base + ((vfId) * IRO[156].m1)) +#define CSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[150].base + ((funcId) * IRO[150].m1)) +#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base) +#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ + (IRO[203].base + ((pfId) * IRO[203].m1)) +#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) +#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[101].base + ((assertListEntry) * IRO[101].m1)) +#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base) +#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ + (IRO[108].base) +#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ + (IRO[201].base + ((pfId) * IRO[201].m1)) +#define TSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[103].base + ((funcId) * IRO[103].m1)) +#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ + (IRO[271].base + ((pfId) * IRO[271].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ + (IRO[272].base + ((pfId) * IRO[272].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ + (IRO[273].base + ((pfId) * IRO[273].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ + (IRO[274].base + ((pfId) * IRO[274].m1)) +#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[270].base + ((pfId) * IRO[270].m1)) +#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[269].base + ((pfId) * IRO[269].m1)) +#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[268].base + ((pfId) * IRO[268].m1)) +#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ + (IRO[267].base + ((pfId) * IRO[267].m1)) +#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ + (IRO[276].base + ((pfId) * IRO[276].m1)) +#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ + (IRO[263].base + ((pfId) * IRO[263].m1)) +#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[264].base + ((pfId) * IRO[264].m1)) +#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[265].base + ((pfId) * IRO[265].m1)) +#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[266].base + ((pfId) * IRO[266].m1)) +#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ + (IRO[202].base + ((pfId) * IRO[202].m1)) +#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[105].base + ((funcId) * IRO[105].m1)) +#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ + (IRO[216].base + ((pfId) * IRO[216].m1)) +#define TSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[104].base + ((funcId) * IRO[104].m1)) +#define USTORM_AGG_DATA_OFFSET (IRO[206].base) +#define USTORM_AGG_DATA_SIZE (IRO[206].size) +#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) +#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[176].base + ((assertListEntry) * IRO[176].m1)) +#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \ + (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \ + IRO[205].m2)) +#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ + (IRO[183].base + ((portId) * IRO[183].m1)) +#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ + (IRO[317].base + ((pfId) * IRO[317].m1)) +#define USTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[178].base + ((funcId) * IRO[178].m1)) +#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ + (IRO[281].base + ((pfId) * IRO[281].m1)) +#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[282].base + ((pfId) * IRO[282].m1)) +#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ + (IRO[286].base + ((pfId) * IRO[286].m1)) +#define 
USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ + (IRO[283].base + ((pfId) * IRO[283].m1)) +#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[279].base + ((pfId) * IRO[279].m1)) +#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[278].base + ((pfId) * IRO[278].m1)) +#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[277].base + ((pfId) * IRO[277].m1)) +#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ + (IRO[280].base + ((pfId) * IRO[280].m1)) +#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ + (IRO[284].base + ((pfId) * IRO[284].m1)) +#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ + (IRO[285].base + ((pfId) * IRO[285].m1)) +#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ + (IRO[182].base + ((pfId) * IRO[182].m1)) +#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[180].base + ((funcId) * IRO[180].m1)) +#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ + (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \ + IRO[209].m2)) +#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ + (IRO[210].base + ((qzoneId) * IRO[210].m1)) +#define USTORM_TPA_BTR_OFFSET (IRO[207].base) +#define USTORM_TPA_BTR_SIZE (IRO[207].size) +#define USTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[179].base + ((funcId) * IRO[179].m1)) +#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) +#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) +#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) +#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[50].base + ((assertListEntry) * IRO[50].m1)) +#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \ + (IRO[43].base + ((portId) * IRO[43].m1)) +#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \ + (IRO[45].base + ((pfId) * IRO[45].m1)) +#define XSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[47].base + ((funcId) * IRO[47].m1)) +#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ + (IRO[294].base + ((pfId) * IRO[294].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ + (IRO[297].base + ((pfId) * IRO[297].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ + (IRO[298].base + ((pfId) * IRO[298].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ + (IRO[299].base + ((pfId) * IRO[299].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ + (IRO[300].base + ((pfId) * IRO[300].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ + (IRO[301].base + ((pfId) * IRO[301].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ + (IRO[302].base + ((pfId) * IRO[302].m1)) +#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ + (IRO[303].base + ((pfId) * IRO[303].m1)) +#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[293].base + ((pfId) * IRO[293].m1)) +#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[292].base + ((pfId) * IRO[292].m1)) +#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[291].base + ((pfId) * IRO[291].m1)) +#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ + (IRO[296].base + ((pfId) * IRO[296].m1)) +#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ + (IRO[295].base + ((pfId) * IRO[295].m1)) +#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ + (IRO[290].base + ((pfId) * IRO[290].m1)) +#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ + (IRO[289].base + ((pfId) * IRO[289].m1)) +#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ + (IRO[288].base + ((pfId) * IRO[288].m1)) +#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ + (IRO[287].base + ((pfId) * IRO[287].m1)) +#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ + (IRO[44].base + ((pfId) * IRO[44].m1)) +#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[49].base + 
((funcId) * IRO[49].m1)) +#define XSTORM_SPQ_DATA_OFFSET(funcId) \ + (IRO[32].base + ((funcId) * IRO[32].m1)) +#define XSTORM_SPQ_DATA_SIZE (IRO[32].size) +#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \ + (IRO[30].base + ((funcId) * IRO[30].m1)) +#define XSTORM_SPQ_PROD_OFFSET(funcId) \ + (IRO[31].base + ((funcId) * IRO[31].m1)) +#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ + (IRO[211].base + ((portId) * IRO[211].m1)) +#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ + (IRO[212].base + ((portId) * IRO[212].m1)) +#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ + (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \ + IRO[214].m2)) +#define XSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[48].base + ((funcId) * IRO[48].m1)) +#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 + +/** +* This file defines HSI constants for the ETH flow +*/ +#ifdef _EVEREST_MICROCODE +#include "Microcode\Generated\DataTypes\eth_rx_bd.h" +#include "Microcode\Generated\DataTypes\eth_tx_bd.h" +#include "Microcode\Generated\DataTypes\eth_rx_cqe.h" +#include "Microcode\Generated\DataTypes\eth_rx_sge.h" +#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h" +#endif + + +/* Ethernet Ring parameters */ +#define X_ETH_LOCAL_RING_SIZE 13 +#define FIRST_BD_IN_PKT 0 +#define PARSE_BD_INDEX 1 +#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8)) +#define U_ETH_NUM_OF_SGES_TO_FETCH 8 +#define U_ETH_MAX_SGES_FOR_PACKET 3 + +/* Rx ring params */ +#define U_ETH_LOCAL_BD_RING_SIZE 8 +#define U_ETH_LOCAL_SGE_RING_SIZE 10 +#define U_ETH_SGL_SIZE 8 + /* The fw will pad the buffer with this value, so the IP header \ + will be aligned to 4 bytes */ +#define IP_HEADER_ALIGNMENT_PADDING 2 + +#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \ + (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1)) + +#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8)) +#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) +#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8)) + +#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) +#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) +#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1) + +#define U_ETH_UNDEFINED_Q 0xFF + +#define T_ETH_INDIRECTION_TABLE_SIZE 128 +#define T_ETH_RSS_KEY 10 +#define ETH_NUM_OF_RSS_ENGINES_E2 72 + +#define FILTER_RULES_COUNT 16 +#define MULTICAST_RULES_COUNT 16 +#define CLASSIFY_RULES_COUNT 16 + +/* The CRC32 seed that is used for the multicast address hash (reduction) */ +#define ETH_CRC32_HASH_SEED 0x00000000 + +#define ETH_CRC32_HASH_BIT_SIZE (8) +#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1) + +#endif /* BNX2X_FW_DEFS_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h new file mode 100644 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h +/* bnx2x_fw_file_hdr.h: FW binary file header structure. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Vladislav Zolotarov + * Based on the original idea of John Wright .
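+ * + * Each bnx2x_fw_file_section below is a { __be32 len; __be32 offset; } + * pair into the blob returned by request_firmware(), so a loader would + * locate a section along these lines (sketch only): + * + * const struct bnx2x_fw_file_hdr *hdr = (const void *)fw->data; + * const u8 *init_data = fw->data + be32_to_cpu(hdr->init_data.offset); + * u32 init_data_len = be32_to_cpu(hdr->init_data.len);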
+ */ + +#ifndef BNX2X_INIT_FILE_HDR_H +#define BNX2X_INIT_FILE_HDR_H + +struct bnx2x_fw_file_section { + __be32 len; + __be32 offset; +}; + +struct bnx2x_fw_file_hdr { + struct bnx2x_fw_file_section init_ops; + struct bnx2x_fw_file_section init_ops_offsets; + struct bnx2x_fw_file_section init_data; + struct bnx2x_fw_file_section tsem_int_table_data; + struct bnx2x_fw_file_section tsem_pram_data; + struct bnx2x_fw_file_section usem_int_table_data; + struct bnx2x_fw_file_section usem_pram_data; + struct bnx2x_fw_file_section csem_int_table_data; + struct bnx2x_fw_file_section csem_pram_data; + struct bnx2x_fw_file_section xsem_int_table_data; + struct bnx2x_fw_file_section xsem_pram_data; + struct bnx2x_fw_file_section iro_arr; + struct bnx2x_fw_file_section fw_version; +}; + +#endif /* BNX2X_INIT_FILE_HDR_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h new file mode 100644 index 0000000..dc24de4 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -0,0 +1,5131 @@ +/* bnx2x_hsi.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#ifndef BNX2X_HSI_H +#define BNX2X_HSI_H + +#include "bnx2x_fw_defs.h" + +#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e + +struct license_key { + u32 reserved[6]; + + u32 max_iscsi_conn; +#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF +#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0 +#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000 +#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16 + + u32 reserved_a; + + u32 max_fcoe_conn; +#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF +#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0 +#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000 +#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16 + + u32 reserved_b[4]; +}; + + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_MAX 2 + +/**************************************************************************** + * Shared HW configuration * + ****************************************************************************/ +#define PIN_CFG_NA 0x00000000 +#define PIN_CFG_GPIO0_P0 0x00000001 +#define PIN_CFG_GPIO1_P0 0x00000002 +#define PIN_CFG_GPIO2_P0 0x00000003 +#define PIN_CFG_GPIO3_P0 0x00000004 +#define PIN_CFG_GPIO0_P1 0x00000005 +#define PIN_CFG_GPIO1_P1 0x00000006 +#define PIN_CFG_GPIO2_P1 0x00000007 +#define PIN_CFG_GPIO3_P1 0x00000008 +#define PIN_CFG_EPIO0 0x00000009 +#define PIN_CFG_EPIO1 0x0000000a +#define PIN_CFG_EPIO2 0x0000000b +#define PIN_CFG_EPIO3 0x0000000c +#define PIN_CFG_EPIO4 0x0000000d +#define PIN_CFG_EPIO5 0x0000000e +#define PIN_CFG_EPIO6 0x0000000f +#define PIN_CFG_EPIO7 0x00000010 +#define PIN_CFG_EPIO8 0x00000011 +#define PIN_CFG_EPIO9 0x00000012 +#define PIN_CFG_EPIO10 0x00000013 +#define PIN_CFG_EPIO11 0x00000014 +#define PIN_CFG_EPIO12 0x00000015 +#define PIN_CFG_EPIO13 0x00000016 +#define PIN_CFG_EPIO14 0x00000017 +#define PIN_CFG_EPIO15 0x00000018 +#define PIN_CFG_EPIO16 0x00000019 +#define PIN_CFG_EPIO17 0x0000001a +#define PIN_CFG_EPIO18 0x0000001b +#define PIN_CFG_EPIO19 0x0000001c +#define PIN_CFG_EPIO20 0x0000001d +#define PIN_CFG_EPIO21 0x0000001e +#define PIN_CFG_EPIO22 0x0000001f +#define PIN_CFG_EPIO23 0x00000020 +#define PIN_CFG_EPIO24 0x00000021 +#define PIN_CFG_EPIO25 0x00000022 +#define PIN_CFG_EPIO26 0x00000023 +#define PIN_CFG_EPIO27 0x00000024 +#define PIN_CFG_EPIO28 
0x00000025 +#define PIN_CFG_EPIO29 0x00000026 +#define PIN_CFG_EPIO30 0x00000027 +#define PIN_CFG_EPIO31 0x00000028 + +/* EPIO definition */ +#define EPIO_CFG_NA 0x00000000 +#define EPIO_CFG_EPIO0 0x00000001 +#define EPIO_CFG_EPIO1 0x00000002 +#define EPIO_CFG_EPIO2 0x00000003 +#define EPIO_CFG_EPIO3 0x00000004 +#define EPIO_CFG_EPIO4 0x00000005 +#define EPIO_CFG_EPIO5 0x00000006 +#define EPIO_CFG_EPIO6 0x00000007 +#define EPIO_CFG_EPIO7 0x00000008 +#define EPIO_CFG_EPIO8 0x00000009 +#define EPIO_CFG_EPIO9 0x0000000a +#define EPIO_CFG_EPIO10 0x0000000b +#define EPIO_CFG_EPIO11 0x0000000c +#define EPIO_CFG_EPIO12 0x0000000d +#define EPIO_CFG_EPIO13 0x0000000e +#define EPIO_CFG_EPIO14 0x0000000f +#define EPIO_CFG_EPIO15 0x00000010 +#define EPIO_CFG_EPIO16 0x00000011 +#define EPIO_CFG_EPIO17 0x00000012 +#define EPIO_CFG_EPIO18 0x00000013 +#define EPIO_CFG_EPIO19 0x00000014 +#define EPIO_CFG_EPIO20 0x00000015 +#define EPIO_CFG_EPIO21 0x00000016 +#define EPIO_CFG_EPIO22 0x00000017 +#define EPIO_CFG_EPIO23 0x00000018 +#define EPIO_CFG_EPIO24 0x00000019 +#define EPIO_CFG_EPIO25 0x0000001a +#define EPIO_CFG_EPIO26 0x0000001b +#define EPIO_CFG_EPIO27 0x0000001c +#define EPIO_CFG_EPIO28 0x0000001d +#define EPIO_CFG_EPIO29 0x0000001e +#define EPIO_CFG_EPIO30 0x0000001f +#define EPIO_CFG_EPIO31 0x00000020 + + +struct shared_hw_cfg { /* NVRAM Offset */ + /* Up to 16 bytes of NULL-terminated string */ + u8 part_num[16]; /* 0x104 */ + + u32 config; /* 0x114 */ + #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 + #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 + #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 + #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 + #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002 + + #define SHARED_HW_CFG_PORT_SWAP 0x00000004 + + #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 + + #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010 + + #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 + #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 + /* Whatever MFW found in NVM + (if multiple found, priority order is: NC-SI, UMP, IPMI) */ + #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 + #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 + #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 + #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 + /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 + /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 + /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 + + #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000 + #define SHARED_HW_CFG_LED_MODE_SHIFT 16 + #define SHARED_HW_CFG_LED_MAC1 0x00000000 + #define SHARED_HW_CFG_LED_PHY1 0x00010000 + #define SHARED_HW_CFG_LED_PHY2 0x00020000 + #define SHARED_HW_CFG_LED_PHY3 0x00030000 + #define SHARED_HW_CFG_LED_MAC2 0x00040000 + #define SHARED_HW_CFG_LED_PHY4 0x00050000 + #define SHARED_HW_CFG_LED_PHY5 0x00060000 + #define SHARED_HW_CFG_LED_PHY6 0x00070000 + #define SHARED_HW_CFG_LED_MAC3 0x00080000 + #define SHARED_HW_CFG_LED_PHY7 0x00090000 + #define SHARED_HW_CFG_LED_PHY9 0x000a0000 + #define SHARED_HW_CFG_LED_PHY11 0x000b0000 + #define SHARED_HW_CFG_LED_MAC4 
0x000c0000 + #define SHARED_HW_CFG_LED_PHY8 0x000d0000 + #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 + + + #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 + #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 + #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000 + #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000 + #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000 + #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000 + #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000 + #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000 + + #define SHARED_HW_CFG_SRIOV_MASK 0x40000000 + #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000 + #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000 + + #define SHARED_HW_CFG_ATC_MASK 0x80000000 + #define SHARED_HW_CFG_ATC_DISABLED 0x00000000 + #define SHARED_HW_CFG_ATC_ENABLED 0x80000000 + + u32 config2; /* 0x118 */ + /* one time auto detect grace period (in sec) */ + #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff + #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0 + + #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100 + #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000 + + /* The default value for the core clock is 250MHz and it is + achieved by setting the clock change to 4 */ + #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00 + #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9 + + #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000 + #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 + #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 + + #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 + + #define SHARED_HW_CFG_WOL_CAPABLE_MASK 0x00004000 + #define SHARED_HW_CFG_WOL_CAPABLE_DISABLED 0x00000000 + #define SHARED_HW_CFG_WOL_CAPABLE_ENABLED 0x00004000 + + /* Output low when PERST is asserted */ + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000 + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000 + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000 + + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000 + + /* The fan failure mechanism is usually related to the PHY type + since the power consumption of the board is determined by the PHY. + Currently, fan is required for most designs with SFX7101, BCM8727 + and BCM8481. If a fan is not required for a board which uses one + of those PHYs, this field should be set to "Disabled". If a fan is + required for a different PHY type, this option should be set to + "Enabled". 
The fan failure indication is expected on SPIO5 */ + #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 + #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 + #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 + #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 + + /* ASPM Power Management support */ + #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000 + #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000 + + /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register + tl_control_0 (register 0x2800) */ + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000 + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000 + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000 + + #define SHARED_HW_CFG_PORT_MODE_MASK 0x01000000 + #define SHARED_HW_CFG_PORT_MODE_2 0x00000000 + #define SHARED_HW_CFG_PORT_MODE_4 0x01000000 + + #define SHARED_HW_CFG_PATH_SWAP_MASK 0x02000000 + #define SHARED_HW_CFG_PATH_SWAP_DISABLED 0x00000000 + #define SHARED_HW_CFG_PATH_SWAP_ENABLED 0x02000000 + + /* Set the MDC/MDIO access for the first external phy */ + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 + + /* Set the MDC/MDIO access for the second external phy */ + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 + + + u32 power_dissipated; /* 0x11c */ + #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000 + #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16 + #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000 + #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000 + #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000 + #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000 + + #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 + #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 + + u32 ump_nc_si_config; /* 0x120 */ + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 + + #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00 + #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8 + + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000 + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16 + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000 + + u32 board; /* 0x124 */ + #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F + #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 
0 + #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0 + #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6 + /* Use the PIN_CFG_XXX defines on top */ + #define SHARED_HW_CFG_BOARD_REV_MASK 0x00ff0000 + #define SHARED_HW_CFG_BOARD_REV_SHIFT 16 + + #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0f000000 + #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 + + #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xf0000000 + #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 + + u32 wc_lane_config; /* 0x128 */ + #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF + #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0 + #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b + #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4 + #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b + #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4 + #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF + #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 + #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00 + #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 + + /* TX lane Polarity swap */ + #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000 + #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000 + #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000 + #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000 + /* RX lane Polarity swap */ + #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000 + #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000 + #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000 + #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000 + + /* Selects the port layout of the board */ + #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000 +}; + + +/**************************************************************************** + * Port HW configuration * + ****************************************************************************/ +struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ + + u32 pci_id; + #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 + #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff + + u32 pci_sub_id; + #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000 + #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff + + u32 power_dissipated; + #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff + #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00 + #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 + #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000 + #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 + #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000 + #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 + + u32 power_consumed; + #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff + #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00 + #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 + #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000 + #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 + #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000 + #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 + + u32 mac_upper; + #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff + #define PORT_HW_CFG_UPPERMAC_SHIFT 0 + u32 mac_lower; + + u32 iscsi_mac_upper; /* Upper 16
bits are always zeroes */ + u32 iscsi_mac_lower; + + u32 rdma_mac_upper; /* Upper 16 bits are always zeroes */ + u32 rdma_mac_lower; + + u32 serdes_config; + #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff + #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 + + #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xffff0000 + #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 + + + /* Default values: 2P-64, 4P-32 */ + u32 pf_config; /* 0x158 */ + #define PORT_HW_CFG_PF_NUM_VF_MASK 0x0000007F + #define PORT_HW_CFG_PF_NUM_VF_SHIFT 0 + + /* Default values: 17 */ + #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK 0x00007F00 + #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT 8 + + #define PORT_HW_CFG_ENABLE_FLR_MASK 0x00010000 + #define PORT_HW_CFG_FLR_ENABLED 0x00010000 + + u32 vf_config; /* 0x15C */ + #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK 0x0000007F + #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT 0 + + #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000 + #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16 + + u32 mf_pci_id; /* 0x160 */ + #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF + #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0 + + /* Controls the TX laser of the SFP+ module */ + u32 sfp_ctrl; /* 0x164 */ + #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF + #define PORT_HW_CFG_TX_LASER_SHIFT 0 + #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 + #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 + #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 + #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 + #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 + + /* Controls the fault module LED of the SFP+ */ + #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 + #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 + #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 + + /* The output pin TX_DIS that controls the TX laser of the SFP+ + module. Use the PIN_CFG_XXX defines on top */ + u32 e3_sfp_ctrl; /* 0x168 */ + #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF + #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0 + + /* The output pin for SFPP_TYPE which turns on the Fault module LED */ + #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00 + #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8 + + /* The input pin MOD_ABS that indicates whether the SFP+ module is + present or not. Use the PIN_CFG_XXX defines on top */ + #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000 + #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16 + + /* The output pin PWRDIS_SFP_X which disables the power of the SFP+ + module. Use the PIN_CFG_XXX defines on top */ + #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000 + #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24 + + /* + * The input pin which signals module transmit fault. Use the + * PIN_CFG_XXX defines on top + */ + u32 e3_cmn_pin_cfg; /* 0x16C */ + #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF + #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0 + + /* The output pin which resets the PHY. Use the PIN_CFG_XXX defines on + top */ + #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00 + #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8 + + /* + * The output pin which powers down the PHY.
Use the PIN_CFG_XXX + * defines on top + */ + #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000 + #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16 + + /* The output pin values BSC_SEL which select the I2C for this port + in the I2C mux */ + #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000 + #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000 + + + /* + * The input pin I_FAULT which indicates that an over-current has + * occurred. Use the PIN_CFG_XXX defines on top + */ + u32 e3_cmn_pin_cfg1; /* 0x170 */ + #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF + #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0 + u32 reserved0[7]; /* 0x174 */ + + u32 aeu_int_mask; /* 0x190 */ + + u32 media_type; /* 0x194 */ + #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF + #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0 + + #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00 + #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8 + + #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000 + #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16 + + /* 4 times 16 bits for all 4 lanes. In case an external PHY is present + (not direct mode), those values will not take effect on the 4 XGXS + lanes. For some external PHYs (such as 8706 and 8726) the values + will be used to configure the external PHY; in those cases, not + all 4 values are needed. */ + u16 xgxs_config_rx[4]; /* 0x198 */ + u16 xgxs_config_tx[4]; /* 0x1A0 */ + + /* For storing the FCoE MAC in shared memory */ + u32 fcoe_fip_mac_upper; + #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff + #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0 + u32 fcoe_fip_mac_lower; + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 Reserved1[49]; /* 0x1C0 */ + + /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default), + 84833 only */ + u32 xgbt_phy_cfg; /* 0x284 */ + #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF + #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0 + + u32 default_cfg; /* 0x288 */ + #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 + #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 + #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 + #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 + #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 + + #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C + #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 + #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 + #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 + #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c + + #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 + #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 + #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 + #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 + #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 + + #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 + #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 + #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 + #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 + #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 + + /* When the KR link is required to be forced (which is not + KR-compliant), this parameter determines the trigger for it. + When GPIO is selected, a low input will force the speed. Currently + the default forced speed is 1G. In the future, this may be widened + to select the forced speed with another parameter. Note that when + force-1G is enabled, it overrides option 56: Link Speed option.
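+ A consumer reads such a field with the usual mask/shift pattern, + e.g. (sketch; kr_force is a hypothetical local): + kr_force = (cfg & PORT_HW_CFG_FORCE_KR_ENABLER_MASK) >> + PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT;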
 */ + #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 + #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 + #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 + #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 + /* Selects which GPIO is used to reset the external phy */ + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 + + /* Enable BAM on KR */ + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 + + /* Enable Common Mode Sense */ + #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 + #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 + #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 + #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 + + /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */ + #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000 + #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22 + #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000 + #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000 + + /* Determines the Serdes electrical interface */ + #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000 + #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24 + #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000 + #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000 + #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000 + #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000 + #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000 + #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000 + + + u32 speed_capability_mask2; /* 0x28C */ + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080 + + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000 + #define
PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000 + + + /* In the case where two media types (e.g. copper and fiber) are + present and electrically active at the same time, PHY Selection + will determine which of the two PHYs will be designated as the + Active PHY and used for a connection to the network. */ + u32 multi_phy_config; /* 0x290 */ + #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 + #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 + #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 + #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 + #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 + #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 + #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 + + /* When enabled, all second phy nvram parameters will be swapped + with the first phy parameters */ + #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 + #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 + #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 + #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 + + + /* Address of the second external phy */ + u32 external_phy_config2; /* 0x294 */ + #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF + #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 + + /* The second XGXS external PHY type */ + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 + + + /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as + 8706, 8726 and 8727) not all 4 values are needed. 
*/ + u16 xgxs_config2_rx[4]; /* 0x296 */ + u16 xgxs_config2_tx[4]; /* 0x2A0 */ + + u32 lane_config; + #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff + #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 + /* AN and forced */ + #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 + #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff + #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 + #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 + #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 + + /* Indicate whether to swap the external phy polarity */ + #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 + #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 + #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 + + + u32 external_phy_config; + #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff + #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 + + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640 0x00000c00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 + + #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000 + #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 + + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 + + u32 speed_capability_mask; + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080 + #define 
PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 + + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 + + /* A place to hold the original MAC address as a backup */ + u32 backup_mac_upper; /* 0x2B4 */ + u32 backup_mac_lower; /* 0x2B8 */ + +}; + + +/**************************************************************************** + * Shared Feature configuration * + ****************************************************************************/ +struct shared_feat_cfg { /* NVRAM Offset */ + + u32 config; /* 0x450 */ + #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 + + /* Use NVRAM values instead of HW default values */ + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \ + 0x00000002 + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \ + 0x00000000 + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \ + 0x00000002 + + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008 + + #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030 + #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4 + + /* Override the OTP back to single function mode. When using GPIO, + high means only SF, 0 is according to CLP configuration */ + #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 + + /* The interval in seconds between sending LLDP packets. 
Set to zero + to disable the feature */ + #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00ff0000 + #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16 + + /* The assigned device type ID for LLDP usage */ + #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xff000000 + #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24 + +}; + + +/**************************************************************************** + * Port Feature configuration * + ****************************************************************************/ +struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ + + u32 config; + #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f + #define PORT_FEATURE_BAR1_SIZE_SHIFT 0 + #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001 + #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002 + #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003 + #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004 + #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005 + #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006 + #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007 + #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008 + #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009 + #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a + #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b + #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c + #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d + #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e + #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f + #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0 + #define PORT_FEATURE_BAR2_SIZE_SHIFT 4 + #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010 + #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020 + #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030 + #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040 + #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050 + #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060 + #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070 + #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080 + #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090 + #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0 + #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0 + #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0 + #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0 + #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0 + #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0 + + #define PORT_FEAT_CFG_DCBX_MASK 0x00000100 + #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 + #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 + + #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200 + #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9 + #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000 + #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200 + + #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 + #define PORT_FEATURE_EN_SIZE_SHIFT 24 + #define PORT_FEATURE_WOL_ENABLED 0x01000000 + #define PORT_FEATURE_MBA_ENABLED 0x02000000 + #define PORT_FEATURE_MFW_ENABLED 0x04000000 + + /* Advertise expansion ROM even if MBA is disabled */ + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000 + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000 + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000 + + /* Check the optic vendor via i2c against a list of approved modules + in a separate nvram image */ + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xe0000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \ + 0x00000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \ + 0x20000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 
0x40000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 + + u32 wol_config; + /* Default is used when driver sets to "auto" mode */ + #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003 + #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0 + #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000 + #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001 + #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002 + #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003 + #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004 + #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008 + #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 + + u32 mba_config; + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007 + + #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038 + #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3 + + #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100 + #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200 + #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 + #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800 + #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 + #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 + #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000 + #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 + #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 + #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000 + #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000 + #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000 + #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000 + #define 
PORT_FEATURE_MBA_LINK_SPEED_20GBPS 0x20000000 + u32 bmc_config; + #define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK 0x00000001 + #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000 + #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001 + + u32 mba_vlan_cfg; + #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff + #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 + #define PORT_FEATURE_MBA_VLAN_EN 0x00010000 + + u32 resource_cfg; + #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001 + #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002 + #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004 + #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008 + #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010 + + u32 smbus_config; + #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe + #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 + + u32 vf_config; + #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000f + #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a + #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b + #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c + #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d + #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e + #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f + + u32 link_config; /* Used as HW defaults for the driver */ + #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 + #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 + /* (forced) low speed switch (< 10G) */ + #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000 + /* (forced) high speed switch (>= 10G) */ + #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 + #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 + #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 + + #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000 + #define PORT_FEATURE_LINK_SPEED_SHIFT 16 + #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 + #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000 + #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000 + #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 + #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 + #define PORT_FEATURE_LINK_SPEED_1G 0x00050000 + #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 + #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 + #define PORT_FEATURE_LINK_SPEED_20G 0x00080000 + + #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 + #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 + #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 + #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 + #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 + #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 + #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 + + /* The default for MCP link configuration, + uses the same defines as link_config */ + u32 mfw_wol_link_cfg; + + /* The default for the driver of the second external phy, + uses the same defines as link_config */ + u32 link_config2; /* 0x47C */ + + /* The default for MCP of the second external phy, + uses the same defines as link_config */ + u32 mfw_wol_link_cfg2; /* 0x480 */ + + u32 Reserved2[17]; /* 
0x484 */ + +}; + + +/**************************************************************************** + * Device Information * + ****************************************************************************/ +struct shm_dev_info { /* size */ + + u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ + + struct shared_hw_cfg shared_hw_config; /* 40 */ + + struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ + + struct shared_feat_cfg shared_feature_config; /* 4 */ + + struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ + +}; + + +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) + #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition." +#endif + +#define FUNC_0 0 +#define FUNC_1 1 +#define FUNC_2 2 +#define FUNC_3 3 +#define FUNC_4 4 +#define FUNC_5 5 +#define FUNC_6 6 +#define FUNC_7 7 +#define E1_FUNC_MAX 2 +#define E1H_FUNC_MAX 8 +#define E2_FUNC_MAX 4 /* per path */ + +#define VN_0 0 +#define VN_1 1 +#define VN_2 2 +#define VN_3 3 +#define E1VN_MAX 1 +#define E1HVN_MAX 4 + +#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */ +/* This value (in milliseconds) determines the frequency of the driver + * issuing the PULSE message code. The firmware monitors this periodic + * pulse to determine when to switch to an OS-absent mode. */ +#define DRV_PULSE_PERIOD_MS 250 + +/* This value (in milliseconds) determines how long the driver should + * wait for an acknowledgement from the firmware before timing out. Once + * the firmware has timed out, the driver will assume there is no firmware + * running and there won't be any firmware-driver synchronization during a + * driver reset. */ +#define FW_ACK_TIME_OUT_MS 5000 + +#define FW_ACK_POLL_TIME_MS 1 + +#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) + +/* LED Blink rate that will achieve ~15.9Hz */ +#define LED_BLINK_RATE_VAL 480 + +/**************************************************************************** + * Driver <-> FW Mailbox * + ****************************************************************************/ +struct drv_port_mb { + + u32 link_status; + /* Driver should update this field on any link change event */ + + #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 + #define LINK_STATUS_LINK_UP 0x00000001 + #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E + #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1) + + #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 + #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 + + #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 + #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 + #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 + + #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 + #define 
LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 + #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 + #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 + #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 + #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 + #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 + + #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 + #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 + + #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 + #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 + + #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 + #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) + #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) + #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) + #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) + + #define LINK_STATUS_SERDES_LINK 0x00100000 + + #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 + #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 + #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 + #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000 + + #define LINK_STATUS_PFC_ENABLED 0x20000000 + + #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000 + + u32 port_stx; + + u32 stat_nig_timer; + + /* MCP firmware does not use this field */ + u32 ext_phy_fw_version; + +}; + + +struct drv_func_mb { + + u32 drv_mb_header; + #define DRV_MSG_CODE_MASK 0xffff0000 + #define DRV_MSG_CODE_LOAD_REQ 0x10000000 + #define DRV_MSG_CODE_LOAD_DONE 0x11000000 + #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000 + #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000 + #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000 + #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 + #define DRV_MSG_CODE_DCC_OK 0x30000000 + #define DRV_MSG_CODE_DCC_FAILURE 0x31000000 + #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000 + #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000 + #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000 + #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000 + #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 + #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 + #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 + /* + * The optic module verification command requires bootcode + * v5.0.6 or later; the specific optic module verification command + * requires bootcode v5.2.12 or later + */ + #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000 + #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 + #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 + #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 + #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 + + #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 + #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 + + #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 + + #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 + #define REQ_BC_VER_4_SET_MF_BW 0x00060202 + #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 + + #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000 + + #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 + #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 + #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 + #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 + + #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 drv_mb_param; + #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 + #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
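+ + /* Handshake note (summary, not part of the original header text): a + * command is posted by writing one of the DRV_MSG_CODE_* values OR'd + * with a 16-bit sequence number (DRV_MSG_SEQ_NUMBER_MASK) into + * drv_mb_header; the MCP is expected to echo that sequence number in + * the low 16 bits of fw_mb_header along with an FW_MSG_CODE_* + * response code in the upper 16 bits. */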
+ + u32 fw_mb_header; + #define FW_MSG_CODE_MASK 0xffff0000 + #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 + #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 + #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 + /* Load common chip is supported from bc 6.0.0 */ + #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000 + #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000 + + #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 + #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 + #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 + #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 + #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 + #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 + #define FW_MSG_CODE_DCC_DONE 0x30100000 + #define FW_MSG_CODE_LLDP_DONE 0x40100000 + #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 + #define FW_MSG_CODE_DIAG_REFUSE 0x50200000 + #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 + #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 + #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 + #define FW_MSG_CODE_GET_KEY_DONE 0x80100000 + #define FW_MSG_CODE_NO_KEY 0x80f00000 + #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 + #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 + #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 + #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000 + #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 + #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 + #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000 + #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 + #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 + #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 + + #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 + #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 + + #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000 + + #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000 + #define FW_MSG_CODE_LIC_RESPONSE 0xff020000 + #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000 + #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 + + #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 fw_mb_param; + + u32 drv_pulse_mb; + #define DRV_PULSE_SEQ_MASK 0x00007fff + #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 + /* + * The system time is in the format of + * (year-2001)*12*32 + month*32 + day. + */ + #define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + /* + * Indicates to the firmware not to go into OS-absent mode + * when it is not getting a driver pulse. + * This is used for debugging as well as for PXE (MBA).
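+ * For example, a pulse word of ((3965 << 16) | DRV_PULSE_ALWAYS_ALIVE | seq) + * would report 29 March 2011 ((2011-2001)*12*32 + 3*32 + 29 = 3965) in the + * system-time field while telling the MCP to tolerate missed pulses.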
+ */ + + u32 mcp_pulse_mb; + #define MCP_PULSE_SEQ_MASK 0x00007fff + #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 + /* Indicates to the driver not to assert due to lack + * of MCP response */ + #define MCP_EVENT_MASK 0xffff0000 + #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + + u32 iscsi_boot_signature; + u32 iscsi_boot_block_offset; + + u32 drv_status; + #define DRV_STATUS_PMF 0x00000001 + #define DRV_STATUS_VF_DISABLED 0x00000002 + #define DRV_STATUS_SET_MF_BW 0x00000004 + #define DRV_STATUS_LINK_EVENT 0x00000008 + + #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 + #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 + #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 + #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400 + #define DRV_STATUS_DCC_RESERVED1 0x00000800 + #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 + #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 + + #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 + #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 + + u32 virt_mac_upper; + #define VIRT_MAC_SIGN_MASK 0xffff0000 + #define VIRT_MAC_SIGNATURE 0x564d0000 + u32 virt_mac_lower; + +}; + + +/**************************************************************************** + * Management firmware state * + ****************************************************************************/ +/* Allocate 440 bytes for management firmware */ +#define MGMTFW_STATE_WORD_SIZE 110 + +struct mgmtfw_state { + u32 opaque[MGMTFW_STATE_WORD_SIZE]; +}; + + +/**************************************************************************** + * Multi-Function configuration * + ****************************************************************************/ +struct shared_mf_cfg { + + u32 clp_mb; + #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 + /* set by CLP */ + #define SHARED_MF_CLP_EXIT 0x00000001 + /* set by MCP */ + #define SHARED_MF_CLP_EXIT_DONE 0x00010000 + +}; + +struct port_mf_cfg { + + u32 dynamic_cfg; /* device control channel */ + #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff + #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 + #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK + + u32 reserved[3]; + +}; + +struct func_mf_cfg { + + u32 config; + /* E/R/I/D */ + /* function 0 of each port cannot be hidden */ + #define FUNC_MF_CFG_FUNC_HIDE 0x00000001 + + #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006 + #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000 + #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 + #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 + #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 + #define FUNC_MF_CFG_PROTOCOL_DEFAULT \ + FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA + + #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 + #define FUNC_MF_CFG_FUNC_DELETED 0x00000010 + + /* PRI */ + /* 0 - low priority, 3 - high priority */ + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 + + /* MINBW, MAXBW */ + /* value range - 0..100, increments in 100Mbps */ + #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 + #define FUNC_MF_CFG_MIN_BW_SHIFT 16 + #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 + #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 + #define FUNC_MF_CFG_MAX_BW_SHIFT 24 + #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 + + u32 mac_upper; /* MAC */ + #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff + #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 + #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; + #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + u32 e1hov_tag; /* VNI 
*/ + #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff + #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 + #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK + + u32 reserved[2]; +}; + +/* This structure is not applicable and should not be accessed on 57711 */ +struct func_ext_cfg { + u32 func_cfg; + #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF + #define MACP_FUNC_CFG_FLAGS_SHIFT 0 + #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 + #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 + #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 + #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 + + u32 iscsi_mac_addr_upper; + u32 iscsi_mac_addr_lower; + + u32 fcoe_mac_addr_upper; + u32 fcoe_mac_addr_lower; + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 preserve_data; + #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) + #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) + #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) + #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) + #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4) + #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5) +}; + +struct mf_cfg { + + struct shared_mf_cfg shared_mf_config; /* 0x4 */ + struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */ + /* for all chips, there are 8 mf functions */ + struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */ + /* + * Extended configuration per function - this array does not exist and + * should not be accessed on 57711 + */ + struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/ +}; /* 0x224 */ + +/**************************************************************************** + * Shared Memory Region * + ****************************************************************************/ +struct shmem_region { /* SharedMem Offset (size) */ + + u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ + #define SHR_MEM_FORMAT_REV_MASK 0xff000000 + #define SHR_MEM_FORMAT_REV_ID ('A'<<24) + /* validity bits */ + #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 + #define SHR_MEM_VALIDITY_MB 0x00200000 + #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 + #define SHR_MEM_VALIDITY_RESERVED 0x00000007 + /* One licensing bit should be set */ + #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 + #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 + #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 + #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 + /* Active MFW */ + #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 + + struct shm_dev_info dev_info; /* 0x8 (0x438) */ + + struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */ + + /* FW information (for internal FW use) */ + u32 fw_info_fio_offset; /* 0x4a8 (0x4) */ + struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ + + struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ + +#ifdef BMAPI + /* This is a variable length array */ + /* the number of function depends on the chip type */ + struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ +#else + /* the number of function depends on the chip type */ + struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ +#endif /* BMAPI */ + +}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 
= 0x734 */ + +/**************************************************************************** + * Shared Memory 2 Region * + ****************************************************************************/ +/* The fw_flr_ack is actually built in the following way: */ +/* 8 bit: PF ack */ +/* 64 bit: VF ack */ +/* 8 bit: iov_dis_ack */ +/* In order to maintain endianity in the mailbox hsi, we want to keep using */ +/* u32. The fw must have the VF right after the PF since this is how it */ +/* accesses arrays (it always expects the VF to reside right after the PF, */ +/* and that makes the calculation much easier for it.) */ +/* In order to satisfy both limitations, and keep the struct small, the code */ +/* will abuse the structure defined here to achieve the actual partition */ +/* above */ +/****************************************************************************/ +struct fw_flr_ack { + u32 pf_ack; + u32 vf_ack[1]; + u32 iov_dis_ack; +}; + +struct fw_flr_mb { + u32 aggint; + u32 opgen_addr; + struct fw_flr_ack ack; +}; + +/**** SUPPORT FOR SHMEM ARRAYS *** + * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to + * define arrays with storage types smaller than unsigned dwords. + * The macros below add generic support for SHMEM arrays with numeric elements + * that can span 2, 4, 8 or 16 bits. The array underlying type is a 32 bit + * dword array with individual bit-field elements accessed using shifts and + * masks. + * + */ + +/* eb is the bitwidth of a single element */ +#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1) +#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb))) + +/* the bit-position macro allows the user to flip the order of the array's + * elements on a per byte or word boundary. + * + * example: an array with 8 entries each 4 bit wide. This array will fit into + * a single dword. The diagrams below show the array order of the nibbles.
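+ * + * As a numeric check, SHMEM_ARRAY_BITPOS(0, 4, 8) = 24 while + * SHMEM_ARRAY_BITPOS(1, 4, 8) = 28, i.e. with a per-byte flip element 1 + * lands in the higher nibble of its byte, as the second diagram shows.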
+ * + * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering: + * + * | | | | + * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | | | | + * + * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte: + * + * | | | | + * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 | + * | | | | + * + * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word: + * + * | | | | + * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 | + * | | | | + */ +#define SHMEM_ARRAY_BITPOS(i, eb, fb) \ + ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \ + (((i)%((fb)/(eb))) * (eb))) + +#define SHMEM_ARRAY_GET(a, i, eb, fb) \ + ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \ + SHMEM_ARRAY_MASK(eb)) + +#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \ +do { \ + a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \ + SHMEM_ARRAY_BITPOS(i, eb, fb)); \ + a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \ + SHMEM_ARRAY_BITPOS(i, eb, fb)); \ +} while (0) + + +/****START OF DCBX STRUCTURES DECLARATIONS****/ +#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8 +#define DCBX_PRI_PG_BITWIDTH 4 +#define DCBX_PRI_PG_FBITS 8 +#define DCBX_PRI_PG_GET(a, i) \ + SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS) +#define DCBX_PRI_PG_SET(a, i, val) \ + SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val) +#define DCBX_MAX_NUM_PG_BW_ENTRIES 8 +#define DCBX_BW_PG_BITWIDTH 8 +#define DCBX_PG_BW_GET(a, i) \ + SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH) +#define DCBX_PG_BW_SET(a, i, val) \ + SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val) +#define DCBX_STRICT_PRI_PG 15 +#define DCBX_MAX_APP_PROTOCOL 16 +#define FCOE_APP_IDX 0 +#define ISCSI_APP_IDX 1 +#define PREDEFINED_APP_IDX_MAX 2 + + +/* Big/Little endian have the same representation. */ +struct dcbx_ets_feature { + /* + * For Admin MIB - is this feature supported by the + * driver | For Local MIB - should this feature be enabled.
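+ * + * The fields below are SHMEM arrays: pg_bw_tbl[] packs eight 8-bit PG + * bandwidth weights accessed with DCBX_PG_BW_GET/_SET, and pri_pg_tbl[] + * packs eight 4-bit priority-to-PG entries accessed with + * DCBX_PRI_PG_GET/_SET.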
+ */ + u32 enabled; + u32 pg_bw_tbl[2]; + u32 pri_pg_tbl[1]; +}; + +/* Driver structure in LE */ +struct dcbx_pfc_feature { +#ifdef __BIG_ENDIAN + u8 pri_en_bitmap; + #define DCBX_PFC_PRI_0 0x01 + #define DCBX_PFC_PRI_1 0x02 + #define DCBX_PFC_PRI_2 0x04 + #define DCBX_PFC_PRI_3 0x08 + #define DCBX_PFC_PRI_4 0x10 + #define DCBX_PFC_PRI_5 0x20 + #define DCBX_PFC_PRI_6 0x40 + #define DCBX_PFC_PRI_7 0x80 + u8 pfc_caps; + u8 reserved; + u8 enabled; +#elif defined(__LITTLE_ENDIAN) + u8 enabled; + u8 reserved; + u8 pfc_caps; + u8 pri_en_bitmap; + #define DCBX_PFC_PRI_0 0x01 + #define DCBX_PFC_PRI_1 0x02 + #define DCBX_PFC_PRI_2 0x04 + #define DCBX_PFC_PRI_3 0x08 + #define DCBX_PFC_PRI_4 0x10 + #define DCBX_PFC_PRI_5 0x20 + #define DCBX_PFC_PRI_6 0x40 + #define DCBX_PFC_PRI_7 0x80 +#endif +}; + +struct dcbx_app_priority_entry { +#ifdef __BIG_ENDIAN + u16 app_id; + u8 pri_bitmap; + u8 appBitfield; + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_SF_ETH_TYPE 0x10 + #define DCBX_APP_SF_PORT 0x20 +#elif defined(__LITTLE_ENDIAN) + u8 appBitfield; + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_SF_ETH_TYPE 0x10 + #define DCBX_APP_SF_PORT 0x20 + u8 pri_bitmap; + u16 app_id; +#endif +}; + + +/* FW structure in BE */ +struct dcbx_app_priority_feature { +#ifdef __BIG_ENDIAN + u8 reserved; + u8 default_pri; + u8 tc_supported; + u8 enabled; +#elif defined(__LITTLE_ENDIAN) + u8 enabled; + u8 tc_supported; + u8 default_pri; + u8 reserved; +#endif + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +/* FW structure in BE */ +struct dcbx_features { + /* PG feature */ + struct dcbx_ets_feature ets; + /* PFC feature */ + struct dcbx_pfc_feature pfc; + /* APP feature */ + struct dcbx_app_priority_feature app; +}; + +/* LLDP protocol parameters */ +/* FW structure in BE */ +struct lldp_params { +#ifdef __BIG_ENDIAN + u8 msg_fast_tx_interval; + u8 msg_tx_hold; + u8 msg_tx_interval; + u8 admin_status; + #define LLDP_TX_ONLY 0x01 + #define LLDP_RX_ONLY 0x02 + #define LLDP_TX_RX 0x03 + #define LLDP_DISABLED 0x04 + u8 reserved1; + u8 tx_fast; + u8 tx_crd_max; + u8 tx_crd; +#elif defined(__LITTLE_ENDIAN) + u8 admin_status; + #define LLDP_TX_ONLY 0x01 + #define LLDP_RX_ONLY 0x02 + #define LLDP_TX_RX 0x03 + #define LLDP_DISABLED 0x04 + u8 msg_tx_interval; + u8 msg_tx_hold; + u8 msg_fast_tx_interval; + u8 tx_crd; + u8 tx_crd_max; + u8 tx_fast; + u8 reserved1; +#endif + #define REM_CHASSIS_ID_STAT_LEN 4 + #define REM_PORT_ID_STAT_LEN 4 + /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ + u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN]; + /* Holds remote Port ID TLV header, subtype and 9B of payload. */ + u32 peer_port_id[REM_PORT_ID_STAT_LEN]; +}; + +struct lldp_dcbx_stat { + #define LOCAL_CHASSIS_ID_STAT_LEN 2 + #define LOCAL_PORT_ID_STAT_LEN 2 + /* Holds local Chassis ID 8B payload of constant subtype 4. */ + u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN]; + /* Holds local Port ID 8B payload of constant subtype 3. */ + u32 local_port_id[LOCAL_PORT_ID_STAT_LEN]; + /* Number of DCBX frames transmitted. */ + u32 num_tx_dcbx_pkts; + /* Number of DCBX frames received. */ + u32 num_rx_dcbx_pkts; +}; + +/* ADMIN MIB - DCBX local machine default configuration. 
*/ +struct lldp_admin_mib { + u32 ver_cfg_flags; + #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001 + #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002 + #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004 + #define DCBX_ETS_RECO_TX_ENABLED 0x00000008 + #define DCBX_ETS_RECO_VALID 0x00000010 + #define DCBX_ETS_WILLING 0x00000020 + #define DCBX_PFC_WILLING 0x00000040 + #define DCBX_APP_WILLING 0x00000080 + #define DCBX_VERSION_CEE 0x00000100 + #define DCBX_VERSION_IEEE 0x00000200 + #define DCBX_DCBX_ENABLED 0x00000400 + #define DCBX_CEE_VERSION_MASK 0x0000f000 + #define DCBX_CEE_VERSION_SHIFT 12 + #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000 + #define DCBX_CEE_MAX_VERSION_SHIFT 16 + struct dcbx_features features; +}; + +/* REMOTE MIB - remote machine DCBX configuration. */ +struct lldp_remote_mib { + u32 prefix_seq_num; + u32 flags; + #define DCBX_ETS_TLV_RX 0x00000001 + #define DCBX_PFC_TLV_RX 0x00000002 + #define DCBX_APP_TLV_RX 0x00000004 + #define DCBX_ETS_RX_ERROR 0x00000010 + #define DCBX_PFC_RX_ERROR 0x00000020 + #define DCBX_APP_RX_ERROR 0x00000040 + #define DCBX_ETS_REM_WILLING 0x00000100 + #define DCBX_PFC_REM_WILLING 0x00000200 + #define DCBX_APP_REM_WILLING 0x00000400 + #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000 + #define DCBX_REMOTE_MIB_VALID 0x00002000 + struct dcbx_features features; + u32 suffix_seq_num; +}; + +/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */ +struct lldp_local_mib { + u32 prefix_seq_num; + /* Indicates if there is a mismatch with the negotiation results. */ + u32 error; + #define DCBX_LOCAL_ETS_ERROR 0x00000001 + #define DCBX_LOCAL_PFC_ERROR 0x00000002 + #define DCBX_LOCAL_APP_ERROR 0x00000004 + #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 + #define DCBX_LOCAL_APP_MISMATCH 0x00000020 + #define DCBX_REMOTE_MIB_ERROR 0x00000040 + struct dcbx_features features; + u32 suffix_seq_num; +}; +/***END OF DCBX STRUCTURES DECLARATIONS***/ + +struct ncsi_oem_fcoe_features { + u32 fcoe_features1; + #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF + #define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET 0 + + #define FCOE_FEATURES1_LOGINS_PER_PORT_MASK 0xFFFF0000 + #define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET 16 + + u32 fcoe_features2; + #define FCOE_FEATURES2_EXCHANGES_MASK 0x0000FFFF + #define FCOE_FEATURES2_EXCHANGES_OFFSET 0 + + #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK 0xFFFF0000 + #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET 16 + + u32 fcoe_features3; + #define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK 0x0000FFFF + #define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET 0 + + #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK 0xFFFF0000 + #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET 16 + + u32 fcoe_features4; + #define FCOE_FEATURES4_FEATURE_SETTINGS_MASK 0x0000000F + #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0 +}; + +struct ncsi_oem_data { + u32 driver_version[4]; + struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features; +}; + +struct shmem2_region { + + u32 size; /* 0x0000 */ + + u32 dcc_support; /* 0x0004 */ + #define SHMEM_DCC_SUPPORT_NONE 0x00000000 + #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001 + #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004 + #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008 + #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 + #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 + + u32 ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */ + /* + * For backwards compatibility, if the mf_cfg_addr does not exist + * (the size field is smaller than 0xc) the mf_cfg resides at the + * end of struct shmem_region + */ + u32 mf_cfg_addr; /* 0x0010 */ + #define SHMEM_MF_CFG_ADDR_NONE 0x00000000
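+ /* + * Illustrative reader for the rule above (a sketch, not part of the + * HSI itself; shmem2 and shmem_base are hypothetical locals): + * + * if (shmem2->size >= 0xc && + * shmem2->mf_cfg_addr != SHMEM_MF_CFG_ADDR_NONE) + * mf_cfg_offset = shmem2->mf_cfg_addr; + * else + * mf_cfg_offset = shmem_base + sizeof(struct shmem_region); + */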
+ + struct fw_flr_mb flr_mb; /* 0x0014 */ + u32 dcbx_lldp_params_offset; /* 0x0028 */ + #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000 + u32 dcbx_neg_res_offset; /* 0x002c */ + #define SHMEM_DCBX_NEG_RES_NONE 0x00000000 + u32 dcbx_remote_mib_offset; /* 0x0030 */ + #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000 + /* + * The other shmemX_base_addr holds the other path's shmem address + * required for example in case of common phy init, or for path1 to know + * the address of the mcp debug trace which is located at an offset from + * path0's shmem + */ + u32 other_shmem_base_addr; /* 0x0034 */ + u32 other_shmem2_base_addr; /* 0x0038 */ + /* + * mcp_vf_disabled is set by the MCP to notify the driver of VFs + * which were disabled/FLRed + */ + u32 mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */ + + /* + * drv_ack_vf_disabled is set by the PF driver to ack the disabled + * VFs it has handled + */ + u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */ + + u32 dcbx_lldp_dcbx_stat_offset; /* 0x0064 */ + #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000 + + /* + * The edebug_driver_if field is used to transfer messages between the + * edebug app and the driver through shmem2. + * + * message format: + * bits 0-2 - function number / instance of driver to perform request + * bits 3-5 - op code / is_ack? + * bits 6-63 - data + */ + u32 edebug_driver_if[2]; /* 0x0068 */ + #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1 + #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2 + #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3 + + u32 nvm_retain_bitmap_addr; /* 0x0070 */ + + u32 reserved1; /* 0x0074 */ + + u32 reserved2[E2_FUNC_MAX]; + + u32 reserved3[E2_FUNC_MAX];/* 0x0088 */ + u32 reserved4[E2_FUNC_MAX];/* 0x0098 */ + + u32 swim_base_addr; /* 0x0108 */ + u32 swim_funcs; + u32 swim_main_cb; + + u32 reserved5[2]; + + /* generic flags controlled by the driver */ + u32 drv_flags; + #define DRV_FLAGS_DCB_CONFIGURED 0x1 + + /* pointer to extended dev_info shared data copied from nvm image */ + u32 extended_dev_info_shared_addr; + u32 ncsi_oem_data_addr; + + u32 ocsd_host_addr; + u32 ocbb_host_addr; + u32 ocsd_req_update_interval; +}; + + +struct emac_stats { + u32 rx_stat_ifhcinoctets; + u32 rx_stat_ifhcinbadoctets; + u32 rx_stat_etherstatsfragments; + u32 rx_stat_ifhcinucastpkts; + u32 rx_stat_ifhcinmulticastpkts; + u32 rx_stat_ifhcinbroadcastpkts; + u32 rx_stat_dot3statsfcserrors; + u32 rx_stat_dot3statsalignmenterrors; + u32 rx_stat_dot3statscarriersenseerrors; + u32 rx_stat_xonpauseframesreceived; + u32 rx_stat_xoffpauseframesreceived; + u32 rx_stat_maccontrolframesreceived; + u32 rx_stat_xoffstateentered; + u32 rx_stat_dot3statsframestoolong; + u32 rx_stat_etherstatsjabbers; + u32 rx_stat_etherstatsundersizepkts; + u32 rx_stat_etherstatspkts64octets; + u32 rx_stat_etherstatspkts65octetsto127octets; + u32 rx_stat_etherstatspkts128octetsto255octets; + u32 rx_stat_etherstatspkts256octetsto511octets; + u32 rx_stat_etherstatspkts512octetsto1023octets; + u32 rx_stat_etherstatspkts1024octetsto1522octets; + u32 rx_stat_etherstatspktsover1522octets; + + u32 rx_stat_falsecarriererrors; + + u32 tx_stat_ifhcoutoctets; + u32 tx_stat_ifhcoutbadoctets; + u32 tx_stat_etherstatscollisions; + u32 tx_stat_outxonsent; + u32 tx_stat_outxoffsent; + u32 tx_stat_flowcontroldone; + u32 tx_stat_dot3statssinglecollisionframes; + u32 tx_stat_dot3statsmultiplecollisionframes; + u32 tx_stat_dot3statsdeferredtransmissions; + u32
tx_stat_dot3statsexcessivecollisions; + u32 tx_stat_dot3statslatecollisions; + u32 tx_stat_ifhcoutucastpkts; + u32 tx_stat_ifhcoutmulticastpkts; + u32 tx_stat_ifhcoutbroadcastpkts; + u32 tx_stat_etherstatspkts64octets; + u32 tx_stat_etherstatspkts65octetsto127octets; + u32 tx_stat_etherstatspkts128octetsto255octets; + u32 tx_stat_etherstatspkts256octetsto511octets; + u32 tx_stat_etherstatspkts512octetsto1023octets; + u32 tx_stat_etherstatspkts1024octetsto1522octets; + u32 tx_stat_etherstatspktsover1522octets; + u32 tx_stat_dot3statsinternalmactransmiterrors; +}; + + +struct bmac1_stats { + u32 tx_stat_gtpkt_lo; + u32 tx_stat_gtpkt_hi; + u32 tx_stat_gtxpf_lo; + u32 tx_stat_gtxpf_hi; + u32 tx_stat_gtfcs_lo; + u32 tx_stat_gtfcs_hi; + u32 tx_stat_gtmca_lo; + u32 tx_stat_gtmca_hi; + u32 tx_stat_gtbca_lo; + u32 tx_stat_gtbca_hi; + u32 tx_stat_gtfrg_lo; + u32 tx_stat_gtfrg_hi; + u32 tx_stat_gtovr_lo; + u32 tx_stat_gtovr_hi; + u32 tx_stat_gt64_lo; + u32 tx_stat_gt64_hi; + u32 tx_stat_gt127_lo; + u32 tx_stat_gt127_hi; + u32 tx_stat_gt255_lo; + u32 tx_stat_gt255_hi; + u32 tx_stat_gt511_lo; + u32 tx_stat_gt511_hi; + u32 tx_stat_gt1023_lo; + u32 tx_stat_gt1023_hi; + u32 tx_stat_gt1518_lo; + u32 tx_stat_gt1518_hi; + u32 tx_stat_gt2047_lo; + u32 tx_stat_gt2047_hi; + u32 tx_stat_gt4095_lo; + u32 tx_stat_gt4095_hi; + u32 tx_stat_gt9216_lo; + u32 tx_stat_gt9216_hi; + u32 tx_stat_gt16383_lo; + u32 tx_stat_gt16383_hi; + u32 tx_stat_gtmax_lo; + u32 tx_stat_gtmax_hi; + u32 tx_stat_gtufl_lo; + u32 tx_stat_gtufl_hi; + u32 tx_stat_gterr_lo; + u32 tx_stat_gterr_hi; + u32 tx_stat_gtbyt_lo; + u32 tx_stat_gtbyt_hi; + + u32 rx_stat_gr64_lo; + u32 rx_stat_gr64_hi; + u32 rx_stat_gr127_lo; + u32 rx_stat_gr127_hi; + u32 rx_stat_gr255_lo; + u32 rx_stat_gr255_hi; + u32 rx_stat_gr511_lo; + u32 rx_stat_gr511_hi; + u32 rx_stat_gr1023_lo; + u32 rx_stat_gr1023_hi; + u32 rx_stat_gr1518_lo; + u32 rx_stat_gr1518_hi; + u32 rx_stat_gr2047_lo; + u32 rx_stat_gr2047_hi; + u32 rx_stat_gr4095_lo; + u32 rx_stat_gr4095_hi; + u32 rx_stat_gr9216_lo; + u32 rx_stat_gr9216_hi; + u32 rx_stat_gr16383_lo; + u32 rx_stat_gr16383_hi; + u32 rx_stat_grmax_lo; + u32 rx_stat_grmax_hi; + u32 rx_stat_grpkt_lo; + u32 rx_stat_grpkt_hi; + u32 rx_stat_grfcs_lo; + u32 rx_stat_grfcs_hi; + u32 rx_stat_grmca_lo; + u32 rx_stat_grmca_hi; + u32 rx_stat_grbca_lo; + u32 rx_stat_grbca_hi; + u32 rx_stat_grxcf_lo; + u32 rx_stat_grxcf_hi; + u32 rx_stat_grxpf_lo; + u32 rx_stat_grxpf_hi; + u32 rx_stat_grxuo_lo; + u32 rx_stat_grxuo_hi; + u32 rx_stat_grjbr_lo; + u32 rx_stat_grjbr_hi; + u32 rx_stat_grovr_lo; + u32 rx_stat_grovr_hi; + u32 rx_stat_grflr_lo; + u32 rx_stat_grflr_hi; + u32 rx_stat_grmeg_lo; + u32 rx_stat_grmeg_hi; + u32 rx_stat_grmeb_lo; + u32 rx_stat_grmeb_hi; + u32 rx_stat_grbyt_lo; + u32 rx_stat_grbyt_hi; + u32 rx_stat_grund_lo; + u32 rx_stat_grund_hi; + u32 rx_stat_grfrg_lo; + u32 rx_stat_grfrg_hi; + u32 rx_stat_grerb_lo; + u32 rx_stat_grerb_hi; + u32 rx_stat_grfre_lo; + u32 rx_stat_grfre_hi; + u32 rx_stat_gripj_lo; + u32 rx_stat_gripj_hi; +}; + +struct bmac2_stats { + u32 tx_stat_gtpk_lo; /* gtpok */ + u32 tx_stat_gtpk_hi; /* gtpok */ + u32 tx_stat_gtxpf_lo; /* gtpf */ + u32 tx_stat_gtxpf_hi; /* gtpf */ + u32 tx_stat_gtpp_lo; /* NEW BMAC2 */ + u32 tx_stat_gtpp_hi; /* NEW BMAC2 */ + u32 tx_stat_gtfcs_lo; + u32 tx_stat_gtfcs_hi; + u32 tx_stat_gtuca_lo; /* NEW BMAC2 */ + u32 tx_stat_gtuca_hi; /* NEW BMAC2 */ + u32 tx_stat_gtmca_lo; + u32 tx_stat_gtmca_hi; + u32 tx_stat_gtbca_lo; + u32 tx_stat_gtbca_hi; + u32 tx_stat_gtovr_lo; + u32 tx_stat_gtovr_hi; + u32 
tx_stat_gtfrg_lo; + u32 tx_stat_gtfrg_hi; + u32 tx_stat_gtpkt1_lo; /* gtpkt */ + u32 tx_stat_gtpkt1_hi; /* gtpkt */ + u32 tx_stat_gt64_lo; + u32 tx_stat_gt64_hi; + u32 tx_stat_gt127_lo; + u32 tx_stat_gt127_hi; + u32 tx_stat_gt255_lo; + u32 tx_stat_gt255_hi; + u32 tx_stat_gt511_lo; + u32 tx_stat_gt511_hi; + u32 tx_stat_gt1023_lo; + u32 tx_stat_gt1023_hi; + u32 tx_stat_gt1518_lo; + u32 tx_stat_gt1518_hi; + u32 tx_stat_gt2047_lo; + u32 tx_stat_gt2047_hi; + u32 tx_stat_gt4095_lo; + u32 tx_stat_gt4095_hi; + u32 tx_stat_gt9216_lo; + u32 tx_stat_gt9216_hi; + u32 tx_stat_gt16383_lo; + u32 tx_stat_gt16383_hi; + u32 tx_stat_gtmax_lo; + u32 tx_stat_gtmax_hi; + u32 tx_stat_gtufl_lo; + u32 tx_stat_gtufl_hi; + u32 tx_stat_gterr_lo; + u32 tx_stat_gterr_hi; + u32 tx_stat_gtbyt_lo; + u32 tx_stat_gtbyt_hi; + + u32 rx_stat_gr64_lo; + u32 rx_stat_gr64_hi; + u32 rx_stat_gr127_lo; + u32 rx_stat_gr127_hi; + u32 rx_stat_gr255_lo; + u32 rx_stat_gr255_hi; + u32 rx_stat_gr511_lo; + u32 rx_stat_gr511_hi; + u32 rx_stat_gr1023_lo; + u32 rx_stat_gr1023_hi; + u32 rx_stat_gr1518_lo; + u32 rx_stat_gr1518_hi; + u32 rx_stat_gr2047_lo; + u32 rx_stat_gr2047_hi; + u32 rx_stat_gr4095_lo; + u32 rx_stat_gr4095_hi; + u32 rx_stat_gr9216_lo; + u32 rx_stat_gr9216_hi; + u32 rx_stat_gr16383_lo; + u32 rx_stat_gr16383_hi; + u32 rx_stat_grmax_lo; + u32 rx_stat_grmax_hi; + u32 rx_stat_grpkt_lo; + u32 rx_stat_grpkt_hi; + u32 rx_stat_grfcs_lo; + u32 rx_stat_grfcs_hi; + u32 rx_stat_gruca_lo; + u32 rx_stat_gruca_hi; + u32 rx_stat_grmca_lo; + u32 rx_stat_grmca_hi; + u32 rx_stat_grbca_lo; + u32 rx_stat_grbca_hi; + u32 rx_stat_grxpf_lo; /* grpf */ + u32 rx_stat_grxpf_hi; /* grpf */ + u32 rx_stat_grpp_lo; + u32 rx_stat_grpp_hi; + u32 rx_stat_grxuo_lo; /* gruo */ + u32 rx_stat_grxuo_hi; /* gruo */ + u32 rx_stat_grjbr_lo; + u32 rx_stat_grjbr_hi; + u32 rx_stat_grovr_lo; + u32 rx_stat_grovr_hi; + u32 rx_stat_grxcf_lo; /* grcf */ + u32 rx_stat_grxcf_hi; /* grcf */ + u32 rx_stat_grflr_lo; + u32 rx_stat_grflr_hi; + u32 rx_stat_grpok_lo; + u32 rx_stat_grpok_hi; + u32 rx_stat_grmeg_lo; + u32 rx_stat_grmeg_hi; + u32 rx_stat_grmeb_lo; + u32 rx_stat_grmeb_hi; + u32 rx_stat_grbyt_lo; + u32 rx_stat_grbyt_hi; + u32 rx_stat_grund_lo; + u32 rx_stat_grund_hi; + u32 rx_stat_grfrg_lo; + u32 rx_stat_grfrg_hi; + u32 rx_stat_grerb_lo; /* grerrbyt */ + u32 rx_stat_grerb_hi; /* grerrbyt */ + u32 rx_stat_grfre_lo; /* grfrerr */ + u32 rx_stat_grfre_hi; /* grfrerr */ + u32 rx_stat_gripj_lo; + u32 rx_stat_gripj_hi; +}; + +struct mstat_stats { + struct { + /* NOTE: MSTAT on E3 has a bug where this register's contents are + * actually tx_gtxpok + tx_gtxpf + (possibly) tx_gtxpp + */ + u32 tx_gtxpok_lo; + u32 tx_gtxpok_hi; + u32 tx_gtxpf_lo; + u32 tx_gtxpf_hi; + u32 tx_gtxpp_lo; + u32 tx_gtxpp_hi; + u32 tx_gtfcs_lo; + u32 tx_gtfcs_hi; + u32 tx_gtuca_lo; + u32 tx_gtuca_hi; + u32 tx_gtmca_lo; + u32 tx_gtmca_hi; + u32 tx_gtgca_lo; + u32 tx_gtgca_hi; + u32 tx_gtpkt_lo; + u32 tx_gtpkt_hi; + u32 tx_gt64_lo; + u32 tx_gt64_hi; + u32 tx_gt127_lo; + u32 tx_gt127_hi; + u32 tx_gt255_lo; + u32 tx_gt255_hi; + u32 tx_gt511_lo; + u32 tx_gt511_hi; + u32 tx_gt1023_lo; + u32 tx_gt1023_hi; + u32 tx_gt1518_lo; + u32 tx_gt1518_hi; + u32 tx_gt2047_lo; + u32 tx_gt2047_hi; + u32 tx_gt4095_lo; + u32 tx_gt4095_hi; + u32 tx_gt9216_lo; + u32 tx_gt9216_hi; + u32 tx_gt16383_lo; + u32 tx_gt16383_hi; + u32 tx_gtufl_lo; + u32 tx_gtufl_hi; + u32 tx_gterr_lo; + u32 tx_gterr_hi; + u32 tx_gtbyt_lo; + u32 tx_gtbyt_hi; + u32 tx_collisions_lo; + u32 tx_collisions_hi; + u32 tx_singlecollision_lo; + u32 tx_singlecollision_hi;
+ u32 tx_multiplecollisions_lo; + u32 tx_multiplecollisions_hi; + u32 tx_deferred_lo; + u32 tx_deferred_hi; + u32 tx_excessivecollisions_lo; + u32 tx_excessivecollisions_hi; + u32 tx_latecollisions_lo; + u32 tx_latecollisions_hi; + } stats_tx; + + struct { + u32 rx_gr64_lo; + u32 rx_gr64_hi; + u32 rx_gr127_lo; + u32 rx_gr127_hi; + u32 rx_gr255_lo; + u32 rx_gr255_hi; + u32 rx_gr511_lo; + u32 rx_gr511_hi; + u32 rx_gr1023_lo; + u32 rx_gr1023_hi; + u32 rx_gr1518_lo; + u32 rx_gr1518_hi; + u32 rx_gr2047_lo; + u32 rx_gr2047_hi; + u32 rx_gr4095_lo; + u32 rx_gr4095_hi; + u32 rx_gr9216_lo; + u32 rx_gr9216_hi; + u32 rx_gr16383_lo; + u32 rx_gr16383_hi; + u32 rx_grpkt_lo; + u32 rx_grpkt_hi; + u32 rx_grfcs_lo; + u32 rx_grfcs_hi; + u32 rx_gruca_lo; + u32 rx_gruca_hi; + u32 rx_grmca_lo; + u32 rx_grmca_hi; + u32 rx_grbca_lo; + u32 rx_grbca_hi; + u32 rx_grxpf_lo; + u32 rx_grxpf_hi; + u32 rx_grxpp_lo; + u32 rx_grxpp_hi; + u32 rx_grxuo_lo; + u32 rx_grxuo_hi; + u32 rx_grovr_lo; + u32 rx_grovr_hi; + u32 rx_grxcf_lo; + u32 rx_grxcf_hi; + u32 rx_grflr_lo; + u32 rx_grflr_hi; + u32 rx_grpok_lo; + u32 rx_grpok_hi; + u32 rx_grbyt_lo; + u32 rx_grbyt_hi; + u32 rx_grund_lo; + u32 rx_grund_hi; + u32 rx_grfrg_lo; + u32 rx_grfrg_hi; + u32 rx_grerb_lo; + u32 rx_grerb_hi; + u32 rx_grfre_lo; + u32 rx_grfre_hi; + + u32 rx_alignmenterrors_lo; + u32 rx_alignmenterrors_hi; + u32 rx_falsecarrier_lo; + u32 rx_falsecarrier_hi; + u32 rx_llfcmsgcnt_lo; + u32 rx_llfcmsgcnt_hi; + } stats_rx; +}; + +union mac_stats { + struct emac_stats emac_stats; + struct bmac1_stats bmac1_stats; + struct bmac2_stats bmac2_stats; + struct mstat_stats mstat_stats; +}; + + +struct mac_stx { + /* in_bad_octets */ + u32 rx_stat_ifhcinbadoctets_hi; + u32 rx_stat_ifhcinbadoctets_lo; + + /* out_bad_octets */ + u32 tx_stat_ifhcoutbadoctets_hi; + u32 tx_stat_ifhcoutbadoctets_lo; + + /* crc_receive_errors */ + u32 rx_stat_dot3statsfcserrors_hi; + u32 rx_stat_dot3statsfcserrors_lo; + /* alignment_errors */ + u32 rx_stat_dot3statsalignmenterrors_hi; + u32 rx_stat_dot3statsalignmenterrors_lo; + /* carrier_sense_errors */ + u32 rx_stat_dot3statscarriersenseerrors_hi; + u32 rx_stat_dot3statscarriersenseerrors_lo; + /* false_carrier_detections */ + u32 rx_stat_falsecarriererrors_hi; + u32 rx_stat_falsecarriererrors_lo; + + /* runt_packets_received */ + u32 rx_stat_etherstatsundersizepkts_hi; + u32 rx_stat_etherstatsundersizepkts_lo; + /* jabber_packets_received */ + u32 rx_stat_dot3statsframestoolong_hi; + u32 rx_stat_dot3statsframestoolong_lo; + + /* error_runt_packets_received */ + u32 rx_stat_etherstatsfragments_hi; + u32 rx_stat_etherstatsfragments_lo; + /* error_jabber_packets_received */ + u32 rx_stat_etherstatsjabbers_hi; + u32 rx_stat_etherstatsjabbers_lo; + + /* control_frames_received */ + u32 rx_stat_maccontrolframesreceived_hi; + u32 rx_stat_maccontrolframesreceived_lo; + u32 rx_stat_mac_xpf_hi; + u32 rx_stat_mac_xpf_lo; + u32 rx_stat_mac_xcf_hi; + u32 rx_stat_mac_xcf_lo; + + /* xoff_state_entered */ + u32 rx_stat_xoffstateentered_hi; + u32 rx_stat_xoffstateentered_lo; + /* pause_xon_frames_received */ + u32 rx_stat_xonpauseframesreceived_hi; + u32 rx_stat_xonpauseframesreceived_lo; + /* pause_xoff_frames_received */ + u32 rx_stat_xoffpauseframesreceived_hi; + u32 rx_stat_xoffpauseframesreceived_lo; + /* pause_xon_frames_transmitted */ + u32 tx_stat_outxonsent_hi; + u32 tx_stat_outxonsent_lo; + /* pause_xoff_frames_transmitted */ + u32 tx_stat_outxoffsent_hi; + u32 tx_stat_outxoffsent_lo; + /* flow_control_done */ + u32 tx_stat_flowcontroldone_hi; + u32 
tx_stat_flowcontroldone_lo; + + /* ether_stats_collisions */ + u32 tx_stat_etherstatscollisions_hi; + u32 tx_stat_etherstatscollisions_lo; + /* single_collision_transmit_frames */ + u32 tx_stat_dot3statssinglecollisionframes_hi; + u32 tx_stat_dot3statssinglecollisionframes_lo; + /* multiple_collision_transmit_frames */ + u32 tx_stat_dot3statsmultiplecollisionframes_hi; + u32 tx_stat_dot3statsmultiplecollisionframes_lo; + /* deferred_transmissions */ + u32 tx_stat_dot3statsdeferredtransmissions_hi; + u32 tx_stat_dot3statsdeferredtransmissions_lo; + /* excessive_collision_frames */ + u32 tx_stat_dot3statsexcessivecollisions_hi; + u32 tx_stat_dot3statsexcessivecollisions_lo; + /* late_collision_frames */ + u32 tx_stat_dot3statslatecollisions_hi; + u32 tx_stat_dot3statslatecollisions_lo; + + /* frames_transmitted_64_bytes */ + u32 tx_stat_etherstatspkts64octets_hi; + u32 tx_stat_etherstatspkts64octets_lo; + /* frames_transmitted_65_127_bytes */ + u32 tx_stat_etherstatspkts65octetsto127octets_hi; + u32 tx_stat_etherstatspkts65octetsto127octets_lo; + /* frames_transmitted_128_255_bytes */ + u32 tx_stat_etherstatspkts128octetsto255octets_hi; + u32 tx_stat_etherstatspkts128octetsto255octets_lo; + /* frames_transmitted_256_511_bytes */ + u32 tx_stat_etherstatspkts256octetsto511octets_hi; + u32 tx_stat_etherstatspkts256octetsto511octets_lo; + /* frames_transmitted_512_1023_bytes */ + u32 tx_stat_etherstatspkts512octetsto1023octets_hi; + u32 tx_stat_etherstatspkts512octetsto1023octets_lo; + /* frames_transmitted_1024_1522_bytes */ + u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; + u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; + /* frames_transmitted_1523_9022_bytes */ + u32 tx_stat_etherstatspktsover1522octets_hi; + u32 tx_stat_etherstatspktsover1522octets_lo; + u32 tx_stat_mac_2047_hi; + u32 tx_stat_mac_2047_lo; + u32 tx_stat_mac_4095_hi; + u32 tx_stat_mac_4095_lo; + u32 tx_stat_mac_9216_hi; + u32 tx_stat_mac_9216_lo; + u32 tx_stat_mac_16383_hi; + u32 tx_stat_mac_16383_lo; + + /* internal_mac_transmit_errors */ + u32 tx_stat_dot3statsinternalmactransmiterrors_hi; + u32 tx_stat_dot3statsinternalmactransmiterrors_lo; + + /* if_out_discards */ + u32 tx_stat_mac_ufl_hi; + u32 tx_stat_mac_ufl_lo; +}; + + +#define MAC_STX_IDX_MAX 2 + +struct host_port_stats { + u32 host_port_stats_start; + + struct mac_stx mac_stx[MAC_STX_IDX_MAX]; + + u32 brb_drop_hi; + u32 brb_drop_lo; + + u32 host_port_stats_end; +}; + + +struct host_func_stats { + u32 host_func_stats_start; + + u32 total_bytes_received_hi; + u32 total_bytes_received_lo; + + u32 total_bytes_transmitted_hi; + u32 total_bytes_transmitted_lo; + + u32 total_unicast_packets_received_hi; + u32 total_unicast_packets_received_lo; + + u32 total_multicast_packets_received_hi; + u32 total_multicast_packets_received_lo; + + u32 total_broadcast_packets_received_hi; + u32 total_broadcast_packets_received_lo; + + u32 total_unicast_packets_transmitted_hi; + u32 total_unicast_packets_transmitted_lo; + + u32 total_multicast_packets_transmitted_hi; + u32 total_multicast_packets_transmitted_lo; + + u32 total_broadcast_packets_transmitted_hi; + u32 total_broadcast_packets_transmitted_lo; + + u32 valid_bytes_received_hi; + u32 valid_bytes_received_lo; + + u32 host_func_stats_end; +}; + +/* VIC definitions */ +#define VICSTATST_UIF_INDEX 2 + +#define BCM_5710_FW_MAJOR_VERSION 7 +#define BCM_5710_FW_MINOR_VERSION 0 +#define BCM_5710_FW_REVISION_VERSION 23 +#define BCM_5710_FW_ENGINEERING_VERSION 0 +#define BCM_5710_FW_COMPILE_FLAGS 1 + + +/* + * attention bits + 
*/ +struct atten_sp_status_block { + __le32 attn_bits; + __le32 attn_bits_ack; + u8 status_block_id; + u8 reserved0; + __le16 attn_bits_index; + __le32 reserved1; +}; + + +/* + * The eth aggregative context of Cstorm + */ +struct cstorm_eth_ag_context { + u32 __reserved0[10]; +}; + + +/* + * dmae command structure + */ +struct dmae_command { + u32 opcode; +#define DMAE_COMMAND_SRC (0x1<<0) +#define DMAE_COMMAND_SRC_SHIFT 0 +#define DMAE_COMMAND_DST (0x3<<1) +#define DMAE_COMMAND_DST_SHIFT 1 +#define DMAE_COMMAND_C_DST (0x1<<3) +#define DMAE_COMMAND_C_DST_SHIFT 3 +#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) +#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 +#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) +#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 +#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) +#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 +#define DMAE_COMMAND_ENDIANITY (0x3<<9) +#define DMAE_COMMAND_ENDIANITY_SHIFT 9 +#define DMAE_COMMAND_PORT (0x1<<11) +#define DMAE_COMMAND_PORT_SHIFT 11 +#define DMAE_COMMAND_CRC_RESET (0x1<<12) +#define DMAE_COMMAND_CRC_RESET_SHIFT 12 +#define DMAE_COMMAND_SRC_RESET (0x1<<13) +#define DMAE_COMMAND_SRC_RESET_SHIFT 13 +#define DMAE_COMMAND_DST_RESET (0x1<<14) +#define DMAE_COMMAND_DST_RESET_SHIFT 14 +#define DMAE_COMMAND_E1HVN (0x3<<15) +#define DMAE_COMMAND_E1HVN_SHIFT 15 +#define DMAE_COMMAND_DST_VN (0x3<<17) +#define DMAE_COMMAND_DST_VN_SHIFT 17 +#define DMAE_COMMAND_C_FUNC (0x1<<19) +#define DMAE_COMMAND_C_FUNC_SHIFT 19 +#define DMAE_COMMAND_ERR_POLICY (0x3<<20) +#define DMAE_COMMAND_ERR_POLICY_SHIFT 20 +#define DMAE_COMMAND_RESERVED0 (0x3FF<<22) +#define DMAE_COMMAND_RESERVED0_SHIFT 22 + u32 src_addr_lo; + u32 src_addr_hi; + u32 dst_addr_lo; + u32 dst_addr_hi; +#if defined(__BIG_ENDIAN) + u16 opcode_iov; +#define DMAE_COMMAND_SRC_VFID (0x3F<<0) +#define DMAE_COMMAND_SRC_VFID_SHIFT 0 +#define DMAE_COMMAND_SRC_VFPF (0x1<<6) +#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 +#define DMAE_COMMAND_RESERVED1 (0x1<<7) +#define DMAE_COMMAND_RESERVED1_SHIFT 7 +#define DMAE_COMMAND_DST_VFID (0x3F<<8) +#define DMAE_COMMAND_DST_VFID_SHIFT 8 +#define DMAE_COMMAND_DST_VFPF (0x1<<14) +#define DMAE_COMMAND_DST_VFPF_SHIFT 14 +#define DMAE_COMMAND_RESERVED2 (0x1<<15) +#define DMAE_COMMAND_RESERVED2_SHIFT 15 + u16 len; +#elif defined(__LITTLE_ENDIAN) + u16 len; + u16 opcode_iov; +#define DMAE_COMMAND_SRC_VFID (0x3F<<0) +#define DMAE_COMMAND_SRC_VFID_SHIFT 0 +#define DMAE_COMMAND_SRC_VFPF (0x1<<6) +#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 +#define DMAE_COMMAND_RESERVED1 (0x1<<7) +#define DMAE_COMMAND_RESERVED1_SHIFT 7 +#define DMAE_COMMAND_DST_VFID (0x3F<<8) +#define DMAE_COMMAND_DST_VFID_SHIFT 8 +#define DMAE_COMMAND_DST_VFPF (0x1<<14) +#define DMAE_COMMAND_DST_VFPF_SHIFT 14 +#define DMAE_COMMAND_RESERVED2 (0x1<<15) +#define DMAE_COMMAND_RESERVED2_SHIFT 15 +#endif + u32 comp_addr_lo; + u32 comp_addr_hi; + u32 comp_val; + u32 crc32; + u32 crc32_c; +#if defined(__BIG_ENDIAN) + u16 crc16_c; + u16 crc16; +#elif defined(__LITTLE_ENDIAN) + u16 crc16; + u16 crc16_c; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 crc_t10; +#elif defined(__LITTLE_ENDIAN) + u16 crc_t10; + u16 reserved3; +#endif +#if defined(__BIG_ENDIAN) + u16 xsum8; + u16 xsum16; +#elif defined(__LITTLE_ENDIAN) + u16 xsum16; + u16 xsum8; +#endif +}; + + +/* + * common data for all protocols + */ +struct doorbell_hdr { + u8 header; +#define DOORBELL_HDR_RX (0x1<<0) +#define DOORBELL_HDR_RX_SHIFT 0 +#define DOORBELL_HDR_DB_TYPE (0x1<<1) +#define DOORBELL_HDR_DB_TYPE_SHIFT 1 +#define DOORBELL_HDR_DPM_SIZE (0x3<<2) 
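Every multi-bit field in these structures comes as a mask define plus a matching _SHIFT define. A small sketch of the pack/extract pattern those pairs imply, reusing the DMAE_COMMAND_DST field from the dmae_command opcode above; the destination value 2 is an assumed encoding, used only for illustration:

#include <stdint.h>

typedef uint32_t u32;

#define DMAE_COMMAND_DST	(0x3<<1)	/* mirrored from the header above */
#define DMAE_COMMAND_DST_SHIFT	1

/* Replace a field's bits under its mask/_SHIFT pair. */
static inline u32 field_set(u32 word, u32 mask, u32 shift, u32 val)
{
	return (word & ~mask) | ((val << shift) & mask);
}

/* Extract a field's value. */
static inline u32 field_get(u32 word, u32 mask, u32 shift)
{
	return (word & mask) >> shift;
}

/* e.g.: opcode = field_set(opcode, DMAE_COMMAND_DST,
 *                          DMAE_COMMAND_DST_SHIFT, 2); */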
+#define DOORBELL_HDR_DPM_SIZE_SHIFT 2 +#define DOORBELL_HDR_CONN_TYPE (0xF<<4) +#define DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +/* + * Ethernet doorbell + */ +struct eth_tx_doorbell { +#if defined(__BIG_ENDIAN) + u16 npackets; + u8 params; +#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) +#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 +#define ETH_TX_DOORBELL_SPARE (0x1<<7) +#define ETH_TX_DOORBELL_SPARE_SHIFT 7 + struct doorbell_hdr hdr; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr hdr; + u8 params; +#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) +#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 +#define ETH_TX_DOORBELL_SPARE (0x1<<7) +#define ETH_TX_DOORBELL_SPARE_SHIFT 7 + u16 npackets; +#endif +}; + + +/* + * 3 lines. status block + */ +struct hc_status_block_e1x { + __le16 index_values[HC_SB_MAX_INDICES_E1X]; + __le16 running_index[HC_SB_MAX_SM]; + __le32 rsrv[11]; +}; + +/* + * host status block + */ +struct host_hc_status_block_e1x { + struct hc_status_block_e1x sb; +}; + + +/* + * 3 lines. status block + */ +struct hc_status_block_e2 { + __le16 index_values[HC_SB_MAX_INDICES_E2]; + __le16 running_index[HC_SB_MAX_SM]; + __le32 reserved[11]; +}; + +/* + * host status block + */ +struct host_hc_status_block_e2 { + struct hc_status_block_e2 sb; +}; + + +/* + * 5 lines. slow-path status block + */ +struct hc_sp_status_block { + __le16 index_values[HC_SP_SB_MAX_INDICES]; + __le16 running_index; + __le16 rsrv; + u32 rsrv1; +}; + +/* + * host status block + */ +struct host_sp_status_block { + struct atten_sp_status_block atten_status_block; + struct hc_sp_status_block sp_sb; +}; + + +/* + * IGU driver acknowledgment register + */ +struct igu_ack_register { +#if defined(__BIG_ENDIAN) + u16 sb_id_and_flags; +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0 +#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) +#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5 +#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) +#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8 +#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) +#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9 +#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) +#define IGU_ACK_REGISTER_RESERVED_SHIFT 11 + u16 status_block_index; +#elif defined(__LITTLE_ENDIAN) + u16 status_block_index; + u16 sb_id_and_flags; +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0 +#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) +#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5 +#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) +#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8 +#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) +#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9 +#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) +#define IGU_ACK_REGISTER_RESERVED_SHIFT 11 +#endif +}; + + +/* + * IGU driver acknowledgement register + */ +struct igu_backward_compatible { + u32 sb_id_and_flags; +#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0) +#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0 +#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16) +#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16 +#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21) +#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21 +#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24) +#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24 +#define 
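To acknowledge an interrupt, the driver composes the sb_id_and_flags half of igu_ack_register from the fields defined above and writes it together with the new status block index. A sketch of that composition; int_mode is assumed to carry one of the enum igu_int_cmd values defined further down, and callers are assumed to pass in-range values, mirroring the shape of the driver's ack path rather than quoting it:

#include <stdint.h>

typedef uint16_t u16;

#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT	0	/* from above */
#define IGU_ACK_REGISTER_STORM_ID_SHIFT		5
#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT	8
#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT	9

/* Build the flags half of an IGU acknowledgement. */
static inline u16 igu_ack_flags(u16 sb_id, u16 storm_id, int update_idx,
				u16 int_mode)
{
	return (sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
	       (storm_id << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
	       ((update_idx ? 1 : 0) << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
	       (int_mode << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT);
}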
IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25) +#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25 +#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27) +#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27 + u32 reserved_2; +}; + + +/* + * IGU driver acknowledgement register + */ +struct igu_regular { + u32 sb_id_and_flags; +#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0) +#define IGU_REGULAR_SB_INDEX_SHIFT 0 +#define IGU_REGULAR_RESERVED0 (0x1<<20) +#define IGU_REGULAR_RESERVED0_SHIFT 20 +#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21) +#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21 +#define IGU_REGULAR_BUPDATE (0x1<<24) +#define IGU_REGULAR_BUPDATE_SHIFT 24 +#define IGU_REGULAR_ENABLE_INT (0x3<<25) +#define IGU_REGULAR_ENABLE_INT_SHIFT 25 +#define IGU_REGULAR_RESERVED_1 (0x1<<27) +#define IGU_REGULAR_RESERVED_1_SHIFT 27 +#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28) +#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28 +#define IGU_REGULAR_CLEANUP_SET (0x1<<30) +#define IGU_REGULAR_CLEANUP_SET_SHIFT 30 +#define IGU_REGULAR_BCLEANUP (0x1<<31) +#define IGU_REGULAR_BCLEANUP_SHIFT 31 + u32 reserved_2; +}; + +/* + * IGU driver acknowledgement register + */ +union igu_consprod_reg { + struct igu_regular regular; + struct igu_backward_compatible backward_compatible; +}; + + +/* + * Igu control commands + */ +enum igu_ctrl_cmd { + IGU_CTRL_CMD_TYPE_RD, + IGU_CTRL_CMD_TYPE_WR, + MAX_IGU_CTRL_CMD +}; + + +/* + * Control register for the IGU command register + */ +struct igu_ctrl_reg { + u32 ctrl_data; +#define IGU_CTRL_REG_ADDRESS (0xFFF<<0) +#define IGU_CTRL_REG_ADDRESS_SHIFT 0 +#define IGU_CTRL_REG_FID (0x7F<<12) +#define IGU_CTRL_REG_FID_SHIFT 12 +#define IGU_CTRL_REG_RESERVED (0x1<<19) +#define IGU_CTRL_REG_RESERVED_SHIFT 19 +#define IGU_CTRL_REG_TYPE (0x1<<20) +#define IGU_CTRL_REG_TYPE_SHIFT 20 +#define IGU_CTRL_REG_UNUSED (0x7FF<<21) +#define IGU_CTRL_REG_UNUSED_SHIFT 21 +}; + + +/* + * Igu interrupt command + */ +enum igu_int_cmd { + IGU_INT_ENABLE, + IGU_INT_DISABLE, + IGU_INT_NOP, + IGU_INT_NOP2, + MAX_IGU_INT_CMD +}; + + +/* + * Igu segments + */ +enum igu_seg_access { + IGU_SEG_ACCESS_NORM, + IGU_SEG_ACCESS_DEF, + IGU_SEG_ACCESS_ATTN, + MAX_IGU_SEG_ACCESS +}; + + +/* + * Parser parsing flags field + */ +struct parsing_flags { + __le16 flags; +#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0) +#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0 +#define PARSING_FLAGS_VLAN (0x1<<1) +#define PARSING_FLAGS_VLAN_SHIFT 1 +#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2) +#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2 +#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3) +#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3 +#define PARSING_FLAGS_IP_OPTIONS (0x1<<5) +#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5 +#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6) +#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6 +#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7) +#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7 +#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9) +#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9 +#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10) +#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10 +#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11) +#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11 +#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12) +#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12 +#define PARSING_FLAGS_LLC_SNAP (0x1<<13) +#define PARSING_FLAGS_LLC_SNAP_SHIFT 13 +#define PARSING_FLAGS_RESERVED0 (0x3<<14) +#define PARSING_FLAGS_RESERVED0_SHIFT 14 +}; + + +/* + * Parsing flags for TCP ACK type + */ 
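The enums that follow give the field values for parsing_flags. As an example of consuming them, a sketch that tests whether a received frame carried TCP, assuming the flags word has already been converted from __le16 to host order; the constants mirror the defines above and the prs_flags_over_ip enum below:

#include <stdint.h>

typedef uint16_t u16;

#define PARSING_FLAGS_OVER_IP_PROTOCOL		(0x3<<7)
#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT	7
#define PRS_FLAG_OVERIP_TCP	1	/* second entry of prs_flags_over_ip */

/* Classify the L4 protocol of a received frame from its CQE
 * parsing flags. */
static inline int cqe_is_tcp(u16 flags)
{
	u16 l4 = (flags & PARSING_FLAGS_OVER_IP_PROTOCOL) >>
		 PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT;

	return l4 == PRS_FLAG_OVERIP_TCP;
}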
+enum prs_flags_ack_type { + PRS_FLAG_PUREACK_PIGGY, + PRS_FLAG_PUREACK_PURE, + MAX_PRS_FLAGS_ACK_TYPE +}; + + +/* + * Parsing flags for Ethernet address type + */ +enum prs_flags_eth_addr_type { + PRS_FLAG_ETHTYPE_NON_UNICAST, + PRS_FLAG_ETHTYPE_UNICAST, + MAX_PRS_FLAGS_ETH_ADDR_TYPE +}; + + +/* + * Parsing flags for over-ethernet protocol + */ +enum prs_flags_over_eth { + PRS_FLAG_OVERETH_UNKNOWN, + PRS_FLAG_OVERETH_IPV4, + PRS_FLAG_OVERETH_IPV6, + PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN, + MAX_PRS_FLAGS_OVER_ETH +}; + + +/* + * Parsing flags for over-IP protocol + */ +enum prs_flags_over_ip { + PRS_FLAG_OVERIP_UNKNOWN, + PRS_FLAG_OVERIP_TCP, + PRS_FLAG_OVERIP_UDP, + MAX_PRS_FLAGS_OVER_IP +}; + + +/* + * SDM operation gen command (generate aggregative interrupt) + */ +struct sdm_op_gen { + __le32 command; +#define SDM_OP_GEN_COMP_PARAM (0x1F<<0) +#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 +#define SDM_OP_GEN_COMP_TYPE (0x7<<5) +#define SDM_OP_GEN_COMP_TYPE_SHIFT 5 +#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8) +#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8 +#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16) +#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16 +#define SDM_OP_GEN_RESERVED (0x7FFF<<17) +#define SDM_OP_GEN_RESERVED_SHIFT 17 +}; + + +/* + * Timers connection context + */ +struct timers_block_context { + u32 __reserved_0; + u32 __reserved_1; + u32 __reserved_2; + u32 flags; +#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) +#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 +#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) +#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 +#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) +#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 +}; + + +/* + * The eth aggregative context of Tstorm + */ +struct tstorm_eth_ag_context { + u32 __reserved0[14]; +}; + + +/* + * The eth aggregative context of Ustorm + */ +struct ustorm_eth_ag_context { + u32 __reserved0; +#if defined(__BIG_ENDIAN) + u8 cdu_usage; + u8 __reserved2; + u16 __reserved1; +#elif defined(__LITTLE_ENDIAN) + u16 __reserved1; + u8 __reserved2; + u8 cdu_usage; +#endif + u32 __reserved3[6]; +}; + + +/* + * The eth aggregative context of Xstorm + */ +struct xstorm_eth_ag_context { + u32 reserved0; +#if defined(__BIG_ENDIAN) + u8 cdu_reserved; + u8 reserved2; + u16 reserved1; +#elif defined(__LITTLE_ENDIAN) + u16 reserved1; + u8 reserved2; + u8 cdu_reserved; +#endif + u32 reserved3[30]; +}; + + +/* + * doorbell message sent to the chip + */ +struct doorbell { +#if defined(__BIG_ENDIAN) + u16 zero_fill2; + u8 zero_fill1; + struct doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr header; + u8 zero_fill1; + u16 zero_fill2; +#endif +}; + + +/* + * doorbell message sent to the chip + */ +struct doorbell_set_prod { +#if defined(__BIG_ENDIAN) + u16 prod; + u8 zero_fill1; + struct doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr header; + u8 zero_fill1; + u16 prod; +#endif +}; + + +struct regpair { + __le32 lo; + __le32 hi; +}; + + +/* + * Classify rule opcodes in E2/E3 + */ +enum classify_rule { + CLASSIFY_RULE_OPCODE_MAC, + CLASSIFY_RULE_OPCODE_VLAN, + CLASSIFY_RULE_OPCODE_PAIR, + MAX_CLASSIFY_RULE +}; + + +/* + * Classify rule types in E2/E3 + */ +enum classify_rule_action_type { + CLASSIFY_RULE_REMOVE, + CLASSIFY_RULE_ADD, + MAX_CLASSIFY_RULE_ACTION_TYPE +}; + + +/* + * client init ramrod data + */ +struct client_init_general_data { + u8 client_id; + u8 statistics_counter_id; + u8 statistics_en_flg; + u8 is_fcoe_flg; + u8 activate_flg; 
+ u8 sp_client_id; + __le16 mtu; + u8 statistics_zero_flg; + u8 func_id; + u8 cos; + u8 traffic_type; + u32 reserved0; +}; + + +/* + * client init rx data + */ +struct client_init_rx_data { + u8 tpa_en; +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0) +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0 +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 +#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2) +#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2 + u8 vmqueue_mode_en_flg; + u8 extra_data_over_sgl_en_flg; + u8 cache_line_alignment_log_size; + u8 enable_dynamic_hc; + u8 max_sges_for_packet; + u8 client_qzone_id; + u8 drop_ip_cs_err_flg; + u8 drop_tcp_cs_err_flg; + u8 drop_ttl0_flg; + u8 drop_udp_cs_err_flg; + u8 inner_vlan_removal_enable_flg; + u8 outer_vlan_removal_enable_flg; + u8 status_block_id; + u8 rx_sb_index_number; + u8 reserved0; + u8 max_tpa_queues; + u8 silent_vlan_removal_flg; + __le16 max_bytes_on_bd; + __le16 sge_buff_size; + u8 approx_mcast_engine_id; + u8 rss_engine_id; + struct regpair bd_page_base; + struct regpair sge_page_base; + struct regpair cqe_page_base; + u8 is_leading_rss; + u8 is_approx_mcast; + __le16 max_agg_size; + __le16 state; +#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0) +#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0 +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1) +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1 +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2) +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3) +#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3 +#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4) +#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4 +#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5) +#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5 +#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6) +#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6 +#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7) +#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7 + __le16 cqe_pause_thr_low; + __le16 cqe_pause_thr_high; + __le16 bd_pause_thr_low; + __le16 bd_pause_thr_high; + __le16 sge_pause_thr_low; + __le16 sge_pause_thr_high; + __le16 rx_cos_mask; + __le16 silent_vlan_value; + __le16 silent_vlan_mask; + __le32 reserved6[2]; +}; + +/* + * client init tx data + */ +struct client_init_tx_data { + u8 enforce_security_flg; + u8 tx_status_block_id; + u8 tx_sb_index_number; + u8 tss_leading_client_id; + u8 tx_switching_flg; + u8 anti_spoofing_flg; + __le16 default_vlan; + struct regpair tx_bd_page_base; + __le16 state; +#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0) +#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0 +#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1) +#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1 +#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2) +#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 +#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) +#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 +#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) +#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 + u8 default_vlan_flg; + u8 reserved2; + __le32 reserved3; +}; + +/* + * client init ramrod data + */ +struct client_init_ramrod_data { + struct client_init_general_data general; + struct client_init_rx_data rx; + struct client_init_tx_data tx; +}; + + +/* + * client update ramrod data + */ +struct client_update_ramrod_data { + u8 client_id; + u8 func_id; + u8 
inner_vlan_removal_enable_flg; + u8 inner_vlan_removal_change_flg; + u8 outer_vlan_removal_enable_flg; + u8 outer_vlan_removal_change_flg; + u8 anti_spoofing_enable_flg; + u8 anti_spoofing_change_flg; + u8 activate_flg; + u8 activate_change_flg; + __le16 default_vlan; + u8 default_vlan_enable_flg; + u8 default_vlan_change_flg; + __le16 silent_vlan_value; + __le16 silent_vlan_mask; + u8 silent_vlan_removal_flg; + u8 silent_vlan_change_flg; + __le32 echo; +}; + + +/* + * The eth storm context of Cstorm + */ +struct cstorm_eth_st_context { + u32 __reserved0[4]; +}; + + +struct double_regpair { + u32 regpair0_lo; + u32 regpair0_hi; + u32 regpair1_lo; + u32 regpair1_hi; +}; + + +/* + * Ethernet address types used in ethernet tx BDs + */ +enum eth_addr_type { + UNKNOWN_ADDRESS, + UNICAST_ADDRESS, + MULTICAST_ADDRESS, + BROADCAST_ADDRESS, + MAX_ETH_ADDR_TYPE +}; + + +/* + * + */ +struct eth_classify_cmd_header { + u8 cmd_general_data; +#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0) +#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0 +#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1) +#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1 +#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2) +#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2 +#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4) +#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4 +#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5) +#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5 + u8 func_id; + u8 client_id; + u8 reserved1; +}; + + +/* + * header for eth classification config ramrod + */ +struct eth_classify_header { + u8 rule_cnt; + u8 reserved0; + __le16 reserved1; + __le32 echo; +}; + + +/* + * Command for adding/removing a MAC classification rule + */ +struct eth_classify_mac_cmd { + struct eth_classify_cmd_header header; + __le32 reserved0; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 reserved1; +}; + + +/* + * Command for adding/removing a MAC-VLAN pair classification rule + */ +struct eth_classify_pair_cmd { + struct eth_classify_cmd_header header; + __le32 reserved0; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 vlan; +}; + + +/* + * Command for adding/removing a VLAN classification rule + */ +struct eth_classify_vlan_cmd { + struct eth_classify_cmd_header header; + __le32 reserved0; + __le32 reserved1; + __le16 reserved2; + __le16 vlan; +}; + +/* + * union for eth classification rule + */ +union eth_classify_rule_cmd { + struct eth_classify_mac_cmd mac; + struct eth_classify_vlan_cmd vlan; + struct eth_classify_pair_cmd pair; +}; + +/* + * parameters for eth classification configuration ramrod + */ +struct eth_classify_rules_ramrod_data { + struct eth_classify_header header; + union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; +}; + + +/* + * The data contains the client ID needed by the ramrod + */ +struct eth_common_ramrod_data { + __le32 client_id; + __le32 reserved1; +}; + + +/* + * The eth storm context of Ustorm + */ +struct ustorm_eth_st_context { + u32 reserved0[52]; +}; + +/* + * The eth storm context of Tstorm + */ +struct tstorm_eth_st_context { + u32 __reserved0[28]; +}; + +/* + * The eth storm context of Xstorm + */ +struct xstorm_eth_st_context { + u32 reserved0[60]; +}; + +/* + * Ethernet connection context + */ +struct eth_context { + struct ustorm_eth_st_context ustorm_st_context; + struct tstorm_eth_st_context tstorm_st_context; + struct xstorm_eth_ag_context xstorm_ag_context; + struct tstorm_eth_ag_context tstorm_ag_context; + struct cstorm_eth_ag_context cstorm_ag_context; + struct
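The classification commands above carry the station address as three 16-bit little-endian words (mac_msb/mac_mid/mac_lsb). A sketch of splitting a byte-wise MAC into that layout; the assumption that the earlier wire-order byte sits in the high bits of each word is illustrative, and the glibc/BSD htole16() stands in for the kernel's cpu_to_le16():

#include <stdint.h>
#include <endian.h>

typedef uint8_t u8;
typedef uint16_t u16;

/* Split a 6-byte MAC into the firmware's three little-endian words. */
static void mac_to_fw_thirds(const u8 mac[6], u16 *msb, u16 *mid, u16 *lsb)
{
	*msb = htole16((u16)mac[0] << 8 | mac[1]);
	*mid = htole16((u16)mac[2] << 8 | mac[3]);
	*lsb = htole16((u16)mac[4] << 8 | mac[5]);
}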
ustorm_eth_ag_context ustorm_ag_context; + struct timers_block_context timers_context; + struct xstorm_eth_st_context xstorm_st_context; + struct cstorm_eth_st_context cstorm_st_context; +}; + + +/* + * union for sgl and raw data. + */ +union eth_sgl_or_raw_data { + __le16 sgl[8]; + u32 raw_data[4]; +}; + +/* + * eth FP end aggregation CQE parameters struct + */ +struct eth_end_agg_rx_cqe { + u8 type_error_flags; +#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0) +#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0 +#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2) +#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2 +#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3) +#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3 + u8 reserved1; + u8 queue_index; + u8 reserved2; + __le32 timestamp_delta; + __le16 num_of_coalesced_segs; + __le16 pkt_len; + u8 pure_ack_count; + u8 reserved3; + __le16 reserved4; + union eth_sgl_or_raw_data sgl_or_raw_data; + __le32 reserved5[8]; +}; + + +/* + * regular eth FP CQE parameters struct + */ +struct eth_fast_path_rx_cqe { + u8 type_error_flags; +#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0) +#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2) +#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2 +#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3) +#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3 +#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4) +#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 +#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) +#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 +#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) +#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 + u8 status_flags; +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3) +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3 +#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4) +#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4 +#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5) +#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5 +#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6) +#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) +#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 + u8 queue_index; + u8 placement_offset; + __le32 rss_hash_result; + __le16 vlan_tag; + __le16 pkt_len; + __le16 len_on_bd; + struct parsing_flags pars_flags; + union eth_sgl_or_raw_data sgl_or_raw_data; + __le32 reserved1[8]; +}; + + +/* + * Command for setting classification flags for a client + */ +struct eth_filter_rules_cmd { + u8 cmd_general_data; +#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0) +#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0 +#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1) +#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1 +#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2) +#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2 + u8 func_id; + u8 client_id; + u8 reserved1; + __le16 state; +#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0) +#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0 +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1) +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2) +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3) +#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3 +#define 
ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4) +#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5) +#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5 +#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6) +#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6 +#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7) +#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7 + __le16 reserved3; + struct regpair reserved4; +}; + + +/* + * parameters for eth classification filters ramrod + */ +struct eth_filter_rules_ramrod_data { + struct eth_classify_header header; + struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; +}; + + +/* + * parameters for eth classification configuration ramrod + */ +struct eth_general_rules_ramrod_data { + struct eth_classify_header header; + union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; +}; + + +/* + * The data for Halt ramrod + */ +struct eth_halt_ramrod_data { + __le32 client_id; + __le32 reserved0; +}; + + +/* + * Command for setting multicast classification for a client + */ +struct eth_multicast_rules_cmd { + u8 cmd_general_data; +#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) +#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0 +#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1) +#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1 +#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2) +#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2 +#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3) +#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3 + u8 func_id; + u8 bin_id; + u8 engine_id; + __le32 reserved2; + struct regpair reserved3; +}; + + +/* + * parameters for multicast classification ramrod + */ +struct eth_multicast_rules_ramrod_data { + struct eth_classify_header header; + struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; +}; + + +/* + * Place holder for ramrods protocol specific data + */ +struct ramrod_data { + __le32 data_lo; + __le32 data_hi; +}; + +/* + * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits) + */ +union eth_ramrod_data { + struct ramrod_data general; +}; + + +/* + * RSS toeplitz hash type, as reported in CQE + */ +enum eth_rss_hash_type { + DEFAULT_HASH_TYPE, + IPV4_HASH_TYPE, + TCP_IPV4_HASH_TYPE, + IPV6_HASH_TYPE, + TCP_IPV6_HASH_TYPE, + VLAN_PRI_HASH_TYPE, + E1HOV_PRI_HASH_TYPE, + DSCP_HASH_TYPE, + MAX_ETH_RSS_HASH_TYPE +}; + + +/* + * Ethernet RSS mode + */ +enum eth_rss_mode { + ETH_RSS_MODE_DISABLED, + ETH_RSS_MODE_REGULAR, + ETH_RSS_MODE_VLAN_PRI, + ETH_RSS_MODE_E1HOV_PRI, + ETH_RSS_MODE_IP_DSCP, + MAX_ETH_RSS_MODE +}; + + +/* + * parameters for RSS update ramrod (E2) + */ +struct eth_rss_update_ramrod_data { + u8 rss_engine_id; + u8 capabilities; +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6) +#define 
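The state word of eth_filter_rules_cmd encodes a client's rx mode as independent accept/drop bits. One plausible composition for a promiscuous mode, using the bit values defined above; how the driver actually maps its rx modes onto these bits is not shown in this patch:

#include <stdint.h>

typedef uint16_t u16;

#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL	(0x1<<1)	/* from above */
#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL	(0x1<<4)
#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL	(0x1<<5)
#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN	(0x1<<6)

/* Accept everything, regardless of address or VLAN. */
static inline u16 filter_state_promisc(void)
{
	return ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL |
	       ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL |
	       ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL |
	       ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
}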
ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6 +#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7) +#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7 + u8 rss_result_mask; + u8 rss_mode; + __le32 __reserved2; + u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; + __le32 rss_key[T_ETH_RSS_KEY]; + __le32 echo; + __le32 reserved3; +}; + + +/* + * The eth Rx Buffer Descriptor + */ +struct eth_rx_bd { + __le32 addr_lo; + __le32 addr_hi; +}; + + +/* + * Eth Rx Cqe structure- general structure for ramrods + */ +struct common_ramrod_eth_rx_cqe { + u8 ramrod_type; +#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0) +#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 +#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2) +#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2 +#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3) +#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3 + u8 conn_type; + __le16 reserved1; + __le32 conn_and_cmd_data; +#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0) +#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0 +#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) +#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 + struct ramrod_data protocol_data; + __le32 echo; + __le32 reserved2[11]; +}; + +/* + * Rx Last CQE in page (in ETH) + */ +struct eth_rx_cqe_next_page { + __le32 addr_lo; + __le32 addr_hi; + __le32 reserved[14]; +}; + +/* + * union for all eth rx cqe types (fix their sizes) + */ +union eth_rx_cqe { + struct eth_fast_path_rx_cqe fast_path_cqe; + struct common_ramrod_eth_rx_cqe ramrod_cqe; + struct eth_rx_cqe_next_page next_page_cqe; + struct eth_end_agg_rx_cqe end_agg_cqe; +}; + + +/* + * Values for RX ETH CQE type field + */ +enum eth_rx_cqe_type { + RX_ETH_CQE_TYPE_ETH_FASTPATH, + RX_ETH_CQE_TYPE_ETH_RAMROD, + RX_ETH_CQE_TYPE_ETH_START_AGG, + RX_ETH_CQE_TYPE_ETH_STOP_AGG, + MAX_ETH_RX_CQE_TYPE +}; + + +/* + * Type of SGL/Raw field in ETH RX fast path CQE + */ +enum eth_rx_fp_sel { + ETH_FP_CQE_REGULAR, + ETH_FP_CQE_RAW, + MAX_ETH_RX_FP_SEL +}; + + +/* + * The eth Rx SGE Descriptor + */ +struct eth_rx_sge { + __le32 addr_lo; + __le32 addr_hi; +}; + + +/* + * common data for all protocols + */ +struct spe_hdr { + __le32 conn_and_cmd_data; +#define SPE_HDR_CID (0xFFFFFF<<0) +#define SPE_HDR_CID_SHIFT 0 +#define SPE_HDR_CMD_ID (0xFF<<24) +#define SPE_HDR_CMD_ID_SHIFT 24 + __le16 type; +#define SPE_HDR_CONN_TYPE (0xFF<<0) +#define SPE_HDR_CONN_TYPE_SHIFT 0 +#define SPE_HDR_FUNCTION_ID (0xFF<<8) +#define SPE_HDR_FUNCTION_ID_SHIFT 8 + __le16 reserved1; +}; + +/* + * specific data for ethernet slow path element + */ +union eth_specific_data { + u8 protocol_data[8]; + struct regpair client_update_ramrod_data; + struct regpair client_init_ramrod_init_data; + struct eth_halt_ramrod_data halt_ramrod_data; + struct regpair update_data_addr; + struct eth_common_ramrod_data common_ramrod_data; + struct regpair classify_cfg_addr; + struct regpair filter_cfg_addr; + struct regpair mcast_cfg_addr; +}; + +/* + * Ethernet slow path element + */ +struct eth_spe { + struct spe_hdr hdr; + union eth_specific_data data; +}; + + +/* + * Ethernet command ID for slow path elements + */ +enum eth_spqe_cmd_id { + RAMROD_CMD_ID_ETH_UNUSED, + RAMROD_CMD_ID_ETH_CLIENT_SETUP, + RAMROD_CMD_ID_ETH_HALT, + RAMROD_CMD_ID_ETH_FORWARD_SETUP, + RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP, + RAMROD_CMD_ID_ETH_CLIENT_UPDATE, + RAMROD_CMD_ID_ETH_EMPTY, + RAMROD_CMD_ID_ETH_TERMINATE, + RAMROD_CMD_ID_ETH_TPA_UPDATE, + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES, + RAMROD_CMD_ID_ETH_FILTER_RULES, + RAMROD_CMD_ID_ETH_MULTICAST_RULES, 
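The RSS update ramrod above carries a full indirection table, one rx queue ID per hash bucket. A sketch of the usual round-robin fill; T_ETH_INDIRECTION_TABLE_SIZE is defined elsewhere in the driver, and the value 128 here is an assumption for illustration:

#include <stdint.h>

typedef uint8_t u8;

#define T_ETH_INDIRECTION_TABLE_SIZE 128	/* assumed size */

/* Spread num_queues rx queues across the indirection table so that
 * each hash bucket names the queue that receives its flows. */
static void fill_indirection_table(u8 *table, const u8 *queue_ids,
				   int num_queues)
{
	int i;

	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
		table[i] = queue_ids[i % num_queues];
}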
+ RAMROD_CMD_ID_ETH_RSS_UPDATE, + RAMROD_CMD_ID_ETH_SET_MAC, + MAX_ETH_SPQE_CMD_ID +}; + + +/* + * eth tpa update command + */ +enum eth_tpa_update_command { + TPA_UPDATE_NONE_COMMAND, + TPA_UPDATE_ENABLE_COMMAND, + TPA_UPDATE_DISABLE_COMMAND, + MAX_ETH_TPA_UPDATE_COMMAND +}; + + +/* + * Tx regular BD structure + */ +struct eth_tx_bd { + __le32 addr_lo; + __le32 addr_hi; + __le16 total_pkt_bytes; + __le16 nbytes; + u8 reserved[4]; +}; + + +/* + * structure for easy accessibility to assembler + */ +struct eth_tx_bd_flags { + u8 as_bitfield; +#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) +#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 +#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) +#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 +#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) +#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 +#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) +#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 +#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) +#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 +#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) +#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 +#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) +#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 +}; + +/* + * The eth Tx Buffer Descriptor + */ +struct eth_tx_start_bd { + __le32 addr_lo; + __le32 addr_hi; + __le16 nbd; + __le16 nbytes; + __le16 vlan_or_ethertype; + struct eth_tx_bd_flags bd_flags; + u8 general_data; +#define ETH_TX_START_BD_HDR_NBDS (0xF<<0) +#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 +#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) +#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 +#define ETH_TX_START_BD_RESREVED (0x1<<5) +#define ETH_TX_START_BD_RESREVED_SHIFT 5 +#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) +#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 +}; + +/* + * Tx parsing BD structure for ETH E1/E1h + */ +struct eth_tx_parse_bd_e1x { + u8 global_data; +#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) +#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) +#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) +#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 + u8 tcp_flags; +#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) +#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 +#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) +#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 +#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) +#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 +#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) +#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 +#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) +#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) +#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 +#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) +#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) +#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 + u8 ip_hlen_w; + s8 reserved; + __le16 total_hlen_w; + __le16 tcp_pseudo_csum; + __le16 lso_mss; + __le16 ip_id; + __le32 tcp_send_seq; +}; + +/* + * Tx parsing BD structure for ETH E2 + */ +struct eth_tx_parse_bd_e2 { + __le16 dst_mac_addr_lo; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_hi; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + __le16 src_mac_addr_hi; + __le32 parsing_data; +#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) +#define 
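The bd_flags byte of the first transmit BD selects the offloads for the whole packet. A sketch of composing it for an IPv4 frame with hardware IP and L4 checksum offload, using the bit values from eth_tx_bd_flags above; when and whether the driver sets each bit depends on skb state not shown here:

#include <stdint.h>

typedef uint8_t u8;

#define ETH_TX_BD_FLAGS_IP_CSUM		(0x1<<0)	/* from above */
#define ETH_TX_BD_FLAGS_L4_CSUM		(0x1<<1)
#define ETH_TX_BD_FLAGS_START_BD	(0x1<<4)
#define ETH_TX_BD_FLAGS_IS_UDP		(0x1<<5)

/* Flags for the first BD of a checksummed IPv4 packet;
 * is_udp selects the L4 checksum type. */
static inline u8 tx_bd_flags_csum(int is_udp)
{
	u8 flags = ETH_TX_BD_FLAGS_START_BD |
		   ETH_TX_BD_FLAGS_IP_CSUM |
		   ETH_TX_BD_FLAGS_L4_CSUM;

	if (is_udp)
		flags |= ETH_TX_BD_FLAGS_IS_UDP;
	return flags;
}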
ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 +#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) +#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 +}; + +/* + * The last BD in the BD memory will hold a pointer to the next BD memory + */ +struct eth_tx_next_bd { + __le32 addr_lo; + __le32 addr_hi; + u8 reserved[8]; +}; + +/* + * union for 4 Bd types + */ +union eth_tx_bd_types { + struct eth_tx_start_bd start_bd; + struct eth_tx_bd reg_bd; + struct eth_tx_parse_bd_e1x parse_bd_e1x; + struct eth_tx_parse_bd_e2 parse_bd_e2; + struct eth_tx_next_bd next_bd; +}; + +/* + * array of 13 bds as appears in the eth xstorm context + */ +struct eth_tx_bds_array { + union eth_tx_bd_types bds[13]; +}; + + +/* + * VLAN mode on TX BDs + */ +enum eth_tx_vlan_type { + X_ETH_NO_VLAN, + X_ETH_OUTBAND_VLAN, + X_ETH_INBAND_VLAN, + X_ETH_FW_ADDED_VLAN, + MAX_ETH_TX_VLAN_TYPE +}; + + +/* + * Ethernet VLAN filtering mode in E1x + */ +enum eth_vlan_filter_mode { + ETH_VLAN_FILTER_ANY_VLAN, + ETH_VLAN_FILTER_SPECIFIC_VLAN, + ETH_VLAN_FILTER_CLASSIFY, + MAX_ETH_VLAN_FILTER_MODE +}; + + +/* + * MAC filtering configuration command header + */ +struct mac_configuration_hdr { + u8 length; + u8 offset; + __le16 client_id; + __le32 echo; +}; + +/* + * MAC address in list for ramrod + */ +struct mac_configuration_entry { + __le16 lsb_mac_addr; + __le16 middle_mac_addr; + __le16 msb_mac_addr; + __le16 vlan_id; + u8 pf_id; + u8 flags; +#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0) +#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0 +#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1) +#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1 +#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2) +#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2 +#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4) +#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4 +#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5) +#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5 +#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) +#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6 + __le16 reserved0; + __le32 clients_bit_vector; +}; + +/* + * MAC filtering configuration command + */ +struct mac_configuration_cmd { + struct mac_configuration_hdr hdr; + struct mac_configuration_entry config_table[64]; +}; + + +/* + * Set-MAC command type (in E1x) + */ +enum set_mac_action_type { + T_ETH_MAC_COMMAND_INVALIDATE, + T_ETH_MAC_COMMAND_SET, + MAX_SET_MAC_ACTION_TYPE +}; + + +/* + * tpa update ramrod data + */ +struct tpa_update_ramrod_data { + u8 update_ipv4; + u8 update_ipv6; + u8 client_id; + u8 max_tpa_queues; + u8 max_sges_for_packet; + u8 complete_on_both_clients; + __le16 reserved1; + __le16 sge_buff_size; + __le16 max_agg_size; + __le32 sge_page_base_lo; + __le32 sge_page_base_hi; + __le16 sge_pause_thr_low; + __le16 sge_pause_thr_high; +}; + + +/* + * approximate-match multicast filtering for E1H per function in Tstorm + */ +struct tstorm_eth_approximate_match_multicast_filtering { + u32 mcast_add_hash_bit_array[8]; +}; + + +/* + * Common configuration parameters per function in Tstorm + */ +struct tstorm_eth_function_common_config { + __le16 config_flags; +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 +#define 
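For E2, all TSO parameters travel in the single parsing_data dword whose fields are defined above. A sketch of packing it, with the caller supplying the TCP header start offset in 16-bit words, the TCP header length in 32-bit words, and the MSS; values are assumed to fit their field widths, and the final conversion to __le32 (cpu_to_le32 in kernel code) is left out:

#include <stdint.h>

typedef uint32_t u32;

#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT	0	/* from above */
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT	13
#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT		17

/* Pack the E2 parsing data for an LSO send. */
static inline u32 e2_parsing_data(u32 tcp_off_w, u32 tcp_hlen_dw, u32 mss)
{
	return (tcp_off_w << ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) |
	       (tcp_hlen_dw << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) |
	       (mss << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT);
}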
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7 +#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) +#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8 + u8 rss_result_mask; + u8 reserved1; + __le16 vlan_id[2]; +}; + + +/* + * MAC filtering configuration parameters per port in Tstorm + */ +struct tstorm_eth_mac_filter_config { + __le32 ucast_drop_all; + __le32 ucast_accept_all; + __le32 mcast_drop_all; + __le32 mcast_accept_all; + __le32 bcast_accept_all; + __le32 vlan_filter[2]; + __le32 unmatched_unicast; +}; + + +/* + * tx only queue init ramrod data + */ +struct tx_queue_init_ramrod_data { + struct client_init_general_data general; + struct client_init_tx_data tx; +}; + + +/* + * Three RX producers for ETH + */ +struct ustorm_eth_rx_producers { +#if defined(__BIG_ENDIAN) + u16 bd_prod; + u16 cqe_prod; +#elif defined(__LITTLE_ENDIAN) + u16 cqe_prod; + u16 bd_prod; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved; + u16 sge_prod; +#elif defined(__LITTLE_ENDIAN) + u16 sge_prod; + u16 reserved; +#endif +}; + + +/* + * cfc delete event data + */ +struct cfc_del_event_data { + u32 cid; + u32 reserved0; + u32 reserved1; +}; + + +/* + * per-port SAFC demo variables + */ +struct cmng_flags_per_port { + u32 cmng_enables; +#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0 +#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) +#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1 +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2 +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3 +#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4) +#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4 + u32 __reserved1; +}; + + +/* + * per-port rate shaping variables + */ +struct rate_shaping_vars_per_port { + u32 rs_periodic_timeout; + u32 rs_threshold; +}; + +/* + * per-port fairness variables + */ +struct fairness_vars_per_port { + u32 upper_bound; + u32 fair_threshold; + u32 fairness_timeout; + u32 reserved0; +}; + +/* + * per-port SAFC variables + */ +struct safc_struct_per_port { +#if defined(__BIG_ENDIAN) + u16 __reserved1; + u8 __reserved0; + u8 safc_timeout_usec; +#elif defined(__LITTLE_ENDIAN) + u8 safc_timeout_usec; + u8 __reserved0; + u16 __reserved1; +#endif + u8 cos_to_traffic_types[MAX_COS_NUMBER]; + u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; +}; + +/* + * Per-port congestion management variables + */ +struct cmng_struct_per_port { + struct rate_shaping_vars_per_port rs_vars; + struct fairness_vars_per_port fair_vars; + struct safc_struct_per_port safc_vars; + struct cmng_flags_per_port flags; +}; + + +/* + * Protocol-common command ID for slow path elements + */ +enum common_spqe_cmd_id { + RAMROD_CMD_ID_COMMON_UNUSED, + RAMROD_CMD_ID_COMMON_FUNCTION_START, + 
RAMROD_CMD_ID_COMMON_FUNCTION_STOP, + RAMROD_CMD_ID_COMMON_CFC_DEL, + RAMROD_CMD_ID_COMMON_CFC_DEL_WB, + RAMROD_CMD_ID_COMMON_STAT_QUERY, + RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, + RAMROD_CMD_ID_COMMON_START_TRAFFIC, + RAMROD_CMD_ID_COMMON_RESERVED1, + RAMROD_CMD_ID_COMMON_RESERVED2, + MAX_COMMON_SPQE_CMD_ID +}; + + +/* + * Per-protocol connection types + */ +enum connection_type { + ETH_CONNECTION_TYPE, + TOE_CONNECTION_TYPE, + RDMA_CONNECTION_TYPE, + ISCSI_CONNECTION_TYPE, + FCOE_CONNECTION_TYPE, + RESERVED_CONNECTION_TYPE_0, + RESERVED_CONNECTION_TYPE_1, + RESERVED_CONNECTION_TYPE_2, + NONE_CONNECTION_TYPE, + MAX_CONNECTION_TYPE +}; + + +/* + * Cos modes + */ +enum cos_mode { + OVERRIDE_COS, + STATIC_COS, + FW_WRR, + MAX_COS_MODE +}; + + +/* + * Dynamic HC counters set by the driver + */ +struct hc_dynamic_drv_counter { + u32 val[HC_SB_MAX_DYNAMIC_INDICES]; +}; + +/* + * zone A per-queue data + */ +struct cstorm_queue_zone_data { + struct hc_dynamic_drv_counter hc_dyn_drv_cnt; + struct regpair reserved[2]; +}; + + +/* + * Vf-PF channel data in cstorm ram (non-triggered zone) + */ +struct vf_pf_channel_zone_data { + u32 msg_addr_lo; + u32 msg_addr_hi; +}; + +/* + * zone for VF non-triggered data + */ +struct non_trigger_vf_zone { + struct vf_pf_channel_zone_data vf_pf_channel; +}; + +/* + * Vf-PF channel trigger zone in cstorm ram + */ +struct vf_pf_channel_zone_trigger { + u8 addr_valid; +}; + +/* + * zone that triggers the in-bound interrupt + */ +struct trigger_vf_zone { +#if defined(__BIG_ENDIAN) + u16 reserved1; + u8 reserved0; + struct vf_pf_channel_zone_trigger vf_pf_channel; +#elif defined(__LITTLE_ENDIAN) + struct vf_pf_channel_zone_trigger vf_pf_channel; + u8 reserved0; + u16 reserved1; +#endif + u32 reserved2; +}; + +/* + * zone B per-VF data + */ +struct cstorm_vf_zone_data { + struct non_trigger_vf_zone non_trigger; + struct trigger_vf_zone trigger; +}; + + +/* + * Dynamic host coalescing init parameters, per state machine + */ +struct dynamic_hc_sm_config { + u32 threshold[3]; + u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES]; +}; + +/* + * Dynamic host coalescing init parameters + */ +struct dynamic_hc_config { + struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM]; +}; + + +struct e2_integ_data { +#if defined(__BIG_ENDIAN) + u8 flags; +#define E2_INTEG_DATA_TESTING_EN (0x1<<0) +#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 +#define E2_INTEG_DATA_LB_TX (0x1<<1) +#define E2_INTEG_DATA_LB_TX_SHIFT 1 +#define E2_INTEG_DATA_COS_TX (0x1<<2) +#define E2_INTEG_DATA_COS_TX_SHIFT 2 +#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) +#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 +#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) +#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 +#define E2_INTEG_DATA_RESERVED (0x7<<5) +#define E2_INTEG_DATA_RESERVED_SHIFT 5 + u8 cos; + u8 voq; + u8 pbf_queue; +#elif defined(__LITTLE_ENDIAN) + u8 pbf_queue; + u8 voq; + u8 cos; + u8 flags; +#define E2_INTEG_DATA_TESTING_EN (0x1<<0) +#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 +#define E2_INTEG_DATA_LB_TX (0x1<<1) +#define E2_INTEG_DATA_LB_TX_SHIFT 1 +#define E2_INTEG_DATA_COS_TX (0x1<<2) +#define E2_INTEG_DATA_COS_TX_SHIFT 2 +#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) +#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 +#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) +#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 +#define 
E2_INTEG_DATA_RESERVED (0x7<<5) +#define E2_INTEG_DATA_RESERVED_SHIFT 5 +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u8 reserved2; + u8 ramEn; +#elif defined(__LITTLE_ENDIAN) + u8 ramEn; + u8 reserved2; + u16 reserved3; +#endif +}; + + +/* + * set mac event data + */ +struct eth_event_data { + u32 echo; + u32 reserved0; + u32 reserved1; +}; + + +/* + * pf-vf event data + */ +struct vf_pf_event_data { + u8 vf_id; + u8 reserved0; + u16 reserved1; + u32 msg_addr_lo; + u32 msg_addr_hi; +}; + +/* + * VF FLR event data + */ +struct vf_flr_event_data { + u8 vf_id; + u8 reserved0; + u16 reserved1; + u32 reserved2; + u32 reserved3; +}; + +/* + * malicious VF event data + */ +struct malicious_vf_event_data { + u8 vf_id; + u8 reserved0; + u16 reserved1; + u32 reserved2; + u32 reserved3; +}; + +/* + * union for all event ring message types + */ +union event_data { + struct vf_pf_event_data vf_pf_event; + struct eth_event_data eth_event; + struct cfc_del_event_data cfc_del_event; + struct vf_flr_event_data vf_flr_event; + struct malicious_vf_event_data malicious_vf_event; +}; + + +/* + * per PF event ring data + */ +struct event_ring_data { + struct regpair base_addr; +#if defined(__BIG_ENDIAN) + u8 index_id; + u8 sb_id; + u16 producer; +#elif defined(__LITTLE_ENDIAN) + u16 producer; + u8 sb_id; + u8 index_id; +#endif + u32 reserved0; +}; + + +/* + * event ring message element (each element is 128 bits) + */ +struct event_ring_msg { + u8 opcode; + u8 error; + u16 reserved1; + union event_data data; +}; + +/* + * event ring next page element (128 bits) + */ +struct event_ring_next { + struct regpair addr; + u32 reserved[2]; +}; + +/* + * union for event ring element types (each element is 128 bits) + */ +union event_ring_elem { + struct event_ring_msg message; + struct event_ring_next next_page; +}; + + +/* + * Common event ring opcodes + */ +enum event_ring_opcode { + EVENT_RING_OPCODE_VF_PF_CHANNEL, + EVENT_RING_OPCODE_FUNCTION_START, + EVENT_RING_OPCODE_FUNCTION_STOP, + EVENT_RING_OPCODE_CFC_DEL, + EVENT_RING_OPCODE_CFC_DEL_WB, + EVENT_RING_OPCODE_STAT_QUERY, + EVENT_RING_OPCODE_STOP_TRAFFIC, + EVENT_RING_OPCODE_START_TRAFFIC, + EVENT_RING_OPCODE_VF_FLR, + EVENT_RING_OPCODE_MALICIOUS_VF, + EVENT_RING_OPCODE_FORWARD_SETUP, + EVENT_RING_OPCODE_RSS_UPDATE_RULES, + EVENT_RING_OPCODE_RESERVED1, + EVENT_RING_OPCODE_RESERVED2, + EVENT_RING_OPCODE_SET_MAC, + EVENT_RING_OPCODE_CLASSIFICATION_RULES, + EVENT_RING_OPCODE_FILTERS_RULES, + EVENT_RING_OPCODE_MULTICAST_RULES, + MAX_EVENT_RING_OPCODE +}; + + +/* + * Modes for fairness algorithm + */ +enum fairness_mode { + FAIRNESS_COS_WRR_MODE, + FAIRNESS_COS_ETS_MODE, + MAX_FAIRNESS_MODE +}; + + +/* + * per-vnic fairness variables + */ +struct fairness_vars_per_vn { + u32 cos_credit_delta[MAX_COS_NUMBER]; + u32 vn_credit_delta; + u32 __reserved0; +}; + + +/* + * Priority and cos + */ +struct priority_cos { + u8 priority; + u8 cos; + __le16 reserved1; +}; + +/* + * The data for flow control configuration + */ +struct flow_control_configuration { + struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; + u8 dcb_enabled; + u8 dcb_version; + u8 dont_add_pri_0_en; + u8 reserved1; + __le32 reserved2; +}; + + +/* + * + */ +struct function_start_data { + __le16 function_mode; + __le16 sd_vlan_tag; + u16 reserved; + u8 path_id; + u8 network_cos_mode; +}; + + +/* + * FW version stored in the Xstorm RAM + */ +struct fw_version { +#if defined(__BIG_ENDIAN) + u8 engineering; + u8 revision; + u8 minor; + u8 major; +#elif defined(__LITTLE_ENDIAN) + u8 
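Slow-path completions arrive on the event ring as 128-bit elements whose payload is interpreted according to the message opcode. A minimal dispatch sketch for one opcode; EVENT_RING_OPCODE_CFC_DEL takes the value 3 by its position in the enum above, only the fields needed here are mirrored, and a real handler would switch over every opcode and consume the matching union event_data member:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint32_t u32;

#define EVENT_RING_OPCODE_CFC_DEL 3	/* fourth entry of the enum above */

struct cfc_del_event_data {
	u32 cid;
	u32 reserved0;
	u32 reserved1;
};

/* Handle one event ring message payload by opcode. */
static void handle_event(u8 opcode, const void *data)
{
	if (opcode == EVENT_RING_OPCODE_CFC_DEL) {
		const struct cfc_del_event_data *ev = data;

		printf("CFC delete completed for cid %u\n",
		       (unsigned)ev->cid);
	}
}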
major; + u8 minor; + u8 revision; + u8 engineering; +#endif + u32 flags; +#define FW_VERSION_OPTIMIZED (0x1<<0) +#define FW_VERSION_OPTIMIZED_SHIFT 0 +#define FW_VERSION_BIG_ENDIEN (0x1<<1) +#define FW_VERSION_BIG_ENDIEN_SHIFT 1 +#define FW_VERSION_CHIP_VERSION (0x3<<2) +#define FW_VERSION_CHIP_VERSION_SHIFT 2 +#define __FW_VERSION_RESERVED (0xFFFFFFF<<4) +#define __FW_VERSION_RESERVED_SHIFT 4 +}; + + +/* + * Dynamic Host-Coalescing - Driver(host) counters + */ +struct hc_dynamic_sb_drv_counters { + u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES]; +}; + + +/* + * 2 bytes. configuration/state parameters for a single protocol index + */ +struct hc_index_data { +#if defined(__BIG_ENDIAN) + u8 flags; +#define HC_INDEX_DATA_SM_ID (0x1<<0) +#define HC_INDEX_DATA_SM_ID_SHIFT 0 +#define HC_INDEX_DATA_HC_ENABLED (0x1<<1) +#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1 +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2) +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2 +#define HC_INDEX_DATA_RESERVE (0x1F<<3) +#define HC_INDEX_DATA_RESERVE_SHIFT 3 + u8 timeout; +#elif defined(__LITTLE_ENDIAN) + u8 timeout; + u8 flags; +#define HC_INDEX_DATA_SM_ID (0x1<<0) +#define HC_INDEX_DATA_SM_ID_SHIFT 0 +#define HC_INDEX_DATA_HC_ENABLED (0x1<<1) +#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1 +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2) +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2 +#define HC_INDEX_DATA_RESERVE (0x1F<<3) +#define HC_INDEX_DATA_RESERVE_SHIFT 3 +#endif +}; + + +/* + * HC state-machine + */ +struct hc_status_block_sm { +#if defined(__BIG_ENDIAN) + u8 igu_seg_id; + u8 igu_sb_id; + u8 timer_value; + u8 __flags; +#elif defined(__LITTLE_ENDIAN) + u8 __flags; + u8 timer_value; + u8 igu_sb_id; + u8 igu_seg_id; +#endif + u32 time_to_expire; +}; + +/* + * hold PCI identification variables - used in various places in firmware + */ +struct pci_entity { +#if defined(__BIG_ENDIAN) + u8 vf_valid; + u8 vf_id; + u8 vnic_id; + u8 pf_id; +#elif defined(__LITTLE_ENDIAN) + u8 pf_id; + u8 vnic_id; + u8 vf_id; + u8 vf_valid; +#endif +}; + +/* + * The fast-path status block meta-data, common to all chips + */ +struct hc_sb_data { + struct regpair host_sb_addr; + struct hc_status_block_sm state_machine[HC_SB_MAX_SM]; + struct pci_entity p_func; +#if defined(__BIG_ENDIAN) + u8 rsrv0; + u8 state; + u8 dhc_qzone_id; + u8 same_igu_sb_1b; +#elif defined(__LITTLE_ENDIAN) + u8 same_igu_sb_1b; + u8 dhc_qzone_id; + u8 state; + u8 rsrv0; +#endif + struct regpair rsrv1[2]; +}; + + +/* + * Segment types for host coalescing + */ +enum hc_segment { + HC_REGULAR_SEGMENT, + HC_DEFAULT_SEGMENT, + MAX_HC_SEGMENT +}; + + +/* + * The fast-path status block meta-data + */ +struct hc_sp_status_block_data { + struct regpair host_sb_addr; +#if defined(__BIG_ENDIAN) + u8 rsrv1; + u8 state; + u8 igu_seg_id; + u8 igu_sb_id; +#elif defined(__LITTLE_ENDIAN) + u8 igu_sb_id; + u8 igu_seg_id; + u8 state; + u8 rsrv1; +#endif + struct pci_entity p_func; +}; + + +/* + * The fast-path status block meta-data + */ +struct hc_status_block_data_e1x { + struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X]; + struct hc_sb_data common; +}; + + +/* + * The fast-path status block meta-data + */ +struct hc_status_block_data_e2 { + struct hc_index_data index_data[HC_SB_MAX_INDICES_E2]; + struct hc_sb_data common; +}; + + +/* + * IGU block operation modes (in Everest2) + */ +enum igu_mode { + HC_IGU_BC_MODE, + HC_IGU_NBC_MODE, + MAX_IGU_MODE +}; + + +/* + * IP versions + */ +enum ip_ver { + IP_V4, + IP_V6, + MAX_IP_VER +}; + + +/* + * Multi-function modes + */
+enum mf_mode { + SINGLE_FUNCTION, + MULTI_FUNCTION_SD, + MULTI_FUNCTION_SI, + MULTI_FUNCTION_RESERVED, + MAX_MF_MODE +}; + +/* + * Protocol-common statistics collected by the Tstorm (per pf) + */ +struct tstorm_per_pf_stats { + struct regpair rcv_error_bytes; +}; + +/* + * + */ +struct per_pf_stats { + struct tstorm_per_pf_stats tstorm_pf_statistics; +}; + + +/* + * Protocol-common statistics collected by the Tstorm (per port) + */ +struct tstorm_per_port_stats { + __le32 mac_discard; + __le32 mac_filter_discard; + __le32 brb_truncate_discard; + __le32 mf_tag_discard; + __le32 packet_drop; + __le32 reserved; +}; + +/* + * + */ +struct per_port_stats { + struct tstorm_per_port_stats tstorm_port_statistics; +}; + + +/* + * Protocol-common statistics collected by the Tstorm (per client) + */ +struct tstorm_per_queue_stats { + struct regpair rcv_ucast_bytes; + __le32 rcv_ucast_pkts; + __le32 checksum_discard; + struct regpair rcv_bcast_bytes; + __le32 rcv_bcast_pkts; + __le32 pkts_too_big_discard; + struct regpair rcv_mcast_bytes; + __le32 rcv_mcast_pkts; + __le32 ttl0_discard; + __le16 no_buff_discard; + __le16 reserved0; + __le32 reserved1; +}; + +/* + * Protocol-common statistics collected by the Ustorm (per client) + */ +struct ustorm_per_queue_stats { + struct regpair ucast_no_buff_bytes; + struct regpair mcast_no_buff_bytes; + struct regpair bcast_no_buff_bytes; + __le32 ucast_no_buff_pkts; + __le32 mcast_no_buff_pkts; + __le32 bcast_no_buff_pkts; + __le32 coalesced_pkts; + struct regpair coalesced_bytes; + __le32 coalesced_events; + __le32 coalesced_aborts; +}; + +/* + * Protocol-common statistics collected by the Xstorm (per client) + */ +struct xstorm_per_queue_stats { + struct regpair ucast_bytes_sent; + struct regpair mcast_bytes_sent; + struct regpair bcast_bytes_sent; + __le32 ucast_pkts_sent; + __le32 mcast_pkts_sent; + __le32 bcast_pkts_sent; + __le32 error_drop_pkts; +}; + +/* + * + */ +struct per_queue_stats { + struct tstorm_per_queue_stats tstorm_queue_statistics; + struct ustorm_per_queue_stats ustorm_queue_statistics; + struct xstorm_per_queue_stats xstorm_queue_statistics; +}; + + +/* + * FW version stored in first line of pram + */ +struct pram_fw_version { + u8 major; + u8 minor; + u8 revision; + u8 engineering; + u8 flags; +#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0) +#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0 +#define PRAM_FW_VERSION_STORM_ID (0x3<<1) +#define PRAM_FW_VERSION_STORM_ID_SHIFT 1 +#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3) +#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3 +#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4) +#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4 +#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6) +#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6 +}; + + +/* + * Ethernet slow path element + */ +union protocol_common_specific_data { + u8 protocol_data[8]; + struct regpair phy_address; + struct regpair mac_config_addr; +}; + +/* + * The send queue element + */ +struct protocol_common_spe { + struct spe_hdr hdr; + union protocol_common_specific_data data; +}; + + +/* + * a single rate shaping counter. 
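The per-queue statistics above split receive traffic by cast type. A sketch of how a driver might total them for one queue when filling in standard interface statistics; the struct below mirrors only the three packet counters from tstorm_per_queue_stats, assumed already converted from __le32 to host order:

#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Host-order mirror of the rx packet counters for one queue. */
struct rx_pkt_counters {
	u32 rcv_ucast_pkts;
	u32 rcv_mcast_pkts;
	u32 rcv_bcast_pkts;
};

/* Total good rx packets for the queue; widened to 64 bits so the
 * sum cannot overflow. */
static inline u64 queue_rx_packets(const struct rx_pkt_counters *c)
{
	return (u64)c->rcv_ucast_pkts + c->rcv_mcast_pkts +
	       c->rcv_bcast_pkts;
}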
can be used as protocol or vnic counter + */ +struct rate_shaping_counter { + u32 quota; +#if defined(__BIG_ENDIAN) + u16 __reserved0; + u16 rate; +#elif defined(__LITTLE_ENDIAN) + u16 rate; + u16 __reserved0; +#endif +}; + + +/* + * per-vnic rate shaping variables + */ +struct rate_shaping_vars_per_vn { + struct rate_shaping_counter vn_counter; +}; + + +/* + * The send queue element + */ +struct slow_path_element { + struct spe_hdr hdr; + struct regpair protocol_data; +}; + + +/* + * Protocol-common statistics counter + */ +struct stats_counter { + __le16 xstats_counter; + __le16 reserved0; + __le32 reserved1; + __le16 tstats_counter; + __le16 reserved2; + __le32 reserved3; + __le16 ustats_counter; + __le16 reserved4; + __le32 reserved5; + __le16 cstats_counter; + __le16 reserved6; + __le32 reserved7; +}; + + +/* + * + */ +struct stats_query_entry { + u8 kind; + u8 index; + __le16 funcID; + __le32 reserved; + struct regpair address; +}; + +/* + * statistics command + */ +struct stats_query_cmd_group { + struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; +}; + + +/* + * statistics command header + */ +struct stats_query_header { + u8 cmd_num; + u8 reserved0; + __le16 drv_stats_counter; + __le32 reserved1; + struct regpair stats_counters_addrs; +}; + + +/* + * Types of statistics query entry + */ +enum stats_query_type { + STATS_TYPE_QUEUE, + STATS_TYPE_PORT, + STATS_TYPE_PF, + STATS_TYPE_TOE, + STATS_TYPE_FCOE, + MAX_STATS_QUERY_TYPE +}; + + +/* + * Indicates the function status block state + */ +enum status_block_state { + SB_DISABLED, + SB_ENABLED, + SB_CLEANED, + MAX_STATUS_BLOCK_STATE +}; + + +/* + * Storm IDs (including attentions for IGU related enums) + */ +enum storm_id { + USTORM_ID, + CSTORM_ID, + XSTORM_ID, + TSTORM_ID, + ATTENTION_ID, + MAX_STORM_ID +}; + + +/* + * Traffic types used in ETS and flow control algorithms + */ +enum traffic_type { + LLFC_TRAFFIC_TYPE_NW, + LLFC_TRAFFIC_TYPE_FCOE, + LLFC_TRAFFIC_TYPE_ISCSI, + MAX_TRAFFIC_TYPE +}; + + +/* + * zone A per-queue data + */ +struct tstorm_queue_zone_data { + struct regpair reserved[4]; +}; + + +/* + * zone B per-VF data + */ +struct tstorm_vf_zone_data { + struct regpair reserved; +}; + + +/* + * zone A per-queue data + */ +struct ustorm_queue_zone_data { + struct ustorm_eth_rx_producers eth_rx_producers; + struct regpair reserved[3]; +}; + + +/* + * zone B per-VF data + */ +struct ustorm_vf_zone_data { + struct regpair reserved; +}; + + +/* + * data per VF-PF channel + */ +struct vf_pf_channel_data { +#if defined(__BIG_ENDIAN) + u16 reserved0; + u8 valid; + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 valid; + u16 reserved0; +#endif + u32 reserved1; +}; + + +/* + * State of VF-PF channel + */ +enum vf_pf_channel_state { + VF_PF_CHANNEL_STATE_READY, + VF_PF_CHANNEL_STATE_WAITING_FOR_ACK, + MAX_VF_PF_CHANNEL_STATE +}; + + +/* + * zone A per-queue data + */ +struct xstorm_queue_zone_data { + struct regpair reserved[4]; +}; + + +/* + * zone B per-VF data + */ +struct xstorm_vf_zone_data { + struct regpair reserved; +}; + +#endif /* BNX2X_HSI_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h new file mode 100644 index 0000000..4d748e7 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -0,0 +1,567 @@ +/* bnx2x_init.h: Broadcom Everest network driver. + * Structures and macros needed during the initialization.
+ * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Modified by: Vladislav Zolotarov + */ + +#ifndef BNX2X_INIT_H +#define BNX2X_INIT_H + +/* Init operation types and structures */ +enum { + OP_RD = 0x1, /* read a single register */ + OP_WR, /* write a single register */ + OP_SW, /* copy a string to the device */ + OP_ZR, /* clear memory */ + OP_ZP, /* unzip then copy with DMAE */ + OP_WR_64, /* write 64 bit pattern */ + OP_WB, /* copy a string using DMAE */ + OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */ + /* Skip the following ops if all of the init modes don't match */ + OP_IF_MODE_OR, + /* Skip the following ops if any of the init modes don't match */ + OP_IF_MODE_AND, + OP_MAX +}; + +enum { + STAGE_START, + STAGE_END, +}; + +/* Returns the index of start or end of a specific block stage in ops array*/ +#define BLOCK_OPS_IDX(block, stage, end) \ + (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) + + +/* structs for the various opcodes */ +struct raw_op { + u32 op:8; + u32 offset:24; + u32 raw_data; +}; + +struct op_read { + u32 op:8; + u32 offset:24; + u32 val; +}; + +struct op_write { + u32 op:8; + u32 offset:24; + u32 val; +}; + +struct op_arr_write { + u32 op:8; + u32 offset:24; +#ifdef __BIG_ENDIAN + u16 data_len; + u16 data_off; +#else /* __LITTLE_ENDIAN */ + u16 data_off; + u16 data_len; +#endif +}; + +struct op_zero { + u32 op:8; + u32 offset:24; + u32 len; +}; + +struct op_if_mode { + u32 op:8; + u32 cmd_offset:24; + u32 mode_bit_map; +}; + + +union init_op { + struct op_read read; + struct op_write write; + struct op_arr_write arr_wr; + struct op_zero zero; + struct raw_op raw; + struct op_if_mode if_mode; +}; + + +/* Init Phases */ +enum { + PHASE_COMMON, + PHASE_PORT0, + PHASE_PORT1, + PHASE_PF0, + PHASE_PF1, + PHASE_PF2, + PHASE_PF3, + PHASE_PF4, + PHASE_PF5, + PHASE_PF6, + PHASE_PF7, + NUM_OF_INIT_PHASES +}; + +/* Init Modes */ +enum { + MODE_ASIC = 0x00000001, + MODE_FPGA = 0x00000002, + MODE_EMUL = 0x00000004, + MODE_E2 = 0x00000008, + MODE_E3 = 0x00000010, + MODE_PORT2 = 0x00000020, + MODE_PORT4 = 0x00000040, + MODE_SF = 0x00000080, + MODE_MF = 0x00000100, + MODE_MF_SD = 0x00000200, + MODE_MF_SI = 0x00000400, + MODE_MF_NIV = 0x00000800, + MODE_E3_A0 = 0x00001000, + MODE_E3_B0 = 0x00002000, + MODE_COS3 = 0x00004000, + MODE_COS6 = 0x00008000, + MODE_LITTLE_ENDIAN = 0x00010000, + MODE_BIG_ENDIAN = 0x00020000, +}; + +/* Init Blocks */ +enum { + BLOCK_ATC, + BLOCK_BRB1, + BLOCK_CCM, + BLOCK_CDU, + BLOCK_CFC, + BLOCK_CSDM, + BLOCK_CSEM, + BLOCK_DBG, + BLOCK_DMAE, + BLOCK_DORQ, + BLOCK_HC, + BLOCK_IGU, + BLOCK_MISC, + BLOCK_NIG, + BLOCK_PBF, + BLOCK_PGLUE_B, + BLOCK_PRS, + BLOCK_PXP2, + BLOCK_PXP, + BLOCK_QM, + BLOCK_SRC, + BLOCK_TCM, + BLOCK_TM, + BLOCK_TSDM, + BLOCK_TSEM, + BLOCK_UCM, + BLOCK_UPB, + BLOCK_USDM, + BLOCK_USEM, + BLOCK_XCM, + BLOCK_XPB, + BLOCK_XSDM, + BLOCK_XSEM, + BLOCK_MISC_AEU, + NUM_OF_INIT_BLOCKS +}; + +/* QM queue numbers */ +#define BNX2X_ETH_Q 0 +#define BNX2X_TOE_Q 3 +#define BNX2X_TOE_ACK_Q 6 +#define BNX2X_ISCSI_Q 9 +#define BNX2X_ISCSI_ACK_Q 11 +#define BNX2X_FCOE_Q 10 + +/* Vnics per mode */ +#define BNX2X_PORT2_MODE_NUM_VNICS 4 +#define BNX2X_PORT4_MODE_NUM_VNICS 2 + +/* COS offset for port1 in E3 B0 4port mode */ +#define BNX2X_E3B0_PORT1_COS_OFFSET 3 + +/* QM Register 
addresses */ +#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\ + (QM_REG_QVOQIDX_0 + 4 * (pf_q_num)) +#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\ + (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5))) +#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\ + (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4)) + +/* extracts the QM queue number for the specified port and vnic */ +#define BNX2X_PF_Q_NUM(q_num, port, vnic)\ + ((((port) << 1) | (vnic)) * 16 + (q_num)) + + +/* Maps the specified queue to the specified COS */ +static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos) +{ + /* find current COS mapping */ + u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4); + + /* check if queue->COS mapping has changed */ + if (curr_cos != new_cos) { + u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS; + u32 reg_addr, reg_bit_map, vnic; + + /* update parameters for 4port mode */ + if (INIT_MODE_FLAGS(bp) & MODE_PORT4) { + num_vnics = BNX2X_PORT4_MODE_NUM_VNICS; + if (BP_PORT(bp)) { + curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET; + new_cos += BNX2X_E3B0_PORT1_COS_OFFSET; + } + } + + /* change queue mapping for each VNIC */ + for (vnic = 0; vnic < num_vnics; vnic++) { + u32 pf_q_num = + BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic); + u32 q_bit_map = 1 << (pf_q_num & 0x1f); + + /* overwrite queue->VOQ mapping */ + REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos); + + /* clear queue bit from current COS bit map */ + reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num); + reg_bit_map = REG_RD(bp, reg_addr); + REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map)); + + /* set queue bit in new COS bit map */ + reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num); + reg_bit_map = REG_RD(bp, reg_addr); + REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); + + /* set/clear queue bit in command-queue bit map + (E2/E3A0 only, valid COS values are 0/1) */ + if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { + reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); + reg_bit_map = REG_RD(bp, reg_addr); + q_bit_map = 1 << (2 * (pf_q_num & 0xf)); + reg_bit_map = new_cos ? 
+ (reg_bit_map | q_bit_map) : + (reg_bit_map & (~q_bit_map)); + REG_WR(bp, reg_addr, reg_bit_map); + } + } + } +} + +/* Configures the QM according to the specified per-traffic-type COSes */ +static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode, + struct priority_cos *traffic_cos) +{ + bnx2x_map_q_cos(bp, BNX2X_FCOE_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos); + bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); + bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); + if (mode != STATIC_COS) { + /* required only in backward compatible COS mode */ + bnx2x_map_q_cos(bp, BNX2X_ETH_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + bnx2x_map_q_cos(bp, BNX2X_TOE_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + } +} + + +/* Returns the index of start or end of a specific block stage in ops array*/ +#define BLOCK_OPS_IDX(block, stage, end) \ + (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) + + +#define INITOP_SET 0 /* set the HW directly */ +#define INITOP_CLEAR 1 /* clear the HW directly */ +#define INITOP_INIT 2 /* set the init-value array */ + +/**************************************************************************** +* ILT management +****************************************************************************/ +struct ilt_line { + dma_addr_t page_mapping; + void *page; + u32 size; +}; + +struct ilt_client_info { + u32 page_size; + u16 start; + u16 end; + u16 client_num; + u16 flags; +#define ILT_CLIENT_SKIP_INIT 0x1 +#define ILT_CLIENT_SKIP_MEM 0x2 +}; + +struct bnx2x_ilt { + u32 start_line; + struct ilt_line *lines; + struct ilt_client_info clients[4]; +#define ILT_CLIENT_CDU 0 +#define ILT_CLIENT_QM 1 +#define ILT_CLIENT_SRC 2 +#define ILT_CLIENT_TM 3 +}; + +/**************************************************************************** +* SRC configuration +****************************************************************************/ +struct src_ent { + u8 opaque[56]; + u64 next; +}; + +/**************************************************************************** +* Parity configuration +****************************************************************************/ +#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK, \ + block##_REG_##block##_PRTY_STS_CLR, \ + en_mask, {m1, m1h, m2, m3}, #block \ +} + +#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK_0, \ + block##_REG_##block##_PRTY_STS_CLR_0, \ + en_mask, {m1, m1h, m2, m3}, #block"_0" \ +} + +#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK_1, \ + block##_REG_##block##_PRTY_STS_CLR_1, \ + en_mask, {m1, m1h, m2, m3}, #block"_1" \ +} + +static const struct { + u32 mask_addr; + u32 sts_clr_addr; + u32 en_mask; /* Mask to enable parity attentions */ + struct { + u32 e1; /* 57710 */ + u32 e1h; /* 57711 */ + u32 e2; /* 57712 */ + u32 e3; /* 578xx */ + } reg_mask; /* Register mask (all valid bits) */ + char name[7]; /* Block's longest name is 6 characters long + * (name + suffix) + */ +} bnx2x_blocks_parity_data[] = { + /* bit 19 masked */ + /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */ + /* bit 5,18,20-31 */ + /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */ + /* bit 5 */ + /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */ + /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */ + /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */ + 
+ /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't + * want to handle "system kill" flow at the moment. + */ + BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff, + 0x7ffffff), + BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff), + BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0), + BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0), + BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff), + BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff), + BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f), + BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3), + BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3), + {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, + GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf, + {0xf, 0xf, 0xf, 0xf}, "UPB"}, + {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, + GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, + {0xf, 0xf, 0xf, 0xf}, "XPB"}, + BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7), + BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f), + BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff), + BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff), + BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f), + BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff), + BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f), + BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f), +}; + + +/* [28] MCP Latched rom_parity + * [29] MCP Latched ump_rx_parity + * [30] MCP Latched ump_tx_parity + * [31] MCP Latched scpad_parity + */ +#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ + (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + +/* Below registers control the MCP parity attention output. When + * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are + * enabled, when cleared - disabled. 
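+ * + * A hypothetical usage sketch (not a flow taken from this driver): code + * that must not be disturbed by MCP parity attentions could bracket its + * work with the helper defined below, i.e. call + * bnx2x_set_mcp_parity(bp, false) first and bnx2x_set_mcp_parity(bp, true) + * once the sensitive section is done.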
+ */ +static const u32 mcp_attn_ctl_regs[] = { + MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, + MISC_REG_AEU_ENABLE4_NIG_0, + MISC_REG_AEU_ENABLE4_PXP_0, + MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, + MISC_REG_AEU_ENABLE4_NIG_1, + MISC_REG_AEU_ENABLE4_PXP_1 +}; + +static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) +{ + int i; + u32 reg_val; + + for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { + reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); + + if (enable) + reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; + else + reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; + + REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); + } +} + +static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx) +{ + if (CHIP_IS_E1(bp)) + return bnx2x_blocks_parity_data[idx].reg_mask.e1; + else if (CHIP_IS_E1H(bp)) + return bnx2x_blocks_parity_data[idx].reg_mask.e1h; + else if (CHIP_IS_E2(bp)) + return bnx2x_blocks_parity_data[idx].reg_mask.e2; + else /* CHIP_IS_E3 */ + return bnx2x_blocks_parity_data[idx].reg_mask.e3; +} + +static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { + u32 dis_mask = bnx2x_parity_reg_mask(bp, i); + + if (dis_mask) { + REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, + dis_mask); + DP(NETIF_MSG_HW, "Setting parity mask " + "for %s to\t\t0x%x\n", + bnx2x_blocks_parity_data[i].name, dis_mask); + } + } + + /* Disable MCP parity attentions */ + bnx2x_set_mcp_parity(bp, false); +} + +/** + * Clear the parity error status registers. + */ +static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) +{ + int i; + u32 reg_val, mcp_aeu_bits = + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY; + + /* Clear SEM_FAST parities */ + REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { + u32 reg_mask = bnx2x_parity_reg_mask(bp, i); + + if (reg_mask) { + reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i]. 
+ sts_clr_addr); + if (reg_val & reg_mask) + DP(NETIF_MSG_HW, + "Parity errors in %s: 0x%x\n", + bnx2x_blocks_parity_data[i].name, + reg_val & reg_mask); + } + } + + /* Check if there were parity attentions in MCP */ + reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP); + if (reg_val & mcp_aeu_bits) + DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n", + reg_val & mcp_aeu_bits); + + /* Clear parity attentions in MCP: + * [7] clears Latched rom_parity + * [8] clears Latched ump_rx_parity + * [9] clears Latched ump_tx_parity + * [10] clears Latched scpad_parity (both ports) + */ + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780); +} + +static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { + u32 reg_mask = bnx2x_parity_reg_mask(bp, i); + + if (reg_mask) + REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, + bnx2x_blocks_parity_data[i].en_mask & reg_mask); + } + + /* Enable MCP parity attentions */ + bnx2x_set_mcp_parity(bp, true); +} + + +#endif /* BNX2X_INIT_H */ + diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h new file mode 100644 index 0000000..7ec1724 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -0,0 +1,912 @@ +/* bnx2x_init_ops.h: Broadcom Everest network driver. + * Static functions needed during the initialization. + * This file is "included" in bnx2x_main.c. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Vladislav Zolotarov + */ + +#ifndef BNX2X_INIT_OPS_H +#define BNX2X_INIT_OPS_H + + +#ifndef BP_ILT +#define BP_ILT(bp) NULL +#endif + +#ifndef BP_FUNC +#define BP_FUNC(bp) 0 +#endif + +#ifndef BP_PORT +#define BP_PORT(bp) 0 +#endif + +#ifndef BNX2X_ILT_FREE +#define BNX2X_ILT_FREE(x, y, sz) +#endif + +#ifndef BNX2X_ILT_ZALLOC +#define BNX2X_ILT_ZALLOC(x, y, sz) +#endif + +#ifndef ILOG2 +#define ILOG2(x) x +#endif + +static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len); +static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); +static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, + dma_addr_t phys_addr, u32 addr, + u32 len); + +static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) + REG_WR(bp, addr + i*4, data[i]); +} + +static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) + bnx2x_reg_wr_ind(bp, addr + i*4, data[i]); +} + +static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len, + u8 wb) +{ + if (bp->dmae_ready) + bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); + else if (wb) + /* + * Wide bus registers with no dmae need to be written + * using indirect write. + */ + bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); + else + bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); +} + +static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, + u32 len, u8 wb) +{ + u32 buf_len = (((len*4) > FW_BUF_SIZE) ? 
FW_BUF_SIZE : (len*4)); + u32 buf_len32 = buf_len/4; + u32 i; + + memset(GUNZIP_BUF(bp), (u8)fill, buf_len); + + for (i = 0; i < len; i += buf_len32) { + u32 cur_len = min(buf_len32, len - i); + + bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb); + } +} + +static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) +{ + if (bp->dmae_ready) + bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); + else + bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); +} + +static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len64) +{ + u32 buf_len32 = FW_BUF_SIZE/4; + u32 len = len64*2; + u64 data64 = 0; + u32 i; + + /* 64 bit value is in a blob: first low DWORD, then high DWORD */ + data64 = HILO_U64((*(data + 1)), (*data)); + + len64 = min((u32)(FW_BUF_SIZE/8), len64); + for (i = 0; i < len64; i++) { + u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i; + + *pdata = data64; + } + + for (i = 0; i < len; i += buf_len32) { + u32 cur_len = min(buf_len32, len - i); + + bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len); + } +} + +/********************************************************* + There are different blobs for each PRAM section. + In addition, each blob write operation is divided into a few operations + in order to decrease the amount of phys. contiguous buffer needed. + Thus, when we select a blob the address may be with some offset + from the beginning of PRAM section. + The same holds for the INT_TABLE sections. +**********************************************************/ +#define IF_IS_INT_TABLE_ADDR(base, addr) \ + if (((base) <= (addr)) && ((base) + 0x400 >= (addr))) + +#define IF_IS_PRAM_ADDR(base, addr) \ + if (((base) <= (addr)) && ((base) + 0x40000 >= (addr))) + +static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, + const u8 *data) +{ + IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) + data = INIT_TSEM_INT_TABLE_DATA(bp); + else + IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr) + data = INIT_CSEM_INT_TABLE_DATA(bp); + else + IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr) + data = INIT_USEM_INT_TABLE_DATA(bp); + else + IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr) + data = INIT_XSEM_INT_TABLE_DATA(bp); + else + IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr) + data = INIT_TSEM_PRAM_DATA(bp); + else + IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr) + data = INIT_CSEM_PRAM_DATA(bp); + else + IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr) + data = INIT_USEM_PRAM_DATA(bp); + else + IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr) + data = INIT_XSEM_PRAM_DATA(bp); + + return data; +} + +static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len) +{ + if (bp->dmae_ready) + VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); + else + bnx2x_init_ind_wr(bp, addr, data, len); +} + +static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, + u32 val_hi) +{ + u32 wb_write[2]; + + wb_write[0] = val_lo; + wb_write[1] = val_hi; + REG_WR_DMAE_LEN(bp, reg, wb_write, 2); +} +static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, + u32 blob_off) +{ + const u8 *data = NULL; + int rc; + u32 i; + + data = bnx2x_sel_blob(bp, addr, data) + blob_off*4; + + rc = bnx2x_gunzip(bp, data, len); + if (rc) + return; + + /* gunzip_outlen is in dwords */ + len = GUNZIP_OUTLEN(bp); + for (i = 0; i < len; i++) + ((u32 *)GUNZIP_BUF(bp))[i] = + cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]); + + bnx2x_write_big_buf_wb(bp, addr, len); +} + +static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) +{ + u16 op_start = + INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, + STAGE_START)]; + u16 op_end 
= + INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, + STAGE_END)]; + union init_op *op; + u32 op_idx, op_type, addr, len; + const u32 *data, *data_base; + + /* If empty block */ + if (op_start == op_end) + return; + + data_base = INIT_DATA(bp); + + for (op_idx = op_start; op_idx < op_end; op_idx++) { + + op = (union init_op *)&(INIT_OPS(bp)[op_idx]); + /* Get generic data */ + op_type = op->raw.op; + addr = op->raw.offset; + /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and + * OP_WR64 (we assume that op_arr_write and op_write have the + * same structure). + */ + len = op->arr_wr.data_len; + data = data_base + op->arr_wr.data_off; + + switch (op_type) { + case OP_RD: + REG_RD(bp, addr); + break; + case OP_WR: + REG_WR(bp, addr, op->write.val); + break; + case OP_SW: + bnx2x_init_str_wr(bp, addr, data, len); + break; + case OP_WB: + bnx2x_init_wr_wb(bp, addr, data, len); + break; + case OP_ZR: + bnx2x_init_fill(bp, addr, 0, op->zero.len, 0); + break; + case OP_WB_ZR: + bnx2x_init_fill(bp, addr, 0, op->zero.len, 1); + break; + case OP_ZP: + bnx2x_init_wr_zp(bp, addr, len, + op->arr_wr.data_off); + break; + case OP_WR_64: + bnx2x_init_wr_64(bp, addr, data, len); + break; + case OP_IF_MODE_AND: + /* if any of the flags doesn't match, skip the + * conditional block. + */ + if ((INIT_MODE_FLAGS(bp) & + op->if_mode.mode_bit_map) != + op->if_mode.mode_bit_map) + op_idx += op->if_mode.cmd_offset; + break; + case OP_IF_MODE_OR: + /* if all the flags don't match, skip the conditional + * block. + */ + if ((INIT_MODE_FLAGS(bp) & + op->if_mode.mode_bit_map) == 0) + op_idx += op->if_mode.cmd_offset; + break; + default: + /* Should never get here! */ + + break; + } + } +} + + +/**************************************************************************** +* PXP Arbiter +****************************************************************************/ +/* + * This code configures the PCI read/write arbiter + * which implements a weighted round robin + * between the virtual queues in the chip. + * + * The values were derived for each PCI max payload and max request size. + * since max payload and max request size are only known at run time, + * this is done as a separate init stage. 
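+ * + * As an illustrative reading of the tables below (assuming the usual + * 128B << order encoding of the PCI request sizes): with a 512B max read + * request (r_order 2) the first read queue is programmed with l=32, + * add=64, ubound=25, while r_order 3 selects l=64, add=64, ubound=41 for + * the same queue.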
+ */ + +#define NUM_WR_Q 13 +#define NUM_RD_Q 29 +#define MAX_RD_ORD 3 +#define MAX_WR_ORD 2 + +/* configuration for one arbiter queue */ +struct arb_line { + int l; + int add; + int ubound; +}; + +/* derived configuration for each read queue for each max request size */ +static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = { +/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, + { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} }, + { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} }, + { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} }, + { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, +/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, +/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} } +}; + +/* derived configuration for each write queue for each max request size */ +static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = { +/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} }, + { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, +/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} }, + { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} }, + { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} }, + { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} } +}; + +/* register addresses for read queues */ +static const struct arb_line read_arb_addr[NUM_RD_Q-1] = { +/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0, + PXP2_REG_RQ_BW_RD_UBOUND0}, + {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, + PXP2_REG_PSWRQ_BW_UB1}, + {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, + PXP2_REG_PSWRQ_BW_UB2}, + {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, + PXP2_REG_PSWRQ_BW_UB3}, + {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4, + PXP2_REG_RQ_BW_RD_UBOUND4}, + {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5, + PXP2_REG_RQ_BW_RD_UBOUND5}, + {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, + PXP2_REG_PSWRQ_BW_UB6}, + {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, + PXP2_REG_PSWRQ_BW_UB7}, + {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, + PXP2_REG_PSWRQ_BW_UB8}, +/* 10 
*/{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, + PXP2_REG_PSWRQ_BW_UB9}, + {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, + PXP2_REG_PSWRQ_BW_UB10}, + {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, + PXP2_REG_PSWRQ_BW_UB11}, + {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12, + PXP2_REG_RQ_BW_RD_UBOUND12}, + {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13, + PXP2_REG_RQ_BW_RD_UBOUND13}, + {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14, + PXP2_REG_RQ_BW_RD_UBOUND14}, + {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15, + PXP2_REG_RQ_BW_RD_UBOUND15}, + {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16, + PXP2_REG_RQ_BW_RD_UBOUND16}, + {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17, + PXP2_REG_RQ_BW_RD_UBOUND17}, + {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18, + PXP2_REG_RQ_BW_RD_UBOUND18}, +/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19, + PXP2_REG_RQ_BW_RD_UBOUND19}, + {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20, + PXP2_REG_RQ_BW_RD_UBOUND20}, + {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22, + PXP2_REG_RQ_BW_RD_UBOUND22}, + {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23, + PXP2_REG_RQ_BW_RD_UBOUND23}, + {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24, + PXP2_REG_RQ_BW_RD_UBOUND24}, + {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25, + PXP2_REG_RQ_BW_RD_UBOUND25}, + {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26, + PXP2_REG_RQ_BW_RD_UBOUND26}, + {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27, + PXP2_REG_RQ_BW_RD_UBOUND27}, + {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, + PXP2_REG_PSWRQ_BW_UB28} +}; + +/* register addresses for write queues */ +static const struct arb_line write_arb_addr[NUM_WR_Q-1] = { +/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, + PXP2_REG_PSWRQ_BW_UB1}, + {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, + PXP2_REG_PSWRQ_BW_UB2}, + {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, + PXP2_REG_PSWRQ_BW_UB3}, + {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, + PXP2_REG_PSWRQ_BW_UB6}, + {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, + PXP2_REG_PSWRQ_BW_UB7}, + {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, + PXP2_REG_PSWRQ_BW_UB8}, + {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, + PXP2_REG_PSWRQ_BW_UB9}, + {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, + PXP2_REG_PSWRQ_BW_UB10}, + {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, + PXP2_REG_PSWRQ_BW_UB11}, +/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, + PXP2_REG_PSWRQ_BW_UB28}, + {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29, + PXP2_REG_RQ_BW_WR_UBOUND29}, + {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30, + PXP2_REG_RQ_BW_WR_UBOUND30} +}; + +static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, + int w_order) +{ + u32 val, i; + + if (r_order > MAX_RD_ORD) { + DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n", + r_order, MAX_RD_ORD); + r_order = MAX_RD_ORD; + } + if (w_order > MAX_WR_ORD) { + DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n", + w_order, MAX_WR_ORD); + w_order = MAX_WR_ORD; + } + if (CHIP_REV_IS_FPGA(bp)) { + DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n"); + w_order = 0; + } + DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order); + + for (i = 0; i < NUM_RD_Q-1; i++) { + REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l); + REG_WR(bp, read_arb_addr[i].add, + read_arb_data[i][r_order].add); + REG_WR(bp, read_arb_addr[i].ubound, + read_arb_data[i][r_order].ubound); + } + + for (i = 0; i < NUM_WR_Q-1; i++) { + if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) || + (write_arb_addr[i].l == 
PXP2_REG_RQ_BW_WR_L30)) { + + REG_WR(bp, write_arb_addr[i].l, + write_arb_data[i][w_order].l); + + REG_WR(bp, write_arb_addr[i].add, + write_arb_data[i][w_order].add); + + REG_WR(bp, write_arb_addr[i].ubound, + write_arb_data[i][w_order].ubound); + } else { + + val = REG_RD(bp, write_arb_addr[i].l); + REG_WR(bp, write_arb_addr[i].l, + val | (write_arb_data[i][w_order].l << 10)); + + val = REG_RD(bp, write_arb_addr[i].add); + REG_WR(bp, write_arb_addr[i].add, + val | (write_arb_data[i][w_order].add << 10)); + + val = REG_RD(bp, write_arb_addr[i].ubound); + REG_WR(bp, write_arb_addr[i].ubound, + val | (write_arb_data[i][w_order].ubound << 7)); + } + } + + val = write_arb_data[NUM_WR_Q-1][w_order].add; + val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10; + val += write_arb_data[NUM_WR_Q-1][w_order].l << 17; + REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val); + + val = read_arb_data[NUM_RD_Q-1][r_order].add; + val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10; + val += read_arb_data[NUM_RD_Q-1][r_order].l << 17; + REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val); + + REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order); + REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order); + REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order); + REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order); + + if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD)) + REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); + + if (CHIP_IS_E3(bp)) + REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order)); + else if (CHIP_IS_E2(bp)) + REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); + else + REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); + + if (!CHIP_IS_E1(bp)) { + /* MPS w_order optimal TH presently TH + * 128 0 0 2 + * 256 1 1 3 + * >=512 2 2 3 + */ + /* DMAE is special */ + if (!CHIP_IS_E1H(bp)) { + /* E2 can use optimal TH */ + val = w_order; + REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val); + } else { + val = ((w_order == 0) ? 2 : 3); + REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); + } + + REG_WR(bp, PXP2_REG_WR_HC_MPS, val); + REG_WR(bp, PXP2_REG_WR_USDM_MPS, val); + REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val); + REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val); + REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val); + REG_WR(bp, PXP2_REG_WR_QM_MPS, val); + REG_WR(bp, PXP2_REG_WR_TM_MPS, val); + REG_WR(bp, PXP2_REG_WR_SRC_MPS, val); + REG_WR(bp, PXP2_REG_WR_DBG_MPS, val); + REG_WR(bp, PXP2_REG_WR_CDU_MPS, val); + } + + /* Validate number of tags supported by device */ +#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980 + val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST); + val &= 0xFF; + if (val <= 0x20) + REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20); +} + +/**************************************************************************** +* ILT management +****************************************************************************/ +/* + * This code hides the low level HW interaction for ILT management and + * configuration. The API consists of a shadow ILT table which is set by the + * driver and a set of routines to use it to configure the HW.
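+ * + * A minimal usage sketch, assuming the routines defined below: allocate + * the shadow-table backing pages with bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC), + * program the HW from the shadow with bnx2x_ilt_init_op(bp, INITOP_SET), + * and tear down with bnx2x_ilt_init_op(bp, INITOP_CLEAR) followed by + * bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE).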
+ * + */ + +/* ILT HW init operations */ + +/* ILT memory management operations */ +#define ILT_MEMOP_ALLOC 0 +#define ILT_MEMOP_FREE 1 + +/* the phys address is shifted right 12 bits and a '1' (valid) bit + * is added at the 53rd bit position; + * then, since this is a wide register(TM), + * we split it into two 32 bit writes + */ +#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) +#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) +#define ILT_RANGE(f, l) (((l) << 10) | f) + +static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, + struct ilt_line *line, u32 size, u8 memop) +{ + if (memop == ILT_MEMOP_FREE) { + BNX2X_ILT_FREE(line->page, line->page_mapping, line->size); + return 0; + } + BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size); + if (!line->page) + return -1; + line->size = size; + return 0; +} + + +static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, + u8 memop) +{ + int i, rc; + struct bnx2x_ilt *ilt = BP_ILT(bp); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + if (!ilt || !ilt->lines) + return -1; + + if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) + return 0; + + for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) { + rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i], + ilt_cli->page_size, memop); + } + return rc; +} + +static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop) +{ + int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop); + if (!rc) + rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop); + if (!rc) + rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); + if (!rc) + rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop); + + return rc; +} + +static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx, + dma_addr_t page_mapping) +{ + u32 reg; + + if (CHIP_IS_E1(bp)) + reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8; + else + reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8; + + bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); +} + +static void bnx2x_ilt_line_init_op(struct bnx2x *bp, + struct bnx2x_ilt *ilt, int idx, u8 initop) +{ + dma_addr_t null_mapping; + int abs_idx = ilt->start_line + idx; + + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping); + break; + case INITOP_CLEAR: + null_mapping = 0; + bnx2x_ilt_line_wr(bp, abs_idx, null_mapping); + break; + } +} + +static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp, + struct ilt_client_info *ilt_cli, + u32 ilt_start, u8 initop) +{ + u32 start_reg = 0; + u32 end_reg = 0; + + /* The boundary is either SET or INIT, + CLEAR => SET and for now SET ~~ INIT */ + + /* find the appropriate regs */ + if (CHIP_IS_E1(bp)) { + switch (ilt_cli->client_num) { + case ILT_CLIENT_CDU: + start_reg = PXP2_REG_PSWRQ_CDU0_L2P; + break; + case ILT_CLIENT_QM: + start_reg = PXP2_REG_PSWRQ_QM0_L2P; + break; + case ILT_CLIENT_SRC: + start_reg = PXP2_REG_PSWRQ_SRC0_L2P; + break; + case ILT_CLIENT_TM: + start_reg = PXP2_REG_PSWRQ_TM0_L2P; + break; + } + REG_WR(bp, start_reg + BP_FUNC(bp)*4, + ILT_RANGE((ilt_start + ilt_cli->start), + (ilt_start + ilt_cli->end))); + } else { + switch (ilt_cli->client_num) { + case ILT_CLIENT_CDU: + start_reg = PXP2_REG_RQ_CDU_FIRST_ILT; + end_reg = PXP2_REG_RQ_CDU_LAST_ILT; + break; + case ILT_CLIENT_QM: + start_reg = PXP2_REG_RQ_QM_FIRST_ILT; + end_reg = PXP2_REG_RQ_QM_LAST_ILT; + break; + case ILT_CLIENT_SRC: + start_reg = PXP2_REG_RQ_SRC_FIRST_ILT; + end_reg = PXP2_REG_RQ_SRC_LAST_ILT; + break; + case
ILT_CLIENT_TM: + start_reg = PXP2_REG_RQ_TM_FIRST_ILT; + end_reg = PXP2_REG_RQ_TM_LAST_ILT; + break; + } + REG_WR(bp, start_reg, (ilt_start + ilt_cli->start)); + REG_WR(bp, end_reg, (ilt_start + ilt_cli->end)); + } +} + +static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, + struct bnx2x_ilt *ilt, + struct ilt_client_info *ilt_cli, + u8 initop) +{ + int i; + + if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) + return; + + for (i = ilt_cli->start; i <= ilt_cli->end; i++) + bnx2x_ilt_line_init_op(bp, ilt, i, initop); + + /* init/clear the ILT boundaries */ + bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop); +} + +static void bnx2x_ilt_client_init_op(struct bnx2x *bp, + struct ilt_client_info *ilt_cli, u8 initop) +{ + struct bnx2x_ilt *ilt = BP_ILT(bp); + + bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop); +} + +static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp, + int cli_num, u8 initop) +{ + struct bnx2x_ilt *ilt = BP_ILT(bp); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + bnx2x_ilt_client_init_op(bp, ilt_cli, initop); +} + +static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) +{ + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop); + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop); + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop); +} + +static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, + u32 psz_reg, u8 initop) +{ + struct bnx2x_ilt *ilt = BP_ILT(bp); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) + return; + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12)); + break; + case INITOP_CLEAR: + break; + } +} + +/* + * called during the init common stage; ILT clients should be initialized + * prior to calling this function + */ +static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop) +{ + bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU, + PXP2_REG_RQ_CDU_P_SIZE, initop); + bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM, + PXP2_REG_RQ_QM_P_SIZE, initop); + bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC, + PXP2_REG_RQ_SRC_P_SIZE, initop); + bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM, + PXP2_REG_RQ_TM_P_SIZE, initop); +} + +/**************************************************************************** +* QM initializations +****************************************************************************/ +#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */ +#define QM_INIT_MIN_CID_COUNT 31 +#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT) + +/* called during init port stage */ +static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count, + u8 initop) +{ + int port = BP_PORT(bp); + + if (QM_INIT(qm_cid_count)) { + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + REG_WR(bp, QM_REG_CONNNUM_0 + port*4, + qm_cid_count/16 - 1); + break; + case INITOP_CLEAR: + break; + } + } +} + +static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count) +{ + int i; + u32 wb_data[2]; + + wb_data[0] = wb_data[1] = 0; + + for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) { + REG_WR(bp, QM_REG_BASEADDR + i*4, + qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); + bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, + wb_data, 2); + + if (CHIP_IS_E1H(bp)) { + REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, + qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); +
bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8, + wb_data, 2); + } + } +} + +/* called during init common stage */ +static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count, + u8 initop) +{ + if (!QM_INIT(qm_cid_count)) + return; + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + bnx2x_qm_set_ptr_table(bp, qm_cid_count); + break; + case INITOP_CLEAR: + break; + } +} + +/**************************************************************************** +* SRC initializations +****************************************************************************/ +#ifdef BCM_CNIC +/* called during init func stage */ +static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, + dma_addr_t t2_mapping, int src_cid_count) +{ + int i; + int port = BP_PORT(bp); + + /* Initialize T2 */ + for (i = 0; i < src_cid_count-1; i++) + t2[i].next = (u64)(t2_mapping + + (i+1)*sizeof(struct src_ent)); + + /* tell the searcher where the T2 table is */ + REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count); + + bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16, + U64_LO(t2_mapping), U64_HI(t2_mapping)); + + bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16, + U64_LO((u64)t2_mapping + + (src_cid_count-1) * sizeof(struct src_ent)), + U64_HI((u64)t2_mapping + + (src_cid_count-1) * sizeof(struct src_ent))); +} +#endif +#endif /* BNX2X_INIT_OPS_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c new file mode 100644 index 0000000..d45b155 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -0,0 +1,12472 @@ +/* Copyright 2008-2011 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Written by Yaniv Rosner + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "bnx2x.h" +#include "bnx2x_cmn.h" + + +/********************************************************/ +#define ETH_HLEN 14 +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) +#define ETH_MIN_PACKET_SIZE 60 +#define ETH_MAX_PACKET_SIZE 1500 +#define ETH_MAX_JUMBO_PACKET_SIZE 9600 +#define MDIO_ACCESS_TIMEOUT 1000 +#define BMAC_CONTROL_RX_ENABLE 2 +#define WC_LANE_MAX 4 +#define I2C_SWITCH_WIDTH 2 +#define I2C_BSC0 0 +#define I2C_BSC1 1 +#define I2C_WA_RETRY_CNT 3 +#define MCPR_IMC_COMMAND_READ_OP 1 +#define MCPR_IMC_COMMAND_WRITE_OP 2 + +/***********************************************************/ +/* Shortcut definitions */ +/***********************************************************/ + +#define NIG_LATCH_BC_ENABLE_MI_INT 0 + +#define NIG_STATUS_EMAC0_MI_INT \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT +#define NIG_STATUS_XGXS0_LINK10G \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G +#define NIG_STATUS_XGXS0_LINK_STATUS \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS +#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE +#define NIG_STATUS_SERDES0_LINK_STATUS \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS +#define NIG_MASK_MI_INT \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT +#define NIG_MASK_XGXS0_LINK10G \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G +#define NIG_MASK_XGXS0_LINK_STATUS \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS +#define NIG_MASK_SERDES0_LINK_STATUS \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS + +#define MDIO_AN_CL73_OR_37_COMPLETE \ + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \ + MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE) + +#define XGXS_RESET_BITS \ + (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB) + +#define SERDES_RESET_BITS \ + (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD) + +#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 +#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 +#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM +#define AUTONEG_PARALLEL \ + SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION +#define AUTONEG_SGMII_FIBER_AUTODET \ + SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT +#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY + +#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ + MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE +#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \ + MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE +#define GP_STATUS_SPEED_MASK \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK +#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M +#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M +#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G +#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G +#define GP_STATUS_5G 
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G +#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G +#define GP_STATUS_10G_HIG \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG +#define GP_STATUS_10G_CX4 \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 +#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX +#define GP_STATUS_10G_KX4 \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 +#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR +#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI +#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS +#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI +#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD +#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD +#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD +#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 +#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD +#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD +#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD +#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD +#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD +#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD +#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD +#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD +#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD +#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD +#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD + + + +/* */ +#define SFP_EEPROM_CON_TYPE_ADDR 0x2 + #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 + #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 + + +#define SFP_EEPROM_COMP_CODE_ADDR 0x3 + #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4) + #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5) + #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6) + +#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 + #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 + #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 + +#define SFP_EEPROM_OPTIONS_ADDR 0x40 + #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 +#define SFP_EEPROM_OPTIONS_SIZE 2 + +#define EDC_MODE_LINEAR 0x0022 +#define EDC_MODE_LIMITING 0x0044 +#define EDC_MODE_PASSIVE_DAC 0x0055 + + +/* BRB thresholds for E2*/ +#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170 +#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 + +#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250 +#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 + +#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10 +#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90 + +#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50 +#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250 + +/* BRB thresholds for E3A0 */ +#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290 +#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 + +#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410 +#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 + +#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10 +#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170 + +#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50 +#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410 + + +/* BRB thresholds for E3B0 2 port mode*/ +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025 +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 + +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025 +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 + +#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10 +#define 
PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025 + +#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50 +#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025 + +/* only for E3B0 */ +#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025 +#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025 + +/* Lossy+Lossless GUARANTEED == GUART */ +#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284 +/* Lossless+Lossless */ +#define PFC_E3B0_2P_PAUSE_LB_GUART 236 +/* Lossy+Lossy */ +#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342 + +/* Lossy+Lossless */ +#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284 +/* Lossless+Lossless */ +#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236 +/* Lossy+Lossy */ +#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336 +#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80 + +#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0 +#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0 + +/* BRB thresholds for E3B0 4 port mode */ +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304 +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 + +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384 +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 + +#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10 +#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304 + +#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50 +#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384 + + +/* only for E3B0 */ +#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304 +#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384 +#define PFC_E3B0_4P_LB_GUART 120 + +#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120 +#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80 + +#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80 +#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120 + +#define DCBX_INVALID_COS (0xFF) + +#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) +#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000) +#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360) +#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720) +#define ETS_E3B0_PBF_MIN_W_VAL (10000) + +#define MAX_PACKET_SIZE (9700) +#define WC_UC_TIMEOUT 100 + +/**********************************************************/ +/* INTERFACE */ +/**********************************************************/ + +#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \ + bnx2x_cl45_write(_bp, _phy, \ + (_phy)->def_md_devad, \ + (_bank + (_addr & 0xf)), \ + _val) + +#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \ + bnx2x_cl45_read(_bp, _phy, \ + (_phy)->def_md_devad, \ + (_bank + (_addr & 0xf)), \ + _val) + +static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) +{ + u32 val = REG_RD(bp, reg); + + val |= bits; + REG_WR(bp, reg, val); + return val; +} + +static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) +{ + u32 val = REG_RD(bp, reg); + + val &= ~bits; + REG_WR(bp, reg, val); + return val; +} + +/******************************************************************/ +/* EPIO/GPIO section */ +/******************************************************************/ +static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en) +{ + u32 epio_mask, gp_oenable; + *en = 0; + /* Sanity check */ + if (epio_pin > 31) { + DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin); + return; + } + + epio_mask = 1 << epio_pin; + /* Set this EPIO to output */ + gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); + REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask); + + *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin; +} +static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en) +{ + u32
epio_mask, gp_output, gp_oenable; + + /* Sanity check */ + if (epio_pin > 31) { + DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin); + return; + } + DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en); + epio_mask = 1 << epio_pin; + /* Set this EPIO to output */ + gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS); + if (en) + gp_output |= epio_mask; + else + gp_output &= ~epio_mask; + + REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output); + + /* Set the value for this EPIO */ + gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); + REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask); +} + +static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val) +{ + if (pin_cfg == PIN_CFG_NA) + return; + if (pin_cfg >= PIN_CFG_EPIO0) { + bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); + } else { + u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; + u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; + bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port); + } +} + +static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val) +{ + if (pin_cfg == PIN_CFG_NA) + return -EINVAL; + if (pin_cfg >= PIN_CFG_EPIO0) { + bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); + } else { + u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; + u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; + *val = bnx2x_get_gpio(bp, gpio_num, gpio_port); + } + return 0; + +} +/******************************************************************/ +/* ETS section */ +/******************************************************************/ +static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) +{ + /* ETS disabled configuration*/ + struct bnx2x *bp = params->bp; + + DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n"); + + /* + * mapping between entry priority to client number (0,1,2 -debug and + * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) + * 3bits client num. + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000 + */ + + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); + /* + * Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - + * COS0 entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); + /* defines which entries (clients) are subjected to WFQ arbitration */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); + /* + * For strict priority entries defines the number of consecutive + * slots for the highest priority. + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + /* + * mapping between the CREDIT_WEIGHT registers and actual client + * numbers + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0); + + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0); + REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); + /* ETS mode disable */ + REG_WR(bp, PBF_REG_ETS_ENABLED, 0); + /* + * If ETS mode is enabled (there is no strict priority) defines a WFQ + * weight for COS0/COS1. 
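+ * (0x2710 is 10000 decimal, i.e. both COSes get the same weight; the + * upper bound 0x989680 written below is 10,000,000.)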
+ */
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+}
+/******************************************************************************
+* Description:
+* Returns the min_w_val to use, according to the line speed.
+******************************************************************************/
+static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
+{
+ u32 min_w_val = 0;
+ /* Calculate min_w_val.*/
+ if (vars->link_up) {
+ if (SPEED_20000 == vars->line_speed)
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ else
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
+ } else
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ /*
+ * If the link isn't up (static configuration, for example), the
+ * link is treated as a 20Gbps link.
+ */
+ return min_w_val;
+}
+/******************************************************************************
+* Description:
+* Derives the credit upper bound from min_w_val.
+******************************************************************************/
+static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
+{
+ const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
+ MAX_PACKET_SIZE);
+ return credit_upper_bound;
+}
+/******************************************************************************
+* Description:
+* Set credit upper bound for NIG.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
+ const struct link_params *params,
+ const u32 min_w_val)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 credit_upper_bound =
+ bnx2x_ets_get_credit_upper_bound(min_w_val);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
+
+ if (0 == port) {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
+ credit_upper_bound);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
+ credit_upper_bound);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
+ credit_upper_bound);
+ }
+}
+/******************************************************************************
+* Description:
+* Returns the NIG ETS registers to their init values, except
+* credit_upper_bound, which isn't used in this configuration (no WFQ is
+* enabled) and is configured according to spec.
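+* Worked example (derived from the code above): at up to 10Gbps,
+* min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS = 1360, so
+* bnx2x_ets_get_credit_upper_bound() returns
+* max(150 * 1360, 9700) = 204000.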
+******************************************************************************/ +static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, + const struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + const u8 port = params->port; + const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars); + /** + * mapping between entry priority to client number (0,1,2 -debug and + * management clients, 3 - COS0 client, 4 - COS1, ... 8 - + * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by + * reset value or init tool + */ + if (port) { + REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210); + REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0); + } else { + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); + } + /** + * For strict priority entries defines the number of consecutive + * slots for the highest priority. + */ + /* TODO_ETS - Should be done by reset value or init tool */ + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : + NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + /** + * mapping between the CREDIT_WEIGHT registers and actual client + * numbers + */ + /* TODO_ETS - Should be done by reset value or init tool */ + if (port) { + /*Port 1 has 6 COS*/ + REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); + REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0); + } else { + /*Port 0 has 9 COS*/ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB, + 0x43210876); + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); + } + + /** + * Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - + * COS0 entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + if (port) + REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f); + else + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff); + /* defines which entries (clients) are subjected to WFQ arbitration */ + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : + NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); + + /** + * Please notice the register address are note continuous and a + * for here is note appropriate.In 2 port mode port0 only COS0-5 + * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4 + * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT + * are never used for WFQ + */ + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0); + if (0 == port) { + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0); + } + + bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val); +} +/****************************************************************************** +* Description: +* Set credit upper bound for PBF. +*. 
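+* Worked example: the PBF side always uses ETS_E3B0_PBF_MIN_W_VAL
+* (10000), so the bound is max(150 * 10000, 9700) = 1500000.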
+******************************************************************************/ +static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf( + const struct link_params *params, + const u32 min_w_val) +{ + struct bnx2x *bp = params->bp; + const u32 credit_upper_bound = + bnx2x_ets_get_credit_upper_bound(min_w_val); + const u8 port = params->port; + u32 base_upper_bound = 0; + u8 max_cos = 0; + u8 i = 0; + /** + * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 + * port mode port1 has COS0-2 that can be used for WFQ. + */ + if (0 == port) { + base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; + max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; + } else { + base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1; + max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1; + } + + for (i = 0; i < max_cos; i++) + REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound); +} + +/****************************************************************************** +* Description: +* Will return the PBF ETS registers to init values.Except +* credit_upper_bound. +* That isn't used in this configuration (No WFQ is enabled) and will be +* configured acording to spec +*. +******************************************************************************/ +static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) +{ + struct bnx2x *bp = params->bp; + const u8 port = params->port; + const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL; + u8 i = 0; + u32 base_weight = 0; + u8 max_cos = 0; + + /** + * mapping between entry priority to client number 0 - COS0 + * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num. + * TODO_ETS - Should be done by reset value or init tool + */ + if (port) + /* 0x688 (|011|0 10|00 1|000) */ + REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , 0x688); + else + /* (10 1|100 |011|0 10|00 1|000) */ + REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , 0x2C688); + + /* TODO_ETS - Should be done by reset value or init tool */ + if (port) + /* 0x688 (|011|0 10|00 1|000)*/ + REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688); + else + /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */ + REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688); + + REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 : + PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 , 0x100); + + + REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 : + PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , 0); + + REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : + PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0); + /** + * In 2 port mode port0 has COS0-5 that can be used for WFQ. + * In 4 port mode port1 has COS0-2 that can be used for WFQ. + */ + if (0 == port) { + base_weight = PBF_REG_COS0_WEIGHT_P0; + max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; + } else { + base_weight = PBF_REG_COS0_WEIGHT_P1; + max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1; + } + + for (i = 0; i < max_cos; i++) + REG_WR(bp, base_weight + (0x4 * i), 0); + + bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf); +} +/****************************************************************************** +* Description: +* E3B0 disable will return basicly the values to init values. +*. 
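+* (For reference, the 0x2C688 client-credit map written above packs an
+* identity mapping in 3-bit fields, LSB first: 000, 001, 010, 011, 100,
+* 101, i.e. client i maps to COS i for the six port-0 PBF clients.)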
+******************************************************************************/
+static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
+ const struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
+ return -EINVAL;
+ }
+
+ bnx2x_ets_e3b0_nig_disabled(params, vars);
+
+ bnx2x_ets_e3b0_pbf_disabled(params);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Disable returns the ETS registers to their init values.
+******************************************************************************/
+int bnx2x_ets_disabled(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+
+ if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
+ bnx2x_ets_e2e3a0_disabled(params);
+ else if (CHIP_IS_E3B0(bp))
+ bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
+ else {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
+ return -EINVAL;
+ }
+
+ return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+* Set the COS mapping to SP and BW; up to this point none of the COS
+* entries are set as SP or BW.
+******************************************************************************/
+static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ const u8 cos_sp_bitmap,
+ const u8 cos_bw_bitmap)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+ const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
+ const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+ const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+ nig_cli_subject2wfq_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+ pbf_cli_subject2wfq_bitmap);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+* are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset isn't suitable.
+******************************************************************************/
+static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
+ const u8 cos_entry,
+ const u32 min_w_val_nig,
+ const u32 min_w_val_pbf,
+ const u16 total_bw,
+ const u8 bw,
+ const u8 port)
+{
+ u32 nig_reg_address_crd_weight = 0;
+ u32 pbf_reg_address_crd_weight = 0;
+ /* Calculate and set BW for this COS */
+ const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
+ const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_address_crd_weight =
+ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+ pbf_reg_address_crd_weight = (port) ?
+ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+ break;
+ case 1:
+ nig_reg_address_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+ pbf_reg_address_crd_weight = (port) ?
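+ /* Example with assumed values: bw = 30, total_bw = 100 and
+ * min_w_val_nig = 1360 give cos_bw_nig = 30 * 1360 / 100 = 408. */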
+ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+ break;
+ case 2:
+ nig_reg_address_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+
+ pbf_reg_address_crd_weight = (port) ?
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+ break;
+ case 3:
+ if (port)
+ return -EINVAL;
+ nig_reg_address_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+ pbf_reg_address_crd_weight =
+ PBF_REG_COS3_WEIGHT_P0;
+ break;
+ case 4:
+ if (port)
+ return -EINVAL;
+ nig_reg_address_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+ pbf_reg_address_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+ break;
+ case 5:
+ if (port)
+ return -EINVAL;
+ nig_reg_address_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+ pbf_reg_address_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+ break;
+ }
+
+ REG_WR(bp, nig_reg_address_crd_weight, cos_bw_nig);
+
+ REG_WR(bp, pbf_reg_address_crd_weight, cos_bw_pbf);
+
+ return 0;
+}
+/******************************************************************************
+* Description:
+* Calculate the total BW. A value of 0 isn't legal.
+******************************************************************************/
+static int bnx2x_ets_e3b0_get_total_bw(
+ const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ u16 *total_bw)
+{
+ struct bnx2x *bp = params->bp;
+ u8 cos_idx = 0;
+
+ *total_bw = 0;
+ /* Calculate total BW requested */
+ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
+ if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+
+ if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+ "was set to 0\n");
+ return -EINVAL;
+ }
+ *total_bw +=
+ ets_params->cos[cos_idx].params.bw_params.bw;
+ }
+ }
+
+ /* Check total BW is valid */
+ if ((100 != *total_bw) || (0 == *total_bw)) {
+ if (0 == *total_bw) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW "
+ "shouldn't be 0\n");
+ return -EINVAL;
+ }
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW should be "
+ "100\n");
+ /*
+ * We can handle a case where the BW isn't 100; this can
+ * happen if the TCs are joined.
+ */
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Invalidate all the sp_pri_to_cos entries.
+******************************************************************************/
+static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
+{
+ u8 pri = 0;
+ for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
+ sp_pri_to_cos[pri] = DCBX_INVALID_COS;
+}
+/******************************************************************************
+* Description:
+* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+* according to sp_pri_to_cos.
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
+ u8 *sp_pri_to_cos, const u8 pri,
+ const u8 cos_entry)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 max_num_of_cos = (port) ?
DCBX_E3B0_MAX_NUM_COS_PORT1 : + DCBX_E3B0_MAX_NUM_COS_PORT0; + + if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " + "parameter There can't be two COS's with" + "the same strict pri\n"); + return -EINVAL; + } + + if (pri > max_num_of_cos) { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid" + "parameter Illegal strict priority\n"); + return -EINVAL; + } + + sp_pri_to_cos[pri] = cos_entry; + return 0; + +} + +/****************************************************************************** +* Description: +* Returns the correct value according to COS and priority in +* the sp_pri_cli register. +*. +******************************************************************************/ +static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset, + const u8 pri_set, + const u8 pri_offset, + const u8 entry_size) +{ + u64 pri_cli_nig = 0; + pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size * + (pri_set + pri_offset)); + + return pri_cli_nig; +} +/****************************************************************************** +* Description: +* Returns the correct value according to COS and priority in the +* sp_pri_cli register for NIG. +*. +******************************************************************************/ +static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set) +{ + /* MCP Dbg0 and dbg1 are always with higher strict pri*/ + const u8 nig_cos_offset = 3; + const u8 nig_pri_offset = 3; + + return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set, + nig_pri_offset, 4); + +} +/****************************************************************************** +* Description: +* Returns the correct value according to COS and priority in the +* sp_pri_cli register for PBF. +*. +******************************************************************************/ +static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set) +{ + const u8 pbf_cos_offset = 0; + const u8 pbf_pri_offset = 0; + + return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set, + pbf_pri_offset, 3); + +} + +/****************************************************************************** +* Description: +* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers +* according to sp_pri_to_cos.(which COS has higher priority) +*. +******************************************************************************/ +static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, + u8 *sp_pri_to_cos) +{ + struct bnx2x *bp = params->bp; + u8 i = 0; + const u8 port = params->port; + /* MCP Dbg0 and dbg1 are always with higher strict pri*/ + u64 pri_cli_nig = 0x210; + u32 pri_cli_pbf = 0x0; + u8 pri_set = 0; + u8 pri_bitmask = 0; + const u8 max_num_of_cos = (port) ? 
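+ /* Worked example for the helpers above: on the NIG side, cos = 0
+ * with pri_set = 0 yields (0 + 3) << (4 * (0 + 3)) = 0x3000, which
+ * ORed into the 0x210 base (MCP/DBG0/DBG1) gives 0x3210. */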
DCBX_E3B0_MAX_NUM_COS_PORT1 : + DCBX_E3B0_MAX_NUM_COS_PORT0; + + u8 cos_bit_to_set = (1 << max_num_of_cos) - 1; + + /* Set all the strict priority first */ + for (i = 0; i < max_num_of_cos; i++) { + if (DCBX_INVALID_COS != sp_pri_to_cos[i]) { + if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) { + DP(NETIF_MSG_LINK, + "bnx2x_ets_e3b0_sp_set_pri_cli_reg " + "invalid cos entry\n"); + return -EINVAL; + } + + pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig( + sp_pri_to_cos[i], pri_set); + + pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf( + sp_pri_to_cos[i], pri_set); + pri_bitmask = 1 << sp_pri_to_cos[i]; + /* COS is used remove it from bitmap.*/ + if (0 == (pri_bitmask & cos_bit_to_set)) { + DP(NETIF_MSG_LINK, + "bnx2x_ets_e3b0_sp_set_pri_cli_reg " + "invalid There can't be two COS's with" + " the same strict pri\n"); + return -EINVAL; + } + cos_bit_to_set &= ~pri_bitmask; + pri_set++; + } + } + + /* Set all the Non strict priority i= COS*/ + for (i = 0; i < max_num_of_cos; i++) { + pri_bitmask = 1 << i; + /* Check if COS was already used for SP */ + if (pri_bitmask & cos_bit_to_set) { + /* COS wasn't used for SP */ + pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig( + i, pri_set); + + pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf( + i, pri_set); + /* COS is used remove it from bitmap.*/ + cos_bit_to_set &= ~pri_bitmask; + pri_set++; + } + } + + if (pri_set != max_num_of_cos) { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all " + "entries were set\n"); + return -EINVAL; + } + + if (port) { + /* Only 6 usable clients*/ + REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, + (u32)pri_cli_nig); + + REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , pri_cli_pbf); + } else { + /* Only 9 usable clients*/ + const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig); + const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF); + + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, + pri_cli_nig_lsb); + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, + pri_cli_nig_msb); + + REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , pri_cli_pbf); + } + return 0; +} + +/****************************************************************************** +* Description: +* Configure the COS to ETS according to BW and SP settings. +******************************************************************************/ +int bnx2x_ets_e3b0_config(const struct link_params *params, + const struct link_vars *vars, + const struct bnx2x_ets_params *ets_params) +{ + struct bnx2x *bp = params->bp; + int bnx2x_status = 0; + const u8 port = params->port; + u16 total_bw = 0; + const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars); + const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL; + u8 cos_bw_bitmap = 0; + u8 cos_sp_bitmap = 0; + u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0}; + const u8 max_num_of_cos = (port) ? 
DCBX_E3B0_MAX_NUM_COS_PORT1 : + DCBX_E3B0_MAX_NUM_COS_PORT0; + u8 cos_entry = 0; + + if (!CHIP_IS_E3B0(bp)) { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_disabled the chip isn't E3B0" + "\n"); + return -EINVAL; + } + + if ((ets_params->num_of_cos > max_num_of_cos)) { + DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS " + "isn't supported\n"); + return -EINVAL; + } + + /* Prepare sp strict priority parameters*/ + bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos); + + /* Prepare BW parameters*/ + bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params, + &total_bw); + if (0 != bnx2x_status) { + DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config get_total_bw failed " + "\n"); + return -EINVAL; + } + + /** + * Upper bound is set according to current link speed (min_w_val + * should be the same for upper bound and COS credit val). + */ + bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); + bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf); + + + for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { + if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) { + cos_bw_bitmap |= (1 << cos_entry); + /** + * The function also sets the BW in HW(not the mappin + * yet) + */ + bnx2x_status = bnx2x_ets_e3b0_set_cos_bw( + bp, cos_entry, min_w_val_nig, min_w_val_pbf, + total_bw, + ets_params->cos[cos_entry].params.bw_params.bw, + port); + } else if (bnx2x_cos_state_strict == + ets_params->cos[cos_entry].state){ + cos_sp_bitmap |= (1 << cos_entry); + + bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set( + params, + sp_pri_to_cos, + ets_params->cos[cos_entry].params.sp_params.pri, + cos_entry); + + } else { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config cos state not" + " valid\n"); + return -EINVAL; + } + if (0 != bnx2x_status) { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw " + "failed\n"); + return bnx2x_status; + } + } + + /* Set SP register (which COS has higher priority) */ + bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params, + sp_pri_to_cos); + + if (0 != bnx2x_status) { + DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg " + "failed\n"); + return bnx2x_status; + } + + /* Set client mapping of BW and strict */ + bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params, + cos_sp_bitmap, + cos_bw_bitmap); + + if (0 != bnx2x_status) { + DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n"); + return bnx2x_status; + } + return 0; +} +static void bnx2x_ets_bw_limit_common(const struct link_params *params) +{ + /* ETS disabled configuration */ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); + /* + * defines which entries (clients) are subjected to WFQ arbitration + * COS0 0x8 + * COS1 0x10 + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); + /* + * mapping between the ARB_CREDIT_WEIGHT registers and actual + * client numbers (WEIGHT_0 does not actually have to represent + * client 0) + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010 + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A); + + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, + ETS_BW_LIMIT_CREDIT_UPPER_BOUND); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, + ETS_BW_LIMIT_CREDIT_UPPER_BOUND); + + /* ETS mode enabled*/ + REG_WR(bp, PBF_REG_ETS_ENABLED, 1); + + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); + /* + * Bitmap of 5bits length. 
Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 + * entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); + + /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/ + REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, + ETS_BW_LIMIT_CREDIT_UPPER_BOUND); + REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, + ETS_BW_LIMIT_CREDIT_UPPER_BOUND); +} + +void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, + const u32 cos1_bw) +{ + /* ETS disabled configuration*/ + struct bnx2x *bp = params->bp; + const u32 total_bw = cos0_bw + cos1_bw; + u32 cos0_credit_weight = 0; + u32 cos1_credit_weight = 0; + + DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); + + if ((0 == total_bw) || + (0 == cos0_bw) || + (0 == cos1_bw)) { + DP(NETIF_MSG_LINK, "Total BW can't be zero\n"); + return; + } + + cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/ + total_bw; + cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/ + total_bw; + + bnx2x_ets_bw_limit_common(params); + + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight); + + REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight); + REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight); +} + +int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) +{ + /* ETS disabled configuration*/ + struct bnx2x *bp = params->bp; + u32 val = 0; + + DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); + /* + * Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, + * 3 - COS0 entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); + /* + * For strict priority entries defines the number of consecutive slots + * for the highest priority. + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + /* ETS mode disable */ + REG_WR(bp, PBF_REG_ETS_ENABLED, 0); + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100); + + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); + + /* + * mapping between entry priority to client number (0,1,2 -debug and + * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) + * 3bits client num. + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 + * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 + */ + val = (0 == strict_cos) ? 0x2318 : 0x22E0; + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); + + return 0; +} +/******************************************************************/ +/* PFC section */ +/******************************************************************/ + +static void bnx2x_update_pfc_xmac(struct link_params *params, + struct link_vars *vars, + u8 is_lb) +{ + struct bnx2x *bp = params->bp; + u32 xmac_base; + u32 pause_val, pfc0_val, pfc1_val; + + /* XMAC base adrr */ + xmac_base = (params->port) ? 
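+ /* (For reference, the 0x2318 written above for strict_cos == 0 packs
+ * five 3-bit fields, LSB first: 000 MCP, 011 cos0, 100 cos1, 001 dbg1,
+ * 010 dbg0, i.e. 0b010_001_100_011_000 = 0x2318.) */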
GRCBASE_XMAC1 : GRCBASE_XMAC0; + + /* Initialize pause and pfc registers */ + pause_val = 0x18000; + pfc0_val = 0xFFFF8000; + pfc1_val = 0x2; + + /* No PFC support */ + if (!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED)) { + + /* + * RX flow control - Process pause frame in receive direction + */ + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) + pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; + + /* + * TX flow control - Send pause packet when buffer is full + */ + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) + pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; + } else {/* PFC support */ + pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN | + XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN | + XMAC_PFC_CTRL_HI_REG_RX_PFC_EN | + XMAC_PFC_CTRL_HI_REG_TX_PFC_EN; + } + + /* Write pause and PFC registers */ + REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); + + + /* Set MAC address for source TX Pause/PFC frames */ + REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO, + ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + (params->mac_addr[5]))); + REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI, + ((params->mac_addr[0] << 8) | + (params->mac_addr[1]))); + + udelay(30); +} + + +static void bnx2x_emac_get_pfc_stat(struct link_params *params, + u32 pfc_frames_sent[2], + u32 pfc_frames_received[2]) +{ + /* Read pfc statistic */ + struct bnx2x *bp = params->bp; + u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + u32 val_xon = 0; + u32 val_xoff = 0; + + DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n"); + + /* PFC received frames */ + val_xoff = REG_RD(bp, emac_base + + EMAC_REG_RX_PFC_STATS_XOFF_RCVD); + val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT; + val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD); + val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT; + + pfc_frames_received[0] = val_xon + val_xoff; + + /* PFC received sent */ + val_xoff = REG_RD(bp, emac_base + + EMAC_REG_RX_PFC_STATS_XOFF_SENT); + val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT; + val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT); + val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT; + + pfc_frames_sent[0] = val_xon + val_xoff; +} + +/* Read pfc statistic*/ +void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, + u32 pfc_frames_sent[2], + u32 pfc_frames_received[2]) +{ + /* Read pfc statistic */ + struct bnx2x *bp = params->bp; + + DP(NETIF_MSG_LINK, "pfc statistic\n"); + + if (!vars->link_up) + return; + + if (MAC_TYPE_EMAC == vars->mac_type) { + DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n"); + bnx2x_emac_get_pfc_stat(params, pfc_frames_sent, + pfc_frames_received); + } +} +/******************************************************************/ +/* MAC/PBF section */ +/******************************************************************/ +static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) +{ + u32 mode, emac_base; + /** + * Set clause 45 mode, slow down the MDIO clock to 2.5MHz + * (a value of 49==0x31) and make sure that the AUTO poll is off + */ + + if (CHIP_IS_E2(bp)) + emac_base = GRCBASE_EMAC0; + else + emac_base = (port) ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); + mode &= ~(EMAC_MDIO_MODE_AUTO_POLL | + EMAC_MDIO_MODE_CLOCK_CNT); + if (USES_WARPCORE(bp)) + mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); + else + mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); + + mode |= (EMAC_MDIO_MODE_CLAUSE_45); + REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode); + + udelay(40); +} + +static void bnx2x_emac_init(struct link_params *params, + struct link_vars *vars) +{ + /* reset and unreset the emac core */ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + u32 val; + u16 timeout; + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + udelay(5); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + + /* init emac - use read-modify-write */ + /* self clear reset */ + val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); + EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); + + timeout = 200; + do { + val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); + DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val); + if (!timeout) { + DP(NETIF_MSG_LINK, "EMAC timeout!\n"); + return; + } + timeout--; + } while (val & EMAC_MODE_RESET); + bnx2x_set_mdio_clk(bp, params->chip_id, port); + /* Set mac address */ + val = ((params->mac_addr[0] << 8) | + params->mac_addr[1]); + EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val); + + val = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + params->mac_addr[5]); + EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val); +} + +static void bnx2x_set_xumac_nig(struct link_params *params, + u16 tx_pause_en, + u8 enable) +{ + struct bnx2x *bp = params->bp; + + REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN, + enable); + REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN, + enable); + REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN : + NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); +} + +static void bnx2x_umac_enable(struct link_params *params, + struct link_vars *vars, u8 lb) +{ + u32 val; + u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; + struct bnx2x *bp = params->bp; + /* Reset UMAC */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); + usleep_range(1000, 1000); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); + + DP(NETIF_MSG_LINK, "enabling UMAC\n"); + + /** + * This register determines on which events the MAC will assert + * error on the i/f to the NIG along w/ EOP. + */ + + /** + * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK + + * params->port*0x14, 0xfffff. 
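+ * As an aside, the MAC address registers written below pack the
+ * address as in this assumed example: for 00:11:22:33:44:55,
+ * UMAC_REG_MAC_ADDR0 = 0x22334455 and UMAC_REG_MAC_ADDR1 = 0x0011.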
+ */ + /* This register opens the gate for the UMAC despite its name */ + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); + + val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN | + UMAC_COMMAND_CONFIG_REG_PAD_EN | + UMAC_COMMAND_CONFIG_REG_SW_RESET | + UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK; + switch (vars->line_speed) { + case SPEED_10: + val |= (0<<2); + break; + case SPEED_100: + val |= (1<<2); + break; + case SPEED_1000: + val |= (2<<2); + break; + case SPEED_2500: + val |= (3<<2); + break; + default: + DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n", + vars->line_speed); + break; + } + if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE; + + if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) + val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE; + + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); + udelay(50); + + /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ + REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, + ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + (params->mac_addr[5]))); + REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, + ((params->mac_addr[0] << 8) | + (params->mac_addr[1]))); + + /* Enable RX and TX */ + val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN; + val |= UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA; + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); + udelay(50); + + /* Remove SW Reset */ + val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET; + + /* Check loopback mode */ + if (lb) + val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); + + /* + * Maximum Frame Length (RW). Defines a 14-Bit maximum frame + * length used by the MAC receive logic to check frames. + */ + REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); + bnx2x_set_xumac_nig(params, + ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); + vars->mac_type = MAC_TYPE_UMAC; + +} + +static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) +{ + u32 port4mode_ovwr_val; + /* Check 4-port override enabled */ + port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); + if (port4mode_ovwr_val & (1<<0)) { + /* Return 4-port mode override value */ + return ((port4mode_ovwr_val & (1<<1)) == (1<<1)); + } + /* Return 4-port mode from input pin */ + return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN); +} + +/* Define the XMAC mode */ +static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed) +{ + u32 is_port4mode = bnx2x_is_4_port_mode(bp); + + /** + * In 4-port mode, need to set the mode only once, so if XMAC is + * already out of reset, it means the mode has already been set, + * and it must not* reset the XMAC again, since it controls both + * ports of the path + **/ + + if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) & + MISC_REGISTERS_RESET_REG_2_XMAC)) { + DP(NETIF_MSG_LINK, "XMAC already out of reset" + " in 4-port mode\n"); + return; + } + + /* Hard reset */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + MISC_REGISTERS_RESET_REG_2_XMAC); + usleep_range(1000, 1000); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + MISC_REGISTERS_RESET_REG_2_XMAC); + if (is_port4mode) { + DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n"); + + /* Set the number of ports on the system side to up to 2 */ + REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); + + /* Set the number of ports on the Warp Core to 10G */ + REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); + } else { + /* Set the number of ports on the system side to 1 */ + REG_WR(bp, 
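+ /* (Mode encodings, per the writes in this function: CORE_PORT_MODE
+ * 1 = two ports per path, 0 = one port; PHY_PORT_MODE 3 = 10G,
+ * 1 = 20G.) */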
MISC_REG_XMAC_CORE_PORT_MODE, 0); + if (max_speed == SPEED_10000) { + DP(NETIF_MSG_LINK, "Init XMAC to 10G x 1" + " port per path\n"); + /* Set the number of ports on the Warp Core to 10G */ + REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); + } else { + DP(NETIF_MSG_LINK, "Init XMAC to 20G x 2 ports" + " per path\n"); + /* Set the number of ports on the Warp Core to 20G */ + REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1); + } + } + /* Soft reset */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); + usleep_range(1000, 1000); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); + +} + +static void bnx2x_xmac_disable(struct link_params *params) +{ + u8 port = params->port; + struct bnx2x *bp = params->bp; + u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + + if (REG_RD(bp, MISC_REG_RESET_REG_2) & + MISC_REGISTERS_RESET_REG_2_XMAC) { + /* + * Send an indication to change the state in the NIG back to XON + * Clearing this bit enables the next set of this bit to get + * rising edge + */ + pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, + (pfc_ctrl & ~(1<<1))); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, + (pfc_ctrl | (1<<1))); + DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); + REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); + usleep_range(1000, 1000); + bnx2x_set_xumac_nig(params, 0, 0); + REG_WR(bp, xmac_base + XMAC_REG_CTRL, + XMAC_CTRL_REG_SOFT_RESET); + } +} + +static int bnx2x_xmac_enable(struct link_params *params, + struct link_vars *vars, u8 lb) +{ + u32 val, xmac_base; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "enabling XMAC\n"); + + xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + + bnx2x_xmac_init(bp, vars->line_speed); + + /* + * This register determines on which events the MAC will assert + * error on the i/f to the NIG along w/ EOP. + */ + + /* + * This register tells the NIG whether to send traffic to UMAC + * or XMAC + */ + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); + + /* Set Max packet size */ + REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); + + /* CRC append for Tx packets */ + REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800); + + /* update PFC */ + bnx2x_update_pfc_xmac(params, vars, 0); + + /* Enable TX and RX */ + val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; + + /* Check loopback mode */ + if (lb) + val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; + REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); + bnx2x_set_xumac_nig(params, + ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); + + vars->mac_type = MAC_TYPE_XMAC; + + return 0; +} +static int bnx2x_emac_enable(struct link_params *params, + struct link_vars *vars, u8 lb) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 emac_base = port ? 
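+ /* Note: the XMAC RX max size programmed above, 0x2710, is 10000
+ * decimal, large enough for jumbo frames. */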
GRCBASE_EMAC1 : GRCBASE_EMAC0; + u32 val; + + DP(NETIF_MSG_LINK, "enabling EMAC\n"); + + /* Disable BMAC */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* enable emac and not bmac */ + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); + + /* ASIC */ + if (vars->phy_flags & PHY_XGXS_FLAG) { + u32 ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + DP(NETIF_MSG_LINK, "XGXS\n"); + /* select the master lanes (out of 0-3) */ + REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane); + /* select XGXS */ + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); + + } else { /* SerDes */ + DP(NETIF_MSG_LINK, "SerDes\n"); + /* select SerDes */ + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0); + } + + bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_RESET); + bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, + EMAC_TX_MODE_RESET); + + if (CHIP_REV_IS_SLOW(bp)) { + /* config GMII mode */ + val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); + EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII)); + } else { /* ASIC */ + /* pause enable/disable */ + bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_FLOW_EN); + + bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, + (EMAC_TX_MODE_EXT_PAUSE_EN | + EMAC_TX_MODE_FLOW_EN)); + if (!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED)) { + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) + bnx2x_bits_en(bp, emac_base + + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_FLOW_EN); + + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) + bnx2x_bits_en(bp, emac_base + + EMAC_REG_EMAC_TX_MODE, + (EMAC_TX_MODE_EXT_PAUSE_EN | + EMAC_TX_MODE_FLOW_EN)); + } else + bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, + EMAC_TX_MODE_FLOW_EN); + } + + /* KEEP_VLAN_TAG, promiscuous */ + val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); + val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; + + /* + * Setting this bit causes MAC control frames (except for pause + * frames) to be passed on for processing. This setting has no + * affect on the operation of the pause frames. This bit effects + * all packets regardless of RX Parser packet sorting logic. + * Turn the PFC off to make sure we are in Xon state before + * enabling it. 
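+ * (The PFC parameters written below use opcode 0x0101, the standard
+ * priority-flow-control opcode, with a 0x00ff mask enabling all
+ * eight priorities.)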
+ */ + EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { + DP(NETIF_MSG_LINK, "PFC is enabled\n"); + /* Enable PFC again */ + EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, + EMAC_REG_RX_PFC_MODE_RX_EN | + EMAC_REG_RX_PFC_MODE_TX_EN | + EMAC_REG_RX_PFC_MODE_PRIORITIES); + + EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM, + ((0x0101 << + EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) | + (0x00ff << + EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT))); + val |= EMAC_RX_MODE_KEEP_MAC_CONTROL; + } + EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val); + + /* Set Loopback */ + val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); + if (lb) + val |= 0x810; + else + val &= ~0x810; + EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); + + /* enable emac */ + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); + + /* enable emac for jumbo packets */ + EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, + (EMAC_RX_MTU_SIZE_JUMBO_ENA | + (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); + + /* strip CRC */ + REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1); + + /* disable the NIG in/out to the bmac */ + REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0); + + /* enable the NIG in/out to the emac */ + REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); + val = 0; + if ((params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val = 1; + + REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); + REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); + + REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0); + + vars->mac_type = MAC_TYPE_EMAC; + return 0; +} + +static void bnx2x_update_pfc_bmac1(struct link_params *params, + struct link_vars *vars) +{ + u32 wb_data[2]; + struct bnx2x *bp = params->bp; + u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + + u32 val = 0x14; + if ((!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED)) && + (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) + /* Enable BigMAC to react on received Pause packets */ + val |= (1<<5); + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); + + /* tx control */ + val = 0xc0; + if (!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) && + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val |= 0x800000; + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2); +} + +static void bnx2x_update_pfc_bmac2(struct link_params *params, + struct link_vars *vars, + u8 is_lb) +{ + /* + * Set rx control: Strip CRC and enable BigMAC to relay + * control packets to the system as well + */ + u32 wb_data[2]; + struct bnx2x *bp = params->bp; + u32 bmac_addr = params->port ? 
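+ /* BigMAC registers are 64 bits wide, hence the two-dword wb_data
+ * buffer and the REG_WR_DMAE(..., 2) writes used throughout here. */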
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + u32 val = 0x14; + + if ((!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED)) && + (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) + /* Enable BigMAC to react on received Pause packets */ + val |= (1<<5); + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2); + udelay(30); + + /* Tx control */ + val = 0xc0; + if (!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) && + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val |= 0x800000; + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2); + + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { + DP(NETIF_MSG_LINK, "PFC is enabled\n"); + /* Enable PFC RX & TX & STATS and set 8 COS */ + wb_data[0] = 0x0; + wb_data[0] |= (1<<0); /* RX */ + wb_data[0] |= (1<<1); /* TX */ + wb_data[0] |= (1<<2); /* Force initial Xon */ + wb_data[0] |= (1<<3); /* 8 cos */ + wb_data[0] |= (1<<5); /* STATS */ + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, + wb_data, 2); + /* Clear the force Xon */ + wb_data[0] &= ~(1<<2); + } else { + DP(NETIF_MSG_LINK, "PFC is disabled\n"); + /* disable PFC RX & TX & STATS and set 8 COS */ + wb_data[0] = 0x8; + wb_data[1] = 0; + } + + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); + + /* + * Set Time (based unit is 512 bit time) between automatic + * re-sending of PP packets amd enable automatic re-send of + * Per-Priroity Packet as long as pp_gen is asserted and + * pp_disable is low. + */ + val = 0x8000; + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + val |= (1<<16); /* enable automatic re-send */ + + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, + wb_data, 2); + + /* mac control */ + val = 0x3; /* Enable RX and TX */ + if (is_lb) { + val |= 0x4; /* Local loopback */ + DP(NETIF_MSG_LINK, "enable bmac loopback\n"); + } + /* When PFC enabled, Pass pause frames towards the NIG. 
*/ + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + val |= ((1<<6)|(1<<5)); + + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); +} + + +/* PFC BRB internal port configuration params */ +struct bnx2x_pfc_brb_threshold_val { + u32 pause_xoff; + u32 pause_xon; + u32 full_xoff; + u32 full_xon; +}; + +struct bnx2x_pfc_brb_e3b0_val { + u32 full_lb_xoff_th; + u32 full_lb_xon_threshold; + u32 lb_guarantied; + u32 mac_0_class_t_guarantied; + u32 mac_0_class_t_guarantied_hyst; + u32 mac_1_class_t_guarantied; + u32 mac_1_class_t_guarantied_hyst; +}; + +struct bnx2x_pfc_brb_th_val { + struct bnx2x_pfc_brb_threshold_val pauseable_th; + struct bnx2x_pfc_brb_threshold_val non_pauseable_th; +}; +static int bnx2x_pfc_brb_get_config_params( + struct link_params *params, + struct bnx2x_pfc_brb_th_val *config_val) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n"); + if (CHIP_IS_E2(bp)) { + config_val->pauseable_th.pause_xoff = + PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + config_val->pauseable_th.pause_xon = + PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE; + config_val->pauseable_th.full_xoff = + PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; + config_val->pauseable_th.full_xon = + PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; + /* non pause able*/ + config_val->non_pauseable_th.pause_xoff = + PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.pause_xon = + PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xoff = + PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xon = + PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } else if (CHIP_IS_E3A0(bp)) { + config_val->pauseable_th.pause_xoff = + PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + config_val->pauseable_th.pause_xon = + PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE; + config_val->pauseable_th.full_xoff = + PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; + config_val->pauseable_th.full_xon = + PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; + /* non pause able*/ + config_val->non_pauseable_th.pause_xoff = + PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.pause_xon = + PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xoff = + PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xon = + PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } else if (CHIP_IS_E3B0(bp)) { + if (params->phy[INT_PHY].flags & + FLAGS_4_PORT_MODE) { + config_val->pauseable_th.pause_xoff = + PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + config_val->pauseable_th.pause_xon = + PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE; + config_val->pauseable_th.full_xoff = + PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; + config_val->pauseable_th.full_xon = + PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; + /* non pause able*/ + config_val->non_pauseable_th.pause_xoff = + PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.pause_xon = + PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xoff = + PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xon = + PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } else { + config_val->pauseable_th.pause_xoff = + PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + config_val->pauseable_th.pause_xon = + PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE; + config_val->pauseable_th.full_xoff = + PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; + config_val->pauseable_th.full_xon = + PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; + /* non pause able*/ 
+ config_val->non_pauseable_th.pause_xoff = + PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.pause_xon = + PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xoff = + PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xon = + PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } + } else + return -EINVAL; + + return 0; +} + + +static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params, + struct bnx2x_pfc_brb_e3b0_val + *e3b0_val, + u32 cos0_pauseable, + u32 cos1_pauseable) +{ + if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) { + e3b0_val->full_lb_xoff_th = + PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR; + e3b0_val->full_lb_xon_threshold = + PFC_E3B0_4P_BRB_FULL_LB_XON_THR; + e3b0_val->lb_guarantied = + PFC_E3B0_4P_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART; + e3b0_val->mac_0_class_t_guarantied_hyst = + PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST; + e3b0_val->mac_1_class_t_guarantied = + PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART; + e3b0_val->mac_1_class_t_guarantied_hyst = + PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST; + } else { + e3b0_val->full_lb_xoff_th = + PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR; + e3b0_val->full_lb_xon_threshold = + PFC_E3B0_2P_BRB_FULL_LB_XON_THR; + e3b0_val->mac_0_class_t_guarantied_hyst = + PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST; + e3b0_val->mac_1_class_t_guarantied = + PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART; + e3b0_val->mac_1_class_t_guarantied_hyst = + PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST; + + if (cos0_pauseable != cos1_pauseable) { + /* nonpauseable= Lossy + pauseable = Lossless*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_MIX_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART; + } else if (cos0_pauseable) { + /* Lossless +Lossless*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART; + } else { + /* Lossy +Lossy*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_NON_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART; + } + } +} +static int bnx2x_update_pfc_brb(struct link_params *params, + struct link_vars *vars, + struct bnx2x_nig_brb_pfc_port_params + *pfc_params) +{ + struct bnx2x *bp = params->bp; + struct bnx2x_pfc_brb_th_val config_val = { {0} }; + struct bnx2x_pfc_brb_threshold_val *reg_th_config = + &config_val.pauseable_th; + struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0}; + int set_pfc = params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED; + int bnx2x_status = 0; + u8 port = params->port; + + /* default - pause configuration */ + reg_th_config = &config_val.pauseable_th; + bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val); + if (0 != bnx2x_status) + return bnx2x_status; + + if (set_pfc && pfc_params) + /* First COS */ + if (!pfc_params->cos0_pauseable) + reg_th_config = &config_val.non_pauseable_th; + /* + * The number of free blocks below which the pause signal to class 0 + * of MAC #n is asserted. n=0,1 + */ + REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 : + BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , + reg_th_config->pause_xoff); + /* + * The number of free blocks above which the pause signal to class 0 + * of MAC #n is de-asserted. n=0,1 + */ + REG_WR(bp, (port) ? 
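+ /* The XOFF/XON threshold pairs below form a hysteresis on BRB free
+ * blocks: the signal is asserted when free blocks drop below the
+ * XOFF threshold and de-asserted once they rise above XON. */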
BRB1_REG_PAUSE_0_XON_THRESHOLD_1 : + BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon); + /* + * The number of free blocks below which the full signal to class 0 + * of MAC #n is asserted. n=0,1 + */ + REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 : + BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff); + /* + * The number of free blocks above which the full signal to class 0 + * of MAC #n is de-asserted. n=0,1 + */ + REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : + BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon); + + if (set_pfc && pfc_params) { + /* Second COS */ + if (pfc_params->cos1_pauseable) + reg_th_config = &config_val.pauseable_th; + else + reg_th_config = &config_val.non_pauseable_th; + /* + * The number of free blocks below which the pause signal to + * class 1 of MAC #n is asserted. n=0,1 + **/ + REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : + BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, + reg_th_config->pause_xoff); + /* + * The number of free blocks above which the pause signal to + * class 1 of MAC #n is de-asserted. n=0,1 + */ + REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : + BRB1_REG_PAUSE_1_XON_THRESHOLD_0, + reg_th_config->pause_xon); + /* + * The number of free blocks below which the full signal to + * class 1 of MAC #n is asserted. n=0,1 + */ + REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : + BRB1_REG_FULL_1_XOFF_THRESHOLD_0, + reg_th_config->full_xoff); + /* + * The number of free blocks above which the full signal to + * class 1 of MAC #n is de-asserted. n=0,1 + */ + REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : + BRB1_REG_FULL_1_XON_THRESHOLD_0, + reg_th_config->full_xon); + + + if (CHIP_IS_E3B0(bp)) { + /*Should be done by init tool */ + /* + * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD + * reset value + * 944 + */ + + /** + * The hysteresis on the guarantied buffer space for the Lb port + * before signaling XON. + **/ + REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80); + + bnx2x_pfc_brb_get_e3b0_config_params( + params, + &e3b0_val, + pfc_params->cos0_pauseable, + pfc_params->cos1_pauseable); + /** + * The number of free blocks below which the full signal to the + * LB port is asserted. + */ + REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, + e3b0_val.full_lb_xoff_th); + /** + * The number of free blocks above which the full signal to the + * LB port is de-asserted. + */ + REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, + e3b0_val.full_lb_xon_threshold); + /** + * The number of blocks guarantied for the MAC #n port. n=0,1 + */ + + /*The number of blocks guarantied for the LB port.*/ + REG_WR(bp, BRB1_REG_LB_GUARANTIED, + e3b0_val.lb_guarantied); + + /** + * The number of blocks guarantied for the MAC #n port. + */ + REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, + 2 * e3b0_val.mac_0_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, + 2 * e3b0_val.mac_1_class_t_guarantied); + /** + * The number of blocks guarantied for class #t in MAC0. t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, + e3b0_val.mac_0_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, + e3b0_val.mac_0_class_t_guarantied); + /** + * The hysteresis on the guarantied buffer space for class in + * MAC0. 
t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, + e3b0_val.mac_0_class_t_guarantied_hyst); + REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, + e3b0_val.mac_0_class_t_guarantied_hyst); + + /** + * The number of blocks guarantied for class #t in MAC1.t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, + e3b0_val.mac_1_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, + e3b0_val.mac_1_class_t_guarantied); + /** + * The hysteresis on the guarantied buffer space for class #t + * in MAC1. t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, + e3b0_val.mac_1_class_t_guarantied_hyst); + REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST, + e3b0_val.mac_1_class_t_guarantied_hyst); + + } + + } + + return bnx2x_status; +} + +/****************************************************************************** +* Description: +* This function is needed because NIG ARB_CREDIT_WEIGHT_X are +* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. +******************************************************************************/ +int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, + u8 cos_entry, + u32 priority_mask, u8 port) +{ + u32 nig_reg_rx_priority_mask_add = 0; + + switch (cos_entry) { + case 0: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS0_PRIORITY_MASK : + NIG_REG_P0_RX_COS0_PRIORITY_MASK; + break; + case 1: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS1_PRIORITY_MASK : + NIG_REG_P0_RX_COS1_PRIORITY_MASK; + break; + case 2: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS2_PRIORITY_MASK : + NIG_REG_P0_RX_COS2_PRIORITY_MASK; + break; + case 3: + if (port) + return -EINVAL; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK; + break; + case 4: + if (port) + return -EINVAL; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK; + break; + case 5: + if (port) + return -EINVAL; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK; + break; + } + + REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask); + + return 0; +} +static void bnx2x_update_mng(struct link_params *params, u32 link_status) +{ + struct bnx2x *bp = params->bp; + + REG_WR(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status), link_status); +} + +static void bnx2x_update_pfc_nig(struct link_params *params, + struct link_vars *vars, + struct bnx2x_nig_brb_pfc_port_params *nig_params) +{ + u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0; + u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0; + u32 pkt_priority_to_cos = 0; + struct bnx2x *bp = params->bp; + u8 port = params->port; + + int set_pfc = params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED; + DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); + + /* + * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set + * MAC control frames (that are not pause packets) + * will be forwarded to the XCM. + */ + xcm_mask = REG_RD(bp, + port ? NIG_REG_LLH1_XCM_MASK : + NIG_REG_LLH0_XCM_MASK); + /* + * nig params will override non PFC params, since it's possible to + * do transition from PFC to SAFC + */ + if (set_pfc) { + pause_enable = 0; + llfc_out_en = 0; + llfc_enable = 0; + if (CHIP_IS_E3(bp)) + ppp_enable = 0; + else + ppp_enable = 1; + xcm_mask &= ~(port ? 
NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+			      NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+		xcm0_out_en = 0;
+		p0_hwpfc_enable = 1;
+	} else  {
+		if (nig_params) {
+			llfc_out_en = nig_params->llfc_out_en;
+			llfc_enable = nig_params->llfc_enable;
+			pause_enable = nig_params->pause_enable;
+		} else  /* default non-PFC mode - PAUSE */
+			pause_enable = 1;
+
+		xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+			NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+		xcm0_out_en = 1;
+	}
+
+	if (CHIP_IS_E3(bp))
+		REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
+		       NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
+	REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
+	       NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
+	REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
+	       NIG_REG_LLFC_ENABLE_0, llfc_enable);
+	REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
+	       NIG_REG_PAUSE_ENABLE_0, pause_enable);
+
+	REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
+	       NIG_REG_PPP_ENABLE_0, ppp_enable);
+
+	REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
+	       NIG_REG_LLH0_XCM_MASK, xcm_mask);
+
+	REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+
+	/* output enable for RX_XCM # IF */
+	REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+
+	/* HW PFC TX enable */
+	REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+
+	if (nig_params) {
+		u8 i = 0;
+		pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
+
+		for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
+			bnx2x_pfc_nig_rx_priority_mask(bp, i,
+				nig_params->rx_cos_priority_mask[i], port);
+
+		REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
+		       NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
+		       nig_params->llfc_high_priority_classes);
+
+		REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
+		       NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
+		       nig_params->llfc_low_priority_classes);
+	}
+	REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
+	       NIG_REG_P0_PKT_PRIORITY_TO_COS,
+	       pkt_priority_to_cos);
+}
+
+int bnx2x_update_pfc(struct link_params *params,
+		      struct link_vars *vars,
+		      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+{
+	/*
+	 * PFC and pause are orthogonal to one another, meaning that when
+	 * PFC is enabled, pause is disabled, and when PFC is
+	 * disabled, pause is set according to the pause result.
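+	 * (PFC, per IEEE 802.1Qbb, pauses traffic per priority class,
+	 * whereas IEEE 802.3x pause halts the whole port, so only one of
+	 * the two mechanisms is active at a time.)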
+ */ + u32 val; + struct bnx2x *bp = params->bp; + int bnx2x_status = 0; + u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC); + + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + else + vars->link_status &= ~LINK_STATUS_PFC_ENABLED; + + bnx2x_update_mng(params, vars->link_status); + + /* update NIG params */ + bnx2x_update_pfc_nig(params, vars, pfc_params); + + /* update BRB params */ + bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); + if (0 != bnx2x_status) + return bnx2x_status; + + if (!vars->link_up) + return bnx2x_status; + + DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); + if (CHIP_IS_E3(bp)) + bnx2x_update_pfc_xmac(params, vars, 0); + else { + val = REG_RD(bp, MISC_REG_RESET_REG_2); + if ((val & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) + == 0) { + DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); + bnx2x_emac_enable(params, vars, 0); + return bnx2x_status; + } + + if (CHIP_IS_E2(bp)) + bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); + else + bnx2x_update_pfc_bmac1(params, vars); + + val = 0; + if ((params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val = 1; + REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); + } + return bnx2x_status; +} + + +static int bnx2x_bmac1_enable(struct link_params *params, + struct link_vars *vars, + u8 is_lb) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + u32 wb_data[2]; + u32 val; + + DP(NETIF_MSG_LINK, "Enabling BigMAC1\n"); + + /* XGXS control */ + wb_data[0] = 0x3c; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); + + /* tx MAC SA */ + wb_data[0] = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + params->mac_addr[5]); + wb_data[1] = ((params->mac_addr[0] << 8) | + params->mac_addr[1]); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); + + /* mac control */ + val = 0x3; + if (is_lb) { + val |= 0x4; + DP(NETIF_MSG_LINK, "enable bmac loopback\n"); + } + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); + + /* set rx mtu */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); + + bnx2x_update_pfc_bmac1(params, vars); + + /* set tx mtu */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); + + /* set cnt max size */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); + + /* configure safc */ + wb_data[0] = 0x1000200; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, + wb_data, 2); + + return 0; +} + +static int bnx2x_bmac2_enable(struct link_params *params, + struct link_vars *vars, + u8 is_lb) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + u32 wb_data[2]; + + DP(NETIF_MSG_LINK, "Enabling BigMAC2\n"); + + wb_data[0] = 0; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); + udelay(30); + + /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ + wb_data[0] = 0x3c; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); + + udelay(30); + + /* tx MAC SA */ + wb_data[0] = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + params->mac_addr[5]); + wb_data[1] = ((params->mac_addr[0] << 8) | + params->mac_addr[1]); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, + wb_data, 2); + + udelay(30); + + /* Configure SAFC */ + wb_data[0] = 0x1000200; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, + wb_data, 2); + udelay(30); + + /* set rx mtu */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); + udelay(30); + + /* set tx mtu */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); + udelay(30); + /* set cnt max size */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); + udelay(30); + bnx2x_update_pfc_bmac2(params, vars, is_lb); + + return 0; +} + +static int bnx2x_bmac_enable(struct link_params *params, + struct link_vars *vars, + u8 is_lb) +{ + int rc = 0; + u8 port = params->port; + struct bnx2x *bp = params->bp; + u32 val; + /* reset and unreset the BigMac */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + msleep(1); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* enable access for bmac registers */ + REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); + + /* Enable BMAC according to BMAC type*/ + if (CHIP_IS_E2(bp)) + rc = bnx2x_bmac2_enable(params, vars, is_lb); + else + rc = bnx2x_bmac1_enable(params, vars, is_lb); + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1); + REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0); + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0); + val = 0; + if ((params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val = 1; + REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val); + REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1); + REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1); + + vars->mac_type = MAC_TYPE_BMAC; + return rc; +} + +static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) +{ + u32 bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM :
+		NIG_REG_INGRESS_BMAC0_MEM;
+	u32 wb_data[2];
+	u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+
+	/* Only if the bmac is out of reset */
+	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+	    (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
+	    nig_bmac_enable) {
+
+		if (CHIP_IS_E2(bp)) {
+			/* Clear Rx Enable bit in BMAC_CONTROL register */
+			REG_RD_DMAE(bp, bmac_addr +
+				    BIGMAC2_REGISTER_BMAC_CONTROL,
+				    wb_data, 2);
+			wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+			REG_WR_DMAE(bp, bmac_addr +
+				    BIGMAC2_REGISTER_BMAC_CONTROL,
+				    wb_data, 2);
+		} else {
+			/* Clear Rx Enable bit in BMAC_CONTROL register */
+			REG_RD_DMAE(bp, bmac_addr +
+				    BIGMAC_REGISTER_BMAC_CONTROL,
+				    wb_data, 2);
+			wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+			REG_WR_DMAE(bp, bmac_addr +
+				    BIGMAC_REGISTER_BMAC_CONTROL,
+				    wb_data, 2);
+		}
+		msleep(1);
+	}
+}
+
+static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
+			    u32 line_speed)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 init_crd, crd;
+	u32 count = 1000;
+
+	/* disable port */
+	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
+
+	/* wait for init credit */
+	init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
+	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+	DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
+
+	while ((init_crd != crd) && count) {
+		msleep(5);
+
+		crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+		count--;
+	}
+	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+	if (init_crd != crd) {
+		DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
+		   init_crd, crd);
+		return -EINVAL;
+	}
+
+	if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
+	    line_speed == SPEED_10 ||
+	    line_speed == SPEED_100 ||
+	    line_speed == SPEED_1000 ||
+	    line_speed == SPEED_2500) {
+		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
+		/* update threshold */
+		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
+		/* update init credit */
+		init_crd = 778;		/* (800-18-4) */
+
+	} else {
+		u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
+			      ETH_OVREHEAD)/16;
+		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+		/* update threshold */
+		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
+		/* update init credit */
+		switch (line_speed) {
+		case SPEED_10000:
+			init_crd = thresh + 553 - 22;
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+			   line_speed);
+			return -EINVAL;
+		}
+	}
+	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
+	DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
+	   line_speed, init_crd);
+
+	/* probe the credit changes */
+	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
+	msleep(5);
+	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
+
+	/* enable port */
+	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
+	return 0;
+}
+
+/**
+ * bnx2x_get_emac_base - retrieve emac base address
+ *
+ * @bp:			driver handle
+ * @mdc_mdio_access:	access type
+ * @port:		port id
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on the mdc_mdio_access, port, and port-swap
+ * settings. Each phy has a default access mode, which could also
+ * be overridden by nvram configuration.
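+ * (Note: a port-swap configuration, read from NIG_REG_PORT_SWAP
+ * below, inverts the EMAC0/EMAC1 selection.)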
This parameter, whether this is the + * default phy configuration, or the nvram overrun + * configuration, is passed here as mdc_mdio_access and selects + * the emac_base for the CL45 read/writes operations + */ +static u32 bnx2x_get_emac_base(struct bnx2x *bp, + u32 mdc_mdio_access, u8 port) +{ + u32 emac_base = 0; + switch (mdc_mdio_access) { + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE: + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0: + if (REG_RD(bp, NIG_REG_PORT_SWAP)) + emac_base = GRCBASE_EMAC1; + else + emac_base = GRCBASE_EMAC0; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1: + if (REG_RD(bp, NIG_REG_PORT_SWAP)) + emac_base = GRCBASE_EMAC0; + else + emac_base = GRCBASE_EMAC1; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH: + emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED: + emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1; + break; + default: + break; + } + return emac_base; + +} + +/******************************************************************/ +/* CL22 access functions */ +/******************************************************************/ +static int bnx2x_cl22_write(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 reg, u16 val) +{ + u32 tmp, mode; + u8 i; + int rc = 0; + /* Switch to CL22 */ + mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, + mode & ~EMAC_MDIO_MODE_CLAUSE_45); + + /* address */ + tmp = ((phy->addr << 21) | (reg << 16) | val | + EMAC_MDIO_COMM_COMMAND_WRITE_22 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + udelay(10); + + tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "write phy register failed\n"); + rc = -EFAULT; + } + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); + return rc; +} + +static int bnx2x_cl22_read(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 reg, u16 *ret_val) +{ + u32 val, mode; + u16 i; + int rc = 0; + + /* Switch to CL22 */ + mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, + mode & ~EMAC_MDIO_MODE_CLAUSE_45); + + /* address */ + val = ((phy->addr << 21) | (reg << 16) | + EMAC_MDIO_COMM_COMMAND_READ_22 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + udelay(10); + + val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); + udelay(5); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "read phy register failed\n"); + + *ret_val = 0; + rc = -EFAULT; + } + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); + return rc; +} + +/******************************************************************/ +/* CL45 access functions */ +/******************************************************************/ +static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 *ret_val) +{ + u32 val; + u16 i; + int rc = 0; + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + /* address */ + val = ((phy->addr << 21) | (devad << 16) | reg | + EMAC_MDIO_COMM_COMMAND_ADDRESS | + 
EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + udelay(10); + + val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "read phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + *ret_val = 0; + rc = -EFAULT; + } else { + /* data */ + val = ((phy->addr << 21) | (devad << 16) | + EMAC_MDIO_COMM_COMMAND_READ_45 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + udelay(10); + + val = REG_RD(bp, phy->mdio_ctrl + + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "read phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + *ret_val = 0; + rc = -EFAULT; + } + } + /* Work around for E3 A0 */ + if (phy->flags & FLAGS_MDC_MDIO_WA) { + phy->flags ^= FLAGS_DUMMY_READ; + if (phy->flags & FLAGS_DUMMY_READ) { + u16 temp_val; + bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); + } + } + + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + return rc; +} + +static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 val) +{ + u32 tmp; + u8 i; + int rc = 0; + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + + /* address */ + + tmp = ((phy->addr << 21) | (devad << 16) | reg | + EMAC_MDIO_COMM_COMMAND_ADDRESS | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + udelay(10); + + tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "write phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + rc = -EFAULT; + + } else { + /* data */ + tmp = ((phy->addr << 21) | (devad << 16) | val | + EMAC_MDIO_COMM_COMMAND_WRITE_45 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + udelay(10); + + tmp = REG_RD(bp, phy->mdio_ctrl + + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "write phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + rc = -EFAULT; + } + } + /* Work around for E3 A0 */ + if (phy->flags & FLAGS_MDC_MDIO_WA) { + phy->flags ^= FLAGS_DUMMY_READ; + if (phy->flags & FLAGS_DUMMY_READ) { + u16 temp_val; + bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); + } + } + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + return rc; +} + + +/******************************************************************/ +/* BSC access functions from E3 */ +/******************************************************************/ +static void bnx2x_bsc_module_sel(struct link_params *params) +{ + int idx; + u32 board_cfg, sfp_ctrl; + u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH]; + struct bnx2x *bp = params->bp; + u8 port = params->port; 
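+	/*
+	 * Note: module select is a two-step lookup; the shared board
+	 * configuration names the chip pins that drive each I2C mux, and
+	 * the per-port SFP control supplies the values to drive on them.
+	 */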
+ /* Read I2C output PINs */ + board_cfg = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.shared_hw_config.board)); + i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK; + i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >> + SHARED_HW_CFG_E3_I2C_MUX1_SHIFT; + + /* Read I2C output value */ + sfp_ctrl = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)); + i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0; + i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0; + DP(NETIF_MSG_LINK, "Setting BSC switch\n"); + for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++) + bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]); +} + +static int bnx2x_bsc_read(struct link_params *params, + struct bnx2x_phy *phy, + u8 sl_devid, + u16 sl_addr, + u8 lc_addr, + u8 xfer_cnt, + u32 *data_array) +{ + u32 val, i; + int rc = 0; + struct bnx2x *bp = params->bp; + + if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) { + DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid); + return -EINVAL; + } + + if (xfer_cnt > 16) { + DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n", + xfer_cnt); + return -EINVAL; + } + bnx2x_bsc_module_sel(params); + + xfer_cnt = 16 - lc_addr; + + /* enable the engine */ + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + val |= MCPR_IMC_COMMAND_ENABLE; + REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); + + /* program slave device ID */ + val = (sl_devid << 16) | sl_addr; + REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); + + /* start xfer with 0 byte to update the address pointer ???*/ + val = (MCPR_IMC_COMMAND_ENABLE) | + (MCPR_IMC_COMMAND_WRITE_OP << + MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | + (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); + REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); + + /* poll for completion */ + i = 0; + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { + udelay(10); + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + if (i++ > 1000) { + DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n", + i); + rc = -EFAULT; + break; + } + } + if (rc == -EFAULT) + return rc; + + /* start xfer with read op */ + val = (MCPR_IMC_COMMAND_ENABLE) | + (MCPR_IMC_COMMAND_READ_OP << + MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | + (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | + (xfer_cnt); + REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); + + /* poll for completion */ + i = 0; + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { + udelay(10); + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + if (i++ > 1000) { + DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i); + rc = -EFAULT; + break; + } + } + if (rc == -EFAULT) + return rc; + + for (i = (lc_addr >> 2); i < 4; i++) { + data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4)); +#ifdef __BIG_ENDIAN + data_array[i] = ((data_array[i] & 0x000000ff) << 24) | + ((data_array[i] & 0x0000ff00) << 8) | + ((data_array[i] & 0x00ff0000) >> 8) | + ((data_array[i] & 0xff000000) >> 24); +#endif + } + return rc; +} + +static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 or_val) +{ + u16 val; + bnx2x_cl45_read(bp, phy, devad, reg, &val); + bnx2x_cl45_write(bp, phy, devad, reg, val | or_val); +} + +int bnx2x_phy_read(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 *ret_val) +{ + u8 phy_index; + /* + * Probe 
for the phy according to the given phy_addr, and execute + * the read request on it + */ + for (phy_index = 0; phy_index < params->num_phys; phy_index++) { + if (params->phy[phy_index].addr == phy_addr) { + return bnx2x_cl45_read(params->bp, + ¶ms->phy[phy_index], devad, + reg, ret_val); + } + } + return -EINVAL; +} + +int bnx2x_phy_write(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 val) +{ + u8 phy_index; + /* + * Probe for the phy according to the given phy_addr, and execute + * the write request on it + */ + for (phy_index = 0; phy_index < params->num_phys; phy_index++) { + if (params->phy[phy_index].addr == phy_addr) { + return bnx2x_cl45_write(params->bp, + ¶ms->phy[phy_index], devad, + reg, val); + } + } + return -EINVAL; +} +static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, + struct link_params *params) +{ + u8 lane = 0; + struct bnx2x *bp = params->bp; + u32 path_swap, path_swap_ovr; + u8 path, port; + + path = BP_PATH(bp); + port = params->port; + + if (bnx2x_is_4_port_mode(bp)) { + u32 port_swap, port_swap_ovr; + + /*figure out path swap value */ + path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); + if (path_swap_ovr & 0x1) + path_swap = (path_swap_ovr & 0x2); + else + path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP); + + if (path_swap) + path = path ^ 1; + + /*figure out port swap value */ + port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); + if (port_swap_ovr & 0x1) + port_swap = (port_swap_ovr & 0x2); + else + port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP); + + if (port_swap) + port = port ^ 1; + + lane = (port<<1) + path; + } else { /* two port mode - no port swap */ + + /*figure out path swap value */ + path_swap_ovr = + REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); + if (path_swap_ovr & 0x1) { + path_swap = (path_swap_ovr & 0x2); + } else { + path_swap = + REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP); + } + if (path_swap) + path = path ^ 1; + + lane = path << 1 ; + } + return lane; +} + +static void bnx2x_set_aer_mmd(struct link_params *params, + struct bnx2x_phy *phy) +{ + u32 ser_lane; + u16 offset, aer_val; + struct bnx2x *bp = params->bp; + ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ? + (phy->addr + ser_lane) : 0; + + if (USES_WARPCORE(bp)) { + aer_val = bnx2x_get_warpcore_lane(phy, params); + /* + * In Dual-lane mode, two lanes are joined together, + * so in order to configure them, the AER broadcast method is + * used here. + * 0x200 is the broadcast address for lanes 0,1 + * 0x201 is the broadcast address for lanes 2,3 + */ + if (phy->flags & FLAGS_WC_DUAL_MODE) + aer_val = (aer_val >> 1) | 0x200; + } else if (CHIP_IS_E2(bp)) + aer_val = 0x3800 + offset - 1; + else + aer_val = 0x3800 + offset; + DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val); + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, aer_val); + +} + +/******************************************************************/ +/* Internal phy section */ +/******************************************************************/ + +static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port) +{ + u32 emac_base = (port) ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + + /* Set Clause 22 */ + REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1); + REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000); + udelay(500); + REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f); + udelay(500); + /* Set Clause 45 */ + REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0); +} + +static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) +{ + u32 val; + + DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n"); + + val = SERDES_RESET_BITS << (port*16); + + /* reset and unreset the SerDes/XGXS */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); + udelay(500); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); + + bnx2x_set_serdes_access(bp, port); + + REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10, + DEFAULT_PHY_DEV_ADDR); +} + +static void bnx2x_xgxs_deassert(struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 port; + u32 val; + DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n"); + port = params->port; + + val = XGXS_RESET_BITS << (port*16); + + /* reset and unreset the SerDes/XGXS */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); + udelay(500); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); + + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0); + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, + params->phy[INT_PHY].def_md_devad); +} + +static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, + struct link_params *params, u16 *ieee_fc) +{ + struct bnx2x *bp = params->bp; + *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; + /** + * resolve pause mode and advertisement Please refer to Table + * 28B-3 of the 802.3ab-1999 spec + */ + + switch (phy->req_flow_ctrl) { + case BNX2X_FLOW_CTRL_AUTO: + if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + else + *ieee_fc |= + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + + case BNX2X_FLOW_CTRL_TX: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + + case BNX2X_FLOW_CTRL_RX: + case BNX2X_FLOW_CTRL_BOTH: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + break; + + case BNX2X_FLOW_CTRL_NONE: + default: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; + break; + } + DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); +} + +static void set_phy_vars(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 actual_phy_idx, phy_index, link_cfg_idx; + u8 phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + for (phy_index = INT_PHY; phy_index < params->num_phys; + phy_index++) { + link_cfg_idx = LINK_CONFIG_IDX(phy_index); + actual_phy_idx = phy_index; + if (phy_config_swapped) { + if (phy_index == EXT_PHY1) + actual_phy_idx = EXT_PHY2; + else if (phy_index == EXT_PHY2) + actual_phy_idx = EXT_PHY1; + } + params->phy[actual_phy_idx].req_flow_ctrl = + params->req_flow_ctrl[link_cfg_idx]; + + params->phy[actual_phy_idx].req_line_speed = + params->req_line_speed[link_cfg_idx]; + + params->phy[actual_phy_idx].speed_cap_mask = + params->speed_cap_mask[link_cfg_idx]; + + params->phy[actual_phy_idx].req_duplex = + params->req_duplex[link_cfg_idx]; + + if (params->req_line_speed[link_cfg_idx] == + SPEED_AUTO_NEG) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; + + DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x," + " speed_cap_mask %x\n", + params->phy[actual_phy_idx].req_flow_ctrl, + 
params->phy[actual_phy_idx].req_line_speed, + params->phy[actual_phy_idx].speed_cap_mask); + } +} + +static void bnx2x_ext_phy_set_pause(struct link_params *params, + struct bnx2x_phy *phy, + struct link_vars *vars) +{ + u16 val; + struct bnx2x *bp = params->bp; + /* read modify write pause advertizing */ + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); + + val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; + + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { + val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { + val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; + } + DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); +} + +static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) +{ /* LD LP */ + switch (pause_result) { /* ASYM P ASYM P */ + case 0xb: /* 1 0 1 1 */ + vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; + break; + + case 0xe: /* 1 1 1 0 */ + vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; + break; + + case 0x5: /* 0 1 0 1 */ + case 0x7: /* 0 1 1 1 */ + case 0xd: /* 1 1 0 1 */ + case 0xf: /* 1 1 1 1 */ + vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; + break; + + default: + break; + } + if (pause_result & (1<<0)) + vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; + if (pause_result & (1<<1)) + vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; +} + +static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 ld_pause; /* local */ + u16 lp_pause; /* link partner */ + u16 pause_result; + u8 ret = 0; + /* read twice */ + + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + + if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) + vars->flow_ctrl = phy->req_flow_ctrl; + else if (phy->req_line_speed != SPEED_AUTO_NEG) + vars->flow_ctrl = params->req_fc_auto_adv; + else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + ret = 1; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) { + bnx2x_cl22_read(bp, phy, + 0x4, &ld_pause); + bnx2x_cl22_read(bp, phy, + 0x5, &lp_pause); + } else { + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + } + pause_result = (ld_pause & + MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; + pause_result |= (lp_pause & + MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; + DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", + pause_result); + bnx2x_pause_resolve(vars, pause_result); + } + return ret; +} +/******************************************************************/ +/* Warpcore section */ +/******************************************************************/ +/* The init_internal_warpcore should mirror the xgxs, + * i.e. reset the lane (if needed), set aer for the + * init configuration, and set/clear SGMII flag. Internal + * phy init is done purely in phy_init stage. 
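+ * (Note: the Warpcore lane used for a given port depends on the
+ * two/four-port mode and on any port/path swap, as computed by
+ * bnx2x_get_warpcore_lane() above.)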
+ */ +static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { + u16 val16 = 0, lane, bam37 = 0; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); + /* Check adding advertisement for 1G KX */ + if (((vars->line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (vars->line_speed == SPEED_1000)) { + u16 sd_digital; + val16 |= (1<<5); + + /* Enable CL37 1G Parallel Detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (sd_digital | 0x1)); + + DP(NETIF_MSG_LINK, "Advertize 1G\n"); + } + if (((vars->line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || + (vars->line_speed == SPEED_10000)) { + /* Check adding advertisement for 10G KR */ + val16 |= (1<<7); + /* Enable 10G Parallel Detect */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_PAR_DET_10G_CTRL, 1); + + DP(NETIF_MSG_LINK, "Advertize 10G\n"); + } + + /* Set Transmit PMD settings */ + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | + (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | + (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, + 0x03f0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL, + 0x03f0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + 0x383f); + + /* Advertised speeds */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); + + /* Enable CL37 BAM */ + if (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1); + DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); + } + + /* Advertise pause */ + bnx2x_ext_phy_set_pause(params, phy, vars); + + /* Enable Autoneg */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000); + + /* Over 1G - AN local device user page 1 */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_UP1, 0x1f); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, &val16); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100); +} + +static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val; + + /* Disable Autoneg */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_PAR_DET_10G_CTRL, 0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_UP1, 0x1); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, 0xa); + + /* Disable CL36 PCS Tx */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0); + + /* Double Wide Single Data Rate @ pll rate */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF); + + /* Leave cl72 training enable, needed for KR */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, + 0x2); + + /* Leave CL72 enabled */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + val | 0x3800); + + /* Set speed via PMA/PMD register */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); + + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); + + /*Enable encoded forced speed */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); + + /* Turn TX scramble payload only the 64/66 scrambler */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX66_CONTROL, 0x9); + + /* Turn RX scramble payload only the 64/66 scrambler */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, 0xF9); + + /* set and clear loopback to cause a reset to 64/66 decoder */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); + +} + +static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, + struct link_params *params, + u8 is_xfi) +{ + struct bnx2x *bp = params->bp; + u16 misc1_val, tap_val, tx_driver_val, lane, val; + /* Hold rxSeqStart */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000)); + + /* Hold tx_fifo_reset */ + 
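+	/* (Note: rxSeqStart and tx_fifo_reset stay asserted across the
+	 * XFI/SFI reconfiguration below; both are released at the end of
+	 * this function.)
+	 */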
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1)); + + /* Disable CL73 AN */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); + + /* Disable 100FX Enable and Auto-Detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA)); + + /* Disable 100FX Idle detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, (val | 0x0080)); + + /* Set Block address to Remote PHY & Clear forced_speed[5] */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F)); + + /* Turn off auto-detect & fiber mode */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + (val & 0xFFEE)); + + /* Set filter_force_link, disable_false_link and parallel_detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + ((val | 0x0006) & 0xFFFE)); + + /* Set XFI / SFI */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val); + + misc1_val &= ~(0x1f); + + if (is_xfi) { + misc1_val |= 0x5; + tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | + (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | + (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); + tx_driver_val = + ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | + (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | + (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); + + } else { + misc1_val |= 0x9; + tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | + (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | + (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); + tx_driver_val = + ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | + (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | + (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); + } + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); + + /* Set Transmit PMD settings */ + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, + tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + tx_driver_val); + + /* Enable fiber mode, enable and invert sig_det */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd); + + /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080); + + /* 10G XFI Full Duplex */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100); + + /* Release tx_fifo_reset */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + 
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE); + + /* Release rxSeqStart */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF)); +} + +static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp, + struct bnx2x_phy *phy) +{ + DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n"); +} + +static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 lane) +{ + /* Rx0 anaRxControl1G */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90); + + /* Rx2 anaRxControl1G */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW0, 0xE070); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW1, 0xC0D0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW2, 0xA0B0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW3, 0x8090); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0); + + /* Serdes Digital Misc1 */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008); + + /* Serdes Digital4 Misc3 */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8088); + + /* Set Transmit PMD settings */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, + ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | + (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | + (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) | + MDIO_WC_REG_TX_FIR_TAP_ENABLE)); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | + (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | + (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); +} + +static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, + struct link_params *params, + u8 fiber_mode) +{ + struct bnx2x *bp = params->bp; + u16 val16, digctrl_kx1, digctrl_kx2; + u8 lane; + + lane = bnx2x_get_warpcore_lane(phy, params); + + /* Clear XFI clock comp in non-10G single lane mode. 
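+	 * (Note: clock compensation is controlled via bits [14:13] of
+	 * RX66_CONTROL; cleared here, set back in bnx2x_warpcore_clear_regs.)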
*/ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); + + if (phy->req_line_speed == SPEED_AUTO_NEG) { + /* SGMII Autoneg */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, + val16 | 0x1000); + DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n"); + } else { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + val16 &= 0xcfbf; + switch (phy->req_line_speed) { + case SPEED_10: + break; + case SPEED_100: + val16 |= 0x2000; + break; + case SPEED_1000: + val16 |= 0x0040; + break; + default: + DP(NETIF_MSG_LINK, "Speed not supported: 0x%x" + "\n", phy->req_line_speed); + return; + } + + if (phy->req_duplex == DUPLEX_FULL) + val16 |= 0x0100; + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16); + + DP(NETIF_MSG_LINK, "set SGMII force speed %d\n", + phy->req_line_speed); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + DP(NETIF_MSG_LINK, " (readback) %x\n", val16); + } + + /* SGMII Slave mode and disable signal detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1); + if (fiber_mode) + digctrl_kx1 = 1; + else + digctrl_kx1 &= 0xff4a; + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + digctrl_kx1); + + /* Turn off parallel detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (digctrl_kx2 & ~(1<<2))); + + /* Re-enable parallel detect */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (digctrl_kx2 | (1<<2))); + + /* Enable autodet */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + (digctrl_kx1 | 0x10)); +} + +static void bnx2x_warpcore_reset_lane(struct bnx2x *bp, + struct bnx2x_phy *phy, + u8 reset) +{ + u16 val; + /* Take lane out of reset after configuration is finished */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, &val); + if (reset) + val |= 0xC000; + else + val &= 0x3FFF; + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, val); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, &val); +} + + + /* Clear SFI/XFI link settings registers */ +static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, + struct link_params *params, + u16 lane) +{ + struct bnx2x *bp = params->bp; + u16 val16; + + /* Set XFI clock comp as default. 
*/ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13)); + + bnx2x_warpcore_reset_lane(bp, phy, 1); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, 0x014a); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, 0x0800); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8008); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000); + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140); + bnx2x_warpcore_reset_lane(bp, phy, 0); +} + +static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, + u32 chip_id, + u32 shmem_base, u8 port, + u8 *gpio_num, u8 *gpio_port) +{ + u32 cfg_pin; + *gpio_num = 0; + *gpio_port = 0; + if (CHIP_IS_E3(bp)) { + cfg_pin = (REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_MOD_ABS_MASK) >> + PORT_HW_CFG_E3_MOD_ABS_SHIFT; + + /* + * Should not happen. This function called upon interrupt + * triggered by GPIO ( since EPIO can only generate interrupts + * to MCP). + * So if this function was called and none of the GPIOs was set, + * it means the shit hit the fan. + */ + if ((cfg_pin < PIN_CFG_GPIO0_P0) || + (cfg_pin > PIN_CFG_GPIO3_P1)) { + DP(NETIF_MSG_LINK, "ERROR: Invalid cfg pin %x for " + "module detect indication\n", + cfg_pin); + return -EINVAL; + } + + *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3; + *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2; + } else { + *gpio_num = MISC_REGISTERS_GPIO_3; + *gpio_port = port; + } + DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port); + return 0; +} + +static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 gpio_num, gpio_port; + u32 gpio_val; + if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, + params->shmem_base, params->port, + &gpio_num, &gpio_port) != 0) + return 0; + gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); + + /* Call the handling function in case module is detected */ + if (gpio_val == 0) + return 1; + else + return 0; +} + +static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u32 serdes_net_if; + u8 fiber_mode; + u16 lane = bnx2x_get_warpcore_lane(phy, params); + serdes_net_if = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, " + "serdes_net_if = 0x%x\n", + vars->line_speed, serdes_net_if); + bnx2x_set_aer_mmd(params, phy); + + vars->phy_flags |= PHY_XGXS_FLAG; + if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || + (phy->req_line_speed && + ((phy->req_line_speed == SPEED_100) || + (phy->req_line_speed == SPEED_10)))) { + vars->phy_flags |= PHY_SGMII_FLAG; + DP(NETIF_MSG_LINK, "Setting SGMII mode\n"); + bnx2x_warpcore_clear_regs(phy, params, lane); + bnx2x_warpcore_set_sgmii_speed(phy, params, 0); + } else { + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_KR: + /* Enable KR Auto Neg */ + if (params->loopback_mode == LOOPBACK_NONE) + bnx2x_warpcore_enable_AN_KR(phy, params, vars); + else { + DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n"); + bnx2x_warpcore_set_10G_KR(phy, params, vars); + } + break; + + case PORT_HW_CFG_NET_SERDES_IF_XFI: + bnx2x_warpcore_clear_regs(phy, params, lane); + if (vars->line_speed == SPEED_10000) { + DP(NETIF_MSG_LINK, "Setting 10G XFI\n"); + bnx2x_warpcore_set_10G_XFI(phy, params, 1); + } else { + if (SINGLE_MEDIA_DIRECT(params)) { + DP(NETIF_MSG_LINK, "1G Fiber\n"); + fiber_mode = 1; + } else { + DP(NETIF_MSG_LINK, "10/100/1G SGMII\n"); + fiber_mode = 0; + } + bnx2x_warpcore_set_sgmii_speed(phy, + params, + fiber_mode); + } + + break; + + case PORT_HW_CFG_NET_SERDES_IF_SFI: + + bnx2x_warpcore_clear_regs(phy, params, lane); + if (vars->line_speed == SPEED_10000) { + DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); + bnx2x_warpcore_set_10G_XFI(phy, params, 0); + } else if (vars->line_speed == SPEED_1000) { + DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); + bnx2x_warpcore_set_sgmii_speed(phy, params, 1); + } + /* Issue Module detection */ + if (bnx2x_is_sfp_module_plugged(phy, params)) + bnx2x_sfp_module_detection(phy, params); + break; + + case PORT_HW_CFG_NET_SERDES_IF_DXGXS: + if (vars->line_speed != SPEED_20000) { + DP(NETIF_MSG_LINK, "Speed not supported yet\n"); + return; + } + DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n"); + bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane); + /* Issue Module detection */ + + bnx2x_sfp_module_detection(phy, params); + break; + + case PORT_HW_CFG_NET_SERDES_IF_KR2: + if (vars->line_speed != SPEED_20000) { + DP(NETIF_MSG_LINK, "Speed not supported yet\n"); + return; + } + DP(NETIF_MSG_LINK, "Setting 20G KR2\n"); + bnx2x_warpcore_set_20G_KR2(bp, phy); + break; + + default: + DP(NETIF_MSG_LINK, "Unsupported Serdes Net Interface " + "0x%x\n", serdes_net_if); + return; + } + } + + /* Take lane out of reset after configuration is finished */ + bnx2x_warpcore_reset_lane(bp, phy, 0); + DP(NETIF_MSG_LINK, "Exit config init\n"); +} + +static void bnx2x_sfp_e3_set_transmitter(struct link_params *params, + struct bnx2x_phy *phy, + u8 tx_en) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin; + u8 port = params->port; + + cfg_pin = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_TX_LASER_MASK; + /* Set the !tx_en since this pin is DISABLE_TX_LASER */ + DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en); + /* For 20G, the expected pin to be used is 3 pins after the current */ + + bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) + bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1); +} + +static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = 
params->bp; + u16 val16; + bnx2x_sfp_e3_set_transmitter(params, phy, 0); + bnx2x_set_mdio_clk(bp, params->chip_id, params->port); + bnx2x_set_aer_mmd(params, phy); + /* Global register */ + bnx2x_warpcore_reset_lane(bp, phy, 1); + + /* Clear loopback settings (if any) */ + /* 10G & 20G */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 & + 0xBFFF); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe); + + /* Update those 1-copy registers */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Enable 1G MDIO (1-copy) */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + val16 & ~0x10); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, + val16 & 0xff00); + +} + +static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 val16; + u32 lane; + DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n", + params->loopback_mode, phy->req_line_speed); + + if (phy->req_line_speed < SPEED_10000) { + /* 10/100/1000 */ + + /* Update those 1-copy registers */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Enable 1G MDIO (1-copy) */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + val16 | 0x10); + /* Set 1G loopback based on lane (1-copy) */ + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, + val16 | (1<bp; + u8 link_10g_plus; + u8 port = params->port; + u32 sync_offset, media_types; + /* Update PHY configuration */ + set_phy_vars(params, vars); + + vars->link_status = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[port].link_status)); + + vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); + vars->phy_flags = PHY_XGXS_FLAG; + if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) + vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; + + if (vars->link_up) { + DP(NETIF_MSG_LINK, "phy link up\n"); + + vars->phy_link_up = 1; + vars->duplex = DUPLEX_FULL; + switch (vars->link_status & + LINK_STATUS_SPEED_AND_DUPLEX_MASK) { + case LINK_10THD: + vars->duplex = DUPLEX_HALF; + /* fall thru */ + case LINK_10TFD: + vars->line_speed = SPEED_10; + break; + + case LINK_100TXHD: + vars->duplex = DUPLEX_HALF; + /* fall thru */ + case LINK_100T4: + case LINK_100TXFD: + vars->line_speed = SPEED_100; + break; + + case LINK_1000THD: + vars->duplex = DUPLEX_HALF; + /* fall thru */ + case LINK_1000TFD: + vars->line_speed = SPEED_1000; + break; + + case LINK_2500THD: + vars->duplex = DUPLEX_HALF; + /* fall thru */ + case LINK_2500TFD: + vars->line_speed = SPEED_2500; + break; + + case LINK_10GTFD: + vars->line_speed = SPEED_10000; + break; + case LINK_20GTFD: + vars->line_speed = SPEED_20000; + break; + default: + break; + } + vars->flow_ctrl = 0; + if (vars->link_status & 
LINK_STATUS_TX_FLOW_CONTROL_ENABLED) + vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX; + + if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED) + vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX; + + if (!vars->flow_ctrl) + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + + if (vars->line_speed && + ((vars->line_speed == SPEED_10) || + (vars->line_speed == SPEED_100))) { + vars->phy_flags |= PHY_SGMII_FLAG; + } else { + vars->phy_flags &= ~PHY_SGMII_FLAG; + } + if (vars->line_speed && + USES_WARPCORE(bp) && + (vars->line_speed == SPEED_1000)) + vars->phy_flags |= PHY_SGMII_FLAG; + /* anything 10 and over uses the bmac */ + link_10g_plus = (vars->line_speed >= SPEED_10000); + + if (link_10g_plus) { + if (USES_WARPCORE(bp)) + vars->mac_type = MAC_TYPE_XMAC; + else + vars->mac_type = MAC_TYPE_BMAC; + } else { + if (USES_WARPCORE(bp)) + vars->mac_type = MAC_TYPE_UMAC; + else + vars->mac_type = MAC_TYPE_EMAC; + } + } else { /* link down */ + DP(NETIF_MSG_LINK, "phy link down\n"); + + vars->phy_link_up = 0; + + vars->line_speed = 0; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + + /* indicate no mac active */ + vars->mac_type = MAC_TYPE_NONE; + if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + } + + /* Sync media type */ + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].media_type); + media_types = REG_RD(bp, sync_offset); + + params->phy[INT_PHY].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT; + params->phy[EXT_PHY1].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT; + params->phy[EXT_PHY2].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT; + DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types); + + /* Sync AEU offset */ + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].aeu_int_mask); + + vars->aeu_int_mask = REG_RD(bp, sync_offset); + + /* Sync PFC status */ + if (vars->link_status & LINK_STATUS_PFC_ENABLED) + params->feature_config_flags |= + FEATURE_CONFIG_PFC_ENABLED; + else + params->feature_config_flags &= + ~FEATURE_CONFIG_PFC_ENABLED; + + DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", + vars->link_status, vars->phy_link_up, vars->aeu_int_mask); + DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n", + vars->line_speed, vars->duplex, vars->flow_ctrl); +} + + +static void bnx2x_set_master_ln(struct link_params *params, + struct bnx2x_phy *phy) +{ + struct bnx2x *bp = params->bp; + u16 new_master_ln, ser_lane; + ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + /* set the master_ln for AN */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + &new_master_ln); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2 , + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + (new_master_ln | ser_lane)); +} + +static int bnx2x_reset_unicore(struct link_params *params, + struct bnx2x_phy *phy, + u8 set_serdes) +{ + struct bnx2x *bp = params->bp; + u16 mii_control; + u16 i; + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); + + /* reset the unicore */ + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + 
MDIO_COMBO_IEEO_MII_CONTROL_RESET)); + if (set_serdes) + bnx2x_set_serdes_access(bp, params->port); + + /* wait for the reset to self clear */ + for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { + udelay(5); + + /* the reset erased the previous bank value */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); + + if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { + udelay(5); + return 0; + } + } + + netdev_err(bp->dev, "Warning: PHY was not initialized," + " Port %d\n", + params->port); + DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n"); + return -EINVAL; + +} + +static void bnx2x_set_swap_lanes(struct link_params *params, + struct bnx2x_phy *phy) +{ + struct bnx2x *bp = params->bp; + /* + * Each two bits represents a lane number: + * No swap is 0123 => 0x1b no need to enable the swap + */ + u16 ser_lane, rx_lane_swap, tx_lane_swap; + + ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + rx_lane_swap = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); + tx_lane_swap = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); + + if (rx_lane_swap != 0x1b) { + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, + (rx_lane_swap | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); + } else { + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); + } + + if (tx_lane_swap != 0x1b) { + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, + (tx_lane_swap | + MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); + } else { + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); + } +} + +static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 control2; + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, + &control2); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) + control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; + else + control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; + DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n", + phy->speed_cap_mask, control2); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, + control2); + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + DP(NETIF_MSG_LINK, "XGXS\n"); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + &control2); + + + control2 |= + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + control2); + + /* Disable parallel detection of HiG */ + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); + } +} + +static void bnx2x_set_autoneg(struct bnx2x_phy *phy, + struct link_params 
*params, + struct link_vars *vars, + u8 enable_cl73) +{ + struct bnx2x *bp = params->bp; + u16 reg_val; + + /* CL37 Autoneg */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); + + /* CL37 Autoneg Enabled */ + if (vars->line_speed == SPEED_AUTO_NEG) + reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; + else /* CL37 Autoneg Disabled */ + reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + + /* Enable/Disable Autodetection */ + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); + reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); + reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; + if (vars->line_speed == SPEED_AUTO_NEG) + reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; + else + reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); + + /* Enable TetonII and BAM autoneg */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, + &reg_val); + if (vars->line_speed == SPEED_AUTO_NEG) { + /* Enable BAM aneg Mode and TetonII aneg Mode */ + reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); + } else { + /* TetonII and BAM Autoneg Disabled */ + reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); + } + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, + reg_val); + + if (enable_cl73) { + /* Enable Cl73 FSM status bits */ + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_UCTRL, + 0xe); + + /* Enable BAM Station Manager */ + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_BAM_CTRL1, + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN | + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); + + /* Advertise CL73 link speeds */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, + &reg_val); + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) + reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, + reg_val); + + /* CL73 Autoneg Enabled */ + reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; + + } else /* CL73 Autoneg Disabled */ + reg_val = 0; + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); +} + +/* program SerDes, forced speed */ +static void bnx2x_program_serdes(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 reg_val; + + /* program duplex, disable autoneg and sgmii */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); + reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | + MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); + if (phy->req_duplex == DUPLEX_FULL) + reg_val |= 
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + + /* + * program speed + * - needed only if the speed is greater than 1G (2.5G or 10G) + */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, &reg_val); + /* clearing the speed value before setting the right speed */ + DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); + + reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); + + if (!((vars->line_speed == SPEED_1000) || + (vars->line_speed == SPEED_100) || + (vars->line_speed == SPEED_10))) { + + reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); + if (vars->line_speed == SPEED_10000) + reg_val |= + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; + } + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, reg_val); + +} + +static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 val = 0; + + /* configure the 48 bits for BAM AN */ + + /* set extended capabilities */ + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) + val |= MDIO_OVER_1G_UP1_2_5G; + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + val |= MDIO_OVER_1G_UP1_10G; + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_UP1, val); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_UP3, 0x400); +} + +static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy, + struct link_params *params, + u16 ieee_fc) +{ + struct bnx2x *bp = params->bp; + u16 val; + /* for AN, we are always publishing full duplex */ + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, &val); + val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; + val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, val); +} + +static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, + struct link_params *params, + u8 enable_cl73) +{ + struct bnx2x *bp = params->bp; + u16 mii_control; + + DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n"); + /* Enable and restart BAM/CL37 aneg */ + + if (enable_cl73) { + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + &mii_control); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + (mii_control | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); + } else { + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); + DP(NETIF_MSG_LINK, + "bnx2x_restart_autoneg mii_control before = 0x%x\n", + mii_control); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); + } +} + +static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 control1; + + /* in SGMII mode, the unicore is always slave */ + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + 
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, + &control1); + control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; + /* set sgmii mode (and not fiber) */ + control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, + control1); + + /* if forced speed */ + if (!(vars->line_speed == SPEED_AUTO_NEG)) { + /* set speed, disable autoneg */ + u16 mii_control; + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); + mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| + MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); + + switch (vars->line_speed) { + case SPEED_100: + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; + break; + case SPEED_1000: + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; + break; + case SPEED_10: + /* there is nothing to set for 10M */ + break; + default: + /* invalid speed for SGMII */ + DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", + vars->line_speed); + break; + } + + /* setting the full duplex */ + if (phy->req_duplex == DUPLEX_FULL) + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + mii_control); + + } else { /* AN mode */ + /* enable and restart AN */ + bnx2x_restart_autoneg(phy, params, 0); + } +} + + +/* + * link management + */ + +static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 pd_10g, status2_1000x; + if (phy->req_line_speed != SPEED_AUTO_NEG) + return 0; + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, + &status2_1000x); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, + &status2_1000x); + if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { + DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", + params->port); + return 1; + } + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, + &pd_10g); + + if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { + DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", + params->port); + return 1; + } + return 0; +} + +static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, + u32 gp_status) +{ + struct bnx2x *bp = params->bp; + u16 ld_pause; /* local driver */ + u16 lp_pause; /* link partner */ + u16 pause_result; + + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + + /* resolve from gp_status in case of AN complete and not sgmii */ + if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) + vars->flow_ctrl = phy->req_flow_ctrl; + else if (phy->req_line_speed != SPEED_AUTO_NEG) + vars->flow_ctrl = params->req_fc_auto_adv; + else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && + (!(vars->phy_flags & PHY_SGMII_FLAG))) { + if (bnx2x_direct_parallel_detect_used(phy, params)) { + vars->flow_ctrl = params->req_fc_auto_adv; + return; + } + if ((gp_status & + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | + MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) == + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | + 
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, + &ld_pause); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV1, + &lp_pause); + pause_result = (ld_pause & + MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) + >> 8; + pause_result |= (lp_pause & + MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) + >> 10; + DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", + pause_result); + } else { + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, + &ld_pause); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, + &lp_pause); + pause_result = (ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; + pause_result |= (lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; + DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", + pause_result); + } + bnx2x_pause_resolve(vars, pause_result); + } + DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); +} + +static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 rx_status, ustat_val, cl37_fsm_received; + DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); + /* Step 1: Make sure signal is detected */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_RX0, + MDIO_RX0_RX_STATUS, + &rx_status); + if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != + (MDIO_RX0_RX_STATUS_SIGDET)) { + DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." + "rx_status(0x80b0) = 0x%x\n", rx_status); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); + return; + } + /* Step 2: Check CL73 state machine */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_USTAT1, + &ustat_val); + if ((ustat_val & + (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | + MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != + (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | + MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) { + DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. " + "ustat_val(0x8371) = 0x%x\n", ustat_val); + return; + } + /* + * Step 3: Check CL37 Message Pages received to indicate LP + * supports only CL37 + */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_REMOTE_PHY, + MDIO_REMOTE_PHY_MISC_RX_STATUS, + &cl37_fsm_received); + if ((cl37_fsm_received & + (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | + MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != + (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | + MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) { + DP(NETIF_MSG_LINK, "No CL37 FSM were received. " + "misc_rx_status(0x8330) = 0x%x\n", + cl37_fsm_received); + return; + } + /* + * The combined cl37/cl73 fsm state information indicating that + * we are connected to a device which does not support cl73, but + * does support cl37 BAM. 
In this case we disable cl73 and + * restart cl37 auto-neg + */ + + /* Disable CL73 */ + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + 0); + /* Restart CL37 autoneg */ + bnx2x_restart_autoneg(phy, params, 0); + DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); +} + +static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, + u32 gp_status) +{ + if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + + if (bnx2x_direct_parallel_detect_used(phy, params)) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; +} +static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, + u16 is_link_up, + u16 speed_mask, + u16 is_duplex) +{ + struct bnx2x *bp = params->bp; + if (phy->req_line_speed == SPEED_AUTO_NEG) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; + if (is_link_up) { + DP(NETIF_MSG_LINK, "phy link up\n"); + + vars->phy_link_up = 1; + vars->link_status |= LINK_STATUS_LINK_UP; + + switch (speed_mask) { + case GP_STATUS_10M: + vars->line_speed = SPEED_10; + if (vars->duplex == DUPLEX_FULL) + vars->link_status |= LINK_10TFD; + else + vars->link_status |= LINK_10THD; + break; + + case GP_STATUS_100M: + vars->line_speed = SPEED_100; + if (vars->duplex == DUPLEX_FULL) + vars->link_status |= LINK_100TXFD; + else + vars->link_status |= LINK_100TXHD; + break; + + case GP_STATUS_1G: + case GP_STATUS_1G_KX: + vars->line_speed = SPEED_1000; + if (vars->duplex == DUPLEX_FULL) + vars->link_status |= LINK_1000TFD; + else + vars->link_status |= LINK_1000THD; + break; + + case GP_STATUS_2_5G: + vars->line_speed = SPEED_2500; + if (vars->duplex == DUPLEX_FULL) + vars->link_status |= LINK_2500TFD; + else + vars->link_status |= LINK_2500THD; + break; + + case GP_STATUS_5G: + case GP_STATUS_6G: + DP(NETIF_MSG_LINK, + "link speed unsupported gp_status 0x%x\n", + speed_mask); + return -EINVAL; + + case GP_STATUS_10G_KX4: + case GP_STATUS_10G_HIG: + case GP_STATUS_10G_CX4: + case GP_STATUS_10G_KR: + case GP_STATUS_10G_SFI: + case GP_STATUS_10G_XFI: + vars->line_speed = SPEED_10000; + vars->link_status |= LINK_10GTFD; + break; + case GP_STATUS_20G_DXGXS: + vars->line_speed = SPEED_20000; + vars->link_status |= LINK_20GTFD; + break; + default: + DP(NETIF_MSG_LINK, + "link speed unsupported gp_status 0x%x\n", + speed_mask); + return -EINVAL; + } + } else { /* link_down */ + DP(NETIF_MSG_LINK, "phy link down\n"); + + vars->phy_link_up = 0; + + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_NONE; + } + DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n", + vars->phy_link_up, vars->line_speed); + return 0; +} + +static int bnx2x_link_settings_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + + struct bnx2x *bp = params->bp; + + u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask; + int rc = 0; + + /* Read gp_status */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) + duplex = DUPLEX_FULL; + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) + link_up = 1; + speed_mask = gp_status & GP_STATUS_SPEED_MASK; + DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n", + gp_status, link_up, speed_mask); + rc = bnx2x_get_link_speed_duplex(phy, 
params, vars, link_up, speed_mask, + duplex); + if (rc == -EINVAL) + return rc; + + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { + if (SINGLE_MEDIA_DIRECT(params)) { + bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); + if (phy->req_line_speed == SPEED_AUTO_NEG) + bnx2x_xgxs_an_resolve(phy, params, vars, + gp_status); + } + } else { /* link_down */ + if ((phy->req_line_speed == SPEED_AUTO_NEG) && + SINGLE_MEDIA_DIRECT(params)) { + /* Check signal is detected */ + bnx2x_check_fallback_to_cl37(phy, params); + } + } + + DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", + vars->duplex, vars->flow_ctrl, vars->link_status); + return rc; +} + +static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + + struct bnx2x *bp = params->bp; + + u8 lane; + u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL; + int rc = 0; + lane = bnx2x_get_warpcore_lane(phy, params); + /* Read gp_status */ + if (phy->req_line_speed > SPEED_10000) { + u16 temp_link_up; + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + 1, &temp_link_up); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + 1, &link_up); + DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n", + temp_link_up, link_up); + link_up &= (1<<2); + if (link_up) + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } else { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1); + DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1); + /* Check for either KR or generic link up. */ + gp_status1 = ((gp_status1 >> 8) & 0xf) | + ((gp_status1 >> 12) & 0xf); + link_up = gp_status1 & (1 << lane); + if (link_up && SINGLE_MEDIA_DIRECT(params)) { + u16 pd, gp_status4; + if (phy->req_line_speed == SPEED_AUTO_NEG) { + /* Check Autoneg complete */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_4, + &gp_status4); + if (gp_status4 & ((1<<12)<<lane)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + + /* Check parallel detect used */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_PAR_DET_10G_STATUS, + &pd); + if (pd & (1<<15)) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + } + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } + } + + if (lane < 2) { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed); + } else { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed); + } + DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed); + + if ((lane & 1) == 0) + gp_speed <<= 8; + gp_speed &= 0x3f00; + + + rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, + duplex); + + DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", + vars->duplex, vars->flow_ctrl, vars->link_status); + return rc; +} +static void bnx2x_set_gmii_tx_driver(struct link_params *params) +{ + struct bnx2x *bp = params->bp; + struct bnx2x_phy *phy = &params->phy[INT_PHY]; + u16 lp_up2; + u16 tx_driver; + u16 bank; + + /* read precomp */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_LP_UP2, &lp_up2); + + /* bits [10:7] at lp_up2, positioned at [15:12] */ + lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> + MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << + MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); + + if (lp_up2 == 0) + return; + + for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; + bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { + CL22_RD_OVER_CL45(bp, phy, + bank, + MDIO_TX0_TX_DRIVER, &tx_driver); + + /* replace tx_driver 
bits [15:12] */ + if (lp_up2 != + (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { + tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; + tx_driver |= lp_up2; + CL22_WR_OVER_CL45(bp, phy, + bank, + MDIO_TX0_TX_DRIVER, tx_driver); + } + } +} + +static int bnx2x_emac_program(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u16 mode = 0; + + DP(NETIF_MSG_LINK, "setting link speed & duplex\n"); + bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + + EMAC_REG_EMAC_MODE, + (EMAC_MODE_25G_MODE | + EMAC_MODE_PORT_MII_10M | + EMAC_MODE_HALF_DUPLEX)); + switch (vars->line_speed) { + case SPEED_10: + mode |= EMAC_MODE_PORT_MII_10M; + break; + + case SPEED_100: + mode |= EMAC_MODE_PORT_MII; + break; + + case SPEED_1000: + mode |= EMAC_MODE_PORT_GMII; + break; + + case SPEED_2500: + mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII); + break; + + default: + /* 10G not valid for EMAC */ + DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", + vars->line_speed); + return -EINVAL; + } + + if (vars->duplex == DUPLEX_HALF) + mode |= EMAC_MODE_HALF_DUPLEX; + bnx2x_bits_en(bp, + GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, + mode); + + bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); + return 0; +} + +static void bnx2x_set_preemphasis(struct bnx2x_phy *phy, + struct link_params *params) +{ + + u16 bank, i = 0; + struct bnx2x *bp = params->bp; + + for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; + bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { + CL22_WR_OVER_CL45(bp, phy, + bank, + MDIO_RX0_RX_EQ_BOOST, + phy->rx_preemphasis[i]); + } + + for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; + bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { + CL22_WR_OVER_CL45(bp, phy, + bank, + MDIO_TX0_TX_DRIVER, + phy->tx_preemphasis[i]); + } +} + +static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) || + (params->loopback_mode == LOOPBACK_XGXS)); + if (!(vars->phy_flags & PHY_SGMII_FLAG)) { + if (SINGLE_MEDIA_DIRECT(params) && + (params->feature_config_flags & + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) + bnx2x_set_preemphasis(phy, params); + + /* forced speed requested? 
*/ + if (vars->line_speed != SPEED_AUTO_NEG || + (SINGLE_MEDIA_DIRECT(params) && + params->loopback_mode == LOOPBACK_EXT)) { + DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); + + /* disable autoneg */ + bnx2x_set_autoneg(phy, params, vars, 0); + + /* program speed and duplex */ + bnx2x_program_serdes(phy, params, vars); + + } else { /* AN_mode */ + DP(NETIF_MSG_LINK, "not SGMII, AN\n"); + + /* AN enabled */ + bnx2x_set_brcm_cl37_advertisement(phy, params); + + /* program duplex & pause advertisement (for aneg) */ + bnx2x_set_ieee_aneg_advertisement(phy, params, + vars->ieee_fc); + + /* enable autoneg */ + bnx2x_set_autoneg(phy, params, vars, enable_cl73); + + /* enable and restart AN */ + bnx2x_restart_autoneg(phy, params, enable_cl73); + } + + } else { /* SGMII mode */ + DP(NETIF_MSG_LINK, "SGMII\n"); + + bnx2x_initialize_sgmii_process(phy, params, vars); + } +} + +static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + int rc; + vars->phy_flags |= PHY_XGXS_FLAG; + if ((phy->req_line_speed && + ((phy->req_line_speed == SPEED_100) || + (phy->req_line_speed == SPEED_10))) || + (!phy->req_line_speed && + (phy->speed_cap_mask >= + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && + (phy->speed_cap_mask < + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD)) + vars->phy_flags |= PHY_SGMII_FLAG; + else + vars->phy_flags &= ~PHY_SGMII_FLAG; + + bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + bnx2x_set_aer_mmd(params, phy); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + bnx2x_set_master_ln(params, phy); + + rc = bnx2x_reset_unicore(params, phy, 0); + /* reset the SerDes and wait for reset bit return low */ + if (rc != 0) + return rc; + + bnx2x_set_aer_mmd(params, phy); + /* setting the masterLn_def again after the reset */ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { + bnx2x_set_master_ln(params, phy); + bnx2x_set_swap_lanes(params, phy); + } + + return rc; +} + +static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, + struct bnx2x_phy *phy, + struct link_params *params) +{ + u16 cnt, ctrl; + /* Wait for soft reset to get cleared up to 1 sec */ + for (cnt = 0; cnt < 1000; cnt++) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) + bnx2x_cl22_read(bp, phy, + MDIO_PMA_REG_CTRL, &ctrl); + else + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, &ctrl); + if (!(ctrl & (1<<15))) + break; + msleep(1); + } + + if (cnt == 1000) + netdev_err(bp->dev, "Warning: PHY was not initialized," + " Port %d\n", + params->port); + DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt); + return cnt; +} + +static void bnx2x_link_int_enable(struct link_params *params) +{ + u8 port = params->port; + u32 mask; + struct bnx2x *bp = params->bp; + + /* Setting the status to report on link up for either XGXS or SerDes */ + if (CHIP_IS_E3(bp)) { + mask = NIG_MASK_XGXS0_LINK_STATUS; + if (!(SINGLE_MEDIA_DIRECT(params))) + mask |= NIG_MASK_MI_INT; + } else if (params->switch_cfg == SWITCH_CFG_10G) { + mask = (NIG_MASK_XGXS0_LINK10G | + NIG_MASK_XGXS0_LINK_STATUS); + DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n"); + if (!(SINGLE_MEDIA_DIRECT(params)) && + params->phy[INT_PHY].type != + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) { + mask |= NIG_MASK_MI_INT; + DP(NETIF_MSG_LINK, "enabled external phy int\n"); + } + + } else { /* SerDes */ + mask = NIG_MASK_SERDES0_LINK_STATUS; + DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n"); + if (!(SINGLE_MEDIA_DIRECT(params)) && + 
params->phy[INT_PHY].type != + PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) { + mask |= NIG_MASK_MI_INT; + DP(NETIF_MSG_LINK, "enabled external phy int\n"); + } + } + bnx2x_bits_en(bp, + NIG_REG_MASK_INTERRUPT_PORT0 + port*4, + mask); + + DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port, + (params->switch_cfg == SWITCH_CFG_10G), + REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); + DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n", + REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), + REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), + REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c)); + DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", + REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), + REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); +} + +static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, + u8 exp_mi_int) +{ + u32 latch_status = 0; + + /* + * Disable the MI INT ( external phy int ) by writing 1 to the + * status register. Link down indication is high-active-signal, + * so in this case we need to write the status to clear the XOR + */ + /* Read Latched signals */ + latch_status = REG_RD(bp, + NIG_REG_LATCH_STATUS_0 + port*8); + DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status); + /* Handle only those with latched-signal=up.*/ + if (exp_mi_int) + bnx2x_bits_en(bp, + NIG_REG_STATUS_INTERRUPT_PORT0 + + port*4, + NIG_STATUS_EMAC0_MI_INT); + else + bnx2x_bits_dis(bp, + NIG_REG_STATUS_INTERRUPT_PORT0 + + port*4, + NIG_STATUS_EMAC0_MI_INT); + + if (latch_status & 1) { + + /* For all latched-signal=up : Re-Arm Latch signals */ + REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, + (latch_status & 0xfffe) | (latch_status & 1)); + } + /* For all latched-signal=up,Write original_signal to status */ +} + +static void bnx2x_link_int_ack(struct link_params *params, + struct link_vars *vars, u8 is_10g_plus) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 mask; + /* + * First reset all status we assume only one line will be + * change at a time + */ + bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, + (NIG_STATUS_XGXS0_LINK10G | + NIG_STATUS_XGXS0_LINK_STATUS | + NIG_STATUS_SERDES0_LINK_STATUS)); + if (vars->phy_link_up) { + if (USES_WARPCORE(bp)) + mask = NIG_STATUS_XGXS0_LINK_STATUS; + else { + if (is_10g_plus) + mask = NIG_STATUS_XGXS0_LINK10G; + else if (params->switch_cfg == SWITCH_CFG_10G) { + /* + * Disable the link interrupt by writing 1 to + * the relevant lane in the status register + */ + u32 ser_lane = + ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + mask = ((1 << ser_lane) << + NIG_STATUS_XGXS0_LINK_STATUS_SIZE); + } else + mask = NIG_STATUS_SERDES0_LINK_STATUS; + } + DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n", + mask); + bnx2x_bits_en(bp, + NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, + mask); + } +} + +static int bnx2x_format_ver(u32 num, u8 *str, u16 *len) +{ + u8 *str_ptr = str; + u32 mask = 0xf0000000; + u8 shift = 8*4; + u8 digit; + u8 remove_leading_zeros = 1; + if (*len < 10) { + /* Need more than 10chars for this format */ + *str_ptr = '\0'; + (*len)--; + return -EINVAL; + } + while (shift > 0) { + + shift -= 4; + digit = ((num & mask) >> shift); + if (digit == 0 && remove_leading_zeros) { + mask = mask >> 4; + continue; + } else if (digit < 0xa) + *str_ptr = digit + '0'; + else + *str_ptr = digit - 0xa + 'a'; + remove_leading_zeros = 0; + str_ptr++; + (*len)--; + mask = mask >> 4; + if (shift == 
4*4) { + *str_ptr = '.'; + str_ptr++; + (*len)--; + remove_leading_zeros = 1; + } + } + return 0; +} + + +static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) +{ + str[0] = '\0'; + (*len)--; + return 0; +} + +int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, + u8 *version, u16 len) +{ + struct bnx2x *bp; + u32 spirom_ver = 0; + int status = 0; + u8 *ver_p = version; + u16 remain_len = len; + if (version == NULL || params == NULL) + return -EINVAL; + bp = params->bp; + + /* Extract first external phy*/ + version[0] = '\0'; + spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr); + + if (params->phy[EXT_PHY1].format_fw_ver) { + status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver, + ver_p, + &remain_len); + ver_p += (len - remain_len); + } + if ((params->num_phys == MAX_PHYS) && + (params->phy[EXT_PHY2].ver_addr != 0)) { + spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr); + if (params->phy[EXT_PHY2].format_fw_ver) { + *ver_p = '/'; + ver_p++; + remain_len--; + status |= params->phy[EXT_PHY2].format_fw_ver( + spirom_ver, + ver_p, + &remain_len); + ver_p = version + (len - remain_len); + } + } + *ver_p = '\0'; + return status; +} + +static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + u8 port = params->port; + struct bnx2x *bp = params->bp; + + if (phy->req_line_speed != SPEED_1000) { + u32 md_devad = 0; + + DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); + + if (!CHIP_IS_E3(bp)) { + /* change the uni_phy_addr in the nig */ + md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + + port*0x18)); + + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, + 0x5); + } + + bnx2x_cl45_write(bp, phy, + 5, + (MDIO_REG_BANK_AER_BLOCK + + (MDIO_AER_BLOCK_AER_REG & 0xf)), + 0x2800); + + bnx2x_cl45_write(bp, phy, + 5, + (MDIO_REG_BANK_CL73_IEEEB0 + + (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), + 0x6041); + msleep(200); + /* set aer mmd back */ + bnx2x_set_aer_mmd(params, phy); + + if (!CHIP_IS_E3(bp)) { + /* and md_devad */ + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, + md_devad); + } + } else { + u16 mii_ctrl; + DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); + bnx2x_cl45_read(bp, phy, 5, + (MDIO_REG_BANK_COMBO_IEEE0 + + (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), + &mii_ctrl); + bnx2x_cl45_write(bp, phy, 5, + (MDIO_REG_BANK_COMBO_IEEE0 + + (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), + mii_ctrl | + MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK); + } +} + +int bnx2x_set_led(struct link_params *params, + struct link_vars *vars, u8 mode, u32 speed) +{ + u8 port = params->port; + u16 hw_led_mode = params->hw_led_mode; + int rc = 0; + u8 phy_idx; + u32 tmp; + u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); + DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", + speed, hw_led_mode); + /* In case an external phy controls the LED, let it set the mode first */ + for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].set_link_led) { + params->phy[phy_idx].set_link_led( + &params->phy[phy_idx], params, mode); + } + } + + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, + SHARED_HW_CFG_LED_MAC1); + + tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); + EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); + break; + + case LED_MODE_OPER: + /* + * For all other phys, OPER mode is same as ON, so in case + * link is down, do nothing + */ + if (!vars->link_up) + break; + case LED_MODE_ON: + if (((params->phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || + (params->phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && + CHIP_IS_E2(bp) && params->num_phys == 2) { + /* + * This is a work-around for E2+8727 Configurations + */ + if (mode == LED_MODE_ON || + speed == SPEED_10000) { + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); + REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); + + tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); + EMAC_WR(bp, EMAC_REG_EMAC_LED, + (tmp | EMAC_LED_OVERRIDE)); + /* + * return here without enabling traffic + * LED blink and setting rate in ON mode. + * In oper mode, enabling LED blink + * and setting rate is needed. + */ + if (mode == LED_MODE_ON) + return rc; + } + } else if (SINGLE_MEDIA_DIRECT(params)) { + /* + * This is a work-around for HW issue found when link + * is up in CL73 + */ + REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); + if (CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp) || + (mode == LED_MODE_ON)) + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); + else + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, + hw_led_mode); + } else + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); + + REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); + /* Set blinking rate to ~15.9Hz */ + REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, + LED_BLINK_RATE_VAL); + REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + + port*4, 1); + tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); + EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE))); + + if (CHIP_IS_E1(bp) && + ((speed == SPEED_2500) || + (speed == SPEED_1000) || + (speed == SPEED_100) || + (speed == SPEED_10))) { + /* + * On Everest 1 Ax chip versions for speeds less than + * 10G LED scheme is different + */ + REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + + port*4, 1); + REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + + port*4, 0); + REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + + port*4, 1); + } + break; + + default: + rc = -EINVAL; + DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n", + mode); + break; + } + return rc; + +} + +/* + * This function reflects the actual link state read DIRECTLY from the + * HW + */ +int bnx2x_test_link(struct link_params *params, struct link_vars *vars, + u8 is_serdes) +{ + struct bnx2x *bp = params->bp; + u16 gp_status = 0, phy_index = 0; + u8 ext_phy_link_up = 0, serdes_phy_type; + struct link_vars temp_vars; + struct bnx2x_phy *int_phy = &params->phy[INT_PHY]; + + if (CHIP_IS_E3(bp)) { + u16 link_up; + if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] + > SPEED_10000) { + /* Check 20G link */ + bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, + 1, &link_up); + 
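/* Likely a latched-status register (inferred from the duplicated read): read twice so the second read reflects the current 20G link state, mirroring the double read in bnx2x_warpcore_read_status */ +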
bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, + 1, &link_up); + link_up &= (1<<2); + } else { + /* Check 10G link and below */ + u8 lane = bnx2x_get_warpcore_lane(int_phy, params); + bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, + &gp_status); + gp_status = ((gp_status >> 8) & 0xf) | + ((gp_status >> 12) & 0xf); + link_up = gp_status & (1 << lane); + } + if (!link_up) + return -ESRCH; + } else { + CL22_RD_OVER_CL45(bp, int_phy, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); + /* link is up only if both local phy and external phy are up */ + if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) + return -ESRCH; + } + /* In XGXS loopback mode, do not check external PHY */ + if (params->loopback_mode == LOOPBACK_XGXS) + return 0; + + switch (params->num_phys) { + case 1: + /* No external PHY */ + return 0; + case 2: + ext_phy_link_up = params->phy[EXT_PHY1].read_status( + &params->phy[EXT_PHY1], + params, &temp_vars); + break; + case 3: /* Dual Media */ + for (phy_index = EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + serdes_phy_type = ((params->phy[phy_index].media_type == + ETH_PHY_SFP_FIBER) || + (params->phy[phy_index].media_type == + ETH_PHY_XFP_FIBER) || + (params->phy[phy_index].media_type == + ETH_PHY_DA_TWINAX)); + + if (is_serdes != serdes_phy_type) + continue; + if (params->phy[phy_index].read_status) { + ext_phy_link_up |= + params->phy[phy_index].read_status( + &params->phy[phy_index], + params, &temp_vars); + } + } + break; + } + if (ext_phy_link_up) + return 0; + return -ESRCH; +} + +static int bnx2x_link_initialize(struct link_params *params, + struct link_vars *vars) +{ + int rc = 0; + u8 phy_index, non_ext_phy; + struct bnx2x *bp = params->bp; + /* + * In case of external phy existence, the line speed would be the + * line speed linked up by the external phy. In case it is direct + * only, then the line_speed during initialization will be + * equal to the req_line_speed + */ + vars->line_speed = params->phy[INT_PHY].req_line_speed; + + /* + * Initialize the internal phy in case this is a direct board + * (no external phys), or this board has an external phy which requires + * the internal phy to be initialized first. + */ + if (!USES_WARPCORE(bp)) + bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars); + /* init ext phy and enable link state int */ + non_ext_phy = (SINGLE_MEDIA_DIRECT(params) || + (params->loopback_mode == LOOPBACK_XGXS)); + + if (non_ext_phy || + (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) || + (params->loopback_mode == LOOPBACK_EXT_PHY)) { + struct bnx2x_phy *phy = &params->phy[INT_PHY]; + if (vars->line_speed == SPEED_AUTO_NEG && + (CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp))) + bnx2x_set_parallel_detection(phy, params); + if (params->phy[INT_PHY].config_init) + params->phy[INT_PHY].config_init(phy, + params, + vars); + } + + /* Init external phy */ + if (non_ext_phy) { + if (params->phy[INT_PHY].supported & + SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + } else { + for (phy_index = EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + /* + * No need to initialize second phy in case of first + * phy only selection. In case of second phy, we do + * need to initialize the first phy, since they are + * connected. 
+ */ + if (params->phy[phy_index].supported & + SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + + if (phy_index == EXT_PHY2 && + (bnx2x_phy_selection(params) == + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { + DP(NETIF_MSG_LINK, "Not initializing" + " second phy\n"); + continue; + } + params->phy[phy_index].config_init( + &params->phy[phy_index], + params, vars); + } + } + /* Reset the interrupt indication after phy was initialized */ + bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + + params->port*4, + (NIG_STATUS_XGXS0_LINK10G | + NIG_STATUS_XGXS0_LINK_STATUS | + NIG_STATUS_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); + bnx2x_update_mng(params, vars->link_status); + return rc; +} + +static void bnx2x_int_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + /* reset the SerDes/XGXS */ + REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, + (0x1ff << (params->port*16))); +} + +static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 gpio_port; + /* HW reset */ + if (CHIP_IS_E2(bp)) + gpio_port = BP_PATH(bp); + else + gpio_port = params->port; + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); + DP(NETIF_MSG_LINK, "reset external PHY\n"); +} + +static int bnx2x_update_link_down(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + + DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); + bnx2x_set_led(params, vars, LED_MODE_OFF, 0); + vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; + /* indicate no mac active */ + vars->mac_type = MAC_TYPE_NONE; + + /* update shared memory */ + vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | + LINK_STATUS_LINK_UP | + LINK_STATUS_PHYSICAL_LINK_FLAG | + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | + LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | + LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | + LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK); + vars->line_speed = 0; + bnx2x_update_mng(params, vars->link_status); + + /* activate nig drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); + + /* disable emac */ + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); + + msleep(10); + /* reset BigMac/Xmac */ + if (CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp)) { + bnx2x_bmac_rx_disable(bp, params->port); + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + } + if (CHIP_IS_E3(bp)) + bnx2x_xmac_disable(params); + + return 0; +} + +static int bnx2x_update_link_up(struct link_params *params, + struct link_vars *vars, + u8 link_10g) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + int rc = 0; + + vars->link_status |= (LINK_STATUS_LINK_UP | + LINK_STATUS_PHYSICAL_LINK_FLAG); + vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; + + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) + vars->link_status |= + LINK_STATUS_TX_FLOW_CONTROL_ENABLED; + + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) + vars->link_status |= + LINK_STATUS_RX_FLOW_CONTROL_ENABLED; + if (USES_WARPCORE(bp)) { + if (link_10g) { + if (bnx2x_xmac_enable(params, vars, 0) == + -ESRCH) { + DP(NETIF_MSG_LINK, "Found errors on XMAC\n"); + vars->link_up = 0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_STATUS_LINK_UP; + } + } else + bnx2x_umac_enable(params, vars, 0); + bnx2x_set_led(params, vars, + LED_MODE_OPER, 
vars->line_speed); + } + if ((CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp))) { + if (link_10g) { + if (bnx2x_bmac_enable(params, vars, 0) == + -ESRCH) { + DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); + vars->link_up = 0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_STATUS_LINK_UP; + } + + bnx2x_set_led(params, vars, + LED_MODE_OPER, SPEED_10000); + } else { + rc = bnx2x_emac_program(params, vars); + bnx2x_emac_enable(params, vars, 0); + + /* AN complete? */ + if ((vars->link_status & + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) + && (!(vars->phy_flags & PHY_SGMII_FLAG)) && + SINGLE_MEDIA_DIRECT(params)) + bnx2x_set_gmii_tx_driver(params); + } + } + + /* PBF - link up */ + if (CHIP_IS_E1x(bp)) + rc |= bnx2x_pbf_update(params, vars->flow_ctrl, + vars->line_speed); + + /* disable drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); + + /* update shared memory */ + bnx2x_update_mng(params, vars->link_status); + msleep(20); + return rc; +} +/* + * The bnx2x_link_update function should be called upon link + * interrupt. + * Link is considered up as follows: + * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs + * to be up + * - SINGLE_MEDIA - The link between the 577xx and the external + * phy (XGXS) needs to be up as well as the external link of the + * phy (PHY_EXT1) + * - DUAL_MEDIA - The link between the 577xx and the first + * external phy needs to be up, and at least one of the 2 + * external phy links must be up. + */ +int bnx2x_link_update(struct link_params *params, struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + struct link_vars phy_vars[MAX_PHYS]; + u8 port = params->port; + u8 link_10g_plus, phy_index; + u8 ext_phy_link_up = 0, cur_link_up; + int rc = 0; + u8 is_mi_int = 0; + u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; + u8 active_external_phy = INT_PHY; + vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + for (phy_index = INT_PHY; phy_index < params->num_phys; + phy_index++) { + phy_vars[phy_index].flow_ctrl = 0; + phy_vars[phy_index].link_status = 0; + phy_vars[phy_index].line_speed = 0; + phy_vars[phy_index].duplex = DUPLEX_FULL; + phy_vars[phy_index].phy_link_up = 0; + phy_vars[phy_index].link_up = 0; + phy_vars[phy_index].fault_detected = 0; + } + + if (USES_WARPCORE(bp)) + bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]); + + DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", + port, (vars->phy_flags & PHY_XGXS_FLAG), + REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); + + is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + + port*0x18) > 0); + DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n", + REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), + is_mi_int, + REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c)); + + DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", + REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), + REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); + + /* disable emac */ + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); + + /* + * Step 1: + * Check external link change only for external phys, and apply + * priority selection between them in case the link on both phys + * is up. 
Note that instead of the common vars, a temporary + * vars argument is used since each phy may have different link/ + * speed/duplex result + */ + for (phy_index = EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + struct bnx2x_phy *phy = &params->phy[phy_index]; + if (!phy->read_status) + continue; + /* Read link status and params of this ext phy */ + cur_link_up = phy->read_status(phy, params, + &phy_vars[phy_index]); + if (cur_link_up) { + DP(NETIF_MSG_LINK, "phy in index %d link is up\n", + phy_index); + } else { + DP(NETIF_MSG_LINK, "phy in index %d link is down\n", + phy_index); + continue; + } + + if (!ext_phy_link_up) { + ext_phy_link_up = 1; + active_external_phy = phy_index; + } else { + switch (bnx2x_phy_selection(params)) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + /* + * In this option, the first PHY makes sure to pass the + * traffic through itself only. + * It's not clear how to reset the link on the second phy + */ + active_external_phy = EXT_PHY1; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + /* + * In this option, the first PHY makes sure to pass the + * traffic through the second PHY. + */ + active_external_phy = EXT_PHY2; + break; + default: + /* + * Link indication on both PHYs with the following cases + * is invalid: + * - FIRST_PHY means that second phy wasn't initialized, + * hence its link is expected to be down + * - SECOND_PHY means that first phy should not be able + * to link up by itself (using configuration) + * - DEFAULT should be overridden during initialization + */ + DP(NETIF_MSG_LINK, "Invalid link indication " + "mpc=0x%x. DISABLING LINK !!!\n", + params->multi_phy_config); + ext_phy_link_up = 0; + break; + } + } + } + prev_line_speed = vars->line_speed; + /* + * Step 2: + * Read the status of the internal phy. In case of + * DIRECT_SINGLE_MEDIA board, this link is the external link, + * otherwise this is the link between the 577xx and the first + * external phy + */ + if (params->phy[INT_PHY].read_status) + params->phy[INT_PHY].read_status( + &params->phy[INT_PHY], + params, vars); + /* + * The INT_PHY flow control resides in the vars. This includes the + * case where the speed or flow control are not set to AUTO. + * Otherwise, the active external phy flow control result is set + * to the vars. The ext_phy_line_speed is needed to check if the + * speed is different between the internal phy and external phy. + * This case may be the result of an intermediate link speed change. + */ + if (active_external_phy > INT_PHY) { + vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; + /* + * Link speed is taken from the XGXS. AN and FC result from + * the external phy. 
+ */ + vars->link_status |= phy_vars[active_external_phy].link_status; + + /* + * if active_external_phy is first PHY and link is up - disable + * TX on second external PHY + */ + if (active_external_phy == EXT_PHY1) { + if (params->phy[EXT_PHY2].phy_specific_func) { + DP(NETIF_MSG_LINK, "Disabling TX on" + " EXT_PHY2\n"); + params->phy[EXT_PHY2].phy_specific_func( + &params->phy[EXT_PHY2], + params, DISABLE_TX); + } + } + + ext_phy_line_speed = phy_vars[active_external_phy].line_speed; + vars->duplex = phy_vars[active_external_phy].duplex; + if (params->phy[active_external_phy].supported & + SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + else + vars->link_status &= ~LINK_STATUS_SERDES_LINK; + DP(NETIF_MSG_LINK, "Active external phy selected: %x\n", + active_external_phy); + } + + for (phy_index = EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + if (params->phy[phy_index].flags & + FLAGS_REARM_LATCH_SIGNAL) { + bnx2x_rearm_latch_signal(bp, port, + phy_index == + active_external_phy); + break; + } + } + DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," + " ext_phy_line_speed = %d\n", vars->flow_ctrl, + vars->link_status, ext_phy_line_speed); + /* + * Upon link speed change set the NIG into drain mode. Comes to + * deal with possible FIFO glitch due to clk change when speed + * is decreased without link down indicator + */ + + if (vars->phy_link_up) { + if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up && + (ext_phy_line_speed != vars->line_speed)) { + DP(NETIF_MSG_LINK, "Internal link speed %d is" + " different than the external" + " link speed %d\n", vars->line_speed, + ext_phy_line_speed); + vars->phy_link_up = 0; + } else if (prev_line_speed != vars->line_speed) { + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, + 0); + msleep(1); + } + } + + /* anything 10 and over uses the bmac */ + link_10g_plus = (vars->line_speed >= SPEED_10000); + + bnx2x_link_int_ack(params, vars, link_10g_plus); + + /* + * In case external phy link is up, and internal link is down + * (not initialized yet, probably after link initialization), it + * needs to be initialized. + * Note that after link down-up as result of cable plug, the xgxs + * link would probably become up again without the need to + * initialize it + */ + if (!(SINGLE_MEDIA_DIRECT(params))) { + DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d," + " init_preceding = %d\n", ext_phy_link_up, + vars->phy_link_up, + params->phy[EXT_PHY1].flags & + FLAGS_INIT_XGXS_FIRST); + if (!(params->phy[EXT_PHY1].flags & + FLAGS_INIT_XGXS_FIRST) + && ext_phy_link_up && !vars->phy_link_up) { + vars->line_speed = ext_phy_line_speed; + if (vars->line_speed < SPEED_1000) + vars->phy_flags |= PHY_SGMII_FLAG; + else + vars->phy_flags &= ~PHY_SGMII_FLAG; + + if (params->phy[INT_PHY].config_init) + params->phy[INT_PHY].config_init( + &params->phy[INT_PHY], params, + vars); + } + } + /* + * Link is up only if both local phy and external phy (in case of + * non-direct board) are up and no fault detected on active PHY. 
+ */ + vars->link_up = (vars->phy_link_up && + (ext_phy_link_up || + SINGLE_MEDIA_DIRECT(params)) && + (phy_vars[active_external_phy].fault_detected == 0)); + + if (vars->link_up) + rc = bnx2x_update_link_up(params, vars, link_10g_plus); + else + rc = bnx2x_update_link_down(params, vars); + + return rc; +} + + +/*****************************************************************************/ +/* External Phy section */ +/*****************************************************************************/ +void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) +{ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + msleep(1); + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); +} + +static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, + u32 spirom_ver, u32 ver_addr) +{ + DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n", + (u16)(spirom_ver>>16), (u16)spirom_ver, port); + + if (ver_addr) + REG_WR(bp, ver_addr, spirom_ver); +} + +static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, + struct bnx2x_phy *phy, + u8 port) +{ + u16 fw_ver1, fw_ver2; + + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER1, &fw_ver1); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &fw_ver2); + bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), + phy->ver_addr); +} + +static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp, + struct bnx2x_phy *phy, + struct link_vars *vars) +{ + u16 val; + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &val); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &val); + if (val & (1<<5)) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + if ((val & (1<<0)) == 0) + vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED; +} + +/******************************************************************/ +/* common BCM8073/BCM8727 PHY SECTION */ +/******************************************************************/ +static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + if (phy->req_line_speed == SPEED_10 || + phy->req_line_speed == SPEED_100) { + vars->flow_ctrl = phy->req_flow_ctrl; + return; + } + + if (bnx2x_ext_phy_resolve_fc(phy, params, vars) && + (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) { + u16 pause_result; + u16 ld_pause; /* local */ + u16 lp_pause; /* link partner */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LD, &ld_pause); + + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &lp_pause); + pause_result = (ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5; + pause_result |= (lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; + + bnx2x_pause_resolve(vars, pause_result); + DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n", + pause_result); + } +} +static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, + struct bnx2x_phy *phy, + u8 port) +{ + u32 count = 0; + u16 fw_ver1, fw_msgout; + int rc = 0; + + /* Boot port from external ROM */ + /* EDC grst */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 0x0001); + + /* ucode reboot and rst */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 0x008c); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0001); + + /* Reset internal microprocessor */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 
MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + + /* Release srst bit */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + + /* Delay 100ms per the PHY specifications */ + msleep(100); + + /* 8073 sometimes taking longer to download */ + do { + count++; + if (count > 300) { + DP(NETIF_MSG_LINK, + "bnx2x_8073_8727_external_rom_boot port %x:" + "Download failed. fw version = 0x%x\n", + port, fw_ver1); + rc = -EINVAL; + break; + } + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER1, &fw_ver1); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); + + msleep(1); + } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || + ((fw_msgout & 0xff) != 0x03 && (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))); + + /* Clear ser_boot_ctl bit */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0000); + bnx2x_save_bcm_spirom_ver(bp, phy, port); + + DP(NETIF_MSG_LINK, + "bnx2x_8073_8727_external_rom_boot port %x:" + "Download complete. fw version = 0x%x\n", + port, fw_ver1); + + return rc; +} + +/******************************************************************/ +/* BCM8073 PHY SECTION */ +/******************************************************************/ +static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) +{ + /* This is only required for 8073A1, version 102 only */ + u16 val; + + /* Read 8073 HW revision*/ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_CHIP_REV, &val); + + if (val != 1) { + /* No need to workaround in 8073 A1 */ + return 0; + } + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &val); + + /* SNR should be applied only for version 0x102 */ + if (val != 0x102) + return 0; + + return 1; +} + +static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) +{ + u16 val, cnt, cnt1 ; + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_CHIP_REV, &val); + + if (val > 0) { + /* No need to workaround in 8073 A1 */ + return 0; + } + /* XAUI workaround in 8073 A0: */ + + /* + * After loading the boot ROM and restarting Autoneg, poll + * Dev1, Reg $C820: + */ + + for (cnt = 0; cnt < 1000; cnt++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_SPEED_LINK_STATUS, + &val); + /* + * If bit [14] = 0 or bit [13] = 0, continue on with + * system initialization (XAUI work-around not required, as + * these bits indicate 2.5G or 1G link up). + */ + if (!(val & (1<<14)) || !(val & (1<<13))) { + DP(NETIF_MSG_LINK, "XAUI work-around not required\n"); + return 0; + } else if (!(val & (1<<15))) { + DP(NETIF_MSG_LINK, "bit 15 went off\n"); + /* + * If bit 15 is 0, then poll Dev1, Reg $C841 until it's + * MSB (bit15) goes to 1 (indicating that the XAUI + * workaround has completed), then continue on with + * system initialization. 
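+	 * (The inner loop below polls up to 1000 times with ~3ms sleeps,
+	 * so the wait is bounded at roughly three seconds before the
+	 * workaround is declared timed out.)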
+ */ + for (cnt1 = 0; cnt1 < 1000; cnt1++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_XAUI_WA, &val); + if (val & (1<<15)) { + DP(NETIF_MSG_LINK, + "XAUI workaround has completed\n"); + return 0; + } + msleep(3); + } + break; + } + msleep(3); + } + DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n"); + return -EINVAL; +} + +static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy) +{ + /* Force KR or KX */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); +} + +static void bnx2x_8073_set_pause_cl37(struct link_params *params, + struct bnx2x_phy *phy, + struct link_vars *vars) +{ + u16 cl37_val; + struct bnx2x *bp = params->bp; + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val); + + cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + } + DP(NETIF_MSG_LINK, + "Ext phy AN advertize cl37 0x%x\n", cl37_val); + + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val); + msleep(500); +} + +static int bnx2x_8073_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val = 0, tmp1; + u8 gpio_port; + DP(NETIF_MSG_LINK, "Init 8073\n"); + + if (CHIP_IS_E2(bp)) + gpio_port = BP_PATH(bp); + else + gpio_port = params->port; + /* Restore normal power mode*/ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + + /* enable LASI */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); + + bnx2x_8073_set_pause_cl37(params, phy, vars); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); + + DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); + + /* Swap polarity if required - Must be done only in non-1G mode */ + if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { + /* Configure the 8073 to swap _P and _N of the KR lines */ + DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n"); + /* 10G Rx/Tx and 1G Tx signal polarity swap */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, + (val | (3<<9))); + } + + + /* Enable CL37 BAM */ + if (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8073_BAM, &val); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8073_BAM, val | 1); + DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); + } + if (params->loopback_mode == LOOPBACK_EXT) { + bnx2x_807x_force_10G(bp, phy); + DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); + return 0; + } else { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002); + } + if (phy->req_line_speed != SPEED_AUTO_NEG) { + if (phy->req_line_speed == SPEED_10000) { + val = (1<<7); + } else if (phy->req_line_speed == SPEED_2500) { + val = (1<<5); + /* + * Note that 2.5G works only when used with 1G + * advertisement + */ + } else + val = (1<<5); + } else { + val = 0; + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + val |= (1<<7); + + /* Note that 2.5G works only when used with 1G advertisement */ + if (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) + val |= (1<<5); + DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val); + } + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1); + + if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) && + (phy->req_line_speed == SPEED_AUTO_NEG)) || + (phy->req_line_speed == SPEED_2500)) { + u16 phy_ver; + /* Allow 2.5G for A1 and above */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, + &phy_ver); + DP(NETIF_MSG_LINK, "Add 2.5G\n"); + if (phy_ver > 0) + tmp1 |= 1; + else + tmp1 &= 0xfffe; + } else { + DP(NETIF_MSG_LINK, "Disable 2.5G\n"); + tmp1 &= 0xfffe; + } + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1); + /* Add support for CL37 (passive mode) II */ + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, + (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ? + 0x20 : 0x40))); + + /* Add support for CL37 (passive mode) III */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + + /* + * The SNR will improve about 2db by changing BW and FEE main + * tap. 
The rest of the commands are executed after link is up.
+	 * Change FFE main cursor to 5 in EDC register
+	 */
+	if (bnx2x_8073_is_snr_needed(bp, phy))
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
+				 0xFB0C);
+
+	/* Enable FEC (Forward Error Correction) Request in the AN */
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
+	tmp1 |= (1<<15);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
+
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+
+	/* Restart autoneg */
+	msleep(500);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+	DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
+	   ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
+	return 0;
+}
+
+static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_up = 0;
+	u16 val1, val2;
+	u16 link_status = 0;
+	u16 an1000_status = 0;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+	DP(NETIF_MSG_LINK, "8073 LASI status 0x%x\n", val1);
+
+	/* clear the interrupt LASI status register */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
+	/* Clear MSG-OUT */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+	/* Check the LASI */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+	DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
+	/* Check the link status */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+	DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	link_up = ((val1 & 4) == 4);
+	DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
+
+	if (link_up &&
+	    ((phy->req_line_speed != SPEED_10000))) {
+		if (bnx2x_8073_xaui_wa(bp, phy) != 0)
+			return 0;
+	}
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+
+	/* Check the link status on 1.1.2 */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
+	   " an_link_status=0x%x\n", val2, val1, an1000_status);
+
+	link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
+	if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
+		/*
+		 * The SNR will improve about 2db by changing the BW and FFE
+		 * main tap. The 1st write to change FFE main tap is set
+		 * before restart AN.
Change PLL Bandwidth in EDC register + */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, + 0x26BC); + + /* Change CDR Bandwidth in EDC register */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH, + 0x0333); + } + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, + &link_status); + + /* Bits 0..2 --> speed detected, bits 13..15--> link is down */ + if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { + link_up = 1; + vars->line_speed = SPEED_10000; + DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n", + params->port); + } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) { + link_up = 1; + vars->line_speed = SPEED_2500; + DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n", + params->port); + } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { + link_up = 1; + vars->line_speed = SPEED_1000; + DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n", + params->port); + } else { + link_up = 0; + DP(NETIF_MSG_LINK, "port %x: External link is down\n", + params->port); + } + + if (link_up) { + /* Swap polarity if required */ + if (params->lane_config & + PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { + /* Configure the 8073 to swap P and N of the KR lines */ + bnx2x_cl45_read(bp, phy, + MDIO_XS_DEVAD, + MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); + /* + * Set bit 3 to invert Rx in 1G mode and clear this bit + * when it`s in 10G mode. + */ + if (vars->line_speed == SPEED_1000) { + DP(NETIF_MSG_LINK, "Swapping 1G polarity for" + "the 8073\n"); + val1 |= (1<<3); + } else + val1 &= ~(1<<3); + + bnx2x_cl45_write(bp, phy, + MDIO_XS_DEVAD, + MDIO_XS_REG_8073_RX_CTRL_PCIE, + val1); + } + bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); + bnx2x_8073_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + } + return link_up; +} + +static void bnx2x_8073_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 gpio_port; + if (CHIP_IS_E2(bp)) + gpio_port = BP_PATH(bp); + else + gpio_port = params->port; + DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", + gpio_port); + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); +} + +/******************************************************************/ +/* BCM8705 PHY SECTION */ +/******************************************************************/ +static int bnx2x_8705_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "init 8705\n"); + /* Restore normal power mode*/ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + bnx2x_ext_phy_hw_reset(bp, params->port); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); + bnx2x_wait_reset_complete(bp, phy, params); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100); + bnx2x_cl45_write(bp, phy, + MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1); + /* BCM8705 doesn't have microcode, hence the 0 */ + bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0); + return 0; +} + +static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u8 link_up = 0; + u16 val1, rx_sd; + struct bnx2x 
*bp = params->bp; + DP(NETIF_MSG_LINK, "read status 8705\n"); + bnx2x_cl45_read(bp, phy, + MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); + DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1); + + bnx2x_cl45_read(bp, phy, + MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); + DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, 0xc809, &val1); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, 0xc809, &val1); + + DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1); + link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0)); + if (link_up) { + vars->line_speed = SPEED_10000; + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } + return link_up; +} + +/******************************************************************/ +/* SFP+ module Section */ +/******************************************************************/ +static void bnx2x_set_disable_pmd_transmit(struct link_params *params, + struct bnx2x_phy *phy, + u8 pmd_dis) +{ + struct bnx2x *bp = params->bp; + /* + * Disable transmitter only for bootcodes which can enable it afterwards + * (for D3 link) + */ + if (pmd_dis) { + if (params->feature_config_flags & + FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) + DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n"); + else { + DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n"); + return; + } + } else + DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_DISABLE, pmd_dis); +} + +static u8 bnx2x_get_gpio_port(struct link_params *params) +{ + u8 gpio_port; + u32 swap_val, swap_override; + struct bnx2x *bp = params->bp; + if (CHIP_IS_E2(bp)) + gpio_port = BP_PATH(bp); + else + gpio_port = params->port; + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + return gpio_port ^ (swap_val && swap_override); +} + +static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params, + struct bnx2x_phy *phy, + u8 tx_en) +{ + u16 val; + u8 port = params->port; + struct bnx2x *bp = params->bp; + u32 tx_en_mode; + + /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ + tx_en_mode = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].sfp_ctrl)) & + PORT_HW_CFG_TX_LASER_MASK; + DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x " + "mode = %x\n", tx_en, port, tx_en_mode); + switch (tx_en_mode) { + case PORT_HW_CFG_TX_LASER_MDIO: + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + &val); + + if (tx_en) + val &= ~(1<<15); + else + val |= (1<<15); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + val); + break; + case PORT_HW_CFG_TX_LASER_GPIO0: + case PORT_HW_CFG_TX_LASER_GPIO1: + case PORT_HW_CFG_TX_LASER_GPIO2: + case PORT_HW_CFG_TX_LASER_GPIO3: + { + u16 gpio_pin; + u8 gpio_port, gpio_mode; + if (tx_en) + gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH; + else + gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW; + + gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0; + gpio_port = bnx2x_get_gpio_port(params); + bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); + break; + } + default: + DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode); + break; + } +} + +static void bnx2x_sfp_set_transmitter(struct link_params *params, + struct bnx2x_phy *phy, + u8 tx_en) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", 
tx_en);
+	if (CHIP_IS_E3(bp))
+		bnx2x_sfp_e3_set_transmitter(params, phy, tx_en);
+	else
+		bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en);
+}
+
+static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+					     struct link_params *params,
+					     u16 addr, u8 byte_cnt, u8 *o_buf)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val = 0;
+	u16 i;
+	if (byte_cnt > 16) {
+		DP(NETIF_MSG_LINK, "Reading from eeprom"
+		   " is limited to 16 bytes\n");
+		return -EINVAL;
+	}
+	/* Set the read command byte count */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+			 (byte_cnt | 0xa000));
+
+	/* Set the read command address */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+			 addr);
+
+	/* Activate read command */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+			 0x2c0f);
+
+	/* Wait up to 500us for command complete status */
+	for (i = 0; i < 100; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+			break;
+		udelay(5);
+	}
+
+	if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+	    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+		DP(NETIF_MSG_LINK,
+		   "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+		   (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+		return -EINVAL;
+	}
+
+	/* Read the buffer */
+	for (i = 0; i < byte_cnt; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+		o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
+	}
+
+	for (i = 0; i < 100; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+			return 0;
+		msleep(1);
+	}
+	return -EINVAL;
+}
+
+static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+						 struct link_params *params,
+						 u16 addr, u8 byte_cnt,
+						 u8 *o_buf)
+{
+	int rc = 0;
+	u8 i, j = 0, cnt = 0;
+	u32 data_array[4];
+	u16 addr32;
+	struct bnx2x *bp = params->bp;
+	/*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
+	   " addr %d, cnt %d\n",
+	   addr, byte_cnt);*/
+	if (byte_cnt > 16) {
+		DP(NETIF_MSG_LINK, "Reading from eeprom"
+		   " is limited to 16 bytes\n");
+		return -EINVAL;
+	}
+
+	/* 4 byte aligned address */
+	addr32 = addr & (~0x3);
+	do {
+		rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+				    data_array);
+	} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
+
+	if (rc == 0) {
+		for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
+			o_buf[j] = *((u8 *)data_array + i);
+			j++;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+					     struct link_params *params,
+					     u16 addr, u8 byte_cnt, u8 *o_buf)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val, i;
+
+	if (byte_cnt > 16) {
+		DP(NETIF_MSG_LINK, "Reading from eeprom"
+		   " is limited to 16 bytes\n");
+		return -EINVAL;
+	}
+
+	/* Need to read from 1.8000 to clear it */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+			&val);
+
+	/* Set the read command byte count */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+			 ((byte_cnt < 2) ?
2 : byte_cnt)); + + /* Set the read command address */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, + addr); + /* Set the destination address */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + 0x8004, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); + + /* Activate read command */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + 0x8002); + /* + * Wait appropriate time for two-wire command to finish before + * polling the status register + */ + msleep(1); + + /* Wait up to 500us for command complete status */ + for (i = 0; i < 100; i++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) + break; + udelay(5); + } + + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { + DP(NETIF_MSG_LINK, + "Got bad status 0x%x when reading from SFP+ EEPROM\n", + (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); + return -EFAULT; + } + + /* Read the buffer */ + for (i = 0; i < byte_cnt; i++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); + o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); + } + + for (i = 0; i < 100; i++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) + return 0; + msleep(1); + } + + return -EINVAL; +} + +int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, u16 addr, + u8 byte_cnt, u8 *o_buf) +{ + int rc = -EINVAL; + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, + byte_cnt, o_buf); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, + byte_cnt, o_buf); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, + byte_cnt, o_buf); + break; + } + return rc; +} + +static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, + struct link_params *params, + u16 *edc_mode) +{ + struct bnx2x *bp = params->bp; + u32 sync_offset = 0, phy_idx, media_types; + u8 val, check_limiting_mode = 0; + *edc_mode = EDC_MODE_LIMITING; + + phy->media_type = ETH_PHY_UNSPECIFIED; + /* First check for copper cable */ + if (bnx2x_read_sfp_module_eeprom(phy, + params, + SFP_EEPROM_CON_TYPE_ADDR, + 1, + &val) != 0) { + DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); + return -EINVAL; + } + + switch (val) { + case SFP_EEPROM_CON_TYPE_VAL_COPPER: + { + u8 copper_module_type; + phy->media_type = ETH_PHY_DA_TWINAX; + /* + * Check if its active cable (includes SFP+ module) + * of passive cable + */ + if (bnx2x_read_sfp_module_eeprom(phy, + params, + SFP_EEPROM_FC_TX_TECH_ADDR, + 1, + &copper_module_type) != 0) { + DP(NETIF_MSG_LINK, + "Failed to read copper-cable-type" + " from SFP+ EEPROM\n"); + return -EINVAL; + } + + if (copper_module_type & + SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { + DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); + check_limiting_mode = 1; + } else if (copper_module_type & + SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { + DP(NETIF_MSG_LINK, "Passive Copper" + " cable detected\n"); + *edc_mode = + EDC_MODE_PASSIVE_DAC; + } else { + DP(NETIF_MSG_LINK, 
"Unknown copper-cable-" + "type 0x%x !!!\n", copper_module_type); + return -EINVAL; + } + break; + } + case SFP_EEPROM_CON_TYPE_VAL_LC: + phy->media_type = ETH_PHY_SFP_FIBER; + DP(NETIF_MSG_LINK, "Optic module detected\n"); + check_limiting_mode = 1; + break; + default: + DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", + val); + return -EINVAL; + } + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].media_type); + media_types = REG_RD(bp, sync_offset); + /* Update media type for non-PMF sync */ + for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { + if (&(params->phy[phy_idx]) == phy) { + media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); + media_types |= ((phy->media_type & + PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); + break; + } + } + REG_WR(bp, sync_offset, media_types); + if (check_limiting_mode) { + u8 options[SFP_EEPROM_OPTIONS_SIZE]; + if (bnx2x_read_sfp_module_eeprom(phy, + params, + SFP_EEPROM_OPTIONS_ADDR, + SFP_EEPROM_OPTIONS_SIZE, + options) != 0) { + DP(NETIF_MSG_LINK, "Failed to read Option" + " field from module EEPROM\n"); + return -EINVAL; + } + if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK)) + *edc_mode = EDC_MODE_LINEAR; + else + *edc_mode = EDC_MODE_LIMITING; + } + DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); + return 0; +} +/* + * This function read the relevant field from the module (SFP+), and verify it + * is compliant with this board + */ +static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u32 val, cmd; + u32 fw_resp, fw_cmd_param; + char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1]; + char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1]; + phy->flags &= ~FLAGS_SFP_NOT_APPROVED; + val = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+			  port_feature_config[params->port].config));
+	if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
+		DP(NETIF_MSG_LINK, "NOT enforcing module verification\n");
+		return 0;
+	}
+
+	if (params->feature_config_flags &
+	    FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
+		/* Use specific phy request */
+		cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
+	} else if (params->feature_config_flags &
+		   FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
+		/* Use first phy request only in case of non-dual media */
+		if (DUAL_MEDIA(params)) {
+			DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
+			   "verification\n");
+			return -EINVAL;
+		}
+		cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
+	} else {
+		/* No support in OPT MDL detection */
+		DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
+		   "verification\n");
+		return -EINVAL;
+	}
+
+	fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
+	fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
+	if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
+		DP(NETIF_MSG_LINK, "Approved module\n");
+		return 0;
+	}
+
+	/* format the warning message */
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
+					 SFP_EEPROM_VENDOR_NAME_ADDR,
+					 SFP_EEPROM_VENDOR_NAME_SIZE,
+					 (u8 *)vendor_name))
+		vendor_name[0] = '\0';
+	else
+		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
+					 SFP_EEPROM_PART_NO_ADDR,
+					 SFP_EEPROM_PART_NO_SIZE,
+					 (u8 *)vendor_pn))
+		vendor_pn[0] = '\0';
+	else
+		vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
+
+	netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+		   " Port %d from %s part number %s\n",
+		   params->port, vendor_name, vendor_pn);
+	phy->flags |= FLAGS_SFP_NOT_APPROVED;
+	return -EINVAL;
+}
+
+static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+						 struct link_params *params)
+
+{
+	u8 val;
+	struct bnx2x *bp = params->bp;
+	u16 timeout;
+	/*
+	 * Initialization time after hot-plug may take up to 300ms for
+	 * some phy types (e.g. JDSU)
+	 */
+
+	for (timeout = 0; timeout < 60; timeout++) {
+		if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
+		    == 0) {
+			DP(NETIF_MSG_LINK, "SFP+ module initialization "
+			   "took %d ms\n", timeout * 5);
+			return 0;
+		}
+		msleep(5);
+	}
+	return -EINVAL;
+}
+
+static void bnx2x_8727_power_module(struct bnx2x *bp,
+				    struct bnx2x_phy *phy,
+				    u8 is_power_up) {
+	/* Make sure GPIOs are not used for LED mode */
+	u16 val;
+	/*
+	 * In the GPIO register, bit 4 is used to determine if the GPIOs
+	 * are operating as INPUT or as OUTPUT.
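+	 * (Sketching the two writes done below under this encoding:
+	 * power-up writes only bit 4, leaving the pins as INPUTs so the
+	 * over-current indication can be sensed; power-down clears bit 4
+	 * and drives the power bit as an OUTPUT.)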
+	 * A value of 1 selects INPUT, and 0 selects OUTPUT.
+	 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4
+	 * val is 0.
+	 * Bits 8-9 determine the GPIOs value for INPUT in case bit 4
+	 * val is 1, where the 1st bit is the over-current (only input),
+	 * and the 2nd bit is for power (only output).
+	 *
+	 * If the NOC feature is disabled and power is up, set the GPIO
+	 * control as input to enable listening for the over-current
+	 * indication.
+	 */
+	if (phy->flags & FLAGS_NOC)
+		return;
+	if (is_power_up)
+		val = (1<<4);
+	else
+		/*
+		 * Set GPIO control to OUTPUT, and set the power bit
+		 * according to is_power_up
+		 */
+		val = (1<<1);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_GPIO_CTRL,
+			 val);
+}
+
+static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+					struct bnx2x_phy *phy,
+					u16 edc_mode)
+{
+	u16 cur_limiting_mode;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2,
+			&cur_limiting_mode);
+	DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
+	   cur_limiting_mode);
+
+	if (edc_mode == EDC_MODE_LIMITING) {
+		DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_ROM_VER2,
+				 EDC_MODE_LIMITING);
+	} else { /* LRM mode (default) */
+
+		DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
+
+		/*
+		 * Changing to LRM mode takes quite a few seconds. So do it
+		 * only if the current mode is limiting (default is LRM).
+		 */
+		if (cur_limiting_mode != EDC_MODE_LIMITING)
+			return 0;
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_LRM_MODE,
+				 0);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_ROM_VER2,
+				 0x128);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_MISC_CTRL0,
+				 0x4008);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_LRM_MODE,
+				 0xaaaa);
+	}
+	return 0;
+}
+
+static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+					struct bnx2x_phy *phy,
+					u16 edc_mode)
+{
+	u16 phy_identifier;
+	u16 rom_ver2_val;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_PHY_IDENTIFIER,
+			&phy_identifier);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_PHY_IDENTIFIER,
+			 (phy_identifier & ~(1<<9)));
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2,
+			&rom_ver2_val);
+	/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_ROM_VER2,
+			 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_PHY_IDENTIFIER,
+			 (phy_identifier | (1<<9)));
+
+	return 0;
+}
+
+static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     u32 action)
+{
+	struct bnx2x *bp = params->bp;
+
+	switch (action) {
+	case DISABLE_TX:
+		bnx2x_sfp_set_transmitter(params, phy, 0);
+		break;
+	case ENABLE_TX:
+		if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
+			bnx2x_sfp_set_transmitter(params, phy, 1);
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
+		   action);
+		return;
+	}
+}
+
+static void bnx2x_set_e1e2_module_fault_led(struct link_params *params,
+					    u8 gpio_mode)
+{
+	struct bnx2x *bp = params->bp;
+
+	u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].sfp_ctrl)) &
+		PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+	switch (fault_led_gpio) {
+	case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+		return;
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+	case
PORT_HW_CFG_FAULT_MODULE_LED_GPIO3: + { + u8 gpio_port = bnx2x_get_gpio_port(params); + u16 gpio_pin = fault_led_gpio - + PORT_HW_CFG_FAULT_MODULE_LED_GPIO0; + DP(NETIF_MSG_LINK, "Set fault module-detected led " + "pin %x port %x mode %x\n", + gpio_pin, gpio_port, gpio_mode); + bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); + } + break; + default: + DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n", + fault_led_gpio); + } +} + +static void bnx2x_set_e3_module_fault_led(struct link_params *params, + u8 gpio_mode) +{ + u32 pin_cfg; + u8 port = params->port; + struct bnx2x *bp = params->bp; + pin_cfg = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >> + PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT; + DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n", + gpio_mode, pin_cfg); + bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode); +} + +static void bnx2x_set_sfp_module_fault_led(struct link_params *params, + u8 gpio_mode) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); + if (CHIP_IS_E3(bp)) { + /* + * Low ==> if SFP+ module is supported otherwise + * High ==> if SFP+ module is not on the approved vendor list + */ + bnx2x_set_e3_module_fault_led(params, gpio_mode); + } else + bnx2x_set_e1e2_module_fault_led(params, gpio_mode); +} + +static void bnx2x_warpcore_power_module(struct link_params *params, + struct bnx2x_phy *phy, + u8 power) +{ + u32 pin_cfg; + struct bnx2x *bp = params->bp; + + pin_cfg = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_PWR_DIS_MASK) >> + PORT_HW_CFG_E3_PWR_DIS_SHIFT; + + if (pin_cfg == PIN_CFG_NA) + return; + DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", + power, pin_cfg); + /* + * Low ==> corresponding SFP+ module is powered + * high ==> the SFP+ module is powered down + */ + bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); +} + +static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + bnx2x_warpcore_power_module(params, phy, 0); +} + +static void bnx2x_power_sfp_module(struct link_params *params, + struct bnx2x_phy *phy, + u8 power) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power); + + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + bnx2x_8727_power_module(params->bp, phy, power); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + bnx2x_warpcore_power_module(params, phy, power); + break; + default: + break; + } +} +static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, + struct bnx2x_phy *phy, + u16 edc_mode) +{ + u16 val = 0; + u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; + struct bnx2x *bp = params->bp; + + u8 lane = bnx2x_get_warpcore_lane(phy, params); + /* This is a global register which controls all lanes */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + val &= ~(0xf << (lane << 2)); + + switch (edc_mode) { + case EDC_MODE_LINEAR: + case EDC_MODE_LIMITING: + mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; + break; + case EDC_MODE_PASSIVE_DAC: + mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; + break; + default: + break; + } + + val |= (mode << (lane << 2)); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val); + /* A 
must read */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + + /* Restart microcode to re-read the new mode */ + bnx2x_warpcore_reset_lane(bp, phy, 1); + bnx2x_warpcore_reset_lane(bp, phy, 0); + +} + +static void bnx2x_set_limiting_mode(struct link_params *params, + struct bnx2x_phy *phy, + u16 edc_mode) +{ + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode); + break; + } +} + +int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 edc_mode; + int rc = 0; + + u32 val = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port].config)); + + DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", + params->port); + /* Power up module */ + bnx2x_power_sfp_module(params, phy, 1); + if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { + DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); + return -EINVAL; + } else if (bnx2x_verify_sfp_module(phy, params) != 0) { + /* check SFP+ module compatibility */ + DP(NETIF_MSG_LINK, "Module verification failed!!\n"); + rc = -EINVAL; + /* Turn on fault module-detected led */ + bnx2x_set_sfp_module_fault_led(params, + MISC_REGISTERS_GPIO_HIGH); + + /* Check if need to power down the SFP+ module */ + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) { + DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n"); + bnx2x_power_sfp_module(params, phy, 0); + return rc; + } + } else { + /* Turn off fault module-detected led */ + bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); + } + + /* + * Check and set limiting mode / LRM mode on 8726. 
On 8727 it
+	 * is done automatically.
+	 */
+	bnx2x_set_limiting_mode(params, phy, edc_mode);
+
+	/*
+	 * Enable transmit for this module if the module is approved, or
+	 * if unapproved modules should also enable the Tx laser
+	 */
+	if (rc == 0 ||
+	    (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+		bnx2x_sfp_set_transmitter(params, phy, 1);
+	else
+		bnx2x_sfp_set_transmitter(params, phy, 0);
+
+	return rc;
+}
+
+void bnx2x_handle_module_detect_int(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *phy;
+	u32 gpio_val;
+	u8 gpio_num, gpio_port;
+	if (CHIP_IS_E3(bp))
+		phy = &params->phy[INT_PHY];
+	else
+		phy = &params->phy[EXT_PHY1];
+
+	if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
+				      params->port, &gpio_num, &gpio_port) ==
+	    -EINVAL) {
+		DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n");
+		return;
+	}
+
+	/* Set valid module led off */
+	bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
+
+	/* Get current gpio val reflecting module plugged in / out */
+	gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+	/* Call the handling function in case module is detected */
+	if (gpio_val == 0) {
+		bnx2x_power_sfp_module(params, phy, 1);
+		bnx2x_set_gpio_int(bp, gpio_num,
+				   MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
+				   gpio_port);
+		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+			bnx2x_sfp_module_detection(phy, params);
+		else
+			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+	} else {
+		u32 val = REG_RD(bp, params->shmem_base +
+				 offsetof(struct shmem_region, dev_info.
+					  port_feature_config[params->port].
+					  config));
+		bnx2x_set_gpio_int(bp, gpio_num,
+				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
+				   gpio_port);
+		/*
+		 * Module was plugged out.
+		 * Disable transmit for this module
+		 */
+		phy->media_type = ETH_PHY_NOT_PRESENT;
+		if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+		     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
+		    CHIP_IS_E3(bp))
+			bnx2x_sfp_set_transmitter(params, phy, 0);
+	}
+}
+
+/******************************************************************/
+/*              Used by 8706 and 8727                             */
+/******************************************************************/
+static void bnx2x_sfp_mask_fault(struct bnx2x *bp,
+				 struct bnx2x_phy *phy,
+				 u16 alarm_status_offset,
+				 u16 alarm_ctrl_offset)
+{
+	u16 alarm_status, val;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, alarm_status_offset,
+			&alarm_status);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, alarm_status_offset,
+			&alarm_status);
+	/* Mask or enable the fault event.
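+	 * If the latched status shows a fault (bit 0 set), the control
+	 * bit is cleared to mask further events; otherwise it is
+	 * re-enabled. This restates the read-modify-write done below.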
*/ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val); + if (alarm_status & (1<<0)) + val &= ~(1<<0); + else + val |= (1<<0); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val); +} +/******************************************************************/ +/* common BCM8706/BCM8726 PHY SECTION */ +/******************************************************************/ +static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u8 link_up = 0; + u16 val1, val2, rx_sd, pcs_status; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "XGXS 8706/8726\n"); + /* Clear RX Alarm*/ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); + + bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, + MDIO_PMA_LASI_TXCTRL); + + /* clear LASI indication*/ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); + DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); + bnx2x_cl45_read(bp, phy, + MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); + + DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" + " link_status 0x%x\n", rx_sd, pcs_status, val2); + /* + * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status + * are set, or if the autoneg bit 1 is set + */ + link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); + if (link_up) { + if (val2 & (1<<1)) + vars->line_speed = SPEED_1000; + else + vars->line_speed = SPEED_10000; + bnx2x_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + } + + /* Capture 10G link fault. Read twice to clear stale value. 
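+	 * (The LASI status registers are latched, so the first read may
+	 * return a stale event; the second read reflects the current
+	 * state, hence the back-to-back reads below.)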
*/ + if (vars->line_speed == SPEED_10000) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + if (val1 & (1<<0)) + vars->fault_detected = 1; + } + + return link_up; +} + +/******************************************************************/ +/* BCM8706 PHY SECTION */ +/******************************************************************/ +static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u32 tx_en_mode; + u16 cnt, val, tmp1; + struct bnx2x *bp = params->bp; + + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + bnx2x_ext_phy_hw_reset(bp, params->port); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); + bnx2x_wait_reset_complete(bp, phy, params); + + /* Wait until fw is loaded */ + for (cnt = 0; cnt < 100; cnt++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); + if (val) + break; + msleep(10); + } + DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt); + if ((params->feature_config_flags & + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + u8 i; + u16 reg; + for (i = 0; i < 4; i++) { + reg = MDIO_XS_8706_REG_BANK_RX0 + + i*(MDIO_XS_8706_REG_BANK_RX1 - + MDIO_XS_8706_REG_BANK_RX0); + bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val); + /* Clear first 3 bits of the control */ + val &= ~0x7; + /* Set control bits according to configuration */ + val |= (phy->rx_preemphasis[i] & 0x7); + DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706" + " reg 0x%x <-- val 0x%x\n", reg, val); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val); + } + } + /* Force speed */ + if (phy->req_line_speed == SPEED_10000) { + DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n"); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_DIGITAL_CTRL, 0x400); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + /* Arm LASI for link and Tx fault. 
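+	 * (The value 3 written below presumably sets the two low enable
+	 * bits, i.e. the link-alarm and Tx-fault paths named above.)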
*/ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3); + } else { + /* Force 1Gbps using autoneg with 1G advertisement */ + + /* Allow CL37 through CL73 */ + DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n"); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); + + /* Enable Full-Duplex advertisement on CL37 */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020); + /* Enable CL37 AN */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + /* 1G support */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5)); + + /* Enable clause 73 AN */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + 0x0400); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, + 0x0004); + } + bnx2x_save_bcm_spirom_ver(bp, phy, params->port); + + /* + * If TX Laser is controlled by GPIO_0, do not let PHY go into low + * power mode, if TX Laser is disabled + */ + + tx_en_mode = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) + & PORT_HW_CFG_TX_LASER_MASK; + + if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { + DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1); + tmp1 |= 0x1; + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1); + } + + return 0; +} + +static int bnx2x_8706_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + return bnx2x_8706_8726_read_status(phy, params, vars); +} + +/******************************************************************/ +/* BCM8726 PHY SECTION */ +/******************************************************************/ +static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n"); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001); +} + +static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + /* Need to wait 100ms after reset */ + msleep(100); + + /* Micro controller re-boot */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B); + + /* Set soft reset */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0001); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + + /* wait for 150ms for microcode load */ + msleep(150); + + /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0000); + + msleep(200); + bnx2x_save_bcm_spirom_ver(bp, phy, params->port); +} + +static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val1; + u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars); + if (link_up) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + &val1); + if (val1 & (1<<15)) { + DP(NETIF_MSG_LINK, "Tx is disabled\n"); + link_up = 0; + vars->line_speed = 0; + } + } + return link_up; +} + + +static int 
bnx2x_8726_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); + + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); + bnx2x_wait_reset_complete(bp, phy, params); + + bnx2x_8726_external_rom_boot(phy, params); + + /* + * Need to call module detected on initialization since the module + * detection triggered by actual module insertion might occur before + * driver is loaded, and when driver is loaded, it reset all + * registers, including the transmitter + */ + bnx2x_sfp_module_detection(phy, params); + + if (phy->req_line_speed == SPEED_1000) { + DP(NETIF_MSG_LINK, "Setting 1G force\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + 0x400); + } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); + /* Set Flow control */ + bnx2x_ext_phy_set_pause(params, phy, vars); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + /* + * Enable RX-ALARM control to receive interrupt for 1G speed + * change + */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + 0x400); + + } else { /* Default 10G. 
Set only LASI control */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1); + } + + /* Set TX PreEmphasis if needed */ + if ((params->feature_config_flags & + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x," + "TX_CTRL2 0x%x\n", + phy->tx_preemphasis[0], + phy->tx_preemphasis[1]); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TX_CTRL1, + phy->tx_preemphasis[0]); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TX_CTRL2, + phy->tx_preemphasis[1]); + } + + return 0; + +} + +static void bnx2x_8726_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port); + /* Set serial boot control for external load */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, 0x0001); +} + +/******************************************************************/ +/* BCM8727 PHY SECTION */ +/******************************************************************/ + +static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy, + struct link_params *params, u8 mode) +{ + struct bnx2x *bp = params->bp; + u16 led_mode_bitmask = 0; + u16 gpio_pins_bitmask = 0; + u16 val; + /* Only NOC flavor requires to set the LED specifically */ + if (!(phy->flags & FLAGS_NOC)) + return; + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + led_mode_bitmask = 0; + gpio_pins_bitmask = 0x03; + break; + case LED_MODE_ON: + led_mode_bitmask = 0; + gpio_pins_bitmask = 0x02; + break; + case LED_MODE_OPER: + led_mode_bitmask = 0x60; + gpio_pins_bitmask = 0x11; + break; + } + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + &val); + val &= 0xff8f; + val |= led_mode_bitmask; + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + val); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_GPIO_CTRL, + &val); + val &= 0xffe0; + val |= gpio_pins_bitmask; + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_GPIO_CTRL, + val); +} +static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, + struct link_params *params) { + u32 swap_val, swap_override; + u8 port; + /* + * The PHY reset is controlled by GPIO 1. Fake the port number + * to cancel the swap done in set_gpio() + */ + struct bnx2x *bp = params->bp; + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + port = (swap_val && swap_override) ^ 1; + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); +} + +static int bnx2x_8727_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u32 tx_en_mode; + u16 tmp1, val, mod_abs, tmp2; + u16 rx_alarm_ctrl_val; + u16 lasi_ctrl_val; + struct bnx2x *bp = params->bp; + /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ + + bnx2x_wait_reset_complete(bp, phy, params); + rx_alarm_ctrl_val = (1<<2) | (1<<5) ; + /* Should be 0x6 to enable XS on Tx side. 
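+	 * (0x0006 sets bits 1-2 of the LASI control register; bit 0 is
+	 * left clear here, presumably the Rx-side alarm path.)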
*/ + lasi_ctrl_val = 0x0006; + + DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); + /* Enable LASI */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + rx_alarm_ctrl_val); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val); + + /* + * Initially configure MOD_ABS to interrupt when the module is + * present (bit 8) + */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); + /* + * Set EDC off by setting OPTXLOS signal input to low (bit 9). + * When the EDC is off it locks onto a reference clock and avoids + * becoming 'lost' + */ + mod_abs &= ~(1<<8); + if (!(phy->flags & FLAGS_NOC)) + mod_abs &= ~(1<<9); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + + /* Enable/Disable PHY transmitter output */ + bnx2x_set_disable_pmd_transmit(params, phy, 0); + + /* Make MOD_ABS give interrupt on change */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, + &val); + val |= (1<<12); + if (phy->flags & FLAGS_NOC) + val |= (3<<5); + + /* + * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 + * status, which reflects SFP+ module over-current + */ + if (!(phy->flags & FLAGS_NOC)) + val &= 0xff8f; /* Reset bits 4-6 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val); + + bnx2x_8727_power_module(bp, phy, 1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); + + /* Set option 1G speed */ + if (phy->req_line_speed == SPEED_1000) { + DP(NETIF_MSG_LINK, "Setting 1G force\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); + DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); + /* + * Power down the XAUI until link is up in case of dual-media + * and 1G + */ + if (DUAL_MEDIA(params)) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, &val); + val |= (3<<10); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, val); + } + } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + + DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); + } else { + /* + * Since the 8727 has only a single reset pin, need to set the + * 10G registers although it is the default + */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, + 0x0020); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, + 0x0008); + } + + /* + * Set the 2-wire transfer rate of the SFP+ module EEPROM + * to 100 kHz, since some DACs (direct attach cables) do + * not work at 400 kHz.
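+ * (100 kHz is the base two-wire rate, so it is a safe common denominator.)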
+ */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, + 0xa001); + + /* Set TX PreEmphasis if needed */ + if ((params->feature_config_flags & + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n", + phy->tx_preemphasis[0], + phy->tx_preemphasis[1]); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1, + phy->tx_preemphasis[0]); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2, + phy->tx_preemphasis[1]); + } + + /* + * If TX Laser is controlled by GPIO_0, do not let PHY go into low + * power mode, if TX Laser is disabled + */ + tx_en_mode = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) + & PORT_HW_CFG_TX_LASER_MASK; + + if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { + + DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2); + tmp2 |= 0x1000; + tmp2 &= 0xFFEF; + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2); + } + + return 0; +} + +static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 mod_abs, rx_alarm_status; + u32 val = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. + config)); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); + if (mod_abs & (1<<8)) { + + /* Module is absent */ + DP(NETIF_MSG_LINK, "MOD_ABS indication " + "show module is absent\n"); + phy->media_type = ETH_PHY_NOT_PRESENT; + /* + * 1. Set mod_abs to detect next module + * presence event + * 2. Set EDC off by setting OPTXLOS signal input to low + * (bit 9). + * When the EDC is off it locks onto a reference clock and + * avoids becoming 'lost'. + */ + mod_abs &= ~(1<<8); + if (!(phy->flags & FLAGS_NOC)) + mod_abs &= ~(1<<9); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* + * Clear RX alarm since it stays up as long as + * the mod_abs wasn't changed + */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + + } else { + /* Module is present */ + DP(NETIF_MSG_LINK, "MOD_ABS indication " + "show module is present\n"); + /* + * First disable transmitter, and if the module is ok, the + * module_detection will enable it + * 1. Set mod_abs to detect next module absent event ( bit 8) + * 2. Restore the default polarity of the OPRXLOS signal and + * this signal will then correctly indicate the presence or + * absence of the Rx signal. (bit 9) + */ + mod_abs |= (1<<8); + if (!(phy->flags & FLAGS_NOC)) + mod_abs |= (1<<9); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* + * Clear RX alarm since it stays up as long as the mod_abs + * wasn't changed. 
This needs to be done before calling the + * module detection; otherwise it will clear the link update + * alarm + */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + + + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) + bnx2x_sfp_set_transmitter(params, phy, 0); + + if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) + bnx2x_sfp_module_detection(phy, params); + else + DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); + } + + DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", + rx_alarm_status); + /* No need to check link status in case of module plugged in/out */ +} + +static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) + +{ + struct bnx2x *bp = params->bp; + u8 link_up = 0, oc_port = params->port; + u16 link_status = 0; + u16 rx_alarm_status, lasi_ctrl, val1; + + /* If PHY is not initialized, do not check link status */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, + &lasi_ctrl); + if (!lasi_ctrl) + return 0; + + /* Check the LASI on Rx */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, + &rx_alarm_status); + vars->line_speed = 0; + DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status); + + bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, + MDIO_PMA_LASI_TXCTRL); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + + DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1); + + /* Clear MSG-OUT */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); + + /* + * If a module is present and there is a need to check + * for over-current + */ + if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) { + /* Check over-current using 8727 GPIO0 input */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, + &val1); + + if ((val1 & (1<<8)) == 0) { + if (!CHIP_IS_E1x(bp)) + oc_port = BP_PATH(bp) + (params->port << 1); + DP(NETIF_MSG_LINK, "8727 Power fault has been detected" + " on port %d\n", oc_port); + netdev_err(bp->dev, "Error: Power fault on Port %d has" + " been detected and the power to " + "that SFP+ module has been removed" + " to prevent failure of the card."
+ " Please remove the SFP+ module and" + " restart the system to clear this" + " error.\n", + oc_port); + /* Disable all RX_ALARMs except for mod_abs */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXCTRL, (1<<5)); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &val1); + /* Wait for module_absent_event */ + val1 |= (1<<8); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, val1); + /* Clear RX alarm */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + return 0; + } + } /* Over current check */ + + /* When module absent bit is set, check module */ + if (rx_alarm_status & (1<<5)) { + bnx2x_8727_handle_mod_abs(phy, params); + /* Enable all mod_abs and link detection bits */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + ((1<<5) | (1<<2))); + } + DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n"); + bnx2x_8727_specific_func(phy, params, ENABLE_TX); + /* If transmitter is disabled, ignore false link up indication */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1); + if (val1 & (1<<15)) { + DP(NETIF_MSG_LINK, "Tx is disabled\n"); + return 0; + } + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); + + /* + * Bits 0..2 --> speed detected, + * Bits 13..15--> link is down + */ + if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { + link_up = 1; + vars->line_speed = SPEED_10000; + DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n", + params->port); + } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { + link_up = 1; + vars->line_speed = SPEED_1000; + DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n", + params->port); + } else { + link_up = 0; + DP(NETIF_MSG_LINK, "port %x: External link is down\n", + params->port); + } + + /* Capture 10G link fault. */ + if (vars->line_speed == SPEED_10000) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + + if (val1 & (1<<0)) { + vars->fault_detected = 1; + } + } + + if (link_up) { + bnx2x_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex); + } + + if ((DUAL_MEDIA(params)) && + (phy->req_line_speed == SPEED_1000)) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, &val1); + /* + * In case of dual-media board and 1G, power up the XAUI side, + * otherwise power it down. 
For 10G it is done automatically + */ + if (link_up) + val1 &= ~(3<<10); + else + val1 |= (3<<10); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, val1); + } + return link_up; +} + +static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + + /* Enable/Disable PHY transmitter output */ + bnx2x_set_disable_pmd_transmit(params, phy, 1); + + /* Disable Transmitter */ + bnx2x_sfp_set_transmitter(params, phy, 0); + /* Clear LASI */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0); + +} + +/******************************************************************/ +/* BCM8481/BCM84823/BCM84833 PHY SECTION */ +/******************************************************************/ +static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, + struct link_params *params) +{ + u16 val, fw_ver1, fw_ver2, cnt; + u8 port; + struct bnx2x *bp = params->bp; + + port = params->port; + + /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/ + /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); + + for (cnt = 0; cnt < 100; cnt++) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); + if (val & 1) + break; + udelay(5); + } + if (cnt == 100) { + DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n"); + bnx2x_save_spirom_version(bp, port, 0, + phy->ver_addr); + return; + } + + + /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); + for (cnt = 0; cnt < 100; cnt++) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); + if (val & 1) + break; + udelay(5); + } + if (cnt == 100) { + DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n"); + bnx2x_save_spirom_version(bp, port, 0, + phy->ver_addr); + return; + } + + /* lower 16 bits of the register SPI_FW_STATUS */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); + /* upper 16 bits of register SPI_FW_STATUS */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); + + bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1, + phy->ver_addr); +} + +static void bnx2x_848xx_set_led(struct bnx2x *bp, + struct bnx2x_phy *phy) +{ + u16 val; + + /* PHYC_CTL_LED_CTL */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, &val); + val &= 0xFE00; + val |= 0x0092; + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, val); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x80); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x18); + + /* Select activity source by Tx and Rx, as suggested by PHY AE */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0006); + + /* Select the closest activity blink rate to that in 10/100/1000 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_BLINK, + 0); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); + val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ 
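+ /* Write val back: only the stretch-enable bit changed; the other LED control bits read above are preserved. */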
+ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); + + /* 'Interrupt Mask' */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + 0xFFFB, 0xFFFD); +} + +static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 autoneg_val, an_1000_val, an_10_100_val; + u16 tmp_req_line_speed; + + tmp_req_line_speed = phy->req_line_speed; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + if (phy->req_line_speed == SPEED_10000) + phy->req_line_speed = SPEED_AUTO_NEG; + + /* + * This phy uses the NIG latch mechanism since link indication + * arrives through its LED4 and not via its LASI signal, so we + * get steady signal instead of clear on read + */ + bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, + 1 << NIG_LATCH_BC_ENABLE_MI_INT); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); + + bnx2x_848xx_set_led(bp, phy); + + /* set 1000 speed advertisement */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, + &an_1000_val); + + bnx2x_ext_phy_set_pause(params, phy, vars); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_AN_ADV, + &an_10_100_val); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL, + &autoneg_val); + /* Disable forced speed */ + autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13)); + an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8)); + + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == SPEED_1000)) { + an_1000_val |= (1<<8); + autoneg_val |= (1<<9 | 1<<12); + if (phy->req_duplex == DUPLEX_FULL) + an_1000_val |= (1<<9); + DP(NETIF_MSG_LINK, "Advertising 1G\n"); + } else + an_1000_val &= ~((1<<8) | (1<<9)); + + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, + an_1000_val); + + /* set 100 speed advertisement */ + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) && + (phy->supported & + (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full)))) { + an_10_100_val |= (1<<7); + /* Enable autoneg and restart autoneg for legacy speeds */ + autoneg_val |= (1<<9 | 1<<12); + + if (phy->req_duplex == DUPLEX_FULL) + an_10_100_val |= (1<<8); + DP(NETIF_MSG_LINK, "Advertising 100M\n"); + } + /* set 10 speed advertisement */ + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && + (phy->supported & + (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full)))) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); + if (phy->req_duplex == DUPLEX_FULL) + an_10_100_val |= (1<<6); + DP(NETIF_MSG_LINK, "Advertising 10M\n"); + } + + /* Only 10/100 are allowed to work in FORCE mode */ + if ((phy->req_line_speed == SPEED_100) && + (phy->supported & + (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full))) { + autoneg_val |= (1<<13); + /* Enabled AUTO-MDIX when autoneg is disabled */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, + (1<<15 | 1<<9 | 7<<0)); + DP(NETIF_MSG_LINK, "Setting 100M force\n"); + } + if ((phy->req_line_speed == SPEED_10) && + (phy->supported & + (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full))) { + /* Enabled AUTO-MDIX when autoneg is disabled */ + bnx2x_cl45_write(bp, 
phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, + (1<<15 | 1<<9 | 7<<0)); + DP(NETIF_MSG_LINK, "Setting 10M force\n"); + } + + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV, + an_10_100_val); + + if (phy->req_duplex == DUPLEX_FULL) + autoneg_val |= (1<<8); + + /* + * Always write this if this is not 84833. + * For 84833, write it only when it's a forced speed. + */ + if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + ((autoneg_val & (1<<12)) == 0)) + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); + + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || + (phy->req_line_speed == SPEED_10000)) { + DP(NETIF_MSG_LINK, "Advertising 10G\n"); + /* Restart autoneg for 10G*/ + + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, + 0x3200); + } else + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, + 1); + + /* Save spirom version */ + bnx2x_save_848xx_spirom_version(phy, params); + + phy->req_line_speed = tmp_req_line_speed; + + return 0; +} + +static int bnx2x_8481_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + /* Restore normal power mode*/ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + + /* HW reset */ + bnx2x_ext_phy_hw_reset(bp, params->port); + bnx2x_wait_reset_complete(bp, phy, params); + + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); + return bnx2x_848xx_cmn_config_init(phy, params, vars); +} + + +#define PHY84833_HDSHK_WAIT 300 +static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u32 idx; + u32 pair_swap; + u16 val; + u16 data; + struct bnx2x *bp = params->bp; + /* Do pair swap */ + + /* Check for configuration. 
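The pair-swap value is read from the xgbt_phy_cfg word in shared memory below; zero means no swap is required.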
*/ + pair_swap = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & + PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; + + if (pair_swap == 0) + return 0; + + data = (u16)pair_swap; + + /* Write CMD_OPEN_OVERRIDE to STATUS reg */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, + PHY84833_CMD_OPEN_OVERRIDE); + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if (val == PHY84833_CMD_OPEN_FOR_CMDS) + break; + msleep(1); + } + if (idx >= PHY84833_HDSHK_WAIT) { + DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n"); + return -EINVAL; + } + + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG4, + data); + /* Issue pair swap command */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG0, + PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE); + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if ((val == PHY84833_CMD_COMPLETE_PASS) || + (val == PHY84833_CMD_COMPLETE_ERROR)) + break; + msleep(1); + } + if ((idx >= PHY84833_HDSHK_WAIT) || + (val == PHY84833_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "Pairswap: override failed.\n"); + return -EINVAL; + } + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, + PHY84833_CMD_CLEAR_COMPLETE); + DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data); + return 0; +} + + +static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, + u32 shmem_base_path[], + u32 chip_id) +{ + u32 reset_pin[2]; + u32 idx; + u8 reset_gpios; + if (CHIP_IS_E3(bp)) { + /* Assume that these will be GPIOs, not EPIOs. */ + for (idx = 0; idx < 2; idx++) { + /* Map config param to register bit. */ + reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + + offsetof(struct shmem_region, + dev_info.port_hw_config[0].e3_cmn_pin_cfg)); + reset_pin[idx] = (reset_pin[idx] & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + reset_pin[idx] -= PIN_CFG_GPIO0_P0; + reset_pin[idx] = (1 << reset_pin[idx]); + } + reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); + } else { + /* E2, look from diff place of shmem. 
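On E2 the reset-pin assignment is read from default_cfg rather than from the E3 e3_cmn_pin_cfg word.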
*/ + for (idx = 0; idx < 2; idx++) { + reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + + offsetof(struct shmem_region, + dev_info.port_hw_config[0].default_cfg)); + reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK; + reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0; + reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT; + reset_pin[idx] = (1 << reset_pin[idx]); + } + reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); + } + + return reset_gpios; +} + +static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 reset_gpios; + u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + other_shmem_base_addr)); + + u32 shmem_base_path[2]; + shmem_base_path[0] = params->shmem_base; + shmem_base_path[1] = other_shmem_base_addr; + + reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, + params->chip_id); + + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); + udelay(10); + DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n", + reset_gpios); + + return 0; +} + +static int bnx2x_84833_common_init_phy(struct bnx2x *bp, + u32 shmem_base_path[], + u32 chip_id) +{ + u8 reset_gpios; + + reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); + + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); + udelay(10); + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); + msleep(800); + DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n", + reset_gpios); + + return 0; +} + +#define PHY84833_CONSTANT_LATENCY 1193 +static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port, initialize = 1; + u16 val; + u16 temp; + u32 actual_phy_selection, cms_enable, idx; + int rc = 0; + + msleep(1); + + if (!(CHIP_IS_E1(bp))) + port = BP_PATH(bp); + else + port = params->port; + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, + port); + } else { + /* MDIO reset */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, 0x8000); + /* Bring PHY out of super isolate mode */ + bnx2x_cl45_read(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); + val &= ~MDIO_84833_SUPER_ISOLATE; + bnx2x_cl45_write(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); + } + + bnx2x_wait_reset_complete(bp, phy, params); + + /* Wait for GPHY to come out of reset */ + msleep(50); + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + bnx2x_84833_pair_swap_cfg(phy, params, vars); + + /* + * BCM84823 requires that XGXS links up first @ 10G for normal behavior + */ + temp = vars->line_speed; + vars->line_speed = SPEED_10000; + bnx2x_set_autoneg(¶ms->phy[INT_PHY], params, vars, 0); + bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars); + vars->line_speed = temp; + + /* Set dual-media configuration according to configuration */ + + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_MEDIA, &val); + val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | + MDIO_CTL_REG_84823_MEDIA_LINE_MASK | + MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | + MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK | + MDIO_CTL_REG_84823_MEDIA_FIBER_1G); + + if (CHIP_IS_E3(bp)) { + val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | + MDIO_CTL_REG_84823_MEDIA_LINE_MASK); + } else { + val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI | + 
MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L); + } + + actual_phy_selection = bnx2x_phy_selection(params); + + switch (actual_phy_selection) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + /* Do nothing. Essentially this is like the priority copper */ + break; + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER; + break; + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + /* Do nothing here. The first PHY won't be initialized at all */ + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN; + initialize = 0; + break; + } + if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000) + val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; + + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_MEDIA, val); + DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", + params->multi_phy_config, val); + + /* AutogrEEEn */ + if (params->feature_config_flags & + FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + /* Ensure that f/w is ready */ + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if (val == PHY84833_CMD_OPEN_FOR_CMDS) + break; + usleep_range(1000, 1000); + } + if (idx >= PHY84833_HDSHK_WAIT) { + DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n"); + return -EINVAL; + } + + /* Select EEE mode */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG3, + 0x2); + + /* Set Idle and Latency */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG4, + PHY84833_CONSTANT_LATENCY + 1); + + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_DATA3_REG, + PHY84833_CONSTANT_LATENCY + 1); + + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_DATA4_REG, + PHY84833_CONSTANT_LATENCY); + + /* Send EEE instruction to command register */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG0, + PHY84833_DIAG_CMD_SET_EEE_MODE); + + /* Ensure that the command has completed */ + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if ((val == PHY84833_CMD_COMPLETE_PASS) || + (val == PHY84833_CMD_COMPLETE_ERROR)) + break; + usleep_range(1000, 1000); + } + if ((idx >= PHY84833_HDSHK_WAIT) || + (val == PHY84833_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n"); + return -EINVAL; + } + + /* Reset command handler */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, + PHY84833_CMD_CLEAR_COMPLETE); + } + + if (initialize) + rc = bnx2x_848xx_cmn_config_init(phy, params, vars); + else + bnx2x_save_848xx_spirom_version(phy, params); + /* 84833 PHY has a better feature and doesn't need to support this. 
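For the BCM84823, the CMS enable is mirrored from the default_cfg shmem word into the PHY's USER_CTRL register below.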
*/ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { + cms_enable = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_CMS_MASK; + + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, &val); + if (cms_enable) + val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; + else + val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, val); + } + + return rc; +} + +static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val, val1, val2; + u8 link_up = 0; + + + /* Check 10G-BaseT link status */ + /* Check PMD signal ok */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, 0xFFFA, &val1); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, + &val2); + DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); + + /* Check link 10G */ + if (val2 & (1<<11)) { + vars->line_speed = SPEED_10000; + vars->duplex = DUPLEX_FULL; + link_up = 1; + bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); + } else { /* Check Legacy speed link */ + u16 legacy_status, legacy_speed; + + /* Enable expansion register 0x42 (Operation mode status) */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42); + + /* Get legacy speed operation status */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, + &legacy_status); + + DP(NETIF_MSG_LINK, "Legacy speed status" + " = 0x%x\n", legacy_status); + link_up = ((legacy_status & (1<<11)) == (1<<11)); + if (link_up) { + legacy_speed = (legacy_status & (3<<9)); + if (legacy_speed == (0<<9)) + vars->line_speed = SPEED_10; + else if (legacy_speed == (1<<9)) + vars->line_speed = SPEED_100; + else if (legacy_speed == (2<<9)) + vars->line_speed = SPEED_1000; + else /* Should not happen */ + vars->line_speed = 0; + + if (legacy_status & (1<<8)) + vars->duplex = DUPLEX_FULL; + else + vars->duplex = DUPLEX_HALF; + + DP(NETIF_MSG_LINK, "Link is up in %dMbps," + " is_duplex_full= %d\n", vars->line_speed, + (vars->duplex == DUPLEX_FULL)); + /* Check legacy speed AN resolution */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_STATUS, + &val); + if (val & (1<<5)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_AN_EXPANSION, + &val); + if ((val & (1<<0)) == 0) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + } + } + if (link_up) { + DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n", + vars->line_speed); + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } + + return link_up; +} + + +static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) +{ + int status = 0; + u32 spirom_ver; + spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F); + status = bnx2x_format_ver(spirom_ver, str, len); + return status; +} + +static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); + bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); +} + +static void bnx2x_8481_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + bnx2x_cl45_write(params->bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); + bnx2x_cl45_write(params->bp, phy, + MDIO_PMA_DEVAD, 
MDIO_PMA_REG_CTRL, 1); +} + +static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 port; + u16 val16; + + if (!(CHIP_IS_E1(bp))) + port = BP_PATH(bp); + else + port = params->port; + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + port); + } else { + bnx2x_cl45_read(bp, phy, + MDIO_CTL_DEVAD, + 0x400f, &val16); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, 0x800); + } +} + +static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, + struct link_params *params, u8 mode) +{ + struct bnx2x *bp = params->bp; + u16 val; + u8 port; + + if (!(CHIP_IS_E1(bp))) + port = BP_PATH(bp); + else + port = params->port; + + switch (mode) { + case LED_MODE_OFF: + + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set LED masks */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x0); + + } else { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + } + break; + case LED_MODE_FRONT_PANEL_OFF: + + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n", + port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set LED masks */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x20); + + } else { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + } + break; + case LED_MODE_ON: + + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + /* Set control reg */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + val &= 0x8000; + val |= 0x2492; + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); + + /* Set LED masks */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x20); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x20); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x0); + } else { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x20); + } + break; + + case LED_MODE_OPER: + + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set control reg */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + + if (!((val & + MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) + >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) { + DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n"); + bnx2x_cl45_write(bp, phy, + 
MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + 0xa492); + } + + /* Set LED masks */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x10); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x80); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x98); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x40); + + } else { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x80); + + /* Tell LED3 to blink on source */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + val &= ~(7<<6); + val |= (1<<6); /* A83B[8:6]= 1 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); + } + break; + } + + /* + * This is a workaround for E3+84833 until autoneg + * restart is fixed in f/w + */ + if (CHIP_IS_E3(bp)) { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, &val); + } +} + +/******************************************************************/ +/* 54618SE PHY SECTION */ +/******************************************************************/ +static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port; + u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp; + u32 cfg_pin; + + DP(NETIF_MSG_LINK, "54618SE cfg init\n"); + usleep_range(1000, 1000); + + /* This works with E3 only, no need to check the chip + before determining the port. */ + port = params->port; + + cfg_pin = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + + /* Drive pin high to bring the GPHY out of reset. */ + bnx2x_set_cfg_pin(bp, cfg_pin, 1); + + /* wait for GPHY to reset */ + msleep(50); + + /* reset phy */ + bnx2x_cl22_write(bp, phy, + MDIO_PMA_REG_CTRL, 0x8000); + bnx2x_wait_reset_complete(bp, phy, params); + + /*wait for GPHY to reset */ + msleep(50); + + /* Configure LED4: set to INTR (0x6). */ + /* Accessing shadow register 0xe. */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL2); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= ~(0xf << 4); + temp |= (0x6 << 4); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + /* Configure INTR based on link status change. */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_INTR_MASK, + ~MDIO_REG_INTR_MASK_LINK_STATUS); + + /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_AUTO_DET_MED); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD; + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + + /* Set up fc */ + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. 
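That table defines the pause resolution for the PAUSE and ASM_DIR bits advertised below.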
*/ + bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + fc_val = 0; + if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) + fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + + if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; + + /* read all advertisement */ + bnx2x_cl22_read(bp, phy, + 0x09, + &an_1000_val); + + bnx2x_cl22_read(bp, phy, + 0x04, + &an_10_100_val); + + bnx2x_cl22_read(bp, phy, + MDIO_PMA_REG_CTRL, + &autoneg_val); + + /* Disable forced speed */ + autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13)); + an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) | + (1<<11)); + + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == SPEED_1000)) { + an_1000_val |= (1<<8); + autoneg_val |= (1<<9 | 1<<12); + if (phy->req_duplex == DUPLEX_FULL) + an_1000_val |= (1<<9); + DP(NETIF_MSG_LINK, "Advertising 1G\n"); + } else + an_1000_val &= ~((1<<8) | (1<<9)); + + bnx2x_cl22_write(bp, phy, + 0x09, + an_1000_val); + bnx2x_cl22_read(bp, phy, + 0x09, + &an_1000_val); + + /* set 100 speed advertisement */ + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { + an_10_100_val |= (1<<7); + /* Enable autoneg and restart autoneg for legacy speeds */ + autoneg_val |= (1<<9 | 1<<12); + + if (phy->req_duplex == DUPLEX_FULL) + an_10_100_val |= (1<<8); + DP(NETIF_MSG_LINK, "Advertising 100M\n"); + } + + /* set 10 speed advertisement */ + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); + if (phy->req_duplex == DUPLEX_FULL) + an_10_100_val |= (1<<6); + DP(NETIF_MSG_LINK, "Advertising 10M\n"); + } + + /* Only 10/100 are allowed to work in FORCE mode */ + if (phy->req_line_speed == SPEED_100) { + autoneg_val |= (1<<13); + /* Enabled AUTO-MDIX when autoneg is disabled */ + bnx2x_cl22_write(bp, phy, + 0x18, + (1<<15 | 1<<9 | 7<<0)); + DP(NETIF_MSG_LINK, "Setting 100M force\n"); + } + if (phy->req_line_speed == SPEED_10) { + /* Enabled AUTO-MDIX when autoneg is disabled */ + bnx2x_cl22_write(bp, phy, + 0x18, + (1<<15 | 1<<9 | 7<<0)); + DP(NETIF_MSG_LINK, "Setting 10M force\n"); + } + + /* Check if we should turn on Auto-GrEEEn */ + bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp); + if (temp == MDIO_REG_GPHY_ID_54618SE) { + if (params->feature_config_flags & + FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + temp = 6; + DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); + } else { + temp = 0; + DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n"); + } + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_DATA_REG, + MDIO_REG_GPHY_EEE_ADV); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_ADDR_REG, + (0x1 << 14) | MDIO_AN_DEVAD); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_DATA_REG, + temp); + } + + bnx2x_cl22_write(bp, phy, + 0x04, + an_10_100_val | fc_val); + + if (phy->req_duplex == DUPLEX_FULL) + autoneg_val |= (1<<8); + + bnx2x_cl22_write(bp, phy, + MDIO_PMA_REG_CTRL, autoneg_val); + + return 0; +} + +static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy, + struct 
link_params *params, u8 mode) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode); + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + case LED_MODE_OPER: + case LED_MODE_ON: + default: + break; + } + return; +} + +static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin; + u8 port; + + /* + * In case of no EPIO routed to reset the GPHY, put it + * in low power mode. + */ + bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); + /* + * This works with E3 only, no need to check the chip + * before determining the port. + */ + port = params->port; + cfg_pin = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + + /* Drive pin low to put GPHY in reset. */ + bnx2x_set_cfg_pin(bp, cfg_pin, 0); +} + +static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val; + u8 link_up = 0; + u16 legacy_status, legacy_speed; + + /* Get speed operation status */ + bnx2x_cl22_read(bp, phy, + 0x19, + &legacy_status); + DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status); + + /* Read status to clear the PHY interrupt. */ + bnx2x_cl22_read(bp, phy, + MDIO_REG_INTR_STATUS, + &val); + + link_up = ((legacy_status & (1<<2)) == (1<<2)); + + if (link_up) { + legacy_speed = (legacy_status & (7<<8)); + if (legacy_speed == (7<<8)) { + vars->line_speed = SPEED_1000; + vars->duplex = DUPLEX_FULL; + } else if (legacy_speed == (6<<8)) { + vars->line_speed = SPEED_1000; + vars->duplex = DUPLEX_HALF; + } else if (legacy_speed == (5<<8)) { + vars->line_speed = SPEED_100; + vars->duplex = DUPLEX_FULL; + } + /* Omitting 100Base-T4 for now */ + else if (legacy_speed == (3<<8)) { + vars->line_speed = SPEED_100; + vars->duplex = DUPLEX_HALF; + } else if (legacy_speed == (2<<8)) { + vars->line_speed = SPEED_10; + vars->duplex = DUPLEX_FULL; + } else if (legacy_speed == (1<<8)) { + vars->line_speed = SPEED_10; + vars->duplex = DUPLEX_HALF; + } else /* Should not happen */ + vars->line_speed = 0; + + DP(NETIF_MSG_LINK, "Link is up in %dMbps," + " is_duplex_full= %d\n", vars->line_speed, + (vars->duplex == DUPLEX_FULL)); + + /* Check legacy speed AN resolution */ + bnx2x_cl22_read(bp, phy, + 0x01, + &val); + if (val & (1<<5)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + bnx2x_cl22_read(bp, phy, + 0x06, + &val); + if ((val & (1<<0)) == 0) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + + DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", + vars->line_speed); + + /* Report whether EEE is resolved. 
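The resolved-EEE status is a clause-45 register, reached through the GPHY's clause-22 address/data indirection pair below.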
*/ + bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val); + if (val == MDIO_REG_GPHY_ID_54618SE) { + if (vars->link_status & + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) + val = 0; + else { + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_ADDR_REG, + MDIO_AN_DEVAD); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_DATA_REG, + MDIO_REG_GPHY_EEE_RESOLVED); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_ADDR_REG, + (0x1 << 14) | MDIO_AN_DEVAD); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_CL45_DATA_REG, + &val); + } + DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val); + } + + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } + return link_up; +} + +static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 val; + u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; + + DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n"); + + /* Enable master/slave manual mode and set to master */ + /* mii write 9 [bits set 11 12] */ + bnx2x_cl22_write(bp, phy, 0x09, 3<<11); + + /* Force 1G and disable autoneg */ + /* set val [mii read 0] */ + /* set val [expr $val & [bits clear 6 12 13]] */ + /* set val [expr $val | [bits set 6 8]] */ + /* mii write 0 $val */ + bnx2x_cl22_read(bp, phy, 0x00, &val); + val &= ~((1<<6) | (1<<12) | (1<<13)); + val |= (1<<6) | (1<<8); + bnx2x_cl22_write(bp, phy, 0x00, val); + + /* Set external loopback and Tx using 6dB coding */ + /* mii write 0x18 7 */ + /* set val [mii read 0x18] */ + /* mii write 0x18 [expr $val | [bits set 10 15]] */ + bnx2x_cl22_write(bp, phy, 0x18, 7); + bnx2x_cl22_read(bp, phy, 0x18, &val); + bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15)); + + /* This register opens the gate for the UMAC despite its name */ + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); + + /* + * Maximum Frame Length (RW). Defines a 14-bit maximum frame + * length used by the MAC receive logic to check frames.
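+ * The 0x2710 written below is 10000 decimal, well above the standard 1518-byte maximum frame.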
+ */ + REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); +} + +/******************************************************************/ +/* SFX7101 PHY SECTION */ +/******************************************************************/ +static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + /* SFX7101_XGXS_TEST1 */ + bnx2x_cl45_write(bp, phy, + MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); +} + +static int bnx2x_7101_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u16 fw_ver1, fw_ver2, val; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n"); + + /* Restore normal power mode*/ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + bnx2x_ext_phy_hw_reset(bp, params->port); + bnx2x_wait_reset_complete(bp, phy, params); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1); + DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3)); + + bnx2x_ext_phy_set_pause(params, phy, vars); + /* Restart autoneg */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val); + val |= 0x200; + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val); + + /* Save spirom version */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2); + bnx2x_save_spirom_version(bp, params->port, + (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr); + return 0; +} + +static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 link_up; + u16 val1, val2; + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n", + val2, val1); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); + DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", + val2, val1); + link_up = ((val1 & 4) == 4); + /* if link is up print the AN outcome of the SFX7101 PHY */ + if (link_up) { + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, + &val2); + vars->line_speed = SPEED_10000; + vars->duplex = DUPLEX_FULL; + DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n", + val2, (val2 & (1<<14))); + bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } + return link_up; +} + +static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len) +{ + if (*len < 5) + return -EINVAL; + str[0] = (spirom_ver & 0xFF); + str[1] = (spirom_ver & 0xFF00) >> 8; + str[2] = (spirom_ver & 0xFF0000) >> 16; + str[3] = (spirom_ver & 0xFF000000) >> 24; + str[4] = '\0'; + *len -= 5; + return 0; +} + +void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy) +{ + u16 val, cnt; + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, &val); + + for (cnt = 0; cnt < 10; cnt++) { + msleep(50); + /* Writes a self-clearing reset */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, + (val | (1<<15))); + /* Wait for clear */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, 
&val); + + if ((val & (1<<15)) == 0) + break; + } +} + +static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy, + struct link_params *params) { + /* Low power mode is controlled by GPIO 2 */ + bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); + /* The PHY reset is controlled by GPIO 1 */ + bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); +} + +static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, + struct link_params *params, u8 mode) +{ + u16 val = 0; + struct bnx2x *bp = params->bp; + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + val = 2; + break; + case LED_MODE_ON: + val = 1; + break; + case LED_MODE_OPER: + val = 0; + break; + } + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7107_LINK_LED_CNTL, + val); +} + +/******************************************************************/ +/* STATIC PHY DECLARATION */ +/******************************************************************/ + +static struct bnx2x_phy phy_null = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN, + .addr = 0, + .def_md_devad = 0, + .flags = FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = 0, + .media_type = ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)NULL, + .read_status = (read_status_t)NULL, + .link_reset = (link_reset_t)NULL, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static struct bnx2x_phy phy_serdes = { + .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_xgxs_config_init, + .read_status = (read_status_t)bnx2x_link_settings_status, + .link_reset = (link_reset_t)bnx2x_int_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static struct bnx2x_phy phy_xgxs = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_CX4, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask 
= 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_xgxs_config_init, + .read_status = (read_status_t)bnx2x_link_settings_status, + .link_reset = (link_reset_t)bnx2x_int_link_reset, + .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; +static struct bnx2x_phy phy_warpcore = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = (FLAGS_HW_LOCK_REQUIRED | + FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_20000baseKR2_Full | + SUPPORTED_20000baseMLD2_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_UNSPECIFIED, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + /* req_duplex = */0, + /* rsrv = */0, + .config_init = (config_init_t)bnx2x_warpcore_config_init, + .read_status = (read_status_t)bnx2x_warpcore_read_status, + .link_reset = (link_reset_t)bnx2x_warpcore_link_reset, + .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + + +static struct bnx2x_phy phy_7101 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_7101_config_init, + .read_status = (read_status_t)bnx2x_7101_read_status, + .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, + .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback, + .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver, + .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset, + .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led, + .phy_specific_func = (phy_specific_func_t)NULL +}; +static struct bnx2x_phy phy_8073 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_HW_LOCK_REQUIRED, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_KR, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8073_config_init, + .read_status = (read_status_t)bnx2x_8073_read_status, + .link_reset = (link_reset_t)bnx2x_8073_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver 
= (format_fw_ver_t)bnx2x_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; +static struct bnx2x_phy phy_8705 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_XFP_FIBER, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8705_config_init, + .read_status = (read_status_t)bnx2x_8705_read_status, + .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; +static struct bnx2x_phy phy_8706 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, + .addr = 0xff, + .def_md_devad = 0, + .flags = (FLAGS_INIT_XGXS_FIRST | + FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_SFP_FIBER, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8706_config_init, + .read_status = (read_status_t)bnx2x_8706_read_status, + .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static struct bnx2x_phy phy_8726 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, + .addr = 0xff, + .def_md_devad = 0, + .flags = (FLAGS_HW_LOCK_REQUIRED | + FLAGS_INIT_XGXS_FIRST | + FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8726_config_init, + .read_status = (read_status_t)bnx2x_8726_read_status, + .link_reset = (link_reset_t)bnx2x_8726_link_reset, + .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback, + .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static struct bnx2x_phy phy_8727 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, + .addr = 0xff, + .def_md_devad = 0, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + 
SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8727_config_init, + .read_status = (read_status_t)bnx2x_8727_read_status, + .link_reset = (link_reset_t)bnx2x_8727_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, + .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset, + .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led, + .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func +}; +static struct bnx2x_phy phy_8481 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8481_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_8481_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static struct bnx2x_phy phy_84823 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_848x3_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static struct bnx2x_phy phy_84833 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + 
+	.media_type = ETH_PHY_BASE_T,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_848x3_config_init,
+	.read_status = (read_status_t)bnx2x_848xx_read_status,
+	.link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+	.set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_54618se = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
+	.addr = 0xff,
+	.def_md_devad = 0,
+	.flags = FLAGS_INIT_XGXS_FIRST,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10baseT_Half |
+		      SUPPORTED_10baseT_Full |
+		      SUPPORTED_100baseT_Half |
+		      SUPPORTED_100baseT_Full |
+		      SUPPORTED_1000baseT_Full |
+		      SUPPORTED_TP |
+		      SUPPORTED_Autoneg |
+		      SUPPORTED_Pause |
+		      SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_BASE_T,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	/* req_duplex = */0,
+	/* rsrv = */0,
+	.config_init = (config_init_t)bnx2x_54618se_config_init,
+	.read_status = (read_status_t)bnx2x_54618se_read_status,
+	.link_reset = (link_reset_t)bnx2x_54618se_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
+	.format_fw_ver = (format_fw_ver_t)NULL,
+	.hw_reset = (hw_reset_t)NULL,
+	.set_link_led = (set_link_led_t)bnx2x_54618se_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+/*****************************************************************/
+/*                                                               */
+/* Populate the phy according to the shmem configuration.        */
+/* Main function: bnx2x_populate_phy                             */
+/*                                                               */
+/*****************************************************************/
+
+static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
+				       struct bnx2x_phy *phy, u8 port,
+				       u8 phy_index)
+{
+	/* Get the 4 lanes xgxs config rx and tx */
+	u32 rx = 0, tx = 0, i;
+	for (i = 0; i < 2; i++) {
+		/*
+		 * INT_PHY and EXT_PHY1 share the same value location in the
+		 * shmem. When num_phys is greater than 1, then this value
+		 * applies only to EXT_PHY1
+		 */
+		if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
+			rx = REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+		} else {
+			rx = REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
+		}
+
+		phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
+		phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
+
+		phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
+		phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+	}
+}
+
+static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
+				    u8 phy_index, u8 port)
+{
+	u32 ext_phy_config = 0;
+	switch (phy_index) {
+	case EXT_PHY1:
+		ext_phy_config = REG_RD(bp, shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].external_phy_config));
+		break;
+	case EXT_PHY2:
+		ext_phy_config = REG_RD(bp, shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].external_phy_config2));
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
+		return -EINVAL;
+	}
+
+	return ext_phy_config;
+}
+static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+				  struct bnx2x_phy *phy)
+{
+	u32 phy_addr;
+	u32 chip_id;
+	u32 switch_cfg = (REG_RD(bp, shmem_base +
+		offsetof(struct shmem_region,
+		dev_info.port_feature_config[port].link_config)) &
+		PORT_FEATURE_CONNECTED_SWITCH_MASK);
+	chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+	DP(NETIF_MSG_LINK, "chip_id = 0x%x\n", chip_id);
+	if (USES_WARPCORE(bp)) {
+		u32 serdes_net_if;
+		phy_addr = REG_RD(bp,
+				  MISC_REG_WC0_CTRL_PHY_ADDR);
+		*phy = phy_warpcore;
+		if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
+			phy->flags |= FLAGS_4_PORT_MODE;
+		else
+			phy->flags &= ~FLAGS_4_PORT_MODE;
+		/* Check Dual mode */
+		serdes_net_if = (REG_RD(bp, shmem_base +
+			offsetof(struct shmem_region, dev_info.
+ port_hw_config[port].default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + /* + * Set the appropriate supported and flags indications per + * interface type of the chip + */ + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_SGMII: + phy->supported &= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phy->media_type = ETH_PHY_BASE_T; + break; + case PORT_HW_CFG_NET_SERDES_IF_XFI: + phy->media_type = ETH_PHY_XFP_FIBER; + break; + case PORT_HW_CFG_NET_SERDES_IF_SFI: + phy->supported &= (SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phy->media_type = ETH_PHY_SFP_FIBER; + break; + case PORT_HW_CFG_NET_SERDES_IF_KR: + phy->media_type = ETH_PHY_KR; + phy->supported &= (SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + case PORT_HW_CFG_NET_SERDES_IF_DXGXS: + phy->media_type = ETH_PHY_KR; + phy->flags |= FLAGS_WC_DUAL_MODE; + phy->supported &= (SUPPORTED_20000baseMLD2_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + case PORT_HW_CFG_NET_SERDES_IF_KR2: + phy->media_type = ETH_PHY_KR; + phy->flags |= FLAGS_WC_DUAL_MODE; + phy->supported &= (SUPPORTED_20000baseKR2_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + default: + DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n", + serdes_net_if); + break; + } + + /* + * Enable MDC/MDIO work-around for E3 A0 since free running MDC + * was not set as expected. For B0, ECO will be enabled so there + * won't be an issue there + */ + if (CHIP_REV(bp) == CHIP_REV_Ax) + phy->flags |= FLAGS_MDC_MDIO_WA; + else + phy->flags |= FLAGS_MDC_MDIO_WA_B0; + } else { + switch (switch_cfg) { + case SWITCH_CFG_1G: + phy_addr = REG_RD(bp, + NIG_REG_SERDES0_CTRL_PHY_ADDR + + port * 0x10); + *phy = phy_serdes; + break; + case SWITCH_CFG_10G: + phy_addr = REG_RD(bp, + NIG_REG_XGXS0_CTRL_PHY_ADDR + + port * 0x18); + *phy = phy_xgxs; + break; + default: + DP(NETIF_MSG_LINK, "Invalid switch_cfg\n"); + return -EINVAL; + } + } + phy->addr = (u8)phy_addr; + phy->mdio_ctrl = bnx2x_get_emac_base(bp, + SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH, + port); + if (CHIP_IS_E2(bp)) + phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR; + else + phy->def_md_devad = DEFAULT_PHY_DEV_ADDR; + + DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n", + port, phy->addr, phy->mdio_ctrl); + + bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY); + return 0; +} + +static int bnx2x_populate_ext_phy(struct bnx2x *bp, + u8 phy_index, + u32 shmem_base, + u32 shmem2_base, + u8 port, + struct bnx2x_phy *phy) +{ + u32 ext_phy_config, phy_type, config2; + u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH; + ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base, + phy_index, port); + phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); + /* Select the phy type */ + switch (phy_type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED; + *phy = phy_8073; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: + *phy = phy_8705; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: + *phy = phy_8706; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; + 
*phy = phy_8726;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+		/* BCM8727_NOC => BCM8727 no over current */
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8727;
+		phy->flags |= FLAGS_NOC;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8727;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+		*phy = phy_8481;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+		*phy = phy_84823;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		*phy = phy_84833;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
+		*phy = phy_54618se;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+		*phy = phy_7101;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+		*phy = phy_null;
+		return -EINVAL;
+	default:
+		*phy = phy_null;
+		return 0;
+	}
+
+	phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
+	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
+
+	/*
+	 * The shmem address of the phy version is located in different
+	 * structures. In case this structure is too old, do not set
+	 * the address
+	 */
+	config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
+					dev_info.shared_hw_config.config2));
+	if (phy_index == EXT_PHY1) {
+		phy->ver_addr = shmem_base + offsetof(struct shmem_region,
+				port_mb[port].ext_phy_fw_version);
+
+		/* Check specific mdc mdio settings */
+		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+			mdc_mdio_access = config2 &
+			SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+	} else {
+		u32 size = REG_RD(bp, shmem2_base);
+
+		if (size >
+		    offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+			phy->ver_addr = shmem2_base +
+			    offsetof(struct shmem2_region,
+				     ext_phy_fw_version2[port]);
+		}
+		/* Check specific mdc mdio settings */
+		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+			mdc_mdio_access = (config2 &
+			SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
+			(SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+			 SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
+	}
+	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+
+	/*
+	 * In case mdc/mdio_access of the external phy is different than the
+	 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
+	 * to prevent one port interfering with another port's CL45
+	 * operations.
+	 */
+	if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
+		phy->flags |= FLAGS_HW_LOCK_REQUIRED;
+	DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
+		   phy_type, port, phy_index);
+	DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
+		   phy->addr, phy->mdio_ctrl);
+	return 0;
+}
+
+static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+			      u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
+{
+	int status = 0;
+	phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
+	if (phy_index == INT_PHY)
+		return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
+	status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+					port, phy);
+	return status;
+}
+
+static void bnx2x_phy_def_cfg(struct link_params *params,
+			      struct bnx2x_phy *phy,
+			      u8 phy_index)
+{
+	struct bnx2x *bp = params->bp;
+	u32 link_config;
+	/* Populate the default phy configuration for MF mode */
+	if (phy_index == EXT_PHY2) {
+		link_config = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region, dev_info.
+			port_feature_config[params->port].link_config2));
+		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+				 dev_info.
+			port_hw_config[params->port].speed_capability_mask2));
+	} else {
+		link_config = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region, dev_info.
+			port_feature_config[params->port].link_config));
+		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+				 dev_info.
+			port_hw_config[params->port].speed_capability_mask));
+	}
+	DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
+		   " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
+
+	phy->req_duplex = DUPLEX_FULL;
+	switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+	case PORT_FEATURE_LINK_SPEED_10M_HALF:
+		phy->req_duplex = DUPLEX_HALF;
+	case PORT_FEATURE_LINK_SPEED_10M_FULL:
+		phy->req_line_speed = SPEED_10;
+		break;
+	case PORT_FEATURE_LINK_SPEED_100M_HALF:
+		phy->req_duplex = DUPLEX_HALF;
+	case PORT_FEATURE_LINK_SPEED_100M_FULL:
+		phy->req_line_speed = SPEED_100;
+		break;
+	case PORT_FEATURE_LINK_SPEED_1G:
+		phy->req_line_speed = SPEED_1000;
+		break;
+	case PORT_FEATURE_LINK_SPEED_2_5G:
+		phy->req_line_speed = SPEED_2500;
+		break;
+	case PORT_FEATURE_LINK_SPEED_10G_CX4:
+		phy->req_line_speed = SPEED_10000;
+		break;
+	default:
+		phy->req_line_speed = SPEED_AUTO_NEG;
+		break;
+	}
+
+	switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
+	case PORT_FEATURE_FLOW_CONTROL_AUTO:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_TX:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_RX:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_BOTH:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+		break;
+	default:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+		break;
+	}
+}
+
+u32 bnx2x_phy_selection(struct link_params *params)
+{
+	u32 phy_config_swapped, prio_cfg;
+	u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
+
+	phy_config_swapped = params->multi_phy_config &
+		PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+	prio_cfg = params->multi_phy_config &
+			PORT_HW_CFG_PHY_SELECTION_MASK;
+
+	if (phy_config_swapped) {
+		switch (prio_cfg) {
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+			return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+			break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+			return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+			break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+			return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+			break;
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+			return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+			break;
+		}
+	} else
+		return_cfg = prio_cfg;
+
+	return return_cfg;
+}
+
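+/*
+ * Illustrative example (editorial sketch, not part of the original
+ * patch): with PORT_HW_CFG_PHY_SWAPPED_ENABLED set in
+ * params->multi_phy_config, a configured selection of
+ * PORT_HW_CFG_PHY_SELECTION_FIRST_PHY is reported back as
+ * PORT_HW_CFG_PHY_SELECTION_SECOND_PHY (and vice versa), so callers
+ * always see the selection in terms of the physical phy order:
+ *
+ *	params->multi_phy_config = PORT_HW_CFG_PHY_SWAPPED_ENABLED |
+ *				   PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ *	sel = bnx2x_phy_selection(params);
+ *	now sel == PORT_HW_CFG_PHY_SELECTION_SECOND_PHY
+ */
+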
+int bnx2x_phy_probe(struct link_params *params)
+{
+	u8 phy_index, actual_phy_idx, link_cfg_idx;
+	u32 phy_config_swapped, sync_offset, media_types;
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *phy;
+	params->num_phys = 0;
+	DP(NETIF_MSG_LINK, "Begin phy probe\n");
+	phy_config_swapped = params->multi_phy_config &
+		PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	      phy_index++) {
+		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+		actual_phy_idx = phy_index;
+		if (phy_config_swapped) {
+			if (phy_index == EXT_PHY1)
+				actual_phy_idx = EXT_PHY2;
+			else if (phy_index == EXT_PHY2)
+				actual_phy_idx = EXT_PHY1;
+		}
+		DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
+			   " actual_phy_idx %x\n", phy_config_swapped,
+			   phy_index, actual_phy_idx);
+		phy = &params->phy[actual_phy_idx];
+		if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
+				       params->shmem2_base, params->port,
+				       phy) != 0) {
+			params->num_phys = 0;
+			DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
+			   phy_index);
+			for (phy_index = INT_PHY;
+			      phy_index < MAX_PHYS;
+			      phy_index++)
+				params->phy[phy_index] = phy_null;
+			return -EINVAL;
+		}
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
+			break;
+
+		sync_offset = params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].media_type);
+		media_types = REG_RD(bp, sync_offset);
+
+		/*
+		 * Update media type for non-PMF sync only for the first time.
+		 * In case the media type changes afterwards, it will be
+		 * updated using the update_status function
+		 */
+		if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+				    (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+				     actual_phy_idx))) == 0) {
+			media_types |= ((phy->media_type &
+					PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+				 actual_phy_idx));
+		}
+		REG_WR(bp, sync_offset, media_types);
+
+		bnx2x_phy_def_cfg(params, phy, phy_index);
+		params->num_phys++;
+	}
+
+	DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
+	return 0;
+}
+
+void bnx2x_init_bmac_loopback(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_10000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_BMAC;
+
+	vars->phy_flags = PHY_XGXS_FLAG;
+
+	bnx2x_xgxs_deassert(params);
+
+	/* set bmac loopback */
+	bnx2x_bmac_enable(params, vars, 1);
+
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_emac_loopback(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_1000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_EMAC;
+
+	vars->phy_flags = PHY_XGXS_FLAG;
+
+	bnx2x_xgxs_deassert(params);
+	/* set emac loopback */
+	bnx2x_emac_enable(params, vars, 1);
+	bnx2x_emac_program(params, vars);
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_xmac_loopback(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	if (!params->req_line_speed[0])
+		vars->line_speed = SPEED_10000;
+	else
+		vars->line_speed = params->req_line_speed[0];
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_XMAC;
+	vars->phy_flags = PHY_XGXS_FLAG;
+	/*
+	 * Set WC to loopback mode since link is required to provide clock
+	 * to the XMAC in 20G mode
+	 */
+	bnx2x_set_aer_mmd(params, &params->phy[0]);
+	bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
+	params->phy[INT_PHY].config_loopback(
+			&params->phy[INT_PHY],
+			params);
+
+	bnx2x_xmac_enable(params, vars, 1);
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_umac_loopback(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_1000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_UMAC;
+	vars->phy_flags = PHY_XGXS_FLAG;
+	bnx2x_umac_enable(params, vars, 1);
+
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_xgxs_loopback(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->duplex = DUPLEX_FULL;
+	if (params->req_line_speed[0] == SPEED_1000)
+		vars->line_speed = SPEED_1000;
+	else
+		vars->line_speed = SPEED_10000;
+
+	if (!USES_WARPCORE(bp))
+		bnx2x_xgxs_deassert(params);
+	bnx2x_link_initialize(params, vars);
+
+	if (params->req_line_speed[0] == SPEED_1000) {
+		if (USES_WARPCORE(bp))
+			bnx2x_umac_enable(params, vars, 0);
+		else {
+			bnx2x_emac_program(params, vars);
+			bnx2x_emac_enable(params, vars, 0);
+		}
+	} else {
+		if (USES_WARPCORE(bp))
+			bnx2x_xmac_enable(params, vars, 0);
+		else
+			bnx2x_bmac_enable(params, vars, 0);
+	}
+
+	if (params->loopback_mode == LOOPBACK_XGXS) {
+		/* set 10G XGXS loopback */
+		params->phy[INT_PHY].config_loopback(
+			&params->phy[INT_PHY],
+			params);
+
+	} else {
+		/* set external phy loopback */
+		u8 phy_index;
+		for (phy_index = EXT_PHY1;
+		      phy_index < params->num_phys; phy_index++) {
+			if (params->phy[phy_index].config_loopback)
+				params->phy[phy_index].config_loopback(
+					&params->phy[phy_index],
+					params);
+		}
+	}
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+}
+
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Phy Initialization started\n");
+	DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
+		   params->req_line_speed[0], params->req_flow_ctrl[0]);
+	DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
+		   params->req_line_speed[1], params->req_flow_ctrl[1]);
+	vars->link_status = 0;
+	vars->phy_link_up = 0;
+	vars->link_up = 0;
+	vars->line_speed = 0;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_NONE;
+	vars->phy_flags = 0;
+
+	/* disable attentions */
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	bnx2x_emac_init(params, vars);
+
+	if (params->num_phys == 0) {
+		DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
+		return -EINVAL;
+	}
+	set_phy_vars(params, vars);
+
+	DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
+	switch (params->loopback_mode) {
+	case LOOPBACK_BMAC:
+		bnx2x_init_bmac_loopback(params, vars);
+		break;
+	case LOOPBACK_EMAC:
+		bnx2x_init_emac_loopback(params, vars);
+		break;
+	case LOOPBACK_XMAC:
+		bnx2x_init_xmac_loopback(params, vars);
+		break;
+	case LOOPBACK_UMAC:
+		bnx2x_init_umac_loopback(params, vars);
+		break;
+	case LOOPBACK_XGXS:
+	case LOOPBACK_EXT_PHY:
+		bnx2x_init_xgxs_loopback(params, vars);
+		break;
+	default:
+		if (!CHIP_IS_E3(bp)) {
+			if (params->switch_cfg == SWITCH_CFG_10G)
+				bnx2x_xgxs_deassert(params);
+			else
+				bnx2x_serdes_deassert(bp, params->port);
+		}
+		bnx2x_link_initialize(params, vars);
+		msleep(30);
+		bnx2x_link_int_enable(params);
+		break;
+	}
+	return 0;
+}
+
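+/*
+ * Typical call flow (editorial sketch, not part of the original
+ * patch): a driver probes the phys once, initializes the link, and
+ * tears it down symmetrically, roughly as
+ *
+ *	bnx2x_phy_probe(&params);
+ *	bnx2x_phy_init(&params, &vars);
+ *	...
+ *	bnx2x_link_reset(&params, &vars, 1);
+ *
+ * where params/vars are the link_params/link_vars pair owned by the
+ * caller and reset_ext_phy = 1 also resets the external phys.
+ */
+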
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+		     u8 reset_ext_phy)
+{
+	struct bnx2x *bp = params->bp;
+	u8 phy_index, port = params->port, clear_latch_ind = 0;
+	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
+	/* disable attentions */
+	vars->link_status = 0;
+	bnx2x_update_mng(params, vars->link_status);
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	/* activate nig drain */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+	/* disable nig egress interface */
+	if (!CHIP_IS_E3(bp)) {
+		REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
+		REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+	}
+
+	/* Stop BigMac rx */
+	if (!CHIP_IS_E3(bp))
+		bnx2x_bmac_rx_disable(bp, port);
+	else
+		bnx2x_xmac_disable(params);
+	/* disable emac */
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+	msleep(10);
+	/*
+	 * The PHY reset is controlled by GPIO 1.
+	 * Hold it low.
+	 */
+	/* clear link led */
+	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+
+	if (reset_ext_phy) {
+		bnx2x_set_mdio_clk(bp, params->chip_id, port);
+		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+		      phy_index++) {
+			if (params->phy[phy_index].link_reset) {
+				bnx2x_set_aer_mmd(params,
+						  &params->phy[phy_index]);
+				params->phy[phy_index].link_reset(
+					&params->phy[phy_index],
+					params);
+			}
+			if (params->phy[phy_index].flags &
+			    FLAGS_REARM_LATCH_SIGNAL)
+				clear_latch_ind = 1;
+		}
+	}
+
+	if (clear_latch_ind) {
+		/* Clear latching indication */
+		bnx2x_rearm_latch_signal(bp, port, 0);
+		bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+			       1 << NIG_LATCH_BC_ENABLE_MI_INT);
+	}
+	if (params->phy[INT_PHY].link_reset)
+		params->phy[INT_PHY].link_reset(
+			&params->phy[INT_PHY], params);
+	/* reset BigMac */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+	/* disable nig ingress interface */
+	if (!CHIP_IS_E3(bp)) {
+		REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
+		REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
+	}
+	vars->link_up = 0;
+	vars->phy_flags = 0;
+	return 0;
+}
+
+/****************************************************************************/
+/*				Common function				    */
+/****************************************************************************/
+static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
+{
+	struct bnx2x_phy phy[PORT_MAX];
+	struct bnx2x_phy *phy_blk[PORT_MAX];
+	u16 val;
+	s8 port = 0;
+	s8 port_of_path = 0;
+	u32 swap_val, swap_override;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	port ^= (swap_val && swap_override);
+	bnx2x_ext_phy_hw_reset(bp, port);
+	/* PART1 - Reset both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		u32 shmem_base, shmem2_base;
+		/* In E2, same phy is used for port0 of the two paths */
+		if (CHIP_IS_E1x(bp)) {
+			shmem_base = shmem_base_path[0];
+			shmem2_base = shmem2_base_path[0];
+			port_of_path = port;
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+			port_of_path = 0;
+		}
+
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port_of_path, &phy[port]) !=
+		    0) {
+			DP(NETIF_MSG_LINK, "populate_phy failed\n");
+			return -EINVAL;
+		}
+		/* disable attentions */
+		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+			       port_of_path*4,
+			       (NIG_MASK_XGXS0_LINK_STATUS |
+				NIG_MASK_XGXS0_LINK10G |
+				NIG_MASK_SERDES0_LINK_STATUS |
+				NIG_MASK_MI_INT));
+
+		/* Need to take the phy out of low power mode in order
+		   to access its registers */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+			       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+			       port);
+
+		/* Reset the phy */
+		bnx2x_cl45_write(bp, &phy[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_CTRL,
+				 1<<15);
+	}
+
+	/* Add delay of 150ms after reset */
+	msleep(150);
+
+	if (phy[PORT_0].addr & 0x1) {
+		phy_blk[PORT_0] = &(phy[PORT_1]);
+		phy_blk[PORT_1] = &(phy[PORT_0]);
+	} else {
+		phy_blk[PORT_0] = &(phy[PORT_0]);
+		phy_blk[PORT_1] = &(phy[PORT_1]);
+	}
+
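+	/*
+	 * Editorial sketch (not part of the original patch): PART2 and
+	 * PART3 below both use the Clause-45 read-modify-write pattern
+	 * on MDIO_PMA_REG_TX_POWER_DOWN. Factored out, the pattern
+	 * would look roughly like this, assuming the bnx2x_cl45_read /
+	 * bnx2x_cl45_write signatures used throughout this file:
+	 *
+	 *	u16 val;
+	 *	bnx2x_cl45_read(bp, phy_blk[port], MDIO_PMA_DEVAD,
+	 *			MDIO_PMA_REG_TX_POWER_DOWN, &val);
+	 *	val = (val | set_bits) & ~clear_bits;
+	 *	bnx2x_cl45_write(bp, phy_blk[port], MDIO_PMA_DEVAD,
+	 *			 MDIO_PMA_REG_TX_POWER_DOWN, val);
+	 *
+	 * with set_bits = 1<<10 in PART2 (Tx power down) and
+	 * clear_bits = 1<<10 in PART3 (release Tx power down);
+	 * set_bits/clear_bits are hypothetical names for illustration.
+	 */
+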
+	/* PART2 - Download firmware to both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		if (CHIP_IS_E1x(bp))
+			port_of_path = port;
+		else
+			port_of_path = 0;
+
+		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+			   phy_blk[port]->addr);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
+			return -EINVAL;
+
+		/* Only set bit 10 = 1 (Tx power down) */
+		bnx2x_cl45_read(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+		/* Phase1 of TX_POWER_DOWN reset */
+		bnx2x_cl45_write(bp, phy_blk[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_TX_POWER_DOWN,
+				 (val | 1<<10));
+	}
+
+	/*
+	 * Toggle Transmitter: Power down and then up with 600ms delay
+	 * between
+	 */
+	msleep(600);
+
+	/* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		/* Phase2 of POWER_DOWN_RESET */
+		/* Release bit 10 (Release Tx power down) */
+		bnx2x_cl45_read(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+		bnx2x_cl45_write(bp, phy_blk[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+		msleep(15);
+
+		/* Read modify write the SPI-ROM version select register */
+		bnx2x_cl45_read(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+		bnx2x_cl45_write(bp, phy_blk[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+
+		/* set GPIO2 back to LOW */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+	}
+	return 0;
+}
+static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
+{
+	u32 val;
+	s8 port;
+	struct bnx2x_phy phy;
+	/* Use port1 because of the static port-swap */
+	/* Enable the module detection interrupt */
+	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+	val |= ((1<<MISC_REGISTERS_GPIO_3)|
+		(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+
+	bnx2x_ext_phy_hw_reset(bp, 1);
+	msleep(5);
+	for (port = 0; port < PORT_MAX; port++) {
+		u32 shmem_base, shmem2_base;
+
+		/* In E2, same phy is used for port0 of the two paths */
+		if (CHIP_IS_E1x(bp)) {
+			shmem_base = shmem_base_path[0];
+			shmem2_base = shmem2_base_path[0];
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+		}
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy) !=
+		    0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return -EINVAL;
+		}
+
+		/* Reset phy*/
+		bnx2x_cl45_write(bp, &phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+		/* Set fault module detected LED on */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+			       MISC_REGISTERS_GPIO_HIGH,
+			       port);
+	}
+
+	return 0;
+}
+
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+					 u8 *io_gpio, u8 *io_port)
+{
+	u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[PORT_0].default_cfg));
+	switch (phy_gpio_reset) {
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+		*io_gpio = 0;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+		*io_gpio = 1;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+		*io_gpio = 2;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+		*io_gpio = 3;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+		*io_gpio = 0;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+		*io_gpio = 1;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+		*io_gpio = 2;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+		*io_gpio = 3;
+		*io_port = 1;
+		break;
+	default:
+		/* Don't override the io_gpio and io_port */
+		break;
+	}
+}
+
+static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
+{
+	s8 port, reset_gpio;
+	u32 swap_val, swap_override;
+	struct bnx2x_phy phy[PORT_MAX];
+	struct bnx2x_phy *phy_blk[PORT_MAX];
+	s8 port_of_path;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+
+	reset_gpio = MISC_REGISTERS_GPIO_1;
+	port = 1;
+
+	/*
+	 * Retrieve the reset gpio/port which control the reset.
+	 * Default is GPIO1, PORT1
+	 */
+	bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+				     (u8 *)&reset_gpio, (u8 *)&port);
+
+	/* Calculate the port based on port swap */
+	port ^= (swap_val && swap_override);
+
+	/* Initiate PHY reset*/
+	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       port);
+	msleep(1);
+	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+		       port);
+
+	msleep(5);
+
+	/* PART1 - Reset both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		u32 shmem_base, shmem2_base;
+
+		/* In E2, same phy is used for port0 of the two paths */
+		if (CHIP_IS_E1x(bp)) {
+			shmem_base = shmem_base_path[0];
+			shmem2_base = shmem2_base_path[0];
+			port_of_path = port;
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+			port_of_path = 0;
+		}
+
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port_of_path, &phy[port]) !=
+		    0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return -EINVAL;
+		}
+		/* disable attentions */
+		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+			       port_of_path*4,
+			       (NIG_MASK_XGXS0_LINK_STATUS |
+				NIG_MASK_XGXS0_LINK10G |
+				NIG_MASK_SERDES0_LINK_STATUS |
+				NIG_MASK_MI_INT));
+
+		/* Reset the phy */
+		bnx2x_cl45_write(bp, &phy[port],
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+	}
+
+	/* Add delay of 150ms after reset */
+	msleep(150);
+	if (phy[PORT_0].addr & 0x1) {
+		phy_blk[PORT_0] = &(phy[PORT_1]);
+		phy_blk[PORT_1] = &(phy[PORT_0]);
+	} else {
+		phy_blk[PORT_0] = &(phy[PORT_0]);
+		phy_blk[PORT_1] = &(phy[PORT_1]);
+	}
+	/* PART2 - Download firmware to both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		if (CHIP_IS_E1x(bp))
+			port_of_path = port;
+		else
+			port_of_path = 0;
+		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+			   phy_blk[port]->addr);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
+			return -EINVAL;
+		/* Disable PHY transmitter output */
+		bnx2x_cl45_write(bp, phy_blk[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_TX_DISABLE, 1);
+
+	}
+	return 0;
+}
+
+static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
+				     u32 shmem2_base_path[], u8 phy_index,
+				     u32 ext_phy_type, u32 chip_id)
+{
+	int rc = 0;
+
+	switch (ext_phy_type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+		rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+		rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
+		break;
+
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		/*
+		 * GPIO1 affects both ports, so there's a need to pull
+		 * it for a single port alone
+		 */
+		rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		/*
+		 * GPIO3's are linked, and so both need to be toggled
+		 * to obtain the required 2us pulse.
+		 */
+		rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+		rc = -EINVAL;
+		break;
+	default:
+		DP(NETIF_MSG_LINK,
+			   "ext_phy 0x%x common init not required\n",
+			   ext_phy_type);
+		break;
+	}
+
+	if (rc != 0)
+		netdev_err(bp->dev,  "Warning: PHY was not initialized,"
+				      " Port %d\n",
+			   0);
+	return rc;
+}
+
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+			  u32 shmem2_base_path[], u32 chip_id)
+{
+	int rc = 0;
+	u32 phy_ver, val;
+	u8 phy_index = 0;
+	u32 ext_phy_type, ext_phy_config;
+	bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+	bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
+	DP(NETIF_MSG_LINK, "Begin common phy init\n");
+	if (CHIP_IS_E3(bp)) {
+		/* Enable EPIO */
+		val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
+		REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
+	}
+	/* Check if common init was already done */
+	phy_ver = REG_RD(bp, shmem_base_path[0] +
+			 offsetof(struct shmem_region,
+				  port_mb[PORT_0].ext_phy_fw_version));
+	if (phy_ver) {
+		DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+			       phy_ver);
+		return 0;
+	}
+
+	/* Read the ext_phy_type for an arbitrary port (0) */
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		ext_phy_config = bnx2x_get_ext_phy_config(bp,
+							  shmem_base_path[0],
+							  phy_index, 0);
+		ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+		rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, ext_phy_type,
+						chip_id);
+	}
+	return rc;
+}
+
+static void bnx2x_check_over_curr(struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port = params->port;
+	u32 pin_val;
+
+	cfg_pin = (REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+			       dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
+		   PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
+		PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
+
+	/* Ignore check if no external input PIN available */
+	if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
+		return;
+
+	if (!pin_val) {
+		if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
+			netdev_err(bp->dev, "Error: Power fault on Port %d has"
+					    " been detected and the power to "
+					    "that SFP+ module has been removed"
+					    " to prevent failure of the card."
+ " Please remove the SFP+ module and" + " restart the system to clear this" + " error.\n", + params->port); + vars->phy_flags |= PHY_OVER_CURRENT_FLAG; + } + } else + vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; +} + +static void bnx2x_analyze_link_error(struct link_params *params, + struct link_vars *vars, u32 lss_status) +{ + struct bnx2x *bp = params->bp; + /* Compare new value with previous value */ + u8 led_mode; + u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; + + if ((lss_status ^ half_open_conn) == 0) + return; + + /* If values differ */ + DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, + half_open_conn, lss_status); + + /* + * a. Update shmem->link_status accordingly + * b. Update link_vars->link_up + */ + if (lss_status) { + DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n"); + vars->link_status &= ~LINK_STATUS_LINK_UP; + vars->link_up = 0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + /* + * Set LED mode to off since the PHY doesn't know about these + * errors + */ + led_mode = LED_MODE_OFF; + } else { + DP(NETIF_MSG_LINK, "Remote Fault cleared\n"); + vars->link_status |= LINK_STATUS_LINK_UP; + vars->link_up = 1; + vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + led_mode = LED_MODE_OPER; + } + /* Update the LED according to the link state */ + bnx2x_set_led(params, vars, led_mode, SPEED_10000); + + /* Update link status in the shared memory */ + bnx2x_update_mng(params, vars->link_status); + + /* C. Trigger General Attention */ + vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; + bnx2x_notify_link_changed(bp); +} + +/****************************************************************************** +* Description: +* This function checks for half opened connection change indication. +* When such change occurs, it calls the bnx2x_analyze_link_error +* to check if Remote Fault is set or cleared. Reception of remote fault +* status message in the MAC indicates that the peer's MAC has detected +* a fault, for example, due to break in the TX side of fiber. +* +******************************************************************************/ +static void bnx2x_check_half_open_conn(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u32 lss_status = 0; + u32 mac_base; + /* In case link status is physically up @ 10G do */ + if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) + return; + + if (CHIP_IS_E3(bp) && + (REG_RD(bp, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_XMAC))) { + /* Check E3 XMAC */ + /* + * Note that link speed cannot be queried here, since it may be + * zero while link is down. In case UMAC is active, LSS will + * simply not be set + */ + mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + + /* Clear stick bits (Requires rising edge) */ + REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); + REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); + if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) + lss_status = 1; + + bnx2x_analyze_link_error(params, vars, lss_status); + } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { + /* Check E1X / E2 BMAC */ + u32 lss_status_reg; + u32 wb_data[2]; + mac_base = params->port ? 
+		mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+			NIG_REG_INGRESS_BMAC0_MEM;
+		/* Read BIGMAC_REGISTER_RX_LSS_STATUS */
+		if (CHIP_IS_E2(bp))
+			lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
+		else
+			lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
+
+		REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
+		lss_status = (wb_data[0] > 0);
+
+		bnx2x_analyze_link_error(params, vars, lss_status);
+	}
+}
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+{
+	struct bnx2x *bp;
+	u16 phy_idx;
+	if (!params) /* cannot log here: bp is derived from params */
+		return;
+
+	bp = params->bp;
+
+	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+		if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+			bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
+			bnx2x_check_half_open_conn(params, vars);
+			break;
+		}
+	}
+
+	if (CHIP_IS_E3(bp))
+		bnx2x_check_over_curr(params, vars);
+}
+
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
+{
+	u8 phy_index;
+	struct bnx2x_phy phy;
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       0, &phy) != 0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return 0;
+		}
+
+		if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
+			return 1;
+	}
+	return 0;
+}
+
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+			     u32 shmem_base,
+			     u32 shmem2_base,
+			     u8 port)
+{
+	u8 phy_index, fan_failure_det_req = 0;
+	struct bnx2x_phy phy;
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy)
+		    != 0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return 0;
+		}
+		fan_failure_det_req |= (phy.flags &
+					FLAGS_FAN_FAILURE_DET_REQ);
+	}
+	return fan_failure_det_req;
+}
+
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+	u8 phy_index;
+	struct bnx2x *bp = params->bp;
+	bnx2x_update_mng(params, 0);
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (params->phy[phy_index].hw_reset) {
+			params->phy[phy_index].hw_reset(
+				&params->phy[phy_index],
+				params);
+			params->phy[phy_index] = phy_null;
+		}
+	}
+}
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
+			    u8 port)
+{
+	u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
+	u32 val;
+	u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
+	if (CHIP_IS_E3(bp)) {
+		if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
+					      shmem_base,
+					      port,
+					      &gpio_num,
+					      &gpio_port) != 0)
+			return;
+	} else {
+		struct bnx2x_phy phy;
+		for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+		      phy_index++) {
+			if (bnx2x_populate_phy(bp, phy_index, shmem_base,
+					       shmem2_base, port, &phy)
+			    != 0) {
+				DP(NETIF_MSG_LINK, "populate phy failed\n");
+				return;
+			}
+			if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
+				gpio_num = MISC_REGISTERS_GPIO_3;
+				gpio_port = port;
+				break;
+			}
+		}
+	}
+
+	if (gpio_num == 0xff)
+		return;
+
+	/* Set the GPIO to trigger SFP+ module insertion/removal */
+	bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
+
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	gpio_port ^= (swap_val && swap_override);
+
+	vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
+		(gpio_num + (gpio_port << 2));
+
+	sync_offset =
shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].aeu_int_mask); + REG_WR(bp, sync_offset, vars->aeu_int_mask); + + DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n", + gpio_num, gpio_port, vars->aeu_int_mask); + + if (port == 0) + offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; + else + offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; + + /* Open appropriate AEU for interrupts */ + aeu_mask = REG_RD(bp, offset); + aeu_mask |= vars->aeu_int_mask; + REG_WR(bp, offset, aeu_mask); + + /* Enable the GPIO to trigger interrupt */ + val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); + val |= 1 << (gpio_num + (gpio_port << 2)); + REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h new file mode 100644 index 0000000..c12db6d --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -0,0 +1,493 @@ +/* Copyright 2008-2011 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Written by Yaniv Rosner + * + */ + +#ifndef BNX2X_LINK_H +#define BNX2X_LINK_H + + + +/***********************************************************/ +/* Defines */ +/***********************************************************/ +#define DEFAULT_PHY_DEV_ADDR 3 +#define E2_DEFAULT_PHY_DEV_ADDR 5 + + + +#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO +#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX +#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX +#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH +#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE + +#define NET_SERDES_IF_XFI 1 +#define NET_SERDES_IF_SFI 2 +#define NET_SERDES_IF_KR 3 +#define NET_SERDES_IF_DXGXS 4 + +#define SPEED_AUTO_NEG 0 +#define SPEED_20000 20000 + +#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 +#define SFP_EEPROM_VENDOR_NAME_SIZE 16 +#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 +#define SFP_EEPROM_VENDOR_OUI_SIZE 3 +#define SFP_EEPROM_PART_NO_ADDR 0x28 +#define SFP_EEPROM_PART_NO_SIZE 16 +#define SFP_EEPROM_REVISION_ADDR 0x38 +#define SFP_EEPROM_REVISION_SIZE 4 +#define SFP_EEPROM_SERIAL_ADDR 0x44 +#define SFP_EEPROM_SERIAL_SIZE 16 +#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ +#define SFP_EEPROM_DATE_SIZE 6 +#define PWR_FLT_ERR_MSG_LEN 250 + +#define XGXS_EXT_PHY_TYPE(ext_phy_config) \ + ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) +#define XGXS_EXT_PHY_ADDR(ext_phy_config) \ + (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \ + PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT) +#define SERDES_EXT_PHY_TYPE(ext_phy_config) \ + ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) + +/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */ +#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1) +/* Single Media board contains single external phy */ +#define SINGLE_MEDIA(params) (params->num_phys == 2) +/* Dual Media board contains two external phy with different media */ +#define DUAL_MEDIA(params) (params->num_phys == 3) + +#define 
FW_PARAM_PHY_ADDR_MASK		0x000000FF
+#define FW_PARAM_PHY_TYPE_MASK		0x0000FF00
+#define FW_PARAM_MDIO_CTRL_MASK		0xFFFF0000
+#define FW_PARAM_MDIO_CTRL_OFFSET	16
+#define FW_PARAM_PHY_ADDR(fw_param)	(fw_param & \
+					 FW_PARAM_PHY_ADDR_MASK)
+#define FW_PARAM_PHY_TYPE(fw_param)	(fw_param & \
+					 FW_PARAM_PHY_TYPE_MASK)
+#define FW_PARAM_MDIO_CTRL(fw_param)	((fw_param & \
+					  FW_PARAM_MDIO_CTRL_MASK) >> \
+					  FW_PARAM_MDIO_CTRL_OFFSET)
+#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+	(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
+
+
+#define PFC_BRB_FULL_LB_XOFF_THRESHOLD		170
+#define PFC_BRB_FULL_LB_XON_THRESHOLD		250
+
+#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
+/***********************************************************/
+/*                         Structs                         */
+/***********************************************************/
+#define INT_PHY		0
+#define EXT_PHY1	1
+#define EXT_PHY2	2
+#define MAX_PHYS	3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
+#define LINK_CONFIG_IDX(_phy_idx)	((_phy_idx == INT_PHY) ? \
+					 0 : (_phy_idx - 1))
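+/*
+ * Worked example (editorial): LINK_CONFIG_IDX(INT_PHY) == 0,
+ * LINK_CONFIG_IDX(EXT_PHY1) == 0 and LINK_CONFIG_IDX(EXT_PHY2) == 1,
+ * reflecting that the XGXS and the first external phy share a single
+ * entry in the LINK_CONFIG_SIZE (== 2) arrays of struct link_params.
+ */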
+/***********************************************************/
+/*                    bnx2x_phy struct                     */
+/* Defines the required arguments and function per phy     */
+/***********************************************************/
+struct link_vars;
+struct link_params;
+struct bnx2x_phy;
+
+typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+			    struct link_vars *vars);
+typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
+			    struct link_vars *vars);
+typedef void (*link_reset_t)(struct bnx2x_phy *phy,
+			     struct link_params *params);
+typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
+				  struct link_params *params);
+typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
+typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
+			       struct link_params *params, u8 mode);
+typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
+				    struct link_params *params, u32 action);
+
+struct bnx2x_phy {
+	u32 type;
+
+	/* Loaded during init */
+	u8 addr;
+	u8 def_md_devad;
+	u16 flags;
+	/* Require HW lock */
+#define FLAGS_HW_LOCK_REQUIRED		(1<<0)
+	/* No Over-Current detection */
+#define FLAGS_NOC			(1<<1)
+	/* Fan failure detection required */
+#define FLAGS_FAN_FAILURE_DET_REQ	(1<<2)
+	/* Initialize first the XGXS and only then the phy itself */
+#define FLAGS_INIT_XGXS_FIRST		(1<<3)
+#define FLAGS_WC_DUAL_MODE		(1<<4)
+#define FLAGS_4_PORT_MODE		(1<<5)
+#define FLAGS_REARM_LATCH_SIGNAL	(1<<6)
+#define FLAGS_SFP_NOT_APPROVED		(1<<7)
+#define FLAGS_MDC_MDIO_WA		(1<<8)
+#define FLAGS_DUMMY_READ		(1<<9)
+#define FLAGS_MDC_MDIO_WA_B0		(1<<10)
+#define FLAGS_TX_ERROR_CHECK		(1<<12)
+
+	/* preemphasis values for the rx side */
+	u16 rx_preemphasis[4];
+
+	/* preemphasis values for the tx side */
+	u16 tx_preemphasis[4];
+
+	/* EMAC address used to access the MDIO */
+	u32 mdio_ctrl;
+
+	u32 supported;
+
+	u32 media_type;
+#define	ETH_PHY_UNSPECIFIED	0x0
+#define	ETH_PHY_SFP_FIBER	0x1
+#define	ETH_PHY_XFP_FIBER	0x2
+#define	ETH_PHY_DA_TWINAX	0x3
+#define	ETH_PHY_BASE_T		0x4
+#define	ETH_PHY_KR		0xf0
+#define	ETH_PHY_CX4		0xf1
+#define	ETH_PHY_NOT_PRESENT	0xff
+
+	/* The address in which the version is located */
+	u32 ver_addr;
+
+	u16 req_flow_ctrl;
+
+	u16 req_line_speed;
+
+	u32 speed_cap_mask;
+
+	u16 req_duplex;
+	u16 rsrv;
+	/* Called per phy/port init, and it configures LASI, speed, autoneg,
+	   duplex, flow control negotiation, etc. */
+	config_init_t config_init;
+
+	/* Called due to interrupt. It determines the link and speed */
+	read_status_t read_status;
+
+	/* Called when driver is unloading. Should reset the phy */
+	link_reset_t link_reset;
+
+	/* Set the loopback configuration for the phy */
+	config_loopback_t config_loopback;
+
+	/* Format the given raw number into str up to len */
+	format_fw_ver_t format_fw_ver;
+
+	/* Reset the phy (both ports) */
+	hw_reset_t hw_reset;
+
+	/* Set link led mode (on/off/oper)*/
+	set_link_led_t set_link_led;
+
+	/* PHY Specific tasks */
+	phy_specific_func_t phy_specific_func;
+#define DISABLE_TX	1
+#define ENABLE_TX	2
+};
+
+/* Input parameters to the CLC */
+struct link_params {
+
+	u8 port;
+
+	/* Default / User Configuration */
+	u8 loopback_mode;
+#define LOOPBACK_NONE		0
+#define LOOPBACK_EMAC		1
+#define LOOPBACK_BMAC		2
+#define LOOPBACK_XGXS		3
+#define LOOPBACK_EXT_PHY	4
+#define LOOPBACK_EXT		5
+#define LOOPBACK_UMAC		6
+#define LOOPBACK_XMAC		7
+
+	/* Device parameters */
+	u8 mac_addr[6];
+
+	u16 req_duplex[LINK_CONFIG_SIZE];
+	u16 req_flow_ctrl[LINK_CONFIG_SIZE];
+
+	u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
+
+	/* shmem parameters */
+	u32 shmem_base;
+	u32 shmem2_base;
+	u32 speed_cap_mask[LINK_CONFIG_SIZE];
+	u32 switch_cfg;
+#define SWITCH_CFG_1G		PORT_FEATURE_CON_SWITCH_1G_SWITCH
+#define SWITCH_CFG_10G		PORT_FEATURE_CON_SWITCH_10G_SWITCH
+#define SWITCH_CFG_AUTO_DETECT	PORT_FEATURE_CON_SWITCH_AUTO_DETECT
+
+	u32 lane_config;
+
+	/* Phy register parameter */
+	u32 chip_id;
+
+	/* features */
+	u32 feature_config_flags;
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED	(1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED			(1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY		(1<<2)
+#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY	(1<<3)
+#define FEATURE_CONFIG_AUTOGREEEN_ENABLED		(1<<9)
+#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED	(1<<10)
+	/* Will be populated during common init */
+	struct bnx2x_phy phy[MAX_PHYS];
+
+	/* Will be populated during common init */
+	u8 num_phys;
+
+	u8 rsrv;
+	u16 hw_led_mode; /* part of the hw_config read from the shmem */
+	u32 multi_phy_config;
+
+	/* Device pointer passed to all callback functions */
+	struct bnx2x *bp;
+	u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+				req_flow_ctrl is set to AUTO */
+};
+
+/* Output parameters */
+struct link_vars {
+	u8 phy_flags;
+#define PHY_XGXS_FLAG			(1<<0)
+#define PHY_SGMII_FLAG			(1<<1)
+#define PHY_PHYSICAL_LINK_FLAG		(1<<2)
+#define PHY_HALF_OPEN_CONN_FLAG		(1<<3)
+#define PHY_OVER_CURRENT_FLAG		(1<<4)
+
+	u8 mac_type;
+#define MAC_TYPE_NONE		0
+#define MAC_TYPE_EMAC		1
+#define MAC_TYPE_BMAC		2
+#define MAC_TYPE_UMAC		3
+#define MAC_TYPE_XMAC		4
+
+	u8 phy_link_up; /* internal phy link indication */
+	u8 link_up;
+
+	u16 line_speed;
+	u16 duplex;
+
+	u16 flow_ctrl;
+	u16 ieee_fc;
+
+	/* The same definitions as the shmem parameter */
+	u32 link_status;
+	u8 fault_detected;
+	u8 rsrv1;
+	u16 periodic_flags;
+#define PERIODIC_FLAGS_LINK_EVENT	0x0001
+
+	u32 aeu_int_mask;
+};
+
+/***********************************************************/
+/*                         Functions                       */
+/***********************************************************/
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
+
Should be called when driver or interface goes down + Before calling phy firmware upgrade, the reset_ext_phy should be set + to 0 */ +int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, + u8 reset_ext_phy); + +/* bnx2x_link_update should be called upon link interrupt */ +int bnx2x_link_update(struct link_params *params, struct link_vars *vars); + +/* use the following phy functions to read/write from external_phy + In order to use it to read/write internal phy registers, use + DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as + the register */ +int bnx2x_phy_read(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 *ret_val); + +int bnx2x_phy_write(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 val); + +/* Reads the link_status from the shmem, + and update the link vars accordingly */ +void bnx2x_link_status_update(struct link_params *input, + struct link_vars *output); +/* returns string representing the fw_version of the external phy */ +int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, + u8 *version, u16 len); + +/* Set/Unset the led + Basically, the CLC takes care of the led for the link, but in case one needs + to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to + blink the led, and LED_MODE_OFF to set the led off.*/ +int bnx2x_set_led(struct link_params *params, + struct link_vars *vars, u8 mode, u32 speed); +#define LED_MODE_OFF 0 +#define LED_MODE_ON 1 +#define LED_MODE_OPER 2 +#define LED_MODE_FRONT_PANEL_OFF 3 + +/* bnx2x_handle_module_detect_int should be called upon module detection + interrupt */ +void bnx2x_handle_module_detect_int(struct link_params *params); + +/* Get the actual link status. In case it returns 0, link is up, + otherwise link is down*/ +int bnx2x_test_link(struct link_params *params, struct link_vars *vars, + u8 is_serdes); + +/* One-time initialization for external phy after power up */ +int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], + u32 shmem2_base_path[], u32 chip_id); + +/* Reset the external PHY using GPIO */ +void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); + +/* Reset the external of SFX7101 */ +void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); + +/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ +int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, u16 addr, + u8 byte_cnt, u8 *o_buf); + +void bnx2x_hw_reset_phy(struct link_params *params); + +/* Checks if HW lock is required for this phy/board type */ +u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, + u32 shmem2_base); + +/* Check swap bit and adjust PHY order */ +u32 bnx2x_phy_selection(struct link_params *params); + +/* Probe the phys on board, and populate them in "params" */ +int bnx2x_phy_probe(struct link_params *params); + +/* Checks if fan failure detection is required on one of the phys on board */ +u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, + u32 shmem2_base, u8 port); + + + +/* DCBX structs */ + +/* Number of maximum COS per chip */ +#define DCBX_E2E3_MAX_NUM_COS (2) +#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6) +#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3) +#define DCBX_E3B0_MAX_NUM_COS ( \ + MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \ + DCBX_E3B0_MAX_NUM_COS_PORT1)) + +#define DCBX_MAX_NUM_COS ( \ + MAXVAL(DCBX_E3B0_MAX_NUM_COS, \ + DCBX_E2E3_MAX_NUM_COS)) + +/* PFC port configuration params */ +struct bnx2x_nig_brb_pfc_port_params { + /* NIG */ + u32 
pause_enable; + u32 llfc_out_en; + u32 llfc_enable; + u32 pkt_priority_to_cos; + u8 num_of_rx_cos_priority_mask; + u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS]; + u32 llfc_high_priority_classes; + u32 llfc_low_priority_classes; + /* BRB */ + u32 cos0_pauseable; + u32 cos1_pauseable; +}; + + +/* ETS port configuration params */ +struct bnx2x_ets_bw_params { + u8 bw; +}; + +struct bnx2x_ets_sp_params { + /** + * valid values are 0 - 5. 0 is highest strict priority. + * There can't be two COS's with the same pri. + */ + u8 pri; +}; + +enum bnx2x_cos_state { + bnx2x_cos_state_strict = 0, + bnx2x_cos_state_bw = 1, +}; + +struct bnx2x_ets_cos_params { + enum bnx2x_cos_state state ; + union { + struct bnx2x_ets_bw_params bw_params; + struct bnx2x_ets_sp_params sp_params; + } params; +}; + +struct bnx2x_ets_params { + u8 num_of_cos; /* Number of valid COS entries*/ + struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS]; +}; + +/** + * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB + * when link is already up + */ +int bnx2x_update_pfc(struct link_params *params, + struct link_vars *vars, + struct bnx2x_nig_brb_pfc_port_params *pfc_params); + + +/* Used to configure the ETS to disable */ +int bnx2x_ets_disabled(struct link_params *params, + struct link_vars *vars); + +/* Used to configure the ETS to BW limited */ +void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, + const u32 cos1_bw); + +/* Used to configure the ETS to strict */ +int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); + + +/* Configure the COS to ETS according to BW and SP settings.*/ +int bnx2x_ets_e3b0_config(const struct link_params *params, + const struct link_vars *vars, + const struct bnx2x_ets_params *ets_params); +/* Read pfc statistic*/ +void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, + u32 pfc_frames_sent[2], + u32 pfc_frames_received[2]); +void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, + u32 chip_id, u32 shmem_base, u32 shmem2_base, + u8 port); + +int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, + struct link_params *params); + +void bnx2x_period_func(struct link_params *params, struct link_vars *vars); + +#endif /* BNX2X_LINK_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c new file mode 100644 index 0000000..1507091 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -0,0 +1,11531 @@ +/* bnx2x_main.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
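The ETS interface above takes a small description of the per-COS scheduling policy. As a usage sketch, assuming a two-COS setup where COS0 is strict-priority and COS1 takes the remaining bandwidth (the bw units are assumed to be percent, and bnx2x_ets_e3b0_config() applies only to E3B0 parts; params/vars come from the caller's link state):

    static int example_ets_setup(const struct link_params *params,
                                 const struct link_vars *vars)
    {
            struct bnx2x_ets_params ets = { .num_of_cos = 2 };

            ets.cos[0].state = bnx2x_cos_state_strict;
            ets.cos[0].params.sp_params.pri = 0;    /* 0 = highest strict pri */
            ets.cos[1].state = bnx2x_cos_state_bw;
            ets.cos[1].params.bw_params.bw = 100;   /* remaining bandwidth */

            return bnx2x_ets_e3b0_config(params, vars, &ets);
    }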
+ * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + +#include +#include +#include +#include /* for dev_info() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bnx2x.h" +#include "bnx2x_init.h" +#include "bnx2x_init_ops.h" +#include "bnx2x_cmn.h" +#include "bnx2x_dcb.h" +#include "bnx2x_sp.h" + +#include +#include "bnx2x_fw_file_hdr.h" +/* FW files */ +#define FW_FILE_VERSION \ + __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ + __stringify(BCM_5710_FW_MINOR_VERSION) "." \ + __stringify(BCM_5710_FW_REVISION_VERSION) "." \ + __stringify(BCM_5710_FW_ENGINEERING_VERSION) +#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" +#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" +#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" + +/* Time in jiffies before concluding the transmitter is hung */ +#define TX_TIMEOUT (5*HZ) + +static char version[] __devinitdata = + "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " + DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Eliezer Tamir"); +MODULE_DESCRIPTION("Broadcom NetXtreme II " + "BCM57710/57711/57711E/" + "57712/57712_MF/57800/57800_MF/57810/57810_MF/" + "57840/57840_MF Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_FIRMWARE(FW_FILE_NAME_E1); +MODULE_FIRMWARE(FW_FILE_NAME_E1H); +MODULE_FIRMWARE(FW_FILE_NAME_E2); + +static int multi_mode = 1; +module_param(multi_mode, int, 0); +MODULE_PARM_DESC(multi_mode, " Multi queue mode " + "(0 Disable; 1 Enable (default))"); + +int num_queues; +module_param(num_queues, int, 0); +MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" + " (default is as a number of CPUs)"); + +static int disable_tpa; +module_param(disable_tpa, int, 0); +MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); + +#define INT_MODE_INTx 1 +#define INT_MODE_MSI 2 +static int int_mode; +module_param(int_mode, int, 0); +MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " + "(1 INT#x; 2 MSI)"); + +static int dropless_fc; +module_param(dropless_fc, int, 0); +MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); + +static int poll; +module_param(poll, int, 0); +MODULE_PARM_DESC(poll, " Use polling (for debug)"); + +static int mrrs = -1; +module_param(mrrs, int, 0); +MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); + +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, " Default debug msglevel"); + + + +struct workqueue_struct *bnx2x_wq; + +enum bnx2x_board_type { + BCM57710 = 0, + BCM57711, + BCM57711E, + BCM57712, + BCM57712_MF, + BCM57800, + BCM57800_MF, + BCM57810, + BCM57810_MF, + BCM57840, + BCM57840_MF +}; + +/* indexed by board_type, above */ +static struct { + char *name; +} board_info[] __devinitdata = { + { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, + { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, + { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, + { "Broadcom NetXtreme II 
BCM57712 10 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, + { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, + { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, + { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " + "Ethernet Multi Function"} +}; + +#ifndef PCI_DEVICE_ID_NX2_57710 +#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710 +#endif +#ifndef PCI_DEVICE_ID_NX2_57711 +#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711 +#endif +#ifndef PCI_DEVICE_ID_NX2_57711E +#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E +#endif +#ifndef PCI_DEVICE_ID_NX2_57712 +#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712 +#endif +#ifndef PCI_DEVICE_ID_NX2_57712_MF +#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57800 +#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 +#endif +#ifndef PCI_DEVICE_ID_NX2_57800_MF +#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57810 +#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 +#endif +#ifndef PCI_DEVICE_ID_NX2_57810_MF +#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57840 +#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840 +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_MF +#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF +#endif +static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); + +/**************************************************************************** +* General service functions +****************************************************************************/ + +static inline void __storm_memset_dma_mapping(struct bnx2x *bp, + u32 addr, dma_addr_t mapping) +{ + REG_WR(bp, addr, U64_LO(mapping)); + REG_WR(bp, addr + 4, U64_HI(mapping)); +} + +static inline void storm_memset_spq_addr(struct bnx2x *bp, + dma_addr_t mapping, u16 abs_fid) +{ + u32 addr = XSEM_REG_FAST_MEMORY + + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); + + __storm_memset_dma_mapping(bp, addr, mapping); +} + +static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, + u16 pf_id) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); +} + +static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, + u8 enable) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), + 
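The PCI core matches a plugged device against bnx2x_pci_tbl and hands the winning entry to the driver's probe routine; the driver_data slot in each PCI_VDEVICE() entry carries the enum bnx2x_board_type used to index board_info[]. A sketch of the probe-side lookup (hypothetical function name; the real lookup happens in the driver's __devinit probe path later in this file):

    static int __devinit example_probe(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
    {
            /* ent->driver_data was filled from the table's board_type column */
            enum bnx2x_board_type board = ent->driver_data;

            dev_info(&pdev->dev, "found %s\n", board_info[board].name);
            return 0;
    }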
enable); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), + enable); +} + +static inline void storm_memset_eq_data(struct bnx2x *bp, + struct event_ring_data *eq_data, + u16 pfid) +{ + size_t size = sizeof(struct event_ring_data); + + u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid); + + __storm_memset_struct(bp, addr, size, (u32 *)eq_data); +} + +static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, + u16 pfid) +{ + u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); + REG_WR16(bp, addr, eq_prod); +} + +/* used only at init + * locking is done by mcp + */ +static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) +{ + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); + pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, + PCICFG_VENDOR_ID_OFFSET); +} + +static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) +{ + u32 val; + + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); + pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, + PCICFG_VENDOR_ID_OFFSET); + + return val; +} + +#define DMAE_DP_SRC_GRC "grc src_addr [%08x]" +#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]" +#define DMAE_DP_DST_GRC "grc dst_addr [%08x]" +#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" +#define DMAE_DP_DST_NONE "dst_addr [none]" + +static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, + int msglvl) +{ + u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; + + switch (dmae->opcode & DMAE_COMMAND_DST) { + case DMAE_CMD_DST_PCI: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%08x], len [%d*4], dst [%x:%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + case DMAE_CMD_DST_GRC: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->dst_addr_lo >> 2, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%08x], len [%d*4], dst [%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->dst_addr_lo >> 2, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + default: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + DP_LEVEL "src_addr [%x:%08x] len [%d * 4] " + "dst_addr [none]\n" + DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + DP_LEVEL "src_addr [%08x] len [%d * 4] " + "dst_addr [none]\n" + DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n", + 
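bnx2x_reg_wr_ind()/bnx2x_reg_rd_ind() implement the classic address/data window: the target GRC address is written to PCICFG_GRC_ADDRESS, the payload moves through PCICFG_GRC_DATA, and the window is parked back on PCICFG_VENDOR_ID_OFFSET afterwards. As the comment notes, they are only safe at init time, when the MCP serializes access. A sketch of a block write built on top of them (the in-tree analogue is bnx2x_init_ind_wr(), used below whenever DMAE is not ready):

    static void example_ind_wr_block(struct bnx2x *bp, u32 addr,
                                     const u32 *data, u32 len32)
    {
            u32 i;

            /* one window transaction per dword; slow, but needs no DMAE setup */
            for (i = 0; i < len32; i++)
                    bnx2x_reg_wr_ind(bp, addr + i * 4, data[i]);
    }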
dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + } + +} + +/* copy command into DMAE command memory and set DMAE command go */ +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) +{ + u32 cmd_offset; + int i; + + cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx); + for (i = 0; i < (sizeof(struct dmae_command)/4); i++) { + REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i)); + + DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n", + idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); + } + REG_WR(bp, dmae_reg_go_c[idx], 1); +} + +u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type) +{ + return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | + DMAE_CMD_C_ENABLE); +} + +u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode) +{ + return opcode & ~DMAE_CMD_SRC_RESET; +} + +u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, + bool with_comp, u8 comp_type) +{ + u32 opcode = 0; + + opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) | + (dst_type << DMAE_COMMAND_DST_SHIFT)); + + opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); + + opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); + opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | + (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); + opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); + +#ifdef __BIG_ENDIAN + opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; +#else + opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; +#endif + if (with_comp) + opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type); + return opcode; +} + +static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, + struct dmae_command *dmae, + u8 src_type, u8 dst_type) +{ + memset(dmae, 0, sizeof(struct dmae_command)); + + /* set the opcode */ + dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type, + true, DMAE_COMP_PCI); + + /* fill in the completion parameters */ + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); + dmae->comp_val = DMAE_COMP_VAL; +} + +/* issue a dmae command over the init-channel and wait for completion */ +static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, + struct dmae_command *dmae) +{ + u32 *wb_comp = bnx2x_sp(bp, wb_comp); + int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; + int rc = 0; + + DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n", + bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], + bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); + + /* + * Lock the dmae channel. Disable BHs to prevent a dead-lock + * as long as this code is called both from syscall context and + * from ndo_set_rx_mode() flow that may be called from BH.
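A usage sketch for the DMAE write path above: copy a DMA-coherent host buffer into device (GRC) space. The destination address and payload size are placeholders, and note that bnx2x_write_dmae() counts length in 32-bit words, not bytes:

    #define EXAMPLE_WORDS   16      /* hypothetical payload size, in dwords */

    static void example_dmae_write(struct bnx2x *bp, u32 dst)
    {
            dma_addr_t mapping;
            u32 *buf = dma_alloc_coherent(&bp->pdev->dev,
                                          EXAMPLE_WORDS * sizeof(u32),
                                          &mapping, GFP_KERNEL);

            if (!buf)
                    return;
            memset(buf, 0, EXAMPLE_WORDS * sizeof(u32));
            bnx2x_write_dmae(bp, mapping, dst, EXAMPLE_WORDS);
            dma_free_coherent(&bp->pdev->dev, EXAMPLE_WORDS * sizeof(u32),
                              buf, mapping);
    }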
+ */ + spin_lock_bh(&bp->dmae_lock); + + /* reset completion */ + *wb_comp = 0; + + /* post the command on the channel used for initializations */ + bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); + + /* wait for completion */ + udelay(5); + while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { + DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); + + if (!cnt) { + BNX2X_ERR("DMAE timeout!\n"); + rc = DMAE_TIMEOUT; + goto unlock; + } + cnt--; + udelay(50); + } + if (*wb_comp & DMAE_PCI_ERR_FLAG) { + BNX2X_ERR("DMAE PCI error!\n"); + rc = DMAE_PCI_ERROR; + } + + DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n", + bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], + bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); + +unlock: + spin_unlock_bh(&bp->dmae_lock); + return rc; +} + +void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, + u32 len32) +{ + struct dmae_command dmae; + + if (!bp->dmae_ready) { + u32 *data = bnx2x_sp(bp, wb_data[0]); + + DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)" + " using indirect\n", dst_addr, len32); + bnx2x_init_ind_wr(bp, dst_addr, data, len32); + return; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); + + /* fill in addresses and len */ + dmae.src_addr_lo = U64_LO(dma_addr); + dmae.src_addr_hi = U64_HI(dma_addr); + dmae.dst_addr_lo = dst_addr >> 2; + dmae.dst_addr_hi = 0; + dmae.len = len32; + + bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF); + + /* issue the command and wait for completion */ + bnx2x_issue_dmae_with_comp(bp, &dmae); +} + +void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) +{ + struct dmae_command dmae; + + if (!bp->dmae_ready) { + u32 *data = bnx2x_sp(bp, wb_data[0]); + int i; + + DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)" + " using indirect\n", src_addr, len32); + for (i = 0; i < len32; i++) + data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); + return; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); + + /* fill in addresses and len */ + dmae.src_addr_lo = src_addr >> 2; + dmae.src_addr_hi = 0; + dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); + dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); + dmae.len = len32; + + bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF); + + /* issue the command and wait for completion */ + bnx2x_issue_dmae_with_comp(bp, &dmae); +} + +static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, + u32 addr, u32 len) +{ + int dmae_wr_max = DMAE_LEN32_WR_MAX(bp); + int offset = 0; + + while (len > dmae_wr_max) { + bnx2x_write_dmae(bp, phys_addr + offset, + addr + offset, dmae_wr_max); + offset += dmae_wr_max * 4; + len -= dmae_wr_max; + } + + bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); +} + +/* used only for slowpath so not inlined */ +static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo) +{ + u32 wb_write[2]; + + wb_write[0] = val_hi; + wb_write[1] = val_lo; + REG_WR_DMAE(bp, reg, wb_write, 2); +} + +#ifdef USE_WB_RD +static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg) +{ + u32 wb_data[2]; + + REG_RD_DMAE(bp, reg, wb_data, 2); + + return HILO_U64(wb_data[0], wb_data[1]); +} +#endif + +static int bnx2x_mc_assert(struct bnx2x *bp) +{ + char last_idx; + int i, rc = 0; + u32 row0, row1, row2, row3; + + /* XSTORM */ + last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM + + XSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) + 
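The chunking arithmetic in bnx2x_write_dmae_phys_len() above is easy to misread because `offset` advances in bytes while `len` is tracked in 32-bit words. A worked example, assuming DMAE_LEN32_WR_MAX(bp) evaluates to 0x400:

    /*
     * len = 0x900 dwords, dmae_wr_max = 0x400:
     *   pass 1: write 0x400 dwords at offset 0x0000, offset += 0x1000, len = 0x500
     *   pass 2: write 0x400 dwords at offset 0x1000, offset += 0x1000, len = 0x100
     *   tail  : write the final 0x100 dwords at offset 0x2000
     */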
BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + + row0 = REG_RD(bp, BAR_XSTRORM_INTMEM + + XSTORM_ASSERT_LIST_OFFSET(i)); + row1 = REG_RD(bp, BAR_XSTRORM_INTMEM + + XSTORM_ASSERT_LIST_OFFSET(i) + 4); + row2 = REG_RD(bp, BAR_XSTRORM_INTMEM + + XSTORM_ASSERT_LIST_OFFSET(i) + 8); + row3 = REG_RD(bp, BAR_XSTRORM_INTMEM + + XSTORM_ASSERT_LIST_OFFSET(i) + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* TSTORM */ + last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM + + TSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) + BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + + row0 = REG_RD(bp, BAR_TSTRORM_INTMEM + + TSTORM_ASSERT_LIST_OFFSET(i)); + row1 = REG_RD(bp, BAR_TSTRORM_INTMEM + + TSTORM_ASSERT_LIST_OFFSET(i) + 4); + row2 = REG_RD(bp, BAR_TSTRORM_INTMEM + + TSTORM_ASSERT_LIST_OFFSET(i) + 8); + row3 = REG_RD(bp, BAR_TSTRORM_INTMEM + + TSTORM_ASSERT_LIST_OFFSET(i) + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* CSTORM */ + last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM + + CSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) + BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + + row0 = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_ASSERT_LIST_OFFSET(i)); + row1 = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_ASSERT_LIST_OFFSET(i) + 4); + row2 = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_ASSERT_LIST_OFFSET(i) + 8); + row3 = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_ASSERT_LIST_OFFSET(i) + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* USTORM */ + last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM + + USTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) + BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + + row0 = REG_RD(bp, BAR_USTRORM_INTMEM + + USTORM_ASSERT_LIST_OFFSET(i)); + row1 = REG_RD(bp, BAR_USTRORM_INTMEM + + USTORM_ASSERT_LIST_OFFSET(i) + 4); + row2 = REG_RD(bp, BAR_USTRORM_INTMEM + + USTORM_ASSERT_LIST_OFFSET(i) + 8); + row3 = REG_RD(bp, BAR_USTRORM_INTMEM + + USTORM_ASSERT_LIST_OFFSET(i) + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + return rc; +} + +void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) +{ + u32 addr, val; + u32 mark, offset; + __be32 data[9]; + int word; + u32 trace_shmem_base; + if (BP_NOMCP(bp)) { + BNX2X_ERR("NO MCP - can not dump\n"); + return; + } + netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", + (bp->common.bc_ver & 0xff0000) >> 16, + (bp->common.bc_ver & 0xff00) >> 8, + (bp->common.bc_ver & 0xff)); + + val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); + if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) + printk("%s" "MCP PC at 0x%x\n", lvl, val); + + if (BP_PATH(bp) == 0) + trace_shmem_base = bp->common.shmem_base; + else + 
trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); + addr = trace_shmem_base - 0x0800 + 4; + mark = REG_RD(bp, addr); + mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) + + ((mark + 0x3) & ~0x3) - 0x08000000; + printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); + + printk("%s", lvl); + for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { + for (word = 0; word < 8; word++) + data[word] = htonl(REG_RD(bp, offset + 4*word)); + data[8] = 0x0; + pr_cont("%s", (char *)data); + } + for (offset = addr + 4; offset <= mark; offset += 0x8*4) { + for (word = 0; word < 8; word++) + data[word] = htonl(REG_RD(bp, offset + 4*word)); + data[8] = 0x0; + pr_cont("%s", (char *)data); + } + printk("%s" "end of fw dump\n", lvl); +} + +static inline void bnx2x_fw_dump(struct bnx2x *bp) +{ + bnx2x_fw_dump_lvl(bp, KERN_ERR); +} + +void bnx2x_panic_dump(struct bnx2x *bp) +{ + int i; + u16 j; + struct hc_sp_status_block_data sp_sb_data; + int func = BP_FUNC(bp); +#ifdef BNX2X_STOP_ON_ERROR + u16 start = 0, end = 0; + u8 cos; +#endif + + bp->stats_state = STATS_STATE_DISABLED; + DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); + + BNX2X_ERR("begin crash dump -----------------\n"); + + /* Indices */ + /* Common */ + BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)" + " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", + bp->def_idx, bp->def_att_idx, bp->attn_state, + bp->spq_prod_idx, bp->stats_counter); + BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", + bp->def_status_blk->atten_status_block.attn_bits, + bp->def_status_blk->atten_status_block.attn_bits_ack, + bp->def_status_blk->atten_status_block.status_block_id, + bp->def_status_blk->atten_status_block.attn_bits_index); + BNX2X_ERR(" def ("); + for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) + pr_cont("0x%x%s", + bp->def_status_blk->sp_sb.index_values[i], + (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); + + for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) + *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + + i*sizeof(u32)); + + pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) " + "pf_id(0x%x) vnic_id(0x%x) " + "vf_id(0x%x) vf_valid (0x%x) " + "state(0x%x)\n", + sp_sb_data.igu_sb_id, + sp_sb_data.igu_seg_id, + sp_sb_data.p_func.pf_id, + sp_sb_data.p_func.vnic_id, + sp_sb_data.p_func.vf_id, + sp_sb_data.p_func.vf_valid, + sp_sb_data.state); + + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + int loop; + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + struct hc_status_block_sm *hc_sm_p = + CHIP_IS_E1x(bp) ? + sb_data_e1x.common.state_machine : + sb_data_e2.common.state_machine; + struct hc_index_data *hc_index_p = + CHIP_IS_E1x(bp) ? 
+ sb_data_e1x.index_data : + sb_data_e2.index_data; + u8 data_size, cos; + u32 *sb_data_p; + struct bnx2x_fp_txdata txdata; + + /* Rx */ + BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)" + " rx_comp_prod(0x%x)" + " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", + i, fp->rx_bd_prod, fp->rx_bd_cons, + fp->rx_comp_prod, + fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); + BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)" + " fp_hc_idx(0x%x)\n", + fp->rx_sge_prod, fp->last_max_sge, + le16_to_cpu(fp->fp_hc_idx)); + + /* Tx */ + for_each_cos_in_tx_queue(fp, cos) + { + txdata = fp->txdata[cos]; + BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)" + " tx_bd_prod(0x%x) tx_bd_cons(0x%x)" + " *tx_cons_sb(0x%x)\n", + i, txdata.tx_pkt_prod, + txdata.tx_pkt_cons, txdata.tx_bd_prod, + txdata.tx_bd_cons, + le16_to_cpu(*txdata.tx_cons_sb)); + } + + loop = CHIP_IS_E1x(bp) ? + HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; + + /* host sb data */ + +#ifdef BCM_CNIC + if (IS_FCOE_FP(fp)) + continue; +#endif + BNX2X_ERR(" run indexes ("); + for (j = 0; j < HC_SB_MAX_SM; j++) + pr_cont("0x%x%s", + fp->sb_running_index[j], + (j == HC_SB_MAX_SM - 1) ? ")" : " "); + + BNX2X_ERR(" indexes ("); + for (j = 0; j < loop; j++) + pr_cont("0x%x%s", + fp->sb_index_values[j], + (j == loop - 1) ? ")" : " "); + /* fw sb data */ + data_size = CHIP_IS_E1x(bp) ? + sizeof(struct hc_status_block_data_e1x) : + sizeof(struct hc_status_block_data_e2); + data_size /= sizeof(u32); + sb_data_p = CHIP_IS_E1x(bp) ? + (u32 *)&sb_data_e1x : + (u32 *)&sb_data_e2; + /* copy sb data in here */ + for (j = 0; j < data_size; j++) + *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + + j * sizeof(u32)); + + if (!CHIP_IS_E1x(bp)) { + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " + "vnic_id(0x%x) same_igu_sb_1b(0x%x) " + "state(0x%x)\n", + sb_data_e2.common.p_func.pf_id, + sb_data_e2.common.p_func.vf_id, + sb_data_e2.common.p_func.vf_valid, + sb_data_e2.common.p_func.vnic_id, + sb_data_e2.common.same_igu_sb_1b, + sb_data_e2.common.state); + } else { + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " + "vnic_id(0x%x) same_igu_sb_1b(0x%x) " + "state(0x%x)\n", + sb_data_e1x.common.p_func.pf_id, + sb_data_e1x.common.p_func.vf_id, + sb_data_e1x.common.p_func.vf_valid, + sb_data_e1x.common.p_func.vnic_id, + sb_data_e1x.common.same_igu_sb_1b, + sb_data_e1x.common.state); + } + + /* SB_SMs data */ + for (j = 0; j < HC_SB_MAX_SM; j++) { + pr_cont("SM[%d] __flags (0x%x) " + "igu_sb_id (0x%x) igu_seg_id(0x%x) " + "time_to_expire (0x%x) " + "timer_value(0x%x)\n", j, + hc_sm_p[j].__flags, + hc_sm_p[j].igu_sb_id, + hc_sm_p[j].igu_seg_id, + hc_sm_p[j].time_to_expire, + hc_sm_p[j].timer_value); + } + + /* Indices data */ + for (j = 0; j < loop; j++) { + pr_cont("INDEX[%d] flags (0x%x) " + "timeout (0x%x)\n", j, + hc_index_p[j].flags, + hc_index_p[j].timeout); + } + } + +#ifdef BNX2X_STOP_ON_ERROR + /* Rings */ + /* Rx */ + for_each_rx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); + end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); + for (j = start; j != end; j = RX_BD(j + 1)) { + u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; + struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; + + BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", + i, j, rx_bd[1], rx_bd[0], sw_bd->skb); + } + + start = RX_SGE(fp->rx_sge_prod); + end = RX_SGE(fp->last_max_sge); + for (j = start; j != end; j = RX_SGE(j + 1)) { + u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; + struct sw_rx_page
*sw_page = &fp->rx_page_ring[j]; + + BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n", + i, j, rx_sge[1], rx_sge[0], sw_page->page); + } + + start = RCQ_BD(fp->rx_comp_cons - 10); + end = RCQ_BD(fp->rx_comp_cons + 503); + for (j = start; j != end; j = RCQ_BD(j + 1)) { + u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; + + BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n", + i, j, cqe[0], cqe[1], cqe[2], cqe[3]); + } + } + + /* Tx */ + for_each_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + + start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); + end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); + for (j = start; j != end; j = TX_BD(j + 1)) { + struct sw_tx_bd *sw_bd = + &txdata->tx_buf_ring[j]; + + BNX2X_ERR("fp%d: txdata %d, " + "packet[%x]=[%p,%x]\n", + i, cos, j, sw_bd->skb, + sw_bd->first_bd); + } + + start = TX_BD(txdata->tx_bd_cons - 10); + end = TX_BD(txdata->tx_bd_cons + 254); + for (j = start; j != end; j = TX_BD(j + 1)) { + u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; + + BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=" + "[%x:%x:%x:%x]\n", + i, cos, j, tx_bd[0], tx_bd[1], + tx_bd[2], tx_bd[3]); + } + } + } +#endif + bnx2x_fw_dump(bp); + bnx2x_mc_assert(bp); + BNX2X_ERR("end crash dump -----------------\n"); +} + +/* + * FLR Support for E2 + * + * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW + * initialization. + */ +#define FLR_WAIT_USEC 10000 /* 10 milliseconds */ +#define FLR_WAIT_INTERAVAL 50 /* usec */ +#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */ + +struct pbf_pN_buf_regs { + int pN; + u32 init_crd; + u32 crd; + u32 crd_freed; +}; + +struct pbf_pN_cmd_regs { + int pN; + u32 lines_occup; + u32 lines_freed; +}; + +static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, + struct pbf_pN_buf_regs *regs, + u32 poll_count) +{ + u32 init_crd, crd, crd_start, crd_freed, crd_freed_start; + u32 cur_cnt = poll_count; + + crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); + crd = crd_start = REG_RD(bp, regs->crd); + init_crd = REG_RD(bp, regs->init_crd); + + DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); + DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); + DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); + + while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < + (init_crd - crd_start))) { + if (cur_cnt--) { + udelay(FLR_WAIT_INTERAVAL); + crd = REG_RD(bp, regs->crd); + crd_freed = REG_RD(bp, regs->crd_freed); + } else { + DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", + regs->pN); + DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", + regs->pN, crd); + DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", + regs->pN, crd_freed); + break; + } + } + DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", + poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); +} + +static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, + struct pbf_pN_cmd_regs *regs, + u32 poll_count) +{ + u32 occup, to_free, freed, freed_start; + u32 cur_cnt = poll_count; + + occup = to_free = REG_RD(bp, regs->lines_occup); + freed = freed_start = REG_RD(bp, regs->lines_freed); + + DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); + DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); + + while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { + if (cur_cnt--) { + udelay(FLR_WAIT_INTERAVAL); + occup = REG_RD(bp, regs->lines_occup); + freed = REG_RD(bp, regs->lines_freed); + } else { + DP(BNX2X_MSG_SP, "PBF
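The poll loops above compare progress via (u32)SUB_S32(new, start) rather than a plain subtraction so that a hardware counter that wraps past 0xffffffff still reports the right delta. A minimal illustration of the idiom (counter_delta() is not a driver function, just a sketch of what SUB_S32 buys):

    static inline u32 counter_delta(u32 cur, u32 start)
    {
            /* two's-complement wrap: cur = 0x10, start = 0xfffffff0 -> 0x20 */
            return (u32)((s32)cur - (s32)start);
    }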
cmd queue[%d] timed out\n", + regs->pN); + DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", + regs->pN, occup); + DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", + regs->pN, freed); + break; + } + } + DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", + poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); +} + +static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, + u32 expected, u32 poll_count) +{ + u32 cur_cnt = poll_count; + u32 val; + + while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) + udelay(FLR_WAIT_INTERAVAL); + + return val; +} + +static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, + char *msg, u32 poll_cnt) +{ + u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); + if (val != 0) { + BNX2X_ERR("%s usage count=%d\n", msg, val); + return 1; + } + return 0; +} + +static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) +{ + /* adjust polling timeout */ + if (CHIP_REV_IS_EMUL(bp)) + return FLR_POLL_CNT * 2000; + + if (CHIP_REV_IS_FPGA(bp)) + return FLR_POLL_CNT * 120; + + return FLR_POLL_CNT; +} + +static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) +{ + struct pbf_pN_cmd_regs cmd_regs[] = { + {0, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_Q0 : + PBF_REG_P0_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q0 : + PBF_REG_P0_TQ_LINES_FREED_CNT}, + {1, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_Q1 : + PBF_REG_P1_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q1 : + PBF_REG_P1_TQ_LINES_FREED_CNT}, + {4, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_LB_Q : + PBF_REG_P4_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_LB_Q : + PBF_REG_P4_TQ_LINES_FREED_CNT} + }; + + struct pbf_pN_buf_regs buf_regs[] = { + {0, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_Q0 : + PBF_REG_P0_INIT_CRD , + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_Q0 : + PBF_REG_P0_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : + PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, + {1, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_Q1 : + PBF_REG_P1_INIT_CRD, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_Q1 : + PBF_REG_P1_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : + PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, + {4, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_LB_Q : + PBF_REG_P4_INIT_CRD, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_LB_Q : + PBF_REG_P4_CREDIT, + (CHIP_IS_E3B0(bp)) ? 
+ PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : + PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, + }; + + int i; + + /* Verify the command queues are flushed P0, P1, P4 */ + for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) + bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); + + + /* Verify the transmission buffers are flushed P0, P1, P4 */ + for (i = 0; i < ARRAY_SIZE(buf_regs); i++) + bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); +} + +#define OP_GEN_PARAM(param) \ + (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) + +#define OP_GEN_TYPE(type) \ + (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) + +#define OP_GEN_AGG_VECT(index) \ + (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) + + +static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, + u32 poll_cnt) +{ + struct sdm_op_gen op_gen = {0}; + + u32 comp_addr = BAR_CSTRORM_INTMEM + + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); + int ret = 0; + + if (REG_RD(bp, comp_addr)) { + BNX2X_ERR("Cleanup complete is not 0\n"); + return 1; + } + + op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); + op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); + op_gen.command |= OP_GEN_AGG_VECT(clnup_func); + op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; + + DP(BNX2X_MSG_SP, "FW Final cleanup\n"); + REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command); + + if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { + BNX2X_ERR("FW final cleanup did not succeed\n"); + ret = 1; + } + /* Zero completion for next FLR */ + REG_WR(bp, comp_addr, 0); + + return ret; +} + +static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) +{ + int pos; + u16 status; + + pos = pci_pcie_cap(dev); + if (!pos) + return false; + + pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); + return status & PCI_EXP_DEVSTA_TRPND; +} + +/* PF FLR specific routines +*/ +static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) +{ + + /* wait for CFC PF usage-counter to zero (includes all the VFs) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + CFC_REG_NUM_LCIDS_INSIDE_PF, + "CFC PF usage counter timed out", + poll_cnt)) + return 1; + + + /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + DORQ_REG_PF_USAGE_CNT, + "DQ PF usage counter timed out", + poll_cnt)) + return 1; + + /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), + "QM PF usage counter timed out", + poll_cnt)) + return 1; + + /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), + "Timers VNIC usage counter timed out", + poll_cnt)) + return 1; + if (bnx2x_flr_clnup_poll_hw_counter(bp, + TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), + "Timers NUM_SCANS usage counter timed out", + poll_cnt)) + return 1; + + /* Wait for DMAE PF usage counter to zero */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + dmae_reg_go_c[INIT_DMAE_C(bp)], + "DMAE command register timed out", + poll_cnt)) + return 1; + + return 0; +} + +static void bnx2x_hw_enable_status(struct bnx2x *bp) +{ + u32 val; + + val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); + DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); + + val = REG_RD(bp, PBF_REG_DISABLE_PF); + DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); + + val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); + DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is
0x%x\n", val); + + val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); + DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); + + val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); + DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); + + val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); + DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); + + val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); + DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); + + val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); + DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", + val); +} + +static int bnx2x_pf_flr_clnup(struct bnx2x *bp) +{ + u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); + + DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); + + /* Re-enable PF target read access */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); + + /* Poll HW usage counters */ + if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) + return -EBUSY; + + /* Zero the igu 'trailing edge' and 'leading edge' */ + + /* Send the FW cleanup command */ + if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) + return -EBUSY; + + /* ATC cleanup */ + + /* Verify TX hw is flushed */ + bnx2x_tx_hw_flushed(bp, poll_cnt); + + /* Wait 100ms (not adjusted according to platform) */ + msleep(100); + + /* Verify no pending pci transactions */ + if (bnx2x_is_pcie_pending(bp->pdev)) + BNX2X_ERR("PCIE Transactions still pending\n"); + + /* Debug */ + bnx2x_hw_enable_status(bp); + + /* + * Master enable - Due to WB DMAE writes performed before this + * register is re-initialized as part of the regular function init + */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + + return 0; +} + +static void bnx2x_hc_int_enable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + u32 val = REG_RD(bp, addr); + int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; + int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; + + if (msix) { + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0); + val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else if (msi) { + val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; + val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else { + val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + + if (!CHIP_IS_E1(bp)) { + DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", + val, port, addr); + + REG_WR(bp, addr, val); + + val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; + } + } + + if (CHIP_IS_E1(bp)) + REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); + + DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", + val, port, addr, (msix ? "MSI-X" : (msi ? 
"MSI" : "INTx"))); + + REG_WR(bp, addr, val); + /* + * Ensure that HC_CONFIG is written before leading/trailing edge config + */ + mmiowb(); + barrier(); + + if (!CHIP_IS_E1(bp)) { + /* init leading/trailing edge */ + if (IS_MF(bp)) { + val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); + if (bp->port.pmf) + /* enable nig and gpio3 attention */ + val |= 0x1100; + } else + val = 0xffff; + + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); + } + + /* Make sure that interrupts are indeed enabled from here on */ + mmiowb(); +} + +static void bnx2x_igu_int_enable(struct bnx2x *bp) +{ + u32 val; + int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; + int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; + + val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); + + if (msix) { + val &= ~(IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_SINGLE_ISR_EN); + val |= (IGU_PF_CONF_FUNC_EN | + IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_ATTN_BIT_EN); + } else if (msi) { + val &= ~IGU_PF_CONF_INT_LINE_EN; + val |= (IGU_PF_CONF_FUNC_EN | + IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_ATTN_BIT_EN | + IGU_PF_CONF_SINGLE_ISR_EN); + } else { + val &= ~IGU_PF_CONF_MSI_MSIX_EN; + val |= (IGU_PF_CONF_FUNC_EN | + IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_ATTN_BIT_EN | + IGU_PF_CONF_SINGLE_ISR_EN); + } + + DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n", + val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); + + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + + barrier(); + + /* init leading/trailing edge */ + if (IS_MF(bp)) { + val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); + if (bp->port.pmf) + /* enable nig and gpio3 attention */ + val |= 0x1100; + } else + val = 0xffff; + + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); + + /* Make sure that interrupts are indeed enabled from here on */ + mmiowb(); +} + +void bnx2x_int_enable(struct bnx2x *bp) +{ + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_int_enable(bp); + else + bnx2x_igu_int_enable(bp); +} + +static void bnx2x_hc_int_disable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + u32 val = REG_RD(bp, addr); + + /* + * in E1 we must use only PCI configuration space to disable + * MSI/MSIX capablility + * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block + */ + if (CHIP_IS_E1(bp)) { + /* Since IGU_PF_CONF_MSI_MSIX_EN still always on + * Use mask register to prevent from HC sending interrupts + * after we exit the function + */ + REG_WR(bp, HC_REG_INT_MASK + port*4, 0); + + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + + DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", + val, port, addr); + + /* flush all outstanding writes */ + mmiowb(); + + REG_WR(bp, addr, val); + if (REG_RD(bp, addr) != val) + BNX2X_ERR("BUG! proper val not read from IGU!\n"); +} + +static void bnx2x_igu_int_disable(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); + + val &= ~(IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_ATTN_BIT_EN); + + DP(NETIF_MSG_INTR, "write %x to IGU\n", val); + + /* flush all outstanding writes */ + mmiowb(); + + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) + BNX2X_ERR("BUG! 
proper val not read from IGU!\n"); +} + +void bnx2x_int_disable(struct bnx2x *bp) +{ + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_int_disable(bp); + else + bnx2x_igu_int_disable(bp); +} + +void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) +{ + int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; + int i, offset; + + if (disable_hw) + /* prevent the HW from sending interrupts */ + bnx2x_int_disable(bp); + + /* make sure all ISRs are done */ + if (msix) { + synchronize_irq(bp->msix_table[0].vector); + offset = 1; +#ifdef BCM_CNIC + offset++; +#endif + for_each_eth_queue(bp, i) + synchronize_irq(bp->msix_table[offset++].vector); + } else + synchronize_irq(bp->pdev->irq); + + /* make sure sp_task is not running */ + cancel_delayed_work(&bp->sp_task); + cancel_delayed_work(&bp->period_task); + flush_workqueue(bnx2x_wq); +} + +/* fast path */ + +/* + * General service functions + */ + +/* Return true if succeeded to acquire the lock */ +static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) +{ + u32 lock_status; + u32 resource_bit = (1 << resource); + int func = BP_FUNC(bp); + u32 hw_lock_control_reg; + + DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource); + + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + DP(NETIF_MSG_HW, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return false; + } + + if (func <= 5) + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); + else + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); + + /* Try to acquire the lock */ + REG_WR(bp, hw_lock_control_reg + 4, resource_bit); + lock_status = REG_RD(bp, hw_lock_control_reg); + if (lock_status & resource_bit) + return true; + + DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource); + return false; +} + +/** + * bnx2x_get_leader_lock_resource - get the recovery leader resource id + * + * @bp: driver handle + * + * Returns the recovery leader resource id according to the engine this function + * belongs to. Currently only 2 engines are supported. + */ +static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp) +{ + if (BP_PATH(bp)) + return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; + else + return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; +} + +/** + * bnx2x_trylock_leader_lock - try to acquire a leader lock. + * + * @bp: driver handle + * + * Tries to acquire a leader lock for the current engine. + */ +static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp) +{ + return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); +} + +#ifdef BCM_CNIC +static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); +#endif + +void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) +{ + struct bnx2x *bp = fp->bp; + int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); + int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); + enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; + struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj; + + DP(BNX2X_MSG_SP, + "fp %d cid %d got ramrod #%d state is %x type is %d\n", + fp->index, cid, command, bp->state, + rr_cqe->ramrod_cqe.ramrod_type); + + switch (command) { + case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): + DP(BNX2X_MSG_SP, "got UPDATE ramrod.
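bnx2x_trylock_leader_lock() above is the election primitive for the recovery flow: one function per engine wins the per-path RECOVERY_LEADER resource and drives the global reset. A sketch of the intended pattern (error-recovery details omitted; bnx2x_release_leader_lock() is defined further down in this file):

    static void example_recovery_entry(struct bnx2x *bp)
    {
            if (bnx2x_trylock_leader_lock(bp)) {
                    /* we lead: run the reset sequence for this engine ... */
                    bnx2x_release_leader_lock(bp);
            } else {
                    /* another function leads; wait for recovery to finish */
            }
    }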
CID %d\n", cid); + drv_cmd = BNX2X_Q_CMD_UPDATE; + break; + + case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): + DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid); + drv_cmd = BNX2X_Q_CMD_SETUP; + break; + + case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): + DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid); + drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; + break; + + case (RAMROD_CMD_ID_ETH_HALT): + DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid); + drv_cmd = BNX2X_Q_CMD_HALT; + break; + + case (RAMROD_CMD_ID_ETH_TERMINATE): + DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid); + drv_cmd = BNX2X_Q_CMD_TERMINATE; + break; + + case (RAMROD_CMD_ID_ETH_EMPTY): + DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid); + drv_cmd = BNX2X_Q_CMD_EMPTY; + break; + + default: + BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", + command, fp->index); + return; + } + + if ((drv_cmd != BNX2X_Q_CMD_MAX) && + q_obj->complete_cmd(bp, q_obj, drv_cmd)) + /* q_obj->complete_cmd() failure means that this was + * an unexpected completion. + * + * In this case we don't want to increase the bp->spq_left + * because apparently we haven't sent this command the first + * place. + */ +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#else + return; +#endif + + smp_mb__before_atomic_inc(); + atomic_inc(&bp->cq_spq_left); + /* push the change in bp->spq_left and towards the memory */ + smp_mb__after_atomic_inc(); + + DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); + + return; +} + +void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod) +{ + u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset; + + bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod, + start); +} + +irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) +{ + struct bnx2x *bp = netdev_priv(dev_instance); + u16 status = bnx2x_ack_int(bp); + u16 mask; + int i; + u8 cos; + + /* Return here if interrupt is shared and it's not for us */ + if (unlikely(status == 0)) { + DP(NETIF_MSG_INTR, "not our interrupt!\n"); + return IRQ_NONE; + } + DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + mask = 0x2 << (fp->index + CNIC_PRESENT); + if (status & mask) { + /* Handle Rx or Tx according to SB id */ + prefetch(fp->rx_cons_sb); + for_each_cos_in_tx_queue(fp, cos) + prefetch(fp->txdata[cos].tx_cons_sb); + prefetch(&fp->sb_running_index[SM_RX_ID]); + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + status &= ~mask; + } + } + +#ifdef BCM_CNIC + mask = 0x2; + if (status & (mask | 0x1)) { + struct cnic_ops *c_ops = NULL; + + if (likely(bp->state == BNX2X_STATE_OPEN)) { + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + c_ops->cnic_handler(bp->cnic_data, NULL); + rcu_read_unlock(); + } + + status &= ~mask; + } +#endif + + if (unlikely(status & 0x1)) { + queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + + status &= ~0x1; + if (!status) + return IRQ_HANDLED; + } + + if (unlikely(status)) + DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", + status); + + return IRQ_HANDLED; +} + +/* Link */ + +/* + * General service functions + */ + +int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) +{ + u32 lock_status; + u32 resource_bit = (1 << resource); + int func = BP_FUNC(bp); + u32 hw_lock_control_reg; + int cnt; + + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + DP(NETIF_MSG_HW, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return -EINVAL; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); + } + + /* Validating that the resource is not already taken */ + lock_status = REG_RD(bp, hw_lock_control_reg); + if (lock_status & resource_bit) { + DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", + lock_status, resource_bit); + return -EEXIST; + } + + /* Try for 5 second every 5ms */ + for (cnt = 0; cnt < 1000; cnt++) { + /* Try to acquire the lock */ + REG_WR(bp, hw_lock_control_reg + 4, resource_bit); + lock_status = REG_RD(bp, hw_lock_control_reg); + if (lock_status & resource_bit) + return 0; + + msleep(5); + } + DP(NETIF_MSG_HW, "Timeout\n"); + return -EAGAIN; +} + +int bnx2x_release_leader_lock(struct bnx2x *bp) +{ + return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); +} + +int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) +{ + u32 lock_status; + u32 resource_bit = (1 << resource); + int func = BP_FUNC(bp); + u32 hw_lock_control_reg; + + DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource); + + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + DP(NETIF_MSG_HW, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return -EINVAL; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); + } + + /* Validating that the resource is currently taken */ + lock_status = REG_RD(bp, hw_lock_control_reg); + if (!(lock_status & resource_bit)) { + DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", + lock_status, resource_bit); + return -EFAULT; + } + + REG_WR(bp, hw_lock_control_reg, resource_bit); + return 0; +} + + +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; + int value; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; + } + + /* read GPIO value */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO); + + /* get the requested pin value */ + if ((gpio_reg & gpio_mask) == gpio_mask) + value = 1; + else + value = 0; + + DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); + + return value; +} + +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; + } + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO and mask except the float bits */ + gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n", + gpio_num, gpio_shift); + /* clear FLOAT and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n", + gpio_num, gpio_shift); + /* clear FLOAT and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", + gpio_num, gpio_shift); + /* set FLOAT */ + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + break; + } + + REG_WR(bp, MISC_REG_GPIO, gpio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) +{ + u32 gpio_reg = 0; + int rc = 0; + + /* Any port swapping should be handled by caller. */ + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO and mask except the float bits */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); + /* set CLR */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); + /* set SET */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); + /* set FLOAT */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); + rc = -EINVAL; + break; + } + + if (rc == 0) + REG_WR(bp, MISC_REG_GPIO, gpio_reg); + + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + + return rc; +} + +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; + } + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO int */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); + + switch (mode) { + case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: + DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> " + "output low\n", gpio_num, gpio_shift); + /* clear SET and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: + DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> " + "output high\n", gpio_num, gpio_shift); + /* clear CLR and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + break; + + default: + break; + } + + REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) +{ + u32 spio_mask = (1 << spio_num); + u32 spio_reg; + + if ((spio_num < MISC_REGISTERS_SPIO_4) || + (spio_num > MISC_REGISTERS_SPIO_7)) { + BNX2X_ERR("Invalid SPIO %d\n", spio_num); + return -EINVAL; + } + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); + /* read SPIO and mask except the float bits */ + spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); + + switch (mode) { + case MISC_REGISTERS_SPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); + /* clear FLOAT and set CLR */ + spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); + spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); + break; + + case MISC_REGISTERS_SPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); + /* clear FLOAT and set SET */ + spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); + spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); + break; + + case MISC_REGISTERS_SPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num); + /* set FLOAT */ + spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); + break; + + default: + break; + } + + REG_WR(bp, MISC_REG_SPIO, spio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); + + return 0; +} + +void bnx2x_calc_fc_adv(struct bnx2x *bp) +{ + u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); + switch (bp->link_vars.ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: + bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: + bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: + bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; + break; + + default: + bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + } +} + +u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) +{ + if (!BP_NOMCP(bp)) { + u8 rc; + int cfx_idx = bnx2x_get_link_cfg_idx(bp); + u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; + /* + * Initialize link parameters structure variables + * It is recommended to turn off RX FC for jumbo frames + * for better performance + */ + if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) + bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; + 
else
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+
+ bnx2x_acquire_phy_lock(bp);
+
+ if (load_mode == LOAD_DIAG) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_XGXS;
+ /* do PHY loopback at 10G speed, if possible */
+ if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
+ if (lp->speed_cap_mask[cfx_idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ lp->req_line_speed[cfx_idx] =
+ SPEED_10000;
+ else
+ lp->req_line_speed[cfx_idx] =
+ SPEED_1000;
+ }
+ }
+
+ rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+
+ bnx2x_release_phy_lock(bp);
+
+ bnx2x_calc_fc_adv(bp);
+
+ if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ bnx2x_link_report(bp);
+ } else
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+ bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
+ return rc;
+ }
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+ return -EINVAL;
+}
+
+void bnx2x_link_set(struct bnx2x *bp)
+{
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+
+ bnx2x_calc_fc_adv(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not set link\n");
+}
+
+static void bnx2x__link_reset(struct bnx2x *bp)
+{
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not reset link\n");
+}
+
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
+{
+ u8 rc = 0;
+
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
+ is_serdes);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not test link\n");
+
+ return rc;
+}
+
+static void bnx2x_init_port_minmax(struct bnx2x *bp)
+{
+ u32 r_param = bp->link_vars.line_speed / 8;
+ u32 fair_periodic_timeout_usec;
+ u32 t_fair;
+
+ memset(&(bp->cmng.rs_vars), 0,
+ sizeof(struct rate_shaping_vars_per_port));
+ memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
+
+ /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
+ bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
+
+ /* this is the threshold below which no timer arming will occur;
+ the 1.25 coefficient is for the threshold to be a little bigger
+ than the real time, to compensate for timer inaccuracy */
+ bp->cmng.rs_vars.rs_threshold =
+ (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
+
+ /* resolution of fairness timer */
+ fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+ /* for 10G it is 1000usec, for 1G it is 10000usec */
+ t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
+
+ /* this is the threshold below which we won't arm the timer anymore */
+ bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
+
+ /* we multiply by 1e3/8 to get bytes/msec.
+ We don't want the credits to pass a credit
+ of the t_fair*FAIR_MEM (algorithm resolution) */
+ bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
+ /* since each tick is 4 usec */
+ bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
+}
+
+/* Calculates the sum of vn_min_rates.
+ It's needed for further normalizing of the min_rates.
+ Returns:
+ sum of vn_min_rates.
+ or
+ 0 - if all the min_rates are 0.
+ In the latter case the fairness algorithm should be deactivated.
+ If not all min_rates are zero then those that are zeroes will be set to 1.
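+ For example (numbers for illustration only): two active vns whose
+ FUNC_MF_CFG_MIN_BW fields read 30 and 70 yield vn_min_rate values
+ of 3000 and 7000 and a vn_weight_sum of 10000, while a vn
+ configured to 0 is counted as DEF_MIN_RATE instead.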
+ */ +static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) +{ + int all_zero = 1; + int vn; + + bp->vn_weight_sum = 0; + for (vn = VN_0; vn < E1HVN_MAX; vn++) { + u32 vn_cfg = bp->mf_config[vn]; + u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT) * 100; + + /* Skip hidden vns */ + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) + continue; + + /* If min rate is zero - set it to 1 */ + if (!vn_min_rate) + vn_min_rate = DEF_MIN_RATE; + else + all_zero = 0; + + bp->vn_weight_sum += vn_min_rate; + } + + /* if ETS or all min rates are zeros - disable fairness */ + if (BNX2X_IS_ETS_ENABLED(bp)) { + bp->cmng.flags.cmng_enables &= + ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); + } else if (all_zero) { + bp->cmng.flags.cmng_enables &= + ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + DP(NETIF_MSG_IFUP, "All MIN values are zeroes" + " fairness will be disabled\n"); + } else + bp->cmng.flags.cmng_enables |= + CMNG_FLAGS_PER_PORT_FAIRNESS_VN; +} + +static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) +{ + struct rate_shaping_vars_per_vn m_rs_vn; + struct fairness_vars_per_vn m_fair_vn; + u32 vn_cfg = bp->mf_config[vn]; + int func = 2*vn + BP_PORT(bp); + u16 vn_min_rate, vn_max_rate; + int i; + + /* If function is hidden - set min and max to zeroes */ + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { + vn_min_rate = 0; + vn_max_rate = 0; + + } else { + u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); + + vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT) * 100; + /* If fairness is enabled (not all min rates are zeroes) and + if current min rate is zero - set it to 1. + This is a requirement of the algorithm. */ + if (bp->vn_weight_sum && (vn_min_rate == 0)) + vn_min_rate = DEF_MIN_RATE; + + if (IS_MF_SI(bp)) + /* maxCfg in percents of linkspeed */ + vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; + else + /* maxCfg is absolute in 100Mb units */ + vn_max_rate = maxCfg * 100; + } + + DP(NETIF_MSG_IFUP, + "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", + func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); + + memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); + memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn)); + + /* global vn counter - maximal Mbps for this vn */ + m_rs_vn.vn_counter.rate = vn_max_rate; + + /* quota - number of bytes transmitted in this period */ + m_rs_vn.vn_counter.quota = + (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8; + + if (bp->vn_weight_sum) { + /* credit for each period of the fairness algorithm: + number of bytes in T_FAIR (the vn share the port rate). 
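+ E.g. with vn_weight_sum = 10000, a vn with vn_min_rate = 3000
+ earns 3000 * (T_FAIR_COEF / 80000) bytes of credit per period,
+ floored below by fair_threshold + MIN_ABOVE_THRESH.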
+ vn_weight_sum should not be larger than 10000, thus + T_FAIR_COEF / (8 * vn_weight_sum) will always be greater + than zero */ + m_fair_vn.vn_credit_delta = + max_t(u32, (vn_min_rate * (T_FAIR_COEF / + (8 * bp->vn_weight_sum))), + (bp->cmng.fair_vars.fair_threshold + + MIN_ABOVE_THRESH)); + DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", + m_fair_vn.vn_credit_delta); + } + + /* Store it to internal memory */ + for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4, + ((u32 *)(&m_rs_vn))[i]); + + for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, + ((u32 *)(&m_fair_vn))[i]); +} + +static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) +{ + if (CHIP_REV_IS_SLOW(bp)) + return CMNG_FNS_NONE; + if (IS_MF(bp)) + return CMNG_FNS_MINMAX; + + return CMNG_FNS_NONE; +} + +void bnx2x_read_mf_cfg(struct bnx2x *bp) +{ + int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); + + if (BP_NOMCP(bp)) + return; /* what should be the default bvalue in this case */ + + /* For 2 port configuration the absolute function number formula + * is: + * abs_func = 2 * vn + BP_PORT + BP_PATH + * + * and there are 4 functions per port + * + * For 4 port configuration it is + * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH + * + * and there are 2 functions per port + */ + for (vn = VN_0; vn < E1HVN_MAX; vn++) { + int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); + + if (func >= E1H_FUNC_MAX) + break; + + bp->mf_config[vn] = + MF_CFG_RD(bp, func_mf_config[func].config); + } +} + +static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) +{ + + if (cmng_type == CMNG_FNS_MINMAX) { + int vn; + + /* clear cmng_enables */ + bp->cmng.flags.cmng_enables = 0; + + /* read mf conf from shmem */ + if (read_cfg) + bnx2x_read_mf_cfg(bp); + + /* Init rate shaping and fairness contexts */ + bnx2x_init_port_minmax(bp); + + /* vn_weight_sum and enable fairness if not 0 */ + bnx2x_calc_vn_weight_sum(bp); + + /* calculate and set min-max rate for each vn */ + if (bp->port.pmf) + for (vn = VN_0; vn < E1HVN_MAX; vn++) + bnx2x_init_vn_minmax(bp, vn); + + /* always enable rate shaping and fairness */ + bp->cmng.flags.cmng_enables |= + CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; + if (!bp->vn_weight_sum) + DP(NETIF_MSG_IFUP, "All MIN values are zeroes" + " fairness will be disabled\n"); + return; + } + + /* rate shaping and fairness are disabled */ + DP(NETIF_MSG_IFUP, + "rate shaping and fairness are disabled\n"); +} + +static inline void bnx2x_link_sync_notify(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func; + int vn; + + /* Set the attention towards other drivers on the same port */ + for (vn = VN_0; vn < E1HVN_MAX; vn++) { + if (vn == BP_E1HVN(bp)) + continue; + + func = ((vn << 1) | port); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + + (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); + } +} + +/* This function is called upon link interrupt */ +static void bnx2x_link_attn(struct bnx2x *bp) +{ + /* Make sure that we are synced with the current statistics */ + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + bnx2x_link_update(&bp->link_params, &bp->link_vars); + + if (bp->link_vars.link_up) { + + /* dropless flow control */ + if (!CHIP_IS_E1(bp) && bp->dropless_fc) { + int port = BP_PORT(bp); + u32 pause_enabled = 0; + + if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) + pause_enabled = 1; + + REG_WR(bp, BAR_USTRORM_INTMEM + + 
USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
+ pause_enabled);
+ }
+
+ if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
+ struct host_port_stats *pstats;
+
+ pstats = bnx2x_sp(bp, port_stats);
+ /* reset old mac stats */
+ memset(&(pstats->mac_stx[0]), 0,
+ sizeof(struct mac_stx));
+ }
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ }
+
+ if (bp->link_vars.link_up && bp->link_vars.line_speed) {
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+
+ __bnx2x_link_report(bp);
+
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+}
+
+void bnx2x__link_status_update(struct bnx2x *bp)
+{
+ if (bp->state != BNX2X_STATE_OPEN)
+ return;
+
+ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+
+ if (bp->link_vars.link_up)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ else
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* indicate link status */
+ bnx2x_link_report(bp);
+}
+
+static void bnx2x_pmf_update(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 val;
+
+ bp->port.pmf = 1;
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+ /*
+ * We need the mb() to ensure the ordering between the writing to
+ * bp->port.pmf here and reading it from the bnx2x_periodic_task().
+ */
+ smp_mb();
+
+ /* queue a periodic task */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+
+ bnx2x_dcbx_pmf_update(bp);
+
+ /* enable nig attention */
+ val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ } else if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+ }
+
+ bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+}
+
+/* end of Link */
+
+/* slow path */
+
+/*
+ * General service functions
+ */
+
+/* send the MCP a request, block until there is a reply */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
+{
+ int mb_idx = BP_FW_MB_IDX(bp);
+ u32 seq;
+ u32 rc = 0;
+ u32 cnt = 1;
+ u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
+
+ mutex_lock(&bp->fw_mb_mutex);
+ seq = ++bp->fw_seq;
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
+
+ DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
+ (command | seq), param);
+
+ do {
+ /* let the FW do its magic ... */
+ msleep(delay);
+
+ rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
+
+ /* Give the FW up to 5 seconds (500*10ms) */
+ } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+
+ DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt*delay, rc, seq);
+
+ /* is this a reply to our command? */
+ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
+ rc &= FW_MSG_CODE_MASK;
+ else {
+ /* FW BUG! 
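+ The sequence number echoed in fw_mb_header never matched the
+ one we wrote, even after the full ~5s of polling above.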
+ */
+ BNX2X_ERR("FW failed to respond!\n");
+ bnx2x_fw_dump(bp);
+ rc = 0;
+ }
+ mutex_unlock(&bp->fw_mb_mutex);
+
+ return rc;
+}
+
+static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
+{
+#ifdef BCM_CNIC
+ /* Statistics are not supported for CNIC Clients at the moment */
+ if (IS_FCOE_FP(fp))
+ return false;
+#endif
+ return true;
+}
+
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+{
+ if (CHIP_IS_E1x(bp)) {
+ struct tstorm_eth_function_common_config tcfg = {0};
+
+ storm_memset_func_cfg(bp, &tcfg, p->func_id);
+ }
+
+ /* Enable the function in the FW */
+ storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+ storm_memset_func_en(bp, p->func_id, 1);
+
+ /* spq */
+ if (p->func_flgs & FUNC_FLG_SPQ) {
+ storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+ }
+}
+
+/**
+ * bnx2x_get_common_flags - Return common flags
+ *
+ * @bp: device handle
+ * @fp: queue handle
+ * @zero_stats: TRUE if statistics zeroing is needed
+ *
+ * Return the flags that are common for the Tx-only and the regular connections.
+ */
+static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool zero_stats)
+{
+ unsigned long flags = 0;
+
+ /* PF driver will always initialize the Queue to an ACTIVE state */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
+
+ /* Tx-only connections collect statistics (on the same index as the
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
+ */
+ if (stat_counter_valid(bp, fp)) {
+ __set_bit(BNX2X_Q_FLG_STATS, &flags);
+ if (zero_stats)
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ }
+
+ return flags;
+}
+
+static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ unsigned long flags = 0;
+
+ /* calculate other queue flags */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, &flags);
+
+ if (IS_FCOE_FP(fp))
+ __set_bit(BNX2X_Q_FLG_FCOE, &flags);
+
+ if (!fp->disable_tpa) {
+ __set_bit(BNX2X_Q_FLG_TPA, &flags);
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
+ }
+
+ if (leading) {
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
+ __set_bit(BNX2X_Q_FLG_MCAST, &flags);
+ }
+
+ /* Always set HW VLAN stripping */
+ __set_bit(BNX2X_Q_FLG_VLAN, &flags);
+
+ return flags | bnx2x_get_common_flags(bp, fp, true);
+}
+
+static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
+ u8 cos)
+{
+ gen_init->stat_id = bnx2x_stats_id(fp);
+ gen_init->spcl_id = fp->cl_id;
+
+ /* Always use mini-jumbo MTU for FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+ else
+ gen_init->mtu = bp->dev->mtu;
+
+ gen_init->cos = cos;
+}
+
+static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+ struct bnx2x_rxq_setup_params *rxq_init)
+{
+ u8 max_sge = 0;
+ u16 sge_sz = 0;
+ u16 tpa_agg_size = 0;
+
+ if (!fp->disable_tpa) {
+ pause->sge_th_hi = 250;
+ pause->sge_th_lo = 150;
+ tpa_agg_size = min_t(u32,
+ (min_t(u32, 8, MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+ max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
+ SGE_PAGE_SHIFT;
+ max_sge = ((max_sge + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
+ 0xffff);
+ }
+
+ /* pause - not for e1 */
+ if (!CHIP_IS_E1(bp)) {
+ pause->bd_th_hi = 350;
+ pause->bd_th_lo = 250;
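+ /* the RCQ ring below uses the same hi/lo pause thresholds
+ * as the BD ring */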
+ pause->rcq_th_hi = 350;
+ pause->rcq_th_lo = 250;
+
+ pause->pri_map = 1;
+ }
+
+ /* rxq setup */
+ rxq_init->dscr_map = fp->rx_desc_mapping;
+ rxq_init->sge_map = fp->rx_sge_mapping;
+ rxq_init->rcq_map = fp->rx_comp_mapping;
+ rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+
+ /* This should be the maximum number of data bytes that may be
+ * placed on the BD (not including padding).
+ */
+ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
+ IP_HEADER_ALIGNMENT_PADDING;
+
+ rxq_init->cl_qzone_id = fp->cl_qzone_id;
+ rxq_init->tpa_agg_sz = tpa_agg_size;
+ rxq_init->sge_buf_sz = sge_sz;
+ rxq_init->max_sges_pkt = max_sge;
+ rxq_init->rss_engine_id = BP_FUNC(bp);
+
+ /* Maximum number of simultaneous TPA aggregations for this Queue.
+ *
+ * For PF Clients it should be the maximum available number.
+ * VF driver(s) may want to define it to a smaller value.
+ */
+ rxq_init->max_tpa_queues =
+ (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+ ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+
+ rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ rxq_init->fw_sb_id = fp->fw_sb_id;
+
+ if (IS_FCOE_FP(fp))
+ rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
+ else
+ rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+}
+
+static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
+ u8 cos)
+{
+ txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+ txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ txq_init->fw_sb_id = fp->fw_sb_id;
+
+ /*
+ * set the TSS leading client id for TX classification ==
+ * leading RSS client id
+ */
+ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+ if (IS_FCOE_FP(fp)) {
+ txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
+ }
+}
+
+static void bnx2x_pf_init(struct bnx2x *bp)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ struct event_ring_data eq_data = { {0} };
+ u16 flags;
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* reset IGU PF statistics: MSIX + ATTN */
+ /* PF */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ /* ATTN */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ BNX2X_IGU_STAS_MSG_PF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ }
+
+ /* function setup flags */
+ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
+
+ /* This flag is relevant for E1x only.
+ * E2 doesn't have a TPA configuration at the function level.
+ */
+ flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+
+ func_init.func_flgs = flags;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = BP_FUNC(bp);
+ func_init.spq_map = bp->spq_mapping;
+ func_init.spq_prod = bp->spq_prod_idx;
+
+ bnx2x_func_init(bp, &func_init);
+
+ memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+ /*
+ * Congestion management values depend on the link rate.
+ * There is no active link so the initial link rate is set to 10 Gbps.
+ * When the link comes up, the congestion management values are
+ * re-calculated according to the actual link rate.
+ */ + bp->link_vars.line_speed = SPEED_10000; + bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); + + /* Only the PMF sets the HW */ + if (bp->port.pmf) + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); + + /* init Event Queue */ + eq_data.base_addr.hi = U64_HI(bp->eq_mapping); + eq_data.base_addr.lo = U64_LO(bp->eq_mapping); + eq_data.producer = bp->eq_prod; + eq_data.index_id = HC_SP_INDEX_EQ_CONS; + eq_data.sb_id = DEF_SB_ID; + storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); +} + + +static void bnx2x_e1h_disable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + + bnx2x_tx_disable(bp); + + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); +} + +static void bnx2x_e1h_enable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); + + /* Tx queue should be only reenabled */ + netif_tx_wake_all_queues(bp->dev); + + /* + * Should not call netif_carrier_on since it will be called if the link + * is up when checking for link state + */ +} + +/* called due to MCP event (on pmf): + * reread new bandwidth configuration + * configure FW + * notify others function about the change + */ +static inline void bnx2x_config_mf_bw(struct bnx2x *bp) +{ + if (bp->link_vars.link_up) { + bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); + bnx2x_link_sync_notify(bp); + } + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); +} + +static inline void bnx2x_set_mf_bw(struct bnx2x *bp) +{ + bnx2x_config_mf_bw(bp); + bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); +} + +static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) +{ + DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); + + if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { + + /* + * This is the only place besides the function initialization + * where the bp->flags can change so it is done without any + * locks + */ + if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { + DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); + bp->flags |= MF_FUNC_DIS; + + bnx2x_e1h_disable(bp); + } else { + DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); + bp->flags &= ~MF_FUNC_DIS; + + bnx2x_e1h_enable(bp); + } + dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; + } + if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { + bnx2x_config_mf_bw(bp); + dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; + } + + /* Report results to MCP */ + if (dcc_event) + bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); + else + bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); +} + +/* must be called under the spq lock */ +static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) +{ + struct eth_spe *next_spe = bp->spq_prod_bd; + + if (bp->spq_prod_bd == bp->spq_last_bd) { + bp->spq_prod_bd = bp->spq; + bp->spq_prod_idx = 0; + DP(NETIF_MSG_TIMER, "end of spq\n"); + } else { + bp->spq_prod_bd++; + bp->spq_prod_idx++; + } + return next_spe; +} + +/* must be called under the spq lock */ +static inline void bnx2x_sp_prod_update(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + + /* + * Make sure that BD data is updated before writing the producer: + * BD data is written to the memory, the producer is read from the + * memory, thus we need a full memory barrier to ensure the ordering. 
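+ * (The chip only fetches the BD after it observes the new
+ * producer value, so the BD write must be globally visible
+ * first.)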
+ */
+ mb();
+
+ REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+ bp->spq_prod_idx);
+ mmiowb();
+}
+
+/**
+ * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
+ *
+ * @cmd: command to check
+ * @cmd_type: command type
+ */
+static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+{
+ if ((cmd_type == NONE_CONNECTION_TYPE) ||
+ (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
+ (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
+ (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * bnx2x_sp_post - place a single command on an SP ring
+ *
+ * @bp: driver handle
+ * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
+ * @cid: SW CID the command is related to
+ * @data_hi: command private data address (high 32 bits)
+ * @data_lo: command private data address (low 32 bits)
+ * @cmd_type: command type (e.g. NONE, ETH)
+ *
+ * SP data is handled as if it's always an address pair, thus data fields are
+ * not swapped to little endian in upper functions. Instead this function swaps
+ * data as if it's two u32 fields.
+ */
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+ u32 data_hi, u32 data_lo, int cmd_type)
+{
+ struct eth_spe *spe;
+ u16 type;
+ bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -EIO;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+
+ if (common) {
+ if (!atomic_read(&bp->eq_spq_left)) {
+ BNX2X_ERR("BUG! EQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+ } else if (!atomic_read(&bp->cq_spq_left)) {
+ BNX2X_ERR("BUG! SPQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+
+ spe = bnx2x_sp_get_next(bp);
+
+ /* CID needs the port number to be encoded in it */
+ spe->hdr.conn_and_cmd_data =
+ cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+ HW_CID(bp, cid));
+
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+
+ spe->hdr.type = cpu_to_le16(type);
+
+ spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
+ spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
+
+ /*
+ * It's ok if the actual decrement is issued towards the memory
+ * somewhere between the spin_lock and spin_unlock. Thus no
+ * more explicit memory barrier is needed.
+ */
+ if (common)
+ atomic_dec(&bp->eq_spq_left);
+ else
+ atomic_dec(&bp->cq_spq_left);
+
+ DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+ "SPQE[%x] (%x:%x) (cmd, common?) 
(%d,%d) hw_cid %x data (%x:%x) " + "type(0x%x) left (CQ, EQ) (%x,%x)\n", + bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), + (u32)(U64_LO(bp->spq_mapping) + + (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, + HW_CID(bp, cid), data_hi, data_lo, type, + atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); + + bnx2x_sp_prod_update(bp); + spin_unlock_bh(&bp->spq_lock); + return 0; +} + +/* acquire split MCP access lock register */ +static int bnx2x_acquire_alr(struct bnx2x *bp) +{ + u32 j, val; + int rc = 0; + + might_sleep(); + for (j = 0; j < 1000; j++) { + val = (1UL << 31); + REG_WR(bp, GRCBASE_MCP + 0x9c, val); + val = REG_RD(bp, GRCBASE_MCP + 0x9c); + if (val & (1L << 31)) + break; + + msleep(5); + } + if (!(val & (1L << 31))) { + BNX2X_ERR("Cannot acquire MCP access lock register\n"); + rc = -EBUSY; + } + + return rc; +} + +/* release split MCP access lock register */ +static void bnx2x_release_alr(struct bnx2x *bp) +{ + REG_WR(bp, GRCBASE_MCP + 0x9c, 0); +} + +#define BNX2X_DEF_SB_ATT_IDX 0x0001 +#define BNX2X_DEF_SB_IDX 0x0002 + +static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) +{ + struct host_sp_status_block *def_sb = bp->def_status_blk; + u16 rc = 0; + + barrier(); /* status block is written to by the chip */ + if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { + bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; + rc |= BNX2X_DEF_SB_ATT_IDX; + } + + if (bp->def_idx != def_sb->sp_sb.running_index) { + bp->def_idx = def_sb->sp_sb.running_index; + rc |= BNX2X_DEF_SB_IDX; + } + + /* Do not reorder: indecies reading should complete before handling */ + barrier(); + return rc; +} + +/* + * slow path service functions + */ + +static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) +{ + int port = BP_PORT(bp); + u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : + NIG_REG_MASK_INTERRUPT_PORT0; + u32 aeu_mask; + u32 nig_mask = 0; + u32 reg_addr; + + if (bp->attn_state & asserted) + BNX2X_ERR("IGU ERROR\n"); + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + aeu_mask = REG_RD(bp, aeu_addr); + + DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", + aeu_mask, asserted); + aeu_mask &= ~(asserted & 0x3ff); + DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); + + REG_WR(bp, aeu_addr, aeu_mask); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); + bp->attn_state |= asserted; + DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); + + if (asserted & ATTN_HARD_WIRED_MASK) { + if (asserted & ATTN_NIG_FOR_FUNC) { + + bnx2x_acquire_phy_lock(bp); + + /* save nig interrupt mask */ + nig_mask = REG_RD(bp, nig_int_mask_addr); + + /* If nig_mask is not set, no need to call the update + * function. + */ + if (nig_mask) { + REG_WR(bp, nig_int_mask_addr, 0); + + bnx2x_link_attn(bp); + } + + /* handle unicore attn? 
*/ + } + if (asserted & ATTN_SW_TIMER_4_FUNC) + DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); + + if (asserted & GPIO_2_FUNC) + DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); + + if (asserted & GPIO_3_FUNC) + DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); + + if (asserted & GPIO_4_FUNC) + DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); + + if (port == 0) { + if (asserted & ATTN_GENERAL_ATTN_1) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_2) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_3) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); + } + } else { + if (asserted & ATTN_GENERAL_ATTN_4) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_5) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_6) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); + } + } + + } /* if hardwired */ + + if (bp->common.int_block == INT_BLOCK_HC) + reg_addr = (HC_REG_COMMAND_REG + port*32 + + COMMAND_REG_ATTN_BITS_SET); + else + reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); + + DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, + (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); + REG_WR(bp, reg_addr, asserted); + + /* now set back the mask */ + if (asserted & ATTN_NIG_FOR_FUNC) { + REG_WR(bp, nig_int_mask_addr, nig_mask); + bnx2x_release_phy_lock(bp); + } +} + +static inline void bnx2x_fan_failure(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 ext_phy_config; + /* mark the failure */ + ext_phy_config = + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config); + + ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; + ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; + SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, + ext_phy_config); + + /* log the failure */ + netdev_err(bp->dev, "Fan Failure on Network Controller has caused" + " the driver to shutdown the card to prevent permanent" + " damage. Please contact OEM Support for assistance\n"); +} + +static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) +{ + int port = BP_PORT(bp); + int reg_offset; + u32 val; + + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + + if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { + + val = REG_RD(bp, reg_offset); + val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("SPIO5 hw attention\n"); + + /* Fan failure attention */ + bnx2x_hw_reset_phy(&bp->link_params); + bnx2x_fan_failure(bp); + } + + if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { + bnx2x_acquire_phy_lock(bp); + bnx2x_handle_module_detect_int(&bp->link_params); + bnx2x_release_phy_lock(bp); + } + + if (attn & HW_INTERRUT_ASSERT_SET_0) { + + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("FATAL HW block attention set0 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); + bnx2x_panic(); + } +} + +static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) +{ + u32 val; + + if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { + + val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); + BNX2X_ERR("DB hw attention 0x%x\n", val); + /* DORQ discard attention */ + if (val & 0x2) + BNX2X_ERR("FATAL error from DORQ\n"); + } + + if (attn & HW_INTERRUT_ASSERT_SET_1) { + + int port = BP_PORT(bp); + int reg_offset; + + reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); + + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("FATAL HW block attention set1 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); + bnx2x_panic(); + } +} + +static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) +{ + u32 val; + + if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { + + val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); + BNX2X_ERR("CFC hw attention 0x%x\n", val); + /* CFC error attention */ + if (val & 0x2) + BNX2X_ERR("FATAL error from CFC\n"); + } + + if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { + val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); + BNX2X_ERR("PXP hw attention-0 0x%x\n", val); + /* RQ_USDMDP_FIFO_OVERFLOW */ + if (val & 0x18000) + BNX2X_ERR("FATAL error from PXP\n"); + + if (!CHIP_IS_E1x(bp)) { + val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); + BNX2X_ERR("PXP hw attention-1 0x%x\n", val); + } + } + + if (attn & HW_INTERRUT_ASSERT_SET_2) { + + int port = BP_PORT(bp); + int reg_offset; + + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); + + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("FATAL HW block attention set2 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); + bnx2x_panic(); + } +} + +static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) +{ + u32 val; + + if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { + + if (attn & BNX2X_PMF_LINK_ASSERT) { + int func = BP_FUNC(bp); + + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, + func_mf_config[BP_ABS_FUNC(bp)].config); + val = SHMEM_RD(bp, + func_mb[BP_FW_MB_IDX(bp)].drv_status); + if (val & DRV_STATUS_DCC_EVENT_MASK) + bnx2x_dcc_event(bp, + (val & DRV_STATUS_DCC_EVENT_MASK)); + + if (val & DRV_STATUS_SET_MF_BW) + bnx2x_set_mf_bw(bp); + + if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) + bnx2x_pmf_update(bp); + + if (bp->port.pmf && + (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && + bp->dcbx_enabled > 0) + /* start dcbx state machine */ + bnx2x_dcbx_set_params(bp, + BNX2X_DCBX_STATE_NEG_RECEIVED); + if (bp->link_vars.periodic_flags & + PERIODIC_FLAGS_LINK_EVENT) { + /* sync with link */ + bnx2x_acquire_phy_lock(bp); + bp->link_vars.periodic_flags &= + ~PERIODIC_FLAGS_LINK_EVENT; + bnx2x_release_phy_lock(bp); + if (IS_MF(bp)) + bnx2x_link_sync_notify(bp); + bnx2x_link_report(bp); + } + /* Always call it here: bnx2x_link_report() will + * prevent the link indication duplication. + */ + bnx2x__link_status_update(bp); + } else if (attn & BNX2X_MC_ASSERT_BITS) { + + BNX2X_ERR("MC assert!\n"); + bnx2x_mc_assert(bp); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); + bnx2x_panic(); + + } else if (attn & BNX2X_MCP_ASSERT) { + + BNX2X_ERR("MCP assert!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); + bnx2x_fw_dump(bp); + + } else + BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); + } + + if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { + BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); + if (attn & BNX2X_GRC_TIMEOUT) { + val = CHIP_IS_E1(bp) ? 0 : + REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); + BNX2X_ERR("GRC time-out 0x%08x\n", val); + } + if (attn & BNX2X_GRC_RSV) { + val = CHIP_IS_E1(bp) ? 0 : + REG_RD(bp, MISC_REG_GRC_RSV_ATTN); + BNX2X_ERR("GRC reserved 0x%08x\n", val); + } + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); + } +} + +/* + * Bits map: + * 0-7 - Engine0 load counter. + * 8-15 - Engine1 load counter. + * 16 - Engine0 RESET_IN_PROGRESS bit. + * 17 - Engine1 RESET_IN_PROGRESS bit. + * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function + * on the engine + * 19 - Engine1 ONE_IS_LOADED. + * 20 - Chip reset flow bit. When set none-leader must wait for both engines + * leader to complete (check for both RESET_IN_PROGRESS bits and not for + * just the one belonging to its engine). + * + */ +#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 + +#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff +#define BNX2X_PATH0_LOAD_CNT_SHIFT 0 +#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 +#define BNX2X_PATH1_LOAD_CNT_SHIFT 8 +#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 +#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 +#define BNX2X_GLOBAL_RESET_BIT 0x00040000 + +/* + * Set the GLOBAL_RESET bit. 
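+ * (This is a read-modify-write because BNX2X_RECOVERY_GLOB_REG
+ * also carries both engines' load counters and RESET_IN_PROGRESS
+ * bits, per the bit map above.)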
+ * + * Should be run under rtnl lock + */ +void bnx2x_set_reset_global(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); + barrier(); + mmiowb(); +} + +/* + * Clear the GLOBAL_RESET bit. + * + * Should be run under rtnl lock + */ +static inline void bnx2x_clear_reset_global(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); + barrier(); + mmiowb(); +} + +/* + * Checks the GLOBAL_RESET bit. + * + * should be run under rtnl lock + */ +static inline bool bnx2x_reset_is_global(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); + return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; +} + +/* + * Clear RESET_IN_PROGRESS bit for the current engine. + * + * Should be run under rtnl lock + */ +static inline void bnx2x_set_reset_done(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 bit = BP_PATH(bp) ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + + /* Clear the bit */ + val &= ~bit; + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + barrier(); + mmiowb(); +} + +/* + * Set RESET_IN_PROGRESS for the current engine. + * + * should be run under rtnl lock + */ +void bnx2x_set_reset_in_progress(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 bit = BP_PATH(bp) ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + + /* Set the bit */ + val |= bit; + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + barrier(); + mmiowb(); +} + +/* + * Checks the RESET_IN_PROGRESS bit for the given engine. + * should be run under rtnl lock + */ +bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 bit = engine ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + + /* return false if bit is set */ + return (val & bit) ? false : true; +} + +/* + * Increment the load counter for the current engine. + * + * should be run under rtnl lock + */ +void bnx2x_inc_load_cnt(struct bnx2x *bp) +{ + u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + + DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); + + /* get the current counter value */ + val1 = (val & mask) >> shift; + + /* increment... */ + val1++; + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + barrier(); + mmiowb(); +} + +/** + * bnx2x_dec_load_cnt - decrement the load counter + * + * @bp: driver handle + * + * Should be run under rtnl lock. + * Decrements the load counter for the current engine. Returns + * the new counter value. + */ +u32 bnx2x_dec_load_cnt(struct bnx2x *bp) +{ + u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + + DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); + + /* get the current counter value */ + val1 = (val & mask) >> shift; + + /* decrement... 
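+ (no underflow check is done here; callers pair this with an
+ earlier bnx2x_inc_load_cnt())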
*/ + val1--; + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + barrier(); + mmiowb(); + + return val1; +} + +/* + * Read the load counter for the current engine. + * + * should be run under rtnl lock + */ +static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine) +{ + u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK); + u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val); + + val = (val & mask) >> shift; + + DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val); + + return val; +} + +/* + * Reset the load counter for the current engine. + * + * should be run under rtnl lock + */ +static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); +} + +static inline void _print_next_block(int idx, const char *blk) +{ + if (idx) + pr_cont(", "); + pr_cont("%s", blk); +} + +static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, + bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "BRB"); + break; + case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "PARSER"); + break; + case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "TSDM"); + break; + case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: + if (print) + _print_next_block(par_num++, + "SEARCHER"); + break; + case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "TCM"); + break; + case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "TSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "XPB"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, + bool *global, bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "PBF"); + break; + case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "QM"); + break; + case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "TM"); + break; + case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "XSDM"); + break; + case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "XCM"); + break; + case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "XSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: + if (print) + _print_next_block(par_num++, + "DOORBELLQ"); + break; + case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "NIG"); + break; + case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: + if (print) + _print_next_block(par_num++, + "VAUX PCI CORE"); + 
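+ /* VAUX PCI core parity cannot be recovered per function,
+ * so mark the error as global */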
*global = true; + break; + case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "DEBUG"); + break; + case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "USDM"); + break; + case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "UCM"); + break; + case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "USEMI"); + break; + case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "UPB"); + break; + case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CSDM"); + break; + case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CCM"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, + bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "PXP"); + break; + case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: + if (print) + _print_next_block(par_num++, + "PXPPCICLOCKCLIENT"); + break; + case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CFC"); + break; + case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CDU"); + break; + case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "DMAE"); + break; + case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "IGU"); + break; + case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "MISC"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, + bool *global, bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: + if (print) + _print_next_block(par_num++, "MCP ROM"); + *global = true; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: + if (print) + _print_next_block(par_num++, + "MCP UMP RX"); + *global = true; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: + if (print) + _print_next_block(par_num++, + "MCP UMP TX"); + *global = true; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: + if (print) + _print_next_block(par_num++, + "MCP SCPAD"); + *global = true; + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, + bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "PGLUE_B"); + break; + case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "ATC"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, + u32 *sig) +{ + 
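+ /* scan the five after-invert registers; print each faulty block
+ * once, and let groups 1 and 3 flag MCP/VAUX errors as global */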
if ((sig[0] & HW_PRTY_ASSERT_SET_0) || + (sig[1] & HW_PRTY_ASSERT_SET_1) || + (sig[2] & HW_PRTY_ASSERT_SET_2) || + (sig[3] & HW_PRTY_ASSERT_SET_3) || + (sig[4] & HW_PRTY_ASSERT_SET_4)) { + int par_num = 0; + DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: " + "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x " + "[4]:0x%08x\n", + sig[0] & HW_PRTY_ASSERT_SET_0, + sig[1] & HW_PRTY_ASSERT_SET_1, + sig[2] & HW_PRTY_ASSERT_SET_2, + sig[3] & HW_PRTY_ASSERT_SET_3, + sig[4] & HW_PRTY_ASSERT_SET_4); + if (print) + netdev_err(bp->dev, + "Parity errors detected in blocks: "); + par_num = bnx2x_check_blocks_with_parity0( + sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); + par_num = bnx2x_check_blocks_with_parity1( + sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); + par_num = bnx2x_check_blocks_with_parity2( + sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); + par_num = bnx2x_check_blocks_with_parity3( + sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); + par_num = bnx2x_check_blocks_with_parity4( + sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); + + if (print) + pr_cont("\n"); + + return true; + } else + return false; +} + +/** + * bnx2x_chk_parity_attn - checks for parity attentions. + * + * @bp: driver handle + * @global: true if there was a global attention + * @print: show parity attention in syslog + */ +bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) +{ + struct attn_route attn = { {0} }; + int port = BP_PORT(bp); + + attn.sig[0] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + + port*4); + attn.sig[1] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + + port*4); + attn.sig[2] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + + port*4); + attn.sig[3] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + + port*4); + + if (!CHIP_IS_E1x(bp)) + attn.sig[4] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + + port*4); + + return bnx2x_parity_attn(bp, global, print, attn.sig); +} + + +static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) +{ + u32 val; + if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { + + val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); + BNX2X_ERR("PGLUE hw attention 0x%x\n", val); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "ADDRESS_ERROR\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "INCORRECT_RCV_BEHAVIOR\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "WAS_ERROR_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "VF_LENGTH_VIOLATION_ATTN\n"); + if (val & + PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "VF_GRC_SPACE_VIOLATION_ATTN\n"); + if (val & + PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "VF_MSIX_BAR_VIOLATION_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "TCPL_ERROR_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "TCPL_IN_TWO_RCBS_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "CSSNOOP_FIFO_OVERFLOW\n"); + } + if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { + val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); + BNX2X_ERR("ATC hw attention 
0x%x\n", val); + if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) + BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) + BNX2X_ERR("ATC_ATC_INT_STS_REG" + "_ATC_TCPL_TO_NOT_PEND\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) + BNX2X_ERR("ATC_ATC_INT_STS_REG_" + "ATC_GPA_MULTIPLE_HITS\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) + BNX2X_ERR("ATC_ATC_INT_STS_REG_" + "ATC_RCPL_TO_EMPTY_CNT\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) + BNX2X_ERR("ATC_ATC_INT_STS_REG_" + "ATC_IREQ_LESS_THAN_STU\n"); + } + + if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { + BNX2X_ERR("FATAL parity attention set4 0x%x\n", + (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); + } + +} + +static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) +{ + struct attn_route attn, *group_mask; + int port = BP_PORT(bp); + int index; + u32 reg_addr; + u32 val; + u32 aeu_mask; + bool global = false; + + /* need to take HW lock because MCP or other port might also + try to handle this event */ + bnx2x_acquire_alr(bp); + + if (bnx2x_chk_parity_attn(bp, &global, true)) { +#ifndef BNX2X_STOP_ON_ERROR + bp->recovery_state = BNX2X_RECOVERY_INIT; + schedule_delayed_work(&bp->sp_rtnl_task, 0); + /* Disable HW interrupts */ + bnx2x_int_disable(bp); + /* In case of parity errors don't handle attentions so that + * other function would "see" parity errors. + */ +#else + bnx2x_panic(); +#endif + bnx2x_release_alr(bp); + return; + } + + attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); + attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); + attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); + attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); + if (!CHIP_IS_E1x(bp)) + attn.sig[4] = + REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); + else + attn.sig[4] = 0; + + DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", + attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); + + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { + if (deasserted & (1 << index)) { + group_mask = &bp->attn_group[index]; + + DP(NETIF_MSG_HW, "group[%d]: %08x %08x " + "%08x %08x %08x\n", + index, + group_mask->sig[0], group_mask->sig[1], + group_mask->sig[2], group_mask->sig[3], + group_mask->sig[4]); + + bnx2x_attn_int_deasserted4(bp, + attn.sig[4] & group_mask->sig[4]); + bnx2x_attn_int_deasserted3(bp, + attn.sig[3] & group_mask->sig[3]); + bnx2x_attn_int_deasserted1(bp, + attn.sig[1] & group_mask->sig[1]); + bnx2x_attn_int_deasserted2(bp, + attn.sig[2] & group_mask->sig[2]); + bnx2x_attn_int_deasserted0(bp, + attn.sig[0] & group_mask->sig[0]); + } + } + + bnx2x_release_alr(bp); + + if (bp->common.int_block == INT_BLOCK_HC) + reg_addr = (HC_REG_COMMAND_REG + port*32 + + COMMAND_REG_ATTN_BITS_CLR); + else + reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); + + val = ~deasserted; + DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, + (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); + REG_WR(bp, reg_addr, val); + + if (~bp->attn_state & deasserted) + BNX2X_ERR("IGU ERROR\n"); + + reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + aeu_mask = REG_RD(bp, reg_addr); + + DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", + aeu_mask, deasserted); + aeu_mask |= (deasserted & 0x3ff); + DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); + + REG_WR(bp, reg_addr, aeu_mask); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); + bp->attn_state &= ~deasserted; + DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); +} + +static void bnx2x_attn_int(struct bnx2x *bp) +{ + /* read local copy of bits */ + u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. + attn_bits); + u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. + attn_bits_ack); + u32 attn_state = bp->attn_state; + + /* look for changed bits */ + u32 asserted = attn_bits & ~attn_ack & ~attn_state; + u32 deasserted = ~attn_bits & attn_ack & attn_state; + + DP(NETIF_MSG_HW, + "attn_bits %x attn_ack %x asserted %x deasserted %x\n", + attn_bits, attn_ack, asserted, deasserted); + + if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) + BNX2X_ERR("BAD attention state\n"); + + /* handle bits that were raised */ + if (asserted) + bnx2x_attn_int_asserted(bp, asserted); + + if (deasserted) + bnx2x_attn_int_deasserted(bp, deasserted); +} + +void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update) +{ + u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; + + bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, + igu_addr); +} + +static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) +{ + /* No memory barriers */ + storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); + mmiowb(); /* keep prod updates ordered */ +} + +#ifdef BCM_CNIC +static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, + union event_ring_elem *elem) +{ + u8 err = elem->message.error; + + if (!bp->cnic_eth_dev.starting_cid || + (cid < bp->cnic_eth_dev.starting_cid && + cid != bp->cnic_eth_dev.iscsi_l2_cid)) + return 1; + + DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); + + if (unlikely(err)) { + + BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", + cid); + bnx2x_panic_dump(bp); + } + bnx2x_cnic_cfc_comp(bp, cid, err); + return 0; +} +#endif + +static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) +{ + struct bnx2x_mcast_ramrod_params rparam; + int rc; + + memset(&rparam, 0, sizeof(rparam)); + + rparam.mcast_obj = &bp->mcast_obj; + + netif_addr_lock_bh(bp->dev); + + /* Clear pending state for the last command */ + bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); + + /* If there are pending mcast commands - send them */ + if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + if (rc < 0) + BNX2X_ERR("Failed to send pending mcast commands: %d\n", + rc); + } + + netif_addr_unlock_bh(bp->dev); +} + +static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, + union event_ring_elem *elem) +{ + unsigned long ramrod_flags = 0; + int rc = 0; + u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + struct bnx2x_vlan_mac_obj *vlan_mac_obj; + + /* Always push next commands out, don't wait here */ + __set_bit(RAMROD_CONT, &ramrod_flags); + + switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + case BNX2X_FILTER_MAC_PENDING: +#ifdef BCM_CNIC + if (cid == BNX2X_ISCSI_ETH_CID) + 
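+ /* the iSCSI L2 client has a MAC object of its own */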
vlan_mac_obj = &bp->iscsi_l2_mac_obj; + else +#endif + vlan_mac_obj = &bp->fp[cid].mac_obj; + + break; + + case BNX2X_FILTER_MCAST_PENDING: + /* This is only relevant for 57710 where multicast MACs are + * configured as unicast MACs using the same ramrod. + */ + bnx2x_handle_mcast_eqe(bp); + return; + default: + BNX2X_ERR("Unsupported classification command: %d\n", + elem->message.data.eth_event.echo); + return; + } + + rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); + + if (rc < 0) + BNX2X_ERR("Failed to schedule new commands: %d\n", rc); + else if (rc > 0) + DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); + +} + +#ifdef BCM_CNIC +static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); +#endif + +static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) +{ + netif_addr_lock_bh(bp->dev); + + clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); + + /* Send rx_mode command again if it was requested */ + if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) + bnx2x_set_storm_rx_mode(bp); +#ifdef BCM_CNIC + else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, + &bp->sp_state)) + bnx2x_set_iscsi_eth_rx_mode(bp, true); + else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, + &bp->sp_state)) + bnx2x_set_iscsi_eth_rx_mode(bp, false); +#endif + + netif_addr_unlock_bh(bp->dev); +} + +static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( + struct bnx2x *bp, u32 cid) +{ + DP(BNX2X_MSG_SP, "retrieving fp from cid %d", cid); +#ifdef BCM_CNIC + if (cid == BNX2X_FCOE_ETH_CID) + return &bnx2x_fcoe(bp, q_obj); + else +#endif + return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj); +} + +static void bnx2x_eq_int(struct bnx2x *bp) +{ + u16 hw_cons, sw_cons, sw_prod; + union event_ring_elem *elem; + u32 cid; + u8 opcode; + int spqe_cnt = 0; + struct bnx2x_queue_sp_obj *q_obj; + struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; + struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; + + hw_cons = le16_to_cpu(*bp->eq_cons_sb); + + /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. + * When we get the next-page we need to adjust so the loop + * condition below will be met. The next element is the size of a + * regular element and hence incrementing by 1 + */ + if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) + hw_cons++; + + /* This function may never run in parallel with itself for a + * specific bp, thus there is no need in "paired" read memory + * barrier here. 
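+ * (it is only invoked from the bnx2x_sp_task slowpath worker)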
+ */ + sw_cons = bp->eq_cons; + sw_prod = bp->eq_prod; + + DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", + hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); + + for (; sw_cons != hw_cons; + sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { + + + elem = &bp->eq_ring[EQ_DESC(sw_cons)]; + + cid = SW_CID(elem->message.data.cfc_del_event.cid); + opcode = elem->message.opcode; + + + /* handle eq element */ + switch (opcode) { + case EVENT_RING_OPCODE_STAT_QUERY: + DP(NETIF_MSG_TIMER, "got statistics comp event %d\n", + bp->stats_comp++); + /* nothing to do with stats comp */ + goto next_spqe; + + case EVENT_RING_OPCODE_CFC_DEL: + /* handle according to cid range */ + /* + * we may want to verify here that the bp state is + * HALTING + */ + DP(BNX2X_MSG_SP, + "got delete ramrod for MULTI[%d]\n", cid); +#ifdef BCM_CNIC + if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) + goto next_spqe; +#endif + q_obj = bnx2x_cid_to_q_obj(bp, cid); + + if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) + break; + + + + goto next_spqe; + + case EVENT_RING_OPCODE_STOP_TRAFFIC: + DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n"); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_TX_STOP)) + break; + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); + goto next_spqe; + + case EVENT_RING_OPCODE_START_TRAFFIC: + DP(BNX2X_MSG_SP, "got START TRAFFIC\n"); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_TX_START)) + break; + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); + goto next_spqe; + case EVENT_RING_OPCODE_FUNCTION_START: + DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n"); + if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) + break; + + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_STOP: + DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n"); + if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) + break; + + goto next_spqe; + } + + switch (opcode | bp->state) { + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | + BNX2X_STATE_OPENING_WAIT4_PORT): + cid = elem->message.data.eth_event.echo & + BNX2X_SWCID_MASK; + DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", + cid); + rss_raw->clear_pending(rss_raw); + break; + + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_SET_MAC | + BNX2X_STATE_CLOSING_WAIT4_HALT): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): + DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); + bnx2x_handle_classification_eqe(bp, elem); + break; + + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): + DP(BNX2X_MSG_SP, "got mcast ramrod\n"); + bnx2x_handle_mcast_eqe(bp); + break; + + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): + DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); + bnx2x_handle_rx_mode_eqe(bp); + break; + default: + /* unknown event log error and continue */ + BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", + elem->message.opcode, bp->state); + } +next_spqe: + spqe_cnt++; + } /* for */ + + smp_mb__before_atomic_inc(); + atomic_add(spqe_cnt, &bp->eq_spq_left); + + bp->eq_cons = sw_cons; + bp->eq_prod = sw_prod; + /* Make sure that above mem writes were issued towards the memory */ + smp_wmb(); + + /* update producer */ + bnx2x_update_eq_prod(bp, bp->eq_prod); +} + +static void bnx2x_sp_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); + u16 status; + + status = bnx2x_update_dsb_idx(bp); +/* if (status == 0) */ +/* BNX2X_ERR("spurious slowpath interrupt!\n"); */ + + DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status); + + /* HW attentions */ + if (status & BNX2X_DEF_SB_ATT_IDX) { + bnx2x_attn_int(bp); + status &= ~BNX2X_DEF_SB_ATT_IDX; + } + + /* SP events: STAT_QUERY and others */ + if (status & BNX2X_DEF_SB_IDX) { +#ifdef BCM_CNIC + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + + if ((!NO_FCOE(bp)) && + (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + /* + * Prevent local bottom-halves from running as + * we are going to change the local NAPI list. + */ + local_bh_disable(); + napi_schedule(&bnx2x_fcoe(bp, napi)); + local_bh_enable(); + } +#endif + /* Handle EQ completions */ + bnx2x_eq_int(bp); + + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, + le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); + + status &= ~BNX2X_DEF_SB_IDX; + } + + if (unlikely(status)) + DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", + status); + + bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, + le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); +} + +irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct bnx2x *bp = netdev_priv(dev); + + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, + IGU_INT_DISABLE, 0); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif + +#ifdef BCM_CNIC + { + struct cnic_ops *c_ops; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + c_ops->cnic_handler(bp->cnic_data, NULL); + rcu_read_unlock(); + } +#endif + queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); + + return IRQ_HANDLED; +} + +/* end of slow path */ + + +void bnx2x_drv_pulse(struct bnx2x *bp) +{ + SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, + bp->fw_drv_pulse_wr_seq); +} + + +static void bnx2x_timer(unsigned long data) +{ + u8 cos; + struct bnx2x *bp = (struct bnx2x *) data; + + if (!netif_running(bp->dev)) + return; + + if (poll) { + struct bnx2x_fastpath *fp = &bp->fp[0]; + + for_each_cos_in_tx_queue(fp, cos) + bnx2x_tx_int(bp, &fp->txdata[cos]); + bnx2x_rx_int(fp, 1000); + } + + if (!BP_NOMCP(bp)) { + int mb_idx = BP_FW_MB_IDX(bp); + u32 drv_pulse; + u32 mcp_pulse; + + ++bp->fw_drv_pulse_wr_seq; + bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; + /* TBD - add SYSTEM_TIME */ + drv_pulse = bp->fw_drv_pulse_wr_seq; + bnx2x_drv_pulse(bp); + + mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & + MCP_PULSE_SEQ_MASK); + /* The delta between driver pulse and mcp response + * should be 1 (before mcp response) or 0 (after mcp response) + */ + if ((drv_pulse != mcp_pulse) && + (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { + /* someone lost a heartbeat... 
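+ * (the two pulse sequences drifted more than one step apart)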
*/ + BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", + drv_pulse, mcp_pulse); + } + } + + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); + + mod_timer(&bp->timer, jiffies + bp->current_interval); +} + +/* end of Statistics */ + +/* nic init */ + +/* + * nic init service functions + */ + +static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) +{ + u32 i; + if (!(len%4) && !(addr%4)) + for (i = 0; i < len; i += 4) + REG_WR(bp, addr + i, fill); + else + for (i = 0; i < len; i++) + REG_WR8(bp, addr + i, fill); + +} + +/* helper: writes FP SP data to FW - data_size in dwords */ +static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp, + int fw_sb_id, + u32 *sb_data_p, + u32 data_size) +{ + int index; + for (index = 0; index < data_size; index++) + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + + sizeof(u32)*index, + *(sb_data_p + index)); +} + +static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) +{ + u32 *sb_data_p; + u32 data_size = 0; + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + + /* disable the function first */ + if (!CHIP_IS_E1x(bp)) { + memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_DISABLED; + sb_data_e2.common.p_func.vf_valid = false; + sb_data_p = (u32 *)&sb_data_e2; + data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); + } else { + memset(&sb_data_e1x, 0, + sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_DISABLED; + sb_data_e1x.common.p_func.vf_valid = false; + sb_data_p = (u32 *)&sb_data_e1x; + data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); + } + bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); + + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, + CSTORM_STATUS_BLOCK_SIZE); + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, + CSTORM_SYNC_BLOCK_SIZE); +} + +/* helper: writes SP SB data to FW */ +static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp, + struct hc_sp_status_block_data *sp_sb_data) +{ + int func = BP_FUNC(bp); + int i; + for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + + i*sizeof(u32), + *((u32 *)sp_sb_data + i)); +} + +static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + struct hc_sp_status_block_data sp_sb_data; + memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); + + sp_sb_data.state = SB_DISABLED; + sp_sb_data.p_func.vf_valid = false; + + bnx2x_wr_sp_sb_data(bp, &sp_sb_data); + + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, + CSTORM_SP_STATUS_BLOCK_SIZE); + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, + CSTORM_SP_SYNC_BLOCK_SIZE); + +} + + +static inline +void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, + int igu_sb_id, int igu_seg_id) +{ + hc_sm->igu_sb_id = igu_sb_id; + hc_sm->igu_seg_id = igu_seg_id; + hc_sm->timer_value = 0xFF; + hc_sm->time_to_expire = 0xFFFFFFFF; +} + +static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, + u8 vf_valid, int fw_sb_id, int igu_sb_id) +{ + int igu_seg_id; + + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + struct hc_status_block_sm *hc_sm_p; + int data_size; + u32 *sb_data_p; + + if (CHIP_INT_MODE_IS_BC(bp)) + 
igu_seg_id = HC_SEG_ACCESS_NORM; + else + igu_seg_id = IGU_SEG_ACCESS_NORM; + + bnx2x_zero_fp_sb(bp, fw_sb_id); + + if (!CHIP_IS_E1x(bp)) { + memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_ENABLED; + sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); + sb_data_e2.common.p_func.vf_id = vfid; + sb_data_e2.common.p_func.vf_valid = vf_valid; + sb_data_e2.common.p_func.vnic_id = BP_VN(bp); + sb_data_e2.common.same_igu_sb_1b = true; + sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping); + sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping); + hc_sm_p = sb_data_e2.common.state_machine; + sb_data_p = (u32 *)&sb_data_e2; + data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); + } else { + memset(&sb_data_e1x, 0, + sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_ENABLED; + sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); + sb_data_e1x.common.p_func.vf_id = 0xff; + sb_data_e1x.common.p_func.vf_valid = false; + sb_data_e1x.common.p_func.vnic_id = BP_VN(bp); + sb_data_e1x.common.same_igu_sb_1b = true; + sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); + sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); + hc_sm_p = sb_data_e1x.common.state_machine; + sb_data_p = (u32 *)&sb_data_e1x; + data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); + } + + bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], + igu_sb_id, igu_seg_id); + bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], + igu_sb_id, igu_seg_id); + + DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id); + + /* write indices to HW */ + bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); +} + +static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, + u16 tx_usec, u16 rx_usec) +{ + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS, + false, rx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS0, false, + tx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS1, false, + tx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS2, false, + tx_usec); +} + +static void bnx2x_init_def_sb(struct bnx2x *bp) +{ + struct host_sp_status_block *def_sb = bp->def_status_blk; + dma_addr_t mapping = bp->def_status_blk_mapping; + int igu_sp_sb_index; + int igu_seg_id; + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int reg_offset; + u64 section; + int index; + struct hc_sp_status_block_data sp_sb_data; + memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); + + if (CHIP_INT_MODE_IS_BC(bp)) { + igu_sp_sb_index = DEF_SB_IGU_ID; + igu_seg_id = HC_SEG_ACCESS_DEF; + } else { + igu_sp_sb_index = bp->igu_dsb_id; + igu_seg_id = IGU_SEG_ACCESS_DEF; + } + + /* ATTN */ + section = ((u64)mapping) + offsetof(struct host_sp_status_block, + atten_status_block); + def_sb->atten_status_block.status_block_id = igu_sp_sb_index; + + bp->attn_state = 0; + + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { + int sindex; + /* take care of sig[0]..sig[4] */ + for (sindex = 0; sindex < 4; sindex++) + bp->attn_group[index].sig[sindex] = + REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); + + if (!CHIP_IS_E1x(bp)) + /* + * enable5 is separate from the rest of the registers, + * and therefore the address skip is 4 + * and not 16 between the different groups + */ + bp->attn_group[index].sig[4] = REG_RD(bp, + reg_offset + 0x10 + 0x4*index); + else + bp->attn_group[index].sig[4] = 0; + } + + if (bp->common.int_block == INT_BLOCK_HC) { + reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : + HC_REG_ATTN_MSG0_ADDR_L); + + REG_WR(bp, reg_offset, U64_LO(section)); + REG_WR(bp, reg_offset + 4, U64_HI(section)); + } else if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); + REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); + } + + section = ((u64)mapping) + offsetof(struct host_sp_status_block, + sp_sb); + + bnx2x_zero_sp_sb(bp); + + sp_sb_data.state = SB_ENABLED; + sp_sb_data.host_sb_addr.lo = U64_LO(section); + sp_sb_data.host_sb_addr.hi = U64_HI(section); + sp_sb_data.igu_sb_id = igu_sp_sb_index; + sp_sb_data.igu_seg_id = igu_seg_id; + sp_sb_data.p_func.pf_id = func; + sp_sb_data.p_func.vnic_id = BP_VN(bp); + sp_sb_data.p_func.vf_id = 0xff; + + bnx2x_wr_sp_sb_data(bp, &sp_sb_data); + + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); +} + +void bnx2x_update_coalesce(struct bnx2x *bp) +{ + int i; + + for_each_eth_queue(bp, i) + bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, + bp->tx_ticks, bp->rx_ticks); +} + +static void bnx2x_init_sp_ring(struct bnx2x *bp) +{ + spin_lock_init(&bp->spq_lock); + atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); + + bp->spq_prod_idx = 0; + bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; + bp->spq_prod_bd = bp->spq; + bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; +} + +static void bnx2x_init_eq_ring(struct bnx2x *bp) +{ + int i; + for (i = 1; i <= NUM_EQ_PAGES; i++) { + union event_ring_elem *elem = + &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; + + elem->next_page.addr.hi = + cpu_to_le32(U64_HI(bp->eq_mapping + + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); + elem->next_page.addr.lo = + cpu_to_le32(U64_LO(bp->eq_mapping + + BCM_PAGE_SIZE*(i % NUM_EQ_PAGES))); + } + bp->eq_cons = 0; + bp->eq_prod = NUM_EQ_DESC; + bp->eq_cons_sb = BNX2X_EQ_INDEX; + /* we want a warning message before it gets rough... 
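+ * (hence the -1 below: eq_spq_left is set to one less than the + * room actually available in the EQ/SPQ)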
*/ + atomic_set(&bp->eq_spq_left, + min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); +} + + +/* called with netif_addr_lock_bh() */ +void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags) +{ + struct bnx2x_rx_mode_ramrod_params ramrod_param; + int rc; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* Prepare ramrod parameters */ + ramrod_param.cid = 0; + ramrod_param.cl_id = cl_id; + ramrod_param.rx_mode_obj = &bp->rx_mode_obj; + ramrod_param.func_id = BP_FUNC(bp); + + ramrod_param.pstate = &bp->sp_state; + ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING; + + ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); + ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); + + set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); + + ramrod_param.ramrod_flags = ramrod_flags; + ramrod_param.rx_mode_flags = rx_mode_flags; + + ramrod_param.rx_accept_flags = rx_accept_flags; + ramrod_param.tx_accept_flags = tx_accept_flags; + + rc = bnx2x_config_rx_mode(bp, &ramrod_param); + if (rc < 0) { + BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); + return; + } +} + +/* called with netif_addr_lock_bh() */ +void bnx2x_set_storm_rx_mode(struct bnx2x *bp) +{ + unsigned long rx_mode_flags = 0, ramrod_flags = 0; + unsigned long rx_accept_flags = 0, tx_accept_flags = 0; + +#ifdef BCM_CNIC + if (!NO_FCOE(bp)) + + /* Configure rx_mode of FCoE Queue */ + __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); +#endif + + switch (bp->rx_mode) { + case BNX2X_RX_MODE_NONE: + /* + * 'drop all' supersedes any accept flags that may have been + * passed to the function. + */ + break; + case BNX2X_RX_MODE_NORMAL: + __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + + break; + case BNX2X_RX_MODE_ALLMULTI: + __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + + break; + case BNX2X_RX_MODE_PROMISC: + /* According to the definition of SI mode, an iface in promisc + * mode should receive matched and unmatched (in resolution of + * port) unicast packets. 
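+ * That is why BNX2X_ACCEPT_UNMATCHED is set below on top of the + * regular accept flags.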
+ */ + __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + + if (IS_MF_SI(bp)) + __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags); + else + __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); + + break; + default: + BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode); + return; + } + + if (bp->rx_mode != BNX2X_RX_MODE_NONE) { + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags); + } + + __set_bit(RAMROD_RX, &ramrod_flags); + __set_bit(RAMROD_TX, &ramrod_flags); + + bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags, + tx_accept_flags, ramrod_flags); +} + +static void bnx2x_init_internal_common(struct bnx2x *bp) +{ + int i; + + if (IS_MF_SI(bp)) + /* + * In switch independent mode, the TSTORM needs to accept + * packets that failed classification, since approximate match + * mac addresses aren't written to NIG LLH + */ + REG_WR8(bp, BAR_TSTRORM_INTMEM + + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); + else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ + REG_WR8(bp, BAR_TSTRORM_INTMEM + + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); + + /* Zero this manually as its initialization is + currently missing in the initTool */ + for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_AGG_DATA_OFFSET + i * 4, 0); + if (!CHIP_IS_E1x(bp)) { + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, + CHIP_INT_MODE_IS_BC(bp) ? + HC_IGU_BC_MODE : HC_IGU_NBC_MODE); + } +} + +static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) +{ + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON: + case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: + bnx2x_init_internal_common(bp); + /* no break */ + + case FW_MSG_CODE_DRV_LOAD_PORT: + /* nothing to do */ + /* no break */ + + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + /* internal memory per function is + initialized inside bnx2x_pf_init */ + break; + + default: + BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); + break; + } +} + +static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) +{ + return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT; +} + +static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) +{ + return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT; +} + +static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) +{ + if (CHIP_IS_E1x(fp->bp)) + return BP_L_ID(fp->bp) + fp->index; + else /* We want Client ID to be the same as IGU SB ID for 57712 */ + return bnx2x_fp_igu_sb_id(fp); +} + +static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) +{ + struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; + u8 cos; + unsigned long q_type = 0; + u32 cids[BNX2X_MULTI_TX_COS] = { 0 }; + + fp->cid = fp_idx; + fp->cl_id = bnx2x_fp_cl_id(fp); + fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); + fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); + /* qZone id equals the FW (per path) client id */ + fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); + + /* init shortcut */ + fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); + /* Set up SB indices */ + fp->rx_cons_sb = BNX2X_RX_SB_INDEX; + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + BUG_ON(fp->max_cos > 
BNX2X_MULTI_TX_COS); + + /* init tx data */ + for_each_cos_in_tx_queue(fp, cos) { + bnx2x_init_txdata(bp, &fp->txdata[cos], + CID_COS_TO_TX_ONLY_CID(fp->cid, cos), + FP_COS_TO_TXQ(fp, cos), + BNX2X_TX_SB_INDEX_BASE + cos); + cids[cos] = fp->txdata[cos].cid; + } + + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + /** + * Configure classification DBs: Always enable Tx switching + */ + bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); + + DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " + "cl_id %d fw_sb %d igu_sb %d\n", + fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); + bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, + fp->fw_sb_id, fp->igu_sb_id); + + bnx2x_update_fpsb_idx(fp); +} + +void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) +{ + int i; + + for_each_eth_queue(bp, i) + bnx2x_init_eth_fp(bp, i); +#ifdef BCM_CNIC + if (!NO_FCOE(bp)) + bnx2x_init_fcoe_fp(bp); + + bnx2x_init_sb(bp, bp->cnic_sb_mapping, + BNX2X_VF_ID_INVALID, false, + bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); + +#endif + + /* Initialize MOD_ABS interrupts */ + bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, + bp->common.shmem_base, bp->common.shmem2_base, + BP_PORT(bp)); + /* ensure status block indices were read */ + rmb(); + + bnx2x_init_def_sb(bp); + bnx2x_update_dsb_idx(bp); + bnx2x_init_rx_rings(bp); + bnx2x_init_tx_rings(bp); + bnx2x_init_sp_ring(bp); + bnx2x_init_eq_ring(bp); + bnx2x_init_internal(bp, load_code); + bnx2x_pf_init(bp); + bnx2x_stats_init(bp); + + /* flush all before enabling interrupts */ + mb(); + mmiowb(); + + bnx2x_int_enable(bp); + + /* Check for SPIO5 */ + bnx2x_attn_int_deasserted0(bp, + REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & + AEU_INPUTS_ATTN_BITS_SPIO5); +} + +/* end of nic init */ + +/* + * gzip service functions + */ + +static int bnx2x_gunzip_init(struct bnx2x *bp) +{ + bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, + &bp->gunzip_mapping, GFP_KERNEL); + if (bp->gunzip_buf == NULL) + goto gunzip_nomem1; + + bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); + if (bp->strm == NULL) + goto gunzip_nomem2; + + bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); + if (bp->strm->workspace == NULL) + goto gunzip_nomem3; + + return 0; + +gunzip_nomem3: + kfree(bp->strm); + bp->strm = NULL; + +gunzip_nomem2: + dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, + bp->gunzip_mapping); + bp->gunzip_buf = NULL; + +gunzip_nomem1: + netdev_err(bp->dev, "Cannot allocate firmware buffer for" + " un-compression\n"); + return -ENOMEM; +} + +static void bnx2x_gunzip_end(struct bnx2x *bp) +{ + if (bp->strm) { + vfree(bp->strm->workspace); + kfree(bp->strm); + bp->strm = NULL; + } + + if (bp->gunzip_buf) { + dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, + bp->gunzip_mapping); + bp->gunzip_buf = NULL; + } +} + +static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) +{ + int n, rc; + + /* check gzip header */ + if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { + BNX2X_ERR("Bad gzip header\n"); + return -EINVAL; + } + + n = 10; + +#define FNAME 0x8 + + if (zbuf[3] & FNAME) + while ((zbuf[n++] != 0) && (n < len)); + + bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; + bp->strm->avail_in = len - n; + bp->strm->next_out = bp->gunzip_buf; + bp->strm->avail_out = FW_BUF_SIZE; + + rc = 
zlib_inflateInit2(bp->strm, -MAX_WBITS); + if (rc != Z_OK) + return rc; + + rc = zlib_inflate(bp->strm, Z_FINISH); + if ((rc != Z_OK) && (rc != Z_STREAM_END)) + netdev_err(bp->dev, "Firmware decompression error: %s\n", + bp->strm->msg); + + bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); + if (bp->gunzip_outlen & 0x3) + netdev_err(bp->dev, "Firmware decompression error:" + " gunzip_outlen (%d) not aligned\n", + bp->gunzip_outlen); + bp->gunzip_outlen >>= 2; + + zlib_inflateEnd(bp->strm); + + if (rc == Z_STREAM_END) + return 0; + + return rc; +} + +/* nic load/unload */ + +/* + * General service functions + */ + +/* send a NIG loopback debug packet */ +static void bnx2x_lb_pckt(struct bnx2x *bp) +{ + u32 wb_write[3]; + + /* Ethernet source and destination addresses */ + wb_write[0] = 0x55555555; + wb_write[1] = 0x55555555; + wb_write[2] = 0x20; /* SOP */ + REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); + + /* NON-IP protocol */ + wb_write[0] = 0x09000000; + wb_write[1] = 0x55555555; + wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ + REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); +} + +/* some of the internal memories + * are not directly readable from the driver + * to test them we send debug packets + */ +static int bnx2x_int_mem_test(struct bnx2x *bp) +{ + int factor; + int count, i; + u32 val = 0; + + if (CHIP_REV_IS_FPGA(bp)) + factor = 120; + else if (CHIP_REV_IS_EMUL(bp)) + factor = 200; + else + factor = 1; + + /* Disable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); + REG_WR(bp, CFC_REG_DEBUG0, 0x1); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); + + /* Write 0 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); + + /* send Ethernet packet */ + bnx2x_lb_pckt(bp); + + /* TODO do i reset NIG statistic? 
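+ * (the loops below compare NIG_REG_STAT2_BRB_OCTET against absolute + * byte counts, so a stale counter would make them time out)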
*/ + /* Wait until NIG register shows 1 packet of size 0x10 */ + count = 1000 * factor; + while (count) { + + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); + if (val == 0x10) + break; + + msleep(10); + count--; + } + if (val != 0x10) { + BNX2X_ERR("NIG timeout val = 0x%x\n", val); + return -1; + } + + /* Wait until PRS register shows 1 packet */ + count = 1000 * factor; + while (count) { + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val == 1) + break; + + msleep(10); + count--; + } + if (val != 0x1) { + BNX2X_ERR("PRS timeout val = 0x%x\n", val); + return -2; + } + + /* Reset and init BRB, PRS */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); + msleep(50); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); + msleep(50); + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); + + DP(NETIF_MSG_HW, "part2\n"); + + /* Disable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); + REG_WR(bp, CFC_REG_DEBUG0, 0x1); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); + + /* Write 0 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); + + /* send 10 Ethernet packets */ + for (i = 0; i < 10; i++) + bnx2x_lb_pckt(bp); + + /* Wait until NIG register shows 10 + 1 + packets of size 11*0x10 = 0xb0 */ + count = 1000 * factor; + while (count) { + + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); + if (val == 0xb0) + break; + + msleep(10); + count--; + } + if (val != 0xb0) { + BNX2X_ERR("NIG timeout val = 0x%x\n", val); + return -3; + } + + /* Wait until PRS register shows 2 packets */ + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val != 2) + BNX2X_ERR("PRS timeout val = 0x%x\n", val); + + /* Write 1 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); + + /* Wait until PRS register shows 3 packets */ + msleep(10 * factor); + /* Wait until NIG register shows 1 packet of size 0x10 */ + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val != 3) + BNX2X_ERR("PRS timeout val = 0x%x\n", val); + + /* clear NIG EOP FIFO */ + for (i = 0; i < 11; i++) + REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); + val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); + if (val != 1) { + BNX2X_ERR("clear of NIG failed\n"); + return -4; + } + + /* Reset and init BRB, PRS, NIG */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); + msleep(50); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); + msleep(50); + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); +#ifndef BCM_CNIC + /* set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); +#endif + + /* Enable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); + REG_WR(bp, CFC_REG_DEBUG0, 0x0); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); + + DP(NETIF_MSG_HW, "done\n"); + + return 0; /* OK */ +} + +static void bnx2x_enable_blocks_attention(struct bnx2x *bp) +{ + REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); + else + REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); + REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); + REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); + /* + * mask read length error interrupts in brb for parser + * (parsing unit and 'checksum and crc' unit) + * these errors are legal (PU reads fixed length and CAC can 
cause + * read length error on truncated packets) + */ + REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); + REG_WR(bp, QM_REG_QM_INT_MASK, 0); + REG_WR(bp, TM_REG_TM_INT_MASK, 0); + REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); + REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); + REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); +/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ +/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ + REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); + REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); + REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); +/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ +/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ + REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); + REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); + REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); + REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); +/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ +/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ + + if (CHIP_REV_IS_FPGA(bp)) + REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); + else if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, + (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF + | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT + | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN + | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED + | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED)); + else + REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); + REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); + REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); + REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); +/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ + + if (!CHIP_IS_E1x(bp)) + /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ + REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); + + REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); + REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); +/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ + REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ +} + +static void bnx2x_reset_common(struct bnx2x *bp) +{ + u32 val = 0x1400; + + /* reset_common */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0xd3ffff7f); + + if (CHIP_IS_E3(bp)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); +} + +static void bnx2x_setup_dmae(struct bnx2x *bp) +{ + bp->dmae_ready = 0; + spin_lock_init(&bp->dmae_lock); +} + +static void bnx2x_init_pxp(struct bnx2x *bp) +{ + u16 devctl; + int r_order, w_order; + + pci_read_config_word(bp->pdev, + pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl); + DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); + w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); + if (bp->mrrs == -1) + r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); + else { + DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); + r_order = bp->mrrs; + } + + bnx2x_init_pxp_arb(bp, r_order, w_order); +} + +static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) +{ + int is_required; + u32 val; + int port; + + if (BP_NOMCP(bp)) + return; + + is_required = 0; + val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & + SHARED_HW_CFG_FAN_FAILURE_MASK; + + if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) + is_required = 1; + + /* + * The fan failure mechanism is usually related to the PHY type since + * the power consumption of the board is affected by the PHY. Currently, + * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 
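+ * On those designs SPIO 5 is wired as the fan failure input; the + * code below configures it as an active-low interrupt source.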
+ */ + else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) + for (port = PORT_0; port < PORT_MAX; port++) { + is_required |= + bnx2x_fan_failure_det_req( + bp, + bp->common.shmem_base, + bp->common.shmem2_base, + port); + } + + DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); + + if (is_required == 0) + return; + + /* Fan failure is indicated by SPIO 5 */ + bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, + MISC_REGISTERS_SPIO_INPUT_HI_Z); + + /* set to active low mode */ + val = REG_RD(bp, MISC_REG_SPIO_INT); + val |= ((1 << MISC_REGISTERS_SPIO_5) << + MISC_REGISTERS_SPIO_INT_OLD_SET_POS); + REG_WR(bp, MISC_REG_SPIO_INT, val); + + /* enable interrupt to signal the IGU */ + val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); + val |= (1 << MISC_REGISTERS_SPIO_5); + REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); +} + +static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num) +{ + u32 offset = 0; + + if (CHIP_IS_E1(bp)) + return; + if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX)) + return; + + switch (BP_ABS_FUNC(bp)) { + case 0: + offset = PXP2_REG_PGL_PRETEND_FUNC_F0; + break; + case 1: + offset = PXP2_REG_PGL_PRETEND_FUNC_F1; + break; + case 2: + offset = PXP2_REG_PGL_PRETEND_FUNC_F2; + break; + case 3: + offset = PXP2_REG_PGL_PRETEND_FUNC_F3; + break; + case 4: + offset = PXP2_REG_PGL_PRETEND_FUNC_F4; + break; + case 5: + offset = PXP2_REG_PGL_PRETEND_FUNC_F5; + break; + case 6: + offset = PXP2_REG_PGL_PRETEND_FUNC_F6; + break; + case 7: + offset = PXP2_REG_PGL_PRETEND_FUNC_F7; + break; + default: + return; + } + + REG_WR(bp, offset, pretend_func_num); + REG_RD(bp, offset); + DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num); +} + +void bnx2x_pf_disable(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); + val &= ~IGU_PF_CONF_FUNC_EN; + + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); +} + +static inline void bnx2x__common_init_phy(struct bnx2x *bp) +{ + u32 shmem_base[2], shmem2_base[2]; + shmem_base[0] = bp->common.shmem_base; + shmem2_base[0] = bp->common.shmem2_base; + if (!CHIP_IS_E1x(bp)) { + shmem_base[1] = + SHMEM2_RD(bp, other_shmem_base_addr); + shmem2_base[1] = + SHMEM2_RD(bp, other_shmem2_base_addr); + } + bnx2x_acquire_phy_lock(bp); + bnx2x_common_init_phy(bp, shmem_base, shmem2_base, + bp->common.chip_id); + bnx2x_release_phy_lock(bp); +} + +/** + * bnx2x_init_hw_common - initialize the HW at the COMMON phase. + * + * @bp: driver handle + */ +static int bnx2x_init_hw_common(struct bnx2x *bp) +{ + u32 val; + + DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); + + bnx2x_reset_common(bp); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); + + val = 0xfffc; + if (CHIP_IS_E3(bp)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); + + bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp)) { + u8 abs_func_id; + + /** + * 4-port mode or 2-port mode we need to turn of master-enable + * for everyone, after that, turn it back on for self. 
+ * so, we disregard multi-function or not, and always disable + * for all functions on the given path, this means 0,2,4,6 for + * path 0 and 1,3,5,7 for path 1 + */ + for (abs_func_id = BP_PATH(bp); + abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { + if (abs_func_id == BP_ABS_FUNC(bp)) { + REG_WR(bp, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, + 1); + continue; + } + + bnx2x_pretend_func(bp, abs_func_id); + /* clear pf enable */ + bnx2x_pf_disable(bp); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + } + } + + bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); + if (CHIP_IS_E1(bp)) { + /* enable HW interrupt from PXP on USDM overflow + bit 16 on INT_MASK_0 */ + REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); + } + + bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); + bnx2x_init_pxp(bp); + +#ifdef __BIG_ENDIAN + REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); + REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); + /* make sure this value is 0 */ + REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); + +/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ + REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); + REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); + REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); + REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); +#endif + + bnx2x_ilt_init_page_size(bp, INITOP_SET); + + if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) + REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); + + /* let the HW do its magic ... */ + msleep(100); + /* finish PXP init */ + val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); + if (val != 1) { + BNX2X_ERR("PXP2 CFG failed\n"); + return -EBUSY; + } + val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); + if (val != 1) { + BNX2X_ERR("PXP2 RD_INIT failed\n"); + return -EBUSY; + } + + /* Timers bug workaround E2 only. We need to set the entire ILT to + * have entries with value "0" and valid bit on. + * This needs to be done by the first PF that is loaded in a path + * (i.e. common phase) + */ + if (!CHIP_IS_E1x(bp)) { +/* In E2 there is a bug in the timers block that can cause function 6 / 7 + * (i.e. vnic3) to start even if it is marked as "scan-off". + * This occurs when a different function (func2,3) is being marked + * as "scan-off". Real-life scenario for example: if a driver is being + * load-unloaded while func6,7 are down. This will cause the timer to access + * the ilt, translate to a logical address and send a request to read/write. + * Since the ilt for the function that is down is not valid, this will cause + * a translation error which is unrecoverable. + * The Workaround is intended to make sure that when this happens nothing fatal + * will occur. The workaround: + * 1. First PF driver which loads on a path will: + * a. After taking the chip out of reset, by using pretend, + * it will write "0" to the following registers of + * the other vnics. + * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); + * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); + * And for itself it will write '1' to + * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable + * dmae-operations (writing to pram for example.) + * note: can be done for only function 6,7 but cleaner this + * way. + * b. Write zero+valid to the entire ILT. + * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of + * VNIC3 (of that port). The range allocated will be the + * entire ILT. This is needed to prevent ILT range error. + * 2. Any PF driver load flow: + * a. 
ILT update with the physical addresses of the allocated + * logical pages. + * b. Wait 20msec. - note that this timeout is needed to make + * sure there are no requests in one of the PXP internal + * queues with "old" ILT addresses. + * c. PF enable in the PGLC. + * d. Clear the was_error of the PF in the PGLC. (could have + * occurred while driver was down) + * e. PF enable in the CFC (WEAK + STRONG) + * f. Timers scan enable + * 3. PF driver unload flow: + * a. Clear the Timers scan_en. + * b. Polling for scan_on=0 for that PF. + * c. Clear the PF enable bit in the PXP. + * d. Clear the PF enable in the CFC (WEAK + STRONG) + * e. Write zero+valid to all ILT entries (The valid bit must + * stay set) + * f. If this is VNIC 3 of a port then also init + * first_timers_ilt_entry to zero and last_timers_ilt_entry + * to the last entry in the ILT. + * + * Notes: + * Currently the PF error in the PGLC is non-recoverable. + * In the future there will be a recovery routine for this error. + * Currently attention is masked. + * Having an MCP lock on the load/unload process does not guarantee that + * there is no Timer disable during Func6/7 enable. This is because the + * Timers scan is currently being cleared by the MCP on FLR. + * Step 2.d can be done only for PF6/7 and the driver can also check if + * there is an error before clearing it. But the flow above is simpler and + * more general. + * All ILT entries are written by zero+valid and not just PF6/7 + * ILT entries since in the future the ILT entries allocation for + * PF-s might be dynamic. + */ + struct ilt_client_info ilt_cli; + struct bnx2x_ilt ilt; + memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); + memset(&ilt, 0, sizeof(struct bnx2x_ilt)); + + /* initialize dummy TM client */ + ilt_cli.start = 0; + ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; + ilt_cli.client_num = ILT_CLIENT_TM; + + /* Step 1: set zeroes to all ilt page entries with valid bit on + * Step 2: set the timers first/last ilt entry to point + * to the entire range to prevent ILT range error for 3rd/4th + * vnic (this code assumes existence of the vnic) + * + * both steps performed by call to bnx2x_ilt_client_init_op() + * with dummy TM client + * + * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT + * and its sibling are split registers + */ + bnx2x_pretend_func(bp, (BP_PATH(bp) + 6)); + bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); + REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); + REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); + } + + + REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); + REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); + + if (!CHIP_IS_E1x(bp)) { + int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : + (CHIP_REV_IS_FPGA(bp) ? 400 : 0); + bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); + + /* let the HW do its magic ... 
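+ * (ATC_REG_ATC_INIT_DONE is polled below until the ATC block + * reports completion)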
*/ + do { + msleep(200); + val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); + } while (factor-- && (val != 1)); + + if (val != 1) { + BNX2X_ERR("ATC_INIT failed\n"); + return -EBUSY; + } + } + + bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); + + /* clean the DMAE memory */ + bp->dmae_ready = 1; + bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); + + bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); + + bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); + + bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); + + + /* QM queues pointers table */ + bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); + + /* soft reset pulse */ + REG_WR(bp, QM_REG_SOFT_RESET, 1); + REG_WR(bp, QM_REG_SOFT_RESET, 0); + +#ifdef BCM_CNIC + bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); +#endif + + bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); + REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); + if (!CHIP_REV_IS_SLOW(bp)) + /* enable hw interrupt from doorbell Q */ + REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); + + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); + REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); + + if (!CHIP_IS_E1(bp)) + REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); + + if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) + /* Bit-map indicating which L2 hdrs may appear + * after the basic Ethernet header + */ + REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 7 : 6); + + bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp)) { + /* reset VFC memories */ + REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + + msleep(20); + } + + bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); + + /* sync semi rtc */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0x80000000); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, + 0x80000000); + + bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 
7 : 6); + + REG_WR(bp, SRC_REG_SOFT_RST, 1); + + bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); + +#ifdef BCM_CNIC + REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); + REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); + REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); + REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); + REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); + REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); + REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); + REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); + REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); + REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); +#endif + REG_WR(bp, SRC_REG_SOFT_RST, 0); + + if (sizeof(union cdu_context) != 1024) + /* we currently assume that a context is 1024 bytes */ + dev_alert(&bp->pdev->dev, "please adjust the size " + "of cdu_context(%ld)\n", + (long)sizeof(union cdu_context)); + + bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); + val = (4 << 24) + (0 << 12) + 1024; + REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); + + bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); + REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); + /* enable context validation interrupt from CFC */ + REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); + + /* set the thresholds to prevent CFC/CDU race */ + REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); + + bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) + REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); + + bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); + + /* Reset PCIE errors for debug */ + REG_WR(bp, 0x2814, 0xffffffff); + REG_WR(bp, 0x3820, 0xffffffff); + + if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, + (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | + PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); + REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, + (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | + PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | + PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); + REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, + (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | + PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | + PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); + } + + bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); + if (!CHIP_IS_E1(bp)) { + /* in E3 this is done in the per-port section */ + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); + } + if (CHIP_IS_E1H(bp)) + /* not applicable for E2 (and above ...) 
*/ + REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); + + if (CHIP_REV_IS_SLOW(bp)) + msleep(200); + + /* finish CFC init */ + val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC LL_INIT failed\n"); + return -EBUSY; + } + val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC AC_INIT failed\n"); + return -EBUSY; + } + val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC CAM_INIT failed\n"); + return -EBUSY; + } + REG_WR(bp, CFC_REG_DEBUG0, 0); + + if (CHIP_IS_E1(bp)) { + /* read NIG statistic + to see if this is our first up since powerup */ + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); + + /* do internal memory self test */ + if ((val == 0) && bnx2x_int_mem_test(bp)) { + BNX2X_ERR("internal mem self test failed\n"); + return -EBUSY; + } + } + + bnx2x_setup_fan_failure_detection(bp); + + /* clear PXP2 attentions */ + REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); + + bnx2x_enable_blocks_attention(bp); + bnx2x_enable_blocks_parity(bp); + + if (!BP_NOMCP(bp)) { + if (CHIP_IS_E1x(bp)) + bnx2x__common_init_phy(bp); + } else + BNX2X_ERR("Bootcode is missing - can not initialize link\n"); + + return 0; +} + +/** + * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. + * + * @bp: driver handle + */ +static int bnx2x_init_hw_common_chip(struct bnx2x *bp) +{ + int rc = bnx2x_init_hw_common(bp); + + if (rc) + return rc; + + /* In E2 2-PORT mode, same ext phy is used for the two paths */ + if (!BP_NOMCP(bp)) + bnx2x__common_init_phy(bp); + + return 0; +} + +static int bnx2x_init_hw_port(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; + u32 low, high; + u32 val; + + bnx2x__link_reset(bp); + + DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); + + REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); + + bnx2x_init_block(bp, BLOCK_MISC, init_phase); + bnx2x_init_block(bp, BLOCK_PXP, init_phase); + bnx2x_init_block(bp, BLOCK_PXP2, init_phase); + + /* Timers bug workaround: disables the pf_master bit in pglue at + * common phase, we need to enable it here before any dmae access are + * attempted. Therefore we manually added the enable-master to the + * port phase (it also happens in the function phase) + */ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + + bnx2x_init_block(bp, BLOCK_ATC, init_phase); + bnx2x_init_block(bp, BLOCK_DMAE, init_phase); + bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); + bnx2x_init_block(bp, BLOCK_QM, init_phase); + + bnx2x_init_block(bp, BLOCK_TCM, init_phase); + bnx2x_init_block(bp, BLOCK_UCM, init_phase); + bnx2x_init_block(bp, BLOCK_CCM, init_phase); + bnx2x_init_block(bp, BLOCK_XCM, init_phase); + + /* QM cid (connection) count */ + bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); + +#ifdef BCM_CNIC + bnx2x_init_block(bp, BLOCK_TM, init_phase); + REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); + REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); +#endif + + bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + + if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + + if (IS_MF(bp)) + low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); + else if (bp->dev->mtu > 4096) { + if (bp->flags & ONE_PORT_FLAG) + low = 160; + else { + val = bp->dev->mtu; + /* (24*1024 + val*4)/256 */ + low = 96 + (val/64) + + ((val % 64) ? 1 : 0); + } + } else + low = ((bp->flags & ONE_PORT_FLAG) ? 
80 : 160); + high = low + 56; /* 14*1024/256 */ + REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); + REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); + } + + if (CHIP_MODE_IS_4_PORT(bp)) + REG_WR(bp, (BP_PORT(bp) ? + BRB1_REG_MAC_GUARANTIED_1 : + BRB1_REG_MAC_GUARANTIED_0), 40); + + + bnx2x_init_block(bp, BLOCK_PRS, init_phase); + if (CHIP_IS_E3B0(bp)) + /* Ovlan exists only if we are in multi-function + + * switch-dependent mode, in switch-independent there + * is no ovlan headers + */ + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, + (bp->path_has_ovlan ? 7 : 6)); + + bnx2x_init_block(bp, BLOCK_TSDM, init_phase); + bnx2x_init_block(bp, BLOCK_CSDM, init_phase); + bnx2x_init_block(bp, BLOCK_USDM, init_phase); + bnx2x_init_block(bp, BLOCK_XSDM, init_phase); + + bnx2x_init_block(bp, BLOCK_TSEM, init_phase); + bnx2x_init_block(bp, BLOCK_USEM, init_phase); + bnx2x_init_block(bp, BLOCK_CSEM, init_phase); + bnx2x_init_block(bp, BLOCK_XSEM, init_phase); + + bnx2x_init_block(bp, BLOCK_UPB, init_phase); + bnx2x_init_block(bp, BLOCK_XPB, init_phase); + + bnx2x_init_block(bp, BLOCK_PBF, init_phase); + + if (CHIP_IS_E1x(bp)) { + /* configure PBF to work without PAUSE mtu 9000 */ + REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); + + /* update threshold */ + REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); + /* update init credit */ + REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); + + /* probe changes */ + REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); + udelay(50); + REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); + } + +#ifdef BCM_CNIC + bnx2x_init_block(bp, BLOCK_SRC, init_phase); +#endif + bnx2x_init_block(bp, BLOCK_CDU, init_phase); + bnx2x_init_block(bp, BLOCK_CFC, init_phase); + + if (CHIP_IS_E1(bp)) { + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } + bnx2x_init_block(bp, BLOCK_HC, init_phase); + + bnx2x_init_block(bp, BLOCK_IGU, init_phase); + + bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); + /* init aeu_mask_attn_func_0/1: + * - SF mode: bits 3-7 are masked. only bits 0-2 are in use + * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF + * bits 4-7 are used for "per vn group attention" */ + val = IS_MF(bp) ? 0xF7 : 0x7; + /* Enable DCBX attention for all but E1 */ + val |= CHIP_IS_E1(bp) ? 0 : 0x10; + REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); + + bnx2x_init_block(bp, BLOCK_NIG, init_phase); + + if (!CHIP_IS_E1x(bp)) { + /* Bit-map indicating which L2 hdrs may appear after the + * basic Ethernet header + */ + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, + IS_MF_SD(bp) ? 7 : 6); + + if (CHIP_IS_E3(bp)) + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_LLH1_MF_MODE : + NIG_REG_LLH_MF_MODE, IS_MF(bp)); + } + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); + + if (!CHIP_IS_E1(bp)) { + /* 0x2 disable mf_ov, 0x1 enable */ + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, + (IS_MF_SD(bp) ? 0x1 : 0x2)); + + if (!CHIP_IS_E1x(bp)) { + val = 0; + switch (bp->mf_mode) { + case MULTI_FUNCTION_SD: + val = 1; + break; + case MULTI_FUNCTION_SI: + val = 2; + break; + } + + REG_WR(bp, (BP_PORT(bp) ? 
NIG_REG_LLH1_CLS_TYPE : + NIG_REG_LLH0_CLS_TYPE), val); + } + { + REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); + REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); + REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); + } + } + + + /* If SPIO5 is set to generate interrupts, enable it for this port */ + val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); + if (val & (1 << MISC_REGISTERS_SPIO_5)) { + u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + val = REG_RD(bp, reg_addr); + val |= AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(bp, reg_addr, val); + } + + return 0; +} + +static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) +{ + int reg; + + if (CHIP_IS_E1(bp)) + reg = PXP2_REG_RQ_ONCHIP_AT + index*8; + else + reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; + + bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); +} + +static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) +{ + bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); +} + +static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) +{ + u32 i, base = FUNC_ILT_BASE(func); + for (i = base; i < base + ILT_PER_FUNC; i++) + bnx2x_ilt_wr(bp, i, 0); +} + +static int bnx2x_init_hw_func(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int init_phase = PHASE_PF0 + func; + struct bnx2x_ilt *ilt = BP_ILT(bp); + u16 cdu_ilt_start; + u32 addr, val; + u32 main_mem_base, main_mem_size, main_mem_prty_clr; + int i, main_mem_width; + + DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); + + /* FLR cleanup - hmmm */ + if (!CHIP_IS_E1x(bp)) + bnx2x_pf_flr_clnup(bp); + + /* set MSI reconfigure capability */ + if (bp->common.int_block == INT_BLOCK_HC) { + addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); + val = REG_RD(bp, addr); + val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; + REG_WR(bp, addr, val); + } + + bnx2x_init_block(bp, BLOCK_PXP, init_phase); + bnx2x_init_block(bp, BLOCK_PXP2, init_phase); + + ilt = BP_ILT(bp); + cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; + + for (i = 0; i < L2_ILT_LINES(bp); i++) { + ilt->lines[cdu_ilt_start + i].page = + bp->context.vcxt + (ILT_PAGE_CIDS * i); + ilt->lines[cdu_ilt_start + i].page_mapping = + bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i); + /* cdu ilt pages are allocated manually so there's no need to + set the size */ + } + bnx2x_ilt_init_op(bp, INITOP_SET); + +#ifdef BCM_CNIC + bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); + + /* T1 hash bits value determines the T1 number of entries */ + REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); +#endif + +#ifndef BCM_CNIC + /* set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); +#endif /* BCM_CNIC */ + + if (!CHIP_IS_E1x(bp)) { + u32 pf_conf = IGU_PF_CONF_FUNC_EN; + + /* Turn on a single ISR mode in IGU if driver is going to use + * INT#x or MSI + */ + if (!(bp->flags & USING_MSIX_FLAG)) + pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + /* + * Timers workaround bug: function init part. 
+ * Need to wait 20msec after initializing ILT, + * needed to make sure there are no requests in + * one of the PXP internal queues with "old" ILT addresses + */ + msleep(20); + /* + * Master enable - Due to WB DMAE writes performed before this + * register is re-initialized as part of the regular function + * init + */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + /* Enable the function in IGU */ + REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); + } + + bp->dmae_ready = 1; + + bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); + + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); + + bnx2x_init_block(bp, BLOCK_ATC, init_phase); + bnx2x_init_block(bp, BLOCK_DMAE, init_phase); + bnx2x_init_block(bp, BLOCK_NIG, init_phase); + bnx2x_init_block(bp, BLOCK_SRC, init_phase); + bnx2x_init_block(bp, BLOCK_MISC, init_phase); + bnx2x_init_block(bp, BLOCK_TCM, init_phase); + bnx2x_init_block(bp, BLOCK_UCM, init_phase); + bnx2x_init_block(bp, BLOCK_CCM, init_phase); + bnx2x_init_block(bp, BLOCK_XCM, init_phase); + bnx2x_init_block(bp, BLOCK_TSEM, init_phase); + bnx2x_init_block(bp, BLOCK_USEM, init_phase); + bnx2x_init_block(bp, BLOCK_CSEM, init_phase); + bnx2x_init_block(bp, BLOCK_XSEM, init_phase); + + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, QM_REG_PF_EN, 1); + + if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + } + bnx2x_init_block(bp, BLOCK_QM, init_phase); + + bnx2x_init_block(bp, BLOCK_TM, init_phase); + bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + bnx2x_init_block(bp, BLOCK_PRS, init_phase); + bnx2x_init_block(bp, BLOCK_TSDM, init_phase); + bnx2x_init_block(bp, BLOCK_CSDM, init_phase); + bnx2x_init_block(bp, BLOCK_USDM, init_phase); + bnx2x_init_block(bp, BLOCK_XSDM, init_phase); + bnx2x_init_block(bp, BLOCK_UPB, init_phase); + bnx2x_init_block(bp, BLOCK_XPB, init_phase); + bnx2x_init_block(bp, BLOCK_PBF, init_phase); + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PBF_REG_DISABLE_PF, 0); + + bnx2x_init_block(bp, BLOCK_CDU, init_phase); + + bnx2x_init_block(bp, BLOCK_CFC, init_phase); + + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); + + if (IS_MF(bp)) { + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); + REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); + } + + bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); + + /* HC init per function */ + if (bp->common.int_block == INT_BLOCK_HC) { + if (CHIP_IS_E1H(bp)) { + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } + bnx2x_init_block(bp, BLOCK_HC, init_phase); + + } else { + int num_segs, sb_idx, prod_offset; + + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + + if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); + } + + bnx2x_init_block(bp, BLOCK_IGU, init_phase); + + if (!CHIP_IS_E1x(bp)) { + int dsb_idx = 0; + /** + * Producer memory: + * E2 mode: address 0-135 match to the mapping memory; + * 136 - PF0 default prod; 137 - PF1 default prod; + * 138 - PF2 default prod; 139 - PF3 default prod; + * 140 - PF0 attn prod; 141 - PF1 attn prod; + * 142 - PF2 attn prod; 143 - PF3 attn prod; + * 144-147 reserved. 
+ * + * E1.5 mode - In backward compatible mode; + * for non default SB; each even line in the memory + * holds the U producer and each odd line hold + * the C producer. The first 128 producers are for + * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 + * producers are for the DSB for each PF. + * Each PF has five segments: (the order inside each + * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; + * 132-135 C prods; 136-139 X prods; 140-143 T prods; + * 144-147 attn prods; + */ + /* non-default-status-blocks */ + num_segs = CHIP_INT_MODE_IS_BC(bp) ? + IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; + for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { + prod_offset = (bp->igu_base_sb + sb_idx) * + num_segs; + + for (i = 0; i < num_segs; i++) { + addr = IGU_REG_PROD_CONS_MEMORY + + (prod_offset + i) * 4; + REG_WR(bp, addr, 0); + } + /* send consumer update with value 0 */ + bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_igu_clear_sb(bp, + bp->igu_base_sb + sb_idx); + } + + /* default-status-blocks */ + num_segs = CHIP_INT_MODE_IS_BC(bp) ? + IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; + + if (CHIP_MODE_IS_4_PORT(bp)) + dsb_idx = BP_FUNC(bp); + else + dsb_idx = BP_E1HVN(bp); + + prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? + IGU_BC_BASE_DSB_PROD + dsb_idx : + IGU_NORM_BASE_DSB_PROD + dsb_idx); + + for (i = 0; i < (num_segs * E1HVN_MAX); + i += E1HVN_MAX) { + addr = IGU_REG_PROD_CONS_MEMORY + + (prod_offset + i)*4; + REG_WR(bp, addr, 0); + } + /* send consumer update with 0 */ + if (CHIP_INT_MODE_IS_BC(bp)) { + bnx2x_ack_sb(bp, bp->igu_dsb_id, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + CSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + XSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + TSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + ATTENTION_ID, 0, IGU_INT_NOP, 1); + } else { + bnx2x_ack_sb(bp, bp->igu_dsb_id, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + ATTENTION_ID, 0, IGU_INT_NOP, 1); + } + bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); + + /* !!! these should become driver const once + rf-tool supports split-68 const */ + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); + } + } + + /* Reset PCIE errors for debug */ + REG_WR(bp, 0x2114, 0xffffffff); + REG_WR(bp, 0x2120, 0xffffffff); + + if (CHIP_IS_E1x(bp)) { + main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ + main_mem_base = HC_REG_MAIN_MEMORY + + BP_PORT(bp) * (main_mem_size * 4); + main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; + main_mem_width = 8; + + val = REG_RD(bp, main_mem_prty_clr); + if (val) + DP(BNX2X_MSG_MCP, "Hmmm... 
Parity errors in HC "
+ "block during "
+ "function init (0x%x)!\n", val);
+
+ /* Clear "false" parity errors in MSI-X table */
+ for (i = main_mem_base;
+ i < main_mem_base + main_mem_size * 4;
+ i += main_mem_width) {
+ bnx2x_read_dmae(bp, i, main_mem_width / 4);
+ bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
+ i, main_mem_width / 4);
+ }
+ /* Clear HC parity attention */
+ REG_RD(bp, main_mem_prty_clr);
+ }
+
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Enable STORMs SP logging */
+ REG_WR8(bp, BAR_USTRORM_INTMEM +
+ USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+#endif
+
+ bnx2x_phy_probe(&bp->link_params);
+
+ return 0;
+}
+
+
+void bnx2x_free_mem(struct bnx2x *bp)
+{
+ /* fastpath */
+ bnx2x_free_fp_mem(bp);
+ /* end of fastpath */
+
+ BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+
+ BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
+ bp->context.size);
+
+ bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
+
+ BNX2X_FREE(bp->ilt->lines);
+
+#ifdef BCM_CNIC
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+#endif
+
+ BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
+
+ BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+}
+
+static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+ int num_groups;
+
+ /* number of eth_queues */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + num_eth_queues */
+ bp->fw_stats_num = 2 + num_queue_stats;
+
+
+ /* Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains
+ * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+ * configured in the stats_query_header.
+ */
+ num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
+ (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+
+ bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+ num_groups * sizeof(struct stats_query_cmd_group);
+
+ /* Data for statistics requests + stats_counter
+ *
+ * stats_counter holds per-STORM counters that are incremented
+ * when STORM has finished with the current request.
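+ *
+ * The data buffer sized below therefore holds, in order: the
+ * per-port stats, the per-PF stats, one per_queue_stats entry for
+ * each ETH queue and, last, the stats_counter itself.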
+ */ + bp->fw_stats_data_sz = sizeof(struct per_port_stats) + + sizeof(struct per_pf_stats) + + sizeof(struct per_queue_stats) * num_queue_stats + + sizeof(struct stats_counter); + + BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + + /* Set shortcuts */ + bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; + bp->fw_stats_req_mapping = bp->fw_stats_mapping; + + bp->fw_stats_data = (struct bnx2x_fw_stats_data *) + ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); + + bp->fw_stats_data_mapping = bp->fw_stats_mapping + + bp->fw_stats_req_sz; + return 0; + +alloc_mem_err: + BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + return -ENOMEM; +} + + +int bnx2x_alloc_mem(struct bnx2x *bp) +{ +#ifdef BCM_CNIC + if (!CHIP_IS_E1x(bp)) + /* size = the status block + ramrod buffers */ + BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e2)); + else + BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e1x)); + + /* allocate searcher T2 table */ + BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); +#endif + + + BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + + BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, + sizeof(struct bnx2x_slowpath)); + + /* Allocated memory for FW statistics */ + if (bnx2x_alloc_fw_stats_mem(bp)) + goto alloc_mem_err; + + bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); + + BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, + bp->context.size); + + BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); + + if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) + goto alloc_mem_err; + + /* Slow path ring */ + BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); + + /* EQ */ + BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, + BCM_PAGE_SIZE * NUM_EQ_PAGES); + + + /* fastpath */ + /* need to be done at the end, since it's self adjusting to amount + * of memory available for RSS queues + */ + if (bnx2x_alloc_fp_mem(bp)) + goto alloc_mem_err; + return 0; + +alloc_mem_err: + bnx2x_free_mem(bp); + return -ENOMEM; +} + +/* + * Init service functions + */ + +int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, + struct bnx2x_vlan_mac_obj *obj, bool set, + int mac_type, unsigned long *ramrod_flags) +{ + int rc; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* Fill general parameters */ + ramrod_param.vlan_mac_obj = obj; + ramrod_param.ramrod_flags = *ramrod_flags; + + /* Fill a user request section if needed */ + if (!test_bit(RAMROD_CONT, ramrod_flags)) { + memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); + + __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); + + /* Set the command: ADD or DEL */ + if (set) + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + else + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; + } + + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc < 0) + BNX2X_ERR("%s MAC failed\n", (set ? 
"Set" : "Del")); + return rc; +} + +int bnx2x_del_all_macs(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + int mac_type, bool wait_for_comp) +{ + int rc; + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; + + /* Wait for completion of requested */ + if (wait_for_comp) + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + + /* Set the mac type of addresses we want to clear */ + __set_bit(mac_type, &vlan_mac_flags); + + rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); + if (rc < 0) + BNX2X_ERR("Failed to delete MACs: %d\n", rc); + + return rc; +} + +int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) +{ + unsigned long ramrod_flags = 0; + + DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); + + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + /* Eth MAC is set on RSS leading client (fp[0]) */ + return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, + BNX2X_ETH_MAC, &ramrod_flags); +} + +int bnx2x_setup_leading(struct bnx2x *bp) +{ + return bnx2x_setup_queue(bp, &bp->fp[0], 1); +} + +/** + * bnx2x_set_int_mode - configure interrupt mode + * + * @bp: driver handle + * + * In case of MSI-X it will also try to enable MSI-X. + */ +static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) +{ + switch (int_mode) { + case INT_MODE_MSI: + bnx2x_enable_msi(bp); + /* falling through... */ + case INT_MODE_INTx: + bp->num_queues = 1 + NON_ETH_CONTEXT_USE; + DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); + break; + default: + /* Set number of queues according to bp->multi_mode value */ + bnx2x_set_num_queues(bp); + + DP(NETIF_MSG_IFUP, "set number of queues to %d\n", + bp->num_queues); + + /* if we can't use MSI-X we only need one fp, + * so try to enable MSI-X with the requested number of fp's + * and fallback to MSI or legacy INTx with one fp + */ + if (bnx2x_enable_msix(bp)) { + /* failed to enable MSI-X */ + if (bp->multi_mode) + DP(NETIF_MSG_IFUP, + "Multi requested but failed to " + "enable MSI-X (%d), " + "set number of queues to %d\n", + bp->num_queues, + 1 + NON_ETH_CONTEXT_USE); + bp->num_queues = 1 + NON_ETH_CONTEXT_USE; + + /* Try to enable MSI */ + if (!(bp->flags & DISABLE_MSI_FLAG)) + bnx2x_enable_msi(bp); + } + break; + } +} + +/* must be called prioir to any HW initializations */ +static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) +{ + return L2_ILT_LINES(bp); +} + +void bnx2x_ilt_set_info(struct bnx2x *bp) +{ + struct ilt_client_info *ilt_client; + struct bnx2x_ilt *ilt = BP_ILT(bp); + u16 line = 0; + + ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); + DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); + + /* CDU */ + ilt_client = &ilt->clients[ILT_CLIENT_CDU]; + ilt_client->client_num = ILT_CLIENT_CDU; + ilt_client->page_size = CDU_ILT_PAGE_SZ; + ilt_client->flags = ILT_CLIENT_SKIP_MEM; + ilt_client->start = line; + line += bnx2x_cid_ilt_lines(bp); +#ifdef BCM_CNIC + line += CNIC_ILT_LINES; +#endif + ilt_client->end = line - 1; + + DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + + /* QM */ + if (QM_INIT(bp->qm_cid_count)) { + ilt_client = &ilt->clients[ILT_CLIENT_QM]; + ilt_client->client_num = ILT_CLIENT_QM; + ilt_client->page_size = QM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + + /* 4 bytes for each cid */ + line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, + QM_ILT_PAGE_SZ); + + ilt_client->end = line - 1; + + DP(BNX2X_MSG_SP, 
"ilt client[QM]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + + } + /* SRC */ + ilt_client = &ilt->clients[ILT_CLIENT_SRC]; +#ifdef BCM_CNIC + ilt_client->client_num = ILT_CLIENT_SRC; + ilt_client->page_size = SRC_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += SRC_ILT_LINES; + ilt_client->end = line - 1; + + DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + +#else + ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); +#endif + + /* TM */ + ilt_client = &ilt->clients[ILT_CLIENT_TM]; +#ifdef BCM_CNIC + ilt_client->client_num = ILT_CLIENT_TM; + ilt_client->page_size = TM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += TM_ILT_LINES; + ilt_client->end = line - 1; + + DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + +#else + ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); +#endif + BUG_ON(line > ILT_MAX_LINES); +} + +/** + * bnx2x_pf_q_prep_init - prepare INIT transition parameters + * + * @bp: driver handle + * @fp: pointer to fastpath + * @init_params: pointer to parameters structure + * + * parameters configured: + * - HC configuration + * - Queue's CDU context + */ +static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) +{ + + u8 cos; + /* FCoE Queue uses Default SB, thus has no HC capabilities */ + if (!IS_FCOE_FP(fp)) { + __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); + __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); + + /* If HC is supporterd, enable host coalescing in the transition + * to INIT state. + */ + __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); + __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); + + /* HC rate */ + init_params->rx.hc_rate = bp->rx_ticks ? + (1000000 / bp->rx_ticks) : 0; + init_params->tx.hc_rate = bp->tx_ticks ? + (1000000 / bp->tx_ticks) : 0; + + /* FW SB ID */ + init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = + fp->fw_sb_id; + + /* + * CQ index among the SB indices: FCoE clients uses the default + * SB, therefore it's different. 
+ */ + init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; + } + + /* set maximum number of COSs supported by this queue */ + init_params->max_cos = fp->max_cos; + + DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d", + fp->index, init_params->max_cos); + + /* set the context pointers queue object */ + for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) + init_params->cxts[cos] = + &bp->context.vcxt[fp->txdata[cos].cid].eth; +} + +int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct bnx2x_queue_state_params *q_params, + struct bnx2x_queue_setup_tx_only_params *tx_only_params, + int tx_index, bool leading) +{ + memset(tx_only_params, 0, sizeof(*tx_only_params)); + + /* Set the command */ + q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; + + /* Set tx-only QUEUE flags: don't zero statistics */ + tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); + + /* choose the index of the cid to send the slow path on */ + tx_only_params->cid_index = tx_index; + + /* Set general TX_ONLY_SETUP parameters */ + bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); + + /* Set Tx TX_ONLY_SETUP parameters */ + bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); + + DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:" + "cos %d, primary cid %d, cid %d, " + "client id %d, sp-client id %d, flags %lx", + tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], + q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, + tx_only_params->gen_params.spcl_id, tx_only_params->flags); + + /* send the ramrod */ + return bnx2x_queue_state_change(bp, q_params); +} + + +/** + * bnx2x_setup_queue - setup queue + * + * @bp: driver handle + * @fp: pointer to fastpath + * @leading: is leading + * + * This function performs 2 steps in a Queue state machine + * actually: 1) RESET->INIT 2) INIT->SETUP + */ + +int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool leading) +{ + struct bnx2x_queue_state_params q_params = {0}; + struct bnx2x_queue_setup_params *setup_params = + &q_params.params.setup; + struct bnx2x_queue_setup_tx_only_params *tx_only_params = + &q_params.params.tx_only; + int rc; + u8 tx_index; + + DP(BNX2X_MSG_SP, "setting up queue %d", fp->index); + + /* reset IGU state skip FCoE L2 queue */ + if (!IS_FCOE_FP(fp)) + bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, + IGU_INT_ENABLE, 0); + + q_params.q_obj = &fp->q_obj; + /* We want to wait for completion in this context */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + /* Prepare the INIT parameters */ + bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); + + /* Set the command */ + q_params.cmd = BNX2X_Q_CMD_INIT; + + /* Change the state to INIT */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); + return rc; + } + + DP(BNX2X_MSG_SP, "init complete"); + + + /* Now move the Queue to the SETUP state... 
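+ * (step 2 of the RESET->INIT->SETUP flow described in this
+ * function's header comment)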
*/ + memset(setup_params, 0, sizeof(*setup_params)); + + /* Set QUEUE flags */ + setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); + + /* Set general SETUP parameters */ + bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, + FIRST_TX_COS_INDEX); + + bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, + &setup_params->rxq_params); + + bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, + FIRST_TX_COS_INDEX); + + /* Set the command */ + q_params.cmd = BNX2X_Q_CMD_SETUP; + + /* Change the state to SETUP */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); + return rc; + } + + /* loop through the relevant tx-only indices */ + for (tx_index = FIRST_TX_ONLY_COS_INDEX; + tx_index < fp->max_cos; + tx_index++) { + + /* prepare and send tx-only ramrod*/ + rc = bnx2x_setup_tx_only(bp, fp, &q_params, + tx_only_params, tx_index, leading); + if (rc) { + BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n", + fp->index, tx_index); + return rc; + } + } + + return rc; +} + +static int bnx2x_stop_queue(struct bnx2x *bp, int index) +{ + struct bnx2x_fastpath *fp = &bp->fp[index]; + struct bnx2x_fp_txdata *txdata; + struct bnx2x_queue_state_params q_params = {0}; + int rc, tx_index; + + DP(BNX2X_MSG_SP, "stopping queue %d cid %d", index, fp->cid); + + q_params.q_obj = &fp->q_obj; + /* We want to wait for completion in this context */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + + /* close tx-only connections */ + for (tx_index = FIRST_TX_ONLY_COS_INDEX; + tx_index < fp->max_cos; + tx_index++){ + + /* ascertain this is a normal queue*/ + txdata = &fp->txdata[tx_index]; + + DP(BNX2X_MSG_SP, "stopping tx-only queue %d", + txdata->txq_index); + + /* send halt terminate on tx-only connection */ + q_params.cmd = BNX2X_Q_CMD_TERMINATE; + memset(&q_params.params.terminate, 0, + sizeof(q_params.params.terminate)); + q_params.params.terminate.cid_index = tx_index; + + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + + /* send halt terminate on tx-only connection */ + q_params.cmd = BNX2X_Q_CMD_CFC_DEL; + memset(&q_params.params.cfc_del, 0, + sizeof(q_params.params.cfc_del)); + q_params.params.cfc_del.cid_index = tx_index; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + } + /* Stop the primary connection: */ + /* ...halt the connection */ + q_params.cmd = BNX2X_Q_CMD_HALT; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + + /* ...terminate the connection */ + q_params.cmd = BNX2X_Q_CMD_TERMINATE; + memset(&q_params.params.terminate, 0, + sizeof(q_params.params.terminate)); + q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + /* ...delete cfc entry */ + q_params.cmd = BNX2X_Q_CMD_CFC_DEL; + memset(&q_params.params.cfc_del, 0, + sizeof(q_params.params.cfc_del)); + q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; + return bnx2x_queue_state_change(bp, &q_params); +} + + +static void bnx2x_reset_func(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int i; + + /* Disable the function in the FW */ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); + + /* FP SBs */ + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + 
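+ /* mark the fastpath status block as disabled in the CSTORM */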
REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), + SB_DISABLED); + } + +#ifdef BCM_CNIC + /* CNIC SB */ + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), + SB_DISABLED); +#endif + /* SP SB */ + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), + SB_DISABLED); + + for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), + 0); + + /* Configure IGU */ + if (bp->common.int_block == INT_BLOCK_HC) { + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } else { + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); + } + +#ifdef BCM_CNIC + /* Disable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); + /* + * Wait for at least 10ms and up to 2 second for the timers scan to + * complete + */ + for (i = 0; i < 200; i++) { + msleep(10); + if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) + break; + } +#endif + /* Clear ILT */ + bnx2x_clear_func_ilt(bp, func); + + /* Timers workaround bug for E2: if this is vnic-3, + * we need to set the entire ilt range for this timers. + */ + if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { + struct ilt_client_info ilt_cli; + /* use dummy TM client */ + memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); + ilt_cli.start = 0; + ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; + ilt_cli.client_num = ILT_CLIENT_TM; + + bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); + } + + /* this assumes that reset_port() called before reset_func()*/ + if (!CHIP_IS_E1x(bp)) + bnx2x_pf_disable(bp); + + bp->dmae_ready = 0; +} + +static void bnx2x_reset_port(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 val; + + /* Reset physical Link */ + bnx2x__link_reset(bp); + + REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); + + /* Do not rcv packets to BRB */ + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); + /* Do not direct rcv packets that are not for MCP to the BRB */ + REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); + + /* Configure AEU */ + REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); + + msleep(100); + /* Check for BRB port occupancy */ + val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); + if (val) + DP(NETIF_MSG_IFDOWN, + "BRB1 is not empty %d blocks are occupied\n", val); + + /* TODO: Close Doorbell port? */ +} + +static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) +{ + struct bnx2x_func_state_params func_params = {0}; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_HW_RESET; + + func_params.params.hw_init.load_phase = load_code; + + return bnx2x_func_state_change(bp, &func_params); +} + +static inline int bnx2x_func_stop(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {0}; + int rc; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_STOP; + + /* + * Try to stop the function the 'good way'. If fails (in case + * of a parity error during bnx2x_chip_cleanup()) and we are + * not in a debug mode, perform a state transaction in order to + * enable further HW_RESET transaction. 
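+ * (the retry below sets RAMROD_DRV_CLR_ONLY, i.e. it runs the
+ * transition as a "dry" one, as the error message notes)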
*/
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+#ifdef BNX2X_STOP_ON_ERROR
+ return rc;
+#else
+ BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
+ "transaction\n");
+ __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp: driver handle
+ * @unload_mode: requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
+{
+ u32 reset_code = 0;
+ int port = BP_PORT(bp);
+
+ /* Select the UNLOAD request mode */
+ if (unload_mode == UNLOAD_NORMAL)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ else if (bp->flags & NO_WOL_FLAG)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+
+ else if (bp->wol) {
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u8 *mac_addr = bp->dev->dev_addr;
+ u32 val;
+ /* The mac address is written to entries 1-4 to
+ preserve entry 0 which is used by the PMF */
+ u8 entry = (BP_E1HVN(bp) + 1)*8;
+
+ val = (mac_addr[0] << 8) | mac_addr[1];
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+ val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+ } else
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ /* Send the request to the MCP */
+ if (!BP_NOMCP(bp))
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
+ else {
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
+ "%d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]--;
+ load_count[path][1 + port]--;
+ DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
+ "%d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+ else if (load_count[path][1 + port] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
+ else
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+ }
+
+ return reset_code;
+}
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp)
+{
+ /* Report UNLOAD_DONE to MCP */
+ if (!BP_NOMCP(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+}
+
+static inline int bnx2x_func_wait_started(struct bnx2x *bp)
+{
+ int tout = 50;
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+
+ if (!bp->port.pmf)
+ return 0;
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TXdisable/enable transaction
+ * 1. Sync ISR for default SB
+ * 2. Sync SP queue - this guarantees us that attention handling started
+ * 3. Wait until the TXdisable/enable transaction completes
+ *
+ * 1+2 guarantee that if DCBx attention was scheduled it already changed
+ * the pending bit of the transaction from STARTED-->TX_STOPPED; if we
+ * already received completion for the transaction, the state is
+ * TX_STOPPED.
+ * The state will return to STARTED after completion of the
+ * TX_STOPPED-->STARTED transaction.
*/
+
+ /* make sure default SB ISR is done */
+ if (msix)
+ synchronize_irq(bp->msix_table[0].vector);
+ else
+ synchronize_irq(bp->pdev->irq);
+
+ flush_workqueue(bnx2x_wq);
+
+ while (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED && tout--)
+ msleep(20);
+
+ if (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED) {
+#ifdef BNX2X_STOP_ON_ERROR
+ return -EBUSY;
+#else
+ /*
+ * Failed to complete the transaction in a "good way",
+ * so force both transactions with the CLR bit.
+ */
+ struct bnx2x_func_state_params func_params = {0};
+
+ DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
+ "Forcing STARTED-->TX_STOPPED-->STARTED\n");
+
+ func_params.f_obj = &bp->func_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY,
+ &func_params.ramrod_flags);
+
+ /* STARTED-->TX_STOPPED */
+ func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ bnx2x_func_state_change(bp, &func_params);
+
+ /* TX_STOPPED-->STARTED */
+ func_params.cmd = BNX2X_F_CMD_TX_START;
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+{
+ int port = BP_PORT(bp);
+ int i, rc = 0;
+ u8 cos;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ u32 reset_code;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+#ifdef BNX2X_STOP_ON_ERROR
+ if (rc)
+ return;
+#endif
+ }
+
+ /* Give HW time to discard old tx messages */
+ usleep_range(1000, 1000);
+
+ /* Clean all ETH MACs */
+ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
+
+ /* Clean up UC list */
+ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+ true);
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
+ "%d\n", rc);
+
+ /* Disable LLH */
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+ /* Set "drop all" (stop Rx).
+ * We need to take a netif_addr_lock() here in order to prevent
+ * a race between the completion code and this code.
+ */
+ netif_addr_lock_bh(bp->dev);
+ /* Schedule the rx_mode command */
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ else
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* Cleanup multicast configuration */
+ rparam.mcast_obj = &bp->mcast_obj;
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
+
+ netif_addr_unlock_bh(bp->dev);
+
+
+
+ /*
+ * Send the UNLOAD_REQUEST to the MCP. This will return whether
+ * this function should perform FUNC, PORT or COMMON HW
+ * reset.
+ */
+ reset_code = bnx2x_send_unload_req(bp, unload_mode);
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TXdisable/enable transaction
+ */
+ rc = bnx2x_func_wait_started(bp);
+ if (rc) {
+ BNX2X_ERR("bnx2x_func_wait_started failed\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
+ /* Close multi and leading connections
+ * Completions for ramrods are collected in a synchronous way
+ */
+ for_each_queue(bp, i)
+ if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#else
+ goto unload_error;
+#endif
+ /* If SP settings didn't get completed so far - something
+ * very wrong has happened.
+ */
+ if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
+ BNX2X_ERR("Hmmm...
Common slow path ramrods got stuck!\n");
+
+#ifndef BNX2X_STOP_ON_ERROR
+unload_error:
+#endif
+ rc = bnx2x_func_stop(bp);
+ if (rc) {
+ BNX2X_ERR("Function stop failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
+ /* Reset the chip */
+ rc = bnx2x_reset_hw(bp, reset_code);
+ if (rc)
+ BNX2X_ERR("HW_RESET failed\n");
+
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp);
+}
+
+void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+
+ if (CHIP_IS_E1(bp)) {
+ int port = BP_PORT(bp);
+ u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ val = REG_RD(bp, addr);
+ val &= ~(0x300);
+ REG_WR(bp, addr, val);
+ } else {
+ val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+ val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+ MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+ }
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+ u32 val;
+
+ /* Gates #2 and #4a are closed/opened for "not E1" only */
+ if (!CHIP_IS_E1(bp)) {
+ /* #4 */
+ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
+ /* #2 */
+ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
+ }
+
+ /* #3 */
+ if (CHIP_IS_E1x(bp)) {
+ /* Prevent interrupts from HC on both ports */
+ val = REG_RD(bp, HC_REG_CONFIG_1);
+ REG_WR(bp, HC_REG_CONFIG_1,
+ (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
+ (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+
+ val = REG_RD(bp, HC_REG_CONFIG_0);
+ REG_WR(bp, HC_REG_CONFIG_0,
+ (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
+ (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+ } else {
+ /* Prevent incoming interrupts in IGU */
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
+ (!close) ?
+ (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
+ (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+ }
+
+ DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+ close ? "closing" : "opening");
+ mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ /* Do some magic... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ *magic_val = val & SHARED_MF_CLP_MAGIC;
+ MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/**
+ * bnx2x_clp_reset_done - restore the value of the `magic' bit.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+ /* Restore the `magic' bit value... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ MF_CFG_WR(bp, shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/**
+ * bnx2x_reset_mcp_prep - prepare for MCP reset.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of 'magic' bit.
+ *
+ * Takes care of CLP configurations.
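+ * Saves the `magic' bit and clears the shmem validity map so that
+ * MCP readiness can be re-detected after the reset.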
+ */ +static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) +{ + u32 shmem; + u32 validity_offset; + + DP(NETIF_MSG_HW, "Starting\n"); + + /* Set `magic' bit in order to save MF config */ + if (!CHIP_IS_E1(bp)) + bnx2x_clp_reset_prep(bp, magic_val); + + /* Get shmem offset */ + shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + validity_offset = offsetof(struct shmem_region, validity_map[0]); + + /* Clear validity map flags */ + if (shmem > 0) + REG_WR(bp, shmem + validity_offset, 0); +} + +#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ +#define MCP_ONE_TIMEOUT 100 /* 100 ms */ + +/** + * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT + * + * @bp: driver handle + */ +static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) +{ + /* special handling for emulation and FPGA, + wait 10 times longer */ + if (CHIP_REV_IS_SLOW(bp)) + msleep(MCP_ONE_TIMEOUT*10); + else + msleep(MCP_ONE_TIMEOUT); +} + +/* + * initializes bp->common.shmem_base and waits for validity signature to appear + */ +static int bnx2x_init_shmem(struct bnx2x *bp) +{ + int cnt = 0; + u32 val = 0; + + do { + bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + if (bp->common.shmem_base) { + val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); + if (val & SHR_MEM_VALIDITY_MB) + return 0; + } + + bnx2x_mcp_wait_one(bp); + + } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); + + BNX2X_ERR("BAD MCP validity signature\n"); + + return -ENODEV; +} + +static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) +{ + int rc = bnx2x_init_shmem(bp); + + /* Restore the `magic' bit value */ + if (!CHIP_IS_E1(bp)) + bnx2x_clp_reset_done(bp, magic_val); + + return rc; +} + +static void bnx2x_pxp_prep(struct bnx2x *bp) +{ + if (!CHIP_IS_E1(bp)) { + REG_WR(bp, PXP2_REG_RD_START_INIT, 0); + REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); + mmiowb(); + } +} + +/* + * Reset the whole chip except for: + * - PCIE core + * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by + * one reset bit) + * - IGU + * - MISC (including AEU) + * - GRC + * - RBCN, RBCP + */ +static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) +{ + u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; + u32 global_bits2, stay_reset2; + + /* + * Bits that have to be set in reset_mask2 if we want to reset 'global' + * (per chip) blocks. + */ + global_bits2 = + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; + + /* Don't reset the following blocks */ + not_reset_mask1 = + MISC_REGISTERS_RESET_REG_1_RST_HC | + MISC_REGISTERS_RESET_REG_1_RST_PXPV | + MISC_REGISTERS_RESET_REG_1_RST_PXP; + + not_reset_mask2 = + MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | + MISC_REGISTERS_RESET_REG_2_RST_RBCN | + MISC_REGISTERS_RESET_REG_2_RST_GRC | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | + MISC_REGISTERS_RESET_REG_2_RST_ATC | + MISC_REGISTERS_RESET_REG_2_PGLC; + + /* + * Keep the following blocks in reset: + * - all xxMACs are handled by the bnx2x_link code. 
*/
+ stay_reset2 =
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+ MISC_REGISTERS_RESET_REG_2_UMAC0 |
+ MISC_REGISTERS_RESET_REG_2_UMAC1 |
+ MISC_REGISTERS_RESET_REG_2_XMAC |
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
+
+ /* Full reset masks according to the chip */
+ reset_mask1 = 0xffffffff;
+
+ if (CHIP_IS_E1(bp))
+ reset_mask2 = 0xffff;
+ else if (CHIP_IS_E1H(bp))
+ reset_mask2 = 0x1ffff;
+ else if (CHIP_IS_E2(bp))
+ reset_mask2 = 0xfffff;
+ else /* CHIP_IS_E3 */
+ reset_mask2 = 0x3ffffff;
+
+ /* Don't reset global blocks unless we need to */
+ if (!global)
+ reset_mask2 &= ~global_bits2;
+
+ /*
+ * In case of attention in the QM, we need to reset PXP
+ * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
+ * because otherwise QM reset would release 'close the gates' shortly
+ * before resetting the PXP, then the PSWRQ would send a write
+ * request to PGLUE. Then when PXP is reset, PGLUE would try to
+ * read the payload data from PSWWR, but PSWWR would not
+ * respond. The write queue in PGLUE would get stuck and DMAE
+ * commands would not return. Therefore it's important to reset
+ * the second reset register (containing the
+ * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
+ * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
+ * bit).
+ */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ reset_mask2 & (~not_reset_mask2));
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ reset_mask2 & (~stay_reset2));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+ mmiowb();
+}
+
+/**
+ * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
+ *
+ * @bp: driver handle
+ *
+ * It should get cleared in no more than 1s. Returns 0 if the
+ * pending writes bit gets cleared.
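+ * Returns -EBUSY if the bit is still set after the 1s timeout.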
+ */ +static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) +{ + u32 cnt = 1000; + u32 pend_bits = 0; + + do { + pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); + + if (pend_bits == 0) + break; + + usleep_range(1000, 1000); + } while (cnt-- > 0); + + if (cnt <= 0) { + BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", + pend_bits); + return -EBUSY; + } + + return 0; +} + +static int bnx2x_process_kill(struct bnx2x *bp, bool global) +{ + int cnt = 1000; + u32 val = 0; + u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; + + + /* Empty the Tetris buffer, wait for 1s */ + do { + sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); + blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); + port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); + port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); + pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); + if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && + ((port_is_idle_0 & 0x1) == 0x1) && + ((port_is_idle_1 & 0x1) == 0x1) && + (pgl_exp_rom2 == 0xffffffff)) + break; + usleep_range(1000, 1000); + } while (cnt-- > 0); + + if (cnt <= 0) { + DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there" + " are still" + " outstanding read requests after 1s!\n"); + DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x," + " port_is_idle_0=0x%08x," + " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", + sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, + pgl_exp_rom2); + return -EAGAIN; + } + + barrier(); + + /* Close gates #2, #3 and #4 */ + bnx2x_set_234_gates(bp, true); + + /* Poll for IGU VQs for 57712 and newer chips */ + if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) + return -EAGAIN; + + + /* TBD: Indicate that "process kill" is in progress to MCP */ + + /* Clear "unprepared" bit */ + REG_WR(bp, MISC_REG_UNPREPARED, 0); + barrier(); + + /* Make sure all is written to the chip before the reset */ + mmiowb(); + + /* Wait for 1ms to empty GLUE and PCI-E core queues, + * PSWHST, GRC and PSWRD Tetris buffer. + */ + usleep_range(1000, 1000); + + /* Prepare to chip reset: */ + /* MCP */ + if (global) + bnx2x_reset_mcp_prep(bp, &val); + + /* PXP */ + bnx2x_pxp_prep(bp); + barrier(); + + /* reset the chip */ + bnx2x_process_kill_chip_reset(bp, global); + barrier(); + + /* Recover after reset: */ + /* MCP */ + if (global && bnx2x_reset_mcp_comp(bp, val)) + return -EAGAIN; + + /* TBD: Add resetting the NO_MCP mode DB here */ + + /* PXP */ + bnx2x_pxp_prep(bp); + + /* Open the gates #2, #3 and #4 */ + bnx2x_set_234_gates(bp, false); + + /* TBD: IGU/AEU preparation bring back the AEU/IGU to a + * reset state, re-enable attentions. */ + + return 0; +} + +int bnx2x_leader_reset(struct bnx2x *bp) +{ + int rc = 0; + bool global = bnx2x_reset_is_global(bp); + + /* Try to recover after the failure */ + if (bnx2x_process_kill(bp, global)) { + netdev_err(bp->dev, "Something bad had happen on engine %d! " + "Aii!\n", BP_PATH(bp)); + rc = -EAGAIN; + goto exit_leader_reset; + } + + /* + * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver + * state. + */ + bnx2x_set_reset_done(bp); + if (global) + bnx2x_clear_reset_global(bp); + +exit_leader_reset: + bp->is_leader = 0; + bnx2x_release_leader_lock(bp); + smp_mb(); + return rc; +} + +static inline void bnx2x_recovery_failed(struct bnx2x *bp) +{ + netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); + + /* Disconnect this device */ + netif_device_detach(bp->dev); + + /* + * Block ifup for all function on this engine until "process kill" + * or power cycle. 
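+ * (the reset-in-progress flag set below is what keeps further
+ * loads blocked)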
+
+static inline void bnx2x_recovery_failed(struct bnx2x *bp)
+{
+	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
+
+	/* Disconnect this device */
+	netif_device_detach(bp->dev);
+
+	/*
+	 * Block ifup for all functions on this engine until "process kill"
+	 * or power cycle.
+	 */
+	bnx2x_set_reset_in_progress(bp);
+
+	/* Shut down the power */
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
+	bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+	smp_mb();
+}
+
+/*
+ * Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_sp_rtnl() ensures that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+	bool global = false;
+
+	DP(NETIF_MSG_HW, "Handling parity\n");
+	while (1) {
+		switch (bp->recovery_state) {
+		case BNX2X_RECOVERY_INIT:
+			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+			bnx2x_chk_parity_attn(bp, &global, false);
+
+			/* Try to get a LEADER_LOCK HW lock */
+			if (bnx2x_trylock_leader_lock(bp)) {
+				bnx2x_set_reset_in_progress(bp);
+				/*
+				 * If there is a global attention, set the
+				 * global reset bit.
+				 */
+				if (global)
+					bnx2x_set_reset_global(bp);
+
+				bp->is_leader = 1;
+			}
+
+			/* Stop the driver */
+			/* If interface has been removed - break */
+			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+				return;
+
+			bp->recovery_state = BNX2X_RECOVERY_WAIT;
+
+			/*
+			 * Reset MCP command sequence number and MCP mail box
+			 * sequence as we are going to reset the MCP.
+			 */
+			if (global) {
+				bp->fw_seq = 0;
+				bp->fw_drv_pulse_wr_seq = 0;
+			}
+
+			/* Ensure "is_leader", MCP command sequence and
+			 * "recovery_state" update values are seen on other
+			 * CPUs.
+			 */
+			smp_mb();
+			break;
+
+		case BNX2X_RECOVERY_WAIT:
+			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+			if (bp->is_leader) {
+				int other_engine = BP_PATH(bp) ? 0 : 1;
+				u32 other_load_counter =
+					bnx2x_get_load_cnt(bp, other_engine);
+				u32 load_counter =
+					bnx2x_get_load_cnt(bp, BP_PATH(bp));
+				global = bnx2x_reset_is_global(bp);
+
+				/*
+				 * In case of a parity in a global block, let
+				 * the first leader that performs a
+				 * leader_reset() reset the global blocks in
+				 * order to clear global attentions. Otherwise
+				 * the gates will remain closed for that
+				 * engine.
+				 */
+				if (load_counter ||
+				    (global && other_load_counter)) {
+					/* Wait until all other functions get
+					 * down.
+					 */
+					schedule_delayed_work(&bp->sp_rtnl_task,
+							      HZ/10);
+					return;
+				} else {
+					/* If all other functions got down -
+					 * try to bring the chip back to
+					 * normal. In any case it's an exit
+					 * point for a leader.
+					 */
+					if (bnx2x_leader_reset(bp)) {
+						bnx2x_recovery_failed(bp);
+						return;
+					}
+
+					/* If we are here, it means that the
+					 * leader has succeeded and doesn't
+					 * want to be a leader any more. Try
+					 * to continue as a non-leader.
+					 */
+					break;
+				}
+			} else { /* non-leader */
+				if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
+					/* Try to get a LEADER_LOCK HW lock as
+					 * long as a former leader may have
+					 * been unloaded by the user or
+					 * released leadership for another
+					 * reason.
+					 */
+					if (bnx2x_trylock_leader_lock(bp)) {
+						/* I'm a leader now! Restart a
+						 * switch case.
+						 */
+						bp->is_leader = 1;
+						break;
+					}
+
+					schedule_delayed_work(&bp->sp_rtnl_task,
+							      HZ/10);
+					return;
+				} else {
+					/*
+					 * If there was a global attention, wait
+					 * for it to be cleared.
+					 */
+					if (bnx2x_reset_is_global(bp)) {
+						schedule_delayed_work(
+							&bp->sp_rtnl_task,
+							HZ/10);
+						return;
+					}
+
+					if (bnx2x_nic_load(bp, LOAD_NORMAL))
+						bnx2x_recovery_failed(bp);
+					else {
+						bp->recovery_state =
+							BNX2X_RECOVERY_DONE;
+						smp_mb();
+					}
+
+					return;
+				}
+			}
+		default:
+			return;
+		}
+	}
+}
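bnx2x_parity_recover() never blocks waiting for the other functions; whenever it cannot make progress it re-arms the sp_rtnl delayed work (HZ/10, i.e. about 100ms) and returns. The generic shape of such a self-rescheduling worker, sketched with hypothetical names rather than the driver's:

    #include <linux/workqueue.h>

    struct my_dev {
    	struct delayed_work task;
    	/* ... device state ... */
    };

    /* Illustrative stub: returns true once the work is finished. */
    static bool my_make_progress(struct my_dev *dev)
    {
    	return true;
    }

    /* Self-rescheduling delayed-work sketch; my_dev and
     * my_make_progress() are placeholders, not the driver's API. */
    static void my_task_fn(struct work_struct *work)
    {
    	struct my_dev *dev = container_of(work, struct my_dev, task.work);

    	if (!my_make_progress(dev))
    		schedule_delayed_work(&dev->task, HZ / 10); /* retry later */
    }

The owner initializes the item once with INIT_DELAYED_WORK(&dev->task, my_task_fn) and kicks it with schedule_delayed_work(), exactly as bnx2x does for sp_rtnl_task.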
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
+ * scheduled on a general queue in order to prevent a deadlock.
+ */
+static void bnx2x_sp_rtnl_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+
+	rtnl_lock();
+
+	if (!netif_running(bp->dev))
+		goto sp_rtnl_exit;
+
+	/* if stop on error is defined no recovery flows should be executed */
+#ifdef BNX2X_STOP_ON_ERROR
+	BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined, "
+		  "so reset is not done, to allow a debug dump;\n"
+		  "you will need to reboot when done\n");
+	goto sp_rtnl_not_reset;
+#endif
+
+	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
+		/*
+		 * Clear all pending SP commands as we are going to reset the
+		 * function anyway.
+		 */
+		bp->sp_rtnl_state = 0;
+		smp_mb();
+
+		bnx2x_parity_recover(bp);
+
+		goto sp_rtnl_exit;
+	}
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
+		/*
+		 * Clear all pending SP commands as we are going to reset the
+		 * function anyway.
+		 */
+		bp->sp_rtnl_state = 0;
+		smp_mb();
+
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		bnx2x_nic_load(bp, LOAD_NORMAL);
+
+		goto sp_rtnl_exit;
+	}
+#ifdef BNX2X_STOP_ON_ERROR
+sp_rtnl_not_reset:
+#endif
+	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+
+sp_rtnl_exit:
+	rtnl_unlock();
+}
+
+/* end of nic load/unload */
+
+static void bnx2x_period_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
+
+	if (!netif_running(bp->dev))
+		goto period_task_exit;
+
+	if (CHIP_REV_IS_SLOW(bp)) {
+		BNX2X_ERR("period task called on emulation, ignoring\n");
+		goto period_task_exit;
+	}
+
+	bnx2x_acquire_phy_lock(bp);
+	/*
+	 * The barrier is needed to ensure the ordering between the writing
+	 * to bp->port.pmf in bnx2x_nic_load() or bnx2x_pmf_update() and
+	 * the reading here.
+ */ + smp_mb(); + if (bp->port.pmf) { + bnx2x_period_func(&bp->link_params, &bp->link_vars); + + /* Re-queue task in 1 sec */ + queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); + } + + bnx2x_release_phy_lock(bp); +period_task_exit: + return; +} + +/* + * Init service functions + */ + +static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) +{ + u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; + u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; + return base + (BP_ABS_FUNC(bp)) * stride; +} + +static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp) +{ + u32 reg = bnx2x_get_pretend_reg(bp); + + /* Flush all outstanding writes */ + mmiowb(); + + /* Pretend to be function 0 */ + REG_WR(bp, reg, 0); + REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */ + + /* From now we are in the "like-E1" mode */ + bnx2x_int_disable(bp); + + /* Flush all outstanding writes */ + mmiowb(); + + /* Restore the original function */ + REG_WR(bp, reg, BP_ABS_FUNC(bp)); + REG_RD(bp, reg); +} + +static inline void bnx2x_undi_int_disable(struct bnx2x *bp) +{ + if (CHIP_IS_E1(bp)) + bnx2x_int_disable(bp); + else + bnx2x_undi_int_disable_e1h(bp); +} + +static void __devinit bnx2x_undi_unload(struct bnx2x *bp) +{ + u32 val; + + /* Check if there is any driver already loaded */ + val = REG_RD(bp, MISC_REG_UNPREPARED); + if (val == 0x1) { + /* Check if it is the UNDI driver + * UNDI driver initializes CID offset for normal bell to 0x7 + */ + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); + if (val == 0x7) { + u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + /* save our pf_num */ + int orig_pf_num = bp->pf_num; + int port; + u32 swap_en, swap_val, value; + + /* clear the UNDI indication */ + REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); + + BNX2X_DEV_INFO("UNDI is active! reset device\n"); + + /* try unload UNDI on port 0 */ + bp->pf_num = 0; + bp->fw_seq = + (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + reset_code = bnx2x_fw_command(bp, reset_code, 0); + + /* if UNDI is loaded on the other port */ + if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { + + /* send "DONE" for previous unload */ + bnx2x_fw_command(bp, + DRV_MSG_CODE_UNLOAD_DONE, 0); + + /* unload UNDI on port 1 */ + bp->pf_num = 1; + bp->fw_seq = + (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + + bnx2x_fw_command(bp, reset_code, 0); + } + + /* now it's safe to release the lock */ + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + + bnx2x_undi_int_disable(bp); + port = BP_PORT(bp); + + /* close input traffic and wait for it */ + /* Do not rcv packets to BRB */ + REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK : + NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); + /* Do not direct rcv packets that are not for MCP to + * the BRB */ + REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); + /* clear AEU */ + REG_WR(bp, (port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); + msleep(10); + + /* save NIG port swap info */ + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + /* reset device */ + REG_WR(bp, + GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0xd3ffffff); + + value = 0x1400; + if (CHIP_IS_E3(bp)) { + value |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + value |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + + REG_WR(bp, + GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + value); + + /* take the NIG out of reset and restore swap values */ + REG_WR(bp, + GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, + MISC_REGISTERS_RESET_REG_1_RST_NIG); + REG_WR(bp, NIG_REG_PORT_SWAP, swap_val); + REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en); + + /* send unload done to the MCP */ + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + + /* restore our func and fw_seq */ + bp->pf_num = orig_pf_num; + bp->fw_seq = + (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + } else + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + } +} + +static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) +{ + u32 val, val2, val3, val4, id; + u16 pmc; + + /* Get the chip revision id and number. */ + /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ + val = REG_RD(bp, MISC_REG_CHIP_NUM); + id = ((val & 0xffff) << 16); + val = REG_RD(bp, MISC_REG_CHIP_REV); + id |= ((val & 0xf) << 12); + val = REG_RD(bp, MISC_REG_CHIP_METAL); + id |= ((val & 0xff) << 4); + val = REG_RD(bp, MISC_REG_BOND_ID); + id |= (val & 0xf); + bp->common.chip_id = id; + + /* Set doorbell size */ + bp->db_size = (1 << BNX2X_DB_SHIFT); + + if (!CHIP_IS_E1x(bp)) { + val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); + if ((val & 1) == 0) + val = REG_RD(bp, MISC_REG_PORT4MODE_EN); + else + val = (val >> 1) & 1; + BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : + "2_PORT_MODE"); + bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : + CHIP_2_PORT_MODE; + + if (CHIP_MODE_IS_4_PORT(bp)) + bp->pfid = (bp->pf_num >> 1); /* 0..3 */ + else + bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ + } else { + bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ + bp->pfid = bp->pf_num; /* 0..7 */ + } + + bp->link_params.chip_id = bp->common.chip_id; + BNX2X_DEV_INFO("chip ID is 0x%x\n", id); + + val = (REG_RD(bp, 0x2874) & 0x55); + if ((bp->common.chip_id & 0x1) || + (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { + bp->flags |= ONE_PORT_FLAG; + BNX2X_DEV_INFO("single port device\n"); + } + + val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); + bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << + (val & MCPR_NVM_CFG4_FLASH_SIZE)); + BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", + bp->common.flash_size, bp->common.flash_size); + + bnx2x_init_shmem(bp); + + + + bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
+ MISC_REG_GENERIC_CR_1 : + MISC_REG_GENERIC_CR_0)); + + bp->link_params.shmem_base = bp->common.shmem_base; + bp->link_params.shmem2_base = bp->common.shmem2_base; + BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", + bp->common.shmem_base, bp->common.shmem2_base); + + if (!bp->common.shmem_base) { + BNX2X_DEV_INFO("MCP not active\n"); + bp->flags |= NO_MCP_FLAG; + return; + } + + bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); + BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); + + bp->link_params.hw_led_mode = ((bp->common.hw_config & + SHARED_HW_CFG_LED_MODE_MASK) >> + SHARED_HW_CFG_LED_MODE_SHIFT); + + bp->link_params.feature_config_flags = 0; + val = SHMEM_RD(bp, dev_info.shared_feature_config.config); + if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) + bp->link_params.feature_config_flags |= + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + else + bp->link_params.feature_config_flags &= + ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + + val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; + bp->common.bc_ver = val; + BNX2X_DEV_INFO("bc_ver %X\n", val); + if (val < BNX2X_BC_VER) { + /* for now only warn + * later we might need to enforce this */ + BNX2X_ERR("This driver needs bc_ver %X but found %X, " + "please upgrade BC\n", BNX2X_BC_VER, val); + } + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? + FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; + + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? + FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; + + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? + FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; + + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); + bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; + + BNX2X_DEV_INFO("%sWoL capable\n", + (bp->flags & NO_WOL_FLAG) ? "not " : ""); + + val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); + val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); + val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); + val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); + + dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", + val, val2, val3, val4); +} + +#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) +#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) + +static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) +{ + int pfid = BP_FUNC(bp); + int vn = BP_E1HVN(bp); + int igu_sb_id; + u32 val; + u8 fid, igu_sb_cnt = 0; + + bp->igu_base_sb = 0xff; + if (CHIP_INT_MODE_IS_BC(bp)) { + igu_sb_cnt = bp->igu_sb_cnt; + bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * + FP_SB_MAX_E1x; + + bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + + (CHIP_MODE_IS_4_PORT(bp) ? 
pfid : vn); + + return; + } + + /* IGU in normal mode - read CAM */ + for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; + igu_sb_id++) { + val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); + if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) + continue; + fid = IGU_FID(val); + if ((fid & IGU_FID_ENCODE_IS_PF)) { + if ((fid & IGU_FID_PF_NUM_MASK) != pfid) + continue; + if (IGU_VEC(val) == 0) + /* default status block */ + bp->igu_dsb_id = igu_sb_id; + else { + if (bp->igu_base_sb == 0xff) + bp->igu_base_sb = igu_sb_id; + igu_sb_cnt++; + } + } + } + +#ifdef CONFIG_PCI_MSI + /* + * It's expected that number of CAM entries for this functions is equal + * to the number evaluated based on the MSI-X table size. We want a + * harsh warning if these values are different! + */ + WARN_ON(bp->igu_sb_cnt != igu_sb_cnt); +#endif + + if (igu_sb_cnt == 0) + BNX2X_ERR("CAM configuration error\n"); +} + +static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, + u32 switch_cfg) +{ + int cfg_size = 0, idx, port = BP_PORT(bp); + + /* Aggregation of supported attributes of all external phys */ + bp->port.supported[0] = 0; + bp->port.supported[1] = 0; + switch (bp->link_params.num_phys) { + case 1: + bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; + cfg_size = 1; + break; + case 2: + bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; + cfg_size = 1; + break; + case 3: + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) { + bp->port.supported[1] = + bp->link_params.phy[EXT_PHY1].supported; + bp->port.supported[0] = + bp->link_params.phy[EXT_PHY2].supported; + } else { + bp->port.supported[0] = + bp->link_params.phy[EXT_PHY1].supported; + bp->port.supported[1] = + bp->link_params.phy[EXT_PHY2].supported; + } + cfg_size = 2; + break; + } + + if (!(bp->port.supported[0] || bp->port.supported[1])) { + BNX2X_ERR("NVRAM config error. BAD phy config." 
+ "PHY1 config 0x%x, PHY2 config 0x%x\n", + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config), + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config2)); + return; + } + + if (CHIP_IS_E3(bp)) + bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); + else { + switch (switch_cfg) { + case SWITCH_CFG_1G: + bp->port.phy_addr = REG_RD( + bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); + break; + case SWITCH_CFG_10G: + bp->port.phy_addr = REG_RD( + bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); + break; + default: + BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", + bp->port.link_config[0]); + return; + } + } + BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); + /* mask what we support according to speed_cap_mask per configuration */ + for (idx = 0; idx < cfg_size; idx++) { + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) + bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) + bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) + bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) + bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) + bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) + bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) + bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; + + } + + BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], + bp->port.supported[1]); +} + +static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) +{ + u32 link_config, idx, cfg_size = 0; + bp->port.advertising[0] = 0; + bp->port.advertising[1] = 0; + switch (bp->link_params.num_phys) { + case 1: + case 2: + cfg_size = 1; + break; + case 3: + cfg_size = 2; + break; + } + for (idx = 0; idx < cfg_size; idx++) { + bp->link_params.req_duplex[idx] = DUPLEX_FULL; + link_config = bp->port.link_config[idx]; + switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { + case PORT_FEATURE_LINK_SPEED_AUTO: + if (bp->port.supported[idx] & SUPPORTED_Autoneg) { + bp->link_params.req_line_speed[idx] = + SPEED_AUTO_NEG; + bp->port.advertising[idx] |= + bp->port.supported[idx]; + } else { + /* force 10G, no AN */ + bp->link_params.req_line_speed[idx] = + SPEED_10000; + bp->port.advertising[idx] |= + (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + continue; + } + break; + + case PORT_FEATURE_LINK_SPEED_10M_FULL: + if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_10; + bp->port.advertising[idx] |= + (ADVERTISED_10baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. 
" + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_10M_HALF: + if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { + bp->link_params.req_line_speed[idx] = + SPEED_10; + bp->link_params.req_duplex[idx] = + DUPLEX_HALF; + bp->port.advertising[idx] |= + (ADVERTISED_10baseT_Half | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_100M_FULL: + if (bp->port.supported[idx] & + SUPPORTED_100baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_100; + bp->port.advertising[idx] |= + (ADVERTISED_100baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_100M_HALF: + if (bp->port.supported[idx] & + SUPPORTED_100baseT_Half) { + bp->link_params.req_line_speed[idx] = + SPEED_100; + bp->link_params.req_duplex[idx] = + DUPLEX_HALF; + bp->port.advertising[idx] |= + (ADVERTISED_100baseT_Half | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_1G: + if (bp->port.supported[idx] & + SUPPORTED_1000baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_1000; + bp->port.advertising[idx] |= + (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_2_5G: + if (bp->port.supported[idx] & + SUPPORTED_2500baseX_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_2500; + bp->port.advertising[idx] |= + (ADVERTISED_2500baseX_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_10G_CX4: + if (bp->port.supported[idx] & + SUPPORTED_10000baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_10000; + bp->port.advertising[idx] |= + (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + } else { + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + case PORT_FEATURE_LINK_SPEED_20G: + bp->link_params.req_line_speed[idx] = SPEED_20000; + + break; + default: + BNX2X_ERR("NVRAM config error. 
" + "BAD link speed link_config 0x%x\n", + link_config); + bp->link_params.req_line_speed[idx] = + SPEED_AUTO_NEG; + bp->port.advertising[idx] = + bp->port.supported[idx]; + break; + } + + bp->link_params.req_flow_ctrl[idx] = (link_config & + PORT_FEATURE_FLOW_CONTROL_MASK); + if ((bp->link_params.req_flow_ctrl[idx] == + BNX2X_FLOW_CTRL_AUTO) && + !(bp->port.supported[idx] & SUPPORTED_Autoneg)) { + bp->link_params.req_flow_ctrl[idx] = + BNX2X_FLOW_CTRL_NONE; + } + + BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl" + " 0x%x advertising 0x%x\n", + bp->link_params.req_line_speed[idx], + bp->link_params.req_duplex[idx], + bp->link_params.req_flow_ctrl[idx], + bp->port.advertising[idx]); + } +} + +static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) +{ + mac_hi = cpu_to_be16(mac_hi); + mac_lo = cpu_to_be32(mac_lo); + memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); + memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); +} + +static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 config; + u32 ext_phy_type, ext_phy_config; + + bp->link_params.bp = bp; + bp->link_params.port = port; + + bp->link_params.lane_config = + SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); + + bp->link_params.speed_cap_mask[0] = + SHMEM_RD(bp, + dev_info.port_hw_config[port].speed_capability_mask); + bp->link_params.speed_cap_mask[1] = + SHMEM_RD(bp, + dev_info.port_hw_config[port].speed_capability_mask2); + bp->port.link_config[0] = + SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); + + bp->port.link_config[1] = + SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); + + bp->link_params.multi_phy_config = + SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); + /* If the device is capable of WoL, set the default state according + * to the HW + */ + config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); + bp->wol = (!(bp->flags & NO_WOL_FLAG) && + (config & PORT_FEATURE_WOL_ENABLED)); + + BNX2X_DEV_INFO("lane_config 0x%08x " + "speed_cap_mask0 0x%08x link_config0 0x%08x\n", + bp->link_params.lane_config, + bp->link_params.speed_cap_mask[0], + bp->port.link_config[0]); + + bp->link_params.switch_cfg = (bp->port.link_config[0] & + PORT_FEATURE_CONNECTED_SWITCH_MASK); + bnx2x_phy_probe(&bp->link_params); + bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); + + bnx2x_link_settings_requested(bp); + + /* + * If connected directly, work with the internal PHY, otherwise, work + * with the external PHY + */ + ext_phy_config = + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config); + ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); + if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + bp->mdio.prtad = bp->port.phy_addr; + + else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && + (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) + bp->mdio.prtad = + XGXS_EXT_PHY_ADDR(ext_phy_config); + + /* + * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) + * In MF mode, it is set to cover self test cases + */ + if (IS_MF(bp)) + bp->port.need_hw_lock = 1; + else + bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, + bp->common.shmem_base, + bp->common.shmem2_base); +} + +#ifdef BCM_CNIC +static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_ABS_FUNC(bp); + + u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, + drv_lic_key[port].max_iscsi_conn); + u32 max_fcoe_conn = 
FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, + drv_lic_key[port].max_fcoe_conn); + + /* Get the number of maximum allowed iSCSI and FCoE connections */ + bp->cnic_eth_dev.max_iscsi_conn = + (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> + BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; + + bp->cnic_eth_dev.max_fcoe_conn = + (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> + BNX2X_MAX_FCOE_INIT_CONN_SHIFT; + + /* Read the WWN: */ + if (!IS_MF(bp)) { + /* Port info */ + bp->cnic_eth_dev.fcoe_wwn_port_name_hi = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_port_name_upper); + bp->cnic_eth_dev.fcoe_wwn_port_name_lo = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_port_name_lower); + + /* Node info */ + bp->cnic_eth_dev.fcoe_wwn_node_name_hi = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_node_name_upper); + bp->cnic_eth_dev.fcoe_wwn_node_name_lo = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_node_name_lower); + } else if (!IS_MF_SD(bp)) { + u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); + + /* + * Read the WWN info only if the FCoE feature is enabled for + * this function. + */ + if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { + /* Port info */ + bp->cnic_eth_dev.fcoe_wwn_port_name_hi = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_port_name_upper); + bp->cnic_eth_dev.fcoe_wwn_port_name_lo = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_port_name_lower); + + /* Node info */ + bp->cnic_eth_dev.fcoe_wwn_node_name_hi = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_node_name_upper); + bp->cnic_eth_dev.fcoe_wwn_node_name_lo = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_node_name_lower); + } + } + + BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n", + bp->cnic_eth_dev.max_iscsi_conn, + bp->cnic_eth_dev.max_fcoe_conn); + + /* + * If maximum allowed number of connections is zero - + * disable the feature. + */ + if (!bp->cnic_eth_dev.max_iscsi_conn) + bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; + + if (!bp->cnic_eth_dev.max_fcoe_conn) + bp->flags |= NO_FCOE_FLAG; +} +#endif + +static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) +{ + u32 val, val2; + int func = BP_ABS_FUNC(bp); + int port = BP_PORT(bp); +#ifdef BCM_CNIC + u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; + u8 *fip_mac = bp->fip_mac; +#endif + + /* Zero primary MAC configuration */ + memset(bp->dev->dev_addr, 0, ETH_ALEN); + + if (BP_NOMCP(bp)) { + BNX2X_ERROR("warning: random MAC workaround active\n"); + random_ether_addr(bp->dev->dev_addr); + } else if (IS_MF(bp)) { + val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); + val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); + if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && + (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) + bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + +#ifdef BCM_CNIC + /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or + * FCoE MAC then the appropriate feature should be disabled. + */ + if (IS_MF_SI(bp)) { + u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); + if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { + val2 = MF_CFG_RD(bp, func_ext_config[func]. + iscsi_mac_addr_upper); + val = MF_CFG_RD(bp, func_ext_config[func]. + iscsi_mac_addr_lower); + bnx2x_set_mac_buf(iscsi_mac, val, val2); + BNX2X_DEV_INFO("Read iSCSI MAC: " + BNX2X_MAC_FMT"\n", + BNX2X_MAC_PRN_LIST(iscsi_mac)); + } else + bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; + + if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { + val2 = MF_CFG_RD(bp, func_ext_config[func]. 
+ fcoe_mac_addr_upper); + val = MF_CFG_RD(bp, func_ext_config[func]. + fcoe_mac_addr_lower); + bnx2x_set_mac_buf(fip_mac, val, val2); + BNX2X_DEV_INFO("Read FCoE L2 MAC to " + BNX2X_MAC_FMT"\n", + BNX2X_MAC_PRN_LIST(fip_mac)); + + } else + bp->flags |= NO_FCOE_FLAG; + } +#endif + } else { + /* in SF read MACs from port configuration */ + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); + bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + +#ifdef BCM_CNIC + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. + iscsi_mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port]. + iscsi_mac_lower); + bnx2x_set_mac_buf(iscsi_mac, val, val2); + + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. + fcoe_fip_mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port]. + fcoe_fip_mac_lower); + bnx2x_set_mac_buf(fip_mac, val, val2); +#endif + } + + memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); + memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); + +#ifdef BCM_CNIC + /* Set the FCoE MAC in MF_SD mode */ + if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp)) + memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); + + /* Disable iSCSI if MAC configuration is + * invalid. + */ + if (!is_valid_ether_addr(iscsi_mac)) { + bp->flags |= NO_ISCSI_FLAG; + memset(iscsi_mac, 0, ETH_ALEN); + } + + /* Disable FCoE if MAC configuration is + * invalid. + */ + if (!is_valid_ether_addr(fip_mac)) { + bp->flags |= NO_FCOE_FLAG; + memset(bp->fip_mac, 0, ETH_ALEN); + } +#endif + + if (!is_valid_ether_addr(bp->dev->dev_addr)) + dev_err(&bp->pdev->dev, + "bad Ethernet MAC address configuration: " + BNX2X_MAC_FMT", change it manually before bringing up " + "the appropriate network interface\n", + BNX2X_MAC_PRN_LIST(bp->dev->dev_addr)); +} + +static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) +{ + int /*abs*/func = BP_ABS_FUNC(bp); + int vn; + u32 val = 0; + int rc = 0; + + bnx2x_get_common_hwinfo(bp); + + /* + * initialize IGU parameters + */ + if (CHIP_IS_E1x(bp)) { + bp->common.int_block = INT_BLOCK_HC; + + bp->igu_dsb_id = DEF_SB_IGU_ID; + bp->igu_base_sb = 0; + } else { + bp->common.int_block = INT_BLOCK_IGU; + val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + int tout = 5000; + + BNX2X_DEV_INFO("FORCING Normal Mode\n"); + + val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); + REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); + REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); + + while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { + tout--; + usleep_range(1000, 1000); + } + + if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { + dev_err(&bp->pdev->dev, + "FORCING Normal Mode failed!!!\n"); + return -EPERM; + } + } + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); + bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; + } else + BNX2X_DEV_INFO("IGU Normal Mode\n"); + + bnx2x_get_igu_cam_info(bp); + + } + + /* + * set base FW non-default (fast path) status block id, this value is + * used to initialize the fw_sb_id saved on the fp/queue structure to + * determine the id used by the FW. + */ + if (CHIP_IS_E1x(bp)) + bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); + else /* + * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of + * the same queue are indicated on the same IGU SB). So we prefer + * FW and IGU SBs to be the same value. 
+ */
+		bp->base_fw_ndsb = bp->igu_base_sb;
+
+	BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
+		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
+		       bp->igu_sb_cnt, bp->base_fw_ndsb);
+
+	/*
+	 * Initialize MF configuration
+	 */
+
+	bp->mf_ov = 0;
+	bp->mf_mode = 0;
+	vn = BP_E1HVN(bp);
+
+	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
+			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
+			       (u32)offsetof(struct shmem2_region,
+					     mf_cfg_addr));
+
+		if (SHMEM2_HAS(bp, mf_cfg_addr))
+			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
+		else
+			bp->common.mf_cfg_base = bp->common.shmem_base +
+				offsetof(struct shmem_region, func_mb) +
+				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
+		/*
+		 * get mf configuration:
+		 * 1. existence of MF configuration
+		 * 2. MAC address must be legal (check only upper bytes)
+		 *    for Switch-Independent mode;
+		 *    OVLAN must be legal for Switch-Dependent mode
+		 * 3. SF_MODE configures specific MF mode
+		 */
+		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+			/* get mf configuration */
+			val = SHMEM_RD(bp,
+				       dev_info.shared_feature_config.config);
+			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
+
+			switch (val) {
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+				val = MF_CFG_RD(bp, func_mf_config[func].
+						mac_upper);
+				/* check for legal mac (upper bytes) */
+				if (val != 0xffff) {
+					bp->mf_mode = MULTI_FUNCTION_SI;
+					bp->mf_config[vn] = MF_CFG_RD(bp,
+						func_mf_config[func].config);
+				} else
+					BNX2X_DEV_INFO("illegal MAC address "
+						       "for SI\n");
+				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+				/* get OV configuration */
+				val = MF_CFG_RD(bp,
+					func_mf_config[FUNC_0].e1hov_tag);
+				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+
+				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+					bp->mf_mode = MULTI_FUNCTION_SD;
+					bp->mf_config[vn] = MF_CFG_RD(bp,
+						func_mf_config[func].config);
+				} else
+					BNX2X_DEV_INFO("illegal OV for SD\n");
+				break;
+			default:
+				/* Unknown configuration: reset mf_config */
+				bp->mf_config[vn] = 0;
+				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
+			}
+		}
+
+		BNX2X_DEV_INFO("%s function mode\n",
+			       IS_MF(bp) ? 
"multi" : "single"); + + switch (bp->mf_mode) { + case MULTI_FUNCTION_SD: + val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK; + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + bp->mf_ov = val; + bp->path_has_ovlan = true; + + BNX2X_DEV_INFO("MF OV for func %d is %d " + "(0x%04x)\n", func, bp->mf_ov, + bp->mf_ov); + } else { + dev_err(&bp->pdev->dev, + "No valid MF OV for func %d, " + "aborting\n", func); + return -EPERM; + } + break; + case MULTI_FUNCTION_SI: + BNX2X_DEV_INFO("func %d is in MF " + "switch-independent mode\n", func); + break; + default: + if (vn) { + dev_err(&bp->pdev->dev, + "VN %d is in a single function mode, " + "aborting\n", vn); + return -EPERM; + } + break; + } + + /* check if other port on the path needs ovlan: + * Since MF configuration is shared between ports + * Possible mixed modes are only + * {SF, SI} {SF, SD} {SD, SF} {SI, SF} + */ + if (CHIP_MODE_IS_4_PORT(bp) && + !bp->path_has_ovlan && + !IS_MF(bp) && + bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { + u8 other_port = !BP_PORT(bp); + u8 other_func = BP_PATH(bp) + 2*other_port; + val = MF_CFG_RD(bp, + func_mf_config[other_func].e1hov_tag); + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) + bp->path_has_ovlan = true; + } + } + + /* adjust igu_sb_cnt to MF for E1x */ + if (CHIP_IS_E1x(bp) && IS_MF(bp)) + bp->igu_sb_cnt /= E1HVN_MAX; + + /* port info */ + bnx2x_get_port_hwinfo(bp); + + if (!BP_NOMCP(bp)) { + bp->fw_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); + } + + /* Get MAC addresses */ + bnx2x_get_mac_hwinfo(bp); + +#ifdef BCM_CNIC + bnx2x_get_cnic_info(bp); +#endif + + /* Get current FW pulse sequence */ + if (!BP_NOMCP(bp)) { + int mb_idx = BP_FW_MB_IDX(bp); + + bp->fw_drv_pulse_wr_seq = + (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) & + DRV_PULSE_SEQ_MASK); + BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); + } + + return rc; +} + +static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) +{ + int cnt, i, block_end, rodi; + char vpd_data[BNX2X_VPD_LEN+1]; + char str_id_reg[VENDOR_ID_LEN+1]; + char str_id_cap[VENDOR_ID_LEN+1]; + u8 len; + + cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data); + memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); + + if (cnt < BNX2X_VPD_LEN) + goto out_not_found; + + i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN, + PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto out_not_found; + + + block_end = i + PCI_VPD_LRDT_TAG_SIZE + + pci_vpd_lrdt_size(&vpd_data[i]); + + i += PCI_VPD_LRDT_TAG_SIZE; + + if (block_end > BNX2X_VPD_LEN) + goto out_not_found; + + rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (rodi < 0) + goto out_not_found; + + len = pci_vpd_info_field_size(&vpd_data[rodi]); + + if (len != VENDOR_ID_LEN) + goto out_not_found; + + rodi += PCI_VPD_INFO_FLD_HDR_SIZE; + + /* vendor specific info */ + snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); + snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); + if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || + !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { + + rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (rodi >= 0) { + len = pci_vpd_info_field_size(&vpd_data[rodi]); + + rodi += PCI_VPD_INFO_FLD_HDR_SIZE; + + if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { + memcpy(bp->fw_ver, &vpd_data[rodi], len); + bp->fw_ver[len] = ' '; + } + } + return; + } 
+out_not_found: + return; +} + +static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) +{ + u32 flags = 0; + + if (CHIP_REV_IS_FPGA(bp)) + SET_FLAGS(flags, MODE_FPGA); + else if (CHIP_REV_IS_EMUL(bp)) + SET_FLAGS(flags, MODE_EMUL); + else + SET_FLAGS(flags, MODE_ASIC); + + if (CHIP_MODE_IS_4_PORT(bp)) + SET_FLAGS(flags, MODE_PORT4); + else + SET_FLAGS(flags, MODE_PORT2); + + if (CHIP_IS_E2(bp)) + SET_FLAGS(flags, MODE_E2); + else if (CHIP_IS_E3(bp)) { + SET_FLAGS(flags, MODE_E3); + if (CHIP_REV(bp) == CHIP_REV_Ax) + SET_FLAGS(flags, MODE_E3_A0); + else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ + SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); + } + + if (IS_MF(bp)) { + SET_FLAGS(flags, MODE_MF); + switch (bp->mf_mode) { + case MULTI_FUNCTION_SD: + SET_FLAGS(flags, MODE_MF_SD); + break; + case MULTI_FUNCTION_SI: + SET_FLAGS(flags, MODE_MF_SI); + break; + } + } else + SET_FLAGS(flags, MODE_SF); + +#if defined(__LITTLE_ENDIAN) + SET_FLAGS(flags, MODE_LITTLE_ENDIAN); +#else /*(__BIG_ENDIAN)*/ + SET_FLAGS(flags, MODE_BIG_ENDIAN); +#endif + INIT_MODE_FLAGS(bp) = flags; +} + +static int __devinit bnx2x_init_bp(struct bnx2x *bp) +{ + int func; + int timer_interval; + int rc; + + mutex_init(&bp->port.phy_mutex); + mutex_init(&bp->fw_mb_mutex); + spin_lock_init(&bp->stats_lock); +#ifdef BCM_CNIC + mutex_init(&bp->cnic_mutex); +#endif + + INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); + INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); + INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); + rc = bnx2x_get_hwinfo(bp); + if (rc) + return rc; + + bnx2x_set_modes_bitmap(bp); + + rc = bnx2x_alloc_mem_bp(bp); + if (rc) + return rc; + + bnx2x_read_fwinfo(bp); + + func = BP_FUNC(bp); + + /* need to reset chip if undi was active */ + if (!BP_NOMCP(bp)) + bnx2x_undi_unload(bp); + + if (CHIP_REV_IS_FPGA(bp)) + dev_err(&bp->pdev->dev, "FPGA detected\n"); + + if (BP_NOMCP(bp) && (func == 0)) + dev_err(&bp->pdev->dev, "MCP disabled, " + "must load devices in order!\n"); + + bp->multi_mode = multi_mode; + + /* Set TPA flags */ + if (disable_tpa) { + bp->flags &= ~TPA_ENABLE_FLAG; + bp->dev->features &= ~NETIF_F_LRO; + } else { + bp->flags |= TPA_ENABLE_FLAG; + bp->dev->features |= NETIF_F_LRO; + } + bp->disable_tpa = disable_tpa; + + if (CHIP_IS_E1(bp)) + bp->dropless_fc = 0; + else + bp->dropless_fc = dropless_fc; + + bp->mrrs = mrrs; + + bp->tx_ring_size = MAX_TX_AVAIL; + + /* make sure that the numbers are in the right granularity */ + bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; + bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; + + timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); + bp->current_interval = (poll ? 
poll : timer_interval);
+
+	init_timer(&bp->timer);
+	bp->timer.expires = jiffies + bp->current_interval;
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = bnx2x_timer;
+
+	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
+	bnx2x_dcbx_init_params(bp);
+
+#ifdef BCM_CNIC
+	if (CHIP_IS_E1x(bp))
+		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
+	else
+		bp->cnic_base_cl_id = FP_SB_MAX_E2;
+#endif
+
+	/* multiple tx priority */
+	if (CHIP_IS_E1x(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
+	if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
+	if (CHIP_IS_E3B0(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+
+	return rc;
+}
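The timer wiring above uses the classic (2011-era) kernel timer API, where the callback receives an opaque unsigned long and the owner re-arms explicitly. A standalone sketch of the same pattern, with hypothetical names rather than the driver's:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_dev {
    	struct timer_list timer;
    	/* ... device state ... */
    };

    /* Old-style periodic timer sketch; my_dev and my_timer_fn are
     * illustrative names, not bnx2x's. */
    static void my_timer_fn(unsigned long data)
    {
    	struct my_dev *dev = (struct my_dev *)data;

    	/* ... periodic work on dev ... */

    	mod_timer(&dev->timer, jiffies + HZ);	/* re-arm in 1s */
    }

    static void my_timer_start(struct my_dev *dev)
    {
    	init_timer(&dev->timer);
    	dev->timer.expires = jiffies + HZ;
    	dev->timer.data = (unsigned long)dev;
    	dev->timer.function = my_timer_fn;
    	add_timer(&dev->timer);
    }

Note that bnx2x only prepares the timer fields here; the timer is armed later, in the load path.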
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+/*
+ * net_device service functions
+ */
+
+/* called with rtnl_lock */
+static int bnx2x_open(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	bool global = false;
+	int other_engine = BP_PATH(bp) ? 0 : 1;
+	u32 other_load_counter, load_counter;
+
+	netif_carrier_off(dev);
+
+	bnx2x_set_power_state(bp, PCI_D0);
+
+	other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
+	load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+
+	/*
+	 * If a parity error happened during the unload, then attentions
+	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
+	 * want the first function loaded on the current engine to
+	 * complete the recovery.
+	 */
+	if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+	    bnx2x_chk_parity_attn(bp, &global, true))
+		do {
+			/*
+			 * If there are attentions and they are in global
+			 * blocks, set the GLOBAL_RESET bit regardless of
+			 * whether it will be this function that completes
+			 * the recovery or not.
+			 */
+			if (global)
+				bnx2x_set_reset_global(bp);
+
+			/*
+			 * Only the first function on the current engine should
+			 * try to recover in open. In case of attentions in
+			 * global blocks only the first in the chip should try
+			 * to recover.
+			 */
+			if ((!load_counter &&
+			     (!global || !other_load_counter)) &&
+			    bnx2x_trylock_leader_lock(bp) &&
+			    !bnx2x_leader_reset(bp)) {
+				netdev_info(bp->dev, "Recovered in open\n");
+				break;
+			}
+
+			/* recovery has failed... */
+			bnx2x_set_power_state(bp, PCI_D3hot);
+			bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+			netdev_err(bp->dev, "Recovery flow hasn't been"
+				   " properly completed yet. Try again later."
+				   " If you still see this message after a"
+				   " few retries then a power cycle is"
+				   " required.\n");
+
+			return -EAGAIN;
+		} while (0);
+
+	bp->recovery_state = BNX2X_RECOVERY_DONE;
+	return bnx2x_nic_load(bp, LOAD_OPEN);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_close(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* Unload the driver, release IRQs */
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+	/* Power off */
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
+	return 0;
+}
+
+static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+					struct bnx2x_mcast_ramrod_params *p)
+{
+	int mc_count = netdev_mc_count(bp->dev);
+	struct bnx2x_mcast_list_elem *mc_mac =
+		kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+	struct netdev_hw_addr *ha;
+
+	if (!mc_mac)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&p->mcast_list);
+
+	netdev_for_each_mc_addr(ha, bp->dev) {
+		mc_mac->mac = bnx2x_mc_addr(ha);
+		list_add_tail(&mc_mac->link, &p->mcast_list);
+		mc_mac++;
+	}
+
+	p->mcast_list_len = mc_count;
+
+	return 0;
+}
+
+static inline void bnx2x_free_mcast_macs_list(
+	struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_mcast_list_elem *mc_mac =
+		list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
+				 link);
+
+	WARN_ON(!mc_mac);
+	kfree(mc_mac);
+}
+
+/**
+ * bnx2x_set_uc_list - configure a new list of unicast MACs
+ *
+ * @bp: driver handle
+ *
+ * We will use zero (0) as a MAC type for these MACs.
+ */
+static inline int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+	int rc;
+	struct net_device *dev = bp->dev;
+	struct netdev_hw_addr *ha;
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+	unsigned long ramrod_flags = 0;
+
+	/* First schedule a cleanup of the old configuration */
+	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
+	if (rc < 0) {
+		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
+		return rc;
+	}
+
+	netdev_for_each_uc_addr(ha, dev) {
+		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
+				       BNX2X_UC_LIST_MAC, &ramrod_flags);
+		if (rc < 0) {
+			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
+				  rc);
+			return rc;
+		}
+	}
+
+	/* Execute the pending commands */
+	__set_bit(RAMROD_CONT, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
+				 BNX2X_UC_LIST_MAC, &ramrod_flags);
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+	struct net_device *dev = bp->dev;
+	struct bnx2x_mcast_ramrod_params rparam = {0};
+	int rc = 0;
+
+	rparam.mcast_obj = &bp->mcast_obj;
+
+	/* first, clear all configured multicast MACs */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0) {
+		BNX2X_ERR("Failed to clear multicast "
+			  "configuration: %d\n", rc);
+		return rc;
+	}
+
+	/* then, configure a new MACs list */
+	if (netdev_mc_count(dev)) {
+		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+		if (rc) {
+			BNX2X_ERR("Failed to create multicast MACs "
+				  "list: %d\n", rc);
+			return rc;
+		}
+
+		/* Now add the new MACs */
+		rc = bnx2x_config_mcast(bp, &rparam,
+					BNX2X_MCAST_CMD_ADD);
+		if (rc < 0)
+			BNX2X_ERR("Failed to set a new multicast "
+				  "configuration: %d\n", rc);
+
+		bnx2x_free_mcast_macs_list(&rparam);
+	}
+
+	return rc;
+}
+
+/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
+void bnx2x_set_rx_mode(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
+
+	if (bp->state != BNX2X_STATE_OPEN) {
+		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+		return;
+	}
+
+	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", 
bp->dev->flags); + + if (dev->flags & IFF_PROMISC) + rx_mode = BNX2X_RX_MODE_PROMISC; + else if ((dev->flags & IFF_ALLMULTI) || + ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && + CHIP_IS_E1(bp))) + rx_mode = BNX2X_RX_MODE_ALLMULTI; + else { + /* some multicasts */ + if (bnx2x_set_mc_list(bp) < 0) + rx_mode = BNX2X_RX_MODE_ALLMULTI; + + if (bnx2x_set_uc_list(bp) < 0) + rx_mode = BNX2X_RX_MODE_PROMISC; + } + + bp->rx_mode = rx_mode; + + /* Schedule the rx_mode command */ + if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { + set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); + return; + } + + bnx2x_set_storm_rx_mode(bp); +} + +/* called with rtnl_lock */ +static int bnx2x_mdio_read(struct net_device *netdev, int prtad, + int devad, u16 addr) +{ + struct bnx2x *bp = netdev_priv(netdev); + u16 value; + int rc; + + DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", + prtad, devad, addr); + + /* The HW expects different devad if CL22 is used */ + devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; + + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); + bnx2x_release_phy_lock(bp); + DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); + + if (!rc) + rc = value; + return rc; +} + +/* called with rtnl_lock */ +static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct bnx2x *bp = netdev_priv(netdev); + int rc; + + DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x," + " value 0x%x\n", prtad, devad, addr, value); + + /* The HW expects different devad if CL22 is used */ + devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; + + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); + bnx2x_release_phy_lock(bp); + return rc; +} + +/* called with rtnl_lock */ +static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct bnx2x *bp = netdev_priv(dev); + struct mii_ioctl_data *mdio = if_mii(ifr); + + DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", + mdio->phy_id, mdio->reg_num, mdio->val_in); + + if (!netif_running(dev)) + return -EAGAIN; + + return mdio_mii_ioctl(&bp->mdio, mdio, cmd); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void poll_bnx2x(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + disable_irq(bp->pdev->irq); + bnx2x_interrupt(bp->pdev->irq, dev); + enable_irq(bp->pdev->irq); +} +#endif + +static const struct net_device_ops bnx2x_netdev_ops = { + .ndo_open = bnx2x_open, + .ndo_stop = bnx2x_close, + .ndo_start_xmit = bnx2x_start_xmit, + .ndo_select_queue = bnx2x_select_queue, + .ndo_set_rx_mode = bnx2x_set_rx_mode, + .ndo_set_mac_address = bnx2x_change_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = bnx2x_ioctl, + .ndo_change_mtu = bnx2x_change_mtu, + .ndo_fix_features = bnx2x_fix_features, + .ndo_set_features = bnx2x_set_features, + .ndo_tx_timeout = bnx2x_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_bnx2x, +#endif + .ndo_setup_tc = bnx2x_setup_tc, + +#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) + .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, +#endif +}; + +static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) +{ + struct device *dev = &bp->pdev->dev; + + if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { + bp->flags |= USING_DAC_FLAG; + if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { + dev_err(dev, "dma_set_coherent_mask failed, " + 
"aborting\n"); + return -EIO; + } + } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { + dev_err(dev, "System does not support DMA, aborting\n"); + return -EIO; + } + + return 0; +} + +static int __devinit bnx2x_init_dev(struct pci_dev *pdev, + struct net_device *dev, + unsigned long board_type) +{ + struct bnx2x *bp; + int rc; + + SET_NETDEV_DEV(dev, &pdev->dev); + bp = netdev_priv(dev); + + bp->dev = dev; + bp->pdev = pdev; + bp->flags = 0; + bp->pf_num = PCI_FUNC(pdev->devfn); + + rc = pci_enable_device(pdev); + if (rc) { + dev_err(&bp->pdev->dev, + "Cannot enable PCI device, aborting\n"); + goto err_out; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&bp->pdev->dev, + "Cannot find PCI device base address, aborting\n"); + rc = -ENODEV; + goto err_out_disable; + } + + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&bp->pdev->dev, "Cannot find second PCI device" + " base address, aborting\n"); + rc = -ENODEV; + goto err_out_disable; + } + + if (atomic_read(&pdev->enable_cnt) == 1) { + rc = pci_request_regions(pdev, DRV_MODULE_NAME); + if (rc) { + dev_err(&bp->pdev->dev, + "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable; + } + + pci_set_master(pdev); + pci_save_state(pdev); + } + + bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (bp->pm_cap == 0) { + dev_err(&bp->pdev->dev, + "Cannot find power management capability, aborting\n"); + rc = -EIO; + goto err_out_release; + } + + if (!pci_is_pcie(pdev)) { + dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); + rc = -EIO; + goto err_out_release; + } + + rc = bnx2x_set_coherency_mask(bp); + if (rc) + goto err_out_release; + + dev->mem_start = pci_resource_start(pdev, 0); + dev->base_addr = dev->mem_start; + dev->mem_end = pci_resource_end(pdev, 0); + + dev->irq = pdev->irq; + + bp->regview = pci_ioremap_bar(pdev, 0); + if (!bp->regview) { + dev_err(&bp->pdev->dev, + "Cannot map register space, aborting\n"); + rc = -ENOMEM; + goto err_out_release; + } + + bnx2x_set_power_state(bp, PCI_D0); + + /* clean indirect addresses */ + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, + PCICFG_VENDOR_ID_OFFSET); + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); + + /* + * Enable internal target-read (in case we are probed after PF FLR). + * Must be done prior to any BAR read access. 
Only for 57712 and up + */ + if (board_type != BCM57710 && + board_type != BCM57711 && + board_type != BCM57711E) + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); + + /* Reset the load counter */ + bnx2x_clear_load_cnt(bp); + + dev->watchdog_timeo = TX_TIMEOUT; + + dev->netdev_ops = &bnx2x_netdev_ops; + bnx2x_set_ethtool_ops(dev); + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX; + + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; + + dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX; + if (bp->flags & USING_DAC_FLAG) + dev->features |= NETIF_F_HIGHDMA; + + /* Add Loopback capability to the device */ + dev->hw_features |= NETIF_F_LOOPBACK; + +#ifdef BCM_DCBNL + dev->dcbnl_ops = &bnx2x_dcbnl_ops; +#endif + + /* get_port_hwinfo() will set prtad and mmds properly */ + bp->mdio.prtad = MDIO_PRTAD_NONE; + bp->mdio.mmds = 0; + bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + bp->mdio.dev = dev; + bp->mdio.mdio_read = bnx2x_mdio_read; + bp->mdio.mdio_write = bnx2x_mdio_write; + + return 0; + +err_out_release: + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + +err_out_disable: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + +err_out: + return rc; +} + +static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp, + int *width, int *speed) +{ + u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); + + *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; + + /* return value of 1=2.5GHz 2=5GHz */ + *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; +} + +static int bnx2x_check_firmware(struct bnx2x *bp) +{ + const struct firmware *firmware = bp->firmware; + struct bnx2x_fw_file_hdr *fw_hdr; + struct bnx2x_fw_file_section *sections; + u32 offset, len, num_ops; + u16 *ops_offsets; + int i; + const u8 *fw_ver; + + if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) + return -EINVAL; + + fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; + sections = (struct bnx2x_fw_file_section *)fw_hdr; + + /* Make sure none of the offsets and sizes make us read beyond + * the end of the firmware data */ + for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) { + offset = be32_to_cpu(sections[i].offset); + len = be32_to_cpu(sections[i].len); + if (offset + len > firmware->size) { + dev_err(&bp->pdev->dev, + "Section %d length is out of bounds\n", i); + return -EINVAL; + } + } + + /* Likewise for the init_ops offsets */ + offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); + ops_offsets = (u16 *)(firmware->data + offset); + num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); + + for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { + if (be16_to_cpu(ops_offsets[i]) > num_ops) { + dev_err(&bp->pdev->dev, + "Section offset %d is out of bounds\n", i); + return -EINVAL; + } + } + + /* Check FW version */ + offset = be32_to_cpu(fw_hdr->fw_version.offset); + fw_ver = firmware->data + offset; + if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) || + (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || + (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || + (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { + dev_err(&bp->pdev->dev, + "Bad FW version:%d.%d.%d.%d. 
Should be %d.%d.%d.%d\n",
+			fw_ver[0], fw_ver[1], fw_ver[2],
+			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
+			BCM_5710_FW_MINOR_VERSION,
+			BCM_5710_FW_REVISION_VERSION,
+			BCM_5710_FW_ENGINEERING_VERSION);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	u32 *target = (u32 *)_target;
+	u32 i;
+
+	for (i = 0; i < n/4; i++)
+		target[i] = be32_to_cpu(source[i]);
+}
+
+/*
+ * Ops array is stored in the following format:
+ * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
+ */
+static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	struct raw_op *target = (struct raw_op *)_target;
+	u32 i, j, tmp;
+
+	for (i = 0, j = 0; i < n/8; i++, j += 2) {
+		tmp = be32_to_cpu(source[j]);
+		target[i].op = (tmp >> 24) & 0xff;
+		target[i].offset = tmp & 0xffffff;
+		target[i].raw_data = be32_to_cpu(source[j + 1]);
+	}
+}
+
+/*
+ * IRO array is stored in the following format:
+ * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
+ */
+static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	struct iro *target = (struct iro *)_target;
+	u32 i, j, tmp;
+
+	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
+		target[i].base = be32_to_cpu(source[j]);
+		j++;
+		tmp = be32_to_cpu(source[j]);
+		target[i].m1 = (tmp >> 16) & 0xffff;
+		target[i].m2 = tmp & 0xffff;
+		j++;
+		tmp = be32_to_cpu(source[j]);
+		target[i].m3 = (tmp >> 16) & 0xffff;
+		target[i].size = tmp & 0xffff;
+		j++;
+	}
+}
+
+static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be16 *source = (const __be16 *)_source;
+	u16 *target = (u16 *)_target;
+	u32 i;
+
+	for (i = 0; i < n/2; i++)
+		target[i] = be16_to_cpu(source[i]);
+}
+
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
+do { \
+	u32 len = be32_to_cpu(fw_hdr->arr.len); \
+	bp->arr = kmalloc(len, GFP_KERNEL); \
+	if (!bp->arr) { \
+		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+		goto lbl; \
+	} \
+	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
+	     (u8 *)bp->arr, len); \
+} while (0)
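BNX2X_ALLOC_AND_SET() hides a fair amount of control flow behind its three arguments (array name, error label, conversion helper), so one expansion written out may help. For the init_data blob, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n) expands to roughly this (a sketch, not verbatim preprocessor output):

    do {
    	u32 len = be32_to_cpu(fw_hdr->init_data.len);

    	bp->init_data = kmalloc(len, GFP_KERNEL);
    	if (!bp->init_data) {
    		pr_err("Failed to allocate %d bytes for init_data\n", len);
    		goto request_firmware_exit;
    	}
    	/* copy the section out of the firmware image, byte-swapping
    	 * each 32-bit word into host order */
    	be32_to_cpu_n(bp->firmware->data +
    		      be32_to_cpu(fw_hdr->init_data.offset),
    		      (u8 *)bp->init_data, len);
    } while (0);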
be32_to_cpu(fw_hdr->tsem_pram_data.offset); + INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->usem_int_table_data.offset); + INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->usem_pram_data.offset); + INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->xsem_int_table_data.offset); + INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->xsem_pram_data.offset); + INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->csem_int_table_data.offset); + INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->csem_pram_data.offset); + /* IRO */ + BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); + + return 0; + +iro_alloc_err: + kfree(bp->init_ops_offsets); +init_offsets_alloc_err: + kfree(bp->init_ops); +init_ops_alloc_err: + kfree(bp->init_data); +request_firmware_exit: + release_firmware(bp->firmware); + + return rc; +} + +static void bnx2x_release_firmware(struct bnx2x *bp) +{ + kfree(bp->init_ops_offsets); + kfree(bp->init_ops); + kfree(bp->init_data); + release_firmware(bp->firmware); +} + + +static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { + .init_hw_cmn_chip = bnx2x_init_hw_common_chip, + .init_hw_cmn = bnx2x_init_hw_common, + .init_hw_port = bnx2x_init_hw_port, + .init_hw_func = bnx2x_init_hw_func, + + .reset_hw_cmn = bnx2x_reset_common, + .reset_hw_port = bnx2x_reset_port, + .reset_hw_func = bnx2x_reset_func, + + .gunzip_init = bnx2x_gunzip_init, + .gunzip_end = bnx2x_gunzip_end, + + .init_fw = bnx2x_init_firmware, + .release_fw = bnx2x_release_firmware, +}; + +void bnx2x__init_func_obj(struct bnx2x *bp) +{ + /* Prepare DMAE related driver resources */ + bnx2x_setup_dmae(bp); + + bnx2x_init_func_obj(bp, &bp->func_obj, + bnx2x_sp(bp, func_rdata), + bnx2x_sp_mapping(bp, func_rdata), + &bnx2x_func_sp_drv); +} + +/* must be called after sriov-enable */ +static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp) +{ + int cid_count = BNX2X_L2_CID_COUNT(bp); + +#ifdef BCM_CNIC + cid_count += CNIC_CID_MAX; +#endif + return roundup(cid_count, QM_CID_ROUND); +} + +/** + * bnx2x_get_num_non_def_sbs - return the number of non-default SBs + * + * @pdev: pci device + * + */ +static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev) +{ + int pos; + u16 control; + + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + + /* + * If MSI-X is not supported - return number of SBs needed to support + * one fast path queue: one FP queue + SB for CNIC + */ + if (!pos) + return 1 + CNIC_PRESENT; + + /* + * The value in the PCI configuration space is the index of the last + * entry, namely one less than the actual size of the table, which is + * exactly what we want to return from this function: number of all SBs + * without the default SB. + */ + pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); + return control & PCI_MSIX_FLAGS_QSIZE; +} + +static int __devinit bnx2x_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct bnx2x *bp; + int pcie_width, pcie_speed; + int rc, max_non_def_sbs; + int rx_count, tx_count, rss_count; + /* + * An estimated maximum supported CoS number according to the chip + * version. + * We will try to roughly estimate the maximum number of CoSes this chip + * may support in order to minimize the memory allocated for Tx + * netdev_queue's. + */
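A side note on the init_ops blob unpacked by bnx2x_prep_ops() earlier in this hunk: each entry is two big-endian 32-bit words, {op(8bit), offset(24bit)} followed by the raw data word. A standalone sketch of that unpacking, with a hypothetical sample record (plain userspace C, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the on-disk layout described above:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)} */
struct raw_op_example {
	uint8_t  op;
	uint32_t offset;   /* 24 significant bits */
	uint32_t raw_data;
};

static uint32_t be32_load(const uint8_t *p)
{
	/* Assemble a big-endian 32-bit word byte by byte. */
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	/* Hypothetical 8-byte record: op 0x02, offset 0x0103a4, data 0xdeadbeef */
	const uint8_t rec[8] = { 0x02, 0x01, 0x03, 0xa4,
				 0xde, 0xad, 0xbe, 0xef };
	struct raw_op_example e;
	uint32_t tmp = be32_load(&rec[0]);

	e.op = (tmp >> 24) & 0xff;        /* top byte is the opcode */
	e.offset = tmp & 0xffffff;        /* low 24 bits are the offset */
	e.raw_data = be32_load(&rec[4]);  /* second word is raw data */

	printf("op=0x%02x offset=0x%06x data=0x%08x\n",
	       e.op, e.offset, e.raw_data);
	return 0;
}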
+ /* + * This number will be accurately calculated during the + * initialization of bp->max_cos based on the chip version and chip + * revision in bnx2x_init_bp(). + */ + u8 max_cos_est = 0; + + switch (ent->driver_data) { + case BCM57710: + case BCM57711: + case BCM57711E: + max_cos_est = BNX2X_MULTI_TX_COS_E1X; + break; + + case BCM57712: + case BCM57712_MF: + max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0; + break; + + case BCM57800: + case BCM57800_MF: + case BCM57810: + case BCM57810_MF: + case BCM57840: + case BCM57840_MF: + max_cos_est = BNX2X_MULTI_TX_COS_E3B0; + break; + + default: + pr_err("Unknown board_type (%ld), aborting\n", + ent->driver_data); + return -ENODEV; + } + + max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); + + /* !!! FIXME !!! + * Do not allow the maximum SB count to grow above 16 + * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48. + * We will use the FP_SB_MAX_E1x macro for this matter. + */ + max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs); + + WARN_ON(!max_non_def_sbs); + + /* Maximum number of RSS queues: one IGU SB goes to CNIC */ + rss_count = max_non_def_sbs - CNIC_PRESENT; + + /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ + rx_count = rss_count + FCOE_PRESENT; + + /* + * Maximum number of netdev Tx queues: + * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 + */ + tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT; + + /* dev zeroed in init_etherdev */ + dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); + if (!dev) { + dev_err(&pdev->dev, "Cannot allocate net device\n"); + return -ENOMEM; + } + + bp = netdev_priv(dev); + + DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n", + tx_count, rx_count); + + bp->igu_sb_cnt = max_non_def_sbs; + bp->msg_enable = debug; + pci_set_drvdata(pdev, dev); + + rc = bnx2x_init_dev(pdev, dev, ent->driver_data); + if (rc < 0) { + free_netdev(dev); + return rc; + } + + DP(NETIF_MSG_DRV, "max_non_def_sbs %d", max_non_def_sbs); + + rc = bnx2x_init_bp(bp); + if (rc) + goto init_one_exit; + + /* + * Map doorbells here as we need the real value of bp->max_cos which + * is initialized in bnx2x_init_bp(). + */ + bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), + min_t(u64, BNX2X_DB_SIZE(bp), + pci_resource_len(pdev, 2))); + if (!bp->doorbells) { + dev_err(&bp->pdev->dev, + "Cannot map doorbell space, aborting\n"); + rc = -ENOMEM; + goto init_one_exit; + } + + /* calc qm_cid_count */ + bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); + +#ifdef BCM_CNIC + /* disable FCOE L2 queue for E1x and E3 */ + if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp)) + bp->flags |= NO_FCOE_FLAG; + +#endif + + /* Configure interrupt mode: try to enable MSI-X/MSI if + * needed, set bp->num_queues appropriately. + */ + bnx2x_set_int_mode(bp); + + /* Add all NAPI objects */ + bnx2x_add_all_napi(bp); + + rc = register_netdev(dev); + if (rc) { + dev_err(&pdev->dev, "Cannot register net device\n"); + goto init_one_exit; + } + +#ifdef BCM_CNIC + if (!NO_FCOE(bp)) { + /* Add storage MAC address */ + rtnl_lock(); + dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } +#endif + + bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); + + netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," + " IRQ %d, ", board_info[ent->driver_data].name, + (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), + pcie_width, + ((!CHIP_IS_E2(bp) && pcie_speed == 2) || + (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
+ "5GHz (Gen2)" : "2.5GHz", + dev->base_addr, bp->pdev->irq); + pr_cont("node addr %pM\n", dev->dev_addr); + + return 0; + +init_one_exit: + if (bp->regview) + iounmap(bp->regview); + + if (bp->doorbells) + iounmap(bp->doorbells); + + free_netdev(dev); + + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + return rc; +} + +static void __devexit bnx2x_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return; + } + bp = netdev_priv(dev); + +#ifdef BCM_CNIC + /* Delete storage MAC address */ + if (!NO_FCOE(bp)) { + rtnl_lock(); + dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } +#endif + +#ifdef BCM_DCBNL + /* Delete app tlvs from dcbnl */ + bnx2x_dcbnl_update_applist(bp, true); +#endif + + unregister_netdev(dev); + + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + + /* Power on: we can't let PCI layer write to us while we are in D3 */ + bnx2x_set_power_state(bp, PCI_D0); + + /* Disable MSI/MSI-X */ + bnx2x_disable_msi(bp); + + /* Power off */ + bnx2x_set_power_state(bp, PCI_D3hot); + + /* Make sure RESET task is not scheduled before continuing */ + cancel_delayed_work_sync(&bp->sp_rtnl_task); + + if (bp->regview) + iounmap(bp->regview); + + if (bp->doorbells) + iounmap(bp->doorbells); + + bnx2x_free_mem_bp(bp); + + free_netdev(dev); + + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int bnx2x_eeh_nic_unload(struct bnx2x *bp) +{ + int i; + + bp->state = BNX2X_STATE_ERROR; + + bp->rx_mode = BNX2X_RX_MODE_NONE; + +#ifdef BCM_CNIC + bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); +#endif + /* Stop Tx */ + bnx2x_tx_disable(bp); + + bnx2x_netif_stop(bp, 0); + + del_timer_sync(&bp->timer); + + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + /* Release IRQs */ + bnx2x_free_irq(bp); + + /* Free SKBs, SGEs, TPA pool and driver internals */ + bnx2x_free_skbs(bp); + + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + + bnx2x_free_mem(bp); + + bp->state = BNX2X_STATE_CLOSED; + + netif_carrier_off(bp->dev); + + return 0; +} + +static void bnx2x_eeh_recover(struct bnx2x *bp) +{ + u32 val; + + mutex_init(&bp->port.phy_mutex); + + bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + bp->link_params.shmem_base = bp->common.shmem_base; + BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base); + + if (!bp->common.shmem_base || + (bp->common.shmem_base < 0xA0000) || + (bp->common.shmem_base >= 0xC0000)) { + BNX2X_DEV_INFO("MCP not active\n"); + bp->flags |= NO_MCP_FLAG; + return; + } + + val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); + if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) + != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) + BNX2X_ERR("BAD MCP validity signature\n"); + + if (!BP_NOMCP(bp)) { + bp->fw_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); + } +} + +/** + * bnx2x_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */ +static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp = netdev_priv(dev); + + rtnl_lock(); + + netif_device_detach(dev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(dev)) + bnx2x_eeh_nic_unload(bp); + + pci_disable_device(pdev); + + rtnl_unlock(); + + /* Request a slot reset */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * bnx2x_io_slot_reset - called after the PCI bus has been reset + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp = netdev_priv(dev); + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset\n"); + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + + if (netif_running(dev)) + bnx2x_set_power_state(bp, PCI_D0); + + rtnl_unlock(); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * bnx2x_io_resume - called when traffic can start flowing again + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. + */ +static void bnx2x_io_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp = netdev_priv(dev); + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + netdev_err(bp->dev, "Handling parity error recovery. " + "Try again later\n"); + return; + } + + rtnl_lock(); + + bnx2x_eeh_recover(bp); + + if (netif_running(dev)) + bnx2x_nic_load(bp, LOAD_NORMAL); + + netif_device_attach(dev); + + rtnl_unlock(); +} + +static struct pci_error_handlers bnx2x_err_handler = { + .error_detected = bnx2x_io_error_detected, + .slot_reset = bnx2x_io_slot_reset, + .resume = bnx2x_io_resume, +}; + +static struct pci_driver bnx2x_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = bnx2x_pci_tbl, + .probe = bnx2x_init_one, + .remove = __devexit_p(bnx2x_remove_one), + .suspend = bnx2x_suspend, + .resume = bnx2x_resume, + .err_handler = &bnx2x_err_handler, +}; + +static int __init bnx2x_init(void) +{ + int ret; + + pr_info("%s", version); + + bnx2x_wq = create_singlethread_workqueue("bnx2x"); + if (bnx2x_wq == NULL) { + pr_err("Cannot create workqueue\n"); + return -ENOMEM; + } + + ret = pci_register_driver(&bnx2x_pci_driver); + if (ret) { + pr_err("Cannot register driver\n"); + destroy_workqueue(bnx2x_wq); + } + return ret; +} + +static void __exit bnx2x_cleanup(void) +{ + pci_unregister_driver(&bnx2x_pci_driver); + + destroy_workqueue(bnx2x_wq); +} + +void bnx2x_notify_link_changed(struct bnx2x *bp) +{ + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); +} + +module_init(bnx2x_init); +module_exit(bnx2x_cleanup); +
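For context, the three callbacks wired into bnx2x_err_handler above are invoked by the PCI core in a fixed order during AER/EEH recovery. A minimal skeleton of that contract, under hypothetical demo_* names (sketch only, not the driver's code):

#include <linux/pci.h>

/* Recovery sequence driven by the PCI core:
 *   1. ->error_detected()  driver quiesces; returns NEED_RESET
 *                          (or DISCONNECT on a permanent failure)
 *   2. ->slot_reset()      link has been reset; driver re-enables
 *                          the device and returns RECOVERED
 *   3. ->resume()          traffic may flow again; driver reloads
 */
static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT; /* no recovery possible */
	return PCI_ERS_RESULT_NEED_RESET;         /* ask for a slot reset */
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void demo_resume(struct pci_dev *pdev)
{
	/* bring the interface back up */
}

static struct pci_error_handlers demo_err_handler = {
	.error_detected = demo_error_detected,
	.slot_reset = demo_slot_reset,
	.resume = demo_resume,
};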
+#ifdef BCM_CNIC +/** + * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). + * + * @bp: driver handle + * + * This function will wait until the ramrod completion returns. + * Return 0 if success, -ENODEV if ramrod doesn't return. + */ +static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) +{ + unsigned long ramrod_flags = 0; + + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, + &bp->iscsi_l2_mac_obj, true, + BNX2X_ISCSI_ETH_MAC, &ramrod_flags); +} + +/* count denotes the number of new completions we have seen */ +static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) +{ + struct eth_spe *spe; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return; +#endif + + spin_lock_bh(&bp->spq_lock); + BUG_ON(bp->cnic_spq_pending < count); + bp->cnic_spq_pending -= count; + + + for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { + u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) + & SPE_HDR_CONN_TYPE) >> + SPE_HDR_CONN_TYPE_SHIFT; + u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) + >> SPE_HDR_CMD_ID_SHIFT) & 0xff; + + /* Set validation for iSCSI L2 client before sending SETUP + * ramrod + */ + if (type == ETH_CONNECTION_TYPE) { + if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) + bnx2x_set_ctx_validation(bp, &bp->context. + vcxt[BNX2X_ISCSI_ETH_CID].eth, + BNX2X_ISCSI_ETH_CID); + } + + /* + * There may be no more than 8 L2 and no more than 8 L5 SPEs + * in the air. We also check that the number of outstanding + * COMMON ramrods is not more than the EQ and SPQ can + * accommodate. + */ + if (type == ETH_CONNECTION_TYPE) { + if (!atomic_read(&bp->cq_spq_left)) + break; + else + atomic_dec(&bp->cq_spq_left); + } else if (type == NONE_CONNECTION_TYPE) { + if (!atomic_read(&bp->eq_spq_left)) + break; + else + atomic_dec(&bp->eq_spq_left); + } else if ((type == ISCSI_CONNECTION_TYPE) || + (type == FCOE_CONNECTION_TYPE)) { + if (bp->cnic_spq_pending >= + bp->cnic_eth_dev.max_kwqe_pending) + break; + else + bp->cnic_spq_pending++; + } else { + BNX2X_ERR("Unknown SPE type: %d\n", type); + bnx2x_panic(); + break; + } + + spe = bnx2x_sp_get_next(bp); + *spe = *bp->cnic_kwq_cons; + + DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n", + bp->cnic_spq_pending, bp->cnic_kwq_pending, count); + + if (bp->cnic_kwq_cons == bp->cnic_kwq_last) + bp->cnic_kwq_cons = bp->cnic_kwq; + else + bp->cnic_kwq_cons++; + } + bnx2x_sp_prod_update(bp); + spin_unlock_bh(&bp->spq_lock); +} + +static int bnx2x_cnic_sp_queue(struct net_device *dev, + struct kwqe_16 *kwqes[], u32 count) +{ + struct bnx2x *bp = netdev_priv(dev); + int i; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -EIO; +#endif + + spin_lock_bh(&bp->spq_lock); + + for (i = 0; i < count; i++) { + struct eth_spe *spe = (struct eth_spe *)kwqes[i]; + + if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) + break; + + *bp->cnic_kwq_prod = *spe; + + bp->cnic_kwq_pending++; + + DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", + spe->hdr.conn_and_cmd_data, spe->hdr.type, + spe->data.update_data_addr.hi, + spe->data.update_data_addr.lo, + bp->cnic_kwq_pending); + + if (bp->cnic_kwq_prod == bp->cnic_kwq_last) + bp->cnic_kwq_prod = bp->cnic_kwq; + else + bp->cnic_kwq_prod++; + } + + spin_unlock_bh(&bp->spq_lock); + + if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) + bnx2x_cnic_sp_post(bp, 0); + + return i; +} + +static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) +{ + struct cnic_ops *c_ops; + int rc = 0; + + mutex_lock(&bp->cnic_mutex); + c_ops = rcu_dereference_protected(bp->cnic_ops, + lockdep_is_held(&bp->cnic_mutex)); + if (c_ops) + rc = c_ops->cnic_ctl(bp->cnic_data, ctl); + mutex_unlock(&bp->cnic_mutex); + + return rc; +} + +static int
bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) +{ + struct cnic_ops *c_ops; + int rc = 0; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + rc = c_ops->cnic_ctl(bp->cnic_data, ctl); + rcu_read_unlock(); + + return rc; +} + +/* + * for commands that have no data + */ +int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) +{ + struct cnic_ctl_info ctl = {0}; + + ctl.cmd = cmd; + + return bnx2x_cnic_ctl_send(bp, &ctl); +} + +static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) +{ + struct cnic_ctl_info ctl = {0}; + + /* first we tell CNIC and only then we count this as a completion */ + ctl.cmd = CNIC_CTL_COMPLETION_CMD; + ctl.data.comp.cid = cid; + ctl.data.comp.error = err; + + bnx2x_cnic_ctl_send_bh(bp, &ctl); + bnx2x_cnic_sp_post(bp, 0); +} + + +/* Called with netif_addr_lock_bh() taken. + * Sets an rx_mode config for an iSCSI ETH client. + * Doesn't block. + * Completion should be checked outside. + */ +static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) +{ + unsigned long accept_flags = 0, ramrod_flags = 0; + u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); + int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; + + if (start) { + /* Start accepting on iSCSI L2 ring. Accept all multicasts + * because it's the only way for UIO Queue to accept + * multicasts (in non-promiscuous mode only one Queue per + * function will receive multicast packets (leading in our + * case). + */ + __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + + /* Clear STOP_PENDING bit if START is requested */ + clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); + + sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; + } else + /* Clear START_PENDING bit if STOP is requested */ + clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); + + if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) + set_bit(sched_state, &bp->sp_state); + else { + __set_bit(RAMROD_RX, &ramrod_flags); + bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, + ramrod_flags); + } +} + + +static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc = 0; + + switch (ctl->cmd) { + case DRV_CTL_CTXTBL_WR_CMD: { + u32 index = ctl->data.io.offset; + dma_addr_t addr = ctl->data.io.dma_addr; + + bnx2x_ilt_wr(bp, index, addr); + break; + } + + case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: { + int count = ctl->data.credit.credit_count; + + bnx2x_cnic_sp_post(bp, count); + break; + } + + /* rtnl_lock is held. 
*/ + case DRV_CTL_START_L2_CMD: { + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + unsigned long sp_bits = 0; + + /* Configure the iSCSI classification object */ + bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, + cp->iscsi_l2_client_id, + cp->iscsi_l2_cid, BP_FUNC(bp), + bnx2x_sp(bp, mac_rdata), + bnx2x_sp_mapping(bp, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &bp->sp_state, BNX2X_OBJ_TYPE_RX, + &bp->macs_pool); + + /* Set iSCSI MAC address */ + rc = bnx2x_set_iscsi_eth_mac_addr(bp); + if (rc) + break; + + mmiowb(); + barrier(); + + /* Start accepting on iSCSI L2 ring */ + + netif_addr_lock_bh(dev); + bnx2x_set_iscsi_eth_rx_mode(bp, true); + netif_addr_unlock_bh(dev); + + /* bits to wait on */ + __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); + __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); + + if (!bnx2x_wait_sp_comp(bp, sp_bits)) + BNX2X_ERR("rx_mode completion timed out!\n"); + + break; + } + + /* rtnl_lock is held. */ + case DRV_CTL_STOP_L2_CMD: { + unsigned long sp_bits = 0; + + /* Stop accepting on iSCSI L2 ring */ + netif_addr_lock_bh(dev); + bnx2x_set_iscsi_eth_rx_mode(bp, false); + netif_addr_unlock_bh(dev); + + /* bits to wait on */ + __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); + __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); + + if (!bnx2x_wait_sp_comp(bp, sp_bits)) + BNX2X_ERR("rx_mode completion timed out!\n"); + + mmiowb(); + barrier(); + + /* Unset iSCSI L2 MAC */ + rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, + BNX2X_ISCSI_ETH_MAC, true); + break; + } + case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { + int count = ctl->data.credit.credit_count; + + smp_mb__before_atomic_inc(); + atomic_add(count, &bp->cq_spq_left); + smp_mb__after_atomic_inc(); + break; + } + + default: + BNX2X_ERR("unknown command %x\n", ctl->cmd); + rc = -EINVAL; + } + + return rc; +} + +void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) +{ + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (bp->flags & USING_MSIX_FLAG) { + cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; + cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; + cp->irq_arr[0].vector = bp->msix_table[1].vector; + } else { + cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; + cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; + } + if (!CHIP_IS_E1x(bp)) + cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; + else + cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; + + cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); + cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); + cp->irq_arr[1].status_blk = bp->def_status_blk; + cp->irq_arr[1].status_blk_num = DEF_SB_ID; + cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; + + cp->num_irq = 2; +} + +static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, + void *data) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (ops == NULL) + return -EINVAL; + + bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!bp->cnic_kwq) + return -ENOMEM; + + bp->cnic_kwq_cons = bp->cnic_kwq; + bp->cnic_kwq_prod = bp->cnic_kwq; + bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; + + bp->cnic_spq_pending = 0; + bp->cnic_kwq_pending = 0; + + bp->cnic_data = data; + + cp->num_irq = 0; + cp->drv_state |= CNIC_DRV_STATE_REGD; + cp->iro_arr = bp->iro_arr; + + bnx2x_setup_cnic_irq_info(bp); + + rcu_assign_pointer(bp->cnic_ops, ops); + + return 0; +} + +static int bnx2x_unregister_cnic(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + mutex_lock(&bp->cnic_mutex); + 
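The SPQ credit scheme visible above works in two halves: bnx2x_cnic_sp_post() only posts an SPE after taking a credit from cq_spq_left or eq_spq_left, and DRV_CTL_RET_L2_SPQ_CREDIT_CMD hands credits back with atomic_add(). The same take/return shape, reduced to standalone C11 atomics (the CAS loop is a stricter stand-in for the driver's read-then-decrement under its spinlock):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int cq_spq_left = 8; /* assumed initial credit */

/* Consumer: take one credit if available (mirrors the "!atomic_read ->
 * break, else atomic_dec" gate in bnx2x_cnic_sp_post()). */
static bool take_credit(void)
{
	int old = atomic_load(&cq_spq_left);
	while (old > 0) {
		if (atomic_compare_exchange_weak(&cq_spq_left, &old, old - 1))
			return true;
	}
	return false;
}

/* Producer: return credits on completion, like
 * DRV_CTL_RET_L2_SPQ_CREDIT_CMD above. */
static void return_credits(int count)
{
	atomic_fetch_add(&cq_spq_left, count);
}

int main(void)
{
	int posted = 0;

	while (take_credit())
		posted++;
	printf("posted %d SPEs, credit now %d\n",
	       posted, atomic_load(&cq_spq_left));

	return_credits(posted);
	printf("after completion, credit %d\n", atomic_load(&cq_spq_left));
	return 0;
}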
cp->drv_state = 0; + rcu_assign_pointer(bp->cnic_ops, NULL); + mutex_unlock(&bp->cnic_mutex); + synchronize_rcu(); + kfree(bp->cnic_kwq); + bp->cnic_kwq = NULL; + + return 0; +} + +struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + /* If both iSCSI and FCoE are disabled - return NULL in + * order to indicate CNIC that it should not try to work + * with this device. + */ + if (NO_ISCSI(bp) && NO_FCOE(bp)) + return NULL; + + cp->drv_owner = THIS_MODULE; + cp->chip_id = CHIP_ID(bp); + cp->pdev = bp->pdev; + cp->io_base = bp->regview; + cp->io_base2 = bp->doorbells; + cp->max_kwqe_pending = 8; + cp->ctx_blk_size = CDU_ILT_PAGE_SZ; + cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + + bnx2x_cid_ilt_lines(bp); + cp->ctx_tbl_len = CNIC_ILT_LINES; + cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; + cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; + cp->drv_ctl = bnx2x_drv_ctl; + cp->drv_register_cnic = bnx2x_register_cnic; + cp->drv_unregister_cnic = bnx2x_unregister_cnic; + cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; + cp->iscsi_l2_client_id = + bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); + cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; + + if (NO_ISCSI_OOO(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; + + if (NO_ISCSI(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; + + if (NO_FCOE(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; + + DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " + "starting cid %d\n", + cp->ctx_blk_size, + cp->ctx_tbl_offset, + cp->ctx_tbl_len, + cp->starting_cid); + return cp; +} +EXPORT_SYMBOL(bnx2x_cnic_probe); + +#endif /* BCM_CNIC */ + diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h new file mode 100644 index 0000000..27b5ecb --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -0,0 +1,7146 @@ +/* bnx2x_reg.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * The registers description starts with the register Access type followed + * by size in bits. For example [RW 32]. 
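The cnic_ops handling above (rcu_assign_pointer() on register, rcu_dereference() inside rcu_read_lock() on the call path, and NULL plus synchronize_rcu() before freeing on unregister) is the standard RCU publish/unpublish lifecycle. Its shape, detached from the driver with hypothetical demo_* types (kernel-style sketch):

#include <linux/rcupdate.h>

struct demo_ops { void (*ctl)(void *data); };
struct demo_dev { struct demo_ops __rcu *ops; void *data; };

static void demo_register(struct demo_dev *d, struct demo_ops *ops)
{
	rcu_assign_pointer(d->ops, ops);   /* publish fully-built ops */
}

static void demo_call(struct demo_dev *d)
{
	struct demo_ops *ops;

	rcu_read_lock();                   /* lockless read side */
	ops = rcu_dereference(d->ops);
	if (ops)
		ops->ctl(d->data);
	rcu_read_unlock();
}

static void demo_unregister(struct demo_dev *d)
{
	rcu_assign_pointer(d->ops, NULL);  /* unpublish */
	synchronize_rcu();                 /* wait out in-flight readers */
	/* only now is it safe to free what the old ops pointed at */
}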
The access types are: + * R - Read only + * RC - Clear on read + * RW - Read/Write + * ST - Statistics register (clear on read) + * W - Write only + * WB - Wide bus register - the size is over 32 bits and it should be + * read/write in consecutive 32 bits accesses + * WR - Write Clear (write 1 to clear the bit) + * + */ +#ifndef BNX2X_REG_H +#define BNX2X_REG_H + +#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) +#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) +#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5) +#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3) +#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4) +#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1) +/* [RW 1] Initiate the ATC array - reset all the valid bits */ +#define ATC_REG_ATC_INIT_ARRAY 0x1100b8 +/* [R 1] ATC initalization done */ +#define ATC_REG_ATC_INIT_DONE 0x1100bc +/* [RC 6] Interrupt register #0 read clear */ +#define ATC_REG_ATC_INT_STS_CLR 0x1101c0 +/* [RW 5] Parity mask register #0 read/write */ +#define ATC_REG_ATC_PRTY_MASK 0x1101d8 +/* [RC 5] Parity register #0 read clear */ +#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0 +/* [RW 19] Interrupt mask register #0 read/write */ +#define BRB1_REG_BRB1_INT_MASK 0x60128 +/* [R 19] Interrupt register #0 read */ +#define BRB1_REG_BRB1_INT_STS 0x6011c +/* [RW 4] Parity mask register #0 read/write */ +#define BRB1_REG_BRB1_PRTY_MASK 0x60138 +/* [R 4] Parity register #0 read */ +#define BRB1_REG_BRB1_PRTY_STS 0x6012c +/* [RC 4] Parity register #0 read clear */ +#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130 +/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At + * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address + * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - + * following reset the first rbc access to this reg must be write; there can + * be no more rbc writes after the first one; there can be any number of rbc + * read following the first write; rbc access not following these rules will + * result in hang condition. */ +#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200 +/* [RW 10] The number of free blocks below which the full signal to class 0 + * is asserted */ +#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0 +#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230 +/* [RW 11] The number of free blocks above which the full signal to class 0 + * is de-asserted */ +#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4 +#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234 +/* [RW 11] The number of free blocks below which the full signal to class 1 + * is asserted */ +#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8 +#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238 +/* [RW 11] The number of free blocks above which the full signal to class 1 + * is de-asserted */ +#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc +#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c +/* [RW 11] The number of free blocks below which the full signal to the LB + * port is asserted */ +#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0 +/* [RW 10] The number of free blocks above which the full signal to the LB + * port is de-asserted */ +#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4 +/* [RW 10] The number of free blocks above which the High_llfc signal to + interface #n is de-asserted. */ +#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c +/* [RW 10] The number of free blocks below which the High_llfc signal to + interface #n is asserted. 
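A note on the RC (clear on read) convention used by the *_INT_STS_CLR and *_PRTY_STS_CLR registers above: the read itself acknowledges the pending bits, so a handler must latch the value once and dispatch from the copy. Simulated in plain C (the "register" is just a variable):

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_int_sts = 0x5; /* pretend two interrupt causes pending */

/* An RC register hands back the pending bits and clears them in the
 * same access; reading twice returns 0 the second time. */
static uint32_t read_rc_reg(void)
{
	uint32_t v = hw_int_sts;

	hw_int_sts = 0;
	return v;
}

int main(void)
{
	uint32_t causes = read_rc_reg(); /* capture once */

	/* dispatch from the cached copy, never by re-reading */
	if (causes & 0x1)
		printf("cause 0 handled\n");
	if (causes & 0x4)
		printf("cause 2 handled\n");

	printf("second read: 0x%x\n", read_rc_reg()); /* 0 */
	return 0;
}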
*/ +#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c +/* [RW 11] The number of blocks guarantied for the LB port */ +#define BRB1_REG_LB_GUARANTIED 0x601ec +/* [RW 11] The hysteresis on the guarantied buffer space for the Lb port + * before signaling XON. */ +#define BRB1_REG_LB_GUARANTIED_HYST 0x60264 +/* [RW 24] LL RAM data. */ +#define BRB1_REG_LL_RAM 0x61000 +/* [RW 10] The number of free blocks above which the Low_llfc signal to + interface #n is de-asserted. */ +#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c +/* [RW 10] The number of free blocks below which the Low_llfc signal to + interface #n is asserted. */ +#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c +/* [RW 11] The number of blocks guarantied for class 0 in MAC 0. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244 +/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254 +/* [RW 11] The number of blocks guarantied for class 1 in MAC 0. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248 +/* [RW 11] The hysteresis on the guarantied buffer space for class 1in MAC 0 + * before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258 +/* [RW 11] The number of blocks guarantied for class 0in MAC1.The register + * is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c +/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c +/* [RW 11] The number of blocks guarantied for class 1 in MAC 1. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250 +/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260 +/* [RW 11] The number of blocks guarantied for the MAC port. The register is + * applicable only when per_class_guaranty_mode is reset. */ +#define BRB1_REG_MAC_GUARANTIED_0 0x601e8 +#define BRB1_REG_MAC_GUARANTIED_1 0x60240 +/* [R 24] The number of full blocks. */ +#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090 +/* [ST 32] The number of cycles that the write_full signal towards MAC #0 + was asserted. */ +#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8 +#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc +#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8 +/* [ST 32] The number of cycles that the pause signal towards MAC #0 was + asserted. 
*/ +#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8 +#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc +/* [RW 10] The number of free blocks below which the pause signal to class 0 + * is asserted */ +#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0 +#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220 +/* [RW 11] The number of free blocks above which the pause signal to class 0 + * is de-asserted */ +#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4 +#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224 +/* [RW 11] The number of free blocks below which the pause signal to class 1 + * is asserted */ +#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8 +#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228 +/* [RW 11] The number of free blocks above which the pause signal to class 1 + * is de-asserted */ +#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc +#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c +/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */ +#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 +#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c +/* [RW 10] Write client 0: Assert pause threshold. */ +#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 +#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c +/* [R 24] The number of full blocks occupied by port. */ +#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 +/* [RW 1] Reset the design by software. */ +#define BRB1_REG_SOFT_RESET 0x600dc +/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */ +#define CCM_REG_CAM_OCCUP 0xd0188 +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_CFC_IFEN 0xd003c +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_CQM_IFEN 0xd000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. + Otherwise 0 is inserted. */ +#define CCM_REG_CCM_CQM_USE_Q 0xd00c0 +/* [RW 11] Interrupt mask register #0 read/write */ +#define CCM_REG_CCM_INT_MASK 0xd01e4 +/* [R 11] Interrupt register #0 read */ +#define CCM_REG_CCM_INT_STS 0xd01d8 +/* [RW 27] Parity mask register #0 read/write */ +#define CCM_REG_CCM_PRTY_MASK 0xd01f4 +/* [R 27] Parity register #0 read */ +#define CCM_REG_CCM_PRTY_STS 0xd01e8 +/* [RC 27] Parity register #0 read clear */ +#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec +/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the input message Reg1WbFlg isn't set. */ +#define CCM_REG_CCM_REG0_SZ 0xd00c4 +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_STORM0_IFEN 0xd0004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_STORM1_IFEN 0xd0008 +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define CCM_REG_CDU_AG_RD_IFEN 0xd0030 +/* [RW 1] CDU AG write Interface enable. 
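The paired XOFF/XON thresholds above implement hysteresis: flow control asserts when the free-block count drops below the XOFF threshold and releases only once it climbs back above the higher XON threshold. The state machine, with sample threshold values (illustrative C):

#include <stdbool.h>
#include <stdio.h>

#define XOFF_THRESHOLD 64  /* assert pause below this many free blocks */
#define XON_THRESHOLD  96  /* deassert only above this (hysteresis gap) */

static bool paused;

static void update_flow_control(int free_blocks)
{
	if (!paused && free_blocks < XOFF_THRESHOLD)
		paused = true;   /* send XOFF / assert pause */
	else if (paused && free_blocks > XON_THRESHOLD)
		paused = false;  /* send XON / deassert pause */
	/* between the two thresholds the state is sticky */
}

int main(void)
{
	int samples[] = { 120, 70, 60, 80, 90, 100 };

	for (unsigned i = 0; i < sizeof(samples)/sizeof(samples[0]); i++) {
		update_flow_control(samples[i]);
		printf("free=%3d paused=%d\n", samples[i], paused);
	}
	return 0;
}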
If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define CCM_REG_CDU_AG_WR_IFEN 0xd002c +/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define CCM_REG_CDU_SM_RD_IFEN 0xd0038 +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define CCM_REG_CDU_SM_WR_IFEN 0xd0034 +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define CCM_REG_CFC_INIT_CRD 0xd0204 +/* [RW 2] Auxiliary counter flag Q number 1. */ +#define CCM_REG_CNT_AUX1_Q 0xd00c8 +/* [RW 2] Auxiliary counter flag Q number 2. */ +#define CCM_REG_CNT_AUX2_Q 0xd00cc +/* [RW 28] The CM header value for QM request (primary). */ +#define CCM_REG_CQM_CCM_HDR_P 0xd008c +/* [RW 28] The CM header value for QM request (secondary). */ +#define CCM_REG_CQM_CCM_HDR_S 0xd0090 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CQM_CCM_IFEN 0xd0014 +/* [RW 6] QM output initial credit. Max credit available - 32. Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define CCM_REG_CQM_INIT_CRD 0xd020c +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_CQM_P_WEIGHT 0xd00b8 +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_CQM_S_WEIGHT 0xd00bc +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CSDM_IFEN 0xd0018 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the SDM interface is detected. */ +#define CCM_REG_CSDM_LENGTH_MIS 0xd0170 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_CSDM_WEIGHT 0xd00b4 +/* [RW 28] The CM header for QM formatting in case of an error in the QM + inputs. */ +#define CCM_REG_ERR_CCM_HDR 0xd0094 +/* [RW 8] The Event ID in case the input message ErrorFlg is set. */ +#define CCM_REG_ERR_EVNT_ID 0xd0098 +/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define CCM_REG_FIC0_INIT_CRD 0xd0210 +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. 
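The *_INIT_CRD registers above are asymmetric: a write loads the initial credit, while a read reports the current value of the credit counter, which the block consumes and replenishes on its own as traffic flows. A toy model of that behaviour (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Model of a credit-counter register: write = initial credit,
 * read = current credit. */
struct credit_reg {
	uint32_t current;
};

static void credit_write(struct credit_reg *r, uint32_t init) { r->current = init; }
static uint32_t credit_read(const struct credit_reg *r) { return r->current; }
static int credit_take(struct credit_reg *r) { return r->current ? (r->current--, 1) : 0; }

int main(void)
{
	struct credit_reg fic0;

	credit_write(&fic0, 64);  /* "must be initialized to 64 at start-up" */
	credit_take(&fic0);       /* the block consumes one credit */
	printf("credit now %u\n", credit_read(&fic0)); /* 63, not 64 */
	return 0;
}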
*/ +#define CCM_REG_FIC1_INIT_CRD 0xd0214 +/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr; + ~ccm_registers_gr_ld0_pr.gr_ld0_pr and + ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and + outputs to STORM: aggregation; load FIC0; load FIC1 and store. */ +#define CCM_REG_GR_ARB_TYPE 0xd015c +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed; that the Store channel priority is + the compliment to 4 of the rest priorities - Aggregation channel; Load + (FIC0) channel and Load (FIC1). */ +#define CCM_REG_GR_LD0_PR 0xd0164 +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed; that the Store channel priority is + the compliment to 4 of the rest priorities - Aggregation channel; Load + (FIC0) channel and Load (FIC1). */ +#define CCM_REG_GR_LD1_PR 0xd0168 +/* [RW 2] General flags index. */ +#define CCM_REG_INV_DONE_Q 0xd0108 +/* [RW 4] The number of double REG-pairs(128 bits); loaded from the STORM + context and sent to STORM; for a specific connection type. The double + REG-pairs are used in order to align to STORM context row size of 128 + bits. The offset of these data in the STORM context is always 0. Index + _(0..15) stands for the connection type (one of 16). */ +#define CCM_REG_N_SM_CTX_LD_0 0xd004c +#define CCM_REG_N_SM_CTX_LD_1 0xd0050 +#define CCM_REG_N_SM_CTX_LD_2 0xd0054 +#define CCM_REG_N_SM_CTX_LD_3 0xd0058 +#define CCM_REG_N_SM_CTX_LD_4 0xd005c +/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_PBF_IFEN 0xd0028 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the pbf interface is detected. */ +#define CCM_REG_PBF_LENGTH_MIS 0xd0180 +/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_PBF_WEIGHT 0xd00ac +#define CCM_REG_PHYS_QNUM1_0 0xd0134 +#define CCM_REG_PHYS_QNUM1_1 0xd0138 +#define CCM_REG_PHYS_QNUM2_0 0xd013c +#define CCM_REG_PHYS_QNUM2_1 0xd0140 +#define CCM_REG_PHYS_QNUM3_0 0xd0144 +#define CCM_REG_PHYS_QNUM3_1 0xd0148 +#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114 +#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118 +#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c +#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120 +#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124 +#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128 +#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c +#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130 +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define CCM_REG_STORM_CCM_IFEN 0xd0010 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the STORM interface is detected. */ +#define CCM_REG_STORM_LENGTH_MIS 0xd016c +/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin) + mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for + weight 1(least prioritised); 2 stands for weight 2 (more prioritised); + tc. */ +#define CCM_REG_STORM_WEIGHT 0xd009c +/* [RW 1] Input tsem Interface enable. 
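The WRR weight fields above use a small twist in their encoding: 0 means weight 8 (the most prioritised input) while non-zero values are taken literally. A decode helper under that reading:

#include <stdio.h>

/* Decode a 3-bit WRR weight field: 0 encodes weight 8 (the most
 * prioritised input); 1..7 are literal weights. */
static int wrr_weight(unsigned field)
{
	return field == 0 ? 8 : (int)field;
}

int main(void)
{
	for (unsigned f = 0; f < 8; f++)
		printf("field %u -> weight %d\n", f, wrr_weight(f));
	return 0;
}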
If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define CCM_REG_TSEM_IFEN 0xd001c +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the tsem interface is detected. */ +#define CCM_REG_TSEM_LENGTH_MIS 0xd0174 +/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_TSEM_WEIGHT 0xd00a0 +/* [RW 1] Input usem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define CCM_REG_USEM_IFEN 0xd0024 +/* [RC 1] Set when message length mismatch (relative to last indication) at + the usem interface is detected. */ +#define CCM_REG_USEM_LENGTH_MIS 0xd017c +/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_USEM_WEIGHT 0xd00a8 +/* [RW 1] Input xsem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define CCM_REG_XSEM_IFEN 0xd0020 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the xsem interface is detected. */ +#define CCM_REG_XSEM_LENGTH_MIS 0xd0178 +/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_XSEM_WEIGHT 0xd00a4 +/* [RW 19] Indirect access to the descriptor table of the XX protection + mechanism. The fields are: [5:0] - message length; [12:6] - message + pointer; 18:13] - next pointer. */ +#define CCM_REG_XX_DESCR_TABLE 0xd0300 +#define CCM_REG_XX_DESCR_TABLE_SIZE 24 +/* [R 7] Used to read the value of XX protection Free counter. */ +#define CCM_REG_XX_FREE 0xd0184 +/* [RW 6] Initial value for the credit counter; responsible for fulfilling + of the Input Stage XX protection buffer by the XX protection pending + messages. Max credit available - 127. Write writes the initial credit + value; read returns the current value of the credit counter. Must be + initialized to maximum XX protected message size - 2 at start-up. */ +#define CCM_REG_XX_INIT_CRD 0xd0220 +/* [RW 7] The maximum number of pending messages; which may be stored in XX + protection. At read the ~ccm_registers_xx_free.xx_free counter is read. + At write comprises the start value of the ~ccm_registers_xx_free.xx_free + counter. */ +#define CCM_REG_XX_MSG_NUM 0xd0224 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044 +/* [RW 18] Indirect access to the XX table of the XX protection mechanism. + The fields are: [5:0] - tail pointer; 11:6] - Link List size; 17:12] - + header pointer. 
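The CCM_REG_XX_DESCR_TABLE entry format above ([5:0] message length, [12:6] message pointer, [18:13] next pointer) unpacks with plain shifts and masks; for example (the sample word is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Field layout from the CCM_REG_XX_DESCR_TABLE description above:
 * [5:0] message length; [12:6] message pointer; [18:13] next pointer. */
struct xx_descr {
	unsigned len;
	unsigned msg_ptr;
	unsigned next_ptr;
};

static struct xx_descr xx_unpack(uint32_t raw)
{
	struct xx_descr d;

	d.len = raw & 0x3f;              /* bits 5:0   */
	d.msg_ptr = (raw >> 6) & 0x7f;   /* bits 12:6  */
	d.next_ptr = (raw >> 13) & 0x3f; /* bits 18:13 */
	return d;
}

int main(void)
{
	/* Hypothetical 19-bit descriptor word, for illustration only. */
	struct xx_descr d = xx_unpack(0x51e2a);

	printf("len=%u msg_ptr=%u next_ptr=%u\n",
	       d.len, d.msg_ptr, d.next_ptr);
	return 0;
}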
*/ +#define CCM_REG_XX_TABLE 0xd0280 +#define CDU_REG_CDU_CHK_MASK0 0x101000 +#define CDU_REG_CDU_CHK_MASK1 0x101004 +#define CDU_REG_CDU_CONTROL0 0x101008 +#define CDU_REG_CDU_DEBUG 0x101010 +#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020 +/* [RW 7] Interrupt mask register #0 read/write */ +#define CDU_REG_CDU_INT_MASK 0x10103c +/* [R 7] Interrupt register #0 read */ +#define CDU_REG_CDU_INT_STS 0x101030 +/* [RW 5] Parity mask register #0 read/write */ +#define CDU_REG_CDU_PRTY_MASK 0x10104c +/* [R 5] Parity register #0 read */ +#define CDU_REG_CDU_PRTY_STS 0x101040 +/* [RC 5] Parity register #0 read clear */ +#define CDU_REG_CDU_PRTY_STS_CLR 0x101044 +/* [RC 32] logging of error data in case of a CDU load error: + {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; + ype_error; ctual_active; ctual_compressed_context}; */ +#define CDU_REG_ERROR_DATA 0x101014 +/* [WB 216] L1TT ram access. each entry has the following format : + {mrege_regions[7:0]; ffset12[5:0]...offset0[5:0]; + ength12[5:0]...length0[5:0]; d12[3:0]...id0[3:0]} */ +#define CDU_REG_L1TT 0x101800 +/* [WB 24] MATT ram access. each entry has the following + format:{RegionLength[11:0]; egionOffset[11:0]} */ +#define CDU_REG_MATT 0x101100 +/* [RW 1] when this bit is set the CDU operates in e1hmf mode */ +#define CDU_REG_MF_MODE 0x101050 +/* [R 1] indication the initializing the activity counter by the hardware + was done. */ +#define CFC_REG_AC_INIT_DONE 0x104078 +/* [RW 13] activity counter ram access */ +#define CFC_REG_ACTIVITY_COUNTER 0x104400 +#define CFC_REG_ACTIVITY_COUNTER_SIZE 256 +/* [R 1] indication the initializing the cams by the hardware was done. */ +#define CFC_REG_CAM_INIT_DONE 0x10407c +/* [RW 2] Interrupt mask register #0 read/write */ +#define CFC_REG_CFC_INT_MASK 0x104108 +/* [R 2] Interrupt register #0 read */ +#define CFC_REG_CFC_INT_STS 0x1040fc +/* [RC 2] Interrupt register #0 read clear */ +#define CFC_REG_CFC_INT_STS_CLR 0x104100 +/* [RW 4] Parity mask register #0 read/write */ +#define CFC_REG_CFC_PRTY_MASK 0x104118 +/* [R 4] Parity register #0 read */ +#define CFC_REG_CFC_PRTY_STS 0x10410c +/* [RC 4] Parity register #0 read clear */ +#define CFC_REG_CFC_PRTY_STS_CLR 0x104110 +/* [RW 21] CID cam access (21:1 - Data; alid - 0) */ +#define CFC_REG_CID_CAM 0x104800 +#define CFC_REG_CONTROL0 0x104028 +#define CFC_REG_DEBUG0 0x104050 +/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error + vector) whether the cfc should be disabled upon it */ +#define CFC_REG_DISABLE_ON_ERROR 0x104044 +/* [RC 14] CFC error vector. when the CFC detects an internal error it will + set one of these bits. the bit description can be found in CFC + specifications */ +#define CFC_REG_ERROR_VECTOR 0x10403c +/* [WB 93] LCID info ram access */ +#define CFC_REG_INFO_RAM 0x105000 +#define CFC_REG_INFO_RAM_SIZE 1024 +#define CFC_REG_INIT_REG 0x10404c +#define CFC_REG_INTERFACES 0x104058 +/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this + field allows changing the priorities of the weighted-round-robin arbiter + which selects which CFC load client should be served next */ +#define CFC_REG_LCREQ_WEIGHTS 0x104084 +/* [RW 16] Link List ram access; data = {prev_lcid; ext_lcid} */ +#define CFC_REG_LINK_LIST 0x104c00 +#define CFC_REG_LINK_LIST_SIZE 256 +/* [R 1] indication the initializing the link list by the hardware was done. 
*/ +#define CFC_REG_LL_INIT_DONE 0x104074 +/* [R 9] Number of allocated LCIDs which are at empty state */ +#define CFC_REG_NUM_LCIDS_ALLOC 0x104020 +/* [R 9] Number of Arriving LCIDs in Link List Block */ +#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 +#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120 +/* [R 9] Number of Leaving LCIDs in Link List Block */ +#define CFC_REG_NUM_LCIDS_LEAVING 0x104018 +#define CFC_REG_WEAK_ENABLE_PF 0x104124 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define CSDM_REG_AGG_INT_EVENT_0 0xc2038 +#define CSDM_REG_AGG_INT_EVENT_10 0xc2060 +#define CSDM_REG_AGG_INT_EVENT_11 0xc2064 +#define CSDM_REG_AGG_INT_EVENT_12 0xc2068 +#define CSDM_REG_AGG_INT_EVENT_13 0xc206c +#define CSDM_REG_AGG_INT_EVENT_14 0xc2070 +#define CSDM_REG_AGG_INT_EVENT_15 0xc2074 +#define CSDM_REG_AGG_INT_EVENT_16 0xc2078 +#define CSDM_REG_AGG_INT_EVENT_2 0xc2040 +#define CSDM_REG_AGG_INT_EVENT_3 0xc2044 +#define CSDM_REG_AGG_INT_EVENT_4 0xc2048 +#define CSDM_REG_AGG_INT_EVENT_5 0xc204c +#define CSDM_REG_AGG_INT_EVENT_6 0xc2050 +#define CSDM_REG_AGG_INT_EVENT_7 0xc2054 +#define CSDM_REG_AGG_INT_EVENT_8 0xc2058 +#define CSDM_REG_AGG_INT_EVENT_9 0xc205c +/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) + or auto-mask-mode (1) */ +#define CSDM_REG_AGG_INT_MODE_10 0xc21e0 +#define CSDM_REG_AGG_INT_MODE_11 0xc21e4 +#define CSDM_REG_AGG_INT_MODE_12 0xc21e8 +#define CSDM_REG_AGG_INT_MODE_13 0xc21ec +#define CSDM_REG_AGG_INT_MODE_14 0xc21f0 +#define CSDM_REG_AGG_INT_MODE_15 0xc21f4 +#define CSDM_REG_AGG_INT_MODE_16 0xc21f8 +#define CSDM_REG_AGG_INT_MODE_6 0xc21d0 +#define CSDM_REG_AGG_INT_MODE_7 0xc21d4 +#define CSDM_REG_AGG_INT_MODE_8 0xc21d8 +#define CSDM_REG_AGG_INT_MODE_9 0xc21dc +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c +/* [RW 16] The maximum value of the completion counter #1 */ +#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c +/* [RW 32] Interrupt mask register #0 read/write */ +#define CSDM_REG_CSDM_INT_MASK_0 0xc229c +#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac +/* [R 32] Interrupt register #0 read */ +#define CSDM_REG_CSDM_INT_STS_0 0xc2290 +#define CSDM_REG_CSDM_INT_STS_1 0xc22a0 +/* [RW 11] Parity mask register #0 read/write */ +#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc +/* [R 11] Parity register #0 read */ +#define CSDM_REG_CSDM_PRTY_STS 0xc22b0 +/* [RC 11] Parity register #0 read clear */ +#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4 +#define CSDM_REG_ENABLE_IN1 0xc2238 +#define CSDM_REG_ENABLE_IN2 0xc223c +#define CSDM_REG_ENABLE_OUT1 0xc2240 +#define CSDM_REG_ENABLE_OUT2 0xc2244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. 
*/ +#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc +/* [ST 32] The number of ACK after placement messages received */ +#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c +/* [ST 32] The number of packet end messages received from the parser */ +#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274 +/* [ST 32] The number of requests received from the pxp async if */ +#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278 +/* [ST 32] The number of commands received in queue 0 */ +#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248 +/* [ST 32] The number of commands received in queue 10 */ +#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c +/* [ST 32] The number of commands received in queue 11 */ +#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270 +/* [ST 32] The number of commands received in queue 1 */ +#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c +/* [ST 32] The number of commands received in queue 3 */ +#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250 +/* [ST 32] The number of commands received in queue 4 */ +#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254 +/* [ST 32] The number of commands received in queue 5 */ +#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258 +/* [ST 32] The number of commands received in queue 6 */ +#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c +/* [ST 32] The number of commands received in queue 7 */ +#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260 +/* [ST 32] The number of commands received in queue 8 */ +#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264 +/* [ST 32] The number of commands received in queue 9 */ +#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558 +/* [RW 32] Tick for timer counter. Applicable only when + ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define CSDM_REG_TIMER_TICK 0xc2000 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define CSEM_REG_ARB_CYCLE_SIZE 0x200034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define CSEM_REG_ARB_ELEMENT0 0x200020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~csem_registers_arb_element0.arb_element0 */ +#define CSEM_REG_ARB_ELEMENT1 0x200024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~csem_registers_arb_element0.arb_element0 + and ~csem_registers_arb_element1.arb_element1 */ +#define CSEM_REG_ARB_ELEMENT2 0x200028 +/* [RW 3] The source that is associated with arbitration element 3. 
Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~csem_registers_arb_element0.arb_element0 and + ~csem_registers_arb_element1.arb_element1 and + ~csem_registers_arb_element2.arb_element2 */ +#define CSEM_REG_ARB_ELEMENT3 0x20002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~csem_registers_arb_element0.arb_element0 + and ~csem_registers_arb_element1.arb_element1 and + ~csem_registers_arb_element2.arb_element2 and + ~csem_registers_arb_element3.arb_element3 */ +#define CSEM_REG_ARB_ELEMENT4 0x200030 +/* [RW 32] Interrupt mask register #0 read/write */ +#define CSEM_REG_CSEM_INT_MASK_0 0x200110 +#define CSEM_REG_CSEM_INT_MASK_1 0x200120 +/* [R 32] Interrupt register #0 read */ +#define CSEM_REG_CSEM_INT_STS_0 0x200104 +#define CSEM_REG_CSEM_INT_STS_1 0x200114 +/* [RW 32] Parity mask register #0 read/write */ +#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130 +#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140 +/* [R 32] Parity register #0 read */ +#define CSEM_REG_CSEM_PRTY_STS_0 0x200124 +#define CSEM_REG_CSEM_PRTY_STS_1 0x200134 +/* [RC 32] Parity register #0 read clear */ +#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128 +#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138 +#define CSEM_REG_ENABLE_IN 0x2000a4 +#define CSEM_REG_ENABLE_OUT 0x2000a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. */ +#define CSEM_REG_FAST_MEMORY 0x220000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define CSEM_REG_FIC0_DISABLE 0x200224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define CSEM_REG_FIC1_DISABLE 0x200234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define CSEM_REG_INT_TABLE 0x200400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define CSEM_REG_MSG_NUM_FIC0 0x200000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define CSEM_REG_MSG_NUM_FIC1 0x200004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define CSEM_REG_MSG_NUM_FOC0 0x200008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define CSEM_REG_MSG_NUM_FOC1 0x20000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define CSEM_REG_MSG_NUM_FOC2 0x200010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define CSEM_REG_MSG_NUM_FOC3 0x200014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define CSEM_REG_PAS_DISABLE 0x20024c +/* [WB 128] Debug only. Passive buffer memory */ +#define CSEM_REG_PASSIVE_BUFFER 0x202000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. 
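The CSEM_REG_ARB_ELEMENT* descriptions above require each element's source to differ from all earlier ones, out of five possible sources. A checker for a proposed assignment (illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Source encoding per the comments above: 0 - foc0; 1 - fic1;
 * 2..4 - sleeping thread with priority 0..2. */
static bool arb_elements_valid(const unsigned src[], int n)
{
	for (int i = 0; i < n; i++) {
		if (src[i] > 4)
			return false; /* only five sources exist */
		for (int j = 0; j < i; j++)
			if (src[i] == src[j])
				return false; /* must differ from earlier elements */
	}
	return true;
}

int main(void)
{
	unsigned ok[5] = { 0, 1, 2, 3, 4 };
	unsigned bad[5] = { 0, 1, 2, 2, 4 };

	printf("ok:  %d\n", arb_elements_valid(ok, 5));  /* 1 */
	printf("bad: %d\n", arb_elements_valid(bad, 5)); /* 0 */
	return 0;
}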
+   */
+#define CSEM_REG_PRAM 0x240000
+/* [R 16] Valid sleeping threads indication. Has one bit per thread */
+#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define CSEM_REG_THREADS_LIST 0x2002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define CSEM_REG_TS_0_AS 0x200038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define CSEM_REG_TS_10_AS 0x200060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define CSEM_REG_TS_11_AS 0x200064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define CSEM_REG_TS_12_AS 0x200068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define CSEM_REG_TS_13_AS 0x20006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define CSEM_REG_TS_14_AS 0x200070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define CSEM_REG_TS_15_AS 0x200074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define CSEM_REG_TS_16_AS 0x200078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define CSEM_REG_TS_17_AS 0x20007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define CSEM_REG_TS_18_AS 0x200080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define CSEM_REG_TS_1_AS 0x20003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define CSEM_REG_TS_2_AS 0x200040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define CSEM_REG_TS_3_AS 0x200044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define CSEM_REG_TS_4_AS 0x200048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define CSEM_REG_TS_5_AS 0x20004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define CSEM_REG_TS_6_AS 0x200050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define CSEM_REG_TS_7_AS 0x200054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define CSEM_REG_TS_8_AS 0x200058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define CSEM_REG_TS_9_AS 0x20005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define CSEM_REG_VFPF_ERR_NUM 0x200380
+/* [RW 1] Parity mask register #0 read/write */
+#define DBG_REG_DBG_PRTY_MASK 0xc0a8
+/* [R 1] Parity register #0 read */
+#define DBG_REG_DBG_PRTY_STS 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
+/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
+ * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
+ * 4.Completion function=0; 5.Error handling=0 */
+#define DMAE_REG_BACKWARD_COMP_EN 0x10207c
+/* [RW 32] Commands memory. The address of command X; row Y is calculated
+   as 14*X+Y. */
+#define DMAE_REG_CMD_MEM 0x102400
+#define DMAE_REG_CMD_MEM_SIZE 224
+/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
+   initial value is all ones. */
+#define DMAE_REG_CRC16C_INIT 0x10201c
+/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the
+   CRC-16 T10 initial value is all ones. */
+#define DMAE_REG_CRC16T10_INIT 0x102020
+/* [RW 2] Interrupt mask register #0 read/write */
+#define DMAE_REG_DMAE_INT_MASK 0x102054
+/* [RW 4] Parity mask register #0 read/write */
+#define DMAE_REG_DMAE_PRTY_MASK 0x102064
+/* [R 4] Parity register #0 read */
+#define DMAE_REG_DMAE_PRTY_STS 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
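+
+/* Usage sketch; illustrative only. Posting a DMAE command with the layout
+ * above: command X; word Y sits at DMAE_REG_CMD_MEM + (14*X + Y)*4, since
+ * each command occupies 14 32-bit rows; the matching per-command GO
+ * register below (DMAE_REG_GO_C0..C15; one per command) then starts it.
+ * REG_WR() and struct bnx2x *bp are assumed to be the driver's GRC write
+ * accessor and device handle:
+ *
+ *	static void dmae_post_cmd0(struct bnx2x *bp, const u32 *cmd)
+ *	{
+ *		int word;
+ *
+ *		for (word = 0; word < 14; word++)
+ *			REG_WR(bp, DMAE_REG_CMD_MEM + word * 4, cmd[word]);
+ *		REG_WR(bp, DMAE_REG_GO_C0, 1);	// kick command 0
+ *	}
+ */
+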
+/* [RW 1] Command 0 go. */
+#define DMAE_REG_GO_C0 0x102080
+/* [RW 1] Command 1 go. */
+#define DMAE_REG_GO_C1 0x102084
+/* [RW 1] Command 10 go. */
+#define DMAE_REG_GO_C10 0x102088
+/* [RW 1] Command 11 go. */
+#define DMAE_REG_GO_C11 0x10208c
+/* [RW 1] Command 12 go. */
+#define DMAE_REG_GO_C12 0x102090
+/* [RW 1] Command 13 go. */
+#define DMAE_REG_GO_C13 0x102094
+/* [RW 1] Command 14 go. */
+#define DMAE_REG_GO_C14 0x102098
+/* [RW 1] Command 15 go. */
+#define DMAE_REG_GO_C15 0x10209c
+/* [RW 1] Command 2 go. */
+#define DMAE_REG_GO_C2 0x1020a0
+/* [RW 1] Command 3 go. */
+#define DMAE_REG_GO_C3 0x1020a4
+/* [RW 1] Command 4 go. */
+#define DMAE_REG_GO_C4 0x1020a8
+/* [RW 1] Command 5 go. */
+#define DMAE_REG_GO_C5 0x1020ac
+/* [RW 1] Command 6 go. */
+#define DMAE_REG_GO_C6 0x1020b0
+/* [RW 1] Command 7 go. */
+#define DMAE_REG_GO_C7 0x1020b4
+/* [RW 1] Command 8 go. */
+#define DMAE_REG_GO_C8 0x1020b8
+/* [RW 1] Command 9 go. */
+#define DMAE_REG_GO_C9 0x1020bc
+/* [RW 1] DMAE GRC Interface (Target; Master) enable. If 0 - the acknowledge
+   input is disregarded; valid is deasserted; all other signals are treated
+   as usual; if 1 - normal activity. */
+#define DMAE_REG_GRC_IFEN 0x102008
+/* [RW 1] DMAE PCI Interface (Request; Read; Write) enable. If 0 - the
+   acknowledge input is disregarded; valid is deasserted; full is asserted;
+   all other signals are treated as usual; if 1 - normal activity. */
+#define DMAE_REG_PCI_IFEN 0x102004
+/* [RW 4] DMAE-PCI Request Interface initial credit. Write writes the
+   initial value to the credit counter; related to the address. Read returns
+   the current value of the counter. */
+#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD0 0x170060
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD1 0x170064
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD2 0x170068
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD3 0x17006c
+/* [RW 28] UCM Header. */
+#define DORQ_REG_CMHEAD_RX 0x170050
+/* [RW 32] Doorbell address for RBC doorbells (function 0). */
+#define DORQ_REG_DB_ADDR0 0x17008c
+/* [RW 5] Interrupt mask register #0 read/write */
+#define DORQ_REG_DORQ_INT_MASK 0x170180
+/* [R 5] Interrupt register #0 read */
+#define DORQ_REG_DORQ_INT_STS 0x170174
+/* [RC 5] Interrupt register #0 read clear */
+#define DORQ_REG_DORQ_INT_STS_CLR 0x170178
+/* [RW 2] Parity mask register #0 read/write */
+#define DORQ_REG_DORQ_PRTY_MASK 0x170190
+/* [R 2] Parity register #0 read */
+#define DORQ_REG_DORQ_PRTY_STS 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
+/* [RW 8] The address to write the DPM CID to STORM. */
+#define DORQ_REG_DPM_CID_ADDR 0x170044
+/* [RW 5] The DPM mode CID extraction offset. */
+#define DORQ_REG_DPM_CID_OFST 0x170030
+/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
+#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c
+/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
+#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078
+/* [R 13] Current value of the DQ FIFO fill level according to the following
+   pointer. The range is 0 - 256 FIFO rows; where each row stands for one
+   doorbell. */
+#define DORQ_REG_DQ_FILL_LVLF 0x1700a4
+/* [R 1] DQ FIFO full status. Is set when the FIFO fill level is greater
+   than or equal to the full threshold; reset on full clear. */
+#define DORQ_REG_DQ_FULL_ST 0x1700c0
+/* [RW 28] The value sent to CM header in the case of CFC load error.
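+   (Usage sketch; illustrative only: the doorbell-queue FIFO registers
+   above are typically programmed once with watermarks and the full status
+   is then polled before posting more doorbells. The threshold values
+   below are placeholders; REG_WR()/REG_RD() are the driver's assumed GRC
+   accessors:
+
+	REG_WR(bp, DORQ_REG_DQ_FIFO_FULL_TH, 0x2c0);	// placeholder value
+	REG_WR(bp, DORQ_REG_DQ_FIFO_AFULL_TH, 0x280);	// placeholder value
+	while (REG_RD(bp, DORQ_REG_DQ_FULL_ST))
+		cpu_relax();	// FIFO full; hold off doorbells
+   )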
*/ +#define DORQ_REG_ERR_CMHEAD 0x170058 +#define DORQ_REG_IF_EN 0x170004 +#define DORQ_REG_MODE_ACT 0x170008 +/* [RW 5] The normal mode CID extraction offset. */ +#define DORQ_REG_NORM_CID_OFST 0x17002c +/* [RW 28] TCM Header when only TCP context is loaded. */ +#define DORQ_REG_NORM_CMHEAD_TX 0x17004c +/* [RW 3] The number of simultaneous outstanding requests to Context Fetch + Interface. */ +#define DORQ_REG_OUTST_REQ 0x17003c +#define DORQ_REG_PF_USAGE_CNT 0x1701d0 +#define DORQ_REG_REGN 0x170038 +/* [R 4] Current value of response A counter credit. Initial credit is + configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd + register. */ +#define DORQ_REG_RSPA_CRD_CNT 0x1700ac +/* [R 4] Current value of response B counter credit. Initial credit is + configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd + register. */ +#define DORQ_REG_RSPB_CRD_CNT 0x1700b0 +/* [RW 4] The initial credit at the Doorbell Response Interface. The write + writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The + read reads this written value. */ +#define DORQ_REG_RSP_INIT_CRD 0x170048 +/* [RW 4] Initial activity counter value on the load request; when the + shortcut is done. */ +#define DORQ_REG_SHRT_ACT_CNT 0x170070 +/* [RW 28] TCM Header when both ULP and TCP context is loaded. */ +#define DORQ_REG_SHRT_CMHEAD 0x170054 +#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4) +#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0) +#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3) +#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7) +#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2) +#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) +#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0) +#define HC_REG_AGG_INT_0 0x108050 +#define HC_REG_AGG_INT_1 0x108054 +#define HC_REG_ATTN_BIT 0x108120 +#define HC_REG_ATTN_IDX 0x108100 +#define HC_REG_ATTN_MSG0_ADDR_L 0x108018 +#define HC_REG_ATTN_MSG1_ADDR_L 0x108020 +#define HC_REG_ATTN_NUM_P0 0x108038 +#define HC_REG_ATTN_NUM_P1 0x10803c +#define HC_REG_COMMAND_REG 0x108180 +#define HC_REG_CONFIG_0 0x108000 +#define HC_REG_CONFIG_1 0x108004 +#define HC_REG_FUNC_NUM_P0 0x1080ac +#define HC_REG_FUNC_NUM_P1 0x1080b0 +/* [RW 3] Parity mask register #0 read/write */ +#define HC_REG_HC_PRTY_MASK 0x1080a0 +/* [R 3] Parity register #0 read */ +#define HC_REG_HC_PRTY_STS 0x108094 +/* [RC 3] Parity register #0 read clear */ +#define HC_REG_HC_PRTY_STS_CLR 0x108098 +#define HC_REG_INT_MASK 0x108108 +#define HC_REG_LEADING_EDGE_0 0x108040 +#define HC_REG_LEADING_EDGE_1 0x108048 +#define HC_REG_MAIN_MEMORY 0x108800 +#define HC_REG_MAIN_MEMORY_SIZE 152 +#define HC_REG_P0_PROD_CONS 0x108200 +#define HC_REG_P1_PROD_CONS 0x108400 +#define HC_REG_PBA_COMMAND 0x108140 +#define HC_REG_PCI_CONFIG_0 0x108010 +#define HC_REG_PCI_CONFIG_1 0x108014 +#define HC_REG_STATISTIC_COUNTERS 0x109000 +#define HC_REG_TRAILING_EDGE_0 0x108044 +#define HC_REG_TRAILING_EDGE_1 0x10804c +#define HC_REG_UC_RAM_ADDR_0 0x108028 +#define HC_REG_UC_RAM_ADDR_1 0x108030 +#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068 +#define HC_REG_VQID_0 0x108008 +#define HC_REG_VQID_1 0x10800c +#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1) +#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0) +#define IGU_REG_ATTENTION_ACK_BITS 0x130108 +/* [R 4] Debug: attn_fsm */ +#define IGU_REG_ATTN_FSM 0x130054 +#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c +#define IGU_REG_ATTN_MSG_ADDR_L 0x130120 +/* [R 4] Debug: [3] - attention write done message is pending (0-no pending; + * 1-pending). 
+ * [2:0] = PFID. Pending means attention message was sent; but write done
+ * was not yet received. */
+#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
+#define IGU_REG_BLOCK_CONFIGURATION 0x130000
+#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
+#define IGU_REG_COMMAND_REG_CTRL 0x13012c
+/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
+ * is clear. The bits in this register are set and cleared via the producer
+ * command. Data valid only in addresses 0-4. All the rest are zero. */
+#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
+/* [R 5] Debug: ctrl_fsm */
+#define IGU_REG_CTRL_FSM 0x130064
+/* [R 1] data available for error memory. If this bit is clear do not read
+ * from error_handling_memory. */
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK 0x1300a8
+/* [R 11] Parity register #0 read */
+#define IGU_REG_IGU_PRTY_STS 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
+/* [R 4] Debug: int_handle_fsm */
+#define IGU_REG_INT_HANDLE_FSM 0x130050
+#define IGU_REG_LEADING_EDGE_LATCH 0x130134
+/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
+ * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
+ * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
+#define IGU_REG_MAPPING_MEMORY 0x131000
+#define IGU_REG_MAPPING_MEMORY_SIZE 136
+#define IGU_REG_PBA_STATUS_LSB 0x130138
+#define IGU_REG_PBA_STATUS_MSB 0x13013c
+#define IGU_REG_PCI_PF_MSI_EN 0x130140
+#define IGU_REG_PCI_PF_MSIX_EN 0x130144
+#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
+/* [WB_R 32] Each bit represents the pending bits status for that SB. 0 = no
+ * pending; 1 = pending. Pending means interrupt was asserted; and write
+ * done was not received. Data valid only in addresses 0-4. All the rest
+ * are zero. */
+#define IGU_REG_PENDING_BITS_STATUS 0x130300
+#define IGU_REG_PF_CONFIGURATION 0x130154
+/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
+ * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
+ * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
+ * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
+ * - In backward compatible mode; for non default SB; each even line in the
+ * memory holds the U producer and each odd line hold the C producer. The
+ * first 128 producer are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
+ * last 20 producers are for the DSB for each PF. each PF has five segments
+ * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
+#define IGU_REG_PROD_CONS_MEMORY 0x132000
+/* [R 3] Debug: pxp_arb_fsm */
+#define IGU_REG_PXP_ARB_FSM 0x130068
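+
+/* Usage sketch; illustrative only. Composing one IGU mapping-memory entry
+ * from the field layout above ([0] valid; [6:1] vector number; [13:7] FID;
+ * a PF sets bit 13 and puts its number in bits [9:7]). The helper name and
+ * the 4-byte entry stride are assumptions:
+ *
+ *	static u32 igu_map_entry_pf(u32 pf_num, u32 vector)
+ *	{
+ *		return (1 << 0) |		// valid
+ *		       ((vector & 0x3f) << 1) |	// vector number
+ *		       (1 << 13) |		// FID: PF (not VF)
+ *		       ((pf_num & 0x7) << 7);	// FID: PF number
+ *	}
+ *
+ *	REG_WR(bp, IGU_REG_MAPPING_MEMORY + 4 * entry,
+ *	       igu_map_entry_pf(0, vector));
+ */
+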
+/* [RW 6] Write one for each bit will reset the appropriate memory. When the
+ * memory reset finished the appropriate bit will be clear. Bit 0 - mapping
+ * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
+ * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
+#define IGU_REG_RESET_MEMORIES 0x130158
+/* [R 4] Debug: sb_ctrl_fsm */
+#define IGU_REG_SB_CTRL_FSM 0x13004c
+#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c
+#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160
+#define IGU_REG_SB_MASK_LSB 0x130164
+#define IGU_REG_SB_MASK_MSB 0x130168
+/* [RW 16] Number of commands that were dropped without causing an interrupt
+ * due to: read access for WO BAR address; or write access for RO BAR
+ * address or any access for reserved address or PCI function error is set
+ * and address is not MSIX; PBA or cleanup */
+#define IGU_REG_SILENT_DROP 0x13016c
+/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
+ * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
+ * PF; 68-71 number of ATTN messages per PF */
+#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800
+/* [RW 32] Number of cycles the timer mask masking the IGU interrupt when a
+ * timer mask command arrives. Value must be bigger than 100. */
+#define IGU_REG_TIMER_MASKING_VALUE 0x13003c
+#define IGU_REG_TRAILING_EDGE_LATCH 0x130104
+#define IGU_REG_VF_CONFIGURATION 0x130170
+/* [WB_R 32] Each bit represents write done pending bits status for that SB
+ * (MSI/MSIX message was sent and write done was not received yet). 0 =
+ * clear; 1 = set. Data valid only in addresses 0-4. All the rest are zero. */
+#define IGU_REG_WRITE_DONE_PENDING 0x130480
+#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
+#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c
+#define MCP_REG_MCPR_GP_INPUTS 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904
+#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
+#define MCP_REG_MCPR_NVM_ADDR 0x8640c
+#define MCP_REG_MCPR_NVM_CFG4 0x8642c
+#define MCP_REG_MCPR_NVM_COMMAND 0x86400
+#define MCP_REG_MCPR_NVM_READ 0x86410
+#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
+#define MCP_REG_MCPR_NVM_WRITE 0x86408
+#define MCP_REG_MCPR_SCRATCH 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
+/* [R 32] read first 32 bit after inversion of function 0. mapped as
+   follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
+   [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9]
+   GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE
+   glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0;
+   [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16]
+   MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB
+   Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw
+   interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity
+   error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw
+   interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF
+   Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430
+/* [R 32] read first 32 bit after inversion of mcp.
mapped as follows: [0] + NIG attention for function0; [1] NIG attention for function1; [2] GPIO1 + mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; + [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10] + PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event + function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP + Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for + mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19] + BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC + Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw + interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI + Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw + interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434 +/* [R 32] read second 32 bit after inversion of function 0. mapped as + follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438 +#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c +/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0] + PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error; + [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; + [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] + XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12] + DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity + error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux + PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt; + [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error; + [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt; + [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error; + [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440 +/* [R 32] read third 32 bit after inversion of function 0. 
mapped as + follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity + error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5] + PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444 +#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448 +/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0] + CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP + Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient + Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity + error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw + interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14] + MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17] + Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW + timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3 + func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1 + func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW + timers attn_4 func1; [30] General attn0; [31] General attn1; */ +#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c +/* [R 32] read fourth 32 bit after inversion of function 0. mapped as + follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450 +#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454 +/* [R 32] read fourth 32 bit after inversion of mcp. 
mapped as follows: [0] + General attn2; [1] General attn3; [2] General attn4; [3] General attn5; + [4] General attn6; [5] General attn7; [6] General attn8; [7] General + attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11] + General attn13; [12] General attn14; [13] General attn15; [14] General + attn16; [15] General attn17; [16] General attn18; [17] General attn19; + [18] General attn20; [19] General attn21; [20] Main power interrupt; [21] + RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24] + RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout + attention; [27] GRC Latched reserved access attention; [28] MCP Latched + rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched + ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458 +/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as + * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */ +#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700 +/* [W 14] write to this register results with the clear of the latched + signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in + d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP + latch; one in d5 clears GRC Latched timeout attention; one in d6 clears + GRC Latched reserved access attention; one in d7 clears Latched + rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears + Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both + ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears + pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; read + from this register return zero */ +#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c +/* [RW 32] first 32b for enabling the output for function 0 output0. mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function + 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc +/* [RW 32] first 32b for enabling the output for function 1 output0. 
mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function + 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c +/* [RW 32] first 32b for enabling the output for close the gate nig. mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function + 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec +#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c +/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function + 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc +#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c +/* [RW 32] second 32b for enabling the output for function 0 output0. 
mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070 +#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080 +/* [RW 32] second 32b for enabling the output for function 1 output0. mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110 +#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120 +/* [RW 32] second 32b for enabling the output for close the gate nig. mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0 +#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190 +/* [RW 32] second 32b for enabling the output for close the gate pxp. 
mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100 +#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0 +/* [RW 32] third 32b for enabling the output for function 0 output0. mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074 +#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084 +/* [RW 32] third 32b for enabling the output for function 1 output0. mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114 +#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124 +/* [RW 32] third 32b for enabling the output for close the gate nig. 
mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4 +#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194 +/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104 +#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4 +/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8 +/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General 
attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188 +/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8 +#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198 +/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 +#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 +/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu + 128 bit vector */ +#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 +#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004 +#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028 +#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c +#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030 +#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008 +#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c +#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010 +#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014 +#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018 +#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c +#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020 +#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024 +#define MISC_REG_AEU_GENERAL_MASK 0xa61c +/* [RW 32] first 32b for inverting the input for function 0; for each bit: + 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for + function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp; + [4] GPIO3 mcp; [5] GPIO4 mcp; [6] 
GPIO1 function 1; [7] GPIO2 function 1; + [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication + for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS + Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw + interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM + Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI + Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c +#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c +/* [RW 32] second 32b for inverting the input for function 0; for each bit: + 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity + error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw + interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM + Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw + interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12] + DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity + error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux + PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt; + [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error; + [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt; + [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error; + [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */ +#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230 +#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240 +/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0; + [9:8] = raserved. Zero = mask; one = unmask */ +#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060 +#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064 +/* [RW 1] If set a system kill occurred */ +#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610 +/* [RW 32] Represent the status of the input vector to the AEU when a system + kill occurred. The register is reset in por reset. Mapped as follows: [0] + NIG attention for function0; [1] NIG attention for function1; [2] GPIO1 + mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; + [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10] + PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event + function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP + Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for + mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19] + BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC + Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw + interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI + Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw + interrupt; */ +#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600 +#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604 +#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608 +#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c +/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1 + Port. */ +#define MISC_REG_BOND_ID 0xa400 +/* [R 8] These bits indicate the metal revision of the chip. 
This value + starts at 0x00 for each all-layer tape-out and increments by one for each + tape-out. */ +#define MISC_REG_CHIP_METAL 0xa404 +/* [R 16] These bits indicate the part number for the chip. */ +#define MISC_REG_CHIP_NUM 0xa408 +/* [R 4] These bits indicate the base revision of the chip. This value + starts at 0x0 for the A0 tape-out and increments by one for each + all-layer tape-out. */ +#define MISC_REG_CHIP_REV 0xa40c +/* [RW 32] The following driver registers(1...16) represent 16 drivers and + 32 clients. Each client can be controlled by one driver only. One in each + bit represent that this driver control the appropriate client (Ex: bit 5 + is set means this driver control client number 5). addr1 = set; addr0 = + clear; read from both addresses will give the same result = status. write + to address 1 will set a request to control all the clients that their + appropriate bit (in the write command) is set. if the client is free (the + appropriate bit in all the other drivers is clear) one will be written to + that driver register; if the client isn't free the bit will remain zero. + if the appropriate bit is set (the driver request to gain control on a + client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW + interrupt will be asserted). write to address 0 will set a request to + free all the clients that their appropriate bit (in the write command) is + set. if the appropriate bit is clear (the driver request to free a client + it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will + be asserted). */ +#define MISC_REG_DRIVER_CONTROL_1 0xa510 +#define MISC_REG_DRIVER_CONTROL_7 0xa3c8 +/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0 + only. */ +#define MISC_REG_E1HMF_MODE 0xa5f8 +/* [R 1] Status of four port mode path swap input pin. */ +#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c +/* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 - + the path_swap output is equal to 4 port mode path swap input pin; if it + is 1 - the path_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the path_swap output. Reset on Hard reset. */ +#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738 +/* [R 1] Status of 4 port mode port swap input pin. */ +#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754 +/* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 - + the port_swap output is equal to 4 port mode port swap input pin; if it + is 1 - the port_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the port_swap output. Reset on Hard reset. */ +#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734 +/* [RW 32] Debug only: spare RW register reset by core reset */ +#define MISC_REG_GENERIC_CR_0 0xa460 +#define MISC_REG_GENERIC_CR_1 0xa464 +/* [RW 32] Debug only: spare RW register reset by por reset */ +#define MISC_REG_GENERIC_POR_1 0xa474 +/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to + use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO + can not be configured as an output. Each output has its output enable in + the MCP register space; but this bit needs to be set to make use of that. + Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When + set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON. + When set to 0 - vTMON is in reset. 
Bit[6]: setting this bit will change + the i/o to an output and will drive the TimeSync output. Bit[31:7]: + spare. Global register. Reset by hard reset. */ +#define MISC_REG_GEN_PURP_HWG 0xa9a0 +/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of + these bits is written as a '1'; the corresponding SPIO bit will turn off + it's drivers and become an input. This is the reset state of all GPIO + pins. The read value of these bits will be a '1' if that last command + (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff). + [23-20] CLR port 1; 19-16] CLR port 0; When any of these bits is written + as a '1'; the corresponding GPIO bit will drive low. The read value of + these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for + this bit was a #CLR. (reset value 0). [15-12] SET port 1; 11-8] port 0; + SET When any of these bits is written as a '1'; the corresponding GPIO + bit will drive high (if it has that capability). The read value of these + bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this + bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0; + RO; These bits indicate the read value of each of the eight GPIO pins. + This is the result value of the pin; not the drive value. Writing these + bits will have not effect. */ +#define MISC_REG_GPIO 0xa490 +/* [RW 8] These bits enable the GPIO_INTs to signals event to the + IGU/MCP.according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2] + p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2; + [7] p1_gpio_3; */ +#define MISC_REG_GPIO_EVENT_EN 0xa2bc +/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a + '1' to these bit clears the corresponding bit in the #OLD_VALUE register. + This will acknowledge an interrupt on the falling edge of corresponding + GPIO input (reset value 0). [23-16] OLD_SET [23-16] port1; OLD_SET port0; + Writing a '1' to these bit sets the corresponding bit in the #OLD_VALUE + register. This will acknowledge an interrupt on the rising edge of + corresponding SPIO input (reset value 0). [15-12] OLD_VALUE [11-8] port1; + OLD_VALUE port0; RO; These bits indicate the old value of the GPIO input + value. When the ~INT_STATE bit is set; this bit indicates the OLD value + of the pin such that if ~INT_STATE is set and this bit is '0'; then the + interrupt is due to a low to high edge. If ~INT_STATE is set and this bit + is '1'; then the interrupt is due to a high to low edge (reset value 0). + [7-4] INT_STATE port1; [3-0] INT_STATE RO port0; These bits indicate the + current GPIO interrupt state for each GPIO pin. This bit is cleared when + the appropriate #OLD_SET or #OLD_CLR command bit is written. This bit is + set when the GPIO input does not match the current value in #OLD_VALUE + (reset value 0). */ +#define MISC_REG_GPIO_INT 0xa494 +/* [R 28] this field hold the last information that caused reserved + attention. bits [19:0] - address; [22:20] function; [23] reserved; + [27:24] the master that caused the attention - according to the following + encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = + dbu; 8 = dmae */ +#define MISC_REG_GRC_RSV_ATTN 0xa3c0 +/* [R 28] this field hold the last information that caused timeout + attention. 
bits [19:0] - address; [22:20] function; [23] reserved; + [27:24] the master that caused the attention - according to the following + encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = + dbu; 8 = dmae */ +#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4 +/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any + access that does not finish within + ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is + cleared; this timeout is disabled. If this timeout occurs; the GRC shall + assert it attention output. */ +#define MISC_REG_GRC_TIMEOUT_EN 0xa280 +/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of + the bits is: [2:0] OAC reset value 001) CML output buffer bias control; + 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl + (reset value 001) Charge pump current control; 111 for 720u; 011 for + 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00) + Global bias control; When bit 7 is high bias current will be 10 0gh; When + bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8] + Pll_observe (reset value 010) Bits to control observability. bit 10 is + for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl + (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V + and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning + sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted + internally). [14] reserved (reset value 0) Reset for VCO sequencer is + connected to RESET input directly. [15] capRetry_en (reset value 0) + enable retry on cap search failure (inverted). [16] freqMonitor_e (reset + value 0) bit to continuously monitor vco freq (inverted). [17] + freqDetRestart_en (reset value 0) bit to enable restart when not freq + locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable + retry on freq det failure(inverted). [19] pllForceFdone_en (reset value + 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20] + pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass + (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value + 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0) + bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to + enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force + capPass. [26] capRestart (reset value 0) bit to force cap sequencer to + restart. [27] capSelectM_en (reset value 0) bit to enable cap select + register bits. */ +#define MISC_REG_LCPLL_CTRL_1 0xa2a4 +#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8 +/* [RW 4] Interrupt mask register #0 read/write */ +#define MISC_REG_MISC_INT_MASK 0xa388 +/* [RW 1] Parity mask register #0 read/write */ +#define MISC_REG_MISC_PRTY_MASK 0xa398 +/* [R 1] Parity register #0 read */ +#define MISC_REG_MISC_PRTY_STS 0xa38c +/* [RC 1] Parity register #0 read clear */ +#define MISC_REG_MISC_PRTY_STS_CLR 0xa390 +#define MISC_REG_NIG_WOL_P0 0xa270 +#define MISC_REG_NIG_WOL_P1 0xa274 +/* [R 1] If set indicate that the pcie_rst_b was asserted without perst + assertion */ +#define MISC_REG_PCIE_HOT_RESET 0xa618 +/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911. 
+ inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1 + divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1 + divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2 + divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2 + divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9] + freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1] + (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value + 1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16] + Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset + value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value + 1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0); + [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25] + Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27] + testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29] + testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31] + testa_en (reset value 0); */ +#define MISC_REG_PLL_STORM_CTRL_1 0xa294 +#define MISC_REG_PLL_STORM_CTRL_2 0xa298 +#define MISC_REG_PLL_STORM_CTRL_3 0xa29c +#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0 +/* [R 1] Status of 4 port mode enable input pin. */ +#define MISC_REG_PORT4MODE_EN 0xa750 +/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 - + * the port4mode_en output is equal to 4 port mode input pin; if it is 1 - + * the port4mode_en output is equal to bit[1] of this register; [1] - + * Overwrite value. If bit[0] of this register is 1 this is the value that + * receives the port4mode_en output . */ +#define MISC_REG_PORT4MODE_EN_OVWR 0xa720 +/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset; + write/read zero = the specific block is in reset; addr 0-wr- the write + value will be written to the register; addr 1-set - one will be written + to all the bits that have the value of one in the data written (bits that + have the value of zero will not be change) ; addr 2-clear - zero will be + written to all the bits that have the value of one in the data written + (bits that have the value of zero will not be change); addr 3-ignore; + read ignore from all addr except addr 00; inside order of the bits is: + [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc; + [5] rst_mcp_n_reset_reg_hard_core; [6] rst_ mcp_n_hard_core_rst_b; [7] + rst_ mcp_n_reset_cmn_cpu; [8] rst_ mcp_n_reset_cmn_core; [9] rst_rbcn; + [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13] + Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; 16] + rst_pxp_rq_rd_wr; 31:17] reserved */ +#define MISC_REG_RESET_REG_2 0xa590 +/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is + shared with the driver resides */ +#define MISC_REG_SHARED_MEM_ADDR 0xa2b4 +/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1'; + the corresponding SPIO bit will turn off it's drivers and become an + input. This is the reset state of all SPIO pins. The read value of these + bits will be a '1' if that last command (#SET; #CL; or #FLOAT) for this + bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits + is written as a '1'; the corresponding SPIO bit will drive low. The read + value of these bits will be a '1' if that last command (#SET; #CLR; or +#FLOAT) for this bit was a #CLR. (reset value 0). 
+/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
+   the corresponding SPIO bit will turn off its drivers and become an
+   input. This is the reset state of all SPIO pins. The read value of these
+   bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
+   bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
+   is written as a '1'; the corresponding SPIO bit will drive low. The read
+   value of these bits will be a '1' if that last command (#SET; #CLR; or
+   #FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
+   these bits is written as a '1'; the corresponding SPIO bit will drive
+   high (if it has that capability). The read value of these bits will be a
+   '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
+   (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
+   each of the eight SPIO pins. This is the result value of the pin; not the
+   drive value. Writing these bits will have no effect. Each 8 bits field
+   is divided as follows: [0] VAUX Enable; when pulsed low; enables supply
+   from VAUX. (This is an output pin only; the FLOAT field is not applicable
+   for this pin); [1] VAUX Disable; when pulsed low; disables supply from
+   VAUX. (This is an output pin only; FLOAT field is not applicable for this
+   pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
+   select VAUX supply. (This is an output pin only; it is not controlled by
+   the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
+   field is not applicable for this pin; only the VALUE field is relevant -
+   it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6]
+   Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
+   device ID select; read by UMP firmware. */
+#define MISC_REG_SPIO 0xa4fc
+/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MC.
+   according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5;
+   [7:6] reserved */
+#define MISC_REG_SPIO_EVENT_EN 0xa2b8
+/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the
+   corresponding bit in the #OLD_VALUE register. This will acknowledge an
+   interrupt on the falling edge of the corresponding SPIO input (reset
+   value 0). [23-16] OLD_SET Writing a '1' to these bits sets the
+   corresponding bit in the #OLD_VALUE register. This will acknowledge an
+   interrupt on the rising edge of the corresponding SPIO input (reset value
+   0). [15-8] OLD_VALUE RO; These bits indicate the old value of the SPIO
+   input value. When the ~INT_STATE bit is set; this bit indicates the OLD
+   value of the pin such that if ~INT_STATE is set and this bit is '0'; then
+   the interrupt is due to a low to high edge. If ~INT_STATE is set and this
+   bit is '1'; then the interrupt is due to a high to low edge (reset value
+   0). [7-0] INT_STATE RO; These bits indicate the current SPIO interrupt
+   state for each SPIO pin. This bit is cleared when the appropriate
+   #OLD_SET or #OLD_CLR command bit is written. This bit is set when the
+   SPIO input does not match the current value in #OLD_VALUE (reset value
+   0). */
+#define MISC_REG_SPIO_INT 0xa500
+/* [RW 32] reload value for counter 4; the value will be reloaded if the
+   counter reaches zero and the reload bit
+   (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
+#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
+/* [RW 32] the value of the counter for sw timers 1-8. there are 8 addresses
+   in this register. address 0 - timer 1; address 1 - timer 2; ... address 7
+   - timer 8 */
+#define MISC_REG_SW_TIMER_VAL 0xa5c0
+/* [R 1] Status of two port mode path swap input pin. */
+#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758
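+/* Illustrative sketch (assumes the driver's REG_RD()/REG_WR() helpers and
+ * a read-modify-write discipline over the command bands described above):
+ * drive SPIO pin 4 low via the CLR band [23-16], then release it back to
+ * an input via the FLOAT band [31-24]:
+ *
+ *	u32 spio = REG_RD(bp, MISC_REG_SPIO);
+ *
+ *	REG_WR(bp, MISC_REG_SPIO, spio | (1 << (4 + 16)));	#CLR spio_4
+ *	REG_WR(bp, MISC_REG_SPIO, spio | (1 << (4 + 24)));	#FLOAT spio_4
+ */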
+/* [RW 2] 2 port swap overwrite. [0] - Overwrite control; if it is 0 - the
+   path_swap output is equal to the 2 port mode path swap input pin; if it
+   is 1 - the path_swap output is equal to bit[1] of this register; [1] -
+   Overwrite value. If bit[0] of this register is 1 this is the value that
+   receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c
+/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
+   loaded; 0 - prepare; 1 - unprepare */
+#define MISC_REG_UNPREPARED 0xa424
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
+/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
+ * not it is the recipient of the message on the MDIO interface. The value
+ * is compared to the value on ctrl_md_devad. Drives output
+ * misc_xgxs0_phy_addr. Global register. */
+#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
+/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
+   side. This should be less than or equal to phy_port_mode; if some of the
+   ports are not used. This enables reduction of frequency on the core side.
+   00 - Single Port Mode; 01 - Dual Port Mode; 10 - Tri Port Mode; 11 - Quad
+   Port Mode. This is a strap input for the XMAC_MP core; and should be
+   changed only while reset is held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964
+/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp
+   Core. 00 - Single Port Mode; 01 - Dual Port Mode; 1x - Quad Port Mode.
+   This is a strap input for the XMAC_MP core; and should be changed only
+   while reset is held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960
+/* [RW 32] 1 [47] Packet Size = 64. Writes to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_RX_STAT_GR64_LO 0x200
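+/* Illustrative sketch: the MSTAT counters above are offsets within a
+ * per-port MSTAT block and clear on read, so a statistics pass reads and
+ * accumulates. mstat_base and the stats field are hypothetical names here;
+ * REG_RD() is the driver's register read helper:
+ *
+ *	u32 gr64 = REG_RD(bp, mstat_base + MSTAT_REG_RX_STAT_GR64_LO);
+ *
+ *	stats->rx_64_byte_packets += gr64;	accumulate; the read cleared it
+ */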
+/* [RW 32] 1 [00] Tx Good Packet Count. Writes to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_TX_STAT_GTXPOK_LO 0
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18)
+/* [RW 1] Input enable for RX_BMAC0 IF */
+#define NIG_REG_BMAC0_IN_EN 0x100ac
+/* [RW 1] output enable for TX_BMAC0 IF */
+#define NIG_REG_BMAC0_OUT_EN 0x100e0
+/* [RW 1] output enable for TX BMAC pause port 0 IF */
+#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110
+/* [RW 1] output enable for RX_BMAC0_REGS IF */
+#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8
+/* [RW 1] output enable for RX BRB1 port0 IF */
+#define NIG_REG_BRB0_OUT_EN 0x100f8
+/* [RW 1] Input enable for TX BRB1 pause port 0 IF */
+#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4
+/* [RW 1] output enable for RX BRB1 port1 IF */
+#define NIG_REG_BRB1_OUT_EN 0x100fc
+/* [RW 1] Input enable for TX BRB1 pause port 1 IF */
+#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8
+/* [RW 1] output enable for RX BRB1 LP IF */
+#define NIG_REG_BRB_LB_OUT_EN 0x10100
+/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64]
+   error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush;
+   [73:72] vnic_num; [81:74] sideband_info */
+#define NIG_REG_DEBUG_PACKET_LB 0x10800
+/* [RW 1] Input enable for TX Debug packet */
+#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc
+/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all
+   packets from the PBF are not forwarded to the MAC and are just deleted
+   from the FIFO. The first packet may be deleted from the middle; and the
+   last packet will always be deleted till the end. */
+#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060
+/* [RW 1] Output enable to EMAC0 */
+#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120
+/* [RW 1] MAC configuration for packets of port0. If 1 - all packets output
+   to the emac for port0; otherwise to the bmac for port0 */
+#define NIG_REG_EGRESS_EMAC0_PORT 0x10058
+/* [RW 1] Input enable for TX PBF user packet port0 IF */
+#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc
+/* [RW 1] Input enable for TX PBF user packet port1 IF */
+#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0
+/* [RW 1] Input enable for TX UMP management packet port0 IF */
+#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4
+/* [RW 1] Input enable for RX_EMAC0 IF */
+#define NIG_REG_EMAC0_IN_EN 0x100a4
+/* [RW 1] output enable for TX EMAC pause port 0 IF */
+#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118
+/* [R 1] status from emac0. This bit is set when MDINT from either the
+   EXT_MDINT pin or from the Copper PHY is driven low. This condition must
+   be cleared in the attached PHY device that is driving the MDINT pin. */
+#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494
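+/* Illustrative sketch (assumes the driver's REG_WR() helper): steering
+ * port0 egress to the EMAC rather than the BMAC per the
+ * NIG_REG_EGRESS_EMAC0_PORT description above, then enabling its output:
+ *
+ *	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, 1);	1 = emac; 0 = bmac
+ *	REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN, 1);
+ */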
+/* [WB 48] This address space contains BMAC0 registers. The BMAC registers
+   are described in appendix A. In order to access the BMAC0 registers; the
+   base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be
+   added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00
+/* [WB 48] This address space contains BMAC1 registers. The BMAC registers
+   are described in appendix A. In order to access the BMAC1 registers; the
+   base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be
+   added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC1_MEM 0x11000
+/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */
+#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0
+/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data
+   packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */
+#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4
+/* [RW 27] 0 - must be active for Everest A0; 1 - for Everest B0 when latch
+   logic for interrupts must be used. Enable per bit of interrupt of
+   ~latch_status.latch_status */
+#define NIG_REG_LATCH_BC_0 0x16210
+/* [RW 27] Latch for each interrupt from Unicore. b[0]
+   status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete;
+   b[2] status_emac0_misc_cfg_change; b[3] status_emac0_misc_link_status;
+   b[4] status_emac0_misc_link_change; b[5] status_emac0_misc_attn;
+   b[6] status_serdes0_mac_crs; b[7] status_serdes0_autoneg_complete;
+   b[8] status_serdes0_fiber_rxact; b[9] status_serdes0_link_status;
+   b[10] status_serdes0_mr_page_rx; b[11] status_serdes0_cl73_an_complete;
+   b[12] status_serdes0_cl73_mr_page_rx; b[13] status_serdes0_rx_sigdet;
+   b[14] status_xgxs0_remotemdioreq; b[15] status_xgxs0_link10g;
+   b[16] status_xgxs0_autoneg_complete; b[17] status_xgxs0_fiber_rxact;
+   b[21:18] status_xgxs0_link_status; b[22] status_xgxs0_mr_page_rx;
+   b[23] status_xgxs0_cl73_an_complete; b[24] status_xgxs0_cl73_mr_page_rx;
+   b[25] status_xgxs0_rx_sigdet; b[26] status_xgxs0_mac_crs */
+#define NIG_REG_LATCH_STATUS_0 0x18000
+/* [RW 1] led 10g for port 0 */
+#define NIG_REG_LED_10G_P0 0x10320
+/* [RW 1] led 10g for port 1 */
+#define NIG_REG_LED_10G_P1 0x10324
+/* [RW 1] Port0: This bit is set to enable the use of the
+   ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
+   defined below. If this bit is cleared; then the blink rate will be about
+   8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318
+/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for
+   the Traffic LED in milliseconds. Must be a non-zero value. This 12-bit
+   field is reset to 0x080; giving a default blink rate of approximately
+   8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310
+/* [RW 1] Port0: If set along with the
+   ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
+   bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
+   bit; the Traffic LED will blink with the blink rate specified in
+   ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+   ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+   fields. */
+#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308
+/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The
+   Traffic LED will then be controlled via bit ~nig_registers_
+   led_control_traffic_p0.led_control_traffic_p0 and bit
+   ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */
+#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8
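+/* Illustrative sketch (assumes the driver's REG_WR() helper): forcing the
+ * port0 Traffic LED on from software per the override scheme above, and
+ * blinking it at roughly 4Hz (the rate value is an example only):
+ *
+ *	REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 1);
+ *	REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0, 1);		LED on
+ *	REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0, 1);
+ *	REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0, 0x100);	256 ms cycle
+ */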
+/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;
+   turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also
+   set; the LED will blink with the blink rate specified in
+   ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+   ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+   fields. */
+#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300
+/* [RW 4] led mode for port0: 0 - MAC; 1-3 - PHY1; 4 - MAC2; 5-7 - PHY4;
+   8 - MAC3; 9-11 - PHY7; 12 - MAC4; 13-15 - PHY10; */
+#define NIG_REG_LED_MODE_P0 0x102f0
+/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1 -
+   tsdm enable; b2 - usdm enable */
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074
+/* [RW 1] SAFC enable for port0. This register may get 1 only when
+   ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable = 0 for the same
+   port */
+#define NIG_REG_LLFC_ENABLE_0 0x16208
+#define NIG_REG_LLFC_ENABLE_1 0x1620c
+/* [RW 16] classes that are high-priority for port0 */
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
+/* [RW 16] classes that are low-priority for port0 */
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
+/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
+#define NIG_REG_LLFC_OUT_EN_0 0x160c8
+#define NIG_REG_LLFC_OUT_EN_1 0x160cc
+#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
+#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
+#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
+#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c
+/* [RW 2] Determine the classification participants. 0: no classification.
+   1: classification upon VLAN id. 2: classification upon MAC address. 3:
+   classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH0_CLS_TYPE 0x16080
+/* [RW 32] cm header for llh0 */
+#define NIG_REG_LLH0_CM_HEADER 0x1007c
+#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc
+#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0
+/* [RW 16] destination TCP address 1. The LLH will look for this address in
+   all incoming packets. */
+#define NIG_REG_LLH0_DEST_TCP_0 0x10220
+/* [RW 16] destination UDP address 1. The LLH will look for this address in
+   all incoming packets. */
+#define NIG_REG_LLH0_DEST_UDP_0 0x10214
+#define NIG_REG_LLH0_ERROR_MASK 0x1008c
+/* [RW 8] event id for llh0 */
+#define NIG_REG_LLH0_EVENT_ID 0x10084
+#define NIG_REG_LLH0_FUNC_EN 0x160fc
+#define NIG_REG_LLH0_FUNC_MEM 0x16180
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
+#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
+/* [RW 1] Determine the IP version to look for in
+   ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1 - IPv4 */
+#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208
+/* [RW 1] t bit for llh0 */
+#define NIG_REG_LLH0_T_BIT 0x10074
+/* [RW 12] VLAN ID 1. In case of a VLAN packet the LLH will look for this
+   ID. */
+#define NIG_REG_LLH0_VLAN_ID_0 0x1022c
+/* [RW 8] init credit counter for port0 in LLH */
+#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
+#define NIG_REG_LLH0_XCM_MASK 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
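+/* Illustrative sketch (assumes the driver's REG_WR() helper): enabling
+ * MAC-address classification in the port0 LLH per the
+ * NIG_REG_LLH0_CLS_TYPE encoding above:
+ *
+ *	REG_WR(bp, NIG_REG_LLH0_CLS_TYPE, 2);	2 = classify on MAC address
+ */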
+/* [RW 2] Determine the classification participants. 0: no classification.
+   1: classification upon VLAN id. 2: classification upon MAC address. 3:
+   classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH1_CLS_TYPE 0x16084
+/* [RW 32] cm header for llh1 */
+#define NIG_REG_LLH1_CM_HEADER 0x10080
+#define NIG_REG_LLH1_ERROR_MASK 0x10090
+/* [RW 8] event id for llh1 */
+#define NIG_REG_LLH1_EVENT_ID 0x10088
+#define NIG_REG_LLH1_FUNC_MEM 0x161c0
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
+#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ * sending it to the BRB or calculating WoL on it. This bit controls port 1
+ * only. The legacy llh_multi_function_mode bit controls port 0. */
+#define NIG_REG_LLH1_MF_MODE 0x18614
+/* [RW 8] init credit counter for port1 in LLH */
+#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
+#define NIG_REG_LLH1_XCM_MASK 0x10134
+/* [RW 1] When this bit is set; the LLH will expect all packets to be with
+   e1hov */
+#define NIG_REG_LLH_E1HOV_MODE 0x160d8
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+   sending it to the BRB or calculating WoL on it. */
+#define NIG_REG_LLH_MF_MODE 0x16024
+#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330
+#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334
+/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0
+   block. */
+#define NIG_REG_NIG_EMAC0_EN 0x1003c
+/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1
+   block. */
+#define NIG_REG_NIG_EMAC1_EN 0x10040
+/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
+   EMAC0 to strip the CRC from the ingress packets. */
+#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044
+/* [R 32] Interrupt register #0 read */
+#define NIG_REG_NIG_INT_STS_0 0x103b0
+#define NIG_REG_NIG_INT_STS_1 0x103c0
+/* [R 32] Legacy E1 and E1H location for parity error mask register. */
+#define NIG_REG_NIG_PRTY_MASK 0x103dc
+/* [RW 32] Parity mask register #0 read/write */
+#define NIG_REG_NIG_PRTY_MASK_0 0x183c8
+#define NIG_REG_NIG_PRTY_MASK_1 0x183d8
+/* [R 32] Legacy E1 and E1H location for parity error status register. */
+#define NIG_REG_NIG_PRTY_STS 0x103d0
+/* [R 32] Parity register #0 read */
+#define NIG_REG_NIG_PRTY_STS_0 0x183bc
+#define NIG_REG_NIG_PRTY_STS_1 0x183cc
+/* [R 32] Legacy E1 and E1H location for parity error status clear
+   register. */
+#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4
+/* [RC 32] Parity register #0 read clear */
+#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0
+#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0
+#define MCPR_IMC_COMMAND_ENABLE (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
+/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
+ * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
+ * disabled when this bit is set. */
+#define NIG_REG_P0_HWPFC_ENABLE 0x18078
+#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
+#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
+/* [RW 1] Input enable for RX MAC interface. */
+#define NIG_REG_P0_MAC_IN_EN 0x185ac
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P0_MAC_OUT_EN 0x185b0
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7.
+ * The 3-bit priority field is extracted from the outer-most VLAN in the
+ * receive packet. Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
+ * priority is mapped to COS 3 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
+ * priority is mapped to COS 4 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
+ * priority is mapped to COS 5 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+/* [RW 15] Specify which of the credit registers the client is to be mapped
+ * to. Bits [2:0] are for client 0; bits [14:12] are for client 4. For
+ * clients that are not subject to WFQ credit blocking - their
+ * specifications here are not used. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits [3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers cannot be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits [3:0]
+ * are for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter.
+ * The reset default is set for management and debug to use credit registers
+ * 6, 7, and 8, respectively, and COSes 0-5 to use credit registers 0-5
+ * respectively (0x543210876). Note that credit registers cannot be shared
+ * between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c
+/* [RW 5] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client). Default value is set to enable
+ * strict priorities for clients 0-2 - management and debug traffic. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8
+/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client). Default value is 0 for not using WFQ credit
+ * blocking. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ * two round-robin arbitration slots to avoid starvation. A value of 0 means
+ * no strict priority cycles - the strict priority with anti-starvation
+ * arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4
+/* [RW 15] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
+ * are for priority 0 client; bits [14:12] are for priority 4 client. The
+ * clients are assigned the following IDs: 0 - management; 1 - debug traffic
+ * from this port; 2 - debug traffic from other port; 3 - COS0 traffic;
+ * 4 - COS1 traffic. The reset value[14:0] is set to 0x4688
+ * (15'b100_011_010_001_000) for management at priority 0; debug traffic at
+ * priorities 1 and 2; COS0 traffic at priority 3; and COS1 traffic at
+ * priority 4. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c
+#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
+#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ * value. Priority 0 is the highest priority.
+ * Bits [3:0] are for priority 0 client; bits [35:32] are for priority 8
+ * client. The clients are assigned the following IDs: 0 - management;
+ * 1 - debug traffic from this port; 2 - debug traffic from other port;
+ * 3 - COS0 traffic; 4 - COS1 traffic; 5 - COS2 traffic; 6 - COS3 traffic;
+ * 7 - COS4 traffic; 8 - COS5 traffic. The reset value[35:0] is set to
+ * 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35:32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0 - management; 1 - debug traffic from this port;
+ * 2 - debug traffic from other port; 3 - COS0 traffic; 4 - COS1 traffic;
+ * 5 - COS2 traffic; 6 - COS3 traffic; 7 - COS4 traffic; 8 - COS5 traffic.
+ * The reset value[35:0] is set to 0x345678021. This is a new register
+ * (with 2_) added in E3 B0 to accommodate the 9 input clients to ETS
+ * arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+#define NIG_REG_P1_MAC_IN_EN 0x185c0
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P1_MAC_OUT_EN 0x185c4
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in the receive
+ * packet. Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c
+/* [R 1] TLLH FIFO is empty. */
+#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338
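+/* Illustrative sketch (assumes the driver's REG_WR() helper): mapping PFC
+ * priorities 0-2 to COS 0 and priority 3 to COS 1 on port 1, per the
+ * bit-map semantics above:
+ *
+ *	REG_WR(bp, NIG_REG_P1_RX_COS0_PRIORITY_MASK, 0x7);	priorities 0-2
+ *	REG_WR(bp, NIG_REG_P1_RX_COS1_PRIORITY_MASK, 0x8);	priority 3
+ */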
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits [3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers cannot be shared between clients. Note also that there are only
+ * COS0-2 in port 1 - there are a total of 6 clients in port 1. Only credit
+ * registers 0-5 are valid. This register should be configured appropriately
+ * before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits [3:0]
+ * are for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers cannot be shared between clients. Note also that there are only
+ * COS0-2 in port 1 - there are a total of 6 clients in port 1. Only credit
+ * registers 0-5 are valid. This register should be configured appropriately
+ * before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec
+/* [RW 9] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client2): 0 - management; 1 - debug
+ * traffic from this port; 2 - debug traffic from other port; 3 - COS0
+ * traffic; 4 - COS1 traffic; 5 - COS2 traffic; 6 - COS3 traffic; 7 - COS4
+ * traffic; 8 - COS5 traffic. Default value is set to enable strict
+ * priorities for all clients. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234
+/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client2): 0 - management; 1 - debug traffic from this
+ * port; 2 - debug traffic from other port; 3 - COS0 traffic; 4 - COS1
+ * traffic; 5 - COS2 traffic; 6 - COS3 traffic; 7 - COS4 traffic; 8 - COS5
+ * traffic. Default value is 0 for not using WFQ credit blocking. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0
+/* [RW 12] Specify the number of strict priority arbitration slots between
+   two round-robin arbitration slots to avoid starvation. A value of 0 means
+   no strict priority cycles - the strict priority with anti-starvation
+   arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240
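+/* Illustrative ETS/WFQ configuration sketch (assumes the driver's REG_WR()
+ * helper; the weight and bound values are examples only): give COS0 on
+ * port 1 a WFQ weight and an upper bound before enabling credit blocking:
+ *
+ *	REG_WR(bp, NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0, 100);
+ *	REG_WR(bp, NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0, 0x1000);
+ */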
+/* [RW 32] Specify the client number to be assigned to each priority of the
+   strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+   value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+   client; bits [35:32] are for priority 8 client. The clients are assigned
+   the following IDs: 0 - management; 1 - debug traffic from this port;
+   2 - debug traffic from other port; 3 - COS0 traffic; 4 - COS1 traffic;
+   5 - COS2 traffic; 6 - COS3 traffic; 7 - COS4 traffic; 8 - COS5 traffic.
+   The reset value[35:0] is set to 0x345678021. This is a new register
+   (with 2_) added in E3 B0 to accommodate the 9 input clients to ETS
+   arbiter. Note that this register is the same as the one for port 0,
+   except that port 1 only has COS 0-2 traffic. There is no traffic for COS
+   3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0
+/* [RW 4] Specify the client number to be assigned to each priority of the
+   strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+   value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+   client; bits [35:32] are for priority 8 client. The clients are assigned
+   the following IDs: 0 - management; 1 - debug traffic from this port;
+   2 - debug traffic from other port; 3 - COS0 traffic; 4 - COS1 traffic;
+   5 - COS2 traffic; 6 - COS3 traffic; 7 - COS4 traffic; 8 - COS5 traffic.
+   The reset value[35:0] is set to 0x345678021. This is a new register
+   (with 2_) added in E3 B0 to accommodate the 9 input clients to ETS
+   arbiter. Note that this register is the same as the one for port 0,
+   except that port 1 only has COS 0-2 traffic. There is no traffic for COS
+   3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
+/* [R 1] TX FIFO for transmitting data to MAC is empty. */
+#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
+/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
+   forwarded to the host. */
+#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+/* [RW 1] Pause enable for port0. This register may get 1 only when
+   ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable = 0 for the same
+   port */
+#define NIG_REG_PAUSE_ENABLE_0 0x160c0
+#define NIG_REG_PAUSE_ENABLE_1 0x160c4
+/* [RW 1] Input enable for RX PBF LP IF */
+#define NIG_REG_PBF_LB_IN_EN 0x100b4
+/* [RW 1] Value of this register will be transmitted to port swap when
+   ~nig_registers_strap_override.strap_override = 1 */
+#define NIG_REG_PORT_SWAP 0x10394
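+/* Illustrative sketch (assumes the driver's REG_WR() helper and the
+ * "+ port*4" spacing of these per-port registers): PAUSE, SAFC and PPP are
+ * mutually exclusive per port, so the other two modes are cleared before
+ * enabling PAUSE:
+ *
+ *	REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);	SAFC off
+ *	REG_WR(bp, NIG_REG_PPP_ENABLE_0 + port*4, 0);	PPP off
+ *	REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
+ */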
+/* [RW 1] PPP enable for port0. This register may get 1 only when
+ * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable = 0 for the
+ * same port */
+#define NIG_REG_PPP_ENABLE_0 0x160b0
+#define NIG_REG_PPP_ENABLE_1 0x160b4
+/* [RW 1] output enable for RX parser descriptor IF */
+#define NIG_REG_PRS_EOP_OUT_EN 0x10104
+/* [RW 1] Input enable for RX parser request IF */
+#define NIG_REG_PRS_REQ_IN_EN 0x100b8
+/* [RW 5] control to serdes - CL45 DEVAD */
+#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370
+/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c
+/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374
+/* [R 1] status from serdes0 that inputs to interrupt logic of link
+   status */
+#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+   for port0 */
+#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+   for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+   between 1024 and 1522 bytes for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+   1523 bytes and above for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+   for port1 */
+#define NIG_REG_STAT1_BRB_DISCARD 0x10628
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+   between 1024 and 1522 bytes for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+   1523 bytes and above for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0
+/* [WB_R 64] Rx statistics : User octets received for LP */
+#define NIG_REG_STAT2_BRB_OCTET 0x107e0
+#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328
+#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c
+/* [RW 1] port swap mux selection. If this register equals 0 then port
+   swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then
+   port swap is equal to ~nig_registers_port_swap.port_swap */
+#define NIG_REG_STRAP_OVERRIDE 0x10398
+/* [RW 1] output enable for RX_XCM0 IF */
+#define NIG_REG_XCM0_OUT_EN 0x100f0
+/* [RW 1] output enable for RX_XCM1 IF */
+#define NIG_REG_XCM1_OUT_EN 0x100f4
+/* [RW 1] control to xgxs - remote PHY in-band MDIO */
+#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348
+/* [RW 5] control to xgxs - CL45 DEVAD */
+#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c
+/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338
+/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340
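+/* Illustrative sketch (assumes the driver's REG_RD() helper): sampling the
+ * port0 BRB discard counter above during a statistics pass:
+ *
+ *	u32 discards = REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD);
+ */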
+/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
+#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680
+/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */
+#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684
+/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */
+#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8
+/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */
+#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command
+ * arbiter. */
+#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 0. */
+#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 1. */
+#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4
+/* [RW 31] The weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT 0x15c054
+/* [RW 31] The weight of COS0 in the port 0 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8
+/* [RW 31] The weight of COS0 in the port 1 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0
+/* [RW 31] The upper bound of the weight of COS1 in the ETS command
+ * arbiter. */
+#define PBF_REG_COS1_UPPER_BOUND 0x15c060
+/* [RW 31] The weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT 0x15c058
+/* [RW 31] The weight of COS1 in the port 0 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac
+/* [RW 31] The weight of COS1 in the port 1 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4
+/* [RW 31] The weight of COS2 in the port 0 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0
+/* [RW 31] The weight of COS2 in the port 1 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8
+/* [RW 31] The weight of COS3 in the port 0 ETS command arbiter. */
+#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4
+/* [RW 31] The weight of COS4 in the port 0 ETS command arbiter. */
+#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8
+/* [RW 31] The weight of COS5 in the port 0 ETS command arbiter. */
+#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc
+/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_LB_Q 0x140338
+/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q0 0x14033c
+/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q1 0x140340
+/* [RW 1] Disable processing further tasks from port 0 (after ending the
+   current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
+/* [RW 1] Disable processing further tasks from port 1 (after ending the
+   current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060
+/* [RW 1] Disable processing further tasks from port 4 (after ending the
+   current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
+#define PBF_REG_DISABLE_PF 0x1402e8
+/* [RW 18] For port 0: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped.
+ * For clients which are not credit blocked; their mapping is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288
+/* [RW 9] For port 1: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c
+/* [RW 6] For port 0: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278
+/* [RW 3] For port 1: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c
+/* [RW 6] For port 0: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280
+/* [RW 3] For port 1: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284
+/* [RW 16] For port 0: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0
+/* [RW 16] For port 1: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4
+/* [RW 18] For port 0: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest, to which the RR output is connected (this is
+ * not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270
+/* [RW 9] For port 1: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest, to which the RR output is connected (this is
+ * not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274
+/* [RW 1] Indicates that ETS is performed between the COSes in the command
+ * arbiter. If reset; strict priority w/ anti-starvation will be performed
+ * w/o WFQ. */
+#define PBF_REG_ETS_ENABLED 0x15c050
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
+ * priority in the command arbiter. */
+#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
+#define PBF_REG_IF_ENABLE_REG 0x140044
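+/* Illustrative sketch (assumes the driver's REG_WR() helper): turning on
+ * ETS in the PBF command arbiter with a modest anti-starvation window (the
+ * slot count is an example value):
+ *
+ *	REG_WR(bp, PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x8);
+ *	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
+ */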
+/* [RW 1] Init bit. When set the initial credits are copied to the credit
+   registers (except the port credits). Should be set and then reset after
+   the configuration of the block has ended. */
+#define PBF_REG_INIT 0x140000
+/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_LB_Q 0x15c248
+/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q0 0x15c230
+/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q1 0x15c234
+/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
+   copied to the credit register. Should be set and then reset after the
+   configuration of the port has ended. */
+#define PBF_REG_INIT_P0 0x140004
+/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is
+   copied to the credit register. Should be set and then reset after the
+   configuration of the port has ended. */
+#define PBF_REG_INIT_P1 0x140008
+/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is
+   copied to the credit register. Should be set and then reset after the
+   configuration of the port has ended. */
+#define PBF_REG_INIT_P4 0x14000c
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for the LB queue. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for queue 0. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for queue 1. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c
+/* [RW 1] Enable for mac interface 0. */
+#define PBF_REG_MAC_IF0_ENABLE 0x140030
+/* [RW 1] Enable for mac interface 1. */
+#define PBF_REG_MAC_IF1_ENABLE 0x140034
+/* [RW 1] Enable for the loopback interface. */
+#define PBF_REG_MAC_LB_ENABLE 0x140040
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
+/* [RW 16] The number of strict priority arbitration slots between 2 RR
+ * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
+ * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
+#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
+/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
+   is not supported. */
+#define PBF_REG_P0_ARB_THRSH 0x1400e4
+/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte
+   lines. */
+#define PBF_REG_P0_CREDIT 0x140200
+/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
+   lines. */
+#define PBF_REG_P0_INIT_CRD 0x1400d0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for port 0. Reset upon init. */
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308
+/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
+#define PBF_REG_P0_PAUSE_ENABLE 0x140014
+/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
+#define PBF_REG_P0_TASK_CNT 0x140204
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 0. Reset upon init. */
+#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 0. */
+#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc
+/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_CREDIT 0x140208
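+/* Illustrative sketch (assumes the driver's REG_WR() helper): per the init
+ * bit semantics above, credits are loaded by pulsing the bit once the port
+ * configuration is done (init_crd is a value chosen elsewhere):
+ *
+ *	REG_WR(bp, PBF_REG_P0_INIT_CRD, init_crd);
+ *	REG_WR(bp, PBF_REG_INIT_P0, 1);		copy the initial credit
+ *	REG_WR(bp, PBF_REG_INIT_P0, 0);
+ */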
+/* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_INIT_CRD 0x1400d4
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for port 1. Reset upon init. */
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c
+/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
+#define PBF_REG_P1_TASK_CNT 0x14020c
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 1. Reset upon init. */
+#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 1. */
+#define PBF_REG_P1_TQ_OCCUPANCY 0x140300
+/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte
+   lines. */
+#define PBF_REG_P4_CREDIT 0x140210
+/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
+   lines. */
+#define PBF_REG_P4_INIT_CRD 0x1400e0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added
+ * for port 4. Reset upon init. */
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310
+/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
+#define PBF_REG_P4_TASK_CNT 0x140214
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 4. Reset upon init. */
+#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 4. */
+#define PBF_REG_P4_TQ_OCCUPANCY 0x140304
+/* [RW 5] Interrupt mask register #0 read/write */
+#define PBF_REG_PBF_INT_MASK 0x1401d4
+/* [R 5] Interrupt register #0 read */
+#define PBF_REG_PBF_INT_STS 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PBF_REG_TAG_ETHERTYPE_0 0x15c090
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PBF_REG_TAG_LEN_0 0x15c09c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
+ * queue. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
+ * queue 0. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390
+/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
+ * Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394
+/* [R 13] Number of 8 byte lines occupied in the task queue of the LB
+ * queue. */
+#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 0. */
+#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
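+/* Illustrative sketch (assumes the driver's REG_RD() helper and the
+ * kernel's usleep_range()): waiting for a PBF queue to drain by polling
+ * its task queue occupancy, as a flush routine might do:
+ *
+ *	while (REG_RD(bp, PBF_REG_TQ_OCCUPANCY_Q0))
+ *		usleep_range(100, 200);
+ */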
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 1. */
+#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
+#define PB_REG_CONTROL 0
+/* [RW 2] Interrupt mask register #0 read/write */
+#define PB_REG_PB_INT_MASK 0x28
+/* [R 2] Interrupt register #0 read */
+#define PB_REG_PB_INT_STS 0x1c
+/* [RW 4] Parity mask register #0 read/write */
+#define PB_REG_PB_PRTY_MASK 0x38
+/* [R 4] Parity register #0 read */
+#define PB_REG_PB_PRTY_STS 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR 0x30
+#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2)
+/* [R 8] Config space A attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space A attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010
+/* [R 8] Config space B attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space B attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014
+/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194
+/* [RW 18] Type B VF inbound interrupt table for CSDM: bits [17:9] - mask;
+ * bits [8:0] - address. Bits [1:0] must be zero (DW resolution address). */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c
+/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c
+/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100
+/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108
+/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac
+/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
+ * that the FLR register of the corresponding PF was set. Set by PXP. Reset
+ * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028
+/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418
+/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024
+/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018
+/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c
+/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020
+/* [R 8] Each bit indicates an incorrect behavior in the user RX interface.
+ * Bit 0 - Target memory read arrived with a correctable error. Bit 1 -
+ * Target memory read arrived with an uncorrectable error. Bit 2 -
+ * Configuration RW arrived with a correctable error. Bit 3 - Configuration
+ * RW arrived with an uncorrectable error. Bit 4 - Completion with
+ * Configuration Request Retry Status. Bit 5 - Expansion ROM access received
+ * with a write request. Bit 6 - Completion with pcie_rx_err of 0000;
+ * CMPL_STATUS of non-zero; and pcie_rx_last not asserted. Bit 7 -
+ * Completion with pcie_rx_err of 1010; and pcie_rx_last not asserted. */
+#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
+#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
+/* [R 9] Interrupt register #0 read */
+#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
+/* [RC 9] Interrupt register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
+/* [RW 2] Parity mask register #0 read/write */
+#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4
+/* [R 2] Parity register #0 read */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
+/* [RC 2] Parity register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac
+/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
+ * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
+ * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
+ * completer abort. 3 - Illegal value for this field. [12] valid - indicates
+ * if there was a completion error since the last time this register was
+ * cleared. */
+#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080
+/* [R 18] Details of first ATS Translation Completion request received with
+ * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
+ * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
+ * unsupported request. 2 - completer abort. 3 - Illegal value for this
+ * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
+ * completion error since the last time this register was cleared. */
+#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084
+/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
+ * a bit in this register in order to clear the corresponding bit in
+ * shadow_bme_pf_7_0 register. MCP should never use this unless a
+ * work-around is needed. Note: register contains bits from both paths. */
+#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458
+/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
+ * VF enable register of the corresponding PF is written to 0 and was
+ * previously 1. Set by PXP.
Reset by MCP writing 1 to + * sr_iov_disabled_request_clr. Note: register contains bits from both + * paths. */ +#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030 +/* [R 32] Indicates the status of tags 32-63. 0 - tag is used - read + * completion did not return yet. 1 - tag is unused. Same functionality as + * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */ +#define PGLUE_B_REG_TAGS_63_32 0x9244 +/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170 +/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4 +/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc +/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4 +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0 +/* [R 32] Address [31:0] of first read request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098 +/* [R 32] Address [63:32] of first read request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c +/* [R 31] Details of first read request not submitted due to error. [4:0] + * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request. + * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] - + * VFID. */ +#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0 +/* [R 26] Details of first read request not submitted due to error. [15:0] + * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type - + * [21] - Indicates was_error was set; [22] - Indicates BME was cleared; + * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent + * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid - + * indicates if there was a request not submitted due to error since the + * last time this register was cleared. */ +#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4 +/* [R 32] Address [31:0] of first write request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088 +/* [R 32] Address [63:32] of first write request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c +/* [R 31] Details of first write request not submitted due to error. [4:0] + * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] + * - VFID. */ +#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090 +/* [R 26] Details of first write request not submitted due to error. [15:0] + * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type - + * [21] - Indicates was_error was set; [22] - Indicates BME was cleared; + * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent + * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid - + * indicates if there was a request not submitted due to error since the + * last time this register was cleared. */ +#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094 +/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask; + * bits[4:0]-address relative to start_offset_a. Bits [1:0] can have any + * value (Byte resolution address).
*/ +#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128 +#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c +#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130 +#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134 +#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138 +#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c +#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140 +/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c +/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180 +/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184 +/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8 +/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0 +/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8 +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4 +/* [R 26] Details of first target VF request accessing VF GRC space that + * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write. + * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a + * request accessing VF GRC space that failed permission check since the + * last time this register was cleared. Permission checks are: function + * permission; R/W permission; address range permission. */ +#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234 +/* [R 31] Details of first target VF request with length violation (too many + * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address). + * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30] + * valid - indicates if there was a request with length violation since the + * last time this register was cleared. Length violations: length of more + * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and + * length is more than 1 DW. */ +#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230 +/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates + * that there was a completion with uncorrectable error for the + * corresponding PF. Set by PXP. Reset by MCP writing 1 to + * was_error_pf_7_0_clr. */ +#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c +/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1 + * to a bit in this register in order to clear the corresponding bit in + * was_error_pf_7_0 register. */ +#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470 +/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_127_96_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078 +/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP + * writes 1 to a bit in this register in order to clear the corresponding + * bit in was_error_vf_127_96 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
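Several of the *_DETAILS registers above pack multiple fields into a single word. As a purely illustrative sketch, this decodes PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS according to the field layout documented in its comment ([14:0] address, [15] w_nr, [21:16] VFID, [24:22] PFID, [25] valid); the helper name is hypothetical and REG_RD() is assumed as above.

static void example_decode_grc_violation(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS);

	if (!(val & (1 << 25)))
		return;	/* nothing latched since the last clear */

	pr_info("VF GRC violation: addr 0x%x (%s) vf %u pf %u\n",
		val & 0x7fff,				/* [14:0] address */
		(val & (1 << 15)) ? "write" : "read",	/* [15] w_nr */
		(val >> 16) & 0x3f,			/* [21:16] VFID */
		(val >> 22) & 0x7);			/* [24:22] PFID */
}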
+/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_31_0_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c +/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_31_0 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478 +/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_63_32_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070 +/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_63_32 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c +/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_95_64_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074 +/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_95_64 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480 +/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188 +/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec +/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4 +/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8 +#define PRS_REG_A_PRSU_20 0x40134 +/* [R 8] debug only: CFC load request current credit. Transaction based. */ +#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 +/* [R 8] debug only: CFC search request current credit. Transaction based. */ +#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168 +/* [RW 6] The initial credit for the search message to the CFC interface. + Credit is transaction based. */ +#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c +/* [RW 24] CID for port 0 if no match */ +#define PRS_REG_CID_PORT_0 0x400fc +/* [RW 32] The CM header for flush message where 'load existed' bit in CFC + load response is reset and packet type is 0. Used in packet start message + to TCM. */ +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0 +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4 +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8 +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0 +/* [RW 32] The CM header for flush message where 'load existed' bit in CFC + load response is set and packet type is 0. Used in packet start message + to TCM. */ +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0 +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4 +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8 +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0 +/* [RW 32] The CM header for a match and packet type 1 for loopback port. + Used in packet start message to TCM.
*/ +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0 +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4 +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8 +/* [RW 32] The CM header for a match and packet type 0. Used in packet start + message to TCM. */ +#define PRS_REG_CM_HDR_TYPE_0 0x40078 +#define PRS_REG_CM_HDR_TYPE_1 0x4007c +#define PRS_REG_CM_HDR_TYPE_2 0x40080 +#define PRS_REG_CM_HDR_TYPE_3 0x40084 +#define PRS_REG_CM_HDR_TYPE_4 0x40088 +/* [RW 32] The CM header in case there was not a match on the connection */ +#define PRS_REG_CM_NO_MATCH_HDR 0x400b8 +/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */ +#define PRS_REG_E1HOV_MODE 0x401c8 +/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet + start message to TCM. */ +#define PRS_REG_EVENT_ID_1 0x40054 +#define PRS_REG_EVENT_ID_2 0x40058 +#define PRS_REG_EVENT_ID_3 0x4005c +/* [RW 16] The Ethernet type value for FCoE */ +#define PRS_REG_FCOE_TYPE 0x401d0 +/* [RW 8] Context region for flush packet with packet type 0. Used in CFC + load request message. */ +#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004 +#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008 +#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c +#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010 +#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014 +#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018 +#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c +#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. */ +#define PRS_REG_HDRS_AFTER_BASIC 0x40238 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header for port 0 packets. */ +#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270 +#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290 +/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ +#define PRS_REG_HDRS_AFTER_TAG_0 0x40248 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for + * port 0 packets */ +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280 +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0 +/* [RW 4] The increment value to send in the CFC load request message */ +#define PRS_REG_INC_VALUE 0x40048 +/* [RW 6] Bit-map indicating which headers must appear in the packet */ +#define PRS_REG_MUST_HAVE_HDRS 0x40254 +/* [RW 6] Bit-map indicating which headers must appear in the packet for + * port 0 packets */ +#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c +#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac +#define PRS_REG_NIC_MODE 0x40138 +/* [RW 8] The 8-bit event ID for cases where there is no match on the + connection. Used in packet start message to TCM. */ +#define PRS_REG_NO_MATCH_EVENT_ID 0x40070 +/* [ST 24] The number of input CFC flush packets */ +#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128 +/* [ST 32] The number of cycles the Parser halted its operation since it + could not allocate the next serial number */ +#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130 +/* [ST 24] The number of input packets */ +#define PRS_REG_NUM_OF_PACKETS 0x40124 +/* [ST 24] The number of input transparent flush packets */ +#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c +/* [RW 8] Context region for received Ethernet packet with a match and + packet type 0. 
Used in CFC load request message */ +#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028 +#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c +#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030 +#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034 +#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038 +#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c +#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040 +#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044 +/* [R 2] debug only: Number of pending requests for CAC on port 0. */ +#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174 +/* [R 2] debug only: Number of pending requests for header parsing. */ +#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170 +/* [R 1] Interrupt register #0 read */ +#define PRS_REG_PRS_INT_STS 0x40188 +/* [RW 8] Parity mask register #0 read/write */ +#define PRS_REG_PRS_PRTY_MASK 0x401a4 +/* [R 8] Parity register #0 read */ +#define PRS_REG_PRS_PRTY_STS 0x40198 +/* [RC 8] Parity register #0 read clear */ +#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c +/* [RW 8] Context region for pure acknowledge packets. Used in CFC load + request message */ +#define PRS_REG_PURE_REGIONS 0x40024 +/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this + serial number was released by SDM but cannot be used because a previous + serial number was not released. */ +#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154 +/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this + serial number was released by SDM but cannot be used because a previous + serial number was not released. */ +#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158 +/* [R 4] debug only: SRC current credit. Transaction based. */ +#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c +/* [RW 16] The Ethernet type value for L2 tag 0 */ +#define PRS_REG_TAG_ETHERTYPE_0 0x401d4 +/* [RW 4] The length of the info field for L2 tag 0. The length is between + * 2B and 14B; in 2B granularity */ +#define PRS_REG_TAG_LEN_0 0x4022c +/* [R 8] debug only: TCM current credit. Cycle based. */ +#define PRS_REG_TCM_CURRENT_CREDIT 0x40160 +/* [R 8] debug only: TSDM current credit. Transaction based. */ +#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c +#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24) +#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7) +#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7) +/* [R 6] Debug only: Number of used entries in the data FIFO */ +#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c +/* [R 7] Debug only: Number of used entries in the header FIFO */ +#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 +#define PXP2_REG_PGL_ADDR_88_F0 0x120534 +#define PXP2_REG_PGL_ADDR_8C_F0 0x120538 +#define PXP2_REG_PGL_ADDR_90_F0 0x12053c +#define PXP2_REG_PGL_ADDR_94_F0 0x120540 +#define PXP2_REG_PGL_CONTROL0 0x120490 +#define PXP2_REG_PGL_CONTROL1 0x120514 +#define PXP2_REG_PGL_DEBUG 0x120520 +/* [RW 32] third dword data of expansion rom request. this register is + special. reading from it provides a vector of outstanding read requests.
if + a bit is zero it means that a read request on the corresponding tag did + not finish yet (not all completions have arrived for it) */ +#define PXP2_REG_PGL_EXP_ROM2 0x120808 +/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask; + bits[15:0]-address */ +#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4 +#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8 +#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc +#define PXP2_REG_PGL_INT_CSDM_3 0x120500 +#define PXP2_REG_PGL_INT_CSDM_4 0x120504 +#define PXP2_REG_PGL_INT_CSDM_5 0x120508 +#define PXP2_REG_PGL_INT_CSDM_6 0x12050c +#define PXP2_REG_PGL_INT_CSDM_7 0x120510 +/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask; + bits[15:0]-address */ +#define PXP2_REG_PGL_INT_TSDM_0 0x120494 +#define PXP2_REG_PGL_INT_TSDM_1 0x120498 +#define PXP2_REG_PGL_INT_TSDM_2 0x12049c +#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0 +#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4 +#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8 +#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac +#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0 +/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask; + bits[15:0]-address */ +#define PXP2_REG_PGL_INT_USDM_0 0x1204b4 +#define PXP2_REG_PGL_INT_USDM_1 0x1204b8 +#define PXP2_REG_PGL_INT_USDM_2 0x1204bc +#define PXP2_REG_PGL_INT_USDM_3 0x1204c0 +#define PXP2_REG_PGL_INT_USDM_4 0x1204c4 +#define PXP2_REG_PGL_INT_USDM_5 0x1204c8 +#define PXP2_REG_PGL_INT_USDM_6 0x1204cc +#define PXP2_REG_PGL_INT_USDM_7 0x1204d0 +/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask; + bits[15:0]-address */ +#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4 +#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8 +#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc +#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0 +#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4 +#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8 +#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec +#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0
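Per the comment above, PXP2_REG_PGL_EXP_ROM2 reads back as a bit vector of the outstanding read tags, a zero bit meaning completions are still pending for that tag. A bounded polling sketch under the same assumptions as the earlier examples (REG_RD() accessor; hypothetical helper name):

static int example_wait_rd_tags_idle(struct bnx2x *bp)
{
	int cnt;

	for (cnt = 0; cnt < 1000; cnt++) {
		/* all-ones means every read tag has completed */
		if (REG_RD(bp, PXP2_REG_PGL_EXP_ROM2) == 0xffffffff)
			return 0;
		usleep_range(100, 200);
	}
	return -EBUSY;
}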
+/* [RW 3] this field allows one function to pretend being another function + when accessing any BAR mapped resource within the device. the value of + the field is the number of the function that will be accessed + effectively. after software writes to this field it must read it back in + order to know that the new value is updated */ +#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674 +#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678 +#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c +#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680 +#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684 +#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688 +#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c +#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690 +/* [R 1] this bit indicates that a read request was blocked because + bus_master_en was deasserted */ +#define PXP2_REG_PGL_READ_BLOCKED 0x120568 +#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8 +/* [R 18] debug only */ +#define PXP2_REG_PGL_TXW_CDTS 0x12052c +/* [R 1] this bit indicates that a write request was blocked because + bus_master_en was deasserted */ +#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564 +#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0 +#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4 +#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8 +#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4 +#define PXP2_REG_PSWRQ_BW_ADD28 0x120228 +#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8 +#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4 +#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8 +#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc +#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0 +#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c +#define PXP2_REG_PSWRQ_BW_L1 0x1202b0 +#define PXP2_REG_PSWRQ_BW_L10 0x1202d4 +#define PXP2_REG_PSWRQ_BW_L11 0x1202d8 +#define PXP2_REG_PSWRQ_BW_L2 0x1202b4 +#define PXP2_REG_PSWRQ_BW_L28 0x120318 +#define PXP2_REG_PSWRQ_BW_L3 0x1202b8 +#define PXP2_REG_PSWRQ_BW_L6 0x1202c4 +#define PXP2_REG_PSWRQ_BW_L7 0x1202c8 +#define PXP2_REG_PSWRQ_BW_L8 0x1202cc +#define PXP2_REG_PSWRQ_BW_L9 0x1202d0 +#define PXP2_REG_PSWRQ_BW_RD 0x120324 +#define PXP2_REG_PSWRQ_BW_UB1 0x120238 +#define PXP2_REG_PSWRQ_BW_UB10 0x12025c +#define PXP2_REG_PSWRQ_BW_UB11 0x120260 +#define PXP2_REG_PSWRQ_BW_UB2 0x12023c +#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0 +#define PXP2_REG_PSWRQ_BW_UB3 0x120240 +#define PXP2_REG_PSWRQ_BW_UB6 0x12024c +#define PXP2_REG_PSWRQ_BW_UB7 0x120250 +#define PXP2_REG_PSWRQ_BW_UB8 0x120254 +#define PXP2_REG_PSWRQ_BW_UB9 0x120258 +#define PXP2_REG_PSWRQ_BW_WR 0x120328 +#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000 +#define PXP2_REG_PSWRQ_QM0_L2P 0x120038 +#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054 +#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c +#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0 +/* [RW 32] Interrupt mask register #0 read/write */ +#define PXP2_REG_PXP2_INT_MASK_0 0x120578 +/* [R 32] Interrupt register #0 read */ +#define PXP2_REG_PXP2_INT_STS_0 0x12056c +#define PXP2_REG_PXP2_INT_STS_1 0x120608 +/* [RC 32] Interrupt register #0 read clear */ +#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570 +/* [RW 32] Parity mask register #0 read/write */ +#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588 +#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598 +/* [R 32] Parity register #0 read */ +#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c +#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c +/* [RC 32] Parity register #0 read clear */ +#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580 +#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590 +/* [R 1] Debug only: The 'almost full' indication from each fifo (gives + indication about backpressure) */ +#define PXP2_REG_RD_ALMOST_FULL_0 0x120424 +/* [R 8] Debug only: The blocks counter - number of unused block ids */ +#define PXP2_REG_RD_BLK_CNT 0x120418
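The pretend-function field above explicitly requires a read-back: the write is not guaranteed to be in effect until the register has been read again. A minimal sketch for function 0, mirroring that documented requirement (REG_RD()/REG_WR() assumed as before; helper name hypothetical):

static void example_pretend_func(struct bnx2x *bp, u32 pretend_func_num)
{
	REG_WR(bp, PXP2_REG_PGL_PRETEND_FUNC_F0, pretend_func_num);
	/* the read-back confirms the new pretend value is in effect */
	REG_RD(bp, PXP2_REG_PGL_PRETEND_FUNC_F0);
}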
+/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer. + Must be bigger than 6. Normally should not be changed. */ +#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c +/* [RW 2] CDU byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404 +/* [RW 1] When '1'; inputs to the PSWRD block are ignored */ +#define PXP2_REG_RD_DISABLE_INPUTS 0x120374 +/* [R 1] PSWRD internal memories initialization is done */ +#define PXP2_REG_RD_INIT_DONE 0x120370 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq10 */ +#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq11 */ +#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq17 */ +#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq18 */ +#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq19 */ +#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq22 */ +#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq25 */ +#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq6 */ +#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq9 */ +#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c +/* [RW 2] PBF byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4 +/* [R 1] Debug only: Indication if delivery ports are idle */ +#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c +#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420 +/* [RW 2] QM byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8 +/* [R 7] Debug only: The SR counter - number of unused sub request ids */ +#define PXP2_REG_RD_SR_CNT 0x120414 +/* [RW 2] SRC byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400 +/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must + be bigger than 1. Normally should not be changed.
*/ +#define PXP2_REG_RD_SR_NUM_CFG 0x120408 +/* [RW 1] Signals the PSWRD block to start initializing internal memories */ +#define PXP2_REG_RD_START_INIT 0x12036c +/* [RW 2] TM byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc +/* [RW 10] Bandwidth addition to VQ0 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc +/* [RW 10] Bandwidth addition to VQ12 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec +/* [RW 10] Bandwidth addition to VQ13 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0 +/* [RW 10] Bandwidth addition to VQ14 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4 +/* [RW 10] Bandwidth addition to VQ15 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8 +/* [RW 10] Bandwidth addition to VQ16 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc +/* [RW 10] Bandwidth addition to VQ17 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD17 0x120200 +/* [RW 10] Bandwidth addition to VQ18 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD18 0x120204 +/* [RW 10] Bandwidth addition to VQ19 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD19 0x120208 +/* [RW 10] Bandwidth addition to VQ20 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c +/* [RW 10] Bandwidth addition to VQ22 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD22 0x120210 +/* [RW 10] Bandwidth addition to VQ23 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD23 0x120214 +/* [RW 10] Bandwidth addition to VQ24 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD24 0x120218 +/* [RW 10] Bandwidth addition to VQ25 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c +/* [RW 10] Bandwidth addition to VQ26 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD26 0x120220 +/* [RW 10] Bandwidth addition to VQ27 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD27 0x120224 +/* [RW 10] Bandwidth addition to VQ4 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc +/* [RW 10] Bandwidth addition to VQ5 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0 +/* [RW 10] Bandwidth Typical L for VQ0 Read requests */ +#define PXP2_REG_RQ_BW_RD_L0 0x1202ac +/* [RW 10] Bandwidth Typical L for VQ12 Read requests */ +#define PXP2_REG_RQ_BW_RD_L12 0x1202dc +/* [RW 10] Bandwidth Typical L for VQ13 Read requests */ +#define PXP2_REG_RQ_BW_RD_L13 0x1202e0 +/* [RW 10] Bandwidth Typical L for VQ14 Read requests */ +#define PXP2_REG_RQ_BW_RD_L14 0x1202e4 +/* [RW 10] Bandwidth Typical L for VQ15 Read requests */ +#define PXP2_REG_RQ_BW_RD_L15 0x1202e8 +/* [RW 10] Bandwidth Typical L for VQ16 Read requests */ +#define PXP2_REG_RQ_BW_RD_L16 0x1202ec +/* [RW 10] Bandwidth Typical L for VQ17 Read requests */ +#define PXP2_REG_RQ_BW_RD_L17 0x1202f0 +/* [RW 10] Bandwidth Typical L for VQ18 Read requests */ +#define PXP2_REG_RQ_BW_RD_L18 0x1202f4 +/* [RW 10] Bandwidth Typical L for VQ19 Read requests */ +#define PXP2_REG_RQ_BW_RD_L19 0x1202f8 +/* [RW 10] Bandwidth Typical L for VQ20 Read requests */ +#define PXP2_REG_RQ_BW_RD_L20 0x1202fc +/* [RW 10] Bandwidth Typical L for VQ22 Read requests */ +#define PXP2_REG_RQ_BW_RD_L22 0x120300 +/* [RW 10] Bandwidth Typical L for VQ23 Read requests */ +#define PXP2_REG_RQ_BW_RD_L23 0x120304 +/* [RW 10] Bandwidth Typical L for VQ24 Read requests */ +#define PXP2_REG_RQ_BW_RD_L24 0x120308 +/* [RW 10] Bandwidth Typical L for VQ25 Read requests */ +#define PXP2_REG_RQ_BW_RD_L25 0x12030c +/* [RW 10] Bandwidth Typical L for VQ26 Read requests */ +#define PXP2_REG_RQ_BW_RD_L26 0x120310 +/* [RW 10] Bandwidth Typical L for VQ27 Read
requests */ +#define PXP2_REG_RQ_BW_RD_L27 0x120314 +/* [RW 10] Bandwidth Typical L for VQ4 Read requests */ +#define PXP2_REG_RQ_BW_RD_L4 0x1202bc +/* [RW 10] Bandwidth Typical L for VQ5 Read requests; currently not used */ +#define PXP2_REG_RQ_BW_RD_L5 0x1202c0 +/* [RW 7] Bandwidth upper bound for VQ0 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234 +/* [RW 7] Bandwidth upper bound for VQ12 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264 +/* [RW 7] Bandwidth upper bound for VQ13 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268 +/* [RW 7] Bandwidth upper bound for VQ14 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c +/* [RW 7] Bandwidth upper bound for VQ15 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270 +/* [RW 7] Bandwidth upper bound for VQ16 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274 +/* [RW 7] Bandwidth upper bound for VQ17 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278 +/* [RW 7] Bandwidth upper bound for VQ18 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c +/* [RW 7] Bandwidth upper bound for VQ19 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280 +/* [RW 7] Bandwidth upper bound for VQ20 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284 +/* [RW 7] Bandwidth upper bound for VQ22 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288 +/* [RW 7] Bandwidth upper bound for VQ23 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c +/* [RW 7] Bandwidth upper bound for VQ24 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290 +/* [RW 7] Bandwidth upper bound for VQ25 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294 +/* [RW 7] Bandwidth upper bound for VQ26 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298 +/* [RW 7] Bandwidth upper bound for VQ27 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c +/* [RW 7] Bandwidth upper bound for VQ4 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244 +/* [RW 7] Bandwidth upper bound for VQ5 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248 +/* [RW 10] Bandwidth addition to VQ29 write requests */ +#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c +/* [RW 10] Bandwidth addition to VQ30 write requests */ +#define PXP2_REG_RQ_BW_WR_ADD30 0x120230 +/* [RW 10] Bandwidth Typical L for VQ29 Write requests */ +#define PXP2_REG_RQ_BW_WR_L29 0x12031c +/* [RW 10] Bandwidth Typical L for VQ30 Write requests */ +#define PXP2_REG_RQ_BW_WR_L30 0x120320 +/* [RW 7] Bandwidth upper bound for VQ29 */ +#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4 +/* [RW 7] Bandwidth upper bound for VQ30 */ +#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8 +/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */ +#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008 +/* [RW 2] Endian mode for cdu */ +#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0 +#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c +#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620 +/* [RW 3] page size in L2P table for CDU module; 0-4k; 1-8k; 2-16k; 3-32k; + 4-64k; 5-128k */ +#define PXP2_REG_RQ_CDU_P_SIZE 0x120018 +/* [R 1] '1' indicates that the requester has finished its internal + configuration */ +#define PXP2_REG_RQ_CFG_DONE 0x1201b4 +/* [RW 2] Endian mode for debug */ +#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4 +/* [RW 1] When '1'; requests will enter input buffers but won't get out + towards the glue */ +#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
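A note on the 3-bit *_P_SIZE fields just above and below: the original comments list six page sizes whose leading digits were lost; restoring the obvious power-of-two ladder gives codes 0-4k through 5-128k. A hypothetical helper under that assumption:

static u32 example_l2p_page_bytes(u32 psize_code)
{
	/* assumed encoding: 0 - 4k, 1 - 8k, 2 - 16k, 3 - 32k,
	 * 4 - 64k, 5 - 128k */
	return 4096U << psize_code;
}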
+/* [RW 4] Determines alignment of write SRs when a request is split into + * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B + * aligned. 4 - 512B aligned. */ +#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0 +/* [RW 4] Determines alignment of read SRs when a request is split into + * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B + * aligned. 4 - 512B aligned. */ +#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c +/* [RW 1] when set the new alignment method (E2) will be applied; when reset + * the original alignment method (E1 E1H) will be applied */ +#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930 +/* [RW 1] If 1 ILT failure will not result in ELT access; an interrupt will + be asserted */ +#define PXP2_REG_RQ_ELT_DISABLE 0x12066c +/* [RW 2] Endian mode for hc */ +#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8 +/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for back + compatibility needs; Note that different registers are used per mode */ +#define PXP2_REG_RQ_ILT_MODE 0x1205b4 +/* [WB 53] Onchip address table */ +#define PXP2_REG_RQ_ONCHIP_AT 0x122000 +/* [WB 53] Onchip address table - B0 */ +#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000 +/* [RW 13] Pending read limiter threshold; in Dwords */ +#define PXP2_REG_RQ_PDR_LIMIT 0x12033c +/* [RW 2] Endian mode for qm */ +#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194 +#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634 +#define PXP2_REG_RQ_QM_LAST_ILT 0x120638 +/* [RW 3] page size in L2P table for QM module; 0-4k; 1-8k; 2-16k; 3-32k; + 4-64k; 5-128k */ +#define PXP2_REG_RQ_QM_P_SIZE 0x120050 +/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */ +#define PXP2_REG_RQ_RBC_DONE 0x1201b0 +/* [RW 3] Max burst size field for read requests port 0; 000 - 128B; + 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */ +#define PXP2_REG_RQ_RD_MBS0 0x120160 +/* [RW 3] Max burst size field for read requests port 1; 000 - 128B; + 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */ +#define PXP2_REG_RQ_RD_MBS1 0x120168 +/* [RW 2] Endian mode for src */ +#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c +#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c +#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640 +/* [RW 3] page size in L2P table for SRC module; 0-4k; 1-8k; 2-16k; 3-32k; + 4-64k; 5-128k */ +#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c +/* [RW 2] Endian mode for tm */ +#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198 +#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644 +#define PXP2_REG_RQ_TM_LAST_ILT 0x120648 +/* [RW 3] page size in L2P table for TM module; 0-4k; 1-8k; 2-16k; 3-32k; + 4-64k; 5-128k */ +#define PXP2_REG_RQ_TM_P_SIZE 0x120034 +/* [R 5] Number of entries in the ufifo; this fifo has l2p completions */ +#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c +/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */ +#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094 +/* [R 8] Number of entries occupied by vq 0 in pswrq memory */ +#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810 +/* [R 8] Number of entries occupied by vq 10 in pswrq memory */ +#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818 +/* [R 8] Number of entries occupied by vq 11 in pswrq memory */ +#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820 +/* [R 8] Number of entries occupied by vq 12 in pswrq memory */ +#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828 +/* [R 8] Number of entries occupied by vq 13 in pswrq memory */ +#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830 +/* [R 8] Number of entries occupied by vq 14 in pswrq memory */ +#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838 +/* [R 8] Number of entries occupied by vq 15 in pswrq memory */ +#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840 +/* [R 8] Number of entries
occupied by vq 16 in pswrq memory */ +#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848 +/* [R 8] Number of entries occupied by vq 17 in pswrq memory */ +#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850 +/* [R 8] Number of entries occupied by vq 18 in pswrq memory */ +#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858 +/* [R 8] Number of entries occupied by vq 19 in pswrq memory */ +#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860 +/* [R 8] Number of entries occupied by vq 1 in pswrq memory */ +#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868 +/* [R 8] Number of entries occupied by vq 20 in pswrq memory */ +#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870 +/* [R 8] Number of entries occupied by vq 21 in pswrq memory */ +#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878 +/* [R 8] Number of entries occupied by vq 22 in pswrq memory */ +#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880 +/* [R 8] Number of entries occupied by vq 23 in pswrq memory */ +#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888 +/* [R 8] Number of entries occupied by vq 24 in pswrq memory */ +#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890 +/* [R 8] Number of entries occupied by vq 25 in pswrq memory */ +#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898 +/* [R 8] Number of entries occupied by vq 26 in pswrq memory */ +#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0 +/* [R 8] Number of entries occupied by vq 27 in pswrq memory */ +#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8 +/* [R 8] Number of entries occupied by vq 28 in pswrq memory */ +#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0 +/* [R 8] Number of entries occupied by vq 29 in pswrq memory */ +#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8 +/* [R 8] Number of entries occupied by vq 2 in pswrq memory */ +#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0 +/* [R 8] Number of entries occupied by vq 30 in pswrq memory */ +#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8 +/* [R 8] Number of entries occupied by vq 31 in pswrq memory */ +#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0 +/* [R 8] Number of entries occupied by vq 3 in pswrq memory */ +#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8 +/* [R 8] Number of entries occupied by vq 4 in pswrq memory */ +#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0 +/* [R 8] Number of entries occupied by vq 5 in pswrq memory */ +#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8 +/* [R 8] Number of entries occupied by vq 6 in pswrq memory */ +#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0 +/* [R 8] Number of entries occupied by vq 7 in pswrq memory */ +#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8 +/* [R 8] Number of entries occupied by vq 8 in pswrq memory */ +#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900 +/* [R 8] Number of entries occupied by vq 9 in pswrq memory */ +#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908 +/* [RW 3] Max burst size field for write requests port 0; 000 - 128B; + 001 - 256B; 010 - 512B; */ +#define PXP2_REG_RQ_WR_MBS0 0x12015c +/* [RW 3] Max burst size field for write requests port 1; 000 - 128B; + 001 - 256B; 010 - 512B; */ +#define PXP2_REG_RQ_WR_MBS1 0x120164 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_CDU_MPS 0x1205f0 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_CSDM_MPS 0x1205d0 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_DBG_MPS 0x1205e8 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the +
buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_DMAE_MPS 0x1205ec +/* [RW 10] If the number of entries in the dmae fifo is higher than this + threshold then the has_payload indication will be asserted; the default + value should be greater than the write MBS size */ +#define PXP2_REG_WR_DMAE_TH 0x120368 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_HC_MPS 0x1205c8 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_QM_MPS 0x1205dc +/* [RW 1] 0 - working in A0 mode; 1 - working in B0 mode */ +#define PXP2_REG_WR_REV_MODE 0x120670 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_SRC_MPS 0x1205e4 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_TM_MPS 0x1205e0 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_TSDM_MPS 0x1205d4 +/* [RW 10] If the number of entries in the usdmdp fifo is higher than this + threshold then the has_payload indication will be asserted; the default + value should be greater than the write MBS size */ +#define PXP2_REG_WR_USDMDP_TH 0x120348 +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_USDM_MPS 0x1205cc +/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_XSDM_MPS 0x1205d8 +/* [R 1] debug only: Indication if PSWHST arbiter is idle */ +#define PXP_REG_HST_ARB_IS_IDLE 0x103004 +/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means + this client is waiting for the arbiter. */ +#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008 +/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue + block. Should be used to close the gates. */ +#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4 +/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit + should update according to 'hst_discard_doorbells' register when the state + machine is idle */ +#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0 +/* [RW 1] When 1; new internal writes arriving to the block are discarded. + Should be used to close the gates. */ +#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
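The two discard controls above are the "close the gates" knobs: software sets the RW control bit and then watches the matching debug status register, which only tracks the control once the internal state machine has gone idle. A bounded-poll sketch under the same assumptions as the earlier examples (helper name hypothetical):

static int example_discard_doorbells(struct bnx2x *bp, bool discard)
{
	u32 want = discard ? 1 : 0;
	int cnt;

	REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, want);
	for (cnt = 0; cnt < 1000; cnt++) {
		/* status follows the control bit once the state
		 * machine is idle */
		if (REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS_STATUS) == want)
			return 0;
		usleep_range(10, 20);
	}
	return -EBUSY;
}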
+/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1' + means this PSWHST is discarding inputs from this client. Each bit should + update according to 'hst_discard_internal_writes' register when the state + machine is idle. */ +#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c +/* [WB 160] Used for initialization of the inbound interrupts memory */ +#define PXP_REG_HST_INBOUND_INT 0x103800 +/* [RW 32] Interrupt mask register #0 read/write */ +#define PXP_REG_PXP_INT_MASK_0 0x103074 +#define PXP_REG_PXP_INT_MASK_1 0x103084 +/* [R 32] Interrupt register #0 read */ +#define PXP_REG_PXP_INT_STS_0 0x103068 +#define PXP_REG_PXP_INT_STS_1 0x103078 +/* [RC 32] Interrupt register #0 read clear */ +#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c +#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c +/* [RW 27] Parity mask register #0 read/write */ +#define PXP_REG_PXP_PRTY_MASK 0x103094 +/* [R 26] Parity register #0 read */ +#define PXP_REG_PXP_PRTY_STS 0x103088 +/* [RC 27] Parity register #0 read clear */ +#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c +/* [RW 4] The activity counter initial increment value sent in the load + request */ +#define QM_REG_ACTCTRINITVAL_0 0x168040 +#define QM_REG_ACTCTRINITVAL_1 0x168044 +#define QM_REG_ACTCTRINITVAL_2 0x168048 +#define QM_REG_ACTCTRINITVAL_3 0x16804c +/* [RW 32] The base logical address (in bytes) of each physical queue. The + index I represents the physical queue number. The 12 lsbs are ignored and + considered zero so practically there are only 20 bits in this register; + queues 63-0 */ +#define QM_REG_BASEADDR 0x168900 +/* [RW 32] The base logical address (in bytes) of each physical queue. The + index I represents the physical queue number. The 12 lsbs are ignored and + considered zero so practically there are only 20 bits in this register; + queues 127-64 */ +#define QM_REG_BASEADDR_EXT_A 0x16e100 +/* [RW 16] The byte credit cost for each task. This value is for both ports */ +#define QM_REG_BYTECRDCOST 0x168234 +/* [RW 16] The initial byte credit value for both ports. */ +#define QM_REG_BYTECRDINITVAL 0x168238 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 31-0 */ +#define QM_REG_BYTECRDPORT_LSB 0x168228 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 95-64 */ +#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 63-32 */ +#define QM_REG_BYTECRDPORT_MSB 0x168224 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 127-96 */ +#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c +/* [RW 16] The byte credit value above which the QM is considered almost + full */ +#define QM_REG_BYTECREDITAFULLTHR 0x168094 +/* [RW 4] The initial credit for interface */ +#define QM_REG_CMINITCRD_0 0x1680cc +#define QM_REG_BYTECRDCMDQ_0 0x16e6e8 +#define QM_REG_CMINITCRD_1 0x1680d0 +#define QM_REG_CMINITCRD_2 0x1680d4 +#define QM_REG_CMINITCRD_3 0x1680d8 +#define QM_REG_CMINITCRD_4 0x1680dc +#define QM_REG_CMINITCRD_5 0x1680e0 +#define QM_REG_CMINITCRD_6 0x1680e4 +#define QM_REG_CMINITCRD_7 0x1680e8 +/* [RW 8] A mask bit per CM interface.
If this bit is 0 then this interface + is masked */ +#define QM_REG_CMINTEN 0x1680ec +/* [RW 12] A bit vector which indicates which of the queues are tied to + interface 0 */ +#define QM_REG_CMINTVOQMASK_0 0x1681f4 +#define QM_REG_CMINTVOQMASK_1 0x1681f8 +#define QM_REG_CMINTVOQMASK_2 0x1681fc +#define QM_REG_CMINTVOQMASK_3 0x168200 +#define QM_REG_CMINTVOQMASK_4 0x168204 +#define QM_REG_CMINTVOQMASK_5 0x168208 +#define QM_REG_CMINTVOQMASK_6 0x16820c +#define QM_REG_CMINTVOQMASK_7 0x168210 +/* [RW 20] The number of connections divided by 16 which dictates the size + of each queue which belongs to an even function number. */ +#define QM_REG_CONNNUM_0 0x168020 +/* [R 6] Keeps the fill level of the fifo from write client 4 */ +#define QM_REG_CQM_WRC_FIFOLVL 0x168018 +/* [RW 8] The context regions sent in the CFC load request */ +#define QM_REG_CTXREG_0 0x168030 +#define QM_REG_CTXREG_1 0x168034 +#define QM_REG_CTXREG_2 0x168038 +#define QM_REG_CTXREG_3 0x16803c +/* [RW 12] The VOQ mask used to select the VOQs which need to be full for + bypass enable */ +#define QM_REG_ENBYPVOQMASK 0x16823c +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 31-0 */ +#define QM_REG_ENBYTECRD_LSB 0x168220 +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 95-64 */ +#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518 +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 63-32 */ +#define QM_REG_ENBYTECRD_MSB 0x16821c +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 127-96 */ +#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514 +/* [RW 4] If cleared then the secondary interface will not be served by the + RR arbiter */ +#define QM_REG_ENSEC 0x1680f0 +/* [RW 32] NA */ +#define QM_REG_FUNCNUMSEL_LSB 0x168230 +/* [RW 32] NA */ +#define QM_REG_FUNCNUMSEL_MSB 0x16822c +/* [RW 32] A mask register to mask the Almost empty signals which will not + be used for the almost empty indication to the HW block; queues 31:0 */ +#define QM_REG_HWAEMPTYMASK_LSB 0x168218 +/* [RW 32] A mask register to mask the Almost empty signals which will not + be used for the almost empty indication to the HW block; queues 95-64 */ +#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510 +/* [RW 32] A mask register to mask the Almost empty signals which will not + be used for the almost empty indication to the HW block; queues 63:32 */ +#define QM_REG_HWAEMPTYMASK_MSB 0x168214 +/* [RW 32] A mask register to mask the Almost empty signals which will not + be used for the almost empty indication to the HW block; queues 127-96 */ +#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c +/* [RW 4] The number of outstanding requests to CFC */ +#define QM_REG_OUTLDREQ 0x168804 +/* [RC 1] A flag to indicate that overflow error occurred in one of the + queues.
*/ +#define QM_REG_OVFERROR 0x16805c +/* [RC 7] the Q where the overflow occurs */ +#define QM_REG_OVFQNUM 0x168058 +/* [R 16] Pause state for physical queues 15-0 */ +#define QM_REG_PAUSESTATE0 0x168410 +/* [R 16] Pause state for physical queues 31-16 */ +#define QM_REG_PAUSESTATE1 0x168414 +/* [R 16] Pause state for physical queues 47-32 */ +#define QM_REG_PAUSESTATE2 0x16e684 +/* [R 16] Pause state for physical queues 63-48 */ +#define QM_REG_PAUSESTATE3 0x16e688 +/* [R 16] Pause state for physical queues 79-64 */ +#define QM_REG_PAUSESTATE4 0x16e68c +/* [R 16] Pause state for physical queues 95-80 */ +#define QM_REG_PAUSESTATE5 0x16e690 +/* [R 16] Pause state for physical queues 111-96 */ +#define QM_REG_PAUSESTATE6 0x16e694 +/* [R 16] Pause state for physical queues 127-112 */ +#define QM_REG_PAUSESTATE7 0x16e698 +/* [RW 2] The PCI attributes field used in the PCI request. */ +#define QM_REG_PCIREQAT 0x168054 +#define QM_REG_PF_EN 0x16e70c +/* [R 24] The number of tasks stored in the QM for the PF. Only even + * functions are valid in E2 (odd I registers will be hard wired to 0) */ +#define QM_REG_PF_USG_CNT_0 0x16e040 +/* [R 16] NOT USED */ +#define QM_REG_PORT0BYTECRD 0x168300 +/* [R 16] The byte credit of port 1 */ +#define QM_REG_PORT1BYTECRD 0x168304 +/* [RW 3] pci function number of queues 15-0 */ +#define QM_REG_PQ2PCIFUNC_0 0x16e6bc +#define QM_REG_PQ2PCIFUNC_1 0x16e6c0 +#define QM_REG_PQ2PCIFUNC_2 0x16e6c4 +#define QM_REG_PQ2PCIFUNC_3 0x16e6c8 +#define QM_REG_PQ2PCIFUNC_4 0x16e6cc +#define QM_REG_PQ2PCIFUNC_5 0x16e6d0 +#define QM_REG_PQ2PCIFUNC_6 0x16e6d4 +#define QM_REG_PQ2PCIFUNC_7 0x16e6d8 +/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follows: + ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read + bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */ +#define QM_REG_PTRTBL 0x168a00 +/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as follows: + ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read + bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */ +#define QM_REG_PTRTBL_EXT_A 0x16e200 +/* [RW 2] Interrupt mask register #0 read/write */ +#define QM_REG_QM_INT_MASK 0x168444 +/* [R 2] Interrupt register #0 read */ +#define QM_REG_QM_INT_STS 0x168438 +/* [RW 12] Parity mask register #0 read/write */ +#define QM_REG_QM_PRTY_MASK 0x168454 +/* [R 12] Parity register #0 read */ +#define QM_REG_QM_PRTY_STS 0x168448 +/* [RC 12] Parity register #0 read clear */ +#define QM_REG_QM_PRTY_STS_CLR 0x16844c +/* [R 32] Current queues in pipeline: Queues from 32 to 63 */ +#define QM_REG_QSTATUS_HIGH 0x16802c +/* [R 32] Current queues in pipeline: Queues from 96 to 127 */ +#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408 +/* [R 32] Current queues in pipeline: Queues from 0 to 31 */ +#define QM_REG_QSTATUS_LOW 0x168028 +/* [R 32] Current queues in pipeline: Queues from 64 to 95 */ +#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404 +/* [R 24] The number of tasks queued for each queue; queues 63-0 */ +#define QM_REG_QTASKCTR_0 0x168308 +/* [R 24] The number of tasks queued for each queue; queues 127-64 */ +#define QM_REG_QTASKCTR_EXT_A_0 0x16e584 +/* [RW 4] Queue tied to VOQ */ +#define QM_REG_QVOQIDX_0 0x1680f4 +#define QM_REG_QVOQIDX_10 0x16811c +#define QM_REG_QVOQIDX_100 0x16e49c +#define QM_REG_QVOQIDX_101 0x16e4a0 +#define QM_REG_QVOQIDX_102 0x16e4a4 +#define QM_REG_QVOQIDX_103 0x16e4a8 +#define QM_REG_QVOQIDX_104 0x16e4ac +#define QM_REG_QVOQIDX_105 0x16e4b0 +#define QM_REG_QVOQIDX_106 0x16e4b4
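QM_REG_PTRTBL above is a wide-bus (WB) memory whose 54-bit entries pack five fields; the layout is spelled out in its comment. A sketch of unpacking one entry into its fields (how the 54-bit word is actually fetched, e.g. as two 32-bit halves via DMAE, is left out; helper name hypothetical):

static void example_unpack_ptrtbl_entry(u64 e)
{
	u32 rd_ptr   = (e >> 30) & 0xffffff;	/* ptrtbl[53:30] read pointer */
	u32 wr_ptr   = (e >> 6) & 0xffffff;	/* ptrtbl[29:6] write pointer */
	u32 rd_bank0 = (e >> 4) & 0x3;		/* ptrtbl[5:4] read bank 0 */
	u32 rd_bank1 = (e >> 2) & 0x3;		/* ptrtbl[3:2] read bank 1 */
	u32 wr_bank  = e & 0x3;			/* ptrtbl[1:0] write bank */

	pr_debug("rd %u wr %u banks %u/%u/%u\n",
		 rd_ptr, wr_ptr, rd_bank0, rd_bank1, wr_bank);
}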
+#define QM_REG_QVOQIDX_107 0x16e4b8 +#define QM_REG_QVOQIDX_108 0x16e4bc +#define QM_REG_QVOQIDX_109 0x16e4c0 +#define QM_REG_QVOQIDX_11 0x168120 +#define QM_REG_QVOQIDX_110 0x16e4c4 +#define QM_REG_QVOQIDX_111 0x16e4c8 +#define QM_REG_QVOQIDX_112 0x16e4cc +#define QM_REG_QVOQIDX_113 0x16e4d0 +#define QM_REG_QVOQIDX_114 0x16e4d4 +#define QM_REG_QVOQIDX_115 0x16e4d8 +#define QM_REG_QVOQIDX_116 0x16e4dc +#define QM_REG_QVOQIDX_117 0x16e4e0 +#define QM_REG_QVOQIDX_118 0x16e4e4 +#define QM_REG_QVOQIDX_119 0x16e4e8 +#define QM_REG_QVOQIDX_12 0x168124 +#define QM_REG_QVOQIDX_120 0x16e4ec +#define QM_REG_QVOQIDX_121 0x16e4f0 +#define QM_REG_QVOQIDX_122 0x16e4f4 +#define QM_REG_QVOQIDX_123 0x16e4f8 +#define QM_REG_QVOQIDX_124 0x16e4fc +#define QM_REG_QVOQIDX_125 0x16e500 +#define QM_REG_QVOQIDX_126 0x16e504 +#define QM_REG_QVOQIDX_127 0x16e508 +#define QM_REG_QVOQIDX_13 0x168128 +#define QM_REG_QVOQIDX_14 0x16812c +#define QM_REG_QVOQIDX_15 0x168130 +#define QM_REG_QVOQIDX_16 0x168134 +#define QM_REG_QVOQIDX_17 0x168138 +#define QM_REG_QVOQIDX_21 0x168148 +#define QM_REG_QVOQIDX_22 0x16814c +#define QM_REG_QVOQIDX_23 0x168150 +#define QM_REG_QVOQIDX_24 0x168154 +#define QM_REG_QVOQIDX_25 0x168158 +#define QM_REG_QVOQIDX_26 0x16815c +#define QM_REG_QVOQIDX_27 0x168160 +#define QM_REG_QVOQIDX_28 0x168164 +#define QM_REG_QVOQIDX_29 0x168168 +#define QM_REG_QVOQIDX_30 0x16816c +#define QM_REG_QVOQIDX_31 0x168170 +#define QM_REG_QVOQIDX_32 0x168174 +#define QM_REG_QVOQIDX_33 0x168178 +#define QM_REG_QVOQIDX_34 0x16817c +#define QM_REG_QVOQIDX_35 0x168180 +#define QM_REG_QVOQIDX_36 0x168184 +#define QM_REG_QVOQIDX_37 0x168188 +#define QM_REG_QVOQIDX_38 0x16818c +#define QM_REG_QVOQIDX_39 0x168190 +#define QM_REG_QVOQIDX_40 0x168194 +#define QM_REG_QVOQIDX_41 0x168198 +#define QM_REG_QVOQIDX_42 0x16819c +#define QM_REG_QVOQIDX_43 0x1681a0 +#define QM_REG_QVOQIDX_44 0x1681a4 +#define QM_REG_QVOQIDX_45 0x1681a8 +#define QM_REG_QVOQIDX_46 0x1681ac +#define QM_REG_QVOQIDX_47 0x1681b0 +#define QM_REG_QVOQIDX_48 0x1681b4 +#define QM_REG_QVOQIDX_49 0x1681b8 +#define QM_REG_QVOQIDX_5 0x168108 +#define QM_REG_QVOQIDX_50 0x1681bc +#define QM_REG_QVOQIDX_51 0x1681c0 +#define QM_REG_QVOQIDX_52 0x1681c4 +#define QM_REG_QVOQIDX_53 0x1681c8 +#define QM_REG_QVOQIDX_54 0x1681cc +#define QM_REG_QVOQIDX_55 0x1681d0 +#define QM_REG_QVOQIDX_56 0x1681d4 +#define QM_REG_QVOQIDX_57 0x1681d8 +#define QM_REG_QVOQIDX_58 0x1681dc +#define QM_REG_QVOQIDX_59 0x1681e0 +#define QM_REG_QVOQIDX_6 0x16810c +#define QM_REG_QVOQIDX_60 0x1681e4 +#define QM_REG_QVOQIDX_61 0x1681e8 +#define QM_REG_QVOQIDX_62 0x1681ec +#define QM_REG_QVOQIDX_63 0x1681f0 +#define QM_REG_QVOQIDX_64 0x16e40c +#define QM_REG_QVOQIDX_65 0x16e410 +#define QM_REG_QVOQIDX_69 0x16e420 +#define QM_REG_QVOQIDX_7 0x168110 +#define QM_REG_QVOQIDX_70 0x16e424 +#define QM_REG_QVOQIDX_71 0x16e428 +#define QM_REG_QVOQIDX_72 0x16e42c +#define QM_REG_QVOQIDX_73 0x16e430 +#define QM_REG_QVOQIDX_74 0x16e434 +#define QM_REG_QVOQIDX_75 0x16e438 +#define QM_REG_QVOQIDX_76 0x16e43c +#define QM_REG_QVOQIDX_77 0x16e440 +#define QM_REG_QVOQIDX_78 0x16e444 +#define QM_REG_QVOQIDX_79 0x16e448 +#define QM_REG_QVOQIDX_8 0x168114 +#define QM_REG_QVOQIDX_80 0x16e44c +#define QM_REG_QVOQIDX_81 0x16e450 +#define QM_REG_QVOQIDX_85 0x16e460 +#define QM_REG_QVOQIDX_86 0x16e464 +#define QM_REG_QVOQIDX_87 0x16e468 +#define QM_REG_QVOQIDX_88 0x16e46c +#define QM_REG_QVOQIDX_89 0x16e470 +#define QM_REG_QVOQIDX_9 0x168118 +#define QM_REG_QVOQIDX_90 0x16e474 +#define QM_REG_QVOQIDX_91 0x16e478
+#define QM_REG_QVOQIDX_92 0x16e47c +#define QM_REG_QVOQIDX_93 0x16e480 +#define QM_REG_QVOQIDX_94 0x16e484 +#define QM_REG_QVOQIDX_95 0x16e488 +#define QM_REG_QVOQIDX_96 0x16e48c +#define QM_REG_QVOQIDX_97 0x16e490 +#define QM_REG_QVOQIDX_98 0x16e494 +#define QM_REG_QVOQIDX_99 0x16e498 +/* [RW 1] Initialization bit command */ +#define QM_REG_SOFT_RESET 0x168428 +/* [RW 8] The credit cost per task in the QM. A value per each VOQ */ +#define QM_REG_TASKCRDCOST_0 0x16809c +#define QM_REG_TASKCRDCOST_1 0x1680a0 +#define QM_REG_TASKCRDCOST_2 0x1680a4 +#define QM_REG_TASKCRDCOST_4 0x1680ac +#define QM_REG_TASKCRDCOST_5 0x1680b0 +/* [R 6] Keeps the fill level of the fifo from write client 3 */ +#define QM_REG_TQM_WRC_FIFOLVL 0x168010 +/* [R 6] Keeps the fill level of the fifo from write client 2 */ +#define QM_REG_UQM_WRC_FIFOLVL 0x168008 +/* [RC 32] Credit update error register */ +#define QM_REG_VOQCRDERRREG 0x168408 +/* [R 16] The credit value for each VOQ */ +#define QM_REG_VOQCREDIT_0 0x1682d0 +#define QM_REG_VOQCREDIT_1 0x1682d4 +#define QM_REG_VOQCREDIT_4 0x1682e0 +/* [RW 16] The credit value above which the QM is considered almost full */ +#define QM_REG_VOQCREDITAFULLTHR 0x168090 +/* [RW 16] The init and maximum credit for each VoQ */ +#define QM_REG_VOQINITCREDIT_0 0x168060 +#define QM_REG_VOQINITCREDIT_1 0x168064 +#define QM_REG_VOQINITCREDIT_2 0x168068 +#define QM_REG_VOQINITCREDIT_4 0x168070 +#define QM_REG_VOQINITCREDIT_5 0x168074 +/* [RW 1] The port to which each VOQ belongs */ +#define QM_REG_VOQPORT_0 0x1682a0 +#define QM_REG_VOQPORT_1 0x1682a4 +#define QM_REG_VOQPORT_2 0x1682a8 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_0_LSB 0x168240 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_0_MSB 0x168244 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_10_LSB 0x168290 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_10_MSB 0x168294 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_11_LSB 0x168298 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_11_MSB 0x16829c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_1_LSB 0x168248 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_1_MSB 0x16824c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define
QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_2_LSB 0x168250 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_2_MSB 0x168254 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_3_LSB 0x168258 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_4_LSB 0x168260 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_4_MSB 0x168264 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_5_LSB 0x168268 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_5_MSB 0x16826c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_6_LSB 0x168270 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_6_MSB 0x168274 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_7_LSB 0x168278 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_7_MSB 0x16827c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_8_LSB 0x168280 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_8_MSB 0x168284 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_9_LSB 0x168288 +/* [RW 32] The physical queue number associated with each VOQ; queues 
95-64 */ +#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570 +/* [RW 32] Wrr weights */ +#define QM_REG_WRRWEIGHTS_0 0x16880c +#define QM_REG_WRRWEIGHTS_1 0x168810 +#define QM_REG_WRRWEIGHTS_10 0x168814 +#define QM_REG_WRRWEIGHTS_11 0x168818 +#define QM_REG_WRRWEIGHTS_12 0x16881c +#define QM_REG_WRRWEIGHTS_13 0x168820 +#define QM_REG_WRRWEIGHTS_14 0x168824 +#define QM_REG_WRRWEIGHTS_15 0x168828 +#define QM_REG_WRRWEIGHTS_16 0x16e000 +#define QM_REG_WRRWEIGHTS_17 0x16e004 +#define QM_REG_WRRWEIGHTS_18 0x16e008 +#define QM_REG_WRRWEIGHTS_19 0x16e00c +#define QM_REG_WRRWEIGHTS_2 0x16882c +#define QM_REG_WRRWEIGHTS_20 0x16e010 +#define QM_REG_WRRWEIGHTS_21 0x16e014 +#define QM_REG_WRRWEIGHTS_22 0x16e018 +#define QM_REG_WRRWEIGHTS_23 0x16e01c +#define QM_REG_WRRWEIGHTS_24 0x16e020 +#define QM_REG_WRRWEIGHTS_25 0x16e024 +#define QM_REG_WRRWEIGHTS_26 0x16e028 +#define QM_REG_WRRWEIGHTS_27 0x16e02c +#define QM_REG_WRRWEIGHTS_28 0x16e030 +#define QM_REG_WRRWEIGHTS_29 0x16e034 +#define QM_REG_WRRWEIGHTS_3 0x168830 +#define QM_REG_WRRWEIGHTS_30 0x16e038 +#define QM_REG_WRRWEIGHTS_31 0x16e03c +#define QM_REG_WRRWEIGHTS_4 0x168834 +#define QM_REG_WRRWEIGHTS_5 0x168838 +#define QM_REG_WRRWEIGHTS_6 0x16883c +#define QM_REG_WRRWEIGHTS_7 0x168840 +#define QM_REG_WRRWEIGHTS_8 0x168844 +#define QM_REG_WRRWEIGHTS_9 0x168848 +/* [R 6] Keep the fill level of the fifo from write client 1 */ +#define QM_REG_XQM_WRC_FIFOLVL 0x168000 +/* [W 1] reset to parity interrupt */ +#define SEM_FAST_REG_PARITY_RST 0x18840 +#define SRC_REG_COUNTFREE0 0x40500 +/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two + ports. If set the searcher support 8 functions. */ +#define SRC_REG_E1HMF_ENABLE 0x404cc +#define SRC_REG_FIRSTFREE0 0x40510 +#define SRC_REG_KEYRSS0_0 0x40408 +#define SRC_REG_KEYRSS0_7 0x40424 +#define SRC_REG_KEYRSS1_9 0x40454 +#define SRC_REG_KEYSEARCH_0 0x40458 +#define SRC_REG_KEYSEARCH_1 0x4045c +#define SRC_REG_KEYSEARCH_2 0x40460 +#define SRC_REG_KEYSEARCH_3 0x40464 +#define SRC_REG_KEYSEARCH_4 0x40468 +#define SRC_REG_KEYSEARCH_5 0x4046c +#define SRC_REG_KEYSEARCH_6 0x40470 +#define SRC_REG_KEYSEARCH_7 0x40474 +#define SRC_REG_KEYSEARCH_8 0x40478 +#define SRC_REG_KEYSEARCH_9 0x4047c +#define SRC_REG_LASTFREE0 0x40530 +#define SRC_REG_NUMBER_HASH_BITS0 0x40400 +/* [RW 1] Reset internal state machines. */ +#define SRC_REG_SOFT_RST 0x4049c +/* [R 3] Interrupt register #0 read */ +#define SRC_REG_SRC_INT_STS 0x404ac +/* [RW 3] Parity mask register #0 read/write */ +#define SRC_REG_SRC_PRTY_MASK 0x404c8 +/* [R 3] Parity register #0 read */ +#define SRC_REG_SRC_PRTY_STS 0x404bc +/* [RC 3] Parity register #0 read clear */ +#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0 +/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ +#define TCM_REG_CAM_OCCUP 0x5017c +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define TCM_REG_CDU_AG_RD_IFEN 0x50034 +/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define TCM_REG_CDU_AG_WR_IFEN 0x50030 +/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. 
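Each VOQ's 128-queue mask is split across the four VOQQMASK registers above: _LSB covers queues 31-0, _MSB queues 63-32, _LSB_EXT_A queues 95-64 and _MSB_EXT_A queues 127-96, with an 8-byte stride per VOQ in each bank. A sketch of the register/bit selection this implies, not part of the patch:

	/* Illustrative: locate the VOQQMASK register and bit that cover
	 * physical queue q (0..127) of VOQ v, per the offsets above. */
	static inline void qm_voqqmask_locate(int v, int q, u32 *reg, u32 *bit)
	{
		u32 lsb = q < 64 ? 0x168240 + 8 * v	/* queues 0-63 */
				 : 0x16e524 + 8 * v;	/* queues 64-127 */

		*reg = lsb + (q & 32 ? 4 : 0);	/* +4 selects the MSB half */
		*bit = q & 31;
	}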
*/ +#define TCM_REG_CDU_SM_RD_IFEN 0x5003c +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define TCM_REG_CDU_SM_WR_IFEN 0x50038 +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define TCM_REG_CFC_INIT_CRD 0x50204 +/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_CP_WEIGHT 0x500c0 +/* [RW 1] Input csem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_CSEM_IFEN 0x5002c +/* [RC 1] Message length mismatch (relative to last indication) at the In#9 + interface. */ +#define TCM_REG_CSEM_LENGTH_MIS 0x50174 +/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_CSEM_WEIGHT 0x500bc +/* [RW 8] The Event ID in case of ErrorFlg is set in the input message. */ +#define TCM_REG_ERR_EVNT_ID 0x500a0 +/* [RW 28] The CM erroneous header for QM and Timers formatting. */ +#define TCM_REG_ERR_TCM_HDR 0x5009c +/* [RW 8] The Event ID for Timers expiration. */ +#define TCM_REG_EXPR_EVNT_ID 0x500a4 +/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define TCM_REG_FIC0_INIT_CRD 0x5020c +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define TCM_REG_FIC1_INIT_CRD 0x50210 +/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr; + ~tcm_registers_gr_ld0_pr.gr_ld0_pr and + ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */ +#define TCM_REG_GR_ARB_TYPE 0x50114 +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel is the + compliment of the other 3 groups. */ +#define TCM_REG_GR_LD0_PR 0x5011c +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel is the + compliment of the other 3 groups. */ +#define TCM_REG_GR_LD1_PR 0x50120 +/* [RW 4] The number of double REG-pairs; loaded from the STORM context and + sent to STORM; for a specific connection type. The double REG-pairs are + used to align to STORM context row size of 128 bits. The offset of these + data in the STORM context is always 0. Index _i stands for the connection + type (one of 16). */ +#define TCM_REG_N_SM_CTX_LD_0 0x50050 +#define TCM_REG_N_SM_CTX_LD_1 0x50054 +#define TCM_REG_N_SM_CTX_LD_2 0x50058 +#define TCM_REG_N_SM_CTX_LD_3 0x5005c +#define TCM_REG_N_SM_CTX_LD_4 0x50060 +#define TCM_REG_N_SM_CTX_LD_5 0x50064 +/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
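The WRR weight fields used throughout the CM blocks share one encoding, spelled out in the comments above: 0 means the maximum weight of 8, and any non-zero value 1-7 is the weight itself. As a one-line decode, illustrative only:

	/* Decode a 3-bit CM WRR weight field: 0 encodes 8, else literal. */
	static inline int cm_wrr_weight(u32 field)
	{
		return field ? (int)field : 8;
	}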
*/ +#define TCM_REG_PBF_IFEN 0x50024 +/* [RC 1] Message length mismatch (relative to last indication) at the In#7 + interface. */ +#define TCM_REG_PBF_LENGTH_MIS 0x5016c +/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_PBF_WEIGHT 0x500b4 +#define TCM_REG_PHYS_QNUM0_0 0x500e0 +#define TCM_REG_PHYS_QNUM0_1 0x500e4 +#define TCM_REG_PHYS_QNUM1_0 0x500e8 +#define TCM_REG_PHYS_QNUM1_1 0x500ec +#define TCM_REG_PHYS_QNUM2_0 0x500f0 +#define TCM_REG_PHYS_QNUM2_1 0x500f4 +#define TCM_REG_PHYS_QNUM3_0 0x500f8 +#define TCM_REG_PHYS_QNUM3_1 0x500fc +/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_PRS_IFEN 0x50020 +/* [RC 1] Message length mismatch (relative to last indication) at the In#6 + interface. */ +#define TCM_REG_PRS_LENGTH_MIS 0x50168 +/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_PRS_WEIGHT 0x500b0 +/* [RW 8] The Event ID for Timers formatting in case of stop done. */ +#define TCM_REG_STOP_EVNT_ID 0x500a8 +/* [RC 1] Message length mismatch (relative to last indication) at the STORM + interface. */ +#define TCM_REG_STORM_LENGTH_MIS 0x50160 +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_STORM_TCM_IFEN 0x50010 +/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_STORM_WEIGHT 0x500ac +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TCM_CFC_IFEN 0x50040 +/* [RW 11] Interrupt mask register #0 read/write */ +#define TCM_REG_TCM_INT_MASK 0x501dc +/* [R 11] Interrupt register #0 read */ +#define TCM_REG_TCM_INT_STS 0x501d0 +/* [RW 27] Parity mask register #0 read/write */ +#define TCM_REG_TCM_PRTY_MASK 0x501ec +/* [R 27] Parity register #0 read */ +#define TCM_REG_TCM_PRTY_STS 0x501e0 +/* [RC 27] Parity register #0 read clear */ +#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4 +/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the input message Reg1WbFlg isn't set. */ +#define TCM_REG_TCM_REG0_SZ 0x500d8 +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TCM_STORM0_IFEN 0x50004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TCM_STORM1_IFEN 0x50008 +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
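As the TCM_REG_TCM_REG0_SZ comment notes, the register holds the most-significant REG-pair number, i.e. the region size minus one. A sketch of the programming this implies, assuming the driver's REG_WR accessor and bnx2x device handle:

	static void tcm_set_reg0_sz(struct bnx2x *bp)
	{
		/* Region 0 of 6 REG-pairs -> program 6 - 1 = 5, per the
		 * example in the comment above. */
		REG_WR(bp, TCM_REG_TCM_REG0_SZ, 6 - 1);
	}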
*/ +#define TCM_REG_TCM_TQM_IFEN 0x5000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ +#define TCM_REG_TCM_TQM_USE_Q 0x500d4 +/* [RW 28] The CM header for Timers expiration command. */ +#define TCM_REG_TM_TCM_HDR 0x50098 +/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_TM_TCM_IFEN 0x5001c +/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TM_WEIGHT 0x500d0 +/* [RW 6] QM output initial credit. Max credit available - 32.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define TCM_REG_TQM_INIT_CRD 0x5021c +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TQM_P_WEIGHT 0x500c8 +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TQM_S_WEIGHT 0x500cc +/* [RW 28] The CM header value for QM request (primary). */ +#define TCM_REG_TQM_TCM_HDR_P 0x50090 +/* [RW 28] The CM header value for QM request (secondary). */ +#define TCM_REG_TQM_TCM_HDR_S 0x50094 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TQM_TCM_IFEN 0x50014 +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TSDM_IFEN 0x50018 +/* [RC 1] Message length mismatch (relative to last indication) at the SDM + interface. */ +#define TCM_REG_TSDM_LENGTH_MIS 0x50164 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TSDM_WEIGHT 0x500c4 +/* [RW 1] Input usem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_USEM_IFEN 0x50028 +/* [RC 1] Message length mismatch (relative to last indication) at the In#8 + interface. */ +#define TCM_REG_USEM_LENGTH_MIS 0x50170 +/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_USEM_WEIGHT 0x500b8 +/* [RW 21] Indirect access to the descriptor table of the XX protection + mechanism. The fields are: [5:0] - length of the message; 15:6] - message + pointer; 20:16] - next pointer. */ +#define TCM_REG_XX_DESCR_TABLE 0x50280 +#define TCM_REG_XX_DESCR_TABLE_SIZE 29 +/* [R 6] Use to read the value of XX protection Free counter. */ +#define TCM_REG_XX_FREE 0x50178 +/* [RW 6] Initial value for the credit counter; responsible for fulfilling + of the Input Stage XX protection buffer by the XX protection pending + messages. 
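The TCM credit registers above each state a required start-up value ("Must be initialized to N at start-up"). Collected into one illustrative init sketch, again assuming the driver's REG_WR accessor:

	static void tcm_init_credits(struct bnx2x *bp)
	{
		REG_WR(bp, TCM_REG_CFC_INIT_CRD, 1);	/* init to 1 */
		REG_WR(bp, TCM_REG_FIC0_INIT_CRD, 64);	/* init to 64 */
		REG_WR(bp, TCM_REG_FIC1_INIT_CRD, 64);	/* init to 64 */
		REG_WR(bp, TCM_REG_TQM_INIT_CRD, 32);	/* init to 32 */
	}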
Max credit available - 127.Write writes the initial credit + value; read returns the current value of the credit counter. Must be + initialized to 19 at start-up. */ +#define TCM_REG_XX_INIT_CRD 0x50220 +/* [RW 6] Maximum link list size (messages locked) per connection in the XX + protection. */ +#define TCM_REG_XX_MAX_LL_SZ 0x50044 +/* [RW 6] The maximum number of pending messages; which may be stored in XX + protection. ~tcm_registers_xx_free.xx_free is read on read. */ +#define TCM_REG_XX_MSG_NUM 0x50224 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define TCM_REG_XX_OVFL_EVNT_ID 0x50048 +/* [RW 16] Indirect access to the XX table of the XX protection mechanism. + The fields are:[4:0] - tail pointer; [10:5] - Link List size; 15:11] - + header pointer. */ +#define TCM_REG_XX_TABLE 0x50240 +/* [RW 4] Load value for cfc ac credit cnt. */ +#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208 +/* [RW 4] Load value for cfc cld credit cnt. */ +#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210 +/* [RW 8] Client0 context region. */ +#define TM_REG_CL0_CONT_REGION 0x164030 +/* [RW 8] Client1 context region. */ +#define TM_REG_CL1_CONT_REGION 0x164034 +/* [RW 8] Client2 context region. */ +#define TM_REG_CL2_CONT_REGION 0x164038 +/* [RW 2] Client in High priority client number. */ +#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024 +/* [RW 4] Load value for clout0 cred cnt. */ +#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220 +/* [RW 4] Load value for clout1 cred cnt. */ +#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228 +/* [RW 4] Load value for clout2 cred cnt. */ +#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230 +/* [RW 1] Enable client0 input. */ +#define TM_REG_EN_CL0_INPUT 0x164008 +/* [RW 1] Enable client1 input. */ +#define TM_REG_EN_CL1_INPUT 0x16400c +/* [RW 1] Enable client2 input. */ +#define TM_REG_EN_CL2_INPUT 0x164010 +#define TM_REG_EN_LINEAR0_TIMER 0x164014 +/* [RW 1] Enable real time counter. */ +#define TM_REG_EN_REAL_TIME_CNT 0x1640d8 +/* [RW 1] Enable for Timers state machines. */ +#define TM_REG_EN_TIMERS 0x164000 +/* [RW 4] Load value for expiration credit cnt. CFC max number of + outstanding load requests for timers (expiration) context loading. */ +#define TM_REG_EXP_CRDCNT_VAL 0x164238 +/* [RW 32] Linear0 logic address. */ +#define TM_REG_LIN0_LOGIC_ADDR 0x164240 +/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */ +#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048 +/* [ST 16] Linear0 Number of scans counter. */ +#define TM_REG_LIN0_NUM_SCANS 0x1640a0 +/* [WB 64] Linear0 phy address. */ +#define TM_REG_LIN0_PHY_ADDR 0x164270 +/* [RW 1] Linear0 physical address valid. */ +#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248 +#define TM_REG_LIN0_SCAN_ON 0x1640d0 +/* [RW 24] Linear0 array scan timeout. */ +#define TM_REG_LIN0_SCAN_TIME 0x16403c +#define TM_REG_LIN0_VNIC_UC 0x164128 +/* [RW 32] Linear1 logic address. */ +#define TM_REG_LIN1_LOGIC_ADDR 0x164250 +/* [WB 64] Linear1 phy address. */ +#define TM_REG_LIN1_PHY_ADDR 0x164280 +/* [RW 1] Linear1 physical address valid. */ +#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258 +/* [RW 6] Linear timer set_clear fifo threshold. */ +#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070 +/* [RW 2] Load value for pci arbiter credit cnt. */ +#define TM_REG_PCIARB_CRDCNT_VAL 0x164260 +/* [RW 20] The amount of hardware cycles for each timer tick. */ +#define TM_REG_TIMER_TICK_SIZE 0x16401c +/* [RW 8] Timers Context region. 
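The XX-protection descriptor word packs three fields, per the TCM_REG_XX_DESCR_TABLE comment above: [5:0] message length, [15:6] message pointer, [20:16] next pointer. An illustrative decode:

	static inline void tcm_xx_descr_decode(u32 d, u32 *len, u32 *msg_ptr,
					       u32 *next_ptr)
	{
		*len      = d & 0x3f;		/* [5:0]   message length  */
		*msg_ptr  = (d >> 6) & 0x3ff;	/* [15:6]  message pointer */
		*next_ptr = (d >> 16) & 0x1f;	/* [20:16] next pointer    */
	}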
*/ +#define TM_REG_TM_CONTEXT_REGION 0x164044 +/* [RW 1] Interrupt mask register #0 read/write */ +#define TM_REG_TM_INT_MASK 0x1640fc +/* [R 1] Interrupt register #0 read */ +#define TM_REG_TM_INT_STS 0x1640f0 +/* [RW 7] Parity mask register #0 read/write */ +#define TM_REG_TM_PRTY_MASK 0x16410c +/* [RC 7] Parity register #0 read clear */ +#define TM_REG_TM_PRTY_STS_CLR 0x164104 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define TSDM_REG_AGG_INT_EVENT_0 0x42038 +#define TSDM_REG_AGG_INT_EVENT_1 0x4203c +#define TSDM_REG_AGG_INT_EVENT_2 0x42040 +#define TSDM_REG_AGG_INT_EVENT_3 0x42044 +#define TSDM_REG_AGG_INT_EVENT_4 0x42048 +/* [RW 1] The T bit for aggregated interrupt 0 */ +#define TSDM_REG_AGG_INT_T_0 0x420b8 +#define TSDM_REG_AGG_INT_T_1 0x420bc +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define TSDM_REG_CFC_RSP_START_ADDR 0x42008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c +/* [RW 16] The maximum value of the completion counter #1 */ +#define TSDM_REG_CMP_COUNTER_MAX1 0x42020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define TSDM_REG_CMP_COUNTER_MAX2 0x42024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define TSDM_REG_CMP_COUNTER_MAX3 0x42028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c +#define TSDM_REG_ENABLE_IN1 0x42238 +#define TSDM_REG_ENABLE_IN2 0x4223c +#define TSDM_REG_ENABLE_OUT1 0x42240 +#define TSDM_REG_ENABLE_OUT2 0x42244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. */ +#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc +/* [ST 32] The number of ACK after placement messages received */ +#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c +/* [ST 32] The number of packet end messages received from the parser */ +#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274 +/* [ST 32] The number of requests received from the pxp async if */ +#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278 +/* [ST 32] The number of commands received in queue 0 */ +#define TSDM_REG_NUM_OF_Q0_CMD 0x42248 +/* [ST 32] The number of commands received in queue 10 */ +#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c +/* [ST 32] The number of commands received in queue 11 */ +#define TSDM_REG_NUM_OF_Q11_CMD 0x42270 +/* [ST 32] The number of commands received in queue 1 */ +#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c +/* [ST 32] The number of commands received in queue 3 */ +#define TSDM_REG_NUM_OF_Q3_CMD 0x42250 +/* [ST 32] The number of commands received in queue 4 */ +#define TSDM_REG_NUM_OF_Q4_CMD 0x42254 +/* [ST 32] The number of commands received in queue 5 */ +#define TSDM_REG_NUM_OF_Q5_CMD 0x42258 +/* [ST 32] The number of commands received in queue 6 */ +#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c +/* [ST 32] The number of commands received in queue 7 */ +#define TSDM_REG_NUM_OF_Q7_CMD 0x42260 +/* [ST 32] The number of commands received in queue 8 */ +#define TSDM_REG_NUM_OF_Q8_CMD 0x42264 +/* [ST 32] The number of commands received in queue 9 */ +#define TSDM_REG_NUM_OF_Q9_CMD 0x42268 +/* [RW 13] The start address in the internal RAM for the packet end message */ +#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 
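Tying the TM registers above together: the tick size is programmed in hardware cycles per tick, and the state machines are then enabled. A sketch under an assumed 200 MHz core clock (the clock rate is hypothetical, not from this patch):

	static void tm_timers_setup(struct bnx2x *bp)
	{
		/* 1 us tick at an assumed 200 MHz: 200 cycles per tick */
		REG_WR(bp, TM_REG_TIMER_TICK_SIZE, 200);
		/* then start the TM state machines */
		REG_WR(bp, TM_REG_EN_TIMERS, 1);
	}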
0x42548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558 +/* [RW 32] Tick for timer counter. Applicable only when + ~tsdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define TSDM_REG_TIMER_TICK 0x42000 +/* [RW 32] Interrupt mask register #0 read/write */ +#define TSDM_REG_TSDM_INT_MASK_0 0x4229c +#define TSDM_REG_TSDM_INT_MASK_1 0x422ac +/* [R 32] Interrupt register #0 read */ +#define TSDM_REG_TSDM_INT_STS_0 0x42290 +#define TSDM_REG_TSDM_INT_STS_1 0x422a0 +/* [RW 11] Parity mask register #0 read/write */ +#define TSDM_REG_TSDM_PRTY_MASK 0x422bc +/* [R 11] Parity register #0 read */ +#define TSDM_REG_TSDM_PRTY_STS 0x422b0 +/* [RC 11] Parity register #0 read clear */ +#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define TSEM_REG_ARB_CYCLE_SIZE 0x180034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define TSEM_REG_ARB_ELEMENT0 0x180020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */ +#define TSEM_REG_ARB_ELEMENT1 0x180024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~tsem_registers_arb_element0.arb_element0 + and ~tsem_registers_arb_element1.arb_element1 */ +#define TSEM_REG_ARB_ELEMENT2 0x180028 +/* [RW 3] The source that is associated with arbitration element 3. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~tsem_registers_arb_element0.arb_element0 and + ~tsem_registers_arb_element1.arb_element1 and + ~tsem_registers_arb_element2.arb_element2 */ +#define TSEM_REG_ARB_ELEMENT3 0x18002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~tsem_registers_arb_element0.arb_element0 + and ~tsem_registers_arb_element1.arb_element1 and + ~tsem_registers_arb_element2.arb_element2 and + ~tsem_registers_arb_element3.arb_element3 */ +#define TSEM_REG_ARB_ELEMENT4 0x180030 +#define TSEM_REG_ENABLE_IN 0x1800a4 +#define TSEM_REG_ENABLE_OUT 0x1800a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. 
*/ +#define TSEM_REG_FAST_MEMORY 0x1a0000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define TSEM_REG_FIC0_DISABLE 0x180224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define TSEM_REG_FIC1_DISABLE 0x180234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define TSEM_REG_INT_TABLE 0x180400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define TSEM_REG_MSG_NUM_FIC0 0x180000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define TSEM_REG_MSG_NUM_FIC1 0x180004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define TSEM_REG_MSG_NUM_FOC0 0x180008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define TSEM_REG_MSG_NUM_FOC1 0x18000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define TSEM_REG_MSG_NUM_FOC2 0x180010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define TSEM_REG_MSG_NUM_FOC3 0x180014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define TSEM_REG_PAS_DISABLE 0x18024c +/* [WB 128] Debug only. Passive buffer memory */ +#define TSEM_REG_PASSIVE_BUFFER 0x181000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */ +#define TSEM_REG_PRAM 0x1c0000 +/* [R 8] Valid sleeping threads indication have bit per thread */ +#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 +/* [RW 8] List of free threads . There is a bit per thread. 
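The fast-memory comment above gives the addressing rule for SEM_FAST registers: add the SEM's fast-memory window base to the SEM_FAST register offset. Applied to the SEM_FAST_REG_PARITY_RST register defined earlier, as an illustrative sketch:

	static void tsem_fast_parity_rst(struct bnx2x *bp)
	{
		/* window base + SEM_FAST offset reaches the fast register */
		REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 1);
	}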
*/ +#define TSEM_REG_THREADS_LIST 0x1802e4 +/* [RC 32] Parity register #0 read clear */ +#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118 +#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define TSEM_REG_TS_0_AS 0x180038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define TSEM_REG_TS_10_AS 0x180060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define TSEM_REG_TS_11_AS 0x180064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define TSEM_REG_TS_12_AS 0x180068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define TSEM_REG_TS_13_AS 0x18006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define TSEM_REG_TS_14_AS 0x180070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define TSEM_REG_TS_15_AS 0x180074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define TSEM_REG_TS_16_AS 0x180078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define TSEM_REG_TS_17_AS 0x18007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define TSEM_REG_TS_18_AS 0x180080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define TSEM_REG_TS_1_AS 0x18003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define TSEM_REG_TS_2_AS 0x180040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define TSEM_REG_TS_3_AS 0x180044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define TSEM_REG_TS_4_AS 0x180048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define TSEM_REG_TS_5_AS 0x18004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define TSEM_REG_TS_6_AS 0x180050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define TSEM_REG_TS_7_AS 0x180054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define TSEM_REG_TS_8_AS 0x180058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define TSEM_REG_TS_9_AS 0x18005c +/* [RW 32] Interrupt mask register #0 read/write */ +#define TSEM_REG_TSEM_INT_MASK_0 0x180100 +#define TSEM_REG_TSEM_INT_MASK_1 0x180110 +/* [R 32] Interrupt register #0 read */ +#define TSEM_REG_TSEM_INT_STS_0 0x1800f4 +#define TSEM_REG_TSEM_INT_STS_1 0x180104 +/* [RW 32] Parity mask register #0 read/write */ +#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120 +#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130 +/* [R 32] Parity register #0 read */ +#define TSEM_REG_TSEM_PRTY_STS_0 0x180114 +#define TSEM_REG_TSEM_PRTY_STS_1 0x180124 +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ +#define TSEM_REG_VFPF_ERR_NUM 0x180380 +/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits + * [10:8] of the address should be the offset within the accessed LCID + * context; the bits [7:0] are the accessed LCID.Example: to write to REG10 + * LCID100. The RBC address should be 12'ha64. */ +#define UCM_REG_AG_CTX 0xe2000 +/* [R 5] Used to read the XX protection CAM occupancy counter. */ +#define UCM_REG_CAM_OCCUP 0xe0170 +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define UCM_REG_CDU_AG_RD_IFEN 0xe0038 +/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define UCM_REG_CDU_AG_WR_IFEN 0xe0034 +/* [RW 1] CDU STORM read Interface enable. 
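The per-time-slot arbitration-scheme registers above are contiguous, one 32-bit register per slot starting at TS_0_AS. An address helper, illustrative only:

	/* TSEM_REG_TS_<n>_AS for n = 0..18 (e.g. n=9 -> 0x18005c). */
	static inline u32 tsem_ts_as(int slot)
	{
		return TSEM_REG_TS_0_AS + 4 * slot;
	}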
If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define UCM_REG_CDU_SM_RD_IFEN 0xe0040 +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define UCM_REG_CDU_SM_WR_IFEN 0xe003c +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define UCM_REG_CFC_INIT_CRD 0xe0204 +/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_CP_WEIGHT 0xe00c4 +/* [RW 1] Input csem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_CSEM_IFEN 0xe0028 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the csem interface is detected. */ +#define UCM_REG_CSEM_LENGTH_MIS 0xe0160 +/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_CSEM_WEIGHT 0xe00b8 +/* [RW 1] Input dorq Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_DORQ_IFEN 0xe0030 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the dorq interface is detected. */ +#define UCM_REG_DORQ_LENGTH_MIS 0xe0168 +/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_DORQ_WEIGHT 0xe00c0 +/* [RW 8] The Event ID in case ErrorFlg input message bit is set. */ +#define UCM_REG_ERR_EVNT_ID 0xe00a4 +/* [RW 28] The CM erroneous header for QM and Timers formatting. */ +#define UCM_REG_ERR_UCM_HDR 0xe00a0 +/* [RW 8] The Event ID for Timers expiration. */ +#define UCM_REG_EXPR_EVNT_ID 0xe00a8 +/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define UCM_REG_FIC0_INIT_CRD 0xe020c +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define UCM_REG_FIC1_INIT_CRD 0xe0210 +/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr; + ~ucm_registers_gr_ld0_pr.gr_ld0_pr and + ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */ +#define UCM_REG_GR_ARB_TYPE 0xe0144 +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel group is + compliment to the others. */ +#define UCM_REG_GR_LD0_PR 0xe014c +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel group is + compliment to the others. 
*/ +#define UCM_REG_GR_LD1_PR 0xe0150 +/* [RW 2] The queue index for invalidate counter flag decision. */ +#define UCM_REG_INV_CFLG_Q 0xe00e4 +/* [RW 5] The number of double REG-pairs; loaded from the STORM context and + sent to STORM; for a specific connection type. the double REG-pairs are + used in order to align to STORM context row size of 128 bits. The offset + of these data in the STORM context is always 0. Index _i stands for the + connection type (one of 16). */ +#define UCM_REG_N_SM_CTX_LD_0 0xe0054 +#define UCM_REG_N_SM_CTX_LD_1 0xe0058 +#define UCM_REG_N_SM_CTX_LD_2 0xe005c +#define UCM_REG_N_SM_CTX_LD_3 0xe0060 +#define UCM_REG_N_SM_CTX_LD_4 0xe0064 +#define UCM_REG_N_SM_CTX_LD_5 0xe0068 +#define UCM_REG_PHYS_QNUM0_0 0xe0110 +#define UCM_REG_PHYS_QNUM0_1 0xe0114 +#define UCM_REG_PHYS_QNUM1_0 0xe0118 +#define UCM_REG_PHYS_QNUM1_1 0xe011c +#define UCM_REG_PHYS_QNUM2_0 0xe0120 +#define UCM_REG_PHYS_QNUM2_1 0xe0124 +#define UCM_REG_PHYS_QNUM3_0 0xe0128 +#define UCM_REG_PHYS_QNUM3_1 0xe012c +/* [RW 8] The Event ID for Timers formatting in case of stop done. */ +#define UCM_REG_STOP_EVNT_ID 0xe00ac +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the STORM interface is detected. */ +#define UCM_REG_STORM_LENGTH_MIS 0xe0154 +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_STORM_UCM_IFEN 0xe0010 +/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_STORM_WEIGHT 0xe00b0 +/* [RW 4] Timers output initial credit. Max credit available - 15.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 4 at start-up. */ +#define UCM_REG_TM_INIT_CRD 0xe021c +/* [RW 28] The CM header for Timers expiration command. */ +#define UCM_REG_TM_UCM_HDR 0xe009c +/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_TM_UCM_IFEN 0xe001c +/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_TM_WEIGHT 0xe00d4 +/* [RW 1] Input tsem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_TSEM_IFEN 0xe0024 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the tsem interface is detected. */ +#define UCM_REG_TSEM_LENGTH_MIS 0xe015c +/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_TSEM_WEIGHT 0xe00b4 +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
*/ +#define UCM_REG_UCM_CFC_IFEN 0xe0044 +/* [RW 11] Interrupt mask register #0 read/write */ +#define UCM_REG_UCM_INT_MASK 0xe01d4 +/* [R 11] Interrupt register #0 read */ +#define UCM_REG_UCM_INT_STS 0xe01c8 +/* [RW 27] Parity mask register #0 read/write */ +#define UCM_REG_UCM_PRTY_MASK 0xe01e4 +/* [R 27] Parity register #0 read */ +#define UCM_REG_UCM_PRTY_STS 0xe01d8 +/* [RC 27] Parity register #0 read clear */ +#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc +/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the Reg1WbFlg isn't set. */ +#define UCM_REG_UCM_REG0_SZ 0xe00dc +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UCM_STORM0_IFEN 0xe0004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UCM_STORM1_IFEN 0xe0008 +/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_UCM_TM_IFEN 0xe0020 +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UCM_UQM_IFEN 0xe000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ +#define UCM_REG_UCM_UQM_USE_Q 0xe00d8 +/* [RW 6] QM output initial credit. Max credit available - 32.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define UCM_REG_UQM_INIT_CRD 0xe0220 +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_UQM_P_WEIGHT 0xe00cc +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_UQM_S_WEIGHT 0xe00d0 +/* [RW 28] The CM header value for QM request (primary). */ +#define UCM_REG_UQM_UCM_HDR_P 0xe0094 +/* [RW 28] The CM header value for QM request (secondary). */ +#define UCM_REG_UQM_UCM_HDR_S 0xe0098 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UQM_UCM_IFEN 0xe0014 +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_USDM_IFEN 0xe0018 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the SDM interface is detected. */ +#define UCM_REG_USDM_LENGTH_MIS 0xe0158 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_USDM_WEIGHT 0xe00c8 +/* [RW 1] Input xsem Interface enable. 
If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_XSEM_IFEN 0xe002c +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the xsem interface isdetected. */ +#define UCM_REG_XSEM_LENGTH_MIS 0xe0164 +/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_XSEM_WEIGHT 0xe00bc +/* [RW 20] Indirect access to the descriptor table of the XX protection + mechanism. The fields are:[5:0] - message length; 14:6] - message + pointer; 19:15] - next pointer. */ +#define UCM_REG_XX_DESCR_TABLE 0xe0280 +#define UCM_REG_XX_DESCR_TABLE_SIZE 27 +/* [R 6] Use to read the XX protection Free counter. */ +#define UCM_REG_XX_FREE 0xe016c +/* [RW 6] Initial value for the credit counter; responsible for fulfilling + of the Input Stage XX protection buffer by the XX protection pending + messages. Write writes the initial credit value; read returns the current + value of the credit counter. Must be initialized to 12 at start-up. */ +#define UCM_REG_XX_INIT_CRD 0xe0224 +/* [RW 6] The maximum number of pending messages; which may be stored in XX + protection. ~ucm_registers_xx_free.xx_free read on read. */ +#define UCM_REG_XX_MSG_NUM 0xe0228 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c +/* [RW 16] Indirect access to the XX table of the XX protection mechanism. + The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - + header pointer. */ +#define UCM_REG_XX_TABLE 0xe0300 +#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28) +#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) +#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) +#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5) +#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8) +#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4) +#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1) +#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) +#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0) +#define UMAC_REG_COMMAND_CONFIG 0x8 +/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers + * to bit 17 of the MAC address etc. */ +#define UMAC_REG_MAC_ADDR0 0xc +/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1 + * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */ +#define UMAC_REG_MAC_ADDR1 0x10 +/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive + * logic to check frames. 
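The two UMAC address registers above split the 48-bit MAC address as follows: MAC_ADDR0 bit n holds address bit 16 + n, and MAC_ADDR1 bit n holds address bit n, with its upper 16 bits reserved. A sketch that keeps the address abstract as a 48-bit value (how those bits map onto wire-format bytes is not stated here, and umac_base as a block base is an assumption, since the UMAC offsets are small):

	static void umac_set_mac(struct bnx2x *bp, u32 umac_base, u64 mac)
	{
		/* address bits 47..16 */
		REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, (u32)(mac >> 16));
		/* address bits 15..0; bits 16..31 of the register reserved */
		REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, (u32)(mac & 0xffff));
	}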
*/ +#define UMAC_REG_MAXFR 0x14 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define USDM_REG_AGG_INT_EVENT_0 0xc4038 +#define USDM_REG_AGG_INT_EVENT_1 0xc403c +#define USDM_REG_AGG_INT_EVENT_2 0xc4040 +#define USDM_REG_AGG_INT_EVENT_4 0xc4048 +#define USDM_REG_AGG_INT_EVENT_5 0xc404c +#define USDM_REG_AGG_INT_EVENT_6 0xc4050 +/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) + or auto-mask-mode (1) */ +#define USDM_REG_AGG_INT_MODE_0 0xc41b8 +#define USDM_REG_AGG_INT_MODE_1 0xc41bc +#define USDM_REG_AGG_INT_MODE_4 0xc41c8 +#define USDM_REG_AGG_INT_MODE_5 0xc41cc +#define USDM_REG_AGG_INT_MODE_6 0xc41d0 +/* [RW 1] The T bit for aggregated interrupt 5 */ +#define USDM_REG_AGG_INT_T_5 0xc40cc +#define USDM_REG_AGG_INT_T_6 0xc40d0 +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define USDM_REG_CFC_RSP_START_ADDR 0xc4008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define USDM_REG_CMP_COUNTER_MAX0 0xc401c +/* [RW 16] The maximum value of the completion counter #1 */ +#define USDM_REG_CMP_COUNTER_MAX1 0xc4020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define USDM_REG_CMP_COUNTER_MAX2 0xc4024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define USDM_REG_CMP_COUNTER_MAX3 0xc4028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c +#define USDM_REG_ENABLE_IN1 0xc4238 +#define USDM_REG_ENABLE_IN2 0xc423c +#define USDM_REG_ENABLE_OUT1 0xc4240 +#define USDM_REG_ENABLE_OUT2 0xc4244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. */ +#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0 +/* [ST 32] The number of ACK after placement messages received */ +#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280 +/* [ST 32] The number of packet end messages received from the parser */ +#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278 +/* [ST 32] The number of requests received from the pxp async if */ +#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c +/* [ST 32] The number of commands received in queue 0 */ +#define USDM_REG_NUM_OF_Q0_CMD 0xc4248 +/* [ST 32] The number of commands received in queue 10 */ +#define USDM_REG_NUM_OF_Q10_CMD 0xc4270 +/* [ST 32] The number of commands received in queue 11 */ +#define USDM_REG_NUM_OF_Q11_CMD 0xc4274 +/* [ST 32] The number of commands received in queue 1 */ +#define USDM_REG_NUM_OF_Q1_CMD 0xc424c +/* [ST 32] The number of commands received in queue 2 */ +#define USDM_REG_NUM_OF_Q2_CMD 0xc4250 +/* [ST 32] The number of commands received in queue 3 */ +#define USDM_REG_NUM_OF_Q3_CMD 0xc4254 +/* [ST 32] The number of commands received in queue 4 */ +#define USDM_REG_NUM_OF_Q4_CMD 0xc4258 +/* [ST 32] The number of commands received in queue 5 */ +#define USDM_REG_NUM_OF_Q5_CMD 0xc425c +/* [ST 32] The number of commands received in queue 6 */ +#define USDM_REG_NUM_OF_Q6_CMD 0xc4260 +/* [ST 32] The number of commands received in queue 7 */ +#define USDM_REG_NUM_OF_Q7_CMD 0xc4264 +/* [ST 32] The number of commands received in queue 8 */ +#define USDM_REG_NUM_OF_Q8_CMD 0xc4268 +/* [ST 32] The number of commands received in queue 9 */ +#define USDM_REG_NUM_OF_Q9_CMD 0xc426c +/* [RW 13] The start address in the internal RAM for the packet end message */ +#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010 +/* [R 1] 
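Unlike some of the other SDM instances (the TSDM list above has no queue 2 counter), the USDM per-queue command counters form a complete, contiguous run from queue 0 to queue 11, so the counter address is a simple function of the queue number. Illustrative helper, not part of the patch:

	/* USDM_REG_NUM_OF_Q<q>_CMD for q = 0..11 (e.g. q=11 -> 0xc4274). */
	static inline u32 usdm_num_of_q_cmd(int q)
	{
		return USDM_REG_NUM_OF_Q0_CMD + 4 * q;
	}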
pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550 +/* [R 1] parser fifo empty in sdm_sync block */ +#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560 +/* [RW 32] Tick for timer counter. Applicable only when + ~usdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define USDM_REG_TIMER_TICK 0xc4000 +/* [RW 32] Interrupt mask register #0 read/write */ +#define USDM_REG_USDM_INT_MASK_0 0xc42a0 +#define USDM_REG_USDM_INT_MASK_1 0xc42b0 +/* [R 32] Interrupt register #0 read */ +#define USDM_REG_USDM_INT_STS_0 0xc4294 +#define USDM_REG_USDM_INT_STS_1 0xc42a4 +/* [RW 11] Parity mask register #0 read/write */ +#define USDM_REG_USDM_PRTY_MASK 0xc42c0 +/* [R 11] Parity register #0 read */ +#define USDM_REG_USDM_PRTY_STS 0xc42b4 +/* [RC 11] Parity register #0 read clear */ +#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define USEM_REG_ARB_CYCLE_SIZE 0x300034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define USEM_REG_ARB_ELEMENT0 0x300020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~usem_registers_arb_element0.arb_element0 */ +#define USEM_REG_ARB_ELEMENT1 0x300024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~usem_registers_arb_element0.arb_element0 + and ~usem_registers_arb_element1.arb_element1 */ +#define USEM_REG_ARB_ELEMENT2 0x300028 +/* [RW 3] The source that is associated with arbitration element 3. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~usem_registers_arb_element0.arb_element0 and + ~usem_registers_arb_element1.arb_element1 and + ~usem_registers_arb_element2.arb_element2 */ +#define USEM_REG_ARB_ELEMENT3 0x30002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~usem_registers_arb_element0.arb_element0 + and ~usem_registers_arb_element1.arb_element1 and + ~usem_registers_arb_element2.arb_element2 and + ~usem_registers_arb_element3.arb_element3 */ +#define USEM_REG_ARB_ELEMENT4 0x300030 +#define USEM_REG_ENABLE_IN 0x3000a4 +#define USEM_REG_ENABLE_OUT 0x3000a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. 
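The ARB_ELEMENT comments above impose a pairwise-distinct constraint: each element's source may not repeat any earlier element's. A minimal validity check before programming them, illustrative only:

	static bool sem_arb_elements_valid(const u32 elem[5])
	{
		int i, j;

		for (i = 0; i < 5; i++)
			for (j = i + 1; j < 5; j++)
				if (elem[i] == elem[j])
					return false;
		return true;
	}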
*/ +#define USEM_REG_FAST_MEMORY 0x320000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define USEM_REG_FIC0_DISABLE 0x300224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define USEM_REG_FIC1_DISABLE 0x300234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define USEM_REG_INT_TABLE 0x300400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define USEM_REG_MSG_NUM_FIC0 0x300000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define USEM_REG_MSG_NUM_FIC1 0x300004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define USEM_REG_MSG_NUM_FOC0 0x300008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define USEM_REG_MSG_NUM_FOC1 0x30000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define USEM_REG_MSG_NUM_FOC2 0x300010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define USEM_REG_MSG_NUM_FOC3 0x300014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define USEM_REG_PAS_DISABLE 0x30024c +/* [WB 128] Debug only. Passive buffer memory */ +#define USEM_REG_PASSIVE_BUFFER 0x302000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */ +#define USEM_REG_PRAM 0x340000 +/* [R 16] Valid sleeping threads indication have bit per thread */ +#define USEM_REG_SLEEP_THREADS_VALID 0x30026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0 +/* [RW 16] List of free threads . There is a bit per thread. 
*/ +#define USEM_REG_THREADS_LIST 0x3002e4 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define USEM_REG_TS_0_AS 0x300038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define USEM_REG_TS_10_AS 0x300060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define USEM_REG_TS_11_AS 0x300064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define USEM_REG_TS_12_AS 0x300068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define USEM_REG_TS_13_AS 0x30006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define USEM_REG_TS_14_AS 0x300070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define USEM_REG_TS_15_AS 0x300074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define USEM_REG_TS_16_AS 0x300078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define USEM_REG_TS_17_AS 0x30007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define USEM_REG_TS_18_AS 0x300080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define USEM_REG_TS_1_AS 0x30003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define USEM_REG_TS_2_AS 0x300040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define USEM_REG_TS_3_AS 0x300044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define USEM_REG_TS_4_AS 0x300048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define USEM_REG_TS_5_AS 0x30004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define USEM_REG_TS_6_AS 0x300050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define USEM_REG_TS_7_AS 0x300054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define USEM_REG_TS_8_AS 0x300058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define USEM_REG_TS_9_AS 0x30005c +/* [RW 32] Interrupt mask register #0 read/write */ +#define USEM_REG_USEM_INT_MASK_0 0x300110 +#define USEM_REG_USEM_INT_MASK_1 0x300120 +/* [R 32] Interrupt register #0 read */ +#define USEM_REG_USEM_INT_STS_0 0x300104 +#define USEM_REG_USEM_INT_STS_1 0x300114 +/* [RW 32] Parity mask register #0 read/write */ +#define USEM_REG_USEM_PRTY_MASK_0 0x300130 +#define USEM_REG_USEM_PRTY_MASK_1 0x300140 +/* [R 32] Parity register #0 read */ +#define USEM_REG_USEM_PRTY_STS_0 0x300124 +#define USEM_REG_USEM_PRTY_STS_1 0x300134 +/* [RC 32] Parity register #0 read clear */ +#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128 +#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138 +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ +#define USEM_REG_VFPF_ERR_NUM 0x300380 +#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0) +#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1) +#define VFC_REG_MEMORIES_RST 0x1943c +/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits + * [12:8] of the address should be the offset within the accessed LCID + * context; the bits [7:0] are the accessed LCID.Example: to write to REG10 + * LCID100. The RBC address should be 13'ha64. */ +#define XCM_REG_AG_CTX 0x28000 +/* [RW 2] The queue index for registration on Aux1 counter flag. */ +#define XCM_REG_AUX1_Q 0x20134 +/* [RW 2] Per each decision rule the queue index to register to. */ +#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0 +/* [R 5] Used to read the XX protection CAM occupancy counter. */ +#define XCM_REG_CAM_OCCUP 0x20244 +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. 
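The XCM_REG_AG_CTX comment above fully determines the indirect address: bits [12:8] carry the offset within the LCID context and bits [7:0] the LCID. Its own example works out: REG10 of LCID 100 gives (10 << 8) | 0x64 = 0xa64, the quoted 13'ha64. As a helper, illustrative only:

	static inline u32 xcm_ag_ctx_rbc_addr(u32 reg_offset, u32 lcid)
	{
		/* [12:8] = offset within the LCID context; [7:0] = LCID */
		return (reg_offset << 8) | (lcid & 0xff);
	}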
+/* [RW 2] The queue index for registration on Aux1 counter flag. */ +#define XCM_REG_AUX1_Q 0x20134 +/* [RW 2] Per each decision rule the queue index to register to. */ +#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0 +/* [R 5] Used to read the XX protection CAM occupancy counter. */ +#define XCM_REG_CAM_OCCUP 0x20244 +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define XCM_REG_CDU_AG_RD_IFEN 0x20044 +/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define XCM_REG_CDU_AG_WR_IFEN 0x20040 +/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define XCM_REG_CDU_SM_RD_IFEN 0x2004c +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define XCM_REG_CDU_SM_WR_IFEN 0x20048 +/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define XCM_REG_CFC_INIT_CRD 0x20404 +/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_CP_WEIGHT 0x200dc +/* [RW 1] Input csem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_CSEM_IFEN 0x20028 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the csem interface. */ +#define XCM_REG_CSEM_LENGTH_MIS 0x20228 +/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_CSEM_WEIGHT 0x200c4 +/* [RW 1] Input dorq Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_DORQ_IFEN 0x20030 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the dorq interface. */ +#define XCM_REG_DORQ_LENGTH_MIS 0x20230 +/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_DORQ_WEIGHT 0x200cc +/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */ +#define XCM_REG_ERR_EVNT_ID 0x200b0 +/* [RW 28] The CM erroneous header for QM and Timers formatting. */ +#define XCM_REG_ERR_XCM_HDR 0x200ac +/* [RW 8] The Event ID for Timers expiration. */ +#define XCM_REG_EXPR_EVNT_ID 0x200b4 +/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define XCM_REG_FIC0_INIT_CRD 0x2040c +/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define XCM_REG_FIC1_INIT_CRD 0x20410 +#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118 +#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c +#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108 +#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c +/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr; + ~xcm_registers_gr_ld0_pr.gr_ld0_pr and + ~xcm_registers_gr_ld1_pr.gr_ld1_pr.
*/ +#define XCM_REG_GR_ARB_TYPE 0x2020c +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Channel group is the + complement of the other 3 groups. */ +#define XCM_REG_GR_LD0_PR 0x20214 +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Channel group is the + complement of the other 3 groups. */ +#define XCM_REG_GR_LD1_PR 0x20218 +/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_NIG0_IFEN 0x20038 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the nig0 interface. */ +#define XCM_REG_NIG0_LENGTH_MIS 0x20238 +/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_NIG0_WEIGHT 0x200d4 +/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_NIG1_IFEN 0x2003c +/* [RC 1] Set at message length mismatch (relative to last indication) at + the nig1 interface. */ +#define XCM_REG_NIG1_LENGTH_MIS 0x2023c +/* [RW 5] The number of double REG-pairs; loaded from the STORM context and + sent to STORM; for a specific connection type. The double REG-pairs are + used in order to align to STORM context row size of 128 bits. The offset + of these data in the STORM context is always 0. Index _i stands for the + connection type (one of 16). */ +#define XCM_REG_N_SM_CTX_LD_0 0x20060 +#define XCM_REG_N_SM_CTX_LD_1 0x20064 +#define XCM_REG_N_SM_CTX_LD_2 0x20068 +#define XCM_REG_N_SM_CTX_LD_3 0x2006c +#define XCM_REG_N_SM_CTX_LD_4 0x20070 +#define XCM_REG_N_SM_CTX_LD_5 0x20074 +/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_PBF_IFEN 0x20034 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the pbf interface. */ +#define XCM_REG_PBF_LENGTH_MIS 0x20234 +/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_PBF_WEIGHT 0x200d0 +#define XCM_REG_PHYS_QNUM3_0 0x20100 +#define XCM_REG_PHYS_QNUM3_1 0x20104 +/* [RW 8] The Event ID for Timers formatting in case of stop done. */ +#define XCM_REG_STOP_EVNT_ID 0x200b8 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the STORM interface. */ +#define XCM_REG_STORM_LENGTH_MIS 0x2021c +/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_STORM_WEIGHT 0x200bc +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_STORM_XCM_IFEN 0x20010 +/* [RW 4] Timers output initial credit. Max credit available - 15. Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 4 at start-up.
*/ +#define XCM_REG_TM_INIT_CRD 0x2041c +/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_TM_WEIGHT 0x200ec +/* [RW 28] The CM header for Timers expiration command. */ +#define XCM_REG_TM_XCM_HDR 0x200a8 +/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_TM_XCM_IFEN 0x2001c +/* [RW 1] Input tsem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_TSEM_IFEN 0x20024 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the tsem interface. */ +#define XCM_REG_TSEM_LENGTH_MIS 0x20224 +/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_TSEM_WEIGHT 0x200c0 +/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */ +#define XCM_REG_UNA_GT_NXT_Q 0x20120 +/* [RW 1] Input usem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_USEM_IFEN 0x2002c +/* [RC 1] Message length mismatch (relative to last indication) at the usem + interface. */ +#define XCM_REG_USEM_LENGTH_MIS 0x2022c +/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_USEM_WEIGHT 0x200c8 +#define XCM_REG_WU_DA_CNT_CMD00 0x201d4 +#define XCM_REG_WU_DA_CNT_CMD01 0x201d8 +#define XCM_REG_WU_DA_CNT_CMD10 0x201dc +#define XCM_REG_WU_DA_CNT_CMD11 0x201e0 +#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4 +#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8 +#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec +#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0 +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4 +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8 +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0 +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XCM_CFC_IFEN 0x20050 +/* [RW 14] Interrupt mask register #0 read/write */ +#define XCM_REG_XCM_INT_MASK 0x202b4 +/* [R 14] Interrupt register #0 read */ +#define XCM_REG_XCM_INT_STS 0x202a8 +/* [RW 30] Parity mask register #0 read/write */ +#define XCM_REG_XCM_PRTY_MASK 0x202c4 +/* [R 30] Parity register #0 read */ +#define XCM_REG_XCM_PRTY_STS 0x202b8 +/* [RC 30] Parity register #0 read clear */ +#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc + +/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the Reg1WbFlg isn't set. */ +#define XCM_REG_XCM_REG0_SZ 0x200f4 +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity.
*/ +#define XCM_REG_XCM_STORM0_IFEN 0x20004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XCM_STORM1_IFEN 0x20008 +/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_XCM_TM_IFEN 0x20020 +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XCM_XQM_IFEN 0x2000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ +#define XCM_REG_XCM_XQM_USE_Q 0x200f0 +/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */ +#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc +/* [RW 6] QM output initial credit. Max credit available - 32. Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define XCM_REG_XQM_INIT_CRD 0x20420 +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_XQM_P_WEIGHT 0x200e4 +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_XQM_S_WEIGHT 0x200e8 +/* [RW 28] The CM header value for QM request (primary). */ +#define XCM_REG_XQM_XCM_HDR_P 0x200a0 +/* [RW 28] The CM header value for QM request (secondary). */ +#define XCM_REG_XQM_XCM_HDR_S 0x200a4 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XQM_XCM_IFEN 0x20014 +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XSDM_IFEN 0x20018 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the SDM interface. */ +#define XCM_REG_XSDM_LENGTH_MIS 0x20220 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1 (least + prioritised); 2 stands for weight 2; etc. */ +#define XCM_REG_XSDM_WEIGHT 0x200e0 +/* [RW 17] Indirect access to the descriptor table of the XX protection + mechanism. The fields are: [5:0] - message length; [11:6] - message + pointer; [16:12] - next pointer. */ +#define XCM_REG_XX_DESCR_TABLE 0x20480 +#define XCM_REG_XX_DESCR_TABLE_SIZE 32 +/* [R 6] Used to read the XX protection Free counter. */ +#define XCM_REG_XX_FREE 0x20240 +/* [RW 6] Initial value for the credit counter; responsible for filling the + Input Stage XX protection buffer with the XX protection pending messages. + Max credit available - 3. Write writes the initial credit value; read + returns the current value of the credit counter. Must be initialized to 2 + at start-up. */ +#define XCM_REG_XX_INIT_CRD 0x20424 +/* [RW 6] The maximum number of pending messages which may be stored in XX + protection. Returns ~xcm_registers_xx_free.xx_free on read.
*/ +#define XCM_REG_XX_MSG_NUM 0x20428 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define XCM_REG_XX_OVFL_EVNT_ID 0x20058 +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) +#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) +#define XMAC_CTRL_REG_RX_EN (0x1<<1) +#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) +#define XMAC_CTRL_REG_TX_EN (0x1<<0) +#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18) +#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17) +#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0) +#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3) +#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4) +#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5) +#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60 +#define XMAC_REG_CTRL 0 +/* [RW 16] Upper 16 bits of the 48-bit ctrl_sa register. Used as the SA in + * PAUSE/PFC packets transmitted by the MAC */ +#define XMAC_REG_CTRL_SA_HI 0x2c +/* [RW 32] Lower 32 bits of the 48-bit ctrl_sa register. Used as the SA in + * PAUSE/PFC packets transmitted by the MAC */ +#define XMAC_REG_CTRL_SA_LO 0x28 +#define XMAC_REG_PAUSE_CTRL 0x68 +#define XMAC_REG_PFC_CTRL 0x70 +#define XMAC_REG_PFC_CTRL_HI 0x74 +#define XMAC_REG_RX_LSS_STATUS 0x58 +/* [RW 14] Maximum packet size in receive direction; exclusive of preamble & + * CRC in strip mode */ +#define XMAC_REG_RX_MAX_SIZE 0x40 +#define XMAC_REG_TX_CTRL 0x20 +/* [RW 16] Indirect access to the XX table of the XX protection mechanism. + The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] - + header pointer. */ +#define XCM_REG_XX_TABLE 0x20500 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define XSDM_REG_AGG_INT_EVENT_0 0x166038 +#define XSDM_REG_AGG_INT_EVENT_1 0x16603c +#define XSDM_REG_AGG_INT_EVENT_10 0x166060 +#define XSDM_REG_AGG_INT_EVENT_11 0x166064 +#define XSDM_REG_AGG_INT_EVENT_12 0x166068 +#define XSDM_REG_AGG_INT_EVENT_13 0x16606c +#define XSDM_REG_AGG_INT_EVENT_14 0x166070 +#define XSDM_REG_AGG_INT_EVENT_2 0x166040 +#define XSDM_REG_AGG_INT_EVENT_3 0x166044 +#define XSDM_REG_AGG_INT_EVENT_4 0x166048 +#define XSDM_REG_AGG_INT_EVENT_5 0x16604c +#define XSDM_REG_AGG_INT_EVENT_6 0x166050 +#define XSDM_REG_AGG_INT_EVENT_7 0x166054 +#define XSDM_REG_AGG_INT_EVENT_8 0x166058 +#define XSDM_REG_AGG_INT_EVENT_9 0x16605c +/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) + or auto-mask-mode (1) */ +#define XSDM_REG_AGG_INT_MODE_0 0x1661b8 +#define XSDM_REG_AGG_INT_MODE_1 0x1661bc +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define XSDM_REG_CFC_RSP_START_ADDR 0x166008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c +/* [RW 16] The maximum value of the completion counter #1 */ +#define XSDM_REG_CMP_COUNTER_MAX1 0x166020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define XSDM_REG_CMP_COUNTER_MAX2 0x166024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define XSDM_REG_CMP_COUNTER_MAX3 0x166028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c +#define XSDM_REG_ENABLE_IN1 0x166238 +#define XSDM_REG_ENABLE_IN2 0x16623c +#define XSDM_REG_ENABLE_OUT1 0x166240 +#define XSDM_REG_ENABLE_OUT2 0x166244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK.
*/ +#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc +/* [ST 32] The number of ACK after placement messages received */ +#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c +/* [ST 32] The number of packet end messages received from the parser */ +#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274 +/* [ST 32] The number of requests received from the pxp async if */ +#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278 +/* [ST 32] The number of commands received in queue 0 */ +#define XSDM_REG_NUM_OF_Q0_CMD 0x166248 +/* [ST 32] The number of commands received in queue 10 */ +#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c +/* [ST 32] The number of commands received in queue 11 */ +#define XSDM_REG_NUM_OF_Q11_CMD 0x166270 +/* [ST 32] The number of commands received in queue 1 */ +#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c +/* [ST 32] The number of commands received in queue 3 */ +#define XSDM_REG_NUM_OF_Q3_CMD 0x166250 +/* [ST 32] The number of commands received in queue 4 */ +#define XSDM_REG_NUM_OF_Q4_CMD 0x166254 +/* [ST 32] The number of commands received in queue 5 */ +#define XSDM_REG_NUM_OF_Q5_CMD 0x166258 +/* [ST 32] The number of commands received in queue 6 */ +#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c +/* [ST 32] The number of commands received in queue 7 */ +#define XSDM_REG_NUM_OF_Q7_CMD 0x166260 +/* [ST 32] The number of commands received in queue 8 */ +#define XSDM_REG_NUM_OF_Q8_CMD 0x166264 +/* [ST 32] The number of commands received in queue 9 */ +#define XSDM_REG_NUM_OF_Q9_CMD 0x166268 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010 +/* [W 17] Generate an operation after completion; bit-16 is + * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and + * bits 4:0 are the T124Param[4:0] */ +#define XSDM_REG_OPERATION_GEN 0x1664c4 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558 +/* [RW 32] Tick for timer counter. Applicable only when + ~xsdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define XSDM_REG_TIMER_TICK 0x166000 +/* [RW 32] Interrupt mask register #0 read/write */ +#define XSDM_REG_XSDM_INT_MASK_0 0x16629c +#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac +/* [R 32] Interrupt register #0 read */ +#define XSDM_REG_XSDM_INT_STS_0 0x166290 +#define XSDM_REG_XSDM_INT_STS_1 0x1662a0 +/* [RW 11] Parity mask register #0 read/write */ +#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc +/* [R 11] Parity register #0 read */ +#define XSDM_REG_XSDM_PRTY_STS 0x1662b0 +/* [RC 11] Parity register #0 read clear */ +#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define XSEM_REG_ARB_CYCLE_SIZE 0x280034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define XSEM_REG_ARB_ELEMENT0 0x280020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. 
+ Must not be equal to register ~xsem_registers_arb_element0.arb_element0 */ +#define XSEM_REG_ARB_ELEMENT1 0x280024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Must not be equal to register ~xsem_registers_arb_element0.arb_element0 + and ~xsem_registers_arb_element1.arb_element1 */ +#define XSEM_REG_ARB_ELEMENT2 0x280028 +/* [RW 3] The source that is associated with arbitration element 3. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. Must + not be equal to register ~xsem_registers_arb_element0.arb_element0 and + ~xsem_registers_arb_element1.arb_element1 and + ~xsem_registers_arb_element2.arb_element2 */ +#define XSEM_REG_ARB_ELEMENT3 0x28002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Must not be equal to register ~xsem_registers_arb_element0.arb_element0 + and ~xsem_registers_arb_element1.arb_element1 and + ~xsem_registers_arb_element2.arb_element2 and + ~xsem_registers_arb_element3.arb_element3 */ +#define XSEM_REG_ARB_ELEMENT4 0x280030 +#define XSEM_REG_ENABLE_IN 0x2800a4 +#define XSEM_REG_ENABLE_OUT 0x2800a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to each sem_fast register offset. */ +#define XSEM_REG_FAST_MEMORY 0x2a0000 +/* [RW 1] Disables input messages from FIC0. May be updated during run_time + by the microcode */ +#define XSEM_REG_FIC0_DISABLE 0x280224 +/* [RW 1] Disables input messages from FIC1. May be updated during run_time + by the microcode */ +#define XSEM_REG_FIC1_DISABLE 0x280234 +/* [RW 15] Interrupt table. Read and write access to it is not possible in + the middle of the work */ +#define XSEM_REG_INT_TABLE 0x280400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define XSEM_REG_MSG_NUM_FIC0 0x280000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define XSEM_REG_MSG_NUM_FIC1 0x280004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define XSEM_REG_MSG_NUM_FOC0 0x280008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define XSEM_REG_MSG_NUM_FOC1 0x28000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define XSEM_REG_MSG_NUM_FOC2 0x280010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define XSEM_REG_MSG_NUM_FOC3 0x280014 +/* [RW 1] Disables input messages from the passive buffer. May be updated + during run_time by the microcode */ +#define XSEM_REG_PAS_DISABLE 0x28024c +/* [WB 128] Debug only. Passive buffer memory */ +#define XSEM_REG_PASSIVE_BUFFER 0x282000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data.
*/ +#define XSEM_REG_PRAM 0x2c0000 +/* [R 16] Valid sleeping threads indication; one bit per thread */ +#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0 +/* [RW 16] List of free threads. There is a bit per thread. */ +#define XSEM_REG_THREADS_LIST 0x2802e4 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define XSEM_REG_TS_0_AS 0x280038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define XSEM_REG_TS_10_AS 0x280060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define XSEM_REG_TS_11_AS 0x280064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define XSEM_REG_TS_12_AS 0x280068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define XSEM_REG_TS_13_AS 0x28006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define XSEM_REG_TS_14_AS 0x280070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define XSEM_REG_TS_15_AS 0x280074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define XSEM_REG_TS_16_AS 0x280078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define XSEM_REG_TS_17_AS 0x28007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define XSEM_REG_TS_18_AS 0x280080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define XSEM_REG_TS_1_AS 0x28003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define XSEM_REG_TS_2_AS 0x280040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define XSEM_REG_TS_3_AS 0x280044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define XSEM_REG_TS_4_AS 0x280048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define XSEM_REG_TS_5_AS 0x28004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define XSEM_REG_TS_6_AS 0x280050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define XSEM_REG_TS_7_AS 0x280054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define XSEM_REG_TS_8_AS 0x280058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define XSEM_REG_TS_9_AS 0x28005c +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid.
*/ +#define XSEM_REG_VFPF_ERR_NUM 0x280380 +/* [RW 32] Interrupt mask register #0 read/write */ +#define XSEM_REG_XSEM_INT_MASK_0 0x280110 +#define XSEM_REG_XSEM_INT_MASK_1 0x280120 +/* [R 32] Interrupt register #0 read */ +#define XSEM_REG_XSEM_INT_STS_0 0x280104 +#define XSEM_REG_XSEM_INT_STS_1 0x280114 +/* [RW 32] Parity mask register #0 read/write */ +#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130 +#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140 +/* [R 32] Parity register #0 read */ +#define XSEM_REG_XSEM_PRTY_STS_0 0x280124 +#define XSEM_REG_XSEM_PRTY_STS_1 0x280134 +/* [RC 32] Parity register #0 read clear */ +#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128 +#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138 +#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) +#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) +#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) +#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0) +#define MCPR_NVM_COMMAND_DOIT (1L<<4) +#define MCPR_NVM_COMMAND_DONE (1L<<3) +#define MCPR_NVM_COMMAND_FIRST (1L<<7) +#define MCPR_NVM_COMMAND_LAST (1L<<8) +#define MCPR_NVM_COMMAND_WR (1L<<5) +#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9) +#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5) +#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1) +#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3) +#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3) +#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3) +#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3) +#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3) +#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3) +#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3) +#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3) +#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3) +#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3) +#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3) +#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3) +#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3) +#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3) +#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3) +#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3) +#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3) +#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3) +#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3) +#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3) +#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3) +#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3) +#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3) +#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3) +#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3) +#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3) +#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3) +#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3) +#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3) +#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3) +#define EMAC_LED_1000MB_OVERRIDE (1L<<1) +#define EMAC_LED_100MB_OVERRIDE (1L<<2) +#define EMAC_LED_10MB_OVERRIDE (1L<<3) +#define EMAC_LED_2500MB_OVERRIDE (1L<<12) +#define EMAC_LED_OVERRIDE (1L<<0) +#define EMAC_LED_TRAFFIC (1L<<6) +#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26) +#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26) +#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26) +#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26) +#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26) +#define EMAC_MDIO_COMM_DATA (0xffffL<<0) +#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
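An MDIO transaction on the EMAC is started by writing a composed command word to EMAC_REG_EMAC_MDIO_COMM and polling until hardware clears EMAC_MDIO_COMM_START_BUSY. A hedged sketch of composing the clause-45 address-phase word follows; the PHY-address shift (21) and device-address shift (16) are assumptions modeled on how the bnx2x link code builds this word, since they are not spelled out in this header:

#include <stdint.h>

/* Assumed field positions (not defined in this header): PHY address at
 * bit 21, clause-45 device address at bit 16, address/data in [15:0].
 */
static inline uint32_t emac_mdio45_addr_cmd(uint8_t phy_addr, uint8_t devad,
					    uint16_t reg_addr)
{
	return ((uint32_t)phy_addr << 21) | ((uint32_t)devad << 16) |
	       reg_addr | EMAC_MDIO_COMM_COMMAND_ADDRESS |
	       EMAC_MDIO_COMM_START_BUSY;
}

/* A subsequent write with EMAC_MDIO_COMM_COMMAND_READ_45 (again with
 * START_BUSY set) would then return the register value in the
 * EMAC_MDIO_COMM_DATA field.
 */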
+#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) +#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) +#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16) +#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 +#define EMAC_MDIO_STATUS_10MB (1L<<1) +#define EMAC_MODE_25G_MODE (1L<<5) +#define EMAC_MODE_HALF_DUPLEX (1L<<1) +#define EMAC_MODE_PORT_GMII (2L<<2) +#define EMAC_MODE_PORT_MII (1L<<2) +#define EMAC_MODE_PORT_MII_10M (3L<<2) +#define EMAC_MODE_RESET (1L<<0) +#define EMAC_REG_EMAC_LED 0xc +#define EMAC_REG_EMAC_MAC_MATCH 0x10 +#define EMAC_REG_EMAC_MDIO_COMM 0xac +#define EMAC_REG_EMAC_MDIO_MODE 0xb4 +#define EMAC_REG_EMAC_MDIO_STATUS 0xb0 +#define EMAC_REG_EMAC_MODE 0x0 +#define EMAC_REG_EMAC_RX_MODE 0xc8 +#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c +#define EMAC_REG_EMAC_RX_STAT_AC 0x180 +#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4 +#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23 +#define EMAC_REG_EMAC_TX_MODE 0xbc +#define EMAC_REG_EMAC_TX_STAT_AC 0x280 +#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22 +#define EMAC_REG_RX_PFC_MODE 0x320 +#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2) +#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1) +#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0) +#define EMAC_REG_RX_PFC_PARAM 0x324 +#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0 +#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330 +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c +#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334 +#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0) +#define EMAC_RX_MODE_FLOW_EN (1L<<2) +#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3) +#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10) +#define EMAC_RX_MODE_PROMISCUOUS (1L<<8) +#define EMAC_RX_MODE_RESET (1L<<0) +#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) +#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) +#define EMAC_TX_MODE_FLOW_EN (1L<<4) +#define EMAC_TX_MODE_RESET (1L<<0) +#define MISC_REGISTERS_GPIO_0 0 +#define MISC_REGISTERS_GPIO_1 1 +#define MISC_REGISTERS_GPIO_2 2 +#define MISC_REGISTERS_GPIO_3 3 +#define MISC_REGISTERS_GPIO_CLR_POS 16 +#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24) +#define MISC_REGISTERS_GPIO_FLOAT_POS 24 +#define MISC_REGISTERS_GPIO_HIGH 1 +#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2 +#define MISC_REGISTERS_GPIO_INT_CLR_POS 24 +#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0 +#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1 +#define MISC_REGISTERS_GPIO_INT_SET_POS 16 +#define MISC_REGISTERS_GPIO_LOW 0 +#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1 +#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0 +#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 +#define MISC_REGISTERS_GPIO_SET_POS 8
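The MISC GPIO register is laid out as stacked per-pin fields: writing a 1 through the SET field (at SET_POS) drives a pin high, through the CLR field drives it low, and through the FLOAT field releases it. A sketch of driving one pin high, modeled on the pattern the driver's bnx2x_set_gpio() follows; the function name here is ours:

#include <stdint.h>

/* Return an updated GPIO register value that drives one pin high.
 * Port-1 pins are swizzled upward by MISC_REGISTERS_GPIO_PORT_SHIFT.
 * Sketch only: the real driver also takes the GPIO HW lock first.
 */
static uint32_t gpio_drive_high(uint32_t gpio_reg, unsigned int pin, int port)
{
	uint32_t mask = 1u << (pin + (port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));

	gpio_reg &= ~(mask << MISC_REGISTERS_GPIO_FLOAT_POS); /* drive, don't float */
	gpio_reg |= (mask << MISC_REGISTERS_GPIO_SET_POS);    /* latch output 1 */
	return gpio_reg;
}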
+#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 +#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29) +#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) +#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26) +#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) +#define MISC_REGISTERS_RESET_REG_1_SET 0x584 +#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 +#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24) +#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25) +#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19) +#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15) +#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5) +#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13) +#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11) +#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13) +#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9) +#define MISC_REGISTERS_RESET_REG_2_SET 0x594 +#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20) +#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21) +#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22) +#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23) +#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8) +#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4 +#define MISC_REGISTERS_SPIO_4 4 +#define MISC_REGISTERS_SPIO_5 5 +#define MISC_REGISTERS_SPIO_7 7 +#define MISC_REGISTERS_SPIO_CLR_POS 16 +#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24) +#define MISC_REGISTERS_SPIO_FLOAT_POS 24 +#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2 +#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16 +#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 +#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 +#define MISC_REGISTERS_SPIO_SET_POS 8 +#define HW_LOCK_DRV_FLAGS 10 +#define HW_LOCK_MAX_RESOURCE_VALUE 31 +#define HW_LOCK_RESOURCE_GPIO 1 +#define HW_LOCK_RESOURCE_MDIO 0 +#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 +#define HW_LOCK_RESOURCE_SPIO 2 +#define HW_LOCK_RESOURCE_UNDI 5 +#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) +#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9) +#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8) +#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7) +#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6) +#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1) +#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0) +#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18) +#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11) +#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12) +#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR
(0x1<<12) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15) +#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14) +#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14) +#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0) +#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22) +#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15) +#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27) +#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26) +#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25) +#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24) +#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23) +#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22) +#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27) +#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26) +#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21) +#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20) +#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25) +#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24) +#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16) +#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9) +#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8) +#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7) +#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6) +#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11) +#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10) + +#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9) + +#define RESERVED_GENERAL_ATTENTION_BIT_0 0 + +#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0 +#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000 + +#define RESERVED_GENERAL_ATTENTION_BIT_6 6 +#define RESERVED_GENERAL_ATTENTION_BIT_7 7 +#define RESERVED_GENERAL_ATTENTION_BIT_8 8 +#define RESERVED_GENERAL_ATTENTION_BIT_9 9 +#define RESERVED_GENERAL_ATTENTION_BIT_10 10 +#define RESERVED_GENERAL_ATTENTION_BIT_11 11 +#define RESERVED_GENERAL_ATTENTION_BIT_12 12 +#define RESERVED_GENERAL_ATTENTION_BIT_13 13 +#define RESERVED_GENERAL_ATTENTION_BIT_14 14 +#define RESERVED_GENERAL_ATTENTION_BIT_15 15 +#define RESERVED_GENERAL_ATTENTION_BIT_16 16 +#define RESERVED_GENERAL_ATTENTION_BIT_17 17 +#define RESERVED_GENERAL_ATTENTION_BIT_18 18 +#define RESERVED_GENERAL_ATTENTION_BIT_19 19 +#define RESERVED_GENERAL_ATTENTION_BIT_20 20 
+#define RESERVED_GENERAL_ATTENTION_BIT_21 21 + +/* storm asserts attention bits */ +#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7 +#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8 +#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9 +#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10 + +/* mcp error attention bit */ +#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11 + +/* E1H NIG status sync attention mapped to group 4-7 */ +#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12 +#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13 +#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14 +#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15 +#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16 +#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17 +#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18 +#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19 + + +#define LATCHED_ATTN_RBCR 23 +#define LATCHED_ATTN_RBCT 24 +#define LATCHED_ATTN_RBCN 25 +#define LATCHED_ATTN_RBCU 26 +#define LATCHED_ATTN_RBCP 27 +#define LATCHED_ATTN_TIMEOUT_GRC 28 +#define LATCHED_ATTN_RSVD_GRC 29 +#define LATCHED_ATTN_ROM_PARITY_MCP 30 +#define LATCHED_ATTN_UM_RX_PARITY_MCP 31 +#define LATCHED_ATTN_UM_TX_PARITY_MCP 32 +#define LATCHED_ATTN_SCPAD_PARITY_MCP 33 + +#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32) +#define GENERAL_ATTEN_OFFSET(atten_name)\ + (1UL << ((94 + atten_name) % 32))
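General attentions begin at AEU input bit 94, so GENERAL_ATTEN_WORD() selects which 32-bit attention status word an attention lands in and GENERAL_ATTEN_OFFSET() builds its bit mask within that word. A worked check using the MCP fatal-assert attention defined above (the check itself is illustrative, not from the patch):

/* MCP_FATAL_ASSERT_ATTENTION_BIT is reserved general attention 11:
 *   (94 + 11) / 32 == 3  -> it lives in attention status word 3
 *   (94 + 11) % 32 == 9  -> as bit 9 of that word
 */
_Static_assert(GENERAL_ATTEN_WORD(MCP_FATAL_ASSERT_ATTENTION_BIT) == 3,
	       "MCP fatal assert maps to attention word 3");
/* GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT) == (1UL << 9) */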
+/* + * This file defines the GRC base address for every block. + * This file is included by chipsim, asm microcode and cpp microcode. + * These values are used in Design.xml on the regBase attribute. + * Use the base with the generated offsets of specific registers. + */ + +#define GRCBASE_PXPCS 0x000000 +#define GRCBASE_PCICONFIG 0x002000 +#define GRCBASE_PCIREG 0x002400 +#define GRCBASE_EMAC0 0x008000 +#define GRCBASE_EMAC1 0x008400 +#define GRCBASE_DBU 0x008800 +#define GRCBASE_MISC 0x00A000 +#define GRCBASE_DBG 0x00C000 +#define GRCBASE_NIG 0x010000 +#define GRCBASE_XCM 0x020000 +#define GRCBASE_PRS 0x040000 +#define GRCBASE_SRCH 0x040400 +#define GRCBASE_TSDM 0x042000 +#define GRCBASE_TCM 0x050000 +#define GRCBASE_BRB1 0x060000 +#define GRCBASE_MCP 0x080000 +#define GRCBASE_UPB 0x0C1000 +#define GRCBASE_CSDM 0x0C2000 +#define GRCBASE_USDM 0x0C4000 +#define GRCBASE_CCM 0x0D0000 +#define GRCBASE_UCM 0x0E0000 +#define GRCBASE_CDU 0x101000 +#define GRCBASE_DMAE 0x102000 +#define GRCBASE_PXP 0x103000 +#define GRCBASE_CFC 0x104000 +#define GRCBASE_HC 0x108000 +#define GRCBASE_PXP2 0x120000 +#define GRCBASE_PBF 0x140000 +#define GRCBASE_UMAC0 0x160000 +#define GRCBASE_UMAC1 0x160400 +#define GRCBASE_XPB 0x161000 +#define GRCBASE_MSTAT0 0x162000 +#define GRCBASE_MSTAT1 0x162800 +#define GRCBASE_XMAC0 0x163000 +#define GRCBASE_XMAC1 0x163800 +#define GRCBASE_TIMERS 0x164000 +#define GRCBASE_XSDM 0x166000 +#define GRCBASE_QM 0x168000 +#define GRCBASE_DQ 0x170000 +#define GRCBASE_TSEM 0x180000 +#define GRCBASE_CSEM 0x200000 +#define GRCBASE_XSEM 0x280000 +#define GRCBASE_USEM 0x300000 +#define GRCBASE_MISC_AEU GRCBASE_MISC + + +/* offset of configuration space in the pci core register */ +#define PCICFG_OFFSET 0x2000 +#define PCICFG_VENDOR_ID_OFFSET 0x00 +#define PCICFG_DEVICE_ID_OFFSET 0x02 +#define PCICFG_COMMAND_OFFSET 0x04 +#define PCICFG_COMMAND_IO_SPACE (1<<0) +#define PCICFG_COMMAND_MEM_SPACE (1<<1) +#define PCICFG_COMMAND_BUS_MASTER (1<<2) +#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3) +#define PCICFG_COMMAND_MWI_CYCLES (1<<4) +#define PCICFG_COMMAND_VGA_SNOOP (1<<5) +#define PCICFG_COMMAND_PERR_ENA (1<<6) +#define PCICFG_COMMAND_STEPPING (1<<7) +#define PCICFG_COMMAND_SERR_ENA (1<<8) +#define PCICFG_COMMAND_FAST_B2B (1<<9) +#define PCICFG_COMMAND_INT_DISABLE (1<<10) +#define PCICFG_COMMAND_RESERVED (0x1f<<11) +#define PCICFG_STATUS_OFFSET 0x06 +#define PCICFG_REVESION_ID_OFFSET 0x08 +#define PCICFG_CACHE_LINE_SIZE 0x0c +#define PCICFG_LATENCY_TIMER 0x0d +#define PCICFG_BAR_1_LOW 0x10 +#define PCICFG_BAR_1_HIGH 0x14 +#define PCICFG_BAR_2_LOW 0x18 +#define PCICFG_BAR_2_HIGH 0x1c +#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c +#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e +#define PCICFG_INT_LINE 0x3c +#define PCICFG_INT_PIN 0x3d +#define PCICFG_PM_CAPABILITY 0x48 +#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16) +#define PCICFG_PM_CAPABILITY_CLOCK (1<<19) +#define PCICFG_PM_CAPABILITY_RESERVED (1<<20) +#define PCICFG_PM_CAPABILITY_DSI (1<<21) +#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22) +#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25) +#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26) +#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27) +#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28) +#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29) +#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30) +#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31) +#define PCICFG_PM_CSR_OFFSET 0x4c +#define PCICFG_PM_CSR_STATE (0x3<<0) +#define PCICFG_PM_CSR_PME_ENABLE (1<<8) +#define PCICFG_PM_CSR_PME_STATUS (1<<15) +#define PCICFG_MSI_CAP_ID_OFFSET 0x58 +#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16) +#define PCICFG_MSI_CONTROL_MCAP (0x7<<17) +#define PCICFG_MSI_CONTROL_MENA (0x7<<20) +#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23) +#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE
(0x1<<24) +#define PCICFG_GRC_ADDRESS 0x78 +#define PCICFG_GRC_DATA 0x80 +#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0 +#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16) +#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27) +#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30) +#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31) + +#define PCICFG_DEVICE_CONTROL 0xb4 +#define PCICFG_DEVICE_STATUS 0xb6 +#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0) +#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1) +#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2) +#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3) +#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4) +#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5) +#define PCICFG_LINK_CONTROL 0xbc + + +#define BAR_USTRORM_INTMEM 0x400000 +#define BAR_CSTRORM_INTMEM 0x410000 +#define BAR_XSTRORM_INTMEM 0x420000 +#define BAR_TSTRORM_INTMEM 0x430000 + +/* for accessing the IGU in case of status block ACK */ +#define BAR_IGU_INTMEM 0x440000 + +#define BAR_DOORBELL_OFFSET 0x800000 + +#define BAR_ME_REGISTER 0x450000 + +/* config_2 offset */ +#define GRC_CONFIG_2_SIZE_REG 0x408 +#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR1_64ENA (1L<<4) +#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) +#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) +#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) +#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) +#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) +#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) + +/* config_3 offset */ +#define GRC_CONFIG_3_SIZE_REG 0x40c +#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) +#define PCI_CONFIG_3_FORCE_PME (1L<<24) +#define PCI_CONFIG_3_PME_STATUS (1L<<25) +#define PCI_CONFIG_3_PME_ENABLE (1L<<26) +#define PCI_CONFIG_3_PM_STATE (0x3L<<27) +#define PCI_CONFIG_3_VAUX_PRESET (1L<<30) +#define PCI_CONFIG_3_PCI_POWER (1L<<31) + +#define GRC_BAR2_CONFIG 0x4e0 +#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) 
+#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR2_64ENA (1L<<4) + +#define PCI_PM_DATA_A 0x410 +#define PCI_PM_DATA_B 0x414 +#define PCI_ID_VAL1 0x434 +#define PCI_ID_VAL2 0x438 + +#define PXPCS_TL_CONTROL_5 0x814 +#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/ +#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/ +#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/ +#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/ +#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/ +#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/ +#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/ +#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/ +#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/ +#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/ +#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/ +#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/ + + +#define PXPCS_TL_FUNC345_STAT 0x854 +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\ + (1 << 28) /* Unsupported Request Error Status in function 4; if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\ + (1 << 27) /* ECRC Error TLP Status in function 4; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\ + (1 << 26) /* Malformed TLP Status in function 4; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\ + (1 << 25) /* Receiver Overflow Status in function 4; if \ + set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\ + (1 << 24) /* Unexpected Completion Status in function 4; \ + if set, generate pcie_err_attn output when this error is seen.
WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\ + (1 << 23) /* Receive UR Status in function 4. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\ + (1 << 22) /* Completer Timeout Status in function 4; if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\ + (1 << 21) /* Flow Control Protocol Error Status in \ + function 4; if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\ + (1 << 20) /* Poisoned Error Status in function 4; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\ + (1 << 18) /* Unsupported Request Error Status in function 3; if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\ + (1 << 17) /* ECRC Error TLP Status in function 3; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\ + (1 << 16) /* Malformed TLP Status in function 3; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\ + (1 << 15) /* Receiver Overflow Status in function 3; if \ + set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\ + (1 << 14) /* Unexpected Completion Status in function 3; \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\ + (1 << 13) /* Receive UR Status in function 3. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\ + (1 << 12) /* Completer Timeout Status in function 3; if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\ + (1 << 11) /* Flow Control Protocol Error Status in \ + function 3; if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\ + (1 << 10) /* Poisoned Error Status in function 3; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\ + (1 << 8) /* Unsupported Request Error Status for Function 2; if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\ + (1 << 7) /* ECRC Error TLP Status for Function 2; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\ + (1 << 6) /* Malformed TLP Status for Function 2; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\ + (1 << 5) /* Receiver Overflow Status for Function 2; if \ + set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\ + (1 << 4) /* Unexpected Completion Status for Function 2; \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\ + (1 << 3) /* Receive UR Status for Function 2.
If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\ + (1 << 2) /* Completer Timeout Status for Function 2; if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\ + (1 << 1) /* Flow Control Protocol Error Status for \ + Function 2; if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\ + (1 << 0) /* Poisoned Error Status for Function 2; if set, \ + generate pcie_err_attn output when this error is seen. WC */ + + +#define PXPCS_TL_FUNC678_STAT 0x85C +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\ + (1 << 28) /* Unsupported Request Error Status in function 7; if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\ + (1 << 27) /* ECRC Error TLP Status in function 7; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\ + (1 << 26) /* Malformed TLP Status in function 7; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\ + (1 << 25) /* Receiver Overflow Status in function 7; if \ + set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\ + (1 << 24) /* Unexpected Completion Status in function 7; \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\ + (1 << 23) /* Receive UR Status in function 7. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\ + (1 << 22) /* Completer Timeout Status in function 7; if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\ + (1 << 21) /* Flow Control Protocol Error Status in \ + function 7; if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\ + (1 << 20) /* Poisoned Error Status in function 7; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\ + (1 << 18) /* Unsupported Request Error Status in function 6; if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\ + (1 << 17) /* ECRC Error TLP Status in function 6; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\ + (1 << 16) /* Malformed TLP Status in function 6; if set, \ + generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\ + (1 << 15) /* Receiver Overflow Status in function 6; if \ + set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\ + (1 << 14) /* Unexpected Completion Status in function 6; \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\ + (1 << 13) /* Receive UR Status in function 6. If set, generate \ + pcie_err_attn output when this error is seen.
WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\ + (1 << 12) /* Completer Timeout Status Status in function 6, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\ + (1 << 11) /* Flow Control Protocol Error Status Status in \ + function 6, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\ + (1 << 10) /* Poisoned Error Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\ + (1 << 8) /* Unsupported Request Error Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\ + (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\ + (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\ + (1 << 5) /* Receiver Overflow Status Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\ + (1 << 4) /* Unexpected Completion Status Status for Function 5, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\ + (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\ + (1 << 2) /* Completer Timeout Status Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\ + (1 << 1) /* Flow Control Protocol Error Status Status for \ + Function 5, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\ + (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. 
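All of the PXPCS_TL_FUNC*_STAT bits above are write-one-to-clear (WC) status flags, and each raises the pcie_err_attn attention output while it is set. As an illustration only (not part of the patch), a handler could decode and acknowledge them roughly as below; this sketch assumes the register offset define PXPCS_TL_FUNC345_STAT and that bnx2x's REG_RD()/REG_WR() accessors can reach this register, and the example_ name is hypothetical:

	static void example_ack_func345_errors(struct bnx2x *bp)
	{
		u32 val = REG_RD(bp, PXPCS_TL_FUNC345_STAT);

		if (val & PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3)
			BNX2X_ERR("PF 3: unsupported request error\n");
		if (val & PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3)
			BNX2X_ERR("PF 3: completer timeout\n");

		/* WC semantics: writing the set bits back clears them */
		if (val)
			REG_WR(bp, PXPCS_TL_FUNC345_STAT, val);
	}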
+
+
+#define BAR_USTRORM_INTMEM	0x400000
+#define BAR_CSTRORM_INTMEM	0x410000
+#define BAR_XSTRORM_INTMEM	0x420000
+#define BAR_TSTRORM_INTMEM	0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM		0x440000
+
+#define BAR_DOORBELL_OFFSET	0x800000
+
+#define BAR_ME_REGISTER		0x450000
+#define ME_REG_PF_NUM_SHIFT	0
+#define ME_REG_PF_NUM\
+	(7L<<ME_REG_PF_NUM_SHIFT)
+
+static inline u8 calc_crc8(u32 data, u8 crc)
+{
+	u8 D[32];
+	u8 NewCRC[8];
+	u8 C[8];
+	u8 crc_res;
+	u8 i;
+
+	/* split the data into 32 bits */
+	for (i = 0; i < 32; i++) {
+		D[i] = (u8)(data & 1);
+		data = data >> 1;
+	}
+
+	/* split the crc into 8 bits */
+	for (i = 0; i < 8; i++) {
+		C[i] = crc & 1;
+		crc = crc >> 1;
+	}
+
+	NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
+		    D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
+		    C[6] ^ C[7];
+	NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
+		    D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
+		    D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^
+		    C[6];
+	NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
+		    D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
+		    C[0] ^ C[1] ^ C[4] ^ C[5];
+	NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
+		    D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
+		    C[1] ^ C[2] ^ C[5] ^ C[6];
+	NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
+		    D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
+		    C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
+	NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
+		    D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
+		    C[3] ^ C[4] ^ C[7];
+	NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
+		    D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
+		    C[5];
+	NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
+		    D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
+		    C[6];
+
+	crc_res = 0;
+	for (i = 0; i < 8; i++)
+		crc_res |= (NewCRC[i] << i);
+
+	return crc_res;
+}
+
+
+#endif /* BNX2X_REG_H */
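calc_crc8() folds one 32-bit word into an 8-bit CRC using the unrolled parity equations above. A minimal usage sketch, processing a buffer one word at a time (the example_ wrapper and the zero seed are illustrative assumptions, not taken from the driver):

	static u8 example_crc8_over_buf(const u32 *words, int nwords)
	{
		u8 crc = 0;	/* illustrative seed */
		int i;

		for (i = 0; i < nwords; i++)
			crc = calc_crc8(words[i], crc);

		return crc;
	}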
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
new file mode 100644
index 0000000..df52f11
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -0,0 +1,5692 @@
+/* bnx2x_sp.c: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32c.h>
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
+
+#define BNX2X_MAX_EMUL_MULTI		16
+
+/**** Exe Queue interfaces ****/
+
+/**
+ * bnx2x_exe_queue_init - init the Exe Queue object
+ *
+ * @o:		pointer to the object
+ * @exe_len:	length
+ * @owner:	pointer to the owner
+ * @validate:	validate function pointer
+ * @optimize:	optimize function pointer
+ * @exec:	execute function pointer
+ * @get:	get function pointer
+ */
+static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
+					struct bnx2x_exe_queue_obj *o,
+					int exe_len,
+					union bnx2x_qable_obj *owner,
+					exe_q_validate validate,
+					exe_q_optimize optimize,
+					exe_q_execute exec,
+					exe_q_get get)
+{
+	memset(o, 0, sizeof(*o));
+
+	INIT_LIST_HEAD(&o->exe_queue);
+	INIT_LIST_HEAD(&o->pending_comp);
+
+	spin_lock_init(&o->lock);
+
+	o->exe_chunk_len = exe_len;
+	o->owner	 = owner;
+
+	/* Owner specific callbacks */
+	o->validate	 = validate;
+	o->optimize	 = optimize;
+	o->execute	 = exec;
+	o->get		 = get;
+
+	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
+			 "length of %d\n", exe_len);
+}
+
+static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
+					     struct bnx2x_exeq_elem *elem)
+{
+	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
+	kfree(elem);
+}
+
+static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
+{
+	struct bnx2x_exeq_elem *elem;
+	int cnt = 0;
+
+	spin_lock_bh(&o->lock);
+
+	list_for_each_entry(elem, &o->exe_queue, link)
+		cnt++;
+
+	spin_unlock_bh(&o->lock);
+
+	return cnt;
+}
+
+/**
+ * bnx2x_exe_queue_add - add a new element to the execution queue
+ *
+ * @bp:		driver handle
+ * @o:		queue
+ * @elem:	new command to add
+ * @restore:	true - do not optimize the command
+ *
+ * If the element is optimized or is illegal, frees it.
+ */
+static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
+				      struct bnx2x_exe_queue_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      bool restore)
+{
+	int rc;
+
+	spin_lock_bh(&o->lock);
+
+	if (!restore) {
+		/* Try to cancel this element against a complementary
+		 * pending command
+		 */
+		rc = o->optimize(bp, o->owner, elem);
+		if (rc)
+			goto free_and_exit;
+
+		/* Check if this request is ok */
+		rc = o->validate(bp, o->owner, elem);
+		if (rc) {
+			BNX2X_ERR("Preamble failed: %d\n", rc);
+			goto free_and_exit;
+		}
+	}
+
+	/* If so, add it to the execution queue */
+	list_add_tail(&elem->link, &o->exe_queue);
+
+	spin_unlock_bh(&o->lock);
+
+	return 0;
+
+free_and_exit:
+	bnx2x_exe_queue_free_elem(bp, elem);
+
+	spin_unlock_bh(&o->lock);
+
+	return rc;
+
+}
+
+static inline void __bnx2x_exe_queue_reset_pending(
+	struct bnx2x *bp,
+	struct bnx2x_exe_queue_obj *o)
+{
+	struct bnx2x_exeq_elem *elem;
+
+	while (!list_empty(&o->pending_comp)) {
+		elem = list_first_entry(&o->pending_comp,
+					struct bnx2x_exeq_elem, link);
+
+		list_del(&elem->link);
+		bnx2x_exe_queue_free_elem(bp, elem);
+	}
+}
+
+static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
+						 struct bnx2x_exe_queue_obj *o)
+{
+
+	spin_lock_bh(&o->lock);
+
+	__bnx2x_exe_queue_reset_pending(bp, o);
+
+	spin_unlock_bh(&o->lock);
+
+}
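The functions above form the producer side of the execution queue: a caller allocates an element with bnx2x_exe_queue_alloc_elem() (defined further down), fills in the command data, and hands it to bnx2x_exe_queue_add(), which runs the owner's optimize and validate callbacks before linking it in. A minimal sketch, with the fill step left abstract because cmd_data is owner specific and the example_ name is hypothetical:

	static int example_queue_cmd(struct bnx2x *bp,
				     struct bnx2x_exe_queue_obj *o)
	{
		struct bnx2x_exeq_elem *elem = bnx2x_exe_queue_alloc_elem(bp);

		if (!elem)
			return -ENOMEM;

		elem->cmd_len = 1;	/* occupies one slot of a chunk */
		/* ... fill elem->cmd_data for the owner's callbacks ... */

		/* On optimization or validation failure the element is
		 * freed by bnx2x_exe_queue_add() itself.
		 */
		return bnx2x_exe_queue_add(bp, o, elem, false);
	}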
+/**
+ * bnx2x_exe_queue_step - execute one execution chunk atomically
+ *
+ * @bp:			driver handle
+ * @o:			queue
+ * @ramrod_flags:	flags
+ *
+ * (Atomicity is ensured using the exe_queue->lock.)
+ */
+static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
+				       struct bnx2x_exe_queue_obj *o,
+				       unsigned long *ramrod_flags)
+{
+	struct bnx2x_exeq_elem *elem, spacer;
+	int cur_len = 0, rc;
+
+	memset(&spacer, 0, sizeof(spacer));
+
+	spin_lock_bh(&o->lock);
+
+	/*
+	 * The next step should not be performed until the current one is
+	 * finished, unless a DRV_CLEAR_ONLY bit is set. In that case we just
+	 * want to properly clear object internals without sending any command
+	 * to the FW, which also implies there won't be any completion to
+	 * clear the 'pending' list.
+	 */
+	if (!list_empty(&o->pending_comp)) {
+		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
+					 "resetting pending_comp\n");
+			__bnx2x_exe_queue_reset_pending(bp, o);
+		} else {
+			spin_unlock_bh(&o->lock);
+			return 1;
+		}
+	}
+
+	/*
+	 * Run through the pending commands list and create the next
+	 * execution chunk.
+	 */
+	while (!list_empty(&o->exe_queue)) {
+		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
+					link);
+		WARN_ON(!elem->cmd_len);
+
+		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
+			cur_len += elem->cmd_len;
+			/*
+			 * Prevent both lists from being empty while moving an
+			 * element. This will allow the call of
+			 * bnx2x_exe_queue_empty() without locking.
+			 */
+			list_add_tail(&spacer.link, &o->pending_comp);
+			mb();
+			list_del(&elem->link);
+			list_add_tail(&elem->link, &o->pending_comp);
+			list_del(&spacer.link);
+		} else
+			break;
+	}
+
+	/* Sanity check */
+	if (!cur_len) {
+		spin_unlock_bh(&o->lock);
+		return 0;
+	}
+
+	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
+	if (rc < 0)
+		/*
+		 * In case of an error return the commands back to the queue
+		 * and reset the pending_comp.
+		 */
+		list_splice_init(&o->pending_comp, &o->exe_queue);
+	else if (!rc)
+		/*
+		 * A return of zero means there are no outstanding pending
+		 * completions and we may dismiss the pending list.
+		 */
+		__bnx2x_exe_queue_reset_pending(bp, o);
+
+	spin_unlock_bh(&o->lock);
+	return rc;
+}
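bnx2x_exe_queue_step() is the consumer half: it moves up to exe_chunk_len worth of commands onto pending_comp and invokes the owner's execute callback; a positive return means a completion is still outstanding. A drain loop therefore pairs it with a completion wait, sketched here with the wait left to the caller (in this file it is done via the raw object's wait_comp, shown below; the example_ name is hypothetical):

	static int example_drain_queue(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
	{
		int rc;

		while (!bnx2x_exe_queue_empty(o)) {
			rc = bnx2x_exe_queue_step(bp, o, ramrod_flags);
			if (rc < 0)
				return rc;
			if (rc > 0)
				/* A chunk is in flight; the caller must wait
				 * for its completion (e.g. raw->wait_comp())
				 * before stepping again.
				 */
				return rc;
		}

		return 0;
	}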
+static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
+{
+	bool empty = list_empty(&o->exe_queue);
+
+	/* Don't reorder!!! */
+	mb();
+
+	return empty && list_empty(&o->pending_comp);
+}
+
+static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
+	struct bnx2x *bp)
+{
+	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
+	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
+}
+
+/************************ raw_obj functions ***********************************/
+static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
+{
+	return !!test_bit(o->state, o->pstate);
+}
+
+static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(o->state, o->pstate);
+	smp_mb__after_clear_bit();
+}
+
+static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
+{
+	smp_mb__before_clear_bit();
+	set_bit(o->state, o->pstate);
+	smp_mb__after_clear_bit();
+}
+
+/**
+ * bnx2x_state_wait - wait until the given bit (state) is cleared
+ *
+ * @bp:		device handle
+ * @state:	state which is to be cleared
+ * @pstate:	state buffer
+ *
+ */
+static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
+				   unsigned long *pstate)
+{
+	/* can take a while if any port is running */
+	int cnt = 5000;
+
+
+	if (CHIP_REV_IS_EMUL(bp))
+		cnt *= 20;
+
+	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
+
+	might_sleep();
+	while (cnt--) {
+		if (!test_bit(state, pstate)) {
+#ifdef BNX2X_STOP_ON_ERROR
+			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
+#endif
+			return 0;
+		}
+
+		usleep_range(1000, 1000);
+
+		if (bp->panic)
+			return -EIO;
+	}
+
+	/* timeout! */
+	BNX2X_ERR("timeout waiting for state %d\n", state);
+#ifdef BNX2X_STOP_ON_ERROR
+	bnx2x_panic();
+#endif
+
+	return -EBUSY;
+}
+
+static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
+{
+	return bnx2x_state_wait(bp, raw->state, raw->pstate);
+}
+
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/* credit handling callbacks */
+static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	WARN_ON(!mp);
+
+	return mp->get_entry(mp, offset);
+}
+
+static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	WARN_ON(!mp);
+
+	return mp->get(mp, 1);
+}
+
+static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	WARN_ON(!vp);
+
+	return vp->get_entry(vp, offset);
+}
+
+static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	WARN_ON(!vp);
+
+	return vp->get(vp, 1);
+}
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->get(mp, 1))
+		return false;
+
+	if (!vp->get(vp, 1)) {
+		mp->put(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
+static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	return mp->put_entry(mp, offset);
+}
+
+static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	return mp->put(mp, 1);
+}
+
+static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	return vp->put_entry(vp, offset);
+}
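The raw object's pending bit is the completion handshake used throughout this file: the bit is set before a ramrod is posted, the completion path clears it, and bnx2x_state_wait() sleeps until that happens. A sketch of the protocol, with a hypothetical example_post_ramrod() standing in for the actual send:

	/* hypothetical sender, e.g. a bnx2x_sp_post() wrapper */
	static int example_post_ramrod(struct bnx2x *bp,
				       struct bnx2x_raw_obj *r);

	static int example_post_and_wait(struct bnx2x *bp,
					 struct bnx2x_raw_obj *r)
	{
		int rc;

		r->set_pending(r);

		rc = example_post_ramrod(bp, r);
		if (rc) {
			r->clear_pending(r);
			return rc;
		}

		/* Returns 0 once the completion handler clears the bit */
		return r->wait_comp(bp, r);
	}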
+static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	return vp->put(vp, 1);
+}
+
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->put(mp, 1))
+		return false;
+
+	if (!vp->put(vp, 1)) {
+		mp->get(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
+/* check_add() callbacks */
+static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
+			       union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	if (!is_valid_ether_addr(data->mac.mac))
+		return -EINVAL;
+
+	/* Check if a requested MAC already exists */
+	list_for_each_entry(pos, &o->head, link)
+		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+			return -EEXIST;
+
+	return 0;
+}
+
+static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
+				union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if (data->vlan.vlan == pos->u.vlan.vlan)
+			return -EEXIST;
+
+	return 0;
+}
+
+static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
+				union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)))
+			return -EEXIST;
+
+	return 0;
+}
+
+
+/* check_del() callbacks */
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
+			    union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
+			     union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if (data->vlan.vlan == pos->u.vlan.vlan)
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
+				 union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)))
+			return pos;
+
+	return NULL;
+}
+
+/* check_move() callback */
+static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
+			     struct bnx2x_vlan_mac_obj *dst_o,
+			     union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	int rc;
+
+	/* Check if we can delete the requested configuration from the first
+	 * object.
+	 */
+	pos = src_o->check_del(src_o, data);
+
+	/* check if configuration can be added */
+	rc = dst_o->check_add(dst_o, data);
+
+	/* If this classification can not be added (is already set)
+	 * or can't be deleted - return an error.
+ */ + if (rc || !pos) + return false; + + return true; +} + +static bool bnx2x_check_move_always_err( + struct bnx2x_vlan_mac_obj *src_o, + struct bnx2x_vlan_mac_obj *dst_o, + union bnx2x_classification_ramrod_data *data) +{ + return false; +} + + +static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_raw_obj *raw = &o->raw; + u8 rx_tx_flag = 0; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; + + return rx_tx_flag; +} + +/* LLH CAM line allocations */ +enum { + LLH_CAM_ISCSI_ETH_LINE = 0, + LLH_CAM_ETH_LINE, + LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 +}; + +static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, + bool add, unsigned char *dev_addr, int index) +{ + u32 wb_data[2]; + u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : + NIG_REG_LLH0_FUNC_MEM; + + if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE) + return; + + DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", + (add ? "ADD" : "DELETE"), index); + + if (add) { + /* LLH_FUNC_MEM is a u64 WB register */ + reg_offset += 8*index; + + wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | + (dev_addr[4] << 8) | dev_addr[5]); + wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); + + REG_WR_DMAE(bp, reg_offset, wb_data, 2); + } + + REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : + NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); +} + +/** + * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod + * + * @bp: device handle + * @o: queue for which we want to configure this rule + * @add: if true the command is an ADD command, DEL otherwise + * @opcode: CLASSIFY_RULE_OPCODE_XXX + * @hdr: pointer to a header to setup + * + */ +static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, bool add, int opcode, + struct eth_classify_cmd_header *hdr) +{ + struct bnx2x_raw_obj *raw = &o->raw; + + hdr->client_id = raw->cl_id; + hdr->func_id = raw->func_id; + + /* Rx or/and Tx (internal switching) configuration ? */ + hdr->cmd_general_data |= + bnx2x_vlan_mac_get_rx_tx_flag(o); + + if (add) + hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; + + hdr->cmd_general_data |= + (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); +} + +/** + * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header + * + * @cid: connection id + * @type: BNX2X_FILTER_XXX_PENDING + * @hdr: poiter to header to setup + * @rule_cnt: + * + * currently we always configure one rule and echo field to contain a CID and an + * opcode type. + */ +static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, + struct eth_classify_header *hdr, int rule_cnt) +{ + hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT); + hdr->rule_cnt = (u8)rule_cnt; +} + + +/* hw_config() callbacks */ +static void bnx2x_set_one_mac_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? 
true : false; + unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; + u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; + + /* + * Set LLH CAM entry: currently only iSCSI and ETH macs are + * relevant. In addition, current implementation is tuned for a + * single ETH MAC. + * + * When multiple unicast ETH MACs PF configuration in switch + * independent mode is required (NetQ, multiple netdev MACs, + * etc.), consider better utilisation of 8 per function MAC + * entries in the LLH register. There is also + * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the + * total number of CAM entries to 16. + * + * Currently we won't configure NIG for MACs other than a primary ETH + * MAC and iSCSI L2 MAC. + * + * If this MAC is moving from one Queue to another, no need to change + * NIG configuration. + */ + if (cmd != BNX2X_VLAN_MAC_MOVE) { + if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags)) + bnx2x_set_mac_in_nig(bp, add, mac, + LLH_CAM_ISCSI_ETH_LINE); + else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags)) + bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE); + } + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + memset(data, 0, sizeof(*data)); + + /* Setup a command header */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC, + &rule_entry->mac.header); + + DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for " + "Queue %d\n", (add ? "add" : "delete"), + BNX2X_MAC_PRN_LIST(mac), raw->cl_id); + + /* Set a MAC itself */ + bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, + &rule_entry->mac.mac_mid, + &rule_entry->mac.mac_lsb, mac); + + /* MOVE: Add a rule that will add this MAC to the target Queue */ + if (cmd == BNX2X_VLAN_MAC_MOVE) { + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, + elem->cmd_data.vlan_mac.target_obj, + true, CLASSIFY_RULE_OPCODE_MAC, + &rule_entry->mac.header); + + /* Set a MAC itself */ + bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, + &rule_entry->mac.mac_mid, + &rule_entry->mac.mac_lsb, mac); + } + + /* Set the ramrod data header */ + /* TODO: take this to the higher level in order to prevent multiple + writing */ + bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +/** + * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod + * + * @bp: device handle + * @o: queue + * @type: + * @cam_offset: offset in cam memory + * @hdr: pointer to a header to setup + * + * E1/E1H + */ +static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, + struct mac_configuration_hdr *hdr) +{ + struct bnx2x_raw_obj *r = &o->raw; + + hdr->length = 1; + hdr->offset = (u8)cam_offset; + hdr->client_id = 0xff; + hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT)); +} + +static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, + u16 vlan_id, struct mac_configuration_entry *cfg_entry) +{ + struct bnx2x_raw_obj *r = &o->raw; + u32 cl_bit_vec = (1 << r->cl_id); + + cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); + cfg_entry->pf_id = r->func_id; + cfg_entry->vlan_id = cpu_to_le16(vlan_id); + + if (add) { + SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_SET); + SET_FLAG(cfg_entry->flags, + MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); + + /* Set a MAC in a ramrod data */ + bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr, 
+ &cfg_entry->middle_mac_addr, + &cfg_entry->lsb_mac_addr, mac); + } else + SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_INVALIDATE); +} + +static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, + u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) +{ + struct mac_configuration_entry *cfg_entry = &config->config_table[0]; + struct bnx2x_raw_obj *raw = &o->raw; + + bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, + &config->hdr); + bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, + cfg_entry); + + DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n", + (add ? "setting" : "clearing"), + BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset); +} + +/** + * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data + * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * @elem: bnx2x_exeq_elem + * @rule_idx: rule_idx + * @cam_offset: cam_offset + */ +static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *config = + (struct mac_configuration_cmd *)(raw->rdata); + /* + * 57710 and 57711 do not support MOVE command, + * so it's either ADD or DEL + */ + bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? + true : false; + + /* Reset the ramrod data buffer */ + memset(config, 0, sizeof(*config)); + + bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING, + cam_offset, add, + elem->cmd_data.vlan_mac.u.mac.mac, 0, + ETH_VLAN_FILTER_ANY_VLAN, config); +} + +static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + int cmd = elem->cmd_data.vlan_mac.cmd; + bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; + u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + memset(data, 0, sizeof(*data)); + + /* Set a rule header */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, + &rule_entry->vlan.header); + + DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? 
"add" : "delete"), + vlan); + + /* Set a VLAN itself */ + rule_entry->vlan.vlan = cpu_to_le16(vlan); + + /* MOVE: Add a rule that will add this MAC to the target Queue */ + if (cmd == BNX2X_VLAN_MAC_MOVE) { + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, + elem->cmd_data.vlan_mac.target_obj, + true, CLASSIFY_RULE_OPCODE_VLAN, + &rule_entry->vlan.header); + + /* Set a VLAN itself */ + rule_entry->vlan.vlan = cpu_to_le16(vlan); + } + + /* Set the ramrod data header */ + /* TODO: take this to the higher level in order to prevent multiple + writing */ + bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, + int rule_idx, int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + int cmd = elem->cmd_data.vlan_mac.cmd; + bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; + u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; + u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; + + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + memset(data, 0, sizeof(*data)); + + /* Set a rule header */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, + &rule_entry->pair.header); + + /* Set VLAN and MAC themselvs */ + rule_entry->pair.vlan = cpu_to_le16(vlan); + bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, + &rule_entry->pair.mac_mid, + &rule_entry->pair.mac_lsb, mac); + + /* MOVE: Add a rule that will add this MAC to the target Queue */ + if (cmd == BNX2X_VLAN_MAC_MOVE) { + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, + elem->cmd_data.vlan_mac.target_obj, + true, CLASSIFY_RULE_OPCODE_PAIR, + &rule_entry->pair.header); + + /* Set a VLAN itself */ + rule_entry->pair.vlan = cpu_to_le16(vlan); + bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, + &rule_entry->pair.mac_mid, + &rule_entry->pair.mac_lsb, mac); + } + + /* Set the ramrod data header */ + /* TODO: take this to the higher level in order to prevent multiple + writing */ + bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +/** + * bnx2x_set_one_vlan_mac_e1h - + * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * @elem: bnx2x_exeq_elem + * @rule_idx: rule_idx + * @cam_offset: cam_offset + */ +static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, + int rule_idx, int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *config = + (struct mac_configuration_cmd *)(raw->rdata); + /* + * 57710 and 57711 do not support MOVE command, + * so it's either ADD or DEL + */ + bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 
+		true : false;
+
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
+
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+				     ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
+#define list_next_entry(pos, member) \
+	list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * bnx2x_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
+ *
+ * @bp:		device handle
+ * @p:		command parameters
+ * @ppos:	pointer to the cookie
+ *
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the list of
+ * previously configured elements.
+ *
+ * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
+ * is taken into account.
+ *
+ * The cookie should be given back in the next call to make the function
+ * handle the next element. If *ppos is set to NULL it will restart the
+ * iterator. If the returned *ppos == NULL, the last element has been
+ * handled.
+ *
+ */
+static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_ramrod_params *p,
+				  struct bnx2x_vlan_mac_registry_elem **ppos)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+
+	/* If list is empty - there is nothing to do here */
+	if (list_empty(&o->head)) {
+		*ppos = NULL;
+		return 0;
+	}
+
+	/* make a step... */
+	if (*ppos == NULL)
+		*ppos = list_first_entry(&o->head,
+					 struct bnx2x_vlan_mac_registry_elem,
+					 link);
+	else
+		*ppos = list_next_entry(*ppos, link);
+
+	pos = *ppos;
+
+	/* If it's the last step - return NULL */
+	if (list_is_last(&pos->link, &o->head))
+		*ppos = NULL;
+
+	/* Prepare a 'user_req' */
+	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
+
+	/* Set the command */
+	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
+
+	/* Set vlan_mac_flags */
+	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
+
+	/* Set a restore bit */
+	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+	return bnx2x_config_vlan_mac(bp, p);
+}
+
+/*
+ * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
+ * pointer to an element with specific criteria, and NULL if such an element
+ * hasn't been found.
+ */ +static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac( + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem *pos; + struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; + + /* Check pending for execution commands */ + list_for_each_entry(pos, &o->exe_queue, link) + if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + +static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem *pos; + struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; + + /* Check pending for execution commands */ + list_for_each_entry(pos, &o->exe_queue, link) + if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + +static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem *pos; + struct bnx2x_vlan_mac_ramrod_data *data = + &elem->cmd_data.vlan_mac.u.vlan_mac; + + /* Check pending for execution commands */ + list_for_each_entry(pos, &o->exe_queue, link) + if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + +/** + * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed + * + * @bp: device handle + * @qo: bnx2x_qable_obj + * @elem: bnx2x_exeq_elem + * + * Checks that the requested configuration can be added. If yes and if + * requested, consume CAM credit. + * + * The 'validate' is run after the 'optimize'. + * + */ +static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + int rc; + + /* Check the registry */ + rc = o->check_add(o, &elem->cmd_data.vlan_mac.u); + if (rc) { + DP(BNX2X_MSG_SP, "ADD command is not allowed considering " + "current registry state\n"); + return rc; + } + + /* + * Check if there is a pending ADD command for this + * MAC/VLAN/VLAN-MAC. Return an error if there is. + */ + if (exeq->get(exeq, elem)) { + DP(BNX2X_MSG_SP, "There is a pending ADD command already\n"); + return -EEXIST; + } + + /* + * TODO: Check the pending MOVE from other objects where this + * object is a destination object. + */ + + /* Consume the credit if not requested not to */ + if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + o->get_credit(o))) + return -EINVAL; + + return 0; +} + +/** + * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed + * + * @bp: device handle + * @qo: quable object to check + * @elem: element that needs to be deleted + * + * Checks that the requested configuration can be deleted. If yes and if + * requested, returns a CAM credit. + * + * The 'validate' is run after the 'optimize'. 
+ */
+static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
+					      union bnx2x_qable_obj *qo,
+					      struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_exeq_elem query_elem;
+
+	/* If this classification can not be deleted (it doesn't exist),
+	 * return -EEXIST.
+	 */
+	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+	if (!pos) {
+		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
+				 "current registry state\n");
+		return -EEXIST;
+	}
+
+	/*
+	 * Check if there are pending DEL or MOVE commands for this
+	 * MAC/VLAN/VLAN-MAC. Return an error if so.
+	 */
+	memcpy(&query_elem, elem, sizeof(query_elem));
+
+	/* Check for MOVE commands */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
+	if (exeq->get(exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending MOVE command already\n");
+		return -EINVAL;
+	}
+
+	/* Check for DEL commands */
+	if (exeq->get(exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
+		return -EEXIST;
+	}
+
+	/* Return the credit to the credit pool, unless explicitly asked
+	 * not to.
+	 */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	      o->put_credit(o))) {
+		BNX2X_ERR("Failed to return a credit\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
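None of these validators is called directly; they run from the execution queue when a command is submitted through bnx2x_config_vlan_mac(), defined later in this file. For illustration, a minimal synchronous ADD of a MAC, assuming an already initialized MAC object (the example_ wrapper itself is hypothetical):

	static int example_add_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *mac_obj,
				   u8 *mac)
	{
		struct bnx2x_vlan_mac_ramrod_params p;

		memset(&p, 0, sizeof(p));
		p.vlan_mac_obj = mac_obj;
		p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
		memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);

		/* Queue, execute and wait for completion in one call */
		__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

		return bnx2x_config_vlan_mac(bp, &p);
	}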
+/**
+ * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		qable object to check (source)
+ * @elem:	element that needs to be moved
+ *
+ * Checks that the requested configuration can be moved. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
+					       union bnx2x_qable_obj *qo,
+					       struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
+	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
+	struct bnx2x_exeq_elem query_elem;
+	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
+	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
+
+	/*
+	 * Check if we can perform this operation based on the current registry
+	 * state.
+	 */
+	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
+		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
+				 "current registry state\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check if there is an already pending DEL or MOVE command for the
+	 * source object or an ADD command for the destination object. Return
+	 * an error if so.
+	 */
+	memcpy(&query_elem, elem, sizeof(query_elem));
+
+	/* Check DEL on source */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+	if (src_exeq->get(src_exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending DEL command on the source "
+			  "queue already\n");
+		return -EINVAL;
+	}
+
+	/* Check MOVE on source */
+	if (src_exeq->get(src_exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
+		return -EEXIST;
+	}
+
+	/* Check ADD on destination */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+	if (dest_exeq->get(dest_exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending ADD command on the "
+			  "destination queue already\n");
+		return -EINVAL;
+	}
+
+	/* Consume the credit, unless explicitly asked not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	      dest_o->get_credit(dest_o)))
+		return -EINVAL;
+
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	      src_o->put_credit(src_o))) {
+		/* return the credit taken from dest... */
+		dest_o->put_credit(dest_o);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
+				   union bnx2x_qable_obj *qo,
+				   struct bnx2x_exeq_elem *elem)
+{
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
+	case BNX2X_VLAN_MAC_DEL:
+		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
+	case BNX2X_VLAN_MAC_MOVE:
+		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * bnx2x_wait_vlan_mac - passively wait for up to 5 seconds until all work
+ * completes.
+ *
+ * @bp:	device handle
+ * @o:	bnx2x_vlan_mac_obj
+ *
+ */
+static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
+			       struct bnx2x_vlan_mac_obj *o)
+{
+	int cnt = 5000, rc;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	while (cnt--) {
+		/* Wait for the current command to complete */
+		rc = raw->wait_comp(bp, raw);
+		if (rc)
+			return rc;
+
+		/* Wait until there are no pending commands */
+		if (!bnx2x_exe_queue_empty(exeq))
+			usleep_range(1000, 1000);
+		else
+			return 0;
+	}
+
+	return -EBUSY;
+}
+
+/**
+ * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
+ *
+ * @bp:			device handle
+ * @o:			bnx2x_vlan_mac_obj
+ * @cqe:		completion element
+ * @ramrod_flags:	if RAMROD_CONT is set, schedules the next execution
+ *			chunk
+ *
+ */
+static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
+				   struct bnx2x_vlan_mac_obj *o,
+				   union event_ring_elem *cqe,
+				   unsigned long *ramrod_flags)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc;
+
+	/* Reset pending list */
+	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+
+	/* Clear pending */
+	r->clear_pending(r);
+
+	/* If ramrod failed this is most likely a SW bug */
+	if (cqe->message.error)
+		return -EINVAL;
+
+	/* Run the next bulk of pending commands if requested */
+	if (test_bit(RAMROD_CONT, ramrod_flags)) {
+		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+		if (rc < 0)
+			return rc;
+	}
+
+	/* If there is more work to do return PENDING */
+	if (!bnx2x_exe_queue_empty(&o->exe_queue))
+		return 1;
+
+	return 0;
+}
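The optimizer below exists so that a DEL arriving while the matching ADD is still queued (or vice versa) can cancel both commands without ever reaching the FW, returning or recovering the CAM credit taken by the earlier 'validate'. A sketch of the pattern it catches (the example_ wrapper is hypothetical; positive return values follow the pending convention used above):

	static int example_add_then_del(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *mac_obj,
					u8 *mac)
	{
		struct bnx2x_vlan_mac_ramrod_params p;
		int rc;

		memset(&p, 0, sizeof(p));
		p.vlan_mac_obj = mac_obj;
		memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);

		/* Queue the ADD without RAMROD_EXEC: nothing is sent yet */
		p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
		rc = bnx2x_config_vlan_mac(bp, &p);
		if (rc < 0)
			return rc;

		/* The matching DEL should optimize the pending ADD away */
		p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
		return bnx2x_config_vlan_mac(bp, &p);
	}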
+/**
+ * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands
+ *
+ * @bp:		device handle
+ * @qo:		bnx2x_qable_obj
+ * @elem:	bnx2x_exeq_elem
+ */
+static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
+				   union bnx2x_qable_obj *qo,
+				   struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem query, *pos;
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+
+	memcpy(&query, elem, sizeof(query));
+
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+		break;
+	case BNX2X_VLAN_MAC_DEL:
+		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+		break;
+	default:
+		/* Don't handle anything other than ADD or DEL */
+		return 0;
+	}
+
+	/* If we found the appropriate element - delete it */
+	pos = exeq->get(exeq, &query);
+	if (pos) {
+
+		/* Return the credit of the optimized command */
+		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
+			if ((query.cmd_data.vlan_mac.cmd ==
+			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
+				BNX2X_ERR("Failed to return the credit for "
+					  "the optimized ADD command\n");
+				return -EINVAL;
+			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
+				BNX2X_ERR("Failed to recover the credit from "
+					  "the optimized DEL command\n");
+				return -EINVAL;
+			}
+		}
+
+		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
+		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		   "ADD" : "DEL");
+
+		list_del(&pos->link);
+		bnx2x_exe_queue_free_elem(bp, pos);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ * @elem:	execution queue element
+ * @restore:	true if this is a restore flow
+ * @re:		output parameter for the registry element
+ *
+ * prepare a registry element according to the current command request.
+ */
+static inline int bnx2x_vlan_mac_get_registry_elem(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o,
+	struct bnx2x_exeq_elem *elem,
+	bool restore,
+	struct bnx2x_vlan_mac_registry_elem **re)
+{
+	int cmd = elem->cmd_data.vlan_mac.cmd;
+	struct bnx2x_vlan_mac_registry_elem *reg_elem;
+
+	/* Allocate a new registry element if needed. */
+	if (!restore &&
+	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
+		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
+		if (!reg_elem)
+			return -ENOMEM;
+
+		/* Get a new CAM offset */
+		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
+			/*
+			 * This should never happen, because we have checked
+			 * the CAM availability in the 'validate'.
+			 */
+			WARN_ON(1);
+			kfree(reg_elem);
+			return -EINVAL;
+		}
+
+		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
+
+		/* Set a VLAN-MAC data */
+		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
+		       sizeof(reg_elem->u));
+
+		/* Copy the flags (needed for DEL and RESTORE flows) */
+		reg_elem->vlan_mac_flags =
+			elem->cmd_data.vlan_mac.vlan_mac_flags;
+	} else /* DEL, RESTORE */
+		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+	*re = reg_elem;
+	return 0;
+}
+
+/**
+ * bnx2x_execute_vlan_mac - execute vlan mac command
+ *
+ * @bp:			device handle
+ * @qo:			queueable object
+ * @exe_chunk:		chunk of commands to execute
+ * @ramrod_flags:	execution flags
+ *
+ * go and send a ramrod!
+ */ +static int bnx2x_execute_vlan_mac(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct list_head *exe_chunk, + unsigned long *ramrod_flags) +{ + struct bnx2x_exeq_elem *elem; + struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; + struct bnx2x_raw_obj *r = &o->raw; + int rc, idx = 0; + bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); + bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); + struct bnx2x_vlan_mac_registry_elem *reg_elem; + int cmd; + + /* + * If DRIVER_ONLY execution is requested, cleanup a registry + * and exit. Otherwise send a ramrod to FW. + */ + if (!drv_only) { + WARN_ON(r->check_pending(r)); + + /* Set pending */ + r->set_pending(r); + + /* Fill tha ramrod data */ + list_for_each_entry(elem, exe_chunk, link) { + cmd = elem->cmd_data.vlan_mac.cmd; + /* + * We will add to the target object in MOVE command, so + * change the object for a CAM search. + */ + if (cmd == BNX2X_VLAN_MAC_MOVE) + cam_obj = elem->cmd_data.vlan_mac.target_obj; + else + cam_obj = o; + + rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj, + elem, restore, + ®_elem); + if (rc) + goto error_exit; + + WARN_ON(!reg_elem); + + /* Push a new entry into the registry */ + if (!restore && + ((cmd == BNX2X_VLAN_MAC_ADD) || + (cmd == BNX2X_VLAN_MAC_MOVE))) + list_add(®_elem->link, &cam_obj->head); + + /* Configure a single command in a ramrod data buffer */ + o->set_one_rule(bp, o, elem, idx, + reg_elem->cam_offset); + + /* MOVE command consumes 2 entries in the ramrod data */ + if (cmd == BNX2X_VLAN_MAC_MOVE) + idx += 2; + else + idx++; + } + + /* + * No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
+ */ + + rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, + U64_HI(r->rdata_mapping), + U64_LO(r->rdata_mapping), + ETH_CONNECTION_TYPE); + if (rc) + goto error_exit; + } + + /* Now, when we are done with the ramrod - clean up the registry */ + list_for_each_entry(elem, exe_chunk, link) { + cmd = elem->cmd_data.vlan_mac.cmd; + if ((cmd == BNX2X_VLAN_MAC_DEL) || + (cmd == BNX2X_VLAN_MAC_MOVE)) { + reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); + + WARN_ON(!reg_elem); + + o->put_cam_offset(o, reg_elem->cam_offset); + list_del(®_elem->link); + kfree(reg_elem); + } + } + + if (!drv_only) + return 1; + else + return 0; + +error_exit: + r->clear_pending(r); + + /* Cleanup a registry in case of a failure */ + list_for_each_entry(elem, exe_chunk, link) { + cmd = elem->cmd_data.vlan_mac.cmd; + + if (cmd == BNX2X_VLAN_MAC_MOVE) + cam_obj = elem->cmd_data.vlan_mac.target_obj; + else + cam_obj = o; + + /* Delete all newly added above entries */ + if (!restore && + ((cmd == BNX2X_VLAN_MAC_ADD) || + (cmd == BNX2X_VLAN_MAC_MOVE))) { + reg_elem = o->check_del(cam_obj, + &elem->cmd_data.vlan_mac.u); + if (reg_elem) { + list_del(®_elem->link); + kfree(reg_elem); + } + } + } + + return rc; +} + +static inline int bnx2x_vlan_mac_push_new_cmd( + struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p) +{ + struct bnx2x_exeq_elem *elem; + struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; + bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags); + + /* Allocate the execution queue element */ + elem = bnx2x_exe_queue_alloc_elem(bp); + if (!elem) + return -ENOMEM; + + /* Set the command 'length' */ + switch (p->user_req.cmd) { + case BNX2X_VLAN_MAC_MOVE: + elem->cmd_len = 2; + break; + default: + elem->cmd_len = 1; + } + + /* Fill the object specific info */ + memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); + + /* Try to add a new command to the pending list */ + return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore); +} + +/** + * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. + * + * @bp: device handle + * @p: + * + */ +int bnx2x_config_vlan_mac( + struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p) +{ + int rc = 0; + struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; + unsigned long *ramrod_flags = &p->ramrod_flags; + bool cont = test_bit(RAMROD_CONT, ramrod_flags); + struct bnx2x_raw_obj *raw = &o->raw; + + /* + * Add new elements to the execution list for commands that require it. + */ + if (!cont) { + rc = bnx2x_vlan_mac_push_new_cmd(bp, p); + if (rc) + return rc; + } + + /* + * If nothing will be executed further in this iteration we want to + * return PENDING if there are pending commands + */ + if (!bnx2x_exe_queue_empty(&o->exe_queue)) + rc = 1; + + if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { + DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " + "clearing a pending bit.\n"); + raw->clear_pending(raw); + } + + /* Execute commands if required */ + if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || + test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + if (rc < 0) + return rc; + } + + /* + * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set + * then user want to wait until the last command is done. + */ + if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { + /* + * Wait maximum for the current exe_queue length iterations plus + * one (for the current pending command). 
+		 */
+		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
+
+		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
+		       max_iterations--) {
+
+			/* Wait for the current command to complete */
+			rc = raw->wait_comp(bp, raw);
+			if (rc)
+				return rc;
+
+			/* Make a next step */
+			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
+						  ramrod_flags);
+			if (rc < 0)
+				return rc;
+		}
+
+		return 0;
+	}
+
+	return rc;
+}
+
+
+
+/**
+ * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ * @vlan_mac_flags:	vlan_mac_flags to match against
+ * @ramrod_flags:	execution flags to be used for this deletion
+ *
+ * Returns 0 if the last operation has completed successfully and there are
+ * no more elements left, a positive value if the last operation has
+ * completed successfully and there are more previously configured elements,
+ * and a negative value if the current operation has failed.
+ */
+static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  unsigned long *vlan_mac_flags,
+				  unsigned long *ramrod_flags)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+	int rc = 0;
+	struct bnx2x_vlan_mac_ramrod_params p;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+
+	/* Clear pending commands first */
+
+	spin_lock_bh(&exeq->lock);
+
+	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
+		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
+		    *vlan_mac_flags)
+			list_del(&exeq_pos->link);
+	}
+
+	spin_unlock_bh(&exeq->lock);
+
+	/* Prepare a command request */
+	memset(&p, 0, sizeof(p));
+	p.vlan_mac_obj = o;
+	p.ramrod_flags = *ramrod_flags;
+	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+	/*
+	 * Add all but the last VLAN-MAC to the execution queue without
+	 * actually executing anything.
+ */ + __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); + __clear_bit(RAMROD_EXEC, &p.ramrod_flags); + __clear_bit(RAMROD_CONT, &p.ramrod_flags); + + list_for_each_entry(pos, &o->head, link) { + if (pos->vlan_mac_flags == *vlan_mac_flags) { + p.user_req.vlan_mac_flags = pos->vlan_mac_flags; + memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); + rc = bnx2x_config_vlan_mac(bp, &p); + if (rc < 0) { + BNX2X_ERR("Failed to add a new DEL command\n"); + return rc; + } + } + } + + p.ramrod_flags = *ramrod_flags; + __set_bit(RAMROD_CONT, &p.ramrod_flags); + + return bnx2x_config_vlan_mac(bp, &p); +} + +static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id, + u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type) +{ + raw->func_id = func_id; + raw->cid = cid; + raw->cl_id = cl_id; + raw->rdata = rdata; + raw->rdata_mapping = rdata_mapping; + raw->state = state; + raw->pstate = pstate; + raw->obj_type = type; + raw->check_pending = bnx2x_raw_check_pending; + raw->clear_pending = bnx2x_raw_clear_pending; + raw->set_pending = bnx2x_raw_set_pending; + raw->wait_comp = bnx2x_raw_wait; +} + +static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, + u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, + int state, unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool, + struct bnx2x_credit_pool_obj *vlans_pool) +{ + INIT_LIST_HEAD(&o->head); + + o->macs_pool = macs_pool; + o->vlans_pool = vlans_pool; + + o->delete_all = bnx2x_vlan_mac_del_all; + o->restore = bnx2x_vlan_mac_restore; + o->complete = bnx2x_complete_vlan_mac; + o->wait = bnx2x_wait_vlan_mac; + + bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, + state, pstate, type); +} + + +void bnx2x_init_mac_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool) +{ + union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj; + + bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, + macs_pool, NULL); + + /* CAM credit pool handling */ + mac_obj->get_credit = bnx2x_get_credit_mac; + mac_obj->put_credit = bnx2x_put_credit_mac; + mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; + mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; + + if (CHIP_IS_E1x(bp)) { + mac_obj->set_one_rule = bnx2x_set_one_mac_e1x; + mac_obj->check_del = bnx2x_check_mac_del; + mac_obj->check_add = bnx2x_check_mac_add; + mac_obj->check_move = bnx2x_check_move_always_err; + mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &mac_obj->exe_queue, 1, qable_obj, + bnx2x_validate_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_mac); + } else { + mac_obj->set_one_rule = bnx2x_set_one_mac_e2; + mac_obj->check_del = bnx2x_check_mac_del; + mac_obj->check_add = bnx2x_check_mac_add; + mac_obj->check_move = bnx2x_check_move; + mac_obj->ramrod_cmd = + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, + qable_obj, bnx2x_validate_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_mac); + } +} + +void bnx2x_init_vlan_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_obj, + u8 cl_id, u32 cid, u8 func_id, void 
*rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *vlans_pool) +{ + union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj; + + bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, NULL, + vlans_pool); + + vlan_obj->get_credit = bnx2x_get_credit_vlan; + vlan_obj->put_credit = bnx2x_put_credit_vlan; + vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan; + vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan; + + if (CHIP_IS_E1x(bp)) { + BNX2X_ERR("Do not support chips others than E2 and newer\n"); + BUG(); + } else { + vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2; + vlan_obj->check_del = bnx2x_check_vlan_del; + vlan_obj->check_add = bnx2x_check_vlan_add; + vlan_obj->check_move = bnx2x_check_move; + vlan_obj->ramrod_cmd = + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, + qable_obj, bnx2x_validate_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_vlan); + } +} + +void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool, + struct bnx2x_credit_pool_obj *vlans_pool) +{ + union bnx2x_qable_obj *qable_obj = + (union bnx2x_qable_obj *)vlan_mac_obj; + + bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, + macs_pool, vlans_pool); + + /* CAM pool handling */ + vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; + vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; + /* + * CAM offset is relevant for 57710 and 57711 chips only which have a + * single CAM for both MACs and VLAN-MAC pairs. So the offset + * will be taken from MACs' pool object only. 
+	 */
+	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1(bp)) {
+		BNX2X_ERR("Do not support chips other than E2\n");
+		BUG();
+	} else if (CHIP_IS_E1H(bp)) {
+		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
+		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move = bnx2x_check_move_always_err;
+		vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	} else {
+		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
+		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move = bnx2x_check_move;
+		vlan_mac_obj->ramrod_cmd =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue,
+				     CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	}
+
+}
+
+/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+static inline void __storm_memset_mac_filters(struct bnx2x *bp,
+			struct tstorm_eth_mac_filter_config *mac_filters,
+			u16 pf_id)
+{
+	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+}
+
+static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
+				 struct bnx2x_rx_mode_ramrod_params *p)
+{
+	/* update the bp MAC filter structure */
+	u32 mask = (1 << p->cl_id);
+
+	struct tstorm_eth_mac_filter_config *mac_filters =
+		(struct tstorm_eth_mac_filter_config *)p->rdata;
+
+	/* initial setting is drop-all */
+	u8 drop_all_ucast = 1, drop_all_mcast = 1;
+	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+	u8 unmatched_unicast = 0;
+
+	/* In E1x we only take the Rx accept flags into account, since
+	 * Tx switching isn't enabled.
+	 */
+	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
+		/* accept matched ucast */
+		drop_all_ucast = 0;
+
+	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
+		/* accept matched mcast */
+		drop_all_mcast = 0;
+
+	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
+		/* accept all ucast */
+		drop_all_ucast = 0;
+		accp_all_ucast = 1;
+	}
+	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
+		/* accept all mcast */
+		drop_all_mcast = 0;
+		accp_all_mcast = 1;
+	}
+	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
+		/* accept (all) bcast */
+		accp_all_bcast = 1;
+	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
+		/* accept unmatched unicasts */
+		unmatched_unicast = 1;
+
+	mac_filters->ucast_drop_all = drop_all_ucast ?
+		mac_filters->ucast_drop_all | mask :
+		mac_filters->ucast_drop_all & ~mask;
+
+	mac_filters->mcast_drop_all = drop_all_mcast ?
+		mac_filters->mcast_drop_all | mask :
+		mac_filters->mcast_drop_all & ~mask;
+
+	mac_filters->ucast_accept_all = accp_all_ucast ?
+		mac_filters->ucast_accept_all | mask :
+		mac_filters->ucast_accept_all & ~mask;
+
+	mac_filters->mcast_accept_all = accp_all_mcast ?
+		mac_filters->mcast_accept_all | mask :
+		mac_filters->mcast_accept_all & ~mask;
+
+	mac_filters->bcast_accept_all = accp_all_bcast ?
+ mac_filters->bcast_accept_all | mask : + mac_filters->bcast_accept_all & ~mask; + + mac_filters->unmatched_unicast = unmatched_unicast ? + mac_filters->unmatched_unicast | mask : + mac_filters->unmatched_unicast & ~mask; + + DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" + "accp_mcast 0x%x\naccp_bcast 0x%x\n", + mac_filters->ucast_drop_all, + mac_filters->mcast_drop_all, + mac_filters->ucast_accept_all, + mac_filters->mcast_accept_all, + mac_filters->bcast_accept_all); + + /* write the MAC filter structure*/ + __storm_memset_mac_filters(bp, mac_filters, p->func_id); + + /* The operation is completed */ + clear_bit(p->state, p->pstate); + smp_mb__after_clear_bit(); + + return 0; +} + +/* Setup ramrod data */ +static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, + struct eth_classify_header *hdr, + u8 rule_cnt) +{ + hdr->echo = cid; + hdr->rule_cnt = rule_cnt; +} + +static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, + unsigned long accept_flags, + struct eth_filter_rules_cmd *cmd, + bool clear_accept_all) +{ + u16 state; + + /* start with 'drop-all' */ + state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | + ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + + if (accept_flags) { + if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + + if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + + if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; + } + + if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) { + state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + } + if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags)) + state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + + if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; + } + if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags)) + state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; + } + + /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ + if (clear_accept_all) { + state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; + } + + cmd->state = cpu_to_le16(state); + +} + +static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + struct eth_filter_rules_ramrod_data *data = p->rdata; + int rc; + u8 rule_idx = 0; + + /* Reset the ramrod data buffer */ + memset(data, 0, sizeof(*data)); + + /* Setup ramrod data */ + + /* Tx (internal switching) */ + if (test_bit(RAMROD_TX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = p->cl_id; + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_TX_CMD; + + bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, + &(data->rules[rule_idx++]), false); + } + + /* Rx */ + if (test_bit(RAMROD_RX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = p->cl_id; + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_RX_CMD; + + bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, + &(data->rules[rule_idx++]), false); + } + + + /* + * If FCoE Queue configuration has been requested configure the Rx and + * internal switching modes 
for this queue in separate rules. + * + * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: + * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. + */ + if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { + /* Tx (internal switching) */ + if (test_bit(RAMROD_TX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_TX_CMD; + + bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, + &(data->rules[rule_idx++]), + true); + } + + /* Rx */ + if (test_bit(RAMROD_RX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_RX_CMD; + + bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, + &(data->rules[rule_idx++]), + true); + } + } + + /* + * Set the ramrod header (most importantly - number of rules to + * configure). + */ + bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); + + DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, " + "tx_accept_flags 0x%lx\n", + data->header.rule_cnt, p->rx_accept_flags, + p->tx_accept_flags); + + /* + * No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). + */ + + /* Send a ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid, + U64_HI(p->rdata_mapping), + U64_LO(p->rdata_mapping), + ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return 1; +} + +static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + return bnx2x_state_wait(bp, p->state, p->pstate); +} + +static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + /* Do nothing */ + return 0; +} + +int bnx2x_config_rx_mode(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + int rc; + + /* Configure the new classification in the chip */ + rc = p->rx_mode_obj->config_rx_mode(bp, p); + if (rc < 0) + return rc; + + /* Wait for a ramrod completion if was requested */ + if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { + rc = p->rx_mode_obj->wait_comp(bp, p); + if (rc) + return rc; + } + + return rc; +} + +void bnx2x_init_rx_mode_obj(struct bnx2x *bp, + struct bnx2x_rx_mode_obj *o) +{ + if (CHIP_IS_E1x(bp)) { + o->wait_comp = bnx2x_empty_rx_mode_wait; + o->config_rx_mode = bnx2x_set_rx_mode_e1x; + } else { + o->wait_comp = bnx2x_wait_rx_mode_comp_e2; + o->config_rx_mode = bnx2x_set_rx_mode_e2; + } +} + +/********************* Multicast verbs: SET, CLEAR ****************************/ +static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac) +{ + return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff; +} + +struct bnx2x_mcast_mac_elem { + struct list_head link; + u8 mac[ETH_ALEN]; + u8 pad[2]; /* For a natural alignment of the following buffer */ +}; + +struct bnx2x_pending_mcast_cmd { + struct list_head link; + int type; /* BNX2X_MCAST_CMD_X */ + union { + struct list_head macs_head; + u32 macs_num; /* Needed for DEL command */ + int next_bin; /* Needed for RESTORE flow with aprox match */ + } data; + + bool done; /* set to true, when the command has been handled, + * practically used in 57712 handling only, where one pending + * 
command may be handled in a few operations. As long as for + * other chips every operation handling is completed in a + * single ramrod, there is no need to utilize this field. + */ +}; + +static int bnx2x_mcast_wait(struct bnx2x *bp, + struct bnx2x_mcast_obj *o) +{ + if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) || + o->raw.wait_comp(bp, &o->raw)) + return -EBUSY; + + return 0; +} + +static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, + struct bnx2x_mcast_ramrod_params *p, + int cmd) +{ + int total_sz; + struct bnx2x_pending_mcast_cmd *new_cmd; + struct bnx2x_mcast_mac_elem *cur_mac = NULL; + struct bnx2x_mcast_list_elem *pos; + int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ? + p->mcast_list_len : 0); + + /* If the command is empty ("handle pending commands only"), break */ + if (!p->mcast_list_len) + return 0; + + total_sz = sizeof(*new_cmd) + + macs_list_len * sizeof(struct bnx2x_mcast_mac_elem); + + /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ + new_cmd = kzalloc(total_sz, GFP_ATOMIC); + + if (!new_cmd) + return -ENOMEM; + + DP(BNX2X_MSG_SP, "About to enqueue a new %d command. " + "macs_list_len=%d\n", cmd, macs_list_len); + + INIT_LIST_HEAD(&new_cmd->data.macs_head); + + new_cmd->type = cmd; + new_cmd->done = false; + + switch (cmd) { + case BNX2X_MCAST_CMD_ADD: + cur_mac = (struct bnx2x_mcast_mac_elem *) + ((u8 *)new_cmd + sizeof(*new_cmd)); + + /* Push the MACs of the current command into the pendig command + * MACs list: FIFO + */ + list_for_each_entry(pos, &p->mcast_list, link) { + memcpy(cur_mac->mac, pos->mac, ETH_ALEN); + list_add_tail(&cur_mac->link, &new_cmd->data.macs_head); + cur_mac++; + } + + break; + + case BNX2X_MCAST_CMD_DEL: + new_cmd->data.macs_num = p->mcast_list_len; + break; + + case BNX2X_MCAST_CMD_RESTORE: + new_cmd->data.next_bin = 0; + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd); + return -EINVAL; + } + + /* Push the new pending command to the tail of the pending list: FIFO */ + list_add_tail(&new_cmd->link, &o->pending_cmds_head); + + o->set_sched(o); + + return 1; +} + +/** + * bnx2x_mcast_get_next_bin - get the next set bin (index) + * + * @o: + * @last: index to start looking from (including) + * + * returns the next found (set) bin or a negative value if none is found. + */ +static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last) +{ + int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; + + for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) { + if (o->registry.aprox_match.vec[i]) + for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { + int cur_bit = j + BIT_VEC64_ELEM_SZ * i; + if (BIT_VEC64_TEST_BIT(o->registry.aprox_match. 
+ vec, cur_bit)) { + return cur_bit; + } + } + inner_start = 0; + } + + /* None found */ + return -1; +} + +/** + * bnx2x_mcast_clear_first_bin - find the first set bin and clear it + * + * @o: + * + * returns the index of the found bin or -1 if none is found + */ +static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o) +{ + int cur_bit = bnx2x_mcast_get_next_bin(o, 0); + + if (cur_bit >= 0) + BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); + + return cur_bit; +} + +static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) +{ + struct bnx2x_raw_obj *raw = &o->raw; + u8 rx_tx_flag = 0; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; + + return rx_tx_flag; +} + +static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, int idx, + union bnx2x_mcast_config_data *cfg_data, + int cmd) +{ + struct bnx2x_raw_obj *r = &o->raw; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(r->rdata); + u8 func_id = r->func_id; + u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); + int bin; + + if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) + rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; + + data->rules[idx].cmd_general_data |= rx_tx_add_flag; + + /* Get a bin and update a bins' vector */ + switch (cmd) { + case BNX2X_MCAST_CMD_ADD: + bin = bnx2x_mcast_bin_from_mac(cfg_data->mac); + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); + break; + + case BNX2X_MCAST_CMD_DEL: + /* If there were no more bins to clear + * (bnx2x_mcast_clear_first_bin() returns -1) then we would + * clear any (0xff) bin. + * See bnx2x_mcast_validate_e2() for explanation when it may + * happen. + */ + bin = bnx2x_mcast_clear_first_bin(o); + break; + + case BNX2X_MCAST_CMD_RESTORE: + bin = cfg_data->bin; + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd); + return; + } + + DP(BNX2X_MSG_SP, "%s bin %d\n", + ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? + "Setting" : "Clearing"), bin); + + data->rules[idx].bin_id = (u8)bin; + data->rules[idx].func_id = func_id; + data->rules[idx].engine_id = o->engine_id; +} + +/** + * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry + * + * @bp: device handle + * @o: + * @start_bin: index in the registry to start from (including) + * @rdata_idx: index in the ramrod data to start from + * + * returns last handled bin index or -1 if all bins have been handled + */ +static inline int bnx2x_mcast_handle_restore_cmd_e2( + struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, + int *rdata_idx) +{ + int cur_bin, cnt = *rdata_idx; + union bnx2x_mcast_config_data cfg_data = {0}; + + /* go through the registry and configure the bins from it */ + for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; + cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) { + + cfg_data.bin = (u8)cur_bin; + o->set_one_rule(bp, o, cnt, &cfg_data, + BNX2X_MCAST_CMD_RESTORE); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin); + + /* Break if we reached the maximum number + * of rules. 
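+ * The caller can then resume the restore from the returned bin
+ * (see bnx2x_mcast_hdl_pending_restore_e2()).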
+ */ + if (cnt >= o->max_cmd_len) + break; + } + + *rdata_idx = cnt; + + return cur_bin; +} + +static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, + int *line_idx) +{ + struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; + int cnt = *line_idx; + union bnx2x_mcast_config_data cfg_data = {0}; + + list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, + link) { + + cfg_data.mac = &pmac_pos->mac[0]; + o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT + " mcast MAC\n", + BNX2X_MAC_PRN_LIST(pmac_pos->mac)); + + list_del(&pmac_pos->link); + + /* Break if we reached the maximum number + * of rules. + */ + if (cnt >= o->max_cmd_len) + break; + } + + *line_idx = cnt; + + /* if no more MACs to configure - we are done */ + if (list_empty(&cmd_pos->data.macs_head)) + cmd_pos->done = true; +} + +static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, + int *line_idx) +{ + int cnt = *line_idx; + + while (cmd_pos->data.macs_num) { + o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); + + cnt++; + + cmd_pos->data.macs_num--; + + DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n", + cmd_pos->data.macs_num, cnt); + + /* Break if we reached the maximum + * number of rules. + */ + if (cnt >= o->max_cmd_len) + break; + } + + *line_idx = cnt; + + /* If we cleared all bins - we are done */ + if (!cmd_pos->data.macs_num) + cmd_pos->done = true; +} + +static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, + int *line_idx) +{ + cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, + line_idx); + + if (cmd_pos->data.next_bin < 0) + /* If o->set_restore returned -1 we are done */ + cmd_pos->done = true; + else + /* Start from the next bin next time */ + cmd_pos->data.next_bin++; +} + +static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p) +{ + struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n; + int cnt = 0; + struct bnx2x_mcast_obj *o = p->mcast_obj; + + list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head, + link) { + switch (cmd_pos->type) { + case BNX2X_MCAST_CMD_ADD: + bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); + break; + + case BNX2X_MCAST_CMD_DEL: + bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt); + break; + + case BNX2X_MCAST_CMD_RESTORE: + bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos, + &cnt); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); + return -EINVAL; + } + + /* If the command has been completed - remove it from the list + * and free the memory + */ + if (cmd_pos->done) { + list_del(&cmd_pos->link); + kfree(cmd_pos); + } + + /* Break if we reached the maximum number of rules */ + if (cnt >= o->max_cmd_len) + break; + } + + return cnt; +} + +static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, + int *line_idx) +{ + struct bnx2x_mcast_list_elem *mlist_pos; + union bnx2x_mcast_config_data cfg_data = {0}; + int cnt = *line_idx; + + list_for_each_entry(mlist_pos, &p->mcast_list, link) { + cfg_data.mac = mlist_pos->mac; + o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT + " mcast MAC\n", + 
BNX2X_MAC_PRN_LIST(mlist_pos->mac)); + } + + *line_idx = cnt; +} + +static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, + int *line_idx) +{ + int cnt = *line_idx, i; + + for (i = 0; i < p->mcast_list_len; i++) { + o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL); + + cnt++; + + DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n", + p->mcast_list_len - i - 1); + } + + *line_idx = cnt; +} + +/** + * bnx2x_mcast_handle_current_cmd - + * + * @bp: device handle + * @p: + * @cmd: + * @start_cnt: first line in the ramrod data that may be used + * + * This function is called iff there is enough place for the current command in + * the ramrod data. + * Returns number of lines filled in the ramrod data in total. + */ +static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, int cmd, + int start_cnt) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + int cnt = start_cnt; + + DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); + + switch (cmd) { + case BNX2X_MCAST_CMD_ADD: + bnx2x_mcast_hdl_add(bp, o, p, &cnt); + break; + + case BNX2X_MCAST_CMD_DEL: + bnx2x_mcast_hdl_del(bp, o, p, &cnt); + break; + + case BNX2X_MCAST_CMD_RESTORE: + o->hdl_restore(bp, o, 0, &cnt); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd); + return -EINVAL; + } + + /* The current command has been handled */ + p->mcast_list_len = 0; + + return cnt; +} + +static int bnx2x_mcast_validate_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int cmd) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + int reg_sz = o->get_registry_size(o); + + switch (cmd) { + /* DEL command deletes all currently configured MACs */ + case BNX2X_MCAST_CMD_DEL: + o->set_registry_size(o, 0); + /* Don't break */ + + /* RESTORE command will restore the entire multicast configuration */ + case BNX2X_MCAST_CMD_RESTORE: + /* Here we set the approximate amount of work to do, which in + * fact may be only less as some MACs in postponed ADD + * command(s) scheduled before this command may fall into + * the same bin and the actual number of bins set in the + * registry would be less than we estimated here. See + * bnx2x_mcast_set_one_rule_e2() for further details. + */ + p->mcast_list_len = reg_sz; + break; + + case BNX2X_MCAST_CMD_ADD: + case BNX2X_MCAST_CMD_CONT: + /* Here we assume that all new MACs will fall into new bins. + * However we will correct the real registry size after we + * handle all pending commands. 
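+ * The correction is done by bnx2x_mcast_refresh_registry_e2(),
+ * which bnx2x_mcast_setup_e2() calls once no commands are pending.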
+ */ + o->set_registry_size(o, reg_sz + p->mcast_list_len); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd); + return -EINVAL; + + } + + /* Increase the total number of MACs pending to be configured */ + o->total_pending_num += p->mcast_list_len; + + return 0; +} + +static void bnx2x_mcast_revert_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int old_num_bins) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + + o->set_registry_size(o, old_num_bins); + o->total_pending_num -= p->mcast_list_len; +} + +/** + * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values + * + * @bp: device handle + * @p: + * @len: number of rules to handle + */ +static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + u8 len) +{ + struct bnx2x_raw_obj *r = &p->mcast_obj->raw; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(r->rdata); + + data->header.echo = ((r->cid & BNX2X_SWCID_MASK) | + (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); + data->header.rule_cnt = len; +} + +/** + * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins + * + * @bp: device handle + * @o: + * + * Recalculate the actual number of set bins in the registry using Brian + * Kernighan's algorithm: it's execution complexity is as a number of set bins. + * + * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1(). + */ +static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o) +{ + int i, cnt = 0; + u64 elem; + + for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) { + elem = o->registry.aprox_match.vec[i]; + for (; elem; cnt++) + elem &= elem - 1; + } + + o->set_registry_size(o, cnt); + + return 0; +} + +static int bnx2x_mcast_setup_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int cmd) +{ + struct bnx2x_raw_obj *raw = &p->mcast_obj->raw; + struct bnx2x_mcast_obj *o = p->mcast_obj; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(raw->rdata); + int cnt = 0, rc; + + /* Reset the ramrod data buffer */ + memset(data, 0, sizeof(*data)); + + cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p); + + /* If there are no more pending commands - clear SCHEDULED state */ + if (list_empty(&o->pending_cmds_head)) + o->clear_sched(o); + + /* The below may be true iff there was enough room in ramrod + * data for all pending commands and for the current + * command. Otherwise the current command would have been added + * to the pending commands and p->mcast_list_len would have been + * zeroed. + */ + if (p->mcast_list_len > 0) + cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt); + + /* We've pulled out some MACs - update the total number of + * outstanding. + */ + o->total_pending_num -= cnt; + + /* send a ramrod */ + WARN_ON(o->total_pending_num < 0); + WARN_ON(cnt > o->max_cmd_len); + + bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt); + + /* Update a registry size if there are no more pending operations. + * + * We don't want to change the value of the registry size if there are + * pending operations because we want it to always be equal to the + * exact or the approximate number (see bnx2x_mcast_validate_e2()) of + * set bins after the last requested operation in order to properly + * evaluate the size of the next DEL/RESTORE operation. + * + * Note that we update the registry itself during command(s) handling + * - see bnx2x_mcast_set_one_rule_e2(). 
That's because for 57712 we + * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but + * with a limited amount of update commands (per MAC/bin) and we don't + * know in this scope what the actual state of bins configuration is + * going to be after this ramrod. + */ + if (!o->total_pending_num) + bnx2x_mcast_refresh_registry_e2(bp, o); + + /* + * If CLEAR_ONLY was requested - don't send a ramrod and clear + * RAMROD_PENDING status immediately. + */ + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + raw->clear_pending(raw); + return 0; + } else { + /* + * No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). + */ + + /* Send a ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES, + raw->cid, U64_HI(raw->rdata_mapping), + U64_LO(raw->rdata_mapping), + ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return 1; + } +} + +static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int cmd) +{ + /* Mark, that there is a work to do */ + if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) + p->mcast_list_len = 1; + + return 0; +} + +static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int old_num_bins) +{ + /* Do nothing */ +} + +#define BNX2X_57711_SET_MC_FILTER(filter, bit) \ +do { \ + (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ +} while (0) + +static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, + struct bnx2x_mcast_ramrod_params *p, + u32 *mc_filter) +{ + struct bnx2x_mcast_list_elem *mlist_pos; + int bit; + + list_for_each_entry(mlist_pos, &p->mcast_list, link) { + bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac); + BNX2X_57711_SET_MC_FILTER(mc_filter, bit); + + DP(BNX2X_MSG_SP, "About to configure " + BNX2X_MAC_FMT" mcast MAC, bin %d\n", + BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit); + + /* bookkeeping... */ + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, + bit); + } +} + +static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, + u32 *mc_filter) +{ + int bit; + + for (bit = bnx2x_mcast_get_next_bin(o, 0); + bit >= 0; + bit = bnx2x_mcast_get_next_bin(o, bit + 1)) { + BNX2X_57711_SET_MC_FILTER(mc_filter, bit); + DP(BNX2X_MSG_SP, "About to set bin %d\n", bit); + } +} + +/* On 57711 we write the multicast MACs' aproximate match + * table by directly into the TSTORM's internal RAM. So we don't + * really need to handle any tricks to make it work. + */ +static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int cmd) +{ + int i; + struct bnx2x_mcast_obj *o = p->mcast_obj; + struct bnx2x_raw_obj *r = &o->raw; + + /* If CLEAR_ONLY has been requested - clear the registry + * and clear a pending bit. + */ + if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + u32 mc_filter[MC_HASH_SIZE] = {0}; + + /* Set the multicast filter bits before writing it into + * the internal memory. 
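+ * Each bit in mc_filter[] corresponds to one of the 256 CRC32c
+ * based bins (see bnx2x_mcast_bin_from_mac()); the table is
+ * written out to the MC_HASH registers below.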
+ */
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ DP(BNX2X_MSG_SP, "Invalidating multicast "
+ "MACs configuration\n");
+
+ /* clear the registry */
+ memset(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set the mcast filter in the internal memory */
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
+ } else
+ /* clear the registry */
+ memset(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+
+ /* We are done */
+ r->clear_pending(r);
+
+ return 0;
+}
+
+static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case BNX2X_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+ /* Don't break */
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case BNX2X_MCAST_CMD_RESTORE:
+ p->mcast_list_len = reg_sz;
+ DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+ cmd, p->mcast_list_len);
+ break;
+
+ case BNX2X_MCAST_CMD_ADD:
+ case BNX2X_MCAST_CMD_CONT:
+ /* Multicast MACs on 57710 are configured as unicast MACs and
+ * there is only a limited number of CAM entries for that
+ * matter.
+ */
+ if (p->mcast_list_len > o->max_cmd_len) {
+ BNX2X_ERR("Can't configure more than %d multicast MACs "
+ "on 57710\n", o->max_cmd_len);
+ return -EINVAL;
+ }
+ /* Every configured MAC should be cleared if DEL command is
+ * called. Only the last ADD command is relevant since every
+ * ADD command overrides the previous configuration.
+ */
+ DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+ if (p->mcast_list_len > 0)
+ o->set_registry_size(o, p->mcast_list_len);
+
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+
+ }
+
+ /* We want to ensure that commands are executed one by one for 57710.
+ * Therefore each non-empty command will consume o->max_cmd_len.
+ */
+ if (p->mcast_list_len)
+ o->total_pending_num += o->max_cmd_len;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_macs)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_macs);
+
+ /* If the current command hasn't been handled yet, getting here
+ * means it is meant to be dropped, and we have to update the
+ * number of outstanding MACs accordingly.
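+ * bnx2x_mcast_validate_e1() charged o->max_cmd_len for any
+ * non-empty command, so the same amount is returned here.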
+ */ + if (p->mcast_list_len) + o->total_pending_num -= o->max_cmd_len; +} + +static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, int idx, + union bnx2x_mcast_config_data *cfg_data, + int cmd) +{ + struct bnx2x_raw_obj *r = &o->raw; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(r->rdata); + + /* copy mac */ + if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) { + bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, + &data->config_table[idx].middle_mac_addr, + &data->config_table[idx].lsb_mac_addr, + cfg_data->mac); + + data->config_table[idx].vlan_id = 0; + data->config_table[idx].pf_id = r->func_id; + data->config_table[idx].clients_bit_vector = + cpu_to_le32(1 << r->cl_id); + + SET_FLAG(data->config_table[idx].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_SET); + } +} + +/** + * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd + * + * @bp: device handle + * @p: + * @len: number of rules to handle + */ +static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + u8 len) +{ + struct bnx2x_raw_obj *r = &p->mcast_obj->raw; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(r->rdata); + + u8 offset = (CHIP_REV_IS_SLOW(bp) ? + BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) : + BNX2X_MAX_MULTICAST*(1 + r->func_id)); + + data->hdr.offset = offset; + data->hdr.client_id = 0xff; + data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) | + (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); + data->hdr.length = len; +} + +/** + * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710 + * + * @bp: device handle + * @o: + * @start_idx: index in the registry to start from + * @rdata_idx: index in the ramrod data to start from + * + * restore command for 57710 is like all other commands - always a stand alone + * command - start_idx and rdata_idx will always be 0. This function will always + * succeed. + * returns -1 to comply with 57712 variant. + */ +static inline int bnx2x_mcast_handle_restore_cmd_e1( + struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, + int *rdata_idx) +{ + struct bnx2x_mcast_mac_elem *elem; + int i = 0; + union bnx2x_mcast_config_data cfg_data = {0}; + + /* go through the registry and configure the MACs from it. 
*/ + list_for_each_entry(elem, &o->registry.exact_match.macs, link) { + cfg_data.mac = &elem->mac[0]; + o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); + + i++; + + DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT + " mcast MAC\n", + BNX2X_MAC_PRN_LIST(cfg_data.mac)); + } + + *rdata_idx = i; + + return -1; +} + + +static inline int bnx2x_mcast_handle_pending_cmds_e1( + struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) +{ + struct bnx2x_pending_mcast_cmd *cmd_pos; + struct bnx2x_mcast_mac_elem *pmac_pos; + struct bnx2x_mcast_obj *o = p->mcast_obj; + union bnx2x_mcast_config_data cfg_data = {0}; + int cnt = 0; + + + /* If nothing to be done - return */ + if (list_empty(&o->pending_cmds_head)) + return 0; + + /* Handle the first command */ + cmd_pos = list_first_entry(&o->pending_cmds_head, + struct bnx2x_pending_mcast_cmd, link); + + switch (cmd_pos->type) { + case BNX2X_MCAST_CMD_ADD: + list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) { + cfg_data.mac = &pmac_pos->mac[0]; + o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT + " mcast MAC\n", + BNX2X_MAC_PRN_LIST(pmac_pos->mac)); + } + break; + + case BNX2X_MCAST_CMD_DEL: + cnt = cmd_pos->data.macs_num; + DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt); + break; + + case BNX2X_MCAST_CMD_RESTORE: + o->hdl_restore(bp, o, 0, &cnt); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); + return -EINVAL; + } + + list_del(&cmd_pos->link); + kfree(cmd_pos); + + return cnt; +} + +/** + * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr(). + * + * @fw_hi: + * @fw_mid: + * @fw_lo: + * @mac: + */ +static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, + __le16 *fw_lo, u8 *mac) +{ + mac[1] = ((u8 *)fw_hi)[0]; + mac[0] = ((u8 *)fw_hi)[1]; + mac[3] = ((u8 *)fw_mid)[0]; + mac[2] = ((u8 *)fw_mid)[1]; + mac[5] = ((u8 *)fw_lo)[0]; + mac[4] = ((u8 *)fw_lo)[1]; +} + +/** + * bnx2x_mcast_refresh_registry_e1 - + * + * @bp: device handle + * @cnt: + * + * Check the ramrod data first entry flag to see if it's a DELETE or ADD command + * and update the registry correspondingly: if ADD - allocate a memory and add + * the entries to the registry (list), if DELETE - clear the registry and free + * the memory. 
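+ *
+ * Note that the ADD path allocates all the new registry entries with
+ * a single kzalloc(), so the DEL path frees the whole block by
+ * freeing the first list entry.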
+ */ +static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, + struct bnx2x_mcast_obj *o) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct bnx2x_mcast_mac_elem *elem; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(raw->rdata); + + /* If first entry contains a SET bit - the command was ADD, + * otherwise - DEL_ALL + */ + if (GET_FLAG(data->config_table[0].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { + int i, len = data->hdr.length; + + /* Break if it was a RESTORE command */ + if (!list_empty(&o->registry.exact_match.macs)) + return 0; + + elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC); + if (!elem) { + BNX2X_ERR("Failed to allocate registry memory\n"); + return -ENOMEM; + } + + for (i = 0; i < len; i++, elem++) { + bnx2x_get_fw_mac_addr( + &data->config_table[i].msb_mac_addr, + &data->config_table[i].middle_mac_addr, + &data->config_table[i].lsb_mac_addr, + elem->mac); + DP(BNX2X_MSG_SP, "Adding registry entry for [" + BNX2X_MAC_FMT"]\n", + BNX2X_MAC_PRN_LIST(elem->mac)); + list_add_tail(&elem->link, + &o->registry.exact_match.macs); + } + } else { + elem = list_first_entry(&o->registry.exact_match.macs, + struct bnx2x_mcast_mac_elem, link); + DP(BNX2X_MSG_SP, "Deleting a registry\n"); + kfree(elem); + INIT_LIST_HEAD(&o->registry.exact_match.macs); + } + + return 0; +} + +static int bnx2x_mcast_setup_e1(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int cmd) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + struct bnx2x_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(raw->rdata); + int cnt = 0, i, rc; + + /* Reset the ramrod data buffer */ + memset(data, 0, sizeof(*data)); + + /* First set all entries as invalid */ + for (i = 0; i < o->max_cmd_len ; i++) + SET_FLAG(data->config_table[i].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_INVALIDATE); + + /* Handle pending commands first */ + cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); + + /* If there are no more pending commands - clear SCHEDULED state */ + if (list_empty(&o->pending_cmds_head)) + o->clear_sched(o); + + /* The below may be true iff there were no pending commands */ + if (!cnt) + cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); + + /* For 57710 every command has o->max_cmd_len length to ensure that + * commands are done one at a time. + */ + o->total_pending_num -= o->max_cmd_len; + + /* send a ramrod */ + + WARN_ON(cnt > o->max_cmd_len); + + /* Set ramrod header (in particular, a number of entries to update) */ + bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); + + /* update a registry: we need the registry contents to be always up + * to date in order to be able to execute a RESTORE opcode. Here + * we use the fact that for 57710 we sent one command at a time + * hence we may take the registry update out of the command handling + * and do it in a simpler way here. + */ + rc = bnx2x_mcast_refresh_registry_e1(bp, o); + if (rc) + return rc; + + /* + * If CLEAR_ONLY was requested - don't send a ramrod and clear + * RAMROD_PENDING status immediately. + */ + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + raw->clear_pending(raw); + return 0; + } else { + /* + * No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
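+ *
+ * Note that unlike the E2 flow, the code below posts the legacy
+ * SET_MAC ramrod rather than the classification rules one.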
+ */ + + /* Send a ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, + U64_HI(raw->rdata_mapping), + U64_LO(raw->rdata_mapping), + ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return 1; + } + +} + +static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) +{ + return o->registry.exact_match.num_macs_set; +} + +static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o) +{ + return o->registry.aprox_match.num_bins_set; +} + +static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o, + int n) +{ + o->registry.exact_match.num_macs_set = n; +} + +static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, + int n) +{ + o->registry.aprox_match.num_bins_set = n; +} + +int bnx2x_config_mcast(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int cmd) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + struct bnx2x_raw_obj *r = &o->raw; + int rc = 0, old_reg_size; + + /* This is needed to recover number of currently configured mcast macs + * in case of failure. + */ + old_reg_size = o->get_registry_size(o); + + /* Do some calculations and checks */ + rc = o->validate(bp, p, cmd); + if (rc) + return rc; + + /* Return if there is no work to do */ + if ((!p->mcast_list_len) && (!o->check_sched(o))) + return 0; + + DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d " + "o->max_cmd_len=%d\n", o->total_pending_num, + p->mcast_list_len, o->max_cmd_len); + + /* Enqueue the current command to the pending list if we can't complete + * it in the current iteration + */ + if (r->check_pending(r) || + ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { + rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); + if (rc < 0) + goto error_exit1; + + /* As long as the current command is in a command list we + * don't need to handle it separately. 
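+ * Zeroing mcast_list_len below also keeps the chip specific
+ * config_mcast() callback from handling it as a current command.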
+ */ + p->mcast_list_len = 0; + } + + if (!r->check_pending(r)) { + + /* Set 'pending' state */ + r->set_pending(r); + + /* Configure the new classification in the chip */ + rc = o->config_mcast(bp, p, cmd); + if (rc < 0) + goto error_exit2; + + /* Wait for a ramrod completion if was requested */ + if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) + rc = o->wait_comp(bp, o); + } + + return rc; + +error_exit2: + r->clear_pending(r); + +error_exit1: + o->revert(bp, p, old_reg_size); + + return rc; +} + +static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) +{ + smp_mb__before_clear_bit(); + clear_bit(o->sched_state, o->raw.pstate); + smp_mb__after_clear_bit(); +} + +static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) +{ + smp_mb__before_clear_bit(); + set_bit(o->sched_state, o->raw.pstate); + smp_mb__after_clear_bit(); +} + +static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) +{ + return !!test_bit(o->sched_state, o->raw.pstate); +} + +static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o) +{ + return o->raw.check_pending(&o->raw) || o->check_sched(o); +} + +void bnx2x_init_mcast_obj(struct bnx2x *bp, + struct bnx2x_mcast_obj *mcast_obj, + u8 mcast_cl_id, u32 mcast_cid, u8 func_id, + u8 engine_id, void *rdata, dma_addr_t rdata_mapping, + int state, unsigned long *pstate, bnx2x_obj_type type) +{ + memset(mcast_obj, 0, sizeof(*mcast_obj)); + + bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, + rdata, rdata_mapping, state, pstate, type); + + mcast_obj->engine_id = engine_id; + + INIT_LIST_HEAD(&mcast_obj->pending_cmds_head); + + mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED; + mcast_obj->check_sched = bnx2x_mcast_check_sched; + mcast_obj->set_sched = bnx2x_mcast_set_sched; + mcast_obj->clear_sched = bnx2x_mcast_clear_sched; + + if (CHIP_IS_E1(bp)) { + mcast_obj->config_mcast = bnx2x_mcast_setup_e1; + mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; + mcast_obj->hdl_restore = + bnx2x_mcast_handle_restore_cmd_e1; + mcast_obj->check_pending = bnx2x_mcast_check_pending; + + if (CHIP_REV_IS_SLOW(bp)) + mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI; + else + mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST; + + mcast_obj->wait_comp = bnx2x_mcast_wait; + mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1; + mcast_obj->validate = bnx2x_mcast_validate_e1; + mcast_obj->revert = bnx2x_mcast_revert_e1; + mcast_obj->get_registry_size = + bnx2x_mcast_get_registry_size_exact; + mcast_obj->set_registry_size = + bnx2x_mcast_set_registry_size_exact; + + /* 57710 is the only chip that uses the exact match for mcast + * at the moment. + */ + INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs); + + } else if (CHIP_IS_E1H(bp)) { + mcast_obj->config_mcast = bnx2x_mcast_setup_e1h; + mcast_obj->enqueue_cmd = NULL; + mcast_obj->hdl_restore = NULL; + mcast_obj->check_pending = bnx2x_mcast_check_pending; + + /* 57711 doesn't send a ramrod, so it has unlimited credit + * for one command. 
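+ * A non-positive max_cmd_len also disables the queue-length check
+ * in bnx2x_config_mcast(), so commands are not held back there.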
+ */ + mcast_obj->max_cmd_len = -1; + mcast_obj->wait_comp = bnx2x_mcast_wait; + mcast_obj->set_one_rule = NULL; + mcast_obj->validate = bnx2x_mcast_validate_e1h; + mcast_obj->revert = bnx2x_mcast_revert_e1h; + mcast_obj->get_registry_size = + bnx2x_mcast_get_registry_size_aprox; + mcast_obj->set_registry_size = + bnx2x_mcast_set_registry_size_aprox; + } else { + mcast_obj->config_mcast = bnx2x_mcast_setup_e2; + mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; + mcast_obj->hdl_restore = + bnx2x_mcast_handle_restore_cmd_e2; + mcast_obj->check_pending = bnx2x_mcast_check_pending; + /* TODO: There should be a proper HSI define for this number!!! + */ + mcast_obj->max_cmd_len = 16; + mcast_obj->wait_comp = bnx2x_mcast_wait; + mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2; + mcast_obj->validate = bnx2x_mcast_validate_e2; + mcast_obj->revert = bnx2x_mcast_revert_e2; + mcast_obj->get_registry_size = + bnx2x_mcast_get_registry_size_aprox; + mcast_obj->set_registry_size = + bnx2x_mcast_set_registry_size_aprox; + } +} + +/*************************** Credit handling **********************************/ + +/** + * atomic_add_ifless - add if the result is less than a given value. + * + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...if (v + a) is less than u. + * + * returns true if (v + a) was less than u, and false otherwise. + * + */ +static inline bool __atomic_add_ifless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + for (;;) { + if (unlikely(c + a >= u)) + return false; + + old = atomic_cmpxchg((v), c, c + a); + if (likely(old == c)) + break; + c = old; + } + + return true; +} + +/** + * atomic_dec_ifmoe - dec if the result is more or equal than a given value. + * + * @v: pointer of type atomic_t + * @a: the amount to dec from v... + * @u: ...if (v - a) is more or equal than u. + * + * returns true if (v - a) was more or equal than u, and false + * otherwise. + */ +static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + for (;;) { + if (unlikely(c - a < u)) + return false; + + old = atomic_cmpxchg((v), c, c - a); + if (likely(old == c)) + break; + c = old; + } + + return true; +} + +static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt) +{ + bool rc; + + smp_mb(); + rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); + smp_mb(); + + return rc; +} + +static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt) +{ + bool rc; + + smp_mb(); + + /* Don't let to refill if credit + cnt > pool_sz */ + rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); + + smp_mb(); + + return rc; +} + +static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o) +{ + int cur_credit; + + smp_mb(); + cur_credit = atomic_read(&o->credit); + + return cur_credit; +} + +static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, + int cnt) +{ + return true; +} + + +static bool bnx2x_credit_pool_get_entry( + struct bnx2x_credit_pool_obj *o, + int *offset) +{ + int idx, vec, i; + + *offset = -1; + + /* Find "internal cam-offset" then add to base for this object... */ + for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) { + + /* Skip the current vector if there are no free entries in it */ + if (!o->pool_mirror[vec]) + continue; + + /* If we've got here we are going to find a free entry */ + for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0; + i < BIT_VEC64_ELEM_SZ; idx++, i++) + + if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { + /* Got one!! 
*/ + BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); + *offset = o->base_pool_offset + idx; + return true; + } + } + + return false; +} + +static bool bnx2x_credit_pool_put_entry( + struct bnx2x_credit_pool_obj *o, + int offset) +{ + if (offset < o->base_pool_offset) + return false; + + offset -= o->base_pool_offset; + + if (offset >= o->pool_sz) + return false; + + /* Return the entry to the pool */ + BIT_VEC64_SET_BIT(o->pool_mirror, offset); + + return true; +} + +static bool bnx2x_credit_pool_put_entry_always_true( + struct bnx2x_credit_pool_obj *o, + int offset) +{ + return true; +} + +static bool bnx2x_credit_pool_get_entry_always_true( + struct bnx2x_credit_pool_obj *o, + int *offset) +{ + *offset = -1; + return true; +} +/** + * bnx2x_init_credit_pool - initialize credit pool internals. + * + * @p: + * @base: Base entry in the CAM to use. + * @credit: pool size. + * + * If base is negative no CAM entries handling will be performed. + * If credit is negative pool operations will always succeed (unlimited pool). + * + */ +static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, + int base, int credit) +{ + /* Zero the object first */ + memset(p, 0, sizeof(*p)); + + /* Set the table to all 1s */ + memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); + + /* Init a pool as full */ + atomic_set(&p->credit, credit); + + /* The total poll size */ + p->pool_sz = credit; + + p->base_pool_offset = base; + + /* Commit the change */ + smp_mb(); + + p->check = bnx2x_credit_pool_check; + + /* if pool credit is negative - disable the checks */ + if (credit >= 0) { + p->put = bnx2x_credit_pool_put; + p->get = bnx2x_credit_pool_get; + p->put_entry = bnx2x_credit_pool_put_entry; + p->get_entry = bnx2x_credit_pool_get_entry; + } else { + p->put = bnx2x_credit_pool_always_true; + p->get = bnx2x_credit_pool_always_true; + p->put_entry = bnx2x_credit_pool_put_entry_always_true; + p->get_entry = bnx2x_credit_pool_get_entry_always_true; + } + + /* If base is negative - disable entries handling */ + if (base < 0) { + p->put_entry = bnx2x_credit_pool_put_entry_always_true; + p->get_entry = bnx2x_credit_pool_get_entry_always_true; + } +} + +void bnx2x_init_mac_credit_pool(struct bnx2x *bp, + struct bnx2x_credit_pool_obj *p, u8 func_id, + u8 func_num) +{ +/* TODO: this will be defined in consts as well... */ +#define BNX2X_CAM_SIZE_EMUL 5 + + int cam_sz; + + if (CHIP_IS_E1(bp)) { + /* In E1, Multicast is saved in cam... */ + if (!CHIP_REV_IS_SLOW(bp)) + cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST; + else + cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI; + + bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); + + } else if (CHIP_IS_E1H(bp)) { + /* CAM credit is equaly divided between all active functions + * on the PORT!. + */ + if ((func_num > 0)) { + if (!CHIP_REV_IS_SLOW(bp)) + cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); + else + cam_sz = BNX2X_CAM_SIZE_EMUL; + bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); + } else { + /* this should never happen! Block MAC operations. */ + bnx2x_init_credit_pool(p, 0, 0); + } + + } else { + + /* + * CAM credit is equaly divided between all active functions + * on the PATH. + */ + if ((func_num > 0)) { + if (!CHIP_REV_IS_SLOW(bp)) + cam_sz = (MAX_MAC_CREDIT_E2 / func_num); + else + cam_sz = BNX2X_CAM_SIZE_EMUL; + + /* + * No need for CAM entries handling for 57712 and + * newer. + */ + bnx2x_init_credit_pool(p, -1, cam_sz); + } else { + /* this should never happen! Block MAC operations. 
*/ + bnx2x_init_credit_pool(p, 0, 0); + } + + } +} + +void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, + struct bnx2x_credit_pool_obj *p, + u8 func_id, + u8 func_num) +{ + if (CHIP_IS_E1x(bp)) { + /* + * There is no VLAN credit in HW on 57710 and 57711 only + * MAC / MAC-VLAN can be set + */ + bnx2x_init_credit_pool(p, 0, -1); + } else { + /* + * CAM credit is equaly divided between all active functions + * on the PATH. + */ + if (func_num > 0) { + int credit = MAX_VLAN_CREDIT_E2 / func_num; + bnx2x_init_credit_pool(p, func_id * credit, credit); + } else + /* this should never happen! Block VLAN operations. */ + bnx2x_init_credit_pool(p, 0, 0); + } +} + +/****************** RSS Configuration ******************/ +/** + * bnx2x_debug_print_ind_table - prints the indirection table configuration. + * + * @bp: driver hanlde + * @p: pointer to rss configuration + * + * Prints it when NETIF_MSG_IFUP debug level is configured. + */ +static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, + struct bnx2x_config_rss_params *p) +{ + int i; + + DP(BNX2X_MSG_SP, "Setting indirection table to:\n"); + DP(BNX2X_MSG_SP, "0x0000: "); + for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { + DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]); + + /* Print 4 bytes in a line */ + if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && + (((i + 1) & 0x3) == 0)) { + DP_CONT(BNX2X_MSG_SP, "\n"); + DP(BNX2X_MSG_SP, "0x%04x: ", i + 1); + } + } + + DP_CONT(BNX2X_MSG_SP, "\n"); +} + +/** + * bnx2x_setup_rss - configure RSS + * + * @bp: device handle + * @p: rss configuration + * + * sends on UPDATE ramrod for that matter. + */ +static int bnx2x_setup_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *p) +{ + struct bnx2x_rss_config_obj *o = p->rss_obj; + struct bnx2x_raw_obj *r = &o->raw; + struct eth_rss_update_ramrod_data *data = + (struct eth_rss_update_ramrod_data *)(r->rdata); + u8 rss_mode = 0; + int rc; + + memset(data, 0, sizeof(*data)); + + DP(BNX2X_MSG_SP, "Configuring RSS\n"); + + /* Set an echo field */ + data->echo = (r->cid & BNX2X_SWCID_MASK) | + (r->state << BNX2X_SWCID_SHIFT); + + /* RSS mode */ + if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_DISABLED; + else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_REGULAR; + else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_VLAN_PRI; + else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_E1HOV_PRI; + else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_IP_DSCP; + + data->rss_mode = rss_mode; + + DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode); + + /* RSS capabilities */ + if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; + + /* Hashing mask */ + data->rss_result_mask = p->rss_result_mask; + + /* RSS engine ID */ + data->rss_engine_id = o->engine_id; + + DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id); + + /* Indirection table */ + memcpy(data->indirection_table, p->ind_table, + T_ETH_INDIRECTION_TABLE_SIZE); + + /* Remember the last configuration */ + 
memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + + /* Print the indirection table */ + if (netif_msg_ifup(bp)) + bnx2x_debug_print_ind_table(bp, p); + + /* RSS keys */ + if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { + memcpy(&data->rss_key[0], &p->rss_key[0], + sizeof(data->rss_key)); + data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; + } + + /* + * No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). + */ + + /* Send a ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid, + U64_HI(r->rdata_mapping), + U64_LO(r->rdata_mapping), + ETH_CONNECTION_TYPE); + + if (rc < 0) + return rc; + + return 1; +} + +void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, + u8 *ind_table) +{ + memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); +} + +int bnx2x_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *p) +{ + int rc; + struct bnx2x_rss_config_obj *o = p->rss_obj; + struct bnx2x_raw_obj *r = &o->raw; + + /* Do nothing if only driver cleanup was requested */ + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) + return 0; + + r->set_pending(r); + + rc = o->config_rss(bp, p); + if (rc < 0) { + r->clear_pending(r); + return rc; + } + + if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) + rc = r->wait_comp(bp, r); + + return rc; +} + + +void bnx2x_init_rss_config_obj(struct bnx2x *bp, + struct bnx2x_rss_config_obj *rss_obj, + u8 cl_id, u32 cid, u8 func_id, u8 engine_id, + void *rdata, dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + bnx2x_obj_type type) +{ + bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type); + + rss_obj->engine_id = engine_id; + rss_obj->config_rss = bnx2x_setup_rss; +} + +/********************** Queue state object ***********************************/ + +/** + * bnx2x_queue_state_change - perform Queue state change transition + * + * @bp: device handle + * @params: parameters to perform the transition + * + * returns 0 in case of successfully completed transition, negative error + * code in case of failure, positive (EBUSY) value if there is a completion + * to that is still pending (possible only if RAMROD_COMP_WAIT is + * not set in params->ramrod_flags for asynchronous commands). 
+ * + */ +int bnx2x_queue_state_change(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + int rc, pending_bit; + unsigned long *pending = &o->pending; + + /* Check that the requested transition is legal */ + if (o->check_transition(bp, o, params)) + return -EINVAL; + + /* Set "pending" bit */ + pending_bit = o->set_pending(o, params); + + /* Don't send a command if only driver cleanup was requested */ + if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) + o->complete_cmd(bp, o, pending_bit); + else { + /* Send a ramrod */ + rc = o->send_cmd(bp, params); + if (rc) { + o->next_state = BNX2X_Q_STATE_MAX; + clear_bit(pending_bit, pending); + smp_mb__after_clear_bit(); + return rc; + } + + if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { + rc = o->wait_comp(bp, o, pending_bit); + if (rc) + return rc; + + return 0; + } + } + + return !!test_bit(pending_bit, pending); +} + + +static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, + struct bnx2x_queue_state_params *params) +{ + enum bnx2x_queue_cmd cmd = params->cmd, bit; + + /* ACTIVATE and DEACTIVATE commands are implemented on top of + * UPDATE command. + */ + if ((cmd == BNX2X_Q_CMD_ACTIVATE) || + (cmd == BNX2X_Q_CMD_DEACTIVATE)) + bit = BNX2X_Q_CMD_UPDATE; + else + bit = cmd; + + set_bit(bit, &obj->pending); + return bit; +} + +static int bnx2x_queue_wait_comp(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + enum bnx2x_queue_cmd cmd) +{ + return bnx2x_state_wait(bp, cmd, &o->pending); +} + +/** + * bnx2x_queue_comp_cmd - complete the state change command. + * + * @bp: device handle + * @o: + * @cmd: + * + * Checks that the arrived completion is expected. + */ +static int bnx2x_queue_comp_cmd(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + enum bnx2x_queue_cmd cmd) +{ + unsigned long cur_pending = o->pending; + + if (!test_and_clear_bit(cmd, &cur_pending)) { + BNX2X_ERR("Bad MC reply %d for queue %d in state %d " + "pending 0x%lx, next_state %d\n", cmd, + o->cids[BNX2X_PRIMARY_CID_INDEX], + o->state, cur_pending, o->next_state); + return -EINVAL; + } + + if (o->next_tx_only >= o->max_cos) + /* >= becuase tx only must always be smaller than cos since the + * primary connection suports COS 0 + */ + BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", + o->next_tx_only, o->max_cos); + + DP(BNX2X_MSG_SP, "Completing command %d for queue %d, " + "setting state to %d\n", cmd, + o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); + + if (o->next_tx_only) /* print num tx-only if any exist */ + DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d", + o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); + + o->state = o->next_state; + o->num_tx_only = o->next_tx_only; + o->next_state = BNX2X_Q_STATE_MAX; + + /* It's important that o->state and o->next_state are + * updated before o->pending. 
+ */ + wmb(); + + clear_bit(cmd, &o->pending); + smp_mb__after_clear_bit(); + + return 0; +} + +static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, + struct bnx2x_queue_state_params *cmd_params, + struct client_init_ramrod_data *data) +{ + struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; + + /* Rx data */ + + /* IPv6 TPA supported for E2 and above only */ + data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) * + CLIENT_INIT_RX_DATA_TPA_EN_IPV6; +} + +static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + struct bnx2x_general_setup_params *params, + struct client_init_general_data *gen_data, + unsigned long *flags) +{ + gen_data->client_id = o->cl_id; + + if (test_bit(BNX2X_Q_FLG_STATS, flags)) { + gen_data->statistics_counter_id = + params->stat_id; + gen_data->statistics_en_flg = 1; + gen_data->statistics_zero_flg = + test_bit(BNX2X_Q_FLG_ZERO_STATS, flags); + } else + gen_data->statistics_counter_id = + DISABLE_STATISTIC_COUNTER_ID_VALUE; + + gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags); + gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags); + gen_data->sp_client_id = params->spcl_id; + gen_data->mtu = cpu_to_le16(params->mtu); + gen_data->func_id = o->func_id; + + + gen_data->cos = params->cos; + + gen_data->traffic_type = + test_bit(BNX2X_Q_FLG_FCOE, flags) ? + LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; + + DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d", + gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); +} + +static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, + struct bnx2x_txq_setup_params *params, + struct client_init_tx_data *tx_data, + unsigned long *flags) +{ + tx_data->enforce_security_flg = + test_bit(BNX2X_Q_FLG_TX_SEC, flags); + tx_data->default_vlan = + cpu_to_le16(params->default_vlan); + tx_data->default_vlan_flg = + test_bit(BNX2X_Q_FLG_DEF_VLAN, flags); + tx_data->tx_switching_flg = + test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); + tx_data->anti_spoofing_flg = + test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); + tx_data->tx_status_block_id = params->fw_sb_id; + tx_data->tx_sb_index_number = params->sb_cq_index; + tx_data->tss_leading_client_id = params->tss_leading_cl_id; + + tx_data->tx_bd_page_base.lo = + cpu_to_le32(U64_LO(params->dscr_map)); + tx_data->tx_bd_page_base.hi = + cpu_to_le32(U64_HI(params->dscr_map)); + + /* Don't configure any Tx switching mode during queue SETUP */ + tx_data->state = 0; +} + +static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o, + struct rxq_pause_params *params, + struct client_init_rx_data *rx_data) +{ + /* flow control data */ + rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo); + rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi); + rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo); + rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi); + rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo); + rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi); + rx_data->rx_cos_mask = cpu_to_le16(params->pri_map); +} + +static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, + struct bnx2x_rxq_setup_params *params, + struct client_init_rx_data *rx_data, + unsigned long *flags) +{ + /* Rx data */ + rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) * + CLIENT_INIT_RX_DATA_TPA_EN_IPV4; + rx_data->vmqueue_mode_en_flg = 0; + + rx_data->cache_line_alignment_log_size = + params->cache_line_log; + rx_data->enable_dynamic_hc = + 
test_bit(BNX2X_Q_FLG_DHC, flags); + rx_data->max_sges_for_packet = params->max_sges_pkt; + rx_data->client_qzone_id = params->cl_qzone_id; + rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz); + + /* Always start in DROP_ALL mode */ + rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | + CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); + + /* We don't set drop flags */ + rx_data->drop_ip_cs_err_flg = 0; + rx_data->drop_tcp_cs_err_flg = 0; + rx_data->drop_ttl0_flg = 0; + rx_data->drop_udp_cs_err_flg = 0; + rx_data->inner_vlan_removal_enable_flg = + test_bit(BNX2X_Q_FLG_VLAN, flags); + rx_data->outer_vlan_removal_enable_flg = + test_bit(BNX2X_Q_FLG_OV, flags); + rx_data->status_block_id = params->fw_sb_id; + rx_data->rx_sb_index_number = params->sb_cq_index; + rx_data->max_tpa_queues = params->max_tpa_queues; + rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz); + rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz); + rx_data->bd_page_base.lo = + cpu_to_le32(U64_LO(params->dscr_map)); + rx_data->bd_page_base.hi = + cpu_to_le32(U64_HI(params->dscr_map)); + rx_data->sge_page_base.lo = + cpu_to_le32(U64_LO(params->sge_map)); + rx_data->sge_page_base.hi = + cpu_to_le32(U64_HI(params->sge_map)); + rx_data->cqe_page_base.lo = + cpu_to_le32(U64_LO(params->rcq_map)); + rx_data->cqe_page_base.hi = + cpu_to_le32(U64_HI(params->rcq_map)); + rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags); + + if (test_bit(BNX2X_Q_FLG_MCAST, flags)) { + rx_data->approx_mcast_engine_id = o->func_id; + rx_data->is_approx_mcast = 1; + } + + rx_data->rss_engine_id = params->rss_engine_id; + + /* silent vlan removal */ + rx_data->silent_vlan_removal_flg = + test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags); + rx_data->silent_vlan_value = + cpu_to_le16(params->silent_removal_value); + rx_data->silent_vlan_mask = + cpu_to_le16(params->silent_removal_mask); +} + +/* initialize the general, tx and rx parts of a queue object */ +static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, + struct bnx2x_queue_state_params *cmd_params, + struct client_init_ramrod_data *data) +{ + bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, + &cmd_params->params.setup.gen_params, + &data->general, + &cmd_params->params.setup.flags); + + bnx2x_q_fill_init_tx_data(cmd_params->q_obj, + &cmd_params->params.setup.txq_params, + &data->tx, + &cmd_params->params.setup.flags); + + bnx2x_q_fill_init_rx_data(cmd_params->q_obj, + &cmd_params->params.setup.rxq_params, + &data->rx, + &cmd_params->params.setup.flags); + + bnx2x_q_fill_init_pause_data(cmd_params->q_obj, + &cmd_params->params.setup.pause_params, + &data->rx); +} + +/* initialize the general and tx parts of a tx-only queue object */ +static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, + struct bnx2x_queue_state_params *cmd_params, + struct tx_queue_init_ramrod_data *data) +{ + bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, + &cmd_params->params.tx_only.gen_params, + &data->general, + &cmd_params->params.tx_only.flags); + + bnx2x_q_fill_init_tx_data(cmd_params->q_obj, + &cmd_params->params.tx_only.txq_params, + &data->tx, + &cmd_params->params.tx_only.flags); + + DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x", cmd_params->q_obj->cids[0], + data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi); +} + +/** + * bnx2x_q_init - init HW/FW queue + * + * @bp: device handle + * @params: + * + * HW/FW initial Queue configuration: + * - HC: Rx and Tx + * - CDU context validation + * + */ +static inline int bnx2x_q_init(struct bnx2x *bp, + struct
bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct bnx2x_queue_init_params *init = &params->params.init; + u16 hc_usec; + u8 cos; + + /* Tx HC configuration */ + if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) && + test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) { + hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; + + bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id, + init->tx.sb_cq_index, + !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags), + hc_usec); + } + + /* Rx HC configuration */ + if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) && + test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) { + hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; + + bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id, + init->rx.sb_cq_index, + !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags), + hc_usec); + } + + /* Set CDU context validation values */ + for (cos = 0; cos < o->max_cos; cos++) { + DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d", + o->cids[cos], cos); + DP(BNX2X_MSG_SP, "context pointer %p", init->cxts[cos]); + bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]); + } + + /* As no ramrod is sent, complete the command immediately */ + o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); + + mmiowb(); + smp_mb(); + + return 0; +} + +static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct client_init_ramrod_data *rdata = + (struct client_init_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_setup_data_cmn(bp, params, rdata); + + /* + * No need for an explicit memory barrier here: we need to ensure the + * ordering of writing to the SPQ element and updating of the SPQ + * producer, which involves a memory read, and a full memory barrier + * is already present there (inside bnx2x_sp_post()). + */ + + return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], + U64_HI(data_mapping), + U64_LO(data_mapping), ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct client_init_ramrod_data *rdata = + (struct client_init_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_setup_data_cmn(bp, params, rdata); + bnx2x_q_fill_setup_data_e2(bp, params, rdata); + + /* + * No need for an explicit memory barrier here: we need to ensure the + * ordering of writing to the SPQ element and updating of the SPQ + * producer, which involves a memory read, and a full memory barrier + * is already present there (inside bnx2x_sp_post()).
+ */ + + return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], + U64_HI(data_mapping), + U64_LO(data_mapping), ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct tx_queue_init_ramrod_data *rdata = + (struct tx_queue_init_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; + struct bnx2x_queue_setup_tx_only_params *tx_only_params = + &params->params.tx_only; + u8 cid_index = tx_only_params->cid_index; + + if (cid_index >= o->max_cos) { + BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", + o->cl_id, cid_index); + return -EINVAL; + } + + DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d", + tx_only_params->gen_params.cos, + tx_only_params->gen_params.spcl_id); + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_setup_tx_only(bp, params, rdata); + + DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d," + "sp-client id %d, cos %d", + o->cids[cid_index], + rdata->general.client_id, + rdata->general.sp_client_id, rdata->general.cos); + + /* + * No need for an explicit memory barrier here: we need to ensure the + * ordering of writing to the SPQ element and updating of the SPQ + * producer, which involves a memory read, and a full memory barrier + * is already present there (inside bnx2x_sp_post()). + */ + + return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], + U64_HI(data_mapping), + U64_LO(data_mapping), ETH_CONNECTION_TYPE); +} + +static void bnx2x_q_fill_update_data(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj, + struct bnx2x_queue_update_params *params, + struct client_update_ramrod_data *data) +{ + /* Client ID of the client to update */ + data->client_id = obj->cl_id; + + /* Function ID of the client to update */ + data->func_id = obj->func_id; + + /* Default VLAN value */ + data->default_vlan = cpu_to_le16(params->def_vlan); + + /* Inner VLAN stripping */ + data->inner_vlan_removal_enable_flg = + test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags); + data->inner_vlan_removal_change_flg = + test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, + &params->update_flags); + + /* Outer VLAN stripping */ + data->outer_vlan_removal_enable_flg = + test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags); + data->outer_vlan_removal_change_flg = + test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, + &params->update_flags); + + /* Drop packets that have a source MAC that doesn't belong to this + * Queue.
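+ * As with the other pairs in this function, the _CHNG flag tells the + * FW whether the corresponding setting is being changed by this UPDATE.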
+ */ + data->anti_spoofing_enable_flg = + test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags); + data->anti_spoofing_change_flg = + test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags); + + /* Activate/Deactivate */ + data->activate_flg = + test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags); + data->activate_change_flg = + test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags); + + /* Enable default VLAN */ + data->default_vlan_enable_flg = + test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags); + data->default_vlan_change_flg = + test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + &params->update_flags); + + /* silent vlan removal */ + data->silent_vlan_change_flg = + test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &params->update_flags); + data->silent_vlan_removal_flg = + test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags); + data->silent_vlan_value = cpu_to_le16(params->silent_removal_value); + data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); +} + +static inline int bnx2x_q_send_update(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct client_update_ramrod_data *rdata = + (struct client_update_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_queue_update_params *update_params = + &params->params.update; + u8 cid_index = update_params->cid_index; + + if (cid_index >= o->max_cos) { + BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", + o->cl_id, cid_index); + return -EINVAL; + } + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_update_data(bp, o, update_params, rdata); + + /* + * No need for an explicit memory barrier here: we need to ensure the + * ordering of writing to the SPQ element and updating of the SPQ + * producer, which involves a memory read, and a full memory barrier + * is already present there (inside bnx2x_sp_post()). + */ + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, + o->cids[cid_index], U64_HI(data_mapping), + U64_LO(data_mapping), ETH_CONNECTION_TYPE); +} + +/** + * bnx2x_q_send_deactivate - send DEACTIVATE command + * + * @bp: device handle + * @params: + * + * implemented using the UPDATE command. + */ +static inline int bnx2x_q_send_deactivate(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_update_params *update = &params->params.update; + + memset(update, 0, sizeof(*update)); + + __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); + + return bnx2x_q_send_update(bp, params); +} + +/** + * bnx2x_q_send_activate - send ACTIVATE command + * + * @bp: device handle + * @params: + * + * implemented using the UPDATE command. + */ +static inline int bnx2x_q_send_activate(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_update_params *update = &params->params.update; + + memset(update, 0, sizeof(*update)); + + __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags); + __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); + + return bnx2x_q_send_update(bp, params); +} + +static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + /* TODO: Not implemented yet.
*/ + return -1; +} + +static inline int bnx2x_q_send_halt(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, + o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id, + ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + u8 cid_idx = params->params.cfc_del.cid_index; + + if (cid_idx >= o->max_cos) { + BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", + o->cl_id, cid_idx); + return -EINVAL; + } + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, + o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_terminate(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + u8 cid_index = params->params.terminate.cid_index; + + if (cid_index >= o->max_cos) { + BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", + o->cl_id, cid_index); + return -EINVAL; + } + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, + o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_empty(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, + o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0, + ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + switch (params->cmd) { + case BNX2X_Q_CMD_INIT: + return bnx2x_q_init(bp, params); + case BNX2X_Q_CMD_SETUP_TX_ONLY: + return bnx2x_q_send_setup_tx_only(bp, params); + case BNX2X_Q_CMD_DEACTIVATE: + return bnx2x_q_send_deactivate(bp, params); + case BNX2X_Q_CMD_ACTIVATE: + return bnx2x_q_send_activate(bp, params); + case BNX2X_Q_CMD_UPDATE: + return bnx2x_q_send_update(bp, params); + case BNX2X_Q_CMD_UPDATE_TPA: + return bnx2x_q_send_update_tpa(bp, params); + case BNX2X_Q_CMD_HALT: + return bnx2x_q_send_halt(bp, params); + case BNX2X_Q_CMD_CFC_DEL: + return bnx2x_q_send_cfc_del(bp, params); + case BNX2X_Q_CMD_TERMINATE: + return bnx2x_q_send_terminate(bp, params); + case BNX2X_Q_CMD_EMPTY: + return bnx2x_q_send_empty(bp, params); + default: + BNX2X_ERR("Unknown command: %d\n", params->cmd); + return -EINVAL; + } +} + +static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + switch (params->cmd) { + case BNX2X_Q_CMD_SETUP: + return bnx2x_q_send_setup_e1x(bp, params); + case BNX2X_Q_CMD_INIT: + case BNX2X_Q_CMD_SETUP_TX_ONLY: + case BNX2X_Q_CMD_DEACTIVATE: + case BNX2X_Q_CMD_ACTIVATE: + case BNX2X_Q_CMD_UPDATE: + case BNX2X_Q_CMD_UPDATE_TPA: + case BNX2X_Q_CMD_HALT: + case BNX2X_Q_CMD_CFC_DEL: + case BNX2X_Q_CMD_TERMINATE: + case BNX2X_Q_CMD_EMPTY: + return bnx2x_queue_send_cmd_cmn(bp, params); + default: + BNX2X_ERR("Unknown command: %d\n", params->cmd); + return -EINVAL; + } +} + +static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + switch (params->cmd) { + case BNX2X_Q_CMD_SETUP: + return bnx2x_q_send_setup_e2(bp, params); + case BNX2X_Q_CMD_INIT: + case BNX2X_Q_CMD_SETUP_TX_ONLY: + case BNX2X_Q_CMD_DEACTIVATE: + case BNX2X_Q_CMD_ACTIVATE: + case BNX2X_Q_CMD_UPDATE: + case BNX2X_Q_CMD_UPDATE_TPA: + case BNX2X_Q_CMD_HALT: + case BNX2X_Q_CMD_CFC_DEL: + case BNX2X_Q_CMD_TERMINATE: + case BNX2X_Q_CMD_EMPTY: + return 
bnx2x_queue_send_cmd_cmn(bp, params); + default: + BNX2X_ERR("Unknown command: %d\n", params->cmd); + return -EINVAL; + } +} + +/** + * bnx2x_queue_chk_transition - check state machine of a regular Queue + * + * @bp: device handle + * @o: + * @params: + * + * (not Forwarding) + * It both checks if the requested command is legal in the current + * state and, if it's legal, sets a `next_state' in the object + * that will be used in the completion flow to set the `state' + * of the object. + * + * returns 0 if the requested command is a legal transition, + * -EINVAL otherwise. + */ +static int bnx2x_queue_chk_transition(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + struct bnx2x_queue_state_params *params) +{ + enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX; + enum bnx2x_queue_cmd cmd = params->cmd; + struct bnx2x_queue_update_params *update_params = + &params->params.update; + u8 next_tx_only = o->num_tx_only; + + /* + * Forget all commands pending for completion if a driver-only state + * transition has been requested. + */ + if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { + o->pending = 0; + o->next_state = BNX2X_Q_STATE_MAX; + } + + /* + * Don't allow a next state transition if we are in the middle of + * the previous one. + */ + if (o->pending) + return -EBUSY; + + switch (state) { + case BNX2X_Q_STATE_RESET: + if (cmd == BNX2X_Q_CMD_INIT) + next_state = BNX2X_Q_STATE_INITIALIZED; + + break; + case BNX2X_Q_STATE_INITIALIZED: + if (cmd == BNX2X_Q_CMD_SETUP) { + if (test_bit(BNX2X_Q_FLG_ACTIVE, + &params->params.setup.flags)) + next_state = BNX2X_Q_STATE_ACTIVE; + else + next_state = BNX2X_Q_STATE_INACTIVE; + } + + break; + case BNX2X_Q_STATE_ACTIVE: + if (cmd == BNX2X_Q_CMD_DEACTIVATE) + next_state = BNX2X_Q_STATE_INACTIVE; + + else if ((cmd == BNX2X_Q_CMD_EMPTY) || + (cmd == BNX2X_Q_CMD_UPDATE_TPA)) + next_state = BNX2X_Q_STATE_ACTIVE; + + else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { + next_state = BNX2X_Q_STATE_MULTI_COS; + next_tx_only = 1; + } + + else if (cmd == BNX2X_Q_CMD_HALT) + next_state = BNX2X_Q_STATE_STOPPED; + + else if (cmd == BNX2X_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. + */ + if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + !test_bit(BNX2X_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) + next_state = BNX2X_Q_STATE_INACTIVE; + else + next_state = BNX2X_Q_STATE_ACTIVE; + } + + break; + case BNX2X_Q_STATE_MULTI_COS: + if (cmd == BNX2X_Q_CMD_TERMINATE) + next_state = BNX2X_Q_STATE_MCOS_TERMINATED; + + else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { + next_state = BNX2X_Q_STATE_MULTI_COS; + next_tx_only = o->num_tx_only + 1; + } + + else if ((cmd == BNX2X_Q_CMD_EMPTY) || + (cmd == BNX2X_Q_CMD_UPDATE_TPA)) + next_state = BNX2X_Q_STATE_MULTI_COS; + + else if (cmd == BNX2X_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly.
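+ * (an UPDATE that clears the ACTIVATE bit moves the queue to + * INACTIVE; any other UPDATE keeps it in MULTI_COS)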
+ */ + if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + !test_bit(BNX2X_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) + next_state = BNX2X_Q_STATE_INACTIVE; + else + next_state = BNX2X_Q_STATE_MULTI_COS; + } + + break; + case BNX2X_Q_STATE_MCOS_TERMINATED: + if (cmd == BNX2X_Q_CMD_CFC_DEL) { + next_tx_only = o->num_tx_only - 1; + if (next_tx_only == 0) + next_state = BNX2X_Q_STATE_ACTIVE; + else + next_state = BNX2X_Q_STATE_MULTI_COS; + } + + break; + case BNX2X_Q_STATE_INACTIVE: + if (cmd == BNX2X_Q_CMD_ACTIVATE) + next_state = BNX2X_Q_STATE_ACTIVE; + + else if ((cmd == BNX2X_Q_CMD_EMPTY) || + (cmd == BNX2X_Q_CMD_UPDATE_TPA)) + next_state = BNX2X_Q_STATE_INACTIVE; + + else if (cmd == BNX2X_Q_CMD_HALT) + next_state = BNX2X_Q_STATE_STOPPED; + + else if (cmd == BNX2X_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. + */ + if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + test_bit(BNX2X_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) { + if (o->num_tx_only == 0) + next_state = BNX2X_Q_STATE_ACTIVE; + else /* tx only queues exist for this queue */ + next_state = BNX2X_Q_STATE_MULTI_COS; + } else + next_state = BNX2X_Q_STATE_INACTIVE; + } + + break; + case BNX2X_Q_STATE_STOPPED: + if (cmd == BNX2X_Q_CMD_TERMINATE) + next_state = BNX2X_Q_STATE_TERMINATED; + + break; + case BNX2X_Q_STATE_TERMINATED: + if (cmd == BNX2X_Q_CMD_CFC_DEL) + next_state = BNX2X_Q_STATE_RESET; + + break; + default: + BNX2X_ERR("Illegal state: %d\n", state); + } + + /* Transition is assured */ + if (next_state != BNX2X_Q_STATE_MAX) { + DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n", + state, cmd, next_state); + o->next_state = next_state; + o->next_tx_only = next_tx_only; + return 0; + } + + DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd); + + return -EINVAL; +} + +void bnx2x_init_queue_obj(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj, + u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id, + void *rdata, + dma_addr_t rdata_mapping, unsigned long type) +{ + memset(obj, 0, sizeof(*obj)); + + /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */ + BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt); + + memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); + obj->max_cos = cid_cnt; + obj->cl_id = cl_id; + obj->func_id = func_id; + obj->rdata = rdata; + obj->rdata_mapping = rdata_mapping; + obj->type = type; + obj->next_state = BNX2X_Q_STATE_MAX; + + if (CHIP_IS_E1x(bp)) + obj->send_cmd = bnx2x_queue_send_cmd_e1x; + else + obj->send_cmd = bnx2x_queue_send_cmd_e2; + + obj->check_transition = bnx2x_queue_chk_transition; + + obj->complete_cmd = bnx2x_queue_comp_cmd; + obj->wait_comp = bnx2x_queue_wait_comp; + obj->set_pending = bnx2x_queue_set_pending; +} + +void bnx2x_queue_set_cos_cid(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj, + u32 cid, u8 index) +{ + obj->cids[index] = cid; +} + +/********************** Function state object *********************************/ +enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o) +{ + /* in the middle of transaction - return INVALID state */ + if (o->pending) + return BNX2X_F_STATE_MAX; + + /* + * Ensure the order of reading o->pending and o->state: + * o->pending should be read first. + */ + rmb(); + + return o->state; +} + +static int bnx2x_func_wait_comp(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o, + enum bnx2x_func_cmd cmd) +{ + return bnx2x_state_wait(bp, cmd, &o->pending); +} + +/** + *
bnx2x_func_state_change_comp - complete the state machine transition + * + * @bp: device handle + * @o: + * @cmd: + * + * Called on state change transition. Completes the state + * machine transition only - no HW interaction. + */ +static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o, + enum bnx2x_func_cmd cmd) +{ + unsigned long cur_pending = o->pending; + + if (!test_and_clear_bit(cmd, &cur_pending)) { + BNX2X_ERR("Bad MC reply %d for func %d in state %d " + "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp), + o->state, cur_pending, o->next_state); + return -EINVAL; + } + + DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to " + "%d\n", cmd, BP_FUNC(bp), o->next_state); + + o->state = o->next_state; + o->next_state = BNX2X_F_STATE_MAX; + + /* It's important that o->state and o->next_state are + * updated before o->pending. + */ + wmb(); + + clear_bit(cmd, &o->pending); + smp_mb__after_clear_bit(); + + return 0; +} + +/** + * bnx2x_func_comp_cmd - complete the state change command + * + * @bp: device handle + * @o: + * @cmd: + * + * Checks that the arrived completion is expected. + */ +static int bnx2x_func_comp_cmd(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o, + enum bnx2x_func_cmd cmd) +{ + /* Complete the state machine part first, check if it's a + * legal completion. + */ + int rc = bnx2x_func_state_change_comp(bp, o, cmd); + return rc; +} + +/** + * bnx2x_func_chk_transition - perform function state machine transition + * + * @bp: device handle + * @o: + * @params: + * + * It both checks if the requested command is legal in the current + * state and, if it's legal, sets a `next_state' in the object + * that will be used in the completion flow to set the `state' + * of the object. + * + * returns 0 if the requested command is a legal transition, + * -EINVAL otherwise. + */ +static int bnx2x_func_chk_transition(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o, + struct bnx2x_func_state_params *params) +{ + enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; + enum bnx2x_func_cmd cmd = params->cmd; + + /* + * Forget all commands pending for completion if a driver-only state + * transition has been requested. + */ + if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { + o->pending = 0; + o->next_state = BNX2X_F_STATE_MAX; + } + + /* + * Don't allow a next state transition if we are in the middle of + * the previous one.
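+ * (o->pending stays non-zero until the completion of the previously + * sent command arrives)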
+ */ + if (o->pending) + return -EBUSY; + + switch (state) { + case BNX2X_F_STATE_RESET: + if (cmd == BNX2X_F_CMD_HW_INIT) + next_state = BNX2X_F_STATE_INITIALIZED; + + break; + case BNX2X_F_STATE_INITIALIZED: + if (cmd == BNX2X_F_CMD_START) + next_state = BNX2X_F_STATE_STARTED; + + else if (cmd == BNX2X_F_CMD_HW_RESET) + next_state = BNX2X_F_STATE_RESET; + + break; + case BNX2X_F_STATE_STARTED: + if (cmd == BNX2X_F_CMD_STOP) + next_state = BNX2X_F_STATE_INITIALIZED; + else if (cmd == BNX2X_F_CMD_TX_STOP) + next_state = BNX2X_F_STATE_TX_STOPPED; + + break; + case BNX2X_F_STATE_TX_STOPPED: + if (cmd == BNX2X_F_CMD_TX_START) + next_state = BNX2X_F_STATE_STARTED; + + break; + default: + BNX2X_ERR("Unknown state: %d\n", state); + } + + /* Transition is assured */ + if (next_state != BNX2X_F_STATE_MAX) { + DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n", + state, cmd, next_state); + o->next_state = next_state; + return 0; + } + + DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n", + state, cmd); + + return -EINVAL; +} + +/** + * bnx2x_func_init_func - performs HW init at function stage + * + * @bp: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only + * HW blocks. + */ +static inline int bnx2x_func_init_func(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + return drv->init_hw_func(bp); +} + +/** + * bnx2x_func_init_port - performs HW init at port stage + * + * @bp: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and + * FUNCTION-only HW blocks. + * + */ +static inline int bnx2x_func_init_port(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + int rc = drv->init_hw_port(bp); + if (rc) + return rc; + + return bnx2x_func_init_func(bp, drv); +} + +/** + * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage + * + * @bp: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, + * PORT-only and FUNCTION-only HW blocks. + */ +static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + int rc = drv->init_hw_cmn_chip(bp); + if (rc) + return rc; + + return bnx2x_func_init_port(bp, drv); +} + +/** + * bnx2x_func_init_cmn - performs HW init at common stage + * + * @bp: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON, + * PORT-only and FUNCTION-only HW blocks. + */ +static inline int bnx2x_func_init_cmn(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + int rc = drv->init_hw_cmn(bp); + if (rc) + return rc; + + return bnx2x_func_init_port(bp, drv); +} + +static int bnx2x_func_hw_init(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + u32 load_code = params->params.hw_init.load_phase; + struct bnx2x_func_sp_obj *o = params->f_obj; + const struct bnx2x_func_sp_drv_ops *drv = o->drv; + int rc = 0; + + DP(BNX2X_MSG_SP, "function %d load_code %x\n", + BP_ABS_FUNC(bp), load_code); + + /* Prepare buffers for unzipping the FW */ + rc = drv->gunzip_init(bp); + if (rc) + return rc; + + /* Prepare FW */ + rc = drv->init_fw(bp); + if (rc) { + BNX2X_ERR("Error loading firmware\n"); + goto fw_init_err; + } + + /* Handle the beginning of COMMON_XXX phases separately...
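+ * each COMMON level cascades downwards: COMMON_CHIP init also runs the + * PORT and FUNCTION init stages, and PORT init runs the FUNCTION stage + * (see the helpers above).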
*/ + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: + rc = bnx2x_func_init_cmn_chip(bp, drv); + if (rc) + goto init_hw_err; + + break; + case FW_MSG_CODE_DRV_LOAD_COMMON: + rc = bnx2x_func_init_cmn(bp, drv); + if (rc) + goto init_hw_err; + + break; + case FW_MSG_CODE_DRV_LOAD_PORT: + rc = bnx2x_func_init_port(bp, drv); + if (rc) + goto init_hw_err; + + break; + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + rc = bnx2x_func_init_func(bp, drv); + if (rc) + goto init_hw_err; + + break; + default: + BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); + rc = -EINVAL; + } + +init_hw_err: + drv->release_fw(bp); + +fw_init_err: + drv->gunzip_end(bp); + + /* In case of success, complete the command immediately: no ramrods + * have been sent. + */ + if (!rc) + o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT); + + return rc; +} + +/** + * bnx2x_func_reset_func - reset HW at function stage + * + * @bp: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only + * FUNCTION-only HW blocks. + */ +static inline void bnx2x_func_reset_func(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + drv->reset_hw_func(bp); +} + +/** + * bnx2x_func_reset_port - reset HW at port stage + * + * @bp: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset + * FUNCTION-only and PORT-only HW blocks. + * + * !!!IMPORTANT!!! + * + * It's important to call reset_port() before reset_func() as the last thing + * reset_func() does is pf_disable(), thus disabling PGLUE_B, which + * makes any DMAE transactions impossible. + */ +static inline void bnx2x_func_reset_port(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + drv->reset_hw_port(bp); + bnx2x_func_reset_func(bp, drv); +} + +/** + * bnx2x_func_reset_cmn - reset HW at common stage + * + * @bp: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and + * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, + * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. + */ +static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + bnx2x_func_reset_port(bp, drv); + drv->reset_hw_cmn(bp); +} + +static inline int bnx2x_func_hw_reset(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + u32 reset_phase = params->params.hw_reset.reset_phase; + struct bnx2x_func_sp_obj *o = params->f_obj; + const struct bnx2x_func_sp_drv_ops *drv = o->drv; + + DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp), + reset_phase); + + switch (reset_phase) { + case FW_MSG_CODE_DRV_UNLOAD_COMMON: + bnx2x_func_reset_cmn(bp, drv); + break; + case FW_MSG_CODE_DRV_UNLOAD_PORT: + bnx2x_func_reset_port(bp, drv); + break; + case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: + bnx2x_func_reset_func(bp, drv); + break; + default: + BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n", + reset_phase); + break; + } + + /* Complete the command immediately: no ramrods have been sent.
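+ * The reset flows above run synchronously through the driver ops, so + * there is no FW completion to wait for.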
*/ + o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); + + return 0; +} + +static inline int bnx2x_func_send_start(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct function_start_data *rdata = + (struct function_start_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_func_start_params *start_params = &params->params.start; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->function_mode = cpu_to_le16(start_params->mf_mode); + rdata->sd_vlan_tag = start_params->sd_vlan_tag; + rdata->path_id = BP_PATH(bp); + rdata->network_cos_mode = start_params->network_cos_mode; + + /* + * No need for an explicit memory barrier here: we need to ensure the + * ordering of writing to the SPQ element and updating of the SPQ + * producer, which involves a memory read, and a full memory barrier + * is already present there (inside bnx2x_sp_post()). + */ + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_func_send_stop(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, + NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0, + NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct flow_control_configuration *rdata = + (struct flow_control_configuration *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_func_tx_start_params *tx_start_params = + &params->params.tx_start; + int i; + + memset(rdata, 0, sizeof(*rdata)); + + rdata->dcb_enabled = tx_start_params->dcb_enabled; + rdata->dcb_version = tx_start_params->dcb_version; + rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en; + + for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) + rdata->traffic_type_to_priority_cos[i] = + tx_start_params->traffic_type_to_priority_cos[i]; + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static int bnx2x_func_send_cmd(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + switch (params->cmd) { + case BNX2X_F_CMD_HW_INIT: + return bnx2x_func_hw_init(bp, params); + case BNX2X_F_CMD_START: + return bnx2x_func_send_start(bp, params); + case BNX2X_F_CMD_STOP: + return bnx2x_func_send_stop(bp, params); + case BNX2X_F_CMD_HW_RESET: + return bnx2x_func_hw_reset(bp, params); + case BNX2X_F_CMD_TX_STOP: + return bnx2x_func_send_tx_stop(bp, params); + case BNX2X_F_CMD_TX_START: + return bnx2x_func_send_tx_start(bp, params); + default: + BNX2X_ERR("Unknown command: %d\n", params->cmd); + return -EINVAL; + } +} + +void bnx2x_init_func_obj(struct bnx2x *bp, + struct bnx2x_func_sp_obj *obj, + void *rdata, dma_addr_t rdata_mapping, + struct bnx2x_func_sp_drv_ops *drv_iface) +{ + memset(obj, 0, sizeof(*obj)); + + mutex_init(&obj->one_pending_mutex); + + obj->rdata = rdata; + obj->rdata_mapping = rdata_mapping; + + obj->send_cmd = bnx2x_func_send_cmd; + obj->check_transition = bnx2x_func_chk_transition; + obj->complete_cmd = bnx2x_func_comp_cmd; + obj->wait_comp
= bnx2x_func_wait_comp; + + obj->drv = drv_iface; +} + +/** + * bnx2x_func_state_change - perform Function state change transition + * + * @bp: device handle + * @params: parameters to perform the transaction + * + * returns 0 in case of a successfully completed transition, + * negative error code in case of failure, positive + * (EBUSY) value if there is a completion that is + * still pending (possible only if RAMROD_COMP_WAIT is + * not set in params->ramrod_flags for asynchronous + * commands). + */ +int bnx2x_func_state_change(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + int rc; + enum bnx2x_func_cmd cmd = params->cmd; + unsigned long *pending = &o->pending; + + mutex_lock(&o->one_pending_mutex); + + /* Check that the requested transition is legal */ + if (o->check_transition(bp, o, params)) { + mutex_unlock(&o->one_pending_mutex); + return -EINVAL; + } + + /* Set "pending" bit */ + set_bit(cmd, pending); + + /* Don't send a command if only driver cleanup was requested */ + if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { + bnx2x_func_state_change_comp(bp, o, cmd); + mutex_unlock(&o->one_pending_mutex); + } else { + /* Send a ramrod */ + rc = o->send_cmd(bp, params); + + mutex_unlock(&o->one_pending_mutex); + + if (rc) { + o->next_state = BNX2X_F_STATE_MAX; + clear_bit(cmd, pending); + smp_mb__after_clear_bit(); + return rc; + } + + if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) { + rc = o->wait_comp(bp, o, cmd); + if (rc) + return rc; + + return 0; + } + } + + return !!test_bit(cmd, pending); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h new file mode 100644 index 0000000..9a517c2 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -0,0 +1,1297 @@ +/* bnx2x_sp.h: Broadcom Everest network driver. + * + * Copyright 2011 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Maintained by: Eilon Greenstein + * Written by: Vladislav Zolotarov + * + */ +#ifndef BNX2X_SP_VERBS +#define BNX2X_SP_VERBS + +struct bnx2x; +struct eth_context; + +/* Bits representing general command's configuration */ +enum { + RAMROD_TX, + RAMROD_RX, + /* Wait until all pending commands complete */ + RAMROD_COMP_WAIT, + /* Don't send a ramrod, only update a registry */ + RAMROD_DRV_CLR_ONLY, + /* Configure HW according to the current object state */ + RAMROD_RESTORE, + /* Execute the next command now */ + RAMROD_EXEC, + /* + * Don't add a new command and continue execution of postponed + * commands. If not set a new command will be added to the + * pending commands list.
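+ * (see the complete() callback of bnx2x_vlan_mac_obj below: when + * RAMROD_CONT is set there, the next bulk of pending commands is + * executed on completion)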
+ */ + RAMROD_CONT, +}; + +typedef enum { + BNX2X_OBJ_TYPE_RX, + BNX2X_OBJ_TYPE_TX, + BNX2X_OBJ_TYPE_RX_TX, +} bnx2x_obj_type; + +/* Filtering states */ +enum { + BNX2X_FILTER_MAC_PENDING, + BNX2X_FILTER_VLAN_PENDING, + BNX2X_FILTER_VLAN_MAC_PENDING, + BNX2X_FILTER_RX_MODE_PENDING, + BNX2X_FILTER_RX_MODE_SCHED, + BNX2X_FILTER_ISCSI_ETH_START_SCHED, + BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, + BNX2X_FILTER_FCOE_ETH_START_SCHED, + BNX2X_FILTER_FCOE_ETH_STOP_SCHED, + BNX2X_FILTER_MCAST_PENDING, + BNX2X_FILTER_MCAST_SCHED, + BNX2X_FILTER_RSS_CONF_PENDING, +}; + +struct bnx2x_raw_obj { + u8 func_id; + + /* Queue params */ + u8 cl_id; + u32 cid; + + /* Ramrod data buffer params */ + void *rdata; + dma_addr_t rdata_mapping; + + /* Ramrod state params */ + int state; /* "ramrod is pending" state bit */ + unsigned long *pstate; /* pointer to state buffer */ + + bnx2x_obj_type obj_type; + + int (*wait_comp)(struct bnx2x *bp, + struct bnx2x_raw_obj *o); + + bool (*check_pending)(struct bnx2x_raw_obj *o); + void (*clear_pending)(struct bnx2x_raw_obj *o); + void (*set_pending)(struct bnx2x_raw_obj *o); +}; + +/************************* VLAN-MAC commands related parameters ***************/ +struct bnx2x_mac_ramrod_data { + u8 mac[ETH_ALEN]; +}; + +struct bnx2x_vlan_ramrod_data { + u16 vlan; +}; + +struct bnx2x_vlan_mac_ramrod_data { + u8 mac[ETH_ALEN]; + u16 vlan; +}; + +union bnx2x_classification_ramrod_data { + struct bnx2x_mac_ramrod_data mac; + struct bnx2x_vlan_ramrod_data vlan; + struct bnx2x_vlan_mac_ramrod_data vlan_mac; +}; + +/* VLAN_MAC commands */ +enum bnx2x_vlan_mac_cmd { + BNX2X_VLAN_MAC_ADD, + BNX2X_VLAN_MAC_DEL, + BNX2X_VLAN_MAC_MOVE, +}; + +struct bnx2x_vlan_mac_data { + /* Requested command: BNX2X_VLAN_MAC_XX */ + enum bnx2x_vlan_mac_cmd cmd; + /* + * used to contain the data-related vlan_mac_flags bits from + * the ramrod parameters. + */ + unsigned long vlan_mac_flags; + + /* Needed for MOVE command */ + struct bnx2x_vlan_mac_obj *target_obj; + + union bnx2x_classification_ramrod_data u; +}; + +/*************************** Exe Queue obj ************************************/ +union bnx2x_exe_queue_cmd_data { + struct bnx2x_vlan_mac_data vlan_mac; + + struct { + /* TODO */ + } mcast; +}; + +struct bnx2x_exeq_elem { + struct list_head link; + + /* Length of this element in the exe_chunk. */ + int cmd_len; + + union bnx2x_exe_queue_cmd_data cmd_data; +}; + +union bnx2x_qable_obj; + +union bnx2x_exeq_comp_elem { + union event_ring_elem *elem; +}; + +struct bnx2x_exe_queue_obj; + +typedef int (*exe_q_validate)(struct bnx2x *bp, + union bnx2x_qable_obj *o, + struct bnx2x_exeq_elem *elem); + +/** + * @return positive if the entry was optimized, 0 if not, negative + * in case of an error. + */ +typedef int (*exe_q_optimize)(struct bnx2x *bp, + union bnx2x_qable_obj *o, + struct bnx2x_exeq_elem *elem); +typedef int (*exe_q_execute)(struct bnx2x *bp, + union bnx2x_qable_obj *o, + struct list_head *exe_chunk, + unsigned long *ramrod_flags); +typedef struct bnx2x_exeq_elem * + (*exe_q_get)(struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem); + +struct bnx2x_exe_queue_obj { + /* + * Commands pending for execution. + */ + struct list_head exe_queue; + + /* + * Commands pending for completion.
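+ * (i.e. commands already submitted to the FW and awaiting their + * completion events)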
+ */ + struct list_head pending_comp; + + spinlock_t lock; + + /* Maximum length of commands' list for one execution */ + int exe_chunk_len; + + union bnx2x_qable_obj *owner; + + /****** Virtual functions ******/ + /** + * Called before commands execution for commands that are really + * going to be executed (after 'optimize'). + * + * Must run under exe_queue->lock + */ + exe_q_validate validate; + + /** + * This will try to cancel the current pending commands list + * considering the new command. + * + * Must run under exe_queue->lock + */ + exe_q_optimize optimize; + + /** + * Run the next commands chunk (owner specific). + */ + exe_q_execute execute; + + /** + * Return the exe_queue element containing the specific command + * if any. Otherwise return NULL. + */ + exe_q_get get; +}; +/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ +/* + * Element in the VLAN_MAC registry list having all currently configured + * rules. + */ +struct bnx2x_vlan_mac_registry_elem { + struct list_head link; + + /* + * Used to store the cam offset used for the mac/vlan/vlan-mac. + * Relevant for 57710 and 57711 only. VLANs and MACs share the + * same CAM for these chips. + */ + int cam_offset; + + /* Needed for DEL and RESTORE flows */ + unsigned long vlan_mac_flags; + + union bnx2x_classification_ramrod_data u; +}; + +/* Bits representing VLAN_MAC commands specific flags */ +enum { + BNX2X_UC_LIST_MAC, + BNX2X_ETH_MAC, + BNX2X_ISCSI_ETH_MAC, + BNX2X_NETQ_ETH_MAC, + BNX2X_DONT_CONSUME_CAM_CREDIT, + BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, +}; + +struct bnx2x_vlan_mac_ramrod_params { + /* Object to run the command from */ + struct bnx2x_vlan_mac_obj *vlan_mac_obj; + + /* General command flags: COMP_WAIT, etc. */ + unsigned long ramrod_flags; + + /* Command specific configuration request */ + struct bnx2x_vlan_mac_data user_req; +}; + +struct bnx2x_vlan_mac_obj { + struct bnx2x_raw_obj raw; + + /* Bookkeeping list: will prevent the addition of already existing + * entries. + */ + struct list_head head; + + /* TODO: Add its initialization in the init functions */ + struct bnx2x_exe_queue_obj exe_queue; + + /* MACs credit pool */ + struct bnx2x_credit_pool_obj *macs_pool; + + /* VLANs credit pool */ + struct bnx2x_credit_pool_obj *vlans_pool; + + /* RAMROD command to be used */ + int ramrod_cmd; + + /** + * Checks if ADD-ramrod with the given params may be performed. + * + * @return zero if the element may be added + */ + + int (*check_add)(struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data); + + /** + * Checks if DEL-ramrod with the given params may be performed. + * + * @return a pointer to the matching registry element if it may be + * deleted, NULL otherwise + */ + struct bnx2x_vlan_mac_registry_elem * + (*check_del)(struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data); + + /** + * Checks if MOVE-ramrod with the given params may be performed. + * + * @return true if the element may be moved + */ + bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o, + struct bnx2x_vlan_mac_obj *dst_o, + union bnx2x_classification_ramrod_data *data); + + /** + * Update the relevant credit object(s) (consume/return + * correspondingly). + */ + bool (*get_credit)(struct bnx2x_vlan_mac_obj *o); + bool (*put_credit)(struct bnx2x_vlan_mac_obj *o); + bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset); + bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset); + + /** + * Configures one rule in the ramrod data buffer.
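+ * rule_idx addresses the slot within the ramrod data buffer, while + * cam_offset selects the CAM entry to use (57710/57711 only; see the + * registry element above).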
+ */ + void (*set_one_rule)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset); + + /** + * Delete all configured elements having the given + * vlan_mac_flags specification. Assumes there are no commands + * pending for execution. Will schedule all currently + * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags + * specification for deletion and will use the given + * ramrod_flags for the last DEL operation. + * + * @param bp + * @param o + * @param ramrod_flags RAMROD_XX flags + * + * @return 0 if the last operation has completed successfully + * and there are no more elements left, positive value + * if there are commands pending for completion, + * negative value in case of failure. + */ + int (*delete_all)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long *vlan_mac_flags, + unsigned long *ramrod_flags); + + /** + * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously + * configured elements list. + * + * @param bp + * @param p Command parameters (RAMROD_COMP_WAIT bit in + * ramrod_flags is only taken into account) + * @param ppos a pointer to the cookie that should be given back in the + * next call to make the function handle the next element. If + * *ppos is set to NULL it will restart the iterator. + * If returned *ppos == NULL this means that the last + * element has been handled. + * + * @return int + */ + int (*restore)(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p, + struct bnx2x_vlan_mac_registry_elem **ppos); + + /** + * Should be called on a completion arrival. + * + * @param bp + * @param o + * @param cqe Completion element we are handling + * @param ramrod_flags if RAMROD_CONT is set the next bulk of + * pending commands will be executed. + * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE + * may also be set if needed. + * + * @return 0 if there are no commands either pending or awaiting + * completion. Positive value if there are commands + * pending for execution or for completion. + * Negative value in case of an error (including an + * error in the cqe). + */ + int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, + union event_ring_elem *cqe, + unsigned long *ramrod_flags); + + /** + * Wait for completion of all commands. Don't schedule new ones, + * just wait. It assumes that the completion code will schedule + * new commands. + */ + int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); +}; + +/** RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ + +/* RX_MODE ramrod special flags: set in rx_mode_flags field in + * a bnx2x_rx_mode_ramrod_params. + */ +enum { + BNX2X_RX_MODE_FCOE_ETH, + BNX2X_RX_MODE_ISCSI_ETH, +}; + +enum { + BNX2X_ACCEPT_UNICAST, + BNX2X_ACCEPT_MULTICAST, + BNX2X_ACCEPT_ALL_UNICAST, + BNX2X_ACCEPT_ALL_MULTICAST, + BNX2X_ACCEPT_BROADCAST, + BNX2X_ACCEPT_UNMATCHED, + BNX2X_ACCEPT_ANY_VLAN +}; + +struct bnx2x_rx_mode_ramrod_params { + struct bnx2x_rx_mode_obj *rx_mode_obj; + unsigned long *pstate; + int state; + u8 cl_id; + u32 cid; + u8 func_id; + unsigned long ramrod_flags; + unsigned long rx_mode_flags; + + /* + * rdata is either a pointer to eth_filter_rules_ramrod_data (e2) or to + * a tstorm_eth_mac_filter_config (e1x).
+ */ + void *rdata; + dma_addr_t rdata_mapping; + + /* Rx mode settings */ + unsigned long rx_accept_flags; + + /* internal switching settings */ + unsigned long tx_accept_flags; +}; + +struct bnx2x_rx_mode_obj { + int (*config_rx_mode)(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p); + + int (*wait_comp)(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p); +}; + +/********************** Set multicast group ***********************************/ + +struct bnx2x_mcast_list_elem { + struct list_head link; + u8 *mac; +}; + +union bnx2x_mcast_config_data { + u8 *mac; + u8 bin; /* used in a RESTORE flow */ +}; + +struct bnx2x_mcast_ramrod_params { + struct bnx2x_mcast_obj *mcast_obj; + + /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */ + unsigned long ramrod_flags; + + struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */ + /** TODO: + * - rename it to macs_num. + * - Add a new command type for handling pending commands + * (remove "zero semantics"). + * + * Length of mcast_list. If zero and ADD_CONT command - post + * pending commands. + */ + int mcast_list_len; +}; + +enum { + BNX2X_MCAST_CMD_ADD, + BNX2X_MCAST_CMD_CONT, + BNX2X_MCAST_CMD_DEL, + BNX2X_MCAST_CMD_RESTORE, +}; + +struct bnx2x_mcast_obj { + struct bnx2x_raw_obj raw; + + union { + struct { + #define BNX2X_MCAST_BINS_NUM 256 + #define BNX2X_MCAST_VEC_SZ (BNX2X_MCAST_BINS_NUM / 64) + u64 vec[BNX2X_MCAST_VEC_SZ]; + + /** Number of BINs to clear. Should be updated + * immediately when a command arrives in order to + * properly create DEL commands. + */ + int num_bins_set; + } aprox_match; + + struct { + struct list_head macs; + int num_macs_set; + } exact_match; + } registry; + + /* Pending commands */ + struct list_head pending_cmds_head; + + /* A state that is set in raw.pstate when there are pending commands */ + int sched_state; + + /* Maximal number of mcast MACs configured in one command */ + int max_cmd_len; + + /* Total number of currently pending MACs to configure: both + * in the pending commands list and in the current command. + */ + int total_pending_num; + + u8 engine_id; + + /** + * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above) + */ + int (*config_mcast)(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, int cmd); + + /** + * Fills the ramrod data during the RESTORE flow. + * + * @param bp + * @param o + * @param start_idx Registry index to start from + * @param rdata_idx Index in the ramrod data to start from + * + * @return -1 if the whole registry was handled, or the index of + * the last handled registry element otherwise. + */ + int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, + int start_bin, int *rdata_idx); + + int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, + struct bnx2x_mcast_ramrod_params *p, int cmd); + + void (*set_one_rule)(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, int idx, + union bnx2x_mcast_config_data *cfg_data, int cmd); + + /** Checks if there are more mcast MACs to be set or a previous + * command is still pending. + */ + bool (*check_pending)(struct bnx2x_mcast_obj *o); + + /** + * Set/Clear/Check SCHEDULED state of the object + */ + void (*set_sched)(struct bnx2x_mcast_obj *o); + void (*clear_sched)(struct bnx2x_mcast_obj *o); + bool (*check_sched)(struct bnx2x_mcast_obj *o); + + /* Wait until all pending commands complete */ + int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o); + + /** + * Handle the internal object counters needed for proper + * command handling.
Checks that the provided parameters are + * feasible. + */ + int (*validate)(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, int cmd); + + /** + * Restore the values of internal counters in case of a failure. + */ + void (*revert)(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int old_num_bins); + + int (*get_registry_size)(struct bnx2x_mcast_obj *o); + void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n); +}; + +/*************************** Credit handling **********************************/ +struct bnx2x_credit_pool_obj { + + /* Current amount of credit in the pool */ + atomic_t credit; + + /* Maximum allowed credit. put() will check against it. */ + int pool_sz; + + /* + * Allocate a pool table statically. + * + * Currently the maximum allowed size is MAX_MAC_CREDIT_E2 (272) + * + * A set bit in the table means that the entry is available. + */ +#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) + u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; + + /* Base pool offset (initialized differently) */ + int base_pool_offset; + + /** + * Get the next free pool entry. + * + * @return true if there was a free entry in the pool + */ + bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry); + + /** + * Return the entry back to the pool. + * + * @return true if entry is legal and has been successfully + * returned to the pool. + */ + bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry); + + /** + * Get the requested amount of credit from the pool. + * + * @param cnt Amount of requested credit + * @return true if the operation is successful + */ + bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt); + + /** + * Returns the credit to the pool. + * + * @param cnt Amount of credit to return + * @return true if the operation is successful + */ + bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt); + + /** + * Reads the current amount of credit.
+	 */
+	int (*check)(struct bnx2x_credit_pool_obj *o);
+};
+
+/*************************** RSS configuration ********************************/
+enum {
+	/* RSS_MODE bits are mutually exclusive */
+	BNX2X_RSS_MODE_DISABLED,
+	BNX2X_RSS_MODE_REGULAR,
+	BNX2X_RSS_MODE_VLAN_PRI,
+	BNX2X_RSS_MODE_E1HOV_PRI,
+	BNX2X_RSS_MODE_IP_DSCP,
+
+	BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+	BNX2X_RSS_IPV4,
+	BNX2X_RSS_IPV4_TCP,
+	BNX2X_RSS_IPV6,
+	BNX2X_RSS_IPV6_TCP,
+};
+
+struct bnx2x_config_rss_params {
+	struct bnx2x_rss_config_obj *rss_obj;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long ramrod_flags;
+
+	/* BNX2X_RSS_X bits */
+	unsigned long rss_flags;
+
+	/* Number of hash bits to take into account */
+	u8 rss_result_mask;
+
+	/* Indirection table */
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	/* RSS hash values */
+	u32 rss_key[10];
+
+	/* valid iff BNX2X_RSS_UPDATE_TOE is set */
+	u16 toe_rss_bitmap;
+};
+
+struct bnx2x_rss_config_obj {
+	struct bnx2x_raw_obj raw;
+
+	/* RSS engine to use */
+	u8 engine_id;
+
+	/* Last configured indirection table */
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	int (*config_rss)(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *p);
+};
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+	BNX2X_Q_UPDATE_IN_VLAN_REM,
+	BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_ANTI_SPOOF,
+	BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+	BNX2X_Q_UPDATE_ACTIVATE,
+	BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM
+};
+
+/* Allowed Queue states */
+enum bnx2x_q_state {
+	BNX2X_Q_STATE_RESET,
+	BNX2X_Q_STATE_INITIALIZED,
+	BNX2X_Q_STATE_ACTIVE,
+	BNX2X_Q_STATE_MULTI_COS,
+	BNX2X_Q_STATE_MCOS_TERMINATED,
+	BNX2X_Q_STATE_INACTIVE,
+	BNX2X_Q_STATE_STOPPED,
+	BNX2X_Q_STATE_TERMINATED,
+	BNX2X_Q_STATE_FLRED,
+	BNX2X_Q_STATE_MAX,
+};
+
+/* Allowed commands */
+enum bnx2x_queue_cmd {
+	BNX2X_Q_CMD_INIT,
+	BNX2X_Q_CMD_SETUP,
+	BNX2X_Q_CMD_SETUP_TX_ONLY,
+	BNX2X_Q_CMD_DEACTIVATE,
+	BNX2X_Q_CMD_ACTIVATE,
+	BNX2X_Q_CMD_UPDATE,
+	BNX2X_Q_CMD_UPDATE_TPA,
+	BNX2X_Q_CMD_HALT,
+	BNX2X_Q_CMD_CFC_DEL,
+	BNX2X_Q_CMD_TERMINATE,
+	BNX2X_Q_CMD_EMPTY,
+	BNX2X_Q_CMD_MAX,
+};
+
+/* queue SETUP + INIT flags */
+enum {
+	BNX2X_Q_FLG_TPA,
+	BNX2X_Q_FLG_TPA_IPV6,
+	BNX2X_Q_FLG_STATS,
+	BNX2X_Q_FLG_ZERO_STATS,
+	BNX2X_Q_FLG_ACTIVE,
+	BNX2X_Q_FLG_OV,
+	BNX2X_Q_FLG_VLAN,
+	BNX2X_Q_FLG_COS,
+	BNX2X_Q_FLG_HC,
+	BNX2X_Q_FLG_HC_EN,
+	BNX2X_Q_FLG_DHC,
+	BNX2X_Q_FLG_FCOE,
+	BNX2X_Q_FLG_LEADING_RSS,
+	BNX2X_Q_FLG_MCAST,
+	BNX2X_Q_FLG_DEF_VLAN,
+	BNX2X_Q_FLG_TX_SWITCH,
+	BNX2X_Q_FLG_TX_SEC,
+	BNX2X_Q_FLG_ANTI_SPOOF,
+	BNX2X_Q_FLG_SILENT_VLAN_REM
+};
+
+/* Queue type options: queue type may be a combination of the flags below. */
+enum bnx2x_q_type {
+	/** TODO: Consider moving both these flags into the init()
+	 *  ramrod params.
+	 */
+	BNX2X_Q_TYPE_HAS_RX,
+	BNX2X_Q_TYPE_HAS_TX,
+};
+
+#define BNX2X_PRIMARY_CID_INDEX		0
+#define BNX2X_MULTI_TX_COS_E1X		1
+#define BNX2X_MULTI_TX_COS_E2_E3A0	2
+#define BNX2X_MULTI_TX_COS_E3B0		3
+#define BNX2X_MULTI_TX_COS		BNX2X_MULTI_TX_COS_E3B0
+
+
+struct bnx2x_queue_init_params {
+	struct {
+		unsigned long flags;
+		u16 hc_rate;
+		u8 fw_sb_id;
+		u8 sb_cq_index;
+	} tx;
+
+	struct {
+		unsigned long flags;
+		u16 hc_rate;
+		u8 fw_sb_id;
+		u8 sb_cq_index;
+	} rx;
+
+	/* CID context in the host memory */
+	struct eth_context *cxts[BNX2X_MULTI_TX_COS];
+
+	/* maximum number of cos supported by hardware */
+	u8 max_cos;
+};
+
+struct bnx2x_queue_terminate_params {
+	/* index within the tx_only cids of this queue object */
+	u8 cid_index;
+};
+
+struct bnx2x_queue_cfc_del_params {
+	/* index within the tx_only cids of this queue object */
+	u8 cid_index;
+};
+
+struct bnx2x_queue_update_params {
+	unsigned long update_flags; /* BNX2X_Q_UPDATE_XX bits */
+	u16 def_vlan;
+	u16 silent_removal_value;
+	u16 silent_removal_mask;
+	/* index within the tx_only cids of this queue object */
+	u8 cid_index;
+};
+
+struct rxq_pause_params {
+	u16 bd_th_lo;
+	u16 bd_th_hi;
+	u16 rcq_th_lo;
+	u16 rcq_th_hi;
+	u16 sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
+	u16 sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
+	u16 pri_map;
+};
+
+/* general */
+struct bnx2x_general_setup_params {
+	/* valid iff BNX2X_Q_FLG_STATS */
+	u8 stat_id;
+
+	u8 spcl_id;
+	u16 mtu;
+	u8 cos;
+};
+
+struct bnx2x_rxq_setup_params {
+	/* dma */
+	dma_addr_t dscr_map;
+	dma_addr_t sge_map;
+	dma_addr_t rcq_map;
+	dma_addr_t rcq_np_map;
+
+	u16 drop_flags;
+	u16 buf_sz;
+	u8 fw_sb_id;
+	u8 cl_qzone_id;
+
+	/* valid iff BNX2X_Q_FLG_TPA */
+	u16 tpa_agg_sz;
+	u16 sge_buf_sz;
+	u8 max_sges_pkt;
+	u8 max_tpa_queues;
+	u8 rss_engine_id;
+
+	u8 cache_line_log;
+
+	u8 sb_cq_index;
+
+	/* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
+	u16 silent_removal_value;
+	u16 silent_removal_mask;
+};
+
+struct bnx2x_txq_setup_params {
+	/* dma */
+	dma_addr_t dscr_map;
+
+	u8 fw_sb_id;
+	u8 sb_cq_index;
+	u8 cos; /* valid iff BNX2X_Q_FLG_COS */
+	u16 traffic_type;
+	/* equals the leading rss client id, used for TX classification */
+	u8 tss_leading_cl_id;
+
+	/* valid iff BNX2X_Q_FLG_DEF_VLAN */
+	u16 default_vlan;
+};
+
+struct bnx2x_queue_setup_params {
+	struct bnx2x_general_setup_params gen_params;
+	struct bnx2x_txq_setup_params txq_params;
+	struct bnx2x_rxq_setup_params rxq_params;
+	struct rxq_pause_params pause_params;
+	unsigned long flags;
+};
+
+struct bnx2x_queue_setup_tx_only_params {
+	struct bnx2x_general_setup_params gen_params;
+	struct bnx2x_txq_setup_params txq_params;
+	unsigned long flags;
+	/* index within the tx_only cids of this queue object */
+	u8 cid_index;
+};
+
+struct bnx2x_queue_state_params {
+	struct bnx2x_queue_sp_obj *q_obj;
+
+	/* Current command */
+	enum bnx2x_queue_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_queue_update_params update;
+		struct bnx2x_queue_setup_params setup;
+		struct bnx2x_queue_init_params init;
+		struct bnx2x_queue_setup_tx_only_params tx_only;
+		struct bnx2x_queue_terminate_params terminate;
+		struct bnx2x_queue_cfc_del_params cfc_del;
+	} params;
+};
+
+struct bnx2x_queue_sp_obj {
+	u32 cids[BNX2X_MULTI_TX_COS];
+	u8 cl_id;
+	u8 func_id;
+
+	/*
+	 * number of traffic classes supported by queue.
+	 * The primary connection of the queue supports the first traffic
+	 * class. Any further traffic class is supported by a tx-only
+	 * connection.
+	 *
+	 * Therefore max_cos is also the number of valid entries in the
+	 * cids array.
+	 */
+	u8 max_cos;
+	u8 num_tx_only, next_tx_only;
+
+	enum bnx2x_q_state state, next_state;
+
+	/* bits from enum bnx2x_q_type */
+	unsigned long type;
+
+	/* BNX2X_Q_CMD_XX bits. This object implements a "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long pending;
+
+	/* Buffer to use as ramrod data and its mapping */
+	void *rdata;
+	dma_addr_t rdata_mapping;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Sets the pending bit according to the requested transition.
+	 */
+	int (*set_pending)(struct bnx2x_queue_sp_obj *o,
+			   struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_queue_sp_obj *o,
+			    enum bnx2x_queue_cmd);
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_queue_sp_obj *o,
+			 enum bnx2x_queue_cmd cmd);
+};
+
+/********************** Function state update *********************************/
+/* Allowed Function states */
+enum bnx2x_func_state {
+	BNX2X_F_STATE_RESET,
+	BNX2X_F_STATE_INITIALIZED,
+	BNX2X_F_STATE_STARTED,
+	BNX2X_F_STATE_TX_STOPPED,
+	BNX2X_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum bnx2x_func_cmd {
+	BNX2X_F_CMD_HW_INIT,
+	BNX2X_F_CMD_START,
+	BNX2X_F_CMD_STOP,
+	BNX2X_F_CMD_HW_RESET,
+	BNX2X_F_CMD_TX_STOP,
+	BNX2X_F_CMD_TX_START,
+	BNX2X_F_CMD_MAX,
+};
+
+struct bnx2x_func_hw_init_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 load_phase;
+};
+
+struct bnx2x_func_hw_reset_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 reset_phase;
+};
+
+struct bnx2x_func_start_params {
+	/* Multi Function mode:
+	 *	- Single Function
+	 *	- Switch Dependent
+	 *	- Switch Independent
+	 */
+	u16 mf_mode;
+
+	/* Switch Dependent mode outer VLAN tag */
+	u16 sd_vlan_tag;
+
+	/* Function cos mode */
+	u8 network_cos_mode;
+};
+
+struct bnx2x_func_tx_start_params {
+	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+	u8 dcb_enabled;
+	u8 dcb_version;
+	u8 dont_add_pri_0_en;
+};
+
+struct bnx2x_func_state_params {
+	struct bnx2x_func_sp_obj *f_obj;
+
+	/* Current command */
+	enum bnx2x_func_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_func_hw_init_params hw_init;
+		struct bnx2x_func_hw_reset_params hw_reset;
+		struct bnx2x_func_start_params start;
+		struct bnx2x_func_tx_start_params tx_start;
+	} params;
+};
+
+struct bnx2x_func_sp_drv_ops {
+	/* Init tool + runtime initialization:
+	 *	- Common Chip
+	 *	- Common (per Path)
+	 *	- Port
+	 *	- Function phases
+	 */
+	int (*init_hw_cmn_chip)(struct bnx2x *bp);
+	int (*init_hw_cmn)(struct bnx2x *bp);
+	int (*init_hw_port)(struct bnx2x *bp);
+	int (*init_hw_func)(struct bnx2x *bp);
+
+	/* Reset Function HW: Common, Port, Function phases. */
+	void (*reset_hw_cmn)(struct bnx2x *bp);
+	void (*reset_hw_port)(struct bnx2x *bp);
+	void (*reset_hw_func)(struct bnx2x *bp);
+
+	/* Init/Free GUNZIP resources */
+	int (*gunzip_init)(struct bnx2x *bp);
+	void (*gunzip_end)(struct bnx2x *bp);
+
+	/* Prepare/Release FW resources */
+	int (*init_fw)(struct bnx2x *bp);
+	void (*release_fw)(struct bnx2x *bp);
+};
+
+struct bnx2x_func_sp_obj {
+	enum bnx2x_func_state state, next_state;
+
+	/* BNX2X_FUNC_CMD_XX bits. This object implements a "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long pending;
+
+	/* Buffer to use as ramrod data and its mapping */
+	void *rdata;
+	dma_addr_t rdata_mapping;
+
+	/* this mutex ensures that, once the pending flag is taken, the
+	 * next ramrod to be sent will be the one that set the pending bit
+	 */
+	struct mutex one_pending_mutex;
+
+	/* Driver interface */
+	struct bnx2x_func_sp_drv_ops *drv;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_func_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_func_sp_obj *o,
+				struct bnx2x_func_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_func_sp_obj *o,
+			    enum bnx2x_func_cmd cmd);
+
+	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
+			 enum bnx2x_func_cmd cmd);
+};
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union bnx2x_qable_obj {
+	struct bnx2x_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void bnx2x_init_func_obj(struct bnx2x *bp,
+			 struct bnx2x_func_sp_obj *obj,
+			 void *rdata, dma_addr_t rdata_mapping,
+			 struct bnx2x_func_sp_drv_ops *drv_iface);
+
+int bnx2x_func_state_change(struct bnx2x *bp,
+			    struct bnx2x_func_state_params *params);
+
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+					   struct bnx2x_func_sp_obj *o);
+/******************* Queue State **************/
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+			  struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
+			  u8 cid_cnt, u8 func_id, void *rdata,
+			  dma_addr_t rdata_mapping, unsigned long type);
+
+int bnx2x_queue_state_change(struct bnx2x *bp,
+			     struct bnx2x_queue_state_params *params);
+
+/********************* VLAN-MAC ****************/
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_obj *mac_obj,
+			u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			dma_addr_t rdata_mapping, int state,
+			unsigned long *pstate, bnx2x_obj_type type,
+			struct bnx2x_credit_pool_obj *macs_pool);
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *vlan_obj,
+			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			 dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, bnx2x_obj_type type,
+			 struct bnx2x_credit_pool_obj *vlans_pool);
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool);
+
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+			  struct bnx2x_vlan_mac_ramrod_params *p);
+
+int bnx2x_vlan_mac_move(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_ramrod_params *p,
+			struct bnx2x_vlan_mac_obj *dest_o);
+
+/********************* RX MODE ****************/
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+			    struct bnx2x_rx_mode_obj *o);
+
+/**
+ * Send an RX_MODE ramrod according to the provided parameters.
+ *
+ * @param bp
+ * @param p Command parameters
+ *
+ * @return 0 - if the operation was successful and there are no pending
+ *	   completions,
+ *	   positive number - if there are pending completions,
+ *	   negative - if there were errors
+ */
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p);
+
+/****************** MULTICASTS ****************/
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+			  struct bnx2x_mcast_obj *mcast_obj,
+			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+			  int state, unsigned long *pstate,
+			  bnx2x_obj_type type);
+
+/**
+ * Configure multicast MACs list. May configure a new list
+ * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
+ * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
+ * configuration, or continue to execute the pending commands
+ * (BNX2X_MCAST_CMD_CONT).
+ *
+ * If a previous command is still pending, or if the number of MACs to
+ * configure exceeds the maximum number of MACs in one command,
+ * the current command will be enqueued to the tail of the
+ * pending commands list.
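+ * Pending commands are executed later, via a BNX2X_MCAST_CMD_CONT
+ * invocation.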
+ *
+ * @param bp
+ * @param p
+ * @param cmd command to execute: BNX2X_MCAST_CMD_X
+ *
+ * @return 0 if the operation was successful and there are no pending
+ *	   completions, negative if there were errors, positive if there
+ *	   are pending completions.
+ */
+int bnx2x_config_mcast(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+/****************** CREDIT POOL ****************/
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+				struct bnx2x_credit_pool_obj *p, u8 func_id,
+				u8 func_num);
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+				 struct bnx2x_credit_pool_obj *p, u8 func_id,
+				 u8 func_num);
+
+
+/****************** RSS CONFIGURATION ****************/
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+			       struct bnx2x_rss_config_obj *rss_obj,
+			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+			       void *rdata, dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       bnx2x_obj_type type);
+
+/**
+ * Updates RSS configuration according to provided parameters.
+ *
+ * @param bp
+ * @param p
+ *
+ * @return 0 in case of success
+ */
+int bnx2x_config_rss(struct bnx2x *bp,
+		     struct bnx2x_config_rss_params *p);
+
+/**
+ * Return the current ind_table configuration.
+ *
+ * @param bp
+ * @param ind_table buffer to fill with the current indirection
+ *		    table content. Should be at least
+ *		    T_ETH_INDIRECTION_TABLE_SIZE bytes long.
+ */
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+			     u8 *ind_table);
+
+#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
new file mode 100644
index 0000000..771f680
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -0,0 +1,1598 @@
+/* bnx2x_stats.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#include "bnx2x_stats.h"
+#include "bnx2x_cmn.h"
+
+
+/* Statistics */
+
+/*
+ * General service functions
+ */
+
+static inline long bnx2x_hilo(u32 *hiref)
+{
+	u32 lo = *(hiref + 1);
+#if (BITS_PER_LONG == 64)
+	u32 hi = *hiref;
+
+	return HILO_U64(hi, lo);
+#else
+	return lo;
+#endif
+}
+
+/*
+ * Init service functions
+ */
+
+/* Post the next statistics ramrod. Protect it with the spin lock in
+ * order to ensure the strict order between statistics ramrods
+ * (each ramrod has a sequence number passed in a
+ * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
+ * sent in order).
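+ * Note: the function below re-checks bp->stats_pending under
+ * bp->stats_lock, so two contexts racing past the unlocked check
+ * cannot both increment the counter and post a duplicate ramrod.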
+ */ +static void bnx2x_storm_stats_post(struct bnx2x *bp) +{ + if (!bp->stats_pending) { + int rc; + + spin_lock_bh(&bp->stats_lock); + + if (bp->stats_pending) { + spin_unlock_bh(&bp->stats_lock); + return; + } + + bp->fw_stats_req->hdr.drv_stats_counter = + cpu_to_le16(bp->stats_counter++); + + DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n", + bp->fw_stats_req->hdr.drv_stats_counter); + + + + /* send FW stats ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, + U64_HI(bp->fw_stats_req_mapping), + U64_LO(bp->fw_stats_req_mapping), + NONE_CONNECTION_TYPE); + if (rc == 0) + bp->stats_pending = 1; + + spin_unlock_bh(&bp->stats_lock); + } +} + +static void bnx2x_hw_stats_post(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + *stats_comp = DMAE_COMP_VAL; + if (CHIP_REV_IS_SLOW(bp)) + return; + + /* loader */ + if (bp->executer_idx) { + int loader_idx = PMF_DMAE_C(bp); + u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, + true, DMAE_COMP_GRC); + opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); + + memset(dmae, 0, sizeof(struct dmae_command)); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); + dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + + sizeof(struct dmae_command) * + (loader_idx + 1)) >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct dmae_command) >> 2; + if (CHIP_IS_E1(bp)) + dmae->len--; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + *stats_comp = 0; + bnx2x_post_dmae(bp, dmae, loader_idx); + + } else if (bp->func_stx) { + *stats_comp = 0; + bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); + } +} + +static int bnx2x_stats_comp(struct bnx2x *bp) +{ + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + int cnt = 10; + + might_sleep(); + while (*stats_comp != DMAE_COMP_VAL) { + if (!cnt) { + BNX2X_ERR("timeout waiting for stats finished\n"); + break; + } + cnt--; + usleep_range(1000, 1000); + } + return 1; +} + +/* + * Statistics service functions + */ + +static void bnx2x_stats_pmf_update(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0); + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); + dmae->src_addr_lo = bp->port.port_stx >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->len = DMAE_LEN32_RD_MAX; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); + dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = 
U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + +static void bnx2x_port_stats_init(struct bnx2x *bp) +{ + struct dmae_command *dmae; + int port = BP_PORT(bp); + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 mac_addr; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->link_vars.link_up || !bp->port.pmf) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + /* MCP */ + opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, + true, DMAE_COMP_GRC); + + if (bp->port.port_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_port_stats) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + if (bp->func_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* MAC */ + opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, + true, DMAE_COMP_GRC); + + /* EMAC is special */ + if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { + mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); + + /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_RX_STAT_AC) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); + dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_RX_STAT_AC_28 */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); + dmae->len = 1; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_TX_STAT_AC) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); + dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } else { + u32 tx_src_addr_lo, rx_src_addr_lo; + u16 rx_len, tx_len; + + /* configure the params according 
to MAC type */ + switch (bp->link_vars.mac_type) { + case MAC_TYPE_BMAC: + mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM); + + /* BIGMAC_REGISTER_TX_STAT_GTPKT .. + BIGMAC_REGISTER_TX_STAT_GTBYT */ + if (CHIP_IS_E1x(bp)) { + tx_src_addr_lo = (mac_addr + + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; + tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; + rx_src_addr_lo = (mac_addr + + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; + rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; + } else { + tx_src_addr_lo = (mac_addr + + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; + tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; + rx_src_addr_lo = (mac_addr + + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; + rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; + } + break; + + case MAC_TYPE_UMAC: /* handled by MSTAT */ + case MAC_TYPE_XMAC: /* handled by MSTAT */ + default: + mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0; + tx_src_addr_lo = (mac_addr + + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2; + rx_src_addr_lo = (mac_addr + + MSTAT_REG_RX_STAT_GR64_LO) >> 2; + tx_len = sizeof(bp->slowpath-> + mac_stats.mstat_stats.stats_tx) >> 2; + rx_len = sizeof(bp->slowpath-> + mac_stats.mstat_stats.stats_rx) >> 2; + break; + } + + /* TX stats */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = tx_src_addr_lo; + dmae->src_addr_hi = 0; + dmae->len = tx_len; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* RX stats */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_hi = 0; + dmae->src_addr_lo = rx_src_addr_lo; + dmae->dst_addr_lo = + U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); + dmae->dst_addr_hi = + U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); + dmae->len = rx_len; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* NIG */ + if (!CHIP_IS_E3(bp)) { + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : + NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt0_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt0_lo)); + dmae->len = (2*sizeof(u32)) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_EGRESS_MAC_PKT1 : + NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt1_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt1_lo)); + dmae->len = (2*sizeof(u32)) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, + true, DMAE_COMP_PCI); + dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : + NIG_REG_STAT0_BRB_DISCARD) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); + dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; + + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void bnx2x_func_stats_init(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, + true, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void bnx2x_stats_start(struct bnx2x *bp) +{ + if (bp->port.pmf) + bnx2x_port_stats_init(bp); + + else if (bp->func_stx) + bnx2x_func_stats_init(bp); + + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); +} + +static void bnx2x_stats_pmf_start(struct bnx2x *bp) +{ + bnx2x_stats_comp(bp); + bnx2x_stats_pmf_update(bp); + bnx2x_stats_start(bp); +} + +static void bnx2x_stats_restart(struct bnx2x *bp) +{ + bnx2x_stats_comp(bp); + bnx2x_stats_start(bp); +} + +static void bnx2x_bmac_stats_update(struct bnx2x *bp) +{ + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct { + u32 lo; + u32 hi; + } diff; + + if (CHIP_IS_E1x(bp)) { + struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats); + + /* the macros below will use "bmac1_stats" type */ + UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); + UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); + UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); + UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); + UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); + UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); + + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); + UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); + UPDATE_STAT64(tx_stat_gt127, + tx_stat_etherstatspkts65octetsto127octets); 
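The UPDATE_STAT64() invocations above fold each BigMAC hardware counter, exposed as a {hi, lo} pair of u32 registers, into the accumulated port statistics. The macro definitions are outside this hunk; the sketch below is only a plausible reconstruction of the {hi, lo} arithmetic involved, with hypothetical names, not the driver's actual macros:

	/* Hypothetical sketch: subtract the previous snapshot from the
	 * new reading (with borrow) and add the delta into the running
	 * total (with carry), all on {hi, lo} pairs of u32.
	 */
	static inline void stat64_update(u32 *tot_hi, u32 *tot_lo,
					 u32 *old_hi, u32 *old_lo,
					 u32 new_hi, u32 new_lo)
	{
		u32 d_lo = new_lo - *old_lo;
		u32 d_hi = new_hi - *old_hi - (new_lo < *old_lo); /* borrow */

		*old_hi = new_hi;
		*old_lo = new_lo;

		*tot_lo += d_lo;
		*tot_hi += d_hi + (*tot_lo < d_lo);		  /* carry */
	}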
+ UPDATE_STAT64(tx_stat_gt255, + tx_stat_etherstatspkts128octetsto255octets); + UPDATE_STAT64(tx_stat_gt511, + tx_stat_etherstatspkts256octetsto511octets); + UPDATE_STAT64(tx_stat_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_STAT64(tx_stat_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); + UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); + UPDATE_STAT64(tx_stat_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); + + } else { + struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); + + /* the macros below will use "bmac2_stats" type */ + UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); + UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); + UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); + UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); + UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); + UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); + UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); + UPDATE_STAT64(tx_stat_gt127, + tx_stat_etherstatspkts65octetsto127octets); + UPDATE_STAT64(tx_stat_gt255, + tx_stat_etherstatspkts128octetsto255octets); + UPDATE_STAT64(tx_stat_gt511, + tx_stat_etherstatspkts256octetsto511octets); + UPDATE_STAT64(tx_stat_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_STAT64(tx_stat_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); + UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); + UPDATE_STAT64(tx_stat_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); + } + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_mac_xpf_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_mac_xpf_lo; + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxoffsent_lo; +} + +static void bnx2x_mstat_stats_update(struct bnx2x *bp) +{ + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + + struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); + + ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); + ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); + ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); + ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); + ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); + ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); + ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); + ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); + ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); + ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); + + + ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); + ADD_STAT64(stats_tx.tx_gt127, + tx_stat_etherstatspkts65octetsto127octets); + 
ADD_STAT64(stats_tx.tx_gt255, + tx_stat_etherstatspkts128octetsto255octets); + ADD_STAT64(stats_tx.tx_gt511, + tx_stat_etherstatspkts256octetsto511octets); + ADD_STAT64(stats_tx.tx_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + ADD_STAT64(stats_tx.tx_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); + + ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); + ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); + ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); + + ADD_STAT64(stats_tx.tx_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); + + ADD_64(estats->etherstatspkts1024octetsto1522octets_hi, + new->stats_tx.tx_gt1518_hi, + estats->etherstatspkts1024octetsto1522octets_lo, + new->stats_tx.tx_gt1518_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + new->stats_tx.tx_gt2047_hi, + estats->etherstatspktsover1522octets_lo, + new->stats_tx.tx_gt2047_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + new->stats_tx.tx_gt4095_hi, + estats->etherstatspktsover1522octets_lo, + new->stats_tx.tx_gt4095_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + new->stats_tx.tx_gt9216_hi, + estats->etherstatspktsover1522octets_lo, + new->stats_tx.tx_gt9216_lo); + + + ADD_64(estats->etherstatspktsover1522octets_hi, + new->stats_tx.tx_gt16383_hi, + estats->etherstatspktsover1522octets_lo, + new->stats_tx.tx_gt16383_lo); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_mac_xpf_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_mac_xpf_lo; + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxoffsent_lo; +} + +static void bnx2x_emac_stats_update(struct bnx2x *bp) +{ + struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + + UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); + UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); + UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); + UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); + UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); + UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); + UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); + UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); + UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); + UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); + UPDATE_EXTEND_STAT(tx_stat_outxonsent); + UPDATE_EXTEND_STAT(tx_stat_outxoffsent); + UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); + UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); + UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); 
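The UPDATE_EXTEND_STAT() run above widens the EMAC's 32-bit counters into the driver's 64-bit software mirrors. The macro itself is defined elsewhere; as an illustrative sketch (hypothetical names, not the real macro), the key point is that unsigned subtraction of the previous snapshot yields a correct delta even across a single 32-bit wrap:

	/* Hypothetical sketch: extend a 32-bit HW counter into a 64-bit
	 * SW counter; wrap-safe as long as the counter wraps at most
	 * once between two updates.
	 */
	static inline void stat_extend_u32(u64 *sw_total, u32 *old_snap,
					   u32 hw_now)
	{
		u32 delta = hw_now - *old_snap;

		*old_snap = hw_now;
		*sw_total += delta;
	}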
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); + UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; + ADD_64(estats->pause_frames_received_hi, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, + estats->pause_frames_received_lo, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxonsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxonsent_lo; + ADD_64(estats->pause_frames_sent_hi, + pstats->mac_stx[1].tx_stat_outxoffsent_hi, + estats->pause_frames_sent_lo, + pstats->mac_stx[1].tx_stat_outxoffsent_lo); +} + +static int bnx2x_hw_stats_update(struct bnx2x *bp) +{ + struct nig_stats *new = bnx2x_sp(bp, nig_stats); + struct nig_stats *old = &(bp->port.old_nig_stats); + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct { + u32 lo; + u32 hi; + } diff; + + switch (bp->link_vars.mac_type) { + case MAC_TYPE_BMAC: + bnx2x_bmac_stats_update(bp); + break; + + case MAC_TYPE_EMAC: + bnx2x_emac_stats_update(bp); + break; + + case MAC_TYPE_UMAC: + case MAC_TYPE_XMAC: + bnx2x_mstat_stats_update(bp); + break; + + case MAC_TYPE_NONE: /* unreached */ + BNX2X_ERR("stats updated by DMAE but no MAC active\n"); + return -1; + + default: /* unreached */ + BNX2X_ERR("Unknown MAC type\n"); + } + + ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, + new->brb_discard - old->brb_discard); + ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, + new->brb_truncate - old->brb_truncate); + + if (!CHIP_IS_E3(bp)) { + UPDATE_STAT64_NIG(egress_mac_pkt0, + etherstatspkts1024octetsto1522octets); + UPDATE_STAT64_NIG(egress_mac_pkt1, + etherstatspktsover1522octets); + } + + memcpy(old, new, sizeof(struct nig_stats)); + + memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), + sizeof(struct mac_stx)); + estats->brb_drop_hi = pstats->brb_drop_hi; + estats->brb_drop_lo = pstats->brb_drop_lo; + + pstats->host_port_stats_start = ++pstats->host_port_stats_end; + + if (!BP_NOMCP(bp)) { + u32 nig_timer_max = + SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); + if (nig_timer_max != estats->nig_timer_max) { + estats->nig_timer_max = nig_timer_max; + BNX2X_ERR("NIG timer max (%u)\n", + estats->nig_timer_max); + } + } + + return 0; +} + +static int bnx2x_storm_stats_update(struct bnx2x *bp) +{ + struct tstorm_per_port_stats *tport = + &bp->fw_stats_data->port.tstorm_port_statistics; + struct tstorm_per_pf_stats *tfunc = + &bp->fw_stats_data->pf.tstorm_pf_statistics; + struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct stats_counter *counters = &bp->fw_stats_data->storm_counters; + int i; + u16 cur_stats_counter; + + /* Make sure we use the value of the counter + * used for sending the last stats ramrod. + */ + spin_lock_bh(&bp->stats_lock); + cur_stats_counter = bp->stats_counter - 1; + spin_unlock_bh(&bp->stats_lock); + + /* are storm stats valid? 
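+	 * Each storm (x/u/c/t) keeps its own completion counter; unless
+	 * all four have caught up with the counter of the last posted
+	 * ramrod, the DMAed data is stale and the update is retried.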
*/ + if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "stats not updated by xstorm" + " xstorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->xstats_counter), bp->stats_counter); + return -EAGAIN; + } + + if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "stats not updated by ustorm" + " ustorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->ustats_counter), bp->stats_counter); + return -EAGAIN; + } + + if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "stats not updated by cstorm" + " cstorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->cstats_counter), bp->stats_counter); + return -EAGAIN; + } + + if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "stats not updated by tstorm" + " tstorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->tstats_counter), bp->stats_counter); + return -EAGAIN; + } + + memcpy(&(fstats->total_bytes_received_hi), + &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), + sizeof(struct host_func_stats) - 2*sizeof(u32)); + estats->error_bytes_received_hi = 0; + estats->error_bytes_received_lo = 0; + estats->etherstatsoverrsizepkts_hi = 0; + estats->etherstatsoverrsizepkts_lo = 0; + estats->no_buff_discard_hi = 0; + estats->no_buff_discard_lo = 0; + estats->total_tpa_aggregations_hi = 0; + estats->total_tpa_aggregations_lo = 0; + estats->total_tpa_aggregated_frames_hi = 0; + estats->total_tpa_aggregated_frames_lo = 0; + estats->total_tpa_bytes_hi = 0; + estats->total_tpa_bytes_lo = 0; + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct tstorm_per_queue_stats *tclient = + &bp->fw_stats_data->queue_stats[i]. + tstorm_queue_statistics; + struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; + struct ustorm_per_queue_stats *uclient = + &bp->fw_stats_data->queue_stats[i]. + ustorm_queue_statistics; + struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; + struct xstorm_per_queue_stats *xclient = + &bp->fw_stats_data->queue_stats[i]. 
+ xstorm_queue_statistics; + struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + u32 diff; + + DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, " + "bcast_sent 0x%x mcast_sent 0x%x\n", + i, xclient->ucast_pkts_sent, + xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); + + DP(BNX2X_MSG_STATS, "---------------\n"); + + qstats->total_broadcast_bytes_received_hi = + le32_to_cpu(tclient->rcv_bcast_bytes.hi); + qstats->total_broadcast_bytes_received_lo = + le32_to_cpu(tclient->rcv_bcast_bytes.lo); + + qstats->total_multicast_bytes_received_hi = + le32_to_cpu(tclient->rcv_mcast_bytes.hi); + qstats->total_multicast_bytes_received_lo = + le32_to_cpu(tclient->rcv_mcast_bytes.lo); + + qstats->total_unicast_bytes_received_hi = + le32_to_cpu(tclient->rcv_ucast_bytes.hi); + qstats->total_unicast_bytes_received_lo = + le32_to_cpu(tclient->rcv_ucast_bytes.lo); + + /* + * sum to total_bytes_received all + * unicast/multicast/broadcast + */ + qstats->total_bytes_received_hi = + qstats->total_broadcast_bytes_received_hi; + qstats->total_bytes_received_lo = + qstats->total_broadcast_bytes_received_lo; + + ADD_64(qstats->total_bytes_received_hi, + qstats->total_multicast_bytes_received_hi, + qstats->total_bytes_received_lo, + qstats->total_multicast_bytes_received_lo); + + ADD_64(qstats->total_bytes_received_hi, + qstats->total_unicast_bytes_received_hi, + qstats->total_bytes_received_lo, + qstats->total_unicast_bytes_received_lo); + + qstats->valid_bytes_received_hi = + qstats->total_bytes_received_hi; + qstats->valid_bytes_received_lo = + qstats->total_bytes_received_lo; + + + UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, + total_unicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, + total_multicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, + total_broadcast_packets_received); + UPDATE_EXTEND_TSTAT(pkts_too_big_discard, + etherstatsoverrsizepkts); + UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); + + SUB_EXTEND_USTAT(ucast_no_buff_pkts, + total_unicast_packets_received); + SUB_EXTEND_USTAT(mcast_no_buff_pkts, + total_multicast_packets_received); + SUB_EXTEND_USTAT(bcast_no_buff_pkts, + total_broadcast_packets_received); + UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); + + qstats->total_broadcast_bytes_transmitted_hi = + le32_to_cpu(xclient->bcast_bytes_sent.hi); + qstats->total_broadcast_bytes_transmitted_lo = + le32_to_cpu(xclient->bcast_bytes_sent.lo); + + qstats->total_multicast_bytes_transmitted_hi = + le32_to_cpu(xclient->mcast_bytes_sent.hi); + qstats->total_multicast_bytes_transmitted_lo = + le32_to_cpu(xclient->mcast_bytes_sent.lo); + + qstats->total_unicast_bytes_transmitted_hi = + le32_to_cpu(xclient->ucast_bytes_sent.hi); + qstats->total_unicast_bytes_transmitted_lo = + le32_to_cpu(xclient->ucast_bytes_sent.lo); + /* + * sum to total_bytes_transmitted all + * unicast/multicast/broadcast + */ + qstats->total_bytes_transmitted_hi = + qstats->total_unicast_bytes_transmitted_hi; + qstats->total_bytes_transmitted_lo = + qstats->total_unicast_bytes_transmitted_lo; + + ADD_64(qstats->total_bytes_transmitted_hi, + qstats->total_broadcast_bytes_transmitted_hi, + qstats->total_bytes_transmitted_lo, + qstats->total_broadcast_bytes_transmitted_lo); + + ADD_64(qstats->total_bytes_transmitted_hi, + qstats->total_multicast_bytes_transmitted_hi, + qstats->total_bytes_transmitted_lo, + 
qstats->total_multicast_bytes_transmitted_lo); + + UPDATE_EXTEND_XSTAT(ucast_pkts_sent, + total_unicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(mcast_pkts_sent, + total_multicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(bcast_pkts_sent, + total_broadcast_packets_transmitted); + + UPDATE_EXTEND_TSTAT(checksum_discard, + total_packets_received_checksum_discarded); + UPDATE_EXTEND_TSTAT(ttl0_discard, + total_packets_received_ttl0_discarded); + + UPDATE_EXTEND_XSTAT(error_drop_pkts, + total_transmitted_dropped_packets_error); + + /* TPA aggregations completed */ + UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations); + /* Number of network frames aggregated by TPA */ + UPDATE_EXTEND_USTAT(coalesced_pkts, + total_tpa_aggregated_frames); + /* Total number of bytes in completed TPA aggregations */ + qstats->total_tpa_bytes_lo = + le32_to_cpu(uclient->coalesced_bytes.lo); + qstats->total_tpa_bytes_hi = + le32_to_cpu(uclient->coalesced_bytes.hi); + + /* TPA stats per-function */ + ADD_64(estats->total_tpa_aggregations_hi, + qstats->total_tpa_aggregations_hi, + estats->total_tpa_aggregations_lo, + qstats->total_tpa_aggregations_lo); + ADD_64(estats->total_tpa_aggregated_frames_hi, + qstats->total_tpa_aggregated_frames_hi, + estats->total_tpa_aggregated_frames_lo, + qstats->total_tpa_aggregated_frames_lo); + ADD_64(estats->total_tpa_bytes_hi, + qstats->total_tpa_bytes_hi, + estats->total_tpa_bytes_lo, + qstats->total_tpa_bytes_lo); + + ADD_64(fstats->total_bytes_received_hi, + qstats->total_bytes_received_hi, + fstats->total_bytes_received_lo, + qstats->total_bytes_received_lo); + ADD_64(fstats->total_bytes_transmitted_hi, + qstats->total_bytes_transmitted_hi, + fstats->total_bytes_transmitted_lo, + qstats->total_bytes_transmitted_lo); + ADD_64(fstats->total_unicast_packets_received_hi, + qstats->total_unicast_packets_received_hi, + fstats->total_unicast_packets_received_lo, + qstats->total_unicast_packets_received_lo); + ADD_64(fstats->total_multicast_packets_received_hi, + qstats->total_multicast_packets_received_hi, + fstats->total_multicast_packets_received_lo, + qstats->total_multicast_packets_received_lo); + ADD_64(fstats->total_broadcast_packets_received_hi, + qstats->total_broadcast_packets_received_hi, + fstats->total_broadcast_packets_received_lo, + qstats->total_broadcast_packets_received_lo); + ADD_64(fstats->total_unicast_packets_transmitted_hi, + qstats->total_unicast_packets_transmitted_hi, + fstats->total_unicast_packets_transmitted_lo, + qstats->total_unicast_packets_transmitted_lo); + ADD_64(fstats->total_multicast_packets_transmitted_hi, + qstats->total_multicast_packets_transmitted_hi, + fstats->total_multicast_packets_transmitted_lo, + qstats->total_multicast_packets_transmitted_lo); + ADD_64(fstats->total_broadcast_packets_transmitted_hi, + qstats->total_broadcast_packets_transmitted_hi, + fstats->total_broadcast_packets_transmitted_lo, + qstats->total_broadcast_packets_transmitted_lo); + ADD_64(fstats->valid_bytes_received_hi, + qstats->valid_bytes_received_hi, + fstats->valid_bytes_received_lo, + qstats->valid_bytes_received_lo); + + ADD_64(estats->etherstatsoverrsizepkts_hi, + qstats->etherstatsoverrsizepkts_hi, + estats->etherstatsoverrsizepkts_lo, + qstats->etherstatsoverrsizepkts_lo); + ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi, + estats->no_buff_discard_lo, qstats->no_buff_discard_lo); + } + + ADD_64(fstats->total_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + fstats->total_bytes_received_lo, + 
estats->rx_stat_ifhcinbadoctets_lo); + + ADD_64(fstats->total_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + fstats->total_bytes_received_lo, + tfunc->rcv_error_bytes.lo); + + memcpy(estats, &(fstats->total_bytes_received_hi), + sizeof(struct host_func_stats) - 2*sizeof(u32)); + + ADD_64(estats->error_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->error_bytes_received_lo, + tfunc->rcv_error_bytes.lo); + + ADD_64(estats->etherstatsoverrsizepkts_hi, + estats->rx_stat_dot3statsframestoolong_hi, + estats->etherstatsoverrsizepkts_lo, + estats->rx_stat_dot3statsframestoolong_lo); + ADD_64(estats->error_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + estats->error_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + if (bp->port.pmf) { + estats->mac_filter_discard = + le32_to_cpu(tport->mac_filter_discard); + estats->mf_tag_discard = + le32_to_cpu(tport->mf_tag_discard); + estats->brb_truncate_discard = + le32_to_cpu(tport->brb_truncate_discard); + estats->mac_discard = le32_to_cpu(tport->mac_discard); + } + + fstats->host_func_stats_start = ++fstats->host_func_stats_end; + + bp->stats_pending = 0; + + return 0; +} + +static void bnx2x_net_stats_update(struct bnx2x *bp) +{ + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct net_device_stats *nstats = &bp->dev->stats; + unsigned long tmp; + int i; + + nstats->rx_packets = + bnx2x_hilo(&estats->total_unicast_packets_received_hi) + + bnx2x_hilo(&estats->total_multicast_packets_received_hi) + + bnx2x_hilo(&estats->total_broadcast_packets_received_hi); + + nstats->tx_packets = + bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) + + bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) + + bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi); + + nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); + + nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); + + tmp = estats->mac_discard; + for_each_rx_queue(bp, i) + tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); + nstats->rx_dropped = tmp; + + nstats->tx_dropped = 0; + + nstats->multicast = + bnx2x_hilo(&estats->total_multicast_packets_received_hi); + + nstats->collisions = + bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi); + + nstats->rx_length_errors = + bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) + + bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi); + nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) + + bnx2x_hilo(&estats->brb_truncate_hi); + nstats->rx_crc_errors = + bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi); + nstats->rx_frame_errors = + bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); + nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); + nstats->rx_missed_errors = 0; + + nstats->rx_errors = nstats->rx_length_errors + + nstats->rx_over_errors + + nstats->rx_crc_errors + + nstats->rx_frame_errors + + nstats->rx_fifo_errors + + nstats->rx_missed_errors; + + nstats->tx_aborted_errors = + bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) + + bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi); + nstats->tx_carrier_errors = + bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi); + nstats->tx_fifo_errors = 0; + nstats->tx_heartbeat_errors = 0; + nstats->tx_window_errors = 0; + + nstats->tx_errors = nstats->tx_aborted_errors + + nstats->tx_carrier_errors + + bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi); +} + +static void bnx2x_drv_stats_update(struct bnx2x *bp) +{ + struct bnx2x_eth_stats *estats = 
&bp->eth_stats; + int i; + + estats->driver_xoff = 0; + estats->rx_err_discard_pkt = 0; + estats->rx_skb_alloc_failed = 0; + estats->hw_csum_err = 0; + for_each_queue(bp, i) { + struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; + + estats->driver_xoff += qstats->driver_xoff; + estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt; + estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed; + estats->hw_csum_err += qstats->hw_csum_err; + } +} + +static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp) +{ + u32 val; + + if (SHMEM2_HAS(bp, edebug_driver_if[1])) { + val = SHMEM2_RD(bp, edebug_driver_if[1]); + + if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) + return true; + } + + return false; +} + +static void bnx2x_stats_update(struct bnx2x *bp) +{ + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + if (bnx2x_edebug_stats_stopped(bp)) + return; + + if (*stats_comp != DMAE_COMP_VAL) + return; + + if (bp->port.pmf) + bnx2x_hw_stats_update(bp); + + if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { + BNX2X_ERR("storm stats were not updated for 3 times\n"); + bnx2x_panic(); + return; + } + + bnx2x_net_stats_update(bp); + bnx2x_drv_stats_update(bp); + + if (netif_msg_timer(bp)) { + struct bnx2x_eth_stats *estats = &bp->eth_stats; + int i, cos; + + netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", + estats->brb_drop_lo, estats->brb_truncate_lo); + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + + printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)" + " rx pkt(%lu) rx calls(%lu %lu)\n", + fp->name, (le16_to_cpu(*fp->rx_cons_sb) - + fp->rx_comp_cons), + le16_to_cpu(*fp->rx_cons_sb), + bnx2x_hilo(&qstats-> + total_unicast_packets_received_hi), + fp->rx_calls, fp->rx_pkt); + } + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_fp_txdata *txdata; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct netdev_queue *txq; + + printk(KERN_DEBUG "%s: tx pkt(%lu) (Xoff events %u)", + fp->name, bnx2x_hilo( + &qstats->total_unicast_packets_transmitted_hi), + qstats->driver_xoff); + + for_each_cos_in_tx_queue(fp, cos) { + txdata = &fp->txdata[cos]; + txq = netdev_get_tx_queue(bp->dev, + FP_COS_TO_TXQ(fp, cos)); + + printk(KERN_DEBUG "%d: tx avail(%4u)" + " *tx_cons_sb(%u)" + " tx calls (%lu)" + " %s\n", + cos, + bnx2x_tx_avail(bp, txdata), + le16_to_cpu(*txdata->tx_cons_sb), + txdata->tx_pkt, + (netif_tx_queue_stopped(txq) ? 
+ "Xoff" : "Xon") + ); + } + } + } + + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); +} + +static void bnx2x_port_stats_stop(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + bp->executer_idx = 0; + + opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0); + + if (bp->port.port_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + if (bp->func_stx) + dmae->opcode = bnx2x_dmae_opcode_add_comp( + opcode, DMAE_COMP_GRC); + else + dmae->opcode = bnx2x_dmae_opcode_add_comp( + opcode, DMAE_COMP_PCI); + + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_port_stats) >> 2; + if (bp->func_stx) { + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } else { + dmae->comp_addr_lo = + U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = + U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } + } + + if (bp->func_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = + bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } +} + +static void bnx2x_stats_stop(struct bnx2x *bp) +{ + int update = 0; + + bnx2x_stats_comp(bp); + + if (bp->port.pmf) + update = (bnx2x_hw_stats_update(bp) == 0); + + update |= (bnx2x_storm_stats_update(bp) == 0); + + if (update) { + bnx2x_net_stats_update(bp); + + if (bp->port.pmf) + bnx2x_port_stats_stop(bp); + + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); + } +} + +static void bnx2x_stats_do_nothing(struct bnx2x *bp) +{ +} + +static const struct { + void (*action)(struct bnx2x *bp); + enum bnx2x_stats_state next_state; +} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { +/* state event */ +{ +/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED}, +/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED}, +/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}, +/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED} +}, +{ +/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED}, +/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED}, +/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED}, +/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED} +} +}; + +void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) +{ + enum bnx2x_stats_state state; + if (unlikely(bp->panic)) + return; + bnx2x_stats_stm[bp->stats_state][event].action(bp); + spin_lock_bh(&bp->stats_lock); + state = bp->stats_state; + bp->stats_state = bnx2x_stats_stm[state][event].next_state; + spin_unlock_bh(&bp->stats_lock); + + if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) + DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", + state, event, bp->stats_state); +} + +static void bnx2x_port_stats_base_init(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 *stats_comp = bnx2x_sp(bp, 
stats_comp); + + /* sanity */ + if (!bp->port.pmf || !bp->port.port_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, + true, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_port_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + +static void bnx2x_func_stats_base_init(struct bnx2x *bp) +{ + int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; + u32 func_stx; + + /* sanity */ + if (!bp->port.pmf || !bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + /* save our func_stx */ + func_stx = bp->func_stx; + + for (vn = VN_0; vn < vn_max; vn++) { + int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; + + bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); + bnx2x_func_stats_init(bp); + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); + } + + /* restore our func_stx */ + bp->func_stx = func_stx; +} + +static void bnx2x_func_stats_base_update(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, + true, DMAE_COMP_PCI); + dmae->src_addr_lo = bp->func_stx >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base)); + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + +/** + * bnx2x_prep_fw_stats_req - prepare the statistics ramrod data. + * + * @bp: driver handle + * + * Prepare the statistics ramrod data once up front, so that afterwards we + * only have to increment the statistics counter and send the ramrod each + * time an update is needed. + */ +static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) +{ + int i; + struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr; + + dma_addr_t cur_data_offset; + struct stats_query_entry *cur_query_entry; + + stats_hdr->cmd_num = bp->fw_stats_num; + stats_hdr->drv_stats_counter = 0; + + /* storm_counters struct contains the counters of completed + * statistics requests per storm which are incremented by FW + * each time it completes handling a statistics ramrod. We will + * check these counters in the timer handler and discard a + * stale (statistics) ramrod completion. + */ + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, storm_counters); + + stats_hdr->stats_counters_addrs.hi = + cpu_to_le32(U64_HI(cur_data_offset)); + stats_hdr->stats_counters_addrs.lo = + cpu_to_le32(U64_LO(cur_data_offset)); + + /* prepare for the first stats ramrod (will be completed with + * the counters equal to zero) - init the counters to something different.
+ */ + memset(&bp->fw_stats_data->storm_counters, 0xff, + sizeof(struct stats_counter)); + + /**** Port FW statistics data ****/ + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, port); + + cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; + + cur_query_entry->kind = STATS_TYPE_PORT; + /* For port query index is a DONT CARE */ + cur_query_entry->index = BP_PORT(bp); + /* For port query funcID is a DONT CARE */ + cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); + cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); + + /**** PF FW statistics data ****/ + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, pf); + + cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; + + cur_query_entry->kind = STATS_TYPE_PF; + /* For PF query index is a DONT CARE */ + cur_query_entry->index = BP_PORT(bp); + cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); + cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); + + /**** Clients' queries ****/ + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats); + + for_each_eth_queue(bp, i) { + cur_query_entry = + &bp->fw_stats_req-> + query[BNX2X_FIRST_QUEUE_QUERY_IDX + i]; + + cur_query_entry->kind = STATS_TYPE_QUEUE; + cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); + cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); + cur_query_entry->address.hi = + cpu_to_le32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = + cpu_to_le32(U64_LO(cur_data_offset)); + + cur_data_offset += sizeof(struct per_queue_stats); + } +} + +void bnx2x_stats_init(struct bnx2x *bp) +{ + int /*abs*/port = BP_PORT(bp); + int mb_idx = BP_FW_MB_IDX(bp); + int i; + + bp->stats_pending = 0; + bp->executer_idx = 0; + bp->stats_counter = 0; + + /* port and func stats for management */ + if (!BP_NOMCP(bp)) { + bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); + bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); + + } else { + bp->port.port_stx = 0; + bp->func_stx = 0; + } + DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", + bp->port.port_stx, bp->func_stx); + + port = BP_PORT(bp); + /* port stats */ + memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); + bp->port.old_nig_stats.brb_discard = + REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); + bp->port.old_nig_stats.brb_truncate = + REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); + if (!CHIP_IS_E3(bp)) { + REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, + &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); + REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, + &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); + } + + /* function stats */ + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); + memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); + memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); + memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); + } + + /* Prepare statistics ramrod data */ + bnx2x_prep_fw_stats_req(bp); + + memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); + memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); + + bp->stats_state = STATS_STATE_DISABLED; + + if (bp->port.pmf) { + if (bp->port.port_stx) + bnx2x_port_stats_base_init(bp); + + if (bp->func_stx) + 
bnx2x_func_stats_base_init(bp); + + } else if (bp->func_stx) + bnx2x_func_stats_base_update(bp); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h new file mode 100644 index 0000000..5d8ce2f --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -0,0 +1,381 @@ +/* bnx2x_stats.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ +#ifndef BNX2X_STATS_H +#define BNX2X_STATS_H + +#include + +struct nig_stats { + u32 brb_discard; + u32 brb_packet; + u32 brb_truncate; + u32 flow_ctrl_discard; + u32 flow_ctrl_octets; + u32 flow_ctrl_packet; + u32 mng_discard; + u32 mng_octet_inp; + u32 mng_octet_out; + u32 mng_packet_inp; + u32 mng_packet_out; + u32 pbf_octets; + u32 pbf_packet; + u32 safc_inp; + u32 egress_mac_pkt0_lo; + u32 egress_mac_pkt0_hi; + u32 egress_mac_pkt1_lo; + u32 egress_mac_pkt1_hi; +}; + + +enum bnx2x_stats_event { + STATS_EVENT_PMF = 0, + STATS_EVENT_LINK_UP, + STATS_EVENT_UPDATE, + STATS_EVENT_STOP, + STATS_EVENT_MAX +}; + +enum bnx2x_stats_state { + STATS_STATE_DISABLED = 0, + STATS_STATE_ENABLED, + STATS_STATE_MAX +}; + +struct bnx2x_eth_stats { + u32 total_bytes_received_hi; + u32 total_bytes_received_lo; + u32 total_bytes_transmitted_hi; + u32 total_bytes_transmitted_lo; + u32 total_unicast_packets_received_hi; + u32 total_unicast_packets_received_lo; + u32 total_multicast_packets_received_hi; + u32 total_multicast_packets_received_lo; + u32 total_broadcast_packets_received_hi; + u32 total_broadcast_packets_received_lo; + u32 total_unicast_packets_transmitted_hi; + u32 total_unicast_packets_transmitted_lo; + u32 total_multicast_packets_transmitted_hi; + u32 total_multicast_packets_transmitted_lo; + u32 total_broadcast_packets_transmitted_hi; + u32 total_broadcast_packets_transmitted_lo; + u32 valid_bytes_received_hi; + u32 valid_bytes_received_lo; + + u32 error_bytes_received_hi; + u32 error_bytes_received_lo; + u32 etherstatsoverrsizepkts_hi; + u32 etherstatsoverrsizepkts_lo; + u32 no_buff_discard_hi; + u32 no_buff_discard_lo; + + u32 rx_stat_ifhcinbadoctets_hi; + u32 rx_stat_ifhcinbadoctets_lo; + u32 tx_stat_ifhcoutbadoctets_hi; + u32 tx_stat_ifhcoutbadoctets_lo; + u32 rx_stat_dot3statsfcserrors_hi; + u32 rx_stat_dot3statsfcserrors_lo; + u32 rx_stat_dot3statsalignmenterrors_hi; + u32 rx_stat_dot3statsalignmenterrors_lo; + u32 rx_stat_dot3statscarriersenseerrors_hi; + u32 rx_stat_dot3statscarriersenseerrors_lo; + u32 rx_stat_falsecarriererrors_hi; + u32 rx_stat_falsecarriererrors_lo; + u32 rx_stat_etherstatsundersizepkts_hi; + u32 rx_stat_etherstatsundersizepkts_lo; + u32 rx_stat_dot3statsframestoolong_hi; + u32 rx_stat_dot3statsframestoolong_lo; + u32 rx_stat_etherstatsfragments_hi; + u32 rx_stat_etherstatsfragments_lo; + u32 rx_stat_etherstatsjabbers_hi; + u32 rx_stat_etherstatsjabbers_lo; + u32 rx_stat_maccontrolframesreceived_hi; + u32 rx_stat_maccontrolframesreceived_lo; + u32 rx_stat_bmac_xpf_hi; + u32 rx_stat_bmac_xpf_lo; + u32 rx_stat_bmac_xcf_hi; + u32 rx_stat_bmac_xcf_lo; + u32 
rx_stat_xoffstateentered_hi; + u32 rx_stat_xoffstateentered_lo; + u32 rx_stat_xonpauseframesreceived_hi; + u32 rx_stat_xonpauseframesreceived_lo; + u32 rx_stat_xoffpauseframesreceived_hi; + u32 rx_stat_xoffpauseframesreceived_lo; + u32 tx_stat_outxonsent_hi; + u32 tx_stat_outxonsent_lo; + u32 tx_stat_outxoffsent_hi; + u32 tx_stat_outxoffsent_lo; + u32 tx_stat_flowcontroldone_hi; + u32 tx_stat_flowcontroldone_lo; + u32 tx_stat_etherstatscollisions_hi; + u32 tx_stat_etherstatscollisions_lo; + u32 tx_stat_dot3statssinglecollisionframes_hi; + u32 tx_stat_dot3statssinglecollisionframes_lo; + u32 tx_stat_dot3statsmultiplecollisionframes_hi; + u32 tx_stat_dot3statsmultiplecollisionframes_lo; + u32 tx_stat_dot3statsdeferredtransmissions_hi; + u32 tx_stat_dot3statsdeferredtransmissions_lo; + u32 tx_stat_dot3statsexcessivecollisions_hi; + u32 tx_stat_dot3statsexcessivecollisions_lo; + u32 tx_stat_dot3statslatecollisions_hi; + u32 tx_stat_dot3statslatecollisions_lo; + u32 tx_stat_etherstatspkts64octets_hi; + u32 tx_stat_etherstatspkts64octets_lo; + u32 tx_stat_etherstatspkts65octetsto127octets_hi; + u32 tx_stat_etherstatspkts65octetsto127octets_lo; + u32 tx_stat_etherstatspkts128octetsto255octets_hi; + u32 tx_stat_etherstatspkts128octetsto255octets_lo; + u32 tx_stat_etherstatspkts256octetsto511octets_hi; + u32 tx_stat_etherstatspkts256octetsto511octets_lo; + u32 tx_stat_etherstatspkts512octetsto1023octets_hi; + u32 tx_stat_etherstatspkts512octetsto1023octets_lo; + u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; + u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; + u32 tx_stat_etherstatspktsover1522octets_hi; + u32 tx_stat_etherstatspktsover1522octets_lo; + u32 tx_stat_bmac_2047_hi; + u32 tx_stat_bmac_2047_lo; + u32 tx_stat_bmac_4095_hi; + u32 tx_stat_bmac_4095_lo; + u32 tx_stat_bmac_9216_hi; + u32 tx_stat_bmac_9216_lo; + u32 tx_stat_bmac_16383_hi; + u32 tx_stat_bmac_16383_lo; + u32 tx_stat_dot3statsinternalmactransmiterrors_hi; + u32 tx_stat_dot3statsinternalmactransmiterrors_lo; + u32 tx_stat_bmac_ufl_hi; + u32 tx_stat_bmac_ufl_lo; + + u32 pause_frames_received_hi; + u32 pause_frames_received_lo; + u32 pause_frames_sent_hi; + u32 pause_frames_sent_lo; + + u32 etherstatspkts1024octetsto1522octets_hi; + u32 etherstatspkts1024octetsto1522octets_lo; + u32 etherstatspktsover1522octets_hi; + u32 etherstatspktsover1522octets_lo; + + u32 brb_drop_hi; + u32 brb_drop_lo; + u32 brb_truncate_hi; + u32 brb_truncate_lo; + + u32 mac_filter_discard; + u32 mf_tag_discard; + u32 brb_truncate_discard; + u32 mac_discard; + + u32 driver_xoff; + u32 rx_err_discard_pkt; + u32 rx_skb_alloc_failed; + u32 hw_csum_err; + + u32 nig_timer_max; + + /* TPA */ + u32 total_tpa_aggregations_hi; + u32 total_tpa_aggregations_lo; + u32 total_tpa_aggregated_frames_hi; + u32 total_tpa_aggregated_frames_lo; + u32 total_tpa_bytes_hi; + u32 total_tpa_bytes_lo; +}; + + +struct bnx2x_eth_q_stats { + u32 total_unicast_bytes_received_hi; + u32 total_unicast_bytes_received_lo; + u32 total_broadcast_bytes_received_hi; + u32 total_broadcast_bytes_received_lo; + u32 total_multicast_bytes_received_hi; + u32 total_multicast_bytes_received_lo; + u32 total_bytes_received_hi; + u32 total_bytes_received_lo; + u32 total_unicast_bytes_transmitted_hi; + u32 total_unicast_bytes_transmitted_lo; + u32 total_broadcast_bytes_transmitted_hi; + u32 total_broadcast_bytes_transmitted_lo; + u32 total_multicast_bytes_transmitted_hi; + u32 total_multicast_bytes_transmitted_lo; + u32 total_bytes_transmitted_hi; + u32 total_bytes_transmitted_lo; + u32 
total_unicast_packets_received_hi; + u32 total_unicast_packets_received_lo; + u32 total_multicast_packets_received_hi; + u32 total_multicast_packets_received_lo; + u32 total_broadcast_packets_received_hi; + u32 total_broadcast_packets_received_lo; + u32 total_unicast_packets_transmitted_hi; + u32 total_unicast_packets_transmitted_lo; + u32 total_multicast_packets_transmitted_hi; + u32 total_multicast_packets_transmitted_lo; + u32 total_broadcast_packets_transmitted_hi; + u32 total_broadcast_packets_transmitted_lo; + u32 valid_bytes_received_hi; + u32 valid_bytes_received_lo; + + u32 etherstatsoverrsizepkts_hi; + u32 etherstatsoverrsizepkts_lo; + u32 no_buff_discard_hi; + u32 no_buff_discard_lo; + + u32 driver_xoff; + u32 rx_err_discard_pkt; + u32 rx_skb_alloc_failed; + u32 hw_csum_err; + + u32 total_packets_received_checksum_discarded_hi; + u32 total_packets_received_checksum_discarded_lo; + u32 total_packets_received_ttl0_discarded_hi; + u32 total_packets_received_ttl0_discarded_lo; + u32 total_transmitted_dropped_packets_error_hi; + u32 total_transmitted_dropped_packets_error_lo; + + /* TPA */ + u32 total_tpa_aggregations_hi; + u32 total_tpa_aggregations_lo; + u32 total_tpa_aggregated_frames_hi; + u32 total_tpa_aggregated_frames_lo; + u32 total_tpa_bytes_hi; + u32 total_tpa_bytes_lo; +}; + +/**************************************************************************** +* Macros +****************************************************************************/ + +/* sum[hi:lo] += add[hi:lo] */ +#define ADD_64(s_hi, a_hi, s_lo, a_lo) \ + do { \ + s_lo += a_lo; \ + s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ + } while (0) + +/* difference = minuend - subtrahend */ +#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ + do { \ + if (m_lo < s_lo) { \ + /* underflow */ \ + d_hi = m_hi - s_hi; \ + if (d_hi > 0) { \ + /* we can 'loan' 1 */ \ + d_hi--; \ + d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ + } else { \ + /* m_hi <= s_hi */ \ + d_hi = 0; \ + d_lo = 0; \ + } \ + } else { \ + /* m_lo >= s_lo */ \ + if (m_hi < s_hi) { \ + d_hi = 0; \ + d_lo = 0; \ + } else { \ + /* m_hi >= s_hi */ \ + d_hi = m_hi - s_hi; \ + d_lo = m_lo - s_lo; \ + } \ + } \ + } while (0) + +#define UPDATE_STAT64(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ + diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ + pstats->mac_stx[0].t##_hi = new->s##_hi; \ + pstats->mac_stx[0].t##_lo = new->s##_lo; \ + ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ + pstats->mac_stx[1].t##_lo, diff.lo); \ + } while (0) + +#define UPDATE_STAT64_NIG(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ + diff.lo, new->s##_lo, old->s##_lo); \ + ADD_64(estats->t##_hi, diff.hi, \ + estats->t##_lo, diff.lo); \ + } while (0) + +/* sum[hi:lo] += add */ +#define ADD_EXTEND_64(s_hi, s_lo, a) \ + do { \ + s_lo += a; \ + s_hi += (s_lo < a) ? 
1 : 0; \ + } while (0) + +#define ADD_STAT64(diff, t) \ + do { \ + ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \ + pstats->mac_stx[1].t##_lo, new->diff##_lo); \ + } while (0) + +#define UPDATE_EXTEND_STAT(s) \ + do { \ + ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ + pstats->mac_stx[1].s##_lo, \ + new->s); \ + } while (0) + +#define UPDATE_EXTEND_TSTAT(s, t) \ + do { \ + diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \ + old_tclient->s = tclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_USTAT(s, t) \ + do { \ + diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ + old_uclient->s = uclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_XSTAT(s, t) \ + do { \ + diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ + old_xclient->s = xclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +/* minuend -= subtrahend */ +#define SUB_64(m_hi, s_hi, m_lo, s_lo) \ + do { \ + DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ + } while (0) + +/* minuend[hi:lo] -= subtrahend */ +#define SUB_EXTEND_64(m_hi, m_lo, s) \ + do { \ + SUB_64(m_hi, 0, m_lo, s); \ + } while (0) + +#define SUB_EXTEND_USTAT(s, t) \ + do { \ + diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ + SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + + +/* forward */ +struct bnx2x; + +void bnx2x_stats_init(struct bnx2x *bp); + +void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); + +#endif /* BNX2X_STATS_H */ diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c new file mode 100644 index 0000000..7698161 --- /dev/null +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -0,0 +1,5487 @@ +/* cnic.c: Broadcom CNIC core network driver. + * + * Copyright (c) 2006-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
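
For reference, the ADD_64/DIFF_64 macros defined in bnx2x_stats.h above keep every 64-bit counter as a split hi/lo pair of u32 words, because the MAC blocks and firmware export statistics as two 32-bit halves. A minimal standalone C sketch of the same carry/borrow rules (the split64 names are illustrative, not from the driver):

	#include <stdint.h>

	struct split64 {
		uint32_t hi, lo;	/* one 64-bit counter as two 32-bit halves */
	};

	/* s += a: ADD_64's rule - carry into hi when the low word wraps */
	static void split64_add(struct split64 *s, struct split64 a)
	{
		s->lo += a.lo;
		s->hi += a.hi + (s->lo < a.lo ? 1 : 0);
	}

	/* d = m - s: DIFF_64's rule - saturate to zero on underflow */
	static void split64_sub(struct split64 *d, struct split64 m,
				struct split64 s)
	{
		uint64_t mv = ((uint64_t)m.hi << 32) | m.lo;
		uint64_t sv = ((uint64_t)s.hi << 32) | s.lo;
		uint64_t dv = (mv > sv) ? mv - sv : 0;

		d->hi = (uint32_t)(dv >> 32);
		d->lo = (uint32_t)dv;
	}
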
+ * + * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com) + * Modified and maintained by: Michael Chan + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define BCM_VLAN 1 +#endif +#include +#include +#include +#include +#include +#include +#include + +#include "cnic_if.h" +#include "bnx2.h" +#include "bnx2x/bnx2x_reg.h" +#include "bnx2x/bnx2x_fw_defs.h" +#include "bnx2x/bnx2x_hsi.h" +#include "../../../scsi/bnx2i/57xx_iscsi_constants.h" +#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h" +#include "cnic.h" +#include "cnic_defs.h" + +#define DRV_MODULE_NAME "cnic" + +static char version[] __devinitdata = + "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Michael Chan and John(Zongxi) " + "Chen (zongxi@broadcom.com"); +MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(CNIC_MODULE_VERSION); + +/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */ +static LIST_HEAD(cnic_dev_list); +static LIST_HEAD(cnic_udev_list); +static DEFINE_RWLOCK(cnic_dev_lock); +static DEFINE_MUTEX(cnic_lock); + +static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; + +/* helper function, assuming cnic_lock is held */ +static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type) +{ + return rcu_dereference_protected(cnic_ulp_tbl[type], + lockdep_is_held(&cnic_lock)); +} + +static int cnic_service_bnx2(void *, void *); +static int cnic_service_bnx2x(void *, void *); +static int cnic_ctl(void *, struct cnic_ctl_info *); + +static struct cnic_ops cnic_bnx2_ops = { + .cnic_owner = THIS_MODULE, + .cnic_handler = cnic_service_bnx2, + .cnic_ctl = cnic_ctl, +}; + +static struct cnic_ops cnic_bnx2x_ops = { + .cnic_owner = THIS_MODULE, + .cnic_handler = cnic_service_bnx2x, + .cnic_ctl = cnic_ctl, +}; + +static struct workqueue_struct *cnic_wq; + +static void cnic_shutdown_rings(struct cnic_dev *); +static void cnic_init_rings(struct cnic_dev *); +static int cnic_cm_set_pg(struct cnic_sock *); + +static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) +{ + struct cnic_uio_dev *udev = uinfo->priv; + struct cnic_dev *dev; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (udev->uio_dev != -1) + return -EBUSY; + + rtnl_lock(); + dev = udev->dev; + + if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) { + rtnl_unlock(); + return -ENODEV; + } + + udev->uio_dev = iminor(inode); + + cnic_shutdown_rings(dev); + cnic_init_rings(dev); + rtnl_unlock(); + + return 0; +} + +static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) +{ + struct cnic_uio_dev *udev = uinfo->priv; + + udev->uio_dev = -1; + return 0; +} + +static inline void cnic_hold(struct cnic_dev *dev) +{ + atomic_inc(&dev->ref_count); +} + +static inline void cnic_put(struct cnic_dev *dev) +{ + atomic_dec(&dev->ref_count); +} + +static inline void csk_hold(struct cnic_sock *csk) +{ + atomic_inc(&csk->ref_count); +} + +static inline void csk_put(struct cnic_sock *csk) +{ + atomic_dec(&csk->ref_count); +} + +static struct cnic_dev *cnic_from_netdev(struct net_device *netdev) +{ + struct cnic_dev *cdev; + + read_lock(&cnic_dev_lock); + list_for_each_entry(cdev, &cnic_dev_list, list) { + if (netdev == cdev->netdev) { + cnic_hold(cdev); 
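
cnic_from_netdev() above shows the lookup idiom used throughout this file: the reference count is raised with cnic_hold() while cnic_dev_lock is still held, so the device cannot go away between being found and being returned; the caller owns the reference and must drop it with cnic_put(). The same pattern for a generic refcounted list entry (a sketch, with assumed types):

	struct obj {
		int key;
		atomic_t ref_count;		/* as in struct cnic_dev */
		struct list_head list;
	};

	static struct obj *obj_lookup(struct list_head *head, rwlock_t *lock,
				      int key)
	{
		struct obj *o;

		read_lock(lock);
		list_for_each_entry(o, head, list) {
			if (o->key == key) {
				atomic_inc(&o->ref_count); /* pin under lock */
				read_unlock(lock);
				return o; /* caller must drop the reference */
			}
		}
		read_unlock(lock);
		return NULL;
	}
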
+ read_unlock(&cnic_dev_lock); + return cdev; + } + } + read_unlock(&cnic_dev_lock); + return NULL; +} + +static inline void ulp_get(struct cnic_ulp_ops *ulp_ops) +{ + atomic_inc(&ulp_ops->ref_count); +} + +static inline void ulp_put(struct cnic_ulp_ops *ulp_ops) +{ + atomic_dec(&ulp_ops->ref_count); +} + +static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_CTX_WR_CMD; + io->cid_addr = cid_addr; + io->offset = off; + io->data = val; + ethdev->drv_ctl(dev->netdev, &info); +} + +static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_CTXTBL_WR_CMD; + io->offset = off; + io->dma_addr = addr; + ethdev->drv_ctl(dev->netdev, &info); +} + +static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_l2_ring *ring = &info.data.ring; + + if (start) + info.cmd = DRV_CTL_START_L2_CMD; + else + info.cmd = DRV_CTL_STOP_L2_CMD; + + ring->cid = cid; + ring->client_id = cl_id; + ethdev->drv_ctl(dev->netdev, &info); +} + +static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_IO_WR_CMD; + io->offset = off; + io->data = val; + ethdev->drv_ctl(dev->netdev, &info); +} + +static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_IO_RD_CMD; + io->offset = off; + ethdev->drv_ctl(dev->netdev, &info); + return io->data; +} + +static int cnic_in_use(struct cnic_sock *csk) +{ + return test_bit(SK_F_INUSE, &csk->flags); +} + +static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + + info.cmd = cmd; + info.data.credit.credit_count = count; + ethdev->drv_ctl(dev->netdev, &info); +} + +static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid) +{ + u32 i; + + for (i = 0; i < cp->max_cid_space; i++) { + if (cp->ctx_tbl[i].cid == cid) { + *l5_cid = i; + return 0; + } + } + return -EINVAL; +} + +static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, + struct cnic_sock *csk) +{ + struct iscsi_path path_req; + char *buf = NULL; + u16 len = 0; + u32 msg_type = ISCSI_KEVENT_IF_DOWN; + struct cnic_ulp_ops *ulp_ops; + struct cnic_uio_dev *udev = cp->udev; + int rc = 0, retry = 0; + + if (!udev || udev->uio_dev == -1) + return -ENODEV; + + if (csk) { + len = sizeof(path_req); + buf = (char *) &path_req; + memset(&path_req, 0, len); + + msg_type = ISCSI_KEVENT_PATH_REQ; + path_req.handle = (u64) csk->l5_cid; + if (test_bit(SK_F_IPV6, &csk->flags)) { + memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0], + sizeof(struct in6_addr)); + path_req.ip_addr_len = 16; + } else { + memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0], + sizeof(struct in_addr)); + path_req.ip_addr_len 
= 4; + } + path_req.vlan_id = csk->vlan_id; + path_req.pmtu = csk->mtu; + } + + while (retry < 3) { + rc = 0; + rcu_read_lock(); + ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); + if (ulp_ops) + rc = ulp_ops->iscsi_nl_send_msg( + cp->ulp_handle[CNIC_ULP_ISCSI], + msg_type, buf, len); + rcu_read_unlock(); + if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ) + break; + + msleep(100); + retry++; + } + return rc; +} + +static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8); + +static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, + char *buf, u16 len) +{ + int rc = -EINVAL; + + switch (msg_type) { + case ISCSI_UEVENT_PATH_UPDATE: { + struct cnic_local *cp; + u32 l5_cid; + struct cnic_sock *csk; + struct iscsi_path *path_resp; + + if (len < sizeof(*path_resp)) + break; + + path_resp = (struct iscsi_path *) buf; + cp = dev->cnic_priv; + l5_cid = (u32) path_resp->handle; + if (l5_cid >= MAX_CM_SK_TBL_SZ) + break; + + rcu_read_lock(); + if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) { + rc = -ENODEV; + rcu_read_unlock(); + break; + } + csk = &cp->csk_tbl[l5_cid]; + csk_hold(csk); + if (cnic_in_use(csk) && + test_bit(SK_F_CONNECT_START, &csk->flags)) { + + memcpy(csk->ha, path_resp->mac_addr, 6); + if (test_bit(SK_F_IPV6, &csk->flags)) + memcpy(&csk->src_ip[0], &path_resp->src.v6_addr, + sizeof(struct in6_addr)); + else + memcpy(&csk->src_ip[0], &path_resp->src.v4_addr, + sizeof(struct in_addr)); + + if (is_valid_ether_addr(csk->ha)) { + cnic_cm_set_pg(csk); + } else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) && + !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + + cnic_cm_upcall(cp, csk, + L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); + clear_bit(SK_F_CONNECT_START, &csk->flags); + } + } + csk_put(csk); + rcu_read_unlock(); + rc = 0; + } + } + + return rc; +} + +static int cnic_offld_prep(struct cnic_sock *csk) +{ + if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + return 0; + + if (!test_bit(SK_F_CONNECT_START, &csk->flags)) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + return 0; + } + + return 1; +} + +static int cnic_close_prep(struct cnic_sock *csk) +{ + clear_bit(SK_F_CONNECT_START, &csk->flags); + smp_mb__after_clear_bit(); + + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + msleep(1); + + return 1; + } + return 0; +} + +static int cnic_abort_prep(struct cnic_sock *csk) +{ + clear_bit(SK_F_CONNECT_START, &csk->flags); + smp_mb__after_clear_bit(); + + while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + msleep(1); + + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; + return 1; + } + + return 0; +} + +int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) +{ + struct cnic_dev *dev; + + if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { + pr_err("%s: Bad type %d\n", __func__, ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (cnic_ulp_tbl_prot(ulp_type)) { + pr_err("%s: Type %d has already been registered\n", + __func__, ulp_type); + mutex_unlock(&cnic_lock); + return -EBUSY; + } + + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]); + } + read_unlock(&cnic_dev_lock); + + atomic_set(&ulp_ops->ref_count, 0); + rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops); + mutex_unlock(&cnic_lock); + + /* Prevent race conditions with netdev_event */ + rtnl_lock(); + 
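
cnic_register_driver() publishes the new ulp_ops through rcu_assign_pointer() after validating the slot under cnic_lock, while readers use rcu_dereference() inside rcu_read_lock() and updaters use rcu_dereference_protected() with a lockdep annotation. Stripped of the cnic specifics, the publish/read halves of that pattern look like this (a sketch; tbl, MAX_TYPES and struct ops are assumptions, not driver names):

	struct ops {
		void (*handler)(void);
		atomic_t ref_count;
	};

	static DEFINE_MUTEX(tbl_lock);
	static struct ops __rcu *tbl[MAX_TYPES];

	static int register_ops(int type, struct ops *ops)
	{
		mutex_lock(&tbl_lock);
		if (rcu_dereference_protected(tbl[type],
					      lockdep_is_held(&tbl_lock))) {
			mutex_unlock(&tbl_lock);
			return -EBUSY;		/* slot already taken */
		}
		rcu_assign_pointer(tbl[type], ops);	/* publish */
		mutex_unlock(&tbl_lock);
		return 0;
	}

	static void call_ops(int type)
	{
		struct ops *ops;

		rcu_read_lock();
		ops = rcu_dereference(tbl[type]);
		if (ops)
			ops->handler();	/* valid until rcu_read_unlock() */
		rcu_read_unlock();
	}
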
list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type])) + ulp_ops->cnic_init(dev); + } + rtnl_unlock(); + + return 0; +} + +int cnic_unregister_driver(int ulp_type) +{ + struct cnic_dev *dev; + struct cnic_ulp_ops *ulp_ops; + int i = 0; + + if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { + pr_err("%s: Bad type %d\n", __func__, ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + ulp_ops = cnic_ulp_tbl_prot(ulp_type); + if (!ulp_ops) { + pr_err("%s: Type %d has not been registered\n", + __func__, ulp_type); + goto out_unlock; + } + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + if (rcu_dereference(cp->ulp_ops[ulp_type])) { + pr_err("%s: Type %d still has devices registered\n", + __func__, ulp_type); + read_unlock(&cnic_dev_lock); + goto out_unlock; + } + } + read_unlock(&cnic_dev_lock); + + rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); + + mutex_unlock(&cnic_lock); + synchronize_rcu(); + while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) { + msleep(100); + i++; + } + + if (atomic_read(&ulp_ops->ref_count) != 0) + netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n"); + return 0; + +out_unlock: + mutex_unlock(&cnic_lock); + return -EINVAL; +} + +static int cnic_start_hw(struct cnic_dev *); +static void cnic_stop_hw(struct cnic_dev *); + +static int cnic_register_device(struct cnic_dev *dev, int ulp_type, + void *ulp_ctx) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_ulp_ops *ulp_ops; + + if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { + pr_err("%s: Bad type %d\n", __func__, ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (cnic_ulp_tbl_prot(ulp_type) == NULL) { + pr_err("%s: Driver with type %d has not been registered\n", + __func__, ulp_type); + mutex_unlock(&cnic_lock); + return -EAGAIN; + } + if (rcu_dereference(cp->ulp_ops[ulp_type])) { + pr_err("%s: Type %d has already been registered to this device\n", + __func__, ulp_type); + mutex_unlock(&cnic_lock); + return -EBUSY; + } + + clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); + cp->ulp_handle[ulp_type] = ulp_ctx; + ulp_ops = cnic_ulp_tbl_prot(ulp_type); + rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); + cnic_hold(dev); + + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) + if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type])) + ulp_ops->cnic_start(cp->ulp_handle[ulp_type]); + + mutex_unlock(&cnic_lock); + + return 0; + +} +EXPORT_SYMBOL(cnic_register_driver); + +static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) +{ + struct cnic_local *cp = dev->cnic_priv; + int i = 0; + + if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { + pr_err("%s: Bad type %d\n", __func__, ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (rcu_dereference(cp->ulp_ops[ulp_type])) { + rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL); + cnic_put(dev); + } else { + pr_err("%s: device not registered to this ulp type %d\n", + __func__, ulp_type); + mutex_unlock(&cnic_lock); + return -EINVAL; + } + mutex_unlock(&cnic_lock); + + if (ulp_type == CNIC_ULP_ISCSI) + cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); + + synchronize_rcu(); + + while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) && + i < 20) { + msleep(100); + i++; + } + if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) + netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n"); + + 
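
Both unregister paths above tear down in the same order: unpublish the RCU pointer, call synchronize_rcu() so no reader can still be using it, then poll the outstanding reference count with a bounded msleep() loop (20 iterations of 100ms) instead of blocking forever, and warn if users remain. Continuing the sketch from the previous example:

	static int unregister_ops(int type)
	{
		struct ops *ops;
		int i;

		mutex_lock(&tbl_lock);
		ops = rcu_dereference_protected(tbl[type],
						lockdep_is_held(&tbl_lock));
		if (!ops) {
			mutex_unlock(&tbl_lock);
			return -EINVAL;
		}
		rcu_assign_pointer(tbl[type], NULL);	/* unpublish */
		mutex_unlock(&tbl_lock);

		synchronize_rcu();	/* drain current RCU readers */

		/* bounded wait for references, ~2s worst case */
		for (i = 0; i < 20 && atomic_read(&ops->ref_count); i++)
			msleep(100);

		return atomic_read(&ops->ref_count) ? -EBUSY : 0;
	}
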
return 0; +} +EXPORT_SYMBOL(cnic_unregister_driver); + +static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id, + u32 next) +{ + id_tbl->start = start_id; + id_tbl->max = size; + id_tbl->next = next; + spin_lock_init(&id_tbl->lock); + id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL); + if (!id_tbl->table) + return -ENOMEM; + + return 0; +} + +static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl) +{ + kfree(id_tbl->table); + id_tbl->table = NULL; +} + +static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id) +{ + int ret = -1; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return ret; + + spin_lock(&id_tbl->lock); + if (!test_bit(id, id_tbl->table)) { + set_bit(id, id_tbl->table); + ret = 0; + } + spin_unlock(&id_tbl->lock); + return ret; +} + +/* Returns -1 if not successful */ +static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl) +{ + u32 id; + + spin_lock(&id_tbl->lock); + id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); + if (id >= id_tbl->max) { + id = -1; + if (id_tbl->next != 0) { + id = find_first_zero_bit(id_tbl->table, id_tbl->next); + if (id >= id_tbl->next) + id = -1; + } + } + + if (id < id_tbl->max) { + set_bit(id, id_tbl->table); + id_tbl->next = (id + 1) & (id_tbl->max - 1); + id += id_tbl->start; + } + + spin_unlock(&id_tbl->lock); + + return id; +} + +static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id) +{ + if (id == -1) + return; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return; + + clear_bit(id, id_tbl->table); +} + +static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) +{ + int i; + + if (!dma->pg_arr) + return; + + for (i = 0; i < dma->num_pages; i++) { + if (dma->pg_arr[i]) { + dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE, + dma->pg_arr[i], dma->pg_map_arr[i]); + dma->pg_arr[i] = NULL; + } + } + if (dma->pgtbl) { + dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size, + dma->pgtbl, dma->pgtbl_map); + dma->pgtbl = NULL; + } + kfree(dma->pg_arr); + dma->pg_arr = NULL; + dma->num_pages = 0; +} + +static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) +{ + int i; + __le32 *page_table = (__le32 *) dma->pgtbl; + + for (i = 0; i < dma->num_pages; i++) { + /* Each entry needs to be in big endian format. */ + *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); + page_table++; + *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); + page_table++; + } +} + +static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) +{ + int i; + __le32 *page_table = (__le32 *) dma->pgtbl; + + for (i = 0; i < dma->num_pages; i++) { + /* Each entry needs to be in little endian format. 
*/ + *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); + page_table++; + *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); + page_table++; + } +} + +static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, + int pages, int use_pg_tbl) +{ + int i, size; + struct cnic_local *cp = dev->cnic_priv; + + size = pages * (sizeof(void *) + sizeof(dma_addr_t)); + dma->pg_arr = kzalloc(size, GFP_ATOMIC); + if (dma->pg_arr == NULL) + return -ENOMEM; + + dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages); + dma->num_pages = pages; + + for (i = 0; i < pages; i++) { + dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, + BCM_PAGE_SIZE, + &dma->pg_map_arr[i], + GFP_ATOMIC); + if (dma->pg_arr[i] == NULL) + goto error; + } + if (!use_pg_tbl) + return 0; + + dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) & + ~(BCM_PAGE_SIZE - 1); + dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, + &dma->pgtbl_map, GFP_ATOMIC); + if (dma->pgtbl == NULL) + goto error; + + cp->setup_pgtbl(dev, dma); + + return 0; + +error: + cnic_free_dma(dev, dma); + return -ENOMEM; +} + +static void cnic_free_context(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int i; + + for (i = 0; i < cp->ctx_blks; i++) { + if (cp->ctx_arr[i].ctx) { + dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size, + cp->ctx_arr[i].ctx, + cp->ctx_arr[i].mapping); + cp->ctx_arr[i].ctx = NULL; + } + } +} + +static void __cnic_free_uio(struct cnic_uio_dev *udev) +{ + uio_unregister_device(&udev->cnic_uinfo); + + if (udev->l2_buf) { + dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, + udev->l2_buf, udev->l2_buf_map); + udev->l2_buf = NULL; + } + + if (udev->l2_ring) { + dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, + udev->l2_ring, udev->l2_ring_map); + udev->l2_ring = NULL; + } + + pci_dev_put(udev->pdev); + kfree(udev); +} + +static void cnic_free_uio(struct cnic_uio_dev *udev) +{ + if (!udev) + return; + + write_lock(&cnic_dev_lock); + list_del_init(&udev->list); + write_unlock(&cnic_dev_lock); + __cnic_free_uio(udev); +} + +static void cnic_free_resc(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev = cp->udev; + + if (udev) { + udev->dev = NULL; + cp->udev = NULL; + } + + cnic_free_context(dev); + kfree(cp->ctx_arr); + cp->ctx_arr = NULL; + cp->ctx_blks = 0; + + cnic_free_dma(dev, &cp->gbl_buf_info); + cnic_free_dma(dev, &cp->kwq_info); + cnic_free_dma(dev, &cp->kwq_16_data_info); + cnic_free_dma(dev, &cp->kcq2.dma); + cnic_free_dma(dev, &cp->kcq1.dma); + kfree(cp->iscsi_tbl); + cp->iscsi_tbl = NULL; + kfree(cp->ctx_tbl); + cp->ctx_tbl = NULL; + + cnic_free_id_tbl(&cp->fcoe_cid_tbl); + cnic_free_id_tbl(&cp->cid_tbl); +} + +static int cnic_alloc_context(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + if (CHIP_NUM(cp) == CHIP_NUM_5709) { + int i, k, arr_size; + + cp->ctx_blk_size = BCM_PAGE_SIZE; + cp->cids_per_blk = BCM_PAGE_SIZE / 128; + arr_size = BNX2_MAX_CID / cp->cids_per_blk * + sizeof(struct cnic_ctx); + cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); + if (cp->ctx_arr == NULL) + return -ENOMEM; + + k = 0; + for (i = 0; i < 2; i++) { + u32 j, reg, off, lo, hi; + + if (i == 0) + off = BNX2_PG_CTX_MAP; + else + off = BNX2_ISCSI_CTX_MAP; + + reg = cnic_reg_rd_ind(dev, off); + lo = reg >> 16; + hi = reg & 0xffff; + for (j = lo; j < hi; j += cp->cids_per_blk, k++) + cp->ctx_arr[k].cid = j; + } + + cp->ctx_blks = k; + if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) { + cp->ctx_blks = 0; + 
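
cnic_setup_page_tbl() and cnic_setup_page_tbl_le() above differ only in word order: each 64-bit page address is written into the page table as two 32-bit words, high word first for chips that read the table big endian and low word first for little endian ones. One entry, condensed into a single helper (sketch only; the helper name is illustrative):

	/* Write one 64-bit DMA address as two 32-bit page-table words */
	static void write_pgtbl_entry(__le32 *slot, dma_addr_t map,
				      bool big_endian)
	{
		__le32 hi = cpu_to_le32((u64)map >> 32);
		__le32 lo = cpu_to_le32(map & 0xffffffff);

		/* word order is the only difference between the two paths */
		slot[0] = big_endian ? hi : lo;
		slot[1] = big_endian ? lo : hi;
	}
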
return -ENOMEM; + } + + for (i = 0; i < cp->ctx_blks; i++) { + cp->ctx_arr[i].ctx = + dma_alloc_coherent(&dev->pcidev->dev, + BCM_PAGE_SIZE, + &cp->ctx_arr[i].mapping, + GFP_KERNEL); + if (cp->ctx_arr[i].ctx == NULL) + return -ENOMEM; + } + } + return 0; +} + +static u16 cnic_bnx2_next_idx(u16 idx) +{ + return idx + 1; +} + +static u16 cnic_bnx2_hw_idx(u16 idx) +{ + return idx; +} + +static u16 cnic_bnx2x_next_idx(u16 idx) +{ + idx++; + if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) + idx++; + + return idx; +} + +static u16 cnic_bnx2x_hw_idx(u16 idx) +{ + if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) + idx++; + return idx; +} + +static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info, + bool use_pg_tbl) +{ + int err, i, use_page_tbl = 0; + struct kcqe **kcq; + + if (use_pg_tbl) + use_page_tbl = 1; + + err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl); + if (err) + return err; + + kcq = (struct kcqe **) info->dma.pg_arr; + info->kcq = kcq; + + info->next_idx = cnic_bnx2_next_idx; + info->hw_idx = cnic_bnx2_hw_idx; + if (use_pg_tbl) + return 0; + + info->next_idx = cnic_bnx2x_next_idx; + info->hw_idx = cnic_bnx2x_hw_idx; + + for (i = 0; i < KCQ_PAGE_CNT; i++) { + struct bnx2x_bd_chain_next *next = + (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT]; + int j = i + 1; + + if (j >= KCQ_PAGE_CNT) + j = 0; + next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32; + next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff; + } + return 0; +} + +static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev; + + read_lock(&cnic_dev_lock); + list_for_each_entry(udev, &cnic_udev_list, list) { + if (udev->pdev == dev->pcidev) { + udev->dev = dev; + cp->udev = udev; + read_unlock(&cnic_dev_lock); + return 0; + } + } + read_unlock(&cnic_dev_lock); + + udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); + if (!udev) + return -ENOMEM; + + udev->uio_dev = -1; + + udev->dev = dev; + udev->pdev = dev->pcidev; + udev->l2_ring_size = pages * BCM_PAGE_SIZE; + udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, + &udev->l2_ring_map, + GFP_KERNEL | __GFP_COMP); + if (!udev->l2_ring) + goto err_udev; + + udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; + udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); + udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, + &udev->l2_buf_map, + GFP_KERNEL | __GFP_COMP); + if (!udev->l2_buf) + goto err_dma; + + write_lock(&cnic_dev_lock); + list_add(&udev->list, &cnic_udev_list); + write_unlock(&cnic_dev_lock); + + pci_dev_get(udev->pdev); + + cp->udev = udev; + + return 0; + err_dma: + dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, + udev->l2_ring, udev->l2_ring_map); + err_udev: + kfree(udev); + return -ENOMEM; +} + +static int cnic_init_uio(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev = cp->udev; + struct uio_info *uinfo; + int ret = 0; + + if (!udev) + return -ENOMEM; + + uinfo = &udev->cnic_uinfo; + + uinfo->mem[0].addr = dev->netdev->base_addr; + uinfo->mem[0].internal_addr = dev->regview; + uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; + uinfo->mem[0].memtype = UIO_MEM_PHYS; + + if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { + uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & + PAGE_MASK; + if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) + uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; + else + uinfo->mem[1].size = 
BNX2_SBLK_MSIX_ALIGN_SIZE; + + uinfo->name = "bnx2_cnic"; + } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & + PAGE_MASK; + uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); + + uinfo->name = "bnx2x_cnic"; + } + + uinfo->mem[1].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[2].addr = (unsigned long) udev->l2_ring; + uinfo->mem[2].size = udev->l2_ring_size; + uinfo->mem[2].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[3].addr = (unsigned long) udev->l2_buf; + uinfo->mem[3].size = udev->l2_buf_size; + uinfo->mem[3].memtype = UIO_MEM_LOGICAL; + + uinfo->version = CNIC_MODULE_VERSION; + uinfo->irq = UIO_IRQ_CUSTOM; + + uinfo->open = cnic_uio_open; + uinfo->release = cnic_uio_close; + + if (udev->uio_dev == -1) { + if (!uinfo->priv) { + uinfo->priv = udev; + + ret = uio_register_device(&udev->pdev->dev, uinfo); + } + } else { + cnic_init_rings(dev); + } + + return ret; +} + +static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int ret; + + ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1); + if (ret) + goto error; + cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; + + ret = cnic_alloc_kcq(dev, &cp->kcq1, true); + if (ret) + goto error; + + ret = cnic_alloc_context(dev); + if (ret) + goto error; + + ret = cnic_alloc_uio_rings(dev, 2); + if (ret) + goto error; + + ret = cnic_init_uio(dev); + if (ret) + goto error; + + return 0; + +error: + cnic_free_resc(dev); + return ret; +} + +static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int ctx_blk_size = cp->ethdev->ctx_blk_size; + int total_mem, blks, i; + + total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space; + blks = total_mem / ctx_blk_size; + if (total_mem % ctx_blk_size) + blks++; + + if (blks > cp->ethdev->ctx_tbl_len) + return -ENOMEM; + + cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL); + if (cp->ctx_arr == NULL) + return -ENOMEM; + + cp->ctx_blks = blks; + cp->ctx_blk_size = ctx_blk_size; + if (!BNX2X_CHIP_IS_57710(cp->chip_id)) + cp->ctx_align = 0; + else + cp->ctx_align = ctx_blk_size; + + cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE; + + for (i = 0; i < blks; i++) { + cp->ctx_arr[i].ctx = + dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size, + &cp->ctx_arr[i].mapping, + GFP_KERNEL); + if (cp->ctx_arr[i].ctx == NULL) + return -ENOMEM; + + if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) { + if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { + cnic_free_context(dev); + cp->ctx_blk_size += cp->ctx_align; + i = -1; + continue; + } + } + } + return 0; +} + +static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + u32 start_cid = ethdev->starting_cid; + int i, j, n, ret, pages; + struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; + + cp->iro_arr = ethdev->iro_arr; + + cp->max_cid_space = MAX_ISCSI_TBL_SZ; + cp->iscsi_start_cid = start_cid; + cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; + + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS; + cp->fcoe_init_cid = ethdev->fcoe_init_cid; + if (!cp->fcoe_init_cid) + cp->fcoe_init_cid = 0x10; + } + + cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, + GFP_KERNEL); + if (!cp->iscsi_tbl) + goto error; + + cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * + cp->max_cid_space, GFP_KERNEL); + if (!cp->ctx_tbl) + goto error; + + 
for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { + cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i]; + cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; + } + + for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) + cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; + + pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / + PAGE_SIZE; + + ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); + if (ret) + return -ENOMEM; + + n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; + for (i = 0, j = 0; i < cp->max_cid_space; i++) { + long off = CNIC_KWQ16_DATA_SIZE * (i % n); + + cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; + cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] + + off; + + if ((i % n) == (n - 1)) + j++; + } + + ret = cnic_alloc_kcq(dev, &cp->kcq1, false); + if (ret) + goto error; + + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + ret = cnic_alloc_kcq(dev, &cp->kcq2, true); + if (ret) + goto error; + } + + pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; + ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); + if (ret) + goto error; + + ret = cnic_alloc_bnx2x_context(dev); + if (ret) + goto error; + + cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; + + cp->l2_rx_ring_size = 15; + + ret = cnic_alloc_uio_rings(dev, 4); + if (ret) + goto error; + + ret = cnic_init_uio(dev); + if (ret) + goto error; + + return 0; + +error: + cnic_free_resc(dev); + return -ENOMEM; +} + +static inline u32 cnic_kwq_avail(struct cnic_local *cp) +{ + return cp->max_kwq_idx - + ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx); +} + +static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num_wqes) +{ + struct cnic_local *cp = dev->cnic_priv; + struct kwqe *prod_qe; + u16 prod, sw_prod, i; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2 is down */ + + spin_lock_bh(&cp->cnic_ulp_lock); + if (num_wqes > cnic_kwq_avail(cp) && + !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) { + spin_unlock_bh(&cp->cnic_ulp_lock); + return -EAGAIN; + } + + clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); + + prod = cp->kwq_prod_idx; + sw_prod = prod & MAX_KWQ_IDX; + for (i = 0; i < num_wqes; i++) { + prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)]; + memcpy(prod_qe, wqes[i], sizeof(struct kwqe)); + prod++; + sw_prod = prod & MAX_KWQ_IDX; + } + cp->kwq_prod_idx = prod; + + CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx); + + spin_unlock_bh(&cp->cnic_ulp_lock); + return 0; +} + +static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid, + union l5cm_specific_data *l5_data) +{ + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + dma_addr_t map; + + map = ctx->kwqe_data_mapping; + l5_data->phy_address.lo = (u64) map & 0xffffffff; + l5_data->phy_address.hi = (u64) map >> 32; + return ctx->kwqe_data; +} + +static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, + u32 type, union l5cm_specific_data *l5_data) +{ + struct cnic_local *cp = dev->cnic_priv; + struct l5cm_spe kwqe; + struct kwqe_16 *kwq[1]; + u16 type_16; + int ret; + + kwqe.hdr.conn_and_cmd_data = + cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | + BNX2X_HW_CID(cp, cid))); + + type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; + type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & + SPE_HDR_FUNCTION_ID; + + kwqe.hdr.type = cpu_to_le16(type_16); + kwqe.hdr.reserved1 = 0; + kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); + kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); + + kwq[0] = (struct kwqe_16 *) 
&kwqe; + + spin_lock_bh(&cp->cnic_ulp_lock); + ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1); + spin_unlock_bh(&cp->cnic_ulp_lock); + + if (ret == 1) + return 0; + + return -EBUSY; +} + +static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, + struct kcqe *cqes[], u32 num_cqes) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_ulp_ops *ulp_ops; + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); + if (likely(ulp_ops)) { + ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], + cqes, num_cqes); + } + rcu_read_unlock(); +} + +static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; + int hq_bds, pages; + u32 pfid = cp->pfid; + + cp->num_iscsi_tasks = req1->num_tasks_per_conn; + cp->num_ccells = req1->num_ccells_per_conn; + cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE * + cp->num_iscsi_tasks; + cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * + BNX2X_ISCSI_R2TQE_SIZE; + cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; + pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; + hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); + cp->num_cqs = req1->num_cqs; + + if (!dev->max_iscsi_conn) + return 0; + + /* init Tstorm RAM */ + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), + req1->rq_num_wqes); + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), + PAGE_SIZE); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), + req1->num_tasks_per_conn); + + /* init Ustorm RAM */ + CNIC_WR16(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), + req1->rq_buffer_size); + CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), + PAGE_SIZE); + CNIC_WR8(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + CNIC_WR16(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), + req1->num_tasks_per_conn); + CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid), + req1->rq_num_wqes); + CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid), + req1->cq_num_wqes); + CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid), + cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); + + /* init Xstorm RAM */ + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), + PAGE_SIZE); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), + req1->num_tasks_per_conn); + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), + hq_bds); + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid), + req1->num_tasks_per_conn); + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid), + cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); + + /* init Cstorm RAM */ + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), + PAGE_SIZE); + CNIC_WR8(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), + req1->num_tasks_per_conn); + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid), + req1->cq_num_wqes); + CNIC_WR16(dev, 
BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), + hq_bds); + + return 0; +} + +static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; + struct cnic_local *cp = dev->cnic_priv; + u32 pfid = cp->pfid; + struct iscsi_kcqe kcqe; + struct kcqe *cqes[1]; + + memset(&kcqe, 0, sizeof(kcqe)); + if (!dev->max_iscsi_conn) { + kcqe.completion_status = + ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED; + goto done; + } + + CNIC_WR(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]); + CNIC_WR(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4, + req2->error_bit_map[1]); + + CNIC_WR16(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn); + CNIC_WR(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]); + CNIC_WR(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4, + req2->error_bit_map[1]); + + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn); + + kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; + +done: + kcqe.op_code = ISCSI_KCQE_OPCODE_INIT; + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); + + return 0; +} + +static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + + if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) { + struct cnic_iscsi *iscsi = ctx->proto.iscsi; + + cnic_free_dma(dev, &iscsi->hq_info); + cnic_free_dma(dev, &iscsi->r2tq_info); + cnic_free_dma(dev, &iscsi->task_array_info); + cnic_free_id(&cp->cid_tbl, ctx->cid); + } else { + cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid); + } + + ctx->cid = 0; +} + +static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) +{ + u32 cid; + int ret, pages; + struct cnic_local *cp = dev->cnic_priv; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + struct cnic_iscsi *iscsi = ctx->proto.iscsi; + + if (ctx->ulp_proto_id == CNIC_ULP_FCOE) { + cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl); + if (cid == -1) { + ret = -ENOMEM; + goto error; + } + ctx->cid = cid; + return 0; + } + + cid = cnic_alloc_new_id(&cp->cid_tbl); + if (cid == -1) { + ret = -ENOMEM; + goto error; + } + + ctx->cid = cid; + pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; + + ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); + if (ret) + goto error; + + pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; + ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); + if (ret) + goto error; + + pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; + ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); + if (ret) + goto error; + + return 0; + +error: + cnic_free_bnx2x_conn_resc(dev, l5_cid); + return ret; +} + +static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init, + struct regpair *ctx_addr) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk; + int off = (cid - ethdev->starting_cid) % cp->cids_per_blk; + unsigned long align_off = 0; + dma_addr_t ctx_map; + void *ctx; + + if (cp->ctx_align) { + unsigned long mask = cp->ctx_align - 1; + + if (cp->ctx_arr[blk].mapping & mask) + align_off = cp->ctx_align - + (cp->ctx_arr[blk].mapping & mask); + } + ctx_map = cp->ctx_arr[blk].mapping + align_off + + (off * 
BNX2X_CONTEXT_MEM_SIZE); + ctx = cp->ctx_arr[blk].ctx + align_off + + (off * BNX2X_CONTEXT_MEM_SIZE); + if (init) + memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE); + + ctx_addr->lo = ctx_map & 0xffffffff; + ctx_addr->hi = (u64) ctx_map >> 32; + return ctx; +} + +static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num) +{ + struct cnic_local *cp = dev->cnic_priv; + struct iscsi_kwqe_conn_offload1 *req1 = + (struct iscsi_kwqe_conn_offload1 *) wqes[0]; + struct iscsi_kwqe_conn_offload2 *req2 = + (struct iscsi_kwqe_conn_offload2 *) wqes[1]; + struct iscsi_kwqe_conn_offload3 *req3; + struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; + struct cnic_iscsi *iscsi = ctx->proto.iscsi; + u32 cid = ctx->cid; + u32 hw_cid = BNX2X_HW_CID(cp, cid); + struct iscsi_context *ictx; + struct regpair context_addr; + int i, j, n = 2, n_max; + u8 port = CNIC_PORT(cp); + + ctx->ctx_flags = 0; + if (!req2->num_additional_wqes) + return -EINVAL; + + n_max = req2->num_additional_wqes + 2; + + ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr); + if (ictx == NULL) + return -ENOMEM; + + req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; + + ictx->xstorm_ag_context.hq_prod = 1; + + ictx->xstorm_st_context.iscsi.first_burst_length = + ISCSI_DEF_FIRST_BURST_LEN; + ictx->xstorm_st_context.iscsi.max_send_pdu_length = + ISCSI_DEF_MAX_RECV_SEG_LEN; + ictx->xstorm_st_context.iscsi.sq_pbl_base.lo = + req1->sq_page_table_addr_lo; + ictx->xstorm_st_context.iscsi.sq_pbl_base.hi = + req1->sq_page_table_addr_hi; + ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi; + ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo; + ictx->xstorm_st_context.iscsi.hq_pbl_base.lo = + iscsi->hq_info.pgtbl_map & 0xffffffff; + ictx->xstorm_st_context.iscsi.hq_pbl_base.hi = + (u64) iscsi->hq_info.pgtbl_map >> 32; + ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo = + iscsi->hq_info.pgtbl[0]; + ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi = + iscsi->hq_info.pgtbl[1]; + ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo = + iscsi->r2tq_info.pgtbl_map & 0xffffffff; + ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi = + (u64) iscsi->r2tq_info.pgtbl_map >> 32; + ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo = + iscsi->r2tq_info.pgtbl[0]; + ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi = + iscsi->r2tq_info.pgtbl[1]; + ictx->xstorm_st_context.iscsi.task_pbl_base.lo = + iscsi->task_array_info.pgtbl_map & 0xffffffff; + ictx->xstorm_st_context.iscsi.task_pbl_base.hi = + (u64) iscsi->task_array_info.pgtbl_map >> 32; + ictx->xstorm_st_context.iscsi.task_pbl_cache_idx = + BNX2X_ISCSI_PBL_NOT_CACHED; + ictx->xstorm_st_context.iscsi.flags.flags |= + XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA; + ictx->xstorm_st_context.iscsi.flags.flags |= + XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; + ictx->xstorm_st_context.common.ethernet.reserved_vlan_type = + ETH_P_8021Q; + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && + cp->port_mode == CHIP_2_PORT_MODE) { + + port = 0; + } + ictx->xstorm_st_context.common.flags = + 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT; + ictx->xstorm_st_context.common.flags = + port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT; + + ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; + /* TSTORM requires the base address of RQ DB & not PTE */ + ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = + req2->rq_page_table_addr_lo & PAGE_MASK; + ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = + req2->rq_page_table_addr_hi; + 
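+ /* Default TCP parameters for the offloaded connection: the
+ * initial congestion window is 0x5A8 (1448, which matches one
+ * MSS on a 1500-byte MTU with TCP timestamps), and out-of-order
+ * segments are dropped while their ACK information is still
+ * processed.
+ */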
ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; + ictx->tstorm_st_context.tcp.cwnd = 0x5A8; + ictx->tstorm_st_context.tcp.flags2 |= + TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; + ictx->tstorm_st_context.tcp.ooo_support_mode = + TCP_TSTORM_OOO_DROP_AND_PROC_ACK; + + ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; + + ictx->ustorm_st_context.ring.rq.pbl_base.lo = + req2->rq_page_table_addr_lo; + ictx->ustorm_st_context.ring.rq.pbl_base.hi = + req2->rq_page_table_addr_hi; + ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi; + ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo; + ictx->ustorm_st_context.ring.r2tq.pbl_base.lo = + iscsi->r2tq_info.pgtbl_map & 0xffffffff; + ictx->ustorm_st_context.ring.r2tq.pbl_base.hi = + (u64) iscsi->r2tq_info.pgtbl_map >> 32; + ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo = + iscsi->r2tq_info.pgtbl[0]; + ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi = + iscsi->r2tq_info.pgtbl[1]; + ictx->ustorm_st_context.ring.cq_pbl_base.lo = + req1->cq_page_table_addr_lo; + ictx->ustorm_st_context.ring.cq_pbl_base.hi = + req1->cq_page_table_addr_hi; + ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN; + ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi; + ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo; + ictx->ustorm_st_context.task_pbe_cache_index = + BNX2X_ISCSI_PBL_NOT_CACHED; + ictx->ustorm_st_context.task_pdu_cache_index = + BNX2X_ISCSI_PDU_HEADER_NOT_CACHED; + + for (i = 1, j = 1; i < cp->num_cqs; i++, j++) { + if (j == 3) { + if (n >= n_max) + break; + req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; + j = 0; + } + ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN; + ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo = + req3->qp_first_pte[j].hi; + ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi = + req3->qp_first_pte[j].lo; + } + + ictx->ustorm_st_context.task_pbl_base.lo = + iscsi->task_array_info.pgtbl_map & 0xffffffff; + ictx->ustorm_st_context.task_pbl_base.hi = + (u64) iscsi->task_array_info.pgtbl_map >> 32; + ictx->ustorm_st_context.tce_phy_addr.lo = + iscsi->task_array_info.pgtbl[0]; + ictx->ustorm_st_context.tce_phy_addr.hi = + iscsi->task_array_info.pgtbl[1]; + ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; + ictx->ustorm_st_context.num_cqs = cp->num_cqs; + ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN; + ictx->ustorm_st_context.negotiated_rx_and_flags |= + ISCSI_DEF_MAX_BURST_LEN; + ictx->ustorm_st_context.negotiated_rx |= + ISCSI_DEFAULT_MAX_OUTSTANDING_R2T << + USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT; + + ictx->cstorm_st_context.hq_pbl_base.lo = + iscsi->hq_info.pgtbl_map & 0xffffffff; + ictx->cstorm_st_context.hq_pbl_base.hi = + (u64) iscsi->hq_info.pgtbl_map >> 32; + ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0]; + ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1]; + ictx->cstorm_st_context.task_pbl_base.lo = + iscsi->task_array_info.pgtbl_map & 0xffffffff; + ictx->cstorm_st_context.task_pbl_base.hi = + (u64) iscsi->task_array_info.pgtbl_map >> 32; + /* CSTORM and USTORM initialization is different, CSTORM requires + * CQ DB base & not PTE addr */ + ictx->cstorm_st_context.cq_db_base.lo = + req1->cq_page_table_addr_lo & PAGE_MASK; + ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; + ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; + ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; + for 
(i = 0; i < cp->num_cqs; i++) { + ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] = + ISCSI_INITIAL_SN; + ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] = + ISCSI_INITIAL_SN; + } + + ictx->xstorm_ag_context.cdu_reserved = + CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, + ISCSI_CONNECTION_TYPE); + ictx->ustorm_ag_context.cdu_usage = + CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, + ISCSI_CONNECTION_TYPE); + return 0; + +} + +static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num, int *work) +{ + struct iscsi_kwqe_conn_offload1 *req1; + struct iscsi_kwqe_conn_offload2 *req2; + struct cnic_local *cp = dev->cnic_priv; + struct cnic_context *ctx; + struct iscsi_kcqe kcqe; + struct kcqe *cqes[1]; + u32 l5_cid; + int ret = 0; + + if (num < 2) { + *work = num; + return -EINVAL; + } + + req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; + req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1]; + if ((num - 2) < req2->num_additional_wqes) { + *work = num; + return -EINVAL; + } + *work = 2 + req2->num_additional_wqes; + + l5_cid = req1->iscsi_conn_id; + if (l5_cid >= MAX_ISCSI_TBL_SZ) + return -EINVAL; + + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN; + kcqe.iscsi_conn_id = l5_cid; + kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; + + ctx = &cp->ctx_tbl[l5_cid]; + if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) { + kcqe.completion_status = + ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY; + goto done; + } + + if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { + atomic_dec(&cp->iscsi_conn); + goto done; + } + ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); + if (ret) { + atomic_dec(&cp->iscsi_conn); + ret = 0; + goto done; + } + ret = cnic_setup_bnx2x_ctx(dev, wqes, num); + if (ret < 0) { + cnic_free_bnx2x_conn_resc(dev, l5_cid); + atomic_dec(&cp->iscsi_conn); + goto done; + } + + kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; + kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid); + +done: + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); + return ret; +} + + +static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct iscsi_kwqe_conn_update *req = + (struct iscsi_kwqe_conn_update *) kwqe; + void *data; + union l5cm_specific_data l5_data; + u32 l5_cid, cid = BNX2X_SW_CID(req->context_id); + int ret; + + if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0) + return -EINVAL; + + data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); + if (!data) + return -ENOMEM; + + memcpy(data, kwqe, sizeof(struct kwqe)); + + ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN, + req->context_id, ISCSI_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + union l5cm_specific_data l5_data; + int ret; + u32 hw_cid; + + init_waitqueue_head(&ctx->waitq); + ctx->wait_cond = 0; + memset(&l5_data, 0, sizeof(l5_data)); + hw_cid = BNX2X_HW_CID(cp, ctx->cid); + + ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, + hw_cid, NONE_CONNECTION_TYPE, &l5_data); + + if (ret == 0) { + wait_event(ctx->waitq, ctx->wait_cond); + if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags))) + return -EBUSY; + } + + return ret; +} + +static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 
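+/* Tear down an offloaded iSCSI connection.  If the connection was
+ * active less than two seconds ago (ctx->timestamp), the CFC delete
+ * is deferred to the delete_task worker; otherwise the delete ramrod
+ * is issued synchronously via cnic_bnx2x_destroy_ramrod().
+ */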
+{ + struct cnic_local *cp = dev->cnic_priv; + struct iscsi_kwqe_conn_destroy *req = + (struct iscsi_kwqe_conn_destroy *) kwqe; + u32 l5_cid = req->reserved0; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + int ret = 0; + struct iscsi_kcqe kcqe; + struct kcqe *cqes[1]; + + if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) + goto skip_cfc_delete; + + if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { + unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies; + + if (delta > (2 * HZ)) + delta = 0; + + set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); + queue_delayed_work(cnic_wq, &cp->delete_task, delta); + goto destroy_reply; + } + + ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid); + +skip_cfc_delete: + cnic_free_bnx2x_conn_resc(dev, l5_cid); + + if (!ret) { + atomic_dec(&cp->iscsi_conn); + clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); + } + +destroy_reply: + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; + kcqe.iscsi_conn_id = l5_cid; + kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; + kcqe.iscsi_conn_context_id = req->context_id; + + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); + + return ret; +} + +static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, + struct l4_kwq_connect_req1 *kwqe1, + struct l4_kwq_connect_req3 *kwqe3, + struct l5cm_active_conn_buffer *conn_buf) +{ + struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf; + struct l5cm_xstorm_conn_buffer *xstorm_buf = + &conn_buf->xstorm_conn_buffer; + struct l5cm_tstorm_conn_buffer *tstorm_buf = + &conn_buf->tstorm_conn_buffer; + struct regpair context_addr; + u32 cid = BNX2X_SW_CID(kwqe1->cid); + struct in6_addr src_ip, dst_ip; + int i; + u32 *addrp; + + addrp = (u32 *) &conn_addr->local_ip_addr; + for (i = 0; i < 4; i++, addrp++) + src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); + + addrp = (u32 *) &conn_addr->remote_ip_addr; + for (i = 0; i < 4; i++, addrp++) + dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); + + cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr); + + xstorm_buf->context_addr.hi = context_addr.hi; + xstorm_buf->context_addr.lo = context_addr.lo; + xstorm_buf->mss = 0xffff; + xstorm_buf->rcv_buf = kwqe3->rcv_buf; + if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE) + xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE; + xstorm_buf->pseudo_header_checksum = + swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); + + if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK)) + tstorm_buf->params |= + L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE; + if (kwqe3->ka_timeout) { + tstorm_buf->ka_enable = 1; + tstorm_buf->ka_timeout = kwqe3->ka_timeout; + tstorm_buf->ka_interval = kwqe3->ka_interval; + tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; + } + tstorm_buf->max_rt_time = 0xffffffff; +} + +static void cnic_init_bnx2x_mac(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 pfid = cp->pfid; + u8 *mac = dev->mac_addr; + + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), 
mac[5]); + + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, + mac[4]); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, + mac[2]); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, + mac[0]); +} + +static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) +{ + struct cnic_local *cp = dev->cnic_priv; + u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; + u16 tstorm_flags = 0; + + if (tcp_ts) { + xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; + tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; + } + + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); + + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); +} + +static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num, int *work) +{ + struct cnic_local *cp = dev->cnic_priv; + struct l4_kwq_connect_req1 *kwqe1 = + (struct l4_kwq_connect_req1 *) wqes[0]; + struct l4_kwq_connect_req3 *kwqe3; + struct l5cm_active_conn_buffer *conn_buf; + struct l5cm_conn_addr_params *conn_addr; + union l5cm_specific_data l5_data; + u32 l5_cid = kwqe1->pg_cid; + struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + int ret; + + if (num < 2) { + *work = num; + return -EINVAL; + } + + if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) + *work = 3; + else + *work = 2; + + if (num < *work) { + *work = num; + return -EINVAL; + } + + if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { + netdev_err(dev->netdev, "conn_buf size too big\n"); + return -ENOMEM; + } + conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); + if (!conn_buf) + return -ENOMEM; + + memset(conn_buf, 0, sizeof(*conn_buf)); + + conn_addr = &conn_buf->conn_addr_buf; + conn_addr->remote_addr_0 = csk->ha[0]; + conn_addr->remote_addr_1 = csk->ha[1]; + conn_addr->remote_addr_2 = csk->ha[2]; + conn_addr->remote_addr_3 = csk->ha[3]; + conn_addr->remote_addr_4 = csk->ha[4]; + conn_addr->remote_addr_5 = csk->ha[5]; + + if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { + struct l4_kwq_connect_req2 *kwqe2 = + (struct l4_kwq_connect_req2 *) wqes[1]; + + conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; + conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; + conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; + + conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; + conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; + conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; + conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; + } + kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; + + conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; + conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; + conn_addr->local_tcp_port = kwqe1->src_port; + conn_addr->remote_tcp_port = kwqe1->dst_port; + + conn_addr->pmtu = kwqe3->pmtu; + cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); + + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); + + cnic_bnx2x_set_tcp_timestamp(dev, + 
kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); + + ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, + kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); + if (!ret) + set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); + + return ret; +} + +static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; + union l5cm_specific_data l5_data; + int ret; + + memset(&l5_data, 0, sizeof(l5_data)); + ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, + req->cid, ISCSI_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; + union l5cm_specific_data l5_data; + int ret; + + memset(&l5_data, 0, sizeof(l5_data)); + ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, + req->cid, ISCSI_CONNECTION_TYPE, &l5_data); + return ret; +} +static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; + struct l4_kcq kcqe; + struct kcqe *cqes[1]; + + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.pg_host_opaque = req->host_opaque; + kcqe.pg_cid = req->host_opaque; + kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); + return 0; +} + +static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; + struct l4_kcq kcqe; + struct kcqe *cqes[1]; + + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.pg_host_opaque = req->pg_host_opaque; + kcqe.pg_cid = req->pg_cid; + kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); + return 0; +} + +static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct fcoe_kwqe_stat *req; + struct fcoe_stat_ramrod_params *fcoe_stat; + union l5cm_specific_data l5_data; + struct cnic_local *cp = dev->cnic_priv; + int ret; + u32 cid; + + req = (struct fcoe_kwqe_stat *) kwqe; + cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); + + fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); + if (!fcoe_stat) + return -ENOMEM; + + memset(fcoe_stat, 0, sizeof(*fcoe_stat)); + memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req)); + + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid, + FCOE_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num, int *work) +{ + int ret; + struct cnic_local *cp = dev->cnic_priv; + u32 cid; + struct fcoe_init_ramrod_params *fcoe_init; + struct fcoe_kwqe_init1 *req1; + struct fcoe_kwqe_init2 *req2; + struct fcoe_kwqe_init3 *req3; + union l5cm_specific_data l5_data; + + if (num < 3) { + *work = num; + return -EINVAL; + } + req1 = (struct fcoe_kwqe_init1 *) wqes[0]; + req2 = (struct fcoe_kwqe_init2 *) wqes[1]; + req3 = (struct fcoe_kwqe_init3 *) wqes[2]; + if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) { + *work = 1; + return -EINVAL; + } + if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) { + *work = 2; + return -EINVAL; + } + + if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) { + netdev_err(dev->netdev, "fcoe_init size too big\n"); + return -ENOMEM; + } + fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); + if (!fcoe_init) + return -ENOMEM; + + memset(fcoe_init, 0, sizeof(*fcoe_init)); + 
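+ /* Pack the three FCOE_KWQE_OPCODE_INIT* requests into a single
+ * fcoe_init_ramrod_params block carved out of the 16-byte kwqe
+ * data area, and point the firmware event queue at kcq2, which
+ * carries the FCoE completions on E2 and later chips.
+ */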
memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1)); + memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2)); + memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3)); + fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff; + fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32; + fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages; + + fcoe_init->sb_num = cp->status_blk_num; + fcoe_init->eq_prod = MAX_KCQ_IDX; + fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; + cp->kcq2.sw_prod_idx = 0; + + cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, + FCOE_CONNECTION_TYPE, &l5_data); + *work = 3; + return ret; +} + +static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num, int *work) +{ + int ret = 0; + u32 cid = -1, l5_cid; + struct cnic_local *cp = dev->cnic_priv; + struct fcoe_kwqe_conn_offload1 *req1; + struct fcoe_kwqe_conn_offload2 *req2; + struct fcoe_kwqe_conn_offload3 *req3; + struct fcoe_kwqe_conn_offload4 *req4; + struct fcoe_conn_offload_ramrod_params *fcoe_offload; + struct cnic_context *ctx; + struct fcoe_context *fctx; + struct regpair ctx_addr; + union l5cm_specific_data l5_data; + struct fcoe_kcqe kcqe; + struct kcqe *cqes[1]; + + if (num < 4) { + *work = num; + return -EINVAL; + } + req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0]; + req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1]; + req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2]; + req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3]; + + *work = 4; + + l5_cid = req1->fcoe_conn_id; + if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS) + goto err_reply; + + l5_cid += BNX2X_FCOE_L5_CID_BASE; + + ctx = &cp->ctx_tbl[l5_cid]; + if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) + goto err_reply; + + ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); + if (ret) { + ret = 0; + goto err_reply; + } + cid = ctx->cid; + + fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); + if (fctx) { + u32 hw_cid = BNX2X_HW_CID(cp, cid); + u32 val; + + val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, + FCOE_CONNECTION_TYPE); + fctx->xstorm_ag_context.cdu_reserved = val; + val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, + FCOE_CONNECTION_TYPE); + fctx->ustorm_ag_context.cdu_usage = val; + } + if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) { + netdev_err(dev->netdev, "fcoe_offload size too big\n"); + goto err_reply; + } + fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); + if (!fcoe_offload) + goto err_reply; + + memset(fcoe_offload, 0, sizeof(*fcoe_offload)); + memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1)); + memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2)); + memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); + memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); + + cid = BNX2X_HW_CID(cp, cid); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, + FCOE_CONNECTION_TYPE, &l5_data); + if (!ret) + set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); + + return ret; + +err_reply: + if (cid != -1) + cnic_free_bnx2x_conn_resc(dev, l5_cid); + + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN; + kcqe.fcoe_conn_id = req1->fcoe_conn_id; + kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; + + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); + return ret; +} + +static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct fcoe_kwqe_conn_enable_disable *req; + struct 
fcoe_conn_enable_disable_ramrod_params *fcoe_enable; + union l5cm_specific_data l5_data; + int ret; + u32 cid, l5_cid; + struct cnic_local *cp = dev->cnic_priv; + + req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; + cid = req->context_id; + l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE; + + if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) { + netdev_err(dev->netdev, "fcoe_enable size too big\n"); + return -ENOMEM; + } + fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); + if (!fcoe_enable) + return -ENOMEM; + + memset(fcoe_enable, 0, sizeof(*fcoe_enable)); + memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req)); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid, + FCOE_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct fcoe_kwqe_conn_enable_disable *req; + struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable; + union l5cm_specific_data l5_data; + int ret; + u32 cid, l5_cid; + struct cnic_local *cp = dev->cnic_priv; + + req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; + cid = req->context_id; + l5_cid = req->conn_id; + if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS) + return -EINVAL; + + l5_cid += BNX2X_FCOE_L5_CID_BASE; + + if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) { + netdev_err(dev->netdev, "fcoe_disable size too big\n"); + return -ENOMEM; + } + fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); + if (!fcoe_disable) + return -ENOMEM; + + memset(fcoe_disable, 0, sizeof(*fcoe_disable)); + memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req)); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid, + FCOE_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct fcoe_kwqe_conn_destroy *req; + union l5cm_specific_data l5_data; + int ret; + u32 cid, l5_cid; + struct cnic_local *cp = dev->cnic_priv; + struct cnic_context *ctx; + struct fcoe_kcqe kcqe; + struct kcqe *cqes[1]; + + req = (struct fcoe_kwqe_conn_destroy *) kwqe; + cid = req->context_id; + l5_cid = req->conn_id; + if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS) + return -EINVAL; + + l5_cid += BNX2X_FCOE_L5_CID_BASE; + + ctx = &cp->ctx_tbl[l5_cid]; + + init_waitqueue_head(&ctx->waitq); + ctx->wait_cond = 0; + + memset(&l5_data, 0, sizeof(l5_data)); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid, + FCOE_CONNECTION_TYPE, &l5_data); + if (ret == 0) { + wait_event(ctx->waitq, ctx->wait_cond); + set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); + queue_delayed_work(cnic_wq, &cp->delete_task, + msecs_to_jiffies(2000)); + } + + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN; + kcqe.fcoe_conn_id = req->conn_id; + kcqe.fcoe_conn_context_id = cid; + + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); + return ret; +} + +static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 i; + + for (i = start_cid; i < cp->max_cid_space; i++) { + struct cnic_context *ctx = &cp->ctx_tbl[i]; + int j; + + while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) + msleep(10); + + for (j = 0; j < 5; j++) { + if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) + break; + msleep(20); + } + + if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) + netdev_warn(dev->netdev, "CID %x not deleted\n", + ctx->cid); + } +} + +static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, 
struct kwqe *kwqe) +{ + struct fcoe_kwqe_destroy *req; + union l5cm_specific_data l5_data; + struct cnic_local *cp = dev->cnic_priv; + int ret; + u32 cid; + + cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); + + req = (struct fcoe_kwqe_destroy *) kwqe; + cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); + + memset(&l5_data, 0, sizeof(l5_data)); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, + FCOE_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, + struct kwqe *wqes[], u32 num_wqes) +{ + int i, work, ret; + u32 opcode; + struct kwqe *kwqe; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2 is down */ + + for (i = 0; i < num_wqes; ) { + kwqe = wqes[i]; + opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); + work = 1; + + switch (opcode) { + case ISCSI_KWQE_OPCODE_INIT1: + ret = cnic_bnx2x_iscsi_init1(dev, kwqe); + break; + case ISCSI_KWQE_OPCODE_INIT2: + ret = cnic_bnx2x_iscsi_init2(dev, kwqe); + break; + case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: + ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], + num_wqes - i, &work); + break; + case ISCSI_KWQE_OPCODE_UPDATE_CONN: + ret = cnic_bnx2x_iscsi_update(dev, kwqe); + break; + case ISCSI_KWQE_OPCODE_DESTROY_CONN: + ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); + break; + case L4_KWQE_OPCODE_VALUE_CONNECT1: + ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, + &work); + break; + case L4_KWQE_OPCODE_VALUE_CLOSE: + ret = cnic_bnx2x_close(dev, kwqe); + break; + case L4_KWQE_OPCODE_VALUE_RESET: + ret = cnic_bnx2x_reset(dev, kwqe); + break; + case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: + ret = cnic_bnx2x_offload_pg(dev, kwqe); + break; + case L4_KWQE_OPCODE_VALUE_UPDATE_PG: + ret = cnic_bnx2x_update_pg(dev, kwqe); + break; + case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: + ret = 0; + break; + default: + ret = 0; + netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", + opcode); + break; + } + if (ret < 0) + netdev_err(dev->netdev, "KWQE(0x%x) failed\n", + opcode); + i += work; + } + return 0; +} + +static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, + struct kwqe *wqes[], u32 num_wqes) +{ + struct cnic_local *cp = dev->cnic_priv; + int i, work, ret; + u32 opcode; + struct kwqe *kwqe; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2 is down */ + + if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) + return -EINVAL; + + for (i = 0; i < num_wqes; ) { + kwqe = wqes[i]; + opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); + work = 1; + + switch (opcode) { + case FCOE_KWQE_OPCODE_INIT1: + ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i], + num_wqes - i, &work); + break; + case FCOE_KWQE_OPCODE_OFFLOAD_CONN1: + ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i], + num_wqes - i, &work); + break; + case FCOE_KWQE_OPCODE_ENABLE_CONN: + ret = cnic_bnx2x_fcoe_enable(dev, kwqe); + break; + case FCOE_KWQE_OPCODE_DISABLE_CONN: + ret = cnic_bnx2x_fcoe_disable(dev, kwqe); + break; + case FCOE_KWQE_OPCODE_DESTROY_CONN: + ret = cnic_bnx2x_fcoe_destroy(dev, kwqe); + break; + case FCOE_KWQE_OPCODE_DESTROY: + ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe); + break; + case FCOE_KWQE_OPCODE_STAT: + ret = cnic_bnx2x_fcoe_stat(dev, kwqe); + break; + default: + ret = 0; + netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", + opcode); + break; + } + if (ret < 0) + netdev_err(dev->netdev, "KWQE(0x%x) failed\n", + opcode); + i += work; + } + return 0; +} + +static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num_wqes) +{ + int ret = -EINVAL; + u32 layer_code; + + if 
(!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2x is down */ + + if (!num_wqes) + return 0; + + layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK; + switch (layer_code) { + case KWQE_FLAGS_LAYER_MASK_L5_ISCSI: + case KWQE_FLAGS_LAYER_MASK_L4: + case KWQE_FLAGS_LAYER_MASK_L2: + ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes); + break; + + case KWQE_FLAGS_LAYER_MASK_L5_FCOE: + ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes); + break; + } + return ret; +} + +static inline u32 cnic_get_kcqe_layer_mask(u32 opflag) +{ + if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN)) + return KCQE_FLAGS_LAYER_MASK_L4; + + return opflag & KCQE_FLAGS_LAYER_MASK; +} + +static void service_kcqes(struct cnic_dev *dev, int num_cqes) +{ + struct cnic_local *cp = dev->cnic_priv; + int i, j, comp = 0; + + i = 0; + j = 1; + while (num_cqes) { + struct cnic_ulp_ops *ulp_ops; + int ulp_type; + u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; + u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag); + + if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) + comp++; + + while (j < num_cqes) { + u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; + + if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer) + break; + + if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) + comp++; + j++; + } + + if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) + ulp_type = CNIC_ULP_RDMA; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) + ulp_type = CNIC_ULP_ISCSI; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE) + ulp_type = CNIC_ULP_FCOE; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) + ulp_type = CNIC_ULP_L4; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) + goto end; + else { + netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", + kcqe_op_flag); + goto end; + } + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); + if (likely(ulp_ops)) { + ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], + cp->completed_kcq + i, j); + } + rcu_read_unlock(); +end: + num_cqes -= j; + i += j; + j = 1; + } + if (unlikely(comp)) + cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp); +} + +static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) +{ + struct cnic_local *cp = dev->cnic_priv; + u16 i, ri, hw_prod, last; + struct kcqe *kcqe; + int kcqe_cnt = 0, last_cnt = 0; + + i = ri = last = info->sw_prod_idx; + ri &= MAX_KCQ_IDX; + hw_prod = *info->hw_prod_idx_ptr; + hw_prod = info->hw_idx(hw_prod); + + while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { + kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; + cp->completed_kcq[kcqe_cnt++] = kcqe; + i = info->next_idx(i); + ri = i & MAX_KCQ_IDX; + if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { + last_cnt = kcqe_cnt; + last = i; + } + } + + info->sw_prod_idx = last; + return last_cnt; +} + +static int cnic_l2_completion(struct cnic_local *cp) +{ + u16 hw_cons, sw_cons; + struct cnic_uio_dev *udev = cp->udev; + union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) + (udev->l2_ring + (2 * BCM_PAGE_SIZE)); + u32 cmd; + int comp = 0; + + if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) + return 0; + + hw_cons = *cp->rx_cons_ptr; + if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) + hw_cons++; + + sw_cons = cp->rx_cons; + while (sw_cons != hw_cons) { + u8 cqe_fp_flags; + + cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; + cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; + if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { + cmd = 
le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); + cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; + if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || + cmd == RAMROD_CMD_ID_ETH_HALT) + comp++; + } + sw_cons = BNX2X_NEXT_RCQE(sw_cons); + } + return comp; +} + +static void cnic_chk_pkt_rings(struct cnic_local *cp) +{ + u16 rx_cons, tx_cons; + int comp = 0; + + if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) + return; + + rx_cons = *cp->rx_cons_ptr; + tx_cons = *cp->tx_cons_ptr; + if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { + if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) + comp = cnic_l2_completion(cp); + + cp->tx_cons = tx_cons; + cp->rx_cons = rx_cons; + + if (cp->udev) + uio_event_notify(&cp->udev->cnic_uinfo); + } + if (comp) + clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); +} + +static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; + int kcqe_cnt; + + /* status block index must be read before reading other fields */ + rmb(); + cp->kwq_con_idx = *cp->kwq_con_idx_ptr; + + while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { + + service_kcqes(dev, kcqe_cnt); + + /* Tell compiler that status_blk fields can change. */ + barrier(); + status_idx = (u16) *cp->kcq1.status_idx_ptr; + /* status block index must be read first */ + rmb(); + cp->kwq_con_idx = *cp->kwq_con_idx_ptr; + } + + CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); + + cnic_chk_pkt_rings(cp); + + return status_idx; +} + +static int cnic_service_bnx2(void *data, void *status_blk) +{ + struct cnic_dev *dev = data; + + if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) { + struct status_block *sblk = status_blk; + + return sblk->status_idx; + } + + return cnic_service_bnx2_queues(dev); +} + +static void cnic_service_bnx2_msix(unsigned long data) +{ + struct cnic_dev *dev = (struct cnic_dev *) data; + struct cnic_local *cp = dev->cnic_priv; + + cp->last_status_idx = cnic_service_bnx2_queues(dev); + + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); +} + +static void cnic_doirq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { + u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; + + prefetch(cp->status_blk.gen); + prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); + + tasklet_schedule(&cp->cnic_irq_task); + } +} + +static irqreturn_t cnic_irq(int irq, void *dev_instance) +{ + struct cnic_dev *dev = dev_instance; + struct cnic_local *cp = dev->cnic_priv; + + if (cp->ack_int) + cp->ack_int(dev); + + cnic_doirq(dev); + + return IRQ_HANDLED; +} + +static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, + u16 index, u8 op, u8 update) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + + COMMAND_REG_INT_ACK); + struct igu_ack_register igu_ack; + + igu_ack.status_block_index = index; + igu_ack.sb_id_and_flags = + ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | + (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | + (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | + (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + + CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); +} + +static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update) +{ + struct igu_regular cmd_data; + u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; + + 
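+ /* Build the IGU "regular" ack: the status block index, access
+ * segment, update flag and interrupt enable/disable mode are
+ * packed into one 32-bit command written to the per-SB address.
+ */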
cmd_data.sb_id_and_flags = + (index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT); + + + CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags); +} + +static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0, + IGU_INT_DISABLE, 0); +} + +static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, + IGU_INT_DISABLE, 0); +} + +static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) +{ + u32 last_status = *info->status_idx_ptr; + int kcqe_cnt; + + /* status block index must be read before reading the KCQ */ + rmb(); + while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { + + service_kcqes(dev, kcqe_cnt); + + /* Tell compiler that sblk fields can change. */ + barrier(); + + last_status = *info->status_idx_ptr; + /* status block index must be read before reading the KCQ */ + rmb(); + } + return last_status; +} + +static void cnic_service_bnx2x_bh(unsigned long data) +{ + struct cnic_dev *dev = (struct cnic_dev *) data; + struct cnic_local *cp = dev->cnic_priv; + u32 status_idx, new_status_idx; + + if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) + return; + + while (1) { + status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); + + CNIC_WR16(dev, cp->kcq1.io_addr, + cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); + + if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, + status_idx, IGU_INT_ENABLE, 1); + break; + } + + new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); + + if (new_status_idx != status_idx) + continue; + + CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + + MAX_KCQ_IDX); + + cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, + status_idx, IGU_INT_ENABLE, 1); + + break; + } +} + +static int cnic_service_bnx2x(void *data, void *status_blk) +{ + struct cnic_dev *dev = data; + struct cnic_local *cp = dev->cnic_priv; + + if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + cnic_doirq(dev); + + cnic_chk_pkt_rings(cp); + + return 0; +} + +static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type) +{ + struct cnic_ulp_ops *ulp_ops; + + if (if_type == CNIC_ULP_ISCSI) + cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); + + mutex_lock(&cnic_lock); + ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], + lockdep_is_held(&cnic_lock)); + if (!ulp_ops) { + mutex_unlock(&cnic_lock); + return; + } + set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); + mutex_unlock(&cnic_lock); + + if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) + ulp_ops->cnic_stop(cp->ulp_handle[if_type]); + + clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); +} + +static void cnic_ulp_stop(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int if_type; + + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) + cnic_ulp_stop_one(cp, if_type); +} + +static void cnic_ulp_start(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int if_type; + + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { + struct cnic_ulp_ops *ulp_ops; + + mutex_lock(&cnic_lock); + ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], + lockdep_is_held(&cnic_lock)); + if (!ulp_ops || !ulp_ops->cnic_start) { + mutex_unlock(&cnic_lock); + continue; + } + 
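+ /* Flag the callback as in progress before dropping cnic_lock
+ * so that paths tearing down the ULP can wait for
+ * ULP_F_CALL_PENDING to clear; the flag is cleared again once
+ * cnic_start() returns.
+ */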
set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); + mutex_unlock(&cnic_lock); + + if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) + ulp_ops->cnic_start(cp->ulp_handle[if_type]); + + clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); + } +} + +static int cnic_ctl(void *data, struct cnic_ctl_info *info) +{ + struct cnic_dev *dev = data; + + switch (info->cmd) { + case CNIC_CTL_STOP_CMD: + cnic_hold(dev); + + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + + cnic_put(dev); + break; + case CNIC_CTL_START_CMD: + cnic_hold(dev); + + if (!cnic_start_hw(dev)) + cnic_ulp_start(dev); + + cnic_put(dev); + break; + case CNIC_CTL_STOP_ISCSI_CMD: { + struct cnic_local *cp = dev->cnic_priv; + set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags); + queue_delayed_work(cnic_wq, &cp->delete_task, 0); + break; + } + case CNIC_CTL_COMPLETION_CMD: { + struct cnic_ctl_completion *comp = &info->data.comp; + u32 cid = BNX2X_SW_CID(comp->cid); + u32 l5_cid; + struct cnic_local *cp = dev->cnic_priv; + + if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + + if (unlikely(comp->error)) { + set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags); + netdev_err(dev->netdev, + "CID %x CFC delete comp error %x\n", + cid, comp->error); + } + + ctx->wait_cond = 1; + wake_up(&ctx->waitq); + } + break; + } + default: + return -EINVAL; + } + return 0; +} + +static void cnic_ulp_init(struct cnic_dev *dev) +{ + int i; + struct cnic_local *cp = dev->cnic_priv; + + for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { + struct cnic_ulp_ops *ulp_ops; + + mutex_lock(&cnic_lock); + ulp_ops = cnic_ulp_tbl_prot(i); + if (!ulp_ops || !ulp_ops->cnic_init) { + mutex_unlock(&cnic_lock); + continue; + } + ulp_get(ulp_ops); + mutex_unlock(&cnic_lock); + + if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) + ulp_ops->cnic_init(dev); + + ulp_put(ulp_ops); + } +} + +static void cnic_ulp_exit(struct cnic_dev *dev) +{ + int i; + struct cnic_local *cp = dev->cnic_priv; + + for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { + struct cnic_ulp_ops *ulp_ops; + + mutex_lock(&cnic_lock); + ulp_ops = cnic_ulp_tbl_prot(i); + if (!ulp_ops || !ulp_ops->cnic_exit) { + mutex_unlock(&cnic_lock); + continue; + } + ulp_get(ulp_ops); + mutex_unlock(&cnic_lock); + + if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) + ulp_ops->cnic_exit(dev); + + ulp_put(ulp_ops); + } +} + +static int cnic_cm_offload_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_offload_pg *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; + l4kwqe->l2hdr_nbytes = ETH_HLEN; + + l4kwqe->da0 = csk->ha[0]; + l4kwqe->da1 = csk->ha[1]; + l4kwqe->da2 = csk->ha[2]; + l4kwqe->da3 = csk->ha[3]; + l4kwqe->da4 = csk->ha[4]; + l4kwqe->da5 = csk->ha[5]; + + l4kwqe->sa0 = dev->mac_addr[0]; + l4kwqe->sa1 = dev->mac_addr[1]; + l4kwqe->sa2 = dev->mac_addr[2]; + l4kwqe->sa3 = dev->mac_addr[3]; + l4kwqe->sa4 = dev->mac_addr[4]; + l4kwqe->sa5 = dev->mac_addr[5]; + + l4kwqe->etype = ETH_P_IP; + l4kwqe->ipid_start = DEF_IPID_START; + l4kwqe->host_opaque = csk->l5_cid; + + if (csk->vlan_id) { + l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; + l4kwqe->vlan_tag = csk->vlan_id; + l4kwqe->l2hdr_nbytes += 4; + } + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_update_pg(struct cnic_sock 
*csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_update_pg *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; + l4kwqe->pg_cid = csk->pg_cid; + + l4kwqe->da0 = csk->ha[0]; + l4kwqe->da1 = csk->ha[1]; + l4kwqe->da2 = csk->ha[2]; + l4kwqe->da3 = csk->ha[3]; + l4kwqe->da4 = csk->ha[4]; + l4kwqe->da5 = csk->ha[5]; + + l4kwqe->pg_host_opaque = csk->l5_cid; + l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_upload_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_upload *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->pg_cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_conn_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_connect_req1 *l4kwqe1; + struct l4_kwq_connect_req2 *l4kwqe2; + struct l4_kwq_connect_req3 *l4kwqe3; + struct kwqe *wqes[3]; + u8 tcp_flags = 0; + int num_wqes = 2; + + l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; + l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; + l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; + memset(l4kwqe1, 0, sizeof(*l4kwqe1)); + memset(l4kwqe2, 0, sizeof(*l4kwqe2)); + memset(l4kwqe3, 0, sizeof(*l4kwqe3)); + + l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; + l4kwqe3->flags = + L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; + l4kwqe3->ka_timeout = csk->ka_timeout; + l4kwqe3->ka_interval = csk->ka_interval; + l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; + l4kwqe3->tos = csk->tos; + l4kwqe3->ttl = csk->ttl; + l4kwqe3->snd_seq_scale = csk->snd_seq_scale; + l4kwqe3->pmtu = csk->mtu; + l4kwqe3->rcv_buf = csk->rcv_buf; + l4kwqe3->snd_buf = csk->snd_buf; + l4kwqe3->seed = csk->seed; + + wqes[0] = (struct kwqe *) l4kwqe1; + if (test_bit(SK_F_IPV6, &csk->flags)) { + wqes[1] = (struct kwqe *) l4kwqe2; + wqes[2] = (struct kwqe *) l4kwqe3; + num_wqes = 3; + + l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; + l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; + l4kwqe2->flags = + L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | + L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; + l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); + l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); + l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); + l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); + l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); + l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); + l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - + sizeof(struct tcphdr); + } else { + wqes[1] = (struct kwqe *) l4kwqe3; + l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - + sizeof(struct tcphdr); + } + + l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; + l4kwqe1->flags = + (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | + L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; + l4kwqe1->cid = csk->cid; + l4kwqe1->pg_cid = csk->pg_cid; + l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); + l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); + l4kwqe1->src_port = be16_to_cpu(csk->src_port); + 
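+ /* Finish the 4-tuple below and translate the generic SK_TCP_*
+ * socket options into their L4_KWQ_CONNECT_REQ1_* equivalents.
+ */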
l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); + if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) + tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; + if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) + tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; + if (csk->tcp_flags & SK_TCP_NAGLE) + tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; + if (csk->tcp_flags & SK_TCP_TIMESTAMP) + tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; + if (csk->tcp_flags & SK_TCP_SACK) + tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; + if (csk->tcp_flags & SK_TCP_SEG_SCALING) + tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; + + l4kwqe1->tcp_flags = tcp_flags; + + return dev->submit_kwqes(dev, wqes, num_wqes); +} + +static int cnic_cm_close_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_close_req *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; + l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_abort_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_reset_req *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; + l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, + u32 l5_cid, struct cnic_sock **csk, void *context) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_sock *csk1; + + if (l5_cid >= MAX_CM_SK_TBL_SZ) + return -EINVAL; + + if (cp->ctx_tbl) { + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + + if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) + return -EAGAIN; + } + + csk1 = &cp->csk_tbl[l5_cid]; + if (atomic_read(&csk1->ref_count)) + return -EAGAIN; + + if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) + return -EBUSY; + + csk1->dev = dev; + csk1->cid = cid; + csk1->l5_cid = l5_cid; + csk1->ulp_type = ulp_type; + csk1->context = context; + + csk1->ka_timeout = DEF_KA_TIMEOUT; + csk1->ka_interval = DEF_KA_INTERVAL; + csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; + csk1->tos = DEF_TOS; + csk1->ttl = DEF_TTL; + csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; + csk1->rcv_buf = DEF_RCV_BUF; + csk1->snd_buf = DEF_SND_BUF; + csk1->seed = DEF_SEED; + + *csk = csk1; + return 0; +} + +static void cnic_cm_cleanup(struct cnic_sock *csk) +{ + if (csk->src_port) { + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port)); + csk->src_port = 0; + } +} + +static void cnic_close_conn(struct cnic_sock *csk) +{ + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { + cnic_cm_upload_pg(csk); + clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); + } + cnic_cm_cleanup(csk); +} + +static int cnic_cm_destroy(struct cnic_sock *csk) +{ + if (!cnic_in_use(csk)) + return -EINVAL; + + csk_hold(csk); + clear_bit(SK_F_INUSE, &csk->flags); + smp_mb__after_clear_bit(); + while (atomic_read(&csk->ref_count) != 1) + msleep(1); + cnic_cm_cleanup(csk); + + csk->flags = 0; + csk_put(csk); + return 0; +} + +static inline u16 cnic_get_vlan(struct net_device *dev, + struct net_device **vlan_dev) +{ + if (dev->priv_flags & IFF_802_1Q_VLAN) { + 
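+ /* This is a VLAN interface: return the underlying real
+ * device and its 802.1Q VLAN id.
+ */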
*vlan_dev = vlan_dev_real_dev(dev); + return vlan_dev_vlan_id(dev); + } + *vlan_dev = dev; + return 0; +} + +static int cnic_get_v4_route(struct sockaddr_in *dst_addr, + struct dst_entry **dst) +{ +#if defined(CONFIG_INET) + struct rtable *rt; + + rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0); + if (!IS_ERR(rt)) { + *dst = &rt->dst; + return 0; + } + return PTR_ERR(rt); +#else + return -ENETUNREACH; +#endif +} + +static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, + struct dst_entry **dst) +{ +#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) + struct flowi6 fl6; + + memset(&fl6, 0, sizeof(fl6)); + ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr); + if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) + fl6.flowi6_oif = dst_addr->sin6_scope_id; + + *dst = ip6_route_output(&init_net, NULL, &fl6); + if (*dst) + return 0; +#endif + + return -ENETUNREACH; +} + +static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, + int ulp_type) +{ + struct cnic_dev *dev = NULL; + struct dst_entry *dst; + struct net_device *netdev = NULL; + int err = -ENETUNREACH; + + if (dst_addr->sin_family == AF_INET) + err = cnic_get_v4_route(dst_addr, &dst); + else if (dst_addr->sin_family == AF_INET6) { + struct sockaddr_in6 *dst_addr6 = + (struct sockaddr_in6 *) dst_addr; + + err = cnic_get_v6_route(dst_addr6, &dst); + } else + return NULL; + + if (err) + return NULL; + + if (!dst->dev) + goto done; + + cnic_get_vlan(dst->dev, &netdev); + + dev = cnic_from_netdev(netdev); + +done: + dst_release(dst); + if (dev) + cnic_put(dev); + return dev; +} + +static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); +} + +static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + int is_v6, rc = 0; + struct dst_entry *dst = NULL; + struct net_device *realdev; + __be16 local_port; + u32 port_id; + + if (saddr->local.v6.sin6_family == AF_INET6 && + saddr->remote.v6.sin6_family == AF_INET6) + is_v6 = 1; + else if (saddr->local.v4.sin_family == AF_INET && + saddr->remote.v4.sin_family == AF_INET) + is_v6 = 0; + else + return -EINVAL; + + clear_bit(SK_F_IPV6, &csk->flags); + + if (is_v6) { + set_bit(SK_F_IPV6, &csk->flags); + cnic_get_v6_route(&saddr->remote.v6, &dst); + + memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, + sizeof(struct in6_addr)); + csk->dst_port = saddr->remote.v6.sin6_port; + local_port = saddr->local.v6.sin6_port; + + } else { + cnic_get_v4_route(&saddr->remote.v4, &dst); + + csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; + csk->dst_port = saddr->remote.v4.sin_port; + local_port = saddr->local.v4.sin_port; + } + + csk->vlan_id = 0; + csk->mtu = dev->netdev->mtu; + if (dst && dst->dev) { + u16 vlan = cnic_get_vlan(dst->dev, &realdev); + if (realdev == dev->netdev) { + csk->vlan_id = vlan; + csk->mtu = dst_mtu(dst); + } + } + + port_id = be16_to_cpu(local_port); + if (port_id >= CNIC_LOCAL_PORT_MIN && + port_id < CNIC_LOCAL_PORT_MAX) { + if (cnic_alloc_id(&cp->csk_port_tbl, port_id)) + port_id = 0; + } else + port_id = 0; + + if (!port_id) { + port_id = cnic_alloc_new_id(&cp->csk_port_tbl); + if (port_id == -1) { + rc = -ENOMEM; + goto err_out; + } + local_port = cpu_to_be16(port_id); + } + csk->src_port = local_port; + +err_out: + dst_release(dst); + return rc; +} + +static 
void cnic_init_csk_state(struct cnic_sock *csk) +{ + csk->state = 0; + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + clear_bit(SK_F_CLOSING, &csk->flags); +} + +static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + struct cnic_local *cp = csk->dev->cnic_priv; + int err = 0; + + if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) + return -EOPNOTSUPP; + + if (!cnic_in_use(csk)) + return -EINVAL; + + if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) + return -EINVAL; + + cnic_init_csk_state(csk); + + err = cnic_get_route(csk, saddr); + if (err) + goto err_out; + + err = cnic_resolve_addr(csk, saddr); + if (!err) + return 0; + +err_out: + clear_bit(SK_F_CONNECT_START, &csk->flags); + return err; +} + +static int cnic_cm_abort(struct cnic_sock *csk) +{ + struct cnic_local *cp = csk->dev->cnic_priv; + u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; + + if (!cnic_in_use(csk)) + return -EINVAL; + + if (cnic_abort_prep(csk)) + return cnic_cm_abort_req(csk); + + /* Getting here means that we haven't started connect, or + * connect was not successful. + */ + + cp->close_conn(csk, opcode); + if (csk->state != opcode) + return -EALREADY; + + return 0; +} + +static int cnic_cm_close(struct cnic_sock *csk) +{ + if (!cnic_in_use(csk)) + return -EINVAL; + + if (cnic_close_prep(csk)) { + csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; + return cnic_cm_close_req(csk); + } else { + return -EALREADY; + } + return 0; +} + +static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, + u8 opcode) +{ + struct cnic_ulp_ops *ulp_ops; + int ulp_type = csk->ulp_type; + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); + if (ulp_ops) { + if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) + ulp_ops->cm_connect_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) + ulp_ops->cm_close_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) + ulp_ops->cm_remote_abort(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) + ulp_ops->cm_abort_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) + ulp_ops->cm_remote_close(csk); + } + rcu_read_unlock(); +} + +static int cnic_cm_set_pg(struct cnic_sock *csk) +{ + if (cnic_offld_prep(csk)) { + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + cnic_cm_update_pg(csk); + else + cnic_cm_offload_pg(csk); + } + return 0; +} + +static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 l5_cid = kcqe->pg_host_opaque; + u8 opcode = kcqe->op_code; + struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; + + csk_hold(csk); + if (!cnic_in_use(csk)) + goto done; + + if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + goto done; + } + /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ + if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + cnic_cm_upcall(cp, csk, + L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); + goto done; + } + + csk->pg_cid = kcqe->pg_cid; + set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); + cnic_cm_conn_req(csk); + +done: + csk_put(csk); +} + +static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe; + u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + + ctx->timestamp = jiffies; + 
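/* Flag the FCoE terminate-connection ramrod as completed and wake up + * any thread sleeping on ctx->waitq. */ + 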
ctx->wait_cond = 1; + wake_up(&ctx->waitq); +} + +static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; + u8 opcode = l4kcqe->op_code; + u32 l5_cid; + struct cnic_sock *csk; + + if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) { + cnic_process_fcoe_term_conn(dev, kcqe); + return; + } + if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || + opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { + cnic_cm_process_offld_pg(dev, l4kcqe); + return; + } + + l5_cid = l4kcqe->conn_id; + if (opcode & 0x80) + l5_cid = l4kcqe->cid; + if (l5_cid >= MAX_CM_SK_TBL_SZ) + return; + + csk = &cp->csk_tbl[l5_cid]; + csk_hold(csk); + + if (!cnic_in_use(csk)) { + csk_put(csk); + return; + } + + switch (opcode) { + case L5CM_RAMROD_CMD_ID_TCP_CONNECT: + if (l4kcqe->status != 0) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + cnic_cm_upcall(cp, csk, + L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); + } + break; + case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: + if (l4kcqe->status == 0) + set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); + + smp_mb__before_clear_bit(); + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + cnic_cm_upcall(cp, csk, opcode); + break; + + case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: + case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: + case L4_KCQE_OPCODE_VALUE_RESET_COMP: + case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: + case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: + cp->close_conn(csk, opcode); + break; + + case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: + /* after we already sent CLOSE_REQ */ + if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) && + !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) && + csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) + cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP); + else + cnic_cm_upcall(cp, csk, opcode); + break; + } + csk_put(csk); +} + +static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) +{ + struct cnic_dev *dev = data; + int i; + + for (i = 0; i < num; i++) + cnic_cm_process_kcqe(dev, kcqe[i]); +} + +static struct cnic_ulp_ops cm_ulp_ops = { + .indicate_kcqes = cnic_cm_indicate_kcqe, +}; + +static void cnic_cm_free_mem(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + kfree(cp->csk_tbl); + cp->csk_tbl = NULL; + cnic_free_id_tbl(&cp->csk_port_tbl); +} + +static int cnic_cm_alloc_mem(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 port_id; + + cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, + GFP_KERNEL); + if (!cp->csk_tbl) + return -ENOMEM; + + port_id = random32(); + port_id %= CNIC_LOCAL_PORT_RANGE; + if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, + CNIC_LOCAL_PORT_MIN, port_id)) { + cnic_cm_free_mem(dev); + return -ENOMEM; + } + return 0; +} + +static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) +{ + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + /* Unsolicited RESET_COMP or RESET_RECEIVED */ + opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; + csk->state = opcode; + } + + /* 1. If event opcode matches the expected event in csk->state + * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any + * event + * 3. If the expected event is 0, meaning the connection was never + * established, we accept the opcode from cm_abort.
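+ * In every accepted case, the SK_F_CLOSING test-and-set below + * guarantees that only one caller proceeds with the close.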
+ */ + if (opcode == csk->state || csk->state == 0 || + csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP || + csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) { + if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { + if (csk->state == 0) + csk->state = opcode; + return 1; + } + } + return 0; +} + +static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) { + cnic_cm_upcall(cp, csk, opcode); + return; + } + + clear_bit(SK_F_CONNECT_START, &csk->flags); + cnic_close_conn(csk); + csk->state = opcode; + cnic_cm_upcall(cp, csk, opcode); +} + +static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) +{ +} + +static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) +{ + u32 seed; + + seed = random32(); + cnic_ctx_wr(dev, 45, 0, seed); + return 0; +} + +static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; + union l5cm_specific_data l5_data; + u32 cmd = 0; + int close_complete = 0; + + switch (opcode) { + case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: + case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: + case L4_KCQE_OPCODE_VALUE_RESET_COMP: + if (cnic_ready_to_close(csk, opcode)) { + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; + else + close_complete = 1; + } + break; + case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: + cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; + break; + case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: + close_complete = 1; + break; + } + if (cmd) { + memset(&l5_data, 0, sizeof(l5_data)); + + cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, + &l5_data); + } else if (close_complete) { + ctx->timestamp = jiffies; + cnic_close_conn(csk); + cnic_cm_upcall(cp, csk, csk->state); + } +} + +static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + if (!cp->ctx_tbl) + return; + + if (!netif_running(dev->netdev)) + return; + + cnic_bnx2x_delete_wait(dev, 0); + + cancel_delayed_work(&cp->delete_task); + flush_workqueue(cnic_wq); + + if (atomic_read(&cp->iscsi_conn) != 0) + netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n", + atomic_read(&cp->iscsi_conn)); +} + +static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 pfid = cp->pfid; + u32 port = CNIC_PORT(cp); + + cnic_init_bnx2x_mac(dev); + cnic_bnx2x_set_tcp_timestamp(dev, 1); + + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); + + CNIC_WR(dev, BAR_XSTRORM_INTMEM + + XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1); + CNIC_WR(dev, BAR_XSTRORM_INTMEM + + XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port), + DEF_MAX_DA_COUNT); + + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2); + CNIC_WR(dev, BAR_XSTRORM_INTMEM + + XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER); + + CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid), + DEF_MAX_CWND); + return 0; +} + +static void cnic_delete_task(struct work_struct *work) +{ + struct cnic_local *cp; + struct cnic_dev *dev; + u32 i; + int need_resched = 0; + + cp = container_of(work, struct 
cnic_local, delete_task.work); + dev = cp->dev; + + if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) { + struct drv_ctl_info info; + + cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI); + + info.cmd = DRV_CTL_ISCSI_STOPPED_CMD; + cp->ethdev->drv_ctl(dev->netdev, &info); + } + + for (i = 0; i < cp->max_cid_space; i++) { + struct cnic_context *ctx = &cp->ctx_tbl[i]; + int err; + + if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || + !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) + continue; + + if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { + need_resched = 1; + continue; + } + + if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) + continue; + + err = cnic_bnx2x_destroy_ramrod(dev, i); + + cnic_free_bnx2x_conn_resc(dev, i); + if (!err) { + if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) + atomic_dec(&cp->iscsi_conn); + + clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); + } + } + + if (need_resched) + queue_delayed_work(cnic_wq, &cp->delete_task, + msecs_to_jiffies(10)); + +} + +static int cnic_cm_open(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int err; + + err = cnic_cm_alloc_mem(dev); + if (err) + return err; + + err = cp->start_cm(dev); + + if (err) + goto err_out; + + INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); + + dev->cm_create = cnic_cm_create; + dev->cm_destroy = cnic_cm_destroy; + dev->cm_connect = cnic_cm_connect; + dev->cm_abort = cnic_cm_abort; + dev->cm_close = cnic_cm_close; + dev->cm_select_dev = cnic_cm_select_dev; + + cp->ulp_handle[CNIC_ULP_L4] = dev; + rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); + return 0; + +err_out: + cnic_cm_free_mem(dev); + return err; +} + +static int cnic_cm_shutdown(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int i; + + cp->stop_cm(dev); + + if (!cp->csk_tbl) + return 0; + + for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { + struct cnic_sock *csk = &cp->csk_tbl[i]; + + clear_bit(SK_F_INUSE, &csk->flags); + cnic_cm_cleanup(csk); + } + cnic_cm_free_mem(dev); + + return 0; +} + +static void cnic_init_context(struct cnic_dev *dev, u32 cid) +{ + u32 cid_addr; + int i; + + cid_addr = GET_CID_ADDR(cid); + + for (i = 0; i < CTX_SIZE; i += 4) + cnic_ctx_wr(dev, cid_addr, i, 0); +} + +static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) +{ + struct cnic_local *cp = dev->cnic_priv; + int ret = 0, i; + u32 valid_bit = valid ? 
BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; + + if (CHIP_NUM(cp) != CHIP_NUM_5709) + return 0; + + for (i = 0; i < cp->ctx_blks; i++) { + int j; + u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; + u32 val; + + memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); + + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, + (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, + (u64) cp->ctx_arr[i].mapping >> 32); + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | + BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); + for (j = 0; j < 10; j++) { + + val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); + if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) + break; + udelay(5); + } + if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { + ret = -EBUSY; + break; + } + } + return ret; +} + +static void cnic_free_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + cp->disable_int_sync(dev); + tasklet_kill(&cp->cnic_irq_task); + free_irq(ethdev->irq_arr[0].vector, dev); + } +} + +static int cnic_request_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int err; + + err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); + if (err) + tasklet_disable(&cp->cnic_irq_task); + + return err; +} + +static int cnic_init_bnx2_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + int err, i = 0; + int sblk_num = cp->status_blk_num; + u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + + BNX2_HC_SB_CONFIG_1; + + CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); + + CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); + CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); + CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); + + cp->last_status_idx = cp->status_blk.bnx2->status_idx; + tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, + (unsigned long) dev); + err = cnic_request_irq(dev); + if (err) + return err; + + while (cp->status_blk.bnx2->status_completion_producer_index && + i < 10) { + CNIC_WR(dev, BNX2_HC_COALESCE_NOW, + 1 << (11 + sblk_num)); + udelay(10); + i++; + barrier(); + } + if (cp->status_blk.bnx2->status_completion_producer_index) { + cnic_free_irq(dev); + goto failed; + } + + } else { + struct status_block *sblk = cp->status_blk.gen; + u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); + int i = 0; + + while (sblk->status_completion_producer_index && i < 10) { + CNIC_WR(dev, BNX2_HC_COMMAND, + hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + udelay(10); + i++; + barrier(); + } + if (sblk->status_completion_producer_index) + goto failed; + + } + return 0; + +failed: + netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); + return -EBUSY; +} + +static void cnic_enable_bnx2_int(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + return; + + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); +} + +static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + return; + + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, 
cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); + synchronize_irq(ethdev->irq_arr[0].vector); +} + +static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct cnic_uio_dev *udev = cp->udev; + u32 cid_addr, tx_cid, sb_id; + u32 val, offset0, offset1, offset2, offset3; + int i; + struct tx_bd *txbd; + dma_addr_t buf_map, ring_map = udev->l2_ring_map; + struct status_block *s_blk = cp->status_blk.gen; + + sb_id = cp->status_blk_num; + tx_cid = 20; + cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *sblk = cp->status_blk.bnx2; + + tx_cid = TX_TSS_CID + sb_id - 1; + CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | + (TX_TSS_CID << 7)); + cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; + } + cp->tx_cons = *cp->tx_cons_ptr; + + cid_addr = GET_CID_ADDR(tx_cid); + if (CHIP_NUM(cp) == CHIP_NUM_5709) { + u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; + + for (i = 0; i < PHY_CTX_SIZE; i += 4) + cnic_ctx_wr(dev, cid_addr2, i, 0); + + offset0 = BNX2_L2CTX_TYPE_XI; + offset1 = BNX2_L2CTX_CMD_TYPE_XI; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; + } else { + cnic_init_context(dev, tx_cid); + cnic_init_context(dev, tx_cid + 1); + + offset0 = BNX2_L2CTX_TYPE; + offset1 = BNX2_L2CTX_CMD_TYPE; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; + } + val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; + cnic_ctx_wr(dev, cid_addr, offset0, val); + + val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); + cnic_ctx_wr(dev, cid_addr, offset1, val); + + txbd = udev->l2_ring; + + buf_map = udev->l2_buf_map; + for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { + txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; + txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; + } + val = (u64) ring_map >> 32; + cnic_ctx_wr(dev, cid_addr, offset2, val); + txbd->tx_bd_haddr_hi = val; + + val = (u64) ring_map & 0xffffffff; + cnic_ctx_wr(dev, cid_addr, offset3, val); + txbd->tx_bd_haddr_lo = val; +} + +static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct cnic_uio_dev *udev = cp->udev; + u32 cid_addr, sb_id, val, coal_reg, coal_val; + int i; + struct rx_bd *rxbd; + struct status_block *s_blk = cp->status_blk.gen; + dma_addr_t ring_map = udev->l2_ring_map; + + sb_id = cp->status_blk_num; + cnic_init_context(dev, 2); + cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; + coal_reg = BNX2_HC_COMMAND; + coal_val = CNIC_RD(dev, coal_reg); + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *sblk = cp->status_blk.bnx2; + + cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; + coal_reg = BNX2_HC_COALESCE_NOW; + coal_val = 1 << (11 + sb_id); + } + i = 0; + while (*cp->rx_cons_ptr == 0 && i < 10) { + CNIC_WR(dev, coal_reg, coal_val); + udelay(10); + i++; + barrier(); + } + cp->rx_cons = *cp->rx_cons_ptr; + + cid_addr = GET_CID_ADDR(2); + val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | + BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); + + if (sb_id == 0) + val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; + else + val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); + + rxbd = udev->l2_ring + BCM_PAGE_SIZE; + for (i = 
0; i < MAX_RX_DESC_CNT; i++, rxbd++) { + dma_addr_t buf_map; + int n = (i % cp->l2_rx_ring_size) + 1; + + buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); + rxbd->rx_bd_len = cp->l2_single_buf_size; + rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; + rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; + rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; + } + val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); + rxbd->rx_bd_haddr_hi = val; + + val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); + rxbd->rx_bd_haddr_lo = val; + + val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); + cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); +} + +static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) +{ + struct kwqe *wqes[1], l2kwqe; + + memset(&l2kwqe, 0, sizeof(l2kwqe)); + wqes[0] = &l2kwqe; + l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) | + (L2_KWQE_OPCODE_VALUE_FLUSH << + KWQE_OPCODE_SHIFT) | 2; + dev->submit_kwqes(dev, wqes, 1); +} + +static void cnic_set_bnx2_mac(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 val; + + val = cp->func << 2; + + cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); + + val = cnic_reg_rd_ind(dev, cp->shmem_base + + BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); + dev->mac_addr[0] = (u8) (val >> 8); + dev->mac_addr[1] = (u8) val; + + CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); + + val = cnic_reg_rd_ind(dev, cp->shmem_base + + BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); + dev->mac_addr[2] = (u8) (val >> 24); + dev->mac_addr[3] = (u8) (val >> 16); + dev->mac_addr[4] = (u8) (val >> 8); + dev->mac_addr[5] = (u8) val; + + CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); + + val = 4 | BNX2_RPM_SORT_USER2_BC_EN; + if (CHIP_NUM(cp) != CHIP_NUM_5709) + val |= BNX2_RPM_SORT_USER2_PROM_VLAN; + + CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); + CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); + CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); +} + +static int cnic_start_bnx2_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct status_block *sblk = cp->status_blk.gen; + u32 val, kcq_cid_addr, kwq_cid_addr; + int err; + + cnic_set_bnx2_mac(dev); + + val = CNIC_RD(dev, BNX2_MQ_CONFIG); + val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; + if (BCM_PAGE_BITS > 12) + val |= (12 - 8) << 4; + else + val |= (BCM_PAGE_BITS - 8) << 4; + + CNIC_WR(dev, BNX2_MQ_CONFIG, val); + + CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8); + CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220); + CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220); + + err = cnic_setup_5709_context(dev, 1); + if (err) + return err; + + cnic_init_context(dev, KWQ_CID); + cnic_init_context(dev, KCQ_CID); + + kwq_cid_addr = GET_CID_ADDR(KWQ_CID); + cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; + + cp->max_kwq_idx = MAX_KWQ_IDX; + cp->kwq_prod_idx = 0; + cp->kwq_con_idx = 0; + set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); + + if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708) + cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; + else + cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index; + + /* Initialize the kernel work queue context. 
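The KWQ pages are chained + * through a page table; the page count and the page table's base + * address are programmed into the queue context below. 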
*/ + val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | + (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); + + val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); + + val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); + + val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); + + val = (u32) cp->kwq_info.pgtbl_map; + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); + + kcq_cid_addr = GET_CID_ADDR(KCQ_CID); + cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; + + cp->kcq1.sw_prod_idx = 0; + cp->kcq1.hw_prod_idx_ptr = + (u16 *) &sblk->status_completion_producer_index; + + cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx; + + /* Initialize the kernel complete queue context. */ + val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | + (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); + + val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); + + val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); + + val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); + + val = (u32) cp->kcq1.dma.pgtbl_map; + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); + + cp->int_num = 0; + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *msblk = cp->status_blk.bnx2; + u32 sb_id = cp->status_blk_num; + u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); + + cp->kcq1.hw_prod_idx_ptr = + (u16 *) &msblk->status_completion_producer_index; + cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx; + cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index; + cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); + } + + /* Enable Command Scheduler notification when we write to the + * host producer index of the kernel contexts. */ + CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2); + + /* Enable Command Scheduler notification when we write to either + * the Send Queue or Receive Queue producer indexes of the kernel + * bypass contexts. */ + CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7); + CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7); + + /* Notify COM when the driver posts an application buffer. */ + CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000); + + /* Set the CP and COM doorbells. These two processors poll the + * doorbell for a non-zero value before running. This must be done + * after setting up the kernel queue contexts. 
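(cnic_stop_bnx2_hw() later + * parks the processors again by writing 0 back to the same scratch + * locations.)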
*/ + cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1); + cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1); + + cnic_init_bnx2_tx_ring(dev); + cnic_init_bnx2_rx_ring(dev); + + err = cnic_init_bnx2_irq(dev); + if (err) { + netdev_err(dev->netdev, "cnic_init_irq failed\n"); + cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); + cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); + return err; + } + + return 0; +} + +static void cnic_setup_bnx2x_context(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + u32 start_offset = ethdev->ctx_tbl_offset; + int i; + + for (i = 0; i < cp->ctx_blks; i++) { + struct cnic_ctx *ctx = &cp->ctx_arr[i]; + dma_addr_t map = ctx->mapping; + + if (cp->ctx_align) { + unsigned long mask = cp->ctx_align - 1; + + map = (map + mask) & ~mask; + } + + cnic_ctx_tbl_wr(dev, start_offset + i, map); + } +} + +static int cnic_init_bnx2x_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int err = 0; + + tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, + (unsigned long) dev); + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) + err = cnic_request_irq(dev); + + return err; +} + +static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev, + u16 sb_id, u8 sb_index, + u8 disable) +{ + + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + + offsetof(struct hc_status_block_data_e1x, index_data) + + sizeof(struct hc_index_data)*sb_index + + offsetof(struct hc_index_data, flags); + u16 flags = CNIC_RD16(dev, addr); + /* clear and set */ + flags &= ~HC_INDEX_DATA_HC_ENABLED; + flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) & + HC_INDEX_DATA_HC_ENABLED); + CNIC_WR16(dev, addr, flags); +} + +static void cnic_enable_bnx2x_int(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u8 sb_id = cp->status_blk_num; + + CNIC_WR8(dev, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + + offsetof(struct hc_status_block_data_e1x, index_data) + + sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS + + offsetof(struct hc_index_data, timeout), 64 / 4); + cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0); +} + +static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) +{ +} + +static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, + struct client_init_ramrod_data *data) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev = cp->udev; + union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; + dma_addr_t buf_map, ring_map = udev->l2_ring_map; + struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; + int i; + u32 cli = cp->ethdev->iscsi_l2_client_id; + u32 val; + + memset(txbd, 0, BCM_PAGE_SIZE); + + buf_map = udev->l2_buf_map; + for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { + struct eth_tx_start_bd *start_bd = &txbd->start_bd; + struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); + + start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); + start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); + reg_bd->addr_hi = start_bd->addr_hi; + reg_bd->addr_lo = start_bd->addr_lo + 0x10; + start_bd->nbytes = cpu_to_le16(0x10); + start_bd->nbd = cpu_to_le16(3); + start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + start_bd->general_data = (UNICAST_ADDRESS << + ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); + start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); + + } + + val = (u64) ring_map >> 32; + txbd->next_bd.addr_hi = 
cpu_to_le32(val); + + data->tx.tx_bd_page_base.hi = cpu_to_le32(val); + + val = (u64) ring_map & 0xffffffff; + txbd->next_bd.addr_lo = cpu_to_le32(val); + + data->tx.tx_bd_page_base.lo = cpu_to_le32(val); + + /* Other ramrod params */ + data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS; + data->tx.tx_status_block_id = BNX2X_DEF_SB_ID; + + /* reset xstorm per client statistics */ + if (cli < MAX_STAT_COUNTER_ID) { + data->general.statistics_zero_flg = 1; + data->general.statistics_en_flg = 1; + data->general.statistics_counter_id = cli; + } + + cp->tx_cons_ptr = + &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS]; +} + +static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, + struct client_init_ramrod_data *data) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev = cp->udev; + struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + + BCM_PAGE_SIZE); + struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) + (udev->l2_ring + (2 * BCM_PAGE_SIZE)); + struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; + int i; + u32 cli = cp->ethdev->iscsi_l2_client_id; + int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); + u32 val; + dma_addr_t ring_map = udev->l2_ring_map; + + /* General data */ + data->general.client_id = cli; + data->general.activate_flg = 1; + data->general.sp_client_id = cli; + data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); + data->general.func_id = cp->pfid; + + for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { + dma_addr_t buf_map; + int n = (i % cp->l2_rx_ring_size) + 1; + + buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); + rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); + rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); + } + + val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; + rxbd->addr_hi = cpu_to_le32(val); + data->rx.bd_page_base.hi = cpu_to_le32(val); + + val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; + rxbd->addr_lo = cpu_to_le32(val); + data->rx.bd_page_base.lo = cpu_to_le32(val); + + rxcqe += BNX2X_MAX_RCQ_DESC_CNT; + val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32; + rxcqe->addr_hi = cpu_to_le32(val); + data->rx.cqe_page_base.hi = cpu_to_le32(val); + + val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; + rxcqe->addr_lo = cpu_to_le32(val); + data->rx.cqe_page_base.lo = cpu_to_le32(val); + + /* Other ramrod params */ + data->rx.client_qzone_id = cl_qzone_id; + data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS; + data->rx.status_block_id = BNX2X_DEF_SB_ID; + + data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT; + + data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size); + data->rx.outer_vlan_removal_enable_flg = 1; + data->rx.silent_vlan_removal_flg = 1; + data->rx.silent_vlan_value = 0; + data->rx.silent_vlan_mask = 0xffff; + + cp->rx_cons_ptr = + &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; + cp->rx_cons = *cp->rx_cons_ptr; +} + +static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 pfid = cp->pfid; + + cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); + cp->kcq1.sw_prod_idx = 0; + + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + struct host_hc_status_block_e2 *sb = cp->status_blk.gen; + + cp->kcq1.hw_prod_idx_ptr = + &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; + cp->kcq1.status_idx_ptr = + &sb->sb.running_index[SM_RX_ID]; + } else { + struct host_hc_status_block_e1x *sb = cp->status_blk.gen; + + cp->kcq1.hw_prod_idx_ptr = + 
&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; + cp->kcq1.status_idx_ptr = + &sb->sb.running_index[SM_RX_ID]; + } + + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + struct host_hc_status_block_e2 *sb = cp->status_blk.gen; + + cp->kcq2.io_addr = BAR_USTRORM_INTMEM + + USTORM_FCOE_EQ_PROD_OFFSET(pfid); + cp->kcq2.sw_prod_idx = 0; + cp->kcq2.hw_prod_idx_ptr = + &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]; + cp->kcq2.status_idx_ptr = + &sb->sb.running_index[SM_RX_ID]; + } +} + +static int cnic_start_bnx2x_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int func = CNIC_FUNC(cp), ret; + u32 pfid; + + cp->port_mode = CHIP_PORT_MODE_NONE; + + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); + + if (!(val & 1)) + val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN); + else + val = (val >> 1) & 1; + + if (val) { + cp->port_mode = CHIP_4_PORT_MODE; + cp->pfid = func >> 1; + } else { + cp->port_mode = CHIP_2_PORT_MODE; + cp->pfid = func & 0x6; + } + } else { + cp->pfid = func; + } + pfid = cp->pfid; + + ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, + cp->iscsi_start_cid, 0); + + if (ret) + return -ENOMEM; + + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, + BNX2X_FCOE_NUM_CONNECTIONS, + cp->fcoe_start_cid, 0); + + if (ret) + return -ENOMEM; + } + + cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2; + + cnic_init_bnx2x_kcq(dev); + + /* Only 1 EQ */ + CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0); + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0), + cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4, + (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0), + cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4, + (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); + CNIC_WR8(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1); + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); + CNIC_WR8(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), + HC_INDEX_ISCSI_EQ_CONS); + + CNIC_WR(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid), + cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); + CNIC_WR(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4, + (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); + + CNIC_WR(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF); + + cnic_setup_bnx2x_context(dev); + + ret = cnic_init_bnx2x_irq(dev); + if (ret) + return ret; + + return 0; +} + +static void cnic_init_rings(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev = cp->udev; + + if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) + return; + + if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { + cnic_init_bnx2_tx_ring(dev); + cnic_init_bnx2_rx_ring(dev); + set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); + } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + u32 cli = cp->ethdev->iscsi_l2_client_id; + u32 cid = cp->ethdev->iscsi_l2_cid; + u32 cl_qzone_id; + struct client_init_ramrod_data *data; + union 
l5cm_specific_data l5_data; + struct ustorm_eth_rx_producers rx_prods = {0}; + u32 off, i, *cid_ptr; + + rx_prods.bd_prod = 0; + rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; + barrier(); + + cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); + + off = BAR_USTRORM_INTMEM + + (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? + USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : + USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); + + for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) + CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); + + set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); + + data = udev->l2_buf; + cid_ptr = udev->l2_buf + 12; + + memset(data, 0, sizeof(*data)); + + cnic_init_bnx2x_tx_ring(dev, data); + cnic_init_bnx2x_rx_ring(dev, data); + + l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; + l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; + + set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); + + cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, + cid, ETH_CONNECTION_TYPE, &l5_data); + + i = 0; + while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && + ++i < 10) + msleep(1); + + if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) + netdev_err(dev->netdev, + "iSCSI CLIENT_SETUP did not complete\n"); + cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); + cnic_ring_ctl(dev, cid, cli, 1); + *cid_ptr = cid; + } +} + +static void cnic_shutdown_rings(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev = cp->udev; + void *rx_ring; + + if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) + return; + + if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { + cnic_shutdown_bnx2_rx_ring(dev); + } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + u32 cli = cp->ethdev->iscsi_l2_client_id; + u32 cid = cp->ethdev->iscsi_l2_cid; + union l5cm_specific_data l5_data; + int i; + + cnic_ring_ctl(dev, cid, cli, 0); + + set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); + + l5_data.phy_address.lo = cli; + l5_data.phy_address.hi = 0; + cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, + cid, ETH_CONNECTION_TYPE, &l5_data); + i = 0; + while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && + ++i < 10) + msleep(1); + + if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) + netdev_err(dev->netdev, + "iSCSI CLIENT_HALT did not complete\n"); + cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); + + memset(&l5_data, 0, sizeof(l5_data)); + cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, + cid, NONE_CONNECTION_TYPE, &l5_data); + msleep(10); + } + clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); + rx_ring = udev->l2_ring + BCM_PAGE_SIZE; + memset(rx_ring, 0, BCM_PAGE_SIZE); +} + +static int cnic_register_netdev(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int err; + + if (!ethdev) + return -ENODEV; + + if (ethdev->drv_state & CNIC_DRV_STATE_REGD) + return 0; + + err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); + if (err) + netdev_err(dev->netdev, "register_cnic failed\n"); + + return err; +} + +static void cnic_unregister_netdev(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!ethdev) + return; + + ethdev->drv_unregister_cnic(dev->netdev); +} + +static int cnic_start_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int err; + + if (test_bit(CNIC_F_CNIC_UP, 
&dev->flags)) + return -EALREADY; + + dev->regview = ethdev->io_base; + pci_dev_get(dev->pcidev); + cp->func = PCI_FUNC(dev->pcidev->devfn); + cp->status_blk.gen = ethdev->irq_arr[0].status_blk; + cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; + + err = cp->alloc_resc(dev); + if (err) { + netdev_err(dev->netdev, "allocate resource failure\n"); + goto err1; + } + + err = cp->start_hw(dev); + if (err) + goto err1; + + err = cnic_cm_open(dev); + if (err) + goto err1; + + set_bit(CNIC_F_CNIC_UP, &dev->flags); + + cp->enable_int(dev); + + return 0; + +err1: + cp->free_resc(dev); + pci_dev_put(dev->pcidev); + return err; +} + +static void cnic_stop_bnx2_hw(struct cnic_dev *dev) +{ + cnic_disable_bnx2_int_sync(dev); + + cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); + cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); + + cnic_init_context(dev, KWQ_CID); + cnic_init_context(dev, KCQ_CID); + + cnic_setup_5709_context(dev, 0); + cnic_free_irq(dev); + + cnic_free_resc(dev); +} + + +static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_free_irq(dev); + *cp->kcq1.hw_prod_idx_ptr = 0; + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); + CNIC_WR16(dev, cp->kcq1.io_addr, 0); + cnic_free_resc(dev); +} + +static void cnic_stop_hw(struct cnic_dev *dev) +{ + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { + struct cnic_local *cp = dev->cnic_priv; + int i = 0; + + /* Need to wait for the ring shutdown event to complete + * before clearing the CNIC_UP flag. + */ + while (cp->udev->uio_dev != -1 && i < 15) { + msleep(100); + i++; + } + cnic_shutdown_rings(dev); + clear_bit(CNIC_F_CNIC_UP, &dev->flags); + rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); + synchronize_rcu(); + cnic_cm_shutdown(dev); + cp->stop_hw(dev); + pci_dev_put(dev->pcidev); + } +} + +static void cnic_free_dev(struct cnic_dev *dev) +{ + int i = 0; + + while ((atomic_read(&dev->ref_count) != 0) && i < 10) { + msleep(100); + i++; + } + if (atomic_read(&dev->ref_count) != 0) + netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n"); + + netdev_info(dev->netdev, "Removed CNIC device\n"); + dev_put(dev->netdev); + kfree(dev); +} + +static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, + struct pci_dev *pdev) +{ + struct cnic_dev *cdev; + struct cnic_local *cp; + int alloc_size; + + alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); + + cdev = kzalloc(alloc_size , GFP_KERNEL); + if (cdev == NULL) { + netdev_err(dev, "allocate dev struct failure\n"); + return NULL; + } + + cdev->netdev = dev; + cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); + cdev->register_device = cnic_register_device; + cdev->unregister_device = cnic_unregister_device; + cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; + + cp = cdev->cnic_priv; + cp->dev = cdev; + cp->l2_single_buf_size = 0x400; + cp->l2_rx_ring_size = 3; + + spin_lock_init(&cp->cnic_ulp_lock); + + netdev_info(dev, "Added CNIC device\n"); + + return cdev; +} + +static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) +{ + struct pci_dev *pdev; + struct cnic_dev *cdev; + struct cnic_local *cp; + struct cnic_eth_dev *ethdev = NULL; + struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; + + probe = symbol_get(bnx2_cnic_probe); + if (probe) { + ethdev = (*probe)(dev); + symbol_put(bnx2_cnic_probe); + } + if (!ethdev) + return NULL; + + pdev = ethdev->pdev; + if (!pdev) + return NULL; + + dev_hold(dev); + pci_dev_get(pdev); + if ((pdev->device == 
PCI_DEVICE_ID_NX2_5709 || + pdev->device == PCI_DEVICE_ID_NX2_5709S) && + (pdev->revision < 0x10)) { + pci_dev_put(pdev); + goto cnic_err; + } + pci_dev_put(pdev); + + cdev = cnic_alloc_dev(dev, pdev); + if (cdev == NULL) + goto cnic_err; + + set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); + cdev->submit_kwqes = cnic_submit_bnx2_kwqes; + + cp = cdev->cnic_priv; + cp->ethdev = ethdev; + cdev->pcidev = pdev; + cp->chip_id = ethdev->chip_id; + + cdev->max_iscsi_conn = ethdev->max_iscsi_conn; + + cp->cnic_ops = &cnic_bnx2_ops; + cp->start_hw = cnic_start_bnx2_hw; + cp->stop_hw = cnic_stop_bnx2_hw; + cp->setup_pgtbl = cnic_setup_page_tbl; + cp->alloc_resc = cnic_alloc_bnx2_resc; + cp->free_resc = cnic_free_resc; + cp->start_cm = cnic_cm_init_bnx2_hw; + cp->stop_cm = cnic_cm_stop_bnx2_hw; + cp->enable_int = cnic_enable_bnx2_int; + cp->disable_int_sync = cnic_disable_bnx2_int_sync; + cp->close_conn = cnic_close_bnx2_conn; + return cdev; + +cnic_err: + dev_put(dev); + return NULL; +} + +static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) +{ + struct pci_dev *pdev; + struct cnic_dev *cdev; + struct cnic_local *cp; + struct cnic_eth_dev *ethdev = NULL; + struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; + + probe = symbol_get(bnx2x_cnic_probe); + if (probe) { + ethdev = (*probe)(dev); + symbol_put(bnx2x_cnic_probe); + } + if (!ethdev) + return NULL; + + pdev = ethdev->pdev; + if (!pdev) + return NULL; + + dev_hold(dev); + cdev = cnic_alloc_dev(dev, pdev); + if (cdev == NULL) { + dev_put(dev); + return NULL; + } + + set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); + cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; + + cp = cdev->cnic_priv; + cp->ethdev = ethdev; + cdev->pcidev = pdev; + cp->chip_id = ethdev->chip_id; + + if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) + cdev->max_iscsi_conn = ethdev->max_iscsi_conn; + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && + !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) + cdev->max_fcoe_conn = ethdev->max_fcoe_conn; + + memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6); + + cp->cnic_ops = &cnic_bnx2x_ops; + cp->start_hw = cnic_start_bnx2x_hw; + cp->stop_hw = cnic_stop_bnx2x_hw; + cp->setup_pgtbl = cnic_setup_page_tbl_le; + cp->alloc_resc = cnic_alloc_bnx2x_resc; + cp->free_resc = cnic_free_resc; + cp->start_cm = cnic_cm_init_bnx2x_hw; + cp->stop_cm = cnic_cm_stop_bnx2x_hw; + cp->enable_int = cnic_enable_bnx2x_int; + cp->disable_int_sync = cnic_disable_bnx2x_int_sync; + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) + cp->ack_int = cnic_ack_bnx2x_e2_msix; + else + cp->ack_int = cnic_ack_bnx2x_msix; + cp->close_conn = cnic_close_bnx2x_conn; + return cdev; +} + +static struct cnic_dev *is_cnic_dev(struct net_device *dev) +{ + struct ethtool_drvinfo drvinfo; + struct cnic_dev *cdev = NULL; + + if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + dev->ethtool_ops->get_drvinfo(dev, &drvinfo); + + if (!strcmp(drvinfo.driver, "bnx2")) + cdev = init_bnx2_cnic(dev); + if (!strcmp(drvinfo.driver, "bnx2x")) + cdev = init_bnx2x_cnic(dev); + if (cdev) { + write_lock(&cnic_dev_lock); + list_add(&cdev->list, &cnic_dev_list); + write_unlock(&cnic_dev_lock); + } + } + return cdev; +} + +static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, + u16 vlan_id) +{ + int if_type; + + rcu_read_lock(); + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { + struct cnic_ulp_ops *ulp_ops; + void *ctx; + + ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); + if (!ulp_ops || !ulp_ops->indicate_netevent) + continue; + + ctx = 
cp->ulp_handle[if_type]; + + ulp_ops->indicate_netevent(ctx, event, vlan_id); + } + rcu_read_unlock(); +} + +/** + * netdev event handler + */ +static int cnic_netdev_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *netdev = ptr; + struct cnic_dev *dev; + int new_dev = 0; + + dev = cnic_from_netdev(netdev); + + if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) { + /* Check for the hot-plug device */ + dev = is_cnic_dev(netdev); + if (dev) { + new_dev = 1; + cnic_hold(dev); + } + } + if (dev) { + struct cnic_local *cp = dev->cnic_priv; + + if (new_dev) + cnic_ulp_init(dev); + else if (event == NETDEV_UNREGISTER) + cnic_ulp_exit(dev); + + if (event == NETDEV_UP || (new_dev && netif_running(netdev))) { + if (cnic_register_netdev(dev) != 0) { + cnic_put(dev); + goto done; + } + if (!cnic_start_hw(dev)) + cnic_ulp_start(dev); + } + + cnic_rcv_netevent(cp, event, 0); + + if (event == NETDEV_GOING_DOWN) { + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + cnic_unregister_netdev(dev); + } else if (event == NETDEV_UNREGISTER) { + write_lock(&cnic_dev_lock); + list_del_init(&dev->list); + write_unlock(&cnic_dev_lock); + + cnic_put(dev); + cnic_free_dev(dev); + goto done; + } + cnic_put(dev); + } else { + struct net_device *realdev; + u16 vid; + + vid = cnic_get_vlan(netdev, &realdev); + if (realdev) { + dev = cnic_from_netdev(realdev); + if (dev) { + vid |= VLAN_TAG_PRESENT; + cnic_rcv_netevent(dev->cnic_priv, event, vid); + cnic_put(dev); + } + } + } +done: + return NOTIFY_DONE; +} + +static struct notifier_block cnic_netdev_notifier = { + .notifier_call = cnic_netdev_event +}; + +static void cnic_release(void) +{ + struct cnic_dev *dev; + struct cnic_uio_dev *udev; + + while (!list_empty(&cnic_dev_list)) { + dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + } + + cnic_ulp_exit(dev); + cnic_unregister_netdev(dev); + list_del_init(&dev->list); + cnic_free_dev(dev); + } + while (!list_empty(&cnic_udev_list)) { + udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, + list); + cnic_free_uio(udev); + } +} + +static int __init cnic_init(void) +{ + int rc = 0; + + pr_info("%s", version); + + rc = register_netdevice_notifier(&cnic_netdev_notifier); + if (rc) { + cnic_release(); + return rc; + } + + cnic_wq = create_singlethread_workqueue("cnic_wq"); + if (!cnic_wq) { + cnic_release(); + unregister_netdevice_notifier(&cnic_netdev_notifier); + return -ENOMEM; + } + + return 0; +} + +static void __exit cnic_exit(void) +{ + unregister_netdevice_notifier(&cnic_netdev_notifier); + cnic_release(); + destroy_workqueue(cnic_wq); +} + +module_init(cnic_init); +module_exit(cnic_exit); diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h new file mode 100644 index 0000000..7a2928f --- /dev/null +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -0,0 +1,478 @@ +/* cnic.h: Broadcom CNIC core network driver. + * + * Copyright (c) 2006-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ *
+ */
+
+
+#ifndef CNIC_H
+#define CNIC_H
+
+#define HC_INDEX_ISCSI_EQ_CONS 6
+
+#define HC_INDEX_FCOE_EQ_CONS 3
+
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+
+#define KWQ_PAGE_CNT 4
+#define KCQ_PAGE_CNT 16
+
+#define KWQ_CID 24
+#define KCQ_CID 25
+
+/*
+ * krnlq_context definition
+ */
+#define L5_KRNLQ_FLAGS 0x00000000
+#define L5_KRNLQ_SIZE 0x00000000
+#define L5_KRNLQ_TYPE 0x00000000
+#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
+#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
+#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
+#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
+#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
+#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
+#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
+#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
+#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
+#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
+#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
+#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
+#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
+#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
+#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
+#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
+#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
+#define KRNLQ_TYPE_TYPE (0xf<<28)
+#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
+#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
+
+#define L5_KRNLQ_HOST_QIDX 0x00000004
+#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
+#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
+#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
+#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
+#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
+#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
+#define L5_KRNLQ_NX_PG_QIDX 0x00000018
+#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
+#define L5_KRNLQ_QIDX_INCR 0x0000001c
+#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
+#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
+
+#define BNX2_PG_CTX_MAP 0x1a0034
+#define BNX2_ISCSI_CTX_MAP 0x1a0074
+
+#define MAX_COMPLETED_KCQE 64
+
+#define MAX_CNIC_L5_CONTEXT 256
+
+#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
+
+#define MAX_ISCSI_TBL_SZ 256
+
+#define CNIC_LOCAL_PORT_MIN 60000
+#define CNIC_LOCAL_PORT_MAX 61024
+#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
+
+#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
+#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
+#define MAX_KWQE_CNT (KWQE_CNT - 1)
+#define MAX_KCQE_CNT (KCQE_CNT - 1)
+
+#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
+#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
+
+#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
+
+#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
+
+#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
+		(MAX_KCQE_CNT - 1)) ? \
+		(x) + 2 : (x) + 1
+
+#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA(cp, x) \
+	&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
+
+#define DEF_IPID_START 0x8000
+
+#define DEF_KA_TIMEOUT 10000
+#define DEF_KA_INTERVAL 300000
+#define DEF_KA_MAX_PROBE_COUNT 3
+#define DEF_TOS 0
+#define DEF_TTL 0xfe
+#define DEF_SND_SEQ_SCALE 0
+#define DEF_RCV_BUF 0xffff
+#define DEF_SND_BUF 0xffff
+#define DEF_SEED 0
+#define DEF_MAX_RT_TIME 500
+#define DEF_MAX_DA_COUNT 2
+#define DEF_SWS_TIMER 1000
+#define DEF_MAX_CWND 0xffff
+
+struct cnic_ctx {
+	u32 cid;
+	void *ctx;
+	dma_addr_t mapping;
+};
+
+#define BNX2_MAX_CID 0x2000
+
+struct cnic_dma {
+	int num_pages;
+	void **pg_arr;
+	dma_addr_t *pg_map_arr;
+	int pgtbl_size;
+	u32 *pgtbl;
+	dma_addr_t pgtbl_map;
+};
+
+struct cnic_id_tbl {
+	spinlock_t lock;
+	u32 start;
+	u32 max;
+	u32 next;
+	unsigned long *table;
+};
+
+#define CNIC_KWQ16_DATA_SIZE 128
+
+struct kwqe_16_data {
+	u8 data[CNIC_KWQ16_DATA_SIZE];
+};
+
+struct cnic_iscsi {
+	struct cnic_dma task_array_info;
+	struct cnic_dma r2tq_info;
+	struct cnic_dma hq_info;
+};
+
+struct cnic_context {
+	u32 cid;
+	struct kwqe_16_data *kwqe_data;
+	dma_addr_t kwqe_data_mapping;
+	wait_queue_head_t waitq;
+	int wait_cond;
+	unsigned long timestamp;
+	unsigned long ctx_flags;
+#define CTX_FL_OFFLD_START 0
+#define CTX_FL_DELETE_WAIT 1
+#define CTX_FL_CID_ERROR 2
+	u8 ulp_proto_id;
+	union {
+		struct cnic_iscsi *iscsi;
+	} proto;
+};
+
+struct kcq_info {
+	struct cnic_dma dma;
+	struct kcqe **kcq;
+
+	u16 *hw_prod_idx_ptr;
+	u16 sw_prod_idx;
+	u16 *status_idx_ptr;
+	u32 io_addr;
+
+	u16 (*next_idx)(u16);
+	u16 (*hw_idx)(u16);
+};
+
+struct iro {
+	u32 base;
+	u16 m1;
+	u16 m2;
+	u16 m3;
+	u16 size;
+};
+
+struct cnic_uio_dev {
+	struct uio_info cnic_uinfo;
+	u32 uio_dev;
+
+	int l2_ring_size;
+	void *l2_ring;
+	dma_addr_t l2_ring_map;
+
+	int l2_buf_size;
+	void *l2_buf;
+	dma_addr_t l2_buf_map;
+
+	struct cnic_dev *dev;
+	struct pci_dev *pdev;
+	struct list_head list;
+};
+
+struct cnic_local {
+
+	spinlock_t cnic_ulp_lock;
+	void *ulp_handle[MAX_CNIC_ULP_TYPE];
+	unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
+#define ULP_F_INIT 0
+#define ULP_F_START 1
+#define ULP_F_CALL_PENDING 2
+	struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
+
+	unsigned long cnic_local_flags;
+#define CNIC_LCL_FL_KWQ_INIT 0x0
+#define CNIC_LCL_FL_L2_WAIT 0x1
+#define CNIC_LCL_FL_RINGS_INITED 0x2
+#define CNIC_LCL_FL_STOP_ISCSI 0x4
+
+	struct cnic_dev *dev;
+
+	struct cnic_eth_dev *ethdev;
+
+	struct cnic_uio_dev *udev;
+
+	int l2_rx_ring_size;
+	int l2_single_buf_size;
+
+	u16 *rx_cons_ptr;
+	u16 *tx_cons_ptr;
+	u16 rx_cons;
+	u16 tx_cons;
+
+	const struct iro *iro_arr;
+#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
+
+	struct cnic_dma kwq_info;
+	struct kwqe **kwq;
+
+	struct cnic_dma kwq_16_data_info;
+
+	u16 max_kwq_idx;
+
+	u16 kwq_prod_idx;
+	u32 kwq_io_addr;
+
+	u16 *kwq_con_idx_ptr;
+	u16 kwq_con_idx;
+
+	struct kcq_info kcq1;
+	struct kcq_info kcq2;
+
+	union {
+		void *gen;
+		struct status_block_msix *bnx2;
+		struct host_hc_status_block_e1x *bnx2x_e1x;
+		/* index values - which counter to update */
+		#define SM_RX_ID 0
+		#define SM_TX_ID 1
+	} status_blk;
+
+	struct host_sp_status_block *bnx2x_def_status_blk;
+
+	u32 status_blk_num;
+	u32 bnx2x_igu_sb_id;
+	u32 int_num;
+	u32 last_status_idx;
+	struct tasklet_struct cnic_irq_task;
+
+	struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
+
+	struct cnic_sock *csk_tbl;
+	struct cnic_id_tbl csk_port_tbl;
+
+	struct cnic_dma gbl_buf_info;
+
+	struct cnic_iscsi *iscsi_tbl;
+	struct cnic_context *ctx_tbl;
+	struct cnic_id_tbl cid_tbl;
+	atomic_t iscsi_conn;
+	u32 iscsi_start_cid;
+
+	u32 fcoe_init_cid;
+	u32 fcoe_start_cid;
+	struct cnic_id_tbl fcoe_cid_tbl;
+
+	u32 max_cid_space;
+
+	/* per connection parameters */
+	int num_iscsi_tasks;
+	int num_ccells;
+	int task_array_size;
+	int r2tq_size;
+	int hq_size;
+	int num_cqs;
+
+	struct delayed_work delete_task;
+
+	struct cnic_ctx *ctx_arr;
+	int ctx_blks;
+	int ctx_blk_size;
+	unsigned long ctx_align;
+	int cids_per_blk;
+
+	u32 chip_id;
+	int func;
+	u32 pfid;
+	u8 port_mode;
+#define CHIP_4_PORT_MODE 0
+#define CHIP_2_PORT_MODE 1
+#define CHIP_PORT_MODE_NONE 2
+
+	u32 shmem_base;
+
+	struct cnic_ops *cnic_ops;
+	int (*start_hw)(struct cnic_dev *);
+	void (*stop_hw)(struct cnic_dev *);
+	void (*setup_pgtbl)(struct cnic_dev *,
+			    struct cnic_dma *);
+	int (*alloc_resc)(struct cnic_dev *);
+	void (*free_resc)(struct cnic_dev *);
+	int (*start_cm)(struct cnic_dev *);
+	void (*stop_cm)(struct cnic_dev *);
+	void (*enable_int)(struct cnic_dev *);
+	void (*disable_int_sync)(struct cnic_dev *);
+	void (*ack_int)(struct cnic_dev *);
+	void (*close_conn)(struct cnic_sock *, u32 opcode);
+};
+
+struct bnx2x_bd_chain_next {
+	u32 addr_lo;
+	u32 addr_hi;
+	u8 reserved[8];
+};
+
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
+
+#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
+#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+#define CDU_VALID_DATA(_cid, _region, _type) \
+	(((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+
+#define CDU_CRC8(_cid, _region, _type) \
+	(calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \
+	(0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+
+#define BNX2X_CONTEXT_MEM_SIZE 1024
+#define BNX2X_FCOE_CID 16
+
+#define BNX2X_ISCSI_START_CID 18
+#define BNX2X_ISCSI_NUM_CONNECTIONS 128
+#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128
+#define BNX2X_ISCSI_MAX_PENDING_R2TS 4
+#define BNX2X_ISCSI_R2TQE_SIZE 8
+#define BNX2X_ISCSI_HQ_BD_SIZE 64
+#define BNX2X_ISCSI_GLB_BUF_SIZE 64
+#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
+#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
+
+#define BNX2X_FCOE_NUM_CONNECTIONS 128
+
+#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
+
+#define BNX2X_CHIP_NUM_57710 0x164e
+#define BNX2X_CHIP_NUM_57711 0x164f
+#define BNX2X_CHIP_NUM_57711E 0x1650
+#define BNX2X_CHIP_NUM_57712 0x1662
+#define BNX2X_CHIP_NUM_57712E 0x1663
+#define BNX2X_CHIP_NUM_57713 0x1651
+#define BNX2X_CHIP_NUM_57713E 0x1652
+#define BNX2X_CHIP_NUM_57800 0x168a
+#define BNX2X_CHIP_NUM_57810 0x168e
+#define BNX2X_CHIP_NUM_57840 0x168d
+
+#define BNX2X_CHIP_NUM(x) (x >> 16)
+#define BNX2X_CHIP_IS_57710(x) \
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
+#define BNX2X_CHIP_IS_57711(x) \
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
+#define BNX2X_CHIP_IS_57711E(x) \
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
+#define BNX2X_CHIP_IS_E1H(x) \
+	(BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
+#define BNX2X_CHIP_IS_57712(x) \
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
+#define BNX2X_CHIP_IS_57712E(x) \
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
+#define BNX2X_CHIP_IS_57713(x) \
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
+#define BNX2X_CHIP_IS_57713E(x) \
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
+#define BNX2X_CHIP_IS_57800(x) \
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
+#define BNX2X_CHIP_IS_57810(x) \
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
+#define BNX2X_CHIP_IS_57840(x) \
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
+#define BNX2X_CHIP_IS_E2(x) \
+	(BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
+	 BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
+#define BNX2X_CHIP_IS_E3(x) \
+	(BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
+	 BNX2X_CHIP_IS_57840(x))
+#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
+
+#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
+
+#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2)
+#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
+
+#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \
+		(BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
+		((x) + 2) : ((x) + 1)
+
+#define BNX2X_DEF_SB_ID HC_SP_SB_ID
+
+#define BNX2X_SHMEM_MF_BLK_OFFSET 0x7e4
+
+#define BNX2X_SHMEM_ADDR(base, field) (base + \
+	offsetof(struct shmem_region, field))
+
+#define BNX2X_SHMEM2_ADDR(base, field) (base + \
+	offsetof(struct shmem2_region, field))
+
+#define BNX2X_SHMEM2_HAS(base, field) \
+	((base) && \
+	 (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \
+	  offsetof(struct shmem2_region, field)))
+
+#define BNX2X_MF_CFG_ADDR(base, field) \
+	((base) + offsetof(struct mf_cfg, field))
+
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
+#endif
+
+#define CNIC_PORT(cp) ((cp)->pfid & 1)
+#define CNIC_FUNC(cp) ((cp)->func)
+#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
+			0 : (CNIC_FUNC(cp) & 1))
+#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
+
+#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
+			     (CNIC_E1HVN(cp) << 17) | (x))
+
+#define BNX2X_SW_CID(x) (x & 0x1ffff)
+
+#define BNX2X_CL_QZONE_ID(cp, cli) \
+		(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \
+		 cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+
+#ifndef MAX_STAT_COUNTER_ID
+#define MAX_STAT_COUNTER_ID \
+	(BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \
+	 ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+	  MAX_STAT_COUNTER_ID_E1))
+#endif
+
+#endif
+
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
new file mode 100644
index 0000000..e47d210
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -0,0 +1,5484 @@
+
+/* cnic.c: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef CNIC_DEFS_H
+#define CNIC_DEFS_H
+
+/* KWQ (kernel work queue) request op codes */
+#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
+#define L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE (8)
+
+#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
+#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
+#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
+#define L4_KWQE_OPCODE_VALUE_RESET (53)
+#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
+#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
+
+#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
+#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
+
+#define L5CM_RAMROD_CMD_ID_BASE (0x80)
+#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
+#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
+#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
+#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
+#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
+
+#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
+#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
+#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
+#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
+#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
+#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
+#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
+#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
+
+#define FCOE_RAMROD_CMD_ID_INIT_FUNC (FCOE_KCQE_OPCODE_INIT_FUNC)
+#define FCOE_RAMROD_CMD_ID_DESTROY_FUNC (FCOE_KCQE_OPCODE_DESTROY_FUNC)
+#define FCOE_RAMROD_CMD_ID_STAT_FUNC (FCOE_KCQE_OPCODE_STAT_FUNC)
+#define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN (FCOE_KCQE_OPCODE_OFFLOAD_CONN)
+#define FCOE_RAMROD_CMD_ID_ENABLE_CONN (FCOE_KCQE_OPCODE_ENABLE_CONN)
+#define FCOE_RAMROD_CMD_ID_DISABLE_CONN (FCOE_KCQE_OPCODE_DISABLE_CONN)
+#define FCOE_RAMROD_CMD_ID_DESTROY_CONN (FCOE_KCQE_OPCODE_DESTROY_CONN)
+#define FCOE_RAMROD_CMD_ID_TERMINATE_CONN (0x81)
+
+#define FCOE_KWQE_OPCODE_INIT1 (0)
+#define FCOE_KWQE_OPCODE_INIT2 (1)
+#define FCOE_KWQE_OPCODE_INIT3 (2)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
+#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
+#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
+#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
+#define FCOE_KWQE_OPCODE_DESTROY (10)
+#define FCOE_KWQE_OPCODE_STAT (11)
+
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
+
+/* KCQ (kernel completion queue) response op codes */
+#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
+#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
+#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
+#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
+#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
+#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
+#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
+
+#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
+#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
+#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
+
+/* KCQ (kernel completion queue) completion status */
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
+
+#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
+#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89)
+
+#define L4_KCQE_OPCODE_VALUE_OOO_EVENT_NOTIFICATION (0xa0)
+#define L4_KCQE_OPCODE_VALUE_OOO_FLUSH (0xa1)
+
+#define L4_LAYER_CODE (4)
+#define L2_LAYER_CODE (2)
+
+/*
+ * L4 KCQ CQE
+ */
+struct l4_kcq {
+	u32 cid;
+	u32 pg_cid;
+	u32 conn_id;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u16 status;
+	u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved1;
+	u16 status;
+#endif
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0xF<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * L4 KCQ CQE PG upload
+ */
+struct l4_kcq_upload_pg {
+	u32 pg_cid;
+#if defined(__BIG_ENDIAN)
+	u16 pg_status;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u16 pg_status;
+#endif
+	u32 reserved1[5];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * Gracefully close the connection request
+ */
+struct l4_kwq_close_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * The first request to be passed in order to establish a connection in option2
+ */
+struct l4_kwq_connect_req1 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 pg_cid;
+	u32 src_ip;
+	u32 dst_ip;
+#if defined(__BIG_ENDIAN)
+	u16 dst_port;
+	u16 src_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 src_port;
+	u16 dst_port;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 rsrv1[3];
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+	u8 rsrv1[3];
+#endif
+	u32 rsrv2;
+};
+
+
+/*
+ * The second (optional) request to be passed in order to establish a
+ * connection in option2 - for IPv6 only
+ */
+struct l4_kwq_connect_req2 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 rsrv;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rsrv;
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 reserved2;
+	u32 src_ip_v6_2;
+	u32 src_ip_v6_3;
+	u32 src_ip_v6_4;
+	u32 dst_ip_v6_2;
+	u32 dst_ip_v6_3;
+	u32 dst_ip_v6_4;
+};
+
+
+/*
+ * The third (and last) request to be passed in order to establish a
+ * connection in option2
+ */
+struct l4_kwq_connect_req3 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 ka_timeout;
+	u32 ka_interval;
+#if defined(__BIG_ENDIAN)
+	u8 snd_seq_scale;
+	u8 ttl;
+	u8 tos;
+	u8 ka_max_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_max_probe_count;
+	u8 tos;
+	u8 ttl;
+	u8 snd_seq_scale;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 pmtu;
+	u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mss;
+	u16 pmtu;
+#endif
+	u32 rcv_buf;
+	u32 snd_buf;
+	u32 seed;
+};
+
+
+/*
+ * a KWQE request to offload a PG connection
+ */
+struct l4_kwq_offload_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 l2hdr_nbytes;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 l2hdr_nbytes;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa0;
+	u8 sa1;
+	u8 sa2;
+	u8 sa3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sa3;
+	u8 sa2;
+	u8 sa1;
+	u8 sa0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa4;
+	u8 sa5;
+	u16 etype;
+#elif defined(__LITTLE_ENDIAN)
+	u16 etype;
+	u8 sa5;
+	u8 sa4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 vlan_tag;
+	u16 ipid_start;
+#elif defined(__LITTLE_ENDIAN)
+	u16 ipid_start;
+	u16 vlan_tag;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ipid_count;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 ipid_count;
+#endif
+	u32 host_opaque;
+};
+
+
+/*
+ * Abortively close the connection request
+ */
+struct l4_kwq_reset_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * a KWQE request to update a PG connection
+ */
+struct l4_kwq_update_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 pg_cid;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+	u8 pg_unused_a;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u8 pg_unused_a;
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserverd3;
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u16 reserverd3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+	u32 reserved4;
+	u32 reserved5;
+};
+
+
+/*
+ * a KWQE request to upload a PG or L4 context
+ */
+struct l4_kwq_upload {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+/*
+ * bnx2x structures
+ */
+
+/*
+ * The iscsi aggregative context of Cstorm
+ */
+struct cstorm_iscsi_ag_context {
+	u32 agg_vars1;
+#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
+#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<14)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 14
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<19)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 19
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN (0x1<<20)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN_SHIFT 20
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<21)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 21
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<22)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 22
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
+#if defined(__BIG_ENDIAN)
+	u8 __aux1_th;
+	u8 __aux1_val;
+	u16 __agg_vars2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_vars2;
+	u8 __aux1_val;
+	u8 __aux1_th;
+#endif
+	u32 rel_seq;
+	u32 rel_seq_th;
+#if defined(__BIG_ENDIAN)
+	u16 hq_cons;
+	u16 hq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_prod;
+	u16 hq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved62;
+	u8 __reserved61;
+	u8 __reserved60;
+	u8 __reserved59;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __reserved59;
+	u8 __reserved60;
+	u8 __reserved61;
+	u8 __reserved62;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __reserved64;
+	u16 cq_u_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_u_prod;
+	u16 __reserved64;
+#endif
+	u32 __cq_u_prod1;
+#if defined(__BIG_ENDIAN)
+	u16 __agg_vars3;
+	u16 cq_u_pend;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_u_pend;
+	u16 __agg_vars3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __aux2_th;
+	u16 aux2_val;
+#elif defined(__LITTLE_ENDIAN)
+	u16 aux2_val;
+	u16 __aux2_th;
+#endif
+};
+
+/*
+ * The fcoe extra aggregative context section of Tstorm
+ */
+struct tstorm_fcoe_extra_ag_context_section {
+	u32 __agg_val1;
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars2;
+	u8 __agg_val3;
+	u16 __agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val2;
+	u8 __agg_val3;
+	u8 __tcp_agg_vars2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val5;
+	u8 __agg_val6;
+	u8 __tcp_agg_vars3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __tcp_agg_vars3;
+	u8 __agg_val6;
+	u16 __agg_val5;
+#endif
+	u32 __lcq_prod;
+	u32 rtt_seq;
+	u32 rtt_time;
+	u32 __reserved66;
+	u32 wnd_right_edge;
+	u32 tcp_agg_vars1;
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+	u32 snd_max;
+	u32 __lcq_cons;
+	u32 __reserved2;
+};
+
+/*
+ * The fcoe aggregative context of Tstorm
+ */
+struct tstorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 ulp_credit;
+	u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+	u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val4;
+	u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10) +#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10 +#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11) +#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11 +#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12) +#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 +#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13) +#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 +#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14) +#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 +#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15) +#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 + u16 __agg_val4; +#endif + struct tstorm_fcoe_extra_ag_context_section __extra_section; +}; + + + +/* + * The tcp aggregative context section of Tstorm + */ +struct tstorm_tcp_tcp_ag_context_section { + u32 __agg_val1; +#if defined(__BIG_ENDIAN) + u8 __tcp_agg_vars2; + u8 __agg_val3; + u16 __agg_val2; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val2; + u8 __agg_val3; + u8 __tcp_agg_vars2; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val5; + u8 __agg_val6; + u8 __tcp_agg_vars3; +#elif defined(__LITTLE_ENDIAN) + u8 __tcp_agg_vars3; + u8 __agg_val6; + u16 __agg_val5; +#endif + u32 snd_nxt; + u32 rtt_seq; + u32 rtt_time; + u32 __reserved66; + u32 wnd_right_edge; + u32 tcp_agg_vars1; +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18 +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19) +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19 +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20) +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20 +#define 
__TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21) +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21 +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22) +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28 + u32 snd_max; + u32 snd_una; + u32 __reserved2; +}; + +/* + * The iscsi aggregative context of Tstorm + */ +struct tstorm_iscsi_ag_context { +#if defined(__BIG_ENDIAN) + u16 ulp_credit; + u8 agg_vars1; +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4) +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 +#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7) +#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4) +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 +#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7) +#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7 + u16 ulp_credit; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val4; + u16 agg_vars2; +#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0) +#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0 +#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1) +#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1 +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2) +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2 +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4) +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11) +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11 +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12) +#define 
__TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12 +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13) +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13 +#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) +#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 +#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) +#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 +#elif defined(__LITTLE_ENDIAN) + u16 agg_vars2; +#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0) +#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0 +#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1) +#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1 +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2) +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2 +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4) +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11) +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11 +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12) +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12 +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13) +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13 +#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) +#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 +#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) +#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 + u16 __agg_val4; +#endif + struct tstorm_tcp_tcp_ag_context_section tcp; +}; + + + +/* + * The fcoe aggregative context of Ustorm + */ +struct ustorm_fcoe_ag_context { +#if defined(__BIG_ENDIAN) + u8 __aux_counter_flags; + u8 agg_vars2; +#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) +#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 +#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) +#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 + u8 agg_vars1; +#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) +#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define 
USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) +#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 + u8 agg_vars2; +#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) +#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 +#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) +#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 + u8 __aux_counter_flags; +#endif +#if defined(__BIG_ENDIAN) + u8 cdu_usage; + u8 agg_misc2; + u16 pbf_tx_seq_ack; +#elif defined(__LITTLE_ENDIAN) + u16 pbf_tx_seq_ack; + u8 agg_misc2; + u8 cdu_usage; +#endif + u32 agg_misc4; +#if defined(__BIG_ENDIAN) + u8 agg_val3_th; + u8 agg_val3; + u16 agg_misc3; +#elif defined(__LITTLE_ENDIAN) + u16 agg_misc3; + u8 agg_val3; + u8 agg_val3_th; +#endif + u32 expired_task_id; + u32 agg_misc4_th; +#if defined(__BIG_ENDIAN) + u16 cq_prod; + u16 cq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 cq_cons; + u16 cq_prod; +#endif +#if defined(__BIG_ENDIAN) + u16 __reserved2; + u8 decision_rules; +#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) +#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 +#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) +#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 +#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 + u8 decision_rule_enable_bits; +#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) +#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 +#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) +#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 +#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) +#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 +#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) +#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 +#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) +#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 +#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 decision_rule_enable_bits; +#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) +#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 +#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) +#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 +#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) +#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 +#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) +#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 +#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) +#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 +#define 
__USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 + u8 decision_rules; +#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) +#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 +#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) +#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 +#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 + u16 __reserved2; +#endif +}; + + +/* + * The iscsi aggregative context of Ustorm + */ +struct ustorm_iscsi_ag_context { +#if defined(__BIG_ENDIAN) + u8 __aux_counter_flags; + u8 agg_vars2; +#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0) +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0 +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2) +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2 +#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) +#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 + u8 agg_vars1; +#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4) +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4 +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6) +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4) +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4 +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6) +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6 + u8 agg_vars2; +#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0) +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0 +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2) +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2 +#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) +#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 + u8 __aux_counter_flags; +#endif +#if defined(__BIG_ENDIAN) + u8 cdu_usage; + u8 agg_misc2; + u16 __cq_local_comp_itt_val; +#elif defined(__LITTLE_ENDIAN) + u16 __cq_local_comp_itt_val; + u8 agg_misc2; + u8 cdu_usage; +#endif + u32 agg_misc4; +#if defined(__BIG_ENDIAN) + u8 agg_val3_th; + u8 agg_val3; + u16 agg_misc3; +#elif defined(__LITTLE_ENDIAN) + u16 agg_misc3; + u8 agg_val3; + u8 agg_val3_th; +#endif + u32 agg_val1; + u32 agg_misc4_th; +#if defined(__BIG_ENDIAN) + u16 agg_val2_th; + u16 agg_val2; +#elif defined(__LITTLE_ENDIAN) + 
u16 agg_val2; + u16 agg_val2_th; +#endif +#if defined(__BIG_ENDIAN) + u16 __reserved2; + u8 decision_rules; +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0) +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 +#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 + u8 decision_rule_enable_bits; +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0) +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0 +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2) +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2 +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3) +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 +#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4) +#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4 +#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5) +#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5 +#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6) +#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 +#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 decision_rule_enable_bits; +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0) +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0 +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2) +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2 +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3) +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 +#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4) +#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4 +#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5) +#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5 +#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6) +#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 +#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 + u8 decision_rules; +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0) +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 +#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 + u16 __reserved2; +#endif +}; + + +/* + * The fcoe aggregative context section of Xstorm + */ +struct xstorm_fcoe_extra_ag_context_section { +#if defined(__BIG_ENDIAN) + u8 tcp_agg_vars1; +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) +#define 
XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7 + u8 __reserved_da_cnt; + u16 __mtu; +#elif defined(__LITTLE_ENDIAN) + u16 __mtu; + u8 __reserved_da_cnt; + u8 tcp_agg_vars1; +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7 +#endif + u32 snd_nxt; + u32 tx_wnd; + u32 __reserved55; + u32 local_adv_wnd; +#if defined(__BIG_ENDIAN) + u8 __agg_val8_th; + u8 __tx_dest; + u16 tcp_agg_vars2; +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6 +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 tcp_agg_vars2; +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0 +#define 
__XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6 +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 + u8 __tx_dest; + u8 __agg_val8_th; +#endif + u32 __sq_base_addr_lo; + u32 __sq_base_addr_hi; + u32 __xfrq_base_addr_lo; + u32 __xfrq_base_addr_hi; +#if defined(__BIG_ENDIAN) + u16 __xfrq_cons; + u16 __xfrq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 __xfrq_prod; + u16 __xfrq_cons; +#endif +#if defined(__BIG_ENDIAN) + u8 __tcp_agg_vars5; + u8 __tcp_agg_vars4; + u8 __tcp_agg_vars3; + u8 __reserved_force_pure_ack_cnt; +#elif defined(__LITTLE_ENDIAN) + u8 __reserved_force_pure_ack_cnt; + u8 __tcp_agg_vars3; + u8 __tcp_agg_vars4; + u8 __tcp_agg_vars5; +#endif + u32 __tcp_agg_vars6; +#if defined(__BIG_ENDIAN) + u16 __agg_misc6; + u16 __tcp_agg_vars7; +#elif defined(__LITTLE_ENDIAN) + u16 __tcp_agg_vars7; + u16 __agg_misc6; +#endif + u32 __agg_val10; + u32 __agg_val10_th; +#if defined(__BIG_ENDIAN) + u16 __reserved3; + u8 __reserved2; + u8 __da_only_cnt; +#elif defined(__LITTLE_ENDIAN) + u8 __da_only_cnt; + u8 __reserved2; + u16 __reserved3; +#endif +}; + +/* + * The fcoe aggregative context of Xstorm + */ +struct xstorm_fcoe_ag_context { +#if defined(__BIG_ENDIAN) + u16 agg_val1; + u8 agg_vars1; +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) 
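+/*
+ * Usage note: each bit-field in these aggregative context words is described
+ * by a pre-shifted mask macro plus a matching _SHIFT macro, so a sub-field
+ * is read with ((var) & MASK) >> SHIFT and written by clearing the mask
+ * first. A minimal sketch, assuming hypothetical GET_FIELD()/SET_FIELD()
+ * helpers and a ctx pointer to a struct xstorm_fcoe_ag_context (neither is
+ * defined in this header):
+ *
+ *	#define GET_FIELD(var, name) \
+ *		(((var) & (name)) >> name##_SHIFT)
+ *	#define SET_FIELD(var, name, val) \
+ *		((var) = ((var) & ~(name)) | (((val) << name##_SHIFT) & (name)))
+ *
+ *	en = GET_FIELD(ctx->agg_vars1, XSTORM_FCOE_AG_CONTEXT_NAGLE_EN);
+ *	SET_FIELD(ctx->agg_vars1, XSTORM_FCOE_AG_CONTEXT_NAGLE_EN, 1);
+ */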
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7 + u8 __state; +#elif defined(__LITTLE_ENDIAN) + u8 __state; + u8 agg_vars1; +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7 + u16 agg_val1; +#endif +#if defined(__BIG_ENDIAN) + u8 cdu_reserved; + u8 __agg_vars4; + u8 agg_vars3; +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6) +#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6 + u8 agg_vars2; +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars2; +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 + u8 agg_vars3; +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6) +#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6 + u8 __agg_vars4; + u8 cdu_reserved; +#endif + u32 more_to_send; +#if defined(__BIG_ENDIAN) + u16 agg_vars5; +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define 
__XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14) +#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14 + u16 sq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 sq_cons; + u16 agg_vars5; +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14) +#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14 +#endif + struct xstorm_fcoe_extra_ag_context_section __extra_section; +#if defined(__BIG_ENDIAN) + u16 agg_vars7; +#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4) +#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10 +#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15) +#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15 + u8 agg_val3_th; + u8 agg_vars6; +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3) +#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6) +#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars6; +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3) +#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6) +#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6 + u8 agg_val3_th; + u16 agg_vars7; +#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4) +#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10 +#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define 
__XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15) +#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15 +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val11_th; + u16 __agg_val11; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val11; + u16 __agg_val11_th; +#endif +#if defined(__BIG_ENDIAN) + u8 __reserved1; + u8 __agg_val6_th; + u16 __agg_val9; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val9; + u8 __agg_val6_th; + u8 __reserved1; +#endif +#if defined(__BIG_ENDIAN) + u16 confq_cons; + u16 confq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 confq_prod; + u16 confq_cons; +#endif + u32 agg_vars8; +#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) +#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2_SHIFT 0 +#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24) +#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 agg_misc0; + u16 sq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 sq_prod; + u16 agg_misc0; +#endif +#if defined(__BIG_ENDIAN) + u8 agg_val3; + u8 agg_val6; + u8 agg_val5_th; + u8 agg_val5; +#elif defined(__LITTLE_ENDIAN) + u8 agg_val5; + u8 agg_val5_th; + u8 agg_val6; + u8 agg_val3; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_misc1; + u16 agg_limit1; +#elif defined(__LITTLE_ENDIAN) + u16 agg_limit1; + u16 __agg_misc1; +#endif + u32 completion_seq; + u32 confq_pbl_base_lo; + u32 confq_pbl_base_hi; +}; + + + +/* + * The tcp aggregative context section of Xstorm + */ +struct xstorm_tcp_tcp_ag_context_section { +#if defined(__BIG_ENDIAN) + u8 tcp_agg_vars1; +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7 + u8 __da_cnt; + u16 mss; +#elif defined(__LITTLE_ENDIAN) + u16 mss; + u8 __da_cnt; + u8 tcp_agg_vars1; +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7 +#endif + u32 snd_nxt; + u32 tx_wnd; + u32 snd_una; + u32 local_adv_wnd; +#if defined(__BIG_ENDIAN) + u8 __agg_val8_th; + u8 __tx_dest; + u16 tcp_agg_vars2; +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG 
(0x1<<0) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 tcp_agg_vars2; +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 + u8 __tx_dest; + u8 
__agg_val8_th; +#endif + u32 ack_to_far_end; + u32 rto_timer; + u32 ka_timer; + u32 ts_to_echo; +#if defined(__BIG_ENDIAN) + u16 __agg_val7_th; + u16 __agg_val7; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val7; + u16 __agg_val7_th; +#endif +#if defined(__BIG_ENDIAN) + u8 __tcp_agg_vars5; + u8 __tcp_agg_vars4; + u8 __tcp_agg_vars3; + u8 __force_pure_ack_cnt; +#elif defined(__LITTLE_ENDIAN) + u8 __force_pure_ack_cnt; + u8 __tcp_agg_vars3; + u8 __tcp_agg_vars4; + u8 __tcp_agg_vars5; +#endif + u32 tcp_agg_vars6; +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN (0x1<<1) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN_SHIFT 1 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31 +#if defined(__BIG_ENDIAN) + u16 __agg_misc6; + u16 __tcp_agg_vars7; +#elif defined(__LITTLE_ENDIAN) + u16 
__tcp_agg_vars7; + u16 __agg_misc6; +#endif + u32 __agg_val10; + u32 __agg_val10_th; +#if defined(__BIG_ENDIAN) + u16 __reserved3; + u8 __reserved2; + u8 __da_only_cnt; +#elif defined(__LITTLE_ENDIAN) + u8 __da_only_cnt; + u8 __reserved2; + u16 __reserved3; +#endif +}; + +/* + * The iscsi aggregative context of Xstorm + */ +struct xstorm_iscsi_ag_context { +#if defined(__BIG_ENDIAN) + u16 agg_val1; + u8 agg_vars1; +#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 + u16 agg_val1; +#endif +#if defined(__BIG_ENDIAN) + u8 cdu_reserved; + u8 __agg_vars4; + u8 agg_vars3; +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) +#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 + u8 agg_vars2; +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars2; +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define 
__XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 + u8 agg_vars3; +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) +#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 + u8 __agg_vars4; + u8 cdu_reserved; +#endif + u32 more_to_send; +#if defined(__BIG_ENDIAN) + u16 agg_vars5; +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14 + u16 sq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 sq_cons; + u16 agg_vars5; +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14 +#endif + struct xstorm_tcp_tcp_ag_context_section tcp; +#if defined(__BIG_ENDIAN) + u16 agg_vars7; +#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) +#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) +#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 + u8 agg_val3_th; + u8 agg_vars6; +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3 +#define 
XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars6; +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6 + u8 agg_val3_th; + u16 agg_vars7; +#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) +#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) +#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val11_th; + u16 __gen_data; +#elif defined(__LITTLE_ENDIAN) + u16 __gen_data; + u16 __agg_val11_th; +#endif +#if defined(__BIG_ENDIAN) + u8 __reserved1; + u8 __agg_val6_th; + u16 __agg_val9; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val9; + u8 __agg_val6_th; + u8 __reserved1; +#endif +#if defined(__BIG_ENDIAN) + u16 hq_prod; + u16 hq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 hq_cons; + u16 hq_prod; +#endif + u32 agg_vars8; +#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) +#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24) +#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 r2tq_prod; + u16 sq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 sq_prod; + u16 r2tq_prod; +#endif +#if defined(__BIG_ENDIAN) + u8 agg_val3; + u8 agg_val6; + u8 agg_val5_th; + u8 agg_val5; +#elif defined(__LITTLE_ENDIAN) + u8 agg_val5; + u8 agg_val5_th; + u8 agg_val6; + u8 agg_val3; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_misc1; + u16 agg_limit1; +#elif defined(__LITTLE_ENDIAN) + u16 agg_limit1; + u16 __agg_misc1; +#endif + u32 hq_cons_tcp_seq; + u32 exp_stat_sn; + u32 rst_seq_num; +}; + + +/* + * The L5cm aggregative context of XStorm + */ +struct xstorm_l5cm_ag_context { +#if defined(__BIG_ENDIAN) + u16 agg_val1; + u8 agg_vars1; +#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 
(0x1<<3) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 + u16 agg_val1; +#endif +#if defined(__BIG_ENDIAN) + u8 cdu_reserved; + u8 __agg_vars4; + u8 agg_vars3; +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) +#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 + u8 agg_vars2; +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0) +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7) +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars2; +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0) +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7) +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7 + u8 agg_vars3; +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) +#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 + u8 __agg_vars4; + u8 cdu_reserved; +#endif + u32 more_to_send; +#if defined(__BIG_ENDIAN) + u16 agg_vars5; +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0) 
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14 + u16 agg_val4_th; +#elif defined(__LITTLE_ENDIAN) + u16 agg_val4_th; + u16 agg_vars5; +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14 +#endif + struct xstorm_tcp_tcp_ag_context_section tcp; +#if defined(__BIG_ENDIAN) + u16 agg_vars7; +#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) +#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) +#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 +#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) +#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 + u8 agg_val3_th; + u8 agg_vars6; +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars6; +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6 + u8 agg_val3_th; + u16 agg_vars7; +#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) +#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define 
XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) +#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 +#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) +#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val11_th; + u16 __gen_data; +#elif defined(__LITTLE_ENDIAN) + u16 __gen_data; + u16 __agg_val11_th; +#endif +#if defined(__BIG_ENDIAN) + u8 __reserved1; + u8 __agg_val6_th; + u16 __agg_val9; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val9; + u8 __agg_val6_th; + u8 __reserved1; +#endif +#if defined(__BIG_ENDIAN) + u16 agg_val2_th; + u16 agg_val2; +#elif defined(__LITTLE_ENDIAN) + u16 agg_val2; + u16 agg_val2_th; +#endif + u32 agg_vars8; +#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) +#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3 (0xFF<<24) +#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 agg_misc0; + u16 agg_val4; +#elif defined(__LITTLE_ENDIAN) + u16 agg_val4; + u16 agg_misc0; +#endif +#if defined(__BIG_ENDIAN) + u8 agg_val3; + u8 agg_val6; + u8 agg_val5_th; + u8 agg_val5; +#elif defined(__LITTLE_ENDIAN) + u8 agg_val5; + u8 agg_val5_th; + u8 agg_val6; + u8 agg_val3; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_misc1; + u16 agg_limit1; +#elif defined(__LITTLE_ENDIAN) + u16 agg_limit1; + u16 __agg_misc1; +#endif + u32 completion_seq; + u32 agg_misc4; + u32 rst_seq_num; +}; + +/* + * ABTS info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_abts_info { + __le16 aborted_task_id; + __le16 reserved0; + __le32 reserved1; +}; + + +/* + * Fixed size structure in order to plant it in Union structure + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_abts_rsp_union { + u8 r_ctl; + u8 rsrv[3]; + __le32 abts_rsp_payload[7]; +}; + + +/* + * 4 regs size $$KEEP_ENDIANNESS$$ + */ +struct fcoe_bd_ctx { + __le32 buf_addr_hi; + __le32 buf_addr_lo; + __le16 buf_len; + __le16 rsrv0; + __le16 flags; + __le16 rsrv1; +}; + + +/* + * FCoE cached sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cached_sge_ctx { + struct regpair cur_buf_addr; + __le16 cur_buf_rem; + __le16 second_buf_rem; + struct regpair second_buf_addr; +}; + + +/* + * Cleanup info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cleanup_info { + __le16 cleaned_task_id; + __le16 rolled_tx_seq_cnt; + __le32 rolled_tx_data_offset; +}; + + +/* + * Fcp RSP flags $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_flags { + u8 flags; +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4) +#define 
FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5) +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 +}; + +/* + * Fcp RSP payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_payload { + struct regpair reserved0; + __le32 fcp_resid; + u8 scsi_status_code; + struct fcoe_fcp_rsp_flags fcp_flags; + __le16 retry_delay_timer; + __le32 fcp_rsp_len; + __le32 fcp_sns_len; +}; + +/* + * Fixed size structure in order to plant it in Union structure + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_union { + struct fcoe_fcp_rsp_payload payload; + struct regpair reserved0; +}; + +/* + * FC header $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fc_hdr { + u8 s_id[3]; + u8 cs_ctl; + u8 d_id[3]; + u8 r_ctl; + __le16 seq_cnt; + u8 df_ctl; + u8 seq_id; + u8 f_ctl[3]; + u8 type; + __le32 parameters; + __le16 rx_id; + __le16 ox_id; +}; + +/* + * FC header union $$KEEP_ENDIANNESS$$ + */ +struct fcoe_mp_rsp_union { + struct fcoe_fc_hdr fc_hdr; + __le32 mp_payload_len; + __le32 rsrv; +}; + +/* + * Completion information $$KEEP_ENDIANNESS$$ + */ +union fcoe_comp_flow_info { + struct fcoe_fcp_rsp_union fcp_rsp; + struct fcoe_abts_rsp_union abts_rsp; + struct fcoe_mp_rsp_union mp_rsp; + __le32 opaque[8]; +}; + + +/* + * External ABTS info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_abts_info { + __le32 rsrv0[6]; + struct fcoe_abts_info ctx; +}; + + +/* + * External cleanup info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_cleanup_info { + __le32 rsrv0[6]; + struct fcoe_cleanup_info ctx; +}; + + +/* + * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fw_tx_seq_ctx { + __le32 data_offset; + __le16 seq_cnt; + __le16 rsrv0; +}; + +/* + * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_fw_tx_seq_ctx { + __le32 rsrv0[6]; + struct fcoe_fw_tx_seq_ctx ctx; +}; + + +/* + * FCoE multiple sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_mul_sges_ctx { + struct regpair cur_sge_addr; + __le16 cur_sge_off; + u8 cur_sge_idx; + u8 sgl_size; +}; + +/* + * FCoE external multiple sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_mul_sges_ctx { + struct fcoe_mul_sges_ctx mul_sgl; + struct regpair rsrv0; +}; + + +/* + * FCP CMD payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_cmd_payload { + __le32 opaque[8]; +}; + + + + + +/* + * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_xfr_rdy_payload { + __le32 burst_len; + __le32 data_ro; +}; + + +/* + * FC frame $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fc_frame { + struct fcoe_fc_hdr fc_hdr; + __le32 reserved0[2]; +}; + + + + +/* + * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$ + */ +union fcoe_kcqe_params { + __le32 reserved0[4]; +}; + +/* + * FCoE KCQ CQE $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kcqe { + __le32 fcoe_conn_id; + __le32 completion_status; + __le32 fcoe_conn_context_id; + union fcoe_kcqe_params params; + __le16 qe_self_seq; + u8 op_code; + u8 flags; +#define FCOE_KCQE_RESERVED0 (0x7<<0) +#define FCOE_KCQE_RESERVED0_SHIFT 0 +#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) +#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 +#define FCOE_KCQE_LAYER_CODE (0x7<<4) +#define FCOE_KCQE_LAYER_CODE_SHIFT 4 +#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) +#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 +}; + + + +/* + * FCoE KWQE header $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_header { + u8 op_code; + u8 flags; +#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) +#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 +#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define 
FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) +#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 +}; + +/* + * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init1 { + __le16 num_tasks; + struct fcoe_kwqe_header hdr; + __le32 task_list_pbl_addr_lo; + __le32 task_list_pbl_addr_hi; + __le32 dummy_buffer_addr_lo; + __le32 dummy_buffer_addr_hi; + __le16 sq_num_wqes; + __le16 rq_num_wqes; + __le16 rq_buffer_log_size; + __le16 cq_num_wqes; + __le16 mtu; + u8 num_sessions_log; + u8 flags; +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 +#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) +#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 +}; + +/* + * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init2 { + u8 hsi_major_version; + u8 hsi_minor_version; + struct fcoe_kwqe_header hdr; + __le32 hash_tbl_pbl_addr_lo; + __le32 hash_tbl_pbl_addr_hi; + __le32 t2_hash_tbl_addr_lo; + __le32 t2_hash_tbl_addr_hi; + __le32 t2_ptr_hash_tbl_addr_lo; + __le32 t2_ptr_hash_tbl_addr_hi; + __le32 free_list_count; +}; + +/* + * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init3 { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 error_bit_map_lo; + __le32 error_bit_map_hi; + u8 perf_config; + u8 reserved21[3]; + __le32 reserved2[4]; +}; + +/* + * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload1 { + __le16 fcoe_conn_id; + struct fcoe_kwqe_header hdr; + __le32 sq_addr_lo; + __le32 sq_addr_hi; + __le32 rq_pbl_addr_lo; + __le32 rq_pbl_addr_hi; + __le32 rq_first_pbe_addr_lo; + __le32 rq_first_pbe_addr_hi; + __le16 rq_prod; + __le16 reserved0; +}; + +/* + * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload2 { + __le16 tx_max_fc_pay_len; + struct fcoe_kwqe_header hdr; + __le32 cq_addr_lo; + __le32 cq_addr_hi; + __le32 xferq_addr_lo; + __le32 xferq_addr_hi; + __le32 conn_db_addr_lo; + __le32 conn_db_addr_hi; + __le32 reserved1; +}; + +/* + * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload3 { + __le16 vlan_tag; +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) +#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 + struct fcoe_kwqe_header hdr; + u8 s_id[3]; + u8 tx_max_conc_seqs_c3; + u8 d_id[3]; + u8 flags; +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 + __le32 reserved; + __le32 
confq_first_pbe_addr_lo; + __le32 confq_first_pbe_addr_hi; + __le16 tx_total_conc_seqs; + __le16 rx_max_fc_pay_len; + __le16 rx_total_conc_seqs; + u8 rx_max_conc_seqs_c3; + u8 rx_open_seqs_exch_c3; +}; + +/* + * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload4 { + u8 e_d_tov_timer_val; + u8 reserved2; + struct fcoe_kwqe_header hdr; + u8 src_mac_addr_lo[2]; + u8 src_mac_addr_mid[2]; + u8 src_mac_addr_hi[2]; + u8 dst_mac_addr_hi[2]; + u8 dst_mac_addr_lo[2]; + u8 dst_mac_addr_mid[2]; + __le32 lcq_addr_lo; + __le32 lcq_addr_hi; + __le32 confq_pbl_base_addr_lo; + __le32 confq_pbl_base_addr_hi; +}; + +/* + * FCoE connection enable request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_enable_disable { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + u8 src_mac_addr_lo[2]; + u8 src_mac_addr_mid[2]; + u8 src_mac_addr_hi[2]; + u16 vlan_tag; +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 + u8 dst_mac_addr_lo[2]; + u8 dst_mac_addr_mid[2]; + u8 dst_mac_addr_hi[2]; + __le16 reserved1; + u8 s_id[3]; + u8 vlan_flag; + u8 d_id[3]; + u8 reserved3; + __le32 context_id; + __le32 conn_id; + __le32 reserved4; +}; + +/* + * FCoE connection destroy request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_destroy { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 context_id; + __le32 conn_id; + __le32 reserved1[5]; +}; + +/* + * FCoe destroy request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_destroy { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 reserved1[7]; +}; + +/* + * FCoe statistics request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_stat { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 stat_params_addr_lo; + __le32 stat_params_addr_hi; + __le32 reserved1[5]; +}; + +/* + * FCoE KWQ WQE $$KEEP_ENDIANNESS$$ + */ +union fcoe_kwqe { + struct fcoe_kwqe_init1 init1; + struct fcoe_kwqe_init2 init2; + struct fcoe_kwqe_init3 init3; + struct fcoe_kwqe_conn_offload1 conn_offload1; + struct fcoe_kwqe_conn_offload2 conn_offload2; + struct fcoe_kwqe_conn_offload3 conn_offload3; + struct fcoe_kwqe_conn_offload4 conn_offload4; + struct fcoe_kwqe_conn_enable_disable conn_enable_disable; + struct fcoe_kwqe_conn_destroy conn_destroy; + struct fcoe_kwqe_destroy destroy; + struct fcoe_kwqe_stat statistics; +}; + + + + + + + + + + + + + + + + +/* + * TX SGL context $$KEEP_ENDIANNESS$$ + */ +union fcoe_sgl_union_ctx { + struct fcoe_cached_sge_ctx cached_sge; + struct fcoe_ext_mul_sges_ctx sgl; + __le32 opaque[5]; +}; + +/* + * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$ + */ +struct fcoe_read_flow_info { + union fcoe_sgl_union_ctx sgl_ctx; + __le32 rsrv0[3]; +}; + + +/* + * Fcoe stat context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_s_stat_ctx { + u8 flags; +#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) +#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0 +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1) +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1 +#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2) +#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2 +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3) +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3 +#define FCOE_S_STAT_CTX_P_RJT (0x1<<4) +#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4 +#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5) +#define 
FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5 +#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6) +#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 +}; + +/* + * Fcoe rx seq context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_seq_ctx { + u8 seq_id; + struct fcoe_s_stat_ctx s_stat; + __le16 seq_cnt; + __le32 low_exp_ro; + __le32 high_exp_ro; +}; + + +/* + * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$ + */ +union fcoe_rx_wr_union_ctx { + struct fcoe_read_flow_info read_info; + union fcoe_comp_flow_info comp_info; + __le32 opaque[8]; +}; + + + +/* + * FCoE SQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_sqe { + __le16 wqe; +#define FCOE_SQE_TASK_ID (0x7FFF<<0) +#define FCOE_SQE_TASK_ID_SHIFT 0 +#define FCOE_SQE_TOGGLE_BIT (0x1<<15) +#define FCOE_SQE_TOGGLE_BIT_SHIFT 15 +}; + + + +/* + * 14 regs $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_only { + union fcoe_sgl_union_ctx sgl_ctx; + __le32 rsrv0; +}; + +/* + * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$ + */ +union fcoe_tx_wr_rx_rd_union_ctx { + struct fcoe_fc_frame tx_frame; + struct fcoe_fcp_cmd_payload fcp_cmd; + struct fcoe_ext_cleanup_info cleanup; + struct fcoe_ext_abts_info abts; + struct fcoe_ext_fw_tx_seq_ctx tx_seq; + __le32 opaque[8]; +}; + +/* + * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_wr_rx_rd_const { + u8 init_flags; +#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0 +#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3) +#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3 +#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4) +#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4 +#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5) +#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5 +#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7) +#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7 + u8 tx_flags; +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1 +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5) +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6 +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7) +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7 + __le16 rsrv3; + __le32 verify_tx_seq; +}; + +/* + * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_wr_rx_rd { + union fcoe_tx_wr_rx_rd_union_ctx union_ctx; + struct fcoe_tce_tx_wr_rx_rd_const const_ctx; +}; + +/* + * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd_const { + __le32 data_2_trns; + __le32 init_flags; +#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0) +#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0 +#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24) +#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24 +}; + +/* + * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd_var { + __le16 rx_flags; +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0 +#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4) +#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4 +#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7) +#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8 +#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME 
(0x1<<12) +#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15 + __le16 rx_id; + struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy; +}; + +/* + * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd { + struct fcoe_tce_rx_wr_tx_rd_const const_ctx; + struct fcoe_tce_rx_wr_tx_rd_var var_ctx; +}; + +/* + * tce_rx_only $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_only { + struct fcoe_rx_seq_ctx rx_seq_ctx; + union fcoe_rx_wr_union_ctx union_ctx; +}; + +/* + * task_ctx_entry $$KEEP_ENDIANNESS$$ + */ +struct fcoe_task_ctx_entry { + struct fcoe_tce_tx_only txwr_only; + struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; + struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; + struct fcoe_tce_rx_only rxwr_only; +}; + + + + + + + + + + +/* + * FCoE XFRQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_xfrqe { + __le16 wqe; +#define FCOE_XFRQE_TASK_ID (0x7FFF<<0) +#define FCOE_XFRQE_TASK_ID_SHIFT 0 +#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) +#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * Cached SGEs $$KEEP_ENDIANNESS$$ + */ +struct common_fcoe_sgl { + struct fcoe_bd_ctx sge[3]; +}; + + +/* + * FCoE SQ\XFRQ element + */ +struct fcoe_cached_wqe { + struct fcoe_sqe sqe; + struct fcoe_xfrqe xfrqe; +}; + + +/* + * FCoE connection enable\disable params passed by driver to FW in FCoE enable + * ramrod $$KEEP_ENDIANNESS$$ + */ +struct fcoe_conn_enable_disable_ramrod_params { + struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe; +}; + + +/* + * FCoE connection offload params passed by driver to FW in FCoE offload ramrod + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_conn_offload_ramrod_params { + struct fcoe_kwqe_conn_offload1 offload_kwqe1; + struct fcoe_kwqe_conn_offload2 offload_kwqe2; + struct fcoe_kwqe_conn_offload3 offload_kwqe3; + struct fcoe_kwqe_conn_offload4 offload_kwqe4; +}; + + +struct ustorm_fcoe_mng_ctx { +#if defined(__BIG_ENDIAN) + u8 mid_seq_proc_flag; + u8 tce_in_cam_flag; + u8 tce_on_ior_flag; + u8 en_cached_tce_flag; +#elif defined(__LITTLE_ENDIAN) + u8 en_cached_tce_flag; + u8 tce_on_ior_flag; + u8 tce_in_cam_flag; + u8 mid_seq_proc_flag; +#endif +#if defined(__BIG_ENDIAN) + u8 tce_cam_addr; + u8 cached_conn_flag; + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 cached_conn_flag; + u8 tce_cam_addr; +#endif +#if defined(__BIG_ENDIAN) + u16 dma_tce_ram_addr; + u16 tce_ram_addr; +#elif defined(__LITTLE_ENDIAN) + u16 tce_ram_addr; + u16 dma_tce_ram_addr; +#endif +#if defined(__BIG_ENDIAN) + u16 ox_id; + u16 wr_done_seq; +#elif defined(__LITTLE_ENDIAN) + u16 wr_done_seq; + u16 ox_id; +#endif + struct regpair task_addr; +}; + +/* + * Parameters initialized during offloaded according to FLOGI/PLOGI/PRLI and + * used in FCoE context section + */ +struct ustorm_fcoe_params { +#if defined(__BIG_ENDIAN) + u16 fcoe_conn_id; + u16 flags; +#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0) +#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0 +#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1) +#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1 +#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3) +#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3 +#define 
USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4) +#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4 +#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5) +#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5 +#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6) +#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6 +#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7) +#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u16 flags; +#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0) +#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0 +#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1) +#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1 +#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3) +#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3 +#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4) +#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4 +#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5) +#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5 +#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6) +#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6 +#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7) +#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7 + u16 fcoe_conn_id; +#endif +#if defined(__BIG_ENDIAN) + u8 hc_csdm_byte_en; + u8 func_id; + u8 port_id; + u8 vnic_id; +#elif defined(__LITTLE_ENDIAN) + u8 vnic_id; + u8 port_id; + u8 func_id; + u8 hc_csdm_byte_en; +#endif +#if defined(__BIG_ENDIAN) + u16 rx_total_conc_seqs; + u16 rx_max_fc_pay_len; +#elif defined(__LITTLE_ENDIAN) + u16 rx_max_fc_pay_len; + u16 rx_total_conc_seqs; +#endif +#if defined(__BIG_ENDIAN) + u8 task_pbe_idx_off; + u8 task_in_page_log_size; + u16 rx_max_conc_seqs; +#elif defined(__LITTLE_ENDIAN) + u16 rx_max_conc_seqs; + u8 task_in_page_log_size; + u8 task_pbe_idx_off; +#endif +}; + +/* + * FCoE 16-bits index structure + */ +struct fcoe_idx16_fields { + u16 fields; +#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0) +#define FCOE_IDX16_FIELDS_IDX_SHIFT 0 +#define FCOE_IDX16_FIELDS_MSB (0x1<<15) +#define FCOE_IDX16_FIELDS_MSB_SHIFT 15 +}; + +/* + * FCoE 16-bits index union + */ +union fcoe_idx16_field_union { + struct fcoe_idx16_fields fields; + u16 val; +}; + +/* + * Parameters required for placement according to SGL + */ +struct ustorm_fcoe_data_place_mng { +#if defined(__BIG_ENDIAN) + u16 sge_off; + u8 num_sges; + u8 sge_idx; +#elif defined(__LITTLE_ENDIAN) + u8 sge_idx; + u8 num_sges; + u16 sge_off; +#endif +}; + +/* + * Parameters required for placement according to SGL + */ +struct ustorm_fcoe_data_place { + struct ustorm_fcoe_data_place_mng cached_mng; + struct fcoe_bd_ctx cached_sge[2]; +}; + +/* + * TX processing shall write and RX processing shall read from this section + */ +union fcoe_u_tce_tx_wr_rx_rd_union { + struct fcoe_abts_info abts; + struct fcoe_cleanup_info cleanup; + struct fcoe_fw_tx_seq_ctx tx_seq_ctx; + u32 opaque[2]; +}; + +/* + * TX processing shall write and RX processing shall read from this section + */ +struct fcoe_u_tce_tx_wr_rx_rd { + union fcoe_u_tce_tx_wr_rx_rd_union union_ctx; + struct fcoe_tce_tx_wr_rx_rd_const const_ctx; +}; + +struct ustorm_fcoe_tce { + struct fcoe_u_tce_tx_wr_rx_rd txwr_rxrd; + struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; + struct fcoe_tce_rx_only rxwr; +}; + +struct ustorm_fcoe_cache_ctx { + u32 rsrv0; + struct ustorm_fcoe_data_place data_place; + struct ustorm_fcoe_tce tce; +}; + +/* + * Ustorm FCoE Storm Context + */ +struct ustorm_fcoe_st_context { + struct ustorm_fcoe_mng_ctx mng_ctx; + struct 
ustorm_fcoe_params fcoe_params; + struct regpair cq_base_addr; + struct regpair rq_pbl_base; + struct regpair rq_cur_page_addr; + struct regpair confq_pbl_base_addr; + struct regpair conn_db_base; + struct regpair xfrq_base_addr; + struct regpair lcq_base_addr; +#if defined(__BIG_ENDIAN) + union fcoe_idx16_field_union rq_cons; + union fcoe_idx16_field_union rq_prod; +#elif defined(__LITTLE_ENDIAN) + union fcoe_idx16_field_union rq_prod; + union fcoe_idx16_field_union rq_cons; +#endif +#if defined(__BIG_ENDIAN) + u16 xfrq_prod; + u16 cq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 cq_cons; + u16 xfrq_prod; +#endif +#if defined(__BIG_ENDIAN) + u16 lcq_cons; + u16 hc_cram_address; +#elif defined(__LITTLE_ENDIAN) + u16 hc_cram_address; + u16 lcq_cons; +#endif +#if defined(__BIG_ENDIAN) + u16 sq_xfrq_lcq_confq_size; + u16 confq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 confq_prod; + u16 sq_xfrq_lcq_confq_size; +#endif +#if defined(__BIG_ENDIAN) + u8 hc_csdm_agg_int; + u8 rsrv2; + u8 available_rqes; + u8 sp_q_flush_cnt; +#elif defined(__LITTLE_ENDIAN) + u8 sp_q_flush_cnt; + u8 available_rqes; + u8 rsrv2; + u8 hc_csdm_agg_int; +#endif +#if defined(__BIG_ENDIAN) + u16 num_pend_tasks; + u16 pbf_ack_ram_addr; +#elif defined(__LITTLE_ENDIAN) + u16 pbf_ack_ram_addr; + u16 num_pend_tasks; +#endif + struct ustorm_fcoe_cache_ctx cache_ctx; +}; + +/* + * The FCoE non-aggregative context of Tstorm + */ +struct tstorm_fcoe_st_context { + struct regpair reserved0; + struct regpair reserved1; +}; + +/* + * Ethernet context section + */ +struct xstorm_fcoe_eth_context_section { +#if defined(__BIG_ENDIAN) + u8 remote_addr_4; + u8 remote_addr_5; + u8 local_addr_0; + u8 local_addr_1; +#elif defined(__LITTLE_ENDIAN) + u8 local_addr_1; + u8 local_addr_0; + u8 remote_addr_5; + u8 remote_addr_4; +#endif +#if defined(__BIG_ENDIAN) + u8 remote_addr_0; + u8 remote_addr_1; + u8 remote_addr_2; + u8 remote_addr_3; +#elif defined(__LITTLE_ENDIAN) + u8 remote_addr_3; + u8 remote_addr_2; + u8 remote_addr_1; + u8 remote_addr_0; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved_vlan_type; + u16 params; +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12 +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 +#elif defined(__LITTLE_ENDIAN) + u16 params; +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12 +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 + u16 reserved_vlan_type; +#endif +#if defined(__BIG_ENDIAN) + u8 local_addr_2; + u8 local_addr_3; + u8 local_addr_4; + u8 local_addr_5; +#elif defined(__LITTLE_ENDIAN) + u8 local_addr_5; + u8 local_addr_4; + u8 local_addr_3; + u8 local_addr_2; +#endif +}; + +/* + * Flags used in FCoE context section - 1 byte + */ +struct xstorm_fcoe_context_flags { + u8 flags; +#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q (0x3<<0) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ (0x1<<3) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ_SHIFT 3 +#define 
XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN (0x1<<7) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN_SHIFT 7 +}; + +struct xstorm_fcoe_tce { + struct fcoe_tce_tx_only txwr; + struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; +}; + +/* + * FCP_DATA parameters required for transmission + */ +struct xstorm_fcoe_fcp_data { + u32 io_rem; +#if defined(__BIG_ENDIAN) + u16 cached_sge_off; + u8 cached_num_sges; + u8 cached_sge_idx; +#elif defined(__LITTLE_ENDIAN) + u8 cached_sge_idx; + u8 cached_num_sges; + u16 cached_sge_off; +#endif + u32 buf_addr_hi_0; + u32 buf_addr_lo_0; +#if defined(__BIG_ENDIAN) + u16 num_of_pending_tasks; + u16 buf_len_0; +#elif defined(__LITTLE_ENDIAN) + u16 buf_len_0; + u16 num_of_pending_tasks; +#endif + u32 buf_addr_hi_1; + u32 buf_addr_lo_1; +#if defined(__BIG_ENDIAN) + u16 task_pbe_idx_off; + u16 buf_len_1; +#elif defined(__LITTLE_ENDIAN) + u16 buf_len_1; + u16 task_pbe_idx_off; +#endif + u32 buf_addr_hi_2; + u32 buf_addr_lo_2; +#if defined(__BIG_ENDIAN) + u16 ox_id; + u16 buf_len_2; +#elif defined(__LITTLE_ENDIAN) + u16 buf_len_2; + u16 ox_id; +#endif +}; + +/* + * vlan configuration + */ +struct xstorm_fcoe_vlan_conf { + u8 vlan_conf; +#define XSTORM_FCOE_VLAN_CONF_PRIORITY (0x7<<0) +#define XSTORM_FCOE_VLAN_CONF_PRIORITY_SHIFT 0 +#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG (0x1<<3) +#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG_SHIFT 3 +#define XSTORM_FCOE_VLAN_CONF_RESERVED (0xF<<4) +#define XSTORM_FCOE_VLAN_CONF_RESERVED_SHIFT 4 +}; + +/* + * FCoE 16-bits vlan structure + */ +struct fcoe_vlan_fields { + u16 fields; +#define FCOE_VLAN_FIELDS_VID (0xFFF<<0) +#define FCOE_VLAN_FIELDS_VID_SHIFT 0 +#define FCOE_VLAN_FIELDS_CLI (0x1<<12) +#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 +#define FCOE_VLAN_FIELDS_PRI (0x7<<13) +#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 +}; + +/* + * FCoE 16-bits vlan union + */ +union fcoe_vlan_field_union { + struct fcoe_vlan_fields fields; + u16 val; +}; + +/* + * FCoE 16-bits vlan, vif union + */ +union fcoe_vlan_vif_field_union { + union fcoe_vlan_field_union vlan; + u16 vif; +}; + +/* + * FCoE context section + */ +struct xstorm_fcoe_context_section { +#if defined(__BIG_ENDIAN) + u8 cs_ctl; + u8 s_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 s_id[3]; + u8 cs_ctl; +#endif +#if defined(__BIG_ENDIAN) + u8 rctl; + u8 d_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 d_id[3]; + u8 rctl; +#endif +#if defined(__BIG_ENDIAN) + u16 sq_xfrq_lcq_confq_size; + u16 tx_max_fc_pay_len; +#elif defined(__LITTLE_ENDIAN) + u16 tx_max_fc_pay_len; + u16 sq_xfrq_lcq_confq_size; +#endif + u32 lcq_prod; +#if defined(__BIG_ENDIAN) + u8 port_id; + u8 func_id; + u8 seq_id; + struct xstorm_fcoe_context_flags tx_flags; +#elif defined(__LITTLE_ENDIAN) + struct xstorm_fcoe_context_flags tx_flags; + u8 seq_id; + u8 func_id; + u8 port_id; +#endif +#if defined(__BIG_ENDIAN) + u16 mtu; + u8 func_mode; + u8 vnic_id; +#elif defined(__LITTLE_ENDIAN) + u8 vnic_id; + u8 func_mode; + u16 mtu; +#endif + struct regpair confq_curr_page_addr; + struct fcoe_cached_wqe cached_wqe[8]; + struct regpair lcq_base_addr; + struct xstorm_fcoe_tce tce; + struct xstorm_fcoe_fcp_data fcp_data; +#if defined(__BIG_ENDIAN) + u8 tx_max_conc_seqs_c3; + u8 vlan_flag; + u8 dcb_val; + u8 
data_pb_cmd_size; +#elif defined(__LITTLE_ENDIAN) + u8 data_pb_cmd_size; + u8 dcb_val; + u8 vlan_flag; + u8 tx_max_conc_seqs_c3; +#endif +#if defined(__BIG_ENDIAN) + u16 fcoe_tx_stat_params_ram_addr; + u16 fcoe_tx_fc_seq_ram_addr; +#elif defined(__LITTLE_ENDIAN) + u16 fcoe_tx_fc_seq_ram_addr; + u16 fcoe_tx_stat_params_ram_addr; +#endif +#if defined(__BIG_ENDIAN) + u8 fcp_cmd_line_credit; + u8 eth_hdr_size; + u16 pbf_addr; +#elif defined(__LITTLE_ENDIAN) + u16 pbf_addr; + u8 eth_hdr_size; + u8 fcp_cmd_line_credit; +#endif +#if defined(__BIG_ENDIAN) + union fcoe_vlan_vif_field_union multi_func_val; + u8 page_log_size; + struct xstorm_fcoe_vlan_conf orig_vlan_conf; +#elif defined(__LITTLE_ENDIAN) + struct xstorm_fcoe_vlan_conf orig_vlan_conf; + u8 page_log_size; + union fcoe_vlan_vif_field_union multi_func_val; +#endif +#if defined(__BIG_ENDIAN) + u16 fcp_cmd_frame_size; + u16 pbf_addr_ff; +#elif defined(__LITTLE_ENDIAN) + u16 pbf_addr_ff; + u16 fcp_cmd_frame_size; +#endif +#if defined(__BIG_ENDIAN) + u8 vlan_num; + u8 cos; + u8 cache_xfrq_cons; + u8 cache_sq_cons; +#elif defined(__LITTLE_ENDIAN) + u8 cache_sq_cons; + u8 cache_xfrq_cons; + u8 cos; + u8 vlan_num; +#endif + u32 verify_tx_seq; +}; + +/* + * Xstorm FCoE Storm Context + */ +struct xstorm_fcoe_st_context { + struct xstorm_fcoe_eth_context_section eth; + struct xstorm_fcoe_context_section fcoe; +}; + +/* + * Fcoe connection context + */ +struct fcoe_context { + struct ustorm_fcoe_st_context ustorm_st_context; + struct tstorm_fcoe_st_context tstorm_st_context; + struct xstorm_fcoe_ag_context xstorm_ag_context; + struct tstorm_fcoe_ag_context tstorm_ag_context; + struct ustorm_fcoe_ag_context ustorm_ag_context; + struct timers_block_context timers_context; + struct xstorm_fcoe_st_context xstorm_st_context; +}; + +/* + * FCoE init params passed by driver to FW in FCoE init ramrod + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_init_ramrod_params { + struct fcoe_kwqe_init1 init_kwqe1; + struct fcoe_kwqe_init2 init_kwqe2; + struct fcoe_kwqe_init3 init_kwqe3; + struct regpair eq_pbl_base; + __le32 eq_pbl_size; + __le32 reserved2; + __le16 eq_prod; + __le16 sb_num; + u8 sb_id; + u8 reserved0; + __le16 reserved1; +}; + +/* + * FCoE statistics params buffer passed by driver to FW in FCoE statistics + * ramrod $$KEEP_ENDIANNESS$$ + */ +struct fcoe_stat_ramrod_params { + struct fcoe_kwqe_stat stat_kwqe; +}; + +/* + * CQ DB CQ producer and pending completion counter + */ +struct iscsi_cq_db_prod_pnd_cmpltn_cnt { +#if defined(__BIG_ENDIAN) + u16 cntr; + u16 prod; +#elif defined(__LITTLE_ENDIAN) + u16 prod; + u16 cntr; +#endif +}; + +/* + * CQ DB pending completion ITT array + */ +struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr { + struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8]; +}; + +/* + * Cstorm CQ sequence to notify array, updated by driver + */ +struct iscsi_cq_db_sqn_2_notify_arr { + u16 sqn[8]; +}; + +/* + * Cstorm iSCSI Storm Context + */ +struct cstorm_iscsi_st_context { + struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr; + struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr; + struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr; + struct regpair hq_pbl_base; + struct regpair hq_curr_pbe; + struct regpair task_pbl_base; + struct regpair cq_db_base; +#if defined(__BIG_ENDIAN) + u16 hq_bd_itt; + u16 iscsi_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 iscsi_conn_id; + u16 hq_bd_itt; +#endif + u32 hq_bd_data_segment_len; + u32 hq_bd_buffer_offset; +#if defined(__BIG_ENDIAN) + u8 rsrv; + u8 cq_proc_en_bit_map; + 
u8 cq_pend_comp_itt_valid_bit_map; + u8 hq_bd_opcode; +#elif defined(__LITTLE_ENDIAN) + u8 hq_bd_opcode; + u8 cq_pend_comp_itt_valid_bit_map; + u8 cq_proc_en_bit_map; + u8 rsrv; +#endif + u32 hq_tcp_seq; +#if defined(__BIG_ENDIAN) + u16 flags; +#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0) +#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0 +#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1) +#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4 +#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5) +#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5 + u16 hq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 hq_cons; + u16 flags; +#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0) +#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0 +#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1) +#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4 +#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5) +#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5 +#endif + struct regpair rsrv1; +}; + + +/* + * SCSI read/write SQ WQE + */ +struct iscsi_cmd_pdu_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + struct regpair lun; + u32 itt; + u32 expected_data_transfer_length; + u32 cmd_sn; + u32 
exp_stat_sn; + u32 scsi_command_block[4]; +}; + + +/* + * Buffer per connection, used in Tstorm + */ +struct iscsi_conn_buf { + struct regpair reserved[8]; +}; + + +/* + * iSCSI context region, used only in iSCSI + */ +struct ustorm_iscsi_rq_db { + struct regpair pbl_base; + struct regpair curr_pbe; +}; + +/* + * iSCSI context region, used only in iSCSI + */ +struct ustorm_iscsi_r2tq_db { + struct regpair pbl_base; + struct regpair curr_pbe; +}; + +/* + * iSCSI context region, used only in iSCSI + */ +struct ustorm_iscsi_cq_db { +#if defined(__BIG_ENDIAN) + u16 cq_sn; + u16 prod; +#elif defined(__LITTLE_ENDIAN) + u16 prod; + u16 cq_sn; +#endif + struct regpair curr_pbe; +}; + +/* + * iSCSI context region, used only in iSCSI + */ +struct rings_db { + struct ustorm_iscsi_rq_db rq; + struct ustorm_iscsi_r2tq_db r2tq; + struct ustorm_iscsi_cq_db cq[8]; +#if defined(__BIG_ENDIAN) + u16 rq_prod; + u16 r2tq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 r2tq_prod; + u16 rq_prod; +#endif + struct regpair cq_pbl_base; +}; + +/* + * iSCSI context region, used only in iSCSI + */ +struct ustorm_iscsi_placement_db { + u32 sgl_base_lo; + u32 sgl_base_hi; + u32 local_sge_0_address_hi; + u32 local_sge_0_address_lo; +#if defined(__BIG_ENDIAN) + u16 curr_sge_offset; + u16 local_sge_0_size; +#elif defined(__LITTLE_ENDIAN) + u16 local_sge_0_size; + u16 curr_sge_offset; +#endif + u32 local_sge_1_address_hi; + u32 local_sge_1_address_lo; +#if defined(__BIG_ENDIAN) + u8 exp_padding_2b; + u8 nal_len_3b; + u16 local_sge_1_size; +#elif defined(__LITTLE_ENDIAN) + u16 local_sge_1_size; + u8 nal_len_3b; + u8 exp_padding_2b; +#endif +#if defined(__BIG_ENDIAN) + u8 sgl_size; + u8 local_sge_index_2b; + u16 reserved7; +#elif defined(__LITTLE_ENDIAN) + u16 reserved7; + u8 local_sge_index_2b; + u8 sgl_size; +#endif + u32 rem_pdu; + u32 place_db_bitfield_1; +#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0) +#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0 +#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24) +#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24 + u32 place_db_bitfield_2; +#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0) +#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0 +#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24) +#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24 + u32 nal; +#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0) +#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0 +#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0xFF<<24) +#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 24 +}; + +/* + * Ustorm iSCSI Storm Context + */ +struct ustorm_iscsi_st_context { + u32 exp_stat_sn; + u32 exp_data_sn; + struct rings_db ring; + struct regpair task_pbl_base; + struct regpair tce_phy_addr; + struct ustorm_iscsi_placement_db place_db; + u32 reserved8; + u32 rem_rcv_len; +#if defined(__BIG_ENDIAN) + u16 hdr_itt; + u16 iscsi_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 iscsi_conn_id; + u16 hdr_itt; +#endif + u32 nal_bytes; +#if defined(__BIG_ENDIAN) + u8 hdr_second_byte_union; + u8 bitfield_0; +#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0) +#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 +#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) +#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 +#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2) +#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2 +#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3) +#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3 + u8 
task_pdu_cache_index; + u8 task_pbe_cache_index; +#elif defined(__LITTLE_ENDIAN) + u8 task_pbe_cache_index; + u8 task_pdu_cache_index; + u8 bitfield_0; +#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0) +#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 +#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) +#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 +#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2) +#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2 +#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3) +#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3 + u8 hdr_second_byte_union; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u8 reserved2; + u8 acDecrement; +#elif defined(__LITTLE_ENDIAN) + u8 acDecrement; + u8 reserved2; + u16 reserved3; +#endif + u32 task_stat; +#if defined(__BIG_ENDIAN) + u8 hdr_opcode; + u8 num_cqs; + u16 reserved5; +#elif defined(__LITTLE_ENDIAN) + u16 reserved5; + u8 num_cqs; + u8 hdr_opcode; +#endif + u32 negotiated_rx; +#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0) +#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0 +#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24) +#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24 + u32 negotiated_rx_and_flags; +#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0) +#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0 +#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24) +#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24 +#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25) +#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25 +#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26) +#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26 +#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27) +#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27 +#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28) +#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28 +#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29) +#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29 +#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31) +#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31 +}; + +/* + * TCP context region, shared in TOE, RDMA and ISCSI + */ +struct tstorm_tcp_st_context_section { + u32 flags1; +#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0) +#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0 +#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24) +#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24 +#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25) +#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25 +#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26) +#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26 +#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27) +#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27 +#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28) +#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28 +#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29) +#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29 +#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30) +#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30 +#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31) +#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31 + u32 flags2; +#define 
TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0) +#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0 +#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24) +#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24 +#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25) +#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25 +#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26) +#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26 +#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27) +#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27 +#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28) +#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28 +#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29) +#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29 +#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30) +#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30 +#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31) +#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31 +#if defined(__BIG_ENDIAN) + u16 mss; + u8 tcp_sm_state; + u8 rto_exp; +#elif defined(__LITTLE_ENDIAN) + u8 rto_exp; + u8 tcp_sm_state; + u16 mss; +#endif + u32 rcv_nxt; + u32 timestamp_recent; + u32 timestamp_recent_time; + u32 cwnd; + u32 ss_thresh; + u32 cwnd_accum; + u32 prev_seg_seq; + u32 expected_rel_seq; + u32 recover; +#if defined(__BIG_ENDIAN) + u8 retransmit_count; + u8 ka_max_probe_count; + u8 persist_probe_count; + u8 ka_probe_count; +#elif defined(__LITTLE_ENDIAN) + u8 ka_probe_count; + u8 persist_probe_count; + u8 ka_max_probe_count; + u8 retransmit_count; +#endif +#if defined(__BIG_ENDIAN) + u8 statistics_counter_id; + u8 ooo_support_mode; + u8 snd_wnd_scale; + u8 dup_ack_count; +#elif defined(__LITTLE_ENDIAN) + u8 dup_ack_count; + u8 snd_wnd_scale; + u8 ooo_support_mode; + u8 statistics_counter_id; +#endif + u32 retransmit_start_time; + u32 ka_timeout; + u32 ka_interval; + u32 isle_start_seq; + u32 isle_end_seq; +#if defined(__BIG_ENDIAN) + u16 second_isle_address; + u16 recent_seg_wnd; +#elif defined(__LITTLE_ENDIAN) + u16 recent_seg_wnd; + u16 second_isle_address; +#endif +#if defined(__BIG_ENDIAN) + u8 max_isles_ever_happened; + u8 isles_number; + u16 last_isle_address; +#elif defined(__LITTLE_ENDIAN) + u16 last_isle_address; + u8 isles_number; + u8 max_isles_ever_happened; +#endif + u32 max_rt_time; +#if defined(__BIG_ENDIAN) + u16 lsb_mac_address; + u16 vlan_id; +#elif defined(__LITTLE_ENDIAN) + u16 vlan_id; + u16 lsb_mac_address; +#endif +#if defined(__BIG_ENDIAN) + u16 msb_mac_address; + u16 mid_mac_address; +#elif defined(__LITTLE_ENDIAN) + u16 mid_mac_address; + u16 msb_mac_address; +#endif + u32 rightmost_received_seq; +}; + +/* + * Termination variables + */ +struct iscsi_term_vars { + u8 BitMap; +#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0) +#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0 +#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4) +#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4 +#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5) +#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5 +#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6) +#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6 +#define ISCSI_TERM_VARS_RSRV (0x1<<7) +#define ISCSI_TERM_VARS_RSRV_SHIFT 7 +}; + +/* + * iSCSI context region, used only in iSCSI + */ +struct tstorm_iscsi_st_context_section { + u32 nalPayload; + u32 b2nh; +#if 
defined(__BIG_ENDIAN) + u16 rq_cons; + u8 flags; +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7 + u8 hdr_bytes_2_fetch; +#elif defined(__LITTLE_ENDIAN) + u8 hdr_bytes_2_fetch; + u8 flags; +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7 + u16 rq_cons; +#endif + struct regpair rq_db_phy_addr; +#if defined(__BIG_ENDIAN) + struct iscsi_term_vars term_vars; + u8 rsrv1; + u16 iscsi_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 iscsi_conn_id; + u8 rsrv1; + struct iscsi_term_vars term_vars; +#endif + u32 process_nxt; +}; + +/* + * The iSCSI non-aggregative context of Tstorm + */ +struct tstorm_iscsi_st_context { + struct tstorm_tcp_st_context_section tcp; + struct tstorm_iscsi_st_context_section iscsi; +}; + +/* + * Ethernet context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_eth_context_section { +#if defined(__BIG_ENDIAN) + u8 remote_addr_4; + u8 remote_addr_5; + u8 local_addr_0; + u8 local_addr_1; +#elif defined(__LITTLE_ENDIAN) + u8 local_addr_1; + u8 local_addr_0; + u8 remote_addr_5; + u8 remote_addr_4; +#endif +#if defined(__BIG_ENDIAN) + u8 remote_addr_0; + u8 remote_addr_1; + u8 remote_addr_2; + u8 remote_addr_3; +#elif defined(__LITTLE_ENDIAN) + u8 remote_addr_3; + u8 remote_addr_2; + u8 remote_addr_1; + u8 remote_addr_0; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved_vlan_type; + u16 params; +#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) +#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 +#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12) +#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12 +#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) +#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 +#elif defined(__LITTLE_ENDIAN) + u16 params; +#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) +#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 +#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12) +#define 
XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12 +#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) +#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 + u16 reserved_vlan_type; +#endif +#if defined(__BIG_ENDIAN) + u8 local_addr_2; + u8 local_addr_3; + u8 local_addr_4; + u8 local_addr_5; +#elif defined(__LITTLE_ENDIAN) + u8 local_addr_5; + u8 local_addr_4; + u8 local_addr_3; + u8 local_addr_2; +#endif +}; + +/* + * IpV4 context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_ip_v4_context_section { +#if defined(__BIG_ENDIAN) + u16 __pbf_hdr_cmd_rsvd_id; + u16 __pbf_hdr_cmd_rsvd_flags_offset; +#elif defined(__LITTLE_ENDIAN) + u16 __pbf_hdr_cmd_rsvd_flags_offset; + u16 __pbf_hdr_cmd_rsvd_id; +#endif +#if defined(__BIG_ENDIAN) + u8 __pbf_hdr_cmd_rsvd_ver_ihl; + u8 tos; + u16 __pbf_hdr_cmd_rsvd_length; +#elif defined(__LITTLE_ENDIAN) + u16 __pbf_hdr_cmd_rsvd_length; + u8 tos; + u8 __pbf_hdr_cmd_rsvd_ver_ihl; +#endif + u32 ip_local_addr; +#if defined(__BIG_ENDIAN) + u8 ttl; + u8 __pbf_hdr_cmd_rsvd_protocol; + u16 __pbf_hdr_cmd_rsvd_csum; +#elif defined(__LITTLE_ENDIAN) + u16 __pbf_hdr_cmd_rsvd_csum; + u8 __pbf_hdr_cmd_rsvd_protocol; + u8 ttl; +#endif + u32 __pbf_hdr_cmd_rsvd_1; + u32 ip_remote_addr; +}; + +/* + * context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_padded_ip_v4_context_section { + struct xstorm_ip_v4_context_section ip_v4; + u32 reserved1[4]; +}; + +/* + * IpV6 context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_ip_v6_context_section { +#if defined(__BIG_ENDIAN) + u16 pbf_hdr_cmd_rsvd_payload_len; + u8 pbf_hdr_cmd_rsvd_nxt_hdr; + u8 hop_limit; +#elif defined(__LITTLE_ENDIAN) + u8 hop_limit; + u8 pbf_hdr_cmd_rsvd_nxt_hdr; + u16 pbf_hdr_cmd_rsvd_payload_len; +#endif + u32 priority_flow_label; +#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL (0xFFFFF<<0) +#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT 0 +#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS (0xFF<<20) +#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT 20 +#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER (0xF<<28) +#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER_SHIFT 28 + u32 ip_local_addr_lo_hi; + u32 ip_local_addr_lo_lo; + u32 ip_local_addr_hi_hi; + u32 ip_local_addr_hi_lo; + u32 ip_remote_addr_lo_hi; + u32 ip_remote_addr_lo_lo; + u32 ip_remote_addr_hi_hi; + u32 ip_remote_addr_hi_lo; +}; + +union xstorm_ip_context_section_types { + struct xstorm_padded_ip_v4_context_section padded_ip_v4; + struct xstorm_ip_v6_context_section ip_v6; +}; + +/* + * TCP context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_tcp_context_section { + u32 snd_max; +#if defined(__BIG_ENDIAN) + u16 remote_port; + u16 local_port; +#elif defined(__LITTLE_ENDIAN) + u16 local_port; + u16 remote_port; +#endif +#if defined(__BIG_ENDIAN) + u8 original_nagle_1b; + u8 ts_enabled; + u16 tcp_params; +#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0) +#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0 +#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8) +#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8 +#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9) +#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 +#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) +#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 +#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11) +#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11 +#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) +#define 
XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 +#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) +#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13 +#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14) +#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 tcp_params; +#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0) +#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0 +#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8) +#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8 +#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9) +#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 +#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) +#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 +#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11) +#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11 +#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) +#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 +#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) +#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13 +#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14) +#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14 + u8 ts_enabled; + u8 original_nagle_1b; +#endif +#if defined(__BIG_ENDIAN) + u16 pseudo_csum; + u16 window_scaling_factor; +#elif defined(__LITTLE_ENDIAN) + u16 window_scaling_factor; + u16 pseudo_csum; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved2; + u8 statistics_counter_id; + u8 statistics_params; +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0) +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0 +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1) +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 +#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2) +#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2 +#elif defined(__LITTLE_ENDIAN) + u8 statistics_params; +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0) +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0 +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1) +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 +#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2) +#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2 + u8 statistics_counter_id; + u16 reserved2; +#endif + u32 ts_time_diff; + u32 __next_timer_expir; +}; + +/* + * Common context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_common_context_section { + struct xstorm_eth_context_section ethernet; + union xstorm_ip_context_section_types ip_union; + struct xstorm_tcp_context_section tcp; +#if defined(__BIG_ENDIAN) + u8 __dcb_val; + u8 flags; +#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0) +#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0 +#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1) +#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1 +#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4) +#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4 +#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5) +#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5 + u8 reserved; + u8 ip_version_1b; +#elif defined(__LITTLE_ENDIAN) + u8 ip_version_1b; + u8 reserved; + u8 flags; +#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0) +#define 
XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0 +#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1) +#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1 +#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4) +#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4 +#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5) +#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5 + u8 __dcb_val; +#endif +}; + +/* + * Flags used in ISCSI context section + */ +struct xstorm_iscsi_context_flags { + u8 flags; +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA (0x1<<0) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA_SHIFT 0 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T (0x1<<1) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T_SHIFT 1 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST (0x1<<2) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST_SHIFT 2 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST (0x1<<3) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST_SHIFT 3 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN (0x1<<4) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN_SHIFT 4 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ (0x1<<5) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ_SHIFT 5 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT (0x1<<6) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT_SHIFT 6 +#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4 (0x1<<7) +#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4_SHIFT 7 +}; + +struct iscsi_task_context_entry_x { + u32 data_out_buffer_offset; + u32 itt; + u32 data_sn; +}; + +struct iscsi_task_context_entry_xuc_x_write_only { + u32 tx_r2t_sn; +}; + +struct iscsi_task_context_entry_xuc_xu_write_both { + u32 sgl_base_lo; + u32 sgl_base_hi; +#if defined(__BIG_ENDIAN) + u8 sgl_size; + u8 sge_index; + u16 sge_offset; +#elif defined(__LITTLE_ENDIAN) + u16 sge_offset; + u8 sge_index; + u8 sgl_size; +#endif +}; + +/* + * iSCSI context section + */ +struct xstorm_iscsi_context_section { + u32 first_burst_length; + u32 max_send_pdu_length; + struct regpair sq_pbl_base; + struct regpair sq_curr_pbe; + struct regpair hq_pbl_base; + struct regpair hq_curr_pbe_base; + struct regpair r2tq_pbl_base; + struct regpair r2tq_curr_pbe_base; + struct regpair task_pbl_base; +#if defined(__BIG_ENDIAN) + u16 data_out_count; + struct xstorm_iscsi_context_flags flags; + u8 task_pbl_cache_idx; +#elif defined(__LITTLE_ENDIAN) + u8 task_pbl_cache_idx; + struct xstorm_iscsi_context_flags flags; + u16 data_out_count; +#endif + u32 seq_more_2_send; + u32 pdu_more_2_send; + struct iscsi_task_context_entry_x temp_tce_x; + struct iscsi_task_context_entry_xuc_x_write_only temp_tce_x_wr; + struct iscsi_task_context_entry_xuc_xu_write_both temp_tce_xu_wr; + struct regpair lun; + u32 exp_data_transfer_len_ttt; + u32 pdu_data_2_rxmit; + u32 rxmit_bytes_2_dr; +#if defined(__BIG_ENDIAN) + u16 rxmit_sge_offset; + u16 hq_rxmit_cons; +#elif defined(__LITTLE_ENDIAN) + u16 hq_rxmit_cons; + u16 rxmit_sge_offset; +#endif +#if defined(__BIG_ENDIAN) + u16 r2tq_cons; + u8 rxmit_flags; +#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3) +#define 
XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7 + u8 rxmit_sge_idx; +#elif defined(__LITTLE_ENDIAN) + u8 rxmit_sge_idx; + u8 rxmit_flags; +#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7 + u16 r2tq_cons; +#endif + u32 hq_rxmit_tcp_seq; +}; + +/* + * Xstorm iSCSI Storm Context + */ +struct xstorm_iscsi_st_context { + struct xstorm_common_context_section common; + struct xstorm_iscsi_context_section iscsi; +}; + +/* + * Iscsi connection context + */ +struct iscsi_context { + struct ustorm_iscsi_st_context ustorm_st_context; + struct tstorm_iscsi_st_context tstorm_st_context; + struct xstorm_iscsi_ag_context xstorm_ag_context; + struct tstorm_iscsi_ag_context tstorm_ag_context; + struct cstorm_iscsi_ag_context cstorm_ag_context; + struct ustorm_iscsi_ag_context ustorm_ag_context; + struct timers_block_context timers_context; + struct regpair upb_context; + struct xstorm_iscsi_st_context xstorm_st_context; + struct regpair xpb_context; + struct cstorm_iscsi_st_context cstorm_st_context; +}; + + +/* + * PDU header of an iSCSI DATA-OUT + */ +struct iscsi_data_pdu_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + struct regpair lun; + u32 itt; + u32 ttt; + u32 rsrv2; + u32 exp_stat_sn; + u32 rsrv3; + u32 data_sn; + u32 buffer_offset; + u32 rsrv4; +}; + + +/* + * PDU header of an iSCSI login request + */ +struct iscsi_login_req_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define 
ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7 + u8 version_max; + u8 version_min; +#elif defined(__LITTLE_ENDIAN) + u8 version_min; + u8 version_max; + u8 op_attr; +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + u32 isid_lo; +#if defined(__BIG_ENDIAN) + u16 isid_hi; + u16 tsih; +#elif defined(__LITTLE_ENDIAN) + u16 tsih; + u16 isid_hi; +#endif + u32 itt; +#if defined(__BIG_ENDIAN) + u16 cid; + u16 rsrv1; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv1; + u16 cid; +#endif + u32 cmd_sn; + u32 exp_stat_sn; + u32 rsrv2[4]; +}; + +/* + * PDU header of an iSCSI logout request + */ +struct iscsi_logout_req_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0 +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0 +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + u32 rsrv2[2]; + u32 itt; +#if defined(__BIG_ENDIAN) + u16 cid; + u16 rsrv1; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv1; + u16 cid; +#endif + u32 cmd_sn; + u32 exp_stat_sn; + u32 rsrv3[4]; +}; + +/* + * PDU header of an iSCSI TMF request + */ +struct iscsi_tmf_req_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0 +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 
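+/*
+ * Editorial note, not part of the original patch: the paired mask/shift
+ * defines used throughout this header are consumed with the usual
+ * mask-then-shift pattern, e.g. to pull the TMF function code out of
+ * op_attr:
+ *
+ *	u8 func = (hdr->op_attr & ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION)
+ *		  >> ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT;
+ *
+ * where 'hdr' is assumed to point at a struct
+ * iscsi_tmf_req_hdr_little_endian.
+ */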
+ u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0 +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + struct regpair lun; + u32 itt; + u32 referenced_task_tag; + u32 cmd_sn; + u32 exp_stat_sn; + u32 ref_cmd_sn; + u32 exp_data_sn; + u32 rsrv2[2]; +}; + +/* + * PDU header of an iSCSI Text request + */ +struct iscsi_text_req_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + struct regpair lun; + u32 itt; + u32 ttt; + u32 cmd_sn; + u32 exp_stat_sn; + u32 rsrv3[4]; +}; + +/* + * PDU header of an iSCSI Nop-Out + */ +struct iscsi_nop_out_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + struct regpair lun; + u32 itt; + u32 ttt; + u32 cmd_sn; + u32 exp_stat_sn; + u32 rsrv3[4]; +}; + +/* + * iscsi pdu headers in little endian form. 
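+ * Editorial note, not part of the original patch: fullHeaderSize[12]
+ * pins the union at 12 * 4 = 48 bytes, the size of an iSCSI Basic
+ * Header Segment, so each request-header view below overlays the same
+ * 48-byte storage.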
+ */ +union iscsi_pdu_headers_little_endian { + u32 fullHeaderSize[12]; + struct iscsi_cmd_pdu_hdr_little_endian command_pdu_hdr; + struct iscsi_data_pdu_hdr_little_endian data_out_pdu_hdr; + struct iscsi_login_req_hdr_little_endian login_req_pdu_hdr; + struct iscsi_logout_req_hdr_little_endian logout_req_pdu_hdr; + struct iscsi_tmf_req_hdr_little_endian tmf_req_pdu_hdr; + struct iscsi_text_req_hdr_little_endian text_req_pdu_hdr; + struct iscsi_nop_out_hdr_little_endian nop_out_pdu_hdr; +}; + +struct iscsi_hq_bd { + union iscsi_pdu_headers_little_endian pdu_header; +#if defined(__BIG_ENDIAN) + u16 reserved1; + u16 lcl_cmp_flg; +#elif defined(__LITTLE_ENDIAN) + u16 lcl_cmp_flg; + u16 reserved1; +#endif + u32 sgl_base_lo; + u32 sgl_base_hi; +#if defined(__BIG_ENDIAN) + u8 sgl_size; + u8 sge_index; + u16 sge_offset; +#elif defined(__LITTLE_ENDIAN) + u16 sge_offset; + u8 sge_index; + u8 sgl_size; +#endif +}; + + +/* + * CQE data for L2 OOO connection $$KEEP_ENDIANNESS$$ + */ +struct iscsi_l2_ooo_data { + __le32 iscsi_cid; + u8 drop_isle; + u8 drop_size; + u8 ooo_opcode; + u8 ooo_isle; + u8 reserved[8]; +}; + + + + + + +struct iscsi_task_context_entry_xuc_c_write_only { + u32 total_data_acked; +}; + +struct iscsi_task_context_r2t_table_entry { + u32 ttt; + u32 desired_data_len; +}; + +struct iscsi_task_context_entry_xuc_u_write_only { + u32 exp_r2t_sn; + struct iscsi_task_context_r2t_table_entry r2t_table[4]; +#if defined(__BIG_ENDIAN) + u16 data_in_count; + u8 cq_id; + u8 valid_1b; +#elif defined(__LITTLE_ENDIAN) + u8 valid_1b; + u8 cq_id; + u16 data_in_count; +#endif +}; + +struct iscsi_task_context_entry_xuc { + struct iscsi_task_context_entry_xuc_c_write_only write_c; + u32 exp_data_transfer_len; + struct iscsi_task_context_entry_xuc_x_write_only write_x; + u32 lun_lo; + struct iscsi_task_context_entry_xuc_xu_write_both write_xu; + u32 lun_hi; + struct iscsi_task_context_entry_xuc_u_write_only write_u; +}; + +struct iscsi_task_context_entry_u { + u32 exp_r2t_buff_offset; + u32 rem_rcv_len; + u32 exp_data_sn; +}; + +struct iscsi_task_context_entry { + struct iscsi_task_context_entry_x tce_x; +#if defined(__BIG_ENDIAN) + u16 data_out_count; + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u16 data_out_count; +#endif + struct iscsi_task_context_entry_xuc tce_xuc; + struct iscsi_task_context_entry_u tce_u; + u32 rsrv1[7]; +}; + + + + + + + + +struct iscsi_task_context_entry_xuc_x_init_only { + struct regpair lun; + u32 exp_data_transfer_len; +}; + + + + + + + + + + + + + + + + + +/* + * ipv6 structure + */ +struct ip_v6_addr { + u32 ip_addr_lo_lo; + u32 ip_addr_lo_hi; + u32 ip_addr_hi_lo; + u32 ip_addr_hi_hi; +}; + + + +/* + * l5cm- connection identification params + */ +struct l5cm_conn_addr_params { + u32 pmtu; +#if defined(__BIG_ENDIAN) + u8 remote_addr_3; + u8 remote_addr_2; + u8 remote_addr_1; + u8 remote_addr_0; +#elif defined(__LITTLE_ENDIAN) + u8 remote_addr_0; + u8 remote_addr_1; + u8 remote_addr_2; + u8 remote_addr_3; +#endif +#if defined(__BIG_ENDIAN) + u16 params; +#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0) +#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0 +#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1) +#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1 + u8 remote_addr_5; + u8 remote_addr_4; +#elif defined(__LITTLE_ENDIAN) + u8 remote_addr_4; + u8 remote_addr_5; + u16 params; +#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0) +#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0 +#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1) +#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1 
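+/*
+ * Editorial note, not part of the original patch: each field group in
+ * this file is declared once per byte order so that the host-endian C
+ * layout always matches the little-endian image the chip reads. An
+ * illustrative flag update (the meaning of the bit's two values is not
+ * spelled out here and is left as in the source):
+ *
+ *	addr->params |= (1 << L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT);
+ *
+ * with 'addr' assumed to point at a struct l5cm_conn_addr_params.
+ */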
+#endif + struct ip_v6_addr local_ip_addr; + struct ip_v6_addr remote_ip_addr; + u32 ipv6_flow_label_20b; + u32 reserved1; +#if defined(__BIG_ENDIAN) + u16 remote_tcp_port; + u16 local_tcp_port; +#elif defined(__LITTLE_ENDIAN) + u16 local_tcp_port; + u16 remote_tcp_port; +#endif +}; + +/* + * l5cm-xstorm connection buffer + */ +struct l5cm_xstorm_conn_buffer { +#if defined(__BIG_ENDIAN) + u16 rsrv1; + u16 params; +#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0) +#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0 +#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) +#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1 +#elif defined(__LITTLE_ENDIAN) + u16 params; +#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0) +#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0 +#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) +#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1 + u16 rsrv1; +#endif +#if defined(__BIG_ENDIAN) + u16 mss; + u16 pseudo_header_checksum; +#elif defined(__LITTLE_ENDIAN) + u16 pseudo_header_checksum; + u16 mss; +#endif + u32 rcv_buf; + u32 rsrv2; + struct regpair context_addr; +}; + +/* + * l5cm-tstorm connection buffer + */ +struct l5cm_tstorm_conn_buffer { + u32 rsrv1[2]; +#if defined(__BIG_ENDIAN) + u16 params; +#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0) +#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0 +#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) +#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1 + u8 ka_max_probe_count; + u8 ka_enable; +#elif defined(__LITTLE_ENDIAN) + u8 ka_enable; + u8 ka_max_probe_count; + u16 params; +#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0) +#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0 +#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) +#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1 +#endif + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; +}; + +/* + * l5cm connection buffer for active side + */ +struct l5cm_active_conn_buffer { + struct l5cm_conn_addr_params conn_addr_buf; + struct l5cm_xstorm_conn_buffer xstorm_conn_buffer; + struct l5cm_tstorm_conn_buffer tstorm_conn_buffer; +}; + + + +/* + * The l5cm opaque buffer passed in add new connection ramrod passive side + */ +struct l5cm_hash_input_string { + u32 __opaque1; +#if defined(__BIG_ENDIAN) + u16 __opaque3; + u16 __opaque2; +#elif defined(__LITTLE_ENDIAN) + u16 __opaque2; + u16 __opaque3; +#endif + struct ip_v6_addr __opaque4; + struct ip_v6_addr __opaque5; + u32 __opaque6; + u32 __opaque7[5]; +}; + + +/* + * syn cookie component + */ +struct l5cm_syn_cookie_comp { + u32 __opaque; +}; + +/* + * data related to listeners of a TCP port + */ +struct l5cm_port_listener_data { + u8 params; +#define L5CM_PORT_LISTENER_DATA_ENABLE (0x1<<0) +#define L5CM_PORT_LISTENER_DATA_ENABLE_SHIFT 0 +#define L5CM_PORT_LISTENER_DATA_IP_INDEX (0xF<<1) +#define L5CM_PORT_LISTENER_DATA_IP_INDEX_SHIFT 1 +#define L5CM_PORT_LISTENER_DATA_NET_FILTER (0x1<<5) +#define L5CM_PORT_LISTENER_DATA_NET_FILTER_SHIFT 5 +#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE (0x1<<6) +#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE_SHIFT 6 +#define L5CM_PORT_LISTENER_DATA_MPA_MODE (0x1<<7) +#define L5CM_PORT_LISTENER_DATA_MPA_MODE_SHIFT 7 +}; + +/* + * Opaque structure passed from U to X when final ack arrives + */ +struct l5cm_opaque_buf { + u32 __opaque1; + u32 __opaque2; + u32 __opaque3; + u32 __opaque4; + struct l5cm_syn_cookie_comp __opaque5; +#if defined(__BIG_ENDIAN) + u16 rsrv2; + u8 rsrv; + struct l5cm_port_listener_data __opaque6; +#elif defined(__LITTLE_ENDIAN) + struct 
l5cm_port_listener_data __opaque6; + u8 rsrv; + u16 rsrv2; +#endif +}; + + +/* + * l5cm slow path element + */ +struct l5cm_packet_size { + u32 size; + u32 rsrv; +}; + + +/* + * The final-ack union structure in PCS entry after final ack arrived + */ +struct l5cm_pcse_ack { + struct l5cm_xstorm_conn_buffer tx_socket_params; + struct l5cm_opaque_buf opaque_buf; + struct l5cm_tstorm_conn_buffer rx_socket_params; +}; + + +/* + * The syn union structure in PCS entry after syn arrived + */ +struct l5cm_pcse_syn { + struct l5cm_opaque_buf opaque_buf; + u32 rsrv[12]; +}; + + +/* + * pcs entry data for passive connections + */ +struct l5cm_pcs_attributes { +#if defined(__BIG_ENDIAN) + u16 pcs_id; + u8 status; + u8 flags; +#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0) +#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0 +#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1) +#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1 +#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2) +#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2 +#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3) +#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3 +#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4) +#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4 +#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5) +#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5 +#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6) +#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6 +#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7) +#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0) +#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0 +#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1) +#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1 +#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2) +#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2 +#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3) +#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3 +#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4) +#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4 +#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5) +#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5 +#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6) +#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6 +#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7) +#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7 + u8 status; + u16 pcs_id; +#endif +}; + + +union l5cm_seg_params { + struct l5cm_pcse_syn syn_seg_params; + struct l5cm_pcse_ack ack_seg_params; +}; + +/* + * pcs entry data for passive connections + */ +struct l5cm_pcs_hdr { + struct l5cm_hash_input_string hash_input_string; + struct l5cm_conn_addr_params conn_addr_buf; + u32 cid; + u32 hash_result; + union l5cm_seg_params seg_params; + struct l5cm_pcs_attributes att; +#if defined(__BIG_ENDIAN) + u16 rsrv; + u16 rx_seg_size; +#elif defined(__LITTLE_ENDIAN) + u16 rx_seg_size; + u16 rsrv; +#endif +}; + +/* + * pcs entry for passive connections + */ +struct l5cm_pcs_entry { + struct l5cm_pcs_hdr hdr; + u8 rx_segment[1516]; +}; + + + + +/* + * l5cm connection parameters + */ +union l5cm_reduce_param_union { + u32 opaque1; + u32 opaque2; +}; + +/* + * l5cm connection parameters + */ +struct l5cm_reduce_conn { + union l5cm_reduce_param_union opaque1; + u32 opaque2; +}; + +/* + * l5cm slow path element + */ +union l5cm_specific_data { + u8 protocol_data[8]; + struct regpair phy_address; + struct l5cm_packet_size packet_size; + struct l5cm_reduce_conn reduced_conn; +}; + +/* + * l5 slow path 
element + */ +struct l5cm_spe { + struct spe_hdr hdr; + union l5cm_specific_data data; +}; + + + + +/* + * Termination variables + */ +struct l5cm_term_vars { + u8 BitMap; +#define L5CM_TERM_VARS_TCP_STATE (0xF<<0) +#define L5CM_TERM_VARS_TCP_STATE_SHIFT 0 +#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4) +#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4 +#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5) +#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5 +#define L5CM_TERM_VARS_TERM_ON_CHIP (0x1<<6) +#define L5CM_TERM_VARS_TERM_ON_CHIP_SHIFT 6 +#define L5CM_TERM_VARS_RSRV (0x1<<7) +#define L5CM_TERM_VARS_RSRV_SHIFT 7 +}; + + + + +/* + * Tstorm Tcp flags + */ +struct tstorm_l5cm_tcp_flags { + u16 flags; +#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0) +#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0 +#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12) +#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12 +#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13) +#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13 +#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14) +#define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14 +}; + + +/* + * Xstorm Tcp flags + */ +struct xstorm_l5cm_tcp_flags { + u8 flags; +#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED (0x1<<0) +#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED_SHIFT 0 +#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<1) +#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 1 +#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN (0x1<<2) +#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN_SHIFT 2 +#define XSTORM_L5CM_TCP_FLAGS_RSRV (0x1F<<3) +#define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3 +}; + + + +/* + * Out-of-order states + */ +enum tcp_ooo_event { + TCP_EVENT_ADD_PEN = 0, + TCP_EVENT_ADD_NEW_ISLE = 1, + TCP_EVENT_ADD_ISLE_RIGHT = 2, + TCP_EVENT_ADD_ISLE_LEFT = 3, + TCP_EVENT_JOIN = 4, + TCP_EVENT_NOP = 5, + MAX_TCP_OOO_EVENT +}; + + +/* + * OOO support modes + */ +enum tcp_tstorm_ooo { + TCP_TSTORM_OOO_DROP_AND_PROC_ACK = 0, + TCP_TSTORM_OOO_SEND_PURE_ACK = 1, + TCP_TSTORM_OOO_SUPPORTED = 2, + MAX_TCP_TSTORM_OOO +}; + + + + + + + + + +#endif /* __5710_HSI_CNIC_LE__ */ diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h new file mode 100644 index 0000000..79443e0 --- /dev/null +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -0,0 +1,340 @@ +/* cnic_if.h: Broadcom CNIC core network driver. + * + * Copyright (c) 2006-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + */ + + +#ifndef CNIC_IF_H +#define CNIC_IF_H + +#define CNIC_MODULE_VERSION "2.5.7" +#define CNIC_MODULE_RELDATE "July 20, 2011" + +#define CNIC_ULP_RDMA 0 +#define CNIC_ULP_ISCSI 1 +#define CNIC_ULP_FCOE 2 +#define CNIC_ULP_L4 3 +#define MAX_CNIC_ULP_TYPE_EXT 3 +#define MAX_CNIC_ULP_TYPE 4 + +struct kwqe { + u32 kwqe_op_flag; + +#define KWQE_QID_SHIFT 8 +#define KWQE_OPCODE_MASK 0x00ff0000 +#define KWQE_OPCODE_SHIFT 16 +#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT) +#define KWQE_LAYER_MASK 0x70000000 +#define KWQE_LAYER_SHIFT 28 +#define KWQE_FLAGS_LAYER_MASK_L2 (2<<28) +#define KWQE_FLAGS_LAYER_MASK_L3 (3<<28) +#define KWQE_FLAGS_LAYER_MASK_L4 (4<<28) +#define KWQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28) +#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28) +#define KWQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28) + + u32 kwqe_info0; + u32 kwqe_info1; + u32 kwqe_info2; + u32 kwqe_info3; + u32 kwqe_info4; + u32 kwqe_info5; + u32 kwqe_info6; +}; + +struct kwqe_16 { + u32 kwqe_info0; + u32 kwqe_info1; + u32 kwqe_info2; + u32 kwqe_info3; +}; + +struct kcqe { + u32 kcqe_info0; + u32 kcqe_info1; + u32 kcqe_info2; + u32 kcqe_info3; + u32 kcqe_info4; + u32 kcqe_info5; + u32 kcqe_info6; + u32 kcqe_op_flag; + #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */ + #define KCQE_FLAGS_LAYER_MASK (0x7<<28) + #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28) + #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28) + #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28) + #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28) + #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28) + #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28) + #define KCQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28) + #define KCQE_FLAGS_NEXT (1<<31) + #define KCQE_FLAGS_OPCODE_MASK (0xff<<16) + #define KCQE_FLAGS_OPCODE_SHIFT (16) + #define KCQE_OPCODE(op) \ + (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT) +}; + +#define MAX_CNIC_CTL_DATA 64 +#define MAX_DRV_CTL_DATA 64 + +#define CNIC_CTL_STOP_CMD 1 +#define CNIC_CTL_START_CMD 2 +#define CNIC_CTL_COMPLETION_CMD 3 +#define CNIC_CTL_STOP_ISCSI_CMD 4 + +#define DRV_CTL_IO_WR_CMD 0x101 +#define DRV_CTL_IO_RD_CMD 0x102 +#define DRV_CTL_CTX_WR_CMD 0x103 +#define DRV_CTL_CTXTBL_WR_CMD 0x104 +#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD 0x105 +#define DRV_CTL_START_L2_CMD 0x106 +#define DRV_CTL_STOP_L2_CMD 0x107 +#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c +#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d + +struct cnic_ctl_completion { + u32 cid; + u8 opcode; + u8 error; +}; + +struct cnic_ctl_info { + int cmd; + union { + struct cnic_ctl_completion comp; + char bytes[MAX_CNIC_CTL_DATA]; + } data; +}; + +struct drv_ctl_spq_credit { + u32 credit_count; +}; + +struct drv_ctl_io { + u32 cid_addr; + u32 offset; + u32 data; + dma_addr_t dma_addr; +}; + +struct drv_ctl_l2_ring { + u32 client_id; + u32 cid; +}; + +struct drv_ctl_info { + int cmd; + union { + struct drv_ctl_spq_credit credit; + struct drv_ctl_io io; + struct drv_ctl_l2_ring ring; + char bytes[MAX_DRV_CTL_DATA]; + } data; +}; + +struct cnic_ops { + struct module *cnic_owner; + /* Calls to these functions are protected by RCU. When + * unregistering, we wait for any calls to complete before + * continuing. 
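+	 * (In practice the caller invokes these handlers under
+	 * rcu_read_lock() and issues synchronize_rcu() before freeing
+	 * the structure.)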
+ */ + int (*cnic_handler)(void *, void *); + int (*cnic_ctl)(void *, struct cnic_ctl_info *); +}; + +#define MAX_CNIC_VEC 8 + +struct cnic_irq { + unsigned int vector; + void *status_blk; + u32 status_blk_num; + u32 status_blk_num2; + u32 irq_flags; +#define CNIC_IRQ_FL_MSIX 0x00000001 +}; + +struct cnic_eth_dev { + struct module *drv_owner; + u32 drv_state; +#define CNIC_DRV_STATE_REGD 0x00000001 +#define CNIC_DRV_STATE_USING_MSIX 0x00000002 +#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004 +#define CNIC_DRV_STATE_NO_ISCSI 0x00000008 +#define CNIC_DRV_STATE_NO_FCOE 0x00000010 + u32 chip_id; + u32 max_kwqe_pending; + struct pci_dev *pdev; + void __iomem *io_base; + void __iomem *io_base2; + const void *iro_arr; + + u32 ctx_tbl_offset; + u32 ctx_tbl_len; + int ctx_blk_size; + u32 starting_cid; + u32 max_iscsi_conn; + u32 max_fcoe_conn; + u32 max_rdma_conn; + u32 fcoe_init_cid; + u32 fcoe_wwn_port_name_hi; + u32 fcoe_wwn_port_name_lo; + u32 fcoe_wwn_node_name_hi; + u32 fcoe_wwn_node_name_lo; + + u16 iscsi_l2_client_id; + u16 iscsi_l2_cid; + u8 iscsi_mac[ETH_ALEN]; + + int num_irq; + struct cnic_irq irq_arr[MAX_CNIC_VEC]; + int (*drv_register_cnic)(struct net_device *, + struct cnic_ops *, void *); + int (*drv_unregister_cnic)(struct net_device *); + int (*drv_submit_kwqes_32)(struct net_device *, + struct kwqe *[], u32); + int (*drv_submit_kwqes_16)(struct net_device *, + struct kwqe_16 *[], u32); + int (*drv_ctl)(struct net_device *, struct drv_ctl_info *); + unsigned long reserved1[2]; +}; + +struct cnic_sockaddr { + union { + struct sockaddr_in v4; + struct sockaddr_in6 v6; + } local; + union { + struct sockaddr_in v4; + struct sockaddr_in6 v6; + } remote; +}; + +struct cnic_sock { + struct cnic_dev *dev; + void *context; + u32 src_ip[4]; + u32 dst_ip[4]; + u16 src_port; + u16 dst_port; + u16 vlan_id; + unsigned char old_ha[6]; + unsigned char ha[6]; + u32 mtu; + u32 cid; + u32 l5_cid; + u32 pg_cid; + int ulp_type; + + u32 ka_timeout; + u32 ka_interval; + u8 ka_max_probe_count; + u8 tos; + u8 ttl; + u8 snd_seq_scale; + u32 rcv_buf; + u32 snd_buf; + u32 seed; + + unsigned long tcp_flags; +#define SK_TCP_NO_DELAY_ACK 0x1 +#define SK_TCP_KEEP_ALIVE 0x2 +#define SK_TCP_NAGLE 0x4 +#define SK_TCP_TIMESTAMP 0x8 +#define SK_TCP_SACK 0x10 +#define SK_TCP_SEG_SCALING 0x20 + unsigned long flags; +#define SK_F_INUSE 0 +#define SK_F_OFFLD_COMPLETE 1 +#define SK_F_OFFLD_SCHED 2 +#define SK_F_PG_OFFLD_COMPLETE 3 +#define SK_F_CONNECT_START 4 +#define SK_F_IPV6 5 +#define SK_F_CLOSING 7 + + atomic_t ref_count; + u32 state; + struct kwqe kwqe1; + struct kwqe kwqe2; + struct kwqe kwqe3; +}; + +struct cnic_dev { + struct net_device *netdev; + struct pci_dev *pcidev; + void __iomem *regview; + struct list_head list; + + int (*register_device)(struct cnic_dev *dev, int ulp_type, + void *ulp_ctx); + int (*unregister_device)(struct cnic_dev *dev, int ulp_type); + int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num_wqes); + int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[], + u32 num_wqes); + + int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **, + void *); + int (*cm_destroy)(struct cnic_sock *); + int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *); + int (*cm_abort)(struct cnic_sock *); + int (*cm_close)(struct cnic_sock *); + struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type); + int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type, + char *data, u16 data_size); + unsigned long flags; +#define CNIC_F_CNIC_UP 1 
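+/* the CNIC_F_* values are bit numbers, used with test_bit() and
+ * friends on the 'flags' word
+ */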
+#define CNIC_F_BNX2_CLASS	3
+#define CNIC_F_BNX2X_CLASS	4
+	atomic_t ref_count;
+	u8 mac_addr[6];
+
+	int max_iscsi_conn;
+	int max_fcoe_conn;
+	int max_rdma_conn;
+
+	void *cnic_priv;
+};
+
+#define CNIC_WR(dev, off, val)		writel(val, dev->regview + off)
+#define CNIC_WR16(dev, off, val)	writew(val, dev->regview + off)
+#define CNIC_WR8(dev, off, val)		writeb(val, dev->regview + off)
+#define CNIC_RD(dev, off)		readl(dev->regview + off)
+#define CNIC_RD16(dev, off)		readw(dev->regview + off)
+
+struct cnic_ulp_ops {
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+
+	void (*cnic_init)(struct cnic_dev *dev);
+	void (*cnic_exit)(struct cnic_dev *dev);
+	void (*cnic_start)(void *ulp_ctx);
+	void (*cnic_stop)(void *ulp_ctx);
+	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
+				u32 num_cqes);
+	void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid);
+	void (*cm_connect_complete)(struct cnic_sock *);
+	void (*cm_close_complete)(struct cnic_sock *);
+	void (*cm_abort_complete)(struct cnic_sock *);
+	void (*cm_remote_close)(struct cnic_sock *);
+	void (*cm_remote_abort)(struct cnic_sock *);
+	int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
+				  char *data, u16 data_size);
+	struct module *owner;
+	atomic_t ref_count;
+};
+
+extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+
+extern int cnic_unregister_driver(int ulp_type);
+
+extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
+extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
new file mode 100644
index 0000000..ea65f7e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -0,0 +1,2690 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
+ * Copyright (c) 2006, 2007 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ *
+ * This driver is designed for the Broadcom SiByte SOC built-in
+ * Ethernet controllers.  Written by Mitch Lichtenberg at Broadcom Corp.
+ *
+ * Updated to the driver model and the PHY abstraction layer
+ * by Maciej W. Rozycki.
+ */
+
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/processor.h>	/* Processor type for cache alignment. */
+
+/* Operational parameters that usually are not changed. */
+
+#define CONFIG_SBMAC_COALESCE
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (2*HZ)
+
+
+MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
+MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
+
+/* A few user-configurable values which may be modified when a driver
+   module is loaded. */
+
+/* 1 normal messages, 0 quiet .. 7 verbose. */
+static int debug = 1;
+module_param(debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug messages");
+
+#ifdef CONFIG_SBMAC_COALESCE
+static int int_pktcnt_tx = 255;
+module_param(int_pktcnt_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");
+
+static int int_timeout_tx = 255;
+module_param(int_timeout_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");
+
+static int int_pktcnt_rx = 64;
+module_param(int_pktcnt_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");
+
+static int int_timeout_rx = 64;
+module_param(int_timeout_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
+#endif
+
+#include <asm/sibyte/board.h>
+#include <asm/sibyte/sb1250.h>
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#define R_MAC_DMA_OODPKTLOST_RX	R_MAC_DMA_OODPKTLOST
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#else
+#error invalid SiByte MAC configuration
+#endif
+#include <asm/sibyte/sb1250_scd.h>
+#include <asm/sibyte/sb1250_mac.h>
+#include <asm/sibyte/sb1250_dma.h>
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#define UNIT_INT(n)		(K_BCM1480_INT_MAC_0 + ((n) * 2))
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#define UNIT_INT(n)		(K_INT_MAC_0 + (n))
+#else
+#error invalid SiByte MAC configuration
+#endif
+
+#ifdef K_INT_PHY
+#define SBMAC_PHY_INT			K_INT_PHY
+#else
+#define SBMAC_PHY_INT			PHY_POLL
+#endif
+
+/**********************************************************************
+ *  Simple types
+ ********************************************************************* */
+
+enum sbmac_speed {
+	sbmac_speed_none = 0,
+	sbmac_speed_10 = SPEED_10,
+	sbmac_speed_100 = SPEED_100,
+	sbmac_speed_1000 = SPEED_1000,
+};
+
+enum sbmac_duplex {
+	sbmac_duplex_none = -1,
+	sbmac_duplex_half = DUPLEX_HALF,
+	sbmac_duplex_full = DUPLEX_FULL,
+};
+
+enum sbmac_fc {
+	sbmac_fc_none,
+	sbmac_fc_disabled,
+	sbmac_fc_frame,
+	sbmac_fc_collision,
+	sbmac_fc_carrier,
+};
+
+enum sbmac_state {
+	sbmac_state_uninit,
+	sbmac_state_off,
+	sbmac_state_on,
+	sbmac_state_broken,
+};
+
+
+/**********************************************************************
+ *  Macros
+ ********************************************************************* */
+
+
+#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
+			  (d)->sbdma_dscrtable : (d)->f+1)
+
+
+#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
+
+#define SBMAC_MAX_TXDESCR	256
+#define SBMAC_MAX_RXDESCR	256
+
+#define ETHER_ADDR_LEN		6
+#define ENET_PACKET_SIZE	1518
+/*#define ENET_PACKET_SIZE	9216 */
+
+/**********************************************************************
+ *  DMA Descriptor structure
+ ********************************************************************* */
+
+struct sbdmadscr {
+	uint64_t  dscr_a;
+	uint64_t  dscr_b;
+};
+
+/**********************************************************************
+ *  DMA Controller structure
+ ********************************************************************* */
+
+struct sbmacdma {
+
+	/*
+	 * This stuff is used to identify the channel and the registers
+	 * associated with it.
+ */ + struct sbmac_softc *sbdma_eth; /* back pointer to associated + MAC */ + int sbdma_channel; /* channel number */ + int sbdma_txdir; /* direction (1=transmit) */ + int sbdma_maxdescr; /* total # of descriptors + in ring */ +#ifdef CONFIG_SBMAC_COALESCE + int sbdma_int_pktcnt; + /* # descriptors rx/tx + before interrupt */ + int sbdma_int_timeout; + /* # usec rx/tx interrupt */ +#endif + void __iomem *sbdma_config0; /* DMA config register 0 */ + void __iomem *sbdma_config1; /* DMA config register 1 */ + void __iomem *sbdma_dscrbase; + /* descriptor base address */ + void __iomem *sbdma_dscrcnt; /* descriptor count register */ + void __iomem *sbdma_curdscr; /* current descriptor + address */ + void __iomem *sbdma_oodpktlost; + /* pkt drop (rx only) */ + + /* + * This stuff is for maintenance of the ring + */ + void *sbdma_dscrtable_unaligned; + struct sbdmadscr *sbdma_dscrtable; + /* base of descriptor table */ + struct sbdmadscr *sbdma_dscrtable_end; + /* end of descriptor table */ + struct sk_buff **sbdma_ctxtable; + /* context table, one + per descr */ + dma_addr_t sbdma_dscrtable_phys; + /* and also the phys addr */ + struct sbdmadscr *sbdma_addptr; /* next dscr for sw to add */ + struct sbdmadscr *sbdma_remptr; /* next dscr for sw + to remove */ +}; + + +/********************************************************************** + * Ethernet softc structure + ********************************************************************* */ + +struct sbmac_softc { + + /* + * Linux-specific things + */ + struct net_device *sbm_dev; /* pointer to linux device */ + struct napi_struct napi; + struct phy_device *phy_dev; /* the associated PHY device */ + struct mii_bus *mii_bus; /* the MII bus */ + int phy_irq[PHY_MAX_ADDR]; + spinlock_t sbm_lock; /* spin lock */ + int sbm_devflags; /* current device flags */ + + /* + * Controller-specific things + */ + void __iomem *sbm_base; /* MAC's base address */ + enum sbmac_state sbm_state; /* current state */ + + void __iomem *sbm_macenable; /* MAC Enable Register */ + void __iomem *sbm_maccfg; /* MAC Config Register */ + void __iomem *sbm_fifocfg; /* FIFO Config Register */ + void __iomem *sbm_framecfg; /* Frame Config Register */ + void __iomem *sbm_rxfilter; /* Receive Filter Register */ + void __iomem *sbm_isr; /* Interrupt Status Register */ + void __iomem *sbm_imr; /* Interrupt Mask Register */ + void __iomem *sbm_mdio; /* MDIO Register */ + + enum sbmac_speed sbm_speed; /* current speed */ + enum sbmac_duplex sbm_duplex; /* current duplex */ + enum sbmac_fc sbm_fc; /* cur. 
flow control setting */ + int sbm_pause; /* current pause setting */ + int sbm_link; /* current link state */ + + unsigned char sbm_hwaddr[ETHER_ADDR_LEN]; + + struct sbmacdma sbm_txdma; /* only channel 0 for now */ + struct sbmacdma sbm_rxdma; + int rx_hw_checksum; + int sbe_idx; +}; + + +/********************************************************************** + * Externs + ********************************************************************* */ + +/********************************************************************** + * Prototypes + ********************************************************************* */ + +static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, + int txrx, int maxdescr); +static void sbdma_channel_start(struct sbmacdma *d, int rxtx); +static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, + struct sk_buff *m); +static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); +static void sbdma_emptyring(struct sbmacdma *d); +static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); +static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int work_to_do, int poll); +static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int poll); +static int sbmac_initctx(struct sbmac_softc *s); +static void sbmac_channel_start(struct sbmac_softc *s); +static void sbmac_channel_stop(struct sbmac_softc *s); +static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, + enum sbmac_state); +static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); +static uint64_t sbmac_addr2reg(unsigned char *ptr); +static irqreturn_t sbmac_intr(int irq, void *dev_instance); +static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); +static void sbmac_setmulti(struct sbmac_softc *sc); +static int sbmac_init(struct platform_device *pldev, long long base); +static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); +static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, + enum sbmac_fc fc); + +static int sbmac_open(struct net_device *dev); +static void sbmac_tx_timeout (struct net_device *dev); +static void sbmac_set_rx_mode(struct net_device *dev); +static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int sbmac_close(struct net_device *dev); +static int sbmac_poll(struct napi_struct *napi, int budget); + +static void sbmac_mii_poll(struct net_device *dev); +static int sbmac_mii_probe(struct net_device *dev); + +static void sbmac_mii_sync(void __iomem *sbm_mdio); +static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, + int bitcnt); +static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx); +static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 val); + + +/********************************************************************** + * Globals + ********************************************************************* */ + +static char sbmac_string[] = "sb1250-mac"; + +static char sbmac_mdio_string[] = "sb1250-mac-mdio"; + + +/********************************************************************** + * MDIO constants + ********************************************************************* */ + +#define MII_COMMAND_START 0x01 +#define MII_COMMAND_READ 0x02 +#define MII_COMMAND_WRITE 0x01 +#define MII_COMMAND_ACK 0x02 + +#define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */ + +#define ENABLE 1 +#define DISABLE 0 + 
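+/**********************************************************************
+ *  MDIO frame format (informational)
+ *
+ *  The routines below bit-bang IEEE 802.3 clause 22 management frames
+ *  through the MAC's MDIO register.  A register read, as assembled by
+ *  sbmac_mii_read() from the constants above, looks roughly like:
+ *
+ *	preamble   ST  OP  PHYAD  REGAD  TA  DATA
+ *	32 x '1'   01  10  aaaaa  rrrrr  z0  dddddddddddddddd
+ *
+ *  sbmac_mii_sync() generates the preamble, sbmac_mii_senddata()
+ *  shifts out the ST/OP/PHYAD/REGAD fields, and the turnaround and
+ *  data bits are clocked directly in sbmac_mii_read()/_write().
+ ********************************************************************* */
+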
+/**********************************************************************
+ *  SBMAC_MII_SYNC(sbm_mdio)
+ *
+ *  Synchronize with the MII - send a pattern of bits to the MII
+ *  that will guarantee that it is ready to accept a command.
+ *
+ *  Input parameters:
+ *	   sbm_mdio - address of the MAC's MDIO register
+ *
+ *  Return value:
+ *	   nothing
+ ********************************************************************* */
+
+static void sbmac_mii_sync(void __iomem *sbm_mdio)
+{
+	int cnt;
+	uint64_t bits;
+	int mac_mdio_genc;
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
+
+	__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+
+	for (cnt = 0; cnt < 32; cnt++) {
+		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
+		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+	}
+}
+
+/**********************************************************************
+ *  SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt)
+ *
+ *  Send some bits to the MII.  The bits to be sent are right-
+ *  justified in the 'data' parameter.
+ *
+ *  Input parameters:
+ *	   sbm_mdio - address of the MAC's MDIO register
+ *	   data     - data to send
+ *	   bitcnt   - number of bits to send
+ ********************************************************************* */
+
+static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
+			       int bitcnt)
+{
+	int i;
+	uint64_t bits;
+	unsigned int curmask;
+	int mac_mdio_genc;
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	bits = M_MAC_MDIO_DIR_OUTPUT;
+	__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+
+	curmask = 1 << (bitcnt - 1);
+
+	for (i = 0; i < bitcnt; i++) {
+		if (data & curmask)
+			bits |= M_MAC_MDIO_OUT;
+		else
+			bits &= ~M_MAC_MDIO_OUT;
+		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
+		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+		curmask >>= 1;
+	}
+}
+
+
+
+/**********************************************************************
+ *  SBMAC_MII_READ(bus, phyaddr, regidx)
+ *
+ *  Read a PHY register.
+ *
+ *  Input parameters:
+ *	   bus     - MDIO bus handle
+ *	   phyaddr - PHY's address
+ *	   regidx  - index of register to read
+ *
+ *  Return value:
+ *	   value read, or 0xffff if an error occurred.
+ ********************************************************************* */
+
+static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
+{
+	struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
+	void __iomem *sbm_mdio = sc->sbm_mdio;
+	int idx;
+	int error;
+	int regval;
+	int mac_mdio_genc;
+
+	/*
+	 * Synchronize ourselves so that the PHY knows the next
+	 * thing coming down is a command
+	 */
+	sbmac_mii_sync(sbm_mdio);
+
+	/*
+	 * Send the data to the PHY.  The sequence is
+	 * a "start" command (2 bits)
+	 * a "read" command (2 bits)
+	 * the PHY addr (5 bits)
+	 * the register index (5 bits)
+	 */
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2);
+	sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
+	sbmac_mii_senddata(sbm_mdio, regidx, 5);
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	/*
+	 * Switch the port around without a clock transition.
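+	 * The bus must be released here so that the PHY can drive the
+	 * turnaround bit on the following clock pulse.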
+ */ + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); + + /* + * Send out a clock pulse to signal we want the status + */ + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); + + /* + * If an error occurred, the PHY will signal '1' back + */ + error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN; + + /* + * Issue an 'idle' clock pulse, but keep the direction + * the same. + */ + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); + + regval = 0; + + for (idx = 0; idx < 16; idx++) { + regval <<= 1; + + if (error == 0) { + if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN) + regval |= 1; + } + + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); + } + + /* Switch back to output */ + __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); + + if (error == 0) + return regval; + return 0xffff; +} + + +/********************************************************************** + * SBMAC_MII_WRITE(bus, phyaddr, regidx, regval) + * + * Write a value to a PHY register. + * + * Input parameters: + * bus - MDIO bus handle + * phyaddr - PHY to use + * regidx - register within the PHY + * regval - data to write to register + * + * Return value: + * 0 for success + ********************************************************************* */ + +static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 regval) +{ + struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; + void __iomem *sbm_mdio = sc->sbm_mdio; + int mac_mdio_genc; + + sbmac_mii_sync(sbm_mdio); + + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2); + sbmac_mii_senddata(sbm_mdio, phyaddr, 5); + sbmac_mii_senddata(sbm_mdio, regidx, 5); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2); + sbmac_mii_senddata(sbm_mdio, regval, 16); + + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; + + __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); + + return 0; +} + + + +/********************************************************************** + * SBDMA_INITCTX(d,s,chan,txrx,maxdescr) + * + * Initialize a DMA channel context. Since there are potentially + * eight DMA channels per MAC, it's nice to do this in a standard + * way. 
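+ *  Note that maxdescr+1 descriptor slots are allocated so that the
+ *  table can be realigned to the 16-byte boundary the hardware
+ *  requires (see the ALIGN() on sbdma_dscrtable below).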
+ * + * Input parameters: + * d - struct sbmacdma (DMA channel context) + * s - struct sbmac_softc (pointer to a MAC) + * chan - channel number (0..1 right now) + * txrx - Identifies DMA_TX or DMA_RX for channel direction + * maxdescr - number of descriptors + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, + int txrx, int maxdescr) +{ +#ifdef CONFIG_SBMAC_COALESCE + int int_pktcnt, int_timeout; +#endif + + /* + * Save away interesting stuff in the structure + */ + + d->sbdma_eth = s; + d->sbdma_channel = chan; + d->sbdma_txdir = txrx; + +#if 0 + /* RMON clearing */ + s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING; +#endif + + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR); + + /* + * initialize register pointers + */ + + d->sbdma_config0 = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0); + d->sbdma_config1 = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1); + d->sbdma_dscrbase = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE); + d->sbdma_dscrcnt = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT); + d->sbdma_curdscr = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR); + if (d->sbdma_txdir) + d->sbdma_oodpktlost = NULL; + else + d->sbdma_oodpktlost = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX); + + /* + * Allocate memory for the ring + */ + + d->sbdma_maxdescr = maxdescr; + + d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1, + sizeof(*d->sbdma_dscrtable), + GFP_KERNEL); + + /* + * The descriptor table must be aligned to at least 16 bytes or the + * MAC will corrupt it. + */ + d->sbdma_dscrtable = (struct sbdmadscr *) + ALIGN((unsigned long)d->sbdma_dscrtable_unaligned, + sizeof(*d->sbdma_dscrtable)); + + d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr; + + d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable); + + /* + * And context table + */ + + d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr, + sizeof(*d->sbdma_ctxtable), GFP_KERNEL); + +#ifdef CONFIG_SBMAC_COALESCE + /* + * Setup Rx/Tx DMA coalescing defaults + */ + + int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx; + if ( int_pktcnt ) { + d->sbdma_int_pktcnt = int_pktcnt; + } else { + d->sbdma_int_pktcnt = 1; + } + + int_timeout = (txrx == DMA_TX) ? 
int_timeout_tx : int_timeout_rx;
+	if ( int_timeout ) {
+		d->sbdma_int_timeout = int_timeout;
+	} else {
+		d->sbdma_int_timeout = 0;
+	}
+#endif
+
+}
+
+/**********************************************************************
+ *  SBDMA_CHANNEL_START(d)
+ *
+ *  Initialize the hardware registers for a DMA channel.
+ *
+ *  Input parameters:
+ *	   d - DMA channel to init (context must be previously init'd)
+ *	   rxtx - DMA_RX or DMA_TX depending on what type of channel
+ *
+ *  Return value:
+ *	   nothing
+ ********************************************************************* */
+
+static void sbdma_channel_start(struct sbmacdma *d, int rxtx)
+{
+	/*
+	 * Turn on the DMA channel
+	 */
+
+#ifdef CONFIG_SBMAC_COALESCE
+	__raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
+		       0, d->sbdma_config1);
+	__raw_writeq(M_DMA_EOP_INT_EN |
+		       V_DMA_RINGSZ(d->sbdma_maxdescr) |
+		       V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
+		       0, d->sbdma_config0);
+#else
+	__raw_writeq(0, d->sbdma_config1);
+	__raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
+		       0, d->sbdma_config0);
+#endif
+
+	__raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
+
+	/*
+	 * Initialize ring pointers
+	 */
+
+	d->sbdma_addptr = d->sbdma_dscrtable;
+	d->sbdma_remptr = d->sbdma_dscrtable;
+}
+
+/**********************************************************************
+ *  SBDMA_CHANNEL_STOP(d)
+ *
+ *  Shut down the hardware registers for a DMA channel.
+ *
+ *  Input parameters:
+ *	   d - DMA channel to stop (context must be previously init'd)
+ *
+ *  Return value:
+ *	   nothing
+ ********************************************************************* */
+
+static void sbdma_channel_stop(struct sbmacdma *d)
+{
+	/*
+	 * Turn off the DMA channel
+	 */
+
+	__raw_writeq(0, d->sbdma_config1);
+
+	__raw_writeq(0, d->sbdma_dscrbase);
+
+	__raw_writeq(0, d->sbdma_config0);
+
+	/*
+	 * Zero ring pointers
+	 */
+
+	d->sbdma_addptr = NULL;
+	d->sbdma_remptr = NULL;
+}
+
+static inline void sbdma_align_skb(struct sk_buff *skb,
+				   unsigned int power2, unsigned int offset)
+{
+	unsigned char *addr = skb->data;
+	unsigned char *newaddr = PTR_ALIGN(addr, power2);
+
+	skb_reserve(skb, newaddr - addr + offset);
+}
+
+
+/**********************************************************************
+ *  SBDMA_ADD_RCVBUFFER(d,sb)
+ *
+ *  Add a buffer to the specified DMA channel.   For receive channels,
+ *  this queues a buffer for inbound packets.
+ *
+ *  Input parameters:
+ *	   sc - softc structure
+ *	   d - DMA channel descriptor
+ *	   sb - sk_buff to add, or NULL if we should allocate one
+ *
+ *  Return value:
+ *	   0 if buffer was added successfully
+ *	   -ENOSPC if the ring is full
+ *	   -ENOBUFS if a new sk_buff could not be allocated
+ ********************************************************************* */
+
+
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+			       struct sk_buff *sb)
+{
+	struct net_device *dev = sc->sbm_dev;
+	struct sbdmadscr *dsc;
+	struct sbdmadscr *nextdsc;
+	struct sk_buff *sb_new = NULL;
+	int pktsize = ENET_PACKET_SIZE;
+
+	/* get pointer to our current place in the ring */
+
+	dsc = d->sbdma_addptr;
+	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
+
+	/*
+	 * figure out if the ring is full - if the next descriptor
+	 * is the same as the one that we're going to remove from
+	 * the ring, the ring is full
+	 */
+
+	if (nextdsc == d->sbdma_remptr) {
+		return -ENOSPC;
+	}
+
+	/*
+	 * Allocate a sk_buff if we don't already have one.
+	 * If we do have an sk_buff, reset it so that it's empty.
+	 *
+	 * Note: sk_buffs don't seem to be guaranteed to have any sort
+	 * of alignment when they are allocated.
Therefore, allocate enough + * extra space to make sure that: + * + * 1. the data does not start in the middle of a cache line. + * 2. The data does not end in the middle of a cache line + * 3. The buffer can be aligned such that the IP addresses are + * naturally aligned. + * + * Remember, the SOCs MAC writes whole cache lines at a time, + * without reading the old contents first. So, if the sk_buff's + * data portion starts in the middle of a cache line, the SOC + * DMA will trash the beginning (and ending) portions. + */ + + if (sb == NULL) { + sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + + SMP_CACHE_BYTES * 2 + + NET_IP_ALIGN); + if (sb_new == NULL) { + pr_info("%s: sk_buff allocation failed\n", + d->sbdma_eth->sbm_dev->name); + return -ENOBUFS; + } + + sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); + } + else { + sb_new = sb; + /* + * nothing special to reinit buffer, it's already aligned + * and sb->data already points to a good place. + */ + } + + /* + * fill in the descriptor + */ + +#ifdef CONFIG_SBMAC_COALESCE + /* + * Do not interrupt per DMA transfer. + */ + dsc->dscr_a = virt_to_phys(sb_new->data) | + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; +#else + dsc->dscr_a = virt_to_phys(sb_new->data) | + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | + M_DMA_DSCRA_INTERRUPT; +#endif + + /* receiving: no options */ + dsc->dscr_b = 0; + + /* + * fill in the context + */ + + d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new; + + /* + * point at next packet + */ + + d->sbdma_addptr = nextdsc; + + /* + * Give the buffer to the DMA engine. + */ + + __raw_writeq(1, d->sbdma_dscrcnt); + + return 0; /* we did it */ +} + +/********************************************************************** + * SBDMA_ADD_TXBUFFER(d,sb) + * + * Add a transmit buffer to the specified DMA channel, causing a + * transmit to start. + * + * Input parameters: + * d - DMA channel descriptor + * sb - sk_buff to add + * + * Return value: + * 0 transmit queued successfully + * otherwise error code + ********************************************************************* */ + + +static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb) +{ + struct sbdmadscr *dsc; + struct sbdmadscr *nextdsc; + uint64_t phys; + uint64_t ncb; + int length; + + /* get pointer to our current place in the ring */ + + dsc = d->sbdma_addptr; + nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); + + /* + * figure out if the ring is full - if the next descriptor + * is the same as the one that we're going to remove from + * the ring, the ring is full + */ + + if (nextdsc == d->sbdma_remptr) { + return -ENOSPC; + } + + /* + * Under Linux, it's not necessary to copy/coalesce buffers + * like it is on NetBSD. We think they're all contiguous, + * but that may not be true for GBE. + */ + + length = sb->len; + + /* + * fill in the descriptor. Note that the number of cache + * blocks in the descriptor is the number of blocks + * *spanned*, so we need to add in the offset (if any) + * while doing the calculation. 
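+	 * For example, with 32-byte cache lines a 60-byte frame whose
+	 * data starts 8 bytes into a line spans NUMCACHEBLKS(60 + 8) = 3
+	 * blocks, while NUMCACHEBLKS(60) alone would give only 2.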
+ */ + + phys = virt_to_phys(sb->data); + ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1))); + + dsc->dscr_a = phys | + V_DMA_DSCRA_A_SIZE(ncb) | +#ifndef CONFIG_SBMAC_COALESCE + M_DMA_DSCRA_INTERRUPT | +#endif + M_DMA_ETHTX_SOP; + + /* transmitting: set outbound options and length */ + + dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) | + V_DMA_DSCRB_PKT_SIZE(length); + + /* + * fill in the context + */ + + d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb; + + /* + * point at next packet + */ + + d->sbdma_addptr = nextdsc; + + /* + * Give the buffer to the DMA engine. + */ + + __raw_writeq(1, d->sbdma_dscrcnt); + + return 0; /* we did it */ +} + + + + +/********************************************************************** + * SBDMA_EMPTYRING(d) + * + * Free all allocated sk_buffs on the specified DMA channel; + * + * Input parameters: + * d - DMA channel + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbdma_emptyring(struct sbmacdma *d) +{ + int idx; + struct sk_buff *sb; + + for (idx = 0; idx < d->sbdma_maxdescr; idx++) { + sb = d->sbdma_ctxtable[idx]; + if (sb) { + dev_kfree_skb(sb); + d->sbdma_ctxtable[idx] = NULL; + } + } +} + + +/********************************************************************** + * SBDMA_FILLRING(d) + * + * Fill the specified DMA channel (must be receive channel) + * with sk_buffs + * + * Input parameters: + * sc - softc structure + * d - DMA channel + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) +{ + int idx; + + for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { + if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) + break; + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sbmac_netpoll(struct net_device *netdev) +{ + struct sbmac_softc *sc = netdev_priv(netdev); + int irq = sc->sbm_dev->irq; + + __raw_writeq(0, sc->sbm_imr); + + sbmac_intr(irq, netdev); + +#ifdef CONFIG_SBMAC_COALESCE + __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | + ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), + sc->sbm_imr); +#else + __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | + (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); +#endif +} +#endif + +/********************************************************************** + * SBDMA_RX_PROCESS(sc,d,work_to_do,poll) + * + * Process "completed" receive buffers on the specified DMA channel. + * + * Input parameters: + * sc - softc structure + * d - DMA channel context + * work_to_do - no. 
of packets to process before enabling interrupt + * again (for NAPI) + * poll - 1: using polling (for NAPI) + * + * Return value: + * nothing + ********************************************************************* */ + +static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int work_to_do, int poll) +{ + struct net_device *dev = sc->sbm_dev; + int curidx; + int hwidx; + struct sbdmadscr *dsc; + struct sk_buff *sb; + int len; + int work_done = 0; + int dropped = 0; + + prefetch(d); + +again: + /* Check if the HW dropped any frames */ + dev->stats.rx_fifo_errors + += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff; + __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost); + + while (work_to_do-- > 0) { + /* + * figure out where we are (as an index) and where + * the hardware is (also as an index) + * + * This could be done faster if (for example) the + * descriptor table was page-aligned and contiguous in + * both virtual and physical memory -- you could then + * just compare the low-order bits of the virtual address + * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) + */ + + dsc = d->sbdma_remptr; + curidx = dsc - d->sbdma_dscrtable; + + prefetch(dsc); + prefetch(&d->sbdma_ctxtable[curidx]); + + hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - + d->sbdma_dscrtable_phys) / + sizeof(*d->sbdma_dscrtable); + + /* + * If they're the same, that means we've processed all + * of the descriptors up to (but not including) the one that + * the hardware is working on right now. + */ + + if (curidx == hwidx) + goto done; + + /* + * Otherwise, get the packet's sk_buff ptr back + */ + + sb = d->sbdma_ctxtable[curidx]; + d->sbdma_ctxtable[curidx] = NULL; + + len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4; + + /* + * Check packet status. If good, process it. + * If not, silently drop it and put it back on the + * receive ring. + */ + + if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) { + + /* + * Add a new buffer to replace the old one. If we fail + * to allocate a buffer, we're going to drop this + * packet and put it right back on the receive ring. + */ + + if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == + -ENOBUFS)) { + dev->stats.rx_dropped++; + /* Re-add old buffer */ + sbdma_add_rcvbuffer(sc, d, sb); + /* No point in continuing at the moment */ + printk(KERN_ERR "dropped packet (1)\n"); + d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); + goto done; + } else { + /* + * Set length into the packet + */ + skb_put(sb,len); + + /* + * Buffer has been replaced on the + * receive ring. Pass the buffer to + * the kernel + */ + sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev); + /* Check hw IPv4/TCP checksum if supported */ + if (sc->rx_hw_checksum == ENABLE) { + if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) && + !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) { + sb->ip_summed = CHECKSUM_UNNECESSARY; + /* don't need to set sb->csum */ + } else { + skb_checksum_none_assert(sb); + } + } + prefetch(sb->data); + prefetch((const void *)(((char *)sb->data)+32)); + if (poll) + dropped = netif_receive_skb(sb); + else + dropped = netif_rx(sb); + + if (dropped == NET_RX_DROP) { + dev->stats.rx_dropped++; + d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); + goto done; + } + else { + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; + } + } + } else { + /* + * Packet was mangled somehow. Just drop it and + * put it back on the receive ring. + */ + dev->stats.rx_errors++; + sbdma_add_rcvbuffer(sc, d, sb); + } + + + /* + * .. and advance to the next buffer. 
+ */ + + d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); + work_done++; + } + if (!poll) { + work_to_do = 32; + goto again; /* collect fifo drop statistics again */ + } +done: + return work_done; +} + +/********************************************************************** + * SBDMA_TX_PROCESS(sc,d) + * + * Process "completed" transmit buffers on the specified DMA channel. + * This is normally called within the interrupt service routine. + * Note that this isn't really ideal for priority channels, since + * it processes all of the packets on a given channel before + * returning. + * + * Input parameters: + * sc - softc structure + * d - DMA channel context + * poll - 1: using polling (for NAPI) + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int poll) +{ + struct net_device *dev = sc->sbm_dev; + int curidx; + int hwidx; + struct sbdmadscr *dsc; + struct sk_buff *sb; + unsigned long flags; + int packets_handled = 0; + + spin_lock_irqsave(&(sc->sbm_lock), flags); + + if (d->sbdma_remptr == d->sbdma_addptr) + goto end_unlock; + + hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - + d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable); + + for (;;) { + /* + * figure out where we are (as an index) and where + * the hardware is (also as an index) + * + * This could be done faster if (for example) the + * descriptor table was page-aligned and contiguous in + * both virtual and physical memory -- you could then + * just compare the low-order bits of the virtual address + * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) + */ + + curidx = d->sbdma_remptr - d->sbdma_dscrtable; + + /* + * If they're the same, that means we've processed all + * of the descriptors up to (but not including) the one that + * the hardware is working on right now. + */ + + if (curidx == hwidx) + break; + + /* + * Otherwise, get the packet's sk_buff ptr back + */ + + dsc = &(d->sbdma_dscrtable[curidx]); + sb = d->sbdma_ctxtable[curidx]; + d->sbdma_ctxtable[curidx] = NULL; + + /* + * Stats + */ + + dev->stats.tx_bytes += sb->len; + dev->stats.tx_packets++; + + /* + * for transmits, we just free buffers. + */ + + dev_kfree_skb_irq(sb); + + /* + * .. and advance to the next buffer. + */ + + d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); + + packets_handled++; + + } + + /* + * Decide if we should wake up the protocol or not. + * Other drivers seem to do this when we reach a low + * watermark on the transmit queue. + */ + + if (packets_handled) + netif_wake_queue(d->sbdma_eth->sbm_dev); + +end_unlock: + spin_unlock_irqrestore(&(sc->sbm_lock), flags); + +} + + + +/********************************************************************** + * SBMAC_INITCTX(s) + * + * Initialize an Ethernet context structure - this is called + * once per MAC on the 1250. 
Memory is allocated here, so don't
+ *  call it again from inside the ioctl routines that bring the
+ *  interface up/down.
+ *
+ *  Input parameters:
+ *	   s - sbmac context structure
+ *
+ *  Return value:
+ *	   0
+ ********************************************************************* */
+
+static int sbmac_initctx(struct sbmac_softc *s)
+{
+
+	/*
+	 * figure out the addresses of some ports
+	 */
+
+	s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
+	s->sbm_maccfg    = s->sbm_base + R_MAC_CFG;
+	s->sbm_fifocfg   = s->sbm_base + R_MAC_THRSH_CFG;
+	s->sbm_framecfg  = s->sbm_base + R_MAC_FRAMECFG;
+	s->sbm_rxfilter  = s->sbm_base + R_MAC_ADFILTER_CFG;
+	s->sbm_isr       = s->sbm_base + R_MAC_STATUS;
+	s->sbm_imr       = s->sbm_base + R_MAC_INT_MASK;
+	s->sbm_mdio      = s->sbm_base + R_MAC_MDIO;
+
+	/*
+	 * Initialize the DMA channels.  Right now, only one per MAC is used.
+	 * Note: Only do this _once_, as it allocates memory from the kernel!
+	 */
+
+	sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
+	sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
+
+	/*
+	 * initial state is OFF
+	 */
+
+	s->sbm_state = sbmac_state_off;
+
+	return 0;
+}
+
+
+static void sbdma_uninitctx(struct sbmacdma *d)
+{
+	if (d->sbdma_dscrtable_unaligned) {
+		kfree(d->sbdma_dscrtable_unaligned);
+		d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
+	}
+
+	if (d->sbdma_ctxtable) {
+		kfree(d->sbdma_ctxtable);
+		d->sbdma_ctxtable = NULL;
+	}
+}
+
+
+static void sbmac_uninitctx(struct sbmac_softc *sc)
+{
+	sbdma_uninitctx(&(sc->sbm_txdma));
+	sbdma_uninitctx(&(sc->sbm_rxdma));
+}
+
+
+/**********************************************************************
+ *  SBMAC_CHANNEL_START(s)
+ *
+ *  Start packet processing on this MAC.
+ *
+ *  Input parameters:
+ *	   s - sbmac structure
+ *
+ *  Return value:
+ *	   nothing
+ ********************************************************************* */
+
+static void sbmac_channel_start(struct sbmac_softc *s)
+{
+	uint64_t reg;
+	void __iomem *port;
+	uint64_t cfg,fifo,framecfg;
+	int idx, th_value;
+
+	/*
+	 * Don't do this if running
+	 */
+
+	if (s->sbm_state == sbmac_state_on)
+		return;
+
+	/*
+	 * Bring the controller out of reset, but leave it off.
+	 */
+
+	__raw_writeq(0, s->sbm_macenable);
+
+	/*
+	 * Ignore all received packets
+	 */
+
+	__raw_writeq(0, s->sbm_rxfilter);
+
+	/*
+	 * Calculate values for various control registers.
+	 */
+
+	cfg = M_MAC_RETRY_EN |
+		M_MAC_TX_HOLD_SOP_EN |
+		V_MAC_TX_PAUSE_CNT_16K |
+		M_MAC_AP_STAT_EN |
+		M_MAC_FAST_SYNC |
+		M_MAC_SS_EN |
+		0;
+
+	/*
+	 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 parts
+	 * and make sure that RD_THRSH + WR_THRSH <= 128 for pass2 and above
+	 * Use a larger RD_THRSH for gigabit
+	 */
+	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
+		th_value = 28;
+	else
+		th_value = 64;
+
+	fifo = V_MAC_TX_WR_THRSH(4) |	/* Must be '4' or '8' */
+		((s->sbm_speed == sbmac_speed_1000)
+		 ?
V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) | + V_MAC_TX_RL_THRSH(4) | + V_MAC_RX_PL_THRSH(4) | + V_MAC_RX_RD_THRSH(4) | /* Must be '4' */ + V_MAC_RX_RL_THRSH(8) | + 0; + + framecfg = V_MAC_MIN_FRAMESZ_DEFAULT | + V_MAC_MAX_FRAMESZ_DEFAULT | + V_MAC_BACKOFF_SEL(1); + + /* + * Clear out the hash address map + */ + + port = s->sbm_base + R_MAC_HASH_BASE; + for (idx = 0; idx < MAC_HASH_COUNT; idx++) { + __raw_writeq(0, port); + port += sizeof(uint64_t); + } + + /* + * Clear out the exact-match table + */ + + port = s->sbm_base + R_MAC_ADDR_BASE; + for (idx = 0; idx < MAC_ADDR_COUNT; idx++) { + __raw_writeq(0, port); + port += sizeof(uint64_t); + } + + /* + * Clear out the DMA Channel mapping table registers + */ + + port = s->sbm_base + R_MAC_CHUP0_BASE; + for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { + __raw_writeq(0, port); + port += sizeof(uint64_t); + } + + + port = s->sbm_base + R_MAC_CHLO0_BASE; + for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { + __raw_writeq(0, port); + port += sizeof(uint64_t); + } + + /* + * Program the hardware address. It goes into the hardware-address + * register as well as the first filter register. + */ + + reg = sbmac_addr2reg(s->sbm_hwaddr); + + port = s->sbm_base + R_MAC_ADDR_BASE; + __raw_writeq(reg, port); + port = s->sbm_base + R_MAC_ETHERNET_ADDR; + +#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS + /* + * Pass1 SOCs do not receive packets addressed to the + * destination address in the R_MAC_ETHERNET_ADDR register. + * Set the value to zero. + */ + __raw_writeq(0, port); +#else + __raw_writeq(reg, port); +#endif + + /* + * Set the receive filter for no packets, and write values + * to the various config registers + */ + + __raw_writeq(0, s->sbm_rxfilter); + __raw_writeq(0, s->sbm_imr); + __raw_writeq(framecfg, s->sbm_framecfg); + __raw_writeq(fifo, s->sbm_fifocfg); + __raw_writeq(cfg, s->sbm_maccfg); + + /* + * Initialize DMA channels (rings should be ok now) + */ + + sbdma_channel_start(&(s->sbm_rxdma), DMA_RX); + sbdma_channel_start(&(s->sbm_txdma), DMA_TX); + + /* + * Configure the speed, duplex, and flow control + */ + + sbmac_set_speed(s,s->sbm_speed); + sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc); + + /* + * Fill the receive ring + */ + + sbdma_fillring(s, &(s->sbm_rxdma)); + + /* + * Turn on the rest of the bits in the enable register + */ + +#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) + __raw_writeq(M_MAC_RXDMA_EN0 | + M_MAC_TXDMA_EN0, s->sbm_macenable); +#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) + __raw_writeq(M_MAC_RXDMA_EN0 | + M_MAC_TXDMA_EN0 | + M_MAC_RX_ENABLE | + M_MAC_TX_ENABLE, s->sbm_macenable); +#else +#error invalid SiByte MAC configuration +#endif + +#ifdef CONFIG_SBMAC_COALESCE + __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | + ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr); +#else + __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | + (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr); +#endif + + /* + * Enable receiving unicasts and broadcasts + */ + + __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter); + + /* + * we're running now. 
+	 */
+
+	s->sbm_state = sbmac_state_on;
+
+	/*
+	 * Program multicast addresses
+	 */
+
+	sbmac_setmulti(s);
+
+	/*
+	 * If channel was in promiscuous mode before, turn that on
+	 */
+
+	if (s->sbm_devflags & IFF_PROMISC) {
+		sbmac_promiscuous_mode(s,1);
+	}
+
+}
+
+
+/**********************************************************************
+ *  SBMAC_CHANNEL_STOP(s)
+ *
+ *  Stop packet processing on this MAC.
+ *
+ *  Input parameters:
+ *	   s - sbmac structure
+ *
+ *  Return value:
+ *	   nothing
+ ********************************************************************* */
+
+static void sbmac_channel_stop(struct sbmac_softc *s)
+{
+	/* don't do this if already stopped */
+
+	if (s->sbm_state == sbmac_state_off)
+		return;
+
+	/* don't accept any packets, disable all interrupts */
+
+	__raw_writeq(0, s->sbm_rxfilter);
+	__raw_writeq(0, s->sbm_imr);
+
+	/* Turn off ticker */
+
+	/* XXX */
+
+	/* turn off receiver and transmitter */
+
+	__raw_writeq(0, s->sbm_macenable);
+
+	/* We're stopped now. */
+
+	s->sbm_state = sbmac_state_off;
+
+	/*
+	 * Stop DMA channels (rings should be ok now)
+	 */
+
+	sbdma_channel_stop(&(s->sbm_rxdma));
+	sbdma_channel_stop(&(s->sbm_txdma));
+
+	/* Empty the receive and transmit rings */
+
+	sbdma_emptyring(&(s->sbm_rxdma));
+	sbdma_emptyring(&(s->sbm_txdma));
+
+}
+
+/**********************************************************************
+ *  SBMAC_SET_CHANNEL_STATE(state)
+ *
+ *  Set the channel's state ON or OFF
+ *
+ *  Input parameters:
+ *	   state - new state
+ *
+ *  Return value:
+ *	   old state
+ ********************************************************************* */
+static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc,
+						enum sbmac_state state)
+{
+	enum sbmac_state oldstate = sc->sbm_state;
+
+	/*
+	 * If same as previous state, return
+	 */
+
+	if (state == oldstate) {
+		return oldstate;
+	}
+
+	/*
+	 * If new state is ON, turn channel on
+	 */
+
+	if (state == sbmac_state_on) {
+		sbmac_channel_start(sc);
+	}
+	else {
+		sbmac_channel_stop(sc);
+	}
+
+	/*
+	 * Return previous state
+	 */
+
+	return oldstate;
+}
+
+
+/**********************************************************************
+ *  SBMAC_PROMISCUOUS_MODE(sc,onoff)
+ *
+ *  Turn on or off promiscuous mode
+ *
+ *  Input parameters:
+ *	   sc - softc
+ *	   onoff - 1 to turn on, 0 to turn off
+ *
+ *  Return value:
+ *	   nothing
+ ********************************************************************* */
+
+static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
+{
+	uint64_t reg;
+
+	if (sc->sbm_state != sbmac_state_on)
+		return;
+
+	if (onoff) {
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg |= M_MAC_ALLPKT_EN;
+		__raw_writeq(reg, sc->sbm_rxfilter);
+	}
+	else {
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg &= ~M_MAC_ALLPKT_EN;
+		__raw_writeq(reg, sc->sbm_rxfilter);
+	}
+}
+
+/**********************************************************************
+ *  SBMAC_SET_IPHDR_OFFSET(sc)
+ *
+ *  Set the iphdr offset as 15 assuming ethernet encapsulation
+ *
+ *  Input parameters:
+ *	   sc - softc
+ *
+ *  Return value:
+ *	   nothing
+ ********************************************************************* */
+
+static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
+{
+	uint64_t reg;
+
+	/* Hard code the offset to 15 for now */
+	reg = __raw_readq(sc->sbm_rxfilter);
+	reg &= ~M_MAC_IPHDR_OFFSET;	/* clear the offset field first... */
+	reg |= V_MAC_IPHDR_OFFSET(15);	/* ...then set the new offset */
+	__raw_writeq(reg, sc->sbm_rxfilter);
+
+	/* BCM1250 pass1 didn't have hardware checksum.  Everything
+	   later does.
*/ + if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) { + sc->rx_hw_checksum = DISABLE; + } else { + sc->rx_hw_checksum = ENABLE; + } +} + + +/********************************************************************** + * SBMAC_ADDR2REG(ptr) + * + * Convert six bytes into the 64-bit register value that + * we typically write into the SBMAC's address/mcast registers + * + * Input parameters: + * ptr - pointer to 6 bytes + * + * Return value: + * register value + ********************************************************************* */ + +static uint64_t sbmac_addr2reg(unsigned char *ptr) +{ + uint64_t reg = 0; + + ptr += 6; + + reg |= (uint64_t) *(--ptr); + reg <<= 8; + reg |= (uint64_t) *(--ptr); + reg <<= 8; + reg |= (uint64_t) *(--ptr); + reg <<= 8; + reg |= (uint64_t) *(--ptr); + reg <<= 8; + reg |= (uint64_t) *(--ptr); + reg <<= 8; + reg |= (uint64_t) *(--ptr); + + return reg; +} + + +/********************************************************************** + * SBMAC_SET_SPEED(s,speed) + * + * Configure LAN speed for the specified MAC. + * Warning: must be called when MAC is off! + * + * Input parameters: + * s - sbmac structure + * speed - speed to set MAC to (see enum sbmac_speed) + * + * Return value: + * 1 if successful + * 0 indicates invalid parameters + ********************************************************************* */ + +static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed) +{ + uint64_t cfg; + uint64_t framecfg; + + /* + * Save new current values + */ + + s->sbm_speed = speed; + + if (s->sbm_state == sbmac_state_on) + return 0; /* save for next restart */ + + /* + * Read current register values + */ + + cfg = __raw_readq(s->sbm_maccfg); + framecfg = __raw_readq(s->sbm_framecfg); + + /* + * Mask out the stuff we want to change + */ + + cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL); + framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH | + M_MAC_SLOT_SIZE); + + /* + * Now add in the new bits + */ + + switch (speed) { + case sbmac_speed_10: + framecfg |= V_MAC_IFG_RX_10 | + V_MAC_IFG_TX_10 | + K_MAC_IFG_THRSH_10 | + V_MAC_SLOT_SIZE_10; + cfg |= V_MAC_SPEED_SEL_10MBPS; + break; + + case sbmac_speed_100: + framecfg |= V_MAC_IFG_RX_100 | + V_MAC_IFG_TX_100 | + V_MAC_IFG_THRSH_100 | + V_MAC_SLOT_SIZE_100; + cfg |= V_MAC_SPEED_SEL_100MBPS ; + break; + + case sbmac_speed_1000: + framecfg |= V_MAC_IFG_RX_1000 | + V_MAC_IFG_TX_1000 | + V_MAC_IFG_THRSH_1000 | + V_MAC_SLOT_SIZE_1000; + cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN; + break; + + default: + return 0; + } + + /* + * Send the bits back to the hardware + */ + + __raw_writeq(framecfg, s->sbm_framecfg); + __raw_writeq(cfg, s->sbm_maccfg); + + return 1; +} + +/********************************************************************** + * SBMAC_SET_DUPLEX(s,duplex,fc) + * + * Set Ethernet duplex and flow control options for this MAC + * Warning: must be called when MAC is off! 
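+ *
+ *  (Like sbmac_set_speed() above, this routine only takes effect with
+ *  the MAC off; when the MAC is on, both merely record the new values
+ *  and return 0, leaving them for the next restart.  A caller
+ *  reconfiguring a live link would therefore, as a rough sketch:
+ *
+ *	sbmac_set_channel_state(sc, sbmac_state_off);
+ *	sbmac_set_speed(sc, sbmac_speed_100);
+ *	sbmac_set_duplex(sc, sbmac_duplex_full, sbmac_fc_frame);
+ *	sbmac_set_channel_state(sc, sbmac_state_on);
+ *
+ *  which is, in spirit, the stop/start dance sbmac_mii_poll() performs
+ *  when the PHY reports a change.)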
+ * + * Input parameters: + * s - sbmac structure + * duplex - duplex setting (see enum sbmac_duplex) + * fc - flow control setting (see enum sbmac_fc) + * + * Return value: + * 1 if ok + * 0 if an invalid parameter combination was specified + ********************************************************************* */ + +static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, + enum sbmac_fc fc) +{ + uint64_t cfg; + + /* + * Save new current values + */ + + s->sbm_duplex = duplex; + s->sbm_fc = fc; + + if (s->sbm_state == sbmac_state_on) + return 0; /* save for next restart */ + + /* + * Read current register values + */ + + cfg = __raw_readq(s->sbm_maccfg); + + /* + * Mask off the stuff we're about to change + */ + + cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN); + + + switch (duplex) { + case sbmac_duplex_half: + switch (fc) { + case sbmac_fc_disabled: + cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED; + break; + + case sbmac_fc_collision: + cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED; + break; + + case sbmac_fc_carrier: + cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; + break; + + case sbmac_fc_frame: /* not valid in half duplex */ + default: /* invalid selection */ + return 0; + } + break; + + case sbmac_duplex_full: + switch (fc) { + case sbmac_fc_disabled: + cfg |= V_MAC_FC_CMD_DISABLED; + break; + + case sbmac_fc_frame: + cfg |= V_MAC_FC_CMD_ENABLED; + break; + + case sbmac_fc_collision: /* not valid in full duplex */ + case sbmac_fc_carrier: /* not valid in full duplex */ + default: + return 0; + } + break; + default: + return 0; + } + + /* + * Send the bits back to the hardware + */ + + __raw_writeq(cfg, s->sbm_maccfg); + + return 1; +} + + + + +/********************************************************************** + * SBMAC_INTR() + * + * Interrupt handler for MAC interrupts + * + * Input parameters: + * MAC structure + * + * Return value: + * nothing + ********************************************************************* */ +static irqreturn_t sbmac_intr(int irq,void *dev_instance) +{ + struct net_device *dev = (struct net_device *) dev_instance; + struct sbmac_softc *sc = netdev_priv(dev); + uint64_t isr; + int handled = 0; + + /* + * Read the ISR (this clears the bits in the real + * register, except for counter addr) + */ + + isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR; + + if (isr == 0) + return IRQ_RETVAL(0); + handled = 1; + + /* + * Transmits on channel 0 + */ + + if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) + sbdma_tx_process(sc,&(sc->sbm_txdma), 0); + + if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { + if (napi_schedule_prep(&sc->napi)) { + __raw_writeq(0, sc->sbm_imr); + __napi_schedule(&sc->napi); + /* Depend on the exit from poll to reenable intr */ + } + else { + /* may leave some packets behind */ + sbdma_rx_process(sc,&(sc->sbm_rxdma), + SBMAC_MAX_RXDESCR * 2, 0); + } + } + return IRQ_RETVAL(handled); +} + +/********************************************************************** + * SBMAC_START_TX(skb,dev) + * + * Start output on the specified interface. Basically, we + * queue as many buffers as we can until the ring fills up, or + * we run off the end of the queue, whichever comes first. 
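+ *
+ *  (On the interrupt handler above: masking the IMR before
+ *  __napi_schedule() and re-enabling it from the poll routine is the
+ *  standard NAPI handshake.  Reduced to its skeleton -- a sketch, not
+ *  a quote:
+ *
+ *	if (napi_schedule_prep(&sc->napi)) {
+ *		__raw_writeq(0, sc->sbm_imr);
+ *		__napi_schedule(&sc->napi);
+ *	}
+ *
+ *  with sbmac_poll(), later in this file, calling napi_complete() and
+ *  rewriting sbm_imr once work_done < budget.)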
+ *
+ *  Input parameters:
+ *  	   skb - sk_buff to transmit
+ *  	   dev - net_device to transmit it on
+ *
+ *  Return value:
+ *  	   NETDEV_TX_OK if the buffer was queued,
+ *  	   NETDEV_TX_BUSY if the ring was full
+ ********************************************************************* */
+static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	unsigned long flags;
+
+	/* lock eth irq */
+	spin_lock_irqsave(&sc->sbm_lock, flags);
+
+	/*
+	 * Put the buffer on the transmit ring.  If we
+	 * don't have room, stop the queue.
+	 */
+
+	if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
+		/* XXX save skb that we could not send */
+		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+	return NETDEV_TX_OK;
+}
+
+/**********************************************************************
+ *  SBMAC_SETMULTI(sc)
+ *
+ *  Reprogram the multicast table into the hardware, given
+ *  the list of multicasts associated with the interface
+ *  structure.
+ *
+ *  Input parameters:
+ *  	   sc - softc
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_setmulti(struct sbmac_softc *sc)
+{
+	uint64_t reg;
+	void __iomem *port;
+	int idx;
+	struct netdev_hw_addr *ha;
+	struct net_device *dev = sc->sbm_dev;
+
+	/*
+	 * Clear out entire multicast table.  We do this by nuking
+	 * the entire hash table and all the direct matches except
+	 * the first one, which is used for our station address
+	 */
+
+	for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
+		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
+		__raw_writeq(0, port);
+	}
+
+	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
+		port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
+		__raw_writeq(0, port);
+	}
+
+	/*
+	 * Clear the filter to say we don't want any multicasts.
+	 */
+
+	reg = __raw_readq(sc->sbm_rxfilter);
+	reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
+	__raw_writeq(reg, sc->sbm_rxfilter);
+
+	if (dev->flags & IFF_ALLMULTI) {
+		/*
+		 * Enable ALL multicasts.  Do this by inverting the
+		 * multicast enable bit.
+		 */
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
+		__raw_writeq(reg, sc->sbm_rxfilter);
+		return;
+	}
+
+
+	/*
+	 * Program new multicast entries.  For now, only use the
+	 * perfect filter.  In the future we'll need to use the
+	 * hash filter if the perfect filter overflows
+	 */
+
+	/* XXX only using perfect filter for now, need to use hash
+	 * XXX if the table overflows */
+
+	idx = 1;	/* skip station address */
+	netdev_for_each_mc_addr(ha, dev) {
+		if (idx == MAC_ADDR_COUNT)
+			break;
+		reg = sbmac_addr2reg(ha->addr);
+		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
+		__raw_writeq(reg, port);
+		idx++;
+	}
+
+	/*
+	 * Enable the "accept multicast bits" if we programmed at least one
+	 * multicast.
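+	 *
+	 * (On the IFF_ALLMULTI case above: M_MAC_MCAST_EN accepts
+	 * multicasts matching the table, and M_MAC_MCAST_INV, per the
+	 * comment there, inverts the sense of the match -- so with the
+	 * table zeroed, "match nothing, inverted" accepts everything:
+	 *
+	 *	reg |= M_MAC_MCAST_INV | M_MAC_MCAST_EN;   accept all
+	 *	reg |= M_MAC_MCAST_EN;                     table matches only
+	 *
+	 * both applied read-modify-write on sbm_rxfilter as shown.)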
+ */ + + if (idx > 1) { + reg = __raw_readq(sc->sbm_rxfilter); + reg |= M_MAC_MCAST_EN; + __raw_writeq(reg, sc->sbm_rxfilter); + } +} + +static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) +{ + if (new_mtu > ENET_PACKET_SIZE) + return -EINVAL; + _dev->mtu = new_mtu; + pr_info("changing the mtu to %d\n", new_mtu); + return 0; +} + +static const struct net_device_ops sbmac_netdev_ops = { + .ndo_open = sbmac_open, + .ndo_stop = sbmac_close, + .ndo_start_xmit = sbmac_start_tx, + .ndo_set_multicast_list = sbmac_set_rx_mode, + .ndo_tx_timeout = sbmac_tx_timeout, + .ndo_do_ioctl = sbmac_mii_ioctl, + .ndo_change_mtu = sb1250_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sbmac_netpoll, +#endif +}; + +/********************************************************************** + * SBMAC_INIT(dev) + * + * Attach routine - init hardware and hook ourselves into linux + * + * Input parameters: + * dev - net_device structure + * + * Return value: + * status + ********************************************************************* */ + +static int sbmac_init(struct platform_device *pldev, long long base) +{ + struct net_device *dev = dev_get_drvdata(&pldev->dev); + int idx = pldev->id; + struct sbmac_softc *sc = netdev_priv(dev); + unsigned char *eaddr; + uint64_t ea_reg; + int i; + int err; + + sc->sbm_dev = dev; + sc->sbe_idx = idx; + + eaddr = sc->sbm_hwaddr; + + /* + * Read the ethernet address. The firmware left this programmed + * for us in the ethernet address register for each mac. + */ + + ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR); + __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR); + for (i = 0; i < 6; i++) { + eaddr[i] = (uint8_t) (ea_reg & 0xFF); + ea_reg >>= 8; + } + + for (i = 0; i < 6; i++) { + dev->dev_addr[i] = eaddr[i]; + } + + /* + * Initialize context (get pointers to registers and stuff), then + * allocate the memory for the descriptor tables. 
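+	 *
+	 * (The unpacking loop above is the inverse of sbmac_addr2reg():
+	 * bytes come out least-significant first, so, as a worked
+	 * example,
+	 *
+	 *	ea_reg = 0x0000554433221102ULL;
+	 *
+	 * yields eaddr[] = 02:11:22:33:44:55, and sbmac_addr2reg(eaddr)
+	 * re-encodes exactly that register value.)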
+ */ + + sbmac_initctx(sc); + + /* + * Set up Linux device callins + */ + + spin_lock_init(&(sc->sbm_lock)); + + dev->netdev_ops = &sbmac_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + netif_napi_add(dev, &sc->napi, sbmac_poll, 16); + + dev->irq = UNIT_INT(idx); + + /* This is needed for PASS2 for Rx H/W checksum feature */ + sbmac_set_iphdr_offset(sc); + + sc->mii_bus = mdiobus_alloc(); + if (sc->mii_bus == NULL) { + err = -ENOMEM; + goto uninit_ctx; + } + + sc->mii_bus->name = sbmac_mdio_string; + snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx); + sc->mii_bus->priv = sc; + sc->mii_bus->read = sbmac_mii_read; + sc->mii_bus->write = sbmac_mii_write; + sc->mii_bus->irq = sc->phy_irq; + for (i = 0; i < PHY_MAX_ADDR; ++i) + sc->mii_bus->irq[i] = SBMAC_PHY_INT; + + sc->mii_bus->parent = &pldev->dev; + /* + * Probe PHY address + */ + err = mdiobus_register(sc->mii_bus); + if (err) { + printk(KERN_ERR "%s: unable to register MDIO bus\n", + dev->name); + goto free_mdio; + } + dev_set_drvdata(&pldev->dev, sc->mii_bus); + + err = register_netdev(dev); + if (err) { + printk(KERN_ERR "%s.%d: unable to register netdev\n", + sbmac_string, idx); + goto unreg_mdio; + } + + pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); + + if (sc->rx_hw_checksum == ENABLE) + pr_info("%s: enabling TCP rcv checksum\n", dev->name); + + /* + * Display Ethernet address (this is called during the config + * process so we need to finish off the config message that + * was being displayed) + */ + pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", + dev->name, base, eaddr); + + return 0; +unreg_mdio: + mdiobus_unregister(sc->mii_bus); + dev_set_drvdata(&pldev->dev, NULL); +free_mdio: + mdiobus_free(sc->mii_bus); +uninit_ctx: + sbmac_uninitctx(sc); + return err; +} + + +static int sbmac_open(struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + int err; + + if (debug > 1) + pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq); + + /* + * map/route interrupt (clear status first, in case something + * weird is pending; we haven't initialized the mac registers + * yet) + */ + + __raw_readq(sc->sbm_isr); + err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev); + if (err) { + printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, + dev->irq); + goto out_err; + } + + sc->sbm_speed = sbmac_speed_none; + sc->sbm_duplex = sbmac_duplex_none; + sc->sbm_fc = sbmac_fc_none; + sc->sbm_pause = -1; + sc->sbm_link = 0; + + /* + * Attach to the PHY + */ + err = sbmac_mii_probe(dev); + if (err) + goto out_unregister; + + /* + * Turn on the channel + */ + + sbmac_set_channel_state(sc,sbmac_state_on); + + netif_start_queue(dev); + + sbmac_set_rx_mode(dev); + + phy_start(sc->phy_dev); + + napi_enable(&sc->napi); + + return 0; + +out_unregister: + free_irq(dev->irq, dev); +out_err: + return err; +} + +static int sbmac_mii_probe(struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + struct phy_device *phy_dev; + int i; + + for (i = 0; i < PHY_MAX_ADDR; i++) { + phy_dev = sc->mii_bus->phy_map[i]; + if (phy_dev) + break; + } + if (!phy_dev) { + printk(KERN_ERR "%s: no PHY found\n", dev->name); + return -ENXIO; + } + + phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0, + PHY_INTERFACE_MODE_GMII); + if (IS_ERR(phy_dev)) { + printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); + return PTR_ERR(phy_dev); + } + + /* Remove any features not supported by the controller */ + phy_dev->supported &= SUPPORTED_10baseT_Half | + 
SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_MII | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; + phy_dev->advertising = phy_dev->supported; + + pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + dev->name, phy_dev->drv->name, + dev_name(&phy_dev->dev), phy_dev->irq); + + sc->phy_dev = phy_dev; + + return 0; +} + + +static void sbmac_mii_poll(struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + struct phy_device *phy_dev = sc->phy_dev; + unsigned long flags; + enum sbmac_fc fc; + int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg; + + link_chg = (sc->sbm_link != phy_dev->link); + speed_chg = (sc->sbm_speed != phy_dev->speed); + duplex_chg = (sc->sbm_duplex != phy_dev->duplex); + pause_chg = (sc->sbm_pause != phy_dev->pause); + + if (!link_chg && !speed_chg && !duplex_chg && !pause_chg) + return; /* Hmmm... */ + + if (!phy_dev->link) { + if (link_chg) { + sc->sbm_link = phy_dev->link; + sc->sbm_speed = sbmac_speed_none; + sc->sbm_duplex = sbmac_duplex_none; + sc->sbm_fc = sbmac_fc_disabled; + sc->sbm_pause = -1; + pr_info("%s: link unavailable\n", dev->name); + } + return; + } + + if (phy_dev->duplex == DUPLEX_FULL) { + if (phy_dev->pause) + fc = sbmac_fc_frame; + else + fc = sbmac_fc_disabled; + } else + fc = sbmac_fc_collision; + fc_chg = (sc->sbm_fc != fc); + + pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed, + phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H'); + + spin_lock_irqsave(&sc->sbm_lock, flags); + + sc->sbm_speed = phy_dev->speed; + sc->sbm_duplex = phy_dev->duplex; + sc->sbm_fc = fc; + sc->sbm_pause = phy_dev->pause; + sc->sbm_link = phy_dev->link; + + if ((speed_chg || duplex_chg || fc_chg) && + sc->sbm_state != sbmac_state_off) { + /* + * something changed, restart the channel + */ + if (debug > 1) + pr_debug("%s: restarting channel " + "because PHY state changed\n", dev->name); + sbmac_channel_stop(sc); + sbmac_channel_start(sc); + } + + spin_unlock_irqrestore(&sc->sbm_lock, flags); +} + + +static void sbmac_tx_timeout (struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&sc->sbm_lock, flags); + + + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + + spin_unlock_irqrestore(&sc->sbm_lock, flags); + + printk (KERN_WARNING "%s: Transmit timed out\n",dev->name); +} + + + + +static void sbmac_set_rx_mode(struct net_device *dev) +{ + unsigned long flags; + struct sbmac_softc *sc = netdev_priv(dev); + + spin_lock_irqsave(&sc->sbm_lock, flags); + if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) { + /* + * Promiscuous changed. + */ + + if (dev->flags & IFF_PROMISC) { + sbmac_promiscuous_mode(sc,1); + } + else { + sbmac_promiscuous_mode(sc,0); + } + } + spin_unlock_irqrestore(&sc->sbm_lock, flags); + + /* + * Program the multicasts. Do this every time. 
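+	 *
+	 * (Taken together with sbmac_open()/sbmac_close(), the phylib
+	 * lifecycle in this driver is the standard one:
+	 *
+	 *	phy_connect(dev, ..., &sbmac_mii_poll, 0,
+	 *		    PHY_INTERFACE_MODE_GMII);
+	 *	phy_start(sc->phy_dev);		in sbmac_open()
+	 *	phy_stop(sc->phy_dev);		in sbmac_close()
+	 *	phy_disconnect(sc->phy_dev);
+	 *
+	 * with sbmac_mii_poll() invoked on PHY state changes; the "..."
+	 * elides the arguments shown at the phy_connect() call site in
+	 * sbmac_mii_probe() above.)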
+ */ + + sbmac_setmulti(sc); + +} + +static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct sbmac_softc *sc = netdev_priv(dev); + + if (!netif_running(dev) || !sc->phy_dev) + return -EINVAL; + + return phy_mii_ioctl(sc->phy_dev, rq, cmd); +} + +static int sbmac_close(struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + + napi_disable(&sc->napi); + + phy_stop(sc->phy_dev); + + sbmac_set_channel_state(sc, sbmac_state_off); + + netif_stop_queue(dev); + + if (debug > 1) + pr_debug("%s: Shutting down ethercard\n", dev->name); + + phy_disconnect(sc->phy_dev); + sc->phy_dev = NULL; + free_irq(dev->irq, dev); + + sbdma_emptyring(&(sc->sbm_txdma)); + sbdma_emptyring(&(sc->sbm_rxdma)); + + return 0; +} + +static int sbmac_poll(struct napi_struct *napi, int budget) +{ + struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); + int work_done; + + work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); + sbdma_tx_process(sc, &(sc->sbm_txdma), 1); + + if (work_done < budget) { + napi_complete(napi); + +#ifdef CONFIG_SBMAC_COALESCE + __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | + ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), + sc->sbm_imr); +#else + __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | + (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); +#endif + } + + return work_done; +} + + +static int __devinit sbmac_probe(struct platform_device *pldev) +{ + struct net_device *dev; + struct sbmac_softc *sc; + void __iomem *sbm_base; + struct resource *res; + u64 sbmac_orig_hwaddr; + int err; + + res = platform_get_resource(pldev, IORESOURCE_MEM, 0); + BUG_ON(!res); + sbm_base = ioremap_nocache(res->start, resource_size(res)); + if (!sbm_base) { + printk(KERN_ERR "%s: unable to map device registers\n", + dev_name(&pldev->dev)); + err = -ENOMEM; + goto out_out; + } + + /* + * The R_MAC_ETHERNET_ADDR register will be set to some nonzero + * value for us by the firmware if we're going to use this MAC. + * If we find a zero, skip this MAC. + */ + sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); + pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev), + sbmac_orig_hwaddr ? "" : "not ", (long long)res->start); + if (sbmac_orig_hwaddr == 0) { + err = 0; + goto out_unmap; + } + + /* + * Okay, cool. Initialize this MAC. 
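+	 *
+	 * (The error handling below is the usual goto-chain unwind,
+	 * releasing resources in reverse order of acquisition -- as a
+	 * sketch of the shape:
+	 *
+	 *	err = sbmac_init(pldev, res->start);
+	 *	if (err)
+	 *		goto out_kfree;
+	 *	...
+	 *	out_kfree:  free_netdev() and restore the hardware address
+	 *	out_unmap:  iounmap()
+	 *	out_out:    return err
+	 *
+	 * so a failure at any stage leaks neither the mapping nor the
+	 * netdev.)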
+ */ + dev = alloc_etherdev(sizeof(struct sbmac_softc)); + if (!dev) { + printk(KERN_ERR "%s: unable to allocate etherdev\n", + dev_name(&pldev->dev)); + err = -ENOMEM; + goto out_unmap; + } + + dev_set_drvdata(&pldev->dev, dev); + SET_NETDEV_DEV(dev, &pldev->dev); + + sc = netdev_priv(dev); + sc->sbm_base = sbm_base; + + err = sbmac_init(pldev, res->start); + if (err) + goto out_kfree; + + return 0; + +out_kfree: + free_netdev(dev); + __raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR); + +out_unmap: + iounmap(sbm_base); + +out_out: + return err; +} + +static int __exit sbmac_remove(struct platform_device *pldev) +{ + struct net_device *dev = dev_get_drvdata(&pldev->dev); + struct sbmac_softc *sc = netdev_priv(dev); + + unregister_netdev(dev); + sbmac_uninitctx(sc); + mdiobus_unregister(sc->mii_bus); + mdiobus_free(sc->mii_bus); + iounmap(sc->sbm_base); + free_netdev(dev); + + return 0; +} + +static struct platform_driver sbmac_driver = { + .probe = sbmac_probe, + .remove = __exit_p(sbmac_remove), + .driver = { + .name = sbmac_string, + .owner = THIS_MODULE, + }, +}; + +static int __init sbmac_init_module(void) +{ + return platform_driver_register(&sbmac_driver); +} + +static void __exit sbmac_cleanup_module(void) +{ + platform_driver_unregister(&sbmac_driver); +} + +module_init(sbmac_init_module); +module_exit(sbmac_cleanup_module); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c new file mode 100644 index 0000000..dc3fbf6 --- /dev/null +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -0,0 +1,15862 @@ +/* + * tg3.c: Broadcom Tigon3 ethernet driver. + * + * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2005-2011 Broadcom Corporation. + * + * Firmware is: + * Derived from proprietary unpublished source code, + * Copyright (C) 2000-2003 Broadcom Corporation. + * + * Permission is hereby granted for the distribution of this firmware + * data in hexadecimal or equivalent format, provided this copyright + * notice is accompanying it. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#ifdef CONFIG_SPARC +#include +#include +#endif + +#define BAR_0 0 +#define BAR_2 2 + +#include "tg3.h" + +/* Functions & macros to verify TG3_FLAGS types */ + +static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits) +{ + return test_bit(flag, bits); +} + +static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits) +{ + set_bit(flag, bits); +} + +static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) +{ + clear_bit(flag, bits); +} + +#define tg3_flag(tp, flag) \ + _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags) +#define tg3_flag_set(tp, flag) \ + _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags) +#define tg3_flag_clear(tp, flag) \ + _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags) + +#define DRV_MODULE_NAME "tg3" +#define TG3_MAJ_NUM 3 +#define TG3_MIN_NUM 119 +#define DRV_MODULE_VERSION \ + __stringify(TG3_MAJ_NUM) "." 
__stringify(TG3_MIN_NUM) +#define DRV_MODULE_RELDATE "May 18, 2011" + +#define TG3_DEF_MAC_MODE 0 +#define TG3_DEF_RX_MODE 0 +#define TG3_DEF_TX_MODE 0 +#define TG3_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +#define TG3_GRC_LCLCTL_PWRSW_DELAY 100 + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ + +#define TG3_TX_TIMEOUT (5 * HZ) + +/* hardware minimum and maximum for a single frame's data payload */ +#define TG3_MIN_MTU 60 +#define TG3_MAX_MTU(tp) \ + (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500) + +/* These numbers seem to be hard coded in the NIC firmware somehow. + * You can't change the ring sizes, but you can change where you place + * them in the NIC onboard memory. + */ +#define TG3_RX_STD_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700) +#define TG3_DEF_RX_RING_PENDING 200 +#define TG3_RX_JMB_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) +#define TG3_DEF_RX_JUMBO_RING_PENDING 100 +#define TG3_RSS_INDIR_TBL_SIZE 128 + +/* Do not place this n-ring entries value into the tp struct itself, + * we really want to expose these constants to GCC so that modulo et + * al. operations are done with shifts and masks instead of with + * hw multiply/modulo instructions. Another solution would be to + * replace things like '% foo' with '& (foo - 1)'. + */ + +#define TG3_TX_RING_SIZE 512 +#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) + +#define TG3_RX_STD_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp)) +#define TG3_RX_JMB_RING_BYTES(tp) \ + (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp)) +#define TG3_RX_RCB_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1)) +#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ + TG3_TX_RING_SIZE) +#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) + +#define TG3_DMA_BYTE_ENAB 64 + +#define TG3_RX_STD_DMA_SZ 1536 +#define TG3_RX_JMB_DMA_SZ 9046 + +#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB) + +#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) +#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) + +#define TG3_RX_STD_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp)) + +#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp)) + +/* Due to a hardware bug, the 5701 can only DMA to memory addresses + * that are at least dword aligned when used in PCIX mode. The driver + * works around this bug by double copying the packet. This workaround + * is built into the normal double copy length check for efficiency. + * + * However, the double copy is only necessary on those architectures + * where unaligned memory accesses are inefficient. For those architectures + * where unaligned memory accesses incur little penalty, we can reintegrate + * the 5701 in the normal rx path. Doing so saves a device structure + * dereference by hardcoding the double copy threshold in place. 
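+ *
+ * Concretely, with the macros defined just below, an rx-path caller
+ * would choose between copy and direct hand-off roughly as (a sketch,
+ * not a quote from the rx path):
+ *
+ *	if (len < TG3_RX_COPY_THRESH(tp))
+ *		... memcpy the frame into a freshly allocated skb ...
+ *	else
+ *		... unmap and pass the receive buffer up directly ...
+ *
+ * where TG3_RX_COPY_THRESH(tp) is the constant 256 when unaligned
+ * accesses are cheap and the per-device tp->rx_copy_thresh otherwise.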
+ */ +#define TG3_RX_COPY_THRESHOLD 256 +#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD +#else + #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) +#endif + +/* minimum number of free TX descriptors required to wake up TX process */ +#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) +#define TG3_TX_BD_DMA_MAX 4096 + +#define TG3_RAW_IP_ALIGN 2 + +#define TG3_FW_UPDATE_TIMEOUT_SEC 5 + +#define FIRMWARE_TG3 "tigon/tg3.bin" +#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" +#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" + +static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; + +MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); +MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_FIRMWARE(FIRMWARE_TG3); +MODULE_FIRMWARE(FIRMWARE_TG3TSO); +MODULE_FIRMWARE(FIRMWARE_TG3TSO5); + +static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ +module_param(tg3_debug, int, 0); +MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); + +static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
PCI_DEVICE_ID_TIGON3_5754M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, + {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, + {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */ + {} +}; + +MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); + +static const struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "rx_octets" }, + { "rx_fragments" }, + { "rx_ucast_packets" }, + { "rx_mcast_packets" }, + { "rx_bcast_packets" }, + { "rx_fcs_errors" }, + { "rx_align_errors" }, + { "rx_xon_pause_rcvd" 
}, + { "rx_xoff_pause_rcvd" }, + { "rx_mac_ctrl_rcvd" }, + { "rx_xoff_entered" }, + { "rx_frame_too_long_errors" }, + { "rx_jabbers" }, + { "rx_undersize_packets" }, + { "rx_in_length_errors" }, + { "rx_out_length_errors" }, + { "rx_64_or_less_octet_packets" }, + { "rx_65_to_127_octet_packets" }, + { "rx_128_to_255_octet_packets" }, + { "rx_256_to_511_octet_packets" }, + { "rx_512_to_1023_octet_packets" }, + { "rx_1024_to_1522_octet_packets" }, + { "rx_1523_to_2047_octet_packets" }, + { "rx_2048_to_4095_octet_packets" }, + { "rx_4096_to_8191_octet_packets" }, + { "rx_8192_to_9022_octet_packets" }, + + { "tx_octets" }, + { "tx_collisions" }, + + { "tx_xon_sent" }, + { "tx_xoff_sent" }, + { "tx_flow_control" }, + { "tx_mac_errors" }, + { "tx_single_collisions" }, + { "tx_mult_collisions" }, + { "tx_deferred" }, + { "tx_excessive_collisions" }, + { "tx_late_collisions" }, + { "tx_collide_2times" }, + { "tx_collide_3times" }, + { "tx_collide_4times" }, + { "tx_collide_5times" }, + { "tx_collide_6times" }, + { "tx_collide_7times" }, + { "tx_collide_8times" }, + { "tx_collide_9times" }, + { "tx_collide_10times" }, + { "tx_collide_11times" }, + { "tx_collide_12times" }, + { "tx_collide_13times" }, + { "tx_collide_14times" }, + { "tx_collide_15times" }, + { "tx_ucast_packets" }, + { "tx_mcast_packets" }, + { "tx_bcast_packets" }, + { "tx_carrier_sense_errors" }, + { "tx_discards" }, + { "tx_errors" }, + + { "dma_writeq_full" }, + { "dma_write_prioq_full" }, + { "rxbds_empty" }, + { "rx_discards" }, + { "rx_errors" }, + { "rx_threshold_hit" }, + + { "dma_readq_full" }, + { "dma_read_prioq_full" }, + { "tx_comp_queue_full" }, + + { "ring_set_send_prod_index" }, + { "ring_status_update" }, + { "nic_irqs" }, + { "nic_avoided_irqs" }, + { "nic_tx_threshold_hit" }, + + { "mbuf_lwm_thresh_hit" }, +}; + +#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) + + +static const struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_test_keys[] = { + { "nvram test (online) " }, + { "link test (online) " }, + { "register test (offline)" }, + { "memory test (offline)" }, + { "loopback test (offline)" }, + { "interrupt test (offline)" }, +}; + +#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) + + +static void tg3_write32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off); +} + +static u32 tg3_read32(struct tg3 *tp, u32 off) +{ + return readl(tp->regs + off); +} + +static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->aperegs + off); +} + +static u32 tg3_ape_read32(struct tg3 *tp, u32 off) +{ + return readl(tp->aperegs + off); +} + +static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); +} + +static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off); + readl(tp->regs + off); +} + +static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; +} + +static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) +{ + unsigned long flags; + + if (off == (MAILBOX_RCVRET_CON_IDX_0 + 
TG3_64BIT_REG_LOW)) { + pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + if (off == TG3_RX_STD_PROD_IDX_REG) { + pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + + /* In indirect mode when disabling interrupts, we also need + * to clear the interrupt bit in the GRC local ctrl register. + */ + if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && + (val == 0x1)) { + pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, + tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); + } +} + +static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; +} + +/* usec_wait specifies the wait time in usec when writing to certain registers + * where it is unsafe to read back the register without some delay. + * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power. + * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed. + */ +static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) +{ + if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) + /* Non-posted methods */ + tp->write32(tp, off, val); + else { + /* Posted method */ + tg3_write32(tp, off, val); + if (usec_wait) + udelay(usec_wait); + tp->read32(tp, off); + } + /* Wait again after the read for the posted method to guarantee that + * the wait time is met. 
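+	 *
+	 * (The underlying flush idiom is the standard one for posted
+	 * PCI writes, as in tg3_write_flush_reg32() above:
+	 *
+	 *	writel(val, tp->regs + off);
+	 *	readl(tp->regs + off);
+	 *
+	 * the read forces the posted write to complete; the usec_wait
+	 * variants here just add a bounded udelay() on both sides.)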
+ */ + if (usec_wait) + udelay(usec_wait); +} + +static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) +{ + tp->write32_mbox(tp, off, val); + if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND)) + tp->read32_mbox(tp, off); +} + +static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) +{ + void __iomem *mbox = tp->regs + off; + writel(val, mbox); + if (tg3_flag(tp, TXD_MBOX_HWBUG)) + writel(val, mbox); + if (tg3_flag(tp, MBOX_WRITE_REORDER)) + readl(mbox); +} + +static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) +{ + return readl(tp->regs + off + GRCMBOX_BASE); +} + +static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off + GRCMBOX_BASE); +} + +#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) +#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) +#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) +#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) +#define tr32_mailbox(reg) tp->read32_mbox(tp, reg) + +#define tw32(reg, val) tp->write32(tp, reg, val) +#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0) +#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us)) +#define tr32(reg) tp->read32(tp, reg) + +static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) +{ + unsigned long flags; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) + return; + + spin_lock_irqsave(&tp->indirect_lock, flags); + if (tg3_flag(tp, SRAM_USE_CONFIG)) { + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + } else { + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); + tw32_f(TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); + } + spin_unlock_irqrestore(&tp->indirect_lock, flags); +} + +static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) +{ + unsigned long flags; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { + *val = 0; + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + if (tg3_flag(tp, SRAM_USE_CONFIG)) { + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + } else { + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); + *val = tr32(TG3PCI_MEM_WIN_DATA); + + /* Always leave this as zero. */ + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); + } + spin_unlock_irqrestore(&tp->indirect_lock, flags); +} + +static void tg3_ape_lock_init(struct tg3 *tp) +{ + int i; + u32 regbase, bit; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + regbase = TG3_APE_LOCK_GRANT; + else + regbase = TG3_APE_PER_LOCK_GRANT; + + /* Make sure the driver hasn't any stale locks. */ + for (i = 0; i < 8; i++) { + if (i == TG3_APE_LOCK_GPIO) + continue; + tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); + } + + /* Clear the correct bit of the GPIO lock too. 
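+	 *
+	 * (An aside on tg3_write_mem()/tg3_read_mem() above: they are a
+	 * classic windowed-SRAM access -- point the window register at
+	 * the target offset, move the data, then park the window at
+	 * zero, all under indirect_lock:
+	 *
+	 *	spin_lock_irqsave(&tp->indirect_lock, flags);
+	 *	tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+	 *	tw32_f(TG3PCI_MEM_WIN_DATA, val);
+	 *	tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+	 *	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	 *
+	 * condensed from the non-config-space branch above.)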
*/ + if (!tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit); +} + +static int tg3_ape_lock(struct tg3 *tp, int locknum) +{ + int i, off; + int ret = 0; + u32 status, req, gnt, bit; + + if (!tg3_flag(tp, ENABLE_APE)) + return 0; + + switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + return 0; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + break; + default: + return -EINVAL; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + req = TG3_APE_LOCK_REQ; + gnt = TG3_APE_LOCK_GRANT; + } else { + req = TG3_APE_PER_LOCK_REQ; + gnt = TG3_APE_PER_LOCK_GRANT; + } + + off = 4 * locknum; + + if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) + bit = APE_LOCK_REQ_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, req + off, bit); + + /* Wait for up to 1 millisecond to acquire lock. */ + for (i = 0; i < 100; i++) { + status = tg3_ape_read32(tp, gnt + off); + if (status == bit) + break; + udelay(10); + } + + if (status != bit) { + /* Revoke the lock request. */ + tg3_ape_write32(tp, gnt + off, bit); + ret = -EBUSY; + } + + return ret; +} + +static void tg3_ape_unlock(struct tg3 *tp, int locknum) +{ + u32 gnt, bit; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + return; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + break; + default: + return; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + gnt = TG3_APE_LOCK_GRANT; + else + gnt = TG3_APE_PER_LOCK_GRANT; + + if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, gnt + 4 * locknum, bit); +} + +static void tg3_disable_ints(struct tg3 *tp) +{ + int i; + + tw32(TG3PCI_MISC_HOST_CTRL, + (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); + for (i = 0; i < tp->irq_max; i++) + tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); +} + +static void tg3_enable_ints(struct tg3 *tp) +{ + int i; + + tp->irq_sync = 0; + wmb(); + + tw32(TG3PCI_MISC_HOST_CTRL, + (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); + + tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + if (tg3_flag(tp, 1SHOT_MSI)) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + tp->coal_now |= tnapi->coal_now; + } + + /* Force an initial interrupt */ + if (!tg3_flag(tp, TAGGED_STATUS) && + (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + else + tw32(HOSTCC_MODE, tp->coal_now); + + tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now); +} + +static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) +{ + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int work_exists = 0; + + /* check for phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + if (sblk->status & SD_STATUS_LINK_CHG) + work_exists = 1; + } + /* check for RX/TX work to do */ + if (sblk->idx[0].tx_consumer != tnapi->tx_cons || + *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_exists = 1; + + return work_exists; +} + +/* tg3_int_reenable + * similar to tg3_enable_ints, but it accurately determines whether there + * is new work pending and can return without 
flushing the PIO write + * which reenables interrupts + */ +static void tg3_int_reenable(struct tg3_napi *tnapi) +{ + struct tg3 *tp = tnapi->tp; + + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + + /* When doing tagged status, this work check is unnecessary. + * The last_tag we write above tells the chip which piece of + * work we've completed. + */ + if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi)) + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | tnapi->coal_now); +} + +static void tg3_switch_clocks(struct tg3 *tp) +{ + u32 clock_ctrl; + u32 orig_clock_ctrl; + + if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS)) + return; + + clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); + + orig_clock_ctrl = clock_ctrl; + clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | + CLOCK_CTRL_CLKRUN_OENABLE | + 0x1f); + tp->pci_clock_ctrl = clock_ctrl; + + if (tg3_flag(tp, 5705_PLUS)) { + if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | CLOCK_CTRL_625_CORE, 40); + } + } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | + (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK), + 40); + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | (CLOCK_CTRL_ALTCLK), + 40); + } + tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); +} + +#define PHY_BUSY_LOOPS 5000 + +static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) +{ + u32 frame_val; + unsigned int loops; + int ret; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + *val = 0x0; + + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & + MI_COM_PHY_ADDR_MASK); + frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & + MI_COM_REG_ADDR_MASK); + frame_val |= (MI_COM_CMD_READ | MI_COM_START); + + tw32_f(MAC_MI_COM, frame_val); + + loops = PHY_BUSY_LOOPS; + while (loops != 0) { + udelay(10); + frame_val = tr32(MAC_MI_COM); + + if ((frame_val & MI_COM_BUSY) == 0) { + udelay(5); + frame_val = tr32(MAC_MI_COM); + break; + } + loops -= 1; + } + + ret = -EBUSY; + if (loops != 0) { + *val = frame_val & MI_COM_DATA_MASK; + ret = 0; + } + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + return ret; +} + +static int tg3_writephy(struct tg3 *tp, int reg, u32 val) +{ + u32 frame_val; + unsigned int loops; + int ret; + + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL)) + return 0; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & + MI_COM_PHY_ADDR_MASK); + frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & + MI_COM_REG_ADDR_MASK); + frame_val |= (val & MI_COM_DATA_MASK); + frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); + + tw32_f(MAC_MI_COM, frame_val); + + loops = PHY_BUSY_LOOPS; + while (loops != 0) { + udelay(10); + frame_val = tr32(MAC_MI_COM); + if ((frame_val & MI_COM_BUSY) == 0) { + udelay(5); + frame_val = tr32(MAC_MI_COM); + break; + } + loops -= 1; + } + + ret = -EBUSY; + if (loops != 0) + ret = 0; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + return ret; +} + +static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, 
MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; +} + +static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; +} + +static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + +static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + +static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_AUX_CTRL, + (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) | + MII_TG3_AUXCTL_SHDWSEL_MISC); + if (!err) + err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val); + + return err; +} + +static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) +{ + if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC) + set |= MII_TG3_AUXCTL_MISC_WREN; + + return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); +} + +#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ + MII_TG3_AUXCTL_ACTL_TX_6DB) + +#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_TX_6DB); + +static int tg3_bmcr_reset(struct tg3 *tp) +{ + u32 phy_control; + int limit, err; + + /* OK, reset it, and poll the BMCR_RESET bit until it + * clears or we time out. 
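+	 *
+	 * (This is the generic IEEE 802.3 PHY soft-reset: BMCR_RESET is
+	 * self-clearing.  The phylib equivalent would be roughly
+	 *
+	 *	phy_write(phydev, MII_BMCR, BMCR_RESET);
+	 *	do {
+	 *		val = phy_read(phydev, MII_BMCR);
+	 *	} while (val >= 0 && (val & BMCR_RESET));
+	 *
+	 * a sketch, minus the timeout; tg3 drives the MI interface
+	 * directly since its MDIO bus may not be registered yet.)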
+ */ + phy_control = BMCR_RESET; + err = tg3_writephy(tp, MII_BMCR, phy_control); + if (err != 0) + return -EBUSY; + + limit = 5000; + while (limit--) { + err = tg3_readphy(tp, MII_BMCR, &phy_control); + if (err != 0) + return -EBUSY; + + if ((phy_control & BMCR_RESET) == 0) { + udelay(40); + break; + } + udelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) +{ + struct tg3 *tp = bp->priv; + u32 val; + + spin_lock_bh(&tp->lock); + + if (tg3_readphy(tp, reg, &val)) + val = -EIO; + + spin_unlock_bh(&tp->lock); + + return val; +} + +static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) +{ + struct tg3 *tp = bp->priv; + u32 ret = 0; + + spin_lock_bh(&tp->lock); + + if (tg3_writephy(tp, reg, val)) + ret = -EIO; + + spin_unlock_bh(&tp->lock); + + return ret; +} + +static int tg3_mdio_reset(struct mii_bus *bp) +{ + return 0; +} + +static void tg3_mdio_config_5785(struct tg3 *tp) +{ + u32 val; + struct phy_device *phydev; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + val = MAC_PHYCFG2_50610_LED_MODES; + break; + case PHY_ID_BCMAC131: + val = MAC_PHYCFG2_AC131_LED_MODES; + break; + case PHY_ID_RTL8211C: + val = MAC_PHYCFG2_RTL8211C_LED_MODES; + break; + case PHY_ID_RTL8201E: + val = MAC_PHYCFG2_RTL8201E_LED_MODES; + break; + default: + return; + } + + if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RGMII_INT | + MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; + tw32(MAC_PHYCFG1, val); + + return; + } + + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) + val |= MAC_PHYCFG2_EMODE_MASK_MASK | + MAC_PHYCFG2_FMODE_MASK_MASK | + MAC_PHYCFG2_GMODE_MASK_MASK | + MAC_PHYCFG2_ACT_MASK_MASK | + MAC_PHYCFG2_QUAL_MASK_MASK | + MAC_PHYCFG2_INBAND_ENABLE; + + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | + MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; + } + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | + MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; + tw32(MAC_PHYCFG1, val); + + val = tr32(MAC_EXT_RGMII_MODE); + val &= ~(MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET | + MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET); + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + val |= MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + val |= MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET; + } + tw32(MAC_EXT_RGMII_MODE, val); +} + +static void tg3_mdio_start(struct tg3 *tp) +{ + tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + + if (tg3_flag(tp, MDIOBUS_INITED) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); +} + +static int tg3_mdio_init(struct tg3 *tp) +{ + int i; + u32 reg; + struct 
phy_device *phydev;
+
+	if (tg3_flag(tp, 5717_PLUS)) {
+		u32 is_serdes;
+
+		tp->phy_addr = tp->pci_fn + 1;
+
+		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+		else
+			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
+				    TG3_CPMU_PHY_STRAP_IS_SERDES;
+		if (is_serdes)
+			tp->phy_addr += 7;
+	} else
+		tp->phy_addr = TG3_PHY_MII_ADDR;
+
+	tg3_mdio_start(tp);
+
+	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
+		return 0;
+
+	tp->mdio_bus = mdiobus_alloc();
+	if (tp->mdio_bus == NULL)
+		return -ENOMEM;
+
+	tp->mdio_bus->name     = "tg3 mdio bus";
+	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
+		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
+	tp->mdio_bus->priv     = tp;
+	tp->mdio_bus->parent   = &tp->pdev->dev;
+	tp->mdio_bus->read     = &tg3_mdio_read;
+	tp->mdio_bus->write    = &tg3_mdio_write;
+	tp->mdio_bus->reset    = &tg3_mdio_reset;
+	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+	tp->mdio_bus->irq      = &tp->mdio_irq[0];
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		tp->mdio_bus->irq[i] = PHY_POLL;
+
+	/* The bus registration will look for all the PHYs on the mdio bus.
+	 * Unfortunately, it does not ensure the PHY is powered up before
+	 * accessing the PHY ID registers.  A chip reset is the
+	 * quickest way to bring the device back to an operational state..
+	 */
+	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
+		tg3_bmcr_reset(tp);
+
+	i = mdiobus_register(tp->mdio_bus);
+	if (i) {
+		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
+		mdiobus_free(tp->mdio_bus);
+		return i;
+	}
+
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+	if (!phydev || !phydev->drv) {
+		dev_warn(&tp->pdev->dev, "No PHY devices\n");
+		mdiobus_unregister(tp->mdio_bus);
+		mdiobus_free(tp->mdio_bus);
+		return -ENODEV;
+	}
+
+	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
+	case PHY_ID_BCM57780:
+		phydev->interface = PHY_INTERFACE_MODE_GMII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
+		break;
+	case PHY_ID_BCM50610:
+	case PHY_ID_BCM50610M:
+		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
+				     PHY_BRCM_RX_REFCLK_UNUSED |
+				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
+				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
+		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
+			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
+		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
+		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
+		/* fallthru */
+	case PHY_ID_RTL8211C:
+		phydev->interface = PHY_INTERFACE_MODE_RGMII;
+		break;
+	case PHY_ID_RTL8201E:
+	case PHY_ID_BCMAC131:
+		phydev->interface = PHY_INTERFACE_MODE_MII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
+		tp->phy_flags |= TG3_PHYFLG_IS_FET;
+		break;
+	}
+
+	tg3_flag_set(tp, MDIOBUS_INITED);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+		tg3_mdio_config_5785(tp);
+
+	return 0;
+}
+
+static void tg3_mdio_fini(struct tg3 *tp)
+{
+	if (tg3_flag(tp, MDIOBUS_INITED)) {
+		tg3_flag_clear(tp, MDIOBUS_INITED);
+		mdiobus_unregister(tp->mdio_bus);
+		mdiobus_free(tp->mdio_bus);
+	}
+}
+
+/* tp->lock is held. */
+static inline void tg3_generate_fw_event(struct tg3 *tp)
+{
+	u32 val;
+
+	val = tr32(GRC_RX_CPU_EVENT);
+	val |= GRC_RX_CPU_DRIVER_EVENT;
+	tw32_f(GRC_RX_CPU_EVENT, val);
+
+	tp->last_event_jiffies = jiffies;
+}
+
+#define TG3_FW_EVENT_TIMEOUT_USEC 2500
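+
+/* How the driver->firmware event channel fits together (an
+ * illustrative summary condensed from the helpers around this point,
+ * not new driver code): tg3_generate_fw_event() above sets
+ * GRC_RX_CPU_DRIVER_EVENT and timestamps it; tg3_wait_for_event_ack()
+ * below spins until the firmware clears that bit or ~2.5 ms elapse.
+ * A sender therefore looks like:
+ *
+ *	tg3_wait_for_event_ack(tp);
+ *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
+ *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, len);
+ *	... fill NIC_SRAM_FW_CMD_DATA_MBOX ...
+ *	tg3_generate_fw_event(tp);
+ *
+ * which is exactly the shape of tg3_ump_link_report() below.
+ */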
+/* tp->lock is held. */ +static void tg3_wait_for_event_ack(struct tg3 *tp) +{ + int i; + unsigned int delay_cnt; + long time_remain; + + /* If enough time has passed, no wait is necessary. */ + time_remain = (long)(tp->last_event_jiffies + 1 + + usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - + (long)jiffies; + if (time_remain < 0) + return; + + /* Check if we can shorten the wait time. */ + delay_cnt = jiffies_to_usecs(time_remain); + if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) + delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; + delay_cnt = (delay_cnt >> 3) + 1; + + for (i = 0; i < delay_cnt; i++) { + if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) + break; + udelay(8); + } +} + +/* tp->lock is held. */ +static void tg3_ump_link_report(struct tg3 *tp) +{ + u32 reg; + u32 val; + + if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF)) + return; + + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); + + val = 0; + if (!tg3_readphy(tp, MII_BMCR, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_BMSR, &reg)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val); + + val = 0; + if (!tg3_readphy(tp, MII_ADVERTISE, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_LPA, &reg)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); + + val = 0; + if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { + if (!tg3_readphy(tp, MII_CTRL1000, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_STAT1000, &reg)) + val |= (reg & 0xffff); + } + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val); + + if (!tg3_readphy(tp, MII_PHYADDR, &reg)) + val = reg << 16; + else + val = 0; + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); + + tg3_generate_fw_event(tp); +} + +static void tg3_link_report(struct tg3 *tp) +{ + if (!netif_carrier_ok(tp->dev)) { + netif_info(tp, link, tp->dev, "Link is down\n"); + tg3_ump_link_report(tp); + } else if (netif_msg_link(tp)) { + netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", + (tp->link_config.active_speed == SPEED_1000 ? + 1000 : + (tp->link_config.active_speed == SPEED_100 ? + 100 : 10)), + (tp->link_config.active_duplex == DUPLEX_FULL ? + "full" : "half")); + + netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", + (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? + "on" : "off", + (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? + "on" : "off"); + + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + netdev_info(tp->dev, "EEE is %s\n", + tp->setlpicnt ?
"enabled" : "disabled"); + + tg3_ump_link_report(tp); + } +} + +static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) +{ + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_PAUSE_CAP; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_PAUSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + else + miireg = 0; + + return miireg; +} + +static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) +{ + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_1000XPAUSE; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_1000XPSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; + else + miireg = 0; + + return miireg; +} + +static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) +{ + u8 cap = 0; + + if (lcladv & ADVERTISE_1000XPAUSE) { + if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + else if (rmtadv & LPA_1000XPAUSE_ASYM) + cap = FLOW_CTRL_RX; + } else { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) + cap = FLOW_CTRL_TX; + } + + return cap; +} + +static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) +{ + u8 autoneg; + u8 flowctrl = 0; + u32 old_rx_mode = tp->rx_mode; + u32 old_tx_mode = tp->tx_mode; + + if (tg3_flag(tp, USE_PHYLIB)) + autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; + else + autoneg = tp->link_config.autoneg; + + if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); + else + flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + } else + flowctrl = tp->link_config.flowctrl; + + tp->link_config.active_flowctrl = flowctrl; + + if (flowctrl & FLOW_CTRL_RX) + tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; + else + tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; + + if (old_rx_mode != tp->rx_mode) + tw32_f(MAC_RX_MODE, tp->rx_mode); + + if (flowctrl & FLOW_CTRL_TX) + tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; + else + tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; + + if (old_tx_mode != tp->tx_mode) + tw32_f(MAC_TX_MODE, tp->tx_mode); +} + +static void tg3_adjust_link(struct net_device *dev) +{ + u8 oldflowctrl, linkmesg = 0; + u32 mac_mode, lcl_adv, rmt_adv; + struct tg3 *tp = netdev_priv(dev); + struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + spin_lock_bh(&tp->lock); + + mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | + MAC_MODE_HALF_DUPLEX); + + oldflowctrl = tp->link_config.active_flowctrl; + + if (phydev->link) { + lcl_adv = 0; + rmt_adv = 0; + + if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) + mac_mode |= MAC_MODE_PORT_MODE_MII; + else if (phydev->speed == SPEED_1000 || + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= MAC_MODE_PORT_MODE_MII; + + if (phydev->duplex == DUPLEX_HALF) + mac_mode |= MAC_MODE_HALF_DUPLEX; + else { + lcl_adv = tg3_advert_flowctrl_1000T( + tp->link_config.flowctrl); + + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + } + + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } else + mac_mode |= MAC_MODE_PORT_MODE_GMII; + + if (mac_mode != tp->mac_mode) { + tp->mac_mode = 
mac_mode; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + if (phydev->speed == SPEED_10) + tw32(MAC_MI_STAT, + MAC_MI_STAT_10MBPS_MODE | + MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + else + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + } + + if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); + else + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); + + if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) || + (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) || + phydev->speed != tp->link_config.active_speed || + phydev->duplex != tp->link_config.active_duplex || + oldflowctrl != tp->link_config.active_flowctrl) + linkmesg = 1; + + tp->link_config.active_speed = phydev->speed; + tp->link_config.active_duplex = phydev->duplex; + + spin_unlock_bh(&tp->lock); + + if (linkmesg) + tg3_link_report(tp); +} + +static int tg3_phy_init(struct tg3 *tp) +{ + struct phy_device *phydev; + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) + return 0; + + /* Bring the PHY back to a known state. */ + tg3_bmcr_reset(tp); + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + /* Attach the MAC to the PHY. */ + phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, + phydev->dev_flags, phydev->interface); + if (IS_ERR(phydev)) { + dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* Mask with MAC supported features. */ + switch (phydev->interface) { + case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_RGMII: + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + phydev->supported &= (PHY_GBIT_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + } + /* fallthru */ + case PHY_INTERFACE_MODE_MII: + phydev->supported &= (PHY_BASIC_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + default: + phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + return -EINVAL; + } + + tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; + + phydev->advertising = phydev->supported; + + return 0; +} + +static void tg3_phy_start(struct tg3 *tp) +{ + struct phy_device *phydev; + + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; + phydev->speed = tp->link_config.orig_speed; + phydev->duplex = tp->link_config.orig_duplex; + phydev->autoneg = tp->link_config.orig_autoneg; + phydev->advertising = tp->link_config.orig_advertising; + } + + phy_start(phydev); + + phy_start_aneg(phydev); +} + +static void tg3_phy_stop(struct tg3 *tp) +{ + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return; + + phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); +} + +static void tg3_phy_fini(struct tg3 *tp) +{ + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; + } +} + +static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) +{ + u32 phytest; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { + if (enable) + phy |= 
MII_TG3_FET_SHDW_AUXSTAT2_APD; + else + phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; + tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } +} + +static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) +{ + u32 reg; + + if (!tg3_flag(tp, 5705_PLUS) || + (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) + return; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + tg3_phy_fet_toggle_apd(tp, enable); + return; + } + + reg = MII_TG3_MISC_SHDW_WREN | + MII_TG3_MISC_SHDW_SCR5_SEL | + MII_TG3_MISC_SHDW_SCR5_LPED | + MII_TG3_MISC_SHDW_SCR5_DLPTLM | + MII_TG3_MISC_SHDW_SCR5_SDTL | + MII_TG3_MISC_SHDW_SCR5_C125OE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable) + reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; + + tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + + + reg = MII_TG3_MISC_SHDW_WREN | + MII_TG3_MISC_SHDW_APD_SEL | + MII_TG3_MISC_SHDW_APD_WKTM_84MS; + if (enable) + reg |= MII_TG3_MISC_SHDW_APD_ENABLE; + + tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); +} + +static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) +{ + u32 phy; + + if (!tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + return; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 ephy; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { + u32 reg = MII_TG3_FET_SHDW_MISCCTRL; + + tg3_writephy(tp, MII_TG3_FET_TEST, + ephy | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, reg, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; + else + phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; + tg3_writephy(tp, reg, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, ephy); + } + } else { + int ret; + + ret = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); + if (!ret) { + if (enable) + phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + else + phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, phy); + } + } +} + +static void tg3_phy_set_wirespeed(struct tg3 *tp) +{ + int ret; + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) + return; + + ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); + if (!ret) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, + val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); +} + +static void tg3_phy_apply_otp(struct tg3 *tp) +{ + u32 otp, phy; + + if (!tp->phy_otp) + return; + + otp = tp->phy_otp; + + if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) + return; + + phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); + phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); + + phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | + ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); + + phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); + phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); + + phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); + + phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); + + phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | + ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); + + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +} + +static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) +{ + u32 val; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + tp->setlpicnt = 0; + + if (tp->link_config.autoneg == 
AUTONEG_ENABLE && + current_link_up == 1 && + tp->link_config.active_duplex == DUPLEX_FULL && + (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_1000)) { + u32 eeectl; + + if (tp->link_config.active_speed == SPEED_1000) + eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; + else + eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; + + tw32(TG3_CPMU_EEE_CTRL, eeectl); + + tg3_phy_cl45_read(tp, MDIO_MMD_AN, + TG3_CL45_D7_EEERES_STAT, &val); + + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tp->setlpicnt = 2; + } + + if (!tp->setlpicnt) { + if (current_link_up == 1 && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); + } +} + +static void tg3_phy_eee_enable(struct tg3 *tp) +{ + u32 val; + + if (tp->link_config.active_speed == SPEED_1000 && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); +} + +static int tg3_wait_macro_done(struct tg3 *tp) +{ + int limit = 100; + + while (limit--) { + u32 tmp32; + + if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { + if ((tmp32 & 0x1000) == 0) + break; + } + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) +{ + static const u32 test_pat[4][6] = { + { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, + { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, + { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, + { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } + }; + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); + + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, + test_pat[chan][i]); + + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + for (i = 0; i < 6; i += 2) { + u32 low, high; + + if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || + tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + low &= 0x7fff; + high &= 0x000f; + if (low != test_pat[chan][i] || + high != test_pat[chan][i+1]) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); + + return -EBUSY; + } + } + } + + return 0; +} + +static int tg3_phy_reset_chanpat(struct tg3 *tp) +{ + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0002); + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); + if (tg3_wait_macro_done(tp)) + return -EBUSY; + } + + return 0; +} + +static int tg3_phy_reset_5703_4_5(struct tg3 *tp) +{ + u32 reg32, phy9_orig; + int retries, do_phy_reset, err; + + retries = 10; + do_phy_reset = 1; + do { + if (do_phy_reset) { + err = tg3_bmcr_reset(tp); + if (err) + return err; + do_phy_reset = 0; + } + + /* Disable transmitter and interrupt. */ + if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) + continue; + + reg32 |= 0x3000; + tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + + /* Set full-duplex, 1000 mbps. */ + tg3_writephy(tp, MII_BMCR, + BMCR_FULLDPLX | BMCR_SPEED1000); + + /* Set to master mode. */ + if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) + continue; + + tg3_writephy(tp, MII_CTRL1000, + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); + + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (err) + return err; + + /* Block the PHY control access. */ + tg3_phydsp_write(tp, 0x8005, 0x0800); + + err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); + if (!err) + break; + } while (--retries); + + err = tg3_phy_reset_chanpat(tp); + if (err) + return err; + + tg3_phydsp_write(tp, 0x8005, 0x0000); + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); + + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + + tg3_writephy(tp, MII_CTRL1000, phy9_orig); + + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { + reg32 &= ~0x3000; + tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + } else if (!err) + err = -EBUSY; + + return err; +} + +/* This will reset the tigon3 PHY if there is no valid + * link unless the FORCE argument is non-zero. + */ +static int tg3_phy_reset(struct tg3 *tp) +{ + u32 val, cpmuctrl; + int err; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + val = tr32(GRC_MISC_CFG); + tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); + udelay(40); + } + err = tg3_readphy(tp, MII_BMSR, &val); + err |= tg3_readphy(tp, MII_BMSR, &val); + if (err != 0) + return -EBUSY; + + if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) { + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + err = tg3_phy_reset_5703_4_5(tp); + if (err) + return err; + goto out; + } + + cpmuctrl = 0; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { + cpmuctrl = tr32(TG3_CPMU_CTRL); + if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) + tw32(TG3_CPMU_CTRL, + cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); + } + + err = tg3_bmcr_reset(tp); + if (err) + return err; + + if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { + val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; + tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); + + tw32(TG3_CPMU_CTRL, cpmuctrl); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == + CPMU_LSPD_1000MB_MACCLK_12_5) { + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + udelay(40); + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + } + + if (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) + return 0; + + tg3_phy_apply_otp(tp); + + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + else
+ tg3_phy_toggle_apd(tp, false); + +out: + if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x201f, 0x2aaa); + tg3_phydsp_write(tp, 0x000a, 0x0323); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + } + + if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x000a, 0x310b); + tg3_phydsp_write(tp, 0x201f, 0x9506); + tg3_phydsp_write(tp, 0x401f, 0x14e2); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); + if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); + tg3_writephy(tp, MII_TG3_TEST1, + MII_TG3_TEST1_TRIM_EN | 0x4); + } else + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); + + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + } + + /* Set Extended packet length bit (bit 14) on all chips that */ + /* support jumbo frames */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + /* Cannot do read-modify-write on 5401 */ + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + } else if (tg3_flag(tp, JUMBO_CAPABLE)) { + /* Set bit 14 with read-modify-write to preserve other bits */ + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (!err) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); + } + + /* Set phy register 0x10 bit 0 to high fifo elasticity to support + * jumbo frames transmission. + */ + if (tg3_flag(tp, JUMBO_CAPABLE)) { + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* adjust output voltage */ + tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); + } + + tg3_phy_toggle_automdix(tp, 1); + tg3_phy_set_wirespeed(tp); + return 0; +} + +#define TG3_GPIO_MSG_DRVR_PRES 0x00000001 +#define TG3_GPIO_MSG_NEED_VAUX 0x00000002 +#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ + TG3_GPIO_MSG_NEED_VAUX) +#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ + ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ + (TG3_GPIO_MSG_DRVR_PRES << 4) | \ + (TG3_GPIO_MSG_DRVR_PRES << 8) | \ + (TG3_GPIO_MSG_DRVR_PRES << 12)) + +#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ + ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ + (TG3_GPIO_MSG_NEED_VAUX << 4) | \ + (TG3_GPIO_MSG_NEED_VAUX << 8) | \ + (TG3_GPIO_MSG_NEED_VAUX << 12)) + +static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) +{ + u32 status, shift; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); + else + status = tr32(TG3_CPMU_DRV_STATUS); + + shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; + status &= ~(TG3_GPIO_MSG_MASK << shift); + status |= (newstat << shift); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); + else + tw32(TG3_CPMU_DRV_STATUS, status); + + return status >> TG3_APE_GPIO_MSG_SHIFT; +} + +static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) +{ + if (!tg3_flag(tp, IS_NIC)) + return 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) 
== ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return -EIO; + + tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); + + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); + } else { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + return 0; +} + +static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) +{ + u32 grc_local_ctrl; + + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) + return; + + grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); +} + +static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) +{ + if (!tg3_flag(tp, IS_NIC)) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + (GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1), + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ + u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + tp->grc_local_ctrl; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else { + u32 no_gpio2; + u32 grc_local_ctrl = 0; + + /* Workaround to prevent overdrawing Amps. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + /* On 5753 and variants, GPIO2 cannot be used. 
*/ + no_gpio2 = tp->nic_sram_data_cfg & + NIC_SRAM_DATA_CFG_NO_GPIO2; + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + GRC_LCLCTRL_GPIO_OUTPUT2; + if (no_gpio2) { + grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT2); + } + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; + + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + if (!no_gpio2) { + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + } +} + +static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) +{ + u32 msg = 0; + + /* Serialize power state transitions */ + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return; + + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) + msg = TG3_GPIO_MSG_NEED_VAUX; + + msg = tg3_set_function_status(tp, msg); + + if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) + goto done; + + if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); + +done: + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); +} + +static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) +{ + bool need_vaux = false; + + /* The GPIOs do something completely different on 57765. */ + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tg3_frob_aux_power_5717(tp, include_wol ? + tg3_flag(tp, WOL_ENABLE) != 0 : 0); + return; + } + + if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { + struct net_device *dev_peer; + + dev_peer = pci_get_drvdata(tp->pdev_peer); + + /* remove_one() may have been run on the peer. 
*/ + if (dev_peer) { + struct tg3 *tp_peer = netdev_priv(dev_peer); + + if (tg3_flag(tp_peer, INIT_COMPLETE)) + return; + + if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || + tg3_flag(tp_peer, ENABLE_ASF)) + need_vaux = true; + } + } + + if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || + tg3_flag(tp, ENABLE_ASF)) + need_vaux = true; + + if (need_vaux) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); +} + +static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) +{ + if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) + return 1; + else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { + if (speed != SPEED_10) + return 1; + } else if (speed == SPEED_10) + return 1; + + return 0; +} + +static int tg3_setup_phy(struct tg3 *, int); + +#define RESET_KIND_SHUTDOWN 0 +#define RESET_KIND_INIT 1 +#define RESET_KIND_SUSPEND 2 + +static void tg3_write_sig_post_reset(struct tg3 *, int); +static int tg3_halt_cpu(struct tg3 *, u32); + +static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) +{ + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); + u32 serdes_cfg = tr32(MAC_SERDES_CFG); + + sg_dig_ctrl |= + SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; + tw32(SG_DIG_CTRL, sg_dig_ctrl); + tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); + } + return; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tg3_bmcr_reset(tp); + val = tr32(GRC_MISC_CFG); + tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); + udelay(40); + return; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 phytest; + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_ADVERTISE, 0); + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { + phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; + tg3_writephy(tp, + MII_TG3_FET_SHDW_AUXMODE4, + phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } + return; + } else if (do_low_power) { + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_FORCE_LED_OFF); + + val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | + MII_TG3_AUXCTL_PCTL_VREG_11V; + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); + } + + /* The PHY should not be powered down on some chips because + * of bugs. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) + return; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + val |= CPMU_LSPD_1000MB_MACCLK_12_5; + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + + tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); +} + +/* tp->lock is held. */ +static int tg3_nvram_lock(struct tg3 *tp) +{ + if (tg3_flag(tp, NVRAM)) { + int i; + + if (tp->nvram_lock_cnt == 0) { + tw32(NVRAM_SWARB, SWARB_REQ_SET1); + for (i = 0; i < 8000; i++) { + if (tr32(NVRAM_SWARB) & SWARB_GNT1) + break; + udelay(20); + } + if (i == 8000) { + tw32(NVRAM_SWARB, SWARB_REQ_CLR1); + return -ENODEV; + } + } + tp->nvram_lock_cnt++; + } + return 0; +} + +/* tp->lock is held. 
*/ +static void tg3_nvram_unlock(struct tg3 *tp) +{ + if (tg3_flag(tp, NVRAM)) { + if (tp->nvram_lock_cnt > 0) + tp->nvram_lock_cnt--; + if (tp->nvram_lock_cnt == 0) + tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); + } +} + +/* tp->lock is held. */ +static void tg3_enable_nvram_access(struct tg3 *tp) +{ + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); + } +} + +/* tp->lock is held. */ +static void tg3_disable_nvram_access(struct tg3 *tp) +{ + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); + } +} + +static int tg3_nvram_read_using_eeprom(struct tg3 *tp, + u32 offset, u32 *val) +{ + u32 tmp; + int i; + + if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) + return -EINVAL; + + tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | + EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, + tmp | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + ((offset << EEPROM_ADDR_ADDR_SHIFT) & + EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_READ | EEPROM_ADDR_START); + + for (i = 0; i < 1000; i++) { + tmp = tr32(GRC_EEPROM_ADDR); + + if (tmp & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(tmp & EEPROM_ADDR_COMPLETE)) + return -EBUSY; + + tmp = tr32(GRC_EEPROM_DATA); + + /* + * The data will always be opposite the native endian + * format. Perform a blind byteswap to compensate. + */ + *val = swab32(tmp); + + return 0; +} + +#define NVRAM_CMD_TIMEOUT 10000 + +static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) +{ + int i; + + tw32(NVRAM_CMD, nvram_cmd); + for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { + udelay(10); + if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { + udelay(10); + break; + } + } + + if (i == NVRAM_CMD_TIMEOUT) + return -EBUSY; + + return 0; +} + +static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) +{ + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr / tp->nvram_pagesize) << + ATMEL_AT45DB0X1B_PAGE_POS) + + (addr % tp->nvram_pagesize); + + return addr; +} + +static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) +{ + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * + tp->nvram_pagesize) + + (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); + + return addr; +} +
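tg3_nvram_phys_addr() and tg3_nvram_logical_addr() above translate between the linear offsets used elsewhere in the driver and the page-oriented addressing of the buffered Atmel flash parts, whose pages hold nvram_pagesize bytes but are addressed on 1 << ATMEL_AT45DB0X1B_PAGE_POS boundaries. A worked example, assuming the 264-byte page size and page position of 9 used for the AT45DB0X1B family (the actual values live in tg3.h):

/*
 * linear offset 1000:  page = 1000 / 264 = 3, byte = 1000 % 264 = 208
 *                      physical = (3 << 9) + 208 = 1536 + 208 = 0x6d0
 *
 * tg3_nvram_logical_addr() inverts this:
 *                      0x6d0 >> 9 = 3, so 3 * 264 + (0x6d0 & 0x1ff)
 *                                    = 792 + 208 = 1000
 */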
+/* NOTE: Data read in from NVRAM is byteswapped according to + * the byteswapping settings for all other register accesses. + * tg3 devices are BE devices, so on a BE machine, the data + * returned will be exactly as it is seen in NVRAM. On a LE + * machine, the 32-bit value will be byteswapped. + */ +static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) +{ + int ret; + + if (!tg3_flag(tp, NVRAM)) + return tg3_nvram_read_using_eeprom(tp, offset, val); + + offset = tg3_nvram_phys_addr(tp, offset); + + if (offset > NVRAM_ADDR_MSK) + return -EINVAL; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + + tw32(NVRAM_ADDR, offset); + ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); + + if (ret == 0) + *val = tr32(NVRAM_RDDATA); + + tg3_disable_nvram_access(tp); + + tg3_nvram_unlock(tp); + + return ret; +} + +/* Ensures NVRAM data is in bytestream format. */ +static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) +{ + u32 v; + int res = tg3_nvram_read(tp, offset, &v); + if (!res) + *val = cpu_to_be32(v); + return res; +} + +/* tp->lock is held. */ +static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) +{ + u32 addr_high, addr_low; + int i; + + addr_high = ((tp->dev->dev_addr[0] << 8) | + tp->dev->dev_addr[1]); + addr_low = ((tp->dev->dev_addr[2] << 24) | + (tp->dev->dev_addr[3] << 16) | + (tp->dev->dev_addr[4] << 8) | + (tp->dev->dev_addr[5] << 0)); + for (i = 0; i < 4; i++) { + if (i == 1 && skip_mac_1) + continue; + tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + for (i = 0; i < 12; i++) { + tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); + } + } + + addr_high = (tp->dev->dev_addr[0] + + tp->dev->dev_addr[1] + + tp->dev->dev_addr[2] + + tp->dev->dev_addr[3] + + tp->dev->dev_addr[4] + + tp->dev->dev_addr[5]) & + TX_BACKOFF_SEED_MASK; + tw32(MAC_TX_BACKOFF_SEED, addr_high); +} + +static void tg3_enable_register_access(struct tg3 *tp) +{ + /* + * Make sure register accesses (indirect or otherwise) will function + * correctly. + */ + pci_write_config_dword(tp->pdev, + TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); +} + +static int tg3_power_up(struct tg3 *tp) +{ + int err; + + tg3_enable_register_access(tp); + + err = pci_set_power_state(tp->pdev, PCI_D0); + if (!err) { + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + } else { + netdev_err(tp->dev, "Transition to D0 failed\n"); + } + + return err; +} + +static int tg3_power_down_prepare(struct tg3 *tp) +{ + u32 misc_host_ctrl; + bool device_should_wake, do_low_power; + + tg3_enable_register_access(tp); + + /* Restore the CLKREQ setting. 
*/ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 lnkctl; + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &lnkctl); + lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + lnkctl); + } + + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + tw32(TG3PCI_MISC_HOST_CTRL, + misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); + + device_should_wake = device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE); + + if (tg3_flag(tp, USE_PHYLIB)) { + do_low_power = false; + if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && + !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + struct phy_device *phydev; + u32 phyid, advertising; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; + + tp->link_config.orig_speed = phydev->speed; + tp->link_config.orig_duplex = phydev->duplex; + tp->link_config.orig_autoneg = phydev->autoneg; + tp->link_config.orig_advertising = phydev->advertising; + + advertising = ADVERTISED_TP | + ADVERTISED_Pause | + ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half; + + if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { + if (tg3_flag(tp, WOL_SPEED_100MB)) + advertising |= + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full; + else + advertising |= ADVERTISED_10baseT_Full; + } + + phydev->advertising = advertising; + + phy_start_aneg(phydev); + + phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; + if (phyid != PHY_ID_BCMAC131) { + phyid &= PHY_BCM_OUI_MASK; + if (phyid == PHY_BCM_OUI_1 || + phyid == PHY_BCM_OUI_2 || + phyid == PHY_BCM_OUI_3) + do_low_power = true; + } + } + } else { + do_low_power = true; + + if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; + tp->link_config.orig_speed = tp->link_config.speed; + tp->link_config.orig_duplex = tp->link_config.duplex; + tp->link_config.orig_autoneg = tp->link_config.autoneg; + } + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + tp->link_config.speed = SPEED_10; + tp->link_config.duplex = DUPLEX_HALF; + tp->link_config.autoneg = AUTONEG_ENABLE; + tg3_setup_phy(tp, 0); + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val; + + val = tr32(GRC_VCPU_EXT_CTRL); + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); + } else if (!tg3_flag(tp, ENABLE_ASF)) { + int i; + u32 val; + + for (i = 0; i < 200; i++) { + tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + msleep(1); + } + } + if (tg3_flag(tp, WOL_CAP)) + tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | + WOL_DRV_STATE_SHUTDOWN | + WOL_DRV_WOL | + WOL_SET_MAGIC_PKT); + + if (device_should_wake) { + u32 mac_mode; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + if (do_low_power && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_PWRCTL, + MII_TG3_AUXCTL_PCTL_WOL_EN | + MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); + udelay(40); + } + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + mac_mode = MAC_MODE_PORT_MODE_GMII; + else + mac_mode = MAC_MODE_PORT_MODE_MII; + + mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5700) { + u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 
+ SPEED_100 : SPEED_10; + if (tg3_5700_link_polarity(tp, speed)) + mac_mode |= MAC_MODE_LINK_POLARITY; + else + mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + } else { + mac_mode = MAC_MODE_PORT_MODE_TBI; + } + + if (!tg3_flag(tp, 5750_PLUS)) + tw32(MAC_LED_CTRL, tp->led_ctrl); + + mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; + if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && + (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) + mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; + + if (tg3_flag(tp, ENABLE_APE)) + mac_mode |= MAC_MODE_APE_TX_EN | + MAC_MODE_APE_RX_EN | + MAC_MODE_TDE_ENABLE; + + tw32_f(MAC_MODE, mac_mode); + udelay(100); + + tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); + udelay(10); + } + + if (!tg3_flag(tp, WOL_SPEED_100MB) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + u32 base_val; + + base_val = tp->pci_clock_ctrl; + base_val |= (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE); + + tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | + CLOCK_CTRL_PWRDOWN_PLL133, 40); + } else if (tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, CPMU_PRESENT) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* do nothing */ + } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { + u32 newbits1, newbits2; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_ALTCLK); + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } else if (tg3_flag(tp, 5705_PLUS)) { + newbits1 = CLOCK_CTRL_625_CORE; + newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; + } else { + newbits1 = CLOCK_CTRL_ALTCLK; + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, + 40); + + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, + 40); + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 newbits3; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_44MHZ_CORE); + } else { + newbits3 = CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, + tp->pci_clock_ctrl | newbits3, 40); + } + } + + if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) + tg3_power_down_phy(tp, do_low_power); + + tg3_frob_aux_power(tp, true); + + /* Workaround for unstable PLL clock */ + if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || + (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { + u32 val = tr32(0x7d00); + + val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); + tw32(0x7d00, val); + if (!tg3_flag(tp, ENABLE_ASF)) { + int err; + + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + } + } + + tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + + return 0; +} + +static void tg3_power_down(struct tg3 *tp) +{ + tg3_power_down_prepare(tp); + + pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); + pci_set_power_state(tp->pdev, PCI_D3hot); +} + +static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) +{ + switch (val & MII_TG3_AUX_STAT_SPDMASK) { + case MII_TG3_AUX_STAT_10HALF: + *speed = SPEED_10; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_10FULL: + *speed = SPEED_10; + *duplex = DUPLEX_FULL; + break; + + case MII_TG3_AUX_STAT_100HALF: + *speed = SPEED_100; + *duplex = 
DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_100FULL: + *speed = SPEED_100; + *duplex = DUPLEX_FULL; + break; + + case MII_TG3_AUX_STAT_1000HALF: + *speed = SPEED_1000; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_1000FULL: + *speed = SPEED_1000; + *duplex = DUPLEX_FULL; + break; + + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : + SPEED_10; + *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : + DUPLEX_HALF; + break; + } + *speed = SPEED_INVALID; + *duplex = DUPLEX_INVALID; + break; + } +} + +static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) +{ + int err = 0; + u32 val, new_adv; + + new_adv = ADVERTISE_CSMA; + if (advertise & ADVERTISED_10baseT_Half) + new_adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + new_adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + new_adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + new_adv |= ADVERTISE_100FULL; + + new_adv |= tg3_advert_flowctrl_1000T(flowctrl); + + err = tg3_writephy(tp, MII_ADVERTISE, new_adv); + if (err) + goto done; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + goto done; + + new_adv = 0; + if (advertise & ADVERTISED_1000baseT_Half) + new_adv |= ADVERTISE_1000HALF; + if (advertise & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000FULL; + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) + new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + + err = tg3_writephy(tp, MII_CTRL1000, new_adv); + if (err) + goto done; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + goto done; + + tw32(TG3_CPMU_EEE_MODE, + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); + + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (!err) { + u32 err2; + + val = 0; + /* Advertise 100-BaseTX EEE ability */ + if (advertise & ADVERTISED_100baseT_Full) + val |= MDIO_AN_EEE_ADV_100TX; + /* Advertise 1000-BaseT EEE ability */ + if (advertise & ADVERTISED_1000baseT_Full) + val |= MDIO_AN_EEE_ADV_1000T; + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + if (err) + val = 0; + + switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { + case ASIC_REV_5717: + case ASIC_REV_57765: + case ASIC_REV_5719: + /* If we advertised any eee advertisements above... */ + if (val) + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO | + MII_TG3_DSP_TAP26_OPCSINPT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + /* Fall through */ + case ASIC_REV_5720: + if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | + MII_TG3_DSP_CH34TP2_HIBW01); + } + + err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + if (!err) + err = err2; + } + +done: + return err; +} + +static void tg3_phy_copper_begin(struct tg3 *tp) +{ + u32 new_adv; + int i; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + new_adv = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full; + if (tg3_flag(tp, WOL_SPEED_100MB)) + new_adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + + tg3_phy_autoneg_cfg(tp, new_adv, + FLOW_CTRL_TX | FLOW_CTRL_RX); + } else if (tp->link_config.speed == SPEED_INVALID) { + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->link_config.advertising &= + ~(ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); + } else { + /* Asking for a specific link mode. 
*/ + if (tp->link_config.speed == SPEED_1000) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_1000baseT_Full; + else + new_adv = ADVERTISED_1000baseT_Half; + } else if (tp->link_config.speed == SPEED_100) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_100baseT_Full; + else + new_adv = ADVERTISED_100baseT_Half; + } else { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_10baseT_Full; + else + new_adv = ADVERTISED_10baseT_Half; + } + + tg3_phy_autoneg_cfg(tp, new_adv, + tp->link_config.flowctrl); + } + + if (tp->link_config.autoneg == AUTONEG_DISABLE && + tp->link_config.speed != SPEED_INVALID) { + u32 bmcr, orig_bmcr; + + tp->link_config.active_speed = tp->link_config.speed; + tp->link_config.active_duplex = tp->link_config.duplex; + + bmcr = 0; + switch (tp->link_config.speed) { + default: + case SPEED_10: + break; + + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + + case SPEED_1000: + bmcr |= BMCR_SPEED1000; + break; + } + + if (tp->link_config.duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + + if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && + (bmcr != orig_bmcr)) { + tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); + for (i = 0; i < 1500; i++) { + u32 tmp; + + udelay(10); + if (tg3_readphy(tp, MII_BMSR, &tmp) || + tg3_readphy(tp, MII_BMSR, &tmp)) + continue; + if (!(tmp & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + tg3_writephy(tp, MII_BMCR, bmcr); + udelay(40); + } + } else { + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } +} + +static int tg3_init_5401phy_dsp(struct tg3 *tp) +{ + int err; + + /* Turn off tap power management. */ + /* Set Extended packet length bit */ + err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + + err |= tg3_phydsp_write(tp, 0x0012, 0x1804); + err |= tg3_phydsp_write(tp, 0x0013, 0x1204); + err |= tg3_phydsp_write(tp, 0x8006, 0x0132); + err |= tg3_phydsp_write(tp, 0x8006, 0x0232); + err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); + + udelay(40); + + return err; +} + +static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) +{ + u32 adv_reg, all_mask = 0; + + if (mask & ADVERTISED_10baseT_Half) + all_mask |= ADVERTISE_10HALF; + if (mask & ADVERTISED_10baseT_Full) + all_mask |= ADVERTISE_10FULL; + if (mask & ADVERTISED_100baseT_Half) + all_mask |= ADVERTISE_100HALF; + if (mask & ADVERTISED_100baseT_Full) + all_mask |= ADVERTISE_100FULL; + + if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) + return 0; + + if ((adv_reg & all_mask) != all_mask) + return 0; + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 tg3_ctrl; + + all_mask = 0; + if (mask & ADVERTISED_1000baseT_Half) + all_mask |= ADVERTISE_1000HALF; + if (mask & ADVERTISED_1000baseT_Full) + all_mask |= ADVERTISE_1000FULL; + + if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) + return 0; + + if ((tg3_ctrl & all_mask) != all_mask) + return 0; + } + return 1; +} + +static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) +{ + u32 curadv, reqadv; + + if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) + return 1; + + curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); + + if (tp->link_config.active_duplex == DUPLEX_FULL) { + if (curadv != reqadv) + return 0; + + if (tg3_flag(tp, PAUSE_AUTONEG)) + tg3_readphy(tp, MII_LPA, rmtadv); + } else { + /* Reprogram the advertisement register, even if it + * does not affect the current link. 
If the link + * gets renegotiated in the future, we can save an + * additional renegotiation cycle by advertising + * it correctly in the first place. + */ + if (curadv != reqadv) { + *lcladv &= ~(ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv); + } + } + + return 1; +} + +static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) +{ + int current_link_up; + u32 bmsr, val; + u32 lcl_adv, rmt_adv; + u16 current_speed; + u8 current_duplex; + int i, err; + + tw32(MAC_EVENT, 0); + + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); + + /* Some third-party PHYs need to be reset on link going + * down. + */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && + netif_carrier_ok(tp->dev)) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + !(bmsr & BMSR_LSTATUS)) + force_reset = 1; + } + if (force_reset) + tg3_phy_reset(tp); + + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (tg3_readphy(tp, MII_BMSR, &bmsr) || + !tg3_flag(tp, INIT_COMPLETE)) + bmsr = 0; + + if (!(bmsr & BMSR_LSTATUS)) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + + tg3_readphy(tp, MII_BMSR, &bmsr); + for (i = 0; i < 1000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + + if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == + TG3_PHY_REV_BCM5401_B0 && + !(bmsr & BMSR_LSTATUS) && + tp->link_config.active_speed == SPEED_1000) { + err = tg3_phy_reset(tp); + if (!err) + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + } + } + } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { + /* 5701 {A0,B0} CRC bug workaround */ + tg3_writephy(tp, 0x15, 0x0a75); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + } +
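/* Why the status reads in this function come in back-to-back pairs:
 * several MII status bits, BMSR_LSTATUS among them, are latched per
 * IEEE 802.3, so the first tg3_readphy(tp, MII_BMSR, &bmsr) returns
 * (and clears) the value latched since the last read, and only the
 * second read reflects the current state. The doubled MII_TG3_ISTAT
 * reads just below work the same way: reading the register clears
 * pending PHY interrupt bits, and the second read makes sure nothing
 * stale remains before the interrupt mask is programmed.
 */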
+ + /* Clear pending interrupts... */ + tg3_readphy(tp, MII_TG3_ISTAT, &val); + tg3_readphy(tp, MII_TG3_ISTAT, &val); + + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) + tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); + else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) + tg3_writephy(tp, MII_TG3_IMASK, ~0); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + else + tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); + } + + current_link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + &val); + if (!err && !(val & (1 << 10))) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + val | (1 << 10)); + goto relink; + } + } + + bmsr = 0; + for (i = 0; i < 100; i++) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) + break; + udelay(40); + } + + if (bmsr & BMSR_LSTATUS) { + u32 aux_stat, bmcr; + + tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); + for (i = 0; i < 2000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && + aux_stat) + break; + } + + tg3_aux_stat_to_speed_duplex(tp, aux_stat, + &current_speed, + &current_duplex); + + bmcr = 0; + for (i = 0; i < 200; i++) { + tg3_readphy(tp, MII_BMCR, &bmcr); + if (tg3_readphy(tp, MII_BMCR, &bmcr)) + continue; + if (bmcr && bmcr != 0x7fff) + break; + udelay(10); + } + + lcl_adv = 0; + rmt_adv = 0; + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + if ((bmcr & BMCR_ANENABLE) && + tg3_copper_is_advertising_all(tp, + tp->link_config.advertising)) { + if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv, + &rmt_adv)) + current_link_up = 1; + } + } else { + if (!(bmcr & BMCR_ANENABLE) && + tp->link_config.speed == current_speed && + tp->link_config.duplex == current_duplex && + tp->link_config.flowctrl == + tp->link_config.active_flowctrl) { + current_link_up = 1; + } + } + + if (current_link_up == 1 && + tp->link_config.active_duplex == DUPLEX_FULL) + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } + +relink: + if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tg3_phy_copper_begin(tp); + + tg3_readphy(tp, MII_BMSR, &bmsr); + if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || + (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + current_link_up = 1; + } + + tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; + if (current_link_up == 1) { + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + if (current_link_up == 1 && + tg3_5700_link_polarity(tp, tp->link_config.active_speed)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + else + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + + /* ??? Without this setting Netgear GA302T PHY does not + * ??? send/receive packets... 
+ */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && + tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { + tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tg3_phy_eee_adjust(tp, current_link_up); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { + /* Polled via timer. */ + tw32_f(MAC_EVENT, 0); + } else { + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + } + udelay(40); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && + current_link_up == 1 && + tp->link_config.active_speed == SPEED_1000 && + (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { + udelay(120); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + tg3_write_mem(tp, + NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC2); + } + + /* Prevent send BD corruption. */ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 oldlnkctl, newlnkctl; + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &oldlnkctl); + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; + else + newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; + if (newlnkctl != oldlnkctl) + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + newlnkctl); + } + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } + + return 0; +} + +struct tg3_fiber_aneginfo { + int state; +#define ANEG_STATE_UNKNOWN 0 +#define ANEG_STATE_AN_ENABLE 1 +#define ANEG_STATE_RESTART_INIT 2 +#define ANEG_STATE_RESTART 3 +#define ANEG_STATE_DISABLE_LINK_OK 4 +#define ANEG_STATE_ABILITY_DETECT_INIT 5 +#define ANEG_STATE_ABILITY_DETECT 6 +#define ANEG_STATE_ACK_DETECT_INIT 7 +#define ANEG_STATE_ACK_DETECT 8 +#define ANEG_STATE_COMPLETE_ACK_INIT 9 +#define ANEG_STATE_COMPLETE_ACK 10 +#define ANEG_STATE_IDLE_DETECT_INIT 11 +#define ANEG_STATE_IDLE_DETECT 12 +#define ANEG_STATE_LINK_OK 13 +#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 +#define ANEG_STATE_NEXT_PAGE_WAIT 15 + + u32 flags; +#define MR_AN_ENABLE 0x00000001 +#define MR_RESTART_AN 0x00000002 +#define MR_AN_COMPLETE 0x00000004 +#define MR_PAGE_RX 0x00000008 +#define MR_NP_LOADED 0x00000010 +#define MR_TOGGLE_TX 0x00000020 +#define MR_LP_ADV_FULL_DUPLEX 0x00000040 +#define MR_LP_ADV_HALF_DUPLEX 0x00000080 +#define MR_LP_ADV_SYM_PAUSE 0x00000100 +#define MR_LP_ADV_ASYM_PAUSE 0x00000200 +#define MR_LP_ADV_REMOTE_FAULT1 0x00000400 +#define MR_LP_ADV_REMOTE_FAULT2 0x00000800 +#define MR_LP_ADV_NEXT_PAGE 0x00001000 +#define MR_TOGGLE_RX 0x00002000 +#define MR_NP_RX 0x00004000 + +#define MR_LINK_OK 0x80000000 + + unsigned long link_time, cur_time; + + u32 ability_match_cfg; + int ability_match_count; + + char ability_match, idle_match, ack_match; + + u32 txconfig, rxconfig; +#define ANEG_CFG_NP 0x00000080 +#define ANEG_CFG_ACK 0x00000040 +#define ANEG_CFG_RF2 0x00000020 +#define ANEG_CFG_RF1 0x00000010 +#define ANEG_CFG_PS2 0x00000001 +#define ANEG_CFG_PS1 0x00008000 +#define ANEG_CFG_HD 0x00004000 +#define ANEG_CFG_FD 0x00002000 +#define ANEG_CFG_INVAL 0x00001f06 + +}; +#define ANEG_OK 0 +#define ANEG_DONE 1 +#define ANEG_TIMER_ENAB 2 +#define ANEG_FAILED -1 + +#define ANEG_STATE_SETTLE_TIME 10000 + +static int tg3_fiber_aneg_smachine(struct tg3 *tp, + struct tg3_fiber_aneginfo *ap) +{ + u16 flowctrl; + unsigned long delta; + u32 rx_cfg_reg; + 
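/* This state machine is driven by polling rather than interrupts;
+	 * fiber_autoneg() below advances it roughly once per microsecond:
+	 *
+	 *	while (++tick < 195000) {
+	 *		status = tg3_fiber_aneg_smachine(tp, &aninfo);
+	 *		if (status == ANEG_DONE || status == ANEG_FAILED)
+	 *			break;
+	 *		udelay(1);
+	 *	}
+	 *
+	 * ANEG_TIMER_ENAB means "not settled yet, keep polling".
+	 */ +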
int ret; + + if (ap->state == ANEG_STATE_UNKNOWN) { + ap->rxconfig = 0; + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + } + ap->cur_time++; + + if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { + rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); + + if (rx_cfg_reg != ap->ability_match_cfg) { + ap->ability_match_cfg = rx_cfg_reg; + ap->ability_match = 0; + ap->ability_match_count = 0; + } else { + if (++ap->ability_match_count > 1) { + ap->ability_match = 1; + ap->ability_match_cfg = rx_cfg_reg; + } + } + if (rx_cfg_reg & ANEG_CFG_ACK) + ap->ack_match = 1; + else + ap->ack_match = 0; + + ap->idle_match = 0; + } else { + ap->idle_match = 1; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->ack_match = 0; + + rx_cfg_reg = 0; + } + + ap->rxconfig = rx_cfg_reg; + ret = ANEG_OK; + + switch (ap->state) { + case ANEG_STATE_UNKNOWN: + if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) + ap->state = ANEG_STATE_AN_ENABLE; + + /* fallthru */ + case ANEG_STATE_AN_ENABLE: + ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); + if (ap->flags & MR_AN_ENABLE) { + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + + ap->state = ANEG_STATE_RESTART_INIT; + } else { + ap->state = ANEG_STATE_DISABLE_LINK_OK; + } + break; + + case ANEG_STATE_RESTART_INIT: + ap->link_time = ap->cur_time; + ap->flags &= ~(MR_NP_LOADED); + ap->txconfig = 0; + tw32(MAC_TX_AUTO_NEG, 0); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ret = ANEG_TIMER_ENAB; + ap->state = ANEG_STATE_RESTART; + + /* fallthru */ + case ANEG_STATE_RESTART: + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) + ap->state = ANEG_STATE_ABILITY_DETECT_INIT; + else + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_DISABLE_LINK_OK: + ret = ANEG_DONE; + break; + + case ANEG_STATE_ABILITY_DETECT_INIT: + ap->flags &= ~(MR_TOGGLE_TX); + ap->txconfig = ANEG_CFG_FD; + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + ap->txconfig |= ANEG_CFG_PS1; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + ap->txconfig |= ANEG_CFG_PS2; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_ABILITY_DETECT; + break; + + case ANEG_STATE_ABILITY_DETECT: + if (ap->ability_match != 0 && ap->rxconfig != 0) + ap->state = ANEG_STATE_ACK_DETECT_INIT; + break; + + case ANEG_STATE_ACK_DETECT_INIT: + ap->txconfig |= ANEG_CFG_ACK; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_ACK_DETECT; + + /* fallthru */ + case ANEG_STATE_ACK_DETECT: + if (ap->ack_match != 0) { + if ((ap->rxconfig & ~ANEG_CFG_ACK) == + (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { + ap->state = ANEG_STATE_COMPLETE_ACK_INIT; + } else { + ap->state = ANEG_STATE_AN_ENABLE; + } + } else if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + } + break; + + case ANEG_STATE_COMPLETE_ACK_INIT: + if (ap->rxconfig & ANEG_CFG_INVAL) { + ret = ANEG_FAILED; + break; + } + ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | + MR_LP_ADV_HALF_DUPLEX | + MR_LP_ADV_SYM_PAUSE | + MR_LP_ADV_ASYM_PAUSE | + MR_LP_ADV_REMOTE_FAULT1 | + 
MR_LP_ADV_REMOTE_FAULT2 | + MR_LP_ADV_NEXT_PAGE | + MR_TOGGLE_RX | + MR_NP_RX); + if (ap->rxconfig & ANEG_CFG_FD) + ap->flags |= MR_LP_ADV_FULL_DUPLEX; + if (ap->rxconfig & ANEG_CFG_HD) + ap->flags |= MR_LP_ADV_HALF_DUPLEX; + if (ap->rxconfig & ANEG_CFG_PS1) + ap->flags |= MR_LP_ADV_SYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_PS2) + ap->flags |= MR_LP_ADV_ASYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_RF1) + ap->flags |= MR_LP_ADV_REMOTE_FAULT1; + if (ap->rxconfig & ANEG_CFG_RF2) + ap->flags |= MR_LP_ADV_REMOTE_FAULT2; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_LP_ADV_NEXT_PAGE; + + ap->link_time = ap->cur_time; + + ap->flags ^= (MR_TOGGLE_TX); + if (ap->rxconfig & 0x0008) + ap->flags |= MR_TOGGLE_RX; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_NP_RX; + ap->flags |= MR_PAGE_RX; + + ap->state = ANEG_STATE_COMPLETE_ACK; + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_COMPLETE_ACK: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + if ((ap->txconfig & ANEG_CFG_NP) == 0 && + !(ap->flags & MR_NP_RX)) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + ret = ANEG_FAILED; + } + } + } + break; + + case ANEG_STATE_IDLE_DETECT_INIT: + ap->link_time = ap->cur_time; + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_IDLE_DETECT; + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_IDLE_DETECT: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + /* XXX another gem from the Broadcom driver :( */ + ap->state = ANEG_STATE_LINK_OK; + } + break; + + case ANEG_STATE_LINK_OK: + ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); + ret = ANEG_DONE; + break; + + case ANEG_STATE_NEXT_PAGE_WAIT_INIT: + /* ??? unimplemented */ + break; + + case ANEG_STATE_NEXT_PAGE_WAIT: + /* ??? unimplemented */ + break; + + default: + ret = ANEG_FAILED; + break; + } + + return ret; +} + +static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) +{ + int res = 0; + struct tg3_fiber_aneginfo aninfo; + int status = ANEG_FAILED; + unsigned int tick; + u32 tmp; + + tw32_f(MAC_TX_AUTO_NEG, 0); + + tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; + tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); + udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); + udelay(40); + + memset(&aninfo, 0, sizeof(aninfo)); + aninfo.flags |= MR_AN_ENABLE; + aninfo.state = ANEG_STATE_UNKNOWN; + aninfo.cur_time = 0; + tick = 0; + while (++tick < 195000) { + status = tg3_fiber_aneg_smachine(tp, &aninfo); + if (status == ANEG_DONE || status == ANEG_FAILED) + break; + + udelay(1); + } + + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + *txflags = aninfo.txconfig; + *rxflags = aninfo.flags; + + if (status == ANEG_DONE && + (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | + MR_LP_ADV_FULL_DUPLEX))) + res = 1; + + return res; +} + +static void tg3_init_bcm8002(struct tg3 *tp) +{ + u32 mac_status = tr32(MAC_STATUS); + int i; + + /* Reset when initting first time or we have a link. */ + if (tg3_flag(tp, INIT_COMPLETE) && + !(mac_status & MAC_STATUS_PCS_SYNCED)) + return; + + /* Set PLL lock range. 
*/ + tg3_writephy(tp, 0x16, 0x8007); + + /* SW reset */ + tg3_writephy(tp, MII_BMCR, BMCR_RESET); + + /* Wait for reset to complete. */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 500; i++) + udelay(10); + + /* Config mode; select PMA/Ch 1 regs. */ + tg3_writephy(tp, 0x10, 0x8411); + + /* Enable auto-lock and comdet, select txclk for tx. */ + tg3_writephy(tp, 0x11, 0x0a10); + + tg3_writephy(tp, 0x18, 0x00a0); + tg3_writephy(tp, 0x16, 0x41ff); + + /* Assert and deassert POR. */ + tg3_writephy(tp, 0x13, 0x0400); + udelay(40); + tg3_writephy(tp, 0x13, 0x0000); + + tg3_writephy(tp, 0x11, 0x0a50); + udelay(40); + tg3_writephy(tp, 0x11, 0x0a10); + + /* Wait for signal to stabilize */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 15000; i++) + udelay(10); + + /* Deselect the channel register so we can read the PHYID + * later. + */ + tg3_writephy(tp, 0x10, 0x8011); +} + +static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) +{ + u16 flowctrl; + u32 sg_dig_ctrl, sg_dig_status; + u32 serdes_cfg, expected_sg_dig_ctrl; + int workaround, port_a; + int current_link_up; + + serdes_cfg = 0; + expected_sg_dig_ctrl = 0; + workaround = 0; + port_a = 1; + current_link_up = 0; + + if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { + workaround = 1; + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + port_a = 0; + + /* preserve bits 0-11,13,14 for signal pre-emphasis */ + /* preserve bits 20-23 for voltage regulator */ + serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; + } + + sg_dig_ctrl = tr32(SG_DIG_CTRL); + + if (tp->link_config.autoneg != AUTONEG_ENABLE) { + if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + } + if (mac_status & MAC_STATUS_PCS_SYNCED) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = 1; + } + goto out; + } + + /* Want auto-negotiation. 
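+ * Build the SG_DIG control word we expect to see and rewrite it
+ * only on mismatch, so a configuration that already matches does
+ * not restart autoneg.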
*/ + expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; + + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; + + if (sg_dig_ctrl != expected_sg_dig_ctrl) { + if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && + tp->serdes_counter && + ((mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_RCVD_CFG)) == + MAC_STATUS_PCS_SYNCED)) { + tp->serdes_counter--; + current_link_up = 1; + goto out; + } +restart_autoneg: + if (workaround) + tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); + udelay(5); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); + + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + sg_dig_status = tr32(SG_DIG_STATUS); + mac_status = tr32(MAC_STATUS); + + if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && + (mac_status & MAC_STATUS_PCS_SYNCED)) { + u32 local_adv = 0, remote_adv = 0; + + if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) + local_adv |= ADVERTISE_1000XPAUSE; + if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) + local_adv |= ADVERTISE_1000XPSE_ASYM; + + if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) + remote_adv |= LPA_1000XPAUSE; + if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; + + tg3_setup_flow_control(tp, local_adv, remote_adv); + current_link_up = 1; + tp->serdes_counter = 0; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { + if (tp->serdes_counter) + tp->serdes_counter--; + else { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + udelay(40); + + /* Link parallel detection - link is up */ + /* only if we have PCS_SYNC and not */ + /* receiving config code words */ + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = 1; + tp->phy_flags |= + TG3_PHYFLG_PARALLEL_DETECT; + tp->serdes_counter = + SERDES_PARALLEL_DET_TIMEOUT; + } else + goto restart_autoneg; + } + } + } else { + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + +out: + return current_link_up; +} + +static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) +{ + int current_link_up = 0; + + if (!(mac_status & MAC_STATUS_PCS_SYNCED)) + goto out; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 txflags, rxflags; + int i; + + if (fiber_autoneg(tp, &txflags, &rxflags)) { + u32 local_adv = 0, remote_adv = 0; + + if (txflags & ANEG_CFG_PS1) + local_adv |= ADVERTISE_1000XPAUSE; + if (txflags & ANEG_CFG_PS2) + local_adv |= ADVERTISE_1000XPSE_ASYM; + + if (rxflags & MR_LP_ADV_SYM_PAUSE) + remote_adv |= LPA_1000XPAUSE; + if (rxflags & MR_LP_ADV_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; + + tg3_setup_flow_control(tp, local_adv, remote_adv); + + current_link_up = 1; + } + for (i = 0; i < 30; i++) { + udelay(20); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + if ((tr32(MAC_STATUS) & + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)) == 0) + break; + } + + mac_status = 
tr32(MAC_STATUS); + if (current_link_up == 0 && + (mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) + current_link_up = 1; + } else { + tg3_setup_flow_control(tp, 0, 0); + + /* Forcing 1000FD link up. */ + current_link_up = 1; + + tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); + udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + +out: + return current_link_up; +} + +static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) +{ + u32 orig_pause_cfg; + u16 orig_active_speed; + u8 orig_active_duplex; + u32 mac_status; + int current_link_up; + int i; + + orig_pause_cfg = tp->link_config.active_flowctrl; + orig_active_speed = tp->link_config.active_speed; + orig_active_duplex = tp->link_config.active_duplex; + + if (!tg3_flag(tp, HW_AUTONEG) && + netif_carrier_ok(tp->dev) && + tg3_flag(tp, INIT_COMPLETE)) { + mac_status = tr32(MAC_STATUS); + mac_status &= (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_RCVD_CFG); + if (mac_status == (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + return 0; + } + } + + tw32_f(MAC_TX_AUTO_NEG, 0); + + tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + if (tp->phy_id == TG3_PHY_ID_BCM8002) + tg3_init_bcm8002(tp); + + /* Enable link change event even when serdes polling. */ + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + udelay(40); + + current_link_up = 0; + mac_status = tr32(MAC_STATUS); + + if (tg3_flag(tp, HW_AUTONEG)) + current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); + else + current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); + + tp->napi[0].hw_status->status = + (SD_STATUS_UPDATED | + (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); + + for (i = 0; i < 100; i++) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(5); + if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_LNKSTATE_CHANGED)) == 0) + break; + } + + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { + current_link_up = 0; + if (tp->link_config.autoneg == AUTONEG_ENABLE && + tp->serdes_counter == 0) { + tw32_f(MAC_MODE, (tp->mac_mode | + MAC_MODE_SEND_CONFIGS)); + udelay(1); + tw32_f(MAC_MODE, tp->mac_mode); + } + } + + if (current_link_up == 1) { + tp->link_config.active_speed = SPEED_1000; + tp->link_config.active_duplex = DUPLEX_FULL; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON)); + } else { + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE)); + } + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } else { + u32 now_pause_cfg = tp->link_config.active_flowctrl; + if (orig_pause_cfg != now_pause_cfg || + orig_active_speed != tp->link_config.active_speed || + orig_active_duplex != tp->link_config.active_duplex) + tg3_link_report(tp); + } + + return 0; +} + +static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) +{ + int current_link_up, err = 0; + u32 bmsr, bmcr; + u16 current_speed; + u8 current_duplex; + u32 local_adv, remote_adv; + + tp->mac_mode |= 
MAC_MODE_PORT_MODE_GMII; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tw32(MAC_EVENT, 0); + + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + + if (force_reset) + tg3_phy_reset(tp); + + current_link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + + err |= tg3_readphy(tp, MII_BMCR, &bmcr); + + if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + /* do nothing, just check for link up at the end */ + } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 adv, new_adv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | + ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM | + ADVERTISE_SLCT); + + new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + + if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) + new_adv |= ADVERTISE_1000XHALF; + if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000XFULL; + + if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { + tg3_writephy(tp, MII_ADVERTISE, new_adv); + bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; + tg3_writephy(tp, MII_BMCR, bmcr); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + return err; + } + } else { + u32 new_bmcr; + + bmcr &= ~BMCR_SPEED1000; + new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); + + if (tp->link_config.duplex == DUPLEX_FULL) + new_bmcr |= BMCR_FULLDPLX; + + if (new_bmcr != bmcr) { + /* BMCR_SPEED1000 is a reserved bit that needs + * to be set on write. 
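+ * (Presumably because this link is a 1000 Mb/s SerDes, the normal
+ * speed-select bits have nothing to select; the PHY just expects
+ * the bit to stay set.)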
+ */ + new_bmcr |= BMCR_SPEED1000; + + /* Force a linkdown */ + if (netif_carrier_ok(tp->dev)) { + u32 adv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + adv &= ~(ADVERTISE_1000XFULL | + ADVERTISE_1000XHALF | + ADVERTISE_SLCT); + tg3_writephy(tp, MII_ADVERTISE, adv); + tg3_writephy(tp, MII_BMCR, bmcr | + BMCR_ANRESTART | + BMCR_ANENABLE); + udelay(10); + netif_carrier_off(tp->dev); + } + tg3_writephy(tp, MII_BMCR, new_bmcr); + bmcr = new_bmcr; + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + } + + if (bmsr & BMSR_LSTATUS) { + current_speed = SPEED_1000; + current_link_up = 1; + if (bmcr & BMCR_FULLDPLX) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + + local_adv = 0; + remote_adv = 0; + + if (bmcr & BMCR_ANENABLE) { + u32 common; + + err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); + err |= tg3_readphy(tp, MII_LPA, &remote_adv); + common = local_adv & remote_adv; + if (common & (ADVERTISE_1000XHALF | + ADVERTISE_1000XFULL)) { + if (common & ADVERTISE_1000XFULL) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + } else if (!tg3_flag(tp, 5780_CLASS)) { + /* Link is up via parallel detect */ + } else { + current_link_up = 0; + } + } + } + + if (current_link_up == 1 && current_duplex == DUPLEX_FULL) + tg3_setup_flow_control(tp, local_adv, remote_adv); + + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else { + netif_carrier_off(tp->dev); + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + tg3_link_report(tp); + } + return err; +} + +static void tg3_serdes_parallel_detect(struct tg3 *tp) +{ + if (tp->serdes_counter) { + /* Give autoneg time to complete. */ + tp->serdes_counter--; + return; + } + + if (!netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE)) { + u32 bmcr; + + tg3_readphy(tp, MII_BMCR, &bmcr); + if (bmcr & BMCR_ANENABLE) { + u32 phy1, phy2; + + /* Select shadow register 0x1f */ + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); + tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); + + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + + if ((phy1 & 0x10) && !(phy2 & 0x20)) { + /* We have signal detect and not receiving + * config code words, link is up by parallel + * detection. 
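+ * To match the non-autonegotiating partner, autoneg is turned off
+ * and 1000/full is forced below.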
+ */ + + bmcr &= ~BMCR_ANENABLE; + bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; + tg3_writephy(tp, MII_BMCR, bmcr); + tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; + } + } + } else if (netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE) && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + u32 phy2; + + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + if (phy2 & 0x20) { + u32 bmcr; + + /* Config code words received, turn on autoneg. */ + tg3_readphy(tp, MII_BMCR, &bmcr); + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); + + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + } + } +} + +static int tg3_setup_phy(struct tg3 *tp, int force_reset) +{ + u32 val; + int err; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + err = tg3_setup_fiber_phy(tp, force_reset); + else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + err = tg3_setup_fiber_mii_phy(tp, force_reset); + else + err = tg3_setup_copper_phy(tp, force_reset); + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + u32 scale; + + val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; + if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) + scale = 65; + else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) + scale = 6; + else + scale = 12; + + val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; + val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); + tw32(GRC_MISC_CFG, val); + } + + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + if (tp->link_config.active_speed == SPEED_1000 && + tp->link_config.active_duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, val | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); + else + tw32(MAC_TX_LENGTHS, val | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); + + if (!tg3_flag(tp, 5705_PLUS)) { + if (netif_carrier_ok(tp->dev)) { + tw32(HOSTCC_STAT_COAL_TICKS, + tp->coal.stats_block_coalesce_usecs); + } else { + tw32(HOSTCC_STAT_COAL_TICKS, 0); + } + } + + if (tg3_flag(tp, ASPM_WORKAROUND)) { + val = tr32(PCIE_PWR_MGMT_THRESH); + if (!netif_carrier_ok(tp->dev)) + val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | + tp->pwrmgmt_thresh; + else + val |= PCIE_PWR_MGMT_L1_THRESH_MSK; + tw32(PCIE_PWR_MGMT_THRESH, val); + } + + return err; +} + +static inline int tg3_irq_sync(struct tg3 *tp) +{ + return tp->irq_sync; +} + +static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) +{ + int i; + + dst = (u32 *)((u8 *)dst + off); + for (i = 0; i < len; i += sizeof(u32)) + *dst++ = tr32(off + i); +} + +static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) +{ + tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); + tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); + tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); + tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); + tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); + tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); + tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); + tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); + tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); + tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); + tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); 
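+	/* Each (offset, length) window above lands at the same offset in
+	 * the dump buffer, e.g. the RCVCC_MODE call fills the five words
+	 * regs[RCVCC_MODE / 4] .. regs[RCVCC_MODE / 4 + 4].
+	 */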
+ tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); + tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); + tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); + + if (tg3_flag(tp, SUPPORT_MSIX)) + tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); + + tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); + tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); + tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); + + if (!tg3_flag(tp, 5705_PLUS)) { + tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); + } + + tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); + tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); + tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); + tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); + + if (tg3_flag(tp, NVRAM)) + tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); +} + +static void tg3_dump_state(struct tg3 *tp) +{ + int i; + u32 *regs; + + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); + if (!regs) { + netdev_err(tp->dev, "Failed allocating register dump buffer\n"); + return; + } + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Read up to but not including private PCI registers */ + for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) + regs[i / sizeof(u32)] = tr32(i); + } else + tg3_dump_legacy_regs(tp, regs); + + for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { + if (!regs[i + 0] && !regs[i + 1] && + !regs[i + 2] && !regs[i + 3]) + continue; + + netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + i * 4, + regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); + } + + kfree(regs); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + /* SW status block */ + netdev_err(tp->dev, + "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", + i, + tnapi->hw_status->status, + tnapi->hw_status->status_tag, + tnapi->hw_status->rx_jumbo_consumer, + tnapi->hw_status->rx_consumer, + tnapi->hw_status->rx_mini_consumer, + tnapi->hw_status->idx[0].rx_producer, + tnapi->hw_status->idx[0].tx_consumer); + + netdev_err(tp->dev, + "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", + i, + tnapi->last_tag, tnapi->last_irq_tag, + tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, + tnapi->rx_rcb_ptr, + tnapi->prodring.rx_std_prod_idx, + tnapi->prodring.rx_std_cons_idx, + tnapi->prodring.rx_jmb_prod_idx, + tnapi->prodring.rx_jmb_cons_idx); + } +} + +/* This is called whenever we suspect that the system chipset is re- + * ordering the sequence of MMIO to the tx send mailbox. The symptom + * is bogus tx completions. We try to recover by setting the + * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later + * in the workqueue. + */ +static void tg3_tx_recover(struct tg3 *tp) +{ + BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || + tp->write32_tx_mbox == tg3_write_indirect_mbox); + + netdev_warn(tp->dev, + "The system may be re-ordering memory-mapped I/O " + "cycles to the network device, attempting to recover. 
" + "Please report the problem to the driver maintainer " + "and include system chipset information.\n"); + + spin_lock(&tp->lock); + tg3_flag_set(tp, TX_RECOVERY_PENDING); + spin_unlock(&tp->lock); +} + +static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) +{ + /* Tell compiler to fetch tx indices from memory. */ + barrier(); + return tnapi->tx_pending - + ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); +} + +/* Tigon3 never reports partial packet sends. So we do not + * need special logic to handle SKBs that have not had all + * of their frags sent yet, like SunGEM does. + */ +static void tg3_tx(struct tg3_napi *tnapi) +{ + struct tg3 *tp = tnapi->tp; + u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; + u32 sw_idx = tnapi->tx_cons; + struct netdev_queue *txq; + int index = tnapi - tp->napi; + + if (tg3_flag(tp, ENABLE_TSS)) + index--; + + txq = netdev_get_tx_queue(tp->dev, index); + + while (sw_idx != hw_idx) { + struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; + struct sk_buff *skb = ri->skb; + int i, tx_bug = 0; + + if (unlikely(skb == NULL)) { + tg3_tx_recover(tp); + return; + } + + pci_unmap_single(tp->pdev, + dma_unmap_addr(ri, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + ri->skb = NULL; + + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } + + sw_idx = NEXT_TX(sw_idx); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + ri = &tnapi->tx_buffers[sw_idx]; + if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) + tx_bug = 1; + + pci_unmap_page(tp->pdev, + dma_unmap_addr(ri, mapping), + skb_shinfo(skb)->frags[i].size, + PCI_DMA_TODEVICE); + + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } + + sw_idx = NEXT_TX(sw_idx); + } + + dev_kfree_skb(skb); + + if (unlikely(tx_bug)) { + tg3_tx_recover(tp); + return; + } + } + + tnapi->tx_cons = sw_idx; + + /* Need to make the tx_cons update visible to tg3_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that tg3_start_xmit() + * will miss it and cause the queue to be stopped forever. + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } +} + +static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) +{ + if (!ri->skb) + return; + + pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), + map_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(ri->skb); + ri->skb = NULL; +} + +/* Returns size of skb allocated or < 0 on error. + * + * We only need to fill in the address because the other members + * of the RX descriptor are invariant, see tg3_init_rings. + * + * Note the purposeful assymetry of cpu vs. chip accesses. For + * posting buffers we only dirty the first cache line of the RX + * descriptor (containing the address). Whereas for the RX status + * buffers the cpu only reads the last cacheline of the RX descriptor + * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 
+ */ +static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, + u32 opaque_key, u32 dest_idx_unmasked) +{ + struct tg3_rx_buffer_desc *desc; + struct ring_info *map; + struct sk_buff *skb; + dma_addr_t mapping; + int skb_size, dest_idx; + + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + desc = &tpr->rx_std[dest_idx]; + map = &tpr->rx_std_buffers[dest_idx]; + skb_size = tp->rx_pkt_map_sz; + break; + + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + desc = &tpr->rx_jmb[dest_idx].std; + map = &tpr->rx_jmb_buffers[dest_idx]; + skb_size = TG3_RX_JMB_MAP_SZ; + break; + + default: + return -EINVAL; + } + + /* Do not overwrite any of the map or rp information + * until we are sure we can commit to a new buffer. + * + * Callers depend upon this behavior and assume that + * we leave everything unchanged if we fail. + */ + skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset); + if (skb == NULL) + return -ENOMEM; + + skb_reserve(skb, tp->rx_offset); + + mapping = pci_map_single(tp->pdev, skb->data, skb_size, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + return -EIO; + } + + map->skb = skb; + dma_unmap_addr_set(map, mapping, mapping); + + desc->addr_hi = ((u64)mapping >> 32); + desc->addr_lo = ((u64)mapping & 0xffffffff); + + return skb_size; +} + +/* We only need to move over in the address because the other + * members of the RX descriptor are invariant. See notes above + * tg3_alloc_rx_skb for full details. + */ +static void tg3_recycle_rx(struct tg3_napi *tnapi, + struct tg3_rx_prodring_set *dpr, + u32 opaque_key, int src_idx, + u32 dest_idx_unmasked) +{ + struct tg3 *tp = tnapi->tp; + struct tg3_rx_buffer_desc *src_desc, *dest_desc; + struct ring_info *src_map, *dest_map; + struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; + int dest_idx; + + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + dest_desc = &dpr->rx_std[dest_idx]; + dest_map = &dpr->rx_std_buffers[dest_idx]; + src_desc = &spr->rx_std[src_idx]; + src_map = &spr->rx_std_buffers[src_idx]; + break; + + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + dest_desc = &dpr->rx_jmb[dest_idx].std; + dest_map = &dpr->rx_jmb_buffers[dest_idx]; + src_desc = &spr->rx_jmb[src_idx].std; + src_map = &spr->rx_jmb_buffers[src_idx]; + break; + + default: + return; + } + + dest_map->skb = src_map->skb; + dma_unmap_addr_set(dest_map, mapping, + dma_unmap_addr(src_map, mapping)); + dest_desc->addr_hi = src_desc->addr_hi; + dest_desc->addr_lo = src_desc->addr_lo; + + /* Ensure that the update to the skb happens after the physical + * addresses have been transferred to the new BD location. + */ + smp_wmb(); + + src_map->skb = NULL; +} + +/* The RX ring scheme is composed of multiple rings which post fresh + * buffers to the chip, and one special ring the chip uses to report + * status back to the host. + * + * The special ring reports the status of received packets to the + * host. The chip does not write into the original descriptor the + * RX buffer was obtained from. The chip simply takes the original + * descriptor as provided by the host, updates the status and length + * field, then writes this into the next status ring entry. + * + * Each ring the host uses to post buffers to the chip is described + * by a TG3_BDINFO entry in the chips SRAM area. 
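+ * (There is one TG3_BDINFO per posting ring: standard, jumbo and,
+ * on chips that have it, a mini ring.)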
When a packet arrives, + * it is first placed into the on-chip ram. When the packet's length + * is known, it walks down the TG3_BDINFO entries to select the ring. + * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO + * which is within the range of the new packet's length is chosen. + * + * The "separate ring for rx status" scheme may sound queer, but it makes + * sense from a cache coherency perspective. If only the host writes + * to the buffer post rings, and only the chip writes to the rx status + * rings, then cache lines never move beyond shared-modified state. + * If both the host and chip were to write into the same ring, cache line + * eviction could occur since both entities want it in an exclusive state. + */ +static int tg3_rx(struct tg3_napi *tnapi, int budget) +{ + struct tg3 *tp = tnapi->tp; + u32 work_mask, rx_std_posted = 0; + u32 std_prod_idx, jmb_prod_idx; + u32 sw_idx = tnapi->rx_rcb_ptr; + u16 hw_idx; + int received; + struct tg3_rx_prodring_set *tpr = &tnapi->prodring; + + hw_idx = *(tnapi->rx_rcb_prod_idx); + /* + * We need to order the read of hw_idx and the read of + * the opaque cookie. + */ + rmb(); + work_mask = 0; + received = 0; + std_prod_idx = tpr->rx_std_prod_idx; + jmb_prod_idx = tpr->rx_jmb_prod_idx; + while (sw_idx != hw_idx && budget > 0) { + struct ring_info *ri; + struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; + unsigned int len; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 opaque_key, desc_idx, *post_ptr; + + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + if (opaque_key == RXD_OPAQUE_RING_STD) { + ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &std_prod_idx; + rx_std_posted++; + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &jmb_prod_idx; + } else + goto next_pkt_nopost; + + work_mask |= opaque_key; + + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { + drop_it: + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + drop_it_no_recycle: + /* Other statistics kept track of by card. */ + tp->rx_dropped++; + goto next_pkt; + } + + len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - + ETH_FCS_LEN; + + if (len > TG3_RX_COPY_THRESH(tp)) { + int skb_size; + + skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, + *post_ptr); + if (skb_size < 0) + goto drop_it; + + pci_unmap_single(tp->pdev, dma_addr, skb_size, + PCI_DMA_FROMDEVICE); + + /* Ensure that the update to the skb happens + * after the usage of the old DMA mapping. + */ + smp_wmb(); + + ri->skb = NULL; + + skb_put(skb, len); + } else { + struct sk_buff *copy_skb; + + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + + copy_skb = netdev_alloc_skb(tp->dev, len + + TG3_RAW_IP_ALIGN); + if (copy_skb == NULL) + goto drop_it_no_recycle; + + skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); + skb_put(copy_skb, len); + pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(skb, copy_skb->data, len); + pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + + /* We'll reuse the original ring buffer. 
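+ * A packet at or below TG3_RX_COPY_THRESH() was copied into a
+ * fresh skb above, so the DMA buffer stays on the ring and is
+ * handed straight back to the hardware.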
*/ + skb = copy_skb; + } + + if ((tp->dev->features & NETIF_F_RXCSUM) && + (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT) == 0xffff)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + skb->protocol = eth_type_trans(skb, tp->dev); + + if (len > (tp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(ETH_P_8021Q)) { + dev_kfree_skb(skb); + goto drop_it_no_recycle; + } + + if (desc->type_flags & RXD_FLAG_VLAN && + !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, + desc->err_vlan & RXD_VLAN_MASK); + + napi_gro_receive(&tnapi->napi, skb); + + received++; + budget--; + +next_pkt: + (*post_ptr)++; + + if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + work_mask &= ~RXD_OPAQUE_RING_STD; + rx_std_posted = 0; + } +next_pkt_nopost: + sw_idx++; + sw_idx &= tp->rx_ret_ring_mask; + + /* Refresh hw_idx to see if there is new work */ + if (sw_idx == hw_idx) { + hw_idx = *(tnapi->rx_rcb_prod_idx); + rmb(); + } + } + + /* ACK the status ring. */ + tnapi->rx_rcb_ptr = sw_idx; + tw32_rx_mbox(tnapi->consmbox, sw_idx); + + /* Refill RX ring(s). */ + if (!tg3_flag(tp, ENABLE_RSS)) { + if (work_mask & RXD_OPAQUE_RING_STD) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + } + if (work_mask & RXD_OPAQUE_RING_JUMBO) { + tpr->rx_jmb_prod_idx = jmb_prod_idx & + tp->rx_jmb_ring_mask; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + tpr->rx_jmb_prod_idx); + } + mmiowb(); + } else if (work_mask) { + /* rx_std_buffers[] and rx_jmb_buffers[] entries must be + * updated before the producer indices can be updated. + */ + smp_wmb(); + + tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; + tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; + + if (tnapi != &tp->napi[1]) + napi_schedule(&tp->napi[1].napi); + } + + return received; +} + +static void tg3_poll_link(struct tg3 *tp) +{ + /* handle link change and other phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + struct tg3_hw_status *sblk = tp->napi[0].hw_status; + + if (sblk->status & SD_STATUS_LINK_CHG) { + sblk->status = SD_STATUS_UPDATED | + (sblk->status & ~SD_STATUS_LINK_CHG); + spin_lock(&tp->lock); + if (tg3_flag(tp, USE_PHYLIB)) { + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + } else + tg3_setup_phy(tp, 0); + spin_unlock(&tp->lock); + } + } +} + +static int tg3_rx_prodring_xfer(struct tg3 *tp, + struct tg3_rx_prodring_set *dpr, + struct tg3_rx_prodring_set *spr) +{ + u32 si, di, cpycnt, src_prod_idx; + int i, err = 0; + + while (1) { + src_prod_idx = spr->rx_std_prod_idx; + + /* Make sure updates to the rx_std_buffers[] entries and the + * standard producer index are seen in the correct order. 
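+ * (This read barrier pairs with the smp_wmb() that tg3_rx() issues
+ * before publishing its producer indices in the RSS path.)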
+ */ + smp_rmb(); + + if (spr->rx_std_cons_idx == src_prod_idx) + break; + + if (spr->rx_std_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_std_cons_idx; + else + cpycnt = tp->rx_std_ring_mask + 1 - + spr->rx_std_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); + + si = spr->rx_std_cons_idx; + di = dpr->rx_std_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_std_buffers[i].skb) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_std_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. + */ + smp_rmb(); + + memcpy(&dpr->rx_std_buffers[di], + &spr->rx_std_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_std[si]; + dbd = &dpr->rx_std[di]; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & + tp->rx_std_ring_mask; + dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & + tp->rx_std_ring_mask; + } + + while (1) { + src_prod_idx = spr->rx_jmb_prod_idx; + + /* Make sure updates to the rx_jmb_buffers[] entries and + * the jumbo producer index are seen in the correct order. + */ + smp_rmb(); + + if (spr->rx_jmb_cons_idx == src_prod_idx) + break; + + if (spr->rx_jmb_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; + else + cpycnt = tp->rx_jmb_ring_mask + 1 - + spr->rx_jmb_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); + + si = spr->rx_jmb_cons_idx; + di = dpr->rx_jmb_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_jmb_buffers[i].skb) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_jmb_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. + */ + smp_rmb(); + + memcpy(&dpr->rx_jmb_buffers[di], + &spr->rx_jmb_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_jmb[si].std; + dbd = &dpr->rx_jmb[di].std; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & + tp->rx_jmb_ring_mask; + dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & + tp->rx_jmb_ring_mask; + } + + return err; +} + +static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) +{ + struct tg3 *tp = tnapi->tp; + + /* run TX completion thread */ + if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { + tg3_tx(tnapi); + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + return work_done; + } + + /* run RX thread, within the bounds set by NAPI. 
+ * All RX "locking" is done by ensuring outside + * code synchronizes with tg3->napi.poll() + */ + if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_done += tg3_rx(tnapi, budget - work_done); + + if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { + struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; + int i, err = 0; + u32 std_prod_idx = dpr->rx_std_prod_idx; + u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; + + for (i = 1; i < tp->irq_cnt; i++) + err |= tg3_rx_prodring_xfer(tp, dpr, + &tp->napi[i].prodring); + + wmb(); + + if (std_prod_idx != dpr->rx_std_prod_idx) + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + dpr->rx_std_prod_idx); + + if (jmb_prod_idx != dpr->rx_jmb_prod_idx) + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + dpr->rx_jmb_prod_idx); + + mmiowb(); + + if (err) + tw32_f(HOSTCC_MODE, tp->coal_now); + } + + return work_done; +} + +static int tg3_poll_msix(struct napi_struct *napi, int budget) +{ + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + work_done = tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. + */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + + /* check for RX/TX work to do */ + if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && + *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { + napi_complete(napi); + /* Reenable interrupts. */ + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + break; + } + } + + return work_done; + +tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + schedule_work(&tp->reset_task); + return work_done; +} + +static void tg3_process_error(struct tg3 *tp) +{ + u32 val; + bool real_error = false; + + if (tg3_flag(tp, ERROR_PROCESSED)) + return; + + /* Check Flow Attention register */ + val = tr32(HOSTCC_FLOW_ATTN); + if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { + netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { + netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { + netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); + real_error = true; + } + + if (!real_error) + return; + + tg3_dump_state(tp); + + tg3_flag_set(tp, ERROR_PROCESSED); + schedule_work(&tp->reset_task); +} + +static int tg3_poll(struct napi_struct *napi, int budget) +{ + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + if (sblk->status & SD_STATUS_ERROR) + tg3_process_error(tp); + + tg3_poll_link(tp); + + work_done = tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + if (tg3_flag(tp, TAGGED_STATUS)) { + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. 
+ */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + } else + sblk->status &= ~SD_STATUS_UPDATED; + + if (likely(!tg3_has_work(tnapi))) { + napi_complete(napi); + tg3_int_reenable(tnapi); + break; + } + } + + return work_done; + +tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + schedule_work(&tp->reset_task); + return work_done; +} + +static void tg3_napi_disable(struct tg3 *tp) +{ + int i; + + for (i = tp->irq_cnt - 1; i >= 0; i--) + napi_disable(&tp->napi[i].napi); +} + +static void tg3_napi_enable(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) + napi_enable(&tp->napi[i].napi); +} + +static void tg3_napi_init(struct tg3 *tp) +{ + int i; + + netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); + for (i = 1; i < tp->irq_cnt; i++) + netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); +} + +static void tg3_napi_fini(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) + netif_napi_del(&tp->napi[i].napi); +} + +static inline void tg3_netif_stop(struct tg3 *tp) +{ + tp->dev->trans_start = jiffies; /* prevent tx timeout */ + tg3_napi_disable(tp); + netif_tx_disable(tp->dev); +} + +static inline void tg3_netif_start(struct tg3 *tp) +{ + /* NOTE: unconditional netif_tx_wake_all_queues is only + * appropriate so long as all callers are assured to + * have free tx slots (such as after tg3_init_hw) + */ + netif_tx_wake_all_queues(tp->dev); + + tg3_napi_enable(tp); + tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; + tg3_enable_ints(tp); +} + +static void tg3_irq_quiesce(struct tg3 *tp) +{ + int i; + + BUG_ON(tp->irq_sync); + + tp->irq_sync = 1; + smp_mb(); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); +} + +/* Fully shutdown all tg3 driver activity elsewhere in the system. + * If irq_sync is non-zero, then the IRQ handler must be synchronized + * with as well. Most of the time, this is not necessary except when + * shutting down the device. + */ +static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) +{ + spin_lock_bh(&tp->lock); + if (irq_sync) + tg3_irq_quiesce(tp); +} + +static inline void tg3_full_unlock(struct tg3 *tp) +{ + spin_unlock_bh(&tp->lock); +} + +/* One-shot MSI handler - Chip automatically disables interrupt + * after sending MSI so driver doesn't have to do it. + */ +static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); + + return IRQ_HANDLED; +} + +/* MSI ISR - No need to check for interrupt sharing and no need to + * flush status block and interrupt mailbox. PCI ordering rules + * guarantee that MSI will arrive after the status block. + */ +static irqreturn_t tg3_msi(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. 
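+ * Interrupts are re-enabled once NAPI has caught up, by writing
+ * last_tag << 24 back to this mailbox (see tg3_poll_msix()).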
+ */ + tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); + + return IRQ_RETVAL(1); +} + +static irqreturn_t tg3_interrupt(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; + + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } + + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + if (tg3_irq_sync(tp)) + goto out; + sblk->status &= ~SD_STATUS_UPDATED; + if (likely(tg3_has_work(tnapi))) { + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + napi_schedule(&tnapi->napi); + } else { + /* No work, shared interrupt perhaps? re-enable + * interrupts, and flush that PCI write + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + 0x00000000); + } +out: + return IRQ_RETVAL(handled); +} + +static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; + + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } + + /* + * writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + + /* + * In a shared interrupt configuration, sometimes other devices' + * interrupts will scream. We record the current status tag here + * so that the above check can report that the screaming interrupts + * are unhandled. Eventually they will be silenced. 
+ */ + tnapi->last_irq_tag = sblk->status_tag; + + if (tg3_irq_sync(tp)) + goto out; + + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + napi_schedule(&tnapi->napi); + +out: + return IRQ_RETVAL(handled); +} + +/* ISR for interrupt test */ +static irqreturn_t tg3_test_isr(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + + if ((sblk->status & SD_STATUS_UPDATED) || + !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + tg3_disable_ints(tp); + return IRQ_RETVAL(1); + } + return IRQ_RETVAL(0); +} + +static int tg3_init_hw(struct tg3 *, int); +static int tg3_halt(struct tg3 *, int, int); + +/* Restart hardware after configuration changes, self-test, etc. + * Invoked with tp->lock held. + */ +static int tg3_restart_hw(struct tg3 *tp, int reset_phy) + __releases(tp->lock) + __acquires(tp->lock) +{ + int err; + + err = tg3_init_hw(tp, reset_phy); + if (err) { + netdev_err(tp->dev, + "Failed to re-initialize device, aborting\n"); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_full_unlock(tp); + del_timer_sync(&tp->timer); + tp->irq_sync = 0; + tg3_napi_enable(tp); + dev_close(tp->dev); + tg3_full_lock(tp, 0); + } + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void tg3_poll_controller(struct net_device *dev) +{ + int i; + struct tg3 *tp = netdev_priv(dev); + + for (i = 0; i < tp->irq_cnt; i++) + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); +} +#endif + +static void tg3_reset_task(struct work_struct *work) +{ + struct tg3 *tp = container_of(work, struct tg3, reset_task); + int err; + unsigned int restart_timer; + + tg3_full_lock(tp, 0); + + if (!netif_running(tp->dev)) { + tg3_full_unlock(tp); + return; + } + + tg3_full_unlock(tp); + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 1); + + restart_timer = tg3_flag(tp, RESTART_TIMER); + tg3_flag_clear(tp, RESTART_TIMER); + + if (tg3_flag(tp, TX_RECOVERY_PENDING)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + tp->write32_rx_mbox = tg3_write_flush_reg32; + tg3_flag_set(tp, MBOX_WRITE_REORDER); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + } + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + err = tg3_init_hw(tp, 1); + if (err) + goto out; + + tg3_netif_start(tp); + + if (restart_timer) + mod_timer(&tp->timer, jiffies + 1); + +out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); +} + +static void tg3_tx_timeout(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + if (netif_msg_tx_err(tp)) { + netdev_err(dev, "transmit timed out, resetting\n"); + tg3_dump_state(tp); + } + + schedule_work(&tp->reset_task); +} + +/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ +static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) +{ + u32 base = (u32) mapping & 0xffffffff; + + return (base > 0xffffdcc0) && (base + len + 8 < base); +} + +/* Test for DMA addresses > 40-bit */ +static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, + int len) +{ +#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) + if (tg3_flag(tp, 40BIT_DMA_BUG)) + return ((u64) mapping + len) > DMA_BIT_MASK(40); + return 0; +#else + return 0; +#endif +} + +static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, + dma_addr_t mapping, u32 len, u32 flags, + u32 mss, u32 vlan) +{ + txbd->addr_hi = ((u64) mapping >> 32); + txbd->addr_lo = ((u64) mapping & 0xffffffff); + txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); + txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << 
TXD_VLAN_TAG_SHIFT); +} + +static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, + dma_addr_t map, u32 len, u32 flags, + u32 mss, u32 vlan) +{ + struct tg3 *tp = tnapi->tp; + bool hwbug = false; + + if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) + hwbug = 1; + + if (tg3_4g_overflow_test(map, len)) + hwbug = 1; + + if (tg3_40bit_overflow_test(tp, map, len)) + hwbug = 1; + + if (tg3_flag(tp, 4K_FIFO_LIMIT)) { + u32 tmp_flag = flags & ~TXD_FLAG_END; + while (len > TG3_TX_BD_DMA_MAX) { + u32 frag_len = TG3_TX_BD_DMA_MAX; + len -= TG3_TX_BD_DMA_MAX; + + if (len) { + tnapi->tx_buffers[*entry].fragmented = true; + /* Avoid the 8byte DMA problem */ + if (len <= 8) { + len += TG3_TX_BD_DMA_MAX / 2; + frag_len = TG3_TX_BD_DMA_MAX / 2; + } + } else + tmp_flag = flags; + + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + frag_len, tmp_flag, mss, vlan); + (*budget)--; + *entry = NEXT_TX(*entry); + } else { + hwbug = 1; + break; + } + + map += frag_len; + } + + if (len) { + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + (*budget)--; + *entry = NEXT_TX(*entry); + } else { + hwbug = 1; + } + } + } else { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + *entry = NEXT_TX(*entry); + } + + return hwbug; +} + +static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) +{ + int i; + struct sk_buff *skb; + struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; + + skb = txb->skb; + txb->skb = NULL; + + pci_unmap_single(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + + for (i = 0; i < last; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + + pci_unmap_page(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + frag->size, PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + } +} + +/* Workaround 4GB and 40-bit hardware DMA bugs. */ +static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, + struct sk_buff *skb, + u32 *entry, u32 *budget, + u32 base_flags, u32 mss, u32 vlan) +{ + struct tg3 *tp = tnapi->tp; + struct sk_buff *new_skb; + dma_addr_t new_addr = 0; + int ret = 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + new_skb = skb_copy(skb, GFP_ATOMIC); + else { + int more_headroom = 4 - ((unsigned long)skb->data & 3); + + new_skb = skb_copy_expand(skb, + skb_headroom(skb) + more_headroom, + skb_tailroom(skb), GFP_ATOMIC); + } + + if (!new_skb) { + ret = -1; + } else { + /* New SKB is guaranteed to be linear. 
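+		 *
+		 * [Editor's note -- an illustrative sketch, not part of the
+		 * original patch.  tg3_tx_frag_set() above carves an
+		 * oversized fragment so no descriptor maps more than
+		 * TG3_TX_BD_DMA_MAX bytes and none maps 8 bytes or fewer;
+		 * emit_bd() is a hypothetical stand-in for tg3_tx_set_bd().]
+		 *
+		 *	while (len > TG3_TX_BD_DMA_MAX) {
+		 *		u32 frag_len = TG3_TX_BD_DMA_MAX;
+		 *		len -= TG3_TX_BD_DMA_MAX;
+		 *		if (len && len <= 8) {
+		 *			len += TG3_TX_BD_DMA_MAX / 2;
+		 *			frag_len = TG3_TX_BD_DMA_MAX / 2;
+		 *		}
+		 *		emit_bd(map, frag_len);
+		 *		map += frag_len;
+		 *	}
+		 *	if (len)
+		 *		emit_bd(map, len);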
		 */
+		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+					  PCI_DMA_TODEVICE);
+		/* Make sure the mapping succeeded */
+		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+			dev_kfree_skb(new_skb);
+			ret = -1;
+		} else {
+			base_flags |= TXD_FLAG_END;
+
+			tnapi->tx_buffers[*entry].skb = new_skb;
+			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
+					   mapping, new_addr);
+
+			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
+					    new_skb->len, base_flags,
+					    mss, vlan)) {
+				tg3_tx_skb_unmap(tnapi, *entry, 0);
+				dev_kfree_skb(new_skb);
+				ret = -1;
+			}
+		}
+	}
+
+	dev_kfree_skb(skb);
+
+	return ret;
+}
+
+static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+
+/* Use GSO to workaround a rare TSO bug that may be triggered when the
+ * TSO header is greater than 80 bytes.
+ */
+static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+{
+	struct sk_buff *segs, *nskb;
+	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
+
+	/* Estimate the number of fragments in the worst case */
+	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
+		netif_stop_queue(tp->dev);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in tg3_tx_avail() below, because in
+		 * tg3_tx(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
+			return NETDEV_TX_BUSY;
+
+		netif_wake_queue(tp->dev);
+	}
+
+	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
+	if (IS_ERR(segs))
+		goto tg3_tso_bug_end;
+
+	do {
+		nskb = segs;
+		segs = segs->next;
+		nskb->next = NULL;
+		tg3_start_xmit(nskb, tp->dev);
+	} while (segs);
+
+tg3_tso_bug_end:
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
+/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
+ * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
+ */
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	u32 len, entry, base_flags, mss, vlan = 0;
+	u32 budget;
+	int i = -1, would_hit_hwbug;
+	dma_addr_t mapping;
+	struct tg3_napi *tnapi;
+	struct netdev_queue *txq;
+	unsigned int last;
+
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+	if (tg3_flag(tp, ENABLE_TSS))
+		tnapi++;
+
+	budget = tg3_tx_avail(tnapi);
+
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->napi.poll inside of a software
+	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
+	 * no IRQ context deadlocks to worry about either.  Rejoice!
+	 */
+	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
+
+			/* This is a hard error, log it. */
+			netdev_err(dev,
+				   "BUG!
Tx Ring full when queue awake!\n"); + } + return NETDEV_TX_BUSY; + } + + entry = tnapi->tx_prod; + base_flags = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) + base_flags |= TXD_FLAG_TCPUDP_CSUM; + + mss = skb_shinfo(skb)->gso_size; + if (mss) { + struct iphdr *iph; + u32 tcp_opt_len, hdr_len; + + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + goto out_unlock; + } + + iph = ip_hdr(skb); + tcp_opt_len = tcp_optlen(skb); + + if (skb_is_gso_v6(skb)) { + hdr_len = skb_headlen(skb) - ETH_HLEN; + } else { + u32 ip_tcp_len; + + ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); + hdr_len = ip_tcp_len + tcp_opt_len; + + iph->check = 0; + iph->tot_len = htons(mss + hdr_len); + } + + if (unlikely((ETH_HLEN + hdr_len) > 80) && + tg3_flag(tp, TSO_BUG)) + return tg3_tso_bug(tp, skb); + + base_flags |= (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + tcp_hdr(skb)->check = 0; + base_flags &= ~TXD_FLAG_TCPUDP_CSUM; + } else + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + mss |= (tsflags << 11); + } + } else { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + base_flags |= tsflags << 12; + } + } + } + +#ifdef BCM_KERNEL_SUPPORTS_8021Q + if (vlan_tx_tag_present(skb)) { + base_flags |= TXD_FLAG_VLAN; + vlan = vlan_tx_tag_get(skb); + } +#endif + + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && + !mss && skb->len > VLAN_ETH_FRAME_LEN) + base_flags |= TXD_FLAG_JMB_PKT; + + len = skb_headlen(skb); + + mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + goto out_unlock; + } + + tnapi->tx_buffers[entry].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); + + would_hit_hwbug = 0; + + if (tg3_flag(tp, 5701_DMA_BUG)) + would_hit_hwbug = 1; + + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | + ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), + mss, vlan)) + would_hit_hwbug = 1; + + /* Now loop through additional data fragments, and queue them. */ + if (skb_shinfo(skb)->nr_frags > 0) { + u32 tmp_mss = mss; + + if (!tg3_flag(tp, HW_TSO_1) && + !tg3_flag(tp, HW_TSO_2) && + !tg3_flag(tp, HW_TSO_3)) + tmp_mss = 0; + + last = skb_shinfo(skb)->nr_frags - 1; + for (i = 0; i <= last; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = frag->size; + mapping = pci_map_page(tp->pdev, + frag->page, + frag->page_offset, + len, PCI_DMA_TODEVICE); + + tnapi->tx_buffers[entry].skb = NULL; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + mapping); + if (pci_dma_mapping_error(tp->pdev, mapping)) + goto dma_error; + + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, + len, base_flags | + ((i == last) ? 
TXD_FLAG_END : 0),
+					    tmp_mss, vlan))
+				would_hit_hwbug = 1;
+		}
+	}
+
+	if (would_hit_hwbug) {
+		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+
+		/* If the workaround fails due to memory/mapping
+		 * failure, silently drop this packet.
+		 */
+		entry = tnapi->tx_prod;
+		budget = tg3_tx_avail(tnapi);
+		if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
+						base_flags, mss, vlan))
+			goto out_unlock;
+	}
+
+	skb_tx_timestamp(skb);
+
+	/* Packets are ready, update Tx producer idx local and on card. */
+	tw32_tx_mbox(tnapi->prodmbox, entry);
+
+	tnapi->tx_prod = entry;
+	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
+		netif_tx_stop_queue(txq);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in tg3_tx_avail() below, because in
+		 * tg3_tx(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
+			netif_tx_wake_queue(txq);
+	}
+
+out_unlock:
+	mmiowb();
+
+	return NETDEV_TX_OK;
+
+dma_error:
+	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+	dev_kfree_skb(skb);
+	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
+	return NETDEV_TX_OK;
+}
+
+static void tg3_set_loopback(struct net_device *dev, u32 features)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (features & NETIF_F_LOOPBACK) {
+		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
+			return;
+
+		/*
+		 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
+		 * loopback mode if Half-Duplex mode was negotiated earlier.
+		 */
+		tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+
+		/* Enable internal MAC loopback mode */
+		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+		spin_lock_bh(&tp->lock);
+		tw32(MAC_MODE, tp->mac_mode);
+		netif_carrier_on(tp->dev);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
+	} else {
+		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+			return;
+
+		/* Disable internal MAC loopback mode */
+		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+		spin_lock_bh(&tp->lock);
+		tw32(MAC_MODE, tp->mac_mode);
+		/* Force link status check */
+		tg3_setup_phy(tp, 1);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
+	}
+}
+
+static u32 tg3_fix_features(struct net_device *dev, u32 features)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
+		features &= ~NETIF_F_ALL_TSO;
+
+	return features;
+}
+
+static int tg3_set_features(struct net_device *dev, u32 features)
+{
+	u32 changed = dev->features ^ features;
+
+	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
+		tg3_set_loopback(dev, features);
+
+	return 0;
+}
+
+static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
+			       int new_mtu)
+{
+	dev->mtu = new_mtu;
+
+	if (new_mtu > ETH_DATA_LEN) {
+		if (tg3_flag(tp, 5780_CLASS)) {
+			netdev_update_features(dev);
+			tg3_flag_clear(tp, TSO_CAPABLE);
+		} else {
+			tg3_flag_set(tp, JUMBO_RING_ENABLE);
+		}
+	} else {
+		if (tg3_flag(tp, 5780_CLASS)) {
+			tg3_flag_set(tp, TSO_CAPABLE);
+			netdev_update_features(dev);
+		}
+		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
+	}
+}
+
+static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int err;
+
+	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
+		return -EINVAL;
+
+	if (!netif_running(dev)) {
+		/* We'll just catch it later when the
+		 * device is up'd.
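+		 *
+		 * [Editor's note -- an illustrative sketch, not part of the
+		 * original patch.  The queue flow control in
+		 * tg3_start_xmit() above is the usual lock-free
+		 * stop/recheck pattern; the barrier pairs with one in the
+		 * completion path (tg3_tx(), outside this hunk).]
+		 *
+		 *	netif_tx_stop_queue(txq);
+		 *	smp_mb();
+		 *	if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
+		 *		netif_tx_wake_queue(txq);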
+ */ + tg3_set_mtu(dev, tp, new_mtu); + return 0; + } + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 1); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + + tg3_set_mtu(dev, tp, new_mtu); + + err = tg3_restart_hw(tp, 0); + + if (!err) + tg3_netif_start(tp); + + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + return err; +} + +static void tg3_rx_prodring_free(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + int i; + + if (tpr != &tp->napi[0].prodring) { + for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; + i = (i + 1) & tp->rx_std_ring_mask) + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE)) { + for (i = tpr->rx_jmb_cons_idx; + i != tpr->rx_jmb_prod_idx; + i = (i + 1) & tp->rx_jmb_ring_mask) { + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } + } + + return; + } + + for (i = 0; i <= tp->rx_std_ring_mask; i++) + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } +} + +/* Initialize rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ +static int tg3_rx_prodring_alloc(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + u32 i, rx_pkt_dma_sz; + + tpr->rx_std_cons_idx = 0; + tpr->rx_std_prod_idx = 0; + tpr->rx_jmb_cons_idx = 0; + tpr->rx_jmb_prod_idx = 0; + + if (tpr != &tp->napi[0].prodring) { + memset(&tpr->rx_std_buffers[0], 0, + TG3_RX_STD_BUFF_RING_SIZE(tp)); + if (tpr->rx_jmb_buffers) + memset(&tpr->rx_jmb_buffers[0], 0, + TG3_RX_JMB_BUFF_RING_SIZE(tp)); + goto done; + } + + /* Zero out all descriptors. */ + memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); + + rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; + if (tg3_flag(tp, 5780_CLASS) && + tp->dev->mtu > ETH_DATA_LEN) + rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; + tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); + + /* Initialize invariants of the rings, we only set this + * stuff once. This works because the card does not + * write into the rx buffer posting rings. + */ + for (i = 0; i <= tp->rx_std_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_std[i]; + rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); + rxd->opaque = (RXD_OPAQUE_RING_STD | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + /* Now allocate fresh SKBs for each rx ring. */ + for (i = 0; i < tp->rx_pending; i++) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX standard ring. 
Only " + "%d out of %d buffers were allocated " + "successfully\n", i, tp->rx_pending); + if (i == 0) + goto initfail; + tp->rx_pending = i; + break; + } + } + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + goto done; + + memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); + + if (!tg3_flag(tp, JUMBO_RING_ENABLE)) + goto done; + + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_jmb[i].std; + rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | + RXD_FLAG_JUMBO; + rxd->opaque = (RXD_OPAQUE_RING_JUMBO | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + for (i = 0; i < tp->rx_jumbo_pending; i++) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX jumbo ring. Only %d " + "out of %d buffers were allocated " + "successfully\n", i, tp->rx_jumbo_pending); + if (i == 0) + goto initfail; + tp->rx_jumbo_pending = i; + break; + } + } + +done: + return 0; + +initfail: + tg3_rx_prodring_free(tp, tpr); + return -ENOMEM; +} + +static void tg3_rx_prodring_fini(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + kfree(tpr->rx_std_buffers); + tpr->rx_std_buffers = NULL; + kfree(tpr->rx_jmb_buffers); + tpr->rx_jmb_buffers = NULL; + if (tpr->rx_std) { + dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), + tpr->rx_std, tpr->rx_std_mapping); + tpr->rx_std = NULL; + } + if (tpr->rx_jmb) { + dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), + tpr->rx_jmb, tpr->rx_jmb_mapping); + tpr->rx_jmb = NULL; + } +} + +static int tg3_rx_prodring_init(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), + GFP_KERNEL); + if (!tpr->rx_std_buffers) + return -ENOMEM; + + tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_STD_RING_BYTES(tp), + &tpr->rx_std_mapping, + GFP_KERNEL); + if (!tpr->rx_std) + goto err_out; + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), + GFP_KERNEL); + if (!tpr->rx_jmb_buffers) + goto err_out; + + tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_JMB_RING_BYTES(tp), + &tpr->rx_jmb_mapping, + GFP_KERNEL); + if (!tpr->rx_jmb) + goto err_out; + } + + return 0; + +err_out: + tg3_rx_prodring_fini(tp, tpr); + return -ENOMEM; +} + +/* Free up pending packets in all rx/tx rings. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock is not held and we are not + * in an interrupt context and thus may sleep. + */ +static void tg3_free_rings(struct tg3 *tp) +{ + int i, j; + + for (j = 0; j < tp->irq_cnt; j++) { + struct tg3_napi *tnapi = &tp->napi[j]; + + tg3_rx_prodring_free(tp, &tnapi->prodring); + + if (!tnapi->tx_buffers) + continue; + + for (i = 0; i < TG3_TX_RING_SIZE; i++) { + struct sk_buff *skb = tnapi->tx_buffers[i].skb; + + if (!skb) + continue; + + tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags); + + dev_kfree_skb_any(skb); + } + } +} + +/* Initialize tx/rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ +static int tg3_init_rings(struct tg3 *tp) +{ + int i; + + /* Free up all the SKBs. 
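+	 *
+	 * [Editor's note -- an illustrative sketch, not part of the
+	 * original patch.  The rx descriptor invariants written once in
+	 * tg3_rx_prodring_alloc() above let a completion be traced back
+	 * to its buffer; the index mask name is assumed.]
+	 *
+	 *	rxd->opaque = RXD_OPAQUE_RING_STD |
+	 *		      (i << RXD_OPAQUE_INDEX_SHIFT);
+	 *	idx = rxd->opaque & RXD_OPAQUE_INDEX_MASK;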
 */
+	tg3_free_rings(tp);
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		tnapi->last_tag = 0;
+		tnapi->last_irq_tag = 0;
+		tnapi->hw_status->status = 0;
+		tnapi->hw_status->status_tag = 0;
+		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+		tnapi->tx_prod = 0;
+		tnapi->tx_cons = 0;
+		if (tnapi->tx_ring)
+			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
+
+		tnapi->rx_rcb_ptr = 0;
+		if (tnapi->rx_rcb)
+			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+			tg3_free_rings(tp);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (tnapi->tx_ring) {
+			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
+				tnapi->tx_ring, tnapi->tx_desc_mapping);
+			tnapi->tx_ring = NULL;
+		}
+
+		kfree(tnapi->tx_buffers);
+		tnapi->tx_buffers = NULL;
+
+		if (tnapi->rx_rcb) {
+			dma_free_coherent(&tp->pdev->dev,
+					  TG3_RX_RCB_RING_BYTES(tp),
+					  tnapi->rx_rcb,
+					  tnapi->rx_rcb_mapping);
+			tnapi->rx_rcb = NULL;
+		}
+
+		tg3_rx_prodring_fini(tp, &tnapi->prodring);
+
+		if (tnapi->hw_status) {
+			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
+					  tnapi->hw_status,
+					  tnapi->status_mapping);
+			tnapi->hw_status = NULL;
+		}
+	}
+
+	if (tp->hw_stats) {
+		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+				  tp->hw_stats, tp->stats_mapping);
+		tp->hw_stats = NULL;
+	}
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.  Can sleep.
+ */
+static int tg3_alloc_consistent(struct tg3 *tp)
+{
+	int i;
+
+	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+					  sizeof(struct tg3_hw_stats),
+					  &tp->stats_mapping,
+					  GFP_KERNEL);
+	if (!tp->hw_stats)
+		goto err_out;
+
+	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		struct tg3_hw_status *sblk;
+
+		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+						      TG3_HW_STATUS_SIZE,
+						      &tnapi->status_mapping,
+						      GFP_KERNEL);
+		if (!tnapi->hw_status)
+			goto err_out;
+
+		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+		sblk = tnapi->hw_status;
+
+		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+			goto err_out;
+
+		/* If multivector TSS is enabled, vector 0 does not handle
+		 * tx interrupts.  Don't allocate any resources for it.
+		 */
+		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
+		    (i && tg3_flag(tp, ENABLE_TSS))) {
+			tnapi->tx_buffers = kzalloc(
+					sizeof(struct tg3_tx_ring_info) *
+					TG3_TX_RING_SIZE, GFP_KERNEL);
+			if (!tnapi->tx_buffers)
+				goto err_out;
+
+			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+							    TG3_TX_RING_BYTES,
+							&tnapi->tx_desc_mapping,
+							    GFP_KERNEL);
+			if (!tnapi->tx_ring)
+				goto err_out;
+		}
+
+		/*
+		 * When RSS is enabled, the status block format changes
+		 * slightly.  The "rx_jumbo_consumer", "reserved",
+		 * and "rx_mini_consumer" members get mapped to the
+		 * other three rx return ring producer indexes.
+		 */
+		switch (i) {
+		default:
+			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
+			break;
+		case 2:
+			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
+			break;
+		case 3:
+			tnapi->rx_rcb_prod_idx = &sblk->reserved;
+			break;
+		case 4:
+			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
+			break;
+		}
+
+		/*
+		 * If multivector RSS is enabled, vector 0 does not handle
+		 * rx or tx interrupts.
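+		 *
+		 * [Editor's note -- an illustrative summary, not part of the
+		 * original patch.  The switch above re-purposes status block
+		 * fields as rx return ring producer indexes when RSS is on:
+		 *
+		 *	vector 1 -> sblk->idx[0].rx_producer
+		 *	vector 2 -> sblk->rx_jumbo_consumer
+		 *	vector 3 -> sblk->reserved
+		 *	vector 4 -> sblk->rx_mini_consumer ]
+		 *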
Don't allocate any resources for it. + */ + if (!i && tg3_flag(tp, ENABLE_RSS)) + continue; + + tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping, + GFP_KERNEL); + if (!tnapi->rx_rcb) + goto err_out; + + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + } + + return 0; + +err_out: + tg3_free_consistent(tp); + return -ENOMEM; +} + +#define MAX_WAIT_CNT 1000 + +/* To stop a block, clear the enable bit and poll till it + * clears. tp->lock is held. + */ +static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) +{ + unsigned int i; + u32 val; + + if (tg3_flag(tp, 5705_PLUS)) { + switch (ofs) { + case RCVLSC_MODE: + case DMAC_MODE: + case MBFREE_MODE: + case BUFMGR_MODE: + case MEMARB_MODE: + /* We can't enable/disable these bits of the + * 5705/5750, just say success. + */ + return 0; + + default: + break; + } + } + + val = tr32(ofs); + val &= ~enable_bit; + tw32_f(ofs, val); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + udelay(100); + val = tr32(ofs); + if ((val & enable_bit) == 0) + break; + } + + if (i == MAX_WAIT_CNT && !silent) { + dev_err(&tp->pdev->dev, + "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", + ofs, enable_bit); + return -ENODEV; + } + + return 0; +} + +/* tp->lock is held. */ +static int tg3_abort_hw(struct tg3 *tp, int silent) +{ + int i, err; + + tg3_disable_ints(tp); + + tp->rx_mode &= ~RX_MODE_ENABLE; + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); + + err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); + + tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tp->tx_mode &= ~TX_MODE_ENABLE; + tw32_f(MAC_TX_MODE, tp->tx_mode); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + udelay(100); + if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) + break; + } + if (i >= MAX_WAIT_CNT) { + dev_err(&tp->pdev->dev, + "%s timed out, TX_MODE_ENABLE will not clear " + "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); + err |= -ENODEV; + } + + err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); + + tw32(FTQ_RESET, 0xffffffff); + tw32(FTQ_RESET, 0x00000000); + + err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + } + if (tp->hw_stats) + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + + return err; +} + +static void 
tg3_ape_send_event(struct tg3 *tp, u32 event) +{ + int i; + u32 apedata; + + /* NCSI does not support APE events */ + if (tg3_flag(tp, APE_HAS_NCSI)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + /* Wait for up to 1 millisecond for APE to service previous event. */ + for (i = 0; i < 10; i++) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, + event | APE_EVENT_STATUS_EVENT_PENDING); + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + udelay(100); + } + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); +} + +static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) +{ + u32 event; + u32 apedata; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (kind) { + case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); + tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); + tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); + tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, + TG3_APE_HOST_DRVR_STATE_START); + + event = APE_EVENT_STATUS_STATE_START; + break; + case RESET_KIND_SHUTDOWN: + /* With the interface we are currently using, + * APE does not track driver state. Wiping + * out the HOST SEGMENT SIGNATURE forces + * the APE to assume OS absent status. + */ + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); + + if (device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE)) { + tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, + TG3_APE_HOST_WOL_SPEED_AUTO); + apedata = TG3_APE_HOST_DRVR_STATE_WOL; + } else + apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; + + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); + + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + case RESET_KIND_SUSPEND: + event = APE_EVENT_STATUS_STATE_SUSPEND; + break; + default: + return; + } + + event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; + + tg3_ape_send_event(tp, event); +} + +/* tp->lock is held. */ +static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) +{ + tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC1); + + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_INIT || + kind == RESET_KIND_SUSPEND) + tg3_ape_driver_state_change(tp, kind); +} + +/* tp->lock is held. 
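+ *
+ * [Editor's note -- an illustrative sketch, not part of the original
+ * patch.  The pre/post signature writes bracket a reset; tg3_halt()
+ * later in this patch runs them in this order.]
+ *
+ *	tg3_stop_fw(tp);
+ *	tg3_write_sig_pre_reset(tp, kind);
+ *	tg3_abort_hw(tp, silent);
+ *	tg3_chip_reset(tp);
+ *	tg3_write_sig_legacy(tp, kind);
+ *	tg3_write_sig_post_reset(tp, kind);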
*/ +static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) +{ + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START_DONE); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD_DONE); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_SHUTDOWN) + tg3_ape_driver_state_change(tp, kind); +} + +/* tp->lock is held. */ +static void tg3_write_sig_legacy(struct tg3 *tp, int kind) +{ + if (tg3_flag(tp, ENABLE_ASF)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } +} + +static int tg3_poll_fw(struct tg3 *tp) +{ + int i; + u32 val; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* Wait up to 20ms for init done. */ + for (i = 0; i < 200; i++) { + if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) + return 0; + udelay(100); + } + return -ENODEV; + } + + /* Wait for firmware initialization to complete. */ + for (i = 0; i < 100000; i++) { + tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + udelay(10); + } + + /* Chip might not be fitted with firmware. Some Sun onboard + * parts are configured like that. So don't signal the timeout + * of the above loop as an error, but do report the lack of + * running firmware once. + */ + if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, NO_FWARE_REPORTED); + + netdev_info(tp->dev, "No firmware running\n"); + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + /* The 57765 A0 needs a little more + * time to do some important work. + */ + mdelay(10); + } + + return 0; +} + +/* Save PCI command register before chip reset */ +static void tg3_save_pci_state(struct tg3 *tp) +{ + pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); +} + +/* Restore PCI state after chip reset */ +static void tg3_restore_pci_state(struct tg3 *tp) +{ + u32 val; + + /* Re-enable indirect register accesses. */ + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + /* Set MAX PCI retry to zero. */ + val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + tg3_flag(tp, PCIX_MODE)) + val |= PCISTATE_RETRY_SAME_DMA; + /* Allow reads and writes to the APE register and memory space. */ + if (tg3_flag(tp, ENABLE_APE)) + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); + + pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { + if (tg3_flag(tp, PCI_EXPRESS)) + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + else { + pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + tp->pci_cacheline_sz); + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } + } + + /* Make sure PCI-X relaxed ordering bit is clear. 
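+	 *
+	 * [Editor's note -- an illustrative sketch, not part of the
+	 * original patch.  tg3_poll_fw() above is a bounded poll; a
+	 * timeout is deliberately not fatal, since some Sun onboard
+	 * parts ship without firmware.  MAX_TRIES is a hypothetical
+	 * stand-in for the 100000-iteration bound.]
+	 *
+	 *	for (i = 0; i < MAX_TRIES; i++) {
+	 *		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+	 *		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+	 *			break;
+	 *		udelay(10);
+	 *	}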
*/ + if (tg3_flag(tp, PCIX_MODE)) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + pcix_cmd &= ~PCI_X_CMD_ERO; + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + if (tg3_flag(tp, 5780_CLASS)) { + + /* Chip reset on 5780 will reset MSI enable bit, + * so need to restore it. + */ + if (tg3_flag(tp, USING_MSI)) { + u16 ctrl; + + pci_read_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + &ctrl); + pci_write_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + ctrl | PCI_MSI_FLAGS_ENABLE); + val = tr32(MSGINT_MODE); + tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); + } + } +} + +static void tg3_stop_fw(struct tg3 *); + +/* tp->lock is held. */ +static int tg3_chip_reset(struct tg3 *tp) +{ + u32 val; + void (*write_op)(struct tg3 *, u32, u32); + int i, err; + + tg3_nvram_lock(tp); + + tg3_ape_lock(tp, TG3_APE_LOCK_GRC); + + /* No matching tg3_nvram_unlock() after this because + * chip reset below will undo the nvram lock. + */ + tp->nvram_lock_cnt = 0; + + /* GRC_MISC_CFG core clock reset will clear the memory + * enable bit in PCI register 4 and the MSI enable bit + * on some chips, so we save relevant registers here. + */ + tg3_save_pci_state(tp); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + tg3_flag(tp, 5755_PLUS)) + tw32(GRC_FASTBOOT_PC, 0); + + /* + * We must avoid the readl() that normally takes place. + * It locks machines, causes machine checks, and other + * fun things. So, temporarily disable the 5701 + * hardware workaround, while we do the reset. + */ + write_op = tp->write32; + if (write_op == tg3_write_flush_reg32) + tp->write32 = tg3_write32; + + /* Prevent the irq handler from reading or writing PCI registers + * during chip reset when the memory enable bit in the PCI command + * register may be cleared. The chip does not generate interrupt + * at this time, but the irq handler may still be called due to irq + * sharing or irqpoll. + */ + tg3_flag_set(tp, CHIP_RESETTING); + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) { + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + } + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + } + smp_mb(); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + + /* do the reset */ + val = GRC_MISC_CFG_CORECLK_RESET; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Force PCIe 1.0a mode */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS) && + tr32(TG3_PCIE_PHY_TSTCTL) == + (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) + tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); + + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { + tw32(GRC_MISC_CFG, (1 << 29)); + val |= (1 << 29); + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); + tw32(GRC_VCPU_EXT_CTRL, + tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); + } + + /* Manage gphy power for all CPMU absent PCIe devices. 
	 */
+	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
+		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
+
+	tw32(GRC_MISC_CFG, val);
+
+	/* restore 5701 hardware bug workaround write method */
+	tp->write32 = write_op;
+
+	/* Unfortunately, we have to delay before the PCI read back.
+	 * Some 575X chips will not even respond to a PCI cfg access
+	 * when the reset command is given to the chip.
+	 *
+	 * How do these hardware designers expect things to work
+	 * properly if the PCI write is posted for a long period
+	 * of time?  It is always necessary to have some method by
+	 * which a register read back can occur to push the write
+	 * out which does the reset.
+	 *
+	 * For most tg3 variants the trick below was working.
+	 * Ho hum...
+	 */
+	udelay(120);
+
+	/* Flush PCI posted writes.  The normal MMIO registers
+	 * are inaccessible at this time so this is the only
+	 * way to make this reliable (actually, this is no longer
+	 * the case, see above).  I tried to use indirect
+	 * register read/write but this upset some 5701 variants.
+	 */
+	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
+
+	udelay(120);
+
+	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
+		u16 val16;
+
+		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
+			int i;
+			u32 cfg_val;
+
+			/* Wait for link training to complete. */
+			for (i = 0; i < 5000; i++)
+				udelay(100);
+
+			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
+			pci_write_config_dword(tp->pdev, 0xc4,
+					       cfg_val | (1 << 15));
+		}
+
+		/* Clear the "no snoop" and "relaxed ordering" bits. */
+		pci_read_config_word(tp->pdev,
+				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
+				     &val16);
+		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
+			   PCI_EXP_DEVCTL_NOSNOOP_EN);
+		/*
+		 * Older PCIe devices only support the 128 byte
+		 * MPS setting.  Enforce the restriction.
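+		 *
+		 * [Editor's note -- an illustrative sketch, not part of the
+		 * original patch.  The reset sequence earlier in this
+		 * function is a posted-write flush: write the reset, wait,
+		 * then force the write out with a config read.]
+		 *
+		 *	tw32(GRC_MISC_CFG, val);	(includes CORECLK_RESET)
+		 *	udelay(120);
+		 *	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
+		 *	udelay(120);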
+ */ + if (!tg3_flag(tp, CPMU_PRESENT)) + val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, + val16); + + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + + /* Clear error status */ + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_CED | + PCI_EXP_DEVSTA_NFED | + PCI_EXP_DEVSTA_FED | + PCI_EXP_DEVSTA_URD); + } + + tg3_restore_pci_state(tp); + + tg3_flag_clear(tp, CHIP_RESETTING); + tg3_flag_clear(tp, ERROR_PROCESSED); + + val = 0; + if (tg3_flag(tp, 5780_CLASS)) + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { + tg3_stop_fw(tp); + tw32(0x5000, 0x400); + } + + tw32(GRC_MODE, tp->grc_mode); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { + val = tr32(0xc4); + + tw32(0xc4, val | (1 << 15)); + } + + if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) + tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; + tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tp->mac_mode = MAC_MODE_PORT_MODE_TBI; + val = tp->mac_mode; + } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->mac_mode = MAC_MODE_PORT_MODE_GMII; + val = tp->mac_mode; + } else + val = 0; + + tw32_f(MAC_MODE, val); + udelay(40); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); + + err = tg3_poll_fw(tp); + if (err) + return err; + + tg3_mdio_start(tp); + + if (tg3_flag(tp, PCI_EXPRESS) && + tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS)) { + val = tr32(0x7c00); + + tw32(0x7c00, val | (1 << 25)); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = tr32(TG3_CPMU_CLCK_ORIDE); + tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); + } + + /* Reprobe ASF enable state. */ + tg3_flag_clear(tp, ENABLE_ASF); + tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); + tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); + if (val == NIC_SRAM_DATA_SIG_MAGIC) { + u32 nic_cfg; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); + if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { + tg3_flag_set(tp, ENABLE_ASF); + tp->last_event_jiffies = jiffies; + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); + } + } + + return 0; +} + +/* tp->lock is held. */ +static void tg3_stop_fw(struct tg3 *tp) +{ + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + /* Wait for RX cpu to ACK the previous event. */ + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); + + tg3_generate_fw_event(tp); + + /* Wait for RX cpu to ACK this event. */ + tg3_wait_for_event_ack(tp); + } +} + +/* tp->lock is held. */ +static int tg3_halt(struct tg3 *tp, int kind, int silent) +{ + int err; + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, kind); + + tg3_abort_hw(tp, silent); + err = tg3_chip_reset(tp); + + __tg3_set_mac_addr(tp, 0); + + tg3_write_sig_legacy(tp, kind); + tg3_write_sig_post_reset(tp, kind); + + if (err) + return err; + + return 0; +} + +#define RX_CPU_SCRATCH_BASE 0x30000 +#define RX_CPU_SCRATCH_SIZE 0x04000 +#define TX_CPU_SCRATCH_BASE 0x34000 +#define TX_CPU_SCRATCH_SIZE 0x04000 + +/* tp->lock is held. 
*/ +static int tg3_halt_cpu(struct tg3 *tp, u32 offset) +{ + int i; + + BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val = tr32(GRC_VCPU_EXT_CTRL); + + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); + return 0; + } + if (offset == RX_CPU_BASE) { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + + tw32(offset + CPU_STATE, 0xffffffff); + tw32_f(offset + CPU_MODE, CPU_MODE_HALT); + udelay(10); + } else { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + } + + if (i >= 10000) { + netdev_err(tp->dev, "%s timed out, %s CPU\n", + __func__, offset == RX_CPU_BASE ? "RX" : "TX"); + return -ENODEV; + } + + /* Clear firmware's nvram arbitration. */ + if (tg3_flag(tp, NVRAM)) + tw32(NVRAM_SWARB, SWARB_REQ_CLR0); + return 0; +} + +struct fw_info { + unsigned int fw_base; + unsigned int fw_len; + const __be32 *fw_data; +}; + +/* tp->lock is held. */ +static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, + int cpu_scratch_size, struct fw_info *info) +{ + int err, lock_err, i; + void (*write_op)(struct tg3 *, u32, u32); + + if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { + netdev_err(tp->dev, + "%s: Trying to load TX cpu firmware which is 5705\n", + __func__); + return -EINVAL; + } + + if (tg3_flag(tp, 5705_PLUS)) + write_op = tg3_write_mem; + else + write_op = tg3_write_indirect_reg32; + + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. + */ + lock_err = tg3_nvram_lock(tp); + err = tg3_halt_cpu(tp, cpu_base); + if (!lock_err) + tg3_nvram_unlock(tp); + if (err) + goto out; + + for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) + write_op(tp, cpu_scratch_base + i, 0); + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); + for (i = 0; i < (info->fw_len / sizeof(u32)); i++) + write_op(tp, (cpu_scratch_base + + (info->fw_base & 0xffff) + + (i * sizeof(u32))), + be32_to_cpu(info->fw_data[i])); + + err = 0; + +out: + return err; +} + +/* tp->lock is held. */ +static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) +{ + struct fw_info info; + const __be32 *fw_data; + int err, i; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = be32_to_cpu(fw_data[1]); + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, + RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, + TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + /* Now startup only the RX cpu. 
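+	 *
+	 * [Editor's note -- an illustrative recap, not part of the
+	 * original patch.  The firmware image parsed above is three
+	 * big-endian words of header (version, load address, length)
+	 * followed by the payload.]
+	 *
+	 *	fw_data = (const __be32 *)tp->fw->data;
+	 *	info.fw_base = be32_to_cpu(fw_data[1]);
+	 *	info.fw_len  = tp->fw->size - 12;
+	 *	info.fw_data = &fw_data[3];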
*/ + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) + break; + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " + "should be %08x\n", __func__, + tr32(RX_CPU_BASE + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); + + return 0; +} + +/* tp->lock is held. */ +static int tg3_load_tso_firmware(struct tg3 *tp) +{ + struct fw_info info; + const __be32 *fw_data; + unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; + int err, i; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + return 0; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = be32_to_cpu(fw_data[1]); + cpu_scratch_size = tp->fw_len; + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + cpu_base = RX_CPU_BASE; + cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; + } else { + cpu_base = TX_CPU_BASE; + cpu_scratch_base = TX_CPU_SCRATCH_BASE; + cpu_scratch_size = TX_CPU_SCRATCH_SIZE; + } + + err = tg3_load_firmware_cpu(tp, cpu_base, + cpu_scratch_base, cpu_scratch_size, + &info); + if (err) + return err; + + /* Now startup the cpu. */ + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(cpu_base + CPU_PC) == info.fw_base) + break; + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + tw32_f(cpu_base + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, + "%s fails to set CPU PC, is %08x should be %08x\n", + __func__, tr32(cpu_base + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_MODE, 0x00000000); + return 0; +} + + +static int tg3_set_mac_addr(struct net_device *dev, void *p) +{ + struct tg3 *tp = netdev_priv(dev); + struct sockaddr *addr = p; + int err = 0, skip_mac_1 = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + if (!netif_running(dev)) + return 0; + + if (tg3_flag(tp, ENABLE_ASF)) { + u32 addr0_high, addr0_low, addr1_high, addr1_low; + + addr0_high = tr32(MAC_ADDR_0_HIGH); + addr0_low = tr32(MAC_ADDR_0_LOW); + addr1_high = tr32(MAC_ADDR_1_HIGH); + addr1_low = tr32(MAC_ADDR_1_LOW); + + /* Skip MAC addr 1 if ASF is using it. */ + if ((addr0_high != addr1_high || addr0_low != addr1_low) && + !(addr1_high == 0 && addr1_low == 0)) + skip_mac_1 = 1; + } + spin_lock_bh(&tp->lock); + __tg3_set_mac_addr(tp, skip_mac_1); + spin_unlock_bh(&tp->lock); + + return err; +} + +/* tp->lock is held. 
*/ +static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, + dma_addr_t mapping, u32 maxlen_flags, + u32 nic_addr) +{ + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), + ((u64) mapping >> 32)); + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), + ((u64) mapping & 0xffffffff)); + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), + maxlen_flags); + + if (!tg3_flag(tp, 5705_PLUS)) + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_NIC_ADDR), + nic_addr); +} + +static void __tg3_set_rx_mode(struct net_device *); +static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) +{ + int i; + + if (!tg3_flag(tp, ENABLE_TSS)) { + tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); + tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); + tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_TXCOL_TICKS, 0); + tw32(HOSTCC_TXMAX_FRAMES, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT, 0); + } + + if (!tg3_flag(tp, ENABLE_RSS)) { + tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); + tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); + tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_RXCOL_TICKS, 0); + tw32(HOSTCC_RXMAX_FRAMES, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT, 0); + } + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 val = ec->stats_block_coalesce_usecs; + + tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); + tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); + + if (!netif_carrier_ok(tp->dev)) + val = 0; + + tw32(HOSTCC_STAT_COAL_TICKS, val); + } + + for (i = 0; i < tp->irq_cnt - 1; i++) { + u32 reg; + + reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->rx_coalesce_usecs); + reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames); + reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames_irq); + + if (tg3_flag(tp, ENABLE_TSS)) { + reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->tx_coalesce_usecs); + reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames); + reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames_irq); + } + } + + for (; i < tp->irq_max - 1; i++) { + tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + + if (tg3_flag(tp, ENABLE_TSS)) { + tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + } + } +} + +/* tp->lock is held. */ +static void tg3_rings_reset(struct tg3 *tp) +{ + int i; + u32 stblk, txrcb, rxrcb, limit; + struct tg3_napi *tnapi = &tp->napi[0]; + + /* Disable all transmit rings but the first. */ + if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; + else if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; + else + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + + for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + txrcb < limit; txrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + + /* Disable all receive return rings but the first. 
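+	 *
+	 * [Editor's note -- an illustrative sketch, not part of the
+	 * original patch.  The per-vector host coalescing registers
+	 * programmed in __tg3_set_coalesce() above sit at a fixed
+	 * 0x18-byte stride.]
+	 *
+	 *	reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
+	 *	tw32(reg, ec->rx_coalesce_usecs);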
	 */
+	if (tg3_flag(tp, 5717_PLUS))
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
+	else if (!tg3_flag(tp, 5705_PLUS))
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
+	else
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+
+	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
+		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
+			      BDINFO_FLAGS_DISABLED);
+
+	/* Disable interrupts */
+	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
+	tp->napi[0].chk_msi_cnt = 0;
+	tp->napi[0].last_rx_cons = 0;
+	tp->napi[0].last_tx_cons = 0;
+
+	/* Zero mailbox registers. */
+	if (tg3_flag(tp, SUPPORT_MSIX)) {
+		for (i = 1; i < tp->irq_max; i++) {
+			tp->napi[i].tx_prod = 0;
+			tp->napi[i].tx_cons = 0;
+			if (tg3_flag(tp, ENABLE_TSS))
+				tw32_mailbox(tp->napi[i].prodmbox, 0);
+			tw32_rx_mbox(tp->napi[i].consmbox, 0);
+			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
+			tp->napi[i].chk_msi_cnt = 0;
+			tp->napi[i].last_rx_cons = 0;
+			tp->napi[i].last_tx_cons = 0;
+		}
+		if (!tg3_flag(tp, ENABLE_TSS))
+			tw32_mailbox(tp->napi[0].prodmbox, 0);
+	} else {
+		tp->napi[0].tx_prod = 0;
+		tp->napi[0].tx_cons = 0;
+		tw32_mailbox(tp->napi[0].prodmbox, 0);
+		tw32_rx_mbox(tp->napi[0].consmbox, 0);
+	}
+
+	/* Make sure the NIC-based send BD rings are disabled. */
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+		for (i = 0; i < 16; i++)
+			tw32_tx_mbox(mbox + i * 8, 0);
+	}
+
+	txrcb = NIC_SRAM_SEND_RCB;
+	rxrcb = NIC_SRAM_RCV_RET_RCB;
+
+	/* Clear status block in ram. */
+	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+	/* Set status block DMA address */
+	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+	     ((u64) tnapi->status_mapping >> 32));
+	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+	     ((u64) tnapi->status_mapping & 0xffffffff));
+
+	if (tnapi->tx_ring) {
+		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+			       (TG3_TX_RING_SIZE <<
+				BDINFO_FLAGS_MAXLEN_SHIFT),
+			       NIC_SRAM_TX_BUFFER_DESC);
+		txrcb += TG3_BDINFO_SIZE;
+	}
+
+	if (tnapi->rx_rcb) {
+		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+			       (tp->rx_ret_ring_mask + 1) <<
+				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
+		rxrcb += TG3_BDINFO_SIZE;
+	}
+
+	stblk = HOSTCC_STATBLCK_RING1;
+
+	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
+		u64 mapping = (u64)tnapi->status_mapping;
+		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
+		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
+
+		/* Clear status block in ram.
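+		 *
+		 * [Editor's note -- an illustrative sketch, not part of the
+		 * original patch.  All 64-bit DMA addresses here are
+		 * programmed as two 32-bit halves.]
+		 *
+		 *	tw32(reg + TG3_64BIT_REG_HIGH, (u64)map >> 32);
+		 *	tw32(reg + TG3_64BIT_REG_LOW,  (u64)map & 0xffffffff);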
*/ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + if (tnapi->tx_ring) { + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + txrcb += TG3_BDINFO_SIZE; + } + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + ((tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT), 0); + + stblk += 8; + rxrcb += TG3_BDINFO_SIZE; + } +} + +static void tg3_setup_rxbd_thresholds(struct tg3 *tp) +{ + u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; + + if (!tg3_flag(tp, 5750_PLUS) || + tg3_flag(tp, 5780_CLASS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; + else + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; + + nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); + host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); + + val = min(nic_rep_thresh, host_rep_thresh); + tw32(RCVBDI_STD_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(STD_REPLENISH_LWM, bdcache_maxcnt); + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + return; + + if (!tg3_flag(tp, 5705_PLUS)) + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; + else + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; + + host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); + + val = min(bdcache_maxcnt / 2, host_rep_thresh); + tw32(RCVBDI_JUMBO_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); +} + +/* tp->lock is held. */ +static int tg3_reset_hw(struct tg3 *tp, int reset_phy) +{ + u32 val, rdmac_mode; + int i, err, limit; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; + + tg3_disable_ints(tp); + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); + + if (tg3_flag(tp, INIT_COMPLETE)) + tg3_abort_hw(tp, 1); + + /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, + TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + TG3_CPMU_EEEMD_LPI_IN_TX | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + tw32_f(TG3_CPMU_EEE_MODE, val); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + TG3_CPMU_DBTMR1_LNKIDLE_2047US); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); + } + + if (reset_phy) + tg3_phy_reset(tp); + + err = tg3_chip_reset(tp); + if (err) + return err; + + tg3_write_sig_legacy(tp, RESET_KIND_INIT); + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + val = tr32(TG3_CPMU_CTRL); + val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); + tw32(TG3_CPMU_CTRL, val); + + val = tr32(TG3_CPMU_LSPD_10MB_CLK); + val &= ~CPMU_LSPD_10MB_MACCLK_MASK; + val |= CPMU_LSPD_10MB_MACCLK_6_25; + tw32(TG3_CPMU_LSPD_10MB_CLK, val); + + val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); + val &= ~CPMU_LNK_AWARE_MACCLK_MASK; + val |= CPMU_LNK_AWARE_MACCLK_6_25; + tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); + + val = tr32(TG3_CPMU_HST_ACC); + val &= 
~CPMU_HST_ACC_MACCLK_MASK; + val |= CPMU_HST_ACC_MACCLK_6_25; + tw32(TG3_CPMU_HST_ACC, val); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; + val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | + PCIE_PWR_MGMT_L1_THRESH_4MS; + tw32(PCIE_PWR_MGMT_THRESH, val); + + val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; + tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); + + tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); + + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + + if (tg3_flag(tp, L1PLLPD_EN)) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, + val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); + + tw32(GRC_MODE, grc_mode); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_PL_LO_PHYCTL5); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, + val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); + + tw32(GRC_MODE, grc_mode); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of DL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_DL_LO_FTSMAX); + val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, + val | TG3_PCIE_DL_LO_FTSMAX_VAL); + + tw32(GRC_MODE, grc_mode); + } + + val = tr32(TG3_CPMU_LSPD_10MB_CLK); + val &= ~CPMU_LSPD_10MB_MACCLK_MASK; + val |= CPMU_LSPD_10MB_MACCLK_6_25; + tw32(TG3_CPMU_LSPD_10MB_CLK, val); + } + + /* This works around an issue with Athlon chipsets on + * B3 tigon3 silicon. This bit has no effect on any + * other revision. But do not set this on PCI Express + * chips and don't even touch the clocks if the CPMU is present. + */ + if (!tg3_flag(tp, CPMU_PRESENT)) { + if (!tg3_flag(tp, PCI_EXPRESS)) + tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; + tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + tg3_flag(tp, PCIX_MODE)) { + val = tr32(TG3PCI_PCISTATE); + val |= PCISTATE_RETRY_SAME_DMA; + tw32(TG3PCI_PCISTATE, val); + } + + if (tg3_flag(tp, ENABLE_APE)) { + /* Allow reads and writes to the + * APE register and memory space. + */ + val = tr32(TG3PCI_PCISTATE); + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + tw32(TG3PCI_PCISTATE, val); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { + /* Enable some hw fixes. */ + val = tr32(TG3PCI_MSI_DATA); + val |= (1 << 26) | (1 << 28) | (1 << 29); + tw32(TG3PCI_MSI_DATA, val); + } + + /* Descriptor ring init may make accesses to the + * NIC SRAM area to setup the TX descriptors, so we + * can only do this after the hardware has been + * successfully reset. 
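 *
 * (Editor's aside, not part of the patch: the CPMU and PCIe fixups
 * above all follow the same read-modify-write shape -- read the
 * register, clear a field, install the replacement, write it back.
 * A sketch of that idiom, assuming the file's tr32()/tw32() macros,
 * which expand against a local "tp"; the example_* name is made up:)
 */
static void example_rmw32(struct tg3 *tp, u32 reg, u32 clear, u32 set)
{
	u32 val = tr32(reg);

	val &= ~clear;	/* drop the field being replaced */
	val |= set;	/* install the new field value */
	tw32(reg, val);
}
/* (end of aside; the patch resumes)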
+ */ + err = tg3_init_rings(tp); + if (err) + return err; + + if (tg3_flag(tp, 57765_PLUS)) { + val = tr32(TG3PCI_DMA_RW_CTRL) & + ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) + val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val |= DMA_RWCTRL_TAGGED_STAT_WA; + tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { + /* This value is determined during the probe time DMA + * engine test, tg3_test_dma. + */ + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } + + tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | + GRC_MODE_4X_NIC_SEND_RINGS | + GRC_MODE_NO_TX_PHDR_CSUM | + GRC_MODE_NO_RX_PHDR_CSUM); + tp->grc_mode |= GRC_MODE_HOST_SENDBDS; + + /* Pseudo-header checksum is done by hardware logic and not + * the offload processers, so make the chip do the pseudo- + * header checksums on receive. For transmit it is more + * convenient to do the pseudo-header checksum in software + * as Linux does that on transmit for us in all cases. + */ + tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; + + tw32(GRC_MODE, + tp->grc_mode | + (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); + + /* Setup the timer prescalar register. Clock is always 66Mhz. */ + val = tr32(GRC_MISC_CFG); + val &= ~0xff; + val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); + tw32(GRC_MISC_CFG, val); + + /* Initialize MBUF/DESC pool. */ + if (tg3_flag(tp, 5750_PLUS)) { + /* Do nothing. */ + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { + tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); + else + tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); + tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); + tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); + } else if (tg3_flag(tp, TSO_CAPABLE)) { + int fw_len; + + fw_len = tp->fw_len; + fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); + tw32(BUFMGR_MB_POOL_ADDR, + NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); + tw32(BUFMGR_MB_POOL_SIZE, + NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); + } + + if (tp->dev->mtu <= ETH_DATA_LEN) { + tw32(BUFMGR_MB_RDMA_LOW_WATER, + tp->bufmgr_config.mbuf_read_dma_low_water); + tw32(BUFMGR_MB_MACRX_LOW_WATER, + tp->bufmgr_config.mbuf_mac_rx_low_water); + tw32(BUFMGR_MB_HIGH_WATER, + tp->bufmgr_config.mbuf_high_water); + } else { + tw32(BUFMGR_MB_RDMA_LOW_WATER, + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); + tw32(BUFMGR_MB_MACRX_LOW_WATER, + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); + tw32(BUFMGR_MB_HIGH_WATER, + tp->bufmgr_config.mbuf_high_water_jumbo); + } + tw32(BUFMGR_DMA_LOW_WATER, + tp->bufmgr_config.dma_low_water); + tw32(BUFMGR_DMA_HIGH_WATER, + tp->bufmgr_config.dma_high_water); + + val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + val |= BUFMGR_MODE_NO_TX_UNDERRUN; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) + val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; + tw32(BUFMGR_MODE, val); + for (i = 0; i < 2000; i++) { + if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) + break; + udelay(10); + } + if (i >= 2000) { + netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); + return -ENODEV; + } + + if 
(tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) + tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); + + tg3_setup_rxbd_thresholds(tp); + + /* Initialize TG3_BDINFO's at: + * RCVDBDI_STD_BD: standard eth size rx ring + * RCVDBDI_JUMBO_BD: jumbo frame rx ring + * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) + * + * like so: + * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring + * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | + * ring attribute flags + * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM + * + * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. + * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. + * + * The size of each ring is fixed in the firmware, but the location is + * configurable. + */ + tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tpr->rx_std_mapping >> 32)); + tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tpr->rx_std_mapping & 0xffffffff)); + if (!tg3_flag(tp, 5717_PLUS)) + tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_BUFFER_DESC); + + /* Disable the mini ring */ + if (!tg3_flag(tp, 5705_PLUS)) + tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + /* Program the jumbo buffer descriptor ring control + * blocks on those devices that have them. + */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { + + if (tg3_flag(tp, JUMBO_RING_ENABLE)) { + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tpr->rx_jmb_mapping >> 32)); + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tpr->rx_jmb_mapping & 0xffffffff)); + val = TG3_RX_JMB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT; + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, + val | BDINFO_FLAGS_USE_EXT_RECV); + if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_JUMBO_BUFFER_DESC); + } else { + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + } + + if (tg3_flag(tp, 57765_PLUS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + val = TG3_RX_STD_MAX_SIZE_5700; + else + val = TG3_RX_STD_MAX_SIZE_5717; + val <<= BDINFO_FLAGS_MAXLEN_SHIFT; + val |= (TG3_RX_STD_DMA_SZ << 2); + } else + val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; + } else + val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; + + tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); + + tpr->rx_std_prod_idx = tp->rx_pending; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); + + tpr->rx_jmb_prod_idx = + tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); + + tg3_rings_reset(tp); + + /* Initialize MAC address and backoff seed. */ + __tg3_set_mac_addr(tp, 0); + + /* MTU + ethernet header + FCS + optional VLAN tag */ + tw32(MAC_RX_MTU_SIZE, + tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + + /* The slot time is changed by tg3_setup_phy if we + * run at gigabit with half duplex. + */ + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + tw32(MAC_TX_LENGTHS, val); + + /* Receive rules. 
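 *
 * (Editor's aside, not part of the patch: per the TG3_BDINFO layout
 * documented above, each ring control block is programmed with the
 * host DMA address split high/low, a "(max len << 16) | flags" word,
 * and the descriptors' NIC SRAM address.  A simplified sketch in the
 * spirit of tg3_set_bdinfo(); the example_* name is made up:)
 */
static void example_set_bdinfo(struct tg3 *tp, u32 bdinfo, u64 map,
			       u32 maxlen_flags, u32 nic_addr)
{
	tw32(bdinfo + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     (u32)(map >> 32));
	tw32(bdinfo + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     (u32)(map & 0xffffffff));
	tw32(bdinfo + TG3_BDINFO_MAXLEN_FLAGS, maxlen_flags);
	tw32(bdinfo + TG3_BDINFO_NIC_ADDR, nic_addr);
}
/* (end of aside; the patch resumes)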
*/ + tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); + tw32(RCVLPC_CONFIG, 0x0181); + + /* Calculate RDMAC_MODE setting early, we need it to determine + * the RCVLPC_STATE_ENABLE mask. + */ + rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | + RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | + RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | + RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | + RDMAC_MODE_LNGREAD_ENAB); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | + RDMAC_MODE_MBUF_RBD_CRPT_ENAB | + RDMAC_MODE_MBUF_SBD_CRPT_ENAB; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; + } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && + !tg3_flag(tp, IS_5788)) { + rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + } + } + + if (tg3_flag(tp, PCI_EXPRESS)) + rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; + + if (tg3_flag(tp, 57765_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) { + val = tr32(TG3_RDMA_RSRVCTRL_REG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | + TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | + TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); + val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | + TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | + TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; + } + tw32(TG3_RDMA_RSRVCTRL_REG, + val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); + tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); + } + + /* Receive/send statistics. */ + if (tg3_flag(tp, 5750_PLUS)) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_DACK_FIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && + tg3_flag(tp, TSO_CAPABLE)) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else { + tw32(RCVLPC_STATS_ENABLE, 0xffffff); + } + tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); + tw32(SNDDATAI_STATSENAB, 0xffffff); + tw32(SNDDATAI_STATSCTRL, + (SNDDATAI_SCTRL_ENABLE | + SNDDATAI_SCTRL_FASTUPD)); + + /* Setup host coalescing engine. 
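 *
 * (Editor's aside, not part of the patch: the engine is written to 0
 * below and then polled until its ENABLE bit drops -- the mirror
 * image of the bounded BUFMGR_MODE enable poll earlier.  A sketch of
 * the bounded-poll idiom, assuming tr32() and udelay() from
 * <linux/delay.h>; the example_* name is made up:)
 */
static int example_poll_bit_clear(struct tg3 *tp, u32 reg, u32 bit)
{
	int i;

	for (i = 0; i < 2000; i++) {
		if (!(tr32(reg) & bit))
			return 0;	/* hardware acknowledged the stop */
		udelay(10);		/* bounded: ~20 ms worst case */
	}
	return -EBUSY;			/* hardware never quiesced */
}
/* (end of aside; the patch resumes)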
*/ + tw32(HOSTCC_MODE, 0); + for (i = 0; i < 2000; i++) { + if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) + break; + udelay(10); + } + + __tg3_set_coalesce(tp, &tp->coal); + + if (!tg3_flag(tp, 5705_PLUS)) { + /* Status/statistics block address. See tg3_timer, + * the tg3_periodic_fetch_stats call there, and + * tg3_get_stats to see how this works for 5705/5750 chips. + */ + tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tp->stats_mapping >> 32)); + tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tp->stats_mapping & 0xffffffff)); + tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); + + tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); + + /* Clear statistics and status block memory areas */ + for (i = NIC_SRAM_STATS_BLK; + i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; + i += sizeof(u32)) { + tg3_write_mem(tp, i, 0); + udelay(40); + } + } + + tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); + + tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); + tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); + if (!tg3_flag(tp, 5705_PLUS)) + tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + /* reset to prevent losing 1st rx packet intermittently */ + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + + tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | + MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | + MAC_MODE_FHDE_ENABLE; + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + if (!tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); + udelay(40); + + /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). + * If TG3_FLAG_IS_NIC is zero, we should read the + * register to preserve the GPIO settings for LOMs. The GPIOs, + * whether used as inputs or outputs, are set by boot code after + * reset. 
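 *
 * (Editor's aside, not part of the patch: the block below is a
 * bit-merge -- GPIO bits named in the mask are taken from the live
 * register so boot-code settings survive, everything else comes from
 * the cached value.  The core of it distilled; the example_* name is
 * made up:)
 */
static u32 example_merge_under_mask(u32 cached, u32 live, u32 mask)
{
	/* masked bits from the hardware, the rest from our copy */
	return (cached & ~mask) | (live & mask);
}
/* (end of aside; the patch resumes)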
+ */ + if (!tg3_flag(tp, IS_NIC)) { + u32 gpio_mask; + + gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | + GRC_LCLCTRL_GPIO_OUTPUT3; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; + + tp->grc_local_ctrl &= ~gpio_mask; + tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; + + /* GPIO1 must be driven high for eeprom write protect */ + if (tg3_flag(tp, EEPROM_WRITE_PROT)) + tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OUTPUT1); + } + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(100); + + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) { + val = tr32(MSGINT_MODE); + val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; + tw32(MSGINT_MODE, val); + } + + if (!tg3_flag(tp, 5705_PLUS)) { + tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); + udelay(40); + } + + val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | + WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | + WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | + WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | + WDMAC_MODE_LNGREAD_ENAB); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && + (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || + tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { + /* nothing */ + } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && + !tg3_flag(tp, IS_5788)) { + val |= WDMAC_MODE_RX_ACCEL; + } + } + + /* Enable host coalescing bug fix */ + if (tg3_flag(tp, 5755_PLUS)) + val |= WDMAC_MODE_STATUS_TAG_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + val |= WDMAC_MODE_BURST_ALL_DATA; + + tw32_f(WDMAC_MODE, val); + udelay(40); + + if (tg3_flag(tp, PCIX_MODE)) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { + pcix_cmd &= ~PCI_X_CMD_MAX_READ; + pcix_cmd |= PCI_X_CMD_READ_2K; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); + pcix_cmd |= PCI_X_CMD_READ_2K; + } + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + tw32_f(RDMAC_MODE, rdmac_mode); + udelay(40); + + tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); + if (!tg3_flag(tp, 5705_PLUS)) + tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tw32(SNDDATAC_MODE, + SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); + else + tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); + + tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); + tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); + val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + val |= RCVDBDI_MODE_LRG_RING_SZ; + tw32(RCVDBDI_MODE, val); + tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); + val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; + if (tg3_flag(tp, ENABLE_TSS)) + val |= SNDBDI_MODE_MULTI_TXQ_EN; + tw32(SNDBDI_MODE, val); + tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { 
+ err = tg3_load_5701_a0_firmware_fix(tp); + if (err) + return err; + } + + if (tg3_flag(tp, TSO_CAPABLE)) { + err = tg3_load_tso_firmware(tp); + if (err) + return err; + } + + tp->tx_mode = TX_MODE_ENABLE; + + if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; + tp->tx_mode &= ~val; + tp->tx_mode |= tr32(MAC_TX_MODE) & val; + } + + tw32_f(MAC_TX_MODE, tp->tx_mode); + udelay(100); + + if (tg3_flag(tp, ENABLE_RSS)) { + int i = 0; + u32 reg = MAC_RSS_INDIR_TBL_0; + + if (tp->irq_cnt == 2) { + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) { + tw32(reg, 0x0); + reg += 4; + } + } else { + u32 val; + + while (i < TG3_RSS_INDIR_TBL_SIZE) { + val = i % (tp->irq_cnt - 1); + i++; + for (; i % 8; i++) { + val <<= 4; + val |= (i % (tp->irq_cnt - 1)); + } + tw32(reg, val); + reg += 4; + } + } + + /* Setup the "secret" hash key. */ + tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); + tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); + tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); + tw32(MAC_RSS_HASH_KEY_3, 0x36621985); + tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); + tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); + tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); + tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); + tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); + tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); + } + + tp->rx_mode = RX_MODE_ENABLE; + if (tg3_flag(tp, 5755_PLUS)) + tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; + + if (tg3_flag(tp, ENABLE_RSS)) + tp->rx_mode |= RX_MODE_RSS_ENABLE | + RX_MODE_RSS_ITBL_HASH_BITS_7 | + RX_MODE_RSS_IPV6_HASH_EN | + RX_MODE_RSS_TCP_IPV6_HASH_EN | + RX_MODE_RSS_IPV4_HASH_EN | + RX_MODE_RSS_TCP_IPV4_HASH_EN; + + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + tw32(MAC_LED_CTRL, tp->led_ctrl); + + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && + !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { + /* Set drive transmission level to 1.2V */ + /* only if the signal pre-emphasis bit is not set */ + val = tr32(MAC_SERDES_CFG); + val &= 0xfffff000; + val |= 0x880; + tw32(MAC_SERDES_CFG, val); + } + if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) + tw32(MAC_SERDES_CFG, 0x616000); + } + + /* Prevent chip from dropping frames when flow control + * is enabled. 
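 *
 * (Editor's aside, not part of the patch: the MAC_RSS_INDIR_TBL_0
 * loop above packs eight 4-bit rx-ring indices into each 32-bit
 * register, spreading the TG3_RSS_INDIR_TBL_SIZE table entries
 * round-robin over the irq_cnt - 1 rx rings.  An equivalent but more
 * explicit sketch; the example_* name is made up:)
 */
static void example_fill_rss_indir(struct tg3 *tp, u32 nrings)
{
	u32 reg = MAC_RSS_INDIR_TBL_0;
	int i = 0, j;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = 0;

		for (j = 0; j < 8; j++, i++)	/* 8 nibbles per register */
			val = (val << 4) | (i % nrings);
		tw32(reg, val);
		reg += 4;
	}
}
/* (end of aside; the patch resumes)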
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + val = 1; + else + val = 2; + tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + /* Use hardware link auto-negotiation */ + tg3_flag_set(tp, HW_AUTONEG); + } + + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + u32 tmp; + + tmp = tr32(SERDES_RX_CTRL); + tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); + tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; + tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + } + + if (!tg3_flag(tp, USE_PHYLIB)) { + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; + tp->link_config.speed = tp->link_config.orig_speed; + tp->link_config.duplex = tp->link_config.orig_duplex; + tp->link_config.autoneg = tp->link_config.orig_autoneg; + } + + err = tg3_setup_phy(tp, 0); + if (err) + return err; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + u32 tmp; + + /* Clear CRC stats. */ + if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { + tg3_writephy(tp, MII_TG3_TEST1, + tmp | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); + } + } + } + + __tg3_set_rx_mode(tp->dev); + + /* Initialize receive rules. */ + tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); + + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) + limit = 8; + else + limit = 16; + if (tg3_flag(tp, ENABLE_ASF)) + limit -= 4; + switch (limit) { + case 16: + tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); + case 15: + tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); + case 14: + tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); + case 13: + tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); + case 12: + tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); + case 11: + tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); + case 10: + tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); + case 9: + tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); + case 8: + tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); + case 7: + tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); + case 6: + tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); + case 5: + tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); + case 4: + /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ + case 3: + /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ + case 2: + case 1: + + default: + break; + } + + if (tg3_flag(tp, ENABLE_APE)) + /* Write our heartbeat update interval to APE. */ + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, + APE_HOST_HEARTBEAT_INT_DISABLE); + + tg3_write_sig_post_reset(tp, RESET_KIND_INIT); + + return 0; +} + +/* Called at device open time to get the chip ready for + * packet processing. Invoked with tp->lock held. 
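 *
 * (Editor's aside, not part of the patch: the TG3_STAT_ADD32() macro
 * defined just below tg3_init_hw() folds a clear-on-read 32-bit
 * hardware counter into a 64-bit software total, detecting wrap with
 * the unsigned test "low < val".  The same logic written out as a
 * function; the example_* name is made up:)
 */
static void example_stat_add32(tg3_stat64_t *stat, u32 val)
{
	stat->low += val;
	if (stat->low < val)		/* the add wrapped past 2^32 ... */
		stat->high += 1;	/* ... so carry into the high word */
}
/* (end of aside; the patch resumes)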
+ */ +static int tg3_init_hw(struct tg3 *tp, int reset_phy) +{ + tg3_switch_clocks(tp); + + tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + + return tg3_reset_hw(tp, reset_phy); +} + +#define TG3_STAT_ADD32(PSTAT, REG) \ +do { u32 __val = tr32(REG); \ + (PSTAT)->low += __val; \ + if ((PSTAT)->low < __val) \ + (PSTAT)->high += 1; \ +} while (0) + +static void tg3_periodic_fetch_stats(struct tg3 *tp) +{ + struct tg3_hw_stats *sp = tp->hw_stats; + + if (!netif_carrier_ok(tp->dev)) + return; + + TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); + TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); + TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); + TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); + TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); + TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); + TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); + TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); + TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); + TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); + + TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); + TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); + TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); + TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); + TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); + TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); + TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); + TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); + TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); + TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); + TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); + TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); + TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); + TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); + + TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) { + TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); + } else { + u32 val = tr32(HOSTCC_FLOW_ATTN); + val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; + if (val) { + tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); + sp->rx_discards.low += val; + if (sp->rx_discards.low < val) + sp->rx_discards.high += 1; + } + sp->mbuf_lwm_thresh_hit = sp->rx_discards; + } + TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); +} + +static void tg3_chk_missed_msi(struct tg3 *tp) +{ + u32 i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tg3_has_work(tnapi)) { + if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && + tnapi->last_tx_cons == tnapi->tx_cons) { + if (tnapi->chk_msi_cnt < 1) { + tnapi->chk_msi_cnt++; + return; + } + tw32_mailbox(tnapi->int_mbox, + tnapi->last_tag << 24); + } + } + tnapi->chk_msi_cnt = 0; + tnapi->last_rx_cons = tnapi->rx_rcb_ptr; + tnapi->last_tx_cons = tnapi->tx_cons; + } +} + +static void tg3_timer(unsigned long __opaque) +{ + struct tg3 *tp = (struct tg3 *) __opaque; + + if (tp->irq_sync) + goto restart_timer; + + spin_lock(&tp->lock); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_chk_missed_msi(tp); + + if (!tg3_flag(tp, TAGGED_STATUS)) { + /* All of this garbage is because when using non-tagged + * IRQ status the mailbox/status_block protocol the chip + * uses with the cpu is race prone. + */ + if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { + tw32(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + } else { + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); + } + + if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tg3_flag_set(tp, RESTART_TIMER); + spin_unlock(&tp->lock); + schedule_work(&tp->reset_task); + return; + } + } + + /* This part only runs once per second. */ + if (!--tp->timer_counter) { + if (tg3_flag(tp, 5705_PLUS)) + tg3_periodic_fetch_stats(tp); + + if (tp->setlpicnt && !--tp->setlpicnt) + tg3_phy_eee_enable(tp); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { + u32 mac_stat; + int phy_event; + + mac_stat = tr32(MAC_STATUS); + + phy_event = 0; + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { + if (mac_stat & MAC_STATUS_MI_INTERRUPT) + phy_event = 1; + } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) + phy_event = 1; + + if (phy_event) + tg3_setup_phy(tp, 0); + } else if (tg3_flag(tp, POLL_SERDES)) { + u32 mac_stat = tr32(MAC_STATUS); + int need_setup = 0; + + if (netif_carrier_ok(tp->dev) && + (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { + need_setup = 1; + } + if (!netif_carrier_ok(tp->dev) && + (mac_stat & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET))) { + need_setup = 1; + } + if (need_setup) { + if (!tp->serdes_counter) { + tw32_f(MAC_MODE, + (tp->mac_mode & + ~MAC_MODE_PORT_MODE_MASK)); + udelay(40); + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + tg3_setup_phy(tp, 0); + } + } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tg3_serdes_parallel_detect(tp); + } + + tp->timer_counter = tp->timer_multiplier; + } + + /* Heartbeat is only sent once every 2 seconds. + * + * The heartbeat is to tell the ASF firmware that the host + * driver is still alive. In the event that the OS crashes, + * ASF needs to reset the hardware to free up the FIFO space + * that may be filled with rx packets destined for the host. + * If the FIFO is full, ASF will no longer function properly. + * + * Unintended resets have been reported on real time kernels + * where the timer doesn't run on time. Netpoll will also have + * same problem. 
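 *
 * (Editor's aside, not part of the patch: both the once-per-second
 * stats fetch above and the once-per-2-seconds heartbeat below are
 * derived from the base timer with plain down-counters that reload
 * from a multiplier.  The shape of that idiom; names are made up:)
 */
static bool example_period_elapsed(u32 *counter, u32 reload)
{
	if (--*counter)
		return false;	/* period not over yet */
	*counter = reload;	/* re-arm for the next period */
	return true;
}
/* (end of aside; the original comment continues)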
+ * + * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware + * to check the ring condition when the heartbeat is expiring + * before doing the reset. This will prevent most unintended + * resets. + */ + if (!--tp->asf_counter) { + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, + FWCMD_NICDRV_ALIVE3); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, + TG3_FW_UPDATE_TIMEOUT_SEC); + + tg3_generate_fw_event(tp); + } + tp->asf_counter = tp->asf_multiplier; + } + + spin_unlock(&tp->lock); + +restart_timer: + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); +} + +static int tg3_request_irq(struct tg3 *tp, int irq_num) +{ + irq_handler_t fn; + unsigned long flags; + char *name; + struct tg3_napi *tnapi = &tp->napi[irq_num]; + + if (tp->irq_cnt == 1) + name = tp->dev->name; + else { + name = &tnapi->irq_lbl[0]; + snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); + name[IFNAMSIZ-1] = 0; + } + + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { + fn = tg3_msi; + if (tg3_flag(tp, 1SHOT_MSI)) + fn = tg3_msi_1shot; + flags = 0; + } else { + fn = tg3_interrupt; + if (tg3_flag(tp, TAGGED_STATUS)) + fn = tg3_interrupt_tagged; + flags = IRQF_SHARED; + } + + return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); +} + +static int tg3_test_interrupt(struct tg3 *tp) +{ + struct tg3_napi *tnapi = &tp->napi[0]; + struct net_device *dev = tp->dev; + int err, i, intr_ok = 0; + u32 val; + + if (!netif_running(dev)) + return -ENODEV; + + tg3_disable_ints(tp); + + free_irq(tnapi->irq_vec, tnapi); + + /* + * Turn off MSI one shot mode. Otherwise this test has no + * observable way to know whether the interrupt was delivered. + */ + if (tg3_flag(tp, 57765_PLUS)) { + val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + + err = request_irq(tnapi->irq_vec, tg3_test_isr, + IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi); + if (err) + return err; + + tnapi->hw_status->status &= ~SD_STATUS_UPDATED; + tg3_enable_ints(tp); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + tnapi->coal_now); + + for (i = 0; i < 5; i++) { + u32 int_mbox, misc_host_ctrl; + + int_mbox = tr32_mailbox(tnapi->int_mbox); + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + + if ((int_mbox != 0) || + (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { + intr_ok = 1; + break; + } + + if (tg3_flag(tp, 57765_PLUS) && + tnapi->hw_status->status_tag != tnapi->last_tag) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + msleep(10); + } + + tg3_disable_ints(tp); + + free_irq(tnapi->irq_vec, tnapi); + + err = tg3_request_irq(tp, 0); + + if (err) + return err; + + if (intr_ok) { + /* Reenable MSI one shot mode. */ + if (tg3_flag(tp, 57765_PLUS)) { + val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + return 0; + } + + return -EIO; +} + +/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is + * successfully restored + */ +static int tg3_test_msi(struct tg3 *tp) +{ + int err; + u16 pci_cmd; + + if (!tg3_flag(tp, USING_MSI)) + return 0; + + /* Turn off SERR reporting in case MSI terminates with Master + * Abort. 
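 *
 * (Editor's aside, not part of the patch: the code below saves
 * PCI_COMMAND, clears SERR for the duration of the risky MSI test,
 * then restores the saved word -- a save/mask/restore bracket.  A
 * sketch using the standard config-space accessors; the example_*
 * names are made up:)
 */
static int example_with_serr_masked(struct pci_dev *pdev,
				    int (*op)(void *), void *arg)
{
	u16 saved;
	int err;

	pci_read_config_word(pdev, PCI_COMMAND, &saved);
	pci_write_config_word(pdev, PCI_COMMAND, saved & ~PCI_COMMAND_SERR);
	err = op(arg);		/* may terminate with a Master Abort */
	pci_write_config_word(pdev, PCI_COMMAND, saved);
	return err;
}
/* (end of aside; the patch resumes)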
+ */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_write_config_word(tp->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = tg3_test_interrupt(tp); + + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + + if (!err) + return 0; + + /* other failures */ + if (err != -EIO) + return err; + + /* MSI test failed, go back to INTx mode */ + netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " + "to INTx mode. Please report this failure to the PCI " + "maintainer and include system chipset information\n"); + + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); + + pci_disable_msi(tp->pdev); + + tg3_flag_clear(tp, USING_MSI); + tp->napi[0].irq_vec = tp->pdev->irq; + + err = tg3_request_irq(tp, 0); + if (err) + return err; + + /* Need to reset the chip because the MSI cycle may have terminated + * with Master Abort. + */ + tg3_full_lock(tp, 1); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_init_hw(tp, 1); + + tg3_full_unlock(tp); + + if (err) + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); + + return err; +} + +static int tg3_request_firmware(struct tg3 *tp) +{ + const __be32 *fw_data; + + if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { + netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", + tp->fw_needed); + return -ENOENT; + } + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + * start address and _full_ length including BSS sections + * (which must be longer than the actual data, of course + */ + + tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ + if (tp->fw_len < (tp->fw->size - 12)) { + netdev_err(tp->dev, "bogus length %d in \"%s\"\n", + tp->fw_len, tp->fw_needed); + release_firmware(tp->fw); + tp->fw = NULL; + return -EINVAL; + } + + /* We no longer need firmware; we have it. */ + tp->fw_needed = NULL; + return 0; +} + +static bool tg3_enable_msix(struct tg3 *tp) +{ + int i, rc, cpus = num_online_cpus(); + struct msix_entry msix_ent[tp->irq_max]; + + if (cpus == 1) + /* Just fallback to the simpler MSI mode. */ + return false; + + /* + * We want as many rx rings enabled as there are cpus. + * The first MSIX vector only deals with link interrupts, etc, + * so we add one to the number of vectors we are requesting. + */ + tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max); + + for (i = 0; i < tp->irq_max; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); + if (rc < 0) { + return false; + } else if (rc != 0) { + if (pci_enable_msix(tp->pdev, msix_ent, rc)) + return false; + netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", + tp->irq_cnt, rc); + tp->irq_cnt = rc; + } + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].irq_vec = msix_ent[i].vector; + + netif_set_real_num_tx_queues(tp->dev, 1); + rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1; + if (netif_set_real_num_rx_queues(tp->dev, rc)) { + pci_disable_msix(tp->pdev); + return false; + } + + if (tp->irq_cnt > 1) { + tg3_flag_set(tp, ENABLE_RSS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tg3_flag_set(tp, ENABLE_TSS); + netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1); + } + } + + return true; +} + +static void tg3_ints_init(struct tg3 *tp) +{ + if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && + !tg3_flag(tp, TAGGED_STATUS)) { + /* All MSI supporting chips should support tagged + * status. Assert that this is the case. 
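 *
 * (Editor's aside, not part of the patch: tg3_enable_msix() above
 * relies on the pci_enable_msix() contract of this era -- 0 on
 * success, a negative errno on hard failure, and a positive count
 * when fewer vectors are available, in which case the caller retries
 * with that count.  The negotiation distilled; the example_* name is
 * made up:)
 */
static int example_negotiate_msix(struct pci_dev *pdev,
				  struct msix_entry *ent, int want)
{
	int rc = pci_enable_msix(pdev, ent, want);

	if (rc > 0 && pci_enable_msix(pdev, ent, rc))
		return -ENOSPC;	/* retry with the smaller count failed */
	return rc;		/* 0: got "want"; >0: got "rc" vectors */
}
/* (end of aside; the patch resumes)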
+ */ + netdev_warn(tp->dev, + "MSI without TAGGED_STATUS? Not using MSI\n"); + goto defcfg; + } + + if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) + tg3_flag_set(tp, USING_MSIX); + else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) + tg3_flag_set(tp, USING_MSI); + + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { + u32 msi_mode = tr32(MSGINT_MODE); + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) + msi_mode |= MSGINT_MODE_MULTIVEC_EN; + tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); + } +defcfg: + if (!tg3_flag(tp, USING_MSIX)) { + tp->irq_cnt = 1; + tp->napi[0].irq_vec = tp->pdev->irq; + netif_set_real_num_tx_queues(tp->dev, 1); + netif_set_real_num_rx_queues(tp->dev, 1); + } +} + +static void tg3_ints_fini(struct tg3 *tp) +{ + if (tg3_flag(tp, USING_MSIX)) + pci_disable_msix(tp->pdev); + else if (tg3_flag(tp, USING_MSI)) + pci_disable_msi(tp->pdev); + tg3_flag_clear(tp, USING_MSI); + tg3_flag_clear(tp, USING_MSIX); + tg3_flag_clear(tp, ENABLE_RSS); + tg3_flag_clear(tp, ENABLE_TSS); +} + +static int tg3_open(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + int i, err; + + if (tp->fw_needed) { + err = tg3_request_firmware(tp); + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { + if (err) + return err; + } else if (err) { + netdev_warn(tp->dev, "TSO capability disabled\n"); + tg3_flag_clear(tp, TSO_CAPABLE); + } else if (!tg3_flag(tp, TSO_CAPABLE)) { + netdev_notice(tp->dev, "TSO capability restored\n"); + tg3_flag_set(tp, TSO_CAPABLE); + } + } + + netif_carrier_off(tp->dev); + + err = tg3_power_up(tp); + if (err) + return err; + + tg3_full_lock(tp, 0); + + tg3_disable_ints(tp); + tg3_flag_clear(tp, INIT_COMPLETE); + + tg3_full_unlock(tp); + + /* + * Setup interrupts first so we know how + * many NAPI resources to allocate + */ + tg3_ints_init(tp); + + /* The placement of this call is tied + * to the setup and use of Host TX descriptors. 
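 *
 * (Editor's aside, not part of the patch: from here tg3_open()
 * acquires resources in order and, on failure, unwinds them in
 * reverse through the err_out3/err_out2/err_out1 labels below -- the
 * standard kernel goto-unwind shape.  A self-contained skeleton of
 * the pattern, assuming <linux/slab.h> kmalloc() stand-ins; names
 * are made up:)
 */
static int example_goto_unwind(void)
{
	void *a, *b;
	int err = -ENOMEM;

	a = kmalloc(64, GFP_KERNEL);	/* step 1, e.g. irq vectors */
	if (!a)
		return err;
	b = kmalloc(64, GFP_KERNEL);	/* step 2, e.g. DMA memory */
	if (!b)
		goto undo_a;
	/* ... a third step would "goto undo_b" on failure ... */
	kfree(b);
	kfree(a);
	return 0;

undo_a:
	kfree(a);	/* release in reverse order of acquisition */
	return err;
}
/* (end of aside; the patch resumes)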
+ */ + err = tg3_alloc_consistent(tp); + if (err) + goto err_out1; + + tg3_napi_init(tp); + + tg3_napi_enable(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + err = tg3_request_irq(tp, i); + if (err) { + for (i--; i >= 0; i--) + free_irq(tnapi->irq_vec, tnapi); + break; + } + } + + if (err) + goto err_out2; + + tg3_full_lock(tp, 0); + + err = tg3_init_hw(tp, 1); + if (err) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + } else { + if (tg3_flag(tp, TAGGED_STATUS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) + tp->timer_offset = HZ; + else + tp->timer_offset = HZ / 10; + + BUG_ON(tp->timer_offset > HZ); + tp->timer_counter = tp->timer_multiplier = + (HZ / tp->timer_offset); + tp->asf_counter = tp->asf_multiplier = + ((HZ / tp->timer_offset) * 2); + + init_timer(&tp->timer); + tp->timer.expires = jiffies + tp->timer_offset; + tp->timer.data = (unsigned long) tp; + tp->timer.function = tg3_timer; + } + + tg3_full_unlock(tp); + + if (err) + goto err_out3; + + if (tg3_flag(tp, USING_MSI)) { + err = tg3_test_msi(tp); + + if (err) { + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + tg3_full_unlock(tp); + + goto err_out2; + } + + if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { + u32 val = tr32(PCIE_TRANSACTION_CFG); + + tw32(PCIE_TRANSACTION_CFG, + val | PCIE_TRANS_CFG_1SHOT_MSI); + } + } + + tg3_phy_start(tp); + + tg3_full_lock(tp, 0); + + add_timer(&tp->timer); + tg3_flag_set(tp, INIT_COMPLETE); + tg3_enable_ints(tp); + + tg3_full_unlock(tp); + + netif_tx_start_all_queues(dev); + + /* + * Reset loopback feature if it was turned on while the device was down + * make sure that it's installed properly now. 
+ */ + if (dev->features & NETIF_F_LOOPBACK) + tg3_set_loopback(dev, dev->features); + + return 0; + +err_out3: + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + +err_out2: + tg3_napi_disable(tp); + tg3_napi_fini(tp); + tg3_free_consistent(tp); + +err_out1: + tg3_ints_fini(tp); + tg3_frob_aux_power(tp, false); + pci_set_power_state(tp->pdev, PCI_D3hot); + return err; +} + +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *, + struct rtnl_link_stats64 *); +static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); + +static int tg3_close(struct net_device *dev) +{ + int i; + struct tg3 *tp = netdev_priv(dev); + + tg3_napi_disable(tp); + cancel_work_sync(&tp->reset_task); + + netif_tx_stop_all_queues(dev); + + del_timer_sync(&tp->timer); + + tg3_phy_stop(tp); + + tg3_full_lock(tp, 1); + + tg3_disable_ints(tp); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + tg3_flag_clear(tp, INIT_COMPLETE); + + tg3_full_unlock(tp); + + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + + tg3_ints_fini(tp); + + tg3_get_stats64(tp->dev, &tp->net_stats_prev); + + memcpy(&tp->estats_prev, tg3_get_estats(tp), + sizeof(tp->estats_prev)); + + tg3_napi_fini(tp); + + tg3_free_consistent(tp); + + tg3_power_down(tp); + + netif_carrier_off(tp->dev); + + return 0; +} + +static inline u64 get_stat64(tg3_stat64_t *val) +{ + return ((u64)val->high << 32) | ((u64)val->low); +} + +static u64 calc_crc_errors(struct tg3 *tp) +{ + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + u32 val; + + spin_lock_bh(&tp->lock); + if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { + tg3_writephy(tp, MII_TG3_TEST1, + val | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); + } else + val = 0; + spin_unlock_bh(&tp->lock); + + tp->phy_crc_errors += val; + + return tp->phy_crc_errors; + } + + return get_stat64(&hw_stats->rx_fcs_errors); +} + +#define ESTAT_ADD(member) \ + estats->member = old_estats->member + \ + get_stat64(&hw_stats->member) + +static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) +{ + struct tg3_ethtool_stats *estats = &tp->estats; + struct tg3_ethtool_stats *old_estats = &tp->estats_prev; + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!hw_stats) + return old_estats; + + ESTAT_ADD(rx_octets); + ESTAT_ADD(rx_fragments); + ESTAT_ADD(rx_ucast_packets); + ESTAT_ADD(rx_mcast_packets); + ESTAT_ADD(rx_bcast_packets); + ESTAT_ADD(rx_fcs_errors); + ESTAT_ADD(rx_align_errors); + ESTAT_ADD(rx_xon_pause_rcvd); + ESTAT_ADD(rx_xoff_pause_rcvd); + ESTAT_ADD(rx_mac_ctrl_rcvd); + ESTAT_ADD(rx_xoff_entered); + ESTAT_ADD(rx_frame_too_long_errors); + ESTAT_ADD(rx_jabbers); + ESTAT_ADD(rx_undersize_packets); + ESTAT_ADD(rx_in_length_errors); + ESTAT_ADD(rx_out_length_errors); + ESTAT_ADD(rx_64_or_less_octet_packets); + ESTAT_ADD(rx_65_to_127_octet_packets); + ESTAT_ADD(rx_128_to_255_octet_packets); + ESTAT_ADD(rx_256_to_511_octet_packets); + ESTAT_ADD(rx_512_to_1023_octet_packets); + ESTAT_ADD(rx_1024_to_1522_octet_packets); + ESTAT_ADD(rx_1523_to_2047_octet_packets); + ESTAT_ADD(rx_2048_to_4095_octet_packets); + ESTAT_ADD(rx_4096_to_8191_octet_packets); + ESTAT_ADD(rx_8192_to_9022_octet_packets); + + ESTAT_ADD(tx_octets); + ESTAT_ADD(tx_collisions); + 
	ESTAT_ADD(tx_xon_sent);
+	ESTAT_ADD(tx_xoff_sent);
+	ESTAT_ADD(tx_flow_control);
+	ESTAT_ADD(tx_mac_errors);
+	ESTAT_ADD(tx_single_collisions);
+	ESTAT_ADD(tx_mult_collisions);
+	ESTAT_ADD(tx_deferred);
+	ESTAT_ADD(tx_excessive_collisions);
+	ESTAT_ADD(tx_late_collisions);
+	ESTAT_ADD(tx_collide_2times);
+	ESTAT_ADD(tx_collide_3times);
+	ESTAT_ADD(tx_collide_4times);
+	ESTAT_ADD(tx_collide_5times);
+	ESTAT_ADD(tx_collide_6times);
+	ESTAT_ADD(tx_collide_7times);
+	ESTAT_ADD(tx_collide_8times);
+	ESTAT_ADD(tx_collide_9times);
+	ESTAT_ADD(tx_collide_10times);
+	ESTAT_ADD(tx_collide_11times);
+	ESTAT_ADD(tx_collide_12times);
+	ESTAT_ADD(tx_collide_13times);
+	ESTAT_ADD(tx_collide_14times);
+	ESTAT_ADD(tx_collide_15times);
+	ESTAT_ADD(tx_ucast_packets);
+	ESTAT_ADD(tx_mcast_packets);
+	ESTAT_ADD(tx_bcast_packets);
+	ESTAT_ADD(tx_carrier_sense_errors);
+	ESTAT_ADD(tx_discards);
+	ESTAT_ADD(tx_errors);
+
+	ESTAT_ADD(dma_writeq_full);
+	ESTAT_ADD(dma_write_prioq_full);
+	ESTAT_ADD(rxbds_empty);
+	ESTAT_ADD(rx_discards);
+	ESTAT_ADD(rx_errors);
+	ESTAT_ADD(rx_threshold_hit);
+
+	ESTAT_ADD(dma_readq_full);
+	ESTAT_ADD(dma_read_prioq_full);
+	ESTAT_ADD(tx_comp_queue_full);
+
+	ESTAT_ADD(ring_set_send_prod_index);
+	ESTAT_ADD(ring_status_update);
+	ESTAT_ADD(nic_irqs);
+	ESTAT_ADD(nic_avoided_irqs);
+	ESTAT_ADD(nic_tx_threshold_hit);
+
+	ESTAT_ADD(mbuf_lwm_thresh_hit);
+
+	return estats;
+}
+
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+						 struct rtnl_link_stats64 *stats)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
+	struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+	if (!hw_stats)
+		return old_stats;
+
+	stats->rx_packets = old_stats->rx_packets +
+		get_stat64(&hw_stats->rx_ucast_packets) +
+		get_stat64(&hw_stats->rx_mcast_packets) +
+		get_stat64(&hw_stats->rx_bcast_packets);
+
+	stats->tx_packets = old_stats->tx_packets +
+		get_stat64(&hw_stats->tx_ucast_packets) +
+		get_stat64(&hw_stats->tx_mcast_packets) +
+		get_stat64(&hw_stats->tx_bcast_packets);
+
+	stats->rx_bytes = old_stats->rx_bytes +
+		get_stat64(&hw_stats->rx_octets);
+	stats->tx_bytes = old_stats->tx_bytes +
+		get_stat64(&hw_stats->tx_octets);
+
+	stats->rx_errors = old_stats->rx_errors +
+		get_stat64(&hw_stats->rx_errors);
+	stats->tx_errors = old_stats->tx_errors +
+		get_stat64(&hw_stats->tx_errors) +
+		get_stat64(&hw_stats->tx_mac_errors) +
+		get_stat64(&hw_stats->tx_carrier_sense_errors) +
+		get_stat64(&hw_stats->tx_discards);
+
+	stats->multicast = old_stats->multicast +
+		get_stat64(&hw_stats->rx_mcast_packets);
+	stats->collisions = old_stats->collisions +
+		get_stat64(&hw_stats->tx_collisions);
+
+	stats->rx_length_errors = old_stats->rx_length_errors +
+		get_stat64(&hw_stats->rx_frame_too_long_errors) +
+		get_stat64(&hw_stats->rx_undersize_packets);
+
+	stats->rx_over_errors = old_stats->rx_over_errors +
+		get_stat64(&hw_stats->rxbds_empty);
+	stats->rx_frame_errors = old_stats->rx_frame_errors +
+		get_stat64(&hw_stats->rx_align_errors);
+	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
+		get_stat64(&hw_stats->tx_discards);
+	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
+		get_stat64(&hw_stats->tx_carrier_sense_errors);
+
+	stats->rx_crc_errors = old_stats->rx_crc_errors +
+		calc_crc_errors(tp);
+
+	stats->rx_missed_errors = old_stats->rx_missed_errors +
+		get_stat64(&hw_stats->rx_discards);
+
+	stats->rx_dropped = tp->rx_dropped;
+
+	return stats;
+}
+
+static inline u32 calc_crc(unsigned char *buf, int len)
+{
+	u32 reg;
+
u32 tmp; + int j, k; + + reg = 0xffffffff; + + for (j = 0; j < len; j++) { + reg ^= buf[j]; + + for (k = 0; k < 8; k++) { + tmp = reg & 0x01; + + reg >>= 1; + + if (tmp) + reg ^= 0xedb88320; + } + } + + return ~reg; +} + +static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) +{ + /* accept or reject all multicast frames */ + tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); +} + +static void __tg3_set_rx_mode(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + u32 rx_mode; + + rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | + RX_MODE_KEEP_VLAN_TAG); + +#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) + /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG + * flag clear. + */ + if (!tg3_flag(tp, ENABLE_ASF)) + rx_mode |= RX_MODE_KEEP_VLAN_TAG; +#endif + + if (dev->flags & IFF_PROMISC) { + /* Promiscuous mode. */ + rx_mode |= RX_MODE_PROMISC; + } else if (dev->flags & IFF_ALLMULTI) { + /* Accept all multicast. */ + tg3_set_multi(tp, 1); + } else if (netdev_mc_empty(dev)) { + /* Reject all multicast. */ + tg3_set_multi(tp, 0); + } else { + /* Accept one or more multicast(s). */ + struct netdev_hw_addr *ha; + u32 mc_filter[4] = { 0, }; + u32 regidx; + u32 bit; + u32 crc; + + netdev_for_each_mc_addr(ha, dev) { + crc = calc_crc(ha->addr, ETH_ALEN); + bit = ~crc & 0x7f; + regidx = (bit & 0x60) >> 5; + bit &= 0x1f; + mc_filter[regidx] |= (1 << bit); + } + + tw32(MAC_HASH_REG_0, mc_filter[0]); + tw32(MAC_HASH_REG_1, mc_filter[1]); + tw32(MAC_HASH_REG_2, mc_filter[2]); + tw32(MAC_HASH_REG_3, mc_filter[3]); + } + + if (rx_mode != tp->rx_mode) { + tp->rx_mode = rx_mode; + tw32_f(MAC_RX_MODE, rx_mode); + udelay(10); + } +} + +static void tg3_set_rx_mode(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + tg3_full_lock(tp, 0); + __tg3_set_rx_mode(dev); + tg3_full_unlock(tp); +} + +static int tg3_get_regs_len(struct net_device *dev) +{ + return TG3_REG_BLK_SIZE; +} + +static void tg3_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *_p) +{ + struct tg3 *tp = netdev_priv(dev); + + regs->version = 0; + + memset(_p, 0, TG3_REG_BLK_SIZE); + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return; + + tg3_full_lock(tp, 0); + + tg3_dump_legacy_regs(tp, (u32 *)_p); + + tg3_full_unlock(tp); +} + +static int tg3_get_eeprom_len(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + return tp->nvram_size; +} + +static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) +{ + struct tg3 *tp = netdev_priv(dev); + int ret; + u8 *pd; + u32 i, offset, len, b_offset, b_count; + __be32 val; + + if (tg3_flag(tp, NO_NVRAM)) + return -EINVAL; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return -EAGAIN; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + eeprom->magic = TG3_EEPROM_MAGIC; + + if (offset & 3) { + /* adjustments to start on required 4 byte boundary */ + b_offset = offset & 3; + b_count = 4 - b_offset; + if (b_count > len) { + /* i.e. 
offset=1 len=2 */ + b_count = len; + } + ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); + if (ret) + return ret; + memcpy(data, ((char *)&val) + b_offset, b_count); + len -= b_count; + offset += b_count; + eeprom->len += b_count; + } + + /* read bytes up to the last 4 byte boundary */ + pd = &data[eeprom->len]; + for (i = 0; i < (len - (len & 3)); i += 4) { + ret = tg3_nvram_read_be32(tp, offset + i, &val); + if (ret) { + eeprom->len += i; + return ret; + } + memcpy(pd + i, &val, 4); + } + eeprom->len += i; + + if (len & 3) { + /* read last bytes not ending on 4 byte boundary */ + pd = &data[eeprom->len]; + b_count = len & 3; + b_offset = offset + len - b_count; + ret = tg3_nvram_read_be32(tp, b_offset, &val); + if (ret) + return ret; + memcpy(pd, &val, b_count); + eeprom->len += b_count; + } + return 0; +} + +static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); + +static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) +{ + struct tg3 *tp = netdev_priv(dev); + int ret; + u32 offset, len, b_offset, odd_len; + u8 *buf; + __be32 start, end; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return -EAGAIN; + + if (tg3_flag(tp, NO_NVRAM) || + eeprom->magic != TG3_EEPROM_MAGIC) + return -EINVAL; + + offset = eeprom->offset; + len = eeprom->len; + + if ((b_offset = (offset & 3))) { + /* adjustments to start on required 4 byte boundary */ + ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); + if (ret) + return ret; + len += b_offset; + offset &= ~3; + if (len < 4) + len = 4; + } + + odd_len = 0; + if (len & 3) { + /* adjustments to end on required 4 byte boundary */ + odd_len = 1; + len = (len + 3) & ~3; + ret = tg3_nvram_read_be32(tp, offset+len-4, &end); + if (ret) + return ret; + } + + buf = data; + if (b_offset || odd_len) { + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (b_offset) + memcpy(buf, &start, 4); + if (odd_len) + memcpy(buf+len-4, &end, 4); + memcpy(buf + b_offset, data, eeprom->len); + } + + ret = tg3_nvram_write_block(tp, offset, len, buf); + + if (buf != data) + kfree(buf); + + return ret; +} + +static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_ethtool_gset(phydev, cmd); + } + + cmd->supported = (SUPPORTED_Autoneg); + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + cmd->supported |= (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + cmd->supported |= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP); + cmd->port = PORT_TP; + } else { + cmd->supported |= SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + } + + cmd->advertising = tp->link_config.advertising; + if (tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->link_config.flowctrl & FLOW_CTRL_RX) { + if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Pause; + } else { + cmd->advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } + } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Asym_Pause; + } + } + if (netif_running(dev)) { + ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); + cmd->duplex = tp->link_config.active_duplex; + } else { + ethtool_cmd_speed_set(cmd, 
SPEED_INVALID); + cmd->duplex = DUPLEX_INVALID; + } + cmd->phy_address = tp->phy_addr; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = tp->link_config.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct tg3 *tp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_ethtool_sset(phydev, cmd); + } + + if (cmd->autoneg != AUTONEG_ENABLE && + cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE && + cmd->duplex != DUPLEX_FULL && + cmd->duplex != DUPLEX_HALF) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_ENABLE) { + u32 mask = ADVERTISED_Autoneg | + ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + mask |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + mask |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + mask |= ADVERTISED_FIBRE; + + if (cmd->advertising & ~mask) + return -EINVAL; + + mask &= (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full); + + cmd->advertising &= mask; + } else { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { + if (speed != SPEED_1000) + return -EINVAL; + + if (cmd->duplex != DUPLEX_FULL) + return -EINVAL; + } else { + if (speed != SPEED_100 && + speed != SPEED_10) + return -EINVAL; + } + } + + tg3_full_lock(tp, 0); + + tp->link_config.autoneg = cmd->autoneg; + if (cmd->autoneg == AUTONEG_ENABLE) { + tp->link_config.advertising = (cmd->advertising | + ADVERTISED_Autoneg); + tp->link_config.speed = SPEED_INVALID; + tp->link_config.duplex = DUPLEX_INVALID; + } else { + tp->link_config.advertising = 0; + tp->link_config.speed = speed; + tp->link_config.duplex = cmd->duplex; + } + + tp->link_config.orig_speed = tp->link_config.speed; + tp->link_config.orig_duplex = tp->link_config.duplex; + tp->link_config.orig_autoneg = tp->link_config.autoneg; + + if (netif_running(dev)) + tg3_setup_phy(tp, 1); + + tg3_full_unlock(tp); + + return 0; +} + +static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct tg3 *tp = netdev_priv(dev); + + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + strcpy(info->fw_version, tp->fw_ver); + strcpy(info->bus_info, pci_name(tp->pdev)); +} + +static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) + wol->supported = WAKE_MAGIC; + else + wol->supported = 0; + wol->wolopts = 0; + if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) + wol->wolopts = WAKE_MAGIC; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct tg3 *tp = netdev_priv(dev); + struct device *dp = &tp->pdev->dev; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + if ((wol->wolopts & WAKE_MAGIC) && + !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) + return -EINVAL; + + device_set_wakeup_enable(dp, wol->wolopts & 
WAKE_MAGIC); + + spin_lock_bh(&tp->lock); + if (device_may_wakeup(dp)) + tg3_flag_set(tp, WOL_ENABLE); + else + tg3_flag_clear(tp, WOL_ENABLE); + spin_unlock_bh(&tp->lock); + + return 0; +} + +static u32 tg3_get_msglevel(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + return tp->msg_enable; +} + +static void tg3_set_msglevel(struct net_device *dev, u32 value) +{ + struct tg3 *tp = netdev_priv(dev); + tp->msg_enable = value; +} + +static int tg3_nway_reset(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + int r; + + if (!netif_running(dev)) + return -EAGAIN; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + return -EINVAL; + + if (tg3_flag(tp, USE_PHYLIB)) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + } else { + u32 bmcr; + + spin_lock_bh(&tp->lock); + r = -EINVAL; + tg3_readphy(tp, MII_BMCR, &bmcr); + if (!tg3_readphy(tp, MII_BMCR, &bmcr) && + ((bmcr & BMCR_ANENABLE) || + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | + BMCR_ANENABLE); + r = 0; + } + spin_unlock_bh(&tp->lock); + } + + return r; +} + +static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct tg3 *tp = netdev_priv(dev); + + ering->rx_max_pending = tp->rx_std_ring_mask; + ering->rx_mini_max_pending = 0; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; + else + ering->rx_jumbo_max_pending = 0; + + ering->tx_max_pending = TG3_TX_RING_SIZE - 1; + + ering->rx_pending = tp->rx_pending; + ering->rx_mini_pending = 0; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_pending = tp->rx_jumbo_pending; + else + ering->rx_jumbo_pending = 0; + + ering->tx_pending = tp->napi[0].tx_pending; +} + +static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct tg3 *tp = netdev_priv(dev); + int i, irq_sync = 0, err = 0; + + if ((ering->rx_pending > tp->rx_std_ring_mask) || + (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || + (ering->tx_pending > TG3_TX_RING_SIZE - 1) || + (ering->tx_pending <= MAX_SKB_FRAGS) || + (tg3_flag(tp, TSO_BUG) && + (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) + return -EINVAL; + + if (netif_running(dev)) { + tg3_phy_stop(tp); + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + tp->rx_pending = ering->rx_pending; + + if (tg3_flag(tp, MAX_RXPEND_64) && + tp->rx_pending > 63) + tp->rx_pending = 63; + tp->rx_jumbo_pending = ering->rx_jumbo_pending; + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].tx_pending = ering->tx_pending; + + if (netif_running(dev)) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + + if (irq_sync && !err) + tg3_phy_start(tp); + + return err; +} + +static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + struct tg3 *tp = netdev_priv(dev); + + epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); + + if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) + epause->rx_pause = 1; + else + epause->rx_pause = 0; + + if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) + epause->tx_pause = 1; + else + epause->tx_pause = 0; +} + +static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + struct tg3 *tp = netdev_priv(dev); + int err = 0; + + if (tg3_flag(tp, USE_PHYLIB)) { + u32 newadv; + struct phy_device *phydev; + + phydev = 
tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if (!(phydev->supported & SUPPORTED_Pause) || + (!(phydev->supported & SUPPORTED_Asym_Pause) && + (epause->rx_pause != epause->tx_pause))) + return -EINVAL; + + tp->link_config.flowctrl = 0; + if (epause->rx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_RX; + + if (epause->tx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_TX; + newadv = ADVERTISED_Pause; + } else + newadv = ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } else if (epause->tx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_TX; + newadv = ADVERTISED_Asym_Pause; + } else + newadv = 0; + + if (epause->autoneg) + tg3_flag_set(tp, PAUSE_AUTONEG); + else + tg3_flag_clear(tp, PAUSE_AUTONEG); + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + u32 oldadv = phydev->advertising & + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); + if (oldadv != newadv) { + phydev->advertising &= + ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + phydev->advertising |= newadv; + if (phydev->autoneg) { + /* + * Always renegotiate the link to + * inform our link partner of our + * flow control settings, even if the + * flow control is forced. Let + * tg3_adjust_link() do the final + * flow control setup. + */ + return phy_start_aneg(phydev); + } + } + + if (!epause->autoneg) + tg3_setup_flow_control(tp, 0, 0); + } else { + tp->link_config.orig_advertising &= + ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + tp->link_config.orig_advertising |= newadv; + } + } else { + int irq_sync = 0; + + if (netif_running(dev)) { + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + if (epause->autoneg) + tg3_flag_set(tp, PAUSE_AUTONEG); + else + tg3_flag_clear(tp, PAUSE_AUTONEG); + if (epause->rx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_RX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_RX; + if (epause->tx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_TX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_TX; + + if (netif_running(dev)) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + } + + return err; +} + +static int tg3_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return TG3_NUM_TEST; + case ETH_SS_STATS: + return TG3_NUM_STATS; + default: + return -EOPNOTSUPP; + } +} + +static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); + break; + case ETH_SS_TEST: + memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); + break; + default: + WARN_ON(1); /* we need a WARN() */ + break; + } +} + +static int tg3_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!netif_running(tp->dev)) + return -EAGAIN; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON | + LED_CTRL_100MBPS_ON | + LED_CTRL_10MBPS_ON | + LED_CTRL_TRAFFIC_OVERRIDE | + LED_CTRL_TRAFFIC_BLINK | + LED_CTRL_TRAFFIC_LED); + break; + + case ETHTOOL_ID_OFF: + tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE); + break; + + case ETHTOOL_ID_INACTIVE: + tw32(MAC_LED_CTRL, tp->led_ctrl); + break; + } + + return 0; +} + +static void tg3_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *tmp_stats) +{ + struct tg3 *tp = 
netdev_priv(dev); + memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); +} + +static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) +{ + int i; + __be32 *buf; + u32 offset = 0, len = 0; + u32 magic, val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return NULL; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == + TG3_NVM_DIRTYPE_EXTVPD) + break; + } + + if (offset != TG3_NVM_DIR_END) { + len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; + if (tg3_nvram_read(tp, offset + 4, &offset)) + return NULL; + + offset = tg3_nvram_logical_addr(tp, offset); + } + } + + if (!offset || !len) { + offset = TG3_NVM_VPD_OFF; + len = TG3_NVM_VPD_LEN; + } + + buf = kmalloc(len, GFP_KERNEL); + if (buf == NULL) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (i = 0; i < len; i += 4) { + /* The data is in little-endian format in NVRAM. + * Use the big-endian read routines to preserve + * the byte order as it exists in NVRAM. + */ + if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) + goto error; + } + } else { + u8 *ptr; + ssize_t cnt; + unsigned int pos = 0; + + ptr = (u8 *)&buf[0]; + for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { + cnt = pci_read_vpd(tp->pdev, pos, + len - pos, ptr); + if (cnt == -ETIMEDOUT || cnt == -EINTR) + cnt = 0; + else if (cnt < 0) + goto error; + } + if (pos != len) + goto error; + } + + *vpdlen = len; + + return buf; + +error: + kfree(buf); + return NULL; +} + +#define NVRAM_TEST_SIZE 0x100 +#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 +#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 +#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c +#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 +#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 +#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 +#define NVRAM_SELFBOOT_HW_SIZE 0x20 +#define NVRAM_SELFBOOT_DATA_SIZE 0x1c + +static int tg3_test_nvram(struct tg3 *tp) +{ + u32 csum, magic, len; + __be32 *buf; + int i, j, k, err = 0, size; + + if (tg3_flag(tp, NO_NVRAM)) + return 0; + + if (tg3_nvram_read(tp, 0, &magic) != 0) + return -EIO; + + if (magic == TG3_EEPROM_MAGIC) + size = NVRAM_TEST_SIZE; + else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { + if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == + TG3_EEPROM_SB_FORMAT_1) { + switch (magic & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; + break; + case TG3_EEPROM_SB_REVISION_2: + size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; + break; + case TG3_EEPROM_SB_REVISION_3: + size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; + break; + case TG3_EEPROM_SB_REVISION_4: + size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; + break; + case TG3_EEPROM_SB_REVISION_5: + size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; + break; + case TG3_EEPROM_SB_REVISION_6: + size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; + break; + default: + return -EIO; + } + } else + return 0; + } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + size = NVRAM_SELFBOOT_HW_SIZE; + else + return -EIO; + + buf = kmalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + err = -EIO; + for (i = 0, j = 0; i < size; i += 4, j++) { + err = tg3_nvram_read_be32(tp, i, &buf[j]); + if (err) + break; + } + if (i < size) + goto out; + + /* Selfboot format */ + magic = be32_to_cpu(buf[0]); + if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == + TG3_EEPROM_MAGIC_FW) { + u8 *buf8 = (u8 *) buf, csum8 = 0; + + if ((magic & 
TG3_EEPROM_SB_REVISION_MASK) == + TG3_EEPROM_SB_REVISION_2) { + /* For rev 2, the csum doesn't include the MBA. */ + for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) + csum8 += buf8[i]; + for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) + csum8 += buf8[i]; + } else { + for (i = 0; i < size; i++) + csum8 += buf8[i]; + } + + if (csum8 == 0) { + err = 0; + goto out; + } + + err = -EIO; + goto out; + } + + if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == + TG3_EEPROM_MAGIC_HW) { + u8 data[NVRAM_SELFBOOT_DATA_SIZE]; + u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; + u8 *buf8 = (u8 *) buf; + + /* Separate the parity bits and the data bytes. */ + for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { + if ((i == 0) || (i == 8)) { + int l; + u8 msk; + + for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } else if (i == 16) { + int l; + u8 msk; + + for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + + for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } + data[j++] = buf8[i]; + } + + err = -EIO; + for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { + u8 hw8 = hweight8(data[i]); + + if ((hw8 & 0x1) && parity[i]) + goto out; + else if (!(hw8 & 0x1) && !parity[i]) + goto out; + } + err = 0; + goto out; + } + + err = -EIO; + + /* Bootstrap checksum at offset 0x10 */ + csum = calc_crc((unsigned char *) buf, 0x10); + if (csum != le32_to_cpu(buf[0x10/4])) + goto out; + + /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ + csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); + if (csum != le32_to_cpu(buf[0xfc/4])) + goto out; + + kfree(buf); + + buf = tg3_vpd_readblock(tp, &len); + if (!buf) + return -ENOMEM; + + i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); + if (i > 0) { + j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); + if (j < 0) + goto out; + + if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) + goto out; + + i += PCI_VPD_LRDT_TAG_SIZE; + j = pci_vpd_find_info_keyword((u8 *)buf, i, j, + PCI_VPD_RO_KEYWORD_CHKSUM); + if (j > 0) { + u8 csum8 = 0; + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + + for (i = 0; i <= j; i++) + csum8 += ((u8 *)buf)[i]; + + if (csum8) + goto out; + } + } + + err = 0; + +out: + kfree(buf); + return err; +} + +#define TG3_SERDES_TIMEOUT_SEC 2 +#define TG3_COPPER_TIMEOUT_SEC 6 + +static int tg3_test_link(struct tg3 *tp) +{ + int i, max; + + if (!netif_running(tp->dev)) + return -ENODEV; + + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + max = TG3_SERDES_TIMEOUT_SEC; + else + max = TG3_COPPER_TIMEOUT_SEC; + + for (i = 0; i < max; i++) { + if (netif_carrier_ok(tp->dev)) + return 0; + + if (msleep_interruptible(1000)) + break; + } + + return -EIO; +} + +/* Only test the commonly used registers */ +static int tg3_test_registers(struct tg3 *tp) +{ + int i, is_5705, is_5750; + u32 offset, read_mask, write_mask, val, save_val, read_val; + static struct { + u16 offset; + u16 flags; +#define TG3_FL_5705 0x1 +#define TG3_FL_NOT_5705 0x2 +#define TG3_FL_NOT_5788 0x4 +#define TG3_FL_NOT_5750 0x8 + u32 read_mask; + u32 write_mask; + } reg_tbl[] = { + /* MAC Control Registers */ + { MAC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00ef6f8c }, + { MAC_MODE, TG3_FL_5705, + 0x00000000, 0x01ef6b8c }, + { MAC_STATUS, TG3_FL_NOT_5705, + 0x03800107, 0x00000000 }, + { MAC_STATUS, TG3_FL_5705, + 0x03800100, 0x00000000 }, + { MAC_ADDR_0_HIGH, 0x0000, + 0x00000000, 0x0000ffff }, + { MAC_ADDR_0_LOW, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_RX_MTU_SIZE, 0x0000, + 0x00000000, 0x0000ffff }, + { MAC_TX_MODE, 
0x0000, + 0x00000000, 0x00000070 }, + { MAC_TX_LENGTHS, 0x0000, + 0x00000000, 0x00003fff }, + { MAC_RX_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x000007fc }, + { MAC_RX_MODE, TG3_FL_5705, + 0x00000000, 0x000007dc }, + { MAC_HASH_REG_0, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_1, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_2, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_3, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive Data and Receive BD Initiator Control Registers. */ + { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, + 0x00000000, 0x00000003 }, + { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+0, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+4, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+8, 0x0000, + 0x00000000, 0xffff0002 }, + { RCVDBDI_STD_BD+0xc, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive BD Initiator Control Registers. */ + { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVBDI_STD_THRESH, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + + /* Host Coalescing Control Registers. */ + { HOSTCC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00000004 }, + { HOSTCC_MODE, TG3_FL_5705, + 0x00000000, 0x000000f6 }, + { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + + /* Buffer Manager Control Registers. 
*/ + { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, + 0x00000000, 0x007fff80 }, + { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, + 0x00000000, 0x007fffff }, + { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, + 0x00000000, 0x0000003f }, + { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_MB_HIGH_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + + /* Mailbox Registers */ + { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, + 0x00000000, 0x000007ff }, + { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, + 0x00000000, 0x000001ff }, + + { 0xffff, 0x0000, 0x00000000, 0x00000000 }, + }; + + is_5705 = is_5750 = 0; + if (tg3_flag(tp, 5705_PLUS)) { + is_5705 = 1; + if (tg3_flag(tp, 5750_PLUS)) + is_5750 = 1; + } + + for (i = 0; reg_tbl[i].offset != 0xffff; i++) { + if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) + continue; + + if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) + continue; + + if (tg3_flag(tp, IS_5788) && + (reg_tbl[i].flags & TG3_FL_NOT_5788)) + continue; + + if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) + continue; + + offset = (u32) reg_tbl[i].offset; + read_mask = reg_tbl[i].read_mask; + write_mask = reg_tbl[i].write_mask; + + /* Save the original register content */ + save_val = tr32(offset); + + /* Determine the read-only value. */ + read_val = save_val & read_mask; + + /* Write zero to the register, then make sure the read-only bits + * are not changed and the read/write bits are all zeros. + */ + tw32(offset, 0); + + val = tr32(offset); + + /* Test the read-only and read/write bits. */ + if (((val & read_mask) != read_val) || (val & write_mask)) + goto out; + + /* Write ones to all the bits defined by RdMask and WrMask, then + * make sure the read-only bits are not changed and the + * read/write bits are all ones. + */ + tw32(offset, read_mask | write_mask); + + val = tr32(offset); + + /* Test the read-only bits. */ + if ((val & read_mask) != read_val) + goto out; + + /* Test the read/write bits. 
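(every bit in write_mask should now have latched the ones written just above)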
*/ + if ((val & write_mask) != write_mask) + goto out; + + tw32(offset, save_val); + } + + return 0; + +out: + if (netif_msg_hw(tp)) + netdev_err(tp->dev, + "Register test failed at offset %x\n", offset); + tw32(offset, save_val); + return -EIO; +} + +static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) +{ + static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; + int i; + u32 j; + + for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { + for (j = 0; j < len; j += 4) { + u32 val; + + tg3_write_mem(tp, offset + j, test_pattern[i]); + tg3_read_mem(tp, offset + j, &val); + if (val != test_pattern[i]) + return -EIO; + } + } + return 0; +} + +static int tg3_test_memory(struct tg3 *tp) +{ + static struct mem_entry { + u32 offset; + u32 len; + } mem_tbl_570x[] = { + { 0x00000000, 0x00b50}, + { 0x00002000, 0x1c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5705[] = { + { 0x00000100, 0x0000c}, + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x01000}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0e000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5755[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x00800}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5906[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00400}, + { 0x00006000, 0x00400}, + { 0x00008000, 0x01000}, + { 0x00010000, 0x01000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5717[] = { + { 0x00000200, 0x00008}, + { 0x00010000, 0x0a000}, + { 0x00020000, 0x13c00}, + { 0xffffffff, 0x00000} + }, mem_tbl_57765[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x09800}, + { 0x00010000, 0x0a000}, + { 0xffffffff, 0x00000} + }; + struct mem_entry *mem_tbl; + int err = 0; + int i; + + if (tg3_flag(tp, 5717_PLUS)) + mem_tbl = mem_tbl_5717; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + mem_tbl = mem_tbl_57765; + else if (tg3_flag(tp, 5755_PLUS)) + mem_tbl = mem_tbl_5755; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + mem_tbl = mem_tbl_5906; + else if (tg3_flag(tp, 5705_PLUS)) + mem_tbl = mem_tbl_5705; + else + mem_tbl = mem_tbl_570x; + + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { + err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); + if (err) + break; + } + + return err; +} + +#define TG3_MAC_LOOPBACK 0 +#define TG3_PHY_LOOPBACK 1 +#define TG3_TSO_LOOPBACK 2 + +#define TG3_TSO_MSS 500 + +#define TG3_TSO_IP_HDR_LEN 20 +#define TG3_TSO_TCP_HDR_LEN 20 +#define TG3_TSO_TCP_OPT_LEN 12 + +static const u8 tg3_tso_header[] = { +0x08, 0x00, +0x45, 0x00, 0x00, 0x00, +0x00, 0x00, 0x40, 0x00, +0x40, 0x06, 0x00, 0x00, +0x0a, 0x00, 0x00, 0x01, +0x0a, 0x00, 0x00, 0x02, +0x0d, 0x00, 0xe0, 0x00, +0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x02, 0x00, +0x80, 0x10, 0x10, 0x00, +0x14, 0x09, 0x00, 0x00, +0x01, 0x01, 0x08, 0x0a, +0x11, 0x11, 0x11, 0x11, +0x11, 0x11, 0x11, 0x11, +}; + +static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode) +{ + u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; + u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; + u32 budget; + struct sk_buff *skb, *rx_skb; + u8 *tx_data; + dma_addr_t map; + int num_pkts, tx_len, rx_len, i, err; + struct tg3_rx_buffer_desc *desc; + struct tg3_napi *tnapi, *rnapi; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; + + tnapi = &tp->napi[0]; + rnapi = &tp->napi[0]; + if (tp->irq_cnt > 1) { + if (tg3_flag(tp, ENABLE_RSS)) + rnapi = &tp->napi[1]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi = 
&tp->napi[1]; + } + coal_now = tnapi->coal_now | rnapi->coal_now; + + if (loopback_mode == TG3_MAC_LOOPBACK) { + /* HW errata - mac loopback fails in some cases on 5780. + * Normal traffic and PHY loopback are not affected by + * errata. Also, the MAC loopback test is deprecated for + * all newer ASIC revisions. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || + tg3_flag(tp, CPMU_PRESENT)) + return 0; + + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + mac_mode |= MAC_MODE_PORT_INT_LPBACK; + if (!tg3_flag(tp, 5705_PLUS)) + mac_mode |= MAC_MODE_LINK_POLARITY; + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + mac_mode |= MAC_MODE_PORT_MODE_MII; + else + mac_mode |= MAC_MODE_PORT_MODE_GMII; + tw32(MAC_MODE, mac_mode); + } else { + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + tg3_phy_fet_toggle_apd(tp, false); + val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; + } else + val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; + + tg3_phy_toggle_automdix(tp, 0); + + tg3_writephy(tp, MII_BMCR, val); + udelay(40); + + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + tg3_writephy(tp, MII_TG3_FET_PTEST, + MII_TG3_FET_PTEST_FRC_TX_LINK | + MII_TG3_FET_PTEST_FRC_TX_LOCK); + /* The write needs to be flushed for the AC131 */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_readphy(tp, MII_TG3_FET_PTEST, &val); + mac_mode |= MAC_MODE_PORT_MODE_MII; + } else + mac_mode |= MAC_MODE_PORT_MODE_GMII; + + /* reset to prevent losing 1st rx packet intermittently */ + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + tw32_f(MAC_RX_MODE, tp->rx_mode); + } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; + if (masked_phy_id == TG3_PHY_ID_BCM5401) + mac_mode &= ~MAC_MODE_LINK_POLARITY; + else if (masked_phy_id == TG3_PHY_ID_BCM5411) + mac_mode |= MAC_MODE_LINK_POLARITY; + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + } + tw32(MAC_MODE, mac_mode); + + /* Wait for link */ + for (i = 0; i < 100; i++) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + break; + mdelay(1); + } + } + + err = -EIO; + + tx_len = pktsz; + skb = netdev_alloc_skb(tp->dev, tx_len); + if (!skb) + return -ENOMEM; + + tx_data = skb_put(skb, tx_len); + memcpy(tx_data, tp->dev->dev_addr, 6); + memset(tx_data + 6, 0x0, 8); + + tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); + + if (loopback_mode == TG3_TSO_LOOPBACK) { + struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; + + u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + + TG3_TSO_TCP_OPT_LEN; + + memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, + sizeof(tg3_tso_header)); + mss = TG3_TSO_MSS; + + val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); + num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); + + /* Set the total length field in the IP header */ + iph->tot_len = htons((u16)(mss + hdr_len)); + + base_flags = (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + struct tcphdr *th; + val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; + th = (struct tcphdr *)&tx_data[val]; + th->check = 0; + } else + base_flags |= TXD_FLAG_TCPUDP_CSUM; + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + 
else if (tg3_flag(tp, HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + mss |= (TG3_TSO_TCP_OPT_LEN << 9); + } else { + base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); + } + + data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); + } else { + num_pkts = 1; + data_off = ETH_HLEN; + } + + for (i = data_off; i < tx_len; i++) + tx_data[i] = (u8) (i & 0xff); + + map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, map)) { + dev_kfree_skb(skb); + return -EIO; + } + + val = tnapi->tx_prod; + tnapi->tx_buffers[val].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + rnapi->coal_now); + + udelay(10); + + rx_start_idx = rnapi->hw_status->idx[0].rx_producer; + + budget = tg3_tx_avail(tnapi); + if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, + base_flags | TXD_FLAG_END, mss, 0)) { + tnapi->tx_buffers[val].skb = NULL; + dev_kfree_skb(skb); + return -EIO; + } + + tnapi->tx_prod++; + + tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); + tr32_mailbox(tnapi->prodmbox); + + udelay(10); + + /* 350 usec to allow enough time on some 10/100 Mbps devices. */ + for (i = 0; i < 35; i++) { + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + coal_now); + + udelay(10); + + tx_idx = tnapi->hw_status->idx[0].tx_consumer; + rx_idx = rnapi->hw_status->idx[0].rx_producer; + if ((tx_idx == tnapi->tx_prod) && + (rx_idx == (rx_start_idx + num_pkts))) + break; + } + + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0); + dev_kfree_skb(skb); + + if (tx_idx != tnapi->tx_prod) + goto out; + + if (rx_idx != rx_start_idx + num_pkts) + goto out; + + val = data_off; + while (rx_idx != rx_start_idx) { + desc = &rnapi->rx_rcb[rx_start_idx++]; + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) + goto out; + + rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) + - ETH_FCS_LEN; + + if (loopback_mode != TG3_TSO_LOOPBACK) { + if (rx_len != tx_len) + goto out; + + if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { + if (opaque_key != RXD_OPAQUE_RING_STD) + goto out; + } else { + if (opaque_key != RXD_OPAQUE_RING_JUMBO) + goto out; + } + } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT != 0xffff) { + goto out; + } + + if (opaque_key == RXD_OPAQUE_RING_STD) { + rx_skb = tpr->rx_std_buffers[desc_idx].skb; + map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], + mapping); + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + rx_skb = tpr->rx_jmb_buffers[desc_idx].skb; + map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], + mapping); + } else + goto out; + + pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, + PCI_DMA_FROMDEVICE); + + for (i = data_off; i < rx_len; i++, val++) { + if (*(rx_skb->data + i) != (u8) (val & 0xff)) + goto out; + } + } + + err = 0; + + /* tg3_free_rings will unmap and free the rx_skb */ +out: + return err; +} + +#define TG3_STD_LOOPBACK_FAILED 1 +#define TG3_JMB_LOOPBACK_FAILED 2 +#define TG3_TSO_LOOPBACK_FAILED 4 + +#define TG3_MAC_LOOPBACK_SHIFT 0 +#define TG3_PHY_LOOPBACK_SHIFT 4 +#define TG3_LOOPBACK_FAILED 0x00000077 + +static int tg3_test_loopback(struct tg3 *tp) +{ + int err = 0; + u32 eee_cap, cpmuctrl = 0; + + if (!netif_running(tp->dev)) + return TG3_LOOPBACK_FAILED; + + eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; + 
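/* The EEE advertisement is parked here for the duration of the tests + * and restored at the done: label below. (EEE low-power idle could + * otherwise interfere with the timed loopback runs.) */ +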
tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; + + err = tg3_reset_hw(tp, 1); + if (err) { + err = TG3_LOOPBACK_FAILED; + goto done; + } + + if (tg3_flag(tp, ENABLE_RSS)) { + int i; + + /* Reroute all rx packets to the 1st queue */ + for (i = MAC_RSS_INDIR_TBL_0; + i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) + tw32(i, 0x0); + } + + /* Turn off gphy autopowerdown. */ + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, false); + + if (tg3_flag(tp, CPMU_PRESENT)) { + int i; + u32 status; + + tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER); + + /* Wait for up to 40 microseconds to acquire lock. */ + for (i = 0; i < 4; i++) { + status = tr32(TG3_CPMU_MUTEX_GNT); + if (status == CPMU_MUTEX_GNT_DRIVER) + break; + udelay(10); + } + + if (status != CPMU_MUTEX_GNT_DRIVER) { + err = TG3_LOOPBACK_FAILED; + goto done; + } + + /* Turn off link-based power management. */ + cpmuctrl = tr32(TG3_CPMU_CTRL); + tw32(TG3_CPMU_CTRL, + cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE | + CPMU_CTRL_LINK_AWARE_MODE)); + } + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK)) + err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT; + + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK)) + err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT; + + if (tg3_flag(tp, CPMU_PRESENT)) { + tw32(TG3_CPMU_CTRL, cpmuctrl); + + /* Release the mutex */ + tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); + } + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !tg3_flag(tp, USE_PHYLIB)) { + if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK)) + err |= TG3_STD_LOOPBACK_FAILED << + TG3_PHY_LOOPBACK_SHIFT; + if (tg3_flag(tp, TSO_CAPABLE) && + tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK)) + err |= TG3_TSO_LOOPBACK_FAILED << + TG3_PHY_LOOPBACK_SHIFT; + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK)) + err |= TG3_JMB_LOOPBACK_FAILED << + TG3_PHY_LOOPBACK_SHIFT; + } + + /* Re-enable gphy autopowerdown. 
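(undoing the tg3_phy_toggle_apd(tp, false) call made before the loopback runs)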
*/ + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + +done: + tp->phy_flags |= eee_cap; + + return err; +} + +static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, + u64 *data) +{ + struct tg3 *tp = netdev_priv(dev); + + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && + tg3_power_up(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + memset(data, 1, sizeof(u64) * TG3_NUM_TEST); + return; + } + + memset(data, 0, sizeof(u64) * TG3_NUM_TEST); + + if (tg3_test_nvram(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[0] = 1; + } + if (tg3_test_link(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[1] = 1; + } + if (etest->flags & ETH_TEST_FL_OFFLINE) { + int err, err2 = 0, irq_sync = 0; + + if (netif_running(dev)) { + tg3_phy_stop(tp); + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + tg3_halt(tp, RESET_KIND_SUSPEND, 1); + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!tg3_flag(tp, 5705_PLUS)) + tg3_halt_cpu(tp, TX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + tg3_phy_reset(tp); + + if (tg3_test_registers(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[2] = 1; + } + if (tg3_test_memory(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[3] = 1; + } + if ((data[4] = tg3_test_loopback(tp)) != 0) + etest->flags |= ETH_TEST_FL_FAILED; + + tg3_full_unlock(tp); + + if (tg3_test_interrupt(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[5] = 1; + } + + tg3_full_lock(tp, 0); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + if (netif_running(dev)) { + tg3_flag_set(tp, INIT_COMPLETE); + err2 = tg3_restart_hw(tp, 1); + if (!err2) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + + if (irq_sync && !err2) + tg3_phy_start(tp); + } + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + tg3_power_down(tp); + +} + +static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_mii_ioctl(phydev, ifr, cmd); + } + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = tp->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u32 mii_regval; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + break; /* We have no PHY */ + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&tp->lock); + err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); + spin_unlock_bh(&tp->lock); + + data->val_out = mii_regval; + + return err; + } + + case SIOCSMIIREG: + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + break; /* We have no PHY */ + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&tp->lock); + err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); + spin_unlock_bh(&tp->lock); + + return err; + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; +} + +static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct tg3 *tp = netdev_priv(dev); + + memcpy(ec, &tp->coal, sizeof(*ec)); + return 0; +} + +static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct tg3 *tp = netdev_priv(dev); + u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; + u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; + + if (!tg3_flag(tp, 5705_PLUS)) { + max_rxcoal_tick_int = 
MAX_RXCOAL_TICK_INT; + max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; + max_stat_coal_ticks = MAX_STAT_COAL_TICKS; + min_stat_coal_ticks = MIN_STAT_COAL_TICKS; + } + + if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || + (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || + (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || + (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || + (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || + (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || + (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || + (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || + (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || + (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) + return -EINVAL; + + /* No rx interrupts will be generated if both are zero */ + if ((ec->rx_coalesce_usecs == 0) && + (ec->rx_max_coalesced_frames == 0)) + return -EINVAL; + + /* No tx interrupts will be generated if both are zero */ + if ((ec->tx_coalesce_usecs == 0) && + (ec->tx_max_coalesced_frames == 0)) + return -EINVAL; + + /* Only copy relevant parameters, ignore all others. */ + tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; + tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; + tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; + tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; + tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; + tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; + tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; + tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; + + if (netif_running(dev)) { + tg3_full_lock(tp, 0); + __tg3_set_coalesce(tp, &tp->coal); + tg3_full_unlock(tp); + } + return 0; +} + +static const struct ethtool_ops tg3_ethtool_ops = { + .get_settings = tg3_get_settings, + .set_settings = tg3_set_settings, + .get_drvinfo = tg3_get_drvinfo, + .get_regs_len = tg3_get_regs_len, + .get_regs = tg3_get_regs, + .get_wol = tg3_get_wol, + .set_wol = tg3_set_wol, + .get_msglevel = tg3_get_msglevel, + .set_msglevel = tg3_set_msglevel, + .nway_reset = tg3_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = tg3_get_eeprom_len, + .get_eeprom = tg3_get_eeprom, + .set_eeprom = tg3_set_eeprom, + .get_ringparam = tg3_get_ringparam, + .set_ringparam = tg3_set_ringparam, + .get_pauseparam = tg3_get_pauseparam, + .set_pauseparam = tg3_set_pauseparam, + .self_test = tg3_self_test, + .get_strings = tg3_get_strings, + .set_phys_id = tg3_set_phys_id, + .get_ethtool_stats = tg3_get_ethtool_stats, + .get_coalesce = tg3_get_coalesce, + .set_coalesce = tg3_set_coalesce, + .get_sset_count = tg3_get_sset_count, +}; + +static void __devinit tg3_get_eeprom_size(struct tg3 *tp) +{ + u32 cursize, val, magic; + + tp->nvram_size = EEPROM_CHIP_SIZE; + + if (tg3_nvram_read(tp, 0, &magic) != 0) + return; + + if ((magic != TG3_EEPROM_MAGIC) && + ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && + ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) + return; + + /* + * Size the chip by reading offsets at increasing powers of two. + * When we encounter our validation signature, we know the addressing + * has wrapped around, and thus have our chip size. 
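+ * Concretely: reads at 0x10, 0x20, 0x40, ... are compared against the + * magic word read at offset 0, and the first offset that reads back + * that magic is the wrap point, i.e. the chip size in bytes.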
+ */ + cursize = 0x10; + + while (cursize < tp->nvram_size) { + if (tg3_nvram_read(tp, cursize, &val) != 0) + return; + + if (val == magic) + break; + + cursize <<= 1; + } + + tp->nvram_size = cursize; +} + +static void __devinit tg3_get_nvram_size(struct tg3 *tp) +{ + u32 val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) + return; + + /* Selfboot format */ + if (val != TG3_EEPROM_MAGIC) { + tg3_get_eeprom_size(tp); + return; + } + + if (tg3_nvram_read(tp, 0xf0, &val) == 0) { + if (val != 0) { + /* This is confusing. We want to operate on the + * 16-bit value at offset 0xf2. The tg3_nvram_read() + * call will read from NVRAM and byteswap the data + * according to the byteswapping settings for all + * other register accesses. This ensures the data we + * want will always reside in the lower 16-bits. + * However, the data in NVRAM is in LE format, which + * means the data from the NVRAM read will always be + * opposite the endianness of the CPU. The 16-bit + * byteswap then brings the data to CPU endianness. + */ + tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; + return; + } + } + tp->nvram_size = TG3_NVRAM_SIZE_512KB; +} + +static void __devinit tg3_get_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { + tg3_flag_set(tp, FLASH); + } else { + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + tg3_flag(tp, 5780_CLASS)) { + switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { + case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; + break; + case FLASH_VENDOR_ATMEL_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_ST: + tp->nvram_jedecnum = JEDEC_ST; + tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_SAIFUN: + tp->nvram_jedecnum = JEDEC_SAIFUN; + tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; + break; + case FLASH_VENDOR_SST_SMALL: + case FLASH_VENDOR_SST_LARGE: + tp->nvram_jedecnum = JEDEC_SST; + tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; + break; + } + } else { + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + } +} + +static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) +{ + switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { + case FLASH_5752PAGE_SIZE_256: + tp->nvram_pagesize = 256; + break; + case FLASH_5752PAGE_SIZE_512: + tp->nvram_pagesize = 512; + break; + case FLASH_5752PAGE_SIZE_1K: + tp->nvram_pagesize = 1024; + break; + case FLASH_5752PAGE_SIZE_2K: + tp->nvram_pagesize = 2048; + break; + case FLASH_5752PAGE_SIZE_4K: + tp->nvram_pagesize = 4096; + break; + case FLASH_5752PAGE_SIZE_264: + tp->nvram_pagesize = 264; + break; + case FLASH_5752PAGE_SIZE_528: + tp->nvram_pagesize = 528; + break; + } +} + +static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) + tg3_flag_set(tp, PROTECTED_NVRAM); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: + case 
FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + break; + } + + if (tg3_flag(tp, FLASH)) { + tg3_nvram_get_pagesize(tp, nvcfg1); + } else { + /* For eeprom, set pagesize to maximum eeprom size */ + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } +} + +static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_5: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || + nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) + tp->nvram_size = (protect ? 0x3e200 : + TG3_NVRAM_SIZE_512KB); + else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_128KB); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_128KB); + else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? 
+ TG3_NVRAM_SIZE_128KB : + TG3_NVRAM_SIZE_512KB); + break; + } +} + +static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + break; + } +} + +static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + tp->nvram_pagesize = 256; + break; + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + break; + } + + if (protect) { + tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); + } else { + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_size = TG3_NVRAM_SIZE_2MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + } + } +} + +static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) +{ + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; +} + +static void __devinit 
tg3_get_57780_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ST_M45PE10: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + case FLASH_5752VENDOR_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} + + +static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_EEPROM: + case FLASH_5717VENDOR_MICRO_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5717VENDOR_ATMEL_MDB011D: + case FLASH_5717VENDOR_ATMEL_ADB011B: + case FLASH_5717VENDOR_ATMEL_ADB011D: + case FLASH_5717VENDOR_ATMEL_MDB021D: + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + case FLASH_5717VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_MDB021D: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5717VENDOR_ST_M_M25PE10: + case FLASH_5717VENDOR_ST_A_M25PE10: + case FLASH_5717VENDOR_ST_M_M45PE10: + case FLASH_5717VENDOR_ST_A_M45PE10: + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + case 
FLASH_5717VENDOR_ST_25USPT: + case FLASH_5717VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} + +static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, nvmpinstrp; + + nvcfg1 = tr32(NVRAM_CFG1); + nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; + + switch (nvmpinstrp) { + case FLASH_5720_EEPROM_HD: + case FLASH_5720_EEPROM_LD: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + if (nvmpinstrp == FLASH_5720_EEPROM_HD) + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + else + tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; + return; + case FLASH_5720VENDOR_M_ATMEL_DB011D: + case FLASH_5720VENDOR_A_ATMEL_DB011B: + case FLASH_5720VENDOR_A_ATMEL_DB011D: + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + case FLASH_5720VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5720VENDOR_M_ST_M25PE10: + case FLASH_5720VENDOR_M_ST_M45PE10: + case FLASH_5720VENDOR_A_ST_M25PE10: + case FLASH_5720VENDOR_A_ST_M45PE10: + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + case FLASH_5720VENDOR_ST_25USPT: + case FLASH_5720VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case 
FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} + +/* Chips other than 5700/5701 use the NVRAM for fetching info. */ +static void __devinit tg3_nvram_init(struct tg3 *tp) +{ + tw32_f(GRC_EEPROM_ADDR, + (EEPROM_ADDR_FSM_RESET | + (EEPROM_DEFAULT_CLOCK_PERIOD << + EEPROM_ADDR_CLKPERD_SHIFT))); + + msleep(1); + + /* Enable seeprom accesses. */ + tw32_f(GRC_LOCAL_CTRL, + tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); + udelay(100); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { + tg3_flag_set(tp, NVRAM); + + if (tg3_nvram_lock(tp)) { + netdev_warn(tp->dev, + "Cannot get nvram lock, %s failed\n", + __func__); + return; + } + tg3_enable_nvram_access(tp); + + tp->nvram_size = 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + tg3_get_5752_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tg3_get_5755_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_get_5787_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tg3_get_5761_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_get_5906_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_get_57780_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_get_5717_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_get_5720_nvram_info(tp); + else + tg3_get_nvram_info(tp); + + if (tp->nvram_size == 0) + tg3_get_nvram_size(tp); + + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); + + } else { + tg3_flag_clear(tp, NVRAM); + tg3_flag_clear(tp, NVRAM_BUFFERED); + + tg3_get_eeprom_size(tp); + } +} + +static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, + u32 offset, u32 len, u8 *buf) +{ + int i, j, rc = 0; + u32 val; + + for (i = 0; i < len; i += 4) { + u32 addr; + __be32 data; + + addr = offset + i; + + memcpy(&data, buf + i, 4); + + /* + * The SEEPROM interface expects the data to always be opposite + * the native endian format. We accomplish this by reversing + * all the operations that would have been performed on the + * data from a call to tg3_nvram_read_be32(). 
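+ * That is what swab32(be32_to_cpu(data)) below does: it rebuilds the + * raw little-endian word so that a later tg3_nvram_read_be32() of + * this offset returns the caller's bytes unchanged.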
+ */ + tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); + + val = tr32(GRC_EEPROM_ADDR); + tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); + + val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, val | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + (addr & EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_START | + EEPROM_ADDR_WRITE); + + for (j = 0; j < 1000; j++) { + val = tr32(GRC_EEPROM_ADDR); + + if (val & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(val & EEPROM_ADDR_COMPLETE)) { + rc = -EBUSY; + break; + } + } + + return rc; +} + +/* offset and length are dword aligned */ +static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) +{ + int ret = 0; + u32 pagesize = tp->nvram_pagesize; + u32 pagemask = pagesize - 1; + u32 nvram_cmd; + u8 *tmp; + + tmp = kmalloc(pagesize, GFP_KERNEL); + if (tmp == NULL) + return -ENOMEM; + + while (len) { + int j; + u32 phy_addr, page_off, size; + + phy_addr = offset & ~pagemask; + + for (j = 0; j < pagesize; j += 4) { + ret = tg3_nvram_read_be32(tp, phy_addr + j, + (__be32 *) (tmp + j)); + if (ret) + break; + } + if (ret) + break; + + page_off = offset & pagemask; + size = pagesize; + if (len < size) + size = len; + + len -= size; + + memcpy(tmp + page_off, buf, size); + + offset = offset + (pagesize - page_off); + + tg3_enable_nvram_access(tp); + + /* + * Before we can erase the flash page, we need + * to issue a special "write enable" command. + */ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + /* Erase the target page */ + tw32(NVRAM_ADDR, phy_addr); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + /* Issue another write enable to start the write. 
*/ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + for (j = 0; j < pagesize; j += 4) { + __be32 data; + + data = *((__be32 *) (tmp + j)); + + tw32(NVRAM_WRDATA, be32_to_cpu(data)); + + tw32(NVRAM_ADDR, phy_addr + j); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | + NVRAM_CMD_WR; + + if (j == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + else if (j == (pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) + break; + } + if (ret) + break; + } + + nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; + tg3_nvram_exec_cmd(tp, nvram_cmd); + + kfree(tmp); + + return ret; +} + +/* offset and length are dword aligned */ +static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) +{ + int i, ret = 0; + + for (i = 0; i < len; i += 4, offset += 4) { + u32 page_off, phy_addr, nvram_cmd; + __be32 data; + + memcpy(&data, buf + i, 4); + tw32(NVRAM_WRDATA, be32_to_cpu(data)); + + page_off = offset % tp->nvram_pagesize; + + phy_addr = tg3_nvram_phys_addr(tp, offset); + + tw32(NVRAM_ADDR, phy_addr); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; + + if (page_off == 0 || i == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + if (page_off == (tp->nvram_pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if (i == (len - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && + !tg3_flag(tp, 5755_PLUS) && + (tp->nvram_jedecnum == JEDEC_ST) && + (nvram_cmd & NVRAM_CMD_FIRST)) { + + if ((ret = tg3_nvram_exec_cmd(tp, + NVRAM_CMD_WREN | NVRAM_CMD_GO | + NVRAM_CMD_DONE))) + + break; + } + if (!tg3_flag(tp, FLASH)) { + /* We always do complete word writes to eeprom. */ + nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); + } + + if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) + break; + } + return ret; +} + +/* offset and length are dword aligned */ +static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) +{ + int ret; + + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & + ~GRC_LCLCTRL_GPIO_OUTPUT1); + udelay(40); + } + + if (!tg3_flag(tp, NVRAM)) { + ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); + } else { + u32 grc_mode; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) + tw32(NVRAM_WRITE1, 0x406); + + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); + + if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { + ret = tg3_nvram_write_block_buffered(tp, offset, len, + buf); + } else { + ret = tg3_nvram_write_block_unbuffered(tp, offset, len, + buf); + } + + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); + + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); + } + + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(40); + } + + return ret; +} + +struct subsys_tbl_ent { + u16 subsys_vendor, subsys_devid; + u32 phy_id; +}; + +static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = { + /* Broadcom boards. 
*/ + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, + + /* 3com boards. */ + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, + + /* DELL boards. */ + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, + + /* Compaq boards. */ + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, + + /* IBM boards. */ + { TG3PCI_SUBVENDOR_ID_IBM, + TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } +}; + +static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { + if ((subsys_id_to_phy_id[i].subsys_vendor == + tp->pdev->subsystem_vendor) && + (subsys_id_to_phy_id[i].subsys_devid == + tp->pdev->subsystem_device)) + return &subsys_id_to_phy_id[i]; + } + return NULL; +} + +static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) +{ + u32 val; + + tp->phy_id = TG3_PHY_ID_INVALID; + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + /* Assume an onboard device and WOL capable by default. 
*/ + tg3_flag_set(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, WOL_CAP); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); + } + val = tr32(VCPU_CFGSHDW); + if (val & VCPU_CFGSHDW_ASPM_DBNC) + tg3_flag_set(tp, ASPM_WORKAROUND); + if ((val & VCPU_CFGSHDW_WOL_ENABLE) && + (val & VCPU_CFGSHDW_WOL_MAGPKT)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } + goto done; + } + + tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); + if (val == NIC_SRAM_DATA_SIG_MAGIC) { + u32 nic_cfg, led_cfg; + u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; + int eeprom_phy_serdes = 0; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); + tp->nic_sram_data_cfg = nic_cfg; + + tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); + ver >>= NIC_SRAM_DATA_VER_SHIFT; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 && + (ver > 0) && (ver < 0x100)) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); + + if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == + NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) + eeprom_phy_serdes = 1; + + tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); + if (nic_phy_id != 0) { + u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; + u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; + + eeprom_phy_id = (id1 >> 16) << 10; + eeprom_phy_id |= (id2 & 0xfc00) << 16; + eeprom_phy_id |= (id2 & 0x03ff) << 0; + } else + eeprom_phy_id = 0; + + tp->phy_id = eeprom_phy_id; + if (eeprom_phy_serdes) { + if (!tg3_flag(tp, 5705_PLUS)) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + else + tp->phy_flags |= TG3_PHYFLG_MII_SERDES; + } + + if (tg3_flag(tp, 5750_PLUS)) + led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | + SHASTA_EXT_LED_MODE_MASK); + else + led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; + + switch (led_cfg) { + default: + case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + break; + + case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: + tp->led_ctrl = LED_CTRL_MODE_PHY_2; + break; + + case NIC_SRAM_DATA_CFG_LED_MODE_MAC: + tp->led_ctrl = LED_CTRL_MODE_MAC; + + /* Default to PHY_1_MODE if 0 (MAC_MODE) is + * read on some older 5700/5701 bootcode. 
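The eeprom_phy_id assembly above is the same bit packing that tg3_phy_probe() later applies to the raw MII_PHYSID1/MII_PHYSID2 registers; the NVRAM word simply carries the PHYSID1 half in its upper 16 bits. A sketch of the shared packing, using a hypothetical helper name (the driver open-codes it in both places):

    /* Fold the two MII ID registers into the driver's TG3_PHY_ID_*
     * layout; physid1/physid2 follow the IEEE 802.3 clause 22 format.
     */
    static u32 tg3_pack_phy_id(u32 physid1, u32 physid2)
    {
        u32 id = (physid1 & 0xffff) << 10;  /* OUI bits 3..18 */

        id |= (physid2 & 0xfc00) << 16;     /* OUI bits 19..24 */
        id |= (physid2 & 0x03ff) << 0;      /* model and revision */
        return id;
    }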
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5701) + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + break; + + case SHASTA_EXT_LED_SHARED: + tp->led_ctrl = LED_CTRL_MODE_SHARED; + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5750_A1) + tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | + LED_CTRL_MODE_PHY_2); + break; + + case SHASTA_EXT_LED_MAC: + tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; + break; + + case SHASTA_EXT_LED_COMBO: + tp->led_ctrl = LED_CTRL_MODE_COMBO; + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) + tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | + LED_CTRL_MODE_PHY_2); + break; + + } + + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && + tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) + tp->led_ctrl = LED_CTRL_MODE_PHY_2; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { + tg3_flag_set(tp, EEPROM_WRITE_PROT); + if ((tp->pdev->subsystem_vendor == + PCI_VENDOR_ID_ARIMA) && + (tp->pdev->subsystem_device == 0x205a || + tp->pdev->subsystem_device == 0x2063)) + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + } else { + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); + } + + if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { + tg3_flag_set(tp, ENABLE_ASF); + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); + } + + if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ENABLE_APE); + + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && + !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) + tg3_flag_clear(tp, WOL_CAP); + + if (tg3_flag(tp, WOL_CAP) && + (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } + + if (cfg2 & (1 << 17)) + tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; + + /* serdes signal pre-emphasis in register 0x590 set by */ + /* bootcode if bit 18 is set */ + if (cfg2 & (1 << 18)) + tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; + + if ((tg3_flag(tp, 57765_PLUS) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && + (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) + tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; + + if (tg3_flag(tp, PCI_EXPRESS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS)) { + u32 cfg3; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); + if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) + tg3_flag_set(tp, ASPM_WORKAROUND); + } + + if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) + tg3_flag_set(tp, RGMII_INBAND_DISABLE); + if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) + tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); + if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) + tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); + } +done: + if (tg3_flag(tp, WOL_CAP)) + device_set_wakeup_enable(&tp->pdev->dev, + tg3_flag(tp, WOL_ENABLE)); + else + device_set_wakeup_capable(&tp->pdev->dev, false); +} + +static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) +{ + int i; + u32 val; + + tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); + tw32(OTP_CTRL, cmd); + + /* Wait for up to 1 ms for command to execute. */ + for (i = 0; i < 100; i++) { + val = tr32(OTP_STATUS); + if (val & OTP_STATUS_CMD_DONE) + break; + udelay(10); + } + + return (val & OTP_STATUS_CMD_DONE) ? 
0 : -EBUSY; +} + +/* Read the gphy configuration from the OTP region of the chip. The gphy + * configuration is a 32-bit value that straddles the alignment boundary. + * We do two 32-bit reads and then shift and merge the results. + */ +static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) +{ + u32 bhalf_otp, thalf_otp; + + tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) + return 0; + + tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) + return 0; + + thalf_otp = tr32(OTP_READ_DATA); + + tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) + return 0; + + bhalf_otp = tr32(OTP_READ_DATA); + + return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); +} + +static void __devinit tg3_phy_init_link_config(struct tg3 *tp) +{ + u32 adv = ADVERTISED_Autoneg | + ADVERTISED_Pause; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + adv |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + adv |= ADVERTISED_FIBRE; + + tp->link_config.advertising = adv; + tp->link_config.speed = SPEED_INVALID; + tp->link_config.duplex = DUPLEX_INVALID; + tp->link_config.autoneg = AUTONEG_ENABLE; + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; + tp->link_config.orig_speed = SPEED_INVALID; + tp->link_config.orig_duplex = DUPLEX_INVALID; + tp->link_config.orig_autoneg = AUTONEG_INVALID; +} + +static int __devinit tg3_phy_probe(struct tg3 *tp) +{ + u32 hw_phy_id_1, hw_phy_id_2; + u32 hw_phy_id, hw_phy_id_masked; + int err; + + /* flow control autonegotiation is default behavior */ + tg3_flag_set(tp, PAUSE_AUTONEG); + tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + + if (tg3_flag(tp, USE_PHYLIB)) + return tg3_phy_init(tp); + + /* Reading the PHY ID register can conflict with ASF + * firmware access to the PHY hardware. + */ + err = 0; + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { + hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; + } else { + /* Now read the physical PHY_ID from the chip and verify + * that it is sane. If it doesn't look good, we fall back + * to either the hard-coded table based PHY_ID and failing + * that the value found in the eeprom area. + */ + err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); + err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); + + hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; + hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; + hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; + + hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; + } + + if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { + tp->phy_id = hw_phy_id; + if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + else + tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; + } else { + if (tp->phy_id != TG3_PHY_ID_INVALID) { + /* Do nothing, phy ID already set up in + * tg3_get_eeprom_hw_cfg(). + */ + } else { + struct subsys_tbl_ent *p; + + /* No eeprom signature? Try the hardcoded + * subsys device table. 
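tg3_read_otp_phycfg() above is a straddled read: the 32-bit gphy value spans two aligned OTP words, with its upper half in the low 16 bits of the first word and its lower half in the high 16 bits of the second. A minimal sketch of the merge, with illustrative values:

    /* Merge a 32-bit value that straddles two aligned 32-bit words. */
    static u32 merge_straddled_u32(u32 first_word, u32 second_word)
    {
        return ((first_word & 0x0000ffff) << 16) | (second_word >> 16);
    }

    /* e.g. merge_straddled_u32(0x00001234, 0x56780000) == 0x12345678 */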
+ */ + p = tg3_lookup_by_subsys(tp); + if (!p) + return -ENODEV; + + tp->phy_id = p->phy_id; + if (!tp->phy_id || + tp->phy_id == TG3_PHY_ID_BCM8002) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + } + } + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 || + (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 && + tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && + tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) + tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + + tg3_phy_init_link_config(tp); + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + !tg3_flag(tp, ENABLE_APE) && + !tg3_flag(tp, ENABLE_ASF)) { + u32 bmsr, mask; + + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) + goto skip_phy_reset; + + err = tg3_phy_reset(tp); + if (err) + return err; + + tg3_phy_set_wirespeed(tp); + + mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); + if (!tg3_copper_is_advertising_all(tp, mask)) { + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); + + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } + } + +skip_phy_reset: + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + + err = tg3_init_5401phy_dsp(tp); + } + + return err; +} + +static void __devinit tg3_read_vpd(struct tg3 *tp) +{ + u8 *vpd_data; + unsigned int block_end, rosize, len; + u32 vpdlen; + int j, i = 0; + + vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); + if (!vpd_data) + goto out_no_vpd; + + i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto out_not_found; + + rosize = pci_vpd_lrdt_size(&vpd_data[i]); + block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; + i += PCI_VPD_LRDT_TAG_SIZE; + + if (block_end > vpdlen) + goto out_not_found; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (j > 0) { + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end || len != 4 || + memcmp(&vpd_data[j], "1028", 4)) + goto partno; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (j < 0) + goto partno; + + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end) + goto partno; + + memcpy(tp->fw_ver, &vpd_data[j], len); + strncat(tp->fw_ver, " bc ", vpdlen - len - 1); + } + +partno: + i = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_PARTNO); + if (i < 0) + goto out_not_found; + + len = pci_vpd_info_field_size(&vpd_data[i]); + + i += PCI_VPD_INFO_FLD_HDR_SIZE; + if (len > TG3_BPN_SIZE || + (len + i) > vpdlen) + goto out_not_found; + + memcpy(tp->board_part_number, &vpd_data[i], len); + +out_not_found: + kfree(vpd_data); + if (tp->board_part_number[0]) + return; + +out_no_vpd: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717) + strcpy(tp->board_part_number, "BCM5717"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) + strcpy(tp->board_part_number, "BCM5718"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) + 
strcpy(tp->board_part_number, "BCM57780"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) + strcpy(tp->board_part_number, "BCM57760"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) + strcpy(tp->board_part_number, "BCM57790"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) + strcpy(tp->board_part_number, "BCM57788"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) + strcpy(tp->board_part_number, "BCM57761"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) + strcpy(tp->board_part_number, "BCM57765"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) + strcpy(tp->board_part_number, "BCM57781"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) + strcpy(tp->board_part_number, "BCM57785"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) + strcpy(tp->board_part_number, "BCM57791"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + strcpy(tp->board_part_number, "BCM57795"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + strcpy(tp->board_part_number, "BCM95906"); + } else { +nomatch: + strcpy(tp->board_part_number, "none"); + } +} + +static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) +{ + u32 val; + + if (tg3_nvram_read(tp, offset, &val) || + (val & 0xfc000000) != 0x0c000000 || + tg3_nvram_read(tp, offset + 4, &val) || + val != 0) + return 0; + + return 1; +} + +static void __devinit tg3_read_bc_ver(struct tg3 *tp) +{ + u32 val, offset, start, ver_offset; + int i, dst_off; + bool newver = false; + + if (tg3_nvram_read(tp, 0xc, &offset) || + tg3_nvram_read(tp, 0x4, &start)) + return; + + offset = tg3_nvram_logical_addr(tp, offset); + + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val & 0xfc000000) == 0x0c000000) { + if (tg3_nvram_read(tp, offset + 4, &val)) + return; + + if (val == 0) + newver = true; + } + + dst_off = strlen(tp->fw_ver); + + if (newver) { + if (TG3_VER_SIZE - dst_off < 16 || + tg3_nvram_read(tp, offset + 8, &ver_offset)) + return; + + offset = offset + ver_offset - start; + for (i = 0; i < 16; i += 4) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset + i, &v)) + return; + + memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); + } + } else { + u32 major, minor; + + if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) + return; + + major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> + TG3_NVM_BCVER_MAJSFT; + minor = ver_offset & TG3_NVM_BCVER_MINMSK; + snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, + "v%d.%02d", major, minor); + } +} + +static void __devinit tg3_read_hwsb_ver(struct tg3 *tp) +{ + u32 val, major, minor; + + /* Use native endian representation */ + if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) + return; + + major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> + TG3_NVM_HWSB_CFG1_MAJSFT; + minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> + TG3_NVM_HWSB_CFG1_MINSFT; + + snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); +} + +static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) +{ + u32 offset, major, minor, build; + + strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); + + if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) + return; + + switch (val & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + offset = TG3_EEPROM_SB_F1R0_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_2: + offset = TG3_EEPROM_SB_F1R2_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_3: + 
offset = TG3_EEPROM_SB_F1R3_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_4: + offset = TG3_EEPROM_SB_F1R4_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_5: + offset = TG3_EEPROM_SB_F1R5_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_6: + offset = TG3_EEPROM_SB_F1R6_EDH_OFF; + break; + default: + return; + } + + if (tg3_nvram_read(tp, offset, &val)) + return; + + build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> + TG3_EEPROM_SB_EDH_BLD_SHFT; + major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> + TG3_EEPROM_SB_EDH_MAJ_SHFT; + minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; + + if (minor > 99 || build > 26) + return; + + offset = strlen(tp->fw_ver); + snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, + " v%d.%02d", major, minor); + + if (build > 0) { + offset = strlen(tp->fw_ver); + if (offset < TG3_VER_SIZE - 1) + tp->fw_ver[offset] = 'a' + build - 1; + } +} + +static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) +{ + u32 val, offset, start; + int i, vlen; + + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) + break; + } + + if (offset == TG3_NVM_DIR_END) + return; + + if (!tg3_flag(tp, 5705_PLUS)) + start = 0x08000000; + else if (tg3_nvram_read(tp, offset - 4, &start)) + return; + + if (tg3_nvram_read(tp, offset + 4, &offset) || + !tg3_fw_img_is_valid(tp, offset) || + tg3_nvram_read(tp, offset + 8, &val)) + return; + + offset += val - start; + + vlen = strlen(tp->fw_ver); + + tp->fw_ver[vlen++] = ','; + tp->fw_ver[vlen++] = ' '; + + for (i = 0; i < 4; i++) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset, &v)) + return; + + offset += sizeof(v); + + if (vlen > TG3_VER_SIZE - sizeof(v)) { + memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); + break; + } + + memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); + vlen += sizeof(v); + } +} + +static void __devinit tg3_read_dash_ver(struct tg3 *tp) +{ + int vlen; + u32 apedata; + char *fwtype; + + if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); + + if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) { + tg3_flag_set(tp, APE_HAS_NCSI); + fwtype = "NCSI"; + } else { + fwtype = "DASH"; + } + + vlen = strlen(tp->fw_ver); + + snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", + fwtype, + (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, + (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, + (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, + (apedata & APE_FW_VERSION_BLDMSK)); +} + +static void __devinit tg3_read_fw_ver(struct tg3 *tp) +{ + u32 val; + bool vpd_vers = false; + + if (tp->fw_ver[0] != 0) + vpd_vers = true; + + if (tg3_flag(tp, NO_NVRAM)) { + strcat(tp->fw_ver, "sb"); + return; + } + + if (tg3_nvram_read(tp, 0, &val)) + return; + + if (val == TG3_EEPROM_MAGIC) + tg3_read_bc_ver(tp); + else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) + tg3_read_sb_ver(tp, val); + else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + tg3_read_hwsb_ver(tp); + else + return; + + if (vpd_vers) + goto done; + + if (tg3_flag(tp, ENABLE_APE)) { + if (tg3_flag(tp, ENABLE_ASF)) + tg3_read_dash_ver(tp); + } else if (tg3_flag(tp, ENABLE_ASF)) { + 
tg3_read_mgmtfw_ver(tp); + } + +done: + tp->fw_ver[TG3_VER_SIZE - 1] = 0; +} + +static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); + +static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) +{ + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + return TG3_RX_RET_MAX_SIZE_5717; + else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) + return TG3_RX_RET_MAX_SIZE_5700; + else + return TG3_RX_RET_MAX_SIZE_5705; +} + +static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, + { }, +}; + +static int __devinit tg3_get_invariants(struct tg3 *tp) +{ + u32 misc_ctrl_reg; + u32 pci_state_reg, grc_misc_cfg; + u32 val; + u16 pci_cmd; + int err; + + /* Force memory write invalidate off. If we leave it on, + * then on 5700_BX chips we have to enable a workaround. + * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary + * to match the cacheline size. The Broadcom driver has this + * workaround but turns MWI off all the time, so it never uses + * it. This seems to suggest that the workaround is insufficient. + */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_INVALIDATE; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + + /* Important! -- Make sure register accesses are byteswapped + * correctly. Also, for those chips that require it, make + * sure that indirect register accesses are enabled before + * the first operation. + */ + pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + &misc_ctrl_reg); + tp->misc_host_ctrl |= (misc_ctrl_reg & + MISC_HOST_CTRL_CHIPREV); + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + tp->pci_chip_rev_id = (misc_ctrl_reg >> + MISC_HOST_CTRL_CHIPREV_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { + u32 prod_id_asic_rev; + + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) + pci_read_config_dword(tp->pdev, + TG3PCI_GEN2_PRODID_ASICREV, + &prod_id_asic_rev); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + pci_read_config_dword(tp->pdev, + TG3PCI_GEN15_PRODID_ASICREV, + &prod_id_asic_rev); + else + pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, + &prod_id_asic_rev); + + tp->pci_chip_rev_id = prod_id_asic_rev; + } + + /* Wrong chip ID in 5752 A0. This code can be removed later + * as A0 is not in production. + */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) + tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; + + /* If we have 5702/03 A1 or A2 on certain ICH chipsets, + * we need to disable memory and use config. cycles + * only to access all registers. The 5702/03 chips + * can mistakenly decode the special cycles from the + * ICH chipsets as memory write cycles, causing corruption + * of register and memory space. Only certain ICH bridges + * will drive special cycles with non-zero data during the + * address phase which can fall within the 5703's address + * range.
This is not an ICH bug as the PCI spec allows + * non-zero address during special cycles. However, only + * these ICH bridges are known to drive non-zero addresses + * during special cycles. + * + * Since special cycles do not cross PCI bridges, we only + * enable this workaround if the 5703 is on the secondary + * bus of these ICH bridges. + */ + if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || + (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + u32 rev; + } ich_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, + 0xa }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, + PCI_ANY_ID }, + { }, + }; + struct tg3_dev_id *pci_id = &ich_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (pci_id->rev != PCI_ANY_ID) { + if (bridge->revision > pci_id->rev) + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number == + tp->pdev->bus->number)) { + tg3_flag_set(tp, ICH_WORKAROUND); + pci_dev_put(bridge); + break; + } + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + } bridge_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, + { }, + }; + struct tg3_dev_id *pci_id = &bridge_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, + pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number <= + tp->pdev->bus->number) && + (bridge->subordinate->subordinate >= + tp->pdev->bus->number)) { + tg3_flag_set(tp, 5701_DMA_BUG); + pci_dev_put(bridge); + break; + } + } + } + + /* The EPB bridge inside 5714, 5715, and 5780 cannot support + * DMA addresses > 40-bit. This bridge may have other additional + * 57xx devices behind it in some 4-port NIC designs for example. + * Any tg3 device found behind the bridge will also need the 40-bit + * DMA workaround. 
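The PXH scan above and the EPB scan just below use the same containment test: a device sits behind a PCI bridge exactly when its bus number falls inside the bridge's [secondary, subordinate] bus-number window. Expressed as a hypothetical helper (the driver open-codes the comparison in each loop):

    /* True when dev is on a bus reachable through bridge. */
    static bool tg3_behind_bridge(struct pci_dev *bridge,
                                  struct pci_dev *dev)
    {
        struct pci_bus *sec = bridge->subordinate;

        return sec && sec->number <= dev->bus->number &&
               dev->bus->number <= sec->subordinate;
    }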
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + tg3_flag_set(tp, 5780_CLASS); + tg3_flag_set(tp, 40BIT_DMA_BUG); + tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); + } else { + struct pci_dev *bridge = NULL; + + do { + bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, + PCI_DEVICE_ID_SERVERWORKS_EPB, + bridge); + if (bridge && bridge->subordinate && + (bridge->subordinate->number <= + tp->pdev->bus->number) && + (bridge->subordinate->subordinate >= + tp->pdev->bus->number)) { + tg3_flag_set(tp, 40BIT_DMA_BUG); + pci_dev_put(bridge); + break; + } + } while (bridge); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) + tp->pdev_peer = tg3_find_peer(tp); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_flag_set(tp, 5717_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || + tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, 57765_PLUS); + + /* Intentionally exclude ASIC_REV_5906 */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, 5755_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || + tg3_flag(tp, 5755_PLUS) || + tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, 5750_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, 5705_PLUS); + + /* Determine TSO capabilities */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) + ; /* Do nothing. HW bug. 
*/ + else if (tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, HW_TSO_3); + else if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_flag_set(tp, HW_TSO_2); + else if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, HW_TSO_1); + tg3_flag_set(tp, TSO_BUG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && + tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) + tg3_flag_clear(tp, TSO_BUG); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + tg3_flag_set(tp, TSO_BUG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) + tp->fw_needed = FIRMWARE_TG3TSO5; + else + tp->fw_needed = FIRMWARE_TG3TSO; + } + + /* Selectively allow TSO based on operating conditions */ + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3) || + (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) + tg3_flag_set(tp, TSO_CAPABLE); + else { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) + tp->fw_needed = FIRMWARE_TG3; + + tp->irq_max = 1; + + if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSI); + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && + tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && + tp->pdev_peer == tp->pdev)) + tg3_flag_clear(tp, SUPPORT_MSI); + + if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tg3_flag_set(tp, 1SHOT_MSI); + } + + if (tg3_flag(tp, 57765_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSIX); + tp->irq_max = TG3_IRQ_MAX_VECS; + } + } + + if (tg3_flag(tp, 5755_PLUS)) + tg3_flag_set(tp, SHORT_DMA_BUG); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_flag_set(tp, 4K_FIFO_LIMIT); + + if (tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, LRG_PROD_RING_CAP); + + if (tg3_flag(tp, 57765_PLUS) && + tp->pci_chip_rev_id != CHIPREV_ID_5719_A0) + tg3_flag_set(tp, USE_JUMBO_BDFLAG); + + if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, USE_JUMBO_BDFLAG)) + tg3_flag_set(tp, JUMBO_CAPABLE); + + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + + if (pci_is_pcie(tp->pdev)) { + u16 lnkctl; + + tg3_flag_set(tp, PCI_EXPRESS); + + tp->pcie_readrq = 4096; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tp->pcie_readrq = 2048; + + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &lnkctl); + if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5906) { + tg3_flag_clear(tp, HW_TSO_2); + tg3_flag_clear(tp, TSO_CAPABLE); + } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) + tg3_flag_set(tp, CLKREQ_BUG); + } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { + tg3_flag_set(tp, L1PLLPD_EN); + } + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + /* BCM5785 devices are effectively PCIe devices, and should + * follow PCIe codepaths, but do not have a PCIe capabilities + * section. 
+ */ + tg3_flag_set(tp, PCI_EXPRESS); + } else if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS)) { + tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); + if (!tp->pcix_cap) { + dev_err(&tp->pdev->dev, + "Cannot find PCI-X capability, aborting\n"); + return -EIO; + } + + if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) + tg3_flag_set(tp, PCIX_MODE); + } + + /* If we have an AMD 762 or VIA K8T800 chipset, write + * reordering to the mailbox registers done by the host + * controller can cause major troubles. We read back from + * every mailbox register write to force the writes to be + * posted to the chip in order. + */ + if (pci_dev_present(tg3_write_reorder_chipsets) && + !tg3_flag(tp, PCI_EXPRESS)) + tg3_flag_set(tp, MBOX_WRITE_REORDER); + + pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + &tp->pci_cacheline_sz); + pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, + &tp->pci_lat_timer); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && + tp->pci_lat_timer < 64) { + tp->pci_lat_timer = 64; + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } + + /* Important! -- It is critical that the PCI-X hw workaround + * situation is decided before the first MMIO register access. + */ + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { + /* 5700 BX chips need to have their TX producer index + * mailboxes written twice to workaround a bug. + */ + tg3_flag_set(tp, TXD_MBOX_HWBUG); + + /* If we are in PCI-X mode, enable register write workaround. + * + * The workaround is to use indirect register accesses + * for all chip writes not to mailbox registers. + */ + if (tg3_flag(tp, PCIX_MODE)) { + u32 pm_reg; + + tg3_flag_set(tp, PCIX_TARGET_HWBUG); + + /* The chip can have its power management PCI config + * space registers clobbered due to this bug. + * So explicitly force the chip into D0 here. + */ + pci_read_config_dword(tp->pdev, + tp->pm_cap + PCI_PM_CTRL, + &pm_reg); + pm_reg &= ~PCI_PM_CTRL_STATE_MASK; + pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; + pci_write_config_dword(tp->pdev, + tp->pm_cap + PCI_PM_CTRL, + pm_reg); + + /* Also, force SERR#/PERR# in PCI command. */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + } + } + + if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) + tg3_flag_set(tp, PCI_HIGH_SPEED); + if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) + tg3_flag_set(tp, PCI_32BIT); + + /* Chip-specific fixup from Broadcom driver */ + if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && + (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { + pci_state_reg |= PCISTATE_RETRY_SAME_DMA; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); + } + + /* Default fast path register access methods */ + tp->read32 = tg3_read32; + tp->write32 = tg3_write32; + tp->read32_mbox = tg3_read32; + tp->write32_mbox = tg3_write32; + tp->write32_tx_mbox = tg3_write32; + tp->write32_rx_mbox = tg3_write32; + + /* Various workaround register access methods */ + if (tg3_flag(tp, PCIX_TARGET_HWBUG)) + tp->write32 = tg3_write_indirect_reg32; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || + (tg3_flag(tp, PCI_EXPRESS) && + tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { + /* + * Back to back register writes can cause problems on these + * chips, the workaround is to read back all reg writes + * except those to mailbox regs. + * + * See tg3_write_indirect_reg32().
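The read-back workaround described above, applied just below via tg3_write_flush_reg32, is the classic flush of a posted PCI write: reading any register on the device forces intervening bridges to drain their write buffers first, so writes reach the chip in order. A minimal stand-alone sketch of the pattern (the driver's actual helper operates on tp->regs):

    /* Write a device register, then read it back so the posted write
     * cannot linger in a host bridge's write buffer.
     */
    static void write_flush_reg32(void __iomem *base, u32 off, u32 val)
    {
        writel(val, base + off);
        readl(base + off);
    }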
+ */ + tp->write32 = tg3_write_flush_reg32; + } + + if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + if (tg3_flag(tp, MBOX_WRITE_REORDER)) + tp->write32_rx_mbox = tg3_write_flush_reg32; + } + + if (tg3_flag(tp, ICH_WORKAROUND)) { + tp->read32 = tg3_read_indirect_reg32; + tp->write32 = tg3_write_indirect_reg32; + tp->read32_mbox = tg3_read_indirect_mbox; + tp->write32_mbox = tg3_write_indirect_mbox; + tp->write32_tx_mbox = tg3_write_indirect_mbox; + tp->write32_rx_mbox = tg3_write_indirect_mbox; + + iounmap(tp->regs); + tp->regs = NULL; + + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_MEMORY; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tp->read32_mbox = tg3_read32_mbox_5906; + tp->write32_mbox = tg3_write32_mbox_5906; + tp->write32_tx_mbox = tg3_write32_mbox_5906; + tp->write32_rx_mbox = tg3_write32_mbox_5906; + } + + if (tp->write32 == tg3_write_indirect_reg32 || + (tg3_flag(tp, PCIX_MODE) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) + tg3_flag_set(tp, SRAM_USE_CONFIG); + + /* The memory arbiter has to be enabled in order for SRAM accesses + * to succeed. Normally on powerup the tg3 chip firmware will make + * sure it is enabled, but other entities such as system netboot + * code might disable it. + */ + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + if (tg3_flag(tp, PCIX_MODE)) { + pci_read_config_dword(tp->pdev, + tp->pcix_cap + PCI_X_STATUS, &val); + tp->pci_fn = val & 0x7; + } else { + tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; + } + + /* Get eeprom hw config before calling tg3_set_power_state(). + * In particular, the TG3_FLAG_IS_NIC flag must be + * determined before calling tg3_set_power_state() so that + * we know whether or not to switch out of Vaux power. + * When the flag is set, it means that GPIO1 is used for eeprom + * write protect and also implies that it is a LOM where GPIOs + * are not used to switch power. + */ + tg3_get_eeprom_hw_cfg(tp); + + if (tg3_flag(tp, ENABLE_APE)) { + /* Allow reads and writes to the + * APE register and memory space. + */ + pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, + pci_state_reg); + + tg3_ape_lock_init(tp); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, CPMU_PRESENT); + + /* Set up tp->grc_local_ctrl before calling + * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high + * will bring 5700's external PHY out of reset. + * It is also used as eeprom write protect on LOMs. + */ + tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + tg3_flag(tp, EEPROM_WRITE_PROT)) + tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OUTPUT1); + /* Unused GPIO3 must be driven as output on 5752 because there + * are no pull-up resistors on unused GPIO pins. 
+ */ + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* Turn off the debug UART. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + if (tg3_flag(tp, IS_NIC)) + /* Keep VMain power. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OUTPUT0; + } + + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + + /* Derive initial jumbo mode from MTU assigned in + * ether_setup() via the alloc_etherdev() call + */ + if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, JUMBO_RING_ENABLE); + + /* Determine WakeOnLan speed to use. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { + tg3_flag_clear(tp, WOL_SPEED_100MB); + } else { + tg3_flag_set(tp, WOL_SPEED_100MB); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->phy_flags |= TG3_PHYFLG_IS_FET; + + /* A few boards don't want Ethernet@WireSpeed phy feature */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && + (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || + (tp->phy_flags & TG3_PHYFLG_IS_FET) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) + tp->phy_flags |= TG3_PHYFLG_ADC_BUG; + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) + tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; + + if (tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && + !tg3_flag(tp, 57765_PLUS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && + tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) + tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) + tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; + } else + tp->phy_flags |= TG3_PHYFLG_BER_BUG; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { + tp->phy_otp = tg3_read_otp_phycfg(tp); + if (tp->phy_otp == 0) + tp->phy_otp = TG3_OTP_DEFAULT; + } + + if (tg3_flag(tp, CPMU_PRESENT)) + tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; + else + tp->mi_mode = MAC_MI_MODE_BASE; + + tp->coalesce_mode = 0; + if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) + tp->coalesce_mode |= HOSTCC_MODE_32BYTE; + + /* Set these bits to enable statistics workaround. 
*/ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) { + tp->coalesce_mode |= HOSTCC_MODE_ATTN; + tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + tg3_flag_set(tp, USE_PHYLIB); + + err = tg3_mdio_init(tp); + if (err) + return err; + + /* Initialize data/descriptor byte/word swapping. */ + val = tr32(GRC_MODE); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | + GRC_MODE_WORD_SWAP_B2HRX_DATA | + GRC_MODE_B2HRX_ENABLE | + GRC_MODE_HTX2B_ENABLE | + GRC_MODE_HOST_STACKUP); + else + val &= GRC_MODE_HOST_STACKUP; + + tw32(GRC_MODE, val | tp->grc_mode); + + tg3_switch_clocks(tp); + + /* Clear this out for sanity. */ + tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && + !tg3_flag(tp, PCIX_TARGET_HWBUG)) { + u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); + + if (chiprevid == CHIPREV_ID_5701_A0 || + chiprevid == CHIPREV_ID_5701_B0 || + chiprevid == CHIPREV_ID_5701_B2 || + chiprevid == CHIPREV_ID_5701_B5) { + void __iomem *sram_base; + + /* Write some dummy words into the SRAM status block + * area, see if it reads back correctly. If the return + * value is bad, force enable the PCIX workaround. + */ + sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; + + writel(0x00000000, sram_base); + writel(0x00000000, sram_base + 4); + writel(0xffffffff, sram_base + 4); + if (readl(sram_base) != 0x00000000) + tg3_flag_set(tp, PCIX_TARGET_HWBUG); + } + } + + udelay(50); + tg3_nvram_init(tp); + + grc_misc_cfg = tr32(GRC_MISC_CFG); + grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || + grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) + tg3_flag_set(tp, IS_5788); + + if (!tg3_flag(tp, IS_5788) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tg3_flag_set(tp, TAGGED_STATUS); + if (tg3_flag(tp, TAGGED_STATUS)) { + tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD); + + tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + } + + /* Preserve the APE MAC_MODE bits */ + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + else + tp->mac_mode = TG3_DEF_MAC_MODE; + + /* these are limited to 10/100 only */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && + (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && + (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || + (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && + (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || + (tp->phy_flags & TG3_PHYFLG_IS_FET)) + tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; + + err = tg3_phy_probe(tp); + if (err) { + 
dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); + /* ... but do not return immediately ... */ + tg3_mdio_fini(tp); + } + + tg3_read_vpd(tp); + tg3_read_fw_ver(tp); + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; + } else { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; + else + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; + } + + /* 5700 {AX,BX} chips have a broken status block link + * change bit implementation, so we must use the + * status register in those cases. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tg3_flag_set(tp, USE_LINKCHG_REG); + else + tg3_flag_clear(tp, USE_LINKCHG_REG); + + /* The led_ctrl is set during tg3_phy_probe, here we might + * have to force the link status polling mechanism based + * upon subsystem IDs. + */ + if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; + tg3_flag_set(tp, USE_LINKCHG_REG); + } + + /* For all SERDES we poll the MAC status register. */ + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + tg3_flag_set(tp, POLL_SERDES); + else + tg3_flag_clear(tp, POLL_SERDES); + + tp->rx_offset = NET_IP_ALIGN; + tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && + tg3_flag(tp, PCIX_MODE)) { + tp->rx_offset = 0; +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + tp->rx_copy_thresh = ~(u16)0; +#endif + } + + tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; + tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; + tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; + + tp->rx_std_max_post = tp->rx_std_ring_mask + 1; + + /* Increment the rx prod index on the rx std ring by at most + * 8 for these chips to workaround hw errata. 
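The rx_*_ring_mask values above follow the usual power-of-two ring idiom: when the ring size is a power of two, "index & (size - 1)" is a cheap modulo, so producers and consumers can wrap without a divide. A small sketch:

    /* Advance a ring index; ring_mask must be (power-of-two size) - 1. */
    static u32 ring_advance(u32 idx, u32 ring_mask)
    {
        return (idx + 1) & ring_mask;
    }

    /* e.g. for a 512-entry ring, ring_mask == 511 and index 511 wraps
     * back to 0.
     */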
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tp->rx_std_max_post = 8; + + if (tg3_flag(tp, ASPM_WORKAROUND)) + tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & + PCIE_PWR_MGMT_L1_THRESH_MSK; + + return err; +} + +#ifdef CONFIG_SPARC +static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) +{ + struct net_device *dev = tp->dev; + struct pci_dev *pdev = tp->pdev; + struct device_node *dp = pci_device_to_OF_node(pdev); + const unsigned char *addr; + int len; + + addr = of_get_property(dp, "local-mac-address", &len); + if (addr && len == 6) { + memcpy(dev->dev_addr, addr, 6); + memcpy(dev->perm_addr, dev->dev_addr, 6); + return 0; + } + return -ENODEV; +} + +static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) +{ + struct net_device *dev = tp->dev; + + memcpy(dev->dev_addr, idprom->id_ethaddr, 6); + memcpy(dev->perm_addr, idprom->id_ethaddr, 6); + return 0; +} +#endif + +static int __devinit tg3_get_device_address(struct tg3 *tp) +{ + struct net_device *dev = tp->dev; + u32 hi, lo, mac_offset; + int addr_ok = 0; + +#ifdef CONFIG_SPARC + if (!tg3_get_macaddr_sparc(tp)) + return 0; +#endif + + mac_offset = 0x7c; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + tg3_flag(tp, 5780_CLASS)) { + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + mac_offset = 0xcc; + if (tg3_nvram_lock(tp)) + tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); + else + tg3_nvram_unlock(tp); + } else if (tg3_flag(tp, 5717_PLUS)) { + if (tp->pci_fn & 1) + mac_offset = 0xcc; + if (tp->pci_fn > 1) + mac_offset += 0x18c; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + mac_offset = 0x10; + + /* First try to get it from MAC address mailbox. */ + tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); + if ((hi >> 16) == 0x484b) { + dev->dev_addr[0] = (hi >> 8) & 0xff; + dev->dev_addr[1] = (hi >> 0) & 0xff; + + tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); + dev->dev_addr[2] = (lo >> 24) & 0xff; + dev->dev_addr[3] = (lo >> 16) & 0xff; + dev->dev_addr[4] = (lo >> 8) & 0xff; + dev->dev_addr[5] = (lo >> 0) & 0xff; + + /* Some old bootcode may report a 0 MAC address in SRAM */ + addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); + } + if (!addr_ok) { + /* Next, try NVRAM. */ + if (!tg3_flag(tp, NO_NVRAM) && + !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && + !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { + memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); + memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); + } + /* Finally just fetch it out of the MAC control regs. 
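The 0x484b signature tested above is ASCII "HK"; the bootcode is expected to leave it in the high half of the first SRAM mailbox word, with the six address bytes packed big-endian across the remainder of the two words. A sketch of the unpacking, using a hypothetical helper (the driver open-codes it):

    /* Extract a MAC address from the two SRAM mailbox words, returning
     * false when the "HK" bootcode signature is absent.
     */
    static bool tg3_mac_from_mbox(u32 hi, u32 lo, u8 addr[6])
    {
        if ((hi >> 16) != 0x484b)       /* 'H' 'K' */
            return false;

        addr[0] = (hi >> 8) & 0xff;
        addr[1] = (hi >> 0) & 0xff;
        addr[2] = (lo >> 24) & 0xff;
        addr[3] = (lo >> 16) & 0xff;
        addr[4] = (lo >> 8) & 0xff;
        addr[5] = (lo >> 0) & 0xff;
        return true;
    }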
*/ + else { + hi = tr32(MAC_ADDR_0_HIGH); + lo = tr32(MAC_ADDR_0_LOW); + + dev->dev_addr[5] = lo & 0xff; + dev->dev_addr[4] = (lo >> 8) & 0xff; + dev->dev_addr[3] = (lo >> 16) & 0xff; + dev->dev_addr[2] = (lo >> 24) & 0xff; + dev->dev_addr[1] = hi & 0xff; + dev->dev_addr[0] = (hi >> 8) & 0xff; + } + } + + if (!is_valid_ether_addr(&dev->dev_addr[0])) { +#ifdef CONFIG_SPARC + if (!tg3_get_default_macaddr_sparc(tp)) + return 0; +#endif + return -EINVAL; + } + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + return 0; +} + +#define BOUNDARY_SINGLE_CACHELINE 1 +#define BOUNDARY_MULTI_CACHELINE 2 + +static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) +{ + int cacheline_size; + u8 byte; + int goal; + + pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); + if (byte == 0) + cacheline_size = 1024; + else + cacheline_size = (int) byte * 4; + + /* On 5703 and later chips, the boundary bits have no + * effect. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + !tg3_flag(tp, PCI_EXPRESS)) + goto out; + +#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) + goal = BOUNDARY_MULTI_CACHELINE; +#else +#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) + goal = BOUNDARY_SINGLE_CACHELINE; +#else + goal = 0; +#endif +#endif + + if (tg3_flag(tp, 57765_PLUS)) { + val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; + goto out; + } + + if (!goal) + goto out; + + /* PCI controllers on most RISC systems tend to disconnect + * when a device tries to burst across a cache-line boundary. + * Therefore, letting tg3 do so just wastes PCI bandwidth. + * + * Unfortunately, for PCI-E there are only limited + * write-side controls for this, and thus for reads + * we will still get the disconnects. We'll also waste + * these PCI cycles for both read and write for chips + * other than 5700 and 5701 which do not implement the + * boundary bits. 
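+ *
+ * For example, a 64-byte cache line with a goal of
+ * BOUNDARY_SINGLE_CACHELINE maps to 128-byte read/write boundaries
+ * in PCI-X mode, a 64-byte write boundary on PCI Express, and
+ * 64-byte read/write boundaries on plain PCI in the switches below.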
+ */ + if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | + DMA_RWCTRL_WRITE_BNDRY_128_PCIX); + } else { + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + } + break; + + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | + DMA_RWCTRL_WRITE_BNDRY_256_PCIX); + break; + + default: + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + break; + } + } else if (tg3_flag(tp, PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; + break; + } + /* fallthrough */ + case 128: + default: + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; + break; + } + } else { + switch (cacheline_size) { + case 16: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_16 | + DMA_RWCTRL_WRITE_BNDRY_16); + break; + } + /* fallthrough */ + case 32: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_32 | + DMA_RWCTRL_WRITE_BNDRY_32); + break; + } + /* fallthrough */ + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_64 | + DMA_RWCTRL_WRITE_BNDRY_64); + break; + } + /* fallthrough */ + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128 | + DMA_RWCTRL_WRITE_BNDRY_128); + break; + } + /* fallthrough */ + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256 | + DMA_RWCTRL_WRITE_BNDRY_256); + break; + case 512: + val |= (DMA_RWCTRL_READ_BNDRY_512 | + DMA_RWCTRL_WRITE_BNDRY_512); + break; + case 1024: + default: + val |= (DMA_RWCTRL_READ_BNDRY_1024 | + DMA_RWCTRL_WRITE_BNDRY_1024); + break; + } + } + +out: + return val; +} + +static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) +{ + struct tg3_internal_buffer_desc test_desc; + u32 sram_dma_descs; + int i, ret; + + sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; + + tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); + tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); + tw32(RDMAC_STATUS, 0); + tw32(WDMAC_STATUS, 0); + + tw32(BUFMGR_MODE, 0); + tw32(FTQ_RESET, 0); + + test_desc.addr_hi = ((u64) buf_dma) >> 32; + test_desc.addr_lo = buf_dma & 0xffffffff; + test_desc.nic_mbuf = 0x00002100; + test_desc.len = size; + + /* + * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz + * the *second* time the tg3 driver was getting loaded after an + * initial scan. + * + * Broadcom tells me: + * ...the DMA engine is connected to the GRC block and a DMA + * reset may affect the GRC block in some unpredictable way... + * The behavior of resets to individual blocks has not been tested. + * + * Broadcom noted the GRC reset will also reset all sub-components. 
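+ *
+ * The test below builds a single internal buffer descriptor, writes
+ * it into NIC SRAM through the PCI memory window, kicks the read
+ * (or write) DMA FTQ, and then polls the corresponding completion
+ * FIFO for up to 4 ms.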
+ */ + if (to_device) { + test_desc.cqid_sqid = (13 << 8) | 2; + + tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); + udelay(40); + } else { + test_desc.cqid_sqid = (16 << 8) | 7; + + tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); + udelay(40); + } + test_desc.flags = 0x00000005; + + for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { + u32 val; + + val = *(((u32 *)&test_desc) + i); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, + sram_dma_descs + (i * sizeof(u32))); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + } + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + + if (to_device) + tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); + else + tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); + + ret = -ENODEV; + for (i = 0; i < 40; i++) { + u32 val; + + if (to_device) + val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); + else + val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); + if ((val & 0xffff) == sram_dma_descs) { + ret = 0; + break; + } + + udelay(100); + } + + return ret; +} + +#define TEST_BUFFER_SIZE 0x2000 + +static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, + { }, +}; + +static int __devinit tg3_test_dma(struct tg3 *tp) +{ + dma_addr_t buf_dma; + u32 *buf, saved_dma_rwctrl; + int ret = 0; + + buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, + &buf_dma, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out_nofree; + } + + tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | + (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); + + tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); + + if (tg3_flag(tp, 57765_PLUS)) + goto out; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* DMA read watermark not used on PCIE */ + tp->dma_rwctrl |= 0x00180000; + } else if (!tg3_flag(tp, PCIX_MODE)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) + tp->dma_rwctrl |= 0x003f0000; + else + tp->dma_rwctrl |= 0x003f000f; + } else { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); + u32 read_water = 0x7; + + /* If the 5704 is behind the EPB bridge, we can + * do the less restrictive ONE_DMA workaround for + * better performance. + */ + if (tg3_flag(tp, 40BIT_DMA_BUG) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tp->dma_rwctrl |= 0x8000; + else if (ccval == 0x6 || ccval == 0x7) + tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) + read_water = 4; + /* Set bit 23 to enable PCIX hw bug fix */ + tp->dma_rwctrl |= + (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | + (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | + (1 << 23); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { + /* 5780 always in PCIX mode */ + tp->dma_rwctrl |= 0x00144000; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + /* 5714 always in PCIX mode */ + tp->dma_rwctrl |= 0x00148000; + } else { + tp->dma_rwctrl |= 0x001b000f; + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tp->dma_rwctrl &= 0xfffffff0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + /* Remove this if it causes problems for some boards. */ + tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; + + /* On 5700/5701 chips, we need to set this bit. 
+ * Otherwise the chip will issue cacheline transactions + * to streamable DMA memory with not all the byte + * enables turned on. This is an error on several + * RISC PCI controllers, in particular sparc64. + * + * On 5703/5704 chips, this bit has been reassigned + * a different meaning. In particular, it is used + * on those chips to enable a PCI-X workaround. + */ + tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; + } + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + +#if 0 + /* Unneeded, already done by tg3_get_invariants. */ + tg3_switch_clocks(tp); +#endif + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + goto out; + + /* It is best to perform DMA test with maximum write burst size + * to expose the 5700/5701 write DMA bug. + */ + saved_dma_rwctrl = tp->dma_rwctrl; + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + + while (1) { + u32 *p = buf, i; + + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) + p[i] = i; + + /* Send the buffer to the chip. */ + ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); + if (ret) { + dev_err(&tp->pdev->dev, + "%s: Buffer write failed. err = %d\n", + __func__, ret); + break; + } + +#if 0 + /* validate data reached card RAM correctly. */ + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { + u32 val; + tg3_read_mem(tp, 0x2100 + (i*4), &val); + if (le32_to_cpu(val) != p[i]) { + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on device! " + "(%d != %d)\n", __func__, val, i); + /* ret = -ENODEV here? */ + } + p[i] = 0; + } +#endif + /* Now read it back. */ + ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); + if (ret) { + dev_err(&tp->pdev->dev, "%s: Buffer read failed. " + "err = %d\n", __func__, ret); + break; + } + + /* Verify it. */ + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { + if (p[i] == i) + continue; + + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + break; + } else { + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on read back! " + "(%d != %d)\n", __func__, p[i], i); + ret = -ENODEV; + goto out; + } + } + + if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { + /* Success. */ + ret = 0; + break; + } + } + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + /* DMA test passed without adjusting DMA boundary, + * now look for chipsets that are known to expose the + * DMA bug without failing the test. + */ + if (pci_dev_present(tg3_dma_wait_state_chipsets)) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + } else { + /* Safe to use the calculated DMA boundary. 
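+ * (That is, restore the value computed by tg3_calc_dma_bndry above.)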
*/ + tp->dma_rwctrl = saved_dma_rwctrl; + } + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } + +out: + dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); +out_nofree: + return ret; +} + +static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) +{ + if (tg3_flag(tp, 57765_PLUS)) { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_57765; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_57765; + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO_57765; + } else if (tg3_flag(tp, 5705_PLUS)) { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_5705; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_5705; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_5906; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_5906; + } + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO_5780; + } else { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER; + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_JUMBO; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO; + } + + tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; + tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; +} + +static char * __devinit tg3_phy_string(struct tg3 *tp) +{ + switch (tp->phy_id & TG3_PHY_ID_MASK) { + case TG3_PHY_ID_BCM5400: return "5400"; + case TG3_PHY_ID_BCM5401: return "5401"; + case TG3_PHY_ID_BCM5411: return "5411"; + case TG3_PHY_ID_BCM5701: return "5701"; + case TG3_PHY_ID_BCM5703: return "5703"; + case TG3_PHY_ID_BCM5704: return "5704"; + case TG3_PHY_ID_BCM5705: return "5705"; + case TG3_PHY_ID_BCM5750: return "5750"; + case TG3_PHY_ID_BCM5752: return "5752"; + case TG3_PHY_ID_BCM5714: return "5714"; + case TG3_PHY_ID_BCM5780: return "5780"; + case TG3_PHY_ID_BCM5755: return "5755"; + case TG3_PHY_ID_BCM5787: return "5787"; + case TG3_PHY_ID_BCM5784: return "5784"; + case TG3_PHY_ID_BCM5756: return "5722/5756"; + case TG3_PHY_ID_BCM5906: return "5906"; + case TG3_PHY_ID_BCM5761: return "5761"; + case TG3_PHY_ID_BCM5718C: return "5718C"; + case TG3_PHY_ID_BCM5718S: return "5718S"; + case TG3_PHY_ID_BCM57765: return "57765"; + case TG3_PHY_ID_BCM5719C: return "5719C"; + case TG3_PHY_ID_BCM5720C: return "5720C"; + case TG3_PHY_ID_BCM8002: return "8002/serdes"; + case 0: return "serdes"; + default: return "unknown"; + } +} + +static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) +{ + if (tg3_flag(tp, PCI_EXPRESS)) { + strcpy(str, "PCI Express"); + return str; + } else if (tg3_flag(tp, PCIX_MODE)) { + u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; + + strcpy(str, 
"PCIX:"); + + if ((clock_ctrl == 7) || + ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == + GRC_MISC_CFG_BOARD_ID_5704CIOBE)) + strcat(str, "133MHz"); + else if (clock_ctrl == 0) + strcat(str, "33MHz"); + else if (clock_ctrl == 2) + strcat(str, "50MHz"); + else if (clock_ctrl == 4) + strcat(str, "66MHz"); + else if (clock_ctrl == 6) + strcat(str, "100MHz"); + } else { + strcpy(str, "PCI:"); + if (tg3_flag(tp, PCI_HIGH_SPEED)) + strcat(str, "66MHz"); + else + strcat(str, "33MHz"); + } + if (tg3_flag(tp, PCI_32BIT)) + strcat(str, ":32-bit"); + else + strcat(str, ":64-bit"); + return str; +} + +static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) +{ + struct pci_dev *peer; + unsigned int func, devnr = tp->pdev->devfn & ~7; + + for (func = 0; func < 8; func++) { + peer = pci_get_slot(tp->pdev->bus, devnr | func); + if (peer && peer != tp->pdev) + break; + pci_dev_put(peer); + } + /* 5704 can be configured in single-port mode, set peer to + * tp->pdev in that case. + */ + if (!peer) { + peer = tp->pdev; + return peer; + } + + /* + * We don't need to keep the refcount elevated; there's no way + * to remove one half of this device without removing the other + */ + pci_dev_put(peer); + + return peer; +} + +static void __devinit tg3_init_coal(struct tg3 *tp) +{ + struct ethtool_coalesce *ec = &tp->coal; + + memset(ec, 0, sizeof(*ec)); + ec->cmd = ETHTOOL_GCOALESCE; + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; + ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; + ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; + ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; + ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; + ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; + + if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD)) { + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; + } + + if (tg3_flag(tp, 5705_PLUS)) { + ec->rx_coalesce_usecs_irq = 0; + ec->tx_coalesce_usecs_irq = 0; + ec->stats_block_coalesce_usecs = 0; + } +} + +static const struct net_device_ops tg3_netdev_ops = { + .ndo_open = tg3_open, + .ndo_stop = tg3_close, + .ndo_start_xmit = tg3_start_xmit, + .ndo_get_stats64 = tg3_get_stats64, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = tg3_set_rx_mode, + .ndo_set_mac_address = tg3_set_mac_addr, + .ndo_do_ioctl = tg3_ioctl, + .ndo_tx_timeout = tg3_tx_timeout, + .ndo_change_mtu = tg3_change_mtu, + .ndo_fix_features = tg3_fix_features, + .ndo_set_features = tg3_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tg3_poll_controller, +#endif +}; + +static int __devinit tg3_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev; + struct tg3 *tp; + int i, err, pm_cap; + u32 sndmbx, rcvmbx, intmbx; + char str[40]; + u64 dma_mask, persist_dma_mask; + u32 features = 0; + + printk_once(KERN_INFO "%s\n", version); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + 
/* Find power-management capability. */ + pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pm_cap == 0) { + dev_err(&pdev->dev, + "Cannot find Power Management capability, aborting\n"); + err = -EIO; + goto err_out_free_res; + } + + err = pci_set_power_state(pdev, PCI_D0); + if (err) { + dev_err(&pdev->dev, "Transition to D0 failed, aborting\n"); + goto err_out_free_res; + } + + dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); + if (!dev) { + dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); + err = -ENOMEM; + goto err_out_power_down; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + tp = netdev_priv(dev); + tp->pdev = pdev; + tp->dev = dev; + tp->pm_cap = pm_cap; + tp->rx_mode = TG3_DEF_RX_MODE; + tp->tx_mode = TG3_DEF_TX_MODE; + + if (tg3_debug > 0) + tp->msg_enable = tg3_debug; + else + tp->msg_enable = TG3_DEF_MSG_ENABLE; + + /* The word/byte swap controls here control register access byte + * swapping. DMA data byte swapping is controlled in the GRC_MODE + * setting below. + */ + tp->misc_host_ctrl = + MISC_HOST_CTRL_MASK_PCI_INT | + MISC_HOST_CTRL_WORD_SWAP | + MISC_HOST_CTRL_INDIR_ACCESS | + MISC_HOST_CTRL_PCISTATE_RW; + + /* The NONFRM (non-frame) byte/word swap controls take effect + * on descriptor entries, anything which isn't packet data. + * + * The StrongARM chips on the board (one for tx, one for rx) + * are running in big-endian mode. + */ + tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | + GRC_MODE_WSWAP_NONFRM_DATA); +#ifdef __BIG_ENDIAN + tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; +#endif + spin_lock_init(&tp->lock); + spin_lock_init(&tp->indirect_lock); + INIT_WORK(&tp->reset_task, tg3_reset_task); + + tp->regs = pci_ioremap_bar(pdev, BAR_0); + if (!tp->regs) { + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_dev; + } + + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) { + tg3_flag_set(tp, ENABLE_APE); + tp->aperegs = pci_ioremap_bar(pdev, BAR_2); + if (!tp->aperegs) { + dev_err(&pdev->dev, + "Cannot map APE registers, aborting\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + } + + tp->rx_pending = TG3_DEF_RX_RING_PENDING; + tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; + + dev->ethtool_ops = &tg3_ethtool_ops; + dev->watchdog_timeo = TG3_TX_TIMEOUT; + dev->netdev_ops = &tg3_netdev_ops; + dev->irq = pdev->irq; + + err = tg3_get_invariants(tp); + if (err) { + dev_err(&pdev->dev, + "Problem fetching invariants of chip, aborting\n"); + goto err_out_apeunmap; + } + + /* The EPB bridge inside 5714, 5715, and 5780 and any + * device behind the EPB cannot support DMA addresses > 40-bit. + * On 64-bit systems with IOMMU, use 40-bit dma_mask. + * On 64-bit systems without IOMMU, use 64-bit dma_mask and + * do DMA address check in tg3_start_xmit(). + */ + if (tg3_flag(tp, IS_5788)) + persist_dma_mask = dma_mask = DMA_BIT_MASK(32); + else if (tg3_flag(tp, 40BIT_DMA_BUG)) { + persist_dma_mask = dma_mask = DMA_BIT_MASK(40); +#ifdef CONFIG_HIGHMEM + dma_mask = DMA_BIT_MASK(64); +#endif + } else + persist_dma_mask = dma_mask = DMA_BIT_MASK(64); + + /* Configure DMA attributes. 
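+ * A successful 64- or 40-bit streaming mask lets us advertise
+ * NETIF_F_HIGHDMA below; on failure we fall back to a plain
+ * 32-bit mask.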
*/
+	if (dma_mask > DMA_BIT_MASK(32)) {
+		err = pci_set_dma_mask(pdev, dma_mask);
+		if (!err) {
+			features |= NETIF_F_HIGHDMA;
+			err = pci_set_consistent_dma_mask(pdev,
+							  persist_dma_mask);
+			if (err < 0) {
+				dev_err(&pdev->dev, "Unable to obtain 64 bit "
+					"DMA for consistent allocations\n");
+				goto err_out_apeunmap;
+			}
+		}
+	}
+	if (err || dma_mask == DMA_BIT_MASK(32)) {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting\n");
+			goto err_out_apeunmap;
+		}
+	}
+
+	tg3_init_bufmgr_config(tp);
+
+	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+	/* 5700 B0 chips do not support checksumming correctly due
+	 * to hardware bugs.
+	 */
+	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+		if (tg3_flag(tp, 5755_PLUS))
+			features |= NETIF_F_IPV6_CSUM;
+	}
+
+	/* TSO is on by default on chips that support hardware TSO.
+	 * Firmware TSO on older chips gives lower performance, so it
+	 * is off by default, but can be enabled using ethtool.
+	 */
+	if ((tg3_flag(tp, HW_TSO_1) ||
+	     tg3_flag(tp, HW_TSO_2) ||
+	     tg3_flag(tp, HW_TSO_3)) &&
+	    (features & NETIF_F_IP_CSUM))
+		features |= NETIF_F_TSO;
+	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
+		if (features & NETIF_F_IPV6_CSUM)
+			features |= NETIF_F_TSO6;
+		if (tg3_flag(tp, HW_TSO_3) ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+			features |= NETIF_F_TSO_ECN;
+	}
+
+	dev->features |= features;
+	dev->vlan_features |= features;
+
+	/*
+	 * Add loopback capability only for a subset of devices that support
+	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
+	 * loopback for the remaining devices.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+	    !tg3_flag(tp, CPMU_PRESENT))
+		/* Add the loopback capability */
+		features |= NETIF_F_LOOPBACK;
+
+	dev->hw_features |= features;
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+	    !tg3_flag(tp, TSO_CAPABLE) &&
+	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
+		tg3_flag_set(tp, MAX_RXPEND_64);
+		tp->rx_pending = 63;
+	}
+
+	err = tg3_get_device_address(tp);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Could not obtain valid ethernet address, aborting\n");
+		goto err_out_apeunmap;
+	}
+
+	/*
+	 * Reset the chip in case an UNDI or EFI driver did not shut it
+	 * down cleanly; the DMA self test will enable WDMAC and we'll
+	 * see (spurious) pending DMA on the PCI bus at that point.
+ */ + if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || + (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + } + + err = tg3_test_dma(tp); + if (err) { + dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); + goto err_out_apeunmap; + } + + intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; + rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; + sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < tp->irq_max; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->tp = tp; + tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; + + tnapi->int_mbox = intmbx; + if (i < 4) + intmbx += 0x8; + else + intmbx += 0x4; + + tnapi->consmbox = rcvmbx; + tnapi->prodmbox = sndmbx; + + if (i) + tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); + else + tnapi->coal_now = HOSTCC_MODE_NOW; + + if (!tg3_flag(tp, SUPPORT_MSIX)) + break; + + /* + * If we support MSIX, we'll be using RSS. If we're using + * RSS, the first vector only handles link interrupts and the + * remaining vectors handle rx and tx interrupts. Reuse the + * mailbox values for the next iteration. The values we setup + * above are still useful for the single vectored mode. + */ + if (!i) + continue; + + rcvmbx += 0x8; + + if (sndmbx & 0x4) + sndmbx -= 0x4; + else + sndmbx += 0xc; + } + + tg3_init_coal(tp); + + pci_set_drvdata(pdev, dev); + + if (tg3_flag(tp, 5717_PLUS)) { + /* Resume a low-power mode */ + tg3_frob_aux_power(tp, false); + } + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Cannot register net device, aborting\n"); + goto err_out_apeunmap; + } + + netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", + tp->board_part_number, + tp->pci_chip_rev_id, + tg3_bus_string(tp, str), + dev->dev_addr); + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + struct phy_device *phydev; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + netdev_info(dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + phydev->drv->name, dev_name(&phydev->dev)); + } else { + char *ethtype; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + ethtype = "10/100Base-TX"; + else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + ethtype = "1000Base-SX"; + else + ethtype = "10/100/1000Base-T"; + + netdev_info(dev, "attached PHY is %s (%s Ethernet) " + "(WireSpeed[%d], EEE[%d])\n", + tg3_phy_string(tp), ethtype, + (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, + (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); + } + + netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", + (dev->features & NETIF_F_RXCSUM) != 0, + tg3_flag(tp, USE_LINKCHG_REG) != 0, + (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, + tg3_flag(tp, ENABLE_ASF) != 0, + tg3_flag(tp, TSO_CAPABLE) != 0); + netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", + tp->dma_rwctrl, + pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : + ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 
40 : 64); + + pci_save_state(pdev); + + return 0; + +err_out_apeunmap: + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + +err_out_iounmap: + if (tp->regs) { + iounmap(tp->regs); + tp->regs = NULL; + } + +err_out_free_dev: + free_netdev(dev); + +err_out_power_down: + pci_set_power_state(pdev, PCI_D3hot); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void __devexit tg3_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct tg3 *tp = netdev_priv(dev); + + if (tp->fw) + release_firmware(tp->fw); + + cancel_work_sync(&tp->reset_task); + + if (!tg3_flag(tp, USE_PHYLIB)) { + tg3_phy_fini(tp); + tg3_mdio_fini(tp); + } + + unregister_netdev(dev); + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + if (tp->regs) { + iounmap(tp->regs); + tp->regs = NULL; + } + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } +} + +#ifdef CONFIG_PM_SLEEP +static int tg3_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return 0; + + flush_work_sync(&tp->reset_task); + tg3_phy_stop(tp); + tg3_netif_stop(tp); + + del_timer_sync(&tp->timer); + + tg3_full_lock(tp, 1); + tg3_disable_ints(tp); + tg3_full_unlock(tp); + + netif_device_detach(dev); + + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_flag_clear(tp, INIT_COMPLETE); + tg3_full_unlock(tp); + + err = tg3_power_down_prepare(tp); + if (err) { + int err2; + + tg3_full_lock(tp, 0); + + tg3_flag_set(tp, INIT_COMPLETE); + err2 = tg3_restart_hw(tp, 1); + if (err2) + goto out; + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + netif_device_attach(dev); + tg3_netif_start(tp); + +out: + tg3_full_unlock(tp); + + if (!err2) + tg3_phy_start(tp); + } + + return err; +} + +static int tg3_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return 0; + + netif_device_attach(dev); + + tg3_full_lock(tp, 0); + + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, 1); + if (err) + goto out; + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + tg3_netif_start(tp); + +out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + return err; +} + +static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); +#define TG3_PM_OPS (&tg3_pm_ops) + +#else + +#define TG3_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + +/** + * tg3_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
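+ *
+ * Returns PCI_ERS_RESULT_NEED_RESET to request a slot reset, or
+ * PCI_ERS_RESULT_DISCONNECT if the channel is permanently failed.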
+ */ +static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET; + + netdev_info(netdev, "PCI I/O error detected\n"); + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + del_timer_sync(&tp->timer); + tg3_flag_clear(tp, RESTART_TIMER); + + /* Want to make sure that the reset task doesn't run */ + cancel_work_sync(&tp->reset_task); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + tg3_flag_clear(tp, RESTART_TIMER); + + netif_device_detach(netdev); + + /* Clean up software state, even if MMIO is blocked */ + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + tg3_full_unlock(tp); + +done: + if (state == pci_channel_io_perm_failure) + err = PCI_ERS_RESULT_DISCONNECT; + else + pci_disable_device(pdev); + + rtnl_unlock(); + + return err; +} + +/** + * tg3_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has exprienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ +static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + int err; + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); + goto done; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!netif_running(netdev)) { + rc = PCI_ERS_RESULT_RECOVERED; + goto done; + } + + err = tg3_power_up(tp); + if (err) + goto done; + + rc = PCI_ERS_RESULT_RECOVERED; + +done: + rtnl_unlock(); + + return rc; +} + +/** + * tg3_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that its OK to resume normal operation. 
+ */ +static void tg3_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + int err; + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_full_lock(tp, 0); + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, 1); + tg3_full_unlock(tp); + if (err) { + netdev_err(netdev, "Cannot restart hardware after reset.\n"); + goto done; + } + + netif_device_attach(netdev); + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + tg3_netif_start(tp); + + tg3_phy_start(tp); + +done: + rtnl_unlock(); +} + +static struct pci_error_handlers tg3_err_handler = { + .error_detected = tg3_io_error_detected, + .slot_reset = tg3_io_slot_reset, + .resume = tg3_io_resume +}; + +static struct pci_driver tg3_driver = { + .name = DRV_MODULE_NAME, + .id_table = tg3_pci_tbl, + .probe = tg3_init_one, + .remove = __devexit_p(tg3_remove_one), + .err_handler = &tg3_err_handler, + .driver.pm = TG3_PM_OPS, +}; + +static int __init tg3_init(void) +{ + return pci_register_driver(&tg3_driver); +} + +static void __exit tg3_cleanup(void) +{ + pci_unregister_driver(&tg3_driver); +} + +module_init(tg3_init); +module_exit(tg3_cleanup); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h new file mode 100644 index 0000000..2ea456d --- /dev/null +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -0,0 +1,3183 @@ +/* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $ + * tg3.h: Definitions for Broadcom Tigon3 ethernet driver. + * + * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2007-2011 Broadcom Corporation. + */ + +#ifndef _T3_H +#define _T3_H + +#define TG3_64BIT_REG_HIGH 0x00UL +#define TG3_64BIT_REG_LOW 0x04UL + +/* Descriptor block info. */ +#define TG3_BDINFO_HOST_ADDR 0x0UL /* 64-bit */ +#define TG3_BDINFO_MAXLEN_FLAGS 0x8UL /* 32-bit */ +#define BDINFO_FLAGS_USE_EXT_RECV 0x00000001 /* ext rx_buffer_desc */ +#define BDINFO_FLAGS_DISABLED 0x00000002 +#define BDINFO_FLAGS_MAXLEN_MASK 0xffff0000 +#define BDINFO_FLAGS_MAXLEN_SHIFT 16 +#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */ +#define TG3_BDINFO_SIZE 0x10UL + +#define TG3_RX_STD_MAX_SIZE_5700 512 +#define TG3_RX_STD_MAX_SIZE_5717 2048 +#define TG3_RX_JMB_MAX_SIZE_5700 256 +#define TG3_RX_JMB_MAX_SIZE_5717 1024 +#define TG3_RX_RET_MAX_SIZE_5700 1024 +#define TG3_RX_RET_MAX_SIZE_5705 512 +#define TG3_RX_RET_MAX_SIZE_5717 4096 + +/* First 256 bytes are a mirror of PCI config space. 
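+ * so the offsets below line up with the standard PCI configuration
+ * header (e.g. TG3PCI_VENDOR at 0x00, TG3PCI_DEVICE at 0x02).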
*/ +#define TG3PCI_VENDOR 0x00000000 +#define TG3PCI_VENDOR_BROADCOM 0x14e4 +#define TG3PCI_DEVICE 0x00000002 +#define TG3PCI_DEVICE_TIGON3_1 0x1644 /* BCM5700 */ +#define TG3PCI_DEVICE_TIGON3_2 0x1645 /* BCM5701 */ +#define TG3PCI_DEVICE_TIGON3_3 0x1646 /* BCM5702 */ +#define TG3PCI_DEVICE_TIGON3_4 0x1647 /* BCM5703 */ +#define TG3PCI_DEVICE_TIGON3_5761S 0x1688 +#define TG3PCI_DEVICE_TIGON3_5761SE 0x1689 +#define TG3PCI_DEVICE_TIGON3_57780 0x1692 +#define TG3PCI_DEVICE_TIGON3_57760 0x1690 +#define TG3PCI_DEVICE_TIGON3_57790 0x1694 +#define TG3PCI_DEVICE_TIGON3_57788 0x1691 +#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ +#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ +#define TG3PCI_DEVICE_TIGON3_5717 0x1655 +#define TG3PCI_DEVICE_TIGON3_5718 0x1656 +#define TG3PCI_DEVICE_TIGON3_57781 0x16b1 +#define TG3PCI_DEVICE_TIGON3_57785 0x16b5 +#define TG3PCI_DEVICE_TIGON3_57761 0x16b0 +#define TG3PCI_DEVICE_TIGON3_57765 0x16b4 +#define TG3PCI_DEVICE_TIGON3_57791 0x16b2 +#define TG3PCI_DEVICE_TIGON3_57795 0x16b6 +#define TG3PCI_DEVICE_TIGON3_5719 0x1657 +#define TG3PCI_DEVICE_TIGON3_5720 0x165f +/* 0x04 --> 0x2c unused */ +#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5 0x0001 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6 0x0002 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9 0x0003 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1 0x0005 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8 0x0006 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7 0x0007 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10 0x0008 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12 0x8008 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1 0x0009 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2 0x8009 +#define TG3PCI_SUBVENDOR_ID_3COM PCI_VENDOR_ID_3COM +#define TG3PCI_SUBDEVICE_ID_3COM_3C996T 0x1000 +#define TG3PCI_SUBDEVICE_ID_3COM_3C996BT 0x1006 +#define TG3PCI_SUBDEVICE_ID_3COM_3C996SX 0x1004 +#define TG3PCI_SUBDEVICE_ID_3COM_3C1000T 0x1007 +#define TG3PCI_SUBDEVICE_ID_3COM_3C940BR01 0x1008 +#define TG3PCI_SUBVENDOR_ID_DELL PCI_VENDOR_ID_DELL +#define TG3PCI_SUBDEVICE_ID_DELL_VIPER 0x00d1 +#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 +#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 +#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a +#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ +#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c +#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a +#define TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING 0x007d +#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780 0x0085 +#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099 +#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM +#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281 +/* 0x30 --> 0x64 unused */ +#define TG3PCI_MSI_DATA 0x00000064 +/* 0x66 --> 0x68 unused */ +#define TG3PCI_MISC_HOST_CTRL 0x00000068 +#define MISC_HOST_CTRL_CLEAR_INT 0x00000001 +#define MISC_HOST_CTRL_MASK_PCI_INT 0x00000002 +#define MISC_HOST_CTRL_BYTE_SWAP 0x00000004 +#define MISC_HOST_CTRL_WORD_SWAP 0x00000008 +#define MISC_HOST_CTRL_PCISTATE_RW 0x00000010 +#define MISC_HOST_CTRL_CLKREG_RW 0x00000020 +#define MISC_HOST_CTRL_REGWORD_SWAP 0x00000040 +#define MISC_HOST_CTRL_INDIR_ACCESS 0x00000080 +#define MISC_HOST_CTRL_IRQ_MASK_MODE 0x00000100 +#define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200 +#define MISC_HOST_CTRL_CHIPREV 0xffff0000 +#define MISC_HOST_CTRL_CHIPREV_SHIFT 16 +#define GET_CHIP_REV_ID(MISC_HOST_CTRL) \ + (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \ + 
MISC_HOST_CTRL_CHIPREV_SHIFT) +#define CHIPREV_ID_5700_A0 0x7000 +#define CHIPREV_ID_5700_A1 0x7001 +#define CHIPREV_ID_5700_B0 0x7100 +#define CHIPREV_ID_5700_B1 0x7101 +#define CHIPREV_ID_5700_B3 0x7102 +#define CHIPREV_ID_5700_ALTIMA 0x7104 +#define CHIPREV_ID_5700_C0 0x7200 +#define CHIPREV_ID_5701_A0 0x0000 +#define CHIPREV_ID_5701_B0 0x0100 +#define CHIPREV_ID_5701_B2 0x0102 +#define CHIPREV_ID_5701_B5 0x0105 +#define CHIPREV_ID_5703_A0 0x1000 +#define CHIPREV_ID_5703_A1 0x1001 +#define CHIPREV_ID_5703_A2 0x1002 +#define CHIPREV_ID_5703_A3 0x1003 +#define CHIPREV_ID_5704_A0 0x2000 +#define CHIPREV_ID_5704_A1 0x2001 +#define CHIPREV_ID_5704_A2 0x2002 +#define CHIPREV_ID_5704_A3 0x2003 +#define CHIPREV_ID_5705_A0 0x3000 +#define CHIPREV_ID_5705_A1 0x3001 +#define CHIPREV_ID_5705_A2 0x3002 +#define CHIPREV_ID_5705_A3 0x3003 +#define CHIPREV_ID_5750_A0 0x4000 +#define CHIPREV_ID_5750_A1 0x4001 +#define CHIPREV_ID_5750_A3 0x4003 +#define CHIPREV_ID_5750_C2 0x4202 +#define CHIPREV_ID_5752_A0_HW 0x5000 +#define CHIPREV_ID_5752_A0 0x6000 +#define CHIPREV_ID_5752_A1 0x6001 +#define CHIPREV_ID_5714_A2 0x9002 +#define CHIPREV_ID_5906_A1 0xc001 +#define CHIPREV_ID_57780_A0 0x57780000 +#define CHIPREV_ID_57780_A1 0x57780001 +#define CHIPREV_ID_5717_A0 0x05717000 +#define CHIPREV_ID_57765_A0 0x57785000 +#define CHIPREV_ID_5719_A0 0x05719000 +#define CHIPREV_ID_5720_A0 0x05720000 +#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) +#define ASIC_REV_5700 0x07 +#define ASIC_REV_5701 0x00 +#define ASIC_REV_5703 0x01 +#define ASIC_REV_5704 0x02 +#define ASIC_REV_5705 0x03 +#define ASIC_REV_5750 0x04 +#define ASIC_REV_5752 0x06 +#define ASIC_REV_5780 0x08 +#define ASIC_REV_5714 0x09 +#define ASIC_REV_5755 0x0a +#define ASIC_REV_5787 0x0b +#define ASIC_REV_5906 0x0c +#define ASIC_REV_USE_PROD_ID_REG 0x0f +#define ASIC_REV_5784 0x5784 +#define ASIC_REV_5761 0x5761 +#define ASIC_REV_5785 0x5785 +#define ASIC_REV_57780 0x57780 +#define ASIC_REV_5717 0x5717 +#define ASIC_REV_57765 0x57785 +#define ASIC_REV_5719 0x5719 +#define ASIC_REV_5720 0x5720 +#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) +#define CHIPREV_5700_AX 0x70 +#define CHIPREV_5700_BX 0x71 +#define CHIPREV_5700_CX 0x72 +#define CHIPREV_5701_AX 0x00 +#define CHIPREV_5703_AX 0x10 +#define CHIPREV_5704_AX 0x20 +#define CHIPREV_5704_BX 0x21 +#define CHIPREV_5750_AX 0x40 +#define CHIPREV_5750_BX 0x41 +#define CHIPREV_5784_AX 0x57840 +#define CHIPREV_5761_AX 0x57610 +#define CHIPREV_57765_AX 0x577650 +#define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff) +#define METAL_REV_A0 0x00 +#define METAL_REV_A1 0x01 +#define METAL_REV_B0 0x00 +#define METAL_REV_B1 0x01 +#define METAL_REV_B2 0x02 +#define TG3PCI_DMA_RW_CTRL 0x0000006c +#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 +#define DMA_RWCTRL_TAGGED_STAT_WA 0x00000080 +#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380 +#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 +#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 +#define DMA_RWCTRL_READ_BNDRY_16 0x00000100 +#define DMA_RWCTRL_READ_BNDRY_128_PCIX 0x00000100 +#define DMA_RWCTRL_READ_BNDRY_32 0x00000200 +#define DMA_RWCTRL_READ_BNDRY_256_PCIX 0x00000200 +#define DMA_RWCTRL_READ_BNDRY_64 0x00000300 +#define DMA_RWCTRL_READ_BNDRY_384_PCIX 0x00000300 +#define DMA_RWCTRL_READ_BNDRY_128 0x00000400 +#define DMA_RWCTRL_READ_BNDRY_256 0x00000500 +#define DMA_RWCTRL_READ_BNDRY_512 0x00000600 +#define DMA_RWCTRL_READ_BNDRY_1024 0x00000700 +#define DMA_RWCTRL_WRITE_BNDRY_MASK 0x00003800 +#define DMA_RWCTRL_WRITE_BNDRY_DISAB 0x00000000 +#define 
DMA_RWCTRL_WRITE_BNDRY_16 0x00000800 +#define DMA_RWCTRL_WRITE_BNDRY_128_PCIX 0x00000800 +#define DMA_RWCTRL_WRITE_BNDRY_32 0x00001000 +#define DMA_RWCTRL_WRITE_BNDRY_256_PCIX 0x00001000 +#define DMA_RWCTRL_WRITE_BNDRY_64 0x00001800 +#define DMA_RWCTRL_WRITE_BNDRY_384_PCIX 0x00001800 +#define DMA_RWCTRL_WRITE_BNDRY_128 0x00002000 +#define DMA_RWCTRL_WRITE_BNDRY_256 0x00002800 +#define DMA_RWCTRL_WRITE_BNDRY_512 0x00003000 +#define DMA_RWCTRL_WRITE_BNDRY_1024 0x00003800 +#define DMA_RWCTRL_ONE_DMA 0x00004000 +#define DMA_RWCTRL_READ_WATER 0x00070000 +#define DMA_RWCTRL_READ_WATER_SHIFT 16 +#define DMA_RWCTRL_WRITE_WATER 0x00380000 +#define DMA_RWCTRL_WRITE_WATER_SHIFT 19 +#define DMA_RWCTRL_USE_MEM_READ_MULT 0x00400000 +#define DMA_RWCTRL_ASSERT_ALL_BE 0x00800000 +#define DMA_RWCTRL_PCI_READ_CMD 0x0f000000 +#define DMA_RWCTRL_PCI_READ_CMD_SHIFT 24 +#define DMA_RWCTRL_PCI_WRITE_CMD 0xf0000000 +#define DMA_RWCTRL_PCI_WRITE_CMD_SHIFT 28 +#define DMA_RWCTRL_WRITE_BNDRY_64_PCIE 0x10000000 +#define DMA_RWCTRL_WRITE_BNDRY_128_PCIE 0x30000000 +#define DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE 0x70000000 +#define TG3PCI_PCISTATE 0x00000070 +#define PCISTATE_FORCE_RESET 0x00000001 +#define PCISTATE_INT_NOT_ACTIVE 0x00000002 +#define PCISTATE_CONV_PCI_MODE 0x00000004 +#define PCISTATE_BUS_SPEED_HIGH 0x00000008 +#define PCISTATE_BUS_32BIT 0x00000010 +#define PCISTATE_ROM_ENABLE 0x00000020 +#define PCISTATE_ROM_RETRY_ENABLE 0x00000040 +#define PCISTATE_FLAT_VIEW 0x00000100 +#define PCISTATE_RETRY_SAME_DMA 0x00002000 +#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 +#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 +#define PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000 +#define TG3PCI_CLOCK_CTRL 0x00000074 +#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200 +#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400 +#define CLOCK_CTRL_TXCLK_DISABLE 0x00000800 +#define CLOCK_CTRL_ALTCLK 0x00001000 +#define CLOCK_CTRL_PWRDOWN_PLL133 0x00008000 +#define CLOCK_CTRL_44MHZ_CORE 0x00040000 +#define CLOCK_CTRL_625_CORE 0x00100000 +#define CLOCK_CTRL_FORCE_CLKRUN 0x00200000 +#define CLOCK_CTRL_CLKRUN_OENABLE 0x00400000 +#define CLOCK_CTRL_DELAY_PCI_GRANT 0x80000000 +#define TG3PCI_REG_BASE_ADDR 0x00000078 +#define TG3PCI_MEM_WIN_BASE_ADDR 0x0000007c +#define TG3PCI_REG_DATA 0x00000080 +#define TG3PCI_MEM_WIN_DATA 0x00000084 +#define TG3PCI_MISC_LOCAL_CTRL 0x00000090 +/* 0x94 --> 0x98 unused */ +#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ +#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ +/* 0xa8 --> 0xb8 unused */ +#define TG3PCI_DUAL_MAC_CTRL 0x000000b8 +#define DUAL_MAC_CTRL_CH_MASK 0x00000003 +#define DUAL_MAC_CTRL_ID 0x00000004 +#define TG3PCI_PRODID_ASICREV 0x000000bc +#define PROD_ID_ASIC_REV_MASK 0x0fffffff +/* 0xc0 --> 0xf4 unused */ + +#define TG3PCI_GEN2_PRODID_ASICREV 0x000000f4 +#define TG3PCI_GEN15_PRODID_ASICREV 0x000000fc +/* 0xf8 --> 0x200 unused */ + +#define TG3_CORR_ERR_STAT 0x00000110 +#define TG3_CORR_ERR_STAT_CLEAR 0xffffffff +/* 0x114 --> 0x200 unused */ + +/* Mailbox registers */ +#define MAILBOX_INTERRUPT_0 0x00000200 /* 64-bit */ +#define MAILBOX_INTERRUPT_1 0x00000208 /* 64-bit */ +#define MAILBOX_INTERRUPT_2 0x00000210 /* 64-bit */ +#define MAILBOX_INTERRUPT_3 0x00000218 /* 64-bit */ +#define MAILBOX_GENERAL_0 0x00000220 /* 64-bit */ +#define MAILBOX_GENERAL_1 0x00000228 /* 64-bit */ +#define MAILBOX_GENERAL_2 0x00000230 /* 64-bit */ +#define MAILBOX_GENERAL_3 0x00000238 /* 64-bit */ +#define MAILBOX_GENERAL_4 0x00000240 /* 64-bit */ +#define MAILBOX_GENERAL_5 0x00000248 /* 64-bit */ +#define 
MAILBOX_GENERAL_6 0x00000250 /* 64-bit */ +#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */ +#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */ +#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */ +#define TG3_RX_STD_PROD_IDX_REG (MAILBOX_RCV_STD_PROD_IDX + \ + TG3_64BIT_REG_LOW) +#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */ +#define TG3_RX_JMB_PROD_IDX_REG (MAILBOX_RCV_JUMBO_PROD_IDX + \ + TG3_64BIT_REG_LOW) +#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_2 0x00000290 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_3 0x00000298 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_4 0x000002a0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_5 0x000002a8 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_6 0x000002b0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_7 0x000002b8 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_8 0x000002c0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_9 0x000002c8 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_10 0x000002d0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_11 0x000002d8 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_12 0x000002e0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_13 0x000002e8 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_14 0x000002f0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_15 0x000002f8 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_0 0x00000300 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_1 0x00000308 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_2 0x00000310 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_3 0x00000318 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_4 0x00000320 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_5 0x00000328 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_6 0x00000330 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_7 0x00000338 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_8 0x00000340 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_9 0x00000348 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_10 0x00000350 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_11 0x00000358 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_12 0x00000360 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_13 0x00000368 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_14 0x00000370 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_15 0x00000378 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_0 0x00000380 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_1 0x00000388 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_2 0x00000390 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_3 0x00000398 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_4 0x000003a0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_5 0x000003a8 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_6 0x000003b0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_7 0x000003b8 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_8 0x000003c0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_9 0x000003c8 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_10 0x000003d0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_11 0x000003d8 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_12 0x000003e0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_13 0x000003e8 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_14 0x000003f0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_15 0x000003f8 /* 64-bit */ + +/* MAC control registers */ +#define MAC_MODE 0x00000400 +#define MAC_MODE_RESET 0x00000001 +#define MAC_MODE_HALF_DUPLEX 0x00000002 +#define MAC_MODE_PORT_MODE_MASK 0x0000000c +#define MAC_MODE_PORT_MODE_TBI 
0x0000000c +#define MAC_MODE_PORT_MODE_GMII 0x00000008 +#define MAC_MODE_PORT_MODE_MII 0x00000004 +#define MAC_MODE_PORT_MODE_NONE 0x00000000 +#define MAC_MODE_PORT_INT_LPBACK 0x00000010 +#define MAC_MODE_TAGGED_MAC_CTRL 0x00000080 +#define MAC_MODE_TX_BURSTING 0x00000100 +#define MAC_MODE_MAX_DEFER 0x00000200 +#define MAC_MODE_LINK_POLARITY 0x00000400 +#define MAC_MODE_RXSTAT_ENABLE 0x00000800 +#define MAC_MODE_RXSTAT_CLEAR 0x00001000 +#define MAC_MODE_RXSTAT_FLUSH 0x00002000 +#define MAC_MODE_TXSTAT_ENABLE 0x00004000 +#define MAC_MODE_TXSTAT_CLEAR 0x00008000 +#define MAC_MODE_TXSTAT_FLUSH 0x00010000 +#define MAC_MODE_SEND_CONFIGS 0x00020000 +#define MAC_MODE_MAGIC_PKT_ENABLE 0x00040000 +#define MAC_MODE_ACPI_ENABLE 0x00080000 +#define MAC_MODE_MIP_ENABLE 0x00100000 +#define MAC_MODE_TDE_ENABLE 0x00200000 +#define MAC_MODE_RDE_ENABLE 0x00400000 +#define MAC_MODE_FHDE_ENABLE 0x00800000 +#define MAC_MODE_KEEP_FRAME_IN_WOL 0x01000000 +#define MAC_MODE_APE_RX_EN 0x08000000 +#define MAC_MODE_APE_TX_EN 0x10000000 +#define MAC_STATUS 0x00000404 +#define MAC_STATUS_PCS_SYNCED 0x00000001 +#define MAC_STATUS_SIGNAL_DET 0x00000002 +#define MAC_STATUS_RCVD_CFG 0x00000004 +#define MAC_STATUS_CFG_CHANGED 0x00000008 +#define MAC_STATUS_SYNC_CHANGED 0x00000010 +#define MAC_STATUS_PORT_DEC_ERR 0x00000400 +#define MAC_STATUS_LNKSTATE_CHANGED 0x00001000 +#define MAC_STATUS_MI_COMPLETION 0x00400000 +#define MAC_STATUS_MI_INTERRUPT 0x00800000 +#define MAC_STATUS_AP_ERROR 0x01000000 +#define MAC_STATUS_ODI_ERROR 0x02000000 +#define MAC_STATUS_RXSTAT_OVERRUN 0x04000000 +#define MAC_STATUS_TXSTAT_OVERRUN 0x08000000 +#define MAC_EVENT 0x00000408 +#define MAC_EVENT_PORT_DECODE_ERR 0x00000400 +#define MAC_EVENT_LNKSTATE_CHANGED 0x00001000 +#define MAC_EVENT_MI_COMPLETION 0x00400000 +#define MAC_EVENT_MI_INTERRUPT 0x00800000 +#define MAC_EVENT_AP_ERROR 0x01000000 +#define MAC_EVENT_ODI_ERROR 0x02000000 +#define MAC_EVENT_RXSTAT_OVERRUN 0x04000000 +#define MAC_EVENT_TXSTAT_OVERRUN 0x08000000 +#define MAC_LED_CTRL 0x0000040c +#define LED_CTRL_LNKLED_OVERRIDE 0x00000001 +#define LED_CTRL_1000MBPS_ON 0x00000002 +#define LED_CTRL_100MBPS_ON 0x00000004 +#define LED_CTRL_10MBPS_ON 0x00000008 +#define LED_CTRL_TRAFFIC_OVERRIDE 0x00000010 +#define LED_CTRL_TRAFFIC_BLINK 0x00000020 +#define LED_CTRL_TRAFFIC_LED 0x00000040 +#define LED_CTRL_1000MBPS_STATUS 0x00000080 +#define LED_CTRL_100MBPS_STATUS 0x00000100 +#define LED_CTRL_10MBPS_STATUS 0x00000200 +#define LED_CTRL_TRAFFIC_STATUS 0x00000400 +#define LED_CTRL_MODE_MAC 0x00000000 +#define LED_CTRL_MODE_PHY_1 0x00000800 +#define LED_CTRL_MODE_PHY_2 0x00001000 +#define LED_CTRL_MODE_SHASTA_MAC 0x00002000 +#define LED_CTRL_MODE_SHARED 0x00004000 +#define LED_CTRL_MODE_COMBO 0x00008000 +#define LED_CTRL_BLINK_RATE_MASK 0x7ff80000 +#define LED_CTRL_BLINK_RATE_SHIFT 19 +#define LED_CTRL_BLINK_PER_OVERRIDE 0x00080000 +#define LED_CTRL_BLINK_RATE_OVERRIDE 0x80000000 +#define MAC_ADDR_0_HIGH 0x00000410 /* upper 2 bytes */ +#define MAC_ADDR_0_LOW 0x00000414 /* lower 4 bytes */ +#define MAC_ADDR_1_HIGH 0x00000418 /* upper 2 bytes */ +#define MAC_ADDR_1_LOW 0x0000041c /* lower 4 bytes */ +#define MAC_ADDR_2_HIGH 0x00000420 /* upper 2 bytes */ +#define MAC_ADDR_2_LOW 0x00000424 /* lower 4 bytes */ +#define MAC_ADDR_3_HIGH 0x00000428 /* upper 2 bytes */ +#define MAC_ADDR_3_LOW 0x0000042c /* lower 4 bytes */ +#define MAC_ACPI_MBUF_PTR 0x00000430 +#define MAC_ACPI_LEN_OFFSET 0x00000434 +#define ACPI_LENOFF_LEN_MASK 0x0000ffff +#define ACPI_LENOFF_LEN_SHIFT 0 +#define ACPI_LENOFF_OFF_MASK 
0x0fff0000 +#define ACPI_LENOFF_OFF_SHIFT 16 +#define MAC_TX_BACKOFF_SEED 0x00000438 +#define TX_BACKOFF_SEED_MASK 0x000003ff +#define MAC_RX_MTU_SIZE 0x0000043c +#define RX_MTU_SIZE_MASK 0x0000ffff +#define MAC_PCS_TEST 0x00000440 +#define PCS_TEST_PATTERN_MASK 0x000fffff +#define PCS_TEST_PATTERN_SHIFT 0 +#define PCS_TEST_ENABLE 0x00100000 +#define MAC_TX_AUTO_NEG 0x00000444 +#define TX_AUTO_NEG_MASK 0x0000ffff +#define TX_AUTO_NEG_SHIFT 0 +#define MAC_RX_AUTO_NEG 0x00000448 +#define RX_AUTO_NEG_MASK 0x0000ffff +#define RX_AUTO_NEG_SHIFT 0 +#define MAC_MI_COM 0x0000044c +#define MI_COM_CMD_MASK 0x0c000000 +#define MI_COM_CMD_WRITE 0x04000000 +#define MI_COM_CMD_READ 0x08000000 +#define MI_COM_READ_FAILED 0x10000000 +#define MI_COM_START 0x20000000 +#define MI_COM_BUSY 0x20000000 +#define MI_COM_PHY_ADDR_MASK 0x03e00000 +#define MI_COM_PHY_ADDR_SHIFT 21 +#define MI_COM_REG_ADDR_MASK 0x001f0000 +#define MI_COM_REG_ADDR_SHIFT 16 +#define MI_COM_DATA_MASK 0x0000ffff +#define MAC_MI_STAT 0x00000450 +#define MAC_MI_STAT_LNKSTAT_ATTN_ENAB 0x00000001 +#define MAC_MI_STAT_10MBPS_MODE 0x00000002 +#define MAC_MI_MODE 0x00000454 +#define MAC_MI_MODE_CLK_10MHZ 0x00000001 +#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002 +#define MAC_MI_MODE_AUTO_POLL 0x00000010 +#define MAC_MI_MODE_500KHZ_CONST 0x00008000 +#define MAC_MI_MODE_BASE 0x000c0000 /* XXX magic values XXX */ +#define MAC_AUTO_POLL_STATUS 0x00000458 +#define MAC_AUTO_POLL_ERROR 0x00000001 +#define MAC_TX_MODE 0x0000045c +#define TX_MODE_RESET 0x00000001 +#define TX_MODE_ENABLE 0x00000002 +#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010 +#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 +#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 +#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100 +#define TX_MODE_JMB_FRM_LEN 0x00400000 +#define TX_MODE_CNT_DN_MODE 0x00800000 +#define MAC_TX_STATUS 0x00000460 +#define TX_STATUS_XOFFED 0x00000001 +#define TX_STATUS_SENT_XOFF 0x00000002 +#define TX_STATUS_SENT_XON 0x00000004 +#define TX_STATUS_LINK_UP 0x00000008 +#define TX_STATUS_ODI_UNDERRUN 0x00000010 +#define TX_STATUS_ODI_OVERRUN 0x00000020 +#define MAC_TX_LENGTHS 0x00000464 +#define TX_LENGTHS_SLOT_TIME_MASK 0x000000ff +#define TX_LENGTHS_SLOT_TIME_SHIFT 0 +#define TX_LENGTHS_IPG_MASK 0x00000f00 +#define TX_LENGTHS_IPG_SHIFT 8 +#define TX_LENGTHS_IPG_CRS_MASK 0x00003000 +#define TX_LENGTHS_IPG_CRS_SHIFT 12 +#define TX_LENGTHS_JMB_FRM_LEN_MSK 0x00ff0000 +#define TX_LENGTHS_CNT_DWN_VAL_MSK 0xff000000 +#define MAC_RX_MODE 0x00000468 +#define RX_MODE_RESET 0x00000001 +#define RX_MODE_ENABLE 0x00000002 +#define RX_MODE_FLOW_CTRL_ENABLE 0x00000004 +#define RX_MODE_KEEP_MAC_CTRL 0x00000008 +#define RX_MODE_KEEP_PAUSE 0x00000010 +#define RX_MODE_ACCEPT_OVERSIZED 0x00000020 +#define RX_MODE_ACCEPT_RUNTS 0x00000040 +#define RX_MODE_LEN_CHECK 0x00000080 +#define RX_MODE_PROMISC 0x00000100 +#define RX_MODE_NO_CRC_CHECK 0x00000200 +#define RX_MODE_KEEP_VLAN_TAG 0x00000400 +#define RX_MODE_RSS_IPV4_HASH_EN 0x00010000 +#define RX_MODE_RSS_TCP_IPV4_HASH_EN 0x00020000 +#define RX_MODE_RSS_IPV6_HASH_EN 0x00040000 +#define RX_MODE_RSS_TCP_IPV6_HASH_EN 0x00080000 +#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000 +#define RX_MODE_RSS_ENABLE 0x00800000 +#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000 +#define MAC_RX_STATUS 0x0000046c +#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001 +#define RX_STATUS_XOFF_RCVD 0x00000002 +#define RX_STATUS_XON_RCVD 0x00000004 +#define MAC_HASH_REG_0 0x00000470 +#define MAC_HASH_REG_1 0x00000474 +#define MAC_HASH_REG_2 0x00000478 +#define MAC_HASH_REG_3 0x0000047c 
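+/* Receive rule/value register pairs; the rule configuration register,
+ * MAC_RCV_RULE_CFG, is defined further below.
+ */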
+#define MAC_RCV_RULE_0 0x00000480
+#define MAC_RCV_VALUE_0 0x00000484
+#define MAC_RCV_RULE_1 0x00000488
+#define MAC_RCV_VALUE_1 0x0000048c
+#define MAC_RCV_RULE_2 0x00000490
+#define MAC_RCV_VALUE_2 0x00000494
+#define MAC_RCV_RULE_3 0x00000498
+#define MAC_RCV_VALUE_3 0x0000049c
+#define MAC_RCV_RULE_4 0x000004a0
+#define MAC_RCV_VALUE_4 0x000004a4
+#define MAC_RCV_RULE_5 0x000004a8
+#define MAC_RCV_VALUE_5 0x000004ac
+#define MAC_RCV_RULE_6 0x000004b0
+#define MAC_RCV_VALUE_6 0x000004b4
+#define MAC_RCV_RULE_7 0x000004b8
+#define MAC_RCV_VALUE_7 0x000004bc
+#define MAC_RCV_RULE_8 0x000004c0
+#define MAC_RCV_VALUE_8 0x000004c4
+#define MAC_RCV_RULE_9 0x000004c8
+#define MAC_RCV_VALUE_9 0x000004cc
+#define MAC_RCV_RULE_10 0x000004d0
+#define MAC_RCV_VALUE_10 0x000004d4
+#define MAC_RCV_RULE_11 0x000004d8
+#define MAC_RCV_VALUE_11 0x000004dc
+#define MAC_RCV_RULE_12 0x000004e0
+#define MAC_RCV_VALUE_12 0x000004e4
+#define MAC_RCV_RULE_13 0x000004e8
+#define MAC_RCV_VALUE_13 0x000004ec
+#define MAC_RCV_RULE_14 0x000004f0
+#define MAC_RCV_VALUE_14 0x000004f4
+#define MAC_RCV_RULE_15 0x000004f8
+#define MAC_RCV_VALUE_15 0x000004fc
+#define RCV_RULE_DISABLE_MASK 0x7fffffff
+#define MAC_RCV_RULE_CFG 0x00000500
+#define RCV_RULE_CFG_DEFAULT_CLASS 0x00000008
+#define MAC_LOW_WMARK_MAX_RX_FRAME 0x00000504
+/* 0x508 --> 0x520 unused */
+#define MAC_HASHREGU_0 0x00000520
+#define MAC_HASHREGU_1 0x00000524
+#define MAC_HASHREGU_2 0x00000528
+#define MAC_HASHREGU_3 0x0000052c
+#define MAC_EXTADDR_0_HIGH 0x00000530
+#define MAC_EXTADDR_0_LOW 0x00000534
+#define MAC_EXTADDR_1_HIGH 0x00000538
+#define MAC_EXTADDR_1_LOW 0x0000053c
+#define MAC_EXTADDR_2_HIGH 0x00000540
+#define MAC_EXTADDR_2_LOW 0x00000544
+#define MAC_EXTADDR_3_HIGH 0x00000548
+#define MAC_EXTADDR_3_LOW 0x0000054c
+#define MAC_EXTADDR_4_HIGH 0x00000550
+#define MAC_EXTADDR_4_LOW 0x00000554
+#define MAC_EXTADDR_5_HIGH 0x00000558
+#define MAC_EXTADDR_5_LOW 0x0000055c
+#define MAC_EXTADDR_6_HIGH 0x00000560
+#define MAC_EXTADDR_6_LOW 0x00000564
+#define MAC_EXTADDR_7_HIGH 0x00000568
+#define MAC_EXTADDR_7_LOW 0x0000056c
+#define MAC_EXTADDR_8_HIGH 0x00000570
+#define MAC_EXTADDR_8_LOW 0x00000574
+#define MAC_EXTADDR_9_HIGH 0x00000578
+#define MAC_EXTADDR_9_LOW 0x0000057c
+#define MAC_EXTADDR_10_HIGH 0x00000580
+#define MAC_EXTADDR_10_LOW 0x00000584
+#define MAC_EXTADDR_11_HIGH 0x00000588
+#define MAC_EXTADDR_11_LOW 0x0000058c
+#define MAC_SERDES_CFG 0x00000590
+#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000
+#define MAC_SERDES_STAT 0x00000594
+/* 0x598 --> 0x5a0 unused */
+#define MAC_PHYCFG1 0x000005a0
+#define MAC_PHYCFG1_RGMII_INT 0x00000001
+#define MAC_PHYCFG1_RXCLK_TO_MASK 0x00001ff0
+#define MAC_PHYCFG1_RXCLK_TIMEOUT 0x00001000
+#define MAC_PHYCFG1_TXCLK_TO_MASK 0x01ff0000
+#define MAC_PHYCFG1_TXCLK_TIMEOUT 0x01000000
+#define MAC_PHYCFG1_RGMII_EXT_RX_DEC 0x02000000
+#define MAC_PHYCFG1_RGMII_SND_STAT_EN 0x04000000
+#define MAC_PHYCFG1_TXC_DRV 0x20000000
+#define MAC_PHYCFG2 0x000005a4
+#define MAC_PHYCFG2_INBAND_ENABLE 0x00000001
+#define MAC_PHYCFG2_EMODE_MASK_MASK 0x000001c0
+#define MAC_PHYCFG2_EMODE_MASK_AC131 0x000000c0
+#define MAC_PHYCFG2_EMODE_MASK_50610 0x00000100
+#define MAC_PHYCFG2_EMODE_MASK_RT8211 0x00000000
+#define MAC_PHYCFG2_EMODE_MASK_RT8201 0x000001c0
+#define MAC_PHYCFG2_EMODE_COMP_MASK 0x00000e00
+#define MAC_PHYCFG2_EMODE_COMP_AC131 0x00000600
+#define MAC_PHYCFG2_EMODE_COMP_50610 0x00000400
+#define MAC_PHYCFG2_EMODE_COMP_RT8211 0x00000800
+#define MAC_PHYCFG2_EMODE_COMP_RT8201 0x00000000
+#define MAC_PHYCFG2_FMODE_MASK_MASK 0x00007000
+#define MAC_PHYCFG2_FMODE_MASK_AC131 0x00006000
+#define MAC_PHYCFG2_FMODE_MASK_50610 0x00004000
+#define MAC_PHYCFG2_FMODE_MASK_RT8211 0x00000000
+#define MAC_PHYCFG2_FMODE_MASK_RT8201 0x00007000
+#define MAC_PHYCFG2_FMODE_COMP_MASK 0x00038000
+#define MAC_PHYCFG2_FMODE_COMP_AC131 0x00030000
+#define MAC_PHYCFG2_FMODE_COMP_50610 0x00008000
+#define MAC_PHYCFG2_FMODE_COMP_RT8211 0x00038000
+#define MAC_PHYCFG2_FMODE_COMP_RT8201 0x00000000
+#define MAC_PHYCFG2_GMODE_MASK_MASK 0x001c0000
+#define MAC_PHYCFG2_GMODE_MASK_AC131 0x001c0000
+#define MAC_PHYCFG2_GMODE_MASK_50610 0x00100000
+#define MAC_PHYCFG2_GMODE_MASK_RT8211 0x00000000
+#define MAC_PHYCFG2_GMODE_MASK_RT8201 0x001c0000
+#define MAC_PHYCFG2_GMODE_COMP_MASK 0x00e00000
+#define MAC_PHYCFG2_GMODE_COMP_AC131 0x00e00000
+#define MAC_PHYCFG2_GMODE_COMP_50610 0x00000000
+#define MAC_PHYCFG2_GMODE_COMP_RT8211 0x00200000
+#define MAC_PHYCFG2_GMODE_COMP_RT8201 0x00000000
+#define MAC_PHYCFG2_ACT_MASK_MASK 0x03000000
+#define MAC_PHYCFG2_ACT_MASK_AC131 0x03000000
+#define MAC_PHYCFG2_ACT_MASK_50610 0x01000000
+#define MAC_PHYCFG2_ACT_MASK_RT8211 0x03000000
+#define MAC_PHYCFG2_ACT_MASK_RT8201 0x01000000
+#define MAC_PHYCFG2_ACT_COMP_MASK 0x0c000000
+#define MAC_PHYCFG2_ACT_COMP_AC131 0x00000000
+#define MAC_PHYCFG2_ACT_COMP_50610 0x00000000
+#define MAC_PHYCFG2_ACT_COMP_RT8211 0x00000000
+#define MAC_PHYCFG2_ACT_COMP_RT8201 0x08000000
+#define MAC_PHYCFG2_QUAL_MASK_MASK 0x30000000
+#define MAC_PHYCFG2_QUAL_MASK_AC131 0x30000000
+#define MAC_PHYCFG2_QUAL_MASK_50610 0x30000000
+#define MAC_PHYCFG2_QUAL_MASK_RT8211 0x30000000
+#define MAC_PHYCFG2_QUAL_MASK_RT8201 0x30000000
+#define MAC_PHYCFG2_QUAL_COMP_MASK 0xc0000000
+#define MAC_PHYCFG2_QUAL_COMP_AC131 0x00000000
+#define MAC_PHYCFG2_QUAL_COMP_50610 0x00000000
+#define MAC_PHYCFG2_QUAL_COMP_RT8211 0x00000000
+#define MAC_PHYCFG2_QUAL_COMP_RT8201 0x00000000
+#define MAC_PHYCFG2_50610_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_50610 | \
+	 MAC_PHYCFG2_EMODE_COMP_50610 | \
+	 MAC_PHYCFG2_FMODE_MASK_50610 | \
+	 MAC_PHYCFG2_FMODE_COMP_50610 | \
+	 MAC_PHYCFG2_GMODE_MASK_50610 | \
+	 MAC_PHYCFG2_GMODE_COMP_50610 | \
+	 MAC_PHYCFG2_ACT_MASK_50610 | \
+	 MAC_PHYCFG2_ACT_COMP_50610 | \
+	 MAC_PHYCFG2_QUAL_MASK_50610 | \
+	 MAC_PHYCFG2_QUAL_COMP_50610)
+#define MAC_PHYCFG2_AC131_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_AC131 | \
+	 MAC_PHYCFG2_EMODE_COMP_AC131 | \
+	 MAC_PHYCFG2_FMODE_MASK_AC131 | \
+	 MAC_PHYCFG2_FMODE_COMP_AC131 | \
+	 MAC_PHYCFG2_GMODE_MASK_AC131 | \
+	 MAC_PHYCFG2_GMODE_COMP_AC131 | \
+	 MAC_PHYCFG2_ACT_MASK_AC131 | \
+	 MAC_PHYCFG2_ACT_COMP_AC131 | \
+	 MAC_PHYCFG2_QUAL_MASK_AC131 | \
+	 MAC_PHYCFG2_QUAL_COMP_AC131)
+#define MAC_PHYCFG2_RTL8211C_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_RT8211 | \
+	 MAC_PHYCFG2_EMODE_COMP_RT8211 | \
+	 MAC_PHYCFG2_FMODE_MASK_RT8211 | \
+	 MAC_PHYCFG2_FMODE_COMP_RT8211 | \
+	 MAC_PHYCFG2_GMODE_MASK_RT8211 | \
+	 MAC_PHYCFG2_GMODE_COMP_RT8211 | \
+	 MAC_PHYCFG2_ACT_MASK_RT8211 | \
+	 MAC_PHYCFG2_ACT_COMP_RT8211 | \
+	 MAC_PHYCFG2_QUAL_MASK_RT8211 | \
+	 MAC_PHYCFG2_QUAL_COMP_RT8211)
+#define MAC_PHYCFG2_RTL8201E_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_RT8201 | \
+	 MAC_PHYCFG2_EMODE_COMP_RT8201 | \
+	 MAC_PHYCFG2_FMODE_MASK_RT8201 | \
+	 MAC_PHYCFG2_FMODE_COMP_RT8201 | \
+	 MAC_PHYCFG2_GMODE_MASK_RT8201 | \
+	 MAC_PHYCFG2_GMODE_COMP_RT8201 | \
+	 MAC_PHYCFG2_ACT_MASK_RT8201 | \
+	 MAC_PHYCFG2_ACT_COMP_RT8201 | \
+	 MAC_PHYCFG2_QUAL_MASK_RT8201 | \
+	 MAC_PHYCFG2_QUAL_COMP_RT8201)
+#define MAC_EXT_RGMII_MODE 0x000005a8
+#define MAC_RGMII_MODE_TX_ENABLE 0x00000001
+#define MAC_RGMII_MODE_TX_LOWPWR 0x00000002
+#define MAC_RGMII_MODE_TX_RESET 0x00000004
+#define MAC_RGMII_MODE_RX_INT_B 0x00000100
+#define MAC_RGMII_MODE_RX_QUALITY 0x00000200
+#define MAC_RGMII_MODE_RX_ACTIVITY 0x00000400
+#define MAC_RGMII_MODE_RX_ENG_DET 0x00000800
+/* 0x5ac --> 0x5b0 unused */
+#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */
+#define SERDES_RX_SIG_DETECT 0x00000400
+#define SG_DIG_CTRL 0x000005b0
+#define SG_DIG_USING_HW_AUTONEG 0x80000000
+#define SG_DIG_SOFT_RESET 0x40000000
+#define SG_DIG_DISABLE_LINKRDY 0x20000000
+#define SG_DIG_CRC16_CLEAR_N 0x01000000
+#define SG_DIG_EN10B 0x00800000
+#define SG_DIG_CLEAR_STATUS 0x00400000
+#define SG_DIG_LOCAL_DUPLEX_STATUS 0x00200000
+#define SG_DIG_LOCAL_LINK_STATUS 0x00100000
+#define SG_DIG_SPEED_STATUS_MASK 0x000c0000
+#define SG_DIG_SPEED_STATUS_SHIFT 18
+#define SG_DIG_JUMBO_PACKET_DISABLE 0x00020000
+#define SG_DIG_RESTART_AUTONEG 0x00010000
+#define SG_DIG_FIBER_MODE 0x00008000
+#define SG_DIG_REMOTE_FAULT_MASK 0x00006000
+#define SG_DIG_PAUSE_MASK 0x00001800
+#define SG_DIG_PAUSE_CAP 0x00000800
+#define SG_DIG_ASYM_PAUSE 0x00001000
+#define SG_DIG_GBIC_ENABLE 0x00000400
+#define SG_DIG_CHECK_END_ENABLE 0x00000200
+#define SG_DIG_SGMII_AUTONEG_TIMER 0x00000100
+#define SG_DIG_CLOCK_PHASE_SELECT 0x00000080
+#define SG_DIG_GMII_INPUT_SELECT 0x00000040
+#define SG_DIG_MRADV_CRC16_SELECT 0x00000020
+#define SG_DIG_COMMA_DETECT_ENABLE 0x00000010
+#define SG_DIG_AUTONEG_TIMER_REDUCE 0x00000008
+#define SG_DIG_AUTONEG_LOW_ENABLE 0x00000004
+#define SG_DIG_REMOTE_LOOPBACK 0x00000002
+#define SG_DIG_LOOPBACK 0x00000001
+#define SG_DIG_COMMON_SETUP (SG_DIG_CRC16_CLEAR_N | \
+	SG_DIG_LOCAL_DUPLEX_STATUS | \
+	SG_DIG_LOCAL_LINK_STATUS | \
+	(0x2 << SG_DIG_SPEED_STATUS_SHIFT) | \
+	SG_DIG_FIBER_MODE | SG_DIG_GBIC_ENABLE)
+#define SG_DIG_STATUS 0x000005b4
+#define SG_DIG_CRC16_BUS_MASK 0xffff0000
+#define SG_DIG_PARTNER_FAULT_MASK 0x00600000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_ASYM_PAUSE 0x00100000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_PAUSE_CAPABLE 0x00080000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_HALF_DUPLEX 0x00040000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_FULL_DUPLEX 0x00020000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_NEXT_PAGE 0x00010000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_AUTONEG_STATE_MASK 0x00000ff0
+#define SG_DIG_IS_SERDES 0x00000100
+#define SG_DIG_COMMA_DETECTOR 0x00000008
+#define SG_DIG_MAC_ACK_STATUS 0x00000004
+#define SG_DIG_AUTONEG_COMPLETE 0x00000002
+#define SG_DIG_AUTONEG_ERROR 0x00000001
+/* 0x5b8 --> 0x600 unused */
+#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */
+#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */
+/* 0x624 --> 0x670 unused */
+
+#define MAC_RSS_INDIR_TBL_0 0x00000630
+
+#define MAC_RSS_HASH_KEY_0 0x00000670
+#define MAC_RSS_HASH_KEY_1 0x00000674
+#define MAC_RSS_HASH_KEY_2 0x00000678
+#define MAC_RSS_HASH_KEY_3 0x0000067c
+#define MAC_RSS_HASH_KEY_4 0x00000680
+#define MAC_RSS_HASH_KEY_5 0x00000684
+#define MAC_RSS_HASH_KEY_6 0x00000688
+#define MAC_RSS_HASH_KEY_7 0x0000068c
+#define MAC_RSS_HASH_KEY_8 0x00000690
+#define MAC_RSS_HASH_KEY_9 0x00000694
+/* 0x698 --> 0x800 unused */
+
+#define MAC_TX_STATS_OCTETS 0x00000800
+#define MAC_TX_STATS_RESV1 0x00000804
+#define MAC_TX_STATS_COLLISIONS 0x00000808
+#define MAC_TX_STATS_XON_SENT 0x0000080c
+#define MAC_TX_STATS_XOFF_SENT 0x00000810
+#define MAC_TX_STATS_RESV2 0x00000814
+#define MAC_TX_STATS_MAC_ERRORS 0x00000818
+#define MAC_TX_STATS_SINGLE_COLLISIONS 0x0000081c
+#define MAC_TX_STATS_MULT_COLLISIONS 0x00000820
+#define MAC_TX_STATS_DEFERRED 0x00000824
+#define MAC_TX_STATS_RESV3 0x00000828
+#define MAC_TX_STATS_EXCESSIVE_COL 0x0000082c
+#define MAC_TX_STATS_LATE_COL 0x00000830
+#define MAC_TX_STATS_RESV4_1 0x00000834
+#define MAC_TX_STATS_RESV4_2 0x00000838
+#define MAC_TX_STATS_RESV4_3 0x0000083c
+#define MAC_TX_STATS_RESV4_4 0x00000840
+#define MAC_TX_STATS_RESV4_5 0x00000844
+#define MAC_TX_STATS_RESV4_6 0x00000848
+#define MAC_TX_STATS_RESV4_7 0x0000084c
+#define MAC_TX_STATS_RESV4_8 0x00000850
+#define MAC_TX_STATS_RESV4_9 0x00000854
+#define MAC_TX_STATS_RESV4_10 0x00000858
+#define MAC_TX_STATS_RESV4_11 0x0000085c
+#define MAC_TX_STATS_RESV4_12 0x00000860
+#define MAC_TX_STATS_RESV4_13 0x00000864
+#define MAC_TX_STATS_RESV4_14 0x00000868
+#define MAC_TX_STATS_UCAST 0x0000086c
+#define MAC_TX_STATS_MCAST 0x00000870
+#define MAC_TX_STATS_BCAST 0x00000874
+#define MAC_TX_STATS_RESV5_1 0x00000878
+#define MAC_TX_STATS_RESV5_2 0x0000087c
+#define MAC_RX_STATS_OCTETS 0x00000880
+#define MAC_RX_STATS_RESV1 0x00000884
+#define MAC_RX_STATS_FRAGMENTS 0x00000888
+#define MAC_RX_STATS_UCAST 0x0000088c
+#define MAC_RX_STATS_MCAST 0x00000890
+#define MAC_RX_STATS_BCAST 0x00000894
+#define MAC_RX_STATS_FCS_ERRORS 0x00000898
+#define MAC_RX_STATS_ALIGN_ERRORS 0x0000089c
+#define MAC_RX_STATS_XON_PAUSE_RECVD 0x000008a0
+#define MAC_RX_STATS_XOFF_PAUSE_RECVD 0x000008a4
+#define MAC_RX_STATS_MAC_CTRL_RECVD 0x000008a8
+#define MAC_RX_STATS_XOFF_ENTERED 0x000008ac
+#define MAC_RX_STATS_FRAME_TOO_LONG 0x000008b0
+#define MAC_RX_STATS_JABBERS 0x000008b4
+#define MAC_RX_STATS_UNDERSIZE 0x000008b8
+/* 0x8bc --> 0xc00 unused */
+
+/* Send data initiator control registers */
+#define SNDDATAI_MODE 0x00000c00
+#define SNDDATAI_MODE_RESET 0x00000001
+#define SNDDATAI_MODE_ENABLE 0x00000002
+#define SNDDATAI_MODE_STAT_OFLOW_ENAB 0x00000004
+#define SNDDATAI_STATUS 0x00000c04
+#define SNDDATAI_STATUS_STAT_OFLOW 0x00000004
+#define SNDDATAI_STATSCTRL 0x00000c08
+#define SNDDATAI_SCTRL_ENABLE 0x00000001
+#define SNDDATAI_SCTRL_FASTUPD 0x00000002
+#define SNDDATAI_SCTRL_CLEAR 0x00000004
+#define SNDDATAI_SCTRL_FLUSH 0x00000008
+#define SNDDATAI_SCTRL_FORCE_ZERO 0x00000010
+#define SNDDATAI_STATSENAB 0x00000c0c
+#define SNDDATAI_STATSINCMASK 0x00000c10
+#define ISO_PKT_TX 0x00000c20
+/* 0xc24 --> 0xc80 unused */
+#define SNDDATAI_COS_CNT_0 0x00000c80
+#define SNDDATAI_COS_CNT_1 0x00000c84
+#define SNDDATAI_COS_CNT_2 0x00000c88
+#define SNDDATAI_COS_CNT_3 0x00000c8c
+#define SNDDATAI_COS_CNT_4 0x00000c90
+#define SNDDATAI_COS_CNT_5 0x00000c94
+#define SNDDATAI_COS_CNT_6 0x00000c98
+#define SNDDATAI_COS_CNT_7 0x00000c9c
+#define SNDDATAI_COS_CNT_8 0x00000ca0
+#define SNDDATAI_COS_CNT_9 0x00000ca4
+#define SNDDATAI_COS_CNT_10 0x00000ca8
+#define SNDDATAI_COS_CNT_11 0x00000cac
+#define SNDDATAI_COS_CNT_12 0x00000cb0
+#define SNDDATAI_COS_CNT_13 0x00000cb4
+#define SNDDATAI_COS_CNT_14 0x00000cb8
+#define SNDDATAI_COS_CNT_15 0x00000cbc
+#define SNDDATAI_DMA_RDQ_FULL_CNT 0x00000cc0
+#define SNDDATAI_DMA_PRIO_RDQ_FULL_CNT 0x00000cc4
+#define SNDDATAI_SDCQ_FULL_CNT 0x00000cc8
+#define SNDDATAI_NICRNG_SSND_PIDX_CNT 0x00000ccc
+#define SNDDATAI_STATS_UPDATED_CNT 0x00000cd0
+#define SNDDATAI_INTERRUPTS_CNT 0x00000cd4
+#define SNDDATAI_AVOID_INTERRUPTS_CNT 0x00000cd8
+#define SNDDATAI_SND_THRESH_HIT_CNT 0x00000cdc
+/* 0xce0 --> 0x1000 unused */
+
+/* Send data completion control registers */
+#define SNDDATAC_MODE 0x00001000
+#define SNDDATAC_MODE_RESET 0x00000001
+#define SNDDATAC_MODE_ENABLE 0x00000002
+#define SNDDATAC_MODE_CDELAY 0x00000010
+/* 0x1004 --> 0x1400 unused */
+
+/* Send BD ring selector */
+#define SNDBDS_MODE 0x00001400
+#define SNDBDS_MODE_RESET 0x00000001
+#define SNDBDS_MODE_ENABLE 0x00000002
+#define SNDBDS_MODE_ATTN_ENABLE 0x00000004
+#define SNDBDS_STATUS 0x00001404
+#define SNDBDS_STATUS_ERROR_ATTN 0x00000004
+#define SNDBDS_HWDIAG 0x00001408
+/* 0x140c --> 0x1440 */
+#define SNDBDS_SEL_CON_IDX_0 0x00001440
+#define SNDBDS_SEL_CON_IDX_1 0x00001444
+#define SNDBDS_SEL_CON_IDX_2 0x00001448
+#define SNDBDS_SEL_CON_IDX_3 0x0000144c
+#define SNDBDS_SEL_CON_IDX_4 0x00001450
+#define SNDBDS_SEL_CON_IDX_5 0x00001454
+#define SNDBDS_SEL_CON_IDX_6 0x00001458
+#define SNDBDS_SEL_CON_IDX_7 0x0000145c
+#define SNDBDS_SEL_CON_IDX_8 0x00001460
+#define SNDBDS_SEL_CON_IDX_9 0x00001464
+#define SNDBDS_SEL_CON_IDX_10 0x00001468
+#define SNDBDS_SEL_CON_IDX_11 0x0000146c
+#define SNDBDS_SEL_CON_IDX_12 0x00001470
+#define SNDBDS_SEL_CON_IDX_13 0x00001474
+#define SNDBDS_SEL_CON_IDX_14 0x00001478
+#define SNDBDS_SEL_CON_IDX_15 0x0000147c
+/* 0x1480 --> 0x1800 unused */
+
+/* Send BD initiator control registers */
+#define SNDBDI_MODE 0x00001800
+#define SNDBDI_MODE_RESET 0x00000001
+#define SNDBDI_MODE_ENABLE 0x00000002
+#define SNDBDI_MODE_ATTN_ENABLE 0x00000004
+#define SNDBDI_MODE_MULTI_TXQ_EN 0x00000020
+#define SNDBDI_STATUS 0x00001804
+#define SNDBDI_STATUS_ERROR_ATTN 0x00000004
+#define SNDBDI_IN_PROD_IDX_0 0x00001808
+#define SNDBDI_IN_PROD_IDX_1 0x0000180c
+#define SNDBDI_IN_PROD_IDX_2 0x00001810
+#define SNDBDI_IN_PROD_IDX_3 0x00001814
+#define SNDBDI_IN_PROD_IDX_4 0x00001818
+#define SNDBDI_IN_PROD_IDX_5 0x0000181c
+#define SNDBDI_IN_PROD_IDX_6 0x00001820
+#define SNDBDI_IN_PROD_IDX_7 0x00001824
+#define SNDBDI_IN_PROD_IDX_8 0x00001828
+#define SNDBDI_IN_PROD_IDX_9 0x0000182c
+#define SNDBDI_IN_PROD_IDX_10 0x00001830
+#define SNDBDI_IN_PROD_IDX_11 0x00001834
+#define SNDBDI_IN_PROD_IDX_12 0x00001838
+#define SNDBDI_IN_PROD_IDX_13 0x0000183c
+#define SNDBDI_IN_PROD_IDX_14 0x00001840
+#define SNDBDI_IN_PROD_IDX_15 0x00001844
+/* 0x1848 --> 0x1c00 unused */
+
+/* Send BD completion control registers */
+#define SNDBDC_MODE 0x00001c00
+#define SNDBDC_MODE_RESET 0x00000001
+#define SNDBDC_MODE_ENABLE 0x00000002
+#define SNDBDC_MODE_ATTN_ENABLE 0x00000004
+/* 0x1c04 --> 0x2000 unused */
+
+/* Receive list placement control registers */
+#define RCVLPC_MODE 0x00002000
+#define RCVLPC_MODE_RESET 0x00000001
+#define RCVLPC_MODE_ENABLE 0x00000002
+#define RCVLPC_MODE_CLASS0_ATTN_ENAB 0x00000004
+#define RCVLPC_MODE_MAPOOR_AATTN_ENAB 0x00000008
+#define RCVLPC_MODE_STAT_OFLOW_ENAB 0x00000010
+#define RCVLPC_STATUS 0x00002004
+#define RCVLPC_STATUS_CLASS0 0x00000004
+#define RCVLPC_STATUS_MAPOOR 0x00000008
+#define RCVLPC_STATUS_STAT_OFLOW 0x00000010
+#define RCVLPC_LOCK 0x00002008
+#define RCVLPC_LOCK_REQ_MASK 0x0000ffff
+#define RCVLPC_LOCK_REQ_SHIFT 0
+#define RCVLPC_LOCK_GRANT_MASK 0xffff0000
+#define RCVLPC_LOCK_GRANT_SHIFT 16
+#define RCVLPC_NON_EMPTY_BITS 0x0000200c
+#define RCVLPC_NON_EMPTY_BITS_MASK 0x0000ffff
+#define RCVLPC_CONFIG 0x00002010
+#define RCVLPC_STATSCTRL 0x00002014
+#define RCVLPC_STATSCTRL_ENABLE 0x00000001
+#define RCVLPC_STATSCTRL_FASTUPD 0x00000002
+#define RCVLPC_STATS_ENABLE 0x00002018
+#define RCVLPC_STATSENAB_ASF_FIX 0x00000002
+#define RCVLPC_STATSENAB_DACK_FIX 0x00040000
+#define RCVLPC_STATSENAB_LNGBRST_RFIX 0x00400000
+#define RCVLPC_STATS_INCMASK 0x0000201c
+/* 0x2020 --> 0x2100 unused */
+#define RCVLPC_SELLST_BASE 0x00002100 /* 16 16-byte entries */
+#define SELLST_TAIL 0x00000004
+#define SELLST_CONT 0x00000008
+#define SELLST_UNUSED 0x0000000c
+#define RCVLPC_COS_CNTL_BASE 0x00002200 /* 16 4-byte entries */
+#define RCVLPC_DROP_FILTER_CNT 0x00002240
+#define RCVLPC_DMA_WQ_FULL_CNT 0x00002244
+#define RCVLPC_DMA_HIPRIO_WQ_FULL_CNT 0x00002248
+#define RCVLPC_NO_RCV_BD_CNT 0x0000224c
+#define RCVLPC_IN_DISCARDS_CNT 0x00002250
+#define RCVLPC_IN_ERRORS_CNT 0x00002254
+#define RCVLPC_RCV_THRESH_HIT_CNT 0x00002258
+/* 0x225c --> 0x2400 unused */
+
+/* Receive Data and Receive BD Initiator Control */
+#define RCVDBDI_MODE 0x00002400
+#define RCVDBDI_MODE_RESET 0x00000001
+#define RCVDBDI_MODE_ENABLE 0x00000002
+#define RCVDBDI_MODE_JUMBOBD_NEEDED 0x00000004
+#define RCVDBDI_MODE_FRM_TOO_BIG 0x00000008
+#define RCVDBDI_MODE_INV_RING_SZ 0x00000010
+#define RCVDBDI_MODE_LRG_RING_SZ 0x00010000
+#define RCVDBDI_STATUS 0x00002404
+#define RCVDBDI_STATUS_JUMBOBD_NEEDED 0x00000004
+#define RCVDBDI_STATUS_FRM_TOO_BIG 0x00000008
+#define RCVDBDI_STATUS_INV_RING_SZ 0x00000010
+#define RCVDBDI_SPLIT_FRAME_MINSZ 0x00002408
+/* 0x240c --> 0x2440 unused */
+#define RCVDBDI_JUMBO_BD 0x00002440 /* TG3_BDINFO_... */
+#define RCVDBDI_STD_BD 0x00002450 /* TG3_BDINFO_... */
+#define RCVDBDI_MINI_BD 0x00002460 /* TG3_BDINFO_... */
+#define RCVDBDI_JUMBO_CON_IDX 0x00002470
+#define RCVDBDI_STD_CON_IDX 0x00002474
+#define RCVDBDI_MINI_CON_IDX 0x00002478
+/* 0x247c --> 0x2480 unused */
+#define RCVDBDI_BD_PROD_IDX_0 0x00002480
+#define RCVDBDI_BD_PROD_IDX_1 0x00002484
+#define RCVDBDI_BD_PROD_IDX_2 0x00002488
+#define RCVDBDI_BD_PROD_IDX_3 0x0000248c
+#define RCVDBDI_BD_PROD_IDX_4 0x00002490
+#define RCVDBDI_BD_PROD_IDX_5 0x00002494
+#define RCVDBDI_BD_PROD_IDX_6 0x00002498
+#define RCVDBDI_BD_PROD_IDX_7 0x0000249c
+#define RCVDBDI_BD_PROD_IDX_8 0x000024a0
+#define RCVDBDI_BD_PROD_IDX_9 0x000024a4
+#define RCVDBDI_BD_PROD_IDX_10 0x000024a8
+#define RCVDBDI_BD_PROD_IDX_11 0x000024ac
+#define RCVDBDI_BD_PROD_IDX_12 0x000024b0
+#define RCVDBDI_BD_PROD_IDX_13 0x000024b4
+#define RCVDBDI_BD_PROD_IDX_14 0x000024b8
+#define RCVDBDI_BD_PROD_IDX_15 0x000024bc
+#define RCVDBDI_HWDIAG 0x000024c0
+/* 0x24c4 --> 0x2800 unused */
+
+/* Receive Data Completion Control */
+#define RCVDCC_MODE 0x00002800
+#define RCVDCC_MODE_RESET 0x00000001
+#define RCVDCC_MODE_ENABLE 0x00000002
+#define RCVDCC_MODE_ATTN_ENABLE 0x00000004
+/* 0x2804 --> 0x2c00 unused */
+
+/* Receive BD Initiator Control Registers */
+#define RCVBDI_MODE 0x00002c00
+#define RCVBDI_MODE_RESET 0x00000001
+#define RCVBDI_MODE_ENABLE 0x00000002
+#define RCVBDI_MODE_RCB_ATTN_ENAB 0x00000004
+#define RCVBDI_STATUS 0x00002c04
+#define RCVBDI_STATUS_RCB_ATTN 0x00000004
+#define RCVBDI_JUMBO_PROD_IDX 0x00002c08
+#define RCVBDI_STD_PROD_IDX 0x00002c0c
+#define RCVBDI_MINI_PROD_IDX 0x00002c10
+#define RCVBDI_MINI_THRESH 0x00002c14
+#define RCVBDI_STD_THRESH 0x00002c18
+#define RCVBDI_JUMBO_THRESH 0x00002c1c
+/* 0x2c20 --> 0x2d00 unused */
+
+#define STD_REPLENISH_LWM 0x00002d00
+#define JMB_REPLENISH_LWM 0x00002d04
+/* 0x2d08 --> 0x3000 unused */
+
+/* Receive BD Completion Control Registers */
+#define RCVCC_MODE 0x00003000
+#define RCVCC_MODE_RESET 0x00000001
+#define RCVCC_MODE_ENABLE 0x00000002
+#define RCVCC_MODE_ATTN_ENABLE 0x00000004
+#define RCVCC_STATUS 0x00003004
+#define RCVCC_STATUS_ERROR_ATTN 0x00000004
+#define RCVCC_JUMP_PROD_IDX 0x00003008
+#define RCVCC_STD_PROD_IDX 0x0000300c
+#define RCVCC_MINI_PROD_IDX 0x00003010
+/* 0x3014 --> 0x3400 unused */
+
+/* Receive list selector control registers */
+#define RCVLSC_MODE 0x00003400
+#define RCVLSC_MODE_RESET 0x00000001
+#define RCVLSC_MODE_ENABLE 0x00000002
+#define RCVLSC_MODE_ATTN_ENABLE 0x00000004
+#define RCVLSC_STATUS 0x00003404
+#define RCVLSC_STATUS_ERROR_ATTN 0x00000004
+/* 0x3408 --> 0x3600 unused */
+
+#define TG3_CPMU_DRV_STATUS 0x0000344c
+
+/* CPMU registers */
+#define TG3_CPMU_CTRL 0x00003600
+#define CPMU_CTRL_LINK_IDLE_MODE 0x00000200
+#define CPMU_CTRL_LINK_AWARE_MODE 0x00000400
+#define CPMU_CTRL_LINK_SPEED_MODE 0x00004000
+#define CPMU_CTRL_GPHY_10MB_RXONLY 0x00010000
+#define TG3_CPMU_LSPD_10MB_CLK 0x00003604
+#define CPMU_LSPD_10MB_MACCLK_MASK 0x001f0000
+#define CPMU_LSPD_10MB_MACCLK_6_25 0x00130000
+/* 0x3608 --> 0x360c unused */
+
+#define TG3_CPMU_LSPD_1000MB_CLK 0x0000360c
+#define CPMU_LSPD_1000MB_MACCLK_62_5 0x00000000
+#define CPMU_LSPD_1000MB_MACCLK_12_5 0x00110000
+#define CPMU_LSPD_1000MB_MACCLK_MASK 0x001f0000
+#define TG3_CPMU_LNK_AWARE_PWRMD 0x00003610
+#define CPMU_LNK_AWARE_MACCLK_MASK 0x001f0000
+#define CPMU_LNK_AWARE_MACCLK_6_25 0x00130000
+/* 0x3614 --> 0x361c unused */
+
+#define TG3_CPMU_HST_ACC 0x0000361c
+#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000
+#define CPMU_HST_ACC_MACCLK_6_25 0x00130000
+/* 0x3620 --> 0x3630 unused */
+
+#define TG3_CPMU_CLCK_ORIDE 0x00003624
+#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
+
+#define TG3_CPMU_CLCK_STAT 0x00003630
+#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
+#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
+#define CPMU_CLCK_STAT_MAC_CLCK_12_5 0x00110000
+#define CPMU_CLCK_STAT_MAC_CLCK_6_25 0x00130000
+/* 0x3634 --> 0x365c unused */
+
+#define TG3_CPMU_MUTEX_REQ 0x0000365c
+#define CPMU_MUTEX_REQ_DRIVER 0x00001000
+#define TG3_CPMU_MUTEX_GNT 0x00003660
+#define CPMU_MUTEX_GNT_DRIVER 0x00001000
+#define TG3_CPMU_PHY_STRAP 0x00003664
+#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
+/* 0x3664 --> 0x36b0 unused */
+
+#define TG3_CPMU_EEE_MODE 0x000036b0
+#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004
+#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008
+#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040
+#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080
+#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100
+#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200
+#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
+#define TG3_CPMU_EEE_DBTMR1 0x000036b4
+#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff
+#define TG3_CPMU_EEE_DBTMR2 0x000036b8
+#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff
+#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
+#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
+#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
+/* 0x36c0 --> 0x36d0 unused */
+
+#define TG3_CPMU_EEE_CTRL 0x000036d0
+#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US 0x0000019d
+#define TG3_CPMU_EEE_CTRL_EXIT_36_US 0x00000384
+#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US 0x000001f8
+/* 0x36d4 --> 0x3800 unused */
+
+/* Mbuf cluster free registers */
+#define MBFREE_MODE 0x00003800
+#define MBFREE_MODE_RESET 0x00000001
+#define MBFREE_MODE_ENABLE 0x00000002
+#define MBFREE_STATUS 0x00003804
+/* 0x3808 --> 0x3c00 unused */
+
+/* Host coalescing control registers */
+#define HOSTCC_MODE 0x00003c00
+#define HOSTCC_MODE_RESET 0x00000001
+#define HOSTCC_MODE_ENABLE 0x00000002
+#define HOSTCC_MODE_ATTN 0x00000004
+#define HOSTCC_MODE_NOW 0x00000008
+#define HOSTCC_MODE_FULL_STATUS 0x00000000
+#define HOSTCC_MODE_64BYTE 0x00000080
+#define HOSTCC_MODE_32BYTE 0x00000100
+#define HOSTCC_MODE_CLRTICK_RXBD 0x00000200
+#define HOSTCC_MODE_CLRTICK_TXBD 0x00000400
+#define HOSTCC_MODE_NOINT_ON_NOW 0x00000800
+#define HOSTCC_MODE_NOINT_ON_FORCE 0x00001000
+#define HOSTCC_MODE_COAL_VEC1_NOW 0x00002000
+#define HOSTCC_STATUS 0x00003c04
+#define HOSTCC_STATUS_ERROR_ATTN 0x00000004
+#define HOSTCC_RXCOL_TICKS 0x00003c08
+#define LOW_RXCOL_TICKS 0x00000032
+#define LOW_RXCOL_TICKS_CLRTCKS 0x00000014
+#define DEFAULT_RXCOL_TICKS 0x00000048
+#define HIGH_RXCOL_TICKS 0x00000096
+#define MAX_RXCOL_TICKS 0x000003ff
+#define HOSTCC_TXCOL_TICKS 0x00003c0c
+#define LOW_TXCOL_TICKS 0x00000096
+#define LOW_TXCOL_TICKS_CLRTCKS 0x00000048
+#define DEFAULT_TXCOL_TICKS 0x0000012c
+#define HIGH_TXCOL_TICKS 0x00000145
+#define MAX_TXCOL_TICKS 0x000003ff
+#define HOSTCC_RXMAX_FRAMES 0x00003c10
+#define LOW_RXMAX_FRAMES 0x00000005
+#define DEFAULT_RXMAX_FRAMES 0x00000008
+#define HIGH_RXMAX_FRAMES 0x00000012
+#define MAX_RXMAX_FRAMES 0x000000ff
+#define HOSTCC_TXMAX_FRAMES 0x00003c14
+#define LOW_TXMAX_FRAMES 0x00000035
+#define DEFAULT_TXMAX_FRAMES 0x0000004b
+#define HIGH_TXMAX_FRAMES 0x00000052
+#define MAX_TXMAX_FRAMES 0x000000ff
+#define HOSTCC_RXCOAL_TICK_INT 0x00003c18
+#define DEFAULT_RXCOAL_TICK_INT 0x00000019
+#define DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014
+#define MAX_RXCOAL_TICK_INT 0x000003ff
+#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c
+#define DEFAULT_TXCOAL_TICK_INT 0x00000019
+#define DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014
+#define MAX_TXCOAL_TICK_INT 0x000003ff
+#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20
+#define DEFAULT_RXCOAL_MAXF_INT 0x00000005
+#define MAX_RXCOAL_MAXF_INT 0x000000ff
+#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24
+#define DEFAULT_TXCOAL_MAXF_INT 0x00000005
+#define MAX_TXCOAL_MAXF_INT 0x000000ff
+#define HOSTCC_STAT_COAL_TICKS 0x00003c28
+#define DEFAULT_STAT_COAL_TICKS 0x000f4240
+#define MAX_STAT_COAL_TICKS 0xd693d400
+#define MIN_STAT_COAL_TICKS 0x00000064
+/* 0x3c2c --> 0x3c30 unused */
+#define HOSTCC_STATS_BLK_HOST_ADDR 0x00003c30 /* 64-bit */
+#define HOSTCC_STATUS_BLK_HOST_ADDR 0x00003c38 /* 64-bit */
+#define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40
+#define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44
+#define HOSTCC_FLOW_ATTN 0x00003c48
+#define HOSTCC_FLOW_ATTN_MBUF_LWM 0x00000040
+/* 0x3c4c --> 0x3c50 unused */
+#define HOSTCC_JUMBO_CON_IDX 0x00003c50
+#define HOSTCC_STD_CON_IDX 0x00003c54
+#define HOSTCC_MINI_CON_IDX 0x00003c58
+/* 0x3c5c --> 0x3c80 unused */
+#define HOSTCC_RET_PROD_IDX_0 0x00003c80
+#define HOSTCC_RET_PROD_IDX_1 0x00003c84
+#define HOSTCC_RET_PROD_IDX_2 0x00003c88
+#define HOSTCC_RET_PROD_IDX_3 0x00003c8c
+#define HOSTCC_RET_PROD_IDX_4 0x00003c90
+#define HOSTCC_RET_PROD_IDX_5 0x00003c94
+#define HOSTCC_RET_PROD_IDX_6 0x00003c98
+#define HOSTCC_RET_PROD_IDX_7 0x00003c9c
+#define HOSTCC_RET_PROD_IDX_8 0x00003ca0
+#define HOSTCC_RET_PROD_IDX_9 0x00003ca4
+#define HOSTCC_RET_PROD_IDX_10 0x00003ca8
+#define HOSTCC_RET_PROD_IDX_11 0x00003cac
+#define HOSTCC_RET_PROD_IDX_12 0x00003cb0
+#define HOSTCC_RET_PROD_IDX_13 0x00003cb4
+#define HOSTCC_RET_PROD_IDX_14 0x00003cb8
+#define HOSTCC_RET_PROD_IDX_15 0x00003cbc
+#define HOSTCC_SND_CON_IDX_0 0x00003cc0
+#define HOSTCC_SND_CON_IDX_1 0x00003cc4
+#define HOSTCC_SND_CON_IDX_2 0x00003cc8
+#define HOSTCC_SND_CON_IDX_3 0x00003ccc
+#define HOSTCC_SND_CON_IDX_4 0x00003cd0
+#define HOSTCC_SND_CON_IDX_5 0x00003cd4
+#define HOSTCC_SND_CON_IDX_6 0x00003cd8
+#define HOSTCC_SND_CON_IDX_7 0x00003cdc
+#define HOSTCC_SND_CON_IDX_8 0x00003ce0
+#define HOSTCC_SND_CON_IDX_9 0x00003ce4
+#define HOSTCC_SND_CON_IDX_10 0x00003ce8
+#define HOSTCC_SND_CON_IDX_11 0x00003cec
+#define HOSTCC_SND_CON_IDX_12 0x00003cf0
+#define HOSTCC_SND_CON_IDX_13 0x00003cf4
+#define HOSTCC_SND_CON_IDX_14 0x00003cf8
+#define HOSTCC_SND_CON_IDX_15 0x00003cfc
+#define HOSTCC_STATBLCK_RING1 0x00003d00
+/* 0x3d00 --> 0x3d80 unused */
+
+#define HOSTCC_RXCOL_TICKS_VEC1 0x00003d80
+#define HOSTCC_TXCOL_TICKS_VEC1 0x00003d84
+#define HOSTCC_RXMAX_FRAMES_VEC1 0x00003d88
+#define HOSTCC_TXMAX_FRAMES_VEC1 0x00003d8c
+#define HOSTCC_RXCOAL_MAXF_INT_VEC1 0x00003d90
+#define HOSTCC_TXCOAL_MAXF_INT_VEC1 0x00003d94
+/* 0x3d98 --> 0x4000 unused */
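[Editorial aside, not part of the patch: the HOSTCC tick/frame register pairs above bound the interrupt rate; broadly, an interrupt is coalesced until either the timer-tick threshold or the frame-count threshold is reached, whichever comes first. A hypothetical sketch of programming the defaults, again assuming a tw32() register-write helper:]

	tw32(HOSTCC_RXCOL_TICKS, DEFAULT_RXCOL_TICKS);	/* rx timer threshold */
	tw32(HOSTCC_RXMAX_FRAMES, DEFAULT_RXMAX_FRAMES);	/* rx frame threshold */
	tw32(HOSTCC_TXCOL_TICKS, DEFAULT_TXCOL_TICKS);	/* tx timer threshold */
	tw32(HOSTCC_TXMAX_FRAMES, DEFAULT_TXMAX_FRAMES);	/* tx frame threshold */
	/* enable coalescing with the compact 64-byte status block format */
	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | HOSTCC_MODE_64BYTE);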
+
+/* Memory arbiter control registers */
+#define MEMARB_MODE 0x00004000
+#define MEMARB_MODE_RESET 0x00000001
+#define MEMARB_MODE_ENABLE 0x00000002
+#define MEMARB_STATUS 0x00004004
+#define MEMARB_TRAP_ADDR_LOW 0x00004008
+#define MEMARB_TRAP_ADDR_HIGH 0x0000400c
+/* 0x4010 --> 0x4400 unused */
+
+/* Buffer manager control registers */
+#define BUFMGR_MODE 0x00004400
+#define BUFMGR_MODE_RESET 0x00000001
+#define BUFMGR_MODE_ENABLE 0x00000002
+#define BUFMGR_MODE_ATTN_ENABLE 0x00000004
+#define BUFMGR_MODE_BM_TEST 0x00000008
+#define BUFMGR_MODE_MBLOW_ATTN_ENAB 0x00000010
+#define BUFMGR_MODE_NO_TX_UNDERRUN 0x80000000
+#define BUFMGR_STATUS 0x00004404
+#define BUFMGR_STATUS_ERROR 0x00000004
+#define BUFMGR_STATUS_MBLOW 0x00000010
+#define BUFMGR_MB_POOL_ADDR 0x00004408
+#define BUFMGR_MB_POOL_SIZE 0x0000440c
+#define BUFMGR_MB_RDMA_LOW_WATER 0x00004410
+#define DEFAULT_MB_RDMA_LOW_WATER 0x00000050
+#define DEFAULT_MB_RDMA_LOW_WATER_5705 0x00000000
+#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO 0x00000130
+#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780 0x00000000
+#define BUFMGR_MB_MACRX_LOW_WATER 0x00004414
+#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020
+#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010
+#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004
+#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
+#define BUFMGR_MB_HIGH_WATER 0x00004418
+#define DEFAULT_MB_HIGH_WATER 0x00000060
+#define DEFAULT_MB_HIGH_WATER_5705 0x00000060
+#define DEFAULT_MB_HIGH_WATER_5906 0x00000010
+#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0
+#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c
+#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
+#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
+#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c
+#define BUFMGR_MB_ALLOC_BIT 0x10000000
+#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420
+#define BUFMGR_TX_MB_ALLOC_REQ 0x00004424
+#define BUFMGR_TX_MB_ALLOC_RESP 0x00004428
+#define BUFMGR_DMA_DESC_POOL_ADDR 0x0000442c
+#define BUFMGR_DMA_DESC_POOL_SIZE 0x00004430
+#define BUFMGR_DMA_LOW_WATER 0x00004434
+#define DEFAULT_DMA_LOW_WATER 0x00000005
+#define BUFMGR_DMA_HIGH_WATER 0x00004438
+#define DEFAULT_DMA_HIGH_WATER 0x0000000a
+#define BUFMGR_RX_DMA_ALLOC_REQ 0x0000443c
+#define BUFMGR_RX_DMA_ALLOC_RESP 0x00004440
+#define BUFMGR_TX_DMA_ALLOC_REQ 0x00004444
+#define BUFMGR_TX_DMA_ALLOC_RESP 0x00004448
+#define BUFMGR_HWDIAG_0 0x0000444c
+#define BUFMGR_HWDIAG_1 0x00004450
+#define BUFMGR_HWDIAG_2 0x00004454
+/* 0x4458 --> 0x4800 unused */
+
+/* Read DMA control registers */
+#define RDMAC_MODE 0x00004800
+#define RDMAC_MODE_RESET 0x00000001
+#define RDMAC_MODE_ENABLE 0x00000002
+#define RDMAC_MODE_TGTABORT_ENAB 0x00000004
+#define RDMAC_MODE_MSTABORT_ENAB 0x00000008
+#define RDMAC_MODE_PARITYERR_ENAB 0x00000010
+#define RDMAC_MODE_ADDROFLOW_ENAB 0x00000020
+#define RDMAC_MODE_FIFOOFLOW_ENAB 0x00000040
+#define RDMAC_MODE_FIFOURUN_ENAB 0x00000080
+#define RDMAC_MODE_FIFOOREAD_ENAB 0x00000100
+#define RDMAC_MODE_LNGREAD_ENAB 0x00000200
+#define RDMAC_MODE_SPLIT_ENABLE 0x00000800
+#define RDMAC_MODE_BD_SBD_CRPT_ENAB 0x00000800
+#define RDMAC_MODE_SPLIT_RESET 0x00001000
+#define RDMAC_MODE_MBUF_RBD_CRPT_ENAB 0x00001000
+#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
+#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
+#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
+#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
+#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
+#define RDMAC_MODE_H2BNC_VLAN_DET 0x20000000
+#define RDMAC_STATUS 0x00004804
+#define RDMAC_STATUS_TGTABORT 0x00000004
+#define RDMAC_STATUS_MSTABORT 0x00000008
+#define RDMAC_STATUS_PARITYERR 0x00000010
+#define RDMAC_STATUS_ADDROFLOW 0x00000020
+#define RDMAC_STATUS_FIFOOFLOW 0x00000040
+#define RDMAC_STATUS_FIFOURUN 0x00000080
+#define RDMAC_STATUS_FIFOOREAD 0x00000100
+#define RDMAC_STATUS_LNGREAD 0x00000200
+/* 0x4808 --> 0x4900 unused */
+
+#define TG3_RDMA_RSRVCTRL_REG 0x00004900
+#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
+/* 0x4904 --> 0x4910 unused */
+
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000
+/* 0x4914 --> 0x4c00 unused */
+
+/* Write DMA control registers */
+#define WDMAC_MODE 0x00004c00
+#define WDMAC_MODE_RESET 0x00000001
+#define WDMAC_MODE_ENABLE 0x00000002
+#define WDMAC_MODE_TGTABORT_ENAB 0x00000004
+#define WDMAC_MODE_MSTABORT_ENAB 0x00000008
+#define WDMAC_MODE_PARITYERR_ENAB 0x00000010
+#define WDMAC_MODE_ADDROFLOW_ENAB 0x00000020
+#define WDMAC_MODE_FIFOOFLOW_ENAB 0x00000040
+#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080
+#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100
+#define WDMAC_MODE_LNGREAD_ENAB 0x00000200
+#define WDMAC_MODE_RX_ACCEL 0x00000400
+#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000
+#define WDMAC_MODE_BURST_ALL_DATA 0xc0000000
+#define WDMAC_STATUS 0x00004c04
+#define WDMAC_STATUS_TGTABORT 0x00000004
+#define WDMAC_STATUS_MSTABORT 0x00000008
+#define WDMAC_STATUS_PARITYERR 0x00000010
+#define WDMAC_STATUS_ADDROFLOW 0x00000020
+#define WDMAC_STATUS_FIFOOFLOW 0x00000040
+#define WDMAC_STATUS_FIFOURUN 0x00000080
+#define WDMAC_STATUS_FIFOOREAD 0x00000100
+#define WDMAC_STATUS_LNGREAD 0x00000200
+/* 0x4c08 --> 0x5000 unused */
+
+/* Per-cpu register offsets (arm9) */
+#define CPU_MODE 0x00000000
+#define CPU_MODE_RESET 0x00000001
+#define CPU_MODE_HALT 0x00000400
+#define CPU_STATE 0x00000004
+#define CPU_EVTMASK 0x00000008
+/* 0xc --> 0x1c reserved */
+#define CPU_PC 0x0000001c
+#define CPU_INSN 0x00000020
+#define CPU_SPAD_UFLOW 0x00000024
+#define CPU_WDOG_CLEAR 0x00000028
+#define CPU_WDOG_VECTOR 0x0000002c
+#define CPU_WDOG_PC 0x00000030
+#define CPU_HW_BP 0x00000034
+/* 0x38 --> 0x44 unused */
+#define CPU_WDOG_SAVED_STATE 0x00000044
+#define CPU_LAST_BRANCH_ADDR 0x00000048
+#define CPU_SPAD_UFLOW_SET 0x0000004c
+/* 0x50 --> 0x200 unused */
+#define CPU_R0 0x00000200
+#define CPU_R1 0x00000204
+#define CPU_R2 0x00000208
+#define CPU_R3 0x0000020c
+#define CPU_R4 0x00000210
+#define CPU_R5 0x00000214
+#define CPU_R6 0x00000218
+#define CPU_R7 0x0000021c
+#define CPU_R8 0x00000220
+#define CPU_R9 0x00000224
+#define CPU_R10 0x00000228
+#define CPU_R11 0x0000022c
+#define CPU_R12 0x00000230
+#define CPU_R13 0x00000234
+#define CPU_R14 0x00000238
+#define CPU_R15 0x0000023c
+#define CPU_R16 0x00000240
+#define CPU_R17 0x00000244
+#define CPU_R18 0x00000248
+#define CPU_R19 0x0000024c
+#define CPU_R20 0x00000250
+#define CPU_R21 0x00000254
+#define CPU_R22 0x00000258
+#define CPU_R23 0x0000025c
+#define CPU_R24 0x00000260
+#define CPU_R25 0x00000264
+#define CPU_R26 0x00000268
+#define CPU_R27 0x0000026c
+#define CPU_R28 0x00000270
+#define CPU_R29 0x00000274
+#define CPU_R30 0x00000278
+#define CPU_R31 0x0000027c
+/* 0x280 --> 0x400 unused */
+
+#define RX_CPU_BASE 0x00005000
+#define RX_CPU_MODE 0x00005000
+#define RX_CPU_STATE 0x00005004
+#define RX_CPU_PGMCTR 0x0000501c
+#define RX_CPU_HWBKPT 0x00005034
+#define TX_CPU_BASE 0x00005400
+#define TX_CPU_MODE 0x00005400
+#define TX_CPU_STATE 0x00005404
+#define TX_CPU_PGMCTR 0x0000541c
+
+#define VCPU_STATUS 0x00005100
+#define VCPU_STATUS_INIT_DONE 0x04000000
+#define VCPU_STATUS_DRV_RESET 0x08000000
+
+#define VCPU_CFGSHDW 0x00005104
+#define VCPU_CFGSHDW_WOL_ENABLE 0x00000001
+#define VCPU_CFGSHDW_WOL_MAGPKT 0x00000004
+#define VCPU_CFGSHDW_ASPM_DBNC 0x00001000
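[Editorial aside, not part of the patch: the per-CPU offsets above are relative to a firmware CPU's base, e.g. RX_CPU_BASE. A hypothetical sketch of halting the RX firmware CPU, assuming tr32()/tw32() accessors:]

	/* clear pending state, request halt, then poll for acknowledgment */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
	while (!(tr32(RX_CPU_BASE + CPU_MODE) & CPU_MODE_HALT))
		udelay(10);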
+
+/* Mailboxes */
+#define GRCMBOX_BASE 0x00005600
+#define GRCMBOX_INTERRUPT_0 0x00005800 /* 64-bit */
+#define GRCMBOX_INTERRUPT_1 0x00005808 /* 64-bit */
+#define GRCMBOX_INTERRUPT_2 0x00005810 /* 64-bit */
+#define GRCMBOX_INTERRUPT_3 0x00005818 /* 64-bit */
+#define GRCMBOX_GENERAL_0 0x00005820 /* 64-bit */
+#define GRCMBOX_GENERAL_1 0x00005828 /* 64-bit */
+#define GRCMBOX_GENERAL_2 0x00005830 /* 64-bit */
+#define GRCMBOX_GENERAL_3 0x00005838 /* 64-bit */
+#define GRCMBOX_GENERAL_4 0x00005840 /* 64-bit */
+#define GRCMBOX_GENERAL_5 0x00005848 /* 64-bit */
+#define GRCMBOX_GENERAL_6 0x00005850 /* 64-bit */
+#define GRCMBOX_GENERAL_7 0x00005858 /* 64-bit */
+#define GRCMBOX_RELOAD_STAT 0x00005860 /* 64-bit */
+#define GRCMBOX_RCVSTD_PROD_IDX 0x00005868 /* 64-bit */
+#define GRCMBOX_RCVJUMBO_PROD_IDX 0x00005870 /* 64-bit */
+#define GRCMBOX_RCVMINI_PROD_IDX 0x00005878 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_0 0x00005880 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_1 0x00005888 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_2 0x00005890 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_3 0x00005898 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_4 0x000058a0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_5 0x000058a8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_6 0x000058b0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_7 0x000058b8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_8 0x000058c0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_9 0x000058c8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_10 0x000058d0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_11 0x000058d8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_12 0x000058e0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_13 0x000058e8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_14 0x000058f0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_15 0x000058f8 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_0 0x00005900 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_1 0x00005908 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_2 0x00005910 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_3 0x00005918 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_4 0x00005920 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_5 0x00005928 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_6 0x00005930 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_7 0x00005938 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_8 0x00005940 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_9 0x00005948 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_10 0x00005950 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_11 0x00005958 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_12 0x00005960 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_13 0x00005968 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_14 0x00005970 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_15 0x00005978 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_0 0x00005980 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_1 0x00005988 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_2 0x00005990 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_3 0x00005998 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_4 0x000059a0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_5 0x000059a8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_6 0x000059b0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_7 0x000059b8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_8 0x000059c0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_9 0x000059c8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_10 0x000059d0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_11 0x000059d8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_12 0x000059e0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_13 0x000059e8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_14 0x000059f0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_15 0x000059f8 /* 64-bit */
+#define GRCMBOX_HIGH_PRIO_EV_VECTOR 0x00005a00
+#define GRCMBOX_HIGH_PRIO_EV_MASK 0x00005a04
+#define GRCMBOX_LOW_PRIO_EV_VEC 0x00005a08
+#define GRCMBOX_LOW_PRIO_EV_MASK 0x00005a0c
+/* 0x5a10 --> 0x5c00 */
+
+/* Flow Through queues */
+#define FTQ_RESET 0x00005c00
+/* 0x5c04 --> 0x5c10 unused */
+#define FTQ_DMA_NORM_READ_CTL 0x00005c10
+#define FTQ_DMA_NORM_READ_FULL_CNT 0x00005c14
+#define FTQ_DMA_NORM_READ_FIFO_ENQDEQ 0x00005c18
+#define FTQ_DMA_NORM_READ_WRITE_PEEK 0x00005c1c
+#define FTQ_DMA_HIGH_READ_CTL 0x00005c20
+#define FTQ_DMA_HIGH_READ_FULL_CNT 0x00005c24
+#define FTQ_DMA_HIGH_READ_FIFO_ENQDEQ 0x00005c28
+#define FTQ_DMA_HIGH_READ_WRITE_PEEK 0x00005c2c
+#define FTQ_DMA_COMP_DISC_CTL 0x00005c30
+#define FTQ_DMA_COMP_DISC_FULL_CNT 0x00005c34
+#define FTQ_DMA_COMP_DISC_FIFO_ENQDEQ 0x00005c38
+#define FTQ_DMA_COMP_DISC_WRITE_PEEK 0x00005c3c
+#define FTQ_SEND_BD_COMP_CTL 0x00005c40
+#define FTQ_SEND_BD_COMP_FULL_CNT 0x00005c44
+#define FTQ_SEND_BD_COMP_FIFO_ENQDEQ 0x00005c48
+#define FTQ_SEND_BD_COMP_WRITE_PEEK 0x00005c4c
+#define FTQ_SEND_DATA_INIT_CTL 0x00005c50
+#define FTQ_SEND_DATA_INIT_FULL_CNT 0x00005c54
+#define FTQ_SEND_DATA_INIT_FIFO_ENQDEQ 0x00005c58
+#define FTQ_SEND_DATA_INIT_WRITE_PEEK 0x00005c5c
+#define FTQ_DMA_NORM_WRITE_CTL 0x00005c60
+#define FTQ_DMA_NORM_WRITE_FULL_CNT 0x00005c64
+#define FTQ_DMA_NORM_WRITE_FIFO_ENQDEQ 0x00005c68
+#define FTQ_DMA_NORM_WRITE_WRITE_PEEK 0x00005c6c
+#define FTQ_DMA_HIGH_WRITE_CTL 0x00005c70
+#define FTQ_DMA_HIGH_WRITE_FULL_CNT 0x00005c74
+#define FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ 0x00005c78
+#define FTQ_DMA_HIGH_WRITE_WRITE_PEEK 0x00005c7c
+#define FTQ_SWTYPE1_CTL 0x00005c80
+#define FTQ_SWTYPE1_FULL_CNT 0x00005c84
+#define FTQ_SWTYPE1_FIFO_ENQDEQ 0x00005c88
+#define FTQ_SWTYPE1_WRITE_PEEK 0x00005c8c
+#define FTQ_SEND_DATA_COMP_CTL 0x00005c90
+#define FTQ_SEND_DATA_COMP_FULL_CNT 0x00005c94
+#define FTQ_SEND_DATA_COMP_FIFO_ENQDEQ 0x00005c98
+#define FTQ_SEND_DATA_COMP_WRITE_PEEK 0x00005c9c
+#define FTQ_HOST_COAL_CTL 0x00005ca0
+#define FTQ_HOST_COAL_FULL_CNT 0x00005ca4
+#define FTQ_HOST_COAL_FIFO_ENQDEQ 0x00005ca8
+#define FTQ_HOST_COAL_WRITE_PEEK 0x00005cac
+#define FTQ_MAC_TX_CTL 0x00005cb0
+#define FTQ_MAC_TX_FULL_CNT 0x00005cb4
+#define FTQ_MAC_TX_FIFO_ENQDEQ 0x00005cb8
+#define FTQ_MAC_TX_WRITE_PEEK 0x00005cbc
+#define FTQ_MB_FREE_CTL 0x00005cc0
+#define FTQ_MB_FREE_FULL_CNT 0x00005cc4
+#define FTQ_MB_FREE_FIFO_ENQDEQ 0x00005cc8
+#define FTQ_MB_FREE_WRITE_PEEK 0x00005ccc
+#define FTQ_RCVBD_COMP_CTL 0x00005cd0
+#define FTQ_RCVBD_COMP_FULL_CNT 0x00005cd4
+#define FTQ_RCVBD_COMP_FIFO_ENQDEQ 0x00005cd8
+#define FTQ_RCVBD_COMP_WRITE_PEEK 0x00005cdc
+#define FTQ_RCVLST_PLMT_CTL 0x00005ce0
+#define FTQ_RCVLST_PLMT_FULL_CNT 0x00005ce4
+#define FTQ_RCVLST_PLMT_FIFO_ENQDEQ 0x00005ce8
+#define FTQ_RCVLST_PLMT_WRITE_PEEK 0x00005cec
+#define FTQ_RCVDATA_INI_CTL 0x00005cf0
+#define FTQ_RCVDATA_INI_FULL_CNT 0x00005cf4
+#define FTQ_RCVDATA_INI_FIFO_ENQDEQ 0x00005cf8
+#define FTQ_RCVDATA_INI_WRITE_PEEK 0x00005cfc
+#define FTQ_RCVDATA_COMP_CTL 0x00005d00
+#define FTQ_RCVDATA_COMP_FULL_CNT 0x00005d04
+#define FTQ_RCVDATA_COMP_FIFO_ENQDEQ 0x00005d08
+#define FTQ_RCVDATA_COMP_WRITE_PEEK 0x00005d0c
+#define FTQ_SWTYPE2_CTL 0x00005d10
+#define FTQ_SWTYPE2_FULL_CNT 0x00005d14
+#define FTQ_SWTYPE2_FIFO_ENQDEQ 0x00005d18
+#define FTQ_SWTYPE2_WRITE_PEEK 0x00005d1c
+/* 0x5d20 --> 0x6000 unused */
+
+/* Message signaled interrupt registers */
+#define MSGINT_MODE 0x00006000
+#define MSGINT_MODE_RESET 0x00000001
+#define MSGINT_MODE_ENABLE 0x00000002
+#define MSGINT_MODE_ONE_SHOT_DISABLE 0x00000020
+#define MSGINT_MODE_MULTIVEC_EN 0x00000080
+#define MSGINT_STATUS 0x00006004
+#define MSGINT_STATUS_MSI_REQ 0x00000001
+#define MSGINT_FIFO 0x00006008
+/* 0x600c --> 0x6400 unused */
+
+/* DMA completion registers */
+#define DMAC_MODE 0x00006400
+#define DMAC_MODE_RESET 0x00000001
+#define DMAC_MODE_ENABLE 0x00000002
+/* 0x6404 --> 0x6800 unused */
+
+/* GRC registers */
+#define GRC_MODE 0x00006800
+#define GRC_MODE_UPD_ON_COAL 0x00000001
+#define GRC_MODE_BSWAP_NONFRM_DATA 0x00000002
+#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004
+#define GRC_MODE_BSWAP_DATA 0x00000010
+#define GRC_MODE_WSWAP_DATA 0x00000020
+#define GRC_MODE_BYTE_SWAP_B2HRX_DATA 0x00000040
+#define GRC_MODE_WORD_SWAP_B2HRX_DATA 0x00000080
+#define GRC_MODE_SPLITHDR 0x00000100
+#define GRC_MODE_NOFRM_CRACKING 0x00000200
+#define GRC_MODE_INCL_CRC 0x00000400
+#define GRC_MODE_ALLOW_BAD_FRMS 0x00000800
+#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000
+#define GRC_MODE_NOIRQ_ON_RCV 0x00004000
+#define GRC_MODE_FORCE_PCI32BIT 0x00008000
+#define GRC_MODE_B2HRX_ENABLE 0x00008000
+#define GRC_MODE_HOST_STACKUP 0x00010000
+#define GRC_MODE_HOST_SENDBDS 0x00020000
+#define GRC_MODE_HTX2B_ENABLE 0x00040000
+#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
+#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
+#define GRC_MODE_PCIE_TL_SEL 0x00000000
+#define GRC_MODE_PCIE_PL_SEL 0x00400000
+#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
+#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
+#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
+#define GRC_MODE_IRQ_ON_MAC_ATTN 0x04000000
+#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000
+#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000
+#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000
+#define GRC_MODE_PCIE_DL_SEL 0x20000000
+#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000
+#define GRC_MODE_PCIE_HI_1K_EN 0x80000000
+#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \
+	GRC_MODE_PCIE_PL_SEL | \
+	GRC_MODE_PCIE_DL_SEL | \
+	GRC_MODE_PCIE_HI_1K_EN)
+#define GRC_MISC_CFG 0x00006804
+#define GRC_MISC_CFG_CORECLK_RESET 0x00000001
+#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe
+#define GRC_MISC_CFG_PRESCALAR_SHIFT 1
+#define GRC_MISC_CFG_BOARD_ID_MASK 0x0001e000
+#define GRC_MISC_CFG_BOARD_ID_5700 0x0001e000
+#define GRC_MISC_CFG_BOARD_ID_5701 0x00000000
+#define GRC_MISC_CFG_BOARD_ID_5702FE 0x00004000
+#define GRC_MISC_CFG_BOARD_ID_5703 0x00000000
+#define GRC_MISC_CFG_BOARD_ID_5703S 0x00002000
+#define GRC_MISC_CFG_BOARD_ID_5704 0x00000000
+#define GRC_MISC_CFG_BOARD_ID_5704CIOBE 0x00004000
+#define GRC_MISC_CFG_BOARD_ID_5704_A2 0x00008000
+#define GRC_MISC_CFG_BOARD_ID_5788 0x00010000
+#define GRC_MISC_CFG_BOARD_ID_5788M 0x00018000
+#define GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000
+#define GRC_MISC_CFG_EPHY_IDDQ 0x00200000
+#define GRC_MISC_CFG_KEEP_GPHY_POWER 0x04000000
+#define GRC_LOCAL_CTRL 0x00006808
+#define GRC_LCLCTRL_INT_ACTIVE 0x00000001
+#define GRC_LCLCTRL_CLEARINT 0x00000002
+#define GRC_LCLCTRL_SETINT 0x00000004
+#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008
+#define GRC_LCLCTRL_GPIO_UART_SEL 0x00000010 /* 5755 only */
+#define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */
+#define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */
+#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020
+#define GRC_LCLCTRL_GPIO_OE3 0x00000040
+#define GRC_LCLCTRL_GPIO_OUTPUT3 0x00000080
+#define GRC_LCLCTRL_GPIO_INPUT0 0x00000100
+#define GRC_LCLCTRL_GPIO_INPUT1 0x00000200
+#define GRC_LCLCTRL_GPIO_INPUT2 0x00000400
+#define GRC_LCLCTRL_GPIO_OE0 0x00000800
+#define GRC_LCLCTRL_GPIO_OE1 0x00001000
+#define GRC_LCLCTRL_GPIO_OE2 0x00002000
+#define GRC_LCLCTRL_GPIO_OUTPUT0 0x00004000
+#define GRC_LCLCTRL_GPIO_OUTPUT1 0x00008000
+#define GRC_LCLCTRL_GPIO_OUTPUT2 0x00010000
+#define GRC_LCLCTRL_EXTMEM_ENABLE 0x00020000
+#define GRC_LCLCTRL_MEMSZ_MASK 0x001c0000
+#define GRC_LCLCTRL_MEMSZ_256K 0x00000000
+#define GRC_LCLCTRL_MEMSZ_512K 0x00040000
+#define GRC_LCLCTRL_MEMSZ_1M 0x00080000
+#define GRC_LCLCTRL_MEMSZ_2M 0x000c0000
+#define GRC_LCLCTRL_MEMSZ_4M 0x00100000
+#define GRC_LCLCTRL_MEMSZ_8M 0x00140000
+#define GRC_LCLCTRL_MEMSZ_16M 0x00180000
+#define GRC_LCLCTRL_BANK_SELECT 0x00200000
+#define GRC_LCLCTRL_SSRAM_TYPE 0x00400000
+#define GRC_LCLCTRL_AUTO_SEEPROM 0x01000000
+#define GRC_TIMER 0x0000680c
+#define GRC_RX_CPU_EVENT 0x00006810
+#define GRC_RX_CPU_DRIVER_EVENT 0x00004000
+#define GRC_RX_TIMER_REF 0x00006814
+#define GRC_RX_CPU_SEM 0x00006818
+#define GRC_REMOTE_RX_CPU_ATTN 0x0000681c
+#define GRC_TX_CPU_EVENT 0x00006820
+#define GRC_TX_TIMER_REF 0x00006824
+#define GRC_TX_CPU_SEM 0x00006828
+#define GRC_REMOTE_TX_CPU_ATTN 0x0000682c
+#define GRC_MEM_POWER_UP 0x00006830 /* 64-bit */
+#define GRC_EEPROM_ADDR 0x00006838
+#define EEPROM_ADDR_WRITE 0x00000000
+#define EEPROM_ADDR_READ 0x80000000
+#define EEPROM_ADDR_COMPLETE 0x40000000
+#define EEPROM_ADDR_FSM_RESET 0x20000000
+#define EEPROM_ADDR_DEVID_MASK 0x1c000000
+#define EEPROM_ADDR_DEVID_SHIFT 26
+#define EEPROM_ADDR_START 0x02000000
+#define EEPROM_ADDR_CLKPERD_SHIFT 16
+#define EEPROM_ADDR_ADDR_MASK 0x0000ffff
+#define EEPROM_ADDR_ADDR_SHIFT 0
+#define EEPROM_DEFAULT_CLOCK_PERIOD 0x60
+#define EEPROM_CHIP_SIZE (64 * 1024)
+#define GRC_EEPROM_DATA 0x0000683c
+#define GRC_EEPROM_CTRL 0x00006840
+#define GRC_MDI_CTRL 0x00006844
+#define GRC_SEEPROM_DELAY 0x00006848
+/* 0x684c --> 0x6890 unused */
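[Editorial aside, not part of the patch: the GRC_EEPROM_ADDR bits above drive a serial-EEPROM state machine; a word read is started with EEPROM_ADDR_START | EEPROM_ADDR_READ plus the offset, then polled for EEPROM_ADDR_COMPLETE. A hypothetical sketch, with "offset" and "val" as assumed locals and tr32()/tw32() as assumed accessors:]

	tw32(GRC_EEPROM_ADDR,
	     EEPROM_ADDR_START | EEPROM_ADDR_READ |
	     (EEPROM_DEFAULT_CLOCK_PERIOD << EEPROM_ADDR_CLKPERD_SHIFT) |
	     (offset & EEPROM_ADDR_ADDR_MASK));
	while (!(tr32(GRC_EEPROM_ADDR) & EEPROM_ADDR_COMPLETE))
		udelay(100);	/* wait for the state machine to finish */
	val = tr32(GRC_EEPROM_DATA);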
+#define GRC_VCPU_EXT_CTRL 0x00006890
+#define GRC_VCPU_EXT_CTRL_HALT_CPU 0x00400000
+#define GRC_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000
+#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */
+
+/* 0x6c00 --> 0x7000 unused */
+
+/* NVRAM Control registers */
+#define NVRAM_CMD 0x00007000
+#define NVRAM_CMD_RESET 0x00000001
+#define NVRAM_CMD_DONE 0x00000008
+#define NVRAM_CMD_GO 0x00000010
+#define NVRAM_CMD_WR 0x00000020
+#define NVRAM_CMD_RD 0x00000000
+#define NVRAM_CMD_ERASE 0x00000040
+#define NVRAM_CMD_FIRST 0x00000080
+#define NVRAM_CMD_LAST 0x00000100
+#define NVRAM_CMD_WREN 0x00010000
+#define NVRAM_CMD_WRDI 0x00020000
+#define NVRAM_STAT 0x00007004
+#define NVRAM_WRDATA 0x00007008
+#define NVRAM_ADDR 0x0000700c
+#define NVRAM_ADDR_MSK 0x00ffffff
+#define NVRAM_RDDATA 0x00007010
+#define NVRAM_CFG1 0x00007014
+#define NVRAM_CFG1_FLASHIF_ENAB 0x00000001
+#define NVRAM_CFG1_BUFFERED_MODE 0x00000002
+#define NVRAM_CFG1_PASS_THRU 0x00000004
+#define NVRAM_CFG1_STATUS_BITS 0x00000070
+#define NVRAM_CFG1_BIT_BANG 0x00000008
+#define NVRAM_CFG1_FLASH_SIZE 0x02000000
+#define NVRAM_CFG1_COMPAT_BYPASS 0x80000000
+#define NVRAM_CFG1_VENDOR_MASK 0x03000003
+#define FLASH_VENDOR_ATMEL_EEPROM 0x02000000
+#define FLASH_VENDOR_ATMEL_FLASH_BUFFERED 0x02000003
+#define FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED 0x00000003
+#define FLASH_VENDOR_ST 0x03000001
+#define FLASH_VENDOR_SAIFUN 0x01000003
+#define FLASH_VENDOR_SST_SMALL 0x00000001
+#define FLASH_VENDOR_SST_LARGE 0x02000001
+#define NVRAM_CFG1_5752VENDOR_MASK 0x03c00003
+#define FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ 0x00000000
+#define FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ 0x02000000
+#define FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED 0x02000003
+#define FLASH_5752VENDOR_ST_M45PE10 0x02400000
+#define FLASH_5752VENDOR_ST_M45PE20 0x02400002
+#define FLASH_5752VENDOR_ST_M45PE40 0x02400001
+#define FLASH_5755VENDOR_ATMEL_FLASH_1 0x03400001
+#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002
+#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000
+#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003
+#define FLASH_5755VENDOR_ATMEL_FLASH_5 0x02000003
+#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003
+#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002
+#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003
+#define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002
+#define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000
+#define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000
+#define FLASH_5761VENDOR_ATMEL_MDB021D 0x00800003
+#define FLASH_5761VENDOR_ATMEL_MDB041D 0x00800000
+#define FLASH_5761VENDOR_ATMEL_MDB081D 0x00800002
+#define FLASH_5761VENDOR_ATMEL_MDB161D 0x00800001
+#define FLASH_5761VENDOR_ATMEL_ADB021D 0x00000003
+#define FLASH_5761VENDOR_ATMEL_ADB041D 0x00000000
+#define FLASH_5761VENDOR_ATMEL_ADB081D 0x00000002
+#define FLASH_5761VENDOR_ATMEL_ADB161D 0x00000001
+#define FLASH_5761VENDOR_ST_M_M45PE20 0x02800001
+#define FLASH_5761VENDOR_ST_M_M45PE40 0x02800000
+#define FLASH_5761VENDOR_ST_M_M45PE80 0x02800002
+#define FLASH_5761VENDOR_ST_M_M45PE16 0x02800003
+#define FLASH_5761VENDOR_ST_A_M45PE20 0x02000001
+#define FLASH_5761VENDOR_ST_A_M45PE40 0x02000000
+#define FLASH_5761VENDOR_ST_A_M45PE80 0x02000002
+#define FLASH_5761VENDOR_ST_A_M45PE16 0x02000003
+#define FLASH_57780VENDOR_ATMEL_AT45DB011D 0x00400000
+#define FLASH_57780VENDOR_ATMEL_AT45DB011B 0x03400000
+#define FLASH_57780VENDOR_ATMEL_AT45DB021D 0x00400002
+#define FLASH_57780VENDOR_ATMEL_AT45DB021B 0x03400002
+#define FLASH_57780VENDOR_ATMEL_AT45DB041D 0x00400001
+#define FLASH_57780VENDOR_ATMEL_AT45DB041B 0x03400001
+#define FLASH_5717VENDOR_ATMEL_EEPROM 0x02000001
+#define FLASH_5717VENDOR_MICRO_EEPROM 0x02000003
+#define FLASH_5717VENDOR_ATMEL_MDB011D 0x01000001
+#define FLASH_5717VENDOR_ATMEL_MDB021D 0x01000003
+#define FLASH_5717VENDOR_ST_M_M25PE10 0x02000000
+#define FLASH_5717VENDOR_ST_M_M25PE20 0x02000002
+#define FLASH_5717VENDOR_ST_M_M45PE10 0x00000001
+#define FLASH_5717VENDOR_ST_M_M45PE20 0x00000003
+#define FLASH_5717VENDOR_ATMEL_ADB011B 0x01400000
+#define FLASH_5717VENDOR_ATMEL_ADB021B 0x01400002
+#define FLASH_5717VENDOR_ATMEL_ADB011D 0x01400001
+#define FLASH_5717VENDOR_ATMEL_ADB021D 0x01400003
+#define FLASH_5717VENDOR_ST_A_M25PE10 0x02400000
+#define FLASH_5717VENDOR_ST_A_M25PE20 0x02400002
+#define FLASH_5717VENDOR_ST_A_M45PE10 0x02400001
+#define FLASH_5717VENDOR_ST_A_M45PE20 0x02400003
+#define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000
+#define FLASH_5717VENDOR_ST_25USPT 0x03400002
+#define FLASH_5717VENDOR_ST_45USPT 0x03400001
+#define FLASH_5720_EEPROM_HD 0x00000001
+#define FLASH_5720_EEPROM_LD 0x00000003
+#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
+#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
+#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
+#define FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003
+#define FLASH_5720VENDOR_M_ST_M25PE10 0x02000000
+#define FLASH_5720VENDOR_M_ST_M25PE20 0x02000002
+#define FLASH_5720VENDOR_M_ST_M25PE40 0x02000001
+#define FLASH_5720VENDOR_M_ST_M25PE80 0x02000003
+#define FLASH_5720VENDOR_M_ST_M45PE10 0x03000000
+#define FLASH_5720VENDOR_M_ST_M45PE20 0x03000002
+#define FLASH_5720VENDOR_M_ST_M45PE40 0x03000001
+#define FLASH_5720VENDOR_M_ST_M45PE80 0x03000003
+#define FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000
+#define FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002
+#define FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001
+#define FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000
+#define FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002
+#define FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001
+#define FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003
+#define FLASH_5720VENDOR_A_ST_M25PE10 0x02800000
+#define FLASH_5720VENDOR_A_ST_M25PE20 0x02800002
+#define FLASH_5720VENDOR_A_ST_M25PE40 0x02800001
+#define FLASH_5720VENDOR_A_ST_M25PE80 0x02800003
+#define FLASH_5720VENDOR_A_ST_M45PE10 0x02c00000
+#define FLASH_5720VENDOR_A_ST_M45PE20 0x02c00002
+#define FLASH_5720VENDOR_A_ST_M45PE40 0x02c00001
+#define FLASH_5720VENDOR_A_ST_M45PE80 0x02c00003
+#define FLASH_5720VENDOR_ATMEL_45USPT 0x03c00000
+#define FLASH_5720VENDOR_ST_25USPT 0x03c00002
+#define FLASH_5720VENDOR_ST_45USPT 0x03c00001
+#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000
+#define FLASH_5752PAGE_SIZE_256 0x00000000
+#define FLASH_5752PAGE_SIZE_512 0x10000000
+#define FLASH_5752PAGE_SIZE_1K 0x20000000
+#define FLASH_5752PAGE_SIZE_2K 0x30000000
+#define FLASH_5752PAGE_SIZE_4K 0x40000000
+#define FLASH_5752PAGE_SIZE_264 0x50000000
+#define FLASH_5752PAGE_SIZE_528 0x60000000
+#define NVRAM_CFG2 0x00007018
+#define NVRAM_CFG3 0x0000701c
+#define NVRAM_SWARB 0x00007020
+#define SWARB_REQ_SET0 0x00000001
+#define SWARB_REQ_SET1 0x00000002
+#define SWARB_REQ_SET2 0x00000004
+#define SWARB_REQ_SET3 0x00000008
+#define SWARB_REQ_CLR0 0x00000010
+#define SWARB_REQ_CLR1 0x00000020
+#define SWARB_REQ_CLR2 0x00000040
+#define SWARB_REQ_CLR3 0x00000080
+#define SWARB_GNT0 0x00000100
+#define SWARB_GNT1 0x00000200
+#define SWARB_GNT2 0x00000400
+#define SWARB_GNT3 0x00000800
+#define SWARB_REQ0 0x00001000
+#define SWARB_REQ1 0x00002000
+#define SWARB_REQ2 0x00004000
+#define SWARB_REQ3 0x00008000
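[Editorial aside, not part of the patch: NVRAM_SWARB above is a hardware arbitration register; a requester sets its REQ_SET bit, waits for the matching GNT bit, and clears the request with REQ_CLR when done. A hypothetical sketch for requester 1, assuming tr32()/tw32() accessors:]

	tw32(NVRAM_SWARB, SWARB_REQ_SET1);		/* request the NVRAM lock */
	while (!(tr32(NVRAM_SWARB) & SWARB_GNT1))	/* wait for the grant */
		udelay(20);
	/* ... access NVRAM here ... */
	tw32(NVRAM_SWARB, SWARB_REQ_CLR1);		/* release the lock */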
+#define NVRAM_ACCESS 0x00007024
+#define ACCESS_ENABLE 0x00000001
+#define ACCESS_WR_ENABLE 0x00000002
+#define NVRAM_WRITE1 0x00007028
+/* 0x702c unused */
+
+#define NVRAM_ADDR_LOCKOUT 0x00007030
+/* 0x7034 --> 0x7500 unused */
+
+#define OTP_MODE 0x00007500
+#define OTP_MODE_OTP_THRU_GRC 0x00000001
+#define OTP_CTRL 0x00007504
+#define OTP_CTRL_OTP_PROG_ENABLE 0x00200000
+#define OTP_CTRL_OTP_CMD_READ 0x00000000
+#define OTP_CTRL_OTP_CMD_INIT 0x00000008
+#define OTP_CTRL_OTP_CMD_START 0x00000001
+#define OTP_STATUS 0x00007508
+#define OTP_STATUS_CMD_DONE 0x00000001
+#define OTP_ADDRESS 0x0000750c
+#define OTP_ADDRESS_MAGIC1 0x000000a0
+#define OTP_ADDRESS_MAGIC2 0x00000080
+/* 0x7510 unused */
+
+#define OTP_READ_DATA 0x00007514
+/* 0x7518 --> 0x7c04 unused */
+
+#define PCIE_TRANSACTION_CFG 0x00007c04
+#define PCIE_TRANS_CFG_1SHOT_MSI 0x20000000
+#define PCIE_TRANS_CFG_LOM 0x00000020
+/* 0x7c08 --> 0x7d28 unused */
+
+#define PCIE_PWR_MGMT_THRESH 0x00007d28
+#define PCIE_PWR_MGMT_L1_THRESH_MSK 0x0000ff00
+#define PCIE_PWR_MGMT_L1_THRESH_4MS 0x0000ff00
+#define PCIE_PWR_MGMT_EXT_ASPM_TMR_EN 0x01000000
+/* 0x7d2c --> 0x7d54 unused */
+
+#define TG3_PCIE_LNKCTL 0x00007d54
+#define TG3_PCIE_LNKCTL_L1_PLL_PD_EN 0x00000008
+#define TG3_PCIE_LNKCTL_L1_PLL_PD_DIS 0x00000080
+/* 0x7d58 --> 0x7e70 unused */
+
+#define TG3_PCIE_PHY_TSTCTL 0x00007e2c
+#define TG3_PCIE_PHY_TSTCTL_PCIE10 0x00000040
+#define TG3_PCIE_PHY_TSTCTL_PSCRAM 0x00000020
+
+#define TG3_PCIE_EIDLE_DELAY 0x00007e70
+#define TG3_PCIE_EIDLE_DELAY_MASK 0x0000001f
+#define TG3_PCIE_EIDLE_DELAY_13_CLKS 0x0000000c
+/* 0x7e74 --> 0x8000 unused */
+
+
+/* Alternate PCIE definitions */
+#define TG3_PCIE_TLDLPL_PORT 0x00007c00
+#define TG3_PCIE_DL_LO_FTSMAX 0x0000000c
+#define TG3_PCIE_DL_LO_FTSMAX_MSK 0x000000ff
+#define TG3_PCIE_DL_LO_FTSMAX_VAL 0x0000002c
+#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
+#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
+#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
+#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
+
+#define TG3_REG_BLK_SIZE 0x00008000
+
+/* OTP bit definitions */
+#define TG3_OTP_AGCTGT_MASK 0x000000e0
+#define TG3_OTP_AGCTGT_SHIFT 1
+#define TG3_OTP_HPFFLTR_MASK 0x00000300
+#define TG3_OTP_HPFFLTR_SHIFT 1
+#define TG3_OTP_HPFOVER_MASK 0x00000400
+#define TG3_OTP_HPFOVER_SHIFT 1
+#define TG3_OTP_LPFDIS_MASK 0x00000800
+#define TG3_OTP_LPFDIS_SHIFT 11
+#define TG3_OTP_VDAC_MASK 0xff000000
+#define TG3_OTP_VDAC_SHIFT 24
+#define TG3_OTP_10BTAMP_MASK 0x0000f000
+#define TG3_OTP_10BTAMP_SHIFT 8
+#define TG3_OTP_ROFF_MASK 0x00e00000
+#define TG3_OTP_ROFF_SHIFT 11
+#define TG3_OTP_RCOFF_MASK 0x001c0000
+#define TG3_OTP_RCOFF_SHIFT 16
+
+#define TG3_OTP_DEFAULT 0x286c1640
+
+
+/* Hardware Legacy NVRAM layout */
+#define TG3_NVM_VPD_OFF 0x100
+#define TG3_NVM_VPD_LEN 256
+
+/* Hardware Selfboot NVRAM layout */
+#define TG3_NVM_HWSB_CFG1 0x00000004
+#define TG3_NVM_HWSB_CFG1_MAJMSK 0xf8000000
+#define TG3_NVM_HWSB_CFG1_MAJSFT 27
+#define TG3_NVM_HWSB_CFG1_MINMSK 0x07c00000
+#define TG3_NVM_HWSB_CFG1_MINSFT 22
+
+#define TG3_EEPROM_MAGIC 0x669955aa
+#define TG3_EEPROM_MAGIC_FW 0xa5000000
+#define TG3_EEPROM_MAGIC_FW_MSK 0xff000000
+#define TG3_EEPROM_SB_FORMAT_MASK 0x00e00000
+#define TG3_EEPROM_SB_FORMAT_1 0x00200000
+#define TG3_EEPROM_SB_REVISION_MASK 0x001f0000
+#define TG3_EEPROM_SB_REVISION_0 0x00000000
+#define TG3_EEPROM_SB_REVISION_2 0x00020000
+#define TG3_EEPROM_SB_REVISION_3 0x00030000
+#define TG3_EEPROM_SB_REVISION_4 0x00040000
+#define TG3_EEPROM_SB_REVISION_5 0x00050000
+#define TG3_EEPROM_SB_REVISION_6 0x00060000
+#define TG3_EEPROM_MAGIC_HW 0xabcd
+#define TG3_EEPROM_MAGIC_HW_MSK 0xffff
+
+#define TG3_NVM_DIR_START 0x18
+#define TG3_NVM_DIR_END 0x78
+#define TG3_NVM_DIRENT_SIZE 0xc
+#define TG3_NVM_DIRTYPE_SHIFT 24
+#define TG3_NVM_DIRTYPE_LENMSK 0x003fffff
+#define TG3_NVM_DIRTYPE_ASFINI 1
+#define TG3_NVM_DIRTYPE_EXTVPD 20
+#define TG3_NVM_PTREV_BCVER 0x94
+#define TG3_NVM_BCVER_MAJMSK 0x0000ff00
+#define TG3_NVM_BCVER_MAJSFT 8
+#define TG3_NVM_BCVER_MINMSK 0x000000ff
+
+#define TG3_EEPROM_SB_F1R0_EDH_OFF 0x10
+#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14
+#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
+#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18
+#define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c
+#define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20
+#define TG3_EEPROM_SB_F1R6_EDH_OFF 0x4c
+#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700
+#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8
+#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff
+#define TG3_EEPROM_SB_EDH_BLD_MASK 0x0000f800
+#define TG3_EEPROM_SB_EDH_BLD_SHFT 11
+
+
+/* 32K Window into NIC internal memory */
+#define NIC_SRAM_WIN_BASE 0x00008000
+
+/* Offsets into first 32k of NIC internal memory. */
+#define NIC_SRAM_PAGE_ZERO 0x00000000
+#define NIC_SRAM_SEND_RCB 0x00000100 /* 16 * TG3_BDINFO_... */
+#define NIC_SRAM_RCV_RET_RCB 0x00000200 /* 16 * TG3_BDINFO_... */
+#define NIC_SRAM_STATS_BLK 0x00000300
+#define NIC_SRAM_STATUS_BLK 0x00000b00
+
+#define NIC_SRAM_FIRMWARE_MBOX 0x00000b50
+#define NIC_SRAM_FIRMWARE_MBOX_MAGIC1 0x4B657654
+#define NIC_SRAM_FIRMWARE_MBOX_MAGIC2 0x4861764b /* !dma on linkchg */
+
+#define NIC_SRAM_DATA_SIG 0x00000b54
+#define NIC_SRAM_DATA_SIG_MAGIC 0x4b657654 /* ascii for 'KevT' */
+
+#define NIC_SRAM_DATA_CFG 0x00000b58
+#define NIC_SRAM_DATA_CFG_LED_MODE_MASK 0x0000000c
+#define NIC_SRAM_DATA_CFG_LED_MODE_MAC 0x00000000
+#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_1 0x00000004
+#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_2 0x00000008
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_MASK 0x00000030
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_UNKNOWN 0x00000000
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_COPPER 0x00000010
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER 0x00000020
+#define NIC_SRAM_DATA_CFG_WOL_ENABLE 0x00000040
+#define NIC_SRAM_DATA_CFG_ASF_ENABLE 0x00000080
+#define NIC_SRAM_DATA_CFG_EEPROM_WP 0x00000100
+#define NIC_SRAM_DATA_CFG_MINI_PCI 0x00001000
+#define NIC_SRAM_DATA_CFG_FIBER_WOL 0x00004000
+#define NIC_SRAM_DATA_CFG_NO_GPIO2 0x00100000
+#define NIC_SRAM_DATA_CFG_APE_ENABLE 0x00200000
+
+#define NIC_SRAM_DATA_VER 0x00000b5c
+#define NIC_SRAM_DATA_VER_SHIFT 16
+
+#define NIC_SRAM_DATA_PHY_ID 0x00000b74
+#define NIC_SRAM_DATA_PHY_ID1_MASK 0xffff0000
+#define NIC_SRAM_DATA_PHY_ID2_MASK 0x0000ffff
+
+#define NIC_SRAM_FW_CMD_MBOX 0x00000b78
+#define FWCMD_NICDRV_ALIVE 0x00000001
+#define FWCMD_NICDRV_PAUSE_FW 0x00000002
+#define FWCMD_NICDRV_IPV4ADDR_CHG 0x00000003
+#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004
+#define FWCMD_NICDRV_FIX_DMAR 0x00000005
+#define FWCMD_NICDRV_FIX_DMAW 0x00000006
+#define FWCMD_NICDRV_LINK_UPDATE 0x0000000c
+#define FWCMD_NICDRV_ALIVE2 0x0000000d
+#define FWCMD_NICDRV_ALIVE3 0x0000000e
+#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c
+#define NIC_SRAM_FW_CMD_DATA_MBOX 0x00000b80
+#define NIC_SRAM_FW_ASF_STATUS_MBOX 0x00000c00
+#define NIC_SRAM_FW_DRV_STATE_MBOX 0x00000c04
+#define DRV_STATE_START 0x00000001
+#define DRV_STATE_START_DONE 0x80000001
+#define DRV_STATE_UNLOAD 0x00000002
+#define DRV_STATE_UNLOAD_DONE 0x80000002
+#define DRV_STATE_WOL 0x00000003
+#define DRV_STATE_SUSPEND 0x00000004
+
+#define NIC_SRAM_FW_RESET_TYPE_MBOX 0x00000c08
+
+#define NIC_SRAM_MAC_ADDR_HIGH_MBOX 0x00000c14
+#define NIC_SRAM_MAC_ADDR_LOW_MBOX 0x00000c18 + +#define NIC_SRAM_WOL_MBOX 0x00000d30 +#define WOL_SIGNATURE 0x474c0000 +#define WOL_DRV_STATE_SHUTDOWN 0x00000001 +#define WOL_DRV_WOL 0x00000002 +#define WOL_SET_MAGIC_PKT 0x00000004 + +#define NIC_SRAM_DATA_CFG_2 0x00000d38 + +#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00000400 +#define SHASTA_EXT_LED_MODE_MASK 0x00018000 +#define SHASTA_EXT_LED_LEGACY 0x00000000 +#define SHASTA_EXT_LED_SHARED 0x00008000 +#define SHASTA_EXT_LED_MAC 0x00010000 +#define SHASTA_EXT_LED_COMBO 0x00018000 + +#define NIC_SRAM_DATA_CFG_3 0x00000d3c +#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002 + +#define NIC_SRAM_DATA_CFG_4 0x00000d60 +#define NIC_SRAM_GMII_MODE 0x00000002 +#define NIC_SRAM_RGMII_INBAND_DISABLE 0x00000004 +#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 +#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 + +#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 + +#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 +#define NIC_SRAM_DMA_DESC_POOL_SIZE 0x00002000 +#define NIC_SRAM_TX_BUFFER_DESC 0x00004000 /* 512 entries */ +#define NIC_SRAM_RX_BUFFER_DESC 0x00006000 /* 256 entries */ +#define NIC_SRAM_RX_JUMBO_BUFFER_DESC 0x00007000 /* 256 entries */ +#define NIC_SRAM_MBUF_POOL_BASE 0x00008000 +#define NIC_SRAM_MBUF_POOL_SIZE96 0x00018000 +#define NIC_SRAM_MBUF_POOL_SIZE64 0x00010000 +#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 +#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 + +#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128 +#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64 +#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32 + +#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700 64 +#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717 16 + + +/* Currently this is fixed. */ +#define TG3_PHY_MII_ADDR 0x01 + + +/*** Tigon3 specific PHY MII registers. ***/ +#define MII_TG3_MMD_CTRL 0x0d /* MMD Access Control register */ +#define MII_TG3_MMD_CTRL_DATA_NOINC 0x4000 +#define MII_TG3_MMD_ADDRESS 0x0e /* MMD Address Data register */ + +#define MII_TG3_EXT_CTRL 0x10 /* Extended control register */ +#define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001 +#define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002 +#define MII_TG3_EXT_CTRL_FORCE_LED_OFF 0x0008 +#define MII_TG3_EXT_CTRL_TBI 0x8000 + +#define MII_TG3_EXT_STAT 0x11 /* Extended status register */ +#define MII_TG3_EXT_STAT_LPASS 0x0100 + +#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */ +#define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */ +#define MII_TG3_DSP_CONTROL 0x16 /* DSP control register */ +#define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */ + +#define MII_TG3_DSP_TAP1 0x0001 +#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 +#define MII_TG3_DSP_TAP26 0x001a +#define MII_TG3_DSP_TAP26_ALNOKO 0x0001 +#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002 +#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004 +#define MII_TG3_DSP_AADJ1CH0 0x001f +#define MII_TG3_DSP_CH34TP2 0x4022 +#define MII_TG3_DSP_CH34TP2_HIBW01 0x01ff +#define MII_TG3_DSP_AADJ1CH3 0x601f +#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 +#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01 +#define MII_TG3_DSP_EXP8 0x0f08 +#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 +#define MII_TG3_DSP_EXP8_AEDW 0x0200 +#define MII_TG3_DSP_EXP75 0x0f75 +#define MII_TG3_DSP_EXP96 0x0f96 +#define MII_TG3_DSP_EXP97 0x0f97 + +#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */ + +#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000 +#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400 +#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800 +#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN 0x4000 + +#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002 
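The MII_TG3_DSP_ADDRESS/MII_TG3_DSP_RW_PORT pair above implies a
two-step indirect access to the DSP coefficient space: latch the
coefficient address first, then move the data through the read/write
port. A minimal sketch of that sequence (illustration only, not part
of this patch); example_writephy() is a hypothetical Clause 22 MDIO
write helper:

int example_writephy(struct tg3 *tp, int reg, u32 val); /* hypothetical MDIO write */

static int example_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	/* Select which DSP coefficient the next data access targets. */
	int err = example_writephy(tp, MII_TG3_DSP_ADDRESS, reg);

	if (!err)
		/* Transfer the value through the read/write port. */
		err = example_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}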
+#define MII_TG3_AUXCTL_PCTL_WOL_EN 0x0008
+#define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010
+#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020
+#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC 0x0040
+#define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180
+
+#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST 0x0004
+
+#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
+#define MII_TG3_AUXCTL_MISC_WIRESPD_EN 0x0010
+#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT 12
+#define MII_TG3_AUXCTL_MISC_WREN 0x8000
+
+
+#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */
+#define MII_TG3_AUX_STAT_LPASS 0x0004
+#define MII_TG3_AUX_STAT_SPDMASK 0x0700
+#define MII_TG3_AUX_STAT_10HALF 0x0100
+#define MII_TG3_AUX_STAT_10FULL 0x0200
+#define MII_TG3_AUX_STAT_100HALF 0x0300
+#define MII_TG3_AUX_STAT_100_4 0x0400
+#define MII_TG3_AUX_STAT_100FULL 0x0500
+#define MII_TG3_AUX_STAT_1000HALF 0x0600
+#define MII_TG3_AUX_STAT_1000FULL 0x0700
+#define MII_TG3_AUX_STAT_100 0x0008
+#define MII_TG3_AUX_STAT_FULL 0x0001
+
+#define MII_TG3_ISTAT 0x1a /* IRQ status register */
+#define MII_TG3_IMASK 0x1b /* IRQ mask register */
+
+/* ISTAT/IMASK event bits */
+#define MII_TG3_INT_LINKCHG 0x0002
+#define MII_TG3_INT_SPEEDCHG 0x0004
+#define MII_TG3_INT_DUPLEXCHG 0x0008
+#define MII_TG3_INT_ANEG_PAGE_RX 0x0400
+
+#define MII_TG3_MISC_SHDW 0x1c
+#define MII_TG3_MISC_SHDW_WREN 0x8000
+
+#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001
+#define MII_TG3_MISC_SHDW_APD_ENABLE 0x0020
+#define MII_TG3_MISC_SHDW_APD_SEL 0x2800
+
+#define MII_TG3_MISC_SHDW_SCR5_C125OE 0x0001
+#define MII_TG3_MISC_SHDW_SCR5_DLLAPD 0x0002
+#define MII_TG3_MISC_SHDW_SCR5_SDTL 0x0004
+#define MII_TG3_MISC_SHDW_SCR5_DLPTLM 0x0008
+#define MII_TG3_MISC_SHDW_SCR5_LPED 0x0010
+#define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400
+
+#define MII_TG3_TEST1 0x1e
+#define MII_TG3_TEST1_TRIM_EN 0x0010
+#define MII_TG3_TEST1_CRC_EN 0x8000
+
+/* Clause 45 expansion registers */
+#define TG3_CL45_D7_EEERES_STAT 0x803e
+#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002
+#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
+
+
+/* Fast Ethernet Transceiver definitions */
+#define MII_TG3_FET_PTEST 0x17
+#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
+#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
+
+#define MII_TG3_FET_TEST 0x1f
+#define MII_TG3_FET_SHADOW_EN 0x0080
+
+#define MII_TG3_FET_SHDW_MISCCTRL 0x10
+#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000
+
+#define MII_TG3_FET_SHDW_AUXMODE4 0x1a
+#define MII_TG3_FET_SHDW_AUXMODE4_SBPD 0x0008
+
+#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b
+#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020
+
+
+/* APE registers. Accessible through BAR1 */
+#define TG3_APE_GPIO_MSG 0x0008
+#define TG3_APE_GPIO_MSG_SHIFT 4
+#define TG3_APE_EVENT 0x000c
+#define APE_EVENT_1 0x00000001
+#define TG3_APE_LOCK_REQ 0x002c
+#define APE_LOCK_REQ_DRIVER 0x00001000
+#define TG3_APE_LOCK_GRANT 0x004c
+#define APE_LOCK_GRANT_DRIVER 0x00001000
+#define TG3_APE_SEG_SIG 0x4000
+#define APE_SEG_SIG_MAGIC 0x41504521
+
+/* APE shared memory. Accessible through BAR1 */
+#define TG3_APE_FW_STATUS 0x400c
+#define APE_FW_STATUS_READY 0x00000100
+#define TG3_APE_FW_FEATURES 0x4010
+#define TG3_APE_FW_FEATURE_NCSI 0x00000002
+#define TG3_APE_FW_VERSION 0x4018
+#define APE_FW_VERSION_MAJMSK 0xff000000
+#define APE_FW_VERSION_MAJSFT 24
+#define APE_FW_VERSION_MINMSK 0x00ff0000
+#define APE_FW_VERSION_MINSFT 16
+#define APE_FW_VERSION_REVMSK 0x0000ff00
+#define APE_FW_VERSION_REVSFT 8
+#define APE_FW_VERSION_BLDMSK 0x000000ff
+#define TG3_APE_HOST_SEG_SIG 0x4200
+#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
+#define TG3_APE_HOST_SEG_LEN 0x4204
+#define APE_HOST_SEG_LEN_MAGIC 0x00000020
+#define TG3_APE_HOST_INIT_COUNT 0x4208
+#define TG3_APE_HOST_DRIVER_ID 0x420c
+#define APE_HOST_DRIVER_ID_LINUX 0xf0000000
+#define APE_HOST_DRIVER_ID_MAGIC(maj, min) \
+ (APE_HOST_DRIVER_ID_LINUX | (maj & 0xff) << 16 | (min & 0xff) << 8)
+#define TG3_APE_HOST_BEHAVIOR 0x4210
+#define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001
+#define TG3_APE_HOST_HEARTBEAT_INT_MS 0x4214
+#define APE_HOST_HEARTBEAT_INT_DISABLE 0
+#define APE_HOST_HEARTBEAT_INT_5SEC 5000
+#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218
+#define TG3_APE_HOST_DRVR_STATE 0x421c
+#define TG3_APE_HOST_DRVR_STATE_START 0x00000001
+#define TG3_APE_HOST_DRVR_STATE_UNLOAD 0x00000002
+#define TG3_APE_HOST_DRVR_STATE_WOL 0x00000003
+#define TG3_APE_HOST_WOL_SPEED 0x4224
+#define TG3_APE_HOST_WOL_SPEED_AUTO 0x00008000
+
+#define TG3_APE_EVENT_STATUS 0x4300
+
+#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010
+#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500
+#define APE_EVENT_STATUS_STATE_START 0x00010000
+#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000
+#define APE_EVENT_STATUS_STATE_WOL 0x00030000
+#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000
+#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000
+
+#define TG3_APE_PER_LOCK_REQ 0x8400
+#define APE_LOCK_PER_REQ_DRIVER 0x00001000
+#define TG3_APE_PER_LOCK_GRANT 0x8420
+#define APE_PER_LOCK_GRANT_DRIVER 0x00001000
+
+/* APE convenience enumerations. */
+#define TG3_APE_LOCK_GRC 1
+#define TG3_APE_LOCK_MEM 4
+#define TG3_APE_LOCK_GPIO 7
+
+
+/* There are two ways to manage the TX descriptors on the tigon3.
+ * Either the descriptors are in host DMA'able memory, or they
+ * exist only in the card's on-chip SRAM. All 16 send BDs operate in
+ * the same mode; they may not be configured individually.
+ *
+ * This driver always uses host memory TX descriptors.
+ *
+ * To use host memory TX descriptors:
+ * 1) Set GRC_MODE_HOST_SENDBDS in GRC_MODE register.
+ * Make sure GRC_MODE_4X_NIC_SEND_RINGS is clear.
+ * 2) Allocate DMA'able memory.
+ * 3) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
+ * a) Set TG3_BDINFO_HOST_ADDR to DMA address of memory
+ * obtained in step 2
+ * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC.
+ * c) Set len field of TG3_BDINFO_MAXLEN_FLAGS to number
+ * of TX descriptors. Leave flags field clear.
+ * 4) Access TX descriptors via host memory. The chip
+ * will refetch into local SRAM as needed when producer
+ * index mailboxes are updated.
+ *
+ * To use on-chip TX descriptors:
+ * 1) Set GRC_MODE_4X_NIC_SEND_RINGS in GRC_MODE register.
+ * Make sure GRC_MODE_HOST_SENDBDS is clear.
+ * 2) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
+ * a) Set TG3_BDINFO_HOST_ADDR to zero.
+ * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC
+ * c) TG3_BDINFO_MAXLEN_FLAGS is don't care.
+ * 3) Access TX descriptors directly in on-chip SRAM
+ * using normal {read,write}l().
(and not using + * pointer dereferencing of ioremap()'d memory like + * the broken Broadcom driver does) + * + * Note that BDINFO_FLAGS_DISABLED should be set in the flags field of + * TG3_BDINFO_MAXLEN_FLAGS of all unused SEND_RCB indices. + */ +struct tg3_tx_buffer_desc { + u32 addr_hi; + u32 addr_lo; + + u32 len_flags; +#define TXD_FLAG_TCPUDP_CSUM 0x0001 +#define TXD_FLAG_IP_CSUM 0x0002 +#define TXD_FLAG_END 0x0004 +#define TXD_FLAG_IP_FRAG 0x0008 +#define TXD_FLAG_JMB_PKT 0x0008 +#define TXD_FLAG_IP_FRAG_END 0x0010 +#define TXD_FLAG_VLAN 0x0040 +#define TXD_FLAG_COAL_NOW 0x0080 +#define TXD_FLAG_CPU_PRE_DMA 0x0100 +#define TXD_FLAG_CPU_POST_DMA 0x0200 +#define TXD_FLAG_ADD_SRC_ADDR 0x1000 +#define TXD_FLAG_CHOOSE_SRC_ADDR 0x6000 +#define TXD_FLAG_NO_CRC 0x8000 +#define TXD_LEN_SHIFT 16 + + u32 vlan_tag; +#define TXD_VLAN_TAG_SHIFT 0 +#define TXD_MSS_SHIFT 16 +}; + +#define TXD_ADDR 0x00UL /* 64-bit */ +#define TXD_LEN_FLAGS 0x08UL /* 32-bit (upper 16-bits are len) */ +#define TXD_VLAN_TAG 0x0cUL /* 32-bit (upper 16-bits are tag) */ +#define TXD_SIZE 0x10UL + +struct tg3_rx_buffer_desc { + u32 addr_hi; + u32 addr_lo; + + u32 idx_len; +#define RXD_IDX_MASK 0xffff0000 +#define RXD_IDX_SHIFT 16 +#define RXD_LEN_MASK 0x0000ffff +#define RXD_LEN_SHIFT 0 + + u32 type_flags; +#define RXD_TYPE_SHIFT 16 +#define RXD_FLAGS_SHIFT 0 + +#define RXD_FLAG_END 0x0004 +#define RXD_FLAG_MINI 0x0800 +#define RXD_FLAG_JUMBO 0x0020 +#define RXD_FLAG_VLAN 0x0040 +#define RXD_FLAG_ERROR 0x0400 +#define RXD_FLAG_IP_CSUM 0x1000 +#define RXD_FLAG_TCPUDP_CSUM 0x2000 +#define RXD_FLAG_IS_TCP 0x4000 + + u32 ip_tcp_csum; +#define RXD_IPCSUM_MASK 0xffff0000 +#define RXD_IPCSUM_SHIFT 16 +#define RXD_TCPCSUM_MASK 0x0000ffff +#define RXD_TCPCSUM_SHIFT 0 + + u32 err_vlan; + +#define RXD_VLAN_MASK 0x0000ffff + +#define RXD_ERR_BAD_CRC 0x00010000 +#define RXD_ERR_COLLISION 0x00020000 +#define RXD_ERR_LINK_LOST 0x00040000 +#define RXD_ERR_PHY_DECODE 0x00080000 +#define RXD_ERR_ODD_NIBBLE_RCVD_MII 0x00100000 +#define RXD_ERR_MAC_ABRT 0x00200000 +#define RXD_ERR_TOO_SMALL 0x00400000 +#define RXD_ERR_NO_RESOURCES 0x00800000 +#define RXD_ERR_HUGE_FRAME 0x01000000 +#define RXD_ERR_MASK 0xffff0000 + + u32 reserved; + u32 opaque; +#define RXD_OPAQUE_INDEX_MASK 0x0000ffff +#define RXD_OPAQUE_INDEX_SHIFT 0 +#define RXD_OPAQUE_RING_STD 0x00010000 +#define RXD_OPAQUE_RING_JUMBO 0x00020000 +#define RXD_OPAQUE_RING_MINI 0x00040000 +#define RXD_OPAQUE_RING_MASK 0x00070000 +}; + +struct tg3_ext_rx_buffer_desc { + struct { + u32 addr_hi; + u32 addr_lo; + } addrlist[3]; + u32 len2_len1; + u32 resv_len3; + struct tg3_rx_buffer_desc std; +}; + +/* We only use this when testing out the DMA engine + * at probe time. This is the internal format of buffer + * descriptors used by the chip at NIC_SRAM_DMA_DESCS. 
+ */ +struct tg3_internal_buffer_desc { + u32 addr_hi; + u32 addr_lo; + u32 nic_mbuf; + /* XXX FIX THIS */ +#ifdef __BIG_ENDIAN + u16 cqid_sqid; + u16 len; +#else + u16 len; + u16 cqid_sqid; +#endif + u32 flags; + u32 __cookie1; + u32 __cookie2; + u32 __cookie3; +}; + +#define TG3_HW_STATUS_SIZE 0x50 +struct tg3_hw_status { + u32 status; +#define SD_STATUS_UPDATED 0x00000001 +#define SD_STATUS_LINK_CHG 0x00000002 +#define SD_STATUS_ERROR 0x00000004 + + u32 status_tag; + +#ifdef __BIG_ENDIAN + u16 rx_consumer; + u16 rx_jumbo_consumer; +#else + u16 rx_jumbo_consumer; + u16 rx_consumer; +#endif + +#ifdef __BIG_ENDIAN + u16 reserved; + u16 rx_mini_consumer; +#else + u16 rx_mini_consumer; + u16 reserved; +#endif + struct { +#ifdef __BIG_ENDIAN + u16 tx_consumer; + u16 rx_producer; +#else + u16 rx_producer; + u16 tx_consumer; +#endif + } idx[16]; +}; + +typedef struct { + u32 high, low; +} tg3_stat64_t; + +struct tg3_hw_stats { + u8 __reserved0[0x400-0x300]; + + /* Statistics maintained by Receive MAC. */ + tg3_stat64_t rx_octets; + u64 __reserved1; + tg3_stat64_t rx_fragments; + tg3_stat64_t rx_ucast_packets; + tg3_stat64_t rx_mcast_packets; + tg3_stat64_t rx_bcast_packets; + tg3_stat64_t rx_fcs_errors; + tg3_stat64_t rx_align_errors; + tg3_stat64_t rx_xon_pause_rcvd; + tg3_stat64_t rx_xoff_pause_rcvd; + tg3_stat64_t rx_mac_ctrl_rcvd; + tg3_stat64_t rx_xoff_entered; + tg3_stat64_t rx_frame_too_long_errors; + tg3_stat64_t rx_jabbers; + tg3_stat64_t rx_undersize_packets; + tg3_stat64_t rx_in_length_errors; + tg3_stat64_t rx_out_length_errors; + tg3_stat64_t rx_64_or_less_octet_packets; + tg3_stat64_t rx_65_to_127_octet_packets; + tg3_stat64_t rx_128_to_255_octet_packets; + tg3_stat64_t rx_256_to_511_octet_packets; + tg3_stat64_t rx_512_to_1023_octet_packets; + tg3_stat64_t rx_1024_to_1522_octet_packets; + tg3_stat64_t rx_1523_to_2047_octet_packets; + tg3_stat64_t rx_2048_to_4095_octet_packets; + tg3_stat64_t rx_4096_to_8191_octet_packets; + tg3_stat64_t rx_8192_to_9022_octet_packets; + + u64 __unused0[37]; + + /* Statistics maintained by Transmit MAC. */ + tg3_stat64_t tx_octets; + u64 __reserved2; + tg3_stat64_t tx_collisions; + tg3_stat64_t tx_xon_sent; + tg3_stat64_t tx_xoff_sent; + tg3_stat64_t tx_flow_control; + tg3_stat64_t tx_mac_errors; + tg3_stat64_t tx_single_collisions; + tg3_stat64_t tx_mult_collisions; + tg3_stat64_t tx_deferred; + u64 __reserved3; + tg3_stat64_t tx_excessive_collisions; + tg3_stat64_t tx_late_collisions; + tg3_stat64_t tx_collide_2times; + tg3_stat64_t tx_collide_3times; + tg3_stat64_t tx_collide_4times; + tg3_stat64_t tx_collide_5times; + tg3_stat64_t tx_collide_6times; + tg3_stat64_t tx_collide_7times; + tg3_stat64_t tx_collide_8times; + tg3_stat64_t tx_collide_9times; + tg3_stat64_t tx_collide_10times; + tg3_stat64_t tx_collide_11times; + tg3_stat64_t tx_collide_12times; + tg3_stat64_t tx_collide_13times; + tg3_stat64_t tx_collide_14times; + tg3_stat64_t tx_collide_15times; + tg3_stat64_t tx_ucast_packets; + tg3_stat64_t tx_mcast_packets; + tg3_stat64_t tx_bcast_packets; + tg3_stat64_t tx_carrier_sense_errors; + tg3_stat64_t tx_discards; + tg3_stat64_t tx_errors; + + u64 __unused1[31]; + + /* Statistics maintained by Receive List Placement. 
*/
+ tg3_stat64_t COS_rx_packets[16];
+ tg3_stat64_t COS_rx_filter_dropped;
+ tg3_stat64_t dma_writeq_full;
+ tg3_stat64_t dma_write_prioq_full;
+ tg3_stat64_t rxbds_empty;
+ tg3_stat64_t rx_discards;
+ tg3_stat64_t rx_errors;
+ tg3_stat64_t rx_threshold_hit;
+
+ u64 __unused2[9];
+
+ /* Statistics maintained by Send Data Initiator. */
+ tg3_stat64_t COS_out_packets[16];
+ tg3_stat64_t dma_readq_full;
+ tg3_stat64_t dma_read_prioq_full;
+ tg3_stat64_t tx_comp_queue_full;
+
+ /* Statistics maintained by Host Coalescing. */
+ tg3_stat64_t ring_set_send_prod_index;
+ tg3_stat64_t ring_status_update;
+ tg3_stat64_t nic_irqs;
+ tg3_stat64_t nic_avoided_irqs;
+ tg3_stat64_t nic_tx_threshold_hit;
+
+ /* NOT a part of the hardware statistics block format.
+ * These stats are here as storage for tg3_periodic_fetch_stats().
+ */
+ tg3_stat64_t mbuf_lwm_thresh_hit;
+
+ u8 __reserved4[0xb00-0x9c8];
+};
+
+/* 'mapping' is superfluous as the chip does not write into
+ * the tx/rx post rings so we could just fetch it from there.
+ * But the cache behavior is better the way we are doing it now.
+ */
+struct ring_info {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct tg3_tx_ring_info {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ bool fragmented;
+};
+
+struct tg3_link_config {
+ /* Describes what we're trying to get. */
+ u32 advertising;
+ u16 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 flowctrl;
+
+ /* Describes what we actually have. */
+ u8 active_flowctrl;
+
+ u8 active_duplex;
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+#define AUTONEG_INVALID 0xff
+ u16 active_speed;
+
+ /* When we go in and out of low power mode we need
+ * to swap with this state.
+ */
+ u16 orig_speed;
+ u8 orig_duplex;
+ u8 orig_autoneg;
+ u32 orig_advertising;
+};
+
+struct tg3_bufmgr_config {
+ u32 mbuf_read_dma_low_water;
+ u32 mbuf_mac_rx_low_water;
+ u32 mbuf_high_water;
+
+ u32 mbuf_read_dma_low_water_jumbo;
+ u32 mbuf_mac_rx_low_water_jumbo;
+ u32 mbuf_high_water_jumbo;
+
+ u32 dma_low_water;
+ u32 dma_high_water;
+};
+
+struct tg3_ethtool_stats {
+ /* Statistics maintained by Receive MAC. */
+ u64 rx_octets;
+ u64 rx_fragments;
+ u64 rx_ucast_packets;
+ u64 rx_mcast_packets;
+ u64 rx_bcast_packets;
+ u64 rx_fcs_errors;
+ u64 rx_align_errors;
+ u64 rx_xon_pause_rcvd;
+ u64 rx_xoff_pause_rcvd;
+ u64 rx_mac_ctrl_rcvd;
+ u64 rx_xoff_entered;
+ u64 rx_frame_too_long_errors;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_in_length_errors;
+ u64 rx_out_length_errors;
+ u64 rx_64_or_less_octet_packets;
+ u64 rx_65_to_127_octet_packets;
+ u64 rx_128_to_255_octet_packets;
+ u64 rx_256_to_511_octet_packets;
+ u64 rx_512_to_1023_octet_packets;
+ u64 rx_1024_to_1522_octet_packets;
+ u64 rx_1523_to_2047_octet_packets;
+ u64 rx_2048_to_4095_octet_packets;
+ u64 rx_4096_to_8191_octet_packets;
+ u64 rx_8192_to_9022_octet_packets;
+
+ /* Statistics maintained by Transmit MAC.
*/ + u64 tx_octets; + u64 tx_collisions; + u64 tx_xon_sent; + u64 tx_xoff_sent; + u64 tx_flow_control; + u64 tx_mac_errors; + u64 tx_single_collisions; + u64 tx_mult_collisions; + u64 tx_deferred; + u64 tx_excessive_collisions; + u64 tx_late_collisions; + u64 tx_collide_2times; + u64 tx_collide_3times; + u64 tx_collide_4times; + u64 tx_collide_5times; + u64 tx_collide_6times; + u64 tx_collide_7times; + u64 tx_collide_8times; + u64 tx_collide_9times; + u64 tx_collide_10times; + u64 tx_collide_11times; + u64 tx_collide_12times; + u64 tx_collide_13times; + u64 tx_collide_14times; + u64 tx_collide_15times; + u64 tx_ucast_packets; + u64 tx_mcast_packets; + u64 tx_bcast_packets; + u64 tx_carrier_sense_errors; + u64 tx_discards; + u64 tx_errors; + + /* Statistics maintained by Receive List Placement. */ + u64 dma_writeq_full; + u64 dma_write_prioq_full; + u64 rxbds_empty; + u64 rx_discards; + u64 rx_errors; + u64 rx_threshold_hit; + + /* Statistics maintained by Send Data Initiator. */ + u64 dma_readq_full; + u64 dma_read_prioq_full; + u64 tx_comp_queue_full; + + /* Statistics maintained by Host Coalescing. */ + u64 ring_set_send_prod_index; + u64 ring_status_update; + u64 nic_irqs; + u64 nic_avoided_irqs; + u64 nic_tx_threshold_hit; + + u64 mbuf_lwm_thresh_hit; +}; + +struct tg3_rx_prodring_set { + u32 rx_std_prod_idx; + u32 rx_std_cons_idx; + u32 rx_jmb_prod_idx; + u32 rx_jmb_cons_idx; + struct tg3_rx_buffer_desc *rx_std; + struct tg3_ext_rx_buffer_desc *rx_jmb; + struct ring_info *rx_std_buffers; + struct ring_info *rx_jmb_buffers; + dma_addr_t rx_std_mapping; + dma_addr_t rx_jmb_mapping; +}; + +#define TG3_IRQ_MAX_VECS_RSS 5 +#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS + +struct tg3_napi { + struct napi_struct napi ____cacheline_aligned; + struct tg3 *tp; + struct tg3_hw_status *hw_status; + + u32 chk_msi_cnt; + u32 last_tag; + u32 last_irq_tag; + u32 int_mbox; + u32 coal_now; + + u32 consmbox ____cacheline_aligned; + u32 rx_rcb_ptr; + u32 last_rx_cons; + u16 *rx_rcb_prod_idx; + struct tg3_rx_prodring_set prodring; + struct tg3_rx_buffer_desc *rx_rcb; + + u32 tx_prod ____cacheline_aligned; + u32 tx_cons; + u32 tx_pending; + u32 last_tx_cons; + u32 prodmbox; + struct tg3_tx_buffer_desc *tx_ring; + struct tg3_tx_ring_info *tx_buffers; + + dma_addr_t status_mapping; + dma_addr_t rx_rcb_mapping; + dma_addr_t tx_desc_mapping; + + char irq_lbl[IFNAMSIZ]; + unsigned int irq_vec; +}; + +enum TG3_FLAGS { + TG3_FLAG_TAGGED_STATUS = 0, + TG3_FLAG_TXD_MBOX_HWBUG, + TG3_FLAG_USE_LINKCHG_REG, + TG3_FLAG_ERROR_PROCESSED, + TG3_FLAG_ENABLE_ASF, + TG3_FLAG_ASPM_WORKAROUND, + TG3_FLAG_POLL_SERDES, + TG3_FLAG_MBOX_WRITE_REORDER, + TG3_FLAG_PCIX_TARGET_HWBUG, + TG3_FLAG_WOL_SPEED_100MB, + TG3_FLAG_WOL_ENABLE, + TG3_FLAG_EEPROM_WRITE_PROT, + TG3_FLAG_NVRAM, + TG3_FLAG_NVRAM_BUFFERED, + TG3_FLAG_SUPPORT_MSI, + TG3_FLAG_SUPPORT_MSIX, + TG3_FLAG_PCIX_MODE, + TG3_FLAG_PCI_HIGH_SPEED, + TG3_FLAG_PCI_32BIT, + TG3_FLAG_SRAM_USE_CONFIG, + TG3_FLAG_TX_RECOVERY_PENDING, + TG3_FLAG_WOL_CAP, + TG3_FLAG_JUMBO_RING_ENABLE, + TG3_FLAG_PAUSE_AUTONEG, + TG3_FLAG_CPMU_PRESENT, + TG3_FLAG_40BIT_DMA_BUG, + TG3_FLAG_BROKEN_CHECKSUMS, + TG3_FLAG_JUMBO_CAPABLE, + TG3_FLAG_CHIP_RESETTING, + TG3_FLAG_INIT_COMPLETE, + TG3_FLAG_RESTART_TIMER, + TG3_FLAG_TSO_BUG, + TG3_FLAG_IS_5788, + TG3_FLAG_MAX_RXPEND_64, + TG3_FLAG_TSO_CAPABLE, + TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ + TG3_FLAG_ASF_NEW_HANDSHAKE, + TG3_FLAG_HW_AUTONEG, + TG3_FLAG_IS_NIC, + TG3_FLAG_FLASH, + TG3_FLAG_HW_TSO_1, + TG3_FLAG_5705_PLUS, + TG3_FLAG_5750_PLUS, + 
TG3_FLAG_HW_TSO_3, + TG3_FLAG_USING_MSI, + TG3_FLAG_USING_MSIX, + TG3_FLAG_ICH_WORKAROUND, + TG3_FLAG_5780_CLASS, + TG3_FLAG_HW_TSO_2, + TG3_FLAG_1SHOT_MSI, + TG3_FLAG_NO_FWARE_REPORTED, + TG3_FLAG_NO_NVRAM_ADDR_TRANS, + TG3_FLAG_ENABLE_APE, + TG3_FLAG_PROTECTED_NVRAM, + TG3_FLAG_5701_DMA_BUG, + TG3_FLAG_USE_PHYLIB, + TG3_FLAG_MDIOBUS_INITED, + TG3_FLAG_LRG_PROD_RING_CAP, + TG3_FLAG_RGMII_INBAND_DISABLE, + TG3_FLAG_RGMII_EXT_IBND_RX_EN, + TG3_FLAG_RGMII_EXT_IBND_TX_EN, + TG3_FLAG_CLKREQ_BUG, + TG3_FLAG_5755_PLUS, + TG3_FLAG_NO_NVRAM, + TG3_FLAG_ENABLE_RSS, + TG3_FLAG_ENABLE_TSS, + TG3_FLAG_SHORT_DMA_BUG, + TG3_FLAG_USE_JUMBO_BDFLAG, + TG3_FLAG_L1PLLPD_EN, + TG3_FLAG_57765_PLUS, + TG3_FLAG_APE_HAS_NCSI, + TG3_FLAG_5717_PLUS, + TG3_FLAG_4K_FIFO_LIMIT, + + /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ + TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ +}; + +struct tg3 { + /* begin "general, frequently-used members" cacheline section */ + + /* If the IRQ handler (which runs lockless) needs to be + * quiesced, the following bitmask state is used. The + * SYNC flag is set by non-IRQ context code to initiate + * the quiescence. + * + * When the IRQ handler notices that SYNC is set, it + * disables interrupts and returns. + * + * When all outstanding IRQ handlers have returned after + * the SYNC flag has been set, the setter can be assured + * that interrupts will no longer get run. + * + * In this way all SMP driver locks are never acquired + * in hw IRQ context, only sw IRQ context or lower. + */ + unsigned int irq_sync; + + /* SMP locking strategy: + * + * lock: Held during reset, PHY access, timer, and when + * updating tg3_flags. + * + * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds + * netif_tx_lock when it needs to call + * netif_wake_queue. + * + * Both of these locks are to be held with BH safety. + * + * Because the IRQ handler, tg3_poll, and tg3_start_xmit + * are running lockless, it is necessary to completely + * quiesce the chip with tg3_netif_stop and tg3_full_lock + * before reconfiguring the device. + * + * indirect_lock: Held when accessing registers indirectly + * with IRQ disabling. 
+ */ + spinlock_t lock; + spinlock_t indirect_lock; + + u32 (*read32) (struct tg3 *, u32); + void (*write32) (struct tg3 *, u32, u32); + u32 (*read32_mbox) (struct tg3 *, u32); + void (*write32_mbox) (struct tg3 *, u32, + u32); + void __iomem *regs; + void __iomem *aperegs; + struct net_device *dev; + struct pci_dev *pdev; + + u32 coal_now; + u32 msg_enable; + + /* begin "tx thread" cacheline section */ + void (*write32_tx_mbox) (struct tg3 *, u32, + u32); + + /* begin "rx thread" cacheline section */ + struct tg3_napi napi[TG3_IRQ_MAX_VECS]; + void (*write32_rx_mbox) (struct tg3 *, u32, + u32); + u32 rx_copy_thresh; + u32 rx_std_ring_mask; + u32 rx_jmb_ring_mask; + u32 rx_ret_ring_mask; + u32 rx_pending; + u32 rx_jumbo_pending; + u32 rx_std_max_post; + u32 rx_offset; + u32 rx_pkt_map_sz; + + + /* begin "everything else" cacheline(s) section */ + unsigned long rx_dropped; + struct rtnl_link_stats64 net_stats_prev; + struct tg3_ethtool_stats estats; + struct tg3_ethtool_stats estats_prev; + + DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS); + + union { + unsigned long phy_crc_errors; + unsigned long last_event_jiffies; + }; + + struct timer_list timer; + u16 timer_counter; + u16 timer_multiplier; + u32 timer_offset; + u16 asf_counter; + u16 asf_multiplier; + + /* 1 second counter for transient serdes link events */ + u32 serdes_counter; +#define SERDES_AN_TIMEOUT_5704S 2 +#define SERDES_PARALLEL_DET_TIMEOUT 1 +#define SERDES_AN_TIMEOUT_5714S 1 + + struct tg3_link_config link_config; + struct tg3_bufmgr_config bufmgr_config; + + /* cache h/w values, often passed straight to h/w */ + u32 rx_mode; + u32 tx_mode; + u32 mac_mode; + u32 mi_mode; + u32 misc_host_ctrl; + u32 grc_mode; + u32 grc_local_ctrl; + u32 dma_rwctrl; + u32 coalesce_mode; + u32 pwrmgmt_thresh; + + /* PCI block */ + u32 pci_chip_rev_id; + u16 pci_cmd; + u8 pci_cacheline_sz; + u8 pci_lat_timer; + + int pci_fn; + int pm_cap; + int msi_cap; + int pcix_cap; + int pcie_readrq; + + struct mii_bus *mdio_bus; + int mdio_irq[PHY_MAX_ADDR]; + + u8 phy_addr; + + /* PHY info */ + u32 phy_id; +#define TG3_PHY_ID_MASK 0xfffffff0 +#define TG3_PHY_ID_BCM5400 0x60008040 +#define TG3_PHY_ID_BCM5401 0x60008050 +#define TG3_PHY_ID_BCM5411 0x60008070 +#define TG3_PHY_ID_BCM5701 0x60008110 +#define TG3_PHY_ID_BCM5703 0x60008160 +#define TG3_PHY_ID_BCM5704 0x60008190 +#define TG3_PHY_ID_BCM5705 0x600081a0 +#define TG3_PHY_ID_BCM5750 0x60008180 +#define TG3_PHY_ID_BCM5752 0x60008100 +#define TG3_PHY_ID_BCM5714 0x60008340 +#define TG3_PHY_ID_BCM5780 0x60008350 +#define TG3_PHY_ID_BCM5755 0xbc050cc0 +#define TG3_PHY_ID_BCM5787 0xbc050ce0 +#define TG3_PHY_ID_BCM5756 0xbc050ed0 +#define TG3_PHY_ID_BCM5784 0xbc050fa0 +#define TG3_PHY_ID_BCM5761 0xbc050fd0 +#define TG3_PHY_ID_BCM5718C 0x5c0d8a00 +#define TG3_PHY_ID_BCM5718S 0xbc050ff0 +#define TG3_PHY_ID_BCM57765 0x5c0d8a40 +#define TG3_PHY_ID_BCM5719C 0x5c0d8a20 +#define TG3_PHY_ID_BCM5720C 0x5c0d8b60 +#define TG3_PHY_ID_BCM5906 0xdc00ac40 +#define TG3_PHY_ID_BCM8002 0x60010140 +#define TG3_PHY_ID_INVALID 0xffffffff + +#define PHY_ID_RTL8211C 0x001cc910 +#define PHY_ID_RTL8201E 0x00008200 + +#define TG3_PHY_ID_REV_MASK 0x0000000f +#define TG3_PHY_REV_BCM5401_B0 0x1 + + /* This macro assumes the passed PHY ID is + * already masked with TG3_PHY_ID_MASK. 
+ */ +#define TG3_KNOWN_PHY_ID(X) \ + ((X) == TG3_PHY_ID_BCM5400 || (X) == TG3_PHY_ID_BCM5401 || \ + (X) == TG3_PHY_ID_BCM5411 || (X) == TG3_PHY_ID_BCM5701 || \ + (X) == TG3_PHY_ID_BCM5703 || (X) == TG3_PHY_ID_BCM5704 || \ + (X) == TG3_PHY_ID_BCM5705 || (X) == TG3_PHY_ID_BCM5750 || \ + (X) == TG3_PHY_ID_BCM5752 || (X) == TG3_PHY_ID_BCM5714 || \ + (X) == TG3_PHY_ID_BCM5780 || (X) == TG3_PHY_ID_BCM5787 || \ + (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \ + (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \ + (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \ + (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \ + (X) == TG3_PHY_ID_BCM8002) + + u32 phy_flags; +#define TG3_PHYFLG_IS_LOW_POWER 0x00000001 +#define TG3_PHYFLG_IS_CONNECTED 0x00000002 +#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000004 +#define TG3_PHYFLG_PHY_SERDES 0x00000010 +#define TG3_PHYFLG_MII_SERDES 0x00000020 +#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \ + TG3_PHYFLG_MII_SERDES) +#define TG3_PHYFLG_IS_FET 0x00000040 +#define TG3_PHYFLG_10_100_ONLY 0x00000080 +#define TG3_PHYFLG_ENABLE_APD 0x00000100 +#define TG3_PHYFLG_CAPACITIVE_COUPLING 0x00000200 +#define TG3_PHYFLG_NO_ETH_WIRE_SPEED 0x00000400 +#define TG3_PHYFLG_JITTER_BUG 0x00000800 +#define TG3_PHYFLG_ADJUST_TRIM 0x00001000 +#define TG3_PHYFLG_ADC_BUG 0x00002000 +#define TG3_PHYFLG_5704_A0_BUG 0x00004000 +#define TG3_PHYFLG_BER_BUG 0x00008000 +#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 +#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 +#define TG3_PHYFLG_EEE_CAP 0x00040000 + + u32 led_ctrl; + u32 phy_otp; + u32 setlpicnt; + +#define TG3_BPN_SIZE 24 + char board_part_number[TG3_BPN_SIZE]; +#define TG3_VER_SIZE ETHTOOL_FWVERS_LEN + char fw_ver[TG3_VER_SIZE]; + u32 nic_sram_data_cfg; + u32 pci_clock_ctrl; + struct pci_dev *pdev_peer; + + struct tg3_hw_stats *hw_stats; + dma_addr_t stats_mapping; + struct work_struct reset_task; + + int nvram_lock_cnt; + u32 nvram_size; +#define TG3_NVRAM_SIZE_2KB 0x00000800 +#define TG3_NVRAM_SIZE_64KB 0x00010000 +#define TG3_NVRAM_SIZE_128KB 0x00020000 +#define TG3_NVRAM_SIZE_256KB 0x00040000 +#define TG3_NVRAM_SIZE_512KB 0x00080000 +#define TG3_NVRAM_SIZE_1MB 0x00100000 +#define TG3_NVRAM_SIZE_2MB 0x00200000 + + u32 nvram_pagesize; + u32 nvram_jedecnum; + +#define JEDEC_ATMEL 0x1f +#define JEDEC_ST 0x20 +#define JEDEC_SAIFUN 0x4f +#define JEDEC_SST 0xbf + +#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB +#define ATMEL_AT24C02_PAGE_SIZE (8) + +#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB +#define ATMEL_AT24C64_PAGE_SIZE (32) + +#define ATMEL_AT24C512_CHIP_SIZE TG3_NVRAM_SIZE_512KB +#define ATMEL_AT24C512_PAGE_SIZE (128) + +#define ATMEL_AT45DB0X1B_PAGE_POS 9 +#define ATMEL_AT45DB0X1B_PAGE_SIZE 264 + +#define ATMEL_AT25F512_PAGE_SIZE 256 + +#define ST_M45PEX0_PAGE_SIZE 256 + +#define SAIFUN_SA25F0XX_PAGE_SIZE 256 + +#define SST_25VF0X0_PAGE_SIZE 4098 + + unsigned int irq_max; + unsigned int irq_cnt; + + struct ethtool_coalesce coal; + + /* firmware info */ + const char *fw_needed; + const struct firmware *fw; + u32 fw_len; /* includes BSS */ +}; + +#endif /* !(_T3_H) */ diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c deleted file mode 100644 index ea65f7e..0000000 --- a/drivers/net/sb1250-mac.c +++ /dev/null @@ -1,2690 +0,0 @@ -/* - * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation - * Copyright (c) 2006, 2007 Maciej W. 
Rozycki - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * - * This driver is designed for the Broadcom SiByte SOC built-in - * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp. - * - * Updated to the driver model and the PHY abstraction layer - * by Maciej W. Rozycki. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include /* Processor type for cache alignment. */ - -/* Operational parameters that usually are not changed. */ - -#define CONFIG_SBMAC_COALESCE - -/* Time in jiffies before concluding the transmitter is hung. */ -#define TX_TIMEOUT (2*HZ) - - -MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)"); -MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver"); - -/* A few user-configurable values which may be modified when a driver - module is loaded. */ - -/* 1 normal messages, 0 quiet .. 7 verbose. */ -static int debug = 1; -module_param(debug, int, S_IRUGO); -MODULE_PARM_DESC(debug, "Debug messages"); - -#ifdef CONFIG_SBMAC_COALESCE -static int int_pktcnt_tx = 255; -module_param(int_pktcnt_tx, int, S_IRUGO); -MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count"); - -static int int_timeout_tx = 255; -module_param(int_timeout_tx, int, S_IRUGO); -MODULE_PARM_DESC(int_timeout_tx, "TX timeout value"); - -static int int_pktcnt_rx = 64; -module_param(int_pktcnt_rx, int, S_IRUGO); -MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count"); - -static int int_timeout_rx = 64; -module_param(int_timeout_rx, int, S_IRUGO); -MODULE_PARM_DESC(int_timeout_rx, "RX timeout value"); -#endif - -#include -#include -#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) -#include -#include -#define R_MAC_DMA_OODPKTLOST_RX R_MAC_DMA_OODPKTLOST -#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) -#include -#include -#else -#error invalid SiByte MAC configuration -#endif -#include -#include -#include - -#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) -#define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2)) -#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) -#define UNIT_INT(n) (K_INT_MAC_0 + (n)) -#else -#error invalid SiByte MAC configuration -#endif - -#ifdef K_INT_PHY -#define SBMAC_PHY_INT K_INT_PHY -#else -#define SBMAC_PHY_INT PHY_POLL -#endif - -/********************************************************************** - * Simple types - ********************************************************************* */ - -enum sbmac_speed { - sbmac_speed_none = 0, - sbmac_speed_10 = SPEED_10, - sbmac_speed_100 = SPEED_100, - sbmac_speed_1000 = SPEED_1000, -}; - -enum sbmac_duplex { - sbmac_duplex_none = -1, - sbmac_duplex_half = DUPLEX_HALF, - sbmac_duplex_full = 
DUPLEX_FULL, -}; - -enum sbmac_fc { - sbmac_fc_none, - sbmac_fc_disabled, - sbmac_fc_frame, - sbmac_fc_collision, - sbmac_fc_carrier, -}; - -enum sbmac_state { - sbmac_state_uninit, - sbmac_state_off, - sbmac_state_on, - sbmac_state_broken, -}; - - -/********************************************************************** - * Macros - ********************************************************************* */ - - -#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \ - (d)->sbdma_dscrtable : (d)->f+1) - - -#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES) - -#define SBMAC_MAX_TXDESCR 256 -#define SBMAC_MAX_RXDESCR 256 - -#define ETHER_ADDR_LEN 6 -#define ENET_PACKET_SIZE 1518 -/*#define ENET_PACKET_SIZE 9216 */ - -/********************************************************************** - * DMA Descriptor structure - ********************************************************************* */ - -struct sbdmadscr { - uint64_t dscr_a; - uint64_t dscr_b; -}; - -/********************************************************************** - * DMA Controller structure - ********************************************************************* */ - -struct sbmacdma { - - /* - * This stuff is used to identify the channel and the registers - * associated with it. - */ - struct sbmac_softc *sbdma_eth; /* back pointer to associated - MAC */ - int sbdma_channel; /* channel number */ - int sbdma_txdir; /* direction (1=transmit) */ - int sbdma_maxdescr; /* total # of descriptors - in ring */ -#ifdef CONFIG_SBMAC_COALESCE - int sbdma_int_pktcnt; - /* # descriptors rx/tx - before interrupt */ - int sbdma_int_timeout; - /* # usec rx/tx interrupt */ -#endif - void __iomem *sbdma_config0; /* DMA config register 0 */ - void __iomem *sbdma_config1; /* DMA config register 1 */ - void __iomem *sbdma_dscrbase; - /* descriptor base address */ - void __iomem *sbdma_dscrcnt; /* descriptor count register */ - void __iomem *sbdma_curdscr; /* current descriptor - address */ - void __iomem *sbdma_oodpktlost; - /* pkt drop (rx only) */ - - /* - * This stuff is for maintenance of the ring - */ - void *sbdma_dscrtable_unaligned; - struct sbdmadscr *sbdma_dscrtable; - /* base of descriptor table */ - struct sbdmadscr *sbdma_dscrtable_end; - /* end of descriptor table */ - struct sk_buff **sbdma_ctxtable; - /* context table, one - per descr */ - dma_addr_t sbdma_dscrtable_phys; - /* and also the phys addr */ - struct sbdmadscr *sbdma_addptr; /* next dscr for sw to add */ - struct sbdmadscr *sbdma_remptr; /* next dscr for sw - to remove */ -}; - - -/********************************************************************** - * Ethernet softc structure - ********************************************************************* */ - -struct sbmac_softc { - - /* - * Linux-specific things - */ - struct net_device *sbm_dev; /* pointer to linux device */ - struct napi_struct napi; - struct phy_device *phy_dev; /* the associated PHY device */ - struct mii_bus *mii_bus; /* the MII bus */ - int phy_irq[PHY_MAX_ADDR]; - spinlock_t sbm_lock; /* spin lock */ - int sbm_devflags; /* current device flags */ - - /* - * Controller-specific things - */ - void __iomem *sbm_base; /* MAC's base address */ - enum sbmac_state sbm_state; /* current state */ - - void __iomem *sbm_macenable; /* MAC Enable Register */ - void __iomem *sbm_maccfg; /* MAC Config Register */ - void __iomem *sbm_fifocfg; /* FIFO Config Register */ - void __iomem *sbm_framecfg; /* Frame Config Register */ - void __iomem *sbm_rxfilter; /* Receive Filter Register */ - 
void __iomem *sbm_isr; /* Interrupt Status Register */ - void __iomem *sbm_imr; /* Interrupt Mask Register */ - void __iomem *sbm_mdio; /* MDIO Register */ - - enum sbmac_speed sbm_speed; /* current speed */ - enum sbmac_duplex sbm_duplex; /* current duplex */ - enum sbmac_fc sbm_fc; /* cur. flow control setting */ - int sbm_pause; /* current pause setting */ - int sbm_link; /* current link state */ - - unsigned char sbm_hwaddr[ETHER_ADDR_LEN]; - - struct sbmacdma sbm_txdma; /* only channel 0 for now */ - struct sbmacdma sbm_rxdma; - int rx_hw_checksum; - int sbe_idx; -}; - - -/********************************************************************** - * Externs - ********************************************************************* */ - -/********************************************************************** - * Prototypes - ********************************************************************* */ - -static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, - int txrx, int maxdescr); -static void sbdma_channel_start(struct sbmacdma *d, int rxtx); -static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, - struct sk_buff *m); -static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); -static void sbdma_emptyring(struct sbmacdma *d); -static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); -static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, - int work_to_do, int poll); -static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, - int poll); -static int sbmac_initctx(struct sbmac_softc *s); -static void sbmac_channel_start(struct sbmac_softc *s); -static void sbmac_channel_stop(struct sbmac_softc *s); -static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, - enum sbmac_state); -static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); -static uint64_t sbmac_addr2reg(unsigned char *ptr); -static irqreturn_t sbmac_intr(int irq, void *dev_instance); -static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); -static void sbmac_setmulti(struct sbmac_softc *sc); -static int sbmac_init(struct platform_device *pldev, long long base); -static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); -static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, - enum sbmac_fc fc); - -static int sbmac_open(struct net_device *dev); -static void sbmac_tx_timeout (struct net_device *dev); -static void sbmac_set_rx_mode(struct net_device *dev); -static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static int sbmac_close(struct net_device *dev); -static int sbmac_poll(struct napi_struct *napi, int budget); - -static void sbmac_mii_poll(struct net_device *dev); -static int sbmac_mii_probe(struct net_device *dev); - -static void sbmac_mii_sync(void __iomem *sbm_mdio); -static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, - int bitcnt); -static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx); -static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, - u16 val); - - -/********************************************************************** - * Globals - ********************************************************************* */ - -static char sbmac_string[] = "sb1250-mac"; - -static char sbmac_mdio_string[] = "sb1250-mac-mdio"; - - -/********************************************************************** - * MDIO constants - 
********************************************************************* */ - -#define MII_COMMAND_START 0x01 -#define MII_COMMAND_READ 0x02 -#define MII_COMMAND_WRITE 0x01 -#define MII_COMMAND_ACK 0x02 - -#define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */ - -#define ENABLE 1 -#define DISABLE 0 - -/********************************************************************** - * SBMAC_MII_SYNC(sbm_mdio) - * - * Synchronize with the MII - send a pattern of bits to the MII - * that will guarantee that it is ready to accept a command. - * - * Input parameters: - * sbm_mdio - address of the MAC's MDIO register - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbmac_mii_sync(void __iomem *sbm_mdio) -{ - int cnt; - uint64_t bits; - int mac_mdio_genc; - - mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; - - bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT; - - __raw_writeq(bits | mac_mdio_genc, sbm_mdio); - - for (cnt = 0; cnt < 32; cnt++) { - __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); - __raw_writeq(bits | mac_mdio_genc, sbm_mdio); - } -} - -/********************************************************************** - * SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt) - * - * Send some bits to the MII. The bits to be sent are right- - * justified in the 'data' parameter. - * - * Input parameters: - * sbm_mdio - address of the MAC's MDIO register - * data - data to send - * bitcnt - number of bits to send - ********************************************************************* */ - -static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, - int bitcnt) -{ - int i; - uint64_t bits; - unsigned int curmask; - int mac_mdio_genc; - - mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; - - bits = M_MAC_MDIO_DIR_OUTPUT; - __raw_writeq(bits | mac_mdio_genc, sbm_mdio); - - curmask = 1 << (bitcnt - 1); - - for (i = 0; i < bitcnt; i++) { - if (data & curmask) - bits |= M_MAC_MDIO_OUT; - else bits &= ~M_MAC_MDIO_OUT; - __raw_writeq(bits | mac_mdio_genc, sbm_mdio); - __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); - __raw_writeq(bits | mac_mdio_genc, sbm_mdio); - curmask >>= 1; - } -} - - - -/********************************************************************** - * SBMAC_MII_READ(bus, phyaddr, regidx) - * Read a PHY register. - * - * Input parameters: - * bus - MDIO bus handle - * phyaddr - PHY's address - * regnum - index of register to read - * - * Return value: - * value read, or 0xffff if an error occurred. - ********************************************************************* */ - -static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx) -{ - struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; - void __iomem *sbm_mdio = sc->sbm_mdio; - int idx; - int error; - int regval; - int mac_mdio_genc; - - /* - * Synchronize ourselves so that the PHY knows the next - * thing coming down is a command - */ - sbmac_mii_sync(sbm_mdio); - - /* - * Send the data to the PHY. The sequence is - * a "start" command (2 bits) - * a "read" command (2 bits) - * the PHY addr (5 bits) - * the register index (5 bits) - */ - sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); - sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2); - sbmac_mii_senddata(sbm_mdio, phyaddr, 5); - sbmac_mii_senddata(sbm_mdio, regidx, 5); - - mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; - - /* - * Switch the port around without a clock transition. 
- */ - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); - - /* - * Send out a clock pulse to signal we want the status - */ - __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, - sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); - - /* - * If an error occurred, the PHY will signal '1' back - */ - error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN; - - /* - * Issue an 'idle' clock pulse, but keep the direction - * the same. - */ - __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, - sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); - - regval = 0; - - for (idx = 0; idx < 16; idx++) { - regval <<= 1; - - if (error == 0) { - if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN) - regval |= 1; - } - - __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, - sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); - } - - /* Switch back to output */ - __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); - - if (error == 0) - return regval; - return 0xffff; -} - - -/********************************************************************** - * SBMAC_MII_WRITE(bus, phyaddr, regidx, regval) - * - * Write a value to a PHY register. - * - * Input parameters: - * bus - MDIO bus handle - * phyaddr - PHY to use - * regidx - register within the PHY - * regval - data to write to register - * - * Return value: - * 0 for success - ********************************************************************* */ - -static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, - u16 regval) -{ - struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; - void __iomem *sbm_mdio = sc->sbm_mdio; - int mac_mdio_genc; - - sbmac_mii_sync(sbm_mdio); - - sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); - sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2); - sbmac_mii_senddata(sbm_mdio, phyaddr, 5); - sbmac_mii_senddata(sbm_mdio, regidx, 5); - sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2); - sbmac_mii_senddata(sbm_mdio, regval, 16); - - mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; - - __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); - - return 0; -} - - - -/********************************************************************** - * SBDMA_INITCTX(d,s,chan,txrx,maxdescr) - * - * Initialize a DMA channel context. Since there are potentially - * eight DMA channels per MAC, it's nice to do this in a standard - * way. 
- * - * Input parameters: - * d - struct sbmacdma (DMA channel context) - * s - struct sbmac_softc (pointer to a MAC) - * chan - channel number (0..1 right now) - * txrx - Identifies DMA_TX or DMA_RX for channel direction - * maxdescr - number of descriptors - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, - int txrx, int maxdescr) -{ -#ifdef CONFIG_SBMAC_COALESCE - int int_pktcnt, int_timeout; -#endif - - /* - * Save away interesting stuff in the structure - */ - - d->sbdma_eth = s; - d->sbdma_channel = chan; - d->sbdma_txdir = txrx; - -#if 0 - /* RMON clearing */ - s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING; -#endif - - __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR); - __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR); - - /* - * initialize register pointers - */ - - d->sbdma_config0 = - s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0); - d->sbdma_config1 = - s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1); - d->sbdma_dscrbase = - s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE); - d->sbdma_dscrcnt = - s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT); - d->sbdma_curdscr = - s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR); - if (d->sbdma_txdir) - d->sbdma_oodpktlost = NULL; - else - d->sbdma_oodpktlost = - s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX); - - /* - * Allocate memory for the ring - */ - - d->sbdma_maxdescr = maxdescr; - - d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1, - sizeof(*d->sbdma_dscrtable), - GFP_KERNEL); - - /* - * The descriptor table must be aligned to at least 16 bytes or the - * MAC will corrupt it. - */ - d->sbdma_dscrtable = (struct sbdmadscr *) - ALIGN((unsigned long)d->sbdma_dscrtable_unaligned, - sizeof(*d->sbdma_dscrtable)); - - d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr; - - d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable); - - /* - * And context table - */ - - d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr, - sizeof(*d->sbdma_ctxtable), GFP_KERNEL); - -#ifdef CONFIG_SBMAC_COALESCE - /* - * Setup Rx/Tx DMA coalescing defaults - */ - - int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx; - if ( int_pktcnt ) { - d->sbdma_int_pktcnt = int_pktcnt; - } else { - d->sbdma_int_pktcnt = 1; - } - - int_timeout = (txrx == DMA_TX) ? 
int_timeout_tx : int_timeout_rx; - if ( int_timeout ) { - d->sbdma_int_timeout = int_timeout; - } else { - d->sbdma_int_timeout = 0; - } -#endif - -} - -/********************************************************************** - * SBDMA_CHANNEL_START(d) - * - * Initialize the hardware registers for a DMA channel. - * - * Input parameters: - * d - DMA channel to init (context must be previously init'd - * rxtx - DMA_RX or DMA_TX depending on what type of channel - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbdma_channel_start(struct sbmacdma *d, int rxtx) -{ - /* - * Turn on the DMA channel - */ - -#ifdef CONFIG_SBMAC_COALESCE - __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) | - 0, d->sbdma_config1); - __raw_writeq(M_DMA_EOP_INT_EN | - V_DMA_RINGSZ(d->sbdma_maxdescr) | - V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) | - 0, d->sbdma_config0); -#else - __raw_writeq(0, d->sbdma_config1); - __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) | - 0, d->sbdma_config0); -#endif - - __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase); - - /* - * Initialize ring pointers - */ - - d->sbdma_addptr = d->sbdma_dscrtable; - d->sbdma_remptr = d->sbdma_dscrtable; -} - -/********************************************************************** - * SBDMA_CHANNEL_STOP(d) - * - * Initialize the hardware registers for a DMA channel. - * - * Input parameters: - * d - DMA channel to init (context must be previously init'd - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbdma_channel_stop(struct sbmacdma *d) -{ - /* - * Turn off the DMA channel - */ - - __raw_writeq(0, d->sbdma_config1); - - __raw_writeq(0, d->sbdma_dscrbase); - - __raw_writeq(0, d->sbdma_config0); - - /* - * Zero ring pointers - */ - - d->sbdma_addptr = NULL; - d->sbdma_remptr = NULL; -} - -static inline void sbdma_align_skb(struct sk_buff *skb, - unsigned int power2, unsigned int offset) -{ - unsigned char *addr = skb->data; - unsigned char *newaddr = PTR_ALIGN(addr, power2); - - skb_reserve(skb, newaddr - addr + offset); -} - - -/********************************************************************** - * SBDMA_ADD_RCVBUFFER(d,sb) - * - * Add a buffer to the specified DMA channel. For receive channels, - * this queues a buffer for inbound packets. - * - * Input parameters: - * sc - softc structure - * d - DMA channel descriptor - * sb - sk_buff to add, or NULL if we should allocate one - * - * Return value: - * 0 if buffer could not be added (ring is full) - * 1 if buffer added successfully - ********************************************************************* */ - - -static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, - struct sk_buff *sb) -{ - struct net_device *dev = sc->sbm_dev; - struct sbdmadscr *dsc; - struct sbdmadscr *nextdsc; - struct sk_buff *sb_new = NULL; - int pktsize = ENET_PACKET_SIZE; - - /* get pointer to our current place in the ring */ - - dsc = d->sbdma_addptr; - nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); - - /* - * figure out if the ring is full - if the next descriptor - * is the same as the one that we're going to remove from - * the ring, the ring is full - */ - - if (nextdsc == d->sbdma_remptr) { - return -ENOSPC; - } - - /* - * Allocate a sk_buff if we don't already have one. - * If we do have an sk_buff, reset it so that it's empty. - * - * Note: sk_buffs don't seem to be guaranteed to have any sort - * of alignment when they are allocated. 
Therefore, allocate enough - * extra space to make sure that: - * - * 1. the data does not start in the middle of a cache line. - * 2. The data does not end in the middle of a cache line - * 3. The buffer can be aligned such that the IP addresses are - * naturally aligned. - * - * Remember, the SOCs MAC writes whole cache lines at a time, - * without reading the old contents first. So, if the sk_buff's - * data portion starts in the middle of a cache line, the SOC - * DMA will trash the beginning (and ending) portions. - */ - - if (sb == NULL) { - sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + - SMP_CACHE_BYTES * 2 + - NET_IP_ALIGN); - if (sb_new == NULL) { - pr_info("%s: sk_buff allocation failed\n", - d->sbdma_eth->sbm_dev->name); - return -ENOBUFS; - } - - sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); - } - else { - sb_new = sb; - /* - * nothing special to reinit buffer, it's already aligned - * and sb->data already points to a good place. - */ - } - - /* - * fill in the descriptor - */ - -#ifdef CONFIG_SBMAC_COALESCE - /* - * Do not interrupt per DMA transfer. - */ - dsc->dscr_a = virt_to_phys(sb_new->data) | - V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; -#else - dsc->dscr_a = virt_to_phys(sb_new->data) | - V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | - M_DMA_DSCRA_INTERRUPT; -#endif - - /* receiving: no options */ - dsc->dscr_b = 0; - - /* - * fill in the context - */ - - d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new; - - /* - * point at next packet - */ - - d->sbdma_addptr = nextdsc; - - /* - * Give the buffer to the DMA engine. - */ - - __raw_writeq(1, d->sbdma_dscrcnt); - - return 0; /* we did it */ -} - -/********************************************************************** - * SBDMA_ADD_TXBUFFER(d,sb) - * - * Add a transmit buffer to the specified DMA channel, causing a - * transmit to start. - * - * Input parameters: - * d - DMA channel descriptor - * sb - sk_buff to add - * - * Return value: - * 0 transmit queued successfully - * otherwise error code - ********************************************************************* */ - - -static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb) -{ - struct sbdmadscr *dsc; - struct sbdmadscr *nextdsc; - uint64_t phys; - uint64_t ncb; - int length; - - /* get pointer to our current place in the ring */ - - dsc = d->sbdma_addptr; - nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); - - /* - * figure out if the ring is full - if the next descriptor - * is the same as the one that we're going to remove from - * the ring, the ring is full - */ - - if (nextdsc == d->sbdma_remptr) { - return -ENOSPC; - } - - /* - * Under Linux, it's not necessary to copy/coalesce buffers - * like it is on NetBSD. We think they're all contiguous, - * but that may not be true for GBE. - */ - - length = sb->len; - - /* - * fill in the descriptor. Note that the number of cache - * blocks in the descriptor is the number of blocks - * *spanned*, so we need to add in the offset (if any) - * while doing the calculation. 
- */ - - phys = virt_to_phys(sb->data); - ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1))); - - dsc->dscr_a = phys | - V_DMA_DSCRA_A_SIZE(ncb) | -#ifndef CONFIG_SBMAC_COALESCE - M_DMA_DSCRA_INTERRUPT | -#endif - M_DMA_ETHTX_SOP; - - /* transmitting: set outbound options and length */ - - dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) | - V_DMA_DSCRB_PKT_SIZE(length); - - /* - * fill in the context - */ - - d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb; - - /* - * point at next packet - */ - - d->sbdma_addptr = nextdsc; - - /* - * Give the buffer to the DMA engine. - */ - - __raw_writeq(1, d->sbdma_dscrcnt); - - return 0; /* we did it */ -} - - - - -/********************************************************************** - * SBDMA_EMPTYRING(d) - * - * Free all allocated sk_buffs on the specified DMA channel; - * - * Input parameters: - * d - DMA channel - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbdma_emptyring(struct sbmacdma *d) -{ - int idx; - struct sk_buff *sb; - - for (idx = 0; idx < d->sbdma_maxdescr; idx++) { - sb = d->sbdma_ctxtable[idx]; - if (sb) { - dev_kfree_skb(sb); - d->sbdma_ctxtable[idx] = NULL; - } - } -} - - -/********************************************************************** - * SBDMA_FILLRING(d) - * - * Fill the specified DMA channel (must be receive channel) - * with sk_buffs - * - * Input parameters: - * sc - softc structure - * d - DMA channel - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) -{ - int idx; - - for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { - if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) - break; - } -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void sbmac_netpoll(struct net_device *netdev) -{ - struct sbmac_softc *sc = netdev_priv(netdev); - int irq = sc->sbm_dev->irq; - - __raw_writeq(0, sc->sbm_imr); - - sbmac_intr(irq, netdev); - -#ifdef CONFIG_SBMAC_COALESCE - __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | - ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), - sc->sbm_imr); -#else - __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | - (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); -#endif -} -#endif - -/********************************************************************** - * SBDMA_RX_PROCESS(sc,d,work_to_do,poll) - * - * Process "completed" receive buffers on the specified DMA channel. - * - * Input parameters: - * sc - softc structure - * d - DMA channel context - * work_to_do - no. 
of packets to process before enabling interrupt - * again (for NAPI) - * poll - 1: using polling (for NAPI) - * - * Return value: - * nothing - ********************************************************************* */ - -static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, - int work_to_do, int poll) -{ - struct net_device *dev = sc->sbm_dev; - int curidx; - int hwidx; - struct sbdmadscr *dsc; - struct sk_buff *sb; - int len; - int work_done = 0; - int dropped = 0; - - prefetch(d); - -again: - /* Check if the HW dropped any frames */ - dev->stats.rx_fifo_errors - += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff; - __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost); - - while (work_to_do-- > 0) { - /* - * figure out where we are (as an index) and where - * the hardware is (also as an index) - * - * This could be done faster if (for example) the - * descriptor table was page-aligned and contiguous in - * both virtual and physical memory -- you could then - * just compare the low-order bits of the virtual address - * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) - */ - - dsc = d->sbdma_remptr; - curidx = dsc - d->sbdma_dscrtable; - - prefetch(dsc); - prefetch(&d->sbdma_ctxtable[curidx]); - - hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - - d->sbdma_dscrtable_phys) / - sizeof(*d->sbdma_dscrtable); - - /* - * If they're the same, that means we've processed all - * of the descriptors up to (but not including) the one that - * the hardware is working on right now. - */ - - if (curidx == hwidx) - goto done; - - /* - * Otherwise, get the packet's sk_buff ptr back - */ - - sb = d->sbdma_ctxtable[curidx]; - d->sbdma_ctxtable[curidx] = NULL; - - len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4; - - /* - * Check packet status. If good, process it. - * If not, silently drop it and put it back on the - * receive ring. - */ - - if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) { - - /* - * Add a new buffer to replace the old one. If we fail - * to allocate a buffer, we're going to drop this - * packet and put it right back on the receive ring. - */ - - if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == - -ENOBUFS)) { - dev->stats.rx_dropped++; - /* Re-add old buffer */ - sbdma_add_rcvbuffer(sc, d, sb); - /* No point in continuing at the moment */ - printk(KERN_ERR "dropped packet (1)\n"); - d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); - goto done; - } else { - /* - * Set length into the packet - */ - skb_put(sb,len); - - /* - * Buffer has been replaced on the - * receive ring. Pass the buffer to - * the kernel - */ - sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev); - /* Check hw IPv4/TCP checksum if supported */ - if (sc->rx_hw_checksum == ENABLE) { - if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) && - !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) { - sb->ip_summed = CHECKSUM_UNNECESSARY; - /* don't need to set sb->csum */ - } else { - skb_checksum_none_assert(sb); - } - } - prefetch(sb->data); - prefetch((const void *)(((char *)sb->data)+32)); - if (poll) - dropped = netif_receive_skb(sb); - else - dropped = netif_rx(sb); - - if (dropped == NET_RX_DROP) { - dev->stats.rx_dropped++; - d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); - goto done; - } - else { - dev->stats.rx_bytes += len; - dev->stats.rx_packets++; - } - } - } else { - /* - * Packet was mangled somehow. Just drop it and - * put it back on the receive ring. - */ - dev->stats.rx_errors++; - sbdma_add_rcvbuffer(sc, d, sb); - } - - - /* - * .. and advance to the next buffer. 
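
The hwidx computation above, which turns the hardware's current-descriptor CSR back into a ring index, reduces to simple pointer arithmetic. A standalone sketch, with invented DSCR_SIZE and addresses (the driver uses sizeof(*d->sbdma_dscrtable) and sbdma_dscrtable_phys):

#include <stdint.h>
#include <stdio.h>

#define DSCR_SIZE 16u   /* one descriptor: two 64-bit words */

/* Recover a ring index from the hardware's descriptor address. */
static unsigned int hw_index(uint64_t curdscr_phys, uint64_t table_phys)
{
        return (unsigned int)((curdscr_phys - table_phys) / DSCR_SIZE);
}

int main(void)
{
        uint64_t table_phys = 0x20000;

        /* hardware is working on the descriptor at 0x20040 */
        printf("hwidx = %u\n", hw_index(0x20040, table_phys)); /* 4 */
        return 0;
}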
- */ - - d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); - work_done++; - } - if (!poll) { - work_to_do = 32; - goto again; /* collect fifo drop statistics again */ - } -done: - return work_done; -} - -/********************************************************************** - * SBDMA_TX_PROCESS(sc,d) - * - * Process "completed" transmit buffers on the specified DMA channel. - * This is normally called within the interrupt service routine. - * Note that this isn't really ideal for priority channels, since - * it processes all of the packets on a given channel before - * returning. - * - * Input parameters: - * sc - softc structure - * d - DMA channel context - * poll - 1: using polling (for NAPI) - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, - int poll) -{ - struct net_device *dev = sc->sbm_dev; - int curidx; - int hwidx; - struct sbdmadscr *dsc; - struct sk_buff *sb; - unsigned long flags; - int packets_handled = 0; - - spin_lock_irqsave(&(sc->sbm_lock), flags); - - if (d->sbdma_remptr == d->sbdma_addptr) - goto end_unlock; - - hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - - d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable); - - for (;;) { - /* - * figure out where we are (as an index) and where - * the hardware is (also as an index) - * - * This could be done faster if (for example) the - * descriptor table was page-aligned and contiguous in - * both virtual and physical memory -- you could then - * just compare the low-order bits of the virtual address - * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) - */ - - curidx = d->sbdma_remptr - d->sbdma_dscrtable; - - /* - * If they're the same, that means we've processed all - * of the descriptors up to (but not including) the one that - * the hardware is working on right now. - */ - - if (curidx == hwidx) - break; - - /* - * Otherwise, get the packet's sk_buff ptr back - */ - - dsc = &(d->sbdma_dscrtable[curidx]); - sb = d->sbdma_ctxtable[curidx]; - d->sbdma_ctxtable[curidx] = NULL; - - /* - * Stats - */ - - dev->stats.tx_bytes += sb->len; - dev->stats.tx_packets++; - - /* - * for transmits, we just free buffers. - */ - - dev_kfree_skb_irq(sb); - - /* - * .. and advance to the next buffer. - */ - - d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); - - packets_handled++; - - } - - /* - * Decide if we should wake up the protocol or not. - * Other drivers seem to do this when we reach a low - * watermark on the transmit queue. - */ - - if (packets_handled) - netif_wake_queue(d->sbdma_eth->sbm_dev); - -end_unlock: - spin_unlock_irqrestore(&(sc->sbm_lock), flags); - -} - - - -/********************************************************************** - * SBMAC_INITCTX(s) - * - * Initialize an Ethernet context structure - this is called - * once per MAC on the 1250. 
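
The reclaim loop in sbdma_tx_process() above boils down to a small, testable pattern: walk the software remove index toward the hardware index, releasing one completed entry per step. A standalone sketch with invented names (RING_SIZE, free_entry()); the driver's SBDMA_NEXTBUF plays the role of the modulo advance:

#include <stdio.h>

#define RING_SIZE 8

static void free_entry(int idx) { printf("freed %d\n", idx); }

static int reclaim(int *remidx, int hwidx)
{
        int handled = 0;

        while (*remidx != hwidx) {      /* stop at hardware's position */
                free_entry(*remidx);
                *remidx = (*remidx + 1) % RING_SIZE;
                handled++;
        }
        return handled;                 /* nonzero -> wake the queue */
}

int main(void)
{
        int rem = 2;

        printf("handled %d\n", reclaim(&rem, 6)); /* frees 2,3,4,5 */
        return 0;
}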
Memory is allocated here, so don't - * call it again from inside the ioctl routines that bring the - * interface up/down - * - * Input parameters: - * s - sbmac context structure - * - * Return value: - * 0 - ********************************************************************* */ - -static int sbmac_initctx(struct sbmac_softc *s) -{ - - /* - * figure out the addresses of some ports - */ - - s->sbm_macenable = s->sbm_base + R_MAC_ENABLE; - s->sbm_maccfg = s->sbm_base + R_MAC_CFG; - s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG; - s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG; - s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG; - s->sbm_isr = s->sbm_base + R_MAC_STATUS; - s->sbm_imr = s->sbm_base + R_MAC_INT_MASK; - s->sbm_mdio = s->sbm_base + R_MAC_MDIO; - - /* - * Initialize the DMA channels. Right now, only one per MAC is used - * Note: Only do this _once_, as it allocates memory from the kernel! - */ - - sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR); - sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR); - - /* - * initial state is OFF - */ - - s->sbm_state = sbmac_state_off; - - return 0; -} - - -static void sbdma_uninitctx(struct sbmacdma *d) -{ - if (d->sbdma_dscrtable_unaligned) { - kfree(d->sbdma_dscrtable_unaligned); - d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL; - } - - if (d->sbdma_ctxtable) { - kfree(d->sbdma_ctxtable); - d->sbdma_ctxtable = NULL; - } -} - - -static void sbmac_uninitctx(struct sbmac_softc *sc) -{ - sbdma_uninitctx(&(sc->sbm_txdma)); - sbdma_uninitctx(&(sc->sbm_rxdma)); -} - - -/********************************************************************** - * SBMAC_CHANNEL_START(s) - * - * Start packet processing on this MAC. - * - * Input parameters: - * s - sbmac structure - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbmac_channel_start(struct sbmac_softc *s) -{ - uint64_t reg; - void __iomem *port; - uint64_t cfg,fifo,framecfg; - int idx, th_value; - - /* - * Don't do this if running - */ - - if (s->sbm_state == sbmac_state_on) - return; - - /* - * Bring the controller out of reset, but leave it off. - */ - - __raw_writeq(0, s->sbm_macenable); - - /* - * Ignore all received packets - */ - - __raw_writeq(0, s->sbm_rxfilter); - - /* - * Calculate values for various control registers. - */ - - cfg = M_MAC_RETRY_EN | - M_MAC_TX_HOLD_SOP_EN | - V_MAC_TX_PAUSE_CNT_16K | - M_MAC_AP_STAT_EN | - M_MAC_FAST_SYNC | - M_MAC_SS_EN | - 0; - - /* - * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars - * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above - * Use a larger RD_THRSH for gigabit - */ - if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) - th_value = 28; - else - th_value = 64; - - fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ - ((s->sbm_speed == sbmac_speed_1000) - ? 
V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) | - V_MAC_TX_RL_THRSH(4) | - V_MAC_RX_PL_THRSH(4) | - V_MAC_RX_RD_THRSH(4) | /* Must be '4' */ - V_MAC_RX_RL_THRSH(8) | - 0; - - framecfg = V_MAC_MIN_FRAMESZ_DEFAULT | - V_MAC_MAX_FRAMESZ_DEFAULT | - V_MAC_BACKOFF_SEL(1); - - /* - * Clear out the hash address map - */ - - port = s->sbm_base + R_MAC_HASH_BASE; - for (idx = 0; idx < MAC_HASH_COUNT; idx++) { - __raw_writeq(0, port); - port += sizeof(uint64_t); - } - - /* - * Clear out the exact-match table - */ - - port = s->sbm_base + R_MAC_ADDR_BASE; - for (idx = 0; idx < MAC_ADDR_COUNT; idx++) { - __raw_writeq(0, port); - port += sizeof(uint64_t); - } - - /* - * Clear out the DMA Channel mapping table registers - */ - - port = s->sbm_base + R_MAC_CHUP0_BASE; - for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { - __raw_writeq(0, port); - port += sizeof(uint64_t); - } - - - port = s->sbm_base + R_MAC_CHLO0_BASE; - for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { - __raw_writeq(0, port); - port += sizeof(uint64_t); - } - - /* - * Program the hardware address. It goes into the hardware-address - * register as well as the first filter register. - */ - - reg = sbmac_addr2reg(s->sbm_hwaddr); - - port = s->sbm_base + R_MAC_ADDR_BASE; - __raw_writeq(reg, port); - port = s->sbm_base + R_MAC_ETHERNET_ADDR; - -#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS - /* - * Pass1 SOCs do not receive packets addressed to the - * destination address in the R_MAC_ETHERNET_ADDR register. - * Set the value to zero. - */ - __raw_writeq(0, port); -#else - __raw_writeq(reg, port); -#endif - - /* - * Set the receive filter for no packets, and write values - * to the various config registers - */ - - __raw_writeq(0, s->sbm_rxfilter); - __raw_writeq(0, s->sbm_imr); - __raw_writeq(framecfg, s->sbm_framecfg); - __raw_writeq(fifo, s->sbm_fifocfg); - __raw_writeq(cfg, s->sbm_maccfg); - - /* - * Initialize DMA channels (rings should be ok now) - */ - - sbdma_channel_start(&(s->sbm_rxdma), DMA_RX); - sbdma_channel_start(&(s->sbm_txdma), DMA_TX); - - /* - * Configure the speed, duplex, and flow control - */ - - sbmac_set_speed(s,s->sbm_speed); - sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc); - - /* - * Fill the receive ring - */ - - sbdma_fillring(s, &(s->sbm_rxdma)); - - /* - * Turn on the rest of the bits in the enable register - */ - -#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) - __raw_writeq(M_MAC_RXDMA_EN0 | - M_MAC_TXDMA_EN0, s->sbm_macenable); -#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) - __raw_writeq(M_MAC_RXDMA_EN0 | - M_MAC_TXDMA_EN0 | - M_MAC_RX_ENABLE | - M_MAC_TX_ENABLE, s->sbm_macenable); -#else -#error invalid SiByte MAC configuration -#endif - -#ifdef CONFIG_SBMAC_COALESCE - __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | - ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr); -#else - __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | - (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr); -#endif - - /* - * Enable receiving unicasts and broadcasts - */ - - __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter); - - /* - * we're running now. 
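
The interrupt mask written to sbm_imr above takes one of two shapes: with coalescing, interrupt on packet-count or timer expiry; without it, interrupt on every descriptor completion. A standalone sketch of the composition, with invented bit values (the real M_MAC_INT_* masks live in the SiByte register headers):

#include <stdint.h>
#include <stdio.h>

#define INT_EOP_COUNT  0x1ull
#define INT_EOP_TIMER  0x2ull
#define INT_CHANNEL    0xffull
#define S_TX_CH0       0
#define S_RX_CH0       8

static uint64_t imr_value(int coalesce)
{
        if (coalesce)   /* interrupt on packet count or timer expiry */
                return ((INT_EOP_COUNT | INT_EOP_TIMER) << S_TX_CH0) |
                       ((INT_EOP_COUNT | INT_EOP_TIMER) << S_RX_CH0);
        /* otherwise: interrupt on every descriptor completion */
        return (INT_CHANNEL << S_TX_CH0) | (INT_CHANNEL << S_RX_CH0);
}

int main(void)
{
        printf("coalesced imr = %#llx\n",
               (unsigned long long)imr_value(1));  /* 0x303 */
        return 0;
}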
- */ - - s->sbm_state = sbmac_state_on; - - /* - * Program multicast addresses - */ - - sbmac_setmulti(s); - - /* - * If channel was in promiscuous mode before, turn that on - */ - - if (s->sbm_devflags & IFF_PROMISC) { - sbmac_promiscuous_mode(s,1); - } - -} - - -/********************************************************************** - * SBMAC_CHANNEL_STOP(s) - * - * Stop packet processing on this MAC. - * - * Input parameters: - * s - sbmac structure - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbmac_channel_stop(struct sbmac_softc *s) -{ - /* don't do this if already stopped */ - - if (s->sbm_state == sbmac_state_off) - return; - - /* don't accept any packets, disable all interrupts */ - - __raw_writeq(0, s->sbm_rxfilter); - __raw_writeq(0, s->sbm_imr); - - /* Turn off ticker */ - - /* XXX */ - - /* turn off receiver and transmitter */ - - __raw_writeq(0, s->sbm_macenable); - - /* We're stopped now. */ - - s->sbm_state = sbmac_state_off; - - /* - * Stop DMA channels (rings should be ok now) - */ - - sbdma_channel_stop(&(s->sbm_rxdma)); - sbdma_channel_stop(&(s->sbm_txdma)); - - /* Empty the receive and transmit rings */ - - sbdma_emptyring(&(s->sbm_rxdma)); - sbdma_emptyring(&(s->sbm_txdma)); - -} - -/********************************************************************** - * SBMAC_SET_CHANNEL_STATE(state) - * - * Set the channel's state ON or OFF - * - * Input parameters: - * state - new state - * - * Return value: - * old state - ********************************************************************* */ -static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc, - enum sbmac_state state) -{ - enum sbmac_state oldstate = sc->sbm_state; - - /* - * If same as previous state, return - */ - - if (state == oldstate) { - return oldstate; - } - - /* - * If new state is ON, turn channel on - */ - - if (state == sbmac_state_on) { - sbmac_channel_start(sc); - } - else { - sbmac_channel_stop(sc); - } - - /* - * Return previous state - */ - - return oldstate; -} - - -/********************************************************************** - * SBMAC_PROMISCUOUS_MODE(sc,onoff) - * - * Turn on or off promiscuous mode - * - * Input parameters: - * sc - softc - * onoff - 1 to turn on, 0 to turn off - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff) -{ - uint64_t reg; - - if (sc->sbm_state != sbmac_state_on) - return; - - if (onoff) { - reg = __raw_readq(sc->sbm_rxfilter); - reg |= M_MAC_ALLPKT_EN; - __raw_writeq(reg, sc->sbm_rxfilter); - } - else { - reg = __raw_readq(sc->sbm_rxfilter); - reg &= ~M_MAC_ALLPKT_EN; - __raw_writeq(reg, sc->sbm_rxfilter); - } -} - -/********************************************************************** - * SBMAC_SETIPHDR_OFFSET(sc,onoff) - * - * Set the iphdr offset as 15 assuming ethernet encapsulation - * - * Input parameters: - * sc - softc - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) -{ - uint64_t reg; - - /* Hard code the off set to 15 for now */ - reg = __raw_readq(sc->sbm_rxfilter); - reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); - __raw_writeq(reg, sc->sbm_rxfilter); - - /* BCM1250 pass1 didn't have hardware checksum. Everything - later does. 
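
A note on the rxfilter update in sbmac_set_iphdr_offset() above: "reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15)" ANDs with (~mask | value), which can only clear bits, so it appears to rely on the field already holding all-ones; the conventional field-update idiom is clear-then-or. A standalone sketch with invented mask and shift values:

#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT 8
#define FIELD_MASK  (0x1full << FIELD_SHIFT)
#define FIELD_VAL(x) (((uint64_t)(x) << FIELD_SHIFT) & FIELD_MASK)

/* Conventional read-modify-write of a register bit-field. */
static uint64_t set_field(uint64_t reg, unsigned int val)
{
        reg &= ~FIELD_MASK;     /* clear the old field contents */
        reg |= FIELD_VAL(val);  /* then or in the new value */
        return reg;
}

int main(void)
{
        uint64_t reg = 0;       /* field currently clear, as after reset */

        /* clear-then-or writes the field regardless of its old value */
        printf("%#llx\n", (unsigned long long)set_field(reg, 15)); /* 0xf00 */
        /* whereas reg &= ~FIELD_MASK | FIELD_VAL(15) would leave 0 here,
         * since AND can never set a bit that was clear */
        return 0;
}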
*/ - if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) { - sc->rx_hw_checksum = DISABLE; - } else { - sc->rx_hw_checksum = ENABLE; - } -} - - -/********************************************************************** - * SBMAC_ADDR2REG(ptr) - * - * Convert six bytes into the 64-bit register value that - * we typically write into the SBMAC's address/mcast registers - * - * Input parameters: - * ptr - pointer to 6 bytes - * - * Return value: - * register value - ********************************************************************* */ - -static uint64_t sbmac_addr2reg(unsigned char *ptr) -{ - uint64_t reg = 0; - - ptr += 6; - - reg |= (uint64_t) *(--ptr); - reg <<= 8; - reg |= (uint64_t) *(--ptr); - reg <<= 8; - reg |= (uint64_t) *(--ptr); - reg <<= 8; - reg |= (uint64_t) *(--ptr); - reg <<= 8; - reg |= (uint64_t) *(--ptr); - reg <<= 8; - reg |= (uint64_t) *(--ptr); - - return reg; -} - - -/********************************************************************** - * SBMAC_SET_SPEED(s,speed) - * - * Configure LAN speed for the specified MAC. - * Warning: must be called when MAC is off! - * - * Input parameters: - * s - sbmac structure - * speed - speed to set MAC to (see enum sbmac_speed) - * - * Return value: - * 1 if successful - * 0 indicates invalid parameters - ********************************************************************* */ - -static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed) -{ - uint64_t cfg; - uint64_t framecfg; - - /* - * Save new current values - */ - - s->sbm_speed = speed; - - if (s->sbm_state == sbmac_state_on) - return 0; /* save for next restart */ - - /* - * Read current register values - */ - - cfg = __raw_readq(s->sbm_maccfg); - framecfg = __raw_readq(s->sbm_framecfg); - - /* - * Mask out the stuff we want to change - */ - - cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL); - framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH | - M_MAC_SLOT_SIZE); - - /* - * Now add in the new bits - */ - - switch (speed) { - case sbmac_speed_10: - framecfg |= V_MAC_IFG_RX_10 | - V_MAC_IFG_TX_10 | - K_MAC_IFG_THRSH_10 | - V_MAC_SLOT_SIZE_10; - cfg |= V_MAC_SPEED_SEL_10MBPS; - break; - - case sbmac_speed_100: - framecfg |= V_MAC_IFG_RX_100 | - V_MAC_IFG_TX_100 | - V_MAC_IFG_THRSH_100 | - V_MAC_SLOT_SIZE_100; - cfg |= V_MAC_SPEED_SEL_100MBPS ; - break; - - case sbmac_speed_1000: - framecfg |= V_MAC_IFG_RX_1000 | - V_MAC_IFG_TX_1000 | - V_MAC_IFG_THRSH_1000 | - V_MAC_SLOT_SIZE_1000; - cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN; - break; - - default: - return 0; - } - - /* - * Send the bits back to the hardware - */ - - __raw_writeq(framecfg, s->sbm_framecfg); - __raw_writeq(cfg, s->sbm_maccfg); - - return 1; -} - -/********************************************************************** - * SBMAC_SET_DUPLEX(s,duplex,fc) - * - * Set Ethernet duplex and flow control options for this MAC - * Warning: must be called when MAC is off! 
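
The byte packing done by sbmac_addr2reg() above walks the six-octet address backwards, so the last octet lands in the most significant populated byte of the register. An equivalent standalone sketch (the loop form is the editor's restatement, not the driver's code):

#include <stdint.h>
#include <stdio.h>

static uint64_t addr2reg(const unsigned char *ptr)
{
        uint64_t reg = 0;
        int i;

        for (i = 5; i >= 0; i--) {      /* walk the address backwards */
                reg <<= 8;
                reg |= ptr[i];
        }
        return reg;
}

int main(void)
{
        unsigned char mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };

        printf("%#llx\n", (unsigned long long)addr2reg(mac));
        /* prints 0x3020105e0002 */
        return 0;
}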
- * - * Input parameters: - * s - sbmac structure - * duplex - duplex setting (see enum sbmac_duplex) - * fc - flow control setting (see enum sbmac_fc) - * - * Return value: - * 1 if ok - * 0 if an invalid parameter combination was specified - ********************************************************************* */ - -static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, - enum sbmac_fc fc) -{ - uint64_t cfg; - - /* - * Save new current values - */ - - s->sbm_duplex = duplex; - s->sbm_fc = fc; - - if (s->sbm_state == sbmac_state_on) - return 0; /* save for next restart */ - - /* - * Read current register values - */ - - cfg = __raw_readq(s->sbm_maccfg); - - /* - * Mask off the stuff we're about to change - */ - - cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN); - - - switch (duplex) { - case sbmac_duplex_half: - switch (fc) { - case sbmac_fc_disabled: - cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED; - break; - - case sbmac_fc_collision: - cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED; - break; - - case sbmac_fc_carrier: - cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; - break; - - case sbmac_fc_frame: /* not valid in half duplex */ - default: /* invalid selection */ - return 0; - } - break; - - case sbmac_duplex_full: - switch (fc) { - case sbmac_fc_disabled: - cfg |= V_MAC_FC_CMD_DISABLED; - break; - - case sbmac_fc_frame: - cfg |= V_MAC_FC_CMD_ENABLED; - break; - - case sbmac_fc_collision: /* not valid in full duplex */ - case sbmac_fc_carrier: /* not valid in full duplex */ - default: - return 0; - } - break; - default: - return 0; - } - - /* - * Send the bits back to the hardware - */ - - __raw_writeq(cfg, s->sbm_maccfg); - - return 1; -} - - - - -/********************************************************************** - * SBMAC_INTR() - * - * Interrupt handler for MAC interrupts - * - * Input parameters: - * MAC structure - * - * Return value: - * nothing - ********************************************************************* */ -static irqreturn_t sbmac_intr(int irq,void *dev_instance) -{ - struct net_device *dev = (struct net_device *) dev_instance; - struct sbmac_softc *sc = netdev_priv(dev); - uint64_t isr; - int handled = 0; - - /* - * Read the ISR (this clears the bits in the real - * register, except for counter addr) - */ - - isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR; - - if (isr == 0) - return IRQ_RETVAL(0); - handled = 1; - - /* - * Transmits on channel 0 - */ - - if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) - sbdma_tx_process(sc,&(sc->sbm_txdma), 0); - - if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { - if (napi_schedule_prep(&sc->napi)) { - __raw_writeq(0, sc->sbm_imr); - __napi_schedule(&sc->napi); - /* Depend on the exit from poll to reenable intr */ - } - else { - /* may leave some packets behind */ - sbdma_rx_process(sc,&(sc->sbm_rxdma), - SBMAC_MAX_RXDESCR * 2, 0); - } - } - return IRQ_RETVAL(handled); -} - -/********************************************************************** - * SBMAC_START_TX(skb,dev) - * - * Start output on the specified interface. Basically, we - * queue as many buffers as we can until the ring fills up, or - * we run off the end of the queue, whichever comes first. 
- * - * Input parameters: - * skb - sk_buff to transmit - * dev - network device on which to send it - * - * Return value: - * NETDEV_TX_OK if the packet was queued - * NETDEV_TX_BUSY if the transmit ring was full - ********************************************************************* */ -static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) -{ - struct sbmac_softc *sc = netdev_priv(dev); - unsigned long flags; - - /* lock eth irq */ - spin_lock_irqsave(&sc->sbm_lock, flags); - - /* - * Put the buffer on the transmit ring. If we - * don't have room, stop the queue. - */ - - if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) { - /* XXX save skb that we could not send */ - netif_stop_queue(dev); - spin_unlock_irqrestore(&sc->sbm_lock, flags); - - return NETDEV_TX_BUSY; - } - - spin_unlock_irqrestore(&sc->sbm_lock, flags); - - return NETDEV_TX_OK; -} - -/********************************************************************** - * SBMAC_SETMULTI(sc) - * - * Reprogram the multicast table into the hardware, given - * the list of multicasts associated with the interface - * structure. - * - * Input parameters: - * sc - softc - * - * Return value: - * nothing - ********************************************************************* */ - -static void sbmac_setmulti(struct sbmac_softc *sc) -{ - uint64_t reg; - void __iomem *port; - int idx; - struct netdev_hw_addr *ha; - struct net_device *dev = sc->sbm_dev; - - /* - * Clear out entire multicast table. We do this by nuking - * the entire hash table and all the direct matches except - * the first one, which is used for our station address - */ - - for (idx = 1; idx < MAC_ADDR_COUNT; idx++) { - port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)); - __raw_writeq(0, port); - } - - for (idx = 0; idx < MAC_HASH_COUNT; idx++) { - port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t)); - __raw_writeq(0, port); - } - - /* - * Clear the filter to say we don't want any multicasts. - */ - - reg = __raw_readq(sc->sbm_rxfilter); - reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN); - __raw_writeq(reg, sc->sbm_rxfilter); - - if (dev->flags & IFF_ALLMULTI) { - /* - * Enable ALL multicasts. Do this by inverting the - * multicast enable bit. - */ - reg = __raw_readq(sc->sbm_rxfilter); - reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN); - __raw_writeq(reg, sc->sbm_rxfilter); - return; - } - - - /* - * Program new multicast entries. For now, only use the - * perfect filter. In the future we'll need to use the - * hash filter if the perfect filter overflows - */ - - /* XXX only using perfect filter for now, need to use hash - * XXX if the table overflows */ - - idx = 1; /* skip station address */ - netdev_for_each_mc_addr(ha, dev) { - if (idx == MAC_ADDR_COUNT) - break; - reg = sbmac_addr2reg(ha->addr); - port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); - __raw_writeq(reg, port); - idx++; - } - - /* - * Enable the "accept multicast bits" if we programmed at least one - * multicast.
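
The perfect-filter programming above reduces to filling a fixed number of exact-match slots, with slot 0 reserved for the station address, and enabling matching only if anything was written. A standalone sketch with invented names (ADDR_SLOTS stands in for MAC_ADDR_COUNT, the slots array for the R_MAC_ADDR_BASE registers):

#include <stdio.h>

#define ADDR_SLOTS 8    /* stand-in for MAC_ADDR_COUNT */

static int program_mcast(const unsigned long long *list, int n,
                         unsigned long long *slots)
{
        int idx = 1;    /* skip station address in slot 0 */
        int i;

        for (i = 0; i < n && idx < ADDR_SLOTS; i++)
                slots[idx++] = list[i];
        return idx > 1; /* nonzero -> set the MCAST_EN bit */
}

int main(void)
{
        unsigned long long slots[ADDR_SLOTS] = { 0xaa };  /* station */
        unsigned long long mc[2] = { 0x01005e000001ull, 0x01005e000002ull };

        printf("enable = %d\n", program_mcast(mc, 2, slots)); /* 1 */
        return 0;
}

Entries beyond ADDR_SLOTS are silently dropped, which is exactly the overflow case the XXX comment says should eventually fall back to the hash filter.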
- */ - - if (idx > 1) { - reg = __raw_readq(sc->sbm_rxfilter); - reg |= M_MAC_MCAST_EN; - __raw_writeq(reg, sc->sbm_rxfilter); - } -} - -static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) -{ - if (new_mtu > ENET_PACKET_SIZE) - return -EINVAL; - _dev->mtu = new_mtu; - pr_info("changing the mtu to %d\n", new_mtu); - return 0; -} - -static const struct net_device_ops sbmac_netdev_ops = { - .ndo_open = sbmac_open, - .ndo_stop = sbmac_close, - .ndo_start_xmit = sbmac_start_tx, - .ndo_set_multicast_list = sbmac_set_rx_mode, - .ndo_tx_timeout = sbmac_tx_timeout, - .ndo_do_ioctl = sbmac_mii_ioctl, - .ndo_change_mtu = sb1250_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = sbmac_netpoll, -#endif -}; - -/********************************************************************** - * SBMAC_INIT(dev) - * - * Attach routine - init hardware and hook ourselves into linux - * - * Input parameters: - * dev - net_device structure - * - * Return value: - * status - ********************************************************************* */ - -static int sbmac_init(struct platform_device *pldev, long long base) -{ - struct net_device *dev = dev_get_drvdata(&pldev->dev); - int idx = pldev->id; - struct sbmac_softc *sc = netdev_priv(dev); - unsigned char *eaddr; - uint64_t ea_reg; - int i; - int err; - - sc->sbm_dev = dev; - sc->sbe_idx = idx; - - eaddr = sc->sbm_hwaddr; - - /* - * Read the ethernet address. The firmware left this programmed - * for us in the ethernet address register for each mac. - */ - - ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR); - __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR); - for (i = 0; i < 6; i++) { - eaddr[i] = (uint8_t) (ea_reg & 0xFF); - ea_reg >>= 8; - } - - for (i = 0; i < 6; i++) { - dev->dev_addr[i] = eaddr[i]; - } - - /* - * Initialize context (get pointers to registers and stuff), then - * allocate the memory for the descriptor tables. 
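
The address unpacking in sbmac_init() above is the inverse of sbmac_addr2reg(): six octets are peeled off the low end of the 64-bit ethernet-address register. A standalone sketch of the same loop:

#include <stdint.h>
#include <stdio.h>

static void reg2addr(uint64_t ea_reg, unsigned char *eaddr)
{
        int i;

        for (i = 0; i < 6; i++) {
                eaddr[i] = (unsigned char)(ea_reg & 0xFF);
                ea_reg >>= 8;
        }
}

int main(void)
{
        unsigned char mac[6];

        reg2addr(0x3020105e0002ull, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        /* prints 02:00:5e:10:20:30 */
        return 0;
}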
- */ - - sbmac_initctx(sc); - - /* - * Set up Linux device callins - */ - - spin_lock_init(&(sc->sbm_lock)); - - dev->netdev_ops = &sbmac_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - - netif_napi_add(dev, &sc->napi, sbmac_poll, 16); - - dev->irq = UNIT_INT(idx); - - /* This is needed for PASS2 for Rx H/W checksum feature */ - sbmac_set_iphdr_offset(sc); - - sc->mii_bus = mdiobus_alloc(); - if (sc->mii_bus == NULL) { - err = -ENOMEM; - goto uninit_ctx; - } - - sc->mii_bus->name = sbmac_mdio_string; - snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx); - sc->mii_bus->priv = sc; - sc->mii_bus->read = sbmac_mii_read; - sc->mii_bus->write = sbmac_mii_write; - sc->mii_bus->irq = sc->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - sc->mii_bus->irq[i] = SBMAC_PHY_INT; - - sc->mii_bus->parent = &pldev->dev; - /* - * Probe PHY address - */ - err = mdiobus_register(sc->mii_bus); - if (err) { - printk(KERN_ERR "%s: unable to register MDIO bus\n", - dev->name); - goto free_mdio; - } - dev_set_drvdata(&pldev->dev, sc->mii_bus); - - err = register_netdev(dev); - if (err) { - printk(KERN_ERR "%s.%d: unable to register netdev\n", - sbmac_string, idx); - goto unreg_mdio; - } - - pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); - - if (sc->rx_hw_checksum == ENABLE) - pr_info("%s: enabling TCP rcv checksum\n", dev->name); - - /* - * Display Ethernet address (this is called during the config - * process so we need to finish off the config message that - * was being displayed) - */ - pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", - dev->name, base, eaddr); - - return 0; -unreg_mdio: - mdiobus_unregister(sc->mii_bus); - dev_set_drvdata(&pldev->dev, NULL); -free_mdio: - mdiobus_free(sc->mii_bus); -uninit_ctx: - sbmac_uninitctx(sc); - return err; -} - - -static int sbmac_open(struct net_device *dev) -{ - struct sbmac_softc *sc = netdev_priv(dev); - int err; - - if (debug > 1) - pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq); - - /* - * map/route interrupt (clear status first, in case something - * weird is pending; we haven't initialized the mac registers - * yet) - */ - - __raw_readq(sc->sbm_isr); - err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev); - if (err) { - printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, - dev->irq); - goto out_err; - } - - sc->sbm_speed = sbmac_speed_none; - sc->sbm_duplex = sbmac_duplex_none; - sc->sbm_fc = sbmac_fc_none; - sc->sbm_pause = -1; - sc->sbm_link = 0; - - /* - * Attach to the PHY - */ - err = sbmac_mii_probe(dev); - if (err) - goto out_unregister; - - /* - * Turn on the channel - */ - - sbmac_set_channel_state(sc,sbmac_state_on); - - netif_start_queue(dev); - - sbmac_set_rx_mode(dev); - - phy_start(sc->phy_dev); - - napi_enable(&sc->napi); - - return 0; - -out_unregister: - free_irq(dev->irq, dev); -out_err: - return err; -} - -static int sbmac_mii_probe(struct net_device *dev) -{ - struct sbmac_softc *sc = netdev_priv(dev); - struct phy_device *phy_dev; - int i; - - for (i = 0; i < PHY_MAX_ADDR; i++) { - phy_dev = sc->mii_bus->phy_map[i]; - if (phy_dev) - break; - } - if (!phy_dev) { - printk(KERN_ERR "%s: no PHY found\n", dev->name); - return -ENXIO; - } - - phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0, - PHY_INTERFACE_MODE_GMII); - if (IS_ERR(phy_dev)) { - printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); - return PTR_ERR(phy_dev); - } - - /* Remove any features not supported by the controller */ - phy_dev->supported &= SUPPORTED_10baseT_Half | - 
SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_MII | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause; - phy_dev->advertising = phy_dev->supported; - - pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - dev->name, phy_dev->drv->name, - dev_name(&phy_dev->dev), phy_dev->irq); - - sc->phy_dev = phy_dev; - - return 0; -} - - -static void sbmac_mii_poll(struct net_device *dev) -{ - struct sbmac_softc *sc = netdev_priv(dev); - struct phy_device *phy_dev = sc->phy_dev; - unsigned long flags; - enum sbmac_fc fc; - int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg; - - link_chg = (sc->sbm_link != phy_dev->link); - speed_chg = (sc->sbm_speed != phy_dev->speed); - duplex_chg = (sc->sbm_duplex != phy_dev->duplex); - pause_chg = (sc->sbm_pause != phy_dev->pause); - - if (!link_chg && !speed_chg && !duplex_chg && !pause_chg) - return; /* Hmmm... */ - - if (!phy_dev->link) { - if (link_chg) { - sc->sbm_link = phy_dev->link; - sc->sbm_speed = sbmac_speed_none; - sc->sbm_duplex = sbmac_duplex_none; - sc->sbm_fc = sbmac_fc_disabled; - sc->sbm_pause = -1; - pr_info("%s: link unavailable\n", dev->name); - } - return; - } - - if (phy_dev->duplex == DUPLEX_FULL) { - if (phy_dev->pause) - fc = sbmac_fc_frame; - else - fc = sbmac_fc_disabled; - } else - fc = sbmac_fc_collision; - fc_chg = (sc->sbm_fc != fc); - - pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed, - phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H'); - - spin_lock_irqsave(&sc->sbm_lock, flags); - - sc->sbm_speed = phy_dev->speed; - sc->sbm_duplex = phy_dev->duplex; - sc->sbm_fc = fc; - sc->sbm_pause = phy_dev->pause; - sc->sbm_link = phy_dev->link; - - if ((speed_chg || duplex_chg || fc_chg) && - sc->sbm_state != sbmac_state_off) { - /* - * something changed, restart the channel - */ - if (debug > 1) - pr_debug("%s: restarting channel " - "because PHY state changed\n", dev->name); - sbmac_channel_stop(sc); - sbmac_channel_start(sc); - } - - spin_unlock_irqrestore(&sc->sbm_lock, flags); -} - - -static void sbmac_tx_timeout (struct net_device *dev) -{ - struct sbmac_softc *sc = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&sc->sbm_lock, flags); - - - dev->trans_start = jiffies; /* prevent tx timeout */ - dev->stats.tx_errors++; - - spin_unlock_irqrestore(&sc->sbm_lock, flags); - - printk (KERN_WARNING "%s: Transmit timed out\n",dev->name); -} - - - - -static void sbmac_set_rx_mode(struct net_device *dev) -{ - unsigned long flags; - struct sbmac_softc *sc = netdev_priv(dev); - - spin_lock_irqsave(&sc->sbm_lock, flags); - if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) { - /* - * Promiscuous changed. - */ - - if (dev->flags & IFF_PROMISC) { - sbmac_promiscuous_mode(sc,1); - } - else { - sbmac_promiscuous_mode(sc,0); - } - } - spin_unlock_irqrestore(&sc->sbm_lock, flags); - - /* - * Program the multicasts. Do this every time. 
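
The flow-control decision in sbmac_mii_poll() above follows a simple rule: frame-based pause only makes sense in full duplex, and half duplex falls back to collision-based backpressure. A standalone restatement (the enum names are invented; the driver uses the sbmac_fc values):

#include <stdio.h>

enum fc { FC_DISABLED, FC_COLLISION, FC_FRAME };

static enum fc pick_fc(int full_duplex, int partner_pause)
{
        if (full_duplex)
                return partner_pause ? FC_FRAME : FC_DISABLED;
        return FC_COLLISION;
}

int main(void)
{
        printf("%d %d %d\n",
               pick_fc(1, 1),   /* 2: FC_FRAME */
               pick_fc(1, 0),   /* 0: FC_DISABLED */
               pick_fc(0, 1));  /* 1: FC_COLLISION */
        return 0;
}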
- */ - - sbmac_setmulti(sc); - -} - -static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct sbmac_softc *sc = netdev_priv(dev); - - if (!netif_running(dev) || !sc->phy_dev) - return -EINVAL; - - return phy_mii_ioctl(sc->phy_dev, rq, cmd); -} - -static int sbmac_close(struct net_device *dev) -{ - struct sbmac_softc *sc = netdev_priv(dev); - - napi_disable(&sc->napi); - - phy_stop(sc->phy_dev); - - sbmac_set_channel_state(sc, sbmac_state_off); - - netif_stop_queue(dev); - - if (debug > 1) - pr_debug("%s: Shutting down ethercard\n", dev->name); - - phy_disconnect(sc->phy_dev); - sc->phy_dev = NULL; - free_irq(dev->irq, dev); - - sbdma_emptyring(&(sc->sbm_txdma)); - sbdma_emptyring(&(sc->sbm_rxdma)); - - return 0; -} - -static int sbmac_poll(struct napi_struct *napi, int budget) -{ - struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); - int work_done; - - work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); - sbdma_tx_process(sc, &(sc->sbm_txdma), 1); - - if (work_done < budget) { - napi_complete(napi); - -#ifdef CONFIG_SBMAC_COALESCE - __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | - ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), - sc->sbm_imr); -#else - __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | - (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); -#endif - } - - return work_done; -} - - -static int __devinit sbmac_probe(struct platform_device *pldev) -{ - struct net_device *dev; - struct sbmac_softc *sc; - void __iomem *sbm_base; - struct resource *res; - u64 sbmac_orig_hwaddr; - int err; - - res = platform_get_resource(pldev, IORESOURCE_MEM, 0); - BUG_ON(!res); - sbm_base = ioremap_nocache(res->start, resource_size(res)); - if (!sbm_base) { - printk(KERN_ERR "%s: unable to map device registers\n", - dev_name(&pldev->dev)); - err = -ENOMEM; - goto out_out; - } - - /* - * The R_MAC_ETHERNET_ADDR register will be set to some nonzero - * value for us by the firmware if we're going to use this MAC. - * If we find a zero, skip this MAC. - */ - sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); - pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev), - sbmac_orig_hwaddr ? "" : "not ", (long long)res->start); - if (sbmac_orig_hwaddr == 0) { - err = 0; - goto out_unmap; - } - - /* - * Okay, cool. Initialize this MAC. 
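
sbmac_poll() above follows the standard NAPI contract: if less than the whole budget was consumed, polling stops and the device interrupt is unmasked; if the budget was exhausted, the core will call poll again, so interrupts stay off. A standalone sketch of that control flow, where complete() and unmask() are stand-ins for napi_complete() and the sbm_imr write:

#include <stdio.h>

static void complete(void) { printf("napi_complete\n"); }
static void unmask(void)   { printf("interrupts re-enabled\n"); }

static int poll(int pending, int budget)
{
        int work_done = pending < budget ? pending : budget;

        if (work_done < budget) {       /* ring drained within budget */
                complete();
                unmask();
        }
        return work_done;
}

int main(void)
{
        poll(3, 16);    /* drains: completes and unmasks */
        poll(40, 16);   /* budget exhausted: stays in polling mode */
        return 0;
}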
- */ - dev = alloc_etherdev(sizeof(struct sbmac_softc)); - if (!dev) { - printk(KERN_ERR "%s: unable to allocate etherdev\n", - dev_name(&pldev->dev)); - err = -ENOMEM; - goto out_unmap; - } - - dev_set_drvdata(&pldev->dev, dev); - SET_NETDEV_DEV(dev, &pldev->dev); - - sc = netdev_priv(dev); - sc->sbm_base = sbm_base; - - err = sbmac_init(pldev, res->start); - if (err) - goto out_kfree; - - return 0; - -out_kfree: - free_netdev(dev); - __raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR); - -out_unmap: - iounmap(sbm_base); - -out_out: - return err; -} - -static int __exit sbmac_remove(struct platform_device *pldev) -{ - struct net_device *dev = dev_get_drvdata(&pldev->dev); - struct sbmac_softc *sc = netdev_priv(dev); - - unregister_netdev(dev); - sbmac_uninitctx(sc); - mdiobus_unregister(sc->mii_bus); - mdiobus_free(sc->mii_bus); - iounmap(sc->sbm_base); - free_netdev(dev); - - return 0; -} - -static struct platform_driver sbmac_driver = { - .probe = sbmac_probe, - .remove = __exit_p(sbmac_remove), - .driver = { - .name = sbmac_string, - .owner = THIS_MODULE, - }, -}; - -static int __init sbmac_init_module(void) -{ - return platform_driver_register(&sbmac_driver); -} - -static void __exit sbmac_cleanup_module(void) -{ - platform_driver_unregister(&sbmac_driver); -} - -module_init(sbmac_init_module); -module_exit(sbmac_cleanup_module); diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c deleted file mode 100644 index dc3fbf6..0000000 --- a/drivers/net/tg3.c +++ /dev/null @@ -1,15862 +0,0 @@ -/* - * tg3.c: Broadcom Tigon3 ethernet driver. - * - * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) - * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) - * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2005-2011 Broadcom Corporation. - * - * Firmware is: - * Derived from proprietary unpublished source code, - * Copyright (C) 2000-2003 Broadcom Corporation. - * - * Permission is hereby granted for the distribution of this firmware - * data in hexadecimal or equivalent format, provided this copyright - * notice is accompanying it. - */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include - -#ifdef CONFIG_SPARC -#include -#include -#endif - -#define BAR_0 0 -#define BAR_2 2 - -#include "tg3.h" - -/* Functions & macros to verify TG3_FLAGS types */ - -static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits) -{ - return test_bit(flag, bits); -} - -static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits) -{ - set_bit(flag, bits); -} - -static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) -{ - clear_bit(flag, bits); -} - -#define tg3_flag(tp, flag) \ - _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags) -#define tg3_flag_set(tp, flag) \ - _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags) -#define tg3_flag_clear(tp, flag) \ - _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags) - -#define DRV_MODULE_NAME "tg3" -#define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 119 -#define DRV_MODULE_VERSION \ - __stringify(TG3_MAJ_NUM) "." 
__stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "May 18, 2011" - -#define TG3_DEF_MAC_MODE 0 -#define TG3_DEF_RX_MODE 0 -#define TG3_DEF_TX_MODE 0 -#define TG3_DEF_MSG_ENABLE \ - (NETIF_MSG_DRV | \ - NETIF_MSG_PROBE | \ - NETIF_MSG_LINK | \ - NETIF_MSG_TIMER | \ - NETIF_MSG_IFDOWN | \ - NETIF_MSG_IFUP | \ - NETIF_MSG_RX_ERR | \ - NETIF_MSG_TX_ERR) - -#define TG3_GRC_LCLCTL_PWRSW_DELAY 100 - -/* length of time before we decide the hardware is borked, - * and dev->tx_timeout() should be called to fix the problem - */ - -#define TG3_TX_TIMEOUT (5 * HZ) - -/* hardware minimum and maximum for a single frame's data payload */ -#define TG3_MIN_MTU 60 -#define TG3_MAX_MTU(tp) \ - (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500) - -/* These numbers seem to be hard coded in the NIC firmware somehow. - * You can't change the ring sizes, but you can change where you place - * them in the NIC onboard memory. - */ -#define TG3_RX_STD_RING_SIZE(tp) \ - (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ - TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700) -#define TG3_DEF_RX_RING_PENDING 200 -#define TG3_RX_JMB_RING_SIZE(tp) \ - (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ - TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) -#define TG3_DEF_RX_JUMBO_RING_PENDING 100 -#define TG3_RSS_INDIR_TBL_SIZE 128 - -/* Do not place this n-ring entries value into the tp struct itself, - * we really want to expose these constants to GCC so that modulo et - * al. operations are done with shifts and masks instead of with - * hw multiply/modulo instructions. Another solution would be to - * replace things like '% foo' with '& (foo - 1)'. - */ - -#define TG3_TX_RING_SIZE 512 -#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) - -#define TG3_RX_STD_RING_BYTES(tp) \ - (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp)) -#define TG3_RX_JMB_RING_BYTES(tp) \ - (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp)) -#define TG3_RX_RCB_RING_BYTES(tp) \ - (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1)) -#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ - TG3_TX_RING_SIZE) -#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) - -#define TG3_DMA_BYTE_ENAB 64 - -#define TG3_RX_STD_DMA_SZ 1536 -#define TG3_RX_JMB_DMA_SZ 9046 - -#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB) - -#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) -#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) - -#define TG3_RX_STD_BUFF_RING_SIZE(tp) \ - (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp)) - -#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \ - (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp)) - -/* Due to a hardware bug, the 5701 can only DMA to memory addresses - * that are at least dword aligned when used in PCIX mode. The driver - * works around this bug by double copying the packet. This workaround - * is built into the normal double copy length check for efficiency. - * - * However, the double copy is only necessary on those architectures - * where unaligned memory accesses are inefficient. For those architectures - * where unaligned memory accesses incur little penalty, we can reintegrate - * the 5701 in the normal rx path. Doing so saves a device structure - * dereference by hardcoding the double copy threshold in place. 
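
The NEXT_TX() macro above is the "% foo" to "& (foo - 1)" replacement that the ring-size comment describes: for a power-of-two ring, the wrap-around becomes a cheap AND. A standalone sketch (TX_RING stands in for TG3_TX_RING_SIZE):

#include <stdio.h>

#define TX_RING 512     /* must be a power of two for this to work */

static unsigned int next_tx(unsigned int n)
{
        return (n + 1) & (TX_RING - 1);
}

int main(void)
{
        printf("%u %u\n", next_tx(5), next_tx(511));  /* 6 0 */
        return 0;
}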
- */ -#define TG3_RX_COPY_THRESHOLD 256 -#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) - #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD -#else - #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) -#endif - -/* minimum number of free TX descriptors required to wake up TX process */ -#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) -#define TG3_TX_BD_DMA_MAX 4096 - -#define TG3_RAW_IP_ALIGN 2 - -#define TG3_FW_UPDATE_TIMEOUT_SEC 5 - -#define FIRMWARE_TG3 "tigon/tg3.bin" -#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" -#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" - -static char version[] __devinitdata = - DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; - -MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); -MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); -MODULE_FIRMWARE(FIRMWARE_TG3); -MODULE_FIRMWARE(FIRMWARE_TG3TSO); -MODULE_FIRMWARE(FIRMWARE_TG3TSO5); - -static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ -module_param(tg3_debug, int, 0); -MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); - -static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
PCI_DEVICE_ID_TIGON3_5754M)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, - {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, - {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, - {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, - {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, - {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, - {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, - {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, - {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */ - {} -}; - -MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); - -static const struct { - const char string[ETH_GSTRING_LEN]; -} ethtool_stats_keys[] = { - { "rx_octets" }, - { "rx_fragments" }, - { "rx_ucast_packets" }, - { "rx_mcast_packets" }, - { "rx_bcast_packets" }, - { "rx_fcs_errors" }, - { "rx_align_errors" }, - { "rx_xon_pause_rcvd" 
}, - { "rx_xoff_pause_rcvd" }, - { "rx_mac_ctrl_rcvd" }, - { "rx_xoff_entered" }, - { "rx_frame_too_long_errors" }, - { "rx_jabbers" }, - { "rx_undersize_packets" }, - { "rx_in_length_errors" }, - { "rx_out_length_errors" }, - { "rx_64_or_less_octet_packets" }, - { "rx_65_to_127_octet_packets" }, - { "rx_128_to_255_octet_packets" }, - { "rx_256_to_511_octet_packets" }, - { "rx_512_to_1023_octet_packets" }, - { "rx_1024_to_1522_octet_packets" }, - { "rx_1523_to_2047_octet_packets" }, - { "rx_2048_to_4095_octet_packets" }, - { "rx_4096_to_8191_octet_packets" }, - { "rx_8192_to_9022_octet_packets" }, - - { "tx_octets" }, - { "tx_collisions" }, - - { "tx_xon_sent" }, - { "tx_xoff_sent" }, - { "tx_flow_control" }, - { "tx_mac_errors" }, - { "tx_single_collisions" }, - { "tx_mult_collisions" }, - { "tx_deferred" }, - { "tx_excessive_collisions" }, - { "tx_late_collisions" }, - { "tx_collide_2times" }, - { "tx_collide_3times" }, - { "tx_collide_4times" }, - { "tx_collide_5times" }, - { "tx_collide_6times" }, - { "tx_collide_7times" }, - { "tx_collide_8times" }, - { "tx_collide_9times" }, - { "tx_collide_10times" }, - { "tx_collide_11times" }, - { "tx_collide_12times" }, - { "tx_collide_13times" }, - { "tx_collide_14times" }, - { "tx_collide_15times" }, - { "tx_ucast_packets" }, - { "tx_mcast_packets" }, - { "tx_bcast_packets" }, - { "tx_carrier_sense_errors" }, - { "tx_discards" }, - { "tx_errors" }, - - { "dma_writeq_full" }, - { "dma_write_prioq_full" }, - { "rxbds_empty" }, - { "rx_discards" }, - { "rx_errors" }, - { "rx_threshold_hit" }, - - { "dma_readq_full" }, - { "dma_read_prioq_full" }, - { "tx_comp_queue_full" }, - - { "ring_set_send_prod_index" }, - { "ring_status_update" }, - { "nic_irqs" }, - { "nic_avoided_irqs" }, - { "nic_tx_threshold_hit" }, - - { "mbuf_lwm_thresh_hit" }, -}; - -#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) - - -static const struct { - const char string[ETH_GSTRING_LEN]; -} ethtool_test_keys[] = { - { "nvram test (online) " }, - { "link test (online) " }, - { "register test (offline)" }, - { "memory test (offline)" }, - { "loopback test (offline)" }, - { "interrupt test (offline)" }, -}; - -#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) - - -static void tg3_write32(struct tg3 *tp, u32 off, u32 val) -{ - writel(val, tp->regs + off); -} - -static u32 tg3_read32(struct tg3 *tp, u32 off) -{ - return readl(tp->regs + off); -} - -static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) -{ - writel(val, tp->aperegs + off); -} - -static u32 tg3_ape_read32(struct tg3 *tp, u32 off) -{ - return readl(tp->aperegs + off); -} - -static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) -{ - unsigned long flags; - - spin_lock_irqsave(&tp->indirect_lock, flags); - pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); - pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); - spin_unlock_irqrestore(&tp->indirect_lock, flags); -} - -static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) -{ - writel(val, tp->regs + off); - readl(tp->regs + off); -} - -static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) -{ - unsigned long flags; - u32 val; - - spin_lock_irqsave(&tp->indirect_lock, flags); - pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); - pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); - spin_unlock_irqrestore(&tp->indirect_lock, flags); - return val; -} - -static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) -{ - unsigned long flags; - - if (off == (MAILBOX_RCVRET_CON_IDX_0 + 
TG3_64BIT_REG_LOW)) { - pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + - TG3_64BIT_REG_LOW, val); - return; - } - if (off == TG3_RX_STD_PROD_IDX_REG) { - pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + - TG3_64BIT_REG_LOW, val); - return; - } - - spin_lock_irqsave(&tp->indirect_lock, flags); - pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); - pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); - spin_unlock_irqrestore(&tp->indirect_lock, flags); - - /* In indirect mode when disabling interrupts, we also need - * to clear the interrupt bit in the GRC local ctrl register. - */ - if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && - (val == 0x1)) { - pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, - tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); - } -} - -static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) -{ - unsigned long flags; - u32 val; - - spin_lock_irqsave(&tp->indirect_lock, flags); - pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); - pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); - spin_unlock_irqrestore(&tp->indirect_lock, flags); - return val; -} - -/* usec_wait specifies the wait time in usec when writing to certain registers - * where it is unsafe to read back the register without some delay. - * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power. - * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed. - */ -static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) -{ - if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) - /* Non-posted methods */ - tp->write32(tp, off, val); - else { - /* Posted method */ - tg3_write32(tp, off, val); - if (usec_wait) - udelay(usec_wait); - tp->read32(tp, off); - } - /* Wait again after the read for the posted method to guarantee that - * the wait time is met. 
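
The timing argument in _tw32_flush() above can be restated as a small sequence: write, optionally delay, read the register back to force the posted write out to the device, then delay again so the full interval is honoured after the write has actually landed. A standalone sketch with mocked register accessors (wr/rd/delay_us are invented; the driver uses tg3_write32, tp->read32 and udelay):

#include <stdio.h>

static unsigned int reg;

static void wr(unsigned int v)        { reg = v; printf("write %u\n", v); }
static unsigned int rd(void)          { printf("read back\n"); return reg; }
static void delay_us(unsigned int us) { printf("delay %u us\n", us); }

static void write_flush(unsigned int val, unsigned int usec_wait)
{
        wr(val);
        if (usec_wait)
                delay_us(usec_wait);
        (void)rd();             /* flush the posted write */
        if (usec_wait)
                delay_us(usec_wait);    /* wait measured after the flush */
}

int main(void)
{
        write_flush(1, 100);
        return 0;
}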
- */ - if (usec_wait) - udelay(usec_wait); -} - -static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) -{ - tp->write32_mbox(tp, off, val); - if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND)) - tp->read32_mbox(tp, off); -} - -static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) -{ - void __iomem *mbox = tp->regs + off; - writel(val, mbox); - if (tg3_flag(tp, TXD_MBOX_HWBUG)) - writel(val, mbox); - if (tg3_flag(tp, MBOX_WRITE_REORDER)) - readl(mbox); -} - -static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) -{ - return readl(tp->regs + off + GRCMBOX_BASE); -} - -static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) -{ - writel(val, tp->regs + off + GRCMBOX_BASE); -} - -#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) -#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) -#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) -#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) -#define tr32_mailbox(reg) tp->read32_mbox(tp, reg) - -#define tw32(reg, val) tp->write32(tp, reg, val) -#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0) -#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us)) -#define tr32(reg) tp->read32(tp, reg) - -static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) -{ - unsigned long flags; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && - (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) - return; - - spin_lock_irqsave(&tp->indirect_lock, flags); - if (tg3_flag(tp, SRAM_USE_CONFIG)) { - pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); - pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); - - /* Always leave this as zero. */ - pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); - } else { - tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); - tw32_f(TG3PCI_MEM_WIN_DATA, val); - - /* Always leave this as zero. */ - tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); - } - spin_unlock_irqrestore(&tp->indirect_lock, flags); -} - -static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) -{ - unsigned long flags; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && - (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { - *val = 0; - return; - } - - spin_lock_irqsave(&tp->indirect_lock, flags); - if (tg3_flag(tp, SRAM_USE_CONFIG)) { - pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); - pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); - - /* Always leave this as zero. */ - pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); - } else { - tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); - *val = tr32(TG3PCI_MEM_WIN_DATA); - - /* Always leave this as zero. */ - tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); - } - spin_unlock_irqrestore(&tp->indirect_lock, flags); -} - -static void tg3_ape_lock_init(struct tg3 *tp) -{ - int i; - u32 regbase, bit; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) - regbase = TG3_APE_LOCK_GRANT; - else - regbase = TG3_APE_PER_LOCK_GRANT; - - /* Make sure the driver hasn't any stale locks. */ - for (i = 0; i < 8; i++) { - if (i == TG3_APE_LOCK_GPIO) - continue; - tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); - } - - /* Clear the correct bit of the GPIO lock too. 
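tg3_write_mem() and tg3_read_mem() earlier in this hunk use a classic windowed-access scheme: the NIC's SRAM is larger than the mapped register space, so one register selects the target address and a second carries the data, all under indirect_lock. A rough standalone sketch; cfg_write() is a stand-in for pci_write_config_dword() and the offsets are invented:

#include <stdint.h>

#define WIN_BASE 0x7c	/* hypothetical window base-address register */
#define WIN_DATA 0x84	/* hypothetical window data register */

static void cfg_write(uint32_t reg, uint32_t val)
{
	(void)reg;
	(void)val;	/* stub for a PCI config-space write */
}

static void sram_write(uint32_t off, uint32_t val)
{
	/* caller holds the equivalent of tp->indirect_lock */
	cfg_write(WIN_BASE, off);	/* aim the window */
	cfg_write(WIN_DATA, val);	/* write through it */
	cfg_write(WIN_BASE, 0);		/* always park the window at zero */
}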
*/ - if (!tp->pci_fn) - bit = APE_LOCK_GRANT_DRIVER; - else - bit = 1 << tp->pci_fn; - - tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit); -} - -static int tg3_ape_lock(struct tg3 *tp, int locknum) -{ - int i, off; - int ret = 0; - u32 status, req, gnt, bit; - - if (!tg3_flag(tp, ENABLE_APE)) - return 0; - - switch (locknum) { - case TG3_APE_LOCK_GPIO: - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) - return 0; - case TG3_APE_LOCK_GRC: - case TG3_APE_LOCK_MEM: - break; - default: - return -EINVAL; - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { - req = TG3_APE_LOCK_REQ; - gnt = TG3_APE_LOCK_GRANT; - } else { - req = TG3_APE_PER_LOCK_REQ; - gnt = TG3_APE_PER_LOCK_GRANT; - } - - off = 4 * locknum; - - if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) - bit = APE_LOCK_REQ_DRIVER; - else - bit = 1 << tp->pci_fn; - - tg3_ape_write32(tp, req + off, bit); - - /* Wait for up to 1 millisecond to acquire lock. */ - for (i = 0; i < 100; i++) { - status = tg3_ape_read32(tp, gnt + off); - if (status == bit) - break; - udelay(10); - } - - if (status != bit) { - /* Revoke the lock request. */ - tg3_ape_write32(tp, gnt + off, bit); - ret = -EBUSY; - } - - return ret; -} - -static void tg3_ape_unlock(struct tg3 *tp, int locknum) -{ - u32 gnt, bit; - - if (!tg3_flag(tp, ENABLE_APE)) - return; - - switch (locknum) { - case TG3_APE_LOCK_GPIO: - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) - return; - case TG3_APE_LOCK_GRC: - case TG3_APE_LOCK_MEM: - break; - default: - return; - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) - gnt = TG3_APE_LOCK_GRANT; - else - gnt = TG3_APE_PER_LOCK_GRANT; - - if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) - bit = APE_LOCK_GRANT_DRIVER; - else - bit = 1 << tp->pci_fn; - - tg3_ape_write32(tp, gnt + 4 * locknum, bit); -} - -static void tg3_disable_ints(struct tg3 *tp) -{ - int i; - - tw32(TG3PCI_MISC_HOST_CTRL, - (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); - for (i = 0; i < tp->irq_max; i++) - tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); -} - -static void tg3_enable_ints(struct tg3 *tp) -{ - int i; - - tp->irq_sync = 0; - wmb(); - - tw32(TG3PCI_MISC_HOST_CTRL, - (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); - - tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - - tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); - if (tg3_flag(tp, 1SHOT_MSI)) - tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); - - tp->coal_now |= tnapi->coal_now; - } - - /* Force an initial interrupt */ - if (!tg3_flag(tp, TAGGED_STATUS) && - (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) - tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); - else - tw32(HOSTCC_MODE, tp->coal_now); - - tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now); -} - -static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) -{ - struct tg3 *tp = tnapi->tp; - struct tg3_hw_status *sblk = tnapi->hw_status; - unsigned int work_exists = 0; - - /* check for phy events */ - if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { - if (sblk->status & SD_STATUS_LINK_CHG) - work_exists = 1; - } - /* check for RX/TX work to do */ - if (sblk->idx[0].tx_consumer != tnapi->tx_cons || - *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) - work_exists = 1; - - return work_exists; -} - -/* tg3_int_reenable - * similar to tg3_enable_ints, but it accurately determines whether there - * is new work pending and can return without 
flushing the PIO write - * which reenables interrupts - */ -static void tg3_int_reenable(struct tg3_napi *tnapi) -{ - struct tg3 *tp = tnapi->tp; - - tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); - mmiowb(); - - /* When doing tagged status, this work check is unnecessary. - * The last_tag we write above tells the chip which piece of - * work we've completed. - */ - if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi)) - tw32(HOSTCC_MODE, tp->coalesce_mode | - HOSTCC_MODE_ENABLE | tnapi->coal_now); -} - -static void tg3_switch_clocks(struct tg3 *tp) -{ - u32 clock_ctrl; - u32 orig_clock_ctrl; - - if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS)) - return; - - clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); - - orig_clock_ctrl = clock_ctrl; - clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | - CLOCK_CTRL_CLKRUN_OENABLE | - 0x1f); - tp->pci_clock_ctrl = clock_ctrl; - - if (tg3_flag(tp, 5705_PLUS)) { - if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { - tw32_wait_f(TG3PCI_CLOCK_CTRL, - clock_ctrl | CLOCK_CTRL_625_CORE, 40); - } - } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { - tw32_wait_f(TG3PCI_CLOCK_CTRL, - clock_ctrl | - (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK), - 40); - tw32_wait_f(TG3PCI_CLOCK_CTRL, - clock_ctrl | (CLOCK_CTRL_ALTCLK), - 40); - } - tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); -} - -#define PHY_BUSY_LOOPS 5000 - -static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) -{ - u32 frame_val; - unsigned int loops; - int ret; - - if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { - tw32_f(MAC_MI_MODE, - (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); - udelay(80); - } - - *val = 0x0; - - frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & - MI_COM_PHY_ADDR_MASK); - frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & - MI_COM_REG_ADDR_MASK); - frame_val |= (MI_COM_CMD_READ | MI_COM_START); - - tw32_f(MAC_MI_COM, frame_val); - - loops = PHY_BUSY_LOOPS; - while (loops != 0) { - udelay(10); - frame_val = tr32(MAC_MI_COM); - - if ((frame_val & MI_COM_BUSY) == 0) { - udelay(5); - frame_val = tr32(MAC_MI_COM); - break; - } - loops -= 1; - } - - ret = -EBUSY; - if (loops != 0) { - *val = frame_val & MI_COM_DATA_MASK; - ret = 0; - } - - if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { - tw32_f(MAC_MI_MODE, tp->mi_mode); - udelay(80); - } - - return ret; -} - -static int tg3_writephy(struct tg3 *tp, int reg, u32 val) -{ - u32 frame_val; - unsigned int loops; - int ret; - - if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && - (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL)) - return 0; - - if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { - tw32_f(MAC_MI_MODE, - (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); - udelay(80); - } - - frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & - MI_COM_PHY_ADDR_MASK); - frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & - MI_COM_REG_ADDR_MASK); - frame_val |= (val & MI_COM_DATA_MASK); - frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); - - tw32_f(MAC_MI_COM, frame_val); - - loops = PHY_BUSY_LOOPS; - while (loops != 0) { - udelay(10); - frame_val = tr32(MAC_MI_COM); - if ((frame_val & MI_COM_BUSY) == 0) { - udelay(5); - frame_val = tr32(MAC_MI_COM); - break; - } - loops -= 1; - } - - ret = -EBUSY; - if (loops != 0) - ret = 0; - - if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { - tw32_f(MAC_MI_MODE, tp->mi_mode); - udelay(80); - } - - return ret; -} - -static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) -{ - int err; - - err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); - if (err) - goto done; - - err = tg3_writephy(tp, 
MII_TG3_MMD_ADDRESS, addr); - if (err) - goto done; - - err = tg3_writephy(tp, MII_TG3_MMD_CTRL, - MII_TG3_MMD_CTRL_DATA_NOINC | devad); - if (err) - goto done; - - err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); - -done: - return err; -} - -static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) -{ - int err; - - err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); - if (err) - goto done; - - err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); - if (err) - goto done; - - err = tg3_writephy(tp, MII_TG3_MMD_CTRL, - MII_TG3_MMD_CTRL_DATA_NOINC | devad); - if (err) - goto done; - - err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); - -done: - return err; -} - -static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) -{ - int err; - - err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); - if (!err) - err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); - - return err; -} - -static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) -{ - int err; - - err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); - if (!err) - err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); - - return err; -} - -static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val) -{ - int err; - - err = tg3_writephy(tp, MII_TG3_AUX_CTRL, - (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) | - MII_TG3_AUXCTL_SHDWSEL_MISC); - if (!err) - err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val); - - return err; -} - -static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) -{ - if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC) - set |= MII_TG3_AUXCTL_MISC_WREN; - - return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); -} - -#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ - tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ - MII_TG3_AUXCTL_ACTL_TX_6DB) - -#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ - tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ - MII_TG3_AUXCTL_ACTL_TX_6DB); - -static int tg3_bmcr_reset(struct tg3 *tp) -{ - u32 phy_control; - int limit, err; - - /* OK, reset it, and poll the BMCR_RESET bit until it - * clears or we time out. 
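The comment above describes the driver's standard bounded-poll idiom, which recurs for MI completion, APE locks and NVRAM arbitration as well: sample a status bit a fixed number of times, delaying between samples, and give up after the budget is spent. Distilled into a sketch with a stubbed register read:

#include <stdint.h>

static uint32_t read_status(void)
{
	return 0;	/* stub for a PHY/register read */
}

/* Spin until 'bit' clears or the retry budget runs out. */
static int wait_bit_clear(uint32_t bit, int limit)
{
	while (limit-- > 0) {
		if (!(read_status() & bit))
			return 0;	/* cleared in time */
		/* the driver udelay()s here between samples */
	}
	return -1;		/* timed out; callers map this to -EBUSY */
}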
- */ - phy_control = BMCR_RESET; - err = tg3_writephy(tp, MII_BMCR, phy_control); - if (err != 0) - return -EBUSY; - - limit = 5000; - while (limit--) { - err = tg3_readphy(tp, MII_BMCR, &phy_control); - if (err != 0) - return -EBUSY; - - if ((phy_control & BMCR_RESET) == 0) { - udelay(40); - break; - } - udelay(10); - } - if (limit < 0) - return -EBUSY; - - return 0; -} - -static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) -{ - struct tg3 *tp = bp->priv; - u32 val; - - spin_lock_bh(&tp->lock); - - if (tg3_readphy(tp, reg, &val)) - val = -EIO; - - spin_unlock_bh(&tp->lock); - - return val; -} - -static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) -{ - struct tg3 *tp = bp->priv; - u32 ret = 0; - - spin_lock_bh(&tp->lock); - - if (tg3_writephy(tp, reg, val)) - ret = -EIO; - - spin_unlock_bh(&tp->lock); - - return ret; -} - -static int tg3_mdio_reset(struct mii_bus *bp) -{ - return 0; -} - -static void tg3_mdio_config_5785(struct tg3 *tp) -{ - u32 val; - struct phy_device *phydev; - - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { - case PHY_ID_BCM50610: - case PHY_ID_BCM50610M: - val = MAC_PHYCFG2_50610_LED_MODES; - break; - case PHY_ID_BCMAC131: - val = MAC_PHYCFG2_AC131_LED_MODES; - break; - case PHY_ID_RTL8211C: - val = MAC_PHYCFG2_RTL8211C_LED_MODES; - break; - case PHY_ID_RTL8201E: - val = MAC_PHYCFG2_RTL8201E_LED_MODES; - break; - default: - return; - } - - if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { - tw32(MAC_PHYCFG2, val); - - val = tr32(MAC_PHYCFG1); - val &= ~(MAC_PHYCFG1_RGMII_INT | - MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); - val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; - tw32(MAC_PHYCFG1, val); - - return; - } - - if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) - val |= MAC_PHYCFG2_EMODE_MASK_MASK | - MAC_PHYCFG2_FMODE_MASK_MASK | - MAC_PHYCFG2_GMODE_MASK_MASK | - MAC_PHYCFG2_ACT_MASK_MASK | - MAC_PHYCFG2_QUAL_MASK_MASK | - MAC_PHYCFG2_INBAND_ENABLE; - - tw32(MAC_PHYCFG2, val); - - val = tr32(MAC_PHYCFG1); - val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | - MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); - if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { - if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) - val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; - if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) - val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; - } - val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | - MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; - tw32(MAC_PHYCFG1, val); - - val = tr32(MAC_EXT_RGMII_MODE); - val &= ~(MAC_RGMII_MODE_RX_INT_B | - MAC_RGMII_MODE_RX_QUALITY | - MAC_RGMII_MODE_RX_ACTIVITY | - MAC_RGMII_MODE_RX_ENG_DET | - MAC_RGMII_MODE_TX_ENABLE | - MAC_RGMII_MODE_TX_LOWPWR | - MAC_RGMII_MODE_TX_RESET); - if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { - if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) - val |= MAC_RGMII_MODE_RX_INT_B | - MAC_RGMII_MODE_RX_QUALITY | - MAC_RGMII_MODE_RX_ACTIVITY | - MAC_RGMII_MODE_RX_ENG_DET; - if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) - val |= MAC_RGMII_MODE_TX_ENABLE | - MAC_RGMII_MODE_TX_LOWPWR | - MAC_RGMII_MODE_TX_RESET; - } - tw32(MAC_EXT_RGMII_MODE, val); -} - -static void tg3_mdio_start(struct tg3 *tp) -{ - tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; - tw32_f(MAC_MI_MODE, tp->mi_mode); - udelay(80); - - if (tg3_flag(tp, MDIOBUS_INITED) && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) - tg3_mdio_config_5785(tp); -} - -static int tg3_mdio_init(struct tg3 *tp) -{ - int i; - u32 reg; - struct 
phy_device *phydev;
-
-	if (tg3_flag(tp, 5717_PLUS)) {
- u32 is_serdes;
-
- tp->phy_addr = tp->pci_fn + 1;
-
- if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
- is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
- else
- is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
- TG3_CPMU_PHY_STRAP_IS_SERDES;
- if (is_serdes)
- tp->phy_addr += 7;
- } else
- tp->phy_addr = TG3_PHY_MII_ADDR;
-
- tg3_mdio_start(tp);
-
- if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
- return 0;
-
- tp->mdio_bus = mdiobus_alloc();
- if (tp->mdio_bus == NULL)
- return -ENOMEM;
-
- tp->mdio_bus->name = "tg3 mdio bus";
- snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
- (tp->pdev->bus->number << 8) | tp->pdev->devfn);
- tp->mdio_bus->priv = tp;
- tp->mdio_bus->parent = &tp->pdev->dev;
- tp->mdio_bus->read = &tg3_mdio_read;
- tp->mdio_bus->write = &tg3_mdio_write;
- tp->mdio_bus->reset = &tg3_mdio_reset;
- tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
- tp->mdio_bus->irq = &tp->mdio_irq[0];
-
- for (i = 0; i < PHY_MAX_ADDR; i++)
- tp->mdio_bus->irq[i] = PHY_POLL;
-
- /* The bus registration will look for all the PHYs on the mdio bus.
- * Unfortunately, it does not ensure the PHY is powered up before
- * accessing the PHY ID registers. A chip reset is the
- * quickest way to bring the device back to an operational state..
- */
- if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
- tg3_bmcr_reset(tp);
-
- i = mdiobus_register(tp->mdio_bus);
- if (i) {
- dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
- mdiobus_free(tp->mdio_bus);
- return i;
- }
-
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
-
- if (!phydev || !phydev->drv) {
- dev_warn(&tp->pdev->dev, "No PHY devices\n");
- mdiobus_unregister(tp->mdio_bus);
- mdiobus_free(tp->mdio_bus);
- return -ENODEV;
- }
-
- switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
- case PHY_ID_BCM57780:
- phydev->interface = PHY_INTERFACE_MODE_GMII;
- phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
- break;
- case PHY_ID_BCM50610:
- case PHY_ID_BCM50610M:
- phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
- PHY_BRCM_RX_REFCLK_UNUSED |
- PHY_BRCM_DIS_TXCRXC_NOENRGY |
- PHY_BRCM_AUTO_PWRDWN_ENABLE;
- if (tg3_flag(tp, RGMII_INBAND_DISABLE))
- phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
- if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
- phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
- if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
- phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
- /* fallthru */
- case PHY_ID_RTL8211C:
- phydev->interface = PHY_INTERFACE_MODE_RGMII;
- break;
- case PHY_ID_RTL8201E:
- case PHY_ID_BCMAC131:
- phydev->interface = PHY_INTERFACE_MODE_MII;
- phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
- tp->phy_flags |= TG3_PHYFLG_IS_FET;
- break;
- }
-
- tg3_flag_set(tp, MDIOBUS_INITED);
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
- tg3_mdio_config_5785(tp);
-
- return 0;
-}
-
-static void tg3_mdio_fini(struct tg3 *tp)
-{
- if (tg3_flag(tp, MDIOBUS_INITED)) {
- tg3_flag_clear(tp, MDIOBUS_INITED);
- mdiobus_unregister(tp->mdio_bus);
- mdiobus_free(tp->mdio_bus);
- }
-}
-
-/* tp->lock is held. */
-static inline void tg3_generate_fw_event(struct tg3 *tp)
-{
- u32 val;
-
- val = tr32(GRC_RX_CPU_EVENT);
- val |= GRC_RX_CPU_DRIVER_EVENT;
- tw32_f(GRC_RX_CPU_EVENT, val);
-
- tp->last_event_jiffies = jiffies;
-}
-
-#define TG3_FW_EVENT_TIMEOUT_USEC 2500
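tg3_generate_fw_event() above and tg3_wait_for_event_ack() just below form a doorbell handshake with the bootcode: the driver sets GRC_RX_CPU_DRIVER_EVENT and timestamps it, and before queueing the next message it waits up to TG3_FW_EVENT_TIMEOUT_USEC for the firmware to clear the bit. The shape of the handshake, as a sketch with a fake event register:

#include <stdint.h>

static volatile uint32_t event_reg;	/* stands in for GRC_RX_CPU_EVENT */
#define DRIVER_EVENT 0x1		/* hypothetical event bit */

static void ring_doorbell(void)
{
	event_reg |= DRIVER_EVENT;	/* firmware clears this when done */
}

static int wait_ack(int tries)
{
	while (tries--)
		if (!(event_reg & DRIVER_EVENT))
			return 0;	/* previous event was consumed */
	return -1;			/* firmware never acknowledged */
}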
-/* tp->lock is held. */
-static void tg3_wait_for_event_ack(struct tg3 *tp)
-{
- int i;
- unsigned int delay_cnt;
- long time_remain;
-
- /* If enough time has passed, no wait is necessary. */
- time_remain = (long)(tp->last_event_jiffies + 1 +
- usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
- (long)jiffies;
- if (time_remain < 0)
- return;
-
- /* Check if we can shorten the wait time. */
- delay_cnt = jiffies_to_usecs(time_remain);
- if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
- delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
- delay_cnt = (delay_cnt >> 3) + 1;
-
- for (i = 0; i < delay_cnt; i++) {
- if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
- break;
- udelay(8);
- }
-}
-
-/* tp->lock is held. */
-static void tg3_ump_link_report(struct tg3 *tp)
-{
- u32 reg;
- u32 val;
-
- if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
- return;
-
- tg3_wait_for_event_ack(tp);
-
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
-
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
-
- val = 0;
- if (!tg3_readphy(tp, MII_BMCR, &reg))
- val = reg << 16;
- if (!tg3_readphy(tp, MII_BMSR, &reg))
- val |= (reg & 0xffff);
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
-
- val = 0;
- if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
- val = reg << 16;
- if (!tg3_readphy(tp, MII_LPA, &reg))
- val |= (reg & 0xffff);
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
-
- val = 0;
- if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
- if (!tg3_readphy(tp, MII_CTRL1000, &reg))
- val = reg << 16;
- if (!tg3_readphy(tp, MII_STAT1000, &reg))
- val |= (reg & 0xffff);
- }
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
-
- if (!tg3_readphy(tp, MII_PHYADDR, &reg))
- val = reg << 16;
- else
- val = 0;
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
-
- tg3_generate_fw_event(tp);
-}
-
-static void tg3_link_report(struct tg3 *tp)
-{
- if (!netif_carrier_ok(tp->dev)) {
- netif_info(tp, link, tp->dev, "Link is down\n");
- tg3_ump_link_report(tp);
- } else if (netif_msg_link(tp)) {
- netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
- (tp->link_config.active_speed == SPEED_1000 ?
- 1000 :
- (tp->link_config.active_speed == SPEED_100 ?
- 100 : 10)),
- (tp->link_config.active_duplex == DUPLEX_FULL ?
- "full" : "half"));
-
- netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
- (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
- "on" : "off",
- (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
- "on" : "off");
-
- if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
- netdev_info(tp->dev, "EEE is %s\n",
- tp->setlpicnt ?
"enabled" : "disabled"); - - tg3_ump_link_report(tp); - } -} - -static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) -{ - u16 miireg; - - if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) - miireg = ADVERTISE_PAUSE_CAP; - else if (flow_ctrl & FLOW_CTRL_TX) - miireg = ADVERTISE_PAUSE_ASYM; - else if (flow_ctrl & FLOW_CTRL_RX) - miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - else - miireg = 0; - - return miireg; -} - -static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) -{ - u16 miireg; - - if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) - miireg = ADVERTISE_1000XPAUSE; - else if (flow_ctrl & FLOW_CTRL_TX) - miireg = ADVERTISE_1000XPSE_ASYM; - else if (flow_ctrl & FLOW_CTRL_RX) - miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; - else - miireg = 0; - - return miireg; -} - -static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) -{ - u8 cap = 0; - - if (lcladv & ADVERTISE_1000XPAUSE) { - if (lcladv & ADVERTISE_1000XPSE_ASYM) { - if (rmtadv & LPA_1000XPAUSE) - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - else if (rmtadv & LPA_1000XPAUSE_ASYM) - cap = FLOW_CTRL_RX; - } else { - if (rmtadv & LPA_1000XPAUSE) - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - } - } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { - if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) - cap = FLOW_CTRL_TX; - } - - return cap; -} - -static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) -{ - u8 autoneg; - u8 flowctrl = 0; - u32 old_rx_mode = tp->rx_mode; - u32 old_tx_mode = tp->tx_mode; - - if (tg3_flag(tp, USE_PHYLIB)) - autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; - else - autoneg = tp->link_config.autoneg; - - if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { - if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) - flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); - else - flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); - } else - flowctrl = tp->link_config.flowctrl; - - tp->link_config.active_flowctrl = flowctrl; - - if (flowctrl & FLOW_CTRL_RX) - tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; - else - tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; - - if (old_rx_mode != tp->rx_mode) - tw32_f(MAC_RX_MODE, tp->rx_mode); - - if (flowctrl & FLOW_CTRL_TX) - tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; - else - tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; - - if (old_tx_mode != tp->tx_mode) - tw32_f(MAC_TX_MODE, tp->tx_mode); -} - -static void tg3_adjust_link(struct net_device *dev) -{ - u8 oldflowctrl, linkmesg = 0; - u32 mac_mode, lcl_adv, rmt_adv; - struct tg3 *tp = netdev_priv(dev); - struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - - spin_lock_bh(&tp->lock); - - mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | - MAC_MODE_HALF_DUPLEX); - - oldflowctrl = tp->link_config.active_flowctrl; - - if (phydev->link) { - lcl_adv = 0; - rmt_adv = 0; - - if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) - mac_mode |= MAC_MODE_PORT_MODE_MII; - else if (phydev->speed == SPEED_1000 || - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) - mac_mode |= MAC_MODE_PORT_MODE_GMII; - else - mac_mode |= MAC_MODE_PORT_MODE_MII; - - if (phydev->duplex == DUPLEX_HALF) - mac_mode |= MAC_MODE_HALF_DUPLEX; - else { - lcl_adv = tg3_advert_flowctrl_1000T( - tp->link_config.flowctrl); - - if (phydev->pause) - rmt_adv = LPA_PAUSE_CAP; - if (phydev->asym_pause) - rmt_adv |= LPA_PAUSE_ASYM; - } - - tg3_setup_flow_control(tp, lcl_adv, rmt_adv); - } else - mac_mode |= MAC_MODE_PORT_MODE_GMII; - - if (mac_mode != tp->mac_mode) { - tp->mac_mode = 
mac_mode; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { - if (phydev->speed == SPEED_10) - tw32(MAC_MI_STAT, - MAC_MI_STAT_10MBPS_MODE | - MAC_MI_STAT_LNKSTAT_ATTN_ENAB); - else - tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); - } - - if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) - tw32(MAC_TX_LENGTHS, - ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT) | - (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); - else - tw32(MAC_TX_LENGTHS, - ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT) | - (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); - - if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) || - (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) || - phydev->speed != tp->link_config.active_speed || - phydev->duplex != tp->link_config.active_duplex || - oldflowctrl != tp->link_config.active_flowctrl) - linkmesg = 1; - - tp->link_config.active_speed = phydev->speed; - tp->link_config.active_duplex = phydev->duplex; - - spin_unlock_bh(&tp->lock); - - if (linkmesg) - tg3_link_report(tp); -} - -static int tg3_phy_init(struct tg3 *tp) -{ - struct phy_device *phydev; - - if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) - return 0; - - /* Bring the PHY back to a known state. */ - tg3_bmcr_reset(tp); - - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - - /* Attach the MAC to the PHY. */ - phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, - phydev->dev_flags, phydev->interface); - if (IS_ERR(phydev)) { - dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); - return PTR_ERR(phydev); - } - - /* Mask with MAC supported features. */ - switch (phydev->interface) { - case PHY_INTERFACE_MODE_GMII: - case PHY_INTERFACE_MODE_RGMII: - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - phydev->supported &= (PHY_GBIT_FEATURES | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; - } - /* fallthru */ - case PHY_INTERFACE_MODE_MII: - phydev->supported &= (PHY_BASIC_FEATURES | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - break; - default: - phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); - return -EINVAL; - } - - tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; - - phydev->advertising = phydev->supported; - - return 0; -} - -static void tg3_phy_start(struct tg3 *tp) -{ - struct phy_device *phydev; - - if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) - return; - - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { - tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; - phydev->speed = tp->link_config.orig_speed; - phydev->duplex = tp->link_config.orig_duplex; - phydev->autoneg = tp->link_config.orig_autoneg; - phydev->advertising = tp->link_config.orig_advertising; - } - - phy_start(phydev); - - phy_start_aneg(phydev); -} - -static void tg3_phy_stop(struct tg3 *tp) -{ - if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) - return; - - phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); -} - -static void tg3_phy_fini(struct tg3 *tp) -{ - if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); - tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; - } -} - -static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) -{ - u32 phytest; - - if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { - u32 phy; - - tg3_writephy(tp, MII_TG3_FET_TEST, - phytest | MII_TG3_FET_SHADOW_EN); - if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { - if (enable) - phy |= 
MII_TG3_FET_SHDW_AUXSTAT2_APD; - else - phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; - tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); - } - tg3_writephy(tp, MII_TG3_FET_TEST, phytest); - } -} - -static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) -{ - u32 reg; - - if (!tg3_flag(tp, 5705_PLUS) || - (tg3_flag(tp, 5717_PLUS) && - (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) - return; - - if (tp->phy_flags & TG3_PHYFLG_IS_FET) { - tg3_phy_fet_toggle_apd(tp, enable); - return; - } - - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_SCR5_SEL | - MII_TG3_MISC_SHDW_SCR5_LPED | - MII_TG3_MISC_SHDW_SCR5_DLPTLM | - MII_TG3_MISC_SHDW_SCR5_SDTL | - MII_TG3_MISC_SHDW_SCR5_C125OE; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable) - reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; - - tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); - - - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_APD_SEL | - MII_TG3_MISC_SHDW_APD_WKTM_84MS; - if (enable) - reg |= MII_TG3_MISC_SHDW_APD_ENABLE; - - tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); -} - -static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) -{ - u32 phy; - - if (!tg3_flag(tp, 5705_PLUS) || - (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) - return; - - if (tp->phy_flags & TG3_PHYFLG_IS_FET) { - u32 ephy; - - if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { - u32 reg = MII_TG3_FET_SHDW_MISCCTRL; - - tg3_writephy(tp, MII_TG3_FET_TEST, - ephy | MII_TG3_FET_SHADOW_EN); - if (!tg3_readphy(tp, reg, &phy)) { - if (enable) - phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; - else - phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; - tg3_writephy(tp, reg, phy); - } - tg3_writephy(tp, MII_TG3_FET_TEST, ephy); - } - } else { - int ret; - - ret = tg3_phy_auxctl_read(tp, - MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); - if (!ret) { - if (enable) - phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; - else - phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; - tg3_phy_auxctl_write(tp, - MII_TG3_AUXCTL_SHDWSEL_MISC, phy); - } - } -} - -static void tg3_phy_set_wirespeed(struct tg3 *tp) -{ - int ret; - u32 val; - - if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) - return; - - ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); - if (!ret) - tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, - val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); -} - -static void tg3_phy_apply_otp(struct tg3 *tp) -{ - u32 otp, phy; - - if (!tp->phy_otp) - return; - - otp = tp->phy_otp; - - if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) - return; - - phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); - phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; - tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); - - phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | - ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); - tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); - - phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); - phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; - tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); - - phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); - tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); - - phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); - tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); - - phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | - ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); - tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); - - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); -} - -static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) -{ - u32 val; - - if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) - return; - - tp->setlpicnt = 0; - - if (tp->link_config.autoneg == 
AUTONEG_ENABLE && - current_link_up == 1 && - tp->link_config.active_duplex == DUPLEX_FULL && - (tp->link_config.active_speed == SPEED_100 || - tp->link_config.active_speed == SPEED_1000)) { - u32 eeectl; - - if (tp->link_config.active_speed == SPEED_1000) - eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; - else - eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; - - tw32(TG3_CPMU_EEE_CTRL, eeectl); - - tg3_phy_cl45_read(tp, MDIO_MMD_AN, - TG3_CL45_D7_EEERES_STAT, &val); - - if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || - val == TG3_CL45_D7_EEERES_STAT_LP_100TX) - tp->setlpicnt = 2; - } - - if (!tp->setlpicnt) { - if (current_link_up == 1 && - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { - tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); - } - - val = tr32(TG3_CPMU_EEE_MODE); - tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); - } -} - -static void tg3_phy_eee_enable(struct tg3 *tp) -{ - u32 val; - - if (tp->link_config.active_speed == SPEED_1000 && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { - val = MII_TG3_DSP_TAP26_ALNOKO | - MII_TG3_DSP_TAP26_RMRXSTO; - tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); - } - - val = tr32(TG3_CPMU_EEE_MODE); - tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); -} - -static int tg3_wait_macro_done(struct tg3 *tp) -{ - int limit = 100; - - while (limit--) { - u32 tmp32; - - if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { - if ((tmp32 & 0x1000) == 0) - break; - } - } - if (limit < 0) - return -EBUSY; - - return 0; -} - -static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) -{ - static const u32 test_pat[4][6] = { - { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, - { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, - { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, - { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } - }; - int chan; - - for (chan = 0; chan < 4; chan++) { - int i; - - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - (chan * 0x2000) | 0x0200); - tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); - - for (i = 0; i < 6; i++) - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, - test_pat[chan][i]); - - tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); - if (tg3_wait_macro_done(tp)) { - *resetp = 1; - return -EBUSY; - } - - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - (chan * 0x2000) | 0x0200); - tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082); - if (tg3_wait_macro_done(tp)) { - *resetp = 1; - return -EBUSY; - } - - tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); - if (tg3_wait_macro_done(tp)) { - *resetp = 1; - return -EBUSY; - } - - for (i = 0; i < 6; i += 2) { - u32 low, high; - - if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || - tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || - tg3_wait_macro_done(tp)) { - *resetp = 1; - return -EBUSY; - } - low &= 0x7fff; - high &= 0x000f; - if (low != test_pat[chan][i] || - high != test_pat[chan][i+1]) { - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); - - return -EBUSY; - } - } - } - - return 0; -} - -static int tg3_phy_reset_chanpat(struct tg3 *tp) -{ - int chan; - - for (chan = 0; chan < 4; chan++) { - int i; - - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - (chan * 0x2000) | 0x0200); - tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0002);
- for (i = 0; i < 6; i++)
- tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
- tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
- if (tg3_wait_macro_done(tp))
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
-{
- u32 reg32, phy9_orig;
- int retries, do_phy_reset, err;
-
- retries = 10;
- do_phy_reset = 1;
- do {
- if (do_phy_reset) {
- err = tg3_bmcr_reset(tp);
- if (err)
- return err;
- do_phy_reset = 0;
- }
-
- /* Disable transmitter and interrupt. */
- if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
- continue;
-
- reg32 |= 0x3000;
- tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
-
- /* Set full-duplex, 1000 mbps. */
- tg3_writephy(tp, MII_BMCR,
- BMCR_FULLDPLX | BMCR_SPEED1000);
-
- /* Set to master mode. */
- if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
- continue;
-
- tg3_writephy(tp, MII_CTRL1000,
- CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
-
- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
- if (err)
- return err;
-
- /* Block the PHY control access. */
- tg3_phydsp_write(tp, 0x8005, 0x0800);
-
- err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
- if (!err)
- break;
- } while (--retries);
-
- err = tg3_phy_reset_chanpat(tp);
- if (err)
- return err;
-
- tg3_phydsp_write(tp, 0x8005, 0x0000);
-
- tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
- tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
-
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
-
- tg3_writephy(tp, MII_CTRL1000, phy9_orig);
-
- if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
- reg32 &= ~0x3000;
- tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
- } else if (!err)
- err = -EBUSY;
-
- return err;
-}
-
-/* This will reset the tigon3 PHY if there is no valid
- * link unless the FORCE argument is non-zero.
- */
-static int tg3_phy_reset(struct tg3 *tp)
-{
- u32 val, cpmuctrl;
- int err;
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- val = tr32(GRC_MISC_CFG);
- tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
- udelay(40);
- }
- err = tg3_readphy(tp, MII_BMSR, &val);
- err |= tg3_readphy(tp, MII_BMSR, &val);
- if (err != 0)
- return -EBUSY;
-
- if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
- netif_carrier_off(tp->dev);
- tg3_link_report(tp);
- }
-
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
- err = tg3_phy_reset_5703_4_5(tp);
- if (err)
- return err;
- goto out;
- }
-
- cpmuctrl = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
- cpmuctrl = tr32(TG3_CPMU_CTRL);
- if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
- tw32(TG3_CPMU_CTRL,
- cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
- }
-
- err = tg3_bmcr_reset(tp);
- if (err)
- return err;
-
- if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
- val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
- tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
-
- tw32(TG3_CPMU_CTRL, cpmuctrl);
- }
-
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
- val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
- if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
- CPMU_LSPD_1000MB_MACCLK_12_5) {
- val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
- udelay(40);
- tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
- }
- }
-
- if (tg3_flag(tp, 5717_PLUS) &&
- (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
- return 0;
-
- tg3_phy_apply_otp(tp);
-
- if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
- tg3_phy_toggle_apd(tp, true);
- else
- tg3_phy_toggle_apd(tp, false); - -out: - if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { - tg3_phydsp_write(tp, 0x201f, 0x2aaa); - tg3_phydsp_write(tp, 0x000a, 0x0323); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); - } - - if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { - tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); - tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); - } - - if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { - if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { - tg3_phydsp_write(tp, 0x000a, 0x310b); - tg3_phydsp_write(tp, 0x201f, 0x9506); - tg3_phydsp_write(tp, 0x401f, 0x14e2); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); - } - } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { - if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); - if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); - tg3_writephy(tp, MII_TG3_TEST1, - MII_TG3_TEST1_TRIM_EN | 0x4); - } else - tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); - - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); - } - } - - /* Set Extended packet length bit (bit 14) on all chips that */ - /* support jumbo frames */ - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { - /* Cannot do read-modify-write on 5401 */ - tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); - } else if (tg3_flag(tp, JUMBO_CAPABLE)) { - /* Set bit 14 with read-modify-write to preserve other bits */ - err = tg3_phy_auxctl_read(tp, - MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); - if (!err) - tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, - val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); - } - - /* Set phy register 0x10 bit 0 to high fifo elasticity to support - * jumbo frames transmission. - */ - if (tg3_flag(tp, JUMBO_CAPABLE)) { - if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) - tg3_writephy(tp, MII_TG3_EXT_CTRL, - val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - /* adjust output voltage */ - tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); - } - - tg3_phy_toggle_automdix(tp, 1); - tg3_phy_set_wirespeed(tp); - return 0; -} - -#define TG3_GPIO_MSG_DRVR_PRES 0x00000001 -#define TG3_GPIO_MSG_NEED_VAUX 0x00000002 -#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ - TG3_GPIO_MSG_NEED_VAUX) -#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ - ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ - (TG3_GPIO_MSG_DRVR_PRES << 4) | \ - (TG3_GPIO_MSG_DRVR_PRES << 8) | \ - (TG3_GPIO_MSG_DRVR_PRES << 12)) - -#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ - ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ - (TG3_GPIO_MSG_NEED_VAUX << 4) | \ - (TG3_GPIO_MSG_NEED_VAUX << 8) | \ - (TG3_GPIO_MSG_NEED_VAUX << 12)) - -static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) -{ - u32 status, shift; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) - status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); - else - status = tr32(TG3_CPMU_DRV_STATUS); - - shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; - status &= ~(TG3_GPIO_MSG_MASK << shift); - status |= (newstat << shift); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) - tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); - else - tw32(TG3_CPMU_DRV_STATUS, status); - - return status >> TG3_APE_GPIO_MSG_SHIFT; -} - -static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) -{ - if (!tg3_flag(tp, IS_NIC)) - return 0; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) 
== ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { - if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) - return -EIO; - - tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); - - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, - TG3_GRC_LCLCTL_PWRSW_DELAY); - - tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); - } else { - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, - TG3_GRC_LCLCTL_PWRSW_DELAY); - } - - return 0; -} - -static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) -{ - u32 grc_local_ctrl; - - if (!tg3_flag(tp, IS_NIC) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) - return; - - grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; - - tw32_wait_f(GRC_LOCAL_CTRL, - grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, - TG3_GRC_LCLCTL_PWRSW_DELAY); - - tw32_wait_f(GRC_LOCAL_CTRL, - grc_local_ctrl, - TG3_GRC_LCLCTL_PWRSW_DELAY); - - tw32_wait_f(GRC_LOCAL_CTRL, - grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, - TG3_GRC_LCLCTL_PWRSW_DELAY); -} - -static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) -{ - if (!tg3_flag(tp, IS_NIC)) - return; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - (GRC_LCLCTRL_GPIO_OE0 | - GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OE2 | - GRC_LCLCTRL_GPIO_OUTPUT0 | - GRC_LCLCTRL_GPIO_OUTPUT1), - TG3_GRC_LCLCTL_PWRSW_DELAY); - } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { - /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ - u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | - GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OE2 | - GRC_LCLCTRL_GPIO_OUTPUT0 | - GRC_LCLCTRL_GPIO_OUTPUT1 | - tp->grc_local_ctrl; - tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, - TG3_GRC_LCLCTL_PWRSW_DELAY); - - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; - tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, - TG3_GRC_LCLCTL_PWRSW_DELAY); - - grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; - tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, - TG3_GRC_LCLCTL_PWRSW_DELAY); - } else { - u32 no_gpio2; - u32 grc_local_ctrl = 0; - - /* Workaround to prevent overdrawing Amps. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | - grc_local_ctrl, - TG3_GRC_LCLCTL_PWRSW_DELAY); - } - - /* On 5753 and variants, GPIO2 cannot be used. 
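The power-source helpers here walk the GPIOs through several intermediate states, waiting TG3_GRC_LCLCTL_PWRSW_DELAY after each write, because the pins drive an external power multiplexer that has to settle between steps. Reduced to a sketch with stubbed register access:

#include <stdint.h>
#include <stddef.h>

static void gpio_write(uint32_t v) { (void)v; }	/* stub MMIO write */
static void settle(void) { }			/* stub for the delay */

/* Drive a sequence of GPIO states, letting the mux settle each time. */
static void power_mux_sequence(const uint32_t *stage, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		gpio_write(stage[i]);
		settle();
	}
}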
*/ - no_gpio2 = tp->nic_sram_data_cfg & - NIC_SRAM_DATA_CFG_NO_GPIO2; - - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | - GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OE2 | - GRC_LCLCTRL_GPIO_OUTPUT1 | - GRC_LCLCTRL_GPIO_OUTPUT2; - if (no_gpio2) { - grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | - GRC_LCLCTRL_GPIO_OUTPUT2); - } - tw32_wait_f(GRC_LOCAL_CTRL, - tp->grc_local_ctrl | grc_local_ctrl, - TG3_GRC_LCLCTL_PWRSW_DELAY); - - grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; - - tw32_wait_f(GRC_LOCAL_CTRL, - tp->grc_local_ctrl | grc_local_ctrl, - TG3_GRC_LCLCTL_PWRSW_DELAY); - - if (!no_gpio2) { - grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; - tw32_wait_f(GRC_LOCAL_CTRL, - tp->grc_local_ctrl | grc_local_ctrl, - TG3_GRC_LCLCTL_PWRSW_DELAY); - } - } -} - -static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) -{ - u32 msg = 0; - - /* Serialize power state transitions */ - if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) - return; - - if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) - msg = TG3_GPIO_MSG_NEED_VAUX; - - msg = tg3_set_function_status(tp, msg); - - if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) - goto done; - - if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) - tg3_pwrsrc_switch_to_vaux(tp); - else - tg3_pwrsrc_die_with_vmain(tp); - -done: - tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); -} - -static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) -{ - bool need_vaux = false; - - /* The GPIOs do something completely different on 57765. */ - if (!tg3_flag(tp, IS_NIC) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - return; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { - tg3_frob_aux_power_5717(tp, include_wol ? - tg3_flag(tp, WOL_ENABLE) != 0 : 0); - return; - } - - if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { - struct net_device *dev_peer; - - dev_peer = pci_get_drvdata(tp->pdev_peer); - - /* remove_one() may have been run on the peer. 
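On dual-port boards the two PCI functions share one power source, which is why the code below looks up the peer through pci_get_drvdata() before deciding whether Vaux can be dropped. The decision itself reduces to a simple predicate (sketch):

#include <stdbool.h>

/* Keep the auxiliary rail if either function still needs wake/ASF. */
static bool need_vaux(bool self_wol, bool self_asf,
		      bool peer_wol, bool peer_asf)
{
	return self_wol || self_asf || peer_wol || peer_asf;
}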
*/ - if (dev_peer) { - struct tg3 *tp_peer = netdev_priv(dev_peer); - - if (tg3_flag(tp_peer, INIT_COMPLETE)) - return; - - if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || - tg3_flag(tp_peer, ENABLE_ASF)) - need_vaux = true; - } - } - - if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || - tg3_flag(tp, ENABLE_ASF)) - need_vaux = true; - - if (need_vaux) - tg3_pwrsrc_switch_to_vaux(tp); - else - tg3_pwrsrc_die_with_vmain(tp); -} - -static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) -{ - if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) - return 1; - else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { - if (speed != SPEED_10) - return 1; - } else if (speed == SPEED_10) - return 1; - - return 0; -} - -static int tg3_setup_phy(struct tg3 *, int); - -#define RESET_KIND_SHUTDOWN 0 -#define RESET_KIND_INIT 1 -#define RESET_KIND_SUSPEND 2 - -static void tg3_write_sig_post_reset(struct tg3 *, int); -static int tg3_halt_cpu(struct tg3 *, u32); - -static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) -{ - u32 val; - - if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); - u32 serdes_cfg = tr32(MAC_SERDES_CFG); - - sg_dig_ctrl |= - SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; - tw32(SG_DIG_CTRL, sg_dig_ctrl); - tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); - } - return; - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - tg3_bmcr_reset(tp); - val = tr32(GRC_MISC_CFG); - tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); - udelay(40); - return; - } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { - u32 phytest; - if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { - u32 phy; - - tg3_writephy(tp, MII_ADVERTISE, 0); - tg3_writephy(tp, MII_BMCR, - BMCR_ANENABLE | BMCR_ANRESTART); - - tg3_writephy(tp, MII_TG3_FET_TEST, - phytest | MII_TG3_FET_SHADOW_EN); - if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { - phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; - tg3_writephy(tp, - MII_TG3_FET_SHDW_AUXMODE4, - phy); - } - tg3_writephy(tp, MII_TG3_FET_TEST, phytest); - } - return; - } else if (do_low_power) { - tg3_writephy(tp, MII_TG3_EXT_CTRL, - MII_TG3_EXT_CTRL_FORCE_LED_OFF); - - val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | - MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | - MII_TG3_AUXCTL_PCTL_VREG_11V; - tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); - } - - /* The PHY should not be powered down on some chips because - * of bugs. - */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && - (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) - return; - - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || - GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { - val = tr32(TG3_CPMU_LSPD_1000MB_CLK); - val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; - val |= CPMU_LSPD_1000MB_MACCLK_12_5; - tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); - } - - tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); -} - -/* tp->lock is held. */ -static int tg3_nvram_lock(struct tg3 *tp) -{ - if (tg3_flag(tp, NVRAM)) { - int i; - - if (tp->nvram_lock_cnt == 0) { - tw32(NVRAM_SWARB, SWARB_REQ_SET1); - for (i = 0; i < 8000; i++) { - if (tr32(NVRAM_SWARB) & SWARB_GNT1) - break; - udelay(20); - } - if (i == 8000) { - tw32(NVRAM_SWARB, SWARB_REQ_CLR1); - return -ENODEV; - } - } - tp->nvram_lock_cnt++; - } - return 0; -} - -/* tp->lock is held. 
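tg3_nvram_lock() above layers a recursion count over a hardware arbiter: only the first taker programs the SWARB request bit and polls for the grant, and only the last unlocker (in tg3_nvram_unlock() below) releases it. A sketch of that shape with invented arb_* stubs; in the real hardware, request-set and request-clear are separate write operations:

#include <stdint.h>

#define ARB_REQ 0x1	/* hypothetical request bit */
#define ARB_GNT 0x2	/* hypothetical grant bit */

static uint32_t arb;	/* fake arbiter register: always grants */
static void arb_set(uint32_t v) { arb |= v; }
static void arb_clr(uint32_t v) { arb &= ~v; }
static uint32_t arb_read(void) { return arb | ARB_GNT; }

static int lock_cnt;

static int hw_arb_lock(void)
{
	if (lock_cnt == 0) {
		int i;

		arb_set(ARB_REQ);
		for (i = 0; i < 8000; i++)
			if (arb_read() & ARB_GNT)
				break;
		if (i == 8000) {
			arb_clr(ARB_REQ);	/* withdraw the request */
			return -1;
		}
	}
	lock_cnt++;	/* nested takers only bump the count */
	return 0;
}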
*/ -static void tg3_nvram_unlock(struct tg3 *tp) -{ - if (tg3_flag(tp, NVRAM)) { - if (tp->nvram_lock_cnt > 0) - tp->nvram_lock_cnt--; - if (tp->nvram_lock_cnt == 0) - tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); - } -} - -/* tp->lock is held. */ -static void tg3_enable_nvram_access(struct tg3 *tp) -{ - if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { - u32 nvaccess = tr32(NVRAM_ACCESS); - - tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); - } -} - -/* tp->lock is held. */ -static void tg3_disable_nvram_access(struct tg3 *tp) -{ - if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { - u32 nvaccess = tr32(NVRAM_ACCESS); - - tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); - } -} - -static int tg3_nvram_read_using_eeprom(struct tg3 *tp, - u32 offset, u32 *val) -{ - u32 tmp; - int i; - - if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) - return -EINVAL; - - tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | - EEPROM_ADDR_DEVID_MASK | - EEPROM_ADDR_READ); - tw32(GRC_EEPROM_ADDR, - tmp | - (0 << EEPROM_ADDR_DEVID_SHIFT) | - ((offset << EEPROM_ADDR_ADDR_SHIFT) & - EEPROM_ADDR_ADDR_MASK) | - EEPROM_ADDR_READ | EEPROM_ADDR_START); - - for (i = 0; i < 1000; i++) { - tmp = tr32(GRC_EEPROM_ADDR); - - if (tmp & EEPROM_ADDR_COMPLETE) - break; - msleep(1); - } - if (!(tmp & EEPROM_ADDR_COMPLETE)) - return -EBUSY; - - tmp = tr32(GRC_EEPROM_DATA); - - /* - * The data will always be opposite the native endian - * format. Perform a blind byteswap to compensate. - */ - *val = swab32(tmp); - - return 0; -} - -#define NVRAM_CMD_TIMEOUT 10000 - -static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) -{ - int i; - - tw32(NVRAM_CMD, nvram_cmd); - for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { - udelay(10); - if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { - udelay(10); - break; - } - } - - if (i == NVRAM_CMD_TIMEOUT) - return -EBUSY; - - return 0; -} - -static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) -{ - if (tg3_flag(tp, NVRAM) && - tg3_flag(tp, NVRAM_BUFFERED) && - tg3_flag(tp, FLASH) && - !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && - (tp->nvram_jedecnum == JEDEC_ATMEL)) - - addr = ((addr / tp->nvram_pagesize) << - ATMEL_AT45DB0X1B_PAGE_POS) + - (addr % tp->nvram_pagesize); - - return addr; -} - -static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) -{ - if (tg3_flag(tp, NVRAM) && - tg3_flag(tp, NVRAM_BUFFERED) && - tg3_flag(tp, FLASH) && - !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && - (tp->nvram_jedecnum == JEDEC_ATMEL)) - - addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * - tp->nvram_pagesize) + - (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); - - return addr; -} - -/* NOTE: Data read in from NVRAM is byteswapped according to - * the byteswapping settings for all other register accesses. - * tg3 devices are BE devices, so on a BE machine, the data - * returned will be exactly as it is seen in NVRAM. On a LE - * machine, the 32-bit value will be byteswapped. 
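The NOTE above is the crux of the NVRAM handling: tg3 NVRAM words are big-endian, the normal register path hands them back swapped on a little-endian host, and the EEPROM fallback compensates with the blind swap seen in tg3_nvram_read_using_eeprom() above. That swap is the standard swab32 operation; written out:

#include <stdint.h>

/* Unconditionally reverse the four bytes of a 32-bit word. */
static uint32_t my_swab32(uint32_t x)
{
	return (x << 24) | ((x & 0xff00) << 8) |
	       ((x >> 8) & 0xff00) | (x >> 24);
}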
- */ -static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) -{ - int ret; - - if (!tg3_flag(tp, NVRAM)) - return tg3_nvram_read_using_eeprom(tp, offset, val); - - offset = tg3_nvram_phys_addr(tp, offset); - - if (offset > NVRAM_ADDR_MSK) - return -EINVAL; - - ret = tg3_nvram_lock(tp); - if (ret) - return ret; - - tg3_enable_nvram_access(tp); - - tw32(NVRAM_ADDR, offset); - ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | - NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); - - if (ret == 0) - *val = tr32(NVRAM_RDDATA); - - tg3_disable_nvram_access(tp); - - tg3_nvram_unlock(tp); - - return ret; -} - -/* Ensures NVRAM data is in bytestream format. */ -static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) -{ - u32 v; - int res = tg3_nvram_read(tp, offset, &v); - if (!res) - *val = cpu_to_be32(v); - return res; -} - -/* tp->lock is held. */ -static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) -{ - u32 addr_high, addr_low; - int i; - - addr_high = ((tp->dev->dev_addr[0] << 8) | - tp->dev->dev_addr[1]); - addr_low = ((tp->dev->dev_addr[2] << 24) | - (tp->dev->dev_addr[3] << 16) | - (tp->dev->dev_addr[4] << 8) | - (tp->dev->dev_addr[5] << 0)); - for (i = 0; i < 4; i++) { - if (i == 1 && skip_mac_1) - continue; - tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); - tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - for (i = 0; i < 12; i++) { - tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); - tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); - } - } - - addr_high = (tp->dev->dev_addr[0] + - tp->dev->dev_addr[1] + - tp->dev->dev_addr[2] + - tp->dev->dev_addr[3] + - tp->dev->dev_addr[4] + - tp->dev->dev_addr[5]) & - TX_BACKOFF_SEED_MASK; - tw32(MAC_TX_BACKOFF_SEED, addr_high); -} - -static void tg3_enable_register_access(struct tg3 *tp) -{ - /* - * Make sure register accesses (indirect or otherwise) will function - * correctly. - */ - pci_write_config_dword(tp->pdev, - TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); -} - -static int tg3_power_up(struct tg3 *tp) -{ - int err; - - tg3_enable_register_access(tp); - - err = pci_set_power_state(tp->pdev, PCI_D0); - if (!err) { - /* Switch out of Vaux if it is a NIC */ - tg3_pwrsrc_switch_to_vmain(tp); - } else { - netdev_err(tp->dev, "Transition to D0 failed\n"); - } - - return err; -} - -static int tg3_power_down_prepare(struct tg3 *tp) -{ - u32 misc_host_ctrl; - bool device_should_wake, do_low_power; - - tg3_enable_register_access(tp); - - /* Restore the CLKREQ setting. 
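The CLKREQ fixup that follows is a textbook read-modify-write of a PCIe capability word: read LNKCTL, OR in the enable bit, write it back, leaving the neighbouring bits alone. As a sketch with stubbed config-space accessors (offsets and helpers are hypothetical):

#include <stdint.h>

static uint16_t cfg_read16(int off) { (void)off; return 0; }
static void cfg_write16(int off, uint16_t v) { (void)off; (void)v; }

/* Set one bit in a config word without disturbing the rest. */
static void cfg_set_bit16(int off, uint16_t bit)
{
	cfg_write16(off, cfg_read16(off) | bit);
}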
*/ - if (tg3_flag(tp, CLKREQ_BUG)) { - u16 lnkctl; - - pci_read_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - &lnkctl); - lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; - pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - lnkctl); - } - - misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); - tw32(TG3PCI_MISC_HOST_CTRL, - misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); - - device_should_wake = device_may_wakeup(&tp->pdev->dev) && - tg3_flag(tp, WOL_ENABLE); - - if (tg3_flag(tp, USE_PHYLIB)) { - do_low_power = false; - if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && - !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { - struct phy_device *phydev; - u32 phyid, advertising; - - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - - tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; - - tp->link_config.orig_speed = phydev->speed; - tp->link_config.orig_duplex = phydev->duplex; - tp->link_config.orig_autoneg = phydev->autoneg; - tp->link_config.orig_advertising = phydev->advertising; - - advertising = ADVERTISED_TP | - ADVERTISED_Pause | - ADVERTISED_Autoneg | - ADVERTISED_10baseT_Half; - - if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { - if (tg3_flag(tp, WOL_SPEED_100MB)) - advertising |= - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Full; - else - advertising |= ADVERTISED_10baseT_Full; - } - - phydev->advertising = advertising; - - phy_start_aneg(phydev); - - phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; - if (phyid != PHY_ID_BCMAC131) { - phyid &= PHY_BCM_OUI_MASK; - if (phyid == PHY_BCM_OUI_1 || - phyid == PHY_BCM_OUI_2 || - phyid == PHY_BCM_OUI_3) - do_low_power = true; - } - } - } else { - do_low_power = true; - - if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { - tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; - tp->link_config.orig_speed = tp->link_config.speed; - tp->link_config.orig_duplex = tp->link_config.duplex; - tp->link_config.orig_autoneg = tp->link_config.autoneg; - } - - if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { - tp->link_config.speed = SPEED_10; - tp->link_config.duplex = DUPLEX_HALF; - tp->link_config.autoneg = AUTONEG_ENABLE; - tg3_setup_phy(tp, 0); - } - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 val; - - val = tr32(GRC_VCPU_EXT_CTRL); - tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); - } else if (!tg3_flag(tp, ENABLE_ASF)) { - int i; - u32 val; - - for (i = 0; i < 200; i++) { - tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); - if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) - break; - msleep(1); - } - } - if (tg3_flag(tp, WOL_CAP)) - tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | - WOL_DRV_STATE_SHUTDOWN | - WOL_DRV_WOL | - WOL_SET_MAGIC_PKT); - - if (device_should_wake) { - u32 mac_mode; - - if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { - if (do_low_power && - !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { - tg3_phy_auxctl_write(tp, - MII_TG3_AUXCTL_SHDWSEL_PWRCTL, - MII_TG3_AUXCTL_PCTL_WOL_EN | - MII_TG3_AUXCTL_PCTL_100TX_LPWR | - MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); - udelay(40); - } - - if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) - mac_mode = MAC_MODE_PORT_MODE_GMII; - else - mac_mode = MAC_MODE_PORT_MODE_MII; - - mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5700) { - u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 
- SPEED_100 : SPEED_10; - if (tg3_5700_link_polarity(tp, speed)) - mac_mode |= MAC_MODE_LINK_POLARITY; - else - mac_mode &= ~MAC_MODE_LINK_POLARITY; - } - } else { - mac_mode = MAC_MODE_PORT_MODE_TBI; - } - - if (!tg3_flag(tp, 5750_PLUS)) - tw32(MAC_LED_CTRL, tp->led_ctrl); - - mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; - if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && - (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) - mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; - - if (tg3_flag(tp, ENABLE_APE)) - mac_mode |= MAC_MODE_APE_TX_EN | - MAC_MODE_APE_RX_EN | - MAC_MODE_TDE_ENABLE; - - tw32_f(MAC_MODE, mac_mode); - udelay(100); - - tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); - udelay(10); - } - - if (!tg3_flag(tp, WOL_SPEED_100MB) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { - u32 base_val; - - base_val = tp->pci_clock_ctrl; - base_val |= (CLOCK_CTRL_RXCLK_DISABLE | - CLOCK_CTRL_TXCLK_DISABLE); - - tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | - CLOCK_CTRL_PWRDOWN_PLL133, 40); - } else if (tg3_flag(tp, 5780_CLASS) || - tg3_flag(tp, CPMU_PRESENT) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - /* do nothing */ - } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { - u32 newbits1, newbits2; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | - CLOCK_CTRL_TXCLK_DISABLE | - CLOCK_CTRL_ALTCLK); - newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; - } else if (tg3_flag(tp, 5705_PLUS)) { - newbits1 = CLOCK_CTRL_625_CORE; - newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; - } else { - newbits1 = CLOCK_CTRL_ALTCLK; - newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; - } - - tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, - 40); - - tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, - 40); - - if (!tg3_flag(tp, 5705_PLUS)) { - u32 newbits3; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | - CLOCK_CTRL_TXCLK_DISABLE | - CLOCK_CTRL_44MHZ_CORE); - } else { - newbits3 = CLOCK_CTRL_44MHZ_CORE; - } - - tw32_wait_f(TG3PCI_CLOCK_CTRL, - tp->pci_clock_ctrl | newbits3, 40); - } - } - - if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) - tg3_power_down_phy(tp, do_low_power); - - tg3_frob_aux_power(tp, true); - - /* Workaround for unstable PLL clock */ - if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || - (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { - u32 val = tr32(0x7d00); - - val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); - tw32(0x7d00, val); - if (!tg3_flag(tp, ENABLE_ASF)) { - int err; - - err = tg3_nvram_lock(tp); - tg3_halt_cpu(tp, RX_CPU_BASE); - if (!err) - tg3_nvram_unlock(tp); - } - } - - tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); - - return 0; -} - -static void tg3_power_down(struct tg3 *tp) -{ - tg3_power_down_prepare(tp); - - pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); - pci_set_power_state(tp->pdev, PCI_D3hot); -} - -static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) -{ - switch (val & MII_TG3_AUX_STAT_SPDMASK) { - case MII_TG3_AUX_STAT_10HALF: - *speed = SPEED_10; - *duplex = DUPLEX_HALF; - break; - - case MII_TG3_AUX_STAT_10FULL: - *speed = SPEED_10; - *duplex = DUPLEX_FULL; - break; - - case MII_TG3_AUX_STAT_100HALF: - *speed = SPEED_100; - *duplex = 
DUPLEX_HALF; - break; - - case MII_TG3_AUX_STAT_100FULL: - *speed = SPEED_100; - *duplex = DUPLEX_FULL; - break; - - case MII_TG3_AUX_STAT_1000HALF: - *speed = SPEED_1000; - *duplex = DUPLEX_HALF; - break; - - case MII_TG3_AUX_STAT_1000FULL: - *speed = SPEED_1000; - *duplex = DUPLEX_FULL; - break; - - default: - if (tp->phy_flags & TG3_PHYFLG_IS_FET) { - *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : - SPEED_10; - *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : - DUPLEX_HALF; - break; - } - *speed = SPEED_INVALID; - *duplex = DUPLEX_INVALID; - break; - } -} - -static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) -{ - int err = 0; - u32 val, new_adv; - - new_adv = ADVERTISE_CSMA; - if (advertise & ADVERTISED_10baseT_Half) - new_adv |= ADVERTISE_10HALF; - if (advertise & ADVERTISED_10baseT_Full) - new_adv |= ADVERTISE_10FULL; - if (advertise & ADVERTISED_100baseT_Half) - new_adv |= ADVERTISE_100HALF; - if (advertise & ADVERTISED_100baseT_Full) - new_adv |= ADVERTISE_100FULL; - - new_adv |= tg3_advert_flowctrl_1000T(flowctrl); - - err = tg3_writephy(tp, MII_ADVERTISE, new_adv); - if (err) - goto done; - - if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) - goto done; - - new_adv = 0; - if (advertise & ADVERTISED_1000baseT_Half) - new_adv |= ADVERTISE_1000HALF; - if (advertise & ADVERTISED_1000baseT_Full) - new_adv |= ADVERTISE_1000FULL; - - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) - new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; - - err = tg3_writephy(tp, MII_CTRL1000, new_adv); - if (err) - goto done; - - if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) - goto done; - - tw32(TG3_CPMU_EEE_MODE, - tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); - - err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); - if (!err) { - u32 err2; - - val = 0; - /* Advertise 100-BaseTX EEE ability */ - if (advertise & ADVERTISED_100baseT_Full) - val |= MDIO_AN_EEE_ADV_100TX; - /* Advertise 1000-BaseT EEE ability */ - if (advertise & ADVERTISED_1000baseT_Full) - val |= MDIO_AN_EEE_ADV_1000T; - err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); - if (err) - val = 0; - - switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { - case ASIC_REV_5717: - case ASIC_REV_57765: - case ASIC_REV_5719: - /* If we advertised any eee advertisements above... */ - if (val) - val = MII_TG3_DSP_TAP26_ALNOKO | - MII_TG3_DSP_TAP26_RMRXSTO | - MII_TG3_DSP_TAP26_OPCSINPT; - tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); - /* Fall through */ - case ASIC_REV_5720: - if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) - tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | - MII_TG3_DSP_CH34TP2_HIBW01); - } - - err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); - if (!err) - err = err2; - } - -done: - return err; -} - -static void tg3_phy_copper_begin(struct tg3 *tp) -{ - u32 new_adv; - int i; - - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { - new_adv = ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full; - if (tg3_flag(tp, WOL_SPEED_100MB)) - new_adv |= ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full; - - tg3_phy_autoneg_cfg(tp, new_adv, - FLOW_CTRL_TX | FLOW_CTRL_RX); - } else if (tp->link_config.speed == SPEED_INVALID) { - if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) - tp->link_config.advertising &= - ~(ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); - - tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, - tp->link_config.flowctrl); - } else { - /* Asking for a specific link mode. 
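tg3_phy_autoneg_cfg() above is essentially a translation table from ethtool ADVERTISED_* link modes to MII_ADVERTISE register bits. The 10/100 half of it, isolated and compilable (the helper name is mine; the constants are the standard linux/mii.h and ethtool values):

#include <stdint.h>
#include <stdio.h>

/* Standard IEEE MII advertisement register bits (linux/mii.h values). */
#define ADVERTISE_CSMA    0x0001
#define ADVERTISE_10HALF  0x0020
#define ADVERTISE_10FULL  0x0040
#define ADVERTISE_100HALF 0x0080
#define ADVERTISE_100FULL 0x0100

/* ethtool ADVERTISED_* link-mode bits. */
#define ADVERTISED_10baseT_Half   (1 << 0)
#define ADVERTISED_10baseT_Full   (1 << 1)
#define ADVERTISED_100baseT_Half  (1 << 2)
#define ADVERTISED_100baseT_Full  (1 << 3)

/* Translate ethtool link modes into an MII_ADVERTISE register value,
 * mirroring the 10/100 portion of tg3_phy_autoneg_cfg(). */
static uint16_t ethtool_to_mii_adv(uint32_t advertise)
{
	uint16_t adv = ADVERTISE_CSMA;	/* always advertise 802.3 CSMA/CD */

	if (advertise & ADVERTISED_10baseT_Half)
		adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		adv |= ADVERTISE_100FULL;
	return adv;
}

int main(void)
{
	printf("MII_ADVERTISE=0x%04x\n",
	       ethtool_to_mii_adv(ADVERTISED_100baseT_Half |
				  ADVERTISED_100baseT_Full));
	return 0;
}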
*/ - if (tp->link_config.speed == SPEED_1000) { - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv = ADVERTISED_1000baseT_Full; - else - new_adv = ADVERTISED_1000baseT_Half; - } else if (tp->link_config.speed == SPEED_100) { - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv = ADVERTISED_100baseT_Full; - else - new_adv = ADVERTISED_100baseT_Half; - } else { - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv = ADVERTISED_10baseT_Full; - else - new_adv = ADVERTISED_10baseT_Half; - } - - tg3_phy_autoneg_cfg(tp, new_adv, - tp->link_config.flowctrl); - } - - if (tp->link_config.autoneg == AUTONEG_DISABLE && - tp->link_config.speed != SPEED_INVALID) { - u32 bmcr, orig_bmcr; - - tp->link_config.active_speed = tp->link_config.speed; - tp->link_config.active_duplex = tp->link_config.duplex; - - bmcr = 0; - switch (tp->link_config.speed) { - default: - case SPEED_10: - break; - - case SPEED_100: - bmcr |= BMCR_SPEED100; - break; - - case SPEED_1000: - bmcr |= BMCR_SPEED1000; - break; - } - - if (tp->link_config.duplex == DUPLEX_FULL) - bmcr |= BMCR_FULLDPLX; - - if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && - (bmcr != orig_bmcr)) { - tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); - for (i = 0; i < 1500; i++) { - u32 tmp; - - udelay(10); - if (tg3_readphy(tp, MII_BMSR, &tmp) || - tg3_readphy(tp, MII_BMSR, &tmp)) - continue; - if (!(tmp & BMSR_LSTATUS)) { - udelay(40); - break; - } - } - tg3_writephy(tp, MII_BMCR, bmcr); - udelay(40); - } - } else { - tg3_writephy(tp, MII_BMCR, - BMCR_ANENABLE | BMCR_ANRESTART); - } -} - -static int tg3_init_5401phy_dsp(struct tg3 *tp) -{ - int err; - - /* Turn off tap power management. */ - /* Set Extended packet length bit */ - err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); - - err |= tg3_phydsp_write(tp, 0x0012, 0x1804); - err |= tg3_phydsp_write(tp, 0x0013, 0x1204); - err |= tg3_phydsp_write(tp, 0x8006, 0x0132); - err |= tg3_phydsp_write(tp, 0x8006, 0x0232); - err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); - - udelay(40); - - return err; -} - -static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) -{ - u32 adv_reg, all_mask = 0; - - if (mask & ADVERTISED_10baseT_Half) - all_mask |= ADVERTISE_10HALF; - if (mask & ADVERTISED_10baseT_Full) - all_mask |= ADVERTISE_10FULL; - if (mask & ADVERTISED_100baseT_Half) - all_mask |= ADVERTISE_100HALF; - if (mask & ADVERTISED_100baseT_Full) - all_mask |= ADVERTISE_100FULL; - - if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) - return 0; - - if ((adv_reg & all_mask) != all_mask) - return 0; - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - u32 tg3_ctrl; - - all_mask = 0; - if (mask & ADVERTISED_1000baseT_Half) - all_mask |= ADVERTISE_1000HALF; - if (mask & ADVERTISED_1000baseT_Full) - all_mask |= ADVERTISE_1000FULL; - - if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) - return 0; - - if ((tg3_ctrl & all_mask) != all_mask) - return 0; - } - return 1; -} - -static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) -{ - u32 curadv, reqadv; - - if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) - return 1; - - curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); - - if (tp->link_config.active_duplex == DUPLEX_FULL) { - if (curadv != reqadv) - return 0; - - if (tg3_flag(tp, PAUSE_AUTONEG)) - tg3_readphy(tp, MII_LPA, rmtadv); - } else { - /* Reprogram the advertisement register, even if it - * does not affect the current link. 
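For forced (non-autoneg) links, tg3_phy_copper_begin() composes BMCR directly: 10 Mb/s is the all-zero speed encoding, 100 and 1000 each have a dedicated speed bit, and duplex is one more bit. Standalone, with the standard linux/mii.h bit values (forced_bmcr() is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

#define BMCR_SPEED1000 0x0040	/* standard MII control-register bits */
#define BMCR_FULLDPLX  0x0100
#define BMCR_SPEED100  0x2000

/* Build a forced-mode BMCR the way tg3_phy_copper_begin() does. */
static uint16_t forced_bmcr(int speed, int full_duplex)
{
	uint16_t bmcr = 0;

	if (speed == 100)
		bmcr |= BMCR_SPEED100;
	else if (speed == 1000)
		bmcr |= BMCR_SPEED1000;
	/* speed == 10: leave both speed bits clear */

	if (full_duplex)
		bmcr |= BMCR_FULLDPLX;
	return bmcr;
}

int main(void)
{
	printf("100FD BMCR=0x%04x, 1000HD BMCR=0x%04x\n",
	       forced_bmcr(100, 1), forced_bmcr(1000, 0));
	return 0;
}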
If the link - * gets renegotiated in the future, we can save an - * additional renegotiation cycle by advertising - * it correctly in the first place. - */ - if (curadv != reqadv) { - *lcladv &= ~(ADVERTISE_PAUSE_CAP | - ADVERTISE_PAUSE_ASYM); - tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv); - } - } - - return 1; -} - -static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) -{ - int current_link_up; - u32 bmsr, val; - u32 lcl_adv, rmt_adv; - u16 current_speed; - u8 current_duplex; - int i, err; - - tw32(MAC_EVENT, 0); - - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_MI_COMPLETION | - MAC_STATUS_LNKSTATE_CHANGED)); - udelay(40); - - if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { - tw32_f(MAC_MI_MODE, - (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); - udelay(80); - } - - tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); - - /* Some third-party PHYs need to be reset on link going - * down. - */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && - netif_carrier_ok(tp->dev)) { - tg3_readphy(tp, MII_BMSR, &bmsr); - if (!tg3_readphy(tp, MII_BMSR, &bmsr) && - !(bmsr & BMSR_LSTATUS)) - force_reset = 1; - } - if (force_reset) - tg3_phy_reset(tp); - - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { - tg3_readphy(tp, MII_BMSR, &bmsr); - if (tg3_readphy(tp, MII_BMSR, &bmsr) || - !tg3_flag(tp, INIT_COMPLETE)) - bmsr = 0; - - if (!(bmsr & BMSR_LSTATUS)) { - err = tg3_init_5401phy_dsp(tp); - if (err) - return err; - - tg3_readphy(tp, MII_BMSR, &bmsr); - for (i = 0; i < 1000; i++) { - udelay(10); - if (!tg3_readphy(tp, MII_BMSR, &bmsr) && - (bmsr & BMSR_LSTATUS)) { - udelay(40); - break; - } - } - - if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == - TG3_PHY_REV_BCM5401_B0 && - !(bmsr & BMSR_LSTATUS) && - tp->link_config.active_speed == SPEED_1000) { - err = tg3_phy_reset(tp); - if (!err) - err = tg3_init_5401phy_dsp(tp); - if (err) - return err; - } - } - } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { - /* 5701 {A0,B0} CRC bug workaround */ - tg3_writephy(tp, 0x15, 0x0a75); - tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); - tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); - tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); - } - - /* Clear pending interrupts... 
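tg3_adv_1000T_flowctrl_ok() compares the pause bits actually present in MII_ADVERTISE against what the configured flow control requires. The companion question, which flow control results once both sides have advertised, is the standard 802.3 resolution (Annex 28B); this sketch mirrors the kernel's generic mii_resolve_flowctrl_fdx() helper rather than tg3's private copy:

#include <stdint.h>
#include <stdio.h>

#define ADVERTISE_PAUSE_CAP  0x0400	/* standard MII pause bits */
#define ADVERTISE_PAUSE_ASYM 0x0800
#define FLOW_CTRL_TX 0x01
#define FLOW_CTRL_RX 0x02

/* Resolve negotiated full-duplex flow control from the local and remote
 * pause advertisements, per IEEE 802.3 Table 28B-3. */
static uint8_t resolve_flowctrl(uint16_t lcladv, uint16_t rmtadv)
{
	uint8_t cap = 0;

	if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;	/* symmetric pause */
	} else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
		if (lcladv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_RX;	/* asymmetric, one direction */
		else if (rmtadv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_TX;	/* asymmetric, the other */
	}
	return cap;
}

int main(void)
{
	uint16_t adv = ADVERTISE_PAUSE_CAP;

	/* Both sides advertise symmetric pause -> TX and RX flow control. */
	printf("cap=0x%x\n", resolve_flowctrl(adv, adv));
	return 0;
}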
*/ - tg3_readphy(tp, MII_TG3_ISTAT, &val); - tg3_readphy(tp, MII_TG3_ISTAT, &val); - - if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) - tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); - else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) - tg3_writephy(tp, MII_TG3_IMASK, ~0); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) - tg3_writephy(tp, MII_TG3_EXT_CTRL, - MII_TG3_EXT_CTRL_LNK3_LED_MODE); - else - tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); - } - - current_link_up = 0; - current_speed = SPEED_INVALID; - current_duplex = DUPLEX_INVALID; - - if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { - err = tg3_phy_auxctl_read(tp, - MII_TG3_AUXCTL_SHDWSEL_MISCTEST, - &val); - if (!err && !(val & (1 << 10))) { - tg3_phy_auxctl_write(tp, - MII_TG3_AUXCTL_SHDWSEL_MISCTEST, - val | (1 << 10)); - goto relink; - } - } - - bmsr = 0; - for (i = 0; i < 100; i++) { - tg3_readphy(tp, MII_BMSR, &bmsr); - if (!tg3_readphy(tp, MII_BMSR, &bmsr) && - (bmsr & BMSR_LSTATUS)) - break; - udelay(40); - } - - if (bmsr & BMSR_LSTATUS) { - u32 aux_stat, bmcr; - - tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); - for (i = 0; i < 2000; i++) { - udelay(10); - if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && - aux_stat) - break; - } - - tg3_aux_stat_to_speed_duplex(tp, aux_stat, - ¤t_speed, - ¤t_duplex); - - bmcr = 0; - for (i = 0; i < 200; i++) { - tg3_readphy(tp, MII_BMCR, &bmcr); - if (tg3_readphy(tp, MII_BMCR, &bmcr)) - continue; - if (bmcr && bmcr != 0x7fff) - break; - udelay(10); - } - - lcl_adv = 0; - rmt_adv = 0; - - tp->link_config.active_speed = current_speed; - tp->link_config.active_duplex = current_duplex; - - if (tp->link_config.autoneg == AUTONEG_ENABLE) { - if ((bmcr & BMCR_ANENABLE) && - tg3_copper_is_advertising_all(tp, - tp->link_config.advertising)) { - if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv, - &rmt_adv)) - current_link_up = 1; - } - } else { - if (!(bmcr & BMCR_ANENABLE) && - tp->link_config.speed == current_speed && - tp->link_config.duplex == current_duplex && - tp->link_config.flowctrl == - tp->link_config.active_flowctrl) { - current_link_up = 1; - } - } - - if (current_link_up == 1 && - tp->link_config.active_duplex == DUPLEX_FULL) - tg3_setup_flow_control(tp, lcl_adv, rmt_adv); - } - -relink: - if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { - tg3_phy_copper_begin(tp); - - tg3_readphy(tp, MII_BMSR, &bmsr); - if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || - (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) - current_link_up = 1; - } - - tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; - if (current_link_up == 1) { - if (tp->link_config.active_speed == SPEED_100 || - tp->link_config.active_speed == SPEED_10) - tp->mac_mode |= MAC_MODE_PORT_MODE_MII; - else - tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; - } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) - tp->mac_mode |= MAC_MODE_PORT_MODE_MII; - else - tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; - - tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; - if (tp->link_config.active_duplex == DUPLEX_HALF) - tp->mac_mode |= MAC_MODE_HALF_DUPLEX; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { - if (current_link_up == 1 && - tg3_5700_link_polarity(tp, tp->link_config.active_speed)) - tp->mac_mode |= MAC_MODE_LINK_POLARITY; - else - tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; - } - - /* ??? Without this setting Netgear GA302T PHY does not - * ??? send/receive packets... 
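The paired tg3_readphy(tp, MII_BMSR, ...) calls above are not redundant: BMSR's link-status bit is latched-low, so the first read flushes a stale link-down event and the second reports the live state. A toy model of that latch (the fake PHY is purely illustrative):

#include <stdint.h>
#include <stdio.h>

#define BMSR_LSTATUS 0x0004	/* standard MII status bit */

/* Fake PHY: link-status is latched-low, i.e. a past link drop keeps the
 * bit clear until the register has been read once. */
static int link_dropped = 1;
static int link_now = 1;

static uint16_t read_bmsr(void)
{
	uint16_t v = (!link_dropped && link_now) ? BMSR_LSTATUS : 0;

	link_dropped = 0;	/* reading clears the latched event */
	return v;
}

int main(void)
{
	/* The driver's back-to-back read pattern: the first read flushes
	 * the latched event, the second reflects the current link. */
	uint16_t first = read_bmsr();
	uint16_t second = read_bmsr();

	printf("first=0x%04x second=0x%04x -> link %s\n", first, second,
	       (second & BMSR_LSTATUS) ? "up" : "down");
	return 0;
}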
- */ - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && - tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { - tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; - tw32_f(MAC_MI_MODE, tp->mi_mode); - udelay(80); - } - - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - tg3_phy_eee_adjust(tp, current_link_up); - - if (tg3_flag(tp, USE_LINKCHG_REG)) { - /* Polled via timer. */ - tw32_f(MAC_EVENT, 0); - } else { - tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); - } - udelay(40); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && - current_link_up == 1 && - tp->link_config.active_speed == SPEED_1000 && - (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { - udelay(120); - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)); - udelay(40); - tg3_write_mem(tp, - NIC_SRAM_FIRMWARE_MBOX, - NIC_SRAM_FIRMWARE_MBOX_MAGIC2); - } - - /* Prevent send BD corruption. */ - if (tg3_flag(tp, CLKREQ_BUG)) { - u16 oldlnkctl, newlnkctl; - - pci_read_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - &oldlnkctl); - if (tp->link_config.active_speed == SPEED_100 || - tp->link_config.active_speed == SPEED_10) - newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; - else - newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; - if (newlnkctl != oldlnkctl) - pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - newlnkctl); - } - - if (current_link_up != netif_carrier_ok(tp->dev)) { - if (current_link_up) - netif_carrier_on(tp->dev); - else - netif_carrier_off(tp->dev); - tg3_link_report(tp); - } - - return 0; -} - -struct tg3_fiber_aneginfo { - int state; -#define ANEG_STATE_UNKNOWN 0 -#define ANEG_STATE_AN_ENABLE 1 -#define ANEG_STATE_RESTART_INIT 2 -#define ANEG_STATE_RESTART 3 -#define ANEG_STATE_DISABLE_LINK_OK 4 -#define ANEG_STATE_ABILITY_DETECT_INIT 5 -#define ANEG_STATE_ABILITY_DETECT 6 -#define ANEG_STATE_ACK_DETECT_INIT 7 -#define ANEG_STATE_ACK_DETECT 8 -#define ANEG_STATE_COMPLETE_ACK_INIT 9 -#define ANEG_STATE_COMPLETE_ACK 10 -#define ANEG_STATE_IDLE_DETECT_INIT 11 -#define ANEG_STATE_IDLE_DETECT 12 -#define ANEG_STATE_LINK_OK 13 -#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 -#define ANEG_STATE_NEXT_PAGE_WAIT 15 - - u32 flags; -#define MR_AN_ENABLE 0x00000001 -#define MR_RESTART_AN 0x00000002 -#define MR_AN_COMPLETE 0x00000004 -#define MR_PAGE_RX 0x00000008 -#define MR_NP_LOADED 0x00000010 -#define MR_TOGGLE_TX 0x00000020 -#define MR_LP_ADV_FULL_DUPLEX 0x00000040 -#define MR_LP_ADV_HALF_DUPLEX 0x00000080 -#define MR_LP_ADV_SYM_PAUSE 0x00000100 -#define MR_LP_ADV_ASYM_PAUSE 0x00000200 -#define MR_LP_ADV_REMOTE_FAULT1 0x00000400 -#define MR_LP_ADV_REMOTE_FAULT2 0x00000800 -#define MR_LP_ADV_NEXT_PAGE 0x00001000 -#define MR_TOGGLE_RX 0x00002000 -#define MR_NP_RX 0x00004000 - -#define MR_LINK_OK 0x80000000 - - unsigned long link_time, cur_time; - - u32 ability_match_cfg; - int ability_match_count; - - char ability_match, idle_match, ack_match; - - u32 txconfig, rxconfig; -#define ANEG_CFG_NP 0x00000080 -#define ANEG_CFG_ACK 0x00000040 -#define ANEG_CFG_RF2 0x00000020 -#define ANEG_CFG_RF1 0x00000010 -#define ANEG_CFG_PS2 0x00000001 -#define ANEG_CFG_PS1 0x00008000 -#define ANEG_CFG_HD 0x00004000 -#define ANEG_CFG_FD 0x00002000 -#define ANEG_CFG_INVAL 0x00001f06 - -}; -#define ANEG_OK 0 -#define ANEG_DONE 1 -#define ANEG_TIMER_ENAB 2 -#define ANEG_FAILED -1 - -#define ANEG_STATE_SETTLE_TIME 10000 - -static int tg3_fiber_aneg_smachine(struct tg3 *tp, - struct tg3_fiber_aneginfo *ap) -{ - u16 flowctrl; - unsigned long delta; - u32 rx_cfg_reg; - 
int ret; - - if (ap->state == ANEG_STATE_UNKNOWN) { - ap->rxconfig = 0; - ap->link_time = 0; - ap->cur_time = 0; - ap->ability_match_cfg = 0; - ap->ability_match_count = 0; - ap->ability_match = 0; - ap->idle_match = 0; - ap->ack_match = 0; - } - ap->cur_time++; - - if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { - rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); - - if (rx_cfg_reg != ap->ability_match_cfg) { - ap->ability_match_cfg = rx_cfg_reg; - ap->ability_match = 0; - ap->ability_match_count = 0; - } else { - if (++ap->ability_match_count > 1) { - ap->ability_match = 1; - ap->ability_match_cfg = rx_cfg_reg; - } - } - if (rx_cfg_reg & ANEG_CFG_ACK) - ap->ack_match = 1; - else - ap->ack_match = 0; - - ap->idle_match = 0; - } else { - ap->idle_match = 1; - ap->ability_match_cfg = 0; - ap->ability_match_count = 0; - ap->ability_match = 0; - ap->ack_match = 0; - - rx_cfg_reg = 0; - } - - ap->rxconfig = rx_cfg_reg; - ret = ANEG_OK; - - switch (ap->state) { - case ANEG_STATE_UNKNOWN: - if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) - ap->state = ANEG_STATE_AN_ENABLE; - - /* fallthru */ - case ANEG_STATE_AN_ENABLE: - ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); - if (ap->flags & MR_AN_ENABLE) { - ap->link_time = 0; - ap->cur_time = 0; - ap->ability_match_cfg = 0; - ap->ability_match_count = 0; - ap->ability_match = 0; - ap->idle_match = 0; - ap->ack_match = 0; - - ap->state = ANEG_STATE_RESTART_INIT; - } else { - ap->state = ANEG_STATE_DISABLE_LINK_OK; - } - break; - - case ANEG_STATE_RESTART_INIT: - ap->link_time = ap->cur_time; - ap->flags &= ~(MR_NP_LOADED); - ap->txconfig = 0; - tw32(MAC_TX_AUTO_NEG, 0); - tp->mac_mode |= MAC_MODE_SEND_CONFIGS; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - ret = ANEG_TIMER_ENAB; - ap->state = ANEG_STATE_RESTART; - - /* fallthru */ - case ANEG_STATE_RESTART: - delta = ap->cur_time - ap->link_time; - if (delta > ANEG_STATE_SETTLE_TIME) - ap->state = ANEG_STATE_ABILITY_DETECT_INIT; - else - ret = ANEG_TIMER_ENAB; - break; - - case ANEG_STATE_DISABLE_LINK_OK: - ret = ANEG_DONE; - break; - - case ANEG_STATE_ABILITY_DETECT_INIT: - ap->flags &= ~(MR_TOGGLE_TX); - ap->txconfig = ANEG_CFG_FD; - flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); - if (flowctrl & ADVERTISE_1000XPAUSE) - ap->txconfig |= ANEG_CFG_PS1; - if (flowctrl & ADVERTISE_1000XPSE_ASYM) - ap->txconfig |= ANEG_CFG_PS2; - tw32(MAC_TX_AUTO_NEG, ap->txconfig); - tp->mac_mode |= MAC_MODE_SEND_CONFIGS; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - ap->state = ANEG_STATE_ABILITY_DETECT; - break; - - case ANEG_STATE_ABILITY_DETECT: - if (ap->ability_match != 0 && ap->rxconfig != 0) - ap->state = ANEG_STATE_ACK_DETECT_INIT; - break; - - case ANEG_STATE_ACK_DETECT_INIT: - ap->txconfig |= ANEG_CFG_ACK; - tw32(MAC_TX_AUTO_NEG, ap->txconfig); - tp->mac_mode |= MAC_MODE_SEND_CONFIGS; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - ap->state = ANEG_STATE_ACK_DETECT; - - /* fallthru */ - case ANEG_STATE_ACK_DETECT: - if (ap->ack_match != 0) { - if ((ap->rxconfig & ~ANEG_CFG_ACK) == - (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { - ap->state = ANEG_STATE_COMPLETE_ACK_INIT; - } else { - ap->state = ANEG_STATE_AN_ENABLE; - } - } else if (ap->ability_match != 0 && - ap->rxconfig == 0) { - ap->state = ANEG_STATE_AN_ENABLE; - } - break; - - case ANEG_STATE_COMPLETE_ACK_INIT: - if (ap->rxconfig & ANEG_CFG_INVAL) { - ret = ANEG_FAILED; - break; - } - ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | - MR_LP_ADV_HALF_DUPLEX | - MR_LP_ADV_SYM_PAUSE | - MR_LP_ADV_ASYM_PAUSE | - MR_LP_ADV_REMOTE_FAULT1 | - 
MR_LP_ADV_REMOTE_FAULT2 | - MR_LP_ADV_NEXT_PAGE | - MR_TOGGLE_RX | - MR_NP_RX); - if (ap->rxconfig & ANEG_CFG_FD) - ap->flags |= MR_LP_ADV_FULL_DUPLEX; - if (ap->rxconfig & ANEG_CFG_HD) - ap->flags |= MR_LP_ADV_HALF_DUPLEX; - if (ap->rxconfig & ANEG_CFG_PS1) - ap->flags |= MR_LP_ADV_SYM_PAUSE; - if (ap->rxconfig & ANEG_CFG_PS2) - ap->flags |= MR_LP_ADV_ASYM_PAUSE; - if (ap->rxconfig & ANEG_CFG_RF1) - ap->flags |= MR_LP_ADV_REMOTE_FAULT1; - if (ap->rxconfig & ANEG_CFG_RF2) - ap->flags |= MR_LP_ADV_REMOTE_FAULT2; - if (ap->rxconfig & ANEG_CFG_NP) - ap->flags |= MR_LP_ADV_NEXT_PAGE; - - ap->link_time = ap->cur_time; - - ap->flags ^= (MR_TOGGLE_TX); - if (ap->rxconfig & 0x0008) - ap->flags |= MR_TOGGLE_RX; - if (ap->rxconfig & ANEG_CFG_NP) - ap->flags |= MR_NP_RX; - ap->flags |= MR_PAGE_RX; - - ap->state = ANEG_STATE_COMPLETE_ACK; - ret = ANEG_TIMER_ENAB; - break; - - case ANEG_STATE_COMPLETE_ACK: - if (ap->ability_match != 0 && - ap->rxconfig == 0) { - ap->state = ANEG_STATE_AN_ENABLE; - break; - } - delta = ap->cur_time - ap->link_time; - if (delta > ANEG_STATE_SETTLE_TIME) { - if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { - ap->state = ANEG_STATE_IDLE_DETECT_INIT; - } else { - if ((ap->txconfig & ANEG_CFG_NP) == 0 && - !(ap->flags & MR_NP_RX)) { - ap->state = ANEG_STATE_IDLE_DETECT_INIT; - } else { - ret = ANEG_FAILED; - } - } - } - break; - - case ANEG_STATE_IDLE_DETECT_INIT: - ap->link_time = ap->cur_time; - tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - ap->state = ANEG_STATE_IDLE_DETECT; - ret = ANEG_TIMER_ENAB; - break; - - case ANEG_STATE_IDLE_DETECT: - if (ap->ability_match != 0 && - ap->rxconfig == 0) { - ap->state = ANEG_STATE_AN_ENABLE; - break; - } - delta = ap->cur_time - ap->link_time; - if (delta > ANEG_STATE_SETTLE_TIME) { - /* XXX another gem from the Broadcom driver :( */ - ap->state = ANEG_STATE_LINK_OK; - } - break; - - case ANEG_STATE_LINK_OK: - ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); - ret = ANEG_DONE; - break; - - case ANEG_STATE_NEXT_PAGE_WAIT_INIT: - /* ??? unimplemented */ - break; - - case ANEG_STATE_NEXT_PAGE_WAIT: - /* ??? unimplemented */ - break; - - default: - ret = ANEG_FAILED; - break; - } - - return ret; -} - -static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) -{ - int res = 0; - struct tg3_fiber_aneginfo aninfo; - int status = ANEG_FAILED; - unsigned int tick; - u32 tmp; - - tw32_f(MAC_TX_AUTO_NEG, 0); - - tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; - tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); - udelay(40); - - tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); - udelay(40); - - memset(&aninfo, 0, sizeof(aninfo)); - aninfo.flags |= MR_AN_ENABLE; - aninfo.state = ANEG_STATE_UNKNOWN; - aninfo.cur_time = 0; - tick = 0; - while (++tick < 195000) { - status = tg3_fiber_aneg_smachine(tp, &aninfo); - if (status == ANEG_DONE || status == ANEG_FAILED) - break; - - udelay(1); - } - - tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - *txflags = aninfo.txconfig; - *rxflags = aninfo.flags; - - if (status == ANEG_DONE && - (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | - MR_LP_ADV_FULL_DUPLEX))) - res = 1; - - return res; -} - -static void tg3_init_bcm8002(struct tg3 *tp) -{ - u32 mac_status = tr32(MAC_STATUS); - int i; - - /* Reset when initting first time or we have a link. */ - if (tg3_flag(tp, INIT_COMPLETE) && - !(mac_status & MAC_STATUS_PCS_SYNCED)) - return; - - /* Set PLL lock range. 
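The ANEG_STATE_* machine above is driven by fiber_autoneg(), which ticks it roughly once per microsecond until it reports done or failed; several states simply wait until ANEG_STATE_SETTLE_TIME ticks have passed since they were entered. A stripped-down, compilable model of that settle-timer pattern (states, timings, and the tick() driver are illustrative, not the driver's):

#include <stdio.h>

enum st { ST_RESTART, ST_DETECT, ST_LINK_OK };

struct machine {
	enum st state;
	unsigned long cur_time, link_time;
};

#define SETTLE_TICKS 5

/* One tick of a settle-timer state machine in the style of
 * tg3_fiber_aneg_smachine(): a settling state stays put until enough
 * ticks have elapsed since it was entered. Returns 1 when done. */
static int tick(struct machine *m, int peer_ready)
{
	m->cur_time++;
	switch (m->state) {
	case ST_RESTART:
		if (m->cur_time - m->link_time > SETTLE_TICKS)
			m->state = ST_DETECT;	/* settled, watch the peer */
		break;
	case ST_DETECT:
		if (peer_ready) {
			m->link_time = m->cur_time;
			m->state = ST_LINK_OK;
		}
		break;
	case ST_LINK_OK:
		return 1;			/* ANEG_DONE analogue */
	}
	return 0;
}

int main(void)
{
	struct machine m = { ST_RESTART, 0, 0 };
	int t;

	for (t = 0; t < 20 && !tick(&m, t > 10); t++)
		;
	printf("done after %d ticks, state=%d\n", t, m.state);
	return 0;
}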
*/ - tg3_writephy(tp, 0x16, 0x8007); - - /* SW reset */ - tg3_writephy(tp, MII_BMCR, BMCR_RESET); - - /* Wait for reset to complete. */ - /* XXX schedule_timeout() ... */ - for (i = 0; i < 500; i++) - udelay(10); - - /* Config mode; select PMA/Ch 1 regs. */ - tg3_writephy(tp, 0x10, 0x8411); - - /* Enable auto-lock and comdet, select txclk for tx. */ - tg3_writephy(tp, 0x11, 0x0a10); - - tg3_writephy(tp, 0x18, 0x00a0); - tg3_writephy(tp, 0x16, 0x41ff); - - /* Assert and deassert POR. */ - tg3_writephy(tp, 0x13, 0x0400); - udelay(40); - tg3_writephy(tp, 0x13, 0x0000); - - tg3_writephy(tp, 0x11, 0x0a50); - udelay(40); - tg3_writephy(tp, 0x11, 0x0a10); - - /* Wait for signal to stabilize */ - /* XXX schedule_timeout() ... */ - for (i = 0; i < 15000; i++) - udelay(10); - - /* Deselect the channel register so we can read the PHYID - * later. - */ - tg3_writephy(tp, 0x10, 0x8011); -} - -static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) -{ - u16 flowctrl; - u32 sg_dig_ctrl, sg_dig_status; - u32 serdes_cfg, expected_sg_dig_ctrl; - int workaround, port_a; - int current_link_up; - - serdes_cfg = 0; - expected_sg_dig_ctrl = 0; - workaround = 0; - port_a = 1; - current_link_up = 0; - - if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && - tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { - workaround = 1; - if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) - port_a = 0; - - /* preserve bits 0-11,13,14 for signal pre-emphasis */ - /* preserve bits 20-23 for voltage regulator */ - serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; - } - - sg_dig_ctrl = tr32(SG_DIG_CTRL); - - if (tp->link_config.autoneg != AUTONEG_ENABLE) { - if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { - if (workaround) { - u32 val = serdes_cfg; - - if (port_a) - val |= 0xc010000; - else - val |= 0x4010000; - tw32_f(MAC_SERDES_CFG, val); - } - - tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); - } - if (mac_status & MAC_STATUS_PCS_SYNCED) { - tg3_setup_flow_control(tp, 0, 0); - current_link_up = 1; - } - goto out; - } - - /* Want auto-negotiation. 
*/ - expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; - - flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); - if (flowctrl & ADVERTISE_1000XPAUSE) - expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; - if (flowctrl & ADVERTISE_1000XPSE_ASYM) - expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; - - if (sg_dig_ctrl != expected_sg_dig_ctrl) { - if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && - tp->serdes_counter && - ((mac_status & (MAC_STATUS_PCS_SYNCED | - MAC_STATUS_RCVD_CFG)) == - MAC_STATUS_PCS_SYNCED)) { - tp->serdes_counter--; - current_link_up = 1; - goto out; - } -restart_autoneg: - if (workaround) - tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); - tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); - udelay(5); - tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); - - tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - } else if (mac_status & (MAC_STATUS_PCS_SYNCED | - MAC_STATUS_SIGNAL_DET)) { - sg_dig_status = tr32(SG_DIG_STATUS); - mac_status = tr32(MAC_STATUS); - - if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && - (mac_status & MAC_STATUS_PCS_SYNCED)) { - u32 local_adv = 0, remote_adv = 0; - - if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) - local_adv |= ADVERTISE_1000XPAUSE; - if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) - local_adv |= ADVERTISE_1000XPSE_ASYM; - - if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) - remote_adv |= LPA_1000XPAUSE; - if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) - remote_adv |= LPA_1000XPAUSE_ASYM; - - tg3_setup_flow_control(tp, local_adv, remote_adv); - current_link_up = 1; - tp->serdes_counter = 0; - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { - if (tp->serdes_counter) - tp->serdes_counter--; - else { - if (workaround) { - u32 val = serdes_cfg; - - if (port_a) - val |= 0xc010000; - else - val |= 0x4010000; - - tw32_f(MAC_SERDES_CFG, val); - } - - tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); - udelay(40); - - /* Link parallel detection - link is up */ - /* only if we have PCS_SYNC and not */ - /* receiving config code words */ - mac_status = tr32(MAC_STATUS); - if ((mac_status & MAC_STATUS_PCS_SYNCED) && - !(mac_status & MAC_STATUS_RCVD_CFG)) { - tg3_setup_flow_control(tp, 0, 0); - current_link_up = 1; - tp->phy_flags |= - TG3_PHYFLG_PARALLEL_DETECT; - tp->serdes_counter = - SERDES_PARALLEL_DET_TIMEOUT; - } else - goto restart_autoneg; - } - } - } else { - tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - } - -out: - return current_link_up; -} - -static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) -{ - int current_link_up = 0; - - if (!(mac_status & MAC_STATUS_PCS_SYNCED)) - goto out; - - if (tp->link_config.autoneg == AUTONEG_ENABLE) { - u32 txflags, rxflags; - int i; - - if (fiber_autoneg(tp, &txflags, &rxflags)) { - u32 local_adv = 0, remote_adv = 0; - - if (txflags & ANEG_CFG_PS1) - local_adv |= ADVERTISE_1000XPAUSE; - if (txflags & ANEG_CFG_PS2) - local_adv |= ADVERTISE_1000XPSE_ASYM; - - if (rxflags & MR_LP_ADV_SYM_PAUSE) - remote_adv |= LPA_1000XPAUSE; - if (rxflags & MR_LP_ADV_ASYM_PAUSE) - remote_adv |= LPA_1000XPAUSE_ASYM; - - tg3_setup_flow_control(tp, local_adv, remote_adv); - - current_link_up = 1; - } - for (i = 0; i < 30; i++) { - udelay(20); - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)); - udelay(40); - if ((tr32(MAC_STATUS) & - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)) == 0) - break; - } - - mac_status = 
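tg3_setup_fiber_hw_autoneg() folds the requested flow control into the SG_DIG advertisement via tg3_advert_flowctrl_1000X(), whose body is not shown in this hunk. The sketch below follows the standard IEEE mapping used by the kernel's generic mii_advertise_flowctrl(), which the tg3 helper is assumed to match:

#include <stdint.h>
#include <stdio.h>

#define ADVERTISE_1000XPAUSE    0x0080	/* standard 1000BASE-X pause bits */
#define ADVERTISE_1000XPSE_ASYM 0x0100
#define FLOW_CTRL_TX 0x01
#define FLOW_CTRL_RX 0x02

/* Map a requested flow-control mode onto the 1000BASE-X pause
 * advertisement bits: RX wants both bits, adding TX flips the
 * asymmetric bit back off so TX+RX advertises symmetric pause only. */
static uint16_t advert_flowctrl_1000x(int flowctrl)
{
	uint16_t adv = 0;

	if (flowctrl & FLOW_CTRL_RX)
		adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	if (flowctrl & FLOW_CTRL_TX)
		adv ^= ADVERTISE_1000XPSE_ASYM;
	return adv;
}

int main(void)
{
	printf("RX only: 0x%04x  TX+RX: 0x%04x\n",
	       advert_flowctrl_1000x(FLOW_CTRL_RX),
	       advert_flowctrl_1000x(FLOW_CTRL_TX | FLOW_CTRL_RX));
	return 0;
}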
tr32(MAC_STATUS); - if (current_link_up == 0 && - (mac_status & MAC_STATUS_PCS_SYNCED) && - !(mac_status & MAC_STATUS_RCVD_CFG)) - current_link_up = 1; - } else { - tg3_setup_flow_control(tp, 0, 0); - - /* Forcing 1000FD link up. */ - current_link_up = 1; - - tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); - udelay(40); - - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - } - -out: - return current_link_up; -} - -static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) -{ - u32 orig_pause_cfg; - u16 orig_active_speed; - u8 orig_active_duplex; - u32 mac_status; - int current_link_up; - int i; - - orig_pause_cfg = tp->link_config.active_flowctrl; - orig_active_speed = tp->link_config.active_speed; - orig_active_duplex = tp->link_config.active_duplex; - - if (!tg3_flag(tp, HW_AUTONEG) && - netif_carrier_ok(tp->dev) && - tg3_flag(tp, INIT_COMPLETE)) { - mac_status = tr32(MAC_STATUS); - mac_status &= (MAC_STATUS_PCS_SYNCED | - MAC_STATUS_SIGNAL_DET | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_RCVD_CFG); - if (mac_status == (MAC_STATUS_PCS_SYNCED | - MAC_STATUS_SIGNAL_DET)) { - tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)); - return 0; - } - } - - tw32_f(MAC_TX_AUTO_NEG, 0); - - tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); - tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - if (tp->phy_id == TG3_PHY_ID_BCM8002) - tg3_init_bcm8002(tp); - - /* Enable link change event even when serdes polling. */ - tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); - udelay(40); - - current_link_up = 0; - mac_status = tr32(MAC_STATUS); - - if (tg3_flag(tp, HW_AUTONEG)) - current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); - else - current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); - - tp->napi[0].hw_status->status = - (SD_STATUS_UPDATED | - (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); - - for (i = 0; i < 100; i++) { - tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)); - udelay(5); - if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_LNKSTATE_CHANGED)) == 0) - break; - } - - mac_status = tr32(MAC_STATUS); - if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { - current_link_up = 0; - if (tp->link_config.autoneg == AUTONEG_ENABLE && - tp->serdes_counter == 0) { - tw32_f(MAC_MODE, (tp->mac_mode | - MAC_MODE_SEND_CONFIGS)); - udelay(1); - tw32_f(MAC_MODE, tp->mac_mode); - } - } - - if (current_link_up == 1) { - tp->link_config.active_speed = SPEED_1000; - tp->link_config.active_duplex = DUPLEX_FULL; - tw32(MAC_LED_CTRL, (tp->led_ctrl | - LED_CTRL_LNKLED_OVERRIDE | - LED_CTRL_1000MBPS_ON)); - } else { - tp->link_config.active_speed = SPEED_INVALID; - tp->link_config.active_duplex = DUPLEX_INVALID; - tw32(MAC_LED_CTRL, (tp->led_ctrl | - LED_CTRL_LNKLED_OVERRIDE | - LED_CTRL_TRAFFIC_OVERRIDE)); - } - - if (current_link_up != netif_carrier_ok(tp->dev)) { - if (current_link_up) - netif_carrier_on(tp->dev); - else - netif_carrier_off(tp->dev); - tg3_link_report(tp); - } else { - u32 now_pause_cfg = tp->link_config.active_flowctrl; - if (orig_pause_cfg != now_pause_cfg || - orig_active_speed != tp->link_config.active_speed || - orig_active_duplex != tp->link_config.active_duplex) - tg3_link_report(tp); - } - - return 0; -} - -static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) -{ - int current_link_up, err = 0; - u32 bmsr, bmcr; - u16 current_speed; - u8 current_duplex; - u32 local_adv, remote_adv; - - tp->mac_mode |= 
MAC_MODE_PORT_MODE_GMII; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - tw32(MAC_EVENT, 0); - - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_MI_COMPLETION | - MAC_STATUS_LNKSTATE_CHANGED)); - udelay(40); - - if (force_reset) - tg3_phy_reset(tp); - - current_link_up = 0; - current_speed = SPEED_INVALID; - current_duplex = DUPLEX_INVALID; - - err |= tg3_readphy(tp, MII_BMSR, &bmsr); - err |= tg3_readphy(tp, MII_BMSR, &bmsr); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { - if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) - bmsr |= BMSR_LSTATUS; - else - bmsr &= ~BMSR_LSTATUS; - } - - err |= tg3_readphy(tp, MII_BMCR, &bmcr); - - if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && - (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { - /* do nothing, just check for link up at the end */ - } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { - u32 adv, new_adv; - - err |= tg3_readphy(tp, MII_ADVERTISE, &adv); - new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | - ADVERTISE_1000XPAUSE | - ADVERTISE_1000XPSE_ASYM | - ADVERTISE_SLCT); - - new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); - - if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) - new_adv |= ADVERTISE_1000XHALF; - if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) - new_adv |= ADVERTISE_1000XFULL; - - if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { - tg3_writephy(tp, MII_ADVERTISE, new_adv); - bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; - tg3_writephy(tp, MII_BMCR, bmcr); - - tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); - tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - - return err; - } - } else { - u32 new_bmcr; - - bmcr &= ~BMCR_SPEED1000; - new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); - - if (tp->link_config.duplex == DUPLEX_FULL) - new_bmcr |= BMCR_FULLDPLX; - - if (new_bmcr != bmcr) { - /* BMCR_SPEED1000 is a reserved bit that needs - * to be set on write. 
- */ - new_bmcr |= BMCR_SPEED1000; - - /* Force a linkdown */ - if (netif_carrier_ok(tp->dev)) { - u32 adv; - - err |= tg3_readphy(tp, MII_ADVERTISE, &adv); - adv &= ~(ADVERTISE_1000XFULL | - ADVERTISE_1000XHALF | - ADVERTISE_SLCT); - tg3_writephy(tp, MII_ADVERTISE, adv); - tg3_writephy(tp, MII_BMCR, bmcr | - BMCR_ANRESTART | - BMCR_ANENABLE); - udelay(10); - netif_carrier_off(tp->dev); - } - tg3_writephy(tp, MII_BMCR, new_bmcr); - bmcr = new_bmcr; - err |= tg3_readphy(tp, MII_BMSR, &bmsr); - err |= tg3_readphy(tp, MII_BMSR, &bmsr); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5714) { - if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) - bmsr |= BMSR_LSTATUS; - else - bmsr &= ~BMSR_LSTATUS; - } - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - } - } - - if (bmsr & BMSR_LSTATUS) { - current_speed = SPEED_1000; - current_link_up = 1; - if (bmcr & BMCR_FULLDPLX) - current_duplex = DUPLEX_FULL; - else - current_duplex = DUPLEX_HALF; - - local_adv = 0; - remote_adv = 0; - - if (bmcr & BMCR_ANENABLE) { - u32 common; - - err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); - err |= tg3_readphy(tp, MII_LPA, &remote_adv); - common = local_adv & remote_adv; - if (common & (ADVERTISE_1000XHALF | - ADVERTISE_1000XFULL)) { - if (common & ADVERTISE_1000XFULL) - current_duplex = DUPLEX_FULL; - else - current_duplex = DUPLEX_HALF; - } else if (!tg3_flag(tp, 5780_CLASS)) { - /* Link is up via parallel detect */ - } else { - current_link_up = 0; - } - } - } - - if (current_link_up == 1 && current_duplex == DUPLEX_FULL) - tg3_setup_flow_control(tp, local_adv, remote_adv); - - tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; - if (tp->link_config.active_duplex == DUPLEX_HALF) - tp->mac_mode |= MAC_MODE_HALF_DUPLEX; - - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); - - tp->link_config.active_speed = current_speed; - tp->link_config.active_duplex = current_duplex; - - if (current_link_up != netif_carrier_ok(tp->dev)) { - if (current_link_up) - netif_carrier_on(tp->dev); - else { - netif_carrier_off(tp->dev); - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - } - tg3_link_report(tp); - } - return err; -} - -static void tg3_serdes_parallel_detect(struct tg3 *tp) -{ - if (tp->serdes_counter) { - /* Give autoneg time to complete. */ - tp->serdes_counter--; - return; - } - - if (!netif_carrier_ok(tp->dev) && - (tp->link_config.autoneg == AUTONEG_ENABLE)) { - u32 bmcr; - - tg3_readphy(tp, MII_BMCR, &bmcr); - if (bmcr & BMCR_ANENABLE) { - u32 phy1, phy2; - - /* Select shadow register 0x1f */ - tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); - tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); - - /* Select expansion interrupt status register */ - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - MII_TG3_DSP_EXP1_INT_STAT); - tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); - tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); - - if ((phy1 & 0x10) && !(phy2 & 0x20)) { - /* We have signal detect and not receiving - * config code words, link is up by parallel - * detection. 
- */ - - bmcr &= ~BMCR_ANENABLE; - bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; - tg3_writephy(tp, MII_BMCR, bmcr); - tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; - } - } - } else if (netif_carrier_ok(tp->dev) && - (tp->link_config.autoneg == AUTONEG_ENABLE) && - (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { - u32 phy2; - - /* Select expansion interrupt status register */ - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - MII_TG3_DSP_EXP1_INT_STAT); - tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); - if (phy2 & 0x20) { - u32 bmcr; - - /* Config code words received, turn on autoneg. */ - tg3_readphy(tp, MII_BMCR, &bmcr); - tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); - - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - - } - } -} - -static int tg3_setup_phy(struct tg3 *tp, int force_reset) -{ - u32 val; - int err; - - if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) - err = tg3_setup_fiber_phy(tp, force_reset); - else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) - err = tg3_setup_fiber_mii_phy(tp, force_reset); - else - err = tg3_setup_copper_phy(tp, force_reset); - - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { - u32 scale; - - val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; - if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) - scale = 65; - else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) - scale = 6; - else - scale = 12; - - val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; - val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); - tw32(GRC_MISC_CFG, val); - } - - val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - val |= tr32(MAC_TX_LENGTHS) & - (TX_LENGTHS_JMB_FRM_LEN_MSK | - TX_LENGTHS_CNT_DWN_VAL_MSK); - - if (tp->link_config.active_speed == SPEED_1000 && - tp->link_config.active_duplex == DUPLEX_HALF) - tw32(MAC_TX_LENGTHS, val | - (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); - else - tw32(MAC_TX_LENGTHS, val | - (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); - - if (!tg3_flag(tp, 5705_PLUS)) { - if (netif_carrier_ok(tp->dev)) { - tw32(HOSTCC_STAT_COAL_TICKS, - tp->coal.stats_block_coalesce_usecs); - } else { - tw32(HOSTCC_STAT_COAL_TICKS, 0); - } - } - - if (tg3_flag(tp, ASPM_WORKAROUND)) { - val = tr32(PCIE_PWR_MGMT_THRESH); - if (!netif_carrier_ok(tp->dev)) - val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | - tp->pwrmgmt_thresh; - else - val |= PCIE_PWR_MGMT_L1_THRESH_MSK; - tw32(PCIE_PWR_MGMT_THRESH, val); - } - - return err; -} - -static inline int tg3_irq_sync(struct tg3 *tp) -{ - return tp->irq_sync; -} - -static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) -{ - int i; - - dst = (u32 *)((u8 *)dst + off); - for (i = 0; i < len; i += sizeof(u32)) - *dst++ = tr32(off + i); -} - -static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) -{ - tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); - tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); - tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); - tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); - tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); - tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); - tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); - tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); - tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); - tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); - tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); - tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); - tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); - tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); - tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); - tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); 
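tg3_rd32_loop() offsets the destination pointer so each register lands at its own offset inside the dump buffer, letting the caller string many block reads into one flat image. A user-space model of that copy (the simulated register file and the rd32() stand-in are mine):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simulated 64-byte register file standing in for MMIO space. */
static uint32_t regfile[16];

static uint32_t rd32(uint32_t off)		/* tr32() stand-in */
{
	return regfile[off / 4];
}

/* Copy a block of registers into a dump buffer at the same offset,
 * mirroring tg3_rd32_loop(): 'off' is both the register offset and the
 * destination offset inside the dump. */
static void rd32_loop(uint32_t *dst, uint32_t off, uint32_t len)
{
	uint32_t i;

	dst = (uint32_t *)((uint8_t *)dst + off);
	for (i = 0; i < len; i += sizeof(uint32_t))
		*dst++ = rd32(off + i);
}

int main(void)
{
	uint32_t dump[16];
	unsigned int i;

	for (i = 0; i < 16; i++)
		regfile[i] = 0x1000 + i;
	memset(dump, 0, sizeof(dump));

	rd32_loop(dump, 0x10, 0x08);	/* two registers at offset 0x10 */
	printf("dump[4]=0x%x dump[5]=0x%x\n", dump[4], dump[5]);
	return 0;
}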
- tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); - tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); - tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); - - if (tg3_flag(tp, SUPPORT_MSIX)) - tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); - - tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); - tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); - tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); - tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); - tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); - tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); - tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); - tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); - - if (!tg3_flag(tp, 5705_PLUS)) { - tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); - tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); - tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); - } - - tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); - tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); - tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); - tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); - tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); - - if (tg3_flag(tp, NVRAM)) - tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); -} - -static void tg3_dump_state(struct tg3 *tp) -{ - int i; - u32 *regs; - - regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); - if (!regs) { - netdev_err(tp->dev, "Failed allocating register dump buffer\n"); - return; - } - - if (tg3_flag(tp, PCI_EXPRESS)) { - /* Read up to but not including private PCI registers */ - for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) - regs[i / sizeof(u32)] = tr32(i); - } else - tg3_dump_legacy_regs(tp, regs); - - for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { - if (!regs[i + 0] && !regs[i + 1] && - !regs[i + 2] && !regs[i + 3]) - continue; - - netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", - i * 4, - regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); - } - - kfree(regs); - - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - - /* SW status block */ - netdev_err(tp->dev, - "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", - i, - tnapi->hw_status->status, - tnapi->hw_status->status_tag, - tnapi->hw_status->rx_jumbo_consumer, - tnapi->hw_status->rx_consumer, - tnapi->hw_status->rx_mini_consumer, - tnapi->hw_status->idx[0].rx_producer, - tnapi->hw_status->idx[0].tx_consumer); - - netdev_err(tp->dev, - "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", - i, - tnapi->last_tag, tnapi->last_irq_tag, - tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, - tnapi->rx_rcb_ptr, - tnapi->prodring.rx_std_prod_idx, - tnapi->prodring.rx_std_cons_idx, - tnapi->prodring.rx_jmb_prod_idx, - tnapi->prodring.rx_jmb_cons_idx); - } -} - -/* This is called whenever we suspect that the system chipset is re- - * ordering the sequence of MMIO to the tx send mailbox. The symptom - * is bogus tx completions. We try to recover by setting the - * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later - * in the workqueue. - */ -static void tg3_tx_recover(struct tg3 *tp) -{ - BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || - tp->write32_tx_mbox == tg3_write_indirect_mbox); - - netdev_warn(tp->dev, - "The system may be re-ordering memory-mapped I/O " - "cycles to the network device, attempting to recover. 
" - "Please report the problem to the driver maintainer " - "and include system chipset information.\n"); - - spin_lock(&tp->lock); - tg3_flag_set(tp, TX_RECOVERY_PENDING); - spin_unlock(&tp->lock); -} - -static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) -{ - /* Tell compiler to fetch tx indices from memory. */ - barrier(); - return tnapi->tx_pending - - ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); -} - -/* Tigon3 never reports partial packet sends. So we do not - * need special logic to handle SKBs that have not had all - * of their frags sent yet, like SunGEM does. - */ -static void tg3_tx(struct tg3_napi *tnapi) -{ - struct tg3 *tp = tnapi->tp; - u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; - u32 sw_idx = tnapi->tx_cons; - struct netdev_queue *txq; - int index = tnapi - tp->napi; - - if (tg3_flag(tp, ENABLE_TSS)) - index--; - - txq = netdev_get_tx_queue(tp->dev, index); - - while (sw_idx != hw_idx) { - struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; - struct sk_buff *skb = ri->skb; - int i, tx_bug = 0; - - if (unlikely(skb == NULL)) { - tg3_tx_recover(tp); - return; - } - - pci_unmap_single(tp->pdev, - dma_unmap_addr(ri, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); - - ri->skb = NULL; - - while (ri->fragmented) { - ri->fragmented = false; - sw_idx = NEXT_TX(sw_idx); - ri = &tnapi->tx_buffers[sw_idx]; - } - - sw_idx = NEXT_TX(sw_idx); - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - ri = &tnapi->tx_buffers[sw_idx]; - if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) - tx_bug = 1; - - pci_unmap_page(tp->pdev, - dma_unmap_addr(ri, mapping), - skb_shinfo(skb)->frags[i].size, - PCI_DMA_TODEVICE); - - while (ri->fragmented) { - ri->fragmented = false; - sw_idx = NEXT_TX(sw_idx); - ri = &tnapi->tx_buffers[sw_idx]; - } - - sw_idx = NEXT_TX(sw_idx); - } - - dev_kfree_skb(skb); - - if (unlikely(tx_bug)) { - tg3_tx_recover(tp); - return; - } - } - - tnapi->tx_cons = sw_idx; - - /* Need to make the tx_cons update visible to tg3_start_xmit() - * before checking for netif_queue_stopped(). Without the - * memory barrier, there is a small possibility that tg3_start_xmit() - * will miss it and cause the queue to be stopped forever. - */ - smp_mb(); - - if (unlikely(netif_tx_queue_stopped(txq) && - (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { - __netif_tx_lock(txq, smp_processor_id()); - if (netif_tx_queue_stopped(txq) && - (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) - netif_tx_wake_queue(txq); - __netif_tx_unlock(txq); - } -} - -static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) -{ - if (!ri->skb) - return; - - pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), - map_sz, PCI_DMA_FROMDEVICE); - dev_kfree_skb_any(ri->skb); - ri->skb = NULL; -} - -/* Returns size of skb allocated or < 0 on error. - * - * We only need to fill in the address because the other members - * of the RX descriptor are invariant, see tg3_init_rings. - * - * Note the purposeful assymetry of cpu vs. chip accesses. For - * posting buffers we only dirty the first cache line of the RX - * descriptor (containing the address). Whereas for the RX status - * buffers the cpu only reads the last cacheline of the RX descriptor - * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 
- */ -static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, - u32 opaque_key, u32 dest_idx_unmasked) -{ - struct tg3_rx_buffer_desc *desc; - struct ring_info *map; - struct sk_buff *skb; - dma_addr_t mapping; - int skb_size, dest_idx; - - switch (opaque_key) { - case RXD_OPAQUE_RING_STD: - dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; - desc = &tpr->rx_std[dest_idx]; - map = &tpr->rx_std_buffers[dest_idx]; - skb_size = tp->rx_pkt_map_sz; - break; - - case RXD_OPAQUE_RING_JUMBO: - dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; - desc = &tpr->rx_jmb[dest_idx].std; - map = &tpr->rx_jmb_buffers[dest_idx]; - skb_size = TG3_RX_JMB_MAP_SZ; - break; - - default: - return -EINVAL; - } - - /* Do not overwrite any of the map or rp information - * until we are sure we can commit to a new buffer. - * - * Callers depend upon this behavior and assume that - * we leave everything unchanged if we fail. - */ - skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset); - if (skb == NULL) - return -ENOMEM; - - skb_reserve(skb, tp->rx_offset); - - mapping = pci_map_single(tp->pdev, skb->data, skb_size, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(tp->pdev, mapping)) { - dev_kfree_skb(skb); - return -EIO; - } - - map->skb = skb; - dma_unmap_addr_set(map, mapping, mapping); - - desc->addr_hi = ((u64)mapping >> 32); - desc->addr_lo = ((u64)mapping & 0xffffffff); - - return skb_size; -} - -/* We only need to move over in the address because the other - * members of the RX descriptor are invariant. See notes above - * tg3_alloc_rx_skb for full details. - */ -static void tg3_recycle_rx(struct tg3_napi *tnapi, - struct tg3_rx_prodring_set *dpr, - u32 opaque_key, int src_idx, - u32 dest_idx_unmasked) -{ - struct tg3 *tp = tnapi->tp; - struct tg3_rx_buffer_desc *src_desc, *dest_desc; - struct ring_info *src_map, *dest_map; - struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; - int dest_idx; - - switch (opaque_key) { - case RXD_OPAQUE_RING_STD: - dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; - dest_desc = &dpr->rx_std[dest_idx]; - dest_map = &dpr->rx_std_buffers[dest_idx]; - src_desc = &spr->rx_std[src_idx]; - src_map = &spr->rx_std_buffers[src_idx]; - break; - - case RXD_OPAQUE_RING_JUMBO: - dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; - dest_desc = &dpr->rx_jmb[dest_idx].std; - dest_map = &dpr->rx_jmb_buffers[dest_idx]; - src_desc = &spr->rx_jmb[src_idx].std; - src_map = &spr->rx_jmb_buffers[src_idx]; - break; - - default: - return; - } - - dest_map->skb = src_map->skb; - dma_unmap_addr_set(dest_map, mapping, - dma_unmap_addr(src_map, mapping)); - dest_desc->addr_hi = src_desc->addr_hi; - dest_desc->addr_lo = src_desc->addr_lo; - - /* Ensure that the update to the skb happens after the physical - * addresses have been transferred to the new BD location. - */ - smp_wmb(); - - src_map->skb = NULL; -} - -/* The RX ring scheme is composed of multiple rings which post fresh - * buffers to the chip, and one special ring the chip uses to report - * status back to the host. - * - * The special ring reports the status of received packets to the - * host. The chip does not write into the original descriptor the - * RX buffer was obtained from. The chip simply takes the original - * descriptor as provided by the host, updates the status and length - * field, then writes this into the next status ring entry. - * - * Each ring the host uses to post buffers to the chip is described - * by a TG3_BDINFO entry in the chips SRAM area. 
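As the comment above notes, tg3_alloc_rx_skb() only ever dirties the address words of a posted descriptor, splitting the 64-bit DMA mapping across two 32-bit fields. A minimal sketch of that split (the struct is reduced to the two words in question):

#include <stdint.h>
#include <stdio.h>

/* The two address words of an RX buffer descriptor. */
struct rx_bd {
	uint32_t addr_hi;
	uint32_t addr_lo;
};

/* Split a 64-bit DMA address the way tg3_alloc_rx_skb() fills the BD. */
static void bd_set_addr(struct rx_bd *bd, uint64_t mapping)
{
	bd->addr_hi = (uint32_t)(mapping >> 32);
	bd->addr_lo = (uint32_t)(mapping & 0xffffffffu);
}

int main(void)
{
	struct rx_bd bd;

	bd_set_addr(&bd, 0x0000000123456789ull);
	printf("hi=0x%08x lo=0x%08x\n", bd.addr_hi, bd.addr_lo);
	return 0;
}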
When a packet arrives, - * it is first placed into the on-chip ram. When the packet's length - * is known, it walks down the TG3_BDINFO entries to select the ring. - * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO - * which is within the range of the new packet's length is chosen. - * - * The "separate ring for rx status" scheme may sound queer, but it makes - * sense from a cache coherency perspective. If only the host writes - * to the buffer post rings, and only the chip writes to the rx status - * rings, then cache lines never move beyond shared-modified state. - * If both the host and chip were to write into the same ring, cache line - * eviction could occur since both entities want it in an exclusive state. - */ -static int tg3_rx(struct tg3_napi *tnapi, int budget) -{ - struct tg3 *tp = tnapi->tp; - u32 work_mask, rx_std_posted = 0; - u32 std_prod_idx, jmb_prod_idx; - u32 sw_idx = tnapi->rx_rcb_ptr; - u16 hw_idx; - int received; - struct tg3_rx_prodring_set *tpr = &tnapi->prodring; - - hw_idx = *(tnapi->rx_rcb_prod_idx); - /* - * We need to order the read of hw_idx and the read of - * the opaque cookie. - */ - rmb(); - work_mask = 0; - received = 0; - std_prod_idx = tpr->rx_std_prod_idx; - jmb_prod_idx = tpr->rx_jmb_prod_idx; - while (sw_idx != hw_idx && budget > 0) { - struct ring_info *ri; - struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; - unsigned int len; - struct sk_buff *skb; - dma_addr_t dma_addr; - u32 opaque_key, desc_idx, *post_ptr; - - desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; - opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; - if (opaque_key == RXD_OPAQUE_RING_STD) { - ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; - dma_addr = dma_unmap_addr(ri, mapping); - skb = ri->skb; - post_ptr = &std_prod_idx; - rx_std_posted++; - } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { - ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; - dma_addr = dma_unmap_addr(ri, mapping); - skb = ri->skb; - post_ptr = &jmb_prod_idx; - } else - goto next_pkt_nopost; - - work_mask |= opaque_key; - - if ((desc->err_vlan & RXD_ERR_MASK) != 0 && - (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { - drop_it: - tg3_recycle_rx(tnapi, tpr, opaque_key, - desc_idx, *post_ptr); - drop_it_no_recycle: - /* Other statistics kept track of by card. */ - tp->rx_dropped++; - goto next_pkt; - } - - len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - - ETH_FCS_LEN; - - if (len > TG3_RX_COPY_THRESH(tp)) { - int skb_size; - - skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, - *post_ptr); - if (skb_size < 0) - goto drop_it; - - pci_unmap_single(tp->pdev, dma_addr, skb_size, - PCI_DMA_FROMDEVICE); - - /* Ensure that the update to the skb happens - * after the usage of the old DMA mapping. - */ - smp_wmb(); - - ri->skb = NULL; - - skb_put(skb, len); - } else { - struct sk_buff *copy_skb; - - tg3_recycle_rx(tnapi, tpr, opaque_key, - desc_idx, *post_ptr); - - copy_skb = netdev_alloc_skb(tp->dev, len + - TG3_RAW_IP_ALIGN); - if (copy_skb == NULL) - goto drop_it_no_recycle; - - skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); - skb_put(copy_skb, len); - pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); - skb_copy_from_linear_data(skb, copy_skb->data, len); - pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); - - /* We'll reuse the original ring buffer. 
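The hw_idx read followed by rmb() in tg3_rx() enforces the producer/consumer contract: the host must not read ring entries announced by the index before the entries themselves are visible. The same ordering expressed with C11 atomics, as a software-only analogue of the driver's MMIO case:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING 256

/* Status ring shared with a producer (the NIC, in the real driver). */
static uint32_t ring[RING];
static _Atomic uint32_t prod_idx;

/* Consumer side: an acquire load of the producer index plays the role
 * of tg3_rx()'s rmb() after reading *rx_rcb_prod_idx. */
static void consume(uint32_t *cons)
{
	uint32_t hw = atomic_load_explicit(&prod_idx, memory_order_acquire);

	while (*cons != hw) {
		printf("entry %u = 0x%x\n", *cons, ring[*cons % RING]);
		(*cons)++;
	}
}

int main(void)
{
	uint32_t cons = 0;

	ring[0] = 0xabcd;			/* producer fills the entry... */
	atomic_store_explicit(&prod_idx, 1,	/* ...then publishes the index */
			      memory_order_release);
	consume(&cons);
	return 0;
}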
*/ - skb = copy_skb; - } - - if ((tp->dev->features & NETIF_F_RXCSUM) && - (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && - (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) - >> RXD_TCPCSUM_SHIFT) == 0xffff)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb_checksum_none_assert(skb); - - skb->protocol = eth_type_trans(skb, tp->dev); - - if (len > (tp->dev->mtu + ETH_HLEN) && - skb->protocol != htons(ETH_P_8021Q)) { - dev_kfree_skb(skb); - goto drop_it_no_recycle; - } - - if (desc->type_flags & RXD_FLAG_VLAN && - !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) - __vlan_hwaccel_put_tag(skb, - desc->err_vlan & RXD_VLAN_MASK); - - napi_gro_receive(&tnapi->napi, skb); - - received++; - budget--; - -next_pkt: - (*post_ptr)++; - - if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { - tpr->rx_std_prod_idx = std_prod_idx & - tp->rx_std_ring_mask; - tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, - tpr->rx_std_prod_idx); - work_mask &= ~RXD_OPAQUE_RING_STD; - rx_std_posted = 0; - } -next_pkt_nopost: - sw_idx++; - sw_idx &= tp->rx_ret_ring_mask; - - /* Refresh hw_idx to see if there is new work */ - if (sw_idx == hw_idx) { - hw_idx = *(tnapi->rx_rcb_prod_idx); - rmb(); - } - } - - /* ACK the status ring. */ - tnapi->rx_rcb_ptr = sw_idx; - tw32_rx_mbox(tnapi->consmbox, sw_idx); - - /* Refill RX ring(s). */ - if (!tg3_flag(tp, ENABLE_RSS)) { - if (work_mask & RXD_OPAQUE_RING_STD) { - tpr->rx_std_prod_idx = std_prod_idx & - tp->rx_std_ring_mask; - tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, - tpr->rx_std_prod_idx); - } - if (work_mask & RXD_OPAQUE_RING_JUMBO) { - tpr->rx_jmb_prod_idx = jmb_prod_idx & - tp->rx_jmb_ring_mask; - tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, - tpr->rx_jmb_prod_idx); - } - mmiowb(); - } else if (work_mask) { - /* rx_std_buffers[] and rx_jmb_buffers[] entries must be - * updated before the producer indices can be updated. - */ - smp_wmb(); - - tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; - tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; - - if (tnapi != &tp->napi[1]) - napi_schedule(&tp->napi[1].napi); - } - - return received; -} - -static void tg3_poll_link(struct tg3 *tp) -{ - /* handle link change and other phy events */ - if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { - struct tg3_hw_status *sblk = tp->napi[0].hw_status; - - if (sblk->status & SD_STATUS_LINK_CHG) { - sblk->status = SD_STATUS_UPDATED | - (sblk->status & ~SD_STATUS_LINK_CHG); - spin_lock(&tp->lock); - if (tg3_flag(tp, USE_PHYLIB)) { - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_MI_COMPLETION | - MAC_STATUS_LNKSTATE_CHANGED)); - udelay(40); - } else - tg3_setup_phy(tp, 0); - spin_unlock(&tp->lock); - } - } -} - -static int tg3_rx_prodring_xfer(struct tg3 *tp, - struct tg3_rx_prodring_set *dpr, - struct tg3_rx_prodring_set *spr) -{ - u32 si, di, cpycnt, src_prod_idx; - int i, err = 0; - - while (1) { - src_prod_idx = spr->rx_std_prod_idx; - - /* Make sure updates to the rx_std_buffers[] entries and the - * standard producer index are seen in the correct order. 
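-		 *
-		 * (Index arithmetic below, sketched with an assumed
-		 * 512-entry ring: cons_idx = 500 and prod_idx = 10 give
-		 * cpycnt = 512 - 500 = 12 on the first pass; cons_idx
-		 * then wraps to 0 and the outer loop comes around for
-		 * the remaining 10 entries.)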
- */ - smp_rmb(); - - if (spr->rx_std_cons_idx == src_prod_idx) - break; - - if (spr->rx_std_cons_idx < src_prod_idx) - cpycnt = src_prod_idx - spr->rx_std_cons_idx; - else - cpycnt = tp->rx_std_ring_mask + 1 - - spr->rx_std_cons_idx; - - cpycnt = min(cpycnt, - tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); - - si = spr->rx_std_cons_idx; - di = dpr->rx_std_prod_idx; - - for (i = di; i < di + cpycnt; i++) { - if (dpr->rx_std_buffers[i].skb) { - cpycnt = i - di; - err = -ENOSPC; - break; - } - } - - if (!cpycnt) - break; - - /* Ensure that updates to the rx_std_buffers ring and the - * shadowed hardware producer ring from tg3_recycle_skb() are - * ordered correctly WRT the skb check above. - */ - smp_rmb(); - - memcpy(&dpr->rx_std_buffers[di], - &spr->rx_std_buffers[si], - cpycnt * sizeof(struct ring_info)); - - for (i = 0; i < cpycnt; i++, di++, si++) { - struct tg3_rx_buffer_desc *sbd, *dbd; - sbd = &spr->rx_std[si]; - dbd = &dpr->rx_std[di]; - dbd->addr_hi = sbd->addr_hi; - dbd->addr_lo = sbd->addr_lo; - } - - spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & - tp->rx_std_ring_mask; - dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & - tp->rx_std_ring_mask; - } - - while (1) { - src_prod_idx = spr->rx_jmb_prod_idx; - - /* Make sure updates to the rx_jmb_buffers[] entries and - * the jumbo producer index are seen in the correct order. - */ - smp_rmb(); - - if (spr->rx_jmb_cons_idx == src_prod_idx) - break; - - if (spr->rx_jmb_cons_idx < src_prod_idx) - cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; - else - cpycnt = tp->rx_jmb_ring_mask + 1 - - spr->rx_jmb_cons_idx; - - cpycnt = min(cpycnt, - tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); - - si = spr->rx_jmb_cons_idx; - di = dpr->rx_jmb_prod_idx; - - for (i = di; i < di + cpycnt; i++) { - if (dpr->rx_jmb_buffers[i].skb) { - cpycnt = i - di; - err = -ENOSPC; - break; - } - } - - if (!cpycnt) - break; - - /* Ensure that updates to the rx_jmb_buffers ring and the - * shadowed hardware producer ring from tg3_recycle_skb() are - * ordered correctly WRT the skb check above. - */ - smp_rmb(); - - memcpy(&dpr->rx_jmb_buffers[di], - &spr->rx_jmb_buffers[si], - cpycnt * sizeof(struct ring_info)); - - for (i = 0; i < cpycnt; i++, di++, si++) { - struct tg3_rx_buffer_desc *sbd, *dbd; - sbd = &spr->rx_jmb[si].std; - dbd = &dpr->rx_jmb[di].std; - dbd->addr_hi = sbd->addr_hi; - dbd->addr_lo = sbd->addr_lo; - } - - spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & - tp->rx_jmb_ring_mask; - dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & - tp->rx_jmb_ring_mask; - } - - return err; -} - -static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) -{ - struct tg3 *tp = tnapi->tp; - - /* run TX completion thread */ - if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { - tg3_tx(tnapi); - if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) - return work_done; - } - - /* run RX thread, within the bounds set by NAPI. 
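-	 * (e.g. with budget = 64 and work_done = 10 from an earlier
-	 * pass of the caller's poll loop, the tg3_rx() call below may
-	 * consume at most budget - work_done = 54 packets before this
-	 * routine must yield back to NAPI)
-	 *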
- * All RX "locking" is done by ensuring outside - * code synchronizes with tg3->napi.poll() - */ - if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) - work_done += tg3_rx(tnapi, budget - work_done); - - if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { - struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; - int i, err = 0; - u32 std_prod_idx = dpr->rx_std_prod_idx; - u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; - - for (i = 1; i < tp->irq_cnt; i++) - err |= tg3_rx_prodring_xfer(tp, dpr, - &tp->napi[i].prodring); - - wmb(); - - if (std_prod_idx != dpr->rx_std_prod_idx) - tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, - dpr->rx_std_prod_idx); - - if (jmb_prod_idx != dpr->rx_jmb_prod_idx) - tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, - dpr->rx_jmb_prod_idx); - - mmiowb(); - - if (err) - tw32_f(HOSTCC_MODE, tp->coal_now); - } - - return work_done; -} - -static int tg3_poll_msix(struct napi_struct *napi, int budget) -{ - struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); - struct tg3 *tp = tnapi->tp; - int work_done = 0; - struct tg3_hw_status *sblk = tnapi->hw_status; - - while (1) { - work_done = tg3_poll_work(tnapi, work_done, budget); - - if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) - goto tx_recovery; - - if (unlikely(work_done >= budget)) - break; - - /* tp->last_tag is used in tg3_int_reenable() below - * to tell the hw how much work has been processed, - * so we must read it before checking for more work. - */ - tnapi->last_tag = sblk->status_tag; - tnapi->last_irq_tag = tnapi->last_tag; - rmb(); - - /* check for RX/TX work to do */ - if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && - *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { - napi_complete(napi); - /* Reenable interrupts. */ - tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); - mmiowb(); - break; - } - } - - return work_done; - -tx_recovery: - /* work_done is guaranteed to be less than budget. */ - napi_complete(napi); - schedule_work(&tp->reset_task); - return work_done; -} - -static void tg3_process_error(struct tg3 *tp) -{ - u32 val; - bool real_error = false; - - if (tg3_flag(tp, ERROR_PROCESSED)) - return; - - /* Check Flow Attention register */ - val = tr32(HOSTCC_FLOW_ATTN); - if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { - netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); - real_error = true; - } - - if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { - netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); - real_error = true; - } - - if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { - netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); - real_error = true; - } - - if (!real_error) - return; - - tg3_dump_state(tp); - - tg3_flag_set(tp, ERROR_PROCESSED); - schedule_work(&tp->reset_task); -} - -static int tg3_poll(struct napi_struct *napi, int budget) -{ - struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); - struct tg3 *tp = tnapi->tp; - int work_done = 0; - struct tg3_hw_status *sblk = tnapi->hw_status; - - while (1) { - if (sblk->status & SD_STATUS_ERROR) - tg3_process_error(tp); - - tg3_poll_link(tp); - - work_done = tg3_poll_work(tnapi, work_done, budget); - - if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) - goto tx_recovery; - - if (unlikely(work_done >= budget)) - break; - - if (tg3_flag(tp, TAGGED_STATUS)) { - /* tp->last_tag is used in tg3_int_reenable() below - * to tell the hw how much work has been processed, - * so we must read it before checking for more work. 
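-			 *
-			 * (If the tag were read after the work check
-			 * instead, a packet arriving in between would
-			 * advance status_tag; writing that newer tag back
-			 * on reenable would then ack work this poll never
-			 * processed and could leave it stranded.)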
- */ - tnapi->last_tag = sblk->status_tag; - tnapi->last_irq_tag = tnapi->last_tag; - rmb(); - } else - sblk->status &= ~SD_STATUS_UPDATED; - - if (likely(!tg3_has_work(tnapi))) { - napi_complete(napi); - tg3_int_reenable(tnapi); - break; - } - } - - return work_done; - -tx_recovery: - /* work_done is guaranteed to be less than budget. */ - napi_complete(napi); - schedule_work(&tp->reset_task); - return work_done; -} - -static void tg3_napi_disable(struct tg3 *tp) -{ - int i; - - for (i = tp->irq_cnt - 1; i >= 0; i--) - napi_disable(&tp->napi[i].napi); -} - -static void tg3_napi_enable(struct tg3 *tp) -{ - int i; - - for (i = 0; i < tp->irq_cnt; i++) - napi_enable(&tp->napi[i].napi); -} - -static void tg3_napi_init(struct tg3 *tp) -{ - int i; - - netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); - for (i = 1; i < tp->irq_cnt; i++) - netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); -} - -static void tg3_napi_fini(struct tg3 *tp) -{ - int i; - - for (i = 0; i < tp->irq_cnt; i++) - netif_napi_del(&tp->napi[i].napi); -} - -static inline void tg3_netif_stop(struct tg3 *tp) -{ - tp->dev->trans_start = jiffies; /* prevent tx timeout */ - tg3_napi_disable(tp); - netif_tx_disable(tp->dev); -} - -static inline void tg3_netif_start(struct tg3 *tp) -{ - /* NOTE: unconditional netif_tx_wake_all_queues is only - * appropriate so long as all callers are assured to - * have free tx slots (such as after tg3_init_hw) - */ - netif_tx_wake_all_queues(tp->dev); - - tg3_napi_enable(tp); - tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; - tg3_enable_ints(tp); -} - -static void tg3_irq_quiesce(struct tg3 *tp) -{ - int i; - - BUG_ON(tp->irq_sync); - - tp->irq_sync = 1; - smp_mb(); - - for (i = 0; i < tp->irq_cnt; i++) - synchronize_irq(tp->napi[i].irq_vec); -} - -/* Fully shutdown all tg3 driver activity elsewhere in the system. - * If irq_sync is non-zero, then the IRQ handler must be synchronized - * with as well. Most of the time, this is not necessary except when - * shutting down the device. - */ -static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) -{ - spin_lock_bh(&tp->lock); - if (irq_sync) - tg3_irq_quiesce(tp); -} - -static inline void tg3_full_unlock(struct tg3 *tp) -{ - spin_unlock_bh(&tp->lock); -} - -/* One-shot MSI handler - Chip automatically disables interrupt - * after sending MSI so driver doesn't have to do it. - */ -static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) -{ - struct tg3_napi *tnapi = dev_id; - struct tg3 *tp = tnapi->tp; - - prefetch(tnapi->hw_status); - if (tnapi->rx_rcb) - prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); - - if (likely(!tg3_irq_sync(tp))) - napi_schedule(&tnapi->napi); - - return IRQ_HANDLED; -} - -/* MSI ISR - No need to check for interrupt sharing and no need to - * flush status block and interrupt mailbox. PCI ordering rules - * guarantee that MSI will arrive after the status block. - */ -static irqreturn_t tg3_msi(int irq, void *dev_id) -{ - struct tg3_napi *tnapi = dev_id; - struct tg3 *tp = tnapi->tp; - - prefetch(tnapi->hw_status); - if (tnapi->rx_rcb) - prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); - /* - * Writing any value to intr-mbox-0 clears PCI INTA# and - * chip-internal interrupt pending events. - * Writing non-zero to intr-mbox-0 additional tells the - * NIC to stop sending us irqs, engaging "in-intr-handler" - * event coalescing. 
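-	 *
-	 * (So the single 0x00000001 write below both acks and masks;
-	 * the irq stays masked until the NAPI poll path re-arms the
-	 * mailbox, roughly
-	 *	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
-	 * as done in tg3_poll_msix() above.)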
- */ - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); - if (likely(!tg3_irq_sync(tp))) - napi_schedule(&tnapi->napi); - - return IRQ_RETVAL(1); -} - -static irqreturn_t tg3_interrupt(int irq, void *dev_id) -{ - struct tg3_napi *tnapi = dev_id; - struct tg3 *tp = tnapi->tp; - struct tg3_hw_status *sblk = tnapi->hw_status; - unsigned int handled = 1; - - /* In INTx mode, it is possible for the interrupt to arrive at - * the CPU before the status block posted prior to the interrupt. - * Reading the PCI State register will confirm whether the - * interrupt is ours and will flush the status block. - */ - if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { - if (tg3_flag(tp, CHIP_RESETTING) || - (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { - handled = 0; - goto out; - } - } - - /* - * Writing any value to intr-mbox-0 clears PCI INTA# and - * chip-internal interrupt pending events. - * Writing non-zero to intr-mbox-0 additional tells the - * NIC to stop sending us irqs, engaging "in-intr-handler" - * event coalescing. - * - * Flush the mailbox to de-assert the IRQ immediately to prevent - * spurious interrupts. The flush impacts performance but - * excessive spurious interrupts can be worse in some cases. - */ - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); - if (tg3_irq_sync(tp)) - goto out; - sblk->status &= ~SD_STATUS_UPDATED; - if (likely(tg3_has_work(tnapi))) { - prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); - napi_schedule(&tnapi->napi); - } else { - /* No work, shared interrupt perhaps? re-enable - * interrupts, and flush that PCI write - */ - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - 0x00000000); - } -out: - return IRQ_RETVAL(handled); -} - -static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) -{ - struct tg3_napi *tnapi = dev_id; - struct tg3 *tp = tnapi->tp; - struct tg3_hw_status *sblk = tnapi->hw_status; - unsigned int handled = 1; - - /* In INTx mode, it is possible for the interrupt to arrive at - * the CPU before the status block posted prior to the interrupt. - * Reading the PCI State register will confirm whether the - * interrupt is ours and will flush the status block. - */ - if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { - if (tg3_flag(tp, CHIP_RESETTING) || - (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { - handled = 0; - goto out; - } - } - - /* - * writing any value to intr-mbox-0 clears PCI INTA# and - * chip-internal interrupt pending events. - * writing non-zero to intr-mbox-0 additional tells the - * NIC to stop sending us irqs, engaging "in-intr-handler" - * event coalescing. - * - * Flush the mailbox to de-assert the IRQ immediately to prevent - * spurious interrupts. The flush impacts performance but - * excessive spurious interrupts can be worse in some cases. - */ - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); - - /* - * In a shared interrupt configuration, sometimes other devices' - * interrupts will scream. We record the current status tag here - * so that the above check can report that the screaming interrupts - * are unhandled. Eventually they will be silenced. 
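-	 *
-	 * (Concretely: on the next spurious pass status_tag matches
-	 * last_irq_tag, the check at the top returns handled == 0,
-	 * and after enough unhandled returns the irq core disables
-	 * the offending line.)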
- */ - tnapi->last_irq_tag = sblk->status_tag; - - if (tg3_irq_sync(tp)) - goto out; - - prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); - - napi_schedule(&tnapi->napi); - -out: - return IRQ_RETVAL(handled); -} - -/* ISR for interrupt test */ -static irqreturn_t tg3_test_isr(int irq, void *dev_id) -{ - struct tg3_napi *tnapi = dev_id; - struct tg3 *tp = tnapi->tp; - struct tg3_hw_status *sblk = tnapi->hw_status; - - if ((sblk->status & SD_STATUS_UPDATED) || - !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { - tg3_disable_ints(tp); - return IRQ_RETVAL(1); - } - return IRQ_RETVAL(0); -} - -static int tg3_init_hw(struct tg3 *, int); -static int tg3_halt(struct tg3 *, int, int); - -/* Restart hardware after configuration changes, self-test, etc. - * Invoked with tp->lock held. - */ -static int tg3_restart_hw(struct tg3 *tp, int reset_phy) - __releases(tp->lock) - __acquires(tp->lock) -{ - int err; - - err = tg3_init_hw(tp, reset_phy); - if (err) { - netdev_err(tp->dev, - "Failed to re-initialize device, aborting\n"); - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_full_unlock(tp); - del_timer_sync(&tp->timer); - tp->irq_sync = 0; - tg3_napi_enable(tp); - dev_close(tp->dev); - tg3_full_lock(tp, 0); - } - return err; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void tg3_poll_controller(struct net_device *dev) -{ - int i; - struct tg3 *tp = netdev_priv(dev); - - for (i = 0; i < tp->irq_cnt; i++) - tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); -} -#endif - -static void tg3_reset_task(struct work_struct *work) -{ - struct tg3 *tp = container_of(work, struct tg3, reset_task); - int err; - unsigned int restart_timer; - - tg3_full_lock(tp, 0); - - if (!netif_running(tp->dev)) { - tg3_full_unlock(tp); - return; - } - - tg3_full_unlock(tp); - - tg3_phy_stop(tp); - - tg3_netif_stop(tp); - - tg3_full_lock(tp, 1); - - restart_timer = tg3_flag(tp, RESTART_TIMER); - tg3_flag_clear(tp, RESTART_TIMER); - - if (tg3_flag(tp, TX_RECOVERY_PENDING)) { - tp->write32_tx_mbox = tg3_write32_tx_mbox; - tp->write32_rx_mbox = tg3_write_flush_reg32; - tg3_flag_set(tp, MBOX_WRITE_REORDER); - tg3_flag_clear(tp, TX_RECOVERY_PENDING); - } - - tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); - err = tg3_init_hw(tp, 1); - if (err) - goto out; - - tg3_netif_start(tp); - - if (restart_timer) - mod_timer(&tp->timer, jiffies + 1); - -out: - tg3_full_unlock(tp); - - if (!err) - tg3_phy_start(tp); -} - -static void tg3_tx_timeout(struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - - if (netif_msg_tx_err(tp)) { - netdev_err(dev, "transmit timed out, resetting\n"); - tg3_dump_state(tp); - } - - schedule_work(&tp->reset_task); -} - -/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ -static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) -{ - u32 base = (u32) mapping & 0xffffffff; - - return (base > 0xffffdcc0) && (base + len + 8 < base); -} - -/* Test for DMA addresses > 40-bit */ -static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, - int len) -{ -#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) - if (tg3_flag(tp, 40BIT_DMA_BUG)) - return ((u64) mapping + len) > DMA_BIT_MASK(40); - return 0; -#else - return 0; -#endif -} - -static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, - dma_addr_t mapping, u32 len, u32 flags, - u32 mss, u32 vlan) -{ - txbd->addr_hi = ((u64) mapping >> 32); - txbd->addr_lo = ((u64) mapping & 0xffffffff); - txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); - txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << 
TXD_VLAN_TAG_SHIFT); -} - -static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, - dma_addr_t map, u32 len, u32 flags, - u32 mss, u32 vlan) -{ - struct tg3 *tp = tnapi->tp; - bool hwbug = false; - - if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) - hwbug = 1; - - if (tg3_4g_overflow_test(map, len)) - hwbug = 1; - - if (tg3_40bit_overflow_test(tp, map, len)) - hwbug = 1; - - if (tg3_flag(tp, 4K_FIFO_LIMIT)) { - u32 tmp_flag = flags & ~TXD_FLAG_END; - while (len > TG3_TX_BD_DMA_MAX) { - u32 frag_len = TG3_TX_BD_DMA_MAX; - len -= TG3_TX_BD_DMA_MAX; - - if (len) { - tnapi->tx_buffers[*entry].fragmented = true; - /* Avoid the 8byte DMA problem */ - if (len <= 8) { - len += TG3_TX_BD_DMA_MAX / 2; - frag_len = TG3_TX_BD_DMA_MAX / 2; - } - } else - tmp_flag = flags; - - if (*budget) { - tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, - frag_len, tmp_flag, mss, vlan); - (*budget)--; - *entry = NEXT_TX(*entry); - } else { - hwbug = 1; - break; - } - - map += frag_len; - } - - if (len) { - if (*budget) { - tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, - len, flags, mss, vlan); - (*budget)--; - *entry = NEXT_TX(*entry); - } else { - hwbug = 1; - } - } - } else { - tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, - len, flags, mss, vlan); - *entry = NEXT_TX(*entry); - } - - return hwbug; -} - -static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) -{ - int i; - struct sk_buff *skb; - struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; - - skb = txb->skb; - txb->skb = NULL; - - pci_unmap_single(tnapi->tp->pdev, - dma_unmap_addr(txb, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); - - while (txb->fragmented) { - txb->fragmented = false; - entry = NEXT_TX(entry); - txb = &tnapi->tx_buffers[entry]; - } - - for (i = 0; i < last; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - entry = NEXT_TX(entry); - txb = &tnapi->tx_buffers[entry]; - - pci_unmap_page(tnapi->tp->pdev, - dma_unmap_addr(txb, mapping), - frag->size, PCI_DMA_TODEVICE); - - while (txb->fragmented) { - txb->fragmented = false; - entry = NEXT_TX(entry); - txb = &tnapi->tx_buffers[entry]; - } - } -} - -/* Workaround 4GB and 40-bit hardware DMA bugs. */ -static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, - struct sk_buff *skb, - u32 *entry, u32 *budget, - u32 base_flags, u32 mss, u32 vlan) -{ - struct tg3 *tp = tnapi->tp; - struct sk_buff *new_skb; - dma_addr_t new_addr = 0; - int ret = 0; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) - new_skb = skb_copy(skb, GFP_ATOMIC); - else { - int more_headroom = 4 - ((unsigned long)skb->data & 3); - - new_skb = skb_copy_expand(skb, - skb_headroom(skb) + more_headroom, - skb_tailroom(skb), GFP_ATOMIC); - } - - if (!new_skb) { - ret = -1; - } else { - /* New SKB is guaranteed to be linear. 
*/ - new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, - PCI_DMA_TODEVICE); - /* Make sure the mapping succeeded */ - if (pci_dma_mapping_error(tp->pdev, new_addr)) { - dev_kfree_skb(new_skb); - ret = -1; - } else { - base_flags |= TXD_FLAG_END; - - tnapi->tx_buffers[*entry].skb = new_skb; - dma_unmap_addr_set(&tnapi->tx_buffers[*entry], - mapping, new_addr); - - if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, - new_skb->len, base_flags, - mss, vlan)) { - tg3_tx_skb_unmap(tnapi, *entry, 0); - dev_kfree_skb(new_skb); - ret = -1; - } - } - } - - dev_kfree_skb(skb); - - return ret; -} - -static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); - -/* Use GSO to workaround a rare TSO bug that may be triggered when the - * TSO header is greater than 80 bytes. - */ -static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) -{ - struct sk_buff *segs, *nskb; - u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; - - /* Estimate the number of fragments in the worst case */ - if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { - netif_stop_queue(tp->dev); - - /* netif_tx_stop_queue() must be done before checking - * checking tx index in tg3_tx_avail() below, because in - * tg3_tx(), we update tx index before checking for - * netif_tx_queue_stopped(). - */ - smp_mb(); - if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) - return NETDEV_TX_BUSY; - - netif_wake_queue(tp->dev); - } - - segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); - if (IS_ERR(segs)) - goto tg3_tso_bug_end; - - do { - nskb = segs; - segs = segs->next; - nskb->next = NULL; - tg3_start_xmit(nskb, tp->dev); - } while (segs); - -tg3_tso_bug_end: - dev_kfree_skb(skb); - - return NETDEV_TX_OK; -} - -/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and - * support TG3_FLAG_HW_TSO_1 or firmware TSO only. - */ -static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - u32 len, entry, base_flags, mss, vlan = 0; - u32 budget; - int i = -1, would_hit_hwbug; - dma_addr_t mapping; - struct tg3_napi *tnapi; - struct netdev_queue *txq; - unsigned int last; - - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - tnapi = &tp->napi[skb_get_queue_mapping(skb)]; - if (tg3_flag(tp, ENABLE_TSS)) - tnapi++; - - budget = tg3_tx_avail(tnapi); - - /* We are running in BH disabled context with netif_tx_lock - * and TX reclaim runs via tp->napi.poll inside of a software - * interrupt. Furthermore, IRQ processing runs lockless so we have - * no IRQ context deadlocks to worry about either. Rejoice! - */ - if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { - if (!netif_tx_queue_stopped(txq)) { - netif_tx_stop_queue(txq); - - /* This is a hard error, log it. */ - netdev_err(dev, - "BUG! 
Tx Ring full when queue awake!\n"); - } - return NETDEV_TX_BUSY; - } - - entry = tnapi->tx_prod; - base_flags = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL) - base_flags |= TXD_FLAG_TCPUDP_CSUM; - - mss = skb_shinfo(skb)->gso_size; - if (mss) { - struct iphdr *iph; - u32 tcp_opt_len, hdr_len; - - if (skb_header_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { - dev_kfree_skb(skb); - goto out_unlock; - } - - iph = ip_hdr(skb); - tcp_opt_len = tcp_optlen(skb); - - if (skb_is_gso_v6(skb)) { - hdr_len = skb_headlen(skb) - ETH_HLEN; - } else { - u32 ip_tcp_len; - - ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); - hdr_len = ip_tcp_len + tcp_opt_len; - - iph->check = 0; - iph->tot_len = htons(mss + hdr_len); - } - - if (unlikely((ETH_HLEN + hdr_len) > 80) && - tg3_flag(tp, TSO_BUG)) - return tg3_tso_bug(tp, skb); - - base_flags |= (TXD_FLAG_CPU_PRE_DMA | - TXD_FLAG_CPU_POST_DMA); - - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3)) { - tcp_hdr(skb)->check = 0; - base_flags &= ~TXD_FLAG_TCPUDP_CSUM; - } else - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - - if (tg3_flag(tp, HW_TSO_3)) { - mss |= (hdr_len & 0xc) << 12; - if (hdr_len & 0x10) - base_flags |= 0x00000010; - base_flags |= (hdr_len & 0x3e0) << 5; - } else if (tg3_flag(tp, HW_TSO_2)) - mss |= hdr_len << 9; - else if (tg3_flag(tp, HW_TSO_1) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { - if (tcp_opt_len || iph->ihl > 5) { - int tsflags; - - tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); - mss |= (tsflags << 11); - } - } else { - if (tcp_opt_len || iph->ihl > 5) { - int tsflags; - - tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); - base_flags |= tsflags << 12; - } - } - } - -#ifdef BCM_KERNEL_SUPPORTS_8021Q - if (vlan_tx_tag_present(skb)) { - base_flags |= TXD_FLAG_VLAN; - vlan = vlan_tx_tag_get(skb); - } -#endif - - if (tg3_flag(tp, USE_JUMBO_BDFLAG) && - !mss && skb->len > VLAN_ETH_FRAME_LEN) - base_flags |= TXD_FLAG_JMB_PKT; - - len = skb_headlen(skb); - - mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, mapping)) { - dev_kfree_skb(skb); - goto out_unlock; - } - - tnapi->tx_buffers[entry].skb = skb; - dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); - - would_hit_hwbug = 0; - - if (tg3_flag(tp, 5701_DMA_BUG)) - would_hit_hwbug = 1; - - if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | - ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), - mss, vlan)) - would_hit_hwbug = 1; - - /* Now loop through additional data fragments, and queue them. */ - if (skb_shinfo(skb)->nr_frags > 0) { - u32 tmp_mss = mss; - - if (!tg3_flag(tp, HW_TSO_1) && - !tg3_flag(tp, HW_TSO_2) && - !tg3_flag(tp, HW_TSO_3)) - tmp_mss = 0; - - last = skb_shinfo(skb)->nr_frags - 1; - for (i = 0; i <= last; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - len = frag->size; - mapping = pci_map_page(tp->pdev, - frag->page, - frag->page_offset, - len, PCI_DMA_TODEVICE); - - tnapi->tx_buffers[entry].skb = NULL; - dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, - mapping); - if (pci_dma_mapping_error(tp->pdev, mapping)) - goto dma_error; - - if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, - len, base_flags | - ((i == last) ? 
TXD_FLAG_END : 0), - tmp_mss, vlan)) - would_hit_hwbug = 1; - } - } - - if (would_hit_hwbug) { - tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); - - /* If the workaround fails due to memory/mapping - * failure, silently drop this packet. - */ - entry = tnapi->tx_prod; - budget = tg3_tx_avail(tnapi); - if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget, - base_flags, mss, vlan)) - goto out_unlock; - } - - skb_tx_timestamp(skb); - - /* Packets are ready, update Tx producer idx local and on card. */ - tw32_tx_mbox(tnapi->prodmbox, entry); - - tnapi->tx_prod = entry; - if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { - netif_tx_stop_queue(txq); - - /* netif_tx_stop_queue() must be done before checking - * checking tx index in tg3_tx_avail() below, because in - * tg3_tx(), we update tx index before checking for - * netif_tx_queue_stopped(). - */ - smp_mb(); - if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) - netif_tx_wake_queue(txq); - } - -out_unlock: - mmiowb(); - - return NETDEV_TX_OK; - -dma_error: - tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); - dev_kfree_skb(skb); - tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; - return NETDEV_TX_OK; -} - -static void tg3_set_loopback(struct net_device *dev, u32 features) -{ - struct tg3 *tp = netdev_priv(dev); - - if (features & NETIF_F_LOOPBACK) { - if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) - return; - - /* - * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in - * loopback mode if Half-Duplex mode was negotiated earlier. - */ - tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; - - /* Enable internal MAC loopback mode */ - tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; - spin_lock_bh(&tp->lock); - tw32(MAC_MODE, tp->mac_mode); - netif_carrier_on(tp->dev); - spin_unlock_bh(&tp->lock); - netdev_info(dev, "Internal MAC loopback mode enabled.\n"); - } else { - if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) - return; - - /* Disable internal MAC loopback mode */ - tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; - spin_lock_bh(&tp->lock); - tw32(MAC_MODE, tp->mac_mode); - /* Force link status check */ - tg3_setup_phy(tp, 1); - spin_unlock_bh(&tp->lock); - netdev_info(dev, "Internal MAC loopback mode disabled.\n"); - } -} - -static u32 tg3_fix_features(struct net_device *dev, u32 features) -{ - struct tg3 *tp = netdev_priv(dev); - - if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) - features &= ~NETIF_F_ALL_TSO; - - return features; -} - -static int tg3_set_features(struct net_device *dev, u32 features) -{ - u32 changed = dev->features ^ features; - - if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) - tg3_set_loopback(dev, features); - - return 0; -} - -static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, - int new_mtu) -{ - dev->mtu = new_mtu; - - if (new_mtu > ETH_DATA_LEN) { - if (tg3_flag(tp, 5780_CLASS)) { - netdev_update_features(dev); - tg3_flag_clear(tp, TSO_CAPABLE); - } else { - tg3_flag_set(tp, JUMBO_RING_ENABLE); - } - } else { - if (tg3_flag(tp, 5780_CLASS)) { - tg3_flag_set(tp, TSO_CAPABLE); - netdev_update_features(dev); - } - tg3_flag_clear(tp, JUMBO_RING_ENABLE); - } -} - -static int tg3_change_mtu(struct net_device *dev, int new_mtu) -{ - struct tg3 *tp = netdev_priv(dev); - int err; - - if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) - return -EINVAL; - - if (!netif_running(dev)) { - /* We'll just catch it later when the - * device is up'd. 
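-		 *
-		 * (Recording the new MTU via tg3_set_mtu() is enough
-		 * here; the rings are rebuilt from dev->mtu when the
-		 * device is next opened.)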
- */ - tg3_set_mtu(dev, tp, new_mtu); - return 0; - } - - tg3_phy_stop(tp); - - tg3_netif_stop(tp); - - tg3_full_lock(tp, 1); - - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - - tg3_set_mtu(dev, tp, new_mtu); - - err = tg3_restart_hw(tp, 0); - - if (!err) - tg3_netif_start(tp); - - tg3_full_unlock(tp); - - if (!err) - tg3_phy_start(tp); - - return err; -} - -static void tg3_rx_prodring_free(struct tg3 *tp, - struct tg3_rx_prodring_set *tpr) -{ - int i; - - if (tpr != &tp->napi[0].prodring) { - for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; - i = (i + 1) & tp->rx_std_ring_mask) - tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], - tp->rx_pkt_map_sz); - - if (tg3_flag(tp, JUMBO_CAPABLE)) { - for (i = tpr->rx_jmb_cons_idx; - i != tpr->rx_jmb_prod_idx; - i = (i + 1) & tp->rx_jmb_ring_mask) { - tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], - TG3_RX_JMB_MAP_SZ); - } - } - - return; - } - - for (i = 0; i <= tp->rx_std_ring_mask; i++) - tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], - tp->rx_pkt_map_sz); - - if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { - for (i = 0; i <= tp->rx_jmb_ring_mask; i++) - tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], - TG3_RX_JMB_MAP_SZ); - } -} - -/* Initialize rx rings for packet processing. - * - * The chip has been shut down and the driver detached from - * the networking, so no interrupts or new tx packets will - * end up in the driver. tp->{tx,}lock are held and thus - * we may not sleep. - */ -static int tg3_rx_prodring_alloc(struct tg3 *tp, - struct tg3_rx_prodring_set *tpr) -{ - u32 i, rx_pkt_dma_sz; - - tpr->rx_std_cons_idx = 0; - tpr->rx_std_prod_idx = 0; - tpr->rx_jmb_cons_idx = 0; - tpr->rx_jmb_prod_idx = 0; - - if (tpr != &tp->napi[0].prodring) { - memset(&tpr->rx_std_buffers[0], 0, - TG3_RX_STD_BUFF_RING_SIZE(tp)); - if (tpr->rx_jmb_buffers) - memset(&tpr->rx_jmb_buffers[0], 0, - TG3_RX_JMB_BUFF_RING_SIZE(tp)); - goto done; - } - - /* Zero out all descriptors. */ - memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); - - rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; - if (tg3_flag(tp, 5780_CLASS) && - tp->dev->mtu > ETH_DATA_LEN) - rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; - tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); - - /* Initialize invariants of the rings, we only set this - * stuff once. This works because the card does not - * write into the rx buffer posting rings. - */ - for (i = 0; i <= tp->rx_std_ring_mask; i++) { - struct tg3_rx_buffer_desc *rxd; - - rxd = &tpr->rx_std[i]; - rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; - rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); - rxd->opaque = (RXD_OPAQUE_RING_STD | - (i << RXD_OPAQUE_INDEX_SHIFT)); - } - - /* Now allocate fresh SKBs for each rx ring. */ - for (i = 0; i < tp->rx_pending; i++) { - if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { - netdev_warn(tp->dev, - "Using a smaller RX standard ring. 
Only " - "%d out of %d buffers were allocated " - "successfully\n", i, tp->rx_pending); - if (i == 0) - goto initfail; - tp->rx_pending = i; - break; - } - } - - if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) - goto done; - - memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); - - if (!tg3_flag(tp, JUMBO_RING_ENABLE)) - goto done; - - for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { - struct tg3_rx_buffer_desc *rxd; - - rxd = &tpr->rx_jmb[i].std; - rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; - rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | - RXD_FLAG_JUMBO; - rxd->opaque = (RXD_OPAQUE_RING_JUMBO | - (i << RXD_OPAQUE_INDEX_SHIFT)); - } - - for (i = 0; i < tp->rx_jumbo_pending; i++) { - if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { - netdev_warn(tp->dev, - "Using a smaller RX jumbo ring. Only %d " - "out of %d buffers were allocated " - "successfully\n", i, tp->rx_jumbo_pending); - if (i == 0) - goto initfail; - tp->rx_jumbo_pending = i; - break; - } - } - -done: - return 0; - -initfail: - tg3_rx_prodring_free(tp, tpr); - return -ENOMEM; -} - -static void tg3_rx_prodring_fini(struct tg3 *tp, - struct tg3_rx_prodring_set *tpr) -{ - kfree(tpr->rx_std_buffers); - tpr->rx_std_buffers = NULL; - kfree(tpr->rx_jmb_buffers); - tpr->rx_jmb_buffers = NULL; - if (tpr->rx_std) { - dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), - tpr->rx_std, tpr->rx_std_mapping); - tpr->rx_std = NULL; - } - if (tpr->rx_jmb) { - dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), - tpr->rx_jmb, tpr->rx_jmb_mapping); - tpr->rx_jmb = NULL; - } -} - -static int tg3_rx_prodring_init(struct tg3 *tp, - struct tg3_rx_prodring_set *tpr) -{ - tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), - GFP_KERNEL); - if (!tpr->rx_std_buffers) - return -ENOMEM; - - tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, - TG3_RX_STD_RING_BYTES(tp), - &tpr->rx_std_mapping, - GFP_KERNEL); - if (!tpr->rx_std) - goto err_out; - - if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { - tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), - GFP_KERNEL); - if (!tpr->rx_jmb_buffers) - goto err_out; - - tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, - TG3_RX_JMB_RING_BYTES(tp), - &tpr->rx_jmb_mapping, - GFP_KERNEL); - if (!tpr->rx_jmb) - goto err_out; - } - - return 0; - -err_out: - tg3_rx_prodring_fini(tp, tpr); - return -ENOMEM; -} - -/* Free up pending packets in all rx/tx rings. - * - * The chip has been shut down and the driver detached from - * the networking, so no interrupts or new tx packets will - * end up in the driver. tp->{tx,}lock is not held and we are not - * in an interrupt context and thus may sleep. - */ -static void tg3_free_rings(struct tg3 *tp) -{ - int i, j; - - for (j = 0; j < tp->irq_cnt; j++) { - struct tg3_napi *tnapi = &tp->napi[j]; - - tg3_rx_prodring_free(tp, &tnapi->prodring); - - if (!tnapi->tx_buffers) - continue; - - for (i = 0; i < TG3_TX_RING_SIZE; i++) { - struct sk_buff *skb = tnapi->tx_buffers[i].skb; - - if (!skb) - continue; - - tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags); - - dev_kfree_skb_any(skb); - } - } -} - -/* Initialize tx/rx rings for packet processing. - * - * The chip has been shut down and the driver detached from - * the networking, so no interrupts or new tx packets will - * end up in the driver. tp->{tx,}lock are held and thus - * we may not sleep. - */ -static int tg3_init_rings(struct tg3 *tp) -{ - int i; - - /* Free up all the SKBs. 
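-	 *
-	 * (The rings may still hold buffers from a previous run, e.g.
-	 * when reached via tg3_restart_hw(), so drop any leftover skbs
-	 * and their DMA mappings before reinitializing.)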
*/ - tg3_free_rings(tp); - - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - - tnapi->last_tag = 0; - tnapi->last_irq_tag = 0; - tnapi->hw_status->status = 0; - tnapi->hw_status->status_tag = 0; - memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - - tnapi->tx_prod = 0; - tnapi->tx_cons = 0; - if (tnapi->tx_ring) - memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); - - tnapi->rx_rcb_ptr = 0; - if (tnapi->rx_rcb) - memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - - if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { - tg3_free_rings(tp); - return -ENOMEM; - } - } - - return 0; -} - -/* - * Must not be invoked with interrupt sources disabled and - * the hardware shutdown down. - */ -static void tg3_free_consistent(struct tg3 *tp) -{ - int i; - - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - - if (tnapi->tx_ring) { - dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, - tnapi->tx_ring, tnapi->tx_desc_mapping); - tnapi->tx_ring = NULL; - } - - kfree(tnapi->tx_buffers); - tnapi->tx_buffers = NULL; - - if (tnapi->rx_rcb) { - dma_free_coherent(&tp->pdev->dev, - TG3_RX_RCB_RING_BYTES(tp), - tnapi->rx_rcb, - tnapi->rx_rcb_mapping); - tnapi->rx_rcb = NULL; - } - - tg3_rx_prodring_fini(tp, &tnapi->prodring); - - if (tnapi->hw_status) { - dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, - tnapi->hw_status, - tnapi->status_mapping); - tnapi->hw_status = NULL; - } - } - - if (tp->hw_stats) { - dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), - tp->hw_stats, tp->stats_mapping); - tp->hw_stats = NULL; - } -} - -/* - * Must not be invoked with interrupt sources disabled and - * the hardware shutdown down. Can sleep. - */ -static int tg3_alloc_consistent(struct tg3 *tp) -{ - int i; - - tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, - sizeof(struct tg3_hw_stats), - &tp->stats_mapping, - GFP_KERNEL); - if (!tp->hw_stats) - goto err_out; - - memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); - - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - struct tg3_hw_status *sblk; - - tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, - TG3_HW_STATUS_SIZE, - &tnapi->status_mapping, - GFP_KERNEL); - if (!tnapi->hw_status) - goto err_out; - - memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - sblk = tnapi->hw_status; - - if (tg3_rx_prodring_init(tp, &tnapi->prodring)) - goto err_out; - - /* If multivector TSS is enabled, vector 0 does not handle - * tx interrupts. Don't allocate any resources for it. - */ - if ((!i && !tg3_flag(tp, ENABLE_TSS)) || - (i && tg3_flag(tp, ENABLE_TSS))) { - tnapi->tx_buffers = kzalloc( - sizeof(struct tg3_tx_ring_info) * - TG3_TX_RING_SIZE, GFP_KERNEL); - if (!tnapi->tx_buffers) - goto err_out; - - tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, - TG3_TX_RING_BYTES, - &tnapi->tx_desc_mapping, - GFP_KERNEL); - if (!tnapi->tx_ring) - goto err_out; - } - - /* - * When RSS is enabled, the status block format changes - * slightly. The "rx_jumbo_consumer", "reserved", - * and "rx_mini_consumer" members get mapped to the - * other three rx return ring producer indexes. - */ - switch (i) { - default: - tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; - break; - case 2: - tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer; - break; - case 3: - tnapi->rx_rcb_prod_idx = &sblk->reserved; - break; - case 4: - tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer; - break; - } - - /* - * If multivector RSS is enabled, vector 0 does not handle - * rx or tx interrupts. 
Don't allocate any resources for it. - */ - if (!i && tg3_flag(tp, ENABLE_RSS)) - continue; - - tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, - TG3_RX_RCB_RING_BYTES(tp), - &tnapi->rx_rcb_mapping, - GFP_KERNEL); - if (!tnapi->rx_rcb) - goto err_out; - - memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - } - - return 0; - -err_out: - tg3_free_consistent(tp); - return -ENOMEM; -} - -#define MAX_WAIT_CNT 1000 - -/* To stop a block, clear the enable bit and poll till it - * clears. tp->lock is held. - */ -static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) -{ - unsigned int i; - u32 val; - - if (tg3_flag(tp, 5705_PLUS)) { - switch (ofs) { - case RCVLSC_MODE: - case DMAC_MODE: - case MBFREE_MODE: - case BUFMGR_MODE: - case MEMARB_MODE: - /* We can't enable/disable these bits of the - * 5705/5750, just say success. - */ - return 0; - - default: - break; - } - } - - val = tr32(ofs); - val &= ~enable_bit; - tw32_f(ofs, val); - - for (i = 0; i < MAX_WAIT_CNT; i++) { - udelay(100); - val = tr32(ofs); - if ((val & enable_bit) == 0) - break; - } - - if (i == MAX_WAIT_CNT && !silent) { - dev_err(&tp->pdev->dev, - "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", - ofs, enable_bit); - return -ENODEV; - } - - return 0; -} - -/* tp->lock is held. */ -static int tg3_abort_hw(struct tg3 *tp, int silent) -{ - int i, err; - - tg3_disable_ints(tp); - - tp->rx_mode &= ~RX_MODE_ENABLE; - tw32_f(MAC_RX_MODE, tp->rx_mode); - udelay(10); - - err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); - - err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); - - tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - - tp->tx_mode &= ~TX_MODE_ENABLE; - tw32_f(MAC_TX_MODE, tp->tx_mode); - - for (i = 0; i < MAX_WAIT_CNT; i++) { - udelay(100); - if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) - break; - } - if (i >= MAX_WAIT_CNT) { - dev_err(&tp->pdev->dev, - "%s timed out, TX_MODE_ENABLE will not clear " - "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); - err |= -ENODEV; - } - - err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); - - tw32(FTQ_RESET, 0xffffffff); - tw32(FTQ_RESET, 0x00000000); - - err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); - err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); - - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - if (tnapi->hw_status) - memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - } - if (tp->hw_stats) - memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); - - return err; -} - -static void 
tg3_ape_send_event(struct tg3 *tp, u32 event) -{ - int i; - u32 apedata; - - /* NCSI does not support APE events */ - if (tg3_flag(tp, APE_HAS_NCSI)) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); - if (apedata != APE_SEG_SIG_MAGIC) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); - if (!(apedata & APE_FW_STATUS_READY)) - return; - - /* Wait for up to 1 millisecond for APE to service previous event. */ - for (i = 0; i < 10; i++) { - if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); - - if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) - tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, - event | APE_EVENT_STATUS_EVENT_PENDING); - - tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); - - if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) - break; - - udelay(100); - } - - if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) - tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); -} - -static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) -{ - u32 event; - u32 apedata; - - if (!tg3_flag(tp, ENABLE_APE)) - return; - - switch (kind) { - case RESET_KIND_INIT: - tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, - APE_HOST_SEG_SIG_MAGIC); - tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, - APE_HOST_SEG_LEN_MAGIC); - apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); - tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); - tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, - APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); - tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, - APE_HOST_BEHAV_NO_PHYLOCK); - tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, - TG3_APE_HOST_DRVR_STATE_START); - - event = APE_EVENT_STATUS_STATE_START; - break; - case RESET_KIND_SHUTDOWN: - /* With the interface we are currently using, - * APE does not track driver state. Wiping - * out the HOST SEGMENT SIGNATURE forces - * the APE to assume OS absent status. - */ - tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); - - if (device_may_wakeup(&tp->pdev->dev) && - tg3_flag(tp, WOL_ENABLE)) { - tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, - TG3_APE_HOST_WOL_SPEED_AUTO); - apedata = TG3_APE_HOST_DRVR_STATE_WOL; - } else - apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; - - tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); - - event = APE_EVENT_STATUS_STATE_UNLOAD; - break; - case RESET_KIND_SUSPEND: - event = APE_EVENT_STATUS_STATE_SUSPEND; - break; - default: - return; - } - - event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; - - tg3_ape_send_event(tp, event); -} - -/* tp->lock is held. */ -static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) -{ - tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, - NIC_SRAM_FIRMWARE_MBOX_MAGIC1); - - if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { - switch (kind) { - case RESET_KIND_INIT: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_START); - break; - - case RESET_KIND_SHUTDOWN: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_UNLOAD); - break; - - case RESET_KIND_SUSPEND: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_SUSPEND); - break; - - default: - break; - } - } - - if (kind == RESET_KIND_INIT || - kind == RESET_KIND_SUSPEND) - tg3_ape_driver_state_change(tp, kind); -} - -/* tp->lock is held. 
*/ -static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) -{ - if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { - switch (kind) { - case RESET_KIND_INIT: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_START_DONE); - break; - - case RESET_KIND_SHUTDOWN: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_UNLOAD_DONE); - break; - - default: - break; - } - } - - if (kind == RESET_KIND_SHUTDOWN) - tg3_ape_driver_state_change(tp, kind); -} - -/* tp->lock is held. */ -static void tg3_write_sig_legacy(struct tg3 *tp, int kind) -{ - if (tg3_flag(tp, ENABLE_ASF)) { - switch (kind) { - case RESET_KIND_INIT: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_START); - break; - - case RESET_KIND_SHUTDOWN: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_UNLOAD); - break; - - case RESET_KIND_SUSPEND: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_SUSPEND); - break; - - default: - break; - } - } -} - -static int tg3_poll_fw(struct tg3 *tp) -{ - int i; - u32 val; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - /* Wait up to 20ms for init done. */ - for (i = 0; i < 200; i++) { - if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) - return 0; - udelay(100); - } - return -ENODEV; - } - - /* Wait for firmware initialization to complete. */ - for (i = 0; i < 100000; i++) { - tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); - if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) - break; - udelay(10); - } - - /* Chip might not be fitted with firmware. Some Sun onboard - * parts are configured like that. So don't signal the timeout - * of the above loop as an error, but do report the lack of - * running firmware once. - */ - if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { - tg3_flag_set(tp, NO_FWARE_REPORTED); - - netdev_info(tp->dev, "No firmware running\n"); - } - - if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { - /* The 57765 A0 needs a little more - * time to do some important work. - */ - mdelay(10); - } - - return 0; -} - -/* Save PCI command register before chip reset */ -static void tg3_save_pci_state(struct tg3 *tp) -{ - pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); -} - -/* Restore PCI state after chip reset */ -static void tg3_restore_pci_state(struct tg3 *tp) -{ - u32 val; - - /* Re-enable indirect register accesses. */ - pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, - tp->misc_host_ctrl); - - /* Set MAX PCI retry to zero. */ - val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); - if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && - tg3_flag(tp, PCIX_MODE)) - val |= PCISTATE_RETRY_SAME_DMA; - /* Allow reads and writes to the APE register and memory space. */ - if (tg3_flag(tp, ENABLE_APE)) - val |= PCISTATE_ALLOW_APE_CTLSPC_WR | - PCISTATE_ALLOW_APE_SHMEM_WR | - PCISTATE_ALLOW_APE_PSPACE_WR; - pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); - - pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { - if (tg3_flag(tp, PCI_EXPRESS)) - pcie_set_readrq(tp->pdev, tp->pcie_readrq); - else { - pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, - tp->pci_cacheline_sz); - pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, - tp->pci_lat_timer); - } - } - - /* Make sure PCI-X relaxed ordering bit is clear. 
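-	 *
-	 * (With relaxed ordering enabled, the status block update may
-	 * pass the DMA'd packet data on its way to memory, letting the
-	 * driver chase a producer index whose data has not landed yet.)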
*/ - if (tg3_flag(tp, PCIX_MODE)) { - u16 pcix_cmd; - - pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, - &pcix_cmd); - pcix_cmd &= ~PCI_X_CMD_ERO; - pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, - pcix_cmd); - } - - if (tg3_flag(tp, 5780_CLASS)) { - - /* Chip reset on 5780 will reset MSI enable bit, - * so need to restore it. - */ - if (tg3_flag(tp, USING_MSI)) { - u16 ctrl; - - pci_read_config_word(tp->pdev, - tp->msi_cap + PCI_MSI_FLAGS, - &ctrl); - pci_write_config_word(tp->pdev, - tp->msi_cap + PCI_MSI_FLAGS, - ctrl | PCI_MSI_FLAGS_ENABLE); - val = tr32(MSGINT_MODE); - tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); - } - } -} - -static void tg3_stop_fw(struct tg3 *); - -/* tp->lock is held. */ -static int tg3_chip_reset(struct tg3 *tp) -{ - u32 val; - void (*write_op)(struct tg3 *, u32, u32); - int i, err; - - tg3_nvram_lock(tp); - - tg3_ape_lock(tp, TG3_APE_LOCK_GRC); - - /* No matching tg3_nvram_unlock() after this because - * chip reset below will undo the nvram lock. - */ - tp->nvram_lock_cnt = 0; - - /* GRC_MISC_CFG core clock reset will clear the memory - * enable bit in PCI register 4 and the MSI enable bit - * on some chips, so we save relevant registers here. - */ - tg3_save_pci_state(tp); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || - tg3_flag(tp, 5755_PLUS)) - tw32(GRC_FASTBOOT_PC, 0); - - /* - * We must avoid the readl() that normally takes place. - * It locks machines, causes machine checks, and other - * fun things. So, temporarily disable the 5701 - * hardware workaround, while we do the reset. - */ - write_op = tp->write32; - if (write_op == tg3_write_flush_reg32) - tp->write32 = tg3_write32; - - /* Prevent the irq handler from reading or writing PCI registers - * during chip reset when the memory enable bit in the PCI command - * register may be cleared. The chip does not generate interrupt - * at this time, but the irq handler may still be called due to irq - * sharing or irqpoll. - */ - tg3_flag_set(tp, CHIP_RESETTING); - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - if (tnapi->hw_status) { - tnapi->hw_status->status = 0; - tnapi->hw_status->status_tag = 0; - } - tnapi->last_tag = 0; - tnapi->last_irq_tag = 0; - } - smp_mb(); - - for (i = 0; i < tp->irq_cnt; i++) - synchronize_irq(tp->napi[i].irq_vec); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { - val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; - tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); - } - - /* do the reset */ - val = GRC_MISC_CFG_CORECLK_RESET; - - if (tg3_flag(tp, PCI_EXPRESS)) { - /* Force PCIe 1.0a mode */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - !tg3_flag(tp, 57765_PLUS) && - tr32(TG3_PCIE_PHY_TSTCTL) == - (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) - tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); - - if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { - tw32(GRC_MISC_CFG, (1 << 29)); - val |= (1 << 29); - } - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); - tw32(GRC_VCPU_EXT_CTRL, - tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); - } - - /* Manage gphy power for all CPMU absent PCIe devices. 
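-	 *
-	 * (On these chips the core-clock reset written below can drop
-	 * power to the gphy, hence GRC_MISC_CFG_KEEP_GPHY_POWER is
-	 * folded into the same reset word.)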
*/ - if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) - val |= GRC_MISC_CFG_KEEP_GPHY_POWER; - - tw32(GRC_MISC_CFG, val); - - /* restore 5701 hardware bug workaround write method */ - tp->write32 = write_op; - - /* Unfortunately, we have to delay before the PCI read back. - * Some 575X chips even will not respond to a PCI cfg access - * when the reset command is given to the chip. - * - * How do these hardware designers expect things to work - * properly if the PCI write is posted for a long period - * of time? It is always necessary to have some method by - * which a register read back can occur to push the write - * out which does the reset. - * - * For most tg3 variants the trick below was working. - * Ho hum... - */ - udelay(120); - - /* Flush PCI posted writes. The normal MMIO registers - * are inaccessible at this time so this is the only - * way to make this reliably (actually, this is no longer - * the case, see above). I tried to use indirect - * register read/write but this upset some 5701 variants. - */ - pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); - - udelay(120); - - if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) { - u16 val16; - - if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { - int i; - u32 cfg_val; - - /* Wait for link training to complete. */ - for (i = 0; i < 5000; i++) - udelay(100); - - pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); - pci_write_config_dword(tp->pdev, 0xc4, - cfg_val | (1 << 15)); - } - - /* Clear the "no snoop" and "relaxed ordering" bits. */ - pci_read_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, - &val16); - val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | - PCI_EXP_DEVCTL_NOSNOOP_EN); - /* - * Older PCIe devices only support the 128 byte - * MPS setting. Enforce the restriction. 
- */ - if (!tg3_flag(tp, CPMU_PRESENT)) - val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; - pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, - val16); - - pcie_set_readrq(tp->pdev, tp->pcie_readrq); - - /* Clear error status */ - pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, - PCI_EXP_DEVSTA_CED | - PCI_EXP_DEVSTA_NFED | - PCI_EXP_DEVSTA_FED | - PCI_EXP_DEVSTA_URD); - } - - tg3_restore_pci_state(tp); - - tg3_flag_clear(tp, CHIP_RESETTING); - tg3_flag_clear(tp, ERROR_PROCESSED); - - val = 0; - if (tg3_flag(tp, 5780_CLASS)) - val = tr32(MEMARB_MODE); - tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); - - if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { - tg3_stop_fw(tp); - tw32(0x5000, 0x400); - } - - tw32(GRC_MODE, tp->grc_mode); - - if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { - val = tr32(0xc4); - - tw32(0xc4, val | (1 << 15)); - } - - if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { - tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; - if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) - tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; - tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); - } - - if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { - tp->mac_mode = MAC_MODE_PORT_MODE_TBI; - val = tp->mac_mode; - } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { - tp->mac_mode = MAC_MODE_PORT_MODE_GMII; - val = tp->mac_mode; - } else - val = 0; - - tw32_f(MAC_MODE, val); - udelay(40); - - tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); - - err = tg3_poll_fw(tp); - if (err) - return err; - - tg3_mdio_start(tp); - - if (tg3_flag(tp, PCI_EXPRESS) && - tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - !tg3_flag(tp, 57765_PLUS)) { - val = tr32(0x7c00); - - tw32(0x7c00, val | (1 << 25)); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { - val = tr32(TG3_CPMU_CLCK_ORIDE); - tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); - } - - /* Reprobe ASF enable state. */ - tg3_flag_clear(tp, ENABLE_ASF); - tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); - tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); - if (val == NIC_SRAM_DATA_SIG_MAGIC) { - u32 nic_cfg; - - tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); - if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { - tg3_flag_set(tp, ENABLE_ASF); - tp->last_event_jiffies = jiffies; - if (tg3_flag(tp, 5750_PLUS)) - tg3_flag_set(tp, ASF_NEW_HANDSHAKE); - } - } - - return 0; -} - -/* tp->lock is held. */ -static void tg3_stop_fw(struct tg3 *tp) -{ - if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { - /* Wait for RX cpu to ACK the previous event. */ - tg3_wait_for_event_ack(tp); - - tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); - - tg3_generate_fw_event(tp); - - /* Wait for RX cpu to ACK this event. */ - tg3_wait_for_event_ack(tp); - } -} - -/* tp->lock is held. */ -static int tg3_halt(struct tg3 *tp, int kind, int silent) -{ - int err; - - tg3_stop_fw(tp); - - tg3_write_sig_pre_reset(tp, kind); - - tg3_abort_hw(tp, silent); - err = tg3_chip_reset(tp); - - __tg3_set_mac_addr(tp, 0); - - tg3_write_sig_legacy(tp, kind); - tg3_write_sig_post_reset(tp, kind); - - if (err) - return err; - - return 0; -} - -#define RX_CPU_SCRATCH_BASE 0x30000 -#define RX_CPU_SCRATCH_SIZE 0x04000 -#define TX_CPU_SCRATCH_BASE 0x34000 -#define TX_CPU_SCRATCH_SIZE 0x04000 - -/* tp->lock is held. 
*/ -static int tg3_halt_cpu(struct tg3 *tp, u32 offset) -{ - int i; - - BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 val = tr32(GRC_VCPU_EXT_CTRL); - - tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); - return 0; - } - if (offset == RX_CPU_BASE) { - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } - - tw32(offset + CPU_STATE, 0xffffffff); - tw32_f(offset + CPU_MODE, CPU_MODE_HALT); - udelay(10); - } else { - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } - } - - if (i >= 10000) { - netdev_err(tp->dev, "%s timed out, %s CPU\n", - __func__, offset == RX_CPU_BASE ? "RX" : "TX"); - return -ENODEV; - } - - /* Clear firmware's nvram arbitration. */ - if (tg3_flag(tp, NVRAM)) - tw32(NVRAM_SWARB, SWARB_REQ_CLR0); - return 0; -} - -struct fw_info { - unsigned int fw_base; - unsigned int fw_len; - const __be32 *fw_data; -}; - -/* tp->lock is held. */ -static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, - int cpu_scratch_size, struct fw_info *info) -{ - int err, lock_err, i; - void (*write_op)(struct tg3 *, u32, u32); - - if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { - netdev_err(tp->dev, - "%s: Trying to load TX cpu firmware which is 5705\n", - __func__); - return -EINVAL; - } - - if (tg3_flag(tp, 5705_PLUS)) - write_op = tg3_write_mem; - else - write_op = tg3_write_indirect_reg32; - - /* It is possible that bootcode is still loading at this point. - * Get the nvram lock first before halting the cpu. - */ - lock_err = tg3_nvram_lock(tp); - err = tg3_halt_cpu(tp, cpu_base); - if (!lock_err) - tg3_nvram_unlock(tp); - if (err) - goto out; - - for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) - write_op(tp, cpu_scratch_base + i, 0); - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); - for (i = 0; i < (info->fw_len / sizeof(u32)); i++) - write_op(tp, (cpu_scratch_base + - (info->fw_base & 0xffff) + - (i * sizeof(u32))), - be32_to_cpu(info->fw_data[i])); - - err = 0; - -out: - return err; -} - -/* tp->lock is held. */ -static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) -{ - struct fw_info info; - const __be32 *fw_data; - int err, i; - - fw_data = (void *)tp->fw->data; - - /* Firmware blob starts with version numbers, followed by - start address and length. We are setting complete length. - length = end_address_of_bss - start_address_of_text. - Remainder is the blob to be loaded contiguously - from start address. */ - - info.fw_base = be32_to_cpu(fw_data[1]); - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; - - err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, - RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, - &info); - if (err) - return err; - - err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, - TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, - &info); - if (err) - return err; - - /* Now startup only the RX cpu. 
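tg3_halt_cpu() above is a bounded poll: keep issuing the halt command and re-reading the mode register until the HALT bit latches or the retry budget runs out. A minimal sketch of that shape, where reg_read()/reg_write() are hypothetical stand-ins for tr32()/tw32() and the offset and bit values are illustrative only:

    /* Hypothetical MMIO accessors standing in for tr32()/tw32(). */
    unsigned int reg_read(unsigned long off);
    void reg_write(unsigned long off, unsigned int val);

    #define CPU_MODE_REG  0x04          /* illustrative offset */
    #define CPU_HALT_BIT  0x00000400u   /* illustrative bit value */

    /* Returns 0 once the CPU reports halted, -1 if it never does. */
    int halt_cpu_bounded(unsigned long cpu_base, int max_tries)
    {
        int i;

        for (i = 0; i < max_tries; i++) {
            reg_write(cpu_base + CPU_MODE_REG, CPU_HALT_BIT);
            if (reg_read(cpu_base + CPU_MODE_REG) & CPU_HALT_BIT)
                return 0;
        }
        return -1;
    }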
*/ - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - - for (i = 0; i < 5; i++) { - if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) - break; - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { - netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " - "should be %08x\n", __func__, - tr32(RX_CPU_BASE + CPU_PC), info.fw_base); - return -ENODEV; - } - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); - - return 0; -} - -/* tp->lock is held. */ -static int tg3_load_tso_firmware(struct tg3 *tp) -{ - struct fw_info info; - const __be32 *fw_data; - unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; - int err, i; - - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3)) - return 0; - - fw_data = (void *)tp->fw->data; - - /* Firmware blob starts with version numbers, followed by - start address and length. We are setting complete length. - length = end_address_of_bss - start_address_of_text. - Remainder is the blob to be loaded contiguously - from start address. */ - - info.fw_base = be32_to_cpu(fw_data[1]); - cpu_scratch_size = tp->fw_len; - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { - cpu_base = RX_CPU_BASE; - cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; - } else { - cpu_base = TX_CPU_BASE; - cpu_scratch_base = TX_CPU_SCRATCH_BASE; - cpu_scratch_size = TX_CPU_SCRATCH_SIZE; - } - - err = tg3_load_firmware_cpu(tp, cpu_base, - cpu_scratch_base, cpu_scratch_size, - &info); - if (err) - return err; - - /* Now startup the cpu. */ - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_PC, info.fw_base); - - for (i = 0; i < 5; i++) { - if (tr32(cpu_base + CPU_PC) == info.fw_base) - break; - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); - tw32_f(cpu_base + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { - netdev_err(tp->dev, - "%s fails to set CPU PC, is %08x should be %08x\n", - __func__, tr32(cpu_base + CPU_PC), info.fw_base); - return -ENODEV; - } - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_MODE, 0x00000000); - return 0; -} - - -static int tg3_set_mac_addr(struct net_device *dev, void *p) -{ - struct tg3 *tp = netdev_priv(dev); - struct sockaddr *addr = p; - int err = 0, skip_mac_1 = 0; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - - if (!netif_running(dev)) - return 0; - - if (tg3_flag(tp, ENABLE_ASF)) { - u32 addr0_high, addr0_low, addr1_high, addr1_low; - - addr0_high = tr32(MAC_ADDR_0_HIGH); - addr0_low = tr32(MAC_ADDR_0_LOW); - addr1_high = tr32(MAC_ADDR_1_HIGH); - addr1_low = tr32(MAC_ADDR_1_LOW); - - /* Skip MAC addr 1 if ASF is using it. */ - if ((addr0_high != addr1_high || addr0_low != addr1_low) && - !(addr1_high == 0 && addr1_low == 0)) - skip_mac_1 = 1; - } - spin_lock_bh(&tp->lock); - __tg3_set_mac_addr(tp, skip_mac_1); - spin_unlock_bh(&tp->lock); - - return err; -} - -/* tp->lock is held. 
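Both firmware loaders above decode the same 12-byte header: three big-endian words holding a version, the load address, and the total length (end_of_bss - start_of_text), with the loadable image following. A self-contained sketch of that decoding; the struct and function names are invented for illustration:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>      /* ntohl(): big-endian word to host order */

    struct fw_view {
        uint32_t version;
        uint32_t base;          /* load address */
        uint32_t total_len;     /* end_of_bss - start_of_text */
        const uint8_t *image;   /* size - 12 bytes follow the header */
    };

    static int parse_fw_header(const uint8_t *blob, size_t size, struct fw_view *out)
    {
        uint32_t w[3];

        if (size < 12)
            return -1;
        memcpy(w, blob, sizeof(w));
        out->version   = ntohl(w[0]);
        out->base      = ntohl(w[1]);
        out->total_len = ntohl(w[2]);
        out->image     = blob + 12;
        return 0;
    }

    int main(void)
    {
        static const uint8_t blob[16] = {
            0, 0, 0, 1,   0, 1, 0, 0,   0, 0, 0x40, 0,   0xde, 0xad, 0xbe, 0xef
        };
        struct fw_view v;

        if (parse_fw_header(blob, sizeof(blob), &v) == 0)
            printf("base=0x%x total_len=0x%x\n", v.base, v.total_len);
        return 0;
    }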
*/ -static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, - dma_addr_t mapping, u32 maxlen_flags, - u32 nic_addr) -{ - tg3_write_mem(tp, - (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), - ((u64) mapping >> 32)); - tg3_write_mem(tp, - (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), - ((u64) mapping & 0xffffffff)); - tg3_write_mem(tp, - (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), - maxlen_flags); - - if (!tg3_flag(tp, 5705_PLUS)) - tg3_write_mem(tp, - (bdinfo_addr + TG3_BDINFO_NIC_ADDR), - nic_addr); -} - -static void __tg3_set_rx_mode(struct net_device *); -static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) -{ - int i; - - if (!tg3_flag(tp, ENABLE_TSS)) { - tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); - tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); - tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); - } else { - tw32(HOSTCC_TXCOL_TICKS, 0); - tw32(HOSTCC_TXMAX_FRAMES, 0); - tw32(HOSTCC_TXCOAL_MAXF_INT, 0); - } - - if (!tg3_flag(tp, ENABLE_RSS)) { - tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); - tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); - tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); - } else { - tw32(HOSTCC_RXCOL_TICKS, 0); - tw32(HOSTCC_RXMAX_FRAMES, 0); - tw32(HOSTCC_RXCOAL_MAXF_INT, 0); - } - - if (!tg3_flag(tp, 5705_PLUS)) { - u32 val = ec->stats_block_coalesce_usecs; - - tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); - tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); - - if (!netif_carrier_ok(tp->dev)) - val = 0; - - tw32(HOSTCC_STAT_COAL_TICKS, val); - } - - for (i = 0; i < tp->irq_cnt - 1; i++) { - u32 reg; - - reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; - tw32(reg, ec->rx_coalesce_usecs); - reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; - tw32(reg, ec->rx_max_coalesced_frames); - reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; - tw32(reg, ec->rx_max_coalesced_frames_irq); - - if (tg3_flag(tp, ENABLE_TSS)) { - reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; - tw32(reg, ec->tx_coalesce_usecs); - reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; - tw32(reg, ec->tx_max_coalesced_frames); - reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; - tw32(reg, ec->tx_max_coalesced_frames_irq); - } - } - - for (; i < tp->irq_max - 1; i++) { - tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); - tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); - tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); - - if (tg3_flag(tp, ENABLE_TSS)) { - tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); - tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); - tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); - } - } -} - -/* tp->lock is held. */ -static void tg3_rings_reset(struct tg3 *tp) -{ - int i; - u32 stblk, txrcb, rxrcb, limit; - struct tg3_napi *tnapi = &tp->napi[0]; - - /* Disable all transmit rings but the first. */ - if (!tg3_flag(tp, 5705_PLUS)) - limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; - else if (tg3_flag(tp, 5717_PLUS)) - limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; - else - limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; - - for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; - txrcb < limit; txrcb += TG3_BDINFO_SIZE) - tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, - BDINFO_FLAGS_DISABLED); - - - /* Disable all receive return rings but the first. 
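The per-vector loops above depend on the host-coalescing registers for successive MSI-X vectors being laid out at a fixed 0x18-byte stride. A tiny sketch of that address arithmetic; the base offset here is a made-up value for illustration:

    #include <stdio.h>

    #define VEC1_RXCOL_TICKS 0x100UL   /* hypothetical base, vector 1 */
    #define VEC_STRIDE       0x18UL    /* bytes between consecutive vectors */

    static unsigned long rxcol_ticks_reg(int vec)   /* vec >= 1 */
    {
        return VEC1_RXCOL_TICKS + (unsigned long)(vec - 1) * VEC_STRIDE;
    }

    int main(void)
    {
        int v;

        for (v = 1; v <= 4; v++)
            printf("vector %d -> 0x%lx\n", v, rxcol_ticks_reg(v));
        return 0;
    }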
*/ - if (tg3_flag(tp, 5717_PLUS)) - limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; - else if (!tg3_flag(tp, 5705_PLUS)) - limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; - else - limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; - - for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; - rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) - tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, - BDINFO_FLAGS_DISABLED); - - /* Disable interrupts */ - tw32_mailbox_f(tp->napi[0].int_mbox, 1); - tp->napi[0].chk_msi_cnt = 0; - tp->napi[0].last_rx_cons = 0; - tp->napi[0].last_tx_cons = 0; - - /* Zero mailbox registers. */ - if (tg3_flag(tp, SUPPORT_MSIX)) { - for (i = 1; i < tp->irq_max; i++) { - tp->napi[i].tx_prod = 0; - tp->napi[i].tx_cons = 0; - if (tg3_flag(tp, ENABLE_TSS)) - tw32_mailbox(tp->napi[i].prodmbox, 0); - tw32_rx_mbox(tp->napi[i].consmbox, 0); - tw32_mailbox_f(tp->napi[i].int_mbox, 1); - tp->napi[0].chk_msi_cnt = 0; - tp->napi[i].last_rx_cons = 0; - tp->napi[i].last_tx_cons = 0; - } - if (!tg3_flag(tp, ENABLE_TSS)) - tw32_mailbox(tp->napi[0].prodmbox, 0); - } else { - tp->napi[0].tx_prod = 0; - tp->napi[0].tx_cons = 0; - tw32_mailbox(tp->napi[0].prodmbox, 0); - tw32_rx_mbox(tp->napi[0].consmbox, 0); - } - - /* Make sure the NIC-based send BD rings are disabled. */ - if (!tg3_flag(tp, 5705_PLUS)) { - u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; - for (i = 0; i < 16; i++) - tw32_tx_mbox(mbox + i * 8, 0); - } - - txrcb = NIC_SRAM_SEND_RCB; - rxrcb = NIC_SRAM_RCV_RET_RCB; - - /* Clear status block in ram. */ - memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - - /* Set status block DMA address */ - tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, - ((u64) tnapi->status_mapping >> 32)); - tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, - ((u64) tnapi->status_mapping & 0xffffffff)); - - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - if (tnapi->rx_rcb) { - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - (tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT, 0); - rxrcb += TG3_BDINFO_SIZE; - } - - stblk = HOSTCC_STATBLCK_RING1; - - for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { - u64 mapping = (u64)tnapi->status_mapping; - tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); - tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); - - /* Clear status block in ram. 
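Several spots above (the BDINFO host addresses, the status block pointer) split one 64-bit DMA mapping across a HIGH/LOW register pair, since the chip only takes 32-bit writes. The arithmetic, as a runnable fragment:

    #include <stdint.h>
    #include <stdio.h>

    /* Split a 64-bit bus address into the two 32-bit halves the chip expects. */
    static void split_dma_addr(uint64_t mapping, uint32_t *hi, uint32_t *lo)
    {
        *hi = (uint32_t)(mapping >> 32);
        *lo = (uint32_t)(mapping & 0xffffffffu);
    }

    int main(void)
    {
        uint32_t hi, lo;

        split_dma_addr(0x123456789abcdef0ull, &hi, &lo);
        printf("high=%08x low=%08x\n", hi, lo);   /* 12345678 9abcdef0 */
        return 0;
    }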
*/ - memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - ((tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT), 0); - - stblk += 8; - rxrcb += TG3_BDINFO_SIZE; - } -} - -static void tg3_setup_rxbd_thresholds(struct tg3 *tp) -{ - u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; - - if (!tg3_flag(tp, 5750_PLUS) || - tg3_flag(tp, 5780_CLASS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) - bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) - bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; - else - bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; - - nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); - host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); - - val = min(nic_rep_thresh, host_rep_thresh); - tw32(RCVBDI_STD_THRESH, val); - - if (tg3_flag(tp, 57765_PLUS)) - tw32(STD_REPLENISH_LWM, bdcache_maxcnt); - - if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) - return; - - if (!tg3_flag(tp, 5705_PLUS)) - bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; - else - bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; - - host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); - - val = min(bdcache_maxcnt / 2, host_rep_thresh); - tw32(RCVBDI_JUMBO_THRESH, val); - - if (tg3_flag(tp, 57765_PLUS)) - tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); -} - -/* tp->lock is held. */ -static int tg3_reset_hw(struct tg3 *tp, int reset_phy) -{ - u32 val, rdmac_mode; - int i, err, limit; - struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; - - tg3_disable_ints(tp); - - tg3_stop_fw(tp); - - tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); - - if (tg3_flag(tp, INIT_COMPLETE)) - tg3_abort_hw(tp, 1); - - /* Enable MAC control of LPI */ - if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { - tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, - TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | - TG3_CPMU_EEE_LNKIDL_UART_IDL); - - tw32_f(TG3_CPMU_EEE_CTRL, - TG3_CPMU_EEE_CTRL_EXIT_20_1_US); - - val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | - TG3_CPMU_EEEMD_LPI_IN_TX | - TG3_CPMU_EEEMD_LPI_IN_RX | - TG3_CPMU_EEEMD_EEE_ENABLE; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) - val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; - - if (tg3_flag(tp, ENABLE_APE)) - val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; - - tw32_f(TG3_CPMU_EEE_MODE, val); - - tw32_f(TG3_CPMU_EEE_DBTMR1, - TG3_CPMU_DBTMR1_PCIEXIT_2047US | - TG3_CPMU_DBTMR1_LNKIDLE_2047US); - - tw32_f(TG3_CPMU_EEE_DBTMR2, - TG3_CPMU_DBTMR2_APE_TX_2047US | - TG3_CPMU_DBTMR2_TXIDXEQ_2047US); - } - - if (reset_phy) - tg3_phy_reset(tp); - - err = tg3_chip_reset(tp); - if (err) - return err; - - tg3_write_sig_legacy(tp, RESET_KIND_INIT); - - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { - val = tr32(TG3_CPMU_CTRL); - val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); - tw32(TG3_CPMU_CTRL, val); - - val = tr32(TG3_CPMU_LSPD_10MB_CLK); - val &= ~CPMU_LSPD_10MB_MACCLK_MASK; - val |= CPMU_LSPD_10MB_MACCLK_6_25; - tw32(TG3_CPMU_LSPD_10MB_CLK, val); - - val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); - val &= ~CPMU_LNK_AWARE_MACCLK_MASK; - val |= CPMU_LNK_AWARE_MACCLK_6_25; - tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); - - val = tr32(TG3_CPMU_HST_ACC); - val &= 
~CPMU_HST_ACC_MACCLK_MASK; - val |= CPMU_HST_ACC_MACCLK_6_25; - tw32(TG3_CPMU_HST_ACC, val); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { - val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; - val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | - PCIE_PWR_MGMT_L1_THRESH_4MS; - tw32(PCIE_PWR_MGMT_THRESH, val); - - val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; - tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); - - tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); - - val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; - tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); - } - - if (tg3_flag(tp, L1PLLPD_EN)) { - u32 grc_mode = tr32(GRC_MODE); - - /* Access the lower 1K of PL PCIE block registers. */ - val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; - tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); - - val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); - tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, - val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); - - tw32(GRC_MODE, grc_mode); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { - if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { - u32 grc_mode = tr32(GRC_MODE); - - /* Access the lower 1K of PL PCIE block registers. */ - val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; - tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); - - val = tr32(TG3_PCIE_TLDLPL_PORT + - TG3_PCIE_PL_LO_PHYCTL5); - tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, - val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); - - tw32(GRC_MODE, grc_mode); - } - - if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) { - u32 grc_mode = tr32(GRC_MODE); - - /* Access the lower 1K of DL PCIE block registers. */ - val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; - tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); - - val = tr32(TG3_PCIE_TLDLPL_PORT + - TG3_PCIE_DL_LO_FTSMAX); - val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; - tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, - val | TG3_PCIE_DL_LO_FTSMAX_VAL); - - tw32(GRC_MODE, grc_mode); - } - - val = tr32(TG3_CPMU_LSPD_10MB_CLK); - val &= ~CPMU_LSPD_10MB_MACCLK_MASK; - val |= CPMU_LSPD_10MB_MACCLK_6_25; - tw32(TG3_CPMU_LSPD_10MB_CLK, val); - } - - /* This works around an issue with Athlon chipsets on - * B3 tigon3 silicon. This bit has no effect on any - * other revision. But do not set this on PCI Express - * chips and don't even touch the clocks if the CPMU is present. - */ - if (!tg3_flag(tp, CPMU_PRESENT)) { - if (!tg3_flag(tp, PCI_EXPRESS)) - tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; - tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); - } - - if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && - tg3_flag(tp, PCIX_MODE)) { - val = tr32(TG3PCI_PCISTATE); - val |= PCISTATE_RETRY_SAME_DMA; - tw32(TG3PCI_PCISTATE, val); - } - - if (tg3_flag(tp, ENABLE_APE)) { - /* Allow reads and writes to the - * APE register and memory space. - */ - val = tr32(TG3PCI_PCISTATE); - val |= PCISTATE_ALLOW_APE_CTLSPC_WR | - PCISTATE_ALLOW_APE_SHMEM_WR | - PCISTATE_ALLOW_APE_PSPACE_WR; - tw32(TG3PCI_PCISTATE, val); - } - - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { - /* Enable some hw fixes. */ - val = tr32(TG3PCI_MSI_DATA); - val |= (1 << 26) | (1 << 28) | (1 << 29); - tw32(TG3PCI_MSI_DATA, val); - } - - /* Descriptor ring init may make accesses to the - * NIC SRAM area to setup the TX descriptors, so we - * can only do this after the hardware has been - * successfully reset. 
- */ - err = tg3_init_rings(tp); - if (err) - return err; - - if (tg3_flag(tp, 57765_PLUS)) { - val = tr32(TG3PCI_DMA_RW_CTRL) & - ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; - if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) - val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) - val |= DMA_RWCTRL_TAGGED_STAT_WA; - tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { - /* This value is determined during the probe time DMA - * engine test, tg3_test_dma. - */ - tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); - } - - tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | - GRC_MODE_4X_NIC_SEND_RINGS | - GRC_MODE_NO_TX_PHDR_CSUM | - GRC_MODE_NO_RX_PHDR_CSUM); - tp->grc_mode |= GRC_MODE_HOST_SENDBDS; - - /* Pseudo-header checksum is done by hardware logic and not - * the offload processers, so make the chip do the pseudo- - * header checksums on receive. For transmit it is more - * convenient to do the pseudo-header checksum in software - * as Linux does that on transmit for us in all cases. - */ - tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; - - tw32(GRC_MODE, - tp->grc_mode | - (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); - - /* Setup the timer prescalar register. Clock is always 66Mhz. */ - val = tr32(GRC_MISC_CFG); - val &= ~0xff; - val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); - tw32(GRC_MISC_CFG, val); - - /* Initialize MBUF/DESC pool. */ - if (tg3_flag(tp, 5750_PLUS)) { - /* Do nothing. */ - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { - tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) - tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); - else - tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); - tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); - tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); - } else if (tg3_flag(tp, TSO_CAPABLE)) { - int fw_len; - - fw_len = tp->fw_len; - fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); - tw32(BUFMGR_MB_POOL_ADDR, - NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); - tw32(BUFMGR_MB_POOL_SIZE, - NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); - } - - if (tp->dev->mtu <= ETH_DATA_LEN) { - tw32(BUFMGR_MB_RDMA_LOW_WATER, - tp->bufmgr_config.mbuf_read_dma_low_water); - tw32(BUFMGR_MB_MACRX_LOW_WATER, - tp->bufmgr_config.mbuf_mac_rx_low_water); - tw32(BUFMGR_MB_HIGH_WATER, - tp->bufmgr_config.mbuf_high_water); - } else { - tw32(BUFMGR_MB_RDMA_LOW_WATER, - tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); - tw32(BUFMGR_MB_MACRX_LOW_WATER, - tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); - tw32(BUFMGR_MB_HIGH_WATER, - tp->bufmgr_config.mbuf_high_water_jumbo); - } - tw32(BUFMGR_DMA_LOW_WATER, - tp->bufmgr_config.dma_low_water); - tw32(BUFMGR_DMA_HIGH_WATER, - tp->bufmgr_config.dma_high_water); - - val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) - val |= BUFMGR_MODE_NO_TX_UNDERRUN; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) - val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; - tw32(BUFMGR_MODE, val); - for (i = 0; i < 2000; i++) { - if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) - break; - udelay(10); - } - if (i >= 2000) { - netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); - return -ENODEV; - } - - if 
(tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) - tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); - - tg3_setup_rxbd_thresholds(tp); - - /* Initialize TG3_BDINFO's at: - * RCVDBDI_STD_BD: standard eth size rx ring - * RCVDBDI_JUMBO_BD: jumbo frame rx ring - * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) - * - * like so: - * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring - * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | - * ring attribute flags - * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM - * - * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. - * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. - * - * The size of each ring is fixed in the firmware, but the location is - * configurable. - */ - tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, - ((u64) tpr->rx_std_mapping >> 32)); - tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, - ((u64) tpr->rx_std_mapping & 0xffffffff)); - if (!tg3_flag(tp, 5717_PLUS)) - tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, - NIC_SRAM_RX_BUFFER_DESC); - - /* Disable the mini ring */ - if (!tg3_flag(tp, 5705_PLUS)) - tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, - BDINFO_FLAGS_DISABLED); - - /* Program the jumbo buffer descriptor ring control - * blocks on those devices that have them. - */ - if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || - (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { - - if (tg3_flag(tp, JUMBO_RING_ENABLE)) { - tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, - ((u64) tpr->rx_jmb_mapping >> 32)); - tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, - ((u64) tpr->rx_jmb_mapping & 0xffffffff)); - val = TG3_RX_JMB_RING_SIZE(tp) << - BDINFO_FLAGS_MAXLEN_SHIFT; - tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, - val | BDINFO_FLAGS_USE_EXT_RECV); - if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, - NIC_SRAM_RX_JUMBO_BUFFER_DESC); - } else { - tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, - BDINFO_FLAGS_DISABLED); - } - - if (tg3_flag(tp, 57765_PLUS)) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - val = TG3_RX_STD_MAX_SIZE_5700; - else - val = TG3_RX_STD_MAX_SIZE_5717; - val <<= BDINFO_FLAGS_MAXLEN_SHIFT; - val |= (TG3_RX_STD_DMA_SZ << 2); - } else - val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; - } else - val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; - - tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); - - tpr->rx_std_prod_idx = tp->rx_pending; - tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); - - tpr->rx_jmb_prod_idx = - tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; - tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); - - tg3_rings_reset(tp); - - /* Initialize MAC address and backoff seed. */ - __tg3_set_mac_addr(tp, 0); - - /* MTU + ethernet header + FCS + optional VLAN tag */ - tw32(MAC_RX_MTU_SIZE, - tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); - - /* The slot time is changed by tg3_setup_phy if we - * run at gigabit with half duplex. - */ - val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT) | - (32 << TX_LENGTHS_SLOT_TIME_SHIFT); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - val |= tr32(MAC_TX_LENGTHS) & - (TX_LENGTHS_JMB_FRM_LEN_MSK | - TX_LENGTHS_CNT_DWN_VAL_MSK); - - tw32(MAC_TX_LENGTHS, val); - - /* Receive rules. 
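Per the TG3_BDINFO comment above, the MAXLEN_FLAGS word packs the ring size into the top half and the ring attribute flags into the bottom. A minimal encoder; the DISABLED flag value is shown for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define BDINFO_MAXLEN_SHIFT 16            /* per the comment: size << 16 */
    #define BDINFO_DISABLED     0x00000002u   /* illustrative flag value */

    static uint32_t bdinfo_maxlen_flags(uint32_t entries, uint32_t flags)
    {
        return (entries << BDINFO_MAXLEN_SHIFT) | flags;
    }

    int main(void)
    {
        /* Standard ring, 512 entries, no flags -> 0x02000000. */
        printf("%08x\n", bdinfo_maxlen_flags(512, 0));
        return 0;
    }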
*/ - tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); - tw32(RCVLPC_CONFIG, 0x0181); - - /* Calculate RDMAC_MODE setting early, we need it to determine - * the RCVLPC_STATE_ENABLE mask. - */ - rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | - RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | - RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | - RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | - RDMAC_MODE_LNGREAD_ENAB); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) - rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) - rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | - RDMAC_MODE_MBUF_RBD_CRPT_ENAB | - RDMAC_MODE_MBUF_SBD_CRPT_ENAB; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { - if (tg3_flag(tp, TSO_CAPABLE) && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { - rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; - } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && - !tg3_flag(tp, IS_5788)) { - rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; - } - } - - if (tg3_flag(tp, PCI_EXPRESS)) - rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; - - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3)) - rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; - - if (tg3_flag(tp, 57765_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) - rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - tg3_flag(tp, 57765_PLUS)) { - val = tr32(TG3_RDMA_RSRVCTRL_REG); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { - val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | - TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | - TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); - val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | - TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | - TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; - } - tw32(TG3_RDMA_RSRVCTRL_REG, - val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { - val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); - tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | - TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | - TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); - } - - /* Receive/send statistics. */ - if (tg3_flag(tp, 5750_PLUS)) { - val = tr32(RCVLPC_STATS_ENABLE); - val &= ~RCVLPC_STATSENAB_DACK_FIX; - tw32(RCVLPC_STATS_ENABLE, val); - } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && - tg3_flag(tp, TSO_CAPABLE)) { - val = tr32(RCVLPC_STATS_ENABLE); - val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; - tw32(RCVLPC_STATS_ENABLE, val); - } else { - tw32(RCVLPC_STATS_ENABLE, 0xffffff); - } - tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); - tw32(SNDDATAI_STATSENAB, 0xffffff); - tw32(SNDDATAI_STATSCTRL, - (SNDDATAI_SCTRL_ENABLE | - SNDDATAI_SCTRL_FASTUPD)); - - /* Setup host coalescing engine. 
*/ - tw32(HOSTCC_MODE, 0); - for (i = 0; i < 2000; i++) { - if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) - break; - udelay(10); - } - - __tg3_set_coalesce(tp, &tp->coal); - - if (!tg3_flag(tp, 5705_PLUS)) { - /* Status/statistics block address. See tg3_timer, - * the tg3_periodic_fetch_stats call there, and - * tg3_get_stats to see how this works for 5705/5750 chips. - */ - tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, - ((u64) tp->stats_mapping >> 32)); - tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, - ((u64) tp->stats_mapping & 0xffffffff)); - tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); - - tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); - - /* Clear statistics and status block memory areas */ - for (i = NIC_SRAM_STATS_BLK; - i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; - i += sizeof(u32)) { - tg3_write_mem(tp, i, 0); - udelay(40); - } - } - - tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); - - tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); - tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); - if (!tg3_flag(tp, 5705_PLUS)) - tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); - - if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - /* reset to prevent losing 1st rx packet intermittently */ - tw32_f(MAC_RX_MODE, RX_MODE_RESET); - udelay(10); - } - - tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | - MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | - MAC_MODE_FHDE_ENABLE; - if (tg3_flag(tp, ENABLE_APE)) - tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; - if (!tg3_flag(tp, 5705_PLUS) && - !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) - tp->mac_mode |= MAC_MODE_LINK_POLARITY; - tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); - udelay(40); - - /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). - * If TG3_FLAG_IS_NIC is zero, we should read the - * register to preserve the GPIO settings for LOMs. The GPIOs, - * whether used as inputs or outputs, are set by boot code after - * reset. 
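The GPIO handling described above (and carried out just below) is a bit-merge: take the masked bits from the live register so that LOM boot-code settings survive, and keep everything else from the driver's cached value. The same idea as a pure function:

    #include <stdint.h>
    #include <stdio.h>

    /* Bits in 'mask' come from the live register; the rest from the cache. */
    static uint32_t merge_hw_bits(uint32_t cached, uint32_t live, uint32_t mask)
    {
        return (cached & ~mask) | (live & mask);
    }

    int main(void)
    {
        printf("%08x\n", merge_hw_bits(0x00ff00ffu, 0x12345678u, 0x0000ff00u));
        /* prints 00ff56ff */
        return 0;
    }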
- */ - if (!tg3_flag(tp, IS_NIC)) { - u32 gpio_mask; - - gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | - GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) - gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | - GRC_LCLCTRL_GPIO_OUTPUT3; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) - gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; - - tp->grc_local_ctrl &= ~gpio_mask; - tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; - - /* GPIO1 must be driven high for eeprom write protect */ - if (tg3_flag(tp, EEPROM_WRITE_PROT)) - tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OUTPUT1); - } - tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); - udelay(100); - - if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) { - val = tr32(MSGINT_MODE); - val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; - tw32(MSGINT_MODE, val); - } - - if (!tg3_flag(tp, 5705_PLUS)) { - tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); - udelay(40); - } - - val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | - WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | - WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | - WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | - WDMAC_MODE_LNGREAD_ENAB); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { - if (tg3_flag(tp, TSO_CAPABLE) && - (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || - tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { - /* nothing */ - } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && - !tg3_flag(tp, IS_5788)) { - val |= WDMAC_MODE_RX_ACCEL; - } - } - - /* Enable host coalescing bug fix */ - if (tg3_flag(tp, 5755_PLUS)) - val |= WDMAC_MODE_STATUS_TAG_FIX; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) - val |= WDMAC_MODE_BURST_ALL_DATA; - - tw32_f(WDMAC_MODE, val); - udelay(40); - - if (tg3_flag(tp, PCIX_MODE)) { - u16 pcix_cmd; - - pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, - &pcix_cmd); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { - pcix_cmd &= ~PCI_X_CMD_MAX_READ; - pcix_cmd |= PCI_X_CMD_READ_2K; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); - pcix_cmd |= PCI_X_CMD_READ_2K; - } - pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, - pcix_cmd); - } - - tw32_f(RDMAC_MODE, rdmac_mode); - udelay(40); - - tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); - if (!tg3_flag(tp, 5705_PLUS)) - tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) - tw32(SNDDATAC_MODE, - SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); - else - tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); - - tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); - tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); - val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; - if (tg3_flag(tp, LRG_PROD_RING_CAP)) - val |= RCVDBDI_MODE_LRG_RING_SZ; - tw32(RCVDBDI_MODE, val); - tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3)) - tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); - val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; - if (tg3_flag(tp, ENABLE_TSS)) - val |= SNDBDI_MODE_MULTI_TXQ_EN; - tw32(SNDBDI_MODE, val); - tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); - - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { 
- err = tg3_load_5701_a0_firmware_fix(tp); - if (err) - return err; - } - - if (tg3_flag(tp, TSO_CAPABLE)) { - err = tg3_load_tso_firmware(tp); - if (err) - return err; - } - - tp->tx_mode = TX_MODE_ENABLE; - - if (tg3_flag(tp, 5755_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { - val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; - tp->tx_mode &= ~val; - tp->tx_mode |= tr32(MAC_TX_MODE) & val; - } - - tw32_f(MAC_TX_MODE, tp->tx_mode); - udelay(100); - - if (tg3_flag(tp, ENABLE_RSS)) { - int i = 0; - u32 reg = MAC_RSS_INDIR_TBL_0; - - if (tp->irq_cnt == 2) { - for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) { - tw32(reg, 0x0); - reg += 4; - } - } else { - u32 val; - - while (i < TG3_RSS_INDIR_TBL_SIZE) { - val = i % (tp->irq_cnt - 1); - i++; - for (; i % 8; i++) { - val <<= 4; - val |= (i % (tp->irq_cnt - 1)); - } - tw32(reg, val); - reg += 4; - } - } - - /* Setup the "secret" hash key. */ - tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); - tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); - tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); - tw32(MAC_RSS_HASH_KEY_3, 0x36621985); - tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); - tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); - tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); - tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); - tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); - tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); - } - - tp->rx_mode = RX_MODE_ENABLE; - if (tg3_flag(tp, 5755_PLUS)) - tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; - - if (tg3_flag(tp, ENABLE_RSS)) - tp->rx_mode |= RX_MODE_RSS_ENABLE | - RX_MODE_RSS_ITBL_HASH_BITS_7 | - RX_MODE_RSS_IPV6_HASH_EN | - RX_MODE_RSS_TCP_IPV6_HASH_EN | - RX_MODE_RSS_IPV4_HASH_EN | - RX_MODE_RSS_TCP_IPV4_HASH_EN; - - tw32_f(MAC_RX_MODE, tp->rx_mode); - udelay(10); - - tw32(MAC_LED_CTRL, tp->led_ctrl); - - tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); - if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { - tw32_f(MAC_RX_MODE, RX_MODE_RESET); - udelay(10); - } - tw32_f(MAC_RX_MODE, tp->rx_mode); - udelay(10); - - if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && - !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { - /* Set drive transmission level to 1.2V */ - /* only if the signal pre-emphasis bit is not set */ - val = tr32(MAC_SERDES_CFG); - val &= 0xfffff000; - val |= 0x880; - tw32(MAC_SERDES_CFG, val); - } - if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) - tw32(MAC_SERDES_CFG, 0x616000); - } - - /* Prevent chip from dropping frames when flow control - * is enabled. 
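The RSS setup above packs eight 4-bit ring indices into each 32-bit indirection-table register, assigning rings round-robin. A standalone rendering of the same packing; the table size is an assumption standing in for TG3_RSS_INDIR_TBL_SIZE:

    #include <stdint.h>
    #include <stdio.h>

    #define INDIR_TBL_SIZE 128   /* assumption: mirrors TG3_RSS_INDIR_TBL_SIZE */

    static void fill_indir_tbl(uint32_t *words, int nrings)
    {
        int i = 0, w = 0;

        while (i < INDIR_TBL_SIZE) {
            uint32_t val = (uint32_t)(i % nrings);

            i++;
            for (; i % 8; i++) {
                val <<= 4;
                val |= (uint32_t)(i % nrings);
            }
            words[w++] = val;
        }
    }

    int main(void)
    {
        uint32_t words[INDIR_TBL_SIZE / 8];

        fill_indir_tbl(words, 3);                  /* e.g. three RSS rings */
        printf("first word = %08x\n", words[0]);   /* 01201201 */
        return 0;
    }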
- */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - val = 1; - else - val = 2; - tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && - (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { - /* Use hardware link auto-negotiation */ - tg3_flag_set(tp, HW_AUTONEG); - } - - if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { - u32 tmp; - - tmp = tr32(SERDES_RX_CTRL); - tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); - tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; - tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; - tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); - } - - if (!tg3_flag(tp, USE_PHYLIB)) { - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { - tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; - tp->link_config.speed = tp->link_config.orig_speed; - tp->link_config.duplex = tp->link_config.orig_duplex; - tp->link_config.autoneg = tp->link_config.orig_autoneg; - } - - err = tg3_setup_phy(tp, 0); - if (err) - return err; - - if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && - !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { - u32 tmp; - - /* Clear CRC stats. */ - if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { - tg3_writephy(tp, MII_TG3_TEST1, - tmp | MII_TG3_TEST1_CRC_EN); - tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); - } - } - } - - __tg3_set_rx_mode(tp->dev); - - /* Initialize receive rules. */ - tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); - tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); - tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); - tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); - - if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) - limit = 8; - else - limit = 16; - if (tg3_flag(tp, ENABLE_ASF)) - limit -= 4; - switch (limit) { - case 16: - tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); - case 15: - tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); - case 14: - tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); - case 13: - tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); - case 12: - tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); - case 11: - tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); - case 10: - tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); - case 9: - tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); - case 8: - tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); - case 7: - tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); - case 6: - tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); - case 5: - tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); - case 4: - /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ - case 3: - /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ - case 2: - case 1: - - default: - break; - } - - if (tg3_flag(tp, ENABLE_APE)) - /* Write our heartbeat update interval to APE. */ - tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, - APE_HOST_HEARTBEAT_INT_DISABLE); - - tg3_write_sig_post_reset(tp, RESET_KIND_INIT); - - return 0; -} - -/* Called at device open time to get the chip ready for - * packet processing. Invoked with tp->lock held. 
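The long switch above leans on intentional case fallthrough: entering at 'limit' clears receive rules limit-1 down to 4 (rules 3 and 2 are kept commented out, and rules 1 and 0 were programmed by the fixed setup just before). The equivalent loop, which may read more directly:

    #include <stdio.h>

    static void clear_rcv_rule(int idx)
    {
        /* Stand-in for: tw32(MAC_RCV_RULE_n, 0); tw32(MAC_RCV_VALUE_n, 0); */
        printf("clear rule %d\n", idx);
    }

    static void clear_unused_rules(int limit)
    {
        int idx;

        for (idx = limit - 1; idx >= 4; idx--)
            clear_rcv_rule(idx);
    }

    int main(void)
    {
        clear_unused_rules(12);   /* 16 rules with 4 reserved for ASF */
        return 0;
    }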
- */ -static int tg3_init_hw(struct tg3 *tp, int reset_phy) -{ - tg3_switch_clocks(tp); - - tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); - - return tg3_reset_hw(tp, reset_phy); -} - -#define TG3_STAT_ADD32(PSTAT, REG) \ -do { u32 __val = tr32(REG); \ - (PSTAT)->low += __val; \ - if ((PSTAT)->low < __val) \ - (PSTAT)->high += 1; \ -} while (0) - -static void tg3_periodic_fetch_stats(struct tg3 *tp) -{ - struct tg3_hw_stats *sp = tp->hw_stats; - - if (!netif_carrier_ok(tp->dev)) - return; - - TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); - TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); - TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); - TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); - TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); - TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); - TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); - TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); - TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); - TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); - TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); - TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); - TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); - - TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); - TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); - TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); - TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); - TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); - TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); - TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); - TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); - TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); - TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); - TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); - TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); - TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); - TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); - - TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 && - tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) { - TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); - } else { - u32 val = tr32(HOSTCC_FLOW_ATTN); - val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; - if (val) { - tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); - sp->rx_discards.low += val; - if (sp->rx_discards.low < val) - sp->rx_discards.high += 1; - } - sp->mbuf_lwm_thresh_hit = sp->rx_discards; - } - TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); -} - -static void tg3_chk_missed_msi(struct tg3 *tp) -{ - u32 i; - - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - - if (tg3_has_work(tnapi)) { - if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && - tnapi->last_tx_cons == tnapi->tx_cons) { - if (tnapi->chk_msi_cnt < 1) { - tnapi->chk_msi_cnt++; - return; - } - tw32_mailbox(tnapi->int_mbox, - tnapi->last_tag << 24); - } - } - tnapi->chk_msi_cnt = 0; - tnapi->last_rx_cons = tnapi->rx_rcb_ptr; - tnapi->last_tx_cons = tnapi->tx_cons; - } -} - -static void tg3_timer(unsigned long __opaque) -{ - struct tg3 *tp = (struct tg3 *) __opaque; - - if (tp->irq_sync) - goto restart_timer; - - spin_lock(&tp->lock); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - tg3_chk_missed_msi(tp); - - if (!tg3_flag(tp, TAGGED_STATUS)) { - /* All of this garbage is because when using non-tagged - * IRQ status the mailbox/status_block protocol the chip - * uses with the cpu is race prone. - */ - if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { - tw32(GRC_LOCAL_CTRL, - tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); - } else { - tw32(HOSTCC_MODE, tp->coalesce_mode | - HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); - } - - if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { - tg3_flag_set(tp, RESTART_TIMER); - spin_unlock(&tp->lock); - schedule_work(&tp->reset_task); - return; - } - } - - /* This part only runs once per second. */ - if (!--tp->timer_counter) { - if (tg3_flag(tp, 5705_PLUS)) - tg3_periodic_fetch_stats(tp); - - if (tp->setlpicnt && !--tp->setlpicnt) - tg3_phy_eee_enable(tp); - - if (tg3_flag(tp, USE_LINKCHG_REG)) { - u32 mac_stat; - int phy_event; - - mac_stat = tr32(MAC_STATUS); - - phy_event = 0; - if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { - if (mac_stat & MAC_STATUS_MI_INTERRUPT) - phy_event = 1; - } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) - phy_event = 1; - - if (phy_event) - tg3_setup_phy(tp, 0); - } else if (tg3_flag(tp, POLL_SERDES)) { - u32 mac_stat = tr32(MAC_STATUS); - int need_setup = 0; - - if (netif_carrier_ok(tp->dev) && - (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { - need_setup = 1; - } - if (!netif_carrier_ok(tp->dev) && - (mac_stat & (MAC_STATUS_PCS_SYNCED | - MAC_STATUS_SIGNAL_DET))) { - need_setup = 1; - } - if (need_setup) { - if (!tp->serdes_counter) { - tw32_f(MAC_MODE, - (tp->mac_mode & - ~MAC_MODE_PORT_MODE_MASK)); - udelay(40); - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - } - tg3_setup_phy(tp, 0); - } - } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && - tg3_flag(tp, 5780_CLASS)) { - tg3_serdes_parallel_detect(tp); - } - - tp->timer_counter = tp->timer_multiplier; - } - - /* Heartbeat is only sent once every 2 seconds. - * - * The heartbeat is to tell the ASF firmware that the host - * driver is still alive. In the event that the OS crashes, - * ASF needs to reset the hardware to free up the FIFO space - * that may be filled with rx packets destined for the host. - * If the FIFO is full, ASF will no longer function properly. - * - * Unintended resets have been reported on real time kernels - * where the timer doesn't run on time. Netpoll will also have - * same problem. 
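TG3_STAT_ADD32 above widens 32-bit hardware counter reads into 64-bit software accumulators: after the add, a sum smaller than the addend means the 32-bit addition wrapped, so carry into the high word. A runnable version of the same trick:

    #include <stdint.h>
    #include <stdio.h>

    struct stat64 { uint32_t low, high; };

    static void stat_add32(struct stat64 *s, uint32_t hw_val)
    {
        s->low += hw_val;
        if (s->low < hw_val)   /* 32-bit wraparound occurred */
            s->high += 1;
    }

    int main(void)
    {
        struct stat64 s = { 0xfffffff0u, 0 };

        stat_add32(&s, 0x20);
        printf("high=%u low=0x%x\n", s.high, s.low);   /* high=1 low=0x10 */
        return 0;
    }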
- * - * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware - * to check the ring condition when the heartbeat is expiring - * before doing the reset. This will prevent most unintended - * resets. - */ - if (!--tp->asf_counter) { - if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { - tg3_wait_for_event_ack(tp); - - tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, - FWCMD_NICDRV_ALIVE3); - tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); - tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, - TG3_FW_UPDATE_TIMEOUT_SEC); - - tg3_generate_fw_event(tp); - } - tp->asf_counter = tp->asf_multiplier; - } - - spin_unlock(&tp->lock); - -restart_timer: - tp->timer.expires = jiffies + tp->timer_offset; - add_timer(&tp->timer); -} - -static int tg3_request_irq(struct tg3 *tp, int irq_num) -{ - irq_handler_t fn; - unsigned long flags; - char *name; - struct tg3_napi *tnapi = &tp->napi[irq_num]; - - if (tp->irq_cnt == 1) - name = tp->dev->name; - else { - name = &tnapi->irq_lbl[0]; - snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); - name[IFNAMSIZ-1] = 0; - } - - if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { - fn = tg3_msi; - if (tg3_flag(tp, 1SHOT_MSI)) - fn = tg3_msi_1shot; - flags = 0; - } else { - fn = tg3_interrupt; - if (tg3_flag(tp, TAGGED_STATUS)) - fn = tg3_interrupt_tagged; - flags = IRQF_SHARED; - } - - return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); -} - -static int tg3_test_interrupt(struct tg3 *tp) -{ - struct tg3_napi *tnapi = &tp->napi[0]; - struct net_device *dev = tp->dev; - int err, i, intr_ok = 0; - u32 val; - - if (!netif_running(dev)) - return -ENODEV; - - tg3_disable_ints(tp); - - free_irq(tnapi->irq_vec, tnapi); - - /* - * Turn off MSI one shot mode. Otherwise this test has no - * observable way to know whether the interrupt was delivered. - */ - if (tg3_flag(tp, 57765_PLUS)) { - val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; - tw32(MSGINT_MODE, val); - } - - err = request_irq(tnapi->irq_vec, tg3_test_isr, - IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi); - if (err) - return err; - - tnapi->hw_status->status &= ~SD_STATUS_UPDATED; - tg3_enable_ints(tp); - - tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | - tnapi->coal_now); - - for (i = 0; i < 5; i++) { - u32 int_mbox, misc_host_ctrl; - - int_mbox = tr32_mailbox(tnapi->int_mbox); - misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); - - if ((int_mbox != 0) || - (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { - intr_ok = 1; - break; - } - - if (tg3_flag(tp, 57765_PLUS) && - tnapi->hw_status->status_tag != tnapi->last_tag) - tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); - - msleep(10); - } - - tg3_disable_ints(tp); - - free_irq(tnapi->irq_vec, tnapi); - - err = tg3_request_irq(tp, 0); - - if (err) - return err; - - if (intr_ok) { - /* Reenable MSI one shot mode. */ - if (tg3_flag(tp, 57765_PLUS)) { - val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; - tw32(MSGINT_MODE, val); - } - return 0; - } - - return -EIO; -} - -/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is - * successfully restored - */ -static int tg3_test_msi(struct tg3 *tp) -{ - int err; - u16 pci_cmd; - - if (!tg3_flag(tp, USING_MSI)) - return 0; - - /* Turn off SERR reporting in case MSI terminates with Master - * Abort. 
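The MSI test that begins above implements a generic "prove the interrupt can actually arrive, else fall back" policy: fire a test interrupt, and if it never shows up, drop back to INTx and reset the chip, since the failed MSI cycle may have ended in a Master Abort. The bare control flow, with every helper a hypothetical stub:

    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's own helpers. */
    static int fire_test_interrupt(void)  { return -1; }  /* pretend MSI failed */
    static void fall_back_to_intx(void)   { puts("switching to INTx"); }
    static int reset_and_reinit_hw(void)  { puts("chip reset"); return 0; }

    static int verify_msi_or_fall_back(void)
    {
        if (fire_test_interrupt() == 0)
            return 0;            /* MSI delivered; keep using it */

        fall_back_to_intx();
        return reset_and_reinit_hw();
    }

    int main(void)
    {
        return verify_msi_or_fall_back();
    }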
- */ - pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); - pci_write_config_word(tp->pdev, PCI_COMMAND, - pci_cmd & ~PCI_COMMAND_SERR); - - err = tg3_test_interrupt(tp); - - pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); - - if (!err) - return 0; - - /* other failures */ - if (err != -EIO) - return err; - - /* MSI test failed, go back to INTx mode */ - netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " - "to INTx mode. Please report this failure to the PCI " - "maintainer and include system chipset information\n"); - - free_irq(tp->napi[0].irq_vec, &tp->napi[0]); - - pci_disable_msi(tp->pdev); - - tg3_flag_clear(tp, USING_MSI); - tp->napi[0].irq_vec = tp->pdev->irq; - - err = tg3_request_irq(tp, 0); - if (err) - return err; - - /* Need to reset the chip because the MSI cycle may have terminated - * with Master Abort. - */ - tg3_full_lock(tp, 1); - - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - err = tg3_init_hw(tp, 1); - - tg3_full_unlock(tp); - - if (err) - free_irq(tp->napi[0].irq_vec, &tp->napi[0]); - - return err; -} - -static int tg3_request_firmware(struct tg3 *tp) -{ - const __be32 *fw_data; - - if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { - netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", - tp->fw_needed); - return -ENOENT; - } - - fw_data = (void *)tp->fw->data; - - /* Firmware blob starts with version numbers, followed by - * start address and _full_ length including BSS sections - * (which must be longer than the actual data, of course - */ - - tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ - if (tp->fw_len < (tp->fw->size - 12)) { - netdev_err(tp->dev, "bogus length %d in \"%s\"\n", - tp->fw_len, tp->fw_needed); - release_firmware(tp->fw); - tp->fw = NULL; - return -EINVAL; - } - - /* We no longer need firmware; we have it. */ - tp->fw_needed = NULL; - return 0; -} - -static bool tg3_enable_msix(struct tg3 *tp) -{ - int i, rc, cpus = num_online_cpus(); - struct msix_entry msix_ent[tp->irq_max]; - - if (cpus == 1) - /* Just fallback to the simpler MSI mode. */ - return false; - - /* - * We want as many rx rings enabled as there are cpus. - * The first MSIX vector only deals with link interrupts, etc, - * so we add one to the number of vectors we are requesting. - */ - tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max); - - for (i = 0; i < tp->irq_max; i++) { - msix_ent[i].entry = i; - msix_ent[i].vector = 0; - } - - rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); - if (rc < 0) { - return false; - } else if (rc != 0) { - if (pci_enable_msix(tp->pdev, msix_ent, rc)) - return false; - netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", - tp->irq_cnt, rc); - tp->irq_cnt = rc; - } - - for (i = 0; i < tp->irq_max; i++) - tp->napi[i].irq_vec = msix_ent[i].vector; - - netif_set_real_num_tx_queues(tp->dev, 1); - rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1; - if (netif_set_real_num_rx_queues(tp->dev, rc)) { - pci_disable_msix(tp->pdev); - return false; - } - - if (tp->irq_cnt > 1) { - tg3_flag_set(tp, ENABLE_RSS); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { - tg3_flag_set(tp, ENABLE_TSS); - netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1); - } - } - - return true; -} - -static void tg3_ints_init(struct tg3 *tp) -{ - if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && - !tg3_flag(tp, TAGGED_STATUS)) { - /* All MSI supporting chips should support tagged - * status. Assert that this is the case. 
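The sanity check in tg3_request_firmware() above works because the header's total length covers text+data+bss, so it must be at least as large as the payload actually present in the file (file size minus the 12-byte header); anything smaller marks a corrupt or truncated blob. The same test as a predicate:

    #include <stdint.h>
    #include <stddef.h>

    static int fw_len_plausible(uint32_t hdr_total_len, size_t file_size)
    {
        if (file_size < 12)
            return 0;                        /* no room for the header */
        return hdr_total_len >= file_size - 12;   /* bss may extend past the file */
    }

    int main(void)
    {
        /* A 0x3000-byte image whose header claims 0x4000 total: plausible. */
        return fw_len_plausible(0x4000, 12 + 0x3000) ? 0 : 1;
    }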
- */ - netdev_warn(tp->dev, - "MSI without TAGGED_STATUS? Not using MSI\n"); - goto defcfg; - } - - if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) - tg3_flag_set(tp, USING_MSIX); - else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) - tg3_flag_set(tp, USING_MSI); - - if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { - u32 msi_mode = tr32(MSGINT_MODE); - if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) - msi_mode |= MSGINT_MODE_MULTIVEC_EN; - tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); - } -defcfg: - if (!tg3_flag(tp, USING_MSIX)) { - tp->irq_cnt = 1; - tp->napi[0].irq_vec = tp->pdev->irq; - netif_set_real_num_tx_queues(tp->dev, 1); - netif_set_real_num_rx_queues(tp->dev, 1); - } -} - -static void tg3_ints_fini(struct tg3 *tp) -{ - if (tg3_flag(tp, USING_MSIX)) - pci_disable_msix(tp->pdev); - else if (tg3_flag(tp, USING_MSI)) - pci_disable_msi(tp->pdev); - tg3_flag_clear(tp, USING_MSI); - tg3_flag_clear(tp, USING_MSIX); - tg3_flag_clear(tp, ENABLE_RSS); - tg3_flag_clear(tp, ENABLE_TSS); -} - -static int tg3_open(struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - int i, err; - - if (tp->fw_needed) { - err = tg3_request_firmware(tp); - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { - if (err) - return err; - } else if (err) { - netdev_warn(tp->dev, "TSO capability disabled\n"); - tg3_flag_clear(tp, TSO_CAPABLE); - } else if (!tg3_flag(tp, TSO_CAPABLE)) { - netdev_notice(tp->dev, "TSO capability restored\n"); - tg3_flag_set(tp, TSO_CAPABLE); - } - } - - netif_carrier_off(tp->dev); - - err = tg3_power_up(tp); - if (err) - return err; - - tg3_full_lock(tp, 0); - - tg3_disable_ints(tp); - tg3_flag_clear(tp, INIT_COMPLETE); - - tg3_full_unlock(tp); - - /* - * Setup interrupts first so we know how - * many NAPI resources to allocate - */ - tg3_ints_init(tp); - - /* The placement of this call is tied - * to the setup and use of Host TX descriptors. 
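- *
- * Roughly: tg3_ints_init() above fixes tp->irq_cnt, and
- * tg3_alloc_consistent() then sizes the per-vector NAPI state
- * and rings from it, so the sequence
- *
- *	tg3_ints_init(tp);
- *	err = tg3_alloc_consistent(tp);
- *
- * must keep this order.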
- */ - err = tg3_alloc_consistent(tp); - if (err) - goto err_out1; - - tg3_napi_init(tp); - - tg3_napi_enable(tp); - - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - err = tg3_request_irq(tp, i); - if (err) { - for (i--; i >= 0; i--) - free_irq(tnapi->irq_vec, tnapi); - break; - } - } - - if (err) - goto err_out2; - - tg3_full_lock(tp, 0); - - err = tg3_init_hw(tp, 1); - if (err) { - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_free_rings(tp); - } else { - if (tg3_flag(tp, TAGGED_STATUS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) - tp->timer_offset = HZ; - else - tp->timer_offset = HZ / 10; - - BUG_ON(tp->timer_offset > HZ); - tp->timer_counter = tp->timer_multiplier = - (HZ / tp->timer_offset); - tp->asf_counter = tp->asf_multiplier = - ((HZ / tp->timer_offset) * 2); - - init_timer(&tp->timer); - tp->timer.expires = jiffies + tp->timer_offset; - tp->timer.data = (unsigned long) tp; - tp->timer.function = tg3_timer; - } - - tg3_full_unlock(tp); - - if (err) - goto err_out3; - - if (tg3_flag(tp, USING_MSI)) { - err = tg3_test_msi(tp); - - if (err) { - tg3_full_lock(tp, 0); - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_free_rings(tp); - tg3_full_unlock(tp); - - goto err_out2; - } - - if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { - u32 val = tr32(PCIE_TRANSACTION_CFG); - - tw32(PCIE_TRANSACTION_CFG, - val | PCIE_TRANS_CFG_1SHOT_MSI); - } - } - - tg3_phy_start(tp); - - tg3_full_lock(tp, 0); - - add_timer(&tp->timer); - tg3_flag_set(tp, INIT_COMPLETE); - tg3_enable_ints(tp); - - tg3_full_unlock(tp); - - netif_tx_start_all_queues(dev); - - /* - * Reset loopback feature if it was turned on while the device was down - * make sure that it's installed properly now. 
- */ - if (dev->features & NETIF_F_LOOPBACK) - tg3_set_loopback(dev, dev->features); - - return 0; - -err_out3: - for (i = tp->irq_cnt - 1; i >= 0; i--) { - struct tg3_napi *tnapi = &tp->napi[i]; - free_irq(tnapi->irq_vec, tnapi); - } - -err_out2: - tg3_napi_disable(tp); - tg3_napi_fini(tp); - tg3_free_consistent(tp); - -err_out1: - tg3_ints_fini(tp); - tg3_frob_aux_power(tp, false); - pci_set_power_state(tp->pdev, PCI_D3hot); - return err; -} - -static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *, - struct rtnl_link_stats64 *); -static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); - -static int tg3_close(struct net_device *dev) -{ - int i; - struct tg3 *tp = netdev_priv(dev); - - tg3_napi_disable(tp); - cancel_work_sync(&tp->reset_task); - - netif_tx_stop_all_queues(dev); - - del_timer_sync(&tp->timer); - - tg3_phy_stop(tp); - - tg3_full_lock(tp, 1); - - tg3_disable_ints(tp); - - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_free_rings(tp); - tg3_flag_clear(tp, INIT_COMPLETE); - - tg3_full_unlock(tp); - - for (i = tp->irq_cnt - 1; i >= 0; i--) { - struct tg3_napi *tnapi = &tp->napi[i]; - free_irq(tnapi->irq_vec, tnapi); - } - - tg3_ints_fini(tp); - - tg3_get_stats64(tp->dev, &tp->net_stats_prev); - - memcpy(&tp->estats_prev, tg3_get_estats(tp), - sizeof(tp->estats_prev)); - - tg3_napi_fini(tp); - - tg3_free_consistent(tp); - - tg3_power_down(tp); - - netif_carrier_off(tp->dev); - - return 0; -} - -static inline u64 get_stat64(tg3_stat64_t *val) -{ - return ((u64)val->high << 32) | ((u64)val->low); -} - -static u64 calc_crc_errors(struct tg3 *tp) -{ - struct tg3_hw_stats *hw_stats = tp->hw_stats; - - if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { - u32 val; - - spin_lock_bh(&tp->lock); - if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { - tg3_writephy(tp, MII_TG3_TEST1, - val | MII_TG3_TEST1_CRC_EN); - tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); - } else - val = 0; - spin_unlock_bh(&tp->lock); - - tp->phy_crc_errors += val; - - return tp->phy_crc_errors; - } - - return get_stat64(&hw_stats->rx_fcs_errors); -} - -#define ESTAT_ADD(member) \ - estats->member = old_estats->member + \ - get_stat64(&hw_stats->member) - -static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) -{ - struct tg3_ethtool_stats *estats = &tp->estats; - struct tg3_ethtool_stats *old_estats = &tp->estats_prev; - struct tg3_hw_stats *hw_stats = tp->hw_stats; - - if (!hw_stats) - return old_estats; - - ESTAT_ADD(rx_octets); - ESTAT_ADD(rx_fragments); - ESTAT_ADD(rx_ucast_packets); - ESTAT_ADD(rx_mcast_packets); - ESTAT_ADD(rx_bcast_packets); - ESTAT_ADD(rx_fcs_errors); - ESTAT_ADD(rx_align_errors); - ESTAT_ADD(rx_xon_pause_rcvd); - ESTAT_ADD(rx_xoff_pause_rcvd); - ESTAT_ADD(rx_mac_ctrl_rcvd); - ESTAT_ADD(rx_xoff_entered); - ESTAT_ADD(rx_frame_too_long_errors); - ESTAT_ADD(rx_jabbers); - ESTAT_ADD(rx_undersize_packets); - ESTAT_ADD(rx_in_length_errors); - ESTAT_ADD(rx_out_length_errors); - ESTAT_ADD(rx_64_or_less_octet_packets); - ESTAT_ADD(rx_65_to_127_octet_packets); - ESTAT_ADD(rx_128_to_255_octet_packets); - ESTAT_ADD(rx_256_to_511_octet_packets); - ESTAT_ADD(rx_512_to_1023_octet_packets); - ESTAT_ADD(rx_1024_to_1522_octet_packets); - ESTAT_ADD(rx_1523_to_2047_octet_packets); - ESTAT_ADD(rx_2048_to_4095_octet_packets); - ESTAT_ADD(rx_4096_to_8191_octet_packets); - ESTAT_ADD(rx_8192_to_9022_octet_packets); - - ESTAT_ADD(tx_octets); - ESTAT_ADD(tx_collisions); - 
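- /* Each ESTAT_ADD() expands per the macro above; the next line,
- * for instance, is equivalent to
- *
- *	estats->tx_xon_sent = old_estats->tx_xon_sent +
- *		get_stat64(&hw_stats->tx_xon_sent);
- */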
ESTAT_ADD(tx_xon_sent); - ESTAT_ADD(tx_xoff_sent); - ESTAT_ADD(tx_flow_control); - ESTAT_ADD(tx_mac_errors); - ESTAT_ADD(tx_single_collisions); - ESTAT_ADD(tx_mult_collisions); - ESTAT_ADD(tx_deferred); - ESTAT_ADD(tx_excessive_collisions); - ESTAT_ADD(tx_late_collisions); - ESTAT_ADD(tx_collide_2times); - ESTAT_ADD(tx_collide_3times); - ESTAT_ADD(tx_collide_4times); - ESTAT_ADD(tx_collide_5times); - ESTAT_ADD(tx_collide_6times); - ESTAT_ADD(tx_collide_7times); - ESTAT_ADD(tx_collide_8times); - ESTAT_ADD(tx_collide_9times); - ESTAT_ADD(tx_collide_10times); - ESTAT_ADD(tx_collide_11times); - ESTAT_ADD(tx_collide_12times); - ESTAT_ADD(tx_collide_13times); - ESTAT_ADD(tx_collide_14times); - ESTAT_ADD(tx_collide_15times); - ESTAT_ADD(tx_ucast_packets); - ESTAT_ADD(tx_mcast_packets); - ESTAT_ADD(tx_bcast_packets); - ESTAT_ADD(tx_carrier_sense_errors); - ESTAT_ADD(tx_discards); - ESTAT_ADD(tx_errors); - - ESTAT_ADD(dma_writeq_full); - ESTAT_ADD(dma_write_prioq_full); - ESTAT_ADD(rxbds_empty); - ESTAT_ADD(rx_discards); - ESTAT_ADD(rx_errors); - ESTAT_ADD(rx_threshold_hit); - - ESTAT_ADD(dma_readq_full); - ESTAT_ADD(dma_read_prioq_full); - ESTAT_ADD(tx_comp_queue_full); - - ESTAT_ADD(ring_set_send_prod_index); - ESTAT_ADD(ring_status_update); - ESTAT_ADD(nic_irqs); - ESTAT_ADD(nic_avoided_irqs); - ESTAT_ADD(nic_tx_threshold_hit); - - ESTAT_ADD(mbuf_lwm_thresh_hit); - - return estats; -} - -static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) -{ - struct tg3 *tp = netdev_priv(dev); - struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; - struct tg3_hw_stats *hw_stats = tp->hw_stats; - - if (!hw_stats) - return old_stats; - - stats->rx_packets = old_stats->rx_packets + - get_stat64(&hw_stats->rx_ucast_packets) + - get_stat64(&hw_stats->rx_mcast_packets) + - get_stat64(&hw_stats->rx_bcast_packets); - - stats->tx_packets = old_stats->tx_packets + - get_stat64(&hw_stats->tx_ucast_packets) + - get_stat64(&hw_stats->tx_mcast_packets) + - get_stat64(&hw_stats->tx_bcast_packets); - - stats->rx_bytes = old_stats->rx_bytes + - get_stat64(&hw_stats->rx_octets); - stats->tx_bytes = old_stats->tx_bytes + - get_stat64(&hw_stats->tx_octets); - - stats->rx_errors = old_stats->rx_errors + - get_stat64(&hw_stats->rx_errors); - stats->tx_errors = old_stats->tx_errors + - get_stat64(&hw_stats->tx_errors) + - get_stat64(&hw_stats->tx_mac_errors) + - get_stat64(&hw_stats->tx_carrier_sense_errors) + - get_stat64(&hw_stats->tx_discards); - - stats->multicast = old_stats->multicast + - get_stat64(&hw_stats->rx_mcast_packets); - stats->collisions = old_stats->collisions + - get_stat64(&hw_stats->tx_collisions); - - stats->rx_length_errors = old_stats->rx_length_errors + - get_stat64(&hw_stats->rx_frame_too_long_errors) + - get_stat64(&hw_stats->rx_undersize_packets); - - stats->rx_over_errors = old_stats->rx_over_errors + - get_stat64(&hw_stats->rxbds_empty); - stats->rx_frame_errors = old_stats->rx_frame_errors + - get_stat64(&hw_stats->rx_align_errors); - stats->tx_aborted_errors = old_stats->tx_aborted_errors + - get_stat64(&hw_stats->tx_discards); - stats->tx_carrier_errors = old_stats->tx_carrier_errors + - get_stat64(&hw_stats->tx_carrier_sense_errors); - - stats->rx_crc_errors = old_stats->rx_crc_errors + - calc_crc_errors(tp); - - stats->rx_missed_errors = old_stats->rx_missed_errors + - get_stat64(&hw_stats->rx_discards); - - stats->rx_dropped = tp->rx_dropped; - - return stats; -} - -static inline u32 calc_crc(unsigned char *buf, int len) -{ - u32 reg; - 
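- /* Bit-reflected CRC-32 over buf, polynomial 0xedb88320, as used
- * for the Ethernet FCS; the multicast filter code below hashes
- * each address with it, e.g.
- *
- *	crc = calc_crc(ha->addr, ETH_ALEN);
- */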
u32 tmp; - int j, k; - - reg = 0xffffffff; - - for (j = 0; j < len; j++) { - reg ^= buf[j]; - - for (k = 0; k < 8; k++) { - tmp = reg & 0x01; - - reg >>= 1; - - if (tmp) - reg ^= 0xedb88320; - } - } - - return ~reg; -} - -static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) -{ - /* accept or reject all multicast frames */ - tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); - tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); - tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); - tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); -} - -static void __tg3_set_rx_mode(struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - u32 rx_mode; - - rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | - RX_MODE_KEEP_VLAN_TAG); - -#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) - /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG - * flag clear. - */ - if (!tg3_flag(tp, ENABLE_ASF)) - rx_mode |= RX_MODE_KEEP_VLAN_TAG; -#endif - - if (dev->flags & IFF_PROMISC) { - /* Promiscuous mode. */ - rx_mode |= RX_MODE_PROMISC; - } else if (dev->flags & IFF_ALLMULTI) { - /* Accept all multicast. */ - tg3_set_multi(tp, 1); - } else if (netdev_mc_empty(dev)) { - /* Reject all multicast. */ - tg3_set_multi(tp, 0); - } else { - /* Accept one or more multicast(s). */ - struct netdev_hw_addr *ha; - u32 mc_filter[4] = { 0, }; - u32 regidx; - u32 bit; - u32 crc; - - netdev_for_each_mc_addr(ha, dev) { - crc = calc_crc(ha->addr, ETH_ALEN); - bit = ~crc & 0x7f; - regidx = (bit & 0x60) >> 5; - bit &= 0x1f; - mc_filter[regidx] |= (1 << bit); - } - - tw32(MAC_HASH_REG_0, mc_filter[0]); - tw32(MAC_HASH_REG_1, mc_filter[1]); - tw32(MAC_HASH_REG_2, mc_filter[2]); - tw32(MAC_HASH_REG_3, mc_filter[3]); - } - - if (rx_mode != tp->rx_mode) { - tp->rx_mode = rx_mode; - tw32_f(MAC_RX_MODE, rx_mode); - udelay(10); - } -} - -static void tg3_set_rx_mode(struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - - if (!netif_running(dev)) - return; - - tg3_full_lock(tp, 0); - __tg3_set_rx_mode(dev); - tg3_full_unlock(tp); -} - -static int tg3_get_regs_len(struct net_device *dev) -{ - return TG3_REG_BLK_SIZE; -} - -static void tg3_get_regs(struct net_device *dev, - struct ethtool_regs *regs, void *_p) -{ - struct tg3 *tp = netdev_priv(dev); - - regs->version = 0; - - memset(_p, 0, TG3_REG_BLK_SIZE); - - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - return; - - tg3_full_lock(tp, 0); - - tg3_dump_legacy_regs(tp, (u32 *)_p); - - tg3_full_unlock(tp); -} - -static int tg3_get_eeprom_len(struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - - return tp->nvram_size; -} - -static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) -{ - struct tg3 *tp = netdev_priv(dev); - int ret; - u8 *pd; - u32 i, offset, len, b_offset, b_count; - __be32 val; - - if (tg3_flag(tp, NO_NVRAM)) - return -EINVAL; - - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - return -EAGAIN; - - offset = eeprom->offset; - len = eeprom->len; - eeprom->len = 0; - - eeprom->magic = TG3_EEPROM_MAGIC; - - if (offset & 3) { - /* adjustments to start on required 4 byte boundary */ - b_offset = offset & 3; - b_count = 4 - b_offset; - if (b_count > len) { - /* i.e. 
offset=1 len=2 */ - b_count = len; - } - ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); - if (ret) - return ret; - memcpy(data, ((char *)&val) + b_offset, b_count); - len -= b_count; - offset += b_count; - eeprom->len += b_count; - } - - /* read bytes up to the last 4 byte boundary */ - pd = &data[eeprom->len]; - for (i = 0; i < (len - (len & 3)); i += 4) { - ret = tg3_nvram_read_be32(tp, offset + i, &val); - if (ret) { - eeprom->len += i; - return ret; - } - memcpy(pd + i, &val, 4); - } - eeprom->len += i; - - if (len & 3) { - /* read last bytes not ending on 4 byte boundary */ - pd = &data[eeprom->len]; - b_count = len & 3; - b_offset = offset + len - b_count; - ret = tg3_nvram_read_be32(tp, b_offset, &val); - if (ret) - return ret; - memcpy(pd, &val, b_count); - eeprom->len += b_count; - } - return 0; -} - -static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); - -static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) -{ - struct tg3 *tp = netdev_priv(dev); - int ret; - u32 offset, len, b_offset, odd_len; - u8 *buf; - __be32 start, end; - - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - return -EAGAIN; - - if (tg3_flag(tp, NO_NVRAM) || - eeprom->magic != TG3_EEPROM_MAGIC) - return -EINVAL; - - offset = eeprom->offset; - len = eeprom->len; - - if ((b_offset = (offset & 3))) { - /* adjustments to start on required 4 byte boundary */ - ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); - if (ret) - return ret; - len += b_offset; - offset &= ~3; - if (len < 4) - len = 4; - } - - odd_len = 0; - if (len & 3) { - /* adjustments to end on required 4 byte boundary */ - odd_len = 1; - len = (len + 3) & ~3; - ret = tg3_nvram_read_be32(tp, offset+len-4, &end); - if (ret) - return ret; - } - - buf = data; - if (b_offset || odd_len) { - buf = kmalloc(len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - if (b_offset) - memcpy(buf, &start, 4); - if (odd_len) - memcpy(buf+len-4, &end, 4); - memcpy(buf + b_offset, data, eeprom->len); - } - - ret = tg3_nvram_write_block(tp, offset, len, buf); - - if (buf != data) - kfree(buf); - - return ret; -} - -static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct tg3 *tp = netdev_priv(dev); - - if (tg3_flag(tp, USE_PHYLIB)) { - struct phy_device *phydev; - if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) - return -EAGAIN; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - return phy_ethtool_gset(phydev, cmd); - } - - cmd->supported = (SUPPORTED_Autoneg); - - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) - cmd->supported |= (SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); - - if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { - cmd->supported |= (SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_TP); - cmd->port = PORT_TP; - } else { - cmd->supported |= SUPPORTED_FIBRE; - cmd->port = PORT_FIBRE; - } - - cmd->advertising = tp->link_config.advertising; - if (tg3_flag(tp, PAUSE_AUTONEG)) { - if (tp->link_config.flowctrl & FLOW_CTRL_RX) { - if (tp->link_config.flowctrl & FLOW_CTRL_TX) { - cmd->advertising |= ADVERTISED_Pause; - } else { - cmd->advertising |= ADVERTISED_Pause | - ADVERTISED_Asym_Pause; - } - } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { - cmd->advertising |= ADVERTISED_Asym_Pause; - } - } - if (netif_running(dev)) { - ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); - cmd->duplex = tp->link_config.active_duplex; - } else { - ethtool_cmd_speed_set(cmd, 
SPEED_INVALID); - cmd->duplex = DUPLEX_INVALID; - } - cmd->phy_address = tp->phy_addr; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = tp->link_config.autoneg; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - return 0; -} - -static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct tg3 *tp = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(cmd); - - if (tg3_flag(tp, USE_PHYLIB)) { - struct phy_device *phydev; - if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) - return -EAGAIN; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - return phy_ethtool_sset(phydev, cmd); - } - - if (cmd->autoneg != AUTONEG_ENABLE && - cmd->autoneg != AUTONEG_DISABLE) - return -EINVAL; - - if (cmd->autoneg == AUTONEG_DISABLE && - cmd->duplex != DUPLEX_FULL && - cmd->duplex != DUPLEX_HALF) - return -EINVAL; - - if (cmd->autoneg == AUTONEG_ENABLE) { - u32 mask = ADVERTISED_Autoneg | - ADVERTISED_Pause | - ADVERTISED_Asym_Pause; - - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) - mask |= ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full; - - if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) - mask |= ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_TP; - else - mask |= ADVERTISED_FIBRE; - - if (cmd->advertising & ~mask) - return -EINVAL; - - mask &= (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full); - - cmd->advertising &= mask; - } else { - if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { - if (speed != SPEED_1000) - return -EINVAL; - - if (cmd->duplex != DUPLEX_FULL) - return -EINVAL; - } else { - if (speed != SPEED_100 && - speed != SPEED_10) - return -EINVAL; - } - } - - tg3_full_lock(tp, 0); - - tp->link_config.autoneg = cmd->autoneg; - if (cmd->autoneg == AUTONEG_ENABLE) { - tp->link_config.advertising = (cmd->advertising | - ADVERTISED_Autoneg); - tp->link_config.speed = SPEED_INVALID; - tp->link_config.duplex = DUPLEX_INVALID; - } else { - tp->link_config.advertising = 0; - tp->link_config.speed = speed; - tp->link_config.duplex = cmd->duplex; - } - - tp->link_config.orig_speed = tp->link_config.speed; - tp->link_config.orig_duplex = tp->link_config.duplex; - tp->link_config.orig_autoneg = tp->link_config.autoneg; - - if (netif_running(dev)) - tg3_setup_phy(tp, 1); - - tg3_full_unlock(tp); - - return 0; -} - -static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct tg3 *tp = netdev_priv(dev); - - strcpy(info->driver, DRV_MODULE_NAME); - strcpy(info->version, DRV_MODULE_VERSION); - strcpy(info->fw_version, tp->fw_ver); - strcpy(info->bus_info, pci_name(tp->pdev)); -} - -static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct tg3 *tp = netdev_priv(dev); - - if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) - wol->supported = WAKE_MAGIC; - else - wol->supported = 0; - wol->wolopts = 0; - if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) - wol->wolopts = WAKE_MAGIC; - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct tg3 *tp = netdev_priv(dev); - struct device *dp = &tp->pdev->dev; - - if (wol->wolopts & ~WAKE_MAGIC) - return -EINVAL; - if ((wol->wolopts & WAKE_MAGIC) && - !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) - return -EINVAL; - - device_set_wakeup_enable(dp, wol->wolopts & 
WAKE_MAGIC); - - spin_lock_bh(&tp->lock); - if (device_may_wakeup(dp)) - tg3_flag_set(tp, WOL_ENABLE); - else - tg3_flag_clear(tp, WOL_ENABLE); - spin_unlock_bh(&tp->lock); - - return 0; -} - -static u32 tg3_get_msglevel(struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - return tp->msg_enable; -} - -static void tg3_set_msglevel(struct net_device *dev, u32 value) -{ - struct tg3 *tp = netdev_priv(dev); - tp->msg_enable = value; -} - -static int tg3_nway_reset(struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - int r; - - if (!netif_running(dev)) - return -EAGAIN; - - if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) - return -EINVAL; - - if (tg3_flag(tp, USE_PHYLIB)) { - if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) - return -EAGAIN; - r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); - } else { - u32 bmcr; - - spin_lock_bh(&tp->lock); - r = -EINVAL; - tg3_readphy(tp, MII_BMCR, &bmcr); - if (!tg3_readphy(tp, MII_BMCR, &bmcr) && - ((bmcr & BMCR_ANENABLE) || - (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { - tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | - BMCR_ANENABLE); - r = 0; - } - spin_unlock_bh(&tp->lock); - } - - return r; -} - -static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) -{ - struct tg3 *tp = netdev_priv(dev); - - ering->rx_max_pending = tp->rx_std_ring_mask; - ering->rx_mini_max_pending = 0; - if (tg3_flag(tp, JUMBO_RING_ENABLE)) - ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; - else - ering->rx_jumbo_max_pending = 0; - - ering->tx_max_pending = TG3_TX_RING_SIZE - 1; - - ering->rx_pending = tp->rx_pending; - ering->rx_mini_pending = 0; - if (tg3_flag(tp, JUMBO_RING_ENABLE)) - ering->rx_jumbo_pending = tp->rx_jumbo_pending; - else - ering->rx_jumbo_pending = 0; - - ering->tx_pending = tp->napi[0].tx_pending; -} - -static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) -{ - struct tg3 *tp = netdev_priv(dev); - int i, irq_sync = 0, err = 0; - - if ((ering->rx_pending > tp->rx_std_ring_mask) || - (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || - (ering->tx_pending > TG3_TX_RING_SIZE - 1) || - (ering->tx_pending <= MAX_SKB_FRAGS) || - (tg3_flag(tp, TSO_BUG) && - (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) - return -EINVAL; - - if (netif_running(dev)) { - tg3_phy_stop(tp); - tg3_netif_stop(tp); - irq_sync = 1; - } - - tg3_full_lock(tp, irq_sync); - - tp->rx_pending = ering->rx_pending; - - if (tg3_flag(tp, MAX_RXPEND_64) && - tp->rx_pending > 63) - tp->rx_pending = 63; - tp->rx_jumbo_pending = ering->rx_jumbo_pending; - - for (i = 0; i < tp->irq_max; i++) - tp->napi[i].tx_pending = ering->tx_pending; - - if (netif_running(dev)) { - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - err = tg3_restart_hw(tp, 1); - if (!err) - tg3_netif_start(tp); - } - - tg3_full_unlock(tp); - - if (irq_sync && !err) - tg3_phy_start(tp); - - return err; -} - -static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) -{ - struct tg3 *tp = netdev_priv(dev); - - epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); - - if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) - epause->rx_pause = 1; - else - epause->rx_pause = 0; - - if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) - epause->tx_pause = 1; - else - epause->tx_pause = 0; -} - -static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) -{ - struct tg3 *tp = netdev_priv(dev); - int err = 0; - - if (tg3_flag(tp, USE_PHYLIB)) { - u32 newadv; - struct phy_device *phydev; - - phydev = 
tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
-
- if (!(phydev->supported & SUPPORTED_Pause) ||
- (!(phydev->supported & SUPPORTED_Asym_Pause) &&
- (epause->rx_pause != epause->tx_pause)))
- return -EINVAL;
-
- tp->link_config.flowctrl = 0;
- if (epause->rx_pause) {
- tp->link_config.flowctrl |= FLOW_CTRL_RX;
-
- if (epause->tx_pause) {
- tp->link_config.flowctrl |= FLOW_CTRL_TX;
- newadv = ADVERTISED_Pause;
- } else
- newadv = ADVERTISED_Pause |
- ADVERTISED_Asym_Pause;
- } else if (epause->tx_pause) {
- tp->link_config.flowctrl |= FLOW_CTRL_TX;
- newadv = ADVERTISED_Asym_Pause;
- } else
- newadv = 0;
-
- if (epause->autoneg)
- tg3_flag_set(tp, PAUSE_AUTONEG);
- else
- tg3_flag_clear(tp, PAUSE_AUTONEG);
-
- if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
- u32 oldadv = phydev->advertising &
- (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
- if (oldadv != newadv) {
- phydev->advertising &=
- ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- phydev->advertising |= newadv;
- if (phydev->autoneg) {
- /*
- * Always renegotiate the link to
- * inform our link partner of our
- * flow control settings, even if the
- * flow control is forced. Let
- * tg3_adjust_link() do the final
- * flow control setup.
- */
- return phy_start_aneg(phydev);
- }
- }
-
- if (!epause->autoneg)
- tg3_setup_flow_control(tp, 0, 0);
- } else {
- tp->link_config.orig_advertising &=
- ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- tp->link_config.orig_advertising |= newadv;
- }
- } else {
- int irq_sync = 0;
-
- if (netif_running(dev)) {
- tg3_netif_stop(tp);
- irq_sync = 1;
- }
-
- tg3_full_lock(tp, irq_sync);
-
- if (epause->autoneg)
- tg3_flag_set(tp, PAUSE_AUTONEG);
- else
- tg3_flag_clear(tp, PAUSE_AUTONEG);
- if (epause->rx_pause)
- tp->link_config.flowctrl |= FLOW_CTRL_RX;
- else
- tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
- if (epause->tx_pause)
- tp->link_config.flowctrl |= FLOW_CTRL_TX;
- else
- tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
-
- if (netif_running(dev)) {
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, 1);
- if (!err)
- tg3_netif_start(tp);
- }
-
- tg3_full_unlock(tp);
- }
-
- return err;
-}
-
-static int tg3_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_TEST:
- return TG3_NUM_TEST;
- case ETH_SS_STATS:
- return TG3_NUM_STATS;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
-{
- switch (stringset) {
- case ETH_SS_STATS:
- memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
- break;
- case ETH_SS_TEST:
- memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
- break;
- default:
- WARN_ON(1); /* we need a WARN() */
- break;
- }
-}
-
-static int tg3_set_phys_id(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (!netif_running(tp->dev))
- return -EAGAIN;
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- return 1; /* cycle on/off once per second */
-
- case ETHTOOL_ID_ON:
- tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
- LED_CTRL_1000MBPS_ON |
- LED_CTRL_100MBPS_ON |
- LED_CTRL_10MBPS_ON |
- LED_CTRL_TRAFFIC_OVERRIDE |
- LED_CTRL_TRAFFIC_BLINK |
- LED_CTRL_TRAFFIC_LED);
- break;
-
- case ETHTOOL_ID_OFF:
- tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
- LED_CTRL_TRAFFIC_OVERRIDE);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- tw32(MAC_LED_CTRL, tp->led_ctrl);
- break;
- }
-
- return 0;
-}
-
-static void tg3_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *estats, u64 *tmp_stats)
-{
- struct tg3 *tp = 
netdev_priv(dev); - memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); -} - -static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) -{ - int i; - __be32 *buf; - u32 offset = 0, len = 0; - u32 magic, val; - - if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) - return NULL; - - if (magic == TG3_EEPROM_MAGIC) { - for (offset = TG3_NVM_DIR_START; - offset < TG3_NVM_DIR_END; - offset += TG3_NVM_DIRENT_SIZE) { - if (tg3_nvram_read(tp, offset, &val)) - return NULL; - - if ((val >> TG3_NVM_DIRTYPE_SHIFT) == - TG3_NVM_DIRTYPE_EXTVPD) - break; - } - - if (offset != TG3_NVM_DIR_END) { - len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; - if (tg3_nvram_read(tp, offset + 4, &offset)) - return NULL; - - offset = tg3_nvram_logical_addr(tp, offset); - } - } - - if (!offset || !len) { - offset = TG3_NVM_VPD_OFF; - len = TG3_NVM_VPD_LEN; - } - - buf = kmalloc(len, GFP_KERNEL); - if (buf == NULL) - return NULL; - - if (magic == TG3_EEPROM_MAGIC) { - for (i = 0; i < len; i += 4) { - /* The data is in little-endian format in NVRAM. - * Use the big-endian read routines to preserve - * the byte order as it exists in NVRAM. - */ - if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) - goto error; - } - } else { - u8 *ptr; - ssize_t cnt; - unsigned int pos = 0; - - ptr = (u8 *)&buf[0]; - for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { - cnt = pci_read_vpd(tp->pdev, pos, - len - pos, ptr); - if (cnt == -ETIMEDOUT || cnt == -EINTR) - cnt = 0; - else if (cnt < 0) - goto error; - } - if (pos != len) - goto error; - } - - *vpdlen = len; - - return buf; - -error: - kfree(buf); - return NULL; -} - -#define NVRAM_TEST_SIZE 0x100 -#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 -#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 -#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c -#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 -#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 -#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 -#define NVRAM_SELFBOOT_HW_SIZE 0x20 -#define NVRAM_SELFBOOT_DATA_SIZE 0x1c - -static int tg3_test_nvram(struct tg3 *tp) -{ - u32 csum, magic, len; - __be32 *buf; - int i, j, k, err = 0, size; - - if (tg3_flag(tp, NO_NVRAM)) - return 0; - - if (tg3_nvram_read(tp, 0, &magic) != 0) - return -EIO; - - if (magic == TG3_EEPROM_MAGIC) - size = NVRAM_TEST_SIZE; - else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { - if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == - TG3_EEPROM_SB_FORMAT_1) { - switch (magic & TG3_EEPROM_SB_REVISION_MASK) { - case TG3_EEPROM_SB_REVISION_0: - size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; - break; - case TG3_EEPROM_SB_REVISION_2: - size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; - break; - case TG3_EEPROM_SB_REVISION_3: - size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; - break; - case TG3_EEPROM_SB_REVISION_4: - size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; - break; - case TG3_EEPROM_SB_REVISION_5: - size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; - break; - case TG3_EEPROM_SB_REVISION_6: - size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; - break; - default: - return -EIO; - } - } else - return 0; - } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) - size = NVRAM_SELFBOOT_HW_SIZE; - else - return -EIO; - - buf = kmalloc(size, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - - err = -EIO; - for (i = 0, j = 0; i < size; i += 4, j++) { - err = tg3_nvram_read_be32(tp, i, &buf[j]); - if (err) - break; - } - if (i < size) - goto out; - - /* Selfboot format */ - magic = be32_to_cpu(buf[0]); - if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == - TG3_EEPROM_MAGIC_FW) { - u8 *buf8 = (u8 *) buf, csum8 = 0; - - if ((magic & 
TG3_EEPROM_SB_REVISION_MASK) == - TG3_EEPROM_SB_REVISION_2) { - /* For rev 2, the csum doesn't include the MBA. */ - for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) - csum8 += buf8[i]; - for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) - csum8 += buf8[i]; - } else { - for (i = 0; i < size; i++) - csum8 += buf8[i]; - } - - if (csum8 == 0) { - err = 0; - goto out; - } - - err = -EIO; - goto out; - } - - if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == - TG3_EEPROM_MAGIC_HW) { - u8 data[NVRAM_SELFBOOT_DATA_SIZE]; - u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; - u8 *buf8 = (u8 *) buf; - - /* Separate the parity bits and the data bytes. */ - for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { - if ((i == 0) || (i == 8)) { - int l; - u8 msk; - - for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) - parity[k++] = buf8[i] & msk; - i++; - } else if (i == 16) { - int l; - u8 msk; - - for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) - parity[k++] = buf8[i] & msk; - i++; - - for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) - parity[k++] = buf8[i] & msk; - i++; - } - data[j++] = buf8[i]; - } - - err = -EIO; - for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { - u8 hw8 = hweight8(data[i]); - - if ((hw8 & 0x1) && parity[i]) - goto out; - else if (!(hw8 & 0x1) && !parity[i]) - goto out; - } - err = 0; - goto out; - } - - err = -EIO; - - /* Bootstrap checksum at offset 0x10 */ - csum = calc_crc((unsigned char *) buf, 0x10); - if (csum != le32_to_cpu(buf[0x10/4])) - goto out; - - /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ - csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); - if (csum != le32_to_cpu(buf[0xfc/4])) - goto out; - - kfree(buf); - - buf = tg3_vpd_readblock(tp, &len); - if (!buf) - return -ENOMEM; - - i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); - if (i > 0) { - j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); - if (j < 0) - goto out; - - if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) - goto out; - - i += PCI_VPD_LRDT_TAG_SIZE; - j = pci_vpd_find_info_keyword((u8 *)buf, i, j, - PCI_VPD_RO_KEYWORD_CHKSUM); - if (j > 0) { - u8 csum8 = 0; - - j += PCI_VPD_INFO_FLD_HDR_SIZE; - - for (i = 0; i <= j; i++) - csum8 += ((u8 *)buf)[i]; - - if (csum8) - goto out; - } - } - - err = 0; - -out: - kfree(buf); - return err; -} - -#define TG3_SERDES_TIMEOUT_SEC 2 -#define TG3_COPPER_TIMEOUT_SEC 6 - -static int tg3_test_link(struct tg3 *tp) -{ - int i, max; - - if (!netif_running(tp->dev)) - return -ENODEV; - - if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) - max = TG3_SERDES_TIMEOUT_SEC; - else - max = TG3_COPPER_TIMEOUT_SEC; - - for (i = 0; i < max; i++) { - if (netif_carrier_ok(tp->dev)) - return 0; - - if (msleep_interruptible(1000)) - break; - } - - return -EIO; -} - -/* Only test the commonly used registers */ -static int tg3_test_registers(struct tg3 *tp) -{ - int i, is_5705, is_5750; - u32 offset, read_mask, write_mask, val, save_val, read_val; - static struct { - u16 offset; - u16 flags; -#define TG3_FL_5705 0x1 -#define TG3_FL_NOT_5705 0x2 -#define TG3_FL_NOT_5788 0x4 -#define TG3_FL_NOT_5750 0x8 - u32 read_mask; - u32 write_mask; - } reg_tbl[] = { - /* MAC Control Registers */ - { MAC_MODE, TG3_FL_NOT_5705, - 0x00000000, 0x00ef6f8c }, - { MAC_MODE, TG3_FL_5705, - 0x00000000, 0x01ef6b8c }, - { MAC_STATUS, TG3_FL_NOT_5705, - 0x03800107, 0x00000000 }, - { MAC_STATUS, TG3_FL_5705, - 0x03800100, 0x00000000 }, - { MAC_ADDR_0_HIGH, 0x0000, - 0x00000000, 0x0000ffff }, - { MAC_ADDR_0_LOW, 0x0000, - 0x00000000, 0xffffffff }, - { MAC_RX_MTU_SIZE, 0x0000, - 0x00000000, 0x0000ffff }, - { MAC_TX_MODE, 
0x0000, - 0x00000000, 0x00000070 }, - { MAC_TX_LENGTHS, 0x0000, - 0x00000000, 0x00003fff }, - { MAC_RX_MODE, TG3_FL_NOT_5705, - 0x00000000, 0x000007fc }, - { MAC_RX_MODE, TG3_FL_5705, - 0x00000000, 0x000007dc }, - { MAC_HASH_REG_0, 0x0000, - 0x00000000, 0xffffffff }, - { MAC_HASH_REG_1, 0x0000, - 0x00000000, 0xffffffff }, - { MAC_HASH_REG_2, 0x0000, - 0x00000000, 0xffffffff }, - { MAC_HASH_REG_3, 0x0000, - 0x00000000, 0xffffffff }, - - /* Receive Data and Receive BD Initiator Control Registers. */ - { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, - 0x00000000, 0x00000003 }, - { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { RCVDBDI_STD_BD+0, 0x0000, - 0x00000000, 0xffffffff }, - { RCVDBDI_STD_BD+4, 0x0000, - 0x00000000, 0xffffffff }, - { RCVDBDI_STD_BD+8, 0x0000, - 0x00000000, 0xffff0002 }, - { RCVDBDI_STD_BD+0xc, 0x0000, - 0x00000000, 0xffffffff }, - - /* Receive BD Initiator Control Registers. */ - { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { RCVBDI_STD_THRESH, TG3_FL_5705, - 0x00000000, 0x000003ff }, - { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - - /* Host Coalescing Control Registers. */ - { HOSTCC_MODE, TG3_FL_NOT_5705, - 0x00000000, 0x00000004 }, - { HOSTCC_MODE, TG3_FL_5705, - 0x00000000, 0x000000f6 }, - { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_RXCOL_TICKS, TG3_FL_5705, - 0x00000000, 0x000003ff }, - { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_TXCOL_TICKS, TG3_FL_5705, - 0x00000000, 0x000003ff }, - { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, - 0x00000000, 0x000000ff }, - { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, - 0x00000000, 0x000000ff }, - { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, - 0x00000000, 0x000000ff }, - { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, - 0x00000000, 0x000000ff }, - { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, - 0x00000000, 0xffffffff }, - { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, - 0x00000000, 0xffffffff }, - { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, - 0x00000000, 0xffffffff }, - { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, - 0xffffffff, 0x00000000 }, - { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, - 0xffffffff, 0x00000000 }, - - /* Buffer Manager Control Registers. 
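- *
- * As in the sections above, each entry is { offset, flags,
- * read_mask, write_mask }: read_mask covers read-only bits whose
- * value must survive the writes below, write_mask covers bits
- * that must accept both all-zeros and all-ones. For example,
- * BUFMGR_MB_RDMA_LOW_WATER is expected to latch exactly the low
- * six bits (0x0000003f) and nothing more.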
*/ - { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, - 0x00000000, 0x007fff80 }, - { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, - 0x00000000, 0x007fffff }, - { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, - 0x00000000, 0x0000003f }, - { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, - 0x00000000, 0x000001ff }, - { BUFMGR_MB_HIGH_WATER, 0x0000, - 0x00000000, 0x000001ff }, - { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, - 0xffffffff, 0x00000000 }, - { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, - 0xffffffff, 0x00000000 }, - - /* Mailbox Registers */ - { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, - 0x00000000, 0x000001ff }, - { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, - 0x00000000, 0x000001ff }, - { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, - 0x00000000, 0x000007ff }, - { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, - 0x00000000, 0x000001ff }, - - { 0xffff, 0x0000, 0x00000000, 0x00000000 }, - }; - - is_5705 = is_5750 = 0; - if (tg3_flag(tp, 5705_PLUS)) { - is_5705 = 1; - if (tg3_flag(tp, 5750_PLUS)) - is_5750 = 1; - } - - for (i = 0; reg_tbl[i].offset != 0xffff; i++) { - if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) - continue; - - if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) - continue; - - if (tg3_flag(tp, IS_5788) && - (reg_tbl[i].flags & TG3_FL_NOT_5788)) - continue; - - if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) - continue; - - offset = (u32) reg_tbl[i].offset; - read_mask = reg_tbl[i].read_mask; - write_mask = reg_tbl[i].write_mask; - - /* Save the original register content */ - save_val = tr32(offset); - - /* Determine the read-only value. */ - read_val = save_val & read_mask; - - /* Write zero to the register, then make sure the read-only bits - * are not changed and the read/write bits are all zeros. - */ - tw32(offset, 0); - - val = tr32(offset); - - /* Test the read-only and read/write bits. */ - if (((val & read_mask) != read_val) || (val & write_mask)) - goto out; - - /* Write ones to all the bits defined by RdMask and WrMask, then - * make sure the read-only bits are not changed and the - * read/write bits are all ones. - */ - tw32(offset, read_mask | write_mask); - - val = tr32(offset); - - /* Test the read-only bits. */ - if ((val & read_mask) != read_val) - goto out; - - /* Test the read/write bits. 
*/ - if ((val & write_mask) != write_mask) - goto out; - - tw32(offset, save_val); - } - - return 0; - -out: - if (netif_msg_hw(tp)) - netdev_err(tp->dev, - "Register test failed at offset %x\n", offset); - tw32(offset, save_val); - return -EIO; -} - -static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) -{ - static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; - int i; - u32 j; - - for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { - for (j = 0; j < len; j += 4) { - u32 val; - - tg3_write_mem(tp, offset + j, test_pattern[i]); - tg3_read_mem(tp, offset + j, &val); - if (val != test_pattern[i]) - return -EIO; - } - } - return 0; -} - -static int tg3_test_memory(struct tg3 *tp) -{ - static struct mem_entry { - u32 offset; - u32 len; - } mem_tbl_570x[] = { - { 0x00000000, 0x00b50}, - { 0x00002000, 0x1c000}, - { 0xffffffff, 0x00000} - }, mem_tbl_5705[] = { - { 0x00000100, 0x0000c}, - { 0x00000200, 0x00008}, - { 0x00004000, 0x00800}, - { 0x00006000, 0x01000}, - { 0x00008000, 0x02000}, - { 0x00010000, 0x0e000}, - { 0xffffffff, 0x00000} - }, mem_tbl_5755[] = { - { 0x00000200, 0x00008}, - { 0x00004000, 0x00800}, - { 0x00006000, 0x00800}, - { 0x00008000, 0x02000}, - { 0x00010000, 0x0c000}, - { 0xffffffff, 0x00000} - }, mem_tbl_5906[] = { - { 0x00000200, 0x00008}, - { 0x00004000, 0x00400}, - { 0x00006000, 0x00400}, - { 0x00008000, 0x01000}, - { 0x00010000, 0x01000}, - { 0xffffffff, 0x00000} - }, mem_tbl_5717[] = { - { 0x00000200, 0x00008}, - { 0x00010000, 0x0a000}, - { 0x00020000, 0x13c00}, - { 0xffffffff, 0x00000} - }, mem_tbl_57765[] = { - { 0x00000200, 0x00008}, - { 0x00004000, 0x00800}, - { 0x00006000, 0x09800}, - { 0x00010000, 0x0a000}, - { 0xffffffff, 0x00000} - }; - struct mem_entry *mem_tbl; - int err = 0; - int i; - - if (tg3_flag(tp, 5717_PLUS)) - mem_tbl = mem_tbl_5717; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - mem_tbl = mem_tbl_57765; - else if (tg3_flag(tp, 5755_PLUS)) - mem_tbl = mem_tbl_5755; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - mem_tbl = mem_tbl_5906; - else if (tg3_flag(tp, 5705_PLUS)) - mem_tbl = mem_tbl_5705; - else - mem_tbl = mem_tbl_570x; - - for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { - err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); - if (err) - break; - } - - return err; -} - -#define TG3_MAC_LOOPBACK 0 -#define TG3_PHY_LOOPBACK 1 -#define TG3_TSO_LOOPBACK 2 - -#define TG3_TSO_MSS 500 - -#define TG3_TSO_IP_HDR_LEN 20 -#define TG3_TSO_TCP_HDR_LEN 20 -#define TG3_TSO_TCP_OPT_LEN 12 - -static const u8 tg3_tso_header[] = { -0x08, 0x00, -0x45, 0x00, 0x00, 0x00, -0x00, 0x00, 0x40, 0x00, -0x40, 0x06, 0x00, 0x00, -0x0a, 0x00, 0x00, 0x01, -0x0a, 0x00, 0x00, 0x02, -0x0d, 0x00, 0xe0, 0x00, -0x00, 0x00, 0x01, 0x00, -0x00, 0x00, 0x02, 0x00, -0x80, 0x10, 0x10, 0x00, -0x14, 0x09, 0x00, 0x00, -0x01, 0x01, 0x08, 0x0a, -0x11, 0x11, 0x11, 0x11, -0x11, 0x11, 0x11, 0x11, -}; - -static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode) -{ - u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; - u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; - u32 budget; - struct sk_buff *skb, *rx_skb; - u8 *tx_data; - dma_addr_t map; - int num_pkts, tx_len, rx_len, i, err; - struct tg3_rx_buffer_desc *desc; - struct tg3_napi *tnapi, *rnapi; - struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; - - tnapi = &tp->napi[0]; - rnapi = &tp->napi[0]; - if (tp->irq_cnt > 1) { - if (tg3_flag(tp, ENABLE_RSS)) - rnapi = &tp->napi[1]; - if (tg3_flag(tp, ENABLE_TSS)) - tnapi = 
&tp->napi[1]; - } - coal_now = tnapi->coal_now | rnapi->coal_now; - - if (loopback_mode == TG3_MAC_LOOPBACK) { - /* HW errata - mac loopback fails in some cases on 5780. - * Normal traffic and PHY loopback are not affected by - * errata. Also, the MAC loopback test is deprecated for - * all newer ASIC revisions. - */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || - tg3_flag(tp, CPMU_PRESENT)) - return 0; - - mac_mode = tp->mac_mode & - ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); - mac_mode |= MAC_MODE_PORT_INT_LPBACK; - if (!tg3_flag(tp, 5705_PLUS)) - mac_mode |= MAC_MODE_LINK_POLARITY; - if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) - mac_mode |= MAC_MODE_PORT_MODE_MII; - else - mac_mode |= MAC_MODE_PORT_MODE_GMII; - tw32(MAC_MODE, mac_mode); - } else { - if (tp->phy_flags & TG3_PHYFLG_IS_FET) { - tg3_phy_fet_toggle_apd(tp, false); - val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; - } else - val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; - - tg3_phy_toggle_automdix(tp, 0); - - tg3_writephy(tp, MII_BMCR, val); - udelay(40); - - mac_mode = tp->mac_mode & - ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); - if (tp->phy_flags & TG3_PHYFLG_IS_FET) { - tg3_writephy(tp, MII_TG3_FET_PTEST, - MII_TG3_FET_PTEST_FRC_TX_LINK | - MII_TG3_FET_PTEST_FRC_TX_LOCK); - /* The write needs to be flushed for the AC131 */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) - tg3_readphy(tp, MII_TG3_FET_PTEST, &val); - mac_mode |= MAC_MODE_PORT_MODE_MII; - } else - mac_mode |= MAC_MODE_PORT_MODE_GMII; - - /* reset to prevent losing 1st rx packet intermittently */ - if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { - tw32_f(MAC_RX_MODE, RX_MODE_RESET); - udelay(10); - tw32_f(MAC_RX_MODE, tp->rx_mode); - } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { - u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; - if (masked_phy_id == TG3_PHY_ID_BCM5401) - mac_mode &= ~MAC_MODE_LINK_POLARITY; - else if (masked_phy_id == TG3_PHY_ID_BCM5411) - mac_mode |= MAC_MODE_LINK_POLARITY; - tg3_writephy(tp, MII_TG3_EXT_CTRL, - MII_TG3_EXT_CTRL_LNK3_LED_MODE); - } - tw32(MAC_MODE, mac_mode); - - /* Wait for link */ - for (i = 0; i < 100; i++) { - if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) - break; - mdelay(1); - } - } - - err = -EIO; - - tx_len = pktsz; - skb = netdev_alloc_skb(tp->dev, tx_len); - if (!skb) - return -ENOMEM; - - tx_data = skb_put(skb, tx_len); - memcpy(tx_data, tp->dev->dev_addr, 6); - memset(tx_data + 6, 0x0, 8); - - tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); - - if (loopback_mode == TG3_TSO_LOOPBACK) { - struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; - - u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + - TG3_TSO_TCP_OPT_LEN; - - memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, - sizeof(tg3_tso_header)); - mss = TG3_TSO_MSS; - - val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); - num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); - - /* Set the total length field in the IP header */ - iph->tot_len = htons((u16)(mss + hdr_len)); - - base_flags = (TXD_FLAG_CPU_PRE_DMA | - TXD_FLAG_CPU_POST_DMA); - - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3)) { - struct tcphdr *th; - val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; - th = (struct tcphdr *)&tx_data[val]; - th->check = 0; - } else - base_flags |= TXD_FLAG_TCPUDP_CSUM; - - if (tg3_flag(tp, HW_TSO_3)) { - mss |= (hdr_len & 0xc) << 12; - if (hdr_len & 0x10) - base_flags |= 0x00000010; - base_flags |= (hdr_len & 0x3e0) << 5; - } else if (tg3_flag(tp, HW_TSO_2)) - mss |= hdr_len << 9; - 
else if (tg3_flag(tp, HW_TSO_1) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { - mss |= (TG3_TSO_TCP_OPT_LEN << 9); - } else { - base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); - } - - data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); - } else { - num_pkts = 1; - data_off = ETH_HLEN; - } - - for (i = data_off; i < tx_len; i++) - tx_data[i] = (u8) (i & 0xff); - - map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, map)) { - dev_kfree_skb(skb); - return -EIO; - } - - val = tnapi->tx_prod; - tnapi->tx_buffers[val].skb = skb; - dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); - - tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | - rnapi->coal_now); - - udelay(10); - - rx_start_idx = rnapi->hw_status->idx[0].rx_producer; - - budget = tg3_tx_avail(tnapi); - if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, - base_flags | TXD_FLAG_END, mss, 0)) { - tnapi->tx_buffers[val].skb = NULL; - dev_kfree_skb(skb); - return -EIO; - } - - tnapi->tx_prod++; - - tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); - tr32_mailbox(tnapi->prodmbox); - - udelay(10); - - /* 350 usec to allow enough time on some 10/100 Mbps devices. */ - for (i = 0; i < 35; i++) { - tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | - coal_now); - - udelay(10); - - tx_idx = tnapi->hw_status->idx[0].tx_consumer; - rx_idx = rnapi->hw_status->idx[0].rx_producer; - if ((tx_idx == tnapi->tx_prod) && - (rx_idx == (rx_start_idx + num_pkts))) - break; - } - - tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0); - dev_kfree_skb(skb); - - if (tx_idx != tnapi->tx_prod) - goto out; - - if (rx_idx != rx_start_idx + num_pkts) - goto out; - - val = data_off; - while (rx_idx != rx_start_idx) { - desc = &rnapi->rx_rcb[rx_start_idx++]; - desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; - opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; - - if ((desc->err_vlan & RXD_ERR_MASK) != 0 && - (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) - goto out; - - rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - - ETH_FCS_LEN; - - if (loopback_mode != TG3_TSO_LOOPBACK) { - if (rx_len != tx_len) - goto out; - - if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { - if (opaque_key != RXD_OPAQUE_RING_STD) - goto out; - } else { - if (opaque_key != RXD_OPAQUE_RING_JUMBO) - goto out; - } - } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && - (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) - >> RXD_TCPCSUM_SHIFT != 0xffff) { - goto out; - } - - if (opaque_key == RXD_OPAQUE_RING_STD) { - rx_skb = tpr->rx_std_buffers[desc_idx].skb; - map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], - mapping); - } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { - rx_skb = tpr->rx_jmb_buffers[desc_idx].skb; - map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], - mapping); - } else - goto out; - - pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, - PCI_DMA_FROMDEVICE); - - for (i = data_off; i < rx_len; i++, val++) { - if (*(rx_skb->data + i) != (u8) (val & 0xff)) - goto out; - } - } - - err = 0; - - /* tg3_free_rings will unmap and free the rx_skb */ -out: - return err; -} - -#define TG3_STD_LOOPBACK_FAILED 1 -#define TG3_JMB_LOOPBACK_FAILED 2 -#define TG3_TSO_LOOPBACK_FAILED 4 - -#define TG3_MAC_LOOPBACK_SHIFT 0 -#define TG3_PHY_LOOPBACK_SHIFT 4 -#define TG3_LOOPBACK_FAILED 0x00000077 - -static int tg3_test_loopback(struct tg3 *tp) -{ - int err = 0; - u32 eee_cap, cpmuctrl = 0; - - if (!netif_running(tp->dev)) - return TG3_LOOPBACK_FAILED; - - eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; - 
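- /* The EEE capability bit is saved in eee_cap above, masked off
- * for the duration of the loopback tests, and ORed back into
- * tp->phy_flags at the done: label at the end of this function.
- */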
tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; - - err = tg3_reset_hw(tp, 1); - if (err) { - err = TG3_LOOPBACK_FAILED; - goto done; - } - - if (tg3_flag(tp, ENABLE_RSS)) { - int i; - - /* Reroute all rx packets to the 1st queue */ - for (i = MAC_RSS_INDIR_TBL_0; - i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) - tw32(i, 0x0); - } - - /* Turn off gphy autopowerdown. */ - if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) - tg3_phy_toggle_apd(tp, false); - - if (tg3_flag(tp, CPMU_PRESENT)) { - int i; - u32 status; - - tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER); - - /* Wait for up to 40 microseconds to acquire lock. */ - for (i = 0; i < 4; i++) { - status = tr32(TG3_CPMU_MUTEX_GNT); - if (status == CPMU_MUTEX_GNT_DRIVER) - break; - udelay(10); - } - - if (status != CPMU_MUTEX_GNT_DRIVER) { - err = TG3_LOOPBACK_FAILED; - goto done; - } - - /* Turn off link-based power management. */ - cpmuctrl = tr32(TG3_CPMU_CTRL); - tw32(TG3_CPMU_CTRL, - cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE | - CPMU_CTRL_LINK_AWARE_MODE)); - } - - if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK)) - err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT; - - if (tg3_flag(tp, JUMBO_RING_ENABLE) && - tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK)) - err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT; - - if (tg3_flag(tp, CPMU_PRESENT)) { - tw32(TG3_CPMU_CTRL, cpmuctrl); - - /* Release the mutex */ - tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); - } - - if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && - !tg3_flag(tp, USE_PHYLIB)) { - if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK)) - err |= TG3_STD_LOOPBACK_FAILED << - TG3_PHY_LOOPBACK_SHIFT; - if (tg3_flag(tp, TSO_CAPABLE) && - tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK)) - err |= TG3_TSO_LOOPBACK_FAILED << - TG3_PHY_LOOPBACK_SHIFT; - if (tg3_flag(tp, JUMBO_RING_ENABLE) && - tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK)) - err |= TG3_JMB_LOOPBACK_FAILED << - TG3_PHY_LOOPBACK_SHIFT; - } - - /* Re-enable gphy autopowerdown. 
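- *
- * This mirrors the tg3_phy_toggle_apd(tp, false) call made
- * before the loopback runs; the flag check keeps APD off for
- * PHYs that never had it enabled.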
*/ - if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) - tg3_phy_toggle_apd(tp, true); - -done: - tp->phy_flags |= eee_cap; - - return err; -} - -static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, - u64 *data) -{ - struct tg3 *tp = netdev_priv(dev); - - if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && - tg3_power_up(tp)) { - etest->flags |= ETH_TEST_FL_FAILED; - memset(data, 1, sizeof(u64) * TG3_NUM_TEST); - return; - } - - memset(data, 0, sizeof(u64) * TG3_NUM_TEST); - - if (tg3_test_nvram(tp) != 0) { - etest->flags |= ETH_TEST_FL_FAILED; - data[0] = 1; - } - if (tg3_test_link(tp) != 0) { - etest->flags |= ETH_TEST_FL_FAILED; - data[1] = 1; - } - if (etest->flags & ETH_TEST_FL_OFFLINE) { - int err, err2 = 0, irq_sync = 0; - - if (netif_running(dev)) { - tg3_phy_stop(tp); - tg3_netif_stop(tp); - irq_sync = 1; - } - - tg3_full_lock(tp, irq_sync); - - tg3_halt(tp, RESET_KIND_SUSPEND, 1); - err = tg3_nvram_lock(tp); - tg3_halt_cpu(tp, RX_CPU_BASE); - if (!tg3_flag(tp, 5705_PLUS)) - tg3_halt_cpu(tp, TX_CPU_BASE); - if (!err) - tg3_nvram_unlock(tp); - - if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) - tg3_phy_reset(tp); - - if (tg3_test_registers(tp) != 0) { - etest->flags |= ETH_TEST_FL_FAILED; - data[2] = 1; - } - if (tg3_test_memory(tp) != 0) { - etest->flags |= ETH_TEST_FL_FAILED; - data[3] = 1; - } - if ((data[4] = tg3_test_loopback(tp)) != 0) - etest->flags |= ETH_TEST_FL_FAILED; - - tg3_full_unlock(tp); - - if (tg3_test_interrupt(tp) != 0) { - etest->flags |= ETH_TEST_FL_FAILED; - data[5] = 1; - } - - tg3_full_lock(tp, 0); - - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - if (netif_running(dev)) { - tg3_flag_set(tp, INIT_COMPLETE); - err2 = tg3_restart_hw(tp, 1); - if (!err2) - tg3_netif_start(tp); - } - - tg3_full_unlock(tp); - - if (irq_sync && !err2) - tg3_phy_start(tp); - } - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - tg3_power_down(tp); - -} - -static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct mii_ioctl_data *data = if_mii(ifr); - struct tg3 *tp = netdev_priv(dev); - int err; - - if (tg3_flag(tp, USE_PHYLIB)) { - struct phy_device *phydev; - if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) - return -EAGAIN; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - return phy_mii_ioctl(phydev, ifr, cmd); - } - - switch (cmd) { - case SIOCGMIIPHY: - data->phy_id = tp->phy_addr; - - /* fallthru */ - case SIOCGMIIREG: { - u32 mii_regval; - - if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) - break; /* We have no PHY */ - - if (!netif_running(dev)) - return -EAGAIN; - - spin_lock_bh(&tp->lock); - err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); - spin_unlock_bh(&tp->lock); - - data->val_out = mii_regval; - - return err; - } - - case SIOCSMIIREG: - if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) - break; /* We have no PHY */ - - if (!netif_running(dev)) - return -EAGAIN; - - spin_lock_bh(&tp->lock); - err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); - spin_unlock_bh(&tp->lock); - - return err; - - default: - /* do nothing */ - break; - } - return -EOPNOTSUPP; -} - -static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) -{ - struct tg3 *tp = netdev_priv(dev); - - memcpy(ec, &tp->coal, sizeof(*ec)); - return 0; -} - -static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) -{ - struct tg3 *tp = netdev_priv(dev); - u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; - u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; - - if (!tg3_flag(tp, 5705_PLUS)) { - max_rxcoal_tick_int = 
MAX_RXCOAL_TICK_INT; - max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; - max_stat_coal_ticks = MAX_STAT_COAL_TICKS; - min_stat_coal_ticks = MIN_STAT_COAL_TICKS; - } - - if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || - (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || - (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || - (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || - (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || - (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || - (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || - (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || - (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || - (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) - return -EINVAL; - - /* No rx interrupts will be generated if both are zero */ - if ((ec->rx_coalesce_usecs == 0) && - (ec->rx_max_coalesced_frames == 0)) - return -EINVAL; - - /* No tx interrupts will be generated if both are zero */ - if ((ec->tx_coalesce_usecs == 0) && - (ec->tx_max_coalesced_frames == 0)) - return -EINVAL; - - /* Only copy relevant parameters, ignore all others. */ - tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; - tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; - tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; - tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; - tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; - tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; - tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; - tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; - tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; - - if (netif_running(dev)) { - tg3_full_lock(tp, 0); - __tg3_set_coalesce(tp, &tp->coal); - tg3_full_unlock(tp); - } - return 0; -} - -static const struct ethtool_ops tg3_ethtool_ops = { - .get_settings = tg3_get_settings, - .set_settings = tg3_set_settings, - .get_drvinfo = tg3_get_drvinfo, - .get_regs_len = tg3_get_regs_len, - .get_regs = tg3_get_regs, - .get_wol = tg3_get_wol, - .set_wol = tg3_set_wol, - .get_msglevel = tg3_get_msglevel, - .set_msglevel = tg3_set_msglevel, - .nway_reset = tg3_nway_reset, - .get_link = ethtool_op_get_link, - .get_eeprom_len = tg3_get_eeprom_len, - .get_eeprom = tg3_get_eeprom, - .set_eeprom = tg3_set_eeprom, - .get_ringparam = tg3_get_ringparam, - .set_ringparam = tg3_set_ringparam, - .get_pauseparam = tg3_get_pauseparam, - .set_pauseparam = tg3_set_pauseparam, - .self_test = tg3_self_test, - .get_strings = tg3_get_strings, - .set_phys_id = tg3_set_phys_id, - .get_ethtool_stats = tg3_get_ethtool_stats, - .get_coalesce = tg3_get_coalesce, - .set_coalesce = tg3_set_coalesce, - .get_sset_count = tg3_get_sset_count, -}; - -static void __devinit tg3_get_eeprom_size(struct tg3 *tp) -{ - u32 cursize, val, magic; - - tp->nvram_size = EEPROM_CHIP_SIZE; - - if (tg3_nvram_read(tp, 0, &magic) != 0) - return; - - if ((magic != TG3_EEPROM_MAGIC) && - ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && - ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) - return; - - /* - * Size the chip by reading offsets at increasing powers of two. - * When we encounter our validation signature, we know the addressing - * has wrapped around, and thus have our chip size. 
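- *
- * For example, with a (hypothetical) 32 KB part the reads at
- * 0x10, 0x20, ... return ordinary data, but the read at offset
- * 0x8000 wraps back to offset 0 and returns the magic signature
- * again, so the loop below stops with cursize == 0x8000.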
- */ - cursize = 0x10; - - while (cursize < tp->nvram_size) { - if (tg3_nvram_read(tp, cursize, &val) != 0) - return; - - if (val == magic) - break; - - cursize <<= 1; - } - - tp->nvram_size = cursize; -} - -static void __devinit tg3_get_nvram_size(struct tg3 *tp) -{ - u32 val; - - if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) - return; - - /* Selfboot format */ - if (val != TG3_EEPROM_MAGIC) { - tg3_get_eeprom_size(tp); - return; - } - - if (tg3_nvram_read(tp, 0xf0, &val) == 0) { - if (val != 0) { - /* This is confusing. We want to operate on the - * 16-bit value at offset 0xf2. The tg3_nvram_read() - * call will read from NVRAM and byteswap the data - * according to the byteswapping settings for all - * other register accesses. This ensures the data we - * want will always reside in the lower 16-bits. - * However, the data in NVRAM is in LE format, which - * means the data from the NVRAM read will always be - * opposite the endianness of the CPU. The 16-bit - * byteswap then brings the data to CPU endianness. - */ - tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; - return; - } - } - tp->nvram_size = TG3_NVRAM_SIZE_512KB; -} - -static void __devinit tg3_get_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1; - - nvcfg1 = tr32(NVRAM_CFG1); - if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { - tg3_flag_set(tp, FLASH); - } else { - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - tg3_flag(tp, 5780_CLASS)) { - switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { - case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; - tg3_flag_set(tp, NVRAM_BUFFERED); - break; - case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; - break; - case FLASH_VENDOR_ATMEL_EEPROM: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - tg3_flag_set(tp, NVRAM_BUFFERED); - break; - case FLASH_VENDOR_ST: - tp->nvram_jedecnum = JEDEC_ST; - tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; - tg3_flag_set(tp, NVRAM_BUFFERED); - break; - case FLASH_VENDOR_SAIFUN: - tp->nvram_jedecnum = JEDEC_SAIFUN; - tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; - break; - case FLASH_VENDOR_SST_SMALL: - case FLASH_VENDOR_SST_LARGE: - tp->nvram_jedecnum = JEDEC_SST; - tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; - break; - } - } else { - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; - tg3_flag_set(tp, NVRAM_BUFFERED); - } -} - -static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) -{ - switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { - case FLASH_5752PAGE_SIZE_256: - tp->nvram_pagesize = 256; - break; - case FLASH_5752PAGE_SIZE_512: - tp->nvram_pagesize = 512; - break; - case FLASH_5752PAGE_SIZE_1K: - tp->nvram_pagesize = 1024; - break; - case FLASH_5752PAGE_SIZE_2K: - tp->nvram_pagesize = 2048; - break; - case FLASH_5752PAGE_SIZE_4K: - tp->nvram_pagesize = 4096; - break; - case FLASH_5752PAGE_SIZE_264: - tp->nvram_pagesize = 264; - break; - case FLASH_5752PAGE_SIZE_528: - tp->nvram_pagesize = 528; - break; - } -} - -static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1; - - nvcfg1 = tr32(NVRAM_CFG1); - - /* NVRAM protection for TPM */ - if (nvcfg1 & (1 << 27)) - tg3_flag_set(tp, PROTECTED_NVRAM); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: - case 
FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - break; - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - break; - } - - if (tg3_flag(tp, FLASH)) { - tg3_nvram_get_pagesize(tp, nvcfg1); - } else { - /* For eeprom, set pagesize to maximum eeprom size */ - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); - } -} - -static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1, protect = 0; - - nvcfg1 = tr32(NVRAM_CFG1); - - /* NVRAM protection for TPM */ - if (nvcfg1 & (1 << 27)) { - tg3_flag_set(tp, PROTECTED_NVRAM); - protect = 1; - } - - nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; - switch (nvcfg1) { - case FLASH_5755VENDOR_ATMEL_FLASH_1: - case FLASH_5755VENDOR_ATMEL_FLASH_2: - case FLASH_5755VENDOR_ATMEL_FLASH_3: - case FLASH_5755VENDOR_ATMEL_FLASH_5: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tp->nvram_pagesize = 264; - if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || - nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) - tp->nvram_size = (protect ? 0x3e200 : - TG3_NVRAM_SIZE_512KB); - else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) - tp->nvram_size = (protect ? 0x1f200 : - TG3_NVRAM_SIZE_256KB); - else - tp->nvram_size = (protect ? 0x1f200 : - TG3_NVRAM_SIZE_128KB); - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tp->nvram_pagesize = 256; - if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) - tp->nvram_size = (protect ? - TG3_NVRAM_SIZE_64KB : - TG3_NVRAM_SIZE_128KB); - else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) - tp->nvram_size = (protect ? - TG3_NVRAM_SIZE_64KB : - TG3_NVRAM_SIZE_256KB); - else - tp->nvram_size = (protect ? 
- TG3_NVRAM_SIZE_128KB : - TG3_NVRAM_SIZE_512KB); - break; - } -} - -static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1; - - nvcfg1 = tr32(NVRAM_CFG1); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: - case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: - case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: - case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); - break; - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - case FLASH_5755VENDOR_ATMEL_FLASH_1: - case FLASH_5755VENDOR_ATMEL_FLASH_2: - case FLASH_5755VENDOR_ATMEL_FLASH_3: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tp->nvram_pagesize = 264; - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tp->nvram_pagesize = 256; - break; - } -} - -static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1, protect = 0; - - nvcfg1 = tr32(NVRAM_CFG1); - - /* NVRAM protection for TPM */ - if (nvcfg1 & (1 << 27)) { - tg3_flag_set(tp, PROTECTED_NVRAM); - protect = 1; - } - - nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; - switch (nvcfg1) { - case FLASH_5761VENDOR_ATMEL_ADB021D: - case FLASH_5761VENDOR_ATMEL_ADB041D: - case FLASH_5761VENDOR_ATMEL_ADB081D: - case FLASH_5761VENDOR_ATMEL_ADB161D: - case FLASH_5761VENDOR_ATMEL_MDB021D: - case FLASH_5761VENDOR_ATMEL_MDB041D: - case FLASH_5761VENDOR_ATMEL_MDB081D: - case FLASH_5761VENDOR_ATMEL_MDB161D: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); - tp->nvram_pagesize = 256; - break; - case FLASH_5761VENDOR_ST_A_M45PE20: - case FLASH_5761VENDOR_ST_A_M45PE40: - case FLASH_5761VENDOR_ST_A_M45PE80: - case FLASH_5761VENDOR_ST_A_M45PE16: - case FLASH_5761VENDOR_ST_M_M45PE20: - case FLASH_5761VENDOR_ST_M_M45PE40: - case FLASH_5761VENDOR_ST_M_M45PE80: - case FLASH_5761VENDOR_ST_M_M45PE16: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tp->nvram_pagesize = 256; - break; - } - - if (protect) { - tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); - } else { - switch (nvcfg1) { - case FLASH_5761VENDOR_ATMEL_ADB161D: - case FLASH_5761VENDOR_ATMEL_MDB161D: - case FLASH_5761VENDOR_ST_A_M45PE16: - case FLASH_5761VENDOR_ST_M_M45PE16: - tp->nvram_size = TG3_NVRAM_SIZE_2MB; - break; - case FLASH_5761VENDOR_ATMEL_ADB081D: - case FLASH_5761VENDOR_ATMEL_MDB081D: - case FLASH_5761VENDOR_ST_A_M45PE80: - case FLASH_5761VENDOR_ST_M_M45PE80: - tp->nvram_size = TG3_NVRAM_SIZE_1MB; - break; - case FLASH_5761VENDOR_ATMEL_ADB041D: - case FLASH_5761VENDOR_ATMEL_MDB041D: - case FLASH_5761VENDOR_ST_A_M45PE40: - case FLASH_5761VENDOR_ST_M_M45PE40: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - case FLASH_5761VENDOR_ATMEL_ADB021D: - case FLASH_5761VENDOR_ATMEL_MDB021D: - case FLASH_5761VENDOR_ST_A_M45PE20: - case FLASH_5761VENDOR_ST_M_M45PE20: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - } - } -} - -static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) -{ - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; -} - -static void __devinit 
tg3_get_57780_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1; - - nvcfg1 = tr32(NVRAM_CFG1); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: - case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); - return; - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - case FLASH_57780VENDOR_ATMEL_AT45DB011D: - case FLASH_57780VENDOR_ATMEL_AT45DB011B: - case FLASH_57780VENDOR_ATMEL_AT45DB021D: - case FLASH_57780VENDOR_ATMEL_AT45DB021B: - case FLASH_57780VENDOR_ATMEL_AT45DB041D: - case FLASH_57780VENDOR_ATMEL_AT45DB041B: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - case FLASH_57780VENDOR_ATMEL_AT45DB011D: - case FLASH_57780VENDOR_ATMEL_AT45DB011B: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; - break; - case FLASH_57780VENDOR_ATMEL_AT45DB021D: - case FLASH_57780VENDOR_ATMEL_AT45DB021B: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - case FLASH_57780VENDOR_ATMEL_AT45DB041D: - case FLASH_57780VENDOR_ATMEL_AT45DB041B: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - } - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5752VENDOR_ST_M45PE10: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; - break; - case FLASH_5752VENDOR_ST_M45PE20: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - } - break; - default: - tg3_flag_set(tp, NO_NVRAM); - return; - } - - tg3_nvram_get_pagesize(tp, nvcfg1); - if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) - tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); -} - - -static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1; - - nvcfg1 = tr32(NVRAM_CFG1); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5717VENDOR_ATMEL_EEPROM: - case FLASH_5717VENDOR_MICRO_EEPROM: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); - return; - case FLASH_5717VENDOR_ATMEL_MDB011D: - case FLASH_5717VENDOR_ATMEL_ADB011B: - case FLASH_5717VENDOR_ATMEL_ADB011D: - case FLASH_5717VENDOR_ATMEL_MDB021D: - case FLASH_5717VENDOR_ATMEL_ADB021B: - case FLASH_5717VENDOR_ATMEL_ADB021D: - case FLASH_5717VENDOR_ATMEL_45USPT: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5717VENDOR_ATMEL_MDB021D: - /* Detect size with tg3_nvram_get_size() */ - break; - case FLASH_5717VENDOR_ATMEL_ADB021B: - case FLASH_5717VENDOR_ATMEL_ADB021D: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - default: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; - break; - } - break; - case FLASH_5717VENDOR_ST_M_M25PE10: - case FLASH_5717VENDOR_ST_A_M25PE10: - case FLASH_5717VENDOR_ST_M_M45PE10: - case FLASH_5717VENDOR_ST_A_M45PE10: - case FLASH_5717VENDOR_ST_M_M25PE20: - case FLASH_5717VENDOR_ST_A_M25PE20: - case FLASH_5717VENDOR_ST_M_M45PE20: - case FLASH_5717VENDOR_ST_A_M45PE20: - case 
FLASH_5717VENDOR_ST_25USPT: - case FLASH_5717VENDOR_ST_45USPT: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5717VENDOR_ST_M_M25PE20: - case FLASH_5717VENDOR_ST_M_M45PE20: - /* Detect size with tg3_nvram_get_size() */ - break; - case FLASH_5717VENDOR_ST_A_M25PE20: - case FLASH_5717VENDOR_ST_A_M45PE20: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - default: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; - break; - } - break; - default: - tg3_flag_set(tp, NO_NVRAM); - return; - } - - tg3_nvram_get_pagesize(tp, nvcfg1); - if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) - tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); -} - -static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1, nvmpinstrp; - - nvcfg1 = tr32(NVRAM_CFG1); - nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; - - switch (nvmpinstrp) { - case FLASH_5720_EEPROM_HD: - case FLASH_5720_EEPROM_LD: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); - if (nvmpinstrp == FLASH_5720_EEPROM_HD) - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - else - tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; - return; - case FLASH_5720VENDOR_M_ATMEL_DB011D: - case FLASH_5720VENDOR_A_ATMEL_DB011B: - case FLASH_5720VENDOR_A_ATMEL_DB011D: - case FLASH_5720VENDOR_M_ATMEL_DB021D: - case FLASH_5720VENDOR_A_ATMEL_DB021B: - case FLASH_5720VENDOR_A_ATMEL_DB021D: - case FLASH_5720VENDOR_M_ATMEL_DB041D: - case FLASH_5720VENDOR_A_ATMEL_DB041B: - case FLASH_5720VENDOR_A_ATMEL_DB041D: - case FLASH_5720VENDOR_M_ATMEL_DB081D: - case FLASH_5720VENDOR_A_ATMEL_DB081D: - case FLASH_5720VENDOR_ATMEL_45USPT: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - - switch (nvmpinstrp) { - case FLASH_5720VENDOR_M_ATMEL_DB021D: - case FLASH_5720VENDOR_A_ATMEL_DB021B: - case FLASH_5720VENDOR_A_ATMEL_DB021D: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - case FLASH_5720VENDOR_M_ATMEL_DB041D: - case FLASH_5720VENDOR_A_ATMEL_DB041B: - case FLASH_5720VENDOR_A_ATMEL_DB041D: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - case FLASH_5720VENDOR_M_ATMEL_DB081D: - case FLASH_5720VENDOR_A_ATMEL_DB081D: - tp->nvram_size = TG3_NVRAM_SIZE_1MB; - break; - default: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; - break; - } - break; - case FLASH_5720VENDOR_M_ST_M25PE10: - case FLASH_5720VENDOR_M_ST_M45PE10: - case FLASH_5720VENDOR_A_ST_M25PE10: - case FLASH_5720VENDOR_A_ST_M45PE10: - case FLASH_5720VENDOR_M_ST_M25PE20: - case FLASH_5720VENDOR_M_ST_M45PE20: - case FLASH_5720VENDOR_A_ST_M25PE20: - case FLASH_5720VENDOR_A_ST_M45PE20: - case FLASH_5720VENDOR_M_ST_M25PE40: - case FLASH_5720VENDOR_M_ST_M45PE40: - case FLASH_5720VENDOR_A_ST_M25PE40: - case FLASH_5720VENDOR_A_ST_M45PE40: - case FLASH_5720VENDOR_M_ST_M25PE80: - case FLASH_5720VENDOR_M_ST_M45PE80: - case FLASH_5720VENDOR_A_ST_M25PE80: - case FLASH_5720VENDOR_A_ST_M45PE80: - case FLASH_5720VENDOR_ST_25USPT: - case FLASH_5720VENDOR_ST_45USPT: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - - switch (nvmpinstrp) { - case FLASH_5720VENDOR_M_ST_M25PE20: - case FLASH_5720VENDOR_M_ST_M45PE20: - case FLASH_5720VENDOR_A_ST_M25PE20: - case FLASH_5720VENDOR_A_ST_M45PE20: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - case FLASH_5720VENDOR_M_ST_M25PE40: - case FLASH_5720VENDOR_M_ST_M45PE40: - case 
FLASH_5720VENDOR_A_ST_M25PE40: - case FLASH_5720VENDOR_A_ST_M45PE40: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - case FLASH_5720VENDOR_M_ST_M25PE80: - case FLASH_5720VENDOR_M_ST_M45PE80: - case FLASH_5720VENDOR_A_ST_M25PE80: - case FLASH_5720VENDOR_A_ST_M45PE80: - tp->nvram_size = TG3_NVRAM_SIZE_1MB; - break; - default: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; - break; - } - break; - default: - tg3_flag_set(tp, NO_NVRAM); - return; - } - - tg3_nvram_get_pagesize(tp, nvcfg1); - if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) - tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); -} - -/* Chips other than 5700/5701 use the NVRAM for fetching info. */ -static void __devinit tg3_nvram_init(struct tg3 *tp) -{ - tw32_f(GRC_EEPROM_ADDR, - (EEPROM_ADDR_FSM_RESET | - (EEPROM_DEFAULT_CLOCK_PERIOD << - EEPROM_ADDR_CLKPERD_SHIFT))); - - msleep(1); - - /* Enable seeprom accesses. */ - tw32_f(GRC_LOCAL_CTRL, - tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); - udelay(100); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { - tg3_flag_set(tp, NVRAM); - - if (tg3_nvram_lock(tp)) { - netdev_warn(tp->dev, - "Cannot get nvram lock, %s failed\n", - __func__); - return; - } - tg3_enable_nvram_access(tp); - - tp->nvram_size = 0; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) - tg3_get_5752_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) - tg3_get_5755_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) - tg3_get_5787_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) - tg3_get_5761_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tg3_get_5906_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - tg3_get_57780_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) - tg3_get_5717_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - tg3_get_5720_nvram_info(tp); - else - tg3_get_nvram_info(tp); - - if (tp->nvram_size == 0) - tg3_get_nvram_size(tp); - - tg3_disable_nvram_access(tp); - tg3_nvram_unlock(tp); - - } else { - tg3_flag_clear(tp, NVRAM); - tg3_flag_clear(tp, NVRAM_BUFFERED); - - tg3_get_eeprom_size(tp); - } -} - -static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, - u32 offset, u32 len, u8 *buf) -{ - int i, j, rc = 0; - u32 val; - - for (i = 0; i < len; i += 4) { - u32 addr; - __be32 data; - - addr = offset + i; - - memcpy(&data, buf + i, 4); - - /* - * The SEEPROM interface expects the data to always be opposite - * the native endian format. We accomplish this by reversing - * all the operations that would have been performed on the - * data from a call to tg3_nvram_read_be32(). 
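
The swab32(be32_to_cpu(...)) expression used just below has a host-independent reading: for any four bytes, swab32() of their big-endian interpretation equals their little-endian interpretation, which is the order the SEEPROM interface expects. A small self-contained check, with plain-C stand-ins for the kernel helpers:

  #include <assert.h>
  #include <stdint.h>

  static uint32_t swab32(uint32_t v)              /* like the kernel's swab32() */
  {
          return (v >> 24) | ((v >> 8) & 0x0000ff00) |
                 ((v << 8) & 0x00ff0000) | (v << 24);
  }

  static uint32_t load_be32(const uint8_t *p)     /* be32_to_cpu() on raw bytes */
  {
          return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                 ((uint32_t)p[2] << 8) | p[3];
  }

  static uint32_t load_le32(const uint8_t *p)     /* le32_to_cpu() on raw bytes */
  {
          return ((uint32_t)p[3] << 24) | ((uint32_t)p[2] << 16) |
                 ((uint32_t)p[1] << 8) | p[0];
  }

  int main(void)
  {
          const uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };

          /* The write path's swab32(be32_to_cpu(...)) is exactly the
           * little-endian reading of the same bytes, on any host. */
          assert(swab32(load_be32(buf)) == load_le32(buf));
          return 0;
  }
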
- */ - tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); - - val = tr32(GRC_EEPROM_ADDR); - tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); - - val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | - EEPROM_ADDR_READ); - tw32(GRC_EEPROM_ADDR, val | - (0 << EEPROM_ADDR_DEVID_SHIFT) | - (addr & EEPROM_ADDR_ADDR_MASK) | - EEPROM_ADDR_START | - EEPROM_ADDR_WRITE); - - for (j = 0; j < 1000; j++) { - val = tr32(GRC_EEPROM_ADDR); - - if (val & EEPROM_ADDR_COMPLETE) - break; - msleep(1); - } - if (!(val & EEPROM_ADDR_COMPLETE)) { - rc = -EBUSY; - break; - } - } - - return rc; -} - -/* offset and length are dword aligned */ -static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, - u8 *buf) -{ - int ret = 0; - u32 pagesize = tp->nvram_pagesize; - u32 pagemask = pagesize - 1; - u32 nvram_cmd; - u8 *tmp; - - tmp = kmalloc(pagesize, GFP_KERNEL); - if (tmp == NULL) - return -ENOMEM; - - while (len) { - int j; - u32 phy_addr, page_off, size; - - phy_addr = offset & ~pagemask; - - for (j = 0; j < pagesize; j += 4) { - ret = tg3_nvram_read_be32(tp, phy_addr + j, - (__be32 *) (tmp + j)); - if (ret) - break; - } - if (ret) - break; - - page_off = offset & pagemask; - size = pagesize; - if (len < size) - size = len; - - len -= size; - - memcpy(tmp + page_off, buf, size); - - offset = offset + (pagesize - page_off); - - tg3_enable_nvram_access(tp); - - /* - * Before we can erase the flash page, we need - * to issue a special "write enable" command. - */ - nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; - - if (tg3_nvram_exec_cmd(tp, nvram_cmd)) - break; - - /* Erase the target page */ - tw32(NVRAM_ADDR, phy_addr); - - nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | - NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; - - if (tg3_nvram_exec_cmd(tp, nvram_cmd)) - break; - - /* Issue another write enable to start the write. 
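
Stripped of the NVRAM_CMD_* details, the loop above is the classic read-modify-write cycle for erase-before-write flash: fetch the whole page, splice in the new bytes, erase, then program the page back. One iteration of that cycle as a sketch, with hypothetical flash_read_page()/flash_erase_page()/flash_program_page() helpers standing in for the command sequences:

  #include <stdint.h>
  #include <string.h>

  /* Hypothetical device helpers; each would issue the corresponding
   * NVRAM command sequence in the real driver. */
  int flash_read_page(uint32_t page_addr, uint8_t *dst, uint32_t pagesize);
  int flash_erase_page(uint32_t page_addr);
  int flash_program_page(uint32_t page_addr, const uint8_t *src,
                         uint32_t pagesize);

  /* Update 'len' bytes at 'offset' (possibly mid-page). Returns the
   * number of bytes consumed from 'src', or a negative error. */
  static int page_rmw(uint32_t offset, const uint8_t *src, uint32_t len,
                      uint8_t *scratch, uint32_t pagesize)
  {
          uint32_t page = offset & ~(pagesize - 1);
          uint32_t page_off = offset & (pagesize - 1);
          uint32_t chunk = pagesize - page_off;
          int err;

          if (chunk > len)
                  chunk = len;

          err = flash_read_page(page, scratch, pagesize);    /* read */
          if (err)
                  return err;
          memcpy(scratch + page_off, src, chunk);            /* modify */
          err = flash_erase_page(page);                      /* erase */
          if (err)
                  return err;
          err = flash_program_page(page, scratch, pagesize); /* write back */
          return err ? err : (int)chunk;
  }
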
*/ - nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; - - if (tg3_nvram_exec_cmd(tp, nvram_cmd)) - break; - - for (j = 0; j < pagesize; j += 4) { - __be32 data; - - data = *((__be32 *) (tmp + j)); - - tw32(NVRAM_WRDATA, be32_to_cpu(data)); - - tw32(NVRAM_ADDR, phy_addr + j); - - nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | - NVRAM_CMD_WR; - - if (j == 0) - nvram_cmd |= NVRAM_CMD_FIRST; - else if (j == (pagesize - 4)) - nvram_cmd |= NVRAM_CMD_LAST; - - if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) - break; - } - if (ret) - break; - } - - nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; - tg3_nvram_exec_cmd(tp, nvram_cmd); - - kfree(tmp); - - return ret; -} - -/* offset and length are dword aligned */ -static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, - u8 *buf) -{ - int i, ret = 0; - - for (i = 0; i < len; i += 4, offset += 4) { - u32 page_off, phy_addr, nvram_cmd; - __be32 data; - - memcpy(&data, buf + i, 4); - tw32(NVRAM_WRDATA, be32_to_cpu(data)); - - page_off = offset % tp->nvram_pagesize; - - phy_addr = tg3_nvram_phys_addr(tp, offset); - - tw32(NVRAM_ADDR, phy_addr); - - nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; - - if (page_off == 0 || i == 0) - nvram_cmd |= NVRAM_CMD_FIRST; - if (page_off == (tp->nvram_pagesize - 4)) - nvram_cmd |= NVRAM_CMD_LAST; - - if (i == (len - 4)) - nvram_cmd |= NVRAM_CMD_LAST; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && - !tg3_flag(tp, 5755_PLUS) && - (tp->nvram_jedecnum == JEDEC_ST) && - (nvram_cmd & NVRAM_CMD_FIRST)) { - - if ((ret = tg3_nvram_exec_cmd(tp, - NVRAM_CMD_WREN | NVRAM_CMD_GO | - NVRAM_CMD_DONE))) - - break; - } - if (!tg3_flag(tp, FLASH)) { - /* We always do complete word writes to eeprom. */ - nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); - } - - if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) - break; - } - return ret; -} - -/* offset and length are dword aligned */ -static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) -{ - int ret; - - if (tg3_flag(tp, EEPROM_WRITE_PROT)) { - tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & - ~GRC_LCLCTRL_GPIO_OUTPUT1); - udelay(40); - } - - if (!tg3_flag(tp, NVRAM)) { - ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); - } else { - u32 grc_mode; - - ret = tg3_nvram_lock(tp); - if (ret) - return ret; - - tg3_enable_nvram_access(tp); - if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) - tw32(NVRAM_WRITE1, 0x406); - - grc_mode = tr32(GRC_MODE); - tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); - - if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { - ret = tg3_nvram_write_block_buffered(tp, offset, len, - buf); - } else { - ret = tg3_nvram_write_block_unbuffered(tp, offset, len, - buf); - } - - grc_mode = tr32(GRC_MODE); - tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); - - tg3_disable_nvram_access(tp); - tg3_nvram_unlock(tp); - } - - if (tg3_flag(tp, EEPROM_WRITE_PROT)) { - tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); - udelay(40); - } - - return ret; -} - -struct subsys_tbl_ent { - u16 subsys_vendor, subsys_devid; - u32 phy_id; -}; - -static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = { - /* Broadcom boards. 
*/ - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, - { TG3PCI_SUBVENDOR_ID_BROADCOM, - TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, - - /* 3com boards. */ - { TG3PCI_SUBVENDOR_ID_3COM, - TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, - { TG3PCI_SUBVENDOR_ID_3COM, - TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, - { TG3PCI_SUBVENDOR_ID_3COM, - TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, - { TG3PCI_SUBVENDOR_ID_3COM, - TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, - { TG3PCI_SUBVENDOR_ID_3COM, - TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, - - /* DELL boards. */ - { TG3PCI_SUBVENDOR_ID_DELL, - TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, - { TG3PCI_SUBVENDOR_ID_DELL, - TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, - { TG3PCI_SUBVENDOR_ID_DELL, - TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, - { TG3PCI_SUBVENDOR_ID_DELL, - TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, - - /* Compaq boards. */ - { TG3PCI_SUBVENDOR_ID_COMPAQ, - TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, - { TG3PCI_SUBVENDOR_ID_COMPAQ, - TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, - { TG3PCI_SUBVENDOR_ID_COMPAQ, - TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, - { TG3PCI_SUBVENDOR_ID_COMPAQ, - TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, - { TG3PCI_SUBVENDOR_ID_COMPAQ, - TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, - - /* IBM boards. */ - { TG3PCI_SUBVENDOR_ID_IBM, - TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } -}; - -static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { - if ((subsys_id_to_phy_id[i].subsys_vendor == - tp->pdev->subsystem_vendor) && - (subsys_id_to_phy_id[i].subsys_devid == - tp->pdev->subsystem_device)) - return &subsys_id_to_phy_id[i]; - } - return NULL; -} - -static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) -{ - u32 val; - - tp->phy_id = TG3_PHY_ID_INVALID; - tp->led_ctrl = LED_CTRL_MODE_PHY_1; - - /* Assume an onboard device and WOL capable by default. 
*/ - tg3_flag_set(tp, EEPROM_WRITE_PROT); - tg3_flag_set(tp, WOL_CAP); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { - tg3_flag_clear(tp, EEPROM_WRITE_PROT); - tg3_flag_set(tp, IS_NIC); - } - val = tr32(VCPU_CFGSHDW); - if (val & VCPU_CFGSHDW_ASPM_DBNC) - tg3_flag_set(tp, ASPM_WORKAROUND); - if ((val & VCPU_CFGSHDW_WOL_ENABLE) && - (val & VCPU_CFGSHDW_WOL_MAGPKT)) { - tg3_flag_set(tp, WOL_ENABLE); - device_set_wakeup_enable(&tp->pdev->dev, true); - } - goto done; - } - - tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); - if (val == NIC_SRAM_DATA_SIG_MAGIC) { - u32 nic_cfg, led_cfg; - u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; - int eeprom_phy_serdes = 0; - - tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); - tp->nic_sram_data_cfg = nic_cfg; - - tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); - ver >>= NIC_SRAM_DATA_VER_SHIFT; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 && - (ver > 0) && (ver < 0x100)) - tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) - tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); - - if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == - NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) - eeprom_phy_serdes = 1; - - tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); - if (nic_phy_id != 0) { - u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; - u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; - - eeprom_phy_id = (id1 >> 16) << 10; - eeprom_phy_id |= (id2 & 0xfc00) << 16; - eeprom_phy_id |= (id2 & 0x03ff) << 0; - } else - eeprom_phy_id = 0; - - tp->phy_id = eeprom_phy_id; - if (eeprom_phy_serdes) { - if (!tg3_flag(tp, 5705_PLUS)) - tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; - else - tp->phy_flags |= TG3_PHYFLG_MII_SERDES; - } - - if (tg3_flag(tp, 5750_PLUS)) - led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | - SHASTA_EXT_LED_MODE_MASK); - else - led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; - - switch (led_cfg) { - default: - case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: - tp->led_ctrl = LED_CTRL_MODE_PHY_1; - break; - - case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: - tp->led_ctrl = LED_CTRL_MODE_PHY_2; - break; - - case NIC_SRAM_DATA_CFG_LED_MODE_MAC: - tp->led_ctrl = LED_CTRL_MODE_MAC; - - /* Default to PHY_1_MODE if 0 (MAC_MODE) is - * read on some older 5700/5701 bootcode. 
- */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5701) - tp->led_ctrl = LED_CTRL_MODE_PHY_1; - - break; - - case SHASTA_EXT_LED_SHARED: - tp->led_ctrl = LED_CTRL_MODE_SHARED; - if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && - tp->pci_chip_rev_id != CHIPREV_ID_5750_A1) - tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | - LED_CTRL_MODE_PHY_2); - break; - - case SHASTA_EXT_LED_MAC: - tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; - break; - - case SHASTA_EXT_LED_COMBO: - tp->led_ctrl = LED_CTRL_MODE_COMBO; - if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) - tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | - LED_CTRL_MODE_PHY_2); - break; - - } - - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && - tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) - tp->led_ctrl = LED_CTRL_MODE_PHY_2; - - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) - tp->led_ctrl = LED_CTRL_MODE_PHY_1; - - if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { - tg3_flag_set(tp, EEPROM_WRITE_PROT); - if ((tp->pdev->subsystem_vendor == - PCI_VENDOR_ID_ARIMA) && - (tp->pdev->subsystem_device == 0x205a || - tp->pdev->subsystem_device == 0x2063)) - tg3_flag_clear(tp, EEPROM_WRITE_PROT); - } else { - tg3_flag_clear(tp, EEPROM_WRITE_PROT); - tg3_flag_set(tp, IS_NIC); - } - - if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { - tg3_flag_set(tp, ENABLE_ASF); - if (tg3_flag(tp, 5750_PLUS)) - tg3_flag_set(tp, ASF_NEW_HANDSHAKE); - } - - if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && - tg3_flag(tp, 5750_PLUS)) - tg3_flag_set(tp, ENABLE_APE); - - if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && - !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) - tg3_flag_clear(tp, WOL_CAP); - - if (tg3_flag(tp, WOL_CAP) && - (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { - tg3_flag_set(tp, WOL_ENABLE); - device_set_wakeup_enable(&tp->pdev->dev, true); - } - - if (cfg2 & (1 << 17)) - tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; - - /* serdes signal pre-emphasis in register 0x590 set by */ - /* bootcode if bit 18 is set */ - if (cfg2 & (1 << 18)) - tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; - - if ((tg3_flag(tp, 57765_PLUS) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && - (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) - tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; - - if (tg3_flag(tp, PCI_EXPRESS) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - !tg3_flag(tp, 57765_PLUS)) { - u32 cfg3; - - tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); - if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) - tg3_flag_set(tp, ASPM_WORKAROUND); - } - - if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) - tg3_flag_set(tp, RGMII_INBAND_DISABLE); - if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) - tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); - if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) - tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); - } -done: - if (tg3_flag(tp, WOL_CAP)) - device_set_wakeup_enable(&tp->pdev->dev, - tg3_flag(tp, WOL_ENABLE)); - else - device_set_wakeup_capable(&tp->pdev->dev, false); -} - -static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) -{ - int i; - u32 val; - - tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); - tw32(OTP_CTRL, cmd); - - /* Wait for up to 1 ms for command to execute. */ - for (i = 0; i < 100; i++) { - val = tr32(OTP_STATUS); - if (val & OTP_STATUS_CMD_DONE) - break; - udelay(10); - } - - return (val & OTP_STATUS_CMD_DONE) ? 
0 : -EBUSY;
-}
-
-/* Read the gphy configuration from the OTP region of the chip. The gphy
- * configuration is a 32-bit value that straddles the alignment boundary.
- * We do two 32-bit reads and then shift and merge the results.
- */
-static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
-{
- u32 bhalf_otp, thalf_otp;
-
- tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
-
- if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
- return 0;
-
- tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
-
- if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
- return 0;
-
- thalf_otp = tr32(OTP_READ_DATA);
-
- tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
-
- if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
- return 0;
-
- bhalf_otp = tr32(OTP_READ_DATA);
-
- return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
-}
-
-static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
-{
- u32 adv = ADVERTISED_Autoneg |
- ADVERTISED_Pause;
-
- if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
- adv |= ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full;
-
- if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
- adv |= ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_TP;
- else
- adv |= ADVERTISED_FIBRE;
-
- tp->link_config.advertising = adv;
- tp->link_config.speed = SPEED_INVALID;
- tp->link_config.duplex = DUPLEX_INVALID;
- tp->link_config.autoneg = AUTONEG_ENABLE;
- tp->link_config.active_speed = SPEED_INVALID;
- tp->link_config.active_duplex = DUPLEX_INVALID;
- tp->link_config.orig_speed = SPEED_INVALID;
- tp->link_config.orig_duplex = DUPLEX_INVALID;
- tp->link_config.orig_autoneg = AUTONEG_INVALID;
-}
-
-static int __devinit tg3_phy_probe(struct tg3 *tp)
-{
- u32 hw_phy_id_1, hw_phy_id_2;
- u32 hw_phy_id, hw_phy_id_masked;
- int err;
-
- /* flow control autonegotiation is default behavior */
- tg3_flag_set(tp, PAUSE_AUTONEG);
- tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
-
- if (tg3_flag(tp, USE_PHYLIB))
- return tg3_phy_init(tp);
-
- /* Reading the PHY ID register can conflict with ASF
- * firmware access to the PHY hardware.
- */
- err = 0;
- if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
- hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
- } else {
- /* Now read the physical PHY_ID from the chip and verify
- * that it is sane. If it doesn't look good, we fall back
- * to the hard-coded table based PHY_ID and, failing
- * that, to the value found in the eeprom area.
- */
- err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
- err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
-
- hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
- hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
- hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
-
- hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
- }
-
- if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
- tp->phy_id = hw_phy_id;
- if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
- tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
- else
- tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
- } else {
- if (tp->phy_id != TG3_PHY_ID_INVALID) {
- /* Do nothing, phy ID already set up in
- * tg3_get_eeprom_hw_cfg().
- */
- } else {
- struct subsys_tbl_ent *p;
-
- /* No eeprom signature? Try the hardcoded
- * subsys device table.
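
The packing used just above (and earlier for the ID unpacked from NIC SRAM in tg3_get_eeprom_hw_cfg()) folds the two 16-bit MII ID registers into one 32-bit tg3 PHY ID: PHYSID1 occupies bits 25..10, the top six bits of PHYSID2 move to bits 31..26, and its low ten bits stay in bits 9..0. A worked example with made-up register values:

  #include <assert.h>
  #include <stdint.h>

  /* Mirrors the packing in tg3_phy_probe(). */
  static uint32_t tg3_pack_phy_id(uint16_t physid1, uint16_t physid2)
  {
          uint32_t id;

          id  = (uint32_t)physid1 << 10;            /* bits 25..10 */
          id |= (uint32_t)(physid2 & 0xfc00) << 16; /* bits 31..26 */
          id |= physid2 & 0x03ff;                   /* bits  9..0  */
          return id;
  }

  int main(void)
  {
          /* Hypothetical register values, not a real Broadcom ID. */
          assert(tg3_pack_phy_id(0x0020, 0x60b0) == 0x600080b0);
          return 0;
  }
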
- */ - p = tg3_lookup_by_subsys(tp); - if (!p) - return -ENODEV; - - tp->phy_id = p->phy_id; - if (!tp->phy_id || - tp->phy_id == TG3_PHY_ID_BCM8002) - tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; - } - } - - if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 || - (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 && - tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && - tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) - tp->phy_flags |= TG3_PHYFLG_EEE_CAP; - - tg3_phy_init_link_config(tp); - - if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && - !tg3_flag(tp, ENABLE_APE) && - !tg3_flag(tp, ENABLE_ASF)) { - u32 bmsr, mask; - - tg3_readphy(tp, MII_BMSR, &bmsr); - if (!tg3_readphy(tp, MII_BMSR, &bmsr) && - (bmsr & BMSR_LSTATUS)) - goto skip_phy_reset; - - err = tg3_phy_reset(tp); - if (err) - return err; - - tg3_phy_set_wirespeed(tp); - - mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); - if (!tg3_copper_is_advertising_all(tp, mask)) { - tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, - tp->link_config.flowctrl); - - tg3_writephy(tp, MII_BMCR, - BMCR_ANENABLE | BMCR_ANRESTART); - } - } - -skip_phy_reset: - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { - err = tg3_init_5401phy_dsp(tp); - if (err) - return err; - - err = tg3_init_5401phy_dsp(tp); - } - - return err; -} - -static void __devinit tg3_read_vpd(struct tg3 *tp) -{ - u8 *vpd_data; - unsigned int block_end, rosize, len; - u32 vpdlen; - int j, i = 0; - - vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); - if (!vpd_data) - goto out_no_vpd; - - i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); - if (i < 0) - goto out_not_found; - - rosize = pci_vpd_lrdt_size(&vpd_data[i]); - block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; - i += PCI_VPD_LRDT_TAG_SIZE; - - if (block_end > vpdlen) - goto out_not_found; - - j = pci_vpd_find_info_keyword(vpd_data, i, rosize, - PCI_VPD_RO_KEYWORD_MFR_ID); - if (j > 0) { - len = pci_vpd_info_field_size(&vpd_data[j]); - - j += PCI_VPD_INFO_FLD_HDR_SIZE; - if (j + len > block_end || len != 4 || - memcmp(&vpd_data[j], "1028", 4)) - goto partno; - - j = pci_vpd_find_info_keyword(vpd_data, i, rosize, - PCI_VPD_RO_KEYWORD_VENDOR0); - if (j < 0) - goto partno; - - len = pci_vpd_info_field_size(&vpd_data[j]); - - j += PCI_VPD_INFO_FLD_HDR_SIZE; - if (j + len > block_end) - goto partno; - - memcpy(tp->fw_ver, &vpd_data[j], len); - strncat(tp->fw_ver, " bc ", vpdlen - len - 1); - } - -partno: - i = pci_vpd_find_info_keyword(vpd_data, i, rosize, - PCI_VPD_RO_KEYWORD_PARTNO); - if (i < 0) - goto out_not_found; - - len = pci_vpd_info_field_size(&vpd_data[i]); - - i += PCI_VPD_INFO_FLD_HDR_SIZE; - if (len > TG3_BPN_SIZE || - (len + i) > vpdlen) - goto out_not_found; - - memcpy(tp->board_part_number, &vpd_data[i], len); - -out_not_found: - kfree(vpd_data); - if (tp->board_part_number[0]) - return; - -out_no_vpd: - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717) - strcpy(tp->board_part_number, "BCM5717"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) - strcpy(tp->board_part_number, "BCM5718"); - else - goto nomatch; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { - if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) - 
strcpy(tp->board_part_number, "BCM57780"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) - strcpy(tp->board_part_number, "BCM57760"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) - strcpy(tp->board_part_number, "BCM57790"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) - strcpy(tp->board_part_number, "BCM57788"); - else - goto nomatch; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { - if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) - strcpy(tp->board_part_number, "BCM57761"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) - strcpy(tp->board_part_number, "BCM57765"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) - strcpy(tp->board_part_number, "BCM57781"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) - strcpy(tp->board_part_number, "BCM57785"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) - strcpy(tp->board_part_number, "BCM57791"); - else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) - strcpy(tp->board_part_number, "BCM57795"); - else - goto nomatch; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - strcpy(tp->board_part_number, "BCM95906"); - } else { -nomatch: - strcpy(tp->board_part_number, "none"); - } -} - -static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) -{ - u32 val; - - if (tg3_nvram_read(tp, offset, &val) || - (val & 0xfc000000) != 0x0c000000 || - tg3_nvram_read(tp, offset + 4, &val) || - val != 0) - return 0; - - return 1; -} - -static void __devinit tg3_read_bc_ver(struct tg3 *tp) -{ - u32 val, offset, start, ver_offset; - int i, dst_off; - bool newver = false; - - if (tg3_nvram_read(tp, 0xc, &offset) || - tg3_nvram_read(tp, 0x4, &start)) - return; - - offset = tg3_nvram_logical_addr(tp, offset); - - if (tg3_nvram_read(tp, offset, &val)) - return; - - if ((val & 0xfc000000) == 0x0c000000) { - if (tg3_nvram_read(tp, offset + 4, &val)) - return; - - if (val == 0) - newver = true; - } - - dst_off = strlen(tp->fw_ver); - - if (newver) { - if (TG3_VER_SIZE - dst_off < 16 || - tg3_nvram_read(tp, offset + 8, &ver_offset)) - return; - - offset = offset + ver_offset - start; - for (i = 0; i < 16; i += 4) { - __be32 v; - if (tg3_nvram_read_be32(tp, offset + i, &v)) - return; - - memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); - } - } else { - u32 major, minor; - - if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) - return; - - major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> - TG3_NVM_BCVER_MAJSFT; - minor = ver_offset & TG3_NVM_BCVER_MINMSK; - snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, - "v%d.%02d", major, minor); - } -} - -static void __devinit tg3_read_hwsb_ver(struct tg3 *tp) -{ - u32 val, major, minor; - - /* Use native endian representation */ - if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) - return; - - major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> - TG3_NVM_HWSB_CFG1_MAJSFT; - minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> - TG3_NVM_HWSB_CFG1_MINSFT; - - snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); -} - -static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) -{ - u32 offset, major, minor, build; - - strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); - - if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) - return; - - switch (val & TG3_EEPROM_SB_REVISION_MASK) { - case TG3_EEPROM_SB_REVISION_0: - offset = TG3_EEPROM_SB_F1R0_EDH_OFF; - break; - case TG3_EEPROM_SB_REVISION_2: - offset = TG3_EEPROM_SB_F1R2_EDH_OFF; - break; - case TG3_EEPROM_SB_REVISION_3: - 
offset = TG3_EEPROM_SB_F1R3_EDH_OFF; - break; - case TG3_EEPROM_SB_REVISION_4: - offset = TG3_EEPROM_SB_F1R4_EDH_OFF; - break; - case TG3_EEPROM_SB_REVISION_5: - offset = TG3_EEPROM_SB_F1R5_EDH_OFF; - break; - case TG3_EEPROM_SB_REVISION_6: - offset = TG3_EEPROM_SB_F1R6_EDH_OFF; - break; - default: - return; - } - - if (tg3_nvram_read(tp, offset, &val)) - return; - - build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> - TG3_EEPROM_SB_EDH_BLD_SHFT; - major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> - TG3_EEPROM_SB_EDH_MAJ_SHFT; - minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; - - if (minor > 99 || build > 26) - return; - - offset = strlen(tp->fw_ver); - snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, - " v%d.%02d", major, minor); - - if (build > 0) { - offset = strlen(tp->fw_ver); - if (offset < TG3_VER_SIZE - 1) - tp->fw_ver[offset] = 'a' + build - 1; - } -} - -static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) -{ - u32 val, offset, start; - int i, vlen; - - for (offset = TG3_NVM_DIR_START; - offset < TG3_NVM_DIR_END; - offset += TG3_NVM_DIRENT_SIZE) { - if (tg3_nvram_read(tp, offset, &val)) - return; - - if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) - break; - } - - if (offset == TG3_NVM_DIR_END) - return; - - if (!tg3_flag(tp, 5705_PLUS)) - start = 0x08000000; - else if (tg3_nvram_read(tp, offset - 4, &start)) - return; - - if (tg3_nvram_read(tp, offset + 4, &offset) || - !tg3_fw_img_is_valid(tp, offset) || - tg3_nvram_read(tp, offset + 8, &val)) - return; - - offset += val - start; - - vlen = strlen(tp->fw_ver); - - tp->fw_ver[vlen++] = ','; - tp->fw_ver[vlen++] = ' '; - - for (i = 0; i < 4; i++) { - __be32 v; - if (tg3_nvram_read_be32(tp, offset, &v)) - return; - - offset += sizeof(v); - - if (vlen > TG3_VER_SIZE - sizeof(v)) { - memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); - break; - } - - memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); - vlen += sizeof(v); - } -} - -static void __devinit tg3_read_dash_ver(struct tg3 *tp) -{ - int vlen; - u32 apedata; - char *fwtype; - - if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF)) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); - if (apedata != APE_SEG_SIG_MAGIC) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); - if (!(apedata & APE_FW_STATUS_READY)) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); - - if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) { - tg3_flag_set(tp, APE_HAS_NCSI); - fwtype = "NCSI"; - } else { - fwtype = "DASH"; - } - - vlen = strlen(tp->fw_ver); - - snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", - fwtype, - (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, - (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, - (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, - (apedata & APE_FW_VERSION_BLDMSK)); -} - -static void __devinit tg3_read_fw_ver(struct tg3 *tp) -{ - u32 val; - bool vpd_vers = false; - - if (tp->fw_ver[0] != 0) - vpd_vers = true; - - if (tg3_flag(tp, NO_NVRAM)) { - strcat(tp->fw_ver, "sb"); - return; - } - - if (tg3_nvram_read(tp, 0, &val)) - return; - - if (val == TG3_EEPROM_MAGIC) - tg3_read_bc_ver(tp); - else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) - tg3_read_sb_ver(tp, val); - else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) - tg3_read_hwsb_ver(tp); - else - return; - - if (vpd_vers) - goto done; - - if (tg3_flag(tp, ENABLE_APE)) { - if (tg3_flag(tp, ENABLE_ASF)) - tg3_read_dash_ver(tp); - } else if (tg3_flag(tp, ENABLE_ASF)) { - 
tg3_read_mgmtfw_ver(tp);
- }
-
-done:
- tp->fw_ver[TG3_VER_SIZE - 1] = 0;
-}
-
-static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
-
-static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
-{
- if (tg3_flag(tp, LRG_PROD_RING_CAP))
- return TG3_RX_RET_MAX_SIZE_5717;
- else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
- return TG3_RX_RET_MAX_SIZE_5700;
- else
- return TG3_RX_RET_MAX_SIZE_5705;
-}
-
-static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
- { },
-};
-
-static int __devinit tg3_get_invariants(struct tg3 *tp)
-{
- u32 misc_ctrl_reg;
- u32 pci_state_reg, grc_misc_cfg;
- u32 val;
- u16 pci_cmd;
- int err;
-
- /* Force memory write invalidate off. If we leave it on,
- * then on 5700_BX chips we have to enable a workaround.
- * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
- * to match the cacheline size. The Broadcom driver has this
- * workaround but turns MWI off all the time so never uses
- * it. This seems to suggest that the workaround is insufficient.
- */
- pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
- pci_cmd &= ~PCI_COMMAND_INVALIDATE;
- pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
-
- /* Important! -- Make sure register accesses are byteswapped
- * correctly. Also, for those chips that require it, make
- * sure that indirect register accesses are enabled before
- * the first operation.
- */
- pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
- &misc_ctrl_reg);
- tp->misc_host_ctrl |= (misc_ctrl_reg &
- MISC_HOST_CTRL_CHIPREV);
- pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
- tp->misc_host_ctrl);
-
- tp->pci_chip_rev_id = (misc_ctrl_reg >>
- MISC_HOST_CTRL_CHIPREV_SHIFT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
- u32 prod_id_asic_rev;
-
- if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
- pci_read_config_dword(tp->pdev,
- TG3PCI_GEN2_PRODID_ASICREV,
- &prod_id_asic_rev);
- else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
- pci_read_config_dword(tp->pdev,
- TG3PCI_GEN15_PRODID_ASICREV,
- &prod_id_asic_rev);
- else
- pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
- &prod_id_asic_rev);
-
- tp->pci_chip_rev_id = prod_id_asic_rev;
- }
-
- /* Wrong chip ID in 5752 A0. This code can be removed later
- * as A0 is not in production.
- */
- if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
- tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
-
- /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
- * we need to disable memory and use config. cycles
- * only to access all registers. The 5702/03 chips
- * can mistakenly decode the special cycles from the
- * ICH chipsets as memory write cycles, causing corruption
- * of register and memory space. Only certain ICH bridges
- * will drive special cycles with non-zero data during the
- * address phase which can fall within the 5703's address
- * range.
This is not an ICH bug as the PCI spec allows - * non-zero address during special cycles. However, only - * these ICH bridges are known to drive non-zero addresses - * during special cycles. - * - * Since special cycles do not cross PCI bridges, we only - * enable this workaround if the 5703 is on the secondary - * bus of these ICH bridges. - */ - if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || - (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { - static struct tg3_dev_id { - u32 vendor; - u32 device; - u32 rev; - } ich_chipsets[] = { - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, - PCI_ANY_ID }, - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, - PCI_ANY_ID }, - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, - 0xa }, - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, - PCI_ANY_ID }, - { }, - }; - struct tg3_dev_id *pci_id = &ich_chipsets[0]; - struct pci_dev *bridge = NULL; - - while (pci_id->vendor != 0) { - bridge = pci_get_device(pci_id->vendor, pci_id->device, - bridge); - if (!bridge) { - pci_id++; - continue; - } - if (pci_id->rev != PCI_ANY_ID) { - if (bridge->revision > pci_id->rev) - continue; - } - if (bridge->subordinate && - (bridge->subordinate->number == - tp->pdev->bus->number)) { - tg3_flag_set(tp, ICH_WORKAROUND); - pci_dev_put(bridge); - break; - } - } - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - static struct tg3_dev_id { - u32 vendor; - u32 device; - } bridge_chipsets[] = { - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, - { }, - }; - struct tg3_dev_id *pci_id = &bridge_chipsets[0]; - struct pci_dev *bridge = NULL; - - while (pci_id->vendor != 0) { - bridge = pci_get_device(pci_id->vendor, - pci_id->device, - bridge); - if (!bridge) { - pci_id++; - continue; - } - if (bridge->subordinate && - (bridge->subordinate->number <= - tp->pdev->bus->number) && - (bridge->subordinate->subordinate >= - tp->pdev->bus->number)) { - tg3_flag_set(tp, 5701_DMA_BUG); - pci_dev_put(bridge); - break; - } - } - } - - /* The EPB bridge inside 5714, 5715, and 5780 cannot support - * DMA addresses > 40-bit. This bridge may have other additional - * 57xx devices behind it in some 4-port NIC designs for example. - * Any tg3 device found behind the bridge will also need the 40-bit - * DMA workaround. 
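
Both the EPB scan that follows and the PXH scan above answer "is the NIC behind this bridge?" purely from bus numbers: a device sits below a bridge exactly when its bus number falls in the bridge's [secondary, subordinate] window. A simplified model of that test, using plain integers for the bus numbers the kernel keeps in bridge->subordinate:

  #include <stdbool.h>
  #include <stdint.h>

  struct bridge_buses {
          uint8_t secondary;      /* first bus number behind the bridge */
          uint8_t subordinate;    /* highest bus number behind the bridge */
  };

  static bool device_behind_bridge(uint8_t dev_bus,
                                   const struct bridge_buses *br)
  {
          return dev_bus >= br->secondary && dev_bus <= br->subordinate;
  }

The ICH quirk earlier is stricter and compares for equality with the secondary bus alone, since special cycles do not cross bridges.
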
- */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { - tg3_flag_set(tp, 5780_CLASS); - tg3_flag_set(tp, 40BIT_DMA_BUG); - tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); - } else { - struct pci_dev *bridge = NULL; - - do { - bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, - PCI_DEVICE_ID_SERVERWORKS_EPB, - bridge); - if (bridge && bridge->subordinate && - (bridge->subordinate->number <= - tp->pdev->bus->number) && - (bridge->subordinate->subordinate >= - tp->pdev->bus->number)) { - tg3_flag_set(tp, 40BIT_DMA_BUG); - pci_dev_put(bridge); - break; - } - } while (bridge); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) - tp->pdev_peer = tg3_find_peer(tp); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - tg3_flag_set(tp, 5717_PLUS); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || - tg3_flag(tp, 5717_PLUS)) - tg3_flag_set(tp, 57765_PLUS); - - /* Intentionally exclude ASIC_REV_5906 */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - tg3_flag(tp, 57765_PLUS)) - tg3_flag_set(tp, 5755_PLUS); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || - tg3_flag(tp, 5755_PLUS) || - tg3_flag(tp, 5780_CLASS)) - tg3_flag_set(tp, 5750_PLUS); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || - tg3_flag(tp, 5750_PLUS)) - tg3_flag_set(tp, 5705_PLUS); - - /* Determine TSO capabilities */ - if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) - ; /* Do nothing. HW bug. 
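
A reading aid for the family flags set above: the assignments cascade so that each newer generation implies every older one, i.e. 5717_PLUS implies 57765_PLUS implies 5755_PLUS implies 5750_PLUS implies 5705_PLUS. The same containment, reduced to a few bits:

  #include <assert.h>
  #include <stdint.h>

  enum {
          F_5705_PLUS  = 1u << 0,
          F_5750_PLUS  = 1u << 1,
          F_5755_PLUS  = 1u << 2,
          F_57765_PLUS = 1u << 3,
          F_5717_PLUS  = 1u << 4,
  };

  /* Apply the same cascade as the driver: each set bit drags in the
   * bits of every older generation. */
  static uint32_t expand_generation(uint32_t flags)
  {
          if (flags & F_5717_PLUS)
                  flags |= F_57765_PLUS;
          if (flags & F_57765_PLUS)
                  flags |= F_5755_PLUS;
          if (flags & F_5755_PLUS)
                  flags |= F_5750_PLUS;
          if (flags & F_5750_PLUS)
                  flags |= F_5705_PLUS;
          return flags;
  }

  int main(void)
  {
          /* a 5719 starts as 5717_PLUS and inherits everything older */
          assert(expand_generation(F_5717_PLUS) ==
                 (F_5717_PLUS | F_57765_PLUS | F_5755_PLUS |
                  F_5750_PLUS | F_5705_PLUS));
          return 0;
  }
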
*/ - else if (tg3_flag(tp, 57765_PLUS)) - tg3_flag_set(tp, HW_TSO_3); - else if (tg3_flag(tp, 5755_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tg3_flag_set(tp, HW_TSO_2); - else if (tg3_flag(tp, 5750_PLUS)) { - tg3_flag_set(tp, HW_TSO_1); - tg3_flag_set(tp, TSO_BUG); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && - tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) - tg3_flag_clear(tp, TSO_BUG); - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { - tg3_flag_set(tp, TSO_BUG); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) - tp->fw_needed = FIRMWARE_TG3TSO5; - else - tp->fw_needed = FIRMWARE_TG3TSO; - } - - /* Selectively allow TSO based on operating conditions */ - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3) || - (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) - tg3_flag_set(tp, TSO_CAPABLE); - else { - tg3_flag_clear(tp, TSO_CAPABLE); - tg3_flag_clear(tp, TSO_BUG); - tp->fw_needed = NULL; - } - - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) - tp->fw_needed = FIRMWARE_TG3; - - tp->irq_max = 1; - - if (tg3_flag(tp, 5750_PLUS)) { - tg3_flag_set(tp, SUPPORT_MSI); - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || - GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && - tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && - tp->pdev_peer == tp->pdev)) - tg3_flag_clear(tp, SUPPORT_MSI); - - if (tg3_flag(tp, 5755_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - tg3_flag_set(tp, 1SHOT_MSI); - } - - if (tg3_flag(tp, 57765_PLUS)) { - tg3_flag_set(tp, SUPPORT_MSIX); - tp->irq_max = TG3_IRQ_MAX_VECS; - } - } - - if (tg3_flag(tp, 5755_PLUS)) - tg3_flag_set(tp, SHORT_DMA_BUG); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) - tg3_flag_set(tp, 4K_FIFO_LIMIT); - - if (tg3_flag(tp, 5717_PLUS)) - tg3_flag_set(tp, LRG_PROD_RING_CAP); - - if (tg3_flag(tp, 57765_PLUS) && - tp->pci_chip_rev_id != CHIPREV_ID_5719_A0) - tg3_flag_set(tp, USE_JUMBO_BDFLAG); - - if (!tg3_flag(tp, 5705_PLUS) || - tg3_flag(tp, 5780_CLASS) || - tg3_flag(tp, USE_JUMBO_BDFLAG)) - tg3_flag_set(tp, JUMBO_CAPABLE); - - pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, - &pci_state_reg); - - if (pci_is_pcie(tp->pdev)) { - u16 lnkctl; - - tg3_flag_set(tp, PCI_EXPRESS); - - tp->pcie_readrq = 4096; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - tp->pcie_readrq = 2048; - - pcie_set_readrq(tp->pdev, tp->pcie_readrq); - - pci_read_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - &lnkctl); - if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5906) { - tg3_flag_clear(tp, HW_TSO_2); - tg3_flag_clear(tp, TSO_CAPABLE); - } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) - tg3_flag_set(tp, CLKREQ_BUG); - } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { - tg3_flag_set(tp, L1PLLPD_EN); - } - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { - /* BCM5785 devices are effectively PCIe devices, and should - * follow PCIe codepaths, but do not have a PCIe capabilities - * section. 
- */
- tg3_flag_set(tp, PCI_EXPRESS);
- } else if (!tg3_flag(tp, 5705_PLUS) ||
- tg3_flag(tp, 5780_CLASS)) {
- tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
- if (!tp->pcix_cap) {
- dev_err(&tp->pdev->dev,
- "Cannot find PCI-X capability, aborting\n");
- return -EIO;
- }
-
- if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
- tg3_flag_set(tp, PCIX_MODE);
- }
-
- /* If we have an AMD 762 or VIA K8T800 chipset, write
- * reordering to the mailbox registers done by the host
- * controller can cause major troubles. We read back from
- * every mailbox register write to force the writes to be
- * posted to the chip in order.
- */
- if (pci_dev_present(tg3_write_reorder_chipsets) &&
- !tg3_flag(tp, PCI_EXPRESS))
- tg3_flag_set(tp, MBOX_WRITE_REORDER);
-
- pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
- &tp->pci_cacheline_sz);
- pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
- &tp->pci_lat_timer);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
- tp->pci_lat_timer < 64) {
- tp->pci_lat_timer = 64;
- pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
- tp->pci_lat_timer);
- }
-
- /* Important! -- It is critical that the PCI-X hw workaround
- * situation is decided before the first MMIO register access.
- */
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
- /* 5700 BX chips need to have their TX producer index
- * mailboxes written twice to work around a bug.
- */
- tg3_flag_set(tp, TXD_MBOX_HWBUG);
-
- /* If we are in PCI-X mode, enable register write workaround.
- *
- * The workaround is to use indirect register accesses
- * for all chip writes not to mailbox registers.
- */
- if (tg3_flag(tp, PCIX_MODE)) {
- u32 pm_reg;
-
- tg3_flag_set(tp, PCIX_TARGET_HWBUG);
-
- /* The chip can have its power management PCI config
- * space registers clobbered due to this bug.
- * So explicitly force the chip into D0 here.
- */
- pci_read_config_dword(tp->pdev,
- tp->pm_cap + PCI_PM_CTRL,
- &pm_reg);
- pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
- pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
- pci_write_config_dword(tp->pdev,
- tp->pm_cap + PCI_PM_CTRL,
- pm_reg);
-
- /* Also, force SERR#/PERR# in PCI command. */
- pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
- pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
- pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
- }
- }
-
- if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
- tg3_flag_set(tp, PCI_HIGH_SPEED);
- if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
- tg3_flag_set(tp, PCI_32BIT);
-
- /* Chip-specific fixup from Broadcom driver */
- if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
- (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
- pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
- pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
- }
-
- /* Default fast path register access methods */
- tp->read32 = tg3_read32;
- tp->write32 = tg3_write32;
- tp->read32_mbox = tg3_read32;
- tp->write32_mbox = tg3_write32;
- tp->write32_tx_mbox = tg3_write32;
- tp->write32_rx_mbox = tg3_write32;
-
- /* Various workaround register access methods */
- if (tg3_flag(tp, PCIX_TARGET_HWBUG))
- tp->write32 = tg3_write_indirect_reg32;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
- (tg3_flag(tp, PCI_EXPRESS) &&
- tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
- /*
- * Back-to-back register writes can cause problems on these
- * chips; the workaround is to read back all reg writes
- * except those to mailbox regs.
- *
- * See tg3_write_indirect_reg32().
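
The tg3_write_flush_reg32() accessor selected here is the standard posted-write flush idiom: reading any register back across the same bus forces earlier posted writes to reach the device before the CPU continues. A minimal sketch of the idiom (not the driver's exact code, which also routes through the indirect-access machinery when that workaround is active):

#include <linux/types.h>
#include <linux/io.h>

/* Write a device register, then read it back so the posted write is
 * flushed out to the device before execution continues. */
static inline void write_flush_reg32(void __iomem *base, u32 off, u32 val)
{
    writel(val, base + off);
    readl(base + off);          /* flush the posted write */
}
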
- */ - tp->write32 = tg3_write_flush_reg32; - } - - if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { - tp->write32_tx_mbox = tg3_write32_tx_mbox; - if (tg3_flag(tp, MBOX_WRITE_REORDER)) - tp->write32_rx_mbox = tg3_write_flush_reg32; - } - - if (tg3_flag(tp, ICH_WORKAROUND)) { - tp->read32 = tg3_read_indirect_reg32; - tp->write32 = tg3_write_indirect_reg32; - tp->read32_mbox = tg3_read_indirect_mbox; - tp->write32_mbox = tg3_write_indirect_mbox; - tp->write32_tx_mbox = tg3_write_indirect_mbox; - tp->write32_rx_mbox = tg3_write_indirect_mbox; - - iounmap(tp->regs); - tp->regs = NULL; - - pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); - pci_cmd &= ~PCI_COMMAND_MEMORY; - pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); - } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - tp->read32_mbox = tg3_read32_mbox_5906; - tp->write32_mbox = tg3_write32_mbox_5906; - tp->write32_tx_mbox = tg3_write32_mbox_5906; - tp->write32_rx_mbox = tg3_write32_mbox_5906; - } - - if (tp->write32 == tg3_write_indirect_reg32 || - (tg3_flag(tp, PCIX_MODE) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) - tg3_flag_set(tp, SRAM_USE_CONFIG); - - /* The memory arbiter has to be enabled in order for SRAM accesses - * to succeed. Normally on powerup the tg3 chip firmware will make - * sure it is enabled, but other entities such as system netboot - * code might disable it. - */ - val = tr32(MEMARB_MODE); - tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); - - if (tg3_flag(tp, PCIX_MODE)) { - pci_read_config_dword(tp->pdev, - tp->pcix_cap + PCI_X_STATUS, &val); - tp->pci_fn = val & 0x7; - } else { - tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; - } - - /* Get eeprom hw config before calling tg3_set_power_state(). - * In particular, the TG3_FLAG_IS_NIC flag must be - * determined before calling tg3_set_power_state() so that - * we know whether or not to switch out of Vaux power. - * When the flag is set, it means that GPIO1 is used for eeprom - * write protect and also implies that it is a LOM where GPIOs - * are not used to switch power. - */ - tg3_get_eeprom_hw_cfg(tp); - - if (tg3_flag(tp, ENABLE_APE)) { - /* Allow reads and writes to the - * APE register and memory space. - */ - pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | - PCISTATE_ALLOW_APE_SHMEM_WR | - PCISTATE_ALLOW_APE_PSPACE_WR; - pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, - pci_state_reg); - - tg3_ape_lock_init(tp); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - tg3_flag(tp, 57765_PLUS)) - tg3_flag_set(tp, CPMU_PRESENT); - - /* Set up tp->grc_local_ctrl before calling - * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high - * will bring 5700's external PHY out of reset. - * It is also used as eeprom write protect on LOMs. - */ - tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - tg3_flag(tp, EEPROM_WRITE_PROT)) - tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | - GRC_LCLCTRL_GPIO_OUTPUT1); - /* Unused GPIO3 must be driven as output on 5752 because there - * are no pull-up resistors on unused GPIO pins. 
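
When the ICH workaround is active the driver gives up the MMIO mapping entirely (the iounmap() above) and funnels every access through the PCI configuration window instead. The mechanism is a doorbell pair: write the target offset into TG3PCI_REG_BASE_ADDR, then move the data through TG3PCI_REG_DATA. A simplified sketch of the read side (offsets from tg3.h; the real tg3_read_indirect_reg32() also serializes on tp->indirect_lock):

#include <linux/pci.h>

static u32 indirect_read32(struct pci_dev *pdev, u32 off)
{
    u32 val;

    /* Point the window at the register, then read through it. */
    pci_write_config_dword(pdev, TG3PCI_REG_BASE_ADDR, off);
    pci_read_config_dword(pdev, TG3PCI_REG_DATA, &val);
    return val;
}
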
- */ - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) - tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) - tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; - - if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { - /* Turn off the debug UART. */ - tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; - if (tg3_flag(tp, IS_NIC)) - /* Keep VMain power. */ - tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | - GRC_LCLCTRL_GPIO_OUTPUT0; - } - - /* Switch out of Vaux if it is a NIC */ - tg3_pwrsrc_switch_to_vmain(tp); - - /* Derive initial jumbo mode from MTU assigned in - * ether_setup() via the alloc_etherdev() call - */ - if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) - tg3_flag_set(tp, JUMBO_RING_ENABLE); - - /* Determine WakeOnLan speed to use. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { - tg3_flag_clear(tp, WOL_SPEED_100MB); - } else { - tg3_flag_set(tp, WOL_SPEED_100MB); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->phy_flags |= TG3_PHYFLG_IS_FET; - - /* A few boards don't want Ethernet@WireSpeed phy feature */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && - (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || - (tp->phy_flags & TG3_PHYFLG_IS_FET) || - (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) - tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; - - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || - GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) - tp->phy_flags |= TG3_PHYFLG_ADC_BUG; - if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) - tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; - - if (tg3_flag(tp, 5705_PLUS) && - !(tp->phy_flags & TG3_PHYFLG_IS_FET) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && - !tg3_flag(tp, 57765_PLUS)) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { - if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && - tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) - tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; - if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) - tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; - } else - tp->phy_flags |= TG3_PHYFLG_BER_BUG; - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { - tp->phy_otp = tg3_read_otp_phycfg(tp); - if (tp->phy_otp == 0) - tp->phy_otp = TG3_OTP_DEFAULT; - } - - if (tg3_flag(tp, CPMU_PRESENT)) - tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; - else - tp->mi_mode = MAC_MI_MODE_BASE; - - tp->coalesce_mode = 0; - if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && - GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) - tp->coalesce_mode |= HOSTCC_MODE_32BYTE; - - /* Set these bits to enable statistics workaround. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) { - tp->coalesce_mode |= HOSTCC_MODE_ATTN; - tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) - tg3_flag_set(tp, USE_PHYLIB); - - err = tg3_mdio_init(tp); - if (err) - return err; - - /* Initialize data/descriptor byte/word swapping. */ - val = tr32(GRC_MODE); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | - GRC_MODE_WORD_SWAP_B2HRX_DATA | - GRC_MODE_B2HRX_ENABLE | - GRC_MODE_HTX2B_ENABLE | - GRC_MODE_HOST_STACKUP); - else - val &= GRC_MODE_HOST_STACKUP; - - tw32(GRC_MODE, val | tp->grc_mode); - - tg3_switch_clocks(tp); - - /* Clear this out for sanity. */ - tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); - - pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, - &pci_state_reg); - if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && - !tg3_flag(tp, PCIX_TARGET_HWBUG)) { - u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); - - if (chiprevid == CHIPREV_ID_5701_A0 || - chiprevid == CHIPREV_ID_5701_B0 || - chiprevid == CHIPREV_ID_5701_B2 || - chiprevid == CHIPREV_ID_5701_B5) { - void __iomem *sram_base; - - /* Write some dummy words into the SRAM status block - * area, see if it reads back correctly. If the return - * value is bad, force enable the PCIX workaround. - */ - sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; - - writel(0x00000000, sram_base); - writel(0x00000000, sram_base + 4); - writel(0xffffffff, sram_base + 4); - if (readl(sram_base) != 0x00000000) - tg3_flag_set(tp, PCIX_TARGET_HWBUG); - } - } - - udelay(50); - tg3_nvram_init(tp); - - grc_misc_cfg = tr32(GRC_MISC_CFG); - grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || - grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) - tg3_flag_set(tp, IS_5788); - - if (!tg3_flag(tp, IS_5788) && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) - tg3_flag_set(tp, TAGGED_STATUS); - if (tg3_flag(tp, TAGGED_STATUS)) { - tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | - HOSTCC_MODE_CLRTICK_TXBD); - - tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; - pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, - tp->misc_host_ctrl); - } - - /* Preserve the APE MAC_MODE bits */ - if (tg3_flag(tp, ENABLE_APE)) - tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; - else - tp->mac_mode = TG3_DEF_MAC_MODE; - - /* these are limited to 10/100 only */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && - (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && - (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || - tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || - tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || - (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && - (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || - tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || - tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || - (tp->phy_flags & TG3_PHYFLG_IS_FET)) - tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; - - err = tg3_phy_probe(tp); - if (err) { - 
dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
- /* ... but do not return immediately ... */
- tg3_mdio_fini(tp);
- }
-
- tg3_read_vpd(tp);
- tg3_read_fw_ver(tp);
-
- if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
- } else {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
- tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
- else
- tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
- }
-
- /* 5700 {AX,BX} chips have a broken status block link
- * change bit implementation, so we must use the
- * status register in those cases.
- */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
- tg3_flag_set(tp, USE_LINKCHG_REG);
- else
- tg3_flag_clear(tp, USE_LINKCHG_REG);
-
- /* The led_ctrl is set during tg3_phy_probe; here we might
- * have to force the link status polling mechanism based
- * upon subsystem IDs.
- */
- if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
- !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
- tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
- tg3_flag_set(tp, USE_LINKCHG_REG);
- }
-
- /* For all SERDES we poll the MAC status register. */
- if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
- tg3_flag_set(tp, POLL_SERDES);
- else
- tg3_flag_clear(tp, POLL_SERDES);
-
- tp->rx_offset = NET_IP_ALIGN;
- tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
- tg3_flag(tp, PCIX_MODE)) {
- tp->rx_offset = 0;
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- tp->rx_copy_thresh = ~(u16)0;
-#endif
- }
-
- tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
- tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
- tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
-
- tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
-
- /* Increment the rx prod index on the rx std ring by at most
- * 8 for these chips to work around hw errata.
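
The *_ring_mask assignments above rely on every tg3 ring size being a power of two: "size - 1" yields an all-ones mask, so a free-running producer or consumer index reduces to a ring slot with a single AND instead of a modulo. Standalone illustration (ring size assumed):

#include <stdio.h>

#define RING_SIZE 512u              /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
    unsigned int prod = 1234;       /* free-running producer index */

    printf("slot = %u\n", prod & RING_MASK);    /* prints 210 */
    return 0;
}
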
- */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) - tp->rx_std_max_post = 8; - - if (tg3_flag(tp, ASPM_WORKAROUND)) - tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & - PCIE_PWR_MGMT_L1_THRESH_MSK; - - return err; -} - -#ifdef CONFIG_SPARC -static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) -{ - struct net_device *dev = tp->dev; - struct pci_dev *pdev = tp->pdev; - struct device_node *dp = pci_device_to_OF_node(pdev); - const unsigned char *addr; - int len; - - addr = of_get_property(dp, "local-mac-address", &len); - if (addr && len == 6) { - memcpy(dev->dev_addr, addr, 6); - memcpy(dev->perm_addr, dev->dev_addr, 6); - return 0; - } - return -ENODEV; -} - -static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) -{ - struct net_device *dev = tp->dev; - - memcpy(dev->dev_addr, idprom->id_ethaddr, 6); - memcpy(dev->perm_addr, idprom->id_ethaddr, 6); - return 0; -} -#endif - -static int __devinit tg3_get_device_address(struct tg3 *tp) -{ - struct net_device *dev = tp->dev; - u32 hi, lo, mac_offset; - int addr_ok = 0; - -#ifdef CONFIG_SPARC - if (!tg3_get_macaddr_sparc(tp)) - return 0; -#endif - - mac_offset = 0x7c; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - tg3_flag(tp, 5780_CLASS)) { - if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) - mac_offset = 0xcc; - if (tg3_nvram_lock(tp)) - tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); - else - tg3_nvram_unlock(tp); - } else if (tg3_flag(tp, 5717_PLUS)) { - if (tp->pci_fn & 1) - mac_offset = 0xcc; - if (tp->pci_fn > 1) - mac_offset += 0x18c; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - mac_offset = 0x10; - - /* First try to get it from MAC address mailbox. */ - tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); - if ((hi >> 16) == 0x484b) { - dev->dev_addr[0] = (hi >> 8) & 0xff; - dev->dev_addr[1] = (hi >> 0) & 0xff; - - tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); - dev->dev_addr[2] = (lo >> 24) & 0xff; - dev->dev_addr[3] = (lo >> 16) & 0xff; - dev->dev_addr[4] = (lo >> 8) & 0xff; - dev->dev_addr[5] = (lo >> 0) & 0xff; - - /* Some old bootcode may report a 0 MAC address in SRAM */ - addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); - } - if (!addr_ok) { - /* Next, try NVRAM. */ - if (!tg3_flag(tp, NO_NVRAM) && - !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && - !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { - memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); - memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); - } - /* Finally just fetch it out of the MAC control regs. 
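
The mailbox path above packs the station address into two 32-bit words, with an ASCII "HK" (0x484b) signature in the top half of the high word marking a valid entry. The unpacking, shown standalone with assumed register contents:

#include <stdio.h>

int main(void)
{
    unsigned int hi = 0x484b0010;   /* "HK" signature + addr[0..1] */
    unsigned int lo = 0x18428b2c;   /* addr[2..5] */
    unsigned char a[6];

    a[0] = (hi >> 8) & 0xff;
    a[1] = hi & 0xff;
    a[2] = (lo >> 24) & 0xff;
    a[3] = (lo >> 16) & 0xff;
    a[4] = (lo >> 8) & 0xff;
    a[5] = lo & 0xff;
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           a[0], a[1], a[2], a[3], a[4], a[5]);
    return 0;
}
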
*/ - else { - hi = tr32(MAC_ADDR_0_HIGH); - lo = tr32(MAC_ADDR_0_LOW); - - dev->dev_addr[5] = lo & 0xff; - dev->dev_addr[4] = (lo >> 8) & 0xff; - dev->dev_addr[3] = (lo >> 16) & 0xff; - dev->dev_addr[2] = (lo >> 24) & 0xff; - dev->dev_addr[1] = hi & 0xff; - dev->dev_addr[0] = (hi >> 8) & 0xff; - } - } - - if (!is_valid_ether_addr(&dev->dev_addr[0])) { -#ifdef CONFIG_SPARC - if (!tg3_get_default_macaddr_sparc(tp)) - return 0; -#endif - return -EINVAL; - } - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - return 0; -} - -#define BOUNDARY_SINGLE_CACHELINE 1 -#define BOUNDARY_MULTI_CACHELINE 2 - -static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) -{ - int cacheline_size; - u8 byte; - int goal; - - pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); - if (byte == 0) - cacheline_size = 1024; - else - cacheline_size = (int) byte * 4; - - /* On 5703 and later chips, the boundary bits have no - * effect. - */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && - !tg3_flag(tp, PCI_EXPRESS)) - goto out; - -#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) - goal = BOUNDARY_MULTI_CACHELINE; -#else -#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) - goal = BOUNDARY_SINGLE_CACHELINE; -#else - goal = 0; -#endif -#endif - - if (tg3_flag(tp, 57765_PLUS)) { - val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; - goto out; - } - - if (!goal) - goto out; - - /* PCI controllers on most RISC systems tend to disconnect - * when a device tries to burst across a cache-line boundary. - * Therefore, letting tg3 do so just wastes PCI bandwidth. - * - * Unfortunately, for PCI-E there are only limited - * write-side controls for this, and thus for reads - * we will still get the disconnects. We'll also waste - * these PCI cycles for both read and write for chips - * other than 5700 and 5701 which do not implement the - * boundary bits. 
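
tg3_calc_dma_bndry() begins by decoding PCI_CACHE_LINE_SIZE, which the PCI spec defines in units of 32-bit words, hence the multiply by four; a value of zero means firmware never programmed the register, and the driver assumes a worst case of 1024 bytes. The decode in isolation:

/* PCI_CACHE_LINE_SIZE is in 32-bit words; 0 means "never programmed". */
static int cacheline_bytes(unsigned char cls)
{
    return cls ? cls * 4 : 1024;
}
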
- */ - if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { - switch (cacheline_size) { - case 16: - case 32: - case 64: - case 128: - if (goal == BOUNDARY_SINGLE_CACHELINE) { - val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | - DMA_RWCTRL_WRITE_BNDRY_128_PCIX); - } else { - val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | - DMA_RWCTRL_WRITE_BNDRY_384_PCIX); - } - break; - - case 256: - val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | - DMA_RWCTRL_WRITE_BNDRY_256_PCIX); - break; - - default: - val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | - DMA_RWCTRL_WRITE_BNDRY_384_PCIX); - break; - } - } else if (tg3_flag(tp, PCI_EXPRESS)) { - switch (cacheline_size) { - case 16: - case 32: - case 64: - if (goal == BOUNDARY_SINGLE_CACHELINE) { - val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; - val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; - break; - } - /* fallthrough */ - case 128: - default: - val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; - val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; - break; - } - } else { - switch (cacheline_size) { - case 16: - if (goal == BOUNDARY_SINGLE_CACHELINE) { - val |= (DMA_RWCTRL_READ_BNDRY_16 | - DMA_RWCTRL_WRITE_BNDRY_16); - break; - } - /* fallthrough */ - case 32: - if (goal == BOUNDARY_SINGLE_CACHELINE) { - val |= (DMA_RWCTRL_READ_BNDRY_32 | - DMA_RWCTRL_WRITE_BNDRY_32); - break; - } - /* fallthrough */ - case 64: - if (goal == BOUNDARY_SINGLE_CACHELINE) { - val |= (DMA_RWCTRL_READ_BNDRY_64 | - DMA_RWCTRL_WRITE_BNDRY_64); - break; - } - /* fallthrough */ - case 128: - if (goal == BOUNDARY_SINGLE_CACHELINE) { - val |= (DMA_RWCTRL_READ_BNDRY_128 | - DMA_RWCTRL_WRITE_BNDRY_128); - break; - } - /* fallthrough */ - case 256: - val |= (DMA_RWCTRL_READ_BNDRY_256 | - DMA_RWCTRL_WRITE_BNDRY_256); - break; - case 512: - val |= (DMA_RWCTRL_READ_BNDRY_512 | - DMA_RWCTRL_WRITE_BNDRY_512); - break; - case 1024: - default: - val |= (DMA_RWCTRL_READ_BNDRY_1024 | - DMA_RWCTRL_WRITE_BNDRY_1024); - break; - } - } - -out: - return val; -} - -static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) -{ - struct tg3_internal_buffer_desc test_desc; - u32 sram_dma_descs; - int i, ret; - - sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; - - tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); - tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); - tw32(RDMAC_STATUS, 0); - tw32(WDMAC_STATUS, 0); - - tw32(BUFMGR_MODE, 0); - tw32(FTQ_RESET, 0); - - test_desc.addr_hi = ((u64) buf_dma) >> 32; - test_desc.addr_lo = buf_dma & 0xffffffff; - test_desc.nic_mbuf = 0x00002100; - test_desc.len = size; - - /* - * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz - * the *second* time the tg3 driver was getting loaded after an - * initial scan. - * - * Broadcom tells me: - * ...the DMA engine is connected to the GRC block and a DMA - * reset may affect the GRC block in some unpredictable way... - * The behavior of resets to individual blocks has not been tested. - * - * Broadcom noted the GRC reset will also reset all sub-components. 
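
Further down, tg3_do_test_dma() pushes this descriptor into NIC SRAM one 32-bit word at a time through the memory window, walking the struct as an array of u32. The same walk in a standalone sketch (descriptor layout trimmed for illustration; the real struct tg3_internal_buffer_desc carries additional fields):

#include <stdio.h>

struct desc {                       /* trimmed, illustration only */
    unsigned int addr_hi, addr_lo, nic_mbuf, len;
};

int main(void)
{
    struct desc d = { 0x0, 0x1000, 0x00002100, 0x2000 };
    const unsigned int *w = (const unsigned int *)&d;
    unsigned int i;

    for (i = 0; i < sizeof(d) / sizeof(*w); i++)
        printf("word %u = 0x%08x\n", i, w[i]);
    return 0;
}
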
- */ - if (to_device) { - test_desc.cqid_sqid = (13 << 8) | 2; - - tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); - udelay(40); - } else { - test_desc.cqid_sqid = (16 << 8) | 7; - - tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); - udelay(40); - } - test_desc.flags = 0x00000005; - - for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { - u32 val; - - val = *(((u32 *)&test_desc) + i); - pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, - sram_dma_descs + (i * sizeof(u32))); - pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); - } - pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); - - if (to_device) - tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); - else - tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); - - ret = -ENODEV; - for (i = 0; i < 40; i++) { - u32 val; - - if (to_device) - val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); - else - val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); - if ((val & 0xffff) == sram_dma_descs) { - ret = 0; - break; - } - - udelay(100); - } - - return ret; -} - -#define TEST_BUFFER_SIZE 0x2000 - -static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { - { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, - { }, -}; - -static int __devinit tg3_test_dma(struct tg3 *tp) -{ - dma_addr_t buf_dma; - u32 *buf, saved_dma_rwctrl; - int ret = 0; - - buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, - &buf_dma, GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - goto out_nofree; - } - - tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | - (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); - - tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); - - if (tg3_flag(tp, 57765_PLUS)) - goto out; - - if (tg3_flag(tp, PCI_EXPRESS)) { - /* DMA read watermark not used on PCIE */ - tp->dma_rwctrl |= 0x00180000; - } else if (!tg3_flag(tp, PCIX_MODE)) { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) - tp->dma_rwctrl |= 0x003f0000; - else - tp->dma_rwctrl |= 0x003f000f; - } else { - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); - u32 read_water = 0x7; - - /* If the 5704 is behind the EPB bridge, we can - * do the less restrictive ONE_DMA workaround for - * better performance. - */ - if (tg3_flag(tp, 40BIT_DMA_BUG) && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) - tp->dma_rwctrl |= 0x8000; - else if (ccval == 0x6 || ccval == 0x7) - tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) - read_water = 4; - /* Set bit 23 to enable PCIX hw bug fix */ - tp->dma_rwctrl |= - (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | - (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | - (1 << 23); - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { - /* 5780 always in PCIX mode */ - tp->dma_rwctrl |= 0x00144000; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { - /* 5714 always in PCIX mode */ - tp->dma_rwctrl |= 0x00148000; - } else { - tp->dma_rwctrl |= 0x001b000f; - } - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) - tp->dma_rwctrl &= 0xfffffff0; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - /* Remove this if it causes problems for some boards. */ - tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; - - /* On 5700/5701 chips, we need to set this bit. 
- * Otherwise the chip will issue cacheline transactions - * to streamable DMA memory with not all the byte - * enables turned on. This is an error on several - * RISC PCI controllers, in particular sparc64. - * - * On 5703/5704 chips, this bit has been reassigned - * a different meaning. In particular, it is used - * on those chips to enable a PCI-X workaround. - */ - tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; - } - - tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); - -#if 0 - /* Unneeded, already done by tg3_get_invariants. */ - tg3_switch_clocks(tp); -#endif - - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) - goto out; - - /* It is best to perform DMA test with maximum write burst size - * to expose the 5700/5701 write DMA bug. - */ - saved_dma_rwctrl = tp->dma_rwctrl; - tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; - tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); - - while (1) { - u32 *p = buf, i; - - for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) - p[i] = i; - - /* Send the buffer to the chip. */ - ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); - if (ret) { - dev_err(&tp->pdev->dev, - "%s: Buffer write failed. err = %d\n", - __func__, ret); - break; - } - -#if 0 - /* validate data reached card RAM correctly. */ - for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { - u32 val; - tg3_read_mem(tp, 0x2100 + (i*4), &val); - if (le32_to_cpu(val) != p[i]) { - dev_err(&tp->pdev->dev, - "%s: Buffer corrupted on device! " - "(%d != %d)\n", __func__, val, i); - /* ret = -ENODEV here? */ - } - p[i] = 0; - } -#endif - /* Now read it back. */ - ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); - if (ret) { - dev_err(&tp->pdev->dev, "%s: Buffer read failed. " - "err = %d\n", __func__, ret); - break; - } - - /* Verify it. */ - for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { - if (p[i] == i) - continue; - - if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != - DMA_RWCTRL_WRITE_BNDRY_16) { - tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; - tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; - tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); - break; - } else { - dev_err(&tp->pdev->dev, - "%s: Buffer corrupted on read back! " - "(%d != %d)\n", __func__, p[i], i); - ret = -ENODEV; - goto out; - } - } - - if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { - /* Success. */ - ret = 0; - break; - } - } - if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != - DMA_RWCTRL_WRITE_BNDRY_16) { - /* DMA test passed without adjusting DMA boundary, - * now look for chipsets that are known to expose the - * DMA bug without failing the test. - */ - if (pci_dev_present(tg3_dma_wait_state_chipsets)) { - tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; - tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; - } else { - /* Safe to use the calculated DMA boundary. 
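
The test loop just above is a plain write/read-back/verify cycle: the buffer is filled with its own indices, DMA'd to the card and back, and any slot that no longer equals its index pinpoints the corruption and triggers the write-boundary fallback. The check in a standalone sketch (the DMA round trip is stubbed out):

#include <stdio.h>

#define WORDS 16

int main(void)
{
    unsigned int buf[WORDS], i;

    for (i = 0; i < WORDS; i++)
        buf[i] = i;                 /* known pattern */

    /* ... the tg3_do_test_dma() round trip would happen here ... */

    for (i = 0; i < WORDS; i++) {
        if (buf[i] != i) {
            printf("corrupted at word %u\n", i);
            return 1;
        }
    }
    printf("pattern intact\n");
    return 0;
}
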
*/ - tp->dma_rwctrl = saved_dma_rwctrl; - } - - tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); - } - -out: - dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); -out_nofree: - return ret; -} - -static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) -{ - if (tg3_flag(tp, 57765_PLUS)) { - tp->bufmgr_config.mbuf_read_dma_low_water = - DEFAULT_MB_RDMA_LOW_WATER_5705; - tp->bufmgr_config.mbuf_mac_rx_low_water = - DEFAULT_MB_MACRX_LOW_WATER_57765; - tp->bufmgr_config.mbuf_high_water = - DEFAULT_MB_HIGH_WATER_57765; - - tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = - DEFAULT_MB_RDMA_LOW_WATER_5705; - tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = - DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; - tp->bufmgr_config.mbuf_high_water_jumbo = - DEFAULT_MB_HIGH_WATER_JUMBO_57765; - } else if (tg3_flag(tp, 5705_PLUS)) { - tp->bufmgr_config.mbuf_read_dma_low_water = - DEFAULT_MB_RDMA_LOW_WATER_5705; - tp->bufmgr_config.mbuf_mac_rx_low_water = - DEFAULT_MB_MACRX_LOW_WATER_5705; - tp->bufmgr_config.mbuf_high_water = - DEFAULT_MB_HIGH_WATER_5705; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - tp->bufmgr_config.mbuf_mac_rx_low_water = - DEFAULT_MB_MACRX_LOW_WATER_5906; - tp->bufmgr_config.mbuf_high_water = - DEFAULT_MB_HIGH_WATER_5906; - } - - tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = - DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; - tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = - DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; - tp->bufmgr_config.mbuf_high_water_jumbo = - DEFAULT_MB_HIGH_WATER_JUMBO_5780; - } else { - tp->bufmgr_config.mbuf_read_dma_low_water = - DEFAULT_MB_RDMA_LOW_WATER; - tp->bufmgr_config.mbuf_mac_rx_low_water = - DEFAULT_MB_MACRX_LOW_WATER; - tp->bufmgr_config.mbuf_high_water = - DEFAULT_MB_HIGH_WATER; - - tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = - DEFAULT_MB_RDMA_LOW_WATER_JUMBO; - tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = - DEFAULT_MB_MACRX_LOW_WATER_JUMBO; - tp->bufmgr_config.mbuf_high_water_jumbo = - DEFAULT_MB_HIGH_WATER_JUMBO; - } - - tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; - tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; -} - -static char * __devinit tg3_phy_string(struct tg3 *tp) -{ - switch (tp->phy_id & TG3_PHY_ID_MASK) { - case TG3_PHY_ID_BCM5400: return "5400"; - case TG3_PHY_ID_BCM5401: return "5401"; - case TG3_PHY_ID_BCM5411: return "5411"; - case TG3_PHY_ID_BCM5701: return "5701"; - case TG3_PHY_ID_BCM5703: return "5703"; - case TG3_PHY_ID_BCM5704: return "5704"; - case TG3_PHY_ID_BCM5705: return "5705"; - case TG3_PHY_ID_BCM5750: return "5750"; - case TG3_PHY_ID_BCM5752: return "5752"; - case TG3_PHY_ID_BCM5714: return "5714"; - case TG3_PHY_ID_BCM5780: return "5780"; - case TG3_PHY_ID_BCM5755: return "5755"; - case TG3_PHY_ID_BCM5787: return "5787"; - case TG3_PHY_ID_BCM5784: return "5784"; - case TG3_PHY_ID_BCM5756: return "5722/5756"; - case TG3_PHY_ID_BCM5906: return "5906"; - case TG3_PHY_ID_BCM5761: return "5761"; - case TG3_PHY_ID_BCM5718C: return "5718C"; - case TG3_PHY_ID_BCM5718S: return "5718S"; - case TG3_PHY_ID_BCM57765: return "57765"; - case TG3_PHY_ID_BCM5719C: return "5719C"; - case TG3_PHY_ID_BCM5720C: return "5720C"; - case TG3_PHY_ID_BCM8002: return "8002/serdes"; - case 0: return "serdes"; - default: return "unknown"; - } -} - -static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) -{ - if (tg3_flag(tp, PCI_EXPRESS)) { - strcpy(str, "PCI Express"); - return str; - } else if (tg3_flag(tp, PCIX_MODE)) { - u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; - - strcpy(str, 
"PCIX:"); - - if ((clock_ctrl == 7) || - ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == - GRC_MISC_CFG_BOARD_ID_5704CIOBE)) - strcat(str, "133MHz"); - else if (clock_ctrl == 0) - strcat(str, "33MHz"); - else if (clock_ctrl == 2) - strcat(str, "50MHz"); - else if (clock_ctrl == 4) - strcat(str, "66MHz"); - else if (clock_ctrl == 6) - strcat(str, "100MHz"); - } else { - strcpy(str, "PCI:"); - if (tg3_flag(tp, PCI_HIGH_SPEED)) - strcat(str, "66MHz"); - else - strcat(str, "33MHz"); - } - if (tg3_flag(tp, PCI_32BIT)) - strcat(str, ":32-bit"); - else - strcat(str, ":64-bit"); - return str; -} - -static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) -{ - struct pci_dev *peer; - unsigned int func, devnr = tp->pdev->devfn & ~7; - - for (func = 0; func < 8; func++) { - peer = pci_get_slot(tp->pdev->bus, devnr | func); - if (peer && peer != tp->pdev) - break; - pci_dev_put(peer); - } - /* 5704 can be configured in single-port mode, set peer to - * tp->pdev in that case. - */ - if (!peer) { - peer = tp->pdev; - return peer; - } - - /* - * We don't need to keep the refcount elevated; there's no way - * to remove one half of this device without removing the other - */ - pci_dev_put(peer); - - return peer; -} - -static void __devinit tg3_init_coal(struct tg3 *tp) -{ - struct ethtool_coalesce *ec = &tp->coal; - - memset(ec, 0, sizeof(*ec)); - ec->cmd = ETHTOOL_GCOALESCE; - ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; - ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; - ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; - ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; - ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; - ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; - ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; - ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; - ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; - - if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | - HOSTCC_MODE_CLRTICK_TXBD)) { - ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; - ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; - ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; - ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; - } - - if (tg3_flag(tp, 5705_PLUS)) { - ec->rx_coalesce_usecs_irq = 0; - ec->tx_coalesce_usecs_irq = 0; - ec->stats_block_coalesce_usecs = 0; - } -} - -static const struct net_device_ops tg3_netdev_ops = { - .ndo_open = tg3_open, - .ndo_stop = tg3_close, - .ndo_start_xmit = tg3_start_xmit, - .ndo_get_stats64 = tg3_get_stats64, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = tg3_set_rx_mode, - .ndo_set_mac_address = tg3_set_mac_addr, - .ndo_do_ioctl = tg3_ioctl, - .ndo_tx_timeout = tg3_tx_timeout, - .ndo_change_mtu = tg3_change_mtu, - .ndo_fix_features = tg3_fix_features, - .ndo_set_features = tg3_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tg3_poll_controller, -#endif -}; - -static int __devinit tg3_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *dev; - struct tg3 *tp; - int i, err, pm_cap; - u32 sndmbx, rcvmbx, intmbx; - char str[40]; - u64 dma_mask, persist_dma_mask; - u32 features = 0; - - printk_once(KERN_INFO "%s\n", version); - - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); - return err; - } - - err = pci_request_regions(pdev, DRV_MODULE_NAME); - if (err) { - dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); - goto err_out_disable_pdev; - } - - pci_set_master(pdev); - - 
/* Find power-management capability. */ - pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (pm_cap == 0) { - dev_err(&pdev->dev, - "Cannot find Power Management capability, aborting\n"); - err = -EIO; - goto err_out_free_res; - } - - err = pci_set_power_state(pdev, PCI_D0); - if (err) { - dev_err(&pdev->dev, "Transition to D0 failed, aborting\n"); - goto err_out_free_res; - } - - dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); - if (!dev) { - dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); - err = -ENOMEM; - goto err_out_power_down; - } - - SET_NETDEV_DEV(dev, &pdev->dev); - - tp = netdev_priv(dev); - tp->pdev = pdev; - tp->dev = dev; - tp->pm_cap = pm_cap; - tp->rx_mode = TG3_DEF_RX_MODE; - tp->tx_mode = TG3_DEF_TX_MODE; - - if (tg3_debug > 0) - tp->msg_enable = tg3_debug; - else - tp->msg_enable = TG3_DEF_MSG_ENABLE; - - /* The word/byte swap controls here control register access byte - * swapping. DMA data byte swapping is controlled in the GRC_MODE - * setting below. - */ - tp->misc_host_ctrl = - MISC_HOST_CTRL_MASK_PCI_INT | - MISC_HOST_CTRL_WORD_SWAP | - MISC_HOST_CTRL_INDIR_ACCESS | - MISC_HOST_CTRL_PCISTATE_RW; - - /* The NONFRM (non-frame) byte/word swap controls take effect - * on descriptor entries, anything which isn't packet data. - * - * The StrongARM chips on the board (one for tx, one for rx) - * are running in big-endian mode. - */ - tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | - GRC_MODE_WSWAP_NONFRM_DATA); -#ifdef __BIG_ENDIAN - tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; -#endif - spin_lock_init(&tp->lock); - spin_lock_init(&tp->indirect_lock); - INIT_WORK(&tp->reset_task, tg3_reset_task); - - tp->regs = pci_ioremap_bar(pdev, BAR_0); - if (!tp->regs) { - dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); - err = -ENOMEM; - goto err_out_free_dev; - } - - if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || - tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) { - tg3_flag_set(tp, ENABLE_APE); - tp->aperegs = pci_ioremap_bar(pdev, BAR_2); - if (!tp->aperegs) { - dev_err(&pdev->dev, - "Cannot map APE registers, aborting\n"); - err = -ENOMEM; - goto err_out_iounmap; - } - } - - tp->rx_pending = TG3_DEF_RX_RING_PENDING; - tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; - - dev->ethtool_ops = &tg3_ethtool_ops; - dev->watchdog_timeo = TG3_TX_TIMEOUT; - dev->netdev_ops = &tg3_netdev_ops; - dev->irq = pdev->irq; - - err = tg3_get_invariants(tp); - if (err) { - dev_err(&pdev->dev, - "Problem fetching invariants of chip, aborting\n"); - goto err_out_apeunmap; - } - - /* The EPB bridge inside 5714, 5715, and 5780 and any - * device behind the EPB cannot support DMA addresses > 40-bit. - * On 64-bit systems with IOMMU, use 40-bit dma_mask. - * On 64-bit systems without IOMMU, use 64-bit dma_mask and - * do DMA address check in tg3_start_xmit(). - */ - if (tg3_flag(tp, IS_5788)) - persist_dma_mask = dma_mask = DMA_BIT_MASK(32); - else if (tg3_flag(tp, 40BIT_DMA_BUG)) { - persist_dma_mask = dma_mask = DMA_BIT_MASK(40); -#ifdef CONFIG_HIGHMEM - dma_mask = DMA_BIT_MASK(64); -#endif - } else - persist_dma_mask = dma_mask = DMA_BIT_MASK(64); - - /* Configure DMA attributes. 
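
The mask selection just above is a three-way ladder: 5788 parts are limited to 32-bit DMA; parts with the 40-bit DMA bug get a 40-bit coherent mask, widened to a 64-bit streaming mask under CONFIG_HIGHMEM with per-packet address checks in tg3_start_xmit(); everything else runs fully 64-bit. A compressed standalone version of the same decision (flag values assumed; DMA_BIT_MASK() matches the kernel's definition):

#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
    int is_5788 = 0, dma_40bit_bug = 1, highmem = 1;    /* assumed */
    unsigned long long dma_mask, persist_dma_mask;

    if (is_5788) {
        persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
    } else if (dma_40bit_bug) {
        persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
        if (highmem)
            dma_mask = DMA_BIT_MASK(64);
    } else {
        persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
    }
    printf("streaming %#llx, coherent %#llx\n",
           dma_mask, persist_dma_mask);
    return 0;
}
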
*/
- if (dma_mask > DMA_BIT_MASK(32)) {
- err = pci_set_dma_mask(pdev, dma_mask);
- if (!err) {
- features |= NETIF_F_HIGHDMA;
- err = pci_set_consistent_dma_mask(pdev,
- persist_dma_mask);
- if (err < 0) {
- dev_err(&pdev->dev, "Unable to obtain 64 bit "
- "DMA for consistent allocations\n");
- goto err_out_apeunmap;
- }
- }
- }
- if (err || dma_mask == DMA_BIT_MASK(32)) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_out_apeunmap;
- }
- }
-
- tg3_init_bufmgr_config(tp);
-
- features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-
- /* 5700 B0 chips do not support checksumming correctly due
- * to hardware bugs.
- */
- if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
- features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-
- if (tg3_flag(tp, 5755_PLUS))
- features |= NETIF_F_IPV6_CSUM;
- }
-
- /* TSO is on by default on chips that support hardware TSO.
- * Firmware TSO on older chips gives lower performance, so it
- * is off by default, but can be enabled using ethtool.
- */
- if ((tg3_flag(tp, HW_TSO_1) ||
- tg3_flag(tp, HW_TSO_2) ||
- tg3_flag(tp, HW_TSO_3)) &&
- (features & NETIF_F_IP_CSUM))
- features |= NETIF_F_TSO;
- if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
- if (features & NETIF_F_IPV6_CSUM)
- features |= NETIF_F_TSO6;
- if (tg3_flag(tp, HW_TSO_3) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
- features |= NETIF_F_TSO_ECN;
- }
-
- dev->features |= features;
- dev->vlan_features |= features;
-
- /*
- * Add loopback capability only for a subset of devices that support
- * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
- * loopback for the remaining devices.
- */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
- !tg3_flag(tp, CPMU_PRESENT))
- /* Add the loopback capability */
- features |= NETIF_F_LOOPBACK;
-
- dev->hw_features |= features;
-
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
- !tg3_flag(tp, TSO_CAPABLE) &&
- !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
- tg3_flag_set(tp, MAX_RXPEND_64);
- tp->rx_pending = 63;
- }
-
- err = tg3_get_device_address(tp);
- if (err) {
- dev_err(&pdev->dev,
- "Could not obtain valid ethernet address, aborting\n");
- goto err_out_apeunmap;
- }
-
- /*
- * Reset chip in case UNDI or EFI driver did not shut down.
- * DMA self test will enable WDMAC and we'll see (spurious)
- * pending DMA on the PCI bus at that point.
- */ - if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || - (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { - tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - } - - err = tg3_test_dma(tp); - if (err) { - dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); - goto err_out_apeunmap; - } - - intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; - rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; - sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; - for (i = 0; i < tp->irq_max; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - - tnapi->tp = tp; - tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; - - tnapi->int_mbox = intmbx; - if (i < 4) - intmbx += 0x8; - else - intmbx += 0x4; - - tnapi->consmbox = rcvmbx; - tnapi->prodmbox = sndmbx; - - if (i) - tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); - else - tnapi->coal_now = HOSTCC_MODE_NOW; - - if (!tg3_flag(tp, SUPPORT_MSIX)) - break; - - /* - * If we support MSIX, we'll be using RSS. If we're using - * RSS, the first vector only handles link interrupts and the - * remaining vectors handle rx and tx interrupts. Reuse the - * mailbox values for the next iteration. The values we setup - * above are still useful for the single vectored mode. - */ - if (!i) - continue; - - rcvmbx += 0x8; - - if (sndmbx & 0x4) - sndmbx -= 0x4; - else - sndmbx += 0xc; - } - - tg3_init_coal(tp); - - pci_set_drvdata(pdev, dev); - - if (tg3_flag(tp, 5717_PLUS)) { - /* Resume a low-power mode */ - tg3_frob_aux_power(tp, false); - } - - err = register_netdev(dev); - if (err) { - dev_err(&pdev->dev, "Cannot register net device, aborting\n"); - goto err_out_apeunmap; - } - - netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", - tp->board_part_number, - tp->pci_chip_rev_id, - tg3_bus_string(tp, str), - dev->dev_addr); - - if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - netdev_info(dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", - phydev->drv->name, dev_name(&phydev->dev)); - } else { - char *ethtype; - - if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) - ethtype = "10/100Base-TX"; - else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) - ethtype = "1000Base-SX"; - else - ethtype = "10/100/1000Base-T"; - - netdev_info(dev, "attached PHY is %s (%s Ethernet) " - "(WireSpeed[%d], EEE[%d])\n", - tg3_phy_string(tp), ethtype, - (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, - (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); - } - - netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", - (dev->features & NETIF_F_RXCSUM) != 0, - tg3_flag(tp, USE_LINKCHG_REG) != 0, - (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, - tg3_flag(tp, ENABLE_ASF) != 0, - tg3_flag(tp, TSO_CAPABLE) != 0); - netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", - tp->dma_rwctrl, - pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : - ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 
40 : 64); - - pci_save_state(pdev); - - return 0; - -err_out_apeunmap: - if (tp->aperegs) { - iounmap(tp->aperegs); - tp->aperegs = NULL; - } - -err_out_iounmap: - if (tp->regs) { - iounmap(tp->regs); - tp->regs = NULL; - } - -err_out_free_dev: - free_netdev(dev); - -err_out_power_down: - pci_set_power_state(pdev, PCI_D3hot); - -err_out_free_res: - pci_release_regions(pdev); - -err_out_disable_pdev: - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - return err; -} - -static void __devexit tg3_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - if (dev) { - struct tg3 *tp = netdev_priv(dev); - - if (tp->fw) - release_firmware(tp->fw); - - cancel_work_sync(&tp->reset_task); - - if (!tg3_flag(tp, USE_PHYLIB)) { - tg3_phy_fini(tp); - tg3_mdio_fini(tp); - } - - unregister_netdev(dev); - if (tp->aperegs) { - iounmap(tp->aperegs); - tp->aperegs = NULL; - } - if (tp->regs) { - iounmap(tp->regs); - tp->regs = NULL; - } - free_netdev(dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - } -} - -#ifdef CONFIG_PM_SLEEP -static int tg3_suspend(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); - struct tg3 *tp = netdev_priv(dev); - int err; - - if (!netif_running(dev)) - return 0; - - flush_work_sync(&tp->reset_task); - tg3_phy_stop(tp); - tg3_netif_stop(tp); - - del_timer_sync(&tp->timer); - - tg3_full_lock(tp, 1); - tg3_disable_ints(tp); - tg3_full_unlock(tp); - - netif_device_detach(dev); - - tg3_full_lock(tp, 0); - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_flag_clear(tp, INIT_COMPLETE); - tg3_full_unlock(tp); - - err = tg3_power_down_prepare(tp); - if (err) { - int err2; - - tg3_full_lock(tp, 0); - - tg3_flag_set(tp, INIT_COMPLETE); - err2 = tg3_restart_hw(tp, 1); - if (err2) - goto out; - - tp->timer.expires = jiffies + tp->timer_offset; - add_timer(&tp->timer); - - netif_device_attach(dev); - tg3_netif_start(tp); - -out: - tg3_full_unlock(tp); - - if (!err2) - tg3_phy_start(tp); - } - - return err; -} - -static int tg3_resume(struct device *device) -{ - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); - struct tg3 *tp = netdev_priv(dev); - int err; - - if (!netif_running(dev)) - return 0; - - netif_device_attach(dev); - - tg3_full_lock(tp, 0); - - tg3_flag_set(tp, INIT_COMPLETE); - err = tg3_restart_hw(tp, 1); - if (err) - goto out; - - tp->timer.expires = jiffies + tp->timer_offset; - add_timer(&tp->timer); - - tg3_netif_start(tp); - -out: - tg3_full_unlock(tp); - - if (!err) - tg3_phy_start(tp); - - return err; -} - -static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); -#define TG3_PM_OPS (&tg3_pm_ops) - -#else - -#define TG3_PM_OPS NULL - -#endif /* CONFIG_PM_SLEEP */ - -/** - * tg3_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state - * - * This function is called after a PCI bus error affecting - * this device has been detected. 
- */
-static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct tg3 *tp = netdev_priv(netdev);
- pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
-
- netdev_info(netdev, "PCI I/O error detected\n");
-
- rtnl_lock();
-
- if (!netif_running(netdev))
- goto done;
-
- tg3_phy_stop(tp);
-
- tg3_netif_stop(tp);
-
- del_timer_sync(&tp->timer);
- tg3_flag_clear(tp, RESTART_TIMER);
-
- /* Want to make sure that the reset task doesn't run */
- cancel_work_sync(&tp->reset_task);
- tg3_flag_clear(tp, TX_RECOVERY_PENDING);
- tg3_flag_clear(tp, RESTART_TIMER);
-
- netif_device_detach(netdev);
-
- /* Clean up software state, even if MMIO is blocked */
- tg3_full_lock(tp, 0);
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- tg3_full_unlock(tp);
-
-done:
- if (state == pci_channel_io_perm_failure)
- err = PCI_ERS_RESULT_DISCONNECT;
- else
- pci_disable_device(pdev);
-
- rtnl_unlock();
-
- return err;
-}
-
-/**
- * tg3_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has experienced a hard reset,
- * followed by fixups by BIOS, and has its config space
- * set up identically to what it was at cold boot.
- */
-static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct tg3 *tp = netdev_priv(netdev);
- pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
- int err;
-
- rtnl_lock();
-
- if (pci_enable_device(pdev)) {
- netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
- goto done;
- }
-
- pci_set_master(pdev);
- pci_restore_state(pdev);
- pci_save_state(pdev);
-
- if (!netif_running(netdev)) {
- rc = PCI_ERS_RESULT_RECOVERED;
- goto done;
- }
-
- err = tg3_power_up(tp);
- if (err)
- goto done;
-
- rc = PCI_ERS_RESULT_RECOVERED;
-
-done:
- rtnl_unlock();
-
- return rc;
-}
-
-/**
- * tg3_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells
- * us that it's OK to resume normal operation.
- */ -static void tg3_io_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct tg3 *tp = netdev_priv(netdev); - int err; - - rtnl_lock(); - - if (!netif_running(netdev)) - goto done; - - tg3_full_lock(tp, 0); - tg3_flag_set(tp, INIT_COMPLETE); - err = tg3_restart_hw(tp, 1); - tg3_full_unlock(tp); - if (err) { - netdev_err(netdev, "Cannot restart hardware after reset.\n"); - goto done; - } - - netif_device_attach(netdev); - - tp->timer.expires = jiffies + tp->timer_offset; - add_timer(&tp->timer); - - tg3_netif_start(tp); - - tg3_phy_start(tp); - -done: - rtnl_unlock(); -} - -static struct pci_error_handlers tg3_err_handler = { - .error_detected = tg3_io_error_detected, - .slot_reset = tg3_io_slot_reset, - .resume = tg3_io_resume -}; - -static struct pci_driver tg3_driver = { - .name = DRV_MODULE_NAME, - .id_table = tg3_pci_tbl, - .probe = tg3_init_one, - .remove = __devexit_p(tg3_remove_one), - .err_handler = &tg3_err_handler, - .driver.pm = TG3_PM_OPS, -}; - -static int __init tg3_init(void) -{ - return pci_register_driver(&tg3_driver); -} - -static void __exit tg3_cleanup(void) -{ - pci_unregister_driver(&tg3_driver); -} - -module_init(tg3_init); -module_exit(tg3_cleanup); diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h deleted file mode 100644 index 2ea456d..0000000 --- a/drivers/net/tg3.h +++ /dev/null @@ -1,3183 +0,0 @@ -/* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $ - * tg3.h: Definitions for Broadcom Tigon3 ethernet driver. - * - * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) - * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) - * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2007-2011 Broadcom Corporation. - */ - -#ifndef _T3_H -#define _T3_H - -#define TG3_64BIT_REG_HIGH 0x00UL -#define TG3_64BIT_REG_LOW 0x04UL - -/* Descriptor block info. */ -#define TG3_BDINFO_HOST_ADDR 0x0UL /* 64-bit */ -#define TG3_BDINFO_MAXLEN_FLAGS 0x8UL /* 32-bit */ -#define BDINFO_FLAGS_USE_EXT_RECV 0x00000001 /* ext rx_buffer_desc */ -#define BDINFO_FLAGS_DISABLED 0x00000002 -#define BDINFO_FLAGS_MAXLEN_MASK 0xffff0000 -#define BDINFO_FLAGS_MAXLEN_SHIFT 16 -#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */ -#define TG3_BDINFO_SIZE 0x10UL - -#define TG3_RX_STD_MAX_SIZE_5700 512 -#define TG3_RX_STD_MAX_SIZE_5717 2048 -#define TG3_RX_JMB_MAX_SIZE_5700 256 -#define TG3_RX_JMB_MAX_SIZE_5717 1024 -#define TG3_RX_RET_MAX_SIZE_5700 1024 -#define TG3_RX_RET_MAX_SIZE_5705 512 -#define TG3_RX_RET_MAX_SIZE_5717 4096 - -/* First 256 bytes are a mirror of PCI config space. 
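
The TG3_BDINFO_MAXLEN_FLAGS word above packs a ring's maximum element length into its top 16 bits, per the MAXLEN mask/shift pair. Extracting it, with an assumed flags value:

#include <stdio.h>

#define BDINFO_FLAGS_MAXLEN_MASK    0xffff0000
#define BDINFO_FLAGS_MAXLEN_SHIFT   16

int main(void)
{
    unsigned int flags = 0x05dc0002;    /* maxlen 1500 + DISABLED bit */

    printf("maxlen = %u\n",
           (flags & BDINFO_FLAGS_MAXLEN_MASK) >> BDINFO_FLAGS_MAXLEN_SHIFT);
    return 0;
}
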
*/ -#define TG3PCI_VENDOR 0x00000000 -#define TG3PCI_VENDOR_BROADCOM 0x14e4 -#define TG3PCI_DEVICE 0x00000002 -#define TG3PCI_DEVICE_TIGON3_1 0x1644 /* BCM5700 */ -#define TG3PCI_DEVICE_TIGON3_2 0x1645 /* BCM5701 */ -#define TG3PCI_DEVICE_TIGON3_3 0x1646 /* BCM5702 */ -#define TG3PCI_DEVICE_TIGON3_4 0x1647 /* BCM5703 */ -#define TG3PCI_DEVICE_TIGON3_5761S 0x1688 -#define TG3PCI_DEVICE_TIGON3_5761SE 0x1689 -#define TG3PCI_DEVICE_TIGON3_57780 0x1692 -#define TG3PCI_DEVICE_TIGON3_57760 0x1690 -#define TG3PCI_DEVICE_TIGON3_57790 0x1694 -#define TG3PCI_DEVICE_TIGON3_57788 0x1691 -#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ -#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ -#define TG3PCI_DEVICE_TIGON3_5717 0x1655 -#define TG3PCI_DEVICE_TIGON3_5718 0x1656 -#define TG3PCI_DEVICE_TIGON3_57781 0x16b1 -#define TG3PCI_DEVICE_TIGON3_57785 0x16b5 -#define TG3PCI_DEVICE_TIGON3_57761 0x16b0 -#define TG3PCI_DEVICE_TIGON3_57765 0x16b4 -#define TG3PCI_DEVICE_TIGON3_57791 0x16b2 -#define TG3PCI_DEVICE_TIGON3_57795 0x16b6 -#define TG3PCI_DEVICE_TIGON3_5719 0x1657 -#define TG3PCI_DEVICE_TIGON3_5720 0x165f -/* 0x04 --> 0x2c unused */ -#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5 0x0001 -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6 0x0002 -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9 0x0003 -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1 0x0005 -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8 0x0006 -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7 0x0007 -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10 0x0008 -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12 0x8008 -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1 0x0009 -#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2 0x8009 -#define TG3PCI_SUBVENDOR_ID_3COM PCI_VENDOR_ID_3COM -#define TG3PCI_SUBDEVICE_ID_3COM_3C996T 0x1000 -#define TG3PCI_SUBDEVICE_ID_3COM_3C996BT 0x1006 -#define TG3PCI_SUBDEVICE_ID_3COM_3C996SX 0x1004 -#define TG3PCI_SUBDEVICE_ID_3COM_3C1000T 0x1007 -#define TG3PCI_SUBDEVICE_ID_3COM_3C940BR01 0x1008 -#define TG3PCI_SUBVENDOR_ID_DELL PCI_VENDOR_ID_DELL -#define TG3PCI_SUBDEVICE_ID_DELL_VIPER 0x00d1 -#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 -#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 -#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a -#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ -#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c -#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a -#define TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING 0x007d -#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780 0x0085 -#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099 -#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM -#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281 -/* 0x30 --> 0x64 unused */ -#define TG3PCI_MSI_DATA 0x00000064 -/* 0x66 --> 0x68 unused */ -#define TG3PCI_MISC_HOST_CTRL 0x00000068 -#define MISC_HOST_CTRL_CLEAR_INT 0x00000001 -#define MISC_HOST_CTRL_MASK_PCI_INT 0x00000002 -#define MISC_HOST_CTRL_BYTE_SWAP 0x00000004 -#define MISC_HOST_CTRL_WORD_SWAP 0x00000008 -#define MISC_HOST_CTRL_PCISTATE_RW 0x00000010 -#define MISC_HOST_CTRL_CLKREG_RW 0x00000020 -#define MISC_HOST_CTRL_REGWORD_SWAP 0x00000040 -#define MISC_HOST_CTRL_INDIR_ACCESS 0x00000080 -#define MISC_HOST_CTRL_IRQ_MASK_MODE 0x00000100 -#define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200 -#define MISC_HOST_CTRL_CHIPREV 0xffff0000 -#define MISC_HOST_CTRL_CHIPREV_SHIFT 16 -#define GET_CHIP_REV_ID(MISC_HOST_CTRL) \ - (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \ - 
MISC_HOST_CTRL_CHIPREV_SHIFT) -#define CHIPREV_ID_5700_A0 0x7000 -#define CHIPREV_ID_5700_A1 0x7001 -#define CHIPREV_ID_5700_B0 0x7100 -#define CHIPREV_ID_5700_B1 0x7101 -#define CHIPREV_ID_5700_B3 0x7102 -#define CHIPREV_ID_5700_ALTIMA 0x7104 -#define CHIPREV_ID_5700_C0 0x7200 -#define CHIPREV_ID_5701_A0 0x0000 -#define CHIPREV_ID_5701_B0 0x0100 -#define CHIPREV_ID_5701_B2 0x0102 -#define CHIPREV_ID_5701_B5 0x0105 -#define CHIPREV_ID_5703_A0 0x1000 -#define CHIPREV_ID_5703_A1 0x1001 -#define CHIPREV_ID_5703_A2 0x1002 -#define CHIPREV_ID_5703_A3 0x1003 -#define CHIPREV_ID_5704_A0 0x2000 -#define CHIPREV_ID_5704_A1 0x2001 -#define CHIPREV_ID_5704_A2 0x2002 -#define CHIPREV_ID_5704_A3 0x2003 -#define CHIPREV_ID_5705_A0 0x3000 -#define CHIPREV_ID_5705_A1 0x3001 -#define CHIPREV_ID_5705_A2 0x3002 -#define CHIPREV_ID_5705_A3 0x3003 -#define CHIPREV_ID_5750_A0 0x4000 -#define CHIPREV_ID_5750_A1 0x4001 -#define CHIPREV_ID_5750_A3 0x4003 -#define CHIPREV_ID_5750_C2 0x4202 -#define CHIPREV_ID_5752_A0_HW 0x5000 -#define CHIPREV_ID_5752_A0 0x6000 -#define CHIPREV_ID_5752_A1 0x6001 -#define CHIPREV_ID_5714_A2 0x9002 -#define CHIPREV_ID_5906_A1 0xc001 -#define CHIPREV_ID_57780_A0 0x57780000 -#define CHIPREV_ID_57780_A1 0x57780001 -#define CHIPREV_ID_5717_A0 0x05717000 -#define CHIPREV_ID_57765_A0 0x57785000 -#define CHIPREV_ID_5719_A0 0x05719000 -#define CHIPREV_ID_5720_A0 0x05720000 -#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) -#define ASIC_REV_5700 0x07 -#define ASIC_REV_5701 0x00 -#define ASIC_REV_5703 0x01 -#define ASIC_REV_5704 0x02 -#define ASIC_REV_5705 0x03 -#define ASIC_REV_5750 0x04 -#define ASIC_REV_5752 0x06 -#define ASIC_REV_5780 0x08 -#define ASIC_REV_5714 0x09 -#define ASIC_REV_5755 0x0a -#define ASIC_REV_5787 0x0b -#define ASIC_REV_5906 0x0c -#define ASIC_REV_USE_PROD_ID_REG 0x0f -#define ASIC_REV_5784 0x5784 -#define ASIC_REV_5761 0x5761 -#define ASIC_REV_5785 0x5785 -#define ASIC_REV_57780 0x57780 -#define ASIC_REV_5717 0x5717 -#define ASIC_REV_57765 0x57785 -#define ASIC_REV_5719 0x5719 -#define ASIC_REV_5720 0x5720 -#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) -#define CHIPREV_5700_AX 0x70 -#define CHIPREV_5700_BX 0x71 -#define CHIPREV_5700_CX 0x72 -#define CHIPREV_5701_AX 0x00 -#define CHIPREV_5703_AX 0x10 -#define CHIPREV_5704_AX 0x20 -#define CHIPREV_5704_BX 0x21 -#define CHIPREV_5750_AX 0x40 -#define CHIPREV_5750_BX 0x41 -#define CHIPREV_5784_AX 0x57840 -#define CHIPREV_5761_AX 0x57610 -#define CHIPREV_57765_AX 0x577650 -#define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff) -#define METAL_REV_A0 0x00 -#define METAL_REV_A1 0x01 -#define METAL_REV_B0 0x00 -#define METAL_REV_B1 0x01 -#define METAL_REV_B2 0x02 -#define TG3PCI_DMA_RW_CTRL 0x0000006c -#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 -#define DMA_RWCTRL_TAGGED_STAT_WA 0x00000080 -#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380 -#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 -#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 -#define DMA_RWCTRL_READ_BNDRY_16 0x00000100 -#define DMA_RWCTRL_READ_BNDRY_128_PCIX 0x00000100 -#define DMA_RWCTRL_READ_BNDRY_32 0x00000200 -#define DMA_RWCTRL_READ_BNDRY_256_PCIX 0x00000200 -#define DMA_RWCTRL_READ_BNDRY_64 0x00000300 -#define DMA_RWCTRL_READ_BNDRY_384_PCIX 0x00000300 -#define DMA_RWCTRL_READ_BNDRY_128 0x00000400 -#define DMA_RWCTRL_READ_BNDRY_256 0x00000500 -#define DMA_RWCTRL_READ_BNDRY_512 0x00000600 -#define DMA_RWCTRL_READ_BNDRY_1024 0x00000700 -#define DMA_RWCTRL_WRITE_BNDRY_MASK 0x00003800 -#define DMA_RWCTRL_WRITE_BNDRY_DISAB 0x00000000 -#define 
DMA_RWCTRL_WRITE_BNDRY_16 0x00000800 -#define DMA_RWCTRL_WRITE_BNDRY_128_PCIX 0x00000800 -#define DMA_RWCTRL_WRITE_BNDRY_32 0x00001000 -#define DMA_RWCTRL_WRITE_BNDRY_256_PCIX 0x00001000 -#define DMA_RWCTRL_WRITE_BNDRY_64 0x00001800 -#define DMA_RWCTRL_WRITE_BNDRY_384_PCIX 0x00001800 -#define DMA_RWCTRL_WRITE_BNDRY_128 0x00002000 -#define DMA_RWCTRL_WRITE_BNDRY_256 0x00002800 -#define DMA_RWCTRL_WRITE_BNDRY_512 0x00003000 -#define DMA_RWCTRL_WRITE_BNDRY_1024 0x00003800 -#define DMA_RWCTRL_ONE_DMA 0x00004000 -#define DMA_RWCTRL_READ_WATER 0x00070000 -#define DMA_RWCTRL_READ_WATER_SHIFT 16 -#define DMA_RWCTRL_WRITE_WATER 0x00380000 -#define DMA_RWCTRL_WRITE_WATER_SHIFT 19 -#define DMA_RWCTRL_USE_MEM_READ_MULT 0x00400000 -#define DMA_RWCTRL_ASSERT_ALL_BE 0x00800000 -#define DMA_RWCTRL_PCI_READ_CMD 0x0f000000 -#define DMA_RWCTRL_PCI_READ_CMD_SHIFT 24 -#define DMA_RWCTRL_PCI_WRITE_CMD 0xf0000000 -#define DMA_RWCTRL_PCI_WRITE_CMD_SHIFT 28 -#define DMA_RWCTRL_WRITE_BNDRY_64_PCIE 0x10000000 -#define DMA_RWCTRL_WRITE_BNDRY_128_PCIE 0x30000000 -#define DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE 0x70000000 -#define TG3PCI_PCISTATE 0x00000070 -#define PCISTATE_FORCE_RESET 0x00000001 -#define PCISTATE_INT_NOT_ACTIVE 0x00000002 -#define PCISTATE_CONV_PCI_MODE 0x00000004 -#define PCISTATE_BUS_SPEED_HIGH 0x00000008 -#define PCISTATE_BUS_32BIT 0x00000010 -#define PCISTATE_ROM_ENABLE 0x00000020 -#define PCISTATE_ROM_RETRY_ENABLE 0x00000040 -#define PCISTATE_FLAT_VIEW 0x00000100 -#define PCISTATE_RETRY_SAME_DMA 0x00002000 -#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 -#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 -#define PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000 -#define TG3PCI_CLOCK_CTRL 0x00000074 -#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200 -#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400 -#define CLOCK_CTRL_TXCLK_DISABLE 0x00000800 -#define CLOCK_CTRL_ALTCLK 0x00001000 -#define CLOCK_CTRL_PWRDOWN_PLL133 0x00008000 -#define CLOCK_CTRL_44MHZ_CORE 0x00040000 -#define CLOCK_CTRL_625_CORE 0x00100000 -#define CLOCK_CTRL_FORCE_CLKRUN 0x00200000 -#define CLOCK_CTRL_CLKRUN_OENABLE 0x00400000 -#define CLOCK_CTRL_DELAY_PCI_GRANT 0x80000000 -#define TG3PCI_REG_BASE_ADDR 0x00000078 -#define TG3PCI_MEM_WIN_BASE_ADDR 0x0000007c -#define TG3PCI_REG_DATA 0x00000080 -#define TG3PCI_MEM_WIN_DATA 0x00000084 -#define TG3PCI_MISC_LOCAL_CTRL 0x00000090 -/* 0x94 --> 0x98 unused */ -#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ -#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ -/* 0xa8 --> 0xb8 unused */ -#define TG3PCI_DUAL_MAC_CTRL 0x000000b8 -#define DUAL_MAC_CTRL_CH_MASK 0x00000003 -#define DUAL_MAC_CTRL_ID 0x00000004 -#define TG3PCI_PRODID_ASICREV 0x000000bc -#define PROD_ID_ASIC_REV_MASK 0x0fffffff -/* 0xc0 --> 0xf4 unused */ - -#define TG3PCI_GEN2_PRODID_ASICREV 0x000000f4 -#define TG3PCI_GEN15_PRODID_ASICREV 0x000000fc -/* 0xf8 --> 0x200 unused */ - -#define TG3_CORR_ERR_STAT 0x00000110 -#define TG3_CORR_ERR_STAT_CLEAR 0xffffffff -/* 0x114 --> 0x200 unused */ - -/* Mailbox registers */ -#define MAILBOX_INTERRUPT_0 0x00000200 /* 64-bit */ -#define MAILBOX_INTERRUPT_1 0x00000208 /* 64-bit */ -#define MAILBOX_INTERRUPT_2 0x00000210 /* 64-bit */ -#define MAILBOX_INTERRUPT_3 0x00000218 /* 64-bit */ -#define MAILBOX_GENERAL_0 0x00000220 /* 64-bit */ -#define MAILBOX_GENERAL_1 0x00000228 /* 64-bit */ -#define MAILBOX_GENERAL_2 0x00000230 /* 64-bit */ -#define MAILBOX_GENERAL_3 0x00000238 /* 64-bit */ -#define MAILBOX_GENERAL_4 0x00000240 /* 64-bit */ -#define MAILBOX_GENERAL_5 0x00000248 /* 64-bit */ -#define 
MAILBOX_GENERAL_6 0x00000250 /* 64-bit */ -#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */ -#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */ -#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */ -#define TG3_RX_STD_PROD_IDX_REG (MAILBOX_RCV_STD_PROD_IDX + \ - TG3_64BIT_REG_LOW) -#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */ -#define TG3_RX_JMB_PROD_IDX_REG (MAILBOX_RCV_JUMBO_PROD_IDX + \ - TG3_64BIT_REG_LOW) -#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_2 0x00000290 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_3 0x00000298 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_4 0x000002a0 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_5 0x000002a8 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_6 0x000002b0 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_7 0x000002b8 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_8 0x000002c0 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_9 0x000002c8 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_10 0x000002d0 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_11 0x000002d8 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_12 0x000002e0 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_13 0x000002e8 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_14 0x000002f0 /* 64-bit */ -#define MAILBOX_RCVRET_CON_IDX_15 0x000002f8 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_0 0x00000300 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_1 0x00000308 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_2 0x00000310 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_3 0x00000318 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_4 0x00000320 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_5 0x00000328 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_6 0x00000330 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_7 0x00000338 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_8 0x00000340 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_9 0x00000348 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_10 0x00000350 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_11 0x00000358 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_12 0x00000360 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_13 0x00000368 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_14 0x00000370 /* 64-bit */ -#define MAILBOX_SNDHOST_PROD_IDX_15 0x00000378 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_0 0x00000380 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_1 0x00000388 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_2 0x00000390 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_3 0x00000398 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_4 0x000003a0 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_5 0x000003a8 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_6 0x000003b0 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_7 0x000003b8 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_8 0x000003c0 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_9 0x000003c8 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_10 0x000003d0 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_11 0x000003d8 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_12 0x000003e0 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_13 0x000003e8 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_14 0x000003f0 /* 64-bit */ -#define MAILBOX_SNDNIC_PROD_IDX_15 0x000003f8 /* 64-bit */ - -/* MAC control registers */ -#define MAC_MODE 0x00000400 -#define MAC_MODE_RESET 0x00000001 -#define MAC_MODE_HALF_DUPLEX 0x00000002 -#define MAC_MODE_PORT_MODE_MASK 0x0000000c -#define MAC_MODE_PORT_MODE_TBI 
0x0000000c -#define MAC_MODE_PORT_MODE_GMII 0x00000008 -#define MAC_MODE_PORT_MODE_MII 0x00000004 -#define MAC_MODE_PORT_MODE_NONE 0x00000000 -#define MAC_MODE_PORT_INT_LPBACK 0x00000010 -#define MAC_MODE_TAGGED_MAC_CTRL 0x00000080 -#define MAC_MODE_TX_BURSTING 0x00000100 -#define MAC_MODE_MAX_DEFER 0x00000200 -#define MAC_MODE_LINK_POLARITY 0x00000400 -#define MAC_MODE_RXSTAT_ENABLE 0x00000800 -#define MAC_MODE_RXSTAT_CLEAR 0x00001000 -#define MAC_MODE_RXSTAT_FLUSH 0x00002000 -#define MAC_MODE_TXSTAT_ENABLE 0x00004000 -#define MAC_MODE_TXSTAT_CLEAR 0x00008000 -#define MAC_MODE_TXSTAT_FLUSH 0x00010000 -#define MAC_MODE_SEND_CONFIGS 0x00020000 -#define MAC_MODE_MAGIC_PKT_ENABLE 0x00040000 -#define MAC_MODE_ACPI_ENABLE 0x00080000 -#define MAC_MODE_MIP_ENABLE 0x00100000 -#define MAC_MODE_TDE_ENABLE 0x00200000 -#define MAC_MODE_RDE_ENABLE 0x00400000 -#define MAC_MODE_FHDE_ENABLE 0x00800000 -#define MAC_MODE_KEEP_FRAME_IN_WOL 0x01000000 -#define MAC_MODE_APE_RX_EN 0x08000000 -#define MAC_MODE_APE_TX_EN 0x10000000 -#define MAC_STATUS 0x00000404 -#define MAC_STATUS_PCS_SYNCED 0x00000001 -#define MAC_STATUS_SIGNAL_DET 0x00000002 -#define MAC_STATUS_RCVD_CFG 0x00000004 -#define MAC_STATUS_CFG_CHANGED 0x00000008 -#define MAC_STATUS_SYNC_CHANGED 0x00000010 -#define MAC_STATUS_PORT_DEC_ERR 0x00000400 -#define MAC_STATUS_LNKSTATE_CHANGED 0x00001000 -#define MAC_STATUS_MI_COMPLETION 0x00400000 -#define MAC_STATUS_MI_INTERRUPT 0x00800000 -#define MAC_STATUS_AP_ERROR 0x01000000 -#define MAC_STATUS_ODI_ERROR 0x02000000 -#define MAC_STATUS_RXSTAT_OVERRUN 0x04000000 -#define MAC_STATUS_TXSTAT_OVERRUN 0x08000000 -#define MAC_EVENT 0x00000408 -#define MAC_EVENT_PORT_DECODE_ERR 0x00000400 -#define MAC_EVENT_LNKSTATE_CHANGED 0x00001000 -#define MAC_EVENT_MI_COMPLETION 0x00400000 -#define MAC_EVENT_MI_INTERRUPT 0x00800000 -#define MAC_EVENT_AP_ERROR 0x01000000 -#define MAC_EVENT_ODI_ERROR 0x02000000 -#define MAC_EVENT_RXSTAT_OVERRUN 0x04000000 -#define MAC_EVENT_TXSTAT_OVERRUN 0x08000000 -#define MAC_LED_CTRL 0x0000040c -#define LED_CTRL_LNKLED_OVERRIDE 0x00000001 -#define LED_CTRL_1000MBPS_ON 0x00000002 -#define LED_CTRL_100MBPS_ON 0x00000004 -#define LED_CTRL_10MBPS_ON 0x00000008 -#define LED_CTRL_TRAFFIC_OVERRIDE 0x00000010 -#define LED_CTRL_TRAFFIC_BLINK 0x00000020 -#define LED_CTRL_TRAFFIC_LED 0x00000040 -#define LED_CTRL_1000MBPS_STATUS 0x00000080 -#define LED_CTRL_100MBPS_STATUS 0x00000100 -#define LED_CTRL_10MBPS_STATUS 0x00000200 -#define LED_CTRL_TRAFFIC_STATUS 0x00000400 -#define LED_CTRL_MODE_MAC 0x00000000 -#define LED_CTRL_MODE_PHY_1 0x00000800 -#define LED_CTRL_MODE_PHY_2 0x00001000 -#define LED_CTRL_MODE_SHASTA_MAC 0x00002000 -#define LED_CTRL_MODE_SHARED 0x00004000 -#define LED_CTRL_MODE_COMBO 0x00008000 -#define LED_CTRL_BLINK_RATE_MASK 0x7ff80000 -#define LED_CTRL_BLINK_RATE_SHIFT 19 -#define LED_CTRL_BLINK_PER_OVERRIDE 0x00080000 -#define LED_CTRL_BLINK_RATE_OVERRIDE 0x80000000 -#define MAC_ADDR_0_HIGH 0x00000410 /* upper 2 bytes */ -#define MAC_ADDR_0_LOW 0x00000414 /* lower 4 bytes */ -#define MAC_ADDR_1_HIGH 0x00000418 /* upper 2 bytes */ -#define MAC_ADDR_1_LOW 0x0000041c /* lower 4 bytes */ -#define MAC_ADDR_2_HIGH 0x00000420 /* upper 2 bytes */ -#define MAC_ADDR_2_LOW 0x00000424 /* lower 4 bytes */ -#define MAC_ADDR_3_HIGH 0x00000428 /* upper 2 bytes */ -#define MAC_ADDR_3_LOW 0x0000042c /* lower 4 bytes */ -#define MAC_ACPI_MBUF_PTR 0x00000430 -#define MAC_ACPI_LEN_OFFSET 0x00000434 -#define ACPI_LENOFF_LEN_MASK 0x0000ffff -#define ACPI_LENOFF_LEN_SHIFT 0 -#define ACPI_LENOFF_OFF_MASK 
0x0fff0000 -#define ACPI_LENOFF_OFF_SHIFT 16 -#define MAC_TX_BACKOFF_SEED 0x00000438 -#define TX_BACKOFF_SEED_MASK 0x000003ff -#define MAC_RX_MTU_SIZE 0x0000043c -#define RX_MTU_SIZE_MASK 0x0000ffff -#define MAC_PCS_TEST 0x00000440 -#define PCS_TEST_PATTERN_MASK 0x000fffff -#define PCS_TEST_PATTERN_SHIFT 0 -#define PCS_TEST_ENABLE 0x00100000 -#define MAC_TX_AUTO_NEG 0x00000444 -#define TX_AUTO_NEG_MASK 0x0000ffff -#define TX_AUTO_NEG_SHIFT 0 -#define MAC_RX_AUTO_NEG 0x00000448 -#define RX_AUTO_NEG_MASK 0x0000ffff -#define RX_AUTO_NEG_SHIFT 0 -#define MAC_MI_COM 0x0000044c -#define MI_COM_CMD_MASK 0x0c000000 -#define MI_COM_CMD_WRITE 0x04000000 -#define MI_COM_CMD_READ 0x08000000 -#define MI_COM_READ_FAILED 0x10000000 -#define MI_COM_START 0x20000000 -#define MI_COM_BUSY 0x20000000 -#define MI_COM_PHY_ADDR_MASK 0x03e00000 -#define MI_COM_PHY_ADDR_SHIFT 21 -#define MI_COM_REG_ADDR_MASK 0x001f0000 -#define MI_COM_REG_ADDR_SHIFT 16 -#define MI_COM_DATA_MASK 0x0000ffff -#define MAC_MI_STAT 0x00000450 -#define MAC_MI_STAT_LNKSTAT_ATTN_ENAB 0x00000001 -#define MAC_MI_STAT_10MBPS_MODE 0x00000002 -#define MAC_MI_MODE 0x00000454 -#define MAC_MI_MODE_CLK_10MHZ 0x00000001 -#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002 -#define MAC_MI_MODE_AUTO_POLL 0x00000010 -#define MAC_MI_MODE_500KHZ_CONST 0x00008000 -#define MAC_MI_MODE_BASE 0x000c0000 /* XXX magic values XXX */ -#define MAC_AUTO_POLL_STATUS 0x00000458 -#define MAC_AUTO_POLL_ERROR 0x00000001 -#define MAC_TX_MODE 0x0000045c -#define TX_MODE_RESET 0x00000001 -#define TX_MODE_ENABLE 0x00000002 -#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010 -#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 -#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 -#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100 -#define TX_MODE_JMB_FRM_LEN 0x00400000 -#define TX_MODE_CNT_DN_MODE 0x00800000 -#define MAC_TX_STATUS 0x00000460 -#define TX_STATUS_XOFFED 0x00000001 -#define TX_STATUS_SENT_XOFF 0x00000002 -#define TX_STATUS_SENT_XON 0x00000004 -#define TX_STATUS_LINK_UP 0x00000008 -#define TX_STATUS_ODI_UNDERRUN 0x00000010 -#define TX_STATUS_ODI_OVERRUN 0x00000020 -#define MAC_TX_LENGTHS 0x00000464 -#define TX_LENGTHS_SLOT_TIME_MASK 0x000000ff -#define TX_LENGTHS_SLOT_TIME_SHIFT 0 -#define TX_LENGTHS_IPG_MASK 0x00000f00 -#define TX_LENGTHS_IPG_SHIFT 8 -#define TX_LENGTHS_IPG_CRS_MASK 0x00003000 -#define TX_LENGTHS_IPG_CRS_SHIFT 12 -#define TX_LENGTHS_JMB_FRM_LEN_MSK 0x00ff0000 -#define TX_LENGTHS_CNT_DWN_VAL_MSK 0xff000000 -#define MAC_RX_MODE 0x00000468 -#define RX_MODE_RESET 0x00000001 -#define RX_MODE_ENABLE 0x00000002 -#define RX_MODE_FLOW_CTRL_ENABLE 0x00000004 -#define RX_MODE_KEEP_MAC_CTRL 0x00000008 -#define RX_MODE_KEEP_PAUSE 0x00000010 -#define RX_MODE_ACCEPT_OVERSIZED 0x00000020 -#define RX_MODE_ACCEPT_RUNTS 0x00000040 -#define RX_MODE_LEN_CHECK 0x00000080 -#define RX_MODE_PROMISC 0x00000100 -#define RX_MODE_NO_CRC_CHECK 0x00000200 -#define RX_MODE_KEEP_VLAN_TAG 0x00000400 -#define RX_MODE_RSS_IPV4_HASH_EN 0x00010000 -#define RX_MODE_RSS_TCP_IPV4_HASH_EN 0x00020000 -#define RX_MODE_RSS_IPV6_HASH_EN 0x00040000 -#define RX_MODE_RSS_TCP_IPV6_HASH_EN 0x00080000 -#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000 -#define RX_MODE_RSS_ENABLE 0x00800000 -#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000 -#define MAC_RX_STATUS 0x0000046c -#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001 -#define RX_STATUS_XOFF_RCVD 0x00000002 -#define RX_STATUS_XON_RCVD 0x00000004 -#define MAC_HASH_REG_0 0x00000470 -#define MAC_HASH_REG_1 0x00000474 -#define MAC_HASH_REG_2 0x00000478 -#define MAC_HASH_REG_3 0x0000047c 
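The MAC_MI_COM fields defined above drive the MDIO interface to the PHY. A
sketch, not taken verbatim from the driver, of how tg3_readphy()-style code
composes one read cycle from these masks and shifts; phy_addr, reg and val
are assumed locals, and tw32_f()/tr32() are the driver's register accessors:

	u32 frame;

	/* Build the MDIO frame from the MI_COM fields above. */
	frame  = (phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK;
	frame |= (reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK;
	frame |= MI_COM_CMD_READ | MI_COM_START;
	tw32_f(MAC_MI_COM, frame);

	/* ... poll MAC_MI_COM until MI_COM_BUSY clears ... */

	frame = tr32(MAC_MI_COM);
	if (!(frame & MI_COM_READ_FAILED))
		val = frame & MI_COM_DATA_MASK;	/* 16-bit PHY register value */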
-#define MAC_RCV_RULE_0 0x00000480 -#define MAC_RCV_VALUE_0 0x00000484 -#define MAC_RCV_RULE_1 0x00000488 -#define MAC_RCV_VALUE_1 0x0000048c -#define MAC_RCV_RULE_2 0x00000490 -#define MAC_RCV_VALUE_2 0x00000494 -#define MAC_RCV_RULE_3 0x00000498 -#define MAC_RCV_VALUE_3 0x0000049c -#define MAC_RCV_RULE_4 0x000004a0 -#define MAC_RCV_VALUE_4 0x000004a4 -#define MAC_RCV_RULE_5 0x000004a8 -#define MAC_RCV_VALUE_5 0x000004ac -#define MAC_RCV_RULE_6 0x000004b0 -#define MAC_RCV_VALUE_6 0x000004b4 -#define MAC_RCV_RULE_7 0x000004b8 -#define MAC_RCV_VALUE_7 0x000004bc -#define MAC_RCV_RULE_8 0x000004c0 -#define MAC_RCV_VALUE_8 0x000004c4 -#define MAC_RCV_RULE_9 0x000004c8 -#define MAC_RCV_VALUE_9 0x000004cc -#define MAC_RCV_RULE_10 0x000004d0 -#define MAC_RCV_VALUE_10 0x000004d4 -#define MAC_RCV_RULE_11 0x000004d8 -#define MAC_RCV_VALUE_11 0x000004dc -#define MAC_RCV_RULE_12 0x000004e0 -#define MAC_RCV_VALUE_12 0x000004e4 -#define MAC_RCV_RULE_13 0x000004e8 -#define MAC_RCV_VALUE_13 0x000004ec -#define MAC_RCV_RULE_14 0x000004f0 -#define MAC_RCV_VALUE_14 0x000004f4 -#define MAC_RCV_RULE_15 0x000004f8 -#define MAC_RCV_VALUE_15 0x000004fc -#define RCV_RULE_DISABLE_MASK 0x7fffffff -#define MAC_RCV_RULE_CFG 0x00000500 -#define RCV_RULE_CFG_DEFAULT_CLASS 0x00000008 -#define MAC_LOW_WMARK_MAX_RX_FRAME 0x00000504 -/* 0x508 --> 0x520 unused */ -#define MAC_HASHREGU_0 0x00000520 -#define MAC_HASHREGU_1 0x00000524 -#define MAC_HASHREGU_2 0x00000528 -#define MAC_HASHREGU_3 0x0000052c -#define MAC_EXTADDR_0_HIGH 0x00000530 -#define MAC_EXTADDR_0_LOW 0x00000534 -#define MAC_EXTADDR_1_HIGH 0x00000538 -#define MAC_EXTADDR_1_LOW 0x0000053c -#define MAC_EXTADDR_2_HIGH 0x00000540 -#define MAC_EXTADDR_2_LOW 0x00000544 -#define MAC_EXTADDR_3_HIGH 0x00000548 -#define MAC_EXTADDR_3_LOW 0x0000054c -#define MAC_EXTADDR_4_HIGH 0x00000550 -#define MAC_EXTADDR_4_LOW 0x00000554 -#define MAC_EXTADDR_5_HIGH 0x00000558 -#define MAC_EXTADDR_5_LOW 0x0000055c -#define MAC_EXTADDR_6_HIGH 0x00000560 -#define MAC_EXTADDR_6_LOW 0x00000564 -#define MAC_EXTADDR_7_HIGH 0x00000568 -#define MAC_EXTADDR_7_LOW 0x0000056c -#define MAC_EXTADDR_8_HIGH 0x00000570 -#define MAC_EXTADDR_8_LOW 0x00000574 -#define MAC_EXTADDR_9_HIGH 0x00000578 -#define MAC_EXTADDR_9_LOW 0x0000057c -#define MAC_EXTADDR_10_HIGH 0x00000580 -#define MAC_EXTADDR_10_LOW 0x00000584 -#define MAC_EXTADDR_11_HIGH 0x00000588 -#define MAC_EXTADDR_11_LOW 0x0000058c -#define MAC_SERDES_CFG 0x00000590 -#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000 -#define MAC_SERDES_STAT 0x00000594 -/* 0x598 --> 0x5a0 unused */ -#define MAC_PHYCFG1 0x000005a0 -#define MAC_PHYCFG1_RGMII_INT 0x00000001 -#define MAC_PHYCFG1_RXCLK_TO_MASK 0x00001ff0 -#define MAC_PHYCFG1_RXCLK_TIMEOUT 0x00001000 -#define MAC_PHYCFG1_TXCLK_TO_MASK 0x01ff0000 -#define MAC_PHYCFG1_TXCLK_TIMEOUT 0x01000000 -#define MAC_PHYCFG1_RGMII_EXT_RX_DEC 0x02000000 -#define MAC_PHYCFG1_RGMII_SND_STAT_EN 0x04000000 -#define MAC_PHYCFG1_TXC_DRV 0x20000000 -#define MAC_PHYCFG2 0x000005a4 -#define MAC_PHYCFG2_INBAND_ENABLE 0x00000001 -#define MAC_PHYCFG2_EMODE_MASK_MASK 0x000001c0 -#define MAC_PHYCFG2_EMODE_MASK_AC131 0x000000c0 -#define MAC_PHYCFG2_EMODE_MASK_50610 0x00000100 -#define MAC_PHYCFG2_EMODE_MASK_RT8211 0x00000000 -#define MAC_PHYCFG2_EMODE_MASK_RT8201 0x000001c0 -#define MAC_PHYCFG2_EMODE_COMP_MASK 0x00000e00 -#define MAC_PHYCFG2_EMODE_COMP_AC131 0x00000600 -#define MAC_PHYCFG2_EMODE_COMP_50610 0x00000400 -#define MAC_PHYCFG2_EMODE_COMP_RT8211 0x00000800 -#define MAC_PHYCFG2_EMODE_COMP_RT8201 0x00000000 -#define 
MAC_PHYCFG2_FMODE_MASK_MASK 0x00007000 -#define MAC_PHYCFG2_FMODE_MASK_AC131 0x00006000 -#define MAC_PHYCFG2_FMODE_MASK_50610 0x00004000 -#define MAC_PHYCFG2_FMODE_MASK_RT8211 0x00000000 -#define MAC_PHYCFG2_FMODE_MASK_RT8201 0x00007000 -#define MAC_PHYCFG2_FMODE_COMP_MASK 0x00038000 -#define MAC_PHYCFG2_FMODE_COMP_AC131 0x00030000 -#define MAC_PHYCFG2_FMODE_COMP_50610 0x00008000 -#define MAC_PHYCFG2_FMODE_COMP_RT8211 0x00038000 -#define MAC_PHYCFG2_FMODE_COMP_RT8201 0x00000000 -#define MAC_PHYCFG2_GMODE_MASK_MASK 0x001c0000 -#define MAC_PHYCFG2_GMODE_MASK_AC131 0x001c0000 -#define MAC_PHYCFG2_GMODE_MASK_50610 0x00100000 -#define MAC_PHYCFG2_GMODE_MASK_RT8211 0x00000000 -#define MAC_PHYCFG2_GMODE_MASK_RT8201 0x001c0000 -#define MAC_PHYCFG2_GMODE_COMP_MASK 0x00e00000 -#define MAC_PHYCFG2_GMODE_COMP_AC131 0x00e00000 -#define MAC_PHYCFG2_GMODE_COMP_50610 0x00000000 -#define MAC_PHYCFG2_GMODE_COMP_RT8211 0x00200000 -#define MAC_PHYCFG2_GMODE_COMP_RT8201 0x00000000 -#define MAC_PHYCFG2_ACT_MASK_MASK 0x03000000 -#define MAC_PHYCFG2_ACT_MASK_AC131 0x03000000 -#define MAC_PHYCFG2_ACT_MASK_50610 0x01000000 -#define MAC_PHYCFG2_ACT_MASK_RT8211 0x03000000 -#define MAC_PHYCFG2_ACT_MASK_RT8201 0x01000000 -#define MAC_PHYCFG2_ACT_COMP_MASK 0x0c000000 -#define MAC_PHYCFG2_ACT_COMP_AC131 0x00000000 -#define MAC_PHYCFG2_ACT_COMP_50610 0x00000000 -#define MAC_PHYCFG2_ACT_COMP_RT8211 0x00000000 -#define MAC_PHYCFG2_ACT_COMP_RT8201 0x08000000 -#define MAC_PHYCFG2_QUAL_MASK_MASK 0x30000000 -#define MAC_PHYCFG2_QUAL_MASK_AC131 0x30000000 -#define MAC_PHYCFG2_QUAL_MASK_50610 0x30000000 -#define MAC_PHYCFG2_QUAL_MASK_RT8211 0x30000000 -#define MAC_PHYCFG2_QUAL_MASK_RT8201 0x30000000 -#define MAC_PHYCFG2_QUAL_COMP_MASK 0xc0000000 -#define MAC_PHYCFG2_QUAL_COMP_AC131 0x00000000 -#define MAC_PHYCFG2_QUAL_COMP_50610 0x00000000 -#define MAC_PHYCFG2_QUAL_COMP_RT8211 0x00000000 -#define MAC_PHYCFG2_QUAL_COMP_RT8201 0x00000000 -#define MAC_PHYCFG2_50610_LED_MODES \ - (MAC_PHYCFG2_EMODE_MASK_50610 | \ - MAC_PHYCFG2_EMODE_COMP_50610 | \ - MAC_PHYCFG2_FMODE_MASK_50610 | \ - MAC_PHYCFG2_FMODE_COMP_50610 | \ - MAC_PHYCFG2_GMODE_MASK_50610 | \ - MAC_PHYCFG2_GMODE_COMP_50610 | \ - MAC_PHYCFG2_ACT_MASK_50610 | \ - MAC_PHYCFG2_ACT_COMP_50610 | \ - MAC_PHYCFG2_QUAL_MASK_50610 | \ - MAC_PHYCFG2_QUAL_COMP_50610) -#define MAC_PHYCFG2_AC131_LED_MODES \ - (MAC_PHYCFG2_EMODE_MASK_AC131 | \ - MAC_PHYCFG2_EMODE_COMP_AC131 | \ - MAC_PHYCFG2_FMODE_MASK_AC131 | \ - MAC_PHYCFG2_FMODE_COMP_AC131 | \ - MAC_PHYCFG2_GMODE_MASK_AC131 | \ - MAC_PHYCFG2_GMODE_COMP_AC131 | \ - MAC_PHYCFG2_ACT_MASK_AC131 | \ - MAC_PHYCFG2_ACT_COMP_AC131 | \ - MAC_PHYCFG2_QUAL_MASK_AC131 | \ - MAC_PHYCFG2_QUAL_COMP_AC131) -#define MAC_PHYCFG2_RTL8211C_LED_MODES \ - (MAC_PHYCFG2_EMODE_MASK_RT8211 | \ - MAC_PHYCFG2_EMODE_COMP_RT8211 | \ - MAC_PHYCFG2_FMODE_MASK_RT8211 | \ - MAC_PHYCFG2_FMODE_COMP_RT8211 | \ - MAC_PHYCFG2_GMODE_MASK_RT8211 | \ - MAC_PHYCFG2_GMODE_COMP_RT8211 | \ - MAC_PHYCFG2_ACT_MASK_RT8211 | \ - MAC_PHYCFG2_ACT_COMP_RT8211 | \ - MAC_PHYCFG2_QUAL_MASK_RT8211 | \ - MAC_PHYCFG2_QUAL_COMP_RT8211) -#define MAC_PHYCFG2_RTL8201E_LED_MODES \ - (MAC_PHYCFG2_EMODE_MASK_RT8201 | \ - MAC_PHYCFG2_EMODE_COMP_RT8201 | \ - MAC_PHYCFG2_FMODE_MASK_RT8201 | \ - MAC_PHYCFG2_FMODE_COMP_RT8201 | \ - MAC_PHYCFG2_GMODE_MASK_RT8201 | \ - MAC_PHYCFG2_GMODE_COMP_RT8201 | \ - MAC_PHYCFG2_ACT_MASK_RT8201 | \ - MAC_PHYCFG2_ACT_COMP_RT8201 | \ - MAC_PHYCFG2_QUAL_MASK_RT8201 | \ - MAC_PHYCFG2_QUAL_COMP_RT8201) -#define MAC_EXT_RGMII_MODE 0x000005a8 -#define MAC_RGMII_MODE_TX_ENABLE 
0x00000001 -#define MAC_RGMII_MODE_TX_LOWPWR 0x00000002 -#define MAC_RGMII_MODE_TX_RESET 0x00000004 -#define MAC_RGMII_MODE_RX_INT_B 0x00000100 -#define MAC_RGMII_MODE_RX_QUALITY 0x00000200 -#define MAC_RGMII_MODE_RX_ACTIVITY 0x00000400 -#define MAC_RGMII_MODE_RX_ENG_DET 0x00000800 -/* 0x5ac --> 0x5b0 unused */ -#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */ -#define SERDES_RX_SIG_DETECT 0x00000400 -#define SG_DIG_CTRL 0x000005b0 -#define SG_DIG_USING_HW_AUTONEG 0x80000000 -#define SG_DIG_SOFT_RESET 0x40000000 -#define SG_DIG_DISABLE_LINKRDY 0x20000000 -#define SG_DIG_CRC16_CLEAR_N 0x01000000 -#define SG_DIG_EN10B 0x00800000 -#define SG_DIG_CLEAR_STATUS 0x00400000 -#define SG_DIG_LOCAL_DUPLEX_STATUS 0x00200000 -#define SG_DIG_LOCAL_LINK_STATUS 0x00100000 -#define SG_DIG_SPEED_STATUS_MASK 0x000c0000 -#define SG_DIG_SPEED_STATUS_SHIFT 18 -#define SG_DIG_JUMBO_PACKET_DISABLE 0x00020000 -#define SG_DIG_RESTART_AUTONEG 0x00010000 -#define SG_DIG_FIBER_MODE 0x00008000 -#define SG_DIG_REMOTE_FAULT_MASK 0x00006000 -#define SG_DIG_PAUSE_MASK 0x00001800 -#define SG_DIG_PAUSE_CAP 0x00000800 -#define SG_DIG_ASYM_PAUSE 0x00001000 -#define SG_DIG_GBIC_ENABLE 0x00000400 -#define SG_DIG_CHECK_END_ENABLE 0x00000200 -#define SG_DIG_SGMII_AUTONEG_TIMER 0x00000100 -#define SG_DIG_CLOCK_PHASE_SELECT 0x00000080 -#define SG_DIG_GMII_INPUT_SELECT 0x00000040 -#define SG_DIG_MRADV_CRC16_SELECT 0x00000020 -#define SG_DIG_COMMA_DETECT_ENABLE 0x00000010 -#define SG_DIG_AUTONEG_TIMER_REDUCE 0x00000008 -#define SG_DIG_AUTONEG_LOW_ENABLE 0x00000004 -#define SG_DIG_REMOTE_LOOPBACK 0x00000002 -#define SG_DIG_LOOPBACK 0x00000001 -#define SG_DIG_COMMON_SETUP (SG_DIG_CRC16_CLEAR_N | \ - SG_DIG_LOCAL_DUPLEX_STATUS | \ - SG_DIG_LOCAL_LINK_STATUS | \ - (0x2 << SG_DIG_SPEED_STATUS_SHIFT) | \ - SG_DIG_FIBER_MODE | SG_DIG_GBIC_ENABLE) -#define SG_DIG_STATUS 0x000005b4 -#define SG_DIG_CRC16_BUS_MASK 0xffff0000 -#define SG_DIG_PARTNER_FAULT_MASK 0x00600000 /* If !MRADV_CRC16_SELECT */ -#define SG_DIG_PARTNER_ASYM_PAUSE 0x00100000 /* If !MRADV_CRC16_SELECT */ -#define SG_DIG_PARTNER_PAUSE_CAPABLE 0x00080000 /* If !MRADV_CRC16_SELECT */ -#define SG_DIG_PARTNER_HALF_DUPLEX 0x00040000 /* If !MRADV_CRC16_SELECT */ -#define SG_DIG_PARTNER_FULL_DUPLEX 0x00020000 /* If !MRADV_CRC16_SELECT */ -#define SG_DIG_PARTNER_NEXT_PAGE 0x00010000 /* If !MRADV_CRC16_SELECT */ -#define SG_DIG_AUTONEG_STATE_MASK 0x00000ff0 -#define SG_DIG_IS_SERDES 0x00000100 -#define SG_DIG_COMMA_DETECTOR 0x00000008 -#define SG_DIG_MAC_ACK_STATUS 0x00000004 -#define SG_DIG_AUTONEG_COMPLETE 0x00000002 -#define SG_DIG_AUTONEG_ERROR 0x00000001 -/* 0x5b8 --> 0x600 unused */ -#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */ -#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */ -/* 0x624 --> 0x670 unused */ - -#define MAC_RSS_INDIR_TBL_0 0x00000630 - -#define MAC_RSS_HASH_KEY_0 0x00000670 -#define MAC_RSS_HASH_KEY_1 0x00000674 -#define MAC_RSS_HASH_KEY_2 0x00000678 -#define MAC_RSS_HASH_KEY_3 0x0000067c -#define MAC_RSS_HASH_KEY_4 0x00000680 -#define MAC_RSS_HASH_KEY_5 0x00000684 -#define MAC_RSS_HASH_KEY_6 0x00000688 -#define MAC_RSS_HASH_KEY_7 0x0000068c -#define MAC_RSS_HASH_KEY_8 0x00000690 -#define MAC_RSS_HASH_KEY_9 0x00000694 -/* 0x698 --> 0x800 unused */ - -#define MAC_TX_STATS_OCTETS 0x00000800 -#define MAC_TX_STATS_RESV1 0x00000804 -#define MAC_TX_STATS_COLLISIONS 0x00000808 -#define MAC_TX_STATS_XON_SENT 0x0000080c -#define MAC_TX_STATS_XOFF_SENT 0x00000810 -#define MAC_TX_STATS_RESV2 0x00000814 -#define MAC_TX_STATS_MAC_ERRORS 0x00000818 -#define 
MAC_TX_STATS_SINGLE_COLLISIONS 0x0000081c -#define MAC_TX_STATS_MULT_COLLISIONS 0x00000820 -#define MAC_TX_STATS_DEFERRED 0x00000824 -#define MAC_TX_STATS_RESV3 0x00000828 -#define MAC_TX_STATS_EXCESSIVE_COL 0x0000082c -#define MAC_TX_STATS_LATE_COL 0x00000830 -#define MAC_TX_STATS_RESV4_1 0x00000834 -#define MAC_TX_STATS_RESV4_2 0x00000838 -#define MAC_TX_STATS_RESV4_3 0x0000083c -#define MAC_TX_STATS_RESV4_4 0x00000840 -#define MAC_TX_STATS_RESV4_5 0x00000844 -#define MAC_TX_STATS_RESV4_6 0x00000848 -#define MAC_TX_STATS_RESV4_7 0x0000084c -#define MAC_TX_STATS_RESV4_8 0x00000850 -#define MAC_TX_STATS_RESV4_9 0x00000854 -#define MAC_TX_STATS_RESV4_10 0x00000858 -#define MAC_TX_STATS_RESV4_11 0x0000085c -#define MAC_TX_STATS_RESV4_12 0x00000860 -#define MAC_TX_STATS_RESV4_13 0x00000864 -#define MAC_TX_STATS_RESV4_14 0x00000868 -#define MAC_TX_STATS_UCAST 0x0000086c -#define MAC_TX_STATS_MCAST 0x00000870 -#define MAC_TX_STATS_BCAST 0x00000874 -#define MAC_TX_STATS_RESV5_1 0x00000878 -#define MAC_TX_STATS_RESV5_2 0x0000087c -#define MAC_RX_STATS_OCTETS 0x00000880 -#define MAC_RX_STATS_RESV1 0x00000884 -#define MAC_RX_STATS_FRAGMENTS 0x00000888 -#define MAC_RX_STATS_UCAST 0x0000088c -#define MAC_RX_STATS_MCAST 0x00000890 -#define MAC_RX_STATS_BCAST 0x00000894 -#define MAC_RX_STATS_FCS_ERRORS 0x00000898 -#define MAC_RX_STATS_ALIGN_ERRORS 0x0000089c -#define MAC_RX_STATS_XON_PAUSE_RECVD 0x000008a0 -#define MAC_RX_STATS_XOFF_PAUSE_RECVD 0x000008a4 -#define MAC_RX_STATS_MAC_CTRL_RECVD 0x000008a8 -#define MAC_RX_STATS_XOFF_ENTERED 0x000008ac -#define MAC_RX_STATS_FRAME_TOO_LONG 0x000008b0 -#define MAC_RX_STATS_JABBERS 0x000008b4 -#define MAC_RX_STATS_UNDERSIZE 0x000008b8 -/* 0x8bc --> 0xc00 unused */ - -/* Send data initiator control registers */ -#define SNDDATAI_MODE 0x00000c00 -#define SNDDATAI_MODE_RESET 0x00000001 -#define SNDDATAI_MODE_ENABLE 0x00000002 -#define SNDDATAI_MODE_STAT_OFLOW_ENAB 0x00000004 -#define SNDDATAI_STATUS 0x00000c04 -#define SNDDATAI_STATUS_STAT_OFLOW 0x00000004 -#define SNDDATAI_STATSCTRL 0x00000c08 -#define SNDDATAI_SCTRL_ENABLE 0x00000001 -#define SNDDATAI_SCTRL_FASTUPD 0x00000002 -#define SNDDATAI_SCTRL_CLEAR 0x00000004 -#define SNDDATAI_SCTRL_FLUSH 0x00000008 -#define SNDDATAI_SCTRL_FORCE_ZERO 0x00000010 -#define SNDDATAI_STATSENAB 0x00000c0c -#define SNDDATAI_STATSINCMASK 0x00000c10 -#define ISO_PKT_TX 0x00000c20 -/* 0xc24 --> 0xc80 unused */ -#define SNDDATAI_COS_CNT_0 0x00000c80 -#define SNDDATAI_COS_CNT_1 0x00000c84 -#define SNDDATAI_COS_CNT_2 0x00000c88 -#define SNDDATAI_COS_CNT_3 0x00000c8c -#define SNDDATAI_COS_CNT_4 0x00000c90 -#define SNDDATAI_COS_CNT_5 0x00000c94 -#define SNDDATAI_COS_CNT_6 0x00000c98 -#define SNDDATAI_COS_CNT_7 0x00000c9c -#define SNDDATAI_COS_CNT_8 0x00000ca0 -#define SNDDATAI_COS_CNT_9 0x00000ca4 -#define SNDDATAI_COS_CNT_10 0x00000ca8 -#define SNDDATAI_COS_CNT_11 0x00000cac -#define SNDDATAI_COS_CNT_12 0x00000cb0 -#define SNDDATAI_COS_CNT_13 0x00000cb4 -#define SNDDATAI_COS_CNT_14 0x00000cb8 -#define SNDDATAI_COS_CNT_15 0x00000cbc -#define SNDDATAI_DMA_RDQ_FULL_CNT 0x00000cc0 -#define SNDDATAI_DMA_PRIO_RDQ_FULL_CNT 0x00000cc4 -#define SNDDATAI_SDCQ_FULL_CNT 0x00000cc8 -#define SNDDATAI_NICRNG_SSND_PIDX_CNT 0x00000ccc -#define SNDDATAI_STATS_UPDATED_CNT 0x00000cd0 -#define SNDDATAI_INTERRUPTS_CNT 0x00000cd4 -#define SNDDATAI_AVOID_INTERRUPTS_CNT 0x00000cd8 -#define SNDDATAI_SND_THRESH_HIT_CNT 0x00000cdc -/* 0xce0 --> 0x1000 unused */ - -/* Send data completion control registers */ -#define SNDDATAC_MODE 0x00001000 -#define 
SNDDATAC_MODE_RESET 0x00000001 -#define SNDDATAC_MODE_ENABLE 0x00000002 -#define SNDDATAC_MODE_CDELAY 0x00000010 -/* 0x1004 --> 0x1400 unused */ - -/* Send BD ring selector */ -#define SNDBDS_MODE 0x00001400 -#define SNDBDS_MODE_RESET 0x00000001 -#define SNDBDS_MODE_ENABLE 0x00000002 -#define SNDBDS_MODE_ATTN_ENABLE 0x00000004 -#define SNDBDS_STATUS 0x00001404 -#define SNDBDS_STATUS_ERROR_ATTN 0x00000004 -#define SNDBDS_HWDIAG 0x00001408 -/* 0x140c --> 0x1440 */ -#define SNDBDS_SEL_CON_IDX_0 0x00001440 -#define SNDBDS_SEL_CON_IDX_1 0x00001444 -#define SNDBDS_SEL_CON_IDX_2 0x00001448 -#define SNDBDS_SEL_CON_IDX_3 0x0000144c -#define SNDBDS_SEL_CON_IDX_4 0x00001450 -#define SNDBDS_SEL_CON_IDX_5 0x00001454 -#define SNDBDS_SEL_CON_IDX_6 0x00001458 -#define SNDBDS_SEL_CON_IDX_7 0x0000145c -#define SNDBDS_SEL_CON_IDX_8 0x00001460 -#define SNDBDS_SEL_CON_IDX_9 0x00001464 -#define SNDBDS_SEL_CON_IDX_10 0x00001468 -#define SNDBDS_SEL_CON_IDX_11 0x0000146c -#define SNDBDS_SEL_CON_IDX_12 0x00001470 -#define SNDBDS_SEL_CON_IDX_13 0x00001474 -#define SNDBDS_SEL_CON_IDX_14 0x00001478 -#define SNDBDS_SEL_CON_IDX_15 0x0000147c -/* 0x1480 --> 0x1800 unused */ - -/* Send BD initiator control registers */ -#define SNDBDI_MODE 0x00001800 -#define SNDBDI_MODE_RESET 0x00000001 -#define SNDBDI_MODE_ENABLE 0x00000002 -#define SNDBDI_MODE_ATTN_ENABLE 0x00000004 -#define SNDBDI_MODE_MULTI_TXQ_EN 0x00000020 -#define SNDBDI_STATUS 0x00001804 -#define SNDBDI_STATUS_ERROR_ATTN 0x00000004 -#define SNDBDI_IN_PROD_IDX_0 0x00001808 -#define SNDBDI_IN_PROD_IDX_1 0x0000180c -#define SNDBDI_IN_PROD_IDX_2 0x00001810 -#define SNDBDI_IN_PROD_IDX_3 0x00001814 -#define SNDBDI_IN_PROD_IDX_4 0x00001818 -#define SNDBDI_IN_PROD_IDX_5 0x0000181c -#define SNDBDI_IN_PROD_IDX_6 0x00001820 -#define SNDBDI_IN_PROD_IDX_7 0x00001824 -#define SNDBDI_IN_PROD_IDX_8 0x00001828 -#define SNDBDI_IN_PROD_IDX_9 0x0000182c -#define SNDBDI_IN_PROD_IDX_10 0x00001830 -#define SNDBDI_IN_PROD_IDX_11 0x00001834 -#define SNDBDI_IN_PROD_IDX_12 0x00001838 -#define SNDBDI_IN_PROD_IDX_13 0x0000183c -#define SNDBDI_IN_PROD_IDX_14 0x00001840 -#define SNDBDI_IN_PROD_IDX_15 0x00001844 -/* 0x1848 --> 0x1c00 unused */ - -/* Send BD completion control registers */ -#define SNDBDC_MODE 0x00001c00 -#define SNDBDC_MODE_RESET 0x00000001 -#define SNDBDC_MODE_ENABLE 0x00000002 -#define SNDBDC_MODE_ATTN_ENABLE 0x00000004 -/* 0x1c04 --> 0x2000 unused */ - -/* Receive list placement control registers */ -#define RCVLPC_MODE 0x00002000 -#define RCVLPC_MODE_RESET 0x00000001 -#define RCVLPC_MODE_ENABLE 0x00000002 -#define RCVLPC_MODE_CLASS0_ATTN_ENAB 0x00000004 -#define RCVLPC_MODE_MAPOOR_AATTN_ENAB 0x00000008 -#define RCVLPC_MODE_STAT_OFLOW_ENAB 0x00000010 -#define RCVLPC_STATUS 0x00002004 -#define RCVLPC_STATUS_CLASS0 0x00000004 -#define RCVLPC_STATUS_MAPOOR 0x00000008 -#define RCVLPC_STATUS_STAT_OFLOW 0x00000010 -#define RCVLPC_LOCK 0x00002008 -#define RCVLPC_LOCK_REQ_MASK 0x0000ffff -#define RCVLPC_LOCK_REQ_SHIFT 0 -#define RCVLPC_LOCK_GRANT_MASK 0xffff0000 -#define RCVLPC_LOCK_GRANT_SHIFT 16 -#define RCVLPC_NON_EMPTY_BITS 0x0000200c -#define RCVLPC_NON_EMPTY_BITS_MASK 0x0000ffff -#define RCVLPC_CONFIG 0x00002010 -#define RCVLPC_STATSCTRL 0x00002014 -#define RCVLPC_STATSCTRL_ENABLE 0x00000001 -#define RCVLPC_STATSCTRL_FASTUPD 0x00000002 -#define RCVLPC_STATS_ENABLE 0x00002018 -#define RCVLPC_STATSENAB_ASF_FIX 0x00000002 -#define RCVLPC_STATSENAB_DACK_FIX 0x00040000 -#define RCVLPC_STATSENAB_LNGBRST_RFIX 0x00400000 -#define RCVLPC_STATS_INCMASK 0x0000201c -/* 0x2020 --> 0x2100 
unused */ -#define RCVLPC_SELLST_BASE 0x00002100 /* 16 16-byte entries */ -#define SELLST_TAIL 0x00000004 -#define SELLST_CONT 0x00000008 -#define SELLST_UNUSED 0x0000000c -#define RCVLPC_COS_CNTL_BASE 0x00002200 /* 16 4-byte entries */ -#define RCVLPC_DROP_FILTER_CNT 0x00002240 -#define RCVLPC_DMA_WQ_FULL_CNT 0x00002244 -#define RCVLPC_DMA_HIPRIO_WQ_FULL_CNT 0x00002248 -#define RCVLPC_NO_RCV_BD_CNT 0x0000224c -#define RCVLPC_IN_DISCARDS_CNT 0x00002250 -#define RCVLPC_IN_ERRORS_CNT 0x00002254 -#define RCVLPC_RCV_THRESH_HIT_CNT 0x00002258 -/* 0x225c --> 0x2400 unused */ - -/* Receive Data and Receive BD Initiator Control */ -#define RCVDBDI_MODE 0x00002400 -#define RCVDBDI_MODE_RESET 0x00000001 -#define RCVDBDI_MODE_ENABLE 0x00000002 -#define RCVDBDI_MODE_JUMBOBD_NEEDED 0x00000004 -#define RCVDBDI_MODE_FRM_TOO_BIG 0x00000008 -#define RCVDBDI_MODE_INV_RING_SZ 0x00000010 -#define RCVDBDI_MODE_LRG_RING_SZ 0x00010000 -#define RCVDBDI_STATUS 0x00002404 -#define RCVDBDI_STATUS_JUMBOBD_NEEDED 0x00000004 -#define RCVDBDI_STATUS_FRM_TOO_BIG 0x00000008 -#define RCVDBDI_STATUS_INV_RING_SZ 0x00000010 -#define RCVDBDI_SPLIT_FRAME_MINSZ 0x00002408 -/* 0x240c --> 0x2440 unused */ -#define RCVDBDI_JUMBO_BD 0x00002440 /* TG3_BDINFO_... */ -#define RCVDBDI_STD_BD 0x00002450 /* TG3_BDINFO_... */ -#define RCVDBDI_MINI_BD 0x00002460 /* TG3_BDINFO_... */ -#define RCVDBDI_JUMBO_CON_IDX 0x00002470 -#define RCVDBDI_STD_CON_IDX 0x00002474 -#define RCVDBDI_MINI_CON_IDX 0x00002478 -/* 0x247c --> 0x2480 unused */ -#define RCVDBDI_BD_PROD_IDX_0 0x00002480 -#define RCVDBDI_BD_PROD_IDX_1 0x00002484 -#define RCVDBDI_BD_PROD_IDX_2 0x00002488 -#define RCVDBDI_BD_PROD_IDX_3 0x0000248c -#define RCVDBDI_BD_PROD_IDX_4 0x00002490 -#define RCVDBDI_BD_PROD_IDX_5 0x00002494 -#define RCVDBDI_BD_PROD_IDX_6 0x00002498 -#define RCVDBDI_BD_PROD_IDX_7 0x0000249c -#define RCVDBDI_BD_PROD_IDX_8 0x000024a0 -#define RCVDBDI_BD_PROD_IDX_9 0x000024a4 -#define RCVDBDI_BD_PROD_IDX_10 0x000024a8 -#define RCVDBDI_BD_PROD_IDX_11 0x000024ac -#define RCVDBDI_BD_PROD_IDX_12 0x000024b0 -#define RCVDBDI_BD_PROD_IDX_13 0x000024b4 -#define RCVDBDI_BD_PROD_IDX_14 0x000024b8 -#define RCVDBDI_BD_PROD_IDX_15 0x000024bc -#define RCVDBDI_HWDIAG 0x000024c0 -/* 0x24c4 --> 0x2800 unused */ - -/* Receive Data Completion Control */ -#define RCVDCC_MODE 0x00002800 -#define RCVDCC_MODE_RESET 0x00000001 -#define RCVDCC_MODE_ENABLE 0x00000002 -#define RCVDCC_MODE_ATTN_ENABLE 0x00000004 -/* 0x2804 --> 0x2c00 unused */ - -/* Receive BD Initiator Control Registers */ -#define RCVBDI_MODE 0x00002c00 -#define RCVBDI_MODE_RESET 0x00000001 -#define RCVBDI_MODE_ENABLE 0x00000002 -#define RCVBDI_MODE_RCB_ATTN_ENAB 0x00000004 -#define RCVBDI_STATUS 0x00002c04 -#define RCVBDI_STATUS_RCB_ATTN 0x00000004 -#define RCVBDI_JUMBO_PROD_IDX 0x00002c08 -#define RCVBDI_STD_PROD_IDX 0x00002c0c -#define RCVBDI_MINI_PROD_IDX 0x00002c10 -#define RCVBDI_MINI_THRESH 0x00002c14 -#define RCVBDI_STD_THRESH 0x00002c18 -#define RCVBDI_JUMBO_THRESH 0x00002c1c -/* 0x2c20 --> 0x2d00 unused */ - -#define STD_REPLENISH_LWM 0x00002d00 -#define JMB_REPLENISH_LWM 0x00002d04 -/* 0x2d08 --> 0x3000 unused */ - -/* Receive BD Completion Control Registers */ -#define RCVCC_MODE 0x00003000 -#define RCVCC_MODE_RESET 0x00000001 -#define RCVCC_MODE_ENABLE 0x00000002 -#define RCVCC_MODE_ATTN_ENABLE 0x00000004 -#define RCVCC_STATUS 0x00003004 -#define RCVCC_STATUS_ERROR_ATTN 0x00000004 -#define RCVCC_JUMP_PROD_IDX 0x00003008 -#define RCVCC_STD_PROD_IDX 0x0000300c -#define RCVCC_MINI_PROD_IDX 0x00003010 -/* 0x3014 --> 
0x3400 unused */ - -/* Receive list selector control registers */ -#define RCVLSC_MODE 0x00003400 -#define RCVLSC_MODE_RESET 0x00000001 -#define RCVLSC_MODE_ENABLE 0x00000002 -#define RCVLSC_MODE_ATTN_ENABLE 0x00000004 -#define RCVLSC_STATUS 0x00003404 -#define RCVLSC_STATUS_ERROR_ATTN 0x00000004 -/* 0x3408 --> 0x3600 unused */ - -#define TG3_CPMU_DRV_STATUS 0x0000344c - -/* CPMU registers */ -#define TG3_CPMU_CTRL 0x00003600 -#define CPMU_CTRL_LINK_IDLE_MODE 0x00000200 -#define CPMU_CTRL_LINK_AWARE_MODE 0x00000400 -#define CPMU_CTRL_LINK_SPEED_MODE 0x00004000 -#define CPMU_CTRL_GPHY_10MB_RXONLY 0x00010000 -#define TG3_CPMU_LSPD_10MB_CLK 0x00003604 -#define CPMU_LSPD_10MB_MACCLK_MASK 0x001f0000 -#define CPMU_LSPD_10MB_MACCLK_6_25 0x00130000 -/* 0x3608 --> 0x360c unused */ - -#define TG3_CPMU_LSPD_1000MB_CLK 0x0000360c -#define CPMU_LSPD_1000MB_MACCLK_62_5 0x00000000 -#define CPMU_LSPD_1000MB_MACCLK_12_5 0x00110000 -#define CPMU_LSPD_1000MB_MACCLK_MASK 0x001f0000 -#define TG3_CPMU_LNK_AWARE_PWRMD 0x00003610 -#define CPMU_LNK_AWARE_MACCLK_MASK 0x001f0000 -#define CPMU_LNK_AWARE_MACCLK_6_25 0x00130000 -/* 0x3614 --> 0x361c unused */ - -#define TG3_CPMU_HST_ACC 0x0000361c -#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000 -#define CPMU_HST_ACC_MACCLK_6_25 0x00130000 -/* 0x3620 --> 0x3630 unused */ - -#define TG3_CPMU_CLCK_ORIDE 0x00003624 -#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 - -#define TG3_CPMU_CLCK_STAT 0x00003630 -#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 -#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 -#define CPMU_CLCK_STAT_MAC_CLCK_12_5 0x00110000 -#define CPMU_CLCK_STAT_MAC_CLCK_6_25 0x00130000 -/* 0x3634 --> 0x365c unused */ - -#define TG3_CPMU_MUTEX_REQ 0x0000365c -#define CPMU_MUTEX_REQ_DRIVER 0x00001000 -#define TG3_CPMU_MUTEX_GNT 0x00003660 -#define CPMU_MUTEX_GNT_DRIVER 0x00001000 -#define TG3_CPMU_PHY_STRAP 0x00003664 -#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020 -/* 0x3664 --> 0x36b0 unused */ - -#define TG3_CPMU_EEE_MODE 0x000036b0 -#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004 -#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008 -#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040 -#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080 -#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100 -#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200 -#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000 -#define TG3_CPMU_EEE_DBTMR1 0x000036b4 -#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 -#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff -#define TG3_CPMU_EEE_DBTMR2 0x000036b8 -#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 -#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff -#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc -#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 -#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004 -/* 0x36c0 --> 0x36d0 unused */ - -#define TG3_CPMU_EEE_CTRL 0x000036d0 -#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US 0x0000019d -#define TG3_CPMU_EEE_CTRL_EXIT_36_US 0x00000384 -#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US 0x000001f8 -/* 0x36d4 --> 0x3800 unused */ - -/* Mbuf cluster free registers */ -#define MBFREE_MODE 0x00003800 -#define MBFREE_MODE_RESET 0x00000001 -#define MBFREE_MODE_ENABLE 0x00000002 -#define MBFREE_STATUS 0x00003804 -/* 0x3808 --> 0x3c00 unused */ - -/* Host coalescing control registers */ -#define HOSTCC_MODE 0x00003c00 -#define HOSTCC_MODE_RESET 0x00000001 -#define HOSTCC_MODE_ENABLE 0x00000002 -#define HOSTCC_MODE_ATTN 0x00000004 -#define HOSTCC_MODE_NOW 0x00000008 -#define HOSTCC_MODE_FULL_STATUS 0x00000000 -#define HOSTCC_MODE_64BYTE 0x00000080 -#define HOSTCC_MODE_32BYTE 
0x00000100 -#define HOSTCC_MODE_CLRTICK_RXBD 0x00000200 -#define HOSTCC_MODE_CLRTICK_TXBD 0x00000400 -#define HOSTCC_MODE_NOINT_ON_NOW 0x00000800 -#define HOSTCC_MODE_NOINT_ON_FORCE 0x00001000 -#define HOSTCC_MODE_COAL_VEC1_NOW 0x00002000 -#define HOSTCC_STATUS 0x00003c04 -#define HOSTCC_STATUS_ERROR_ATTN 0x00000004 -#define HOSTCC_RXCOL_TICKS 0x00003c08 -#define LOW_RXCOL_TICKS 0x00000032 -#define LOW_RXCOL_TICKS_CLRTCKS 0x00000014 -#define DEFAULT_RXCOL_TICKS 0x00000048 -#define HIGH_RXCOL_TICKS 0x00000096 -#define MAX_RXCOL_TICKS 0x000003ff -#define HOSTCC_TXCOL_TICKS 0x00003c0c -#define LOW_TXCOL_TICKS 0x00000096 -#define LOW_TXCOL_TICKS_CLRTCKS 0x00000048 -#define DEFAULT_TXCOL_TICKS 0x0000012c -#define HIGH_TXCOL_TICKS 0x00000145 -#define MAX_TXCOL_TICKS 0x000003ff -#define HOSTCC_RXMAX_FRAMES 0x00003c10 -#define LOW_RXMAX_FRAMES 0x00000005 -#define DEFAULT_RXMAX_FRAMES 0x00000008 -#define HIGH_RXMAX_FRAMES 0x00000012 -#define MAX_RXMAX_FRAMES 0x000000ff -#define HOSTCC_TXMAX_FRAMES 0x00003c14 -#define LOW_TXMAX_FRAMES 0x00000035 -#define DEFAULT_TXMAX_FRAMES 0x0000004b -#define HIGH_TXMAX_FRAMES 0x00000052 -#define MAX_TXMAX_FRAMES 0x000000ff -#define HOSTCC_RXCOAL_TICK_INT 0x00003c18 -#define DEFAULT_RXCOAL_TICK_INT 0x00000019 -#define DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014 -#define MAX_RXCOAL_TICK_INT 0x000003ff -#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c -#define DEFAULT_TXCOAL_TICK_INT 0x00000019 -#define DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014 -#define MAX_TXCOAL_TICK_INT 0x000003ff -#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20 -#define DEFAULT_RXCOAL_MAXF_INT 0x00000005 -#define MAX_RXCOAL_MAXF_INT 0x000000ff -#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24 -#define DEFAULT_TXCOAL_MAXF_INT 0x00000005 -#define MAX_TXCOAL_MAXF_INT 0x000000ff -#define HOSTCC_STAT_COAL_TICKS 0x00003c28 -#define DEFAULT_STAT_COAL_TICKS 0x000f4240 -#define MAX_STAT_COAL_TICKS 0xd693d400 -#define MIN_STAT_COAL_TICKS 0x00000064 -/* 0x3c2c --> 0x3c30 unused */ -#define HOSTCC_STATS_BLK_HOST_ADDR 0x00003c30 /* 64-bit */ -#define HOSTCC_STATUS_BLK_HOST_ADDR 0x00003c38 /* 64-bit */ -#define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40 -#define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44 -#define HOSTCC_FLOW_ATTN 0x00003c48 -#define HOSTCC_FLOW_ATTN_MBUF_LWM 0x00000040 -/* 0x3c4c --> 0x3c50 unused */ -#define HOSTCC_JUMBO_CON_IDX 0x00003c50 -#define HOSTCC_STD_CON_IDX 0x00003c54 -#define HOSTCC_MINI_CON_IDX 0x00003c58 -/* 0x3c5c --> 0x3c80 unused */ -#define HOSTCC_RET_PROD_IDX_0 0x00003c80 -#define HOSTCC_RET_PROD_IDX_1 0x00003c84 -#define HOSTCC_RET_PROD_IDX_2 0x00003c88 -#define HOSTCC_RET_PROD_IDX_3 0x00003c8c -#define HOSTCC_RET_PROD_IDX_4 0x00003c90 -#define HOSTCC_RET_PROD_IDX_5 0x00003c94 -#define HOSTCC_RET_PROD_IDX_6 0x00003c98 -#define HOSTCC_RET_PROD_IDX_7 0x00003c9c -#define HOSTCC_RET_PROD_IDX_8 0x00003ca0 -#define HOSTCC_RET_PROD_IDX_9 0x00003ca4 -#define HOSTCC_RET_PROD_IDX_10 0x00003ca8 -#define HOSTCC_RET_PROD_IDX_11 0x00003cac -#define HOSTCC_RET_PROD_IDX_12 0x00003cb0 -#define HOSTCC_RET_PROD_IDX_13 0x00003cb4 -#define HOSTCC_RET_PROD_IDX_14 0x00003cb8 -#define HOSTCC_RET_PROD_IDX_15 0x00003cbc -#define HOSTCC_SND_CON_IDX_0 0x00003cc0 -#define HOSTCC_SND_CON_IDX_1 0x00003cc4 -#define HOSTCC_SND_CON_IDX_2 0x00003cc8 -#define HOSTCC_SND_CON_IDX_3 0x00003ccc -#define HOSTCC_SND_CON_IDX_4 0x00003cd0 -#define HOSTCC_SND_CON_IDX_5 0x00003cd4 -#define HOSTCC_SND_CON_IDX_6 0x00003cd8 -#define HOSTCC_SND_CON_IDX_7 0x00003cdc -#define HOSTCC_SND_CON_IDX_8 0x00003ce0 -#define HOSTCC_SND_CON_IDX_9 0x00003ce4 
-#define HOSTCC_SND_CON_IDX_10 0x00003ce8 -#define HOSTCC_SND_CON_IDX_11 0x00003cec -#define HOSTCC_SND_CON_IDX_12 0x00003cf0 -#define HOSTCC_SND_CON_IDX_13 0x00003cf4 -#define HOSTCC_SND_CON_IDX_14 0x00003cf8 -#define HOSTCC_SND_CON_IDX_15 0x00003cfc -#define HOSTCC_STATBLCK_RING1 0x00003d00 -/* 0x3d00 --> 0x3d80 unused */ - -#define HOSTCC_RXCOL_TICKS_VEC1 0x00003d80 -#define HOSTCC_TXCOL_TICKS_VEC1 0x00003d84 -#define HOSTCC_RXMAX_FRAMES_VEC1 0x00003d88 -#define HOSTCC_TXMAX_FRAMES_VEC1 0x00003d8c -#define HOSTCC_RXCOAL_MAXF_INT_VEC1 0x00003d90 -#define HOSTCC_TXCOAL_MAXF_INT_VEC1 0x00003d94 -/* 0x3d98 --> 0x4000 unused */ - -/* Memory arbiter control registers */ -#define MEMARB_MODE 0x00004000 -#define MEMARB_MODE_RESET 0x00000001 -#define MEMARB_MODE_ENABLE 0x00000002 -#define MEMARB_STATUS 0x00004004 -#define MEMARB_TRAP_ADDR_LOW 0x00004008 -#define MEMARB_TRAP_ADDR_HIGH 0x0000400c -/* 0x4010 --> 0x4400 unused */ - -/* Buffer manager control registers */ -#define BUFMGR_MODE 0x00004400 -#define BUFMGR_MODE_RESET 0x00000001 -#define BUFMGR_MODE_ENABLE 0x00000002 -#define BUFMGR_MODE_ATTN_ENABLE 0x00000004 -#define BUFMGR_MODE_BM_TEST 0x00000008 -#define BUFMGR_MODE_MBLOW_ATTN_ENAB 0x00000010 -#define BUFMGR_MODE_NO_TX_UNDERRUN 0x80000000 -#define BUFMGR_STATUS 0x00004404 -#define BUFMGR_STATUS_ERROR 0x00000004 -#define BUFMGR_STATUS_MBLOW 0x00000010 -#define BUFMGR_MB_POOL_ADDR 0x00004408 -#define BUFMGR_MB_POOL_SIZE 0x0000440c -#define BUFMGR_MB_RDMA_LOW_WATER 0x00004410 -#define DEFAULT_MB_RDMA_LOW_WATER 0x00000050 -#define DEFAULT_MB_RDMA_LOW_WATER_5705 0x00000000 -#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO 0x00000130 -#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780 0x00000000 -#define BUFMGR_MB_MACRX_LOW_WATER 0x00004414 -#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020 -#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010 -#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004 -#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a -#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098 -#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b -#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e -#define BUFMGR_MB_HIGH_WATER 0x00004418 -#define DEFAULT_MB_HIGH_WATER 0x00000060 -#define DEFAULT_MB_HIGH_WATER_5705 0x00000060 -#define DEFAULT_MB_HIGH_WATER_5906 0x00000010 -#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0 -#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c -#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096 -#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea -#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c -#define BUFMGR_MB_ALLOC_BIT 0x10000000 -#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420 -#define BUFMGR_TX_MB_ALLOC_REQ 0x00004424 -#define BUFMGR_TX_MB_ALLOC_RESP 0x00004428 -#define BUFMGR_DMA_DESC_POOL_ADDR 0x0000442c -#define BUFMGR_DMA_DESC_POOL_SIZE 0x00004430 -#define BUFMGR_DMA_LOW_WATER 0x00004434 -#define DEFAULT_DMA_LOW_WATER 0x00000005 -#define BUFMGR_DMA_HIGH_WATER 0x00004438 -#define DEFAULT_DMA_HIGH_WATER 0x0000000a -#define BUFMGR_RX_DMA_ALLOC_REQ 0x0000443c -#define BUFMGR_RX_DMA_ALLOC_RESP 0x00004440 -#define BUFMGR_TX_DMA_ALLOC_REQ 0x00004444 -#define BUFMGR_TX_DMA_ALLOC_RESP 0x00004448 -#define BUFMGR_HWDIAG_0 0x0000444c -#define BUFMGR_HWDIAG_1 0x00004450 -#define BUFMGR_HWDIAG_2 0x00004454 -/* 0x4458 --> 0x4800 unused */ - -/* Read DMA control registers */ -#define RDMAC_MODE 0x00004800 -#define RDMAC_MODE_RESET 0x00000001 -#define RDMAC_MODE_ENABLE 0x00000002 -#define RDMAC_MODE_TGTABORT_ENAB 0x00000004 -#define RDMAC_MODE_MSTABORT_ENAB 0x00000008 -#define 
RDMAC_MODE_PARITYERR_ENAB 0x00000010 -#define RDMAC_MODE_ADDROFLOW_ENAB 0x00000020 -#define RDMAC_MODE_FIFOOFLOW_ENAB 0x00000040 -#define RDMAC_MODE_FIFOURUN_ENAB 0x00000080 -#define RDMAC_MODE_FIFOOREAD_ENAB 0x00000100 -#define RDMAC_MODE_LNGREAD_ENAB 0x00000200 -#define RDMAC_MODE_SPLIT_ENABLE 0x00000800 -#define RDMAC_MODE_BD_SBD_CRPT_ENAB 0x00000800 -#define RDMAC_MODE_SPLIT_RESET 0x00001000 -#define RDMAC_MODE_MBUF_RBD_CRPT_ENAB 0x00001000 -#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000 -#define RDMAC_MODE_FIFO_SIZE_128 0x00020000 -#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000 -#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000 -#define RDMAC_MODE_IPV4_LSO_EN 0x08000000 -#define RDMAC_MODE_IPV6_LSO_EN 0x10000000 -#define RDMAC_MODE_H2BNC_VLAN_DET 0x20000000 -#define RDMAC_STATUS 0x00004804 -#define RDMAC_STATUS_TGTABORT 0x00000004 -#define RDMAC_STATUS_MSTABORT 0x00000008 -#define RDMAC_STATUS_PARITYERR 0x00000010 -#define RDMAC_STATUS_ADDROFLOW 0x00000020 -#define RDMAC_STATUS_FIFOOFLOW 0x00000040 -#define RDMAC_STATUS_FIFOURUN 0x00000080 -#define RDMAC_STATUS_FIFOOREAD 0x00000100 -#define RDMAC_STATUS_LNGREAD 0x00000200 -/* 0x4808 --> 0x4900 unused */ - -#define TG3_RDMA_RSRVCTRL_REG 0x00004900 -#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 -#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00 -#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0 -#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000 -#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000 -#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 -#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000 -/* 0x4904 --> 0x4910 unused */ - -#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 -#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 -#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000 -/* 0x4914 --> 0x4c00 unused */ - -/* Write DMA control registers */ -#define WDMAC_MODE 0x00004c00 -#define WDMAC_MODE_RESET 0x00000001 -#define WDMAC_MODE_ENABLE 0x00000002 -#define WDMAC_MODE_TGTABORT_ENAB 0x00000004 -#define WDMAC_MODE_MSTABORT_ENAB 0x00000008 -#define WDMAC_MODE_PARITYERR_ENAB 0x00000010 -#define WDMAC_MODE_ADDROFLOW_ENAB 0x00000020 -#define WDMAC_MODE_FIFOOFLOW_ENAB 0x00000040 -#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080 -#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100 -#define WDMAC_MODE_LNGREAD_ENAB 0x00000200 -#define WDMAC_MODE_RX_ACCEL 0x00000400 -#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000 -#define WDMAC_MODE_BURST_ALL_DATA 0xc0000000 -#define WDMAC_STATUS 0x00004c04 -#define WDMAC_STATUS_TGTABORT 0x00000004 -#define WDMAC_STATUS_MSTABORT 0x00000008 -#define WDMAC_STATUS_PARITYERR 0x00000010 -#define WDMAC_STATUS_ADDROFLOW 0x00000020 -#define WDMAC_STATUS_FIFOOFLOW 0x00000040 -#define WDMAC_STATUS_FIFOURUN 0x00000080 -#define WDMAC_STATUS_FIFOOREAD 0x00000100 -#define WDMAC_STATUS_LNGREAD 0x00000200 -/* 0x4c08 --> 0x5000 unused */ - -/* Per-cpu register offsets (arm9) */ -#define CPU_MODE 0x00000000 -#define CPU_MODE_RESET 0x00000001 -#define CPU_MODE_HALT 0x00000400 -#define CPU_STATE 0x00000004 -#define CPU_EVTMASK 0x00000008 -/* 0xc --> 0x1c reserved */ -#define CPU_PC 0x0000001c -#define CPU_INSN 0x00000020 -#define CPU_SPAD_UFLOW 0x00000024 -#define CPU_WDOG_CLEAR 0x00000028 -#define CPU_WDOG_VECTOR 0x0000002c -#define CPU_WDOG_PC 0x00000030 -#define CPU_HW_BP 0x00000034 -/* 0x38 --> 0x44 unused */ -#define CPU_WDOG_SAVED_STATE 0x00000044 -#define CPU_LAST_BRANCH_ADDR 0x00000048 -#define CPU_SPAD_UFLOW_SET 0x0000004c -/* 0x50 --> 0x200 unused */ -#define CPU_R0 0x00000200 -#define CPU_R1 0x00000204 -#define CPU_R2 
0x00000208 -#define CPU_R3 0x0000020c -#define CPU_R4 0x00000210 -#define CPU_R5 0x00000214 -#define CPU_R6 0x00000218 -#define CPU_R7 0x0000021c -#define CPU_R8 0x00000220 -#define CPU_R9 0x00000224 -#define CPU_R10 0x00000228 -#define CPU_R11 0x0000022c -#define CPU_R12 0x00000230 -#define CPU_R13 0x00000234 -#define CPU_R14 0x00000238 -#define CPU_R15 0x0000023c -#define CPU_R16 0x00000240 -#define CPU_R17 0x00000244 -#define CPU_R18 0x00000248 -#define CPU_R19 0x0000024c -#define CPU_R20 0x00000250 -#define CPU_R21 0x00000254 -#define CPU_R22 0x00000258 -#define CPU_R23 0x0000025c -#define CPU_R24 0x00000260 -#define CPU_R25 0x00000264 -#define CPU_R26 0x00000268 -#define CPU_R27 0x0000026c -#define CPU_R28 0x00000270 -#define CPU_R29 0x00000274 -#define CPU_R30 0x00000278 -#define CPU_R31 0x0000027c -/* 0x280 --> 0x400 unused */ - -#define RX_CPU_BASE 0x00005000 -#define RX_CPU_MODE 0x00005000 -#define RX_CPU_STATE 0x00005004 -#define RX_CPU_PGMCTR 0x0000501c -#define RX_CPU_HWBKPT 0x00005034 -#define TX_CPU_BASE 0x00005400 -#define TX_CPU_MODE 0x00005400 -#define TX_CPU_STATE 0x00005404 -#define TX_CPU_PGMCTR 0x0000541c - -#define VCPU_STATUS 0x00005100 -#define VCPU_STATUS_INIT_DONE 0x04000000 -#define VCPU_STATUS_DRV_RESET 0x08000000 - -#define VCPU_CFGSHDW 0x00005104 -#define VCPU_CFGSHDW_WOL_ENABLE 0x00000001 -#define VCPU_CFGSHDW_WOL_MAGPKT 0x00000004 -#define VCPU_CFGSHDW_ASPM_DBNC 0x00001000 - -/* Mailboxes */ -#define GRCMBOX_BASE 0x00005600 -#define GRCMBOX_INTERRUPT_0 0x00005800 /* 64-bit */ -#define GRCMBOX_INTERRUPT_1 0x00005808 /* 64-bit */ -#define GRCMBOX_INTERRUPT_2 0x00005810 /* 64-bit */ -#define GRCMBOX_INTERRUPT_3 0x00005818 /* 64-bit */ -#define GRCMBOX_GENERAL_0 0x00005820 /* 64-bit */ -#define GRCMBOX_GENERAL_1 0x00005828 /* 64-bit */ -#define GRCMBOX_GENERAL_2 0x00005830 /* 64-bit */ -#define GRCMBOX_GENERAL_3 0x00005838 /* 64-bit */ -#define GRCMBOX_GENERAL_4 0x00005840 /* 64-bit */ -#define GRCMBOX_GENERAL_5 0x00005848 /* 64-bit */ -#define GRCMBOX_GENERAL_6 0x00005850 /* 64-bit */ -#define GRCMBOX_GENERAL_7 0x00005858 /* 64-bit */ -#define GRCMBOX_RELOAD_STAT 0x00005860 /* 64-bit */ -#define GRCMBOX_RCVSTD_PROD_IDX 0x00005868 /* 64-bit */ -#define GRCMBOX_RCVJUMBO_PROD_IDX 0x00005870 /* 64-bit */ -#define GRCMBOX_RCVMINI_PROD_IDX 0x00005878 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_0 0x00005880 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_1 0x00005888 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_2 0x00005890 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_3 0x00005898 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_4 0x000058a0 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_5 0x000058a8 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_6 0x000058b0 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_7 0x000058b8 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_8 0x000058c0 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_9 0x000058c8 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_10 0x000058d0 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_11 0x000058d8 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_12 0x000058e0 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_13 0x000058e8 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_14 0x000058f0 /* 64-bit */ -#define GRCMBOX_RCVRET_CON_IDX_15 0x000058f8 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_0 0x00005900 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_1 0x00005908 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_2 0x00005910 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_3 0x00005918 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_4 0x00005920 /* 
64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_5 0x00005928 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_6 0x00005930 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_7 0x00005938 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_8 0x00005940 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_9 0x00005948 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_10 0x00005950 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_11 0x00005958 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_12 0x00005960 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_13 0x00005968 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_14 0x00005970 /* 64-bit */ -#define GRCMBOX_SNDHOST_PROD_IDX_15 0x00005978 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_0 0x00005980 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_1 0x00005988 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_2 0x00005990 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_3 0x00005998 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_4 0x000059a0 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_5 0x000059a8 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_6 0x000059b0 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_7 0x000059b8 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_8 0x000059c0 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_9 0x000059c8 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_10 0x000059d0 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_11 0x000059d8 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_12 0x000059e0 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_13 0x000059e8 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_14 0x000059f0 /* 64-bit */ -#define GRCMBOX_SNDNIC_PROD_IDX_15 0x000059f8 /* 64-bit */ -#define GRCMBOX_HIGH_PRIO_EV_VECTOR 0x00005a00 -#define GRCMBOX_HIGH_PRIO_EV_MASK 0x00005a04 -#define GRCMBOX_LOW_PRIO_EV_VEC 0x00005a08 -#define GRCMBOX_LOW_PRIO_EV_MASK 0x00005a0c -/* 0x5a10 --> 0x5c00 */ - -/* Flow Through queues */ -#define FTQ_RESET 0x00005c00 -/* 0x5c04 --> 0x5c10 unused */ -#define FTQ_DMA_NORM_READ_CTL 0x00005c10 -#define FTQ_DMA_NORM_READ_FULL_CNT 0x00005c14 -#define FTQ_DMA_NORM_READ_FIFO_ENQDEQ 0x00005c18 -#define FTQ_DMA_NORM_READ_WRITE_PEEK 0x00005c1c -#define FTQ_DMA_HIGH_READ_CTL 0x00005c20 -#define FTQ_DMA_HIGH_READ_FULL_CNT 0x00005c24 -#define FTQ_DMA_HIGH_READ_FIFO_ENQDEQ 0x00005c28 -#define FTQ_DMA_HIGH_READ_WRITE_PEEK 0x00005c2c -#define FTQ_DMA_COMP_DISC_CTL 0x00005c30 -#define FTQ_DMA_COMP_DISC_FULL_CNT 0x00005c34 -#define FTQ_DMA_COMP_DISC_FIFO_ENQDEQ 0x00005c38 -#define FTQ_DMA_COMP_DISC_WRITE_PEEK 0x00005c3c -#define FTQ_SEND_BD_COMP_CTL 0x00005c40 -#define FTQ_SEND_BD_COMP_FULL_CNT 0x00005c44 -#define FTQ_SEND_BD_COMP_FIFO_ENQDEQ 0x00005c48 -#define FTQ_SEND_BD_COMP_WRITE_PEEK 0x00005c4c -#define FTQ_SEND_DATA_INIT_CTL 0x00005c50 -#define FTQ_SEND_DATA_INIT_FULL_CNT 0x00005c54 -#define FTQ_SEND_DATA_INIT_FIFO_ENQDEQ 0x00005c58 -#define FTQ_SEND_DATA_INIT_WRITE_PEEK 0x00005c5c -#define FTQ_DMA_NORM_WRITE_CTL 0x00005c60 -#define FTQ_DMA_NORM_WRITE_FULL_CNT 0x00005c64 -#define FTQ_DMA_NORM_WRITE_FIFO_ENQDEQ 0x00005c68 -#define FTQ_DMA_NORM_WRITE_WRITE_PEEK 0x00005c6c -#define FTQ_DMA_HIGH_WRITE_CTL 0x00005c70 -#define FTQ_DMA_HIGH_WRITE_FULL_CNT 0x00005c74 -#define FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ 0x00005c78 -#define FTQ_DMA_HIGH_WRITE_WRITE_PEEK 0x00005c7c -#define FTQ_SWTYPE1_CTL 0x00005c80 -#define FTQ_SWTYPE1_FULL_CNT 0x00005c84 -#define FTQ_SWTYPE1_FIFO_ENQDEQ 0x00005c88 -#define FTQ_SWTYPE1_WRITE_PEEK 0x00005c8c -#define FTQ_SEND_DATA_COMP_CTL 0x00005c90 -#define FTQ_SEND_DATA_COMP_FULL_CNT 0x00005c94 -#define FTQ_SEND_DATA_COMP_FIFO_ENQDEQ 0x00005c98 
-#define FTQ_SEND_DATA_COMP_WRITE_PEEK 0x00005c9c -#define FTQ_HOST_COAL_CTL 0x00005ca0 -#define FTQ_HOST_COAL_FULL_CNT 0x00005ca4 -#define FTQ_HOST_COAL_FIFO_ENQDEQ 0x00005ca8 -#define FTQ_HOST_COAL_WRITE_PEEK 0x00005cac -#define FTQ_MAC_TX_CTL 0x00005cb0 -#define FTQ_MAC_TX_FULL_CNT 0x00005cb4 -#define FTQ_MAC_TX_FIFO_ENQDEQ 0x00005cb8 -#define FTQ_MAC_TX_WRITE_PEEK 0x00005cbc -#define FTQ_MB_FREE_CTL 0x00005cc0 -#define FTQ_MB_FREE_FULL_CNT 0x00005cc4 -#define FTQ_MB_FREE_FIFO_ENQDEQ 0x00005cc8 -#define FTQ_MB_FREE_WRITE_PEEK 0x00005ccc -#define FTQ_RCVBD_COMP_CTL 0x00005cd0 -#define FTQ_RCVBD_COMP_FULL_CNT 0x00005cd4 -#define FTQ_RCVBD_COMP_FIFO_ENQDEQ 0x00005cd8 -#define FTQ_RCVBD_COMP_WRITE_PEEK 0x00005cdc -#define FTQ_RCVLST_PLMT_CTL 0x00005ce0 -#define FTQ_RCVLST_PLMT_FULL_CNT 0x00005ce4 -#define FTQ_RCVLST_PLMT_FIFO_ENQDEQ 0x00005ce8 -#define FTQ_RCVLST_PLMT_WRITE_PEEK 0x00005cec -#define FTQ_RCVDATA_INI_CTL 0x00005cf0 -#define FTQ_RCVDATA_INI_FULL_CNT 0x00005cf4 -#define FTQ_RCVDATA_INI_FIFO_ENQDEQ 0x00005cf8 -#define FTQ_RCVDATA_INI_WRITE_PEEK 0x00005cfc -#define FTQ_RCVDATA_COMP_CTL 0x00005d00 -#define FTQ_RCVDATA_COMP_FULL_CNT 0x00005d04 -#define FTQ_RCVDATA_COMP_FIFO_ENQDEQ 0x00005d08 -#define FTQ_RCVDATA_COMP_WRITE_PEEK 0x00005d0c -#define FTQ_SWTYPE2_CTL 0x00005d10 -#define FTQ_SWTYPE2_FULL_CNT 0x00005d14 -#define FTQ_SWTYPE2_FIFO_ENQDEQ 0x00005d18 -#define FTQ_SWTYPE2_WRITE_PEEK 0x00005d1c -/* 0x5d20 --> 0x6000 unused */ - -/* Message signaled interrupt registers */ -#define MSGINT_MODE 0x00006000 -#define MSGINT_MODE_RESET 0x00000001 -#define MSGINT_MODE_ENABLE 0x00000002 -#define MSGINT_MODE_ONE_SHOT_DISABLE 0x00000020 -#define MSGINT_MODE_MULTIVEC_EN 0x00000080 -#define MSGINT_STATUS 0x00006004 -#define MSGINT_STATUS_MSI_REQ 0x00000001 -#define MSGINT_FIFO 0x00006008 -/* 0x600c --> 0x6400 unused */ - -/* DMA completion registers */ -#define DMAC_MODE 0x00006400 -#define DMAC_MODE_RESET 0x00000001 -#define DMAC_MODE_ENABLE 0x00000002 -/* 0x6404 --> 0x6800 unused */ - -/* GRC registers */ -#define GRC_MODE 0x00006800 -#define GRC_MODE_UPD_ON_COAL 0x00000001 -#define GRC_MODE_BSWAP_NONFRM_DATA 0x00000002 -#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004 -#define GRC_MODE_BSWAP_DATA 0x00000010 -#define GRC_MODE_WSWAP_DATA 0x00000020 -#define GRC_MODE_BYTE_SWAP_B2HRX_DATA 0x00000040 -#define GRC_MODE_WORD_SWAP_B2HRX_DATA 0x00000080 -#define GRC_MODE_SPLITHDR 0x00000100 -#define GRC_MODE_NOFRM_CRACKING 0x00000200 -#define GRC_MODE_INCL_CRC 0x00000400 -#define GRC_MODE_ALLOW_BAD_FRMS 0x00000800 -#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000 -#define GRC_MODE_NOIRQ_ON_RCV 0x00004000 -#define GRC_MODE_FORCE_PCI32BIT 0x00008000 -#define GRC_MODE_B2HRX_ENABLE 0x00008000 -#define GRC_MODE_HOST_STACKUP 0x00010000 -#define GRC_MODE_HOST_SENDBDS 0x00020000 -#define GRC_MODE_HTX2B_ENABLE 0x00040000 -#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000 -#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000 -#define GRC_MODE_PCIE_TL_SEL 0x00000000 -#define GRC_MODE_PCIE_PL_SEL 0x00400000 -#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000 -#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000 -#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000 -#define GRC_MODE_IRQ_ON_MAC_ATTN 0x04000000 -#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000 -#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000 -#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000 -#define GRC_MODE_PCIE_DL_SEL 0x20000000 -#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000 -#define GRC_MODE_PCIE_HI_1K_EN 0x80000000 -#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \ - 
GRC_MODE_PCIE_PL_SEL | \ - GRC_MODE_PCIE_DL_SEL | \ - GRC_MODE_PCIE_HI_1K_EN) -#define GRC_MISC_CFG 0x00006804 -#define GRC_MISC_CFG_CORECLK_RESET 0x00000001 -#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe -#define GRC_MISC_CFG_PRESCALAR_SHIFT 1 -#define GRC_MISC_CFG_BOARD_ID_MASK 0x0001e000 -#define GRC_MISC_CFG_BOARD_ID_5700 0x0001e000 -#define GRC_MISC_CFG_BOARD_ID_5701 0x00000000 -#define GRC_MISC_CFG_BOARD_ID_5702FE 0x00004000 -#define GRC_MISC_CFG_BOARD_ID_5703 0x00000000 -#define GRC_MISC_CFG_BOARD_ID_5703S 0x00002000 -#define GRC_MISC_CFG_BOARD_ID_5704 0x00000000 -#define GRC_MISC_CFG_BOARD_ID_5704CIOBE 0x00004000 -#define GRC_MISC_CFG_BOARD_ID_5704_A2 0x00008000 -#define GRC_MISC_CFG_BOARD_ID_5788 0x00010000 -#define GRC_MISC_CFG_BOARD_ID_5788M 0x00018000 -#define GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000 -#define GRC_MISC_CFG_EPHY_IDDQ 0x00200000 -#define GRC_MISC_CFG_KEEP_GPHY_POWER 0x04000000 -#define GRC_LOCAL_CTRL 0x00006808 -#define GRC_LCLCTRL_INT_ACTIVE 0x00000001 -#define GRC_LCLCTRL_CLEARINT 0x00000002 -#define GRC_LCLCTRL_SETINT 0x00000004 -#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008 -#define GRC_LCLCTRL_GPIO_UART_SEL 0x00000010 /* 5755 only */ -#define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */ -#define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */ -#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020 -#define GRC_LCLCTRL_GPIO_OE3 0x00000040 -#define GRC_LCLCTRL_GPIO_OUTPUT3 0x00000080 -#define GRC_LCLCTRL_GPIO_INPUT0 0x00000100 -#define GRC_LCLCTRL_GPIO_INPUT1 0x00000200 -#define GRC_LCLCTRL_GPIO_INPUT2 0x00000400 -#define GRC_LCLCTRL_GPIO_OE0 0x00000800 -#define GRC_LCLCTRL_GPIO_OE1 0x00001000 -#define GRC_LCLCTRL_GPIO_OE2 0x00002000 -#define GRC_LCLCTRL_GPIO_OUTPUT0 0x00004000 -#define GRC_LCLCTRL_GPIO_OUTPUT1 0x00008000 -#define GRC_LCLCTRL_GPIO_OUTPUT2 0x00010000 -#define GRC_LCLCTRL_EXTMEM_ENABLE 0x00020000 -#define GRC_LCLCTRL_MEMSZ_MASK 0x001c0000 -#define GRC_LCLCTRL_MEMSZ_256K 0x00000000 -#define GRC_LCLCTRL_MEMSZ_512K 0x00040000 -#define GRC_LCLCTRL_MEMSZ_1M 0x00080000 -#define GRC_LCLCTRL_MEMSZ_2M 0x000c0000 -#define GRC_LCLCTRL_MEMSZ_4M 0x00100000 -#define GRC_LCLCTRL_MEMSZ_8M 0x00140000 -#define GRC_LCLCTRL_MEMSZ_16M 0x00180000 -#define GRC_LCLCTRL_BANK_SELECT 0x00200000 -#define GRC_LCLCTRL_SSRAM_TYPE 0x00400000 -#define GRC_LCLCTRL_AUTO_SEEPROM 0x01000000 -#define GRC_TIMER 0x0000680c -#define GRC_RX_CPU_EVENT 0x00006810 -#define GRC_RX_CPU_DRIVER_EVENT 0x00004000 -#define GRC_RX_TIMER_REF 0x00006814 -#define GRC_RX_CPU_SEM 0x00006818 -#define GRC_REMOTE_RX_CPU_ATTN 0x0000681c -#define GRC_TX_CPU_EVENT 0x00006820 -#define GRC_TX_TIMER_REF 0x00006824 -#define GRC_TX_CPU_SEM 0x00006828 -#define GRC_REMOTE_TX_CPU_ATTN 0x0000682c -#define GRC_MEM_POWER_UP 0x00006830 /* 64-bit */ -#define GRC_EEPROM_ADDR 0x00006838 -#define EEPROM_ADDR_WRITE 0x00000000 -#define EEPROM_ADDR_READ 0x80000000 -#define EEPROM_ADDR_COMPLETE 0x40000000 -#define EEPROM_ADDR_FSM_RESET 0x20000000 -#define EEPROM_ADDR_DEVID_MASK 0x1c000000 -#define EEPROM_ADDR_DEVID_SHIFT 26 -#define EEPROM_ADDR_START 0x02000000 -#define EEPROM_ADDR_CLKPERD_SHIFT 16 -#define EEPROM_ADDR_ADDR_MASK 0x0000ffff -#define EEPROM_ADDR_ADDR_SHIFT 0 -#define EEPROM_DEFAULT_CLOCK_PERIOD 0x60 -#define EEPROM_CHIP_SIZE (64 * 1024) -#define GRC_EEPROM_DATA 0x0000683c -#define GRC_EEPROM_CTRL 0x00006840 -#define GRC_MDI_CTRL 0x00006844 -#define GRC_SEEPROM_DELAY 0x00006848 -/* 0x684c --> 0x6890 unused */ -#define GRC_VCPU_EXT_CTRL 0x00006890 -#define GRC_VCPU_EXT_CTRL_HALT_CPU 0x00400000 
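The GRC_EEPROM_ADDR bits defined above form a self-contained command register: the driver packs an offset, a device index, and the READ/START bits into one write, polls for COMPLETE, then pulls the word out of GRC_EEPROM_DATA. The following is a minimal sketch of that sequence, not the driver's exact code; the tr32()/tw32() accessor names match the driver's style, but the helper name and polling budget are illustrative.

/* Illustrative sketch: read one 32-bit word through the GRC EEPROM
 * interface. Assumes tr32()/tw32() register accessors on a tg3
 * context; error handling and byte-order fixups are omitted. */
static int tg3_eeprom_read_word(struct tg3 *tp, u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	/* Clear the address/devid fields, then issue a READ+START. */
	tmp = tr32(GRC_EEPROM_ADDR) &
	      ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
		EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) & EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {	/* polling budget is illustrative */
		if (tr32(GRC_EEPROM_ADDR) & EEPROM_ADDR_COMPLETE) {
			*val = tr32(GRC_EEPROM_DATA);
			return 0;
		}
		udelay(10);
	}
	return -EBUSY;
}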
-#define GRC_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000 -#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */ - -/* 0x6c00 --> 0x7000 unused */ - -/* NVRAM Control registers */ -#define NVRAM_CMD 0x00007000 -#define NVRAM_CMD_RESET 0x00000001 -#define NVRAM_CMD_DONE 0x00000008 -#define NVRAM_CMD_GO 0x00000010 -#define NVRAM_CMD_WR 0x00000020 -#define NVRAM_CMD_RD 0x00000000 -#define NVRAM_CMD_ERASE 0x00000040 -#define NVRAM_CMD_FIRST 0x00000080 -#define NVRAM_CMD_LAST 0x00000100 -#define NVRAM_CMD_WREN 0x00010000 -#define NVRAM_CMD_WRDI 0x00020000 -#define NVRAM_STAT 0x00007004 -#define NVRAM_WRDATA 0x00007008 -#define NVRAM_ADDR 0x0000700c -#define NVRAM_ADDR_MSK 0x00ffffff -#define NVRAM_RDDATA 0x00007010 -#define NVRAM_CFG1 0x00007014 -#define NVRAM_CFG1_FLASHIF_ENAB 0x00000001 -#define NVRAM_CFG1_BUFFERED_MODE 0x00000002 -#define NVRAM_CFG1_PASS_THRU 0x00000004 -#define NVRAM_CFG1_STATUS_BITS 0x00000070 -#define NVRAM_CFG1_BIT_BANG 0x00000008 -#define NVRAM_CFG1_FLASH_SIZE 0x02000000 -#define NVRAM_CFG1_COMPAT_BYPASS 0x80000000 -#define NVRAM_CFG1_VENDOR_MASK 0x03000003 -#define FLASH_VENDOR_ATMEL_EEPROM 0x02000000 -#define FLASH_VENDOR_ATMEL_FLASH_BUFFERED 0x02000003 -#define FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED 0x00000003 -#define FLASH_VENDOR_ST 0x03000001 -#define FLASH_VENDOR_SAIFUN 0x01000003 -#define FLASH_VENDOR_SST_SMALL 0x00000001 -#define FLASH_VENDOR_SST_LARGE 0x02000001 -#define NVRAM_CFG1_5752VENDOR_MASK 0x03c00003 -#define FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ 0x00000000 -#define FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ 0x02000000 -#define FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED 0x02000003 -#define FLASH_5752VENDOR_ST_M45PE10 0x02400000 -#define FLASH_5752VENDOR_ST_M45PE20 0x02400002 -#define FLASH_5752VENDOR_ST_M45PE40 0x02400001 -#define FLASH_5755VENDOR_ATMEL_FLASH_1 0x03400001 -#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002 -#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000 -#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003 -#define FLASH_5755VENDOR_ATMEL_FLASH_5 0x02000003 -#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003 -#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002 -#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003 -#define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002 -#define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000 -#define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000 -#define FLASH_5761VENDOR_ATMEL_MDB021D 0x00800003 -#define FLASH_5761VENDOR_ATMEL_MDB041D 0x00800000 -#define FLASH_5761VENDOR_ATMEL_MDB081D 0x00800002 -#define FLASH_5761VENDOR_ATMEL_MDB161D 0x00800001 -#define FLASH_5761VENDOR_ATMEL_ADB021D 0x00000003 -#define FLASH_5761VENDOR_ATMEL_ADB041D 0x00000000 -#define FLASH_5761VENDOR_ATMEL_ADB081D 0x00000002 -#define FLASH_5761VENDOR_ATMEL_ADB161D 0x00000001 -#define FLASH_5761VENDOR_ST_M_M45PE20 0x02800001 -#define FLASH_5761VENDOR_ST_M_M45PE40 0x02800000 -#define FLASH_5761VENDOR_ST_M_M45PE80 0x02800002 -#define FLASH_5761VENDOR_ST_M_M45PE16 0x02800003 -#define FLASH_5761VENDOR_ST_A_M45PE20 0x02000001 -#define FLASH_5761VENDOR_ST_A_M45PE40 0x02000000 -#define FLASH_5761VENDOR_ST_A_M45PE80 0x02000002 -#define FLASH_5761VENDOR_ST_A_M45PE16 0x02000003 -#define FLASH_57780VENDOR_ATMEL_AT45DB011D 0x00400000 -#define FLASH_57780VENDOR_ATMEL_AT45DB011B 0x03400000 -#define FLASH_57780VENDOR_ATMEL_AT45DB021D 0x00400002 -#define FLASH_57780VENDOR_ATMEL_AT45DB021B 0x03400002 -#define FLASH_57780VENDOR_ATMEL_AT45DB041D 0x00400001 -#define FLASH_57780VENDOR_ATMEL_AT45DB041B 0x03400001 -#define FLASH_5717VENDOR_ATMEL_EEPROM 0x02000001 
-#define FLASH_5717VENDOR_MICRO_EEPROM 0x02000003 -#define FLASH_5717VENDOR_ATMEL_MDB011D 0x01000001 -#define FLASH_5717VENDOR_ATMEL_MDB021D 0x01000003 -#define FLASH_5717VENDOR_ST_M_M25PE10 0x02000000 -#define FLASH_5717VENDOR_ST_M_M25PE20 0x02000002 -#define FLASH_5717VENDOR_ST_M_M45PE10 0x00000001 -#define FLASH_5717VENDOR_ST_M_M45PE20 0x00000003 -#define FLASH_5717VENDOR_ATMEL_ADB011B 0x01400000 -#define FLASH_5717VENDOR_ATMEL_ADB021B 0x01400002 -#define FLASH_5717VENDOR_ATMEL_ADB011D 0x01400001 -#define FLASH_5717VENDOR_ATMEL_ADB021D 0x01400003 -#define FLASH_5717VENDOR_ST_A_M25PE10 0x02400000 -#define FLASH_5717VENDOR_ST_A_M25PE20 0x02400002 -#define FLASH_5717VENDOR_ST_A_M45PE10 0x02400001 -#define FLASH_5717VENDOR_ST_A_M45PE20 0x02400003 -#define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000 -#define FLASH_5717VENDOR_ST_25USPT 0x03400002 -#define FLASH_5717VENDOR_ST_45USPT 0x03400001 -#define FLASH_5720_EEPROM_HD 0x00000001 -#define FLASH_5720_EEPROM_LD 0x00000003 -#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000 -#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002 -#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001 -#define FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003 -#define FLASH_5720VENDOR_M_ST_M25PE10 0x02000000 -#define FLASH_5720VENDOR_M_ST_M25PE20 0x02000002 -#define FLASH_5720VENDOR_M_ST_M25PE40 0x02000001 -#define FLASH_5720VENDOR_M_ST_M25PE80 0x02000003 -#define FLASH_5720VENDOR_M_ST_M45PE10 0x03000000 -#define FLASH_5720VENDOR_M_ST_M45PE20 0x03000002 -#define FLASH_5720VENDOR_M_ST_M45PE40 0x03000001 -#define FLASH_5720VENDOR_M_ST_M45PE80 0x03000003 -#define FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000 -#define FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002 -#define FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001 -#define FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000 -#define FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002 -#define FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001 -#define FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003 -#define FLASH_5720VENDOR_A_ST_M25PE10 0x02800000 -#define FLASH_5720VENDOR_A_ST_M25PE20 0x02800002 -#define FLASH_5720VENDOR_A_ST_M25PE40 0x02800001 -#define FLASH_5720VENDOR_A_ST_M25PE80 0x02800003 -#define FLASH_5720VENDOR_A_ST_M45PE10 0x02c00000 -#define FLASH_5720VENDOR_A_ST_M45PE20 0x02c00002 -#define FLASH_5720VENDOR_A_ST_M45PE40 0x02c00001 -#define FLASH_5720VENDOR_A_ST_M45PE80 0x02c00003 -#define FLASH_5720VENDOR_ATMEL_45USPT 0x03c00000 -#define FLASH_5720VENDOR_ST_25USPT 0x03c00002 -#define FLASH_5720VENDOR_ST_45USPT 0x03c00001 -#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 -#define FLASH_5752PAGE_SIZE_256 0x00000000 -#define FLASH_5752PAGE_SIZE_512 0x10000000 -#define FLASH_5752PAGE_SIZE_1K 0x20000000 -#define FLASH_5752PAGE_SIZE_2K 0x30000000 -#define FLASH_5752PAGE_SIZE_4K 0x40000000 -#define FLASH_5752PAGE_SIZE_264 0x50000000 -#define FLASH_5752PAGE_SIZE_528 0x60000000 -#define NVRAM_CFG2 0x00007018 -#define NVRAM_CFG3 0x0000701c -#define NVRAM_SWARB 0x00007020 -#define SWARB_REQ_SET0 0x00000001 -#define SWARB_REQ_SET1 0x00000002 -#define SWARB_REQ_SET2 0x00000004 -#define SWARB_REQ_SET3 0x00000008 -#define SWARB_REQ_CLR0 0x00000010 -#define SWARB_REQ_CLR1 0x00000020 -#define SWARB_REQ_CLR2 0x00000040 -#define SWARB_REQ_CLR3 0x00000080 -#define SWARB_GNT0 0x00000100 -#define SWARB_GNT1 0x00000200 -#define SWARB_GNT2 0x00000400 -#define SWARB_GNT3 0x00000800 -#define SWARB_REQ0 0x00001000 -#define SWARB_REQ1 0x00002000 -#define SWARB_REQ2 0x00004000 -#define SWARB_REQ3 0x00008000 -#define NVRAM_ACCESS 0x00007024 -#define ACCESS_ENABLE 0x00000001 -#define 
ACCESS_WR_ENABLE 0x00000002 -#define NVRAM_WRITE1 0x00007028 -/* 0x702c unused */ - -#define NVRAM_ADDR_LOCKOUT 0x00007030 -/* 0x7034 --> 0x7500 unused */ - -#define OTP_MODE 0x00007500 -#define OTP_MODE_OTP_THRU_GRC 0x00000001 -#define OTP_CTRL 0x00007504 -#define OTP_CTRL_OTP_PROG_ENABLE 0x00200000 -#define OTP_CTRL_OTP_CMD_READ 0x00000000 -#define OTP_CTRL_OTP_CMD_INIT 0x00000008 -#define OTP_CTRL_OTP_CMD_START 0x00000001 -#define OTP_STATUS 0x00007508 -#define OTP_STATUS_CMD_DONE 0x00000001 -#define OTP_ADDRESS 0x0000750c -#define OTP_ADDRESS_MAGIC1 0x000000a0 -#define OTP_ADDRESS_MAGIC2 0x00000080 -/* 0x7510 unused */ - -#define OTP_READ_DATA 0x00007514 -/* 0x7518 --> 0x7c04 unused */ - -#define PCIE_TRANSACTION_CFG 0x00007c04 -#define PCIE_TRANS_CFG_1SHOT_MSI 0x20000000 -#define PCIE_TRANS_CFG_LOM 0x00000020 -/* 0x7c08 --> 0x7d28 unused */ - -#define PCIE_PWR_MGMT_THRESH 0x00007d28 -#define PCIE_PWR_MGMT_L1_THRESH_MSK 0x0000ff00 -#define PCIE_PWR_MGMT_L1_THRESH_4MS 0x0000ff00 -#define PCIE_PWR_MGMT_EXT_ASPM_TMR_EN 0x01000000 -/* 0x7d2c --> 0x7d54 unused */ - -#define TG3_PCIE_LNKCTL 0x00007d54 -#define TG3_PCIE_LNKCTL_L1_PLL_PD_EN 0x00000008 -#define TG3_PCIE_LNKCTL_L1_PLL_PD_DIS 0x00000080 -/* 0x7d58 --> 0x7e70 unused */ - -#define TG3_PCIE_PHY_TSTCTL 0x00007e2c -#define TG3_PCIE_PHY_TSTCTL_PCIE10 0x00000040 -#define TG3_PCIE_PHY_TSTCTL_PSCRAM 0x00000020 - -#define TG3_PCIE_EIDLE_DELAY 0x00007e70 -#define TG3_PCIE_EIDLE_DELAY_MASK 0x0000001f -#define TG3_PCIE_EIDLE_DELAY_13_CLKS 0x0000000c -/* 0x7e74 --> 0x8000 unused */ - - -/* Alternate PCIE definitions */ -#define TG3_PCIE_TLDLPL_PORT 0x00007c00 -#define TG3_PCIE_DL_LO_FTSMAX 0x0000000c -#define TG3_PCIE_DL_LO_FTSMAX_MSK 0x000000ff -#define TG3_PCIE_DL_LO_FTSMAX_VAL 0x0000002c -#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004 -#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000 -#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014 -#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000 - -#define TG3_REG_BLK_SIZE 0x00008000 - -/* OTP bit definitions */ -#define TG3_OTP_AGCTGT_MASK 0x000000e0 -#define TG3_OTP_AGCTGT_SHIFT 1 -#define TG3_OTP_HPFFLTR_MASK 0x00000300 -#define TG3_OTP_HPFFLTR_SHIFT 1 -#define TG3_OTP_HPFOVER_MASK 0x00000400 -#define TG3_OTP_HPFOVER_SHIFT 1 -#define TG3_OTP_LPFDIS_MASK 0x00000800 -#define TG3_OTP_LPFDIS_SHIFT 11 -#define TG3_OTP_VDAC_MASK 0xff000000 -#define TG3_OTP_VDAC_SHIFT 24 -#define TG3_OTP_10BTAMP_MASK 0x0000f000 -#define TG3_OTP_10BTAMP_SHIFT 8 -#define TG3_OTP_ROFF_MASK 0x00e00000 -#define TG3_OTP_ROFF_SHIFT 11 -#define TG3_OTP_RCOFF_MASK 0x001c0000 -#define TG3_OTP_RCOFF_SHIFT 16 - -#define TG3_OTP_DEFAULT 0x286c1640 - - -/* Hardware Legacy NVRAM layout */ -#define TG3_NVM_VPD_OFF 0x100 -#define TG3_NVM_VPD_LEN 256 - -/* Hardware Selfboot NVRAM layout */ -#define TG3_NVM_HWSB_CFG1 0x00000004 -#define TG3_NVM_HWSB_CFG1_MAJMSK 0xf8000000 -#define TG3_NVM_HWSB_CFG1_MAJSFT 27 -#define TG3_NVM_HWSB_CFG1_MINMSK 0x07c00000 -#define TG3_NVM_HWSB_CFG1_MINSFT 22 - -#define TG3_EEPROM_MAGIC 0x669955aa -#define TG3_EEPROM_MAGIC_FW 0xa5000000 -#define TG3_EEPROM_MAGIC_FW_MSK 0xff000000 -#define TG3_EEPROM_SB_FORMAT_MASK 0x00e00000 -#define TG3_EEPROM_SB_FORMAT_1 0x00200000 -#define TG3_EEPROM_SB_REVISION_MASK 0x001f0000 -#define TG3_EEPROM_SB_REVISION_0 0x00000000 -#define TG3_EEPROM_SB_REVISION_2 0x00020000 -#define TG3_EEPROM_SB_REVISION_3 0x00030000 -#define TG3_EEPROM_SB_REVISION_4 0x00040000 -#define TG3_EEPROM_SB_REVISION_5 0x00050000 -#define TG3_EEPROM_SB_REVISION_6 0x00060000 -#define TG3_EEPROM_MAGIC_HW 0xabcd 
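The SWARB_REQ*/SWARB_GNT* bits of NVRAM_SWARB, defined above, implement a request/grant handshake that arbitrates NVRAM access between the host driver and on-chip firmware: raise a request bit, wait for the matching grant bit, and withdraw the request if the grant never arrives. A sketch of the acquire path, assuming the driver's tr32()/tw32() accessors; the helper name and retry budget are illustrative.

/* Illustrative sketch: acquire the NVRAM software-arbitration
 * semaphore by raising request slot 1 and polling for its grant. */
static int tg3_nvram_arb_acquire(struct tg3 *tp)
{
	int i;

	tw32(NVRAM_SWARB, SWARB_REQ_SET1);
	for (i = 0; i < 8000; i++) {
		if (tr32(NVRAM_SWARB) & SWARB_GNT1)
			return 0;
		udelay(20);
	}
	/* No grant: withdraw the request rather than leave it pending. */
	tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
	return -ENODEV;
}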
-#define TG3_EEPROM_MAGIC_HW_MSK 0xffff - -#define TG3_NVM_DIR_START 0x18 -#define TG3_NVM_DIR_END 0x78 -#define TG3_NVM_DIRENT_SIZE 0xc -#define TG3_NVM_DIRTYPE_SHIFT 24 -#define TG3_NVM_DIRTYPE_LENMSK 0x003fffff -#define TG3_NVM_DIRTYPE_ASFINI 1 -#define TG3_NVM_DIRTYPE_EXTVPD 20 -#define TG3_NVM_PTREV_BCVER 0x94 -#define TG3_NVM_BCVER_MAJMSK 0x0000ff00 -#define TG3_NVM_BCVER_MAJSFT 8 -#define TG3_NVM_BCVER_MINMSK 0x000000ff - -#define TG3_EEPROM_SB_F1R0_EDH_OFF 0x10 -#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14 -#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 -#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18 -#define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c -#define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20 -#define TG3_EEPROM_SB_F1R6_EDH_OFF 0x4c -#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700 -#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8 -#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff -#define TG3_EEPROM_SB_EDH_BLD_MASK 0x0000f800 -#define TG3_EEPROM_SB_EDH_BLD_SHFT 11 - - -/* 32K Window into NIC internal memory */ -#define NIC_SRAM_WIN_BASE 0x00008000 - -/* Offsets into first 32k of NIC internal memory. */ -#define NIC_SRAM_PAGE_ZERO 0x00000000 -#define NIC_SRAM_SEND_RCB 0x00000100 /* 16 * TG3_BDINFO_... */ -#define NIC_SRAM_RCV_RET_RCB 0x00000200 /* 16 * TG3_BDINFO_... */ -#define NIC_SRAM_STATS_BLK 0x00000300 -#define NIC_SRAM_STATUS_BLK 0x00000b00 - -#define NIC_SRAM_FIRMWARE_MBOX 0x00000b50 -#define NIC_SRAM_FIRMWARE_MBOX_MAGIC1 0x4B657654 -#define NIC_SRAM_FIRMWARE_MBOX_MAGIC2 0x4861764b /* !dma on linkchg */ - -#define NIC_SRAM_DATA_SIG 0x00000b54 -#define NIC_SRAM_DATA_SIG_MAGIC 0x4b657654 /* ascii for 'KevT' */ - -#define NIC_SRAM_DATA_CFG 0x00000b58 -#define NIC_SRAM_DATA_CFG_LED_MODE_MASK 0x0000000c -#define NIC_SRAM_DATA_CFG_LED_MODE_MAC 0x00000000 -#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_1 0x00000004 -#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_2 0x00000008 -#define NIC_SRAM_DATA_CFG_PHY_TYPE_MASK 0x00000030 -#define NIC_SRAM_DATA_CFG_PHY_TYPE_UNKNOWN 0x00000000 -#define NIC_SRAM_DATA_CFG_PHY_TYPE_COPPER 0x00000010 -#define NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER 0x00000020 -#define NIC_SRAM_DATA_CFG_WOL_ENABLE 0x00000040 -#define NIC_SRAM_DATA_CFG_ASF_ENABLE 0x00000080 -#define NIC_SRAM_DATA_CFG_EEPROM_WP 0x00000100 -#define NIC_SRAM_DATA_CFG_MINI_PCI 0x00001000 -#define NIC_SRAM_DATA_CFG_FIBER_WOL 0x00004000 -#define NIC_SRAM_DATA_CFG_NO_GPIO2 0x00100000 -#define NIC_SRAM_DATA_CFG_APE_ENABLE 0x00200000 - -#define NIC_SRAM_DATA_VER 0x00000b5c -#define NIC_SRAM_DATA_VER_SHIFT 16 - -#define NIC_SRAM_DATA_PHY_ID 0x00000b74 -#define NIC_SRAM_DATA_PHY_ID1_MASK 0xffff0000 -#define NIC_SRAM_DATA_PHY_ID2_MASK 0x0000ffff - -#define NIC_SRAM_FW_CMD_MBOX 0x00000b78 -#define FWCMD_NICDRV_ALIVE 0x00000001 -#define FWCMD_NICDRV_PAUSE_FW 0x00000002 -#define FWCMD_NICDRV_IPV4ADDR_CHG 0x00000003 -#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004 -#define FWCMD_NICDRV_FIX_DMAR 0x00000005 -#define FWCMD_NICDRV_FIX_DMAW 0x00000006 -#define FWCMD_NICDRV_LINK_UPDATE 0x0000000c -#define FWCMD_NICDRV_ALIVE2 0x0000000d -#define FWCMD_NICDRV_ALIVE3 0x0000000e -#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c -#define NIC_SRAM_FW_CMD_DATA_MBOX 0x00000b80 -#define NIC_SRAM_FW_ASF_STATUS_MBOX 0x00000c00 -#define NIC_SRAM_FW_DRV_STATE_MBOX 0x00000c04 -#define DRV_STATE_START 0x00000001 -#define DRV_STATE_START_DONE 0x80000001 -#define DRV_STATE_UNLOAD 0x00000002 -#define DRV_STATE_UNLOAD_DONE 0x80000002 -#define DRV_STATE_WOL 0x00000003 -#define DRV_STATE_SUSPEND 0x00000004 - -#define NIC_SRAM_FW_RESET_TYPE_MBOX 0x00000c08 - -#define NIC_SRAM_MAC_ADDR_HIGH_MBOX 0x00000c14 
-#define NIC_SRAM_MAC_ADDR_LOW_MBOX 0x00000c18 - -#define NIC_SRAM_WOL_MBOX 0x00000d30 -#define WOL_SIGNATURE 0x474c0000 -#define WOL_DRV_STATE_SHUTDOWN 0x00000001 -#define WOL_DRV_WOL 0x00000002 -#define WOL_SET_MAGIC_PKT 0x00000004 - -#define NIC_SRAM_DATA_CFG_2 0x00000d38 - -#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00000400 -#define SHASTA_EXT_LED_MODE_MASK 0x00018000 -#define SHASTA_EXT_LED_LEGACY 0x00000000 -#define SHASTA_EXT_LED_SHARED 0x00008000 -#define SHASTA_EXT_LED_MAC 0x00010000 -#define SHASTA_EXT_LED_COMBO 0x00018000 - -#define NIC_SRAM_DATA_CFG_3 0x00000d3c -#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002 - -#define NIC_SRAM_DATA_CFG_4 0x00000d60 -#define NIC_SRAM_GMII_MODE 0x00000002 -#define NIC_SRAM_RGMII_INBAND_DISABLE 0x00000004 -#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 -#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 - -#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 - -#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 -#define NIC_SRAM_DMA_DESC_POOL_SIZE 0x00002000 -#define NIC_SRAM_TX_BUFFER_DESC 0x00004000 /* 512 entries */ -#define NIC_SRAM_RX_BUFFER_DESC 0x00006000 /* 256 entries */ -#define NIC_SRAM_RX_JUMBO_BUFFER_DESC 0x00007000 /* 256 entries */ -#define NIC_SRAM_MBUF_POOL_BASE 0x00008000 -#define NIC_SRAM_MBUF_POOL_SIZE96 0x00018000 -#define NIC_SRAM_MBUF_POOL_SIZE64 0x00010000 -#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 -#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 - -#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128 -#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64 -#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32 - -#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700 64 -#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717 16 - - -/* Currently this is fixed. */ -#define TG3_PHY_MII_ADDR 0x01 - - -/*** Tigon3 specific PHY MII registers. ***/ -#define MII_TG3_MMD_CTRL 0x0d /* MMD Access Control register */ -#define MII_TG3_MMD_CTRL_DATA_NOINC 0x4000 -#define MII_TG3_MMD_ADDRESS 0x0e /* MMD Address Data register */ - -#define MII_TG3_EXT_CTRL 0x10 /* Extended control register */ -#define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001 -#define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002 -#define MII_TG3_EXT_CTRL_FORCE_LED_OFF 0x0008 -#define MII_TG3_EXT_CTRL_TBI 0x8000 - -#define MII_TG3_EXT_STAT 0x11 /* Extended status register */ -#define MII_TG3_EXT_STAT_LPASS 0x0100 - -#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */ -#define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */ -#define MII_TG3_DSP_CONTROL 0x16 /* DSP control register */ -#define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */ - -#define MII_TG3_DSP_TAP1 0x0001 -#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 -#define MII_TG3_DSP_TAP26 0x001a -#define MII_TG3_DSP_TAP26_ALNOKO 0x0001 -#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002 -#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004 -#define MII_TG3_DSP_AADJ1CH0 0x001f -#define MII_TG3_DSP_CH34TP2 0x4022 -#define MII_TG3_DSP_CH34TP2_HIBW01 0x01ff -#define MII_TG3_DSP_AADJ1CH3 0x601f -#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 -#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01 -#define MII_TG3_DSP_EXP8 0x0f08 -#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 -#define MII_TG3_DSP_EXP8_AEDW 0x0200 -#define MII_TG3_DSP_EXP75 0x0f75 -#define MII_TG3_DSP_EXP96 0x0f96 -#define MII_TG3_DSP_EXP97 0x0f97 - -#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */ - -#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000 -#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400 -#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800 -#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN 0x4000 - -#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002 
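MII_TG3_DSP_ADDRESS and MII_TG3_DSP_RW_PORT, defined above, form an indirect address/data pair: a DSP coefficient is selected by writing its address through PHY register 0x17 and the value is then transferred through register 0x15. A sketch of the write side, assuming a tg3_writephy() MII helper of the shape the driver uses:

/* Illustrative sketch: program one PHY DSP coefficient through the
 * address/data register pair. */
static int tg3_dsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
	return err;
}

The read side is symmetric: write the coefficient address to MII_TG3_DSP_ADDRESS, then read MII_TG3_DSP_RW_PORT.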
-#define MII_TG3_AUXCTL_PCTL_WOL_EN 0x0008 -#define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010 -#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020 -#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC 0x0040 -#define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180 - -#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST 0x0004 - -#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007 -#define MII_TG3_AUXCTL_MISC_WIRESPD_EN 0x0010 -#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200 -#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT 12 -#define MII_TG3_AUXCTL_MISC_WREN 0x8000 - - -#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */ -#define MII_TG3_AUX_STAT_LPASS 0x0004 -#define MII_TG3_AUX_STAT_SPDMASK 0x0700 -#define MII_TG3_AUX_STAT_10HALF 0x0100 -#define MII_TG3_AUX_STAT_10FULL 0x0200 -#define MII_TG3_AUX_STAT_100HALF 0x0300 -#define MII_TG3_AUX_STAT_100_4 0x0400 -#define MII_TG3_AUX_STAT_100FULL 0x0500 -#define MII_TG3_AUX_STAT_1000HALF 0x0600 -#define MII_TG3_AUX_STAT_1000FULL 0x0700 -#define MII_TG3_AUX_STAT_100 0x0008 -#define MII_TG3_AUX_STAT_FULL 0x0001 - -#define MII_TG3_ISTAT 0x1a /* IRQ status register */ -#define MII_TG3_IMASK 0x1b /* IRQ mask register */ - -/* ISTAT/IMASK event bits */ -#define MII_TG3_INT_LINKCHG 0x0002 -#define MII_TG3_INT_SPEEDCHG 0x0004 -#define MII_TG3_INT_DUPLEXCHG 0x0008 -#define MII_TG3_INT_ANEG_PAGE_RX 0x0400 - -#define MII_TG3_MISC_SHDW 0x1c -#define MII_TG3_MISC_SHDW_WREN 0x8000 - -#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001 -#define MII_TG3_MISC_SHDW_APD_ENABLE 0x0020 -#define MII_TG3_MISC_SHDW_APD_SEL 0x2800 - -#define MII_TG3_MISC_SHDW_SCR5_C125OE 0x0001 -#define MII_TG3_MISC_SHDW_SCR5_DLLAPD 0x0002 -#define MII_TG3_MISC_SHDW_SCR5_SDTL 0x0004 -#define MII_TG3_MISC_SHDW_SCR5_DLPTLM 0x0008 -#define MII_TG3_MISC_SHDW_SCR5_LPED 0x0010 -#define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400 - -#define MII_TG3_TEST1 0x1e -#define MII_TG3_TEST1_TRIM_EN 0x0010 -#define MII_TG3_TEST1_CRC_EN 0x8000 - -/* Clause 45 expansion registers */ -#define TG3_CL45_D7_EEERES_STAT 0x803e -#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002 -#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004 - - -/* Fast Ethernet Tranceiver definitions */ -#define MII_TG3_FET_PTEST 0x17 -#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000 -#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800 - -#define MII_TG3_FET_TEST 0x1f -#define MII_TG3_FET_SHADOW_EN 0x0080 - -#define MII_TG3_FET_SHDW_MISCCTRL 0x10 -#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000 - -#define MII_TG3_FET_SHDW_AUXMODE4 0x1a -#define MII_TG3_FET_SHDW_AUXMODE4_SBPD 0x0008 - -#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b -#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020 - - -/* APE registers. Accessible through BAR1 */ -#define TG3_APE_GPIO_MSG 0x0008 -#define TG3_APE_GPIO_MSG_SHIFT 4 -#define TG3_APE_EVENT 0x000c -#define APE_EVENT_1 0x00000001 -#define TG3_APE_LOCK_REQ 0x002c -#define APE_LOCK_REQ_DRIVER 0x00001000 -#define TG3_APE_LOCK_GRANT 0x004c -#define APE_LOCK_GRANT_DRIVER 0x00001000 -#define TG3_APE_SEG_SIG 0x4000 -#define APE_SEG_SIG_MAGIC 0x41504521 - -/* APE shared memory. 
Accessible through BAR1 */ -#define TG3_APE_FW_STATUS 0x400c -#define APE_FW_STATUS_READY 0x00000100 -#define TG3_APE_FW_FEATURES 0x4010 -#define TG3_APE_FW_FEATURE_NCSI 0x00000002 -#define TG3_APE_FW_VERSION 0x4018 -#define APE_FW_VERSION_MAJMSK 0xff000000 -#define APE_FW_VERSION_MAJSFT 24 -#define APE_FW_VERSION_MINMSK 0x00ff0000 -#define APE_FW_VERSION_MINSFT 16 -#define APE_FW_VERSION_REVMSK 0x0000ff00 -#define APE_FW_VERSION_REVSFT 8 -#define APE_FW_VERSION_BLDMSK 0x000000ff -#define TG3_APE_HOST_SEG_SIG 0x4200 -#define APE_HOST_SEG_SIG_MAGIC 0x484f5354 -#define TG3_APE_HOST_SEG_LEN 0x4204 -#define APE_HOST_SEG_LEN_MAGIC 0x00000020 -#define TG3_APE_HOST_INIT_COUNT 0x4208 -#define TG3_APE_HOST_DRIVER_ID 0x420c -#define APE_HOST_DRIVER_ID_LINUX 0xf0000000 -#define APE_HOST_DRIVER_ID_MAGIC(maj, min) \ - (APE_HOST_DRIVER_ID_LINUX | (maj & 0xff) << 16 | (min & 0xff) << 8) -#define TG3_APE_HOST_BEHAVIOR 0x4210 -#define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001 -#define TG3_APE_HOST_HEARTBEAT_INT_MS 0x4214 -#define APE_HOST_HEARTBEAT_INT_DISABLE 0 -#define APE_HOST_HEARTBEAT_INT_5SEC 5000 -#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218 -#define TG3_APE_HOST_DRVR_STATE 0x421c -#define TG3_APE_HOST_DRVR_STATE_START 0x00000001 -#define TG3_APE_HOST_DRVR_STATE_UNLOAD 0x00000002 -#define TG3_APE_HOST_DRVR_STATE_WOL 0x00000003 -#define TG3_APE_HOST_WOL_SPEED 0x4224 -#define TG3_APE_HOST_WOL_SPEED_AUTO 0x00008000 - -#define TG3_APE_EVENT_STATUS 0x4300 - -#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010 -#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500 -#define APE_EVENT_STATUS_STATE_START 0x00010000 -#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000 -#define APE_EVENT_STATUS_STATE_WOL 0x00030000 -#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 -#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 - -#define TG3_APE_PER_LOCK_REQ 0x8400 -#define APE_LOCK_PER_REQ_DRIVER 0x00001000 -#define TG3_APE_PER_LOCK_GRANT 0x8420 -#define APE_PER_LOCK_GRANT_DRIVER 0x00001000 - -/* APE convenience enumerations. */ -#define TG3_APE_LOCK_GRC 1 -#define TG3_APE_LOCK_MEM 4 -#define TG3_APE_LOCK_GPIO 7 - -#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 - - -/* There are two ways to manage the TX descriptors on the tigon3. - * Either the descriptors are in host DMA'able memory, or they - * exist only in the cards on-chip SRAM. All 16 send bds are under - * the same mode, they may not be configured individually. - * - * This driver always uses host memory TX descriptors. - * - * To use host memory TX descriptors: - * 1) Set GRC_MODE_HOST_SENDBDS in GRC_MODE register. - * Make sure GRC_MODE_4X_NIC_SEND_RINGS is clear. - * 2) Allocate DMA'able memory. - * 3) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM: - * a) Set TG3_BDINFO_HOST_ADDR to DMA address of memory - * obtained in step 2 - * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC. - * c) Set len field of TG3_BDINFO_MAXLEN_FLAGS to number - * of TX descriptors. Leave flags field clear. - * 4) Access TX descriptors via host memory. The chip - * will refetch into local SRAM as needed when producer - * index mailboxes are updated. - * - * To use on-chip TX descriptors: - * 1) Set GRC_MODE_4X_NIC_SEND_RINGS in GRC_MODE register. - * Make sure GRC_MODE_HOST_SENDBDS is clear. - * 2) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM: - * a) Set TG3_BDINFO_HOST_ADDR to zero. - * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC - * c) TG3_BDINFO_MAXLEN_FLAGS is don't care. - * 3) Access TX descriptors directly in on-chip SRAM - * using normal {read,write}l(). 
(and not using - * pointer dereferencing of ioremap()'d memory like - * the broken Broadcom driver does) - * - * Note that BDINFO_FLAGS_DISABLED should be set in the flags field of - * TG3_BDINFO_MAXLEN_FLAGS of all unused SEND_RCB indices. - */ -struct tg3_tx_buffer_desc { - u32 addr_hi; - u32 addr_lo; - - u32 len_flags; -#define TXD_FLAG_TCPUDP_CSUM 0x0001 -#define TXD_FLAG_IP_CSUM 0x0002 -#define TXD_FLAG_END 0x0004 -#define TXD_FLAG_IP_FRAG 0x0008 -#define TXD_FLAG_JMB_PKT 0x0008 -#define TXD_FLAG_IP_FRAG_END 0x0010 -#define TXD_FLAG_VLAN 0x0040 -#define TXD_FLAG_COAL_NOW 0x0080 -#define TXD_FLAG_CPU_PRE_DMA 0x0100 -#define TXD_FLAG_CPU_POST_DMA 0x0200 -#define TXD_FLAG_ADD_SRC_ADDR 0x1000 -#define TXD_FLAG_CHOOSE_SRC_ADDR 0x6000 -#define TXD_FLAG_NO_CRC 0x8000 -#define TXD_LEN_SHIFT 16 - - u32 vlan_tag; -#define TXD_VLAN_TAG_SHIFT 0 -#define TXD_MSS_SHIFT 16 -}; - -#define TXD_ADDR 0x00UL /* 64-bit */ -#define TXD_LEN_FLAGS 0x08UL /* 32-bit (upper 16-bits are len) */ -#define TXD_VLAN_TAG 0x0cUL /* 32-bit (upper 16-bits are tag) */ -#define TXD_SIZE 0x10UL - -struct tg3_rx_buffer_desc { - u32 addr_hi; - u32 addr_lo; - - u32 idx_len; -#define RXD_IDX_MASK 0xffff0000 -#define RXD_IDX_SHIFT 16 -#define RXD_LEN_MASK 0x0000ffff -#define RXD_LEN_SHIFT 0 - - u32 type_flags; -#define RXD_TYPE_SHIFT 16 -#define RXD_FLAGS_SHIFT 0 - -#define RXD_FLAG_END 0x0004 -#define RXD_FLAG_MINI 0x0800 -#define RXD_FLAG_JUMBO 0x0020 -#define RXD_FLAG_VLAN 0x0040 -#define RXD_FLAG_ERROR 0x0400 -#define RXD_FLAG_IP_CSUM 0x1000 -#define RXD_FLAG_TCPUDP_CSUM 0x2000 -#define RXD_FLAG_IS_TCP 0x4000 - - u32 ip_tcp_csum; -#define RXD_IPCSUM_MASK 0xffff0000 -#define RXD_IPCSUM_SHIFT 16 -#define RXD_TCPCSUM_MASK 0x0000ffff -#define RXD_TCPCSUM_SHIFT 0 - - u32 err_vlan; - -#define RXD_VLAN_MASK 0x0000ffff - -#define RXD_ERR_BAD_CRC 0x00010000 -#define RXD_ERR_COLLISION 0x00020000 -#define RXD_ERR_LINK_LOST 0x00040000 -#define RXD_ERR_PHY_DECODE 0x00080000 -#define RXD_ERR_ODD_NIBBLE_RCVD_MII 0x00100000 -#define RXD_ERR_MAC_ABRT 0x00200000 -#define RXD_ERR_TOO_SMALL 0x00400000 -#define RXD_ERR_NO_RESOURCES 0x00800000 -#define RXD_ERR_HUGE_FRAME 0x01000000 -#define RXD_ERR_MASK 0xffff0000 - - u32 reserved; - u32 opaque; -#define RXD_OPAQUE_INDEX_MASK 0x0000ffff -#define RXD_OPAQUE_INDEX_SHIFT 0 -#define RXD_OPAQUE_RING_STD 0x00010000 -#define RXD_OPAQUE_RING_JUMBO 0x00020000 -#define RXD_OPAQUE_RING_MINI 0x00040000 -#define RXD_OPAQUE_RING_MASK 0x00070000 -}; - -struct tg3_ext_rx_buffer_desc { - struct { - u32 addr_hi; - u32 addr_lo; - } addrlist[3]; - u32 len2_len1; - u32 resv_len3; - struct tg3_rx_buffer_desc std; -}; - -/* We only use this when testing out the DMA engine - * at probe time. This is the internal format of buffer - * descriptors used by the chip at NIC_SRAM_DMA_DESCS. 
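To make the numbered send-BD recipe earlier in this header concrete, the sketch below walks the host-memory variant: select host send BDs in GRC_MODE, then describe the DMA-coherent ring in the on-chip send RCB. It assumes the TG3_BDINFO_* offsets, BDINFO_FLAGS_MAXLEN_SHIFT, and a tg3_write_mem() NIC-SRAM helper as defined elsewhere in the driver; the ring index and length handling are illustrative.

/* Illustrative sketch: configure send ring 0 to use host-memory TX
 * descriptors, following steps 1 and 3 of the comment above. */
static void tg3_setup_host_txring(struct tg3 *tp, dma_addr_t mapping,
				  u32 nr_desc)
{
	u32 rcb = NIC_SRAM_SEND_RCB;	/* ring 0; ring N is rcb + N * 0x10 */

	/* Step 1: host send BDs, not 4X NIC-resident rings. */
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	tp->grc_mode &= ~GRC_MODE_4X_NIC_SEND_RINGS;
	tw32(GRC_MODE, tp->grc_mode);

	/* Step 3: describe the ring in the on-chip send RCB
	 * (high half of the host address first, then the low half). */
	tg3_write_mem(tp, rcb + TG3_BDINFO_HOST_ADDR, (u64)mapping >> 32);
	tg3_write_mem(tp, rcb + TG3_BDINFO_HOST_ADDR + 4,
		      (u64)mapping & 0xffffffff);
	tg3_write_mem(tp, rcb + TG3_BDINFO_NIC_ADDR,
		      NIC_SRAM_TX_BUFFER_DESC);
	tg3_write_mem(tp, rcb + TG3_BDINFO_MAXLEN_FLAGS,
		      nr_desc << BDINFO_FLAGS_MAXLEN_SHIFT);
}

Step 2 (allocating the DMA'able memory that "mapping" points at) is the caller's job; after this the producer-index mailbox updates drive the chip's refetching.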
- */ -struct tg3_internal_buffer_desc { - u32 addr_hi; - u32 addr_lo; - u32 nic_mbuf; - /* XXX FIX THIS */ -#ifdef __BIG_ENDIAN - u16 cqid_sqid; - u16 len; -#else - u16 len; - u16 cqid_sqid; -#endif - u32 flags; - u32 __cookie1; - u32 __cookie2; - u32 __cookie3; -}; - -#define TG3_HW_STATUS_SIZE 0x50 -struct tg3_hw_status { - u32 status; -#define SD_STATUS_UPDATED 0x00000001 -#define SD_STATUS_LINK_CHG 0x00000002 -#define SD_STATUS_ERROR 0x00000004 - - u32 status_tag; - -#ifdef __BIG_ENDIAN - u16 rx_consumer; - u16 rx_jumbo_consumer; -#else - u16 rx_jumbo_consumer; - u16 rx_consumer; -#endif - -#ifdef __BIG_ENDIAN - u16 reserved; - u16 rx_mini_consumer; -#else - u16 rx_mini_consumer; - u16 reserved; -#endif - struct { -#ifdef __BIG_ENDIAN - u16 tx_consumer; - u16 rx_producer; -#else - u16 rx_producer; - u16 tx_consumer; -#endif - } idx[16]; -}; - -typedef struct { - u32 high, low; -} tg3_stat64_t; - -struct tg3_hw_stats { - u8 __reserved0[0x400-0x300]; - - /* Statistics maintained by Receive MAC. */ - tg3_stat64_t rx_octets; - u64 __reserved1; - tg3_stat64_t rx_fragments; - tg3_stat64_t rx_ucast_packets; - tg3_stat64_t rx_mcast_packets; - tg3_stat64_t rx_bcast_packets; - tg3_stat64_t rx_fcs_errors; - tg3_stat64_t rx_align_errors; - tg3_stat64_t rx_xon_pause_rcvd; - tg3_stat64_t rx_xoff_pause_rcvd; - tg3_stat64_t rx_mac_ctrl_rcvd; - tg3_stat64_t rx_xoff_entered; - tg3_stat64_t rx_frame_too_long_errors; - tg3_stat64_t rx_jabbers; - tg3_stat64_t rx_undersize_packets; - tg3_stat64_t rx_in_length_errors; - tg3_stat64_t rx_out_length_errors; - tg3_stat64_t rx_64_or_less_octet_packets; - tg3_stat64_t rx_65_to_127_octet_packets; - tg3_stat64_t rx_128_to_255_octet_packets; - tg3_stat64_t rx_256_to_511_octet_packets; - tg3_stat64_t rx_512_to_1023_octet_packets; - tg3_stat64_t rx_1024_to_1522_octet_packets; - tg3_stat64_t rx_1523_to_2047_octet_packets; - tg3_stat64_t rx_2048_to_4095_octet_packets; - tg3_stat64_t rx_4096_to_8191_octet_packets; - tg3_stat64_t rx_8192_to_9022_octet_packets; - - u64 __unused0[37]; - - /* Statistics maintained by Transmit MAC. */ - tg3_stat64_t tx_octets; - u64 __reserved2; - tg3_stat64_t tx_collisions; - tg3_stat64_t tx_xon_sent; - tg3_stat64_t tx_xoff_sent; - tg3_stat64_t tx_flow_control; - tg3_stat64_t tx_mac_errors; - tg3_stat64_t tx_single_collisions; - tg3_stat64_t tx_mult_collisions; - tg3_stat64_t tx_deferred; - u64 __reserved3; - tg3_stat64_t tx_excessive_collisions; - tg3_stat64_t tx_late_collisions; - tg3_stat64_t tx_collide_2times; - tg3_stat64_t tx_collide_3times; - tg3_stat64_t tx_collide_4times; - tg3_stat64_t tx_collide_5times; - tg3_stat64_t tx_collide_6times; - tg3_stat64_t tx_collide_7times; - tg3_stat64_t tx_collide_8times; - tg3_stat64_t tx_collide_9times; - tg3_stat64_t tx_collide_10times; - tg3_stat64_t tx_collide_11times; - tg3_stat64_t tx_collide_12times; - tg3_stat64_t tx_collide_13times; - tg3_stat64_t tx_collide_14times; - tg3_stat64_t tx_collide_15times; - tg3_stat64_t tx_ucast_packets; - tg3_stat64_t tx_mcast_packets; - tg3_stat64_t tx_bcast_packets; - tg3_stat64_t tx_carrier_sense_errors; - tg3_stat64_t tx_discards; - tg3_stat64_t tx_errors; - - u64 __unused1[31]; - - /* Statistics maintained by Receive List Placement. 
*/ - tg3_stat64_t COS_rx_packets[16]; - tg3_stat64_t COS_rx_filter_dropped; - tg3_stat64_t dma_writeq_full; - tg3_stat64_t dma_write_prioq_full; - tg3_stat64_t rxbds_empty; - tg3_stat64_t rx_discards; - tg3_stat64_t rx_errors; - tg3_stat64_t rx_threshold_hit; - - u64 __unused2[9]; - - /* Statistics maintained by Send Data Initiator. */ - tg3_stat64_t COS_out_packets[16]; - tg3_stat64_t dma_readq_full; - tg3_stat64_t dma_read_prioq_full; - tg3_stat64_t tx_comp_queue_full; - - /* Statistics maintained by Host Coalescing. */ - tg3_stat64_t ring_set_send_prod_index; - tg3_stat64_t ring_status_update; - tg3_stat64_t nic_irqs; - tg3_stat64_t nic_avoided_irqs; - tg3_stat64_t nic_tx_threshold_hit; - - /* NOT a part of the hardware statistics block format. - * These stats are here as storage for tg3_periodic_fetch_stats(). - */ - tg3_stat64_t mbuf_lwm_thresh_hit; - - u8 __reserved4[0xb00-0x9c8]; -}; - -/* 'mapping' is superfluous as the chip does not write into - * the tx/rx post rings so we could just fetch it from there. - * But the cache behavior is better how we are doing it now. - */ -struct ring_info { - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(mapping); -}; - -struct tg3_tx_ring_info { - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(mapping); - bool fragmented; -}; - -struct tg3_link_config { - /* Describes what we're trying to get. */ - u32 advertising; - u16 speed; - u8 duplex; - u8 autoneg; - u8 flowctrl; - - /* Describes what we actually have. */ - u8 active_flowctrl; - - u8 active_duplex; -#define SPEED_INVALID 0xffff -#define DUPLEX_INVALID 0xff -#define AUTONEG_INVALID 0xff - u16 active_speed; - - /* When we go in and out of low power mode we need - * to swap with this state. - */ - u16 orig_speed; - u8 orig_duplex; - u8 orig_autoneg; - u32 orig_advertising; -}; - -struct tg3_bufmgr_config { - u32 mbuf_read_dma_low_water; - u32 mbuf_mac_rx_low_water; - u32 mbuf_high_water; - - u32 mbuf_read_dma_low_water_jumbo; - u32 mbuf_mac_rx_low_water_jumbo; - u32 mbuf_high_water_jumbo; - - u32 dma_low_water; - u32 dma_high_water; -}; - -struct tg3_ethtool_stats { - /* Statistics maintained by Receive MAC. */ - u64 rx_octets; - u64 rx_fragments; - u64 rx_ucast_packets; - u64 rx_mcast_packets; - u64 rx_bcast_packets; - u64 rx_fcs_errors; - u64 rx_align_errors; - u64 rx_xon_pause_rcvd; - u64 rx_xoff_pause_rcvd; - u64 rx_mac_ctrl_rcvd; - u64 rx_xoff_entered; - u64 rx_frame_too_long_errors; - u64 rx_jabbers; - u64 rx_undersize_packets; - u64 rx_in_length_errors; - u64 rx_out_length_errors; - u64 rx_64_or_less_octet_packets; - u64 rx_65_to_127_octet_packets; - u64 rx_128_to_255_octet_packets; - u64 rx_256_to_511_octet_packets; - u64 rx_512_to_1023_octet_packets; - u64 rx_1024_to_1522_octet_packets; - u64 rx_1523_to_2047_octet_packets; - u64 rx_2048_to_4095_octet_packets; - u64 rx_4096_to_8191_octet_packets; - u64 rx_8192_to_9022_octet_packets; - - /* Statistics maintained by Transmit MAC. 
*/ - u64 tx_octets; - u64 tx_collisions; - u64 tx_xon_sent; - u64 tx_xoff_sent; - u64 tx_flow_control; - u64 tx_mac_errors; - u64 tx_single_collisions; - u64 tx_mult_collisions; - u64 tx_deferred; - u64 tx_excessive_collisions; - u64 tx_late_collisions; - u64 tx_collide_2times; - u64 tx_collide_3times; - u64 tx_collide_4times; - u64 tx_collide_5times; - u64 tx_collide_6times; - u64 tx_collide_7times; - u64 tx_collide_8times; - u64 tx_collide_9times; - u64 tx_collide_10times; - u64 tx_collide_11times; - u64 tx_collide_12times; - u64 tx_collide_13times; - u64 tx_collide_14times; - u64 tx_collide_15times; - u64 tx_ucast_packets; - u64 tx_mcast_packets; - u64 tx_bcast_packets; - u64 tx_carrier_sense_errors; - u64 tx_discards; - u64 tx_errors; - - /* Statistics maintained by Receive List Placement. */ - u64 dma_writeq_full; - u64 dma_write_prioq_full; - u64 rxbds_empty; - u64 rx_discards; - u64 rx_errors; - u64 rx_threshold_hit; - - /* Statistics maintained by Send Data Initiator. */ - u64 dma_readq_full; - u64 dma_read_prioq_full; - u64 tx_comp_queue_full; - - /* Statistics maintained by Host Coalescing. */ - u64 ring_set_send_prod_index; - u64 ring_status_update; - u64 nic_irqs; - u64 nic_avoided_irqs; - u64 nic_tx_threshold_hit; - - u64 mbuf_lwm_thresh_hit; -}; - -struct tg3_rx_prodring_set { - u32 rx_std_prod_idx; - u32 rx_std_cons_idx; - u32 rx_jmb_prod_idx; - u32 rx_jmb_cons_idx; - struct tg3_rx_buffer_desc *rx_std; - struct tg3_ext_rx_buffer_desc *rx_jmb; - struct ring_info *rx_std_buffers; - struct ring_info *rx_jmb_buffers; - dma_addr_t rx_std_mapping; - dma_addr_t rx_jmb_mapping; -}; - -#define TG3_IRQ_MAX_VECS_RSS 5 -#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS - -struct tg3_napi { - struct napi_struct napi ____cacheline_aligned; - struct tg3 *tp; - struct tg3_hw_status *hw_status; - - u32 chk_msi_cnt; - u32 last_tag; - u32 last_irq_tag; - u32 int_mbox; - u32 coal_now; - - u32 consmbox ____cacheline_aligned; - u32 rx_rcb_ptr; - u32 last_rx_cons; - u16 *rx_rcb_prod_idx; - struct tg3_rx_prodring_set prodring; - struct tg3_rx_buffer_desc *rx_rcb; - - u32 tx_prod ____cacheline_aligned; - u32 tx_cons; - u32 tx_pending; - u32 last_tx_cons; - u32 prodmbox; - struct tg3_tx_buffer_desc *tx_ring; - struct tg3_tx_ring_info *tx_buffers; - - dma_addr_t status_mapping; - dma_addr_t rx_rcb_mapping; - dma_addr_t tx_desc_mapping; - - char irq_lbl[IFNAMSIZ]; - unsigned int irq_vec; -}; - -enum TG3_FLAGS { - TG3_FLAG_TAGGED_STATUS = 0, - TG3_FLAG_TXD_MBOX_HWBUG, - TG3_FLAG_USE_LINKCHG_REG, - TG3_FLAG_ERROR_PROCESSED, - TG3_FLAG_ENABLE_ASF, - TG3_FLAG_ASPM_WORKAROUND, - TG3_FLAG_POLL_SERDES, - TG3_FLAG_MBOX_WRITE_REORDER, - TG3_FLAG_PCIX_TARGET_HWBUG, - TG3_FLAG_WOL_SPEED_100MB, - TG3_FLAG_WOL_ENABLE, - TG3_FLAG_EEPROM_WRITE_PROT, - TG3_FLAG_NVRAM, - TG3_FLAG_NVRAM_BUFFERED, - TG3_FLAG_SUPPORT_MSI, - TG3_FLAG_SUPPORT_MSIX, - TG3_FLAG_PCIX_MODE, - TG3_FLAG_PCI_HIGH_SPEED, - TG3_FLAG_PCI_32BIT, - TG3_FLAG_SRAM_USE_CONFIG, - TG3_FLAG_TX_RECOVERY_PENDING, - TG3_FLAG_WOL_CAP, - TG3_FLAG_JUMBO_RING_ENABLE, - TG3_FLAG_PAUSE_AUTONEG, - TG3_FLAG_CPMU_PRESENT, - TG3_FLAG_40BIT_DMA_BUG, - TG3_FLAG_BROKEN_CHECKSUMS, - TG3_FLAG_JUMBO_CAPABLE, - TG3_FLAG_CHIP_RESETTING, - TG3_FLAG_INIT_COMPLETE, - TG3_FLAG_RESTART_TIMER, - TG3_FLAG_TSO_BUG, - TG3_FLAG_IS_5788, - TG3_FLAG_MAX_RXPEND_64, - TG3_FLAG_TSO_CAPABLE, - TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ - TG3_FLAG_ASF_NEW_HANDSHAKE, - TG3_FLAG_HW_AUTONEG, - TG3_FLAG_IS_NIC, - TG3_FLAG_FLASH, - TG3_FLAG_HW_TSO_1, - TG3_FLAG_5705_PLUS, - TG3_FLAG_5750_PLUS, - 
TG3_FLAG_HW_TSO_3, - TG3_FLAG_USING_MSI, - TG3_FLAG_USING_MSIX, - TG3_FLAG_ICH_WORKAROUND, - TG3_FLAG_5780_CLASS, - TG3_FLAG_HW_TSO_2, - TG3_FLAG_1SHOT_MSI, - TG3_FLAG_NO_FWARE_REPORTED, - TG3_FLAG_NO_NVRAM_ADDR_TRANS, - TG3_FLAG_ENABLE_APE, - TG3_FLAG_PROTECTED_NVRAM, - TG3_FLAG_5701_DMA_BUG, - TG3_FLAG_USE_PHYLIB, - TG3_FLAG_MDIOBUS_INITED, - TG3_FLAG_LRG_PROD_RING_CAP, - TG3_FLAG_RGMII_INBAND_DISABLE, - TG3_FLAG_RGMII_EXT_IBND_RX_EN, - TG3_FLAG_RGMII_EXT_IBND_TX_EN, - TG3_FLAG_CLKREQ_BUG, - TG3_FLAG_5755_PLUS, - TG3_FLAG_NO_NVRAM, - TG3_FLAG_ENABLE_RSS, - TG3_FLAG_ENABLE_TSS, - TG3_FLAG_SHORT_DMA_BUG, - TG3_FLAG_USE_JUMBO_BDFLAG, - TG3_FLAG_L1PLLPD_EN, - TG3_FLAG_57765_PLUS, - TG3_FLAG_APE_HAS_NCSI, - TG3_FLAG_5717_PLUS, - TG3_FLAG_4K_FIFO_LIMIT, - - /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ - TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ -}; - -struct tg3 { - /* begin "general, frequently-used members" cacheline section */ - - /* If the IRQ handler (which runs lockless) needs to be - * quiesced, the following bitmask state is used. The - * SYNC flag is set by non-IRQ context code to initiate - * the quiescence. - * - * When the IRQ handler notices that SYNC is set, it - * disables interrupts and returns. - * - * When all outstanding IRQ handlers have returned after - * the SYNC flag has been set, the setter can be assured - * that interrupts will no longer get run. - * - * In this way all SMP driver locks are never acquired - * in hw IRQ context, only sw IRQ context or lower. - */ - unsigned int irq_sync; - - /* SMP locking strategy: - * - * lock: Held during reset, PHY access, timer, and when - * updating tg3_flags. - * - * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds - * netif_tx_lock when it needs to call - * netif_wake_queue. - * - * Both of these locks are to be held with BH safety. - * - * Because the IRQ handler, tg3_poll, and tg3_start_xmit - * are running lockless, it is necessary to completely - * quiesce the chip with tg3_netif_stop and tg3_full_lock - * before reconfiguring the device. - * - * indirect_lock: Held when accessing registers indirectly - * with IRQ disabling. 
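The irq_sync scheme described above is a handshake rather than a lock, so a sketch may help: the non-IRQ side raises the flag and waits out any handler still in flight, while the handler treats a raised flag as "take no action". synchronize_irq() is the standard kernel primitive; the field names match the tg3 structure below, and the handler fragment is indicative only.

/* Illustrative sketch of the quiescence handshake described above. */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);
	tp->irq_sync = 1;
	smp_mb();	/* publish the flag before waiting on handlers */

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* In the interrupt handler, before touching any shared state:
 *
 *	if (unlikely(tg3_irq_sync(tp)))
 *		return IRQ_HANDLED;	// disabled/quiesced: do nothing
 */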
- */ - spinlock_t lock; - spinlock_t indirect_lock; - - u32 (*read32) (struct tg3 *, u32); - void (*write32) (struct tg3 *, u32, u32); - u32 (*read32_mbox) (struct tg3 *, u32); - void (*write32_mbox) (struct tg3 *, u32, - u32); - void __iomem *regs; - void __iomem *aperegs; - struct net_device *dev; - struct pci_dev *pdev; - - u32 coal_now; - u32 msg_enable; - - /* begin "tx thread" cacheline section */ - void (*write32_tx_mbox) (struct tg3 *, u32, - u32); - - /* begin "rx thread" cacheline section */ - struct tg3_napi napi[TG3_IRQ_MAX_VECS]; - void (*write32_rx_mbox) (struct tg3 *, u32, - u32); - u32 rx_copy_thresh; - u32 rx_std_ring_mask; - u32 rx_jmb_ring_mask; - u32 rx_ret_ring_mask; - u32 rx_pending; - u32 rx_jumbo_pending; - u32 rx_std_max_post; - u32 rx_offset; - u32 rx_pkt_map_sz; - - - /* begin "everything else" cacheline(s) section */ - unsigned long rx_dropped; - struct rtnl_link_stats64 net_stats_prev; - struct tg3_ethtool_stats estats; - struct tg3_ethtool_stats estats_prev; - - DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS); - - union { - unsigned long phy_crc_errors; - unsigned long last_event_jiffies; - }; - - struct timer_list timer; - u16 timer_counter; - u16 timer_multiplier; - u32 timer_offset; - u16 asf_counter; - u16 asf_multiplier; - - /* 1 second counter for transient serdes link events */ - u32 serdes_counter; -#define SERDES_AN_TIMEOUT_5704S 2 -#define SERDES_PARALLEL_DET_TIMEOUT 1 -#define SERDES_AN_TIMEOUT_5714S 1 - - struct tg3_link_config link_config; - struct tg3_bufmgr_config bufmgr_config; - - /* cache h/w values, often passed straight to h/w */ - u32 rx_mode; - u32 tx_mode; - u32 mac_mode; - u32 mi_mode; - u32 misc_host_ctrl; - u32 grc_mode; - u32 grc_local_ctrl; - u32 dma_rwctrl; - u32 coalesce_mode; - u32 pwrmgmt_thresh; - - /* PCI block */ - u32 pci_chip_rev_id; - u16 pci_cmd; - u8 pci_cacheline_sz; - u8 pci_lat_timer; - - int pci_fn; - int pm_cap; - int msi_cap; - int pcix_cap; - int pcie_readrq; - - struct mii_bus *mdio_bus; - int mdio_irq[PHY_MAX_ADDR]; - - u8 phy_addr; - - /* PHY info */ - u32 phy_id; -#define TG3_PHY_ID_MASK 0xfffffff0 -#define TG3_PHY_ID_BCM5400 0x60008040 -#define TG3_PHY_ID_BCM5401 0x60008050 -#define TG3_PHY_ID_BCM5411 0x60008070 -#define TG3_PHY_ID_BCM5701 0x60008110 -#define TG3_PHY_ID_BCM5703 0x60008160 -#define TG3_PHY_ID_BCM5704 0x60008190 -#define TG3_PHY_ID_BCM5705 0x600081a0 -#define TG3_PHY_ID_BCM5750 0x60008180 -#define TG3_PHY_ID_BCM5752 0x60008100 -#define TG3_PHY_ID_BCM5714 0x60008340 -#define TG3_PHY_ID_BCM5780 0x60008350 -#define TG3_PHY_ID_BCM5755 0xbc050cc0 -#define TG3_PHY_ID_BCM5787 0xbc050ce0 -#define TG3_PHY_ID_BCM5756 0xbc050ed0 -#define TG3_PHY_ID_BCM5784 0xbc050fa0 -#define TG3_PHY_ID_BCM5761 0xbc050fd0 -#define TG3_PHY_ID_BCM5718C 0x5c0d8a00 -#define TG3_PHY_ID_BCM5718S 0xbc050ff0 -#define TG3_PHY_ID_BCM57765 0x5c0d8a40 -#define TG3_PHY_ID_BCM5719C 0x5c0d8a20 -#define TG3_PHY_ID_BCM5720C 0x5c0d8b60 -#define TG3_PHY_ID_BCM5906 0xdc00ac40 -#define TG3_PHY_ID_BCM8002 0x60010140 -#define TG3_PHY_ID_INVALID 0xffffffff - -#define PHY_ID_RTL8211C 0x001cc910 -#define PHY_ID_RTL8201E 0x00008200 - -#define TG3_PHY_ID_REV_MASK 0x0000000f -#define TG3_PHY_REV_BCM5401_B0 0x1 - - /* This macro assumes the passed PHY ID is - * already masked with TG3_PHY_ID_MASK. 
- */ -#define TG3_KNOWN_PHY_ID(X) \ - ((X) == TG3_PHY_ID_BCM5400 || (X) == TG3_PHY_ID_BCM5401 || \ - (X) == TG3_PHY_ID_BCM5411 || (X) == TG3_PHY_ID_BCM5701 || \ - (X) == TG3_PHY_ID_BCM5703 || (X) == TG3_PHY_ID_BCM5704 || \ - (X) == TG3_PHY_ID_BCM5705 || (X) == TG3_PHY_ID_BCM5750 || \ - (X) == TG3_PHY_ID_BCM5752 || (X) == TG3_PHY_ID_BCM5714 || \ - (X) == TG3_PHY_ID_BCM5780 || (X) == TG3_PHY_ID_BCM5787 || \ - (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \ - (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \ - (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \ - (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \ - (X) == TG3_PHY_ID_BCM8002) - - u32 phy_flags; -#define TG3_PHYFLG_IS_LOW_POWER 0x00000001 -#define TG3_PHYFLG_IS_CONNECTED 0x00000002 -#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000004 -#define TG3_PHYFLG_PHY_SERDES 0x00000010 -#define TG3_PHYFLG_MII_SERDES 0x00000020 -#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \ - TG3_PHYFLG_MII_SERDES) -#define TG3_PHYFLG_IS_FET 0x00000040 -#define TG3_PHYFLG_10_100_ONLY 0x00000080 -#define TG3_PHYFLG_ENABLE_APD 0x00000100 -#define TG3_PHYFLG_CAPACITIVE_COUPLING 0x00000200 -#define TG3_PHYFLG_NO_ETH_WIRE_SPEED 0x00000400 -#define TG3_PHYFLG_JITTER_BUG 0x00000800 -#define TG3_PHYFLG_ADJUST_TRIM 0x00001000 -#define TG3_PHYFLG_ADC_BUG 0x00002000 -#define TG3_PHYFLG_5704_A0_BUG 0x00004000 -#define TG3_PHYFLG_BER_BUG 0x00008000 -#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 -#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 -#define TG3_PHYFLG_EEE_CAP 0x00040000 - - u32 led_ctrl; - u32 phy_otp; - u32 setlpicnt; - -#define TG3_BPN_SIZE 24 - char board_part_number[TG3_BPN_SIZE]; -#define TG3_VER_SIZE ETHTOOL_FWVERS_LEN - char fw_ver[TG3_VER_SIZE]; - u32 nic_sram_data_cfg; - u32 pci_clock_ctrl; - struct pci_dev *pdev_peer; - - struct tg3_hw_stats *hw_stats; - dma_addr_t stats_mapping; - struct work_struct reset_task; - - int nvram_lock_cnt; - u32 nvram_size; -#define TG3_NVRAM_SIZE_2KB 0x00000800 -#define TG3_NVRAM_SIZE_64KB 0x00010000 -#define TG3_NVRAM_SIZE_128KB 0x00020000 -#define TG3_NVRAM_SIZE_256KB 0x00040000 -#define TG3_NVRAM_SIZE_512KB 0x00080000 -#define TG3_NVRAM_SIZE_1MB 0x00100000 -#define TG3_NVRAM_SIZE_2MB 0x00200000 - - u32 nvram_pagesize; - u32 nvram_jedecnum; - -#define JEDEC_ATMEL 0x1f -#define JEDEC_ST 0x20 -#define JEDEC_SAIFUN 0x4f -#define JEDEC_SST 0xbf - -#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB -#define ATMEL_AT24C02_PAGE_SIZE (8) - -#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB -#define ATMEL_AT24C64_PAGE_SIZE (32) - -#define ATMEL_AT24C512_CHIP_SIZE TG3_NVRAM_SIZE_512KB -#define ATMEL_AT24C512_PAGE_SIZE (128) - -#define ATMEL_AT45DB0X1B_PAGE_POS 9 -#define ATMEL_AT45DB0X1B_PAGE_SIZE 264 - -#define ATMEL_AT25F512_PAGE_SIZE 256 - -#define ST_M45PEX0_PAGE_SIZE 256 - -#define SAIFUN_SA25F0XX_PAGE_SIZE 256 - -#define SST_25VF0X0_PAGE_SIZE 4098 - - unsigned int irq_max; - unsigned int irq_cnt; - - struct ethtool_coalesce coal; - - /* firmware info */ - const char *fw_needed; - const struct firmware *fw; - u32 fw_len; /* includes BSS */ -}; - -#endif /* !(_T3_H) */ diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 42228ca..5613e8a 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -58,7 +58,7 @@ #include "57xx_hsi_bnx2fc.h" #include "bnx2fc_debug.h" -#include "../../net/cnic_if.h" +#include "../../net/ethernet/broadcom/cnic_if.h" #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" diff --git 
a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index dc57007..0bd70e8 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -40,7 +40,7 @@ #include #include -#include "../../net/cnic_if.h" +#include "../../net/ethernet/broadcom/cnic_if.h" #include "57xx_iscsi_hsi.h" #include "57xx_iscsi_constants.h" -- cgit v0.10.2 From f7917c009c28c941ba151ee66f04dc7f6a2e1e0b Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 7 Apr 2011 06:57:17 -0700 Subject: chelsio: Move the Chelsio drivers Moves the drivers for the Chelsio chipsets into drivers/net/ethernet/chelsio/ and the necessary Kconfig and Makefile changes. CC: Divy Le Ray CC: Dimitris Michailidis CC: Casey Leedom Signed-off-by: Jeff Kirsher diff --git a/MAINTAINERS b/MAINTAINERS index af9de64..0853d00 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1994,7 +1994,7 @@ M: Divy Le Ray L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported -F: drivers/net/cxgb3/ +F: drivers/net/ethernet/chelsio/cxgb3/ CXGB3 IWARP RNIC DRIVER (IW_CXGB3) M: Steve Wise @@ -2008,7 +2008,7 @@ M: Dimitris Michailidis L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported -F: drivers/net/cxgb4/ +F: drivers/net/ethernet/chelsio/cxgb4/ CXGB4 IWARP RNIC DRIVER (IW_CXGB4) M: Steve Wise @@ -2022,7 +2022,7 @@ M: Casey Leedom L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported -F: drivers/net/cxgb4vf/ +F: drivers/net/ethernet/chelsio/cxgb4vf/ STMMAC ETHERNET DRIVER M: Giuseppe Cavallaro diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile index 621619c..2761364 100644 --- a/drivers/infiniband/hw/cxgb3/Makefile +++ b/drivers/infiniband/hw/cxgb3/Makefile @@ -1,4 +1,4 @@ -ccflags-y := -Idrivers/net/cxgb3 +ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb3 obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile index cd20b13..46b878c 100644 --- a/drivers/infiniband/hw/cxgb4/Makefile +++ b/drivers/infiniband/hw/cxgb4/Makefile @@ -1,4 +1,4 @@ -ccflags-y := -Idrivers/net/cxgb4 +ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 2f9f208..77ab2e1 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1942,92 +1942,6 @@ if NETDEV_10000 config MDIO tristate -config CHELSIO_T1 - tristate "Chelsio 10Gb Ethernet support" - depends on PCI - select CRC32 - select MDIO - help - This driver supports Chelsio gigabit and 10-gigabit - Ethernet cards. More information about adapter features and - performance tuning is in . - - For general information about Chelsio and our products, visit - our website at . - - For customer support, please visit our customer support page at - . - - Please send feedback to . - - To compile this driver as a module, choose M here: the module - will be called cxgb. - -config CHELSIO_T1_1G - bool "Chelsio gigabit Ethernet support" - depends on CHELSIO_T1 - help - Enables support for Chelsio's gigabit Ethernet PCI cards. If you - are using only 10G cards say 'N' here. - -config CHELSIO_T3 - tristate "Chelsio Communications T3 10Gb Ethernet support" - depends on PCI && INET - select FW_LOADER - select MDIO - help - This driver supports Chelsio T3-based gigabit and 10Gb Ethernet - adapters. - - For general information about Chelsio and our products, visit - our website at . - - For customer support, please visit our customer support page at - . - - Please send feedback to . 
- - To compile this driver as a module, choose M here: the module - will be called cxgb3. - -config CHELSIO_T4 - tristate "Chelsio Communications T4 Ethernet support" - depends on PCI - select FW_LOADER - select MDIO - help - This driver supports Chelsio T4-based gigabit and 10Gb Ethernet - adapters. - - For general information about Chelsio and our products, visit - our website at . - - For customer support, please visit our customer support page at - . - - Please send feedback to . - - To compile this driver as a module choose M here; the module - will be called cxgb4. - -config CHELSIO_T4VF - tristate "Chelsio Communications T4 Virtual Function Ethernet support" - depends on PCI - help - This driver supports Chelsio T4-based gigabit and 10Gb Ethernet - adapters with PCI-E SR-IOV Virtual Functions. - - For general information about Chelsio and our products, visit - our website at . - - For customer support, please visit our customer support page at - . - - Please send feedback to . - - To compile this driver as a module choose M here; the module - will be called cxgb4vf. - config EHEA tristate "eHEA Ethernet support" depends on IBMEBUS && INET && SPARSEMEM diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 4cc2dcd..a987d46 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -19,10 +19,6 @@ obj-$(CONFIG_IXGBE) += ixgbe/ obj-$(CONFIG_IXGBEVF) += ixgbevf/ obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_IP1000) += ipg.o -obj-$(CONFIG_CHELSIO_T1) += chelsio/ -obj-$(CONFIG_CHELSIO_T3) += cxgb3/ -obj-$(CONFIG_CHELSIO_T4) += cxgb4/ -obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/ obj-$(CONFIG_EHEA) += ehea/ obj-$(CONFIG_CAN) += can/ obj-$(CONFIG_BONDING) += bonding/ diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile deleted file mode 100644 index 57a4b26..0000000 --- a/drivers/net/chelsio/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -# -# Chelsio T1 driver -# - -obj-$(CONFIG_CHELSIO_T1) += cxgb.o - -cxgb-$(CONFIG_CHELSIO_T1_1G) += mv88e1xxx.o vsc7326.o -cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \ - mv88x201x.o my3126.o $(cxgb-y) diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h deleted file mode 100644 index 5ccbed1..0000000 --- a/drivers/net/chelsio/common.h +++ /dev/null @@ -1,353 +0,0 @@ -/***************************************************************************** - * * - * File: common.h * - * $Revision: 1.21 $ * - * $Date: 2005/06/22 00:43:25 $ * - * Description: * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. 
* - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#define pr_fmt(fmt) "cxgb: " fmt - -#ifndef _CXGB_COMMON_H_ -#define _CXGB_COMMON_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver" -#define DRV_NAME "cxgb" -#define DRV_VERSION "2.2" - -#define CH_DEVICE(devid, ssid, idx) \ - { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx } - -#define SUPPORTED_PAUSE (1 << 13) -#define SUPPORTED_LOOPBACK (1 << 15) - -#define ADVERTISED_PAUSE (1 << 13) -#define ADVERTISED_ASYM_PAUSE (1 << 14) - -typedef struct adapter adapter_t; - -struct t1_rx_mode { - struct net_device *dev; -}; - -#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC) -#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI) -#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev)) -#define t1_get_netdev(rm) (rm->dev) - -#define MAX_NPORTS 4 -#define PORT_MASK ((1 << MAX_NPORTS) - 1) -#define NMTUS 8 -#define TCB_SIZE 128 - -#define SPEED_INVALID 0xffff -#define DUPLEX_INVALID 0xff - -enum { - CHBT_BOARD_N110, - CHBT_BOARD_N210, - CHBT_BOARD_7500, - CHBT_BOARD_8000, - CHBT_BOARD_CHT101, - CHBT_BOARD_CHT110, - CHBT_BOARD_CHT210, - CHBT_BOARD_CHT204, - CHBT_BOARD_CHT204V, - CHBT_BOARD_CHT204E, - CHBT_BOARD_CHN204, - CHBT_BOARD_COUGAR, - CHBT_BOARD_6800, - CHBT_BOARD_SIMUL, -}; - -enum { - CHBT_TERM_FPGA, - CHBT_TERM_T1, - CHBT_TERM_T2, - CHBT_TERM_T3 -}; - -enum { - CHBT_MAC_CHELSIO_A, - CHBT_MAC_IXF1010, - CHBT_MAC_PM3393, - CHBT_MAC_VSC7321, - CHBT_MAC_DUMMY -}; - -enum { - CHBT_PHY_88E1041, - CHBT_PHY_88E1111, - CHBT_PHY_88X2010, - CHBT_PHY_XPAK, - CHBT_PHY_MY3126, - CHBT_PHY_8244, - CHBT_PHY_DUMMY -}; - -enum { - PAUSE_RX = 1 << 0, - PAUSE_TX = 1 << 1, - PAUSE_AUTONEG = 1 << 2 -}; - -/* Revisions of T1 chip */ -enum { - TERM_T1A = 0, - TERM_T1B = 1, - TERM_T2 = 3 -}; - -struct sge_params { - unsigned int cmdQ_size[2]; - unsigned int freelQ_size[2]; - unsigned int large_buf_capacity; - unsigned int rx_coalesce_usecs; - unsigned int last_rx_coalesce_raw; - unsigned int default_rx_coalesce_usecs; - unsigned int sample_interval_usecs; - unsigned int coalesce_enable; - unsigned int polling; -}; - -struct chelsio_pci_params { - unsigned short speed; - unsigned char width; - unsigned char is_pcix; -}; - -struct tp_params { - unsigned int pm_size; - unsigned int cm_size; - unsigned int pm_rx_base; - unsigned int pm_tx_base; - unsigned int pm_rx_pg_size; - unsigned int pm_tx_pg_size; - unsigned int pm_rx_num_pgs; - unsigned int pm_tx_num_pgs; - unsigned int rx_coalescing_size; - unsigned int use_5tuple_mode; -}; - -struct mc5_params { - unsigned int mode; /* selects MC5 width */ - unsigned int nservers; /* size of server region */ - unsigned int nroutes; /* size of routing region */ -}; - -/* Default MC5 region sizes */ -#define DEFAULT_SERVER_REGION_LEN 256 -#define DEFAULT_RT_REGION_LEN 1024 - -struct adapter_params { - struct sge_params sge; - struct mc5_params mc5; - struct tp_params tp; - struct chelsio_pci_params pci; - - const struct board_info *brd_info; - - unsigned short mtus[NMTUS]; - unsigned int nports; /* # of ethernet ports */ - unsigned int stats_update_period; - unsigned short chip_revision; - unsigned char 
chip_version; - unsigned char is_asic; - unsigned char has_msi; -}; - -struct link_config { - unsigned int supported; /* link capabilities */ - unsigned int advertising; /* advertised capabilities */ - unsigned short requested_speed; /* speed user has requested */ - unsigned short speed; /* actual link speed */ - unsigned char requested_duplex; /* duplex user has requested */ - unsigned char duplex; /* actual link duplex */ - unsigned char requested_fc; /* flow control user has requested */ - unsigned char fc; /* actual link flow control */ - unsigned char autoneg; /* autonegotiating? */ -}; - -struct cmac; -struct cphy; - -struct port_info { - struct net_device *dev; - struct cmac *mac; - struct cphy *phy; - struct link_config link_config; - struct net_device_stats netstats; -}; - -struct sge; -struct peespi; - -struct adapter { - u8 __iomem *regs; - struct pci_dev *pdev; - unsigned long registered_device_map; - unsigned long open_device_map; - unsigned long flags; - - const char *name; - int msg_enable; - u32 mmio_len; - - struct work_struct ext_intr_handler_task; - struct adapter_params params; - - /* Terminator modules. */ - struct sge *sge; - struct peespi *espi; - struct petp *tp; - - struct napi_struct napi; - struct port_info port[MAX_NPORTS]; - struct delayed_work stats_update_task; - struct timer_list stats_update_timer; - - spinlock_t tpi_lock; - spinlock_t work_lock; - spinlock_t mac_lock; - - /* guards async operations */ - spinlock_t async_lock ____cacheline_aligned; - u32 slow_intr_mask; - int t1powersave; -}; - -enum { /* adapter flags */ - FULL_INIT_DONE = 1 << 0, -}; - -struct mdio_ops; -struct gmac; -struct gphy; - -struct board_info { - unsigned char board; - unsigned char port_number; - unsigned long caps; - unsigned char chip_term; - unsigned char chip_mac; - unsigned char chip_phy; - unsigned int clock_core; - unsigned int clock_mc3; - unsigned int clock_mc4; - unsigned int espi_nports; - unsigned int clock_elmer0; - unsigned char mdio_mdien; - unsigned char mdio_mdiinv; - unsigned char mdio_mdc; - unsigned char mdio_phybaseaddr; - const struct gmac *gmac; - const struct gphy *gphy; - const struct mdio_ops *mdio_ops; - const char *desc; -}; - -static inline int t1_is_asic(const adapter_t *adapter) -{ - return adapter->params.is_asic; -} - -extern const struct pci_device_id t1_pci_tbl[]; - -static inline int adapter_matches_type(const adapter_t *adapter, - int version, int revision) -{ - return adapter->params.chip_version == version && - adapter->params.chip_revision == revision; -} - -#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B) -#define is_T2(adap) adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2) - -/* Returns true if an adapter supports VLAN acceleration and TSO */ -static inline int vlan_tso_capable(const adapter_t *adapter) -{ - return !t1_is_T1B(adapter); -} - -#define for_each_port(adapter, iter) \ - for (iter = 0; iter < (adapter)->params.nports; ++iter) - -#define board_info(adapter) ((adapter)->params.brd_info) -#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full) - -static inline unsigned int core_ticks_per_usec(const adapter_t *adap) -{ - return board_info(adap)->clock_core / 1000000; -} - -extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp); -extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); -extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); -extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value); - -extern void 
t1_interrupts_enable(adapter_t *adapter); -extern void t1_interrupts_disable(adapter_t *adapter); -extern void t1_interrupts_clear(adapter_t *adapter); -extern int t1_elmer0_ext_intr_handler(adapter_t *adapter); -extern void t1_elmer0_ext_intr(adapter_t *adapter); -extern int t1_slow_intr_handler(adapter_t *adapter); - -extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); -extern const struct board_info *t1_get_board_info(unsigned int board_id); -extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid, - unsigned short ssid); -extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data); -extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, - struct adapter_params *p); -extern int t1_init_hw_modules(adapter_t *adapter); -extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi); -extern void t1_free_sw_modules(adapter_t *adapter); -extern void t1_fatal_err(adapter_t *adapter); -extern void t1_link_changed(adapter_t *adapter, int port_id); -extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat, - int speed, int duplex, int pause); -#endif /* _CXGB_COMMON_H_ */ diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h deleted file mode 100644 index 1f095a9..0000000 --- a/drivers/net/chelsio/cphy.h +++ /dev/null @@ -1,175 +0,0 @@ -/***************************************************************************** - * * - * File: cphy.h * - * $Revision: 1.7 $ * - * $Date: 2005/06/21 18:29:47 $ * - * Description: * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. 
* - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#ifndef _CXGB_CPHY_H_ -#define _CXGB_CPHY_H_ - -#include "common.h" - -struct mdio_ops { - void (*init)(adapter_t *adapter, const struct board_info *bi); - int (*read)(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr); - int (*write)(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr, u16 val); - unsigned mode_support; -}; - -/* PHY interrupt types */ -enum { - cphy_cause_link_change = 0x1, - cphy_cause_error = 0x2, - cphy_cause_fifo_error = 0x3 -}; - -enum { - PHY_LINK_UP = 0x1, - PHY_AUTONEG_RDY = 0x2, - PHY_AUTONEG_EN = 0x4 -}; - -struct cphy; - -/* PHY operations */ -struct cphy_ops { - void (*destroy)(struct cphy *); - int (*reset)(struct cphy *, int wait); - - int (*interrupt_enable)(struct cphy *); - int (*interrupt_disable)(struct cphy *); - int (*interrupt_clear)(struct cphy *); - int (*interrupt_handler)(struct cphy *); - - int (*autoneg_enable)(struct cphy *); - int (*autoneg_disable)(struct cphy *); - int (*autoneg_restart)(struct cphy *); - - int (*advertise)(struct cphy *phy, unsigned int advertise_map); - int (*set_loopback)(struct cphy *, int on); - int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex); - int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed, - int *duplex, int *fc); - - u32 mmds; -}; - -/* A PHY instance */ -struct cphy { - int state; /* Link status state machine */ - adapter_t *adapter; /* associated adapter */ - - struct delayed_work phy_update; - - u16 bmsr; - int count; - int act_count; - int act_on; - - u32 elmer_gpo; - - const struct cphy_ops *ops; /* PHY operations */ - struct mdio_if_info mdio; - struct cphy_instance *instance; -}; - -/* Convenience MDIO read/write wrappers */ -static inline int cphy_mdio_read(struct cphy *cphy, int mmd, int reg, - unsigned int *valp) -{ - int rc = cphy->mdio.mdio_read(cphy->mdio.dev, cphy->mdio.prtad, mmd, - reg); - *valp = (rc >= 0) ? rc : -1; - return (rc >= 0) ? 0 : rc; -} - -static inline int cphy_mdio_write(struct cphy *cphy, int mmd, int reg, - unsigned int val) -{ - return cphy->mdio.mdio_write(cphy->mdio.dev, cphy->mdio.prtad, mmd, - reg, val); -} - -static inline int simple_mdio_read(struct cphy *cphy, int reg, - unsigned int *valp) -{ - return cphy_mdio_read(cphy, MDIO_DEVAD_NONE, reg, valp); -} - -static inline int simple_mdio_write(struct cphy *cphy, int reg, - unsigned int val) -{ - return cphy_mdio_write(cphy, MDIO_DEVAD_NONE, reg, val); -} - -/* Convenience initializer */ -static inline void cphy_init(struct cphy *phy, struct net_device *dev, - int phy_addr, struct cphy_ops *phy_ops, - const struct mdio_ops *mdio_ops) -{ - struct adapter *adapter = netdev_priv(dev); - phy->adapter = adapter; - phy->ops = phy_ops; - if (mdio_ops) { - phy->mdio.prtad = phy_addr; - phy->mdio.mmds = phy_ops->mmds; - phy->mdio.mode_support = mdio_ops->mode_support; - phy->mdio.mdio_read = mdio_ops->read; - phy->mdio.mdio_write = mdio_ops->write; - } - phy->mdio.dev = dev; -} - -/* Operations of the PHY-instance factory */ -struct gphy { - /* Construct a PHY instance with the given PHY address */ - struct cphy *(*create)(struct net_device *dev, int phy_addr, - const struct mdio_ops *mdio_ops); - - /* - * Reset the PHY chip. 
This resets the whole PHY chip, not individual - * ports. - */ - int (*reset)(adapter_t *adapter); -}; - -extern const struct gphy t1_my3126_ops; -extern const struct gphy t1_mv88e1xxx_ops; -extern const struct gphy t1_vsc8244_ops; -extern const struct gphy t1_mv88x201x_ops; - -#endif /* _CXGB_CPHY_H_ */ diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h deleted file mode 100644 index e36d45b..0000000 --- a/drivers/net/chelsio/cpl5_cmd.h +++ /dev/null @@ -1,639 +0,0 @@ -/***************************************************************************** - * * - * File: cpl5_cmd.h * - * $Revision: 1.6 $ * - * $Date: 2005/06/21 18:29:47 $ * - * Description: * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. * - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#ifndef _CXGB_CPL5_CMD_H_ -#define _CXGB_CPL5_CMD_H_ - -#include - -#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD) -#error "Adjust your defines" -#endif - -enum CPL_opcode { - CPL_PASS_OPEN_REQ = 0x1, - CPL_PASS_OPEN_RPL = 0x2, - CPL_PASS_ESTABLISH = 0x3, - CPL_PASS_ACCEPT_REQ = 0xE, - CPL_PASS_ACCEPT_RPL = 0x4, - CPL_ACT_OPEN_REQ = 0x5, - CPL_ACT_OPEN_RPL = 0x6, - CPL_CLOSE_CON_REQ = 0x7, - CPL_CLOSE_CON_RPL = 0x8, - CPL_CLOSE_LISTSRV_REQ = 0x9, - CPL_CLOSE_LISTSRV_RPL = 0xA, - CPL_ABORT_REQ = 0xB, - CPL_ABORT_RPL = 0xC, - CPL_PEER_CLOSE = 0xD, - CPL_ACT_ESTABLISH = 0x17, - - CPL_GET_TCB = 0x24, - CPL_GET_TCB_RPL = 0x25, - CPL_SET_TCB = 0x26, - CPL_SET_TCB_FIELD = 0x27, - CPL_SET_TCB_RPL = 0x28, - CPL_PCMD = 0x29, - - CPL_PCMD_READ = 0x31, - CPL_PCMD_READ_RPL = 0x32, - - - CPL_RX_DATA = 0xA0, - CPL_RX_DATA_DDP = 0xA1, - CPL_RX_DATA_ACK = 0xA3, - CPL_RX_PKT = 0xAD, - CPL_RX_ISCSI_HDR = 0xAF, - CPL_TX_DATA_ACK = 0xB0, - CPL_TX_DATA = 0xB1, - CPL_TX_PKT = 0xB2, - CPL_TX_PKT_LSO = 0xB6, - - CPL_RTE_DELETE_REQ = 0xC0, - CPL_RTE_DELETE_RPL = 0xC1, - CPL_RTE_WRITE_REQ = 0xC2, - CPL_RTE_WRITE_RPL = 0xD3, - CPL_RTE_READ_REQ = 0xC3, - CPL_RTE_READ_RPL = 0xC4, - CPL_L2T_WRITE_REQ = 0xC5, - CPL_L2T_WRITE_RPL = 0xD4, - CPL_L2T_READ_REQ = 0xC6, - CPL_L2T_READ_RPL = 0xC7, - CPL_SMT_WRITE_REQ = 0xC8, - CPL_SMT_WRITE_RPL = 0xD5, - CPL_SMT_READ_REQ = 0xC9, - CPL_SMT_READ_RPL = 0xCA, - CPL_ARP_MISS_REQ = 0xCD, - CPL_ARP_MISS_RPL = 0xCE, - CPL_MIGRATE_C2T_REQ = 0xDC, - CPL_MIGRATE_C2T_RPL = 0xDD, - CPL_ERROR = 0xD7, - - /* internal: driver -> TOM */ - CPL_MSS_CHANGE = 0xE1 -}; - -#define NUM_CPL_CMDS 256 - -enum CPL_error { - CPL_ERR_NONE = 0, - CPL_ERR_TCAM_PARITY = 1, - CPL_ERR_TCAM_FULL = 3, - 
CPL_ERR_CONN_RESET = 20, - CPL_ERR_CONN_EXIST = 22, - CPL_ERR_ARP_MISS = 23, - CPL_ERR_BAD_SYN = 24, - CPL_ERR_CONN_TIMEDOUT = 30, - CPL_ERR_XMIT_TIMEDOUT = 31, - CPL_ERR_PERSIST_TIMEDOUT = 32, - CPL_ERR_FINWAIT2_TIMEDOUT = 33, - CPL_ERR_KEEPALIVE_TIMEDOUT = 34, - CPL_ERR_ABORT_FAILED = 42, - CPL_ERR_GENERAL = 99 -}; - -enum { - CPL_CONN_POLICY_AUTO = 0, - CPL_CONN_POLICY_ASK = 1, - CPL_CONN_POLICY_DENY = 3 -}; - -enum { - ULP_MODE_NONE = 0, - ULP_MODE_TCPDDP = 1, - ULP_MODE_ISCSI = 2, - ULP_MODE_IWARP = 3, - ULP_MODE_SSL = 4 -}; - -enum { - CPL_PASS_OPEN_ACCEPT, - CPL_PASS_OPEN_REJECT -}; - -enum { - CPL_ABORT_SEND_RST = 0, - CPL_ABORT_NO_RST, - CPL_ABORT_POST_CLOSE_REQ = 2 -}; - -enum { // TX_PKT_LSO ethernet types - CPL_ETH_II, - CPL_ETH_II_VLAN, - CPL_ETH_802_3, - CPL_ETH_802_3_VLAN -}; - -union opcode_tid { - u32 opcode_tid; - u8 opcode; -}; - -#define S_OPCODE 24 -#define V_OPCODE(x) ((x) << S_OPCODE) -#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) -#define G_TID(x) ((x) & 0xFFFFFF) - -/* tid is assumed to be 24-bits */ -#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid)) - -#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) - -/* extract the TID from a CPL command */ -#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd)))) - -struct tcp_options { - u16 mss; - u8 wsf; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 rsvd:4; - u8 ecn:1; - u8 sack:1; - u8 tstamp:1; -#else - u8 tstamp:1; - u8 sack:1; - u8 ecn:1; - u8 rsvd:4; -#endif -}; - -struct cpl_pass_open_req { - union opcode_tid ot; - u16 local_port; - u16 peer_port; - u32 local_ip; - u32 peer_ip; - u32 opt0h; - u32 opt0l; - u32 peer_netmask; - u32 opt1; -}; - -struct cpl_pass_open_rpl { - union opcode_tid ot; - u16 local_port; - u16 peer_port; - u32 local_ip; - u32 peer_ip; - u8 resvd[7]; - u8 status; -}; - -struct cpl_pass_establish { - union opcode_tid ot; - u16 local_port; - u16 peer_port; - u32 local_ip; - u32 peer_ip; - u32 tos_tid; - u8 l2t_idx; - u8 rsvd[3]; - u32 snd_isn; - u32 rcv_isn; -}; - -struct cpl_pass_accept_req { - union opcode_tid ot; - u16 local_port; - u16 peer_port; - u32 local_ip; - u32 peer_ip; - u32 tos_tid; - struct tcp_options tcp_options; - u8 dst_mac[6]; - u16 vlan_tag; - u8 src_mac[6]; - u8 rsvd[2]; - u32 rcv_isn; - u32 unknown_tcp_options; -}; - -struct cpl_pass_accept_rpl { - union opcode_tid ot; - u32 rsvd0; - u32 rsvd1; - u32 peer_ip; - u32 opt0h; - union { - u32 opt0l; - struct { - u8 rsvd[3]; - u8 status; - }; - }; -}; - -struct cpl_act_open_req { - union opcode_tid ot; - u16 local_port; - u16 peer_port; - u32 local_ip; - u32 peer_ip; - u32 opt0h; - u32 opt0l; - u32 iff_vlantag; - u32 rsvd; -}; - -struct cpl_act_open_rpl { - union opcode_tid ot; - u16 local_port; - u16 peer_port; - u32 local_ip; - u32 peer_ip; - u32 new_tid; - u8 rsvd[3]; - u8 status; -}; - -struct cpl_act_establish { - union opcode_tid ot; - u16 local_port; - u16 peer_port; - u32 local_ip; - u32 peer_ip; - u32 tos_tid; - u32 rsvd; - u32 snd_isn; - u32 rcv_isn; -}; - -struct cpl_get_tcb { - union opcode_tid ot; - u32 rsvd; -}; - -struct cpl_get_tcb_rpl { - union opcode_tid ot; - u16 len; - u8 rsvd; - u8 status; -}; - -struct cpl_set_tcb { - union opcode_tid ot; - u16 len; - u16 rsvd; -}; - -struct cpl_set_tcb_field { - union opcode_tid ot; - u8 rsvd[3]; - u8 offset; - u32 mask; - u32 val; -}; - -struct cpl_set_tcb_rpl { - union opcode_tid ot; - u8 rsvd[3]; - u8 status; -}; - -struct cpl_pcmd { - union opcode_tid ot; - u16 dlen_in; - u16 dlen_out; - u32 pcmd_parm[2]; -}; - -struct cpl_pcmd_read { - union opcode_tid ot; - u32 rsvd1; 
- u16 rsvd2; - u32 addr; - u16 len; -}; - -struct cpl_pcmd_read_rpl { - union opcode_tid ot; - u16 len; -}; - -struct cpl_close_con_req { - union opcode_tid ot; - u32 rsvd; -}; - -struct cpl_close_con_rpl { - union opcode_tid ot; - u8 rsvd[3]; - u8 status; - u32 snd_nxt; - u32 rcv_nxt; -}; - -struct cpl_close_listserv_req { - union opcode_tid ot; - u32 rsvd; -}; - -struct cpl_close_listserv_rpl { - union opcode_tid ot; - u8 rsvd[3]; - u8 status; -}; - -struct cpl_abort_req { - union opcode_tid ot; - u32 rsvd0; - u8 rsvd1; - u8 cmd; - u8 rsvd2[6]; -}; - -struct cpl_abort_rpl { - union opcode_tid ot; - u32 rsvd0; - u8 rsvd1; - u8 status; - u8 rsvd2[6]; -}; - -struct cpl_peer_close { - union opcode_tid ot; - u32 rsvd; -}; - -struct cpl_tx_data { - union opcode_tid ot; - u32 len; - u32 rsvd0; - u16 urg; - u16 flags; -}; - -struct cpl_tx_data_ack { - union opcode_tid ot; - u32 ack_seq; -}; - -struct cpl_rx_data { - union opcode_tid ot; - u32 len; - u32 seq; - u16 urg; - u8 rsvd; - u8 status; -}; - -struct cpl_rx_data_ack { - union opcode_tid ot; - u32 credit; -}; - -struct cpl_rx_data_ddp { - union opcode_tid ot; - u32 len; - u32 seq; - u32 nxt_seq; - u32 ulp_crc; - u16 ddp_status; - u8 rsvd; - u8 status; -}; - -/* - * We want this header's alignment to be no more stringent than 2-byte aligned. - * All fields are u8 or u16 except for the length. However that field is not - * used so we break it into 2 16-bit parts to easily meet our alignment needs. - */ -struct cpl_tx_pkt { - u8 opcode; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 iff:4; - u8 ip_csum_dis:1; - u8 l4_csum_dis:1; - u8 vlan_valid:1; - u8 rsvd:1; -#else - u8 rsvd:1; - u8 vlan_valid:1; - u8 l4_csum_dis:1; - u8 ip_csum_dis:1; - u8 iff:4; -#endif - u16 vlan; - u16 len_hi; - u16 len_lo; -}; - -struct cpl_tx_pkt_lso { - u8 opcode; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 iff:4; - u8 ip_csum_dis:1; - u8 l4_csum_dis:1; - u8 vlan_valid:1; - u8 :1; -#else - u8 :1; - u8 vlan_valid:1; - u8 l4_csum_dis:1; - u8 ip_csum_dis:1; - u8 iff:4; -#endif - u16 vlan; - __be32 len; - - u8 rsvd[5]; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 tcp_hdr_words:4; - u8 ip_hdr_words:4; -#else - u8 ip_hdr_words:4; - u8 tcp_hdr_words:4; -#endif - __be16 eth_type_mss; -}; - -struct cpl_rx_pkt { - u8 opcode; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 iff:4; - u8 csum_valid:1; - u8 bad_pkt:1; - u8 vlan_valid:1; - u8 rsvd:1; -#else - u8 rsvd:1; - u8 vlan_valid:1; - u8 bad_pkt:1; - u8 csum_valid:1; - u8 iff:4; -#endif - u16 csum; - u16 vlan; - u16 len; -}; - -struct cpl_l2t_write_req { - union opcode_tid ot; - u32 params; - u8 rsvd1[2]; - u8 dst_mac[6]; -}; - -struct cpl_l2t_write_rpl { - union opcode_tid ot; - u8 status; - u8 rsvd[3]; -}; - -struct cpl_l2t_read_req { - union opcode_tid ot; - u8 rsvd[3]; - u8 l2t_idx; -}; - -struct cpl_l2t_read_rpl { - union opcode_tid ot; - u32 params; - u8 rsvd1[2]; - u8 dst_mac[6]; -}; - -struct cpl_smt_write_req { - union opcode_tid ot; - u8 rsvd0; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 rsvd1:1; - u8 mtu_idx:3; - u8 iff:4; -#else - u8 iff:4; - u8 mtu_idx:3; - u8 rsvd1:1; -#endif - u16 rsvd2; - u16 rsvd3; - u8 src_mac1[6]; - u16 rsvd4; - u8 src_mac0[6]; -}; - -struct cpl_smt_write_rpl { - union opcode_tid ot; - u8 status; - u8 rsvd[3]; -}; - -struct cpl_smt_read_req { - union opcode_tid ot; - u8 rsvd0; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 rsvd1:4; - u8 iff:4; -#else - u8 iff:4; - u8 rsvd1:4; -#endif - u16 rsvd2; -}; - -struct cpl_smt_read_rpl { - union opcode_tid ot; - u8 status; -#if defined(__LITTLE_ENDIAN_BITFIELD) 
- u8 rsvd1:1; - u8 mtu_idx:3; - u8 rsvd0:4; -#else - u8 rsvd0:4; - u8 mtu_idx:3; - u8 rsvd1:1; -#endif - u16 rsvd2; - u16 rsvd3; - u8 src_mac1[6]; - u16 rsvd4; - u8 src_mac0[6]; -}; - -struct cpl_rte_delete_req { - union opcode_tid ot; - u32 params; -}; - -struct cpl_rte_delete_rpl { - union opcode_tid ot; - u8 status; - u8 rsvd[3]; -}; - -struct cpl_rte_write_req { - union opcode_tid ot; - u32 params; - u32 netmask; - u32 faddr; -}; - -struct cpl_rte_write_rpl { - union opcode_tid ot; - u8 status; - u8 rsvd[3]; -}; - -struct cpl_rte_read_req { - union opcode_tid ot; - u32 params; -}; - -struct cpl_rte_read_rpl { - union opcode_tid ot; - u8 status; - u8 rsvd0[2]; - u8 l2t_idx; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 rsvd1:7; - u8 select:1; -#else - u8 select:1; - u8 rsvd1:7; -#endif - u8 rsvd2[3]; - u32 addr; -}; - -struct cpl_mss_change { - union opcode_tid ot; - u32 mss; -}; - -#endif /* _CXGB_CPL5_CMD_H_ */ - diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c deleted file mode 100644 index 3edbbc4..0000000 --- a/drivers/net/chelsio/cxgb2.c +++ /dev/null @@ -1,1379 +0,0 @@ -/***************************************************************************** - * * - * File: cxgb2.c * - * $Revision: 1.25 $ * - * $Date: 2005/06/22 00:43:25 $ * - * Description: * - * Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. * - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#include "common.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "cpl5_cmd.h" -#include "regs.h" -#include "gmac.h" -#include "cphy.h" -#include "sge.h" -#include "tp.h" -#include "espi.h" -#include "elmer0.h" - -#include - -static inline void schedule_mac_stats_update(struct adapter *ap, int secs) -{ - schedule_delayed_work(&ap->stats_update_task, secs * HZ); -} - -static inline void cancel_mac_stats_update(struct adapter *ap) -{ - cancel_delayed_work(&ap->stats_update_task); -} - -#define MAX_CMDQ_ENTRIES 16384 -#define MAX_CMDQ1_ENTRIES 1024 -#define MAX_RX_BUFFERS 16384 -#define MAX_RX_JUMBO_BUFFERS 16384 -#define MAX_TX_BUFFERS_HIGH 16384U -#define MAX_TX_BUFFERS_LOW 1536U -#define MAX_TX_BUFFERS 1460U -#define MIN_FL_ENTRIES 32 - -#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ - NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ - NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) - -/* - * The EEPROM is actually bigger but only the first few bytes are used so we - * only report those. 
- */ -#define EEPROM_SIZE 32 - -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR("Chelsio Communications"); -MODULE_LICENSE("GPL"); - -static int dflt_msg_enable = DFLT_MSG_ENABLE; - -module_param(dflt_msg_enable, int, 0); -MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap"); - -#define HCLOCK 0x0 -#define LCLOCK 0x1 - -/* T1 cards powersave mode */ -static int t1_clock(struct adapter *adapter, int mode); -static int t1powersave = 1; /* HW default is powersave mode. */ - -module_param(t1powersave, int, 0); -MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode"); - -static int disable_msi = 0; -module_param(disable_msi, int, 0); -MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); - -static const char pci_speed[][4] = { - "33", "66", "100", "133" -}; - -/* - * Setup MAC to receive the types of packets we want. - */ -static void t1_set_rxmode(struct net_device *dev) -{ - struct adapter *adapter = dev->ml_priv; - struct cmac *mac = adapter->port[dev->if_port].mac; - struct t1_rx_mode rm; - - rm.dev = dev; - mac->ops->set_rx_mode(mac, &rm); -} - -static void link_report(struct port_info *p) -{ - if (!netif_carrier_ok(p->dev)) - printk(KERN_INFO "%s: link down\n", p->dev->name); - else { - const char *s = "10Mbps"; - - switch (p->link_config.speed) { - case SPEED_10000: s = "10Gbps"; break; - case SPEED_1000: s = "1000Mbps"; break; - case SPEED_100: s = "100Mbps"; break; - } - - printk(KERN_INFO "%s: link up, %s, %s-duplex\n", - p->dev->name, s, - p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); - } -} - -void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat, - int speed, int duplex, int pause) -{ - struct port_info *p = &adapter->port[port_id]; - - if (link_stat != netif_carrier_ok(p->dev)) { - if (link_stat) - netif_carrier_on(p->dev); - else - netif_carrier_off(p->dev); - link_report(p); - - /* multi-ports: inform toe */ - if ((speed > 0) && (adapter->params.nports > 1)) { - unsigned int sched_speed = 10; - switch (speed) { - case SPEED_1000: - sched_speed = 1000; - break; - case SPEED_100: - sched_speed = 100; - break; - case SPEED_10: - sched_speed = 10; - break; - } - t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed); - } - } -} - -static void link_start(struct port_info *p) -{ - struct cmac *mac = p->mac; - - mac->ops->reset(mac); - if (mac->ops->macaddress_set) - mac->ops->macaddress_set(mac, p->dev->dev_addr); - t1_set_rxmode(p->dev); - t1_link_start(p->phy, mac, &p->link_config); - mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); -} - -static void enable_hw_csum(struct adapter *adapter) -{ - if (adapter->port[0].dev->hw_features & NETIF_F_TSO) - t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */ - t1_tp_set_tcp_checksum_offload(adapter->tp, 1); -} - -/* - * Things to do upon first use of a card. - * This must run with the rtnl lock held. - */ -static int cxgb_up(struct adapter *adapter) -{ - int err = 0; - - if (!(adapter->flags & FULL_INIT_DONE)) { - err = t1_init_hw_modules(adapter); - if (err) - goto out_err; - - enable_hw_csum(adapter); - adapter->flags |= FULL_INIT_DONE; - } - - t1_interrupts_clear(adapter); - - adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev); - err = request_irq(adapter->pdev->irq, t1_interrupt, - adapter->params.has_msi ? 
0 : IRQF_SHARED, - adapter->name, adapter); - if (err) { - if (adapter->params.has_msi) - pci_disable_msi(adapter->pdev); - - goto out_err; - } - - t1_sge_start(adapter->sge); - t1_interrupts_enable(adapter); -out_err: - return err; -} - -/* - * Release resources when all the ports have been stopped. - */ -static void cxgb_down(struct adapter *adapter) -{ - t1_sge_stop(adapter->sge); - t1_interrupts_disable(adapter); - free_irq(adapter->pdev->irq, adapter); - if (adapter->params.has_msi) - pci_disable_msi(adapter->pdev); -} - -static int cxgb_open(struct net_device *dev) -{ - int err; - struct adapter *adapter = dev->ml_priv; - int other_ports = adapter->open_device_map & PORT_MASK; - - napi_enable(&adapter->napi); - if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { - napi_disable(&adapter->napi); - return err; - } - - __set_bit(dev->if_port, &adapter->open_device_map); - link_start(&adapter->port[dev->if_port]); - netif_start_queue(dev); - if (!other_ports && adapter->params.stats_update_period) - schedule_mac_stats_update(adapter, - adapter->params.stats_update_period); - - t1_vlan_mode(adapter, dev->features); - return 0; -} - -static int cxgb_close(struct net_device *dev) -{ - struct adapter *adapter = dev->ml_priv; - struct port_info *p = &adapter->port[dev->if_port]; - struct cmac *mac = p->mac; - - netif_stop_queue(dev); - napi_disable(&adapter->napi); - mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); - netif_carrier_off(dev); - - clear_bit(dev->if_port, &adapter->open_device_map); - if (adapter->params.stats_update_period && - !(adapter->open_device_map & PORT_MASK)) { - /* Stop statistics accumulation. */ - smp_mb__after_clear_bit(); - spin_lock(&adapter->work_lock); /* sync with update task */ - spin_unlock(&adapter->work_lock); - cancel_mac_stats_update(adapter); - } - - if (!adapter->open_device_map) - cxgb_down(adapter); - return 0; -} - -static struct net_device_stats *t1_get_stats(struct net_device *dev) -{ - struct adapter *adapter = dev->ml_priv; - struct port_info *p = &adapter->port[dev->if_port]; - struct net_device_stats *ns = &p->netstats; - const struct cmac_statistics *pstats; - - /* Do a full update of the MAC stats */ - pstats = p->mac->ops->statistics_update(p->mac, - MAC_STATS_UPDATE_FULL); - - ns->tx_packets = pstats->TxUnicastFramesOK + - pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK; - - ns->rx_packets = pstats->RxUnicastFramesOK + - pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK; - - ns->tx_bytes = pstats->TxOctetsOK; - ns->rx_bytes = pstats->RxOctetsOK; - - ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors + - pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions; - ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors + - pstats->RxFCSErrors + pstats->RxAlignErrors + - pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors + - pstats->RxSymbolErrors + pstats->RxRuntErrors; - - ns->multicast = pstats->RxMulticastFramesOK; - ns->collisions = pstats->TxTotalCollisions; - - /* detailed rx_errors */ - ns->rx_length_errors = pstats->RxFrameTooLongErrors + - pstats->RxJabberErrors; - ns->rx_over_errors = 0; - ns->rx_crc_errors = pstats->RxFCSErrors; - ns->rx_frame_errors = pstats->RxAlignErrors; - ns->rx_fifo_errors = 0; - ns->rx_missed_errors = 0; - - /* detailed tx_errors */ - ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions; - ns->tx_carrier_errors = 0; - ns->tx_fifo_errors = pstats->TxUnderrun; - ns->tx_heartbeat_errors = 0; - ns->tx_window_errors = 
pstats->TxLateCollisions; - return ns; -} - -static u32 get_msglevel(struct net_device *dev) -{ - struct adapter *adapter = dev->ml_priv; - - return adapter->msg_enable; -} - -static void set_msglevel(struct net_device *dev, u32 val) -{ - struct adapter *adapter = dev->ml_priv; - - adapter->msg_enable = val; -} - -static char stats_strings[][ETH_GSTRING_LEN] = { - "TxOctetsOK", - "TxOctetsBad", - "TxUnicastFramesOK", - "TxMulticastFramesOK", - "TxBroadcastFramesOK", - "TxPauseFrames", - "TxFramesWithDeferredXmissions", - "TxLateCollisions", - "TxTotalCollisions", - "TxFramesAbortedDueToXSCollisions", - "TxUnderrun", - "TxLengthErrors", - "TxInternalMACXmitError", - "TxFramesWithExcessiveDeferral", - "TxFCSErrors", - "TxJumboFramesOk", - "TxJumboOctetsOk", - - "RxOctetsOK", - "RxOctetsBad", - "RxUnicastFramesOK", - "RxMulticastFramesOK", - "RxBroadcastFramesOK", - "RxPauseFrames", - "RxFCSErrors", - "RxAlignErrors", - "RxSymbolErrors", - "RxDataErrors", - "RxSequenceErrors", - "RxRuntErrors", - "RxJabberErrors", - "RxInternalMACRcvError", - "RxInRangeLengthErrors", - "RxOutOfRangeLengthField", - "RxFrameTooLongErrors", - "RxJumboFramesOk", - "RxJumboOctetsOk", - - /* Port stats */ - "RxCsumGood", - "TxCsumOffload", - "TxTso", - "RxVlan", - "TxVlan", - "TxNeedHeadroom", - - /* Interrupt stats */ - "rx drops", - "pure_rsps", - "unhandled irqs", - "respQ_empty", - "respQ_overflow", - "freelistQ_empty", - "pkt_too_big", - "pkt_mismatch", - "cmdQ_full0", - "cmdQ_full1", - - "espi_DIP2ParityErr", - "espi_DIP4Err", - "espi_RxDrops", - "espi_TxDrops", - "espi_RxOvfl", - "espi_ParityErr" -}; - -#define T2_REGMAP_SIZE (3 * 1024) - -static int get_regs_len(struct net_device *dev) -{ - return T2_REGMAP_SIZE; -} - -static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct adapter *adapter = dev->ml_priv; - - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->fw_version, "N/A"); - strcpy(info->bus_info, pci_name(adapter->pdev)); -} - -static int get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(stats_strings); - default: - return -EOPNOTSUPP; - } -} - -static void get_strings(struct net_device *dev, u32 stringset, u8 *data) -{ - if (stringset == ETH_SS_STATS) - memcpy(data, stats_strings, sizeof(stats_strings)); -} - -static void get_stats(struct net_device *dev, struct ethtool_stats *stats, - u64 *data) -{ - struct adapter *adapter = dev->ml_priv; - struct cmac *mac = adapter->port[dev->if_port].mac; - const struct cmac_statistics *s; - const struct sge_intr_counts *t; - struct sge_port_stats ss; - - s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); - t = t1_sge_get_intr_counts(adapter->sge); - t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); - - *data++ = s->TxOctetsOK; - *data++ = s->TxOctetsBad; - *data++ = s->TxUnicastFramesOK; - *data++ = s->TxMulticastFramesOK; - *data++ = s->TxBroadcastFramesOK; - *data++ = s->TxPauseFrames; - *data++ = s->TxFramesWithDeferredXmissions; - *data++ = s->TxLateCollisions; - *data++ = s->TxTotalCollisions; - *data++ = s->TxFramesAbortedDueToXSCollisions; - *data++ = s->TxUnderrun; - *data++ = s->TxLengthErrors; - *data++ = s->TxInternalMACXmitError; - *data++ = s->TxFramesWithExcessiveDeferral; - *data++ = s->TxFCSErrors; - *data++ = s->TxJumboFramesOK; - *data++ = s->TxJumboOctetsOK; - - *data++ = s->RxOctetsOK; - *data++ = s->RxOctetsBad; - *data++ = s->RxUnicastFramesOK; - *data++ = s->RxMulticastFramesOK; - *data++ = 
s->RxBroadcastFramesOK; - *data++ = s->RxPauseFrames; - *data++ = s->RxFCSErrors; - *data++ = s->RxAlignErrors; - *data++ = s->RxSymbolErrors; - *data++ = s->RxDataErrors; - *data++ = s->RxSequenceErrors; - *data++ = s->RxRuntErrors; - *data++ = s->RxJabberErrors; - *data++ = s->RxInternalMACRcvError; - *data++ = s->RxInRangeLengthErrors; - *data++ = s->RxOutOfRangeLengthField; - *data++ = s->RxFrameTooLongErrors; - *data++ = s->RxJumboFramesOK; - *data++ = s->RxJumboOctetsOK; - - *data++ = ss.rx_cso_good; - *data++ = ss.tx_cso; - *data++ = ss.tx_tso; - *data++ = ss.vlan_xtract; - *data++ = ss.vlan_insert; - *data++ = ss.tx_need_hdrroom; - - *data++ = t->rx_drops; - *data++ = t->pure_rsps; - *data++ = t->unhandled_irqs; - *data++ = t->respQ_empty; - *data++ = t->respQ_overflow; - *data++ = t->freelistQ_empty; - *data++ = t->pkt_too_big; - *data++ = t->pkt_mismatch; - *data++ = t->cmdQ_full[0]; - *data++ = t->cmdQ_full[1]; - - if (adapter->espi) { - const struct espi_intr_counts *e; - - e = t1_espi_get_intr_counts(adapter->espi); - *data++ = e->DIP2_parity_err; - *data++ = e->DIP4_err; - *data++ = e->rx_drops; - *data++ = e->tx_drops; - *data++ = e->rx_ovflw; - *data++ = e->parity_err; - } -} - -static inline void reg_block_dump(struct adapter *ap, void *buf, - unsigned int start, unsigned int end) -{ - u32 *p = buf + start; - - for ( ; start <= end; start += sizeof(u32)) - *p++ = readl(ap->regs + start); -} - -static void get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *buf) -{ - struct adapter *ap = dev->ml_priv; - - /* - * Version scheme: bits 0..9: chip version, bits 10..15: chip revision - */ - regs->version = 2; - - memset(buf, 0, T2_REGMAP_SIZE); - reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER); - reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE); - reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR); - reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT); - reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE); - reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE); - reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT); - reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL); - reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE); - reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD); -} - -static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct adapter *adapter = dev->ml_priv; - struct port_info *p = &adapter->port[dev->if_port]; - - cmd->supported = p->link_config.supported; - cmd->advertising = p->link_config.advertising; - - if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(cmd, p->link_config.speed); - cmd->duplex = p->link_config.duplex; - } else { - ethtool_cmd_speed_set(cmd, -1); - cmd->duplex = -1; - } - - cmd->port = (cmd->supported & SUPPORTED_TP) ? 
PORT_TP : PORT_FIBRE; - cmd->phy_address = p->phy->mdio.prtad; - cmd->transceiver = XCVR_EXTERNAL; - cmd->autoneg = p->link_config.autoneg; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - return 0; -} - -static int speed_duplex_to_caps(int speed, int duplex) -{ - int cap = 0; - - switch (speed) { - case SPEED_10: - if (duplex == DUPLEX_FULL) - cap = SUPPORTED_10baseT_Full; - else - cap = SUPPORTED_10baseT_Half; - break; - case SPEED_100: - if (duplex == DUPLEX_FULL) - cap = SUPPORTED_100baseT_Full; - else - cap = SUPPORTED_100baseT_Half; - break; - case SPEED_1000: - if (duplex == DUPLEX_FULL) - cap = SUPPORTED_1000baseT_Full; - else - cap = SUPPORTED_1000baseT_Half; - break; - case SPEED_10000: - if (duplex == DUPLEX_FULL) - cap = SUPPORTED_10000baseT_Full; - } - return cap; -} - -#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ - ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ - ADVERTISED_10000baseT_Full) - -static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct adapter *adapter = dev->ml_priv; - struct port_info *p = &adapter->port[dev->if_port]; - struct link_config *lc = &p->link_config; - - if (!(lc->supported & SUPPORTED_Autoneg)) - return -EOPNOTSUPP; /* can't change speed/duplex */ - - if (cmd->autoneg == AUTONEG_DISABLE) { - u32 speed = ethtool_cmd_speed(cmd); - int cap = speed_duplex_to_caps(speed, cmd->duplex); - - if (!(lc->supported & cap) || (speed == SPEED_1000)) - return -EINVAL; - lc->requested_speed = speed; - lc->requested_duplex = cmd->duplex; - lc->advertising = 0; - } else { - cmd->advertising &= ADVERTISED_MASK; - if (cmd->advertising & (cmd->advertising - 1)) - cmd->advertising = lc->supported; - cmd->advertising &= lc->supported; - if (!cmd->advertising) - return -EINVAL; - lc->requested_speed = SPEED_INVALID; - lc->requested_duplex = DUPLEX_INVALID; - lc->advertising = cmd->advertising | ADVERTISED_Autoneg; - } - lc->autoneg = cmd->autoneg; - if (netif_running(dev)) - t1_link_start(p->phy, p->mac, lc); - return 0; -} - -static void get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct adapter *adapter = dev->ml_priv; - struct port_info *p = &adapter->port[dev->if_port]; - - epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; - epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0; - epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; -} - -static int set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct adapter *adapter = dev->ml_priv; - struct port_info *p = &adapter->port[dev->if_port]; - struct link_config *lc = &p->link_config; - - if (epause->autoneg == AUTONEG_DISABLE) - lc->requested_fc = 0; - else if (lc->supported & SUPPORTED_Autoneg) - lc->requested_fc = PAUSE_AUTONEG; - else - return -EINVAL; - - if (epause->rx_pause) - lc->requested_fc |= PAUSE_RX; - if (epause->tx_pause) - lc->requested_fc |= PAUSE_TX; - if (lc->autoneg == AUTONEG_ENABLE) { - if (netif_running(dev)) - t1_link_start(p->phy, p->mac, lc); - } else { - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - if (netif_running(dev)) - p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1, - lc->fc); - } - return 0; -} - -static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) -{ - struct adapter *adapter = dev->ml_priv; - int jumbo_fl = t1_is_T1B(adapter) ? 
1 : 0; - - e->rx_max_pending = MAX_RX_BUFFERS; - e->rx_mini_max_pending = 0; - e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; - e->tx_max_pending = MAX_CMDQ_ENTRIES; - - e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl]; - e->rx_mini_pending = 0; - e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl]; - e->tx_pending = adapter->params.sge.cmdQ_size[0]; -} - -static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) -{ - struct adapter *adapter = dev->ml_priv; - int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; - - if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending || - e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || - e->tx_pending > MAX_CMDQ_ENTRIES || - e->rx_pending < MIN_FL_ENTRIES || - e->rx_jumbo_pending < MIN_FL_ENTRIES || - e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1)) - return -EINVAL; - - if (adapter->flags & FULL_INIT_DONE) - return -EBUSY; - - adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; - adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; - adapter->params.sge.cmdQ_size[0] = e->tx_pending; - adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ? - MAX_CMDQ1_ENTRIES : e->tx_pending; - return 0; -} - -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - struct adapter *adapter = dev->ml_priv; - - adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; - adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; - adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; - t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge); - return 0; -} - -static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - struct adapter *adapter = dev->ml_priv; - - c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs; - c->rate_sample_interval = adapter->params.sge.sample_interval_usecs; - c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable; - return 0; -} - -static int get_eeprom_len(struct net_device *dev) -{ - struct adapter *adapter = dev->ml_priv; - - return t1_is_asic(adapter) ? 
EEPROM_SIZE : 0; -} - -#define EEPROM_MAGIC(ap) \ - (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16)) - -static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, - u8 *data) -{ - int i; - u8 buf[EEPROM_SIZE] __attribute__((aligned(4))); - struct adapter *adapter = dev->ml_priv; - - e->magic = EEPROM_MAGIC(adapter); - for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32)) - t1_seeprom_read(adapter, i, (__le32 *)&buf[i]); - memcpy(data, buf + e->offset, e->len); - return 0; -} - -static const struct ethtool_ops t1_ethtool_ops = { - .get_settings = get_settings, - .set_settings = set_settings, - .get_drvinfo = get_drvinfo, - .get_msglevel = get_msglevel, - .set_msglevel = set_msglevel, - .get_ringparam = get_sge_param, - .set_ringparam = set_sge_param, - .get_coalesce = get_coalesce, - .set_coalesce = set_coalesce, - .get_eeprom_len = get_eeprom_len, - .get_eeprom = get_eeprom, - .get_pauseparam = get_pauseparam, - .set_pauseparam = set_pauseparam, - .get_link = ethtool_op_get_link, - .get_strings = get_strings, - .get_sset_count = get_sset_count, - .get_ethtool_stats = get_stats, - .get_regs_len = get_regs_len, - .get_regs = get_regs, -}; - -static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) -{ - struct adapter *adapter = dev->ml_priv; - struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio; - - return mdio_mii_ioctl(mdio, if_mii(req), cmd); -} - -static int t1_change_mtu(struct net_device *dev, int new_mtu) -{ - int ret; - struct adapter *adapter = dev->ml_priv; - struct cmac *mac = adapter->port[dev->if_port].mac; - - if (!mac->ops->set_mtu) - return -EOPNOTSUPP; - if (new_mtu < 68) - return -EINVAL; - if ((ret = mac->ops->set_mtu(mac, new_mtu))) - return ret; - dev->mtu = new_mtu; - return 0; -} - -static int t1_set_mac_addr(struct net_device *dev, void *p) -{ - struct adapter *adapter = dev->ml_priv; - struct cmac *mac = adapter->port[dev->if_port].mac; - struct sockaddr *addr = p; - - if (!mac->ops->macaddress_set) - return -EOPNOTSUPP; - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - mac->ops->macaddress_set(mac, dev->dev_addr); - return 0; -} - -static u32 t1_fix_features(struct net_device *dev, u32 features) -{ - /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. - */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; - else - features &= ~NETIF_F_HW_VLAN_TX; - - return features; -} - -static int t1_set_features(struct net_device *dev, u32 features) -{ - u32 changed = dev->features ^ features; - struct adapter *adapter = dev->ml_priv; - - if (changed & NETIF_F_HW_VLAN_RX) - t1_vlan_mode(adapter, features); - - return 0; -} -#ifdef CONFIG_NET_POLL_CONTROLLER -static void t1_netpoll(struct net_device *dev) -{ - unsigned long flags; - struct adapter *adapter = dev->ml_priv; - - local_irq_save(flags); - t1_interrupt(adapter->pdev->irq, adapter); - local_irq_restore(flags); -} -#endif - -/* - * Periodic accumulation of MAC statistics. This is used only if the MAC - * does not have any other way to prevent stats counter overflow. 
- */ -static void mac_stats_task(struct work_struct *work) -{ - int i; - struct adapter *adapter = - container_of(work, struct adapter, stats_update_task.work); - - for_each_port(adapter, i) { - struct port_info *p = &adapter->port[i]; - - if (netif_running(p->dev)) - p->mac->ops->statistics_update(p->mac, - MAC_STATS_UPDATE_FAST); - } - - /* Schedule the next statistics update if any port is active. */ - spin_lock(&adapter->work_lock); - if (adapter->open_device_map & PORT_MASK) - schedule_mac_stats_update(adapter, - adapter->params.stats_update_period); - spin_unlock(&adapter->work_lock); -} - -/* - * Processes elmer0 external interrupts in process context. - */ -static void ext_intr_task(struct work_struct *work) -{ - struct adapter *adapter = - container_of(work, struct adapter, ext_intr_handler_task); - - t1_elmer0_ext_intr_handler(adapter); - - /* Now reenable external interrupts */ - spin_lock_irq(&adapter->async_lock); - adapter->slow_intr_mask |= F_PL_INTR_EXT; - writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE); - writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, - adapter->regs + A_PL_ENABLE); - spin_unlock_irq(&adapter->async_lock); -} - -/* - * Interrupt-context handler for elmer0 external interrupts. - */ -void t1_elmer0_ext_intr(struct adapter *adapter) -{ - /* - * Schedule a task to handle external interrupts as we require - * a process context. We disable EXT interrupts in the interim - * and let the task reenable them when it's done. - */ - adapter->slow_intr_mask &= ~F_PL_INTR_EXT; - writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, - adapter->regs + A_PL_ENABLE); - schedule_work(&adapter->ext_intr_handler_task); -} - -void t1_fatal_err(struct adapter *adapter) -{ - if (adapter->flags & FULL_INIT_DONE) { - t1_sge_stop(adapter->sge); - t1_interrupts_disable(adapter); - } - pr_alert("%s: encountered fatal error, operation suspended\n", - adapter->name); -} - -static const struct net_device_ops cxgb_netdev_ops = { - .ndo_open = cxgb_open, - .ndo_stop = cxgb_close, - .ndo_start_xmit = t1_start_xmit, - .ndo_get_stats = t1_get_stats, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = t1_set_rxmode, - .ndo_do_ioctl = t1_ioctl, - .ndo_change_mtu = t1_change_mtu, - .ndo_set_mac_address = t1_set_mac_addr, - .ndo_fix_features = t1_fix_features, - .ndo_set_features = t1_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = t1_netpoll, -#endif -}; - -static int __devinit init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - static int version_printed; - - int i, err, pci_using_dac = 0; - unsigned long mmio_start, mmio_len; - const struct board_info *bi; - struct adapter *adapter = NULL; - struct port_info *pi; - - if (!version_printed) { - printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION, - DRV_VERSION); - ++version_printed; - } - - err = pci_enable_device(pdev); - if (err) - return err; - - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - pr_err("%s: cannot find PCI device memory base address\n", - pci_name(pdev)); - err = -ENODEV; - goto out_disable_pdev; - } - - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - pci_using_dac = 1; - - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { - pr_err("%s: unable to obtain 64-bit DMA for " - "consistent allocations\n", pci_name(pdev)); - err = -ENODEV; - goto out_disable_pdev; - } - - } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { - pr_err("%s: no usable DMA configuration\n", pci_name(pdev)); - goto out_disable_pdev; - } - - err = 
pci_request_regions(pdev, DRV_NAME); - if (err) { - pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev)); - goto out_disable_pdev; - } - - pci_set_master(pdev); - - mmio_start = pci_resource_start(pdev, 0); - mmio_len = pci_resource_len(pdev, 0); - bi = t1_get_board_info(ent->driver_data); - - for (i = 0; i < bi->port_number; ++i) { - struct net_device *netdev; - - netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter)); - if (!netdev) { - err = -ENOMEM; - goto out_free_dev; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - if (!adapter) { - adapter = netdev_priv(netdev); - adapter->pdev = pdev; - adapter->port[0].dev = netdev; /* so we don't leak it */ - - adapter->regs = ioremap(mmio_start, mmio_len); - if (!adapter->regs) { - pr_err("%s: cannot map device registers\n", - pci_name(pdev)); - err = -ENOMEM; - goto out_free_dev; - } - - if (t1_get_board_rev(adapter, bi, &adapter->params)) { - err = -ENODEV; /* Can't handle this chip rev */ - goto out_free_dev; - } - - adapter->name = pci_name(pdev); - adapter->msg_enable = dflt_msg_enable; - adapter->mmio_len = mmio_len; - - spin_lock_init(&adapter->tpi_lock); - spin_lock_init(&adapter->work_lock); - spin_lock_init(&adapter->async_lock); - spin_lock_init(&adapter->mac_lock); - - INIT_WORK(&adapter->ext_intr_handler_task, - ext_intr_task); - INIT_DELAYED_WORK(&adapter->stats_update_task, - mac_stats_task); - - pci_set_drvdata(pdev, netdev); - } - - pi = &adapter->port[i]; - pi->dev = netdev; - netif_carrier_off(netdev); - netdev->irq = pdev->irq; - netdev->if_port = i; - netdev->mem_start = mmio_start; - netdev->mem_end = mmio_start + mmio_len - 1; - netdev->ml_priv = adapter; - netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_RXCSUM; - netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_RXCSUM | NETIF_F_LLTX; - - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; - if (vlan_tso_capable(adapter)) { - netdev->features |= - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - netdev->hw_features |= NETIF_F_HW_VLAN_RX; - - /* T204: disable TSO */ - if (!(is_T2(adapter)) || bi->port_number != 4) { - netdev->hw_features |= NETIF_F_TSO; - netdev->features |= NETIF_F_TSO; - } - } - - netdev->netdev_ops = &cxgb_netdev_ops; - netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ? - sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt); - - netif_napi_add(netdev, &adapter->napi, t1_poll, 64); - - SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); - } - - if (t1_init_sw_modules(adapter, bi) < 0) { - err = -ENODEV; - goto out_free_dev; - } - - /* - * The card is now ready to go. If any errors occur during device - * registration we do not fail the whole card but rather proceed only - * with the ports we manage to register successfully. However we must - * register at least one net device. - */ - for (i = 0; i < bi->port_number; ++i) { - err = register_netdev(adapter->port[i].dev); - if (err) - pr_warning("%s: cannot register net device %s, skipping\n", - pci_name(pdev), adapter->port[i].dev->name); - else { - /* - * Change the name we use for messages to the name of - * the first successfully registered interface. 
- */ - if (!adapter->registered_device_map) - adapter->name = adapter->port[i].dev->name; - - __set_bit(i, &adapter->registered_device_map); - } - } - if (!adapter->registered_device_map) { - pr_err("%s: could not register any net devices\n", - pci_name(pdev)); - goto out_release_adapter_res; - } - - printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name, - bi->desc, adapter->params.chip_revision, - adapter->params.pci.is_pcix ? "PCIX" : "PCI", - adapter->params.pci.speed, adapter->params.pci.width); - - /* - * Set the T1B ASIC and memory clocks. - */ - if (t1powersave) - adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */ - else - adapter->t1powersave = HCLOCK; - if (t1_is_T1B(adapter)) - t1_clock(adapter, t1powersave); - - return 0; - -out_release_adapter_res: - t1_free_sw_modules(adapter); -out_free_dev: - if (adapter) { - if (adapter->regs) - iounmap(adapter->regs); - for (i = bi->port_number - 1; i >= 0; --i) - if (adapter->port[i].dev) - free_netdev(adapter->port[i].dev); - } - pci_release_regions(pdev); -out_disable_pdev: - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - return err; -} - -static void bit_bang(struct adapter *adapter, int bitdata, int nbits) -{ - int data; - int i; - u32 val; - - enum { - S_CLOCK = 1 << 3, - S_DATA = 1 << 4 - }; - - for (i = (nbits - 1); i > -1; i--) { - - udelay(50); - - data = ((bitdata >> i) & 0x1); - __t1_tpi_read(adapter, A_ELMER0_GPO, &val); - - if (data) - val |= S_DATA; - else - val &= ~S_DATA; - - udelay(50); - - /* Set SCLOCK low */ - val &= ~S_CLOCK; - __t1_tpi_write(adapter, A_ELMER0_GPO, val); - - udelay(50); - - /* Write SCLOCK high */ - val |= S_CLOCK; - __t1_tpi_write(adapter, A_ELMER0_GPO, val); - - } -} - -static int t1_clock(struct adapter *adapter, int mode) -{ - u32 val; - int M_CORE_VAL; - int M_MEM_VAL; - - enum { - M_CORE_BITS = 9, - T_CORE_VAL = 0, - T_CORE_BITS = 2, - N_CORE_VAL = 0, - N_CORE_BITS = 2, - M_MEM_BITS = 9, - T_MEM_VAL = 0, - T_MEM_BITS = 2, - N_MEM_VAL = 0, - N_MEM_BITS = 2, - NP_LOAD = 1 << 17, - S_LOAD_MEM = 1 << 5, - S_LOAD_CORE = 1 << 6, - S_CLOCK = 1 << 3 - }; - - if (!t1_is_T1B(adapter)) - return -ENODEV; /* Can't re-clock this chip. */ - - if (mode & 2) - return 0; /* show current mode. */ - - if ((adapter->t1powersave & 1) == (mode & 1)) - return -EALREADY; /* ASIC already running in mode. */ - - if ((mode & 1) == HCLOCK) { - M_CORE_VAL = 0x14; - M_MEM_VAL = 0x18; - adapter->t1powersave = HCLOCK; /* overclock */ - } else { - M_CORE_VAL = 0xe; - M_MEM_VAL = 0x10; - adapter->t1powersave = LCLOCK; /* underclock */ - } - - /* Don't interrupt this serial stream! 
*/ - spin_lock(&adapter->tpi_lock); - - /* Initialize for ASIC core */ - __t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val |= NP_LOAD; - udelay(50); - __t1_tpi_write(adapter, A_ELMER0_GPO, val); - udelay(50); - __t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val &= ~S_LOAD_CORE; - val &= ~S_CLOCK; - __t1_tpi_write(adapter, A_ELMER0_GPO, val); - udelay(50); - - /* Serial program the ASIC clock synthesizer */ - bit_bang(adapter, T_CORE_VAL, T_CORE_BITS); - bit_bang(adapter, N_CORE_VAL, N_CORE_BITS); - bit_bang(adapter, M_CORE_VAL, M_CORE_BITS); - udelay(50); - - /* Finish ASIC core */ - __t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val |= S_LOAD_CORE; - udelay(50); - __t1_tpi_write(adapter, A_ELMER0_GPO, val); - udelay(50); - __t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val &= ~S_LOAD_CORE; - udelay(50); - __t1_tpi_write(adapter, A_ELMER0_GPO, val); - udelay(50); - - /* Initialize for memory */ - __t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val |= NP_LOAD; - udelay(50); - __t1_tpi_write(adapter, A_ELMER0_GPO, val); - udelay(50); - __t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val &= ~S_LOAD_MEM; - val &= ~S_CLOCK; - udelay(50); - __t1_tpi_write(adapter, A_ELMER0_GPO, val); - udelay(50); - - /* Serial program the memory clock synthesizer */ - bit_bang(adapter, T_MEM_VAL, T_MEM_BITS); - bit_bang(adapter, N_MEM_VAL, N_MEM_BITS); - bit_bang(adapter, M_MEM_VAL, M_MEM_BITS); - udelay(50); - - /* Finish memory */ - __t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val |= S_LOAD_MEM; - udelay(50); - __t1_tpi_write(adapter, A_ELMER0_GPO, val); - udelay(50); - __t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val &= ~S_LOAD_MEM; - udelay(50); - __t1_tpi_write(adapter, A_ELMER0_GPO, val); - - spin_unlock(&adapter->tpi_lock); - - return 0; -} - -static inline void t1_sw_reset(struct pci_dev *pdev) -{ - pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3); - pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0); -} - -static void __devexit remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct adapter *adapter = dev->ml_priv; - int i; - - for_each_port(adapter, i) { - if (test_bit(i, &adapter->registered_device_map)) - unregister_netdev(adapter->port[i].dev); - } - - t1_free_sw_modules(adapter); - iounmap(adapter->regs); - - while (--i >= 0) { - if (adapter->port[i].dev) - free_netdev(adapter->port[i].dev); - } - - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - t1_sw_reset(pdev); -} - -static struct pci_driver driver = { - .name = DRV_NAME, - .id_table = t1_pci_tbl, - .probe = init_one, - .remove = __devexit_p(remove_one), -}; - -static int __init t1_init_module(void) -{ - return pci_register_driver(&driver); -} - -static void __exit t1_cleanup_module(void) -{ - pci_unregister_driver(&driver); -} - -module_init(t1_init_module); -module_exit(t1_cleanup_module); diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h deleted file mode 100644 index eef655c..0000000 --- a/drivers/net/chelsio/elmer0.h +++ /dev/null @@ -1,158 +0,0 @@ -/***************************************************************************** - * * - * File: elmer0.h * - * $Revision: 1.6 $ * - * $Date: 2005/06/21 22:49:43 $ * - * Description: * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. 
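t1_sw_reset() above resets the ASIC by cycling its PCI power-management state: writing 3 to the PM control/status register requests D3hot, and writing 0 returns the device to D0, which the chip treats as a soft reset. The same sequence as a stand-alone helper (the register offset is passed in; A_PCICFG_PM_CSR is the driver's own constant):

#include <linux/pci.h>

static void pm_cycle_reset(struct pci_dev *pdev, int pm_csr_off)
{
	pci_write_config_dword(pdev, pm_csr_off, 3);	/* enter D3hot */
	pci_write_config_dword(pdev, pm_csr_off, 0);	/* back to D0 */
}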
* - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. * - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#ifndef _CXGB_ELMER0_H_ -#define _CXGB_ELMER0_H_ - -/* ELMER0 flavors */ -enum { - ELMER0_XC2S300E_6FT256_C, - ELMER0_XC2S100E_6TQ144_C -}; - -/* ELMER0 registers */ -#define A_ELMER0_VERSION 0x100000 -#define A_ELMER0_PHY_CFG 0x100004 -#define A_ELMER0_INT_ENABLE 0x100008 -#define A_ELMER0_INT_CAUSE 0x10000c -#define A_ELMER0_GPI_CFG 0x100010 -#define A_ELMER0_GPI_STAT 0x100014 -#define A_ELMER0_GPO 0x100018 -#define A_ELMER0_PORT0_MI1_CFG 0x400000 - -#define S_MI1_MDI_ENABLE 0 -#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE) -#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U) - -#define S_MI1_MDI_INVERT 1 -#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT) -#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U) - -#define S_MI1_PREAMBLE_ENABLE 2 -#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE) -#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U) - -#define S_MI1_SOF 3 -#define M_MI1_SOF 0x3 -#define V_MI1_SOF(x) ((x) << S_MI1_SOF) -#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF) - -#define S_MI1_CLK_DIV 5 -#define M_MI1_CLK_DIV 0xff -#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV) -#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV) - -#define A_ELMER0_PORT0_MI1_ADDR 0x400004 - -#define S_MI1_REG_ADDR 0 -#define M_MI1_REG_ADDR 0x1f -#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR) -#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR) - -#define S_MI1_PHY_ADDR 5 -#define M_MI1_PHY_ADDR 0x1f -#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR) -#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR) - -#define A_ELMER0_PORT0_MI1_DATA 0x400008 - -#define S_MI1_DATA 0 -#define M_MI1_DATA 0xffff -#define V_MI1_DATA(x) ((x) << S_MI1_DATA) -#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA) - -#define A_ELMER0_PORT0_MI1_OP 0x40000c - -#define S_MI1_OP 0 -#define M_MI1_OP 0x3 -#define V_MI1_OP(x) ((x) << S_MI1_OP) -#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP) - -#define S_MI1_ADDR_AUTOINC 2 -#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC) -#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U) - -#define S_MI1_OP_BUSY 31 -#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY) -#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U) - -#define A_ELMER0_PORT1_MI1_CFG 0x500000 -#define A_ELMER0_PORT1_MI1_ADDR 0x500004 -#define A_ELMER0_PORT1_MI1_DATA 0x500008 -#define A_ELMER0_PORT1_MI1_OP 0x50000c -#define A_ELMER0_PORT2_MI1_CFG 0x600000 -#define A_ELMER0_PORT2_MI1_ADDR 0x600004 -#define A_ELMER0_PORT2_MI1_DATA 0x600008 -#define A_ELMER0_PORT2_MI1_OP 0x60000c -#define A_ELMER0_PORT3_MI1_CFG 0x700000 -#define A_ELMER0_PORT3_MI1_ADDR 0x700004 -#define A_ELMER0_PORT3_MI1_DATA 0x700008 
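Every register field above follows one macro pattern: S_* is the bit offset, M_* the field mask, V_*(x) shifts a value into position, F_* is the single-bit convenience form, and G_*(x) extracts a field from a register value. A stand-alone illustration using the MI1 clock divider:

#include <stdio.h>

#define S_MI1_CLK_DIV 5
#define M_MI1_CLK_DIV 0xff
#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)

int main(void)
{
	unsigned int cfg = 0;

	cfg |= V_MI1_CLK_DIV(50);	/* pack a divider of 50 */
	printf("cfg=%#x div=%u\n", cfg, G_MI1_CLK_DIV(cfg));
	return 0;			/* prints cfg=0x640 div=50 */
}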
-#define A_ELMER0_PORT3_MI1_OP 0x70000c - -/* Simple bit definition for GPI and GP0 registers. */ -#define ELMER0_GP_BIT0 0x0001 -#define ELMER0_GP_BIT1 0x0002 -#define ELMER0_GP_BIT2 0x0004 -#define ELMER0_GP_BIT3 0x0008 -#define ELMER0_GP_BIT4 0x0010 -#define ELMER0_GP_BIT5 0x0020 -#define ELMER0_GP_BIT6 0x0040 -#define ELMER0_GP_BIT7 0x0080 -#define ELMER0_GP_BIT8 0x0100 -#define ELMER0_GP_BIT9 0x0200 -#define ELMER0_GP_BIT10 0x0400 -#define ELMER0_GP_BIT11 0x0800 -#define ELMER0_GP_BIT12 0x1000 -#define ELMER0_GP_BIT13 0x2000 -#define ELMER0_GP_BIT14 0x4000 -#define ELMER0_GP_BIT15 0x8000 -#define ELMER0_GP_BIT16 0x10000 -#define ELMER0_GP_BIT17 0x20000 -#define ELMER0_GP_BIT18 0x40000 -#define ELMER0_GP_BIT19 0x80000 - -#define MI1_OP_DIRECT_WRITE 1 -#define MI1_OP_DIRECT_READ 2 - -#define MI1_OP_INDIRECT_ADDRESS 0 -#define MI1_OP_INDIRECT_WRITE 1 -#define MI1_OP_INDIRECT_READ_INC 2 -#define MI1_OP_INDIRECT_READ 3 - -#endif /* _CXGB_ELMER0_H_ */ - diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c deleted file mode 100644 index 639ff19..0000000 --- a/drivers/net/chelsio/espi.c +++ /dev/null @@ -1,373 +0,0 @@ -/***************************************************************************** - * * - * File: espi.c * - * $Revision: 1.14 $ * - * $Date: 2005/05/14 00:59:32 $ * - * Description: * - * Ethernet SPI functionality. * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. 
* - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#include "common.h" -#include "regs.h" -#include "espi.h" - -struct peespi { - adapter_t *adapter; - struct espi_intr_counts intr_cnt; - u32 misc_ctrl; - spinlock_t lock; -}; - -#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \ - F_RAMPARITYERR | F_DIP2PARITYERR) -#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \ - | F_MONITORED_INTERFACE) - -#define TRICN_CNFG 14 -#define TRICN_CMD_READ 0x11 -#define TRICN_CMD_WRITE 0x21 -#define TRICN_CMD_ATTEMPTS 10 - -static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr, - int ch_addr, int reg_offset, u32 wr_data) -{ - int busy, attempts = TRICN_CMD_ATTEMPTS; - - writel(V_WRITE_DATA(wr_data) | - V_REGISTER_OFFSET(reg_offset) | - V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) | - V_BUNDLE_ADDR(bundle_addr) | - V_SPI4_COMMAND(TRICN_CMD_WRITE), - adapter->regs + A_ESPI_CMD_ADDR); - writel(0, adapter->regs + A_ESPI_GOSTAT); - - do { - busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY; - } while (busy && --attempts); - - if (busy) - pr_err("%s: TRICN write timed out\n", adapter->name); - - return busy; -} - -static int tricn_init(adapter_t *adapter) -{ - int i, sme = 1; - - if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) { - pr_err("%s: ESPI clock not ready\n", adapter->name); - return -1; - } - - writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET); - - if (sme) { - tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); - tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); - tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); - } - for (i = 1; i <= 8; i++) - tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); - for (i = 1; i <= 2; i++) - tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); - for (i = 1; i <= 3; i++) - tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); - tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1); - tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1); - tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1); - tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80); - tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1); - - writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST, - adapter->regs + A_ESPI_RX_RESET); - - return 0; -} - -void t1_espi_intr_enable(struct peespi *espi) -{ - u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE); - - /* - * Cannot enable ESPI interrupts on T1B because HW asserts the - * interrupt incorrectly, namely the driver gets ESPI interrupts - * but no data is actually dropped (can verify this reading the ESPI - * drop registers). Also, once the ESPI interrupt is asserted it - * cannot be cleared (HW bug). - */ - enable = t1_is_T1B(espi->adapter) ? 
0 : ESPI_INTR_MASK; - writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE); - writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE); -} - -void t1_espi_intr_clear(struct peespi *espi) -{ - readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); - writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS); - writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE); -} - -void t1_espi_intr_disable(struct peespi *espi) -{ - u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE); - - writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE); - writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE); -} - -int t1_espi_intr_handler(struct peespi *espi) -{ - u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS); - - if (status & F_DIP4ERR) - espi->intr_cnt.DIP4_err++; - if (status & F_RXDROP) - espi->intr_cnt.rx_drops++; - if (status & F_TXDROP) - espi->intr_cnt.tx_drops++; - if (status & F_RXOVERFLOW) - espi->intr_cnt.rx_ovflw++; - if (status & F_RAMPARITYERR) - espi->intr_cnt.parity_err++; - if (status & F_DIP2PARITYERR) { - espi->intr_cnt.DIP2_parity_err++; - - /* - * Must read the error count to clear the interrupt - * that it causes. - */ - readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); - } - - /* - * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we - * write the status as is. - */ - if (status && t1_is_T1B(espi->adapter)) - status = 1; - writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS); - return 0; -} - -const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi) -{ - return &espi->intr_cnt; -} - -static void espi_setup_for_pm3393(adapter_t *adapter) -{ - u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200; - - writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); - writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1); - writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); - writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3); - writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); - writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); - writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH); - writel(0x08000008, adapter->regs + A_ESPI_TRAIN); - writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG); -} - -static void espi_setup_for_vsc7321(adapter_t *adapter) -{ - writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); - writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1); - writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); - writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); - writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); - writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); - writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG); - - writel(0x08000008, adapter->regs + A_ESPI_TRAIN); -} - -/* - * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug. 
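The tail of t1_espi_intr_handler() above hides an acknowledge quirk: T1B clears all ESPI interrupts on any nonzero write to the status register, while T2 and later expect the observed status written back. The quirk isolated as a sketch (regs, status_off and is_t1b are stand-ins for the driver's own state):

#include <linux/io.h>
#include <linux/types.h>

static void ack_espi_intr(void __iomem *regs, unsigned long status_off,
			  bool is_t1b)
{
	u32 status = readl(regs + status_off);

	if (status && is_t1b)
		status = 1;		/* T1B: any 1 clears everything */
	writel(status, regs + status_off);	/* T2+: write back as-is */
}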
- */ -static void espi_setup_for_ixf1010(adapter_t *adapter, int nports) -{ - writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); - if (nports == 4) { - if (is_T2(adapter)) { - writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); - writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); - } else { - writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); - writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); - } - } else { - writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); - writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); - } - writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG); - -} - -int t1_espi_init(struct peespi *espi, int mac_type, int nports) -{ - u32 status_enable_extra = 0; - adapter_t *adapter = espi->adapter; - - /* Disable ESPI training. MACs that can handle it enable it below. */ - writel(0, adapter->regs + A_ESPI_TRAIN); - - if (is_T2(adapter)) { - writel(V_OUT_OF_SYNC_COUNT(4) | - V_DIP2_PARITY_ERR_THRES(3) | - V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL); - writel(nports == 4 ? 0x200040 : 0x1000080, - adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); - } else - writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); - - if (mac_type == CHBT_MAC_PM3393) - espi_setup_for_pm3393(adapter); - else if (mac_type == CHBT_MAC_VSC7321) - espi_setup_for_vsc7321(adapter); - else if (mac_type == CHBT_MAC_IXF1010) { - status_enable_extra = F_INTEL1010MODE; - espi_setup_for_ixf1010(adapter, nports); - } else - return -1; - - writel(status_enable_extra | F_RXSTATUSENABLE, - adapter->regs + A_ESPI_FIFO_STATUS_ENABLE); - - if (is_T2(adapter)) { - tricn_init(adapter); - /* - * Always position the control at the 1st port egress IN - * (sop,eop) counter to reduce PIOs for T/N210 workaround. - */ - espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL); - espi->misc_ctrl &= ~MON_MASK; - espi->misc_ctrl |= F_MONITORED_DIRECTION; - if (adapter->params.nports == 1) - espi->misc_ctrl |= F_MONITORED_INTERFACE; - writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); - spin_lock_init(&espi->lock); - } - - return 0; -} - -void t1_espi_destroy(struct peespi *espi) -{ - kfree(espi); -} - -struct peespi *t1_espi_create(adapter_t *adapter) -{ - struct peespi *espi = kzalloc(sizeof(*espi), GFP_KERNEL); - - if (espi) - espi->adapter = adapter; - return espi; -} - -#if 0 -void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val) -{ - struct peespi *espi = adapter->espi; - - if (!is_T2(adapter)) - return; - spin_lock(&espi->lock); - espi->misc_ctrl = (val & ~MON_MASK) | - (espi->misc_ctrl & MON_MASK); - writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); - spin_unlock(&espi->lock); -} -#endif /* 0 */ - -u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait) -{ - struct peespi *espi = adapter->espi; - u32 sel; - - if (!is_T2(adapter)) - return 0; - - sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2); - if (!wait) { - if (!spin_trylock(&espi->lock)) - return 0; - } else - spin_lock(&espi->lock); - - if ((sel != (espi->misc_ctrl & MON_MASK))) { - writel(((espi->misc_ctrl & ~MON_MASK) | sel), - adapter->regs + A_ESPI_MISC_CONTROL); - sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); - writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); - } else - sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); - spin_unlock(&espi->lock); - return sel; -} - -/* - * This function is for T204 only. 
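t1_espi_get_mon() above threads a 'wait' flag through its locking: callers that must not spin pass wait=0 and accept a failed spin_trylock(), while callers that can afford to contend pass wait=1 and take spin_lock() unconditionally. The shape of that pattern, generically (sample() is a hypothetical callback, and the error return differs from the driver's):

#include <linux/spinlock.h>
#include <linux/types.h>

static int read_monitor(spinlock_t *lock, u32 *out,
			u32 (*sample)(void), int wait)
{
	if (!wait) {
		if (!spin_trylock(lock))
			return -EBUSY;	/* busy: caller retries later */
	} else {
		spin_lock(lock);
	}
	*out = sample();
	spin_unlock(lock);
	return 0;
}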
- * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in - * one shot, since there is no per port counter on the out side. - */ -int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait) -{ - struct peespi *espi = adapter->espi; - u8 i, nport = (u8)adapter->params.nports; - - if (!wait) { - if (!spin_trylock(&espi->lock)) - return -1; - } else - spin_lock(&espi->lock); - - if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) { - espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) | - F_MONITORED_DIRECTION; - writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); - } - for (i = 0 ; i < nport; i++, valp++) { - if (i) { - writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i), - adapter->regs + A_ESPI_MISC_CONTROL); - } - *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3); - } - - writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); - spin_unlock(&espi->lock); - return 0; -} diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h deleted file mode 100644 index 5694aad..0000000 --- a/drivers/net/chelsio/espi.h +++ /dev/null @@ -1,68 +0,0 @@ -/***************************************************************************** - * * - * File: espi.h * - * $Revision: 1.7 $ * - * $Date: 2005/06/21 18:29:47 $ * - * Description: * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. 
* - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#ifndef _CXGB_ESPI_H_ -#define _CXGB_ESPI_H_ - -#include "common.h" - -struct espi_intr_counts { - unsigned int DIP4_err; - unsigned int rx_drops; - unsigned int tx_drops; - unsigned int rx_ovflw; - unsigned int parity_err; - unsigned int DIP2_parity_err; -}; - -struct peespi; - -struct peespi *t1_espi_create(adapter_t *adapter); -void t1_espi_destroy(struct peespi *espi); -int t1_espi_init(struct peespi *espi, int mac_type, int nports); - -void t1_espi_intr_enable(struct peespi *); -void t1_espi_intr_clear(struct peespi *); -void t1_espi_intr_disable(struct peespi *); -int t1_espi_intr_handler(struct peespi *); -const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi); - -u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait); -int t1_espi_get_mon_t204(adapter_t *, u32 *, u8); - -#endif /* _CXGB_ESPI_H_ */ diff --git a/drivers/net/chelsio/fpga_defs.h b/drivers/net/chelsio/fpga_defs.h deleted file mode 100644 index ccdb2bc..0000000 --- a/drivers/net/chelsio/fpga_defs.h +++ /dev/null @@ -1,232 +0,0 @@ -/* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */ - -/* - * FPGA specific definitions - */ - -#ifndef __CHELSIO_FPGA_DEFS_H__ -#define __CHELSIO_FPGA_DEFS_H__ - -#define FPGA_PCIX_ADDR_VERSION 0xA08 -#define FPGA_PCIX_ADDR_STAT 0xA0C - -/* FPGA master interrupt Cause/Enable bits */ -#define FPGA_PCIX_INTERRUPT_SGE_ERROR 0x1 -#define FPGA_PCIX_INTERRUPT_SGE_DATA 0x2 -#define FPGA_PCIX_INTERRUPT_TP 0x4 -#define FPGA_PCIX_INTERRUPT_MC3 0x8 -#define FPGA_PCIX_INTERRUPT_GMAC 0x10 -#define FPGA_PCIX_INTERRUPT_PCIX 0x20 - -/* TP interrupt register addresses */ -#define FPGA_TP_ADDR_INTERRUPT_ENABLE 0xA10 -#define FPGA_TP_ADDR_INTERRUPT_CAUSE 0xA14 -#define FPGA_TP_ADDR_VERSION 0xA18 - -/* TP interrupt Cause/Enable bits */ -#define FPGA_TP_INTERRUPT_MC4 0x1 -#define FPGA_TP_INTERRUPT_MC5 0x2 - -/* - * PM interrupt register addresses - */ -#define FPGA_MC3_REG_INTRENABLE 0xA20 -#define FPGA_MC3_REG_INTRCAUSE 0xA24 -#define FPGA_MC3_REG_VERSION 0xA28 - -/* - * GMAC interrupt register addresses - */ -#define FPGA_GMAC_ADDR_INTERRUPT_ENABLE 0xA30 -#define FPGA_GMAC_ADDR_INTERRUPT_CAUSE 0xA34 -#define FPGA_GMAC_ADDR_VERSION 0xA38 - -/* GMAC Cause/Enable bits */ -#define FPGA_GMAC_INTERRUPT_PORT0 0x1 -#define FPGA_GMAC_INTERRUPT_PORT1 0x2 -#define FPGA_GMAC_INTERRUPT_PORT2 0x4 -#define FPGA_GMAC_INTERRUPT_PORT3 0x8 - -/* MI0 registers */ -#define A_MI0_CLK 0xb00 - -#define S_MI0_CLK_DIV 0 -#define M_MI0_CLK_DIV 0xff -#define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV) -#define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV) - -#define S_MI0_CLK_CNT 8 -#define M_MI0_CLK_CNT 0xff -#define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT) -#define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT) - -#define A_MI0_CSR 0xb04 - -#define S_MI0_CSR_POLL 0 -#define V_MI0_CSR_POLL(x) ((x) << S_MI0_CSR_POLL) -#define F_MI0_CSR_POLL V_MI0_CSR_POLL(1U) - -#define S_MI0_PREAMBLE 1 -#define V_MI0_PREAMBLE(x) ((x) << S_MI0_PREAMBLE) -#define F_MI0_PREAMBLE V_MI0_PREAMBLE(1U) - -#define S_MI0_INTR_ENABLE 2 -#define V_MI0_INTR_ENABLE(x) ((x) << S_MI0_INTR_ENABLE) -#define F_MI0_INTR_ENABLE V_MI0_INTR_ENABLE(1U) - -#define S_MI0_BUSY 3 -#define V_MI0_BUSY(x) ((x) << S_MI0_BUSY) 
-#define F_MI0_BUSY V_MI0_BUSY(1U) - -#define S_MI0_MDIO 4 -#define V_MI0_MDIO(x) ((x) << S_MI0_MDIO) -#define F_MI0_MDIO V_MI0_MDIO(1U) - -#define A_MI0_ADDR 0xb08 - -#define S_MI0_PHY_REG_ADDR 0 -#define M_MI0_PHY_REG_ADDR 0x1f -#define V_MI0_PHY_REG_ADDR(x) ((x) << S_MI0_PHY_REG_ADDR) -#define G_MI0_PHY_REG_ADDR(x) (((x) >> S_MI0_PHY_REG_ADDR) & M_MI0_PHY_REG_ADDR) - -#define S_MI0_PHY_ADDR 5 -#define M_MI0_PHY_ADDR 0x1f -#define V_MI0_PHY_ADDR(x) ((x) << S_MI0_PHY_ADDR) -#define G_MI0_PHY_ADDR(x) (((x) >> S_MI0_PHY_ADDR) & M_MI0_PHY_ADDR) - -#define A_MI0_DATA_EXT 0xb0c -#define A_MI0_DATA_INT 0xb10 - -/* GMAC registers */ -#define A_GMAC_MACID_LO 0x28 -#define A_GMAC_MACID_HI 0x2c -#define A_GMAC_CSR 0x30 - -#define S_INTERFACE 0 -#define M_INTERFACE 0x3 -#define V_INTERFACE(x) ((x) << S_INTERFACE) -#define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE) - -#define S_MAC_TX_ENABLE 2 -#define V_MAC_TX_ENABLE(x) ((x) << S_MAC_TX_ENABLE) -#define F_MAC_TX_ENABLE V_MAC_TX_ENABLE(1U) - -#define S_MAC_RX_ENABLE 3 -#define V_MAC_RX_ENABLE(x) ((x) << S_MAC_RX_ENABLE) -#define F_MAC_RX_ENABLE V_MAC_RX_ENABLE(1U) - -#define S_MAC_LB_ENABLE 4 -#define V_MAC_LB_ENABLE(x) ((x) << S_MAC_LB_ENABLE) -#define F_MAC_LB_ENABLE V_MAC_LB_ENABLE(1U) - -#define S_MAC_SPEED 5 -#define M_MAC_SPEED 0x3 -#define V_MAC_SPEED(x) ((x) << S_MAC_SPEED) -#define G_MAC_SPEED(x) (((x) >> S_MAC_SPEED) & M_MAC_SPEED) - -#define S_MAC_HD_FC_ENABLE 7 -#define V_MAC_HD_FC_ENABLE(x) ((x) << S_MAC_HD_FC_ENABLE) -#define F_MAC_HD_FC_ENABLE V_MAC_HD_FC_ENABLE(1U) - -#define S_MAC_HALF_DUPLEX 8 -#define V_MAC_HALF_DUPLEX(x) ((x) << S_MAC_HALF_DUPLEX) -#define F_MAC_HALF_DUPLEX V_MAC_HALF_DUPLEX(1U) - -#define S_MAC_PROMISC 9 -#define V_MAC_PROMISC(x) ((x) << S_MAC_PROMISC) -#define F_MAC_PROMISC V_MAC_PROMISC(1U) - -#define S_MAC_MC_ENABLE 10 -#define V_MAC_MC_ENABLE(x) ((x) << S_MAC_MC_ENABLE) -#define F_MAC_MC_ENABLE V_MAC_MC_ENABLE(1U) - -#define S_MAC_RESET 11 -#define V_MAC_RESET(x) ((x) << S_MAC_RESET) -#define F_MAC_RESET V_MAC_RESET(1U) - -#define S_MAC_RX_PAUSE_ENABLE 12 -#define V_MAC_RX_PAUSE_ENABLE(x) ((x) << S_MAC_RX_PAUSE_ENABLE) -#define F_MAC_RX_PAUSE_ENABLE V_MAC_RX_PAUSE_ENABLE(1U) - -#define S_MAC_TX_PAUSE_ENABLE 13 -#define V_MAC_TX_PAUSE_ENABLE(x) ((x) << S_MAC_TX_PAUSE_ENABLE) -#define F_MAC_TX_PAUSE_ENABLE V_MAC_TX_PAUSE_ENABLE(1U) - -#define S_MAC_LWM_ENABLE 14 -#define V_MAC_LWM_ENABLE(x) ((x) << S_MAC_LWM_ENABLE) -#define F_MAC_LWM_ENABLE V_MAC_LWM_ENABLE(1U) - -#define S_MAC_MAGIC_PKT_ENABLE 15 -#define V_MAC_MAGIC_PKT_ENABLE(x) ((x) << S_MAC_MAGIC_PKT_ENABLE) -#define F_MAC_MAGIC_PKT_ENABLE V_MAC_MAGIC_PKT_ENABLE(1U) - -#define S_MAC_ISL_ENABLE 16 -#define V_MAC_ISL_ENABLE(x) ((x) << S_MAC_ISL_ENABLE) -#define F_MAC_ISL_ENABLE V_MAC_ISL_ENABLE(1U) - -#define S_MAC_JUMBO_ENABLE 17 -#define V_MAC_JUMBO_ENABLE(x) ((x) << S_MAC_JUMBO_ENABLE) -#define F_MAC_JUMBO_ENABLE V_MAC_JUMBO_ENABLE(1U) - -#define S_MAC_RX_PAD_ENABLE 18 -#define V_MAC_RX_PAD_ENABLE(x) ((x) << S_MAC_RX_PAD_ENABLE) -#define F_MAC_RX_PAD_ENABLE V_MAC_RX_PAD_ENABLE(1U) - -#define S_MAC_RX_CRC_ENABLE 19 -#define V_MAC_RX_CRC_ENABLE(x) ((x) << S_MAC_RX_CRC_ENABLE) -#define F_MAC_RX_CRC_ENABLE V_MAC_RX_CRC_ENABLE(1U) - -#define A_GMAC_IFS 0x34 - -#define S_MAC_IFS2 0 -#define M_MAC_IFS2 0x3f -#define V_MAC_IFS2(x) ((x) << S_MAC_IFS2) -#define G_MAC_IFS2(x) (((x) >> S_MAC_IFS2) & M_MAC_IFS2) - -#define S_MAC_IFS1 8 -#define M_MAC_IFS1 0x7f -#define V_MAC_IFS1(x) ((x) << S_MAC_IFS1) -#define G_MAC_IFS1(x) (((x) >> S_MAC_IFS1) & 
M_MAC_IFS1) - -#define A_GMAC_JUMBO_FRAME_LEN 0x38 -#define A_GMAC_LNK_DLY 0x3c -#define A_GMAC_PAUSETIME 0x40 -#define A_GMAC_MCAST_LO 0x44 -#define A_GMAC_MCAST_HI 0x48 -#define A_GMAC_MCAST_MASK_LO 0x4c -#define A_GMAC_MCAST_MASK_HI 0x50 -#define A_GMAC_RMT_CNT 0x54 -#define A_GMAC_RMT_DATA 0x58 -#define A_GMAC_BACKOFF_SEED 0x5c -#define A_GMAC_TXF_THRES 0x60 - -#define S_TXF_READ_THRESHOLD 0 -#define M_TXF_READ_THRESHOLD 0xff -#define V_TXF_READ_THRESHOLD(x) ((x) << S_TXF_READ_THRESHOLD) -#define G_TXF_READ_THRESHOLD(x) (((x) >> S_TXF_READ_THRESHOLD) & M_TXF_READ_THRESHOLD) - -#define S_TXF_WRITE_THRESHOLD 16 -#define M_TXF_WRITE_THRESHOLD 0xff -#define V_TXF_WRITE_THRESHOLD(x) ((x) << S_TXF_WRITE_THRESHOLD) -#define G_TXF_WRITE_THRESHOLD(x) (((x) >> S_TXF_WRITE_THRESHOLD) & M_TXF_WRITE_THRESHOLD) - -#define MAC_REG_BASE 0x600 -#define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg)) - -#define MAC_REG_IDLO(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_LO) -#define MAC_REG_IDHI(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_HI) -#define MAC_REG_CSR(idx) MAC_REG_ADDR(idx, A_GMAC_CSR) -#define MAC_REG_IFS(idx) MAC_REG_ADDR(idx, A_GMAC_IFS) -#define MAC_REG_LARGEFRAMELENGTH(idx) MAC_REG_ADDR(idx, A_GMAC_JUMBO_FRAME_LEN) -#define MAC_REG_LINKDLY(idx) MAC_REG_ADDR(idx, A_GMAC_LNK_DLY) -#define MAC_REG_PAUSETIME(idx) MAC_REG_ADDR(idx, A_GMAC_PAUSETIME) -#define MAC_REG_CASTLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_LO) -#define MAC_REG_MCASTHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_HI) -#define MAC_REG_CASTMASKLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_LO) -#define MAC_REG_MCASTMASKHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_HI) -#define MAC_REG_RMCNT(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_CNT) -#define MAC_REG_RMDATA(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_DATA) -#define MAC_REG_GMRANDBACKOFFSEED(idx) MAC_REG_ADDR(idx, A_GMAC_BACKOFF_SEED) -#define MAC_REG_TXFTHRESHOLDS(idx) MAC_REG_ADDR(idx, A_GMAC_TXF_THRES) - -#endif diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h deleted file mode 100644 index d423374..0000000 --- a/drivers/net/chelsio/gmac.h +++ /dev/null @@ -1,142 +0,0 @@ -/***************************************************************************** - * * - * File: gmac.h * - * $Revision: 1.6 $ * - * $Date: 2005/06/21 18:29:47 $ * - * Description: * - * Generic MAC functionality. * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. 
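MAC_REG_ADDR() above computes per-port register banking: each MAC instance owns a 128-byte window starting at MAC_REG_BASE, so register 'reg' of port 'idx' sits at 0x600 + idx * 128 + reg. A stand-alone check of the arithmetic:

#include <stdio.h>

#define MAC_REG_BASE 0x600
#define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg))
#define A_GMAC_CSR 0x30

int main(void)
{
	int idx;

	/* prints 0x630, 0x6b0, 0x730, 0x7b0 */
	for (idx = 0; idx < 4; idx++)
		printf("port %d CSR at %#x\n",
		       idx, MAC_REG_ADDR(idx, A_GMAC_CSR));
	return 0;
}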
* - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#ifndef _CXGB_GMAC_H_ -#define _CXGB_GMAC_H_ - -#include "common.h" - -enum { - MAC_STATS_UPDATE_FAST, - MAC_STATS_UPDATE_FULL -}; - -enum { - MAC_DIRECTION_RX = 1, - MAC_DIRECTION_TX = 2 -}; - -struct cmac_statistics { - /* Transmit */ - u64 TxOctetsOK; - u64 TxOctetsBad; - u64 TxUnicastFramesOK; - u64 TxMulticastFramesOK; - u64 TxBroadcastFramesOK; - u64 TxPauseFrames; - u64 TxFramesWithDeferredXmissions; - u64 TxLateCollisions; - u64 TxTotalCollisions; - u64 TxFramesAbortedDueToXSCollisions; - u64 TxUnderrun; - u64 TxLengthErrors; - u64 TxInternalMACXmitError; - u64 TxFramesWithExcessiveDeferral; - u64 TxFCSErrors; - u64 TxJumboFramesOK; - u64 TxJumboOctetsOK; - - /* Receive */ - u64 RxOctetsOK; - u64 RxOctetsBad; - u64 RxUnicastFramesOK; - u64 RxMulticastFramesOK; - u64 RxBroadcastFramesOK; - u64 RxPauseFrames; - u64 RxFCSErrors; - u64 RxAlignErrors; - u64 RxSymbolErrors; - u64 RxDataErrors; - u64 RxSequenceErrors; - u64 RxRuntErrors; - u64 RxJabberErrors; - u64 RxInternalMACRcvError; - u64 RxInRangeLengthErrors; - u64 RxOutOfRangeLengthField; - u64 RxFrameTooLongErrors; - u64 RxJumboFramesOK; - u64 RxJumboOctetsOK; -}; - -struct cmac_ops { - void (*destroy)(struct cmac *); - int (*reset)(struct cmac *); - int (*interrupt_enable)(struct cmac *); - int (*interrupt_disable)(struct cmac *); - int (*interrupt_clear)(struct cmac *); - int (*interrupt_handler)(struct cmac *); - - int (*enable)(struct cmac *, int); - int (*disable)(struct cmac *, int); - - int (*loopback_enable)(struct cmac *); - int (*loopback_disable)(struct cmac *); - - int (*set_mtu)(struct cmac *, int mtu); - int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm); - - int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc); - int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex, - int *fc); - - const struct cmac_statistics *(*statistics_update)(struct cmac *, int); - - int (*macaddress_get)(struct cmac *, u8 mac_addr[6]); - int (*macaddress_set)(struct cmac *, u8 mac_addr[6]); -}; - -typedef struct _cmac_instance cmac_instance; - -struct cmac { - struct cmac_statistics stats; - adapter_t *adapter; - const struct cmac_ops *ops; - cmac_instance *instance; -}; - -struct gmac { - unsigned int stats_update_period; - struct cmac *(*create)(adapter_t *adapter, int index); - int (*reset)(adapter_t *); -}; - -extern const struct gmac t1_pm3393_ops; -extern const struct gmac t1_vsc7326_ops; - -#endif /* _CXGB_GMAC_H_ */ diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/chelsio/mv88e1xxx.c deleted file mode 100644 index 71018a4..0000000 --- a/drivers/net/chelsio/mv88e1xxx.c +++ /dev/null @@ -1,397 +0,0 @@ -/* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */ -#include "common.h" -#include "mv88e1xxx.h" -#include "cphy.h" -#include "elmer0.h" - -/* MV88E1XXX MDI crossover register values */ -#define CROSSOVER_MDI 0 -#define CROSSOVER_MDIX 1 -#define CROSSOVER_AUTO 3 - -#define INTR_ENABLE_MASK 0x6CA0 - -/* - * Set the bits given by 'bitval' in PHY register 'reg'. 
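struct cmac_ops above is a hand-rolled vtable: each MAC chip (PM3393, VSC7326) populates one const ops structure, and chip-agnostic code dispatches through the pointers without knowing which chip is present. The mechanism reduced to a two-method sketch with hypothetical names:

struct mac_ops {
	int (*reset)(void *priv);
	int (*set_mtu)(void *priv, int mtu);
};

struct mac {
	const struct mac_ops *ops;	/* filled in by the chip driver */
	void *priv;
};

static int mac_set_mtu(struct mac *mac, int mtu)
{
	/* common code never needs to know which chip this is */
	return mac->ops->set_mtu(mac->priv, mtu);
}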
- */ -static void mdio_set_bit(struct cphy *cphy, int reg, u32 bitval) -{ - u32 val; - - (void) simple_mdio_read(cphy, reg, &val); - (void) simple_mdio_write(cphy, reg, val | bitval); -} - -/* - * Clear the bits given by 'bitval' in PHY register 'reg'. - */ -static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval) -{ - u32 val; - - (void) simple_mdio_read(cphy, reg, &val); - (void) simple_mdio_write(cphy, reg, val & ~bitval); -} - -/* - * NAME: phy_reset - * - * DESC: Reset the given PHY's port. NOTE: This is not a global - * chip reset. - * - * PARAMS: cphy - Pointer to PHY instance data. - * - * RETURN: 0 - Successful reset. - * -1 - Timeout. - */ -static int mv88e1xxx_reset(struct cphy *cphy, int wait) -{ - u32 ctl; - int time_out = 1000; - - mdio_set_bit(cphy, MII_BMCR, BMCR_RESET); - - do { - (void) simple_mdio_read(cphy, MII_BMCR, &ctl); - ctl &= BMCR_RESET; - if (ctl) - udelay(1); - } while (ctl && --time_out); - - return ctl ? -1 : 0; -} - -static int mv88e1xxx_interrupt_enable(struct cphy *cphy) -{ - /* Enable PHY interrupts. */ - (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, - INTR_ENABLE_MASK); - - /* Enable Marvell interrupts through Elmer0. */ - if (t1_is_asic(cphy->adapter)) { - u32 elmer; - - t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); - elmer |= ELMER0_GP_BIT1; - if (is_T2(cphy->adapter)) - elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4; - t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); - } - return 0; -} - -static int mv88e1xxx_interrupt_disable(struct cphy *cphy) -{ - /* Disable all phy interrupts. */ - (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, 0); - - /* Disable Marvell interrupts through Elmer0. */ - if (t1_is_asic(cphy->adapter)) { - u32 elmer; - - t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); - elmer &= ~ELMER0_GP_BIT1; - if (is_T2(cphy->adapter)) - elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); - t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); - } - return 0; -} - -static int mv88e1xxx_interrupt_clear(struct cphy *cphy) -{ - u32 elmer; - - /* Clear PHY interrupts by reading the register. */ - (void) simple_mdio_read(cphy, - MV88E1XXX_INTERRUPT_STATUS_REGISTER, &elmer); - - /* Clear Marvell interrupts through Elmer0. */ - if (t1_is_asic(cphy->adapter)) { - t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); - elmer |= ELMER0_GP_BIT1; - if (is_T2(cphy->adapter)) - elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; - t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); - } - return 0; -} - -/* - * Set the PHY speed and duplex. This also disables auto-negotiation, except - * for 1Gb/s, where auto-negotiation is mandatory. 
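mv88e1xxx_reset() above sets BMCR_RESET and polls the PHY until the bit self-clears, bounded by a retry budget so a dead PHY cannot hang the caller. The loop as a generic helper (read_reg stands in for simple_mdio_read):

#include <linux/delay.h>
#include <linux/types.h>

static int wait_bit_clear(u32 (*read_reg)(int reg), int reg, u32 bit,
			  int tries)
{
	while (tries--) {
		if (!(read_reg(reg) & bit))
			return 0;	/* hardware finished the reset */
		udelay(1);
	}
	return -1;			/* timed out, as the driver does */
}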
- */ -static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex) -{ - u32 ctl; - - (void) simple_mdio_read(phy, MII_BMCR, &ctl); - if (speed >= 0) { - ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); - if (speed == SPEED_100) - ctl |= BMCR_SPEED100; - else if (speed == SPEED_1000) - ctl |= BMCR_SPEED1000; - } - if (duplex >= 0) { - ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE); - if (duplex == DUPLEX_FULL) - ctl |= BMCR_FULLDPLX; - } - if (ctl & BMCR_SPEED1000) /* auto-negotiation required for 1Gb/s */ - ctl |= BMCR_ANENABLE; - (void) simple_mdio_write(phy, MII_BMCR, ctl); - return 0; -} - -static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover) -{ - u32 data32; - - (void) simple_mdio_read(cphy, - MV88E1XXX_SPECIFIC_CNTRL_REGISTER, &data32); - data32 &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE); - data32 |= V_PSCR_MDI_XOVER_MODE(crossover); - (void) simple_mdio_write(cphy, - MV88E1XXX_SPECIFIC_CNTRL_REGISTER, data32); - return 0; -} - -static int mv88e1xxx_autoneg_enable(struct cphy *cphy) -{ - u32 ctl; - - (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO); - - (void) simple_mdio_read(cphy, MII_BMCR, &ctl); - /* restart autoneg for change to take effect */ - ctl |= BMCR_ANENABLE | BMCR_ANRESTART; - (void) simple_mdio_write(cphy, MII_BMCR, ctl); - return 0; -} - -static int mv88e1xxx_autoneg_disable(struct cphy *cphy) -{ - u32 ctl; - - /* - * Crossover *must* be set to manual in order to disable auto-neg. - * The Alaska FAQs document highlights this point. - */ - (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_MDI); - - /* - * Must include autoneg reset when disabling auto-neg. This - * is described in the Alaska FAQ document. - */ - (void) simple_mdio_read(cphy, MII_BMCR, &ctl); - ctl &= ~BMCR_ANENABLE; - (void) simple_mdio_write(cphy, MII_BMCR, ctl | BMCR_ANRESTART); - return 0; -} - -static int mv88e1xxx_autoneg_restart(struct cphy *cphy) -{ - mdio_set_bit(cphy, MII_BMCR, BMCR_ANRESTART); - return 0; -} - -static int mv88e1xxx_advertise(struct cphy *phy, unsigned int advertise_map) -{ - u32 val = 0; - - if (advertise_map & - (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { - (void) simple_mdio_read(phy, MII_GBCR, &val); - val &= ~(GBCR_ADV_1000HALF | GBCR_ADV_1000FULL); - if (advertise_map & ADVERTISED_1000baseT_Half) - val |= GBCR_ADV_1000HALF; - if (advertise_map & ADVERTISED_1000baseT_Full) - val |= GBCR_ADV_1000FULL; - } - (void) simple_mdio_write(phy, MII_GBCR, val); - - val = 1; - if (advertise_map & ADVERTISED_10baseT_Half) - val |= ADVERTISE_10HALF; - if (advertise_map & ADVERTISED_10baseT_Full) - val |= ADVERTISE_10FULL; - if (advertise_map & ADVERTISED_100baseT_Half) - val |= ADVERTISE_100HALF; - if (advertise_map & ADVERTISED_100baseT_Full) - val |= ADVERTISE_100FULL; - if (advertise_map & ADVERTISED_PAUSE) - val |= ADVERTISE_PAUSE; - if (advertise_map & ADVERTISED_ASYM_PAUSE) - val |= ADVERTISE_PAUSE_ASYM; - (void) simple_mdio_write(phy, MII_ADVERTISE, val); - return 0; -} - -static int mv88e1xxx_set_loopback(struct cphy *cphy, int on) -{ - if (on) - mdio_set_bit(cphy, MII_BMCR, BMCR_LOOPBACK); - else - mdio_clear_bit(cphy, MII_BMCR, BMCR_LOOPBACK); - return 0; -} - -static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok, - int *speed, int *duplex, int *fc) -{ - u32 status; - int sp = -1, dplx = -1, pause = 0; - - (void) simple_mdio_read(cphy, - MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); - if ((status & V_PSSR_STATUS_RESOLVED) != 0) { - if (status & V_PSSR_RX_PAUSE) - pause |= PAUSE_RX; - if (status & 
V_PSSR_TX_PAUSE) - pause |= PAUSE_TX; - dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; - sp = G_PSSR_SPEED(status); - if (sp == 0) - sp = SPEED_10; - else if (sp == 1) - sp = SPEED_100; - else - sp = SPEED_1000; - } - if (link_ok) - *link_ok = (status & V_PSSR_LINK) != 0; - if (speed) - *speed = sp; - if (duplex) - *duplex = dplx; - if (fc) - *fc = pause; - return 0; -} - -static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable) -{ - u32 val; - - (void) simple_mdio_read(cphy, - MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, &val); - - /* - * Set the downshift counter to 2 so we try to establish Gb link - * twice before downshifting. - */ - val &= ~(V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT)); - - if (downshift_enable) - val |= V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2); - (void) simple_mdio_write(cphy, - MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, val); - return 0; -} - -static int mv88e1xxx_interrupt_handler(struct cphy *cphy) -{ - int cphy_cause = 0; - u32 status; - - /* - * Loop until cause reads zero. Need to handle bouncing interrupts. - */ - while (1) { - u32 cause; - - (void) simple_mdio_read(cphy, - MV88E1XXX_INTERRUPT_STATUS_REGISTER, - &cause); - cause &= INTR_ENABLE_MASK; - if (!cause) - break; - - if (cause & MV88E1XXX_INTR_LINK_CHNG) { - (void) simple_mdio_read(cphy, - MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); - - if (status & MV88E1XXX_INTR_LINK_CHNG) - cphy->state |= PHY_LINK_UP; - else { - cphy->state &= ~PHY_LINK_UP; - if (cphy->state & PHY_AUTONEG_EN) - cphy->state &= ~PHY_AUTONEG_RDY; - cphy_cause |= cphy_cause_link_change; - } - } - - if (cause & MV88E1XXX_INTR_AUTONEG_DONE) - cphy->state |= PHY_AUTONEG_RDY; - - if ((cphy->state & (PHY_LINK_UP | PHY_AUTONEG_RDY)) == - (PHY_LINK_UP | PHY_AUTONEG_RDY)) - cphy_cause |= cphy_cause_link_change; - } - return cphy_cause; -} - -static void mv88e1xxx_destroy(struct cphy *cphy) -{ - kfree(cphy); -} - -static struct cphy_ops mv88e1xxx_ops = { - .destroy = mv88e1xxx_destroy, - .reset = mv88e1xxx_reset, - .interrupt_enable = mv88e1xxx_interrupt_enable, - .interrupt_disable = mv88e1xxx_interrupt_disable, - .interrupt_clear = mv88e1xxx_interrupt_clear, - .interrupt_handler = mv88e1xxx_interrupt_handler, - .autoneg_enable = mv88e1xxx_autoneg_enable, - .autoneg_disable = mv88e1xxx_autoneg_disable, - .autoneg_restart = mv88e1xxx_autoneg_restart, - .advertise = mv88e1xxx_advertise, - .set_loopback = mv88e1xxx_set_loopback, - .set_speed_duplex = mv88e1xxx_set_speed_duplex, - .get_link_status = mv88e1xxx_get_link_status, -}; - -static struct cphy *mv88e1xxx_phy_create(struct net_device *dev, int phy_addr, - const struct mdio_ops *mdio_ops) -{ - struct adapter *adapter = netdev_priv(dev); - struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); - - if (!cphy) - return NULL; - - cphy_init(cphy, dev, phy_addr, &mv88e1xxx_ops, mdio_ops); - - /* Configure particular PHY's to run in a different mode. */ - if ((board_info(adapter)->caps & SUPPORTED_TP) && - board_info(adapter)->chip_phy == CHBT_PHY_88E1111) { - /* - * Configure the PHY transmitter as class A to reduce EMI. 
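mv88e1xxx_interrupt_handler() above loops until the cause register reads zero, re-reading after each pass so interrupts that bounce while earlier ones are being serviced are not lost. The skeleton, with hypothetical read_cause/handle_one callbacks:

#include <linux/types.h>

static void drain_interrupts(u32 (*read_cause)(void),
			     void (*handle_one)(u32 cause), u32 mask)
{
	for (;;) {
		u32 cause = read_cause() & mask;

		if (!cause)
			break;		/* nothing left pending */
		handle_one(cause);
	}
}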
- */ - (void) simple_mdio_write(cphy, - MV88E1XXX_EXTENDED_ADDR_REGISTER, 0xB); - (void) simple_mdio_write(cphy, - MV88E1XXX_EXTENDED_REGISTER, 0x8004); - } - (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */ - - /* LED */ - if (is_T2(adapter)) { - (void) simple_mdio_write(cphy, - MV88E1XXX_LED_CONTROL_REGISTER, 0x1); - } - - return cphy; -} - -static int mv88e1xxx_phy_reset(adapter_t* adapter) -{ - return 0; -} - -const struct gphy t1_mv88e1xxx_ops = { - .create = mv88e1xxx_phy_create, - .reset = mv88e1xxx_phy_reset -}; diff --git a/drivers/net/chelsio/mv88e1xxx.h b/drivers/net/chelsio/mv88e1xxx.h deleted file mode 100644 index 967cc42..0000000 --- a/drivers/net/chelsio/mv88e1xxx.h +++ /dev/null @@ -1,127 +0,0 @@ -/* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */ -#ifndef CHELSIO_MV8E1XXX_H -#define CHELSIO_MV8E1XXX_H - -#ifndef BMCR_SPEED1000 -# define BMCR_SPEED1000 0x40 -#endif - -#ifndef ADVERTISE_PAUSE -# define ADVERTISE_PAUSE 0x400 -#endif -#ifndef ADVERTISE_PAUSE_ASYM -# define ADVERTISE_PAUSE_ASYM 0x800 -#endif - -/* Gigabit MII registers */ -#define MII_GBCR 9 /* 1000Base-T control register */ -#define MII_GBSR 10 /* 1000Base-T status register */ - -/* 1000Base-T control register fields */ -#define GBCR_ADV_1000HALF 0x100 -#define GBCR_ADV_1000FULL 0x200 -#define GBCR_PREFER_MASTER 0x400 -#define GBCR_MANUAL_AS_MASTER 0x800 -#define GBCR_MANUAL_CONFIG_ENABLE 0x1000 - -/* 1000Base-T status register fields */ -#define GBSR_LP_1000HALF 0x400 -#define GBSR_LP_1000FULL 0x800 -#define GBSR_REMOTE_OK 0x1000 -#define GBSR_LOCAL_OK 0x2000 -#define GBSR_LOCAL_MASTER 0x4000 -#define GBSR_MASTER_FAULT 0x8000 - -/* Marvell PHY interrupt status bits. */ -#define MV88E1XXX_INTR_JABBER 0x0001 -#define MV88E1XXX_INTR_POLARITY_CHNG 0x0002 -#define MV88E1XXX_INTR_ENG_DETECT_CHNG 0x0010 -#define MV88E1XXX_INTR_DOWNSHIFT 0x0020 -#define MV88E1XXX_INTR_MDI_XOVER_CHNG 0x0040 -#define MV88E1XXX_INTR_FIFO_OVER_UNDER 0x0080 -#define MV88E1XXX_INTR_FALSE_CARRIER 0x0100 -#define MV88E1XXX_INTR_SYMBOL_ERROR 0x0200 -#define MV88E1XXX_INTR_LINK_CHNG 0x0400 -#define MV88E1XXX_INTR_AUTONEG_DONE 0x0800 -#define MV88E1XXX_INTR_PAGE_RECV 0x1000 -#define MV88E1XXX_INTR_DUPLEX_CHNG 0x2000 -#define MV88E1XXX_INTR_SPEED_CHNG 0x4000 -#define MV88E1XXX_INTR_AUTONEG_ERR 0x8000 - -/* Marvell PHY specific registers. 
*/ -#define MV88E1XXX_SPECIFIC_CNTRL_REGISTER 16 -#define MV88E1XXX_SPECIFIC_STATUS_REGISTER 17 -#define MV88E1XXX_INTERRUPT_ENABLE_REGISTER 18 -#define MV88E1XXX_INTERRUPT_STATUS_REGISTER 19 -#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER 20 -#define MV88E1XXX_RECV_ERR_CNTR_REGISTER 21 -#define MV88E1XXX_RES_REGISTER 22 -#define MV88E1XXX_GLOBAL_STATUS_REGISTER 23 -#define MV88E1XXX_LED_CONTROL_REGISTER 24 -#define MV88E1XXX_MANUAL_LED_OVERRIDE_REGISTER 25 -#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER 26 -#define MV88E1XXX_EXT_PHY_SPECIFIC_STATUS_REGISTER 27 -#define MV88E1XXX_VIRTUAL_CABLE_TESTER_REGISTER 28 -#define MV88E1XXX_EXTENDED_ADDR_REGISTER 29 -#define MV88E1XXX_EXTENDED_REGISTER 30 - -/* PHY specific control register fields */ -#define S_PSCR_MDI_XOVER_MODE 5 -#define M_PSCR_MDI_XOVER_MODE 0x3 -#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE) -#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE) - -/* Extended PHY specific control register fields */ -#define S_DOWNSHIFT_ENABLE 8 -#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE) - -#define S_DOWNSHIFT_CNT 9 -#define M_DOWNSHIFT_CNT 0x7 -#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT) -#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT) - -/* PHY specific status register fields */ -#define S_PSSR_JABBER 0 -#define V_PSSR_JABBER (1 << S_PSSR_JABBER) - -#define S_PSSR_POLARITY 1 -#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY) - -#define S_PSSR_RX_PAUSE 2 -#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE) - -#define S_PSSR_TX_PAUSE 3 -#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE) - -#define S_PSSR_ENERGY_DETECT 4 -#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT) - -#define S_PSSR_DOWNSHIFT_STATUS 5 -#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS) - -#define S_PSSR_MDI 6 -#define V_PSSR_MDI (1 << S_PSSR_MDI) - -#define S_PSSR_CABLE_LEN 7 -#define M_PSSR_CABLE_LEN 0x7 -#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN) -#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN) - -#define S_PSSR_LINK 10 -#define V_PSSR_LINK (1 << S_PSSR_LINK) - -#define S_PSSR_STATUS_RESOLVED 11 -#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED) - -#define S_PSSR_PAGE_RECEIVED 12 -#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED) - -#define S_PSSR_DUPLEX 13 -#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX) - -#define S_PSSR_SPEED 14 -#define M_PSSR_SPEED 0x3 -#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED) -#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED) - -#endif diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c deleted file mode 100644 index f7136b2..0000000 --- a/drivers/net/chelsio/mv88x201x.c +++ /dev/null @@ -1,260 +0,0 @@ -/***************************************************************************** - * * - * File: mv88x201x.c * - * $Revision: 1.12 $ * - * $Date: 2005/04/15 19:27:14 $ * - * Description: * - * Marvell PHY (mv88x201x) functionality. * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. * - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#include "cphy.h" -#include "elmer0.h" - -/* - * The 88x2010 Rev C. requires some link status registers * to be read - * twice in order to get the right values. Future * revisions will fix - * this problem and then this macro * can disappear. - */ -#define MV88x2010_LINK_STATUS_BUGS 1 - -static int led_init(struct cphy *cphy) -{ - /* Setup the LED registers so we can turn on/off. - * Writing these bits maps control to another - * register. mmd(0x1) addr(0x7) - */ - cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd); - return 0; -} - -static int led_link(struct cphy *cphy, u32 do_enable) -{ - u32 led = 0; -#define LINK_ENABLE_BIT 0x1 - - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led); - - if (do_enable & LINK_ENABLE_BIT) { - led |= LINK_ENABLE_BIT; - cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led); - } else { - led &= ~LINK_ENABLE_BIT; - cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led); - } - return 0; -} - -/* Port Reset */ -static int mv88x201x_reset(struct cphy *cphy, int wait) -{ - /* This can be done through registers. It is not required since - * a full chip reset is used. - */ - return 0; -} - -static int mv88x201x_interrupt_enable(struct cphy *cphy) -{ - /* Enable PHY LASI interrupts. */ - cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, - MDIO_PMA_LASI_LSALARM); - - /* Enable Marvell interrupts through Elmer0. */ - if (t1_is_asic(cphy->adapter)) { - u32 elmer; - - t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); - elmer |= ELMER0_GP_BIT6; - t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); - } - return 0; -} - -static int mv88x201x_interrupt_disable(struct cphy *cphy) -{ - /* Disable PHY LASI interrupts. */ - cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0); - - /* Disable Marvell interrupts through Elmer0. */ - if (t1_is_asic(cphy->adapter)) { - u32 elmer; - - t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); - elmer &= ~ELMER0_GP_BIT6; - t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); - } - return 0; -} - -static int mv88x201x_interrupt_clear(struct cphy *cphy) -{ - u32 elmer; - u32 val; - -#ifdef MV88x2010_LINK_STATUS_BUGS - /* Required to read twice before clear takes affect. */ - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val); - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val); - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val); - - /* Read this register after the others above it else - * the register doesn't clear correctly. - */ - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); -#endif - - /* Clear link status. */ - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); - /* Clear PHY LASI interrupts. */ - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val); - -#ifdef MV88x2010_LINK_STATUS_BUGS - /* Do it again. 
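The MV88x2010_LINK_STATUS_BUGS blocks above fence off a rev C erratum: some status registers must be read twice before a clear takes effect, and guarding the extra reads with an #ifdef lets the workaround be deleted once fixed silicon ships. The shape of the pattern (the erratum macro here is hypothetical):

#include <linux/types.h>

#define CHIP_REV_C_STATUS_BUG 1	/* drop when fixed silicon ships */

static void clear_latched_status(u32 (*read_status)(void))
{
#ifdef CHIP_REV_C_STATUS_BUG
	read_status();		/* first read returns stale data */
#endif
	read_status();		/* this read actually clears the latch */
}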
*/ - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val); - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val); -#endif - - /* Clear Marvell interrupts through Elmer0. */ - if (t1_is_asic(cphy->adapter)) { - t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); - elmer |= ELMER0_GP_BIT6; - t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); - } - return 0; -} - -static int mv88x201x_interrupt_handler(struct cphy *cphy) -{ - /* Clear interrupts */ - mv88x201x_interrupt_clear(cphy); - - /* We have only enabled link change interrupts and so - * cphy_cause must be a link change interrupt. - */ - return cphy_cause_link_change; -} - -static int mv88x201x_set_loopback(struct cphy *cphy, int on) -{ - return 0; -} - -static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok, - int *speed, int *duplex, int *fc) -{ - u32 val = 0; - - if (link_ok) { - /* Read link status. */ - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); - val &= MDIO_STAT1_LSTATUS; - *link_ok = (val == MDIO_STAT1_LSTATUS); - /* Turn on/off Link LED */ - led_link(cphy, *link_ok); - } - if (speed) - *speed = SPEED_10000; - if (duplex) - *duplex = DUPLEX_FULL; - if (fc) - *fc = PAUSE_RX | PAUSE_TX; - return 0; -} - -static void mv88x201x_destroy(struct cphy *cphy) -{ - kfree(cphy); -} - -static struct cphy_ops mv88x201x_ops = { - .destroy = mv88x201x_destroy, - .reset = mv88x201x_reset, - .interrupt_enable = mv88x201x_interrupt_enable, - .interrupt_disable = mv88x201x_interrupt_disable, - .interrupt_clear = mv88x201x_interrupt_clear, - .interrupt_handler = mv88x201x_interrupt_handler, - .get_link_status = mv88x201x_get_link_status, - .set_loopback = mv88x201x_set_loopback, - .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | - MDIO_DEVS_PHYXS | MDIO_DEVS_WIS), -}; - -static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr, - const struct mdio_ops *mdio_ops) -{ - u32 val; - struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); - - if (!cphy) - return NULL; - - cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops); - - /* Commands the PHY to enable XFP's clock. */ - cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val); - cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1); - - /* Clear link status. Required because of a bug in the PHY. */ - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val); - cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val); - - /* Allows for Link,Ack LED turn on/off */ - led_init(cphy); - return cphy; -} - -/* Chip Reset */ -static int mv88x201x_phy_reset(adapter_t *adapter) -{ - u32 val; - - t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val &= ~4; - t1_tpi_write(adapter, A_ELMER0_GPO, val); - msleep(100); - - t1_tpi_write(adapter, A_ELMER0_GPO, val | 4); - msleep(1000); - - /* Now lets enable the Laser. Delay 100us */ - t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val |= 0x8000; - t1_tpi_write(adapter, A_ELMER0_GPO, val); - udelay(100); - return 0; -} - -const struct gphy t1_mv88x201x_ops = { - .create = mv88x201x_phy_create, - .reset = mv88x201x_phy_reset -}; diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c deleted file mode 100644 index a683fd3..0000000 --- a/drivers/net/chelsio/my3126.c +++ /dev/null @@ -1,209 +0,0 @@ -/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */ -#include "cphy.h" -#include "elmer0.h" -#include "suni1x10gexp_regs.h" - -/* Port Reset */ -static int my3126_reset(struct cphy *cphy, int wait) -{ - /* - * This can be done through registers. 
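mv88x201x_phy_reset() above sequences the chip out of reset through an Elmer0 GPO bit: drive bit 2 low, hold, drive it high, wait for the PHY to come up, then raise the laser-enable bit. Condensed, with hypothetical gpo_read/gpo_write accessors:

#include <linux/delay.h>
#include <linux/types.h>

static void phy_hw_reset(u32 (*gpo_read)(void), void (*gpo_write)(u32))
{
	u32 val = gpo_read() & ~4;	/* assert reset: bit 2 low */

	gpo_write(val);
	msleep(100);			/* hold the reset */
	gpo_write(val | 4);		/* release reset */
	msleep(1000);			/* give the PHY time to come up */
	gpo_write(gpo_read() | 0x8000);	/* now enable the laser */
	udelay(100);
}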
It is not required since - * a full chip reset is used. - */ - return 0; -} - -static int my3126_interrupt_enable(struct cphy *cphy) -{ - schedule_delayed_work(&cphy->phy_update, HZ/30); - t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo); - return 0; -} - -static int my3126_interrupt_disable(struct cphy *cphy) -{ - cancel_delayed_work_sync(&cphy->phy_update); - return 0; -} - -static int my3126_interrupt_clear(struct cphy *cphy) -{ - return 0; -} - -#define OFFSET(REG_ADDR) (REG_ADDR << 2) - -static int my3126_interrupt_handler(struct cphy *cphy) -{ - u32 val; - u16 val16; - u16 status; - u32 act_count; - adapter_t *adapter; - adapter = cphy->adapter; - - if (cphy->count == 50) { - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); - val16 = (u16) val; - status = cphy->bmsr ^ val16; - - if (status & MDIO_STAT1_LSTATUS) - t1_link_changed(adapter, 0); - cphy->bmsr = val16; - - /* We have only enabled link change interrupts so it - must be that - */ - cphy->count = 0; - } - - t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL), - SUNI1x10GEXP_BITMSK_MSTAT_SNAP); - t1_tpi_read(adapter, - OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count); - t1_tpi_read(adapter, - OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val); - act_count += val; - - /* Populate elmer_gpo with the register value */ - t1_tpi_read(adapter, A_ELMER0_GPO, &val); - cphy->elmer_gpo = val; - - if ( (val & (1 << 8)) || (val & (1 << 19)) || - (cphy->act_count == act_count) || cphy->act_on ) { - if (is_T2(adapter)) - val |= (1 << 9); - else if (t1_is_T1B(adapter)) - val |= (1 << 20); - cphy->act_on = 0; - } else { - if (is_T2(adapter)) - val &= ~(1 << 9); - else if (t1_is_T1B(adapter)) - val &= ~(1 << 20); - cphy->act_on = 1; - } - - t1_tpi_write(adapter, A_ELMER0_GPO, val); - - cphy->elmer_gpo = val; - cphy->act_count = act_count; - cphy->count++; - - return cphy_cause_link_change; -} - -static void my3216_poll(struct work_struct *work) -{ - struct cphy *cphy = container_of(work, struct cphy, phy_update.work); - - my3126_interrupt_handler(cphy); -} - -static int my3126_set_loopback(struct cphy *cphy, int on) -{ - return 0; -} - -/* To check the activity LED */ -static int my3126_get_link_status(struct cphy *cphy, - int *link_ok, int *speed, int *duplex, int *fc) -{ - u32 val; - u16 val16; - adapter_t *adapter; - - adapter = cphy->adapter; - cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); - val16 = (u16) val; - - /* Populate elmer_gpo with the register value */ - t1_tpi_read(adapter, A_ELMER0_GPO, &val); - cphy->elmer_gpo = val; - - *link_ok = (val16 & MDIO_STAT1_LSTATUS); - - if (*link_ok) { - /* Turn on the LED. */ - if (is_T2(adapter)) - val &= ~(1 << 8); - else if (t1_is_T1B(adapter)) - val &= ~(1 << 19); - } else { - /* Turn off the LED. 
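The MY3126 path above has no usable interrupt source, so link state is sampled from a delayed work item scheduled at HZ/30 in my3126_interrupt_enable() and cancelled with cancel_delayed_work_sync() on disable. The polling skeleton, with the sampling body elided (poll_phy and phy_poller are hypothetical):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct phy_poller {
	struct delayed_work work;
};

static void poll_phy(struct work_struct *work)
{
	struct phy_poller *p =
		container_of(work, struct phy_poller, work.work);

	/* ... sample link status over MDIO here ... */
	schedule_delayed_work(&p->work, HZ / 30);	/* re-arm */
}

static void poller_start(struct phy_poller *p)
{
	INIT_DELAYED_WORK(&p->work, poll_phy);
	schedule_delayed_work(&p->work, HZ / 30);
}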
*/ - if (is_T2(adapter)) - val |= (1 << 8); - else if (t1_is_T1B(adapter)) - val |= (1 << 19); - } - - t1_tpi_write(adapter, A_ELMER0_GPO, val); - cphy->elmer_gpo = val; - *speed = SPEED_10000; - *duplex = DUPLEX_FULL; - - /* need to add flow control */ - if (fc) - *fc = PAUSE_RX | PAUSE_TX; - - return 0; -} - -static void my3126_destroy(struct cphy *cphy) -{ - kfree(cphy); -} - -static struct cphy_ops my3126_ops = { - .destroy = my3126_destroy, - .reset = my3126_reset, - .interrupt_enable = my3126_interrupt_enable, - .interrupt_disable = my3126_interrupt_disable, - .interrupt_clear = my3126_interrupt_clear, - .interrupt_handler = my3126_interrupt_handler, - .get_link_status = my3126_get_link_status, - .set_loopback = my3126_set_loopback, - .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | - MDIO_DEVS_PHYXS), -}; - -static struct cphy *my3126_phy_create(struct net_device *dev, - int phy_addr, const struct mdio_ops *mdio_ops) -{ - struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL); - - if (!cphy) - return NULL; - - cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops); - INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); - cphy->bmsr = 0; - - return cphy; -} - -/* Chip Reset */ -static int my3126_phy_reset(adapter_t * adapter) -{ - u32 val; - - t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val &= ~4; - t1_tpi_write(adapter, A_ELMER0_GPO, val); - msleep(100); - - t1_tpi_write(adapter, A_ELMER0_GPO, val | 4); - msleep(1000); - - /* Now lets enable the Laser. Delay 100us */ - t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val |= 0x8000; - t1_tpi_write(adapter, A_ELMER0_GPO, val); - udelay(100); - return 0; -} - -const struct gphy t1_my3126_ops = { - .create = my3126_phy_create, - .reset = my3126_phy_reset -}; diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c deleted file mode 100644 index 40c7b93..0000000 --- a/drivers/net/chelsio/pm3393.c +++ /dev/null @@ -1,796 +0,0 @@ -/***************************************************************************** - * * - * File: pm3393.c * - * $Revision: 1.16 $ * - * $Date: 2005/05/14 00:59:32 $ * - * Description: * - * PMC/SIERRA (pm3393) MAC-PHY functionality. * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. * - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#include "common.h" -#include "regs.h" -#include "gmac.h" -#include "elmer0.h" -#include "suni1x10gexp_regs.h" - -#include -#include - -#define OFFSET(REG_ADDR) ((REG_ADDR) << 2) - -/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. 
*/ -#define MAX_FRAME_SIZE 9600 - -#define IPG 12 -#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \ - SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \ - SUNI1x10GEXP_BITMSK_TXXG_PADEN) -#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \ - SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP) - -/* Update statistics every 15 minutes */ -#define STATS_TICK_SECS (15 * 60) - -enum { /* RMON registers */ - RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW, - RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW, - RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW, - RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW, - RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW, - RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW, - RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW, - RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW, - RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW, - RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW, - RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW, - RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW, - RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW, - RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW, - RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW, - - TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW, - TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW, - TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW, - TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW, - TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW, - TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW, - TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW, - TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW, - TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW -}; - -struct _cmac_instance { - u8 enabled; - u8 fc; - u8 mac_addr[6]; -}; - -static int pmread(struct cmac *cmac, u32 reg, u32 * data32) -{ - t1_tpi_read(cmac->adapter, OFFSET(reg), data32); - return 0; -} - -static int pmwrite(struct cmac *cmac, u32 reg, u32 data32) -{ - t1_tpi_write(cmac->adapter, OFFSET(reg), data32); - return 0; -} - -/* Port reset. */ -static int pm3393_reset(struct cmac *cmac) -{ - return 0; -} - -/* - * Enable interrupts for the PM3393 - * - * 1. Enable PM3393 BLOCK interrupts. - * 2. Enable PM3393 Master Interrupt bit(INTE) - * 3. Enable ELMER's PM3393 bit. - * 4. Enable Terminator external interrupt. - */ -static int pm3393_interrupt_enable(struct cmac *cmac) -{ - u32 pl_intr; - - /* PM3393 - Enabling all hardware block interrupts. 
- */ - pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff); - - /* Don't interrupt on statistics overflow, we are polling */ - pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0); - - pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff); - - /* PM3393 - Global interrupt enable - */ - /* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */ - pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, - 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ ); - - /* TERMINATOR - PL_INTERUPTS_EXT */ - pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE); - pl_intr |= F_PL_INTR_EXT; - writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE); - return 0; -} - -static int pm3393_interrupt_disable(struct cmac *cmac) -{ - u32 elmer; - - /* PM3393 - Enabling HW interrupt blocks. */ - pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0); - pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0); - - /* PM3393 - Global interrupt enable */ - pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0); - - /* ELMER - External chip interrupts. */ - t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer); - elmer &= ~ELMER0_GP_BIT1; - t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer); - - /* TERMINATOR - PL_INTERUPTS_EXT */ - /* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP - * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level. - */ - - return 0; -} - -static int pm3393_interrupt_clear(struct cmac *cmac) -{ - u32 elmer; - u32 pl_intr; - u32 val32; - - /* PM3393 - Clearing HW interrupt blocks. Note, this assumes - * bit WCIMODE=0 for a clear-on-read. 
- */ - pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32); - pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32); - pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32); - pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32); - pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32); - pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32); - pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32); - pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32); - pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32); - pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32); - pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32); - pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION, - &val32); - pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32); - pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32); - - /* PM3393 - Global interrupt status - */ - pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32); - - /* ELMER - External chip interrupts. - */ - t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer); - elmer |= ELMER0_GP_BIT1; - t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer); - - /* TERMINATOR - PL_INTERUPTS_EXT - */ - pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE); - pl_intr |= F_PL_INTR_EXT; - writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE); - - return 0; -} - -/* Interrupt handler */ -static int pm3393_interrupt_handler(struct cmac *cmac) -{ - u32 master_intr_status; - - /* Read the master interrupt status register. */ - pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, - &master_intr_status); - if (netif_msg_intr(cmac->adapter)) - dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n", - master_intr_status); - - /* TBD XXX Lets just clear everything for now */ - pm3393_interrupt_clear(cmac); - - return 0; -} - -static int pm3393_enable(struct cmac *cmac, int which) -{ - if (which & MAC_DIRECTION_RX) - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, - (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN)); - - if (which & MAC_DIRECTION_TX) { - u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0; - - if (cmac->instance->fc & PAUSE_RX) - val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX; - if (cmac->instance->fc & PAUSE_TX) - val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX; - pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val); - } - - cmac->instance->enabled |= which; - return 0; -} - -static int pm3393_enable_port(struct cmac *cmac, int which) -{ - /* Clear port statistics */ - pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL, - SUNI1x10GEXP_BITMSK_MSTAT_CLEAR); - udelay(2); - memset(&cmac->stats, 0, sizeof(struct cmac_statistics)); - - pm3393_enable(cmac, which); - - /* - * XXX This should be done by the PHY and preferably not at all. - * The PHY doesn't give us link status indication on its own so have - * the link management code query it instead. - */ - t1_link_changed(cmac->adapter, 0); - return 0; -} - -static int pm3393_disable(struct cmac *cmac, int which) -{ - if (which & MAC_DIRECTION_RX) - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL); - if (which & MAC_DIRECTION_TX) - pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL); - - /* - * The disable is graceful. Give the PM3393 time. Can't wait very - * long here, we may be holding locks. 
- */ - udelay(20); - - cmac->instance->enabled &= ~which; - return 0; -} - -static int pm3393_loopback_enable(struct cmac *cmac) -{ - return 0; -} - -static int pm3393_loopback_disable(struct cmac *cmac) -{ - return 0; -} - -static int pm3393_set_mtu(struct cmac *cmac, int mtu) -{ - int enabled = cmac->instance->enabled; - - /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */ - mtu += 14 + 4; - if (mtu > MAX_FRAME_SIZE) - return -EINVAL; - - /* Disable Rx/Tx MAC before configuring it. */ - if (enabled) - pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); - - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu); - pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu); - - if (enabled) - pm3393_enable(cmac, enabled); - return 0; -} - -static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm) -{ - int enabled = cmac->instance->enabled & MAC_DIRECTION_RX; - u32 rx_mode; - - /* Disable MAC RX before reconfiguring it */ - if (enabled) - pm3393_disable(cmac, MAC_DIRECTION_RX); - - pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode); - rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE | - SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, - (u16)rx_mode); - - if (t1_rx_mode_promisc(rm)) { - /* Promiscuous mode. */ - rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE; - } - if (t1_rx_mode_allmulti(rm)) { - /* Accept all multicast. */ - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff); - rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; - } else if (t1_rx_mode_mc_cnt(rm)) { - /* Accept one or more multicast(s). 
*/ - struct netdev_hw_addr *ha; - int bit; - u16 mc_filter[4] = { 0, }; - - netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) { - /* bit[23:28] */ - bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f; - mc_filter[bit >> 4] |= 1 << (bit & 0xf); - } - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]); - rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; - } - - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode); - - if (enabled) - pm3393_enable(cmac, MAC_DIRECTION_RX); - - return 0; -} - -static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed, - int *duplex, int *fc) -{ - if (speed) - *speed = SPEED_10000; - if (duplex) - *duplex = DUPLEX_FULL; - if (fc) - *fc = cmac->instance->fc; - return 0; -} - -static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex, - int fc) -{ - if (speed >= 0 && speed != SPEED_10000) - return -1; - if (duplex >= 0 && duplex != DUPLEX_FULL) - return -1; - if (fc & ~(PAUSE_TX | PAUSE_RX)) - return -1; - - if (fc != cmac->instance->fc) { - cmac->instance->fc = (u8) fc; - if (cmac->instance->enabled & MAC_DIRECTION_TX) - pm3393_enable(cmac, MAC_DIRECTION_TX); - } - return 0; -} - -#define RMON_UPDATE(mac, name, stat_name) \ -{ \ - t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \ - t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \ - t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \ - (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \ - ((u64)(val1 & 0xffff) << 16) | \ - ((u64)(val2 & 0xff) << 32) | \ - ((mac)->stats.stat_name & \ - 0xffffff0000000000ULL); \ - if (ro & \ - (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \ - (mac)->stats.stat_name += 1ULL << 40; \ -} - -static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, - int flag) -{ - u64 ro; - u32 val0, val1, val2, val3; - - /* Snap the counters */ - pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, - SUNI1x10GEXP_BITMSK_MSTAT_SNAP); - - /* Counter rollover, clear on read */ - pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0); - pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1); - pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2); - pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3); - ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | - (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); - - /* Rx stats */ - RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); - RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); - RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); - RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); - RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); - RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); - RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, - RxInternalMACRcvError); - RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors); - RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors); - RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors); - RMON_UPDATE(mac, RxJabbers, RxJabberErrors); - RMON_UPDATE(mac, RxFragments, RxRuntErrors); - RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors); - RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK); - RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK); 
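/* [Annotation, not part of the original patch.]  RMON_UPDATE above
 * widens a 40-bit hardware counter -- read as three 16-bit TPI words,
 * of which the top word contributes only 8 bits -- into the 64-bit
 * software statistic.  Bits [63:40] are maintained in software, and a
 * carry of 2^40 is added whenever the counter's rollover bit in 'ro'
 * is set.  The same arithmetic as a plain (hypothetical) helper:
 */
static u64 rmon_widen(u64 sw_stat, u32 val0, u32 val1, u32 val2, int rolled)
{
	u64 hw = (u64)(val0 & 0xffff) |
		 ((u64)(val1 & 0xffff) << 16) |
		 ((u64)(val2 & 0xff) << 32);	/* 40 hardware bits */

	sw_stat = (sw_stat & 0xffffff0000000000ULL) | hw;
	if (rolled)				/* counter wrapped past 2^40 */
		sw_stat += 1ULL << 40;
	return sw_stat;
}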
- - /* Tx stats */ - RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK); - RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError, - TxInternalMACXmitError); - RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors); - RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK); - RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK); - RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK); - RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames); - RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK); - RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK); - - return &mac->stats; -} - -static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6]) -{ - memcpy(mac_addr, cmac->instance->mac_addr, 6); - return 0; -} - -static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6]) -{ - u32 val, lo, mid, hi, enabled = cmac->instance->enabled; - - /* - * MAC addr: 00:07:43:00:13:09 - * - * ma[5] = 0x09 - * ma[4] = 0x13 - * ma[3] = 0x00 - * ma[2] = 0x43 - * ma[1] = 0x07 - * ma[0] = 0x00 - * - * The PM3393 requires byte swapping and reverse order entry - * when programming MAC addresses: - * - * low_bits[15:0] = ma[1]:ma[0] - * mid_bits[31:16] = ma[3]:ma[2] - * high_bits[47:32] = ma[5]:ma[4] - */ - - /* Store local copy */ - memcpy(cmac->instance->mac_addr, ma, 6); - - lo = ((u32) ma[1] << 8) | (u32) ma[0]; - mid = ((u32) ma[3] << 8) | (u32) ma[2]; - hi = ((u32) ma[5] << 8) | (u32) ma[4]; - - /* Disable Rx/Tx MAC before configuring it. */ - if (enabled) - pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); - - /* Set RXXG Station Address */ - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi); - - /* Set TXXG Station Address */ - pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo); - pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid); - pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi); - - /* Setup Exact Match Filter 1 with our MAC address - * - * Must disable exact match filter before configuring it. 
- */ - pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val); - val &= 0xff0f; - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val); - - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid); - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi); - - val |= 0x0090; - pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val); - - if (enabled) - pm3393_enable(cmac, enabled); - return 0; -} - -static void pm3393_destroy(struct cmac *cmac) -{ - kfree(cmac); -} - -static struct cmac_ops pm3393_ops = { - .destroy = pm3393_destroy, - .reset = pm3393_reset, - .interrupt_enable = pm3393_interrupt_enable, - .interrupt_disable = pm3393_interrupt_disable, - .interrupt_clear = pm3393_interrupt_clear, - .interrupt_handler = pm3393_interrupt_handler, - .enable = pm3393_enable_port, - .disable = pm3393_disable, - .loopback_enable = pm3393_loopback_enable, - .loopback_disable = pm3393_loopback_disable, - .set_mtu = pm3393_set_mtu, - .set_rx_mode = pm3393_set_rx_mode, - .get_speed_duplex_fc = pm3393_get_speed_duplex_fc, - .set_speed_duplex_fc = pm3393_set_speed_duplex_fc, - .statistics_update = pm3393_update_statistics, - .macaddress_get = pm3393_macaddress_get, - .macaddress_set = pm3393_macaddress_set -}; - -static struct cmac *pm3393_mac_create(adapter_t *adapter, int index) -{ - struct cmac *cmac; - - cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL); - if (!cmac) - return NULL; - - cmac->ops = &pm3393_ops; - cmac->instance = (cmac_instance *) (cmac + 1); - cmac->adapter = adapter; - cmac->instance->fc = PAUSE_TX | PAUSE_RX; - - t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000); - t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000); - t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800); - t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */ - t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800); - t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00); - t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */ - - t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */ - t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */ - t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */ - t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */ - t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */ - t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */ - t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */ - t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */ - 
t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */ - t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */ - t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */ - t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt The other bit are undocumented */ - t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */ - - t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */ - t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */ - t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */ - t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */ - t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */ - t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */ - t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */ - t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */ - t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */ - t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */ - t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bit are undocumented */ - - t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */ - t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */ - t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */ - t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */ - t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */ - t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */ - - t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */ - t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */ - - t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */ - t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */ - - t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */ - /* For T1 use timer based Mac flow control. */ - t1_tpi_write(adapter, OFFSET(0x304d), 0x8000); - t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */ - t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */ - t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */ - - /* Setup Exact Match Filter 0 to allow broadcast packets. 
- */ - t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */ - t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */ - t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */ - t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */ - t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */ - - t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */ - t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */ - t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */ - - return cmac; -} - -static int pm3393_mac_reset(adapter_t * adapter) -{ - u32 val; - u32 x; - u32 is_pl4_reset_finished; - u32 is_pl4_outof_lock; - u32 is_xaui_mabc_pll_locked; - u32 successful_reset; - int i; - - /* The following steps are required to properly reset - * the PM3393. This information is provided in the - * PM3393 datasheet (Issue 2: November 2002) - * section 13.1 -- Device Reset. - * - * The PM3393 has three types of components that are - * individually reset: - * - * DRESETB - Digital circuitry - * PL4_ARESETB - PL4 analog circuitry - * XAUI_ARESETB - XAUI bus analog circuitry - * - * Steps to reset PM3393 using RSTB pin: - * - * 1. Assert RSTB pin low ( write 0 ) - * 2. Wait at least 1ms to initiate a complete initialization of device. - * 3. Wait until all external clocks and REFSEL are stable. - * 4. Wait minimum of 1ms. (after external clocks and REFEL are stable) - * 5. De-assert RSTB ( write 1 ) - * 6. Wait until internal timers to expires after ~14ms. - * - Allows analog clock synthesizer(PL4CSU) to stabilize to - * selected reference frequency before allowing the digital - * portion of the device to operate. - * 7. Wait at least 200us for XAUI interface to stabilize. - * 8. Verify the PM3393 came out of reset successfully. - * Set successful reset flag if everything worked else try again - * a few more times. - */ - - successful_reset = 0; - for (i = 0; i < 3 && !successful_reset; i++) { - /* 1 */ - t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val &= ~1; - t1_tpi_write(adapter, A_ELMER0_GPO, val); - - /* 2 */ - msleep(1); - - /* 3 */ - msleep(1); - - /* 4 */ - msleep(2 /*1 extra ms for safety */ ); - - /* 5 */ - val |= 1; - t1_tpi_write(adapter, A_ELMER0_GPO, val); - - /* 6 */ - msleep(15 /*1 extra ms for safety */ ); - - /* 7 */ - msleep(1); - - /* 8 */ - - /* Has PL4 analog block come out of reset correctly? */ - t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val); - is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED); - - /* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later in the init sequence - * figure out why? */ - - /* Have all PL4 block clocks locked? */ - x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL - /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ | - SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL | - SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL | - SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL); - is_pl4_outof_lock = (val & x); - - /* ??? If this fails, might be able to software reset the XAUI part - * and try to recover... thus saving us from doing another HW reset */ - /* Has the XAUI MABC PLL circuitry stablized? 
*/ - is_xaui_mabc_pll_locked = - (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED); - - successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock - && is_xaui_mabc_pll_locked); - - if (netif_msg_hw(adapter)) - dev_dbg(&adapter->pdev->dev, - "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, " - "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n", - i, is_pl4_reset_finished, val, - is_pl4_outof_lock, is_xaui_mabc_pll_locked); - } - return successful_reset ? 0 : 1; -} - -const struct gmac t1_pm3393_ops = { - .stats_update_period = STATS_TICK_SECS, - .create = pm3393_mac_create, - .reset = pm3393_mac_reset, -}; diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h deleted file mode 100644 index c80bf4d..0000000 --- a/drivers/net/chelsio/regs.h +++ /dev/null @@ -1,2168 +0,0 @@ -/***************************************************************************** - * * - * File: regs.h * - * $Revision: 1.8 $ * - * $Date: 2005/06/21 18:29:48 $ * - * Description: * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. 
* - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#ifndef _CXGB_REGS_H_ -#define _CXGB_REGS_H_ - -/* SGE registers */ -#define A_SG_CONTROL 0x0 - -#define S_CMDQ0_ENABLE 0 -#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE) -#define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U) - -#define S_CMDQ1_ENABLE 1 -#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE) -#define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U) - -#define S_FL0_ENABLE 2 -#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE) -#define F_FL0_ENABLE V_FL0_ENABLE(1U) - -#define S_FL1_ENABLE 3 -#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE) -#define F_FL1_ENABLE V_FL1_ENABLE(1U) - -#define S_CPL_ENABLE 4 -#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE) -#define F_CPL_ENABLE V_CPL_ENABLE(1U) - -#define S_RESPONSE_QUEUE_ENABLE 5 -#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE) -#define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U) - -#define S_CMDQ_PRIORITY 6 -#define M_CMDQ_PRIORITY 0x3 -#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY) -#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY) - -#define S_DISABLE_CMDQ0_GTS 8 -#define V_DISABLE_CMDQ0_GTS(x) ((x) << S_DISABLE_CMDQ0_GTS) -#define F_DISABLE_CMDQ0_GTS V_DISABLE_CMDQ0_GTS(1U) - -#define S_DISABLE_CMDQ1_GTS 9 -#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS) -#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U) - -#define S_DISABLE_FL0_GTS 10 -#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS) -#define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U) - -#define S_DISABLE_FL1_GTS 11 -#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS) -#define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U) - -#define S_ENABLE_BIG_ENDIAN 12 -#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN) -#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U) - -#define S_FL_SELECTION_CRITERIA 13 -#define V_FL_SELECTION_CRITERIA(x) ((x) << S_FL_SELECTION_CRITERIA) -#define F_FL_SELECTION_CRITERIA V_FL_SELECTION_CRITERIA(1U) - -#define S_ISCSI_COALESCE 14 -#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE) -#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U) - -#define S_RX_PKT_OFFSET 15 -#define M_RX_PKT_OFFSET 0x7 -#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET) -#define G_RX_PKT_OFFSET(x) (((x) >> S_RX_PKT_OFFSET) & M_RX_PKT_OFFSET) - -#define S_VLAN_XTRACT 18 -#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT) -#define F_VLAN_XTRACT V_VLAN_XTRACT(1U) - -#define A_SG_DOORBELL 0x4 -#define A_SG_CMD0BASELWR 0x8 -#define A_SG_CMD0BASEUPR 0xc -#define A_SG_CMD1BASELWR 0x10 -#define A_SG_CMD1BASEUPR 0x14 -#define A_SG_FL0BASELWR 0x18 -#define A_SG_FL0BASEUPR 0x1c -#define A_SG_FL1BASELWR 0x20 -#define A_SG_FL1BASEUPR 0x24 -#define A_SG_CMD0SIZE 0x28 - -#define S_CMDQ0_SIZE 0 -#define M_CMDQ0_SIZE 0x1ffff -#define V_CMDQ0_SIZE(x) ((x) << S_CMDQ0_SIZE) -#define G_CMDQ0_SIZE(x) (((x) >> S_CMDQ0_SIZE) & M_CMDQ0_SIZE) - -#define A_SG_FL0SIZE 0x2c - -#define S_FL0_SIZE 0 -#define M_FL0_SIZE 0x1ffff -#define V_FL0_SIZE(x) ((x) << S_FL0_SIZE) -#define G_FL0_SIZE(x) (((x) >> S_FL0_SIZE) & M_FL0_SIZE) - -#define A_SG_RSPSIZE 0x30 - -#define S_RESPQ_SIZE 0 -#define M_RESPQ_SIZE 0x1ffff -#define V_RESPQ_SIZE(x) ((x) << S_RESPQ_SIZE) -#define G_RESPQ_SIZE(x) (((x) >> S_RESPQ_SIZE) & M_RESPQ_SIZE) - -#define A_SG_RSPBASELWR 0x34 -#define A_SG_RSPBASEUPR 0x38 
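/* [Annotation, not part of the original patch.]  regs.h follows one
 * naming scheme throughout: for a register field FOO, S_FOO is the bit
 * offset, M_FOO the unshifted mask, V_FOO(x) positions a value for a
 * write, G_FOO(x) extracts it from a read, and single-bit fields get
 * F_FOO = V_FOO(1U).  A hypothetical read-modify-write of the 2-bit
 * CMDQ_PRIORITY field defined above shows the intended usage:
 */
static void sg_set_cmdq_priority(adapter_t *adapter, u32 prio)
{
	u32 v = readl(adapter->regs + A_SG_CONTROL);

	v &= ~V_CMDQ_PRIORITY(M_CMDQ_PRIORITY);	/* clear the old field */
	v |= V_CMDQ_PRIORITY(prio & M_CMDQ_PRIORITY);
	writel(v, adapter->regs + A_SG_CONTROL);
	/* G_CMDQ_PRIORITY() on a fresh read now returns prio. */
}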
-#define A_SG_FLTHRESHOLD 0x3c - -#define S_FL_THRESHOLD 0 -#define M_FL_THRESHOLD 0xffff -#define V_FL_THRESHOLD(x) ((x) << S_FL_THRESHOLD) -#define G_FL_THRESHOLD(x) (((x) >> S_FL_THRESHOLD) & M_FL_THRESHOLD) - -#define A_SG_RSPQUEUECREDIT 0x40 - -#define S_RESPQ_CREDIT 0 -#define M_RESPQ_CREDIT 0x1ffff -#define V_RESPQ_CREDIT(x) ((x) << S_RESPQ_CREDIT) -#define G_RESPQ_CREDIT(x) (((x) >> S_RESPQ_CREDIT) & M_RESPQ_CREDIT) - -#define A_SG_SLEEPING 0x48 - -#define S_SLEEPING 0 -#define M_SLEEPING 0xffff -#define V_SLEEPING(x) ((x) << S_SLEEPING) -#define G_SLEEPING(x) (((x) >> S_SLEEPING) & M_SLEEPING) - -#define A_SG_INTRTIMER 0x4c - -#define S_INTERRUPT_TIMER_COUNT 0 -#define M_INTERRUPT_TIMER_COUNT 0xffffff -#define V_INTERRUPT_TIMER_COUNT(x) ((x) << S_INTERRUPT_TIMER_COUNT) -#define G_INTERRUPT_TIMER_COUNT(x) (((x) >> S_INTERRUPT_TIMER_COUNT) & M_INTERRUPT_TIMER_COUNT) - -#define A_SG_CMD0PTR 0x50 - -#define S_CMDQ0_POINTER 0 -#define M_CMDQ0_POINTER 0xffff -#define V_CMDQ0_POINTER(x) ((x) << S_CMDQ0_POINTER) -#define G_CMDQ0_POINTER(x) (((x) >> S_CMDQ0_POINTER) & M_CMDQ0_POINTER) - -#define S_CURRENT_GENERATION_BIT 16 -#define V_CURRENT_GENERATION_BIT(x) ((x) << S_CURRENT_GENERATION_BIT) -#define F_CURRENT_GENERATION_BIT V_CURRENT_GENERATION_BIT(1U) - -#define A_SG_CMD1PTR 0x54 - -#define S_CMDQ1_POINTER 0 -#define M_CMDQ1_POINTER 0xffff -#define V_CMDQ1_POINTER(x) ((x) << S_CMDQ1_POINTER) -#define G_CMDQ1_POINTER(x) (((x) >> S_CMDQ1_POINTER) & M_CMDQ1_POINTER) - -#define A_SG_FL0PTR 0x58 - -#define S_FL0_POINTER 0 -#define M_FL0_POINTER 0xffff -#define V_FL0_POINTER(x) ((x) << S_FL0_POINTER) -#define G_FL0_POINTER(x) (((x) >> S_FL0_POINTER) & M_FL0_POINTER) - -#define A_SG_FL1PTR 0x5c - -#define S_FL1_POINTER 0 -#define M_FL1_POINTER 0xffff -#define V_FL1_POINTER(x) ((x) << S_FL1_POINTER) -#define G_FL1_POINTER(x) (((x) >> S_FL1_POINTER) & M_FL1_POINTER) - -#define A_SG_VERSION 0x6c - -#define S_DAY 0 -#define M_DAY 0x1f -#define V_DAY(x) ((x) << S_DAY) -#define G_DAY(x) (((x) >> S_DAY) & M_DAY) - -#define S_MONTH 5 -#define M_MONTH 0xf -#define V_MONTH(x) ((x) << S_MONTH) -#define G_MONTH(x) (((x) >> S_MONTH) & M_MONTH) - -#define A_SG_CMD1SIZE 0xb0 - -#define S_CMDQ1_SIZE 0 -#define M_CMDQ1_SIZE 0x1ffff -#define V_CMDQ1_SIZE(x) ((x) << S_CMDQ1_SIZE) -#define G_CMDQ1_SIZE(x) (((x) >> S_CMDQ1_SIZE) & M_CMDQ1_SIZE) - -#define A_SG_FL1SIZE 0xb4 - -#define S_FL1_SIZE 0 -#define M_FL1_SIZE 0x1ffff -#define V_FL1_SIZE(x) ((x) << S_FL1_SIZE) -#define G_FL1_SIZE(x) (((x) >> S_FL1_SIZE) & M_FL1_SIZE) - -#define A_SG_INT_ENABLE 0xb8 - -#define S_RESPQ_EXHAUSTED 0 -#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED) -#define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U) - -#define S_RESPQ_OVERFLOW 1 -#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW) -#define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U) - -#define S_FL_EXHAUSTED 2 -#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED) -#define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U) - -#define S_PACKET_TOO_BIG 3 -#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG) -#define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U) - -#define S_PACKET_MISMATCH 4 -#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH) -#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U) - -#define A_SG_INT_CAUSE 0xbc -#define A_SG_RESPACCUTIMER 0xc0 - -/* MC3 registers */ -#define A_MC3_CFG 0x100 - -#define S_CLK_ENABLE 0 -#define V_CLK_ENABLE(x) ((x) << S_CLK_ENABLE) -#define F_CLK_ENABLE V_CLK_ENABLE(1U) - -#define S_READY 1 -#define V_READY(x) ((x) << S_READY) -#define F_READY V_READY(1U) - 
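/* [Annotation, not part of the original patch.]  Status bits such as
 * F_READY above are meant to be tested directly against a register
 * read, with no manual shifting or masking.  A hypothetical poll loop
 * for the MC3 memory controller coming out of initialization:
 */
static int mc3_wait_ready(adapter_t *adapter, int attempts)
{
	while (attempts--) {
		if (readl(adapter->regs + A_MC3_CFG) & F_READY)
			return 0;	/* controller is ready */
		udelay(10);
	}
	return -EAGAIN;			/* never became ready */
}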
-#define S_READ_TO_WRITE_DELAY 2 -#define M_READ_TO_WRITE_DELAY 0x7 -#define V_READ_TO_WRITE_DELAY(x) ((x) << S_READ_TO_WRITE_DELAY) -#define G_READ_TO_WRITE_DELAY(x) (((x) >> S_READ_TO_WRITE_DELAY) & M_READ_TO_WRITE_DELAY) - -#define S_WRITE_TO_READ_DELAY 5 -#define M_WRITE_TO_READ_DELAY 0x7 -#define V_WRITE_TO_READ_DELAY(x) ((x) << S_WRITE_TO_READ_DELAY) -#define G_WRITE_TO_READ_DELAY(x) (((x) >> S_WRITE_TO_READ_DELAY) & M_WRITE_TO_READ_DELAY) - -#define S_MC3_BANK_CYCLE 8 -#define M_MC3_BANK_CYCLE 0xf -#define V_MC3_BANK_CYCLE(x) ((x) << S_MC3_BANK_CYCLE) -#define G_MC3_BANK_CYCLE(x) (((x) >> S_MC3_BANK_CYCLE) & M_MC3_BANK_CYCLE) - -#define S_REFRESH_CYCLE 12 -#define M_REFRESH_CYCLE 0xf -#define V_REFRESH_CYCLE(x) ((x) << S_REFRESH_CYCLE) -#define G_REFRESH_CYCLE(x) (((x) >> S_REFRESH_CYCLE) & M_REFRESH_CYCLE) - -#define S_PRECHARGE_CYCLE 16 -#define M_PRECHARGE_CYCLE 0x3 -#define V_PRECHARGE_CYCLE(x) ((x) << S_PRECHARGE_CYCLE) -#define G_PRECHARGE_CYCLE(x) (((x) >> S_PRECHARGE_CYCLE) & M_PRECHARGE_CYCLE) - -#define S_ACTIVE_TO_READ_WRITE_DELAY 18 -#define V_ACTIVE_TO_READ_WRITE_DELAY(x) ((x) << S_ACTIVE_TO_READ_WRITE_DELAY) -#define F_ACTIVE_TO_READ_WRITE_DELAY V_ACTIVE_TO_READ_WRITE_DELAY(1U) - -#define S_ACTIVE_TO_PRECHARGE_DELAY 19 -#define M_ACTIVE_TO_PRECHARGE_DELAY 0x7 -#define V_ACTIVE_TO_PRECHARGE_DELAY(x) ((x) << S_ACTIVE_TO_PRECHARGE_DELAY) -#define G_ACTIVE_TO_PRECHARGE_DELAY(x) (((x) >> S_ACTIVE_TO_PRECHARGE_DELAY) & M_ACTIVE_TO_PRECHARGE_DELAY) - -#define S_WRITE_RECOVERY_DELAY 22 -#define M_WRITE_RECOVERY_DELAY 0x3 -#define V_WRITE_RECOVERY_DELAY(x) ((x) << S_WRITE_RECOVERY_DELAY) -#define G_WRITE_RECOVERY_DELAY(x) (((x) >> S_WRITE_RECOVERY_DELAY) & M_WRITE_RECOVERY_DELAY) - -#define S_DENSITY 24 -#define M_DENSITY 0x3 -#define V_DENSITY(x) ((x) << S_DENSITY) -#define G_DENSITY(x) (((x) >> S_DENSITY) & M_DENSITY) - -#define S_ORGANIZATION 26 -#define V_ORGANIZATION(x) ((x) << S_ORGANIZATION) -#define F_ORGANIZATION V_ORGANIZATION(1U) - -#define S_BANKS 27 -#define V_BANKS(x) ((x) << S_BANKS) -#define F_BANKS V_BANKS(1U) - -#define S_UNREGISTERED 28 -#define V_UNREGISTERED(x) ((x) << S_UNREGISTERED) -#define F_UNREGISTERED V_UNREGISTERED(1U) - -#define S_MC3_WIDTH 29 -#define M_MC3_WIDTH 0x3 -#define V_MC3_WIDTH(x) ((x) << S_MC3_WIDTH) -#define G_MC3_WIDTH(x) (((x) >> S_MC3_WIDTH) & M_MC3_WIDTH) - -#define S_MC3_SLOW 31 -#define V_MC3_SLOW(x) ((x) << S_MC3_SLOW) -#define F_MC3_SLOW V_MC3_SLOW(1U) - -#define A_MC3_MODE 0x104 - -#define S_MC3_MODE 0 -#define M_MC3_MODE 0x3fff -#define V_MC3_MODE(x) ((x) << S_MC3_MODE) -#define G_MC3_MODE(x) (((x) >> S_MC3_MODE) & M_MC3_MODE) - -#define S_BUSY 31 -#define V_BUSY(x) ((x) << S_BUSY) -#define F_BUSY V_BUSY(1U) - -#define A_MC3_EXT_MODE 0x108 - -#define S_MC3_EXTENDED_MODE 0 -#define M_MC3_EXTENDED_MODE 0x3fff -#define V_MC3_EXTENDED_MODE(x) ((x) << S_MC3_EXTENDED_MODE) -#define G_MC3_EXTENDED_MODE(x) (((x) >> S_MC3_EXTENDED_MODE) & M_MC3_EXTENDED_MODE) - -#define A_MC3_PRECHARG 0x10c -#define A_MC3_REFRESH 0x110 - -#define S_REFRESH_ENABLE 0 -#define V_REFRESH_ENABLE(x) ((x) << S_REFRESH_ENABLE) -#define F_REFRESH_ENABLE V_REFRESH_ENABLE(1U) - -#define S_REFRESH_DIVISOR 1 -#define M_REFRESH_DIVISOR 0x3fff -#define V_REFRESH_DIVISOR(x) ((x) << S_REFRESH_DIVISOR) -#define G_REFRESH_DIVISOR(x) (((x) >> S_REFRESH_DIVISOR) & M_REFRESH_DIVISOR) - -#define A_MC3_STROBE 0x114 - -#define S_MASTER_DLL_RESET 0 -#define V_MASTER_DLL_RESET(x) ((x) << S_MASTER_DLL_RESET) -#define F_MASTER_DLL_RESET V_MASTER_DLL_RESET(1U) - -#define 
S_MASTER_DLL_TAP_COUNT 1 -#define M_MASTER_DLL_TAP_COUNT 0xff -#define V_MASTER_DLL_TAP_COUNT(x) ((x) << S_MASTER_DLL_TAP_COUNT) -#define G_MASTER_DLL_TAP_COUNT(x) (((x) >> S_MASTER_DLL_TAP_COUNT) & M_MASTER_DLL_TAP_COUNT) - -#define S_MASTER_DLL_LOCKED 9 -#define V_MASTER_DLL_LOCKED(x) ((x) << S_MASTER_DLL_LOCKED) -#define F_MASTER_DLL_LOCKED V_MASTER_DLL_LOCKED(1U) - -#define S_MASTER_DLL_MAX_TAP_COUNT 10 -#define V_MASTER_DLL_MAX_TAP_COUNT(x) ((x) << S_MASTER_DLL_MAX_TAP_COUNT) -#define F_MASTER_DLL_MAX_TAP_COUNT V_MASTER_DLL_MAX_TAP_COUNT(1U) - -#define S_MASTER_DLL_TAP_COUNT_OFFSET 11 -#define M_MASTER_DLL_TAP_COUNT_OFFSET 0x3f -#define V_MASTER_DLL_TAP_COUNT_OFFSET(x) ((x) << S_MASTER_DLL_TAP_COUNT_OFFSET) -#define G_MASTER_DLL_TAP_COUNT_OFFSET(x) (((x) >> S_MASTER_DLL_TAP_COUNT_OFFSET) & M_MASTER_DLL_TAP_COUNT_OFFSET) - -#define S_SLAVE_DLL_RESET 11 -#define V_SLAVE_DLL_RESET(x) ((x) << S_SLAVE_DLL_RESET) -#define F_SLAVE_DLL_RESET V_SLAVE_DLL_RESET(1U) - -#define S_SLAVE_DLL_DELTA 12 -#define M_SLAVE_DLL_DELTA 0xf -#define V_SLAVE_DLL_DELTA(x) ((x) << S_SLAVE_DLL_DELTA) -#define G_SLAVE_DLL_DELTA(x) (((x) >> S_SLAVE_DLL_DELTA) & M_SLAVE_DLL_DELTA) - -#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 17 -#define M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 0x3f -#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) -#define G_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) & M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) - -#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE 23 -#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE) -#define F_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(1U) - -#define S_SLAVE_DELAY_LINE_TAP_COUNT 24 -#define M_SLAVE_DELAY_LINE_TAP_COUNT 0x3f -#define V_SLAVE_DELAY_LINE_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_TAP_COUNT) -#define G_SLAVE_DELAY_LINE_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_TAP_COUNT) & M_SLAVE_DELAY_LINE_TAP_COUNT) - -#define A_MC3_ECC_CNTL 0x118 - -#define S_ECC_GENERATION_ENABLE 0 -#define V_ECC_GENERATION_ENABLE(x) ((x) << S_ECC_GENERATION_ENABLE) -#define F_ECC_GENERATION_ENABLE V_ECC_GENERATION_ENABLE(1U) - -#define S_ECC_CHECK_ENABLE 1 -#define V_ECC_CHECK_ENABLE(x) ((x) << S_ECC_CHECK_ENABLE) -#define F_ECC_CHECK_ENABLE V_ECC_CHECK_ENABLE(1U) - -#define S_CORRECTABLE_ERROR_COUNT 2 -#define M_CORRECTABLE_ERROR_COUNT 0xff -#define V_CORRECTABLE_ERROR_COUNT(x) ((x) << S_CORRECTABLE_ERROR_COUNT) -#define G_CORRECTABLE_ERROR_COUNT(x) (((x) >> S_CORRECTABLE_ERROR_COUNT) & M_CORRECTABLE_ERROR_COUNT) - -#define S_UNCORRECTABLE_ERROR_COUNT 10 -#define M_UNCORRECTABLE_ERROR_COUNT 0xff -#define V_UNCORRECTABLE_ERROR_COUNT(x) ((x) << S_UNCORRECTABLE_ERROR_COUNT) -#define G_UNCORRECTABLE_ERROR_COUNT(x) (((x) >> S_UNCORRECTABLE_ERROR_COUNT) & M_UNCORRECTABLE_ERROR_COUNT) - -#define A_MC3_CE_ADDR 0x11c - -#define S_MC3_CE_ADDR 4 -#define M_MC3_CE_ADDR 0xfffffff -#define V_MC3_CE_ADDR(x) ((x) << S_MC3_CE_ADDR) -#define G_MC3_CE_ADDR(x) (((x) >> S_MC3_CE_ADDR) & M_MC3_CE_ADDR) - -#define A_MC3_CE_DATA0 0x120 -#define A_MC3_CE_DATA1 0x124 -#define A_MC3_CE_DATA2 0x128 -#define A_MC3_CE_DATA3 0x12c -#define A_MC3_CE_DATA4 0x130 -#define A_MC3_UE_ADDR 0x134 - -#define S_MC3_UE_ADDR 4 -#define M_MC3_UE_ADDR 0xfffffff -#define V_MC3_UE_ADDR(x) ((x) << S_MC3_UE_ADDR) -#define G_MC3_UE_ADDR(x) (((x) >> S_MC3_UE_ADDR) & M_MC3_UE_ADDR) - -#define A_MC3_UE_DATA0 0x138 -#define A_MC3_UE_DATA1 0x13c -#define 
A_MC3_UE_DATA2 0x140 -#define A_MC3_UE_DATA3 0x144 -#define A_MC3_UE_DATA4 0x148 -#define A_MC3_BD_ADDR 0x14c -#define A_MC3_BD_DATA0 0x150 -#define A_MC3_BD_DATA1 0x154 -#define A_MC3_BD_DATA2 0x158 -#define A_MC3_BD_DATA3 0x15c -#define A_MC3_BD_DATA4 0x160 -#define A_MC3_BD_OP 0x164 - -#define S_BACK_DOOR_OPERATION 0 -#define V_BACK_DOOR_OPERATION(x) ((x) << S_BACK_DOOR_OPERATION) -#define F_BACK_DOOR_OPERATION V_BACK_DOOR_OPERATION(1U) - -#define A_MC3_BIST_ADDR_BEG 0x168 -#define A_MC3_BIST_ADDR_END 0x16c -#define A_MC3_BIST_DATA 0x170 -#define A_MC3_BIST_OP 0x174 - -#define S_OP 0 -#define V_OP(x) ((x) << S_OP) -#define F_OP V_OP(1U) - -#define S_DATA_PATTERN 1 -#define M_DATA_PATTERN 0x3 -#define V_DATA_PATTERN(x) ((x) << S_DATA_PATTERN) -#define G_DATA_PATTERN(x) (((x) >> S_DATA_PATTERN) & M_DATA_PATTERN) - -#define S_CONTINUOUS 3 -#define V_CONTINUOUS(x) ((x) << S_CONTINUOUS) -#define F_CONTINUOUS V_CONTINUOUS(1U) - -#define A_MC3_INT_ENABLE 0x178 - -#define S_MC3_CORR_ERR 0 -#define V_MC3_CORR_ERR(x) ((x) << S_MC3_CORR_ERR) -#define F_MC3_CORR_ERR V_MC3_CORR_ERR(1U) - -#define S_MC3_UNCORR_ERR 1 -#define V_MC3_UNCORR_ERR(x) ((x) << S_MC3_UNCORR_ERR) -#define F_MC3_UNCORR_ERR V_MC3_UNCORR_ERR(1U) - -#define S_MC3_PARITY_ERR 2 -#define M_MC3_PARITY_ERR 0xff -#define V_MC3_PARITY_ERR(x) ((x) << S_MC3_PARITY_ERR) -#define G_MC3_PARITY_ERR(x) (((x) >> S_MC3_PARITY_ERR) & M_MC3_PARITY_ERR) - -#define S_MC3_ADDR_ERR 10 -#define V_MC3_ADDR_ERR(x) ((x) << S_MC3_ADDR_ERR) -#define F_MC3_ADDR_ERR V_MC3_ADDR_ERR(1U) - -#define A_MC3_INT_CAUSE 0x17c - -/* MC4 registers */ -#define A_MC4_CFG 0x180 - -#define S_POWER_UP 0 -#define V_POWER_UP(x) ((x) << S_POWER_UP) -#define F_POWER_UP V_POWER_UP(1U) - -#define S_MC4_BANK_CYCLE 8 -#define M_MC4_BANK_CYCLE 0x7 -#define V_MC4_BANK_CYCLE(x) ((x) << S_MC4_BANK_CYCLE) -#define G_MC4_BANK_CYCLE(x) (((x) >> S_MC4_BANK_CYCLE) & M_MC4_BANK_CYCLE) - -#define S_MC4_NARROW 24 -#define V_MC4_NARROW(x) ((x) << S_MC4_NARROW) -#define F_MC4_NARROW V_MC4_NARROW(1U) - -#define S_MC4_SLOW 25 -#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW) -#define F_MC4_SLOW V_MC4_SLOW(1U) - -#define S_MC4A_WIDTH 24 -#define M_MC4A_WIDTH 0x3 -#define V_MC4A_WIDTH(x) ((x) << S_MC4A_WIDTH) -#define G_MC4A_WIDTH(x) (((x) >> S_MC4A_WIDTH) & M_MC4A_WIDTH) - -#define S_MC4A_SLOW 26 -#define V_MC4A_SLOW(x) ((x) << S_MC4A_SLOW) -#define F_MC4A_SLOW V_MC4A_SLOW(1U) - -#define A_MC4_MODE 0x184 - -#define S_MC4_MODE 0 -#define M_MC4_MODE 0x7fff -#define V_MC4_MODE(x) ((x) << S_MC4_MODE) -#define G_MC4_MODE(x) (((x) >> S_MC4_MODE) & M_MC4_MODE) - -#define A_MC4_EXT_MODE 0x188 - -#define S_MC4_EXTENDED_MODE 0 -#define M_MC4_EXTENDED_MODE 0x7fff -#define V_MC4_EXTENDED_MODE(x) ((x) << S_MC4_EXTENDED_MODE) -#define G_MC4_EXTENDED_MODE(x) (((x) >> S_MC4_EXTENDED_MODE) & M_MC4_EXTENDED_MODE) - -#define A_MC4_REFRESH 0x190 -#define A_MC4_STROBE 0x194 -#define A_MC4_ECC_CNTL 0x198 -#define A_MC4_CE_ADDR 0x19c - -#define S_MC4_CE_ADDR 4 -#define M_MC4_CE_ADDR 0xffffff -#define V_MC4_CE_ADDR(x) ((x) << S_MC4_CE_ADDR) -#define G_MC4_CE_ADDR(x) (((x) >> S_MC4_CE_ADDR) & M_MC4_CE_ADDR) - -#define A_MC4_CE_DATA0 0x1a0 -#define A_MC4_CE_DATA1 0x1a4 -#define A_MC4_CE_DATA2 0x1a8 -#define A_MC4_CE_DATA3 0x1ac -#define A_MC4_CE_DATA4 0x1b0 -#define A_MC4_UE_ADDR 0x1b4 - -#define S_MC4_UE_ADDR 4 -#define M_MC4_UE_ADDR 0xffffff -#define V_MC4_UE_ADDR(x) ((x) << S_MC4_UE_ADDR) -#define G_MC4_UE_ADDR(x) (((x) >> S_MC4_UE_ADDR) & M_MC4_UE_ADDR) - -#define A_MC4_UE_DATA0 0x1b8 -#define A_MC4_UE_DATA1 0x1bc -#define 
A_MC4_UE_DATA2 0x1c0 -#define A_MC4_UE_DATA3 0x1c4 -#define A_MC4_UE_DATA4 0x1c8 -#define A_MC4_BD_ADDR 0x1cc - -#define S_MC4_BACK_DOOR_ADDR 0 -#define M_MC4_BACK_DOOR_ADDR 0xfffffff -#define V_MC4_BACK_DOOR_ADDR(x) ((x) << S_MC4_BACK_DOOR_ADDR) -#define G_MC4_BACK_DOOR_ADDR(x) (((x) >> S_MC4_BACK_DOOR_ADDR) & M_MC4_BACK_DOOR_ADDR) - -#define A_MC4_BD_DATA0 0x1d0 -#define A_MC4_BD_DATA1 0x1d4 -#define A_MC4_BD_DATA2 0x1d8 -#define A_MC4_BD_DATA3 0x1dc -#define A_MC4_BD_DATA4 0x1e0 -#define A_MC4_BD_OP 0x1e4 - -#define S_OPERATION 0 -#define V_OPERATION(x) ((x) << S_OPERATION) -#define F_OPERATION V_OPERATION(1U) - -#define A_MC4_BIST_ADDR_BEG 0x1e8 -#define A_MC4_BIST_ADDR_END 0x1ec -#define A_MC4_BIST_DATA 0x1f0 -#define A_MC4_BIST_OP 0x1f4 -#define A_MC4_INT_ENABLE 0x1f8 - -#define S_MC4_CORR_ERR 0 -#define V_MC4_CORR_ERR(x) ((x) << S_MC4_CORR_ERR) -#define F_MC4_CORR_ERR V_MC4_CORR_ERR(1U) - -#define S_MC4_UNCORR_ERR 1 -#define V_MC4_UNCORR_ERR(x) ((x) << S_MC4_UNCORR_ERR) -#define F_MC4_UNCORR_ERR V_MC4_UNCORR_ERR(1U) - -#define S_MC4_ADDR_ERR 2 -#define V_MC4_ADDR_ERR(x) ((x) << S_MC4_ADDR_ERR) -#define F_MC4_ADDR_ERR V_MC4_ADDR_ERR(1U) - -#define A_MC4_INT_CAUSE 0x1fc - -/* TPI registers */ -#define A_TPI_ADDR 0x280 - -#define S_TPI_ADDRESS 0 -#define M_TPI_ADDRESS 0xffffff -#define V_TPI_ADDRESS(x) ((x) << S_TPI_ADDRESS) -#define G_TPI_ADDRESS(x) (((x) >> S_TPI_ADDRESS) & M_TPI_ADDRESS) - -#define A_TPI_WR_DATA 0x284 -#define A_TPI_RD_DATA 0x288 -#define A_TPI_CSR 0x28c - -#define S_TPIWR 0 -#define V_TPIWR(x) ((x) << S_TPIWR) -#define F_TPIWR V_TPIWR(1U) - -#define S_TPIRDY 1 -#define V_TPIRDY(x) ((x) << S_TPIRDY) -#define F_TPIRDY V_TPIRDY(1U) - -#define S_INT_DIR 31 -#define V_INT_DIR(x) ((x) << S_INT_DIR) -#define F_INT_DIR V_INT_DIR(1U) - -#define A_TPI_PAR 0x29c - -#define S_TPIPAR 0 -#define M_TPIPAR 0x7f -#define V_TPIPAR(x) ((x) << S_TPIPAR) -#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR) - - -/* TP registers */ -#define A_TP_IN_CONFIG 0x300 - -#define S_TP_IN_CSPI_TUNNEL 0 -#define V_TP_IN_CSPI_TUNNEL(x) ((x) << S_TP_IN_CSPI_TUNNEL) -#define F_TP_IN_CSPI_TUNNEL V_TP_IN_CSPI_TUNNEL(1U) - -#define S_TP_IN_CSPI_ETHERNET 1 -#define V_TP_IN_CSPI_ETHERNET(x) ((x) << S_TP_IN_CSPI_ETHERNET) -#define F_TP_IN_CSPI_ETHERNET V_TP_IN_CSPI_ETHERNET(1U) - -#define S_TP_IN_CSPI_CPL 3 -#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL) -#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U) - -#define S_TP_IN_CSPI_POS 4 -#define V_TP_IN_CSPI_POS(x) ((x) << S_TP_IN_CSPI_POS) -#define F_TP_IN_CSPI_POS V_TP_IN_CSPI_POS(1U) - -#define S_TP_IN_CSPI_CHECK_IP_CSUM 5 -#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM) -#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U) - -#define S_TP_IN_CSPI_CHECK_TCP_CSUM 6 -#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM) -#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U) - -#define S_TP_IN_ESPI_TUNNEL 7 -#define V_TP_IN_ESPI_TUNNEL(x) ((x) << S_TP_IN_ESPI_TUNNEL) -#define F_TP_IN_ESPI_TUNNEL V_TP_IN_ESPI_TUNNEL(1U) - -#define S_TP_IN_ESPI_ETHERNET 8 -#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET) -#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U) - -#define S_TP_IN_ESPI_CPL 10 -#define V_TP_IN_ESPI_CPL(x) ((x) << S_TP_IN_ESPI_CPL) -#define F_TP_IN_ESPI_CPL V_TP_IN_ESPI_CPL(1U) - -#define S_TP_IN_ESPI_POS 11 -#define V_TP_IN_ESPI_POS(x) ((x) << S_TP_IN_ESPI_POS) -#define F_TP_IN_ESPI_POS V_TP_IN_ESPI_POS(1U) - -#define S_TP_IN_ESPI_CHECK_IP_CSUM 12 -#define 
V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM) -#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U) - -#define S_TP_IN_ESPI_CHECK_TCP_CSUM 13 -#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM) -#define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U) - -#define S_OFFLOAD_DISABLE 14 -#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE) -#define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U) - -#define A_TP_OUT_CONFIG 0x304 - -#define S_TP_OUT_C_ETH 0 -#define V_TP_OUT_C_ETH(x) ((x) << S_TP_OUT_C_ETH) -#define F_TP_OUT_C_ETH V_TP_OUT_C_ETH(1U) - -#define S_TP_OUT_CSPI_CPL 2 -#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL) -#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U) - -#define S_TP_OUT_CSPI_POS 3 -#define V_TP_OUT_CSPI_POS(x) ((x) << S_TP_OUT_CSPI_POS) -#define F_TP_OUT_CSPI_POS V_TP_OUT_CSPI_POS(1U) - -#define S_TP_OUT_CSPI_GENERATE_IP_CSUM 4 -#define V_TP_OUT_CSPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_IP_CSUM) -#define F_TP_OUT_CSPI_GENERATE_IP_CSUM V_TP_OUT_CSPI_GENERATE_IP_CSUM(1U) - -#define S_TP_OUT_CSPI_GENERATE_TCP_CSUM 5 -#define V_TP_OUT_CSPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_TCP_CSUM) -#define F_TP_OUT_CSPI_GENERATE_TCP_CSUM V_TP_OUT_CSPI_GENERATE_TCP_CSUM(1U) - -#define S_TP_OUT_ESPI_ETHERNET 6 -#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET) -#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U) - -#define S_TP_OUT_ESPI_TAG_ETHERNET 7 -#define V_TP_OUT_ESPI_TAG_ETHERNET(x) ((x) << S_TP_OUT_ESPI_TAG_ETHERNET) -#define F_TP_OUT_ESPI_TAG_ETHERNET V_TP_OUT_ESPI_TAG_ETHERNET(1U) - -#define S_TP_OUT_ESPI_CPL 8 -#define V_TP_OUT_ESPI_CPL(x) ((x) << S_TP_OUT_ESPI_CPL) -#define F_TP_OUT_ESPI_CPL V_TP_OUT_ESPI_CPL(1U) - -#define S_TP_OUT_ESPI_POS 9 -#define V_TP_OUT_ESPI_POS(x) ((x) << S_TP_OUT_ESPI_POS) -#define F_TP_OUT_ESPI_POS V_TP_OUT_ESPI_POS(1U) - -#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10 -#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM) -#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U) - -#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11 -#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM) -#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U) - -#define A_TP_GLOBAL_CONFIG 0x308 - -#define S_IP_TTL 0 -#define M_IP_TTL 0xff -#define V_IP_TTL(x) ((x) << S_IP_TTL) -#define G_IP_TTL(x) (((x) >> S_IP_TTL) & M_IP_TTL) - -#define S_TCAM_SERVER_REGION_USAGE 8 -#define M_TCAM_SERVER_REGION_USAGE 0x3 -#define V_TCAM_SERVER_REGION_USAGE(x) ((x) << S_TCAM_SERVER_REGION_USAGE) -#define G_TCAM_SERVER_REGION_USAGE(x) (((x) >> S_TCAM_SERVER_REGION_USAGE) & M_TCAM_SERVER_REGION_USAGE) - -#define S_QOS_MAPPING 10 -#define V_QOS_MAPPING(x) ((x) << S_QOS_MAPPING) -#define F_QOS_MAPPING V_QOS_MAPPING(1U) - -#define S_TCP_CSUM 11 -#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM) -#define F_TCP_CSUM V_TCP_CSUM(1U) - -#define S_UDP_CSUM 12 -#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM) -#define F_UDP_CSUM V_UDP_CSUM(1U) - -#define S_IP_CSUM 13 -#define V_IP_CSUM(x) ((x) << S_IP_CSUM) -#define F_IP_CSUM V_IP_CSUM(1U) - -#define S_IP_ID_SPLIT 14 -#define V_IP_ID_SPLIT(x) ((x) << S_IP_ID_SPLIT) -#define F_IP_ID_SPLIT V_IP_ID_SPLIT(1U) - -#define S_PATH_MTU 15 -#define V_PATH_MTU(x) ((x) << S_PATH_MTU) -#define F_PATH_MTU V_PATH_MTU(1U) - -#define S_5TUPLE_LOOKUP 17 -#define M_5TUPLE_LOOKUP 0x3 -#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP) -#define G_5TUPLE_LOOKUP(x) (((x) >> 
S_5TUPLE_LOOKUP) & M_5TUPLE_LOOKUP) - -#define S_IP_FRAGMENT_DROP 19 -#define V_IP_FRAGMENT_DROP(x) ((x) << S_IP_FRAGMENT_DROP) -#define F_IP_FRAGMENT_DROP V_IP_FRAGMENT_DROP(1U) - -#define S_PING_DROP 20 -#define V_PING_DROP(x) ((x) << S_PING_DROP) -#define F_PING_DROP V_PING_DROP(1U) - -#define S_PROTECT_MODE 21 -#define V_PROTECT_MODE(x) ((x) << S_PROTECT_MODE) -#define F_PROTECT_MODE V_PROTECT_MODE(1U) - -#define S_SYN_COOKIE_ALGORITHM 22 -#define V_SYN_COOKIE_ALGORITHM(x) ((x) << S_SYN_COOKIE_ALGORITHM) -#define F_SYN_COOKIE_ALGORITHM V_SYN_COOKIE_ALGORITHM(1U) - -#define S_ATTACK_FILTER 23 -#define V_ATTACK_FILTER(x) ((x) << S_ATTACK_FILTER) -#define F_ATTACK_FILTER V_ATTACK_FILTER(1U) - -#define S_INTERFACE_TYPE 24 -#define V_INTERFACE_TYPE(x) ((x) << S_INTERFACE_TYPE) -#define F_INTERFACE_TYPE V_INTERFACE_TYPE(1U) - -#define S_DISABLE_RX_FLOW_CONTROL 25 -#define V_DISABLE_RX_FLOW_CONTROL(x) ((x) << S_DISABLE_RX_FLOW_CONTROL) -#define F_DISABLE_RX_FLOW_CONTROL V_DISABLE_RX_FLOW_CONTROL(1U) - -#define S_SYN_COOKIE_PARAMETER 26 -#define M_SYN_COOKIE_PARAMETER 0x3f -#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER) -#define G_SYN_COOKIE_PARAMETER(x) (((x) >> S_SYN_COOKIE_PARAMETER) & M_SYN_COOKIE_PARAMETER) - -#define A_TP_GLOBAL_RX_CREDITS 0x30c -#define A_TP_CM_SIZE 0x310 -#define A_TP_CM_MM_BASE 0x314 - -#define S_CM_MEMMGR_BASE 0 -#define M_CM_MEMMGR_BASE 0xfffffff -#define V_CM_MEMMGR_BASE(x) ((x) << S_CM_MEMMGR_BASE) -#define G_CM_MEMMGR_BASE(x) (((x) >> S_CM_MEMMGR_BASE) & M_CM_MEMMGR_BASE) - -#define A_TP_CM_TIMER_BASE 0x318 - -#define S_CM_TIMER_BASE 0 -#define M_CM_TIMER_BASE 0xfffffff -#define V_CM_TIMER_BASE(x) ((x) << S_CM_TIMER_BASE) -#define G_CM_TIMER_BASE(x) (((x) >> S_CM_TIMER_BASE) & M_CM_TIMER_BASE) - -#define A_TP_PM_SIZE 0x31c -#define A_TP_PM_TX_BASE 0x320 -#define A_TP_PM_DEFRAG_BASE 0x324 -#define A_TP_PM_RX_BASE 0x328 -#define A_TP_PM_RX_PG_SIZE 0x32c -#define A_TP_PM_RX_MAX_PGS 0x330 -#define A_TP_PM_TX_PG_SIZE 0x334 -#define A_TP_PM_TX_MAX_PGS 0x338 -#define A_TP_TCP_OPTIONS 0x340 - -#define S_TIMESTAMP 0 -#define M_TIMESTAMP 0x3 -#define V_TIMESTAMP(x) ((x) << S_TIMESTAMP) -#define G_TIMESTAMP(x) (((x) >> S_TIMESTAMP) & M_TIMESTAMP) - -#define S_WINDOW_SCALE 2 -#define M_WINDOW_SCALE 0x3 -#define V_WINDOW_SCALE(x) ((x) << S_WINDOW_SCALE) -#define G_WINDOW_SCALE(x) (((x) >> S_WINDOW_SCALE) & M_WINDOW_SCALE) - -#define S_SACK 4 -#define M_SACK 0x3 -#define V_SACK(x) ((x) << S_SACK) -#define G_SACK(x) (((x) >> S_SACK) & M_SACK) - -#define S_ECN 6 -#define M_ECN 0x3 -#define V_ECN(x) ((x) << S_ECN) -#define G_ECN(x) (((x) >> S_ECN) & M_ECN) - -#define S_SACK_ALGORITHM 8 -#define M_SACK_ALGORITHM 0x3 -#define V_SACK_ALGORITHM(x) ((x) << S_SACK_ALGORITHM) -#define G_SACK_ALGORITHM(x) (((x) >> S_SACK_ALGORITHM) & M_SACK_ALGORITHM) - -#define S_MSS 10 -#define V_MSS(x) ((x) << S_MSS) -#define F_MSS V_MSS(1U) - -#define S_DEFAULT_PEER_MSS 16 -#define M_DEFAULT_PEER_MSS 0xffff -#define V_DEFAULT_PEER_MSS(x) ((x) << S_DEFAULT_PEER_MSS) -#define G_DEFAULT_PEER_MSS(x) (((x) >> S_DEFAULT_PEER_MSS) & M_DEFAULT_PEER_MSS) - -#define A_TP_DACK_CONFIG 0x344 - -#define S_DACK_MODE 0 -#define V_DACK_MODE(x) ((x) << S_DACK_MODE) -#define F_DACK_MODE V_DACK_MODE(1U) - -#define S_DACK_AUTO_MGMT 1 -#define V_DACK_AUTO_MGMT(x) ((x) << S_DACK_AUTO_MGMT) -#define F_DACK_AUTO_MGMT V_DACK_AUTO_MGMT(1U) - -#define S_DACK_AUTO_CAREFUL 2 -#define V_DACK_AUTO_CAREFUL(x) ((x) << S_DACK_AUTO_CAREFUL) -#define F_DACK_AUTO_CAREFUL V_DACK_AUTO_CAREFUL(1U) - -#define 
S_DACK_MSS_SELECTOR 3 -#define M_DACK_MSS_SELECTOR 0x3 -#define V_DACK_MSS_SELECTOR(x) ((x) << S_DACK_MSS_SELECTOR) -#define G_DACK_MSS_SELECTOR(x) (((x) >> S_DACK_MSS_SELECTOR) & M_DACK_MSS_SELECTOR) - -#define S_DACK_BYTE_THRESHOLD 5 -#define M_DACK_BYTE_THRESHOLD 0xfffff -#define V_DACK_BYTE_THRESHOLD(x) ((x) << S_DACK_BYTE_THRESHOLD) -#define G_DACK_BYTE_THRESHOLD(x) (((x) >> S_DACK_BYTE_THRESHOLD) & M_DACK_BYTE_THRESHOLD) - -#define A_TP_PC_CONFIG 0x348 - -#define S_TP_ACCESS_LATENCY 0 -#define M_TP_ACCESS_LATENCY 0xf -#define V_TP_ACCESS_LATENCY(x) ((x) << S_TP_ACCESS_LATENCY) -#define G_TP_ACCESS_LATENCY(x) (((x) >> S_TP_ACCESS_LATENCY) & M_TP_ACCESS_LATENCY) - -#define S_HELD_FIN_DISABLE 4 -#define V_HELD_FIN_DISABLE(x) ((x) << S_HELD_FIN_DISABLE) -#define F_HELD_FIN_DISABLE V_HELD_FIN_DISABLE(1U) - -#define S_DDP_FC_ENABLE 5 -#define V_DDP_FC_ENABLE(x) ((x) << S_DDP_FC_ENABLE) -#define F_DDP_FC_ENABLE V_DDP_FC_ENABLE(1U) - -#define S_RDMA_ERR_ENABLE 6 -#define V_RDMA_ERR_ENABLE(x) ((x) << S_RDMA_ERR_ENABLE) -#define F_RDMA_ERR_ENABLE V_RDMA_ERR_ENABLE(1U) - -#define S_FAST_PDU_DELIVERY 7 -#define V_FAST_PDU_DELIVERY(x) ((x) << S_FAST_PDU_DELIVERY) -#define F_FAST_PDU_DELIVERY V_FAST_PDU_DELIVERY(1U) - -#define S_CLEAR_FIN 8 -#define V_CLEAR_FIN(x) ((x) << S_CLEAR_FIN) -#define F_CLEAR_FIN V_CLEAR_FIN(1U) - -#define S_DIS_TX_FILL_WIN_PUSH 12 -#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH) -#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U) - -#define S_TP_PC_REV 30 -#define M_TP_PC_REV 0x3 -#define V_TP_PC_REV(x) ((x) << S_TP_PC_REV) -#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV) - -#define A_TP_BACKOFF0 0x350 - -#define S_ELEMENT0 0 -#define M_ELEMENT0 0xff -#define V_ELEMENT0(x) ((x) << S_ELEMENT0) -#define G_ELEMENT0(x) (((x) >> S_ELEMENT0) & M_ELEMENT0) - -#define S_ELEMENT1 8 -#define M_ELEMENT1 0xff -#define V_ELEMENT1(x) ((x) << S_ELEMENT1) -#define G_ELEMENT1(x) (((x) >> S_ELEMENT1) & M_ELEMENT1) - -#define S_ELEMENT2 16 -#define M_ELEMENT2 0xff -#define V_ELEMENT2(x) ((x) << S_ELEMENT2) -#define G_ELEMENT2(x) (((x) >> S_ELEMENT2) & M_ELEMENT2) - -#define S_ELEMENT3 24 -#define M_ELEMENT3 0xff -#define V_ELEMENT3(x) ((x) << S_ELEMENT3) -#define G_ELEMENT3(x) (((x) >> S_ELEMENT3) & M_ELEMENT3) - -#define A_TP_BACKOFF1 0x354 -#define A_TP_BACKOFF2 0x358 -#define A_TP_BACKOFF3 0x35c -#define A_TP_PARA_REG0 0x360 - -#define S_VAR_MULT 0 -#define M_VAR_MULT 0xf -#define V_VAR_MULT(x) ((x) << S_VAR_MULT) -#define G_VAR_MULT(x) (((x) >> S_VAR_MULT) & M_VAR_MULT) - -#define S_VAR_GAIN 4 -#define M_VAR_GAIN 0xf -#define V_VAR_GAIN(x) ((x) << S_VAR_GAIN) -#define G_VAR_GAIN(x) (((x) >> S_VAR_GAIN) & M_VAR_GAIN) - -#define S_SRTT_GAIN 8 -#define M_SRTT_GAIN 0xf -#define V_SRTT_GAIN(x) ((x) << S_SRTT_GAIN) -#define G_SRTT_GAIN(x) (((x) >> S_SRTT_GAIN) & M_SRTT_GAIN) - -#define S_RTTVAR_INIT 12 -#define M_RTTVAR_INIT 0xf -#define V_RTTVAR_INIT(x) ((x) << S_RTTVAR_INIT) -#define G_RTTVAR_INIT(x) (((x) >> S_RTTVAR_INIT) & M_RTTVAR_INIT) - -#define S_DUP_THRESH 20 -#define M_DUP_THRESH 0xf -#define V_DUP_THRESH(x) ((x) << S_DUP_THRESH) -#define G_DUP_THRESH(x) (((x) >> S_DUP_THRESH) & M_DUP_THRESH) - -#define S_INIT_CONG_WIN 24 -#define M_INIT_CONG_WIN 0x7 -#define V_INIT_CONG_WIN(x) ((x) << S_INIT_CONG_WIN) -#define G_INIT_CONG_WIN(x) (((x) >> S_INIT_CONG_WIN) & M_INIT_CONG_WIN) - -#define A_TP_PARA_REG1 0x364 - -#define S_INITIAL_SLOW_START_THRESHOLD 0 -#define M_INITIAL_SLOW_START_THRESHOLD 0xffff -#define V_INITIAL_SLOW_START_THRESHOLD(x) ((x) << 
S_INITIAL_SLOW_START_THRESHOLD) -#define G_INITIAL_SLOW_START_THRESHOLD(x) (((x) >> S_INITIAL_SLOW_START_THRESHOLD) & M_INITIAL_SLOW_START_THRESHOLD) - -#define S_RECEIVE_BUFFER_SIZE 16 -#define M_RECEIVE_BUFFER_SIZE 0xffff -#define V_RECEIVE_BUFFER_SIZE(x) ((x) << S_RECEIVE_BUFFER_SIZE) -#define G_RECEIVE_BUFFER_SIZE(x) (((x) >> S_RECEIVE_BUFFER_SIZE) & M_RECEIVE_BUFFER_SIZE) - -#define A_TP_PARA_REG2 0x368 - -#define S_RX_COALESCE_SIZE 0 -#define M_RX_COALESCE_SIZE 0xffff -#define V_RX_COALESCE_SIZE(x) ((x) << S_RX_COALESCE_SIZE) -#define G_RX_COALESCE_SIZE(x) (((x) >> S_RX_COALESCE_SIZE) & M_RX_COALESCE_SIZE) - -#define S_MAX_RX_SIZE 16 -#define M_MAX_RX_SIZE 0xffff -#define V_MAX_RX_SIZE(x) ((x) << S_MAX_RX_SIZE) -#define G_MAX_RX_SIZE(x) (((x) >> S_MAX_RX_SIZE) & M_MAX_RX_SIZE) - -#define A_TP_PARA_REG3 0x36c - -#define S_RX_COALESCING_PSH_DELIVER 0 -#define V_RX_COALESCING_PSH_DELIVER(x) ((x) << S_RX_COALESCING_PSH_DELIVER) -#define F_RX_COALESCING_PSH_DELIVER V_RX_COALESCING_PSH_DELIVER(1U) - -#define S_RX_COALESCING_ENABLE 1 -#define V_RX_COALESCING_ENABLE(x) ((x) << S_RX_COALESCING_ENABLE) -#define F_RX_COALESCING_ENABLE V_RX_COALESCING_ENABLE(1U) - -#define S_TAHOE_ENABLE 2 -#define V_TAHOE_ENABLE(x) ((x) << S_TAHOE_ENABLE) -#define F_TAHOE_ENABLE V_TAHOE_ENABLE(1U) - -#define S_MAX_REORDER_FRAGMENTS 12 -#define M_MAX_REORDER_FRAGMENTS 0x7 -#define V_MAX_REORDER_FRAGMENTS(x) ((x) << S_MAX_REORDER_FRAGMENTS) -#define G_MAX_REORDER_FRAGMENTS(x) (((x) >> S_MAX_REORDER_FRAGMENTS) & M_MAX_REORDER_FRAGMENTS) - -#define A_TP_TIMER_RESOLUTION 0x390 - -#define S_DELAYED_ACK_TIMER_RESOLUTION 0 -#define M_DELAYED_ACK_TIMER_RESOLUTION 0x3f -#define V_DELAYED_ACK_TIMER_RESOLUTION(x) ((x) << S_DELAYED_ACK_TIMER_RESOLUTION) -#define G_DELAYED_ACK_TIMER_RESOLUTION(x) (((x) >> S_DELAYED_ACK_TIMER_RESOLUTION) & M_DELAYED_ACK_TIMER_RESOLUTION) - -#define S_GENERIC_TIMER_RESOLUTION 16 -#define M_GENERIC_TIMER_RESOLUTION 0x3f -#define V_GENERIC_TIMER_RESOLUTION(x) ((x) << S_GENERIC_TIMER_RESOLUTION) -#define G_GENERIC_TIMER_RESOLUTION(x) (((x) >> S_GENERIC_TIMER_RESOLUTION) & M_GENERIC_TIMER_RESOLUTION) - -#define A_TP_2MSL 0x394 - -#define S_2MSL 0 -#define M_2MSL 0x3fffffff -#define V_2MSL(x) ((x) << S_2MSL) -#define G_2MSL(x) (((x) >> S_2MSL) & M_2MSL) - -#define A_TP_RXT_MIN 0x398 - -#define S_RETRANSMIT_TIMER_MIN 0 -#define M_RETRANSMIT_TIMER_MIN 0xffff -#define V_RETRANSMIT_TIMER_MIN(x) ((x) << S_RETRANSMIT_TIMER_MIN) -#define G_RETRANSMIT_TIMER_MIN(x) (((x) >> S_RETRANSMIT_TIMER_MIN) & M_RETRANSMIT_TIMER_MIN) - -#define A_TP_RXT_MAX 0x39c - -#define S_RETRANSMIT_TIMER_MAX 0 -#define M_RETRANSMIT_TIMER_MAX 0x3fffffff -#define V_RETRANSMIT_TIMER_MAX(x) ((x) << S_RETRANSMIT_TIMER_MAX) -#define G_RETRANSMIT_TIMER_MAX(x) (((x) >> S_RETRANSMIT_TIMER_MAX) & M_RETRANSMIT_TIMER_MAX) - -#define A_TP_PERS_MIN 0x3a0 - -#define S_PERSIST_TIMER_MIN 0 -#define M_PERSIST_TIMER_MIN 0xffff -#define V_PERSIST_TIMER_MIN(x) ((x) << S_PERSIST_TIMER_MIN) -#define G_PERSIST_TIMER_MIN(x) (((x) >> S_PERSIST_TIMER_MIN) & M_PERSIST_TIMER_MIN) - -#define A_TP_PERS_MAX 0x3a4 - -#define S_PERSIST_TIMER_MAX 0 -#define M_PERSIST_TIMER_MAX 0x3fffffff -#define V_PERSIST_TIMER_MAX(x) ((x) << S_PERSIST_TIMER_MAX) -#define G_PERSIST_TIMER_MAX(x) (((x) >> S_PERSIST_TIMER_MAX) & M_PERSIST_TIMER_MAX) - -#define A_TP_KEEP_IDLE 0x3ac - -#define S_KEEP_ALIVE_IDLE_TIME 0 -#define M_KEEP_ALIVE_IDLE_TIME 0x3fffffff -#define V_KEEP_ALIVE_IDLE_TIME(x) ((x) << S_KEEP_ALIVE_IDLE_TIME) -#define G_KEEP_ALIVE_IDLE_TIME(x) (((x) >> 
S_KEEP_ALIVE_IDLE_TIME) & M_KEEP_ALIVE_IDLE_TIME) - -#define A_TP_KEEP_INTVL 0x3b0 - -#define S_KEEP_ALIVE_INTERVAL_TIME 0 -#define M_KEEP_ALIVE_INTERVAL_TIME 0x3fffffff -#define V_KEEP_ALIVE_INTERVAL_TIME(x) ((x) << S_KEEP_ALIVE_INTERVAL_TIME) -#define G_KEEP_ALIVE_INTERVAL_TIME(x) (((x) >> S_KEEP_ALIVE_INTERVAL_TIME) & M_KEEP_ALIVE_INTERVAL_TIME) - -#define A_TP_INIT_SRTT 0x3b4 - -#define S_INITIAL_SRTT 0 -#define M_INITIAL_SRTT 0xffff -#define V_INITIAL_SRTT(x) ((x) << S_INITIAL_SRTT) -#define G_INITIAL_SRTT(x) (((x) >> S_INITIAL_SRTT) & M_INITIAL_SRTT) - -#define A_TP_DACK_TIME 0x3b8 - -#define S_DELAYED_ACK_TIME 0 -#define M_DELAYED_ACK_TIME 0x7ff -#define V_DELAYED_ACK_TIME(x) ((x) << S_DELAYED_ACK_TIME) -#define G_DELAYED_ACK_TIME(x) (((x) >> S_DELAYED_ACK_TIME) & M_DELAYED_ACK_TIME) - -#define A_TP_FINWAIT2_TIME 0x3bc - -#define S_FINWAIT2_TIME 0 -#define M_FINWAIT2_TIME 0x3fffffff -#define V_FINWAIT2_TIME(x) ((x) << S_FINWAIT2_TIME) -#define G_FINWAIT2_TIME(x) (((x) >> S_FINWAIT2_TIME) & M_FINWAIT2_TIME) - -#define A_TP_FAST_FINWAIT2_TIME 0x3c0 - -#define S_FAST_FINWAIT2_TIME 0 -#define M_FAST_FINWAIT2_TIME 0x3fffffff -#define V_FAST_FINWAIT2_TIME(x) ((x) << S_FAST_FINWAIT2_TIME) -#define G_FAST_FINWAIT2_TIME(x) (((x) >> S_FAST_FINWAIT2_TIME) & M_FAST_FINWAIT2_TIME) - -#define A_TP_SHIFT_CNT 0x3c4 - -#define S_KEEPALIVE_MAX 0 -#define M_KEEPALIVE_MAX 0xff -#define V_KEEPALIVE_MAX(x) ((x) << S_KEEPALIVE_MAX) -#define G_KEEPALIVE_MAX(x) (((x) >> S_KEEPALIVE_MAX) & M_KEEPALIVE_MAX) - -#define S_WINDOWPROBE_MAX 8 -#define M_WINDOWPROBE_MAX 0xff -#define V_WINDOWPROBE_MAX(x) ((x) << S_WINDOWPROBE_MAX) -#define G_WINDOWPROBE_MAX(x) (((x) >> S_WINDOWPROBE_MAX) & M_WINDOWPROBE_MAX) - -#define S_RETRANSMISSION_MAX 16 -#define M_RETRANSMISSION_MAX 0xff -#define V_RETRANSMISSION_MAX(x) ((x) << S_RETRANSMISSION_MAX) -#define G_RETRANSMISSION_MAX(x) (((x) >> S_RETRANSMISSION_MAX) & M_RETRANSMISSION_MAX) - -#define S_SYN_MAX 24 -#define M_SYN_MAX 0xff -#define V_SYN_MAX(x) ((x) << S_SYN_MAX) -#define G_SYN_MAX(x) (((x) >> S_SYN_MAX) & M_SYN_MAX) - -#define A_TP_QOS_REG0 0x3e0 - -#define S_L3_VALUE 0 -#define M_L3_VALUE 0x3f -#define V_L3_VALUE(x) ((x) << S_L3_VALUE) -#define G_L3_VALUE(x) (((x) >> S_L3_VALUE) & M_L3_VALUE) - -#define A_TP_QOS_REG1 0x3e4 -#define A_TP_QOS_REG2 0x3e8 -#define A_TP_QOS_REG3 0x3ec -#define A_TP_QOS_REG4 0x3f0 -#define A_TP_QOS_REG5 0x3f4 -#define A_TP_QOS_REG6 0x3f8 -#define A_TP_QOS_REG7 0x3fc -#define A_TP_MTU_REG0 0x404 -#define A_TP_MTU_REG1 0x408 -#define A_TP_MTU_REG2 0x40c -#define A_TP_MTU_REG3 0x410 -#define A_TP_MTU_REG4 0x414 -#define A_TP_MTU_REG5 0x418 -#define A_TP_MTU_REG6 0x41c -#define A_TP_MTU_REG7 0x420 -#define A_TP_RESET 0x44c - -#define S_TP_RESET 0 -#define V_TP_RESET(x) ((x) << S_TP_RESET) -#define F_TP_RESET V_TP_RESET(1U) - -#define S_CM_MEMMGR_INIT 1 -#define V_CM_MEMMGR_INIT(x) ((x) << S_CM_MEMMGR_INIT) -#define F_CM_MEMMGR_INIT V_CM_MEMMGR_INIT(1U) - -#define A_TP_MIB_INDEX 0x450 -#define A_TP_MIB_DATA 0x454 -#define A_TP_SYNC_TIME_HI 0x458 -#define A_TP_SYNC_TIME_LO 0x45c -#define A_TP_CM_MM_RX_FLST_BASE 0x460 - -#define S_CM_MEMMGR_RX_FREE_LIST_BASE 0 -#define M_CM_MEMMGR_RX_FREE_LIST_BASE 0xfffffff -#define V_CM_MEMMGR_RX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_RX_FREE_LIST_BASE) -#define G_CM_MEMMGR_RX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_RX_FREE_LIST_BASE) & M_CM_MEMMGR_RX_FREE_LIST_BASE) - -#define A_TP_CM_MM_TX_FLST_BASE 0x464 - -#define S_CM_MEMMGR_TX_FREE_LIST_BASE 0 -#define M_CM_MEMMGR_TX_FREE_LIST_BASE 0xfffffff -#define 
V_CM_MEMMGR_TX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_TX_FREE_LIST_BASE) -#define G_CM_MEMMGR_TX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_TX_FREE_LIST_BASE) & M_CM_MEMMGR_TX_FREE_LIST_BASE) - -#define A_TP_CM_MM_P_FLST_BASE 0x468 - -#define S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0 -#define M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0xfffffff -#define V_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) -#define G_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) & M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) - -#define A_TP_CM_MM_MAX_P 0x46c - -#define S_CM_MEMMGR_MAX_PSTRUCT 0 -#define M_CM_MEMMGR_MAX_PSTRUCT 0xfffffff -#define V_CM_MEMMGR_MAX_PSTRUCT(x) ((x) << S_CM_MEMMGR_MAX_PSTRUCT) -#define G_CM_MEMMGR_MAX_PSTRUCT(x) (((x) >> S_CM_MEMMGR_MAX_PSTRUCT) & M_CM_MEMMGR_MAX_PSTRUCT) - -#define A_TP_INT_ENABLE 0x470 - -#define S_TX_FREE_LIST_EMPTY 0 -#define V_TX_FREE_LIST_EMPTY(x) ((x) << S_TX_FREE_LIST_EMPTY) -#define F_TX_FREE_LIST_EMPTY V_TX_FREE_LIST_EMPTY(1U) - -#define S_RX_FREE_LIST_EMPTY 1 -#define V_RX_FREE_LIST_EMPTY(x) ((x) << S_RX_FREE_LIST_EMPTY) -#define F_RX_FREE_LIST_EMPTY V_RX_FREE_LIST_EMPTY(1U) - -#define A_TP_INT_CAUSE 0x474 -#define A_TP_TIMER_SEPARATOR 0x4a4 - -#define S_DISABLE_PAST_TIMER_INSERTION 0 -#define V_DISABLE_PAST_TIMER_INSERTION(x) ((x) << S_DISABLE_PAST_TIMER_INSERTION) -#define F_DISABLE_PAST_TIMER_INSERTION V_DISABLE_PAST_TIMER_INSERTION(1U) - -#define S_MODULATION_TIMER_SEPARATOR 1 -#define M_MODULATION_TIMER_SEPARATOR 0x7fff -#define V_MODULATION_TIMER_SEPARATOR(x) ((x) << S_MODULATION_TIMER_SEPARATOR) -#define G_MODULATION_TIMER_SEPARATOR(x) (((x) >> S_MODULATION_TIMER_SEPARATOR) & M_MODULATION_TIMER_SEPARATOR) - -#define S_GLOBAL_TIMER_SEPARATOR 16 -#define M_GLOBAL_TIMER_SEPARATOR 0xffff -#define V_GLOBAL_TIMER_SEPARATOR(x) ((x) << S_GLOBAL_TIMER_SEPARATOR) -#define G_GLOBAL_TIMER_SEPARATOR(x) (((x) >> S_GLOBAL_TIMER_SEPARATOR) & M_GLOBAL_TIMER_SEPARATOR) - -#define A_TP_CM_FC_MODE 0x4b0 -#define A_TP_PC_CONGESTION_CNTL 0x4b4 -#define A_TP_TX_DROP_CONFIG 0x4b8 - -#define S_ENABLE_TX_DROP 31 -#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP) -#define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U) - -#define S_ENABLE_TX_ERROR 30 -#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR) -#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U) - -#define S_DROP_TICKS_CNT 4 -#define M_DROP_TICKS_CNT 0x3ffffff -#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT) -#define G_DROP_TICKS_CNT(x) (((x) >> S_DROP_TICKS_CNT) & M_DROP_TICKS_CNT) - -#define S_NUM_PKTS_DROPPED 0 -#define M_NUM_PKTS_DROPPED 0xf -#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED) -#define G_NUM_PKTS_DROPPED(x) (((x) >> S_NUM_PKTS_DROPPED) & M_NUM_PKTS_DROPPED) - -#define A_TP_TX_DROP_COUNT 0x4bc - -/* RAT registers */ -#define A_RAT_ROUTE_CONTROL 0x580 - -#define S_USE_ROUTE_TABLE 0 -#define V_USE_ROUTE_TABLE(x) ((x) << S_USE_ROUTE_TABLE) -#define F_USE_ROUTE_TABLE V_USE_ROUTE_TABLE(1U) - -#define S_ENABLE_CSPI 1 -#define V_ENABLE_CSPI(x) ((x) << S_ENABLE_CSPI) -#define F_ENABLE_CSPI V_ENABLE_CSPI(1U) - -#define S_ENABLE_PCIX 2 -#define V_ENABLE_PCIX(x) ((x) << S_ENABLE_PCIX) -#define F_ENABLE_PCIX V_ENABLE_PCIX(1U) - -#define A_RAT_ROUTE_TABLE_INDEX 0x584 - -#define S_ROUTE_TABLE_INDEX 0 -#define M_ROUTE_TABLE_INDEX 0xf -#define V_ROUTE_TABLE_INDEX(x) ((x) << S_ROUTE_TABLE_INDEX) -#define G_ROUTE_TABLE_INDEX(x) (((x) >> S_ROUTE_TABLE_INDEX) & M_ROUTE_TABLE_INDEX) - -#define A_RAT_ROUTE_TABLE_DATA 0x588 -#define A_RAT_NO_ROUTE 0x58c - -#define 
S_CPL_OPCODE 0 -#define M_CPL_OPCODE 0xff -#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE) -#define G_CPL_OPCODE(x) (((x) >> S_CPL_OPCODE) & M_CPL_OPCODE) - -#define A_RAT_INTR_ENABLE 0x590 - -#define S_ZEROROUTEERROR 0 -#define V_ZEROROUTEERROR(x) ((x) << S_ZEROROUTEERROR) -#define F_ZEROROUTEERROR V_ZEROROUTEERROR(1U) - -#define S_CSPIFRAMINGERROR 1 -#define V_CSPIFRAMINGERROR(x) ((x) << S_CSPIFRAMINGERROR) -#define F_CSPIFRAMINGERROR V_CSPIFRAMINGERROR(1U) - -#define S_SGEFRAMINGERROR 2 -#define V_SGEFRAMINGERROR(x) ((x) << S_SGEFRAMINGERROR) -#define F_SGEFRAMINGERROR V_SGEFRAMINGERROR(1U) - -#define S_TPFRAMINGERROR 3 -#define V_TPFRAMINGERROR(x) ((x) << S_TPFRAMINGERROR) -#define F_TPFRAMINGERROR V_TPFRAMINGERROR(1U) - -#define A_RAT_INTR_CAUSE 0x594 - -/* CSPI registers */ -#define A_CSPI_RX_AE_WM 0x810 -#define A_CSPI_RX_AF_WM 0x814 -#define A_CSPI_CALENDAR_LEN 0x818 - -#define S_CALENDARLENGTH 0 -#define M_CALENDARLENGTH 0xffff -#define V_CALENDARLENGTH(x) ((x) << S_CALENDARLENGTH) -#define G_CALENDARLENGTH(x) (((x) >> S_CALENDARLENGTH) & M_CALENDARLENGTH) - -#define A_CSPI_FIFO_STATUS_ENABLE 0x820 - -#define S_FIFOSTATUSENABLE 0 -#define V_FIFOSTATUSENABLE(x) ((x) << S_FIFOSTATUSENABLE) -#define F_FIFOSTATUSENABLE V_FIFOSTATUSENABLE(1U) - -#define A_CSPI_MAXBURST1_MAXBURST2 0x828 - -#define S_MAXBURST1 0 -#define M_MAXBURST1 0xffff -#define V_MAXBURST1(x) ((x) << S_MAXBURST1) -#define G_MAXBURST1(x) (((x) >> S_MAXBURST1) & M_MAXBURST1) - -#define S_MAXBURST2 16 -#define M_MAXBURST2 0xffff -#define V_MAXBURST2(x) ((x) << S_MAXBURST2) -#define G_MAXBURST2(x) (((x) >> S_MAXBURST2) & M_MAXBURST2) - -#define A_CSPI_TRAIN 0x82c - -#define S_CSPI_TRAIN_ALPHA 0 -#define M_CSPI_TRAIN_ALPHA 0xffff -#define V_CSPI_TRAIN_ALPHA(x) ((x) << S_CSPI_TRAIN_ALPHA) -#define G_CSPI_TRAIN_ALPHA(x) (((x) >> S_CSPI_TRAIN_ALPHA) & M_CSPI_TRAIN_ALPHA) - -#define S_CSPI_TRAIN_DATA_MAXT 16 -#define M_CSPI_TRAIN_DATA_MAXT 0xffff -#define V_CSPI_TRAIN_DATA_MAXT(x) ((x) << S_CSPI_TRAIN_DATA_MAXT) -#define G_CSPI_TRAIN_DATA_MAXT(x) (((x) >> S_CSPI_TRAIN_DATA_MAXT) & M_CSPI_TRAIN_DATA_MAXT) - -#define A_CSPI_INTR_STATUS 0x848 - -#define S_DIP4ERR 0 -#define V_DIP4ERR(x) ((x) << S_DIP4ERR) -#define F_DIP4ERR V_DIP4ERR(1U) - -#define S_RXDROP 1 -#define V_RXDROP(x) ((x) << S_RXDROP) -#define F_RXDROP V_RXDROP(1U) - -#define S_TXDROP 2 -#define V_TXDROP(x) ((x) << S_TXDROP) -#define F_TXDROP V_TXDROP(1U) - -#define S_RXOVERFLOW 3 -#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW) -#define F_RXOVERFLOW V_RXOVERFLOW(1U) - -#define S_RAMPARITYERR 4 -#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR) -#define F_RAMPARITYERR V_RAMPARITYERR(1U) - -#define A_CSPI_INTR_ENABLE 0x84c - -/* ESPI registers */ -#define A_ESPI_SCH_TOKEN0 0x880 - -#define S_SCHTOKEN0 0 -#define M_SCHTOKEN0 0xffff -#define V_SCHTOKEN0(x) ((x) << S_SCHTOKEN0) -#define G_SCHTOKEN0(x) (((x) >> S_SCHTOKEN0) & M_SCHTOKEN0) - -#define A_ESPI_SCH_TOKEN1 0x884 - -#define S_SCHTOKEN1 0 -#define M_SCHTOKEN1 0xffff -#define V_SCHTOKEN1(x) ((x) << S_SCHTOKEN1) -#define G_SCHTOKEN1(x) (((x) >> S_SCHTOKEN1) & M_SCHTOKEN1) - -#define A_ESPI_SCH_TOKEN2 0x888 - -#define S_SCHTOKEN2 0 -#define M_SCHTOKEN2 0xffff -#define V_SCHTOKEN2(x) ((x) << S_SCHTOKEN2) -#define G_SCHTOKEN2(x) (((x) >> S_SCHTOKEN2) & M_SCHTOKEN2) - -#define A_ESPI_SCH_TOKEN3 0x88c - -#define S_SCHTOKEN3 0 -#define M_SCHTOKEN3 0xffff -#define V_SCHTOKEN3(x) ((x) << S_SCHTOKEN3) -#define G_SCHTOKEN3(x) (((x) >> S_SCHTOKEN3) & M_SCHTOKEN3) - -#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890 - -#define 
S_ALMOSTEMPTY 0 -#define M_ALMOSTEMPTY 0xffff -#define V_ALMOSTEMPTY(x) ((x) << S_ALMOSTEMPTY) -#define G_ALMOSTEMPTY(x) (((x) >> S_ALMOSTEMPTY) & M_ALMOSTEMPTY) - -#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894 - -#define S_ALMOSTFULL 0 -#define M_ALMOSTFULL 0xffff -#define V_ALMOSTFULL(x) ((x) << S_ALMOSTFULL) -#define G_ALMOSTFULL(x) (((x) >> S_ALMOSTFULL) & M_ALMOSTFULL) - -#define A_ESPI_CALENDAR_LENGTH 0x898 -#define A_PORT_CONFIG 0x89c - -#define S_RX_NPORTS 0 -#define M_RX_NPORTS 0xff -#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS) -#define G_RX_NPORTS(x) (((x) >> S_RX_NPORTS) & M_RX_NPORTS) - -#define S_TX_NPORTS 8 -#define M_TX_NPORTS 0xff -#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS) -#define G_TX_NPORTS(x) (((x) >> S_TX_NPORTS) & M_TX_NPORTS) - -#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0 - -#define S_RXSTATUSENABLE 0 -#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE) -#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U) - -#define S_TXDROPENABLE 1 -#define V_TXDROPENABLE(x) ((x) << S_TXDROPENABLE) -#define F_TXDROPENABLE V_TXDROPENABLE(1U) - -#define S_RXENDIANMODE 2 -#define V_RXENDIANMODE(x) ((x) << S_RXENDIANMODE) -#define F_RXENDIANMODE V_RXENDIANMODE(1U) - -#define S_TXENDIANMODE 3 -#define V_TXENDIANMODE(x) ((x) << S_TXENDIANMODE) -#define F_TXENDIANMODE V_TXENDIANMODE(1U) - -#define S_INTEL1010MODE 4 -#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE) -#define F_INTEL1010MODE V_INTEL1010MODE(1U) - -#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8 -#define A_ESPI_TRAIN 0x8ac - -#define S_MAXTRAINALPHA 0 -#define M_MAXTRAINALPHA 0xffff -#define V_MAXTRAINALPHA(x) ((x) << S_MAXTRAINALPHA) -#define G_MAXTRAINALPHA(x) (((x) >> S_MAXTRAINALPHA) & M_MAXTRAINALPHA) - -#define S_MAXTRAINDATA 16 -#define M_MAXTRAINDATA 0xffff -#define V_MAXTRAINDATA(x) ((x) << S_MAXTRAINDATA) -#define G_MAXTRAINDATA(x) (((x) >> S_MAXTRAINDATA) & M_MAXTRAINDATA) - -#define A_RAM_STATUS 0x8b0 - -#define S_RXFIFOPARITYERROR 0 -#define M_RXFIFOPARITYERROR 0x3ff -#define V_RXFIFOPARITYERROR(x) ((x) << S_RXFIFOPARITYERROR) -#define G_RXFIFOPARITYERROR(x) (((x) >> S_RXFIFOPARITYERROR) & M_RXFIFOPARITYERROR) - -#define S_TXFIFOPARITYERROR 10 -#define M_TXFIFOPARITYERROR 0x3ff -#define V_TXFIFOPARITYERROR(x) ((x) << S_TXFIFOPARITYERROR) -#define G_TXFIFOPARITYERROR(x) (((x) >> S_TXFIFOPARITYERROR) & M_TXFIFOPARITYERROR) - -#define S_RXFIFOOVERFLOW 20 -#define M_RXFIFOOVERFLOW 0x3ff -#define V_RXFIFOOVERFLOW(x) ((x) << S_RXFIFOOVERFLOW) -#define G_RXFIFOOVERFLOW(x) (((x) >> S_RXFIFOOVERFLOW) & M_RXFIFOOVERFLOW) - -#define A_TX_DROP_COUNT0 0x8b4 - -#define S_TXPORT0DROPCNT 0 -#define M_TXPORT0DROPCNT 0xffff -#define V_TXPORT0DROPCNT(x) ((x) << S_TXPORT0DROPCNT) -#define G_TXPORT0DROPCNT(x) (((x) >> S_TXPORT0DROPCNT) & M_TXPORT0DROPCNT) - -#define S_TXPORT1DROPCNT 16 -#define M_TXPORT1DROPCNT 0xffff -#define V_TXPORT1DROPCNT(x) ((x) << S_TXPORT1DROPCNT) -#define G_TXPORT1DROPCNT(x) (((x) >> S_TXPORT1DROPCNT) & M_TXPORT1DROPCNT) - -#define A_TX_DROP_COUNT1 0x8b8 - -#define S_TXPORT2DROPCNT 0 -#define M_TXPORT2DROPCNT 0xffff -#define V_TXPORT2DROPCNT(x) ((x) << S_TXPORT2DROPCNT) -#define G_TXPORT2DROPCNT(x) (((x) >> S_TXPORT2DROPCNT) & M_TXPORT2DROPCNT) - -#define S_TXPORT3DROPCNT 16 -#define M_TXPORT3DROPCNT 0xffff -#define V_TXPORT3DROPCNT(x) ((x) << S_TXPORT3DROPCNT) -#define G_TXPORT3DROPCNT(x) (((x) >> S_TXPORT3DROPCNT) & M_TXPORT3DROPCNT) - -#define A_RX_DROP_COUNT0 0x8bc - -#define S_RXPORT0DROPCNT 0 -#define M_RXPORT0DROPCNT 0xffff -#define V_RXPORT0DROPCNT(x) ((x) << S_RXPORT0DROPCNT) -#define G_RXPORT0DROPCNT(x) 
(((x) >> S_RXPORT0DROPCNT) & M_RXPORT0DROPCNT) - -#define S_RXPORT1DROPCNT 16 -#define M_RXPORT1DROPCNT 0xffff -#define V_RXPORT1DROPCNT(x) ((x) << S_RXPORT1DROPCNT) -#define G_RXPORT1DROPCNT(x) (((x) >> S_RXPORT1DROPCNT) & M_RXPORT1DROPCNT) - -#define A_RX_DROP_COUNT1 0x8c0 - -#define S_RXPORT2DROPCNT 0 -#define M_RXPORT2DROPCNT 0xffff -#define V_RXPORT2DROPCNT(x) ((x) << S_RXPORT2DROPCNT) -#define G_RXPORT2DROPCNT(x) (((x) >> S_RXPORT2DROPCNT) & M_RXPORT2DROPCNT) - -#define S_RXPORT3DROPCNT 16 -#define M_RXPORT3DROPCNT 0xffff -#define V_RXPORT3DROPCNT(x) ((x) << S_RXPORT3DROPCNT) -#define G_RXPORT3DROPCNT(x) (((x) >> S_RXPORT3DROPCNT) & M_RXPORT3DROPCNT) - -#define A_DIP4_ERROR_COUNT 0x8c4 - -#define S_DIP4ERRORCNT 0 -#define M_DIP4ERRORCNT 0xfff -#define V_DIP4ERRORCNT(x) ((x) << S_DIP4ERRORCNT) -#define G_DIP4ERRORCNT(x) (((x) >> S_DIP4ERRORCNT) & M_DIP4ERRORCNT) - -#define S_DIP4ERRORCNTSHADOW 12 -#define M_DIP4ERRORCNTSHADOW 0xfff -#define V_DIP4ERRORCNTSHADOW(x) ((x) << S_DIP4ERRORCNTSHADOW) -#define G_DIP4ERRORCNTSHADOW(x) (((x) >> S_DIP4ERRORCNTSHADOW) & M_DIP4ERRORCNTSHADOW) - -#define S_TRICN_RX_TRAIN_ERR 24 -#define V_TRICN_RX_TRAIN_ERR(x) ((x) << S_TRICN_RX_TRAIN_ERR) -#define F_TRICN_RX_TRAIN_ERR V_TRICN_RX_TRAIN_ERR(1U) - -#define S_TRICN_RX_TRAINING 25 -#define V_TRICN_RX_TRAINING(x) ((x) << S_TRICN_RX_TRAINING) -#define F_TRICN_RX_TRAINING V_TRICN_RX_TRAINING(1U) - -#define S_TRICN_RX_TRAIN_OK 26 -#define V_TRICN_RX_TRAIN_OK(x) ((x) << S_TRICN_RX_TRAIN_OK) -#define F_TRICN_RX_TRAIN_OK V_TRICN_RX_TRAIN_OK(1U) - -#define A_ESPI_INTR_STATUS 0x8c8 - -#define S_DIP2PARITYERR 5 -#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR) -#define F_DIP2PARITYERR V_DIP2PARITYERR(1U) - -#define A_ESPI_INTR_ENABLE 0x8cc -#define A_RX_DROP_THRESHOLD 0x8d0 -#define A_ESPI_RX_RESET 0x8ec - -#define S_ESPI_RX_LNK_RST 0 -#define V_ESPI_RX_LNK_RST(x) ((x) << S_ESPI_RX_LNK_RST) -#define F_ESPI_RX_LNK_RST V_ESPI_RX_LNK_RST(1U) - -#define S_ESPI_RX_CORE_RST 1 -#define V_ESPI_RX_CORE_RST(x) ((x) << S_ESPI_RX_CORE_RST) -#define F_ESPI_RX_CORE_RST V_ESPI_RX_CORE_RST(1U) - -#define S_RX_CLK_STATUS 2 -#define V_RX_CLK_STATUS(x) ((x) << S_RX_CLK_STATUS) -#define F_RX_CLK_STATUS V_RX_CLK_STATUS(1U) - -#define A_ESPI_MISC_CONTROL 0x8f0 - -#define S_OUT_OF_SYNC_COUNT 0 -#define M_OUT_OF_SYNC_COUNT 0xf -#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT) -#define G_OUT_OF_SYNC_COUNT(x) (((x) >> S_OUT_OF_SYNC_COUNT) & M_OUT_OF_SYNC_COUNT) - -#define S_DIP2_COUNT_MODE_ENABLE 4 -#define V_DIP2_COUNT_MODE_ENABLE(x) ((x) << S_DIP2_COUNT_MODE_ENABLE) -#define F_DIP2_COUNT_MODE_ENABLE V_DIP2_COUNT_MODE_ENABLE(1U) - -#define S_DIP2_PARITY_ERR_THRES 5 -#define M_DIP2_PARITY_ERR_THRES 0xf -#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES) -#define G_DIP2_PARITY_ERR_THRES(x) (((x) >> S_DIP2_PARITY_ERR_THRES) & M_DIP2_PARITY_ERR_THRES) - -#define S_DIP4_THRES 9 -#define M_DIP4_THRES 0xfff -#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES) -#define G_DIP4_THRES(x) (((x) >> S_DIP4_THRES) & M_DIP4_THRES) - -#define S_DIP4_THRES_ENABLE 21 -#define V_DIP4_THRES_ENABLE(x) ((x) << S_DIP4_THRES_ENABLE) -#define F_DIP4_THRES_ENABLE V_DIP4_THRES_ENABLE(1U) - -#define S_FORCE_DISABLE_STATUS 22 -#define V_FORCE_DISABLE_STATUS(x) ((x) << S_FORCE_DISABLE_STATUS) -#define F_FORCE_DISABLE_STATUS V_FORCE_DISABLE_STATUS(1U) - -#define S_DYNAMIC_DESKEW 23 -#define V_DYNAMIC_DESKEW(x) ((x) << S_DYNAMIC_DESKEW) -#define F_DYNAMIC_DESKEW V_DYNAMIC_DESKEW(1U) - -#define S_MONITORED_PORT_NUM 25 -#define 
M_MONITORED_PORT_NUM 0x3 -#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM) -#define G_MONITORED_PORT_NUM(x) (((x) >> S_MONITORED_PORT_NUM) & M_MONITORED_PORT_NUM) - -#define S_MONITORED_DIRECTION 27 -#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION) -#define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U) - -#define S_MONITORED_INTERFACE 28 -#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE) -#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U) - -#define A_ESPI_DIP2_ERR_COUNT 0x8f4 - -#define S_DIP2_ERR_CNT 0 -#define M_DIP2_ERR_CNT 0xf -#define V_DIP2_ERR_CNT(x) ((x) << S_DIP2_ERR_CNT) -#define G_DIP2_ERR_CNT(x) (((x) >> S_DIP2_ERR_CNT) & M_DIP2_ERR_CNT) - -#define A_ESPI_CMD_ADDR 0x8f8 - -#define S_WRITE_DATA 0 -#define M_WRITE_DATA 0xff -#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA) -#define G_WRITE_DATA(x) (((x) >> S_WRITE_DATA) & M_WRITE_DATA) - -#define S_REGISTER_OFFSET 8 -#define M_REGISTER_OFFSET 0xf -#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET) -#define G_REGISTER_OFFSET(x) (((x) >> S_REGISTER_OFFSET) & M_REGISTER_OFFSET) - -#define S_CHANNEL_ADDR 12 -#define M_CHANNEL_ADDR 0xf -#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR) -#define G_CHANNEL_ADDR(x) (((x) >> S_CHANNEL_ADDR) & M_CHANNEL_ADDR) - -#define S_MODULE_ADDR 16 -#define M_MODULE_ADDR 0x3 -#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR) -#define G_MODULE_ADDR(x) (((x) >> S_MODULE_ADDR) & M_MODULE_ADDR) - -#define S_BUNDLE_ADDR 20 -#define M_BUNDLE_ADDR 0x3 -#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR) -#define G_BUNDLE_ADDR(x) (((x) >> S_BUNDLE_ADDR) & M_BUNDLE_ADDR) - -#define S_SPI4_COMMAND 24 -#define M_SPI4_COMMAND 0xff -#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND) -#define G_SPI4_COMMAND(x) (((x) >> S_SPI4_COMMAND) & M_SPI4_COMMAND) - -#define A_ESPI_GOSTAT 0x8fc - -#define S_READ_DATA 0 -#define M_READ_DATA 0xff -#define V_READ_DATA(x) ((x) << S_READ_DATA) -#define G_READ_DATA(x) (((x) >> S_READ_DATA) & M_READ_DATA) - -#define S_ESPI_CMD_BUSY 8 -#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY) -#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U) - -#define S_ERROR_ACK 9 -#define V_ERROR_ACK(x) ((x) << S_ERROR_ACK) -#define F_ERROR_ACK V_ERROR_ACK(1U) - -#define S_UNMAPPED_ERR 10 -#define V_UNMAPPED_ERR(x) ((x) << S_UNMAPPED_ERR) -#define F_UNMAPPED_ERR V_UNMAPPED_ERR(1U) - -#define S_TRANSACTION_TIMER 16 -#define M_TRANSACTION_TIMER 0xff -#define V_TRANSACTION_TIMER(x) ((x) << S_TRANSACTION_TIMER) -#define G_TRANSACTION_TIMER(x) (((x) >> S_TRANSACTION_TIMER) & M_TRANSACTION_TIMER) - - -/* ULP registers */ -#define A_ULP_ULIMIT 0x980 -#define A_ULP_TAGMASK 0x984 -#define A_ULP_HREG_INDEX 0x988 -#define A_ULP_HREG_DATA 0x98c -#define A_ULP_INT_ENABLE 0x990 -#define A_ULP_INT_CAUSE 0x994 - -#define S_HREG_PAR_ERR 0 -#define V_HREG_PAR_ERR(x) ((x) << S_HREG_PAR_ERR) -#define F_HREG_PAR_ERR V_HREG_PAR_ERR(1U) - -#define S_EGRS_DATA_PAR_ERR 1 -#define V_EGRS_DATA_PAR_ERR(x) ((x) << S_EGRS_DATA_PAR_ERR) -#define F_EGRS_DATA_PAR_ERR V_EGRS_DATA_PAR_ERR(1U) - -#define S_INGRS_DATA_PAR_ERR 2 -#define V_INGRS_DATA_PAR_ERR(x) ((x) << S_INGRS_DATA_PAR_ERR) -#define F_INGRS_DATA_PAR_ERR V_INGRS_DATA_PAR_ERR(1U) - -#define S_PM_INTR 3 -#define V_PM_INTR(x) ((x) << S_PM_INTR) -#define F_PM_INTR V_PM_INTR(1U) - -#define S_PM_E2C_SYNC_ERR 4 -#define V_PM_E2C_SYNC_ERR(x) ((x) << S_PM_E2C_SYNC_ERR) -#define F_PM_E2C_SYNC_ERR V_PM_E2C_SYNC_ERR(1U) - -#define S_PM_C2E_SYNC_ERR 5 -#define V_PM_C2E_SYNC_ERR(x) ((x) << S_PM_C2E_SYNC_ERR) -#define F_PM_C2E_SYNC_ERR 
V_PM_C2E_SYNC_ERR(1U) - -#define S_PM_E2C_EMPTY_ERR 6 -#define V_PM_E2C_EMPTY_ERR(x) ((x) << S_PM_E2C_EMPTY_ERR) -#define F_PM_E2C_EMPTY_ERR V_PM_E2C_EMPTY_ERR(1U) - -#define S_PM_C2E_EMPTY_ERR 7 -#define V_PM_C2E_EMPTY_ERR(x) ((x) << S_PM_C2E_EMPTY_ERR) -#define F_PM_C2E_EMPTY_ERR V_PM_C2E_EMPTY_ERR(1U) - -#define S_PM_PAR_ERR 8 -#define M_PM_PAR_ERR 0xffff -#define V_PM_PAR_ERR(x) ((x) << S_PM_PAR_ERR) -#define G_PM_PAR_ERR(x) (((x) >> S_PM_PAR_ERR) & M_PM_PAR_ERR) - -#define S_PM_E2C_WRT_FULL 24 -#define V_PM_E2C_WRT_FULL(x) ((x) << S_PM_E2C_WRT_FULL) -#define F_PM_E2C_WRT_FULL V_PM_E2C_WRT_FULL(1U) - -#define S_PM_C2E_WRT_FULL 25 -#define V_PM_C2E_WRT_FULL(x) ((x) << S_PM_C2E_WRT_FULL) -#define F_PM_C2E_WRT_FULL V_PM_C2E_WRT_FULL(1U) - -#define A_ULP_PIO_CTRL 0x998 - -/* PL registers */ -#define A_PL_ENABLE 0xa00 - -#define S_PL_INTR_SGE_ERR 0 -#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR) -#define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U) - -#define S_PL_INTR_SGE_DATA 1 -#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA) -#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U) - -#define S_PL_INTR_MC3 2 -#define V_PL_INTR_MC3(x) ((x) << S_PL_INTR_MC3) -#define F_PL_INTR_MC3 V_PL_INTR_MC3(1U) - -#define S_PL_INTR_MC4 3 -#define V_PL_INTR_MC4(x) ((x) << S_PL_INTR_MC4) -#define F_PL_INTR_MC4 V_PL_INTR_MC4(1U) - -#define S_PL_INTR_MC5 4 -#define V_PL_INTR_MC5(x) ((x) << S_PL_INTR_MC5) -#define F_PL_INTR_MC5 V_PL_INTR_MC5(1U) - -#define S_PL_INTR_RAT 5 -#define V_PL_INTR_RAT(x) ((x) << S_PL_INTR_RAT) -#define F_PL_INTR_RAT V_PL_INTR_RAT(1U) - -#define S_PL_INTR_TP 6 -#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP) -#define F_PL_INTR_TP V_PL_INTR_TP(1U) - -#define S_PL_INTR_ULP 7 -#define V_PL_INTR_ULP(x) ((x) << S_PL_INTR_ULP) -#define F_PL_INTR_ULP V_PL_INTR_ULP(1U) - -#define S_PL_INTR_ESPI 8 -#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI) -#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U) - -#define S_PL_INTR_CSPI 9 -#define V_PL_INTR_CSPI(x) ((x) << S_PL_INTR_CSPI) -#define F_PL_INTR_CSPI V_PL_INTR_CSPI(1U) - -#define S_PL_INTR_PCIX 10 -#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX) -#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U) - -#define S_PL_INTR_EXT 11 -#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT) -#define F_PL_INTR_EXT V_PL_INTR_EXT(1U) - -#define A_PL_CAUSE 0xa04 - -/* MC5 registers */ -#define A_MC5_CONFIG 0xc04 - -#define S_MODE 0 -#define V_MODE(x) ((x) << S_MODE) -#define F_MODE V_MODE(1U) - -#define S_TCAM_RESET 1 -#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET) -#define F_TCAM_RESET V_TCAM_RESET(1U) - -#define S_TCAM_READY 2 -#define V_TCAM_READY(x) ((x) << S_TCAM_READY) -#define F_TCAM_READY V_TCAM_READY(1U) - -#define S_DBGI_ENABLE 4 -#define V_DBGI_ENABLE(x) ((x) << S_DBGI_ENABLE) -#define F_DBGI_ENABLE V_DBGI_ENABLE(1U) - -#define S_M_BUS_ENABLE 5 -#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE) -#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U) - -#define S_PARITY_ENABLE 6 -#define V_PARITY_ENABLE(x) ((x) << S_PARITY_ENABLE) -#define F_PARITY_ENABLE V_PARITY_ENABLE(1U) - -#define S_SYN_ISSUE_MODE 7 -#define M_SYN_ISSUE_MODE 0x3 -#define V_SYN_ISSUE_MODE(x) ((x) << S_SYN_ISSUE_MODE) -#define G_SYN_ISSUE_MODE(x) (((x) >> S_SYN_ISSUE_MODE) & M_SYN_ISSUE_MODE) - -#define S_BUILD 16 -#define V_BUILD(x) ((x) << S_BUILD) -#define F_BUILD V_BUILD(1U) - -#define S_COMPRESSION_ENABLE 17 -#define V_COMPRESSION_ENABLE(x) ((x) << S_COMPRESSION_ENABLE) -#define F_COMPRESSION_ENABLE V_COMPRESSION_ENABLE(1U) - -#define S_NUM_LIP 18 -#define M_NUM_LIP 0x3f -#define V_NUM_LIP(x) ((x) << 
S_NUM_LIP) -#define G_NUM_LIP(x) (((x) >> S_NUM_LIP) & M_NUM_LIP) - -#define S_TCAM_PART_CNT 24 -#define M_TCAM_PART_CNT 0x3 -#define V_TCAM_PART_CNT(x) ((x) << S_TCAM_PART_CNT) -#define G_TCAM_PART_CNT(x) (((x) >> S_TCAM_PART_CNT) & M_TCAM_PART_CNT) - -#define S_TCAM_PART_TYPE 26 -#define M_TCAM_PART_TYPE 0x3 -#define V_TCAM_PART_TYPE(x) ((x) << S_TCAM_PART_TYPE) -#define G_TCAM_PART_TYPE(x) (((x) >> S_TCAM_PART_TYPE) & M_TCAM_PART_TYPE) - -#define S_TCAM_PART_SIZE 28 -#define M_TCAM_PART_SIZE 0x3 -#define V_TCAM_PART_SIZE(x) ((x) << S_TCAM_PART_SIZE) -#define G_TCAM_PART_SIZE(x) (((x) >> S_TCAM_PART_SIZE) & M_TCAM_PART_SIZE) - -#define S_TCAM_PART_TYPE_HI 30 -#define V_TCAM_PART_TYPE_HI(x) ((x) << S_TCAM_PART_TYPE_HI) -#define F_TCAM_PART_TYPE_HI V_TCAM_PART_TYPE_HI(1U) - -#define A_MC5_SIZE 0xc08 - -#define S_SIZE 0 -#define M_SIZE 0x3fffff -#define V_SIZE(x) ((x) << S_SIZE) -#define G_SIZE(x) (((x) >> S_SIZE) & M_SIZE) - -#define A_MC5_ROUTING_TABLE_INDEX 0xc0c - -#define S_START_OF_ROUTING_TABLE 0 -#define M_START_OF_ROUTING_TABLE 0x3fffff -#define V_START_OF_ROUTING_TABLE(x) ((x) << S_START_OF_ROUTING_TABLE) -#define G_START_OF_ROUTING_TABLE(x) (((x) >> S_START_OF_ROUTING_TABLE) & M_START_OF_ROUTING_TABLE) - -#define A_MC5_SERVER_INDEX 0xc14 - -#define S_START_OF_SERVER_INDEX 0 -#define M_START_OF_SERVER_INDEX 0x3fffff -#define V_START_OF_SERVER_INDEX(x) ((x) << S_START_OF_SERVER_INDEX) -#define G_START_OF_SERVER_INDEX(x) (((x) >> S_START_OF_SERVER_INDEX) & M_START_OF_SERVER_INDEX) - -#define A_MC5_LIP_RAM_ADDR 0xc18 - -#define S_LOCAL_IP_RAM_ADDR 0 -#define M_LOCAL_IP_RAM_ADDR 0x3f -#define V_LOCAL_IP_RAM_ADDR(x) ((x) << S_LOCAL_IP_RAM_ADDR) -#define G_LOCAL_IP_RAM_ADDR(x) (((x) >> S_LOCAL_IP_RAM_ADDR) & M_LOCAL_IP_RAM_ADDR) - -#define S_RAM_WRITE_ENABLE 8 -#define V_RAM_WRITE_ENABLE(x) ((x) << S_RAM_WRITE_ENABLE) -#define F_RAM_WRITE_ENABLE V_RAM_WRITE_ENABLE(1U) - -#define A_MC5_LIP_RAM_DATA 0xc1c -#define A_MC5_RSP_LATENCY 0xc20 - -#define S_SEARCH_RESPONSE_LATENCY 0 -#define M_SEARCH_RESPONSE_LATENCY 0x1f -#define V_SEARCH_RESPONSE_LATENCY(x) ((x) << S_SEARCH_RESPONSE_LATENCY) -#define G_SEARCH_RESPONSE_LATENCY(x) (((x) >> S_SEARCH_RESPONSE_LATENCY) & M_SEARCH_RESPONSE_LATENCY) - -#define S_LEARN_RESPONSE_LATENCY 8 -#define M_LEARN_RESPONSE_LATENCY 0x1f -#define V_LEARN_RESPONSE_LATENCY(x) ((x) << S_LEARN_RESPONSE_LATENCY) -#define G_LEARN_RESPONSE_LATENCY(x) (((x) >> S_LEARN_RESPONSE_LATENCY) & M_LEARN_RESPONSE_LATENCY) - -#define A_MC5_PARITY_LATENCY 0xc24 - -#define S_SRCHLAT 0 -#define M_SRCHLAT 0x1f -#define V_SRCHLAT(x) ((x) << S_SRCHLAT) -#define G_SRCHLAT(x) (((x) >> S_SRCHLAT) & M_SRCHLAT) - -#define S_PARLAT 8 -#define M_PARLAT 0x1f -#define V_PARLAT(x) ((x) << S_PARLAT) -#define G_PARLAT(x) (((x) >> S_PARLAT) & M_PARLAT) - -#define A_MC5_WR_LRN_VERIFY 0xc28 - -#define S_POVEREN 0 -#define V_POVEREN(x) ((x) << S_POVEREN) -#define F_POVEREN V_POVEREN(1U) - -#define S_LRNVEREN 1 -#define V_LRNVEREN(x) ((x) << S_LRNVEREN) -#define F_LRNVEREN V_LRNVEREN(1U) - -#define S_VWVEREN 2 -#define V_VWVEREN(x) ((x) << S_VWVEREN) -#define F_VWVEREN V_VWVEREN(1U) - -#define A_MC5_PART_ID_INDEX 0xc2c - -#define S_IDINDEX 0 -#define M_IDINDEX 0xf -#define V_IDINDEX(x) ((x) << S_IDINDEX) -#define G_IDINDEX(x) (((x) >> S_IDINDEX) & M_IDINDEX) - -#define A_MC5_RESET_MAX 0xc30 - -#define S_RSTMAX 0 -#define M_RSTMAX 0x1ff -#define V_RSTMAX(x) ((x) << S_RSTMAX) -#define G_RSTMAX(x) (((x) >> S_RSTMAX) & M_RSTMAX) - -#define A_MC5_INT_ENABLE 0xc40 - -#define 
S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR 0 -#define V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR) -#define F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(1U) - -#define S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR 1 -#define V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR) -#define F_MC5_INT_HIT_IN_ACTIVE_REGION_ERR V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(1U) - -#define S_MC5_INT_HIT_IN_RT_REGION_ERR 2 -#define V_MC5_INT_HIT_IN_RT_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_RT_REGION_ERR) -#define F_MC5_INT_HIT_IN_RT_REGION_ERR V_MC5_INT_HIT_IN_RT_REGION_ERR(1U) - -#define S_MC5_INT_MISS_ERR 3 -#define V_MC5_INT_MISS_ERR(x) ((x) << S_MC5_INT_MISS_ERR) -#define F_MC5_INT_MISS_ERR V_MC5_INT_MISS_ERR(1U) - -#define S_MC5_INT_LIP0_ERR 4 -#define V_MC5_INT_LIP0_ERR(x) ((x) << S_MC5_INT_LIP0_ERR) -#define F_MC5_INT_LIP0_ERR V_MC5_INT_LIP0_ERR(1U) - -#define S_MC5_INT_LIP_MISS_ERR 5 -#define V_MC5_INT_LIP_MISS_ERR(x) ((x) << S_MC5_INT_LIP_MISS_ERR) -#define F_MC5_INT_LIP_MISS_ERR V_MC5_INT_LIP_MISS_ERR(1U) - -#define S_MC5_INT_PARITY_ERR 6 -#define V_MC5_INT_PARITY_ERR(x) ((x) << S_MC5_INT_PARITY_ERR) -#define F_MC5_INT_PARITY_ERR V_MC5_INT_PARITY_ERR(1U) - -#define S_MC5_INT_ACTIVE_REGION_FULL 7 -#define V_MC5_INT_ACTIVE_REGION_FULL(x) ((x) << S_MC5_INT_ACTIVE_REGION_FULL) -#define F_MC5_INT_ACTIVE_REGION_FULL V_MC5_INT_ACTIVE_REGION_FULL(1U) - -#define S_MC5_INT_NFA_SRCH_ERR 8 -#define V_MC5_INT_NFA_SRCH_ERR(x) ((x) << S_MC5_INT_NFA_SRCH_ERR) -#define F_MC5_INT_NFA_SRCH_ERR V_MC5_INT_NFA_SRCH_ERR(1U) - -#define S_MC5_INT_SYN_COOKIE 9 -#define V_MC5_INT_SYN_COOKIE(x) ((x) << S_MC5_INT_SYN_COOKIE) -#define F_MC5_INT_SYN_COOKIE V_MC5_INT_SYN_COOKIE(1U) - -#define S_MC5_INT_SYN_COOKIE_BAD 10 -#define V_MC5_INT_SYN_COOKIE_BAD(x) ((x) << S_MC5_INT_SYN_COOKIE_BAD) -#define F_MC5_INT_SYN_COOKIE_BAD V_MC5_INT_SYN_COOKIE_BAD(1U) - -#define S_MC5_INT_SYN_COOKIE_OFF 11 -#define V_MC5_INT_SYN_COOKIE_OFF(x) ((x) << S_MC5_INT_SYN_COOKIE_OFF) -#define F_MC5_INT_SYN_COOKIE_OFF V_MC5_INT_SYN_COOKIE_OFF(1U) - -#define S_MC5_INT_UNKNOWN_CMD 15 -#define V_MC5_INT_UNKNOWN_CMD(x) ((x) << S_MC5_INT_UNKNOWN_CMD) -#define F_MC5_INT_UNKNOWN_CMD V_MC5_INT_UNKNOWN_CMD(1U) - -#define S_MC5_INT_REQUESTQ_PARITY_ERR 16 -#define V_MC5_INT_REQUESTQ_PARITY_ERR(x) ((x) << S_MC5_INT_REQUESTQ_PARITY_ERR) -#define F_MC5_INT_REQUESTQ_PARITY_ERR V_MC5_INT_REQUESTQ_PARITY_ERR(1U) - -#define S_MC5_INT_DISPATCHQ_PARITY_ERR 17 -#define V_MC5_INT_DISPATCHQ_PARITY_ERR(x) ((x) << S_MC5_INT_DISPATCHQ_PARITY_ERR) -#define F_MC5_INT_DISPATCHQ_PARITY_ERR V_MC5_INT_DISPATCHQ_PARITY_ERR(1U) - -#define S_MC5_INT_DEL_ACT_EMPTY 18 -#define V_MC5_INT_DEL_ACT_EMPTY(x) ((x) << S_MC5_INT_DEL_ACT_EMPTY) -#define F_MC5_INT_DEL_ACT_EMPTY V_MC5_INT_DEL_ACT_EMPTY(1U) - -#define A_MC5_INT_CAUSE 0xc44 -#define A_MC5_INT_TID 0xc48 -#define A_MC5_INT_PTID 0xc4c -#define A_MC5_DBGI_CONFIG 0xc74 -#define A_MC5_DBGI_REQ_CMD 0xc78 - -#define S_CMDMODE 0 -#define M_CMDMODE 0x7 -#define V_CMDMODE(x) ((x) << S_CMDMODE) -#define G_CMDMODE(x) (((x) >> S_CMDMODE) & M_CMDMODE) - -#define S_SADRSEL 4 -#define V_SADRSEL(x) ((x) << S_SADRSEL) -#define F_SADRSEL V_SADRSEL(1U) - -#define S_WRITE_BURST_SIZE 22 -#define M_WRITE_BURST_SIZE 0x3ff -#define V_WRITE_BURST_SIZE(x) ((x) << S_WRITE_BURST_SIZE) -#define G_WRITE_BURST_SIZE(x) (((x) >> S_WRITE_BURST_SIZE) & M_WRITE_BURST_SIZE) - -#define A_MC5_DBGI_REQ_ADDR0 0xc7c -#define A_MC5_DBGI_REQ_ADDR1 0xc80 -#define A_MC5_DBGI_REQ_ADDR2 0xc84 -#define 
A_MC5_DBGI_REQ_DATA0 0xc88 -#define A_MC5_DBGI_REQ_DATA1 0xc8c -#define A_MC5_DBGI_REQ_DATA2 0xc90 -#define A_MC5_DBGI_REQ_DATA3 0xc94 -#define A_MC5_DBGI_REQ_DATA4 0xc98 -#define A_MC5_DBGI_REQ_MASK0 0xc9c -#define A_MC5_DBGI_REQ_MASK1 0xca0 -#define A_MC5_DBGI_REQ_MASK2 0xca4 -#define A_MC5_DBGI_REQ_MASK3 0xca8 -#define A_MC5_DBGI_REQ_MASK4 0xcac -#define A_MC5_DBGI_RSP_STATUS 0xcb0 - -#define S_DBGI_RSP_VALID 0 -#define V_DBGI_RSP_VALID(x) ((x) << S_DBGI_RSP_VALID) -#define F_DBGI_RSP_VALID V_DBGI_RSP_VALID(1U) - -#define S_DBGI_RSP_HIT 1 -#define V_DBGI_RSP_HIT(x) ((x) << S_DBGI_RSP_HIT) -#define F_DBGI_RSP_HIT V_DBGI_RSP_HIT(1U) - -#define S_DBGI_RSP_ERR 2 -#define V_DBGI_RSP_ERR(x) ((x) << S_DBGI_RSP_ERR) -#define F_DBGI_RSP_ERR V_DBGI_RSP_ERR(1U) - -#define S_DBGI_RSP_ERR_REASON 8 -#define M_DBGI_RSP_ERR_REASON 0x7 -#define V_DBGI_RSP_ERR_REASON(x) ((x) << S_DBGI_RSP_ERR_REASON) -#define G_DBGI_RSP_ERR_REASON(x) (((x) >> S_DBGI_RSP_ERR_REASON) & M_DBGI_RSP_ERR_REASON) - -#define A_MC5_DBGI_RSP_DATA0 0xcb4 -#define A_MC5_DBGI_RSP_DATA1 0xcb8 -#define A_MC5_DBGI_RSP_DATA2 0xcbc -#define A_MC5_DBGI_RSP_DATA3 0xcc0 -#define A_MC5_DBGI_RSP_DATA4 0xcc4 -#define A_MC5_DBGI_RSP_LAST_CMD 0xcc8 -#define A_MC5_POPEN_DATA_WR_CMD 0xccc -#define A_MC5_POPEN_MASK_WR_CMD 0xcd0 -#define A_MC5_AOPEN_SRCH_CMD 0xcd4 -#define A_MC5_AOPEN_LRN_CMD 0xcd8 -#define A_MC5_SYN_SRCH_CMD 0xcdc -#define A_MC5_SYN_LRN_CMD 0xce0 -#define A_MC5_ACK_SRCH_CMD 0xce4 -#define A_MC5_ACK_LRN_CMD 0xce8 -#define A_MC5_ILOOKUP_CMD 0xcec -#define A_MC5_ELOOKUP_CMD 0xcf0 -#define A_MC5_DATA_WRITE_CMD 0xcf4 -#define A_MC5_DATA_READ_CMD 0xcf8 -#define A_MC5_MASK_WRITE_CMD 0xcfc - -/* PCICFG registers */ -#define A_PCICFG_PM_CSR 0x44 -#define A_PCICFG_VPD_ADDR 0x4a - -#define S_VPD_ADDR 0 -#define M_VPD_ADDR 0x7fff -#define V_VPD_ADDR(x) ((x) << S_VPD_ADDR) -#define G_VPD_ADDR(x) (((x) >> S_VPD_ADDR) & M_VPD_ADDR) - -#define S_VPD_OP_FLAG 15 -#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG) -#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U) - -#define A_PCICFG_VPD_DATA 0x4c -#define A_PCICFG_PCIX_CMD 0x60 -#define A_PCICFG_INTR_ENABLE 0xf4 - -#define S_MASTER_PARITY_ERR 0 -#define V_MASTER_PARITY_ERR(x) ((x) << S_MASTER_PARITY_ERR) -#define F_MASTER_PARITY_ERR V_MASTER_PARITY_ERR(1U) - -#define S_SIG_TARGET_ABORT 1 -#define V_SIG_TARGET_ABORT(x) ((x) << S_SIG_TARGET_ABORT) -#define F_SIG_TARGET_ABORT V_SIG_TARGET_ABORT(1U) - -#define S_RCV_TARGET_ABORT 2 -#define V_RCV_TARGET_ABORT(x) ((x) << S_RCV_TARGET_ABORT) -#define F_RCV_TARGET_ABORT V_RCV_TARGET_ABORT(1U) - -#define S_RCV_MASTER_ABORT 3 -#define V_RCV_MASTER_ABORT(x) ((x) << S_RCV_MASTER_ABORT) -#define F_RCV_MASTER_ABORT V_RCV_MASTER_ABORT(1U) - -#define S_SIG_SYS_ERR 4 -#define V_SIG_SYS_ERR(x) ((x) << S_SIG_SYS_ERR) -#define F_SIG_SYS_ERR V_SIG_SYS_ERR(1U) - -#define S_DET_PARITY_ERR 5 -#define V_DET_PARITY_ERR(x) ((x) << S_DET_PARITY_ERR) -#define F_DET_PARITY_ERR V_DET_PARITY_ERR(1U) - -#define S_PIO_PARITY_ERR 6 -#define V_PIO_PARITY_ERR(x) ((x) << S_PIO_PARITY_ERR) -#define F_PIO_PARITY_ERR V_PIO_PARITY_ERR(1U) - -#define S_WF_PARITY_ERR 7 -#define V_WF_PARITY_ERR(x) ((x) << S_WF_PARITY_ERR) -#define F_WF_PARITY_ERR V_WF_PARITY_ERR(1U) - -#define S_RF_PARITY_ERR 8 -#define M_RF_PARITY_ERR 0x3 -#define V_RF_PARITY_ERR(x) ((x) << S_RF_PARITY_ERR) -#define G_RF_PARITY_ERR(x) (((x) >> S_RF_PARITY_ERR) & M_RF_PARITY_ERR) - -#define S_CF_PARITY_ERR 10 -#define M_CF_PARITY_ERR 0x3 -#define V_CF_PARITY_ERR(x) ((x) << S_CF_PARITY_ERR) -#define G_CF_PARITY_ERR(x) (((x) >> 
S_CF_PARITY_ERR) & M_CF_PARITY_ERR) - -#define A_PCICFG_INTR_CAUSE 0xf8 -#define A_PCICFG_MODE 0xfc - -#define S_PCI_MODE_64BIT 0 -#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT) -#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U) - -#define S_PCI_MODE_66MHZ 1 -#define V_PCI_MODE_66MHZ(x) ((x) << S_PCI_MODE_66MHZ) -#define F_PCI_MODE_66MHZ V_PCI_MODE_66MHZ(1U) - -#define S_PCI_MODE_PCIX_INITPAT 2 -#define M_PCI_MODE_PCIX_INITPAT 0x7 -#define V_PCI_MODE_PCIX_INITPAT(x) ((x) << S_PCI_MODE_PCIX_INITPAT) -#define G_PCI_MODE_PCIX_INITPAT(x) (((x) >> S_PCI_MODE_PCIX_INITPAT) & M_PCI_MODE_PCIX_INITPAT) - -#define S_PCI_MODE_PCIX 5 -#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX) -#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U) - -#define S_PCI_MODE_CLK 6 -#define M_PCI_MODE_CLK 0x3 -#define V_PCI_MODE_CLK(x) ((x) << S_PCI_MODE_CLK) -#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK) - -#endif /* _CXGB_REGS_H_ */ diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c deleted file mode 100644 index e9a03ff..0000000 --- a/drivers/net/chelsio/sge.c +++ /dev/null @@ -1,2140 +0,0 @@ -/***************************************************************************** - * * - * File: sge.c * - * $Revision: 1.26 $ * - * $Date: 2005/06/21 18:29:48 $ * - * Description: * - * DMA engine. * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. * - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#include "common.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "cpl5_cmd.h" -#include "sge.h" -#include "regs.h" -#include "espi.h" - -/* This belongs in if_ether.h */ -#define ETH_P_CPL5 0xf - -#define SGE_CMDQ_N 2 -#define SGE_FREELQ_N 2 -#define SGE_CMDQ0_E_N 1024 -#define SGE_CMDQ1_E_N 128 -#define SGE_FREEL_SIZE 4096 -#define SGE_JUMBO_FREEL_SIZE 512 -#define SGE_FREEL_REFILL_THRESH 16 -#define SGE_RESPQ_E_N 1024 -#define SGE_INTRTIMER_NRES 1000 -#define SGE_RX_SM_BUF_SIZE 1536 -#define SGE_TX_DESC_MAX_PLEN 16384 - -#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) - -/* - * Period of the TX buffer reclaim timer. This timer does not need to run - * frequently as TX buffers are usually reclaimed by new TX packets. 
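The register header removed above uses one uniform macro convention throughout: for each field FIELD, S_FIELD is its bit offset, M_FIELD its width mask, V_FIELD(x) shifts a value into position, F_FIELD is the one-bit flag form, and G_FIELD(x) extracts the field from a register word. A minimal sketch of how a caller composes and decodes A_TP_GLOBAL_CONFIG with these macros; `regs` is assumed to be the adapter's ioremap()ed register BAR:

#include <linux/io.h>
#include <linux/printk.h>

static void tp_global_config_sketch(void __iomem *regs)
{
	/* Multi-bit fields go through V_*(); one-bit flags are F_*. */
	u32 val = V_IP_TTL(64) |
		  F_TCP_CSUM | F_UDP_CSUM | F_IP_CSUM |
		  V_SYN_COOKIE_PARAMETER(0x12);

	writel(val, regs + A_TP_GLOBAL_CONFIG);

	/* G_*() undoes V_*(): shift down, then mask to the field width. */
	val = readl(regs + A_TP_GLOBAL_CONFIG);
	pr_debug("ttl=%u cookie=%#x tcp_csum=%d\n",
		 G_IP_TTL(val), G_SYN_COOKIE_PARAMETER(val),
		 !!(val & F_TCP_CSUM));
}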
- */ -#define TX_RECLAIM_PERIOD (HZ / 4) - -#define M_CMD_LEN 0x7fffffff -#define V_CMD_LEN(v) (v) -#define G_CMD_LEN(v) ((v) & M_CMD_LEN) -#define V_CMD_GEN1(v) ((v) << 31) -#define V_CMD_GEN2(v) (v) -#define F_CMD_DATAVALID (1 << 1) -#define F_CMD_SOP (1 << 2) -#define V_CMD_EOP(v) ((v) << 3) - -/* - * Command queue, receive buffer list, and response queue descriptors. - */ -#if defined(__BIG_ENDIAN_BITFIELD) -struct cmdQ_e { - u32 addr_lo; - u32 len_gen; - u32 flags; - u32 addr_hi; -}; - -struct freelQ_e { - u32 addr_lo; - u32 len_gen; - u32 gen2; - u32 addr_hi; -}; - -struct respQ_e { - u32 Qsleeping : 4; - u32 Cmdq1CreditReturn : 5; - u32 Cmdq1DmaComplete : 5; - u32 Cmdq0CreditReturn : 5; - u32 Cmdq0DmaComplete : 5; - u32 FreelistQid : 2; - u32 CreditValid : 1; - u32 DataValid : 1; - u32 Offload : 1; - u32 Eop : 1; - u32 Sop : 1; - u32 GenerationBit : 1; - u32 BufferLength; -}; -#elif defined(__LITTLE_ENDIAN_BITFIELD) -struct cmdQ_e { - u32 len_gen; - u32 addr_lo; - u32 addr_hi; - u32 flags; -}; - -struct freelQ_e { - u32 len_gen; - u32 addr_lo; - u32 addr_hi; - u32 gen2; -}; - -struct respQ_e { - u32 BufferLength; - u32 GenerationBit : 1; - u32 Sop : 1; - u32 Eop : 1; - u32 Offload : 1; - u32 DataValid : 1; - u32 CreditValid : 1; - u32 FreelistQid : 2; - u32 Cmdq0DmaComplete : 5; - u32 Cmdq0CreditReturn : 5; - u32 Cmdq1DmaComplete : 5; - u32 Cmdq1CreditReturn : 5; - u32 Qsleeping : 4; -} ; -#endif - -/* - * SW Context Command and Freelist Queue Descriptors - */ -struct cmdQ_ce { - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(dma_addr); - DEFINE_DMA_UNMAP_LEN(dma_len); -}; - -struct freelQ_ce { - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(dma_addr); - DEFINE_DMA_UNMAP_LEN(dma_len); -}; - -/* - * SW command, freelist and response rings - */ -struct cmdQ { - unsigned long status; /* HW DMA fetch status */ - unsigned int in_use; /* # of in-use command descriptors */ - unsigned int size; /* # of descriptors */ - unsigned int processed; /* total # of descs HW has processed */ - unsigned int cleaned; /* total # of descs SW has reclaimed */ - unsigned int stop_thres; /* SW TX queue suspend threshold */ - u16 pidx; /* producer index (SW) */ - u16 cidx; /* consumer index (HW) */ - u8 genbit; /* current generation (=valid) bit */ - u8 sop; /* is next entry start of packet? 
*/ - struct cmdQ_e *entries; /* HW command descriptor Q */ - struct cmdQ_ce *centries; /* SW command context descriptor Q */ - dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ - spinlock_t lock; /* Lock to protect cmdQ enqueuing */ -}; - -struct freelQ { - unsigned int credits; /* # of available RX buffers */ - unsigned int size; /* free list capacity */ - u16 pidx; /* producer index (SW) */ - u16 cidx; /* consumer index (HW) */ - u16 rx_buffer_size; /* Buffer size on this free list */ - u16 dma_offset; /* DMA offset to align IP headers */ - u16 recycleq_idx; /* skb recycle q to use */ - u8 genbit; /* current generation (=valid) bit */ - struct freelQ_e *entries; /* HW freelist descriptor Q */ - struct freelQ_ce *centries; /* SW freelist context descriptor Q */ - dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */ -}; - -struct respQ { - unsigned int credits; /* credits to be returned to SGE */ - unsigned int size; /* # of response Q descriptors */ - u16 cidx; /* consumer index (SW) */ - u8 genbit; /* current generation(=valid) bit */ - struct respQ_e *entries; /* HW response descriptor Q */ - dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */ -}; - -/* Bit flags for cmdQ.status */ -enum { - CMDQ_STAT_RUNNING = 1, /* fetch engine is running */ - CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */ -}; - -/* T204 TX SW scheduler */ - -/* Per T204 TX port */ -struct sched_port { - unsigned int avail; /* available bits - quota */ - unsigned int drain_bits_per_1024ns; /* drain rate */ - unsigned int speed; /* drain rate, mbps */ - unsigned int mtu; /* mtu size */ - struct sk_buff_head skbq; /* pending skbs */ -}; - -/* Per T204 device */ -struct sched { - ktime_t last_updated; /* last time quotas were computed */ - unsigned int max_avail; /* max bits to be sent to any port */ - unsigned int port; /* port index (round robin ports) */ - unsigned int num; /* num skbs in per port queues */ - struct sched_port p[MAX_NPORTS]; - struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */ -}; -static void restart_sched(unsigned long); - - -/* - * Main SGE data structure - * - * Interrupts are handled by a single CPU and it is likely that on a MP system - * the application is migrated to another CPU. In that scenario, we try to - * separate the RX(in irq context) and TX state in order to decrease memory - * contention. 
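A note on the `genbit` fields in the queue structures above: the rings carry no explicit per-entry valid flag. Hardware stamps each descriptor it writes with the current generation bit, software treats an entry as valid while the entry's bit matches the queue's expected `genbit`, and the expected value flips each time the consumer index wraps, so stale entries left over from the previous lap compare unequal. A sketch of the resulting consume loop; process_entry() is a hypothetical stand-in for the real per-entry work:

static void process_entry(struct respQ_e *e);	/* hypothetical hook */

/* Drain all currently valid response-queue entries. */
static void respQ_consume_sketch(struct respQ *q)
{
	struct respQ_e *e = &q->entries[q->cidx];

	while (e->GenerationBit == q->genbit) {
		process_entry(e);		/* hypothetical */

		if (++q->cidx == q->size) {
			q->cidx = 0;		/* wrapped: next lap ... */
			q->genbit ^= 1;		/* ... expects the other bit */
		}
		e = &q->entries[q->cidx];
	}
}

The real path additionally orders reads of the entry body after the generation-bit check and prefetches ahead; the sketch keeps only the ownership logic.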
- */ -struct sge { - struct adapter *adapter; /* adapter backpointer */ - struct net_device *netdev; /* netdevice backpointer */ - struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */ - struct respQ respQ; /* response Q */ - unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */ - unsigned int rx_pkt_pad; /* RX padding for L2 packets */ - unsigned int jumbo_fl; /* jumbo freelist Q index */ - unsigned int intrtimer_nres; /* no-resource interrupt timer */ - unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */ - struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ - struct timer_list espibug_timer; - unsigned long espibug_timeout; - struct sk_buff *espibug_skb[MAX_NPORTS]; - u32 sge_control; /* shadow value of sge control reg */ - struct sge_intr_counts stats; - struct sge_port_stats __percpu *port_stats[MAX_NPORTS]; - struct sched *tx_sched; - struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; -}; - -static const u8 ch_mac_addr[ETH_ALEN] = { - 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 -}; - -/* - * stop tasklet and free all pending skb's - */ -static void tx_sched_stop(struct sge *sge) -{ - struct sched *s = sge->tx_sched; - int i; - - tasklet_kill(&s->sched_tsk); - - for (i = 0; i < MAX_NPORTS; i++) - __skb_queue_purge(&s->p[s->port].skbq); -} - -/* - * t1_sched_update_parms() is called when the MTU or link speed changes. It - * re-computes scheduler parameters to scope with the change. - */ -unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, - unsigned int mtu, unsigned int speed) -{ - struct sched *s = sge->tx_sched; - struct sched_port *p = &s->p[port]; - unsigned int max_avail_segs; - - pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed); - if (speed) - p->speed = speed; - if (mtu) - p->mtu = mtu; - - if (speed || mtu) { - unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40); - do_div(drain, (p->mtu + 50) * 1000); - p->drain_bits_per_1024ns = (unsigned int) drain; - - if (p->speed < 1000) - p->drain_bits_per_1024ns = - 90 * p->drain_bits_per_1024ns / 100; - } - - if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) { - p->drain_bits_per_1024ns -= 16; - s->max_avail = max(4096U, p->mtu + 16 + 14 + 4); - max_avail_segs = max(1U, 4096 / (p->mtu - 40)); - } else { - s->max_avail = 16384; - max_avail_segs = max(1U, 9000 / (p->mtu - 40)); - } - - pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u " - "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu, - p->speed, s->max_avail, max_avail_segs, - p->drain_bits_per_1024ns); - - return max_avail_segs * (p->mtu - 40); -} - -#if 0 - -/* - * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of - * data that can be pushed per port. - */ -void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val) -{ - struct sched *s = sge->tx_sched; - unsigned int i; - - s->max_avail = val; - for (i = 0; i < MAX_NPORTS; i++) - t1_sched_update_parms(sge, i, 0, 0); -} - -/* - * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port - * is draining. 
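The rate computation in t1_sched_update_parms() deserves a gloss: `speed` is in Mbit/s, i.e. bits per 1000 ns, so the factor 1024/1000 rescales it to the scheduler's 1024 ns tick, while (mtu - 40)/(mtu + 50) keeps only the payload fraction of each frame, discounting roughly 40 bytes of TCP/IP headers and about 50 bytes of per-frame wire overhead. The same arithmetic in plain C, with ordinary division standing in for do_div():

/* drain rate in payload bits per 1024 ns tick */
static unsigned int drain_bits_per_1024ns(unsigned int mtu,
					  unsigned int speed_mbps)
{
	unsigned long long drain = 1024ULL * speed_mbps * (mtu - 40);

	return (unsigned int)(drain / ((mtu + 50) * 1000));
}

For example, mtu 1500 at 1000 Mbit/s gives 1024 * 1000 * 1460 / 1550000 = 964 bits per 1024 ns, i.e. roughly 941 Mbit/s of usable payload.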
- */ -void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port, - unsigned int val) -{ - struct sched *s = sge->tx_sched; - struct sched_port *p = &s->p[port]; - p->drain_bits_per_1024ns = val * 1024 / 1000; - t1_sched_update_parms(sge, port, 0, 0); -} - -#endif /* 0 */ - - -/* - * get_clock() implements a ns clock (see ktime_get) - */ -static inline ktime_t get_clock(void) -{ - struct timespec ts; - - ktime_get_ts(&ts); - return timespec_to_ktime(ts); -} - -/* - * tx_sched_init() allocates resources and does basic initialization. - */ -static int tx_sched_init(struct sge *sge) -{ - struct sched *s; - int i; - - s = kzalloc(sizeof (struct sched), GFP_KERNEL); - if (!s) - return -ENOMEM; - - pr_debug("tx_sched_init\n"); - tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge); - sge->tx_sched = s; - - for (i = 0; i < MAX_NPORTS; i++) { - skb_queue_head_init(&s->p[i].skbq); - t1_sched_update_parms(sge, i, 1500, 1000); - } - - return 0; -} - -/* - * sched_update_avail() computes the delta since the last time it was called - * and updates the per port quota (number of bits that can be sent to the any - * port). - */ -static inline int sched_update_avail(struct sge *sge) -{ - struct sched *s = sge->tx_sched; - ktime_t now = get_clock(); - unsigned int i; - long long delta_time_ns; - - delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated)); - - pr_debug("sched_update_avail delta=%lld\n", delta_time_ns); - if (delta_time_ns < 15000) - return 0; - - for (i = 0; i < MAX_NPORTS; i++) { - struct sched_port *p = &s->p[i]; - unsigned int delta_avail; - - delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13; - p->avail = min(p->avail + delta_avail, s->max_avail); - } - - s->last_updated = now; - - return 1; -} - -/* - * sched_skb() is called from two different places. In the tx path, any - * packet generating load on an output port will call sched_skb() - * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq - * context (skb == NULL). - * The scheduler only returns a skb (which will then be sent) if the - * length of the skb is <= the current quota of the output port. - */ -static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, - unsigned int credits) -{ - struct sched *s = sge->tx_sched; - struct sk_buff_head *skbq; - unsigned int i, len, update = 1; - - pr_debug("sched_skb %p\n", skb); - if (!skb) { - if (!s->num) - return NULL; - } else { - skbq = &s->p[skb->dev->if_port].skbq; - __skb_queue_tail(skbq, skb); - s->num++; - skb = NULL; - } - - if (credits < MAX_SKB_FRAGS + 1) - goto out; - -again: - for (i = 0; i < MAX_NPORTS; i++) { - s->port = (s->port + 1) & (MAX_NPORTS - 1); - skbq = &s->p[s->port].skbq; - - skb = skb_peek(skbq); - - if (!skb) - continue; - - len = skb->len; - if (len <= s->p[s->port].avail) { - s->p[s->port].avail -= len; - s->num--; - __skb_unlink(skb, skbq); - goto out; - } - skb = NULL; - } - - if (update-- && sched_update_avail(sge)) - goto again; - -out: - /* If there are more pending skbs, we use the hardware to schedule us - * again. - */ - if (s->num && !skb) { - struct cmdQ *q = &sge->cmdQ[0]; - clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); - if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { - set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); - writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); - } - } - pr_debug("sched_skb ret %p\n", skb); - - return skb; -} - -/* - * PIO to indicate that memory mapped Q contains valid descriptor(s). 
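One subtlety in sched_update_avail() above: `p->drain_bits_per_1024ns * delta_time_ns` is in bit-nanoseconds, so shifting right by 10 would yield bits; the extra shift by 3 divides by 8, meaning `p->avail` actually accumulates a quota in bytes, the unit sched_skb() later compares against skb->len. Checking the scale with the numbers from the previous sketch:

/* (bits per 1024 ns * ns) >> 10 = bits; >> 3 more converts to bytes */
static unsigned int quota_gained_bytes(unsigned long long drain_bits_per_1024ns,
				       unsigned long long delta_time_ns)
{
	return (drain_bits_per_1024ns * delta_time_ns) >> 13;
}

At 964 bits per 1024 ns, the 15000 ns minimum update interval yields (964 * 15000) >> 13 = 1765 bytes of fresh quota, a little more than one full-sized frame.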
- */ -static inline void doorbell_pio(struct adapter *adapter, u32 val) -{ - wmb(); - writel(val, adapter->regs + A_SG_DOORBELL); -} - -/* - * Frees all RX buffers on the freelist Q. The caller must make sure that - * the SGE is turned off before calling this function. - */ -static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) -{ - unsigned int cidx = q->cidx; - - while (q->credits--) { - struct freelQ_ce *ce = &q->centries[cidx]; - - pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), - dma_unmap_len(ce, dma_len), - PCI_DMA_FROMDEVICE); - dev_kfree_skb(ce->skb); - ce->skb = NULL; - if (++cidx == q->size) - cidx = 0; - } -} - -/* - * Free RX free list and response queue resources. - */ -static void free_rx_resources(struct sge *sge) -{ - struct pci_dev *pdev = sge->adapter->pdev; - unsigned int size, i; - - if (sge->respQ.entries) { - size = sizeof(struct respQ_e) * sge->respQ.size; - pci_free_consistent(pdev, size, sge->respQ.entries, - sge->respQ.dma_addr); - } - - for (i = 0; i < SGE_FREELQ_N; i++) { - struct freelQ *q = &sge->freelQ[i]; - - if (q->centries) { - free_freelQ_buffers(pdev, q); - kfree(q->centries); - } - if (q->entries) { - size = sizeof(struct freelQ_e) * q->size; - pci_free_consistent(pdev, size, q->entries, - q->dma_addr); - } - } -} - -/* - * Allocates basic RX resources, consisting of memory mapped freelist Qs and a - * response queue. - */ -static int alloc_rx_resources(struct sge *sge, struct sge_params *p) -{ - struct pci_dev *pdev = sge->adapter->pdev; - unsigned int size, i; - - for (i = 0; i < SGE_FREELQ_N; i++) { - struct freelQ *q = &sge->freelQ[i]; - - q->genbit = 1; - q->size = p->freelQ_size[i]; - q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; - size = sizeof(struct freelQ_e) * q->size; - q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); - if (!q->entries) - goto err_no_mem; - - size = sizeof(struct freelQ_ce) * q->size; - q->centries = kzalloc(size, GFP_KERNEL); - if (!q->centries) - goto err_no_mem; - } - - /* - * Calculate the buffer sizes for the two free lists. FL0 accommodates - * regular sized Ethernet frames, FL1 is sized not to exceed 16K, - * including all the sk_buff overhead. - * - * Note: For T2 FL0 and FL1 are reversed. - */ - sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + - sizeof(struct cpl_rx_data) + - sge->freelQ[!sge->jumbo_fl].dma_offset; - - size = (16 * 1024) - - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - - sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; - - /* - * Setup which skb recycle Q should be used when recycling buffers from - * each free list. - */ - sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; - sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; - - sge->respQ.genbit = 1; - sge->respQ.size = SGE_RESPQ_E_N; - sge->respQ.credits = 0; - size = sizeof(struct respQ_e) * sge->respQ.size; - sge->respQ.entries = - pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); - if (!sge->respQ.entries) - goto err_no_mem; - return 0; - -err_no_mem: - free_rx_resources(sge); - return -ENOMEM; -} - -/* - * Reclaims n TX descriptors and frees the buffers associated with them. 
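Everything above rides on the generation-bit convention: software owns a descriptor only while the generation stored in the entry matches the queue's current genbit, and the bit flips each time an index wraps, so stale entries from the previous lap never look valid. A toy model of the consumer side (ring size and contents invented for illustration):

#include <stdio.h>

#define RING 4

struct ring { int gen[RING]; int cidx; int genbit; };

static int slot_ready(const struct ring *r)
{
        return r->gen[r->cidx] == r->genbit;
}

static void consume(struct ring *r)
{
        if (++r->cidx == RING) {
                r->cidx = 0;
                r->genbit ^= 1;  /* wrap: valid entries now carry the new gen */
        }
}

int main(void)
{
        struct ring r = { { 1, 1, 0, 0 }, 0, 1 };

        while (slot_ready(&r)) {
                printf("consumed slot %d\n", r.cidx);
                consume(&r);
        }
        return 0;
}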
- */ -static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) -{ - struct cmdQ_ce *ce; - struct pci_dev *pdev = sge->adapter->pdev; - unsigned int cidx = q->cidx; - - q->in_use -= n; - ce = &q->centries[cidx]; - while (n--) { - if (likely(dma_unmap_len(ce, dma_len))) { - pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), - dma_unmap_len(ce, dma_len), - PCI_DMA_TODEVICE); - if (q->sop) - q->sop = 0; - } - if (ce->skb) { - dev_kfree_skb_any(ce->skb); - q->sop = 1; - } - ce++; - if (++cidx == q->size) { - cidx = 0; - ce = q->centries; - } - } - q->cidx = cidx; -} - -/* - * Free TX resources. - * - * Assumes that SGE is stopped and all interrupts are disabled. - */ -static void free_tx_resources(struct sge *sge) -{ - struct pci_dev *pdev = sge->adapter->pdev; - unsigned int size, i; - - for (i = 0; i < SGE_CMDQ_N; i++) { - struct cmdQ *q = &sge->cmdQ[i]; - - if (q->centries) { - if (q->in_use) - free_cmdQ_buffers(sge, q, q->in_use); - kfree(q->centries); - } - if (q->entries) { - size = sizeof(struct cmdQ_e) * q->size; - pci_free_consistent(pdev, size, q->entries, - q->dma_addr); - } - } -} - -/* - * Allocates basic TX resources, consisting of memory mapped command Qs. - */ -static int alloc_tx_resources(struct sge *sge, struct sge_params *p) -{ - struct pci_dev *pdev = sge->adapter->pdev; - unsigned int size, i; - - for (i = 0; i < SGE_CMDQ_N; i++) { - struct cmdQ *q = &sge->cmdQ[i]; - - q->genbit = 1; - q->sop = 1; - q->size = p->cmdQ_size[i]; - q->in_use = 0; - q->status = 0; - q->processed = q->cleaned = 0; - q->stop_thres = 0; - spin_lock_init(&q->lock); - size = sizeof(struct cmdQ_e) * q->size; - q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); - if (!q->entries) - goto err_no_mem; - - size = sizeof(struct cmdQ_ce) * q->size; - q->centries = kzalloc(size, GFP_KERNEL); - if (!q->centries) - goto err_no_mem; - } - - /* - * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE - * only. For queue 0 set the stop threshold so we can handle one more - * packet from each port, plus reserve an additional 24 entries for - * Ethernet packets only. Queue 1 never suspends nor do we reserve - * space for Ethernet packets. - */ - sge->cmdQ[0].stop_thres = sge->adapter->params.nports * - (MAX_SKB_FRAGS + 1); - return 0; - -err_no_mem: - free_tx_resources(sge); - return -ENOMEM; -} - -static inline void setup_ring_params(struct adapter *adapter, u64 addr, - u32 size, int base_reg_lo, - int base_reg_hi, int size_reg) -{ - writel((u32)addr, adapter->regs + base_reg_lo); - writel(addr >> 32, adapter->regs + base_reg_hi); - writel(size, adapter->regs + size_reg); -} - -/* - * Enable/disable VLAN acceleration. - */ -void t1_vlan_mode(struct adapter *adapter, u32 features) -{ - struct sge *sge = adapter->sge; - - if (features & NETIF_F_HW_VLAN_RX) - sge->sge_control |= F_VLAN_XTRACT; - else - sge->sge_control &= ~F_VLAN_XTRACT; - if (adapter->open_device_map) { - writel(sge->sge_control, adapter->regs + A_SG_CONTROL); - readl(adapter->regs + A_SG_CONTROL); /* flush */ - } -} - -/* - * Programs the various SGE registers. However, the engine is not yet enabled, - * but sge->sge_control is setup and ready to go. 
- */ -static void configure_sge(struct sge *sge, struct sge_params *p) -{ - struct adapter *ap = sge->adapter; - - writel(0, ap->regs + A_SG_CONTROL); - setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, - A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); - setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size, - A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE); - setup_ring_params(ap, sge->freelQ[0].dma_addr, - sge->freelQ[0].size, A_SG_FL0BASELWR, - A_SG_FL0BASEUPR, A_SG_FL0SIZE); - setup_ring_params(ap, sge->freelQ[1].dma_addr, - sge->freelQ[1].size, A_SG_FL1BASELWR, - A_SG_FL1BASEUPR, A_SG_FL1SIZE); - - /* The threshold comparison uses <. */ - writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD); - - setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size, - A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE); - writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT); - - sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | - F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | - V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | - V_RX_PKT_OFFSET(sge->rx_pkt_pad); - -#if defined(__BIG_ENDIAN_BITFIELD) - sge->sge_control |= F_ENABLE_BIG_ENDIAN; -#endif - - /* Initialize no-resource timer */ - sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap); - - t1_sge_set_coalesce_params(sge, p); -} - -/* - * Return the payload capacity of the jumbo free-list buffers. - */ -static inline unsigned int jumbo_payload_capacity(const struct sge *sge) -{ - return sge->freelQ[sge->jumbo_fl].rx_buffer_size - - sge->freelQ[sge->jumbo_fl].dma_offset - - sizeof(struct cpl_rx_data); -} - -/* - * Frees all SGE related resources and the sge structure itself - */ -void t1_sge_destroy(struct sge *sge) -{ - int i; - - for_each_port(sge->adapter, i) - free_percpu(sge->port_stats[i]); - - kfree(sge->tx_sched); - free_tx_resources(sge); - free_rx_resources(sge); - kfree(sge); -} - -/* - * Allocates new RX buffers on the freelist Q (and tracks them on the freelist - * context Q) until the Q is full or alloc_skb fails. - * - * It is possible that the generation bits already match, indicating that the - * buffer is already valid and nothing needs to be done. This happens when we - * copied a received buffer into a new sk_buff during the interrupt processing. - * - * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad), - * we specify a RX_OFFSET in order to make sure that the IP header is 4B - * aligned. 
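The capacity computed by jumbo_payload_capacity() above is simply the jumbo buffer size minus the bytes the driver itself consumes in front of the payload. A worked example with illustrative sizes (the real sizeof values depend on the kernel configuration, so the numbers below are assumptions, not the driver's):

#include <stdio.h>

int main(void)
{
        unsigned rx_buffer_size = 16 * 1024 - 320; /* 16KB minus aligned shared-info, assumed 320 */
        unsigned dma_offset     = 2;               /* NET_IP_ALIGN when the SGE does not pad */
        unsigned cpl_rx_data    = 16;              /* assumed CPL header size */

        printf("large_buf_capacity = %u bytes\n",
               rx_buffer_size - dma_offset - cpl_rx_data);  /* 16046 with these sizes */
        return 0;
}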
- */ -static void refill_free_list(struct sge *sge, struct freelQ *q) -{ - struct pci_dev *pdev = sge->adapter->pdev; - struct freelQ_ce *ce = &q->centries[q->pidx]; - struct freelQ_e *e = &q->entries[q->pidx]; - unsigned int dma_len = q->rx_buffer_size - q->dma_offset; - - while (q->credits < q->size) { - struct sk_buff *skb; - dma_addr_t mapping; - - skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC); - if (!skb) - break; - - skb_reserve(skb, q->dma_offset); - mapping = pci_map_single(pdev, skb->data, dma_len, - PCI_DMA_FROMDEVICE); - skb_reserve(skb, sge->rx_pkt_pad); - - ce->skb = skb; - dma_unmap_addr_set(ce, dma_addr, mapping); - dma_unmap_len_set(ce, dma_len, dma_len); - e->addr_lo = (u32)mapping; - e->addr_hi = (u64)mapping >> 32; - e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); - wmb(); - e->gen2 = V_CMD_GEN2(q->genbit); - - e++; - ce++; - if (++q->pidx == q->size) { - q->pidx = 0; - q->genbit ^= 1; - ce = q->centries; - e = q->entries; - } - q->credits++; - } -} - -/* - * Calls refill_free_list for both free lists. If we cannot fill at least 1/4 - * of both rings, we go into 'few interrupt mode' in order to give the system - * time to free up resources. - */ -static void freelQs_empty(struct sge *sge) -{ - struct adapter *adapter = sge->adapter; - u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE); - u32 irqholdoff_reg; - - refill_free_list(sge, &sge->freelQ[0]); - refill_free_list(sge, &sge->freelQ[1]); - - if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && - sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { - irq_reg |= F_FL_EXHAUSTED; - irqholdoff_reg = sge->fixed_intrtimer; - } else { - /* Clear the F_FL_EXHAUSTED interrupts for now */ - irq_reg &= ~F_FL_EXHAUSTED; - irqholdoff_reg = sge->intrtimer_nres; - } - writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER); - writel(irq_reg, adapter->regs + A_SG_INT_ENABLE); - - /* We reenable the Qs to force a freelist GTS interrupt later */ - doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE); -} - -#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA) -#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) -#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \ - F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) - -/* - * Disable SGE Interrupts - */ -void t1_sge_intr_disable(struct sge *sge) -{ - u32 val = readl(sge->adapter->regs + A_PL_ENABLE); - - writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); - writel(0, sge->adapter->regs + A_SG_INT_ENABLE); -} - -/* - * Enable SGE interrupts. - */ -void t1_sge_intr_enable(struct sge *sge) -{ - u32 en = SGE_INT_ENABLE; - u32 val = readl(sge->adapter->regs + A_PL_ENABLE); - - if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO) - en &= ~F_PACKET_TOO_BIG; - writel(en, sge->adapter->regs + A_SG_INT_ENABLE); - writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); -} - -/* - * Clear SGE interrupts. 
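refill_free_list() above shows the canonical producer pattern for DMA rings: fill in every descriptor field, issue wmb(), and only then write the word carrying the generation bit, so the hardware can never observe a half-written entry as valid. The same idea in portable C11, a sketch rather than the driver's code, with a release store standing in for the barrier:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
        uint64_t addr;          /* payload fields, written first */
        uint32_t len_gen;
        _Atomic uint32_t gen2;  /* written last; makes the entry visible */
};

static void publish(struct desc *d, uint64_t addr, uint32_t len, uint32_t gen)
{
        d->addr = addr;
        d->len_gen = len | (gen << 31);
        /* Release ordering plays the role of the driver's wmb(): no
         * payload store may be reordered after the validating store. */
        atomic_store_explicit(&d->gen2, gen, memory_order_release);
}

static int ready(struct desc *d, uint32_t gen)
{
        /* Acquire pairs with the release above (the hardware side in
         * the real driver; a consumer thread in this model). */
        return atomic_load_explicit(&d->gen2, memory_order_acquire) == gen;
}

int main(void)
{
        struct desc d = {0};

        publish(&d, 0x1000, 1536, 1);
        printf("ready: %d\n", ready(&d, 1));
        return 0;
}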
- */ -void t1_sge_intr_clear(struct sge *sge) -{ - writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); - writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); -} - -/* - * SGE 'Error' interrupt handler - */ -int t1_sge_intr_error_handler(struct sge *sge) -{ - struct adapter *adapter = sge->adapter; - u32 cause = readl(adapter->regs + A_SG_INT_CAUSE); - - if (adapter->port[0].dev->hw_features & NETIF_F_TSO) - cause &= ~F_PACKET_TOO_BIG; - if (cause & F_RESPQ_EXHAUSTED) - sge->stats.respQ_empty++; - if (cause & F_RESPQ_OVERFLOW) { - sge->stats.respQ_overflow++; - pr_alert("%s: SGE response queue overflow\n", - adapter->name); - } - if (cause & F_FL_EXHAUSTED) { - sge->stats.freelistQ_empty++; - freelQs_empty(sge); - } - if (cause & F_PACKET_TOO_BIG) { - sge->stats.pkt_too_big++; - pr_alert("%s: SGE max packet size exceeded\n", - adapter->name); - } - if (cause & F_PACKET_MISMATCH) { - sge->stats.pkt_mismatch++; - pr_alert("%s: SGE packet mismatch\n", adapter->name); - } - if (cause & SGE_INT_FATAL) - t1_fatal_err(adapter); - - writel(cause, adapter->regs + A_SG_INT_CAUSE); - return 0; -} - -const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) -{ - return &sge->stats; -} - -void t1_sge_get_port_stats(const struct sge *sge, int port, - struct sge_port_stats *ss) -{ - int cpu; - - memset(ss, 0, sizeof(*ss)); - for_each_possible_cpu(cpu) { - struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); - - ss->rx_cso_good += st->rx_cso_good; - ss->tx_cso += st->tx_cso; - ss->tx_tso += st->tx_tso; - ss->tx_need_hdrroom += st->tx_need_hdrroom; - ss->vlan_xtract += st->vlan_xtract; - ss->vlan_insert += st->vlan_insert; - } -} - -/** - * recycle_fl_buf - recycle a free list buffer - * @fl: the free list - * @idx: index of buffer to recycle - * - * Recycles the specified buffer on the given free list by adding it at - * the next available slot on the list. - */ -static void recycle_fl_buf(struct freelQ *fl, int idx) -{ - struct freelQ_e *from = &fl->entries[idx]; - struct freelQ_e *to = &fl->entries[fl->pidx]; - - fl->centries[fl->pidx] = fl->centries[idx]; - to->addr_lo = from->addr_lo; - to->addr_hi = from->addr_hi; - to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); - wmb(); - to->gen2 = V_CMD_GEN2(fl->genbit); - fl->credits++; - - if (++fl->pidx == fl->size) { - fl->pidx = 0; - fl->genbit ^= 1; - } -} - -static int copybreak __read_mostly = 256; -module_param(copybreak, int, 0); -MODULE_PARM_DESC(copybreak, "Receive copy threshold"); - -/** - * get_packet - return the next ingress packet buffer - * @pdev: the PCI device that received the packet - * @fl: the SGE free list holding the packet - * @len: the actual packet length, excluding any SGE padding - * - * Get the next packet from a free list and complete setup of the - * sk_buff. If the packet is small we make a copy and recycle the - * original buffer, otherwise we use the original buffer itself. If a - * positive drop threshold is supplied packets are dropped and their - * buffers recycled if (a) the number of remaining buffers is under the - * threshold and the packet is too big to copy, or (b) the packet should - * be copied but there is no memory for the copy. 
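The policy the kernel-doc above describes boils down to a three-way decision: copybreak trades one memcpy for keeping a large, already-mapped buffer on the free list. A sketch of just that decision (it assumes the small allocation succeeds; 256 is the module default shown above):

#include <stdio.h>

/* Mirrors the choice made by get_packet(): copy small frames and
 * recycle the buffer; refuse to drain the list below 2 credits;
 * otherwise hand the mapped buffer itself to the stack. */
static const char *rx_path(unsigned len, unsigned copybreak, unsigned credits)
{
        if (len < copybreak)
                return "copy into fresh skb, recycle DMA buffer";
        if (credits < 2)
                return "drop packet, recycle buffer";
        return "unmap buffer and pass it up";
}

int main(void)
{
        printf("64B,  9 credits: %s\n", rx_path(64, 256, 9));
        printf("9KB,  9 credits: %s\n", rx_path(9000, 256, 9));
        printf("9KB,  1 credit:  %s\n", rx_path(9000, 256, 1));
        return 0;
}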
- */ -static inline struct sk_buff *get_packet(struct pci_dev *pdev, - struct freelQ *fl, unsigned int len) -{ - struct sk_buff *skb; - const struct freelQ_ce *ce = &fl->centries[fl->cidx]; - - if (len < copybreak) { - skb = alloc_skb(len + 2, GFP_ATOMIC); - if (!skb) - goto use_orig_buf; - - skb_reserve(skb, 2); /* align IP header */ - skb_put(skb, len); - pci_dma_sync_single_for_cpu(pdev, - dma_unmap_addr(ce, dma_addr), - dma_unmap_len(ce, dma_len), - PCI_DMA_FROMDEVICE); - skb_copy_from_linear_data(ce->skb, skb->data, len); - pci_dma_sync_single_for_device(pdev, - dma_unmap_addr(ce, dma_addr), - dma_unmap_len(ce, dma_len), - PCI_DMA_FROMDEVICE); - recycle_fl_buf(fl, fl->cidx); - return skb; - } - -use_orig_buf: - if (fl->credits < 2) { - recycle_fl_buf(fl, fl->cidx); - return NULL; - } - - pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), - dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); - skb = ce->skb; - prefetch(skb->data); - - skb_put(skb, len); - return skb; -} - -/** - * unexpected_offload - handle an unexpected offload packet - * @adapter: the adapter - * @fl: the free list that received the packet - * - * Called when we receive an unexpected offload packet (e.g., the TOE - * function is disabled or the card is a NIC). Prints a message and - * recycles the buffer. - */ -static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) -{ - struct freelQ_ce *ce = &fl->centries[fl->cidx]; - struct sk_buff *skb = ce->skb; - - pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr), - dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); - pr_err("%s: unexpected offload packet, cmd %u\n", - adapter->name, *skb->data); - recycle_fl_buf(fl, fl->cidx); -} - -/* - * T1/T2 SGE limits the maximum DMA size per TX descriptor to - * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the - * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner. - * Note that the *_large_page_tx_descs stuff will be optimized out when - * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN. - * - * compute_large_page_descs() computes how many additional descriptors are - * required to break down the stack's request. - */ -static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) -{ - unsigned int count = 0; - - if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { - unsigned int nfrags = skb_shinfo(skb)->nr_frags; - unsigned int i, len = skb_headlen(skb); - while (len > SGE_TX_DESC_MAX_PLEN) { - count++; - len -= SGE_TX_DESC_MAX_PLEN; - } - for (i = 0; nfrags--; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - len = frag->size; - while (len > SGE_TX_DESC_MAX_PLEN) { - count++; - len -= SGE_TX_DESC_MAX_PLEN; - } - } - } - return count; -} - -/* - * Write a cmdQ entry. - * - * Since this function writes the 'flags' field, it must not be used to - * write the first cmdQ entry. - */ -static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping, - unsigned int len, unsigned int gen, - unsigned int eop) -{ - BUG_ON(len > SGE_TX_DESC_MAX_PLEN); - - e->addr_lo = (u32)mapping; - e->addr_hi = (u64)mapping >> 32; - e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen); - e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen); -} - -/* - * See comment for previous function. - * - * write_tx_descs_large_page() writes additional SGE tx descriptors if - * *desc_len exceeds HW's capability. 
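compute_large_page_tx_descs() above only matters on architectures where PAGE_SIZE exceeds the 16KB per-descriptor DMA limit (64KB pages, for example); every contiguous chunk then costs one extra descriptor per full 16KB beyond the first. A worked check of that count:

#include <assert.h>
#include <stdio.h>

#define MAX_PLEN (16 * 1024)    /* mirrors SGE_TX_DESC_MAX_PLEN */

static unsigned extra_descs(unsigned len)
{
        unsigned count = 0;

        while (len > MAX_PLEN) {        /* same loop shape as the driver */
                count++;
                len -= MAX_PLEN;
        }
        return count;
}

int main(void)
{
        assert(extra_descs(16 * 1024) == 0);    /* fits in one descriptor */
        assert(extra_descs(60 * 1024) == 3);    /* 3 extra, 4 descriptors total */
        printf("ok\n");
        return 0;
}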
- */ -static inline unsigned int write_large_page_tx_descs(unsigned int pidx, - struct cmdQ_e **e, - struct cmdQ_ce **ce, - unsigned int *gen, - dma_addr_t *desc_mapping, - unsigned int *desc_len, - unsigned int nfrags, - struct cmdQ *q) -{ - if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { - struct cmdQ_e *e1 = *e; - struct cmdQ_ce *ce1 = *ce; - - while (*desc_len > SGE_TX_DESC_MAX_PLEN) { - *desc_len -= SGE_TX_DESC_MAX_PLEN; - write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN, - *gen, nfrags == 0 && *desc_len == 0); - ce1->skb = NULL; - dma_unmap_len_set(ce1, dma_len, 0); - *desc_mapping += SGE_TX_DESC_MAX_PLEN; - if (*desc_len) { - ce1++; - e1++; - if (++pidx == q->size) { - pidx = 0; - *gen ^= 1; - ce1 = q->centries; - e1 = q->entries; - } - } - } - *e = e1; - *ce = ce1; - } - return pidx; -} - -/* - * Write the command descriptors to transmit the given skb starting at - * descriptor pidx with the given generation. - */ -static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, - unsigned int pidx, unsigned int gen, - struct cmdQ *q) -{ - dma_addr_t mapping, desc_mapping; - struct cmdQ_e *e, *e1; - struct cmdQ_ce *ce; - unsigned int i, flags, first_desc_len, desc_len, - nfrags = skb_shinfo(skb)->nr_frags; - - e = e1 = &q->entries[pidx]; - ce = &q->centries[pidx]; - - mapping = pci_map_single(adapter->pdev, skb->data, - skb_headlen(skb), PCI_DMA_TODEVICE); - - desc_mapping = mapping; - desc_len = skb_headlen(skb); - - flags = F_CMD_DATAVALID | F_CMD_SOP | - V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | - V_CMD_GEN2(gen); - first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ? - desc_len : SGE_TX_DESC_MAX_PLEN; - e->addr_lo = (u32)desc_mapping; - e->addr_hi = (u64)desc_mapping >> 32; - e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen); - ce->skb = NULL; - dma_unmap_len_set(ce, dma_len, 0); - - if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN && - desc_len > SGE_TX_DESC_MAX_PLEN) { - desc_mapping += first_desc_len; - desc_len -= first_desc_len; - e1++; - ce++; - if (++pidx == q->size) { - pidx = 0; - gen ^= 1; - e1 = q->entries; - ce = q->centries; - } - pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, - &desc_mapping, &desc_len, - nfrags, q); - - if (likely(desc_len)) - write_tx_desc(e1, desc_mapping, desc_len, gen, - nfrags == 0); - } - - ce->skb = NULL; - dma_unmap_addr_set(ce, dma_addr, mapping); - dma_unmap_len_set(ce, dma_len, skb_headlen(skb)); - - for (i = 0; nfrags--; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - e1++; - ce++; - if (++pidx == q->size) { - pidx = 0; - gen ^= 1; - e1 = q->entries; - ce = q->centries; - } - - mapping = pci_map_page(adapter->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); - desc_mapping = mapping; - desc_len = frag->size; - - pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, - &desc_mapping, &desc_len, - nfrags, q); - if (likely(desc_len)) - write_tx_desc(e1, desc_mapping, desc_len, gen, - nfrags == 0); - ce->skb = NULL; - dma_unmap_addr_set(ce, dma_addr, mapping); - dma_unmap_len_set(ce, dma_len, frag->size); - } - ce->skb = skb; - wmb(); - e->flags = flags; -} - -/* - * Clean up completed Tx buffers. - */ -static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) -{ - unsigned int reclaim = q->processed - q->cleaned; - - if (reclaim) { - pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n", - q->processed, q->cleaned); - free_cmdQ_buffers(sge, q, reclaim); - q->cleaned += reclaim; - } -} - -/* - * Called from tasklet. 
Checks the scheduler for any
- * pending skbs that can be sent.
- */
-static void restart_sched(unsigned long arg)
-{
-        struct sge *sge = (struct sge *) arg;
-        struct adapter *adapter = sge->adapter;
-        struct cmdQ *q = &sge->cmdQ[0];
-        struct sk_buff *skb;
-        unsigned int credits, queued_skb = 0;
-
-        spin_lock(&q->lock);
-        reclaim_completed_tx(sge, q);
-
-        credits = q->size - q->in_use;
-        pr_debug("restart_sched credits=%d\n", credits);
-        while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
-                unsigned int genbit, pidx, count;
-                count = 1 + skb_shinfo(skb)->nr_frags;
-                count += compute_large_page_tx_descs(skb);
-                q->in_use += count;
-                genbit = q->genbit;
-                pidx = q->pidx;
-                q->pidx += count;
-                if (q->pidx >= q->size) {
-                        q->pidx -= q->size;
-                        q->genbit ^= 1;
-                }
-                write_tx_descs(adapter, skb, pidx, genbit, q);
-                credits = q->size - q->in_use;
-                queued_skb = 1;
-        }
-
-        if (queued_skb) {
-                clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
-                if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
-                        set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
-                        writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
-                }
-        }
-        spin_unlock(&q->lock);
-}
-
-/**
- * sge_rx - process an ingress Ethernet packet
- * @sge: the sge structure
- * @fl: the free list that contains the packet buffer
- * @len: the packet length
- *
- * Process an ingress Ethernet packet and deliver it to the stack.
- */
-static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
-{
-        struct sk_buff *skb;
-        const struct cpl_rx_pkt *p;
-        struct adapter *adapter = sge->adapter;
-        struct sge_port_stats *st;
-        struct net_device *dev;
-
-        skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
-        if (unlikely(!skb)) {
-                sge->stats.rx_drops++;
-                return;
-        }
-
-        p = (const struct cpl_rx_pkt *) skb->data;
-        if (p->iff >= adapter->params.nports) {
-                kfree_skb(skb);
-                return;
-        }
-        __skb_pull(skb, sizeof(*p));
-
-        st = this_cpu_ptr(sge->port_stats[p->iff]);
-        dev = adapter->port[p->iff].dev;
-
-        skb->protocol = eth_type_trans(skb, dev);
-        if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
-            skb->protocol == htons(ETH_P_IP) &&
-            (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
-                ++st->rx_cso_good;
-                skb->ip_summed = CHECKSUM_UNNECESSARY;
-        } else
-                skb_checksum_none_assert(skb);
-
-        if (p->vlan_valid) {
-                st->vlan_xtract++;
-                __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
-        }
-        netif_receive_skb(skb);
-}
-
-/*
- * Returns true if a command queue has enough available descriptors that
- * we can resume Tx operation after temporarily disabling its packet queue.
- */
-static inline int enough_free_Tx_descs(const struct cmdQ *q)
-{
-        unsigned int r = q->processed - q->cleaned;
-
-        return q->in_use - r < (q->size >> 1);
-}
-
-/*
- * Called when sufficient space has become available in the SGE command queues
- * after the Tx packet schedulers have been suspended to restart the Tx path.
- */
-static void restart_tx_queues(struct sge *sge)
-{
-        struct adapter *adap = sge->adapter;
-        int i;
-
-        if (!enough_free_Tx_descs(&sge->cmdQ[0]))
-                return;
-
-        for_each_port(adap, i) {
-                struct net_device *nd = adap->port[i].dev;
-
-                if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
-                    netif_running(nd)) {
-                        sge->stats.cmdQ_restarted[2]++;
-                        netif_wake_queue(nd);
-                }
-        }
-}
-
-/*
- * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
- * information.
- */
-static unsigned int update_tx_info(struct adapter *adapter,
-                                   unsigned int flags,
-                                   unsigned int pr0)
-{
-        struct sge *sge = adapter->sge;
-        struct cmdQ *cmdq = &sge->cmdQ[0];
-
-        cmdq->processed += pr0;
-        if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
-                freelQs_empty(sge);
-                flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
-        }
-        if (flags & F_CMDQ0_ENABLE) {
-                clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
-
-                if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
-                    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
-                        set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
-                        writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
-                }
-                if (sge->tx_sched)
-                        tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
-
-                flags &= ~F_CMDQ0_ENABLE;
-        }
-
-        if (unlikely(sge->stopped_tx_queues != 0))
-                restart_tx_queues(sge);
-
-        return flags;
-}
-
-/*
- * Process SGE responses, up to the supplied budget. Returns the number of
- * responses processed. A negative budget is effectively unlimited.
- */
-static int process_responses(struct adapter *adapter, int budget)
-{
-        struct sge *sge = adapter->sge;
-        struct respQ *q = &sge->respQ;
-        struct respQ_e *e = &q->entries[q->cidx];
-        int done = 0;
-        unsigned int flags = 0;
-        unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
-
-        while (done < budget && e->GenerationBit == q->genbit) {
-                flags |= e->Qsleeping;
-
-                cmdq_processed[0] += e->Cmdq0CreditReturn;
-                cmdq_processed[1] += e->Cmdq1CreditReturn;
-
-                /* We batch updates to the TX side to avoid cacheline
-                 * ping-pong of TX state information on MP where the sender
-                 * might run on a different CPU than this function...
-                 */
-                if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
-                        flags = update_tx_info(adapter, flags, cmdq_processed[0]);
-                        cmdq_processed[0] = 0;
-                }
-
-                if (unlikely(cmdq_processed[1] > 16)) {
-                        sge->cmdQ[1].processed += cmdq_processed[1];
-                        cmdq_processed[1] = 0;
-                }
-
-                if (likely(e->DataValid)) {
-                        struct freelQ *fl = &sge->freelQ[e->FreelistQid];
-
-                        BUG_ON(!e->Sop || !e->Eop);
-                        if (unlikely(e->Offload))
-                                unexpected_offload(adapter, fl);
-                        else
-                                sge_rx(sge, fl, e->BufferLength);
-
-                        ++done;
-
-                        /*
-                         * Note: this depends on each packet consuming a
-                         * single free-list buffer; cf. the BUG above.
-                         */
-                        if (++fl->cidx == fl->size)
-                                fl->cidx = 0;
-                        prefetch(fl->centries[fl->cidx].skb);
-
-                        if (unlikely(--fl->credits <
-                                     fl->size - SGE_FREEL_REFILL_THRESH))
-                                refill_free_list(sge, fl);
-                } else
-                        sge->stats.pure_rsps++;
-
-                e++;
-                if (unlikely(++q->cidx == q->size)) {
-                        q->cidx = 0;
-                        q->genbit ^= 1;
-                        e = q->entries;
-                }
-                prefetch(e);
-
-                if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
-                        writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
-                        q->credits = 0;
-                }
-        }
-
-        flags = update_tx_info(adapter, flags, cmdq_processed[0]);
-        sge->cmdQ[1].processed += cmdq_processed[1];
-
-        return done;
-}
-
-static inline int responses_pending(const struct adapter *adapter)
-{
-        const struct respQ *Q = &adapter->sge->respQ;
-        const struct respQ_e *e = &Q->entries[Q->cidx];
-
-        return e->GenerationBit == Q->genbit;
-}
-
-/*
- * A simpler version of process_responses() that handles only pure (i.e.,
- * non data-carrying) responses. Such responses are too lightweight to justify
- * calling a softirq when using NAPI, so we handle them specially in hard
- * interrupt context. The function is called with a pointer to a response,
- * which the caller must ensure is a valid pure response. Returns 1 if it
- * encounters a valid data-carrying response, 0 otherwise.
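Two constants in process_responses() deserve a gloss: cmdQ0 credit returns are folded back at most every 64 credits and cmdQ1 every 16, so a burst of pure credit responses does not bounce Tx-side cachelines once per event. A sketch of that batching control flow, with invented structure names:

#include <stdio.h>

struct resp { int data_valid; int cmdq0_credits; };

static int process(const struct resp *q, int n, int budget)
{
        int done = 0, batch = 0;

        for (int i = 0; i < n && done < budget; i++) {
                batch += q[i].cmdq0_credits;
                if (batch > 64) {               /* flush rarely, not per event */
                        printf("return %d cmdQ0 credits\n", batch);
                        batch = 0;
                }
                if (q[i].data_valid)
                        done++;                 /* only payload counts toward budget */
        }
        if (batch)
                printf("final flush: %d credits\n", batch);
        return done;
}

int main(void)
{
        struct resp q[] = { {1, 2}, {0, 70}, {1, 1}, {1, 0} };

        printf("processed %d responses\n", process(q, 4, 8));
        return 0;
}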
- */ -static int process_pure_responses(struct adapter *adapter) -{ - struct sge *sge = adapter->sge; - struct respQ *q = &sge->respQ; - struct respQ_e *e = &q->entries[q->cidx]; - const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; - unsigned int flags = 0; - unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; - - prefetch(fl->centries[fl->cidx].skb); - if (e->DataValid) - return 1; - - do { - flags |= e->Qsleeping; - - cmdq_processed[0] += e->Cmdq0CreditReturn; - cmdq_processed[1] += e->Cmdq1CreditReturn; - - e++; - if (unlikely(++q->cidx == q->size)) { - q->cidx = 0; - q->genbit ^= 1; - e = q->entries; - } - prefetch(e); - - if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { - writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); - q->credits = 0; - } - sge->stats.pure_rsps++; - } while (e->GenerationBit == q->genbit && !e->DataValid); - - flags = update_tx_info(adapter, flags, cmdq_processed[0]); - sge->cmdQ[1].processed += cmdq_processed[1]; - - return e->GenerationBit == q->genbit; -} - -/* - * Handler for new data events when using NAPI. This does not need any locking - * or protection from interrupts as data interrupts are off at this point and - * other adapter interrupts do not interfere. - */ -int t1_poll(struct napi_struct *napi, int budget) -{ - struct adapter *adapter = container_of(napi, struct adapter, napi); - int work_done = process_responses(adapter, budget); - - if (likely(work_done < budget)) { - napi_complete(napi); - writel(adapter->sge->respQ.cidx, - adapter->regs + A_SG_SLEEPING); - } - return work_done; -} - -irqreturn_t t1_interrupt(int irq, void *data) -{ - struct adapter *adapter = data; - struct sge *sge = adapter->sge; - int handled; - - if (likely(responses_pending(adapter))) { - writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); - - if (napi_schedule_prep(&adapter->napi)) { - if (process_pure_responses(adapter)) - __napi_schedule(&adapter->napi); - else { - /* no data, no NAPI needed */ - writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); - /* undo schedule_prep */ - napi_enable(&adapter->napi); - } - } - return IRQ_HANDLED; - } - - spin_lock(&adapter->async_lock); - handled = t1_slow_intr_handler(adapter); - spin_unlock(&adapter->async_lock); - - if (!handled) - sge->stats.unhandled_irqs++; - - return IRQ_RETVAL(handled != 0); -} - -/* - * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. - * - * The code figures out how many entries the sk_buff will require in the - * cmdQ and updates the cmdQ data structure with the state once the enqueue - * has complete. Then, it doesn't access the global structure anymore, but - * uses the corresponding fields on the stack. In conjunction with a spinlock - * around that code, we can make the function reentrant without holding the - * lock when we actually enqueue (which might be expensive, especially on - * architectures with IO MMUs). - * - * This runs with softirqs disabled. 
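The CMDQ_STAT_RUNNING / CMDQ_STAT_LAST_PKT_DB pair that t1_sge_tx() below and update_tx_info() above manipulate is a small lock-free handshake: clear LAST_PKT_DB, then test-and-set RUNNING, and whoever wins the test-and-set rings the doorbell and records that it did. Modeled with C11 atomics (illustrative only; the driver uses the kernel's bit operations):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int running, last_pkt_db;

/* Transmit side: after enqueuing descriptors, kick the hardware
 * unless it is already believed to be running. */
static void enqueue_kick(void)
{
        atomic_store(&last_pkt_db, 0);
        if (!atomic_exchange(&running, 1)) {
                atomic_store(&last_pkt_db, 1);
                printf("ring doorbell\n");      /* stands in for the PIO write */
        }
}

/* Interrupt side: hardware reported the queue idle; if an enqueue
 * slipped in without ringing (LAST_PKT_DB still clear), re-kick. */
static void irq_queue_idle(int work_outstanding)
{
        atomic_store(&running, 0);
        if (work_outstanding && !atomic_exchange(&last_pkt_db, 1)) {
                atomic_store(&running, 1);
                printf("irq re-rings doorbell\n");
        }
}

int main(void)
{
        enqueue_kick();         /* first packet rings */
        enqueue_kick();         /* queue believed running: no ring */
        irq_queue_idle(1);      /* hw idle with work pending: re-ring */
        return 0;
}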
- */ -static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, - unsigned int qid, struct net_device *dev) -{ - struct sge *sge = adapter->sge; - struct cmdQ *q = &sge->cmdQ[qid]; - unsigned int credits, pidx, genbit, count, use_sched_skb = 0; - - if (!spin_trylock(&q->lock)) - return NETDEV_TX_LOCKED; - - reclaim_completed_tx(sge, q); - - pidx = q->pidx; - credits = q->size - q->in_use; - count = 1 + skb_shinfo(skb)->nr_frags; - count += compute_large_page_tx_descs(skb); - - /* Ethernet packet */ - if (unlikely(credits < count)) { - if (!netif_queue_stopped(dev)) { - netif_stop_queue(dev); - set_bit(dev->if_port, &sge->stopped_tx_queues); - sge->stats.cmdQ_full[2]++; - pr_err("%s: Tx ring full while queue awake!\n", - adapter->name); - } - spin_unlock(&q->lock); - return NETDEV_TX_BUSY; - } - - if (unlikely(credits - count < q->stop_thres)) { - netif_stop_queue(dev); - set_bit(dev->if_port, &sge->stopped_tx_queues); - sge->stats.cmdQ_full[2]++; - } - - /* T204 cmdQ0 skbs that are destined for a certain port have to go - * through the scheduler. - */ - if (sge->tx_sched && !qid && skb->dev) { -use_sched: - use_sched_skb = 1; - /* Note that the scheduler might return a different skb than - * the one passed in. - */ - skb = sched_skb(sge, skb, credits); - if (!skb) { - spin_unlock(&q->lock); - return NETDEV_TX_OK; - } - pidx = q->pidx; - count = 1 + skb_shinfo(skb)->nr_frags; - count += compute_large_page_tx_descs(skb); - } - - q->in_use += count; - genbit = q->genbit; - pidx = q->pidx; - q->pidx += count; - if (q->pidx >= q->size) { - q->pidx -= q->size; - q->genbit ^= 1; - } - spin_unlock(&q->lock); - - write_tx_descs(adapter, skb, pidx, genbit, q); - - /* - * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring - * the doorbell if the Q is asleep. There is a natural race, where - * the hardware is going to sleep just after we checked, however, - * then the interrupt handler will detect the outstanding TX packet - * and ring the doorbell for us. - */ - if (qid) - doorbell_pio(adapter, F_CMDQ1_ENABLE); - else { - clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); - if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { - set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); - writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); - } - } - - if (use_sched_skb) { - if (spin_trylock(&q->lock)) { - credits = q->size - q->in_use; - skb = NULL; - goto use_sched; - } - } - return NETDEV_TX_OK; -} - -#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) - -/* - * eth_hdr_len - return the length of an Ethernet header - * @data: pointer to the start of the Ethernet header - * - * Returns the length of an Ethernet header, including optional VLAN tag. - */ -static inline int eth_hdr_len(const void *data) -{ - const struct ethhdr *e = data; - - return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; -} - -/* - * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. - */ -netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct adapter *adapter = dev->ml_priv; - struct sge *sge = adapter->sge; - struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); - struct cpl_tx_pkt *cpl; - struct sk_buff *orig_skb = skb; - int ret; - - if (skb->protocol == htons(ETH_P_CPL5)) - goto send; - - /* - * We are using a non-standard hard_header_len. - * Allocate more header room in the rare cases it is not big enough. 
- */
-        if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
-                skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
-                ++st->tx_need_hdrroom;
-                dev_kfree_skb_any(orig_skb);
-                if (!skb)
-                        return NETDEV_TX_OK;
-        }
-
-        if (skb_shinfo(skb)->gso_size) {
-                int eth_type;
-                struct cpl_tx_pkt_lso *hdr;
-
-                ++st->tx_tso;
-
-                eth_type = skb_network_offset(skb) == ETH_HLEN ?
-                        CPL_ETH_II : CPL_ETH_II_VLAN;
-
-                hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
-                hdr->opcode = CPL_TX_PKT_LSO;
-                hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
-                hdr->ip_hdr_words = ip_hdr(skb)->ihl;
-                hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
-                hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
-                                                          skb_shinfo(skb)->gso_size));
-                hdr->len = htonl(skb->len - sizeof(*hdr));
-                cpl = (struct cpl_tx_pkt *)hdr;
-        } else {
-                /*
-                 * Packets shorter than ETH_HLEN can break the MAC, drop them
-                 * early. Also, we may get oversized packets because some
-                 * parts of the kernel don't handle our unusual hard_header_len
-                 * right, drop those too.
-                 */
-                if (unlikely(skb->len < ETH_HLEN ||
-                             skb->len > dev->mtu + eth_hdr_len(skb->data))) {
-                        pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
-                                 skb->len, eth_hdr_len(skb->data), dev->mtu);
-                        dev_kfree_skb_any(skb);
-                        return NETDEV_TX_OK;
-                }
-
-                if (skb->ip_summed == CHECKSUM_PARTIAL &&
-                    ip_hdr(skb)->protocol == IPPROTO_UDP) {
-                        if (unlikely(skb_checksum_help(skb))) {
-                                pr_debug("%s: unable to do udp checksum\n", dev->name);
-                                dev_kfree_skb_any(skb);
-                                return NETDEV_TX_OK;
-                        }
-                }
-
-                /* We assume this catches the gratuitous ARP, and we use
-                 * it to flush out stuck ESPI packets...
-                 */
-                if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
-                        if (skb->protocol == htons(ETH_P_ARP) &&
-                            arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
-                                adapter->sge->espibug_skb[dev->if_port] = skb;
-                                /* We want to re-use this skb later. We
-                                 * simply bump the reference count and it
-                                 * will not be freed...
-                                 */
-                                skb = skb_get(skb);
-                        }
-                }
-
-                cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
-                cpl->opcode = CPL_TX_PKT;
-                cpl->ip_csum_dis = 1;   /* SW calculates IP csum */
-                cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
-                /* the length field isn't used so don't bother setting it */
-
-                st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
-        }
-        cpl->iff = dev->if_port;
-
-        if (vlan_tx_tag_present(skb)) {
-                cpl->vlan_valid = 1;
-                cpl->vlan = htons(vlan_tx_tag_get(skb));
-                st->vlan_insert++;
-        } else
-                cpl->vlan_valid = 0;
-
-send:
-        ret = t1_sge_tx(skb, adapter, 0, dev);
-
-        /* If transmit busy, and we reallocated skb's due to headroom limit,
-         * then silently discard to avoid leak.
-         */
-        if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
-                dev_kfree_skb_any(skb);
-                ret = NETDEV_TX_OK;
-        }
-        return ret;
-}
-
-/*
- * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
- */
-static void sge_tx_reclaim_cb(unsigned long data)
-{
-        int i;
-        struct sge *sge = (struct sge *)data;
-
-        for (i = 0; i < SGE_CMDQ_N; ++i) {
-                struct cmdQ *q = &sge->cmdQ[i];
-
-                if (!spin_trylock(&q->lock))
-                        continue;
-
-                reclaim_completed_tx(sge, q);
-                if (i == 0 && q->in_use) {      /* flush pending credits */
-                        writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
-                }
-                spin_unlock(&q->lock);
-        }
-        mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
-}
-
-/*
- * Propagate changes of the SGE coalescing parameters to the HW.
- */
-int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
-{
-        sge->fixed_intrtimer = p->rx_coalesce_usecs *
-                core_ticks_per_usec(sge->adapter);
-        writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
-        return 0;
-}
-
-/*
- * Allocates both RX and TX resources and configures the SGE. However,
- * the hardware is not enabled yet.
- */
-int t1_sge_configure(struct sge *sge, struct sge_params *p)
-{
-        if (alloc_rx_resources(sge, p))
-                return -ENOMEM;
-        if (alloc_tx_resources(sge, p)) {
-                free_rx_resources(sge);
-                return -ENOMEM;
-        }
-        configure_sge(sge, p);
-
-        /*
-         * Now that we have sized the free lists calculate the payload
-         * capacity of the large buffers. Other parts of the driver use
-         * this to set the max offload coalescing size so that RX packets
-         * do not overflow our large buffers.
-         */
-        p->large_buf_capacity = jumbo_payload_capacity(sge);
-        return 0;
-}
-
-/*
- * Disables the DMA engine.
- */
-void t1_sge_stop(struct sge *sge)
-{
-        int i;
-        writel(0, sge->adapter->regs + A_SG_CONTROL);
-        readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
-
-        if (is_T2(sge->adapter))
-                del_timer_sync(&sge->espibug_timer);
-
-        del_timer_sync(&sge->tx_reclaim_timer);
-        if (sge->tx_sched)
-                tx_sched_stop(sge);
-
-        for (i = 0; i < MAX_NPORTS; i++)
-                kfree_skb(sge->espibug_skb[i]);
-}
-
-/*
- * Enables the DMA engine.
- */
-void t1_sge_start(struct sge *sge)
-{
-        refill_free_list(sge, &sge->freelQ[0]);
-        refill_free_list(sge, &sge->freelQ[1]);
-
-        writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
-        doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
-        readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
-
-        mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
-
-        if (is_T2(sge->adapter))
-                mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
-}
-
-/*
- * Callback for the T2 ESPI 'stuck packet feature' workaround
- */
-static void espibug_workaround_t204(unsigned long data)
-{
-        struct adapter *adapter = (struct adapter *)data;
-        struct sge *sge = adapter->sge;
-        unsigned int nports = adapter->params.nports;
-        u32 seop[MAX_NPORTS];
-
-        if (adapter->open_device_map & PORT_MASK) {
-                int i;
-
-                if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
-                        return;
-
-                for (i = 0; i < nports; i++) {
-                        struct sk_buff *skb = sge->espibug_skb[i];
-
-                        if (!netif_running(adapter->port[i].dev) ||
-                            netif_queue_stopped(adapter->port[i].dev) ||
-                            !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
-                                continue;
-
-                        if (!skb->cb[0]) {
-                                skb_copy_to_linear_data_offset(skb,
-                                                sizeof(struct cpl_tx_pkt),
-                                                ch_mac_addr,
-                                                ETH_ALEN);
-                                skb_copy_to_linear_data_offset(skb,
-                                                skb->len - 10,
-                                                ch_mac_addr,
-                                                ETH_ALEN);
-                                skb->cb[0] = 0xff;
-                        }
-
-                        /* bump the reference count to avoid freeing of
-                         * the skb once the DMA has completed.
- */ - skb = skb_get(skb); - t1_sge_tx(skb, adapter, 0, adapter->port[i].dev); - } - } - mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); -} - -static void espibug_workaround(unsigned long data) -{ - struct adapter *adapter = (struct adapter *)data; - struct sge *sge = adapter->sge; - - if (netif_running(adapter->port[0].dev)) { - struct sk_buff *skb = sge->espibug_skb[0]; - u32 seop = t1_espi_get_mon(adapter, 0x930, 0); - - if ((seop & 0xfff0fff) == 0xfff && skb) { - if (!skb->cb[0]) { - skb_copy_to_linear_data_offset(skb, - sizeof(struct cpl_tx_pkt), - ch_mac_addr, - ETH_ALEN); - skb_copy_to_linear_data_offset(skb, - skb->len - 10, - ch_mac_addr, - ETH_ALEN); - skb->cb[0] = 0xff; - } - - /* bump the reference count to avoid freeing of the - * skb once the DMA has completed. - */ - skb = skb_get(skb); - t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); - } - } - mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); -} - -/* - * Creates a t1_sge structure and returns suggested resource parameters. - */ -struct sge * __devinit t1_sge_create(struct adapter *adapter, - struct sge_params *p) -{ - struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); - int i; - - if (!sge) - return NULL; - - sge->adapter = adapter; - sge->netdev = adapter->port[0].dev; - sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; - sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; - - for_each_port(adapter, i) { - sge->port_stats[i] = alloc_percpu(struct sge_port_stats); - if (!sge->port_stats[i]) - goto nomem_port; - } - - init_timer(&sge->tx_reclaim_timer); - sge->tx_reclaim_timer.data = (unsigned long)sge; - sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; - - if (is_T2(sge->adapter)) { - init_timer(&sge->espibug_timer); - - if (adapter->params.nports > 1) { - tx_sched_init(sge); - sge->espibug_timer.function = espibug_workaround_t204; - } else - sge->espibug_timer.function = espibug_workaround; - sge->espibug_timer.data = (unsigned long)sge->adapter; - - sge->espibug_timeout = 1; - /* for T204, every 10ms */ - if (adapter->params.nports > 1) - sge->espibug_timeout = HZ/100; - } - - - p->cmdQ_size[0] = SGE_CMDQ0_E_N; - p->cmdQ_size[1] = SGE_CMDQ1_E_N; - p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; - p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; - if (sge->tx_sched) { - if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) - p->rx_coalesce_usecs = 15; - else - p->rx_coalesce_usecs = 50; - } else - p->rx_coalesce_usecs = 50; - - p->coalesce_enable = 0; - p->sample_interval_usecs = 0; - - return sge; -nomem_port: - while (i >= 0) { - free_percpu(sge->port_stats[i]); - --i; - } - kfree(sge); - return NULL; - -} diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h deleted file mode 100644 index e03980b..0000000 --- a/drivers/net/chelsio/sge.h +++ /dev/null @@ -1,94 +0,0 @@ -/***************************************************************************** - * * - * File: sge.h * - * $Revision: 1.11 $ * - * $Date: 2005/06/21 22:10:55 $ * - * Description: * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*
- *                                                                           *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
- *                                                                           *
- * http://www.chelsio.com                                                    *
- *                                                                           *
- * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
- * All rights reserved.                                                      *
- *                                                                           *
- * Maintainers: maintainers@chelsio.com                                      *
- *                                                                           *
- * Authors: Dimitrios Michailidis                                            *
- *          Tina Yang                                                        *
- *          Felix Marti                                                      *
- *          Scott Bardone                                                    *
- *          Kurt Ottaway                                                     *
- *          Frank DiMambro                                                   *
- *                                                                           *
- * History:                                                                  *
- *                                                                           *
- ****************************************************************************/
-
-#ifndef _CXGB_SGE_H_
-#define _CXGB_SGE_H_
-
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <asm/byteorder.h>
-
-struct sge_intr_counts {
-        unsigned int rx_drops;         /* # of packets dropped due to no mem */
-        unsigned int pure_rsps;        /* # of non-payload responses */
-        unsigned int unhandled_irqs;   /* # of unhandled interrupts */
-        unsigned int respQ_empty;      /* # times respQ empty */
-        unsigned int respQ_overflow;   /* # respQ overflow (fatal) */
-        unsigned int freelistQ_empty;  /* # times freelist empty */
-        unsigned int pkt_too_big;      /* packet too large (fatal) */
-        unsigned int pkt_mismatch;
-        unsigned int cmdQ_full[3];     /* not HW IRQ, host cmdQ[] full */
-        unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
-};
-
-struct sge_port_stats {
-        u64 rx_cso_good;      /* # of successful RX csum offloads */
-        u64 tx_cso;           /* # of TX checksum offloads */
-        u64 tx_tso;           /* # of TSO requests */
-        u64 vlan_xtract;      /* # of VLAN tag extractions */
-        u64 vlan_insert;      /* # of VLAN tag insertions */
-        u64 tx_need_hdrroom;  /* # of TX skbs in need of more header room */
-};
-
-struct sk_buff;
-struct net_device;
-struct adapter;
-struct sge_params;
-struct sge;
-
-struct sge *t1_sge_create(struct adapter *, struct sge_params *);
-int t1_sge_configure(struct sge *, struct sge_params *);
-int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
-void t1_sge_destroy(struct sge *);
-irqreturn_t t1_interrupt(int irq, void *cookie);
-int t1_poll(struct napi_struct *, int);
-
-netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t1_vlan_mode(struct adapter *adapter, u32 features);
-void t1_sge_start(struct sge *);
-void t1_sge_stop(struct sge *);
-int t1_sge_intr_error_handler(struct sge *);
-void t1_sge_intr_enable(struct sge *);
-void t1_sge_intr_disable(struct sge *);
-void t1_sge_intr_clear(struct sge *);
-const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge);
-void t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *);
-unsigned int t1_sched_update_parms(struct sge *, unsigned int, unsigned int,
-                                   unsigned int);
-
-#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
deleted file mode 100644
index 8a43c7e..0000000
--- a/drivers/net/chelsio/subr.c
+++ /dev/null
@@ -1,1130 +0,0 @@
-/*****************************************************************************
- *                                                                           *
- * File: subr.c                                                              *
- * $Revision: 1.27 $                                                         *
- * $Date: 2005/06/22 01:08:36 $                                              *
- * Description:                                                              *
- *  Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
- *  part of the Chelsio 10Gb Ethernet Driver.                                *
- *                                                                           *
- * This program is free software; you can redistribute it and/or modify     *
- * it under the terms of the GNU General Public License, version 2, as      *
- * published by the Free Software Foundation.
* - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * - * All rights reserved. * - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: Dimitrios Michailidis * - * Tina Yang * - * Felix Marti * - * Scott Bardone * - * Kurt Ottaway * - * Frank DiMambro * - * * - * History: * - * * - ****************************************************************************/ - -#include "common.h" -#include "elmer0.h" -#include "regs.h" -#include "gmac.h" -#include "cphy.h" -#include "sge.h" -#include "tp.h" -#include "espi.h" - -/** - * t1_wait_op_done - wait until an operation is completed - * @adapter: the adapter performing the operation - * @reg: the register to check for completion - * @mask: a single-bit field within @reg that indicates completion - * @polarity: the value of the field when the operation is completed - * @attempts: number of check iterations - * @delay: delay in usecs between iterations - * - * Wait until an operation is completed by checking a bit in a register - * up to @attempts times. Returns %0 if the operation completes and %1 - * otherwise. - */ -static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity, - int attempts, int delay) -{ - while (1) { - u32 val = readl(adapter->regs + reg) & mask; - - if (!!val == polarity) - return 0; - if (--attempts == 0) - return 1; - if (delay) - udelay(delay); - } -} - -#define TPI_ATTEMPTS 50 - -/* - * Write a register over the TPI interface (unlocked and locked versions). - */ -int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) -{ - int tpi_busy; - - writel(addr, adapter->regs + A_TPI_ADDR); - writel(value, adapter->regs + A_TPI_WR_DATA); - writel(F_TPIWR, adapter->regs + A_TPI_CSR); - - tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, - TPI_ATTEMPTS, 3); - if (tpi_busy) - pr_alert("%s: TPI write to 0x%x failed\n", - adapter->name, addr); - return tpi_busy; -} - -int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) -{ - int ret; - - spin_lock(&adapter->tpi_lock); - ret = __t1_tpi_write(adapter, addr, value); - spin_unlock(&adapter->tpi_lock); - return ret; -} - -/* - * Read a register over the TPI interface (unlocked and locked versions). - */ -int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) -{ - int tpi_busy; - - writel(addr, adapter->regs + A_TPI_ADDR); - writel(0, adapter->regs + A_TPI_CSR); - - tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, - TPI_ATTEMPTS, 3); - if (tpi_busy) - pr_alert("%s: TPI read from 0x%x failed\n", - adapter->name, addr); - else - *valp = readl(adapter->regs + A_TPI_RD_DATA); - return tpi_busy; -} - -int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) -{ - int ret; - - spin_lock(&adapter->tpi_lock); - ret = __t1_tpi_read(adapter, addr, valp); - spin_unlock(&adapter->tpi_lock); - return ret; -} - -/* - * Set a TPI parameter. 
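t1_wait_op_done() above is the standard bounded-polling idiom: read, mask, compare against the expected polarity, and give up after a fixed number of attempts. A self-contained version against a plain memory word shows the contract the TPI callers rely on (0 on completion, 1 on timeout); the driver additionally sleeps with udelay() between reads, which this sketch omits:

#include <stdint.h>
#include <stdio.h>

static int wait_op_done(volatile uint32_t *reg, uint32_t mask,
                        int polarity, int attempts)
{
        while (1) {
                uint32_t val = *reg & mask;

                if (!!val == polarity)
                        return 0;
                if (--attempts == 0)
                        return 1;
        }
}

int main(void)
{
        volatile uint32_t csr = 0x4;    /* "ready" bit already set */

        printf("busy=%d\n", wait_op_done(&csr, 0x4, 1, 50));    /* busy=0 */
        printf("busy=%d\n", wait_op_done(&csr, 0x8, 1, 50));    /* busy=1 */
        return 0;
}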
- */ -static void t1_tpi_par(adapter_t *adapter, u32 value) -{ - writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR); -} - -/* - * Called when a port's link settings change to propagate the new values to the - * associated PHY and MAC. After performing the common tasks it invokes an - * OS-specific handler. - */ -void t1_link_changed(adapter_t *adapter, int port_id) -{ - int link_ok, speed, duplex, fc; - struct cphy *phy = adapter->port[port_id].phy; - struct link_config *lc = &adapter->port[port_id].link_config; - - phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); - - lc->speed = speed < 0 ? SPEED_INVALID : speed; - lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; - if (!(lc->requested_fc & PAUSE_AUTONEG)) - fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - - if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) { - /* Set MAC speed, duplex, and flow control to match PHY. */ - struct cmac *mac = adapter->port[port_id].mac; - - mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc); - lc->fc = (unsigned char)fc; - } - t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc); -} - -static int t1_pci_intr_handler(adapter_t *adapter) -{ - u32 pcix_cause; - - pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause); - - if (pcix_cause) { - pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, - pcix_cause); - t1_fatal_err(adapter); /* PCI errors are fatal */ - } - return 0; -} - -#ifdef CONFIG_CHELSIO_T1_1G -#include "fpga_defs.h" - -/* - * PHY interrupt handler for FPGA boards. - */ -static int fpga_phy_intr_handler(adapter_t *adapter) -{ - int p; - u32 cause = readl(adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); - - for_each_port(adapter, p) - if (cause & (1 << p)) { - struct cphy *phy = adapter->port[p].phy; - int phy_cause = phy->ops->interrupt_handler(phy); - - if (phy_cause & cphy_cause_link_change) - t1_link_changed(adapter, p); - } - writel(cause, adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); - return 0; -} - -/* - * Slow path interrupt handler for FPGAs. - */ -static int fpga_slow_intr(adapter_t *adapter) -{ - u32 cause = readl(adapter->regs + A_PL_CAUSE); - - cause &= ~F_PL_INTR_SGE_DATA; - if (cause & F_PL_INTR_SGE_ERR) - t1_sge_intr_error_handler(adapter->sge); - - if (cause & FPGA_PCIX_INTERRUPT_GMAC) - fpga_phy_intr_handler(adapter); - - if (cause & FPGA_PCIX_INTERRUPT_TP) { - /* - * FPGA doesn't support MC4 interrupts and it requires - * this odd layer of indirection for MC5. - */ - u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); - - /* Clear TP interrupt */ - writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); - } - if (cause & FPGA_PCIX_INTERRUPT_PCIX) - t1_pci_intr_handler(adapter); - - /* Clear the interrupts just processed. */ - if (cause) - writel(cause, adapter->regs + A_PL_CAUSE); - - return cause != 0; -} -#endif - -/* - * Wait until Elmer's MI1 interface is ready for new operations. - */ -static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg) -{ - int attempts = 100, busy; - - do { - u32 val; - - __t1_tpi_read(adapter, mi1_reg, &val); - busy = val & F_MI1_OP_BUSY; - if (busy) - udelay(10); - } while (busy && --attempts); - if (busy) - pr_alert("%s: MDIO operation timed out\n", adapter->name); - return busy; -} - -/* - * MI1 MDIO initialization. 
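The MDC clock divider programmed by mi1_mdio_init() just below comes from clkdiv = clock_elmer0 / (2 * mdio_mdc) - 1. For the 10G boards in this file's table, a 44 MHz Elmer0 clock and a 1 MHz target MDC give 44 / 2 - 1 = 21. Checked standalone:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned clock_elmer0 = 44;     /* MHz, from the board table */
        unsigned mdio_mdc = 1;          /* MHz, target MDC rate */
        unsigned clkdiv = clock_elmer0 / (2 * mdio_mdc) - 1;

        assert(clkdiv == 21);
        printf("V_MI1_CLK_DIV(%u)\n", clkdiv);
        return 0;
}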
- */ -static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi) -{ - u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1; - u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) | - V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv); - - if (!(bi->caps & SUPPORTED_10000baseT_Full)) - val |= V_MI1_SOF(1); - t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); -} - -#if defined(CONFIG_CHELSIO_T1_1G) -/* - * Elmer MI1 MDIO read/write operations. - */ -static int mi1_mdio_read(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr) -{ - struct adapter *adapter = dev->ml_priv; - u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); - unsigned int val; - - spin_lock(&adapter->tpi_lock); - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); - __t1_tpi_write(adapter, - A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ); - mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); - __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val); - spin_unlock(&adapter->tpi_lock); - return val; -} - -static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr, u16 val) -{ - struct adapter *adapter = dev->ml_priv; - u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); - - spin_lock(&adapter->tpi_lock); - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); - __t1_tpi_write(adapter, - A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_WRITE); - mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); - spin_unlock(&adapter->tpi_lock); - return 0; -} - -static const struct mdio_ops mi1_mdio_ops = { - .init = mi1_mdio_init, - .read = mi1_mdio_read, - .write = mi1_mdio_write, - .mode_support = MDIO_SUPPORTS_C22 -}; - -#endif - -static int mi1_mdio_ext_read(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr) -{ - struct adapter *adapter = dev->ml_priv; - u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); - unsigned int val; - - spin_lock(&adapter->tpi_lock); - - /* Write the address we want. */ - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, - MI1_OP_INDIRECT_ADDRESS); - mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); - - /* Write the operation we want. */ - __t1_tpi_write(adapter, - A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ); - mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); - - /* Read the data. */ - __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val); - spin_unlock(&adapter->tpi_lock); - return val; -} - -static int mi1_mdio_ext_write(struct net_device *dev, int phy_addr, - int mmd_addr, u16 reg_addr, u16 val) -{ - struct adapter *adapter = dev->ml_priv; - u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); - - spin_lock(&adapter->tpi_lock); - - /* Write the address we want. */ - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, - MI1_OP_INDIRECT_ADDRESS); - mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); - - /* Write the data. 
*/ - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); - __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE); - mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); - spin_unlock(&adapter->tpi_lock); - return 0; -} - -static const struct mdio_ops mi1_mdio_ext_ops = { - .init = mi1_mdio_init, - .read = mi1_mdio_ext_read, - .write = mi1_mdio_ext_write, - .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22 -}; - -enum { - CH_BRD_T110_1CU, - CH_BRD_N110_1F, - CH_BRD_N210_1F, - CH_BRD_T210_1F, - CH_BRD_T210_1CU, - CH_BRD_N204_4CU, -}; - -static const struct board_info t1_board[] = { - { - .board = CHBT_BOARD_CHT110, - .port_number = 1, - .caps = SUPPORTED_10000baseT_Full, - .chip_term = CHBT_TERM_T1, - .chip_mac = CHBT_MAC_PM3393, - .chip_phy = CHBT_PHY_MY3126, - .clock_core = 125000000, - .clock_mc3 = 150000000, - .clock_mc4 = 125000000, - .espi_nports = 1, - .clock_elmer0 = 44, - .mdio_mdien = 1, - .mdio_mdiinv = 1, - .mdio_mdc = 1, - .mdio_phybaseaddr = 1, - .gmac = &t1_pm3393_ops, - .gphy = &t1_my3126_ops, - .mdio_ops = &mi1_mdio_ext_ops, - .desc = "Chelsio T110 1x10GBase-CX4 TOE", - }, - - { - .board = CHBT_BOARD_N110, - .port_number = 1, - .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE, - .chip_term = CHBT_TERM_T1, - .chip_mac = CHBT_MAC_PM3393, - .chip_phy = CHBT_PHY_88X2010, - .clock_core = 125000000, - .espi_nports = 1, - .clock_elmer0 = 44, - .mdio_mdien = 0, - .mdio_mdiinv = 0, - .mdio_mdc = 1, - .mdio_phybaseaddr = 0, - .gmac = &t1_pm3393_ops, - .gphy = &t1_mv88x201x_ops, - .mdio_ops = &mi1_mdio_ext_ops, - .desc = "Chelsio N110 1x10GBaseX NIC", - }, - - { - .board = CHBT_BOARD_N210, - .port_number = 1, - .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE, - .chip_term = CHBT_TERM_T2, - .chip_mac = CHBT_MAC_PM3393, - .chip_phy = CHBT_PHY_88X2010, - .clock_core = 125000000, - .espi_nports = 1, - .clock_elmer0 = 44, - .mdio_mdien = 0, - .mdio_mdiinv = 0, - .mdio_mdc = 1, - .mdio_phybaseaddr = 0, - .gmac = &t1_pm3393_ops, - .gphy = &t1_mv88x201x_ops, - .mdio_ops = &mi1_mdio_ext_ops, - .desc = "Chelsio N210 1x10GBaseX NIC", - }, - - { - .board = CHBT_BOARD_CHT210, - .port_number = 1, - .caps = SUPPORTED_10000baseT_Full, - .chip_term = CHBT_TERM_T2, - .chip_mac = CHBT_MAC_PM3393, - .chip_phy = CHBT_PHY_88X2010, - .clock_core = 125000000, - .clock_mc3 = 133000000, - .clock_mc4 = 125000000, - .espi_nports = 1, - .clock_elmer0 = 44, - .mdio_mdien = 0, - .mdio_mdiinv = 0, - .mdio_mdc = 1, - .mdio_phybaseaddr = 0, - .gmac = &t1_pm3393_ops, - .gphy = &t1_mv88x201x_ops, - .mdio_ops = &mi1_mdio_ext_ops, - .desc = "Chelsio T210 1x10GBaseX TOE", - }, - - { - .board = CHBT_BOARD_CHT210, - .port_number = 1, - .caps = SUPPORTED_10000baseT_Full, - .chip_term = CHBT_TERM_T2, - .chip_mac = CHBT_MAC_PM3393, - .chip_phy = CHBT_PHY_MY3126, - .clock_core = 125000000, - .clock_mc3 = 133000000, - .clock_mc4 = 125000000, - .espi_nports = 1, - .clock_elmer0 = 44, - .mdio_mdien = 1, - .mdio_mdiinv = 1, - .mdio_mdc = 1, - .mdio_phybaseaddr = 1, - .gmac = &t1_pm3393_ops, - .gphy = &t1_my3126_ops, - .mdio_ops = &mi1_mdio_ext_ops, - .desc = "Chelsio T210 1x10GBase-CX4 TOE", - }, - -#ifdef CONFIG_CHELSIO_T1_1G - { - .board = CHBT_BOARD_CHN204, - .port_number = 4, - .caps = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full - | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | - SUPPORTED_PAUSE | SUPPORTED_TP, - .chip_term = CHBT_TERM_T2, - .chip_mac = CHBT_MAC_VSC7321, - .chip_phy = CHBT_PHY_88E1111, - .clock_core = 100000000, - 
.espi_nports = 4, - .clock_elmer0 = 44, - .mdio_mdien = 0, - .mdio_mdiinv = 0, - .mdio_mdc = 0, - .mdio_phybaseaddr = 4, - .gmac = &t1_vsc7326_ops, - .gphy = &t1_mv88e1xxx_ops, - .mdio_ops = &mi1_mdio_ops, - .desc = "Chelsio N204 4x100/1000BaseT NIC", - }, -#endif - -}; - -DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = { - CH_DEVICE(8, 0, CH_BRD_T110_1CU), - CH_DEVICE(8, 1, CH_BRD_T110_1CU), - CH_DEVICE(7, 0, CH_BRD_N110_1F), - CH_DEVICE(10, 1, CH_BRD_N210_1F), - CH_DEVICE(11, 1, CH_BRD_T210_1F), - CH_DEVICE(14, 1, CH_BRD_T210_1CU), - CH_DEVICE(16, 1, CH_BRD_N204_4CU), - { 0 } -}; - -MODULE_DEVICE_TABLE(pci, t1_pci_tbl); - -/* - * Return the board_info structure with a given index. Out-of-range indices - * return NULL. - */ -const struct board_info *t1_get_board_info(unsigned int board_id) -{ - return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL; -} - -struct chelsio_vpd_t { - u32 format_version; - u8 serial_number[16]; - u8 mac_base_address[6]; - u8 pad[2]; /* make multiple-of-4 size requirement explicit */ -}; - -#define EEPROMSIZE (8 * 1024) -#define EEPROM_MAX_POLL 4 - -/* - * Read SEEPROM. A zero is written to the flag register when the address is - * written to the Control register. The hardware device will set the flag to a - * one when 4B have been transferred to the Data register. - */ -int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data) -{ - int i = EEPROM_MAX_POLL; - u16 val; - u32 v; - - if (addr >= EEPROMSIZE || (addr & 3)) - return -EINVAL; - - pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr); - do { - udelay(50); - pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val); - } while (!(val & F_VPD_OP_FLAG) && --i); - - if (!(val & F_VPD_OP_FLAG)) { - pr_err("%s: reading EEPROM address 0x%x failed\n", - adapter->name, addr); - return -EIO; - } - pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, &v); - *data = cpu_to_le32(v); - return 0; -} - -static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd) -{ - int addr, ret = 0; - - for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32)) - ret = t1_seeprom_read(adapter, addr, - (__le32 *)((u8 *)vpd + addr)); - - return ret; -} - -/* - * Read a port's MAC address from the VPD ROM. - */ -static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[]) -{ - struct chelsio_vpd_t vpd; - - if (t1_eeprom_vpd_get(adapter, &vpd)) - return 1; - memcpy(mac_addr, vpd.mac_base_address, 5); - mac_addr[5] = vpd.mac_base_address[5] + index; - return 0; -} - -/* - * Set up the MAC/PHY according to the requested link settings. - * - * If the PHY can auto-negotiate first decide what to advertise, then - * enable/disable auto-negotiation as desired and reset. - * - * If the PHY does not auto-negotiate we just reset it. - * - * If auto-negotiation is off set the MAC to the proper speed/duplex/FC, - * otherwise do it later based on the outcome of auto-negotiation. 
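vpd_macaddress_get() above assigns each port a MAC address by offsetting only the last octet of the VPD base address; no carry into octet 4 is propagated, so the base stored in the EEPROM is assumed to leave room for all ports. A standalone sketch of that derivation, using an arbitrary example base address:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Derive port <index>'s MAC from a base address, as vpd_macaddress_get()
 * does: copy the first five octets, offset the sixth.
 */
static void derive_port_mac(const uint8_t base[6], int index, uint8_t out[6])
{
        memcpy(out, base, 5);
        out[5] = base[5] + index;
}

int main(void)
{
        const uint8_t base[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x50 };
        uint8_t mac[6];
        int port;

        for (port = 0; port < 4; port++) {
                derive_port_mac(base, port, mac);
                printf("port %d: %02x:%02x:%02x:%02x:%02x:%02x\n", port,
                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        }
        return 0;
}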
- */ -int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) -{ - unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - - if (lc->supported & SUPPORTED_Autoneg) { - lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE); - if (fc) { - if (fc == ((PAUSE_RX | PAUSE_TX) & - (mac->adapter->params.nports < 2))) - lc->advertising |= ADVERTISED_PAUSE; - else { - lc->advertising |= ADVERTISED_ASYM_PAUSE; - if (fc == PAUSE_RX) - lc->advertising |= ADVERTISED_PAUSE; - } - } - phy->ops->advertise(phy, lc->advertising); - - if (lc->autoneg == AUTONEG_DISABLE) { - lc->speed = lc->requested_speed; - lc->duplex = lc->requested_duplex; - lc->fc = (unsigned char)fc; - mac->ops->set_speed_duplex_fc(mac, lc->speed, - lc->duplex, fc); - /* Also disables autoneg */ - phy->state = PHY_AUTONEG_RDY; - phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); - phy->ops->reset(phy, 0); - } else { - phy->state = PHY_AUTONEG_EN; - phy->ops->autoneg_enable(phy); /* also resets PHY */ - } - } else { - phy->state = PHY_AUTONEG_RDY; - mac->ops->set_speed_duplex_fc(mac, -1, -1, fc); - lc->fc = (unsigned char)fc; - phy->ops->reset(phy, 0); - } - return 0; -} - -/* - * External interrupt handler for boards using elmer0. - */ -int t1_elmer0_ext_intr_handler(adapter_t *adapter) -{ - struct cphy *phy; - int phy_cause; - u32 cause; - - t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause); - - switch (board_info(adapter)->board) { -#ifdef CONFIG_CHELSIO_T1_1G - case CHBT_BOARD_CHT204: - case CHBT_BOARD_CHT204E: - case CHBT_BOARD_CHN204: - case CHBT_BOARD_CHT204V: { - int i, port_bit; - for_each_port(adapter, i) { - port_bit = i + 1; - if (!(cause & (1 << port_bit))) - continue; - - phy = adapter->port[i].phy; - phy_cause = phy->ops->interrupt_handler(phy); - if (phy_cause & cphy_cause_link_change) - t1_link_changed(adapter, i); - } - break; - } - case CHBT_BOARD_CHT101: - if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ - phy = adapter->port[0].phy; - phy_cause = phy->ops->interrupt_handler(phy); - if (phy_cause & cphy_cause_link_change) - t1_link_changed(adapter, 0); - } - break; - case CHBT_BOARD_7500: { - int p; - /* - * Elmer0's interrupt cause isn't useful here because there is - * only one bit that can be set for all 4 ports. This means - * we are forced to check every PHY's interrupt status - * register to see who initiated the interrupt. - */ - for_each_port(adapter, p) { - phy = adapter->port[p].phy; - phy_cause = phy->ops->interrupt_handler(phy); - if (phy_cause & cphy_cause_link_change) - t1_link_changed(adapter, p); - } - break; - } -#endif - case CHBT_BOARD_CHT210: - case CHBT_BOARD_N210: - case CHBT_BOARD_N110: - if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */ - phy = adapter->port[0].phy; - phy_cause = phy->ops->interrupt_handler(phy); - if (phy_cause & cphy_cause_link_change) - t1_link_changed(adapter, 0); - } - break; - case CHBT_BOARD_8000: - case CHBT_BOARD_CHT110: - if (netif_msg_intr(adapter)) - dev_dbg(&adapter->pdev->dev, - "External interrupt cause 0x%x\n", cause); - if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ - struct cmac *mac = adapter->port[0].mac; - - mac->ops->interrupt_handler(mac); - } - if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */ - u32 mod_detect; - - t1_tpi_read(adapter, - A_ELMER0_GPI_STAT, &mod_detect); - if (netif_msg_link(adapter)) - dev_info(&adapter->pdev->dev, "XPAK %s\n", - mod_detect ? 
"removed" : "inserted"); - } - break; - } - t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); - return 0; -} - -/* Enables all interrupts. */ -void t1_interrupts_enable(adapter_t *adapter) -{ - unsigned int i; - - adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP; - - t1_sge_intr_enable(adapter->sge); - t1_tp_intr_enable(adapter->tp); - if (adapter->espi) { - adapter->slow_intr_mask |= F_PL_INTR_ESPI; - t1_espi_intr_enable(adapter->espi); - } - - /* Enable MAC/PHY interrupts for each port. */ - for_each_port(adapter, i) { - adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac); - adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy); - } - - /* Enable PCIX & external chip interrupts on ASIC boards. */ - if (t1_is_asic(adapter)) { - u32 pl_intr = readl(adapter->regs + A_PL_ENABLE); - - /* PCI-X interrupts */ - pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, - 0xffffffff); - - adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX; - pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX; - writel(pl_intr, adapter->regs + A_PL_ENABLE); - } -} - -/* Disables all interrupts. */ -void t1_interrupts_disable(adapter_t* adapter) -{ - unsigned int i; - - t1_sge_intr_disable(adapter->sge); - t1_tp_intr_disable(adapter->tp); - if (adapter->espi) - t1_espi_intr_disable(adapter->espi); - - /* Disable MAC/PHY interrupts for each port. */ - for_each_port(adapter, i) { - adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac); - adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy); - } - - /* Disable PCIX & external chip interrupts. */ - if (t1_is_asic(adapter)) - writel(0, adapter->regs + A_PL_ENABLE); - - /* PCI-X interrupts */ - pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); - - adapter->slow_intr_mask = 0; -} - -/* Clears all interrupts */ -void t1_interrupts_clear(adapter_t* adapter) -{ - unsigned int i; - - t1_sge_intr_clear(adapter->sge); - t1_tp_intr_clear(adapter->tp); - if (adapter->espi) - t1_espi_intr_clear(adapter->espi); - - /* Clear MAC/PHY interrupts for each port. */ - for_each_port(adapter, i) { - adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac); - adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy); - } - - /* Enable interrupts for external devices. */ - if (t1_is_asic(adapter)) { - u32 pl_intr = readl(adapter->regs + A_PL_CAUSE); - - writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX, - adapter->regs + A_PL_CAUSE); - } - - /* PCI-X interrupts */ - pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff); -} - -/* - * Slow path interrupt handler for ASICs. - */ -static int asic_slow_intr(adapter_t *adapter) -{ - u32 cause = readl(adapter->regs + A_PL_CAUSE); - - cause &= adapter->slow_intr_mask; - if (!cause) - return 0; - if (cause & F_PL_INTR_SGE_ERR) - t1_sge_intr_error_handler(adapter->sge); - if (cause & F_PL_INTR_TP) - t1_tp_intr_handler(adapter->tp); - if (cause & F_PL_INTR_ESPI) - t1_espi_intr_handler(adapter->espi); - if (cause & F_PL_INTR_PCIX) - t1_pci_intr_handler(adapter); - if (cause & F_PL_INTR_EXT) - t1_elmer0_ext_intr(adapter); - - /* Clear the interrupts just processed. */ - writel(cause, adapter->regs + A_PL_CAUSE); - readl(adapter->regs + A_PL_CAUSE); /* flush writes */ - return 1; -} - -int t1_slow_intr_handler(adapter_t *adapter) -{ -#ifdef CONFIG_CHELSIO_T1_1G - if (!t1_is_asic(adapter)) - return fpga_slow_intr(adapter); -#endif - return asic_slow_intr(adapter); -} - -/* Power sequencing is a work-around for Intel's XPAKs. 
*/ -static void power_sequence_xpak(adapter_t* adapter) -{ - u32 mod_detect; - u32 gpo; - - /* Check for XPAK */ - t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); - if (!(ELMER0_GP_BIT5 & mod_detect)) { - /* XPAK is present */ - t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); - gpo |= ELMER0_GP_BIT18; - t1_tpi_write(adapter, A_ELMER0_GPO, gpo); - } -} - -int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, - struct adapter_params *p) -{ - p->chip_version = bi->chip_term; - p->is_asic = (p->chip_version != CHBT_TERM_FPGA); - if (p->chip_version == CHBT_TERM_T1 || - p->chip_version == CHBT_TERM_T2 || - p->chip_version == CHBT_TERM_FPGA) { - u32 val = readl(adapter->regs + A_TP_PC_CONFIG); - - val = G_TP_PC_REV(val); - if (val == 2) - p->chip_revision = TERM_T1B; - else if (val == 3) - p->chip_revision = TERM_T2; - else - return -1; - } else - return -1; - return 0; -} - -/* - * Enable board components other than the Chelsio chip, such as external MAC - * and PHY. - */ -static int board_init(adapter_t *adapter, const struct board_info *bi) -{ - switch (bi->board) { - case CHBT_BOARD_8000: - case CHBT_BOARD_N110: - case CHBT_BOARD_N210: - case CHBT_BOARD_CHT210: - t1_tpi_par(adapter, 0xf); - t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); - break; - case CHBT_BOARD_CHT110: - t1_tpi_par(adapter, 0xf); - t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); - - /* TBD XXX Might not need. This fixes a problem - * described in the Intel SR XPAK errata. - */ - power_sequence_xpak(adapter); - break; -#ifdef CONFIG_CHELSIO_T1_1G - case CHBT_BOARD_CHT204E: - /* add config space write here */ - case CHBT_BOARD_CHT204: - case CHBT_BOARD_CHT204V: - case CHBT_BOARD_CHN204: - t1_tpi_par(adapter, 0xf); - t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); - break; - case CHBT_BOARD_CHT101: - case CHBT_BOARD_7500: - t1_tpi_par(adapter, 0xf); - t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); - break; -#endif - } - return 0; -} - -/* - * Initialize and configure the Terminator HW modules. Note that external - * MAC and PHYs are initialized separately. - */ -int t1_init_hw_modules(adapter_t *adapter) -{ - int err = -EIO; - const struct board_info *bi = board_info(adapter); - - if (!bi->clock_mc4) { - u32 val = readl(adapter->regs + A_MC4_CFG); - - writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG); - writel(F_M_BUS_ENABLE | F_TCAM_RESET, - adapter->regs + A_MC5_CONFIG); - } - - if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, - bi->espi_nports)) - goto out_err; - - if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core)) - goto out_err; - - err = t1_sge_configure(adapter->sge, &adapter->params.sge); - if (err) - goto out_err; - - err = 0; -out_err: - return err; -} - -/* - * Determine a card's PCI mode. - */ -static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) -{ - static const unsigned short speed_map[] = { 33, 66, 100, 133 }; - u32 pci_mode; - - pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); - p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)]; - p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32; - p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0; -} - -/* - * Release the structures holding the SW per-Terminator-HW-module state. 
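get_pci_mode() above decodes the bus parameters from a single mode register: a two-bit clock code indexes speed_map[], one flag selects 64- versus 32-bit width, and another marks PCI-X. A sketch of the same decode with an assumed, purely illustrative field layout; the real bit positions come from the A_PCICFG_MODE register definitions, not from this sketch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout, not the hardware's. */
#define MODE_CLK(v)     ((v) & 0x3)     /* 2-bit clock code */
#define MODE_64BIT      0x4
#define MODE_PCIX       0x8

static void decode_pci_mode(uint32_t mode)
{
        static const unsigned short speed_map[] = { 33, 66, 100, 133 };

        printf("PCI%s %u-bit @ %u MHz\n",
               (mode & MODE_PCIX) ? "-X" : "",
               (mode & MODE_64BIT) ? 64 : 32,
               speed_map[MODE_CLK(mode)]);
}

int main(void)
{
        decode_pci_mode(MODE_PCIX | MODE_64BIT | 0x3); /* PCI-X 64-bit @ 133 MHz */
        return 0;
}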
- */ -void t1_free_sw_modules(adapter_t *adapter) -{ - unsigned int i; - - for_each_port(adapter, i) { - struct cmac *mac = adapter->port[i].mac; - struct cphy *phy = adapter->port[i].phy; - - if (mac) - mac->ops->destroy(mac); - if (phy) - phy->ops->destroy(phy); - } - - if (adapter->sge) - t1_sge_destroy(adapter->sge); - if (adapter->tp) - t1_tp_destroy(adapter->tp); - if (adapter->espi) - t1_espi_destroy(adapter->espi); -} - -static void __devinit init_link_config(struct link_config *lc, - const struct board_info *bi) -{ - lc->supported = bi->caps; - lc->requested_speed = lc->speed = SPEED_INVALID; - lc->requested_duplex = lc->duplex = DUPLEX_INVALID; - lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; - if (lc->supported & SUPPORTED_Autoneg) { - lc->advertising = lc->supported; - lc->autoneg = AUTONEG_ENABLE; - lc->requested_fc |= PAUSE_AUTONEG; - } else { - lc->advertising = 0; - lc->autoneg = AUTONEG_DISABLE; - } -} - -/* - * Allocate and initialize the data structures that hold the SW state of - * the Terminator HW modules. - */ -int __devinit t1_init_sw_modules(adapter_t *adapter, - const struct board_info *bi) -{ - unsigned int i; - - adapter->params.brd_info = bi; - adapter->params.nports = bi->port_number; - adapter->params.stats_update_period = bi->gmac->stats_update_period; - - adapter->sge = t1_sge_create(adapter, &adapter->params.sge); - if (!adapter->sge) { - pr_err("%s: SGE initialization failed\n", - adapter->name); - goto error; - } - - if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) { - pr_err("%s: ESPI initialization failed\n", - adapter->name); - goto error; - } - - adapter->tp = t1_tp_create(adapter, &adapter->params.tp); - if (!adapter->tp) { - pr_err("%s: TP initialization failed\n", - adapter->name); - goto error; - } - - board_init(adapter, bi); - bi->mdio_ops->init(adapter, bi); - if (bi->gphy->reset) - bi->gphy->reset(adapter); - if (bi->gmac->reset) - bi->gmac->reset(adapter); - - for_each_port(adapter, i) { - u8 hw_addr[6]; - struct cmac *mac; - int phy_addr = bi->mdio_phybaseaddr + i; - - adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev, - phy_addr, bi->mdio_ops); - if (!adapter->port[i].phy) { - pr_err("%s: PHY %d initialization failed\n", - adapter->name, i); - goto error; - } - - adapter->port[i].mac = mac = bi->gmac->create(adapter, i); - if (!mac) { - pr_err("%s: MAC %d initialization failed\n", - adapter->name, i); - goto error; - } - - /* - * Get the port's MAC addresses either from the EEPROM if one - * exists or the one hardcoded in the MAC. 
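t1_init_sw_modules() relies on the classic goto-based unwind: every failure path jumps to a single error label, and t1_free_sw_modules() tears down only the modules that were actually created. The shape of that pattern, reduced to a standalone sketch with stand-in allocations:

#include <stdio.h>
#include <stdlib.h>

struct modules {
        void *sge, *tp;         /* stand-ins for the per-module SW state */
};

/* Destroy only what exists, and make repeated calls harmless. */
static void free_modules(struct modules *m)
{
        free(m->tp);
        m->tp = NULL;
        free(m->sge);
        m->sge = NULL;
}

static int init_modules(struct modules *m)
{
        m->sge = calloc(1, 16);
        if (!m->sge)
                goto error;
        m->tp = calloc(1, 16);
        if (!m->tp)
                goto error;
        return 0;

error:
        free_modules(m);        /* unwind whatever was created */
        return -1;
}

int main(void)
{
        struct modules m = { NULL, NULL };

        printf("init %s\n", init_modules(&m) ? "failed" : "ok");
        free_modules(&m);       /* safe even after a failed init */
        return 0;
}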
- */ - if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY) - mac->ops->macaddress_get(mac, hw_addr); - else if (vpd_macaddress_get(adapter, i, hw_addr)) { - pr_err("%s: could not read MAC address from VPD ROM\n", - adapter->port[i].dev->name); - goto error; - } - memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN); - init_link_config(&adapter->port[i].link_config, bi); - } - - get_pci_mode(adapter, &adapter->params.pci); - t1_interrupts_clear(adapter); - return 0; - -error: - t1_free_sw_modules(adapter); - return -1; -} diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h deleted file mode 100644 index d0f87d8..0000000 --- a/drivers/net/chelsio/suni1x10gexp_regs.h +++ /dev/null @@ -1,1643 +0,0 @@ -/***************************************************************************** - * * - * File: suni1x10gexp_regs.h * - * $Revision: 1.9 $ * - * $Date: 2005/06/22 00:17:04 $ * - * Description: * - * PMC/SIERRA (pm3393) MAC-PHY functionality. * - * part of the Chelsio 10Gb Ethernet Driver. * - * * - * This program is free software; you can redistribute it and/or modify * - * it under the terms of the GNU General Public License, version 2, as * - * published by the Free Software Foundation. * - * * - * You should have received a copy of the GNU General Public License along * - * with this program; if not, write to the Free Software Foundation, Inc., * - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * - * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * - * * - * http://www.chelsio.com * - * * - * Maintainers: maintainers@chelsio.com * - * * - * Authors: PMC/SIERRA * - * * - * History: * - * * - ****************************************************************************/ - -#ifndef _CXGB_SUNI1x10GEXP_REGS_H_ -#define _CXGB_SUNI1x10GEXP_REGS_H_ - -/* -** Space allocated for each Exact Match Filter -** There are 8 filter configurations -*/ -#define SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER 0x0003 - -#define mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER ) - -/* -** Space allocated for VLAN-Id Filter -** There are 8 filter configurations -*/ -#define SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER 0x0001 - -#define mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER ) - -/* -** Space allocated for each MSTAT Counter -*/ -#define SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT 0x0004 - -#define mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId) ( (countId) * SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT ) - - -/******************************************************************************/ -/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/ -/******************************************************************************/ -/* Refer to the Register Bit Masks bellow for the naming of each register and */ -/* to the S/UNI-1x10GE-XP Data Sheet for the signification of each bit */ -/******************************************************************************/ - - -#define SUNI1x10GEXP_REG_IDENTIFICATION 0x0000 -#define SUNI1x10GEXP_REG_PRODUCT_REVISION 0x0001 -#define SUNI1x10GEXP_REG_CONFIG_AND_RESET_CONTROL 0x0002 -#define SUNI1x10GEXP_REG_LOOPBACK_MISC_CTRL 0x0003 -#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004 -#define SUNI1x10GEXP_REG_GLOBAL_PERFORMANCE_MONITOR_UPDATE 0x0005 - -#define SUNI1x10GEXP_REG_MDIO_COMMAND 0x0006 -#define 
SUNI1x10GEXP_REG_MDIO_INTERRUPT_ENABLE 0x0007 -#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_STATUS 0x0008 -#define SUNI1x10GEXP_REG_MMD_PHY_ADDRESS 0x0009 -#define SUNI1x10GEXP_REG_MMD_CONTROL_ADDRESS_DATA 0x000A -#define SUNI1x10GEXP_REG_MDIO_READ_STATUS_DATA 0x000B - -#define SUNI1x10GEXP_REG_OAM_INTF_CTRL 0x000C -#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D -#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E -#define SUNI1x10GEXP_REG_FREE 0x000F - -#define SUNI1x10GEXP_REG_XTEF_MISC_CTRL 0x0010 -#define SUNI1x10GEXP_REG_XRF_MISC_CTRL 0x0011 - -#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_1 0x0100 -#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_2 0x0101 -#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102 -#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_VISIBLE 0x0103 -#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104 -#define SUNI1x10GEXP_REG_SERDES_3125_TEST_CONFIG 0x0107 - -#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040 -#define SUNI1x10GEXP_REG_RXXG_CONFIG_2 0x2041 -#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042 -#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043 -#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045 -#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046 -#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047 -#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048 -#define SUNI1x10GEXP_REG_RXXG_RECEIVE_FIFO_THRESHOLD 0x2049 -#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(filterId) (0x204A + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) -#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_MID(filterId) (0x204B + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) -#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_HIGH(filterId)(0x204C + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) -#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID(filterId) (0x2062 + mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId)) -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_LOW 0x204A -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_MID 0x204B -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_HIGH 0x204C -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_LOW 0x2050 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_MID 0x2051 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_HIGH 0x2052 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_LOW 0x2053 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_MID 0x2054 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_HIGH 0x2055 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_LOW 0x2056 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_MID 0x2057 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_HIGH 0x2058 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_LOW 0x2059 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_MID 0x205A -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_HIGH 0x205B -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_LOW 0x205C -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_MID 0x205D -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_HIGH 0x205E -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_LOW 0x205F -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_MID 0x2060 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_HIGH 0x2061 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_0 0x2062 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_1 0x2063 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_2 0x2064 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_3 0x2065 -#define 
SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_4 0x2066 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_5 0x2067 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_6 0x2068 -#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_7 0x2069 -#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A -#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B -#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C -#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D -#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E -#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_1 0x206F -#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070 - -#define SUNI1x10GEXP_REG_XRF_PATTERN_GEN_CTRL 0x2081 -#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_0 0x2084 -#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_1 0x2085 -#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_2 0x2086 -#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_3 0x2087 -#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088 -#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089 -#define SUNI1x10GEXP_REG_XRF_ERR_STATUS 0x208A -#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B -#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C -#define SUNI1x10GEXP_REG_XRF_CODE_ERR_THRES 0x2092 - -#define SUNI1x10GEXP_REG_RXOAM_CONFIG 0x20C0 -#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_CONFIG 0x20C1 -#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_CONFIG 0x20C2 -#define SUNI1x10GEXP_REG_RXOAM_CONFIG_2 0x20C3 -#define SUNI1x10GEXP_REG_RXOAM_HEC_CONFIG 0x20C4 -#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_THRES 0x20C5 -#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7 -#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8 -#define SUNI1x10GEXP_REG_RXOAM_STATUS 0x20C9 -#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_COUNT 0x20CA -#define SUNI1x10GEXP_REG_RXOAM_FIFO_OVERFLOW_COUNT 0x20CB -#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_LSB 0x20CC -#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_MSB 0x20CD -#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_LSB 0x20CE -#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_MSB 0x20CF -#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_LSB 0x20D0 -#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_MSB 0x20D1 -#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_LSB 0x20D2 -#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_MSB 0x20D3 -#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_LSB 0x20D4 -#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_MSB 0x20D5 -#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_LSB 0x20D6 -#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_MSB 0x20D7 - -#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104 -#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105 -#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106 -#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107 -#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_ADDRESS 0x2109 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_LOW 0x210A -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_MIDDLE 0x210B -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_HIGH 0x210C -#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId) (0x2110 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) -#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId) (0x2111 + 
mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) -#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId) (0x2112 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_MID 0x2111 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_HIGH 0x2112 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_RESVD 0x2113 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_MID 0x2115 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_HIGH 0x2116 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_RESVD 0x2117 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_LOW 0x2118 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_MID 0x2119 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_HIGH 0x211A -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_RESVD 0x211B -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_LOW 0x211C -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_MID 0x211D -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_HIGH 0x211E -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_RESVD 0x211F -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_MID 0x2121 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_HIGH 0x2122 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_RESVD 0x2123 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_MID 0x2125 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_HIGH 0x2126 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_RESVD 0x2127 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_MID 0x2129 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_HIGH 0x212A -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_RESVD 0x212B -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_LOW 0x212C -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_MID 0x212D -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_HIGH 0x212E -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_RESVD 0x212F -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_MID 0x2131 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_HIGH 0x2132 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_RESVD 0x2133 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_LOW 0x2134 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_MID 0x2135 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_HIGH 0x2136 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_RESVD 0x2137 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_MID 0x2139 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_HIGH 0x213A -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_RESVD 0x213B -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_MID 0x213D -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_HIGH 0x213E -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_RESVD 0x213F -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_MID 0x2141 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_HIGH 0x2142 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_RESVD 0x2143 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_MID 0x2145 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_HIGH 0x2146 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_RESVD 0x2147 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_LOW 0x2148 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_MID 0x2149 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_HIGH 0x214A -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_RESVD 0x214B -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_MID 0x214D -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_HIGH 
0x214E -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_RESVD 0x214F -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_MID 0x2151 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_HIGH 0x2152 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_RESVD 0x2153 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_MID 0x2155 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_HIGH 0x2156 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_RESVD 0x2157 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_MID 0x2159 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_HIGH 0x215A -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_RESVD 0x215B -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_LOW 0x215C -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_MID 0x215D -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_HIGH 0x215E -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_RESVD 0x215F -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_LOW 0x2160 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_MID 0x2161 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_HIGH 0x2162 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_RESVD 0x2163 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_LOW 0x2164 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_MID 0x2165 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_HIGH 0x2166 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_RESVD 0x2167 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_LOW 0x2168 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_MID 0x2169 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_HIGH 0x216A -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_RESVD 0x216B -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_LOW 0x216C -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_MID 0x216D -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_HIGH 0x216E -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_RESVD 0x216F -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_LOW 0x2170 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_MID 0x2171 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_HIGH 0x2172 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_RESVD 0x2173 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW 0x2174 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_MID 0x2175 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_HIGH 0x2176 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_RESVD 0x2177 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW 0x2178 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_MID 0x2179 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_HIGH 0x217a -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_RESVD 0x217b -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_LOW 0x217c -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_MID 0x217d -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_HIGH 0x217e -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_RESVD 0x217f -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_LOW 0x2180 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_MID 0x2181 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_HIGH 0x2182 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_RESVD 0x2183 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_LOW 0x2184 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_MID 0x2185 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_HIGH 0x2186 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_RESVD 0x2187 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_LOW 0x2188 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_MID 0x2189 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_HIGH 0x218A -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_RESVD 0x218B -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_LOW 0x218C -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_MID 0x218D -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_HIGH 0x218E -#define 
SUNI1x10GEXP_REG_MSTAT_COUNTER_31_RESVD 0x218F -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_LOW 0x2190 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_MID 0x2191 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_HIGH 0x2192 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_RESVD 0x2193 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_MID 0x2195 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_HIGH 0x2196 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_RESVD 0x2197 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_LOW 0x2198 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_MID 0x2199 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_HIGH 0x219A -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_RESVD 0x219B -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_MID 0x219D -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_HIGH 0x219E -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_RESVD 0x219F -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_MID 0x21A1 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_HIGH 0x21A2 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_RESVD 0x21A3 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_LOW 0x21A4 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_MID 0x21A5 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_HIGH 0x21A6 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_RESVD 0x21A7 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_MID 0x21A9 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_HIGH 0x21AA -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_RESVD 0x21AB -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_LOW 0x21AC -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_MID 0x21AD -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_HIGH 0x21AE -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_RESVD 0x21AF -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_MID 0x21B1 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_HIGH 0x21B2 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_RESVD 0x21B3 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_LOW 0x21B4 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_MID 0x21B5 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_HIGH 0x21B6 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_RESVD 0x21B7 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_MID 0x21B9 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_HIGH 0x21BA -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_RESVD 0x21BB -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_MID 0x21BD -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_HIGH 0x21BE -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_RESVD 0x21BF -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_LOW 0x21C0 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_MID 0x21C1 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_HIGH 0x21C2 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_RESVD 0x21C3 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_LOW 0x21C4 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_MID 0x21C5 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_HIGH 0x21C6 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_RESVD 0x21C7 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_LOW 0x21C8 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_MID 0x21C9 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_HIGH 0x21CA -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_RESVD 0x21CB -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_LOW 0x21CC -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_MID 0x21CD -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_HIGH 0x21CE -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_RESVD 0x21CF 
-#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_LOW 0x21D0 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_MID 0x21D1 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_HIGH 0x21D2 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_RESVD 0x21D3 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_LOW 0x21D4 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_MID 0x21D5 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_HIGH 0x21D6 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_RESVD 0x21D7 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_LOW 0x21D8 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_MID 0x21D9 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_HIGH 0x21DA -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_RESVD 0x21DB -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW 0x21DC -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_MID 0x21DD -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_HIGH 0x21DE -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_RESVD 0x21DF -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW 0x21E0 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_MID 0x21E1 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_HIGH 0x21E2 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_RESVD 0x21E3 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_LOW 0x21E4 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_MID 0x21E5 -#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_HIGH 0x21E6 -#define SUNI1x10GEXP_CNTR_MAC_ETHERNET_NUM 51 - -#define SUNI1x10GEXP_REG_IFLX_GLOBAL_CONFIG 0x2200 -#define SUNI1x10GEXP_REG_IFLX_CHANNEL_PROVISION 0x2201 -#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209 -#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A -#define SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS 0x220D -#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_LOW_LIMIT_PROVISION 0x220E -#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_HIGH_LIMIT 0x220F -#define SUNI1x10GEXP_REG_IFLX_INDIR_FULL_ALMOST_FULL_STATUS_LIMIT 0x2210 -#define SUNI1x10GEXP_REG_IFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_LIMIT 0x2211 - -#define SUNI1x10GEXP_REG_PL4MOS_CONFIG 0x2240 -#define SUNI1x10GEXP_REG_PL4MOS_MASK 0x2241 -#define SUNI1x10GEXP_REG_PL4MOS_FAIRNESS_MASKING 0x2242 -#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST1 0x2243 -#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST2 0x2244 -#define SUNI1x10GEXP_REG_PL4MOS_TRANSFER_SIZE 0x2245 - -#define SUNI1x10GEXP_REG_PL4ODP_CONFIG 0x2280 -#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282 -#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283 -#define SUNI1x10GEXP_REG_PL4ODP_CONFIG_MAX_T 0x2284 - -#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300 -#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301 -#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302 -#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_LIMITS 0x2303 -#define SUNI1x10GEXP_REG_PL4IO_CALENDAR_REPETITIONS 0x2304 -#define SUNI1x10GEXP_REG_PL4IO_CONFIG 0x2305 - -#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040 -#define SUNI1x10GEXP_REG_TXXG_CONFIG_2 0x3041 -#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042 -#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043 -#define SUNI1x10GEXP_REG_TXXG_STATUS 0x3044 -#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045 -#define SUNI1x10GEXP_REG_TXXG_MIN_FRAME_SIZE 0x3046 -#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047 -#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048 -#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049 -#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER 0x304D -#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER_INTERVAL 0x304E -#define SUNI1x10GEXP_REG_TXXG_FILTER_ERROR_COUNTER 0x3051 -#define SUNI1x10GEXP_REG_TXXG_PAUSE_QUANTUM_CONFIG 0x3052 - -#define SUNI1x10GEXP_REG_XTEF_CTRL 0x3080 -#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084 
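Each MSTAT counter above occupies four consecutive registers (LOW, MID, HIGH, plus a reserved slot), so the mSUNI1x10GEXP_REG_MSTAT_COUNTER_*() accessors are just base + countId * 4, and a full counter is assembled from three 16-bit reads. A sketch of that arithmetic (how many of the 48 bits a given counter actually implements is a hardware detail not shown here):

#include <stdint.h>
#include <stdio.h>

#define MSTAT_BASE      0x2110
#define MSTAT_STRIDE    4       /* LOW, MID, HIGH, RESVD */

static unsigned int mstat_low_addr(unsigned int id)
{
        return MSTAT_BASE + id * MSTAT_STRIDE;
}

/* Assemble one statistics counter from its three 16-bit register reads. */
static uint64_t mstat_value(uint16_t low, uint16_t mid, uint16_t high)
{
        return (uint64_t)low | ((uint64_t)mid << 16) | ((uint64_t)high << 32);
}

int main(void)
{
        printf("counter 2 LOW at 0x%04x\n", mstat_low_addr(2)); /* 0x2118 */
        printf("value = 0x%012llx\n",
               (unsigned long long)mstat_value(0x1234, 0x5678, 0x009a));
        return 0;
}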
-#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085 -#define SUNI1x10GEXP_REG_XTEF_VISIBILITY 0x3086 - -#define SUNI1x10GEXP_REG_TXOAM_OAM_CONFIG 0x30C0 -#define SUNI1x10GEXP_REG_TXOAM_MINI_RATE_CONFIG 0x30C1 -#define SUNI1x10GEXP_REG_TXOAM_MINI_GAP_FIFO_CONFIG 0x30C2 -#define SUNI1x10GEXP_REG_TXOAM_P1P2_STATIC_VALUES 0x30C3 -#define SUNI1x10GEXP_REG_TXOAM_P3P4_STATIC_VALUES 0x30C4 -#define SUNI1x10GEXP_REG_TXOAM_P5P6_STATIC_VALUES 0x30C5 -#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6 -#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7 -#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_LSB 0x30C8 -#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_MSB 0x30C9 -#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_LSB 0x30CA -#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_MSB 0x30CB -#define SUNI1x10GEXP_REG_TXOAM_P1P2_MINI_MASK 0x30CC -#define SUNI1x10GEXP_REG_TXOAM_P3P4_MINI_MASK 0x30CD -#define SUNI1x10GEXP_REG_TXOAM_P5P6_MINI_MASK 0x30CE -#define SUNI1x10GEXP_REG_TXOAM_COSET 0x30CF -#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_LSB 0x30D0 -#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_MSB 0x30D1 -#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_LSB 0x30D2 -#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_MSB 0x30D3 - - -#define SUNI1x10GEXP_REG_EFLX_GLOBAL_CONFIG 0x3200 -#define SUNI1x10GEXP_REG_EFLX_ERCU_GLOBAL_STATUS 0x3201 -#define SUNI1x10GEXP_REG_EFLX_INDIR_CHANNEL_ADDRESS 0x3202 -#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_LOW_LIMIT 0x3203 -#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_HIGH_LIMIT 0x3204 -#define SUNI1x10GEXP_REG_EFLX_INDIR_FULL_ALMOST_FULL_STATUS_AND_LIMIT 0x3205 -#define SUNI1x10GEXP_REG_EFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_AND_LIMIT 0x3206 -#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_CUT_THROUGH_THRESHOLD 0x3207 -#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C -#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D -#define SUNI1x10GEXP_REG_EFLX_CHANNEL_PROVISION 0x3210 - -#define SUNI1x10GEXP_REG_PL4IDU_CONFIG 0x3280 -#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282 -#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283 - - -/*----------------------------------------*/ -#define SUNI1x10GEXP_REG_MAX_OFFSET 0x3480 - -/******************************************************************************/ -/* -- End register offset definitions -- */ -/******************************************************************************/ - -/******************************************************************************/ -/** SUNI-1x10GE-XP REGISTER BIT MASKS **/ -/******************************************************************************/ - -#define SUNI1x10GEXP_BITMSK_BITS_1 0x00001 -#define SUNI1x10GEXP_BITMSK_BITS_2 0x00003 -#define SUNI1x10GEXP_BITMSK_BITS_3 0x00007 -#define SUNI1x10GEXP_BITMSK_BITS_4 0x0000f -#define SUNI1x10GEXP_BITMSK_BITS_5 0x0001f -#define SUNI1x10GEXP_BITMSK_BITS_6 0x0003f -#define SUNI1x10GEXP_BITMSK_BITS_7 0x0007f -#define SUNI1x10GEXP_BITMSK_BITS_8 0x000ff -#define SUNI1x10GEXP_BITMSK_BITS_9 0x001ff -#define SUNI1x10GEXP_BITMSK_BITS_10 0x003ff -#define SUNI1x10GEXP_BITMSK_BITS_11 0x007ff -#define SUNI1x10GEXP_BITMSK_BITS_12 0x00fff -#define SUNI1x10GEXP_BITMSK_BITS_13 0x01fff -#define SUNI1x10GEXP_BITMSK_BITS_14 0x03fff -#define SUNI1x10GEXP_BITMSK_BITS_15 0x07fff -#define SUNI1x10GEXP_BITMSK_BITS_16 0x0ffff - -#define mSUNI1x10GEXP_CLR_MSBITS_1(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_15) -#define mSUNI1x10GEXP_CLR_MSBITS_2(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_14) -#define mSUNI1x10GEXP_CLR_MSBITS_3(v) ((v) & 
SUNI1x10GEXP_BITMSK_BITS_13) -#define mSUNI1x10GEXP_CLR_MSBITS_4(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_12) -#define mSUNI1x10GEXP_CLR_MSBITS_5(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_11) -#define mSUNI1x10GEXP_CLR_MSBITS_6(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_10) -#define mSUNI1x10GEXP_CLR_MSBITS_7(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_9) -#define mSUNI1x10GEXP_CLR_MSBITS_8(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_8) -#define mSUNI1x10GEXP_CLR_MSBITS_9(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_7) -#define mSUNI1x10GEXP_CLR_MSBITS_10(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_6) -#define mSUNI1x10GEXP_CLR_MSBITS_11(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_5) -#define mSUNI1x10GEXP_CLR_MSBITS_12(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_4) -#define mSUNI1x10GEXP_CLR_MSBITS_13(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_3) -#define mSUNI1x10GEXP_CLR_MSBITS_14(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_2) -#define mSUNI1x10GEXP_CLR_MSBITS_15(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_1) - -#define mSUNI1x10GEXP_GET_BIT(val, bitMsk) (((val)&(bitMsk)) ? 1:0) - - - -/*---------------------------------------------------------------------------- - * Register 0x0001: S/UNI-1x10GE-XP Product Revision - * Bit 3-0 REVISION - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_REVISION 0x000F - -/*---------------------------------------------------------------------------- - * Register 0x0002: S/UNI-1x10GE-XP Configuration and Reset Control - * Bit 2 XAUI_ARESETB - * Bit 1 PL4_ARESETB - * Bit 0 DRESETB - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_XAUI_ARESET 0x0004 -#define SUNI1x10GEXP_BITMSK_PL4_ARESET 0x0002 -#define SUNI1x10GEXP_BITMSK_DRESETB 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x0003: S/UNI-1x10GE-XP Loop Back and Miscellaneous Control - * Bit 11 PL4IO_OUTCLKSEL - * Bit 9 SYSPCSLB - * Bit 8 LINEPCSLB - * Bit 7 MSTAT_BYPASS - * Bit 6 RXXG_BYPASS - * Bit 5 TXXG_BYPASS - * Bit 4 SOP_PAD_EN - * Bit 1 LOS_INV - * Bit 0 OVERRIDE_LOS - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4IO_OUTCLKSEL 0x0800 -#define SUNI1x10GEXP_BITMSK_SYSPCSLB 0x0200 -#define SUNI1x10GEXP_BITMSK_LINEPCSLB 0x0100 -#define SUNI1x10GEXP_BITMSK_MSTAT_BYPASS 0x0080 -#define SUNI1x10GEXP_BITMSK_RXXG_BYPASS 0x0040 -#define SUNI1x10GEXP_BITMSK_TXXG_BYPASS 0x0020 -#define SUNI1x10GEXP_BITMSK_SOP_PAD_EN 0x0010 -#define SUNI1x10GEXP_BITMSK_LOS_INV 0x0002 -#define SUNI1x10GEXP_BITMSK_OVERRIDE_LOS 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x0004: S/UNI-1x10GE-XP Device Status - * Bit 9 TOP_SXRA_EXPIRED - * Bit 8 TOP_MDIO_BUSY - * Bit 7 TOP_DTRB - * Bit 6 TOP_EXPIRED - * Bit 5 TOP_PAUSED - * Bit 4 TOP_PL4_ID_DOOL - * Bit 3 TOP_PL4_IS_DOOL - * Bit 2 TOP_PL4_ID_ROOL - * Bit 1 TOP_PL4_IS_ROOL - * Bit 0 TOP_PL4_OUT_ROOL - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200 -#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY 0x0100 -#define SUNI1x10GEXP_BITMSK_TOP_DTRB 0x0080 -#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040 -#define SUNI1x10GEXP_BITMSK_TOP_PAUSED 0x0020 -#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010 -#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008 -#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004 -#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002 -#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001 - 
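The bit-mask section pairs every multi-bit field with a _BITMSK and, where needed, a _BITOFF constant, so extraction is always (value & mask) >> offset, and single bits are tested the way mSUNI1x10GEXP_GET_BIT() does. A sketch using the MMD PHY Address register's DEVADR field (bits 12-8) as the example:

#include <stdint.h>
#include <stdio.h>

/* Mask/offset pair as defined for MDIO_DEVADR in this header. */
#define BITMSK_MDIO_DEVADR 0x1F00
#define BITOFF_MDIO_DEVADR 8

#define GET_FIELD(val, msk, off) (((val) & (msk)) >> (off))
#define GET_BIT(val, msk)        (((val) & (msk)) ? 1 : 0)

int main(void)
{
        uint16_t reg = 0x0A1F;  /* example register readback */

        printf("DEVADR = %u\n",
               (unsigned)GET_FIELD(reg, BITMSK_MDIO_DEVADR,
                                   BITOFF_MDIO_DEVADR)); /* prints 10 */
        printf("bit0   = %d\n", GET_BIT(reg, 0x0001));
        return 0;
}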
-/*---------------------------------------------------------------------------- - * Register 0x0005: Global Performance Update and Clock Monitors - * Bit 15 TIP - * Bit 8 XAUI_REF_CLKA - * Bit 7 RXLANE3CLKA - * Bit 6 RXLANE2CLKA - * Bit 5 RXLANE1CLKA - * Bit 4 RXLANE0CLKA - * Bit 3 CSUCLKA - * Bit 2 TDCLKA - * Bit 1 RSCLKA - * Bit 0 RDCLKA - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TIP 0x8000 -#define SUNI1x10GEXP_BITMSK_XAUI_REF_CLKA 0x0100 -#define SUNI1x10GEXP_BITMSK_RXLANE3CLKA 0x0080 -#define SUNI1x10GEXP_BITMSK_RXLANE2CLKA 0x0040 -#define SUNI1x10GEXP_BITMSK_RXLANE1CLKA 0x0020 -#define SUNI1x10GEXP_BITMSK_RXLANE0CLKA 0x0010 -#define SUNI1x10GEXP_BITMSK_CSUCLKA 0x0008 -#define SUNI1x10GEXP_BITMSK_TDCLKA 0x0004 -#define SUNI1x10GEXP_BITMSK_RSCLKA 0x0002 -#define SUNI1x10GEXP_BITMSK_RDCLKA 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x0006: MDIO Command - * Bit 4 MDIO_RDINC - * Bit 3 MDIO_RSTAT - * Bit 2 MDIO_LCTLD - * Bit 1 MDIO_LCTLA - * Bit 0 MDIO_SPRE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_MDIO_RDINC 0x0010 -#define SUNI1x10GEXP_BITMSK_MDIO_RSTAT 0x0008 -#define SUNI1x10GEXP_BITMSK_MDIO_LCTLD 0x0004 -#define SUNI1x10GEXP_BITMSK_MDIO_LCTLA 0x0002 -#define SUNI1x10GEXP_BITMSK_MDIO_SPRE 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x0007: MDIO Interrupt Enable - * Bit 0 MDIO_BUSY_EN - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_MDIO_BUSY_EN 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x0008: MDIO Interrupt Status - * Bit 0 MDIO_BUSYI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_MDIO_BUSYI 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x0009: MMD PHY Address - * Bit 12-8 MDIO_DEVADR - * Bit 4-0 MDIO_PRTADR - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_MDIO_DEVADR 0x1F00 -#define SUNI1x10GEXP_BITOFF_MDIO_DEVADR 8 -#define SUNI1x10GEXP_BITMSK_MDIO_PRTADR 0x001F -#define SUNI1x10GEXP_BITOFF_MDIO_PRTADR 0 - -/*---------------------------------------------------------------------------- - * Register 0x000C: OAM Interface Control - * Bit 6 MDO_OD_ENB - * Bit 5 MDI_INV - * Bit 4 MDI_SEL - * Bit 3 RXOAMEN - * Bit 2 RXOAMCLKEN - * Bit 1 TXOAMEN - * Bit 0 TXOAMCLKEN - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_MDO_OD_ENB 0x0040 -#define SUNI1x10GEXP_BITMSK_MDI_INV 0x0020 -#define SUNI1x10GEXP_BITMSK_MDI_SEL 0x0010 -#define SUNI1x10GEXP_BITMSK_RXOAMEN 0x0008 -#define SUNI1x10GEXP_BITMSK_RXOAMCLKEN 0x0004 -#define SUNI1x10GEXP_BITMSK_TXOAMEN 0x0002 -#define SUNI1x10GEXP_BITMSK_TXOAMCLKEN 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x000D: S/UNI-1x10GE-XP Master Interrupt Status - * Bit 15 TOP_PL4IO_INT - * Bit 14 TOP_IRAM_INT - * Bit 13 TOP_ERAM_INT - * Bit 12 TOP_XAUI_INT - * Bit 11 TOP_MSTAT_INT - * Bit 10 TOP_RXXG_INT - * Bit 9 TOP_TXXG_INT - * Bit 8 TOP_XRF_INT - * Bit 7 TOP_XTEF_INT - * Bit 6 TOP_MDIO_BUSY_INT - * Bit 5 TOP_RXOAM_INT - * Bit 4 TOP_TXOAM_INT - * Bit 3 TOP_IFLX_INT - * Bit 2 TOP_EFLX_INT - * Bit 1 
TOP_PL4ODP_INT - * Bit 0 TOP_PL4IDU_INT - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TOP_PL4IO_INT 0x8000 -#define SUNI1x10GEXP_BITMSK_TOP_IRAM_INT 0x4000 -#define SUNI1x10GEXP_BITMSK_TOP_ERAM_INT 0x2000 -#define SUNI1x10GEXP_BITMSK_TOP_XAUI_INT 0x1000 -#define SUNI1x10GEXP_BITMSK_TOP_MSTAT_INT 0x0800 -#define SUNI1x10GEXP_BITMSK_TOP_RXXG_INT 0x0400 -#define SUNI1x10GEXP_BITMSK_TOP_TXXG_INT 0x0200 -#define SUNI1x10GEXP_BITMSK_TOP_XRF_INT 0x0100 -#define SUNI1x10GEXP_BITMSK_TOP_XTEF_INT 0x0080 -#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY_INT 0x0040 -#define SUNI1x10GEXP_BITMSK_TOP_RXOAM_INT 0x0020 -#define SUNI1x10GEXP_BITMSK_TOP_TXOAM_INT 0x0010 -#define SUNI1x10GEXP_BITMSK_TOP_IFLX_INT 0x0008 -#define SUNI1x10GEXP_BITMSK_TOP_EFLX_INT 0x0004 -#define SUNI1x10GEXP_BITMSK_TOP_PL4ODP_INT 0x0002 -#define SUNI1x10GEXP_BITMSK_TOP_PL4IDU_INT 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x000E:PM3393 Global interrupt enable - * Bit 15 TOP_INTE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000 - -/*---------------------------------------------------------------------------- - * Register 0x0010: XTEF Miscellaneous Control - * Bit 7 RF_VAL - * Bit 6 RF_OVERRIDE - * Bit 5 LF_VAL - * Bit 4 LF_OVERRIDE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RF_VAL 0x0080 -#define SUNI1x10GEXP_BITMSK_RF_OVERRIDE 0x0040 -#define SUNI1x10GEXP_BITMSK_LF_VAL 0x0020 -#define SUNI1x10GEXP_BITMSK_LF_OVERRIDE 0x0010 -#define SUNI1x10GEXP_BITMSK_LFRF_OVERRIDE_VAL 0x00F0 - -/*---------------------------------------------------------------------------- - * Register 0x0011: XRF Miscellaneous Control - * Bit 6-4 EN_IDLE_REP - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EN_IDLE_REP 0x0070 - -/*---------------------------------------------------------------------------- - * Register 0x0100: SERDES 3125 Configuration Register 1 - * Bit 10 RXEQB_3 - * Bit 8 RXEQB_2 - * Bit 6 RXEQB_1 - * Bit 4 RXEQB_0 - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXEQB 0x0FF0 -#define SUNI1x10GEXP_BITOFF_RXEQB_3 10 -#define SUNI1x10GEXP_BITOFF_RXEQB_2 8 -#define SUNI1x10GEXP_BITOFF_RXEQB_1 6 -#define SUNI1x10GEXP_BITOFF_RXEQB_0 4 - -/*---------------------------------------------------------------------------- - * Register 0x0101: SERDES 3125 Configuration Register 2 - * Bit 12 YSEL - * Bit 7 PRE_EMPH_3 - * Bit 6 PRE_EMPH_2 - * Bit 5 PRE_EMPH_1 - * Bit 4 PRE_EMPH_0 - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_YSEL 0x1000 -#define SUNI1x10GEXP_BITMSK_PRE_EMPH 0x00F0 -#define SUNI1x10GEXP_BITMSK_PRE_EMPH_3 0x0080 -#define SUNI1x10GEXP_BITMSK_PRE_EMPH_2 0x0040 -#define SUNI1x10GEXP_BITMSK_PRE_EMPH_1 0x0020 -#define SUNI1x10GEXP_BITMSK_PRE_EMPH_0 0x0010 - -/*---------------------------------------------------------------------------- - * Register 0x0102: SERDES 3125 Interrupt Enable Register - * Bit 3 LASIE - * Bit 2 SPLL_RAE - * Bit 1 MPLL_RAE - * Bit 0 PLL_LOCKE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_LASIE 0x0008 -#define SUNI1x10GEXP_BITMSK_SPLL_RAE 0x0004 -#define SUNI1x10GEXP_BITMSK_MPLL_RAE 0x0002 -#define 
SUNI1x10GEXP_BITMSK_PLL_LOCKE 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x0103: SERDES 3125 Interrupt Visibility Register - * Bit 3 LASIV - * Bit 2 SPLL_RAV - * Bit 1 MPLL_RAV - * Bit 0 PLL_LOCKV - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_LASIV 0x0008 -#define SUNI1x10GEXP_BITMSK_SPLL_RAV 0x0004 -#define SUNI1x10GEXP_BITMSK_MPLL_RAV 0x0002 -#define SUNI1x10GEXP_BITMSK_PLL_LOCKV 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x0104: SERDES 3125 Interrupt Status Register - * Bit 3 LASII - * Bit 2 SPLL_RAI - * Bit 1 MPLL_RAI - * Bit 0 PLL_LOCKI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_LASII 0x0008 -#define SUNI1x10GEXP_BITMSK_SPLL_RAI 0x0004 -#define SUNI1x10GEXP_BITMSK_MPLL_RAI 0x0002 -#define SUNI1x10GEXP_BITMSK_PLL_LOCKI 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x0107: SERDES 3125 Test Configuration - * Bit 12 DUALTX - * Bit 10 HC_1 - * Bit 9 HC_0 - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_DUALTX 0x1000 -#define SUNI1x10GEXP_BITMSK_HC 0x0600 -#define SUNI1x10GEXP_BITOFF_HC_0 9 - -/*---------------------------------------------------------------------------- - * Register 0x2040: RXXG Configuration 1 - * Bit 15 RXXG_RXEN - * Bit 14 RXXG_ROCF - * Bit 13 RXXG_PAD_STRIP - * Bit 10 RXXG_PUREP - * Bit 9 RXXG_LONGP - * Bit 8 RXXG_PARF - * Bit 7 RXXG_FLCHK - * Bit 5 RXXG_PASS_CTRL - * Bit 3 RXXG_CRC_STRIP - * Bit 2-0 RXXG_MIFG - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000 -#define SUNI1x10GEXP_BITMSK_RXXG_ROCF 0x4000 -#define SUNI1x10GEXP_BITMSK_RXXG_PAD_STRIP 0x2000 -#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400 -#define SUNI1x10GEXP_BITMSK_RXXG_LONGP 0x0200 -#define SUNI1x10GEXP_BITMSK_RXXG_PARF 0x0100 -#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080 -#define SUNI1x10GEXP_BITMSK_RXXG_PASS_CTRL 0x0020 -#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008 - -/*---------------------------------------------------------------------------- - * Register 0x02041: RXXG Configuration 2 - * Bit 7-0 RXXG_HDRSIZE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXXG_HDRSIZE 0x00FF - -/*---------------------------------------------------------------------------- - * Register 0x2042: RXXG Configuration 3 - * Bit 15 RXXG_MIN_LERRE - * Bit 14 RXXG_MAX_LERRE - * Bit 12 RXXG_LINE_ERRE - * Bit 10 RXXG_RX_OVRE - * Bit 9 RXXG_ADR_FILTERE - * Bit 8 RXXG_ERR_FILTERE - * Bit 5 RXXG_PRMB_ERRE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRE 0x8000 -#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRE 0x4000 -#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRE 0x1000 -#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRE 0x0400 -#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERE 0x0200 -#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERRE 0x0100 -#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020 - -/*---------------------------------------------------------------------------- - * Register 0x2043: RXXG Interrupt - * Bit 15 RXXG_MIN_LERRI - * Bit 14 RXXG_MAX_LERRI - * Bit 12 RXXG_LINE_ERRI - * Bit 10 RXXG_RX_OVRI - * Bit 9 RXXG_ADR_FILTERI - * Bit 8 RXXG_ERR_FILTERI - * Bit 5 
RXXG_PRMB_ERRE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRI 0x8000 -#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRI 0x4000 -#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRI 0x1000 -#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRI 0x0400 -#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERI 0x0200 -#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERI 0x0100 -#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020 - -/*---------------------------------------------------------------------------- - * Register 0x2049: RXXG Receive FIFO Threshold - * Bit 2-0 RXXG_CUT_THRU - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXXG_CUT_THRU 0x0007 -#define SUNI1x10GEXP_BITOFF_RXXG_CUT_THRU 0 - -/*---------------------------------------------------------------------------- - * Register 0x2062H - 0x2069: RXXG Exact Match VID - * Bit 11-0 RXXG_VID_MATCH - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXXG_VID_MATCH 0x0FFF -#define SUNI1x10GEXP_BITOFF_RXXG_VID_MATCH 0 - -/*---------------------------------------------------------------------------- - * Register 0x206EH - 0x206F: RXXG Address Filter Control - * Bit 3 RXXG_FORWARD_ENABLE - * Bit 2 RXXG_VLAN_ENABLE - * Bit 1 RXXG_SRC_ADDR - * Bit 0 RXXG_MATCH_ENABLE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXXG_FORWARD_ENABLE 0x0008 -#define SUNI1x10GEXP_BITMSK_RXXG_VLAN_ENABLE 0x0004 -#define SUNI1x10GEXP_BITMSK_RXXG_SRC_ADDR 0x0002 -#define SUNI1x10GEXP_BITMSK_RXXG_MATCH_ENABLE 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x2070: RXXG Address Filter Control 2 - * Bit 1 RXXG_PMODE - * Bit 0 RXXG_MHASH_EN - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002 -#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x2081: XRF Control Register 2 - * Bit 6 EN_PKT_GEN - * Bit 4-2 PATT - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EN_PKT_GEN 0x0040 -#define SUNI1x10GEXP_BITMSK_PATT 0x001C -#define SUNI1x10GEXP_BITOFF_PATT 2 - -/*---------------------------------------------------------------------------- - * Register 0x2088: XRF Interrupt Enable - * Bit 12-9 LANE_HICERE - * Bit 8-5 HS_SD_LANEE - * Bit 4 ALIGN_STATUS_ERRE - * Bit 3-0 LANE_SYNC_STAT_ERRE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_LANE_HICERE 0x1E00 -#define SUNI1x10GEXP_BITOFF_LANE_HICERE 9 -#define SUNI1x10GEXP_BITMSK_HS_SD_LANEE 0x01E0 -#define SUNI1x10GEXP_BITOFF_HS_SD_LANEE 5 -#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRE 0x0010 -#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRE 0x000F -#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRE 0 - -/*---------------------------------------------------------------------------- - * Register 0x2089: XRF Interrupt Status - * Bit 12-9 LANE_HICERI - * Bit 8-5 HS_SD_LANEI - * Bit 4 ALIGN_STATUS_ERRI - * Bit 3-0 LANE_SYNC_STAT_ERRI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_LANE_HICERI 0x1E00 -#define SUNI1x10GEXP_BITOFF_LANE_HICERI 9 -#define SUNI1x10GEXP_BITMSK_HS_SD_LANEI 0x01E0 -#define 
SUNI1x10GEXP_BITOFF_HS_SD_LANEI 5 -#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRI 0x0010 -#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRI 0x000F -#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRI 0 - -/*---------------------------------------------------------------------------- - * Register 0x208A: XRF Error Status - * Bit 8-5 HS_SD_LANE - * Bit 4 ALIGN_STATUS_ERR - * Bit 3-0 LANE_SYNC_STAT_ERR - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_HS_SD_LANE3 0x0100 -#define SUNI1x10GEXP_BITMSK_HS_SD_LANE2 0x0080 -#define SUNI1x10GEXP_BITMSK_HS_SD_LANE1 0x0040 -#define SUNI1x10GEXP_BITMSK_HS_SD_LANE0 0x0020 -#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERR 0x0010 -#define SUNI1x10GEXP_BITMSK_LANE3_SYNC_STAT_ERR 0x0008 -#define SUNI1x10GEXP_BITMSK_LANE2_SYNC_STAT_ERR 0x0004 -#define SUNI1x10GEXP_BITMSK_LANE1_SYNC_STAT_ERR 0x0002 -#define SUNI1x10GEXP_BITMSK_LANE0_SYNC_STAT_ERR 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x208B: XRF Diagnostic Interrupt Enable - * Bit 7-4 LANE_OVERRUNE - * Bit 3-0 LANE_UNDERRUNE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNE 0x00F0 -#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNE 4 -#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNE 0x000F -#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNE 0 - -/*---------------------------------------------------------------------------- - * Register 0x208C: XRF Diagnostic Interrupt Status - * Bit 7-4 LANE_OVERRUNI - * Bit 3-0 LANE_UNDERRUNI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNI 0x00F0 -#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNI 4 -#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNI 0x000F -#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNI 0 - -/*---------------------------------------------------------------------------- - * Register 0x20C0: RXOAM Configuration - * Bit 15 RXOAM_BUSY - * Bit 14-12 RXOAM_F2_SEL - * Bit 10-8 RXOAM_F1_SEL - * Bit 7-6 RXOAM_FILTER_CTRL - * Bit 5-0 RXOAM_PX_EN - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXOAM_BUSY 0x8000 -#define SUNI1x10GEXP_BITMSK_RXOAM_F2_SEL 0x7000 -#define SUNI1x10GEXP_BITOFF_RXOAM_F2_SEL 12 -#define SUNI1x10GEXP_BITMSK_RXOAM_F1_SEL 0x0700 -#define SUNI1x10GEXP_BITOFF_RXOAM_F1_SEL 8 -#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_CTRL 0x00C0 -#define SUNI1x10GEXP_BITOFF_RXOAM_FILTER_CTRL 6 -#define SUNI1x10GEXP_BITMSK_RXOAM_PX_EN 0x003F -#define SUNI1x10GEXP_BITOFF_RXOAM_PX_EN 0 - -/*---------------------------------------------------------------------------- - * Register 0x20C1,0x20C2: RXOAM Filter Configuration - * Bit 15-8 RXOAM_FX_MASK - * Bit 7-0 RXOAM_FX_VAL - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXOAM_FX_MASK 0xFF00 -#define SUNI1x10GEXP_BITOFF_RXOAM_FX_MASK 8 -#define SUNI1x10GEXP_BITMSK_RXOAM_FX_VAL 0x00FF -#define SUNI1x10GEXP_BITOFF_RXOAM_FX_VAl 0 - -/*---------------------------------------------------------------------------- - * Register 0x20C3: RXOAM Configuration Register 2 - * Bit 13 RXOAM_REC_BYTE_VAL - * Bit 11-10 RXOAM_BYPASS_MODE - * Bit 5-0 RXOAM_PX_CLEAR - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXOAM_REC_BYTE_VAL 0x2000 -#define SUNI1x10GEXP_BITMSK_RXOAM_BYPASS_MODE 0x0C00 -#define SUNI1x10GEXP_BITOFF_RXOAM_BYPASS_MODE 
10 -#define SUNI1x10GEXP_BITMSK_RXOAM_PX_CLEAR 0x003F -#define SUNI1x10GEXP_BITOFF_RXOAM_PX_CLEAR 0 - -/*---------------------------------------------------------------------------- - * Register 0x20C4: RXOAM HEC Configuration - * Bit 15-8 RXOAM_COSET - * Bit 2 RXOAM_HEC_ERR_PKT - * Bit 0 RXOAM_HEC_EN - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXOAM_COSET 0xFF00 -#define SUNI1x10GEXP_BITOFF_RXOAM_COSET 8 -#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_ERR_PKT 0x0004 -#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_EN 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x20C7: RXOAM Interrupt Enable - * Bit 10 RXOAM_FILTER_THRSHE - * Bit 9 RXOAM_OAM_ERRE - * Bit 8 RXOAM_HECE_THRSHE - * Bit 7 RXOAM_SOPE - * Bit 6 RXOAM_RFE - * Bit 5 RXOAM_LFE - * Bit 4 RXOAM_DV_ERRE - * Bit 3 RXOAM_DATA_INVALIDE - * Bit 2 RXOAM_FILTER_DROPE - * Bit 1 RXOAM_HECE - * Bit 0 RXOAM_OFLE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHE 0x0400 -#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRE 0x0200 -#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHE 0x0100 -#define SUNI1x10GEXP_BITMSK_RXOAM_SOPE 0x0080 -#define SUNI1x10GEXP_BITMSK_RXOAM_RFE 0x0040 -#define SUNI1x10GEXP_BITMSK_RXOAM_LFE 0x0020 -#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRE 0x0010 -#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDE 0x0008 -#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPE 0x0004 -#define SUNI1x10GEXP_BITMSK_RXOAM_HECE 0x0002 -#define SUNI1x10GEXP_BITMSK_RXOAM_OFLE 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x20C8: RXOAM Interrupt Status - * Bit 10 RXOAM_FILTER_THRSHI - * Bit 9 RXOAM_OAM_ERRI - * Bit 8 RXOAM_HECE_THRSHI - * Bit 7 RXOAM_SOPI - * Bit 6 RXOAM_RFI - * Bit 5 RXOAM_LFI - * Bit 4 RXOAM_DV_ERRI - * Bit 3 RXOAM_DATA_INVALIDI - * Bit 2 RXOAM_FILTER_DROPI - * Bit 1 RXOAM_HECI - * Bit 0 RXOAM_OFLI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHI 0x0400 -#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRI 0x0200 -#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHI 0x0100 -#define SUNI1x10GEXP_BITMSK_RXOAM_SOPI 0x0080 -#define SUNI1x10GEXP_BITMSK_RXOAM_RFI 0x0040 -#define SUNI1x10GEXP_BITMSK_RXOAM_LFI 0x0020 -#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRI 0x0010 -#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDI 0x0008 -#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPI 0x0004 -#define SUNI1x10GEXP_BITMSK_RXOAM_HECI 0x0002 -#define SUNI1x10GEXP_BITMSK_RXOAM_OFLI 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x20C9: RXOAM Status - * Bit 10 RXOAM_FILTER_THRSHV - * Bit 8 RXOAM_HECE_THRSHV - * Bit 6 RXOAM_RFV - * Bit 5 RXOAM_LFV - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHV 0x0400 -#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHV 0x0100 -#define SUNI1x10GEXP_BITMSK_RXOAM_RFV 0x0040 -#define SUNI1x10GEXP_BITMSK_RXOAM_LFV 0x0020 - -/*---------------------------------------------------------------------------- - * Register 0x2100: MSTAT Control - * Bit 2 MSTAT_WRITE - * Bit 1 MSTAT_CLEAR - * Bit 0 MSTAT_SNAP - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE 0x0004 -#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002 -#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001 - 
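/*
 * Editor's note -- an illustrative sketch, not part of the header being
 * moved: each multi-bit field in this file publishes a
 * SUNI1x10GEXP_BITMSK_* mask together with a SUNI1x10GEXP_BITOFF_* shift,
 * and the driver open-codes mask-and-shift accesses to these 16-bit
 * registers.  The two helpers below are hypothetical names (u16 comes
 * from <linux/types.h>); they only show the intended read/modify pattern.
 */

#include <linux/types.h>

static inline u16 suni1x10gexp_get_field(u16 reg, u16 mask, unsigned int off)
{
        /* Isolate the field's bits, then right-justify them. */
        return (reg & mask) >> off;
}

static inline u16 suni1x10gexp_set_field(u16 reg, u16 mask, unsigned int off,
                                         u16 val)
{
        /* Clear the old field, then merge in the new value. */
        return (reg & ~mask) | ((val << off) & mask);
}

/*
 * For example, the RXOAM F2 filter select (bits 14-12 of register 0x20C0,
 * defined above) would be extracted from a register readback v as:
 *
 *      f2 = suni1x10gexp_get_field(v, SUNI1x10GEXP_BITMSK_RXOAM_F2_SEL,
 *                                  SUNI1x10GEXP_BITOFF_RXOAM_F2_SEL);
 */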
-/*---------------------------------------------------------------------------- - * Register 0x2109: MSTAT Counter Write Address - * Bit 5-0 MSTAT_WRITE_ADDRESS - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE_ADDRESS 0x003F -#define SUNI1x10GEXP_BITOFF_MSTAT_WRITE_ADDRESS 0 - -/*---------------------------------------------------------------------------- - * Register 0x2200: IFLX Global Configuration Register - * Bit 15 IFLX_IRCU_ENABLE - * Bit 14 IFLX_IDSWT_ENABLE - * Bit 13-0 IFLX_IFD_CNT - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_IFLX_IRCU_ENABLE 0x8000 -#define SUNI1x10GEXP_BITMSK_IFLX_IDSWT_ENABLE 0x4000 -#define SUNI1x10GEXP_BITMSK_IFLX_IFD_CNT 0x3FFF -#define SUNI1x10GEXP_BITOFF_IFLX_IFD_CNT 0 - -/*---------------------------------------------------------------------------- - * Register 0x2209: IFLX FIFO Overflow Enable - * Bit 0 IFLX_OVFE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_IFLX_OVFE 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x220A: IFLX FIFO Overflow Interrupt - * Bit 0 IFLX_OVFI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_IFLX_OVFI 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x220D: IFLX Indirect Channel Address - * Bit 15 IFLX_BUSY - * Bit 14 IFLX_RWB - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_IFLX_BUSY 0x8000 -#define SUNI1x10GEXP_BITMSK_IFLX_RWB 0x4000 - -/*---------------------------------------------------------------------------- - * Register 0x220E: IFLX Indirect Logical FIFO Low Limit & Provision - * Bit 9-0 IFLX_LOLIM - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_IFLX_LOLIM 0x03FF -#define SUNI1x10GEXP_BITOFF_IFLX_LOLIM 0 - -/*---------------------------------------------------------------------------- - * Register 0x220F: IFLX Indirect Logical FIFO High Limit - * Bit 9-0 IFLX_HILIM - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_IFLX_HILIM 0x03FF -#define SUNI1x10GEXP_BITOFF_IFLX_HILIM 0 - -/*---------------------------------------------------------------------------- - * Register 0x2210: IFLX Indirect Full/Almost Full Status & Limit - * Bit 15 IFLX_FULL - * Bit 14 IFLX_AFULL - * Bit 13-0 IFLX_AFTH - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_IFLX_FULL 0x8000 -#define SUNI1x10GEXP_BITMSK_IFLX_AFULL 0x4000 -#define SUNI1x10GEXP_BITMSK_IFLX_AFTH 0x3FFF -#define SUNI1x10GEXP_BITOFF_IFLX_AFTH 0 - -/*---------------------------------------------------------------------------- - * Register 0x2211: IFLX Indirect Empty/Almost Empty Status & Limit - * Bit 15 IFLX_EMPTY - * Bit 14 IFLX_AEMPTY - * Bit 13-0 IFLX_AETH - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_IFLX_EMPTY 0x8000 -#define SUNI1x10GEXP_BITMSK_IFLX_AEMPTY 0x4000 -#define SUNI1x10GEXP_BITMSK_IFLX_AETH 0x3FFF -#define SUNI1x10GEXP_BITOFF_IFLX_AETH 0 - -/*---------------------------------------------------------------------------- - * Register 0x2240: PL4MOS Configuration Register - * Bit 3 
PL4MOS_RE_INIT - * Bit 2 PL4MOS_EN - * Bit 1 PL4MOS_NO_STATUS - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4MOS_RE_INIT 0x0008 -#define SUNI1x10GEXP_BITMSK_PL4MOS_EN 0x0004 -#define SUNI1x10GEXP_BITMSK_PL4MOS_NO_STATUS 0x0002 - -/*---------------------------------------------------------------------------- - * Register 0x2243: PL4MOS MaxBurst1 Register - * Bit 11-0 PL4MOS_MAX_BURST1 - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST1 0x0FFF -#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST1 0 - -/*---------------------------------------------------------------------------- - * Register 0x2244: PL4MOS MaxBurst2 Register - * Bit 11-0 PL4MOS_MAX_BURST2 - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST2 0x0FFF -#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST2 0 - -/*---------------------------------------------------------------------------- - * Register 0x2245: PL4MOS Transfer Size Register - * Bit 7-0 PL4MOS_MAX_TRANSFER - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_TRANSFER 0x00FF -#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_TRANSFER 0 - -/*---------------------------------------------------------------------------- - * Register 0x2280: PL4ODP Configuration - * Bit 15-12 PL4ODP_REPEAT_T - * Bit 8 PL4ODP_SOP_RULE - * Bit 1 PL4ODP_EN_PORTS - * Bit 0 PL4ODP_EN_DFWD - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4ODP_REPEAT_T 0xF000 -#define SUNI1x10GEXP_BITOFF_PL4ODP_REPEAT_T 12 -#define SUNI1x10GEXP_BITMSK_PL4ODP_SOP_RULE 0x0100 -#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_PORTS 0x0002 -#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_DFWD 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x2282: PL4ODP Interrupt Mask - * Bit 0 PL4ODP_OUT_DISE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISE 0x0001 - - - -#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBE 0x0080 -#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPE 0x0040 -#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPE 0x0008 -#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPE 0x0004 -#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRE 0x0002 - - -/*---------------------------------------------------------------------------- - * Register 0x2283: PL4ODP Interrupt - * Bit 0 PL4ODP_OUT_DISI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISI 0x0001 - - - -#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBI 0x0080 -#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPI 0x0040 -#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPI 0x0008 -#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPI 0x0004 -#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRI 0x0002 - -/*---------------------------------------------------------------------------- - * Register 0x2300: PL4IO Lock Detect Status - * Bit 15 PL4IO_OUT_ROOLV - * Bit 12 PL4IO_IS_ROOLV - * Bit 11 PL4IO_DIP2_ERRV - * Bit 8 PL4IO_ID_ROOLV - * Bit 4 PL4IO_IS_DOOLV - * Bit 0 PL4IO_ID_DOOLV - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLV 0x8000 -#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLV 0x1000 -#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRV 0x0800 -#define 
SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLV 0x0100 -#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLV 0x0010 -#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLV 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x2301: PL4IO Lock Detect Change - * Bit 15 PL4IO_OUT_ROOLI - * Bit 12 PL4IO_IS_ROOLI - * Bit 11 PL4IO_DIP2_ERRI - * Bit 8 PL4IO_ID_ROOLI - * Bit 4 PL4IO_IS_DOOLI - * Bit 0 PL4IO_ID_DOOLI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLI 0x8000 -#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLI 0x1000 -#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRI 0x0800 -#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLI 0x0100 -#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLI 0x0010 -#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLI 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x2302: PL4IO Lock Detect Mask - * Bit 15 PL4IO_OUT_ROOLE - * Bit 12 PL4IO_IS_ROOLE - * Bit 11 PL4IO_DIP2_ERRE - * Bit 8 PL4IO_ID_ROOLE - * Bit 4 PL4IO_IS_DOOLE - * Bit 0 PL4IO_ID_DOOLE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLE 0x8000 -#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLE 0x1000 -#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRE 0x0800 -#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLE 0x0100 -#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLE 0x0010 -#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLE 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x2303: PL4IO Lock Detect Limits - * Bit 15-8 PL4IO_REF_LIMIT - * Bit 7-0 PL4IO_TRAN_LIMIT - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4IO_REF_LIMIT 0xFF00 -#define SUNI1x10GEXP_BITOFF_PL4IO_REF_LIMIT 8 -#define SUNI1x10GEXP_BITMSK_PL4IO_TRAN_LIMIT 0x00FF -#define SUNI1x10GEXP_BITOFF_PL4IO_TRAN_LIMIT 0 - -/*---------------------------------------------------------------------------- - * Register 0x2304: PL4IO Calendar Repetitions - * Bit 15-8 PL4IO_IN_MUL - * Bit 7-0 PL4IO_OUT_MUL - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4IO_IN_MUL 0xFF00 -#define SUNI1x10GEXP_BITOFF_PL4IO_IN_MUL 8 -#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_MUL 0x00FF -#define SUNI1x10GEXP_BITOFF_PL4IO_OUT_MUL 0 - -/*---------------------------------------------------------------------------- - * Register 0x2305: PL4IO Configuration - * Bit 15 PL4IO_DIP2_ERR_CHK - * Bit 11 PL4IO_ODAT_DIS - * Bit 10 PL4IO_TRAIN_DIS - * Bit 9 PL4IO_OSTAT_DIS - * Bit 8 PL4IO_ISTAT_DIS - * Bit 7 PL4IO_NO_ISTAT - * Bit 6 PL4IO_STAT_OUTSEL - * Bit 5 PL4IO_INSEL - * Bit 4 PL4IO_DLSEL - * Bit 1-0 PL4IO_OUTSEL - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERR_CHK 0x8000 -#define SUNI1x10GEXP_BITMSK_PL4IO_ODAT_DIS 0x0800 -#define SUNI1x10GEXP_BITMSK_PL4IO_TRAIN_DIS 0x0400 -#define SUNI1x10GEXP_BITMSK_PL4IO_OSTAT_DIS 0x0200 -#define SUNI1x10GEXP_BITMSK_PL4IO_ISTAT_DIS 0x0100 -#define SUNI1x10GEXP_BITMSK_PL4IO_NO_ISTAT 0x0080 -#define SUNI1x10GEXP_BITMSK_PL4IO_STAT_OUTSEL 0x0040 -#define SUNI1x10GEXP_BITMSK_PL4IO_INSEL 0x0020 -#define SUNI1x10GEXP_BITMSK_PL4IO_DLSEL 0x0010 -#define SUNI1x10GEXP_BITMSK_PL4IO_OUTSEL 0x0003 -#define SUNI1x10GEXP_BITOFF_PL4IO_OUTSEL 0 - -/*---------------------------------------------------------------------------- - * Register 0x3040: TXXG Configuration 
Register 1 - * Bit 15 TXXG_TXEN0 - * Bit 13 TXXG_HOSTPAUSE - * Bit 12-7 TXXG_IPGT - * Bit 5 TXXG_32BIT_ALIGN - * Bit 4 TXXG_CRCEN - * Bit 3 TXXG_FCTX - * Bit 2 TXXG_FCRX - * Bit 1 TXXG_PADEN - * Bit 0 TXXG_SPRE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000 -#define SUNI1x10GEXP_BITMSK_TXXG_HOSTPAUSE 0x2000 -#define SUNI1x10GEXP_BITMSK_TXXG_IPGT 0x1F80 -#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7 -#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020 -#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010 -#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008 -#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004 -#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002 -#define SUNI1x10GEXP_BITMSK_TXXG_SPRE 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x3041: TXXG Configuration Register 2 - * Bit 7-0 TXXG_HDRSIZE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXXG_HDRSIZE 0x00FF - -/*---------------------------------------------------------------------------- - * Register 0x3042: TXXG Configuration Register 3 - * Bit 15 TXXG_FIFO_ERRE - * Bit 14 TXXG_FIFO_UDRE - * Bit 13 TXXG_MAX_LERRE - * Bit 12 TXXG_MIN_LERRE - * Bit 11 TXXG_XFERE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRE 0x8000 -#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRE 0x4000 -#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRE 0x2000 -#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRE 0x1000 -#define SUNI1x10GEXP_BITMSK_TXXG_XFERE 0x0800 - -/*---------------------------------------------------------------------------- - * Register 0x3043: TXXG Interrupt - * Bit 15 TXXG_FIFO_ERRI - * Bit 14 TXXG_FIFO_UDRI - * Bit 13 TXXG_MAX_LERRI - * Bit 12 TXXG_MIN_LERRI - * Bit 11 TXXG_XFERI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRI 0x8000 -#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRI 0x4000 -#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRI 0x2000 -#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRI 0x1000 -#define SUNI1x10GEXP_BITMSK_TXXG_XFERI 0x0800 - -/*---------------------------------------------------------------------------- - * Register 0x3044: TXXG Status Register - * Bit 1 TXXG_TXACTIVE - * Bit 0 TXXG_PAUSED - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXXG_TXACTIVE 0x0002 -#define SUNI1x10GEXP_BITMSK_TXXG_PAUSED 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x3046: TXXG TX_MINFR - Transmit Min Frame Size Register - * Bit 7-0 TXXG_TX_MINFR - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXXG_TX_MINFR 0x00FF -#define SUNI1x10GEXP_BITOFF_TXXG_TX_MINFR 0 - -/*---------------------------------------------------------------------------- - * Register 0x3052: TXXG Pause Quantum Value Configuration Register - * Bit 7-0 TXXG_FC_PAUSE_QNTM - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXXG_FC_PAUSE_QNTM 0x00FF -#define SUNI1x10GEXP_BITOFF_TXXG_FC_PAUSE_QNTM 0 - -/*---------------------------------------------------------------------------- - * Register 0x3080: XTEF Control - * Bit 3-0 XTEF_FORCE_PARITY_ERR - *----------------------------------------------------------------------------*/ -#define 
SUNI1x10GEXP_BITMSK_XTEF_FORCE_PARITY_ERR 0x000F -#define SUNI1x10GEXP_BITOFF_XTEF_FORCE_PARITY_ERR 0 - -/*---------------------------------------------------------------------------- - * Register 0x3084: XTEF Interrupt Event Register - * Bit 0 XTEF_LOST_SYNCI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCI 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x3085: XTEF Interrupt Enable Register - * Bit 0 XTEF_LOST_SYNCE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCE 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x3086: XTEF Visibility Register - * Bit 0 XTEF_LOST_SYNCV - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCV 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x30C0: TXOAM OAM Configuration - * Bit 15 TXOAM_HEC_EN - * Bit 14 TXOAM_EMPTYCODE_EN - * Bit 13 TXOAM_FORCE_IDLE - * Bit 12 TXOAM_IGNORE_IDLE - * Bit 11-6 TXOAM_PX_OVERWRITE - * Bit 5-0 TXOAM_PX_SEL - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXOAM_HEC_EN 0x8000 -#define SUNI1x10GEXP_BITMSK_TXOAM_EMPTYCODE_EN 0x4000 -#define SUNI1x10GEXP_BITMSK_TXOAM_FORCE_IDLE 0x2000 -#define SUNI1x10GEXP_BITMSK_TXOAM_IGNORE_IDLE 0x1000 -#define SUNI1x10GEXP_BITMSK_TXOAM_PX_OVERWRITE 0x0FC0 -#define SUNI1x10GEXP_BITOFF_TXOAM_PX_OVERWRITE 6 -#define SUNI1x10GEXP_BITMSK_TXOAM_PX_SEL 0x003F -#define SUNI1x10GEXP_BITOFF_TXOAM_PX_SEL 0 - -/*---------------------------------------------------------------------------- - * Register 0x30C1: TXOAM Mini-Packet Rate Configuration - * Bit 15 TXOAM_MINIDIS - * Bit 14 TXOAM_BUSY - * Bit 13 TXOAM_TRANS_EN - * Bit 10-0 TXOAM_MINIRATE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXOAM_MINIDIS 0x8000 -#define SUNI1x10GEXP_BITMSK_TXOAM_BUSY 0x4000 -#define SUNI1x10GEXP_BITMSK_TXOAM_TRANS_EN 0x2000 -#define SUNI1x10GEXP_BITMSK_TXOAM_MINIRATE 0x07FF - -/*---------------------------------------------------------------------------- - * Register 0x30C2: TXOAM Mini-Packet Gap and FIFO Configuration - * Bit 13-10 TXOAM_FTHRESH - * Bit 9-6 TXOAM_MINIPOST - * Bit 5-0 TXOAM_MINIPRE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXOAM_FTHRESH 0x3C00 -#define SUNI1x10GEXP_BITOFF_TXOAM_FTHRESH 10 -#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPOST 0x03C0 -#define SUNI1x10GEXP_BITOFF_TXOAM_MINIPOST 6 -#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPRE 0x003F - -/*---------------------------------------------------------------------------- - * Register 0x30C6: TXOAM Interrupt Enable - * Bit 2 TXOAM_SOP_ERRE - * Bit 1 TXOAM_OFLE - * Bit 0 TXOAM_ERRE - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRE 0x0004 -#define SUNI1x10GEXP_BITMSK_TXOAM_OFLE 0x0002 -#define SUNI1x10GEXP_BITMSK_TXOAM_ERRE 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x30C7: TXOAM Interrupt Status - * Bit 2 TXOAM_SOP_ERRI - * Bit 1 TXOAM_OFLI - * Bit 0 TXOAM_ERRI - *----------------------------------------------------------------------------*/ -#define 
SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRI 0x0004 -#define SUNI1x10GEXP_BITMSK_TXOAM_OFLI 0x0002 -#define SUNI1x10GEXP_BITMSK_TXOAM_ERRI 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x30CF: TXOAM Coset - * Bit 7-0 TXOAM_COSET - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_TXOAM_COSET 0x00FF - -/*---------------------------------------------------------------------------- - * Register 0x3200: EFLX Global Configuration - * Bit 15 EFLX_ERCU_EN - * Bit 7 EFLX_EN_EDSWT - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EFLX_ERCU_EN 0x8000 -#define SUNI1x10GEXP_BITMSK_EFLX_EN_EDSWT 0x0080 - -/*---------------------------------------------------------------------------- - * Register 0x3201: EFLX ERCU Global Status - * Bit 13 EFLX_OVF_ERR - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EFLX_OVF_ERR 0x2000 - -/*---------------------------------------------------------------------------- - * Register 0x3202: EFLX Indirect Channel Address - * Bit 15 EFLX_BUSY - * Bit 14 EFLX_RDWRB - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EFLX_BUSY 0x8000 -#define SUNI1x10GEXP_BITMSK_EFLX_RDWRB 0x4000 - -/*---------------------------------------------------------------------------- - * Register 0x3203: EFLX Indirect Logical FIFO Low Limit - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EFLX_LOLIM 0x03FF -#define SUNI1x10GEXP_BITOFF_EFLX_LOLIM 0 - -/*---------------------------------------------------------------------------- - * Register 0x3204: EFLX Indirect Logical FIFO High Limit - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EFLX_HILIM 0x03FF -#define SUNI1x10GEXP_BITOFF_EFLX_HILIM 0 - -/*---------------------------------------------------------------------------- - * Register 0x3205: EFLX Indirect Full/Almost-Full Status and Limit - * Bit 15 EFLX_FULL - * Bit 14 EFLX_AFULL - * Bit 13-0 EFLX_AFTH - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EFLX_FULL 0x8000 -#define SUNI1x10GEXP_BITMSK_EFLX_AFULL 0x4000 -#define SUNI1x10GEXP_BITMSK_EFLX_AFTH 0x3FFF -#define SUNI1x10GEXP_BITOFF_EFLX_AFTH 0 - -/*---------------------------------------------------------------------------- - * Register 0x3206: EFLX Indirect Empty/Almost-Empty Status and Limit - * Bit 15 EFLX_EMPTY - * Bit 14 EFLX_AEMPTY - * Bit 13-0 EFLX_AETH - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EFLX_EMPTY 0x8000 -#define SUNI1x10GEXP_BITMSK_EFLX_AEMPTY 0x4000 -#define SUNI1x10GEXP_BITMSK_EFLX_AETH 0x3FFF -#define SUNI1x10GEXP_BITOFF_EFLX_AETH 0 - -/*---------------------------------------------------------------------------- - * Register 0x3207: EFLX Indirect FIFO Cut-Through Threshold - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EFLX_CUT_THRU 0x3FFF -#define SUNI1x10GEXP_BITOFF_EFLX_CUT_THRU 0 - -/*---------------------------------------------------------------------------- - * Register 0x320C: EFLX FIFO Overflow Error Enable - * Bit 0 EFLX_OVFE - *----------------------------------------------------------------------------*/ -#define 
SUNI1x10GEXP_BITMSK_EFLX_OVFE 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x320D: EFLX FIFO Overflow Error Indication - * Bit 0 EFLX_OVFI - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EFLX_OVFI 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x3210: EFLX Channel Provision - * Bit 0 EFLX_PROV - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_EFLX_PROV 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x3280: PL4IDU Configuration - * Bit 2 PL4IDU_SYNCH_ON_TRAIN - * Bit 1 PL4IDU_EN_PORTS - * Bit 0 PL4IDU_EN_DFWD - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4IDU_SYNCH_ON_TRAIN 0x0004 -#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_PORTS 0x0002 -#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_DFWD 0x0001 - -/*---------------------------------------------------------------------------- - * Register 0x3282: PL4IDU Interrupt Mask - * Bit 1 PL4IDU_DIP4E - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4E 0x0002 - -/*---------------------------------------------------------------------------- - * Register 0x3283: PL4IDU Interrupt - * Bit 1 PL4IDU_DIP4I - *----------------------------------------------------------------------------*/ -#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I 0x0002 - -#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */ - diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c deleted file mode 100644 index 8bed4a5..0000000 --- a/drivers/net/chelsio/tp.c +++ /dev/null @@ -1,171 +0,0 @@ -/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */ -#include "common.h" -#include "regs.h" -#include "tp.h" -#ifdef CONFIG_CHELSIO_T1_1G -#include "fpga_defs.h" -#endif - -struct petp { - adapter_t *adapter; -}; - -/* Pause deadlock avoidance parameters */ -#define DROP_MSEC 16 -#define DROP_PKTS_CNT 1 - -static void tp_init(adapter_t * ap, const struct tp_params *p, - unsigned int tp_clk) -{ - u32 val; - - if (!t1_is_asic(ap)) - return; - - val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM | - F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET; - if (!p->pm_size) - val |= F_OFFLOAD_DISABLE; - else - val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM; - writel(val, ap->regs + A_TP_IN_CONFIG); - writel(F_TP_OUT_CSPI_CPL | - F_TP_OUT_ESPI_ETHERNET | - F_TP_OUT_ESPI_GENERATE_IP_CSUM | - F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG); - writel(V_IP_TTL(64) | - F_PATH_MTU /* IP DF bit */ | - V_5TUPLE_LOOKUP(p->use_5tuple_mode) | - V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG); - /* - * Enable pause frame deadlock prevention. 
- */ - if (is_T2(ap) && ap->params.nports > 1) { - u32 drop_ticks = DROP_MSEC * (tp_clk / 1000); - - writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR | - V_DROP_TICKS_CNT(drop_ticks) | - V_NUM_PKTS_DROPPED(DROP_PKTS_CNT), - ap->regs + A_TP_TX_DROP_CONFIG); - } -} - -void t1_tp_destroy(struct petp *tp) -{ - kfree(tp); -} - -struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p) -{ - struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); - - if (!tp) - return NULL; - - tp->adapter = adapter; - - return tp; -} - -void t1_tp_intr_enable(struct petp *tp) -{ - u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE); - -#ifdef CONFIG_CHELSIO_T1_1G - if (!t1_is_asic(tp->adapter)) { - /* FPGA */ - writel(0xffffffff, - tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE); - writel(tp_intr | FPGA_PCIX_INTERRUPT_TP, - tp->adapter->regs + A_PL_ENABLE); - } else -#endif - { - /* We don't use any TP interrupts */ - writel(0, tp->adapter->regs + A_TP_INT_ENABLE); - writel(tp_intr | F_PL_INTR_TP, - tp->adapter->regs + A_PL_ENABLE); - } -} - -void t1_tp_intr_disable(struct petp *tp) -{ - u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE); - -#ifdef CONFIG_CHELSIO_T1_1G - if (!t1_is_asic(tp->adapter)) { - /* FPGA */ - writel(0, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE); - writel(tp_intr & ~FPGA_PCIX_INTERRUPT_TP, - tp->adapter->regs + A_PL_ENABLE); - } else -#endif - { - writel(0, tp->adapter->regs + A_TP_INT_ENABLE); - writel(tp_intr & ~F_PL_INTR_TP, - tp->adapter->regs + A_PL_ENABLE); - } -} - -void t1_tp_intr_clear(struct petp *tp) -{ -#ifdef CONFIG_CHELSIO_T1_1G - if (!t1_is_asic(tp->adapter)) { - writel(0xffffffff, - tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); - writel(FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_CAUSE); - return; - } -#endif - writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE); - writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE); -} - -int t1_tp_intr_handler(struct petp *tp) -{ - u32 cause; - -#ifdef CONFIG_CHELSIO_T1_1G - /* FPGA doesn't support TP interrupts. */ - if (!t1_is_asic(tp->adapter)) - return 1; -#endif - - cause = readl(tp->adapter->regs + A_TP_INT_CAUSE); - writel(cause, tp->adapter->regs + A_TP_INT_CAUSE); - return 0; -} - -static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable) -{ - u32 val = readl(tp->adapter->regs + A_TP_GLOBAL_CONFIG); - - if (enable) - val |= csum_bit; - else - val &= ~csum_bit; - writel(val, tp->adapter->regs + A_TP_GLOBAL_CONFIG); -} - -void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable) -{ - set_csum_offload(tp, F_IP_CSUM, enable); -} - -void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable) -{ - set_csum_offload(tp, F_TCP_CSUM, enable); -} - -/* - * Initialize TP state. tp_params contains initial settings for some TP - * parameters, particularly the one-time PM and CM settings. 
- */ -int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk) -{ - adapter_t *adapter = tp->adapter; - - tp_init(adapter, p, tp_clk); - writel(F_TP_RESET, adapter->regs + A_TP_RESET); - return 0; -} diff --git a/drivers/net/chelsio/tp.h b/drivers/net/chelsio/tp.h deleted file mode 100644 index dfd8ce2..0000000 --- a/drivers/net/chelsio/tp.h +++ /dev/null @@ -1,72 +0,0 @@ -/* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */ -#ifndef CHELSIO_TP_H -#define CHELSIO_TP_H - -#include "common.h" - -#define TP_MAX_RX_COALESCING_SIZE 16224U - -struct tp_mib_statistics { - - /* IP */ - u32 ipInReceive_hi; - u32 ipInReceive_lo; - u32 ipInHdrErrors_hi; - u32 ipInHdrErrors_lo; - u32 ipInAddrErrors_hi; - u32 ipInAddrErrors_lo; - u32 ipInUnknownProtos_hi; - u32 ipInUnknownProtos_lo; - u32 ipInDiscards_hi; - u32 ipInDiscards_lo; - u32 ipInDelivers_hi; - u32 ipInDelivers_lo; - u32 ipOutRequests_hi; - u32 ipOutRequests_lo; - u32 ipOutDiscards_hi; - u32 ipOutDiscards_lo; - u32 ipOutNoRoutes_hi; - u32 ipOutNoRoutes_lo; - u32 ipReasmTimeout; - u32 ipReasmReqds; - u32 ipReasmOKs; - u32 ipReasmFails; - - u32 reserved[8]; - - /* TCP */ - u32 tcpActiveOpens; - u32 tcpPassiveOpens; - u32 tcpAttemptFails; - u32 tcpEstabResets; - u32 tcpOutRsts; - u32 tcpCurrEstab; - u32 tcpInSegs_hi; - u32 tcpInSegs_lo; - u32 tcpOutSegs_hi; - u32 tcpOutSegs_lo; - u32 tcpRetransSeg_hi; - u32 tcpRetransSeg_lo; - u32 tcpInErrs_hi; - u32 tcpInErrs_lo; - u32 tcpRtoMin; - u32 tcpRtoMax; -}; - -struct petp; -struct tp_params; - -struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p); -void t1_tp_destroy(struct petp *tp); - -void t1_tp_intr_disable(struct petp *tp); -void t1_tp_intr_enable(struct petp *tp); -void t1_tp_intr_clear(struct petp *tp); -int t1_tp_intr_handler(struct petp *tp); - -void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps); -void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable); -void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable); -int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size); -int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk); -#endif diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c deleted file mode 100644 index b0cb388..0000000 --- a/drivers/net/chelsio/vsc7326.c +++ /dev/null @@ -1,730 +0,0 @@ -/* $Date: 2006/04/28 19:20:06 $ $RCSfile: vsc7326.c,v $ $Revision: 1.19 $ */ - -/* Driver for Vitesse VSC7326 (Schaumburg) MAC */ - -#include "gmac.h" -#include "elmer0.h" -#include "vsc7326_reg.h" - -/* Update fast changing statistics every 15 seconds */ -#define STATS_TICK_SECS 15 -/* 30 minutes for full statistics update */ -#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS) - -#define MAX_MTU 9600 - -/* The egress WM value 0x01a01fff should be used only when the - * interface is down (MAC port disabled). This is a workaround - * for disabling the T2/MAC flow-control. When the interface is - * enabled, the WM value should be set to 0x014a03F0. 
- */ -#define WM_DISABLE 0x01a01fff -#define WM_ENABLE 0x014a03F0 - -struct init_table { - u32 addr; - u32 data; -}; - -struct _cmac_instance { - u32 index; - u32 ticks; -}; - -#define INITBLOCK_SLEEP 0xffffffff - -static void vsc_read(adapter_t *adapter, u32 addr, u32 *val) -{ - u32 status, vlo, vhi; - int i; - - spin_lock_bh(&adapter->mac_lock); - t1_tpi_read(adapter, (addr << 2) + 4, &vlo); - i = 0; - do { - t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo); - t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi); - status = (vhi << 16) | vlo; - i++; - } while (((status & 1) == 0) && (i < 50)); - if (i == 50) - pr_err("Invalid tpi read from MAC, breaking loop.\n"); - - t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo); - t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi); - - *val = (vhi << 16) | vlo; - - /* pr_err("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n", - ((addr&0xe000)>>13), ((addr&0x1e00)>>9), - ((addr&0x01fe)>>1), *val); */ - spin_unlock_bh(&adapter->mac_lock); -} - -static void vsc_write(adapter_t *adapter, u32 addr, u32 data) -{ - spin_lock_bh(&adapter->mac_lock); - t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF); - t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF); - /* pr_err("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n", - ((addr&0xe000)>>13), ((addr&0x1e00)>>9), - ((addr&0x01fe)>>1), data); */ - spin_unlock_bh(&adapter->mac_lock); -} - -/* Hard reset the MAC. This wipes out *all* configuration. */ -static void vsc7326_full_reset(adapter_t* adapter) -{ - u32 val; - u32 result = 0xffff; - - t1_tpi_read(adapter, A_ELMER0_GPO, &val); - val &= ~1; - t1_tpi_write(adapter, A_ELMER0_GPO, val); - udelay(2); - val |= 0x1; /* Enable mac MAC itself */ - val |= 0x800; /* Turn off the red LED */ - t1_tpi_write(adapter, A_ELMER0_GPO, val); - mdelay(1); - vsc_write(adapter, REG_SW_RESET, 0x80000001); - do { - mdelay(1); - vsc_read(adapter, REG_SW_RESET, &result); - } while (result != 0x0); -} - -static struct init_table vsc7326_reset[] = { - { REG_IFACE_MODE, 0x00000000 }, - { REG_CRC_CFG, 0x00000020 }, - { REG_PLL_CLK_SPEED, 0x00050c00 }, - { REG_PLL_CLK_SPEED, 0x00050c00 }, - { REG_MSCH, 0x00002f14 }, - { REG_SPI4_MISC, 0x00040409 }, - { REG_SPI4_DESKEW, 0x00080000 }, - { REG_SPI4_ING_SETUP2, 0x08080004 }, - { REG_SPI4_ING_SETUP0, 0x04111004 }, - { REG_SPI4_EGR_SETUP0, 0x80001a04 }, - { REG_SPI4_ING_SETUP1, 0x02010000 }, - { REG_AGE_INC(0), 0x00000000 }, - { REG_AGE_INC(1), 0x00000000 }, - { REG_ING_CONTROL, 0x0a200011 }, - { REG_EGR_CONTROL, 0xa0010091 }, -}; - -static struct init_table vsc7326_portinit[4][22] = { - { /* Port 0 */ - /* FIFO setup */ - { REG_DBG(0), 0x000004f0 }, - { REG_HDX(0), 0x00073101 }, - { REG_TEST(0,0), 0x00000022 }, - { REG_TEST(1,0), 0x00000022 }, - { REG_TOP_BOTTOM(0,0), 0x003f0000 }, - { REG_TOP_BOTTOM(1,0), 0x00120000 }, - { REG_HIGH_LOW_WM(0,0), 0x07460757 }, - { REG_HIGH_LOW_WM(1,0), WM_DISABLE }, - { REG_CT_THRHLD(0,0), 0x00000000 }, - { REG_CT_THRHLD(1,0), 0x00000000 }, - { REG_BUCKE(0), 0x0002ffff }, - { REG_BUCKI(0), 0x0002ffff }, - { REG_TEST(0,0), 0x00000020 }, - { REG_TEST(1,0), 0x00000020 }, - /* Port config */ - { REG_MAX_LEN(0), 0x00002710 }, - { REG_PORT_FAIL(0), 0x00000002 }, - { REG_NORMALIZER(0), 0x00000a64 }, - { REG_DENORM(0), 0x00000010 }, - { REG_STICK_BIT(0), 0x03baa370 }, - { REG_DEV_SETUP(0), 0x00000083 }, - { REG_DEV_SETUP(0), 0x00000082 }, - { REG_MODE_CFG(0), 0x0200259f }, - }, - { /* Port 1 */ - /* FIFO setup */ - { REG_DBG(1), 0x000004f0 }, - { REG_HDX(1), 0x00073101 }, - { REG_TEST(0,1), 
0x00000022 }, - { REG_TEST(1,1), 0x00000022 }, - { REG_TOP_BOTTOM(0,1), 0x007e003f }, - { REG_TOP_BOTTOM(1,1), 0x00240012 }, - { REG_HIGH_LOW_WM(0,1), 0x07460757 }, - { REG_HIGH_LOW_WM(1,1), WM_DISABLE }, - { REG_CT_THRHLD(0,1), 0x00000000 }, - { REG_CT_THRHLD(1,1), 0x00000000 }, - { REG_BUCKE(1), 0x0002ffff }, - { REG_BUCKI(1), 0x0002ffff }, - { REG_TEST(0,1), 0x00000020 }, - { REG_TEST(1,1), 0x00000020 }, - /* Port config */ - { REG_MAX_LEN(1), 0x00002710 }, - { REG_PORT_FAIL(1), 0x00000002 }, - { REG_NORMALIZER(1), 0x00000a64 }, - { REG_DENORM(1), 0x00000010 }, - { REG_STICK_BIT(1), 0x03baa370 }, - { REG_DEV_SETUP(1), 0x00000083 }, - { REG_DEV_SETUP(1), 0x00000082 }, - { REG_MODE_CFG(1), 0x0200259f }, - }, - { /* Port 2 */ - /* FIFO setup */ - { REG_DBG(2), 0x000004f0 }, - { REG_HDX(2), 0x00073101 }, - { REG_TEST(0,2), 0x00000022 }, - { REG_TEST(1,2), 0x00000022 }, - { REG_TOP_BOTTOM(0,2), 0x00bd007e }, - { REG_TOP_BOTTOM(1,2), 0x00360024 }, - { REG_HIGH_LOW_WM(0,2), 0x07460757 }, - { REG_HIGH_LOW_WM(1,2), WM_DISABLE }, - { REG_CT_THRHLD(0,2), 0x00000000 }, - { REG_CT_THRHLD(1,2), 0x00000000 }, - { REG_BUCKE(2), 0x0002ffff }, - { REG_BUCKI(2), 0x0002ffff }, - { REG_TEST(0,2), 0x00000020 }, - { REG_TEST(1,2), 0x00000020 }, - /* Port config */ - { REG_MAX_LEN(2), 0x00002710 }, - { REG_PORT_FAIL(2), 0x00000002 }, - { REG_NORMALIZER(2), 0x00000a64 }, - { REG_DENORM(2), 0x00000010 }, - { REG_STICK_BIT(2), 0x03baa370 }, - { REG_DEV_SETUP(2), 0x00000083 }, - { REG_DEV_SETUP(2), 0x00000082 }, - { REG_MODE_CFG(2), 0x0200259f }, - }, - { /* Port 3 */ - /* FIFO setup */ - { REG_DBG(3), 0x000004f0 }, - { REG_HDX(3), 0x00073101 }, - { REG_TEST(0,3), 0x00000022 }, - { REG_TEST(1,3), 0x00000022 }, - { REG_TOP_BOTTOM(0,3), 0x00fc00bd }, - { REG_TOP_BOTTOM(1,3), 0x00480036 }, - { REG_HIGH_LOW_WM(0,3), 0x07460757 }, - { REG_HIGH_LOW_WM(1,3), WM_DISABLE }, - { REG_CT_THRHLD(0,3), 0x00000000 }, - { REG_CT_THRHLD(1,3), 0x00000000 }, - { REG_BUCKE(3), 0x0002ffff }, - { REG_BUCKI(3), 0x0002ffff }, - { REG_TEST(0,3), 0x00000020 }, - { REG_TEST(1,3), 0x00000020 }, - /* Port config */ - { REG_MAX_LEN(3), 0x00002710 }, - { REG_PORT_FAIL(3), 0x00000002 }, - { REG_NORMALIZER(3), 0x00000a64 }, - { REG_DENORM(3), 0x00000010 }, - { REG_STICK_BIT(3), 0x03baa370 }, - { REG_DEV_SETUP(3), 0x00000083 }, - { REG_DEV_SETUP(3), 0x00000082 }, - { REG_MODE_CFG(3), 0x0200259f }, - }, -}; - -static void run_table(adapter_t *adapter, struct init_table *ib, int len) -{ - int i; - - for (i = 0; i < len; i++) { - if (ib[i].addr == INITBLOCK_SLEEP) { - udelay( ib[i].data ); - pr_err("sleep %d us\n",ib[i].data); - } else - vsc_write( adapter, ib[i].addr, ib[i].data ); - } -} - -static int bist_rd(adapter_t *adapter, int moduleid, int address) -{ - int data = 0; - u32 result = 0; - - if ((address != 0x0) && - (address != 0x1) && - (address != 0x2) && - (address != 0xd) && - (address != 0xe)) - pr_err("No bist address: 0x%x\n", address); - - data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) | - ((moduleid & 0xff) << 0)); - vsc_write(adapter, REG_RAM_BIST_CMD, data); - - udelay(10); - - vsc_read(adapter, REG_RAM_BIST_RESULT, &result); - if ((result & (1 << 9)) != 0x0) - pr_err("Still in bist read: 0x%x\n", result); - else if ((result & (1 << 8)) != 0x0) - pr_err("bist read error: 0x%x\n", result); - - return result & 0xff; -} - -static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) -{ - int data = 0; - u32 result = 0; - - if ((address != 0x0) && - (address != 0x1) && - (address != 0x2) && - (address != 
0xd) && - (address != 0xe)) - pr_err("No bist address: 0x%x\n", address); - - if (value > 255) - pr_err("Suspicious write out of range value: 0x%x\n", value); - - data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) | - ((moduleid & 0xff) << 0)); - vsc_write(adapter, REG_RAM_BIST_CMD, data); - - udelay(5); - - vsc_read(adapter, REG_RAM_BIST_CMD, &result); - if ((result & (1 << 27)) != 0x0) - pr_err("Still in bist write: 0x%x\n", result); - else if ((result & (1 << 26)) != 0x0) - pr_err("bist write error: 0x%x\n", result); - - return 0; -} - -static int run_bist(adapter_t *adapter, int moduleid) -{ - /*run bist*/ - (void) bist_wr(adapter,moduleid, 0x00, 0x02); - (void) bist_wr(adapter,moduleid, 0x01, 0x01); - - return 0; -} - -static int check_bist(adapter_t *adapter, int moduleid) -{ - int result=0; - int column=0; - /*check bist*/ - result = bist_rd(adapter,moduleid, 0x02); - column = ((bist_rd(adapter,moduleid, 0x0e)<<8) + - (bist_rd(adapter,moduleid, 0x0d))); - if ((result & 3) != 0x3) - pr_err("Result: 0x%x BIST error in ram %d, column: 0x%04x\n", - result, moduleid, column); - return 0; -} - -static int enable_mem(adapter_t *adapter, int moduleid) -{ - /*enable mem*/ - (void) bist_wr(adapter,moduleid, 0x00, 0x00); - return 0; -} - -static int run_bist_all(adapter_t *adapter) -{ - int port = 0; - u32 val = 0; - - vsc_write(adapter, REG_MEM_BIST, 0x5); - vsc_read(adapter, REG_MEM_BIST, &val); - - for (port = 0; port < 12; port++) - vsc_write(adapter, REG_DEV_SETUP(port), 0x0); - - udelay(300); - vsc_write(adapter, REG_SPI4_MISC, 0x00040409); - udelay(300); - - (void) run_bist(adapter,13); - (void) run_bist(adapter,14); - (void) run_bist(adapter,20); - (void) run_bist(adapter,21); - mdelay(200); - (void) check_bist(adapter,13); - (void) check_bist(adapter,14); - (void) check_bist(adapter,20); - (void) check_bist(adapter,21); - udelay(100); - (void) enable_mem(adapter,13); - (void) enable_mem(adapter,14); - (void) enable_mem(adapter,20); - (void) enable_mem(adapter,21); - udelay(300); - vsc_write(adapter, REG_SPI4_MISC, 0x60040400); - udelay(300); - for (port = 0; port < 12; port++) - vsc_write(adapter, REG_DEV_SETUP(port), 0x1); - - udelay(300); - vsc_write(adapter, REG_MEM_BIST, 0x0); - mdelay(10); - return 0; -} - -static int mac_intr_handler(struct cmac *mac) -{ - return 0; -} - -static int mac_intr_enable(struct cmac *mac) -{ - return 0; -} - -static int mac_intr_disable(struct cmac *mac) -{ - return 0; -} - -static int mac_intr_clear(struct cmac *mac) -{ - return 0; -} - -/* Expect MAC address to be in network byte order. 
*/ -static int mac_set_address(struct cmac* mac, u8 addr[6]) -{ - u32 val; - int port = mac->instance->index; - - vsc_write(mac->adapter, REG_MAC_LOW_ADDR(port), - (addr[3] << 16) | (addr[4] << 8) | addr[5]); - vsc_write(mac->adapter, REG_MAC_HIGH_ADDR(port), - (addr[0] << 16) | (addr[1] << 8) | addr[2]); - - vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &val); - val &= ~0xf0000000; - vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, val | (port << 28)); - - vsc_write(mac->adapter, REG_ING_FFILT_MASK0, - 0xffff0000 | (addr[4] << 8) | addr[5]); - vsc_write(mac->adapter, REG_ING_FFILT_MASK1, - 0xffff0000 | (addr[2] << 8) | addr[3]); - vsc_write(mac->adapter, REG_ING_FFILT_MASK2, - 0xffff0000 | (addr[0] << 8) | addr[1]); - return 0; -} - -static int mac_get_address(struct cmac *mac, u8 addr[6]) -{ - u32 addr_lo, addr_hi; - int port = mac->instance->index; - - vsc_read(mac->adapter, REG_MAC_LOW_ADDR(port), &addr_lo); - vsc_read(mac->adapter, REG_MAC_HIGH_ADDR(port), &addr_hi); - - addr[0] = (u8) (addr_hi >> 16); - addr[1] = (u8) (addr_hi >> 8); - addr[2] = (u8) addr_hi; - addr[3] = (u8) (addr_lo >> 16); - addr[4] = (u8) (addr_lo >> 8); - addr[5] = (u8) addr_lo; - return 0; -} - -/* This is intended to reset a port, not the whole MAC */ -static int mac_reset(struct cmac *mac) -{ - int index = mac->instance->index; - - run_table(mac->adapter, vsc7326_portinit[index], - ARRAY_SIZE(vsc7326_portinit[index])); - - return 0; -} - -static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm) -{ - u32 v; - int port = mac->instance->index; - - vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &v); - v |= 1 << 12; - - if (t1_rx_mode_promisc(rm)) - v &= ~(1 << (port + 16)); - else - v |= 1 << (port + 16); - - vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, v); - return 0; -} - -static int mac_set_mtu(struct cmac *mac, int mtu) -{ - int port = mac->instance->index; - - if (mtu > MAX_MTU) - return -EINVAL; - - /* max_len includes header and FCS */ - vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4); - return 0; -} - -static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, - int fc) -{ - u32 v; - int enable, port = mac->instance->index; - - if (speed >= 0 && speed != SPEED_10 && speed != SPEED_100 && - speed != SPEED_1000) - return -1; - if (duplex > 0 && duplex != DUPLEX_FULL) - return -1; - - if (speed >= 0) { - vsc_read(mac->adapter, REG_MODE_CFG(port), &v); - enable = v & 3; /* save tx/rx enables */ - v &= ~0xf; - v |= 4; /* full duplex */ - if (speed == SPEED_1000) - v |= 8; /* GigE */ - enable |= v; - vsc_write(mac->adapter, REG_MODE_CFG(port), v); - - if (speed == SPEED_1000) - v = 0x82; - else if (speed == SPEED_100) - v = 0x84; - else /* SPEED_10 */ - v = 0x86; - vsc_write(mac->adapter, REG_DEV_SETUP(port), v | 1); /* reset */ - vsc_write(mac->adapter, REG_DEV_SETUP(port), v); - vsc_read(mac->adapter, REG_DBG(port), &v); - v &= ~0xff00; - if (speed == SPEED_1000) - v |= 0x400; - else if (speed == SPEED_100) - v |= 0x2000; - else /* SPEED_10 */ - v |= 0xff00; - vsc_write(mac->adapter, REG_DBG(port), v); - - vsc_write(mac->adapter, REG_TX_IFG(port), - speed == SPEED_1000 ? 
5 : 0x11); - if (duplex == DUPLEX_HALF) - enable = 0x0; /* 100 or 10 */ - else if (speed == SPEED_1000) - enable = 0xc; - else /* SPEED_100 or 10 */ - enable = 0x4; - enable |= 0x9 << 10; /* IFG1 */ - enable |= 0x6 << 6; /* IFG2 */ - enable |= 0x1 << 4; /* VLAN */ - enable |= 0x3; /* RX/TX EN */ - vsc_write(mac->adapter, REG_MODE_CFG(port), enable); - - } - - vsc_read(mac->adapter, REG_PAUSE_CFG(port), &v); - v &= 0xfff0ffff; - v |= 0x20000; /* xon/xoff */ - if (fc & PAUSE_RX) - v |= 0x40000; - if (fc & PAUSE_TX) - v |= 0x80000; - if (fc == (PAUSE_RX | PAUSE_TX)) - v |= 0x10000; - vsc_write(mac->adapter, REG_PAUSE_CFG(port), v); - return 0; -} - -static int mac_enable(struct cmac *mac, int which) -{ - u32 val; - int port = mac->instance->index; - - /* Write the correct WM value when the port is enabled. */ - vsc_write(mac->adapter, REG_HIGH_LOW_WM(1,port), WM_ENABLE); - - vsc_read(mac->adapter, REG_MODE_CFG(port), &val); - if (which & MAC_DIRECTION_RX) - val |= 0x2; - if (which & MAC_DIRECTION_TX) - val |= 1; - vsc_write(mac->adapter, REG_MODE_CFG(port), val); - return 0; -} - -static int mac_disable(struct cmac *mac, int which) -{ - u32 val; - int i, port = mac->instance->index; - - /* Reset the port, this also writes the correct WM value */ - mac_reset(mac); - - vsc_read(mac->adapter, REG_MODE_CFG(port), &val); - if (which & MAC_DIRECTION_RX) - val &= ~0x2; - if (which & MAC_DIRECTION_TX) - val &= ~0x1; - vsc_write(mac->adapter, REG_MODE_CFG(port), val); - vsc_read(mac->adapter, REG_MODE_CFG(port), &val); - - /* Clear stats */ - for (i = 0; i <= 0x3a; ++i) - vsc_write(mac->adapter, CRA(4, port, i), 0); - - /* Clear software counters */ - memset(&mac->stats, 0, sizeof(struct cmac_statistics)); - - return 0; -} - -static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat) -{ - u32 v, lo; - - vsc_read(mac->adapter, addr, &v); - lo = *stat; - *stat = *stat - lo + v; - - if (v == 0) - return; - - if (v < lo) - *stat += (1ULL << 32); -} - -static void port_stats_update(struct cmac *mac) -{ - struct { - unsigned int reg; - unsigned int offset; - } hw_stats[] = { - -#define HW_STAT(reg, stat_name) \ - { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL } - - /* Rx stats */ - HW_STAT(RxUnicast, RxUnicastFramesOK), - HW_STAT(RxMulticast, RxMulticastFramesOK), - HW_STAT(RxBroadcast, RxBroadcastFramesOK), - HW_STAT(Crc, RxFCSErrors), - HW_STAT(RxAlignment, RxAlignErrors), - HW_STAT(RxOversize, RxFrameTooLongErrors), - HW_STAT(RxPause, RxPauseFrames), - HW_STAT(RxJabbers, RxJabberErrors), - HW_STAT(RxFragments, RxRuntErrors), - HW_STAT(RxUndersize, RxRuntErrors), - HW_STAT(RxSymbolCarrier, RxSymbolErrors), - HW_STAT(RxSize1519ToMax, RxJumboFramesOK), - - /* Tx stats (skip collision stats as we are full-duplex only) */ - HW_STAT(TxUnicast, TxUnicastFramesOK), - HW_STAT(TxMulticast, TxMulticastFramesOK), - HW_STAT(TxBroadcast, TxBroadcastFramesOK), - HW_STAT(TxPause, TxPauseFrames), - HW_STAT(TxUnderrun, TxUnderrun), - HW_STAT(TxSize1519ToMax, TxJumboFramesOK), - }, *p = hw_stats; - unsigned int port = mac->instance->index; - u64 *stats = (u64 *)&mac->stats; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(hw_stats); i++) - rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset); - - rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK); - rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK); - rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad); -} - -/* - * This function is called periodically to accumulate the current values of the 
- * RMON counters into the port statistics. Since the counters are only 32 bits - * some of them can overflow in less than a minute at GigE speeds, so this - * function should be called every 30 seconds or so. - * - * To cut down on reading costs we update only the octet counters at each tick - * and do a full update at major ticks, which can be every 30 minutes or more. - */ -static const struct cmac_statistics *mac_update_statistics(struct cmac *mac, - int flag) -{ - if (flag == MAC_STATS_UPDATE_FULL || - mac->instance->ticks >= MAJOR_UPDATE_TICKS) { - port_stats_update(mac); - mac->instance->ticks = 0; - } else { - int port = mac->instance->index; - - rmon_update(mac, REG_RX_OK_BYTES(port), - &mac->stats.RxOctetsOK); - rmon_update(mac, REG_RX_BAD_BYTES(port), - &mac->stats.RxOctetsBad); - rmon_update(mac, REG_TX_OK_BYTES(port), - &mac->stats.TxOctetsOK); - mac->instance->ticks++; - } - return &mac->stats; -} - -static void mac_destroy(struct cmac *mac) -{ - kfree(mac); -} - -static struct cmac_ops vsc7326_ops = { - .destroy = mac_destroy, - .reset = mac_reset, - .interrupt_handler = mac_intr_handler, - .interrupt_enable = mac_intr_enable, - .interrupt_disable = mac_intr_disable, - .interrupt_clear = mac_intr_clear, - .enable = mac_enable, - .disable = mac_disable, - .set_mtu = mac_set_mtu, - .set_rx_mode = mac_set_rx_mode, - .set_speed_duplex_fc = mac_set_speed_duplex_fc, - .statistics_update = mac_update_statistics, - .macaddress_get = mac_get_address, - .macaddress_set = mac_set_address, -}; - -static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index) -{ - struct cmac *mac; - u32 val; - int i; - - mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); - if (!mac) - return NULL; - - mac->ops = &vsc7326_ops; - mac->instance = (cmac_instance *)(mac + 1); - mac->adapter = adapter; - - mac->instance->index = index; - mac->instance->ticks = 0; - - i = 0; - do { - u32 vhi, vlo; - - vhi = vlo = 0; - t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo); - udelay(1); - t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi); - udelay(5); - val = (vhi << 16) | vlo; - } while ((++i < 10000) && (val == 0xffffffff)); - - return mac; -} - -static int vsc7326_mac_reset(adapter_t *adapter) -{ - vsc7326_full_reset(adapter); - (void) run_bist_all(adapter); - run_table(adapter, vsc7326_reset, ARRAY_SIZE(vsc7326_reset)); - return 0; -} - -const struct gmac t1_vsc7326_ops = { - .stats_update_period = STATS_TICK_SECS, - .create = vsc7326_mac_create, - .reset = vsc7326_mac_reset, -}; diff --git a/drivers/net/chelsio/vsc7326_reg.h b/drivers/net/chelsio/vsc7326_reg.h deleted file mode 100644 index 479edbc..0000000 --- a/drivers/net/chelsio/vsc7326_reg.h +++ /dev/null @@ -1,297 +0,0 @@ -/* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */ -#ifndef _VSC7321_REG_H_ -#define _VSC7321_REG_H_ - -/* Register definitions for Vitesse VSC7321 (Meigs II) MAC - * - * Straight off the data sheet, VMDS-10038 Rev 2.0 and - * PD0011-01-14-Meigs-II 2002-12-12 - */ - -/* Just 'cause it's in here doesn't mean it's used. 
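- *
- * Every register below is addressed through the CRA() macro that
- * follows: block in bits [15:13], subblock in bits [12:9], register
- * address in bits [8:1]. A quick worked example (our arithmetic, not
- * the data sheet's): CRA(0x7,0xf,0x00) = (0x7 << 13) | (0xf << 9) | 0
- * = 0xfe00, which is REG_CHIP_ID.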
*/ - -#define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1)) - -/* System and CPU comm's registers */ -#define REG_CHIP_ID CRA(0x7,0xf,0x00) /* Chip ID */ -#define REG_BLADE_ID CRA(0x7,0xf,0x01) /* Blade ID */ -#define REG_SW_RESET CRA(0x7,0xf,0x02) /* Global Soft Reset */ -#define REG_MEM_BIST CRA(0x7,0xf,0x04) /* mem */ -#define REG_IFACE_MODE CRA(0x7,0xf,0x07) /* Interface mode */ -#define REG_MSCH CRA(0x7,0x2,0x06) /* CRC error count */ -#define REG_CRC_CNT CRA(0x7,0x2,0x0a) /* CRC error count */ -#define REG_CRC_CFG CRA(0x7,0x2,0x0b) /* CRC config */ -#define REG_SI_TRANSFER_SEL CRA(0x7,0xf,0x18) /* SI Transfer Select */ -#define REG_PLL_CLK_SPEED CRA(0x7,0xf,0x19) /* Clock Speed Selection */ -#define REG_SYS_CLK_SELECT CRA(0x7,0xf,0x1c) /* System Clock Select */ -#define REG_GPIO_CTRL CRA(0x7,0xf,0x1d) /* GPIO Control */ -#define REG_GPIO_OUT CRA(0x7,0xf,0x1e) /* GPIO Out */ -#define REG_GPIO_IN CRA(0x7,0xf,0x1f) /* GPIO In */ -#define REG_CPU_TRANSFER_SEL CRA(0x7,0xf,0x20) /* CPU Transfer Select */ -#define REG_LOCAL_DATA CRA(0x7,0xf,0xfe) /* Local CPU Data Register */ -#define REG_LOCAL_STATUS CRA(0x7,0xf,0xff) /* Local CPU Status Register */ - -/* Aggregator registers */ -#define REG_AGGR_SETUP CRA(0x7,0x1,0x00) /* Aggregator Setup */ -#define REG_PMAP_TABLE CRA(0x7,0x1,0x01) /* Port map table */ -#define REG_MPLS_BIT0 CRA(0x7,0x1,0x08) /* MPLS bit0 position */ -#define REG_MPLS_BIT1 CRA(0x7,0x1,0x09) /* MPLS bit1 position */ -#define REG_MPLS_BIT2 CRA(0x7,0x1,0x0a) /* MPLS bit2 position */ -#define REG_MPLS_BIT3 CRA(0x7,0x1,0x0b) /* MPLS bit3 position */ -#define REG_MPLS_BITMASK CRA(0x7,0x1,0x0c) /* MPLS bit mask */ -#define REG_PRE_BIT0POS CRA(0x7,0x1,0x10) /* Preamble bit0 position */ -#define REG_PRE_BIT1POS CRA(0x7,0x1,0x11) /* Preamble bit1 position */ -#define REG_PRE_BIT2POS CRA(0x7,0x1,0x12) /* Preamble bit2 position */ -#define REG_PRE_BIT3POS CRA(0x7,0x1,0x13) /* Preamble bit3 position */ -#define REG_PRE_ERR_CNT CRA(0x7,0x1,0x14) /* Preamble parity error count */ - -/* BIST registers */ -/*#define REG_RAM_BIST_CMD CRA(0x7,0x2,0x00)*/ /* RAM BIST Command Register */ -/*#define REG_RAM_BIST_RESULT CRA(0x7,0x2,0x01)*/ /* RAM BIST Read Status/Result */ -#define REG_RAM_BIST_CMD CRA(0x7,0x1,0x00) /* RAM BIST Command Register */ -#define REG_RAM_BIST_RESULT CRA(0x7,0x1,0x01) /* RAM BIST Read Status/Result */ -#define BIST_PORT_SELECT 0x00 /* BIST port select */ -#define BIST_COMMAND 0x01 /* BIST enable/disable */ -#define BIST_STATUS 0x02 /* BIST operation status */ -#define BIST_ERR_CNT_LSB 0x03 /* BIST error count lo 8b */ -#define BIST_ERR_CNT_MSB 0x04 /* BIST error count hi 8b */ -#define BIST_ERR_SEL_LSB 0x05 /* BIST error select lo 8b */ -#define BIST_ERR_SEL_MSB 0x06 /* BIST error select hi 8b */ -#define BIST_ERROR_STATE 0x07 /* BIST engine internal state */ -#define BIST_ERR_ADR0 0x08 /* BIST error address lo 8b */ -#define BIST_ERR_ADR1 0x09 /* BIST error address lomid 8b */ -#define BIST_ERR_ADR2 0x0a /* BIST error address himid 8b */ -#define BIST_ERR_ADR3 0x0b /* BIST error address hi 8b */ - -/* FIFO registers - * ie = 0 for ingress, 1 for egress - * fn = FIFO number, 0-9 - */ -#define REG_TEST(ie,fn) CRA(0x2,ie&1,0x00+fn) /* Mode & Test Register */ -#define REG_TOP_BOTTOM(ie,fn) CRA(0x2,ie&1,0x10+fn) /* FIFO Buffer Top & Bottom */ -#define REG_TAIL(ie,fn) CRA(0x2,ie&1,0x20+fn) /* FIFO Write Pointer */ -#define REG_HEAD(ie,fn) CRA(0x2,ie&1,0x30+fn) /* FIFO Read Pointer */ -#define REG_HIGH_LOW_WM(ie,fn) 
CRA(0x2,ie&1,0x40+fn) /* Flow Control Water Marks */ -#define REG_CT_THRHLD(ie,fn) CRA(0x2,ie&1,0x50+fn) /* Cut Through Threshold */ -#define REG_FIFO_DROP_CNT(ie,fn) CRA(0x2,ie&1,0x60+fn) /* Drop & CRC Error Counter */ -#define REG_DEBUG_BUF_CNT(ie,fn) CRA(0x2,ie&1,0x70+fn) /* Input Side Debug Counter */ -#define REG_BUCKI(fn) CRA(0x2,2,0x20+fn) /* Input Side Debug Counter */ -#define REG_BUCKE(fn) CRA(0x2,3,0x20+fn) /* Input Side Debug Counter */ - -/* Traffic shaper buckets - * ie = 0 for ingress, 1 for egress - * bn = bucket number 0-10 (yes, 11 buckets) - */ -/* OK, this one's kinda ugly. Some hardware designers are perverse. */ -#define REG_TRAFFIC_SHAPER_BUCKET(ie,bn) CRA(0x2,ie&1,0x0a + (bn>7) | ((bn&7)<<4)) -#define REG_TRAFFIC_SHAPER_CONTROL(ie) CRA(0x2,ie&1,0x3b) - -#define REG_SRAM_ADR(ie) CRA(0x2,ie&1,0x0e) /* FIFO SRAM address */ -#define REG_SRAM_WR_STRB(ie) CRA(0x2,ie&1,0x1e) /* FIFO SRAM write strobe */ -#define REG_SRAM_RD_STRB(ie) CRA(0x2,ie&1,0x2e) /* FIFO SRAM read strobe */ -#define REG_SRAM_DATA_0(ie) CRA(0x2,ie&1,0x3e) /* FIFO SRAM data lo 8b */ -#define REG_SRAM_DATA_1(ie) CRA(0x2,ie&1,0x4e) /* FIFO SRAM data lomid 8b */ -#define REG_SRAM_DATA_2(ie) CRA(0x2,ie&1,0x5e) /* FIFO SRAM data himid 8b */ -#define REG_SRAM_DATA_3(ie) CRA(0x2,ie&1,0x6e) /* FIFO SRAM data hi 8b */ -#define REG_SRAM_DATA_BLK_TYPE(ie) CRA(0x2,ie&1,0x7e) /* FIFO SRAM tag */ -/* REG_ING_CONTROL equals REG_CONTROL with ie = 0, likewise REG_EGR_CONTROL is ie = 1 */ -#define REG_CONTROL(ie) CRA(0x2,ie&1,0x0f) /* FIFO control */ -#define REG_ING_CONTROL CRA(0x2,0x0,0x0f) /* Ingress control (alias) */ -#define REG_EGR_CONTROL CRA(0x2,0x1,0x0f) /* Egress control (alias) */ -#define REG_AGE_TIMER(ie) CRA(0x2,ie&1,0x1f) /* Aging timer */ -#define REG_AGE_INC(ie) CRA(0x2,ie&1,0x2f) /* Aging increment */ -#define DEBUG_OUT(ie) CRA(0x2,ie&1,0x3f) /* Output debug counter control */ -#define DEBUG_CNT(ie) CRA(0x2,ie&1,0x4f) /* Output debug counter */ - -/* SPI4 interface */ -#define REG_SPI4_MISC CRA(0x5,0x0,0x00) /* Misc Register */ -#define REG_SPI4_STATUS CRA(0x5,0x0,0x01) /* CML Status */ -#define REG_SPI4_ING_SETUP0 CRA(0x5,0x0,0x02) /* Ingress Status Channel Setup */ -#define REG_SPI4_ING_SETUP1 CRA(0x5,0x0,0x03) /* Ingress Data Training Setup */ -#define REG_SPI4_ING_SETUP2 CRA(0x5,0x0,0x04) /* Ingress Data Burst Size Setup */ -#define REG_SPI4_EGR_SETUP0 CRA(0x5,0x0,0x05) /* Egress Status Channel Setup */ -#define REG_SPI4_DBG_CNT(n) CRA(0x5,0x0,0x10+n) /* Debug counters 0-9 */ -#define REG_SPI4_DBG_SETUP CRA(0x5,0x0,0x1A) /* Debug counters setup */ -#define REG_SPI4_TEST CRA(0x5,0x0,0x20) /* Test Setup Register */ -#define REG_TPGEN_UP0 CRA(0x5,0x0,0x21) /* Test Pattern generator user pattern 0 */ -#define REG_TPGEN_UP1 CRA(0x5,0x0,0x22) /* Test Pattern generator user pattern 1 */ -#define REG_TPCHK_UP0 CRA(0x5,0x0,0x23) /* Test Pattern checker user pattern 0 */ -#define REG_TPCHK_UP1 CRA(0x5,0x0,0x24) /* Test Pattern checker user pattern 1 */ -#define REG_TPSAM_P0 CRA(0x5,0x0,0x25) /* Sampled pattern 0 */ -#define REG_TPSAM_P1 CRA(0x5,0x0,0x26) /* Sampled pattern 1 */ -#define REG_TPERR_CNT CRA(0x5,0x0,0x27) /* Pattern checker error counter */ -#define REG_SPI4_STICKY CRA(0x5,0x0,0x30) /* Sticky bits register */ -#define REG_SPI4_DBG_INH CRA(0x5,0x0,0x31) /* Core egress & ingress inhibit */ -#define REG_SPI4_DBG_STATUS CRA(0x5,0x0,0x32) /* Sampled ingress status */ -#define REG_SPI4_DBG_GRANT CRA(0x5,0x0,0x33) /* Ingress cranted credit value */ - -#define REG_SPI4_DESKEW CRA(0x5,0x0,0x43) /* 
Ingress cranted credit value */ - -/* 10GbE MAC Block Registers */ -/* Note that those registers that are exactly the same for 10GbE as for - * tri-speed are only defined with the version that needs a port number. - * Pass 0xa in those cases. - * - * Also note that despite the presence of a MAC address register, this part - * does no ingress MAC address filtering. That register is used only for - * pause frame detection and generation. - */ -/* 10GbE specific, and different from tri-speed */ -#define REG_MISC_10G CRA(0x1,0xa,0x00) /* Misc 10GbE setup */ -#define REG_PAUSE_10G CRA(0x1,0xa,0x01) /* Pause register */ -#define REG_NORMALIZER_10G CRA(0x1,0xa,0x05) /* 10G normalizer */ -#define REG_STICKY_RX CRA(0x1,0xa,0x06) /* RX debug register */ -#define REG_DENORM_10G CRA(0x1,0xa,0x07) /* Denormalizer */ -#define REG_STICKY_TX CRA(0x1,0xa,0x08) /* TX sticky bits */ -#define REG_MAX_RXHIGH CRA(0x1,0xa,0x0a) /* XGMII lane 0-3 debug */ -#define REG_MAX_RXLOW CRA(0x1,0xa,0x0b) /* XGMII lane 4-7 debug */ -#define REG_MAC_TX_STICKY CRA(0x1,0xa,0x0c) /* MAC Tx state sticky debug */ -#define REG_MAC_TX_RUNNING CRA(0x1,0xa,0x0d) /* MAC Tx state running debug */ -#define REG_TX_ABORT_AGE CRA(0x1,0xa,0x14) /* Aged Tx frames discarded */ -#define REG_TX_ABORT_SHORT CRA(0x1,0xa,0x15) /* Short Tx frames discarded */ -#define REG_TX_ABORT_TAXI CRA(0x1,0xa,0x16) /* Taxi error frames discarded */ -#define REG_TX_ABORT_UNDERRUN CRA(0x1,0xa,0x17) /* Tx Underrun abort counter */ -#define REG_TX_DENORM_DISCARD CRA(0x1,0xa,0x18) /* Tx denormalizer discards */ -#define REG_XAUI_STAT_A CRA(0x1,0xa,0x20) /* XAUI status A */ -#define REG_XAUI_STAT_B CRA(0x1,0xa,0x21) /* XAUI status B */ -#define REG_XAUI_STAT_C CRA(0x1,0xa,0x22) /* XAUI status C */ -#define REG_XAUI_CONF_A CRA(0x1,0xa,0x23) /* XAUI configuration A */ -#define REG_XAUI_CONF_B CRA(0x1,0xa,0x24) /* XAUI configuration B */ -#define REG_XAUI_CODE_GRP_CNT CRA(0x1,0xa,0x25) /* XAUI code group error count */ -#define REG_XAUI_CONF_TEST_A CRA(0x1,0xa,0x26) /* XAUI test register A */ -#define REG_PDERRCNT CRA(0x1,0xa,0x27) /* XAUI test register B */ - -/* pn = port number 0-9 for tri-speed, 10 for 10GbE */ -/* Both tri-speed and 10GbE */ -#define REG_MAX_LEN(pn) CRA(0x1,pn,0x02) /* Max length */ -#define REG_MAC_HIGH_ADDR(pn) CRA(0x1,pn,0x03) /* Upper 24 bits of MAC addr */ -#define REG_MAC_LOW_ADDR(pn) CRA(0x1,pn,0x04) /* Lower 24 bits of MAC addr */ - -/* tri-speed only - * pn = port number, 0-9 - */ -#define REG_MODE_CFG(pn) CRA(0x1,pn,0x00) /* Mode configuration */ -#define REG_PAUSE_CFG(pn) CRA(0x1,pn,0x01) /* Pause configuration */ -#define REG_NORMALIZER(pn) CRA(0x1,pn,0x05) /* Normalizer */ -#define REG_TBI_STATUS(pn) CRA(0x1,pn,0x06) /* TBI status */ -#define REG_PCS_STATUS_DBG(pn) CRA(0x1,pn,0x07) /* PCS status debug */ -#define REG_PCS_CTRL(pn) CRA(0x1,pn,0x08) /* PCS control */ -#define REG_TBI_CONFIG(pn) CRA(0x1,pn,0x09) /* TBI configuration */ -#define REG_STICK_BIT(pn) CRA(0x1,pn,0x0a) /* Sticky bits */ -#define REG_DEV_SETUP(pn) CRA(0x1,pn,0x0b) /* MAC clock/reset setup */ -#define REG_DROP_CNT(pn) CRA(0x1,pn,0x0c) /* Drop counter */ -#define REG_PORT_POS(pn) CRA(0x1,pn,0x0d) /* Preamble port position */ -#define REG_PORT_FAIL(pn) CRA(0x1,pn,0x0e) /* Preamble port position */ -#define REG_SERDES_CONF(pn) CRA(0x1,pn,0x0f) /* SerDes configuration */ -#define REG_SERDES_TEST(pn) CRA(0x1,pn,0x10) /* SerDes test */ -#define REG_SERDES_STAT(pn) CRA(0x1,pn,0x11) /* SerDes status */ -#define REG_SERDES_COM_CNT(pn) CRA(0x1,pn,0x12) /* SerDes comma 
counter */ -#define REG_DENORM(pn) CRA(0x1,pn,0x15) /* Frame denormalization */ -#define REG_DBG(pn) CRA(0x1,pn,0x16) /* Device 1G debug */ -#define REG_TX_IFG(pn) CRA(0x1,pn,0x18) /* Tx IFG config */ -#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */ - -/* Statistics */ -/* CRA(0x4,pn,reg) */ -/* reg below */ -/* pn = port number, 0-a, a = 10GbE */ - -enum { - RxInBytes = 0x00, // # Rx in octets - RxSymbolCarrier = 0x01, // Frames w/ symbol errors - RxPause = 0x02, // # pause frames received - RxUnsupOpcode = 0x03, // # control frames with unsupported opcode - RxOkBytes = 0x04, // # octets in good frames - RxBadBytes = 0x05, // # octets in bad frames - RxUnicast = 0x06, // # good unicast frames - RxMulticast = 0x07, // # good multicast frames - RxBroadcast = 0x08, // # good broadcast frames - Crc = 0x09, // # frames w/ bad CRC only - RxAlignment = 0x0a, // # frames w/ alignment err - RxUndersize = 0x0b, // # frames undersize - RxFragments = 0x0c, // # frames undersize w/ crc err - RxInRangeLengthError = 0x0d, // # frames with length error - RxOutOfRangeError = 0x0e, // # frames with illegal length field - RxOversize = 0x0f, // # frames oversize - RxJabbers = 0x10, // # frames oversize w/ crc err - RxSize64 = 0x11, // # frames 64 octets long - RxSize65To127 = 0x12, // # frames 65-127 octets - RxSize128To255 = 0x13, // # frames 128-255 - RxSize256To511 = 0x14, // # frames 256-511 - RxSize512To1023 = 0x15, // # frames 512-1023 - RxSize1024To1518 = 0x16, // # frames 1024-1518 - RxSize1519ToMax = 0x17, // # frames 1519-max - - TxOutBytes = 0x18, // # octets tx - TxPause = 0x19, // # pause frames sent - TxOkBytes = 0x1a, // # octets tx OK - TxUnicast = 0x1b, // # frames unicast - TxMulticast = 0x1c, // # frames multicast - TxBroadcast = 0x1d, // # frames broadcast - TxMultipleColl = 0x1e, // # frames tx after multiple collisions - TxLateColl = 0x1f, // # late collisions detected - TxXcoll = 0x20, // # frames lost, excessive collisions - TxDefer = 0x21, // # frames deferred on first tx attempt - TxXdefer = 0x22, // # frames excessively deferred - TxCsense = 0x23, // carrier sense errors at frame end - TxSize64 = 0x24, // # frames 64 octets long - TxSize65To127 = 0x25, // # frames 65-127 octets - TxSize128To255 = 0x26, // # frames 128-255 - TxSize256To511 = 0x27, // # frames 256-511 - TxSize512To1023 = 0x28, // # frames 512-1023 - TxSize1024To1518 = 0x29, // # frames 1024-1518 - TxSize1519ToMax = 0x2a, // # frames 1519-max - TxSingleColl = 0x2b, // # frames tx after single collision - TxBackoff2 = 0x2c, // # frames tx ok after 2 backoffs/collisions - TxBackoff3 = 0x2d, // after 3 backoffs/collisions - TxBackoff4 = 0x2e, // after 4 - TxBackoff5 = 0x2f, // after 5 - TxBackoff6 = 0x30, // after 6 - TxBackoff7 = 0x31, // after 7 - TxBackoff8 = 0x32, // after 8 - TxBackoff9 = 0x33, // after 9 - TxBackoff10 = 0x34, // after 10 - TxBackoff11 = 0x35, // after 11 - TxBackoff12 = 0x36, // after 12 - TxBackoff13 = 0x37, // after 13 - TxBackoff14 = 0x38, // after 14 - TxBackoff15 = 0x39, // after 15 - TxUnderrun = 0x3a, // # frames dropped from underrun - // Hole. See REG_RX_XGMII_PROT_ERR below. - RxIpgShrink = 0x3c, // # of IPG shrinks detected - // Duplicate. See REG_STAT_STICKY10G below. 
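-	// (Each enumerator in this table is a per-port RMON counter offset;
-	// a counter is read at CRA(0x4, port, reg), cf. REG_RX_OK_BYTES() below.)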
- StatSticky1G = 0x3e, // tri-speed sticky bits - StatInit = 0x3f // Clear all statistics -}; - -#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */ -#define REG_STAT_STICKY10G CRA(0x4,0xa,StatSticky1G) /* 10GbE sticky bits */ - -#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,RxOkBytes) -#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,RxBadBytes) -#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,TxOkBytes) - -/* MII-Management Block registers */ -/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If - * we hooked up to the one with separate directions, the middle 0x0 needs to - * change to 0x1. And the current errata states that MII-M 1 doesn't work. - */ - -#define REG_MIIM_STATUS CRA(0x3,0x0,0x00) /* MII-M Status */ -#define REG_MIIM_CMD CRA(0x3,0x0,0x01) /* MII-M Command */ -#define REG_MIIM_DATA CRA(0x3,0x0,0x02) /* MII-M Data */ -#define REG_MIIM_PRESCALE CRA(0x3,0x0,0x03) /* MII-M MDC Prescale */ - -#define REG_ING_FFILT_UM_EN CRA(0x2, 0, 0xd) -#define REG_ING_FFILT_BE_EN CRA(0x2, 0, 0x1d) -#define REG_ING_FFILT_VAL0 CRA(0x2, 0, 0x2d) -#define REG_ING_FFILT_VAL1 CRA(0x2, 0, 0x3d) -#define REG_ING_FFILT_MASK0 CRA(0x2, 0, 0x4d) -#define REG_ING_FFILT_MASK1 CRA(0x2, 0, 0x5d) -#define REG_ING_FFILT_MASK2 CRA(0x2, 0, 0x6d) -#define REG_ING_FFILT_ETYPE CRA(0x2, 0, 0x7d) - - -/* Whew. */ - -#endif diff --git a/drivers/net/cxgb3/Makefile b/drivers/net/cxgb3/Makefile deleted file mode 100644 index 29aff78..0000000 --- a/drivers/net/cxgb3/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -# -# Chelsio T3 driver -# - -obj-$(CONFIG_CHELSIO_T3) += cxgb3.o - -cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \ - xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h deleted file mode 100644 index 8b395b5..0000000 --- a/drivers/net/cxgb3/adapter.h +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/* This file should not be included directly. Include common.h instead. 
*/ - -#ifndef __T3_ADAPTER_H__ -#define __T3_ADAPTER_H__ - -#include -#include -#include -#include -#include -#include -#include -#include "t3cdev.h" -#include - -struct adapter; -struct sge_qset; -struct port_info; - -enum mac_idx_types { - LAN_MAC_IDX = 0, - SAN_MAC_IDX, - - MAX_MAC_IDX -}; - -struct iscsi_config { - __u8 mac_addr[ETH_ALEN]; - __u32 flags; - int (*send)(struct port_info *pi, struct sk_buff **skb); - int (*recv)(struct port_info *pi, struct sk_buff *skb); -}; - -struct port_info { - struct adapter *adapter; - struct sge_qset *qs; - u8 port_id; - u8 nqsets; - u8 first_qset; - struct cphy phy; - struct cmac mac; - struct link_config link_config; - struct net_device_stats netstats; - int activity; - __be32 iscsi_ipv4addr; - struct iscsi_config iscsic; - - int link_fault; /* link fault was detected */ -}; - -enum { /* adapter flags */ - FULL_INIT_DONE = (1 << 0), - USING_MSI = (1 << 1), - USING_MSIX = (1 << 2), - QUEUES_BOUND = (1 << 3), - TP_PARITY_INIT = (1 << 4), - NAPI_INIT = (1 << 5), -}; - -struct fl_pg_chunk { - struct page *page; - void *va; - unsigned int offset; - unsigned long *p_cnt; - dma_addr_t mapping; -}; - -struct rx_desc; -struct rx_sw_desc; - -struct sge_fl { /* SGE per free-buffer list state */ - unsigned int buf_size; /* size of each Rx buffer */ - unsigned int credits; /* # of available Rx buffers */ - unsigned int pend_cred; /* new buffers since last FL DB ring */ - unsigned int size; /* capacity of free list */ - unsigned int cidx; /* consumer index */ - unsigned int pidx; /* producer index */ - unsigned int gen; /* free list generation */ - struct fl_pg_chunk pg_chunk;/* page chunk cache */ - unsigned int use_pages; /* whether FL uses pages or sk_buffs */ - unsigned int order; /* order of page allocations */ - unsigned int alloc_size; /* size of allocated buffer */ - struct rx_desc *desc; /* address of HW Rx descriptor ring */ - struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ - dma_addr_t phys_addr; /* physical address of HW ring start */ - unsigned int cntxt_id; /* SGE context id for the free list */ - unsigned long empty; /* # of times queue ran out of buffers */ - unsigned long alloc_failed; /* # of times buffer allocation failed */ -}; - -/* - * Bundle size for grouping offload RX packets for delivery to the stack. - * Don't make this too big as we do prefetch on each packet in a bundle. - */ -# define RX_BUNDLE_SIZE 8 - -struct rsp_desc; - -struct sge_rspq { /* state for an SGE response queue */ - unsigned int credits; /* # of pending response credits */ - unsigned int size; /* capacity of response queue */ - unsigned int cidx; /* consumer index */ - unsigned int gen; /* current generation bit */ - unsigned int polling; /* is the queue serviced through NAPI? 
*/ - unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */ - unsigned int next_holdoff; /* holdoff time for next interrupt */ - unsigned int rx_recycle_buf; /* whether recycling occurred - within current sop-eop */ - struct rsp_desc *desc; /* address of HW response ring */ - dma_addr_t phys_addr; /* physical address of the ring */ - unsigned int cntxt_id; /* SGE context id for the response q */ - spinlock_t lock; /* guards response processing */ - struct sk_buff_head rx_queue; /* offload packet receive queue */ - struct sk_buff *pg_skb; /* used to build frag list in napi handler */ - - unsigned long offload_pkts; - unsigned long offload_bundles; - unsigned long eth_pkts; /* # of ethernet packets */ - unsigned long pure_rsps; /* # of pure (non-data) responses */ - unsigned long imm_data; /* responses with immediate data */ - unsigned long rx_drops; /* # of packets dropped due to no mem */ - unsigned long async_notif; /* # of asynchronous notification events */ - unsigned long empty; /* # of times queue ran out of credits */ - unsigned long nomem; /* # of responses deferred due to no mem */ - unsigned long unhandled_irqs; /* # of spurious intrs */ - unsigned long starved; - unsigned long restarted; -}; - -struct tx_desc; -struct tx_sw_desc; - -struct sge_txq { /* state for an SGE Tx queue */ - unsigned long flags; /* HW DMA fetch status */ - unsigned int in_use; /* # of in-use Tx descriptors */ - unsigned int size; /* # of descriptors */ - unsigned int processed; /* total # of descs HW has processed */ - unsigned int cleaned; /* total # of descs SW has reclaimed */ - unsigned int stop_thres; /* SW TX queue suspend threshold */ - unsigned int cidx; /* consumer index */ - unsigned int pidx; /* producer index */ - unsigned int gen; /* current value of generation bit */ - unsigned int unacked; /* Tx descriptors used since last COMPL */ - struct tx_desc *desc; /* address of HW Tx descriptor ring */ - struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ - spinlock_t lock; /* guards enqueueing of new packets */ - unsigned int token; /* WR token */ - dma_addr_t phys_addr; /* physical address of the ring */ - struct sk_buff_head sendq; /* List of backpressured offload packets */ - struct tasklet_struct qresume_tsk; /* restarts the queue */ - unsigned int cntxt_id; /* SGE context id for the Tx q */ - unsigned long stops; /* # of times q has been stopped */ - unsigned long restarts; /* # of queue restarts */ -}; - -enum { /* per port SGE statistics */ - SGE_PSTAT_TSO, /* # of TSO requests */ - SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */ - SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */ - SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */ - SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */ - - SGE_PSTAT_MAX /* must be last */ -}; - -struct napi_gro_fraginfo; - -struct sge_qset { /* an SGE queue set */ - struct adapter *adap; - struct napi_struct napi; - struct sge_rspq rspq; - struct sge_fl fl[SGE_RXQ_PER_SET]; - struct sge_txq txq[SGE_TXQ_PER_SET]; - int nomem; - void *lro_va; - struct net_device *netdev; - struct netdev_queue *tx_q; /* associated netdev TX queue */ - unsigned long txq_stopped; /* which Tx queues are stopped */ - struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ - struct timer_list rx_reclaim_timer; /* reclaims RX buffers */ - unsigned long port_stats[SGE_PSTAT_MAX]; -} ____cacheline_aligned; - -struct sge { - struct sge_qset qs[SGE_QSETS]; - spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */ -}; - -struct 
adapter { - struct t3cdev tdev; - struct list_head adapter_list; - void __iomem *regs; - struct pci_dev *pdev; - unsigned long registered_device_map; - unsigned long open_device_map; - unsigned long flags; - - const char *name; - int msg_enable; - unsigned int mmio_len; - - struct adapter_params params; - unsigned int slow_intr_mask; - unsigned long irq_stats[IRQ_NUM_STATS]; - - int msix_nvectors; - struct { - unsigned short vec; - char desc[22]; - } msix_info[SGE_QSETS + 1]; - - /* T3 modules */ - struct sge sge; - struct mc7 pmrx; - struct mc7 pmtx; - struct mc7 cm; - struct mc5 mc5; - - struct net_device *port[MAX_NPORTS]; - unsigned int check_task_cnt; - struct delayed_work adap_check_task; - struct work_struct ext_intr_handler_task; - struct work_struct fatal_error_handler_task; - struct work_struct link_fault_handler_task; - - struct work_struct db_full_task; - struct work_struct db_empty_task; - struct work_struct db_drop_task; - - struct dentry *debugfs_root; - - struct mutex mdio_lock; - spinlock_t stats_lock; - spinlock_t work_lock; - - struct sk_buff *nofail_skb; -}; - -static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr) -{ - u32 val = readl(adapter->regs + reg_addr); - - CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val); - return val; -} - -static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val) -{ - CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val); - writel(val, adapter->regs + reg_addr); -} - -static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) -{ - return netdev_priv(adap->port[idx]); -} - -static inline int phy2portid(struct cphy *phy) -{ - struct adapter *adap = phy->adapter; - struct port_info *port0 = adap2pinfo(adap, 0); - - return &port0->phy == phy ? 
0 : 1; -} - -#define OFFLOAD_DEVMAP_BIT 15 - -#define tdev2adap(d) container_of(d, struct adapter, tdev) - -static inline int offload_running(struct adapter *adapter) -{ - return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); -} - -int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb); - -void t3_os_ext_intr_handler(struct adapter *adapter); -void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status, - int speed, int duplex, int fc); -void t3_os_phymod_changed(struct adapter *adap, int port_id); -void t3_os_link_fault(struct adapter *adapter, int port_id, int state); -void t3_os_link_fault_handler(struct adapter *adapter, int port_id); - -void t3_sge_start(struct adapter *adap); -void t3_sge_stop(struct adapter *adap); -void t3_start_sge_timers(struct adapter *adap); -void t3_stop_sge_timers(struct adapter *adap); -void t3_free_sge_resources(struct adapter *adap); -void t3_sge_err_intr_handler(struct adapter *adapter); -irq_handler_t t3_intr_handler(struct adapter *adap, int polling); -netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev); -int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb); -void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p); -int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, - int irq_vec_idx, const struct qset_params *p, - int ntxq, struct net_device *dev, - struct netdev_queue *netdevq); -extern struct workqueue_struct *cxgb3_wq; - -int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size); - -#endif /* __T3_ADAPTER_H__ */ diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c deleted file mode 100644 index 2028da9..0000000 --- a/drivers/net/cxgb3/ael1002.c +++ /dev/null @@ -1,941 +0,0 @@ -/* - * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "common.h" -#include "regs.h" - -enum { - AEL100X_TX_CONFIG1 = 0xc002, - AEL1002_PWR_DOWN_HI = 0xc011, - AEL1002_PWR_DOWN_LO = 0xc012, - AEL1002_XFI_EQL = 0xc015, - AEL1002_LB_EN = 0xc017, - AEL_OPT_SETTINGS = 0xc017, - AEL_I2C_CTRL = 0xc30a, - AEL_I2C_DATA = 0xc30b, - AEL_I2C_STAT = 0xc30c, - AEL2005_GPIO_CTRL = 0xc214, - AEL2005_GPIO_STAT = 0xc215, - - AEL2020_GPIO_INTR = 0xc103, /* Latch High (LH) */ - AEL2020_GPIO_CTRL = 0xc108, /* Store Clear (SC) */ - AEL2020_GPIO_STAT = 0xc10c, /* Read Only (RO) */ - AEL2020_GPIO_CFG = 0xc110, /* Read Write (RW) */ - - AEL2020_GPIO_SDA = 0, /* IN: i2c serial data */ - AEL2020_GPIO_MODDET = 1, /* IN: Module Detect */ - AEL2020_GPIO_0 = 3, /* IN: unassigned */ - AEL2020_GPIO_1 = 2, /* OUT: unassigned */ - AEL2020_GPIO_LSTAT = AEL2020_GPIO_1, /* wired to link status LED */ -}; - -enum { edc_none, edc_sr, edc_twinax }; - -/* PHY module I2C device address */ -enum { - MODULE_DEV_ADDR = 0xa0, - SFF_DEV_ADDR = 0xa2, -}; - -/* PHY transceiver type */ -enum { - phy_transtype_unknown = 0, - phy_transtype_sfp = 3, - phy_transtype_xfp = 6, -}; - -#define AEL2005_MODDET_IRQ 4 - -struct reg_val { - unsigned short mmd_addr; - unsigned short reg_addr; - unsigned short clear_bits; - unsigned short set_bits; -}; - -static int set_phy_regs(struct cphy *phy, const struct reg_val *rv) -{ - int err; - - for (err = 0; rv->mmd_addr && !err; rv++) { - if (rv->clear_bits == 0xffff) - err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr, - rv->set_bits); - else - err = t3_mdio_change_bits(phy, rv->mmd_addr, - rv->reg_addr, rv->clear_bits, - rv->set_bits); - } - return err; -} - -static void ael100x_txon(struct cphy *phy) -{ - int tx_on_gpio = - phy->mdio.prtad == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL; - - msleep(100); - t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio); - msleep(30); -} - -/* - * Read an 8-bit word from a device attached to the PHY's i2c bus. - */ -static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr) -{ - int i, err; - unsigned int stat, data; - - err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL, - (dev_addr << 8) | (1 << 8) | word_addr); - if (err) - return err; - - for (i = 0; i < 200; i++) { - msleep(1); - err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat); - if (err) - return err; - if ((stat & 3) == 1) { - err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA, - &data); - if (err) - return err; - return data >> 8; - } - } - CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n", - phy->mdio.prtad, dev_addr, word_addr); - return -ETIMEDOUT; -} - -static int ael1002_power_down(struct cphy *phy, int enable) -{ - int err; - - err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, !!enable); - if (!err) - err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, - MDIO_MMD_PMAPMD, MDIO_CTRL1, - MDIO_CTRL1_LPOWER, enable); - return err; -} - -static int ael1002_reset(struct cphy *phy, int wait) -{ - int err; - - if ((err = ael1002_power_down(phy, 0)) || - (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL100X_TX_CONFIG1, 1)) || - (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_HI, 0)) || - (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_LO, 0)) || - (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_XFI_EQL, 0x18)) || - (err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL1002_LB_EN, - 0, 1 << 5))) - return err; - return 0; -} - -static int ael1002_intr_noop(struct cphy *phy) -{ - return 0; -} - -/* - * Get link status for a 10GBASE-R device. 
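- *
- * The function below ANDs three layers of status, so link is reported
- * up only when PMA/PMD receive-detect, the 10GBASE-R PCS status and
- * the PHY-XS lane status (alignment flag in bit 12) all agree;
- * schematically:
- *
- *	link_ok = (pma_rxdet & pcs_stat1 & (phyxs_lnstat >> 12)) & 1;
- *
- * (The names above are shorthand for the three MDIO reads in the code.)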
- */ -static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed, - int *duplex, int *fc) -{ - if (link_ok) { - unsigned int stat0, stat1, stat2; - int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, - MDIO_PMA_RXDET, &stat0); - - if (!err) - err = t3_mdio_read(phy, MDIO_MMD_PCS, - MDIO_PCS_10GBRT_STAT1, &stat1); - if (!err) - err = t3_mdio_read(phy, MDIO_MMD_PHYXS, - MDIO_PHYXS_LNSTAT, &stat2); - if (err) - return err; - *link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1; - } - if (speed) - *speed = SPEED_10000; - if (duplex) - *duplex = DUPLEX_FULL; - return 0; -} - -static struct cphy_ops ael1002_ops = { - .reset = ael1002_reset, - .intr_enable = ael1002_intr_noop, - .intr_disable = ael1002_intr_noop, - .intr_clear = ael1002_intr_noop, - .intr_handler = ael1002_intr_noop, - .get_link_status = get_link_status_r, - .power_down = ael1002_power_down, - .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, -}; - -int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops) -{ - cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops, - SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE, - "10GBASE-R"); - ael100x_txon(phy); - return 0; -} - -static int ael1006_reset(struct cphy *phy, int wait) -{ - return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait); -} - -static struct cphy_ops ael1006_ops = { - .reset = ael1006_reset, - .intr_enable = t3_phy_lasi_intr_enable, - .intr_disable = t3_phy_lasi_intr_disable, - .intr_clear = t3_phy_lasi_intr_clear, - .intr_handler = t3_phy_lasi_intr_handler, - .get_link_status = get_link_status_r, - .power_down = ael1002_power_down, - .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, -}; - -int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops) -{ - cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops, - SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE, - "10GBASE-SR"); - ael100x_txon(phy); - return 0; -} - -/* - * Decode our module type. - */ -static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms) -{ - int v; - - if (delay_ms) - msleep(delay_ms); - - /* see SFF-8472 for below */ - v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3); - if (v < 0) - return v; - - if (v == 0x10) - return phy_modtype_sr; - if (v == 0x20) - return phy_modtype_lr; - if (v == 0x40) - return phy_modtype_lrm; - - v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6); - if (v < 0) - return v; - if (v != 4) - goto unknown; - - v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10); - if (v < 0) - return v; - - if (v & 0x80) { - v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12); - if (v < 0) - return v; - return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax; - } -unknown: - return phy_modtype_unknown; -} - -/* - * Code to support the Aeluros/NetLogic 2005 10Gb PHY. 
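- *
- * Both EDC setup paths below follow the same sequence: program a short
- * reg_val table, sleep ~50 ms, fetch the EDC firmware (a flat array of
- * u16 register/value pairs) via t3_get_edc_fw() unless the right image
- * is already loaded, replay the pairs over MDIO from phy->phy_cache[],
- * and record the active mode in phy->priv so a matching module does
- * not trigger a reload.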
- */ -static int ael2005_setup_sr_edc(struct cphy *phy) -{ - static const struct reg_val regs[] = { - { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 }, - { MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a }, - { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 }, - { 0, 0, 0, 0 } - }; - - int i, err; - - err = set_phy_regs(phy, regs); - if (err) - return err; - - msleep(50); - - if (phy->priv != edc_sr) - err = t3_get_edc_fw(phy, EDC_OPT_AEL2005, - EDC_OPT_AEL2005_SIZE); - if (err) - return err; - - for (i = 0; i < EDC_OPT_AEL2005_SIZE / sizeof(u16) && !err; i += 2) - err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, - phy->phy_cache[i], - phy->phy_cache[i + 1]); - if (!err) - phy->priv = edc_sr; - return err; -} - -static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype) -{ - static const struct reg_val regs[] = { - { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 }, - { 0, 0, 0, 0 } - }; - static const struct reg_val preemphasis[] = { - { MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 }, - { MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 }, - { 0, 0, 0, 0 } - }; - int i, err; - - err = set_phy_regs(phy, regs); - if (!err && modtype == phy_modtype_twinax_long) - err = set_phy_regs(phy, preemphasis); - if (err) - return err; - - msleep(50); - - if (phy->priv != edc_twinax) - err = t3_get_edc_fw(phy, EDC_TWX_AEL2005, - EDC_TWX_AEL2005_SIZE); - if (err) - return err; - - for (i = 0; i < EDC_TWX_AEL2005_SIZE / sizeof(u16) && !err; i += 2) - err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, - phy->phy_cache[i], - phy->phy_cache[i + 1]); - if (!err) - phy->priv = edc_twinax; - return err; -} - -static int ael2005_get_module_type(struct cphy *phy, int delay_ms) -{ - int v; - unsigned int stat; - - v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, &stat); - if (v) - return v; - - if (stat & (1 << 8)) /* module absent */ - return phy_modtype_none; - - return ael2xxx_get_module_type(phy, delay_ms); -} - -static int ael2005_intr_enable(struct cphy *phy) -{ - int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x200); - return err ? err : t3_phy_lasi_intr_enable(phy); -} - -static int ael2005_intr_disable(struct cphy *phy) -{ - int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x100); - return err ? err : t3_phy_lasi_intr_disable(phy); -} - -static int ael2005_intr_clear(struct cphy *phy) -{ - int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00); - return err ? 
err : t3_phy_lasi_intr_clear(phy); -} - -static int ael2005_reset(struct cphy *phy, int wait) -{ - static const struct reg_val regs0[] = { - { MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 }, - { MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 }, - { MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 }, - { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 }, - { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8100 }, - { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 }, - { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 }, - { 0, 0, 0, 0 } - }; - static const struct reg_val regs1[] = { - { MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 }, - { MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 }, - { 0, 0, 0, 0 } - }; - - int err; - unsigned int lasi_ctrl; - - err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, - &lasi_ctrl); - if (err) - return err; - - err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 0); - if (err) - return err; - - msleep(125); - phy->priv = edc_none; - err = set_phy_regs(phy, regs0); - if (err) - return err; - - msleep(50); - - err = ael2005_get_module_type(phy, 0); - if (err < 0) - return err; - phy->modtype = err; - - if (err == phy_modtype_twinax || err == phy_modtype_twinax_long) - err = ael2005_setup_twinax_edc(phy, err); - else - err = ael2005_setup_sr_edc(phy); - if (err) - return err; - - err = set_phy_regs(phy, regs1); - if (err) - return err; - - /* reset wipes out interrupts, reenable them if they were on */ - if (lasi_ctrl & 1) - err = ael2005_intr_enable(phy); - return err; -} - -static int ael2005_intr_handler(struct cphy *phy) -{ - unsigned int stat; - int ret, edc_needed, cause = 0; - - ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_STAT, &stat); - if (ret) - return ret; - - if (stat & AEL2005_MODDET_IRQ) { - ret = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, - 0xd00); - if (ret) - return ret; - - /* modules have max 300 ms init time after hot plug */ - ret = ael2005_get_module_type(phy, 300); - if (ret < 0) - return ret; - - phy->modtype = ret; - if (ret == phy_modtype_none) - edc_needed = phy->priv; /* on unplug retain EDC */ - else if (ret == phy_modtype_twinax || - ret == phy_modtype_twinax_long) - edc_needed = edc_twinax; - else - edc_needed = edc_sr; - - if (edc_needed != phy->priv) { - ret = ael2005_reset(phy, 0); - return ret ? ret : cphy_cause_module_change; - } - cause = cphy_cause_module_change; - } - - ret = t3_phy_lasi_intr_handler(phy); - if (ret < 0) - return ret; - - ret |= cause; - return ret ? ret : cphy_cause_link_change; -} - -static struct cphy_ops ael2005_ops = { - .reset = ael2005_reset, - .intr_enable = ael2005_intr_enable, - .intr_disable = ael2005_intr_disable, - .intr_clear = ael2005_intr_clear, - .intr_handler = ael2005_intr_handler, - .get_link_status = get_link_status_r, - .power_down = ael1002_power_down, - .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, -}; - -int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops) -{ - cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops, - SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE | - SUPPORTED_IRQ, "10GBASE-R"); - msleep(125); - return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS, 0, - 1 << 5); -} - -/* - * Setup EDC and other parameters for operation with an optical module. 
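- *
- * As elsewhere in this file, the register tables below are arrays of
- * struct reg_val closed by an all-zero sentinel and walked by
- * set_phy_regs(); clear_bits == 0xffff requests a plain write,
- * anything else a read-modify-write via t3_mdio_change_bits().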
- */ -static int ael2020_setup_sr_edc(struct cphy *phy) -{ - static const struct reg_val regs[] = { - /* set CDR offset to 10 */ - { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a }, - - /* adjust 10G RX bias current */ - { MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 }, - { MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 }, - { MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 }, - - /* end */ - { 0, 0, 0, 0 } - }; - int err; - - err = set_phy_regs(phy, regs); - msleep(50); - if (err) - return err; - - phy->priv = edc_sr; - return 0; -} - -/* - * Setup EDC and other parameters for operation with an TWINAX module. - */ -static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype) -{ - /* set uC to 40MHz */ - static const struct reg_val uCclock40MHz[] = { - { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 }, - { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 }, - { 0, 0, 0, 0 } - }; - - /* activate uC clock */ - static const struct reg_val uCclockActivate[] = { - { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 }, - { 0, 0, 0, 0 } - }; - - /* set PC to start of SRAM and activate uC */ - static const struct reg_val uCactivate[] = { - { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 }, - { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 }, - { 0, 0, 0, 0 } - }; - int i, err; - - /* set uC clock and activate it */ - err = set_phy_regs(phy, uCclock40MHz); - msleep(500); - if (err) - return err; - err = set_phy_regs(phy, uCclockActivate); - msleep(500); - if (err) - return err; - - if (phy->priv != edc_twinax) - err = t3_get_edc_fw(phy, EDC_TWX_AEL2020, - EDC_TWX_AEL2020_SIZE); - if (err) - return err; - - for (i = 0; i < EDC_TWX_AEL2020_SIZE / sizeof(u16) && !err; i += 2) - err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, - phy->phy_cache[i], - phy->phy_cache[i + 1]); - /* activate uC */ - err = set_phy_regs(phy, uCactivate); - if (!err) - phy->priv = edc_twinax; - return err; -} - -/* - * Return Module Type. - */ -static int ael2020_get_module_type(struct cphy *phy, int delay_ms) -{ - int v; - unsigned int stat; - - v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat); - if (v) - return v; - - if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) { - /* module absent */ - return phy_modtype_none; - } - - return ael2xxx_get_module_type(phy, delay_ms); -} - -/* - * Enable PHY interrupts. We enable "Module Detection" interrupts (on any - * state transition) and then generic Link Alarm Status Interrupt (LASI). - */ -static int ael2020_intr_enable(struct cphy *phy) -{ - static const struct reg_val regs[] = { - /* output Module's Loss Of Signal (LOS) to LED */ - { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT, - 0xffff, 0x4 }, - { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, - 0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) }, - - /* enable module detect status change interrupts */ - { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, - 0xffff, 0x2 << (AEL2020_GPIO_MODDET*4) }, - - /* end */ - { 0, 0, 0, 0 } - }; - int err, link_ok = 0; - - /* set up "link status" LED and enable module change interrupts */ - err = set_phy_regs(phy, regs); - if (err) - return err; - - err = get_link_status_r(phy, &link_ok, NULL, NULL, NULL); - if (err) - return err; - if (link_ok) - t3_link_changed(phy->adapter, - phy2portid(phy)); - - err = t3_phy_lasi_intr_enable(phy); - if (err) - return err; - - return 0; -} - -/* - * Disable PHY interrupts. The mirror of the above ... 
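- *
- * (A note on the encoding used in both directions: each GPIO pin seems
- * to own a 4-bit command field in AEL2020_GPIO_CTRL, which is why the
- * values in these tables are shifted by pin * 4.)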
- */ -static int ael2020_intr_disable(struct cphy *phy) -{ - static const struct reg_val regs[] = { - /* reset "link status" LED to "off" */ - { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, - 0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) }, - - /* disable module detect status change interrupts */ - { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, - 0xffff, 0x1 << (AEL2020_GPIO_MODDET*4) }, - - /* end */ - { 0, 0, 0, 0 } - }; - int err; - - /* turn off "link status" LED and disable module change interrupts */ - err = set_phy_regs(phy, regs); - if (err) - return err; - - return t3_phy_lasi_intr_disable(phy); -} - -/* - * Clear PHY interrupt state. - */ -static int ael2020_intr_clear(struct cphy *phy) -{ - /* - * The GPIO Interrupt register on the AEL2020 is a "Latching High" - * (LH) register which is cleared to the current state when it's read. - * Thus, we simply read the register and discard the result. - */ - unsigned int stat; - int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat); - return err ? err : t3_phy_lasi_intr_clear(phy); -} - -static const struct reg_val ael2020_reset_regs[] = { - /* Erratum #2: CDRLOL asserted, causing PMA link down status */ - { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 }, - - /* force XAUI to send LF when RX_LOS is asserted */ - { MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 }, - - /* allow writes to transceiver module EEPROM on i2c bus */ - { MDIO_MMD_PMAPMD, 0xff02, 0xffff, 0x0023 }, - { MDIO_MMD_PMAPMD, 0xff03, 0xffff, 0x0000 }, - { MDIO_MMD_PMAPMD, 0xff04, 0xffff, 0x0000 }, - - /* end */ - { 0, 0, 0, 0 } -}; -/* - * Reset the PHY and put it into a canonical operating state. - */ -static int ael2020_reset(struct cphy *phy, int wait) -{ - int err; - unsigned int lasi_ctrl; - - /* grab current interrupt state */ - err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, - &lasi_ctrl); - if (err) - return err; - - err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125); - if (err) - return err; - msleep(100); - - /* basic initialization for all module types */ - phy->priv = edc_none; - err = set_phy_regs(phy, ael2020_reset_regs); - if (err) - return err; - - /* determine module type and perform appropriate initialization */ - err = ael2020_get_module_type(phy, 0); - if (err < 0) - return err; - phy->modtype = (u8)err; - if (err == phy_modtype_twinax || err == phy_modtype_twinax_long) - err = ael2020_setup_twinax_edc(phy, err); - else - err = ael2020_setup_sr_edc(phy); - if (err) - return err; - - /* reset wipes out interrupts, reenable them if they were on */ - if (lasi_ctrl & 1) - err = ael2005_intr_enable(phy); - return err; -} - -/* - * Handle a PHY interrupt. - */ -static int ael2020_intr_handler(struct cphy *phy) -{ - unsigned int stat; - int ret, edc_needed, cause = 0; - - ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat); - if (ret) - return ret; - - if (stat & (0x1 << AEL2020_GPIO_MODDET)) { - /* modules have max 300 ms init time after hot plug */ - ret = ael2020_get_module_type(phy, 300); - if (ret < 0) - return ret; - - phy->modtype = (u8)ret; - if (ret == phy_modtype_none) - edc_needed = phy->priv; /* on unplug retain EDC */ - else if (ret == phy_modtype_twinax || - ret == phy_modtype_twinax_long) - edc_needed = edc_twinax; - else - edc_needed = edc_sr; - - if (edc_needed != phy->priv) { - ret = ael2020_reset(phy, 0); - return ret ? ret : cphy_cause_module_change; - } - cause = cphy_cause_module_change; - } - - ret = t3_phy_lasi_intr_handler(phy); - if (ret < 0) - return ret; - - ret |= cause; - return ret ? 
ret : cphy_cause_link_change; -} - -static struct cphy_ops ael2020_ops = { - .reset = ael2020_reset, - .intr_enable = ael2020_intr_enable, - .intr_disable = ael2020_intr_disable, - .intr_clear = ael2020_intr_clear, - .intr_handler = ael2020_intr_handler, - .get_link_status = get_link_status_r, - .power_down = ael1002_power_down, - .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, -}; - -int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, - const struct mdio_ops *mdio_ops) -{ - int err; - - cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops, - SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE | - SUPPORTED_IRQ, "10GBASE-R"); - msleep(125); - - err = set_phy_regs(phy, ael2020_reset_regs); - if (err) - return err; - return 0; -} - -/* - * Get link status for a 10GBASE-X device. - */ -static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed, - int *duplex, int *fc) -{ - if (link_ok) { - unsigned int stat0, stat1, stat2; - int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, - MDIO_PMA_RXDET, &stat0); - - if (!err) - err = t3_mdio_read(phy, MDIO_MMD_PCS, - MDIO_PCS_10GBX_STAT1, &stat1); - if (!err) - err = t3_mdio_read(phy, MDIO_MMD_PHYXS, - MDIO_PHYXS_LNSTAT, &stat2); - if (err) - return err; - *link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1; - } - if (speed) - *speed = SPEED_10000; - if (duplex) - *duplex = DUPLEX_FULL; - return 0; -} - -static struct cphy_ops qt2045_ops = { - .reset = ael1006_reset, - .intr_enable = t3_phy_lasi_intr_enable, - .intr_disable = t3_phy_lasi_intr_disable, - .intr_clear = t3_phy_lasi_intr_clear, - .intr_handler = t3_phy_lasi_intr_handler, - .get_link_status = get_link_status_x, - .power_down = ael1002_power_down, - .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, -}; - -int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops) -{ - unsigned int stat; - - cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops, - SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, - "10GBASE-CX4"); - - /* - * Some cards where the PHY is supposed to be at address 0 actually - * have it at 1. 
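- *
- * The probe below hedges against that: if MDIO_STAT1 at address 0
- * reads back as all ones (i.e. nothing answered), retarget
- * phy->mdio.prtad to 1.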
- */ - if (!phy_addr && - !t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat) && - stat == 0xffff) - phy->mdio.prtad = 1; - return 0; -} - -static int xaui_direct_reset(struct cphy *phy, int wait) -{ - return 0; -} - -static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok, - int *speed, int *duplex, int *fc) -{ - if (link_ok) { - unsigned int status; - int prtad = phy->mdio.prtad; - - status = t3_read_reg(phy->adapter, - XGM_REG(A_XGM_SERDES_STAT0, prtad)) | - t3_read_reg(phy->adapter, - XGM_REG(A_XGM_SERDES_STAT1, prtad)) | - t3_read_reg(phy->adapter, - XGM_REG(A_XGM_SERDES_STAT2, prtad)) | - t3_read_reg(phy->adapter, - XGM_REG(A_XGM_SERDES_STAT3, prtad)); - *link_ok = !(status & F_LOWSIG0); - } - if (speed) - *speed = SPEED_10000; - if (duplex) - *duplex = DUPLEX_FULL; - return 0; -} - -static int xaui_direct_power_down(struct cphy *phy, int enable) -{ - return 0; -} - -static struct cphy_ops xaui_direct_ops = { - .reset = xaui_direct_reset, - .intr_enable = ael1002_intr_noop, - .intr_disable = ael1002_intr_noop, - .intr_clear = ael1002_intr_noop, - .intr_handler = ael1002_intr_noop, - .get_link_status = xaui_direct_get_link_status, - .power_down = xaui_direct_power_down, -}; - -int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops) -{ - cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops, - SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, - "10GBASE-CX4"); - return 0; -} diff --git a/drivers/net/cxgb3/aq100x.c b/drivers/net/cxgb3/aq100x.c deleted file mode 100644 index 341b7ef..0000000 --- a/drivers/net/cxgb3/aq100x.c +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "common.h" -#include "regs.h" - -enum { - /* MDIO_DEV_PMA_PMD registers */ - AQ_LINK_STAT = 0xe800, - AQ_IMASK_PMA = 0xf000, - - /* MDIO_DEV_XGXS registers */ - AQ_XAUI_RX_CFG = 0xc400, - AQ_XAUI_TX_CFG = 0xe400, - - /* MDIO_DEV_ANEG registers */ - AQ_1G_CTRL = 0xc400, - AQ_ANEG_STAT = 0xc800, - - /* MDIO_DEV_VEND1 registers */ - AQ_FW_VERSION = 0x0020, - AQ_IFLAG_GLOBAL = 0xfc00, - AQ_IMASK_GLOBAL = 0xff00, -}; - -enum { - IMASK_PMA = 1 << 2, - IMASK_GLOBAL = 1 << 15, - ADV_1G_FULL = 1 << 15, - ADV_1G_HALF = 1 << 14, - ADV_10G_FULL = 1 << 12, - AQ_RESET = (1 << 14) | (1 << 15), - AQ_LOWPOWER = 1 << 12, -}; - -static int aq100x_reset(struct cphy *phy, int wait) -{ - /* - * Ignore the caller specified wait time; always wait for the reset to - * complete. Can take up to 3s. - */ - int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000); - - if (err) - CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n", - phy->mdio.prtad, err); - - return err; -} - -static int aq100x_intr_enable(struct cphy *phy) -{ - int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA); - if (err) - return err; - - err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL); - return err; -} - -static int aq100x_intr_disable(struct cphy *phy) -{ - return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0); -} - -static int aq100x_intr_clear(struct cphy *phy) -{ - unsigned int v; - - t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v); - t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v); - - return 0; -} - -static int aq100x_intr_handler(struct cphy *phy) -{ - int err; - unsigned int cause, v; - - err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause); - if (err) - return err; - - /* Read (and reset) the latching version of the status */ - t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v); - - return cphy_cause_link_change; -} - -static int aq100x_power_down(struct cphy *phy, int off) -{ - return mdio_set_flag(&phy->mdio, phy->mdio.prtad, - MDIO_MMD_PMAPMD, MDIO_CTRL1, - MDIO_CTRL1_LPOWER, off); -} - -static int aq100x_autoneg_enable(struct cphy *phy) -{ - int err; - - err = aq100x_power_down(phy, 0); - if (!err) - err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, - MDIO_MMD_AN, MDIO_CTRL1, - BMCR_ANENABLE | BMCR_ANRESTART, 1); - - return err; -} - -static int aq100x_autoneg_restart(struct cphy *phy) -{ - int err; - - err = aq100x_power_down(phy, 0); - if (!err) - err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, - MDIO_MMD_AN, MDIO_CTRL1, - BMCR_ANENABLE | BMCR_ANRESTART, 1); - - return err; -} - -static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map) -{ - unsigned int adv; - int err; - - /* 10G advertisement */ - adv = 0; - if (advertise_map & ADVERTISED_10000baseT_Full) - adv |= ADV_10G_FULL; - err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, - ADV_10G_FULL, adv); - if (err) - return err; - - /* 1G advertisement */ - adv = 0; - if (advertise_map & ADVERTISED_1000baseT_Full) - adv |= ADV_1G_FULL; - if (advertise_map & ADVERTISED_1000baseT_Half) - adv |= ADV_1G_HALF; - err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL, - ADV_1G_FULL | ADV_1G_HALF, adv); - if (err) - return err; - - /* 100M, pause advertisement */ - adv = 0; - if (advertise_map & ADVERTISED_100baseT_Half) - adv |= ADVERTISE_100HALF; - if (advertise_map & ADVERTISED_100baseT_Full) - adv |= ADVERTISE_100FULL; - if (advertise_map & ADVERTISED_Pause) - adv |= ADVERTISE_PAUSE_CAP; - if (advertise_map & ADVERTISED_Asym_Pause) - adv |= ADVERTISE_PAUSE_ASYM; - err = 
t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE, - 0xfe0, adv); - - return err; -} - -static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable) -{ - return mdio_set_flag(&phy->mdio, phy->mdio.prtad, - MDIO_MMD_PMAPMD, MDIO_CTRL1, - BMCR_LOOPBACK, enable); -} - -static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex) -{ - /* no can do */ - return -1; -} - -static int aq100x_get_link_status(struct cphy *phy, int *link_ok, - int *speed, int *duplex, int *fc) -{ - int err; - unsigned int v; - - if (link_ok) { - err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v); - if (err) - return err; - - *link_ok = v & 1; - if (!*link_ok) - return 0; - } - - err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v); - if (err) - return err; - - if (speed) { - switch (v & 0x6) { - case 0x6: - *speed = SPEED_10000; - break; - case 0x4: - *speed = SPEED_1000; - break; - case 0x2: - *speed = SPEED_100; - break; - case 0x0: - *speed = SPEED_10; - break; - } - } - - if (duplex) - *duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF; - - return 0; -} - -static struct cphy_ops aq100x_ops = { - .reset = aq100x_reset, - .intr_enable = aq100x_intr_enable, - .intr_disable = aq100x_intr_disable, - .intr_clear = aq100x_intr_clear, - .intr_handler = aq100x_intr_handler, - .autoneg_enable = aq100x_autoneg_enable, - .autoneg_restart = aq100x_autoneg_restart, - .advertise = aq100x_advertise, - .set_loopback = aq100x_set_loopback, - .set_speed_duplex = aq100x_set_speed_duplex, - .get_link_status = aq100x_get_link_status, - .power_down = aq100x_power_down, - .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, -}; - -int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, - const struct mdio_ops *mdio_ops) -{ - unsigned int v, v2, gpio, wait; - int err; - - cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops, - SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | - SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_AUI, - "1000/10GBASE-T"); - - /* - * The PHY has been out of reset ever since the system powered up. So - * we do a hard reset over here. - */ - gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL; - t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0); - msleep(1); - t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio); - - /* - * Give it enough time to load the firmware and get ready for mdio. - */ - msleep(1000); - wait = 500; /* in 10ms increments */ - do { - err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v); - if (err || v == 0xffff) { - - /* Allow prep_adapter to succeed when ffff is read */ - - CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n", - phy_addr, err, v); - goto done; - } - - v &= AQ_RESET; - if (v) - msleep(10); - } while (v && --wait); - if (v) { - CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n", - phy_addr, v); - - goto done; /* let prep_adapter succeed */ - } - - /* Datasheet says 3s max but this has been observed */ - wait = (500 - wait) * 10 + 1000; - if (wait > 3000) - CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait); - - /* Firmware version check. */ - t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v); - if (v != 101) - CH_WARN(adapter, "PHY%d: unsupported firmware %d\n", - phy_addr, v); - - /* - * The PHY should start in really-low-power mode. Prepare it for normal - * operations. 
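 * The code below reads MDIO_CTRL1 in the vendor MMD, clears AQ_LOWPOWER
 * if it is set (followed by a short settle delay), and warns when the
 * PHY unexpectedly comes up already in normal-power mode.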
- */ - err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v); - if (err) - return err; - if (v & AQ_LOWPOWER) { - err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1, - AQ_LOWPOWER, 0); - if (err) - return err; - msleep(10); - } else - CH_WARN(adapter, "PHY%d does not start in low power mode.\n", - phy_addr); - - /* - * Verify XAUI settings, but let prep succeed no matter what. - */ - v = v2 = 0; - t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v); - t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2); - if (v != 0x1b || v2 != 0x1b) - CH_WARN(adapter, - "PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n", - phy_addr, v, v2); - -done: - return err; -} diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h deleted file mode 100644 index df01b63..0000000 --- a/drivers/net/cxgb3/common.h +++ /dev/null @@ -1,775 +0,0 @@ -/* - * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef __CHELSIO_COMMON_H -#define __CHELSIO_COMMON_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include "version.h" - -#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__) -#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__) -#define CH_ALERT(adap, fmt, ...) \ - dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__) - -/* - * More powerful macro that selectively prints messages based on msg_enable. - * For info and debugging messages. - */ -#define CH_MSG(adapter, level, category, fmt, ...) do { \ - if ((adapter)->msg_enable & NETIF_MSG_##category) \ - dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \ - ## __VA_ARGS__); \ -} while (0) - -#ifdef DEBUG -# define CH_DBG(adapter, category, fmt, ...) \ - CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__) -#else -# define CH_DBG(adapter, category, fmt, ...) 
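/* the non-DEBUG variant expands to nothing, so CH_DBG() calls compile away */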
-#endif - -/* Additional NETIF_MSG_* categories */ -#define NETIF_MSG_MMIO 0x8000000 - -enum { - MAX_NPORTS = 2, /* max # of ports */ - MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */ - EEPROMSIZE = 8192, /* Serial EEPROM size */ - SERNUM_LEN = 16, /* Serial # length */ - RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */ - TCB_SIZE = 128, /* TCB size */ - NMTUS = 16, /* size of MTU table */ - NCCTRL_WIN = 32, /* # of congestion control windows */ - PROTO_SRAM_LINES = 128, /* size of TP sram */ -}; - -#define MAX_RX_COALESCING_LEN 12288U - -enum { - PAUSE_RX = 1 << 0, - PAUSE_TX = 1 << 1, - PAUSE_AUTONEG = 1 << 2 -}; - -enum { - SUPPORTED_IRQ = 1 << 24 -}; - -enum { /* adapter interrupt-maintained statistics */ - STAT_ULP_CH0_PBL_OOB, - STAT_ULP_CH1_PBL_OOB, - STAT_PCI_CORR_ECC, - - IRQ_NUM_STATS /* keep last */ -}; - -#define TP_VERSION_MAJOR 1 -#define TP_VERSION_MINOR 1 -#define TP_VERSION_MICRO 0 - -#define S_TP_VERSION_MAJOR 16 -#define M_TP_VERSION_MAJOR 0xFF -#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR) -#define G_TP_VERSION_MAJOR(x) \ - (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR) - -#define S_TP_VERSION_MINOR 8 -#define M_TP_VERSION_MINOR 0xFF -#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR) -#define G_TP_VERSION_MINOR(x) \ - (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR) - -#define S_TP_VERSION_MICRO 0 -#define M_TP_VERSION_MICRO 0xFF -#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO) -#define G_TP_VERSION_MICRO(x) \ - (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO) - -enum { - SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */ - SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */ - SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */ -}; - -enum sge_context_type { /* SGE egress context types */ - SGE_CNTXT_RDMA = 0, - SGE_CNTXT_ETH = 2, - SGE_CNTXT_OFLD = 4, - SGE_CNTXT_CTRL = 5 -}; - -enum { - AN_PKT_SIZE = 32, /* async notification packet size */ - IMMED_PKT_SIZE = 48 /* packet size for immediate data */ -}; - -struct sg_ent { /* SGE scatter/gather entry */ - __be32 len[2]; - __be64 addr[2]; -}; - -#ifndef SGE_NUM_GENBITS -/* Must be 1 or 2 */ -# define SGE_NUM_GENBITS 2 -#endif - -#define TX_DESC_FLITS 16U -#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS) - -struct cphy; -struct adapter; - -struct mdio_ops { - int (*read)(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr); - int (*write)(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr, u16 val); - unsigned mode_support; -}; - -struct adapter_info { - unsigned char nports0; /* # of ports on channel 0 */ - unsigned char nports1; /* # of ports on channel 1 */ - unsigned char phy_base_addr; /* MDIO PHY base address */ - unsigned int gpio_out; /* GPIO output settings */ - unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */ - unsigned long caps; /* adapter capabilities */ - const struct mdio_ops *mdio_ops; /* MDIO operations */ - const char *desc; /* product description */ -}; - -struct mc5_stats { - unsigned long parity_err; - unsigned long active_rgn_full; - unsigned long nfa_srch_err; - unsigned long unknown_cmd; - unsigned long reqq_parity_err; - unsigned long dispq_parity_err; - unsigned long del_act_empty; -}; - -struct mc7_stats { - unsigned long corr_err; - unsigned long uncorr_err; - unsigned long parity_err; - unsigned long addr_err; -}; - -struct mac_stats { - u64 tx_octets; /* total # of octets in good frames */ - u64 tx_octets_bad; /* total # of octets in error frames */ - u64 tx_frames; /* all 
good frames */ - u64 tx_mcast_frames; /* good multicast frames */ - u64 tx_bcast_frames; /* good broadcast frames */ - u64 tx_pause; /* # of transmitted pause frames */ - u64 tx_deferred; /* frames with deferred transmissions */ - u64 tx_late_collisions; /* # of late collisions */ - u64 tx_total_collisions; /* # of total collisions */ - u64 tx_excess_collisions; /* frame errors from excessive collisions */ - u64 tx_underrun; /* # of Tx FIFO underruns */ - u64 tx_len_errs; /* # of Tx length errors */ - u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */ - u64 tx_excess_deferral; /* # of frames with excessive deferral */ - u64 tx_fcs_errs; /* # of frames with bad FCS */ - - u64 tx_frames_64; /* # of Tx frames in a particular range */ - u64 tx_frames_65_127; - u64 tx_frames_128_255; - u64 tx_frames_256_511; - u64 tx_frames_512_1023; - u64 tx_frames_1024_1518; - u64 tx_frames_1519_max; - - u64 rx_octets; /* total # of octets in good frames */ - u64 rx_octets_bad; /* total # of octets in error frames */ - u64 rx_frames; /* all good frames */ - u64 rx_mcast_frames; /* good multicast frames */ - u64 rx_bcast_frames; /* good broadcast frames */ - u64 rx_pause; /* # of received pause frames */ - u64 rx_fcs_errs; /* # of received frames with bad FCS */ - u64 rx_align_errs; /* alignment errors */ - u64 rx_symbol_errs; /* symbol errors */ - u64 rx_data_errs; /* data errors */ - u64 rx_sequence_errs; /* sequence errors */ - u64 rx_runt; /* # of runt frames */ - u64 rx_jabber; /* # of jabber frames */ - u64 rx_short; /* # of short frames */ - u64 rx_too_long; /* # of oversized frames */ - u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */ - - u64 rx_frames_64; /* # of Rx frames in a particular range */ - u64 rx_frames_65_127; - u64 rx_frames_128_255; - u64 rx_frames_256_511; - u64 rx_frames_512_1023; - u64 rx_frames_1024_1518; - u64 rx_frames_1519_max; - - u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */ - - unsigned long tx_fifo_parity_err; - unsigned long rx_fifo_parity_err; - unsigned long tx_fifo_urun; - unsigned long rx_fifo_ovfl; - unsigned long serdes_signal_loss; - unsigned long xaui_pcs_ctc_err; - unsigned long xaui_pcs_align_change; - - unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */ - unsigned long num_resets; /* # times reset due to stuck TX */ - - unsigned long link_faults; /* # detected link faults */ -}; - -struct tp_mib_stats { - u32 ipInReceive_hi; - u32 ipInReceive_lo; - u32 ipInHdrErrors_hi; - u32 ipInHdrErrors_lo; - u32 ipInAddrErrors_hi; - u32 ipInAddrErrors_lo; - u32 ipInUnknownProtos_hi; - u32 ipInUnknownProtos_lo; - u32 ipInDiscards_hi; - u32 ipInDiscards_lo; - u32 ipInDelivers_hi; - u32 ipInDelivers_lo; - u32 ipOutRequests_hi; - u32 ipOutRequests_lo; - u32 ipOutDiscards_hi; - u32 ipOutDiscards_lo; - u32 ipOutNoRoutes_hi; - u32 ipOutNoRoutes_lo; - u32 ipReasmTimeout; - u32 ipReasmReqds; - u32 ipReasmOKs; - u32 ipReasmFails; - - u32 reserved[8]; - - u32 tcpActiveOpens; - u32 tcpPassiveOpens; - u32 tcpAttemptFails; - u32 tcpEstabResets; - u32 tcpOutRsts; - u32 tcpCurrEstab; - u32 tcpInSegs_hi; - u32 tcpInSegs_lo; - u32 tcpOutSegs_hi; - u32 tcpOutSegs_lo; - u32 tcpRetransSeg_hi; - u32 tcpRetransSeg_lo; - u32 tcpInErrs_hi; - u32 tcpInErrs_lo; - u32 tcpRtoMin; - u32 tcpRtoMax; -}; - -struct tp_params { - unsigned int nchan; /* # of channels */ - unsigned int pmrx_size; /* total PMRX capacity */ - unsigned int pmtx_size; /* total PMTX capacity */ - unsigned int cm_size; /* total CM capacity */ - unsigned int chan_rx_size; /* per 
channel Rx size */ - unsigned int chan_tx_size; /* per channel Tx size */ - unsigned int rx_pg_size; /* Rx page size */ - unsigned int tx_pg_size; /* Tx page size */ - unsigned int rx_num_pgs; /* # of Rx pages */ - unsigned int tx_num_pgs; /* # of Tx pages */ - unsigned int ntimer_qs; /* # of timer queues */ -}; - -struct qset_params { /* SGE queue set parameters */ - unsigned int polling; /* polling/interrupt service for rspq */ - unsigned int coalesce_usecs; /* irq coalescing timer */ - unsigned int rspq_size; /* # of entries in response queue */ - unsigned int fl_size; /* # of entries in regular free list */ - unsigned int jumbo_size; /* # of entries in jumbo free list */ - unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */ - unsigned int cong_thres; /* FL congestion threshold */ - unsigned int vector; /* Interrupt (line or vector) number */ -}; - -struct sge_params { - unsigned int max_pkt_size; /* max offload pkt size */ - struct qset_params qset[SGE_QSETS]; -}; - -struct mc5_params { - unsigned int mode; /* selects MC5 width */ - unsigned int nservers; /* size of server region */ - unsigned int nfilters; /* size of filter region */ - unsigned int nroutes; /* size of routing region */ -}; - -/* Default MC5 region sizes */ -enum { - DEFAULT_NSERVERS = 512, - DEFAULT_NFILTERS = 128 -}; - -/* MC5 modes, these must be non-0 */ -enum { - MC5_MODE_144_BIT = 1, - MC5_MODE_72_BIT = 2 -}; - -/* MC5 min active region size */ -enum { MC5_MIN_TIDS = 16 }; - -struct vpd_params { - unsigned int cclk; - unsigned int mclk; - unsigned int uclk; - unsigned int mdc; - unsigned int mem_timing; - u8 sn[SERNUM_LEN + 1]; - u8 eth_base[6]; - u8 port_type[MAX_NPORTS]; - unsigned short xauicfg[2]; -}; - -struct pci_params { - unsigned int vpd_cap_addr; - unsigned short speed; - unsigned char width; - unsigned char variant; -}; - -enum { - PCI_VARIANT_PCI, - PCI_VARIANT_PCIX_MODE1_PARITY, - PCI_VARIANT_PCIX_MODE1_ECC, - PCI_VARIANT_PCIX_266_MODE2, - PCI_VARIANT_PCIE -}; - -struct adapter_params { - struct sge_params sge; - struct mc5_params mc5; - struct tp_params tp; - struct vpd_params vpd; - struct pci_params pci; - - const struct adapter_info *info; - - unsigned short mtus[NMTUS]; - unsigned short a_wnd[NCCTRL_WIN]; - unsigned short b_wnd[NCCTRL_WIN]; - - unsigned int nports; /* # of ethernet ports */ - unsigned int chan_map; /* bitmap of in-use Tx channels */ - unsigned int stats_update_period; /* MAC stats accumulation period */ - unsigned int linkpoll_period; /* link poll period in 0.1s */ - unsigned int rev; /* chip revision */ - unsigned int offload; -}; - -enum { /* chip revisions */ - T3_REV_A = 0, - T3_REV_B = 2, - T3_REV_B2 = 3, - T3_REV_C = 4, -}; - -struct trace_params { - u32 sip; - u32 sip_mask; - u32 dip; - u32 dip_mask; - u16 sport; - u16 sport_mask; - u16 dport; - u16 dport_mask; - u32 vlan:12; - u32 vlan_mask:12; - u32 intf:4; - u32 intf_mask:4; - u8 proto; - u8 proto_mask; -}; - -struct link_config { - unsigned int supported; /* link capabilities */ - unsigned int advertising; /* advertised capabilities */ - unsigned short requested_speed; /* speed user has requested */ - unsigned short speed; /* actual link speed */ - unsigned char requested_duplex; /* duplex user has requested */ - unsigned char duplex; /* actual link duplex */ - unsigned char requested_fc; /* flow control user has requested */ - unsigned char fc; /* actual link flow control */ - unsigned char autoneg; /* autonegotiating? */ - unsigned int link_ok; /* link up? 
*/ -}; - -#define SPEED_INVALID 0xffff -#define DUPLEX_INVALID 0xff - -struct mc5 { - struct adapter *adapter; - unsigned int tcam_size; - unsigned char part_type; - unsigned char parity_enabled; - unsigned char mode; - struct mc5_stats stats; -}; - -static inline unsigned int t3_mc5_size(const struct mc5 *p) -{ - return p->tcam_size; -} - -struct mc7 { - struct adapter *adapter; /* backpointer to adapter */ - unsigned int size; /* memory size in bytes */ - unsigned int width; /* MC7 interface width */ - unsigned int offset; /* register address offset for MC7 instance */ - const char *name; /* name of MC7 instance */ - struct mc7_stats stats; /* MC7 statistics */ -}; - -static inline unsigned int t3_mc7_size(const struct mc7 *p) -{ - return p->size; -} - -struct cmac { - struct adapter *adapter; - unsigned int offset; - unsigned int nucast; /* # of address filters for unicast MACs */ - unsigned int tx_tcnt; - unsigned int tx_xcnt; - u64 tx_mcnt; - unsigned int rx_xcnt; - unsigned int rx_ocnt; - u64 rx_mcnt; - unsigned int toggle_cnt; - unsigned int txen; - u64 rx_pause; - struct mac_stats stats; -}; - -enum { - MAC_DIRECTION_RX = 1, - MAC_DIRECTION_TX = 2, - MAC_RXFIFO_SIZE = 32768 -}; - -/* PHY loopback direction */ -enum { - PHY_LOOPBACK_TX = 1, - PHY_LOOPBACK_RX = 2 -}; - -/* PHY interrupt types */ -enum { - cphy_cause_link_change = 1, - cphy_cause_fifo_error = 2, - cphy_cause_module_change = 4, -}; - -/* PHY module types */ -enum { - phy_modtype_none, - phy_modtype_sr, - phy_modtype_lr, - phy_modtype_lrm, - phy_modtype_twinax, - phy_modtype_twinax_long, - phy_modtype_unknown -}; - -/* PHY operations */ -struct cphy_ops { - int (*reset)(struct cphy *phy, int wait); - - int (*intr_enable)(struct cphy *phy); - int (*intr_disable)(struct cphy *phy); - int (*intr_clear)(struct cphy *phy); - int (*intr_handler)(struct cphy *phy); - - int (*autoneg_enable)(struct cphy *phy); - int (*autoneg_restart)(struct cphy *phy); - - int (*advertise)(struct cphy *phy, unsigned int advertise_map); - int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable); - int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex); - int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed, - int *duplex, int *fc); - int (*power_down)(struct cphy *phy, int enable); - - u32 mmds; -}; -enum { - EDC_OPT_AEL2005 = 0, - EDC_OPT_AEL2005_SIZE = 1084, - EDC_TWX_AEL2005 = 1, - EDC_TWX_AEL2005_SIZE = 1464, - EDC_TWX_AEL2020 = 2, - EDC_TWX_AEL2020_SIZE = 1628, - EDC_MAX_SIZE = EDC_TWX_AEL2020_SIZE, /* Max cache size */ -}; - -/* A PHY instance */ -struct cphy { - u8 modtype; /* PHY module type */ - short priv; /* scratch pad */ - unsigned int caps; /* PHY capabilities */ - struct adapter *adapter; /* associated adapter */ - const char *desc; /* PHY description */ - unsigned long fifo_errors; /* FIFO over/under-flows */ - const struct cphy_ops *ops; /* PHY operations */ - struct mdio_if_info mdio; - u16 phy_cache[EDC_MAX_SIZE]; /* EDC cache */ -}; - -/* Convenience MDIO read/write wrappers */ -static inline int t3_mdio_read(struct cphy *phy, int mmd, int reg, - unsigned int *valp) -{ - int rc = phy->mdio.mdio_read(phy->mdio.dev, phy->mdio.prtad, mmd, reg); - *valp = (rc >= 0) ? rc : -1; - return (rc >= 0) ? 
0 : rc; -} - -static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg, - unsigned int val) -{ - return phy->mdio.mdio_write(phy->mdio.dev, phy->mdio.prtad, mmd, - reg, val); -} - -/* Convenience initializer */ -static inline void cphy_init(struct cphy *phy, struct adapter *adapter, - int phy_addr, struct cphy_ops *phy_ops, - const struct mdio_ops *mdio_ops, - unsigned int caps, const char *desc) -{ - phy->caps = caps; - phy->adapter = adapter; - phy->desc = desc; - phy->ops = phy_ops; - if (mdio_ops) { - phy->mdio.prtad = phy_addr; - phy->mdio.mmds = phy_ops->mmds; - phy->mdio.mode_support = mdio_ops->mode_support; - phy->mdio.mdio_read = mdio_ops->read; - phy->mdio.mdio_write = mdio_ops->write; - } -} - -/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */ -#define MAC_STATS_ACCUM_SECS 180 - -#define XGM_REG(reg_addr, idx) \ - ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR)) - -struct addr_val_pair { - unsigned int reg_addr; - unsigned int val; -}; - -#include "adapter.h" - -#ifndef PCI_VENDOR_ID_CHELSIO -# define PCI_VENDOR_ID_CHELSIO 0x1425 -#endif - -#define for_each_port(adapter, iter) \ - for (iter = 0; iter < (adapter)->params.nports; ++iter) - -#define adapter_info(adap) ((adap)->params.info) - -static inline int uses_xaui(const struct adapter *adap) -{ - return adapter_info(adap)->caps & SUPPORTED_AUI; -} - -static inline int is_10G(const struct adapter *adap) -{ - return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full; -} - -static inline int is_offload(const struct adapter *adap) -{ - return adap->params.offload; -} - -static inline unsigned int core_ticks_per_usec(const struct adapter *adap) -{ - return adap->params.vpd.cclk / 1000; -} - -static inline unsigned int is_pcie(const struct adapter *adap) -{ - return adap->params.pci.variant == PCI_VARIANT_PCIE; -} - -void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, - u32 val); -void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p, - int n, unsigned int offset); -int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, - int polarity, int attempts, int delay, u32 *valp); -static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask, - int polarity, int attempts, int delay) -{ - return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts, - delay, NULL); -} -int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear, - unsigned int set); -int t3_phy_reset(struct cphy *phy, int mmd, int wait); -int t3_phy_advertise(struct cphy *phy, unsigned int advert); -int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert); -int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex); -int t3_phy_lasi_intr_enable(struct cphy *phy); -int t3_phy_lasi_intr_disable(struct cphy *phy); -int t3_phy_lasi_intr_clear(struct cphy *phy); -int t3_phy_lasi_intr_handler(struct cphy *phy); - -void t3_intr_enable(struct adapter *adapter); -void t3_intr_disable(struct adapter *adapter); -void t3_intr_clear(struct adapter *adapter); -void t3_xgm_intr_enable(struct adapter *adapter, int idx); -void t3_xgm_intr_disable(struct adapter *adapter, int idx); -void t3_port_intr_enable(struct adapter *adapter, int idx); -void t3_port_intr_disable(struct adapter *adapter, int idx); -int t3_slow_intr_handler(struct adapter *adapter); -int t3_phy_intr_handler(struct adapter *adapter); - -void t3_link_changed(struct adapter *adapter, int port_id); -void t3_link_fault(struct adapter *adapter, int port_id); 
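A minimal sketch (added for illustration; not part of the header being removed) of how the t3_mdio_read() wrapper above composes into a Clause 45 link poll. MDIO_MMD_PMAPMD, MDIO_STAT1 and MDIO_STAT1_LSTATUS come from <linux/mdio.h>; the double read assumes the standard latched link-status behaviour that the LASI helpers declared in this header also rely on:

static inline int t3_link_up_sketch(struct cphy *phy)	/* hypothetical helper */
{
	unsigned int stat;
	int err;

	/* first read returns (and clears) the latched value */
	err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat);
	/* second read reflects the current link state */
	if (!err)
		err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat);

	return err ? err : !!(stat & MDIO_STAT1_LSTATUS);
}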
-int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); -const struct adapter_info *t3_get_adapter_info(unsigned int board_id); -int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data); -int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data); -int t3_seeprom_wp(struct adapter *adapter, int enable); -int t3_get_tp_version(struct adapter *adapter, u32 *vers); -int t3_check_tpsram_version(struct adapter *adapter); -int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram, - unsigned int size); -int t3_set_proto_sram(struct adapter *adap, const u8 *data); -int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size); -int t3_get_fw_version(struct adapter *adapter, u32 *vers); -int t3_check_fw_version(struct adapter *adapter); -int t3_init_hw(struct adapter *adapter, u32 fw_params); -int t3_reset_adapter(struct adapter *adapter); -int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, - int reset); -int t3_replay_prep_adapter(struct adapter *adapter); -void t3_led_ready(struct adapter *adapter); -void t3_fatal_err(struct adapter *adapter); -void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on); -void t3_config_rss(struct adapter *adapter, unsigned int rss_config, - const u8 * cpus, const u16 *rspq); -int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr, - unsigned int n, unsigned int *valp); -int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, - u64 *buf); - -int t3_mac_reset(struct cmac *mac); -void t3b_pcs_reset(struct cmac *mac); -void t3_mac_disable_exact_filters(struct cmac *mac); -void t3_mac_enable_exact_filters(struct cmac *mac); -int t3_mac_enable(struct cmac *mac, int which); -int t3_mac_disable(struct cmac *mac, int which); -int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu); -int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev); -int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]); -int t3_mac_set_num_ucast(struct cmac *mac, int n); -const struct mac_stats *t3_mac_update_stats(struct cmac *mac); -int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc); -int t3b2_mac_watchdog_task(struct cmac *mac); - -void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode); -int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, - unsigned int nroutes); -void t3_mc5_intr_handler(struct mc5 *mc5); - -void t3_tp_set_offload_mode(struct adapter *adap, int enable); -void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps); -void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS], - unsigned short alpha[NCCTRL_WIN], - unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap); -void t3_config_trace_filter(struct adapter *adapter, - const struct trace_params *tp, int filter_index, - int invert, int enable); -int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched); - -void t3_sge_prep(struct adapter *adap, struct sge_params *p); -void t3_sge_init(struct adapter *adap, struct sge_params *p); -int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable, - enum sge_context_type type, int respq, u64 base_addr, - unsigned int size, unsigned int token, int gen, - unsigned int cidx); -int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id, - int gts_enable, u64 base_addr, unsigned int size, - unsigned int esize, unsigned int cong_thres, int gen, - unsigned int cidx); -int t3_sge_init_rspcntxt(struct 
adapter *adapter, unsigned int id, - int irq_vec_idx, u64 base_addr, unsigned int size, - unsigned int fl_thres, int gen, unsigned int cidx); -int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr, - unsigned int size, int rspq, int ovfl_mode, - unsigned int credits, unsigned int credit_thres); -int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable); -int t3_sge_disable_fl(struct adapter *adapter, unsigned int id); -int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id); -int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id); -int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, - unsigned int credits); - -int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops); -int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops); -int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops); -int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops); -int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops); -int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, - const struct mdio_ops *mdio_ops); -int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops); -int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops); -#endif /* __CHELSIO_COMMON_H */ diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h deleted file mode 100644 index 369fe71..0000000 --- a/drivers/net/cxgb3/cxgb3_ctl_defs.h +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H -#define _CXGB3_OFFLOAD_CTL_DEFS_H - -enum { - GET_MAX_OUTSTANDING_WR = 0, - GET_TX_MAX_CHUNK = 1, - GET_TID_RANGE = 2, - GET_STID_RANGE = 3, - GET_RTBL_RANGE = 4, - GET_L2T_CAPACITY = 5, - GET_MTUS = 6, - GET_WR_LEN = 7, - GET_IFF_FROM_MAC = 8, - GET_DDP_PARAMS = 9, - GET_PORTS = 10, - - ULP_ISCSI_GET_PARAMS = 11, - ULP_ISCSI_SET_PARAMS = 12, - - RDMA_GET_PARAMS = 13, - RDMA_CQ_OP = 14, - RDMA_CQ_SETUP = 15, - RDMA_CQ_DISABLE = 16, - RDMA_CTRL_QP_SETUP = 17, - RDMA_GET_MEM = 18, - RDMA_GET_MIB = 19, - - GET_RX_PAGE_INFO = 50, - GET_ISCSI_IPV4ADDR = 51, - - GET_EMBEDDED_INFO = 70, -}; - -/* - * Structure used to describe a TID range. Valid TIDs are [base, base+num). - */ -struct tid_range { - unsigned int base; /* first TID */ - unsigned int num; /* number of TIDs in range */ -}; - -/* - * Structure used to request the size and contents of the MTU table. - */ -struct mtutab { - unsigned int size; /* # of entries in the MTU table */ - const unsigned short *mtus; /* the MTU table values */ -}; - -struct net_device; - -/* - * Structure used to request the adapter net_device owning a given MAC address. - */ -struct iff_mac { - struct net_device *dev; /* the net_device */ - const unsigned char *mac_addr; /* MAC address to lookup */ - u16 vlan_tag; -}; - -/* Structure used to request a port's iSCSI IPv4 address */ -struct iscsi_ipv4addr { - struct net_device *dev; /* the net_device */ - __be32 ipv4addr; /* the return iSCSI IPv4 address */ -}; - -struct pci_dev; - -/* - * Structure used to request the TCP DDP parameters. - */ -struct ddp_params { - unsigned int llimit; /* TDDP region start address */ - unsigned int ulimit; /* TDDP region end address */ - unsigned int tag_mask; /* TDDP tag mask */ - struct pci_dev *pdev; -}; - -struct adap_ports { - unsigned int nports; /* number of ports on this adapter */ - struct net_device *lldevs[2]; -}; - -/* - * Structure used to return information to the iscsi layer. - */ -struct ulp_iscsi_info { - unsigned int offset; - unsigned int llimit; - unsigned int ulimit; - unsigned int tagmask; - u8 pgsz_factor[4]; - unsigned int max_rxsz; - unsigned int max_txsz; - struct pci_dev *pdev; -}; - -/* - * Structure used to return information to the RDMA layer. - */ -struct rdma_info { - unsigned int tpt_base; /* TPT base address */ - unsigned int tpt_top; /* TPT last entry address */ - unsigned int pbl_base; /* PBL base address */ - unsigned int pbl_top; /* PBL last entry address */ - unsigned int rqt_base; /* RQT base address */ - unsigned int rqt_top; /* RQT last entry address */ - unsigned int udbell_len; /* user doorbell region length */ - unsigned long udbell_physbase; /* user doorbell physical start addr */ - void __iomem *kdb_addr; /* kernel doorbell register address */ - struct pci_dev *pdev; /* associated PCI device */ -}; - -/* - * Structure used to request an operation on an RDMA completion queue. - */ -struct rdma_cq_op { - unsigned int id; - unsigned int op; - unsigned int credits; -}; - -/* - * Structure used to setup RDMA completion queues. - */ -struct rdma_cq_setup { - unsigned int id; - unsigned long long base_addr; - unsigned int size; - unsigned int credits; - unsigned int credit_thres; - unsigned int ovfl_mode; -}; - -/* - * Structure used to setup the RDMA control egress context. - */ -struct rdma_ctrlqp_setup { - unsigned long long base_addr; - unsigned int size; -}; - -/* - * Offload TX/RX page information. 
- */ -struct ofld_page_info { - unsigned int page_size; /* Page size, should be a power of 2 */ - unsigned int num; /* Number of pages */ -}; - -/* - * Structure used to get firmware and protocol engine versions. - */ -struct ch_embedded_info { - u32 fw_vers; - u32 tp_vers; -}; -#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */ diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h deleted file mode 100644 index 920d918..0000000 --- a/drivers/net/cxgb3/cxgb3_defs.h +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef _CHELSIO_DEFS_H -#define _CHELSIO_DEFS_H - -#include -#include - -#include "t3cdev.h" - -#include "cxgb3_offload.h" - -#define VALIDATE_TID 1 - -void *cxgb_alloc_mem(unsigned long size); -void cxgb_free_mem(void *addr); - -/* - * Map an ATID or STID to their entries in the corresponding TID tables. - */ -static inline union active_open_entry *atid2entry(const struct tid_info *t, - unsigned int atid) -{ - return &t->atid_tab[atid - t->atid_base]; -} - -static inline union listen_entry *stid2entry(const struct tid_info *t, - unsigned int stid) -{ - return &t->stid_tab[stid - t->stid_base]; -} - -/* - * Find the connection corresponding to a TID. - */ -static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t, - unsigned int tid) -{ - struct t3c_tid_entry *t3c_tid = tid < t->ntids ? - &(t->tid_tab[tid]) : NULL; - - return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL; -} - -/* - * Find the connection corresponding to a server TID. - */ -static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t, - unsigned int tid) -{ - union listen_entry *e; - - if (tid < t->stid_base || tid >= t->stid_base + t->nstids) - return NULL; - - e = stid2entry(t, tid); - if ((void *)e->next >= (void *)t->tid_tab && - (void *)e->next < (void *)&t->atid_tab[t->natids]) - return NULL; - - return &e->t3c_tid; -} - -/* - * Find the connection corresponding to an active-open TID. 
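 * As in lookup_stid() above, an entry whose next pointer still points
 * into the TID tables is sitting on a free list rather than describing
 * a live connection, so the lookup returns NULL for it.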
- */ -static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t, - unsigned int tid) -{ - union active_open_entry *e; - - if (tid < t->atid_base || tid >= t->atid_base + t->natids) - return NULL; - - e = atid2entry(t, tid); - if ((void *)e->next >= (void *)t->tid_tab && - (void *)e->next < (void *)&t->atid_tab[t->natids]) - return NULL; - - return &e->t3c_tid; -} - -int attach_t3cdev(struct t3cdev *dev); -void detach_t3cdev(struct t3cdev *dev); -#endif diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h deleted file mode 100644 index b19e437..0000000 --- a/drivers/net/cxgb3/cxgb3_ioctl.h +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef __CHIOCTL_H__ -#define __CHIOCTL_H__ - -/* - * Ioctl commands specific to this driver. 
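 * They are issued through SIOCCHIOCTL (an alias for SIOCDEVPRIVATE,
 * defined at the bottom of this header), and every argument structure
 * below begins with a cmd word carrying one of these values.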
- */ -enum { - CHELSIO_GETMTUTAB = 1029, - CHELSIO_SETMTUTAB = 1030, - CHELSIO_SET_PM = 1032, - CHELSIO_GET_PM = 1033, - CHELSIO_GET_MEM = 1038, - CHELSIO_LOAD_FW = 1041, - CHELSIO_SET_TRACE_FILTER = 1044, - CHELSIO_SET_QSET_PARAMS = 1045, - CHELSIO_GET_QSET_PARAMS = 1046, - CHELSIO_SET_QSET_NUM = 1047, - CHELSIO_GET_QSET_NUM = 1048, -}; - -struct ch_reg { - uint32_t cmd; - uint32_t addr; - uint32_t val; -}; - -struct ch_cntxt { - uint32_t cmd; - uint32_t cntxt_type; - uint32_t cntxt_id; - uint32_t data[4]; -}; - -/* context types */ -enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ }; - -struct ch_desc { - uint32_t cmd; - uint32_t queue_num; - uint32_t idx; - uint32_t size; - uint8_t data[128]; -}; - -struct ch_mem_range { - uint32_t cmd; - uint32_t mem_id; - uint32_t addr; - uint32_t len; - uint32_t version; - uint8_t buf[0]; -}; - -struct ch_qset_params { - uint32_t cmd; - uint32_t qset_idx; - int32_t txq_size[3]; - int32_t rspq_size; - int32_t fl_size[2]; - int32_t intr_lat; - int32_t polling; - int32_t lro; - int32_t cong_thres; - int32_t vector; - int32_t qnum; -}; - -struct ch_pktsched_params { - uint32_t cmd; - uint8_t sched; - uint8_t idx; - uint8_t min; - uint8_t max; - uint8_t binding; -}; - -#ifndef TCB_SIZE -# define TCB_SIZE 128 -#endif - -/* TCB size in 32-bit words */ -#define TCB_WORDS (TCB_SIZE / 4) - -enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */ - -struct ch_mtus { - uint32_t cmd; - uint32_t nmtus; - uint16_t mtus[NMTUS]; -}; - -struct ch_pm { - uint32_t cmd; - uint32_t tx_pg_sz; - uint32_t tx_num_pg; - uint32_t rx_pg_sz; - uint32_t rx_num_pg; - uint32_t pm_total; -}; - -struct ch_tcam { - uint32_t cmd; - uint32_t tcam_size; - uint32_t nservers; - uint32_t nroutes; - uint32_t nfilters; -}; - -struct ch_tcb { - uint32_t cmd; - uint32_t tcb_index; - uint32_t tcb_data[TCB_WORDS]; -}; - -struct ch_tcam_word { - uint32_t cmd; - uint32_t addr; - uint32_t buf[3]; -}; - -struct ch_trace { - uint32_t cmd; - uint32_t sip; - uint32_t sip_mask; - uint32_t dip; - uint32_t dip_mask; - uint16_t sport; - uint16_t sport_mask; - uint16_t dport; - uint16_t dport_mask; - uint32_t vlan:12; - uint32_t vlan_mask:12; - uint32_t intf:4; - uint32_t intf_mask:4; - uint8_t proto; - uint8_t proto_mask; - uint8_t invert_match:1; - uint8_t config_tx:1; - uint8_t config_rx:1; - uint8_t trace_tx:1; - uint8_t trace_rx:1; -}; - -#define SIOCCHIOCTL SIOCDEVPRIVATE - -#endif diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c deleted file mode 100644 index 93b41a7..0000000 --- a/drivers/net/cxgb3/cxgb3_main.c +++ /dev/null @@ -1,3448 +0,0 @@ -/* - * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "common.h" -#include "cxgb3_ioctl.h" -#include "regs.h" -#include "cxgb3_offload.h" -#include "version.h" - -#include "cxgb3_ctl_defs.h" -#include "t3_cpl.h" -#include "firmware_exports.h" - -enum { - MAX_TXQ_ENTRIES = 16384, - MAX_CTRL_TXQ_ENTRIES = 1024, - MAX_RSPQ_ENTRIES = 16384, - MAX_RX_BUFFERS = 16384, - MAX_RX_JUMBO_BUFFERS = 16384, - MIN_TXQ_ENTRIES = 4, - MIN_CTRL_TXQ_ENTRIES = 4, - MIN_RSPQ_ENTRIES = 32, - MIN_FL_ENTRIES = 32 -}; - -#define PORT_MASK ((1 << MAX_NPORTS) - 1) - -#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ - NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ - NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) - -#define EEPROM_MAGIC 0x38E2F10C - -#define CH_DEVICE(devid, idx) \ - { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } - -static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = { - CH_DEVICE(0x20, 0), /* PE9000 */ - CH_DEVICE(0x21, 1), /* T302E */ - CH_DEVICE(0x22, 2), /* T310E */ - CH_DEVICE(0x23, 3), /* T320X */ - CH_DEVICE(0x24, 1), /* T302X */ - CH_DEVICE(0x25, 3), /* T320E */ - CH_DEVICE(0x26, 2), /* T310X */ - CH_DEVICE(0x30, 2), /* T3B10 */ - CH_DEVICE(0x31, 3), /* T3B20 */ - CH_DEVICE(0x32, 1), /* T3B02 */ - CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */ - CH_DEVICE(0x36, 3), /* S320E-CR */ - CH_DEVICE(0x37, 7), /* N320E-G2 */ - {0,} -}; - -MODULE_DESCRIPTION(DRV_DESC); -MODULE_AUTHOR("Chelsio Communications"); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_VERSION(DRV_VERSION); -MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl); - -static int dflt_msg_enable = DFLT_MSG_ENABLE; - -module_param(dflt_msg_enable, int, 0644); -MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap"); - -/* - * The driver uses the best interrupt scheme available on a platform in the - * order MSI-X, MSI, legacy pin interrupts. This parameter determines which - * of these schemes the driver may consider as follows: - * - * msi = 2: choose from among all three options - * msi = 1: only consider MSI and pin interrupts - * msi = 0: force pin interrupts - */ -static int msi = 2; - -module_param(msi, int, 0644); -MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X"); - -/* - * The driver enables offload as a default. - * To disable it, use ofld_disable = 1. - */ - -static int ofld_disable = 0; - -module_param(ofld_disable, int, 0644); -MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not"); - -/* - * We have work elements that we need to cancel when an interface is taken - * down. Normally the work elements would be executed by keventd but that - * can deadlock because of linkwatch. If our close method takes the rtnl - * lock and linkwatch is ahead of our work elements in keventd, linkwatch - * will block keventd as it needs the rtnl lock, and we'll deadlock waiting - * for our work to complete. 
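 * (keventd is shared system-wide, so there is no way to flush our work
 * without also waiting on whatever linkwatch is blocked on.)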
Get our own work queue to solve this. - */ -struct workqueue_struct *cxgb3_wq; - -/** - * link_report - show link status and link speed/duplex - * @dev: the port whose settings are to be reported - * - * Shows the link status, speed, and duplex of a port. - */ -static void link_report(struct net_device *dev) -{ - if (!netif_carrier_ok(dev)) - printk(KERN_INFO "%s: link down\n", dev->name); - else { - const char *s = "10Mbps"; - const struct port_info *p = netdev_priv(dev); - - switch (p->link_config.speed) { - case SPEED_10000: - s = "10Gbps"; - break; - case SPEED_1000: - s = "1000Mbps"; - break; - case SPEED_100: - s = "100Mbps"; - break; - } - - printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s, - p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); - } -} - -static void enable_tx_fifo_drain(struct adapter *adapter, - struct port_info *pi) -{ - t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0, - F_ENDROPPKT); - t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0); - t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN); - t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN); -} - -static void disable_tx_fifo_drain(struct adapter *adapter, - struct port_info *pi) -{ - t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, - F_ENDROPPKT, 0); -} - -void t3_os_link_fault(struct adapter *adap, int port_id, int state) -{ - struct net_device *dev = adap->port[port_id]; - struct port_info *pi = netdev_priv(dev); - - if (state == netif_carrier_ok(dev)) - return; - - if (state) { - struct cmac *mac = &pi->mac; - - netif_carrier_on(dev); - - disable_tx_fifo_drain(adap, pi); - - /* Clear local faults */ - t3_xgm_intr_disable(adap, pi->port_id); - t3_read_reg(adap, A_XGM_INT_STATUS + - pi->mac.offset); - t3_write_reg(adap, - A_XGM_INT_CAUSE + pi->mac.offset, - F_XGM_INT); - - t3_set_reg_field(adap, - A_XGM_INT_ENABLE + - pi->mac.offset, - F_XGM_INT, F_XGM_INT); - t3_xgm_intr_enable(adap, pi->port_id); - - t3_mac_enable(mac, MAC_DIRECTION_TX); - } else { - netif_carrier_off(dev); - - /* Flush TX FIFO */ - enable_tx_fifo_drain(adap, pi); - } - link_report(dev); -} - -/** - * t3_os_link_changed - handle link status changes - * @adapter: the adapter associated with the link change - * @port_id: the port index whose link status has changed - * @link_stat: the new status of the link - * @speed: the new speed setting - * @duplex: the new duplex setting - * @pause: the new flow-control setting - * - * This is the OS-dependent handler for link status changes. The OS - * neutral handler takes care of most of the processing for these events, - * then calls this handler for any OS-specific processing. - */ -void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat, - int speed, int duplex, int pause) -{ - struct net_device *dev = adapter->port[port_id]; - struct port_info *pi = netdev_priv(dev); - struct cmac *mac = &pi->mac; - - /* Skip changes from disabled ports. 
*/ - if (!netif_running(dev)) - return; - - if (link_stat != netif_carrier_ok(dev)) { - if (link_stat) { - disable_tx_fifo_drain(adapter, pi); - - t3_mac_enable(mac, MAC_DIRECTION_RX); - - /* Clear local faults */ - t3_xgm_intr_disable(adapter, pi->port_id); - t3_read_reg(adapter, A_XGM_INT_STATUS + - pi->mac.offset); - t3_write_reg(adapter, - A_XGM_INT_CAUSE + pi->mac.offset, - F_XGM_INT); - - t3_set_reg_field(adapter, - A_XGM_INT_ENABLE + pi->mac.offset, - F_XGM_INT, F_XGM_INT); - t3_xgm_intr_enable(adapter, pi->port_id); - - netif_carrier_on(dev); - } else { - netif_carrier_off(dev); - - t3_xgm_intr_disable(adapter, pi->port_id); - t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset); - t3_set_reg_field(adapter, - A_XGM_INT_ENABLE + pi->mac.offset, - F_XGM_INT, 0); - - if (is_10G(adapter)) - pi->phy.ops->power_down(&pi->phy, 1); - - t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset); - t3_mac_disable(mac, MAC_DIRECTION_RX); - t3_link_start(&pi->phy, mac, &pi->link_config); - - /* Flush TX FIFO */ - enable_tx_fifo_drain(adapter, pi); - } - - link_report(dev); - } -} - -/** - * t3_os_phymod_changed - handle PHY module changes - * @adap: the adapter associated with the module change - * @port_id: the port index whose PHY module changed - * - * This is the OS-dependent handler for PHY module changes. It is - * invoked when a PHY module is removed or inserted for any OS-specific - * processing. - */ -void t3_os_phymod_changed(struct adapter *adap, int port_id) -{ - static const char *mod_str[] = { - NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown" - }; - - const struct net_device *dev = adap->port[port_id]; - const struct port_info *pi = netdev_priv(dev); - - if (pi->phy.modtype == phy_modtype_none) - printk(KERN_INFO "%s: PHY module unplugged\n", dev->name); - else - printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name, - mod_str[pi->phy.modtype]); -} - -static void cxgb_set_rxmode(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - - t3_mac_set_rx_mode(&pi->mac, dev); -} - -/** - * link_start - enable a port - * @dev: the device to enable - * - * Performs the MAC and PHY actions needed to enable a port. - */ -static void link_start(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - struct cmac *mac = &pi->mac; - - t3_mac_reset(mac); - t3_mac_set_num_ucast(mac, MAX_MAC_IDX); - t3_mac_set_mtu(mac, dev->mtu); - t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr); - t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr); - t3_mac_set_rx_mode(mac, dev); - t3_link_start(&pi->phy, mac, &pi->link_config); - t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); -} - -static inline void cxgb_disable_msi(struct adapter *adapter) -{ - if (adapter->flags & USING_MSIX) { - pci_disable_msix(adapter->pdev); - adapter->flags &= ~USING_MSIX; - } else if (adapter->flags & USING_MSI) { - pci_disable_msi(adapter->pdev); - adapter->flags &= ~USING_MSI; - } -} - -/* - * Interrupt handler for asynchronous events used with MSI-X. - */ -static irqreturn_t t3_async_intr_handler(int irq, void *cookie) -{ - t3_slow_intr_handler(cookie); - return IRQ_HANDLED; -} - -/* - * Name the MSI-X interrupts. 
- */ -static void name_msix_vecs(struct adapter *adap) -{ - int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1; - - snprintf(adap->msix_info[0].desc, n, "%s", adap->name); - adap->msix_info[0].desc[n] = 0; - - for_each_port(adap, j) { - struct net_device *d = adap->port[j]; - const struct port_info *pi = netdev_priv(d); - - for (i = 0; i < pi->nqsets; i++, msi_idx++) { - snprintf(adap->msix_info[msi_idx].desc, n, - "%s-%d", d->name, pi->first_qset + i); - adap->msix_info[msi_idx].desc[n] = 0; - } - } -} - -static int request_msix_data_irqs(struct adapter *adap) -{ - int i, j, err, qidx = 0; - - for_each_port(adap, i) { - int nqsets = adap2pinfo(adap, i)->nqsets; - - for (j = 0; j < nqsets; ++j) { - err = request_irq(adap->msix_info[qidx + 1].vec, - t3_intr_handler(adap, - adap->sge.qs[qidx]. - rspq.polling), 0, - adap->msix_info[qidx + 1].desc, - &adap->sge.qs[qidx]); - if (err) { - while (--qidx >= 0) - free_irq(adap->msix_info[qidx + 1].vec, - &adap->sge.qs[qidx]); - return err; - } - qidx++; - } - } - return 0; -} - -static void free_irq_resources(struct adapter *adapter) -{ - if (adapter->flags & USING_MSIX) { - int i, n = 0; - - free_irq(adapter->msix_info[0].vec, adapter); - for_each_port(adapter, i) - n += adap2pinfo(adapter, i)->nqsets; - - for (i = 0; i < n; ++i) - free_irq(adapter->msix_info[i + 1].vec, - &adapter->sge.qs[i]); - } else - free_irq(adapter->pdev->irq, adapter); -} - -static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, - unsigned long n) -{ - int attempts = 10; - - while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { - if (!--attempts) - return -ETIMEDOUT; - msleep(10); - } - return 0; -} - -static int init_tp_parity(struct adapter *adap) -{ - int i; - struct sk_buff *skb; - struct cpl_set_tcb_field *greq; - unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; - - t3_tp_set_offload_mode(adap, 1); - - for (i = 0; i < 16; i++) { - struct cpl_smt_write_req *req; - - skb = alloc_skb(sizeof(*req), GFP_KERNEL); - if (!skb) - skb = adap->nofail_skb; - if (!skb) - goto alloc_skb_fail; - - req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i)); - req->mtu_idx = NMTUS - 1; - req->iff = i; - t3_mgmt_tx(adap, skb); - if (skb == adap->nofail_skb) { - await_mgmt_replies(adap, cnt, i + 1); - adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); - if (!adap->nofail_skb) - goto alloc_skb_fail; - } - } - - for (i = 0; i < 2048; i++) { - struct cpl_l2t_write_req *req; - - skb = alloc_skb(sizeof(*req), GFP_KERNEL); - if (!skb) - skb = adap->nofail_skb; - if (!skb) - goto alloc_skb_fail; - - req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i)); - req->params = htonl(V_L2T_W_IDX(i)); - t3_mgmt_tx(adap, skb); - if (skb == adap->nofail_skb) { - await_mgmt_replies(adap, cnt, 16 + i + 1); - adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); - if (!adap->nofail_skb) - goto alloc_skb_fail; - } - } - - for (i = 0; i < 2048; i++) { - struct cpl_rte_write_req *req; - - skb = alloc_skb(sizeof(*req), GFP_KERNEL); - if (!skb) - skb = adap->nofail_skb; - if (!skb) - goto alloc_skb_fail; - - req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); - req->wr.wr_hi = 
htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i)); - req->l2t_idx = htonl(V_L2T_W_IDX(i)); - t3_mgmt_tx(adap, skb); - if (skb == adap->nofail_skb) { - await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1); - adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); - if (!adap->nofail_skb) - goto alloc_skb_fail; - } - } - - skb = alloc_skb(sizeof(*greq), GFP_KERNEL); - if (!skb) - skb = adap->nofail_skb; - if (!skb) - goto alloc_skb_fail; - - greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq)); - memset(greq, 0, sizeof(*greq)); - greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0)); - greq->mask = cpu_to_be64(1); - t3_mgmt_tx(adap, skb); - - i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); - if (skb == adap->nofail_skb) { - i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); - adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); - } - - t3_tp_set_offload_mode(adap, 0); - return i; - -alloc_skb_fail: - t3_tp_set_offload_mode(adap, 0); - return -ENOMEM; -} - -/** - * setup_rss - configure RSS - * @adap: the adapter - * - * Sets up RSS to distribute packets to multiple receive queues. We - * configure the RSS CPU lookup table to distribute to the number of HW - * receive queues, and the response queue lookup table to narrow that - * down to the response queues actually configured for each port. - * We always configure the RSS mapping for two ports since the mapping - * table has plenty of entries. - */ -static void setup_rss(struct adapter *adap) -{ - int i; - unsigned int nq0 = adap2pinfo(adap, 0)->nqsets; - unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1; - u8 cpus[SGE_QSETS + 1]; - u16 rspq_map[RSS_TABLE_SIZE]; - - for (i = 0; i < SGE_QSETS; ++i) - cpus[i] = i; - cpus[SGE_QSETS] = 0xff; /* terminator */ - - for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) { - rspq_map[i] = i % nq0; - rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0; - } - - t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | - F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | - V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); -} - -static void ring_dbs(struct adapter *adap) -{ - int i, j; - - for (i = 0; i < SGE_QSETS; i++) { - struct sge_qset *qs = &adap->sge.qs[i]; - - if (qs->adap) - for (j = 0; j < SGE_TXQ_PER_SET; j++) - t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id)); - } -} - -static void init_napi(struct adapter *adap) -{ - int i; - - for (i = 0; i < SGE_QSETS; i++) { - struct sge_qset *qs = &adap->sge.qs[i]; - - if (qs->adap) - netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll, - 64); - } - - /* - * netif_napi_add() can be called only once per napi_struct because it - * adds each new napi_struct to a list. Be careful not to call it a - * second time, e.g., during EEH recovery, by making a note of it. - */ - adap->flags |= NAPI_INIT; -} - -/* - * Wait until all NAPI handlers are descheduled. This includes the handlers of - * both netdevices representing interfaces and the dummy ones for the extra - * queues. 
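setup_rss() above splits the response-queue lookup table in half: entries in the first half cycle over port 0's queue sets, entries in the second half cycle over port 1's, offset by nq0 because port 1's qsets sit directly after port 0's in the global array. A self-contained sketch with assumed sizes (the real RSS_TABLE_SIZE and queue counts differ):

#include <stdio.h>

#define RSS_TABLE_SIZE 64  /* assumed size, for display only */

int main(void)
{
    unsigned int nq0 = 4, nq1 = 2;  /* example per-port qset counts */
    unsigned short rspq_map[RSS_TABLE_SIZE];
    int i;

    for (i = 0; i < RSS_TABLE_SIZE / 2; i++) {
        rspq_map[i] = i % nq0;                               /* port 0 */
        rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;  /* port 1 */
    }

    for (i = 0; i < RSS_TABLE_SIZE; i++)
        printf("%u%c", (unsigned int)rspq_map[i],
               (i % 16 == 15) ? '\n' : ' ');
    return 0;
}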
- */ -static void quiesce_rx(struct adapter *adap) -{ - int i; - - for (i = 0; i < SGE_QSETS; i++) - if (adap->sge.qs[i].adap) - napi_disable(&adap->sge.qs[i].napi); -} - -static void enable_all_napi(struct adapter *adap) -{ - int i; - for (i = 0; i < SGE_QSETS; i++) - if (adap->sge.qs[i].adap) - napi_enable(&adap->sge.qs[i].napi); -} - -/** - * setup_sge_qsets - configure SGE Tx/Rx/response queues - * @adap: the adapter - * - * Determines how many sets of SGE queues to use and initializes them. - * We support multiple queue sets per port if we have MSI-X, otherwise - * just one queue set per port. - */ -static int setup_sge_qsets(struct adapter *adap) -{ - int i, j, err, irq_idx = 0, qset_idx = 0; - unsigned int ntxq = SGE_TXQ_PER_SET; - - if (adap->params.rev > 0 && !(adap->flags & USING_MSI)) - irq_idx = -1; - - for_each_port(adap, i) { - struct net_device *dev = adap->port[i]; - struct port_info *pi = netdev_priv(dev); - - pi->qs = &adap->sge.qs[pi->first_qset]; - for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { - err = t3_sge_alloc_qset(adap, qset_idx, 1, - (adap->flags & USING_MSIX) ? qset_idx + 1 : - irq_idx, - &adap->params.sge.qset[qset_idx], ntxq, dev, - netdev_get_tx_queue(dev, j)); - if (err) { - t3_free_sge_resources(adap); - return err; - } - } - } - - return 0; -} - -static ssize_t attr_show(struct device *d, char *buf, - ssize_t(*format) (struct net_device *, char *)) -{ - ssize_t len; - - /* Synchronize with ioctls that may shut down the device */ - rtnl_lock(); - len = (*format) (to_net_dev(d), buf); - rtnl_unlock(); - return len; -} - -static ssize_t attr_store(struct device *d, - const char *buf, size_t len, - ssize_t(*set) (struct net_device *, unsigned int), - unsigned int min_val, unsigned int max_val) -{ - char *endp; - ssize_t ret; - unsigned int val; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - val = simple_strtoul(buf, &endp, 0); - if (endp == buf || val < min_val || val > max_val) - return -EINVAL; - - rtnl_lock(); - ret = (*set) (to_net_dev(d), val); - if (!ret) - ret = len; - rtnl_unlock(); - return ret; -} - -#define CXGB3_SHOW(name, val_expr) \ -static ssize_t format_##name(struct net_device *dev, char *buf) \ -{ \ - struct port_info *pi = netdev_priv(dev); \ - struct adapter *adap = pi->adapter; \ - return sprintf(buf, "%u\n", val_expr); \ -} \ -static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ - char *buf) \ -{ \ - return attr_show(d, buf, format_##name); \ -} - -static ssize_t set_nfilters(struct net_device *dev, unsigned int val) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adap = pi->adapter; - int min_tids = is_offload(adap) ? 
MC5_MIN_TIDS : 0; - - if (adap->flags & FULL_INIT_DONE) - return -EBUSY; - if (val && adap->params.rev == 0) - return -EINVAL; - if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers - - min_tids) - return -EINVAL; - adap->params.mc5.nfilters = val; - return 0; -} - -static ssize_t store_nfilters(struct device *d, struct device_attribute *attr, - const char *buf, size_t len) -{ - return attr_store(d, buf, len, set_nfilters, 0, ~0); -} - -static ssize_t set_nservers(struct net_device *dev, unsigned int val) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adap = pi->adapter; - - if (adap->flags & FULL_INIT_DONE) - return -EBUSY; - if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters - - MC5_MIN_TIDS) - return -EINVAL; - adap->params.mc5.nservers = val; - return 0; -} - -static ssize_t store_nservers(struct device *d, struct device_attribute *attr, - const char *buf, size_t len) -{ - return attr_store(d, buf, len, set_nservers, 0, ~0); -} - -#define CXGB3_ATTR_R(name, val_expr) \ -CXGB3_SHOW(name, val_expr) \ -static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) - -#define CXGB3_ATTR_RW(name, val_expr, store_method) \ -CXGB3_SHOW(name, val_expr) \ -static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method) - -CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5)); -CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters); -CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers); - -static struct attribute *cxgb3_attrs[] = { - &dev_attr_cam_size.attr, - &dev_attr_nfilters.attr, - &dev_attr_nservers.attr, - NULL -}; - -static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs }; - -static ssize_t tm_attr_show(struct device *d, - char *buf, int sched) -{ - struct port_info *pi = netdev_priv(to_net_dev(d)); - struct adapter *adap = pi->adapter; - unsigned int v, addr, bpt, cpt; - ssize_t len; - - addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; - rtnl_lock(); - t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr); - v = t3_read_reg(adap, A_TP_TM_PIO_DATA); - if (sched & 1) - v >>= 16; - bpt = (v >> 8) & 0xff; - cpt = v & 0xff; - if (!cpt) - len = sprintf(buf, "disabled\n"); - else { - v = (adap->params.vpd.cclk * 1000) / cpt; - len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125); - } - rtnl_unlock(); - return len; -} - -static ssize_t tm_attr_store(struct device *d, - const char *buf, size_t len, int sched) -{ - struct port_info *pi = netdev_priv(to_net_dev(d)); - struct adapter *adap = pi->adapter; - unsigned int val; - char *endp; - ssize_t ret; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - val = simple_strtoul(buf, &endp, 0); - if (endp == buf || val > 10000000) - return -EINVAL; - - rtnl_lock(); - ret = t3_config_sched(adap, val, sched); - if (!ret) - ret = len; - rtnl_unlock(); - return ret; -} - -#define TM_ATTR(name, sched) \ -static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ - char *buf) \ -{ \ - return tm_attr_show(d, buf, sched); \ -} \ -static ssize_t store_##name(struct device *d, struct device_attribute *attr, \ - const char *buf, size_t len) \ -{ \ - return tm_attr_store(d, buf, len, sched); \ -} \ -static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name) - -TM_ATTR(sched0, 0); -TM_ATTR(sched1, 1); -TM_ATTR(sched2, 2); -TM_ATTR(sched3, 3); -TM_ATTR(sched4, 4); -TM_ATTR(sched5, 5); -TM_ATTR(sched6, 6); -TM_ATTR(sched7, 7); - -static struct attribute *offload_attrs[] = { - &dev_attr_sched0.attr, - &dev_attr_sched1.attr, - &dev_attr_sched2.attr, - &dev_attr_sched3.attr, - 
&dev_attr_sched4.attr, - &dev_attr_sched5.attr, - &dev_attr_sched6.attr, - &dev_attr_sched7.attr, - NULL -}; - -static struct attribute_group offload_attr_group = {.attrs = offload_attrs }; - -/* - * Sends an sk_buff to an offload queue driver - * after dealing with any active network taps. - */ -static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb) -{ - int ret; - - local_bh_disable(); - ret = t3_offload_tx(tdev, skb); - local_bh_enable(); - return ret; -} - -static int write_smt_entry(struct adapter *adapter, int idx) -{ - struct cpl_smt_write_req *req; - struct port_info *pi = netdev_priv(adapter->port[idx]); - struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL); - - if (!skb) - return -ENOMEM; - - req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req)); - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx)); - req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */ - req->iff = idx; - memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN); - memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN); - skb->priority = 1; - offload_tx(&adapter->tdev, skb); - return 0; -} - -static int init_smt(struct adapter *adapter) -{ - int i; - - for_each_port(adapter, i) - write_smt_entry(adapter, i); - return 0; -} - -static void init_port_mtus(struct adapter *adapter) -{ - unsigned int mtus = adapter->port[0]->mtu; - - if (adapter->port[1]) - mtus |= adapter->port[1]->mtu << 16; - t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus); -} - -static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, - int hi, int port) -{ - struct sk_buff *skb; - struct mngt_pktsched_wr *req; - int ret; - - skb = alloc_skb(sizeof(*req), GFP_KERNEL); - if (!skb) - skb = adap->nofail_skb; - if (!skb) - return -ENOMEM; - - req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req)); - req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); - req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; - req->sched = sched; - req->idx = qidx; - req->min = lo; - req->max = hi; - req->binding = port; - ret = t3_mgmt_tx(adap, skb); - if (skb == adap->nofail_skb) { - adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field), - GFP_KERNEL); - if (!adap->nofail_skb) - ret = -ENOMEM; - } - - return ret; -} - -static int bind_qsets(struct adapter *adap) -{ - int i, j, err = 0; - - for_each_port(adap, i) { - const struct port_info *pi = adap2pinfo(adap, i); - - for (j = 0; j < pi->nqsets; ++j) { - int ret = send_pktsched_cmd(adap, 1, - pi->first_qset + j, -1, - -1, i); - if (ret) - err = ret; - } - } - - return err; -} - -#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \ - __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO) -#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin" -#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \ - __stringify(TP_VERSION_MINOR) "." 
__stringify(TP_VERSION_MICRO) -#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin" -#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin" -#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin" -#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin" -MODULE_FIRMWARE(FW_FNAME); -MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin"); -MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin"); -MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME); -MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME); -MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME); - -static inline const char *get_edc_fw_name(int edc_idx) -{ - const char *fw_name = NULL; - - switch (edc_idx) { - case EDC_OPT_AEL2005: - fw_name = AEL2005_OPT_EDC_NAME; - break; - case EDC_TWX_AEL2005: - fw_name = AEL2005_TWX_EDC_NAME; - break; - case EDC_TWX_AEL2020: - fw_name = AEL2020_TWX_EDC_NAME; - break; - } - return fw_name; -} - -int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size) -{ - struct adapter *adapter = phy->adapter; - const struct firmware *fw; - char buf[64]; - u32 csum; - const __be32 *p; - u16 *cache = phy->phy_cache; - int i, ret; - - snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx)); - - ret = request_firmware(&fw, buf, &adapter->pdev->dev); - if (ret < 0) { - dev_err(&adapter->pdev->dev, - "could not upgrade firmware: unable to load %s\n", - buf); - return ret; - } - - /* check size, take checksum in account */ - if (fw->size > size + 4) { - CH_ERR(adapter, "firmware image too large %u, expected %d\n", - (unsigned int)fw->size, size + 4); - ret = -EINVAL; - } - - /* compute checksum */ - p = (const __be32 *)fw->data; - for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++) - csum += ntohl(p[i]); - - if (csum != 0xffffffff) { - CH_ERR(adapter, "corrupted firmware image, checksum %u\n", - csum); - ret = -EINVAL; - } - - for (i = 0; i < size / 4 ; i++) { - *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16; - *cache++ = be32_to_cpu(p[i]) & 0xffff; - } - - release_firmware(fw); - - return ret; -} - -static int upgrade_fw(struct adapter *adap) -{ - int ret; - const struct firmware *fw; - struct device *dev = &adap->pdev->dev; - - ret = request_firmware(&fw, FW_FNAME, dev); - if (ret < 0) { - dev_err(dev, "could not upgrade firmware: unable to load %s\n", - FW_FNAME); - return ret; - } - ret = t3_load_fw(adap, fw->data, fw->size); - release_firmware(fw); - - if (ret == 0) - dev_info(dev, "successful upgrade to firmware %d.%d.%d\n", - FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); - else - dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n", - FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); - - return ret; -} - -static inline char t3rev2char(struct adapter *adapter) -{ - char rev = 0; - - switch(adapter->params.rev) { - case T3_REV_B: - case T3_REV_B2: - rev = 'b'; - break; - case T3_REV_C: - rev = 'c'; - break; - } - return rev; -} - -static int update_tpsram(struct adapter *adap) -{ - const struct firmware *tpsram; - char buf[64]; - struct device *dev = &adap->pdev->dev; - int ret; - char rev; - - rev = t3rev2char(adap); - if (!rev) - return 0; - - snprintf(buf, sizeof(buf), TPSRAM_NAME, rev); - - ret = request_firmware(&tpsram, buf, dev); - if (ret < 0) { - dev_err(dev, "could not load TP SRAM: unable to load %s\n", - buf); - return ret; - } - - ret = t3_check_tpsram(adap, tpsram->data, tpsram->size); - if (ret) - goto release_tpsram; - - ret = t3_set_proto_sram(adap, tpsram->data); - if (ret == 0) - dev_info(dev, - "successful update of protocol engine " - "to %d.%d.%d\n", - TP_VERSION_MAJOR, TP_VERSION_MINOR, 
TP_VERSION_MICRO); - else - dev_err(dev, "failed to update of protocol engine %d.%d.%d\n", - TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); - if (ret) - dev_err(dev, "loading protocol SRAM failed\n"); - -release_tpsram: - release_firmware(tpsram); - - return ret; -} - -/** - * cxgb_up - enable the adapter - * @adapter: adapter being enabled - * - * Called when the first port is enabled, this function performs the - * actions necessary to make an adapter operational, such as completing - * the initialization of HW modules, and enabling interrupts. - * - * Must be called with the rtnl lock held. - */ -static int cxgb_up(struct adapter *adap) -{ - int err; - - if (!(adap->flags & FULL_INIT_DONE)) { - err = t3_check_fw_version(adap); - if (err == -EINVAL) { - err = upgrade_fw(adap); - CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n", - FW_VERSION_MAJOR, FW_VERSION_MINOR, - FW_VERSION_MICRO, err ? "failed" : "succeeded"); - } - - err = t3_check_tpsram_version(adap); - if (err == -EINVAL) { - err = update_tpsram(adap); - CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n", - TP_VERSION_MAJOR, TP_VERSION_MINOR, - TP_VERSION_MICRO, err ? "failed" : "succeeded"); - } - - /* - * Clear interrupts now to catch errors if t3_init_hw fails. - * We clear them again later as initialization may trigger - * conditions that can interrupt. - */ - t3_intr_clear(adap); - - err = t3_init_hw(adap, 0); - if (err) - goto out; - - t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT); - t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); - - err = setup_sge_qsets(adap); - if (err) - goto out; - - setup_rss(adap); - if (!(adap->flags & NAPI_INIT)) - init_napi(adap); - - t3_start_sge_timers(adap); - adap->flags |= FULL_INIT_DONE; - } - - t3_intr_clear(adap); - - if (adap->flags & USING_MSIX) { - name_msix_vecs(adap); - err = request_irq(adap->msix_info[0].vec, - t3_async_intr_handler, 0, - adap->msix_info[0].desc, adap); - if (err) - goto irq_err; - - err = request_msix_data_irqs(adap); - if (err) { - free_irq(adap->msix_info[0].vec, adap); - goto irq_err; - } - } else if ((err = request_irq(adap->pdev->irq, - t3_intr_handler(adap, - adap->sge.qs[0].rspq. - polling), - (adap->flags & USING_MSI) ? - 0 : IRQF_SHARED, - adap->name, adap))) - goto irq_err; - - enable_all_napi(adap); - t3_sge_start(adap); - t3_intr_enable(adap); - - if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) && - is_offload(adap) && init_tp_parity(adap) == 0) - adap->flags |= TP_PARITY_INIT; - - if (adap->flags & TP_PARITY_INIT) { - t3_write_reg(adap, A_TP_INT_CAUSE, - F_CMCACHEPERR | F_ARPLUTPERR); - t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff); - } - - if (!(adap->flags & QUEUES_BOUND)) { - int ret = bind_qsets(adap); - - if (ret < 0) { - CH_ERR(adap, "failed to bind qsets, err %d\n", ret); - t3_intr_disable(adap); - free_irq_resources(adap); - err = ret; - goto out; - } - adap->flags |= QUEUES_BOUND; - } - -out: - return err; -irq_err: - CH_ERR(adap, "request_irq failed, err %d\n", err); - goto out; -} - -/* - * Release resources when all the ports and offloading have been stopped. 
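cxgb_up() above and request_msix_data_irqs() earlier share one error-handling shape: acquire interrupt vectors one by one and, when any request fails, release everything already held before propagating the error. A minimal sketch of the acquire-or-unwind idiom; acquire() and release() are hypothetical stand-ins for request_irq() and free_irq():

#include <stdio.h>

#define NVEC 4

static int acquire(int i)  { return i == 2 ? -1 : 0; }  /* fail on 2 for demo */
static void release(int i) { printf("released %d\n", i); }

static int acquire_all(void)
{
    int i, err;

    for (i = 0; i < NVEC; i++) {
        err = acquire(i);
        if (err) {
            /* Unwind in reverse, mirroring "while (--qidx >= 0)
             * free_irq(...)" in request_msix_data_irqs(). */
            while (--i >= 0)
                release(i);
            return err;
        }
    }
    return 0;
}

int main(void)
{
    printf("acquire_all: %d\n", acquire_all());
    return 0;
}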
- */ -static void cxgb_down(struct adapter *adapter, int on_wq) -{ - t3_sge_stop(adapter); - spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */ - t3_intr_disable(adapter); - spin_unlock_irq(&adapter->work_lock); - - free_irq_resources(adapter); - quiesce_rx(adapter); - t3_sge_stop(adapter); - if (!on_wq) - flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */ -} - -static void schedule_chk_task(struct adapter *adap) -{ - unsigned int timeo; - - timeo = adap->params.linkpoll_period ? - (HZ * adap->params.linkpoll_period) / 10 : - adap->params.stats_update_period * HZ; - if (timeo) - queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo); -} - -static int offload_open(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - struct t3cdev *tdev = dev2t3cdev(dev); - int adap_up = adapter->open_device_map & PORT_MASK; - int err; - - if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) - return 0; - - if (!adap_up && (err = cxgb_up(adapter)) < 0) - goto out; - - t3_tp_set_offload_mode(adapter, 1); - tdev->lldev = adapter->port[0]; - err = cxgb3_offload_activate(adapter); - if (err) - goto out; - - init_port_mtus(adapter); - t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd, - adapter->params.b_wnd, - adapter->params.rev == 0 ? - adapter->port[0]->mtu : 0xffff); - init_smt(adapter); - - if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group)) - dev_dbg(&dev->dev, "cannot create sysfs group\n"); - - /* Call back all registered clients */ - cxgb3_add_clients(tdev); - -out: - /* restore them in case the offload module has changed them */ - if (err) { - t3_tp_set_offload_mode(adapter, 0); - clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); - cxgb3_set_dummy_ops(tdev); - } - return err; -} - -static int offload_close(struct t3cdev *tdev) -{ - struct adapter *adapter = tdev2adap(tdev); - struct t3c_data *td = T3C_DATA(tdev); - - if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) - return 0; - - /* Call back all registered clients */ - cxgb3_remove_clients(tdev); - - sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group); - - /* Flush work scheduled while releasing TIDs */ - flush_work_sync(&td->tid_release_task); - - tdev->lldev = NULL; - cxgb3_set_dummy_ops(tdev); - t3_tp_set_offload_mode(adapter, 0); - clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); - - if (!adapter->open_device_map) - cxgb_down(adapter, 0); - - cxgb3_offload_deactivate(adapter); - return 0; -} - -static int cxgb_open(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - int other_ports = adapter->open_device_map & PORT_MASK; - int err; - - if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) - return err; - - set_bit(pi->port_id, &adapter->open_device_map); - if (is_offload(adapter) && !ofld_disable) { - err = offload_open(dev); - if (err) - printk(KERN_WARNING - "Could not initialize offload capabilities\n"); - } - - netif_set_real_num_tx_queues(dev, pi->nqsets); - err = netif_set_real_num_rx_queues(dev, pi->nqsets); - if (err) - return err; - link_start(dev); - t3_port_intr_enable(adapter, pi->port_id); - netif_tx_start_all_queues(dev); - if (!other_ports) - schedule_chk_task(adapter); - - cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id); - return 0; -} - -static int __cxgb_close(struct net_device *dev, int on_wq) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter 
= pi->adapter; - - - if (!adapter->open_device_map) - return 0; - - /* Stop link fault interrupts */ - t3_xgm_intr_disable(adapter, pi->port_id); - t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset); - - t3_port_intr_disable(adapter, pi->port_id); - netif_tx_stop_all_queues(dev); - pi->phy.ops->power_down(&pi->phy, 1); - netif_carrier_off(dev); - t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); - - spin_lock_irq(&adapter->work_lock); /* sync with update task */ - clear_bit(pi->port_id, &adapter->open_device_map); - spin_unlock_irq(&adapter->work_lock); - - if (!(adapter->open_device_map & PORT_MASK)) - cancel_delayed_work_sync(&adapter->adap_check_task); - - if (!adapter->open_device_map) - cxgb_down(adapter, on_wq); - - cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id); - return 0; -} - -static int cxgb_close(struct net_device *dev) -{ - return __cxgb_close(dev, 0); -} - -static struct net_device_stats *cxgb_get_stats(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - struct net_device_stats *ns = &pi->netstats; - const struct mac_stats *pstats; - - spin_lock(&adapter->stats_lock); - pstats = t3_mac_update_stats(&pi->mac); - spin_unlock(&adapter->stats_lock); - - ns->tx_bytes = pstats->tx_octets; - ns->tx_packets = pstats->tx_frames; - ns->rx_bytes = pstats->rx_octets; - ns->rx_packets = pstats->rx_frames; - ns->multicast = pstats->rx_mcast_frames; - - ns->tx_errors = pstats->tx_underrun; - ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs + - pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short + - pstats->rx_fifo_ovfl; - - /* detailed rx_errors */ - ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long; - ns->rx_over_errors = 0; - ns->rx_crc_errors = pstats->rx_fcs_errs; - ns->rx_frame_errors = pstats->rx_symbol_errs; - ns->rx_fifo_errors = pstats->rx_fifo_ovfl; - ns->rx_missed_errors = pstats->rx_cong_drops; - - /* detailed tx_errors */ - ns->tx_aborted_errors = 0; - ns->tx_carrier_errors = 0; - ns->tx_fifo_errors = pstats->tx_underrun; - ns->tx_heartbeat_errors = 0; - ns->tx_window_errors = 0; - return ns; -} - -static u32 get_msglevel(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - return adapter->msg_enable; -} - -static void set_msglevel(struct net_device *dev, u32 val) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - adapter->msg_enable = val; -} - -static char stats_strings[][ETH_GSTRING_LEN] = { - "TxOctetsOK ", - "TxFramesOK ", - "TxMulticastFramesOK", - "TxBroadcastFramesOK", - "TxPauseFrames ", - "TxUnderrun ", - "TxExtUnderrun ", - - "TxFrames64 ", - "TxFrames65To127 ", - "TxFrames128To255 ", - "TxFrames256To511 ", - "TxFrames512To1023 ", - "TxFrames1024To1518 ", - "TxFrames1519ToMax ", - - "RxOctetsOK ", - "RxFramesOK ", - "RxMulticastFramesOK", - "RxBroadcastFramesOK", - "RxPauseFrames ", - "RxFCSErrors ", - "RxSymbolErrors ", - "RxShortErrors ", - "RxJabberErrors ", - "RxLengthErrors ", - "RxFIFOoverflow ", - - "RxFrames64 ", - "RxFrames65To127 ", - "RxFrames128To255 ", - "RxFrames256To511 ", - "RxFrames512To1023 ", - "RxFrames1024To1518 ", - "RxFrames1519ToMax ", - - "PhyFIFOErrors ", - "TSO ", - "VLANextractions ", - "VLANinsertions ", - "TxCsumOffload ", - "RxCsumGood ", - "LroAggregated ", - "LroFlushed ", - "LroNoDesc ", - "RxDrops ", - - "CheckTXEnToggled ", - "CheckResets ", - - "LinkFaults ", -}; - -static int get_sset_count(struct 
net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(stats_strings); - default: - return -EOPNOTSUPP; - } -} - -#define T3_REGMAP_SIZE (3 * 1024) - -static int get_regs_len(struct net_device *dev) -{ - return T3_REGMAP_SIZE; -} - -static int get_eeprom_len(struct net_device *dev) -{ - return EEPROMSIZE; -} - -static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - u32 fw_vers = 0; - u32 tp_vers = 0; - - spin_lock(&adapter->stats_lock); - t3_get_fw_version(adapter, &fw_vers); - t3_get_tp_version(adapter, &tp_vers); - spin_unlock(&adapter->stats_lock); - - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(adapter->pdev)); - if (!fw_vers) - strcpy(info->fw_version, "N/A"); - else { - snprintf(info->fw_version, sizeof(info->fw_version), - "%s %u.%u.%u TP %u.%u.%u", - G_FW_VERSION_TYPE(fw_vers) ? "T" : "N", - G_FW_VERSION_MAJOR(fw_vers), - G_FW_VERSION_MINOR(fw_vers), - G_FW_VERSION_MICRO(fw_vers), - G_TP_VERSION_MAJOR(tp_vers), - G_TP_VERSION_MINOR(tp_vers), - G_TP_VERSION_MICRO(tp_vers)); - } -} - -static void get_strings(struct net_device *dev, u32 stringset, u8 * data) -{ - if (stringset == ETH_SS_STATS) - memcpy(data, stats_strings, sizeof(stats_strings)); -} - -static unsigned long collect_sge_port_stats(struct adapter *adapter, - struct port_info *p, int idx) -{ - int i; - unsigned long tot = 0; - - for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i) - tot += adapter->sge.qs[i].port_stats[idx]; - return tot; -} - -static void get_stats(struct net_device *dev, struct ethtool_stats *stats, - u64 *data) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - const struct mac_stats *s; - - spin_lock(&adapter->stats_lock); - s = t3_mac_update_stats(&pi->mac); - spin_unlock(&adapter->stats_lock); - - *data++ = s->tx_octets; - *data++ = s->tx_frames; - *data++ = s->tx_mcast_frames; - *data++ = s->tx_bcast_frames; - *data++ = s->tx_pause; - *data++ = s->tx_underrun; - *data++ = s->tx_fifo_urun; - - *data++ = s->tx_frames_64; - *data++ = s->tx_frames_65_127; - *data++ = s->tx_frames_128_255; - *data++ = s->tx_frames_256_511; - *data++ = s->tx_frames_512_1023; - *data++ = s->tx_frames_1024_1518; - *data++ = s->tx_frames_1519_max; - - *data++ = s->rx_octets; - *data++ = s->rx_frames; - *data++ = s->rx_mcast_frames; - *data++ = s->rx_bcast_frames; - *data++ = s->rx_pause; - *data++ = s->rx_fcs_errs; - *data++ = s->rx_symbol_errs; - *data++ = s->rx_short; - *data++ = s->rx_jabber; - *data++ = s->rx_too_long; - *data++ = s->rx_fifo_ovfl; - - *data++ = s->rx_frames_64; - *data++ = s->rx_frames_65_127; - *data++ = s->rx_frames_128_255; - *data++ = s->rx_frames_256_511; - *data++ = s->rx_frames_512_1023; - *data++ = s->rx_frames_1024_1518; - *data++ = s->rx_frames_1519_max; - - *data++ = pi->phy.fifo_errors; - - *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO); - *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX); - *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); - *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); - *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); - *data++ = 0; - *data++ = 0; - *data++ = 0; - *data++ = s->rx_cong_drops; - - *data++ = s->num_toggled; - *data++ = s->num_resets; - - *data++ = s->link_faults; -} - -static inline void reg_block_dump(struct adapter *ap, void 
*buf, - unsigned int start, unsigned int end) -{ - u32 *p = buf + start; - - for (; start <= end; start += sizeof(u32)) - *p++ = t3_read_reg(ap, start); -} - -static void get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *buf) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *ap = pi->adapter; - - /* - * Version scheme: - * bits 0..9: chip version - * bits 10..15: chip revision - * bit 31: set for PCIe cards - */ - regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31); - - /* - * We skip the MAC statistics registers because they are clear-on-read. - * Also reading multi-register stats would need to synchronize with the - * periodic mac stats accumulation. Hard to justify the complexity. - */ - memset(buf, 0, T3_REGMAP_SIZE); - reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN); - reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT); - reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE); - reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA); - reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3); - reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0, - XGM_REG(A_XGM_SERDES_STAT3, 1)); - reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1), - XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1)); -} - -static int restart_autoneg(struct net_device *dev) -{ - struct port_info *p = netdev_priv(dev); - - if (!netif_running(dev)) - return -EAGAIN; - if (p->link_config.autoneg != AUTONEG_ENABLE) - return -EINVAL; - p->phy.ops->autoneg_restart(&p->phy); - return 0; -} - -static int set_phys_id(struct net_device *dev, - enum ethtool_phys_id_state state) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - return 1; /* cycle on/off once per second */ - - case ETHTOOL_ID_OFF: - t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0); - break; - - case ETHTOOL_ID_ON: - case ETHTOOL_ID_INACTIVE: - t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, - F_GPIO0_OUT_VAL); - } - - return 0; -} - -static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct port_info *p = netdev_priv(dev); - - cmd->supported = p->link_config.supported; - cmd->advertising = p->link_config.advertising; - - if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(cmd, p->link_config.speed); - cmd->duplex = p->link_config.duplex; - } else { - ethtool_cmd_speed_set(cmd, -1); - cmd->duplex = -1; - } - - cmd->port = (cmd->supported & SUPPORTED_TP) ? 
PORT_TP : PORT_FIBRE; - cmd->phy_address = p->phy.mdio.prtad; - cmd->transceiver = XCVR_EXTERNAL; - cmd->autoneg = p->link_config.autoneg; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - return 0; -} - -static int speed_duplex_to_caps(int speed, int duplex) -{ - int cap = 0; - - switch (speed) { - case SPEED_10: - if (duplex == DUPLEX_FULL) - cap = SUPPORTED_10baseT_Full; - else - cap = SUPPORTED_10baseT_Half; - break; - case SPEED_100: - if (duplex == DUPLEX_FULL) - cap = SUPPORTED_100baseT_Full; - else - cap = SUPPORTED_100baseT_Half; - break; - case SPEED_1000: - if (duplex == DUPLEX_FULL) - cap = SUPPORTED_1000baseT_Full; - else - cap = SUPPORTED_1000baseT_Half; - break; - case SPEED_10000: - if (duplex == DUPLEX_FULL) - cap = SUPPORTED_10000baseT_Full; - } - return cap; -} - -#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ - ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ - ADVERTISED_10000baseT_Full) - -static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct port_info *p = netdev_priv(dev); - struct link_config *lc = &p->link_config; - - if (!(lc->supported & SUPPORTED_Autoneg)) { - /* - * PHY offers a single speed/duplex. See if that's what's - * being requested. - */ - if (cmd->autoneg == AUTONEG_DISABLE) { - u32 speed = ethtool_cmd_speed(cmd); - int cap = speed_duplex_to_caps(speed, cmd->duplex); - if (lc->supported & cap) - return 0; - } - return -EINVAL; - } - - if (cmd->autoneg == AUTONEG_DISABLE) { - u32 speed = ethtool_cmd_speed(cmd); - int cap = speed_duplex_to_caps(speed, cmd->duplex); - - if (!(lc->supported & cap) || (speed == SPEED_1000)) - return -EINVAL; - lc->requested_speed = speed; - lc->requested_duplex = cmd->duplex; - lc->advertising = 0; - } else { - cmd->advertising &= ADVERTISED_MASK; - cmd->advertising &= lc->supported; - if (!cmd->advertising) - return -EINVAL; - lc->requested_speed = SPEED_INVALID; - lc->requested_duplex = DUPLEX_INVALID; - lc->advertising = cmd->advertising | ADVERTISED_Autoneg; - } - lc->autoneg = cmd->autoneg; - if (netif_running(dev)) - t3_link_start(&p->phy, &p->mac, lc); - return 0; -} - -static void get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct port_info *p = netdev_priv(dev); - - epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; - epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0; - epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; -} - -static int set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct port_info *p = netdev_priv(dev); - struct link_config *lc = &p->link_config; - - if (epause->autoneg == AUTONEG_DISABLE) - lc->requested_fc = 0; - else if (lc->supported & SUPPORTED_Autoneg) - lc->requested_fc = PAUSE_AUTONEG; - else - return -EINVAL; - - if (epause->rx_pause) - lc->requested_fc |= PAUSE_RX; - if (epause->tx_pause) - lc->requested_fc |= PAUSE_TX; - if (lc->autoneg == AUTONEG_ENABLE) { - if (netif_running(dev)) - t3_link_start(&p->phy, &p->mac, lc); - } else { - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - if (netif_running(dev)) - t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc); - } - return 0; -} - -static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset]; - - e->rx_max_pending = MAX_RX_BUFFERS; - 
e->rx_mini_max_pending = 0; - e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; - e->tx_max_pending = MAX_TXQ_ENTRIES; - - e->rx_pending = q->fl_size; - e->rx_mini_pending = q->rspq_size; - e->rx_jumbo_pending = q->jumbo_size; - e->tx_pending = q->txq_size[0]; -} - -static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - struct qset_params *q; - int i; - - if (e->rx_pending > MAX_RX_BUFFERS || - e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || - e->tx_pending > MAX_TXQ_ENTRIES || - e->rx_mini_pending > MAX_RSPQ_ENTRIES || - e->rx_mini_pending < MIN_RSPQ_ENTRIES || - e->rx_pending < MIN_FL_ENTRIES || - e->rx_jumbo_pending < MIN_FL_ENTRIES || - e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES) - return -EINVAL; - - if (adapter->flags & FULL_INIT_DONE) - return -EBUSY; - - q = &adapter->params.sge.qset[pi->first_qset]; - for (i = 0; i < pi->nqsets; ++i, ++q) { - q->rspq_size = e->rx_mini_pending; - q->fl_size = e->rx_pending; - q->jumbo_size = e->rx_jumbo_pending; - q->txq_size[0] = e->tx_pending; - q->txq_size[1] = e->tx_pending; - q->txq_size[2] = e->tx_pending; - } - return 0; -} - -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - struct qset_params *qsp; - struct sge_qset *qs; - int i; - - if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) - return -EINVAL; - - for (i = 0; i < pi->nqsets; i++) { - qsp = &adapter->params.sge.qset[i]; - qs = &adapter->sge.qs[i]; - qsp->coalesce_usecs = c->rx_coalesce_usecs; - t3_update_qset_coalesce(qs, qsp); - } - - return 0; -} - -static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - struct qset_params *q = adapter->params.sge.qset; - - c->rx_coalesce_usecs = q->coalesce_usecs; - return 0; -} - -static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, - u8 * data) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - int i, err = 0; - - u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - e->magic = EEPROM_MAGIC; - for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) - err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]); - - if (!err) - memcpy(data, buf + e->offset, e->len); - kfree(buf); - return err; -} - -static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, - u8 * data) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - u32 aligned_offset, aligned_len; - __le32 *p; - u8 *buf; - int err; - - if (eeprom->magic != EEPROM_MAGIC) - return -EINVAL; - - aligned_offset = eeprom->offset & ~3; - aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; - - if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { - buf = kmalloc(aligned_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf); - if (!err && aligned_len > 4) - err = t3_seeprom_read(adapter, - aligned_offset + aligned_len - 4, - (__le32 *) & buf[aligned_len - 4]); - if (err) - goto out; - memcpy(buf + (eeprom->offset & 3), data, eeprom->len); - } else - buf = data; - - err = t3_seeprom_wp(adapter, 0); - if (err) - goto out; - - for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) { - err = t3_seeprom_write(adapter, aligned_offset, *p); 
- aligned_offset += 4; - } - - if (!err) - err = t3_seeprom_wp(adapter, 1); -out: - if (buf != data) - kfree(buf); - return err; -} - -static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - wol->supported = 0; - wol->wolopts = 0; - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -static const struct ethtool_ops cxgb_ethtool_ops = { - .get_settings = get_settings, - .set_settings = set_settings, - .get_drvinfo = get_drvinfo, - .get_msglevel = get_msglevel, - .set_msglevel = set_msglevel, - .get_ringparam = get_sge_param, - .set_ringparam = set_sge_param, - .get_coalesce = get_coalesce, - .set_coalesce = set_coalesce, - .get_eeprom_len = get_eeprom_len, - .get_eeprom = get_eeprom, - .set_eeprom = set_eeprom, - .get_pauseparam = get_pauseparam, - .set_pauseparam = set_pauseparam, - .get_link = ethtool_op_get_link, - .get_strings = get_strings, - .set_phys_id = set_phys_id, - .nway_reset = restart_autoneg, - .get_sset_count = get_sset_count, - .get_ethtool_stats = get_stats, - .get_regs_len = get_regs_len, - .get_regs = get_regs, - .get_wol = get_wol, -}; - -static int in_range(int val, int lo, int hi) -{ - return val < 0 || (val <= hi && val >= lo); -} - -static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - u32 cmd; - int ret; - - if (copy_from_user(&cmd, useraddr, sizeof(cmd))) - return -EFAULT; - - switch (cmd) { - case CHELSIO_SET_QSET_PARAMS:{ - int i; - struct qset_params *q; - struct ch_qset_params t; - int q1 = pi->first_qset; - int nqsets = pi->nqsets; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (copy_from_user(&t, useraddr, sizeof(t))) - return -EFAULT; - if (t.qset_idx >= SGE_QSETS) - return -EINVAL; - if (!in_range(t.intr_lat, 0, M_NEWTIMER) || - !in_range(t.cong_thres, 0, 255) || - !in_range(t.txq_size[0], MIN_TXQ_ENTRIES, - MAX_TXQ_ENTRIES) || - !in_range(t.txq_size[1], MIN_TXQ_ENTRIES, - MAX_TXQ_ENTRIES) || - !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES, - MAX_CTRL_TXQ_ENTRIES) || - !in_range(t.fl_size[0], MIN_FL_ENTRIES, - MAX_RX_BUFFERS) || - !in_range(t.fl_size[1], MIN_FL_ENTRIES, - MAX_RX_JUMBO_BUFFERS) || - !in_range(t.rspq_size, MIN_RSPQ_ENTRIES, - MAX_RSPQ_ENTRIES)) - return -EINVAL; - - if ((adapter->flags & FULL_INIT_DONE) && - (t.rspq_size >= 0 || t.fl_size[0] >= 0 || - t.fl_size[1] >= 0 || t.txq_size[0] >= 0 || - t.txq_size[1] >= 0 || t.txq_size[2] >= 0 || - t.polling >= 0 || t.cong_thres >= 0)) - return -EBUSY; - - /* Allow setting of any available qset when offload enabled */ - if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { - q1 = 0; - for_each_port(adapter, i) { - pi = adap2pinfo(adapter, i); - nqsets += pi->first_qset + pi->nqsets; - } - } - - if (t.qset_idx < q1) - return -EINVAL; - if (t.qset_idx > q1 + nqsets - 1) - return -EINVAL; - - q = &adapter->params.sge.qset[t.qset_idx]; - - if (t.rspq_size >= 0) - q->rspq_size = t.rspq_size; - if (t.fl_size[0] >= 0) - q->fl_size = t.fl_size[0]; - if (t.fl_size[1] >= 0) - q->jumbo_size = t.fl_size[1]; - if (t.txq_size[0] >= 0) - q->txq_size[0] = t.txq_size[0]; - if (t.txq_size[1] >= 0) - q->txq_size[1] = t.txq_size[1]; - if (t.txq_size[2] >= 0) - q->txq_size[2] = t.txq_size[2]; - if (t.cong_thres >= 0) - q->cong_thres = t.cong_thres; - if (t.intr_lat >= 0) { - struct sge_qset *qs = - &adapter->sge.qs[t.qset_idx]; - - q->coalesce_usecs = t.intr_lat; - t3_update_qset_coalesce(qs, q); - } - if (t.polling >= 0) { - if (adapter->flags & USING_MSIX) - 
q->polling = t.polling; - else { - /* No polling with INTx for T3A */ - if (adapter->params.rev == 0 && - !(adapter->flags & USING_MSI)) - t.polling = 0; - - for (i = 0; i < SGE_QSETS; i++) { - q = &adapter->params.sge. - qset[i]; - q->polling = t.polling; - } - } - } - - if (t.lro >= 0) { - if (t.lro) - dev->wanted_features |= NETIF_F_GRO; - else - dev->wanted_features &= ~NETIF_F_GRO; - netdev_update_features(dev); - } - - break; - } - case CHELSIO_GET_QSET_PARAMS:{ - struct qset_params *q; - struct ch_qset_params t; - int q1 = pi->first_qset; - int nqsets = pi->nqsets; - int i; - - if (copy_from_user(&t, useraddr, sizeof(t))) - return -EFAULT; - - /* Display qsets for all ports when offload enabled */ - if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { - q1 = 0; - for_each_port(adapter, i) { - pi = adap2pinfo(adapter, i); - nqsets = pi->first_qset + pi->nqsets; - } - } - - if (t.qset_idx >= nqsets) - return -EINVAL; - - q = &adapter->params.sge.qset[q1 + t.qset_idx]; - t.rspq_size = q->rspq_size; - t.txq_size[0] = q->txq_size[0]; - t.txq_size[1] = q->txq_size[1]; - t.txq_size[2] = q->txq_size[2]; - t.fl_size[0] = q->fl_size; - t.fl_size[1] = q->jumbo_size; - t.polling = q->polling; - t.lro = !!(dev->features & NETIF_F_GRO); - t.intr_lat = q->coalesce_usecs; - t.cong_thres = q->cong_thres; - t.qnum = q1; - - if (adapter->flags & USING_MSIX) - t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec; - else - t.vector = adapter->pdev->irq; - - if (copy_to_user(useraddr, &t, sizeof(t))) - return -EFAULT; - break; - } - case CHELSIO_SET_QSET_NUM:{ - struct ch_reg edata; - unsigned int i, first_qset = 0, other_qsets = 0; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (adapter->flags & FULL_INIT_DONE) - return -EBUSY; - if (copy_from_user(&edata, useraddr, sizeof(edata))) - return -EFAULT; - if (edata.val < 1 || - (edata.val > 1 && !(adapter->flags & USING_MSIX))) - return -EINVAL; - - for_each_port(adapter, i) - if (adapter->port[i] && adapter->port[i] != dev) - other_qsets += adap2pinfo(adapter, i)->nqsets; - - if (edata.val + other_qsets > SGE_QSETS) - return -EINVAL; - - pi->nqsets = edata.val; - - for_each_port(adapter, i) - if (adapter->port[i]) { - pi = adap2pinfo(adapter, i); - pi->first_qset = first_qset; - first_qset += pi->nqsets; - } - break; - } - case CHELSIO_GET_QSET_NUM:{ - struct ch_reg edata; - - memset(&edata, 0, sizeof(struct ch_reg)); - - edata.cmd = CHELSIO_GET_QSET_NUM; - edata.val = pi->nqsets; - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - break; - } - case CHELSIO_LOAD_FW:{ - u8 *fw_data; - struct ch_mem_range t; - - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - if (copy_from_user(&t, useraddr, sizeof(t))) - return -EFAULT; - /* Check t.len sanity ? 
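After CHELSIO_SET_QSET_NUM changes one port's queue count, the loop above relabels every port's first_qset so the per-port ranges tile the global qset array contiguously. The renumbering in isolation, with assumed counts:

#include <stdio.h>

#define NPORTS 2

int main(void)
{
    unsigned int nqsets[NPORTS] = { 3, 2 };  /* example per-port counts */
    unsigned int first_qset[NPORTS];
    unsigned int i, next = 0;

    /* Same walk as the for_each_port() loop: each port starts where
     * the previous one ended. */
    for (i = 0; i < NPORTS; i++) {
        first_qset[i] = next;
        next += nqsets[i];
    }

    for (i = 0; i < NPORTS; i++)
        printf("port %u: qsets [%u..%u]\n",
               i, first_qset[i], first_qset[i] + nqsets[i] - 1);
    return 0;  /* port 0: [0..2], port 1: [3..4] */
}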
*/ - fw_data = memdup_user(useraddr + sizeof(t), t.len); - if (IS_ERR(fw_data)) - return PTR_ERR(fw_data); - - ret = t3_load_fw(adapter, fw_data, t.len); - kfree(fw_data); - if (ret) - return ret; - break; - } - case CHELSIO_SETMTUTAB:{ - struct ch_mtus m; - int i; - - if (!is_offload(adapter)) - return -EOPNOTSUPP; - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (offload_running(adapter)) - return -EBUSY; - if (copy_from_user(&m, useraddr, sizeof(m))) - return -EFAULT; - if (m.nmtus != NMTUS) - return -EINVAL; - if (m.mtus[0] < 81) /* accommodate SACK */ - return -EINVAL; - - /* MTUs must be in ascending order */ - for (i = 1; i < NMTUS; ++i) - if (m.mtus[i] < m.mtus[i - 1]) - return -EINVAL; - - memcpy(adapter->params.mtus, m.mtus, - sizeof(adapter->params.mtus)); - break; - } - case CHELSIO_GET_PM:{ - struct tp_params *p = &adapter->params.tp; - struct ch_pm m = {.cmd = CHELSIO_GET_PM }; - - if (!is_offload(adapter)) - return -EOPNOTSUPP; - m.tx_pg_sz = p->tx_pg_size; - m.tx_num_pg = p->tx_num_pgs; - m.rx_pg_sz = p->rx_pg_size; - m.rx_num_pg = p->rx_num_pgs; - m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan; - if (copy_to_user(useraddr, &m, sizeof(m))) - return -EFAULT; - break; - } - case CHELSIO_SET_PM:{ - struct ch_pm m; - struct tp_params *p = &adapter->params.tp; - - if (!is_offload(adapter)) - return -EOPNOTSUPP; - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (adapter->flags & FULL_INIT_DONE) - return -EBUSY; - if (copy_from_user(&m, useraddr, sizeof(m))) - return -EFAULT; - if (!is_power_of_2(m.rx_pg_sz) || - !is_power_of_2(m.tx_pg_sz)) - return -EINVAL; /* not power of 2 */ - if (!(m.rx_pg_sz & 0x14000)) - return -EINVAL; /* not 16KB or 64KB */ - if (!(m.tx_pg_sz & 0x1554000)) - return -EINVAL; - if (m.tx_num_pg == -1) - m.tx_num_pg = p->tx_num_pgs; - if (m.rx_num_pg == -1) - m.rx_num_pg = p->rx_num_pgs; - if (m.tx_num_pg % 24 || m.rx_num_pg % 24) - return -EINVAL; - if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size || - m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size) - return -EINVAL; - p->rx_pg_size = m.rx_pg_sz; - p->tx_pg_size = m.tx_pg_sz; - p->rx_num_pgs = m.rx_num_pg; - p->tx_num_pgs = m.tx_num_pg; - break; - } - case CHELSIO_GET_MEM:{ - struct ch_mem_range t; - struct mc7 *mem; - u64 buf[32]; - - if (!is_offload(adapter)) - return -EOPNOTSUPP; - if (!(adapter->flags & FULL_INIT_DONE)) - return -EIO; /* need the memory controllers */ - if (copy_from_user(&t, useraddr, sizeof(t))) - return -EFAULT; - if ((t.addr & 7) || (t.len & 7)) - return -EINVAL; - if (t.mem_id == MEM_CM) - mem = &adapter->cm; - else if (t.mem_id == MEM_PMRX) - mem = &adapter->pmrx; - else if (t.mem_id == MEM_PMTX) - mem = &adapter->pmtx; - else - return -EINVAL; - - /* - * Version scheme: - * bits 0..9: chip version - * bits 10..15: chip revision - */ - t.version = 3 | (adapter->params.rev << 10); - if (copy_to_user(useraddr, &t, sizeof(t))) - return -EFAULT; - - /* - * Read 256 bytes at a time as len can be large and we don't - * want to use huge intermediate buffers. 
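The page-size validation in CHELSIO_SET_PM pairs is_power_of_2() with a whitelist mask: 0x14000 has bits 14 and 16 set, so rx_pg_sz must be 16KB or 64KB, while 0x1554000 sets bits 14, 16, 18, 20, 22 and 24, allowing tx_pg_sz from 16KB to 16MB in 4x steps. The power-of-two test matters because a non-power-of-two value could still have a bit inside the mask. A sketch of the rx check:

#include <stdio.h>

int main(void)
{
    unsigned int rx_mask = 0x14000;  /* bits 14 and 16: 16KB or 64KB */
    unsigned int sz;

    /* Walk the power-of-two sizes around the window; only 16384 and
     * 65536 intersect the mask. */
    for (sz = 1 << 12; sz <= 1 << 17; sz <<= 1)
        printf("%6u: %s\n", sz, (sz & rx_mask) ? "ok" : "rejected");
    return 0;
}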
- */ - useraddr += sizeof(t); /* advance to start of buffer */ - while (t.len) { - unsigned int chunk = - min_t(unsigned int, t.len, sizeof(buf)); - - ret = - t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, - buf); - if (ret) - return ret; - if (copy_to_user(useraddr, buf, chunk)) - return -EFAULT; - useraddr += chunk; - t.addr += chunk; - t.len -= chunk; - } - break; - } - case CHELSIO_SET_TRACE_FILTER:{ - struct ch_trace t; - const struct trace_params *tp; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (!offload_running(adapter)) - return -EAGAIN; - if (copy_from_user(&t, useraddr, sizeof(t))) - return -EFAULT; - - tp = (const struct trace_params *)&t.sip; - if (t.config_tx) - t3_config_trace_filter(adapter, tp, 0, - t.invert_match, - t.trace_tx); - if (t.config_rx) - t3_config_trace_filter(adapter, tp, 1, - t.invert_match, - t.trace_rx); - break; - } - default: - return -EOPNOTSUPP; - } - return 0; -} - -static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) -{ - struct mii_ioctl_data *data = if_mii(req); - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - switch (cmd) { - case SIOCGMIIREG: - case SIOCSMIIREG: - /* Convert phy_id from older PRTAD/DEVAD format */ - if (is_10G(adapter) && - !mdio_phy_id_is_c45(data->phy_id) && - (data->phy_id & 0x1f00) && - !(data->phy_id & 0xe0e0)) - data->phy_id = mdio_phy_id_c45(data->phy_id >> 8, - data->phy_id & 0x1f); - /* FALLTHRU */ - case SIOCGMIIPHY: - return mdio_mii_ioctl(&pi->phy.mdio, data, cmd); - case SIOCCHIOCTL: - return cxgb_extension_ioctl(dev, req->ifr_data); - default: - return -EOPNOTSUPP; - } -} - -static int cxgb_change_mtu(struct net_device *dev, int new_mtu) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - int ret; - - if (new_mtu < 81) /* accommodate SACK */ - return -EINVAL; - if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu))) - return ret; - dev->mtu = new_mtu; - init_port_mtus(adapter); - if (adapter->params.rev == 0 && offload_running(adapter)) - t3_load_mtus(adapter, adapter->params.mtus, - adapter->params.a_wnd, adapter->params.b_wnd, - adapter->port[0]->mtu); - return 0; -} - -static int cxgb_set_mac_addr(struct net_device *dev, void *p) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr); - if (offload_running(adapter)) - write_smt_entry(adapter, pi->port_id); - return 0; -} - -/** - * t3_synchronize_rx - wait for current Rx processing on a port to complete - * @adap: the adapter - * @p: the port - * - * Ensures that current Rx processing on any of the queues associated with - * the given port completes before returning. We do this by acquiring and - * releasing the locks of the response queues associated with the port. 
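The CHELSIO_GET_MEM loop above streams an arbitrarily large region through a fixed bounce buffer instead of allocating t.len bytes in one go. The same loop shape in userspace; read_src() and write_dst() are hypothetical stand-ins for t3_mc7_bd_read() and copy_to_user():

#include <stdio.h>
#include <string.h>

#define BUFSZ 256  /* bounce buffer, like the u64 buf[32] above */

static void read_src(char *dst, unsigned int addr, unsigned int n)
{
    memset(dst, 'x', n);  /* pretend to read n bytes at addr */
    (void)addr;
}

static void write_dst(const char *src, unsigned int n)
{
    (void)src; (void)n;   /* pretend to copy out to the user buffer */
}

int main(void)
{
    char buf[BUFSZ];
    unsigned int addr = 0, len = 1000, chunk;

    while (len) {
        chunk = len < BUFSZ ? len : BUFSZ;  /* min_t(unsigned int, ...) */
        read_src(buf, addr, chunk);
        write_dst(buf, chunk);
        addr += chunk;
        len -= chunk;
        printf("copied %u, %u left\n", chunk, len);
    }
    return 0;
}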
- */ -static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) -{ - int i; - - for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { - struct sge_rspq *q = &adap->sge.qs[i].rspq; - - spin_lock_irq(&q->lock); - spin_unlock_irq(&q->lock); - } -} - -static void cxgb_vlan_mode(struct net_device *dev, u32 features) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - if (adapter->params.rev > 0) { - t3_set_vlan_accel(adapter, 1 << pi->port_id, - features & NETIF_F_HW_VLAN_RX); - } else { - /* single control for all ports */ - unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX; - - for_each_port(adapter, i) - have_vlans |= - adapter->port[i]->features & NETIF_F_HW_VLAN_RX; - - t3_set_vlan_accel(adapter, 1, have_vlans); - } - t3_synchronize_rx(adapter, pi); -} - -static u32 cxgb_fix_features(struct net_device *dev, u32 features) -{ - /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. - */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; - else - features &= ~NETIF_F_HW_VLAN_TX; - - return features; -} - -static int cxgb_set_features(struct net_device *dev, u32 features) -{ - u32 changed = dev->features ^ features; - - if (changed & NETIF_F_HW_VLAN_RX) - cxgb_vlan_mode(dev, features); - - return 0; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void cxgb_netpoll(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - int qidx; - - for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) { - struct sge_qset *qs = &adapter->sge.qs[qidx]; - void *source; - - if (adapter->flags & USING_MSIX) - source = qs; - else - source = adapter; - - t3_intr_handler(adapter, qs->rspq.polling) (0, source); - } -} -#endif - -/* - * Periodic accumulation of MAC statistics. 
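t3_synchronize_rx() above quiesces the Rx path without any flag or counter: taking and immediately dropping each response queue's lock guarantees that any handler which held the lock before the call has finished its critical section by the time the lock is reacquired. The same idiom in miniature, with a pthread mutex standing in for the queue spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
    pthread_mutex_lock(&qlock);   /* Rx handler's critical section */
    puts("worker: processing");
    pthread_mutex_unlock(&qlock);
    return arg;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, worker, NULL);

    /* Lock-cycle: any critical section that began before this point
     * has completed once we own the lock. */
    pthread_mutex_lock(&qlock);
    pthread_mutex_unlock(&qlock);
    puts("main: earlier critical sections have drained");

    pthread_join(t, NULL);
    return 0;
}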
- */ -static void mac_stats_update(struct adapter *adapter) -{ - int i; - - for_each_port(adapter, i) { - struct net_device *dev = adapter->port[i]; - struct port_info *p = netdev_priv(dev); - - if (netif_running(dev)) { - spin_lock(&adapter->stats_lock); - t3_mac_update_stats(&p->mac); - spin_unlock(&adapter->stats_lock); - } - } -} - -static void check_link_status(struct adapter *adapter) -{ - int i; - - for_each_port(adapter, i) { - struct net_device *dev = adapter->port[i]; - struct port_info *p = netdev_priv(dev); - int link_fault; - - spin_lock_irq(&adapter->work_lock); - link_fault = p->link_fault; - spin_unlock_irq(&adapter->work_lock); - - if (link_fault) { - t3_link_fault(adapter, i); - continue; - } - - if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) { - t3_xgm_intr_disable(adapter, i); - t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset); - - t3_link_changed(adapter, i); - t3_xgm_intr_enable(adapter, i); - } - } -} - -static void check_t3b2_mac(struct adapter *adapter) -{ - int i; - - if (!rtnl_trylock()) /* synchronize with ifdown */ - return; - - for_each_port(adapter, i) { - struct net_device *dev = adapter->port[i]; - struct port_info *p = netdev_priv(dev); - int status; - - if (!netif_running(dev)) - continue; - - status = 0; - if (netif_running(dev) && netif_carrier_ok(dev)) - status = t3b2_mac_watchdog_task(&p->mac); - if (status == 1) - p->mac.stats.num_toggled++; - else if (status == 2) { - struct cmac *mac = &p->mac; - - t3_mac_set_mtu(mac, dev->mtu); - t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr); - cxgb_set_rxmode(dev); - t3_link_start(&p->phy, mac, &p->link_config); - t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); - t3_port_intr_enable(adapter, p->port_id); - p->mac.stats.num_resets++; - } - } - rtnl_unlock(); -} - - -static void t3_adap_check_task(struct work_struct *work) -{ - struct adapter *adapter = container_of(work, struct adapter, - adap_check_task.work); - const struct adapter_params *p = &adapter->params; - int port; - unsigned int v, status, reset; - - adapter->check_task_cnt++; - - check_link_status(adapter); - - /* Accumulate MAC stats if needed */ - if (!p->linkpoll_period || - (adapter->check_task_cnt * p->linkpoll_period) / 10 >= - p->stats_update_period) { - mac_stats_update(adapter); - adapter->check_task_cnt = 0; - } - - if (p->rev == T3_REV_B2) - check_t3b2_mac(adapter); - - /* - * Scan the XGMAC's to check for various conditions which we want to - * monitor in a periodic polling manner rather than via an interrupt - * condition. This is used for conditions which would otherwise flood - * the system with interrupts and we only really need to know that the - * conditions are "happening" ... For each condition we count the - * detection of the condition and reset it for the next polling loop. - */ - for_each_port(adapter, port) { - struct cmac *mac = &adap2pinfo(adapter, port)->mac; - u32 cause; - - cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset); - reset = 0; - if (cause & F_RXFIFO_OVERFLOW) { - mac->stats.rx_fifo_ovfl++; - reset |= F_RXFIFO_OVERFLOW; - } - - t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset); - } - - /* - * We do the same as above for FL_EMPTY interrupts. 
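The FL_EMPTY accounting that follows decodes a packed status word at two bits per queue set: even bits count free list 0, odd bits free list 1, and the cursor advances to the next qset after each odd bit. A standalone decode with an example status word, assuming that same alternating layout:

#include <stdio.h>

#define NQSETS 3

int main(void)
{
    unsigned int empty[NQSETS][2] = { { 0 } };
    unsigned int v = 0x2d;  /* example status: bits 0, 2, 3 and 5 set */
    int qs = 0, i = 0;

    /* Same walk as the driver loop: i toggles between the two free
     * lists, qs advances after the second list of each qset. */
    while (v) {
        empty[qs][i] += v & 1;
        if (i)
            qs++;
        i ^= 1;
        v >>= 1;
    }

    for (qs = 0; qs < NQSETS; qs++)
        printf("qset %d: fl0=%u fl1=%u\n", qs, empty[qs][0], empty[qs][1]);
    return 0;  /* qset 0: 1/0, qset 1: 1/1, qset 2: 0/1 */
}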
- */ - status = t3_read_reg(adapter, A_SG_INT_CAUSE); - reset = 0; - - if (status & F_FLEMPTY) { - struct sge_qset *qs = &adapter->sge.qs[0]; - int i = 0; - - reset |= F_FLEMPTY; - - v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) & - 0xffff; - - while (v) { - qs->fl[i].empty += (v & 1); - if (i) - qs++; - i ^= 1; - v >>= 1; - } - } - - t3_write_reg(adapter, A_SG_INT_CAUSE, reset); - - /* Schedule the next check update if any port is active. */ - spin_lock_irq(&adapter->work_lock); - if (adapter->open_device_map & PORT_MASK) - schedule_chk_task(adapter); - spin_unlock_irq(&adapter->work_lock); -} - -static void db_full_task(struct work_struct *work) -{ - struct adapter *adapter = container_of(work, struct adapter, - db_full_task); - - cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0); -} - -static void db_empty_task(struct work_struct *work) -{ - struct adapter *adapter = container_of(work, struct adapter, - db_empty_task); - - cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0); -} - -static void db_drop_task(struct work_struct *work) -{ - struct adapter *adapter = container_of(work, struct adapter, - db_drop_task); - unsigned long delay = 1000; - unsigned short r; - - cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0); - - /* - * Sleep a while before ringing the driver qset dbs. - * The delay is between 1000-2023 usecs. - */ - get_random_bytes(&r, 2); - delay += r & 1023; - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(usecs_to_jiffies(delay)); - ring_dbs(adapter); -} - -/* - * Processes external (PHY) interrupts in process context. - */ -static void ext_intr_task(struct work_struct *work) -{ - struct adapter *adapter = container_of(work, struct adapter, - ext_intr_handler_task); - int i; - - /* Disable link fault interrupts */ - for_each_port(adapter, i) { - struct net_device *dev = adapter->port[i]; - struct port_info *p = netdev_priv(dev); - - t3_xgm_intr_disable(adapter, i); - t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset); - } - - /* Re-enable link fault interrupts */ - t3_phy_intr_handler(adapter); - - for_each_port(adapter, i) - t3_xgm_intr_enable(adapter, i); - - /* Now reenable external interrupts */ - spin_lock_irq(&adapter->work_lock); - if (adapter->slow_intr_mask) { - adapter->slow_intr_mask |= F_T3DBG; - t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG); - t3_write_reg(adapter, A_PL_INT_ENABLE0, - adapter->slow_intr_mask); - } - spin_unlock_irq(&adapter->work_lock); -} - -/* - * Interrupt-context handler for external (PHY) interrupts. - */ -void t3_os_ext_intr_handler(struct adapter *adapter) -{ - /* - * Schedule a task to handle external interrupts as they may be slow - * and we use a mutex to protect MDIO registers. We disable PHY - * interrupts in the meantime and let the task reenable them when - * it's done. 
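db_drop_task() above spreads doorbell recovery in time by adding a random 10-bit offset to a 1000 usec floor, which is exactly the 1000-2023 usec window its comment promises. The arithmetic on its own:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned short r = rand();                /* stands in for get_random_bytes() */
    unsigned long delay = 1000 + (r & 1023);  /* 1000..2023 */

    printf("sleeping %lu usecs before ringing doorbells\n", delay);
    return 0;
}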
- */ - spin_lock(&adapter->work_lock); - if (adapter->slow_intr_mask) { - adapter->slow_intr_mask &= ~F_T3DBG; - t3_write_reg(adapter, A_PL_INT_ENABLE0, - adapter->slow_intr_mask); - queue_work(cxgb3_wq, &adapter->ext_intr_handler_task); - } - spin_unlock(&adapter->work_lock); -} - -void t3_os_link_fault_handler(struct adapter *adapter, int port_id) -{ - struct net_device *netdev = adapter->port[port_id]; - struct port_info *pi = netdev_priv(netdev); - - spin_lock(&adapter->work_lock); - pi->link_fault = 1; - spin_unlock(&adapter->work_lock); -} - -static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq) -{ - int i, ret = 0; - - if (is_offload(adapter) && - test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { - cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); - offload_close(&adapter->tdev); - } - - /* Stop all ports */ - for_each_port(adapter, i) { - struct net_device *netdev = adapter->port[i]; - - if (netif_running(netdev)) - __cxgb_close(netdev, on_wq); - } - - /* Stop SGE timers */ - t3_stop_sge_timers(adapter); - - adapter->flags &= ~FULL_INIT_DONE; - - if (reset) - ret = t3_reset_adapter(adapter); - - pci_disable_device(adapter->pdev); - - return ret; -} - -static int t3_reenable_adapter(struct adapter *adapter) -{ - if (pci_enable_device(adapter->pdev)) { - dev_err(&adapter->pdev->dev, - "Cannot re-enable PCI device after reset.\n"); - goto err; - } - pci_set_master(adapter->pdev); - pci_restore_state(adapter->pdev); - pci_save_state(adapter->pdev); - - /* Free sge resources */ - t3_free_sge_resources(adapter); - - if (t3_replay_prep_adapter(adapter)) - goto err; - - return 0; -err: - return -1; -} - -static void t3_resume_ports(struct adapter *adapter) -{ - int i; - - /* Restart the ports */ - for_each_port(adapter, i) { - struct net_device *netdev = adapter->port[i]; - - if (netif_running(netdev)) { - if (cxgb_open(netdev)) { - dev_err(&adapter->pdev->dev, - "can't bring device back up" - " after reset\n"); - continue; - } - } - } - - if (is_offload(adapter) && !ofld_disable) - cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); -} - -/* - * processes a fatal error. - * Bring the ports down, reset the chip, bring the ports back up. - */ -static void fatal_error_task(struct work_struct *work) -{ - struct adapter *adapter = container_of(work, struct adapter, - fatal_error_handler_task); - int err = 0; - - rtnl_lock(); - err = t3_adapter_error(adapter, 1, 1); - if (!err) - err = t3_reenable_adapter(adapter); - if (!err) - t3_resume_ports(adapter); - - CH_ALERT(adapter, "adapter reset %s\n", err ? 
"failed" : "succeeded"); - rtnl_unlock(); -} - -void t3_fatal_err(struct adapter *adapter) -{ - unsigned int fw_status[4]; - - if (adapter->flags & FULL_INIT_DONE) { - t3_sge_stop(adapter); - t3_write_reg(adapter, A_XGM_TX_CTRL, 0); - t3_write_reg(adapter, A_XGM_RX_CTRL, 0); - t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0); - t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0); - - spin_lock(&adapter->work_lock); - t3_intr_disable(adapter); - queue_work(cxgb3_wq, &adapter->fatal_error_handler_task); - spin_unlock(&adapter->work_lock); - } - CH_ALERT(adapter, "encountered fatal error, operation suspended\n"); - if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status)) - CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n", - fw_status[0], fw_status[1], - fw_status[2], fw_status[3]); -} - -/** - * t3_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state - * - * This function is called after a PCI bus error affecting - * this device has been detected. - */ -static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct adapter *adapter = pci_get_drvdata(pdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - t3_adapter_error(adapter, 0, 0); - - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * t3_io_slot_reset - called after the pci bus has been reset. - * @pdev: Pointer to PCI device - * - * Restart the card from scratch, as if from a cold-boot. - */ -static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev) -{ - struct adapter *adapter = pci_get_drvdata(pdev); - - if (!t3_reenable_adapter(adapter)) - return PCI_ERS_RESULT_RECOVERED; - - return PCI_ERS_RESULT_DISCONNECT; -} - -/** - * t3_io_resume - called when traffic can start flowing again. - * @pdev: Pointer to PCI device - * - * This callback is called when the error recovery driver tells us that - * its OK to resume normal operation. - */ -static void t3_io_resume(struct pci_dev *pdev) -{ - struct adapter *adapter = pci_get_drvdata(pdev); - - CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n", - t3_read_reg(adapter, A_PCIE_PEX_ERR)); - - t3_resume_ports(adapter); -} - -static struct pci_error_handlers t3_err_handler = { - .error_detected = t3_io_error_detected, - .slot_reset = t3_io_slot_reset, - .resume = t3_io_resume, -}; - -/* - * Set the number of qsets based on the number of CPUs and the number of ports, - * not to exceed the number of available qsets, assuming there are enough qsets - * per port in HW. 
- */ -static void set_nqsets(struct adapter *adap) -{ - int i, j = 0; - int num_cpus = num_online_cpus(); - int hwports = adap->params.nports; - int nqsets = adap->msix_nvectors - 1; - - if (adap->params.rev > 0 && adap->flags & USING_MSIX) { - if (hwports == 2 && - (hwports * nqsets > SGE_QSETS || - num_cpus >= nqsets / hwports)) - nqsets /= hwports; - if (nqsets > num_cpus) - nqsets = num_cpus; - if (nqsets < 1 || hwports == 4) - nqsets = 1; - } else - nqsets = 1; - - for_each_port(adap, i) { - struct port_info *pi = adap2pinfo(adap, i); - - pi->first_qset = j; - pi->nqsets = nqsets; - j = pi->first_qset + nqsets; - - dev_info(&adap->pdev->dev, - "Port %d using %d queue sets.\n", i, nqsets); - } -} - -static int __devinit cxgb_enable_msix(struct adapter *adap) -{ - struct msix_entry entries[SGE_QSETS + 1]; - int vectors; - int i, err; - - vectors = ARRAY_SIZE(entries); - for (i = 0; i < vectors; ++i) - entries[i].entry = i; - - while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0) - vectors = err; - - if (err < 0) - pci_disable_msix(adap->pdev); - - if (!err && vectors < (adap->params.nports + 1)) { - pci_disable_msix(adap->pdev); - err = -1; - } - - if (!err) { - for (i = 0; i < vectors; ++i) - adap->msix_info[i].vec = entries[i].vector; - adap->msix_nvectors = vectors; - } - - return err; -} - -static void __devinit print_port_info(struct adapter *adap, - const struct adapter_info *ai) -{ - static const char *pci_variant[] = { - "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express" - }; - - int i; - char buf[80]; - - if (is_pcie(adap)) - snprintf(buf, sizeof(buf), "%s x%d", - pci_variant[adap->params.pci.variant], - adap->params.pci.width); - else - snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit", - pci_variant[adap->params.pci.variant], - adap->params.pci.speed, adap->params.pci.width); - - for_each_port(adap, i) { - struct net_device *dev = adap->port[i]; - const struct port_info *pi = netdev_priv(dev); - - if (!test_bit(i, &adap->registered_device_map)) - continue; - printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n", - dev->name, ai->desc, pi->phy.desc, - is_offload(adap) ? "R" : "", adap->params.rev, buf, - (adap->flags & USING_MSIX) ? " MSI-X" : - (adap->flags & USING_MSI) ? 
" MSI" : ""); - if (adap->name == dev->name && adap->params.vpd.mclk) - printk(KERN_INFO - "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n", - adap->name, t3_mc7_size(&adap->cm) >> 20, - t3_mc7_size(&adap->pmtx) >> 20, - t3_mc7_size(&adap->pmrx) >> 20, - adap->params.vpd.sn); - } -} - -static const struct net_device_ops cxgb_netdev_ops = { - .ndo_open = cxgb_open, - .ndo_stop = cxgb_close, - .ndo_start_xmit = t3_eth_xmit, - .ndo_get_stats = cxgb_get_stats, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = cxgb_set_rxmode, - .ndo_do_ioctl = cxgb_ioctl, - .ndo_change_mtu = cxgb_change_mtu, - .ndo_set_mac_address = cxgb_set_mac_addr, - .ndo_fix_features = cxgb_fix_features, - .ndo_set_features = cxgb_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = cxgb_netpoll, -#endif -}; - -static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - - memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN); - pi->iscsic.mac_addr[3] |= 0x80; -} - -static int __devinit init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - static int version_printed; - - int i, err, pci_using_dac = 0; - resource_size_t mmio_start, mmio_len; - const struct adapter_info *ai; - struct adapter *adapter = NULL; - struct port_info *pi; - - if (!version_printed) { - printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); - ++version_printed; - } - - if (!cxgb3_wq) { - cxgb3_wq = create_singlethread_workqueue(DRV_NAME); - if (!cxgb3_wq) { - printk(KERN_ERR DRV_NAME - ": cannot initialize work queue\n"); - return -ENOMEM; - } - } - - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - goto out; - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) { - /* Just info, some other driver may have claimed the device. 
*/ - dev_info(&pdev->dev, "cannot obtain PCI resources\n"); - goto out_disable_device; - } - - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - pci_using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " - "coherent allocations\n"); - goto out_release_regions; - } - } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { - dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto out_release_regions; - } - - pci_set_master(pdev); - pci_save_state(pdev); - - mmio_start = pci_resource_start(pdev, 0); - mmio_len = pci_resource_len(pdev, 0); - ai = t3_get_adapter_info(ent->driver_data); - - adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); - if (!adapter) { - err = -ENOMEM; - goto out_release_regions; - } - - adapter->nofail_skb = - alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL); - if (!adapter->nofail_skb) { - dev_err(&pdev->dev, "cannot allocate nofail buffer\n"); - err = -ENOMEM; - goto out_free_adapter; - } - - adapter->regs = ioremap_nocache(mmio_start, mmio_len); - if (!adapter->regs) { - dev_err(&pdev->dev, "cannot map device registers\n"); - err = -ENOMEM; - goto out_free_adapter; - } - - adapter->pdev = pdev; - adapter->name = pci_name(pdev); - adapter->msg_enable = dflt_msg_enable; - adapter->mmio_len = mmio_len; - - mutex_init(&adapter->mdio_lock); - spin_lock_init(&adapter->work_lock); - spin_lock_init(&adapter->stats_lock); - - INIT_LIST_HEAD(&adapter->adapter_list); - INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); - INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); - - INIT_WORK(&adapter->db_full_task, db_full_task); - INIT_WORK(&adapter->db_empty_task, db_empty_task); - INIT_WORK(&adapter->db_drop_task, db_drop_task); - - INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); - - for (i = 0; i < ai->nports0 + ai->nports1; ++i) { - struct net_device *netdev; - - netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS); - if (!netdev) { - err = -ENOMEM; - goto out_free_dev; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - adapter->port[i] = netdev; - pi = netdev_priv(netdev); - pi->adapter = adapter; - pi->port_id = i; - netif_carrier_off(netdev); - netdev->irq = pdev->irq; - netdev->mem_start = mmio_start; - netdev->mem_end = mmio_start + mmio_len - 1; - netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; - netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX; - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; - - netdev->netdev_ops = &cxgb_netdev_ops; - SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); - } - - pci_set_drvdata(pdev, adapter); - if (t3_prep_adapter(adapter, ai, 1) < 0) { - err = -ENODEV; - goto out_free_dev; - } - - /* - * The card is now ready to go. If any errors occur during device - * registration we do not fail the whole card but rather proceed only - * with the ports we manage to register successfully. However we must - * register at least one net device. - */ - for_each_port(adapter, i) { - err = register_netdev(adapter->port[i]); - if (err) - dev_warn(&pdev->dev, - "cannot register net device %s, skipping\n", - adapter->port[i]->name); - else { - /* - * Change the name we use for messages to the name of - * the first successfully registered interface. 
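 *
 * The tolerate-partial-failure idiom in isolation, as a sketch with a
 * hypothetical register_ports() (the surrounding loop is the real
 * thing):
 *
 *	static int register_ports(struct net_device *ports[], int n,
 *				  unsigned long *map)
 *	{
 *		int i;
 *
 *		for (i = 0; i < n; i++)
 *			if (register_netdev(ports[i]) == 0)
 *				__set_bit(i, map);	// remember survivors
 *		return *map ? 0 : -ENODEV;	// fail only if none made it
 *	}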
- */ - if (!adapter->registered_device_map) - adapter->name = adapter->port[i]->name; - - __set_bit(i, &adapter->registered_device_map); - } - } - if (!adapter->registered_device_map) { - dev_err(&pdev->dev, "could not register any net devices\n"); - goto out_free_dev; - } - - for_each_port(adapter, i) - cxgb3_init_iscsi_mac(adapter->port[i]); - - /* Driver's ready. Reflect it on LEDs */ - t3_led_ready(adapter); - - if (is_offload(adapter)) { - __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map); - cxgb3_adapter_ofld(adapter); - } - - /* See what interrupts we'll be using */ - if (msi > 1 && cxgb_enable_msix(adapter) == 0) - adapter->flags |= USING_MSIX; - else if (msi > 0 && pci_enable_msi(pdev) == 0) - adapter->flags |= USING_MSI; - - set_nqsets(adapter); - - err = sysfs_create_group(&adapter->port[0]->dev.kobj, - &cxgb3_attr_group); - - for_each_port(adapter, i) - cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features); - - print_port_info(adapter, ai); - return 0; - -out_free_dev: - iounmap(adapter->regs); - for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i) - if (adapter->port[i]) - free_netdev(adapter->port[i]); - -out_free_adapter: - kfree(adapter); - -out_release_regions: - pci_release_regions(pdev); -out_disable_device: - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); -out: - return err; -} - -static void __devexit remove_one(struct pci_dev *pdev) -{ - struct adapter *adapter = pci_get_drvdata(pdev); - - if (adapter) { - int i; - - t3_sge_stop(adapter); - sysfs_remove_group(&adapter->port[0]->dev.kobj, - &cxgb3_attr_group); - - if (is_offload(adapter)) { - cxgb3_adapter_unofld(adapter); - if (test_bit(OFFLOAD_DEVMAP_BIT, - &adapter->open_device_map)) - offload_close(&adapter->tdev); - } - - for_each_port(adapter, i) - if (test_bit(i, &adapter->registered_device_map)) - unregister_netdev(adapter->port[i]); - - t3_stop_sge_timers(adapter); - t3_free_sge_resources(adapter); - cxgb_disable_msi(adapter); - - for_each_port(adapter, i) - if (adapter->port[i]) - free_netdev(adapter->port[i]); - - iounmap(adapter->regs); - if (adapter->nofail_skb) - kfree_skb(adapter->nofail_skb); - kfree(adapter); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - } -} - -static struct pci_driver driver = { - .name = DRV_NAME, - .id_table = cxgb3_pci_tbl, - .probe = init_one, - .remove = __devexit_p(remove_one), - .err_handler = &t3_err_handler, -}; - -static int __init cxgb3_init_module(void) -{ - int ret; - - cxgb3_offload_init(); - - ret = pci_register_driver(&driver); - return ret; -} - -static void __exit cxgb3_cleanup_module(void) -{ - pci_unregister_driver(&driver); - if (cxgb3_wq) - destroy_workqueue(cxgb3_wq); -} - -module_init(cxgb3_init_module); -module_exit(cxgb3_cleanup_module); diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c deleted file mode 100644 index 805076c..0000000 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ /dev/null @@ -1,1418 +0,0 @@ -/* - * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <net/neighbour.h>
-#include <linux/notifier.h>
-#include <linux/atomic.h>
-#include <linux/proc_fs.h>
-#include <linux/if_vlan.h>
-#include <net/netevent.h>
-#include <linux/highmem.h>
-#include <linux/vmalloc.h>
-
-#include "common.h"
-#include "regs.h"
-#include "cxgb3_ioctl.h"
-#include "cxgb3_ctl_defs.h"
-#include "cxgb3_defs.h"
-#include "l2t.h"
-#include "firmware_exports.h"
-#include "cxgb3_offload.h"
-
-static LIST_HEAD(client_list);
-static LIST_HEAD(ofld_dev_list);
-static DEFINE_MUTEX(cxgb3_db_lock);
-
-static DEFINE_RWLOCK(adapter_list_lock);
-static LIST_HEAD(adapter_list);
-
-static const unsigned int MAX_ATIDS = 64 * 1024;
-static const unsigned int ATID_BASE = 0x10000;
-
-static void cxgb_neigh_update(struct neighbour *neigh);
-static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
-
-static inline int offload_activated(struct t3cdev *tdev)
-{
-	const struct adapter *adapter = tdev2adap(tdev);
-
-	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
-}
-
-/**
- * cxgb3_register_client - register an offload client
- * @client: the client
- *
- * Add the client to the client list,
- * and call back the client for each activated offload device
- */
-void cxgb3_register_client(struct cxgb3_client *client)
-{
-	struct t3cdev *tdev;
-
-	mutex_lock(&cxgb3_db_lock);
-	list_add_tail(&client->client_list, &client_list);
-
-	if (client->add) {
-		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
-			if (offload_activated(tdev))
-				client->add(tdev);
-		}
-	}
-	mutex_unlock(&cxgb3_db_lock);
-}
-
-EXPORT_SYMBOL(cxgb3_register_client);
-
-/**
- * cxgb3_unregister_client - unregister an offload client
- * @client: the client
- *
- * Remove the client from the client list,
- * and call back the client for each activated offload device.
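 *
 * For reference, a consumer of this interface looks roughly like the
 * sketch below (my_ulp, my_add, my_remove and my_handlers are
 * hypothetical; the handler table is indexed by CPL opcode):
 *
 *	static void my_add(struct t3cdev *dev);
 *	static void my_remove(struct t3cdev *dev);
 *	static cxgb3_cpl_handler_func my_handlers[NUM_CPL_CMDS];
 *
 *	static struct cxgb3_client my_client = {
 *		.name     = "my_ulp",
 *		.add      = my_add,	// called once per active t3cdev
 *		.remove   = my_remove,
 *		.handlers = my_handlers,
 *	};
 *
 *	// module init: cxgb3_register_client(&my_client);
 *	// module exit: cxgb3_unregister_client(&my_client);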
- */ -void cxgb3_unregister_client(struct cxgb3_client *client) -{ - struct t3cdev *tdev; - - mutex_lock(&cxgb3_db_lock); - list_del(&client->client_list); - - if (client->remove) { - list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) { - if (offload_activated(tdev)) - client->remove(tdev); - } - } - mutex_unlock(&cxgb3_db_lock); -} - -EXPORT_SYMBOL(cxgb3_unregister_client); - -/** - * cxgb3_add_clients - activate registered clients for an offload device - * @tdev: the offload device - * - * Call backs all registered clients once a offload device is activated - */ -void cxgb3_add_clients(struct t3cdev *tdev) -{ - struct cxgb3_client *client; - - mutex_lock(&cxgb3_db_lock); - list_for_each_entry(client, &client_list, client_list) { - if (client->add) - client->add(tdev); - } - mutex_unlock(&cxgb3_db_lock); -} - -/** - * cxgb3_remove_clients - deactivates registered clients - * for an offload device - * @tdev: the offload device - * - * Call backs all registered clients once a offload device is deactivated - */ -void cxgb3_remove_clients(struct t3cdev *tdev) -{ - struct cxgb3_client *client; - - mutex_lock(&cxgb3_db_lock); - list_for_each_entry(client, &client_list, client_list) { - if (client->remove) - client->remove(tdev); - } - mutex_unlock(&cxgb3_db_lock); -} - -void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port) -{ - struct cxgb3_client *client; - - mutex_lock(&cxgb3_db_lock); - list_for_each_entry(client, &client_list, client_list) { - if (client->event_handler) - client->event_handler(tdev, event, port); - } - mutex_unlock(&cxgb3_db_lock); -} - -static struct net_device *get_iff_from_mac(struct adapter *adapter, - const unsigned char *mac, - unsigned int vlan) -{ - int i; - - for_each_port(adapter, i) { - struct net_device *dev = adapter->port[i]; - - if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { - if (vlan && vlan != VLAN_VID_MASK) { - rcu_read_lock(); - dev = __vlan_find_dev_deep(dev, vlan); - rcu_read_unlock(); - } else if (netif_is_bond_slave(dev)) { - while (dev->master) - dev = dev->master; - } - return dev; - } - } - return NULL; -} - -static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req, - void *data) -{ - int i; - int ret = 0; - unsigned int val = 0; - struct ulp_iscsi_info *uiip = data; - - switch (req) { - case ULP_ISCSI_GET_PARAMS: - uiip->pdev = adapter->pdev; - uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT); - uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT); - uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK); - - val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ); - for (i = 0; i < 4; i++, val >>= 8) - uiip->pgsz_factor[i] = val & 0xFF; - - val = t3_read_reg(adapter, A_TP_PARA_REG7); - uiip->max_txsz = - uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0, - (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1); - /* - * On tx, the iscsi pdu has to be <= tx page size and has to - * fit into the Tx PM FIFO. 
- */ - val = min(adapter->params.tp.tx_pg_size, - t3_read_reg(adapter, A_PM1_TX_CFG) >> 17); - uiip->max_txsz = min(val, uiip->max_txsz); - - /* set MaxRxData to 16224 */ - val = t3_read_reg(adapter, A_TP_PARA_REG2); - if ((val >> S_MAXRXDATA) != 0x3f60) { - val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE); - val |= V_MAXRXDATA(0x3f60); - printk(KERN_INFO - "%s, iscsi set MaxRxData to 16224 (0x%x).\n", - adapter->name, val); - t3_write_reg(adapter, A_TP_PARA_REG2, val); - } - - /* - * on rx, the iscsi pdu has to be < rx page size and the - * the max rx data length programmed in TP - */ - val = min(adapter->params.tp.rx_pg_size, - ((t3_read_reg(adapter, A_TP_PARA_REG2)) >> - S_MAXRXDATA) & M_MAXRXDATA); - uiip->max_rxsz = min(val, uiip->max_rxsz); - break; - case ULP_ISCSI_SET_PARAMS: - t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); - /* program the ddp page sizes */ - for (i = 0; i < 4; i++) - val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i); - if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) { - printk(KERN_INFO - "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n", - adapter->name, val, uiip->pgsz_factor[0], - uiip->pgsz_factor[1], uiip->pgsz_factor[2], - uiip->pgsz_factor[3]); - t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val); - } - break; - default: - ret = -EOPNOTSUPP; - } - return ret; -} - -/* Response queue used for RDMA events. */ -#define ASYNC_NOTIF_RSPQ 0 - -static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) -{ - int ret = 0; - - switch (req) { - case RDMA_GET_PARAMS: { - struct rdma_info *rdma = data; - struct pci_dev *pdev = adapter->pdev; - - rdma->udbell_physbase = pci_resource_start(pdev, 2); - rdma->udbell_len = pci_resource_len(pdev, 2); - rdma->tpt_base = - t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT); - rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); - rdma->pbl_base = - t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT); - rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); - rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); - rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); - rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL; - rdma->pdev = pdev; - break; - } - case RDMA_CQ_OP:{ - unsigned long flags; - struct rdma_cq_op *rdma = data; - - /* may be called in any context */ - spin_lock_irqsave(&adapter->sge.reg_lock, flags); - ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op, - rdma->credits); - spin_unlock_irqrestore(&adapter->sge.reg_lock, flags); - break; - } - case RDMA_GET_MEM:{ - struct ch_mem_range *t = data; - struct mc7 *mem; - - if ((t->addr & 7) || (t->len & 7)) - return -EINVAL; - if (t->mem_id == MEM_CM) - mem = &adapter->cm; - else if (t->mem_id == MEM_PMRX) - mem = &adapter->pmrx; - else if (t->mem_id == MEM_PMTX) - mem = &adapter->pmtx; - else - return -EINVAL; - - ret = - t3_mc7_bd_read(mem, t->addr / 8, t->len / 8, - (u64 *) t->buf); - if (ret) - return ret; - break; - } - case RDMA_CQ_SETUP:{ - struct rdma_cq_setup *rdma = data; - - spin_lock_irq(&adapter->sge.reg_lock); - ret = - t3_sge_init_cqcntxt(adapter, rdma->id, - rdma->base_addr, rdma->size, - ASYNC_NOTIF_RSPQ, - rdma->ovfl_mode, rdma->credits, - rdma->credit_thres); - spin_unlock_irq(&adapter->sge.reg_lock); - break; - } - case RDMA_CQ_DISABLE: - spin_lock_irq(&adapter->sge.reg_lock); - ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data); - spin_unlock_irq(&adapter->sge.reg_lock); - break; - case RDMA_CTRL_QP_SETUP:{ - struct rdma_ctrlqp_setup *rdma = data; - - spin_lock_irq(&adapter->sge.reg_lock); - ret = 
t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0, - SGE_CNTXT_RDMA, - ASYNC_NOTIF_RSPQ, - rdma->base_addr, rdma->size, - FW_RI_TID_START, 1, 0); - spin_unlock_irq(&adapter->sge.reg_lock); - break; - } - case RDMA_GET_MIB: { - spin_lock(&adapter->stats_lock); - t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data); - spin_unlock(&adapter->stats_lock); - break; - } - default: - ret = -EOPNOTSUPP; - } - return ret; -} - -static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) -{ - struct adapter *adapter = tdev2adap(tdev); - struct tid_range *tid; - struct mtutab *mtup; - struct iff_mac *iffmacp; - struct ddp_params *ddpp; - struct adap_ports *ports; - struct ofld_page_info *rx_page_info; - struct tp_params *tp = &adapter->params.tp; - int i; - - switch (req) { - case GET_MAX_OUTSTANDING_WR: - *(unsigned int *)data = FW_WR_NUM; - break; - case GET_WR_LEN: - *(unsigned int *)data = WR_FLITS; - break; - case GET_TX_MAX_CHUNK: - *(unsigned int *)data = 1 << 20; /* 1MB */ - break; - case GET_TID_RANGE: - tid = data; - tid->num = t3_mc5_size(&adapter->mc5) - - adapter->params.mc5.nroutes - - adapter->params.mc5.nfilters - adapter->params.mc5.nservers; - tid->base = 0; - break; - case GET_STID_RANGE: - tid = data; - tid->num = adapter->params.mc5.nservers; - tid->base = t3_mc5_size(&adapter->mc5) - tid->num - - adapter->params.mc5.nfilters - adapter->params.mc5.nroutes; - break; - case GET_L2T_CAPACITY: - *(unsigned int *)data = 2048; - break; - case GET_MTUS: - mtup = data; - mtup->size = NMTUS; - mtup->mtus = adapter->params.mtus; - break; - case GET_IFF_FROM_MAC: - iffmacp = data; - iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr, - iffmacp->vlan_tag & - VLAN_VID_MASK); - break; - case GET_DDP_PARAMS: - ddpp = data; - ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT); - ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT); - ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK); - break; - case GET_PORTS: - ports = data; - ports->nports = adapter->params.nports; - for_each_port(adapter, i) - ports->lldevs[i] = adapter->port[i]; - break; - case ULP_ISCSI_GET_PARAMS: - case ULP_ISCSI_SET_PARAMS: - if (!offload_running(adapter)) - return -EAGAIN; - return cxgb_ulp_iscsi_ctl(adapter, req, data); - case RDMA_GET_PARAMS: - case RDMA_CQ_OP: - case RDMA_CQ_SETUP: - case RDMA_CQ_DISABLE: - case RDMA_CTRL_QP_SETUP: - case RDMA_GET_MEM: - case RDMA_GET_MIB: - if (!offload_running(adapter)) - return -EAGAIN; - return cxgb_rdma_ctl(adapter, req, data); - case GET_RX_PAGE_INFO: - rx_page_info = data; - rx_page_info->page_size = tp->rx_pg_size; - rx_page_info->num = tp->rx_num_pgs; - break; - case GET_ISCSI_IPV4ADDR: { - struct iscsi_ipv4addr *p = data; - struct port_info *pi = netdev_priv(p->dev); - p->ipv4addr = pi->iscsi_ipv4addr; - break; - } - case GET_EMBEDDED_INFO: { - struct ch_embedded_info *e = data; - - spin_lock(&adapter->stats_lock); - t3_get_fw_version(adapter, &e->fw_vers); - t3_get_tp_version(adapter, &e->tp_vers); - spin_unlock(&adapter->stats_lock); - break; - } - default: - return -EOPNOTSUPP; - } - return 0; -} - -/* - * Dummy handler for Rx offload packets in case we get an offload packet before - * proper processing is setup. This complains and drops the packet as it isn't - * normal to get offload packets at this stage. 
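 *
 * The pattern implemented next is worth naming: never leave a callback
 * NULL; point it at a safe stub until the real handler is installed.
 * A toy sketch (struct ops and drop_all() are hypothetical):
 *
 *	struct ops {
 *		int (*recv)(void *ctx, struct sk_buff *skb);
 *	};
 *
 *	static int drop_all(void *ctx, struct sk_buff *skb)
 *	{
 *		dev_kfree_skb_any(skb);	// too early: complain and drop
 *		return 0;
 *	}
 *
 *	static void set_safe_defaults(struct ops *o)
 *	{
 *		o->recv = drop_all;	// replaced once setup completes
 *	}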
- */
-static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
-				int n)
-{
-	while (n--)
-		dev_kfree_skb_any(skbs[n]);
-	return 0;
-}
-
-static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
-{
-}
-
-void cxgb3_set_dummy_ops(struct t3cdev *dev)
-{
-	dev->recv = rx_offload_blackhole;
-	dev->neigh_update = dummy_neigh_update;
-}
-
-/*
- * Free an active-open TID.
- */
-void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
-{
-	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
-	union active_open_entry *p = atid2entry(t, atid);
-	void *ctx = p->t3c_tid.ctx;
-
-	spin_lock_bh(&t->atid_lock);
-	p->next = t->afree;
-	t->afree = p;
-	t->atids_in_use--;
-	spin_unlock_bh(&t->atid_lock);
-
-	return ctx;
-}
-
-EXPORT_SYMBOL(cxgb3_free_atid);
-
-/*
- * Free a server TID and return it to the free pool.
- */
-void cxgb3_free_stid(struct t3cdev *tdev, int stid)
-{
-	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
-	union listen_entry *p = stid2entry(t, stid);
-
-	spin_lock_bh(&t->stid_lock);
-	p->next = t->sfree;
-	t->sfree = p;
-	t->stids_in_use--;
-	spin_unlock_bh(&t->stid_lock);
-}
-
-EXPORT_SYMBOL(cxgb3_free_stid);
-
-void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
-		      void *ctx, unsigned int tid)
-{
-	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
-
-	t->tid_tab[tid].client = client;
-	t->tid_tab[tid].ctx = ctx;
-	atomic_inc(&t->tids_in_use);
-}
-
-EXPORT_SYMBOL(cxgb3_insert_tid);
-
-/*
- * Populate a TID_RELEASE WR. The skb must already be properly sized.
- */
-static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
-{
-	struct cpl_tid_release *req;
-
-	skb->priority = CPL_PRIORITY_SETUP;
-	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
-	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
-	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
-}
-
-static void t3_process_tid_release_list(struct work_struct *work)
-{
-	struct t3c_data *td = container_of(work, struct t3c_data,
-					   tid_release_task);
-	struct sk_buff *skb;
-	struct t3cdev *tdev = td->dev;
-
-
-	spin_lock_bh(&td->tid_release_lock);
-	while (td->tid_release_list) {
-		struct t3c_tid_entry *p = td->tid_release_list;
-
-		td->tid_release_list = p->ctx;
-		spin_unlock_bh(&td->tid_release_lock);
-
-		skb = alloc_skb(sizeof(struct cpl_tid_release),
-				GFP_KERNEL);
-		if (!skb)
-			skb = td->nofail_skb;
-		if (!skb) {
-			spin_lock_bh(&td->tid_release_lock);
-			p->ctx = (void *)td->tid_release_list;
-			td->tid_release_list = (struct t3c_tid_entry *)p;
-			break;
-		}
-		mk_tid_release(skb, p - td->tid_maps.tid_tab);
-		cxgb3_ofld_send(tdev, skb);
-		p->ctx = NULL;
-		if (skb == td->nofail_skb)
-			td->nofail_skb =
-				alloc_skb(sizeof(struct cpl_tid_release),
-					  GFP_KERNEL);
-		spin_lock_bh(&td->tid_release_lock);
-	}
-	td->release_list_incomplete = (td->tid_release_list == NULL) ?
0 : 1; - spin_unlock_bh(&td->tid_release_lock); - - if (!td->nofail_skb) - td->nofail_skb = - alloc_skb(sizeof(struct cpl_tid_release), - GFP_KERNEL); -} - -/* use ctx as a next pointer in the tid release list */ -void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid) -{ - struct t3c_data *td = T3C_DATA(tdev); - struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid]; - - spin_lock_bh(&td->tid_release_lock); - p->ctx = (void *)td->tid_release_list; - p->client = NULL; - td->tid_release_list = p; - if (!p->ctx || td->release_list_incomplete) - schedule_work(&td->tid_release_task); - spin_unlock_bh(&td->tid_release_lock); -} - -EXPORT_SYMBOL(cxgb3_queue_tid_release); - -/* - * Remove a tid from the TID table. A client may defer processing its last - * CPL message if it is locked at the time it arrives, and while the message - * sits in the client's backlog the TID may be reused for another connection. - * To handle this we atomically switch the TID association if it still points - * to the original client context. - */ -void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid) -{ - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; - - BUG_ON(tid >= t->ntids); - if (tdev->type == T3A) - (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL); - else { - struct sk_buff *skb; - - skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); - if (likely(skb)) { - mk_tid_release(skb, tid); - cxgb3_ofld_send(tdev, skb); - t->tid_tab[tid].ctx = NULL; - } else - cxgb3_queue_tid_release(tdev, tid); - } - atomic_dec(&t->tids_in_use); -} - -EXPORT_SYMBOL(cxgb3_remove_tid); - -int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client, - void *ctx) -{ - int atid = -1; - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; - - spin_lock_bh(&t->atid_lock); - if (t->afree && - t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <= - t->ntids) { - union active_open_entry *p = t->afree; - - atid = (p - t->atid_tab) + t->atid_base; - t->afree = p->next; - p->t3c_tid.ctx = ctx; - p->t3c_tid.client = client; - t->atids_in_use++; - } - spin_unlock_bh(&t->atid_lock); - return atid; -} - -EXPORT_SYMBOL(cxgb3_alloc_atid); - -int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client, - void *ctx) -{ - int stid = -1; - struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; - - spin_lock_bh(&t->stid_lock); - if (t->sfree) { - union listen_entry *p = t->sfree; - - stid = (p - t->stid_tab) + t->stid_base; - t->sfree = p->next; - p->t3c_tid.ctx = ctx; - p->t3c_tid.client = client; - t->stids_in_use++; - } - spin_unlock_bh(&t->stid_lock); - return stid; -} - -EXPORT_SYMBOL(cxgb3_alloc_stid); - -/* Get the t3cdev associated with a net_device */ -struct t3cdev *dev2t3cdev(struct net_device *dev) -{ - const struct port_info *pi = netdev_priv(dev); - - return (struct t3cdev *)pi->adapter; -} - -EXPORT_SYMBOL(dev2t3cdev); - -static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb) -{ - struct cpl_smt_write_rpl *rpl = cplhdr(skb); - - if (rpl->status != CPL_ERR_NONE) - printk(KERN_ERR - "Unexpected SMT_WRITE_RPL status %u for entry %u\n", - rpl->status, GET_TID(rpl)); - - return CPL_RET_BUF_DONE; -} - -static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb) -{ - struct cpl_l2t_write_rpl *rpl = cplhdr(skb); - - if (rpl->status != CPL_ERR_NONE) - printk(KERN_ERR - "Unexpected L2T_WRITE_RPL status %u for entry %u\n", - rpl->status, GET_TID(rpl)); - - return CPL_RET_BUF_DONE; -} - -static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb) -{ - struct 
cpl_rte_write_rpl *rpl = cplhdr(skb); - - if (rpl->status != CPL_ERR_NONE) - printk(KERN_ERR - "Unexpected RTE_WRITE_RPL status %u for entry %u\n", - rpl->status, GET_TID(rpl)); - - return CPL_RET_BUF_DONE; -} - -static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb) -{ - struct cpl_act_open_rpl *rpl = cplhdr(skb); - unsigned int atid = G_TID(ntohl(rpl->atid)); - struct t3c_tid_entry *t3c_tid; - - t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); - if (t3c_tid && t3c_tid->ctx && t3c_tid->client && - t3c_tid->client->handlers && - t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) { - return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb, - t3c_tid-> - ctx); - } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", - dev->name, CPL_ACT_OPEN_RPL); - return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; - } -} - -static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb) -{ - union opcode_tid *p = cplhdr(skb); - unsigned int stid = G_TID(ntohl(p->opcode_tid)); - struct t3c_tid_entry *t3c_tid; - - t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); - if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[p->opcode]) { - return t3c_tid->client->handlers[p->opcode] (dev, skb, - t3c_tid->ctx); - } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", - dev->name, p->opcode); - return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; - } -} - -static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb) -{ - union opcode_tid *p = cplhdr(skb); - unsigned int hwtid = G_TID(ntohl(p->opcode_tid)); - struct t3c_tid_entry *t3c_tid; - - t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); - if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[p->opcode]) { - return t3c_tid->client->handlers[p->opcode] - (dev, skb, t3c_tid->ctx); - } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", - dev->name, p->opcode); - return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; - } -} - -static int do_cr(struct t3cdev *dev, struct sk_buff *skb) -{ - struct cpl_pass_accept_req *req = cplhdr(skb); - unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); - struct tid_info *t = &(T3C_DATA(dev))->tid_maps; - struct t3c_tid_entry *t3c_tid; - unsigned int tid = GET_TID(req); - - if (unlikely(tid >= t->ntids)) { - printk("%s: passive open TID %u too large\n", - dev->name, tid); - t3_fatal_err(tdev2adap(dev)); - return CPL_RET_BUF_DONE; - } - - t3c_tid = lookup_stid(t, stid); - if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) { - return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ] - (dev, skb, t3c_tid->ctx); - } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", - dev->name, CPL_PASS_ACCEPT_REQ); - return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; - } -} - -/* - * Returns an sk_buff for a reply CPL message of size len. If the input - * sk_buff has no other users it is trimmed and reused, otherwise a new buffer - * is allocated. The input skb must be of size at least len. Note that this - * operation does not destroy the original skb data even if it decides to reuse - * the buffer. 
- */ -static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len, - gfp_t gfp) -{ - if (likely(!skb_cloned(skb))) { - BUG_ON(skb->len < len); - __skb_trim(skb, len); - skb_get(skb); - } else { - skb = alloc_skb(len, gfp); - if (skb) - __skb_put(skb, len); - } - return skb; -} - -static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) -{ - union opcode_tid *p = cplhdr(skb); - unsigned int hwtid = G_TID(ntohl(p->opcode_tid)); - struct t3c_tid_entry *t3c_tid; - - t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); - if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[p->opcode]) { - return t3c_tid->client->handlers[p->opcode] - (dev, skb, t3c_tid->ctx); - } else { - struct cpl_abort_req_rss *req = cplhdr(skb); - struct cpl_abort_rpl *rpl; - struct sk_buff *reply_skb; - unsigned int tid = GET_TID(req); - u8 cmd = req->status; - - if (req->status == CPL_ERR_RTX_NEG_ADVICE || - req->status == CPL_ERR_PERSIST_NEG_ADVICE) - goto out; - - reply_skb = cxgb3_get_cpl_reply_skb(skb, - sizeof(struct - cpl_abort_rpl), - GFP_ATOMIC); - - if (!reply_skb) { - printk("do_abort_req_rss: couldn't get skb!\n"); - goto out; - } - reply_skb->priority = CPL_PRIORITY_DATA; - __skb_put(reply_skb, sizeof(struct cpl_abort_rpl)); - rpl = cplhdr(reply_skb); - rpl->wr.wr_hi = - htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); - rpl->wr.wr_lo = htonl(V_WR_TID(tid)); - OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); - rpl->cmd = cmd; - cxgb3_ofld_send(dev, reply_skb); -out: - return CPL_RET_BUF_DONE; - } -} - -static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb) -{ - struct cpl_act_establish *req = cplhdr(skb); - unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); - struct tid_info *t = &(T3C_DATA(dev))->tid_maps; - struct t3c_tid_entry *t3c_tid; - unsigned int tid = GET_TID(req); - - if (unlikely(tid >= t->ntids)) { - printk("%s: active establish TID %u too large\n", - dev->name, tid); - t3_fatal_err(tdev2adap(dev)); - return CPL_RET_BUF_DONE; - } - - t3c_tid = lookup_atid(t, atid); - if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { - return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] - (dev, skb, t3c_tid->ctx); - } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", - dev->name, CPL_ACT_ESTABLISH); - return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; - } -} - -static int do_trace(struct t3cdev *dev, struct sk_buff *skb) -{ - struct cpl_trace_pkt *p = cplhdr(skb); - - skb->protocol = htons(0xffff); - skb->dev = dev->lldev; - skb_pull(skb, sizeof(*p)); - skb_reset_mac_header(skb); - netif_receive_skb(skb); - return 0; -} - -/* - * That skb would better have come from process_responses() where we abuse - * ->priority and ->csum to carry our data. NB: if we get to per-arch - * ->csum, the things might get really interesting here. 
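 *
 * A hypothetical producer consistent with the unpack helpers below
 * (the real one lives in the SGE response path; only the bit layout
 * visible here, TID in bits 8..27 of ->priority, is assumed):
 *
 *	static inline void stash_hwtid(struct sk_buff *skb, u32 hwtid)
 *	{
 *		skb->priority = (__force u32)htonl((hwtid & 0xfffff) << 8);
 *	}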
- */ - -static inline u32 get_hwtid(struct sk_buff *skb) -{ - return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff; -} - -static inline u32 get_opcode(struct sk_buff *skb) -{ - return G_OPCODE(ntohl((__force __be32)skb->csum)); -} - -static int do_term(struct t3cdev *dev, struct sk_buff *skb) -{ - unsigned int hwtid = get_hwtid(skb); - unsigned int opcode = get_opcode(skb); - struct t3c_tid_entry *t3c_tid; - - t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); - if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && - t3c_tid->client->handlers[opcode]) { - return t3c_tid->client->handlers[opcode] (dev, skb, - t3c_tid->ctx); - } else { - printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", - dev->name, opcode); - return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; - } -} - -static int nb_callback(struct notifier_block *self, unsigned long event, - void *ctx) -{ - switch (event) { - case (NETEVENT_NEIGH_UPDATE):{ - cxgb_neigh_update((struct neighbour *)ctx); - break; - } - case (NETEVENT_REDIRECT):{ - struct netevent_redirect *nr = ctx; - cxgb_redirect(nr->old, nr->new); - cxgb_neigh_update(dst_get_neighbour(nr->new)); - break; - } - default: - break; - } - return 0; -} - -static struct notifier_block nb = { - .notifier_call = nb_callback -}; - -/* - * Process a received packet with an unknown/unexpected CPL opcode. - */ -static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb) -{ - printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name, - *skb->data); - return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; -} - -/* - * Handlers for each CPL opcode - */ -static cpl_handler_func cpl_handlers[NUM_CPL_CMDS]; - -/* - * Add a new handler to the CPL dispatch table. A NULL handler may be supplied - * to unregister an existing handler. - */ -void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h) -{ - if (opcode < NUM_CPL_CMDS) - cpl_handlers[opcode] = h ? h : do_bad_cpl; - else - printk(KERN_ERR "T3C: handler registration for " - "opcode %x failed\n", opcode); -} - -EXPORT_SYMBOL(t3_register_cpl_handler); - -/* - * T3CDEV's receive method. - */ -static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n) -{ - while (n--) { - struct sk_buff *skb = *skbs++; - unsigned int opcode = get_opcode(skb); - int ret = cpl_handlers[opcode] (dev, skb); - -#if VALIDATE_TID - if (ret & CPL_RET_UNKNOWN_TID) { - union opcode_tid *p = cplhdr(skb); - - printk(KERN_ERR "%s: CPL message (opcode %u) had " - "unknown TID %u\n", dev->name, opcode, - G_TID(ntohl(p->opcode_tid))); - } -#endif - if (ret & CPL_RET_BUF_DONE) - kfree_skb(skb); - } - return 0; -} - -/* - * Sends an sk_buff to a T3C driver after dealing with any active network taps. 
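 *
 * Because of the local_bh_disable() below, a process-context caller
 * needs no extra locking against the softirq transmit path; a typical
 * call site might look like this sketch (my_send_setup() is
 * hypothetical):
 *
 *	static int my_send_setup(struct t3cdev *dev, struct sk_buff *skb)
 *	{
 *		skb->priority = CPL_PRIORITY_SETUP;
 *		return cxgb3_ofld_send(dev, skb);
 *	}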
- */ -int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb) -{ - int r; - - local_bh_disable(); - r = dev->send(dev, skb); - local_bh_enable(); - return r; -} - -EXPORT_SYMBOL(cxgb3_ofld_send); - -static int is_offloading(struct net_device *dev) -{ - struct adapter *adapter; - int i; - - read_lock_bh(&adapter_list_lock); - list_for_each_entry(adapter, &adapter_list, adapter_list) { - for_each_port(adapter, i) { - if (dev == adapter->port[i]) { - read_unlock_bh(&adapter_list_lock); - return 1; - } - } - } - read_unlock_bh(&adapter_list_lock); - return 0; -} - -static void cxgb_neigh_update(struct neighbour *neigh) -{ - struct net_device *dev = neigh->dev; - - if (dev && (is_offloading(dev))) { - struct t3cdev *tdev = dev2t3cdev(dev); - - BUG_ON(!tdev); - t3_l2t_update(tdev, neigh); - } -} - -static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e) -{ - struct sk_buff *skb; - struct cpl_set_tcb_field *req; - - skb = alloc_skb(sizeof(*req), GFP_ATOMIC); - if (!skb) { - printk(KERN_ERR "%s: cannot allocate skb!\n", __func__); - return; - } - skb->priority = CPL_PRIORITY_CONTROL; - req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req)); - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply = 0; - req->cpu_idx = 0; - req->word = htons(W_TCB_L2T_IX); - req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX)); - req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx)); - tdev->send(tdev, skb); -} - -static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) -{ - struct net_device *olddev, *newdev; - struct tid_info *ti; - struct t3cdev *tdev; - u32 tid; - int update_tcb; - struct l2t_entry *e; - struct t3c_tid_entry *te; - - olddev = dst_get_neighbour(old)->dev; - newdev = dst_get_neighbour(new)->dev; - if (!is_offloading(olddev)) - return; - if (!is_offloading(newdev)) { - printk(KERN_WARNING "%s: Redirect to non-offload " - "device ignored.\n", __func__); - return; - } - tdev = dev2t3cdev(olddev); - BUG_ON(!tdev); - if (tdev != dev2t3cdev(newdev)) { - printk(KERN_WARNING "%s: Redirect to different " - "offload device ignored.\n", __func__); - return; - } - - /* Add new L2T entry */ - e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev); - if (!e) { - printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", - __func__); - return; - } - - /* Walk tid table and notify clients of dst change. */ - ti = &(T3C_DATA(tdev))->tid_maps; - for (tid = 0; tid < ti->ntids; tid++) { - te = lookup_tid(ti, tid); - BUG_ON(!te); - if (te && te->ctx && te->client && te->client->redirect) { - update_tcb = te->client->redirect(te->ctx, old, new, e); - if (update_tcb) { - l2t_hold(L2DATA(tdev), e); - set_l2t_ix(tdev, tid, e); - } - } - } - l2t_release(L2DATA(tdev), e); -} - -/* - * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. - * The allocated memory is cleared. - */ -void *cxgb_alloc_mem(unsigned long size) -{ - void *p = kzalloc(size, GFP_KERNEL); - - if (!p) - p = vzalloc(size); - return p; -} - -/* - * Free memory allocated through t3_alloc_mem(). - */ -void cxgb_free_mem(void *addr) -{ - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); -} - -/* - * Allocate and initialize the TID tables. Returns 0 on success. 
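 *
 * The free lists built here are threaded through the tables themselves;
 * reduced to a sketch (struct entry and thread_free_list() are
 * hypothetical stand-ins for the stid/atid variants below):
 *
 *	struct entry { struct entry *next; };
 *
 *	static struct entry *thread_free_list(struct entry *tab, unsigned int n)
 *	{
 *		unsigned int i;
 *
 *		if (!n)
 *			return NULL;
 *		for (i = 0; i + 1 < n; i++)
 *			tab[i].next = &tab[i + 1];
 *		tab[n - 1].next = NULL;	// the zeroed allocation does this below
 *		return tab;		// head of the free list
 *	}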
- */ -static int init_tid_tabs(struct tid_info *t, unsigned int ntids, - unsigned int natids, unsigned int nstids, - unsigned int atid_base, unsigned int stid_base) -{ - unsigned long size = ntids * sizeof(*t->tid_tab) + - natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab); - - t->tid_tab = cxgb_alloc_mem(size); - if (!t->tid_tab) - return -ENOMEM; - - t->stid_tab = (union listen_entry *)&t->tid_tab[ntids]; - t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids]; - t->ntids = ntids; - t->nstids = nstids; - t->stid_base = stid_base; - t->sfree = NULL; - t->natids = natids; - t->atid_base = atid_base; - t->afree = NULL; - t->stids_in_use = t->atids_in_use = 0; - atomic_set(&t->tids_in_use, 0); - spin_lock_init(&t->stid_lock); - spin_lock_init(&t->atid_lock); - - /* - * Setup the free lists for stid_tab and atid_tab. - */ - if (nstids) { - while (--nstids) - t->stid_tab[nstids - 1].next = &t->stid_tab[nstids]; - t->sfree = t->stid_tab; - } - if (natids) { - while (--natids) - t->atid_tab[natids - 1].next = &t->atid_tab[natids]; - t->afree = t->atid_tab; - } - return 0; -} - -static void free_tid_maps(struct tid_info *t) -{ - cxgb_free_mem(t->tid_tab); -} - -static inline void add_adapter(struct adapter *adap) -{ - write_lock_bh(&adapter_list_lock); - list_add_tail(&adap->adapter_list, &adapter_list); - write_unlock_bh(&adapter_list_lock); -} - -static inline void remove_adapter(struct adapter *adap) -{ - write_lock_bh(&adapter_list_lock); - list_del(&adap->adapter_list); - write_unlock_bh(&adapter_list_lock); -} - -int cxgb3_offload_activate(struct adapter *adapter) -{ - struct t3cdev *dev = &adapter->tdev; - int natids, err; - struct t3c_data *t; - struct tid_range stid_range, tid_range; - struct mtutab mtutab; - unsigned int l2t_capacity; - - t = kzalloc(sizeof(*t), GFP_KERNEL); - if (!t) - return -ENOMEM; - - err = -EOPNOTSUPP; - if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 || - dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 || - dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 || - dev->ctl(dev, GET_MTUS, &mtutab) < 0 || - dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 || - dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0) - goto out_free; - - err = -ENOMEM; - L2DATA(dev) = t3_init_l2t(l2t_capacity); - if (!L2DATA(dev)) - goto out_free; - - natids = min(tid_range.num / 2, MAX_ATIDS); - err = init_tid_tabs(&t->tid_maps, tid_range.num, natids, - stid_range.num, ATID_BASE, stid_range.base); - if (err) - goto out_free_l2t; - - t->mtus = mtutab.mtus; - t->nmtus = mtutab.size; - - INIT_WORK(&t->tid_release_task, t3_process_tid_release_list); - spin_lock_init(&t->tid_release_lock); - INIT_LIST_HEAD(&t->list_node); - t->dev = dev; - - T3C_DATA(dev) = t; - dev->recv = process_rx; - dev->neigh_update = t3_l2t_update; - - /* Register netevent handler once */ - if (list_empty(&adapter_list)) - register_netevent_notifier(&nb); - - t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL); - t->release_list_incomplete = 0; - - add_adapter(adapter); - return 0; - -out_free_l2t: - t3_free_l2t(L2DATA(dev)); - L2DATA(dev) = NULL; -out_free: - kfree(t); - return err; -} - -void cxgb3_offload_deactivate(struct adapter *adapter) -{ - struct t3cdev *tdev = &adapter->tdev; - struct t3c_data *t = T3C_DATA(tdev); - - remove_adapter(adapter); - if (list_empty(&adapter_list)) - unregister_netevent_notifier(&nb); - - free_tid_maps(&t->tid_maps); - T3C_DATA(tdev) = NULL; - t3_free_l2t(L2DATA(tdev)); - L2DATA(tdev) = NULL; - if (t->nofail_skb) - 
kfree_skb(t->nofail_skb); - kfree(t); -} - -static inline void register_tdev(struct t3cdev *tdev) -{ - static int unit; - - mutex_lock(&cxgb3_db_lock); - snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++); - list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list); - mutex_unlock(&cxgb3_db_lock); -} - -static inline void unregister_tdev(struct t3cdev *tdev) -{ - mutex_lock(&cxgb3_db_lock); - list_del(&tdev->ofld_dev_list); - mutex_unlock(&cxgb3_db_lock); -} - -static inline int adap2type(struct adapter *adapter) -{ - int type = 0; - - switch (adapter->params.rev) { - case T3_REV_A: - type = T3A; - break; - case T3_REV_B: - case T3_REV_B2: - type = T3B; - break; - case T3_REV_C: - type = T3C; - break; - } - return type; -} - -void __devinit cxgb3_adapter_ofld(struct adapter *adapter) -{ - struct t3cdev *tdev = &adapter->tdev; - - INIT_LIST_HEAD(&tdev->ofld_dev_list); - - cxgb3_set_dummy_ops(tdev); - tdev->send = t3_offload_tx; - tdev->ctl = cxgb_offload_ctl; - tdev->type = adap2type(adapter); - - register_tdev(tdev); -} - -void __devexit cxgb3_adapter_unofld(struct adapter *adapter) -{ - struct t3cdev *tdev = &adapter->tdev; - - tdev->recv = NULL; - tdev->neigh_update = NULL; - - unregister_tdev(tdev); -} - -void __init cxgb3_offload_init(void) -{ - int i; - - for (i = 0; i < NUM_CPL_CMDS; ++i) - cpl_handlers[i] = do_bad_cpl; - - t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl); - t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl); - t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl); - t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl); - t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl); - t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr); - t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl); - t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl); - t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl); - t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl); - t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl); - t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl); - t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl); - t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl); - t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl); - t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl); - t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss); - t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish); - t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl); - t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl); - t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term); - t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl); - t3_register_cpl_handler(CPL_TRACE_PKT, do_trace); - t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl); - t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl); - t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl); -} diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h deleted file mode 100644 index 929c298..0000000 --- a/drivers/net/cxgb3/cxgb3_offload.h +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef _CXGB3_OFFLOAD_H -#define _CXGB3_OFFLOAD_H - -#include -#include - -#include "l2t.h" - -#include "t3cdev.h" -#include "t3_cpl.h" - -struct adapter; - -void cxgb3_offload_init(void); - -void cxgb3_adapter_ofld(struct adapter *adapter); -void cxgb3_adapter_unofld(struct adapter *adapter); -int cxgb3_offload_activate(struct adapter *adapter); -void cxgb3_offload_deactivate(struct adapter *adapter); - -void cxgb3_set_dummy_ops(struct t3cdev *dev); - -struct t3cdev *dev2t3cdev(struct net_device *dev); - -/* - * Client registration. Users of T3 driver must register themselves. - * The T3 driver will call the add function of every client for each T3 - * adapter activated, passing up the t3cdev ptr. Each client fills out an - * array of callback functions to process CPL messages. - */ - -void cxgb3_register_client(struct cxgb3_client *client); -void cxgb3_unregister_client(struct cxgb3_client *client); -void cxgb3_add_clients(struct t3cdev *tdev); -void cxgb3_remove_clients(struct t3cdev *tdev); -void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port); - -typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, - struct sk_buff *skb, void *ctx); - -enum { - OFFLOAD_STATUS_UP, - OFFLOAD_STATUS_DOWN, - OFFLOAD_PORT_DOWN, - OFFLOAD_PORT_UP, - OFFLOAD_DB_FULL, - OFFLOAD_DB_EMPTY, - OFFLOAD_DB_DROP -}; - -struct cxgb3_client { - char *name; - void (*add) (struct t3cdev *); - void (*remove) (struct t3cdev *); - cxgb3_cpl_handler_func *handlers; - int (*redirect)(void *ctx, struct dst_entry *old, - struct dst_entry *new, struct l2t_entry *l2t); - struct list_head client_list; - void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port); -}; - -/* - * TID allocation services. 
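 *
 * A typical active-open lifecycle from a client's point of view, as a
 * sketch (conn and hwtid are hypothetical client state):
 *
 *	atid = cxgb3_alloc_atid(tdev, &my_client, conn);
 *	// ... send the active-open request carrying atid ...
 *	// on CPL_ACT_ESTABLISH the hardware TID becomes known:
 *	cxgb3_insert_tid(tdev, &my_client, conn, hwtid);
 *	conn = cxgb3_free_atid(tdev, atid);	// atid is no longer needed
 *	// ... connection runs ...
 *	cxgb3_remove_tid(tdev, conn, hwtid);	// on teardown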
- */
-int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
-		     void *ctx);
-int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
-		     void *ctx);
-void *cxgb3_free_atid(struct t3cdev *dev, int atid);
-void cxgb3_free_stid(struct t3cdev *dev, int stid);
-void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
-		      void *ctx, unsigned int tid);
-void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
-void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
-
-struct t3c_tid_entry {
-	struct cxgb3_client *client;
-	void *ctx;
-};
-
-/* CPL message priority levels */
-enum {
-	CPL_PRIORITY_DATA = 0,     /* data messages */
-	CPL_PRIORITY_SETUP = 1,    /* connection setup messages */
-	CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
-	CPL_PRIORITY_LISTEN = 1,   /* listen start/stop messages */
-	CPL_PRIORITY_ACK = 1,      /* RX ACK messages */
-	CPL_PRIORITY_CONTROL = 1   /* offload control messages */
-};
-
-/* Flags for return value of CPL message handlers */
-enum {
-	CPL_RET_BUF_DONE = 1,   /* buffer processing done, buffer may be freed */
-	CPL_RET_BAD_MSG = 2,    /* bad CPL message (e.g., unknown opcode) */
-	CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
-};
-
-typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
-
-/*
- * Returns a pointer to the first byte of the CPL header in an sk_buff that
- * contains a CPL message.
- */
-static inline void *cplhdr(struct sk_buff *skb)
-{
-	return skb->data;
-}
-
-void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
-
-union listen_entry {
-	struct t3c_tid_entry t3c_tid;
-	union listen_entry *next;
-};
-
-union active_open_entry {
-	struct t3c_tid_entry t3c_tid;
-	union active_open_entry *next;
-};
-
-/*
- * Holds the size, base address, free list start, etc. of the TID, server TID,
- * and active-open TID tables for an offload device.
- * The tables themselves are allocated dynamically.
- */
-struct tid_info {
-	struct t3c_tid_entry *tid_tab;
-	unsigned int ntids;
-	atomic_t tids_in_use;
-
-	union listen_entry *stid_tab;
-	unsigned int nstids;
-	unsigned int stid_base;
-
-	union active_open_entry *atid_tab;
-	unsigned int natids;
-	unsigned int atid_base;
-
-	/*
-	 * The following members are accessed R/W so we put them in their own
-	 * cache lines.
-	 *
-	 * XXX We could combine the atid fields above with the lock here since
-	 * atids are used once (unlike other tids). OTOH the above fields are
-	 * usually in cache due to tid_tab.
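 *
 * The annotation used below starts the writer-heavy fields on a fresh
 * cache line so they do not false-share with the read-mostly ones; a
 * toy example (struct toy_stats is hypothetical):
 *
 *	struct toy_stats {
 *		u64 pkts_ok;		// read-mostly, hot in every lookup
 *		spinlock_t lock ____cacheline_aligned_in_smp;
 *		u64 pkts_err;		// written under lock
 *	};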
- */ - spinlock_t atid_lock ____cacheline_aligned_in_smp; - union active_open_entry *afree; - unsigned int atids_in_use; - - spinlock_t stid_lock ____cacheline_aligned; - union listen_entry *sfree; - unsigned int stids_in_use; -}; - -struct t3c_data { - struct list_head list_node; - struct t3cdev *dev; - unsigned int tx_max_chunk; /* max payload for TX_DATA */ - unsigned int max_wrs; /* max in-flight WRs per connection */ - unsigned int nmtus; - const unsigned short *mtus; - struct tid_info tid_maps; - - struct t3c_tid_entry *tid_release_list; - spinlock_t tid_release_lock; - struct work_struct tid_release_task; - - struct sk_buff *nofail_skb; - unsigned int release_list_incomplete; -}; - -/* - * t3cdev -> t3c_data accessor - */ -#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt) - -#endif diff --git a/drivers/net/cxgb3/firmware_exports.h b/drivers/net/cxgb3/firmware_exports.h deleted file mode 100644 index 0d9b0e6..0000000 --- a/drivers/net/cxgb3/firmware_exports.h +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef _FIRMWARE_EXPORTS_H_ -#define _FIRMWARE_EXPORTS_H_ - -/* WR OPCODES supported by the firmware. 
- */ -#define FW_WROPCODE_FORWARD 0x01 -#define FW_WROPCODE_BYPASS 0x05 - -#define FW_WROPCODE_TUNNEL_TX_PKT 0x03 - -#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00 -#define FW_WROPCODE_ULPTX_MEM_READ 0x02 -#define FW_WROPCODE_ULPTX_PKT 0x04 -#define FW_WROPCODE_ULPTX_INVALIDATE 0x06 - -#define FW_WROPCODE_TUNNEL_RX_PKT 0x07 - -#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08 -#define FW_WROPCODE_OFLD_CLOSE_CON 0x09 -#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A -#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F -#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B -#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C -#define FW_WROPCODE_OFLD_TX_DATA 0x0D -#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E - -#define FW_WROPCODE_RI_RDMA_INIT 0x10 -#define FW_WROPCODE_RI_RDMA_WRITE 0x11 -#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12 -#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13 -#define FW_WROPCODE_RI_SEND 0x14 -#define FW_WROPCODE_RI_TERMINATE 0x15 -#define FW_WROPCODE_RI_RDMA_READ 0x16 -#define FW_WROPCODE_RI_RECEIVE 0x17 -#define FW_WROPCODE_RI_BIND_MW 0x18 -#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19 -#define FW_WROPCODE_RI_LOCAL_INV 0x1A -#define FW_WROPCODE_RI_MODIFY_QP 0x1B -#define FW_WROPCODE_RI_BYPASS 0x1C - -#define FW_WROPOCDE_RSVD 0x1E - -#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F - -#define FW_WROPCODE_MNGT 0x1D -#define FW_MNGTOPCODE_PKTSCHED_SET 0x00 - -/* Maximum size of a WR sent from the host, limited by the SGE. - * - * Note: WRs coming from ULP or TP are only limited by CIM. - */ -#define FW_WR_SIZE 128 - -/* Maximum number of outstanding WRs sent from the host. Value must be - * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by - * offload modules to limit the number of WRs per connection. - */ -#define FW_T3_WR_NUM 16 -#define FW_N3_WR_NUM 7 - -#ifndef N3 -# define FW_WR_NUM FW_T3_WR_NUM -#else -# define FW_WR_NUM FW_N3_WR_NUM -#endif - -/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These - * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must - * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START. - * - * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent - * to RESP Queue[i]. - */ -#define FW_TUNNEL_NUM 8 -#define FW_TUNNEL_SGEEC_START 8 -#define FW_TUNNEL_TID_START 65544 - -/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues - * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID' - * (or 'uP Token') FW_CTRL_TID_START. - * - * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i]. - */ -#define FW_CTRL_NUM 8 -#define FW_CTRL_SGEEC_START 65528 -#define FW_CTRL_TID_START 65536 - -/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These - * queues must start at SGE Egress Context FW_OFLD_SGEEC_START. - * - * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for - * OFFLOAD Queues, as the host is responsible for providing the correct TID in - * every WR. - * - * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i]. - */ -#define FW_OFLD_NUM 8 -#define FW_OFLD_SGEEC_START 0 - -/* - * - */ -#define FW_RI_NUM 1 -#define FW_RI_SGEEC_START 65527 -#define FW_RI_TID_START 65552 - -/* - * The RX_PKT_TID - */ -#define FW_RX_PKT_NUM 1 -#define FW_RX_PKT_TID_START 65553 - -/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported - * by the firmware. - */ -#define FW_WRC_NUM \ - (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM) - -/* - * FW type and version.
- */ -#define S_FW_VERSION_TYPE 28 -#define M_FW_VERSION_TYPE 0xF -#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE) -#define G_FW_VERSION_TYPE(x) \ - (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE) - -#define S_FW_VERSION_MAJOR 16 -#define M_FW_VERSION_MAJOR 0xFFF -#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR) -#define G_FW_VERSION_MAJOR(x) \ - (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR) - -#define S_FW_VERSION_MINOR 8 -#define M_FW_VERSION_MINOR 0xFF -#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR) -#define G_FW_VERSION_MINOR(x) \ - (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR) - -#define S_FW_VERSION_MICRO 0 -#define M_FW_VERSION_MICRO 0xFF -#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO) -#define G_FW_VERSION_MICRO(x) \ - (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO) - -#endif /* _FIRMWARE_EXPORTS_H_ */ diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c deleted file mode 100644 index f452c40..0000000 --- a/drivers/net/cxgb3/l2t.c +++ /dev/null @@ -1,445 +0,0 @@ -/* - * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include -#include -#include -#include -#include -#include -#include -#include "common.h" -#include "t3cdev.h" -#include "cxgb3_defs.h" -#include "l2t.h" -#include "t3_cpl.h" -#include "firmware_exports.h" - -#define VLAN_NONE 0xfff - -/* - * Module locking notes: There is a RW lock protecting the L2 table as a - * whole plus a spinlock per L2T entry. Entry lookups and allocations happen - * under the protection of the table lock, individual entry changes happen - * while holding that entry's spinlock. The table lock nests outside the - * entry locks. Allocations of new entries take the table lock as writers so - * no other lookups can happen while allocating new entries. Entry updates - * take the table lock as readers so multiple entries can be updated in - * parallel. An L2T entry can be dropped by decrementing its reference count - * and therefore can happen in parallel with entry allocation but no entry - * can change state or increment its ref count during allocation as both of - * these perform lookups. 
- */ - -static inline unsigned int vlan_prio(const struct l2t_entry *e) -{ - return e->vlan >> 13; -} - -static inline unsigned int arp_hash(u32 key, int ifindex, - const struct l2t_data *d) -{ - return jhash_2words(key, ifindex, 0) & (d->nentries - 1); -} - -static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n) -{ - neigh_hold(n); - if (e->neigh) - neigh_release(e->neigh); - e->neigh = n; -} - -/* - * Set up an L2T entry and send any packets waiting in the arp queue. The - * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the - * entry locked. - */ -static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb, - struct l2t_entry *e) -{ - struct cpl_l2t_write_req *req; - struct sk_buff *tmp; - - if (!skb) { - skb = alloc_skb(sizeof(*req), GFP_ATOMIC); - if (!skb) - return -ENOMEM; - } - - req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx)); - req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) | - V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) | - V_L2T_W_PRIO(vlan_prio(e))); - memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); - memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); - skb->priority = CPL_PRIORITY_CONTROL; - cxgb3_ofld_send(dev, skb); - - skb_queue_walk_safe(&e->arpq, skb, tmp) { - __skb_unlink(skb, &e->arpq); - cxgb3_ofld_send(dev, skb); - } - e->state = L2T_STATE_VALID; - - return 0; -} - -/* - * Add a packet to an L2T entry's queue of packets awaiting resolution. - * Must be called with the entry's lock held. - */ -static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) -{ - __skb_queue_tail(&e->arpq, skb); -} - -int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, - struct l2t_entry *e) -{ -again: - switch (e->state) { - case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ - neigh_event_send(e->neigh, NULL); - spin_lock_bh(&e->lock); - if (e->state == L2T_STATE_STALE) - e->state = L2T_STATE_VALID; - spin_unlock_bh(&e->lock); - case L2T_STATE_VALID: /* fast-path, send the packet on */ - return cxgb3_ofld_send(dev, skb); - case L2T_STATE_RESOLVING: - spin_lock_bh(&e->lock); - if (e->state != L2T_STATE_RESOLVING) { - /* ARP already completed */ - spin_unlock_bh(&e->lock); - goto again; - } - arpq_enqueue(e, skb); - spin_unlock_bh(&e->lock); - - /* - * Only the first packet added to the arpq should kick off - * resolution. However, because the alloc_skb below can fail, - * we allow each packet added to the arpq to retry resolution - * as a way of recovering from transient memory exhaustion. - * A better way would be to use a work request to retry L2T - * entries when there's no memory.
- */ - if (!neigh_event_send(e->neigh, NULL)) { - skb = alloc_skb(sizeof(struct cpl_l2t_write_req), - GFP_ATOMIC); - if (!skb) - break; - - spin_lock_bh(&e->lock); - if (!skb_queue_empty(&e->arpq)) - setup_l2e_send_pending(dev, skb, e); - else /* we lost the race */ - __kfree_skb(skb); - spin_unlock_bh(&e->lock); - } - } - return 0; -} - -EXPORT_SYMBOL(t3_l2t_send_slow); - -void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e) -{ -again: - switch (e->state) { - case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ - neigh_event_send(e->neigh, NULL); - spin_lock_bh(&e->lock); - if (e->state == L2T_STATE_STALE) { - e->state = L2T_STATE_VALID; - } - spin_unlock_bh(&e->lock); - return; - case L2T_STATE_VALID: /* fast-path, send the packet on */ - return; - case L2T_STATE_RESOLVING: - spin_lock_bh(&e->lock); - if (e->state != L2T_STATE_RESOLVING) { - /* ARP already completed */ - spin_unlock_bh(&e->lock); - goto again; - } - spin_unlock_bh(&e->lock); - - /* - * Only the first packet added to the arpq should kick off - * resolution. However, because the alloc_skb below can fail, - * we allow each packet added to the arpq to retry resolution - * as a way of recovering from transient memory exhaustion. - * A better way would be to use a work request to retry L2T - * entries when there's no memory. - */ - neigh_event_send(e->neigh, NULL); - } -} - -EXPORT_SYMBOL(t3_l2t_send_event); - -/* - * Allocate a free L2T entry. Must be called with l2t_data.lock held. - */ -static struct l2t_entry *alloc_l2e(struct l2t_data *d) -{ - struct l2t_entry *end, *e, **p; - - if (!atomic_read(&d->nfree)) - return NULL; - - /* there's definitely a free entry */ - for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e) - if (atomic_read(&e->refcnt) == 0) - goto found; - - for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ; -found: - d->rover = e + 1; - atomic_dec(&d->nfree); - - /* - * The entry we found may be an inactive entry that is - * presently in the hash table. We need to remove it. - */ - if (e->state != L2T_STATE_UNUSED) { - int hash = arp_hash(e->addr, e->ifindex, d); - - for (p = &d->l2tab[hash].first; *p; p = &(*p)->next) - if (*p == e) { - *p = e->next; - break; - } - e->state = L2T_STATE_UNUSED; - } - return e; -} - -/* - * Called when an L2T entry has no more users. The entry is left in the hash - * table since it is likely to be reused but we also bump nfree to indicate - * that the entry can be reallocated for a different neighbor. We also drop - * the existing neighbor reference in case the neighbor is going away and is - * waiting on our reference. - * - * Because entries can be reallocated to other neighbors once their ref count - * drops to 0 we need to take the entry's lock to avoid races with a new - * incarnation. - */ -void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e) -{ - spin_lock_bh(&e->lock); - if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ - if (e->neigh) { - neigh_release(e->neigh); - e->neigh = NULL; - } - } - spin_unlock_bh(&e->lock); - atomic_inc(&d->nfree); -} - -EXPORT_SYMBOL(t3_l2e_free); - -/* - * Update an L2T entry that was previously used for the same next hop as neigh. - * Must be called with softirqs disabled. 
- */ -static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) -{ - unsigned int nud_state; - - spin_lock(&e->lock); /* avoid race with t3_l2t_free */ - - if (neigh != e->neigh) - neigh_replace(e, neigh); - nud_state = neigh->nud_state; - if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || - !(nud_state & NUD_VALID)) - e->state = L2T_STATE_RESOLVING; - else if (nud_state & NUD_CONNECTED) - e->state = L2T_STATE_VALID; - else - e->state = L2T_STATE_STALE; - spin_unlock(&e->lock); -} - -struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, - struct net_device *dev) -{ - struct l2t_entry *e; - struct l2t_data *d = L2DATA(cdev); - u32 addr = *(u32 *) neigh->primary_key; - int ifidx = neigh->dev->ifindex; - int hash = arp_hash(addr, ifidx, d); - struct port_info *p = netdev_priv(dev); - int smt_idx = p->port_id; - - write_lock_bh(&d->lock); - for (e = d->l2tab[hash].first; e; e = e->next) - if (e->addr == addr && e->ifindex == ifidx && - e->smt_idx == smt_idx) { - l2t_hold(d, e); - if (atomic_read(&e->refcnt) == 1) - reuse_entry(e, neigh); - goto done; - } - - /* Need to allocate a new entry */ - e = alloc_l2e(d); - if (e) { - spin_lock(&e->lock); /* avoid race with t3_l2t_free */ - e->next = d->l2tab[hash].first; - d->l2tab[hash].first = e; - e->state = L2T_STATE_RESOLVING; - e->addr = addr; - e->ifindex = ifidx; - e->smt_idx = smt_idx; - atomic_set(&e->refcnt, 1); - neigh_replace(e, neigh); - if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) - e->vlan = vlan_dev_vlan_id(neigh->dev); - else - e->vlan = VLAN_NONE; - spin_unlock(&e->lock); - } -done: - write_unlock_bh(&d->lock); - return e; -} - -EXPORT_SYMBOL(t3_l2t_get); - -/* - * Called when address resolution fails for an L2T entry to handle packets - * on the arpq head. If a packet specifies a failure handler it is invoked, - * otherwise the packet is sent to the offload device. - * - * XXX: maybe we should abandon the latter behavior and just require a failure - * handler. - */ -static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq) -{ - struct sk_buff *skb, *tmp; - - skb_queue_walk_safe(arpq, skb, tmp) { - struct l2t_skb_cb *cb = L2T_SKB_CB(skb); - - __skb_unlink(skb, arpq); - if (cb->arp_failure_handler) - cb->arp_failure_handler(dev, skb); - else - cxgb3_ofld_send(dev, skb); - } -} - -/* - * Called when the host's ARP layer makes a change to some entry that is - * loaded into the HW L2 table. - */ -void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh) -{ - struct sk_buff_head arpq; - struct l2t_entry *e; - struct l2t_data *d = L2DATA(dev); - u32 addr = *(u32 *) neigh->primary_key; - int ifidx = neigh->dev->ifindex; - int hash = arp_hash(addr, ifidx, d); - - read_lock_bh(&d->lock); - for (e = d->l2tab[hash].first; e; e = e->next) - if (e->addr == addr && e->ifindex == ifidx) { - spin_lock(&e->lock); - goto found; - } - read_unlock_bh(&d->lock); - return; - -found: - __skb_queue_head_init(&arpq); - - read_unlock(&d->lock); - if (atomic_read(&e->refcnt)) { - if (neigh != e->neigh) - neigh_replace(e, neigh); - - if (e->state == L2T_STATE_RESOLVING) { - if (neigh->nud_state & NUD_FAILED) { - skb_queue_splice_init(&e->arpq, &arpq); - } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE)) - setup_l2e_send_pending(dev, NULL, e); - } else { - e->state = neigh->nud_state & NUD_CONNECTED ?
- L2T_STATE_VALID : L2T_STATE_STALE; - if (memcmp(e->dmac, neigh->ha, 6)) - setup_l2e_send_pending(dev, NULL, e); - } - } - spin_unlock_bh(&e->lock); - - if (!skb_queue_empty(&arpq)) - handle_failed_resolution(dev, &arpq); -} - -struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) -{ - struct l2t_data *d; - int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry); - - d = cxgb_alloc_mem(size); - if (!d) - return NULL; - - d->nentries = l2t_capacity; - d->rover = &d->l2tab[1]; /* entry 0 is not used */ - atomic_set(&d->nfree, l2t_capacity - 1); - rwlock_init(&d->lock); - - for (i = 0; i < l2t_capacity; ++i) { - d->l2tab[i].idx = i; - d->l2tab[i].state = L2T_STATE_UNUSED; - __skb_queue_head_init(&d->l2tab[i].arpq); - spin_lock_init(&d->l2tab[i].lock); - atomic_set(&d->l2tab[i].refcnt, 0); - } - return d; -} - -void t3_free_l2t(struct l2t_data *d) -{ - cxgb_free_mem(d); -} - diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h deleted file mode 100644 index 7a12d52..0000000 --- a/drivers/net/cxgb3/l2t.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef _CHELSIO_L2T_H -#define _CHELSIO_L2T_H - -#include -#include "t3cdev.h" -#include - -enum { - L2T_STATE_VALID, /* entry is up to date */ - L2T_STATE_STALE, /* entry may be used but needs revalidation */ - L2T_STATE_RESOLVING, /* entry needs address resolution */ - L2T_STATE_UNUSED /* entry not in use */ -}; - -struct neighbour; -struct sk_buff; - -/* - * Each L2T entry plays multiple roles. First of all, it keeps state for the - * corresponding entry of the HW L2 table and maintains a queue of offload - * packets awaiting address resolution. Second, it is a node of a hash table - * chain, where the nodes of the chain are linked together through their next - * pointer. Finally, each node is a bucket of a hash table, pointing to the - * first element in its chain through its first pointer. 
- */ -struct l2t_entry { - u16 state; /* entry state */ - u16 idx; /* entry index */ - u32 addr; /* dest IP address */ - int ifindex; /* neighbor's net_device's ifindex */ - u16 smt_idx; /* SMT index */ - u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */ - struct neighbour *neigh; /* associated neighbour */ - struct l2t_entry *first; /* start of hash chain */ - struct l2t_entry *next; /* next l2t_entry on chain */ - struct sk_buff_head arpq; /* queue of packets awaiting resolution */ - spinlock_t lock; - atomic_t refcnt; /* entry reference count */ - u8 dmac[6]; /* neighbour's MAC address */ -}; - -struct l2t_data { - unsigned int nentries; /* number of entries */ - struct l2t_entry *rover; /* starting point for next allocation */ - atomic_t nfree; /* number of free entries */ - rwlock_t lock; - struct l2t_entry l2tab[0]; -}; - -typedef void (*arp_failure_handler_func)(struct t3cdev * dev, - struct sk_buff * skb); - -/* - * Callback stored in an skb to handle address resolution failure. - */ -struct l2t_skb_cb { - arp_failure_handler_func arp_failure_handler; -}; - -#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) - -static inline void set_arp_failure_handler(struct sk_buff *skb, - arp_failure_handler_func hnd) -{ - L2T_SKB_CB(skb)->arp_failure_handler = hnd; -} - -/* - * Getting to the L2 data from an offload device. - */ -#define L2DATA(dev) ((dev)->l2opt) - -#define W_TCB_L2T_IX 0 -#define S_TCB_L2T_IX 7 -#define M_TCB_L2T_IX 0x7ffULL -#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX) - -void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e); -void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh); -struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, - struct net_device *dev); -int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, - struct l2t_entry *e); -void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e); -struct l2t_data *t3_init_l2t(unsigned int l2t_capacity); -void t3_free_l2t(struct l2t_data *d); - -int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb); - -static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb, - struct l2t_entry *e) -{ - if (likely(e->state == L2T_STATE_VALID)) - return cxgb3_ofld_send(dev, skb); - return t3_l2t_send_slow(dev, skb, e); -} - -static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) -{ - if (atomic_dec_and_test(&e->refcnt)) - t3_l2e_free(d, e); -} - -static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) -{ - if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ - atomic_dec(&d->nfree); -} - -#endif diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c deleted file mode 100644 index e13b7fe..0000000 --- a/drivers/net/cxgb3/mc5.c +++ /dev/null @@ -1,438 +0,0 @@ -/* - * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer.
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "common.h" -#include "regs.h" - -enum { - IDT75P52100 = 4, - IDT75N43102 = 5 -}; - -/* DBGI command mode */ -enum { - DBGI_MODE_MBUS = 0, - DBGI_MODE_IDT52100 = 5 -}; - -/* IDT 75P52100 commands */ -#define IDT_CMD_READ 0 -#define IDT_CMD_WRITE 1 -#define IDT_CMD_SEARCH 2 -#define IDT_CMD_LEARN 3 - -/* IDT LAR register address and value for 144-bit mode (low 32 bits) */ -#define IDT_LAR_ADR0 0x180006 -#define IDT_LAR_MODE144 0xffff0000 - -/* IDT SCR and SSR addresses (low 32 bits) */ -#define IDT_SCR_ADR0 0x180000 -#define IDT_SSR0_ADR0 0x180002 -#define IDT_SSR1_ADR0 0x180004 - -/* IDT GMR base address (low 32 bits) */ -#define IDT_GMR_BASE_ADR0 0x180020 - -/* IDT data and mask array base addresses (low 32 bits) */ -#define IDT_DATARY_BASE_ADR0 0 -#define IDT_MSKARY_BASE_ADR0 0x80000 - -/* IDT 75N43102 commands */ -#define IDT4_CMD_SEARCH144 3 -#define IDT4_CMD_WRITE 4 -#define IDT4_CMD_READ 5 - -/* IDT 75N43102 SCR address (low 32 bits) */ -#define IDT4_SCR_ADR0 0x3 - -/* IDT 75N43102 GMR base addresses (low 32 bits) */ -#define IDT4_GMR_BASE0 0x10 -#define IDT4_GMR_BASE1 0x20 -#define IDT4_GMR_BASE2 0x30 - -/* IDT 75N43102 data and mask array base addresses (low 32 bits) */ -#define IDT4_DATARY_BASE_ADR0 0x1000000 -#define IDT4_MSKARY_BASE_ADR0 0x2000000 - -#define MAX_WRITE_ATTEMPTS 5 - -#define MAX_ROUTES 2048 - -/* - * Issue a command to the TCAM and wait for its completion. The address and - * any data required by the command must have been set up by the caller. - */ -static int mc5_cmd_write(struct adapter *adapter, u32 cmd) -{ - t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd); - return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS, - F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1); -} - -static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2, - u32 v3) -{ - t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1); - t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2); - t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3); -} - -static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2, - u32 v3) -{ - t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1); - t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2); - t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3); -} - -static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2, - u32 *v3) -{ - *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0); - *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1); - *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2); -} - -/* - * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM - * command cmd. The data to be written must have been set up by the caller. - * Returns -1 on failure, 0 on success.
- */ -static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd) -{ - t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo); - if (mc5_cmd_write(adapter, cmd) == 0) - return 0; - CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n", - addr_lo); - return -1; -} - -static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base, - u32 data_array_base, u32 write_cmd, - int addr_shift) -{ - unsigned int i; - struct adapter *adap = mc5->adapter; - - /* - * We need the size of the TCAM data and mask arrays in terms of - * 72-bit entries. - */ - unsigned int size72 = mc5->tcam_size; - unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX); - - if (mc5->mode == MC5_MODE_144_BIT) { - size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */ - server_base *= 2; - } - - /* Clear the data array */ - dbgi_wr_data3(adap, 0, 0, 0); - for (i = 0; i < size72; i++) - if (mc5_write(adap, data_array_base + (i << addr_shift), - write_cmd)) - return -1; - - /* Initialize the mask array. */ - dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); - for (i = 0; i < size72; i++) { - if (i == server_base) /* entering server or routing region */ - t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0, - mc5->mode == MC5_MODE_144_BIT ? - 0xfffffff9 : 0xfffffffd); - if (mc5_write(adap, mask_array_base + (i << addr_shift), - write_cmd)) - return -1; - } - return 0; -} - -static int init_idt52100(struct mc5 *mc5) -{ - int i; - struct adapter *adap = mc5->adapter; - - t3_write_reg(adap, A_MC5_DB_RSP_LATENCY, - V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15)); - t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2); - - /* - * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and - * GMRs 8-9 for ACK- and AOPEN searches. - */ - t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE); - t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE); - t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH); - t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN); - t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000); - t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN); - t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH); - t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN); - t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH); - t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000); - t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE); - t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ); - - /* Set DBGI command mode for IDT TCAM. 
*/ - t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100); - - /* Set up LAR */ - dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0); - if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE)) - goto err; - - /* Set up SSRs */ - dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0); - if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) || - mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE)) - goto err; - - /* Set up GMRs */ - for (i = 0; i < 32; ++i) { - if (i >= 12 && i < 15) - dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff); - else if (i == 15) - dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff); - else - dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); - - if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE)) - goto err; - } - - /* Set up SCR */ - dbgi_wr_data3(adap, 1, 0, 0); - if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE)) - goto err; - - return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0, - IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0); -err: - return -EIO; -} - -static int init_idt43102(struct mc5 *mc5) -{ - int i; - struct adapter *adap = mc5->adapter; - - t3_write_reg(adap, A_MC5_DB_RSP_LATENCY, - adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) : - V_RDLAT(0xd) | V_SRCHLAT(0x12)); - - /* - * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask - * for ACK- and AOPEN searches. - */ - t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE); - t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE); - t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, - IDT4_CMD_SEARCH144 | 0x3800); - t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144); - t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800); - t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800); - t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800); - t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE); - t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ); - - t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3); - - /* Set DBGI command mode for IDT TCAM. */ - t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100); - - /* Set up GMRs */ - dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); - for (i = 0; i < 7; ++i) - if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE)) - goto err; - - for (i = 0; i < 4; ++i) - if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE)) - goto err; - - dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff); - if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) || - mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) || - mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE)) - goto err; - - dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff); - if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE)) - goto err; - - /* Set up SCR */ - dbgi_wr_data3(adap, 0xf0000000, 0, 0); - if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE)) - goto err; - - return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0, - IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1); -err: - return -EIO; -} - -/* Put MC5 in DBGI mode. */ -static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5) -{ - t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG, - V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN); -} - -/* Put MC5 in M-Bus mode. 
*/ -static void mc5_dbgi_mode_disable(const struct mc5 *mc5) -{ - t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG, - V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | - V_COMPEN(mc5->mode == MC5_MODE_72_BIT) | - V_PRTYEN(mc5->parity_enabled) | F_MBUSEN); -} - -/* - * Initialization that requires the OS and protocol layers to already - * be initialized goes here. - */ -int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, - unsigned int nroutes) -{ - u32 cfg; - int err; - unsigned int tcam_size = mc5->tcam_size; - struct adapter *adap = mc5->adapter; - - if (!tcam_size) - return 0; - - if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size) - return -EINVAL; - - /* Reset the TCAM */ - cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE; - cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST; - t3_write_reg(adap, A_MC5_DB_CONFIG, cfg); - if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) { - CH_ERR(adap, "TCAM reset timed out\n"); - return -1; - } - - t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes); - t3_write_reg(adap, A_MC5_DB_FILTER_TABLE, - tcam_size - nroutes - nfilters); - t3_write_reg(adap, A_MC5_DB_SERVER_INDEX, - tcam_size - nroutes - nfilters - nservers); - - mc5->parity_enabled = 1; - - /* All the TCAM addresses we access have only the low 32 bits non 0 */ - t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0); - t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0); - - mc5_dbgi_mode_enable(mc5); - - switch (mc5->part_type) { - case IDT75P52100: - err = init_idt52100(mc5); - break; - case IDT75N43102: - err = init_idt43102(mc5); - break; - default: - CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type); - err = -EINVAL; - break; - } - - mc5_dbgi_mode_disable(mc5); - return err; -} - - -#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR) - -/* - * MC5 interrupt handler - */ -void t3_mc5_intr_handler(struct mc5 *mc5) -{ - struct adapter *adap = mc5->adapter; - u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE); - - if ((cause & F_PARITYERR) && mc5->parity_enabled) { - CH_ALERT(adap, "MC5 parity error\n"); - mc5->stats.parity_err++; - } - - if (cause & F_REQQPARERR) { - CH_ALERT(adap, "MC5 request queue parity error\n"); - mc5->stats.reqq_parity_err++; - } - - if (cause & F_DISPQPARERR) { - CH_ALERT(adap, "MC5 dispatch queue parity error\n"); - mc5->stats.dispq_parity_err++; - } - - if (cause & F_ACTRGNFULL) - mc5->stats.active_rgn_full++; - if (cause & F_NFASRCHFAIL) - mc5->stats.nfa_srch_err++; - if (cause & F_UNKNOWNCMD) - mc5->stats.unknown_cmd++; - if (cause & F_DELACTEMPTY) - mc5->stats.del_act_empty++; - if (cause & MC5_INT_FATAL) - t3_fatal_err(adap); - - t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause); -} - -void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode) -{ -#define K * 1024 - - static unsigned int tcam_part_size[] = { /* in K 72-bit entries */ - 64 K, 128 K, 256 K, 32 K - }; - -#undef K - - u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG); - - mc5->adapter = adapter; - mc5->mode = (unsigned char)mode; - mc5->part_type = (unsigned char)G_TMTYPE(cfg); - if (cfg & F_TMTYPEHI) - mc5->part_type |= 4; - - mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)]; - if (mode == MC5_MODE_144_BIT) - mc5->tcam_size /= 2; -} diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h deleted file mode 100644 index 6990f6c..0000000 --- a/drivers/net/cxgb3/regs.h +++ /dev/null @@ -1,2598 +0,0 @@ -#define A_SG_CONTROL 0x0 - -#define S_CONGMODE 29 -#define V_CONGMODE(x) ((x) << S_CONGMODE) -#define 
F_CONGMODE V_CONGMODE(1U) - -#define S_TNLFLMODE 28 -#define V_TNLFLMODE(x) ((x) << S_TNLFLMODE) -#define F_TNLFLMODE V_TNLFLMODE(1U) - -#define S_FATLPERREN 27 -#define V_FATLPERREN(x) ((x) << S_FATLPERREN) -#define F_FATLPERREN V_FATLPERREN(1U) - -#define S_DROPPKT 20 -#define V_DROPPKT(x) ((x) << S_DROPPKT) -#define F_DROPPKT V_DROPPKT(1U) - -#define S_EGRGENCTRL 19 -#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL) -#define F_EGRGENCTRL V_EGRGENCTRL(1U) - -#define S_USERSPACESIZE 14 -#define M_USERSPACESIZE 0x1f -#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE) - -#define S_HOSTPAGESIZE 11 -#define M_HOSTPAGESIZE 0x7 -#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE) - -#define S_FLMODE 9 -#define V_FLMODE(x) ((x) << S_FLMODE) -#define F_FLMODE V_FLMODE(1U) - -#define S_PKTSHIFT 6 -#define M_PKTSHIFT 0x7 -#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT) - -#define S_ONEINTMULTQ 5 -#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ) -#define F_ONEINTMULTQ V_ONEINTMULTQ(1U) - -#define S_BIGENDIANINGRESS 2 -#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS) -#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U) - -#define S_ISCSICOALESCING 1 -#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING) -#define F_ISCSICOALESCING V_ISCSICOALESCING(1U) - -#define S_GLOBALENABLE 0 -#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE) -#define F_GLOBALENABLE V_GLOBALENABLE(1U) - -#define S_AVOIDCQOVFL 24 -#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL) -#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U) - -#define S_OPTONEINTMULTQ 23 -#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ) -#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U) - -#define S_CQCRDTCTRL 22 -#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL) -#define F_CQCRDTCTRL V_CQCRDTCTRL(1U) - -#define A_SG_KDOORBELL 0x4 - -#define S_SELEGRCNTX 31 -#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX) -#define F_SELEGRCNTX V_SELEGRCNTX(1U) - -#define S_EGRCNTX 0 -#define M_EGRCNTX 0xffff -#define V_EGRCNTX(x) ((x) << S_EGRCNTX) - -#define A_SG_GTS 0x8 - -#define S_RSPQ 29 -#define M_RSPQ 0x7 -#define V_RSPQ(x) ((x) << S_RSPQ) -#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ) - -#define S_NEWTIMER 16 -#define M_NEWTIMER 0x1fff -#define V_NEWTIMER(x) ((x) << S_NEWTIMER) - -#define S_NEWINDEX 0 -#define M_NEWINDEX 0xffff -#define V_NEWINDEX(x) ((x) << S_NEWINDEX) - -#define A_SG_CONTEXT_CMD 0xc - -#define S_CONTEXT_CMD_OPCODE 28 -#define M_CONTEXT_CMD_OPCODE 0xf -#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE) - -#define S_CONTEXT_CMD_BUSY 27 -#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY) -#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U) - -#define S_CQ_CREDIT 20 - -#define M_CQ_CREDIT 0x7f - -#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT) - -#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT) - -#define S_CQ 19 - -#define V_CQ(x) ((x) << S_CQ) -#define F_CQ V_CQ(1U) - -#define S_RESPONSEQ 18 -#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ) -#define F_RESPONSEQ V_RESPONSEQ(1U) - -#define S_EGRESS 17 -#define V_EGRESS(x) ((x) << S_EGRESS) -#define F_EGRESS V_EGRESS(1U) - -#define S_FREELIST 16 -#define V_FREELIST(x) ((x) << S_FREELIST) -#define F_FREELIST V_FREELIST(1U) - -#define S_CONTEXT 0 -#define M_CONTEXT 0xffff -#define V_CONTEXT(x) ((x) << S_CONTEXT) - -#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT) - -#define A_SG_CONTEXT_DATA0 0x10 - -#define A_SG_CONTEXT_DATA1 0x14 - -#define A_SG_CONTEXT_DATA2 0x18 - -#define A_SG_CONTEXT_DATA3 0x1c - -#define A_SG_CONTEXT_MASK0 0x20 - -#define A_SG_CONTEXT_MASK1 0x24 - -#define A_SG_CONTEXT_MASK2 0x28 
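/*
 * Editor's sketch, not part of the patch: regs.h encodes every register
 * field with a fixed naming scheme -- S_<FIELD> is the bit offset,
 * M_<FIELD> the right-justified mask, V_<FIELD>(x) places a value at the
 * offset, F_<FIELD> is the one-bit form, and G_<FIELD>(reg) extracts a
 * field.  A minimal, hypothetical use of the A_SG_CONTEXT_CMD definitions
 * above, assuming the t3_read_reg()/t3_write_reg() accessors seen in
 * mc5.c earlier in this patch:
 */
static void sg_context_cmd_example(struct adapter *adapter)
{
	/* Compose: opcode in bits 31:28, egress flag, context id in bits 15:0. */
	u32 cmd = V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(42);

	t3_write_reg(adapter, A_SG_CONTEXT_CMD, cmd);

	/* Decompose: check the busy bit, then extract the context id. */
	cmd = t3_read_reg(adapter, A_SG_CONTEXT_CMD);
	if (!(cmd & F_CONTEXT_CMD_BUSY))
		pr_debug("SG context %u command done\n", G_CONTEXT(cmd));
}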
- -#define A_SG_CONTEXT_MASK3 0x2c - -#define A_SG_RSPQ_CREDIT_RETURN 0x30 - -#define S_CREDITS 0 -#define M_CREDITS 0xffff -#define V_CREDITS(x) ((x) << S_CREDITS) - -#define A_SG_DATA_INTR 0x34 - -#define S_ERRINTR 31 -#define V_ERRINTR(x) ((x) << S_ERRINTR) -#define F_ERRINTR V_ERRINTR(1U) - -#define A_SG_HI_DRB_HI_THRSH 0x38 - -#define A_SG_HI_DRB_LO_THRSH 0x3c - -#define A_SG_LO_DRB_HI_THRSH 0x40 - -#define A_SG_LO_DRB_LO_THRSH 0x44 - -#define A_SG_RSPQ_FL_STATUS 0x4c - -#define S_RSPQ0DISABLED 8 - -#define S_FL0EMPTY 16 -#define V_FL0EMPTY(x) ((x) << S_FL0EMPTY) -#define F_FL0EMPTY V_FL0EMPTY(1U) - -#define A_SG_EGR_RCQ_DRB_THRSH 0x54 - -#define S_HIRCQDRBTHRSH 16 -#define M_HIRCQDRBTHRSH 0x7ff -#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH) - -#define S_LORCQDRBTHRSH 0 -#define M_LORCQDRBTHRSH 0x7ff -#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH) - -#define A_SG_EGR_CNTX_BADDR 0x58 - -#define A_SG_INT_CAUSE 0x5c - -#define S_HIRCQPARITYERROR 31 -#define V_HIRCQPARITYERROR(x) ((x) << S_HIRCQPARITYERROR) -#define F_HIRCQPARITYERROR V_HIRCQPARITYERROR(1U) - -#define S_LORCQPARITYERROR 30 -#define V_LORCQPARITYERROR(x) ((x) << S_LORCQPARITYERROR) -#define F_LORCQPARITYERROR V_LORCQPARITYERROR(1U) - -#define S_HIDRBPARITYERROR 29 -#define V_HIDRBPARITYERROR(x) ((x) << S_HIDRBPARITYERROR) -#define F_HIDRBPARITYERROR V_HIDRBPARITYERROR(1U) - -#define S_LODRBPARITYERROR 28 -#define V_LODRBPARITYERROR(x) ((x) << S_LODRBPARITYERROR) -#define F_LODRBPARITYERROR V_LODRBPARITYERROR(1U) - -#define S_FLPARITYERROR 22 -#define M_FLPARITYERROR 0x3f -#define V_FLPARITYERROR(x) ((x) << S_FLPARITYERROR) -#define G_FLPARITYERROR(x) (((x) >> S_FLPARITYERROR) & M_FLPARITYERROR) - -#define S_ITPARITYERROR 20 -#define M_ITPARITYERROR 0x3 -#define V_ITPARITYERROR(x) ((x) << S_ITPARITYERROR) -#define G_ITPARITYERROR(x) (((x) >> S_ITPARITYERROR) & M_ITPARITYERROR) - -#define S_IRPARITYERROR 19 -#define V_IRPARITYERROR(x) ((x) << S_IRPARITYERROR) -#define F_IRPARITYERROR V_IRPARITYERROR(1U) - -#define S_RCPARITYERROR 18 -#define V_RCPARITYERROR(x) ((x) << S_RCPARITYERROR) -#define F_RCPARITYERROR V_RCPARITYERROR(1U) - -#define S_OCPARITYERROR 17 -#define V_OCPARITYERROR(x) ((x) << S_OCPARITYERROR) -#define F_OCPARITYERROR V_OCPARITYERROR(1U) - -#define S_CPPARITYERROR 16 -#define V_CPPARITYERROR(x) ((x) << S_CPPARITYERROR) -#define F_CPPARITYERROR V_CPPARITYERROR(1U) - -#define S_R_REQ_FRAMINGERROR 15 -#define V_R_REQ_FRAMINGERROR(x) ((x) << S_R_REQ_FRAMINGERROR) -#define F_R_REQ_FRAMINGERROR V_R_REQ_FRAMINGERROR(1U) - -#define S_UC_REQ_FRAMINGERROR 14 -#define V_UC_REQ_FRAMINGERROR(x) ((x) << S_UC_REQ_FRAMINGERROR) -#define F_UC_REQ_FRAMINGERROR V_UC_REQ_FRAMINGERROR(1U) - -#define S_HICTLDRBDROPERR 13 -#define V_HICTLDRBDROPERR(x) ((x) << S_HICTLDRBDROPERR) -#define F_HICTLDRBDROPERR V_HICTLDRBDROPERR(1U) - -#define S_LOCTLDRBDROPERR 12 -#define V_LOCTLDRBDROPERR(x) ((x) << S_LOCTLDRBDROPERR) -#define F_LOCTLDRBDROPERR V_LOCTLDRBDROPERR(1U) - -#define S_HIPIODRBDROPERR 11 -#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR) -#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U) - -#define S_LOPIODRBDROPERR 10 -#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR) -#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) - -#define S_HIPRIORITYDBFULL 7 -#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL) -#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U) - -#define S_HIPRIORITYDBEMPTY 6 -#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY) -#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U) - 
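/*
 * Editor's sketch, not part of the patch: the A_SG_INT_CAUSE bits above
 * are consumed with the same read-cause / test-flags / write-back shape
 * as t3_mc5_intr_handler() earlier in this patch.  The handler name is
 * hypothetical, and treating the register as cleared by writing back the
 * observed bits is an assumption modeled on the MC5 handler:
 */
static void sg_intr_example(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_SG_INT_CAUSE);

	if (cause & F_HIRCQPARITYERROR)
		CH_ALERT(adapter, "SG high RCQ parity error\n");
	if (cause & F_LORCQPARITYERROR)
		CH_ALERT(adapter, "SG low RCQ parity error\n");
	if (cause & F_HIPRIORITYDBFULL)
		CH_ALERT(adapter, "SG high-priority doorbell full\n");

	/* Acknowledge everything observed in one write. */
	t3_write_reg(adapter, A_SG_INT_CAUSE, cause);
}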
-#define S_LOPRIORITYDBFULL 5 -#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL) -#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U) - -#define S_LOPRIORITYDBEMPTY 4 -#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY) -#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U) - -#define S_RSPQDISABLED 3 -#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) -#define F_RSPQDISABLED V_RSPQDISABLED(1U) - -#define S_RSPQCREDITOVERFOW 2 -#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW) -#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U) - -#define S_FLEMPTY 1 -#define V_FLEMPTY(x) ((x) << S_FLEMPTY) -#define F_FLEMPTY V_FLEMPTY(1U) - -#define A_SG_INT_ENABLE 0x60 - -#define A_SG_CMDQ_CREDIT_TH 0x64 - -#define S_TIMEOUT 8 -#define M_TIMEOUT 0xffffff -#define V_TIMEOUT(x) ((x) << S_TIMEOUT) - -#define S_THRESHOLD 0 -#define M_THRESHOLD 0xff -#define V_THRESHOLD(x) ((x) << S_THRESHOLD) - -#define A_SG_TIMER_TICK 0x68 - -#define A_SG_CQ_CONTEXT_BADDR 0x6c - -#define A_SG_OCO_BASE 0x70 - -#define S_BASE1 16 -#define M_BASE1 0xffff -#define V_BASE1(x) ((x) << S_BASE1) - -#define A_SG_DRB_PRI_THRESH 0x74 - -#define A_PCIX_INT_ENABLE 0x80 - -#define S_MSIXPARERR 22 -#define M_MSIXPARERR 0x7 - -#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR) - -#define S_CFPARERR 18 -#define M_CFPARERR 0xf - -#define V_CFPARERR(x) ((x) << S_CFPARERR) - -#define S_RFPARERR 14 -#define M_RFPARERR 0xf - -#define V_RFPARERR(x) ((x) << S_RFPARERR) - -#define S_WFPARERR 12 -#define M_WFPARERR 0x3 - -#define V_WFPARERR(x) ((x) << S_WFPARERR) - -#define S_PIOPARERR 11 -#define V_PIOPARERR(x) ((x) << S_PIOPARERR) -#define F_PIOPARERR V_PIOPARERR(1U) - -#define S_DETUNCECCERR 10 -#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR) -#define F_DETUNCECCERR V_DETUNCECCERR(1U) - -#define S_DETCORECCERR 9 -#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR) -#define F_DETCORECCERR V_DETCORECCERR(1U) - -#define S_RCVSPLCMPERR 8 -#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR) -#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U) - -#define S_UNXSPLCMP 7 -#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP) -#define F_UNXSPLCMP V_UNXSPLCMP(1U) - -#define S_SPLCMPDIS 6 -#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS) -#define F_SPLCMPDIS V_SPLCMPDIS(1U) - -#define S_DETPARERR 5 -#define V_DETPARERR(x) ((x) << S_DETPARERR) -#define F_DETPARERR V_DETPARERR(1U) - -#define S_SIGSYSERR 4 -#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR) -#define F_SIGSYSERR V_SIGSYSERR(1U) - -#define S_RCVMSTABT 3 -#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT) -#define F_RCVMSTABT V_RCVMSTABT(1U) - -#define S_RCVTARABT 2 -#define V_RCVTARABT(x) ((x) << S_RCVTARABT) -#define F_RCVTARABT V_RCVTARABT(1U) - -#define S_SIGTARABT 1 -#define V_SIGTARABT(x) ((x) << S_SIGTARABT) -#define F_SIGTARABT V_SIGTARABT(1U) - -#define S_MSTDETPARERR 0 -#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR) -#define F_MSTDETPARERR V_MSTDETPARERR(1U) - -#define A_PCIX_INT_CAUSE 0x84 - -#define A_PCIX_CFG 0x88 - -#define S_DMASTOPEN 19 -#define V_DMASTOPEN(x) ((x) << S_DMASTOPEN) -#define F_DMASTOPEN V_DMASTOPEN(1U) - -#define S_CLIDECEN 18 -#define V_CLIDECEN(x) ((x) << S_CLIDECEN) -#define F_CLIDECEN V_CLIDECEN(1U) - -#define A_PCIX_MODE 0x8c - -#define S_PCLKRANGE 6 -#define M_PCLKRANGE 0x3 -#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE) -#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE) - -#define S_PCIXINITPAT 2 -#define M_PCIXINITPAT 0xf -#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT) -#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT) - -#define S_64BIT 0 -#define 
V_64BIT(x) ((x) << S_64BIT) -#define F_64BIT V_64BIT(1U) - -#define A_PCIE_INT_ENABLE 0x80 - -#define S_BISTERR 15 -#define M_BISTERR 0xff - -#define V_BISTERR(x) ((x) << S_BISTERR) - -#define S_TXPARERR 18 -#define V_TXPARERR(x) ((x) << S_TXPARERR) -#define F_TXPARERR V_TXPARERR(1U) - -#define S_RXPARERR 17 -#define V_RXPARERR(x) ((x) << S_RXPARERR) -#define F_RXPARERR V_RXPARERR(1U) - -#define S_RETRYLUTPARERR 16 -#define V_RETRYLUTPARERR(x) ((x) << S_RETRYLUTPARERR) -#define F_RETRYLUTPARERR V_RETRYLUTPARERR(1U) - -#define S_RETRYBUFPARERR 15 -#define V_RETRYBUFPARERR(x) ((x) << S_RETRYBUFPARERR) -#define F_RETRYBUFPARERR V_RETRYBUFPARERR(1U) - -#define S_PCIE_MSIXPARERR 12 -#define M_PCIE_MSIXPARERR 0x7 - -#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR) - -#define S_PCIE_CFPARERR 11 -#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR) -#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U) - -#define S_PCIE_RFPARERR 10 -#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR) -#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U) - -#define S_PCIE_WFPARERR 9 -#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR) -#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U) - -#define S_PCIE_PIOPARERR 8 -#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR) -#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U) - -#define S_UNXSPLCPLERRC 7 -#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC) -#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U) - -#define S_UNXSPLCPLERRR 6 -#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR) -#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U) - -#define S_PEXERR 0 -#define V_PEXERR(x) ((x) << S_PEXERR) -#define F_PEXERR V_PEXERR(1U) - -#define A_PCIE_INT_CAUSE 0x84 - -#define S_PCIE_DMASTOPEN 24 -#define V_PCIE_DMASTOPEN(x) ((x) << S_PCIE_DMASTOPEN) -#define F_PCIE_DMASTOPEN V_PCIE_DMASTOPEN(1U) - -#define A_PCIE_CFG 0x88 - -#define S_ENABLELINKDWNDRST 21 -#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST) -#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U) - -#define S_ENABLELINKDOWNRST 20 -#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST) -#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U) - -#define S_PCIE_CLIDECEN 16 -#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN) -#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U) - -#define S_CRSTWRMMODE 0 -#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE) -#define F_CRSTWRMMODE V_CRSTWRMMODE(1U) - -#define A_PCIE_MODE 0x8c - -#define S_NUMFSTTRNSEQRX 10 -#define M_NUMFSTTRNSEQRX 0xff -#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX) -#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX) - -#define A_PCIE_PEX_CTRL0 0x98 - -#define S_NUMFSTTRNSEQ 22 -#define M_NUMFSTTRNSEQ 0xff -#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ) -#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ) - -#define S_REPLAYLMT 2 -#define M_REPLAYLMT 0xfffff - -#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT) - -#define A_PCIE_PEX_CTRL1 0x9c - -#define S_T3A_ACKLAT 0 -#define M_T3A_ACKLAT 0x7ff - -#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT) - -#define S_ACKLAT 0 -#define M_ACKLAT 0x1fff - -#define V_ACKLAT(x) ((x) << S_ACKLAT) - -#define A_PCIE_PEX_ERR 0xa4 - -#define A_T3DBG_GPIO_EN 0xd0 - -#define S_GPIO11_OEN 27 -#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN) -#define F_GPIO11_OEN V_GPIO11_OEN(1U) - -#define S_GPIO10_OEN 26 -#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN) -#define F_GPIO10_OEN V_GPIO10_OEN(1U) - -#define S_GPIO7_OEN 23 -#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN) -#define F_GPIO7_OEN V_GPIO7_OEN(1U) - -#define S_GPIO6_OEN 22 
-#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN) -#define F_GPIO6_OEN V_GPIO6_OEN(1U) - -#define S_GPIO5_OEN 21 -#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN) -#define F_GPIO5_OEN V_GPIO5_OEN(1U) - -#define S_GPIO4_OEN 20 -#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN) -#define F_GPIO4_OEN V_GPIO4_OEN(1U) - -#define S_GPIO2_OEN 18 -#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN) -#define F_GPIO2_OEN V_GPIO2_OEN(1U) - -#define S_GPIO1_OEN 17 -#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN) -#define F_GPIO1_OEN V_GPIO1_OEN(1U) - -#define S_GPIO0_OEN 16 -#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN) -#define F_GPIO0_OEN V_GPIO0_OEN(1U) - -#define S_GPIO10_OUT_VAL 10 -#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL) -#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U) - -#define S_GPIO7_OUT_VAL 7 -#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL) -#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U) - -#define S_GPIO6_OUT_VAL 6 -#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL) -#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U) - -#define S_GPIO5_OUT_VAL 5 -#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL) -#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U) - -#define S_GPIO4_OUT_VAL 4 -#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL) -#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U) - -#define S_GPIO2_OUT_VAL 2 -#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL) -#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U) - -#define S_GPIO1_OUT_VAL 1 -#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL) -#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U) - -#define S_GPIO0_OUT_VAL 0 -#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL) -#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U) - -#define A_T3DBG_INT_ENABLE 0xd8 - -#define S_GPIO11 11 -#define V_GPIO11(x) ((x) << S_GPIO11) -#define F_GPIO11 V_GPIO11(1U) - -#define S_GPIO10 10 -#define V_GPIO10(x) ((x) << S_GPIO10) -#define F_GPIO10 V_GPIO10(1U) - -#define S_GPIO9 9 -#define V_GPIO9(x) ((x) << S_GPIO9) -#define F_GPIO9 V_GPIO9(1U) - -#define S_GPIO7 7 -#define V_GPIO7(x) ((x) << S_GPIO7) -#define F_GPIO7 V_GPIO7(1U) - -#define S_GPIO6 6 -#define V_GPIO6(x) ((x) << S_GPIO6) -#define F_GPIO6 V_GPIO6(1U) - -#define S_GPIO5 5 -#define V_GPIO5(x) ((x) << S_GPIO5) -#define F_GPIO5 V_GPIO5(1U) - -#define S_GPIO4 4 -#define V_GPIO4(x) ((x) << S_GPIO4) -#define F_GPIO4 V_GPIO4(1U) - -#define S_GPIO3 3 -#define V_GPIO3(x) ((x) << S_GPIO3) -#define F_GPIO3 V_GPIO3(1U) - -#define S_GPIO2 2 -#define V_GPIO2(x) ((x) << S_GPIO2) -#define F_GPIO2 V_GPIO2(1U) - -#define S_GPIO1 1 -#define V_GPIO1(x) ((x) << S_GPIO1) -#define F_GPIO1 V_GPIO1(1U) - -#define S_GPIO0 0 -#define V_GPIO0(x) ((x) << S_GPIO0) -#define F_GPIO0 V_GPIO0(1U) - -#define A_T3DBG_INT_CAUSE 0xdc - -#define A_T3DBG_GPIO_ACT_LOW 0xf0 - -#define MC7_PMRX_BASE_ADDR 0x100 - -#define A_MC7_CFG 0x100 - -#define S_IFEN 13 -#define V_IFEN(x) ((x) << S_IFEN) -#define F_IFEN V_IFEN(1U) - -#define S_TERM150 11 -#define V_TERM150(x) ((x) << S_TERM150) -#define F_TERM150 V_TERM150(1U) - -#define S_SLOW 10 -#define V_SLOW(x) ((x) << S_SLOW) -#define F_SLOW V_SLOW(1U) - -#define S_WIDTH 8 -#define M_WIDTH 0x3 -#define V_WIDTH(x) ((x) << S_WIDTH) -#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH) - -#define S_BKS 6 -#define V_BKS(x) ((x) << S_BKS) -#define F_BKS V_BKS(1U) - -#define S_ORG 5 -#define V_ORG(x) ((x) << S_ORG) -#define F_ORG V_ORG(1U) - -#define S_DEN 2 -#define M_DEN 0x7 -#define V_DEN(x) ((x) << S_DEN) -#define G_DEN(x) (((x) >> S_DEN) & M_DEN) - -#define S_RDY 1 -#define V_RDY(x) ((x) << S_RDY) -#define F_RDY V_RDY(1U) - -#define S_CLKEN 0 -#define 
V_CLKEN(x) ((x) << S_CLKEN) -#define F_CLKEN V_CLKEN(1U) - -#define A_MC7_MODE 0x104 - -#define S_BUSY 31 -#define V_BUSY(x) ((x) << S_BUSY) -#define F_BUSY V_BUSY(1U) - -#define S_BUSY 31 -#define V_BUSY(x) ((x) << S_BUSY) -#define F_BUSY V_BUSY(1U) - -#define A_MC7_EXT_MODE1 0x108 - -#define A_MC7_EXT_MODE2 0x10c - -#define A_MC7_EXT_MODE3 0x110 - -#define A_MC7_PRE 0x114 - -#define A_MC7_REF 0x118 - -#define S_PREREFDIV 1 -#define M_PREREFDIV 0x3fff -#define V_PREREFDIV(x) ((x) << S_PREREFDIV) - -#define S_PERREFEN 0 -#define V_PERREFEN(x) ((x) << S_PERREFEN) -#define F_PERREFEN V_PERREFEN(1U) - -#define A_MC7_DLL 0x11c - -#define S_DLLENB 1 -#define V_DLLENB(x) ((x) << S_DLLENB) -#define F_DLLENB V_DLLENB(1U) - -#define S_DLLRST 0 -#define V_DLLRST(x) ((x) << S_DLLRST) -#define F_DLLRST V_DLLRST(1U) - -#define A_MC7_PARM 0x120 - -#define S_ACTTOPREDLY 26 -#define M_ACTTOPREDLY 0xf -#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY) - -#define S_ACTTORDWRDLY 23 -#define M_ACTTORDWRDLY 0x7 -#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY) - -#define S_PRECYC 20 -#define M_PRECYC 0x7 -#define V_PRECYC(x) ((x) << S_PRECYC) - -#define S_REFCYC 13 -#define M_REFCYC 0x7f -#define V_REFCYC(x) ((x) << S_REFCYC) - -#define S_BKCYC 8 -#define M_BKCYC 0x1f -#define V_BKCYC(x) ((x) << S_BKCYC) - -#define S_WRTORDDLY 4 -#define M_WRTORDDLY 0xf -#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY) - -#define S_RDTOWRDLY 0 -#define M_RDTOWRDLY 0xf -#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY) - -#define A_MC7_CAL 0x128 - -#define S_BUSY 31 -#define V_BUSY(x) ((x) << S_BUSY) -#define F_BUSY V_BUSY(1U) - -#define S_BUSY 31 -#define V_BUSY(x) ((x) << S_BUSY) -#define F_BUSY V_BUSY(1U) - -#define S_CAL_FAULT 30 -#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT) -#define F_CAL_FAULT V_CAL_FAULT(1U) - -#define S_SGL_CAL_EN 20 -#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN) -#define F_SGL_CAL_EN V_SGL_CAL_EN(1U) - -#define A_MC7_ERR_ADDR 0x12c - -#define A_MC7_ECC 0x130 - -#define S_ECCCHKEN 1 -#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN) -#define F_ECCCHKEN V_ECCCHKEN(1U) - -#define S_ECCGENEN 0 -#define V_ECCGENEN(x) ((x) << S_ECCGENEN) -#define F_ECCGENEN V_ECCGENEN(1U) - -#define A_MC7_CE_ADDR 0x134 - -#define A_MC7_CE_DATA0 0x138 - -#define A_MC7_CE_DATA1 0x13c - -#define A_MC7_CE_DATA2 0x140 - -#define S_DATA 0 -#define M_DATA 0xff - -#define G_DATA(x) (((x) >> S_DATA) & M_DATA) - -#define A_MC7_UE_ADDR 0x144 - -#define A_MC7_UE_DATA0 0x148 - -#define A_MC7_UE_DATA1 0x14c - -#define A_MC7_UE_DATA2 0x150 - -#define A_MC7_BD_ADDR 0x154 - -#define S_ADDR 3 - -#define M_ADDR 0x1fffffff - -#define A_MC7_BD_DATA0 0x158 - -#define A_MC7_BD_DATA1 0x15c - -#define A_MC7_BD_OP 0x164 - -#define S_OP 0 - -#define V_OP(x) ((x) << S_OP) -#define F_OP V_OP(1U) - -#define F_OP V_OP(1U) -#define A_SF_OP 0x6dc - -#define A_MC7_BIST_ADDR_BEG 0x168 - -#define A_MC7_BIST_ADDR_END 0x16c - -#define A_MC7_BIST_DATA 0x170 - -#define A_MC7_BIST_OP 0x174 - -#define S_CONT 3 -#define V_CONT(x) ((x) << S_CONT) -#define F_CONT V_CONT(1U) - -#define F_CONT V_CONT(1U) - -#define A_MC7_INT_ENABLE 0x178 - -#define S_AE 17 -#define V_AE(x) ((x) << S_AE) -#define F_AE V_AE(1U) - -#define S_PE 2 -#define M_PE 0x7fff - -#define V_PE(x) ((x) << S_PE) - -#define G_PE(x) (((x) >> S_PE) & M_PE) - -#define S_UE 1 -#define V_UE(x) ((x) << S_UE) -#define F_UE V_UE(1U) - -#define S_CE 0 -#define V_CE(x) ((x) << S_CE) -#define F_CE V_CE(1U) - -#define A_MC7_INT_CAUSE 0x17c - -#define MC7_PMTX_BASE_ADDR 0x180 - -#define MC7_CM_BASE_ADDR 0x200 - -#define A_CIM_BOOT_CFG 
0x280 - -#define S_BOOTADDR 2 -#define M_BOOTADDR 0x3fffffff -#define V_BOOTADDR(x) ((x) << S_BOOTADDR) - -#define A_CIM_SDRAM_BASE_ADDR 0x28c - -#define A_CIM_SDRAM_ADDR_SIZE 0x290 - -#define A_CIM_HOST_INT_ENABLE 0x298 - -#define S_DTAGPARERR 28 -#define V_DTAGPARERR(x) ((x) << S_DTAGPARERR) -#define F_DTAGPARERR V_DTAGPARERR(1U) - -#define S_ITAGPARERR 27 -#define V_ITAGPARERR(x) ((x) << S_ITAGPARERR) -#define F_ITAGPARERR V_ITAGPARERR(1U) - -#define S_IBQTPPARERR 26 -#define V_IBQTPPARERR(x) ((x) << S_IBQTPPARERR) -#define F_IBQTPPARERR V_IBQTPPARERR(1U) - -#define S_IBQULPPARERR 25 -#define V_IBQULPPARERR(x) ((x) << S_IBQULPPARERR) -#define F_IBQULPPARERR V_IBQULPPARERR(1U) - -#define S_IBQSGEHIPARERR 24 -#define V_IBQSGEHIPARERR(x) ((x) << S_IBQSGEHIPARERR) -#define F_IBQSGEHIPARERR V_IBQSGEHIPARERR(1U) - -#define S_IBQSGELOPARERR 23 -#define V_IBQSGELOPARERR(x) ((x) << S_IBQSGELOPARERR) -#define F_IBQSGELOPARERR V_IBQSGELOPARERR(1U) - -#define S_OBQULPLOPARERR 22 -#define V_OBQULPLOPARERR(x) ((x) << S_OBQULPLOPARERR) -#define F_OBQULPLOPARERR V_OBQULPLOPARERR(1U) - -#define S_OBQULPHIPARERR 21 -#define V_OBQULPHIPARERR(x) ((x) << S_OBQULPHIPARERR) -#define F_OBQULPHIPARERR V_OBQULPHIPARERR(1U) - -#define S_OBQSGEPARERR 20 -#define V_OBQSGEPARERR(x) ((x) << S_OBQSGEPARERR) -#define F_OBQSGEPARERR V_OBQSGEPARERR(1U) - -#define S_DCACHEPARERR 19 -#define V_DCACHEPARERR(x) ((x) << S_DCACHEPARERR) -#define F_DCACHEPARERR V_DCACHEPARERR(1U) - -#define S_ICACHEPARERR 18 -#define V_ICACHEPARERR(x) ((x) << S_ICACHEPARERR) -#define F_ICACHEPARERR V_ICACHEPARERR(1U) - -#define S_DRAMPARERR 17 -#define V_DRAMPARERR(x) ((x) << S_DRAMPARERR) -#define F_DRAMPARERR V_DRAMPARERR(1U) - -#define A_CIM_HOST_INT_CAUSE 0x29c - -#define S_BLKWRPLINT 12 -#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT) -#define F_BLKWRPLINT V_BLKWRPLINT(1U) - -#define S_BLKRDPLINT 11 -#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT) -#define F_BLKRDPLINT V_BLKRDPLINT(1U) - -#define S_BLKWRCTLINT 10 -#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT) -#define F_BLKWRCTLINT V_BLKWRCTLINT(1U) - -#define S_BLKRDCTLINT 9 -#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT) -#define F_BLKRDCTLINT V_BLKRDCTLINT(1U) - -#define S_BLKWRFLASHINT 8 -#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT) -#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U) - -#define S_BLKRDFLASHINT 7 -#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT) -#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U) - -#define S_SGLWRFLASHINT 6 -#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT) -#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U) - -#define S_WRBLKFLASHINT 5 -#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT) -#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U) - -#define S_BLKWRBOOTINT 4 -#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT) -#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U) - -#define S_FLASHRANGEINT 2 -#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT) -#define F_FLASHRANGEINT V_FLASHRANGEINT(1U) - -#define S_SDRAMRANGEINT 1 -#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT) -#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U) - -#define S_RSVDSPACEINT 0 -#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT) -#define F_RSVDSPACEINT V_RSVDSPACEINT(1U) - -#define A_CIM_HOST_ACC_CTRL 0x2b0 - -#define S_HOSTBUSY 17 -#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY) -#define F_HOSTBUSY V_HOSTBUSY(1U) - -#define A_CIM_HOST_ACC_DATA 0x2b4 - -#define A_CIM_IBQ_DBG_CFG 0x2c0 - -#define S_IBQDBGADDR 16 -#define M_IBQDBGADDR 0x1ff -#define V_IBQDBGADDR(x) ((x) << S_IBQDBGADDR) -#define G_IBQDBGADDR(x) (((x) >> 
S_IBQDBGADDR) & M_IBQDBGADDR) - -#define S_IBQDBGQID 3 -#define M_IBQDBGQID 0x3 -#define V_IBQDBGQID(x) ((x) << S_IBQDBGQID) -#define G_IBQDBGQID(x) (((x) >> S_IBQDBGQID) & M_IBQDBGQID) - -#define S_IBQDBGWR 2 -#define V_IBQDBGWR(x) ((x) << S_IBQDBGWR) -#define F_IBQDBGWR V_IBQDBGWR(1U) - -#define S_IBQDBGBUSY 1 -#define V_IBQDBGBUSY(x) ((x) << S_IBQDBGBUSY) -#define F_IBQDBGBUSY V_IBQDBGBUSY(1U) - -#define S_IBQDBGEN 0 -#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN) -#define F_IBQDBGEN V_IBQDBGEN(1U) - -#define A_CIM_IBQ_DBG_DATA 0x2c8 - -#define A_TP_IN_CONFIG 0x300 - -#define S_RXFBARBPRIO 25 -#define V_RXFBARBPRIO(x) ((x) << S_RXFBARBPRIO) -#define F_RXFBARBPRIO V_RXFBARBPRIO(1U) - -#define S_TXFBARBPRIO 24 -#define V_TXFBARBPRIO(x) ((x) << S_TXFBARBPRIO) -#define F_TXFBARBPRIO V_TXFBARBPRIO(1U) - -#define S_NICMODE 14 -#define V_NICMODE(x) ((x) << S_NICMODE) -#define F_NICMODE V_NICMODE(1U) - -#define F_NICMODE V_NICMODE(1U) - -#define S_IPV6ENABLE 15 -#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE) -#define F_IPV6ENABLE V_IPV6ENABLE(1U) - -#define A_TP_OUT_CONFIG 0x304 - -#define S_VLANEXTRACTIONENABLE 12 - -#define A_TP_GLOBAL_CONFIG 0x308 - -#define S_TXPACINGENABLE 24 -#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE) -#define F_TXPACINGENABLE V_TXPACINGENABLE(1U) - -#define S_PATHMTU 15 -#define V_PATHMTU(x) ((x) << S_PATHMTU) -#define F_PATHMTU V_PATHMTU(1U) - -#define S_IPCHECKSUMOFFLOAD 13 -#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD) -#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U) - -#define S_UDPCHECKSUMOFFLOAD 12 -#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD) -#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U) - -#define S_TCPCHECKSUMOFFLOAD 11 -#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD) -#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U) - -#define S_IPTTL 0 -#define M_IPTTL 0xff -#define V_IPTTL(x) ((x) << S_IPTTL) - -#define A_TP_CMM_MM_BASE 0x314 - -#define A_TP_CMM_TIMER_BASE 0x318 - -#define S_CMTIMERMAXNUM 28 -#define M_CMTIMERMAXNUM 0x3 -#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM) - -#define A_TP_PMM_SIZE 0x31c - -#define A_TP_PMM_TX_BASE 0x320 - -#define A_TP_PMM_RX_BASE 0x328 - -#define A_TP_PMM_RX_PAGE_SIZE 0x32c - -#define A_TP_PMM_RX_MAX_PAGE 0x330 - -#define A_TP_PMM_TX_PAGE_SIZE 0x334 - -#define A_TP_PMM_TX_MAX_PAGE 0x338 - -#define A_TP_TCP_OPTIONS 0x340 - -#define S_MTUDEFAULT 16 -#define M_MTUDEFAULT 0xffff -#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT) - -#define S_MTUENABLE 10 -#define V_MTUENABLE(x) ((x) << S_MTUENABLE) -#define F_MTUENABLE V_MTUENABLE(1U) - -#define S_SACKRX 8 -#define V_SACKRX(x) ((x) << S_SACKRX) -#define F_SACKRX V_SACKRX(1U) - -#define S_SACKMODE 4 - -#define M_SACKMODE 0x3 - -#define V_SACKMODE(x) ((x) << S_SACKMODE) - -#define S_WINDOWSCALEMODE 2 -#define M_WINDOWSCALEMODE 0x3 -#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE) - -#define S_TIMESTAMPSMODE 0 - -#define M_TIMESTAMPSMODE 0x3 - -#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE) - -#define A_TP_DACK_CONFIG 0x344 - -#define S_AUTOSTATE3 30 -#define M_AUTOSTATE3 0x3 -#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3) - -#define S_AUTOSTATE2 28 -#define M_AUTOSTATE2 0x3 -#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2) - -#define S_AUTOSTATE1 26 -#define M_AUTOSTATE1 0x3 -#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1) - -#define S_BYTETHRESHOLD 5 -#define M_BYTETHRESHOLD 0xfffff -#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD) - -#define S_MSSTHRESHOLD 3 -#define M_MSSTHRESHOLD 0x3 -#define 
V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD) - -#define S_AUTOCAREFUL 2 -#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL) -#define F_AUTOCAREFUL V_AUTOCAREFUL(1U) - -#define S_AUTOENABLE 1 -#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE) -#define F_AUTOENABLE V_AUTOENABLE(1U) - -#define S_DACK_MODE 0 -#define V_DACK_MODE(x) ((x) << S_DACK_MODE) -#define F_DACK_MODE V_DACK_MODE(1U) - -#define A_TP_PC_CONFIG 0x348 - -#define S_TXTOSQUEUEMAPMODE 26 -#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE) -#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U) - -#define S_ENABLEEPCMDAFULL 23 -#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL) -#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U) - -#define S_MODULATEUNIONMODE 22 -#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE) -#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U) - -#define S_TXDEFERENABLE 20 -#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE) -#define F_TXDEFERENABLE V_TXDEFERENABLE(1U) - -#define S_RXCONGESTIONMODE 19 -#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE) -#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U) - -#define S_HEARBEATDACK 16 -#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK) -#define F_HEARBEATDACK V_HEARBEATDACK(1U) - -#define S_TXCONGESTIONMODE 15 -#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE) -#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U) - -#define S_ENABLEOCSPIFULL 30 -#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL) -#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U) - -#define S_LOCKTID 28 -#define V_LOCKTID(x) ((x) << S_LOCKTID) -#define F_LOCKTID V_LOCKTID(1U) - -#define S_TABLELATENCYDELTA 0 -#define M_TABLELATENCYDELTA 0xf -#define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA) -#define G_TABLELATENCYDELTA(x) \ - (((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA) - -#define A_TP_PC_CONFIG2 0x34c - -#define S_DISBLEDAPARBIT0 15 -#define V_DISBLEDAPARBIT0(x) ((x) << S_DISBLEDAPARBIT0) -#define F_DISBLEDAPARBIT0 V_DISBLEDAPARBIT0(1U) - -#define S_ENABLEARPMISS 13 -#define V_ENABLEARPMISS(x) ((x) << S_ENABLEARPMISS) -#define F_ENABLEARPMISS V_ENABLEARPMISS(1U) - -#define S_ENABLENONOFDTNLSYN 12 -#define V_ENABLENONOFDTNLSYN(x) ((x) << S_ENABLENONOFDTNLSYN) -#define F_ENABLENONOFDTNLSYN V_ENABLENONOFDTNLSYN(1U) - -#define S_ENABLEIPV6RSS 11 -#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS) -#define F_ENABLEIPV6RSS V_ENABLEIPV6RSS(1U) - -#define S_CHDRAFULL 4 -#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL) -#define F_CHDRAFULL V_CHDRAFULL(1U) - -#define A_TP_TCP_BACKOFF_REG0 0x350 - -#define A_TP_TCP_BACKOFF_REG1 0x354 - -#define A_TP_TCP_BACKOFF_REG2 0x358 - -#define A_TP_TCP_BACKOFF_REG3 0x35c - -#define A_TP_PARA_REG2 0x368 - -#define S_MAXRXDATA 16 -#define M_MAXRXDATA 0xffff -#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA) - -#define S_RXCOALESCESIZE 0 -#define M_RXCOALESCESIZE 0xffff -#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE) - -#define A_TP_PARA_REG3 0x36c - -#define S_TXDATAACKIDX 16 -#define M_TXDATAACKIDX 0xf - -#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX) - -#define S_TXPACEAUTOSTRICT 10 -#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT) -#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U) - -#define S_TXPACEFIXED 9 -#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED) -#define F_TXPACEFIXED V_TXPACEFIXED(1U) - -#define S_TXPACEAUTO 8 -#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO) -#define F_TXPACEAUTO V_TXPACEAUTO(1U) - -#define S_RXCOALESCEENABLE 1 -#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE) -#define 
F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U) - -#define S_RXCOALESCEPSHEN 0 -#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN) -#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U) - -#define A_TP_PARA_REG4 0x370 - -#define A_TP_PARA_REG5 0x374 - -#define S_RXDDPOFFINIT 3 -#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT) -#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U) - -#define A_TP_PARA_REG6 0x378 - -#define S_T3A_ENABLEESND 13 -#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND) -#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U) - -#define S_ENABLEESND 11 -#define V_ENABLEESND(x) ((x) << S_ENABLEESND) -#define F_ENABLEESND V_ENABLEESND(1U) - -#define A_TP_PARA_REG7 0x37c - -#define S_PMMAXXFERLEN1 16 -#define M_PMMAXXFERLEN1 0xffff -#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1) - -#define S_PMMAXXFERLEN0 0 -#define M_PMMAXXFERLEN0 0xffff -#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0) - -#define A_TP_TIMER_RESOLUTION 0x390 - -#define S_TIMERRESOLUTION 16 -#define M_TIMERRESOLUTION 0xff -#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION) - -#define S_TIMESTAMPRESOLUTION 8 -#define M_TIMESTAMPRESOLUTION 0xff -#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION) - -#define S_DELAYEDACKRESOLUTION 0 -#define M_DELAYEDACKRESOLUTION 0xff -#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION) - -#define A_TP_MSL 0x394 - -#define A_TP_RXT_MIN 0x398 - -#define A_TP_RXT_MAX 0x39c - -#define A_TP_PERS_MIN 0x3a0 - -#define A_TP_PERS_MAX 0x3a4 - -#define A_TP_KEEP_IDLE 0x3a8 - -#define A_TP_KEEP_INTVL 0x3ac - -#define A_TP_INIT_SRTT 0x3b0 - -#define A_TP_DACK_TIMER 0x3b4 - -#define A_TP_FINWAIT2_TIMER 0x3b8 - -#define A_TP_SHIFT_CNT 0x3c0 - -#define S_SYNSHIFTMAX 24 - -#define M_SYNSHIFTMAX 0xff - -#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX) - -#define S_RXTSHIFTMAXR1 20 - -#define M_RXTSHIFTMAXR1 0xf - -#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1) - -#define S_RXTSHIFTMAXR2 16 - -#define M_RXTSHIFTMAXR2 0xf - -#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2) - -#define S_PERSHIFTBACKOFFMAX 12 -#define M_PERSHIFTBACKOFFMAX 0xf -#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX) - -#define S_PERSHIFTMAX 8 -#define M_PERSHIFTMAX 0xf -#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX) - -#define S_KEEPALIVEMAX 0 - -#define M_KEEPALIVEMAX 0xff - -#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX) - -#define A_TP_MTU_PORT_TABLE 0x3d0 - -#define A_TP_CCTRL_TABLE 0x3dc - -#define A_TP_MTU_TABLE 0x3e4 - -#define A_TP_RSS_MAP_TABLE 0x3e8 - -#define A_TP_RSS_LKP_TABLE 0x3ec - -#define A_TP_RSS_CONFIG 0x3f0 - -#define S_TNL4TUPEN 29 -#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN) -#define F_TNL4TUPEN V_TNL4TUPEN(1U) - -#define S_TNL2TUPEN 28 -#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN) -#define F_TNL2TUPEN V_TNL2TUPEN(1U) - -#define S_TNLPRTEN 26 -#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN) -#define F_TNLPRTEN V_TNLPRTEN(1U) - -#define S_TNLMAPEN 25 -#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN) -#define F_TNLMAPEN V_TNLMAPEN(1U) - -#define S_TNLLKPEN 24 -#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN) -#define F_TNLLKPEN V_TNLLKPEN(1U) - -#define S_RRCPLMAPEN 7 -#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN) -#define F_RRCPLMAPEN V_RRCPLMAPEN(1U) - -#define S_RRCPLCPUSIZE 4 -#define M_RRCPLCPUSIZE 0x7 -#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE) - -#define S_RQFEEDBACKENABLE 3 -#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE) -#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U) - -#define S_HASHTOEPLITZ 2 -#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ) 
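These definitions follow the file's uniform field-macro scheme: for a field FOO, S_FOO is the bit offset, M_FOO the unshifted mask, V_FOO(x) shifts a value into place, G_FOO(x) extracts the field from a register word, and F_FOO is the pre-shifted value of a single-bit field. A minimal read-modify-write sketch using the A_MC7_CFG fields defined above (the function name is hypothetical; t3_read_reg()/t3_write_reg() are the driver's own register accessors):

	static void mc7_set_width(struct adapter *adapter, unsigned int width)
	{
		u32 val = t3_read_reg(adapter, A_MC7_CFG);

		val &= ~V_WIDTH(M_WIDTH);	/* clear the 2-bit WIDTH field */
		val |= V_WIDTH(width) | F_IFEN;	/* set new width, enable interface */
		t3_write_reg(adapter, A_MC7_CFG, val);
	}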
-#define F_HASHTOEPLITZ V_HASHTOEPLITZ(1U) - -#define S_DISABLE 0 - -#define A_TP_TM_PIO_ADDR 0x418 - -#define A_TP_TM_PIO_DATA 0x41c - -#define A_TP_TX_MOD_QUE_TABLE 0x420 - -#define A_TP_TX_RESOURCE_LIMIT 0x424 - -#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428 - -#define S_TX_MOD_QUEUE_REQ_MAP 0 -#define M_TX_MOD_QUEUE_REQ_MAP 0xff -#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP) - -#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c - -#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430 - -#define A_TP_MOD_CHANNEL_WEIGHT 0x434 - -#define A_TP_MOD_RATE_LIMIT 0x438 - -#define A_TP_PIO_ADDR 0x440 - -#define A_TP_PIO_DATA 0x444 - -#define A_TP_RESET 0x44c - -#define S_FLSTINITENABLE 1 -#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE) -#define F_FLSTINITENABLE V_FLSTINITENABLE(1U) - -#define S_TPRESET 0 -#define V_TPRESET(x) ((x) << S_TPRESET) -#define F_TPRESET V_TPRESET(1U) - -#define A_TP_CMM_MM_RX_FLST_BASE 0x460 - -#define A_TP_CMM_MM_TX_FLST_BASE 0x464 - -#define A_TP_CMM_MM_PS_FLST_BASE 0x468 - -#define A_TP_MIB_INDEX 0x450 - -#define A_TP_MIB_RDATA 0x454 - -#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c - -#define A_TP_INT_ENABLE 0x470 - -#define S_FLMTXFLSTEMPTY 30 -#define V_FLMTXFLSTEMPTY(x) ((x) << S_FLMTXFLSTEMPTY) -#define F_FLMTXFLSTEMPTY V_FLMTXFLSTEMPTY(1U) - -#define S_FLMRXFLSTEMPTY 29 -#define V_FLMRXFLSTEMPTY(x) ((x) << S_FLMRXFLSTEMPTY) -#define F_FLMRXFLSTEMPTY V_FLMRXFLSTEMPTY(1U) - -#define S_ARPLUTPERR 26 -#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR) -#define F_ARPLUTPERR V_ARPLUTPERR(1U) - -#define S_CMCACHEPERR 24 -#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR) -#define F_CMCACHEPERR V_CMCACHEPERR(1U) - -#define A_TP_INT_CAUSE 0x474 - -#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8 - -#define A_TP_TX_DROP_CFG_CH0 0x12b - -#define A_TP_TX_DROP_MODE 0x12f - -#define A_TP_EGRESS_CONFIG 0x145 - -#define S_REWRITEFORCETOSIZE 0 -#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE) -#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U) - -#define A_TP_TX_TRC_KEY0 0x20 - -#define A_TP_RX_TRC_KEY0 0x120 - -#define A_TP_TX_DROP_CNT_CH0 0x12d - -#define S_TXDROPCNTCH0RCVD 0 -#define M_TXDROPCNTCH0RCVD 0xffff -#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD) -#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \ - M_TXDROPCNTCH0RCVD) - -#define A_TP_PROXY_FLOW_CNTL 0x4b0 - -#define A_TP_EMBED_OP_FIELD0 0x4e8 -#define A_TP_EMBED_OP_FIELD1 0x4ec -#define A_TP_EMBED_OP_FIELD2 0x4f0 -#define A_TP_EMBED_OP_FIELD3 0x4f4 -#define A_TP_EMBED_OP_FIELD4 0x4f8 -#define A_TP_EMBED_OP_FIELD5 0x4fc - -#define A_ULPRX_CTL 0x500 - -#define S_ROUND_ROBIN 4 -#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN) -#define F_ROUND_ROBIN V_ROUND_ROBIN(1U) - -#define A_ULPRX_INT_ENABLE 0x504 - -#define S_DATASELFRAMEERR0 7 -#define V_DATASELFRAMEERR0(x) ((x) << S_DATASELFRAMEERR0) -#define F_DATASELFRAMEERR0 V_DATASELFRAMEERR0(1U) - -#define S_DATASELFRAMEERR1 6 -#define V_DATASELFRAMEERR1(x) ((x) << S_DATASELFRAMEERR1) -#define F_DATASELFRAMEERR1 V_DATASELFRAMEERR1(1U) - -#define S_PCMDMUXPERR 5 -#define V_PCMDMUXPERR(x) ((x) << S_PCMDMUXPERR) -#define F_PCMDMUXPERR V_PCMDMUXPERR(1U) - -#define S_ARBFPERR 4 -#define V_ARBFPERR(x) ((x) << S_ARBFPERR) -#define F_ARBFPERR V_ARBFPERR(1U) - -#define S_ARBPF0PERR 3 -#define V_ARBPF0PERR(x) ((x) << S_ARBPF0PERR) -#define F_ARBPF0PERR V_ARBPF0PERR(1U) - -#define S_ARBPF1PERR 2 -#define V_ARBPF1PERR(x) ((x) << S_ARBPF1PERR) -#define F_ARBPF1PERR V_ARBPF1PERR(1U) - -#define S_PARERRPCMD 1 -#define V_PARERRPCMD(x) ((x) << S_PARERRPCMD) -#define F_PARERRPCMD 
V_PARERRPCMD(1U) - -#define S_PARERRDATA 0 -#define V_PARERRDATA(x) ((x) << S_PARERRDATA) -#define F_PARERRDATA V_PARERRDATA(1U) - -#define A_ULPRX_INT_CAUSE 0x508 - -#define A_ULPRX_ISCSI_LLIMIT 0x50c - -#define A_ULPRX_ISCSI_ULIMIT 0x510 - -#define A_ULPRX_ISCSI_TAGMASK 0x514 - -#define A_ULPRX_ISCSI_PSZ 0x518 - -#define A_ULPRX_TDDP_LLIMIT 0x51c - -#define A_ULPRX_TDDP_ULIMIT 0x520 -#define A_ULPRX_TDDP_PSZ 0x528 - -#define S_HPZ0 0 -#define M_HPZ0 0xf -#define V_HPZ0(x) ((x) << S_HPZ0) -#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0) - -#define A_ULPRX_STAG_LLIMIT 0x52c - -#define A_ULPRX_STAG_ULIMIT 0x530 - -#define A_ULPRX_RQ_LLIMIT 0x534 -#define A_ULPRX_RQ_LLIMIT 0x534 - -#define A_ULPRX_RQ_ULIMIT 0x538 -#define A_ULPRX_RQ_ULIMIT 0x538 - -#define A_ULPRX_PBL_LLIMIT 0x53c - -#define A_ULPRX_PBL_ULIMIT 0x540 -#define A_ULPRX_PBL_ULIMIT 0x540 - -#define A_ULPRX_TDDP_TAGMASK 0x524 - -#define A_ULPRX_RQ_LLIMIT 0x534 -#define A_ULPRX_RQ_LLIMIT 0x534 - -#define A_ULPRX_RQ_ULIMIT 0x538 -#define A_ULPRX_RQ_ULIMIT 0x538 - -#define A_ULPRX_PBL_ULIMIT 0x540 -#define A_ULPRX_PBL_ULIMIT 0x540 - -#define A_ULPTX_CONFIG 0x580 - -#define S_CFG_CQE_SOP_MASK 1 -#define V_CFG_CQE_SOP_MASK(x) ((x) << S_CFG_CQE_SOP_MASK) -#define F_CFG_CQE_SOP_MASK V_CFG_CQE_SOP_MASK(1U) - -#define S_CFG_RR_ARB 0 -#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB) -#define F_CFG_RR_ARB V_CFG_RR_ARB(1U) - -#define A_ULPTX_INT_ENABLE 0x584 - -#define S_PBL_BOUND_ERR_CH1 1 -#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1) -#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U) - -#define S_PBL_BOUND_ERR_CH0 0 -#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0) -#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U) - -#define A_ULPTX_INT_CAUSE 0x588 - -#define A_ULPTX_TPT_LLIMIT 0x58c - -#define A_ULPTX_TPT_ULIMIT 0x590 - -#define A_ULPTX_PBL_LLIMIT 0x594 - -#define A_ULPTX_PBL_ULIMIT 0x598 - -#define A_ULPTX_DMA_WEIGHT 0x5ac - -#define S_D1_WEIGHT 16 -#define M_D1_WEIGHT 0xffff -#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT) - -#define S_D0_WEIGHT 0 -#define M_D0_WEIGHT 0xffff -#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT) - -#define A_PM1_RX_CFG 0x5c0 -#define A_PM1_RX_MODE 0x5c4 - -#define A_PM1_RX_INT_ENABLE 0x5d8 - -#define S_ZERO_E_CMD_ERROR 18 -#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR) -#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U) - -#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17 -#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR) -#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U) - -#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16 -#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR) -#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U) - -#define S_IESPI0_RX_FRAMING_ERROR 15 -#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR) -#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U) - -#define S_IESPI1_RX_FRAMING_ERROR 14 -#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR) -#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U) - -#define S_IESPI0_TX_FRAMING_ERROR 13 -#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR) -#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U) - -#define S_IESPI1_TX_FRAMING_ERROR 12 -#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR) -#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U) - -#define S_OCSPI0_RX_FRAMING_ERROR 11 -#define 
V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR) -#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U) - -#define S_OCSPI1_RX_FRAMING_ERROR 10 -#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR) -#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U) - -#define S_OCSPI0_TX_FRAMING_ERROR 9 -#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR) -#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U) - -#define S_OCSPI1_TX_FRAMING_ERROR 8 -#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR) -#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U) - -#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7 -#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR) -#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U) - -#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6 -#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR) -#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U) - -#define S_IESPI_PAR_ERROR 3 -#define M_IESPI_PAR_ERROR 0x7 - -#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR) - -#define S_OCSPI_PAR_ERROR 0 -#define M_OCSPI_PAR_ERROR 0x7 - -#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR) - -#define A_PM1_RX_INT_CAUSE 0x5dc - -#define A_PM1_TX_CFG 0x5e0 -#define A_PM1_TX_MODE 0x5e4 - -#define A_PM1_TX_INT_ENABLE 0x5f8 - -#define S_ZERO_C_CMD_ERROR 18 -#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR) -#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U) - -#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17 -#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR) -#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U) - -#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16 -#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR) -#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U) - -#define S_ICSPI0_RX_FRAMING_ERROR 15 -#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR) -#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U) - -#define S_ICSPI1_RX_FRAMING_ERROR 14 -#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR) -#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U) - -#define S_ICSPI0_TX_FRAMING_ERROR 13 -#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR) -#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U) - -#define S_ICSPI1_TX_FRAMING_ERROR 12 -#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR) -#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U) - -#define S_OESPI0_RX_FRAMING_ERROR 11 -#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR) -#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U) - -#define S_OESPI1_RX_FRAMING_ERROR 10 -#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR) -#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U) - -#define S_OESPI0_TX_FRAMING_ERROR 9 -#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR) -#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U) - -#define S_OESPI1_TX_FRAMING_ERROR 8 -#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR) -#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U) - -#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7 -#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) 
((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR) -#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U) - -#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6 -#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR) -#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U) - -#define S_ICSPI_PAR_ERROR 3 -#define M_ICSPI_PAR_ERROR 0x7 - -#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR) - -#define S_OESPI_PAR_ERROR 0 -#define M_OESPI_PAR_ERROR 0x7 - -#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR) - -#define A_PM1_TX_INT_CAUSE 0x5fc - -#define A_MPS_CFG 0x600 - -#define S_TPRXPORTEN 4 -#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN) -#define F_TPRXPORTEN V_TPRXPORTEN(1U) - -#define S_TPTXPORT1EN 3 -#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN) -#define F_TPTXPORT1EN V_TPTXPORT1EN(1U) - -#define S_TPTXPORT0EN 2 -#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN) -#define F_TPTXPORT0EN V_TPTXPORT0EN(1U) - -#define S_PORT1ACTIVE 1 -#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE) -#define F_PORT1ACTIVE V_PORT1ACTIVE(1U) - -#define S_PORT0ACTIVE 0 -#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE) -#define F_PORT0ACTIVE V_PORT0ACTIVE(1U) - -#define S_ENFORCEPKT 11 -#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT) -#define F_ENFORCEPKT V_ENFORCEPKT(1U) - -#define A_MPS_INT_ENABLE 0x61c - -#define S_MCAPARERRENB 6 -#define M_MCAPARERRENB 0x7 - -#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB) - -#define S_RXTPPARERRENB 4 -#define M_RXTPPARERRENB 0x3 - -#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB) - -#define S_TX1TPPARERRENB 2 -#define M_TX1TPPARERRENB 0x3 - -#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB) - -#define S_TX0TPPARERRENB 0 -#define M_TX0TPPARERRENB 0x3 - -#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB) - -#define A_MPS_INT_CAUSE 0x620 - -#define S_MCAPARERR 6 -#define M_MCAPARERR 0x7 - -#define V_MCAPARERR(x) ((x) << S_MCAPARERR) - -#define S_RXTPPARERR 4 -#define M_RXTPPARERR 0x3 - -#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR) - -#define S_TX1TPPARERR 2 -#define M_TX1TPPARERR 0x3 - -#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR) - -#define S_TX0TPPARERR 0 -#define M_TX0TPPARERR 0x3 - -#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR) - -#define A_CPL_SWITCH_CNTRL 0x640 - -#define A_CPL_INTR_ENABLE 0x650 - -#define S_CIM_OP_MAP_PERR 5 -#define V_CIM_OP_MAP_PERR(x) ((x) << S_CIM_OP_MAP_PERR) -#define F_CIM_OP_MAP_PERR V_CIM_OP_MAP_PERR(1U) - -#define S_CIM_OVFL_ERROR 4 -#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR) -#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U) - -#define S_TP_FRAMING_ERROR 3 -#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR) -#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U) - -#define S_SGE_FRAMING_ERROR 2 -#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR) -#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U) - -#define S_CIM_FRAMING_ERROR 1 -#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR) -#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U) - -#define S_ZERO_SWITCH_ERROR 0 -#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR) -#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U) - -#define A_CPL_INTR_CAUSE 0x654 - -#define A_CPL_MAP_TBL_DATA 0x65c - -#define A_SMB_GLOBAL_TIME_CFG 0x660 - -#define A_I2C_CFG 0x6a0 - -#define S_I2C_CLKDIV 0 -#define M_I2C_CLKDIV 0xfff -#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV) - -#define A_MI1_CFG 0x6b0 - -#define S_CLKDIV 5 -#define M_CLKDIV 0xff -#define V_CLKDIV(x) ((x) << S_CLKDIV) - -#define 
S_ST 3 - -#define M_ST 0x3 - -#define V_ST(x) ((x) << S_ST) - -#define G_ST(x) (((x) >> S_ST) & M_ST) - -#define S_PREEN 2 -#define V_PREEN(x) ((x) << S_PREEN) -#define F_PREEN V_PREEN(1U) - -#define S_MDIINV 1 -#define V_MDIINV(x) ((x) << S_MDIINV) -#define F_MDIINV V_MDIINV(1U) - -#define S_MDIEN 0 -#define V_MDIEN(x) ((x) << S_MDIEN) -#define F_MDIEN V_MDIEN(1U) - -#define A_MI1_ADDR 0x6b4 - -#define S_PHYADDR 5 -#define M_PHYADDR 0x1f -#define V_PHYADDR(x) ((x) << S_PHYADDR) - -#define S_REGADDR 0 -#define M_REGADDR 0x1f -#define V_REGADDR(x) ((x) << S_REGADDR) - -#define A_MI1_DATA 0x6b8 - -#define A_MI1_OP 0x6bc - -#define S_MDI_OP 0 -#define M_MDI_OP 0x3 -#define V_MDI_OP(x) ((x) << S_MDI_OP) - -#define A_SF_DATA 0x6d8 - -#define A_SF_OP 0x6dc - -#define S_BYTECNT 1 -#define M_BYTECNT 0x3 -#define V_BYTECNT(x) ((x) << S_BYTECNT) - -#define A_PL_INT_ENABLE0 0x6e0 - -#define S_T3DBG 23 -#define V_T3DBG(x) ((x) << S_T3DBG) -#define F_T3DBG V_T3DBG(1U) - -#define S_XGMAC0_1 20 -#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1) -#define F_XGMAC0_1 V_XGMAC0_1(1U) - -#define S_XGMAC0_0 19 -#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0) -#define F_XGMAC0_0 V_XGMAC0_0(1U) - -#define S_MC5A 18 -#define V_MC5A(x) ((x) << S_MC5A) -#define F_MC5A V_MC5A(1U) - -#define S_CPL_SWITCH 12 -#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH) -#define F_CPL_SWITCH V_CPL_SWITCH(1U) - -#define S_MPS0 11 -#define V_MPS0(x) ((x) << S_MPS0) -#define F_MPS0 V_MPS0(1U) - -#define S_PM1_TX 10 -#define V_PM1_TX(x) ((x) << S_PM1_TX) -#define F_PM1_TX V_PM1_TX(1U) - -#define S_PM1_RX 9 -#define V_PM1_RX(x) ((x) << S_PM1_RX) -#define F_PM1_RX V_PM1_RX(1U) - -#define S_ULP2_TX 8 -#define V_ULP2_TX(x) ((x) << S_ULP2_TX) -#define F_ULP2_TX V_ULP2_TX(1U) - -#define S_ULP2_RX 7 -#define V_ULP2_RX(x) ((x) << S_ULP2_RX) -#define F_ULP2_RX V_ULP2_RX(1U) - -#define S_TP1 6 -#define V_TP1(x) ((x) << S_TP1) -#define F_TP1 V_TP1(1U) - -#define S_CIM 5 -#define V_CIM(x) ((x) << S_CIM) -#define F_CIM V_CIM(1U) - -#define S_MC7_CM 4 -#define V_MC7_CM(x) ((x) << S_MC7_CM) -#define F_MC7_CM V_MC7_CM(1U) - -#define S_MC7_PMTX 3 -#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX) -#define F_MC7_PMTX V_MC7_PMTX(1U) - -#define S_MC7_PMRX 2 -#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX) -#define F_MC7_PMRX V_MC7_PMRX(1U) - -#define S_PCIM0 1 -#define V_PCIM0(x) ((x) << S_PCIM0) -#define F_PCIM0 V_PCIM0(1U) - -#define S_SGE3 0 -#define V_SGE3(x) ((x) << S_SGE3) -#define F_SGE3 V_SGE3(1U) - -#define A_PL_INT_CAUSE0 0x6e4 - -#define A_PL_RST 0x6f0 - -#define S_FATALPERREN 4 -#define V_FATALPERREN(x) ((x) << S_FATALPERREN) -#define F_FATALPERREN V_FATALPERREN(1U) - -#define S_CRSTWRM 1 -#define V_CRSTWRM(x) ((x) << S_CRSTWRM) -#define F_CRSTWRM V_CRSTWRM(1U) - -#define A_PL_REV 0x6f4 - -#define A_PL_CLI 0x6f8 - -#define A_MC5_DB_CONFIG 0x704 - -#define S_TMTYPEHI 30 -#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI) -#define F_TMTYPEHI V_TMTYPEHI(1U) - -#define S_TMPARTSIZE 28 -#define M_TMPARTSIZE 0x3 -#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE) -#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE) - -#define S_TMTYPE 26 -#define M_TMTYPE 0x3 -#define V_TMTYPE(x) ((x) << S_TMTYPE) -#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE) - -#define S_COMPEN 17 -#define V_COMPEN(x) ((x) << S_COMPEN) -#define F_COMPEN V_COMPEN(1U) - -#define S_PRTYEN 6 -#define V_PRTYEN(x) ((x) << S_PRTYEN) -#define F_PRTYEN V_PRTYEN(1U) - -#define S_MBUSEN 5 -#define V_MBUSEN(x) ((x) << S_MBUSEN) -#define F_MBUSEN V_MBUSEN(1U) - -#define S_DBGIEN 4 -#define V_DBGIEN(x) ((x) << S_DBGIEN) 
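Because each F_* constant is a single pre-shifted bit, block-level masks are built by OR-ing them together. An illustrative (not canonical) write of the top-level interrupt enable defined above, using an arbitrary subset of the per-module bits:

	/* Unmask a subset of top-level interrupt sources; the selection
	 * here is illustrative only. */
	t3_write_reg(adapter, A_PL_INT_ENABLE0,
		     F_SGE3 | F_CIM | F_MC7_PMRX | F_MC7_PMTX | F_MC7_CM);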
-#define F_DBGIEN V_DBGIEN(1U) - -#define S_TMRDY 2 -#define V_TMRDY(x) ((x) << S_TMRDY) -#define F_TMRDY V_TMRDY(1U) - -#define S_TMRST 1 -#define V_TMRST(x) ((x) << S_TMRST) -#define F_TMRST V_TMRST(1U) - -#define S_TMMODE 0 -#define V_TMMODE(x) ((x) << S_TMMODE) -#define F_TMMODE V_TMMODE(1U) - -#define F_TMMODE V_TMMODE(1U) - -#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c - -#define A_MC5_DB_FILTER_TABLE 0x710 - -#define A_MC5_DB_SERVER_INDEX 0x714 - -#define A_MC5_DB_RSP_LATENCY 0x720 - -#define S_RDLAT 16 -#define M_RDLAT 0x1f -#define V_RDLAT(x) ((x) << S_RDLAT) - -#define S_LRNLAT 8 -#define M_LRNLAT 0x1f -#define V_LRNLAT(x) ((x) << S_LRNLAT) - -#define S_SRCHLAT 0 -#define M_SRCHLAT 0x1f -#define V_SRCHLAT(x) ((x) << S_SRCHLAT) - -#define A_MC5_DB_PART_ID_INDEX 0x72c - -#define A_MC5_DB_INT_ENABLE 0x740 - -#define S_DELACTEMPTY 18 -#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY) -#define F_DELACTEMPTY V_DELACTEMPTY(1U) - -#define S_DISPQPARERR 17 -#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR) -#define F_DISPQPARERR V_DISPQPARERR(1U) - -#define S_REQQPARERR 16 -#define V_REQQPARERR(x) ((x) << S_REQQPARERR) -#define F_REQQPARERR V_REQQPARERR(1U) - -#define S_UNKNOWNCMD 15 -#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD) -#define F_UNKNOWNCMD V_UNKNOWNCMD(1U) - -#define S_NFASRCHFAIL 8 -#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL) -#define F_NFASRCHFAIL V_NFASRCHFAIL(1U) - -#define S_ACTRGNFULL 7 -#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL) -#define F_ACTRGNFULL V_ACTRGNFULL(1U) - -#define S_PARITYERR 6 -#define V_PARITYERR(x) ((x) << S_PARITYERR) -#define F_PARITYERR V_PARITYERR(1U) - -#define A_MC5_DB_INT_CAUSE 0x744 - -#define A_MC5_DB_DBGI_CONFIG 0x774 - -#define A_MC5_DB_DBGI_REQ_CMD 0x778 - -#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c - -#define A_MC5_DB_DBGI_REQ_ADDR1 0x780 - -#define A_MC5_DB_DBGI_REQ_ADDR2 0x784 - -#define A_MC5_DB_DBGI_REQ_DATA0 0x788 - -#define A_MC5_DB_DBGI_REQ_DATA1 0x78c - -#define A_MC5_DB_DBGI_REQ_DATA2 0x790 - -#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0 - -#define S_DBGIRSPVALID 0 -#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID) -#define F_DBGIRSPVALID V_DBGIRSPVALID(1U) - -#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4 - -#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8 - -#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc - -#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc - -#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0 - -#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4 - -#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8 - -#define A_MC5_DB_SYN_SRCH_CMD 0x7dc - -#define A_MC5_DB_SYN_LRN_CMD 0x7e0 - -#define A_MC5_DB_ACK_SRCH_CMD 0x7e4 - -#define A_MC5_DB_ACK_LRN_CMD 0x7e8 - -#define A_MC5_DB_ILOOKUP_CMD 0x7ec - -#define A_MC5_DB_ELOOKUP_CMD 0x7f0 - -#define A_MC5_DB_DATA_WRITE_CMD 0x7f4 - -#define A_MC5_DB_DATA_READ_CMD 0x7f8 - -#define XGMAC0_0_BASE_ADDR 0x800 - -#define A_XGM_TX_CTRL 0x800 - -#define S_TXEN 0 -#define V_TXEN(x) ((x) << S_TXEN) -#define F_TXEN V_TXEN(1U) - -#define A_XGM_TX_CFG 0x804 - -#define S_TXPAUSEEN 0 -#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN) -#define F_TXPAUSEEN V_TXPAUSEEN(1U) - -#define A_XGM_TX_PAUSE_QUANTA 0x808 - -#define A_XGM_RX_CTRL 0x80c - -#define S_RXEN 0 -#define V_RXEN(x) ((x) << S_RXEN) -#define F_RXEN V_RXEN(1U) - -#define A_XGM_RX_CFG 0x810 - -#define S_DISPAUSEFRAMES 9 -#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES) -#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U) - -#define S_EN1536BFRAMES 8 -#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES) -#define F_EN1536BFRAMES V_EN1536BFRAMES(1U) - -#define S_ENJUMBO 7 -#define V_ENJUMBO(x) ((x) << S_ENJUMBO) -#define 
F_ENJUMBO V_ENJUMBO(1U) - -#define S_RMFCS 6 -#define V_RMFCS(x) ((x) << S_RMFCS) -#define F_RMFCS V_RMFCS(1U) - -#define S_ENHASHMCAST 2 -#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST) -#define F_ENHASHMCAST V_ENHASHMCAST(1U) - -#define S_COPYALLFRAMES 0 -#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES) -#define F_COPYALLFRAMES V_COPYALLFRAMES(1U) - -#define S_DISBCAST 1 -#define V_DISBCAST(x) ((x) << S_DISBCAST) -#define F_DISBCAST V_DISBCAST(1U) - -#define A_XGM_RX_HASH_LOW 0x814 - -#define A_XGM_RX_HASH_HIGH 0x818 - -#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c - -#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820 - -#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824 - -#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c - -#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834 - -#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c - -#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844 - -#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c - -#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854 - -#define A_XGM_INT_STATUS 0x86c - -#define S_LINKFAULTCHANGE 9 -#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE) -#define F_LINKFAULTCHANGE V_LINKFAULTCHANGE(1U) - -#define A_XGM_XGM_INT_ENABLE 0x874 -#define A_XGM_XGM_INT_DISABLE 0x878 - -#define A_XGM_STAT_CTRL 0x880 - -#define S_CLRSTATS 2 -#define V_CLRSTATS(x) ((x) << S_CLRSTATS) -#define F_CLRSTATS V_CLRSTATS(1U) - -#define A_XGM_RXFIFO_CFG 0x884 - -#define S_RXFIFO_EMPTY 31 -#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY) -#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U) - -#define S_RXFIFOPAUSEHWM 17 -#define M_RXFIFOPAUSEHWM 0xfff - -#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM) - -#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM) - -#define S_RXFIFOPAUSELWM 5 -#define M_RXFIFOPAUSELWM 0xfff - -#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM) - -#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM) - -#define S_RXSTRFRWRD 1 -#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD) -#define F_RXSTRFRWRD V_RXSTRFRWRD(1U) - -#define S_DISERRFRAMES 0 -#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES) -#define F_DISERRFRAMES V_DISERRFRAMES(1U) - -#define A_XGM_TXFIFO_CFG 0x888 - -#define S_UNDERUNFIX 22 -#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX) -#define F_UNDERUNFIX V_UNDERUNFIX(1U) - -#define S_TXIPG 13 -#define M_TXIPG 0xff -#define V_TXIPG(x) ((x) << S_TXIPG) -#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG) - -#define S_TXFIFOTHRESH 4 -#define M_TXFIFOTHRESH 0x1ff - -#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH) - -#define S_ENDROPPKT 21 -#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT) -#define F_ENDROPPKT V_ENDROPPKT(1U) - -#define A_XGM_SERDES_CTRL 0x890 -#define A_XGM_SERDES_CTRL0 0x8e0 - -#define S_SERDESRESET_ 24 -#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_) -#define F_SERDESRESET_ V_SERDESRESET_(1U) - -#define S_RXENABLE 4 -#define V_RXENABLE(x) ((x) << S_RXENABLE) -#define F_RXENABLE V_RXENABLE(1U) - -#define S_TXENABLE 3 -#define V_TXENABLE(x) ((x) << S_TXENABLE) -#define F_TXENABLE V_TXENABLE(1U) - -#define A_XGM_PAUSE_TIMER 0x890 - -#define A_XGM_RGMII_IMP 0x89c - -#define S_XGM_IMPSETUPDATE 6 -#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE) -#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U) - -#define S_RGMIIIMPPD 3 -#define M_RGMIIIMPPD 0x7 -#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD) - -#define S_RGMIIIMPPU 0 -#define M_RGMIIIMPPU 0x7 -#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU) - -#define S_CALRESET 8 -#define V_CALRESET(x) ((x) << S_CALRESET) -#define F_CALRESET V_CALRESET(1U) - -#define S_CALUPDATE 7 -#define V_CALUPDATE(x) ((x) << 
S_CALUPDATE) -#define F_CALUPDATE V_CALUPDATE(1U) - -#define A_XGM_XAUI_IMP 0x8a0 - -#define S_CALBUSY 31 -#define V_CALBUSY(x) ((x) << S_CALBUSY) -#define F_CALBUSY V_CALBUSY(1U) - -#define S_XGM_CALFAULT 29 -#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT) -#define F_XGM_CALFAULT V_XGM_CALFAULT(1U) - -#define S_CALIMP 24 -#define M_CALIMP 0x1f -#define V_CALIMP(x) ((x) << S_CALIMP) -#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP) - -#define S_XAUIIMP 0 -#define M_XAUIIMP 0x7 -#define V_XAUIIMP(x) ((x) << S_XAUIIMP) - -#define A_XGM_RX_MAX_PKT_SIZE 0x8a8 - -#define S_RXMAXFRAMERSIZE 17 -#define M_RXMAXFRAMERSIZE 0x3fff -#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE) -#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE) - -#define S_RXENFRAMER 14 -#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER) -#define F_RXENFRAMER V_RXENFRAMER(1U) - -#define S_RXMAXPKTSIZE 0 -#define M_RXMAXPKTSIZE 0x3fff -#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE) -#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE) - -#define A_XGM_RESET_CTRL 0x8ac - -#define S_XGMAC_STOP_EN 4 -#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN) -#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U) - -#define S_XG2G_RESET_ 3 -#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_) -#define F_XG2G_RESET_ V_XG2G_RESET_(1U) - -#define S_RGMII_RESET_ 2 -#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_) -#define F_RGMII_RESET_ V_RGMII_RESET_(1U) - -#define S_PCS_RESET_ 1 -#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_) -#define F_PCS_RESET_ V_PCS_RESET_(1U) - -#define S_MAC_RESET_ 0 -#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_) -#define F_MAC_RESET_ V_MAC_RESET_(1U) - -#define A_XGM_PORT_CFG 0x8b8 - -#define S_CLKDIVRESET_ 3 -#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_) -#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U) - -#define S_PORTSPEED 1 -#define M_PORTSPEED 0x3 - -#define V_PORTSPEED(x) ((x) << S_PORTSPEED) - -#define S_ENRGMII 0 -#define V_ENRGMII(x) ((x) << S_ENRGMII) -#define F_ENRGMII V_ENRGMII(1U) - -#define A_XGM_INT_ENABLE 0x8d4 - -#define S_TXFIFO_PRTY_ERR 17 -#define M_TXFIFO_PRTY_ERR 0x7 - -#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR) - -#define S_RXFIFO_PRTY_ERR 14 -#define M_RXFIFO_PRTY_ERR 0x7 - -#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR) - -#define S_TXFIFO_UNDERRUN 13 -#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN) -#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U) - -#define S_RXFIFO_OVERFLOW 12 -#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW) -#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U) - -#define S_SERDES_LOS 4 -#define M_SERDES_LOS 0xf - -#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS) - -#define S_XAUIPCSCTCERR 3 -#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR) -#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U) - -#define S_XAUIPCSALIGNCHANGE 2 -#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE) -#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U) - -#define S_XGM_INT 0 -#define V_XGM_INT(x) ((x) << S_XGM_INT) -#define F_XGM_INT V_XGM_INT(1U) - -#define A_XGM_INT_CAUSE 0x8d8 - -#define A_XGM_XAUI_ACT_CTRL 0x8dc - -#define S_TXACTENABLE 1 -#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE) -#define F_TXACTENABLE V_TXACTENABLE(1U) - -#define A_XGM_SERDES_CTRL0 0x8e0 - -#define S_RESET3 23 -#define V_RESET3(x) ((x) << S_RESET3) -#define F_RESET3 V_RESET3(1U) - -#define S_RESET2 22 -#define V_RESET2(x) ((x) << S_RESET2) -#define F_RESET2 V_RESET2(1U) - -#define S_RESET1 21 -#define V_RESET1(x) ((x) << S_RESET1) -#define F_RESET1 
V_RESET1(1U) - -#define S_RESET0 20 -#define V_RESET0(x) ((x) << S_RESET0) -#define F_RESET0 V_RESET0(1U) - -#define S_PWRDN3 19 -#define V_PWRDN3(x) ((x) << S_PWRDN3) -#define F_PWRDN3 V_PWRDN3(1U) - -#define S_PWRDN2 18 -#define V_PWRDN2(x) ((x) << S_PWRDN2) -#define F_PWRDN2 V_PWRDN2(1U) - -#define S_PWRDN1 17 -#define V_PWRDN1(x) ((x) << S_PWRDN1) -#define F_PWRDN1 V_PWRDN1(1U) - -#define S_PWRDN0 16 -#define V_PWRDN0(x) ((x) << S_PWRDN0) -#define F_PWRDN0 V_PWRDN0(1U) - -#define S_RESETPLL23 15 -#define V_RESETPLL23(x) ((x) << S_RESETPLL23) -#define F_RESETPLL23 V_RESETPLL23(1U) - -#define S_RESETPLL01 14 -#define V_RESETPLL01(x) ((x) << S_RESETPLL01) -#define F_RESETPLL01 V_RESETPLL01(1U) - -#define A_XGM_SERDES_STAT0 0x8f0 -#define A_XGM_SERDES_STAT1 0x8f4 -#define A_XGM_SERDES_STAT2 0x8f8 - -#define S_LOWSIG0 0 -#define V_LOWSIG0(x) ((x) << S_LOWSIG0) -#define F_LOWSIG0 V_LOWSIG0(1U) - -#define A_XGM_SERDES_STAT3 0x8fc - -#define A_XGM_STAT_TX_BYTE_LOW 0x900 - -#define A_XGM_STAT_TX_BYTE_HIGH 0x904 - -#define A_XGM_STAT_TX_FRAME_LOW 0x908 - -#define A_XGM_STAT_TX_FRAME_HIGH 0x90c - -#define A_XGM_STAT_TX_BCAST 0x910 - -#define A_XGM_STAT_TX_MCAST 0x914 - -#define A_XGM_STAT_TX_PAUSE 0x918 - -#define A_XGM_STAT_TX_64B_FRAMES 0x91c - -#define A_XGM_STAT_TX_65_127B_FRAMES 0x920 - -#define A_XGM_STAT_TX_128_255B_FRAMES 0x924 - -#define A_XGM_STAT_TX_256_511B_FRAMES 0x928 - -#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c - -#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930 - -#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934 - -#define A_XGM_STAT_TX_ERR_FRAMES 0x938 - -#define A_XGM_STAT_RX_BYTES_LOW 0x93c - -#define A_XGM_STAT_RX_BYTES_HIGH 0x940 - -#define A_XGM_STAT_RX_FRAMES_LOW 0x944 - -#define A_XGM_STAT_RX_FRAMES_HIGH 0x948 - -#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c - -#define A_XGM_STAT_RX_MCAST_FRAMES 0x950 - -#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954 - -#define A_XGM_STAT_RX_64B_FRAMES 0x958 - -#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c - -#define A_XGM_STAT_RX_128_255B_FRAMES 0x960 - -#define A_XGM_STAT_RX_256_511B_FRAMES 0x964 - -#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968 - -#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c - -#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970 - -#define A_XGM_STAT_RX_SHORT_FRAMES 0x974 - -#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978 - -#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c - -#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980 - -#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984 - -#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988 - -#define A_XGM_SERDES_STATUS0 0x98c - -#define A_XGM_SERDES_STATUS1 0x990 - -#define S_CMULOCK 31 -#define V_CMULOCK(x) ((x) << S_CMULOCK) -#define F_CMULOCK V_CMULOCK(1U) - -#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4 - -#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8 - -#define S_TXSPI4SOPCNT 16 -#define M_TXSPI4SOPCNT 0xffff -#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT) -#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT) - -#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac - -#define XGMAC0_1_BASE_ADDR 0xa00 diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c deleted file mode 100644 index d6fa177..0000000 --- a/drivers/net/cxgb3/sge.c +++ /dev/null @@ -1,3303 +0,0 @@ -/* - * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "common.h" -#include "regs.h" -#include "sge_defs.h" -#include "t3_cpl.h" -#include "firmware_exports.h" -#include "cxgb3_offload.h" - -#define USE_GTS 0 - -#define SGE_RX_SM_BUF_SIZE 1536 - -#define SGE_RX_COPY_THRES 256 -#define SGE_RX_PULL_LEN 128 - -#define SGE_PG_RSVD SMP_CACHE_BYTES -/* - * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks. - * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs - * directly. - */ -#define FL0_PG_CHUNK_SIZE 2048 -#define FL0_PG_ORDER 0 -#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER) -#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192) -#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1) -#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER) - -#define SGE_RX_DROP_THRES 16 -#define RX_RECLAIM_PERIOD (HZ/4) - -/* - * Max number of Rx buffers we replenish at a time. - */ -#define MAX_RX_REFILL 16U -/* - * Period of the Tx buffer reclaim timer. This timer does not need to run - * frequently as Tx buffers are usually reclaimed by new Tx packets. - */ -#define TX_RECLAIM_PERIOD (HZ / 4) -#define TX_RECLAIM_TIMER_CHUNK 64U -#define TX_RECLAIM_CHUNK 16U - -/* WR size in bytes */ -#define WR_LEN (WR_FLITS * 8) - -/* - * Types of Tx queues in each queue set. Order here matters, do not change. 
- */ -enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL }; - -/* Values for sge_txq.flags */ -enum { - TXQ_RUNNING = 1 << 0, /* fetch engine is running */ - TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */ -}; - -struct tx_desc { - __be64 flit[TX_DESC_FLITS]; -}; - -struct rx_desc { - __be32 addr_lo; - __be32 len_gen; - __be32 gen2; - __be32 addr_hi; -}; - -struct tx_sw_desc { /* SW state per Tx descriptor */ - struct sk_buff *skb; - u8 eop; /* set if last descriptor for packet */ - u8 addr_idx; /* buffer index of first SGL entry in descriptor */ - u8 fragidx; /* first page fragment associated with descriptor */ - s8 sflit; /* start flit of first SGL entry in descriptor */ -}; - -struct rx_sw_desc { /* SW state per Rx descriptor */ - union { - struct sk_buff *skb; - struct fl_pg_chunk pg_chunk; - }; - DEFINE_DMA_UNMAP_ADDR(dma_addr); -}; - -struct rsp_desc { /* response queue descriptor */ - struct rss_header rss_hdr; - __be32 flags; - __be32 len_cq; - u8 imm_data[47]; - u8 intr_gen; -}; - -/* - * Holds unmapping information for Tx packets that need deferred unmapping. - * This structure lives at skb->head and must be allocated by callers. - */ -struct deferred_unmap_info { - struct pci_dev *pdev; - dma_addr_t addr[MAX_SKB_FRAGS + 1]; -}; - -/* - * Maps a number of flits to the number of Tx descriptors that can hold them. - * The formula is - * - * desc = 1 + (flits - 2) / (WR_FLITS - 1). - * - * HW allows up to 4 descriptors to be combined into a WR. - */ -static u8 flit_desc_map[] = { - 0, -#if SGE_NUM_GENBITS == 1 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 -#elif SGE_NUM_GENBITS == 2 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, -#else -# error "SGE_NUM_GENBITS must be 1 or 2" -#endif -}; - -static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) -{ - return container_of(q, struct sge_qset, fl[qidx]); -} - -static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) -{ - return container_of(q, struct sge_qset, rspq); -} - -static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) -{ - return container_of(q, struct sge_qset, txq[qidx]); -} - -/** - * refill_rspq - replenish an SGE response queue - * @adapter: the adapter - * @q: the response queue to replenish - * @credits: how many new responses to make available - * - * Replenishes a response queue by making the supplied number of responses - * available to HW. - */ -static inline void refill_rspq(struct adapter *adapter, - const struct sge_rspq *q, unsigned int credits) -{ - rmb(); - t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN, - V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); -} - -/** - * need_skb_unmap - does the platform need unmapping of sk_buffs? - * - * Returns true if the platform needs sk_buff unmapping. The compiler - * optimizes away unnecessary code if this returns true. - */ -static inline int need_skb_unmap(void) -{ -#ifdef CONFIG_NEED_DMA_MAP_STATE - return 1; -#else - return 0; -#endif -} - -/** - * unmap_skb - unmap a packet main body and its page fragments - * @skb: the packet - * @q: the Tx queue containing Tx descriptors for the packet - * @cidx: index of Tx descriptor - * @pdev: the PCI device - * - * Unmap the main body of an sk_buff and its page fragments, if any. 
- * Because of the fairly complicated structure of our SGLs and the desire - * to conserve space for metadata, the information necessary to unmap an - * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx - * descriptors (the physical addresses of the various data buffers), and - * the SW descriptor state (assorted indices). The send functions - * initialize the indices for the first packet descriptor so we can unmap - * the buffers held in the first Tx descriptor here, and we have enough - * information at this point to set the state for the next Tx descriptor. - * - * Note that it is possible to clean up the first descriptor of a packet - * before the send routines have written the next descriptors, but this - * race does not cause any problem. We just end up writing the unmapping - * info for the descriptor first. - */ -static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, - unsigned int cidx, struct pci_dev *pdev) -{ - const struct sg_ent *sgp; - struct tx_sw_desc *d = &q->sdesc[cidx]; - int nfrags, frag_idx, curflit, j = d->addr_idx; - - sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; - frag_idx = d->fragidx; - - if (frag_idx == 0 && skb_headlen(skb)) { - pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), - skb_headlen(skb), PCI_DMA_TODEVICE); - j = 1; - } - - curflit = d->sflit + 1 + j; - nfrags = skb_shinfo(skb)->nr_frags; - - while (frag_idx < nfrags && curflit < WR_FLITS) { - pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), - skb_shinfo(skb)->frags[frag_idx].size, - PCI_DMA_TODEVICE); - j ^= 1; - if (j == 0) { - sgp++; - curflit++; - } - curflit++; - frag_idx++; - } - - if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */ - d = cidx + 1 == q->size ? q->sdesc : d + 1; - d->fragidx = frag_idx; - d->addr_idx = j; - d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */ - } -} - -/** - * free_tx_desc - reclaims Tx descriptors and their buffers - * @adapter: the adapter - * @q: the Tx queue to reclaim descriptors from - * @n: the number of descriptors to reclaim - * - * Reclaims Tx descriptors from an SGE Tx queue and frees the associated - * Tx buffers. Called with the Tx queue lock held. - */ -static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, - unsigned int n) -{ - struct tx_sw_desc *d; - struct pci_dev *pdev = adapter->pdev; - unsigned int cidx = q->cidx; - - const int need_unmap = need_skb_unmap() && - q->cntxt_id >= FW_TUNNEL_SGEEC_START; - - d = &q->sdesc[cidx]; - while (n--) { - if (d->skb) { /* an SGL is present */ - if (need_unmap) - unmap_skb(d->skb, q, cidx, pdev); - if (d->eop) { - kfree_skb(d->skb); - d->skb = NULL; - } - } - ++d; - if (++cidx == q->size) { - cidx = 0; - d = q->sdesc; - } - } - q->cidx = cidx; -} - -/** - * reclaim_completed_tx - reclaims completed Tx descriptors - * @adapter: the adapter - * @q: the Tx queue to reclaim completed descriptors from - * @chunk: maximum number of descriptors to reclaim - * - * Reclaims Tx descriptors that the SGE has indicated it has processed, - * and frees the associated buffers if possible. Called with the Tx - * queue's lock held. 
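- * Returns the number of descriptors the SGE has completed but that
- * remain unreclaimed after this call (non-zero when @chunk capped
- * the amount of work done).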
- */ -static inline unsigned int reclaim_completed_tx(struct adapter *adapter, - struct sge_txq *q, - unsigned int chunk) -{ - unsigned int reclaim = q->processed - q->cleaned; - - reclaim = min(chunk, reclaim); - if (reclaim) { - free_tx_desc(adapter, q, reclaim); - q->cleaned += reclaim; - q->in_use -= reclaim; - } - return q->processed - q->cleaned; -} - -/** - * should_restart_tx - are there enough resources to restart a Tx queue? - * @q: the Tx queue - * - * Checks if there are enough descriptors to restart a suspended Tx queue. - */ -static inline int should_restart_tx(const struct sge_txq *q) -{ - unsigned int r = q->processed - q->cleaned; - - return q->in_use - r < (q->size >> 1); -} - -static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, - struct rx_sw_desc *d) -{ - if (q->use_pages && d->pg_chunk.page) { - (*d->pg_chunk.p_cnt)--; - if (!*d->pg_chunk.p_cnt) - pci_unmap_page(pdev, - d->pg_chunk.mapping, - q->alloc_size, PCI_DMA_FROMDEVICE); - - put_page(d->pg_chunk.page); - d->pg_chunk.page = NULL; - } else { - pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr), - q->buf_size, PCI_DMA_FROMDEVICE); - kfree_skb(d->skb); - d->skb = NULL; - } -} - -/** - * free_rx_bufs - free the Rx buffers on an SGE free list - * @pdev: the PCI device associated with the adapter - * @rxq: the SGE free list to clean up - * - * Release the buffers on an SGE free-buffer Rx queue. HW fetching from - * this queue should be stopped before calling this function. - */ -static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) -{ - unsigned int cidx = q->cidx; - - while (q->credits--) { - struct rx_sw_desc *d = &q->sdesc[cidx]; - - - clear_rx_desc(pdev, q, d); - if (++cidx == q->size) - cidx = 0; - } - - if (q->pg_chunk.page) { - __free_pages(q->pg_chunk.page, q->order); - q->pg_chunk.page = NULL; - } -} - -/** - * add_one_rx_buf - add a packet buffer to a free-buffer list - * @va: buffer start VA - * @len: the buffer length - * @d: the HW Rx descriptor to write - * @sd: the SW Rx descriptor to write - * @gen: the generation bit value - * @pdev: the PCI device associated with the adapter - * - * Add a buffer of the given length to the supplied HW and SW Rx - * descriptors. 
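- * Returns 0 on success or -ENOMEM if the DMA mapping fails.  The
- * wmb() below publishes the buffer address before the generation
- * bits that mark the descriptor valid to HW.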
- */ -static inline int add_one_rx_buf(void *va, unsigned int len, - struct rx_desc *d, struct rx_sw_desc *sd, - unsigned int gen, struct pci_dev *pdev) -{ - dma_addr_t mapping; - - mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(pdev, mapping))) - return -ENOMEM; - - dma_unmap_addr_set(sd, dma_addr, mapping); - - d->addr_lo = cpu_to_be32(mapping); - d->addr_hi = cpu_to_be32((u64) mapping >> 32); - wmb(); - d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); - d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); - return 0; -} - -static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d, - unsigned int gen) -{ - d->addr_lo = cpu_to_be32(mapping); - d->addr_hi = cpu_to_be32((u64) mapping >> 32); - wmb(); - d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); - d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); - return 0; -} - -static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, - struct rx_sw_desc *sd, gfp_t gfp, - unsigned int order) -{ - if (!q->pg_chunk.page) { - dma_addr_t mapping; - - q->pg_chunk.page = alloc_pages(gfp, order); - if (unlikely(!q->pg_chunk.page)) - return -ENOMEM; - q->pg_chunk.va = page_address(q->pg_chunk.page); - q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - - SGE_PG_RSVD; - q->pg_chunk.offset = 0; - mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, - 0, q->alloc_size, PCI_DMA_FROMDEVICE); - q->pg_chunk.mapping = mapping; - } - sd->pg_chunk = q->pg_chunk; - - prefetch(sd->pg_chunk.p_cnt); - - q->pg_chunk.offset += q->buf_size; - if (q->pg_chunk.offset == (PAGE_SIZE << order)) - q->pg_chunk.page = NULL; - else { - q->pg_chunk.va += q->buf_size; - get_page(q->pg_chunk.page); - } - - if (sd->pg_chunk.offset == 0) - *sd->pg_chunk.p_cnt = 1; - else - *sd->pg_chunk.p_cnt += 1; - - return 0; -} - -static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) -{ - if (q->pend_cred >= q->credits / 4) { - q->pend_cred = 0; - wmb(); - t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); - } -} - -/** - * refill_fl - refill an SGE free-buffer list - * @adapter: the adapter - * @q: the free-list to refill - * @n: the number of new buffers to allocate - * @gfp: the gfp flags for allocating new buffers - * - * (Re)populate an SGE free-buffer list with up to @n new packet buffers, - * allocated with the supplied gfp flags. The caller must assure that - * @n does not exceed the queue's capacity. 
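alloc_pg_chunk() above carves one high-order page into buf_size pieces and keeps a reference count in the page's reserved tail, so the single DMA mapping is torn down only when the last piece is released. A minimal userspace model of that carving (sizes and names are illustrative; malloc/free stand in for alloc_pages plus pci_map_page, and error handling is omitted):

#include <stdlib.h>

#define PG_SIZE  4096
#define CHUNK_SZ 1024
#define RSVD     sizeof(int)		/* stand-in for SGE_PG_RSVD */

struct pg_chunk {
	char *va;
	int  *p_cnt;		/* shared per-page reference count */
};

static char  *cur_page;
static size_t cur_off = PG_SIZE;	/* force a page on first use */

static struct pg_chunk get_chunk(void)
{
	struct pg_chunk c;

	if (cur_off + CHUNK_SZ > PG_SIZE - RSVD) {
		cur_page = malloc(PG_SIZE);	/* one "mapping" per page */
		cur_off = 0;
		*(int *)(cur_page + PG_SIZE - RSVD) = 0;
	}
	c.va = cur_page + cur_off;
	c.p_cnt = (int *)(cur_page + PG_SIZE - RSVD);
	(*c.p_cnt)++;			/* chunk holds a page reference */
	cur_off += CHUNK_SZ;
	return c;
}

static void put_chunk(struct pg_chunk c)
{
	if (--(*c.p_cnt) == 0)		/* last chunk frees the page */
		free((char *)(c.p_cnt + 1) - PG_SIZE);
}

int main(void)
{
	struct pg_chunk a = get_chunk(), b = get_chunk();

	put_chunk(a);
	put_chunk(b);			/* page freed here */
	return 0;
}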
- */ -static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) -{ - struct rx_sw_desc *sd = &q->sdesc[q->pidx]; - struct rx_desc *d = &q->desc[q->pidx]; - unsigned int count = 0; - - while (n--) { - dma_addr_t mapping; - int err; - - if (q->use_pages) { - if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, - q->order))) { -nomem: q->alloc_failed++; - break; - } - mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; - dma_unmap_addr_set(sd, dma_addr, mapping); - - add_one_rx_chunk(mapping, d, q->gen); - pci_dma_sync_single_for_device(adap->pdev, mapping, - q->buf_size - SGE_PG_RSVD, - PCI_DMA_FROMDEVICE); - } else { - void *buf_start; - - struct sk_buff *skb = alloc_skb(q->buf_size, gfp); - if (!skb) - goto nomem; - - sd->skb = skb; - buf_start = skb->data; - err = add_one_rx_buf(buf_start, q->buf_size, d, sd, - q->gen, adap->pdev); - if (unlikely(err)) { - clear_rx_desc(adap->pdev, q, sd); - break; - } - } - - d++; - sd++; - if (++q->pidx == q->size) { - q->pidx = 0; - q->gen ^= 1; - sd = q->sdesc; - d = q->desc; - } - count++; - } - - q->credits += count; - q->pend_cred += count; - ring_fl_db(adap, q); - - return count; -} - -static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) -{ - refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits), - GFP_ATOMIC | __GFP_COMP); -} - -/** - * recycle_rx_buf - recycle a receive buffer - * @adapter: the adapter - * @q: the SGE free list - * @idx: index of buffer to recycle - * - * Recycles the specified buffer on the given free list by adding it at - * the next available slot on the list. - */ -static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, - unsigned int idx) -{ - struct rx_desc *from = &q->desc[idx]; - struct rx_desc *to = &q->desc[q->pidx]; - - q->sdesc[q->pidx] = q->sdesc[idx]; - to->addr_lo = from->addr_lo; /* already big endian */ - to->addr_hi = from->addr_hi; /* likewise */ - wmb(); - to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); - to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); - - if (++q->pidx == q->size) { - q->pidx = 0; - q->gen ^= 1; - } - - q->credits++; - q->pend_cred++; - ring_fl_db(adap, q); -} - -/** - * alloc_ring - allocate resources for an SGE descriptor ring - * @pdev: the PCI device - * @nelem: the number of descriptors - * @elem_size: the size of each descriptor - * @sw_size: the size of the SW state associated with each ring element - * @phys: the physical address of the allocated ring - * @metadata: address of the array holding the SW state for the ring - * - * Allocates resources for an SGE descriptor ring, such as Tx queues, - * free buffer lists, or response queues. Each SGE ring requires - * space for its HW descriptors plus, optionally, space for the SW state - * associated with each HW entry (the metadata). The function returns - * three values: the virtual address for the HW ring (the return value - * of the function), the physical address of the HW ring, and the address - * of the SW ring. 
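refill_fl() and recycle_rx_buf() stamp each descriptor with the free list's current generation and flip q->gen on every wrap; a slot is treated as valid only while its stored generation matches the expected one, so no read index ever has to be exchanged with the peer. A standalone model of that handshake (plain C, memory barriers omitted):

#include <stdio.h>

#define RING_SIZE 4

struct desc {
	int data;
	unsigned int gen;		/* validity stamp, flips per lap */
};

int main(void)
{
	struct desc ring[RING_SIZE] = { { 0 } };
	unsigned int pidx = 0, pgen = 1;	/* producer state */
	unsigned int cidx = 0, cgen = 1;	/* consumer state */
	int i;

	for (i = 0; i < RING_SIZE; i++) {	/* one full producer lap */
		ring[pidx].data = i;
		ring[pidx].gen = pgen;	/* the gen field is written last */
		if (++pidx == RING_SIZE) {
			pidx = 0;
			pgen ^= 1;	/* next lap uses the flipped gen */
		}
	}

	/* The consumer drains exactly the four valid slots and then
	 * stops: after its own wrap it expects gen 0, but slot 0 still
	 * carries gen 1 from the previous lap.
	 */
	while (ring[cidx].gen == cgen) {
		printf("consumed %d\n", ring[cidx].data);
		if (++cidx == RING_SIZE) {
			cidx = 0;
			cgen ^= 1;
		}
	}
	return 0;
}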
- */ -static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, - size_t sw_size, dma_addr_t * phys, void *metadata) -{ - size_t len = nelem * elem_size; - void *s = NULL; - void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); - - if (!p) - return NULL; - if (sw_size && metadata) { - s = kcalloc(nelem, sw_size, GFP_KERNEL); - - if (!s) { - dma_free_coherent(&pdev->dev, len, p, *phys); - return NULL; - } - *(void **)metadata = s; - } - memset(p, 0, len); - return p; -} - -/** - * t3_reset_qset - reset a sge qset - * @q: the queue set - * - * Reset the qset structure. - * the NAPI structure is preserved in the event of - * the qset's reincarnation, for example during EEH recovery. - */ -static void t3_reset_qset(struct sge_qset *q) -{ - if (q->adap && - !(q->adap->flags & NAPI_INIT)) { - memset(q, 0, sizeof(*q)); - return; - } - - q->adap = NULL; - memset(&q->rspq, 0, sizeof(q->rspq)); - memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); - memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); - q->txq_stopped = 0; - q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ - q->rx_reclaim_timer.function = NULL; - q->nomem = 0; - napi_free_frags(&q->napi); -} - - -/** - * free_qset - free the resources of an SGE queue set - * @adapter: the adapter owning the queue set - * @q: the queue set - * - * Release the HW and SW resources associated with an SGE queue set, such - * as HW contexts, packet buffers, and descriptor rings. Traffic to the - * queue set must be quiesced prior to calling this. - */ -static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) -{ - int i; - struct pci_dev *pdev = adapter->pdev; - - for (i = 0; i < SGE_RXQ_PER_SET; ++i) - if (q->fl[i].desc) { - spin_lock_irq(&adapter->sge.reg_lock); - t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); - spin_unlock_irq(&adapter->sge.reg_lock); - free_rx_bufs(pdev, &q->fl[i]); - kfree(q->fl[i].sdesc); - dma_free_coherent(&pdev->dev, - q->fl[i].size * - sizeof(struct rx_desc), q->fl[i].desc, - q->fl[i].phys_addr); - } - - for (i = 0; i < SGE_TXQ_PER_SET; ++i) - if (q->txq[i].desc) { - spin_lock_irq(&adapter->sge.reg_lock); - t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); - spin_unlock_irq(&adapter->sge.reg_lock); - if (q->txq[i].sdesc) { - free_tx_desc(adapter, &q->txq[i], - q->txq[i].in_use); - kfree(q->txq[i].sdesc); - } - dma_free_coherent(&pdev->dev, - q->txq[i].size * - sizeof(struct tx_desc), - q->txq[i].desc, q->txq[i].phys_addr); - __skb_queue_purge(&q->txq[i].sendq); - } - - if (q->rspq.desc) { - spin_lock_irq(&adapter->sge.reg_lock); - t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); - spin_unlock_irq(&adapter->sge.reg_lock); - dma_free_coherent(&pdev->dev, - q->rspq.size * sizeof(struct rsp_desc), - q->rspq.desc, q->rspq.phys_addr); - } - - t3_reset_qset(q); -} - -/** - * init_qset_cntxt - initialize an SGE queue set context info - * @qs: the queue set - * @id: the queue set id - * - * Initializes the TIDs and context ids for the queues of a queue set. 
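For reference, callers are expected to hand alloc_ring() the address of their metadata pointer so the kcalloc'ed SW ring can be stored through it. A hypothetical caller illustrating that contract (struct and function names invented for illustration; error handling trimmed):

/* Sketch only: the HW ring comes back as the return value, the bus
 * address through @phys, and the SW shadow ring through @metadata.
 */
struct my_txq {
	struct tx_desc *desc;		/* HW descriptor ring (coherent) */
	struct tx_sw_desc *sdesc;	/* SW shadow ring (kcalloc'ed)   */
	dma_addr_t phys_addr;
	unsigned int size;
};

static int my_txq_alloc(struct pci_dev *pdev, struct my_txq *q)
{
	q->desc = alloc_ring(pdev, q->size, sizeof(struct tx_desc),
			     sizeof(struct tx_sw_desc), &q->phys_addr,
			     &q->sdesc);
	return q->desc ? 0 : -ENOMEM;
}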
- */ -static void init_qset_cntxt(struct sge_qset *qs, unsigned int id) -{ - qs->rspq.cntxt_id = id; - qs->fl[0].cntxt_id = 2 * id; - qs->fl[1].cntxt_id = 2 * id + 1; - qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; - qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; - qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; - qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; - qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; -} - -/** - * sgl_len - calculates the size of an SGL of the given capacity - * @n: the number of SGL entries - * - * Calculates the number of flits needed for a scatter/gather list that - * can hold the given number of entries. - */ -static inline unsigned int sgl_len(unsigned int n) -{ - /* alternatively: 3 * (n / 2) + 2 * (n & 1) */ - return (3 * n) / 2 + (n & 1); -} - -/** - * flits_to_desc - returns the num of Tx descriptors for the given flits - * @n: the number of flits - * - * Calculates the number of Tx descriptors needed for the supplied number - * of flits. - */ -static inline unsigned int flits_to_desc(unsigned int n) -{ - BUG_ON(n >= ARRAY_SIZE(flit_desc_map)); - return flit_desc_map[n]; -} - -/** - * get_packet - return the next ingress packet buffer from a free list - * @adap: the adapter that received the packet - * @fl: the SGE free list holding the packet - * @len: the packet length including any SGE padding - * @drop_thres: # of remaining buffers before we start dropping packets - * - * Get the next packet from a free list and complete setup of the - * sk_buff. If the packet is small we make a copy and recycle the - * original buffer, otherwise we use the original buffer itself. If a - * positive drop threshold is supplied packets are dropped and their - * buffers recycled if (a) the number of remaining buffers is under the - * threshold and the packet is too big to copy, or (b) the packet should - * be copied but there is no memory for the copy. - */ -static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, - unsigned int len, unsigned int drop_thres) -{ - struct sk_buff *skb = NULL; - struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; - - prefetch(sd->skb->data); - fl->credits--; - - if (len <= SGE_RX_COPY_THRES) { - skb = alloc_skb(len, GFP_ATOMIC); - if (likely(skb != NULL)) { - __skb_put(skb, len); - pci_dma_sync_single_for_cpu(adap->pdev, - dma_unmap_addr(sd, dma_addr), len, - PCI_DMA_FROMDEVICE); - memcpy(skb->data, sd->skb->data, len); - pci_dma_sync_single_for_device(adap->pdev, - dma_unmap_addr(sd, dma_addr), len, - PCI_DMA_FROMDEVICE); - } else if (!drop_thres) - goto use_orig_buf; -recycle: - recycle_rx_buf(adap, fl, fl->cidx); - return skb; - } - - if (unlikely(fl->credits < drop_thres) && - refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), - GFP_ATOMIC | __GFP_COMP) == 0) - goto recycle; - -use_orig_buf: - pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), - fl->buf_size, PCI_DMA_FROMDEVICE); - skb = sd->skb; - skb_put(skb, len); - __refill_fl(adap, fl); - return skb; -} - -/** - * get_packet_pg - return the next ingress packet buffer from a free list - * @adap: the adapter that received the packet - * @fl: the SGE free list holding the packet - * @len: the packet length including any SGE padding - * @drop_thres: # of remaining buffers before we start dropping packets - * - * Get the next packet from a free list populated with page chunks. 
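Each sg_ent packs two length/address pairs into three 8-byte flits (one flit for the two 32-bit lengths, two flits for the two 64-bit addresses), which is where sgl_len()'s 3n/2 term comes from. A quick standalone check of the formula:

#include <assert.h>

static unsigned int sgl_len(unsigned int n)
{
	return (3 * n) / 2 + (n & 1);
}

int main(void)
{
	assert(sgl_len(1) == 2);	/* half an sg_ent: len flit + addr flit */
	assert(sgl_len(2) == 3);	/* one full sg_ent                      */
	assert(sgl_len(3) == 5);	/* one sg_ent plus half of the next     */
	assert(sgl_len(4) == 6);	/* two full sg_ents                     */
	return 0;
}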
- * If the packet is small we make a copy and recycle the original buffer, - * otherwise we attach the original buffer as a page fragment to a fresh - * sk_buff. If a positive drop threshold is supplied packets are dropped - * and their buffers recycled if (a) the number of remaining buffers is - * under the threshold and the packet is too big to copy, or (b) there's - * no system memory. - * - * Note: this function is similar to @get_packet but deals with Rx buffers - * that are page chunks rather than sk_buffs. - */ -static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, - struct sge_rspq *q, unsigned int len, - unsigned int drop_thres) -{ - struct sk_buff *newskb, *skb; - struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; - - dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr); - - newskb = skb = q->pg_skb; - if (!skb && (len <= SGE_RX_COPY_THRES)) { - newskb = alloc_skb(len, GFP_ATOMIC); - if (likely(newskb != NULL)) { - __skb_put(newskb, len); - pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, - PCI_DMA_FROMDEVICE); - memcpy(newskb->data, sd->pg_chunk.va, len); - pci_dma_sync_single_for_device(adap->pdev, dma_addr, - len, - PCI_DMA_FROMDEVICE); - } else if (!drop_thres) - return NULL; -recycle: - fl->credits--; - recycle_rx_buf(adap, fl, fl->cidx); - q->rx_recycle_buf++; - return newskb; - } - - if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) - goto recycle; - - prefetch(sd->pg_chunk.p_cnt); - - if (!skb) - newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); - - if (unlikely(!newskb)) { - if (!drop_thres) - return NULL; - goto recycle; - } - - pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, - PCI_DMA_FROMDEVICE); - (*sd->pg_chunk.p_cnt)--; - if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) - pci_unmap_page(adap->pdev, - sd->pg_chunk.mapping, - fl->alloc_size, - PCI_DMA_FROMDEVICE); - if (!skb) { - __skb_put(newskb, SGE_RX_PULL_LEN); - memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); - skb_fill_page_desc(newskb, 0, sd->pg_chunk.page, - sd->pg_chunk.offset + SGE_RX_PULL_LEN, - len - SGE_RX_PULL_LEN); - newskb->len = len; - newskb->data_len = len - SGE_RX_PULL_LEN; - newskb->truesize += newskb->data_len; - } else { - skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags, - sd->pg_chunk.page, - sd->pg_chunk.offset, len); - newskb->len += len; - newskb->data_len += len; - newskb->truesize += len; - } - - fl->credits--; - /* - * We do not refill FLs here, we let the caller do it to overlap a - * prefetch. - */ - return newskb; -} - -/** - * get_imm_packet - return the next ingress packet buffer from a response - * @resp: the response descriptor containing the packet data - * - * Return a packet containing the immediate data of the given response. - */ -static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp) -{ - struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC); - - if (skb) { - __skb_put(skb, IMMED_PKT_SIZE); - skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); - } - return skb; -} - -/** - * calc_tx_descs - calculate the number of Tx descriptors for a packet - * @skb: the packet - * - * Returns the number of Tx descriptors needed for the given Ethernet - * packet. Ethernet packets require addition of WR and CPL headers. 
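Both get_packet() and get_packet_pg() copy-break small frames: anything at or below SGE_RX_COPY_THRES is copied into a right-sized buffer so the large Rx buffer can be recycled in place. A reduced userspace sketch of that policy (the threshold value and types are stand-ins):

#include <stdlib.h>
#include <string.h>

#define COPY_THRES 256		/* stand-in for SGE_RX_COPY_THRES */

struct rx_buf {
	void *data;
	int   recycled;		/* 1: buffer went back on the free list */
};

/* Small frames are copied into a tightly sized buffer so the big Rx
 * buffer can be reused; large frames surrender the original buffer
 * to the caller instead.  Error handling reduced to the essentials.
 */
static void *deliver(struct rx_buf *rx, size_t len)
{
	if (len <= COPY_THRES) {
		void *copy = malloc(len);

		if (copy) {
			memcpy(copy, rx->data, len);
			rx->recycled = 1;
			return copy;
		}
	}
	rx->recycled = 0;	/* caller now owns the original buffer */
	return rx->data;
}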
- */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
-{
-	unsigned int flits;
-
-	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
-		return 1;
-
-	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
-	if (skb_shinfo(skb)->gso_size)
-		flits++;
-	return flits_to_desc(flits);
-}
-
-/**
- *	make_sgl - populate a scatter/gather list for a packet
- *	@skb: the packet
- *	@sgp: the SGL to populate
- *	@start: start address of skb main body data to include in the SGL
- *	@len: length of skb main body data to include in the SGL
- *	@pdev: the PCI device
- *
- *	Generates a scatter/gather list for the buffers that make up a packet
- *	and returns the SGL size in 8-byte words.  The caller must size the SGL
- *	appropriately.
- */
-static inline unsigned int make_sgl(const struct sk_buff *skb,
-				    struct sg_ent *sgp, unsigned char *start,
-				    unsigned int len, struct pci_dev *pdev)
-{
-	dma_addr_t mapping;
-	unsigned int i, j = 0, nfrags;
-
-	if (len) {
-		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
-		sgp->len[0] = cpu_to_be32(len);
-		sgp->addr[0] = cpu_to_be64(mapping);
-		j = 1;
-	}
-
-	nfrags = skb_shinfo(skb)->nr_frags;
-	for (i = 0; i < nfrags; i++) {
-		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-		mapping = pci_map_page(pdev, frag->page, frag->page_offset,
-				       frag->size, PCI_DMA_TODEVICE);
-		sgp->len[j] = cpu_to_be32(frag->size);
-		sgp->addr[j] = cpu_to_be64(mapping);
-		j ^= 1;
-		if (j == 0)
-			++sgp;
-	}
-	if (j)
-		sgp->len[j] = 0;
-	return ((nfrags + (len != 0)) * 3) / 2 + j;
-}
-
-/**
- *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
- *	@adap: the adapter
- *	@q: the Tx queue
- *
- *	Ring the doorbell if a Tx queue is asleep.  There is a natural race
- *	where the HW goes to sleep just after we check; in that case the
- *	interrupt handler will detect the outstanding TX packet and ring the
- *	doorbell for us.
- *
- *	When GTS is disabled we unconditionally ring the doorbell.
- */
-static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
-{
-#if USE_GTS
-	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
-	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
-		set_bit(TXQ_LAST_PKT_DB, &q->flags);
-		t3_write_reg(adap, A_SG_KDOORBELL,
-			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
-	}
-#else
-	wmb();			/* write descriptors before telling HW */
-	t3_write_reg(adap, A_SG_KDOORBELL,
-		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
-#endif
-}
-
-static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
-{
-#if SGE_NUM_GENBITS == 2
-	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
-#endif
-}
-
-/**
- *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
- *	@ndesc: number of Tx descriptors spanned by the SGL
- *	@skb: the packet corresponding to the WR
- *	@d: first Tx descriptor to be written
- *	@pidx: index of above descriptors
- *	@q: the SGE Tx queue
- *	@sgl: the SGL
- *	@flits: number of flits to the start of the SGL in the first descriptor
- *	@sgl_flits: the SGL size in flits
- *	@gen: the Tx descriptor generation
- *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
- *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
- *
- *	Write a work request header and an associated SGL.  If the SGL is
- *	small enough to fit into one Tx descriptor it has already been written
- *	and we just need to write the WR header.  Otherwise we distribute the
- *	SGL across the number of descriptors it spans.
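make_sgl()'s j ^= 1 idiom alternates between the two slots of an sg_ent, advancing to the next entry only when both slots are full, with a trailing zero length marking an unused second slot. A standalone model of that packing (hypothetical sg_ent layout mirroring the 3-flit entry described above; DMA mapping replaced by plain addresses):

#include <stdint.h>

struct sg_ent { uint32_t len[2]; uint64_t addr[2]; };

static unsigned int pack(struct sg_ent *sgp, const uint64_t *addrs,
			 const uint32_t *lens, unsigned int n)
{
	unsigned int i, j = 0;

	for (i = 0; i < n; i++) {
		sgp->len[j] = lens[i];
		sgp->addr[j] = addrs[i];
		j ^= 1;			/* alternate slot 0 / slot 1    */
		if (j == 0)
			++sgp;		/* entry full, move to the next */
	}
	if (j)
		sgp->len[j] = 0;	/* mark the unused second slot  */
	return (n * 3) / 2 + j;		/* size in 8-byte flits         */
}

int main(void)
{
	struct sg_ent sgl[2];
	uint64_t addrs[3] = { 0x1000, 0x2000, 0x3000 };
	uint32_t lens[3]  = { 64, 128, 256 };

	/* 3 buffers need 1.5 sg_ents, i.e. 5 flits, matching sgl_len(3) */
	return pack(sgl, addrs, lens, 3) == 5 ? 0 : 1;
}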
- */ -static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb, - struct tx_desc *d, unsigned int pidx, - const struct sge_txq *q, - const struct sg_ent *sgl, - unsigned int flits, unsigned int sgl_flits, - unsigned int gen, __be32 wr_hi, - __be32 wr_lo) -{ - struct work_request_hdr *wrp = (struct work_request_hdr *)d; - struct tx_sw_desc *sd = &q->sdesc[pidx]; - - sd->skb = skb; - if (need_skb_unmap()) { - sd->fragidx = 0; - sd->addr_idx = 0; - sd->sflit = flits; - } - - if (likely(ndesc == 1)) { - sd->eop = 1; - wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | - V_WR_SGLSFLT(flits)) | wr_hi; - wmb(); - wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) | - V_WR_GEN(gen)) | wr_lo; - wr_gen2(d, gen); - } else { - unsigned int ogen = gen; - const u64 *fp = (const u64 *)sgl; - struct work_request_hdr *wp = wrp; - - wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | - V_WR_SGLSFLT(flits)) | wr_hi; - - while (sgl_flits) { - unsigned int avail = WR_FLITS - flits; - - if (avail > sgl_flits) - avail = sgl_flits; - memcpy(&d->flit[flits], fp, avail * sizeof(*fp)); - sgl_flits -= avail; - ndesc--; - if (!sgl_flits) - break; - - fp += avail; - d++; - sd->eop = 0; - sd++; - if (++pidx == q->size) { - pidx = 0; - gen ^= 1; - d = q->desc; - sd = q->sdesc; - } - - sd->skb = skb; - wrp = (struct work_request_hdr *)d; - wrp->wr_hi = htonl(V_WR_DATATYPE(1) | - V_WR_SGLSFLT(1)) | wr_hi; - wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS, - sgl_flits + 1)) | - V_WR_GEN(gen)) | wr_lo; - wr_gen2(d, gen); - flits = 1; - } - sd->eop = 1; - wrp->wr_hi |= htonl(F_WR_EOP); - wmb(); - wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo; - wr_gen2((struct tx_desc *)wp, ogen); - WARN_ON(ndesc != 0); - } -} - -/** - * write_tx_pkt_wr - write a TX_PKT work request - * @adap: the adapter - * @skb: the packet to send - * @pi: the egress interface - * @pidx: index of the first Tx descriptor to write - * @gen: the generation value to use - * @q: the Tx queue - * @ndesc: number of descriptors the packet will occupy - * @compl: the value of the COMPL bit to use - * - * Generate a TX_PKT work request to send the supplied packet. - */ -static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, - const struct port_info *pi, - unsigned int pidx, unsigned int gen, - struct sge_txq *q, unsigned int ndesc, - unsigned int compl) -{ - unsigned int flits, sgl_flits, cntrl, tso_info; - struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; - struct tx_desc *d = &q->desc[pidx]; - struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d; - - cpl->len = htonl(skb->len); - cntrl = V_TXPKT_INTF(pi->port_id); - - if (vlan_tx_tag_present(skb)) - cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb)); - - tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); - if (tso_info) { - int eth_type; - struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl; - - d->flit[2] = 0; - cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); - hdr->cntrl = htonl(cntrl); - eth_type = skb_network_offset(skb) == ETH_HLEN ? 
- CPL_ETH_II : CPL_ETH_II_VLAN; - tso_info |= V_LSO_ETH_TYPE(eth_type) | - V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | - V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff); - hdr->lso_info = htonl(tso_info); - flits = 3; - } else { - cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); - cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */ - cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL); - cpl->cntrl = htonl(cntrl); - - if (skb->len <= WR_LEN - sizeof(*cpl)) { - q->sdesc[pidx].skb = NULL; - if (!skb->data_len) - skb_copy_from_linear_data(skb, &d->flit[2], - skb->len); - else - skb_copy_bits(skb, 0, &d->flit[2], skb->len); - - flits = (skb->len + 7) / 8 + 2; - cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) | - V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) - | F_WR_SOP | F_WR_EOP | compl); - wmb(); - cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) | - V_WR_TID(q->token)); - wr_gen2(d, gen); - kfree_skb(skb); - return; - } - - flits = 2; - } - - sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); - - write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, - htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), - htonl(V_WR_TID(q->token))); -} - -static inline void t3_stop_tx_queue(struct netdev_queue *txq, - struct sge_qset *qs, struct sge_txq *q) -{ - netif_tx_stop_queue(txq); - set_bit(TXQ_ETH, &qs->txq_stopped); - q->stops++; -} - -/** - * eth_xmit - add a packet to the Ethernet Tx queue - * @skb: the packet - * @dev: the egress net device - * - * Add a packet to an SGE Tx queue. Runs with softirqs disabled. - */ -netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) -{ - int qidx; - unsigned int ndesc, pidx, credits, gen, compl; - const struct port_info *pi = netdev_priv(dev); - struct adapter *adap = pi->adapter; - struct netdev_queue *txq; - struct sge_qset *qs; - struct sge_txq *q; - - /* - * The chip min packet length is 9 octets but play safe and reject - * anything shorter than an Ethernet header. - */ - if (unlikely(skb->len < ETH_HLEN)) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - qidx = skb_get_queue_mapping(skb); - qs = &pi->qs[qidx]; - q = &qs->txq[TXQ_ETH]; - txq = netdev_get_tx_queue(dev, qidx); - - reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); - - credits = q->size - q->in_use; - ndesc = calc_tx_descs(skb); - - if (unlikely(credits < ndesc)) { - t3_stop_tx_queue(txq, qs, q); - dev_err(&adap->pdev->dev, - "%s: Tx ring %u full while queue awake!\n", - dev->name, q->cntxt_id & 7); - return NETDEV_TX_BUSY; - } - - q->in_use += ndesc; - if (unlikely(credits - ndesc < q->stop_thres)) { - t3_stop_tx_queue(txq, qs, q); - - if (should_restart_tx(q) && - test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { - q->restarts++; - netif_tx_start_queue(txq); - } - } - - gen = q->gen; - q->unacked += ndesc; - compl = (q->unacked & 8) << (S_WR_COMPL - 3); - q->unacked &= 7; - pidx = q->pidx; - q->pidx += ndesc; - if (q->pidx >= q->size) { - q->pidx -= q->size; - q->gen ^= 1; - } - - /* update port statistics */ - if (skb->ip_summed == CHECKSUM_COMPLETE) - qs->port_stats[SGE_PSTAT_TX_CSUM]++; - if (skb_shinfo(skb)->gso_size) - qs->port_stats[SGE_PSTAT_TSO]++; - if (vlan_tx_tag_present(skb)) - qs->port_stats[SGE_PSTAT_VLANINS]++; - - /* - * We do not use Tx completion interrupts to free DMAd Tx packets. - * This is good for performance but means that we rely on new Tx - * packets arriving to run the destructors of completed packets, - * which open up space in their sockets' send queues. 
Sometimes - * we do not get such new packets causing Tx to stall. A single - * UDP transmitter is a good example of this situation. We have - * a clean up timer that periodically reclaims completed packets - * but it doesn't run often enough (nor do we want it to) to prevent - * lengthy stalls. A solution to this problem is to run the - * destructor early, after the packet is queued but before it's DMAd. - * A cons is that we lie to socket memory accounting, but the amount - * of extra memory is reasonable (limited by the number of Tx - * descriptors), the packets do actually get freed quickly by new - * packets almost always, and for protocols like TCP that wait for - * acks to really free up the data the extra memory is even less. - * On the positive side we run the destructors on the sending CPU - * rather than on a potentially different completing CPU, usually a - * good thing. We also run them without holding our Tx queue lock, - * unlike what reclaim_completed_tx() would otherwise do. - * - * Run the destructor before telling the DMA engine about the packet - * to make sure it doesn't complete and get freed prematurely. - */ - if (likely(!skb_shared(skb))) - skb_orphan(skb); - - write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); - check_ring_tx_db(adap, q); - return NETDEV_TX_OK; -} - -/** - * write_imm - write a packet into a Tx descriptor as immediate data - * @d: the Tx descriptor to write - * @skb: the packet - * @len: the length of packet data to write as immediate data - * @gen: the generation bit value to write - * - * Writes a packet as immediate data into a Tx descriptor. The packet - * contains a work request at its beginning. We must write the packet - * carefully so the SGE doesn't read it accidentally before it's written - * in its entirety. - */ -static inline void write_imm(struct tx_desc *d, struct sk_buff *skb, - unsigned int len, unsigned int gen) -{ - struct work_request_hdr *from = (struct work_request_hdr *)skb->data; - struct work_request_hdr *to = (struct work_request_hdr *)d; - - if (likely(!skb->data_len)) - memcpy(&to[1], &from[1], len - sizeof(*from)); - else - skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); - - to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | - V_WR_BCNTLFLT(len & 7)); - wmb(); - to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) | - V_WR_LEN((len + 7) / 8)); - wr_gen2(d, gen); - kfree_skb(skb); -} - -/** - * check_desc_avail - check descriptor availability on a send queue - * @adap: the adapter - * @q: the send queue - * @skb: the packet needing the descriptors - * @ndesc: the number of Tx descriptors needed - * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL) - * - * Checks if the requested number of Tx descriptors is available on an - * SGE send queue. If the queue is already suspended or not enough - * descriptors are available the packet is queued for later transmission. - * Must be called with the Tx queue locked. - * - * Returns 0 if enough descriptors are available, 1 if there aren't - * enough descriptors and the packet has been queued, and 2 if the caller - * needs to retry because there weren't enough descriptors at the - * beginning of the call but some freed up in the mean time. 
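A typical caller of check_desc_avail() retries on a return of 2 and backs off on 1, as ctrl_xmit() and ofld_xmit() below do. A condensed sketch of that pattern (the shape of the code only, mirroring ofld_xmit(); not a drop-in function):

/* Sketch, assuming the queue lock and counters defined above. */
static int xmit_one(struct adapter *adap, struct sge_txq *q,
		    struct sk_buff *skb, unsigned int ndesc)
{
	int ret;

	spin_lock(&q->lock);
again:
	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
	if (ret == 1) {			/* queued on q->sendq for later */
		spin_unlock(&q->lock);
		return NET_XMIT_CN;
	}
	if (ret == 2)			/* descriptors freed up: retry */
		goto again;

	q->in_use += ndesc;		/* ret == 0: room reserved */
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);

	/* write the work request and ring the doorbell here */
	return NET_XMIT_SUCCESS;
}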
- */
-static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
-				   struct sk_buff *skb, unsigned int ndesc,
-				   unsigned int qid)
-{
-	if (unlikely(!skb_queue_empty(&q->sendq))) {
-	      addq_exit:__skb_queue_tail(&q->sendq, skb);
-		return 1;
-	}
-	if (unlikely(q->size - q->in_use < ndesc)) {
-		struct sge_qset *qs = txq_to_qset(q, qid);
-
-		set_bit(qid, &qs->txq_stopped);
-		smp_mb__after_clear_bit();
-
-		if (should_restart_tx(q) &&
-		    test_and_clear_bit(qid, &qs->txq_stopped))
-			return 2;
-
-		q->stops++;
-		goto addq_exit;
-	}
-	return 0;
-}
-
-/**
- *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
- *	@q: the SGE control Tx queue
- *
- *	This is a variant of reclaim_completed_tx() that is used for Tx queues
- *	that send only immediate data (presently just the control queues) and
- *	thus do not have any sk_buffs to release.
- */
-static inline void reclaim_completed_tx_imm(struct sge_txq *q)
-{
-	unsigned int reclaim = q->processed - q->cleaned;
-
-	q->in_use -= reclaim;
-	q->cleaned += reclaim;
-}
-
-static inline int immediate(const struct sk_buff *skb)
-{
-	return skb->len <= WR_LEN;
-}
-
-/**
- *	ctrl_xmit - send a packet through an SGE control Tx queue
- *	@adap: the adapter
- *	@q: the control queue
- *	@skb: the packet
- *
- *	Send a packet through an SGE control Tx queue.  Packets sent through
- *	a control queue must fit entirely as immediate data in a single Tx
- *	descriptor and have no page fragments.
- */
-static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
-		     struct sk_buff *skb)
-{
-	int ret;
-	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
-
-	if (unlikely(!immediate(skb))) {
-		WARN_ON(1);
-		dev_kfree_skb(skb);
-		return NET_XMIT_SUCCESS;
-	}
-
-	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
-	wrp->wr_lo = htonl(V_WR_TID(q->token));
-
-	spin_lock(&q->lock);
-      again:reclaim_completed_tx_imm(q);
-
-	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
-	if (unlikely(ret)) {
-		if (ret == 1) {
-			spin_unlock(&q->lock);
-			return NET_XMIT_CN;
-		}
-		goto again;
-	}
-
-	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
-
-	q->in_use++;
-	if (++q->pidx >= q->size) {
-		q->pidx = 0;
-		q->gen ^= 1;
-	}
-	spin_unlock(&q->lock);
-	wmb();
-	t3_write_reg(adap, A_SG_KDOORBELL,
-		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
-	return NET_XMIT_SUCCESS;
-}
-
-/**
- *	restart_ctrlq - restart a suspended control queue
- *	@qs: the queue set containing the control queue
- *
- *	Resumes transmission on a suspended Tx control queue.
- */ -static void restart_ctrlq(unsigned long data) -{ - struct sk_buff *skb; - struct sge_qset *qs = (struct sge_qset *)data; - struct sge_txq *q = &qs->txq[TXQ_CTRL]; - - spin_lock(&q->lock); - again:reclaim_completed_tx_imm(q); - - while (q->in_use < q->size && - (skb = __skb_dequeue(&q->sendq)) != NULL) { - - write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); - - if (++q->pidx >= q->size) { - q->pidx = 0; - q->gen ^= 1; - } - q->in_use++; - } - - if (!skb_queue_empty(&q->sendq)) { - set_bit(TXQ_CTRL, &qs->txq_stopped); - smp_mb__after_clear_bit(); - - if (should_restart_tx(q) && - test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) - goto again; - q->stops++; - } - - spin_unlock(&q->lock); - wmb(); - t3_write_reg(qs->adap, A_SG_KDOORBELL, - F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); -} - -/* - * Send a management message through control queue 0 - */ -int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) -{ - int ret; - local_bh_disable(); - ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); - local_bh_enable(); - - return ret; -} - -/** - * deferred_unmap_destructor - unmap a packet when it is freed - * @skb: the packet - * - * This is the packet destructor used for Tx packets that need to remain - * mapped until they are freed rather than until their Tx descriptors are - * freed. - */ -static void deferred_unmap_destructor(struct sk_buff *skb) -{ - int i; - const dma_addr_t *p; - const struct skb_shared_info *si; - const struct deferred_unmap_info *dui; - - dui = (struct deferred_unmap_info *)skb->head; - p = dui->addr; - - if (skb->tail - skb->transport_header) - pci_unmap_single(dui->pdev, *p++, - skb->tail - skb->transport_header, - PCI_DMA_TODEVICE); - - si = skb_shinfo(skb); - for (i = 0; i < si->nr_frags; i++) - pci_unmap_page(dui->pdev, *p++, si->frags[i].size, - PCI_DMA_TODEVICE); -} - -static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, - const struct sg_ent *sgl, int sgl_flits) -{ - dma_addr_t *p; - struct deferred_unmap_info *dui; - - dui = (struct deferred_unmap_info *)skb->head; - dui->pdev = pdev; - for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) { - *p++ = be64_to_cpu(sgl->addr[0]); - *p++ = be64_to_cpu(sgl->addr[1]); - } - if (sgl_flits) - *p = be64_to_cpu(sgl->addr[0]); -} - -/** - * write_ofld_wr - write an offload work request - * @adap: the adapter - * @skb: the packet to send - * @q: the Tx queue - * @pidx: index of the first Tx descriptor to write - * @gen: the generation value to use - * @ndesc: number of descriptors the packet will occupy - * - * Write an offload work request to send the supplied packet. The packet - * data already carry the work request with most fields populated. - */ -static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, - struct sge_txq *q, unsigned int pidx, - unsigned int gen, unsigned int ndesc) -{ - unsigned int sgl_flits, flits; - struct work_request_hdr *from; - struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; - struct tx_desc *d = &q->desc[pidx]; - - if (immediate(skb)) { - q->sdesc[pidx].skb = NULL; - write_imm(d, skb, skb->len, gen); - return; - } - - /* Only TX_DATA builds SGLs */ - - from = (struct work_request_hdr *)skb->data; - memcpy(&d->flit[1], &from[1], - skb_transport_offset(skb) - sizeof(*from)); - - flits = skb_transport_offset(skb) / 8; - sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl;
-	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
-			     skb->tail - skb->transport_header,
-			     adap->pdev);
-	if (need_skb_unmap()) {
-		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
-		skb->destructor = deferred_unmap_destructor;
-	}
-
-	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
-			 gen, from->wr_hi, from->wr_lo);
-}
-
-/**
- *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
- *	@skb: the packet
- *
- *	Returns the number of Tx descriptors needed for the given offload
- *	packet.  These packets are already fully constructed.
- */
-static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
-{
-	unsigned int flits, cnt;
-
-	if (skb->len <= WR_LEN)
-		return 1;	/* packet fits as immediate data */
-
-	flits = skb_transport_offset(skb) / 8;	/* headers */
-	cnt = skb_shinfo(skb)->nr_frags;
-	if (skb->tail != skb->transport_header)
-		cnt++;
-	return flits_to_desc(flits + sgl_len(cnt));
-}
-
-/**
- *	ofld_xmit - send a packet through an offload queue
- *	@adap: the adapter
- *	@q: the Tx offload queue
- *	@skb: the packet
- *
- *	Send an offload packet through an SGE offload queue.
- */
-static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
-		     struct sk_buff *skb)
-{
-	int ret;
-	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
-
-	spin_lock(&q->lock);
-again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
-
-	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
-	if (unlikely(ret)) {
-		if (ret == 1) {
-			skb->priority = ndesc;	/* save for restart */
-			spin_unlock(&q->lock);
-			return NET_XMIT_CN;
-		}
-		goto again;
-	}
-
-	gen = q->gen;
-	q->in_use += ndesc;
-	pidx = q->pidx;
-	q->pidx += ndesc;
-	if (q->pidx >= q->size) {
-		q->pidx -= q->size;
-		q->gen ^= 1;
-	}
-	spin_unlock(&q->lock);
-
-	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
-	check_ring_tx_db(adap, q);
-	return NET_XMIT_SUCCESS;
-}
-
-/**
- *	restart_offloadq - restart a suspended offload queue
- *	@qs: the queue set containing the offload queue
- *
- *	Resumes transmission on a suspended Tx offload queue.
- */
-static void restart_offloadq(unsigned long data)
-{
-	struct sk_buff *skb;
-	struct sge_qset *qs = (struct sge_qset *)data;
-	struct sge_txq *q = &qs->txq[TXQ_OFLD];
-	const struct port_info *pi = netdev_priv(qs->netdev);
-	struct adapter *adap = pi->adapter;
-
-	spin_lock(&q->lock);
-again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
-
-	while ((skb = skb_peek(&q->sendq)) != NULL) {
-		unsigned int gen, pidx;
-		unsigned int ndesc = skb->priority;
-
-		if (unlikely(q->size - q->in_use < ndesc)) {
-			set_bit(TXQ_OFLD, &qs->txq_stopped);
-			smp_mb__after_clear_bit();
-
-			if (should_restart_tx(q) &&
-			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
-				goto again;
-			q->stops++;
-			break;
-		}
-
-		gen = q->gen;
-		q->in_use += ndesc;
-		pidx = q->pidx;
-		q->pidx += ndesc;
-		if (q->pidx >= q->size) {
-			q->pidx -= q->size;
-			q->gen ^= 1;
-		}
-		__skb_unlink(skb, &q->sendq);
-		spin_unlock(&q->lock);
-
-		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
-		spin_lock(&q->lock);
-	}
-	spin_unlock(&q->lock);
-
-#if USE_GTS
-	set_bit(TXQ_RUNNING, &q->flags);
-	set_bit(TXQ_LAST_PKT_DB, &q->flags);
-#endif
-	wmb();
-	t3_write_reg(adap, A_SG_KDOORBELL,
-		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
-}
-
-/**
- *	queue_set - return the queue set a packet should use
- *	@skb: the packet
- *
- *	Maps a packet to the SGE queue set it should use.  The desired queue
- *	set is carried in bits 1-3 in the packet's priority.
- */ -static inline int queue_set(const struct sk_buff *skb) -{ - return skb->priority >> 1; -} - -/** - * is_ctrl_pkt - return whether an offload packet is a control packet - * @skb: the packet - * - * Determines whether an offload packet should use an OFLD or a CTRL - * Tx queue. This is indicated by bit 0 in the packet's priority. - */ -static inline int is_ctrl_pkt(const struct sk_buff *skb) -{ - return skb->priority & 1; -} - -/** - * t3_offload_tx - send an offload packet - * @tdev: the offload device to send to - * @skb: the packet - * - * Sends an offload packet. We use the packet priority to select the - * appropriate Tx queue as follows: bit 0 indicates whether the packet - * should be sent as regular or control, bits 1-3 select the queue set. - */ -int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb) -{ - struct adapter *adap = tdev2adap(tdev); - struct sge_qset *qs = &adap->sge.qs[queue_set(skb)]; - - if (unlikely(is_ctrl_pkt(skb))) - return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb); - - return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb); -} - -/** - * offload_enqueue - add an offload packet to an SGE offload receive queue - * @q: the SGE response queue - * @skb: the packet - * - * Add a new offload packet to an SGE response queue's offload packet - * queue. If the packet is the first on the queue it schedules the RX - * softirq to process the queue. - */ -static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) -{ - int was_empty = skb_queue_empty(&q->rx_queue); - - __skb_queue_tail(&q->rx_queue, skb); - - if (was_empty) { - struct sge_qset *qs = rspq_to_qset(q); - - napi_schedule(&qs->napi); - } -} - -/** - * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts - * @tdev: the offload device that will be receiving the packets - * @q: the SGE response queue that assembled the bundle - * @skbs: the partial bundle - * @n: the number of packets in the bundle - * - * Delivers a (partial) bundle of Rx offload packets to an offload device. - */ -static inline void deliver_partial_bundle(struct t3cdev *tdev, - struct sge_rspq *q, - struct sk_buff *skbs[], int n) -{ - if (n) { - q->offload_bundles++; - tdev->recv(tdev, skbs, n); - } -} - -/** - * ofld_poll - NAPI handler for offload packets in interrupt mode - * @dev: the network device doing the polling - * @budget: polling budget - * - * The NAPI handler for offload packets when a response queue is serviced - * by the hard interrupt handler, i.e., when it's operating in non-polling - * mode. Creates small packet batches and sends them through the offload - * receive handler. Batches need to be of modest size as we do prefetches - * on the packets in each. 
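The encoding consumed by queue_set() and is_ctrl_pkt() above puts the control/offload choice in bit 0 and the queue set number in bits 1-3. A hypothetical sender feeding t3_offload_tx() would tag packets like this:

/* Sketch of the sender-side tagging (function name invented): bit 0
 * picks the CTRL vs. OFLD queue, bits 1-3 pick the queue set (0..7).
 */
static void set_offload_dest(struct sk_buff *skb, unsigned int qset,
			     int is_ctrl)
{
	skb->priority = (qset << 1) | (is_ctrl ? 1 : 0);
}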
- */
-static int ofld_poll(struct napi_struct *napi, int budget)
-{
-	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
-	struct sge_rspq *q = &qs->rspq;
-	struct adapter *adapter = qs->adap;
-	int work_done = 0;
-
-	while (work_done < budget) {
-		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
-		struct sk_buff_head queue;
-		int ngathered;
-
-		spin_lock_irq(&q->lock);
-		__skb_queue_head_init(&queue);
-		skb_queue_splice_init(&q->rx_queue, &queue);
-		if (skb_queue_empty(&queue)) {
-			napi_complete(napi);
-			spin_unlock_irq(&q->lock);
-			return work_done;
-		}
-		spin_unlock_irq(&q->lock);
-
-		ngathered = 0;
-		skb_queue_walk_safe(&queue, skb, tmp) {
-			if (work_done >= budget)
-				break;
-			work_done++;
-
-			__skb_unlink(skb, &queue);
-			prefetch(skb->data);
-			skbs[ngathered] = skb;
-			if (++ngathered == RX_BUNDLE_SIZE) {
-				q->offload_bundles++;
-				adapter->tdev.recv(&adapter->tdev, skbs,
-						   ngathered);
-				ngathered = 0;
-			}
-		}
-		if (!skb_queue_empty(&queue)) {
-			/* splice remaining packets back onto Rx queue */
-			spin_lock_irq(&q->lock);
-			skb_queue_splice(&queue, &q->rx_queue);
-			spin_unlock_irq(&q->lock);
-		}
-		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
-	}
-
-	return work_done;
-}
-
-/**
- *	rx_offload - process a received offload packet
- *	@tdev: the offload device receiving the packet
- *	@rq: the response queue that received the packet
- *	@skb: the packet
- *	@rx_gather: a gather list of packets if we are building a bundle
- *	@gather_idx: index of the next available slot in the bundle
- *
- *	Process an ingress offload packet and add it to the offload ingress
- *	queue.  Returns the index of the next available slot in the bundle.
- */
-static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
-			     struct sk_buff *skb, struct sk_buff *rx_gather[],
-			     unsigned int gather_idx)
-{
-	skb_reset_mac_header(skb);
-	skb_reset_network_header(skb);
-	skb_reset_transport_header(skb);
-
-	if (rq->polling) {
-		rx_gather[gather_idx++] = skb;
-		if (gather_idx == RX_BUNDLE_SIZE) {
-			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
-			gather_idx = 0;
-			rq->offload_bundles++;
-		}
-	} else
-		offload_enqueue(rq, skb);
-
-	return gather_idx;
-}
-
-/**
- *	restart_tx - check whether to restart suspended Tx queues
- *	@qs: the queue set to resume
- *
- *	Restarts suspended Tx queues of an SGE queue set if they have enough
- *	free resources to resume operation.
- */
-static void restart_tx(struct sge_qset *qs)
-{
-	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
-	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
-	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
-		qs->txq[TXQ_ETH].restarts++;
-		if (netif_running(qs->netdev))
-			netif_tx_wake_queue(qs->tx_q);
-	}
-
-	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
-	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
-	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
-		qs->txq[TXQ_OFLD].restarts++;
-		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
-	}
-	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
-	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
-	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
-		qs->txq[TXQ_CTRL].restarts++;
-		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
-	}
-}
-
-/**
- *	cxgb3_arp_process - process an ARP request probing a private IP address
- *	@pi: the port info
- *	@skb: the skbuff containing the ARP request
- *
- *	Check if the ARP request is probing the private IP address
- *	dedicated to iSCSI, generate an ARP reply if so.
- */
-static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-	struct arphdr *arp;
-	unsigned char *arp_ptr;
-	unsigned char *sha;
-	__be32 sip, tip;
-
-	if (!dev)
-		return;
-
-	skb_reset_network_header(skb);
-	arp = arp_hdr(skb);
-
-	if (arp->ar_op != htons(ARPOP_REQUEST))
-		return;
-
-	arp_ptr = (unsigned char *)(arp + 1);
-	sha = arp_ptr;
-	arp_ptr += dev->addr_len;
-	memcpy(&sip, arp_ptr, sizeof(sip));
-	arp_ptr += sizeof(sip);
-	arp_ptr += dev->addr_len;
-	memcpy(&tip, arp_ptr, sizeof(tip));
-
-	if (tip != pi->iscsi_ipv4addr)
-		return;
-
-	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
-		 pi->iscsic.mac_addr, sha);
-
-}
-
-static inline int is_arp(struct sk_buff *skb)
-{
-	return skb->protocol == htons(ETH_P_ARP);
-}
-
-static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
-					  struct sk_buff *skb)
-{
-	if (is_arp(skb)) {
-		cxgb3_arp_process(pi, skb);
-		return;
-	}
-
-	if (pi->iscsic.recv)
-		pi->iscsic.recv(pi, skb);
-
-}
-
-/**
- *	rx_eth - process an ingress ethernet packet
- *	@adap: the adapter
- *	@rq: the response queue that received the packet
- *	@skb: the packet
- *	@pad: amount of padding at the start of the buffer
- *	@lro: whether to deliver the packet via GRO
- *
- *	Process an ingress ethernet packet and deliver it to the stack.
- *	The padding is 2 if the packet was delivered in an Rx buffer and 0
- *	if it was immediate data in a response.
- */
-static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
-		   struct sk_buff *skb, int pad, int lro)
-{
-	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
-	struct sge_qset *qs = rspq_to_qset(rq);
-	struct port_info *pi;
-
-	skb_pull(skb, sizeof(*p) + pad);
-	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
-	pi = netdev_priv(skb->dev);
-	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
-	    p->csum == htons(0xffff) && !p->fragment) {
-		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else
-		skb_checksum_none_assert(skb);
-	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
-
-	if (p->vlan_valid) {
-		qs->port_stats[SGE_PSTAT_VLANEX]++;
-		__vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
-	}
-	if (rq->polling) {
-		if (lro)
-			napi_gro_receive(&qs->napi, skb);
-		else {
-			if (unlikely(pi->iscsic.flags))
-				cxgb3_process_iscsi_prov_pack(pi, skb);
-			netif_receive_skb(skb);
-		}
-	} else
-		netif_rx(skb);
-}
-
-static inline int is_eth_tcp(u32 rss)
-{
-	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
-}
-
-/**
- *	lro_add_page - add a page chunk to an LRO session
- *	@adap: the adapter
- *	@qs: the associated queue set
- *	@fl: the free list containing the page chunk to add
- *	@len: packet length
- *	@complete: Indicates the last fragment of a frame
- *
- *	Add a received packet contained in a page chunk to an existing LRO
- *	session.
- */ -static void lro_add_page(struct adapter *adap, struct sge_qset *qs, - struct sge_fl *fl, int len, int complete) -{ - struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; - struct port_info *pi = netdev_priv(qs->netdev); - struct sk_buff *skb = NULL; - struct cpl_rx_pkt *cpl; - struct skb_frag_struct *rx_frag; - int nr_frags; - int offset = 0; - - if (!qs->nomem) { - skb = napi_get_frags(&qs->napi); - qs->nomem = !skb; - } - - fl->credits--; - - pci_dma_sync_single_for_cpu(adap->pdev, - dma_unmap_addr(sd, dma_addr), - fl->buf_size - SGE_PG_RSVD, - PCI_DMA_FROMDEVICE); - - (*sd->pg_chunk.p_cnt)--; - if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) - pci_unmap_page(adap->pdev, - sd->pg_chunk.mapping, - fl->alloc_size, - PCI_DMA_FROMDEVICE); - - if (!skb) { - put_page(sd->pg_chunk.page); - if (complete) - qs->nomem = 0; - return; - } - - rx_frag = skb_shinfo(skb)->frags; - nr_frags = skb_shinfo(skb)->nr_frags; - - if (!nr_frags) { - offset = 2 + sizeof(struct cpl_rx_pkt); - cpl = qs->lro_va = sd->pg_chunk.va + 2; - - if ((qs->netdev->features & NETIF_F_RXCSUM) && - cpl->csum_valid && cpl->csum == htons(0xffff)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; - } else - skb->ip_summed = CHECKSUM_NONE; - } else - cpl = qs->lro_va; - - len -= offset; - - rx_frag += nr_frags; - rx_frag->page = sd->pg_chunk.page; - rx_frag->page_offset = sd->pg_chunk.offset + offset; - rx_frag->size = len; - - skb->len += len; - skb->data_len += len; - skb->truesize += len; - skb_shinfo(skb)->nr_frags++; - - if (!complete) - return; - - skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); - - if (cpl->vlan_valid) - __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan)); - napi_gro_frags(&qs->napi); -} - -/** - * handle_rsp_cntrl_info - handles control information in a response - * @qs: the queue set corresponding to the response - * @flags: the response control flags - * - * Handles the control information of an SGE response, such as GTS - * indications and completion credits for the queue set's Tx queues. - * HW coalesces credits, we don't do any extra SW coalescing. - */ -static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags) -{ - unsigned int credits; - -#if USE_GTS - if (flags & F_RSPD_TXQ0_GTS) - clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); -#endif - - credits = G_RSPD_TXQ0_CR(flags); - if (credits) - qs->txq[TXQ_ETH].processed += credits; - - credits = G_RSPD_TXQ2_CR(flags); - if (credits) - qs->txq[TXQ_CTRL].processed += credits; - -# if USE_GTS - if (flags & F_RSPD_TXQ1_GTS) - clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); -# endif - credits = G_RSPD_TXQ1_CR(flags); - if (credits) - qs->txq[TXQ_OFLD].processed += credits; -} - -/** - * check_ring_db - check if we need to ring any doorbells - * @adapter: the adapter - * @qs: the queue set whose Tx queues are to be examined - * @sleeping: indicates which Tx queue sent GTS - * - * Checks if some of a queue set's Tx queues need to ring their doorbells - * to resume transmission after idling while they still have unprocessed - * descriptors. 
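handle_rsp_cntrl_info() above pulls per-Tx-queue completion credits out of a single 32-bit flags word with the G_RSPD_TXQn_CR() accessors. A standalone model of that style of field extraction (the 7-bit layout used here is illustrative, not the hardware's exact one):

#include <stdint.h>
#include <assert.h>

#define CR_BITS 7
#define CR_MASK ((1u << CR_BITS) - 1)

/* Illustrative decode: three small credit counts packed side by side
 * in one word, one per Tx queue of the queue set.
 */
static unsigned int txq_cr(uint32_t flags, int qidx)
{
	return (flags >> (qidx * CR_BITS)) & CR_MASK;
}

int main(void)
{
	uint32_t flags = (5u << 0) | (2u << 7) | (9u << 14);

	assert(txq_cr(flags, 0) == 5);	/* e.g. TXQ_ETH credits  */
	assert(txq_cr(flags, 1) == 2);	/* e.g. TXQ_OFLD credits */
	assert(txq_cr(flags, 2) == 9);	/* e.g. TXQ_CTRL credits */
	return 0;
}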
- */ -static void check_ring_db(struct adapter *adap, struct sge_qset *qs, - unsigned int sleeping) -{ - if (sleeping & F_RSPD_TXQ0_GTS) { - struct sge_txq *txq = &qs->txq[TXQ_ETH]; - - if (txq->cleaned + txq->in_use != txq->processed && - !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { - set_bit(TXQ_RUNNING, &txq->flags); - t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | - V_EGRCNTX(txq->cntxt_id)); - } - } - - if (sleeping & F_RSPD_TXQ1_GTS) { - struct sge_txq *txq = &qs->txq[TXQ_OFLD]; - - if (txq->cleaned + txq->in_use != txq->processed && - !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { - set_bit(TXQ_RUNNING, &txq->flags); - t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | - V_EGRCNTX(txq->cntxt_id)); - } - } -} - -/** - * is_new_response - check if a response is newly written - * @r: the response descriptor - * @q: the response queue - * - * Returns true if a response descriptor contains a yet unprocessed - * response. - */ -static inline int is_new_response(const struct rsp_desc *r, - const struct sge_rspq *q) -{ - return (r->intr_gen & F_RSPD_GEN2) == q->gen; -} - -static inline void clear_rspq_bufstate(struct sge_rspq * const q) -{ - q->pg_skb = NULL; - q->rx_recycle_buf = 0; -} - -#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) -#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ - V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ - V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \ - V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR)) - -/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */ -#define NOMEM_INTR_DELAY 2500 - -/** - * process_responses - process responses from an SGE response queue - * @adap: the adapter - * @qs: the queue set to which the response queue belongs - * @budget: how many responses can be processed in this round - * - * Process responses from an SGE response queue up to the supplied budget. - * Responses include received packets as well as credits and other events - * for the queues that belong to the response queue's queue set. - * A negative budget is effectively unlimited. - * - * Additionally choose the interrupt holdoff time for the next interrupt - * on this queue. If the system is under memory shortage use a fairly - * long delay to help recovery. 
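process_responses() implements the usual NAPI contract: it consumes at most the supplied budget and reports how much of it was used, and napi_rx_handler() re-enables interrupts only when the report comes in under budget. A minimal standalone model of that accounting:

#include <stdio.h>

/* Poll-round accounting: work actually done is budget minus what is
 * left over; anything short of the full budget means "drained".
 */
static int process(int *pending, int budget)
{
	int budget_left = budget;

	while (budget_left && *pending) {
		(*pending)--;
		budget_left--;
	}
	return budget - budget_left;
}

int main(void)
{
	int pending = 5;
	int done = process(&pending, 8);

	if (done < 8)	/* analogous to the napi_complete() path above */
		printf("drained: re-enable interrupts\n");
	return 0;
}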
- */ -static int process_responses(struct adapter *adap, struct sge_qset *qs, - int budget) -{ - struct sge_rspq *q = &qs->rspq; - struct rsp_desc *r = &q->desc[q->cidx]; - int budget_left = budget; - unsigned int sleeping = 0; - struct sk_buff *offload_skbs[RX_BUNDLE_SIZE]; - int ngathered = 0; - - q->next_holdoff = q->holdoff_tmr; - - while (likely(budget_left && is_new_response(r, q))) { - int packet_complete, eth, ethpad = 2; - int lro = !!(qs->netdev->features & NETIF_F_GRO); - struct sk_buff *skb = NULL; - u32 len, flags; - __be32 rss_hi, rss_lo; - - rmb(); - eth = r->rss_hdr.opcode == CPL_RX_PKT; - rss_hi = *(const __be32 *)r; - rss_lo = r->rss_hdr.rss_hash_val; - flags = ntohl(r->flags); - - if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) { - skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC); - if (!skb) - goto no_mem; - - memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE); - skb->data[0] = CPL_ASYNC_NOTIF; - rss_hi = htonl(CPL_ASYNC_NOTIF << 24); - q->async_notif++; - } else if (flags & F_RSPD_IMM_DATA_VALID) { - skb = get_imm_packet(r); - if (unlikely(!skb)) { -no_mem: - q->next_holdoff = NOMEM_INTR_DELAY; - q->nomem++; - /* consume one credit since we tried */ - budget_left--; - break; - } - q->imm_data++; - ethpad = 0; - } else if ((len = ntohl(r->len_cq)) != 0) { - struct sge_fl *fl; - - lro &= eth && is_eth_tcp(rss_hi); - - fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; - if (fl->use_pages) { - void *addr = fl->sdesc[fl->cidx].pg_chunk.va; - - prefetch(addr); -#if L1_CACHE_BYTES < 128 - prefetch(addr + L1_CACHE_BYTES); -#endif - __refill_fl(adap, fl); - if (lro > 0) { - lro_add_page(adap, qs, fl, - G_RSPD_LEN(len), - flags & F_RSPD_EOP); - goto next_fl; - } - - skb = get_packet_pg(adap, fl, q, - G_RSPD_LEN(len), - eth ? - SGE_RX_DROP_THRES : 0); - q->pg_skb = skb; - } else - skb = get_packet(adap, fl, G_RSPD_LEN(len), - eth ? 
SGE_RX_DROP_THRES : 0);
-			if (unlikely(!skb)) {
-				if (!eth)
-					goto no_mem;
-				q->rx_drops++;
-			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
-				__skb_pull(skb, 2);
-next_fl:
-			if (++fl->cidx == fl->size)
-				fl->cidx = 0;
-		} else
-			q->pure_rsps++;
-
-		if (flags & RSPD_CTRL_MASK) {
-			sleeping |= flags & RSPD_GTS_MASK;
-			handle_rsp_cntrl_info(qs, flags);
-		}
-
-		r++;
-		if (unlikely(++q->cidx == q->size)) {
-			q->cidx = 0;
-			q->gen ^= 1;
-			r = q->desc;
-		}
-		prefetch(r);
-
-		if (++q->credits >= (q->size / 4)) {
-			refill_rspq(adap, q, q->credits);
-			q->credits = 0;
-		}
-
-		packet_complete = flags &
-				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
-				   F_RSPD_ASYNC_NOTIF);
-
-		if (skb != NULL && packet_complete) {
-			if (eth)
-				rx_eth(adap, q, skb, ethpad, lro);
-			else {
-				q->offload_pkts++;
-				/* Preserve the RSS info in csum & priority */
-				skb->csum = rss_hi;
-				skb->priority = rss_lo;
-				ngathered = rx_offload(&adap->tdev, q, skb,
-						       offload_skbs,
-						       ngathered);
-			}
-
-			if (flags & F_RSPD_EOP)
-				clear_rspq_bufstate(q);
-		}
-		--budget_left;
-	}
-
-	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
-
-	if (sleeping)
-		check_ring_db(adap, qs, sleeping);
-
-	smp_mb();		/* commit Tx queue .processed updates */
-	if (unlikely(qs->txq_stopped != 0))
-		restart_tx(qs);
-
-	budget -= budget_left;
-	return budget;
-}
-
-static inline int is_pure_response(const struct rsp_desc *r)
-{
-	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
-
-	return (n | r->len_cq) == 0;
-}
-
-/**
- *	napi_rx_handler - the NAPI handler for Rx processing
- *	@napi: the napi instance
- *	@budget: how many packets we can process in this round
- *
- *	Handler for new data events when using NAPI.
- */
-static int napi_rx_handler(struct napi_struct *napi, int budget)
-{
-	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
-	struct adapter *adap = qs->adap;
-	int work_done = process_responses(adap, qs, budget);
-
-	if (likely(work_done < budget)) {
-		napi_complete(napi);
-
-		/*
-		 * Because we don't atomically flush the following
-		 * write it is possible that in very rare cases it can
-		 * reach the device in a way that races with a new
-		 * response being written plus an error interrupt
-		 * causing the NAPI interrupt handler below to return
-		 * unhandled status to the OS.  To protect against
-		 * this would require flushing the write and doing
-		 * both the write and the flush with interrupts off.
-		 * Way too expensive and unjustifiable given the
-		 * rarity of the race.
-		 *
-		 * The race cannot happen at all with MSI-X.
-		 */
-		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
-			     V_NEWTIMER(qs->rspq.next_holdoff) |
-			     V_NEWINDEX(qs->rspq.cidx));
-	}
-	return work_done;
-}
-
-/*
- * Returns true if the device is already scheduled for polling.
- */
-static inline int napi_is_scheduled(struct napi_struct *napi)
-{
-	return test_bit(NAPI_STATE_SCHED, &napi->state);
-}
-
-/**
- *	process_pure_responses - process pure responses from a response queue
- *	@adap: the adapter
- *	@qs: the queue set owning the response queue
- *	@r: the first pure response to process
- *
- *	A simpler version of process_responses() that handles only pure (i.e.,
- *	non-data-carrying) responses.  Such responses are too lightweight to
- *	justify calling a softirq under NAPI, so we handle them specially in
- *	the interrupt handler.  The function is called with a pointer to a
- *	response, which the caller must ensure is a valid pure response.
- *
- *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
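is_pure_response() above declares a response pure when no data accompanies it: no async notification, no immediate data, and a zero length/completion field, so a single OR of those bits can be compared against zero. A standalone model with illustrative flag values:

#include <stdint.h>
#include <assert.h>

#define F_ASYNC_NOTIF    0x1u	/* illustrative bit positions */
#define F_IMM_DATA_VALID 0x2u

struct rsp {
	uint32_t flags;
	uint32_t len_cq;	/* length / completion field */
};

static int is_pure(const struct rsp *r)
{
	uint32_t n = r->flags & (F_ASYNC_NOTIF | F_IMM_DATA_VALID);

	return (n | r->len_cq) == 0;	/* one test covers all three */
}

int main(void)
{
	struct rsp credit_only = { 0, 0 };	/* carries only credits */
	struct rsp with_data   = { 0, 64 };	/* references Rx data   */

	assert(is_pure(&credit_only));
	assert(!is_pure(&with_data));
	return 0;
}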
- */ -static int process_pure_responses(struct adapter *adap, struct sge_qset *qs, - struct rsp_desc *r) -{ - struct sge_rspq *q = &qs->rspq; - unsigned int sleeping = 0; - - do { - u32 flags = ntohl(r->flags); - - r++; - if (unlikely(++q->cidx == q->size)) { - q->cidx = 0; - q->gen ^= 1; - r = q->desc; - } - prefetch(r); - - if (flags & RSPD_CTRL_MASK) { - sleeping |= flags & RSPD_GTS_MASK; - handle_rsp_cntrl_info(qs, flags); - } - - q->pure_rsps++; - if (++q->credits >= (q->size / 4)) { - refill_rspq(adap, q, q->credits); - q->credits = 0; - } - if (!is_new_response(r, q)) - break; - rmb(); - } while (is_pure_response(r)); - - if (sleeping) - check_ring_db(adap, qs, sleeping); - - smp_mb(); /* commit Tx queue .processed updates */ - if (unlikely(qs->txq_stopped != 0)) - restart_tx(qs); - - return is_new_response(r, q); -} - -/** - * handle_responses - decide what to do with new responses in NAPI mode - * @adap: the adapter - * @q: the response queue - * - * This is used by the NAPI interrupt handlers to decide what to do with - * new SGE responses. If there are no new responses it returns -1. If - * there are new responses and they are pure (i.e., non-data carrying) - * it handles them straight in hard interrupt context as they are very - * cheap and don't deliver any packets. Finally, if there are any data - * signaling responses it schedules the NAPI handler. Returns 1 if it - * schedules NAPI, 0 if all new responses were pure. - * - * The caller must ascertain NAPI is not already running. - */ -static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) -{ - struct sge_qset *qs = rspq_to_qset(q); - struct rsp_desc *r = &q->desc[q->cidx]; - - if (!is_new_response(r, q)) - return -1; - rmb(); - if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { - t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | - V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); - return 0; - } - napi_schedule(&qs->napi); - return 1; -} - -/* - * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case - * (i.e., response queue serviced in hard interrupt). - */ -static irqreturn_t t3_sge_intr_msix(int irq, void *cookie) -{ - struct sge_qset *qs = cookie; - struct adapter *adap = qs->adap; - struct sge_rspq *q = &qs->rspq; - - spin_lock(&q->lock); - if (process_responses(adap, qs, -1) == 0) - q->unhandled_irqs++; - t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | - V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); - spin_unlock(&q->lock); - return IRQ_HANDLED; -} - -/* - * The MSI-X interrupt handler for an SGE response queue for the NAPI case - * (i.e., response queue serviced by NAPI polling). - */ -static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) -{ - struct sge_qset *qs = cookie; - struct sge_rspq *q = &qs->rspq; - - spin_lock(&q->lock); - - if (handle_responses(qs->adap, q) < 0) - q->unhandled_irqs++; - spin_unlock(&q->lock); - return IRQ_HANDLED; -} - -/* - * The non-NAPI MSI interrupt handler. This needs to handle data events from - * SGE response queues as well as error and other async events as they all use - * the same MSI vector. We use one SGE response queue per port in this mode - * and protect all response queues with queue 0's lock. 
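- *
- * Rough dispatch sketch (a paraphrase of the handler below, implying no
- * additional behaviour):
- *
- *	lock qs[0].rspq
- *	poll qs[0]; if it had work, write its GTS
- *	if nports == 2: poll qs[1]; if it had work, write its GTS
- *	if neither had work and t3_slow_intr_handler() reports nothing,
- *	count the interrupt as unhandled
- *	unlock qs[0].rspq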
- */ -static irqreturn_t t3_intr_msi(int irq, void *cookie) -{ - int new_packets = 0; - struct adapter *adap = cookie; - struct sge_rspq *q = &adap->sge.qs[0].rspq; - - spin_lock(&q->lock); - - if (process_responses(adap, &adap->sge.qs[0], -1)) { - t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | - V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); - new_packets = 1; - } - - if (adap->params.nports == 2 && - process_responses(adap, &adap->sge.qs[1], -1)) { - struct sge_rspq *q1 = &adap->sge.qs[1].rspq; - - t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) | - V_NEWTIMER(q1->next_holdoff) | - V_NEWINDEX(q1->cidx)); - new_packets = 1; - } - - if (!new_packets && t3_slow_intr_handler(adap) == 0) - q->unhandled_irqs++; - - spin_unlock(&q->lock); - return IRQ_HANDLED; -} - -static int rspq_check_napi(struct sge_qset *qs) -{ - struct sge_rspq *q = &qs->rspq; - - if (!napi_is_scheduled(&qs->napi) && - is_new_response(&q->desc[q->cidx], q)) { - napi_schedule(&qs->napi); - return 1; - } - return 0; -} - -/* - * The MSI interrupt handler for the NAPI case (i.e., response queues serviced - * by NAPI polling). Handles data events from SGE response queues as well as - * error and other async events as they all use the same MSI vector. We use - * one SGE response queue per port in this mode and protect all response - * queues with queue 0's lock. - */ -static irqreturn_t t3_intr_msi_napi(int irq, void *cookie) -{ - int new_packets; - struct adapter *adap = cookie; - struct sge_rspq *q = &adap->sge.qs[0].rspq; - - spin_lock(&q->lock); - - new_packets = rspq_check_napi(&adap->sge.qs[0]); - if (adap->params.nports == 2) - new_packets += rspq_check_napi(&adap->sge.qs[1]); - if (!new_packets && t3_slow_intr_handler(adap) == 0) - q->unhandled_irqs++; - - spin_unlock(&q->lock); - return IRQ_HANDLED; -} - -/* - * A helper function that processes responses and issues GTS. - */ -static inline int process_responses_gts(struct adapter *adap, - struct sge_rspq *rq) -{ - int work; - - work = process_responses(adap, rspq_to_qset(rq), -1); - t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | - V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx)); - return work; -} - -/* - * The legacy INTx interrupt handler. This needs to handle data events from - * SGE response queues as well as error and other async events as they all use - * the same interrupt pin. We use one SGE response queue per port in this mode - * and protect all response queues with queue 0's lock. - */ -static irqreturn_t t3_intr(int irq, void *cookie) -{ - int work_done, w0, w1; - struct adapter *adap = cookie; - struct sge_rspq *q0 = &adap->sge.qs[0].rspq; - struct sge_rspq *q1 = &adap->sge.qs[1].rspq; - - spin_lock(&q0->lock); - - w0 = is_new_response(&q0->desc[q0->cidx], q0); - w1 = adap->params.nports == 2 && - is_new_response(&q1->desc[q1->cidx], q1); - - if (likely(w0 | w1)) { - t3_write_reg(adap, A_PL_CLI, 0); - t3_read_reg(adap, A_PL_CLI); /* flush */ - - if (likely(w0)) - process_responses_gts(adap, q0); - - if (w1) - process_responses_gts(adap, q1); - - work_done = w0 | w1; - } else - work_done = t3_slow_intr_handler(adap); - - spin_unlock(&q0->lock); - return IRQ_RETVAL(work_done != 0); -} - -/* - * Interrupt handler for legacy INTx interrupts for T3B-based cards. - * Handles data events from SGE response queues as well as error and other - * async events as they all use the same interrupt pin. We use one SGE - * response queue per port in this mode and protect all response queues with - * queue 0's lock. 
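- *
- * The handler below treats the A_SG_DATA_INTR read as a bit map of
- * pending sources (a simplified reading of the code, not additional
- * hardware documentation):
- *
- *	map & F_ERRINTR		slow path: error/async events
- *	map & 1			new responses for queue set 0
- *	map & 2			new responses for queue set 1
- *
- * A map of 0 most likely means another device sharing the line raised
- * the interrupt, so IRQ_NONE is returned.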
- */ -static irqreturn_t t3b_intr(int irq, void *cookie) -{ - u32 map; - struct adapter *adap = cookie; - struct sge_rspq *q0 = &adap->sge.qs[0].rspq; - - t3_write_reg(adap, A_PL_CLI, 0); - map = t3_read_reg(adap, A_SG_DATA_INTR); - - if (unlikely(!map)) /* shared interrupt, most likely */ - return IRQ_NONE; - - spin_lock(&q0->lock); - - if (unlikely(map & F_ERRINTR)) - t3_slow_intr_handler(adap); - - if (likely(map & 1)) - process_responses_gts(adap, q0); - - if (map & 2) - process_responses_gts(adap, &adap->sge.qs[1].rspq); - - spin_unlock(&q0->lock); - return IRQ_HANDLED; -} - -/* - * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards. - * Handles data events from SGE response queues as well as error and other - * async events as they all use the same interrupt pin. We use one SGE - * response queue per port in this mode and protect all response queues with - * queue 0's lock. - */ -static irqreturn_t t3b_intr_napi(int irq, void *cookie) -{ - u32 map; - struct adapter *adap = cookie; - struct sge_qset *qs0 = &adap->sge.qs[0]; - struct sge_rspq *q0 = &qs0->rspq; - - t3_write_reg(adap, A_PL_CLI, 0); - map = t3_read_reg(adap, A_SG_DATA_INTR); - - if (unlikely(!map)) /* shared interrupt, most likely */ - return IRQ_NONE; - - spin_lock(&q0->lock); - - if (unlikely(map & F_ERRINTR)) - t3_slow_intr_handler(adap); - - if (likely(map & 1)) - napi_schedule(&qs0->napi); - - if (map & 2) - napi_schedule(&adap->sge.qs[1].napi); - - spin_unlock(&q0->lock); - return IRQ_HANDLED; -} - -/** - * t3_intr_handler - select the top-level interrupt handler - * @adap: the adapter - * @polling: whether using NAPI to service response queues - * - * Selects the top-level interrupt handler based on the type of interrupts - * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the - * response queues. - */ -irq_handler_t t3_intr_handler(struct adapter *adap, int polling) -{ - if (adap->flags & USING_MSIX) - return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix; - if (adap->flags & USING_MSI) - return polling ? t3_intr_msi_napi : t3_intr_msi; - if (adap->params.rev > 0) - return polling ? t3b_intr_napi : t3b_intr; - return t3_intr; -} - -#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ - F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ - V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ - F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ - F_HIRCQPARITYERROR) -#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR) -#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \ - F_RSPQDISABLED) - -/** - * t3_sge_err_intr_handler - SGE async event interrupt handler - * @adapter: the adapter - * - * Interrupt handler for SGE asynchronous (non-data) events. 
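- *
- * Sketch with a hypothetical cause value: if A_SG_INT_CAUSE read back
- * F_RSPQCREDITOVERFOW | F_RSPQDISABLED, the handler below would log both
- * conditions, clear them by writing the status back to the register, and
- * call t3_fatal_err() since both bits are part of SGE_FATALERR.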
- */
-void t3_sge_err_intr_handler(struct adapter *adapter)
-{
-	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
-				 ~F_FLEMPTY;
-
-	if (status & SGE_PARERR)
-		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
-			 status & SGE_PARERR);
-	if (status & SGE_FRAMINGERR)
-		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
-			 status & SGE_FRAMINGERR);
-
-	if (status & F_RSPQCREDITOVERFOW)
-		CH_ALERT(adapter, "SGE response queue credit overflow\n");
-
-	if (status & F_RSPQDISABLED) {
-		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
-
-		CH_ALERT(adapter,
-			 "packet delivered to disabled response queue "
-			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
-	}
-
-	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
-		queue_work(cxgb3_wq, &adapter->db_drop_task);
-
-	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
-		queue_work(cxgb3_wq, &adapter->db_full_task);
-
-	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
-		queue_work(cxgb3_wq, &adapter->db_empty_task);
-
-	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
-	if (status & SGE_FATALERR)
-		t3_fatal_err(adapter);
-}
-
-/**
- * sge_timer_tx - perform periodic maintenance of an SGE qset
- * @data: the SGE queue set to maintain
- *
- * Runs periodically from a timer to perform maintenance of an SGE queue
- * set. Its one task is to clean up any completed Tx descriptors that may
- * still be pending. Normal descriptor cleanup happens when new packets
- * are added to a Tx queue so this timer is relatively infrequent and does
- * any cleanup only if the Tx queue has not seen any new packets in a
- * while. We make a best effort attempt to reclaim descriptors, in that we
- * don't wait around if we cannot get a queue's lock (which most likely is
- * because someone else is queueing new packets and so will also handle
- * the clean up). Since control queues use immediate data exclusively we
- * don't bother cleaning them up here.
- *
- */
-static void sge_timer_tx(unsigned long data)
-{
-	struct sge_qset *qs = (struct sge_qset *)data;
-	struct port_info *pi = netdev_priv(qs->netdev);
-	struct adapter *adap = pi->adapter;
-	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
-	unsigned long next_period;
-
-	if (__netif_tx_trylock(qs->tx_q)) {
-		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
-						    TX_RECLAIM_TIMER_CHUNK);
-		__netif_tx_unlock(qs->tx_q);
-	}
-
-	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
-		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
-						     TX_RECLAIM_TIMER_CHUNK);
-		spin_unlock(&qs->txq[TXQ_OFLD].lock);
-	}
-
-	next_period = TX_RECLAIM_PERIOD >>
-		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
-		       TX_RECLAIM_TIMER_CHUNK);
-	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
-}
-
-/*
- * sge_timer_rx - perform periodic maintenance of an SGE qset
- * @data: the SGE queue set to maintain
- *
- * a) Replenishes Rx queues that have run out due to memory shortage.
- * Normally new Rx buffers are added when existing ones are consumed but
- * when out of memory a queue can become empty. We try to add only a few
- * buffers here, the queue will be replenished fully as these new buffers
- * are used up if memory shortage has subsided.
- *
- * b) Returns coalesced response queue credits in case a response queue is
- * starved.
- *
- */
-static void sge_timer_rx(unsigned long data)
-{
-	spinlock_t *lock;
-	struct sge_qset *qs = (struct sge_qset *)data;
-	struct port_info *pi = netdev_priv(qs->netdev);
-	struct adapter *adap = pi->adapter;
-	u32 status;
-
-	lock = adap->params.rev > 0 ?
-		&qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
-
-	if (!spin_trylock_irq(lock))
-		goto out;
-
-	if (napi_is_scheduled(&qs->napi))
-		goto unlock;
-
-	if (adap->params.rev < 4) {
-		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
-
-		if (status & (1 << qs->rspq.cntxt_id)) {
-			qs->rspq.starved++;
-			if (qs->rspq.credits) {
-				qs->rspq.credits--;
-				refill_rspq(adap, &qs->rspq, 1);
-				qs->rspq.restarted++;
-				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
-					     1 << qs->rspq.cntxt_id);
-			}
-		}
-	}
-
-	if (qs->fl[0].credits < qs->fl[0].size)
-		__refill_fl(adap, &qs->fl[0]);
-	if (qs->fl[1].credits < qs->fl[1].size)
-		__refill_fl(adap, &qs->fl[1]);
-
-unlock:
-	spin_unlock_irq(lock);
-out:
-	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
-}
-
-/**
- * t3_update_qset_coalesce - update coalescing settings for a queue set
- * @qs: the SGE queue set
- * @p: new queue set parameters
- *
- * Update the coalescing settings for an SGE queue set. Nothing is done
- * if the queue set is not initialized yet.
- */
-void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
-{
-	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
-	qs->rspq.polling = p->polling;
-	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
-}
-
-/**
- * t3_sge_alloc_qset - initialize an SGE queue set
- * @adapter: the adapter
- * @id: the queue set id
- * @nports: how many Ethernet ports will be using this queue set
- * @irq_vec_idx: the IRQ vector index for response queue interrupts
- * @p: configuration parameters for this queue set
- * @ntxq: number of Tx queues for the queue set
- * @dev: net device associated with this queue set
- * @netdevq: net device TX queue associated with this queue set
- *
- * Allocate resources and initialize an SGE queue set. A queue set
- * comprises a response queue, two Rx free-buffer queues, and up to 3
- * Tx queues. The Tx queues are assigned roles in the order Ethernet
- * queue, offload queue, and control queue.
- */
-int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
-		      int irq_vec_idx, const struct qset_params *p,
-		      int ntxq, struct net_device *dev,
-		      struct netdev_queue *netdevq)
-{
-	int i, avail, ret = -ENOMEM;
-	struct sge_qset *q = &adapter->sge.qs[id];
-
-	init_qset_cntxt(q, id);
-	setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
-	setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
-
-	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
-				   sizeof(struct rx_desc),
-				   sizeof(struct rx_sw_desc),
-				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
-	if (!q->fl[0].desc)
-		goto err;
-
-	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
-				   sizeof(struct rx_desc),
-				   sizeof(struct rx_sw_desc),
-				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
-	if (!q->fl[1].desc)
-		goto err;
-
-	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
-				  sizeof(struct rsp_desc), 0,
-				  &q->rspq.phys_addr, NULL);
-	if (!q->rspq.desc)
-		goto err;
-
-	for (i = 0; i < ntxq; ++i) {
-		/*
-		 * The control queue always uses immediate data so does not
-		 * need to keep track of any sk_buffs.
-		 */
-		size_t sz = i == TXQ_CTRL ?
0 : sizeof(struct tx_sw_desc); - - q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], - sizeof(struct tx_desc), sz, - &q->txq[i].phys_addr, - &q->txq[i].sdesc); - if (!q->txq[i].desc) - goto err; - - q->txq[i].gen = 1; - q->txq[i].size = p->txq_size[i]; - spin_lock_init(&q->txq[i].lock); - skb_queue_head_init(&q->txq[i].sendq); - } - - tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq, - (unsigned long)q); - tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq, - (unsigned long)q); - - q->fl[0].gen = q->fl[1].gen = 1; - q->fl[0].size = p->fl_size; - q->fl[1].size = p->jumbo_size; - - q->rspq.gen = 1; - q->rspq.size = p->rspq_size; - spin_lock_init(&q->rspq.lock); - skb_queue_head_init(&q->rspq.rx_queue); - - q->txq[TXQ_ETH].stop_thres = nports * - flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); - -#if FL0_PG_CHUNK_SIZE > 0 - q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; -#else - q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); -#endif -#if FL1_PG_CHUNK_SIZE > 0 - q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; -#else - q->fl[1].buf_size = is_offload(adapter) ? - (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : - MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt); -#endif - - q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; - q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; - q->fl[0].order = FL0_PG_ORDER; - q->fl[1].order = FL1_PG_ORDER; - q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; - q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; - - spin_lock_irq(&adapter->sge.reg_lock); - - /* FL threshold comparison uses < */ - ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, - q->rspq.phys_addr, q->rspq.size, - q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); - if (ret) - goto err_unlock; - - for (i = 0; i < SGE_RXQ_PER_SET; ++i) { - ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, - q->fl[i].phys_addr, q->fl[i].size, - q->fl[i].buf_size - SGE_PG_RSVD, - p->cong_thres, 1, 0); - if (ret) - goto err_unlock; - } - - ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, - SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, - q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, - 1, 0); - if (ret) - goto err_unlock; - - if (ntxq > 1) { - ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, - USE_GTS, SGE_CNTXT_OFLD, id, - q->txq[TXQ_OFLD].phys_addr, - q->txq[TXQ_OFLD].size, 0, 1, 0); - if (ret) - goto err_unlock; - } - - if (ntxq > 2) { - ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, - SGE_CNTXT_CTRL, id, - q->txq[TXQ_CTRL].phys_addr, - q->txq[TXQ_CTRL].size, - q->txq[TXQ_CTRL].token, 1, 0); - if (ret) - goto err_unlock; - } - - spin_unlock_irq(&adapter->sge.reg_lock); - - q->adap = adapter; - q->netdev = dev; - q->tx_q = netdevq; - t3_update_qset_coalesce(q, p); - - avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, - GFP_KERNEL | __GFP_COMP); - if (!avail) { - CH_ALERT(adapter, "free list queue 0 initialization failed\n"); - goto err; - } - if (avail < q->fl[0].size) - CH_WARN(adapter, "free list queue 0 enabled with %d credits\n", - avail); - - avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, - GFP_KERNEL | __GFP_COMP); - if (avail < q->fl[1].size) - CH_WARN(adapter, "free list queue 1 enabled with %d credits\n", - avail); - refill_rspq(adapter, &q->rspq, q->rspq.size - 1); - - t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | - V_NEWTIMER(q->rspq.holdoff_tmr)); - - return 0; - -err_unlock: - spin_unlock_irq(&adapter->sge.reg_lock); -err: - t3_free_qset(adapter, q); - return ret; -} - -/** - * t3_start_sge_timers - start SGE 
timer callbacks
- * @adap: the adapter
- *
- * Starts each SGE queue set's timer callbacks
- */
-void t3_start_sge_timers(struct adapter *adap)
-{
-	int i;
-
-	for (i = 0; i < SGE_QSETS; ++i) {
-		struct sge_qset *q = &adap->sge.qs[i];
-
-		if (q->tx_reclaim_timer.function)
-			mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
-
-		if (q->rx_reclaim_timer.function)
-			mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
-	}
-}
-
-/**
- * t3_stop_sge_timers - stop SGE timer callbacks
- * @adap: the adapter
- *
- * Stops each SGE queue set's timer callbacks
- */
-void t3_stop_sge_timers(struct adapter *adap)
-{
-	int i;
-
-	for (i = 0; i < SGE_QSETS; ++i) {
-		struct sge_qset *q = &adap->sge.qs[i];
-
-		if (q->tx_reclaim_timer.function)
-			del_timer_sync(&q->tx_reclaim_timer);
-		if (q->rx_reclaim_timer.function)
-			del_timer_sync(&q->rx_reclaim_timer);
-	}
-}
-
-/**
- * t3_free_sge_resources - free SGE resources
- * @adap: the adapter
- *
- * Frees resources used by the SGE queue sets.
- */
-void t3_free_sge_resources(struct adapter *adap)
-{
-	int i;
-
-	for (i = 0; i < SGE_QSETS; ++i)
-		t3_free_qset(adap, &adap->sge.qs[i]);
-}
-
-/**
- * t3_sge_start - enable SGE
- * @adap: the adapter
- *
- * Enables the SGE for DMAs. This is the last step in starting packet
- * transfers.
- */
-void t3_sge_start(struct adapter *adap)
-{
-	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
-}
-
-/**
- * t3_sge_stop - disable SGE operation
- * @adap: the adapter
- *
- * Disables the DMA engine. This can be called in emergencies (e.g.,
- * from error interrupts) or from normal process context. In the latter
- * case it also disables any pending queue restart tasklets. Note that
- * if it is called in interrupt context it cannot disable the restart
- * tasklets as it cannot wait, however the tasklets will have no effect
- * since the doorbells are disabled and the driver will call this again
- * later from process context, at which time the tasklets will be stopped
- * if they are still running.
- */
-void t3_sge_stop(struct adapter *adap)
-{
-	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
-	if (!in_interrupt()) {
-		int i;
-
-		for (i = 0; i < SGE_QSETS; ++i) {
-			struct sge_qset *qs = &adap->sge.qs[i];
-
-			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
-			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
-		}
-	}
-}
-
-/**
- * t3_sge_init - initialize SGE
- * @adap: the adapter
- * @p: the SGE parameters
- *
- * Performs SGE initialization needed every time after a chip reset.
- * We do not initialize any of the queue sets here, instead the driver
- * top-level must request those individually. We also do not enable DMA
- * here, that should be done after the queues have been set up.
- */
-void t3_sge_init(struct adapter *adap, struct sge_params *p)
-{
-	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
-
-	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
-	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
-	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
-	       V_USERSPACESIZE(ups ?
ups - 1 : 0) | F_ISCSICOALESCING; -#if SGE_NUM_GENBITS == 1 - ctrl |= F_EGRGENCTRL; -#endif - if (adap->params.rev > 0) { - if (!(adap->flags & (USING_MSIX | USING_MSI))) - ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ; - } - t3_write_reg(adap, A_SG_CONTROL, ctrl); - t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | - V_LORCQDRBTHRSH(512)); - t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); - t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | - V_TIMEOUT(200 * core_ticks_per_usec(adap))); - t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, - adap->params.rev < T3_REV_C ? 1000 : 500); - t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); - t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); - t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); - t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff)); - t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024); -} - -/** - * t3_sge_prep - one-time SGE initialization - * @adap: the associated adapter - * @p: SGE parameters - * - * Performs one-time initialization of SGE SW state. Includes determining - * defaults for the assorted SGE parameters, which admins can change until - * they are used to initialize the SGE. - */ -void t3_sge_prep(struct adapter *adap, struct sge_params *p) -{ - int i; - - p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) - - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - - for (i = 0; i < SGE_QSETS; ++i) { - struct qset_params *q = p->qset + i; - - q->polling = adap->params.rev > 0; - q->coalesce_usecs = 5; - q->rspq_size = 1024; - q->fl_size = 1024; - q->jumbo_size = 512; - q->txq_size[TXQ_ETH] = 1024; - q->txq_size[TXQ_OFLD] = 1024; - q->txq_size[TXQ_CTRL] = 256; - q->cong_thres = 0; - } - - spin_lock_init(&adap->sge.reg_lock); -} diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/cxgb3/sge_defs.h deleted file mode 100644 index 29b6c80..0000000 --- a/drivers/net/cxgb3/sge_defs.h +++ /dev/null @@ -1,255 +0,0 @@ -/* - * This file is automatically generated --- any changes will be lost. 
- */ - -#ifndef _SGE_DEFS_H -#define _SGE_DEFS_H - -#define S_EC_CREDITS 0 -#define M_EC_CREDITS 0x7FFF -#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS) -#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS) - -#define S_EC_GTS 15 -#define V_EC_GTS(x) ((x) << S_EC_GTS) -#define F_EC_GTS V_EC_GTS(1U) - -#define S_EC_INDEX 16 -#define M_EC_INDEX 0xFFFF -#define V_EC_INDEX(x) ((x) << S_EC_INDEX) -#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX) - -#define S_EC_SIZE 0 -#define M_EC_SIZE 0xFFFF -#define V_EC_SIZE(x) ((x) << S_EC_SIZE) -#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE) - -#define S_EC_BASE_LO 16 -#define M_EC_BASE_LO 0xFFFF -#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO) -#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO) - -#define S_EC_BASE_HI 0 -#define M_EC_BASE_HI 0xF -#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI) -#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI) - -#define S_EC_RESPQ 4 -#define M_EC_RESPQ 0x7 -#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ) -#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ) - -#define S_EC_TYPE 7 -#define M_EC_TYPE 0x7 -#define V_EC_TYPE(x) ((x) << S_EC_TYPE) -#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE) - -#define S_EC_GEN 10 -#define V_EC_GEN(x) ((x) << S_EC_GEN) -#define F_EC_GEN V_EC_GEN(1U) - -#define S_EC_UP_TOKEN 11 -#define M_EC_UP_TOKEN 0xFFFFF -#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN) -#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN) - -#define S_EC_VALID 31 -#define V_EC_VALID(x) ((x) << S_EC_VALID) -#define F_EC_VALID V_EC_VALID(1U) - -#define S_RQ_MSI_VEC 20 -#define M_RQ_MSI_VEC 0x3F -#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC) -#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC) - -#define S_RQ_INTR_EN 26 -#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN) -#define F_RQ_INTR_EN V_RQ_INTR_EN(1U) - -#define S_RQ_GEN 28 -#define V_RQ_GEN(x) ((x) << S_RQ_GEN) -#define F_RQ_GEN V_RQ_GEN(1U) - -#define S_CQ_INDEX 0 -#define M_CQ_INDEX 0xFFFF -#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX) -#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX) - -#define S_CQ_SIZE 16 -#define M_CQ_SIZE 0xFFFF -#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE) -#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE) - -#define S_CQ_BASE_HI 0 -#define M_CQ_BASE_HI 0xFFFFF -#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI) -#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI) - -#define S_CQ_RSPQ 20 -#define M_CQ_RSPQ 0x3F -#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ) -#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ) - -#define S_CQ_ASYNC_NOTIF 26 -#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF) -#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U) - -#define S_CQ_ARMED 27 -#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED) -#define F_CQ_ARMED V_CQ_ARMED(1U) - -#define S_CQ_ASYNC_NOTIF_SOL 28 -#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL) -#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U) - -#define S_CQ_GEN 29 -#define V_CQ_GEN(x) ((x) << S_CQ_GEN) -#define F_CQ_GEN V_CQ_GEN(1U) - -#define S_CQ_ERR 30 -#define V_CQ_ERR(x) ((x) << S_CQ_ERR) -#define F_CQ_ERR V_CQ_ERR(1U) - -#define S_CQ_OVERFLOW_MODE 31 -#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE) -#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U) - -#define S_CQ_CREDITS 0 -#define M_CQ_CREDITS 0xFFFF -#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS) -#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS) - -#define S_CQ_CREDIT_THRES 16 -#define M_CQ_CREDIT_THRES 0x1FFF -#define 
V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES) -#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES) - -#define S_FL_BASE_HI 0 -#define M_FL_BASE_HI 0xFFFFF -#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI) -#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI) - -#define S_FL_INDEX_LO 20 -#define M_FL_INDEX_LO 0xFFF -#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO) -#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO) - -#define S_FL_INDEX_HI 0 -#define M_FL_INDEX_HI 0xF -#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI) -#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI) - -#define S_FL_SIZE 4 -#define M_FL_SIZE 0xFFFF -#define V_FL_SIZE(x) ((x) << S_FL_SIZE) -#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE) - -#define S_FL_GEN 20 -#define V_FL_GEN(x) ((x) << S_FL_GEN) -#define F_FL_GEN V_FL_GEN(1U) - -#define S_FL_ENTRY_SIZE_LO 21 -#define M_FL_ENTRY_SIZE_LO 0x7FF -#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO) -#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO) - -#define S_FL_ENTRY_SIZE_HI 0 -#define M_FL_ENTRY_SIZE_HI 0x1FFFFF -#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI) -#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI) - -#define S_FL_CONG_THRES 21 -#define M_FL_CONG_THRES 0x3FF -#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES) -#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES) - -#define S_FL_GTS 31 -#define V_FL_GTS(x) ((x) << S_FL_GTS) -#define F_FL_GTS V_FL_GTS(1U) - -#define S_FLD_GEN1 31 -#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1) -#define F_FLD_GEN1 V_FLD_GEN1(1U) - -#define S_FLD_GEN2 0 -#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2) -#define F_FLD_GEN2 V_FLD_GEN2(1U) - -#define S_RSPD_TXQ1_CR 0 -#define M_RSPD_TXQ1_CR 0x7F -#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR) -#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR) - -#define S_RSPD_TXQ1_GTS 7 -#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS) -#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U) - -#define S_RSPD_TXQ2_CR 8 -#define M_RSPD_TXQ2_CR 0x7F -#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR) -#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR) - -#define S_RSPD_TXQ2_GTS 15 -#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS) -#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U) - -#define S_RSPD_TXQ0_CR 16 -#define M_RSPD_TXQ0_CR 0x7F -#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR) -#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR) - -#define S_RSPD_TXQ0_GTS 23 -#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS) -#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U) - -#define S_RSPD_EOP 24 -#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP) -#define F_RSPD_EOP V_RSPD_EOP(1U) - -#define S_RSPD_SOP 25 -#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP) -#define F_RSPD_SOP V_RSPD_SOP(1U) - -#define S_RSPD_ASYNC_NOTIF 26 -#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF) -#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U) - -#define S_RSPD_FL0_GTS 27 -#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS) -#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U) - -#define S_RSPD_FL1_GTS 28 -#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS) -#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U) - -#define S_RSPD_IMM_DATA_VALID 29 -#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID) -#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U) - -#define S_RSPD_OFFLOAD 30 -#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD) -#define F_RSPD_OFFLOAD 
V_RSPD_OFFLOAD(1U) - -#define S_RSPD_GEN1 31 -#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1) -#define F_RSPD_GEN1 V_RSPD_GEN1(1U) - -#define S_RSPD_LEN 0 -#define M_RSPD_LEN 0x7FFFFFFF -#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN) -#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN) - -#define S_RSPD_FLQ 31 -#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ) -#define F_RSPD_FLQ V_RSPD_FLQ(1U) - -#define S_RSPD_GEN2 0 -#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2) -#define F_RSPD_GEN2 V_RSPD_GEN2(1U) - -#define S_RSPD_INR_VEC 1 -#define M_RSPD_INR_VEC 0x7F -#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC) -#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC) - -#endif /* _SGE_DEFS_H */ diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h deleted file mode 100644 index 852c399..0000000 --- a/drivers/net/cxgb3/t3_cpl.h +++ /dev/null @@ -1,1495 +0,0 @@ -/* - * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef T3_CPL_H -#define T3_CPL_H - -#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD) -# include -#endif - -enum CPL_opcode { - CPL_PASS_OPEN_REQ = 0x1, - CPL_PASS_ACCEPT_RPL = 0x2, - CPL_ACT_OPEN_REQ = 0x3, - CPL_SET_TCB = 0x4, - CPL_SET_TCB_FIELD = 0x5, - CPL_GET_TCB = 0x6, - CPL_PCMD = 0x7, - CPL_CLOSE_CON_REQ = 0x8, - CPL_CLOSE_LISTSRV_REQ = 0x9, - CPL_ABORT_REQ = 0xA, - CPL_ABORT_RPL = 0xB, - CPL_TX_DATA = 0xC, - CPL_RX_DATA_ACK = 0xD, - CPL_TX_PKT = 0xE, - CPL_RTE_DELETE_REQ = 0xF, - CPL_RTE_WRITE_REQ = 0x10, - CPL_RTE_READ_REQ = 0x11, - CPL_L2T_WRITE_REQ = 0x12, - CPL_L2T_READ_REQ = 0x13, - CPL_SMT_WRITE_REQ = 0x14, - CPL_SMT_READ_REQ = 0x15, - CPL_TX_PKT_LSO = 0x16, - CPL_PCMD_READ = 0x17, - CPL_BARRIER = 0x18, - CPL_TID_RELEASE = 0x1A, - - CPL_CLOSE_LISTSRV_RPL = 0x20, - CPL_ERROR = 0x21, - CPL_GET_TCB_RPL = 0x22, - CPL_L2T_WRITE_RPL = 0x23, - CPL_PCMD_READ_RPL = 0x24, - CPL_PCMD_RPL = 0x25, - CPL_PEER_CLOSE = 0x26, - CPL_RTE_DELETE_RPL = 0x27, - CPL_RTE_WRITE_RPL = 0x28, - CPL_RX_DDP_COMPLETE = 0x29, - CPL_RX_PHYS_ADDR = 0x2A, - CPL_RX_PKT = 0x2B, - CPL_RX_URG_NOTIFY = 0x2C, - CPL_SET_TCB_RPL = 0x2D, - CPL_SMT_WRITE_RPL = 0x2E, - CPL_TX_DATA_ACK = 0x2F, - - CPL_ABORT_REQ_RSS = 0x30, - CPL_ABORT_RPL_RSS = 0x31, - CPL_CLOSE_CON_RPL = 0x32, - CPL_ISCSI_HDR = 0x33, - CPL_L2T_READ_RPL = 0x34, - CPL_RDMA_CQE = 0x35, - CPL_RDMA_CQE_READ_RSP = 0x36, - CPL_RDMA_CQE_ERR = 0x37, - CPL_RTE_READ_RPL = 0x38, - CPL_RX_DATA = 0x39, - - CPL_ACT_OPEN_RPL = 0x40, - CPL_PASS_OPEN_RPL = 0x41, - CPL_RX_DATA_DDP = 0x42, - CPL_SMT_READ_RPL = 0x43, - - CPL_ACT_ESTABLISH = 0x50, - CPL_PASS_ESTABLISH = 0x51, - - CPL_PASS_ACCEPT_REQ = 0x70, - - CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */ - - CPL_TX_DMA_ACK = 0xA0, - CPL_RDMA_READ_REQ = 0xA1, - CPL_RDMA_TERMINATE = 0xA2, - CPL_TRACE_PKT = 0xA3, - CPL_RDMA_EC_STATUS = 0xA5, - - NUM_CPL_CMDS /* must be last and previous entries must be sorted */ -}; - -enum CPL_error { - CPL_ERR_NONE = 0, - CPL_ERR_TCAM_PARITY = 1, - CPL_ERR_TCAM_FULL = 3, - CPL_ERR_CONN_RESET = 20, - CPL_ERR_CONN_EXIST = 22, - CPL_ERR_ARP_MISS = 23, - CPL_ERR_BAD_SYN = 24, - CPL_ERR_CONN_TIMEDOUT = 30, - CPL_ERR_XMIT_TIMEDOUT = 31, - CPL_ERR_PERSIST_TIMEDOUT = 32, - CPL_ERR_FINWAIT2_TIMEDOUT = 33, - CPL_ERR_KEEPALIVE_TIMEDOUT = 34, - CPL_ERR_RTX_NEG_ADVICE = 35, - CPL_ERR_PERSIST_NEG_ADVICE = 36, - CPL_ERR_ABORT_FAILED = 42, - CPL_ERR_GENERAL = 99 -}; - -enum { - CPL_CONN_POLICY_AUTO = 0, - CPL_CONN_POLICY_ASK = 1, - CPL_CONN_POLICY_DENY = 3 -}; - -enum { - ULP_MODE_NONE = 0, - ULP_MODE_ISCSI = 2, - ULP_MODE_RDMA = 4, - ULP_MODE_TCPDDP = 5 -}; - -enum { - ULP_CRC_HEADER = 1 << 0, - ULP_CRC_DATA = 1 << 1 -}; - -enum { - CPL_PASS_OPEN_ACCEPT, - CPL_PASS_OPEN_REJECT -}; - -enum { - CPL_ABORT_SEND_RST = 0, - CPL_ABORT_NO_RST, - CPL_ABORT_POST_CLOSE_REQ = 2 -}; - -enum { /* TX_PKT_LSO ethernet types */ - CPL_ETH_II, - CPL_ETH_II_VLAN, - CPL_ETH_802_3, - CPL_ETH_802_3_VLAN -}; - -enum { /* TCP congestion control algorithms */ - CONG_ALG_RENO, - CONG_ALG_TAHOE, - CONG_ALG_NEWRENO, - CONG_ALG_HIGHSPEED -}; - -enum { /* RSS hash type */ - RSS_HASH_NONE = 0, - RSS_HASH_2_TUPLE = 1, - RSS_HASH_4_TUPLE = 2, - RSS_HASH_TCPV6 = 3 -}; - -union opcode_tid { - __be32 opcode_tid; - __u8 opcode; -}; - -#define S_OPCODE 24 -#define V_OPCODE(x) ((x) << S_OPCODE) -#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) -#define G_TID(x) ((x) & 0xFFFFFF) - -#define S_QNUM 0 -#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF) - -#define S_HASHTYPE 22 -#define M_HASHTYPE 0x3 
-#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE) - -/* tid is assumed to be 24-bits */ -#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid)) - -#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) - -/* extract the TID from a CPL command */ -#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd)))) - -struct tcp_options { - __be16 mss; - __u8 wsf; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8:5; - __u8 ecn:1; - __u8 sack:1; - __u8 tstamp:1; -#else - __u8 tstamp:1; - __u8 sack:1; - __u8 ecn:1; - __u8:5; -#endif -}; - -struct rss_header { - __u8 opcode; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 cpu_idx:6; - __u8 hash_type:2; -#else - __u8 hash_type:2; - __u8 cpu_idx:6; -#endif - __be16 cq_idx; - __be32 rss_hash_val; -}; - -#ifndef CHELSIO_FW -struct work_request_hdr { - __be32 wr_hi; - __be32 wr_lo; -}; - -/* wr_hi fields */ -#define S_WR_SGE_CREDITS 0 -#define M_WR_SGE_CREDITS 0xFF -#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS) -#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS) - -#define S_WR_SGLSFLT 8 -#define M_WR_SGLSFLT 0xFF -#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT) -#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT) - -#define S_WR_BCNTLFLT 16 -#define M_WR_BCNTLFLT 0xF -#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT) -#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT) - -#define S_WR_DATATYPE 20 -#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE) -#define F_WR_DATATYPE V_WR_DATATYPE(1U) - -#define S_WR_COMPL 21 -#define V_WR_COMPL(x) ((x) << S_WR_COMPL) -#define F_WR_COMPL V_WR_COMPL(1U) - -#define S_WR_EOP 22 -#define V_WR_EOP(x) ((x) << S_WR_EOP) -#define F_WR_EOP V_WR_EOP(1U) - -#define S_WR_SOP 23 -#define V_WR_SOP(x) ((x) << S_WR_SOP) -#define F_WR_SOP V_WR_SOP(1U) - -#define S_WR_OP 24 -#define M_WR_OP 0xFF -#define V_WR_OP(x) ((x) << S_WR_OP) -#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP) - -/* wr_lo fields */ -#define S_WR_LEN 0 -#define M_WR_LEN 0xFF -#define V_WR_LEN(x) ((x) << S_WR_LEN) -#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN) - -#define S_WR_TID 8 -#define M_WR_TID 0xFFFFF -#define V_WR_TID(x) ((x) << S_WR_TID) -#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID) - -#define S_WR_CR_FLUSH 30 -#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH) -#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U) - -#define S_WR_GEN 31 -#define V_WR_GEN(x) ((x) << S_WR_GEN) -#define F_WR_GEN V_WR_GEN(1U) - -# define WR_HDR struct work_request_hdr wr -# define RSS_HDR -#else -# define WR_HDR -# define RSS_HDR struct rss_header rss_hdr; -#endif - -/* option 0 lower-half fields */ -#define S_CPL_STATUS 0 -#define M_CPL_STATUS 0xFF -#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS) -#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS) - -#define S_INJECT_TIMER 6 -#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER) -#define F_INJECT_TIMER V_INJECT_TIMER(1U) - -#define S_NO_OFFLOAD 7 -#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD) -#define F_NO_OFFLOAD V_NO_OFFLOAD(1U) - -#define S_ULP_MODE 8 -#define M_ULP_MODE 0xF -#define V_ULP_MODE(x) ((x) << S_ULP_MODE) -#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE) - -#define S_RCV_BUFSIZ 12 -#define M_RCV_BUFSIZ 0x3FFF -#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ) -#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ) - -#define S_TOS 26 -#define M_TOS 0x3F -#define V_TOS(x) ((x) << S_TOS) -#define G_TOS(x) (((x) >> S_TOS) & M_TOS) - -/* option 0 upper-half fields */ -#define S_DELACK 0 -#define V_DELACK(x) ((x) << S_DELACK) -#define F_DELACK V_DELACK(1U) - 
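All of the register/option-word helpers in these headers follow one
shift-and-mask idiom: S_* gives a field's bit offset, M_* its width mask,
V_*() packs a value into place, G_*() extracts one, and F_* names a
single-bit flag. A minimal stand-alone sketch of the idiom (a hypothetical
userspace demo that merely re-creates the TOS accessors defined above; the
packed value 0x2e is arbitrary):

	#include <stdio.h>
	#include <stdint.h>

	#define S_TOS    26
	#define M_TOS    0x3F
	#define V_TOS(x) ((x) << S_TOS)
	#define G_TOS(x) (((x) >> S_TOS) & M_TOS)

	int main(void)
	{
		/* pack a 6-bit TOS value into the option 0 lower half */
		uint32_t opt0l = V_TOS(0x2eu);

		/* extract it again: prints opt0l = 0xb8000000, tos = 0x2e */
		printf("opt0l = 0x%08x, tos = 0x%02x\n",
		       (unsigned int)opt0l, (unsigned int)G_TOS(opt0l));
		return 0;
	}
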
-#define S_NO_CONG 1 -#define V_NO_CONG(x) ((x) << S_NO_CONG) -#define F_NO_CONG V_NO_CONG(1U) - -#define S_SRC_MAC_SEL 2 -#define M_SRC_MAC_SEL 0x3 -#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL) -#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL) - -#define S_L2T_IDX 4 -#define M_L2T_IDX 0x7FF -#define V_L2T_IDX(x) ((x) << S_L2T_IDX) -#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX) - -#define S_TX_CHANNEL 15 -#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL) -#define F_TX_CHANNEL V_TX_CHANNEL(1U) - -#define S_TCAM_BYPASS 16 -#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS) -#define F_TCAM_BYPASS V_TCAM_BYPASS(1U) - -#define S_NAGLE 17 -#define V_NAGLE(x) ((x) << S_NAGLE) -#define F_NAGLE V_NAGLE(1U) - -#define S_WND_SCALE 18 -#define M_WND_SCALE 0xF -#define V_WND_SCALE(x) ((x) << S_WND_SCALE) -#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE) - -#define S_KEEP_ALIVE 22 -#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE) -#define F_KEEP_ALIVE V_KEEP_ALIVE(1U) - -#define S_MAX_RETRANS 23 -#define M_MAX_RETRANS 0xF -#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS) -#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS) - -#define S_MAX_RETRANS_OVERRIDE 27 -#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE) -#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U) - -#define S_MSS_IDX 28 -#define M_MSS_IDX 0xF -#define V_MSS_IDX(x) ((x) << S_MSS_IDX) -#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX) - -/* option 1 fields */ -#define S_RSS_ENABLE 0 -#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE) -#define F_RSS_ENABLE V_RSS_ENABLE(1U) - -#define S_RSS_MASK_LEN 1 -#define M_RSS_MASK_LEN 0x7 -#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN) -#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN) - -#define S_CPU_IDX 4 -#define M_CPU_IDX 0x3F -#define V_CPU_IDX(x) ((x) << S_CPU_IDX) -#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX) - -#define S_MAC_MATCH_VALID 18 -#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID) -#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U) - -#define S_CONN_POLICY 19 -#define M_CONN_POLICY 0x3 -#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY) -#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY) - -#define S_SYN_DEFENSE 21 -#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE) -#define F_SYN_DEFENSE V_SYN_DEFENSE(1U) - -#define S_VLAN_PRI 22 -#define M_VLAN_PRI 0x3 -#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI) -#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI) - -#define S_VLAN_PRI_VALID 24 -#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID) -#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U) - -#define S_PKT_TYPE 25 -#define M_PKT_TYPE 0x3 -#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE) -#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE) - -#define S_MAC_MATCH 27 -#define M_MAC_MATCH 0x1F -#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH) -#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH) - -/* option 2 fields */ -#define S_CPU_INDEX 0 -#define M_CPU_INDEX 0x7F -#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX) -#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX) - -#define S_CPU_INDEX_VALID 7 -#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID) -#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U) - -#define S_RX_COALESCE 8 -#define M_RX_COALESCE 0x3 -#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE) -#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE) - -#define S_RX_COALESCE_VALID 10 -#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID) -#define 
F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U) - -#define S_CONG_CONTROL_FLAVOR 11 -#define M_CONG_CONTROL_FLAVOR 0x3 -#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR) -#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR) - -#define S_PACING_FLAVOR 13 -#define M_PACING_FLAVOR 0x3 -#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR) -#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR) - -#define S_FLAVORS_VALID 15 -#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID) -#define F_FLAVORS_VALID V_FLAVORS_VALID(1U) - -#define S_RX_FC_DISABLE 16 -#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE) -#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U) - -#define S_RX_FC_VALID 17 -#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID) -#define F_RX_FC_VALID V_RX_FC_VALID(1U) - -struct cpl_pass_open_req { - WR_HDR; - union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be32 local_ip; - __be32 peer_ip; - __be32 opt0h; - __be32 opt0l; - __be32 peer_netmask; - __be32 opt1; -}; - -struct cpl_pass_open_rpl { - RSS_HDR union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be32 local_ip; - __be32 peer_ip; - __u8 resvd[7]; - __u8 status; -}; - -struct cpl_pass_establish { - RSS_HDR union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be32 local_ip; - __be32 peer_ip; - __be32 tos_tid; - __be16 l2t_idx; - __be16 tcp_opt; - __be32 snd_isn; - __be32 rcv_isn; -}; - -/* cpl_pass_establish.tos_tid fields */ -#define S_PASS_OPEN_TID 0 -#define M_PASS_OPEN_TID 0xFFFFFF -#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID) -#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID) - -#define S_PASS_OPEN_TOS 24 -#define M_PASS_OPEN_TOS 0xFF -#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS) -#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS) - -/* cpl_pass_establish.l2t_idx fields */ -#define S_L2T_IDX16 5 -#define M_L2T_IDX16 0x7FF -#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16) -#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16) - -/* cpl_pass_establish.tcp_opt fields (also applies act_open_establish) */ -#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) -#define G_TCPOPT_SACK(x) (((x) >> 6) & 1) -#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) -#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) -#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf) - -struct cpl_pass_accept_req { - RSS_HDR union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be32 local_ip; - __be32 peer_ip; - __be32 tos_tid; - struct tcp_options tcp_options; - __u8 dst_mac[6]; - __be16 vlan_tag; - __u8 src_mac[6]; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8:3; - __u8 addr_idx:3; - __u8 port_idx:1; - __u8 exact_match:1; -#else - __u8 exact_match:1; - __u8 port_idx:1; - __u8 addr_idx:3; - __u8:3; -#endif - __u8 rsvd; - __be32 rcv_isn; - __be32 rsvd2; -}; - -struct cpl_pass_accept_rpl { - WR_HDR; - union opcode_tid ot; - __be32 opt2; - __be32 rsvd; - __be32 peer_ip; - __be32 opt0h; - __be32 opt0l_status; -}; - -struct cpl_act_open_req { - WR_HDR; - union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be32 local_ip; - __be32 peer_ip; - __be32 opt0h; - __be32 opt0l; - __be32 params; - __be32 opt2; -}; - -/* cpl_act_open_req.params fields */ -#define S_AOPEN_VLAN_PRI 9 -#define M_AOPEN_VLAN_PRI 0x3 -#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI) -#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI) - -#define S_AOPEN_VLAN_PRI_VALID 11 -#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << 
S_AOPEN_VLAN_PRI_VALID) -#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U) - -#define S_AOPEN_PKT_TYPE 12 -#define M_AOPEN_PKT_TYPE 0x3 -#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE) -#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE) - -#define S_AOPEN_MAC_MATCH 14 -#define M_AOPEN_MAC_MATCH 0x1F -#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH) -#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH) - -#define S_AOPEN_MAC_MATCH_VALID 19 -#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID) -#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U) - -#define S_AOPEN_IFF_VLAN 20 -#define M_AOPEN_IFF_VLAN 0xFFF -#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN) -#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN) - -struct cpl_act_open_rpl { - RSS_HDR union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be32 local_ip; - __be32 peer_ip; - __be32 atid; - __u8 rsvd[3]; - __u8 status; -}; - -struct cpl_act_establish { - RSS_HDR union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be32 local_ip; - __be32 peer_ip; - __be32 tos_tid; - __be16 l2t_idx; - __be16 tcp_opt; - __be32 snd_isn; - __be32 rcv_isn; -}; - -struct cpl_get_tcb { - WR_HDR; - union opcode_tid ot; - __be16 cpuno; - __be16 rsvd; -}; - -struct cpl_get_tcb_rpl { - RSS_HDR union opcode_tid ot; - __u8 rsvd; - __u8 status; - __be16 len; -}; - -struct cpl_set_tcb { - WR_HDR; - union opcode_tid ot; - __u8 reply; - __u8 cpu_idx; - __be16 len; -}; - -/* cpl_set_tcb.reply fields */ -#define S_NO_REPLY 7 -#define V_NO_REPLY(x) ((x) << S_NO_REPLY) -#define F_NO_REPLY V_NO_REPLY(1U) - -struct cpl_set_tcb_field { - WR_HDR; - union opcode_tid ot; - __u8 reply; - __u8 cpu_idx; - __be16 word; - __be64 mask; - __be64 val; -}; - -struct cpl_set_tcb_rpl { - RSS_HDR union opcode_tid ot; - __u8 rsvd[3]; - __u8 status; -}; - -struct cpl_pcmd { - WR_HDR; - union opcode_tid ot; - __u8 rsvd[3]; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 src:1; - __u8 bundle:1; - __u8 channel:1; - __u8:5; -#else - __u8:5; - __u8 channel:1; - __u8 bundle:1; - __u8 src:1; -#endif - __be32 pcmd_parm[2]; -}; - -struct cpl_pcmd_reply { - RSS_HDR union opcode_tid ot; - __u8 status; - __u8 rsvd; - __be16 len; -}; - -struct cpl_close_con_req { - WR_HDR; - union opcode_tid ot; - __be32 rsvd; -}; - -struct cpl_close_con_rpl { - RSS_HDR union opcode_tid ot; - __u8 rsvd[3]; - __u8 status; - __be32 snd_nxt; - __be32 rcv_nxt; -}; - -struct cpl_close_listserv_req { - WR_HDR; - union opcode_tid ot; - __u8 rsvd0; - __u8 cpu_idx; - __be16 rsvd1; -}; - -struct cpl_close_listserv_rpl { - RSS_HDR union opcode_tid ot; - __u8 rsvd[3]; - __u8 status; -}; - -struct cpl_abort_req_rss { - RSS_HDR union opcode_tid ot; - __be32 rsvd0; - __u8 rsvd1; - __u8 status; - __u8 rsvd2[6]; -}; - -struct cpl_abort_req { - WR_HDR; - union opcode_tid ot; - __be32 rsvd0; - __u8 rsvd1; - __u8 cmd; - __u8 rsvd2[6]; -}; - -struct cpl_abort_rpl_rss { - RSS_HDR union opcode_tid ot; - __be32 rsvd0; - __u8 rsvd1; - __u8 status; - __u8 rsvd2[6]; -}; - -struct cpl_abort_rpl { - WR_HDR; - union opcode_tid ot; - __be32 rsvd0; - __u8 rsvd1; - __u8 cmd; - __u8 rsvd2[6]; -}; - -struct cpl_peer_close { - RSS_HDR union opcode_tid ot; - __be32 rcv_nxt; -}; - -struct tx_data_wr { - __be32 wr_hi; - __be32 wr_lo; - __be32 len; - __be32 flags; - __be32 sndseq; - __be32 param; -}; - -/* tx_data_wr.flags fields */ -#define S_TX_ACK_PAGES 21 -#define M_TX_ACK_PAGES 0x7 -#define V_TX_ACK_PAGES(x) ((x) 
<< S_TX_ACK_PAGES) -#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES) - -/* tx_data_wr.param fields */ -#define S_TX_PORT 0 -#define M_TX_PORT 0x7 -#define V_TX_PORT(x) ((x) << S_TX_PORT) -#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT) - -#define S_TX_MSS 4 -#define M_TX_MSS 0xF -#define V_TX_MSS(x) ((x) << S_TX_MSS) -#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS) - -#define S_TX_QOS 8 -#define M_TX_QOS 0xFF -#define V_TX_QOS(x) ((x) << S_TX_QOS) -#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS) - -#define S_TX_SNDBUF 16 -#define M_TX_SNDBUF 0xFFFF -#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF) -#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF) - -struct cpl_tx_data { - union opcode_tid ot; - __be32 len; - __be32 rsvd; - __be16 urg; - __be16 flags; -}; - -/* cpl_tx_data.flags fields */ -#define S_TX_ULP_SUBMODE 6 -#define M_TX_ULP_SUBMODE 0xF -#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE) -#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE) - -#define S_TX_ULP_MODE 10 -#define M_TX_ULP_MODE 0xF -#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE) -#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE) - -#define S_TX_SHOVE 14 -#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE) -#define F_TX_SHOVE V_TX_SHOVE(1U) - -#define S_TX_MORE 15 -#define V_TX_MORE(x) ((x) << S_TX_MORE) -#define F_TX_MORE V_TX_MORE(1U) - -/* additional tx_data_wr.flags fields */ -#define S_TX_CPU_IDX 0 -#define M_TX_CPU_IDX 0x3F -#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX) -#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX) - -#define S_TX_URG 16 -#define V_TX_URG(x) ((x) << S_TX_URG) -#define F_TX_URG V_TX_URG(1U) - -#define S_TX_CLOSE 17 -#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE) -#define F_TX_CLOSE V_TX_CLOSE(1U) - -#define S_TX_INIT 18 -#define V_TX_INIT(x) ((x) << S_TX_INIT) -#define F_TX_INIT V_TX_INIT(1U) - -#define S_TX_IMM_ACK 19 -#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK) -#define F_TX_IMM_ACK V_TX_IMM_ACK(1U) - -#define S_TX_IMM_DMA 20 -#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA) -#define F_TX_IMM_DMA V_TX_IMM_DMA(1U) - -struct cpl_tx_data_ack { - RSS_HDR union opcode_tid ot; - __be32 ack_seq; -}; - -struct cpl_wr_ack { - RSS_HDR union opcode_tid ot; - __be16 credits; - __be16 rsvd; - __be32 snd_nxt; - __be32 snd_una; -}; - -struct cpl_rdma_ec_status { - RSS_HDR union opcode_tid ot; - __u8 rsvd[3]; - __u8 status; -}; - -struct mngt_pktsched_wr { - __be32 wr_hi; - __be32 wr_lo; - __u8 mngt_opcode; - __u8 rsvd[7]; - __u8 sched; - __u8 idx; - __u8 min; - __u8 max; - __u8 binding; - __u8 rsvd1[3]; -}; - -struct cpl_iscsi_hdr { - RSS_HDR union opcode_tid ot; - __be16 pdu_len_ddp; - __be16 len; - __be32 seq; - __be16 urg; - __u8 rsvd; - __u8 status; -}; - -/* cpl_iscsi_hdr.pdu_len_ddp fields */ -#define S_ISCSI_PDU_LEN 0 -#define M_ISCSI_PDU_LEN 0x7FFF -#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN) -#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN) - -#define S_ISCSI_DDP 15 -#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP) -#define F_ISCSI_DDP V_ISCSI_DDP(1U) - -struct cpl_rx_data { - RSS_HDR union opcode_tid ot; - __be16 rsvd; - __be16 len; - __be32 seq; - __be16 urg; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 dack_mode:2; - __u8 psh:1; - __u8 heartbeat:1; - __u8:4; -#else - __u8:4; - __u8 heartbeat:1; - __u8 psh:1; - __u8 dack_mode:2; -#endif - __u8 status; -}; - -struct cpl_rx_data_ack { - WR_HDR; - union opcode_tid ot; - __be32 credit_dack; -}; - -/* cpl_rx_data_ack.ack_seq fields */ -#define 
S_RX_CREDITS 0 -#define M_RX_CREDITS 0x7FFFFFF -#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS) -#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS) - -#define S_RX_MODULATE 27 -#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE) -#define F_RX_MODULATE V_RX_MODULATE(1U) - -#define S_RX_FORCE_ACK 28 -#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK) -#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U) - -#define S_RX_DACK_MODE 29 -#define M_RX_DACK_MODE 0x3 -#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE) -#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE) - -#define S_RX_DACK_CHANGE 31 -#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) -#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) - -struct cpl_rx_urg_notify { - RSS_HDR union opcode_tid ot; - __be32 seq; -}; - -struct cpl_rx_ddp_complete { - RSS_HDR union opcode_tid ot; - __be32 ddp_report; -}; - -struct cpl_rx_data_ddp { - RSS_HDR union opcode_tid ot; - __be16 urg; - __be16 len; - __be32 seq; - union { - __be32 nxt_seq; - __be32 ddp_report; - }; - __be32 ulp_crc; - __be32 ddpvld_status; -}; - -/* cpl_rx_data_ddp.ddpvld_status fields */ -#define S_DDP_STATUS 0 -#define M_DDP_STATUS 0xFF -#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS) -#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS) - -#define S_DDP_VALID 15 -#define M_DDP_VALID 0x1FFFF -#define V_DDP_VALID(x) ((x) << S_DDP_VALID) -#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID) - -#define S_DDP_PPOD_MISMATCH 15 -#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH) -#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U) - -#define S_DDP_PDU 16 -#define V_DDP_PDU(x) ((x) << S_DDP_PDU) -#define F_DDP_PDU V_DDP_PDU(1U) - -#define S_DDP_LLIMIT_ERR 17 -#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR) -#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U) - -#define S_DDP_PPOD_PARITY_ERR 18 -#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR) -#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U) - -#define S_DDP_PADDING_ERR 19 -#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR) -#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U) - -#define S_DDP_HDRCRC_ERR 20 -#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR) -#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U) - -#define S_DDP_DATACRC_ERR 21 -#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR) -#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U) - -#define S_DDP_INVALID_TAG 22 -#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG) -#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U) - -#define S_DDP_ULIMIT_ERR 23 -#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR) -#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U) - -#define S_DDP_OFFSET_ERR 24 -#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR) -#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U) - -#define S_DDP_COLOR_ERR 25 -#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR) -#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U) - -#define S_DDP_TID_MISMATCH 26 -#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH) -#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U) - -#define S_DDP_INVALID_PPOD 27 -#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD) -#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U) - -#define S_DDP_ULP_MODE 28 -#define M_DDP_ULP_MODE 0xF -#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE) -#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE) - -/* cpl_rx_data_ddp.ddp_report fields */ -#define S_DDP_OFFSET 0 -#define M_DDP_OFFSET 0x3FFFFF -#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET) -#define 
G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET) - -#define S_DDP_URG 24 -#define V_DDP_URG(x) ((x) << S_DDP_URG) -#define F_DDP_URG V_DDP_URG(1U) - -#define S_DDP_PSH 25 -#define V_DDP_PSH(x) ((x) << S_DDP_PSH) -#define F_DDP_PSH V_DDP_PSH(1U) - -#define S_DDP_BUF_COMPLETE 26 -#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE) -#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U) - -#define S_DDP_BUF_TIMED_OUT 27 -#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT) -#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U) - -#define S_DDP_BUF_IDX 28 -#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX) -#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U) - -struct cpl_tx_pkt { - WR_HDR; - __be32 cntrl; - __be32 len; -}; - -struct cpl_tx_pkt_lso { - WR_HDR; - __be32 cntrl; - __be32 len; - - __be32 rsvd; - __be32 lso_info; -}; - -/* cpl_tx_pkt*.cntrl fields */ -#define S_TXPKT_VLAN 0 -#define M_TXPKT_VLAN 0xFFFF -#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN) -#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN) - -#define S_TXPKT_INTF 16 -#define M_TXPKT_INTF 0xF -#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF) -#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF) - -#define S_TXPKT_IPCSUM_DIS 20 -#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS) -#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U) - -#define S_TXPKT_L4CSUM_DIS 21 -#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS) -#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U) - -#define S_TXPKT_VLAN_VLD 22 -#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD) -#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U) - -#define S_TXPKT_LOOPBACK 23 -#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK) -#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U) - -#define S_TXPKT_OPCODE 24 -#define M_TXPKT_OPCODE 0xFF -#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE) -#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE) - -/* cpl_tx_pkt_lso.lso_info fields */ -#define S_LSO_MSS 0 -#define M_LSO_MSS 0x3FFF -#define V_LSO_MSS(x) ((x) << S_LSO_MSS) -#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS) - -#define S_LSO_ETH_TYPE 14 -#define M_LSO_ETH_TYPE 0x3 -#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE) -#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE) - -#define S_LSO_TCPHDR_WORDS 16 -#define M_LSO_TCPHDR_WORDS 0xF -#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS) -#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS) - -#define S_LSO_IPHDR_WORDS 20 -#define M_LSO_IPHDR_WORDS 0xF -#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS) -#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS) - -#define S_LSO_IPV6 24 -#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6) -#define F_LSO_IPV6 V_LSO_IPV6(1U) - -struct cpl_trace_pkt { -#ifdef CHELSIO_FW - __u8 rss_opcode; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 err:1; - __u8:7; -#else - __u8:7; - __u8 err:1; -#endif - __u8 rsvd0; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 qid:4; - __u8:4; -#else - __u8:4; - __u8 qid:4; -#endif - __be32 tstamp; -#endif /* CHELSIO_FW */ - - __u8 opcode; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 iff:4; - __u8:4; -#else - __u8:4; - __u8 iff:4; -#endif - __u8 rsvd[4]; - __be16 len; -}; - -struct cpl_rx_pkt { - RSS_HDR __u8 opcode; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 iff:4; - __u8 csum_valid:1; - __u8 ipmi_pkt:1; - __u8 vlan_valid:1; - __u8 fragment:1; -#else - __u8 fragment:1; - __u8 vlan_valid:1; - __u8 ipmi_pkt:1; - __u8 csum_valid:1; - __u8 iff:4; 
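/*
 * A note on the S_/M_/V_/G_/F_ accessor convention used throughout this
 * header: for a field FOO, S_FOO is its bit offset, M_FOO its unshifted
 * mask, V_FOO(x) shifts a value into the field, G_FOO(x) extracts it, and
 * F_FOO is the one-bit flag form.  An illustrative sketch (the values and
 * the local "param" are hypothetical, not from the driver):
 *
 *	u32 param = V_TX_PORT(0) | V_TX_MSS(8) | V_TX_QOS(0x10);
 *
 * after which G_TX_MSS(param) evaluates back to 8.
 */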
-#endif - __be16 csum; - __be16 vlan; - __be16 len; -}; - -struct cpl_l2t_write_req { - WR_HDR; - union opcode_tid ot; - __be32 params; - __u8 rsvd[2]; - __u8 dst_mac[6]; -}; - -/* cpl_l2t_write_req.params fields */ -#define S_L2T_W_IDX 0 -#define M_L2T_W_IDX 0x7FF -#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX) -#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX) - -#define S_L2T_W_VLAN 11 -#define M_L2T_W_VLAN 0xFFF -#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN) -#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN) - -#define S_L2T_W_IFF 23 -#define M_L2T_W_IFF 0xF -#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF) -#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF) - -#define S_L2T_W_PRIO 27 -#define M_L2T_W_PRIO 0x7 -#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO) -#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO) - -struct cpl_l2t_write_rpl { - RSS_HDR union opcode_tid ot; - __u8 status; - __u8 rsvd[3]; -}; - -struct cpl_l2t_read_req { - WR_HDR; - union opcode_tid ot; - __be16 rsvd; - __be16 l2t_idx; -}; - -struct cpl_l2t_read_rpl { - RSS_HDR union opcode_tid ot; - __be32 params; - __u8 rsvd[2]; - __u8 dst_mac[6]; -}; - -/* cpl_l2t_read_rpl.params fields */ -#define S_L2T_R_PRIO 0 -#define M_L2T_R_PRIO 0x7 -#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO) -#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO) - -#define S_L2T_R_VLAN 8 -#define M_L2T_R_VLAN 0xFFF -#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN) -#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN) - -#define S_L2T_R_IFF 20 -#define M_L2T_R_IFF 0xF -#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF) -#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF) - -#define S_L2T_STATUS 24 -#define M_L2T_STATUS 0xFF -#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS) -#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS) - -struct cpl_smt_write_req { - WR_HDR; - union opcode_tid ot; - __u8 rsvd0; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 mtu_idx:4; - __u8 iff:4; -#else - __u8 iff:4; - __u8 mtu_idx:4; -#endif - __be16 rsvd2; - __be16 rsvd3; - __u8 src_mac1[6]; - __be16 rsvd4; - __u8 src_mac0[6]; -}; - -struct cpl_smt_write_rpl { - RSS_HDR union opcode_tid ot; - __u8 status; - __u8 rsvd[3]; -}; - -struct cpl_smt_read_req { - WR_HDR; - union opcode_tid ot; - __u8 rsvd0; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8:4; - __u8 iff:4; -#else - __u8 iff:4; - __u8:4; -#endif - __be16 rsvd2; -}; - -struct cpl_smt_read_rpl { - RSS_HDR union opcode_tid ot; - __u8 status; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 mtu_idx:4; - __u8:4; -#else - __u8:4; - __u8 mtu_idx:4; -#endif - __be16 rsvd2; - __be16 rsvd3; - __u8 src_mac1[6]; - __be16 rsvd4; - __u8 src_mac0[6]; -}; - -struct cpl_rte_delete_req { - WR_HDR; - union opcode_tid ot; - __be32 params; -}; - -/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */ -#define S_RTE_REQ_LUT_IX 8 -#define M_RTE_REQ_LUT_IX 0x7FF -#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX) -#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX) - -#define S_RTE_REQ_LUT_BASE 19 -#define M_RTE_REQ_LUT_BASE 0x7FF -#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE) -#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE) - -#define S_RTE_READ_REQ_SELECT 31 -#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT) -#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U) - -struct cpl_rte_delete_rpl { - RSS_HDR union opcode_tid ot; - __u8 status; - __u8 rsvd[3]; -}; - -struct cpl_rte_write_req { - 
WR_HDR; - union opcode_tid ot; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8:6; - __u8 write_tcam:1; - __u8 write_l2t_lut:1; -#else - __u8 write_l2t_lut:1; - __u8 write_tcam:1; - __u8:6; -#endif - __u8 rsvd[3]; - __be32 lut_params; - __be16 rsvd2; - __be16 l2t_idx; - __be32 netmask; - __be32 faddr; -}; - -/* cpl_rte_write_req.lut_params fields */ -#define S_RTE_WRITE_REQ_LUT_IX 10 -#define M_RTE_WRITE_REQ_LUT_IX 0x7FF -#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX) -#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX) - -#define S_RTE_WRITE_REQ_LUT_BASE 21 -#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF -#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE) -#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE) - -struct cpl_rte_write_rpl { - RSS_HDR union opcode_tid ot; - __u8 status; - __u8 rsvd[3]; -}; - -struct cpl_rte_read_req { - WR_HDR; - union opcode_tid ot; - __be32 params; -}; - -struct cpl_rte_read_rpl { - RSS_HDR union opcode_tid ot; - __u8 status; - __u8 rsvd0; - __be16 l2t_idx; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8:7; - __u8 select:1; -#else - __u8 select:1; - __u8:7; -#endif - __u8 rsvd2[3]; - __be32 addr; -}; - -struct cpl_tid_release { - WR_HDR; - union opcode_tid ot; - __be32 rsvd; -}; - -struct cpl_barrier { - WR_HDR; - __u8 opcode; - __u8 rsvd[7]; -}; - -struct cpl_rdma_read_req { - __u8 opcode; - __u8 rsvd[15]; -}; - -struct cpl_rdma_terminate { -#ifdef CHELSIO_FW - __u8 opcode; - __u8 rsvd[2]; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 rspq:3; - __u8:5; -#else - __u8:5; - __u8 rspq:3; -#endif - __be32 tid_len; -#endif - __be32 msn; - __be32 mo; - __u8 data[0]; -}; - -/* cpl_rdma_terminate.tid_len fields */ -#define S_FLIT_CNT 0 -#define M_FLIT_CNT 0xFF -#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT) -#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT) - -#define S_TERM_TID 8 -#define M_TERM_TID 0xFFFFF -#define V_TERM_TID(x) ((x) << S_TERM_TID) -#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID) - -/* ULP_TX opcodes */ -enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 }; - -#define S_ULPTX_CMD 28 -#define M_ULPTX_CMD 0xF -#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD) - -#define S_ULPTX_NFLITS 0 -#define M_ULPTX_NFLITS 0xFF -#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS) - -struct ulp_mem_io { - WR_HDR; - __be32 cmd_lock_addr; - __be32 len; -}; - -/* ulp_mem_io.cmd_lock_addr fields */ -#define S_ULP_MEMIO_ADDR 0 -#define M_ULP_MEMIO_ADDR 0x7FFFFFF -#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR) -#define S_ULP_MEMIO_LOCK 27 -#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK) -#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U) - -/* ulp_mem_io.len fields */ -#define S_ULP_MEMIO_DATA_LEN 28 -#define M_ULP_MEMIO_DATA_LEN 0xF -#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN) - -#endif /* T3_CPL_H */ diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c deleted file mode 100644 index 44ac2f4..0000000 --- a/drivers/net/cxgb3/t3_hw.c +++ /dev/null @@ -1,3785 +0,0 @@ -/* - * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "common.h" -#include "regs.h" -#include "sge_defs.h" -#include "firmware_exports.h" - -static void t3_port_intr_clear(struct adapter *adapter, int idx); - -/** - * t3_wait_op_done_val - wait until an operation is completed - * @adapter: the adapter performing the operation - * @reg: the register to check for completion - * @mask: a single-bit field within @reg that indicates completion - * @polarity: the value of the field when the operation is completed - * @attempts: number of check iterations - * @delay: delay in usecs between iterations - * @valp: where to store the value of the register at completion time - * - * Wait until an operation is completed by checking a bit in a register - * up to @attempts times. If @valp is not NULL the value of the register - * at the time it indicated completion is stored there. Returns 0 if the - * operation completes and -EAGAIN otherwise. - */ - -int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, - int polarity, int attempts, int delay, u32 *valp) -{ - while (1) { - u32 val = t3_read_reg(adapter, reg); - - if (!!(val & mask) == polarity) { - if (valp) - *valp = val; - return 0; - } - if (--attempts == 0) - return -EAGAIN; - if (delay) - udelay(delay); - } -} - -/** - * t3_write_regs - write a bunch of registers - * @adapter: the adapter to program - * @p: an array of register address/register value pairs - * @n: the number of address/value pairs - * @offset: register address offset - * - * Takes an array of register address/register value pairs and writes each - * value to the corresponding register. Register addresses are adjusted - * by the supplied offset. - */ -void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p, - int n, unsigned int offset) -{ - while (n--) { - t3_write_reg(adapter, p->reg_addr + offset, p->val); - p++; - } -} - -/** - * t3_set_reg_field - set a register field to a value - * @adapter: the adapter to program - * @addr: the register address - * @mask: specifies the portion of the register to modify - * @val: the new value for the register field - * - * Sets a register field specified by the supplied mask to the - * given value. 
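 *
 *	A typical read-modify-write use, taken from t3_mi1_read() later in
 *	this file:
 *
 *		t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 *
 *	which clears the ST field of A_MI1_CFG and then sets it to 1.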
- */ -void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, - u32 val) -{ - u32 v = t3_read_reg(adapter, addr) & ~mask; - - t3_write_reg(adapter, addr, v | val); - t3_read_reg(adapter, addr); /* flush */ -} - -/** - * t3_read_indirect - read indirectly addressed registers - * @adap: the adapter - * @addr_reg: register holding the indirect address - * @data_reg: register holding the value of the indirect register - * @vals: where the read register values are stored - * @start_idx: index of first indirect register to read - * @nregs: how many indirect registers to read - * - * Reads registers that are accessed indirectly through an address/data - * register pair. - */ -static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg, - unsigned int data_reg, u32 *vals, - unsigned int nregs, unsigned int start_idx) -{ - while (nregs--) { - t3_write_reg(adap, addr_reg, start_idx); - *vals++ = t3_read_reg(adap, data_reg); - start_idx++; - } -} - -/** - * t3_mc7_bd_read - read from MC7 through backdoor accesses - * @mc7: identifies MC7 to read from - * @start: index of first 64-bit word to read - * @n: number of 64-bit words to read - * @buf: where to store the read result - * - * Read n 64-bit words from MC7 starting at word start, using backdoor - * accesses. - */ -int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, - u64 *buf) -{ - static const int shift[] = { 0, 0, 16, 24 }; - static const int step[] = { 0, 32, 16, 8 }; - - unsigned int size64 = mc7->size / 8; /* # of 64-bit words */ - struct adapter *adap = mc7->adapter; - - if (start >= size64 || start + n > size64) - return -EINVAL; - - start *= (8 << mc7->width); - while (n--) { - int i; - u64 val64 = 0; - - for (i = (1 << mc7->width) - 1; i >= 0; --i) { - int attempts = 10; - u32 val; - - t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start); - t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0); - val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP); - while ((val & F_BUSY) && attempts--) - val = t3_read_reg(adap, - mc7->offset + A_MC7_BD_OP); - if (val & F_BUSY) - return -EIO; - - val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1); - if (mc7->width == 0) { - val64 = t3_read_reg(adap, - mc7->offset + - A_MC7_BD_DATA0); - val64 |= (u64) val << 32; - } else { - if (mc7->width > 1) - val >>= shift[mc7->width]; - val64 |= (u64) val << (step[mc7->width] * i); - } - start += 8; - } - *buf++ = val64; - } - return 0; -} - -/* - * Initialize MI1. - */ -static void mi1_init(struct adapter *adap, const struct adapter_info *ai) -{ - u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1; - u32 val = F_PREEN | V_CLKDIV(clkdiv); - - t3_write_reg(adap, A_MI1_CFG, val); -} - -#define MDIO_ATTEMPTS 20 - -/* - * MI1 read/write operations for clause 22 PHYs. 
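 *
 * A clause 22 access is a single MDIO cycle: ST in A_MI1_CFG is set to 1
 * to select clause 22 framing, the PHY and register addresses go into
 * A_MI1_ADDR, and A_MI1_OP is written with the opcode (2 for a read, 1
 * for a write), after which F_BUSY is polled for completion.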
- */ -static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - int ret; - u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr); - - mutex_lock(&adapter->mdio_lock); - t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1)); - t3_write_reg(adapter, A_MI1_ADDR, addr); - t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2)); - ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); - if (!ret) - ret = t3_read_reg(adapter, A_MI1_DATA); - mutex_unlock(&adapter->mdio_lock); - return ret; -} - -static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr, u16 val) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - int ret; - u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr); - - mutex_lock(&adapter->mdio_lock); - t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1)); - t3_write_reg(adapter, A_MI1_ADDR, addr); - t3_write_reg(adapter, A_MI1_DATA, val); - t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1)); - ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); - mutex_unlock(&adapter->mdio_lock); - return ret; -} - -static const struct mdio_ops mi1_mdio_ops = { - .read = t3_mi1_read, - .write = t3_mi1_write, - .mode_support = MDIO_SUPPORTS_C22 -}; - -/* - * Performs the address cycle for clause 45 PHYs. - * Must be called with the MDIO_LOCK held. - */ -static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr, - int reg_addr) -{ - u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr); - - t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0); - t3_write_reg(adapter, A_MI1_ADDR, addr); - t3_write_reg(adapter, A_MI1_DATA, reg_addr); - t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0)); - return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, - MDIO_ATTEMPTS, 10); -} - -/* - * MI1 read/write operations for indirect-addressed PHYs. 
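 *
 * A clause 45 access is a two-cycle sequence: mi1_wr_addr() above clears
 * ST to select clause 45 framing and issues an address cycle (MDI_OP 0)
 * carrying the target register, and the caller then performs the data
 * cycle (MDI_OP 3 for a read, MDI_OP 1 for a write) under the same
 * mdio_lock.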
- */ -static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - int ret; - - mutex_lock(&adapter->mdio_lock); - ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr); - if (!ret) { - t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3)); - ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, - MDIO_ATTEMPTS, 10); - if (!ret) - ret = t3_read_reg(adapter, A_MI1_DATA); - } - mutex_unlock(&adapter->mdio_lock); - return ret; -} - -static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr, - u16 reg_addr, u16 val) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - int ret; - - mutex_lock(&adapter->mdio_lock); - ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr); - if (!ret) { - t3_write_reg(adapter, A_MI1_DATA, val); - t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1)); - ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, - MDIO_ATTEMPTS, 10); - } - mutex_unlock(&adapter->mdio_lock); - return ret; -} - -static const struct mdio_ops mi1_mdio_ext_ops = { - .read = mi1_ext_read, - .write = mi1_ext_write, - .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22 -}; - -/** - * t3_mdio_change_bits - modify the value of a PHY register - * @phy: the PHY to operate on - * @mmd: the device address - * @reg: the register address - * @clear: what part of the register value to mask off - * @set: what part of the register value to set - * - * Changes the value of a PHY register by applying a mask to its current - * value and ORing the result with a new value. - */ -int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear, - unsigned int set) -{ - int ret; - unsigned int val; - - ret = t3_mdio_read(phy, mmd, reg, &val); - if (!ret) { - val &= ~clear; - ret = t3_mdio_write(phy, mmd, reg, val | set); - } - return ret; -} - -/** - * t3_phy_reset - reset a PHY block - * @phy: the PHY to operate on - * @mmd: the device address of the PHY block to reset - * @wait: how long to wait for the reset to complete in 1ms increments - * - * Resets a PHY block and optionally waits for the reset to complete. - * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset - * for 10G PHYs. - */ -int t3_phy_reset(struct cphy *phy, int mmd, int wait) -{ - int err; - unsigned int ctl; - - err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER, - MDIO_CTRL1_RESET); - if (err || !wait) - return err; - - do { - err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl); - if (err) - return err; - ctl &= MDIO_CTRL1_RESET; - if (ctl) - msleep(1); - } while (ctl && --wait); - - return ctl ? -1 : 0; -} - -/** - * t3_phy_advertise - set the PHY advertisement registers for autoneg - * @phy: the PHY to operate on - * @advert: bitmap of capabilities the PHY should advertise - * - * Sets a 10/100/1000 PHY's advertisement registers to advertise the - * requested capabilities. 
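 *
 *	For example, an @advert of
 *
 *		ADVERTISED_1000baseT_Full | ADVERTISED_Pause
 *
 *	sets ADVERTISE_1000FULL in MII_CTRL1000 and ADVERTISE_PAUSE_CAP in
 *	MII_ADVERTISE while leaving the 10/100 speed bits clear.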
- */ -int t3_phy_advertise(struct cphy *phy, unsigned int advert) -{ - int err; - unsigned int val = 0; - - err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val); - if (err) - return err; - - val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); - if (advert & ADVERTISED_1000baseT_Half) - val |= ADVERTISE_1000HALF; - if (advert & ADVERTISED_1000baseT_Full) - val |= ADVERTISE_1000FULL; - - err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val); - if (err) - return err; - - val = 1; - if (advert & ADVERTISED_10baseT_Half) - val |= ADVERTISE_10HALF; - if (advert & ADVERTISED_10baseT_Full) - val |= ADVERTISE_10FULL; - if (advert & ADVERTISED_100baseT_Half) - val |= ADVERTISE_100HALF; - if (advert & ADVERTISED_100baseT_Full) - val |= ADVERTISE_100FULL; - if (advert & ADVERTISED_Pause) - val |= ADVERTISE_PAUSE_CAP; - if (advert & ADVERTISED_Asym_Pause) - val |= ADVERTISE_PAUSE_ASYM; - return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val); -} - -/** - * t3_phy_advertise_fiber - set fiber PHY advertisement register - * @phy: the PHY to operate on - * @advert: bitmap of capabilities the PHY should advertise - * - * Sets a fiber PHY's advertisement register to advertise the - * requested capabilities. - */ -int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert) -{ - unsigned int val = 0; - - if (advert & ADVERTISED_1000baseT_Half) - val |= ADVERTISE_1000XHALF; - if (advert & ADVERTISED_1000baseT_Full) - val |= ADVERTISE_1000XFULL; - if (advert & ADVERTISED_Pause) - val |= ADVERTISE_1000XPAUSE; - if (advert & ADVERTISED_Asym_Pause) - val |= ADVERTISE_1000XPSE_ASYM; - return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val); -} - -/** - * t3_set_phy_speed_duplex - force PHY speed and duplex - * @phy: the PHY to operate on - * @speed: requested PHY speed - * @duplex: requested PHY duplex - * - * Force a 10/100/1000 PHY's speed and duplex. This also disables - * auto-negotiation except for GigE, where auto-negotiation is mandatory. - */ -int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex) -{ - int err; - unsigned int ctl; - - err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl); - if (err) - return err; - - if (speed >= 0) { - ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); - if (speed == SPEED_100) - ctl |= BMCR_SPEED100; - else if (speed == SPEED_1000) - ctl |= BMCR_SPEED1000; - } - if (duplex >= 0) { - ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE); - if (duplex == DUPLEX_FULL) - ctl |= BMCR_FULLDPLX; - } - if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */ - ctl |= BMCR_ANENABLE; - return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl); -} - -int t3_phy_lasi_intr_enable(struct cphy *phy) -{ - return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, - MDIO_PMA_LASI_LSALARM); -} - -int t3_phy_lasi_intr_disable(struct cphy *phy) -{ - return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0); -} - -int t3_phy_lasi_intr_clear(struct cphy *phy) -{ - u32 val; - - return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val); -} - -int t3_phy_lasi_intr_handler(struct cphy *phy) -{ - unsigned int status; - int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, - &status); - - if (err) - return err; - return (status & MDIO_PMA_LASI_LSALARM) ? 
cphy_cause_link_change : 0; -} - -static const struct adapter_info t3_adap_info[] = { - {1, 1, 0, - F_GPIO2_OEN | F_GPIO4_OEN | - F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, - &mi1_mdio_ops, "Chelsio PE9000"}, - {1, 1, 0, - F_GPIO2_OEN | F_GPIO4_OEN | - F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, - &mi1_mdio_ops, "Chelsio T302"}, - {1, 0, 0, - F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | - F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, - { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, - &mi1_mdio_ext_ops, "Chelsio T310"}, - {1, 1, 0, - F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN | - F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL | - F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, - { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, - &mi1_mdio_ext_ops, "Chelsio T320"}, - {}, - {}, - {1, 0, 0, - F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN | - F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, - { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, - &mi1_mdio_ext_ops, "Chelsio T310" }, - {1, 0, 0, - F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | - F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL, - { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, - &mi1_mdio_ext_ops, "Chelsio N320E-G2" }, -}; - -/* - * Return the adapter_info structure with a given index. Out-of-range indices - * return NULL. - */ -const struct adapter_info *t3_get_adapter_info(unsigned int id) -{ - return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL; -} - -struct port_type_info { - int (*phy_prep)(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *ops); -}; - -static const struct port_type_info port_types[] = { - { NULL }, - { t3_ael1002_phy_prep }, - { t3_vsc8211_phy_prep }, - { NULL}, - { t3_xaui_direct_phy_prep }, - { t3_ael2005_phy_prep }, - { t3_qt2045_phy_prep }, - { t3_ael1006_phy_prep }, - { NULL }, - { t3_aq100x_phy_prep }, - { t3_ael2020_phy_prep }, -}; - -#define VPD_ENTRY(name, len) \ - u8 name##_kword[2]; u8 name##_len; u8 name##_data[len] - -/* - * Partial EEPROM Vital Product Data structure. Includes only the ID and - * VPD-R sections. - */ -struct t3_vpd { - u8 id_tag; - u8 id_len[2]; - u8 id_data[16]; - u8 vpdr_tag; - u8 vpdr_len[2]; - VPD_ENTRY(pn, 16); /* part number */ - VPD_ENTRY(ec, 16); /* EC level */ - VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ - VPD_ENTRY(na, 12); /* MAC address base */ - VPD_ENTRY(cclk, 6); /* core clock */ - VPD_ENTRY(mclk, 6); /* mem clock */ - VPD_ENTRY(uclk, 6); /* uP clk */ - VPD_ENTRY(mdc, 6); /* MDIO clk */ - VPD_ENTRY(mt, 2); /* mem timing */ - VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */ - VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */ - VPD_ENTRY(port0, 2); /* PHY0 complex */ - VPD_ENTRY(port1, 2); /* PHY1 complex */ - VPD_ENTRY(port2, 2); /* PHY2 complex */ - VPD_ENTRY(port3, 2); /* PHY3 complex */ - VPD_ENTRY(rv, 1); /* csum */ - u32 pad; /* for multiple-of-4 sizing and alignment */ -}; - -#define EEPROM_MAX_POLL 40 -#define EEPROM_STAT_ADDR 0x4000 -#define VPD_BASE 0xc00 - -/** - * t3_seeprom_read - read a VPD EEPROM location - * @adapter: adapter to read - * @addr: EEPROM address - * @data: where to store the read data - * - * Read a 32-bit word from a location in VPD EEPROM using the card's PCI - * VPD ROM capability. A zero is written to the flag bit when the - * address is written to the control register. 
The hardware device will - * set the flag to 1 when 4 bytes have been read into the data register. - */ -int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data) -{ - u16 val; - int attempts = EEPROM_MAX_POLL; - u32 v; - unsigned int base = adapter->params.pci.vpd_cap_addr; - - if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3)) - return -EINVAL; - - pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr); - do { - udelay(10); - pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val); - } while (!(val & PCI_VPD_ADDR_F) && --attempts); - - if (!(val & PCI_VPD_ADDR_F)) { - CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr); - return -EIO; - } - pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v); - *data = cpu_to_le32(v); - return 0; -} - -/** - * t3_seeprom_write - write a VPD EEPROM location - * @adapter: adapter to write - * @addr: EEPROM address - * @data: value to write - * - * Write a 32-bit word to a location in VPD EEPROM using the card's PCI - * VPD ROM capability. - */ -int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data) -{ - u16 val; - int attempts = EEPROM_MAX_POLL; - unsigned int base = adapter->params.pci.vpd_cap_addr; - - if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3)) - return -EINVAL; - - pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA, - le32_to_cpu(data)); - pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR, - addr | PCI_VPD_ADDR_F); - do { - msleep(1); - pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val); - } while ((val & PCI_VPD_ADDR_F) && --attempts); - - if (val & PCI_VPD_ADDR_F) { - CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr); - return -EIO; - } - return 0; -} - -/** - * t3_seeprom_wp - enable/disable EEPROM write protection - * @adapter: the adapter - * @enable: 1 to enable write protection, 0 to disable it - * - * Enables or disables write protection on the serial EEPROM. - */ -int t3_seeprom_wp(struct adapter *adapter, int enable) -{ - return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); -} - -/** - * get_vpd_params - read VPD parameters from VPD EEPROM - * @adapter: adapter to read - * @p: where to store the parameters - * - * Reads card parameters stored in VPD EEPROM. - */ -static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) -{ - int i, addr, ret; - struct t3_vpd vpd; - - /* - * Card information is normally at VPD_BASE but some early cards had - * it at 0. - */ - ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd); - if (ret) - return ret; - addr = vpd.id_tag == 0x82 ? VPD_BASE : 0; - - for (i = 0; i < sizeof(vpd); i += 4) { - ret = t3_seeprom_read(adapter, addr + i, - (__le32 *)((u8 *)&vpd + i)); - if (ret) - return ret; - } - - p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); - p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10); - p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10); - p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10); - p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10); - memcpy(p->sn, vpd.sn_data, SERNUM_LEN); - - /* Old eeproms didn't have port information */ - if (adapter->params.rev == 0 && !vpd.port0_data[0]) { - p->port_type[0] = uses_xaui(adapter) ? 1 : 2; - p->port_type[1] = uses_xaui(adapter) ? 
6 : 2; - } else { - p->port_type[0] = hex_to_bin(vpd.port0_data[0]); - p->port_type[1] = hex_to_bin(vpd.port1_data[0]); - p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); - p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); - } - - for (i = 0; i < 6; i++) - p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 + - hex_to_bin(vpd.na_data[2 * i + 1]); - return 0; -} - -/* serial flash and firmware constants */ -enum { - SF_ATTEMPTS = 5, /* max retries for SF1 operations */ - SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ - SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */ - - /* flash command opcodes */ - SF_PROG_PAGE = 2, /* program page */ - SF_WR_DISABLE = 4, /* disable writes */ - SF_RD_STATUS = 5, /* read status register */ - SF_WR_ENABLE = 6, /* enable writes */ - SF_RD_DATA_FAST = 0xb, /* read flash */ - SF_ERASE_SECTOR = 0xd8, /* erase sector */ - - FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */ - FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */ - FW_MIN_SIZE = 8 /* at least version and csum */ -}; - -/** - * sf1_read - read data from the serial flash - * @adapter: the adapter - * @byte_cnt: number of bytes to read - * @cont: whether another operation will be chained - * @valp: where to store the read data - * - * Reads up to 4 bytes of data from the serial flash. The location of - * the read needs to be specified prior to calling this by issuing the - * appropriate commands to the serial flash. - */ -static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, - u32 *valp) -{ - int ret; - - if (!byte_cnt || byte_cnt > 4) - return -EINVAL; - if (t3_read_reg(adapter, A_SF_OP) & F_BUSY) - return -EBUSY; - t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); - ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10); - if (!ret) - *valp = t3_read_reg(adapter, A_SF_DATA); - return ret; -} - -/** - * sf1_write - write data to the serial flash - * @adapter: the adapter - * @byte_cnt: number of bytes to write - * @cont: whether another operation will be chained - * @val: value to write - * - * Writes up to 4 bytes of data to the serial flash. The location of - * the write needs to be specified prior to calling this by issuing the - * appropriate commands to the serial flash. - */ -static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, - u32 val) -{ - if (!byte_cnt || byte_cnt > 4) - return -EINVAL; - if (t3_read_reg(adapter, A_SF_OP) & F_BUSY) - return -EBUSY; - t3_write_reg(adapter, A_SF_DATA, val); - t3_write_reg(adapter, A_SF_OP, - V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); - return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10); -} - -/** - * flash_wait_op - wait for a flash operation to complete - * @adapter: the adapter - * @attempts: max number of polls of the status register - * @delay: delay between polls in ms - * - * Wait for a flash operation to complete by polling the status register. 
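 *
 *	Each poll issues the SF_RD_STATUS opcode via sf1_write() and reads
 *	one status byte back via sf1_read(); bit 0 is the busy/write-in-
 *	progress flag, so the loop returns once (status & 1) is clear.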
- */ -static int flash_wait_op(struct adapter *adapter, int attempts, int delay) -{ - int ret; - u32 status; - - while (1) { - if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 || - (ret = sf1_read(adapter, 1, 0, &status)) != 0) - return ret; - if (!(status & 1)) - return 0; - if (--attempts == 0) - return -EAGAIN; - if (delay) - msleep(delay); - } -} - -/** - * t3_read_flash - read words from serial flash - * @adapter: the adapter - * @addr: the start address for the read - * @nwords: how many 32-bit words to read - * @data: where to store the read data - * @byte_oriented: whether to store data as bytes or as words - * - * Read the specified number of 32-bit words from the serial flash. - * If @byte_oriented is set the read data is stored as a byte array - * (i.e., big-endian), otherwise as 32-bit words in the platform's - * natural endianess. - */ -static int t3_read_flash(struct adapter *adapter, unsigned int addr, - unsigned int nwords, u32 *data, int byte_oriented) -{ - int ret; - - if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3)) - return -EINVAL; - - addr = swab32(addr) | SF_RD_DATA_FAST; - - if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 || - (ret = sf1_read(adapter, 1, 1, data)) != 0) - return ret; - - for (; nwords; nwords--, data++) { - ret = sf1_read(adapter, 4, nwords > 1, data); - if (ret) - return ret; - if (byte_oriented) - *data = htonl(*data); - } - return 0; -} - -/** - * t3_write_flash - write up to a page of data to the serial flash - * @adapter: the adapter - * @addr: the start address to write - * @n: length of data to write - * @data: the data to write - * - * Writes up to a page of data (256 bytes) to the serial flash starting - * at the given address. - */ -static int t3_write_flash(struct adapter *adapter, unsigned int addr, - unsigned int n, const u8 *data) -{ - int ret; - u32 buf[64]; - unsigned int i, c, left, val, offset = addr & 0xff; - - if (addr + n > SF_SIZE || offset + n > 256) - return -EINVAL; - - val = swab32(addr) | SF_PROG_PAGE; - - if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 || - (ret = sf1_write(adapter, 4, 1, val)) != 0) - return ret; - - for (left = n; left; left -= c) { - c = min(left, 4U); - for (val = 0, i = 0; i < c; ++i) - val = (val << 8) + *data++; - - ret = sf1_write(adapter, c, c != left, val); - if (ret) - return ret; - } - if ((ret = flash_wait_op(adapter, 5, 1)) != 0) - return ret; - - /* Read the page to verify the write succeeded */ - ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); - if (ret) - return ret; - - if (memcmp(data - n, (u8 *) buf + offset, n)) - return -EIO; - return 0; -} - -/** - * t3_get_tp_version - read the tp sram version - * @adapter: the adapter - * @vers: where to place the version - * - * Reads the protocol sram version from sram. - */ -int t3_get_tp_version(struct adapter *adapter, u32 *vers) -{ - int ret; - - /* Get version loaded in SRAM */ - t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0); - ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0, - 1, 1, 5, 1); - if (ret) - return ret; - - *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1); - - return 0; -} - -/** - * t3_check_tpsram_version - read the tp sram version - * @adapter: the adapter - * - * Reads the protocol sram version from flash. 
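 *
 *	The (major, minor) pair read back is compared against the
 *	TP_VERSION_MAJOR/TP_VERSION_MINOR the driver was compiled with;
 *	any mismatch is logged and -EINVAL returned.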
- */ -int t3_check_tpsram_version(struct adapter *adapter) -{ - int ret; - u32 vers; - unsigned int major, minor; - - if (adapter->params.rev == T3_REV_A) - return 0; - - - ret = t3_get_tp_version(adapter, &vers); - if (ret) - return ret; - - major = G_TP_VERSION_MAJOR(vers); - minor = G_TP_VERSION_MINOR(vers); - - if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) - return 0; - else { - CH_ERR(adapter, "found wrong TP version (%u.%u), " - "driver compiled for version %d.%d\n", major, minor, - TP_VERSION_MAJOR, TP_VERSION_MINOR); - } - return -EINVAL; -} - -/** - * t3_check_tpsram - check if provided protocol SRAM - * is compatible with this driver - * @adapter: the adapter - * @tp_sram: the firmware image to write - * @size: image size - * - * Checks if an adapter's tp sram is compatible with the driver. - * Returns 0 if the versions are compatible, a negative error otherwise. - */ -int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram, - unsigned int size) -{ - u32 csum; - unsigned int i; - const __be32 *p = (const __be32 *)tp_sram; - - /* Verify checksum */ - for (csum = 0, i = 0; i < size / sizeof(csum); i++) - csum += ntohl(p[i]); - if (csum != 0xffffffff) { - CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n", - csum); - return -EINVAL; - } - - return 0; -} - -enum fw_version_type { - FW_VERSION_N3, - FW_VERSION_T3 -}; - -/** - * t3_get_fw_version - read the firmware version - * @adapter: the adapter - * @vers: where to place the version - * - * Reads the FW version from flash. - */ -int t3_get_fw_version(struct adapter *adapter, u32 *vers) -{ - return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0); -} - -/** - * t3_check_fw_version - check if the FW is compatible with this driver - * @adapter: the adapter - * - * Checks if an adapter's FW is compatible with the driver. Returns 0 - * if the versions are compatible, a negative error otherwise. - */ -int t3_check_fw_version(struct adapter *adapter) -{ - int ret; - u32 vers; - unsigned int type, major, minor; - - ret = t3_get_fw_version(adapter, &vers); - if (ret) - return ret; - - type = G_FW_VERSION_TYPE(vers); - major = G_FW_VERSION_MAJOR(vers); - minor = G_FW_VERSION_MINOR(vers); - - if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR && - minor == FW_VERSION_MINOR) - return 0; - else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR) - CH_WARN(adapter, "found old FW minor version(%u.%u), " - "driver compiled for version %u.%u\n", major, minor, - FW_VERSION_MAJOR, FW_VERSION_MINOR); - else { - CH_WARN(adapter, "found newer FW version(%u.%u), " - "driver compiled for version %u.%u\n", major, minor, - FW_VERSION_MAJOR, FW_VERSION_MINOR); - return 0; - } - return -EINVAL; -} - -/** - * t3_flash_erase_sectors - erase a range of flash sectors - * @adapter: the adapter - * @start: the first sector to erase - * @end: the last sector to erase - * - * Erases the sectors in the given range. - */ -static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end) -{ - while (start <= end) { - int ret; - - if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 || - (ret = sf1_write(adapter, 4, 0, - SF_ERASE_SECTOR | (start << 8))) != 0 || - (ret = flash_wait_op(adapter, 5, 500)) != 0) - return ret; - start++; - } - return 0; -} - -/* - * t3_load_fw - download firmware - * @adapter: the adapter - * @fw_data: the firmware image to write - * @size: image size - * - * Write the supplied firmware image to the card's serial flash. 
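 * (The image may be at most FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR bytes,
 * i.e. it must fit between the firmware boot address and the version word
 * at the top of flash; larger images are rejected with -EFBIG.)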
- * The FW image has the following sections: @size - 8 bytes of code and - * data, followed by 4 bytes of FW version, followed by the 32-bit - * 1's complement checksum of the whole image. - */ -int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size) -{ - u32 csum; - unsigned int i; - const __be32 *p = (const __be32 *)fw_data; - int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16; - - if ((size & 3) || size < FW_MIN_SIZE) - return -EINVAL; - if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR) - return -EFBIG; - - for (csum = 0, i = 0; i < size / sizeof(csum); i++) - csum += ntohl(p[i]); - if (csum != 0xffffffff) { - CH_ERR(adapter, "corrupted firmware image, checksum %u\n", - csum); - return -EINVAL; - } - - ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector); - if (ret) - goto out; - - size -= 8; /* trim off version and checksum */ - for (addr = FW_FLASH_BOOT_ADDR; size;) { - unsigned int chunk_size = min(size, 256U); - - ret = t3_write_flash(adapter, addr, chunk_size, fw_data); - if (ret) - goto out; - - addr += chunk_size; - fw_data += chunk_size; - size -= chunk_size; - } - - ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data); -out: - if (ret) - CH_ERR(adapter, "firmware download failed, error %d\n", ret); - return ret; -} - -#define CIM_CTL_BASE 0x2000 - -/** - * t3_cim_ctl_blk_read - read a block from CIM control region - * - * @adap: the adapter - * @addr: the start address within the CIM control region - * @n: number of words to read - * @valp: where to store the result - * - * Reads a block of 4-byte words from the CIM control region. - */ -int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr, - unsigned int n, unsigned int *valp) -{ - int ret = 0; - - if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) - return -EBUSY; - - for ( ; !ret && n--; addr += 4) { - t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr); - ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, - 0, 5, 2); - if (!ret) - *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA); - } - return ret; -} - -static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg, - u32 *rx_hash_high, u32 *rx_hash_low) -{ - /* stop Rx unicast traffic */ - t3_mac_disable_exact_filters(mac); - - /* stop broadcast, multicast, promiscuous mode traffic */ - *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG); - t3_set_reg_field(mac->adapter, A_XGM_RX_CFG, - F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES, - F_DISBCAST); - - *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH); - t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0); - - *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW); - t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0); - - /* Leave time to drain max RX fifo */ - msleep(1); -} - -static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg, - u32 rx_hash_high, u32 rx_hash_low) -{ - t3_mac_enable_exact_filters(mac); - t3_set_reg_field(mac->adapter, A_XGM_RX_CFG, - F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES, - rx_cfg); - t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high); - t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low); -} - -/** - * t3_link_changed - handle interface link changes - * @adapter: the adapter - * @port_id: the port index that changed link state - * - * Called when a port's link settings change to propagate the new values - * to the associated PHY and MAC. After performing the common tasks it - * invokes an OS-specific handler. 
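 *
 *	On a link-up transition the handler gates RX traffic via
 *	t3_gate_rx_traffic(), re-enables the MAC RX path, samples
 *	F_LINKFAULTCHANGE to catch link faults, and reopens RX with
 *	t3_open_rx_traffic() before propagating the new speed, duplex and
 *	flow-control settings.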
- */ -void t3_link_changed(struct adapter *adapter, int port_id) -{ - int link_ok, speed, duplex, fc; - struct port_info *pi = adap2pinfo(adapter, port_id); - struct cphy *phy = &pi->phy; - struct cmac *mac = &pi->mac; - struct link_config *lc = &pi->link_config; - - phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); - - if (!lc->link_ok && link_ok) { - u32 rx_cfg, rx_hash_high, rx_hash_low; - u32 status; - - t3_xgm_intr_enable(adapter, port_id); - t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low); - t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0); - t3_mac_enable(mac, MAC_DIRECTION_RX); - - status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset); - if (status & F_LINKFAULTCHANGE) { - mac->stats.link_faults++; - pi->link_fault = 1; - } - t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low); - } - - if (lc->requested_fc & PAUSE_AUTONEG) - fc &= lc->requested_fc; - else - fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - - if (link_ok == lc->link_ok && speed == lc->speed && - duplex == lc->duplex && fc == lc->fc) - return; /* nothing changed */ - - if (link_ok != lc->link_ok && adapter->params.rev > 0 && - uses_xaui(adapter)) { - if (link_ok) - t3b_pcs_reset(mac); - t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, - link_ok ? F_TXACTENABLE | F_RXEN : 0); - } - lc->link_ok = link_ok; - lc->speed = speed < 0 ? SPEED_INVALID : speed; - lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; - - if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) { - /* Set MAC speed, duplex, and flow control to match PHY. */ - t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc); - lc->fc = fc; - } - - t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault, - speed, duplex, fc); -} - -void t3_link_fault(struct adapter *adapter, int port_id) -{ - struct port_info *pi = adap2pinfo(adapter, port_id); - struct cmac *mac = &pi->mac; - struct cphy *phy = &pi->phy; - struct link_config *lc = &pi->link_config; - int link_ok, speed, duplex, fc, link_fault; - u32 rx_cfg, rx_hash_high, rx_hash_low; - - t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low); - - if (adapter->params.rev > 0 && uses_xaui(adapter)) - t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0); - - t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0); - t3_mac_enable(mac, MAC_DIRECTION_RX); - - t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low); - - link_fault = t3_read_reg(adapter, - A_XGM_INT_STATUS + mac->offset); - link_fault &= F_LINKFAULTCHANGE; - - link_ok = lc->link_ok; - speed = lc->speed; - duplex = lc->duplex; - fc = lc->fc; - - phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); - - if (link_fault) { - lc->link_ok = 0; - lc->speed = SPEED_INVALID; - lc->duplex = DUPLEX_INVALID; - - t3_os_link_fault(adapter, port_id, 0); - - /* Account link faults only when the phy reports a link up */ - if (link_ok) - mac->stats.link_faults++; - } else { - if (link_ok) - t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, - F_TXACTENABLE | F_RXEN); - - pi->link_fault = 0; - lc->link_ok = (unsigned char)link_ok; - lc->speed = speed < 0 ? SPEED_INVALID : speed; - lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; - t3_os_link_fault(adapter, port_id, link_ok); - } -} - -/** - * t3_link_start - apply link configuration to MAC/PHY - * @phy: the PHY to setup - * @mac: the MAC to setup - * @lc: the requested link configuration - * - * Set up a port's MAC and PHY according to a desired link configuration. 
- * - If the PHY can auto-negotiate first decide what to advertise, then - * enable/disable auto-negotiation as desired, and reset. - * - If the PHY does not auto-negotiate just reset it. - * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, - * otherwise do it later based on the outcome of auto-negotiation. - */ -int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) -{ - unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - - lc->link_ok = 0; - if (lc->supported & SUPPORTED_Autoneg) { - lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause); - if (fc) { - lc->advertising |= ADVERTISED_Asym_Pause; - if (fc & PAUSE_RX) - lc->advertising |= ADVERTISED_Pause; - } - phy->ops->advertise(phy, lc->advertising); - - if (lc->autoneg == AUTONEG_DISABLE) { - lc->speed = lc->requested_speed; - lc->duplex = lc->requested_duplex; - lc->fc = (unsigned char)fc; - t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex, - fc); - /* Also disables autoneg */ - phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); - } else - phy->ops->autoneg_enable(phy); - } else { - t3_mac_set_speed_duplex_fc(mac, -1, -1, fc); - lc->fc = (unsigned char)fc; - phy->ops->reset(phy, 0); - } - return 0; -} - -/** - * t3_set_vlan_accel - control HW VLAN extraction - * @adapter: the adapter - * @ports: bitmap of adapter ports to operate on - * @on: enable (1) or disable (0) HW VLAN extraction - * - * Enables or disables HW extraction of VLAN tags for the given port. - */ -void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on) -{ - t3_set_reg_field(adapter, A_TP_OUT_CONFIG, - ports << S_VLANEXTRACTIONENABLE, - on ? (ports << S_VLANEXTRACTIONENABLE) : 0); -} - -struct intr_info { - unsigned int mask; /* bits to check in interrupt status */ - const char *msg; /* message to print or NULL */ - short stat_idx; /* stat counter to increment or -1 */ - unsigned short fatal; /* whether the condition reported is fatal */ -}; - -/** - * t3_handle_intr_status - table driven interrupt handler - * @adapter: the adapter that generated the interrupt - * @reg: the interrupt status register to process - * @mask: a mask to apply to the interrupt status - * @acts: table of interrupt actions - * @stats: statistics counters tracking interrupt occurrences - * - * A table driven interrupt handler that applies a set of masks to an - * interrupt status word and performs the corresponding actions if the - * interrupts described by the mask have occurred. The actions include - * optionally printing a warning or alert message, and optionally - * incrementing a stat counter. The table is terminated by an entry - * specifying mask 0. Returns the number of fatal interrupt conditions. 
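 *
 *	For example, the pcix1_intr_info entry below
 *
 *		{F_DETCORECCERR, "PCI correctable ECC error",
 *		 STAT_PCI_CORR_ECC, 0}
 *
 *	logs a warning and increments the STAT_PCI_CORR_ECC counter without
 *	counting as fatal, whereas entries with a fatal flag of 1 are
 *	reported with CH_ALERT and added to the return value.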
- */ -static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg, - unsigned int mask, - const struct intr_info *acts, - unsigned long *stats) -{ - int fatal = 0; - unsigned int status = t3_read_reg(adapter, reg) & mask; - - for (; acts->mask; ++acts) { - if (!(status & acts->mask)) - continue; - if (acts->fatal) { - fatal++; - CH_ALERT(adapter, "%s (0x%x)\n", - acts->msg, status & acts->mask); - status &= ~acts->mask; - } else if (acts->msg) - CH_WARN(adapter, "%s (0x%x)\n", - acts->msg, status & acts->mask); - if (acts->stat_idx >= 0) - stats[acts->stat_idx]++; - } - if (status) /* clear processed interrupts */ - t3_write_reg(adapter, reg, status); - return fatal; -} - -#define SGE_INTR_MASK (F_RSPQDISABLED | \ - F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \ - F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ - F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ - V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ - F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ - F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \ - F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \ - F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \ - F_LOPIODRBDROPERR) -#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ - F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ - F_NFASRCHFAIL) -#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE)) -#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \ - V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \ - F_TXFIFO_UNDERRUN) -#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \ - F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \ - F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \ - F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \ - V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \ - V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */) -#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\ - F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \ - /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \ - F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \ - F_TXPARERR | V_BISTERR(M_BISTERR)) -#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \ - F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \ - F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0) -#define ULPTX_INTR_MASK 0xfc -#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \ - F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \ - F_ZERO_SWITCH_ERROR) -#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \ - F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \ - F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \ - F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \ - F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \ - F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \ - F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \ - F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR) -#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \ - V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \ - V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR)) -#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \ - V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \ - V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR)) -#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \ - V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \ - V_RXTPPARERRENB(M_RXTPPARERRENB) | \ - V_MCAPARERRENB(M_MCAPARERRENB)) -#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE) -#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \ - F_PM1_RX | 
F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \ - F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \ - F_MPS0 | F_CPL_SWITCH) -/* - * Interrupt handler for the PCIX1 module. - */ -static void pci_intr_handler(struct adapter *adapter) -{ - static const struct intr_info pcix1_intr_info[] = { - {F_MSTDETPARERR, "PCI master detected parity error", -1, 1}, - {F_SIGTARABT, "PCI signaled target abort", -1, 1}, - {F_RCVTARABT, "PCI received target abort", -1, 1}, - {F_RCVMSTABT, "PCI received master abort", -1, 1}, - {F_SIGSYSERR, "PCI signaled system error", -1, 1}, - {F_DETPARERR, "PCI detected parity error", -1, 1}, - {F_SPLCMPDIS, "PCI split completion discarded", -1, 1}, - {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1}, - {F_RCVSPLCMPERR, "PCI received split completion error", -1, - 1}, - {F_DETCORECCERR, "PCI correctable ECC error", - STAT_PCI_CORR_ECC, 0}, - {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1}, - {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1}, - {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1, - 1}, - {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1, - 1}, - {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1, - 1}, - {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity " - "error", -1, 1}, - {0} - }; - - if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK, - pcix1_intr_info, adapter->irq_stats)) - t3_fatal_err(adapter); -} - -/* - * Interrupt handler for the PCIE module. - */ -static void pcie_intr_handler(struct adapter *adapter) -{ - static const struct intr_info pcie_intr_info[] = { - {F_PEXERR, "PCI PEX error", -1, 1}, - {F_UNXSPLCPLERRR, - "PCI unexpected split completion DMA read error", -1, 1}, - {F_UNXSPLCPLERRC, - "PCI unexpected split completion DMA command error", -1, 1}, - {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1}, - {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1}, - {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1}, - {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1}, - {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR), - "PCI MSI-X table/PBA parity error", -1, 1}, - {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1}, - {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1}, - {F_RXPARERR, "PCI Rx parity error", -1, 1}, - {F_TXPARERR, "PCI Tx parity error", -1, 1}, - {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1}, - {0} - }; - - if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR) - CH_ALERT(adapter, "PEX error code 0x%x\n", - t3_read_reg(adapter, A_PCIE_PEX_ERR)); - - if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK, - pcie_intr_info, adapter->irq_stats)) - t3_fatal_err(adapter); -} - -/* - * TP interrupt handler. - */ -static void tp_intr_handler(struct adapter *adapter) -{ - static const struct intr_info tp_intr_info[] = { - {0xffffff, "TP parity error", -1, 1}, - {0x1000000, "TP out of Rx pages", -1, 1}, - {0x2000000, "TP out of Tx pages", -1, 1}, - {0} - }; - - static const struct intr_info tp_intr_info_t3c[] = { - {0x1fffffff, "TP parity error", -1, 1}, - {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1}, - {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1}, - {0} - }; - - if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff, - adapter->params.rev < T3_REV_C ? - tp_intr_info : tp_intr_info_t3c, NULL)) - t3_fatal_err(adapter); -} - -/* - * CIM interrupt handler. 
- */ -static void cim_intr_handler(struct adapter *adapter) -{ - static const struct intr_info cim_intr_info[] = { - {F_RSVDSPACEINT, "CIM reserved space write", -1, 1}, - {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1}, - {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1}, - {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1}, - {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1}, - {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1}, - {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1}, - {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1}, - {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1}, - {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1}, - {F_BLKRDPLINT, "CIM block read from PL space", -1, 1}, - {F_BLKWRPLINT, "CIM block write to PL space", -1, 1}, - {F_DRAMPARERR, "CIM DRAM parity error", -1, 1}, - {F_ICACHEPARERR, "CIM icache parity error", -1, 1}, - {F_DCACHEPARERR, "CIM dcache parity error", -1, 1}, - {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1}, - {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1}, - {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1}, - {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1}, - {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1}, - {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1}, - {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1}, - {F_ITAGPARERR, "CIM itag parity error", -1, 1}, - {F_DTAGPARERR, "CIM dtag parity error", -1, 1}, - {0} - }; - - if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff, - cim_intr_info, NULL)) - t3_fatal_err(adapter); -} - -/* - * ULP RX interrupt handler. - */ -static void ulprx_intr_handler(struct adapter *adapter) -{ - static const struct intr_info ulprx_intr_info[] = { - {F_PARERRDATA, "ULP RX data parity error", -1, 1}, - {F_PARERRPCMD, "ULP RX command parity error", -1, 1}, - {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1}, - {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1}, - {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1}, - {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1}, - {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1}, - {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1}, - {0} - }; - - if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff, - ulprx_intr_info, NULL)) - t3_fatal_err(adapter); -} - -/* - * ULP TX interrupt handler. - */ -static void ulptx_intr_handler(struct adapter *adapter) -{ - static const struct intr_info ulptx_intr_info[] = { - {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds", - STAT_ULP_CH0_PBL_OOB, 0}, - {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds", - STAT_ULP_CH1_PBL_OOB, 0}, - {0xfc, "ULP TX parity error", -1, 1}, - {0} - }; - - if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff, - ulptx_intr_info, adapter->irq_stats)) - t3_fatal_err(adapter); -} - -#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \ - F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \ - F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \ - F_ICSPI1_TX_FRAMING_ERROR) -#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \ - F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \ - F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \ - F_OESPI1_OFIFO2X_TX_FRAMING_ERROR) - -/* - * PM TX interrupt handler. 
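- *
- * All PM TX causes (zero-length pcmds plus ICSPI/OESPI framing and
- * parity errors) are treated as fatal.  No per-cause statistics are
- * kept, hence the NULL stats argument in the call below.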
- */ -static void pmtx_intr_handler(struct adapter *adapter) -{ - static const struct intr_info pmtx_intr_info[] = { - {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1}, - {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1}, - {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1}, - {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR), - "PMTX ispi parity error", -1, 1}, - {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR), - "PMTX ospi parity error", -1, 1}, - {0} - }; - - if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff, - pmtx_intr_info, NULL)) - t3_fatal_err(adapter); -} - -#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \ - F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \ - F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \ - F_IESPI1_TX_FRAMING_ERROR) -#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \ - F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \ - F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \ - F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR) - -/* - * PM RX interrupt handler. - */ -static void pmrx_intr_handler(struct adapter *adapter) -{ - static const struct intr_info pmrx_intr_info[] = { - {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1}, - {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1}, - {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1}, - {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR), - "PMRX ispi parity error", -1, 1}, - {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR), - "PMRX ospi parity error", -1, 1}, - {0} - }; - - if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff, - pmrx_intr_info, NULL)) - t3_fatal_err(adapter); -} - -/* - * CPL switch interrupt handler. - */ -static void cplsw_intr_handler(struct adapter *adapter) -{ - static const struct intr_info cplsw_intr_info[] = { - {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1}, - {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1}, - {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1}, - {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1}, - {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1}, - {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1}, - {0} - }; - - if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff, - cplsw_intr_info, NULL)) - t3_fatal_err(adapter); -} - -/* - * MPS interrupt handler. - */ -static void mps_intr_handler(struct adapter *adapter) -{ - static const struct intr_info mps_intr_info[] = { - {0x1ff, "MPS parity error", -1, 1}, - {0} - }; - - if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff, - mps_intr_info, NULL)) - t3_fatal_err(adapter); -} - -#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE) - -/* - * MC7 interrupt handler. 
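- *
- * Unlike the table-driven handlers above, MC7 causes are decoded by
- * hand so that the failing address and data words can be logged.
- * Only the MC7_INTR_FATAL causes (uncorrectable, parity, and address
- * errors) bring the adapter down; correctable ECC errors are merely
- * counted and warned about.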
- */ -static void mc7_intr_handler(struct mc7 *mc7) -{ - struct adapter *adapter = mc7->adapter; - u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE); - - if (cause & F_CE) { - mc7->stats.corr_err++; - CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, " - "data 0x%x 0x%x 0x%x\n", mc7->name, - t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR), - t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0), - t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1), - t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2)); - } - - if (cause & F_UE) { - mc7->stats.uncorr_err++; - CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, " - "data 0x%x 0x%x 0x%x\n", mc7->name, - t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR), - t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0), - t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1), - t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2)); - } - - if (G_PE(cause)) { - mc7->stats.parity_err++; - CH_ALERT(adapter, "%s MC7 parity error 0x%x\n", - mc7->name, G_PE(cause)); - } - - if (cause & F_AE) { - u32 addr = 0; - - if (adapter->params.rev > 0) - addr = t3_read_reg(adapter, - mc7->offset + A_MC7_ERR_ADDR); - mc7->stats.addr_err++; - CH_ALERT(adapter, "%s MC7 address error: 0x%x\n", - mc7->name, addr); - } - - if (cause & MC7_INTR_FATAL) - t3_fatal_err(adapter); - - t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause); -} - -#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \ - V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) -/* - * XGMAC interrupt handler. - */ -static int mac_intr_handler(struct adapter *adap, unsigned int idx) -{ - struct cmac *mac = &adap2pinfo(adap, idx)->mac; - /* - * We mask out interrupt causes for which we're not taking interrupts. - * This allows us to use polling logic to monitor some of the other - * conditions when taking interrupts would impose too much load on the - * system. - */ - u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) & - ~F_RXFIFO_OVERFLOW; - - if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) { - mac->stats.tx_fifo_parity_err++; - CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx); - } - if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) { - mac->stats.rx_fifo_parity_err++; - CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx); - } - if (cause & F_TXFIFO_UNDERRUN) - mac->stats.tx_fifo_urun++; - if (cause & F_RXFIFO_OVERFLOW) - mac->stats.rx_fifo_ovfl++; - if (cause & V_SERDES_LOS(M_SERDES_LOS)) - mac->stats.serdes_signal_loss++; - if (cause & F_XAUIPCSCTCERR) - mac->stats.xaui_pcs_ctc_err++; - if (cause & F_XAUIPCSALIGNCHANGE) - mac->stats.xaui_pcs_align_change++; - if (cause & F_XGM_INT) { - t3_set_reg_field(adap, - A_XGM_INT_ENABLE + mac->offset, - F_XGM_INT, 0); - mac->stats.link_faults++; - - t3_os_link_fault_handler(adap, idx); - } - - if (cause & XGM_INTR_FATAL) - t3_fatal_err(adap); - - t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause); - return cause != 0; -} - -/* - * Interrupt handler for PHY events. 
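- *
- * Each interrupt-capable PHY signals through a per-port GPIO pin
- * (adapter_info()->gpio_intr[]).  The PHY-specific intr_handler()
- * classifies the event: link and module changes are forwarded to the
- * corresponding OS callbacks, while FIFO errors are simply counted.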
- */ -int t3_phy_intr_handler(struct adapter *adapter) -{ - u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); - - for_each_port(adapter, i) { - struct port_info *p = adap2pinfo(adapter, i); - - if (!(p->phy.caps & SUPPORTED_IRQ)) - continue; - - if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) { - int phy_cause = p->phy.ops->intr_handler(&p->phy); - - if (phy_cause & cphy_cause_link_change) - t3_link_changed(adapter, i); - if (phy_cause & cphy_cause_fifo_error) - p->phy.fifo_errors++; - if (phy_cause & cphy_cause_module_change) - t3_os_phymod_changed(adapter, i); - } - } - - t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause); - return 0; -} - -/* - * T3 slow path (non-data) interrupt handler. - */ -int t3_slow_intr_handler(struct adapter *adapter) -{ - u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0); - - cause &= adapter->slow_intr_mask; - if (!cause) - return 0; - if (cause & F_PCIM0) { - if (is_pcie(adapter)) - pcie_intr_handler(adapter); - else - pci_intr_handler(adapter); - } - if (cause & F_SGE3) - t3_sge_err_intr_handler(adapter); - if (cause & F_MC7_PMRX) - mc7_intr_handler(&adapter->pmrx); - if (cause & F_MC7_PMTX) - mc7_intr_handler(&adapter->pmtx); - if (cause & F_MC7_CM) - mc7_intr_handler(&adapter->cm); - if (cause & F_CIM) - cim_intr_handler(adapter); - if (cause & F_TP1) - tp_intr_handler(adapter); - if (cause & F_ULP2_RX) - ulprx_intr_handler(adapter); - if (cause & F_ULP2_TX) - ulptx_intr_handler(adapter); - if (cause & F_PM1_RX) - pmrx_intr_handler(adapter); - if (cause & F_PM1_TX) - pmtx_intr_handler(adapter); - if (cause & F_CPL_SWITCH) - cplsw_intr_handler(adapter); - if (cause & F_MPS0) - mps_intr_handler(adapter); - if (cause & F_MC5A) - t3_mc5_intr_handler(&adapter->mc5); - if (cause & F_XGMAC0_0) - mac_intr_handler(adapter, 0); - if (cause & F_XGMAC0_1) - mac_intr_handler(adapter, 1); - if (cause & F_T3DBG) - t3_os_ext_intr_handler(adapter); - - /* Clear the interrupts just processed. */ - t3_write_reg(adapter, A_PL_INT_CAUSE0, cause); - t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ - return 1; -} - -static unsigned int calc_gpio_intr(struct adapter *adap) -{ - unsigned int i, gpi_intr = 0; - - for_each_port(adap, i) - if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) && - adapter_info(adap)->gpio_intr[i]) - gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i]; - return gpi_intr; -} - -/** - * t3_intr_enable - enable interrupts - * @adapter: the adapter whose interrupts should be enabled - * - * Enable interrupts by setting the interrupt enable registers of the - * various HW modules and then enabling the top-level interrupt - * concentrator. - */ -void t3_intr_enable(struct adapter *adapter) -{ - static const struct addr_val_pair intr_en_avp[] = { - {A_SG_INT_ENABLE, SGE_INTR_MASK}, - {A_MC7_INT_ENABLE, MC7_INTR_MASK}, - {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR, - MC7_INTR_MASK}, - {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR, - MC7_INTR_MASK}, - {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK}, - {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK}, - {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK}, - {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK}, - {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK}, - {A_MPS_INT_ENABLE, MPS_INTR_MASK}, - }; - - adapter->slow_intr_mask = PL_INTR_MASK; - - t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0); - t3_write_reg(adapter, A_TP_INT_ENABLE, - adapter->params.rev >= T3_REV_C ? 
0x2bfffff : 0x3bfffff); - - if (adapter->params.rev > 0) { - t3_write_reg(adapter, A_CPL_INTR_ENABLE, - CPLSW_INTR_MASK | F_CIM_OVFL_ERROR); - t3_write_reg(adapter, A_ULPTX_INT_ENABLE, - ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 | - F_PBL_BOUND_ERR_CH1); - } else { - t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK); - t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK); - } - - t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter)); - - if (is_pcie(adapter)) - t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK); - else - t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK); - t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask); - t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */ -} - -/** - * t3_intr_disable - disable a card's interrupts - * @adapter: the adapter whose interrupts should be disabled - * - * Disable interrupts. We only disable the top-level interrupt - * concentrator and the SGE data interrupts. - */ -void t3_intr_disable(struct adapter *adapter) -{ - t3_write_reg(adapter, A_PL_INT_ENABLE0, 0); - t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */ - adapter->slow_intr_mask = 0; -} - -/** - * t3_intr_clear - clear all interrupts - * @adapter: the adapter whose interrupts should be cleared - * - * Clears all interrupts. - */ -void t3_intr_clear(struct adapter *adapter) -{ - static const unsigned int cause_reg_addr[] = { - A_SG_INT_CAUSE, - A_SG_RSPQ_FL_STATUS, - A_PCIX_INT_CAUSE, - A_MC7_INT_CAUSE, - A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR, - A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR, - A_CIM_HOST_INT_CAUSE, - A_TP_INT_CAUSE, - A_MC5_DB_INT_CAUSE, - A_ULPRX_INT_CAUSE, - A_ULPTX_INT_CAUSE, - A_CPL_INTR_CAUSE, - A_PM1_TX_INT_CAUSE, - A_PM1_RX_INT_CAUSE, - A_MPS_INT_CAUSE, - A_T3DBG_INT_CAUSE, - }; - unsigned int i; - - /* Clear PHY and MAC interrupts for each port. */ - for_each_port(adapter, i) - t3_port_intr_clear(adapter, i); - - for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i) - t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff); - - if (is_pcie(adapter)) - t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff); - t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff); - t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ -} - -void t3_xgm_intr_enable(struct adapter *adapter, int idx) -{ - struct port_info *pi = adap2pinfo(adapter, idx); - - t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset, - XGM_EXTRA_INTR_MASK); -} - -void t3_xgm_intr_disable(struct adapter *adapter, int idx) -{ - struct port_info *pi = adap2pinfo(adapter, idx); - - t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset, - 0x7ff); -} - -/** - * t3_port_intr_enable - enable port-specific interrupts - * @adapter: associated adapter - * @idx: index of port whose interrupts should be enabled - * - * Enable port-specific (i.e., MAC and PHY) interrupts for the given - * adapter port. - */ -void t3_port_intr_enable(struct adapter *adapter, int idx) -{ - struct cphy *phy = &adap2pinfo(adapter, idx)->phy; - - t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK); - t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */ - phy->ops->intr_enable(phy); -} - -/** - * t3_port_intr_disable - disable port-specific interrupts - * @adapter: associated adapter - * @idx: index of port whose interrupts should be disabled - * - * Disable port-specific (i.e., MAC and PHY) interrupts for the given - * adapter port. 
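- *
- * The read-back of the interrupt enable register flushes the posted
- * write before the PHY's own interrupt disable routine is invoked.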
- */ -void t3_port_intr_disable(struct adapter *adapter, int idx) -{ - struct cphy *phy = &adap2pinfo(adapter, idx)->phy; - - t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0); - t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */ - phy->ops->intr_disable(phy); -} - -/** - * t3_port_intr_clear - clear port-specific interrupts - * @adapter: associated adapter - * @idx: index of port whose interrupts to clear - * - * Clear port-specific (i.e., MAC and PHY) interrupts for the given - * adapter port. - */ -static void t3_port_intr_clear(struct adapter *adapter, int idx) -{ - struct cphy *phy = &adap2pinfo(adapter, idx)->phy; - - t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff); - t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */ - phy->ops->intr_clear(phy); -} - -#define SG_CONTEXT_CMD_ATTEMPTS 100 - -/** - * t3_sge_write_context - write an SGE context - * @adapter: the adapter - * @id: the context id - * @type: the context type - * - * Program an SGE context with the values already loaded in the - * CONTEXT_DATA? registers. - */ -static int t3_sge_write_context(struct adapter *adapter, unsigned int id, - unsigned int type) -{ - if (type == F_RESPONSEQ) { - /* - * Can't write the Response Queue Context bits for - * Interrupt Armed or the Reserve bits after the chip - * has been initialized out of reset. Writing to these - * bits can confuse the hardware. - */ - t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); - t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); - t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff); - t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); - } else { - t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); - t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); - t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff); - t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); - } - t3_write_reg(adapter, A_SG_CONTEXT_CMD, - V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); - return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, SG_CONTEXT_CMD_ATTEMPTS, 1); -} - -/** - * clear_sge_ctxt - completely clear an SGE context - * @adapter: the adapter - * @id: the context id - * @type: the context type - * - * Completely clear an SGE context. Used predominantly at post-reset - * initialization. Note in particular that we don't skip writing to any - * "sensitive bits" in the contexts the way that t3_sge_write_context() - * does ... 
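- *
- * All four mask registers are set to all-ones so that every bit of
- * the four context data words is written, wiping the context clean.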
- */ -static int clear_sge_ctxt(struct adapter *adap, unsigned int id, - unsigned int type) -{ - t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0); - t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0); - t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0); - t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0); - t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff); - t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff); - t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff); - t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff); - t3_write_reg(adap, A_SG_CONTEXT_CMD, - V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); - return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, SG_CONTEXT_CMD_ATTEMPTS, 1); -} - -/** - * t3_sge_init_ecntxt - initialize an SGE egress context - * @adapter: the adapter to configure - * @id: the context id - * @gts_enable: whether to enable GTS for the context - * @type: the egress context type - * @respq: associated response queue - * @base_addr: base address of queue - * @size: number of queue entries - * @token: uP token - * @gen: initial generation value for the context - * @cidx: consumer pointer - * - * Initialize an SGE egress context and make it ready for use. If the - * platform allows concurrent context operations, the caller is - * responsible for appropriate locking. - */ -int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable, - enum sge_context_type type, int respq, u64 base_addr, - unsigned int size, unsigned int token, int gen, - unsigned int cidx) -{ - unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM; - - if (base_addr & 0xfff) /* must be 4K aligned */ - return -EINVAL; - if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) - return -EBUSY; - - base_addr >>= 12; - t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) | - V_EC_CREDITS(credits) | V_EC_GTS(gts_enable)); - t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) | - V_EC_BASE_LO(base_addr & 0xffff)); - base_addr >>= 16; - t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr); - base_addr >>= 32; - t3_write_reg(adapter, A_SG_CONTEXT_DATA3, - V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) | - V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) | - F_EC_VALID); - return t3_sge_write_context(adapter, id, F_EGRESS); -} - -/** - * t3_sge_init_flcntxt - initialize an SGE free-buffer list context - * @adapter: the adapter to configure - * @id: the context id - * @gts_enable: whether to enable GTS for the context - * @base_addr: base address of queue - * @size: number of queue entries - * @bsize: size of each buffer for this queue - * @cong_thres: threshold to signal congestion to upstream producers - * @gen: initial generation value for the context - * @cidx: consumer pointer - * - * Initialize an SGE free list context and make it ready for use. The - * caller is responsible for ensuring only one context operation occurs - * at a time. 
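- *
- * As with egress contexts, the free-list base address must be 4KB
- * aligned; the low 12 bits are shifted out before the address is
- * split across the context data words.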
- */ -int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id, - int gts_enable, u64 base_addr, unsigned int size, - unsigned int bsize, unsigned int cong_thres, int gen, - unsigned int cidx) -{ - if (base_addr & 0xfff) /* must be 4K aligned */ - return -EINVAL; - if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) - return -EBUSY; - - base_addr >>= 12; - t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr); - base_addr >>= 32; - t3_write_reg(adapter, A_SG_CONTEXT_DATA1, - V_FL_BASE_HI((u32) base_addr) | - V_FL_INDEX_LO(cidx & M_FL_INDEX_LO)); - t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) | - V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) | - V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO)); - t3_write_reg(adapter, A_SG_CONTEXT_DATA3, - V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) | - V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable)); - return t3_sge_write_context(adapter, id, F_FREELIST); -} - -/** - * t3_sge_init_rspcntxt - initialize an SGE response queue context - * @adapter: the adapter to configure - * @id: the context id - * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ - * @base_addr: base address of queue - * @size: number of queue entries - * @fl_thres: threshold for selecting the normal or jumbo free list - * @gen: initial generation value for the context - * @cidx: consumer pointer - * - * Initialize an SGE response queue context and make it ready for use. - * The caller is responsible for ensuring only one context operation - * occurs at a time. - */ -int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id, - int irq_vec_idx, u64 base_addr, unsigned int size, - unsigned int fl_thres, int gen, unsigned int cidx) -{ - unsigned int intr = 0; - - if (base_addr & 0xfff) /* must be 4K aligned */ - return -EINVAL; - if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) - return -EBUSY; - - base_addr >>= 12; - t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) | - V_CQ_INDEX(cidx)); - t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr); - base_addr >>= 32; - if (irq_vec_idx >= 0) - intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN; - t3_write_reg(adapter, A_SG_CONTEXT_DATA2, - V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen)); - t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres); - return t3_sge_write_context(adapter, id, F_RESPONSEQ); -} - -/** - * t3_sge_init_cqcntxt - initialize an SGE completion queue context - * @adapter: the adapter to configure - * @id: the context id - * @base_addr: base address of queue - * @size: number of queue entries - * @rspq: response queue for async notifications - * @ovfl_mode: CQ overflow mode - * @credits: completion queue credits - * @credit_thres: the credit threshold - * - * Initialize an SGE completion queue context and make it ready for use. - * The caller is responsible for ensuring only one context operation - * occurs at a time. 
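- *
- * Note that @ovfl_mode is written to both the CQ_OVERFLOW_MODE and
- * the CQ_ERR fields of the context.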
- */ -int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr, - unsigned int size, int rspq, int ovfl_mode, - unsigned int credits, unsigned int credit_thres) -{ - if (base_addr & 0xfff) /* must be 4K aligned */ - return -EINVAL; - if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) - return -EBUSY; - - base_addr >>= 12; - t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size)); - t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr); - base_addr >>= 32; - t3_write_reg(adapter, A_SG_CONTEXT_DATA2, - V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) | - V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) | - V_CQ_ERR(ovfl_mode)); - t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) | - V_CQ_CREDIT_THRES(credit_thres)); - return t3_sge_write_context(adapter, id, F_CQ); -} - -/** - * t3_sge_enable_ecntxt - enable/disable an SGE egress context - * @adapter: the adapter - * @id: the egress context id - * @enable: enable (1) or disable (0) the context - * - * Enable or disable an SGE egress context. The caller is responsible for - * ensuring only one context operation occurs at a time. - */ -int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable) -{ - if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) - return -EBUSY; - - t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0); - t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); - t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); - t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID); - t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable)); - t3_write_reg(adapter, A_SG_CONTEXT_CMD, - V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id)); - return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, SG_CONTEXT_CMD_ATTEMPTS, 1); -} - -/** - * t3_sge_disable_fl - disable an SGE free-buffer list - * @adapter: the adapter - * @id: the free list context id - * - * Disable an SGE free-buffer list. The caller is responsible for - * ensuring only one context operation occurs at a time. - */ -int t3_sge_disable_fl(struct adapter *adapter, unsigned int id) -{ - if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) - return -EBUSY; - - t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0); - t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); - t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE)); - t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); - t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0); - t3_write_reg(adapter, A_SG_CONTEXT_CMD, - V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id)); - return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, SG_CONTEXT_CMD_ATTEMPTS, 1); -} - -/** - * t3_sge_disable_rspcntxt - disable an SGE response queue - * @adapter: the adapter - * @id: the response queue context id - * - * Disable an SGE response queue. The caller is responsible for - * ensuring only one context operation occurs at a time. 
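- *
- * Disabling works by masking in only the size field and writing a
- * zero size, leaving the remaining context fields untouched.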
- */ -int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id) -{ - if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) - return -EBUSY; - - t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE)); - t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); - t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); - t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); - t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0); - t3_write_reg(adapter, A_SG_CONTEXT_CMD, - V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id)); - return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, SG_CONTEXT_CMD_ATTEMPTS, 1); -} - -/** - * t3_sge_disable_cqcntxt - disable an SGE completion queue - * @adapter: the adapter - * @id: the completion queue context id - * - * Disable an SGE completion queue. The caller is responsible for - * ensuring only one context operation occurs at a time. - */ -int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id) -{ - if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) - return -EBUSY; - - t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE)); - t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); - t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); - t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); - t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0); - t3_write_reg(adapter, A_SG_CONTEXT_CMD, - V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id)); - return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, SG_CONTEXT_CMD_ATTEMPTS, 1); -} - -/** - * t3_sge_cqcntxt_op - perform an operation on a completion queue context - * @adapter: the adapter - * @id: the context id - * @op: the operation to perform - * - * Perform the selected operation on an SGE completion queue context. - * The caller is responsible for ensuring only one context operation - * occurs at a time. - */ -int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, - unsigned int credits) -{ - u32 val; - - if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) - return -EBUSY; - - t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16); - t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) | - V_CONTEXT(id) | F_CQ); - if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val)) - return -EIO; - - if (op >= 2 && op < 7) { - if (adapter->params.rev > 0) - return G_CQ_INDEX(val); - - t3_write_reg(adapter, A_SG_CONTEXT_CMD, - V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id)); - if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, - F_CONTEXT_CMD_BUSY, 0, - SG_CONTEXT_CMD_ATTEMPTS, 1)) - return -EIO; - return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0)); - } - return 0; -} - -/** - * t3_config_rss - configure Rx packet steering - * @adapter: the adapter - * @rss_config: RSS settings (written to TP_RSS_CONFIG) - * @cpus: values for the CPU lookup table (0xff terminated) - * @rspq: values for the response queue lookup table (0xffff terminated) - * - * Programs the receive packet steering logic. @cpus and @rspq provide - * the values for the CPU and response queue lookup tables. If they - * provide fewer values than the size of the tables the supplied values - * are used repeatedly until the tables are fully populated. 
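- *
- * Each lookup-table write carries the table index in bits 16 and up
- * of the data word; the CPU table packs two 6-bit entries per write.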
- */ -void t3_config_rss(struct adapter *adapter, unsigned int rss_config, - const u8 * cpus, const u16 *rspq) -{ - int i, j, cpu_idx = 0, q_idx = 0; - - if (cpus) - for (i = 0; i < RSS_TABLE_SIZE; ++i) { - u32 val = i << 16; - - for (j = 0; j < 2; ++j) { - val |= (cpus[cpu_idx++] & 0x3f) << (8 * j); - if (cpus[cpu_idx] == 0xff) - cpu_idx = 0; - } - t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val); - } - - if (rspq) - for (i = 0; i < RSS_TABLE_SIZE; ++i) { - t3_write_reg(adapter, A_TP_RSS_MAP_TABLE, - (i << 16) | rspq[q_idx++]); - if (rspq[q_idx] == 0xffff) - q_idx = 0; - } - - t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config); -} - -/** - * t3_tp_set_offload_mode - put TP in NIC/offload mode - * @adap: the adapter - * @enable: 1 to select offload mode, 0 for regular NIC - * - * Switches TP to NIC/offload mode. - */ -void t3_tp_set_offload_mode(struct adapter *adap, int enable) -{ - if (is_offload(adap) || !enable) - t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, - V_NICMODE(!enable)); -} - -/** - * pm_num_pages - calculate the number of pages of the payload memory - * @mem_size: the size of the payload memory - * @pg_size: the size of each payload memory page - * - * Calculate the number of pages, each of the given size, that fit in a - * memory of the specified size, respecting the HW requirement that the - * number of pages must be a multiple of 24. - */ -static inline unsigned int pm_num_pages(unsigned int mem_size, - unsigned int pg_size) -{ - unsigned int n = mem_size / pg_size; - - return n - n % 24; -} - -#define mem_region(adap, start, size, reg) \ - t3_write_reg((adap), A_ ## reg, (start)); \ - start += size - -/** - * partition_mem - partition memory and configure TP memory settings - * @adap: the adapter - * @p: the TP parameters - * - * Partitions context and payload memory and configures TP's memory - * registers. 
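- *
- * The regions are carved out back to back with the mem_region()
- * macro, which programs a base-address register and then advances
- * the running offset by the region's size.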
- */ -static void partition_mem(struct adapter *adap, const struct tp_params *p) -{ - unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5); - unsigned int timers = 0, timers_shift = 22; - - if (adap->params.rev > 0) { - if (tids <= 16 * 1024) { - timers = 1; - timers_shift = 16; - } else if (tids <= 64 * 1024) { - timers = 2; - timers_shift = 18; - } else if (tids <= 256 * 1024) { - timers = 3; - timers_shift = 20; - } - } - - t3_write_reg(adap, A_TP_PMM_SIZE, - p->chan_rx_size | (p->chan_tx_size >> 16)); - - t3_write_reg(adap, A_TP_PMM_TX_BASE, 0); - t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size); - t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs); - t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX), - V_TXDATAACKIDX(fls(p->tx_pg_size) - 12)); - - t3_write_reg(adap, A_TP_PMM_RX_BASE, 0); - t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size); - t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs); - - pstructs = p->rx_num_pgs + p->tx_num_pgs; - /* Add a bit of headroom and make multiple of 24 */ - pstructs += 48; - pstructs -= pstructs % 24; - t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs); - - m = tids * TCB_SIZE; - mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR); - mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR); - t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m); - m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22); - mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE); - mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE); - mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE); - mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE); - - m = (m + 4095) & ~0xfff; - t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m); - t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m); - - tids = (p->cm_size - m - (3 << 20)) / 3072 - 32; - m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers - - adap->params.mc5.nfilters - adap->params.mc5.nroutes; - if (tids < m) - adap->params.mc5.nservers += m - tids; -} - -static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr, - u32 val) -{ - t3_write_reg(adap, A_TP_PIO_ADDR, addr); - t3_write_reg(adap, A_TP_PIO_DATA, val); -} - -static void tp_config(struct adapter *adap, const struct tp_params *p) -{ - t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU | - F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD | - F_TCPCHECKSUMOFFLOAD | V_IPTTL(64)); - t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) | - F_MTUENABLE | V_WINDOWSCALEMODE(1) | - V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1)); - t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) | - V_AUTOSTATE2(1) | V_AUTOSTATE1(0) | - V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) | - F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1)); - t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO, - F_IPV6ENABLE | F_NICMODE); - t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814); - t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105); - t3_set_reg_field(adap, A_TP_PARA_REG6, 0, - adap->params.rev > 0 ? 
F_ENABLEESND : - F_T3A_ENABLEESND); - - t3_set_reg_field(adap, A_TP_PC_CONFIG, - F_ENABLEEPCMDAFULL, - F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK | - F_TXCONGESTIONMODE | F_RXCONGESTIONMODE); - t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, - F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN | - F_ENABLEARPMISS | F_DISBLEDAPARBIT0); - t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080); - t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000); - - if (adap->params.rev > 0) { - tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE); - t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO, - F_TXPACEAUTO); - t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID); - t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT); - } else - t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED); - - if (adap->params.rev == T3_REV_C) - t3_set_reg_field(adap, A_TP_PC_CONFIG, - V_TABLELATENCYDELTA(M_TABLELATENCYDELTA), - V_TABLELATENCYDELTA(4)); - - t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0); - t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0); - t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0); - t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000); -} - -/* Desired TP timer resolution in usec */ -#define TP_TMR_RES 50 - -/* TCP timer values in ms */ -#define TP_DACK_TIMER 50 -#define TP_RTO_MIN 250 - -/** - * tp_set_timers - set TP timing parameters - * @adap: the adapter to set - * @core_clk: the core clock frequency in Hz - * - * Set TP's timing parameters, such as the various timer resolutions and - * the TCP timer values. - */ -static void tp_set_timers(struct adapter *adap, unsigned int core_clk) -{ - unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1; - unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */ - unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */ - unsigned int tps = core_clk >> tre; - - t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) | - V_DELAYEDACKRESOLUTION(dack_re) | - V_TIMESTAMPRESOLUTION(tstamp_re)); - t3_write_reg(adap, A_TP_DACK_TIMER, - (core_clk >> dack_re) / (1000 / TP_DACK_TIMER)); - t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100); - t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504); - t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908); - t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c); - t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | - V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) | - V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) | - V_KEEPALIVEMAX(9)); - -#define SECONDS * tps - - t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS); - t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN)); - t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS); - t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS); - t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS); - t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS); - t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS); - t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS); - t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS); - -#undef SECONDS -} - -/** - * t3_tp_set_coalescing_size - set receive coalescing size - * @adap: the adapter - * @size: the receive coalescing size - * @psh: whether a set PSH bit should deliver coalesced data - * - * Set the receive coalescing size and PSH bit handling. 
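- *
- * A @size of 0 disables receive coalescing; sizes above
- * MAX_RX_COALESCING_LEN are rejected with -EINVAL.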
- */
-static int t3_tp_set_coalescing_size(struct adapter *adap,
-				     unsigned int size, int psh)
-{
-	u32 val;
-
-	if (size > MAX_RX_COALESCING_LEN)
-		return -EINVAL;
-
-	val = t3_read_reg(adap, A_TP_PARA_REG3);
-	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
-
-	if (size) {
-		val |= F_RXCOALESCEENABLE;
-		if (psh)
-			val |= F_RXCOALESCEPSHEN;
-		size = min(MAX_RX_COALESCING_LEN, size);
-		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
-			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
-	}
-	t3_write_reg(adap, A_TP_PARA_REG3, val);
-	return 0;
-}
-
-/**
- * t3_tp_set_max_rxsize - set the max receive size
- * @adap: the adapter
- * @size: the max receive size
- *
- * Set TP's max receive size. This is the limit that applies when
- * receive coalescing is disabled.
- */
-static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
-{
-	t3_write_reg(adap, A_TP_PARA_REG7,
-		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
-}
-
-static void init_mtus(unsigned short mtus[])
-{
-	/*
-	 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
-	 * it can accommodate max size TCP/IP headers when SACK and timestamps
-	 * are enabled and still have at least 8 bytes of payload.
-	 */
-	mtus[0] = 88;
-	mtus[1] = 88;
-	mtus[2] = 256;
-	mtus[3] = 512;
-	mtus[4] = 576;
-	mtus[5] = 1024;
-	mtus[6] = 1280;
-	mtus[7] = 1492;
-	mtus[8] = 1500;
-	mtus[9] = 2002;
-	mtus[10] = 2048;
-	mtus[11] = 4096;
-	mtus[12] = 4352;
-	mtus[13] = 8192;
-	mtus[14] = 9000;
-	mtus[15] = 9600;
-}
-
-/*
- * Initial congestion control parameters.
- */
-static void init_cong_ctrl(unsigned short *a, unsigned short *b)
-{
-	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
-	a[9] = 2;
-	a[10] = 3;
-	a[11] = 4;
-	a[12] = 5;
-	a[13] = 6;
-	a[14] = 7;
-	a[15] = 8;
-	a[16] = 9;
-	a[17] = 10;
-	a[18] = 14;
-	a[19] = 17;
-	a[20] = 21;
-	a[21] = 25;
-	a[22] = 30;
-	a[23] = 35;
-	a[24] = 45;
-	a[25] = 60;
-	a[26] = 80;
-	a[27] = 100;
-	a[28] = 200;
-	a[29] = 300;
-	a[30] = 400;
-	a[31] = 500;
-
-	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
-	b[9] = b[10] = 1;
-	b[11] = b[12] = 2;
-	b[13] = b[14] = b[15] = b[16] = 3;
-	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
-	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
-	b[28] = b[29] = 6;
-	b[30] = b[31] = 7;
-}
-
-/* The minimum additive increment value for the congestion control table */
-#define CC_MIN_INCR 2U
-
-/**
- * t3_load_mtus - write the MTU and congestion control HW tables
- * @adap: the adapter
- * @mtus: the unrestricted values for the MTU table
- * @alpha: the values for the congestion control alpha parameter
- * @beta: the values for the congestion control beta parameter
- * @mtu_cap: the maximum permitted effective MTU
- *
- * Write the MTU table with the supplied MTUs capping each at @mtu_cap.
- * Update the high-speed congestion control table with the supplied alpha,
- * beta, and MTUs.
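- *
- * For each MTU/window pair the additive increment is the window's
- * alpha scaled by the MTU payload (mtu - 40) and divided by the
- * window's average packet count, floored at CC_MIN_INCR.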
- */ -void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS], - unsigned short alpha[NCCTRL_WIN], - unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap) -{ - static const unsigned int avg_pkts[NCCTRL_WIN] = { - 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, - 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, - 28672, 40960, 57344, 81920, 114688, 163840, 229376 - }; - - unsigned int i, w; - - for (i = 0; i < NMTUS; ++i) { - unsigned int mtu = min(mtus[i], mtu_cap); - unsigned int log2 = fls(mtu); - - if (!(mtu & ((1 << log2) >> 2))) /* round */ - log2--; - t3_write_reg(adap, A_TP_MTU_TABLE, - (i << 24) | (log2 << 16) | mtu); - - for (w = 0; w < NCCTRL_WIN; ++w) { - unsigned int inc; - - inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], - CC_MIN_INCR); - - t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | - (w << 16) | (beta[w] << 13) | inc); - } - } -} - -/** - * t3_tp_get_mib_stats - read TP's MIB counters - * @adap: the adapter - * @tps: holds the returned counter values - * - * Returns the values of TP's MIB counters. - */ -void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps) -{ - t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps, - sizeof(*tps) / sizeof(u32), 0); -} - -#define ulp_region(adap, name, start, len) \ - t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \ - t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \ - (start) + (len) - 1); \ - start += len - -#define ulptx_region(adap, name, start, len) \ - t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \ - t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \ - (start) + (len) - 1) - -static void ulp_config(struct adapter *adap, const struct tp_params *p) -{ - unsigned int m = p->chan_rx_size; - - ulp_region(adap, ISCSI, m, p->chan_rx_size / 8); - ulp_region(adap, TDDP, m, p->chan_rx_size / 8); - ulptx_region(adap, TPT, m, p->chan_rx_size / 4); - ulp_region(adap, STAG, m, p->chan_rx_size / 4); - ulp_region(adap, RQ, m, p->chan_rx_size / 4); - ulptx_region(adap, PBL, m, p->chan_rx_size / 4); - ulp_region(adap, PBL, m, p->chan_rx_size / 4); - t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff); -} - -/** - * t3_set_proto_sram - set the contents of the protocol sram - * @adapter: the adapter - * @data: the protocol image - * - * Write the contents of the protocol SRAM. 
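- *
- * Each of the PROTO_SRAM_LINES lines is five 32-bit words, staged
- * through the EMBED_OP_FIELD5..FIELD1 registers and committed by the
- * write to FIELD0, which is then polled for completion.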
- */ -int t3_set_proto_sram(struct adapter *adap, const u8 *data) -{ - int i; - const __be32 *buf = (const __be32 *)data; - - for (i = 0; i < PROTO_SRAM_LINES; i++) { - t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++)); - t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++)); - t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++)); - t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++)); - t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++)); - - t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31); - if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1)) - return -EIO; - } - t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0); - - return 0; -} - -void t3_config_trace_filter(struct adapter *adapter, - const struct trace_params *tp, int filter_index, - int invert, int enable) -{ - u32 addr, key[4], mask[4]; - - key[0] = tp->sport | (tp->sip << 16); - key[1] = (tp->sip >> 16) | (tp->dport << 16); - key[2] = tp->dip; - key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20); - - mask[0] = tp->sport_mask | (tp->sip_mask << 16); - mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16); - mask[2] = tp->dip_mask; - mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20); - - if (invert) - key[3] |= (1 << 29); - if (enable) - key[3] |= (1 << 28); - - addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0; - tp_wr_indirect(adapter, addr++, key[0]); - tp_wr_indirect(adapter, addr++, mask[0]); - tp_wr_indirect(adapter, addr++, key[1]); - tp_wr_indirect(adapter, addr++, mask[1]); - tp_wr_indirect(adapter, addr++, key[2]); - tp_wr_indirect(adapter, addr++, mask[2]); - tp_wr_indirect(adapter, addr++, key[3]); - tp_wr_indirect(adapter, addr, mask[3]); - t3_read_reg(adapter, A_TP_PIO_DATA); -} - -/** - * t3_config_sched - configure a HW traffic scheduler - * @adap: the adapter - * @kbps: target rate in Kbps - * @sched: the scheduler index - * - * Configure a HW scheduler for the target rate - */ -int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched) -{ - unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; - unsigned int clk = adap->params.vpd.cclk * 1000; - unsigned int selected_cpt = 0, selected_bpt = 0; - - if (kbps > 0) { - kbps *= 125; /* -> bytes */ - for (cpt = 1; cpt <= 255; cpt++) { - tps = clk / cpt; - bpt = (kbps + tps / 2) / tps; - if (bpt > 0 && bpt <= 255) { - v = bpt * tps; - delta = v >= kbps ? 
v - kbps : kbps - v; - if (delta <= mindelta) { - mindelta = delta; - selected_cpt = cpt; - selected_bpt = bpt; - } - } else if (selected_cpt) - break; - } - if (!selected_cpt) - return -EINVAL; - } - t3_write_reg(adap, A_TP_TM_PIO_ADDR, - A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); - v = t3_read_reg(adap, A_TP_TM_PIO_DATA); - if (sched & 1) - v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24); - else - v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8); - t3_write_reg(adap, A_TP_TM_PIO_DATA, v); - return 0; -} - -static int tp_init(struct adapter *adap, const struct tp_params *p) -{ - int busy = 0; - - tp_config(adap, p); - t3_set_vlan_accel(adap, 3, 0); - - if (is_offload(adap)) { - tp_set_timers(adap, adap->params.vpd.cclk * 1000); - t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE); - busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE, - 0, 1000, 5); - if (busy) - CH_ERR(adap, "TP initialization timed out\n"); - } - - if (!busy) - t3_write_reg(adap, A_TP_RESET, F_TPRESET); - return busy; -} - -/* - * Perform the bits of HW initialization that are dependent on the Tx - * channels being used. - */ -static void chan_init_hw(struct adapter *adap, unsigned int chan_map) -{ - int i; - - if (chan_map != 3) { /* one channel */ - t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0); - t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0); - t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT | - (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE : - F_TPTXPORT1EN | F_PORT1ACTIVE)); - t3_write_reg(adap, A_PM1_TX_CFG, - chan_map == 1 ? 0xffffffff : 0); - } else { /* two channels */ - t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN); - t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB); - t3_write_reg(adap, A_ULPTX_DMA_WEIGHT, - V_D1_WEIGHT(16) | V_D0_WEIGHT(16)); - t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN | - F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE | - F_ENFORCEPKT); - t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000); - t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE); - t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP, - V_TX_MOD_QUEUE_REQ_MAP(0xaa)); - for (i = 0; i < 16; i++) - t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, - (i << 16) | 0x1010); - } -} - -static int calibrate_xgm(struct adapter *adapter) -{ - if (uses_xaui(adapter)) { - unsigned int v, i; - - for (i = 0; i < 5; ++i) { - t3_write_reg(adapter, A_XGM_XAUI_IMP, 0); - t3_read_reg(adapter, A_XGM_XAUI_IMP); - msleep(1); - v = t3_read_reg(adapter, A_XGM_XAUI_IMP); - if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) { - t3_write_reg(adapter, A_XGM_XAUI_IMP, - V_XAUIIMP(G_CALIMP(v) >> 2)); - return 0; - } - } - CH_ERR(adapter, "MAC calibration failed\n"); - return -1; - } else { - t3_write_reg(adapter, A_XGM_RGMII_IMP, - V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3)); - t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, - F_XGM_IMPSETUPDATE); - } - return 0; -} - -static void calibrate_xgm_t3b(struct adapter *adapter) -{ - if (!uses_xaui(adapter)) { - t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET | - F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3)); - t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0); - t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, - F_XGM_IMPSETUPDATE); - t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, - 0); - t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0); - t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE); - } -} - -struct mc7_timing_params { - unsigned char ActToPreDly; - unsigned char ActToRdWrDly; - unsigned 
char PreCyc; - unsigned char RefCyc[5]; - unsigned char BkCyc; - unsigned char WrToRdDly; - unsigned char RdToWrDly; -}; - -/* - * Write a value to a register and check that the write completed. These - * writes normally complete in a cycle or two, so one read should suffice. - * The very first read exists to flush the posted write to the device. - */ -static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val) -{ - t3_write_reg(adapter, addr, val); - t3_read_reg(adapter, addr); /* flush */ - if (!(t3_read_reg(adapter, addr) & F_BUSY)) - return 0; - CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr); - return -EIO; -} - -static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type) -{ - static const unsigned int mc7_mode[] = { - 0x632, 0x642, 0x652, 0x432, 0x442 - }; - static const struct mc7_timing_params mc7_timings[] = { - {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4}, - {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4}, - {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4}, - {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4}, - {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4} - }; - - u32 val; - unsigned int width, density, slow, attempts; - struct adapter *adapter = mc7->adapter; - const struct mc7_timing_params *p = &mc7_timings[mem_type]; - - if (!mc7->size) - return 0; - - val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); - slow = val & F_SLOW; - width = G_WIDTH(val); - density = G_DEN(val); - - t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN); - val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */ - msleep(1); - - if (!slow) { - t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN); - t3_read_reg(adapter, mc7->offset + A_MC7_CAL); - msleep(1); - if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) & - (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) { - CH_ERR(adapter, "%s MC7 calibration timed out\n", - mc7->name); - goto out_fail; - } - } - - t3_write_reg(adapter, mc7->offset + A_MC7_PARM, - V_ACTTOPREDLY(p->ActToPreDly) | - V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) | - V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) | - V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly)); - - t3_write_reg(adapter, mc7->offset + A_MC7_CFG, - val | F_CLKEN | F_TERM150); - t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */ - - if (!slow) - t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB, - F_DLLENB); - udelay(1); - - val = slow ? 
3 : 6; - if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) || - wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) || - wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) || - wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val)) - goto out_fail; - - if (!slow) { - t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100); - t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0); - udelay(5); - } - - if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) || - wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) || - wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) || - wrreg_wait(adapter, mc7->offset + A_MC7_MODE, - mc7_mode[mem_type]) || - wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) || - wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val)) - goto out_fail; - - /* clock value is in KHz */ - mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */ - mc7_clock /= 1000000; /* KHz->MHz, ns->us */ - - t3_write_reg(adapter, mc7->offset + A_MC7_REF, - F_PERREFEN | V_PREREFDIV(mc7_clock)); - t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */ - - t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN); - t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0); - t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0); - t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END, - (mc7->size << width) - 1); - t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1)); - t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */ - - attempts = 50; - do { - msleep(250); - val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); - } while ((val & F_BUSY) && --attempts); - if (val & F_BUSY) { - CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name); - goto out_fail; - } - - /* Enable normal memory accesses. */ - t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY); - return 0; - -out_fail: - return -1; -} - -static void config_pcie(struct adapter *adap) -{ - static const u16 ack_lat[4][6] = { - {237, 416, 559, 1071, 2095, 4143}, - {128, 217, 289, 545, 1057, 2081}, - {73, 118, 154, 282, 538, 1050}, - {67, 107, 86, 150, 278, 534} - }; - static const u16 rpl_tmr[4][6] = { - {711, 1248, 1677, 3213, 6285, 12429}, - {384, 651, 867, 1635, 3171, 6243}, - {219, 354, 462, 846, 1614, 3150}, - {201, 321, 258, 450, 834, 1602} - }; - - u16 val, devid; - unsigned int log2_width, pldsize; - unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt; - - pci_read_config_word(adap->pdev, - adap->pdev->pcie_cap + PCI_EXP_DEVCTL, - &val); - pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5; - - pci_read_config_word(adap->pdev, 0x2, &devid); - if (devid == 0x37) { - pci_write_config_word(adap->pdev, - adap->pdev->pcie_cap + PCI_EXP_DEVCTL, - val & ~PCI_EXP_DEVCTL_READRQ & - ~PCI_EXP_DEVCTL_PAYLOAD); - pldsize = 0; - } - - pci_read_config_word(adap->pdev, adap->pdev->pcie_cap + PCI_EXP_LNKCTL, - &val); - - fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0)); - fst_trn_rx = adap->params.rev == 0 ? 
fst_trn_tx :
-		     G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
-	log2_width = fls(adap->params.pci.width) - 1;
-	acklat = ack_lat[log2_width][pldsize];
-	if (val & 1)		/* check LOsEnable */
-		acklat += fst_trn_tx * 4;
-	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
-
-	if (adap->params.rev == 0)
-		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
-				 V_T3A_ACKLAT(M_T3A_ACKLAT),
-				 V_T3A_ACKLAT(acklat));
-	else
-		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
-				 V_ACKLAT(acklat));
-
-	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
-			 V_REPLAYLMT(rpllmt));
-
-	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
-	t3_set_reg_field(adap, A_PCIE_CFG, 0,
-			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
-			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
-}
-
-/*
- * Initialize and configure T3 HW modules. This performs the
- * initialization steps that need to be done once after a card is reset.
- * MAC and PHY initialization is handled separately whenever a port is enabled.
- *
- * fw_params are passed to FW and their value is platform dependent. Only the
- * top 8 bits are available for use, the rest must be 0.
- */
-int t3_init_hw(struct adapter *adapter, u32 fw_params)
-{
-	int err = -EIO, attempts, i;
-	const struct vpd_params *vpd = &adapter->params.vpd;
-
-	if (adapter->params.rev > 0)
-		calibrate_xgm_t3b(adapter);
-	else if (calibrate_xgm(adapter))
-		goto out_err;
-
-	if (vpd->mclk) {
-		partition_mem(adapter, &adapter->params.tp);
-
-		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
-		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
-		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
-		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
-				adapter->params.mc5.nfilters,
-				adapter->params.mc5.nroutes))
-			goto out_err;
-
-		for (i = 0; i < 32; i++)
-			if (clear_sge_ctxt(adapter, i, F_CQ))
-				goto out_err;
-	}
-
-	if (tp_init(adapter, &adapter->params.tp))
-		goto out_err;
-
-	t3_tp_set_coalescing_size(adapter,
-				  min(adapter->params.sge.max_pkt_size,
-				      MAX_RX_COALESCING_LEN), 1);
-	t3_tp_set_max_rxsize(adapter,
-			     min(adapter->params.sge.max_pkt_size, 16384U));
-	ulp_config(adapter, &adapter->params.tp);
-
-	if (is_pcie(adapter))
-		config_pcie(adapter);
-	else
-		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
-				 F_DMASTOPEN | F_CLIDECEN);
-
-	if (adapter->params.rev == T3_REV_C)
-		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
-				 F_CFG_CQE_SOP_MASK);
-
-	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
-	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
-	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
-	chan_init_hw(adapter, adapter->params.chan_map);
-	t3_sge_init(adapter, &adapter->params.sge);
-	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
-
-	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
-
-	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
-	t3_write_reg(adapter, A_CIM_BOOT_CFG,
-		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
-	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */
-
-	attempts = 100;
-	do {			/* wait for uP to initialize */
-		msleep(20);
-	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
-	if (!attempts) {
-		CH_ERR(adapter, "uP initialization timed out\n");
-		goto out_err;
-	}
-
-	err = 0;
-out_err:
-	return err;
-}
-
-/**
- * get_pci_mode - determine a card's PCI mode
- * @adapter: the adapter
- * @p: where to store the PCI settings
- *
- * Determines a card's PCI mode and associated parameters, such as speed
- * and width.
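- *
- * For PCIe cards only the negotiated link width is read (from the
- * LNKSTA capability register); for PCI/PCI-X the speed and variant
- * are decoded from the PCIX_MODE register.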
- */ -static void get_pci_mode(struct adapter *adapter, struct pci_params *p) -{ - static unsigned short speed_map[] = { 33, 66, 100, 133 }; - u32 pci_mode, pcie_cap; - - pcie_cap = pci_pcie_cap(adapter->pdev); - if (pcie_cap) { - u16 val; - - p->variant = PCI_VARIANT_PCIE; - pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, - &val); - p->width = (val >> 4) & 0x3f; - return; - } - - pci_mode = t3_read_reg(adapter, A_PCIX_MODE); - p->speed = speed_map[G_PCLKRANGE(pci_mode)]; - p->width = (pci_mode & F_64BIT) ? 64 : 32; - pci_mode = G_PCIXINITPAT(pci_mode); - if (pci_mode == 0) - p->variant = PCI_VARIANT_PCI; - else if (pci_mode < 4) - p->variant = PCI_VARIANT_PCIX_MODE1_PARITY; - else if (pci_mode < 8) - p->variant = PCI_VARIANT_PCIX_MODE1_ECC; - else - p->variant = PCI_VARIANT_PCIX_266_MODE2; -} - -/** - * init_link_config - initialize a link's SW state - * @lc: structure holding the link state - * @ai: information about the current card - * - * Initializes the SW state maintained for each link, including the link's - * capabilities and default speed/duplex/flow-control/autonegotiation - * settings. - */ -static void init_link_config(struct link_config *lc, unsigned int caps) -{ - lc->supported = caps; - lc->requested_speed = lc->speed = SPEED_INVALID; - lc->requested_duplex = lc->duplex = DUPLEX_INVALID; - lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; - if (lc->supported & SUPPORTED_Autoneg) { - lc->advertising = lc->supported; - lc->autoneg = AUTONEG_ENABLE; - lc->requested_fc |= PAUSE_AUTONEG; - } else { - lc->advertising = 0; - lc->autoneg = AUTONEG_DISABLE; - } -} - -/** - * mc7_calc_size - calculate MC7 memory size - * @cfg: the MC7 configuration - * - * Calculates the size of an MC7 memory in bytes from the value of its - * configuration register. - */ -static unsigned int mc7_calc_size(u32 cfg) -{ - unsigned int width = G_WIDTH(cfg); - unsigned int banks = !!(cfg & F_BKS) + 1; - unsigned int org = !!(cfg & F_ORG) + 1; - unsigned int density = G_DEN(cfg); - unsigned int MBs = ((256 << density) * banks) / (org << width); - - return MBs << 20; -} - -static void mc7_prep(struct adapter *adapter, struct mc7 *mc7, - unsigned int base_addr, const char *name) -{ - u32 cfg; - - mc7->adapter = adapter; - mc7->name = name; - mc7->offset = base_addr - MC7_PMRX_BASE_ADDR; - cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); - mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg); - mc7->width = G_WIDTH(cfg); -} - -static void mac_prep(struct cmac *mac, struct adapter *adapter, int index) -{ - u16 devid; - - mac->adapter = adapter; - pci_read_config_word(adapter->pdev, 0x2, &devid); - - if (devid == 0x37 && !adapter->params.vpd.xauicfg[1]) - index = 0; - mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index; - mac->nucast = 1; - - if (adapter->params.rev == 0 && uses_xaui(adapter)) { - t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset, - is_10G(adapter) ? 0x2901c04 : 0x2301c04); - t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset, - F_ENRGMII, 0); - } -} - -static void early_hw_init(struct adapter *adapter, - const struct adapter_info *ai) -{ - u32 val = V_PORTSPEED(is_10G(adapter) ? 
3 : 2); - - mi1_init(adapter, ai); - t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */ - V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1)); - t3_write_reg(adapter, A_T3DBG_GPIO_EN, - ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL); - t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0); - t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff)); - - if (adapter->params.rev == 0 || !uses_xaui(adapter)) - val |= F_ENRGMII; - - /* Enable MAC clocks so we can access the registers */ - t3_write_reg(adapter, A_XGM_PORT_CFG, val); - t3_read_reg(adapter, A_XGM_PORT_CFG); - - val |= F_CLKDIVRESET_; - t3_write_reg(adapter, A_XGM_PORT_CFG, val); - t3_read_reg(adapter, A_XGM_PORT_CFG); - t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val); - t3_read_reg(adapter, A_XGM_PORT_CFG); -} - -/* - * Reset the adapter. - * Older PCIe cards lose their config space during reset, PCI-X - * ones don't. - */ -int t3_reset_adapter(struct adapter *adapter) -{ - int i, save_and_restore_pcie = - adapter->params.rev < T3_REV_B2 && is_pcie(adapter); - uint16_t devid = 0; - - if (save_and_restore_pcie) - pci_save_state(adapter->pdev); - t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE); - - /* - * Delay. Give Some time to device to reset fully. - * XXX The delay time should be modified. - */ - for (i = 0; i < 10; i++) { - msleep(50); - pci_read_config_word(adapter->pdev, 0x00, &devid); - if (devid == 0x1425) - break; - } - - if (devid != 0x1425) - return -1; - - if (save_and_restore_pcie) - pci_restore_state(adapter->pdev); - return 0; -} - -static int init_parity(struct adapter *adap) -{ - int i, err, addr; - - if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) - return -EBUSY; - - for (err = i = 0; !err && i < 16; i++) - err = clear_sge_ctxt(adap, i, F_EGRESS); - for (i = 0xfff0; !err && i <= 0xffff; i++) - err = clear_sge_ctxt(adap, i, F_EGRESS); - for (i = 0; !err && i < SGE_QSETS; i++) - err = clear_sge_ctxt(adap, i, F_RESPONSEQ); - if (err) - return err; - - t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0); - for (i = 0; i < 4; i++) - for (addr = 0; addr <= M_IBQDBGADDR; addr++) { - t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN | - F_IBQDBGWR | V_IBQDBGQID(i) | - V_IBQDBGADDR(addr)); - err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, - F_IBQDBGBUSY, 0, 2, 1); - if (err) - return err; - } - return 0; -} - -/* - * Initialize adapter SW state for the various HW modules, set initial values - * for some adapter tunables, take PHYs out of reset, and initialize the MDIO - * interface. - */ -int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, - int reset) -{ - int ret; - unsigned int i, j = -1; - - get_pci_mode(adapter, &adapter->params.pci); - - adapter->params.info = ai; - adapter->params.nports = ai->nports0 + ai->nports1; - adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1); - adapter->params.rev = t3_read_reg(adapter, A_PL_REV); - /* - * We used to only run the "adapter check task" once a second if - * we had PHYs which didn't support interrupts (we would check - * their link status once a second). Now we check other conditions - * in that routine which could potentially impose a very high - * interrupt load on the system. As such, we now always scan the - * adapter state once a second ... - */ - adapter->params.linkpoll_period = 10; - adapter->params.stats_update_period = is_10G(adapter) ? 
- MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10); - adapter->params.pci.vpd_cap_addr = - pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD); - ret = get_vpd_params(adapter, &adapter->params.vpd); - if (ret < 0) - return ret; - - if (reset && t3_reset_adapter(adapter)) - return -1; - - t3_sge_prep(adapter, &adapter->params.sge); - - if (adapter->params.vpd.mclk) { - struct tp_params *p = &adapter->params.tp; - - mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX"); - mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX"); - mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM"); - - p->nchan = adapter->params.chan_map == 3 ? 2 : 1; - p->pmrx_size = t3_mc7_size(&adapter->pmrx); - p->pmtx_size = t3_mc7_size(&adapter->pmtx); - p->cm_size = t3_mc7_size(&adapter->cm); - p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */ - p->chan_tx_size = p->pmtx_size / p->nchan; - p->rx_pg_size = 64 * 1024; - p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024; - p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size); - p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size); - p->ntimer_qs = p->cm_size >= (128 << 20) || - adapter->params.rev > 0 ? 12 : 6; - } - - adapter->params.offload = t3_mc7_size(&adapter->pmrx) && - t3_mc7_size(&adapter->pmtx) && - t3_mc7_size(&adapter->cm); - - if (is_offload(adapter)) { - adapter->params.mc5.nservers = DEFAULT_NSERVERS; - adapter->params.mc5.nfilters = adapter->params.rev > 0 ? - DEFAULT_NFILTERS : 0; - adapter->params.mc5.nroutes = 0; - t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT); - - init_mtus(adapter->params.mtus); - init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); - } - - early_hw_init(adapter, ai); - ret = init_parity(adapter); - if (ret) - return ret; - - for_each_port(adapter, i) { - u8 hw_addr[6]; - const struct port_type_info *pti; - struct port_info *p = adap2pinfo(adapter, i); - - while (!adapter->params.vpd.port_type[++j]) - ; - - pti = &port_types[adapter->params.vpd.port_type[j]]; - if (!pti->phy_prep) { - CH_ALERT(adapter, "Invalid port type index %d\n", - adapter->params.vpd.port_type[j]); - return -EINVAL; - } - - p->phy.mdio.dev = adapter->port[i]; - ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j, - ai->mdio_ops); - if (ret) - return ret; - mac_prep(&p->mac, adapter, j); - - /* - * The VPD EEPROM stores the base Ethernet address for the - * card. A port's address is derived from the base by adding - * the port's index to the base's low octet. - */ - memcpy(hw_addr, adapter->params.vpd.eth_base, 5); - hw_addr[5] = adapter->params.vpd.eth_base[5] + i; - - memcpy(adapter->port[i]->dev_addr, hw_addr, - ETH_ALEN); - memcpy(adapter->port[i]->perm_addr, hw_addr, - ETH_ALEN); - init_link_config(&p->link_config, p->phy.caps); - p->phy.ops->power_down(&p->phy, 1); - - /* - * If the PHY doesn't support interrupts for link status - * changes, schedule a scan of the adapter links at least - * once a second. 
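The address assignment in the loop above is exactly what its comment documents: the VPD EEPROM holds one base Ethernet address for the whole card, and each port's address is the base with the port index added to the low octet. A standalone sketch of the derivation (the base address below is a made-up example):

#include <stdio.h>
#include <string.h>

/* Derive a port's MAC from the adapter's base MAC: copy the first five
 * octets and add the port index to the last one, as t3_prep_adapter does. */
static void derive_port_mac(const unsigned char base[6], int port,
                            unsigned char out[6])
{
        memcpy(out, base, 5);
        out[5] = (unsigned char)(base[5] + port);
}

int main(void)
{
        const unsigned char base[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x50 };
        unsigned char mac[6];
        int port;

        for (port = 0; port < 2; port++) {
                derive_port_mac(base, port, mac);
                printf("port %d: %02x:%02x:%02x:%02x:%02x:%02x\n", port,
                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        }
        return 0;
}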
- */ - if (!(p->phy.caps & SUPPORTED_IRQ) && - adapter->params.linkpoll_period > 10) - adapter->params.linkpoll_period = 10; - } - - return 0; -} - -void t3_led_ready(struct adapter *adapter) -{ - t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, - F_GPIO0_OUT_VAL); -} - -int t3_replay_prep_adapter(struct adapter *adapter) -{ - const struct adapter_info *ai = adapter->params.info; - unsigned int i, j = -1; - int ret; - - early_hw_init(adapter, ai); - ret = init_parity(adapter); - if (ret) - return ret; - - for_each_port(adapter, i) { - const struct port_type_info *pti; - struct port_info *p = adap2pinfo(adapter, i); - - while (!adapter->params.vpd.port_type[++j]) - ; - - pti = &port_types[adapter->params.vpd.port_type[j]]; - ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL); - if (ret) - return ret; - p->phy.ops->power_down(&p->phy, 1); - } - -return 0; -} - diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h deleted file mode 100644 index 705713b..0000000 --- a/drivers/net/cxgb3/t3cdev.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (C) 2006-2008 Chelsio Communications. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
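Both t3_prep_adapter and t3_replay_prep_adapter above walk the VPD port_type table with "while (!port_type[++j]);", mapping the adapter's contiguous port indices onto a sparse table in which a zero entry means "no PHY in this slot". A standalone sketch of that skip idiom (table contents invented for illustration; like the driver, it assumes the table holds at least as many nonzero entries as there are ports):

#include <stdio.h>

int main(void)
{
        /* 0 = unused slot; nonzero = index into a port_types[] ops table. */
        const int port_type[8] = { 0, 2, 0, 0, 5, 0, 0, 0 };
        int i, j = -1;

        for (i = 0; i < 2; i++) {               /* two physical ports */
                while (!port_type[++j])
                        ;                       /* skip empty VPD slots */
                printf("port %d uses VPD slot %d (type %d)\n",
                       i, j, port_type[j]);
        }
        return 0;
}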
- */ -#ifndef _T3CDEV_H_ -#define _T3CDEV_H_ - -#include -#include -#include -#include -#include -#include - -#define T3CNAMSIZ 16 - -struct cxgb3_client; - -enum t3ctype { - T3A = 0, - T3B, - T3C, -}; - -struct t3cdev { - char name[T3CNAMSIZ]; /* T3C device name */ - enum t3ctype type; - struct list_head ofld_dev_list; /* for list linking */ - struct net_device *lldev; /* LL dev associated with T3C messages */ - struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */ - int (*send)(struct t3cdev *dev, struct sk_buff *skb); - int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n); - int (*ctl)(struct t3cdev *dev, unsigned int req, void *data); - void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh); - void *priv; /* driver private data */ - void *l2opt; /* optional layer 2 data */ - void *l3opt; /* optional layer 3 data */ - void *l4opt; /* optional layer 4 data */ - void *ulp; /* ulp stuff */ - void *ulp_iscsi; /* ulp iscsi */ -}; - -#endif /* _T3CDEV_H_ */ diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h deleted file mode 100644 index 8bda06e..0000000 --- a/drivers/net/cxgb3/version.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */ -#ifndef __CHELSIO_VERSION_H -#define __CHELSIO_VERSION_H -#define DRV_DESC "Chelsio T3 Network Driver" -#define DRV_NAME "cxgb3" -/* Driver version */ -#define DRV_VERSION "1.1.4-ko" - -/* Firmware version */ -#define FW_VERSION_MAJOR 7 -#define FW_VERSION_MINOR 10 -#define FW_VERSION_MICRO 0 -#endif /* __CHELSIO_VERSION_H */ diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/cxgb3/vsc8211.c deleted file mode 100644 index 4f9a1c2..0000000 --- a/drivers/net/cxgb3/vsc8211.c +++ /dev/null @@ -1,416 +0,0 @@ -/* - * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
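struct t3cdev above is a classic ops table: the cxgb3 core fills in send/recv/ctl/neigh_update, and offload clients call through the pointers without knowing anything about the backing driver. A minimal standalone sketch of the pattern, with a stub standing in for the real transmit path (names are hypothetical and the structure is reduced to the two members used):

#include <stdio.h>

struct sk_buff;                         /* opaque here */

struct t3cdev_sketch {
        const char *name;
        int (*send)(struct t3cdev_sketch *dev, struct sk_buff *skb);
};

/* Stub standing in for the driver's real transmit entry point. */
static int demo_send(struct t3cdev_sketch *dev, struct sk_buff *skb)
{
        printf("%s: queued one skb (%p)\n", dev->name, (void *)skb);
        return 0;
}

int main(void)
{
        struct t3cdev_sketch dev = { .name = "t3cdev0", .send = demo_send };

        /* An offload client sees only the ops pointer, not the driver. */
        return dev.send(&dev, NULL);
}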
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "common.h" - -/* VSC8211 PHY specific registers. */ -enum { - VSC8211_SIGDET_CTRL = 19, - VSC8211_EXT_CTRL = 23, - VSC8211_INTR_ENABLE = 25, - VSC8211_INTR_STATUS = 26, - VSC8211_LED_CTRL = 27, - VSC8211_AUX_CTRL_STAT = 28, - VSC8211_EXT_PAGE_AXS = 31, -}; - -enum { - VSC_INTR_RX_ERR = 1 << 0, - VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */ - VSC_INTR_CABLE = 1 << 2, /* cable impairment */ - VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */ - VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */ - VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */ - VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */ - VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */ - VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */ - VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */ - VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */ - VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */ - VSC_INTR_LINK_CHG = 1 << 13, /* link change */ - VSC_INTR_SPD_CHG = 1 << 14, /* speed change */ - VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */ -}; - -enum { - VSC_CTRL_CLAUSE37_VIEW = 1 << 4, /* Switch to Clause 37 view */ - VSC_CTRL_MEDIA_MODE_HI = 0xf000 /* High part of media mode select */ -}; - -#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \ - VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \ - VSC_INTR_NEG_DONE) -#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \ - VSC_INTR_ENABLE) - -/* PHY specific auxiliary control & status register fields */ -#define S_ACSR_ACTIPHY_TMR 0 -#define M_ACSR_ACTIPHY_TMR 0x3 -#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR) - -#define S_ACSR_SPEED 3 -#define M_ACSR_SPEED 0x3 -#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED) - -#define S_ACSR_DUPLEX 5 -#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX) - -#define S_ACSR_ACTIPHY 6 -#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY) - -/* - * Reset the PHY. This PHY completes reset immediately so we never wait. 
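The S_/M_/V_/G_ macros above follow the driver-wide bitfield convention: S_ is the field's shift, M_ its unshifted mask, V_ places a value into the field and G_ extracts one, while single-bit fields get an F_ flag. A standalone check using the ACSR speed and duplex fields exactly as defined above (the 0x0068 register value is invented):

#include <stdio.h>

#define S_ACSR_SPEED    3
#define M_ACSR_SPEED    0x3
#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)

#define S_ACSR_DUPLEX   5
#define F_ACSR_DUPLEX   (1 << S_ACSR_DUPLEX)

int main(void)
{
        unsigned int acsr = 0x0068;     /* example: speed=1, duplex bit set */

        /* 0 = 10M, 1 = 100M, 2 = 1000M in the VSC8211's encoding */
        printf("speed code %u, %s duplex\n", G_ACSR_SPEED(acsr),
               (acsr & F_ACSR_DUPLEX) ? "full" : "half");
        return 0;
}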
- */ -static int vsc8211_reset(struct cphy *cphy, int wait) -{ - return t3_phy_reset(cphy, MDIO_DEVAD_NONE, 0); -} - -static int vsc8211_intr_enable(struct cphy *cphy) -{ - return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, - INTR_MASK); -} - -static int vsc8211_intr_disable(struct cphy *cphy) -{ - return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, 0); -} - -static int vsc8211_intr_clear(struct cphy *cphy) -{ - u32 val; - - /* Clear PHY interrupts by reading the register. */ - return t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &val); -} - -static int vsc8211_autoneg_enable(struct cphy *cphy) -{ - return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR, - BMCR_PDOWN | BMCR_ISOLATE, - BMCR_ANENABLE | BMCR_ANRESTART); -} - -static int vsc8211_autoneg_restart(struct cphy *cphy) -{ - return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR, - BMCR_PDOWN | BMCR_ISOLATE, - BMCR_ANRESTART); -} - -static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok, - int *speed, int *duplex, int *fc) -{ - unsigned int bmcr, status, lpa, adv; - int err, sp = -1, dplx = -1, pause = 0; - - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr); - if (!err) - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status); - if (err) - return err; - - if (link_ok) { - /* - * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it - * once more to get the current link state. - */ - if (!(status & BMSR_LSTATUS)) - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, - &status); - if (err) - return err; - *link_ok = (status & BMSR_LSTATUS) != 0; - } - if (!(bmcr & BMCR_ANENABLE)) { - dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; - if (bmcr & BMCR_SPEED1000) - sp = SPEED_1000; - else if (bmcr & BMCR_SPEED100) - sp = SPEED_100; - else - sp = SPEED_10; - } else if (status & BMSR_ANEGCOMPLETE) { - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_AUX_CTRL_STAT, - &status); - if (err) - return err; - - dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; - sp = G_ACSR_SPEED(status); - if (sp == 0) - sp = SPEED_10; - else if (sp == 1) - sp = SPEED_100; - else - sp = SPEED_1000; - - if (fc && dplx == DUPLEX_FULL) { - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, - &lpa); - if (!err) - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, - MII_ADVERTISE, &adv); - if (err) - return err; - - if (lpa & adv & ADVERTISE_PAUSE_CAP) - pause = PAUSE_RX | PAUSE_TX; - else if ((lpa & ADVERTISE_PAUSE_CAP) && - (lpa & ADVERTISE_PAUSE_ASYM) && - (adv & ADVERTISE_PAUSE_ASYM)) - pause = PAUSE_TX; - else if ((lpa & ADVERTISE_PAUSE_ASYM) && - (adv & ADVERTISE_PAUSE_CAP)) - pause = PAUSE_RX; - } - } - if (speed) - *speed = sp; - if (duplex) - *duplex = dplx; - if (fc) - *fc = pause; - return 0; -} - -static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok, - int *speed, int *duplex, int *fc) -{ - unsigned int bmcr, status, lpa, adv; - int err, sp = -1, dplx = -1, pause = 0; - - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr); - if (!err) - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status); - if (err) - return err; - - if (link_ok) { - /* - * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it - * once more to get the current link state. - */ - if (!(status & BMSR_LSTATUS)) - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, - &status); - if (err) - return err; - *link_ok = (status & BMSR_LSTATUS) != 0; - } - if (!(bmcr & BMCR_ANENABLE)) { - dplx = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; - if (bmcr & BMCR_SPEED1000) - sp = SPEED_1000; - else if (bmcr & BMCR_SPEED100) - sp = SPEED_100; - else - sp = SPEED_10; - } else if (status & BMSR_ANEGCOMPLETE) { - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, &lpa); - if (!err) - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_ADVERTISE, - &adv); - if (err) - return err; - - if (adv & lpa & ADVERTISE_1000XFULL) { - dplx = DUPLEX_FULL; - sp = SPEED_1000; - } else if (adv & lpa & ADVERTISE_1000XHALF) { - dplx = DUPLEX_HALF; - sp = SPEED_1000; - } - - if (fc && dplx == DUPLEX_FULL) { - if (lpa & adv & ADVERTISE_1000XPAUSE) - pause = PAUSE_RX | PAUSE_TX; - else if ((lpa & ADVERTISE_1000XPAUSE) && - (adv & lpa & ADVERTISE_1000XPSE_ASYM)) - pause = PAUSE_TX; - else if ((lpa & ADVERTISE_1000XPSE_ASYM) && - (adv & ADVERTISE_1000XPAUSE)) - pause = PAUSE_RX; - } - } - if (speed) - *speed = sp; - if (duplex) - *duplex = dplx; - if (fc) - *fc = pause; - return 0; -} - -#ifdef UNUSED -/* - * Enable/disable auto MDI/MDI-X in forced link speed mode. - */ -static int vsc8211_set_automdi(struct cphy *phy, int enable) -{ - int err; - - err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0x52b5); - if (err) - return err; - - err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 18, 0x12); - if (err) - return err; - - err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 17, enable ? 0x2803 : 0x3003); - if (err) - return err; - - err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 16, 0x87fa); - if (err) - return err; - - err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0); - if (err) - return err; - - return 0; -} - -int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex) -{ - int err; - - err = t3_set_phy_speed_duplex(phy, speed, duplex); - if (!err) - err = vsc8211_set_automdi(phy, 1); - return err; -} -#endif /* UNUSED */ - -static int vsc8211_power_down(struct cphy *cphy, int enable) -{ - return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN, - enable ? 
BMCR_PDOWN : 0); -} - -static int vsc8211_intr_handler(struct cphy *cphy) -{ - unsigned int cause; - int err, cphy_cause = 0; - - err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &cause); - if (err) - return err; - - cause &= INTR_MASK; - if (cause & CFG_CHG_INTR_MASK) - cphy_cause |= cphy_cause_link_change; - if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO)) - cphy_cause |= cphy_cause_fifo_error; - return cphy_cause; -} - -static struct cphy_ops vsc8211_ops = { - .reset = vsc8211_reset, - .intr_enable = vsc8211_intr_enable, - .intr_disable = vsc8211_intr_disable, - .intr_clear = vsc8211_intr_clear, - .intr_handler = vsc8211_intr_handler, - .autoneg_enable = vsc8211_autoneg_enable, - .autoneg_restart = vsc8211_autoneg_restart, - .advertise = t3_phy_advertise, - .set_speed_duplex = t3_set_phy_speed_duplex, - .get_link_status = vsc8211_get_link_status, - .power_down = vsc8211_power_down, -}; - -static struct cphy_ops vsc8211_fiber_ops = { - .reset = vsc8211_reset, - .intr_enable = vsc8211_intr_enable, - .intr_disable = vsc8211_intr_disable, - .intr_clear = vsc8211_intr_clear, - .intr_handler = vsc8211_intr_handler, - .autoneg_enable = vsc8211_autoneg_enable, - .autoneg_restart = vsc8211_autoneg_restart, - .advertise = t3_phy_advertise_fiber, - .set_speed_duplex = t3_set_phy_speed_duplex, - .get_link_status = vsc8211_get_link_status_fiber, - .power_down = vsc8211_power_down, -}; - -int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter, - int phy_addr, const struct mdio_ops *mdio_ops) -{ - int err; - unsigned int val; - - cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops, - SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | - SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T"); - msleep(20); /* PHY needs ~10ms to start responding to MDIO */ - - err = t3_mdio_read(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, &val); - if (err) - return err; - if (val & VSC_CTRL_MEDIA_MODE_HI) { - /* copper interface, just need to configure the LEDs */ - return t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_LED_CTRL, - 0x100); - } - - phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | - SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ; - phy->desc = "1000BASE-X"; - phy->ops = &vsc8211_fiber_ops; - - err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 1); - if (err) - return err; - - err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_SIGDET_CTRL, 1); - if (err) - return err; - - err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0); - if (err) - return err; - - err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, - val | VSC_CTRL_CLAUSE37_VIEW); - if (err) - return err; - - err = vsc8211_reset(phy, 0); - if (err) - return err; - - udelay(5); /* delay after reset before next SMI */ - return 0; -} diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c deleted file mode 100644 index 3af19a5..0000000 --- a/drivers/net/cxgb3/xgmac.c +++ /dev/null @@ -1,657 +0,0 @@ -/* - * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
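The get_link_status routines above resolve flow control from the local advertisement (adv) and link-partner ability (lpa) following the usual 802.3 Annex 28B rules: symmetric pause when both sides advertise PAUSE, otherwise asymmetric pause decided by the ASYM bits. A standalone rendering of that decision table (the ADVERTISE_* constants match mii.h):

#include <stdio.h>

#define ADVERTISE_PAUSE_CAP  0x0400
#define ADVERTISE_PAUSE_ASYM 0x0800
#define PAUSE_RX (1 << 0)
#define PAUSE_TX (1 << 1)

/* Same resolution rules as vsc8211_get_link_status. */
static int resolve_pause(unsigned int adv, unsigned int lpa)
{
        if (lpa & adv & ADVERTISE_PAUSE_CAP)
                return PAUSE_RX | PAUSE_TX;
        if ((lpa & ADVERTISE_PAUSE_CAP) && (lpa & ADVERTISE_PAUSE_ASYM) &&
            (adv & ADVERTISE_PAUSE_ASYM))
                return PAUSE_TX;
        if ((lpa & ADVERTISE_PAUSE_ASYM) && (adv & ADVERTISE_PAUSE_CAP))
                return PAUSE_RX;
        return 0;
}

int main(void)
{
        /* We advertise asymmetric-only; the partner advertises pause+asym:
         * we may transmit PAUSE frames but will not act on received ones. */
        printf("%d\n", resolve_pause(ADVERTISE_PAUSE_ASYM,
                                     ADVERTISE_PAUSE_CAP |
                                     ADVERTISE_PAUSE_ASYM));
        return 0;
}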
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "common.h" -#include "regs.h" - -/* - * # of exact address filters. The first one is used for the station address, - * the rest are available for multicast addresses. - */ -#define EXACT_ADDR_FILTERS 8 - -static inline int macidx(const struct cmac *mac) -{ - return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR); -} - -static void xaui_serdes_reset(struct cmac *mac) -{ - static const unsigned int clear[] = { - F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1, - F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3 - }; - - int i; - struct adapter *adap = mac->adapter; - u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset; - - t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] | - F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 | - F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 | - F_RESETPLL23 | F_RESETPLL01); - t3_read_reg(adap, ctrl); - udelay(15); - - for (i = 0; i < ARRAY_SIZE(clear); i++) { - t3_set_reg_field(adap, ctrl, clear[i], 0); - udelay(15); - } -} - -void t3b_pcs_reset(struct cmac *mac) -{ - t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, - F_PCS_RESET_, 0); - udelay(20); - t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0, - F_PCS_RESET_); -} - -int t3_mac_reset(struct cmac *mac) -{ - static const struct addr_val_pair mac_reset_avp[] = { - {A_XGM_TX_CTRL, 0}, - {A_XGM_RX_CTRL, 0}, - {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES | - F_RMFCS | F_ENJUMBO | F_ENHASHMCAST}, - {A_XGM_RX_HASH_LOW, 0}, - {A_XGM_RX_HASH_HIGH, 0}, - {A_XGM_RX_EXACT_MATCH_LOW_1, 0}, - {A_XGM_RX_EXACT_MATCH_LOW_2, 0}, - {A_XGM_RX_EXACT_MATCH_LOW_3, 0}, - {A_XGM_RX_EXACT_MATCH_LOW_4, 0}, - {A_XGM_RX_EXACT_MATCH_LOW_5, 0}, - {A_XGM_RX_EXACT_MATCH_LOW_6, 0}, - {A_XGM_RX_EXACT_MATCH_LOW_7, 0}, - {A_XGM_RX_EXACT_MATCH_LOW_8, 0}, - {A_XGM_STAT_CTRL, F_CLRSTATS} - }; - u32 val; - struct adapter *adap = mac->adapter; - unsigned int oft = mac->offset; - - t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_); - t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ - - t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft); - t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft, - F_RXSTRFRWRD | F_DISERRFRAMES, - uses_xaui(adap) ? 
0 : F_RXSTRFRWRD); - t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX); - - if (uses_xaui(adap)) { - if (adap->params.rev == 0) { - t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0, - F_RXENABLE | F_TXENABLE); - if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft, - F_CMULOCK, 1, 5, 2)) { - CH_ERR(adap, - "MAC %d XAUI SERDES CMU lock failed\n", - macidx(mac)); - return -1; - } - t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0, - F_SERDESRESET_); - } else - xaui_serdes_reset(mac); - } - - t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft, - V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE), - V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER); - val = F_MAC_RESET_ | F_XGMAC_STOP_EN; - - if (is_10G(adap)) - val |= F_PCS_RESET_; - else if (uses_xaui(adap)) - val |= F_PCS_RESET_ | F_XG2G_RESET_; - else - val |= F_RGMII_RESET_ | F_XG2G_RESET_; - t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val); - t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ - if ((val & F_PCS_RESET_) && adap->params.rev) { - msleep(1); - t3b_pcs_reset(mac); - } - - memset(&mac->stats, 0, sizeof(mac->stats)); - return 0; -} - -static int t3b2_mac_reset(struct cmac *mac) -{ - struct adapter *adap = mac->adapter; - unsigned int oft = mac->offset, store; - int idx = macidx(mac); - u32 val; - - if (!macidx(mac)) - t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0); - else - t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0); - - /* Stop NIC traffic to reduce the number of TXTOGGLES */ - t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0); - /* Ensure TX drains */ - t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0); - - t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_); - t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ - - /* Store A_TP_TX_DROP_CFG_CH0 */ - t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); - store = t3_read_reg(adap, A_TP_TX_DROP_CFG_CH0 + idx); - - msleep(10); - - /* Change DROP_CFG to 0xc0000011 */ - t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); - t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011); - - /* Check for xgm Rx fifo empty */ - /* Increased loop count to 1000 from 5 cover 1G and 100Mbps case */ - if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft, - 0x80000000, 1, 1000, 2)) { - CH_ERR(adap, "MAC %d Rx fifo drain failed\n", - macidx(mac)); - return -1; - } - - t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0); - t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ - - val = F_MAC_RESET_; - if (is_10G(adap)) - val |= F_PCS_RESET_; - else if (uses_xaui(adap)) - val |= F_PCS_RESET_ | F_XG2G_RESET_; - else - val |= F_RGMII_RESET_ | F_XG2G_RESET_; - t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val); - t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ - if ((val & F_PCS_RESET_) && adap->params.rev) { - msleep(1); - t3b_pcs_reset(mac); - } - t3_write_reg(adap, A_XGM_RX_CFG + oft, - F_DISPAUSEFRAMES | F_EN1536BFRAMES | - F_RMFCS | F_ENJUMBO | F_ENHASHMCAST); - - /* Restore the DROP_CFG */ - t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); - t3_write_reg(adap, A_TP_PIO_DATA, store); - - if (!idx) - t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE); - else - t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE); - - /* re-enable nic traffic */ - t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1); - - /* Set: re-enable NIC traffic */ - t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1); - - return 0; -} - -/* - * Set the exact match register 'idx' to recognize the given Ethernet address. 
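set_addr_filter just below packs the six address octets into the two exact-match registers little-endian: octets 0-3 go into the LOW register, octets 4-5 into the HIGH one, and each filter occupies 8 bytes of register space (hence the idx * 8 offset). A standalone check of the packing (sample address invented):

#include <stdio.h>

int main(void)
{
        const unsigned char addr[6] = { 0x00, 0x07, 0x43, 0xaa, 0xbb, 0xcc };
        unsigned int addr_lo, addr_hi;

        /* Same packing as set_addr_filter: octet 0 lands in the low byte. */
        addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
        addr_hi = (addr[5] << 8) | addr[4];

        /* prints: low 0xaa430700 high 0xccbb */
        printf("low 0x%08x high 0x%04x\n", addr_lo, addr_hi);
        return 0;
}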
- */ -static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr) -{ - u32 addr_lo, addr_hi; - unsigned int oft = mac->offset + idx * 8; - - addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; - addr_hi = (addr[5] << 8) | addr[4]; - - t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo); - t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi); -} - -/* Set one of the station's unicast MAC addresses. */ -int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]) -{ - if (idx >= mac->nucast) - return -EINVAL; - set_addr_filter(mac, idx, addr); - return 0; -} - -/* - * Specify the number of exact address filters that should be reserved for - * unicast addresses. Caller should reload the unicast and multicast addresses - * after calling this. - */ -int t3_mac_set_num_ucast(struct cmac *mac, int n) -{ - if (n > EXACT_ADDR_FILTERS) - return -EINVAL; - mac->nucast = n; - return 0; -} - -void t3_mac_disable_exact_filters(struct cmac *mac) -{ - unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1; - - for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) { - u32 v = t3_read_reg(mac->adapter, reg); - t3_write_reg(mac->adapter, reg, v); - } - t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */ -} - -void t3_mac_enable_exact_filters(struct cmac *mac) -{ - unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1; - - for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) { - u32 v = t3_read_reg(mac->adapter, reg); - t3_write_reg(mac->adapter, reg, v); - } - t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */ -} - -/* Calculate the RX hash filter index of an Ethernet address */ -static int hash_hw_addr(const u8 * addr) -{ - int hash = 0, octet, bit, i = 0, c; - - for (octet = 0; octet < 6; ++octet) - for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) { - hash ^= (c & 1) << i; - if (++i == 6) - i = 0; - } - return hash; -} - -int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev) -{ - u32 val, hash_lo, hash_hi; - struct adapter *adap = mac->adapter; - unsigned int oft = mac->offset; - - val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES; - if (dev->flags & IFF_PROMISC) - val |= F_COPYALLFRAMES; - t3_write_reg(adap, A_XGM_RX_CFG + oft, val); - - if (dev->flags & IFF_ALLMULTI) - hash_lo = hash_hi = 0xffffffff; - else { - struct netdev_hw_addr *ha; - int exact_addr_idx = mac->nucast; - - hash_lo = hash_hi = 0; - netdev_for_each_mc_addr(ha, dev) - if (exact_addr_idx < EXACT_ADDR_FILTERS) - set_addr_filter(mac, exact_addr_idx++, - ha->addr); - else { - int hash = hash_hw_addr(ha->addr); - - if (hash < 32) - hash_lo |= (1 << hash); - else - hash_hi |= (1 << (hash - 32)); - } - } - - t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo); - t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi); - return 0; -} - -static int rx_fifo_hwm(int mtu) -{ - int hwm; - - hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100); - return min(hwm, MAC_RXFIFO_SIZE - 8192); -} - -int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) -{ - int hwm, lwm, divisor; - int ipg; - unsigned int thres, v, reg; - struct adapter *adap = mac->adapter; - - /* - * MAX_FRAME_SIZE inludes header + FCS, mtu doesn't. The HW max - * packet size register includes header, but not FCS. 
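For multicast addresses that overflow the exact-match filters, hash_hw_addr above folds all 48 address bits into a 6-bit index by XOR-ing each bit, LSB of each octet first, into a rotating 6-bit accumulator; t3_mac_set_rx_mode then sets the corresponding bit of the 64-bit HASH_LOW/HASH_HIGH pair. A standalone version of the hash with a sample multicast address:

#include <stdio.h>

/* Fold 48 MAC bits into a 6-bit hash, exactly as the driver does. */
static int hash_hw_addr(const unsigned char *addr)
{
        int hash = 0, octet, bit, i = 0, c;

        for (octet = 0; octet < 6; ++octet)
                for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
                        hash ^= (c & 1) << i;
                        if (++i == 6)
                                i = 0;
                }
        return hash;
}

int main(void)
{
        const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        int hash = hash_hw_addr(mc);

        /* hash < 32 selects a bit in HASH_LOW, otherwise in HASH_HIGH */
        printf("hash %d -> %s bit %d\n", hash,
               hash < 32 ? "HASH_LOW" : "HASH_HIGH",
               hash < 32 ? hash : hash - 32);
        return 0;
}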
- */ - mtu += 14; - if (mtu > 1536) - mtu += 4; - - if (mtu > MAX_FRAME_SIZE - 4) - return -EINVAL; - t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); - - if (adap->params.rev >= T3_REV_B2 && - (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) { - t3_mac_disable_exact_filters(mac); - v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset); - t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset, - F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST); - - reg = adap->params.rev == T3_REV_B2 ? - A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG; - - /* drain RX FIFO */ - if (t3_wait_op_done(adap, reg + mac->offset, - F_RXFIFO_EMPTY, 1, 20, 5)) { - t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v); - t3_mac_enable_exact_filters(mac); - return -EIO; - } - t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, - V_RXMAXPKTSIZE(M_RXMAXPKTSIZE), - V_RXMAXPKTSIZE(mtu)); - t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v); - t3_mac_enable_exact_filters(mac); - } else - t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, - V_RXMAXPKTSIZE(M_RXMAXPKTSIZE), - V_RXMAXPKTSIZE(mtu)); - - /* - * Adjust the PAUSE frame watermarks. We always set the LWM, and the - * HWM only if flow-control is enabled. - */ - hwm = rx_fifo_hwm(mtu); - lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4); - v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset); - v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM); - v |= V_RXFIFOPAUSELWM(lwm / 8); - if (G_RXFIFOPAUSEHWM(v)) - v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) | - V_RXFIFOPAUSEHWM(hwm / 8); - - t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v); - - /* Adjust the TX FIFO threshold based on the MTU */ - thres = (adap->params.vpd.cclk * 1000) / 15625; - thres = (thres * mtu) / 1000; - if (is_10G(adap)) - thres /= 10; - thres = mtu > thres ? (mtu - thres + 7) / 8 : 0; - thres = max(thres, 8U); /* need at least 8 */ - ipg = (adap->params.rev == T3_REV_C) ? 0 : 1; - t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset, - V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG), - V_TXFIFOTHRESH(thres) | V_TXIPG(ipg)); - - if (adap->params.rev > 0) { - divisor = (adap->params.rev == T3_REV_C) ? 64 : 8; - t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset, - (hwm - lwm) * 4 / divisor); - } - t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset, - MAC_RXFIFO_SIZE * 4 * 8 / 512); - return 0; -} - -int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc) -{ - u32 val; - struct adapter *adap = mac->adapter; - unsigned int oft = mac->offset; - - if (duplex >= 0 && duplex != DUPLEX_FULL) - return -EINVAL; - if (speed >= 0) { - if (speed == SPEED_10) - val = V_PORTSPEED(0); - else if (speed == SPEED_100) - val = V_PORTSPEED(1); - else if (speed == SPEED_1000) - val = V_PORTSPEED(2); - else if (speed == SPEED_10000) - val = V_PORTSPEED(3); - else - return -EINVAL; - - t3_set_reg_field(adap, A_XGM_PORT_CFG + oft, - V_PORTSPEED(M_PORTSPEED), val); - } - - val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft); - val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM); - if (fc & PAUSE_TX) { - u32 rx_max_pkt_size = - G_RXMAXPKTSIZE(t3_read_reg(adap, - A_XGM_RX_MAX_PKT_SIZE + oft)); - val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size) / 8); - } - t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val); - - t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, - (fc & PAUSE_RX) ? 
F_TXPAUSEEN : 0); - return 0; -} - -int t3_mac_enable(struct cmac *mac, int which) -{ - int idx = macidx(mac); - struct adapter *adap = mac->adapter; - unsigned int oft = mac->offset; - struct mac_stats *s = &mac->stats; - - if (which & MAC_DIRECTION_TX) { - t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); - t3_write_reg(adap, A_TP_PIO_DATA, - adap->params.rev == T3_REV_C ? - 0xc4ffff01 : 0xc0ede401); - t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE); - t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, - adap->params.rev == T3_REV_C ? 0 : 1 << idx); - - t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN); - - t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx); - mac->tx_mcnt = s->tx_frames; - mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, - A_TP_PIO_DATA))); - mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, - A_XGM_TX_SPI4_SOP_EOP_CNT + - oft))); - mac->rx_mcnt = s->rx_frames; - mac->rx_pause = s->rx_pause; - mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, - A_XGM_RX_SPI4_SOP_EOP_CNT + - oft))); - mac->rx_ocnt = s->rx_fifo_ovfl; - mac->txen = F_TXEN; - mac->toggle_cnt = 0; - } - if (which & MAC_DIRECTION_RX) - t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN); - return 0; -} - -int t3_mac_disable(struct cmac *mac, int which) -{ - struct adapter *adap = mac->adapter; - - if (which & MAC_DIRECTION_TX) { - t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); - mac->txen = 0; - } - if (which & MAC_DIRECTION_RX) { - int val = F_MAC_RESET_; - - t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, - F_PCS_RESET_, 0); - msleep(100); - t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0); - if (is_10G(adap)) - val |= F_PCS_RESET_; - else if (uses_xaui(adap)) - val |= F_PCS_RESET_ | F_XG2G_RESET_; - else - val |= F_RGMII_RESET_ | F_XG2G_RESET_; - t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val); - } - return 0; -} - -int t3b2_mac_watchdog_task(struct cmac *mac) -{ - struct adapter *adap = mac->adapter; - struct mac_stats *s = &mac->stats; - unsigned int tx_tcnt, tx_xcnt; - u64 tx_mcnt = s->tx_frames; - int status; - - status = 0; - tx_xcnt = 1; /* By default tx_xcnt is making progress */ - tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */ - if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) { - tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, - A_XGM_TX_SPI4_SOP_EOP_CNT + - mac->offset))); - if (tx_xcnt == 0) { - t3_write_reg(adap, A_TP_PIO_ADDR, - A_TP_TX_DROP_CNT_CH0 + macidx(mac)); - tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, - A_TP_PIO_DATA))); - } else { - goto out; - } - } else { - mac->toggle_cnt = 0; - goto out; - } - - if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) { - if (mac->toggle_cnt > 4) { - status = 2; - goto out; - } else { - status = 1; - goto out; - } - } else { - mac->toggle_cnt = 0; - goto out; - } - -out: - mac->tx_tcnt = tx_tcnt; - mac->tx_xcnt = tx_xcnt; - mac->tx_mcnt = s->tx_frames; - mac->rx_pause = s->rx_pause; - if (status == 1) { - t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); - t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */ - t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen); - t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */ - mac->toggle_cnt++; - } else if (status == 2) { - t3b2_mac_reset(mac); - mac->toggle_cnt = 0; - } - return status; -} - -/* - * This function is called periodically to accumulate the current values of the - * RMON counters into the port statistics. 
Since the packet counters are only - * 32 bits they can overflow in ~286 secs at 10G, so the function should be - * called more frequently than that. The byte counters are 45-bit wide, they - * would overflow in ~7.8 hours. - */ -const struct mac_stats *t3_mac_update_stats(struct cmac *mac) -{ -#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset) -#define RMON_UPDATE(mac, name, reg) \ - (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg) -#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \ - (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \ - ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32) - - u32 v, lo; - - RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH); - RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH); - RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES); - RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES); - RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES); - RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES); - RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES); - RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES); - RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES); - - RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES); - - v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT); - if (mac->adapter->params.rev == T3_REV_B2) - v &= 0x7fffffff; - mac->stats.rx_too_long += v; - - RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES); - RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES); - RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES); - RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES); - RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES); - RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES); - RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES); - - RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH); - RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH); - RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST); - RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST); - RMON_UPDATE(mac, tx_pause, TX_PAUSE); - /* This counts error frames in general (bad FCS, underrun, etc). */ - RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES); - - RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES); - RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES); - RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES); - RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES); - RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES); - RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES); - RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES); - - /* The next stat isn't clear-on-read. */ - t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50); - v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA); - lo = (u32) mac->stats.rx_cong_drops; - mac->stats.rx_cong_drops += (u64) (v - lo); - - return &mac->stats; -} diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile deleted file mode 100644 index 4986674..0000000 --- a/drivers/net/cxgb4/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Chelsio T4 driver -# - -obj-$(CONFIG_CHELSIO_T4) += cxgb4.o - -cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h deleted file mode 100644 index 223a7f7..0000000 --- a/drivers/net/cxgb4/cxgb4.h +++ /dev/null @@ -1,722 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __CXGB4_H__ -#define __CXGB4_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "cxgb4_uld.h" -#include "t4_hw.h" - -#define FW_VERSION_MAJOR 1 -#define FW_VERSION_MINOR 1 -#define FW_VERSION_MICRO 0 - -enum { - MAX_NPORTS = 4, /* max # of ports */ - SERNUM_LEN = 24, /* Serial # length */ - EC_LEN = 16, /* E/C length */ - ID_LEN = 16, /* ID length */ -}; - -enum { - MEM_EDC0, - MEM_EDC1, - MEM_MC -}; - -enum dev_master { - MASTER_CANT, - MASTER_MAY, - MASTER_MUST -}; - -enum dev_state { - DEV_STATE_UNINIT, - DEV_STATE_INIT, - DEV_STATE_ERR -}; - -enum { - PAUSE_RX = 1 << 0, - PAUSE_TX = 1 << 1, - PAUSE_AUTONEG = 1 << 2 -}; - -struct port_stats { - u64 tx_octets; /* total # of octets in good frames */ - u64 tx_frames; /* all good frames */ - u64 tx_bcast_frames; /* all broadcast frames */ - u64 tx_mcast_frames; /* all multicast frames */ - u64 tx_ucast_frames; /* all unicast frames */ - u64 tx_error_frames; /* all error frames */ - - u64 tx_frames_64; /* # of Tx frames in a particular range */ - u64 tx_frames_65_127; - u64 tx_frames_128_255; - u64 tx_frames_256_511; - u64 tx_frames_512_1023; - u64 tx_frames_1024_1518; - u64 tx_frames_1519_max; - - u64 tx_drop; /* # of dropped Tx frames */ - u64 tx_pause; /* # of transmitted pause frames */ - u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ - u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ - u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ - u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ - u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ - u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ - u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ - u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ - - u64 rx_octets; /* total # of octets in good frames */ - u64 rx_frames; /* all good frames */ - u64 rx_bcast_frames; /* all broadcast frames */ - u64 rx_mcast_frames; /* all multicast frames */ - u64 rx_ucast_frames; /* all unicast frames */ - u64 rx_too_long; /* # of frames exceeding MTU */ - u64 rx_jabber; /* # of jabber frames */ - u64 rx_fcs_err; /* # of received frames with bad FCS */ - u64 rx_len_err; /* # of received frames with length error */ - u64 rx_symbol_err; /* symbol errors */ - u64 rx_runt; /* # of short 
frames */ - - u64 rx_frames_64; /* # of Rx frames in a particular range */ - u64 rx_frames_65_127; - u64 rx_frames_128_255; - u64 rx_frames_256_511; - u64 rx_frames_512_1023; - u64 rx_frames_1024_1518; - u64 rx_frames_1519_max; - - u64 rx_pause; /* # of received pause frames */ - u64 rx_ppp0; /* # of received PPP prio 0 frames */ - u64 rx_ppp1; /* # of received PPP prio 1 frames */ - u64 rx_ppp2; /* # of received PPP prio 2 frames */ - u64 rx_ppp3; /* # of received PPP prio 3 frames */ - u64 rx_ppp4; /* # of received PPP prio 4 frames */ - u64 rx_ppp5; /* # of received PPP prio 5 frames */ - u64 rx_ppp6; /* # of received PPP prio 6 frames */ - u64 rx_ppp7; /* # of received PPP prio 7 frames */ - - u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ - u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */ - u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ - u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ - u64 rx_trunc0; /* buffer-group 0 truncated packets */ - u64 rx_trunc1; /* buffer-group 1 truncated packets */ - u64 rx_trunc2; /* buffer-group 2 truncated packets */ - u64 rx_trunc3; /* buffer-group 3 truncated packets */ -}; - -struct lb_port_stats { - u64 octets; - u64 frames; - u64 bcast_frames; - u64 mcast_frames; - u64 ucast_frames; - u64 error_frames; - - u64 frames_64; - u64 frames_65_127; - u64 frames_128_255; - u64 frames_256_511; - u64 frames_512_1023; - u64 frames_1024_1518; - u64 frames_1519_max; - - u64 drop; - - u64 ovflow0; - u64 ovflow1; - u64 ovflow2; - u64 ovflow3; - u64 trunc0; - u64 trunc1; - u64 trunc2; - u64 trunc3; -}; - -struct tp_tcp_stats { - u32 tcpOutRsts; - u64 tcpInSegs; - u64 tcpOutSegs; - u64 tcpRetransSegs; -}; - -struct tp_err_stats { - u32 macInErrs[4]; - u32 hdrInErrs[4]; - u32 tcpInErrs[4]; - u32 tnlCongDrops[4]; - u32 ofldChanDrops[4]; - u32 tnlTxDrops[4]; - u32 ofldVlanDrops[4]; - u32 tcp6InErrs[4]; - u32 ofldNoNeigh; - u32 ofldCongDefer; -}; - -struct tp_params { - unsigned int ntxchan; /* # of Tx channels */ - unsigned int tre; /* log2 of core clocks per TP tick */ -}; - -struct vpd_params { - unsigned int cclk; - u8 ec[EC_LEN + 1]; - u8 sn[SERNUM_LEN + 1]; - u8 id[ID_LEN + 1]; -}; - -struct pci_params { - unsigned char speed; - unsigned char width; -}; - -struct adapter_params { - struct tp_params tp; - struct vpd_params vpd; - struct pci_params pci; - - unsigned int sf_size; /* serial flash size in bytes */ - unsigned int sf_nsec; /* # of flash sectors */ - unsigned int sf_fw_start; /* start of FW image in flash */ - - unsigned int fw_vers; - unsigned int tp_vers; - u8 api_vers[7]; - - unsigned short mtus[NMTUS]; - unsigned short a_wnd[NCCTRL_WIN]; - unsigned short b_wnd[NCCTRL_WIN]; - - unsigned char nports; /* # of ethernet ports */ - unsigned char portvec; - unsigned char rev; /* chip revision */ - unsigned char offload; - - unsigned int ofldq_wr_cred; -}; - -struct trace_params { - u32 data[TRACE_LEN / 4]; - u32 mask[TRACE_LEN / 4]; - unsigned short snap_len; - unsigned short min_len; - unsigned char skip_ofst; - unsigned char skip_len; - unsigned char invert; - unsigned char port; -}; - -struct link_config { - unsigned short supported; /* link capabilities */ - unsigned short advertising; /* advertised capabilities */ - unsigned short requested_speed; /* speed user has requested */ - unsigned short speed; /* actual link speed */ - unsigned char requested_fc; /* flow control user has requested */ - unsigned char fc; /* actual link flow control */ - unsigned char autoneg; /* autonegotiating? 
*/ - unsigned char link_ok; /* link up? */ -}; - -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) - -enum { - MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ - MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ - MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ - MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ -}; - -enum { - MAX_EGRQ = 128, /* max # of egress queues, including FLs */ - MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */ -}; - -struct adapter; -struct sge_rspq; - -struct port_info { - struct adapter *adapter; - u16 viid; - s16 xact_addr_filt; /* index of exact MAC address filter */ - u16 rss_size; /* size of VI's RSS table slice */ - s8 mdio_addr; - u8 port_type; - u8 mod_type; - u8 port_id; - u8 tx_chan; - u8 lport; /* associated offload logical port */ - u8 nqsets; /* # of qsets */ - u8 first_qset; /* index of first qset */ - u8 rss_mode; - struct link_config link_cfg; - u16 *rss; -}; - -struct dentry; -struct work_struct; - -enum { /* adapter flags */ - FULL_INIT_DONE = (1 << 0), - USING_MSI = (1 << 1), - USING_MSIX = (1 << 2), - FW_OK = (1 << 4), -}; - -struct rx_sw_desc; - -struct sge_fl { /* SGE free-buffer queue state */ - unsigned int avail; /* # of available Rx buffers */ - unsigned int pend_cred; /* new buffers since last FL DB ring */ - unsigned int cidx; /* consumer index */ - unsigned int pidx; /* producer index */ - unsigned long alloc_failed; /* # of times buffer allocation failed */ - unsigned long large_alloc_failed; - unsigned long starving; - /* RO fields */ - unsigned int cntxt_id; /* SGE context id for the free list */ - unsigned int size; /* capacity of free list */ - struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ - __be64 *desc; /* address of HW Rx descriptor ring */ - dma_addr_t addr; /* bus address of HW ring start */ -}; - -/* A packet gather list */ -struct pkt_gl { - skb_frag_t frags[MAX_SKB_FRAGS]; - void *va; /* virtual address of first byte */ - unsigned int nfrags; /* # of fragments */ - unsigned int tot_len; /* total length of fragments */ -}; - -typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, - const struct pkt_gl *gl); - -struct sge_rspq { /* state for an SGE response queue */ - struct napi_struct napi; - const __be64 *cur_desc; /* current descriptor in queue */ - unsigned int cidx; /* consumer index */ - u8 gen; /* current generation bit */ - u8 intr_params; /* interrupt holdoff parameters */ - u8 next_intr_params; /* holdoff params for next interrupt */ - u8 pktcnt_idx; /* interrupt packet threshold */ - u8 uld; /* ULD handling this queue */ - u8 idx; /* queue index within its group */ - int offset; /* offset into current Rx buffer */ - u16 cntxt_id; /* SGE context id for the response q */ - u16 abs_id; /* absolute SGE id for the response q */ - __be64 *desc; /* address of HW response ring */ - dma_addr_t phys_addr; /* physical address of the ring */ - unsigned int iqe_len; /* entry size */ - unsigned int size; /* capacity of response queue */ - struct adapter *adap; - struct net_device *netdev; /* associated net device */ - rspq_handler_t handler; -}; - -struct sge_eth_stats { /* Ethernet queue statistics */ - unsigned long pkts; /* # of ethernet packets */ - unsigned long lro_pkts; /* # of LRO super packets */ - unsigned long lro_merged; /* # of wire packets merged by LRO */ - unsigned long rx_cso; /* # of Rx checksum offloads */ - unsigned long vlan_ex; /* # of Rx VLAN extractions */ - unsigned long rx_drops; /* # of packets dropped due 
to no mem */ -}; - -struct sge_eth_rxq { /* SW Ethernet Rx queue */ - struct sge_rspq rspq; - struct sge_fl fl; - struct sge_eth_stats stats; -} ____cacheline_aligned_in_smp; - -struct sge_ofld_stats { /* offload queue statistics */ - unsigned long pkts; /* # of packets */ - unsigned long imm; /* # of immediate-data packets */ - unsigned long an; /* # of asynchronous notifications */ - unsigned long nomem; /* # of responses deferred due to no mem */ -}; - -struct sge_ofld_rxq { /* SW offload Rx queue */ - struct sge_rspq rspq; - struct sge_fl fl; - struct sge_ofld_stats stats; -} ____cacheline_aligned_in_smp; - -struct tx_desc { - __be64 flit[8]; -}; - -struct tx_sw_desc; - -struct sge_txq { - unsigned int in_use; /* # of in-use Tx descriptors */ - unsigned int size; /* # of descriptors */ - unsigned int cidx; /* SW consumer index */ - unsigned int pidx; /* producer index */ - unsigned long stops; /* # of times q has been stopped */ - unsigned long restarts; /* # of queue restarts */ - unsigned int cntxt_id; /* SGE context id for the Tx q */ - struct tx_desc *desc; /* address of HW Tx descriptor ring */ - struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ - struct sge_qstat *stat; /* queue status entry */ - dma_addr_t phys_addr; /* physical address of the ring */ -}; - -struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ - struct sge_txq q; - struct netdev_queue *txq; /* associated netdev TX queue */ - unsigned long tso; /* # of TSO requests */ - unsigned long tx_cso; /* # of Tx checksum offloads */ - unsigned long vlan_ins; /* # of Tx VLAN insertions */ - unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ -} ____cacheline_aligned_in_smp; - -struct sge_ofld_txq { /* state for an SGE offload Tx queue */ - struct sge_txq q; - struct adapter *adap; - struct sk_buff_head sendq; /* list of backpressured packets */ - struct tasklet_struct qresume_tsk; /* restarts the queue */ - u8 full; /* the Tx ring is full */ - unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ -} ____cacheline_aligned_in_smp; - -struct sge_ctrl_txq { /* state for an SGE control Tx queue */ - struct sge_txq q; - struct adapter *adap; - struct sk_buff_head sendq; /* list of backpressured packets */ - struct tasklet_struct qresume_tsk; /* restarts the queue */ - u8 full; /* the Tx ring is full */ -} ____cacheline_aligned_in_smp; - -struct sge { - struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; - struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; - struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; - - struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; - struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; - struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; - struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; - - struct sge_rspq intrq ____cacheline_aligned_in_smp; - spinlock_t intrq_lock; - - u16 max_ethqsets; /* # of available Ethernet queue sets */ - u16 ethqsets; /* # of active Ethernet queue sets */ - u16 ethtxq_rover; /* Tx queue to clean up next */ - u16 ofldqsets; /* # of active offload queue sets */ - u16 rdmaqs; /* # of available RDMA Rx queues */ - u16 ofld_rxq[MAX_OFLD_QSETS]; - u16 rdma_rxq[NCHAN]; - u16 timer_val[SGE_NTIMERS]; - u8 counter_val[SGE_NCOUNTERS]; - unsigned int starve_thres; - u8 idma_state[2]; - unsigned int egr_start; - unsigned int ingr_start; - void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ - struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ - DECLARE_BITMAP(starving_fl, MAX_EGRQ); - DECLARE_BITMAP(txq_maperr, MAX_EGRQ); - struct timer_list 
rx_timer; /* refills starving FLs */ - struct timer_list tx_timer; /* checks Tx queues */ -}; - -#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) -#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) -#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) - -struct l2t_data; - -struct adapter { - void __iomem *regs; - struct pci_dev *pdev; - struct device *pdev_dev; - unsigned int fn; - unsigned int flags; - - int msg_enable; - - struct adapter_params params; - struct cxgb4_virt_res vres; - unsigned int swintr; - - unsigned int wol; - - struct { - unsigned short vec; - char desc[IFNAMSIZ + 10]; - } msix_info[MAX_INGQ + 1]; - - struct sge sge; - - struct net_device *port[MAX_NPORTS]; - u8 chan_map[NCHAN]; /* channel -> port map */ - - struct l2t_data *l2t; - void *uld_handle[CXGB4_ULD_MAX]; - struct list_head list_node; - - struct tid_info tids; - void **tid_release_head; - spinlock_t tid_release_lock; - struct work_struct tid_release_task; - bool tid_release_task_busy; - - struct dentry *debugfs_root; - - spinlock_t stats_lock; -}; - -static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) -{ - return readl(adap->regs + reg_addr); -} - -static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val) -{ - writel(val, adap->regs + reg_addr); -} - -#ifndef readq -static inline u64 readq(const volatile void __iomem *addr) -{ - return readl(addr) + ((u64)readl(addr + 4) << 32); -} - -static inline void writeq(u64 val, volatile void __iomem *addr) -{ - writel(val, addr); - writel(val >> 32, addr + 4); -} -#endif - -static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr) -{ - return readq(adap->regs + reg_addr); -} - -static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) -{ - writeq(val, adap->regs + reg_addr); -} - -/** - * netdev2pinfo - return the port_info structure associated with a net_device - * @dev: the netdev - * - * Return the struct port_info associated with a net_device - */ -static inline struct port_info *netdev2pinfo(const struct net_device *dev) -{ - return netdev_priv(dev); -} - -/** - * adap2pinfo - return the port_info of a port - * @adap: the adapter - * @idx: the port index - * - * Return the port_info structure for the port of the given index. 
- */ -static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) -{ - return netdev_priv(adap->port[idx]); -} - -/** - * netdev2adap - return the adapter structure associated with a net_device - * @dev: the netdev - * - * Return the struct adapter associated with a net_device - */ -static inline struct adapter *netdev2adap(const struct net_device *dev) -{ - return netdev2pinfo(dev)->adapter; -} - -void t4_os_portmod_changed(const struct adapter *adap, int port_id); -void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); - -void *t4_alloc_mem(size_t size); - -void t4_free_sge_resources(struct adapter *adap); -irq_handler_t t4_intr_handler(struct adapter *adap); -netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); -int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, - const struct pkt_gl *gl); -int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); -int t4_ofld_send(struct adapter *adap, struct sk_buff *skb); -int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, - struct net_device *dev, int intr_idx, - struct sge_fl *fl, rspq_handler_t hnd); -int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, - struct net_device *dev, struct netdev_queue *netdevq, - unsigned int iqid); -int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, - struct net_device *dev, unsigned int iqid, - unsigned int cmplqid); -int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, - struct net_device *dev, unsigned int iqid); -irqreturn_t t4_sge_intr_msix(int irq, void *cookie); -void t4_sge_init(struct adapter *adap); -void t4_sge_start(struct adapter *adap); -void t4_sge_stop(struct adapter *adap); - -#define for_each_port(adapter, iter) \ - for (iter = 0; iter < (adapter)->params.nports; ++iter) - -static inline unsigned int core_ticks_per_usec(const struct adapter *adap) -{ - return adap->params.vpd.cclk / 1000; -} - -static inline unsigned int us_to_core_ticks(const struct adapter *adap, - unsigned int us) -{ - return (us * adap->params.vpd.cclk) / 1000; -} - -void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, - u32 val); - -int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, - void *rpl, bool sleep_ok); - -static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, - int size, void *rpl) -{ - return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); -} - -static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, - int size, void *rpl) -{ - return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); -} - -void t4_intr_enable(struct adapter *adapter); -void t4_intr_disable(struct adapter *adapter); -int t4_slow_intr_handler(struct adapter *adapter); - -int t4_wait_dev_ready(struct adapter *adap); -int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, - struct link_config *lc); -int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); -int t4_seeprom_wp(struct adapter *adapter, bool enable); -int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); -int t4_check_fw_version(struct adapter *adapter); -int t4_prep_adapter(struct adapter *adapter); -int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); -void t4_fatal_err(struct adapter *adapter); -int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, - int start, int n, const u16 *rspq, unsigned int nrspq); -int t4_config_glbl_rss(struct 
adapter *adapter, int mbox, unsigned int mode, - unsigned int flags); -int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity); -int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, - u64 *parity); - -void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); -void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); -void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, - struct tp_tcp_stats *v6); -void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, - const unsigned short *alpha, const unsigned short *beta); - -void t4_wol_magic_enable(struct adapter *adap, unsigned int port, - const u8 *addr); -int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, - u64 mask0, u64 mask1, unsigned int crc, bool enable); - -int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, - enum dev_master master, enum dev_state *state); -int t4_fw_bye(struct adapter *adap, unsigned int mbox); -int t4_early_init(struct adapter *adap, unsigned int mbox); -int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); -int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int nparams, const u32 *params, - u32 *val); -int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int nparams, const u32 *params, - const u32 *val); -int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, - unsigned int rxqi, unsigned int rxq, unsigned int tc, - unsigned int vi, unsigned int cmask, unsigned int pmask, - unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); -int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, - unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, - unsigned int *rss_size); -int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, - int mtu, int promisc, int all_multi, int bcast, int vlanex, - bool sleep_ok); -int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, - unsigned int viid, bool free, unsigned int naddr, - const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); -int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, - int idx, const u8 *addr, bool persist, bool add_smt); -int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, - bool ucast, u64 vec, bool sleep_ok); -int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, - bool rx_en, bool tx_en); -int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, - unsigned int nblinks); -int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, - unsigned int mmd, unsigned int reg, u16 *valp); -int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, - unsigned int mmd, unsigned int reg, u16 val); -int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int iqtype, unsigned int iqid, - unsigned int fl0id, unsigned int fl1id); -int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int eqid); -int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int eqid); -int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int eqid); -int 
t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); -#endif /* __CXGB4_H__ */ diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c deleted file mode 100644 index c9957b7..0000000 --- a/drivers/net/cxgb4/cxgb4_main.c +++ /dev/null @@ -1,3806 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "cxgb4.h" -#include "t4_regs.h" -#include "t4_msg.h" -#include "t4fw_api.h" -#include "l2t.h" - -#define DRV_VERSION "1.3.0-ko" -#define DRV_DESC "Chelsio T4 Network Driver" - -/* - * Max interrupt hold-off timer value in us. Queues fall back to this value - * under extreme memory pressure so it's largish to give the system time to - * recover. - */ -#define MAX_SGE_TIMERVAL 200U - -#ifdef CONFIG_PCI_IOV -/* - * Virtual Function provisioning constants. We need two extra Ingress Queues - * with Interrupt capability to serve as the VF's Firmware Event Queue and - * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free - * Lists associated with them. For each Ethernet/Control Egress Queue and - * for each Free List, we need an Egress Context. 
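 *
 * [Annotation, not part of the original patch: with VFRES_NQSETS = 2 the
 * constants below work out to VFRES_NIQFLINT = 2 + 2 = 4 interrupt-capable
 * ingress queues (one per queue set, plus the Firmware Event Queue and the
 * Forwarded Interrupt Queue) and VFRES_NEQ = 2 * 2 = 4 egress contexts
 * (one Ethernet egress queue and one Free List per queue set).]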
- */ -enum { - VFRES_NPORTS = 1, /* # of "ports" per VF */ - VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */ - - VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */ - VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */ - VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */ - VFRES_NIQ = 0, /* # of non-fl/int ingress queues */ - VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */ - VFRES_TC = 0, /* PCI-E traffic class */ - VFRES_NEXACTF = 16, /* # of exact MPS filters */ - - VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT, - VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF, -}; - -/* - * Provide a Port Access Rights Mask for the specified PF/VF. This is very - * static and likely not to be useful in the long run. We really need to - * implement some form of persistent configuration which the firmware - * controls. - */ -static unsigned int pfvfres_pmask(struct adapter *adapter, - unsigned int pf, unsigned int vf) -{ - unsigned int portn, portvec; - - /* - * Give PF's access to all of the ports. - */ - if (vf == 0) - return FW_PFVF_CMD_PMASK_MASK; - - /* - * For VFs, we'll assign them access to the ports based purely on the - * PF. We assign active ports in order, wrapping around if there are - * fewer active ports than PFs: e.g. active port[pf % nports]. - * Unfortunately the adapter's port_info structs haven't been - * initialized yet so we have to compute this. - */ - if (adapter->params.nports == 0) - return 0; - - portn = pf % adapter->params.nports; - portvec = adapter->params.portvec; - for (;;) { - /* - * Isolate the lowest set bit in the port vector. If we're at - * the port number that we want, return that as the pmask. - * otherwise mask that bit out of the port vector and - * decrement our port number ... 
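 *
 * [Annotation, worked example not in the original patch: for
 * portvec = 0b0110, portvec & (portvec - 1) clears the lowest set bit,
 * giving 0b0100, so the XOR below yields 0b0010 -- the lowest set bit in
 * isolation.  The equivalent idiom portvec & -portvec computes the same
 * mask.]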
- */ - unsigned int pmask = portvec ^ (portvec & (portvec-1)); - if (portn == 0) - return pmask; - portn--; - portvec &= ~pmask; - } - /*NOTREACHED*/ -} -#endif - -enum { - MEMWIN0_APERTURE = 65536, - MEMWIN0_BASE = 0x30000, - MEMWIN1_APERTURE = 32768, - MEMWIN1_BASE = 0x28000, - MEMWIN2_APERTURE = 2048, - MEMWIN2_BASE = 0x1b800, -}; - -enum { - MAX_TXQ_ENTRIES = 16384, - MAX_CTRL_TXQ_ENTRIES = 1024, - MAX_RSPQ_ENTRIES = 16384, - MAX_RX_BUFFERS = 16384, - MIN_TXQ_ENTRIES = 32, - MIN_CTRL_TXQ_ENTRIES = 32, - MIN_RSPQ_ENTRIES = 128, - MIN_FL_ENTRIES = 16 -}; - -#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ - NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ - NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) - -#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) } - -static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { - CH_DEVICE(0xa000, 0), /* PE10K */ - CH_DEVICE(0x4001, -1), - CH_DEVICE(0x4002, -1), - CH_DEVICE(0x4003, -1), - CH_DEVICE(0x4004, -1), - CH_DEVICE(0x4005, -1), - CH_DEVICE(0x4006, -1), - CH_DEVICE(0x4007, -1), - CH_DEVICE(0x4008, -1), - CH_DEVICE(0x4009, -1), - CH_DEVICE(0x400a, -1), - CH_DEVICE(0x4401, 4), - CH_DEVICE(0x4402, 4), - CH_DEVICE(0x4403, 4), - CH_DEVICE(0x4404, 4), - CH_DEVICE(0x4405, 4), - CH_DEVICE(0x4406, 4), - CH_DEVICE(0x4407, 4), - CH_DEVICE(0x4408, 4), - CH_DEVICE(0x4409, 4), - CH_DEVICE(0x440a, 4), - { 0, } -}; - -#define FW_FNAME "cxgb4/t4fw.bin" - -MODULE_DESCRIPTION(DRV_DESC); -MODULE_AUTHOR("Chelsio Communications"); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_VERSION(DRV_VERSION); -MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); -MODULE_FIRMWARE(FW_FNAME); - -static int dflt_msg_enable = DFLT_MSG_ENABLE; - -module_param(dflt_msg_enable, int, 0644); -MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); - -/* - * The driver uses the best interrupt scheme available on a platform in the - * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which - * of these schemes the driver may consider as follows: - * - * msi = 2: choose from among all three options - * msi = 1: only consider MSI and INTx interrupts - * msi = 0: force INTx interrupts - */ -static int msi = 2; - -module_param(msi, int, 0644); -MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)"); - -/* - * Queue interrupt hold-off timer values. Queues default to the first of these - * upon creation. 
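 *
 * [Annotation, illustrative usage not in the original patch: being a
 * module_param_array, the defaults below can be overridden with a
 * comma-separated list at load time, e.g.
 *
 *	modprobe cxgb4 intr_holdoff=2,4,8,16,32
 *
 * where the five values are hypothetical hold-off times in microseconds
 * for timers 0..4.]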
- */ -static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 }; - -module_param_array(intr_holdoff, uint, NULL, 0644); -MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " - "0..4 in microseconds"); - -static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; - -module_param_array(intr_cnt, uint, NULL, 0644); -MODULE_PARM_DESC(intr_cnt, - "thresholds 1..3 for queue interrupt packet counters"); - -static int vf_acls; - -#ifdef CONFIG_PCI_IOV -module_param(vf_acls, bool, 0644); -MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); - -static unsigned int num_vf[4]; - -module_param_array(num_vf, uint, NULL, 0644); -MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); -#endif - -static struct dentry *cxgb4_debugfs_root; - -static LIST_HEAD(adapter_list); -static DEFINE_MUTEX(uld_mutex); -static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX]; -static const char *uld_str[] = { "RDMA", "iSCSI" }; - -static void link_report(struct net_device *dev) -{ - if (!netif_carrier_ok(dev)) - netdev_info(dev, "link down\n"); - else { - static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" }; - - const char *s = "10Mbps"; - const struct port_info *p = netdev_priv(dev); - - switch (p->link_cfg.speed) { - case SPEED_10000: - s = "10Gbps"; - break; - case SPEED_1000: - s = "1000Mbps"; - break; - case SPEED_100: - s = "100Mbps"; - break; - } - - netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, - fc[p->link_cfg.fc]); - } -} - -void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) -{ - struct net_device *dev = adapter->port[port_id]; - - /* Skip changes from disabled ports. */ - if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) { - if (link_stat) - netif_carrier_on(dev); - else - netif_carrier_off(dev); - - link_report(dev); - } -} - -void t4_os_portmod_changed(const struct adapter *adap, int port_id) -{ - static const char *mod_str[] = { - NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" - }; - - const struct net_device *dev = adap->port[port_id]; - const struct port_info *pi = netdev_priv(dev); - - if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) - netdev_info(dev, "port module unplugged\n"); - else if (pi->mod_type < ARRAY_SIZE(mod_str)) - netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); -} - -/* - * Configure the exact and hash address filters to handle a port's multicast - * and secondary unicast MAC addresses. 
- */ -static int set_addr_filters(const struct net_device *dev, bool sleep) -{ - u64 mhash = 0; - u64 uhash = 0; - bool free = true; - u16 filt_idx[7]; - const u8 *addr[7]; - int ret, naddr = 0; - const struct netdev_hw_addr *ha; - int uc_cnt = netdev_uc_count(dev); - int mc_cnt = netdev_mc_count(dev); - const struct port_info *pi = netdev_priv(dev); - unsigned int mb = pi->adapter->fn; - - /* first do the secondary unicast addresses */ - netdev_for_each_uc_addr(ha, dev) { - addr[naddr++] = ha->addr; - if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { - ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, - naddr, addr, filt_idx, &uhash, sleep); - if (ret < 0) - return ret; - - free = false; - naddr = 0; - } - } - - /* next set up the multicast addresses */ - netdev_for_each_mc_addr(ha, dev) { - addr[naddr++] = ha->addr; - if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { - ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, - naddr, addr, filt_idx, &mhash, sleep); - if (ret < 0) - return ret; - - free = false; - naddr = 0; - } - } - - return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0, - uhash | mhash, sleep); -} - -/* - * Set Rx properties of a port, such as promiscuity, address filters, and MTU. - * If @mtu is -1 it is left unchanged. - */ -static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) -{ - int ret; - struct port_info *pi = netdev_priv(dev); - - ret = set_addr_filters(dev, sleep_ok); - if (ret == 0) - ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu, - (dev->flags & IFF_PROMISC) ? 1 : 0, - (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, - sleep_ok); - return ret; -} - -/** - * link_start - enable a port - * @dev: the port to enable - * - * Performs the MAC and PHY actions needed to enable a port. - */ -static int link_start(struct net_device *dev) -{ - int ret; - struct port_info *pi = netdev_priv(dev); - unsigned int mb = pi->adapter->fn; - - /* - * We do not set address filters and promiscuity here, the stack does - * that step explicitly. - */ - ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, - !!(dev->features & NETIF_F_HW_VLAN_RX), true); - if (ret == 0) { - ret = t4_change_mac(pi->adapter, mb, pi->viid, - pi->xact_addr_filt, dev->dev_addr, true, - true); - if (ret >= 0) { - pi->xact_addr_filt = ret; - ret = 0; - } - } - if (ret == 0) - ret = t4_link_start(pi->adapter, mb, pi->tx_chan, - &pi->link_cfg); - if (ret == 0) - ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true); - return ret; -} - -/* - * Response queue handler for the FW event queue. 
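 *
 * [Annotation, not part of the original patch: for CPL_SGE_EGR_UPDATE
 * messages the handler below looks up the egress queue by qid and tells
 * Ethernet Tx queues apart from offload Tx queues by address alone --
 * ethtxq[] precedes ofldtxq[] in struct sge, so any queue pointer below
 * &sge.ofldtxq[0] must belong to an Ethernet Tx queue.]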
- */ -static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, - const struct pkt_gl *gl) -{ - u8 opcode = ((const struct rss_header *)rsp)->opcode; - - rsp++; /* skip RSS header */ - if (likely(opcode == CPL_SGE_EGR_UPDATE)) { - const struct cpl_sge_egr_update *p = (void *)rsp; - unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); - struct sge_txq *txq; - - txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; - txq->restarts++; - if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) { - struct sge_eth_txq *eq; - - eq = container_of(txq, struct sge_eth_txq, q); - netif_tx_wake_queue(eq->txq); - } else { - struct sge_ofld_txq *oq; - - oq = container_of(txq, struct sge_ofld_txq, q); - tasklet_schedule(&oq->qresume_tsk); - } - } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { - const struct cpl_fw6_msg *p = (void *)rsp; - - if (p->type == 0) - t4_handle_fw_rpl(q->adap, p->data); - } else if (opcode == CPL_L2T_WRITE_RPL) { - const struct cpl_l2t_write_rpl *p = (void *)rsp; - - do_l2t_write_rpl(q->adap, p); - } else - dev_err(q->adap->pdev_dev, - "unexpected CPL %#x on FW event queue\n", opcode); - return 0; -} - -/** - * uldrx_handler - response queue handler for ULD queues - * @q: the response queue that received the packet - * @rsp: the response queue descriptor holding the offload message - * @gl: the gather list of packet fragments - * - * Deliver an ingress offload packet to a ULD. All processing is done by - * the ULD, we just maintain statistics. - */ -static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, - const struct pkt_gl *gl) -{ - struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); - - if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { - rxq->stats.nomem++; - return -1; - } - if (gl == NULL) - rxq->stats.imm++; - else if (gl == CXGB4_MSG_AN) - rxq->stats.an++; - else - rxq->stats.pkts++; - return 0; -} - -static void disable_msi(struct adapter *adapter) -{ - if (adapter->flags & USING_MSIX) { - pci_disable_msix(adapter->pdev); - adapter->flags &= ~USING_MSIX; - } else if (adapter->flags & USING_MSI) { - pci_disable_msi(adapter->pdev); - adapter->flags &= ~USING_MSI; - } -} - -/* - * Interrupt handler for non-data events used with MSI-X. - */ -static irqreturn_t t4_nondata_intr(int irq, void *cookie) -{ - struct adapter *adap = cookie; - - u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE)); - if (v & PFSW) { - adap->swintr = 1; - t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v); - } - t4_slow_intr_handler(adap); - return IRQ_HANDLED; -} - -/* - * Name the MSI-X interrupts. 
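 *
 * [Annotation, illustration not in the original patch: assuming the first
 * port is named eth0, the code below yields vector names such as "eth0"
 * (non-data interrupts), "eth0-FWeventq", "eth0-Rx0" for the first
 * Ethernet queue set, and "eth0-ofld0"/"eth0-rdma0" for the offload and
 * RDMA queues, as they would appear in /proc/interrupts.]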
- */ -static void name_msix_vecs(struct adapter *adap) -{ - int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc); - - /* non-data interrupts */ - snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name); - - /* FW events */ - snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", - adap->port[0]->name); - - /* Ethernet queues */ - for_each_port(adap, j) { - struct net_device *d = adap->port[j]; - const struct port_info *pi = netdev_priv(d); - - for (i = 0; i < pi->nqsets; i++, msi_idx++) - snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", - d->name, i); - } - - /* offload queues */ - for_each_ofldrxq(&adap->sge, i) - snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d", - adap->port[0]->name, i); - - for_each_rdmarxq(&adap->sge, i) - snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", - adap->port[0]->name, i); -} - -static int request_msix_queue_irqs(struct adapter *adap) -{ - struct sge *s = &adap->sge; - int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; - - err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, - adap->msix_info[1].desc, &s->fw_evtq); - if (err) - return err; - - for_each_ethrxq(s, ethqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, - &s->ethrxq[ethqidx].rspq); - if (err) - goto unwind; - msi++; - } - for_each_ofldrxq(s, ofldqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, - &s->ofldrxq[ofldqidx].rspq); - if (err) - goto unwind; - msi++; - } - for_each_rdmarxq(s, rdmaqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, - &s->rdmarxq[rdmaqidx].rspq); - if (err) - goto unwind; - msi++; - } - return 0; - -unwind: - while (--rdmaqidx >= 0) - free_irq(adap->msix_info[--msi].vec, - &s->rdmarxq[rdmaqidx].rspq); - while (--ofldqidx >= 0) - free_irq(adap->msix_info[--msi].vec, - &s->ofldrxq[ofldqidx].rspq); - while (--ethqidx >= 0) - free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); - free_irq(adap->msix_info[1].vec, &s->fw_evtq); - return err; -} - -static void free_msix_queue_irqs(struct adapter *adap) -{ - int i, msi = 2; - struct sge *s = &adap->sge; - - free_irq(adap->msix_info[1].vec, &s->fw_evtq); - for_each_ethrxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); - for_each_ofldrxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); - for_each_rdmarxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); -} - -/** - * write_rss - write the RSS table for a given port - * @pi: the port - * @queues: array of queue indices for RSS - * - * Sets up the portion of the HW RSS table for the port's VI to distribute - * packets to the Rx queues in @queues. - */ -static int write_rss(const struct port_info *pi, const u16 *queues) -{ - u16 *rss; - int i, err; - const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset]; - - rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL); - if (!rss) - return -ENOMEM; - - /* map the queue indices to queue ids */ - for (i = 0; i < pi->rss_size; i++, queues++) - rss[i] = q[*queues].rspq.abs_id; - - err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0, - pi->rss_size, rss, pi->rss_size); - kfree(rss); - return err; -} - -/** - * setup_rss - configure RSS - * @adap: the adapter - * - * Sets up RSS for each port. 
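 *
 * [Annotation, not part of the original patch: each pi->rss[] entry is a
 * small queue index in 0..pi->nqsets-1; write_rss() above translates
 * entry i to the absolute response-queue id
 * ethrxq[pi->first_qset + rss[i]].rspq.abs_id before handing the slice
 * to the hardware.]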
- */ -static int setup_rss(struct adapter *adap) -{ - int i, err; - - for_each_port(adap, i) { - const struct port_info *pi = adap2pinfo(adap, i); - - err = write_rss(pi, pi->rss); - if (err) - return err; - } - return 0; -} - -/* - * Return the channel of the ingress queue with the given qid. - */ -static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid) -{ - qid -= p->ingr_start; - return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan; -} - -/* - * Wait until all NAPI handlers are descheduled. - */ -static void quiesce_rx(struct adapter *adap) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { - struct sge_rspq *q = adap->sge.ingr_map[i]; - - if (q && q->handler) - napi_disable(&q->napi); - } -} - -/* - * Enable NAPI scheduling and interrupt generation for all Rx queues. - */ -static void enable_rx(struct adapter *adap) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { - struct sge_rspq *q = adap->sge.ingr_map[i]; - - if (!q) - continue; - if (q->handler) - napi_enable(&q->napi); - /* 0-increment GTS to start the timer and enable interrupts */ - t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), - SEINTARM(q->intr_params) | - INGRESSQID(q->cntxt_id)); - } -} - -/** - * setup_sge_queues - configure SGE Tx/Rx/response queues - * @adap: the adapter - * - * Determines how many sets of SGE queues to use and initializes them. - * We support multiple queue sets per port if we have MSI-X, otherwise - * just one queue set per port. - */ -static int setup_sge_queues(struct adapter *adap) -{ - int err, msi_idx, i, j; - struct sge *s = &adap->sge; - - bitmap_zero(s->starving_fl, MAX_EGRQ); - bitmap_zero(s->txq_maperr, MAX_EGRQ); - - if (adap->flags & USING_MSIX) - msi_idx = 1; /* vector 0 is for non-queue interrupts */ - else { - err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, - NULL, NULL); - if (err) - return err; - msi_idx = -((int)s->intrq.abs_id + 1); - } - - err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], - msi_idx, NULL, fwevtq_handler); - if (err) { -freeout: t4_free_sge_resources(adap); - return err; - } - - for_each_port(adap, i) { - struct net_device *dev = adap->port[i]; - struct port_info *pi = netdev_priv(dev); - struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; - struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; - - for (j = 0; j < pi->nqsets; j++, q++) { - if (msi_idx > 0) - msi_idx++; - err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, - msi_idx, &q->fl, - t4_ethrx_handler); - if (err) - goto freeout; - q->rspq.idx = j; - memset(&q->stats, 0, sizeof(q->stats)); - } - for (j = 0; j < pi->nqsets; j++, t++) { - err = t4_sge_alloc_eth_txq(adap, t, dev, - netdev_get_tx_queue(dev, j), - s->fw_evtq.cntxt_id); - if (err) - goto freeout; - } - } - - j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ - for_each_ofldrxq(s, i) { - struct sge_ofld_rxq *q = &s->ofldrxq[i]; - struct net_device *dev = adap->port[i / j]; - - if (msi_idx > 0) - msi_idx++; - err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, - &q->fl, uldrx_handler); - if (err) - goto freeout; - memset(&q->stats, 0, sizeof(q->stats)); - s->ofld_rxq[i] = q->rspq.abs_id; - err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev, - s->fw_evtq.cntxt_id); - if (err) - goto freeout; - } - - for_each_rdmarxq(s, i) { - struct sge_ofld_rxq *q = &s->rdmarxq[i]; - - if (msi_idx > 0) - msi_idx++; - err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], - msi_idx, &q->fl, uldrx_handler); - if (err) - goto freeout; - memset(&q->stats, 
0, sizeof(q->stats)); - s->rdma_rxq[i] = q->rspq.abs_id; - } - - for_each_port(adap, i) { - /* - * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't - * have RDMA queues, and that's the right value. - */ - err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], - s->fw_evtq.cntxt_id, - s->rdmarxq[i].rspq.cntxt_id); - if (err) - goto freeout; - } - - t4_write_reg(adap, MPS_TRC_RSS_CONTROL, - RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | - QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); - return 0; -} - -/* - * Returns 0 if new FW was successfully loaded, a positive errno if a load was - * started but failed, and a negative errno if flash load couldn't start. - */ -static int upgrade_fw(struct adapter *adap) -{ - int ret; - u32 vers; - const struct fw_hdr *hdr; - const struct firmware *fw; - struct device *dev = adap->pdev_dev; - - ret = request_firmware(&fw, FW_FNAME, dev); - if (ret < 0) { - dev_err(dev, "unable to load firmware image " FW_FNAME - ", error %d\n", ret); - return ret; - } - - hdr = (const struct fw_hdr *)fw->data; - vers = ntohl(hdr->fw_ver); - if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { - ret = -EINVAL; /* wrong major version, won't do */ - goto out; - } - - /* - * If the flash FW is unusable or we found something newer, load it. - */ - if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || - vers > adap->params.fw_vers) { - ret = -t4_load_fw(adap, fw->data, fw->size); - if (!ret) - dev_info(dev, "firmware upgraded to version %pI4 from " - FW_FNAME "\n", &hdr->fw_ver); - } -out: release_firmware(fw); - return ret; -} - -/* - * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. - * The allocated memory is cleared. - */ -void *t4_alloc_mem(size_t size) -{ - void *p = kzalloc(size, GFP_KERNEL); - - if (!p) - p = vzalloc(size); - return p; -} - -/* - * Free memory allocated through alloc_mem(). - */ -static void t4_free_mem(void *addr) -{ - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); -} - -static inline int is_offload(const struct adapter *adap) -{ - return adap->params.offload; -} - -/* - * Implementation of ethtool operations. 
- */ - -static u32 get_msglevel(struct net_device *dev) -{ - return netdev2adap(dev)->msg_enable; -} - -static void set_msglevel(struct net_device *dev, u32 val) -{ - netdev2adap(dev)->msg_enable = val; -} - -static char stats_strings[][ETH_GSTRING_LEN] = { - "TxOctetsOK ", - "TxFramesOK ", - "TxBroadcastFrames ", - "TxMulticastFrames ", - "TxUnicastFrames ", - "TxErrorFrames ", - - "TxFrames64 ", - "TxFrames65To127 ", - "TxFrames128To255 ", - "TxFrames256To511 ", - "TxFrames512To1023 ", - "TxFrames1024To1518 ", - "TxFrames1519ToMax ", - - "TxFramesDropped ", - "TxPauseFrames ", - "TxPPP0Frames ", - "TxPPP1Frames ", - "TxPPP2Frames ", - "TxPPP3Frames ", - "TxPPP4Frames ", - "TxPPP5Frames ", - "TxPPP6Frames ", - "TxPPP7Frames ", - - "RxOctetsOK ", - "RxFramesOK ", - "RxBroadcastFrames ", - "RxMulticastFrames ", - "RxUnicastFrames ", - - "RxFramesTooLong ", - "RxJabberErrors ", - "RxFCSErrors ", - "RxLengthErrors ", - "RxSymbolErrors ", - "RxRuntFrames ", - - "RxFrames64 ", - "RxFrames65To127 ", - "RxFrames128To255 ", - "RxFrames256To511 ", - "RxFrames512To1023 ", - "RxFrames1024To1518 ", - "RxFrames1519ToMax ", - - "RxPauseFrames ", - "RxPPP0Frames ", - "RxPPP1Frames ", - "RxPPP2Frames ", - "RxPPP3Frames ", - "RxPPP4Frames ", - "RxPPP5Frames ", - "RxPPP6Frames ", - "RxPPP7Frames ", - - "RxBG0FramesDropped ", - "RxBG1FramesDropped ", - "RxBG2FramesDropped ", - "RxBG3FramesDropped ", - "RxBG0FramesTrunc ", - "RxBG1FramesTrunc ", - "RxBG2FramesTrunc ", - "RxBG3FramesTrunc ", - - "TSO ", - "TxCsumOffload ", - "RxCsumGood ", - "VLANextractions ", - "VLANinsertions ", - "GROpackets ", - "GROmerged ", -}; - -static int get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(stats_strings); - default: - return -EOPNOTSUPP; - } -} - -#define T4_REGMAP_SIZE (160 * 1024) - -static int get_regs_len(struct net_device *dev) -{ - return T4_REGMAP_SIZE; -} - -static int get_eeprom_len(struct net_device *dev) -{ - return EEPROMSIZE; -} - -static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct adapter *adapter = netdev2adap(dev); - - strcpy(info->driver, KBUILD_MODNAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, pci_name(adapter->pdev)); - - if (!adapter->params.fw_vers) - strcpy(info->fw_version, "N/A"); - else - snprintf(info->fw_version, sizeof(info->fw_version), - "%u.%u.%u.%u, TP %u.%u.%u.%u", - FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers), - FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers), - FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers)); -} - -static void get_strings(struct net_device *dev, u32 stringset, u8 *data) -{ - if (stringset == ETH_SS_STATS) - memcpy(data, stats_strings, sizeof(stats_strings)); -} - -/* - * port stats maintained per queue of the port. They should be in the same - * order as in stats_strings above. 
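 *
 * [Annotation, not part of the original patch: the seven fields below map
 * one-to-one onto the last seven stats_strings entries, "TSO " through
 * "GROmerged "; get_stats() copies a struct port_stats first and appends
 * this struct, so reordering either side would mislabel ethtool -S
 * output.]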
- */ -struct queue_port_stats { - u64 tso; - u64 tx_csum; - u64 rx_csum; - u64 vlan_ex; - u64 vlan_ins; - u64 gro_pkts; - u64 gro_merged; -}; - -static void collect_sge_port_stats(const struct adapter *adap, - const struct port_info *p, struct queue_port_stats *s) -{ - int i; - const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; - const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; - - memset(s, 0, sizeof(*s)); - for (i = 0; i < p->nqsets; i++, rx++, tx++) { - s->tso += tx->tso; - s->tx_csum += tx->tx_cso; - s->rx_csum += rx->stats.rx_cso; - s->vlan_ex += rx->stats.vlan_ex; - s->vlan_ins += tx->vlan_ins; - s->gro_pkts += rx->stats.lro_pkts; - s->gro_merged += rx->stats.lro_merged; - } -} - -static void get_stats(struct net_device *dev, struct ethtool_stats *stats, - u64 *data) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); - - data += sizeof(struct port_stats) / sizeof(u64); - collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); -} - -/* - * Return a version number to identify the type of adapter. The scheme is: - * - bits 0..9: chip version - * - bits 10..15: chip revision - * - bits 16..23: register dump version - */ -static inline unsigned int mk_adap_vers(const struct adapter *ap) -{ - return 4 | (ap->params.rev << 10) | (1 << 16); -} - -static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, - unsigned int end) -{ - u32 *p = buf + start; - - for ( ; start <= end; start += sizeof(u32)) - *p++ = t4_read_reg(ap, start); -} - -static void get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *buf) -{ - static const unsigned int reg_ranges[] = { - 0x1008, 0x1108, - 0x1180, 0x11b4, - 0x11fc, 0x123c, - 0x1300, 0x173c, - 0x1800, 0x18fc, - 0x3000, 0x30d8, - 0x30e0, 0x5924, - 0x5960, 0x59d4, - 0x5a00, 0x5af8, - 0x6000, 0x6098, - 0x6100, 0x6150, - 0x6200, 0x6208, - 0x6240, 0x6248, - 0x6280, 0x6338, - 0x6370, 0x638c, - 0x6400, 0x643c, - 0x6500, 0x6524, - 0x6a00, 0x6a38, - 0x6a60, 0x6a78, - 0x6b00, 0x6b84, - 0x6bf0, 0x6c84, - 0x6cf0, 0x6d84, - 0x6df0, 0x6e84, - 0x6ef0, 0x6f84, - 0x6ff0, 0x7084, - 0x70f0, 0x7184, - 0x71f0, 0x7284, - 0x72f0, 0x7384, - 0x73f0, 0x7450, - 0x7500, 0x7530, - 0x7600, 0x761c, - 0x7680, 0x76cc, - 0x7700, 0x7798, - 0x77c0, 0x77fc, - 0x7900, 0x79fc, - 0x7b00, 0x7c38, - 0x7d00, 0x7efc, - 0x8dc0, 0x8e1c, - 0x8e30, 0x8e78, - 0x8ea0, 0x8f6c, - 0x8fc0, 0x9074, - 0x90fc, 0x90fc, - 0x9400, 0x9458, - 0x9600, 0x96bc, - 0x9800, 0x9808, - 0x9820, 0x983c, - 0x9850, 0x9864, - 0x9c00, 0x9c6c, - 0x9c80, 0x9cec, - 0x9d00, 0x9d6c, - 0x9d80, 0x9dec, - 0x9e00, 0x9e6c, - 0x9e80, 0x9eec, - 0x9f00, 0x9f6c, - 0x9f80, 0x9fec, - 0xd004, 0xd03c, - 0xdfc0, 0xdfe0, - 0xe000, 0xea7c, - 0xf000, 0x11190, - 0x19040, 0x1906c, - 0x19078, 0x19080, - 0x1908c, 0x19124, - 0x19150, 0x191b0, - 0x191d0, 0x191e8, - 0x19238, 0x1924c, - 0x193f8, 0x19474, - 0x19490, 0x194f8, - 0x19800, 0x19f30, - 0x1a000, 0x1a06c, - 0x1a0b0, 0x1a120, - 0x1a128, 0x1a138, - 0x1a190, 0x1a1c4, - 0x1a1fc, 0x1a1fc, - 0x1e040, 0x1e04c, - 0x1e284, 0x1e28c, - 0x1e2c0, 0x1e2c0, - 0x1e2e0, 0x1e2e0, - 0x1e300, 0x1e384, - 0x1e3c0, 0x1e3c8, - 0x1e440, 0x1e44c, - 0x1e684, 0x1e68c, - 0x1e6c0, 0x1e6c0, - 0x1e6e0, 0x1e6e0, - 0x1e700, 0x1e784, - 0x1e7c0, 0x1e7c8, - 0x1e840, 0x1e84c, - 0x1ea84, 0x1ea8c, - 0x1eac0, 0x1eac0, - 0x1eae0, 0x1eae0, - 0x1eb00, 0x1eb84, - 0x1ebc0, 0x1ebc8, - 0x1ec40, 0x1ec4c, - 0x1ee84, 0x1ee8c, - 0x1eec0, 0x1eec0, - 0x1eee0, 0x1eee0, - 
0x1ef00, 0x1ef84, - 0x1efc0, 0x1efc8, - 0x1f040, 0x1f04c, - 0x1f284, 0x1f28c, - 0x1f2c0, 0x1f2c0, - 0x1f2e0, 0x1f2e0, - 0x1f300, 0x1f384, - 0x1f3c0, 0x1f3c8, - 0x1f440, 0x1f44c, - 0x1f684, 0x1f68c, - 0x1f6c0, 0x1f6c0, - 0x1f6e0, 0x1f6e0, - 0x1f700, 0x1f784, - 0x1f7c0, 0x1f7c8, - 0x1f840, 0x1f84c, - 0x1fa84, 0x1fa8c, - 0x1fac0, 0x1fac0, - 0x1fae0, 0x1fae0, - 0x1fb00, 0x1fb84, - 0x1fbc0, 0x1fbc8, - 0x1fc40, 0x1fc4c, - 0x1fe84, 0x1fe8c, - 0x1fec0, 0x1fec0, - 0x1fee0, 0x1fee0, - 0x1ff00, 0x1ff84, - 0x1ffc0, 0x1ffc8, - 0x20000, 0x2002c, - 0x20100, 0x2013c, - 0x20190, 0x201c8, - 0x20200, 0x20318, - 0x20400, 0x20528, - 0x20540, 0x20614, - 0x21000, 0x21040, - 0x2104c, 0x21060, - 0x210c0, 0x210ec, - 0x21200, 0x21268, - 0x21270, 0x21284, - 0x212fc, 0x21388, - 0x21400, 0x21404, - 0x21500, 0x21518, - 0x2152c, 0x2153c, - 0x21550, 0x21554, - 0x21600, 0x21600, - 0x21608, 0x21628, - 0x21630, 0x2163c, - 0x21700, 0x2171c, - 0x21780, 0x2178c, - 0x21800, 0x21c38, - 0x21c80, 0x21d7c, - 0x21e00, 0x21e04, - 0x22000, 0x2202c, - 0x22100, 0x2213c, - 0x22190, 0x221c8, - 0x22200, 0x22318, - 0x22400, 0x22528, - 0x22540, 0x22614, - 0x23000, 0x23040, - 0x2304c, 0x23060, - 0x230c0, 0x230ec, - 0x23200, 0x23268, - 0x23270, 0x23284, - 0x232fc, 0x23388, - 0x23400, 0x23404, - 0x23500, 0x23518, - 0x2352c, 0x2353c, - 0x23550, 0x23554, - 0x23600, 0x23600, - 0x23608, 0x23628, - 0x23630, 0x2363c, - 0x23700, 0x2371c, - 0x23780, 0x2378c, - 0x23800, 0x23c38, - 0x23c80, 0x23d7c, - 0x23e00, 0x23e04, - 0x24000, 0x2402c, - 0x24100, 0x2413c, - 0x24190, 0x241c8, - 0x24200, 0x24318, - 0x24400, 0x24528, - 0x24540, 0x24614, - 0x25000, 0x25040, - 0x2504c, 0x25060, - 0x250c0, 0x250ec, - 0x25200, 0x25268, - 0x25270, 0x25284, - 0x252fc, 0x25388, - 0x25400, 0x25404, - 0x25500, 0x25518, - 0x2552c, 0x2553c, - 0x25550, 0x25554, - 0x25600, 0x25600, - 0x25608, 0x25628, - 0x25630, 0x2563c, - 0x25700, 0x2571c, - 0x25780, 0x2578c, - 0x25800, 0x25c38, - 0x25c80, 0x25d7c, - 0x25e00, 0x25e04, - 0x26000, 0x2602c, - 0x26100, 0x2613c, - 0x26190, 0x261c8, - 0x26200, 0x26318, - 0x26400, 0x26528, - 0x26540, 0x26614, - 0x27000, 0x27040, - 0x2704c, 0x27060, - 0x270c0, 0x270ec, - 0x27200, 0x27268, - 0x27270, 0x27284, - 0x272fc, 0x27388, - 0x27400, 0x27404, - 0x27500, 0x27518, - 0x2752c, 0x2753c, - 0x27550, 0x27554, - 0x27600, 0x27600, - 0x27608, 0x27628, - 0x27630, 0x2763c, - 0x27700, 0x2771c, - 0x27780, 0x2778c, - 0x27800, 0x27c38, - 0x27c80, 0x27d7c, - 0x27e00, 0x27e04 - }; - - int i; - struct adapter *ap = netdev2adap(dev); - - regs->version = mk_adap_vers(ap); - - memset(buf, 0, T4_REGMAP_SIZE); - for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) - reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); -} - -static int restart_autoneg(struct net_device *dev) -{ - struct port_info *p = netdev_priv(dev); - - if (!netif_running(dev)) - return -EAGAIN; - if (p->link_cfg.autoneg != AUTONEG_ENABLE) - return -EINVAL; - t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan); - return 0; -} - -static int identify_port(struct net_device *dev, - enum ethtool_phys_id_state state) -{ - unsigned int val; - struct adapter *adap = netdev2adap(dev); - - if (state == ETHTOOL_ID_ACTIVE) - val = 0xffff; - else if (state == ETHTOOL_ID_INACTIVE) - val = 0; - else - return -EINVAL; - - return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val); -} - -static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) -{ - unsigned int v = 0; - - if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || - type == FW_PORT_TYPE_BT_XAUI) { - v |= 
SUPPORTED_TP; - if (caps & FW_PORT_CAP_SPEED_100M) - v |= SUPPORTED_100baseT_Full; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { - v |= SUPPORTED_Backplane; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseKX_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseKX4_Full; - } else if (type == FW_PORT_TYPE_KR) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; - else if (type == FW_PORT_TYPE_BP_AP) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; - else if (type == FW_PORT_TYPE_BP4_AP) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | - SUPPORTED_10000baseKX4_Full; - else if (type == FW_PORT_TYPE_FIBER_XFI || - type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) - v |= SUPPORTED_FIBRE; - - if (caps & FW_PORT_CAP_ANEG) - v |= SUPPORTED_Autoneg; - return v; -} - -static unsigned int to_fw_linkcaps(unsigned int caps) -{ - unsigned int v = 0; - - if (caps & ADVERTISED_100baseT_Full) - v |= FW_PORT_CAP_SPEED_100M; - if (caps & ADVERTISED_1000baseT_Full) - v |= FW_PORT_CAP_SPEED_1G; - if (caps & ADVERTISED_10000baseT_Full) - v |= FW_PORT_CAP_SPEED_10G; - return v; -} - -static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - const struct port_info *p = netdev_priv(dev); - - if (p->port_type == FW_PORT_TYPE_BT_SGMII || - p->port_type == FW_PORT_TYPE_BT_XFI || - p->port_type == FW_PORT_TYPE_BT_XAUI) - cmd->port = PORT_TP; - else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || - p->port_type == FW_PORT_TYPE_FIBER_XAUI) - cmd->port = PORT_FIBRE; - else if (p->port_type == FW_PORT_TYPE_SFP) { - if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || - p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) - cmd->port = PORT_DA; - else - cmd->port = PORT_FIBRE; - } else - cmd->port = PORT_OTHER; - - if (p->mdio_addr >= 0) { - cmd->phy_address = p->mdio_addr; - cmd->transceiver = XCVR_EXTERNAL; - cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? - MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; - } else { - cmd->phy_address = 0; /* not really, but no better option */ - cmd->transceiver = XCVR_INTERNAL; - cmd->mdio_support = 0; - } - - cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); - cmd->advertising = from_fw_linkcaps(p->port_type, - p->link_cfg.advertising); - ethtool_cmd_speed_set(cmd, - netif_carrier_ok(dev) ? p->link_cfg.speed : 0); - cmd->duplex = DUPLEX_FULL; - cmd->autoneg = p->link_cfg.autoneg; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - return 0; -} - -static unsigned int speed_to_caps(int speed) -{ - if (speed == SPEED_100) - return FW_PORT_CAP_SPEED_100M; - if (speed == SPEED_1000) - return FW_PORT_CAP_SPEED_1G; - if (speed == SPEED_10000) - return FW_PORT_CAP_SPEED_10G; - return 0; -} - -static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - unsigned int cap; - struct port_info *p = netdev_priv(dev); - struct link_config *lc = &p->link_cfg; - u32 speed = ethtool_cmd_speed(cmd); - - if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ - return -EINVAL; - - if (!(lc->supported & FW_PORT_CAP_ANEG)) { - /* - * PHY offers a single speed. See if that's what's - * being requested. 
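 *
 * [Annotation, example not in the original patch: a fixed-speed module
 * whose only capability is FW_PORT_CAP_SPEED_10G accepts a request for
 * autoneg off at 10Gbps as a no-op and rejects anything else with
 * -EINVAL.]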
- */ - if (cmd->autoneg == AUTONEG_DISABLE && - (lc->supported & speed_to_caps(speed))) - return 0; - return -EINVAL; - } - - if (cmd->autoneg == AUTONEG_DISABLE) { - cap = speed_to_caps(speed); - - if (!(lc->supported & cap) || (speed == SPEED_1000) || - (speed == SPEED_10000)) - return -EINVAL; - lc->requested_speed = cap; - lc->advertising = 0; - } else { - cap = to_fw_linkcaps(cmd->advertising); - if (!(lc->supported & cap)) - return -EINVAL; - lc->requested_speed = 0; - lc->advertising = cap | FW_PORT_CAP_ANEG; - } - lc->autoneg = cmd->autoneg; - - if (netif_running(dev)) - return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, - lc); - return 0; -} - -static void get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct port_info *p = netdev_priv(dev); - - epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; - epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; - epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; -} - -static int set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *epause) -{ - struct port_info *p = netdev_priv(dev); - struct link_config *lc = &p->link_cfg; - - if (epause->autoneg == AUTONEG_DISABLE) - lc->requested_fc = 0; - else if (lc->supported & FW_PORT_CAP_ANEG) - lc->requested_fc = PAUSE_AUTONEG; - else - return -EINVAL; - - if (epause->rx_pause) - lc->requested_fc |= PAUSE_RX; - if (epause->tx_pause) - lc->requested_fc |= PAUSE_TX; - if (netif_running(dev)) - return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, - lc); - return 0; -} - -static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) -{ - const struct port_info *pi = netdev_priv(dev); - const struct sge *s = &pi->adapter->sge; - - e->rx_max_pending = MAX_RX_BUFFERS; - e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; - e->rx_jumbo_max_pending = 0; - e->tx_max_pending = MAX_TXQ_ENTRIES; - - e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; - e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; - e->rx_jumbo_pending = 0; - e->tx_pending = s->ethtxq[pi->first_qset].q.size; -} - -static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) -{ - int i; - const struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - struct sge *s = &adapter->sge; - - if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || - e->tx_pending > MAX_TXQ_ENTRIES || - e->rx_mini_pending > MAX_RSPQ_ENTRIES || - e->rx_mini_pending < MIN_RSPQ_ENTRIES || - e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) - return -EINVAL; - - if (adapter->flags & FULL_INIT_DONE) - return -EBUSY; - - for (i = 0; i < pi->nqsets; ++i) { - s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; - s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; - s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; - } - return 0; -} - -static int closest_timer(const struct sge *s, int time) -{ - int i, delta, match = 0, min_delta = INT_MAX; - - for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { - delta = time - s->timer_val[i]; - if (delta < 0) - delta = -delta; - if (delta < min_delta) { - min_delta = delta; - match = i; - } - } - return match; -} - -static int closest_thres(const struct sge *s, int thres) -{ - int i, delta, match = 0, min_delta = INT_MAX; - - for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { - delta = thres - s->counter_val[i]; - if (delta < 0) - delta = -delta; - if (delta < min_delta) { - min_delta = delta; - match = i; - } - } - return match; -} - -/* - * Return a 
queue's interrupt hold-off time in us. 0 means no timer. - */ -static unsigned int qtimer_val(const struct adapter *adap, - const struct sge_rspq *q) -{ - unsigned int idx = q->intr_params >> 1; - - return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; -} - -/** - * set_rxq_intr_params - set a queue's interrupt holdoff parameters - * @adap: the adapter - * @q: the Rx queue - * @us: the hold-off time in us, or 0 to disable timer - * @cnt: the hold-off packet count, or 0 to disable counter - * - * Sets an Rx queue's interrupt hold-off time and packet count. At least - * one of the two needs to be enabled for the queue to generate interrupts. - */ -static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, - unsigned int us, unsigned int cnt) -{ - if ((us | cnt) == 0) - cnt = 1; - - if (cnt) { - int err; - u32 v, new_idx; - - new_idx = closest_thres(&adap->sge, cnt); - if (q->desc && q->pktcnt_idx != new_idx) { - /* the queue has already been created, update it */ - v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | - FW_PARAMS_PARAM_YZ(q->cntxt_id); - err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, - &new_idx); - if (err) - return err; - } - q->pktcnt_idx = new_idx; - } - - us = us == 0 ? 6 : closest_timer(&adap->sge, us); - q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0); - return 0; -} - -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - const struct port_info *pi = netdev_priv(dev); - struct adapter *adap = pi->adapter; - - return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, - c->rx_coalesce_usecs, c->rx_max_coalesced_frames); -} - -static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - const struct port_info *pi = netdev_priv(dev); - const struct adapter *adap = pi->adapter; - const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; - - c->rx_coalesce_usecs = qtimer_val(adap, rq); - c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? - adap->sge.counter_val[rq->pktcnt_idx] : 0; - return 0; -} - -/** - * eeprom_ptov - translate a physical EEPROM address to virtual - * @phys_addr: the physical EEPROM address - * @fn: the PCI function number - * @sz: size of function-specific area - * - * Translate a physical EEPROM address to virtual. The first 1K is - * accessed through virtual addresses starting at 31K, the rest is - * accessed through virtual addresses starting at 0. - * - * The mapping is as follows: - * [0..1K) -> [31K..32K) - * [1K..1K+A) -> [31K-A..31K) - * [1K+A..ES) -> [0..ES-A-1K) - * - * where A = @fn * @sz, and ES = EEPROM size. - */ -static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) -{ - fn *= sz; - if (phys_addr < 1024) - return phys_addr + (31 << 10); - if (phys_addr < 1024 + fn) - return 31744 - fn + phys_addr - 1024; - if (phys_addr < EEPROMSIZE) - return phys_addr - 1024 - fn; - return -EINVAL; -} - -/* - * The next two routines implement eeprom read/write from physical addresses. - */ -static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) -{ - int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); - - if (vaddr >= 0) - vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); - return vaddr < 0 ? 
vaddr : 0; -} - -static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) -{ - int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); - - if (vaddr >= 0) - vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); - return vaddr < 0 ? vaddr : 0; -} - -#define EEPROM_MAGIC 0x38E2F10C - -static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, - u8 *data) -{ - int i, err = 0; - struct adapter *adapter = netdev2adap(dev); - - u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - e->magic = EEPROM_MAGIC; - for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) - err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); - - if (!err) - memcpy(data, buf + e->offset, e->len); - kfree(buf); - return err; -} - -static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, - u8 *data) -{ - u8 *buf; - int err = 0; - u32 aligned_offset, aligned_len, *p; - struct adapter *adapter = netdev2adap(dev); - - if (eeprom->magic != EEPROM_MAGIC) - return -EINVAL; - - aligned_offset = eeprom->offset & ~3; - aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; - - if (adapter->fn > 0) { - u32 start = 1024 + adapter->fn * EEPROMPFSIZE; - - if (aligned_offset < start || - aligned_offset + aligned_len > start + EEPROMPFSIZE) - return -EPERM; - } - - if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { - /* - * RMW possibly needed for first or last words. - */ - buf = kmalloc(aligned_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); - if (!err && aligned_len > 4) - err = eeprom_rd_phys(adapter, - aligned_offset + aligned_len - 4, - (u32 *)&buf[aligned_len - 4]); - if (err) - goto out; - memcpy(buf + (eeprom->offset & 3), data, eeprom->len); - } else - buf = data; - - err = t4_seeprom_wp(adapter, false); - if (err) - goto out; - - for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { - err = eeprom_wr_phys(adapter, aligned_offset, *p); - aligned_offset += 4; - } - - if (!err) - err = t4_seeprom_wp(adapter, true); -out: - if (buf != data) - kfree(buf); - return err; -} - -static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) -{ - int ret; - const struct firmware *fw; - struct adapter *adap = netdev2adap(netdev); - - ef->data[sizeof(ef->data) - 1] = '\0'; - ret = request_firmware(&fw, ef->data, adap->pdev_dev); - if (ret < 0) - return ret; - - ret = t4_load_fw(adap, fw->data, fw->size); - release_firmware(fw); - if (!ret) - dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); - return ret; -} - -#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) -#define BCAST_CRC 0xa0ccc1a6 - -static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - wol->supported = WAKE_BCAST | WAKE_MAGIC; - wol->wolopts = netdev2adap(dev)->wol; - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - int err = 0; - struct port_info *pi = netdev_priv(dev); - - if (wol->wolopts & ~WOL_SUPPORTED) - return -EINVAL; - t4_wol_magic_enable(pi->adapter, pi->tx_chan, - (wol->wolopts & WAKE_MAGIC) ? 
dev->dev_addr : NULL); - if (wol->wolopts & WAKE_BCAST) { - err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, - ~0ULL, 0, false); - if (!err) - err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, - ~6ULL, ~0ULL, BCAST_CRC, true); - } else - t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); - return err; -} - -static int cxgb_set_features(struct net_device *dev, u32 features) -{ - const struct port_info *pi = netdev_priv(dev); - u32 changed = dev->features ^ features; - int err; - - if (!(changed & NETIF_F_HW_VLAN_RX)) - return 0; - - err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, - -1, -1, -1, - !!(features & NETIF_F_HW_VLAN_RX), true); - if (unlikely(err)) - dev->features = features ^ NETIF_F_HW_VLAN_RX; - return err; -} - -static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p) -{ - const struct port_info *pi = netdev_priv(dev); - unsigned int n = min_t(unsigned int, p->size, pi->rss_size); - - p->size = pi->rss_size; - while (n--) - p->ring_index[n] = pi->rss[n]; - return 0; -} - -static int set_rss_table(struct net_device *dev, - const struct ethtool_rxfh_indir *p) -{ - unsigned int i; - struct port_info *pi = netdev_priv(dev); - - if (p->size != pi->rss_size) - return -EINVAL; - for (i = 0; i < p->size; i++) - if (p->ring_index[i] >= pi->nqsets) - return -EINVAL; - for (i = 0; i < p->size; i++) - pi->rss[i] = p->ring_index[i]; - if (pi->adapter->flags & FULL_INIT_DONE) - return write_rss(pi, pi->rss); - return 0; -} - -static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - void *rules) -{ - const struct port_info *pi = netdev_priv(dev); - - switch (info->cmd) { - case ETHTOOL_GRXFH: { - unsigned int v = pi->rss_mode; - - info->data = 0; - switch (info->flow_type) { - case TCP_V4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case UDP_V4_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case SCTP_V4_FLOW: - case AH_ESP_V4_FLOW: - case IPV4_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case TCP_V6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case UDP_V6_FLOW: - if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) && - (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case SCTP_V6_FLOW: - case AH_ESP_V6_FLOW: - case IPV6_FLOW: - if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - } - return 0; - } - case ETHTOOL_GRXRINGS: - info->data = pi->nqsets; - return 0; - } - return -EOPNOTSUPP; -} - -static struct ethtool_ops cxgb_ethtool_ops = { - .get_settings = get_settings, - .set_settings = set_settings, - .get_drvinfo = get_drvinfo, - .get_msglevel = get_msglevel, - .set_msglevel = set_msglevel, - .get_ringparam = get_sge_param, - .set_ringparam = set_sge_param, - .get_coalesce = get_coalesce, - .set_coalesce 
= set_coalesce, - .get_eeprom_len = get_eeprom_len, - .get_eeprom = get_eeprom, - .set_eeprom = set_eeprom, - .get_pauseparam = get_pauseparam, - .set_pauseparam = set_pauseparam, - .get_link = ethtool_op_get_link, - .get_strings = get_strings, - .set_phys_id = identify_port, - .nway_reset = restart_autoneg, - .get_sset_count = get_sset_count, - .get_ethtool_stats = get_stats, - .get_regs_len = get_regs_len, - .get_regs = get_regs, - .get_wol = get_wol, - .set_wol = set_wol, - .get_rxnfc = get_rxnfc, - .get_rxfh_indir = get_rss_table, - .set_rxfh_indir = set_rss_table, - .flash_device = set_flash, -}; - -/* - * debugfs support - */ - -static int mem_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - -static ssize_t mem_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) -{ - loff_t pos = *ppos; - loff_t avail = file->f_path.dentry->d_inode->i_size; - unsigned int mem = (uintptr_t)file->private_data & 3; - struct adapter *adap = file->private_data - mem; - - if (pos < 0) - return -EINVAL; - if (pos >= avail) - return 0; - if (count > avail - pos) - count = avail - pos; - - while (count) { - size_t len; - int ret, ofst; - __be32 data[16]; - - if (mem == MEM_MC) - ret = t4_mc_read(adap, pos, data, NULL); - else - ret = t4_edc_read(adap, mem, pos, data, NULL); - if (ret) - return ret; - - ofst = pos % sizeof(data); - len = min(count, sizeof(data) - ofst); - if (copy_to_user(buf, (u8 *)data + ofst, len)) - return -EFAULT; - - buf += len; - pos += len; - count -= len; - } - count = pos - *ppos; - *ppos = pos; - return count; -} - -static const struct file_operations mem_debugfs_fops = { - .owner = THIS_MODULE, - .open = mem_open, - .read = mem_read, - .llseek = default_llseek, -}; - -static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, - unsigned int idx, unsigned int size_mb) -{ - struct dentry *de; - - de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, - (void *)adap + idx, &mem_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = size_mb << 20; -} - -static int __devinit setup_debugfs(struct adapter *adap) -{ - int i; - - if (IS_ERR_OR_NULL(adap->debugfs_root)) - return -1; - - i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); - if (i & EDRAM0_ENABLE) - add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); - if (i & EDRAM1_ENABLE) - add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); - if (i & EXT_MEM_ENABLE) - add_debugfs_mem(adap, "mc", MEM_MC, - EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); - if (adap->l2t) - debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, - &t4_l2t_fops); - return 0; -} - -/* - * upper-layer driver support - */ - -/* - * Allocate an active-open TID and set it to the supplied value. - */ -int cxgb4_alloc_atid(struct tid_info *t, void *data) -{ - int atid = -1; - - spin_lock_bh(&t->atid_lock); - if (t->afree) { - union aopen_entry *p = t->afree; - - atid = p - t->atid_tab; - t->afree = p->next; - p->data = data; - t->atids_in_use++; - } - spin_unlock_bh(&t->atid_lock); - return atid; -} -EXPORT_SYMBOL(cxgb4_alloc_atid); - -/* - * Release an active-open TID. - */ -void cxgb4_free_atid(struct tid_info *t, unsigned int atid) -{ - union aopen_entry *p = &t->atid_tab[atid]; - - spin_lock_bh(&t->atid_lock); - p->next = t->afree; - t->afree = p; - t->atids_in_use--; - spin_unlock_bh(&t->atid_lock); -} -EXPORT_SYMBOL(cxgb4_free_atid); - -/* - * Allocate a server TID and set it to the supplied value. 
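The active-open allocator above (cxgb4_alloc_atid()/cxgb4_free_atid()) is an intrusive free list: union aopen_entry stores either the caller's data pointer (while allocated) or the next-free link (while free), so the bookkeeping needs no extra memory. A minimal userspace sketch of the same pattern -- the names and the table size are illustrative, not the driver's:

#include <stdio.h>

/* Free entries reuse their own storage as the free-list link,
 * exactly like aopen_entry in the driver above. */
union entry {
	void *data;		/* meaningful while allocated */
	union entry *next;	/* meaningful while on the free list */
};

static union entry tab[8];
static union entry *afree;

static void pool_init(void)
{
	int i;

	for (i = 0; i < 7; i++)		/* thread entries onto the list */
		tab[i].next = &tab[i + 1];
	tab[7].next = NULL;
	afree = tab;
}

static int alloc_id(void *data)
{
	union entry *p = afree;

	if (!p)
		return -1;		/* pool exhausted */
	afree = p->next;
	p->data = data;
	return p - tab;			/* the ID is simply the table index */
}

static void free_id(int id)
{
	tab[id].next = afree;		/* push back onto the free list */
	afree = &tab[id];
}

int main(void)
{
	pool_init();
	int id = alloc_id("payload");
	printf("id=%d data=%s\n", id, (char *)tab[id].data);
	free_id(id);
	return 0;
}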
- */ -int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) -{ - int stid; - - spin_lock_bh(&t->stid_lock); - if (family == PF_INET) { - stid = find_first_zero_bit(t->stid_bmap, t->nstids); - if (stid < t->nstids) - __set_bit(stid, t->stid_bmap); - else - stid = -1; - } else { - stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); - if (stid < 0) - stid = -1; - } - if (stid >= 0) { - t->stid_tab[stid].data = data; - stid += t->stid_base; - t->stids_in_use++; - } - spin_unlock_bh(&t->stid_lock); - return stid; -} -EXPORT_SYMBOL(cxgb4_alloc_stid); - -/* - * Release a server TID. - */ -void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) -{ - stid -= t->stid_base; - spin_lock_bh(&t->stid_lock); - if (family == PF_INET) - __clear_bit(stid, t->stid_bmap); - else - bitmap_release_region(t->stid_bmap, stid, 2); - t->stid_tab[stid].data = NULL; - t->stids_in_use--; - spin_unlock_bh(&t->stid_lock); -} -EXPORT_SYMBOL(cxgb4_free_stid); - -/* - * Populate a TID_RELEASE WR. Caller must properly size the skb. - */ -static void mk_tid_release(struct sk_buff *skb, unsigned int chan, - unsigned int tid) -{ - struct cpl_tid_release *req; - - set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); - req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); - INIT_TP_WR(req, tid); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); -} - -/* - * Queue a TID release request and if necessary schedule a work queue to - * process it. - */ -static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, - unsigned int tid) -{ - void **p = &t->tid_tab[tid]; - struct adapter *adap = container_of(t, struct adapter, tids); - - spin_lock_bh(&adap->tid_release_lock); - *p = adap->tid_release_head; - /* Low 2 bits encode the Tx channel number */ - adap->tid_release_head = (void **)((uintptr_t)p | chan); - if (!adap->tid_release_task_busy) { - adap->tid_release_task_busy = true; - schedule_work(&adap->tid_release_task); - } - spin_unlock_bh(&adap->tid_release_lock); -} - -/* - * Process the list of pending TID release requests. - */ -static void process_tid_release_list(struct work_struct *work) -{ - struct sk_buff *skb; - struct adapter *adap; - - adap = container_of(work, struct adapter, tid_release_task); - - spin_lock_bh(&adap->tid_release_lock); - while (adap->tid_release_head) { - void **p = adap->tid_release_head; - unsigned int chan = (uintptr_t)p & 3; - p = (void *)p - chan; - - adap->tid_release_head = *p; - *p = NULL; - spin_unlock_bh(&adap->tid_release_lock); - - while (!(skb = alloc_skb(sizeof(struct cpl_tid_release), - GFP_KERNEL))) - schedule_timeout_uninterruptible(1); - - mk_tid_release(skb, chan, p - adap->tids.tid_tab); - t4_ofld_send(adap, skb); - spin_lock_bh(&adap->tid_release_lock); - } - adap->tid_release_task_busy = false; - spin_unlock_bh(&adap->tid_release_lock); -} - -/* - * Release a TID and inform HW. If we are unable to allocate the release - * message we defer to a work queue. - */ -void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) -{ - void *old; - struct sk_buff *skb; - struct adapter *adap = container_of(t, struct adapter, tids); - - old = t->tid_tab[tid]; - skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); - if (likely(skb)) { - t->tid_tab[tid] = NULL; - mk_tid_release(skb, chan, tid); - t4_ofld_send(adap, skb); - } else - cxgb4_queue_tid_release(t, chan, tid); - if (old) - atomic_dec(&t->tids_in_use); -} -EXPORT_SYMBOL(cxgb4_remove_tid); - -/* - * Allocate and initialize the TID tables. 
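The deferred-release path above (cxgb4_queue_tid_release()/process_tid_release_list()) chains pending requests through the tid_tab slots themselves and hides the Tx channel number in the low two bits of each link, which pointer alignment leaves free. A standalone sketch of that tagging trick, with illustrative names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* A pointer to a pointer-sized slot is at least 4-byte aligned, so its
 * low two bits can carry a small tag -- here a channel number 0..3, as
 * in "adap->tid_release_head = (void **)((uintptr_t)p | chan)" above. */
static void *tag(void **p, unsigned int chan)
{
	assert(chan < 4 && ((uintptr_t)p & 3) == 0);
	return (void *)((uintptr_t)p | chan);
}

static void **untag(void *t, unsigned int *chan)
{
	*chan = (uintptr_t)t & 3;
	return (void **)((uintptr_t)t & ~(uintptr_t)3);
}

int main(void)
{
	void *slot = NULL;
	unsigned int chan;
	void **p = untag(tag(&slot, 2), &chan);

	printf("chan=%u restored=%d\n", chan, p == &slot);
	return 0;
}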
Returns 0 on success. - */ -static int tid_init(struct tid_info *t) -{ - size_t size; - unsigned int natids = t->natids; - - size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + - t->nstids * sizeof(*t->stid_tab) + - BITS_TO_LONGS(t->nstids) * sizeof(long); - t->tid_tab = t4_alloc_mem(size); - if (!t->tid_tab) - return -ENOMEM; - - t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; - t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; - t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; - spin_lock_init(&t->stid_lock); - spin_lock_init(&t->atid_lock); - - t->stids_in_use = 0; - t->afree = NULL; - t->atids_in_use = 0; - atomic_set(&t->tids_in_use, 0); - - /* Setup the free list for atid_tab and clear the stid bitmap. */ - if (natids) { - while (--natids) - t->atid_tab[natids - 1].next = &t->atid_tab[natids]; - t->afree = t->atid_tab; - } - bitmap_zero(t->stid_bmap, t->nstids); - return 0; -} - -/** - * cxgb4_create_server - create an IP server - * @dev: the device - * @stid: the server TID - * @sip: local IP address to bind server to - * @sport: the server's TCP port - * @queue: queue to direct messages from this server to - * - * Create an IP server for the given port and address. - * Returns <0 on error and one of the %NET_XMIT_* values on success. - */ -int cxgb4_create_server(const struct net_device *dev, unsigned int stid, - __be32 sip, __be16 sport, unsigned int queue) -{ - unsigned int chan; - struct sk_buff *skb; - struct adapter *adap; - struct cpl_pass_open_req *req; - - skb = alloc_skb(sizeof(*req), GFP_KERNEL); - if (!skb) - return -ENOMEM; - - adap = netdev2adap(dev); - req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); - INIT_TP_WR(req, 0); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); - req->local_port = sport; - req->peer_port = htons(0); - req->local_ip = sip; - req->peer_ip = htonl(0); - chan = rxq_to_chan(&adap->sge, queue); - req->opt0 = cpu_to_be64(TX_CHAN(chan)); - req->opt1 = cpu_to_be64(CONN_POLICY_ASK | - SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); - return t4_mgmt_tx(adap, skb); -} -EXPORT_SYMBOL(cxgb4_create_server); - -/** - * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU - * @mtus: the HW MTU table - * @mtu: the target MTU - * @idx: index of selected entry in the MTU table - * - * Returns the index and the value in the HW MTU table that is closest to - * but does not exceed @mtu, unless @mtu is smaller than any value in the - * table, in which case that smallest available value is selected. - */ -unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, - unsigned int *idx) -{ - unsigned int i = 0; - - while (i < NMTUS - 1 && mtus[i + 1] <= mtu) - ++i; - if (idx) - *idx = i; - return mtus[i]; -} -EXPORT_SYMBOL(cxgb4_best_mtu); - -/** - * cxgb4_port_chan - get the HW channel of a port - * @dev: the net device for the port - * - * Return the HW Tx channel of the given port. - */ -unsigned int cxgb4_port_chan(const struct net_device *dev) -{ - return netdev2pinfo(dev)->tx_chan; -} -EXPORT_SYMBOL(cxgb4_port_chan); - -/** - * cxgb4_port_viid - get the VI id of a port - * @dev: the net device for the port - * - * Return the VI id of the given port. - */ -unsigned int cxgb4_port_viid(const struct net_device *dev) -{ - return netdev2pinfo(dev)->viid; -} -EXPORT_SYMBOL(cxgb4_port_viid); - -/** - * cxgb4_port_idx - get the index of a port - * @dev: the net device for the port - * - * Return the index of the given port. 
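cxgb4_best_mtu() above is a plain linear scan over the sorted hardware MTU table. A self-contained copy of the selection logic; the table contents below are illustrative, not necessarily the firmware defaults:

#include <stdio.h>

#define NMTUS 16	/* matches the driver's table size */

static unsigned int best_mtu(const unsigned short *mtus, unsigned short mtu,
			     unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}

int main(void)
{
	static const unsigned short mtus[NMTUS] = {
		88, 256, 512, 576, 808, 1024, 1280, 1488,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int idx;

	/* 1400 falls between 1280 and 1488: the 1280 entry is chosen */
	printf("mtu=%u idx=%u\n", best_mtu(mtus, 1400, &idx), idx);
	/* 40 is below every entry: the smallest (88) is returned */
	printf("mtu=%u idx=%u\n", best_mtu(mtus, 40, &idx), idx);
	return 0;
}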
- */ -unsigned int cxgb4_port_idx(const struct net_device *dev) -{ - return netdev2pinfo(dev)->port_id; -} -EXPORT_SYMBOL(cxgb4_port_idx); - -void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, - struct tp_tcp_stats *v6) -{ - struct adapter *adap = pci_get_drvdata(pdev); - - spin_lock(&adap->stats_lock); - t4_tp_get_tcp_stats(adap, v4, v6); - spin_unlock(&adap->stats_lock); -} -EXPORT_SYMBOL(cxgb4_get_tcp_stats); - -void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, - const unsigned int *pgsz_order) -{ - struct adapter *adap = netdev2adap(dev); - - t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); - t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | - HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | - HPZ3(pgsz_order[3])); -} -EXPORT_SYMBOL(cxgb4_iscsi_init); - -static struct pci_driver cxgb4_driver; - -static void check_neigh_update(struct neighbour *neigh) -{ - const struct device *parent; - const struct net_device *netdev = neigh->dev; - - if (netdev->priv_flags & IFF_802_1Q_VLAN) - netdev = vlan_dev_real_dev(netdev); - parent = netdev->dev.parent; - if (parent && parent->driver == &cxgb4_driver.driver) - t4_l2t_update(dev_get_drvdata(parent), neigh); -} - -static int netevent_cb(struct notifier_block *nb, unsigned long event, - void *data) -{ - switch (event) { - case NETEVENT_NEIGH_UPDATE: - check_neigh_update(data); - break; - case NETEVENT_REDIRECT: - default: - break; - } - return 0; -} - -static bool netevent_registered; -static struct notifier_block cxgb4_netevent_nb = { - .notifier_call = netevent_cb -}; - -static void uld_attach(struct adapter *adap, unsigned int uld) -{ - void *handle; - struct cxgb4_lld_info lli; - - lli.pdev = adap->pdev; - lli.l2t = adap->l2t; - lli.tids = &adap->tids; - lli.ports = adap->port; - lli.vr = &adap->vres; - lli.mtus = adap->params.mtus; - if (uld == CXGB4_ULD_RDMA) { - lli.rxq_ids = adap->sge.rdma_rxq; - lli.nrxq = adap->sge.rdmaqs; - } else if (uld == CXGB4_ULD_ISCSI) { - lli.rxq_ids = adap->sge.ofld_rxq; - lli.nrxq = adap->sge.ofldqsets; - } - lli.ntxq = adap->sge.ofldqsets; - lli.nchan = adap->params.nports; - lli.nports = adap->params.nports; - lli.wr_cred = adap->params.ofldq_wr_cred; - lli.adapter_type = adap->params.rev; - lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); - lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( - t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> - (adap->fn * 4)); - lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( - t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> - (adap->fn * 4)); - lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); - lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); - lli.fw_vers = adap->params.fw_vers; - - handle = ulds[uld].add(&lli); - if (IS_ERR(handle)) { - dev_warn(adap->pdev_dev, - "could not attach to the %s driver, error %ld\n", - uld_str[uld], PTR_ERR(handle)); - return; - } - - adap->uld_handle[uld] = handle; - - if (!netevent_registered) { - register_netevent_notifier(&cxgb4_netevent_nb); - netevent_registered = true; - } - - if (adap->flags & FULL_INIT_DONE) - ulds[uld].state_change(handle, CXGB4_STATE_UP); -} - -static void attach_ulds(struct adapter *adap) -{ - unsigned int i; - - mutex_lock(&uld_mutex); - list_add_tail(&adap->list_node, &adapter_list); - for (i = 0; i < CXGB4_ULD_MAX; i++) - if (ulds[i].add) - uld_attach(adap, i); - mutex_unlock(&uld_mutex); -} - -static void detach_ulds(struct adapter *adap) -{ - unsigned int i; - - mutex_lock(&uld_mutex); - list_del(&adap->list_node); - for (i = 0; i < 
CXGB4_ULD_MAX; i++) - if (adap->uld_handle[i]) { - ulds[i].state_change(adap->uld_handle[i], - CXGB4_STATE_DETACH); - adap->uld_handle[i] = NULL; - } - if (netevent_registered && list_empty(&adapter_list)) { - unregister_netevent_notifier(&cxgb4_netevent_nb); - netevent_registered = false; - } - mutex_unlock(&uld_mutex); -} - -static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) -{ - unsigned int i; - - mutex_lock(&uld_mutex); - for (i = 0; i < CXGB4_ULD_MAX; i++) - if (adap->uld_handle[i]) - ulds[i].state_change(adap->uld_handle[i], new_state); - mutex_unlock(&uld_mutex); -} - -/** - * cxgb4_register_uld - register an upper-layer driver - * @type: the ULD type - * @p: the ULD methods - * - * Registers an upper-layer driver with this driver and notifies the ULD - * about any presently available devices that support its type. Returns - * %-EBUSY if a ULD of the same type is already registered. - */ -int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) -{ - int ret = 0; - struct adapter *adap; - - if (type >= CXGB4_ULD_MAX) - return -EINVAL; - mutex_lock(&uld_mutex); - if (ulds[type].add) { - ret = -EBUSY; - goto out; - } - ulds[type] = *p; - list_for_each_entry(adap, &adapter_list, list_node) - uld_attach(adap, type); -out: mutex_unlock(&uld_mutex); - return ret; -} -EXPORT_SYMBOL(cxgb4_register_uld); - -/** - * cxgb4_unregister_uld - unregister an upper-layer driver - * @type: the ULD type - * - * Unregisters an existing upper-layer driver. - */ -int cxgb4_unregister_uld(enum cxgb4_uld type) -{ - struct adapter *adap; - - if (type >= CXGB4_ULD_MAX) - return -EINVAL; - mutex_lock(&uld_mutex); - list_for_each_entry(adap, &adapter_list, list_node) - adap->uld_handle[type] = NULL; - ulds[type].add = NULL; - mutex_unlock(&uld_mutex); - return 0; -} -EXPORT_SYMBOL(cxgb4_unregister_uld); - -/** - * cxgb_up - enable the adapter - * @adap: adapter being enabled - * - * Called when the first port is enabled, this function performs the - * actions necessary to make an adapter operational, such as completing - * the initialization of HW modules, and enabling interrupts. - * - * Must be called with the rtnl lock held. - */ -static int cxgb_up(struct adapter *adap) -{ - int err; - - err = setup_sge_queues(adap); - if (err) - goto out; - err = setup_rss(adap); - if (err) - goto freeq; - - if (adap->flags & USING_MSIX) { - name_msix_vecs(adap); - err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, - adap->msix_info[0].desc, adap); - if (err) - goto irq_err; - - err = request_msix_queue_irqs(adap); - if (err) { - free_irq(adap->msix_info[0].vec, adap); - goto irq_err; - } - } else { - err = request_irq(adap->pdev->irq, t4_intr_handler(adap), - (adap->flags & USING_MSI) ? 
0 : IRQF_SHARED, - adap->port[0]->name, adap); - if (err) - goto irq_err; - } - enable_rx(adap); - t4_sge_start(adap); - t4_intr_enable(adap); - adap->flags |= FULL_INIT_DONE; - notify_ulds(adap, CXGB4_STATE_UP); - out: - return err; - irq_err: - dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); - freeq: - t4_free_sge_resources(adap); - goto out; -} - -static void cxgb_down(struct adapter *adapter) -{ - t4_intr_disable(adapter); - cancel_work_sync(&adapter->tid_release_task); - adapter->tid_release_task_busy = false; - adapter->tid_release_head = NULL; - - if (adapter->flags & USING_MSIX) { - free_msix_queue_irqs(adapter); - free_irq(adapter->msix_info[0].vec, adapter); - } else - free_irq(adapter->pdev->irq, adapter); - quiesce_rx(adapter); - t4_sge_stop(adapter); - t4_free_sge_resources(adapter); - adapter->flags &= ~FULL_INIT_DONE; -} - -/* - * net_device operations - */ -static int cxgb_open(struct net_device *dev) -{ - int err; - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - netif_carrier_off(dev); - - if (!(adapter->flags & FULL_INIT_DONE)) { - err = cxgb_up(adapter); - if (err < 0) - return err; - } - - err = link_start(dev); - if (!err) - netif_tx_start_all_queues(dev); - return err; -} - -static int cxgb_close(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - netif_tx_stop_all_queues(dev); - netif_carrier_off(dev); - return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); -} - -static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, - struct rtnl_link_stats64 *ns) -{ - struct port_stats stats; - struct port_info *p = netdev_priv(dev); - struct adapter *adapter = p->adapter; - - spin_lock(&adapter->stats_lock); - t4_get_port_stats(adapter, p->tx_chan, &stats); - spin_unlock(&adapter->stats_lock); - - ns->tx_bytes = stats.tx_octets; - ns->tx_packets = stats.tx_frames; - ns->rx_bytes = stats.rx_octets; - ns->rx_packets = stats.rx_frames; - ns->multicast = stats.rx_mcast_frames; - - /* detailed rx_errors */ - ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + - stats.rx_runt; - ns->rx_over_errors = 0; - ns->rx_crc_errors = stats.rx_fcs_err; - ns->rx_frame_errors = stats.rx_symbol_err; - ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + - stats.rx_ovflow2 + stats.rx_ovflow3 + - stats.rx_trunc0 + stats.rx_trunc1 + - stats.rx_trunc2 + stats.rx_trunc3; - ns->rx_missed_errors = 0; - - /* detailed tx_errors */ - ns->tx_aborted_errors = 0; - ns->tx_carrier_errors = 0; - ns->tx_fifo_errors = 0; - ns->tx_heartbeat_errors = 0; - ns->tx_window_errors = 0; - - ns->tx_errors = stats.tx_error_frames; - ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + - ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; - return ns; -} - -static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) -{ - unsigned int mbox; - int ret = 0, prtad, devad; - struct port_info *pi = netdev_priv(dev); - struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; - - switch (cmd) { - case SIOCGMIIPHY: - if (pi->mdio_addr < 0) - return -EOPNOTSUPP; - data->phy_id = pi->mdio_addr; - break; - case SIOCGMIIREG: - case SIOCSMIIREG: - if (mdio_phy_id_is_c45(data->phy_id)) { - prtad = mdio_phy_id_prtad(data->phy_id); - devad = mdio_phy_id_devad(data->phy_id); - } else if (data->phy_id < 32) { - prtad = data->phy_id; - devad = 0; - data->reg_num &= 0x1f; - } else - return -EINVAL; - - mbox = pi->adapter->fn; - if (cmd == 
SIOCGMIIREG) - ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, - data->reg_num, &data->val_out); - else - ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, - data->reg_num, data->val_in); - break; - default: - return -EOPNOTSUPP; - } - return ret; -} - -static void cxgb_set_rxmode(struct net_device *dev) -{ - /* unfortunately we can't return errors to the stack */ - set_rxmode(dev, -1, false); -} - -static int cxgb_change_mtu(struct net_device *dev, int new_mtu) -{ - int ret; - struct port_info *pi = netdev_priv(dev); - - if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ - return -EINVAL; - ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1, - -1, -1, -1, true); - if (!ret) - dev->mtu = new_mtu; - return ret; -} - -static int cxgb_set_mac_addr(struct net_device *dev, void *p) -{ - int ret; - struct sockaddr *addr = p; - struct port_info *pi = netdev_priv(dev); - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - - ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid, - pi->xact_addr_filt, addr->sa_data, true, true); - if (ret < 0) - return ret; - - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - pi->xact_addr_filt = ret; - return 0; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void cxgb_netpoll(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adap = pi->adapter; - - if (adap->flags & USING_MSIX) { - int i; - struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; - - for (i = pi->nqsets; i; i--, rx++) - t4_sge_intr_msix(0, &rx->rspq); - } else - t4_intr_handler(adap)(0, adap); -} -#endif - -static const struct net_device_ops cxgb4_netdev_ops = { - .ndo_open = cxgb_open, - .ndo_stop = cxgb_close, - .ndo_start_xmit = t4_eth_xmit, - .ndo_get_stats64 = cxgb_get_stats, - .ndo_set_rx_mode = cxgb_set_rxmode, - .ndo_set_mac_address = cxgb_set_mac_addr, - .ndo_set_features = cxgb_set_features, - .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = cxgb_ioctl, - .ndo_change_mtu = cxgb_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = cxgb_netpoll, -#endif -}; - -void t4_fatal_err(struct adapter *adap) -{ - t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); - t4_intr_disable(adap); - dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); -} - -static void setup_memwin(struct adapter *adap) -{ - u32 bar0; - - bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ - t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), - (bar0 + MEMWIN0_BASE) | BIR(0) | - WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); - t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), - (bar0 + MEMWIN1_BASE) | BIR(0) | - WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); - t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), - (bar0 + MEMWIN2_BASE) | BIR(0) | - WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); - if (adap->vres.ocq.size) { - unsigned int start, sz_kb; - - start = pci_resource_start(adap->pdev, 2) + - OCQ_WIN_OFFSET(adap->pdev, &adap->vres); - sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; - t4_write_reg(adap, - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3), - start | BIR(1) | WINDOW(ilog2(sz_kb))); - t4_write_reg(adap, - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3), - adap->vres.ocq.start); - t4_read_reg(adap, - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3)); - } -} - -static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) -{ - u32 v; - int ret; - - /* get device capabilities */ - memset(c, 0, 
sizeof(*c)); - c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ); - c->retval_len16 = htonl(FW_LEN16(*c)); - ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); - if (ret < 0) - return ret; - - /* select capabilities we'll be using */ - if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { - if (!vf_acls) - c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); - else - c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM); - } else if (vf_acls) { - dev_err(adap->pdev_dev, "virtualization ACLs not supported"); - return ret; - } - c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); - if (ret < 0) - return ret; - - ret = t4_config_glbl_rss(adap, adap->fn, - FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, - FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | - FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); - if (ret < 0) - return ret; - - ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, - 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); - if (ret < 0) - return ret; - - t4_sge_init(adap); - - /* tweak some settings */ - t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); - t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); - t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); - v = t4_read_reg(adap, TP_PIO_DATA); - t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); - - /* get basic stuff going */ - return t4_early_init(adap, adap->fn); -} - -/* - * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. - */ -#define MAX_ATIDS 8192U - -/* - * Phase 0 of initialization: contact FW, obtain config, perform basic init. - */ -static int adap_init0(struct adapter *adap) -{ - int ret; - u32 v, port_vec; - enum dev_state state; - u32 params[7], val[7]; - struct fw_caps_config_cmd c; - - ret = t4_check_fw_version(adap); - if (ret == -EINVAL || ret > 0) { - if (upgrade_fw(adap) >= 0) /* recache FW version */ - ret = t4_check_fw_version(adap); - } - if (ret < 0) - return ret; - - /* contact FW, request master */ - ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state); - if (ret < 0) { - dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", - ret); - return ret; - } - - /* reset device */ - ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST); - if (ret < 0) - goto bye; - - for (v = 0; v < SGE_NTIMERS - 1; v++) - adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); - adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; - adap->sge.counter_val[0] = 1; - for (v = 1; v < SGE_NCOUNTERS; v++) - adap->sge.counter_val[v] = min(intr_cnt[v - 1], - THRESHOLD_3_MASK); -#define FW_PARAM_DEV(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) - - params[0] = FW_PARAM_DEV(CCLK); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val); - if (ret < 0) - goto bye; - adap->params.vpd.cclk = val[0]; - - ret = adap_init1(adap, &c); - if (ret < 0) - goto bye; - -#define FW_PARAM_PFVF(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ - FW_PARAMS_PARAM_Y(adap->fn)) - - params[0] = FW_PARAM_DEV(PORTVEC); - params[1] = FW_PARAM_PFVF(L2T_START); - params[2] = FW_PARAM_PFVF(L2T_END); - params[3] = FW_PARAM_PFVF(FILTER_START); - params[4] = FW_PARAM_PFVF(FILTER_END); - params[5] = FW_PARAM_PFVF(IQFLINT_START); - params[6] = FW_PARAM_PFVF(EQ_START); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val); - if (ret < 0) - goto bye; - port_vec = val[0]; - 
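/* val[] follows the params[] order set up above: val[0] is the port
 * vector, val[1]/val[2] (the L2T bounds) are queried but not stored
 * here, val[3]/val[4] delimit the filter-TID region, and val[5]/val[6]
 * give the first ingress and egress queue IDs. */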
adap->tids.ftid_base = val[3]; - adap->tids.nftids = val[4] - val[3] + 1; - adap->sge.ingr_start = val[5]; - adap->sge.egr_start = val[6]; - - if (c.ofldcaps) { - /* query offload-related parameters */ - params[0] = FW_PARAM_DEV(NTID); - params[1] = FW_PARAM_PFVF(SERVER_START); - params[2] = FW_PARAM_PFVF(SERVER_END); - params[3] = FW_PARAM_PFVF(TDDP_START); - params[4] = FW_PARAM_PFVF(TDDP_END); - params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, - val); - if (ret < 0) - goto bye; - adap->tids.ntids = val[0]; - adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); - adap->tids.stid_base = val[1]; - adap->tids.nstids = val[2] - val[1] + 1; - adap->vres.ddp.start = val[3]; - adap->vres.ddp.size = val[4] - val[3] + 1; - adap->params.ofldq_wr_cred = val[5]; - adap->params.offload = 1; - } - if (c.rdmacaps) { - params[0] = FW_PARAM_PFVF(STAG_START); - params[1] = FW_PARAM_PFVF(STAG_END); - params[2] = FW_PARAM_PFVF(RQ_START); - params[3] = FW_PARAM_PFVF(RQ_END); - params[4] = FW_PARAM_PFVF(PBL_START); - params[5] = FW_PARAM_PFVF(PBL_END); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, - val); - if (ret < 0) - goto bye; - adap->vres.stag.start = val[0]; - adap->vres.stag.size = val[1] - val[0] + 1; - adap->vres.rq.start = val[2]; - adap->vres.rq.size = val[3] - val[2] + 1; - adap->vres.pbl.start = val[4]; - adap->vres.pbl.size = val[5] - val[4] + 1; - - params[0] = FW_PARAM_PFVF(SQRQ_START); - params[1] = FW_PARAM_PFVF(SQRQ_END); - params[2] = FW_PARAM_PFVF(CQ_START); - params[3] = FW_PARAM_PFVF(CQ_END); - params[4] = FW_PARAM_PFVF(OCQ_START); - params[5] = FW_PARAM_PFVF(OCQ_END); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, - val); - if (ret < 0) - goto bye; - adap->vres.qp.start = val[0]; - adap->vres.qp.size = val[1] - val[0] + 1; - adap->vres.cq.start = val[2]; - adap->vres.cq.size = val[3] - val[2] + 1; - adap->vres.ocq.start = val[4]; - adap->vres.ocq.size = val[5] - val[4] + 1; - } - if (c.iscsicaps) { - params[0] = FW_PARAM_PFVF(ISCSI_START); - params[1] = FW_PARAM_PFVF(ISCSI_END); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params, - val); - if (ret < 0) - goto bye; - adap->vres.iscsi.start = val[0]; - adap->vres.iscsi.size = val[1] - val[0] + 1; - } -#undef FW_PARAM_PFVF -#undef FW_PARAM_DEV - - adap->params.nports = hweight32(port_vec); - adap->params.portvec = port_vec; - adap->flags |= FW_OK; - - /* These are finalized by FW initialization, load their values now */ - v = t4_read_reg(adap, TP_TIMER_RESOLUTION); - adap->params.tp.tre = TIMERRESOLUTION_GET(v); - t4_read_mtu_tbl(adap, adap->params.mtus, NULL); - t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, - adap->params.b_wnd); - -#ifdef CONFIG_PCI_IOV - /* - * Provision resource limits for Virtual Functions. We currently - * grant them all the same static resource limits except for the Port - * Access Rights Mask which we're assigning based on the PF. All of - * the static provisioning stuff for both the PF and VF really needs - * to be managed in a persistent manner for each device which the - * firmware controls. - */ - { - int pf, vf; - - for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) { - if (num_vf[pf] <= 0) - continue; - - /* VF numbering starts at 1! 
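 * (FW PF/VF commands address the PF's own resources with VF index 0 --
 * see the t4_cfg_pfvf(..., 0, ...) call in adap_init1() above -- hence
 * provisioning of real VFs starts at 1.)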
*/ - for (vf = 1; vf <= num_vf[pf]; vf++) { - ret = t4_cfg_pfvf(adap, adap->fn, pf, vf, - VFRES_NEQ, VFRES_NETHCTRL, - VFRES_NIQFLINT, VFRES_NIQ, - VFRES_TC, VFRES_NVI, - FW_PFVF_CMD_CMASK_MASK, - pfvfres_pmask(adap, pf, vf), - VFRES_NEXACTF, - VFRES_R_CAPS, VFRES_WX_CAPS); - if (ret < 0) - dev_warn(adap->pdev_dev, "failed to " - "provision pf/vf=%d/%d; " - "err=%d\n", pf, vf, ret); - } - } - } -#endif - - setup_memwin(adap); - return 0; - - /* - * If a command timed out or failed with EIO FW does not operate within - * its spec or something catastrophic happened to HW/FW, stop issuing - * commands. - */ -bye: if (ret != -ETIMEDOUT && ret != -EIO) - t4_fw_bye(adap, adap->fn); - return ret; -} - -/* EEH callbacks */ - -static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - int i; - struct adapter *adap = pci_get_drvdata(pdev); - - if (!adap) - goto out; - - rtnl_lock(); - adap->flags &= ~FW_OK; - notify_ulds(adap, CXGB4_STATE_START_RECOVERY); - for_each_port(adap, i) { - struct net_device *dev = adap->port[i]; - - netif_device_detach(dev); - netif_carrier_off(dev); - } - if (adap->flags & FULL_INIT_DONE) - cxgb_down(adap); - rtnl_unlock(); - pci_disable_device(pdev); -out: return state == pci_channel_io_perm_failure ? - PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; -} - -static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) -{ - int i, ret; - struct fw_caps_config_cmd c; - struct adapter *adap = pci_get_drvdata(pdev); - - if (!adap) { - pci_restore_state(pdev); - pci_save_state(pdev); - return PCI_ERS_RESULT_RECOVERED; - } - - if (pci_enable_device(pdev)) { - dev_err(&pdev->dev, "cannot reenable PCI device after reset\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - - pci_set_master(pdev); - pci_restore_state(pdev); - pci_save_state(pdev); - pci_cleanup_aer_uncorrect_error_status(pdev); - - if (t4_wait_dev_ready(adap) < 0) - return PCI_ERS_RESULT_DISCONNECT; - if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL)) - return PCI_ERS_RESULT_DISCONNECT; - adap->flags |= FW_OK; - if (adap_init1(adap, &c)) - return PCI_ERS_RESULT_DISCONNECT; - - for_each_port(adap, i) { - struct port_info *p = adap2pinfo(adap, i); - - ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1, - NULL, NULL); - if (ret < 0) - return PCI_ERS_RESULT_DISCONNECT; - p->viid = ret; - p->xact_addr_filt = -1; - } - - t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, - adap->params.b_wnd); - setup_memwin(adap); - if (cxgb_up(adap)) - return PCI_ERS_RESULT_DISCONNECT; - return PCI_ERS_RESULT_RECOVERED; -} - -static void eeh_resume(struct pci_dev *pdev) -{ - int i; - struct adapter *adap = pci_get_drvdata(pdev); - - if (!adap) - return; - - rtnl_lock(); - for_each_port(adap, i) { - struct net_device *dev = adap->port[i]; - - if (netif_running(dev)) { - link_start(dev); - cxgb_set_rxmode(dev); - } - netif_device_attach(dev); - } - rtnl_unlock(); -} - -static struct pci_error_handlers cxgb4_eeh = { - .error_detected = eeh_err_detected, - .slot_reset = eeh_slot_reset, - .resume = eeh_resume, -}; - -static inline bool is_10g_port(const struct link_config *lc) -{ - return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; -} - -static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, - unsigned int size, unsigned int iqe_size) -{ - q->intr_params = QINTR_TIMER_IDX(timer_idx) | - (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); - q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? 
pkt_cnt_idx : 0; - q->iqe_len = iqe_size; - q->size = size; -} - -/* - * Perform default configuration of DMA queues depending on the number and type - * of ports we found and the number of available CPUs. Most settings can be - * modified by the admin prior to actual use. - */ -static void __devinit cfg_queues(struct adapter *adap) -{ - struct sge *s = &adap->sge; - int i, q10g = 0, n10g = 0, qidx = 0; - - for_each_port(adap, i) - n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); - - /* - * We default to 1 queue per non-10G port and up to # of cores queues - * per 10G port. - */ - if (n10g) - q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; - if (q10g > num_online_cpus()) - q10g = num_online_cpus(); - - for_each_port(adap, i) { - struct port_info *pi = adap2pinfo(adap, i); - - pi->first_qset = qidx; - pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1; - qidx += pi->nqsets; - } - - s->ethqsets = qidx; - s->max_ethqsets = qidx; /* MSI-X may lower it later */ - - if (is_offload(adap)) { - /* - * For offload we use 1 queue/channel if all ports are up to 1G, - * otherwise we divide all available queues amongst the channels - * capped by the number of available cores. - */ - if (n10g) { - i = min_t(int, ARRAY_SIZE(s->ofldrxq), - num_online_cpus()); - s->ofldqsets = roundup(i, adap->params.nports); - } else - s->ofldqsets = adap->params.nports; - /* For RDMA one Rx queue per channel suffices */ - s->rdmaqs = adap->params.nports; - } - - for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { - struct sge_eth_rxq *r = &s->ethrxq[i]; - - init_rspq(&r->rspq, 0, 0, 1024, 64); - r->fl.size = 72; - } - - for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) - s->ethtxq[i].q.size = 1024; - - for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) - s->ctrlq[i].q.size = 512; - - for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) - s->ofldtxq[i].q.size = 1024; - - for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { - struct sge_ofld_rxq *r = &s->ofldrxq[i]; - - init_rspq(&r->rspq, 0, 0, 1024, 64); - r->rspq.uld = CXGB4_ULD_ISCSI; - r->fl.size = 72; - } - - for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { - struct sge_ofld_rxq *r = &s->rdmarxq[i]; - - init_rspq(&r->rspq, 0, 0, 511, 64); - r->rspq.uld = CXGB4_ULD_RDMA; - r->fl.size = 72; - } - - init_rspq(&s->fw_evtq, 6, 0, 512, 64); - init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); -} - -/* - * Reduce the number of Ethernet queues across all ports to at most n. - * n provides at least one queue per port. 
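For the queue budget computed in cfg_queues() above, a quick standalone arithmetic check: every non-10G port keeps one queue set and the 10G ports split the remainder, capped by the online CPU count. MAX_ETH_QSETS = 32 is assumed here; the authoritative value lives in cxgb4.h.

#include <stdio.h>

#define MAX_ETH_QSETS 32	/* assumed; see cxgb4.h for the real value */

int main(void)
{
	int nports = 4, n10g = 2, ncpus = 8;

	/* one qset per 1G port, remainder shared by the 10G ports */
	int q10g = (MAX_ETH_QSETS - (nports - n10g)) / n10g;
	if (q10g > ncpus)
		q10g = ncpus;

	/* (32 - 2) / 2 = 15, capped to the 8 online CPUs */
	printf("qsets per 10G port: %d\n", q10g);
	printf("total ethqsets: %d\n", n10g * q10g + (nports - n10g));
	return 0;
}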
- */ -static void __devinit reduce_ethqs(struct adapter *adap, int n) -{ - int i; - struct port_info *pi; - - while (n < adap->sge.ethqsets) - for_each_port(adap, i) { - pi = adap2pinfo(adap, i); - if (pi->nqsets > 1) { - pi->nqsets--; - adap->sge.ethqsets--; - if (adap->sge.ethqsets <= n) - break; - } - } - - n = 0; - for_each_port(adap, i) { - pi = adap2pinfo(adap, i); - pi->first_qset = n; - n += pi->nqsets; - } -} - -/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ -#define EXTRA_VECS 2 - -static int __devinit enable_msix(struct adapter *adap) -{ - int ofld_need = 0; - int i, err, want, need; - struct sge *s = &adap->sge; - unsigned int nchan = adap->params.nports; - struct msix_entry entries[MAX_INGQ + 1]; - - for (i = 0; i < ARRAY_SIZE(entries); ++i) - entries[i].entry = i; - - want = s->max_ethqsets + EXTRA_VECS; - if (is_offload(adap)) { - want += s->rdmaqs + s->ofldqsets; - /* need nchan for each possible ULD */ - ofld_need = 2 * nchan; - } - need = adap->params.nports + EXTRA_VECS + ofld_need; - - while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need) - want = err; - - if (!err) { - /* - * Distribute available vectors to the various queue groups. - * Every group gets its minimum requirement and NIC gets top - * priority for leftovers. - */ - i = want - EXTRA_VECS - ofld_need; - if (i < s->max_ethqsets) { - s->max_ethqsets = i; - if (i < s->ethqsets) - reduce_ethqs(adap, i); - } - if (is_offload(adap)) { - i = want - EXTRA_VECS - s->max_ethqsets; - i -= ofld_need - nchan; - s->ofldqsets = (i / nchan) * nchan; /* round down */ - } - for (i = 0; i < want; ++i) - adap->msix_info[i].vec = entries[i].vector; - } else if (err > 0) - dev_info(adap->pdev_dev, - "only %d MSI-X vectors left, not using MSI-X\n", err); - return err; -} - -#undef EXTRA_VECS - -static int __devinit init_rss(struct adapter *adap) -{ - unsigned int i, j; - - for_each_port(adap, i) { - struct port_info *pi = adap2pinfo(adap, i); - - pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); - if (!pi->rss) - return -ENOMEM; - for (j = 0; j < pi->rss_size; j++) - pi->rss[j] = j % pi->nqsets; - } - return 0; -} - -static void __devinit print_port_info(const struct net_device *dev) -{ - static const char *base[] = { - "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4", - "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4" - }; - - char buf[80]; - char *bufp = buf; - const char *spd = ""; - const struct port_info *pi = netdev_priv(dev); - const struct adapter *adap = pi->adapter; - - if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) - spd = " 2.5 GT/s"; - else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) - spd = " 5 GT/s"; - - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) - bufp += sprintf(bufp, "100/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) - bufp += sprintf(bufp, "1000/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) - bufp += sprintf(bufp, "10G/"); - if (bufp != buf) - --bufp; - sprintf(bufp, "BASE-%s", base[pi->port_type]); - - netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", - adap->params.vpd.id, adap->params.rev, buf, - is_offload(adap) ? "R" : "", adap->params.pci.width, spd, - (adap->flags & USING_MSIX) ? " MSI-X" : - (adap->flags & USING_MSI) ? 
" MSI" : ""); - netdev_info(dev, "S/N: %s, E/C: %s\n", - adap->params.vpd.sn, adap->params.vpd.ec); -} - -static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev) -{ - u16 v; - int pos; - - pos = pci_pcie_cap(dev); - if (pos > 0) { - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v); - v |= PCI_EXP_DEVCTL_RELAX_EN; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v); - } -} - -/* - * Free the following resources: - * - memory used for tables - * - MSI/MSI-X - * - net devices - * - resources FW is holding for us - */ -static void free_some_resources(struct adapter *adapter) -{ - unsigned int i; - - t4_free_mem(adapter->l2t); - t4_free_mem(adapter->tids.tid_tab); - disable_msi(adapter); - - for_each_port(adapter, i) - if (adapter->port[i]) { - kfree(adap2pinfo(adapter, i)->rss); - free_netdev(adapter->port[i]); - } - if (adapter->flags & FW_OK) - t4_fw_bye(adapter, adapter->fn); -} - -#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) -#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ - NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) - -static int __devinit init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - int func, i, err; - struct port_info *pi; - unsigned int highdma = 0; - struct adapter *adapter = NULL; - - printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); - - err = pci_request_regions(pdev, KBUILD_MODNAME); - if (err) { - /* Just info, some other driver may have claimed the device. */ - dev_info(&pdev->dev, "cannot obtain PCI resources\n"); - return err; - } - - /* We control everything through one PF */ - func = PCI_FUNC(pdev->devfn); - if (func != ent->driver_data) { - pci_save_state(pdev); /* to restore SR-IOV later */ - goto sriov; - } - - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - goto out_release_regions; - } - - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - highdma = NETIF_F_HIGHDMA; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " - "coherent allocations\n"); - goto out_disable_device; - } - } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto out_disable_device; - } - } - - pci_enable_pcie_error_reporting(pdev); - enable_pcie_relaxed_ordering(pdev); - pci_set_master(pdev); - pci_save_state(pdev); - - adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); - if (!adapter) { - err = -ENOMEM; - goto out_disable_device; - } - - adapter->regs = pci_ioremap_bar(pdev, 0); - if (!adapter->regs) { - dev_err(&pdev->dev, "cannot map device registers\n"); - err = -ENOMEM; - goto out_free_adapter; - } - - adapter->pdev = pdev; - adapter->pdev_dev = &pdev->dev; - adapter->fn = func; - adapter->msg_enable = dflt_msg_enable; - memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); - - spin_lock_init(&adapter->stats_lock); - spin_lock_init(&adapter->tid_release_lock); - - INIT_WORK(&adapter->tid_release_task, process_tid_release_list); - - err = t4_prep_adapter(adapter); - if (err) - goto out_unmap_bar; - err = adap_init0(adapter); - if (err) - goto out_unmap_bar; - - for_each_port(adapter, i) { - struct net_device *netdev; - - netdev = alloc_etherdev_mq(sizeof(struct port_info), - MAX_ETH_QSETS); - if (!netdev) { - err = -ENOMEM; - goto out_free_dev; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - adapter->port[i] = netdev; - pi = netdev_priv(netdev); - pi->adapter = adapter; - pi->xact_addr_filt = 
-1; - pi->port_id = i; - netdev->irq = pdev->irq; - - netdev->hw_features = NETIF_F_SG | TSO_FLAGS | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | NETIF_F_RXHASH | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - netdev->features |= netdev->hw_features | highdma; - netdev->vlan_features = netdev->features & VLAN_FEAT; - - netdev->netdev_ops = &cxgb4_netdev_ops; - SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); - } - - pci_set_drvdata(pdev, adapter); - - if (adapter->flags & FW_OK) { - err = t4_port_init(adapter, func, func, 0); - if (err) - goto out_free_dev; - } - - /* - * Configure queues and allocate tables now, they can be needed as - * soon as the first register_netdev completes. - */ - cfg_queues(adapter); - - adapter->l2t = t4_init_l2t(); - if (!adapter->l2t) { - /* We tolerate a lack of L2T, giving up some functionality */ - dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); - adapter->params.offload = 0; - } - - if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { - dev_warn(&pdev->dev, "could not allocate TID table, " - "continuing\n"); - adapter->params.offload = 0; - } - - /* See what interrupts we'll be using */ - if (msi > 1 && enable_msix(adapter) == 0) - adapter->flags |= USING_MSIX; - else if (msi > 0 && pci_enable_msi(pdev) == 0) - adapter->flags |= USING_MSI; - - err = init_rss(adapter); - if (err) - goto out_free_dev; - - /* - * The card is now ready to go. If any errors occur during device - * registration we do not fail the whole card but rather proceed only - * with the ports we manage to register successfully. However we must - * register at least one net device. - */ - for_each_port(adapter, i) { - pi = adap2pinfo(adapter, i); - netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); - netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); - - err = register_netdev(adapter->port[i]); - if (err) - break; - adapter->chan_map[pi->tx_chan] = i; - print_port_info(adapter->port[i]); - } - if (i == 0) { - dev_err(&pdev->dev, "could not register any net devices\n"); - goto out_free_dev; - } - if (err) { - dev_warn(&pdev->dev, "only %d net devices registered\n", i); - err = 0; - } - - if (cxgb4_debugfs_root) { - adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), - cxgb4_debugfs_root); - setup_debugfs(adapter); - } - - if (is_offload(adapter)) - attach_ulds(adapter); - -sriov: -#ifdef CONFIG_PCI_IOV - if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) - if (pci_enable_sriov(pdev, num_vf[func]) == 0) - dev_info(&pdev->dev, - "instantiated %u virtual functions\n", - num_vf[func]); -#endif - return 0; - - out_free_dev: - free_some_resources(adapter); - out_unmap_bar: - iounmap(adapter->regs); - out_free_adapter: - kfree(adapter); - out_disable_device: - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); - out_release_regions: - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); - return err; -} - -static void __devexit remove_one(struct pci_dev *pdev) -{ - struct adapter *adapter = pci_get_drvdata(pdev); - - pci_disable_sriov(pdev); - - if (adapter) { - int i; - - if (is_offload(adapter)) - detach_ulds(adapter); - - for_each_port(adapter, i) - if (adapter->port[i]->reg_state == NETREG_REGISTERED) - unregister_netdev(adapter->port[i]); - - if (adapter->debugfs_root) - debugfs_remove_recursive(adapter->debugfs_root); - - if (adapter->flags & FULL_INIT_DONE) - cxgb_down(adapter); - - free_some_resources(adapter); - iounmap(adapter->regs); - kfree(adapter); - pci_disable_pcie_error_reporting(pdev); - 
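/* remove_one() unwinds init_one() in reverse: ULD detach, netdev
 * unregister, debugfs removal, cxgb_down() and resource/BAR cleanup
 * all happen above; the remaining PCI-level teardown follows. */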
pci_disable_device(pdev); - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); - } else - pci_release_regions(pdev); -} - -static struct pci_driver cxgb4_driver = { - .name = KBUILD_MODNAME, - .id_table = cxgb4_pci_tbl, - .probe = init_one, - .remove = __devexit_p(remove_one), - .err_handler = &cxgb4_eeh, -}; - -static int __init cxgb4_init_module(void) -{ - int ret; - - /* Debugfs support is optional, just warn if this fails */ - cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (!cxgb4_debugfs_root) - pr_warning("could not create debugfs entry, continuing\n"); - - ret = pci_register_driver(&cxgb4_driver); - if (ret < 0) - debugfs_remove(cxgb4_debugfs_root); - return ret; -} - -static void __exit cxgb4_cleanup_module(void) -{ - pci_unregister_driver(&cxgb4_driver); - debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ -} - -module_init(cxgb4_init_module); -module_exit(cxgb4_cleanup_module); diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h deleted file mode 100644 index b1d39b8..0000000 --- a/drivers/net/cxgb4/cxgb4_uld.h +++ /dev/null @@ -1,239 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef __CXGB4_OFLD_H -#define __CXGB4_OFLD_H - -#include -#include -#include -#include - -/* CPL message priority levels */ -enum { - CPL_PRIORITY_DATA = 0, /* data messages */ - CPL_PRIORITY_SETUP = 1, /* connection setup messages */ - CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */ - CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */ - CPL_PRIORITY_ACK = 1, /* RX ACK messages */ - CPL_PRIORITY_CONTROL = 1 /* control messages */ -}; - -#define INIT_TP_WR(w, tid) do { \ - (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \ - FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ - (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ - FW_WR_FLOWID(tid)); \ - (w)->wr.wr_lo = cpu_to_be64(0); \ -} while (0) - -#define INIT_TP_WR_CPL(w, cpl, tid) do { \ - INIT_TP_WR(w, tid); \ - OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \ -} while (0) - -#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ - (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \ - (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ - FW_WR_FLOWID(tid)); \ - (w)->wr.wr_lo = cpu_to_be64(0); \ -} while (0) - -/* Special asynchronous notification message */ -#define CXGB4_MSG_AN ((void *)1) - -struct serv_entry { - void *data; -}; - -union aopen_entry { - void *data; - union aopen_entry *next; -}; - -/* - * Holds the size, base address, free list start, etc of the TID, server TID, - * and active-open TID tables. The tables themselves are allocated dynamically. - */ -struct tid_info { - void **tid_tab; - unsigned int ntids; - - struct serv_entry *stid_tab; - unsigned long *stid_bmap; - unsigned int nstids; - unsigned int stid_base; - - union aopen_entry *atid_tab; - unsigned int natids; - - unsigned int nftids; - unsigned int ftid_base; - - spinlock_t atid_lock ____cacheline_aligned_in_smp; - union aopen_entry *afree; - unsigned int atids_in_use; - - spinlock_t stid_lock; - unsigned int stids_in_use; - - atomic_t tids_in_use; -}; - -static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) -{ - return tid < t->ntids ? t->tid_tab[tid] : NULL; -} - -static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) -{ - return atid < t->natids ? t->atid_tab[atid].data : NULL; -} - -static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) -{ - stid -= t->stid_base; - return stid < t->nstids ? 
t->stid_tab[stid].data : NULL; -} - -static inline void cxgb4_insert_tid(struct tid_info *t, void *data, - unsigned int tid) -{ - t->tid_tab[tid] = data; - atomic_inc(&t->tids_in_use); -} - -int cxgb4_alloc_atid(struct tid_info *t, void *data); -int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); -void cxgb4_free_atid(struct tid_info *t, unsigned int atid); -void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); -void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); - -struct in6_addr; - -int cxgb4_create_server(const struct net_device *dev, unsigned int stid, - __be32 sip, __be16 sport, unsigned int queue); - -static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) -{ - skb_set_queue_mapping(skb, (queue << 1) | prio); -} - -enum cxgb4_uld { - CXGB4_ULD_RDMA, - CXGB4_ULD_ISCSI, - CXGB4_ULD_MAX -}; - -enum cxgb4_state { - CXGB4_STATE_UP, - CXGB4_STATE_START_RECOVERY, - CXGB4_STATE_DOWN, - CXGB4_STATE_DETACH -}; - -struct pci_dev; -struct l2t_data; -struct net_device; -struct pkt_gl; -struct tp_tcp_stats; - -struct cxgb4_range { - unsigned int start; - unsigned int size; -}; - -struct cxgb4_virt_res { /* virtualized HW resources */ - struct cxgb4_range ddp; - struct cxgb4_range iscsi; - struct cxgb4_range stag; - struct cxgb4_range rq; - struct cxgb4_range pbl; - struct cxgb4_range qp; - struct cxgb4_range cq; - struct cxgb4_range ocq; -}; - -#define OCQ_WIN_OFFSET(pdev, vres) \ - (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size)) - -/* - * Block of information the LLD provides to ULDs attaching to a device. - */ -struct cxgb4_lld_info { - struct pci_dev *pdev; /* associated PCI device */ - struct l2t_data *l2t; /* L2 table */ - struct tid_info *tids; /* TID table */ - struct net_device **ports; /* device ports */ - const struct cxgb4_virt_res *vr; /* assorted HW resources */ - const unsigned short *mtus; /* MTU table */ - const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ - unsigned short nrxq; /* # of Rx queues */ - unsigned short ntxq; /* # of Tx queues */ - unsigned char nchan:4; /* # of channels */ - unsigned char nports:4; /* # of ports */ - unsigned char wr_cred; /* WR 16-byte credits */ - unsigned char adapter_type; /* type of adapter */ - unsigned char fw_api_ver; /* FW API version */ - unsigned int fw_vers; /* FW version */ - unsigned int iscsi_iolen; /* iSCSI max I/O length */ - unsigned short udb_density; /* # of user DB/page */ - unsigned short ucq_density; /* # of user CQs/page */ - void __iomem *gts_reg; /* address of GTS register */ - void __iomem *db_reg; /* address of kernel doorbell */ -}; - -struct cxgb4_uld_info { - const char *name; - void *(*add)(const struct cxgb4_lld_info *p); - int (*rx_handler)(void *handle, const __be64 *rsp, - const struct pkt_gl *gl); - int (*state_change)(void *handle, enum cxgb4_state new_state); -}; - -int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); -int cxgb4_unregister_uld(enum cxgb4_uld type); -int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); -unsigned int cxgb4_port_chan(const struct net_device *dev); -unsigned int cxgb4_port_viid(const struct net_device *dev); -unsigned int cxgb4_port_idx(const struct net_device *dev); -unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, - unsigned int *idx); -void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, - struct tp_tcp_stats *v6); -void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, - const 
unsigned int *pgsz_order); -struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, - unsigned int skb_len, unsigned int pull_len); -#endif /* !__CXGB4_OFLD_H */ diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c deleted file mode 100644 index a2d323c..0000000 --- a/drivers/net/cxgb4/l2t.c +++ /dev/null @@ -1,597 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include -#include -#include -#include -#include -#include -#include "cxgb4.h" -#include "l2t.h" -#include "t4_msg.h" -#include "t4fw_api.h" - -#define VLAN_NONE 0xfff - -/* identifies sync vs async L2T_WRITE_REQs */ -#define F_SYNC_WR (1 << 12) - -enum { - L2T_STATE_VALID, /* entry is up to date */ - L2T_STATE_STALE, /* entry may be used but needs revalidation */ - L2T_STATE_RESOLVING, /* entry needs address resolution */ - L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ - - /* when state is one of the below the entry is not hashed */ - L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ - L2T_STATE_UNUSED /* entry not in use */ -}; - -struct l2t_data { - rwlock_t lock; - atomic_t nfree; /* number of free entries */ - struct l2t_entry *rover; /* starting point for next allocation */ - struct l2t_entry l2tab[L2T_SIZE]; -}; - -static inline unsigned int vlan_prio(const struct l2t_entry *e) -{ - return e->vlan >> 13; -} - -static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) -{ - if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ - atomic_dec(&d->nfree); -} - -/* - * To avoid having to check address families we do not allow v4 and v6 - * neighbors to be on the same hash chain. We keep v4 entries in the first - * half of available hash buckets and v6 in the second. 
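The split hash table described above is worth seeing in isolation: IPv4 keys land in buckets [0, L2T_SZ_HALF) and IPv6 keys in [L2T_SZ_HALF, L2T_SIZE), so a chain never mixes address families and lookups need no family check. A minimal userspace sketch of the idea follows; mix2() is only a stand-in for jhash_2words(), and the table size is illustrative:

#include <stdint.h>
#include <stdio.h>

#define L2T_SIZE      256
#define L2T_SZ_HALF   (L2T_SIZE / 2)
#define L2T_HASH_MASK (L2T_SZ_HALF - 1)

/* stand-in for jhash_2words(); any decent 2-word mixer will do here */
static unsigned int mix2(uint32_t a, uint32_t b)
{
        uint32_t h = a * 0x9e3779b9u ^ b * 0x85ebca6bu;

        return h ^ (h >> 16);
}

static unsigned int v4_hash(const uint32_t *key, int ifindex)
{
        return mix2(key[0], ifindex) & L2T_HASH_MASK;   /* first half */
}

static unsigned int v6_hash(const uint32_t *key, int ifindex)
{
        uint32_t xor = key[0] ^ key[1] ^ key[2] ^ key[3];

        return L2T_SZ_HALF + (mix2(xor, ifindex) & L2T_HASH_MASK);
}

int main(void)
{
        uint32_t v4 = 0x0a000001;                   /* 10.0.0.1 */
        uint32_t v6[4] = { 0xfe800000, 0, 0, 1 };   /* fe80::1 */

        printf("v4 bucket %u (always < %u)\n", v4_hash(&v4, 2), L2T_SZ_HALF);
        printf("v6 bucket %u (always >= %u)\n", v6_hash(v6, 2), L2T_SZ_HALF);
        return 0;
}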
- */
-enum {
-        L2T_SZ_HALF = L2T_SIZE / 2,
-        L2T_HASH_MASK = L2T_SZ_HALF - 1
-};
-
-static inline unsigned int arp_hash(const u32 *key, int ifindex)
-{
-        return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
-}
-
-static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
-{
-        u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
-
-        return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
-}
-
-static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
-{
-        return addr_len == 4 ? arp_hash(addr, ifindex) :
-                               ipv6_hash(addr, ifindex);
-}
-
-/*
- * Checks if an L2T entry is for the given IP/IPv6 address. It does not check
- * whether the L2T entry and the address are of the same address family.
- * Callers ensure an address is only checked against L2T entries of the same
- * family, something made trivial by the separation of IP and IPv6 hash chains
- * mentioned above. Returns 0 if there's a match.
- */
-static int addreq(const struct l2t_entry *e, const u32 *addr)
-{
-        if (e->v6)
-                return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
-                       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
-        return e->addr[0] ^ addr[0];
-}
-
-static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
-{
-        neigh_hold(n);
-        if (e->neigh)
-                neigh_release(e->neigh);
-        e->neigh = n;
-}
-
-/*
- * Write an L2T entry. Must be called with the entry locked.
- * The write may be synchronous or asynchronous.
- */
-static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
-{
-        struct sk_buff *skb;
-        struct cpl_l2t_write_req *req;
-
-        skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
-        if (!skb)
-                return -ENOMEM;
-
-        req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
-        INIT_TP_WR(req, 0);
-
-        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
-                                        e->idx | (sync ? F_SYNC_WR : 0) |
-                                        TID_QID(adap->sge.fw_evtq.abs_id)));
-        req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
-        req->l2t_idx = htons(e->idx);
-        req->vlan = htons(e->vlan);
-        if (e->neigh)
-                memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
-        memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
-
-        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
-        t4_ofld_send(adap, skb);
-
-        if (sync && e->state != L2T_STATE_SWITCHING)
-                e->state = L2T_STATE_SYNC_WRITE;
-        return 0;
-}
-
-/*
- * Send packets waiting in an L2T entry's ARP queue. Must be called with the
- * entry locked.
- */
-static void send_pending(struct adapter *adap, struct l2t_entry *e)
-{
-        while (e->arpq_head) {
-                struct sk_buff *skb = e->arpq_head;
-
-                e->arpq_head = skb->next;
-                skb->next = NULL;
-                t4_ofld_send(adap, skb);
-        }
-        e->arpq_tail = NULL;
-}
-
-/*
- * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a
- * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T
- * index it refers to.
- */
-void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
-{
-        unsigned int tid = GET_TID(rpl);
-        unsigned int idx = tid & (L2T_SIZE - 1);
-
-        if (unlikely(rpl->status != CPL_ERR_NONE)) {
-                dev_err(adap->pdev_dev,
-                        "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
-                        rpl->status, idx);
-                return;
-        }
-
-        if (tid & F_SYNC_WR) {
-                struct l2t_entry *e = &adap->l2t->l2tab[idx];
-
-                spin_lock(&e->lock);
-                if (e->state != L2T_STATE_SWITCHING) {
-                        send_pending(adap, e);
-                        e->state = (e->neigh->nud_state & NUD_STALE) ?
-                                   L2T_STATE_STALE : L2T_STATE_VALID;
-                }
-                spin_unlock(&e->lock);
-        }
-}
-
-/*
- * Add a packet to an L2T entry's queue of packets awaiting resolution.
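addreq() above compares addresses without any early-exit branches: it XORs the words and ORs the differences together, so the result is zero exactly when every word matches. The same trick, as a standalone check:

#include <assert.h>
#include <stdint.h>

/* branch-free 4-word compare in the style of addreq(): 0 iff equal */
static uint32_t diff4(const uint32_t *a, const uint32_t *b)
{
        return (a[0] ^ b[0]) | (a[1] ^ b[1]) |
               (a[2] ^ b[2]) | (a[3] ^ b[3]);
}

int main(void)
{
        uint32_t x[4] = { 1, 2, 3, 4 };
        uint32_t y[4] = { 1, 2, 3, 4 };
        uint32_t z[4] = { 1, 2, 3, 5 };

        assert(diff4(x, y) == 0);       /* match */
        assert(diff4(x, z) != 0);       /* any differing word is enough */
        return 0;
}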
- * Must be called with the entry's lock held. - */ -static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) -{ - skb->next = NULL; - if (e->arpq_head) - e->arpq_tail->next = skb; - else - e->arpq_head = skb; - e->arpq_tail = skb; -} - -int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, - struct l2t_entry *e) -{ - struct adapter *adap = netdev2adap(dev); - -again: - switch (e->state) { - case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ - neigh_event_send(e->neigh, NULL); - spin_lock_bh(&e->lock); - if (e->state == L2T_STATE_STALE) - e->state = L2T_STATE_VALID; - spin_unlock_bh(&e->lock); - case L2T_STATE_VALID: /* fast-path, send the packet on */ - return t4_ofld_send(adap, skb); - case L2T_STATE_RESOLVING: - case L2T_STATE_SYNC_WRITE: - spin_lock_bh(&e->lock); - if (e->state != L2T_STATE_SYNC_WRITE && - e->state != L2T_STATE_RESOLVING) { - spin_unlock_bh(&e->lock); - goto again; - } - arpq_enqueue(e, skb); - spin_unlock_bh(&e->lock); - - if (e->state == L2T_STATE_RESOLVING && - !neigh_event_send(e->neigh, NULL)) { - spin_lock_bh(&e->lock); - if (e->state == L2T_STATE_RESOLVING && e->arpq_head) - write_l2e(adap, e, 1); - spin_unlock_bh(&e->lock); - } - } - return 0; -} -EXPORT_SYMBOL(cxgb4_l2t_send); - -/* - * Allocate a free L2T entry. Must be called with l2t_data.lock held. - */ -static struct l2t_entry *alloc_l2e(struct l2t_data *d) -{ - struct l2t_entry *end, *e, **p; - - if (!atomic_read(&d->nfree)) - return NULL; - - /* there's definitely a free entry */ - for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e) - if (atomic_read(&e->refcnt) == 0) - goto found; - - for (e = d->l2tab; atomic_read(&e->refcnt); ++e) - ; -found: - d->rover = e + 1; - atomic_dec(&d->nfree); - - /* - * The entry we found may be an inactive entry that is - * presently in the hash table. We need to remove it. - */ - if (e->state < L2T_STATE_SWITCHING) - for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) - if (*p == e) { - *p = e->next; - e->next = NULL; - break; - } - - e->state = L2T_STATE_UNUSED; - return e; -} - -/* - * Called when an L2T entry has no more users. - */ -static void t4_l2e_free(struct l2t_entry *e) -{ - struct l2t_data *d; - - spin_lock_bh(&e->lock); - if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ - if (e->neigh) { - neigh_release(e->neigh); - e->neigh = NULL; - } - while (e->arpq_head) { - struct sk_buff *skb = e->arpq_head; - - e->arpq_head = skb->next; - kfree_skb(skb); - } - e->arpq_tail = NULL; - } - spin_unlock_bh(&e->lock); - - d = container_of(e, struct l2t_data, l2tab[e->idx]); - atomic_inc(&d->nfree); -} - -void cxgb4_l2t_release(struct l2t_entry *e) -{ - if (atomic_dec_and_test(&e->refcnt)) - t4_l2e_free(e); -} -EXPORT_SYMBOL(cxgb4_l2t_release); - -/* - * Update an L2T entry that was previously used for the same next hop as neigh. - * Must be called with softirqs disabled. 
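The entry lifetime above turns on two refcount transitions: 0 -> 1 consumes a free-entry credit (l2t_hold(), via atomic_add_return()) and 1 -> 0 gives it back for recycling (cxgb4_l2t_release(), via atomic_dec_and_test() and t4_l2e_free()). A userspace model of just that accounting with C11 atomics; the names and the free budget are illustrative:

#include <assert.h>
#include <stdatomic.h>

struct entry { atomic_int refcnt; };

static atomic_int nfree = 10;   /* stand-in for l2t_data.nfree */

static void hold(struct entry *e)
{
        if (atomic_fetch_add(&e->refcnt, 1) + 1 == 1)   /* 0 -> 1 */
                atomic_fetch_sub(&nfree, 1);
}

static void release(struct entry *e)
{
        if (atomic_fetch_sub(&e->refcnt, 1) - 1 == 0)   /* 1 -> 0 */
                atomic_fetch_add(&nfree, 1);            /* recyclable */
}

int main(void)
{
        struct entry e = { .refcnt = 0 };

        hold(&e);                       /* first user: credit consumed */
        hold(&e);                       /* second user: no change */
        assert(atomic_load(&nfree) == 9);
        release(&e);
        release(&e);                    /* last user: credit returned */
        assert(atomic_load(&nfree) == 10);
        return 0;
}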
- */ -static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) -{ - unsigned int nud_state; - - spin_lock(&e->lock); /* avoid race with t4_l2t_free */ - if (neigh != e->neigh) - neigh_replace(e, neigh); - nud_state = neigh->nud_state; - if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || - !(nud_state & NUD_VALID)) - e->state = L2T_STATE_RESOLVING; - else if (nud_state & NUD_CONNECTED) - e->state = L2T_STATE_VALID; - else - e->state = L2T_STATE_STALE; - spin_unlock(&e->lock); -} - -struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, - const struct net_device *physdev, - unsigned int priority) -{ - u8 lport; - u16 vlan; - struct l2t_entry *e; - int addr_len = neigh->tbl->key_len; - u32 *addr = (u32 *)neigh->primary_key; - int ifidx = neigh->dev->ifindex; - int hash = addr_hash(addr, addr_len, ifidx); - - if (neigh->dev->flags & IFF_LOOPBACK) - lport = netdev2pinfo(physdev)->tx_chan + 4; - else - lport = netdev2pinfo(physdev)->lport; - - if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) - vlan = vlan_dev_vlan_id(neigh->dev); - else - vlan = VLAN_NONE; - - write_lock_bh(&d->lock); - for (e = d->l2tab[hash].first; e; e = e->next) - if (!addreq(e, addr) && e->ifindex == ifidx && - e->vlan == vlan && e->lport == lport) { - l2t_hold(d, e); - if (atomic_read(&e->refcnt) == 1) - reuse_entry(e, neigh); - goto done; - } - - /* Need to allocate a new entry */ - e = alloc_l2e(d); - if (e) { - spin_lock(&e->lock); /* avoid race with t4_l2t_free */ - e->state = L2T_STATE_RESOLVING; - memcpy(e->addr, addr, addr_len); - e->ifindex = ifidx; - e->hash = hash; - e->lport = lport; - e->v6 = addr_len == 16; - atomic_set(&e->refcnt, 1); - neigh_replace(e, neigh); - e->vlan = vlan; - e->next = d->l2tab[hash].first; - d->l2tab[hash].first = e; - spin_unlock(&e->lock); - } -done: - write_unlock_bh(&d->lock); - return e; -} -EXPORT_SYMBOL(cxgb4_l2t_get); - -/* - * Called when address resolution fails for an L2T entry to handle packets - * on the arpq head. If a packet specifies a failure handler it is invoked, - * otherwise the packet is sent to the device. - */ -static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) -{ - while (arpq) { - struct sk_buff *skb = arpq; - const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); - - arpq = skb->next; - skb->next = NULL; - if (cb->arp_err_handler) - cb->arp_err_handler(cb->handle, skb); - else - t4_ofld_send(adap, skb); - } -} - -/* - * Called when the host's neighbor layer makes a change to some entry that is - * loaded into the HW L2 table. - */ -void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) -{ - struct l2t_entry *e; - struct sk_buff *arpq = NULL; - struct l2t_data *d = adap->l2t; - int addr_len = neigh->tbl->key_len; - u32 *addr = (u32 *) neigh->primary_key; - int ifidx = neigh->dev->ifindex; - int hash = addr_hash(addr, addr_len, ifidx); - - read_lock_bh(&d->lock); - for (e = d->l2tab[hash].first; e; e = e->next) - if (!addreq(e, addr) && e->ifindex == ifidx) { - spin_lock(&e->lock); - if (atomic_read(&e->refcnt)) - goto found; - spin_unlock(&e->lock); - break; - } - read_unlock_bh(&d->lock); - return; - - found: - read_unlock(&d->lock); - - if (neigh != e->neigh) - neigh_replace(e, neigh); - - if (e->state == L2T_STATE_RESOLVING) { - if (neigh->nud_state & NUD_FAILED) { - arpq = e->arpq_head; - e->arpq_head = e->arpq_tail = NULL; - } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) && - e->arpq_head) { - write_l2e(adap, e, 1); - } - } else { - e->state = neigh->nud_state & NUD_CONNECTED ? 
- L2T_STATE_VALID : L2T_STATE_STALE; - if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac))) - write_l2e(adap, e, 0); - } - - spin_unlock_bh(&e->lock); - - if (arpq) - handle_failed_resolution(adap, arpq); -} - -struct l2t_data *t4_init_l2t(void) -{ - int i; - struct l2t_data *d; - - d = t4_alloc_mem(sizeof(*d)); - if (!d) - return NULL; - - d->rover = d->l2tab; - atomic_set(&d->nfree, L2T_SIZE); - rwlock_init(&d->lock); - - for (i = 0; i < L2T_SIZE; ++i) { - d->l2tab[i].idx = i; - d->l2tab[i].state = L2T_STATE_UNUSED; - spin_lock_init(&d->l2tab[i].lock); - atomic_set(&d->l2tab[i].refcnt, 0); - } - return d; -} - -#include -#include -#include - -static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) -{ - struct l2t_entry *l2tab = seq->private; - - return pos >= L2T_SIZE ? NULL : &l2tab[pos]; -} - -static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) -{ - return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; -} - -static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - v = l2t_get_idx(seq, *pos); - if (v) - ++*pos; - return v; -} - -static void l2t_seq_stop(struct seq_file *seq, void *v) -{ -} - -static char l2e_state(const struct l2t_entry *e) -{ - switch (e->state) { - case L2T_STATE_VALID: return 'V'; - case L2T_STATE_STALE: return 'S'; - case L2T_STATE_SYNC_WRITE: return 'W'; - case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R'; - case L2T_STATE_SWITCHING: return 'X'; - default: - return 'U'; - } -} - -static int l2t_seq_show(struct seq_file *seq, void *v) -{ - if (v == SEQ_START_TOKEN) - seq_puts(seq, " Idx IP address " - "Ethernet address VLAN/P LP State Users Port\n"); - else { - char ip[60]; - struct l2t_entry *e = v; - - spin_lock_bh(&e->lock); - if (e->state == L2T_STATE_SWITCHING) - ip[0] = '\0'; - else - sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr); - seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", - e->idx, ip, e->dmac, - e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, - l2e_state(e), atomic_read(&e->refcnt), - e->neigh ? e->neigh->dev->name : ""); - spin_unlock_bh(&e->lock); - } - return 0; -} - -static const struct seq_operations l2t_seq_ops = { - .start = l2t_seq_start, - .next = l2t_seq_next, - .stop = l2t_seq_stop, - .show = l2t_seq_show -}; - -static int l2t_seq_open(struct inode *inode, struct file *file) -{ - int rc = seq_open(file, &l2t_seq_ops); - - if (!rc) { - struct adapter *adap = inode->i_private; - struct seq_file *seq = file->private_data; - - seq->private = adap->l2t->l2tab; - } - return rc; -} - -const struct file_operations t4_l2t_fops = { - .owner = THIS_MODULE, - .open = l2t_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h deleted file mode 100644 index 02b31d0..0000000 --- a/drivers/net/cxgb4/l2t.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
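The debugfs dump just above follows the usual seq_file contract: start() maps a position to an element, or to a start token that stands for the header row; next() advances; show() renders one row. A toy userspace model of that contract (START_TOKEN and the table contents are stand-ins, not driver data):

#include <stdio.h>

#define TABLE_SIZE 4
#define START_TOKEN ((void *)1)         /* models SEQ_START_TOKEN */

static int table[TABLE_SIZE] = { 10, 20, 30, 40 };

static void *seq_start(long *pos)
{
        if (*pos == 0)
                return START_TOKEN;     /* first call prints the header */
        return *pos <= TABLE_SIZE ? &table[*pos - 1] : NULL;
}

static void *seq_next(long *pos)
{
        ++*pos;
        return *pos <= TABLE_SIZE ? &table[*pos - 1] : NULL;
}

static void seq_show(void *v)
{
        if (v == START_TOKEN)
                printf("Idx Value\n");
        else
                printf("%3ld %5d\n", (long)((int *)v - table), *(int *)v);
}

int main(void)
{
        long pos = 0;
        void *v;

        for (v = seq_start(&pos); v; v = seq_next(&pos))
                seq_show(v);
        return 0;
}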
You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- *  - Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer.
- *
- *  - Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __CXGB4_L2T_H
-#define __CXGB4_L2T_H
-
-#include
-#include
-#include
-
-struct adapter;
-struct l2t_data;
-struct neighbour;
-struct net_device;
-struct file_operations;
-struct cpl_l2t_write_rpl;
-
-/*
- * Each L2T entry plays multiple roles. First of all, it keeps state for the
- * corresponding entry of the HW L2 table and maintains a queue of offload
- * packets awaiting address resolution. Second, it is a node of a hash table
- * chain, where the nodes of the chain are linked together through their next
- * pointer. Finally, each node is a bucket of a hash table, pointing to the
- * first element in its chain through its first pointer.
- */
-struct l2t_entry {
-        u16 state;                  /* entry state */
-        u16 idx;                    /* entry index */
-        u32 addr[4];                /* next hop IP or IPv6 address */
-        int ifindex;                /* neighbor's net_device's ifindex */
-        struct neighbour *neigh;    /* associated neighbour */
-        struct l2t_entry *first;    /* start of hash chain */
-        struct l2t_entry *next;     /* next l2t_entry on chain */
-        struct sk_buff *arpq_head;  /* queue of packets awaiting resolution */
-        struct sk_buff *arpq_tail;
-        spinlock_t lock;
-        atomic_t refcnt;            /* entry reference count */
-        u16 hash;                   /* hash bucket the entry is on */
-        u16 vlan;                   /* VLAN TCI (id: bits 0-11, prio: 13-15) */
-        u8 v6;                      /* whether entry is for IPv6 */
-        u8 lport;                   /* associated offload logical interface */
-        u8 dmac[ETH_ALEN];          /* neighbour's MAC address */
-};
-
-typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
-
-/*
- * Callback stored in an skb to handle address resolution failure.
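struct l2t_entry above is deliberately three things at once: the shadow of one HW L2 table slot, a chain node (through next), and a bucket head (through first), so the table needs no separate bucket array. A compact userspace sketch of that intrusive layout, with illustrative names and sizes:

#include <stdio.h>

#define TAB_SIZE 8

struct ent {
        int key;
        struct ent *first;      /* head of the chain for bucket == my index */
        struct ent *next;       /* next entry on whatever chain I am on */
};

static struct ent tab[TAB_SIZE];

static unsigned int hash(int key)
{
        return (unsigned int)key % TAB_SIZE;
}

static void insert(struct ent *e, int key)
{
        unsigned int b = hash(key);

        e->key = key;
        e->next = tab[b].first;         /* push onto the bucket's chain */
        tab[b].first = e;
}

static struct ent *lookup(int key)
{
        struct ent *e;

        for (e = tab[hash(key)].first; e; e = e->next)
                if (e->key == key)
                        return e;
        return NULL;
}

int main(void)
{
        insert(&tab[3], 42);    /* slot choice is independent of bucket */
        insert(&tab[5], 50);    /* 50 % 8 == 42 % 8: same chain */

        printf("key 42 found in slot %ld\n", (long)(lookup(42) - tab));
        printf("key 50 found in slot %ld\n", (long)(lookup(50) - tab));
        return 0;
}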
- */ -struct l2t_skb_cb { - void *handle; - arp_err_handler_t arp_err_handler; -}; - -#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) - -static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle, - arp_err_handler_t handler) -{ - L2T_SKB_CB(skb)->handle = handle; - L2T_SKB_CB(skb)->arp_err_handler = handler; -} - -void cxgb4_l2t_release(struct l2t_entry *e); -int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, - struct l2t_entry *e); -struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, - const struct net_device *physdev, - unsigned int priority); - -void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); -struct l2t_data *t4_init_l2t(void); -void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); - -extern const struct file_operations t4_l2t_fops; -#endif /* __CXGB4_L2T_H */ diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c deleted file mode 100644 index 56adf44..0000000 --- a/drivers/net/cxgb4/sge.c +++ /dev/null @@ -1,2442 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "cxgb4.h" -#include "t4_regs.h" -#include "t4_msg.h" -#include "t4fw_api.h" - -/* - * Rx buffer size. We use largish buffers if possible but settle for single - * pages under memory shortage. - */ -#if PAGE_SHIFT >= 16 -# define FL_PG_ORDER 0 -#else -# define FL_PG_ORDER (16 - PAGE_SHIFT) -#endif - -/* RX_PULL_LEN should be <= RX_COPY_THRES */ -#define RX_COPY_THRES 256 -#define RX_PULL_LEN 128 - -/* - * Main body length for sk_buffs used for Rx Ethernet packets with fragments. - * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. - */ -#define RX_PKT_SKB_LEN 512 - -/* Ethernet header padding prepended to RX_PKTs */ -#define RX_PKT_PAD 2 - -/* - * Max number of Tx descriptors we clean up at a time. Should be modest as - * freeing skbs isn't cheap and it happens while holding locks. 
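FL_PG_ORDER near the top of sge.c above sizes free-list buffers toward 64KB: order 0 if the base page is already that big, otherwise 16 - PAGE_SHIFT extra orders. The arithmetic, runnable on its own; PAGE_SHIFT is defaulted to 12 here purely for illustration:

#include <stdio.h>

#ifndef PAGE_SHIFT
#define PAGE_SHIFT 12                   /* assume 4KB pages */
#endif

/* same selection as the driver: aim for 1 << 16 byte buffers */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

int main(void)
{
        printf("page %u bytes, order %d, fl buffer %u bytes\n",
               1u << PAGE_SHIFT, FL_PG_ORDER,
               (1u << PAGE_SHIFT) << FL_PG_ORDER);
        return 0;
}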
We just need - * to free packets faster than they arrive, we eventually catch up and keep - * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. - */ -#define MAX_TX_RECLAIM 16 - -/* - * Max number of Rx buffers we replenish at a time. Again keep this modest, - * allocating buffers isn't cheap either. - */ -#define MAX_RX_REFILL 16U - -/* - * Period of the Rx queue check timer. This timer is infrequent as it has - * something to do only when the system experiences severe memory shortage. - */ -#define RX_QCHECK_PERIOD (HZ / 2) - -/* - * Period of the Tx queue check timer. - */ -#define TX_QCHECK_PERIOD (HZ / 2) - -/* - * Max number of Tx descriptors to be reclaimed by the Tx timer. - */ -#define MAX_TIMER_TX_RECLAIM 100 - -/* - * Timer index used when backing off due to memory shortage. - */ -#define NOMEM_TMR_IDX (SGE_NTIMERS - 1) - -/* - * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will - * attempt to refill it. - */ -#define FL_STARVE_THRES 4 - -/* - * Suspend an Ethernet Tx queue with fewer available descriptors than this. - * This is the same as calc_tx_descs() for a TSO packet with - * nr_frags == MAX_SKB_FRAGS. - */ -#define ETHTXQ_STOP_THRES \ - (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) - -/* - * Suspension threshold for non-Ethernet Tx queues. We require enough room - * for a full sized WR. - */ -#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc)) - -/* - * Max Tx descriptor space we allow for an Ethernet packet to be inlined - * into a WR. - */ -#define MAX_IMM_TX_PKT_LEN 128 - -/* - * Max size of a WR sent through a control Tx queue. - */ -#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN - -enum { - /* packet alignment in FL buffers */ - FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES, - /* egress status entry size */ - STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64 -}; - -struct tx_sw_desc { /* SW state per Tx descriptor */ - struct sk_buff *skb; - struct ulptx_sgl *sgl; -}; - -struct rx_sw_desc { /* SW state per Rx descriptor */ - struct page *page; - dma_addr_t dma_addr; -}; - -/* - * The low bits of rx_sw_desc.dma_addr have special meaning. - */ -enum { - RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */ - RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ -}; - -static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) -{ - return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); -} - -static inline bool is_buf_mapped(const struct rx_sw_desc *d) -{ - return !(d->dma_addr & RX_UNMAPPED_BUF); -} - -/** - * txq_avail - return the number of available slots in a Tx queue - * @q: the Tx queue - * - * Returns the number of descriptors in a Tx queue available to write new - * packets. - */ -static inline unsigned int txq_avail(const struct sge_txq *q) -{ - return q->size - 1 - q->in_use; -} - -/** - * fl_cap - return the capacity of a free-buffer list - * @fl: the FL - * - * Returns the capacity of a free-buffer list. The capacity is less than - * the size because one descriptor needs to be left unpopulated, otherwise - * HW will think the FL is empty. 
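rx_sw_desc above stores RX_LARGE_BUF and RX_UNMAPPED_BUF in the two low bits of the DMA address, which are guaranteed zero for page-aligned buffers; get_buf_addr() and is_buf_mapped() simply mask them off. The trick in isolation:

#include <assert.h>
#include <stdint.h>

#define BUF_LARGE    (1u << 0)          /* models RX_LARGE_BUF */
#define BUF_UNMAPPED (1u << 1)          /* models RX_UNMAPPED_BUF */
#define FLAG_MASK    ((uintptr_t)(BUF_LARGE | BUF_UNMAPPED))

static uintptr_t pack(uintptr_t addr, unsigned int flags)
{
        assert((addr & FLAG_MASK) == 0);        /* alignment gives free bits */
        return addr | flags;
}

int main(void)
{
        uintptr_t v = pack(0x10000, BUF_LARGE);

        assert((v & ~FLAG_MASK) == 0x10000);    /* address recovered */
        assert(v & BUF_LARGE);
        assert(!(v & BUF_UNMAPPED));
        return 0;
}

One word carries both the address and the per-buffer state, which keeps the SW descriptor at exactly a page pointer plus a dma_addr_t.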
- */ -static inline unsigned int fl_cap(const struct sge_fl *fl) -{ - return fl->size - 8; /* 1 descriptor = 8 buffers */ -} - -static inline bool fl_starving(const struct sge_fl *fl) -{ - return fl->avail - fl->pend_cred <= FL_STARVE_THRES; -} - -static int map_skb(struct device *dev, const struct sk_buff *skb, - dma_addr_t *addr) -{ - const skb_frag_t *fp, *end; - const struct skb_shared_info *si; - - *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(dev, *addr)) - goto out_err; - - si = skb_shinfo(skb); - end = &si->frags[si->nr_frags]; - - for (fp = si->frags; fp < end; fp++) { - *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, *addr)) - goto unwind; - } - return 0; - -unwind: - while (fp-- > si->frags) - dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); - - dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); -out_err: - return -ENOMEM; -} - -#ifdef CONFIG_NEED_DMA_MAP_STATE -static void unmap_skb(struct device *dev, const struct sk_buff *skb, - const dma_addr_t *addr) -{ - const skb_frag_t *fp, *end; - const struct skb_shared_info *si; - - dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); - - si = skb_shinfo(skb); - end = &si->frags[si->nr_frags]; - for (fp = si->frags; fp < end; fp++) - dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE); -} - -/** - * deferred_unmap_destructor - unmap a packet when it is freed - * @skb: the packet - * - * This is the packet destructor used for Tx packets that need to remain - * mapped until they are freed rather than until their Tx descriptors are - * freed. - */ -static void deferred_unmap_destructor(struct sk_buff *skb) -{ - unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); -} -#endif - -static void unmap_sgl(struct device *dev, const struct sk_buff *skb, - const struct ulptx_sgl *sgl, const struct sge_txq *q) -{ - const struct ulptx_sge_pair *p; - unsigned int nfrags = skb_shinfo(skb)->nr_frags; - - if (likely(skb_headlen(skb))) - dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), - DMA_TO_DEVICE); - else { - dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), - DMA_TO_DEVICE); - nfrags--; - } - - /* - * the complexity below is because of the possibility of a wrap-around - * in the middle of an SGL - */ - for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { - if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { -unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), - ntohl(p->len[0]), DMA_TO_DEVICE); - dma_unmap_page(dev, be64_to_cpu(p->addr[1]), - ntohl(p->len[1]), DMA_TO_DEVICE); - p++; - } else if ((u8 *)p == (u8 *)q->stat) { - p = (const struct ulptx_sge_pair *)q->desc; - goto unmap; - } else if ((u8 *)p + 8 == (u8 *)q->stat) { - const __be64 *addr = (const __be64 *)q->desc; - - dma_unmap_page(dev, be64_to_cpu(addr[0]), - ntohl(p->len[0]), DMA_TO_DEVICE); - dma_unmap_page(dev, be64_to_cpu(addr[1]), - ntohl(p->len[1]), DMA_TO_DEVICE); - p = (const struct ulptx_sge_pair *)&addr[2]; - } else { - const __be64 *addr = (const __be64 *)q->desc; - - dma_unmap_page(dev, be64_to_cpu(p->addr[0]), - ntohl(p->len[0]), DMA_TO_DEVICE); - dma_unmap_page(dev, be64_to_cpu(addr[0]), - ntohl(p->len[1]), DMA_TO_DEVICE); - p = (const struct ulptx_sge_pair *)&addr[1]; - } - } - if (nfrags) { - __be64 addr; - - if ((u8 *)p == (u8 *)q->stat) - p = (const struct ulptx_sge_pair *)q->desc; - addr = (u8 *)p + 16 <= (u8 *)q->stat ? 
p->addr[0] : - *(const __be64 *)q->desc; - dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), - DMA_TO_DEVICE); - } -} - -/** - * free_tx_desc - reclaims Tx descriptors and their buffers - * @adapter: the adapter - * @q: the Tx queue to reclaim descriptors from - * @n: the number of descriptors to reclaim - * @unmap: whether the buffers should be unmapped for DMA - * - * Reclaims Tx descriptors from an SGE Tx queue and frees the associated - * Tx buffers. Called with the Tx queue lock held. - */ -static void free_tx_desc(struct adapter *adap, struct sge_txq *q, - unsigned int n, bool unmap) -{ - struct tx_sw_desc *d; - unsigned int cidx = q->cidx; - struct device *dev = adap->pdev_dev; - - d = &q->sdesc[cidx]; - while (n--) { - if (d->skb) { /* an SGL is present */ - if (unmap) - unmap_sgl(dev, d->skb, d->sgl, q); - kfree_skb(d->skb); - d->skb = NULL; - } - ++d; - if (++cidx == q->size) { - cidx = 0; - d = q->sdesc; - } - } - q->cidx = cidx; -} - -/* - * Return the number of reclaimable descriptors in a Tx queue. - */ -static inline int reclaimable(const struct sge_txq *q) -{ - int hw_cidx = ntohs(q->stat->cidx); - hw_cidx -= q->cidx; - return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; -} - -/** - * reclaim_completed_tx - reclaims completed Tx descriptors - * @adap: the adapter - * @q: the Tx queue to reclaim completed descriptors from - * @unmap: whether the buffers should be unmapped for DMA - * - * Reclaims Tx descriptors that the SGE has indicated it has processed, - * and frees the associated buffers if possible. Called with the Tx - * queue locked. - */ -static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, - bool unmap) -{ - int avail = reclaimable(q); - - if (avail) { - /* - * Limit the amount of clean up work we do at a time to keep - * the Tx lock hold time O(1). - */ - if (avail > MAX_TX_RECLAIM) - avail = MAX_TX_RECLAIM; - - free_tx_desc(adap, q, avail, unmap); - q->in_use -= avail; - } -} - -static inline int get_buf_size(const struct rx_sw_desc *d) -{ -#if FL_PG_ORDER > 0 - return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) : - PAGE_SIZE; -#else - return PAGE_SIZE; -#endif -} - -/** - * free_rx_bufs - free the Rx buffers on an SGE free list - * @adap: the adapter - * @q: the SGE free list to free buffers from - * @n: how many buffers to free - * - * Release the next @n buffers on an SGE free-buffer Rx queue. The - * buffers must be made inaccessible to HW before calling this function. - */ -static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) -{ - while (n--) { - struct rx_sw_desc *d = &q->sdesc[q->cidx]; - - if (is_buf_mapped(d)) - dma_unmap_page(adap->pdev_dev, get_buf_addr(d), - get_buf_size(d), PCI_DMA_FROMDEVICE); - put_page(d->page); - d->page = NULL; - if (++q->cidx == q->size) - q->cidx = 0; - q->avail--; - } -} - -/** - * unmap_rx_buf - unmap the current Rx buffer on an SGE free list - * @adap: the adapter - * @q: the SGE free list - * - * Unmap the current buffer on an SGE free-buffer Rx queue. The - * buffer must be made inaccessible to HW before calling this function. - * - * This is similar to @free_rx_bufs above but does not free the buffer. - * Do note that the FL still loses any further access to the buffer. 
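reclaimable() above is plain modular ring arithmetic: the HW consumer index may have wrapped past the SW one, so negative differences are folded back by adding the ring size. Standalone:

#include <assert.h>

static int reclaimable(int hw_cidx, int sw_cidx, int size)
{
        int d = hw_cidx - sw_cidx;

        return d < 0 ? d + size : d;    /* fold back after a wrap */
}

int main(void)
{
        assert(reclaimable(10, 4, 16) == 6);    /* no wrap */
        assert(reclaimable(2, 14, 16) == 4);    /* HW wrapped past SW */
        assert(reclaimable(5, 5, 16) == 0);     /* nothing to reclaim */
        return 0;
}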
- */ -static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) -{ - struct rx_sw_desc *d = &q->sdesc[q->cidx]; - - if (is_buf_mapped(d)) - dma_unmap_page(adap->pdev_dev, get_buf_addr(d), - get_buf_size(d), PCI_DMA_FROMDEVICE); - d->page = NULL; - if (++q->cidx == q->size) - q->cidx = 0; - q->avail--; -} - -static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) -{ - if (q->pend_cred >= 8) { - wmb(); - t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO | - QID(q->cntxt_id) | PIDX(q->pend_cred / 8)); - q->pend_cred &= 7; - } -} - -static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, - dma_addr_t mapping) -{ - sd->page = pg; - sd->dma_addr = mapping; /* includes size low bits */ -} - -/** - * refill_fl - refill an SGE Rx buffer ring - * @adap: the adapter - * @q: the ring to refill - * @n: the number of new buffers to allocate - * @gfp: the gfp flags for the allocations - * - * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, - * allocated with the supplied gfp flags. The caller must assure that - * @n does not exceed the queue's capacity. If afterwards the queue is - * found critically low mark it as starving in the bitmap of starving FLs. - * - * Returns the number of buffers allocated. - */ -static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, - gfp_t gfp) -{ - struct page *pg; - dma_addr_t mapping; - unsigned int cred = q->avail; - __be64 *d = &q->desc[q->pidx]; - struct rx_sw_desc *sd = &q->sdesc[q->pidx]; - - gfp |= __GFP_NOWARN; /* failures are expected */ - -#if FL_PG_ORDER > 0 - /* - * Prefer large buffers - */ - while (n) { - pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER); - if (unlikely(!pg)) { - q->large_alloc_failed++; - break; /* fall back to single pages */ - } - - mapping = dma_map_page(adap->pdev_dev, pg, 0, - PAGE_SIZE << FL_PG_ORDER, - PCI_DMA_FROMDEVICE); - if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { - __free_pages(pg, FL_PG_ORDER); - goto out; /* do not try small pages for this error */ - } - mapping |= RX_LARGE_BUF; - *d++ = cpu_to_be64(mapping); - - set_rx_sw_desc(sd, pg, mapping); - sd++; - - q->avail++; - if (++q->pidx == q->size) { - q->pidx = 0; - sd = q->sdesc; - d = q->desc; - } - n--; - } -#endif - - while (n--) { - pg = __netdev_alloc_page(adap->port[0], gfp); - if (unlikely(!pg)) { - q->alloc_failed++; - break; - } - - mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); - if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { - netdev_free_page(adap->port[0], pg); - goto out; - } - *d++ = cpu_to_be64(mapping); - - set_rx_sw_desc(sd, pg, mapping); - sd++; - - q->avail++; - if (++q->pidx == q->size) { - q->pidx = 0; - sd = q->sdesc; - d = q->desc; - } - } - -out: cred = q->avail - cred; - q->pend_cred += cred; - ring_fl_db(adap, q); - - if (unlikely(fl_starving(q))) { - smp_wmb(); - set_bit(q->cntxt_id - adap->sge.egr_start, - adap->sge.starving_fl); - } - - return cred; -} - -static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) -{ - refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), - GFP_ATOMIC); -} - -/** - * alloc_ring - allocate resources for an SGE descriptor ring - * @dev: the PCI device's core device - * @nelem: the number of descriptors - * @elem_size: the size of each descriptor - * @sw_size: the size of the SW state associated with each ring element - * @phys: the physical address of the allocated ring - * @metadata: address of the array holding the SW state for the ring - * 
@stat_size: extra space in HW ring for status information - * @node: preferred node for memory allocations - * - * Allocates resources for an SGE descriptor ring, such as Tx queues, - * free buffer lists, or response queues. Each SGE ring requires - * space for its HW descriptors plus, optionally, space for the SW state - * associated with each HW entry (the metadata). The function returns - * three values: the virtual address for the HW ring (the return value - * of the function), the bus address of the HW ring, and the address - * of the SW ring. - */ -static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, - size_t sw_size, dma_addr_t *phys, void *metadata, - size_t stat_size, int node) -{ - size_t len = nelem * elem_size + stat_size; - void *s = NULL; - void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); - - if (!p) - return NULL; - if (sw_size) { - s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node); - - if (!s) { - dma_free_coherent(dev, len, p, *phys); - return NULL; - } - } - if (metadata) - *(void **)metadata = s; - memset(p, 0, len); - return p; -} - -/** - * sgl_len - calculates the size of an SGL of the given capacity - * @n: the number of SGL entries - * - * Calculates the number of flits needed for a scatter/gather list that - * can hold the given number of entries. - */ -static inline unsigned int sgl_len(unsigned int n) -{ - n--; - return (3 * n) / 2 + (n & 1) + 2; -} - -/** - * flits_to_desc - returns the num of Tx descriptors for the given flits - * @n: the number of flits - * - * Returns the number of Tx descriptors needed for the supplied number - * of flits. - */ -static inline unsigned int flits_to_desc(unsigned int n) -{ - BUG_ON(n > SGE_MAX_WR_LEN / 8); - return DIV_ROUND_UP(n, 8); -} - -/** - * is_eth_imm - can an Ethernet packet be sent as immediate data? - * @skb: the packet - * - * Returns whether an Ethernet packet is small enough to fit as - * immediate data. - */ -static inline int is_eth_imm(const struct sk_buff *skb) -{ - return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt); -} - -/** - * calc_tx_flits - calculate the number of flits for a packet Tx WR - * @skb: the packet - * - * Returns the number of flits needed for a Tx WR for the given Ethernet - * packet, including the needed WR and CPL headers. - */ -static inline unsigned int calc_tx_flits(const struct sk_buff *skb) -{ - unsigned int flits; - - if (is_eth_imm(skb)) - return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8); - - flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; - if (skb_shinfo(skb)->gso_size) - flits += 2; - return flits; -} - -/** - * calc_tx_descs - calculate the number of Tx descriptors for a packet - * @skb: the packet - * - * Returns the number of Tx descriptors needed for the given Ethernet - * packet, including the needed WR and CPL headers. - */ -static inline unsigned int calc_tx_descs(const struct sk_buff *skb) -{ - return flits_to_desc(calc_tx_flits(skb)); -} - -/** - * write_sgl - populate a scatter/gather list for a packet - * @skb: the packet - * @q: the Tx queue we are writing into - * @sgl: starting location for writing the SGL - * @end: points right after the end of the SGL - * @start: start offset into skb main-body data to include in the SGL - * @addr: the list of bus addresses for the SGL elements - * - * Generates a gather list for the buffers that make up a packet. - * The caller must provide adequate space for the SGL that will be written. 
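The budgeting helpers above reduce to two formulas: an SGL holding n addresses costs 3(n-1)/2 + ((n-1) & 1) + 2 eight-byte flits, since the first address rides in the SGL header, and eight flits fill one 64-byte Tx descriptor. A runnable check of the arithmetic:

#include <stdio.h>

static unsigned int sgl_len(unsigned int n)
{
        n--;    /* the first address lives in the SGL header */
        return (3 * n) / 2 + (n & 1) + 2;
}

static unsigned int flits_to_desc(unsigned int n)
{
        return (n + 7) / 8;     /* DIV_ROUND_UP(n, 8) */
}

int main(void)
{
        unsigned int n;

        for (n = 1; n <= 4; n++)
                printf("%u buffers -> %u flits -> %u descs\n",
                       n, sgl_len(n), flits_to_desc(sgl_len(n)));
        return 0;
}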
- * The SGL includes all of the packet's page fragments and the data in its
- * main body except for the first @start bytes. @sgl must be 16-byte
- * aligned and within a Tx descriptor with available space. @end points
- * right after the end of the SGL but does not account for any potential
- * wrap around, i.e., @end > @sgl.
- */
-static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
-                      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
-                      const dma_addr_t *addr)
-{
-        unsigned int i, len;
-        struct ulptx_sge_pair *to;
-        const struct skb_shared_info *si = skb_shinfo(skb);
-        unsigned int nfrags = si->nr_frags;
-        struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
-
-        len = skb_headlen(skb) - start;
-        if (likely(len)) {
-                sgl->len0 = htonl(len);
-                sgl->addr0 = cpu_to_be64(addr[0] + start);
-                nfrags++;
-        } else {
-                sgl->len0 = htonl(si->frags[0].size);
-                sgl->addr0 = cpu_to_be64(addr[1]);
-        }
-
-        sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
-        if (likely(--nfrags == 0))
-                return;
-        /*
-         * Most of the complexity below deals with the possibility we hit the
-         * end of the queue in the middle of writing the SGL. For this case
-         * only we create the SGL in a temporary buffer and then copy it.
-         */
-        to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
-
-        for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
-                to->len[0] = cpu_to_be32(si->frags[i].size);
-                to->len[1] = cpu_to_be32(si->frags[++i].size);
-                to->addr[0] = cpu_to_be64(addr[i]);
-                to->addr[1] = cpu_to_be64(addr[++i]);
-        }
-        if (nfrags) {
-                to->len[0] = cpu_to_be32(si->frags[i].size);
-                to->len[1] = cpu_to_be32(0);
-                to->addr[0] = cpu_to_be64(addr[i + 1]);
-        }
-        if (unlikely((u8 *)end > (u8 *)q->stat)) {
-                unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
-
-                if (likely(part0))
-                        memcpy(sgl->sge, buf, part0);
-                part1 = (u8 *)end - (u8 *)q->stat;
-                memcpy(q->desc, (u8 *)buf + part0, part1);
-                end = (void *)q->desc + part1;
-        }
-        if ((uintptr_t)end & 8)         /* 0-pad to multiple of 16 */
-                *(u64 *)end = 0;
-}
-
-/**
- * ring_tx_db - check and potentially ring a Tx queue's doorbell
- * @adap: the adapter
- * @q: the Tx queue
- * @n: number of new descriptors to give to HW
- *
- * Ring the doorbell for a Tx queue.
- */
-static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
-{
-        wmb();  /* write descriptors before telling HW */
-        t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-                     QID(q->cntxt_id) | PIDX(n));
-}
-
-/**
- * inline_tx_skb - inline a packet's data into Tx descriptors
- * @skb: the packet
- * @q: the Tx queue where the packet will be inlined
- * @pos: starting position in the Tx queue where to inline the packet
- *
- * Inline a packet's contents directly into Tx descriptors, starting at
- * the given position within the Tx DMA ring.
- * Most of the complexity of this operation is dealing with wrap arounds
- * in the middle of the packet we want to inline.
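write_sgl() above handles the ring-wrap case by building the list in an on-stack bounce buffer and then copying it out in two pieces, part0 bytes up to the end of the ring and part1 bytes from its start. The copy scheme, modeled standalone with toy sizes:

#include <assert.h>
#include <string.h>

#define RING_BYTES 16

static char ring[RING_BYTES];

static void write_wrapped(int pos, const char *src, int len)
{
        int part0 = RING_BYTES - pos;   /* room before the wrap point */

        if (len <= part0) {
                memcpy(ring + pos, src, len);           /* no wrap */
        } else {
                memcpy(ring + pos, src, part0);         /* tail of ring */
                memcpy(ring, src + part0, len - part0); /* head of ring */
        }
}

int main(void)
{
        char rec[8] = "ABCDEFGH";

        write_wrapped(12, rec, 8);      /* crosses the end of the ring */
        assert(memcmp(ring + 12, "ABCD", 4) == 0);
        assert(memcmp(ring, "EFGH", 4) == 0);
        return 0;
}

Building in a bounce buffer first means the wrap logic runs once per record rather than once per SGL entry, at the cost of one extra memcpy in the uncommon case.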
- */ -static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, - void *pos) -{ - u64 *p; - int left = (void *)q->stat - pos; - - if (likely(skb->len <= left)) { - if (likely(!skb->data_len)) - skb_copy_from_linear_data(skb, pos, skb->len); - else - skb_copy_bits(skb, 0, pos, skb->len); - pos += skb->len; - } else { - skb_copy_bits(skb, 0, pos, left); - skb_copy_bits(skb, left, q->desc, skb->len - left); - pos = (void *)q->desc + (skb->len - left); - } - - /* 0-pad to multiple of 16 */ - p = PTR_ALIGN(pos, 8); - if ((uintptr_t)p & 8) - *p = 0; -} - -/* - * Figure out what HW csum a packet wants and return the appropriate control - * bits. - */ -static u64 hwcsum(const struct sk_buff *skb) -{ - int csum_type; - const struct iphdr *iph = ip_hdr(skb); - - if (iph->version == 4) { - if (iph->protocol == IPPROTO_TCP) - csum_type = TX_CSUM_TCPIP; - else if (iph->protocol == IPPROTO_UDP) - csum_type = TX_CSUM_UDPIP; - else { -nocsum: /* - * unknown protocol, disable HW csum - * and hope a bad packet is detected - */ - return TXPKT_L4CSUM_DIS; - } - } else { - /* - * this doesn't work with extension headers - */ - const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; - - if (ip6h->nexthdr == IPPROTO_TCP) - csum_type = TX_CSUM_TCPIP6; - else if (ip6h->nexthdr == IPPROTO_UDP) - csum_type = TX_CSUM_UDPIP6; - else - goto nocsum; - } - - if (likely(csum_type >= TX_CSUM_TCPIP)) - return TXPKT_CSUM_TYPE(csum_type) | - TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | - TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); - else { - int start = skb_transport_offset(skb); - - return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) | - TXPKT_CSUM_LOC(start + skb->csum_offset); - } -} - -static void eth_txq_stop(struct sge_eth_txq *q) -{ - netif_tx_stop_queue(q->txq); - q->q.stops++; -} - -static inline void txq_advance(struct sge_txq *q, unsigned int n) -{ - q->in_use += n; - q->pidx += n; - if (q->pidx >= q->size) - q->pidx -= q->size; -} - -/** - * t4_eth_xmit - add a packet to an Ethernet Tx queue - * @skb: the packet - * @dev: the egress net device - * - * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. - */ -netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) -{ - u32 wr_mid; - u64 cntrl, *end; - int qidx, credits; - unsigned int flits, ndesc; - struct adapter *adap; - struct sge_eth_txq *q; - const struct port_info *pi; - struct fw_eth_tx_pkt_wr *wr; - struct cpl_tx_pkt_core *cpl; - const struct skb_shared_info *ssi; - dma_addr_t addr[MAX_SKB_FRAGS + 1]; - - /* - * The chip min packet length is 10 octets but play safe and reject - * anything shorter than an Ethernet header. 
- */ - if (unlikely(skb->len < ETH_HLEN)) { -out_free: dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - pi = netdev_priv(dev); - adap = pi->adapter; - qidx = skb_get_queue_mapping(skb); - q = &adap->sge.ethtxq[qidx + pi->first_qset]; - - reclaim_completed_tx(adap, &q->q, true); - - flits = calc_tx_flits(skb); - ndesc = flits_to_desc(flits); - credits = txq_avail(&q->q) - ndesc; - - if (unlikely(credits < 0)) { - eth_txq_stop(q); - dev_err(adap->pdev_dev, - "%s: Tx ring %u full while queue awake!\n", - dev->name, qidx); - return NETDEV_TX_BUSY; - } - - if (!is_eth_imm(skb) && - unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { - q->mapping_err++; - goto out_free; - } - - wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); - if (unlikely(credits < ETHTXQ_STOP_THRES)) { - eth_txq_stop(q); - wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; - } - - wr = (void *)&q->q.desc[q->q.pidx]; - wr->equiq_to_len16 = htonl(wr_mid); - wr->r3 = cpu_to_be64(0); - end = (u64 *)wr + flits; - - ssi = skb_shinfo(skb); - if (ssi->gso_size) { - struct cpl_tx_pkt_lso *lso = (void *)wr; - bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; - int l3hdr_len = skb_network_header_len(skb); - int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; - - wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | - FW_WR_IMMDLEN(sizeof(*lso))); - lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | - LSO_FIRST_SLICE | LSO_LAST_SLICE | - LSO_IPV6(v6) | - LSO_ETHHDR_LEN(eth_xtra_len / 4) | - LSO_IPHDR_LEN(l3hdr_len / 4) | - LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); - lso->c.ipid_ofst = htons(0); - lso->c.mss = htons(ssi->gso_size); - lso->c.seqno_offset = htonl(0); - lso->c.len = htonl(skb->len); - cpl = (void *)(lso + 1); - cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | - TXPKT_IPHDR_LEN(l3hdr_len) | - TXPKT_ETHHDR_LEN(eth_xtra_len); - q->tso++; - q->tx_cso += ssi->gso_segs; - } else { - int len; - - len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); - wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | - FW_WR_IMMDLEN(len)); - cpl = (void *)(wr + 1); - if (skb->ip_summed == CHECKSUM_PARTIAL) { - cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; - q->tx_cso++; - } else - cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; - } - - if (vlan_tx_tag_present(skb)) { - q->vlan_ins++; - cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); - } - - cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | - TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn)); - cpl->pack = htons(0); - cpl->len = htons(skb->len); - cpl->ctrl1 = cpu_to_be64(cntrl); - - if (is_eth_imm(skb)) { - inline_tx_skb(skb, &q->q, cpl + 1); - dev_kfree_skb(skb); - } else { - int last_desc; - - write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, - addr); - skb_orphan(skb); - - last_desc = q->q.pidx + ndesc - 1; - if (last_desc >= q->q.size) - last_desc -= q->q.size; - q->q.sdesc[last_desc].skb = skb; - q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); - } - - txq_advance(&q->q, ndesc); - - ring_tx_db(adap, &q->q, ndesc); - return NETDEV_TX_OK; -} - -/** - * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs - * @q: the SGE control Tx queue - * - * This is a variant of reclaim_completed_tx() that is used for Tx queues - * that send only immediate data (presently just the control queues) and - * thus do not have any sk_buffs to release. 
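t4_eth_xmit() above never lets the ring actually fill: it computes the credits left after reserving ndesc descriptors and stops the queue while there is still ETHTXQ_STOP_THRES worth of headroom, which is why "Tx ring full while queue awake" is logged as an error. A userspace model of the credit check, with illustrative sizes in place of the driver's constants:

#include <stdio.h>

#define STOP_THRES 4    /* stand-in for ETHTXQ_STOP_THRES */

struct txq { int size, in_use, stopped; };

static int try_send(struct txq *q, int ndesc)
{
        int credits = (q->size - 1 - q->in_use) - ndesc;

        if (credits < 0)
                return -1;              /* ring full: should never happen */
        q->in_use += ndesc;
        if (credits < STOP_THRES)
                q->stopped = 1;         /* stop before we can overflow */
        return 0;
}

int main(void)
{
        struct txq q = { .size = 16 };
        int sent;

        for (sent = 0; !q.stopped && try_send(&q, 3) == 0; sent++)
                ;
        printf("sent %d packets, in_use %d, stopped %d\n",
               sent, q.in_use, q.stopped);
        return 0;
}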
- */ -static inline void reclaim_completed_tx_imm(struct sge_txq *q) -{ - int hw_cidx = ntohs(q->stat->cidx); - int reclaim = hw_cidx - q->cidx; - - if (reclaim < 0) - reclaim += q->size; - - q->in_use -= reclaim; - q->cidx = hw_cidx; -} - -/** - * is_imm - check whether a packet can be sent as immediate data - * @skb: the packet - * - * Returns true if a packet can be sent as a WR with immediate data. - */ -static inline int is_imm(const struct sk_buff *skb) -{ - return skb->len <= MAX_CTRL_WR_LEN; -} - -/** - * ctrlq_check_stop - check if a control queue is full and should stop - * @q: the queue - * @wr: most recent WR written to the queue - * - * Check if a control queue has become full and should be stopped. - * We clean up control queue descriptors very lazily, only when we are out. - * If the queue is still full after reclaiming any completed descriptors - * we suspend it and have the last WR wake it up. - */ -static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) -{ - reclaim_completed_tx_imm(&q->q); - if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { - wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); - q->q.stops++; - q->full = 1; - } -} - -/** - * ctrl_xmit - send a packet through an SGE control Tx queue - * @q: the control queue - * @skb: the packet - * - * Send a packet through an SGE control Tx queue. Packets sent through - * a control queue must fit entirely as immediate data. - */ -static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) -{ - unsigned int ndesc; - struct fw_wr_hdr *wr; - - if (unlikely(!is_imm(skb))) { - WARN_ON(1); - dev_kfree_skb(skb); - return NET_XMIT_DROP; - } - - ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); - spin_lock(&q->sendq.lock); - - if (unlikely(q->full)) { - skb->priority = ndesc; /* save for restart */ - __skb_queue_tail(&q->sendq, skb); - spin_unlock(&q->sendq.lock); - return NET_XMIT_CN; - } - - wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; - inline_tx_skb(skb, &q->q, wr); - - txq_advance(&q->q, ndesc); - if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) - ctrlq_check_stop(q, wr); - - ring_tx_db(q->adap, &q->q, ndesc); - spin_unlock(&q->sendq.lock); - - kfree_skb(skb); - return NET_XMIT_SUCCESS; -} - -/** - * restart_ctrlq - restart a suspended control queue - * @data: the control queue to restart - * - * Resumes transmission on a suspended Tx control queue. - */ -static void restart_ctrlq(unsigned long data) -{ - struct sk_buff *skb; - unsigned int written = 0; - struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; - - spin_lock(&q->sendq.lock); - reclaim_completed_tx_imm(&q->q); - BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ - - while ((skb = __skb_dequeue(&q->sendq)) != NULL) { - struct fw_wr_hdr *wr; - unsigned int ndesc = skb->priority; /* previously saved */ - - /* - * Write descriptors and free skbs outside the lock to limit - * wait times. q->full is still set so new skbs will be queued. 
- */ - spin_unlock(&q->sendq.lock); - - wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; - inline_tx_skb(skb, &q->q, wr); - kfree_skb(skb); - - written += ndesc; - txq_advance(&q->q, ndesc); - if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { - unsigned long old = q->q.stops; - - ctrlq_check_stop(q, wr); - if (q->q.stops != old) { /* suspended anew */ - spin_lock(&q->sendq.lock); - goto ringdb; - } - } - if (written > 16) { - ring_tx_db(q->adap, &q->q, written); - written = 0; - } - spin_lock(&q->sendq.lock); - } - q->full = 0; -ringdb: if (written) - ring_tx_db(q->adap, &q->q, written); - spin_unlock(&q->sendq.lock); -} - -/** - * t4_mgmt_tx - send a management message - * @adap: the adapter - * @skb: the packet containing the management message - * - * Send a management message through control queue 0. - */ -int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) -{ - int ret; - - local_bh_disable(); - ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); - local_bh_enable(); - return ret; -} - -/** - * is_ofld_imm - check whether a packet can be sent as immediate data - * @skb: the packet - * - * Returns true if a packet can be sent as an offload WR with immediate - * data. We currently use the same limit as for Ethernet packets. - */ -static inline int is_ofld_imm(const struct sk_buff *skb) -{ - return skb->len <= MAX_IMM_TX_PKT_LEN; -} - -/** - * calc_tx_flits_ofld - calculate # of flits for an offload packet - * @skb: the packet - * - * Returns the number of flits needed for the given offload packet. - * These packets are already fully constructed and no additional headers - * will be added. - */ -static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) -{ - unsigned int flits, cnt; - - if (is_ofld_imm(skb)) - return DIV_ROUND_UP(skb->len, 8); - - flits = skb_transport_offset(skb) / 8U; /* headers */ - cnt = skb_shinfo(skb)->nr_frags; - if (skb->tail != skb->transport_header) - cnt++; - return flits + sgl_len(cnt); -} - -/** - * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion - * @adap: the adapter - * @q: the queue to stop - * - * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting - * inability to map packets. A periodic timer attempts to restart - * queues so marked. - */ -static void txq_stop_maperr(struct sge_ofld_txq *q) -{ - q->mapping_err++; - q->q.stops++; - set_bit(q->q.cntxt_id - q->adap->sge.egr_start, - q->adap->sge.txq_maperr); -} - -/** - * ofldtxq_stop - stop an offload Tx queue that has become full - * @q: the queue to stop - * @skb: the packet causing the queue to become full - * - * Stops an offload Tx queue that has become full and modifies the packet - * being written to request a wakeup. - */ -static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) -{ - struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; - - wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); - q->q.stops++; - q->full = 1; -} - -/** - * service_ofldq - restart a suspended offload queue - * @q: the offload queue - * - * Services an offload Tx queue by moving packets from its packet queue - * to the HW Tx ring. The function starts and ends with the queue locked. - */ -static void service_ofldq(struct sge_ofld_txq *q) -{ - u64 *pos; - int credits; - struct sk_buff *skb; - unsigned int written = 0; - unsigned int flits, ndesc; - - while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { - /* - * We drop the lock but leave skb on sendq, thus retaining - * exclusive access to the state of the queue. 
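service_ofldq() above (like restart_ctrlq() before it) juggles the sendq lock deliberately: it peeks at the head without unlinking, drops the lock for the slow descriptor writes, then retakes it to unlink. Leaving the skb at the head is what keeps a second thread from servicing the queue concurrently. The shape of that pattern in miniature (single-threaded demo, compile with -pthread; names illustrative):

#include <pthread.h>
#include <stdio.h>

struct node { int v; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void service(void)
{
        struct node *n;

        pthread_mutex_lock(&lock);
        while ((n = head) != NULL) {    /* peek, do not unlink yet */
                pthread_mutex_unlock(&lock);

                printf("writing %d\n", n->v);   /* slow work, unlocked */

                pthread_mutex_lock(&lock);
                head = n->next;                 /* now unlink */
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        struct node b = { 2, NULL }, a = { 1, &b };

        head = &a;
        service();
        return 0;
}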
- */ - spin_unlock(&q->sendq.lock); - - reclaim_completed_tx(q->adap, &q->q, false); - - flits = skb->priority; /* previously saved */ - ndesc = flits_to_desc(flits); - credits = txq_avail(&q->q) - ndesc; - BUG_ON(credits < 0); - if (unlikely(credits < TXQ_STOP_THRES)) - ofldtxq_stop(q, skb); - - pos = (u64 *)&q->q.desc[q->q.pidx]; - if (is_ofld_imm(skb)) - inline_tx_skb(skb, &q->q, pos); - else if (map_skb(q->adap->pdev_dev, skb, - (dma_addr_t *)skb->head)) { - txq_stop_maperr(q); - spin_lock(&q->sendq.lock); - break; - } else { - int last_desc, hdr_len = skb_transport_offset(skb); - - memcpy(pos, skb->data, hdr_len); - write_sgl(skb, &q->q, (void *)pos + hdr_len, - pos + flits, hdr_len, - (dma_addr_t *)skb->head); -#ifdef CONFIG_NEED_DMA_MAP_STATE - skb->dev = q->adap->port[0]; - skb->destructor = deferred_unmap_destructor; -#endif - last_desc = q->q.pidx + ndesc - 1; - if (last_desc >= q->q.size) - last_desc -= q->q.size; - q->q.sdesc[last_desc].skb = skb; - } - - txq_advance(&q->q, ndesc); - written += ndesc; - if (unlikely(written > 32)) { - ring_tx_db(q->adap, &q->q, written); - written = 0; - } - - spin_lock(&q->sendq.lock); - __skb_unlink(skb, &q->sendq); - if (is_ofld_imm(skb)) - kfree_skb(skb); - } - if (likely(written)) - ring_tx_db(q->adap, &q->q, written); -} - -/** - * ofld_xmit - send a packet through an offload queue - * @q: the Tx offload queue - * @skb: the packet - * - * Send an offload packet through an SGE offload queue. - */ -static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) -{ - skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ - spin_lock(&q->sendq.lock); - __skb_queue_tail(&q->sendq, skb); - if (q->sendq.qlen == 1) - service_ofldq(q); - spin_unlock(&q->sendq.lock); - return NET_XMIT_SUCCESS; -} - -/** - * restart_ofldq - restart a suspended offload queue - * @data: the offload queue to restart - * - * Resumes transmission on a suspended Tx offload queue. - */ -static void restart_ofldq(unsigned long data) -{ - struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; - - spin_lock(&q->sendq.lock); - q->full = 0; /* the queue actually is completely empty now */ - service_ofldq(q); - spin_unlock(&q->sendq.lock); -} - -/** - * skb_txq - return the Tx queue an offload packet should use - * @skb: the packet - * - * Returns the Tx queue an offload packet should use as indicated by bits - * 1-15 in the packet's queue_mapping. - */ -static inline unsigned int skb_txq(const struct sk_buff *skb) -{ - return skb->queue_mapping >> 1; -} - -/** - * is_ctrl_pkt - return whether an offload packet is a control packet - * @skb: the packet - * - * Returns whether an offload packet should use an OFLD or a CTRL - * Tx queue as indicated by bit 0 in the packet's queue_mapping. - */ -static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) -{ - return skb->queue_mapping & 1; -} - -static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) -{ - unsigned int idx = skb_txq(skb); - - if (unlikely(is_ctrl_pkt(skb))) - return ctrl_xmit(&adap->sge.ctrlq[idx], skb); - return ofld_xmit(&adap->sge.ofldtxq[idx], skb); -} - -/** - * t4_ofld_send - send an offload packet - * @adap: the adapter - * @skb: the packet - * - * Sends an offload packet. We use the packet queue_mapping to select the - * appropriate Tx queue as follows: bit 0 indicates whether the packet - * should be sent as regular or control, bits 1-15 select the queue. 
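- *
- * For example (illustrative only): queue_mapping = 3 << 1 targets
- * offload Tx queue 3, while queue_mapping = (0 << 1) | 1 routes the
- * WR to control queue 0.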
- */ -int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) -{ - int ret; - - local_bh_disable(); - ret = ofld_send(adap, skb); - local_bh_enable(); - return ret; -} - -/** - * cxgb4_ofld_send - send an offload packet - * @dev: the net device - * @skb: the packet - * - * Sends an offload packet. This is an exported version of @t4_ofld_send, - * intended for ULDs. - */ -int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) -{ - return t4_ofld_send(netdev2adap(dev), skb); -} -EXPORT_SYMBOL(cxgb4_ofld_send); - -static inline void copy_frags(struct skb_shared_info *ssi, - const struct pkt_gl *gl, unsigned int offset) -{ - unsigned int n; - - /* usually there's just one frag */ - ssi->frags[0].page = gl->frags[0].page; - ssi->frags[0].page_offset = gl->frags[0].page_offset + offset; - ssi->frags[0].size = gl->frags[0].size - offset; - ssi->nr_frags = gl->nfrags; - n = gl->nfrags - 1; - if (n) - memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t)); - - /* get a reference to the last page, we don't own it */ - get_page(gl->frags[n].page); -} - -/** - * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list - * @gl: the gather list - * @skb_len: size of sk_buff main body if it carries fragments - * @pull_len: amount of data to move to the sk_buff's main body - * - * Builds an sk_buff from the given packet gather list. Returns the - * sk_buff or %NULL if sk_buff allocation failed. - */ -struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, - unsigned int skb_len, unsigned int pull_len) -{ - struct sk_buff *skb; - - /* - * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer - * size, which is expected since buffers are at least PAGE_SIZEd. - * In this case packets up to RX_COPY_THRES have only one fragment. - */ - if (gl->tot_len <= RX_COPY_THRES) { - skb = dev_alloc_skb(gl->tot_len); - if (unlikely(!skb)) - goto out; - __skb_put(skb, gl->tot_len); - skb_copy_to_linear_data(skb, gl->va, gl->tot_len); - } else { - skb = dev_alloc_skb(skb_len); - if (unlikely(!skb)) - goto out; - __skb_put(skb, pull_len); - skb_copy_to_linear_data(skb, gl->va, pull_len); - - copy_frags(skb_shinfo(skb), gl, pull_len); - skb->len = gl->tot_len; - skb->data_len = skb->len - pull_len; - skb->truesize += skb->data_len; - } -out: return skb; -} -EXPORT_SYMBOL(cxgb4_pktgl_to_skb); - -/** - * t4_pktgl_free - free a packet gather list - * @gl: the gather list - * - * Releases the pages of a packet gather list. We do not own the last - * page on the list and do not free it. - */ -static void t4_pktgl_free(const struct pkt_gl *gl) -{ - int n; - const skb_frag_t *p; - - for (p = gl->frags, n = gl->nfrags - 1; n--; p++) - put_page(p->page); -} - -/* - * Process an MPS trace packet. Give it an unused protocol number so it won't - * be delivered to anyone and send it to the stack for capture. 
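- * Concretely, skb->protocol is set to htons(0xffff) below, a value no
- * protocol handler registers for, so only packet taps see the frame.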
- */ -static noinline int handle_trace_pkt(struct adapter *adap, - const struct pkt_gl *gl) -{ - struct sk_buff *skb; - struct cpl_trace_pkt *p; - - skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); - if (unlikely(!skb)) { - t4_pktgl_free(gl); - return 0; - } - - p = (struct cpl_trace_pkt *)skb->data; - __skb_pull(skb, sizeof(*p)); - skb_reset_mac_header(skb); - skb->protocol = htons(0xffff); - skb->dev = adap->port[0]; - netif_receive_skb(skb); - return 0; -} - -static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, - const struct cpl_rx_pkt *pkt) -{ - int ret; - struct sk_buff *skb; - - skb = napi_get_frags(&rxq->rspq.napi); - if (unlikely(!skb)) { - t4_pktgl_free(gl); - rxq->stats.rx_drops++; - return; - } - - copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD); - skb->len = gl->tot_len - RX_PKT_PAD; - skb->data_len = skb->len; - skb->truesize += skb->data_len; - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb_record_rx_queue(skb, rxq->rspq.idx); - if (rxq->rspq.netdev->features & NETIF_F_RXHASH) - skb->rxhash = (__force u32)pkt->rsshdr.hash_val; - - if (unlikely(pkt->vlan_ex)) { - __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan)); - rxq->stats.vlan_ex++; - } - ret = napi_gro_frags(&rxq->rspq.napi); - if (ret == GRO_HELD) - rxq->stats.lro_pkts++; - else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) - rxq->stats.lro_merged++; - rxq->stats.pkts++; - rxq->stats.rx_cso++; -} - -/** - * t4_ethrx_handler - process an ingress ethernet packet - * @q: the response queue that received the packet - * @rsp: the response queue descriptor holding the RX_PKT message - * @si: the gather list of packet fragments - * - * Process an ingress ethernet packet and deliver it to the stack. - */ -int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, - const struct pkt_gl *si) -{ - bool csum_ok; - struct sk_buff *skb; - const struct cpl_rx_pkt *pkt; - struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); - - if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) - return handle_trace_pkt(q->adap, si); - - pkt = (const struct cpl_rx_pkt *)rsp; - csum_ok = pkt->csum_calc && !pkt->err_vec; - if ((pkt->l2info & htonl(RXF_TCP)) && - (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { - do_gro(rxq, si, pkt); - return 0; - } - - skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); - if (unlikely(!skb)) { - t4_pktgl_free(si); - rxq->stats.rx_drops++; - return 0; - } - - __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ - skb->protocol = eth_type_trans(skb, q->netdev); - skb_record_rx_queue(skb, q->idx); - if (skb->dev->features & NETIF_F_RXHASH) - skb->rxhash = (__force u32)pkt->rsshdr.hash_val; - - rxq->stats.pkts++; - - if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) && - (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { - if (!pkt->ip_frag) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - rxq->stats.rx_cso++; - } else if (pkt->l2info & htonl(RXF_IP)) { - __sum16 c = (__force __sum16)pkt->csum; - skb->csum = csum_unfold(c); - skb->ip_summed = CHECKSUM_COMPLETE; - rxq->stats.rx_cso++; - } - } else - skb_checksum_none_assert(skb); - - if (unlikely(pkt->vlan_ex)) { - __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan)); - rxq->stats.vlan_ex++; - } - netif_receive_skb(skb); - return 0; -} - -/** - * restore_rx_bufs - put back a packet's Rx buffers - * @si: the packet gather list - * @q: the SGE free list - * @frags: number of FL buffers to restore - * - * Puts back on an FL the Rx buffers associated with @si. 
The buffers
- * have already been unmapped and are left unmapped; we mark them so to
- * prevent further unmapping attempts.
- *
- * This function undoes a series of @unmap_rx_buf calls when we find out
- * that the current packet can't be processed right away after all and we
- * need to come back to it later.  This is a very rare event and there's
- * no effort to make this particularly efficient.
- */
-static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
-			    int frags)
-{
-	struct rx_sw_desc *d;
-
-	while (frags--) {
-		if (q->cidx == 0)
-			q->cidx = q->size - 1;
-		else
-			q->cidx--;
-		d = &q->sdesc[q->cidx];
-		d->page = si->frags[frags].page;
-		d->dma_addr |= RX_UNMAPPED_BUF;
-		q->avail++;
-	}
-}
-
-/**
- * is_new_response - check if a response is newly written
- * @r: the response descriptor
- * @q: the response queue
- *
- * Returns true if a response descriptor contains a yet unprocessed
- * response.
- */
-static inline bool is_new_response(const struct rsp_ctrl *r,
-				   const struct sge_rspq *q)
-{
-	return RSPD_GEN(r->type_gen) == q->gen;
-}
-
-/**
- * rspq_next - advance to the next entry in a response queue
- * @q: the queue
- *
- * Updates the state of a response queue to advance it to the next entry.
- */
-static inline void rspq_next(struct sge_rspq *q)
-{
-	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
-	if (unlikely(++q->cidx == q->size)) {
-		q->cidx = 0;
-		q->gen ^= 1;
-		q->cur_desc = q->desc;
-	}
-}
-
-/**
- * process_responses - process responses from an SGE response queue
- * @q: the ingress queue to process
- * @budget: how many responses can be processed in this round
- *
- * Process responses from an SGE response queue up to the supplied budget.
- * Responses include received packets as well as control messages from FW
- * or HW.
- *
- * Additionally choose the interrupt holdoff time for the next interrupt
- * on this queue.  If the system is under memory shortage use a fairly
- * long delay to help recovery.
- */
-static int process_responses(struct sge_rspq *q, int budget)
-{
-	int ret, rsp_type;
-	int budget_left = budget;
-	const struct rsp_ctrl *rc;
-	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
-
-	while (likely(budget_left)) {
-		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
-		if (!is_new_response(rc, q))
-			break;
-
-		rmb();
-		rsp_type = RSPD_TYPE(rc->type_gen);
-		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
-			skb_frag_t *fp;
-			struct pkt_gl si;
-			const struct rx_sw_desc *rsd;
-			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
-
-			if (len & RSPD_NEWBUF) {
-				if (likely(q->offset > 0)) {
-					free_rx_bufs(q->adap, &rxq->fl, 1);
-					q->offset = 0;
-				}
-				len = RSPD_LEN(len);
-			}
-			si.tot_len = len;
-
-			/* gather packet fragments */
-			for (frags = 0, fp = si.frags; ; frags++, fp++) {
-				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
-				bufsz = get_buf_size(rsd);
-				fp->page = rsd->page;
-				fp->page_offset = q->offset;
-				fp->size = min(bufsz, len);
-				len -= fp->size;
-				if (!len)
-					break;
-				unmap_rx_buf(q->adap, &rxq->fl);
-			}
-
-			/*
-			 * Last buffer remains mapped so explicitly make it
-			 * coherent for CPU access.
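-			 * Only a sync, not an unmap: the final buffer may be
-			 * only partially consumed (q->offset advances below)
-			 * and can still receive further DMA.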
- */
-			dma_sync_single_for_cpu(q->adap->pdev_dev,
-						get_buf_addr(rsd),
-						fp->size, DMA_FROM_DEVICE);
-
-			si.va = page_address(si.frags[0].page) +
-				si.frags[0].page_offset;
-			prefetch(si.va);
-
-			si.nfrags = frags + 1;
-			ret = q->handler(q, q->cur_desc, &si);
-			if (likely(ret == 0))
-				q->offset += ALIGN(fp->size, FL_ALIGN);
-			else
-				restore_rx_bufs(&si, &rxq->fl, frags);
-		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
-			ret = q->handler(q, q->cur_desc, NULL);
-		} else {
-			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
-		}
-
-		if (unlikely(ret)) {
-			/* couldn't process descriptor, back off for recovery */
-			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
-			break;
-		}
-
-		rspq_next(q);
-		budget_left--;
-	}
-
-	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
-		__refill_fl(q->adap, &rxq->fl);
-	return budget - budget_left;
-}
-
-/**
- * napi_rx_handler - the NAPI handler for Rx processing
- * @napi: the napi instance
- * @budget: how many packets we can process in this round
- *
- * Handler for new data events when using NAPI.  This does not need any
- * locking or protection from interrupts as data interrupts are off at
- * this point and other adapter interrupts do not interfere (the latter
- * is not a concern at all with MSI-X as non-data interrupts then have
- * a separate handler).
- */
-static int napi_rx_handler(struct napi_struct *napi, int budget)
-{
-	unsigned int params;
-	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
-	int work_done = process_responses(q, budget);
-
-	if (likely(work_done < budget)) {
-		napi_complete(napi);
-		params = q->next_intr_params;
-		q->next_intr_params = q->intr_params;
-	} else
-		params = QINTR_TIMER_IDX(7);
-
-	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
-		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
-	return work_done;
-}
-
-/*
- * The MSI-X interrupt handler for an SGE response queue.
- */
-irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
-{
-	struct sge_rspq *q = cookie;
-
-	napi_schedule(&q->napi);
-	return IRQ_HANDLED;
-}
-
-/*
- * Process the indirect interrupt entries in the interrupt queue and kick off
- * NAPI for each queue that has generated an entry.
- */
-static unsigned int process_intrq(struct adapter *adap)
-{
-	unsigned int credits;
-	const struct rsp_ctrl *rc;
-	struct sge_rspq *q = &adap->sge.intrq;
-
-	spin_lock(&adap->sge.intrq_lock);
-	for (credits = 0; ; credits++) {
-		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
-		if (!is_new_response(rc, q))
-			break;
-
-		rmb();
-		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
-			unsigned int qid = ntohl(rc->pldbuflen_qid);
-
-			qid -= adap->sge.ingr_start;
-			napi_schedule(&adap->sge.ingr_map[qid]->napi);
-		}
-
-		rspq_next(q);
-	}
-
-	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
-		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
-	spin_unlock(&adap->sge.intrq_lock);
-	return credits;
-}
-
-/*
- * The MSI interrupt handler, which handles data events from SGE response queues
- * as well as error and other async events as they all use the same MSI vector.
- */
-static irqreturn_t t4_intr_msi(int irq, void *cookie)
-{
-	struct adapter *adap = cookie;
-
-	t4_slow_intr_handler(adap);
-	process_intrq(adap);
-	return IRQ_HANDLED;
-}
-
-/*
- * Interrupt handler for legacy INTx interrupts.
- * Handles data events from SGE response queues as well as error and other
- * async events as they all use the same interrupt line.
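- * Returns IRQ_HANDLED only if either handler found work, so an
- * interrupt raised by another device on a shared line is reported
- * back as IRQ_NONE.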
- */ -static irqreturn_t t4_intr_intx(int irq, void *cookie) -{ - struct adapter *adap = cookie; - - t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0); - if (t4_slow_intr_handler(adap) | process_intrq(adap)) - return IRQ_HANDLED; - return IRQ_NONE; /* probably shared interrupt */ -} - -/** - * t4_intr_handler - select the top-level interrupt handler - * @adap: the adapter - * - * Selects the top-level interrupt handler based on the type of interrupts - * (MSI-X, MSI, or INTx). - */ -irq_handler_t t4_intr_handler(struct adapter *adap) -{ - if (adap->flags & USING_MSIX) - return t4_sge_intr_msix; - if (adap->flags & USING_MSI) - return t4_intr_msi; - return t4_intr_intx; -} - -static void sge_rx_timer_cb(unsigned long data) -{ - unsigned long m; - unsigned int i, cnt[2]; - struct adapter *adap = (struct adapter *)data; - struct sge *s = &adap->sge; - - for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) - for (m = s->starving_fl[i]; m; m &= m - 1) { - struct sge_eth_rxq *rxq; - unsigned int id = __ffs(m) + i * BITS_PER_LONG; - struct sge_fl *fl = s->egr_map[id]; - - clear_bit(id, s->starving_fl); - smp_mb__after_clear_bit(); - - if (fl_starving(fl)) { - rxq = container_of(fl, struct sge_eth_rxq, fl); - if (napi_reschedule(&rxq->rspq.napi)) - fl->starving++; - else - set_bit(id, s->starving_fl); - } - } - - t4_write_reg(adap, SGE_DEBUG_INDEX, 13); - cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); - cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); - - for (i = 0; i < 2; i++) - if (cnt[i] >= s->starve_thres) { - if (s->idma_state[i] || cnt[i] == 0xffffffff) - continue; - s->idma_state[i] = 1; - t4_write_reg(adap, SGE_DEBUG_INDEX, 11); - m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16); - dev_warn(adap->pdev_dev, - "SGE idma%u starvation detected for " - "queue %lu\n", i, m & 0xffff); - } else if (s->idma_state[i]) - s->idma_state[i] = 0; - - mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); -} - -static void sge_tx_timer_cb(unsigned long data) -{ - unsigned long m; - unsigned int i, budget; - struct adapter *adap = (struct adapter *)data; - struct sge *s = &adap->sge; - - for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) - for (m = s->txq_maperr[i]; m; m &= m - 1) { - unsigned long id = __ffs(m) + i * BITS_PER_LONG; - struct sge_ofld_txq *txq = s->egr_map[id]; - - clear_bit(id, s->txq_maperr); - tasklet_schedule(&txq->qresume_tsk); - } - - budget = MAX_TIMER_TX_RECLAIM; - i = s->ethtxq_rover; - do { - struct sge_eth_txq *q = &s->ethtxq[i]; - - if (q->q.in_use && - time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && - __netif_tx_trylock(q->txq)) { - int avail = reclaimable(&q->q); - - if (avail) { - if (avail > budget) - avail = budget; - - free_tx_desc(adap, &q->q, avail, true); - q->q.in_use -= avail; - budget -= avail; - } - __netif_tx_unlock(q->txq); - } - - if (++i >= s->ethqsets) - i = 0; - } while (budget && i != s->ethtxq_rover); - s->ethtxq_rover = i; - mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); -} - -int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, - struct net_device *dev, int intr_idx, - struct sge_fl *fl, rspq_handler_t hnd) -{ - int ret, flsz = 0; - struct fw_iq_cmd c; - struct port_info *pi = netdev_priv(dev); - - /* Size needs to be multiple of 16, including status entry. 
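(E.g. an iq->size of 1000 is rounded up to 1008 below.)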
*/ - iq->size = roundup(iq->size, 16); - - iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, - &iq->phys_addr, NULL, 0, NUMA_NO_NODE); - if (!iq->desc) - return -ENOMEM; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | - FW_LEN16(c)); - c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | - FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) | - FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) | - FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : - -intr_idx - 1)); - c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) | - FW_IQ_CMD_IQGTSMODE | - FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | - FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); - c.iqsize = htons(iq->size); - c.iqaddr = cpu_to_be64(iq->phys_addr); - - if (fl) { - fl->size = roundup(fl->size, 8); - fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), - sizeof(struct rx_sw_desc), &fl->addr, - &fl->sdesc, STAT_LEN, NUMA_NO_NODE); - if (!fl->desc) - goto fl_nomem; - - flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); - c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | - FW_IQ_CMD_FL0FETCHRO(1) | - FW_IQ_CMD_FL0DATARO(1) | - FW_IQ_CMD_FL0PADEN); - c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | - FW_IQ_CMD_FL0FBMAX(3)); - c.fl0size = htons(flsz); - c.fl0addr = cpu_to_be64(fl->addr); - } - - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); - if (ret) - goto err; - - netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); - iq->cur_desc = iq->desc; - iq->cidx = 0; - iq->gen = 1; - iq->next_intr_params = iq->intr_params; - iq->cntxt_id = ntohs(c.iqid); - iq->abs_id = ntohs(c.physiqid); - iq->size--; /* subtract status entry */ - iq->adap = adap; - iq->netdev = dev; - iq->handler = hnd; - - /* set offset to -1 to distinguish ingress queues without FL */ - iq->offset = fl ? 
0 : -1; - - adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; - - if (fl) { - fl->cntxt_id = ntohs(c.fl0id); - fl->avail = fl->pend_cred = 0; - fl->pidx = fl->cidx = 0; - fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; - adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; - refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); - } - return 0; - -fl_nomem: - ret = -ENOMEM; -err: - if (iq->desc) { - dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, - iq->desc, iq->phys_addr); - iq->desc = NULL; - } - if (fl && fl->desc) { - kfree(fl->sdesc); - fl->sdesc = NULL; - dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), - fl->desc, fl->addr); - fl->desc = NULL; - } - return ret; -} - -static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) -{ - q->in_use = 0; - q->cidx = q->pidx = 0; - q->stops = q->restarts = 0; - q->stat = (void *)&q->desc[q->size]; - q->cntxt_id = id; - adap->sge.egr_map[id - adap->sge.egr_start] = q; -} - -int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, - struct net_device *dev, struct netdev_queue *netdevq, - unsigned int iqid) -{ - int ret, nentries; - struct fw_eq_eth_cmd c; - struct port_info *pi = netdev_priv(dev); - - /* Add status entries */ - nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); - - txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, - sizeof(struct tx_desc), sizeof(struct tx_sw_desc), - &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN, - netdev_queue_numa_node_read(netdevq)); - if (!txq->q.desc) - return -ENOMEM; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | - FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); - c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); - c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | - FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_ETH_CMD_FETCHRO(1) | - FW_EQ_ETH_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | - FW_EQ_ETH_CMD_FBMAX(3) | - FW_EQ_ETH_CMD_CIDXFTHRESH(5) | - FW_EQ_ETH_CMD_EQSIZE(nentries)); - c.eqaddr = cpu_to_be64(txq->q.phys_addr); - - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); - if (ret) { - kfree(txq->q.sdesc); - txq->q.sdesc = NULL; - dma_free_coherent(adap->pdev_dev, - nentries * sizeof(struct tx_desc), - txq->q.desc, txq->q.phys_addr); - txq->q.desc = NULL; - return ret; - } - - init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd))); - txq->txq = netdevq; - txq->tso = txq->tx_cso = txq->vlan_ins = 0; - txq->mapping_err = 0; - return 0; -} - -int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, - struct net_device *dev, unsigned int iqid, - unsigned int cmplqid) -{ - int ret, nentries; - struct fw_eq_ctrl_cmd c; - struct port_info *pi = netdev_priv(dev); - - /* Add status entries */ - nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); - - txq->q.desc = alloc_ring(adap->pdev_dev, nentries, - sizeof(struct tx_desc), 0, &txq->q.phys_addr, - NULL, 0, NUMA_NO_NODE); - if (!txq->q.desc) - return -ENOMEM; - - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_CTRL_CMD_PFN(adap->fn) | - FW_EQ_CTRL_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | - FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); - c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); - c.physeqid_pkd = htonl(0); - c.fetchszm_to_iqid = 
htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | - FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_CTRL_CMD_FETCHRO | - FW_EQ_CTRL_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | - FW_EQ_CTRL_CMD_FBMAX(3) | - FW_EQ_CTRL_CMD_CIDXFTHRESH(5) | - FW_EQ_CTRL_CMD_EQSIZE(nentries)); - c.eqaddr = cpu_to_be64(txq->q.phys_addr); - - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); - if (ret) { - dma_free_coherent(adap->pdev_dev, - nentries * sizeof(struct tx_desc), - txq->q.desc, txq->q.phys_addr); - txq->q.desc = NULL; - return ret; - } - - init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid))); - txq->adap = adap; - skb_queue_head_init(&txq->sendq); - tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); - txq->full = 0; - return 0; -} - -int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, - struct net_device *dev, unsigned int iqid) -{ - int ret, nentries; - struct fw_eq_ofld_cmd c; - struct port_info *pi = netdev_priv(dev); - - /* Add status entries */ - nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); - - txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, - sizeof(struct tx_desc), sizeof(struct tx_sw_desc), - &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN, - NUMA_NO_NODE); - if (!txq->q.desc) - return -ENOMEM; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_EQ_OFLD_CMD_PFN(adap->fn) | - FW_EQ_OFLD_CMD_VFN(0)); - c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | - FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); - c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | - FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | - FW_EQ_OFLD_CMD_FETCHRO(1) | - FW_EQ_OFLD_CMD_IQID(iqid)); - c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | - FW_EQ_OFLD_CMD_FBMAX(3) | - FW_EQ_OFLD_CMD_CIDXFTHRESH(5) | - FW_EQ_OFLD_CMD_EQSIZE(nentries)); - c.eqaddr = cpu_to_be64(txq->q.phys_addr); - - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); - if (ret) { - kfree(txq->q.sdesc); - txq->q.sdesc = NULL; - dma_free_coherent(adap->pdev_dev, - nentries * sizeof(struct tx_desc), - txq->q.desc, txq->q.phys_addr); - txq->q.desc = NULL; - return ret; - } - - init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd))); - txq->adap = adap; - skb_queue_head_init(&txq->sendq); - tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); - txq->full = 0; - txq->mapping_err = 0; - return 0; -} - -static void free_txq(struct adapter *adap, struct sge_txq *q) -{ - dma_free_coherent(adap->pdev_dev, - q->size * sizeof(struct tx_desc) + STAT_LEN, - q->desc, q->phys_addr); - q->cntxt_id = 0; - q->sdesc = NULL; - q->desc = NULL; -} - -static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, - struct sge_fl *fl) -{ - unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; - - adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; - t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP, - rq->cntxt_id, fl_id, 0xffff); - dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, - rq->desc, rq->phys_addr); - netif_napi_del(&rq->napi); - rq->netdev = NULL; - rq->cntxt_id = rq->abs_id = 0; - rq->desc = NULL; - - if (fl) { - free_rx_bufs(adap, fl, fl->avail); - dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN, - fl->desc, fl->addr); - kfree(fl->sdesc); - fl->sdesc = NULL; - fl->cntxt_id = 0; - fl->desc = NULL; - } -} - -/** - * t4_free_sge_resources - free SGE resources - * @adap: the adapter - * - * Frees resources used by the SGE queue sets. 
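- *
- * This undoes the t4_sge_alloc_* calls above; queues whose desc
- * pointer is still NULL were never allocated and are skipped.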
- */ -void t4_free_sge_resources(struct adapter *adap) -{ - int i; - struct sge_eth_rxq *eq = adap->sge.ethrxq; - struct sge_eth_txq *etq = adap->sge.ethtxq; - struct sge_ofld_rxq *oq = adap->sge.ofldrxq; - - /* clean up Ethernet Tx/Rx queues */ - for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { - if (eq->rspq.desc) - free_rspq_fl(adap, &eq->rspq, &eq->fl); - if (etq->q.desc) { - t4_eth_eq_free(adap, adap->fn, adap->fn, 0, - etq->q.cntxt_id); - free_tx_desc(adap, &etq->q, etq->q.in_use, true); - kfree(etq->q.sdesc); - free_txq(adap, &etq->q); - } - } - - /* clean up RDMA and iSCSI Rx queues */ - for (i = 0; i < adap->sge.ofldqsets; i++, oq++) { - if (oq->rspq.desc) - free_rspq_fl(adap, &oq->rspq, &oq->fl); - } - for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) { - if (oq->rspq.desc) - free_rspq_fl(adap, &oq->rspq, &oq->fl); - } - - /* clean up offload Tx queues */ - for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { - struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; - - if (q->q.desc) { - tasklet_kill(&q->qresume_tsk); - t4_ofld_eq_free(adap, adap->fn, adap->fn, 0, - q->q.cntxt_id); - free_tx_desc(adap, &q->q, q->q.in_use, false); - kfree(q->q.sdesc); - __skb_queue_purge(&q->sendq); - free_txq(adap, &q->q); - } - } - - /* clean up control Tx queues */ - for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { - struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; - - if (cq->q.desc) { - tasklet_kill(&cq->qresume_tsk); - t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0, - cq->q.cntxt_id); - __skb_queue_purge(&cq->sendq); - free_txq(adap, &cq->q); - } - } - - if (adap->sge.fw_evtq.desc) - free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); - - if (adap->sge.intrq.desc) - free_rspq_fl(adap, &adap->sge.intrq, NULL); - - /* clear the reverse egress queue map */ - memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); -} - -void t4_sge_start(struct adapter *adap) -{ - adap->sge.ethtxq_rover = 0; - mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); - mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); -} - -/** - * t4_sge_stop - disable SGE operation - * @adap: the adapter - * - * Stop tasklets and timers associated with the DMA engine. Note that - * this is effective only if measures have been taken to disable any HW - * events that may restart them. - */ -void t4_sge_stop(struct adapter *adap) -{ - int i; - struct sge *s = &adap->sge; - - if (in_interrupt()) /* actions below require waiting */ - return; - - if (s->rx_timer.function) - del_timer_sync(&s->rx_timer); - if (s->tx_timer.function) - del_timer_sync(&s->tx_timer); - - for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) { - struct sge_ofld_txq *q = &s->ofldtxq[i]; - - if (q->q.desc) - tasklet_kill(&q->qresume_tsk); - } - for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { - struct sge_ctrl_txq *cq = &s->ctrlq[i]; - - if (cq->q.desc) - tasklet_kill(&cq->qresume_tsk); - } -} - -/** - * t4_sge_init - initialize SGE - * @adap: the adapter - * - * Performs SGE initialization needed every time after a chip reset. - * We do not initialize any of the queues here, instead the driver - * top-level must request them individually. - */ -void t4_sge_init(struct adapter *adap) -{ - unsigned int i, v; - struct sge *s = &adap->sge; - unsigned int fl_align_log = ilog2(FL_ALIGN); - - t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK | - INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE, - INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | - RXPKTCPLMODE | - (STAT_LEN == 128 ? 
EGRSTATUSPAGESIZE : 0)); - - for (i = v = 0; i < 32; i += 4) - v |= (PAGE_SHIFT - 10) << i; - t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v); - t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); -#if FL_PG_ORDER > 0 - t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); -#endif - t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, - THRESHOLD_0(s->counter_val[0]) | - THRESHOLD_1(s->counter_val[1]) | - THRESHOLD_2(s->counter_val[2]) | - THRESHOLD_3(s->counter_val[3])); - t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1, - TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | - TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); - t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3, - TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) | - TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3]))); - t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5, - TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) | - TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5]))); - setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); - setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); - s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ - s->idma_state[0] = s->idma_state[1] = 0; - spin_lock_init(&s->intrq_lock); -} diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c deleted file mode 100644 index d1ec111..0000000 --- a/drivers/net/cxgb4/t4_hw.c +++ /dev/null @@ -1,2856 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-#include <linux/init.h>
-#include <linux/delay.h>
-#include "cxgb4.h"
-#include "t4_regs.h"
-#include "t4fw_api.h"
-
-/**
- * t4_wait_op_done_val - wait until an operation is completed
- * @adapter: the adapter performing the operation
- * @reg: the register to check for completion
- * @mask: a single-bit field within @reg that indicates completion
- * @polarity: the value of the field when the operation is completed
- * @attempts: number of check iterations
- * @delay: delay in usecs between iterations
- * @valp: where to store the value of the register at completion time
- *
- * Wait until an operation is completed by checking a bit in a register
- * up to @attempts times.  If @valp is not NULL the value of the register
- * at the time it indicated completion is stored there.  Returns 0 if the
- * operation completes and -EAGAIN otherwise.
- */
-static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
-			       int polarity, int attempts, int delay, u32 *valp)
-{
-	while (1) {
-		u32 val = t4_read_reg(adapter, reg);
-
-		if (!!(val & mask) == polarity) {
-			if (valp)
-				*valp = val;
-			return 0;
-		}
-		if (--attempts == 0)
-			return -EAGAIN;
-		if (delay)
-			udelay(delay);
-	}
-}
-
-static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
-				  int polarity, int attempts, int delay)
-{
-	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
-				   delay, NULL);
-}
-
-/**
- * t4_set_reg_field - set a register field to a value
- * @adapter: the adapter to program
- * @addr: the register address
- * @mask: specifies the portion of the register to modify
- * @val: the new value for the register field
- *
- * Sets a register field specified by the supplied mask to the
- * given value.
- */
-void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
-		      u32 val)
-{
-	u32 v = t4_read_reg(adapter, addr) & ~mask;
-
-	t4_write_reg(adapter, addr, v | val);
-	(void) t4_read_reg(adapter, addr);      /* flush */
-}
-
-/**
- * t4_read_indirect - read indirectly addressed registers
- * @adap: the adapter
- * @addr_reg: register holding the indirect address
- * @data_reg: register holding the value of the indirect register
- * @vals: where the read register values are stored
- * @nregs: how many indirect registers to read
- * @start_idx: index of first indirect register to read
- *
- * Reads registers that are accessed indirectly through an address/data
- * register pair.
- */
-static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
-			     unsigned int data_reg, u32 *vals,
-			     unsigned int nregs, unsigned int start_idx)
-{
-	while (nregs--) {
-		t4_write_reg(adap, addr_reg, start_idx);
-		*vals++ = t4_read_reg(adap, data_reg);
-		start_idx++;
-	}
-}
-
-/*
- * Get the reply to a mailbox command and store it in @rpl in big-endian order.
- */
-static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
-			 u32 mbox_addr)
-{
-	for ( ; nflit; nflit--, mbox_addr += 8)
-		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
-}
-
-/*
- * Handle a FW assertion reported in a mailbox.
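- * The assertion details arrive as a struct fw_debug_cmd in the mailbox
- * data registers; the file, line and two values are logged via
- * dev_alert below.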
- */ -static void fw_asrt(struct adapter *adap, u32 mbox_addr) -{ - struct fw_debug_cmd asrt; - - get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); - dev_alert(adap->pdev_dev, - "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", - asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line), - ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y)); -} - -static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) -{ - dev_err(adap->pdev_dev, - "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, - (unsigned long long)t4_read_reg64(adap, data_reg), - (unsigned long long)t4_read_reg64(adap, data_reg + 8), - (unsigned long long)t4_read_reg64(adap, data_reg + 16), - (unsigned long long)t4_read_reg64(adap, data_reg + 24), - (unsigned long long)t4_read_reg64(adap, data_reg + 32), - (unsigned long long)t4_read_reg64(adap, data_reg + 40), - (unsigned long long)t4_read_reg64(adap, data_reg + 48), - (unsigned long long)t4_read_reg64(adap, data_reg + 56)); -} - -/** - * t4_wr_mbox_meat - send a command to FW through the given mailbox - * @adap: the adapter - * @mbox: index of the mailbox to use - * @cmd: the command to write - * @size: command length in bytes - * @rpl: where to optionally store the reply - * @sleep_ok: if true we may sleep while awaiting command completion - * - * Sends the given command to FW through the selected mailbox and waits - * for the FW to execute the command. If @rpl is not %NULL it is used to - * store the FW's reply to the command. The command and its optional - * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms - * to respond. @sleep_ok determines whether we may sleep while awaiting - * the response. If sleeping is allowed we use progressive backoff - * otherwise we spin. - * - * The return value is 0 on success or a negative errno on failure. A - * failure can happen either because we are not able to execute the - * command or FW executes it but signals an error. In the latter case - * the return value is the error code indicated by FW (negated). - */ -int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, - void *rpl, bool sleep_ok) -{ - static const int delay[] = { - 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 - }; - - u32 v; - u64 res; - int i, ms, delay_idx; - const __be64 *p = cmd; - u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA); - u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL); - - if ((size & 15) || size > MBOX_LEN) - return -EINVAL; - - /* - * If the device is off-line, as in EEH, commands will time out. - * Fail them early so we don't waste time waiting. - */ - if (adap->pdev->error_state != pci_channel_io_normal) - return -EIO; - - v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); - for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) - v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); - - if (v != MBOX_OWNER_DRV) - return v ? 
-EBUSY : -ETIMEDOUT; - - for (i = 0; i < size; i += 8) - t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); - - t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); - t4_read_reg(adap, ctl_reg); /* flush write */ - - delay_idx = 0; - ms = delay[0]; - - for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { - if (sleep_ok) { - ms = delay[delay_idx]; /* last element may repeat */ - if (delay_idx < ARRAY_SIZE(delay) - 1) - delay_idx++; - msleep(ms); - } else - mdelay(ms); - - v = t4_read_reg(adap, ctl_reg); - if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { - if (!(v & MBMSGVALID)) { - t4_write_reg(adap, ctl_reg, 0); - continue; - } - - res = t4_read_reg64(adap, data_reg); - if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) { - fw_asrt(adap, data_reg); - res = FW_CMD_RETVAL(EIO); - } else if (rpl) - get_mbox_rpl(adap, rpl, size / 8, data_reg); - - if (FW_CMD_RETVAL_GET((int)res)) - dump_mbox(adap, mbox, data_reg); - t4_write_reg(adap, ctl_reg, 0); - return -FW_CMD_RETVAL_GET((int)res); - } - } - - dump_mbox(adap, mbox, data_reg); - dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", - *(const u8 *)cmd, mbox); - return -ETIMEDOUT; -} - -/** - * t4_mc_read - read from MC through backdoor accesses - * @adap: the adapter - * @addr: address of first byte requested - * @data: 64 bytes of data containing the requested address - * @ecc: where to store the corresponding 64-bit ECC word - * - * Read 64 bytes of data from MC starting at a 64-byte-aligned address - * that covers the requested address @addr. If @parity is not %NULL it - * is assigned the 64-bit ECC word for the read data. - */ -int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) -{ - int i; - - if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST) - return -EBUSY; - t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); - t4_write_reg(adap, MC_BIST_CMD_LEN, 64); - t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc); - t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST | - BIST_CMD_GAP(1)); - i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1); - if (i) - return i; - -#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) - - for (i = 15; i >= 0; i--) - *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); - if (ecc) - *ecc = t4_read_reg64(adap, MC_DATA(16)); -#undef MC_DATA - return 0; -} - -/** - * t4_edc_read - read from EDC through backdoor accesses - * @adap: the adapter - * @idx: which EDC to access - * @addr: address of first byte requested - * @data: 64 bytes of data containing the requested address - * @ecc: where to store the corresponding 64-bit ECC word - * - * Read 64 bytes of data from EDC starting at a 64-byte-aligned address - * that covers the requested address @addr. If @parity is not %NULL it - * is assigned the 64-bit ECC word for the read data. 
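- *
- * Illustrative call (hypothetical values): reading the 64-byte block
- * containing EDC0 byte address 0x1234:
- *
- *	__be32 buf[16];
- *	u64 ecc;
- *	ret = t4_edc_read(adap, 0, 0x1234, buf, &ecc);
- *
- * The low 6 address bits are masked off internally (addr & ~0x3fU).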
- */ -int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) -{ - int i; - - idx *= EDC_STRIDE; - if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST) - return -EBUSY; - t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); - t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); - t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); - t4_write_reg(adap, EDC_BIST_CMD + idx, - BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); - i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); - if (i) - return i; - -#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) - - for (i = 15; i >= 0; i--) - *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); - if (ecc) - *ecc = t4_read_reg64(adap, EDC_DATA(16)); -#undef EDC_DATA - return 0; -} - -#define EEPROM_STAT_ADDR 0x7bfc -#define VPD_BASE 0 -#define VPD_LEN 512 - -/** - * t4_seeprom_wp - enable/disable EEPROM write protection - * @adapter: the adapter - * @enable: whether to enable or disable write protection - * - * Enables or disables write protection on the serial EEPROM. - */ -int t4_seeprom_wp(struct adapter *adapter, bool enable) -{ - unsigned int v = enable ? 0xc : 0; - int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); - return ret < 0 ? ret : 0; -} - -/** - * get_vpd_params - read VPD parameters from VPD EEPROM - * @adapter: adapter to read - * @p: where to store the parameters - * - * Reads card parameters stored in VPD EEPROM. - */ -static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) -{ - int i, ret; - int ec, sn; - u8 vpd[VPD_LEN], csum; - unsigned int vpdr_len, kw_offset, id_len; - - ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd); - if (ret < 0) - return ret; - - if (vpd[0] != PCI_VPD_LRDT_ID_STRING) { - dev_err(adapter->pdev_dev, "missing VPD ID string\n"); - return -EINVAL; - } - - id_len = pci_vpd_lrdt_size(vpd); - if (id_len > ID_LEN) - id_len = ID_LEN; - - i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA); - if (i < 0) { - dev_err(adapter->pdev_dev, "missing VPD-R section\n"); - return -EINVAL; - } - - vpdr_len = pci_vpd_lrdt_size(&vpd[i]); - kw_offset = i + PCI_VPD_LRDT_TAG_SIZE; - if (vpdr_len + kw_offset > VPD_LEN) { - dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len); - return -EINVAL; - } - -#define FIND_VPD_KW(var, name) do { \ - var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \ - if (var < 0) { \ - dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \ - return -EINVAL; \ - } \ - var += PCI_VPD_INFO_FLD_HDR_SIZE; \ -} while (0) - - FIND_VPD_KW(i, "RV"); - for (csum = 0; i >= 0; i--) - csum += vpd[i]; - - if (csum) { - dev_err(adapter->pdev_dev, - "corrupted VPD EEPROM, actual csum %u\n", csum); - return -EINVAL; - } - - FIND_VPD_KW(ec, "EC"); - FIND_VPD_KW(sn, "SN"); -#undef FIND_VPD_KW - - memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); - strim(p->id); - memcpy(p->ec, vpd + ec, EC_LEN); - strim(p->ec); - i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); - memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); - strim(p->sn); - return 0; -} - -/* serial flash and firmware constants */ -enum { - SF_ATTEMPTS = 10, /* max retries for SF operations */ - - /* flash command opcodes */ - SF_PROG_PAGE = 2, /* program page */ - SF_WR_DISABLE = 4, /* disable writes */ - SF_RD_STATUS = 5, /* read status register */ - SF_WR_ENABLE = 6, /* enable writes */ - SF_RD_DATA_FAST = 0xb, /* read flash */ - SF_RD_ID = 0x9f, /* read ID */ - SF_ERASE_SECTOR = 0xd8, /* erase sector */ - 
-	FW_MAX_SIZE = 512 * 1024,
-};
-
-/**
- * sf1_read - read data from the serial flash
- * @adapter: the adapter
- * @byte_cnt: number of bytes to read
- * @cont: whether another operation will be chained
- * @lock: whether to lock SF for PL access only
- * @valp: where to store the read data
- *
- * Reads up to 4 bytes of data from the serial flash.  The location of
- * the read needs to be specified prior to calling this by issuing the
- * appropriate commands to the serial flash.
- */
-static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
-		    int lock, u32 *valp)
-{
-	int ret;
-
-	if (!byte_cnt || byte_cnt > 4)
-		return -EINVAL;
-	if (t4_read_reg(adapter, SF_OP) & BUSY)
-		return -EBUSY;
-	cont = cont ? SF_CONT : 0;
-	lock = lock ? SF_LOCK : 0;
-	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
-	ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
-	if (!ret)
-		*valp = t4_read_reg(adapter, SF_DATA);
-	return ret;
-}
-
-/**
- * sf1_write - write data to the serial flash
- * @adapter: the adapter
- * @byte_cnt: number of bytes to write
- * @cont: whether another operation will be chained
- * @lock: whether to lock SF for PL access only
- * @val: value to write
- *
- * Writes up to 4 bytes of data to the serial flash.  The location of
- * the write needs to be specified prior to calling this by issuing the
- * appropriate commands to the serial flash.
- */
-static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
-		     int lock, u32 val)
-{
-	if (!byte_cnt || byte_cnt > 4)
-		return -EINVAL;
-	if (t4_read_reg(adapter, SF_OP) & BUSY)
-		return -EBUSY;
-	cont = cont ? SF_CONT : 0;
-	lock = lock ? SF_LOCK : 0;
-	t4_write_reg(adapter, SF_DATA, val);
-	t4_write_reg(adapter, SF_OP, lock |
-		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
-	return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
-}
-
-/**
- * flash_wait_op - wait for a flash operation to complete
- * @adapter: the adapter
- * @attempts: max number of polls of the status register
- * @delay: delay between polls in ms
- *
- * Wait for a flash operation to complete by polling the status register.
- */
-static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
-{
-	int ret;
-	u32 status;
-
-	while (1) {
-		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
-		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
-			return ret;
-		if (!(status & 1))
-			return 0;
-		if (--attempts == 0)
-			return -EAGAIN;
-		if (delay)
-			msleep(delay);
-	}
-}
-
-/**
- * t4_read_flash - read words from serial flash
- * @adapter: the adapter
- * @addr: the start address for the read
- * @nwords: how many 32-bit words to read
- * @data: where to store the read data
- * @byte_oriented: whether to store data as bytes or as words
- *
- * Read the specified number of 32-bit words from the serial flash.
- * If @byte_oriented is set the read data is stored as a byte array
- * (i.e., big-endian), otherwise as 32-bit words in the platform's
- * natural endianness.
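- *
- * For example (mirroring get_fw_version below), the 32-bit FW version
- * word is fetched with:
- *
- *	u32 vers;
- *	ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
- *			    offsetof(struct fw_hdr, fw_ver), 1, &vers, 0);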
- */
-static int t4_read_flash(struct adapter *adapter, unsigned int addr,
-			 unsigned int nwords, u32 *data, int byte_oriented)
-{
-	int ret;
-
-	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
-		return -EINVAL;
-
-	addr = swab32(addr) | SF_RD_DATA_FAST;
-
-	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
-	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
-		return ret;
-
-	for ( ; nwords; nwords--, data++) {
-		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
-		if (nwords == 1)
-			t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
-		if (ret)
-			return ret;
-		if (byte_oriented)
-			*data = htonl(*data);
-	}
-	return 0;
-}
-
-/**
- * t4_write_flash - write up to a page of data to the serial flash
- * @adapter: the adapter
- * @addr: the start address to write
- * @n: length of data to write in bytes
- * @data: the data to write
- *
- * Writes up to a page of data (256 bytes) to the serial flash starting
- * at the given address.  All the data must be written to the same page.
- */
-static int t4_write_flash(struct adapter *adapter, unsigned int addr,
-			  unsigned int n, const u8 *data)
-{
-	int ret;
-	u32 buf[64];
-	unsigned int i, c, left, val, offset = addr & 0xff;
-
-	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
-		return -EINVAL;
-
-	val = swab32(addr) | SF_PROG_PAGE;
-
-	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
-	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
-		goto unlock;
-
-	for (left = n; left; left -= c) {
-		c = min(left, 4U);
-		for (val = 0, i = 0; i < c; ++i)
-			val = (val << 8) + *data++;
-
-		ret = sf1_write(adapter, c, c != left, 1, val);
-		if (ret)
-			goto unlock;
-	}
-	ret = flash_wait_op(adapter, 8, 1);
-	if (ret)
-		goto unlock;
-
-	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
-
-	/* Read the page to verify the write succeeded */
-	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
-	if (ret)
-		return ret;
-
-	if (memcmp(data - n, (u8 *)buf + offset, n)) {
-		dev_err(adapter->pdev_dev,
-			"failed to correctly write the flash page at %#x\n",
-			addr);
-		return -EIO;
-	}
-	return 0;
-
-unlock:
-	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
-	return ret;
-}
-
-/**
- * get_fw_version - read the firmware version
- * @adapter: the adapter
- * @vers: where to place the version
- *
- * Reads the FW version from flash.
- */
-static int get_fw_version(struct adapter *adapter, u32 *vers)
-{
-	return t4_read_flash(adapter, adapter->params.sf_fw_start +
-			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
-}
-
-/**
- * get_tp_version - read the TP microcode version
- * @adapter: the adapter
- * @vers: where to place the version
- *
- * Reads the TP microcode version from flash.
- */
-static int get_tp_version(struct adapter *adapter, u32 *vers)
-{
-	return t4_read_flash(adapter, adapter->params.sf_fw_start +
-			     offsetof(struct fw_hdr, tp_microcode_ver),
-			     1, vers, 0);
-}
-
-/**
- * t4_check_fw_version - check if the FW is compatible with this driver
- * @adapter: the adapter
- *
- * Checks if an adapter's FW is compatible with the driver.  Returns 0
- * if there's an exact match, a negative error if the version could not be
- * read or there's a major version mismatch, and a positive value if the
- * expected major version is found but there's a minor version mismatch.
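- *
- * A caller would typically handle the three cases along these lines
- * (illustrative only):
- *
- *	ret = t4_check_fw_version(adap);
- *	if (ret < 0)		/* unreadable or major mismatch */
- *		goto fail;
- *	if (ret > 0)		/* minor/micro mismatch, usually OK */
- *		dev_warn(adap->pdev_dev, "FW version mismatch\n");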
- */ -int t4_check_fw_version(struct adapter *adapter) -{ - u32 api_vers[2]; - int ret, major, minor, micro; - - ret = get_fw_version(adapter, &adapter->params.fw_vers); - if (!ret) - ret = get_tp_version(adapter, &adapter->params.tp_vers); - if (!ret) - ret = t4_read_flash(adapter, adapter->params.sf_fw_start + - offsetof(struct fw_hdr, intfver_nic), - 2, api_vers, 1); - if (ret) - return ret; - - major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); - minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); - micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); - memcpy(adapter->params.api_vers, api_vers, - sizeof(adapter->params.api_vers)); - - if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ - dev_err(adapter->pdev_dev, - "card FW has major version %u, driver wants %u\n", - major, FW_VERSION_MAJOR); - return -EINVAL; - } - - if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) - return 0; /* perfect match */ - - /* Minor/micro version mismatch. Report it but often it's OK. */ - return 1; -} - -/** - * t4_flash_erase_sectors - erase a range of flash sectors - * @adapter: the adapter - * @start: the first sector to erase - * @end: the last sector to erase - * - * Erases the sectors in the given inclusive range. - */ -static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) -{ - int ret = 0; - - while (start <= end) { - if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || - (ret = sf1_write(adapter, 4, 0, 1, - SF_ERASE_SECTOR | (start << 8))) != 0 || - (ret = flash_wait_op(adapter, 14, 500)) != 0) { - dev_err(adapter->pdev_dev, - "erase of flash sector %d failed, error %d\n", - start, ret); - break; - } - start++; - } - t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ - return ret; -} - -/** - * t4_load_fw - download firmware - * @adap: the adapter - * @fw_data: the firmware image to write - * @size: image size - * - * Write the supplied firmware image to the card's serial flash. - */ -int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) -{ - u32 csum; - int ret, addr; - unsigned int i; - u8 first_page[SF_PAGE_SIZE]; - const u32 *p = (const u32 *)fw_data; - const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; - unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; - unsigned int fw_img_start = adap->params.sf_fw_start; - unsigned int fw_start_sec = fw_img_start / sf_sec_size; - - if (!size) { - dev_err(adap->pdev_dev, "FW image has no data\n"); - return -EINVAL; - } - if (size & 511) { - dev_err(adap->pdev_dev, - "FW image size not multiple of 512 bytes\n"); - return -EINVAL; - } - if (ntohs(hdr->len512) * 512 != size) { - dev_err(adap->pdev_dev, - "FW image size differs from size in FW header\n"); - return -EINVAL; - } - if (size > FW_MAX_SIZE) { - dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", - FW_MAX_SIZE); - return -EFBIG; - } - - for (csum = 0, i = 0; i < size / sizeof(csum); i++) - csum += ntohl(p[i]); - - if (csum != 0xffffffff) { - dev_err(adap->pdev_dev, - "corrupted firmware image, checksum %#x\n", csum); - return -EINVAL; - } - - i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ - ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); - if (ret) - goto out; - - /* - * We write the correct version at the end so the driver can see a bad - * version if the FW write fails. Start by writing a copy of the - * first page with a bad version. 
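-	 * Until the final header write lands, the flash holds fw_ver
-	 * 0xffffffff, whose major field cannot match FW_VERSION_MAJOR,
-	 * so t4_check_fw_version() above rejects a half-written image.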
- */ - memcpy(first_page, fw_data, SF_PAGE_SIZE); - ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); - ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); - if (ret) - goto out; - - addr = fw_img_start; - for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { - addr += SF_PAGE_SIZE; - fw_data += SF_PAGE_SIZE; - ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data); - if (ret) - goto out; - } - - ret = t4_write_flash(adap, - fw_img_start + offsetof(struct fw_hdr, fw_ver), - sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); -out: - if (ret) - dev_err(adap->pdev_dev, "firmware download failed, error %d\n", - ret); - return ret; -} - -#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) - -/** - * t4_link_start - apply link configuration to MAC/PHY - * @phy: the PHY to setup - * @mac: the MAC to setup - * @lc: the requested link configuration - * - * Set up a port's MAC and PHY according to a desired link configuration. - * - If the PHY can auto-negotiate first decide what to advertise, then - * enable/disable auto-negotiation as desired, and reset. - * - If the PHY does not auto-negotiate just reset it. - * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, - * otherwise do it later based on the outcome of auto-negotiation. - */ -int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, - struct link_config *lc) -{ - struct fw_port_cmd c; - unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); - - lc->link_ok = 0; - if (lc->requested_fc & PAUSE_RX) - fc |= FW_PORT_CAP_FC_RX; - if (lc->requested_fc & PAUSE_TX) - fc |= FW_PORT_CAP_FC_TX; - - memset(&c, 0, sizeof(c)); - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); - c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | - FW_LEN16(c)); - - if (!(lc->supported & FW_PORT_CAP_ANEG)) { - c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc); - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - } else if (lc->autoneg == AUTONEG_DISABLE) { - c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi); - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - } else - c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi); - - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_restart_aneg - restart autonegotiation - * @adap: the adapter - * @mbox: mbox to use for the FW command - * @port: the port id - * - * Restarts autonegotiation for the selected port. 
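The download sequence above is deliberately crash-safe: the header's version word is poisoned first and only rewritten with the real value after every body page has been programmed, so an interrupted download is detectable on the next boot. A sketch of that ordering, with fake_flash as a stand-in for the device:

#include <stdint.h>

#define FW_VER_INVALID 0xffffffffu

struct fake_flash { uint32_t fw_ver; /* ... body pages ... */ };

static void fw_download(struct fake_flash *fl, uint32_t real_ver)
{
	fl->fw_ver = FW_VER_INVALID;	/* step 1: poison the version	*/
	/* step 2: program all body pages here				*/
	fl->fw_ver = real_ver;		/* step 3: commit, written last	*/
}

static int fw_download_complete(const struct fake_flash *fl)
{
	return fl->fw_ver != FW_VER_INVALID;
}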
- */ -int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) -{ - struct fw_port_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); - c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | - FW_LEN16(c)); - c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -struct intr_info { - unsigned int mask; /* bits to check in interrupt status */ - const char *msg; /* message to print or NULL */ - short stat_idx; /* stat counter to increment or -1 */ - unsigned short fatal; /* whether the condition reported is fatal */ -}; - -/** - * t4_handle_intr_status - table driven interrupt handler - * @adapter: the adapter that generated the interrupt - * @reg: the interrupt status register to process - * @acts: table of interrupt actions - * - * A table driven interrupt handler that applies a set of masks to an - * interrupt status word and performs the corresponding actions if the - * interrupts described by the mask have occurred. The actions include - * optionally emitting a warning or alert message. The table is terminated - * by an entry specifying mask 0. Returns the number of fatal interrupt - * conditions. - */ -static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, - const struct intr_info *acts) -{ - int fatal = 0; - unsigned int mask = 0; - unsigned int status = t4_read_reg(adapter, reg); - - for ( ; acts->mask; ++acts) { - if (!(status & acts->mask)) - continue; - if (acts->fatal) { - fatal++; - dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, - status & acts->mask); - } else if (acts->msg && printk_ratelimit()) - dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, - status & acts->mask); - mask |= acts->mask; - } - status &= mask; - if (status) /* clear processed interrupts */ - t4_write_reg(adapter, reg, status); - return fatal; -} - -/* - * Interrupt handler for the PCIE module. 
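t4_handle_intr_status() is the workhorse for most of the module handlers that follow. A stand-alone replica of its walk over the zero-terminated table; the demo table and status value are invented for the example:

#include <stdint.h>
#include <stdio.h>

struct intr_info {
	uint32_t mask;
	const char *msg;
	short stat_idx;
	unsigned short fatal;
};

static int decode_intr_status(uint32_t status, const struct intr_info *acts)
{
	int fatal = 0;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		printf("%s%s (0x%x)\n", acts->fatal ? "ALERT: " : "warn: ",
		       acts->msg, (unsigned)(status & acts->mask));
		fatal += acts->fatal;
	}
	return fatal;	/* the driver also writes the matched bits back to clear them */
}

int main(void)
{
	static const struct intr_info demo[] = {
		{ 0x1, "parity error", -1, 1 },
		{ 0x2, "dropped request", -1, 0 },
		{ 0 }
	};

	return decode_intr_status(0x3, demo);	/* prints both, returns 1 */
}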
- */ -static void pcie_intr_handler(struct adapter *adapter) -{ - static const struct intr_info sysbus_intr_info[] = { - { RNPP, "RXNP array parity error", -1, 1 }, - { RPCP, "RXPC array parity error", -1, 1 }, - { RCIP, "RXCIF array parity error", -1, 1 }, - { RCCP, "Rx completions control array parity error", -1, 1 }, - { RFTP, "RXFT array parity error", -1, 1 }, - { 0 } - }; - static const struct intr_info pcie_port_intr_info[] = { - { TPCP, "TXPC array parity error", -1, 1 }, - { TNPP, "TXNP array parity error", -1, 1 }, - { TFTP, "TXFT array parity error", -1, 1 }, - { TCAP, "TXCA array parity error", -1, 1 }, - { TCIP, "TXCIF array parity error", -1, 1 }, - { RCAP, "RXCA array parity error", -1, 1 }, - { OTDD, "outbound request TLP discarded", -1, 1 }, - { RDPE, "Rx data parity error", -1, 1 }, - { TDUE, "Tx uncorrectable data error", -1, 1 }, - { 0 } - }; - static const struct intr_info pcie_intr_info[] = { - { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, - { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, - { MSIDATAPERR, "MSI data parity error", -1, 1 }, - { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, - { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, - { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, - { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, - { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, - { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, - { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, - { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, - { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, - { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, - { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, - { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, - { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, - { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, - { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, - { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, - { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, - { FIDPERR, "PCI FID parity error", -1, 1 }, - { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, - { MATAGPERR, "PCI MA tag parity error", -1, 1 }, - { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, - { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, - { RXWRPERR, "PCI Rx write parity error", -1, 1 }, - { RPLPERR, "PCI replay buffer parity error", -1, 1 }, - { PCIESINT, "PCI core secondary fault", -1, 1 }, - { PCIEPINT, "PCI core primary fault", -1, 1 }, - { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, - { 0 } - }; - - int fat; - - fat = t4_handle_intr_status(adapter, - PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, - sysbus_intr_info) + - t4_handle_intr_status(adapter, - PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, - pcie_port_intr_info) + - t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); - if (fat) - t4_fatal_err(adapter); -} - -/* - * TP interrupt handler. - */ -static void tp_intr_handler(struct adapter *adapter) -{ - static const struct intr_info tp_intr_info[] = { - { 0x3fffffff, "TP parity error", -1, 1 }, - { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, - { 0 } - }; - - if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) - t4_fatal_err(adapter); -} - -/* - * SGE interrupt handler. 
- */ -static void sge_intr_handler(struct adapter *adapter) -{ - u64 v; - - static const struct intr_info sge_intr_info[] = { - { ERR_CPL_EXCEED_IQE_SIZE, - "SGE received CPL exceeding IQE size", -1, 1 }, - { ERR_INVALID_CIDX_INC, - "SGE GTS CIDX increment too large", -1, 0 }, - { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, - { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, - { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, - "SGE IQID > 1023 received CPL for FL", -1, 0 }, - { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, - 0 }, - { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, - 0 }, - { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, - 0 }, - { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, - 0 }, - { ERR_ING_CTXT_PRIO, - "SGE too many priority ingress contexts", -1, 0 }, - { ERR_EGR_CTXT_PRIO, - "SGE too many priority egress contexts", -1, 0 }, - { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, - { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, - { 0 } - }; - - v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | - ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); - if (v) { - dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", - (unsigned long long)v); - t4_write_reg(adapter, SGE_INT_CAUSE1, v); - t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); - } - - if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || - v != 0) - t4_fatal_err(adapter); -} - -/* - * CIM interrupt handler. - */ -static void cim_intr_handler(struct adapter *adapter) -{ - static const struct intr_info cim_intr_info[] = { - { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, - { OBQPARERR, "CIM OBQ parity error", -1, 1 }, - { IBQPARERR, "CIM IBQ parity error", -1, 1 }, - { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, - { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, - { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, - { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, - { 0 } - }; - static const struct intr_info cim_upintr_info[] = { - { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, - { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, - { ILLWRINT, "CIM illegal write", -1, 1 }, - { ILLRDINT, "CIM illegal read", -1, 1 }, - { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, - { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, - { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, - { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, - { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, - { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, - { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, - { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, - { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, - { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, - { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, - { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, - { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, - { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, - { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, - { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, - { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, - { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, - { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, - { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, - { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, - { RSPOVRLOOKUPINT , "CIM response FIFO 
overwrite", -1, 1 }, - { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, - { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, - { 0 } - }; - - int fat; - - fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, - cim_intr_info) + - t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, - cim_upintr_info); - if (fat) - t4_fatal_err(adapter); -} - -/* - * ULP RX interrupt handler. - */ -static void ulprx_intr_handler(struct adapter *adapter) -{ - static const struct intr_info ulprx_intr_info[] = { - { 0x1800000, "ULPRX context error", -1, 1 }, - { 0x7fffff, "ULPRX parity error", -1, 1 }, - { 0 } - }; - - if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info)) - t4_fatal_err(adapter); -} - -/* - * ULP TX interrupt handler. - */ -static void ulptx_intr_handler(struct adapter *adapter) -{ - static const struct intr_info ulptx_intr_info[] = { - { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, - 0 }, - { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, - 0 }, - { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, - 0 }, - { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, - 0 }, - { 0xfffffff, "ULPTX parity error", -1, 1 }, - { 0 } - }; - - if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info)) - t4_fatal_err(adapter); -} - -/* - * PM TX interrupt handler. - */ -static void pmtx_intr_handler(struct adapter *adapter) -{ - static const struct intr_info pmtx_intr_info[] = { - { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, - { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, - { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, - { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, - { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, - { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, - { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, - { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, - { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, - { 0 } - }; - - if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) - t4_fatal_err(adapter); -} - -/* - * PM RX interrupt handler. - */ -static void pmrx_intr_handler(struct adapter *adapter) -{ - static const struct intr_info pmrx_intr_info[] = { - { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, - { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, - { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, - { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, - { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, - { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, - { 0 } - }; - - if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) - t4_fatal_err(adapter); -} - -/* - * CPL switch interrupt handler. - */ -static void cplsw_intr_handler(struct adapter *adapter) -{ - static const struct intr_info cplsw_intr_info[] = { - { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, - { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, - { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, - { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, - { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, - { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, - { 0 } - }; - - if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) - t4_fatal_err(adapter); -} - -/* - * LE interrupt handler. 
- */ -static void le_intr_handler(struct adapter *adap) -{ - static const struct intr_info le_intr_info[] = { - { LIPMISS, "LE LIP miss", -1, 0 }, - { LIP0, "LE 0 LIP error", -1, 0 }, - { PARITYERR, "LE parity error", -1, 1 }, - { UNKNOWNCMD, "LE unknown command", -1, 1 }, - { REQQPARERR, "LE request queue parity error", -1, 1 }, - { 0 } - }; - - if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) - t4_fatal_err(adap); -} - -/* - * MPS interrupt handler. - */ -static void mps_intr_handler(struct adapter *adapter) -{ - static const struct intr_info mps_rx_intr_info[] = { - { 0xffffff, "MPS Rx parity error", -1, 1 }, - { 0 } - }; - static const struct intr_info mps_tx_intr_info[] = { - { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, - { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, - { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, - { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, - { BUBBLE, "MPS Tx underflow", -1, 1 }, - { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, - { FRMERR, "MPS Tx framing error", -1, 1 }, - { 0 } - }; - static const struct intr_info mps_trc_intr_info[] = { - { FILTMEM, "MPS TRC filter parity error", -1, 1 }, - { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, - { MISCPERR, "MPS TRC misc parity error", -1, 1 }, - { 0 } - }; - static const struct intr_info mps_stat_sram_intr_info[] = { - { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, - { 0 } - }; - static const struct intr_info mps_stat_tx_intr_info[] = { - { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, - { 0 } - }; - static const struct intr_info mps_stat_rx_intr_info[] = { - { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, - { 0 } - }; - static const struct intr_info mps_cls_intr_info[] = { - { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, - { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, - { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, - { 0 } - }; - - int fat; - - fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, - mps_rx_intr_info) + - t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, - mps_tx_intr_info) + - t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, - mps_trc_intr_info) + - t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, - mps_stat_sram_intr_info) + - t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, - mps_stat_tx_intr_info) + - t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, - mps_stat_rx_intr_info) + - t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, - mps_cls_intr_info); - - t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT | - RXINT | TXINT | STATINT); - t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */ - if (fat) - t4_fatal_err(adapter); -} - -#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) - -/* - * EDC/MC interrupt handler. 
- */ -static void mem_intr_handler(struct adapter *adapter, int idx) -{ - static const char name[3][5] = { "EDC0", "EDC1", "MC" }; - - unsigned int addr, cnt_addr, v; - - if (idx <= MEM_EDC1) { - addr = EDC_REG(EDC_INT_CAUSE, idx); - cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); - } else { - addr = MC_INT_CAUSE; - cnt_addr = MC_ECC_STATUS; - } - - v = t4_read_reg(adapter, addr) & MEM_INT_MASK; - if (v & PERR_INT_CAUSE) - dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", - name[idx]); - if (v & ECC_CE_INT_CAUSE) { - u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); - - t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); - if (printk_ratelimit()) - dev_warn(adapter->pdev_dev, - "%u %s correctable ECC data error%s\n", - cnt, name[idx], cnt > 1 ? "s" : ""); - } - if (v & ECC_UE_INT_CAUSE) - dev_alert(adapter->pdev_dev, - "%s uncorrectable ECC data error\n", name[idx]); - - t4_write_reg(adapter, addr, v); - if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) - t4_fatal_err(adapter); -} - -/* - * MA interrupt handler. - */ -static void ma_intr_handler(struct adapter *adap) -{ - u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); - - if (status & MEM_PERR_INT_CAUSE) - dev_alert(adap->pdev_dev, - "MA parity error, parity status %#x\n", - t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); - if (status & MEM_WRAP_INT_CAUSE) { - v = t4_read_reg(adap, MA_INT_WRAP_STATUS); - dev_alert(adap->pdev_dev, "MA address wrap-around error by " - "client %u to address %#x\n", - MEM_WRAP_CLIENT_NUM_GET(v), - MEM_WRAP_ADDRESS_GET(v) << 4); - } - t4_write_reg(adap, MA_INT_CAUSE, status); - t4_fatal_err(adap); -} - -/* - * SMB interrupt handler. - */ -static void smb_intr_handler(struct adapter *adap) -{ - static const struct intr_info smb_intr_info[] = { - { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, - { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, - { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, - { 0 } - }; - - if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) - t4_fatal_err(adap); -} - -/* - * NC-SI interrupt handler. - */ -static void ncsi_intr_handler(struct adapter *adap) -{ - static const struct intr_info ncsi_intr_info[] = { - { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, - { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, - { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, - { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, - { 0 } - }; - - if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) - t4_fatal_err(adap); -} - -/* - * XGMAC interrupt handler. - */ -static void xgmac_intr_handler(struct adapter *adap, int port) -{ - u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); - - v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; - if (!v) - return; - - if (v & TXFIFO_PRTY_ERR) - dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", - port); - if (v & RXFIFO_PRTY_ERR) - dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", - port); - t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); - t4_fatal_err(adap); -} - -/* - * PL interrupt handler. 
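mem_intr_handler() reaches the right controller instance through EDC_REG(), which works because both EDCs expose an identical register layout at a fixed stride. A sketch of that addressing scheme; the stride and address values below are invented for the example, the real ones live in the driver's register headers:

#include <stdint.h>

#define EDC_STRIDE	0x100u	/* hypothetical distance between EDC0 and EDC1 */
#define EDC_INT_CAUSE	0x7914u	/* hypothetical EDC0 absolute address */

/* Matches the call sites above: instance 0 uses the address as-is,
 * instance 1 the same address one stride further on. */
#define EDC_REG(reg, idx)	((reg) + (idx) * EDC_STRIDE)

static uint32_t edc_int_cause_addr(int idx)
{
	return EDC_REG(EDC_INT_CAUSE, idx);
}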
- */ -static void pl_intr_handler(struct adapter *adap) -{ - static const struct intr_info pl_intr_info[] = { - { FATALPERR, "T4 fatal parity error", -1, 1 }, - { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, - { 0 } - }; - - if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) - t4_fatal_err(adap); -} - -#define PF_INTR_MASK (PFSW) -#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ - EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ - CPL_SWITCH | SGE | ULP_TX) - -/** - * t4_slow_intr_handler - control path interrupt handler - * @adapter: the adapter - * - * T4 interrupt handler for non-data global interrupt events, e.g., errors. - * The designation 'slow' is because it involves register reads, while - * data interrupts typically don't involve any MMIOs. - */ -int t4_slow_intr_handler(struct adapter *adapter) -{ - u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); - - if (!(cause & GLBL_INTR_MASK)) - return 0; - if (cause & CIM) - cim_intr_handler(adapter); - if (cause & MPS) - mps_intr_handler(adapter); - if (cause & NCSI) - ncsi_intr_handler(adapter); - if (cause & PL) - pl_intr_handler(adapter); - if (cause & SMB) - smb_intr_handler(adapter); - if (cause & XGMAC0) - xgmac_intr_handler(adapter, 0); - if (cause & XGMAC1) - xgmac_intr_handler(adapter, 1); - if (cause & XGMAC_KR0) - xgmac_intr_handler(adapter, 2); - if (cause & XGMAC_KR1) - xgmac_intr_handler(adapter, 3); - if (cause & PCIE) - pcie_intr_handler(adapter); - if (cause & MC) - mem_intr_handler(adapter, MEM_MC); - if (cause & EDC0) - mem_intr_handler(adapter, MEM_EDC0); - if (cause & EDC1) - mem_intr_handler(adapter, MEM_EDC1); - if (cause & LE) - le_intr_handler(adapter); - if (cause & TP) - tp_intr_handler(adapter); - if (cause & MA) - ma_intr_handler(adapter); - if (cause & PM_TX) - pmtx_intr_handler(adapter); - if (cause & PM_RX) - pmrx_intr_handler(adapter); - if (cause & ULP_RX) - ulprx_intr_handler(adapter); - if (cause & CPL_SWITCH) - cplsw_intr_handler(adapter); - if (cause & SGE) - sge_intr_handler(adapter); - if (cause & ULP_TX) - ulptx_intr_handler(adapter); - - /* Clear the interrupts just processed for which we are the master. */ - t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); - (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ - return 1; -} - -/** - * t4_intr_enable - enable interrupts - * @adapter: the adapter whose interrupts should be enabled - * - * Enable PF-specific interrupts for the calling function and the top-level - * interrupt concentrator for global interrupts. Interrupts are already - * enabled at each module, here we just enable the roots of the interrupt - * hierarchies. - * - * Note: this function should be called only when the driver manages - * non PF-specific interrupts from the various HW modules. Only one PCI - * function at a time should be doing this. 
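The long if-chain in t4_slow_intr_handler() pairs each PL_INT_CAUSE bit with a module handler. The same fan-out can be expressed as a data table; a sketch of that alternative shape, with placeholder bit values and dummy handlers standing in for the real ones:

#include <stdint.h>
#include <stddef.h>

typedef void (*intr_fn)(void);

static void demo_cim(void) { /* module-specific handling */ }
static void demo_mps(void) { /* module-specific handling */ }

static const struct { uint32_t bit; intr_fn fn; } slow_intr_map[] = {
	{ 1u << 0, demo_cim },	/* bit positions are placeholders */
	{ 1u << 1, demo_mps },
};

static void dispatch_slow_intr(uint32_t cause)
{
	size_t i;

	for (i = 0; i < sizeof(slow_intr_map) / sizeof(slow_intr_map[0]); i++)
		if (cause & slow_intr_map[i].bit)
			slow_intr_map[i].fn();
}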
- */ -void t4_intr_enable(struct adapter *adapter) -{ - u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); - - t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | - ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | - ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | - ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | - ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | - ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | - ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | - EGRESS_SIZE_ERR); - t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); - t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); -} - -/** - * t4_intr_disable - disable interrupts - * @adapter: the adapter whose interrupts should be disabled - * - * Disable interrupts. We only disable the top-level interrupt - * concentrators. The caller must be a PCI function managing global - * interrupts. - */ -void t4_intr_disable(struct adapter *adapter) -{ - u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); - - t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); - t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); -} - -/** - * hash_mac_addr - return the hash value of a MAC address - * @addr: the 48-bit Ethernet MAC address - * - * Hashes a MAC address according to the hash function used by HW inexact - * (hash) address matching. - */ -static int hash_mac_addr(const u8 *addr) -{ - u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; - u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; - a ^= b; - a ^= (a >> 12); - a ^= (a >> 6); - return a & 0x3f; -} - -/** - * t4_config_rss_range - configure a portion of the RSS mapping table - * @adapter: the adapter - * @mbox: mbox to use for the FW command - * @viid: virtual interface whose RSS subtable is to be written - * @start: start entry in the table to write - * @n: how many table entries to write - * @rspq: values for the response queue lookup table - * @nrspq: number of values in @rspq - * - * Programs the selected part of the VI's RSS mapping table with the - * provided values. If @nrspq < @n the supplied values are used repeatedly - * until the full table range is populated. - * - * The caller must ensure the values in @rspq are in the range allowed for - * @viid. - */ -int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, - int start, int n, const u16 *rspq, unsigned int nrspq) -{ - int ret; - const u16 *rsp = rspq; - const u16 *rsp_end = rspq + nrspq; - struct fw_rss_ind_tbl_cmd cmd; - - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE | - FW_RSS_IND_TBL_CMD_VIID(viid)); - cmd.retval_len16 = htonl(FW_LEN16(cmd)); - - /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ - while (n > 0) { - int nq = min(n, 32); - __be32 *qp = &cmd.iq0_to_iq2; - - cmd.niqid = htons(nq); - cmd.startidx = htons(start); - - start += nq; - n -= nq; - - while (nq > 0) { - unsigned int v; - - v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); - if (++rsp >= rsp_end) - rsp = rspq; - v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); - if (++rsp >= rsp_end) - rsp = rspq; - v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); - if (++rsp >= rsp_end) - rsp = rspq; - - *qp++ = htonl(v); - nq -= 3; - } - - ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); - if (ret) - return ret; - } - return 0; -} - -/** - * t4_config_glbl_rss - configure the global RSS mode - * @adapter: the adapter - * @mbox: mbox to use for the FW command - * @mode: global RSS mode - * @flags: mode-specific flags - * - * Sets the global RSS mode. 
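hash_mac_addr() above is pure bit arithmetic, so it can be exercised directly in user space. The same logic in a small harness, folding the 48-bit address into two 24-bit halves and reducing to a 6-bit bucket for the 64-entry inexact filter:

#include <stdint.h>
#include <stdio.h>

static int hash_mac_addr(const uint8_t *addr)
{
	uint32_t a = ((uint32_t)addr[0] << 16) | ((uint32_t)addr[1] << 8) | addr[2];
	uint32_t b = ((uint32_t)addr[3] << 16) | ((uint32_t)addr[4] << 8) | addr[5];

	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };

	printf("bucket %d\n", hash_mac_addr(mac));	/* value in 0..63 */
	return 0;
}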
- */ -int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, - unsigned int flags) -{ - struct fw_rss_glb_config_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - c.retval_len16 = htonl(FW_LEN16(c)); - if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { - c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); - } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { - c.u.basicvirtual.mode_pkd = - htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); - c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); - } else - return -EINVAL; - return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_tp_get_tcp_stats - read TP's TCP MIB counters - * @adap: the adapter - * @v4: holds the TCP/IP counter values - * @v6: holds the TCP/IPv6 counter values - * - * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. - * Either @v4 or @v6 may be %NULL to skip the corresponding stats. - */ -void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, - struct tp_tcp_stats *v6) -{ - u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; - -#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) -#define STAT(x) val[STAT_IDX(x)] -#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) - - if (v4) { - t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, - ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); - v4->tcpOutRsts = STAT(OUT_RST); - v4->tcpInSegs = STAT64(IN_SEG); - v4->tcpOutSegs = STAT64(OUT_SEG); - v4->tcpRetransSegs = STAT64(RXT_SEG); - } - if (v6) { - t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, - ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); - v6->tcpOutRsts = STAT(OUT_RST); - v6->tcpInSegs = STAT64(IN_SEG); - v6->tcpOutSegs = STAT64(OUT_SEG); - v6->tcpRetransSegs = STAT64(RXT_SEG); - } -#undef STAT64 -#undef STAT -#undef STAT_IDX -} - -/** - * t4_read_mtu_tbl - returns the values in the HW path MTU table - * @adap: the adapter - * @mtus: where to store the MTU values - * @mtu_log: where to store the MTU base-2 log (may be %NULL) - * - * Reads the HW path MTU table. - */ -void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) -{ - u32 v; - int i; - - for (i = 0; i < NMTUS; ++i) { - t4_write_reg(adap, TP_MTU_TABLE, - MTUINDEX(0xff) | MTUVALUE(i)); - v = t4_read_reg(adap, TP_MTU_TABLE); - mtus[i] = MTUVALUE_GET(v); - if (mtu_log) - mtu_log[i] = MTUWIDTH_GET(v); - } -} - -/** - * init_cong_ctrl - initialize congestion control parameters - * @a: the alpha values for congestion control - * @b: the beta values for congestion control - * - * Initialize the congestion control parameters. 
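The STAT64() macro glues back together the HI/LO halves that TP exports for each 64-bit MIB counter. The equivalent in plain C:

#include <stdint.h>

static uint64_t stat64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

Both halves come from one indirect block read of the MIB, so the pair is sampled together rather than register by register.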
- */ -static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) -{ - a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; - a[9] = 2; - a[10] = 3; - a[11] = 4; - a[12] = 5; - a[13] = 6; - a[14] = 7; - a[15] = 8; - a[16] = 9; - a[17] = 10; - a[18] = 14; - a[19] = 17; - a[20] = 21; - a[21] = 25; - a[22] = 30; - a[23] = 35; - a[24] = 45; - a[25] = 60; - a[26] = 80; - a[27] = 100; - a[28] = 200; - a[29] = 300; - a[30] = 400; - a[31] = 500; - - b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; - b[9] = b[10] = 1; - b[11] = b[12] = 2; - b[13] = b[14] = b[15] = b[16] = 3; - b[17] = b[18] = b[19] = b[20] = b[21] = 4; - b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; - b[28] = b[29] = 6; - b[30] = b[31] = 7; -} - -/* The minimum additive increment value for the congestion control table */ -#define CC_MIN_INCR 2U - -/** - * t4_load_mtus - write the MTU and congestion control HW tables - * @adap: the adapter - * @mtus: the values for the MTU table - * @alpha: the values for the congestion control alpha parameter - * @beta: the values for the congestion control beta parameter - * - * Write the HW MTU table with the supplied MTUs and the high-speed - * congestion control table with the supplied alpha, beta, and MTUs. - * We write the two tables together because the additive increments - * depend on the MTUs. - */ -void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, - const unsigned short *alpha, const unsigned short *beta) -{ - static const unsigned int avg_pkts[NCCTRL_WIN] = { - 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, - 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, - 28672, 40960, 57344, 81920, 114688, 163840, 229376 - }; - - unsigned int i, w; - - for (i = 0; i < NMTUS; ++i) { - unsigned int mtu = mtus[i]; - unsigned int log2 = fls(mtu); - - if (!(mtu & ((1 << log2) >> 2))) /* round */ - log2--; - t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | - MTUWIDTH(log2) | MTUVALUE(mtu)); - - for (w = 0; w < NCCTRL_WIN; ++w) { - unsigned int inc; - - inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], - CC_MIN_INCR); - - t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | - (w << 16) | (beta[w] << 13) | inc); - } - } -} - -/** - * get_mps_bg_map - return the buffer groups associated with a port - * @adap: the adapter - * @idx: the port index - * - * Returns a bitmap indicating which MPS buffer groups are associated - * with the given port. Bit i is set if buffer group i is used by the - * port. - */ -static unsigned int get_mps_bg_map(struct adapter *adap, int idx) -{ - u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); - - if (n == 0) - return idx == 0 ? 0xf : 0; - if (n == 1) - return idx < 2 ? (3 << (2 * idx)) : 0; - return 1 << idx; -} - -/** - * t4_get_port_stats - collect port statistics - * @adap: the adapter - * @idx: the port index - * @p: the stats structure to fill - * - * Collect statistics related to the given port from HW. 
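Two pieces of arithmetic in t4_load_mtus() are easy to miss: the programmed MTU width is fls(mtu) rounded down by one unless the bit two places below the top bit is set, and each congestion-control increment is clamped from below by CC_MIN_INCR. Both extracted into portable C, with fls_portable() standing in for the kernel's fls():

#include <stdint.h>

#define CC_MIN_INCR 2u

/* 1-based index of the highest set bit, 0 for an all-zero argument. */
static int fls_portable(uint32_t v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

static unsigned int mtu_log2(unsigned int mtu)
{
	unsigned int log2 = fls_portable(mtu);

	if (!(mtu & ((1u << log2) >> 2)))	/* round */
		log2--;
	return log2;
}

static unsigned int cc_increment(unsigned int mtu, unsigned int alpha,
				 unsigned int avg_pkts)
{
	unsigned int inc = ((mtu - 40) * alpha) / avg_pkts;

	return inc > CC_MIN_INCR ? inc : CC_MIN_INCR;	/* max(inc, CC_MIN_INCR) */
}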
- */ -void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) -{ - u32 bgmap = get_mps_bg_map(adap, idx); - -#define GET_STAT(name) \ - t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) -#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) - - p->tx_octets = GET_STAT(TX_PORT_BYTES); - p->tx_frames = GET_STAT(TX_PORT_FRAMES); - p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); - p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); - p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); - p->tx_error_frames = GET_STAT(TX_PORT_ERROR); - p->tx_frames_64 = GET_STAT(TX_PORT_64B); - p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); - p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); - p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); - p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); - p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); - p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); - p->tx_drop = GET_STAT(TX_PORT_DROP); - p->tx_pause = GET_STAT(TX_PORT_PAUSE); - p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); - p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); - p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); - p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); - p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); - p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); - p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); - p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); - - p->rx_octets = GET_STAT(RX_PORT_BYTES); - p->rx_frames = GET_STAT(RX_PORT_FRAMES); - p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); - p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); - p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); - p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); - p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); - p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); - p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); - p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); - p->rx_runt = GET_STAT(RX_PORT_LESS_64B); - p->rx_frames_64 = GET_STAT(RX_PORT_64B); - p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); - p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); - p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); - p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); - p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); - p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); - p->rx_pause = GET_STAT(RX_PORT_PAUSE); - p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); - p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); - p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); - p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); - p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); - p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); - p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); - p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); - - p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; - p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; - p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; - p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; - p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; - p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; - p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; - p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; - -#undef GET_STAT -#undef GET_STAT_COM -} - -/** - * t4_wol_magic_enable - enable/disable magic packet WoL - * @adap: the adapter - * @port: the physical port index - * @addr: MAC address expected in magic packets, %NULL to disable - * - * Enables/disables magic packet wake-on-LAN for the selected port. 
- */ -void t4_wol_magic_enable(struct adapter *adap, unsigned int port, - const u8 *addr) -{ - if (addr) { - t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), - (addr[2] << 24) | (addr[3] << 16) | - (addr[4] << 8) | addr[5]); - t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), - (addr[0] << 8) | addr[1]); - } - t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, - addr ? MAGICEN : 0); -} - -/** - * t4_wol_pat_enable - enable/disable pattern-based WoL - * @adap: the adapter - * @port: the physical port index - * @map: bitmap of which HW pattern filters to set - * @mask0: byte mask for bytes 0-63 of a packet - * @mask1: byte mask for bytes 64-127 of a packet - * @crc: Ethernet CRC for selected bytes - * @enable: enable/disable switch - * - * Sets the pattern filters indicated in @map to mask out the bytes - * specified in @mask0/@mask1 in received packets and compare the CRC of - * the resulting packet against @crc. If @enable is %true pattern-based - * WoL is enabled, otherwise disabled. - */ -int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, - u64 mask0, u64 mask1, unsigned int crc, bool enable) -{ - int i; - - if (!enable) { - t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), - PATEN, 0); - return 0; - } - if (map > 0xff) - return -EINVAL; - -#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) - - t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); - t4_write_reg(adap, EPIO_REG(DATA2), mask1); - t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); - - for (i = 0; i < NWOL_PAT; i++, map >>= 1) { - if (!(map & 1)) - continue; - - /* write byte masks */ - t4_write_reg(adap, EPIO_REG(DATA0), mask0); - t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); - t4_read_reg(adap, EPIO_REG(OP)); /* flush */ - if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) - return -ETIMEDOUT; - - /* write CRC */ - t4_write_reg(adap, EPIO_REG(DATA0), crc); - t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); - t4_read_reg(adap, EPIO_REG(OP)); /* flush */ - if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) - return -ETIMEDOUT; - } -#undef EPIO_REG - - t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); - return 0; -} - -#define INIT_CMD(var, cmd, rd_wr) do { \ - (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ - FW_CMD_REQUEST | FW_CMD_##rd_wr); \ - (var).retval_len16 = htonl(FW_LEN16(var)); \ -} while (0) - -/** - * t4_mdio_rd - read a PHY register through MDIO - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @phy_addr: the PHY address - * @mmd: the PHY MMD to access (0 for clause 22 PHYs) - * @reg: the register to read - * @valp: where to store the value - * - * Issues a FW command through the given mailbox to read a PHY register. 
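t4_wol_magic_enable() splits the six station-address bytes across a LO register (bytes 2 to 5) and a HI register (bytes 0 and 1). The same packing as a host-side helper:

#include <stdint.h>

static void pack_magic_macid(const uint8_t *addr, uint32_t *lo, uint32_t *hi)
{
	*lo = ((uint32_t)addr[2] << 24) | ((uint32_t)addr[3] << 16) |
	      ((uint32_t)addr[4] << 8)  |  (uint32_t)addr[5];
	*hi = ((uint32_t)addr[0] << 8)  |  (uint32_t)addr[1];
}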
- */ -int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, - unsigned int mmd, unsigned int reg, u16 *valp) -{ - int ret; - struct fw_ldst_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); - c.cycles_to_len16 = htonl(FW_LEN16(c)); - c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | - FW_LDST_CMD_MMD(mmd)); - c.u.mdio.raddr = htons(reg); - - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); - if (ret == 0) - *valp = ntohs(c.u.mdio.rval); - return ret; -} - -/** - * t4_mdio_wr - write a PHY register through MDIO - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @phy_addr: the PHY address - * @mmd: the PHY MMD to access (0 for clause 22 PHYs) - * @reg: the register to write - * @valp: value to write - * - * Issues a FW command through the given mailbox to write a PHY register. - */ -int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, - unsigned int mmd, unsigned int reg, u16 val) -{ - struct fw_ldst_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); - c.cycles_to_len16 = htonl(FW_LEN16(c)); - c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | - FW_LDST_CMD_MMD(mmd)); - c.u.mdio.raddr = htons(reg); - c.u.mdio.rval = htons(val); - - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_fw_hello - establish communication with FW - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @evt_mbox: mailbox to receive async FW events - * @master: specifies the caller's willingness to be the device master - * @state: returns the current device state - * - * Issues a command to establish communication with FW. - */ -int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, - enum dev_master master, enum dev_state *state) -{ - int ret; - struct fw_hello_cmd c; - - INIT_CMD(c, HELLO, WRITE); - c.err_to_mbasyncnot = htonl( - FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | - FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | - FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) | - FW_HELLO_CMD_MBASYNCNOT(evt_mbox)); - - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); - if (ret == 0 && state) { - u32 v = ntohl(c.err_to_mbasyncnot); - if (v & FW_HELLO_CMD_INIT) - *state = DEV_STATE_INIT; - else if (v & FW_HELLO_CMD_ERR) - *state = DEV_STATE_ERR; - else - *state = DEV_STATE_UNINIT; - } - return ret; -} - -/** - * t4_fw_bye - end communication with FW - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * - * Issues a command to terminate communication with FW. - */ -int t4_fw_bye(struct adapter *adap, unsigned int mbox) -{ - struct fw_bye_cmd c; - - INIT_CMD(c, BYE, WRITE); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_init_cmd - ask FW to initialize the device - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * - * Issues a command to FW to partially initialize the device. This - * performs initialization that generally doesn't depend on user input. 
- */ -int t4_early_init(struct adapter *adap, unsigned int mbox) -{ - struct fw_initialize_cmd c; - - INIT_CMD(c, INITIALIZE, WRITE); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_fw_reset - issue a reset to FW - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @reset: specifies the type of reset to perform - * - * Issues a reset command of the specified type to FW. - */ -int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) -{ - struct fw_reset_cmd c; - - INIT_CMD(c, RESET, WRITE); - c.val = htonl(reset); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_query_params - query FW or device parameters - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @pf: the PF - * @vf: the VF - * @nparams: the number of parameters - * @params: the parameter names - * @val: the parameter values - * - * Reads the value of FW or device parameters. Up to 7 parameters can be - * queried at once. - */ -int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int nparams, const u32 *params, - u32 *val) -{ - int i, ret; - struct fw_params_cmd c; - __be32 *p = &c.param[0].mnem; - - if (nparams > 7) - return -EINVAL; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | - FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); - c.retval_len16 = htonl(FW_LEN16(c)); - for (i = 0; i < nparams; i++, p += 2) - *p = htonl(*params++); - - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); - if (ret == 0) - for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) - *val++ = ntohl(*p); - return ret; -} - -/** - * t4_set_params - sets FW or device parameters - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @pf: the PF - * @vf: the VF - * @nparams: the number of parameters - * @params: the parameter names - * @val: the parameter values - * - * Sets the value of FW or device parameters. Up to 7 parameters can be - * specified at once. - */ -int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int nparams, const u32 *params, - const u32 *val) -{ - struct fw_params_cmd c; - __be32 *p = &c.param[0].mnem; - - if (nparams > 7) - return -EINVAL; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | - FW_PARAMS_CMD_VFN(vf)); - c.retval_len16 = htonl(FW_LEN16(c)); - while (nparams--) { - *p++ = htonl(*params++); - *p++ = htonl(*val++); - } - - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_cfg_pfvf - configure PF/VF resource limits - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @pf: the PF being configured - * @vf: the VF being configured - * @txq: the max number of egress queues - * @txq_eth_ctrl: the max number of egress Ethernet or control queues - * @rxqi: the max number of interrupt-capable ingress queues - * @rxq: the max number of interruptless ingress queues - * @tc: the PCI traffic class - * @vi: the max number of virtual interfaces - * @cmask: the channel access rights mask for the PF/VF - * @pmask: the port access rights mask for the PF/VF - * @nexact: the maximum number of exact MPS filters - * @rcaps: read capabilities - * @wxcaps: write/execute capabilities - * - * Configures resource limits and capabilities for a physical or virtual - * function. 
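t4_query_params() and t4_set_params() flatten up to seven (name, value) pairs into consecutive big-endian words of the command. A sketch of that layout with host-order inputs; store_be32() plus a raw byte buffer stand in for htonl() and the fw_params_cmd structure:

#include <stdint.h>
#include <stddef.h>

static void store_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
}

/* Up to 7 pairs fit in one command; returns bytes used, 0 on overflow. */
static size_t pack_params(uint8_t *buf, const uint32_t *names,
			  const uint32_t *vals, unsigned int n)
{
	size_t off = 0;
	unsigned int i;

	if (n > 7)
		return 0;
	for (i = 0; i < n; i++) {
		store_be32(buf + off, names[i]);
		off += 4;
		store_be32(buf + off, vals[i]);
		off += 4;
	}
	return off;
}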
- */ -int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, - unsigned int rxqi, unsigned int rxq, unsigned int tc, - unsigned int vi, unsigned int cmask, unsigned int pmask, - unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) -{ - struct fw_pfvf_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | - FW_PFVF_CMD_VFN(vf)); - c.retval_len16 = htonl(FW_LEN16(c)); - c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | - FW_PFVF_CMD_NIQ(rxq)); - c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | - FW_PFVF_CMD_PMASK(pmask) | - FW_PFVF_CMD_NEQ(txq)); - c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | - FW_PFVF_CMD_NEXACTF(nexact)); - c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | - FW_PFVF_CMD_WX_CAPS(wxcaps) | - FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_alloc_vi - allocate a virtual interface - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @port: physical port associated with the VI - * @pf: the PF owning the VI - * @vf: the VF owning the VI - * @nmac: number of MAC addresses needed (1 to 5) - * @mac: the MAC addresses of the VI - * @rss_size: size of RSS table slice associated with this VI - * - * Allocates a virtual interface for the given physical port. If @mac is - * not %NULL it contains the MAC addresses of the VI as assigned by FW. - * @mac should be large enough to hold @nmac Ethernet addresses, they are - * stored consecutively so the space needed is @nmac * 6 bytes. - * Returns a negative error number or the non-negative VI id. - */ -int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, - unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, - unsigned int *rss_size) -{ - int ret; - struct fw_vi_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_CMD_EXEC | - FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c)); - c.portid_pkd = FW_VI_CMD_PORTID(port); - c.nmac = nmac - 1; - - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); - if (ret) - return ret; - - if (mac) { - memcpy(mac, c.mac, sizeof(c.mac)); - switch (nmac) { - case 5: - memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); - case 4: - memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); - case 3: - memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); - case 2: - memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); - } - } - if (rss_size) - *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); - return FW_VI_CMD_VIID_GET(ntohs(c.type_viid)); -} - -/** - * t4_set_rxmode - set Rx properties of a virtual interface - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @viid: the VI id - * @mtu: the new MTU or -1 - * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change - * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change - * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change - * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change - * @sleep_ok: if true we may sleep while awaiting command completion - * - * Sets Rx properties of a virtual interface. 
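The switch in t4_alloc_vi() uses deliberate fall-through: asking for five addresses copies the fifth, then falls into the cases that copy the fourth, third and second, each landing six bytes earlier in the caller's buffer. The same copy mirrored in isolation:

#include <string.h>
#include <stdint.h>

static void copy_extra_macs(uint8_t *mac, unsigned int nmac,
			    const uint8_t extra[4][6])
{
	switch (nmac) {
	case 5: memcpy(mac + 24, extra[3], 6);	/* fall through */
	case 4: memcpy(mac + 18, extra[2], 6);	/* fall through */
	case 3: memcpy(mac + 12, extra[1], 6);	/* fall through */
	case 2: memcpy(mac + 6,  extra[0], 6);
	}
}

The first address always occupies mac[0..5]; the extras, when requested, follow consecutively at 6-byte offsets.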
- */ -int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, - int mtu, int promisc, int all_multi, int bcast, int vlanex, - bool sleep_ok) -{ - struct fw_vi_rxmode_cmd c; - - /* convert to FW values */ - if (mtu < 0) - mtu = FW_RXMODE_MTU_NO_CHG; - if (promisc < 0) - promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; - if (all_multi < 0) - all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; - if (bcast < 0) - bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; - if (vlanex < 0) - vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; - - memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); - c.retval_len16 = htonl(FW_LEN16(c)); - c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | - FW_VI_RXMODE_CMD_PROMISCEN(promisc) | - FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | - FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | - FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); - return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); -} - -/** - * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @viid: the VI id - * @free: if true any existing filters for this VI id are first removed - * @naddr: the number of MAC addresses to allocate filters for (up to 7) - * @addr: the MAC address(es) - * @idx: where to store the index of each allocated filter - * @hash: pointer to hash address filter bitmap - * @sleep_ok: call is allowed to sleep - * - * Allocates an exact-match filter for each of the supplied addresses and - * sets it to the corresponding address. If @idx is not %NULL it should - * have at least @naddr entries, each of which will be set to the index of - * the filter allocated for the corresponding MAC address. If a filter - * could not be allocated for an address its index is set to 0xffff. - * If @hash is not %NULL addresses that fail to allocate an exact filter - * are hashed and update the hash filter bitmap pointed at by @hash. - * - * Returns a negative error number or the number of filters allocated. - */ -int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, - unsigned int viid, bool free, unsigned int naddr, - const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) -{ - int i, ret; - struct fw_vi_mac_cmd c; - struct fw_vi_mac_exact *p; - - if (naddr > 7) - return -EINVAL; - - memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) | - FW_VI_MAC_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | - FW_CMD_LEN16((naddr + 2) / 2)); - - for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); - memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); - } - - ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); - if (ret) - return ret; - - for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); - - if (idx) - idx[i] = index >= NEXACT_MAC ? 
0xffff : index; - if (index < NEXACT_MAC) - ret++; - else if (hash) - *hash |= (1ULL << hash_mac_addr(addr[i])); - } - return ret; -} - -/** - * t4_change_mac - modifies the exact-match filter for a MAC address - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @viid: the VI id - * @idx: index of existing filter for old value of MAC address, or -1 - * @addr: the new MAC address value - * @persist: whether a new MAC allocation should be persistent - * @add_smt: if true also add the address to the HW SMT - * - * Modifies an exact-match filter and sets it to the new MAC address. - * Note that in general it is not possible to modify the value of a given - * filter so the generic way to modify an address filter is to free the one - * being used by the old address value and allocate a new filter for the - * new address value. @idx can be -1 if the address is a new addition. - * - * Returns a negative error number or the index of the filter with the new - * MAC value. - */ -int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, - int idx, const u8 *addr, bool persist, bool add_smt) -{ - int ret, mode; - struct fw_vi_mac_cmd c; - struct fw_vi_mac_exact *p = c.u.exact; - - if (idx < 0) /* new allocation */ - idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; - mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; - - memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); - p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_SMAC_RESULT(mode) | - FW_VI_MAC_CMD_IDX(idx)); - memcpy(p->macaddr, addr, sizeof(p->macaddr)); - - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); - if (ret == 0) { - ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); - if (ret >= NEXACT_MAC) - ret = -ENOMEM; - } - return ret; -} - -/** - * t4_set_addr_hash - program the MAC inexact-match hash filter - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @viid: the VI id - * @ucast: whether the hash filter should also match unicast addresses - * @vec: the value to be written to the hash filter - * @sleep_ok: call is allowed to sleep - * - * Sets the 64-bit inexact-match hash filter for a virtual interface. - */ -int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, - bool ucast, u64 vec, bool sleep_ok) -{ - struct fw_vi_mac_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | - FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); - c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | - FW_VI_MAC_CMD_HASHUNIEN(ucast) | - FW_CMD_LEN16(1)); - c.u.hash.hashvec = cpu_to_be64(vec); - return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); -} - -/** - * t4_enable_vi - enable/disable a virtual interface - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @viid: the VI id - * @rx_en: 1=enable Rx, 0=disable Rx - * @tx_en: 1=enable Tx, 0=disable Tx - * - * Enables/disables a virtual interface. 
- */ -int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, - bool rx_en, bool tx_en) -{ - struct fw_vi_enable_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); - c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | - FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_identify_port - identify a VI's port by blinking its LED - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @viid: the VI id - * @nblinks: how many times to blink LED at 2.5 Hz - * - * Identifies a VI's port by blinking its LED. - */ -int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, - unsigned int nblinks) -{ - struct fw_vi_enable_cmd c; - - c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); - c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); - c.blinkdur = htons(nblinks); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_iq_free - free an ingress queue and its FLs - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @pf: the PF owning the queues - * @vf: the VF owning the queues - * @iqtype: the ingress queue type - * @iqid: ingress queue id - * @fl0id: FL0 queue id or 0xffff if no attached FL0 - * @fl1id: FL1 queue id or 0xffff if no attached FL1 - * - * Frees an ingress queue and its associated FLs, if any. - */ -int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int iqtype, unsigned int iqid, - unsigned int fl0id, unsigned int fl1id) -{ - struct fw_iq_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | - FW_IQ_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c)); - c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype)); - c.iqid = htons(iqid); - c.fl0id = htons(fl0id); - c.fl1id = htons(fl1id); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_eth_eq_free - free an Ethernet egress queue - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @pf: the PF owning the queue - * @vf: the VF owning the queue - * @eqid: egress queue id - * - * Frees an Ethernet egress queue. - */ -int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int eqid) -{ - struct fw_eq_eth_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) | - FW_EQ_ETH_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); - c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid)); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_ctrl_eq_free - free a control egress queue - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @pf: the PF owning the queue - * @vf: the VF owning the queue - * @eqid: egress queue id - * - * Frees a control egress queue. 
- */ -int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int eqid) -{ - struct fw_eq_ctrl_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) | - FW_EQ_CTRL_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); - c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid)); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_ofld_eq_free - free an offload egress queue - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @pf: the PF owning the queue - * @vf: the VF owning the queue - * @eqid: egress queue id - * - * Frees an offload egress queue. - */ -int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, - unsigned int vf, unsigned int eqid) -{ - struct fw_eq_ofld_cmd c; - - memset(&c, 0, sizeof(c)); - c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | - FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) | - FW_EQ_OFLD_CMD_VFN(vf)); - c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); - c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid)); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); -} - -/** - * t4_handle_fw_rpl - process a FW reply message - * @adap: the adapter - * @rpl: start of the FW message - * - * Processes a FW message, such as link state change messages. - */ -int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) -{ - u8 opcode = *(const u8 *)rpl; - - if (opcode == FW_PORT_CMD) { /* link/module state change message */ - int speed = 0, fc = 0; - const struct fw_port_cmd *p = (void *)rpl; - int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid)); - int port = adap->chan_map[chan]; - struct port_info *pi = adap2pinfo(adap, port); - struct link_config *lc = &pi->link_cfg; - u32 stat = ntohl(p->u.info.lstatus_to_modtype); - int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0; - u32 mod = FW_PORT_CMD_MODTYPE_GET(stat); - - if (stat & FW_PORT_CMD_RXPAUSE) - fc |= PAUSE_RX; - if (stat & FW_PORT_CMD_TXPAUSE) - fc |= PAUSE_TX; - if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) - speed = SPEED_100; - else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) - speed = SPEED_1000; - else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) - speed = SPEED_10000; - - if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc) { /* something changed */ - lc->link_ok = link_ok; - lc->speed = speed; - lc->fc = fc; - t4_os_link_changed(adap, port, link_ok); - } - if (mod != pi->mod_type) { - pi->mod_type = mod; - t4_os_portmod_changed(adap, port); - } - } - return 0; -} - -static void __devinit get_pci_mode(struct adapter *adapter, - struct pci_params *p) -{ - u16 val; - u32 pcie_cap = pci_pcie_cap(adapter->pdev); - - if (pcie_cap) { - pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, - &val); - p->speed = val & PCI_EXP_LNKSTA_CLS; - p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; - } -} - -/** - * init_link_config - initialize a link's SW state - * @lc: structure holding the link state - * @caps: link capabilities - * - * Initializes the SW state maintained for each link, including the link's - * capabilities and default speed/flow-control/autonegotiation settings.
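 - *
 - *	For example, t4_port_init() below seeds each port's state from the
 - *	FW-reported port capabilities:
 - *
 - *		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));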
- */ -static void __devinit init_link_config(struct link_config *lc, - unsigned int caps) -{ - lc->supported = caps; - lc->requested_speed = 0; - lc->speed = 0; - lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; - if (lc->supported & FW_PORT_CAP_ANEG) { - lc->advertising = lc->supported & ADVERT_MASK; - lc->autoneg = AUTONEG_ENABLE; - lc->requested_fc |= PAUSE_AUTONEG; - } else { - lc->advertising = 0; - lc->autoneg = AUTONEG_DISABLE; - } -} - -int t4_wait_dev_ready(struct adapter *adap) -{ - if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff) - return 0; - msleep(500); - return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; -} - -static int __devinit get_flash_params(struct adapter *adap) -{ - int ret; - u32 info; - - ret = sf1_write(adap, 1, 1, 0, SF_RD_ID); - if (!ret) - ret = sf1_read(adap, 3, 0, 1, &info); - t4_write_reg(adap, SF_OP, 0); /* unlock SF */ - if (ret) - return ret; - - if ((info & 0xff) != 0x20) /* not a Numonix flash */ - return -EINVAL; - info >>= 16; /* log2 of size */ - if (info >= 0x14 && info < 0x18) - adap->params.sf_nsec = 1 << (info - 16); - else if (info == 0x18) - adap->params.sf_nsec = 64; - else - return -EINVAL; - adap->params.sf_size = 1 << info; - adap->params.sf_fw_start = - t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK; - return 0; -} - -/** - * t4_prep_adapter - prepare SW and HW for operation - * @adapter: the adapter - * - * Initialize adapter SW state for the various HW modules and set - * initial values for some adapter tunables. - */ -int __devinit t4_prep_adapter(struct adapter *adapter) -{ - int ret; - - ret = t4_wait_dev_ready(adapter); - if (ret < 0) - return ret; - - get_pci_mode(adapter, &adapter->params.pci); - adapter->params.rev = t4_read_reg(adapter, PL_REV); - - ret = get_flash_params(adapter); - if (ret < 0) { - dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret); - return ret; - } - - ret = get_vpd_params(adapter, &adapter->params.vpd); - if (ret < 0) - return ret; - - init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); - - /* - * Default port for debugging in case we can't reach FW. - */ - adapter->params.nports = 1; - adapter->params.portvec = 1; - return 0; -} - -int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf) -{ - u8 addr[6]; - int ret, i, j = 0; - struct fw_port_cmd c; - struct fw_rss_vi_config_cmd rvc; - - memset(&c, 0, sizeof(c)); - memset(&rvc, 0, sizeof(rvc)); - - for_each_port(adap, i) { - unsigned int rss_size; - struct port_info *p = adap2pinfo(adap, i); - - while ((adap->params.portvec & (1 << j)) == 0) - j++; - - c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | - FW_CMD_REQUEST | FW_CMD_READ | - FW_PORT_CMD_PORTID(j)); - c.action_to_len16 = htonl( - FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(c)); - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); - if (ret) - return ret; - - ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); - if (ret < 0) - return ret; - - p->viid = ret; - p->tx_chan = j; - p->lport = j; - p->rss_size = rss_size; - memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); - memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); - adap->port[i]->dev_id = j; - - ret = ntohl(c.u.info.lstatus_to_modtype); - p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
- FW_PORT_CMD_MDIOADDR_GET(ret) : -1; - p->port_type = FW_PORT_CMD_PTYPE_GET(ret); - p->mod_type = FW_PORT_MOD_TYPE_NA; - - rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_READ | - FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); - rvc.retval_len16 = htonl(FW_LEN16(rvc)); - ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); - if (ret) - return ret; - p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen); - - init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); - j++; - } - return 0; -} diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h deleted file mode 100644 index c26b455..0000000 --- a/drivers/net/cxgb4/t4_hw.h +++ /dev/null @@ -1,140 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef __T4_HW_H -#define __T4_HW_H - -#include <linux/types.h> - -enum { - NCHAN = 4, /* # of HW channels */ - MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ - EEPROMSIZE = 17408, /* Serial EEPROM physical size */ - EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ - EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */ - RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ - TCB_SIZE = 128, /* TCB size */ - NMTUS = 16, /* size of MTU table */ - NCCTRL_WIN = 32, /* # of congestion control windows */ - NEXACT_MAC = 336, /* # of exact MAC address filters */ - L2T_SIZE = 4096, /* # of L2T entries */ - MBOX_LEN = 64, /* mailbox size in bytes */ - TRACE_LEN = 112, /* length of trace data and mask */ - FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ - NWOL_PAT = 8, /* # of WoL patterns */ - WOL_PAT_LEN = 128, /* length of WoL patterns */ -}; - -enum { - SF_PAGE_SIZE = 256, /* serial flash page size */ -}; - -enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ - -enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */ - -enum { - SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ - SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ - SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ - - SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */ - SGE_TIMER_UPD_CIDX = 7, /* update cidx only */ - - SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */ - - SGE_INTRDST_PCI = 0, /* interrupt destination is PCI-E */ - SGE_INTRDST_IQ = 1, /* destination is an ingress queue */ - - SGE_UPDATEDEL_NONE = 0, /* ingress queue pidx update delivery */ - SGE_UPDATEDEL_INTR = 1, /* interrupt */ - SGE_UPDATEDEL_STPG = 2, /* status page */ - SGE_UPDATEDEL_BOTH = 3, /* interrupt and status page */ - - SGE_HOSTFCMODE_NONE = 0, /* egress queue cidx updates */ - SGE_HOSTFCMODE_IQ = 1, /* sent to ingress queue */ - SGE_HOSTFCMODE_STPG = 2, /* sent to status page */ - SGE_HOSTFCMODE_BOTH = 3, /* ingress queue and status page */ - - SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */ - SGE_FETCHBURSTMIN_32B = 1, - SGE_FETCHBURSTMIN_64B = 2, - SGE_FETCHBURSTMIN_128B = 3, - - SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */ - SGE_FETCHBURSTMAX_128B = 1, - SGE_FETCHBURSTMAX_256B = 2, - SGE_FETCHBURSTMAX_512B = 3, - - SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */ - SGE_CIDXFLUSHTHRESH_2 = 1, - SGE_CIDXFLUSHTHRESH_4 = 2, - SGE_CIDXFLUSHTHRESH_8 = 3, - SGE_CIDXFLUSHTHRESH_16 = 4, - SGE_CIDXFLUSHTHRESH_32 = 5, - SGE_CIDXFLUSHTHRESH_64 = 6, - SGE_CIDXFLUSHTHRESH_128 = 7, - - SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */ -}; - -struct sge_qstat { /* data written to SGE queue status entries */ - __be32 qid; - __be16 cidx; - __be16 pidx; -}; - -/* - * Structure for last 128 bits of response descriptors - */ -struct rsp_ctrl { - __be32 hdrbuflen_pidx; - __be32 pldbuflen_qid; - union { - u8 type_gen; - __be64 last_flit; - }; -}; - -#define RSPD_NEWBUF 0x80000000U -#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU) -#define RSPD_QID(x) RSPD_LEN(x) - -#define RSPD_GEN(x) ((x) >> 7) -#define RSPD_TYPE(x) (((x) >> 4) & 3) - -#define QINTR_CNT_EN 0x1 -#define QINTR_TIMER_IDX(x) ((x) << 1) -#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7) -#endif /* __T4_HW_H */ diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h deleted file mode 100644 index eb71b82..0000000 --- a/drivers/net/cxgb4/t4_msg.h +++ /dev/null
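The rsp_ctrl/RSPD_* and QINTR_* helpers in the header above combine as follows when a driver inspects ingress-queue responses and programs interrupt holdoff; a minimal sketch, assuming only the definitions shown (the function names here are illustrative, not part of the driver):

static inline int rspd_type(const struct rsp_ctrl *rc)
{
	/* response type lives in bits 5:4 of the last flit's type_gen byte */
	return RSPD_TYPE(rc->type_gen);	/* RSP_TYPE_FLBUF, _CPL or _INTR */
}

static inline u8 iq_intr_params(unsigned int timer_idx, bool cnt_en)
{
	/* holdoff timer index, optionally qualified by a packet-count threshold */
	return QINTR_TIMER_IDX(timer_idx) | (cnt_en ? QINTR_CNT_EN : 0);
}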
@@ -1,678 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __T4_MSG_H -#define __T4_MSG_H - -#include <linux/types.h> - -enum { - CPL_PASS_OPEN_REQ = 0x1, - CPL_PASS_ACCEPT_RPL = 0x2, - CPL_ACT_OPEN_REQ = 0x3, - CPL_SET_TCB_FIELD = 0x5, - CPL_GET_TCB = 0x6, - CPL_CLOSE_CON_REQ = 0x8, - CPL_CLOSE_LISTSRV_REQ = 0x9, - CPL_ABORT_REQ = 0xA, - CPL_ABORT_RPL = 0xB, - CPL_RX_DATA_ACK = 0xD, - CPL_TX_PKT = 0xE, - CPL_L2T_WRITE_REQ = 0x12, - CPL_TID_RELEASE = 0x1A, - - CPL_CLOSE_LISTSRV_RPL = 0x20, - CPL_L2T_WRITE_RPL = 0x23, - CPL_PASS_OPEN_RPL = 0x24, - CPL_ACT_OPEN_RPL = 0x25, - CPL_PEER_CLOSE = 0x26, - CPL_ABORT_REQ_RSS = 0x2B, - CPL_ABORT_RPL_RSS = 0x2D, - - CPL_CLOSE_CON_RPL = 0x32, - CPL_ISCSI_HDR = 0x33, - CPL_RDMA_CQE = 0x35, - CPL_RDMA_CQE_READ_RSP = 0x36, - CPL_RDMA_CQE_ERR = 0x37, - CPL_RX_DATA = 0x39, - CPL_SET_TCB_RPL = 0x3A, - CPL_RX_PKT = 0x3B, - CPL_RX_DDP_COMPLETE = 0x3F, - - CPL_ACT_ESTABLISH = 0x40, - CPL_PASS_ESTABLISH = 0x41, - CPL_RX_DATA_DDP = 0x42, - CPL_PASS_ACCEPT_REQ = 0x44, - - CPL_RDMA_READ_REQ = 0x60, - - CPL_PASS_OPEN_REQ6 = 0x81, - CPL_ACT_OPEN_REQ6 = 0x83, - - CPL_RDMA_TERMINATE = 0xA2, - CPL_RDMA_WRITE = 0xA4, - CPL_SGE_EGR_UPDATE = 0xA5, - - CPL_TRACE_PKT = 0xB0, - - CPL_FW4_MSG = 0xC0, - CPL_FW4_PLD = 0xC1, - CPL_FW4_ACK = 0xC3, - - CPL_FW6_MSG = 0xE0, - CPL_FW6_PLD = 0xE1, - CPL_TX_PKT_LSO = 0xED, - CPL_TX_PKT_XT = 0xEE, - - NUM_CPL_CMDS -}; - -enum CPL_error { - CPL_ERR_NONE = 0, - CPL_ERR_TCAM_FULL = 3, - CPL_ERR_BAD_LENGTH = 15, - CPL_ERR_BAD_ROUTE = 18, - CPL_ERR_CONN_RESET = 20, - CPL_ERR_CONN_EXIST_SYNRECV = 21, - CPL_ERR_CONN_EXIST = 22, - CPL_ERR_ARP_MISS = 23, - CPL_ERR_BAD_SYN = 24, - CPL_ERR_CONN_TIMEDOUT = 30, - CPL_ERR_XMIT_TIMEDOUT = 31, - CPL_ERR_PERSIST_TIMEDOUT = 32, - CPL_ERR_FINWAIT2_TIMEDOUT = 33, - CPL_ERR_KEEPALIVE_TIMEDOUT = 34, - CPL_ERR_RTX_NEG_ADVICE = 35, - CPL_ERR_PERSIST_NEG_ADVICE = 36, - CPL_ERR_ABORT_FAILED = 42, - CPL_ERR_IWARP_FLM = 50, -}; - -enum { - ULP_MODE_NONE = 0, - ULP_MODE_ISCSI = 2, - ULP_MODE_RDMA = 4, - ULP_MODE_TCPDDP = 5, - ULP_MODE_FCOE = 6, -}; - -enum { - ULP_CRC_HEADER = 1 << 0, - ULP_CRC_DATA =
1 << 1 -}; - -enum { - CPL_ABORT_SEND_RST = 0, - CPL_ABORT_NO_RST, -}; - -enum { /* TX_PKT_XT checksum types */ - TX_CSUM_TCP = 0, - TX_CSUM_UDP = 1, - TX_CSUM_CRC16 = 4, - TX_CSUM_CRC32 = 5, - TX_CSUM_CRC32C = 6, - TX_CSUM_FCOE = 7, - TX_CSUM_TCPIP = 8, - TX_CSUM_UDPIP = 9, - TX_CSUM_TCPIP6 = 10, - TX_CSUM_UDPIP6 = 11, - TX_CSUM_IP = 12, -}; - -union opcode_tid { - __be32 opcode_tid; - u8 opcode; -}; - -#define CPL_OPCODE(x) ((x) << 24) -#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid)) -#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) -#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF) - -/* partitioning of TID fields that also carry a queue id */ -#define GET_TID_TID(x) ((x) & 0x3fff) -#define GET_TID_QID(x) (((x) >> 14) & 0x3ff) -#define TID_QID(x) ((x) << 14) - -struct rss_header { - u8 opcode; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 channel:2; - u8 filter_hit:1; - u8 filter_tid:1; - u8 hash_type:2; - u8 ipv6:1; - u8 send2fw:1; -#else - u8 send2fw:1; - u8 ipv6:1; - u8 hash_type:2; - u8 filter_tid:1; - u8 filter_hit:1; - u8 channel:2; -#endif - __be16 qid; - __be32 hash_val; -}; - -struct work_request_hdr { - __be32 wr_hi; - __be32 wr_mid; - __be64 wr_lo; -}; - -#define WR_HDR struct work_request_hdr wr - -struct cpl_pass_open_req { - WR_HDR; - union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be32 local_ip; - __be32 peer_ip; - __be64 opt0; -#define TX_CHAN(x) ((x) << 2) -#define DELACK(x) ((x) << 5) -#define ULP_MODE(x) ((x) << 8) -#define RCV_BUFSIZ(x) ((x) << 12) -#define DSCP(x) ((x) << 22) -#define SMAC_SEL(x) ((u64)(x) << 28) -#define L2T_IDX(x) ((u64)(x) << 36) -#define NAGLE(x) ((u64)(x) << 49) -#define WND_SCALE(x) ((u64)(x) << 50) -#define KEEP_ALIVE(x) ((u64)(x) << 54) -#define MSS_IDX(x) ((u64)(x) << 60) - __be64 opt1; -#define SYN_RSS_ENABLE (1 << 0) -#define SYN_RSS_QUEUE(x) ((x) << 2) -#define CONN_POLICY_ASK (1 << 22) -}; - -struct cpl_pass_open_req6 { - WR_HDR; - union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be64 local_ip_hi; - __be64 local_ip_lo; - __be64 peer_ip_hi; - __be64 peer_ip_lo; - __be64 opt0; - __be64 opt1; -}; - -struct cpl_pass_open_rpl { - union opcode_tid ot; - u8 rsvd[3]; - u8 status; -}; - -struct cpl_pass_accept_rpl { - WR_HDR; - union opcode_tid ot; - __be32 opt2; -#define RSS_QUEUE(x) ((x) << 0) -#define RSS_QUEUE_VALID (1 << 10) -#define RX_COALESCE_VALID(x) ((x) << 11) -#define RX_COALESCE(x) ((x) << 12) -#define TX_QUEUE(x) ((x) << 23) -#define RX_CHANNEL(x) ((x) << 26) -#define WND_SCALE_EN(x) ((x) << 28) -#define TSTAMPS_EN(x) ((x) << 29) -#define SACK_EN(x) ((x) << 30) - __be64 opt0; -}; - -struct cpl_act_open_req { - WR_HDR; - union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be32 local_ip; - __be32 peer_ip; - __be64 opt0; - __be32 params; - __be32 opt2; -}; - -struct cpl_act_open_req6 { - WR_HDR; - union opcode_tid ot; - __be16 local_port; - __be16 peer_port; - __be64 local_ip_hi; - __be64 local_ip_lo; - __be64 peer_ip_hi; - __be64 peer_ip_lo; - __be64 opt0; - __be32 params; - __be32 opt2; -}; - -struct cpl_act_open_rpl { - union opcode_tid ot; - __be32 atid_status; -#define GET_AOPEN_STATUS(x) ((x) & 0xff) -#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff) -}; - -struct cpl_pass_establish { - union opcode_tid ot; - __be32 rsvd; - __be32 tos_stid; -#define GET_POPEN_TID(x) ((x) & 0xffffff) -#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff) - __be16 mac_idx; - __be16 tcp_opt; -#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) -#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1) 
-#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) -#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) -#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf) - __be32 snd_isn; - __be32 rcv_isn; -}; - -struct cpl_act_establish { - union opcode_tid ot; - __be32 rsvd; - __be32 tos_atid; - __be16 mac_idx; - __be16 tcp_opt; - __be32 snd_isn; - __be32 rcv_isn; -}; - -struct cpl_get_tcb { - WR_HDR; - union opcode_tid ot; - __be16 reply_ctrl; -#define QUEUENO(x) ((x) << 0) -#define REPLY_CHAN(x) ((x) << 14) -#define NO_REPLY(x) ((x) << 15) - __be16 cookie; -}; - -struct cpl_set_tcb_field { - WR_HDR; - union opcode_tid ot; - __be16 reply_ctrl; - __be16 word_cookie; -#define TCB_WORD(x) ((x) << 0) -#define TCB_COOKIE(x) ((x) << 5) - __be64 mask; - __be64 val; -}; - -struct cpl_set_tcb_rpl { - union opcode_tid ot; - __be16 rsvd; - u8 cookie; - u8 status; - __be64 oldval; -}; - -struct cpl_close_con_req { - WR_HDR; - union opcode_tid ot; - __be32 rsvd; -}; - -struct cpl_close_con_rpl { - union opcode_tid ot; - u8 rsvd[3]; - u8 status; - __be32 snd_nxt; - __be32 rcv_nxt; -}; - -struct cpl_close_listsvr_req { - WR_HDR; - union opcode_tid ot; - __be16 reply_ctrl; -#define LISTSVR_IPV6 (1 << 14) - __be16 rsvd; -}; - -struct cpl_close_listsvr_rpl { - union opcode_tid ot; - u8 rsvd[3]; - u8 status; -}; - -struct cpl_abort_req_rss { - union opcode_tid ot; - u8 rsvd[3]; - u8 status; -}; - -struct cpl_abort_req { - WR_HDR; - union opcode_tid ot; - __be32 rsvd0; - u8 rsvd1; - u8 cmd; - u8 rsvd2[6]; -}; - -struct cpl_abort_rpl_rss { - union opcode_tid ot; - u8 rsvd[3]; - u8 status; -}; - -struct cpl_abort_rpl { - WR_HDR; - union opcode_tid ot; - __be32 rsvd0; - u8 rsvd1; - u8 cmd; - u8 rsvd2[6]; -}; - -struct cpl_peer_close { - union opcode_tid ot; - __be32 rcv_nxt; -}; - -struct cpl_tid_release { - WR_HDR; - union opcode_tid ot; - __be32 rsvd; -}; - -struct cpl_tx_pkt_core { - __be32 ctrl0; -#define TXPKT_VF(x) ((x) << 0) -#define TXPKT_PF(x) ((x) << 8) -#define TXPKT_VF_VLD (1 << 11) -#define TXPKT_OVLAN_IDX(x) ((x) << 12) -#define TXPKT_INTF(x) ((x) << 16) -#define TXPKT_INS_OVLAN (1 << 21) -#define TXPKT_OPCODE(x) ((x) << 24) - __be16 pack; - __be16 len; - __be64 ctrl1; -#define TXPKT_CSUM_END(x) ((x) << 12) -#define TXPKT_CSUM_START(x) ((x) << 20) -#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20) -#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30) -#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34) -#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40) -#define TXPKT_VLAN(x) ((u64)(x) << 44) -#define TXPKT_VLAN_VLD (1ULL << 60) -#define TXPKT_IPCSUM_DIS (1ULL << 62) -#define TXPKT_L4CSUM_DIS (1ULL << 63) -}; - -struct cpl_tx_pkt { - WR_HDR; - struct cpl_tx_pkt_core c; -}; - -#define cpl_tx_pkt_xt cpl_tx_pkt - -struct cpl_tx_pkt_lso_core { - __be32 lso_ctrl; -#define LSO_TCPHDR_LEN(x) ((x) << 0) -#define LSO_IPHDR_LEN(x) ((x) << 4) -#define LSO_ETHHDR_LEN(x) ((x) << 16) -#define LSO_IPV6(x) ((x) << 20) -#define LSO_LAST_SLICE (1 << 22) -#define LSO_FIRST_SLICE (1 << 23) -#define LSO_OPCODE(x) ((x) << 24) - __be16 ipid_ofst; - __be16 mss; - __be32 seqno_offset; - __be32 len; - /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ -}; - -struct cpl_tx_pkt_lso { - WR_HDR; - struct cpl_tx_pkt_lso_core c; - /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ -}; - -struct cpl_iscsi_hdr { - union opcode_tid ot; - __be16 pdu_len_ddp; -#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF) -#define ISCSI_DDP (1 << 15) - __be16 len; - __be32 seq; - __be16 urg; - u8 rsvd; - u8 status; -}; - -struct cpl_rx_data { - union opcode_tid ot; - __be16 
rsvd; - __be16 len; - __be32 seq; - __be16 urg; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 dack_mode:2; - u8 psh:1; - u8 heartbeat:1; - u8 ddp_off:1; - u8 :3; -#else - u8 :3; - u8 ddp_off:1; - u8 heartbeat:1; - u8 psh:1; - u8 dack_mode:2; -#endif - u8 status; -}; - -struct cpl_rx_data_ack { - WR_HDR; - union opcode_tid ot; - __be32 credit_dack; -#define RX_CREDITS(x) ((x) << 0) -#define RX_FORCE_ACK(x) ((x) << 28) -}; - -struct cpl_rx_pkt { - struct rss_header rsshdr; - u8 opcode; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 iff:4; - u8 csum_calc:1; - u8 ipmi_pkt:1; - u8 vlan_ex:1; - u8 ip_frag:1; -#else - u8 ip_frag:1; - u8 vlan_ex:1; - u8 ipmi_pkt:1; - u8 csum_calc:1; - u8 iff:4; -#endif - __be16 csum; - __be16 vlan; - __be16 len; - __be32 l2info; -#define RXF_UDP (1 << 22) -#define RXF_TCP (1 << 23) -#define RXF_IP (1 << 24) -#define RXF_IP6 (1 << 25) - __be16 hdr_len; - __be16 err_vec; -}; - -struct cpl_trace_pkt { - u8 opcode; - u8 intf; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 runt:4; - u8 filter_hit:4; - u8 :6; - u8 err:1; - u8 trunc:1; -#else - u8 filter_hit:4; - u8 runt:4; - u8 trunc:1; - u8 err:1; - u8 :6; -#endif - __be16 rsvd; - __be16 len; - __be64 tstamp; -}; - -struct cpl_l2t_write_req { - WR_HDR; - union opcode_tid ot; - __be16 params; -#define L2T_W_INFO(x) ((x) << 2) -#define L2T_W_PORT(x) ((x) << 8) -#define L2T_W_NOREPLY(x) ((x) << 15) - __be16 l2t_idx; - __be16 vlan; - u8 dst_mac[6]; -}; - -struct cpl_l2t_write_rpl { - union opcode_tid ot; - u8 status; - u8 rsvd[3]; -}; - -struct cpl_rdma_terminate { - union opcode_tid ot; - __be16 rsvd; - __be16 len; -}; - -struct cpl_sge_egr_update { - __be32 opcode_qid; -#define EGR_QID(x) ((x) & 0x1FFFF) - __be16 cidx; - __be16 pidx; -}; - -struct cpl_fw4_pld { - u8 opcode; - u8 rsvd0[3]; - u8 type; - u8 rsvd1; - __be16 len; - __be64 data; - __be64 rsvd2; -}; - -struct cpl_fw6_pld { - u8 opcode; - u8 rsvd[5]; - __be16 len; - __be64 data[4]; -}; - -struct cpl_fw4_msg { - u8 opcode; - u8 type; - __be16 rsvd0; - __be32 rsvd1; - __be64 data[2]; -}; - -struct cpl_fw4_ack { - union opcode_tid ot; - u8 credits; - u8 rsvd0[2]; - u8 seq_vld; - __be32 snd_nxt; - __be32 snd_una; - __be64 rsvd1; -}; - -struct cpl_fw6_msg { - u8 opcode; - u8 type; - __be16 rsvd0; - __be32 rsvd1; - __be64 data[4]; -}; - -/* cpl_fw6_msg.type values */ -enum { - FW6_TYPE_CMD_RPL = 0, -}; - -enum { - ULP_TX_MEM_READ = 2, - ULP_TX_MEM_WRITE = 3, - ULP_TX_PKT = 4 -}; - -enum { - ULP_TX_SC_NOOP = 0x80, - ULP_TX_SC_IMM = 0x81, - ULP_TX_SC_DSGL = 0x82, - ULP_TX_SC_ISGL = 0x83 -}; - -struct ulptx_sge_pair { - __be32 len[2]; - __be64 addr[2]; -}; - -struct ulptx_sgl { - __be32 cmd_nsge; -#define ULPTX_CMD(x) ((x) << 24) -#define ULPTX_NSGE(x) ((x) << 0) - __be32 len0; - __be64 addr0; - struct ulptx_sge_pair sge[0]; -}; - -struct ulp_mem_io { - WR_HDR; - __be32 cmd; -#define ULP_MEMIO_ORDER(x) ((x) << 23) - __be32 len16; /* command length */ - __be32 dlen; /* data length in 32-byte units */ -#define ULP_MEMIO_DATA_LEN(x) ((x) << 0) - __be32 lock_addr; -#define ULP_MEMIO_ADDR(x) ((x) << 0) -#define ULP_MEMIO_LOCK(x) ((x) << 31) -}; - -#endif /* __T4_MSG_H */ diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h deleted file mode 100644 index 0adc5bc..0000000 --- a/drivers/net/cxgb4/t4_regs.h +++ /dev/null @@ -1,885 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __T4_REGS_H -#define __T4_REGS_H - -#define MYPF_BASE 0x1b000 -#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) - -#define PF0_BASE 0x1e000 -#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) - -#define PF_STRIDE 0x400 -#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) -#define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) - -#define MYPORT_BASE 0x1c000 -#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) - -#define PORT0_BASE 0x20000 -#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) - -#define PORT_STRIDE 0x2000 -#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) -#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) - -#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR) -#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx) - -#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) -#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) -#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) -#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) - -#define SGE_PF_KDOORBELL 0x0 -#define QID_MASK 0xffff8000U -#define QID_SHIFT 15 -#define QID(x) ((x) << QID_SHIFT) -#define DBPRIO 0x00004000U -#define PIDX_MASK 0x00003fffU -#define PIDX_SHIFT 0 -#define PIDX(x) ((x) << PIDX_SHIFT) - -#define SGE_PF_GTS 0x4 -#define INGRESSQID_MASK 0xffff0000U -#define INGRESSQID_SHIFT 16 -#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT) -#define TIMERREG_MASK 0x0000e000U -#define TIMERREG_SHIFT 13 -#define TIMERREG(x) ((x) << TIMERREG_SHIFT) -#define SEINTARM_MASK 0x00001000U -#define SEINTARM_SHIFT 12 -#define SEINTARM(x) ((x) << SEINTARM_SHIFT) -#define CIDXINC_MASK 0x00000fffU -#define CIDXINC_SHIFT 0 -#define CIDXINC(x) ((x) << CIDXINC_SHIFT) - -#define SGE_CONTROL 0x1008 -#define DCASYSTYPE 0x00080000U -#define RXPKTCPLMODE 0x00040000U -#define EGRSTATUSPAGESIZE 0x00020000U -#define PKTSHIFT_MASK 0x00001c00U -#define PKTSHIFT_SHIFT 10 -#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) -#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) -#define INGPCIEBOUNDARY_MASK 0x00000380U -#define INGPCIEBOUNDARY_SHIFT 7 -#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) -#define INGPADBOUNDARY_MASK 0x00000070U -#define INGPADBOUNDARY_SHIFT 4 -#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) 
-#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ - >> INGPADBOUNDARY_SHIFT) -#define EGRPCIEBOUNDARY_MASK 0x0000000eU -#define EGRPCIEBOUNDARY_SHIFT 1 -#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) -#define GLOBALENABLE 0x00000001U - -#define SGE_HOST_PAGE_SIZE 0x100c -#define HOSTPAGESIZEPF0_MASK 0x0000000fU -#define HOSTPAGESIZEPF0_SHIFT 0 -#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) - -#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 -#define QUEUESPERPAGEPF0_MASK 0x0000000fU -#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) - -#define SGE_INT_CAUSE1 0x1024 -#define SGE_INT_CAUSE2 0x1030 -#define SGE_INT_CAUSE3 0x103c -#define ERR_FLM_DBP 0x80000000U -#define ERR_FLM_IDMA1 0x40000000U -#define ERR_FLM_IDMA0 0x20000000U -#define ERR_FLM_HINT 0x10000000U -#define ERR_PCIE_ERROR3 0x08000000U -#define ERR_PCIE_ERROR2 0x04000000U -#define ERR_PCIE_ERROR1 0x02000000U -#define ERR_PCIE_ERROR0 0x01000000U -#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U -#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U -#define ERR_INVALID_CIDX_INC 0x00200000U -#define ERR_ITP_TIME_PAUSED 0x00100000U -#define ERR_CPL_OPCODE_0 0x00080000U -#define ERR_DROPPED_DB 0x00040000U -#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U -#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U -#define ERR_BAD_DB_PIDX3 0x00008000U -#define ERR_BAD_DB_PIDX2 0x00004000U -#define ERR_BAD_DB_PIDX1 0x00002000U -#define ERR_BAD_DB_PIDX0 0x00001000U -#define ERR_ING_PCIE_CHAN 0x00000800U -#define ERR_ING_CTXT_PRIO 0x00000400U -#define ERR_EGR_CTXT_PRIO 0x00000200U -#define DBFIFO_HP_INT 0x00000100U -#define DBFIFO_LP_INT 0x00000080U -#define REG_ADDRESS_ERR 0x00000040U -#define INGRESS_SIZE_ERR 0x00000020U -#define EGRESS_SIZE_ERR 0x00000010U -#define ERR_INV_CTXT3 0x00000008U -#define ERR_INV_CTXT2 0x00000004U -#define ERR_INV_CTXT1 0x00000002U -#define ERR_INV_CTXT0 0x00000001U - -#define SGE_INT_ENABLE3 0x1040 -#define SGE_FL_BUFFER_SIZE0 0x1044 -#define SGE_FL_BUFFER_SIZE1 0x1048 -#define SGE_INGRESS_RX_THRESHOLD 0x10a0 -#define THRESHOLD_0_MASK 0x3f000000U -#define THRESHOLD_0_SHIFT 24 -#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT) -#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT) -#define THRESHOLD_1_MASK 0x003f0000U -#define THRESHOLD_1_SHIFT 16 -#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT) -#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT) -#define THRESHOLD_2_MASK 0x00003f00U -#define THRESHOLD_2_SHIFT 8 -#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT) -#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT) -#define THRESHOLD_3_MASK 0x0000003fU -#define THRESHOLD_3_SHIFT 0 -#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) -#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) - -#define SGE_TIMER_VALUE_0_AND_1 0x10b8 -#define TIMERVALUE0_MASK 0xffff0000U -#define TIMERVALUE0_SHIFT 16 -#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT) -#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT) -#define TIMERVALUE1_MASK 0x0000ffffU -#define TIMERVALUE1_SHIFT 0 -#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT) -#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) - -#define SGE_TIMER_VALUE_2_AND_3 0x10bc -#define SGE_TIMER_VALUE_4_AND_5 0x10c0 -#define SGE_DEBUG_INDEX 0x10cc -#define SGE_DEBUG_DATA_HIGH 0x10d0 -#define SGE_DEBUG_DATA_LOW 0x10d4 -#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 - -#define PCIE_PF_CLI 0x44 -#define PCIE_INT_CAUSE 0x3004 -#define 
UNXSPLCPLERR 0x20000000U -#define PCIEPINT 0x10000000U -#define PCIESINT 0x08000000U -#define RPLPERR 0x04000000U -#define RXWRPERR 0x02000000U -#define RXCPLPERR 0x01000000U -#define PIOTAGPERR 0x00800000U -#define MATAGPERR 0x00400000U -#define INTXCLRPERR 0x00200000U -#define FIDPERR 0x00100000U -#define CFGSNPPERR 0x00080000U -#define HRSPPERR 0x00040000U -#define HREQPERR 0x00020000U -#define HCNTPERR 0x00010000U -#define DRSPPERR 0x00008000U -#define DREQPERR 0x00004000U -#define DCNTPERR 0x00002000U -#define CRSPPERR 0x00001000U -#define CREQPERR 0x00000800U -#define CCNTPERR 0x00000400U -#define TARTAGPERR 0x00000200U -#define PIOREQPERR 0x00000100U -#define PIOCPLPERR 0x00000080U -#define MSIXDIPERR 0x00000040U -#define MSIXDATAPERR 0x00000020U -#define MSIXADDRHPERR 0x00000010U -#define MSIXADDRLPERR 0x00000008U -#define MSIDATAPERR 0x00000004U -#define MSIADDRHPERR 0x00000002U -#define MSIADDRLPERR 0x00000001U - -#define PCIE_NONFAT_ERR 0x3010 -#define PCIE_MEM_ACCESS_BASE_WIN 0x3068 -#define PCIEOFST_MASK 0xfffffc00U -#define BIR_MASK 0x00000300U -#define BIR_SHIFT 8 -#define BIR(x) ((x) << BIR_SHIFT) -#define WINDOW_MASK 0x000000ffU -#define WINDOW_SHIFT 0 -#define WINDOW(x) ((x) << WINDOW_SHIFT) -#define PCIE_MEM_ACCESS_OFFSET 0x306c - -#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 -#define RNPP 0x80000000U -#define RPCP 0x20000000U -#define RCIP 0x08000000U -#define RCCP 0x04000000U -#define RFTP 0x00800000U -#define PTRP 0x00100000U - -#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4 -#define TPCP 0x40000000U -#define TNPP 0x20000000U -#define TFTP 0x10000000U -#define TCAP 0x08000000U -#define TCIP 0x04000000U -#define RCAP 0x02000000U -#define PLUP 0x00800000U -#define PLDN 0x00400000U -#define OTDD 0x00200000U -#define GTRP 0x00100000U -#define RDPE 0x00040000U -#define TDCE 0x00020000U -#define TDUE 0x00010000U - -#define MC_INT_CAUSE 0x7518 -#define ECC_UE_INT_CAUSE 0x00000004U -#define ECC_CE_INT_CAUSE 0x00000002U -#define PERR_INT_CAUSE 0x00000001U - -#define MC_ECC_STATUS 0x751c -#define ECC_CECNT_MASK 0xffff0000U -#define ECC_CECNT_SHIFT 16 -#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT) -#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT) -#define ECC_UECNT_MASK 0x0000ffffU -#define ECC_UECNT_SHIFT 0 -#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT) -#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT) - -#define MC_BIST_CMD 0x7600 -#define START_BIST 0x80000000U -#define BIST_CMD_GAP_MASK 0x0000ff00U -#define BIST_CMD_GAP_SHIFT 8 -#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT) -#define BIST_OPCODE_MASK 0x00000003U -#define BIST_OPCODE_SHIFT 0 -#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT) - -#define MC_BIST_CMD_ADDR 0x7604 -#define MC_BIST_CMD_LEN 0x7608 -#define MC_BIST_DATA_PATTERN 0x760c -#define BIST_DATA_TYPE_MASK 0x0000000fU -#define BIST_DATA_TYPE_SHIFT 0 -#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT) - -#define MC_BIST_STATUS_RDATA 0x7688 - -#define MA_EXT_MEMORY_BAR 0x77c8 -#define EXT_MEM_SIZE_MASK 0x00000fffU -#define EXT_MEM_SIZE_SHIFT 0 -#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) - -#define MA_TARGET_MEM_ENABLE 0x77d8 -#define EXT_MEM_ENABLE 0x00000004U -#define EDRAM1_ENABLE 0x00000002U -#define EDRAM0_ENABLE 0x00000001U - -#define MA_INT_CAUSE 0x77e0 -#define MEM_PERR_INT_CAUSE 0x00000002U -#define MEM_WRAP_INT_CAUSE 0x00000001U - -#define MA_INT_WRAP_STATUS 0x77e4 -#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U -#define MEM_WRAP_ADDRESS_SHIFT 4 -#define 
MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT) -#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU -#define MEM_WRAP_CLIENT_NUM_SHIFT 0 -#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) - -#define MA_PARITY_ERROR_STATUS 0x77f4 - -#define EDC_0_BASE_ADDR 0x7900 - -#define EDC_BIST_CMD 0x7904 -#define EDC_BIST_CMD_ADDR 0x7908 -#define EDC_BIST_CMD_LEN 0x790c -#define EDC_BIST_DATA_PATTERN 0x7910 -#define EDC_BIST_STATUS_RDATA 0x7928 -#define EDC_INT_CAUSE 0x7978 -#define ECC_UE_PAR 0x00000020U -#define ECC_CE_PAR 0x00000010U -#define PERR_PAR_CAUSE 0x00000008U - -#define EDC_ECC_STATUS 0x797c - -#define EDC_1_BASE_ADDR 0x7980 - -#define CIM_BOOT_CFG 0x7b00 -#define BOOTADDR_MASK 0xffffff00U - -#define CIM_PF_MAILBOX_DATA 0x240 -#define CIM_PF_MAILBOX_CTRL 0x280 -#define MBMSGVALID 0x00000008U -#define MBINTREQ 0x00000004U -#define MBOWNER_MASK 0x00000003U -#define MBOWNER_SHIFT 0 -#define MBOWNER(x) ((x) << MBOWNER_SHIFT) -#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT) - -#define CIM_PF_HOST_INT_CAUSE 0x28c -#define MBMSGRDYINT 0x00080000U - -#define CIM_HOST_INT_CAUSE 0x7b2c -#define TIEQOUTPARERRINT 0x00100000U -#define TIEQINPARERRINT 0x00080000U -#define MBHOSTPARERR 0x00040000U -#define MBUPPARERR 0x00020000U -#define IBQPARERR 0x0001f800U -#define IBQTP0PARERR 0x00010000U -#define IBQTP1PARERR 0x00008000U -#define IBQULPPARERR 0x00004000U -#define IBQSGELOPARERR 0x00002000U -#define IBQSGEHIPARERR 0x00001000U -#define IBQNCSIPARERR 0x00000800U -#define OBQPARERR 0x000007e0U -#define OBQULP0PARERR 0x00000400U -#define OBQULP1PARERR 0x00000200U -#define OBQULP2PARERR 0x00000100U -#define OBQULP3PARERR 0x00000080U -#define OBQSGEPARERR 0x00000040U -#define OBQNCSIPARERR 0x00000020U -#define PREFDROPINT 0x00000002U -#define UPACCNONZERO 0x00000001U - -#define CIM_HOST_UPACC_INT_CAUSE 0x7b34 -#define EEPROMWRINT 0x40000000U -#define TIMEOUTMAINT 0x20000000U -#define TIMEOUTINT 0x10000000U -#define RSPOVRLOOKUPINT 0x08000000U -#define REQOVRLOOKUPINT 0x04000000U -#define BLKWRPLINT 0x02000000U -#define BLKRDPLINT 0x01000000U -#define SGLWRPLINT 0x00800000U -#define SGLRDPLINT 0x00400000U -#define BLKWRCTLINT 0x00200000U -#define BLKRDCTLINT 0x00100000U -#define SGLWRCTLINT 0x00080000U -#define SGLRDCTLINT 0x00040000U -#define BLKWREEPROMINT 0x00020000U -#define BLKRDEEPROMINT 0x00010000U -#define SGLWREEPROMINT 0x00008000U -#define SGLRDEEPROMINT 0x00004000U -#define BLKWRFLASHINT 0x00002000U -#define BLKRDFLASHINT 0x00001000U -#define SGLWRFLASHINT 0x00000800U -#define SGLRDFLASHINT 0x00000400U -#define BLKWRBOOTINT 0x00000200U -#define BLKRDBOOTINT 0x00000100U -#define SGLWRBOOTINT 0x00000080U -#define SGLRDBOOTINT 0x00000040U -#define ILLWRBEINT 0x00000020U -#define ILLRDBEINT 0x00000010U -#define ILLRDINT 0x00000008U -#define ILLWRINT 0x00000004U -#define ILLTRANSINT 0x00000002U -#define RSVDSPACEINT 0x00000001U - -#define TP_OUT_CONFIG 0x7d04 -#define VLANEXTENABLE_MASK 0x0000f000U -#define VLANEXTENABLE_SHIFT 12 - -#define TP_PARA_REG2 0x7d68 -#define MAXRXDATA_MASK 0xffff0000U -#define MAXRXDATA_SHIFT 16 -#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT) - -#define TP_TIMER_RESOLUTION 0x7d90 -#define TIMERRESOLUTION_MASK 0x00ff0000U -#define TIMERRESOLUTION_SHIFT 16 -#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) - -#define TP_SHIFT_CNT 0x7dc0 - -#define TP_CCTRL_TABLE 0x7ddc -#define TP_MTU_TABLE 0x7de4 -#define MTUINDEX_MASK 
0xff000000U -#define MTUINDEX_SHIFT 24 -#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT) -#define MTUWIDTH_MASK 0x000f0000U -#define MTUWIDTH_SHIFT 16 -#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT) -#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT) -#define MTUVALUE_MASK 0x00003fffU -#define MTUVALUE_SHIFT 0 -#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT) -#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT) - -#define TP_RSS_LKP_TABLE 0x7dec -#define LKPTBLROWVLD 0x80000000U -#define LKPTBLQUEUE1_MASK 0x000ffc00U -#define LKPTBLQUEUE1_SHIFT 10 -#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT) -#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT) -#define LKPTBLQUEUE0_MASK 0x000003ffU -#define LKPTBLQUEUE0_SHIFT 0 -#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT) -#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT) - -#define TP_PIO_ADDR 0x7e40 -#define TP_PIO_DATA 0x7e44 -#define TP_MIB_INDEX 0x7e50 -#define TP_MIB_DATA 0x7e54 -#define TP_INT_CAUSE 0x7e74 -#define FLMTXFLSTEMPTY 0x40000000U - -#define TP_INGRESS_CONFIG 0x141 -#define VNIC 0x00000800U -#define CSUM_HAS_PSEUDO_HDR 0x00000400U -#define RM_OVLAN 0x00000200U -#define LOOKUPEVERYPKT 0x00000100U - -#define TP_MIB_MAC_IN_ERR_0 0x0 -#define TP_MIB_TCP_OUT_RST 0xc -#define TP_MIB_TCP_IN_SEG_HI 0x10 -#define TP_MIB_TCP_IN_SEG_LO 0x11 -#define TP_MIB_TCP_OUT_SEG_HI 0x12 -#define TP_MIB_TCP_OUT_SEG_LO 0x13 -#define TP_MIB_TCP_RXT_SEG_HI 0x14 -#define TP_MIB_TCP_RXT_SEG_LO 0x15 -#define TP_MIB_TNL_CNG_DROP_0 0x18 -#define TP_MIB_TCP_V6IN_ERR_0 0x28 -#define TP_MIB_TCP_V6OUT_RST 0x2c -#define TP_MIB_OFD_ARP_DROP 0x36 -#define TP_MIB_TNL_DROP_0 0x44 -#define TP_MIB_OFD_VLN_DROP_0 0x58 - -#define ULP_TX_INT_CAUSE 0x8dcc -#define PBL_BOUND_ERR_CH3 0x80000000U -#define PBL_BOUND_ERR_CH2 0x40000000U -#define PBL_BOUND_ERR_CH1 0x20000000U -#define PBL_BOUND_ERR_CH0 0x10000000U - -#define PM_RX_INT_CAUSE 0x8fdc -#define ZERO_E_CMD_ERROR 0x00400000U -#define PMRX_FRAMING_ERROR 0x003ffff0U -#define OCSPI_PAR_ERROR 0x00000008U -#define DB_OPTIONS_PAR_ERROR 0x00000004U -#define IESPI_PAR_ERROR 0x00000002U -#define E_PCMD_PAR_ERROR 0x00000001U - -#define PM_TX_INT_CAUSE 0x8ffc -#define PCMD_LEN_OVFL0 0x80000000U -#define PCMD_LEN_OVFL1 0x40000000U -#define PCMD_LEN_OVFL2 0x20000000U -#define ZERO_C_CMD_ERROR 0x10000000U -#define PMTX_FRAMING_ERROR 0x0ffffff0U -#define OESPI_PAR_ERROR 0x00000008U -#define ICSPI_PAR_ERROR 0x00000002U -#define C_PCMD_PAR_ERROR 0x00000001U - -#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 -#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 -#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 -#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c -#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 -#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 -#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 -#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c -#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 -#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 -#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 -#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c -#define MPS_PORT_STAT_TX_PORT_64B_L 0x430 -#define MPS_PORT_STAT_TX_PORT_64B_H 0x434 -#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 -#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c -#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 -#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 -#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 -#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c -#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 -#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 
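The statistics registers above are 64-bit counters split into _L/_H word pairs; a reader would typically reassemble them along these lines (a sketch only — t4_read_reg() and PORT_REG() are the accessors defined elsewhere in this driver, and any ordering requirement between the two reads is not addressed here):

static u64 mps_tx_frames(struct adapter *adap, int port)
{
	/* combine the low/high halves of the TX frame counter */
	u32 lo = t4_read_reg(adap, PORT_REG(port, MPS_PORT_STAT_TX_PORT_FRAMES_L));
	u32 hi = t4_read_reg(adap, PORT_REG(port, MPS_PORT_STAT_TX_PORT_FRAMES_H));

	return ((u64)hi << 32) | lo;
}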
-#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 -#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c -#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 -#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 -#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468 -#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c -#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 -#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 -#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 -#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c -#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 -#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 -#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 -#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c -#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 -#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 -#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 -#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c -#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 -#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 -#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 -#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac -#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 -#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 -#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 -#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 -#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 -#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc -#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 -#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 -#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 -#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc -#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 -#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 -#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 -#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec -#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 -#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 -#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 -#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc -#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 -#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 -#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 -#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c -#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 -#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 -#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 -#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c -#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 -#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 -#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 -#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 -#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 -#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 -#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c -#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 -#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 -#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 -#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c -#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 -#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 -#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 -#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c -#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 -#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 -#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 -#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c -#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 -#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 -#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 -#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c -#define MPS_PORT_STAT_RX_PORT_64B_L 0x590 -#define MPS_PORT_STAT_RX_PORT_64B_H 0x594 -#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 -#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c -#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 -#define 
MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 -#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 -#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac -#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 -#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 -#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 -#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc -#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 -#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 -#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 -#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc -#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 -#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 -#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 -#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc -#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 -#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 -#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 -#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec -#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 -#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 -#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 -#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc -#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 -#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 -#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 -#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c -#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 -#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 -#define MPS_CMN_CTL 0x9000 -#define NUMPORTS_MASK 0x00000003U -#define NUMPORTS_SHIFT 0 -#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT) - -#define MPS_INT_CAUSE 0x9008 -#define STATINT 0x00000020U -#define TXINT 0x00000010U -#define RXINT 0x00000008U -#define TRCINT 0x00000004U -#define CLSINT 0x00000002U -#define PLINT 0x00000001U - -#define MPS_TX_INT_CAUSE 0x9408 -#define PORTERR 0x00010000U -#define FRMERR 0x00008000U -#define SECNTERR 0x00004000U -#define BUBBLE 0x00002000U -#define TXDESCFIFO 0x00001e00U -#define TXDATAFIFO 0x000001e0U -#define NCSIFIFO 0x00000010U -#define TPFIFO 0x0000000fU - -#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614 -#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620 -#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c - -#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 -#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 -#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 -#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c -#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 -#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 -#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 -#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c -#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 -#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 -#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 -#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c -#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 -#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 -#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 -#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c -#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 -#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 -#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 -#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c -#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 -#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 -#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 -#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c -#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 -#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 -#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 -#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac -#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 
-#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 -#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 -#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc -#define MPS_TRC_CFG 0x9800 -#define TRCFIFOEMPTY 0x00000010U -#define TRCIGNOREDROPINPUT 0x00000008U -#define TRCKEEPDUPLICATES 0x00000004U -#define TRCEN 0x00000002U -#define TRCMULTIFILTER 0x00000001U - -#define MPS_TRC_RSS_CONTROL 0x9808 -#define RSSCONTROL_MASK 0x00ff0000U -#define RSSCONTROL_SHIFT 16 -#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) -#define QUEUENUMBER_MASK 0x0000ffffU -#define QUEUENUMBER_SHIFT 0 -#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT) - -#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810 -#define TFINVERTMATCH 0x01000000U -#define TFPKTTOOLARGE 0x00800000U -#define TFEN 0x00400000U -#define TFPORT_MASK 0x003c0000U -#define TFPORT_SHIFT 18 -#define TFPORT(x) ((x) << TFPORT_SHIFT) -#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT) -#define TFDROP 0x00020000U -#define TFSOPEOPERR 0x00010000U -#define TFLENGTH_MASK 0x00001f00U -#define TFLENGTH_SHIFT 8 -#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT) -#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT) -#define TFOFFSET_MASK 0x0000001fU -#define TFOFFSET_SHIFT 0 -#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT) -#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT) - -#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820 -#define TFMINPKTSIZE_MASK 0x01ff0000U -#define TFMINPKTSIZE_SHIFT 16 -#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT) -#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT) -#define TFCAPTUREMAX_MASK 0x00003fffU -#define TFCAPTUREMAX_SHIFT 0 -#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT) -#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT) - -#define MPS_TRC_INT_CAUSE 0x985c -#define MISCPERR 0x00000100U -#define PKTFIFO 0x000000f0U -#define FILTMEM 0x0000000fU - -#define MPS_TRC_FILTER0_MATCH 0x9c00 -#define MPS_TRC_FILTER0_DONT_CARE 0x9c80 -#define MPS_TRC_FILTER1_MATCH 0x9d00 -#define MPS_CLS_INT_CAUSE 0xd028 -#define PLERRENB 0x00000008U -#define HASHSRAM 0x00000004U -#define MATCHTCAM 0x00000002U -#define MATCHSRAM 0x00000001U - -#define MPS_RX_PERR_INT_CAUSE 0x11074 - -#define CPL_INTR_CAUSE 0x19054 -#define CIM_OP_MAP_PERR 0x00000020U -#define CIM_OVFL_ERROR 0x00000010U -#define TP_FRAMING_ERROR 0x00000008U -#define SGE_FRAMING_ERROR 0x00000004U -#define CIM_FRAMING_ERROR 0x00000002U -#define ZERO_SWITCH_ERROR 0x00000001U - -#define SMB_INT_CAUSE 0x19090 -#define MSTTXFIFOPARINT 0x00200000U -#define MSTRXFIFOPARINT 0x00100000U -#define SLVFIFOPARINT 0x00080000U - -#define ULP_RX_INT_CAUSE 0x19158 -#define ULP_RX_ISCSI_TAGMASK 0x19164 -#define ULP_RX_ISCSI_PSZ 0x19168 -#define HPZ3_MASK 0x0f000000U -#define HPZ3_SHIFT 24 -#define HPZ3(x) ((x) << HPZ3_SHIFT) -#define HPZ2_MASK 0x000f0000U -#define HPZ2_SHIFT 16 -#define HPZ2(x) ((x) << HPZ2_SHIFT) -#define HPZ1_MASK 0x00000f00U -#define HPZ1_SHIFT 8 -#define HPZ1(x) ((x) << HPZ1_SHIFT) -#define HPZ0_MASK 0x0000000fU -#define HPZ0_SHIFT 0 -#define HPZ0(x) ((x) << HPZ0_SHIFT) - -#define ULP_RX_TDDP_PSZ 0x19178 - -#define SF_DATA 0x193f8 -#define SF_OP 0x193fc -#define BUSY 0x80000000U -#define SF_LOCK 0x00000010U -#define SF_CONT 0x00000008U -#define BYTECNT_MASK 0x00000006U -#define BYTECNT_SHIFT 1 -#define BYTECNT(x) ((x) << BYTECNT_SHIFT) -#define OP_WR 0x00000001U - -#define PL_PF_INT_CAUSE 0x3c0 -#define PFSW 0x00000008U -#define PFSGE 0x00000004U -#define PFCIM 0x00000002U -#define PFMPS 0x00000001U - -#define 
PL_PF_INT_ENABLE 0x3c4 -#define PL_PF_CTL 0x3c8 -#define SWINT 0x00000001U - -#define PL_WHOAMI 0x19400 -#define SOURCEPF_MASK 0x00000700U -#define SOURCEPF_SHIFT 8 -#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT) -#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT) -#define ISVF 0x00000080U -#define VFID_MASK 0x0000007fU -#define VFID_SHIFT 0 -#define VFID(x) ((x) << VFID_SHIFT) -#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT) - -#define PL_INT_CAUSE 0x1940c -#define ULP_TX 0x08000000U -#define SGE 0x04000000U -#define HMA 0x02000000U -#define CPL_SWITCH 0x01000000U -#define ULP_RX 0x00800000U -#define PM_RX 0x00400000U -#define PM_TX 0x00200000U -#define MA 0x00100000U -#define TP 0x00080000U -#define LE 0x00040000U -#define EDC1 0x00020000U -#define EDC0 0x00010000U -#define MC 0x00008000U -#define PCIE 0x00004000U -#define PMU 0x00002000U -#define XGMAC_KR1 0x00001000U -#define XGMAC_KR0 0x00000800U -#define XGMAC1 0x00000400U -#define XGMAC0 0x00000200U -#define SMB 0x00000100U -#define SF 0x00000080U -#define PL 0x00000040U -#define NCSI 0x00000020U -#define MPS 0x00000010U -#define MI 0x00000008U -#define DBG 0x00000004U -#define I2CM 0x00000002U -#define CIM 0x00000001U - -#define PL_INT_MAP0 0x19414 -#define PL_RST 0x19428 -#define PIORST 0x00000002U -#define PIORSTMODE 0x00000001U - -#define PL_PL_INT_CAUSE 0x19430 -#define FATALPERR 0x00000010U -#define PERRVFID 0x00000001U - -#define PL_REV 0x1943c - -#define LE_DB_CONFIG 0x19c04 -#define HASHEN 0x00100000U - -#define LE_DB_SERVER_INDEX 0x19c18 -#define LE_DB_ACT_CNT_IPV4 0x19c20 -#define LE_DB_ACT_CNT_IPV6 0x19c24 - -#define LE_DB_INT_CAUSE 0x19c3c -#define REQQPARERR 0x00010000U -#define UNKNOWNCMD 0x00008000U -#define PARITYERR 0x00000040U -#define LIPMISS 0x00000020U -#define LIP0 0x00000010U - -#define LE_DB_TID_HASHBASE 0x19df8 - -#define NCSI_INT_CAUSE 0x1a0d8 -#define CIM_DM_PRTY_ERR 0x00000100U -#define MPS_DM_PRTY_ERR 0x00000080U -#define TXFIFO_PRTY_ERR 0x00000002U -#define RXFIFO_PRTY_ERR 0x00000001U - -#define XGMAC_PORT_CFG2 0x1018 -#define PATEN 0x00040000U -#define MAGICEN 0x00020000U - -#define XGMAC_PORT_MAGIC_MACID_LO 0x1024 -#define XGMAC_PORT_MAGIC_MACID_HI 0x1028 - -#define XGMAC_PORT_EPIO_DATA0 0x10c0 -#define XGMAC_PORT_EPIO_DATA1 0x10c4 -#define XGMAC_PORT_EPIO_DATA2 0x10c8 -#define XGMAC_PORT_EPIO_DATA3 0x10cc -#define XGMAC_PORT_EPIO_OP 0x10d0 -#define EPIOWR 0x00000100U -#define ADDRESS_MASK 0x000000ffU -#define ADDRESS_SHIFT 0 -#define ADDRESS(x) ((x) << ADDRESS_SHIFT) - -#define XGMAC_PORT_INT_CAUSE 0x10dc -#endif /* __T4_REGS_H */ diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h deleted file mode 100644 index edcfd7e..0000000 --- a/drivers/net/cxgb4/t4fw_api.h +++ /dev/null @@ -1,1623 +0,0 @@ -/* - * This file is part of the Chelsio T4 Ethernet driver for Linux. - * - * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef _T4FW_INTERFACE_H_ -#define _T4FW_INTERFACE_H_ - -#define FW_T4VF_SGE_BASE_ADDR 0x0000 -#define FW_T4VF_MPS_BASE_ADDR 0x0100 -#define FW_T4VF_PL_BASE_ADDR 0x0200 -#define FW_T4VF_MBDATA_BASE_ADDR 0x0240 -#define FW_T4VF_CIM_BASE_ADDR 0x0300 - -enum fw_wr_opcodes { - FW_FILTER_WR = 0x02, - FW_ULPTX_WR = 0x04, - FW_TP_WR = 0x05, - FW_ETH_TX_PKT_WR = 0x08, - FW_FLOWC_WR = 0x0a, - FW_OFLD_TX_DATA_WR = 0x0b, - FW_CMD_WR = 0x10, - FW_ETH_TX_PKT_VM_WR = 0x11, - FW_RI_RES_WR = 0x0c, - FW_RI_INIT_WR = 0x0d, - FW_RI_RDMA_WRITE_WR = 0x14, - FW_RI_SEND_WR = 0x15, - FW_RI_RDMA_READ_WR = 0x16, - FW_RI_RECV_WR = 0x17, - FW_RI_BIND_MW_WR = 0x18, - FW_RI_FR_NSMR_WR = 0x19, - FW_RI_INV_LSTAG_WR = 0x1a, - FW_LASTC2E_WR = 0x40 -}; - -struct fw_wr_hdr { - __be32 hi; - __be32 lo; -}; - -#define FW_WR_OP(x) ((x) << 24) -#define FW_WR_ATOMIC(x) ((x) << 23) -#define FW_WR_FLUSH(x) ((x) << 22) -#define FW_WR_COMPL(x) ((x) << 21) -#define FW_WR_IMMDLEN_MASK 0xff -#define FW_WR_IMMDLEN(x) ((x) << 0) - -#define FW_WR_EQUIQ (1U << 31) -#define FW_WR_EQUEQ (1U << 30) -#define FW_WR_FLOWID(x) ((x) << 8) -#define FW_WR_LEN16(x) ((x) << 0) - -struct fw_ulptx_wr { - __be32 op_to_compl; - __be32 flowid_len16; - u64 cookie; -}; - -struct fw_tp_wr { - __be32 op_to_immdlen; - __be32 flowid_len16; - u64 cookie; -}; - -struct fw_eth_tx_pkt_wr { - __be32 op_immdlen; - __be32 equiq_to_len16; - __be64 r3; -}; - -enum fw_flowc_mnem { - FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ - FW_FLOWC_MNEM_CH, - FW_FLOWC_MNEM_PORT, - FW_FLOWC_MNEM_IQID, - FW_FLOWC_MNEM_SNDNXT, - FW_FLOWC_MNEM_RCVNXT, - FW_FLOWC_MNEM_SNDBUF, - FW_FLOWC_MNEM_MSS, -}; - -struct fw_flowc_mnemval { - u8 mnemonic; - u8 r4[3]; - __be32 val; -}; - -struct fw_flowc_wr { - __be32 op_to_nparams; -#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0) - __be32 flowid_len16; - struct fw_flowc_mnemval mnemval[0]; -}; - -struct fw_ofld_tx_data_wr { - __be32 op_to_immdlen; - __be32 flowid_len16; - __be32 plen; - __be32 tunnel_to_proxy; -#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19) -#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18) -#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17) -#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16) -#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15) -#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14) -#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10) -#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6) -}; - -struct fw_cmd_wr { - __be32 op_dma; -#define FW_CMD_WR_DMA (1U << 17) - __be32 len16_pkd; - __be64 cookie_daddr; -}; - -struct fw_eth_tx_pkt_vm_wr { - __be32 op_immdlen; - __be32 equiq_to_len16; - __be32 r3[2]; - u8 ethmacdst[6]; - u8 ethmacsrc[6]; - __be16 ethtype; - __be16 vlantci; -}; - -#define FW_CMD_MAX_TIMEOUT 3000 - -enum fw_cmd_opcodes { - FW_LDST_CMD = 0x01, - FW_RESET_CMD = 0x03, - FW_HELLO_CMD = 0x04, - FW_BYE_CMD = 0x05, 
- FW_INITIALIZE_CMD = 0x06, - FW_CAPS_CONFIG_CMD = 0x07, - FW_PARAMS_CMD = 0x08, - FW_PFVF_CMD = 0x09, - FW_IQ_CMD = 0x10, - FW_EQ_MNGT_CMD = 0x11, - FW_EQ_ETH_CMD = 0x12, - FW_EQ_CTRL_CMD = 0x13, - FW_EQ_OFLD_CMD = 0x21, - FW_VI_CMD = 0x14, - FW_VI_MAC_CMD = 0x15, - FW_VI_RXMODE_CMD = 0x16, - FW_VI_ENABLE_CMD = 0x17, - FW_ACL_MAC_CMD = 0x18, - FW_ACL_VLAN_CMD = 0x19, - FW_VI_STATS_CMD = 0x1a, - FW_PORT_CMD = 0x1b, - FW_PORT_STATS_CMD = 0x1c, - FW_PORT_LB_STATS_CMD = 0x1d, - FW_PORT_TRACE_CMD = 0x1e, - FW_PORT_TRACE_MMAP_CMD = 0x1f, - FW_RSS_IND_TBL_CMD = 0x20, - FW_RSS_GLB_CONFIG_CMD = 0x22, - FW_RSS_VI_CONFIG_CMD = 0x23, - FW_LASTC2E_CMD = 0x40, - FW_ERROR_CMD = 0x80, - FW_DEBUG_CMD = 0x81, -}; - -enum fw_cmd_cap { - FW_CMD_CAP_PF = 0x01, - FW_CMD_CAP_DMAQ = 0x02, - FW_CMD_CAP_PORT = 0x04, - FW_CMD_CAP_PORTPROMISC = 0x08, - FW_CMD_CAP_PORTSTATS = 0x10, - FW_CMD_CAP_VF = 0x80, -}; - -/* - * Generic command header flit0 - */ -struct fw_cmd_hdr { - __be32 hi; - __be32 lo; -}; - -#define FW_CMD_OP(x) ((x) << 24) -#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff) -#define FW_CMD_REQUEST (1U << 23) -#define FW_CMD_READ (1U << 22) -#define FW_CMD_WRITE (1U << 21) -#define FW_CMD_EXEC (1U << 20) -#define FW_CMD_RAMASK(x) ((x) << 20) -#define FW_CMD_RETVAL(x) ((x) << 8) -#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff) -#define FW_CMD_LEN16(x) ((x) << 0) - -enum fw_ldst_addrspc { - FW_LDST_ADDRSPC_FIRMWARE = 0x0001, - FW_LDST_ADDRSPC_SGE_EGRC = 0x0008, - FW_LDST_ADDRSPC_SGE_INGC = 0x0009, - FW_LDST_ADDRSPC_SGE_FLMC = 0x000a, - FW_LDST_ADDRSPC_SGE_CONMC = 0x000b, - FW_LDST_ADDRSPC_TP_PIO = 0x0010, - FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011, - FW_LDST_ADDRSPC_TP_MIB = 0x0012, - FW_LDST_ADDRSPC_MDIO = 0x0018, - FW_LDST_ADDRSPC_MPS = 0x0020, - FW_LDST_ADDRSPC_FUNC = 0x0028 -}; - -enum fw_ldst_mps_fid { - FW_LDST_MPS_ATRB, - FW_LDST_MPS_RPLC -}; - -enum fw_ldst_func_access_ctl { - FW_LDST_FUNC_ACC_CTL_VIID, - FW_LDST_FUNC_ACC_CTL_FID -}; - -enum fw_ldst_func_mod_index { - FW_LDST_FUNC_MPS -}; - -struct fw_ldst_cmd { - __be32 op_to_addrspace; -#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0) - __be32 cycles_to_len16; - union fw_ldst { - struct fw_ldst_addrval { - __be32 addr; - __be32 val; - } addrval; - struct fw_ldst_idctxt { - __be32 physid; - __be32 msg_pkd; - __be32 ctxt_data7; - __be32 ctxt_data6; - __be32 ctxt_data5; - __be32 ctxt_data4; - __be32 ctxt_data3; - __be32 ctxt_data2; - __be32 ctxt_data1; - __be32 ctxt_data0; - } idctxt; - struct fw_ldst_mdio { - __be16 paddr_mmd; - __be16 raddr; - __be16 vctl; - __be16 rval; - } mdio; - struct fw_ldst_mps { - __be16 fid_ctl; - __be16 rplcpf_pkd; - __be32 rplc127_96; - __be32 rplc95_64; - __be32 rplc63_32; - __be32 rplc31_0; - __be32 atrb; - __be16 vlan[16]; - } mps; - struct fw_ldst_func { - u8 access_ctl; - u8 mod_index; - __be16 ctl_id; - __be32 offset; - __be64 data0; - __be64 data1; - } func; - } u; -}; - -#define FW_LDST_CMD_MSG(x) ((x) << 31) -#define FW_LDST_CMD_PADDR(x) ((x) << 8) -#define FW_LDST_CMD_MMD(x) ((x) << 0) -#define FW_LDST_CMD_FID(x) ((x) << 15) -#define FW_LDST_CMD_CTL(x) ((x) << 0) -#define FW_LDST_CMD_RPLCPF(x) ((x) << 0) - -struct fw_reset_cmd { - __be32 op_to_write; - __be32 retval_len16; - __be32 val; - __be32 r3; -}; - -struct fw_hello_cmd { - __be32 op_to_write; - __be32 retval_len16; - __be32 err_to_mbasyncnot; -#define FW_HELLO_CMD_ERR (1U << 31) -#define FW_HELLO_CMD_INIT (1U << 30) -#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) -#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) -#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24) -#define 
FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) - __be32 fwrev; -}; - -struct fw_bye_cmd { - __be32 op_to_write; - __be32 retval_len16; - __be64 r3; -}; - -struct fw_initialize_cmd { - __be32 op_to_write; - __be32 retval_len16; - __be64 r3; -}; - -enum fw_caps_config_hm { - FW_CAPS_CONFIG_HM_PCIE = 0x00000001, - FW_CAPS_CONFIG_HM_PL = 0x00000002, - FW_CAPS_CONFIG_HM_SGE = 0x00000004, - FW_CAPS_CONFIG_HM_CIM = 0x00000008, - FW_CAPS_CONFIG_HM_ULPTX = 0x00000010, - FW_CAPS_CONFIG_HM_TP = 0x00000020, - FW_CAPS_CONFIG_HM_ULPRX = 0x00000040, - FW_CAPS_CONFIG_HM_PMRX = 0x00000080, - FW_CAPS_CONFIG_HM_PMTX = 0x00000100, - FW_CAPS_CONFIG_HM_MC = 0x00000200, - FW_CAPS_CONFIG_HM_LE = 0x00000400, - FW_CAPS_CONFIG_HM_MPS = 0x00000800, - FW_CAPS_CONFIG_HM_XGMAC = 0x00001000, - FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000, - FW_CAPS_CONFIG_HM_T4DBG = 0x00004000, - FW_CAPS_CONFIG_HM_MI = 0x00008000, - FW_CAPS_CONFIG_HM_I2CM = 0x00010000, - FW_CAPS_CONFIG_HM_NCSI = 0x00020000, - FW_CAPS_CONFIG_HM_SMB = 0x00040000, - FW_CAPS_CONFIG_HM_MA = 0x00080000, - FW_CAPS_CONFIG_HM_EDRAM = 0x00100000, - FW_CAPS_CONFIG_HM_PMU = 0x00200000, - FW_CAPS_CONFIG_HM_UART = 0x00400000, - FW_CAPS_CONFIG_HM_SF = 0x00800000, -}; - -enum fw_caps_config_nbm { - FW_CAPS_CONFIG_NBM_IPMI = 0x00000001, - FW_CAPS_CONFIG_NBM_NCSI = 0x00000002, -}; - -enum fw_caps_config_link { - FW_CAPS_CONFIG_LINK_PPP = 0x00000001, - FW_CAPS_CONFIG_LINK_QFC = 0x00000002, - FW_CAPS_CONFIG_LINK_DCBX = 0x00000004, -}; - -enum fw_caps_config_switch { - FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001, - FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002, -}; - -enum fw_caps_config_nic { - FW_CAPS_CONFIG_NIC = 0x00000001, - FW_CAPS_CONFIG_NIC_VM = 0x00000002, -}; - -enum fw_caps_config_ofld { - FW_CAPS_CONFIG_OFLD = 0x00000001, -}; - -enum fw_caps_config_rdma { - FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001, - FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002, -}; - -enum fw_caps_config_iscsi { - FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001, - FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002, - FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004, - FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, -}; - -enum fw_caps_config_fcoe { - FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, - FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, -}; - -struct fw_caps_config_cmd { - __be32 op_to_write; - __be32 retval_len16; - __be32 r2; - __be32 hwmbitmap; - __be16 nbmcaps; - __be16 linkcaps; - __be16 switchcaps; - __be16 r3; - __be16 niccaps; - __be16 ofldcaps; - __be16 rdmacaps; - __be16 r4; - __be16 iscsicaps; - __be16 fcoecaps; - __be32 r5; - __be64 r6; -}; - -/* - * params command mnemonics - */ -enum fw_params_mnem { - FW_PARAMS_MNEM_DEV = 1, /* device params */ - FW_PARAMS_MNEM_PFVF = 2, /* function params */ - FW_PARAMS_MNEM_REG = 3, /* limited register access */ - FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ - FW_PARAMS_MNEM_LAST -}; - -/* - * device parameters - */ -enum fw_params_param_dev { - FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ - FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ - FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs - * allocated by the device's - * Lookup Engine - */ - FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03, - FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04, - FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05, - FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06, - FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07, - FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08, - FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09, - FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A, - FW_PARAMS_PARAM_DEV_FWREV = 0x0B, - 
FW_PARAMS_PARAM_DEV_TPREV = 0x0C, -}; - -/* - * physical and virtual function parameters - */ -enum fw_params_param_pfvf { - FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00, - FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01, - FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02, - FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, - FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, - FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, - FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, - FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07, - FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08, - FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09, - FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A, - FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B, - FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C, - FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D, - FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E, - FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F, - FW_PARAMS_PARAM_PFVF_RQ_END = 0x10, - FW_PARAMS_PARAM_PFVF_PBL_START = 0x11, - FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, - FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, - FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, - FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15, - FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16, - FW_PARAMS_PARAM_PFVF_CQ_START = 0x17, - FW_PARAMS_PARAM_PFVF_CQ_END = 0x18, - FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, - FW_PARAMS_PARAM_PFVF_VIID = 0x24, - FW_PARAMS_PARAM_PFVF_CPMASK = 0x25, - FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26, - FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27, - FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28, - FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29, - FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A, - FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B, - FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C, -}; - -/* - * dma queue parameters - */ -enum fw_params_param_dmaq { - FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00, - FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, - FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10, - FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, - FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, -}; - -#define FW_PARAMS_MNEM(x) ((x) << 24) -#define FW_PARAMS_PARAM_X(x) ((x) << 16) -#define FW_PARAMS_PARAM_Y(x) ((x) << 8) -#define FW_PARAMS_PARAM_Z(x) ((x) << 0) -#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) -#define FW_PARAMS_PARAM_YZ(x) ((x) << 0) - -struct fw_params_cmd { - __be32 op_to_vfn; - __be32 retval_len16; - struct fw_params_param { - __be32 mnem; - __be32 val; - } param[7]; -}; - -#define FW_PARAMS_CMD_PFN(x) ((x) << 8) -#define FW_PARAMS_CMD_VFN(x) ((x) << 0) - -struct fw_pfvf_cmd { - __be32 op_to_vfn; - __be32 retval_len16; - __be32 niqflint_niq; - __be32 type_to_neq; - __be32 tc_to_nexactf; - __be32 r_caps_to_nethctrl; - __be16 nricq; - __be16 nriqp; - __be32 r4; -}; - -#define FW_PFVF_CMD_PFN(x) ((x) << 8) -#define FW_PFVF_CMD_VFN(x) ((x) << 0) - -#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20) -#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff) - -#define FW_PFVF_CMD_NIQ(x) ((x) << 0) -#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_PFVF_CMD_TYPE (1 << 31) -#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1) - -#define FW_PFVF_CMD_CMASK(x) ((x) << 24) -#define FW_PFVF_CMD_CMASK_MASK 0xf -#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK) - -#define FW_PFVF_CMD_PMASK(x) ((x) << 20) -#define FW_PFVF_CMD_PMASK_MASK 0xf -#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK) - -#define FW_PFVF_CMD_NEQ(x) ((x) << 0) -#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_PFVF_CMD_TC(x) ((x) << 24) -#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff) - -#define FW_PFVF_CMD_NVI(x) ((x) << 16) -#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff) - -#define FW_PFVF_CMD_NEXACTF(x) 
((x) << 0) -#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff) - -#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24) -#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff) - -#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16) -#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff) - -#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0) -#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff) - -enum fw_iq_type { - FW_IQ_TYPE_FL_INT_CAP, - FW_IQ_TYPE_NO_FL_INT_CAP -}; - -struct fw_iq_cmd { - __be32 op_to_vfn; - __be32 alloc_to_len16; - __be16 physiqid; - __be16 iqid; - __be16 fl0id; - __be16 fl1id; - __be32 type_to_iqandstindex; - __be16 iqdroprss_to_iqesize; - __be16 iqsize; - __be64 iqaddr; - __be32 iqns_to_fl0congen; - __be16 fl0dcaen_to_fl0cidxfthresh; - __be16 fl0size; - __be64 fl0addr; - __be32 fl1cngchmap_to_fl1congen; - __be16 fl1dcaen_to_fl1cidxfthresh; - __be16 fl1size; - __be64 fl1addr; -}; - -#define FW_IQ_CMD_PFN(x) ((x) << 8) -#define FW_IQ_CMD_VFN(x) ((x) << 0) - -#define FW_IQ_CMD_ALLOC (1U << 31) -#define FW_IQ_CMD_FREE (1U << 30) -#define FW_IQ_CMD_MODIFY (1U << 29) -#define FW_IQ_CMD_IQSTART(x) ((x) << 28) -#define FW_IQ_CMD_IQSTOP(x) ((x) << 27) - -#define FW_IQ_CMD_TYPE(x) ((x) << 29) -#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28) -#define FW_IQ_CMD_VIID(x) ((x) << 16) -#define FW_IQ_CMD_IQANDST(x) ((x) << 15) -#define FW_IQ_CMD_IQANUS(x) ((x) << 14) -#define FW_IQ_CMD_IQANUD(x) ((x) << 12) -#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0) - -#define FW_IQ_CMD_IQDROPRSS (1U << 15) -#define FW_IQ_CMD_IQGTSMODE (1U << 14) -#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12) -#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11) -#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6) -#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4) -#define FW_IQ_CMD_IQO (1U << 3) -#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2) -#define FW_IQ_CMD_IQESIZE(x) ((x) << 0) - -#define FW_IQ_CMD_IQNS(x) ((x) << 31) -#define FW_IQ_CMD_IQRO(x) ((x) << 30) -#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28) -#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27) -#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26) -#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20) -#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15) -#define FW_IQ_CMD_FL0DBP(x) ((x) << 14) -#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13) -#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12) -#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11) -#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10) -#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9) -#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8) -#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7) -#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6) -#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4) -#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3) -#define FW_IQ_CMD_FL0PADEN (1U << 2) -#define FW_IQ_CMD_FL0PACKEN (1U << 1) -#define FW_IQ_CMD_FL0CONGEN (1U << 0) - -#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15) -#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10) -#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7) -#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4) -#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3) -#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0) - -#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20) -#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15) -#define FW_IQ_CMD_FL1DBP(x) ((x) << 14) -#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13) -#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12) -#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11) -#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10) -#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9) -#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8) -#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7) -#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6) -#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) 
<< 4) -#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3) -#define FW_IQ_CMD_FL1PADEN (1U << 2) -#define FW_IQ_CMD_FL1PACKEN (1U << 1) -#define FW_IQ_CMD_FL1CONGEN (1U << 0) - -#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15) -#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10) -#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7) -#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4) -#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3) -#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0) - -struct fw_eq_eth_cmd { - __be32 op_to_vfn; - __be32 alloc_to_len16; - __be32 eqid_pkd; - __be32 physeqid_pkd; - __be32 fetchszm_to_iqid; - __be32 dcaen_to_eqsize; - __be64 eqaddr; - __be32 viid_pkd; - __be32 r8_lo; - __be64 r9; -}; - -#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8) -#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0) -#define FW_EQ_ETH_CMD_ALLOC (1U << 31) -#define FW_EQ_ETH_CMD_FREE (1U << 30) -#define FW_EQ_ETH_CMD_MODIFY (1U << 29) -#define FW_EQ_ETH_CMD_EQSTART (1U << 28) -#define FW_EQ_ETH_CMD_EQSTOP (1U << 27) - -#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) -#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) -#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) -#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) -#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24) -#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23) -#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22) -#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) - -#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) - -struct fw_eq_ctrl_cmd { - __be32 op_to_vfn; - __be32 alloc_to_len16; - __be32 cmpliqid_eqid; - __be32 physeqid_pkd; - __be32 fetchszm_to_iqid; - __be32 dcaen_to_eqsize; - __be64 eqaddr; -}; - -#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8) -#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0) - -#define FW_EQ_CTRL_CMD_ALLOC (1U << 31) -#define FW_EQ_CTRL_CMD_FREE (1U << 30) -#define FW_EQ_CTRL_CMD_MODIFY (1U << 29) -#define FW_EQ_CTRL_CMD_EQSTART (1U << 28) -#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27) - -#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0) -#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26) -#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25) -#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24) -#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23) -#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22) -#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0) - -struct fw_eq_ofld_cmd { - __be32 op_to_vfn; - __be32 
alloc_to_len16; - __be32 eqid_pkd; - __be32 physeqid_pkd; - __be32 fetchszm_to_iqid; - __be32 dcaen_to_eqsize; - __be64 eqaddr; -}; - -#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8) -#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0) - -#define FW_EQ_OFLD_CMD_ALLOC (1U << 31) -#define FW_EQ_OFLD_CMD_FREE (1U << 30) -#define FW_EQ_OFLD_CMD_MODIFY (1U << 29) -#define FW_EQ_OFLD_CMD_EQSTART (1U << 28) -#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27) - -#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0) -#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) -#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) - -#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26) -#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25) -#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24) -#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23) -#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22) -#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20) -#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19) -#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18) -#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16) -#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0) - -#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31) -#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26) -#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23) -#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20) -#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19) -#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16) -#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0) - -/* - * Macros for VIID parsing: - * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number - */ -#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) -#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) -#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) - -struct fw_vi_cmd { - __be32 op_to_vfn; - __be32 alloc_to_len16; - __be16 type_viid; - u8 mac[6]; - u8 portid_pkd; - u8 nmac; - u8 nmac0[6]; - __be16 rsssize_pkd; - u8 nmac1[6]; - __be16 idsiiq_pkd; - u8 nmac2[6]; - __be16 idseiq_pkd; - u8 nmac3[6]; - __be64 r9; - __be64 r10; -}; - -#define FW_VI_CMD_PFN(x) ((x) << 8) -#define FW_VI_CMD_VFN(x) ((x) << 0) -#define FW_VI_CMD_ALLOC (1U << 31) -#define FW_VI_CMD_FREE (1U << 30) -#define FW_VI_CMD_VIID(x) ((x) << 0) -#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff) -#define FW_VI_CMD_PORTID(x) ((x) << 4) -#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf) -#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) - -/* Special VI_MAC command index ids */ -#define FW_VI_MAC_ADD_MAC 0x3FF -#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE -#define FW_VI_MAC_MAC_BASED_FREE 0x3FD -#define FW_CLS_TCAM_NUM_ENTRIES 336 - -enum fw_vi_mac_smac { - FW_VI_MAC_MPS_TCAM_ENTRY, - FW_VI_MAC_MPS_TCAM_ONLY, - FW_VI_MAC_SMT_ONLY, - FW_VI_MAC_SMT_AND_MPSTCAM -}; - -enum fw_vi_mac_result { - FW_VI_MAC_R_SUCCESS, - FW_VI_MAC_R_F_NONEXISTENT_NOMEM, - FW_VI_MAC_R_SMAC_FAIL, - FW_VI_MAC_R_F_ACL_CHECK -}; - -struct fw_vi_mac_cmd { - __be32 op_to_viid; - __be32 freemacs_to_len16; - union fw_vi_mac { - struct fw_vi_mac_exact { - __be16 valid_to_idx; - u8 macaddr[6]; - } exact[7]; - struct fw_vi_mac_hash { - __be64 hashvec; - } hash; - } u; -}; - -#define FW_VI_MAC_CMD_VIID(x) ((x) << 0) -#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31) -#define FW_VI_MAC_CMD_HASHVECEN (1U << 23) -#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22) -#define FW_VI_MAC_CMD_VALID (1U << 15) -#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12) -#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10) -#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3) -#define FW_VI_MAC_CMD_IDX(x) ((x) << 0) -#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff) - -#define FW_RXMODE_MTU_NO_CHG 65535 - -struct fw_vi_rxmode_cmd { - 
__be32 op_to_viid; - __be32 retval_len16; - __be32 mtu_to_vlanexen; - __be32 r4_lo; -}; - -#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) -#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff -#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) -#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) -#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) -#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) -#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3 -#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8) - -struct fw_vi_enable_cmd { - __be32 op_to_viid; - __be32 ien_to_len16; - __be16 blinkdur; - __be16 r3; - __be32 r4; -}; - -#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0) -#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31) -#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30) -#define FW_VI_ENABLE_CMD_LED (1U << 29) - -/* VI VF stats offset definitions */ -#define VI_VF_NUM_STATS 16 -enum fw_vi_stats_vf_index { - FW_VI_VF_STAT_TX_BCAST_BYTES_IX, - FW_VI_VF_STAT_TX_BCAST_FRAMES_IX, - FW_VI_VF_STAT_TX_MCAST_BYTES_IX, - FW_VI_VF_STAT_TX_MCAST_FRAMES_IX, - FW_VI_VF_STAT_TX_UCAST_BYTES_IX, - FW_VI_VF_STAT_TX_UCAST_FRAMES_IX, - FW_VI_VF_STAT_TX_DROP_FRAMES_IX, - FW_VI_VF_STAT_TX_OFLD_BYTES_IX, - FW_VI_VF_STAT_TX_OFLD_FRAMES_IX, - FW_VI_VF_STAT_RX_BCAST_BYTES_IX, - FW_VI_VF_STAT_RX_BCAST_FRAMES_IX, - FW_VI_VF_STAT_RX_MCAST_BYTES_IX, - FW_VI_VF_STAT_RX_MCAST_FRAMES_IX, - FW_VI_VF_STAT_RX_UCAST_BYTES_IX, - FW_VI_VF_STAT_RX_UCAST_FRAMES_IX, - FW_VI_VF_STAT_RX_ERR_FRAMES_IX -}; - -/* VI PF stats offset definitions */ -#define VI_PF_NUM_STATS 17 -enum fw_vi_stats_pf_index { - FW_VI_PF_STAT_TX_BCAST_BYTES_IX, - FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, - FW_VI_PF_STAT_TX_MCAST_BYTES_IX, - FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, - FW_VI_PF_STAT_TX_UCAST_BYTES_IX, - FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, - FW_VI_PF_STAT_TX_OFLD_BYTES_IX, - FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, - FW_VI_PF_STAT_RX_BYTES_IX, - FW_VI_PF_STAT_RX_FRAMES_IX, - FW_VI_PF_STAT_RX_BCAST_BYTES_IX, - FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, - FW_VI_PF_STAT_RX_MCAST_BYTES_IX, - FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, - FW_VI_PF_STAT_RX_UCAST_BYTES_IX, - FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, - FW_VI_PF_STAT_RX_ERR_FRAMES_IX -}; - -struct fw_vi_stats_cmd { - __be32 op_to_viid; - __be32 retval_len16; - union fw_vi_stats { - struct fw_vi_stats_ctl { - __be16 nstats_ix; - __be16 r6; - __be32 r7; - __be64 stat0; - __be64 stat1; - __be64 stat2; - __be64 stat3; - __be64 stat4; - __be64 stat5; - } ctl; - struct fw_vi_stats_pf { - __be64 tx_bcast_bytes; - __be64 tx_bcast_frames; - __be64 tx_mcast_bytes; - __be64 tx_mcast_frames; - __be64 tx_ucast_bytes; - __be64 tx_ucast_frames; - __be64 tx_offload_bytes; - __be64 tx_offload_frames; - __be64 rx_pf_bytes; - __be64 rx_pf_frames; - __be64 rx_bcast_bytes; - __be64 rx_bcast_frames; - __be64 rx_mcast_bytes; - __be64 rx_mcast_frames; - __be64 rx_ucast_bytes; - __be64 rx_ucast_frames; - __be64 rx_err_frames; - } pf; - struct fw_vi_stats_vf { - __be64 tx_bcast_bytes; - __be64 tx_bcast_frames; - __be64 tx_mcast_bytes; - __be64 tx_mcast_frames; - __be64 tx_ucast_bytes; - __be64 tx_ucast_frames; - __be64 tx_drop_frames; - __be64 tx_offload_bytes; - __be64 tx_offload_frames; - __be64 rx_bcast_bytes; - __be64 rx_bcast_frames; - __be64 rx_mcast_bytes; - __be64 rx_mcast_frames; - __be64 rx_ucast_bytes; - __be64 rx_ucast_frames; - __be64 rx_err_frames; - } vf; - } u; -}; - -#define FW_VI_STATS_CMD_VIID(x) ((x) << 0) -#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12) -#define 
FW_VI_STATS_CMD_IX(x) ((x) << 0) - -struct fw_acl_mac_cmd { - __be32 op_to_vfn; - __be32 en_to_len16; - u8 nmac; - u8 r3[7]; - __be16 r4; - u8 macaddr0[6]; - __be16 r5; - u8 macaddr1[6]; - __be16 r6; - u8 macaddr2[6]; - __be16 r7; - u8 macaddr3[6]; -}; - -#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8) -#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0) -#define FW_ACL_MAC_CMD_EN(x) ((x) << 31) - -struct fw_acl_vlan_cmd { - __be32 op_to_vfn; - __be32 en_to_len16; - u8 nvlan; - u8 dropnovlan_fm; - u8 r3_lo[6]; - __be16 vlanid[16]; -}; - -#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8) -#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0) -#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31) -#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7) -#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6) - -enum fw_port_cap { - FW_PORT_CAP_SPEED_100M = 0x0001, - FW_PORT_CAP_SPEED_1G = 0x0002, - FW_PORT_CAP_SPEED_2_5G = 0x0004, - FW_PORT_CAP_SPEED_10G = 0x0008, - FW_PORT_CAP_SPEED_40G = 0x0010, - FW_PORT_CAP_SPEED_100G = 0x0020, - FW_PORT_CAP_FC_RX = 0x0040, - FW_PORT_CAP_FC_TX = 0x0080, - FW_PORT_CAP_ANEG = 0x0100, - FW_PORT_CAP_MDI_0 = 0x0200, - FW_PORT_CAP_MDI_1 = 0x0400, - FW_PORT_CAP_BEAN = 0x0800, - FW_PORT_CAP_PMA_LPBK = 0x1000, - FW_PORT_CAP_PCS_LPBK = 0x2000, - FW_PORT_CAP_PHYXS_LPBK = 0x4000, - FW_PORT_CAP_FAR_END_LPBK = 0x8000, -}; - -enum fw_port_mdi { - FW_PORT_MDI_UNCHANGED, - FW_PORT_MDI_AUTO, - FW_PORT_MDI_F_STRAIGHT, - FW_PORT_MDI_F_CROSSOVER -}; - -#define FW_PORT_MDI(x) ((x) << 9) - -enum fw_port_action { - FW_PORT_ACTION_L1_CFG = 0x0001, - FW_PORT_ACTION_L2_CFG = 0x0002, - FW_PORT_ACTION_GET_PORT_INFO = 0x0003, - FW_PORT_ACTION_L2_PPP_CFG = 0x0004, - FW_PORT_ACTION_L2_DCB_CFG = 0x0005, - FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, - FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, - FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, - FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020, - FW_PORT_ACTION_L1_LPBK = 0x0021, - FW_PORT_ACTION_L1_PMA_LPBK = 0x0022, - FW_PORT_ACTION_L1_PCS_LPBK = 0x0023, - FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024, - FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025, - FW_PORT_ACTION_PHY_RESET = 0x0040, - FW_PORT_ACTION_PMA_RESET = 0x0041, - FW_PORT_ACTION_PCS_RESET = 0x0042, - FW_PORT_ACTION_PHYXS_RESET = 0x0043, - FW_PORT_ACTION_DTEXS_REEST = 0x0044, - FW_PORT_ACTION_AN_RESET = 0x0045 -}; - -enum fw_port_l2cfg_ctlbf { - FW_PORT_L2_CTLBF_OVLAN0 = 0x01, - FW_PORT_L2_CTLBF_OVLAN1 = 0x02, - FW_PORT_L2_CTLBF_OVLAN2 = 0x04, - FW_PORT_L2_CTLBF_OVLAN3 = 0x08, - FW_PORT_L2_CTLBF_IVLAN = 0x10, - FW_PORT_L2_CTLBF_TXIPG = 0x20 -}; - -enum fw_port_dcb_cfg { - FW_PORT_DCB_CFG_PG = 0x01, - FW_PORT_DCB_CFG_PFC = 0x02, - FW_PORT_DCB_CFG_APPL = 0x04 -}; - -enum fw_port_dcb_cfg_rc { - FW_PORT_DCB_CFG_SUCCESS = 0x0, - FW_PORT_DCB_CFG_ERROR = 0x1 -}; - -struct fw_port_cmd { - __be32 op_to_portid; - __be32 action_to_len16; - union fw_port { - struct fw_port_l1cfg { - __be32 rcap; - __be32 r; - } l1cfg; - struct fw_port_l2cfg { - __be16 ctlbf_to_ivlan0; - __be16 ivlantype; - __be32 txipg_pkd; - __be16 ovlan0mask; - __be16 ovlan0type; - __be16 ovlan1mask; - __be16 ovlan1type; - __be16 ovlan2mask; - __be16 ovlan2type; - __be16 ovlan3mask; - __be16 ovlan3type; - } l2cfg; - struct fw_port_info { - __be32 lstatus_to_modtype; - __be16 pcap; - __be16 acap; - __be16 mtu; - __u8 cbllen; - __u8 r9; - __be32 r10; - __be64 r11; - } info; - struct fw_port_ppp { - __be32 pppen_to_ncsich; - __be32 r11; - } ppp; - struct fw_port_dcb { - __be16 cfg; - u8 up_map; - u8 sf_cfgrc; - __be16 prot_ix; - u8 pe7_to_pe0; - u8 numTCPFCs; - __be32 pgid0_to_pgid7; - __be32 numTCs_oui; - u8 pgpc[8]; - 
} dcb; - } u; -}; - -#define FW_PORT_CMD_READ (1U << 22) - -#define FW_PORT_CMD_PORTID(x) ((x) << 0) -#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) - -#define FW_PORT_CMD_ACTION(x) ((x) << 16) -#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff) - -#define FW_PORT_CMD_CTLBF(x) ((x) << 10) -#define FW_PORT_CMD_OVLAN3(x) ((x) << 7) -#define FW_PORT_CMD_OVLAN2(x) ((x) << 6) -#define FW_PORT_CMD_OVLAN1(x) ((x) << 5) -#define FW_PORT_CMD_OVLAN0(x) ((x) << 4) -#define FW_PORT_CMD_IVLAN0(x) ((x) << 3) - -#define FW_PORT_CMD_TXIPG(x) ((x) << 19) - -#define FW_PORT_CMD_LSTATUS (1U << 31) -#define FW_PORT_CMD_LSPEED(x) ((x) << 24) -#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f) -#define FW_PORT_CMD_TXPAUSE (1U << 23) -#define FW_PORT_CMD_RXPAUSE (1U << 22) -#define FW_PORT_CMD_MDIOCAP (1U << 21) -#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f) -#define FW_PORT_CMD_LPTXPAUSE (1U << 15) -#define FW_PORT_CMD_LPRXPAUSE (1U << 14) -#define FW_PORT_CMD_PTYPE_MASK 0x1f -#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK) -#define FW_PORT_CMD_MODTYPE_MASK 0x1f -#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK) - -#define FW_PORT_CMD_PPPEN(x) ((x) << 31) -#define FW_PORT_CMD_TPSRC(x) ((x) << 28) -#define FW_PORT_CMD_NCSISRC(x) ((x) << 24) - -#define FW_PORT_CMD_CH0(x) ((x) << 20) -#define FW_PORT_CMD_CH1(x) ((x) << 16) -#define FW_PORT_CMD_CH2(x) ((x) << 12) -#define FW_PORT_CMD_CH3(x) ((x) << 8) -#define FW_PORT_CMD_NCSICH(x) ((x) << 4) - -enum fw_port_type { - FW_PORT_TYPE_FIBER_XFI, - FW_PORT_TYPE_FIBER_XAUI, - FW_PORT_TYPE_BT_SGMII, - FW_PORT_TYPE_BT_XFI, - FW_PORT_TYPE_BT_XAUI, - FW_PORT_TYPE_KX4, - FW_PORT_TYPE_CX4, - FW_PORT_TYPE_KX, - FW_PORT_TYPE_KR, - FW_PORT_TYPE_SFP, - FW_PORT_TYPE_BP_AP, - FW_PORT_TYPE_BP4_AP, - - FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK -}; - -enum fw_port_module_type { - FW_PORT_MOD_TYPE_NA, - FW_PORT_MOD_TYPE_LR, - FW_PORT_MOD_TYPE_SR, - FW_PORT_MOD_TYPE_ER, - FW_PORT_MOD_TYPE_TWINAX_PASSIVE, - FW_PORT_MOD_TYPE_TWINAX_ACTIVE, - FW_PORT_MOD_TYPE_LRM, - - FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK -}; - -/* port stats */ -#define FW_NUM_PORT_STATS 50 -#define FW_NUM_PORT_TX_STATS 23 -#define FW_NUM_PORT_RX_STATS 27 - -enum fw_port_stats_tx_index { - FW_STAT_TX_PORT_BYTES_IX, - FW_STAT_TX_PORT_FRAMES_IX, - FW_STAT_TX_PORT_BCAST_IX, - FW_STAT_TX_PORT_MCAST_IX, - FW_STAT_TX_PORT_UCAST_IX, - FW_STAT_TX_PORT_ERROR_IX, - FW_STAT_TX_PORT_64B_IX, - FW_STAT_TX_PORT_65B_127B_IX, - FW_STAT_TX_PORT_128B_255B_IX, - FW_STAT_TX_PORT_256B_511B_IX, - FW_STAT_TX_PORT_512B_1023B_IX, - FW_STAT_TX_PORT_1024B_1518B_IX, - FW_STAT_TX_PORT_1519B_MAX_IX, - FW_STAT_TX_PORT_DROP_IX, - FW_STAT_TX_PORT_PAUSE_IX, - FW_STAT_TX_PORT_PPP0_IX, - FW_STAT_TX_PORT_PPP1_IX, - FW_STAT_TX_PORT_PPP2_IX, - FW_STAT_TX_PORT_PPP3_IX, - FW_STAT_TX_PORT_PPP4_IX, - FW_STAT_TX_PORT_PPP5_IX, - FW_STAT_TX_PORT_PPP6_IX, - FW_STAT_TX_PORT_PPP7_IX -}; - -enum fw_port_stat_rx_index { - FW_STAT_RX_PORT_BYTES_IX, - FW_STAT_RX_PORT_FRAMES_IX, - FW_STAT_RX_PORT_BCAST_IX, - FW_STAT_RX_PORT_MCAST_IX, - FW_STAT_RX_PORT_UCAST_IX, - FW_STAT_RX_PORT_MTU_ERROR_IX, - FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, - FW_STAT_RX_PORT_CRC_ERROR_IX, - FW_STAT_RX_PORT_LEN_ERROR_IX, - FW_STAT_RX_PORT_SYM_ERROR_IX, - FW_STAT_RX_PORT_64B_IX, - FW_STAT_RX_PORT_65B_127B_IX, - FW_STAT_RX_PORT_128B_255B_IX, - FW_STAT_RX_PORT_256B_511B_IX, - FW_STAT_RX_PORT_512B_1023B_IX, - FW_STAT_RX_PORT_1024B_1518B_IX, - FW_STAT_RX_PORT_1519B_MAX_IX, - FW_STAT_RX_PORT_PAUSE_IX, - FW_STAT_RX_PORT_PPP0_IX, 
- FW_STAT_RX_PORT_PPP1_IX, - FW_STAT_RX_PORT_PPP2_IX, - FW_STAT_RX_PORT_PPP3_IX, - FW_STAT_RX_PORT_PPP4_IX, - FW_STAT_RX_PORT_PPP5_IX, - FW_STAT_RX_PORT_PPP6_IX, - FW_STAT_RX_PORT_PPP7_IX, - FW_STAT_RX_PORT_LESS_64B_IX -}; - -struct fw_port_stats_cmd { - __be32 op_to_portid; - __be32 retval_len16; - union fw_port_stats { - struct fw_port_stats_ctl { - u8 nstats_bg_bm; - u8 tx_ix; - __be16 r6; - __be32 r7; - __be64 stat0; - __be64 stat1; - __be64 stat2; - __be64 stat3; - __be64 stat4; - __be64 stat5; - } ctl; - struct fw_port_stats_all { - __be64 tx_bytes; - __be64 tx_frames; - __be64 tx_bcast; - __be64 tx_mcast; - __be64 tx_ucast; - __be64 tx_error; - __be64 tx_64b; - __be64 tx_65b_127b; - __be64 tx_128b_255b; - __be64 tx_256b_511b; - __be64 tx_512b_1023b; - __be64 tx_1024b_1518b; - __be64 tx_1519b_max; - __be64 tx_drop; - __be64 tx_pause; - __be64 tx_ppp0; - __be64 tx_ppp1; - __be64 tx_ppp2; - __be64 tx_ppp3; - __be64 tx_ppp4; - __be64 tx_ppp5; - __be64 tx_ppp6; - __be64 tx_ppp7; - __be64 rx_bytes; - __be64 rx_frames; - __be64 rx_bcast; - __be64 rx_mcast; - __be64 rx_ucast; - __be64 rx_mtu_error; - __be64 rx_mtu_crc_error; - __be64 rx_crc_error; - __be64 rx_len_error; - __be64 rx_sym_error; - __be64 rx_64b; - __be64 rx_65b_127b; - __be64 rx_128b_255b; - __be64 rx_256b_511b; - __be64 rx_512b_1023b; - __be64 rx_1024b_1518b; - __be64 rx_1519b_max; - __be64 rx_pause; - __be64 rx_ppp0; - __be64 rx_ppp1; - __be64 rx_ppp2; - __be64 rx_ppp3; - __be64 rx_ppp4; - __be64 rx_ppp5; - __be64 rx_ppp6; - __be64 rx_ppp7; - __be64 rx_less_64b; - __be64 rx_bg_drop; - __be64 rx_bg_trunc; - } all; - } u; -}; - -#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4) -#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0) -#define FW_PORT_STATS_CMD_TX(x) ((x) << 7) -#define FW_PORT_STATS_CMD_IX(x) ((x) << 0) - -/* port loopback stats */ -#define FW_NUM_LB_STATS 16 -enum fw_port_lb_stats_index { - FW_STAT_LB_PORT_BYTES_IX, - FW_STAT_LB_PORT_FRAMES_IX, - FW_STAT_LB_PORT_BCAST_IX, - FW_STAT_LB_PORT_MCAST_IX, - FW_STAT_LB_PORT_UCAST_IX, - FW_STAT_LB_PORT_ERROR_IX, - FW_STAT_LB_PORT_64B_IX, - FW_STAT_LB_PORT_65B_127B_IX, - FW_STAT_LB_PORT_128B_255B_IX, - FW_STAT_LB_PORT_256B_511B_IX, - FW_STAT_LB_PORT_512B_1023B_IX, - FW_STAT_LB_PORT_1024B_1518B_IX, - FW_STAT_LB_PORT_1519B_MAX_IX, - FW_STAT_LB_PORT_DROP_FRAMES_IX -}; - -struct fw_port_lb_stats_cmd { - __be32 op_to_lbport; - __be32 retval_len16; - union fw_port_lb_stats { - struct fw_port_lb_stats_ctl { - u8 nstats_bg_bm; - u8 ix_pkd; - __be16 r6; - __be32 r7; - __be64 stat0; - __be64 stat1; - __be64 stat2; - __be64 stat3; - __be64 stat4; - __be64 stat5; - } ctl; - struct fw_port_lb_stats_all { - __be64 tx_bytes; - __be64 tx_frames; - __be64 tx_bcast; - __be64 tx_mcast; - __be64 tx_ucast; - __be64 tx_error; - __be64 tx_64b; - __be64 tx_65b_127b; - __be64 tx_128b_255b; - __be64 tx_256b_511b; - __be64 tx_512b_1023b; - __be64 tx_1024b_1518b; - __be64 tx_1519b_max; - __be64 rx_lb_drop; - __be64 rx_lb_trunc; - } all; - } u; -}; - -#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0) -#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4) -#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0) -#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0) - -struct fw_rss_ind_tbl_cmd { - __be32 op_to_viid; -#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0) - __be32 retval_len16; - __be16 niqid; - __be16 startidx; - __be32 r3; - __be32 iq0_to_iq2; -#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20) -#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10) -#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0) - __be32 iq3_to_iq5; - __be32 
iq6_to_iq8; - __be32 iq9_to_iq11; - __be32 iq12_to_iq14; - __be32 iq15_to_iq17; - __be32 iq18_to_iq20; - __be32 iq21_to_iq23; - __be32 iq24_to_iq26; - __be32 iq27_to_iq29; - __be32 iq30_iq31; - __be32 r15_lo; -}; - -struct fw_rss_glb_config_cmd { - __be32 op_to_write; - __be32 retval_len16; - union fw_rss_glb_config { - struct fw_rss_glb_config_manual { - __be32 mode_pkd; - __be32 r3; - __be64 r4; - __be64 r5; - } manual; - struct fw_rss_glb_config_basicvirtual { - __be32 mode_pkd; - __be32 synmapen_to_hashtoeplitz; -#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8) -#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7) -#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6) -#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5) -#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4) -#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3) -#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2) -#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1) -#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0) - __be64 r8; - __be64 r9; - } basicvirtual; - } u; -}; - -#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) -#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf) - -#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 -#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 - -struct fw_rss_vi_config_cmd { - __be32 op_to_viid; -#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) - __be32 retval_len16; - union fw_rss_vi_config { - struct fw_rss_vi_config_manual { - __be64 r3; - __be64 r4; - __be64 r5; - } manual; - struct fw_rss_vi_config_basicvirtual { - __be32 r6; - __be32 defaultq_to_udpen; -#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) -#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff) -#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) -#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) -#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) -#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) -#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0) - __be64 r9; - __be64 r10; - } basicvirtual; - } u; -}; - -enum fw_error_type { - FW_ERROR_TYPE_EXCEPTION = 0x0, - FW_ERROR_TYPE_HWMODULE = 0x1, - FW_ERROR_TYPE_WR = 0x2, - FW_ERROR_TYPE_ACL = 0x3, -}; - -struct fw_error_cmd { - __be32 op_to_type; - __be32 len16_pkd; - union fw_error { - struct fw_error_exception { - __be32 info[6]; - } exception; - struct fw_error_hwmodule { - __be32 regaddr; - __be32 regval; - } hwmodule; - struct fw_error_wr { - __be16 cidx; - __be16 pfn_vfn; - __be32 eqid; - u8 wrhdr[16]; - } wr; - struct fw_error_acl { - __be16 cidx; - __be16 pfn_vfn; - __be32 eqid; - __be16 mv_pkd; - u8 val[6]; - __be64 r4; - } acl; - } u; -}; - -struct fw_debug_cmd { - __be32 op_type; -#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff) - __be32 len16_pkd; - union fw_debug { - struct fw_debug_assert { - __be32 fcid; - __be32 line; - __be32 x; - __be32 y; - u8 filename_0_7[8]; - u8 filename_8_15[8]; - __be64 r3; - } assert; - struct fw_debug_prt { - __be16 dprtstridx; - __be16 r3[3]; - __be32 dprtstrparam0; - __be32 dprtstrparam1; - __be32 dprtstrparam2; - __be32 dprtstrparam3; - } prt; - } u; -}; - -struct fw_hdr { - u8 ver; - u8 reserved1; - __be16 len512; /* bin length in units of 512-bytes */ - __be32 fw_ver; /* firmware version */ - __be32 tp_microcode_ver; - u8 intfver_nic; - u8 intfver_vnic; - u8 intfver_ofld; - u8 intfver_ri; - u8 intfver_iscsipdu; - u8 intfver_iscsi; - u8 intfver_fcoe; - u8 reserved2; - __be32 reserved3[27]; -}; - -#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) -#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) -#define 
FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) -#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) -#endif /* _T4FW_INTERFACE_H_ */ diff --git a/drivers/net/cxgb4vf/Makefile b/drivers/net/cxgb4vf/Makefile deleted file mode 100644 index d72ee26..0000000 --- a/drivers/net/cxgb4vf/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Chelsio T4 SR-IOV Virtual Function Driver -# - -obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o - -cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h deleted file mode 100644 index 594334d..0000000 --- a/drivers/net/cxgb4vf/adapter.h +++ /dev/null @@ -1,534 +0,0 @@ -/* - * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet - * driver for Linux. - * - * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -/* - * This file should not be included directly. Include t4vf_common.h instead. - */ - -#ifndef __CXGB4VF_ADAPTER_H__ -#define __CXGB4VF_ADAPTER_H__ - -#include -#include -#include -#include -#include -#include - -#include "../cxgb4/t4_hw.h" - -/* - * Constants of the implementation. - */ -enum { - MAX_NPORTS = 1, /* max # of "ports" */ - MAX_PORT_QSETS = 8, /* max # of Queue Sets / "port" */ - MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS, - - /* - * MSI-X interrupt index usage. - */ - MSIX_FW = 0, /* MSI-X index for firmware Q */ - MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */ - MSIX_EXTRAS = 1, - MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS, - - /* - * The maximum number of Ingress and Egress Queues is determined by - * the maximum number of "Queue Sets" which we support plus any - * ancillary queues. Each "Queue Set" requires one Ingress Queue - * for RX Packet Ingress Event notifications and two Egress Queues for - * a Free List and an Ethernet TX list. - */ - INGQ_EXTRAS = 2, /* firmware event queue and */ - /* forwarded interrupts */ - MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS, - MAX_EGRQ = MAX_ETH_QSETS*2, -}; - -/* - * Forward structure definition references. - */ -struct adapter; -struct sge_eth_rxq; -struct sge_rspq; - -/* - * Per-"port" information. 
This is really per-Virtual Interface information - * but the use of the "port" nomenclature makes it easier to go back and forth - * between the PF and VF drivers ... - */ -struct port_info { - struct adapter *adapter; /* our adapter */ - u16 viid; /* virtual interface ID */ - s16 xact_addr_filt; /* index of our MAC address filter */ - u16 rss_size; /* size of VI's RSS table slice */ - u8 pidx; /* index into adapter port[] */ - u8 port_id; /* physical port ID */ - u8 nqsets; /* # of "Queue Sets" */ - u8 first_qset; /* index of first "Queue Set" */ - struct link_config link_cfg; /* physical port configuration */ -}; - -/* - * Scatter Gather Engine resources for the "adapter". Our ingress and egress - * queues are organized into "Queue Sets" with one ingress and one egress - * queue per Queue Set. These Queue Sets are apportionable between the "ports" - * (Virtual Interfaces). One extra ingress queue is used to receive - * asynchronous messages from the firmware. Note that the "Queue IDs" that we - * use here are really "Relative Queue IDs" which are returned as part of the - * firmware command to allocate queues. These queue IDs are relative to the - * absolute Queue ID base of the section of the Queue ID space allocated to - * the PF/VF. - */ - -/* - * SGE free-list queue state. - */ -struct rx_sw_desc; -struct sge_fl { - unsigned int avail; /* # of available RX buffers */ - unsigned int pend_cred; /* new buffers since last FL DB ring */ - unsigned int cidx; /* consumer index */ - unsigned int pidx; /* producer index */ - unsigned long alloc_failed; /* # of buffer allocation failures */ - unsigned long large_alloc_failed; - unsigned long starving; /* # of times FL was found starving */ - - /* - * Write-once/infrequently fields. - * ------------------------------- - */ - - unsigned int cntxt_id; /* SGE relative QID for the free list */ - unsigned int abs_id; /* SGE absolute QID for the free list */ - unsigned int size; /* capacity of free list */ - struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */ - __be64 *desc; /* address of HW RX descriptor ring */ - dma_addr_t addr; /* PCI bus address of hardware ring */ -}; - -/* - * An ingress packet gather list. - */ -struct pkt_gl { - skb_frag_t frags[MAX_SKB_FRAGS]; - void *va; /* virtual address of first byte */ - unsigned int nfrags; /* # of fragments */ - unsigned int tot_len; /* total length of fragments */ -}; - -typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *, - const struct pkt_gl *); - -/* - * State for an SGE Response Queue. - */ -struct sge_rspq { - struct napi_struct napi; /* NAPI scheduling control */ - const __be64 *cur_desc; /* current descriptor in queue */ - unsigned int cidx; /* consumer index */ - u8 gen; /* current generation bit */ - u8 next_intr_params; /* holdoff params for next interrupt */ - int offset; /* offset into current FL buffer */ - - unsigned int unhandled_irqs; /* bogus interrupts */ - - /* - * Write-once/infrequently fields.
- * ------------------------------- - */ - - u8 intr_params; /* interrupt holdoff parameters */ - u8 pktcnt_idx; /* interrupt packet threshold */ - u8 idx; /* queue index within its group */ - u16 cntxt_id; /* SGE rel QID for the response Q */ - u16 abs_id; /* SGE abs QID for the response Q */ - __be64 *desc; /* address of hardware response ring */ - dma_addr_t phys_addr; /* PCI bus address of ring */ - unsigned int iqe_len; /* entry size */ - unsigned int size; /* capacity of response Q */ - struct adapter *adapter; /* our adapter */ - struct net_device *netdev; /* associated net device */ - rspq_handler_t handler; /* the handler for this response Q */ -}; - -/* - * Ethernet queue statistics - */ -struct sge_eth_stats { - unsigned long pkts; /* # of ethernet packets */ - unsigned long lro_pkts; /* # of LRO super packets */ - unsigned long lro_merged; /* # of wire packets merged by LRO */ - unsigned long rx_cso; /* # of Rx checksum offloads */ - unsigned long vlan_ex; /* # of Rx VLAN extractions */ - unsigned long rx_drops; /* # of packets dropped due to no mem */ -}; - -/* - * State for an Ethernet Receive Queue. - */ -struct sge_eth_rxq { - struct sge_rspq rspq; /* Response Queue */ - struct sge_fl fl; /* Free List */ - struct sge_eth_stats stats; /* receive statistics */ -}; - -/* - * SGE Transmit Queue state. This contains all of the resources associated - * with the hardware status of a TX Queue which is a circular ring of hardware - * TX Descriptors. For convenience, it also contains a pointer to a parallel - * "Software Descriptor" array but we don't know anything about it here other - * than its type name. - */ -struct tx_desc { - /* - * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the - * hardware: Sizes, Producer and Consumer indices, etc. - */ - __be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)]; -}; -struct tx_sw_desc; -struct sge_txq { - unsigned int in_use; /* # of in-use TX descriptors */ - unsigned int size; /* # of descriptors */ - unsigned int cidx; /* SW consumer index */ - unsigned int pidx; /* producer index */ - unsigned long stops; /* # of times queue has been stopped */ - unsigned long restarts; /* # of queue restarts */ - - /* - * Write-once/infrequently fields. - * ------------------------------- - */ - - unsigned int cntxt_id; /* SGE relative QID for the TX Q */ - unsigned int abs_id; /* SGE absolute QID for the TX Q */ - struct tx_desc *desc; /* address of HW TX descriptor ring */ - struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */ - struct sge_qstat *stat; /* queue status entry */ - dma_addr_t phys_addr; /* PCI bus address of hardware ring */ -}; - -/* - * State for an Ethernet Transmit Queue. - */ -struct sge_eth_txq { - struct sge_txq q; /* SGE TX Queue */ - struct netdev_queue *txq; /* associated netdev TX queue */ - unsigned long tso; /* # of TSO requests */ - unsigned long tx_cso; /* # of TX checksum offloads */ - unsigned long vlan_ins; /* # of TX VLAN insertions */ - unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ -}; - -/* - * The complete set of Scatter/Gather Engine resources. - */ -struct sge { - /* - * Our "Queue Sets" ... - */ - struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; - struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; - - /* - * Extra ingress queues for asynchronous firmware events and - * forwarded interrupts (when in MSI mode).
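[Editor's aside, not part of the original patch: the pidx/cidx/in_use fields of struct sge_txq above implement the usual producer/consumer circular-ring bookkeeping. A minimal sketch of the derived "free descriptors" calculation; the helper name is illustrative, the driver's real ring helpers live in sge.c:

	/* TX descriptors still claimable; one slot is kept in reserve so
	 * that pidx == cidx always means "empty", never "full". */
	static inline unsigned int txq_avail(const struct sge_txq *q)
	{
		return q->size - 1 - q->in_use;
	}
]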
- */ - struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; - - struct sge_rspq intrq ____cacheline_aligned_in_smp; - spinlock_t intrq_lock; - - /* - * State for managing "starving Free Lists" -- Free Lists which have - * fallen below a certain threshold of buffers available to the - * hardware and attempts to refill them up to that threshold have - * failed. We have a regular "slow tick" timer process which will - * make periodic attempts to refill these starving Free Lists ... - */ - DECLARE_BITMAP(starving_fl, MAX_EGRQ); - struct timer_list rx_timer; - - /* - * State for cleaning up completed TX descriptors. - */ - struct timer_list tx_timer; - - /* - * Write-once/infrequently fields. - * ------------------------------- - */ - - u16 max_ethqsets; /* # of available Ethernet queue sets */ - u16 ethqsets; /* # of active Ethernet queue sets */ - u16 ethtxq_rover; /* Tx queue to clean up next */ - u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */ - u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */ - - /* - * Reverse maps from Absolute Queue IDs to associated queue pointers. - * The absolute Queue IDs are in a compact range which start at a - * [potentially large] Base Queue ID. We perform the reverse map by - * first converting the Absolute Queue ID into a Relative Queue ID by - * subtracting off the Base Queue ID and then use a Relative Queue ID - * indexed table to get the pointer to the corresponding software - * queue structure. - */ - unsigned int egr_base; - unsigned int ingr_base; - void *egr_map[MAX_EGRQ]; - struct sge_rspq *ingr_map[MAX_INGQ]; -}; - -/* - * Utility macros to convert Absolute- to Relative-Queue indices and Egress- - * and Ingress-Queues. The EQ_MAP() and IQ_MAP() macros which provide - * pointers to Ingress- and Egress-Queues can be used as both L- and R-values - */ -#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base)) -#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base)) - -#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)]) -#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)]) - -/* - * Macro to iterate across Queue Sets ("rxq" is a historic misnomer). - */ -#define for_each_ethrxq(sge, iter) \ - for (iter = 0; iter < (sge)->ethqsets; iter++) - -/* - * Per-"adapter" (Virtual Function) information. - */ -struct adapter { - /* PCI resources */ - void __iomem *regs; - struct pci_dev *pdev; - struct device *pdev_dev; - - /* "adapter" resources */ - unsigned long registered_device_map; - unsigned long open_device_map; - unsigned long flags; - struct adapter_params params; - - /* queue and interrupt resources */ - struct { - unsigned short vec; - char desc[22]; - } msix_info[MSIX_ENTRIES]; - struct sge sge; - - /* Linux network device resources */ - struct net_device *port[MAX_NPORTS]; - const char *name; - unsigned int msg_enable; - - /* debugfs resources */ - struct dentry *debugfs_root; - - /* various locks */ - spinlock_t stats_lock; -}; - -enum { /* adapter flags */ - FULL_INIT_DONE = (1UL << 0), - USING_MSI = (1UL << 1), - USING_MSIX = (1UL << 2), - QUEUES_BOUND = (1UL << 3), -}; - -/* - * The following register read/write routine definitions are required by - * the common code. - */ - -/** - * t4_read_reg - read a HW register - * @adapter: the adapter - * @reg_addr: the register address - * - * Returns the 32-bit value of the given HW register. 
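[Editor's aside, not part of the original patch: the reverse maps described above give an O(1) translation from the absolute queue IDs that the hardware reports back to the driver's software queue state. A sketch of what the IQ_MAP() lookup expands to; the function name is illustrative:

	/* Resolve an SGE absolute ingress-queue ID to its sge_rspq. */
	static struct sge_rspq *abs_id_to_rspq(struct sge *s, unsigned int abs_id)
	{
		return s->ingr_map[abs_id - s->ingr_base];	/* == IQ_MAP(s, abs_id) */
	}
]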
- */ -static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr) -{ - return readl(adapter->regs + reg_addr); -} - -/** - * t4_write_reg - write a HW register - * @adapter: the adapter - * @reg_addr: the register address - * @val: the value to write - * - * Write a 32-bit value into the given HW register. - */ -static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val) -{ - writel(val, adapter->regs + reg_addr); -} - -#ifndef readq -static inline u64 readq(const volatile void __iomem *addr) -{ - return readl(addr) + ((u64)readl(addr + 4) << 32); -} - -static inline void writeq(u64 val, volatile void __iomem *addr) -{ - writel(val, addr); - writel(val >> 32, addr + 4); -} -#endif - -/** - * t4_read_reg64 - read a 64-bit HW register - * @adapter: the adapter - * @reg_addr: the register address - * - * Returns the 64-bit value of the given HW register. - */ -static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr) -{ - return readq(adapter->regs + reg_addr); -} - -/** - * t4_write_reg64 - write a 64-bit HW register - * @adapter: the adapter - * @reg_addr: the register address - * @val: the value to write - * - * Write a 64-bit value into the given HW register. - */ -static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr, - u64 val) -{ - writeq(val, adapter->regs + reg_addr); -} - -/** - * port_name - return the string name of a port - * @adapter: the adapter - * @pidx: the port index - * - * Return the string name of the selected port. - */ -static inline const char *port_name(struct adapter *adapter, int pidx) -{ - return adapter->port[pidx]->name; -} - -/** - * t4_os_set_hw_addr - store a port's MAC address in SW - * @adapter: the adapter - * @pidx: the port index - * @hw_addr: the Ethernet address - * - * Store the Ethernet address of the given port in SW. Called by the common - * code when it retrieves a port's Ethernet address from EEPROM. - */ -static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx, - u8 hw_addr[]) -{ - memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN); - memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN); -} - -/** - * netdev2pinfo - return the port_info structure associated with a net_device - * @dev: the netdev - * - * Return the struct port_info associated with a net_device - */ -static inline struct port_info *netdev2pinfo(const struct net_device *dev) -{ - return netdev_priv(dev); -} - -/** - * adap2pinfo - return the port_info of a port - * @adap: the adapter - * @pidx: the port index - * - * Return the port_info structure for the adapter. - */ -static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx) -{ - return netdev_priv(adapter->port[pidx]); -} - -/** - * netdev2adap - return the adapter structure associated with a net_device - * @dev: the netdev - * - * Return the struct adapter associated with a net_device - */ -static inline struct adapter *netdev2adap(const struct net_device *dev) -{ - return netdev2pinfo(dev)->adapter; -} - -/* - * OS "Callback" function declarations. These are functions that the OS code - * is "contracted" to provide for the common code. - */ -void t4vf_os_link_changed(struct adapter *, int, int); - -/* - * SGE function prototype declarations. 
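[Editor's aside, not part of the original patch: on 32-bit builds with no native readq/writeq, the fallbacks above compose each 64-bit MMIO access from two 32-bit operations, so the access is not atomic and is only safe for registers whose halves tolerate being read or written separately. A usage sketch with a made-up register offset:

	u64 v = t4_read_reg64(adapter, 0x1000);	/* hypothetical 64-bit register */
	t4_write_reg64(adapter, 0x1000, v);	/* low 32 bits are written first */
]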
- */
-int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
-		       struct net_device *, int,
-		       struct sge_fl *, rspq_handler_t);
-int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
-			   struct net_device *, struct netdev_queue *,
-			   unsigned int);
-void t4vf_free_sge_resources(struct adapter *);
-
-int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
-int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
-		       const struct pkt_gl *);
-
-irq_handler_t t4vf_intr_handler(struct adapter *);
-irqreturn_t t4vf_sge_intr_msix(int, void *);
-
-int t4vf_sge_init(struct adapter *);
-void t4vf_sge_start(struct adapter *);
-void t4vf_sge_stop(struct adapter *);
-
-#endif /* __CXGB4VF_ADAPTER_H__ */
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
deleted file mode 100644
index ec79913..0000000
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ /dev/null
@@ -1,2947 +0,0 @@
-/*
- * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
- * driver for Linux.
- *
- * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/debugfs.h>
-#include <linux/ethtool.h>
-
-#include "t4vf_common.h"
-#include "t4vf_defs.h"
-
-#include "../cxgb4/t4_regs.h"
-#include "../cxgb4/t4_msg.h"
-
-/*
- * Generic information about the driver.
- */
-#define DRV_VERSION "1.0.0"
-#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
-
-/*
- * Module Parameters.
- * ==================
- */
-
-/*
- * Default ethtool "message level" for adapters.
- */
-#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
-			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
-			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
-
-static int dflt_msg_enable = DFLT_MSG_ENABLE;
-
-module_param(dflt_msg_enable, int, 0644);
-MODULE_PARM_DESC(dflt_msg_enable,
-		 "default adapter ethtool message level bitmap");
-
-/*
- * The driver uses the best interrupt scheme available on a platform in the
- * order MSI-X then MSI.
This parameter determines which of these schemes the - * driver may consider as follows: - * - * msi = 2: choose from among MSI-X and MSI - * msi = 1: only consider MSI interrupts - * - * Note that unlike the Physical Function driver, this Virtual Function driver - * does _not_ support legacy INTx interrupts (this limitation is mandated by - * the PCI-E SR-IOV standard). - */ -#define MSI_MSIX 2 -#define MSI_MSI 1 -#define MSI_DEFAULT MSI_MSIX - -static int msi = MSI_DEFAULT; - -module_param(msi, int, 0644); -MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI"); - -/* - * Fundamental constants. - * ====================== - */ - -enum { - MAX_TXQ_ENTRIES = 16384, - MAX_RSPQ_ENTRIES = 16384, - MAX_RX_BUFFERS = 16384, - - MIN_TXQ_ENTRIES = 32, - MIN_RSPQ_ENTRIES = 128, - MIN_FL_ENTRIES = 16, - - /* - * For purposes of manipulating the Free List size we need to - * recognize that Free Lists are actually Egress Queues (the host - * produces free buffers which the hardware consumes), Egress Queues - * indices are all in units of Egress Context Units bytes, and free - * list entries are 64-bit PCI DMA addresses. And since the state of - * the Producer Index == the Consumer Index implies an EMPTY list, we - * always have at least one Egress Unit's worth of Free List entries - * unused. See sge.c for more details ... - */ - EQ_UNIT = SGE_EQ_IDXSIZE, - FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), - MIN_FL_RESID = FL_PER_EQ_UNIT, -}; - -/* - * Global driver state. - * ==================== - */ - -static struct dentry *cxgb4vf_debugfs_root; - -/* - * OS "Callback" functions. - * ======================== - */ - -/* - * The link status has changed on the indicated "port" (Virtual Interface). - */ -void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) -{ - struct net_device *dev = adapter->port[pidx]; - - /* - * If the port is disabled or the current recorded "link up" - * status matches the new status, just return. - */ - if (!netif_running(dev) || link_ok == netif_carrier_ok(dev)) - return; - - /* - * Tell the OS that the link status has changed and print a short - * informative message on the console about the event. - */ - if (link_ok) { - const char *s; - const char *fc; - const struct port_info *pi = netdev_priv(dev); - - netif_carrier_on(dev); - - switch (pi->link_cfg.speed) { - case SPEED_10000: - s = "10Gbps"; - break; - - case SPEED_1000: - s = "1000Mbps"; - break; - - case SPEED_100: - s = "100Mbps"; - break; - - default: - s = "unknown"; - break; - } - - switch (pi->link_cfg.fc) { - case PAUSE_RX: - fc = "RX"; - break; - - case PAUSE_TX: - fc = "TX"; - break; - - case PAUSE_RX|PAUSE_TX: - fc = "RX/TX"; - break; - - default: - fc = "no"; - break; - } - - printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n", - dev->name, s, fc); - } else { - netif_carrier_off(dev); - printk(KERN_INFO "%s: link down\n", dev->name); - } -} - -/* - * Net device operations. - * ====================== - */ - - - - -/* - * Perform the MAC and PHY actions needed to enable a "port" (Virtual - * Interface). - */ -static int link_start(struct net_device *dev) -{ - int ret; - struct port_info *pi = netdev_priv(dev); - - /* - * We do not set address filters and promiscuity here, the stack does - * that step explicitly. Enable vlan accel. 
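The nested switches in t4vf_os_link_changed() above map the link speed and pause flags to strings. A table-driven equivalent for the pause half, as a sketch (PAUSE_RX/PAUSE_TX mirror the driver's single-bit flags, which is why the OR of the two can index the array directly):

#include <stdio.h>

#define PAUSE_RX 1		/* stand-ins for the driver's flag bits */
#define PAUSE_TX 2

static const char *fc_name[] = { "no", "RX", "TX", "RX/TX" };

int main(void)
{
	unsigned int fc = PAUSE_RX | PAUSE_TX;

	printf("%s PAUSE\n", fc_name[fc & (PAUSE_RX | PAUSE_TX)]);
	return 0;
}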
- */ - ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1, - true); - if (ret == 0) { - ret = t4vf_change_mac(pi->adapter, pi->viid, - pi->xact_addr_filt, dev->dev_addr, true); - if (ret >= 0) { - pi->xact_addr_filt = ret; - ret = 0; - } - } - - /* - * We don't need to actually "start the link" itself since the - * firmware will do that for us when the first Virtual Interface - * is enabled on a port. - */ - if (ret == 0) - ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true); - return ret; -} - -/* - * Name the MSI-X interrupts. - */ -static void name_msix_vecs(struct adapter *adapter) -{ - int namelen = sizeof(adapter->msix_info[0].desc) - 1; - int pidx; - - /* - * Firmware events. - */ - snprintf(adapter->msix_info[MSIX_FW].desc, namelen, - "%s-FWeventq", adapter->name); - adapter->msix_info[MSIX_FW].desc[namelen] = 0; - - /* - * Ethernet queues. - */ - for_each_port(adapter, pidx) { - struct net_device *dev = adapter->port[pidx]; - const struct port_info *pi = netdev_priv(dev); - int qs, msi; - - for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) { - snprintf(adapter->msix_info[msi].desc, namelen, - "%s-%d", dev->name, qs); - adapter->msix_info[msi].desc[namelen] = 0; - } - } -} - -/* - * Request all of our MSI-X resources. - */ -static int request_msix_queue_irqs(struct adapter *adapter) -{ - struct sge *s = &adapter->sge; - int rxq, msi, err; - - /* - * Firmware events. - */ - err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix, - 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq); - if (err) - return err; - - /* - * Ethernet queues. - */ - msi = MSIX_IQFLINT; - for_each_ethrxq(s, rxq) { - err = request_irq(adapter->msix_info[msi].vec, - t4vf_sge_intr_msix, 0, - adapter->msix_info[msi].desc, - &s->ethrxq[rxq].rspq); - if (err) - goto err_free_irqs; - msi++; - } - return 0; - -err_free_irqs: - while (--rxq >= 0) - free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq); - free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); - return err; -} - -/* - * Free our MSI-X resources. - */ -static void free_msix_queue_irqs(struct adapter *adapter) -{ - struct sge *s = &adapter->sge; - int rxq, msi; - - free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); - msi = MSIX_IQFLINT; - for_each_ethrxq(s, rxq) - free_irq(adapter->msix_info[msi++].vec, - &s->ethrxq[rxq].rspq); -} - -/* - * Turn on NAPI and start up interrupts on a response queue. - */ -static void qenable(struct sge_rspq *rspq) -{ - napi_enable(&rspq->napi); - - /* - * 0-increment the Going To Sleep register to start the timer and - * enable interrupts. - */ - t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, - CIDXINC(0) | - SEINTARM(rspq->intr_params) | - INGRESSQID(rspq->cntxt_id)); -} - -/* - * Enable NAPI scheduling and interrupt generation for all Receive Queues. - */ -static void enable_rx(struct adapter *adapter) -{ - int rxq; - struct sge *s = &adapter->sge; - - for_each_ethrxq(s, rxq) - qenable(&s->ethrxq[rxq].rspq); - qenable(&s->fw_evtq); - - /* - * The interrupt queue doesn't use NAPI so we do the 0-increment of - * its Going To Sleep register here to get it started. - */ - if (adapter->flags & USING_MSI) - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, - CIDXINC(0) | - SEINTARM(s->intrq.intr_params) | - INGRESSQID(s->intrq.cntxt_id)); - -} - -/* - * Wait until all NAPI handlers are descheduled. 
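qenable() above arms a queue by OR-ing several bitfield constructors into one Going To Sleep doorbell write. The real field layouts live in the cxgb4 register headers; the usual shift-based pattern behind such macros looks like this sketch (field positions invented for illustration, not the actual T4 layout):

#include <stdint.h>
#include <stdio.h>

/* hypothetical field positions -- the real ones live in t4_regs.h */
#define DEMO_CIDXINC(x)		((uint32_t)(x) << 0)	/* bits  0..11 */
#define DEMO_SEINTARM(x)	((uint32_t)(x) << 12)	/* bits 12..14 */
#define DEMO_INGRESSQID(x)	((uint32_t)(x) << 16)	/* bits 16..31 */

int main(void)
{
	uint32_t doorbell = DEMO_CIDXINC(0) |
			    DEMO_SEINTARM(5) |
			    DEMO_INGRESSQID(1234);

	printf("GTS write = %#010x\n", doorbell);
	return 0;
}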
- */ -static void quiesce_rx(struct adapter *adapter) -{ - struct sge *s = &adapter->sge; - int rxq; - - for_each_ethrxq(s, rxq) - napi_disable(&s->ethrxq[rxq].rspq.napi); - napi_disable(&s->fw_evtq.napi); -} - -/* - * Response queue handler for the firmware event queue. - */ -static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, - const struct pkt_gl *gl) -{ - /* - * Extract response opcode and get pointer to CPL message body. - */ - struct adapter *adapter = rspq->adapter; - u8 opcode = ((const struct rss_header *)rsp)->opcode; - void *cpl = (void *)(rsp + 1); - - switch (opcode) { - case CPL_FW6_MSG: { - /* - * We've received an asynchronous message from the firmware. - */ - const struct cpl_fw6_msg *fw_msg = cpl; - if (fw_msg->type == FW6_TYPE_CMD_RPL) - t4vf_handle_fw_rpl(adapter, fw_msg->data); - break; - } - - case CPL_SGE_EGR_UPDATE: { - /* - * We've received an Egress Queue Status Update message. We - * get these, if the SGE is configured to send these when the - * firmware passes certain points in processing our TX - * Ethernet Queue or if we make an explicit request for one. - * We use these updates to determine when we may need to - * restart a TX Ethernet Queue which was stopped for lack of - * free TX Queue Descriptors ... - */ - const struct cpl_sge_egr_update *p = (void *)cpl; - unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid)); - struct sge *s = &adapter->sge; - struct sge_txq *tq; - struct sge_eth_txq *txq; - unsigned int eq_idx; - - /* - * Perform sanity checking on the Queue ID to make sure it - * really refers to one of our TX Ethernet Egress Queues which - * is active and matches the queue's ID. None of these error - * conditions should ever happen so we may want to either make - * them fatal and/or conditionalized under DEBUG. - */ - eq_idx = EQ_IDX(s, qid); - if (unlikely(eq_idx >= MAX_EGRQ)) { - dev_err(adapter->pdev_dev, - "Egress Update QID %d out of range\n", qid); - break; - } - tq = s->egr_map[eq_idx]; - if (unlikely(tq == NULL)) { - dev_err(adapter->pdev_dev, - "Egress Update QID %d TXQ=NULL\n", qid); - break; - } - txq = container_of(tq, struct sge_eth_txq, q); - if (unlikely(tq->abs_id != qid)) { - dev_err(adapter->pdev_dev, - "Egress Update QID %d refers to TXQ %d\n", - qid, tq->abs_id); - break; - } - - /* - * Restart a stopped TX Queue which has less than half of its - * TX ring in use ... - */ - txq->q.restarts++; - netif_tx_wake_queue(txq->txq); - break; - } - - default: - dev_err(adapter->pdev_dev, - "unexpected CPL %#x on FW event queue\n", opcode); - } - - return 0; -} - -/* - * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues - * to use and initializes them. We support multiple "Queue Sets" per port if - * we have MSI-X, otherwise just one queue set per port. - */ -static int setup_sge_queues(struct adapter *adapter) -{ - struct sge *s = &adapter->sge; - int err, pidx, msix; - - /* - * Clear "Queue Set" Free List Starving and TX Queue Mapping Error - * state. - */ - bitmap_zero(s->starving_fl, MAX_EGRQ); - - /* - * If we're using MSI interrupt mode we need to set up a "forwarded - * interrupt" queue which we'll set up with our MSI vector. The rest - * of the ingress queues will be set up to forward their interrupts to - * this queue ... This must be first since t4vf_sge_alloc_rxq() uses - * the intrq's queue ID as the interrupt forwarding queue for the - * subsequent calls ... 
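The Egress Update path above stores struct sge_txq pointers in egr_map[] but needs the enclosing struct sge_eth_txq, which is what container_of() recovers. Its pointer arithmetic in isolation, with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct txq	{ unsigned int abs_id; };
struct eth_txq	{ int restarts; struct txq q; };

int main(void)
{
	struct eth_txq etq = { .restarts = 0, .q = { .abs_id = 7 } };
	struct txq *tq = &etq.q;		/* what the map stores */
	struct eth_txq *back = container_of(tq, struct eth_txq, q);

	printf("recovered restarts=%d abs_id=%u\n",
	       back->restarts, back->q.abs_id);
	return 0;
}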
- */ - if (adapter->flags & USING_MSI) { - err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false, - adapter->port[0], 0, NULL, NULL); - if (err) - goto err_free_queues; - } - - /* - * Allocate our ingress queue for asynchronous firmware messages. - */ - err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0], - MSIX_FW, NULL, fwevtq_handler); - if (err) - goto err_free_queues; - - /* - * Allocate each "port"'s initial Queue Sets. These can be changed - * later on ... up to the point where any interface on the adapter is - * brought up at which point lots of things get nailed down - * permanently ... - */ - msix = MSIX_IQFLINT; - for_each_port(adapter, pidx) { - struct net_device *dev = adapter->port[pidx]; - struct port_info *pi = netdev_priv(dev); - struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; - struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; - int qs; - - for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { - err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, - dev, msix++, - &rxq->fl, t4vf_ethrx_handler); - if (err) - goto err_free_queues; - - err = t4vf_sge_alloc_eth_txq(adapter, txq, dev, - netdev_get_tx_queue(dev, qs), - s->fw_evtq.cntxt_id); - if (err) - goto err_free_queues; - - rxq->rspq.idx = qs; - memset(&rxq->stats, 0, sizeof(rxq->stats)); - } - } - - /* - * Create the reverse mappings for the queues. - */ - s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id; - s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id; - IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq; - for_each_port(adapter, pidx) { - struct net_device *dev = adapter->port[pidx]; - struct port_info *pi = netdev_priv(dev); - struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; - struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; - int qs; - - for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { - IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq; - EQ_MAP(s, txq->q.abs_id) = &txq->q; - - /* - * The FW_IQ_CMD doesn't return the Absolute Queue IDs - * for Free Lists but since all of the Egress Queues - * (including Free Lists) have Relative Queue IDs - * which are computed as Absolute - Base Queue ID, we - * can synthesize the Absolute Queue IDs for the Free - * Lists. This is useful for debugging purposes when - * we want to dump Queue Contexts via the PF Driver. - */ - rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base; - EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl; - } - } - return 0; - -err_free_queues: - t4vf_free_sge_resources(adapter); - return err; -} - -/* - * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive - * queues. We configure the RSS CPU lookup table to distribute to the number - * of HW receive queues, and the response queue lookup table to narrow that - * down to the response queues actually configured for each "port" (Virtual - * Interface). We always configure the RSS mapping for all ports since the - * mapping table has plenty of entries. - */ -static int setup_rss(struct adapter *adapter) -{ - int pidx; - - for_each_port(adapter, pidx) { - struct port_info *pi = adap2pinfo(adapter, pidx); - struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; - u16 rss[MAX_PORT_QSETS]; - int qs, err; - - for (qs = 0; qs < pi->nqsets; qs++) - rss[qs] = rxq[qs].rspq.abs_id; - - err = t4vf_config_rss_range(adapter, pi->viid, - 0, pi->rss_size, rss, pi->nqsets); - if (err) - return err; - - /* - * Perform Global RSS Mode-specific initialization. 
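The reverse-map wiring above derives each Base Queue ID by subtracting a Relative (context) ID from one known Absolute ID, then synthesizes the Free List Absolute IDs from that base. A worked example with invented numbers: if TX queue 0 reports abs_id 1040 against cntxt_id 16, egr_base is 1024, so a Free List with cntxt_id 21 lands at abs_id 1045:

#include <stdio.h>

int main(void)
{
	unsigned int txq0_abs_id   = 1040;	/* from firmware (made up) */
	unsigned int txq0_cntxt_id = 16;	/* relative ID (made up)   */
	unsigned int egr_base = txq0_abs_id - txq0_cntxt_id;

	unsigned int fl_cntxt_id = 21;
	printf("egr_base=%u, synthesized FL abs_id=%u\n",
	       egr_base, fl_cntxt_id + egr_base);	/* 1024, 1045 */
	return 0;
}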
- */ - switch (adapter->params.rss.mode) { - case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: - /* - * If Tunnel All Lookup isn't specified in the global - * RSS Configuration, then we need to specify a - * default Ingress Queue for any ingress packets which - * aren't hashed. We'll use our first ingress queue - * ... - */ - if (!adapter->params.rss.u.basicvirtual.tnlalllookup) { - union rss_vi_config config; - err = t4vf_read_rss_vi_config(adapter, - pi->viid, - &config); - if (err) - return err; - config.basicvirtual.defaultq = - rxq[0].rspq.abs_id; - err = t4vf_write_rss_vi_config(adapter, - pi->viid, - &config); - if (err) - return err; - } - break; - } - } - - return 0; -} - -/* - * Bring the adapter up. Called whenever we go from no "ports" open to having - * one open. This function performs the actions necessary to make an adapter - * operational, such as completing the initialization of HW modules, and - * enabling interrupts. Must be called with the rtnl lock held. (Note that - * this is called "cxgb_up" in the PF Driver.) - */ -static int adapter_up(struct adapter *adapter) -{ - int err; - - /* - * If this is the first time we've been called, perform basic - * adapter setup. Once we've done this, many of our adapter - * parameters can no longer be changed ... - */ - if ((adapter->flags & FULL_INIT_DONE) == 0) { - err = setup_sge_queues(adapter); - if (err) - return err; - err = setup_rss(adapter); - if (err) { - t4vf_free_sge_resources(adapter); - return err; - } - - if (adapter->flags & USING_MSIX) - name_msix_vecs(adapter); - adapter->flags |= FULL_INIT_DONE; - } - - /* - * Acquire our interrupt resources. We only support MSI-X and MSI. - */ - BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); - if (adapter->flags & USING_MSIX) - err = request_msix_queue_irqs(adapter); - else - err = request_irq(adapter->pdev->irq, - t4vf_intr_handler(adapter), 0, - adapter->name, adapter); - if (err) { - dev_err(adapter->pdev_dev, "request_irq failed, err %d\n", - err); - return err; - } - - /* - * Enable NAPI ingress processing and return success. - */ - enable_rx(adapter); - t4vf_sge_start(adapter); - return 0; -} - -/* - * Bring the adapter down. Called whenever the last "port" (Virtual - * Interface) closed. (Note that this routine is called "cxgb_down" in the PF - * Driver.) - */ -static void adapter_down(struct adapter *adapter) -{ - /* - * Free interrupt resources. - */ - if (adapter->flags & USING_MSIX) - free_msix_queue_irqs(adapter); - else - free_irq(adapter->pdev->irq, adapter); - - /* - * Wait for NAPI handlers to finish. - */ - quiesce_rx(adapter); -} - -/* - * Start up a net device. - */ -static int cxgb4vf_open(struct net_device *dev) -{ - int err; - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - /* - * If this is the first interface that we're opening on the "adapter", - * bring the "adapter" up now. - */ - if (adapter->open_device_map == 0) { - err = adapter_up(adapter); - if (err) - return err; - } - - /* - * Note that this interface is up and start everything up ... - */ - netif_set_real_num_tx_queues(dev, pi->nqsets); - err = netif_set_real_num_rx_queues(dev, pi->nqsets); - if (err) - goto err_unwind; - err = link_start(dev); - if (err) - goto err_unwind; - - netif_tx_start_all_queues(dev); - set_bit(pi->port_id, &adapter->open_device_map); - return 0; - -err_unwind: - if (adapter->open_device_map == 0) - adapter_down(adapter); - return err; -} - -/* - * Shut down a net device. 
This routine is called "cxgb_close" in the PF
- * Driver ...
- */
-static int cxgb4vf_stop(struct net_device *dev)
-{
-	struct port_info *pi = netdev_priv(dev);
-	struct adapter *adapter = pi->adapter;
-
-	netif_tx_stop_all_queues(dev);
-	netif_carrier_off(dev);
-	t4vf_enable_vi(adapter, pi->viid, false, false);
-	pi->link_cfg.link_ok = 0;
-
-	clear_bit(pi->port_id, &adapter->open_device_map);
-	if (adapter->open_device_map == 0)
-		adapter_down(adapter);
-	return 0;
-}
-
-/*
- * Translate our basic statistics into the standard "ifconfig" statistics.
- */
-static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
-{
-	struct t4vf_port_stats stats;
-	struct port_info *pi = netdev2pinfo(dev);
-	struct adapter *adapter = pi->adapter;
-	struct net_device_stats *ns = &dev->stats;
-	int err;
-
-	spin_lock(&adapter->stats_lock);
-	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
-	spin_unlock(&adapter->stats_lock);
-
-	memset(ns, 0, sizeof(*ns));
-	if (err)
-		return ns;
-
-	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
-			stats.tx_ucast_bytes + stats.tx_offload_bytes);
-	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
-			  stats.tx_ucast_frames + stats.tx_offload_frames);
-	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
-			stats.rx_ucast_bytes);
-	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
-			  stats.rx_ucast_frames);
-	ns->multicast = stats.rx_mcast_frames;
-	ns->tx_errors = stats.tx_drop_frames;
-	ns->rx_errors = stats.rx_err_frames;
-
-	return ns;
-}
-
-/*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
- * at a specified offset within the list, into an array of address pointers
- * and return the number collected.
- */
-static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
-							const u8 **addr,
-							unsigned int offset,
-							unsigned int maxaddrs)
-{
-	unsigned int index = 0;
-	unsigned int naddr = 0;
-	const struct netdev_hw_addr *ha;
-
-	for_each_dev_addr(dev, ha)
-		if (index++ >= offset) {
-			addr[naddr++] = ha->addr;
-			if (naddr >= maxaddrs)
-				break;
-		}
-	return naddr;
-}
-
-/*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
- * at a specified offset within the list, into an array of address pointers
- * and return the number collected.
- */
-static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
-							const u8 **addr,
-							unsigned int offset,
-							unsigned int maxaddrs)
-{
-	unsigned int index = 0;
-	unsigned int naddr = 0;
-	const struct netdev_hw_addr *ha;
-
-	netdev_for_each_mc_addr(ha, dev)
-		if (index++ >= offset) {
-			addr[naddr++] = ha->addr;
-			if (naddr >= maxaddrs)
-				break;
-		}
-	return naddr;
-}
-
-/*
- * Configure the exact and hash address filters to handle a port's multicast
- * and secondary unicast MAC addresses.
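The two collectors above page through the kernel's address lists in windows of at most maxaddrs entries, which lets set_addr_filters() below hand the firmware one bounded batch (its seven-element addr[] array) per call. The same paging loop over a plain array, as a sketch:

#include <stdio.h>

/* collect up to maxaddrs items starting at offset; return count copied */
static unsigned int collect(const int *list, unsigned int len,
			    const int **out, unsigned int offset,
			    unsigned int maxaddrs)
{
	unsigned int index = 0, n = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		if (index++ >= offset) {
			out[n++] = &list[i];
			if (n >= maxaddrs)
				break;
		}
	return n;
}

int main(void)
{
	int macs[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	const int *batch[7];
	unsigned int offset, n;

	/* prints a batch of 7 starting at 0, then a batch of 3 at 7 */
	for (offset = 0; (n = collect(macs, 10, batch, offset, 7)) != 0;
	     offset += n)
		printf("batch of %u starting at %u\n", n, offset);
	return 0;
}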
- */
-static int set_addr_filters(const struct net_device *dev, bool sleep)
-{
-	u64 mhash = 0;
-	u64 uhash = 0;
-	bool free = true;
-	unsigned int offset, naddr;
-	const u8 *addr[7];
-	int ret;
-	const struct port_info *pi = netdev_priv(dev);
-
-	/* first do the secondary unicast addresses */
-	for (offset = 0; ; offset += naddr) {
-		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
-						     ARRAY_SIZE(addr));
-		if (naddr == 0)
-			break;
-
-		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, NULL, &uhash, sleep);
-		if (ret < 0)
-			return ret;
-
-		free = false;
-	}
-
-	/* next set up the multicast addresses */
-	for (offset = 0; ; offset += naddr) {
-		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
-						     ARRAY_SIZE(addr));
-		if (naddr == 0)
-			break;
-
-		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, NULL, &mhash, sleep);
-		if (ret < 0)
-			return ret;
-		free = false;
-	}
-
-	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
-				  uhash | mhash, sleep);
-}
-
-/*
- * Set RX properties of a port, such as promiscuity, address filters, and MTU.
- * If @mtu is -1 it is left unchanged.
- */
-static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
-{
-	int ret;
-	struct port_info *pi = netdev_priv(dev);
-
-	ret = set_addr_filters(dev, sleep_ok);
-	if (ret == 0)
-		ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
-				      (dev->flags & IFF_PROMISC) != 0,
-				      (dev->flags & IFF_ALLMULTI) != 0,
-				      1, -1, sleep_ok);
-	return ret;
-}
-
-/*
- * Set the current receive modes on the device.
- */
-static void cxgb4vf_set_rxmode(struct net_device *dev)
-{
-	/* unfortunately we can't return errors to the stack */
-	set_rxmode(dev, -1, false);
-}
-
-/*
- * Find the entry in the interrupt holdoff timer value array which comes
- * closest to the specified interrupt holdoff value.
- */
-static int closest_timer(const struct sge *s, int us)
-{
-	int i, timer_idx = 0, min_delta = INT_MAX;
-
-	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
-		int delta = us - s->timer_val[i];
-		if (delta < 0)
-			delta = -delta;
-		if (delta < min_delta) {
-			min_delta = delta;
-			timer_idx = i;
-		}
-	}
-	return timer_idx;
-}
-
-static int closest_thres(const struct sge *s, int thres)
-{
-	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
-
-	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
-		delta = thres - s->counter_val[i];
-		if (delta < 0)
-			delta = -delta;
-		if (delta < min_delta) {
-			min_delta = delta;
-			pktcnt_idx = i;
-		}
-	}
-	return pktcnt_idx;
-}
-
-/*
- * Return a queue's interrupt hold-off time in us.  0 means no timer.
- */
-static unsigned int qtimer_val(const struct adapter *adapter,
-			       const struct sge_rspq *rspq)
-{
-	unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
-
-	return timer_idx < SGE_NTIMERS
-		? adapter->sge.timer_val[timer_idx]
-		: 0;
-}
-
-/**
- * set_rxq_intr_params - set a queue's interrupt holdoff parameters
- * @adapter: the adapter
- * @rspq: the RX response queue
- * @us: the hold-off time in us, or 0 to disable timer
- * @cnt: the hold-off packet count, or 0 to disable counter
- *
- * Sets an RX response queue's interrupt hold-off time and packet count.
- * At least one of the two needs to be enabled for the queue to generate
- * interrupts.
- */
-static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
-			       unsigned int us, unsigned int cnt)
-{
-	unsigned int timer_idx;
-
-	/*
-	 * If both the interrupt holdoff timer and count are specified as
-	 * zero, default to a holdoff count of 1 ...
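closest_timer() and closest_thres() above are linear nearest-value searches; the strict '<' comparison means ties resolve to the earlier (smaller) table entry. A worked standalone example with a hypothetical timer table of {5, 10, 20, 50, 100, 200} microseconds, where a request for 35 us resolves to index 2:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int timer_val[] = { 5, 10, 20, 50, 100, 200 };	/* made-up table */
	int us = 35, i, timer_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < 6; i++) {
		int delta = us - timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {	/* strict '<': a tie keeps  */
			min_delta = delta;	/* the earlier, shorter timer */
			timer_idx = i;
		}
	}
	/* 35 is 15 from both 20 and 50; index 2 (20us) wins the tie */
	printf("closest to %dus: index %d (%dus)\n",
	       us, timer_idx, timer_val[timer_idx]);
	return 0;
}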
- */
-	if ((us | cnt) == 0)
-		cnt = 1;
-
-	/*
-	 * If an interrupt holdoff count has been specified, then find the
-	 * closest configured holdoff count and use that.  If the response
-	 * queue has already been created, then update its queue context
-	 * parameters ...
-	 */
-	if (cnt) {
-		int err;
-		u32 v, pktcnt_idx;
-
-		pktcnt_idx = closest_thres(&adapter->sge, cnt);
-		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
-			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
-			    FW_PARAMS_PARAM_X(
-					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
-			    FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
-			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
-			if (err)
-				return err;
-		}
-		rspq->pktcnt_idx = pktcnt_idx;
-	}
-
-	/*
-	 * Compute the closest holdoff timer index from the supplied holdoff
-	 * timer value.
-	 */
-	timer_idx = (us == 0
-		     ? SGE_TIMER_RSTRT_CNTR
-		     : closest_timer(&adapter->sge, us));
-
-	/*
-	 * Update the response queue's interrupt coalescing parameters and
-	 * return success.
-	 */
-	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
-			     (cnt > 0 ? QINTR_CNT_EN : 0));
-	return 0;
-}
-
-/*
- * Return a version number to identify the type of adapter.  The scheme is:
- * - bits 0..9: chip version
- * - bits 10..15: chip revision
- */
-static inline unsigned int mk_adap_vers(const struct adapter *adapter)
-{
-	/*
-	 * Chip version 4, revision 0x3f (cxgb4vf).
-	 */
-	return 4 | (0x3f << 10);
-}
-
-/*
- * Execute the specified ioctl command.
- */
-static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-	int ret = 0;
-
-	switch (cmd) {
-	    /*
-	     * The VF Driver doesn't have access to any of the other
-	     * common Ethernet device ioctl()'s (like reading/writing
-	     * PHY registers, etc.)
-	     */
-
-	default:
-		ret = -EOPNOTSUPP;
-		break;
-	}
-	return ret;
-}
-
-/*
- * Change the device's MTU.
- */
-static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
-{
-	int ret;
-	struct port_info *pi = netdev_priv(dev);
-
-	/* accommodate SACK */
-	if (new_mtu < 81)
-		return -EINVAL;
-
-	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
-			      -1, -1, -1, -1, true);
-	if (!ret)
-		dev->mtu = new_mtu;
-	return ret;
-}
-
-static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
-{
-	/*
-	 * Since there is no support for separate rx/tx vlan accel
-	 * enable/disable make sure tx flag is always in same state as rx.
-	 */
-	if (features & NETIF_F_HW_VLAN_RX)
-		features |= NETIF_F_HW_VLAN_TX;
-	else
-		features &= ~NETIF_F_HW_VLAN_TX;
-
-	return features;
-}
-
-static int cxgb4vf_set_features(struct net_device *dev, u32 features)
-{
-	struct port_info *pi = netdev_priv(dev);
-	u32 changed = dev->features ^ features;
-
-	if (changed & NETIF_F_HW_VLAN_RX)
-		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
-				features & NETIF_F_HW_VLAN_TX, 0);
-
-	return 0;
-}
-
-/*
- * Change the device's MAC address.
- */
-static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
-{
-	int ret;
-	struct sockaddr *addr = _addr;
-	struct port_info *pi = netdev_priv(dev);
-
-	if (!is_valid_ether_addr(addr->sa_data))
-		return -EINVAL;
-
-	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
-			      addr->sa_data, true);
-	if (ret < 0)
-		return ret;
-
-	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-	pi->xact_addr_filt = ret;
-	return 0;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Poll all of our receive queues.  This is called outside of normal interrupt
- * context.
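cxgb4vf_set_features() above uses the standard changed-bits idiom: XOR the old and new feature words and react only to bits that actually flipped, rather than reprogramming the hardware on every call. In isolation, with stand-in feature bits:

#include <stdint.h>
#include <stdio.h>

#define DEMO_F_VLAN_RX (1u << 0)	/* stand-ins for NETIF_F_* bits */
#define DEMO_F_VLAN_TX (1u << 1)

int main(void)
{
	uint32_t old_features = DEMO_F_VLAN_RX | DEMO_F_VLAN_TX;
	uint32_t new_features = 0;
	uint32_t changed = old_features ^ new_features;

	if (changed & DEMO_F_VLAN_RX)
		printf("VLAN RX offload toggled -> reprogram rxmode\n");
	return 0;
}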
- */ -static void cxgb4vf_poll_controller(struct net_device *dev) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - if (adapter->flags & USING_MSIX) { - struct sge_eth_rxq *rxq; - int nqsets; - - rxq = &adapter->sge.ethrxq[pi->first_qset]; - for (nqsets = pi->nqsets; nqsets; nqsets--) { - t4vf_sge_intr_msix(0, &rxq->rspq); - rxq++; - } - } else - t4vf_intr_handler(adapter)(0, adapter); -} -#endif - -/* - * Ethtool operations. - * =================== - * - * Note that we don't support any ethtool operations which change the physical - * state of the port to which we're linked. - */ - -/* - * Return current port link settings. - */ -static int cxgb4vf_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - const struct port_info *pi = netdev_priv(dev); - - cmd->supported = pi->link_cfg.supported; - cmd->advertising = pi->link_cfg.advertising; - ethtool_cmd_speed_set(cmd, - netif_carrier_ok(dev) ? pi->link_cfg.speed : -1); - cmd->duplex = DUPLEX_FULL; - - cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; - cmd->phy_address = pi->port_id; - cmd->transceiver = XCVR_EXTERNAL; - cmd->autoneg = pi->link_cfg.autoneg; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - return 0; -} - -/* - * Return our driver information. - */ -static void cxgb4vf_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - struct adapter *adapter = netdev2adap(dev); - - strcpy(drvinfo->driver, KBUILD_MODNAME); - strcpy(drvinfo->version, DRV_VERSION); - strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent))); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%u.%u.%u.%u, TP %u.%u.%u.%u", - FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev), - FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev), - FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev)); -} - -/* - * Return current adapter message level. - */ -static u32 cxgb4vf_get_msglevel(struct net_device *dev) -{ - return netdev2adap(dev)->msg_enable; -} - -/* - * Set current adapter message level. - */ -static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel) -{ - netdev2adap(dev)->msg_enable = msglevel; -} - -/* - * Return the device's current Queue Set ring size parameters along with the - * allowed maximum values. Since ethtool doesn't understand the concept of - * multi-queue devices, we just return the current values associated with the - * first Queue Set. - */ -static void cxgb4vf_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *rp) -{ - const struct port_info *pi = netdev_priv(dev); - const struct sge *s = &pi->adapter->sge; - - rp->rx_max_pending = MAX_RX_BUFFERS; - rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES; - rp->rx_jumbo_max_pending = 0; - rp->tx_max_pending = MAX_TXQ_ENTRIES; - - rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID; - rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; - rp->rx_jumbo_pending = 0; - rp->tx_pending = s->ethtxq[pi->first_qset].q.size; -} - -/* - * Set the Queue Set ring size parameters for the device. Again, since - * ethtool doesn't allow for the concept of multiple queues per device, we'll - * apply these new values across all of the Queue Sets associated with the - * device -- after vetting them of course! 
- */ -static int cxgb4vf_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *rp) -{ - const struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - struct sge *s = &adapter->sge; - int qs; - - if (rp->rx_pending > MAX_RX_BUFFERS || - rp->rx_jumbo_pending || - rp->tx_pending > MAX_TXQ_ENTRIES || - rp->rx_mini_pending > MAX_RSPQ_ENTRIES || - rp->rx_mini_pending < MIN_RSPQ_ENTRIES || - rp->rx_pending < MIN_FL_ENTRIES || - rp->tx_pending < MIN_TXQ_ENTRIES) - return -EINVAL; - - if (adapter->flags & FULL_INIT_DONE) - return -EBUSY; - - for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) { - s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID; - s->ethrxq[qs].rspq.size = rp->rx_mini_pending; - s->ethtxq[qs].q.size = rp->tx_pending; - } - return 0; -} - -/* - * Return the interrupt holdoff timer and count for the first Queue Set on the - * device. Our extension ioctl() (the cxgbtool interface) allows the - * interrupt holdoff timer to be read on all of the device's Queue Sets. - */ -static int cxgb4vf_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coalesce) -{ - const struct port_info *pi = netdev_priv(dev); - const struct adapter *adapter = pi->adapter; - const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq; - - coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq); - coalesce->rx_max_coalesced_frames = - ((rspq->intr_params & QINTR_CNT_EN) - ? adapter->sge.counter_val[rspq->pktcnt_idx] - : 0); - return 0; -} - -/* - * Set the RX interrupt holdoff timer and count for the first Queue Set on the - * interface. Our extension ioctl() (the cxgbtool interface) allows us to set - * the interrupt holdoff timer on any of the device's Queue Sets. - */ -static int cxgb4vf_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coalesce) -{ - const struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - return set_rxq_intr_params(adapter, - &adapter->sge.ethrxq[pi->first_qset].rspq, - coalesce->rx_coalesce_usecs, - coalesce->rx_max_coalesced_frames); -} - -/* - * Report current port link pause parameter settings. - */ -static void cxgb4vf_get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *pauseparam) -{ - struct port_info *pi = netdev_priv(dev); - - pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; - pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0; - pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0; -} - -/* - * Identify the port by blinking the port's LED. - */ -static int cxgb4vf_phys_id(struct net_device *dev, - enum ethtool_phys_id_state state) -{ - unsigned int val; - struct port_info *pi = netdev_priv(dev); - - if (state == ETHTOOL_ID_ACTIVE) - val = 0xffff; - else if (state == ETHTOOL_ID_INACTIVE) - val = 0; - else - return -EINVAL; - - return t4vf_identify_port(pi->adapter, pi->viid, val); -} - -/* - * Port stats maintained per queue of the port. - */ -struct queue_port_stats { - u64 tso; - u64 tx_csum; - u64 rx_csum; - u64 vlan_ex; - u64 vlan_ins; - u64 lro_pkts; - u64 lro_merged; -}; - -/* - * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that - * these need to match the order of statistics returned by - * t4vf_get_port_stats(). - */ -static const char stats_strings[][ETH_GSTRING_LEN] = { - /* - * These must match the layout of the t4vf_port_stats structure. 
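The layout coupling called out here is easy to break silently, since cxgb4vf_get_ethtool_stats() below copies a t4vf_port_stats and then a queue_port_stats straight into the u64 output array. One defensive option -- a hypothetical addition, not something this driver carries -- is a compile-time guard placed in any function where these definitions are visible, e.g. at the top of cxgb4vf_get_ethtool_stats():

	/*
	 * Hypothetical guard: break the build if the string table and the
	 * two stats structures ever fall out of step.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(stats_strings) !=
		     sizeof(struct t4vf_port_stats) / sizeof(u64) +
		     sizeof(struct queue_port_stats) / sizeof(u64));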
- */ - "TxBroadcastBytes ", - "TxBroadcastFrames ", - "TxMulticastBytes ", - "TxMulticastFrames ", - "TxUnicastBytes ", - "TxUnicastFrames ", - "TxDroppedFrames ", - "TxOffloadBytes ", - "TxOffloadFrames ", - "RxBroadcastBytes ", - "RxBroadcastFrames ", - "RxMulticastBytes ", - "RxMulticastFrames ", - "RxUnicastBytes ", - "RxUnicastFrames ", - "RxErrorFrames ", - - /* - * These are accumulated per-queue statistics and must match the - * order of the fields in the queue_port_stats structure. - */ - "TSO ", - "TxCsumOffload ", - "RxCsumGood ", - "VLANextractions ", - "VLANinsertions ", - "GROPackets ", - "GROMerged ", -}; - -/* - * Return the number of statistics in the specified statistics set. - */ -static int cxgb4vf_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ARRAY_SIZE(stats_strings); - default: - return -EOPNOTSUPP; - } - /*NOTREACHED*/ -} - -/* - * Return the strings for the specified statistics set. - */ -static void cxgb4vf_get_strings(struct net_device *dev, - u32 sset, - u8 *data) -{ - switch (sset) { - case ETH_SS_STATS: - memcpy(data, stats_strings, sizeof(stats_strings)); - break; - } -} - -/* - * Small utility routine to accumulate queue statistics across the queues of - * a "port". - */ -static void collect_sge_port_stats(const struct adapter *adapter, - const struct port_info *pi, - struct queue_port_stats *stats) -{ - const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset]; - const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; - int qs; - - memset(stats, 0, sizeof(*stats)); - for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { - stats->tso += txq->tso; - stats->tx_csum += txq->tx_cso; - stats->rx_csum += rxq->stats.rx_cso; - stats->vlan_ex += rxq->stats.vlan_ex; - stats->vlan_ins += txq->vlan_ins; - stats->lro_pkts += rxq->stats.lro_pkts; - stats->lro_merged += rxq->stats.lro_merged; - } -} - -/* - * Return the ETH_SS_STATS statistics set. - */ -static void cxgb4vf_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, - u64 *data) -{ - struct port_info *pi = netdev2pinfo(dev); - struct adapter *adapter = pi->adapter; - int err = t4vf_get_port_stats(adapter, pi->pidx, - (struct t4vf_port_stats *)data); - if (err) - memset(data, 0, sizeof(struct t4vf_port_stats)); - - data += sizeof(struct t4vf_port_stats) / sizeof(u64); - collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); -} - -/* - * Return the size of our register map. - */ -static int cxgb4vf_get_regs_len(struct net_device *dev) -{ - return T4VF_REGMAP_SIZE; -} - -/* - * Dump a block of registers, start to end inclusive, into a buffer. - */ -static void reg_block_dump(struct adapter *adapter, void *regbuf, - unsigned int start, unsigned int end) -{ - u32 *bp = regbuf + start - T4VF_REGMAP_START; - - for ( ; start <= end; start += sizeof(u32)) { - /* - * Avoid reading the Mailbox Control register since that - * can trigger a Mailbox Ownership Arbitration cycle and - * interfere with communication with the firmware. - */ - if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL) - *bp++ = 0xffff; - else - *bp++ = t4_read_reg(adapter, start); - } -} - -/* - * Copy our entire register map into the provided buffer. - */ -static void cxgb4vf_get_regs(struct net_device *dev, - struct ethtool_regs *regs, - void *regbuf) -{ - struct adapter *adapter = netdev2adap(dev); - - regs->version = mk_adap_vers(adapter); - - /* - * Fill in register buffer with our register map. 
- */ - memset(regbuf, 0, T4VF_REGMAP_SIZE); - - reg_block_dump(adapter, regbuf, - T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST, - T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST); - reg_block_dump(adapter, regbuf, - T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, - T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); - reg_block_dump(adapter, regbuf, - T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, - T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST); - reg_block_dump(adapter, regbuf, - T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, - T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); - - reg_block_dump(adapter, regbuf, - T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST, - T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST); -} - -/* - * Report current Wake On LAN settings. - */ -static void cxgb4vf_get_wol(struct net_device *dev, - struct ethtool_wolinfo *wol) -{ - wol->supported = 0; - wol->wolopts = 0; - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -/* - * TCP Segmentation Offload flags which we support. - */ -#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) - -static struct ethtool_ops cxgb4vf_ethtool_ops = { - .get_settings = cxgb4vf_get_settings, - .get_drvinfo = cxgb4vf_get_drvinfo, - .get_msglevel = cxgb4vf_get_msglevel, - .set_msglevel = cxgb4vf_set_msglevel, - .get_ringparam = cxgb4vf_get_ringparam, - .set_ringparam = cxgb4vf_set_ringparam, - .get_coalesce = cxgb4vf_get_coalesce, - .set_coalesce = cxgb4vf_set_coalesce, - .get_pauseparam = cxgb4vf_get_pauseparam, - .get_link = ethtool_op_get_link, - .get_strings = cxgb4vf_get_strings, - .set_phys_id = cxgb4vf_phys_id, - .get_sset_count = cxgb4vf_get_sset_count, - .get_ethtool_stats = cxgb4vf_get_ethtool_stats, - .get_regs_len = cxgb4vf_get_regs_len, - .get_regs = cxgb4vf_get_regs, - .get_wol = cxgb4vf_get_wol, -}; - -/* - * /sys/kernel/debug/cxgb4vf support code and data. - * ================================================ - */ - -/* - * Show SGE Queue Set information. We display QPL Queues Sets per line. - */ -#define QPL 4 - -static int sge_qinfo_show(struct seq_file *seq, void *v) -{ - struct adapter *adapter = seq->private; - int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); - int qs, r = (uintptr_t)v - 1; - - if (r) - seq_putc(seq, '\n'); - - #define S3(fmt_spec, s, v) \ - do {\ - seq_printf(seq, "%-12s", s); \ - for (qs = 0; qs < n; ++qs) \ - seq_printf(seq, " %16" fmt_spec, v); \ - seq_putc(seq, '\n'); \ - } while (0) - #define S(s, v) S3("s", s, v) - #define T(s, v) S3("u", s, txq[qs].v) - #define R(s, v) S3("u", s, rxq[qs].v) - - if (r < eth_entries) { - const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; - const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; - int n = min(QPL, adapter->sge.ethqsets - QPL * r); - - S("QType:", "Ethernet"); - S("Interface:", - (rxq[qs].rspq.netdev - ? rxq[qs].rspq.netdev->name - : "N/A")); - S3("d", "Port:", - (rxq[qs].rspq.netdev - ? 
((struct port_info *) - netdev_priv(rxq[qs].rspq.netdev))->port_id - : -1)); - T("TxQ ID:", q.abs_id); - T("TxQ size:", q.size); - T("TxQ inuse:", q.in_use); - T("TxQ PIdx:", q.pidx); - T("TxQ CIdx:", q.cidx); - R("RspQ ID:", rspq.abs_id); - R("RspQ size:", rspq.size); - R("RspQE size:", rspq.iqe_len); - S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq)); - S3("u", "Intr pktcnt:", - adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]); - R("RspQ CIdx:", rspq.cidx); - R("RspQ Gen:", rspq.gen); - R("FL ID:", fl.abs_id); - R("FL size:", fl.size - MIN_FL_RESID); - R("FL avail:", fl.avail); - R("FL PIdx:", fl.pidx); - R("FL CIdx:", fl.cidx); - return 0; - } - - r -= eth_entries; - if (r == 0) { - const struct sge_rspq *evtq = &adapter->sge.fw_evtq; - - seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue"); - seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id); - seq_printf(seq, "%-12s %16u\n", "Intr delay:", - qtimer_val(adapter, evtq)); - seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", - adapter->sge.counter_val[evtq->pktcnt_idx]); - seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx); - seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen); - } else if (r == 1) { - const struct sge_rspq *intrq = &adapter->sge.intrq; - - seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue"); - seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id); - seq_printf(seq, "%-12s %16u\n", "Intr delay:", - qtimer_val(adapter, intrq)); - seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", - adapter->sge.counter_val[intrq->pktcnt_idx]); - seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx); - seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen); - } - - #undef R - #undef T - #undef S - #undef S3 - - return 0; -} - -/* - * Return the number of "entries" in our "file". We group the multi-Queue - * sections with QPL Queue Sets per "entry". The sections of the output are: - * - * Ethernet RX/TX Queue Sets - * Firmware Event Queue - * Forwarded Interrupt Queue (if in MSI mode) - */ -static int sge_queue_entries(const struct adapter *adapter) -{ - return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + - ((adapter->flags & USING_MSI) != 0); -} - -static void *sge_queue_start(struct seq_file *seq, loff_t *pos) -{ - int entries = sge_queue_entries(seq->private); - - return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; -} - -static void sge_queue_stop(struct seq_file *seq, void *v) -{ -} - -static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos) -{ - int entries = sge_queue_entries(seq->private); - - ++*pos; - return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; -} - -static const struct seq_operations sge_qinfo_seq_ops = { - .start = sge_queue_start, - .next = sge_queue_next, - .stop = sge_queue_stop, - .show = sge_qinfo_show -}; - -static int sge_qinfo_open(struct inode *inode, struct file *file) -{ - int res = seq_open(file, &sge_qinfo_seq_ops); - - if (!res) { - struct seq_file *seq = file->private_data; - seq->private = inode->i_private; - } - return res; -} - -static const struct file_operations sge_qinfo_debugfs_fops = { - .owner = THIS_MODULE, - .open = sge_qinfo_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * Show SGE Queue Set statistics. We display QPL Queues Sets per line. 
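The seq_file iterators above encode the record number in the iterator cookie itself: position pos becomes (void *)(pos + 1) because a NULL return means end-of-sequence, and show() undoes the bias with r = (uintptr_t)v - 1. The round trip in a standalone sketch:

#include <stdint.h>
#include <stdio.h>

static void *start(long pos, int entries)
{
	/* +1 keeps position 0 distinguishable from the NULL terminator */
	return pos < entries ? (void *)((uintptr_t)pos + 1) : NULL;
}

int main(void)
{
	int entries = 3;
	long pos;

	for (pos = 0; ; pos++) {
		void *v = start(pos, entries);
		if (!v)
			break;
		printf("show() sees record %ld\n", (long)((uintptr_t)v - 1));
	}
	return 0;
}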
- */ -#define QPL 4 - -static int sge_qstats_show(struct seq_file *seq, void *v) -{ - struct adapter *adapter = seq->private; - int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); - int qs, r = (uintptr_t)v - 1; - - if (r) - seq_putc(seq, '\n'); - - #define S3(fmt, s, v) \ - do { \ - seq_printf(seq, "%-16s", s); \ - for (qs = 0; qs < n; ++qs) \ - seq_printf(seq, " %8" fmt, v); \ - seq_putc(seq, '\n'); \ - } while (0) - #define S(s, v) S3("s", s, v) - - #define T3(fmt, s, v) S3(fmt, s, txq[qs].v) - #define T(s, v) T3("lu", s, v) - - #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v) - #define R(s, v) R3("lu", s, v) - - if (r < eth_entries) { - const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; - const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; - int n = min(QPL, adapter->sge.ethqsets - QPL * r); - - S("QType:", "Ethernet"); - S("Interface:", - (rxq[qs].rspq.netdev - ? rxq[qs].rspq.netdev->name - : "N/A")); - R3("u", "RspQNullInts:", rspq.unhandled_irqs); - R("RxPackets:", stats.pkts); - R("RxCSO:", stats.rx_cso); - R("VLANxtract:", stats.vlan_ex); - R("LROmerged:", stats.lro_merged); - R("LROpackets:", stats.lro_pkts); - R("RxDrops:", stats.rx_drops); - T("TSO:", tso); - T("TxCSO:", tx_cso); - T("VLANins:", vlan_ins); - T("TxQFull:", q.stops); - T("TxQRestarts:", q.restarts); - T("TxMapErr:", mapping_err); - R("FLAllocErr:", fl.alloc_failed); - R("FLLrgAlcErr:", fl.large_alloc_failed); - R("FLStarving:", fl.starving); - return 0; - } - - r -= eth_entries; - if (r == 0) { - const struct sge_rspq *evtq = &adapter->sge.fw_evtq; - - seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue"); - seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", - evtq->unhandled_irqs); - seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx); - seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen); - } else if (r == 1) { - const struct sge_rspq *intrq = &adapter->sge.intrq; - - seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue"); - seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", - intrq->unhandled_irqs); - seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx); - seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen); - } - - #undef R - #undef T - #undef S - #undef R3 - #undef T3 - #undef S3 - - return 0; -} - -/* - * Return the number of "entries" in our "file". We group the multi-Queue - * sections with QPL Queue Sets per "entry". The sections of the output are: - * - * Ethernet RX/TX Queue Sets - * Firmware Event Queue - * Forwarded Interrupt Queue (if in MSI mode) - */ -static int sge_qstats_entries(const struct adapter *adapter) -{ - return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + - ((adapter->flags & USING_MSI) != 0); -} - -static void *sge_qstats_start(struct seq_file *seq, loff_t *pos) -{ - int entries = sge_qstats_entries(seq->private); - - return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; -} - -static void sge_qstats_stop(struct seq_file *seq, void *v) -{ -} - -static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos) -{ - int entries = sge_qstats_entries(seq->private); - - (*pos)++; - return *pos < entries ? 
(void *)((uintptr_t)*pos + 1) : NULL; -} - -static const struct seq_operations sge_qstats_seq_ops = { - .start = sge_qstats_start, - .next = sge_qstats_next, - .stop = sge_qstats_stop, - .show = sge_qstats_show -}; - -static int sge_qstats_open(struct inode *inode, struct file *file) -{ - int res = seq_open(file, &sge_qstats_seq_ops); - - if (res == 0) { - struct seq_file *seq = file->private_data; - seq->private = inode->i_private; - } - return res; -} - -static const struct file_operations sge_qstats_proc_fops = { - .owner = THIS_MODULE, - .open = sge_qstats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * Show PCI-E SR-IOV Virtual Function Resource Limits. - */ -static int resources_show(struct seq_file *seq, void *v) -{ - struct adapter *adapter = seq->private; - struct vf_resources *vfres = &adapter->params.vfres; - - #define S(desc, fmt, var) \ - seq_printf(seq, "%-60s " fmt "\n", \ - desc " (" #var "):", vfres->var) - - S("Virtual Interfaces", "%d", nvi); - S("Egress Queues", "%d", neq); - S("Ethernet Control", "%d", nethctrl); - S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint); - S("Ingress Queues", "%d", niq); - S("Traffic Class", "%d", tc); - S("Port Access Rights Mask", "%#x", pmask); - S("MAC Address Filters", "%d", nexactf); - S("Firmware Command Read Capabilities", "%#x", r_caps); - S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps); - - #undef S - - return 0; -} - -static int resources_open(struct inode *inode, struct file *file) -{ - return single_open(file, resources_show, inode->i_private); -} - -static const struct file_operations resources_proc_fops = { - .owner = THIS_MODULE, - .open = resources_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -/* - * Show Virtual Interfaces. - */ -static int interfaces_show(struct seq_file *seq, void *v) -{ - if (v == SEQ_START_TOKEN) { - seq_puts(seq, "Interface Port VIID\n"); - } else { - struct adapter *adapter = seq->private; - int pidx = (uintptr_t)v - 2; - struct net_device *dev = adapter->port[pidx]; - struct port_info *pi = netdev_priv(dev); - - seq_printf(seq, "%9s %4d %#5x\n", - dev->name, pi->port_id, pi->viid); - } - return 0; -} - -static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos) -{ - return pos <= adapter->params.nports - ? (void *)(uintptr_t)(pos + 1) - : NULL; -} - -static void *interfaces_start(struct seq_file *seq, loff_t *pos) -{ - return *pos - ? interfaces_get_idx(seq->private, *pos) - : SEQ_START_TOKEN; -} - -static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos) -{ - (*pos)++; - return interfaces_get_idx(seq->private, *pos); -} - -static void interfaces_stop(struct seq_file *seq, void *v) -{ -} - -static const struct seq_operations interfaces_seq_ops = { - .start = interfaces_start, - .next = interfaces_next, - .stop = interfaces_stop, - .show = interfaces_show -}; - -static int interfaces_open(struct inode *inode, struct file *file) -{ - int res = seq_open(file, &interfaces_seq_ops); - - if (res == 0) { - struct seq_file *seq = file->private_data; - seq->private = inode->i_private; - } - return res; -} - -static const struct file_operations interfaces_proc_fops = { - .owner = THIS_MODULE, - .open = interfaces_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * /sys/kernel/debugfs/cxgb4vf/ files list. 
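interfaces_show() above leans on the seq_file SEQ_START_TOKEN convention to print a header row before the records, and interfaces_get_idx() biases real positions by +1 so the token and record 0 never collide. A compressed sketch of that protocol:

#include <stdint.h>
#include <stdio.h>

#define SEQ_START_TOKEN ((void *)1)

static void *get_idx(long pos, int nports)
{
	/* pos 1..nports -> record pos-1; pos 0 is the header token */
	return pos <= nports ? (void *)(uintptr_t)(pos + 1) : NULL;
}

int main(void)
{
	int nports = 2;
	long pos;

	for (pos = 0; ; pos++) {
		void *v = pos ? get_idx(pos, nports) : SEQ_START_TOKEN;
		if (!v)
			break;
		if (v == SEQ_START_TOKEN)
			printf("Interface  Port  VIID\n");
		else
			printf("record for port %ld\n",
			       (long)((uintptr_t)v - 2));
	}
	return 0;
}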
- */
-struct cxgb4vf_debugfs_entry {
-	const char *name;		/* name of debugfs node */
-	mode_t mode;			/* file system mode */
-	const struct file_operations *fops;
-};
-
-static struct cxgb4vf_debugfs_entry debugfs_files[] = {
-	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
-	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
-	{ "resources",  S_IRUGO, &resources_proc_fops },
-	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
-};
-
-/*
- * Module and device initialization and cleanup code.
- * ==================================================
- */
-
-/*
- * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
- * directory (debugfs_root) has already been set up.
- */
-static int __devinit setup_debugfs(struct adapter *adapter)
-{
-	int i;
-
-	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
-
-	/*
-	 * Debugfs support is best effort.
-	 */
-	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
-		(void)debugfs_create_file(debugfs_files[i].name,
-					  debugfs_files[i].mode,
-					  adapter->debugfs_root,
-					  (void *)adapter,
-					  debugfs_files[i].fops);
-
-	return 0;
-}
-
-/*
- * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
- * it to our caller to tear down the directory (debugfs_root).
- */
-static void cleanup_debugfs(struct adapter *adapter)
-{
-	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
-
-	/*
-	 * Unlike our sister routine cleanup_proc(), we don't need to remove
-	 * individual entries because a call will be made to
-	 * debugfs_remove_recursive().  We just need to clean up any ancillary
-	 * persistent state.
-	 */
-	/* nothing to do */
-}
-
-/*
- * Perform early "adapter" initialization.  This is where we discover what
- * adapter parameters we're going to be using and initialize basic adapter
- * hardware support.
- */
-static int __devinit adap_init0(struct adapter *adapter)
-{
-	struct vf_resources *vfres = &adapter->params.vfres;
-	struct sge_params *sge_params = &adapter->params.sge;
-	struct sge *s = &adapter->sge;
-	unsigned int ethqsets;
-	int err;
-
-	/*
-	 * Wait for the device to become ready before proceeding ...
-	 */
-	err = t4vf_wait_dev_ready(adapter);
-	if (err) {
-		dev_err(adapter->pdev_dev, "device didn't become ready:"
-			" err=%d\n", err);
-		return err;
-	}
-
-	/*
-	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
-	 * 2.6.31 and later we can't call pci_reset_function() in order to
-	 * issue an FLR because of a self-deadlock on the device semaphore.
-	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
-	 * cases where they're needed -- for instance, some versions of KVM
-	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
-	 * use the firmware based reset in order to reset any per function
-	 * state.
-	 */
-	err = t4vf_fw_reset(adapter);
-	if (err < 0) {
-		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
-		return err;
-	}
-
-	/*
-	 * Grab basic operational parameters.  These will predominantly have
-	 * been set up by the Physical Function Driver or will be hard coded
-	 * into the adapter.  We just have to live with them ...  Note that
-	 * we _must_ get our VPD parameters before our SGE parameters because
-	 * we need to know the adapter's core clock from the VPD in order to
-	 * properly decode the SGE Timer Values.
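Concretely: the SGE timer registers hold core-clock ticks, and the core_ticks_to_us() conversion applied just below needs the VPD's core clock to turn them into microseconds. A sketch of the arithmetic, assuming (an assumption of this sketch, not a statement about the real helper) that the clock is carried in kHz:

#include <stdio.h>

/* assumed conversion: cclk_khz is the core clock in kHz */
static unsigned int demo_core_ticks_to_us(unsigned int ticks,
					  unsigned int cclk_khz)
{
	return ticks * 1000 / cclk_khz;
}

int main(void)
{
	/* 31250 ticks at a made-up 500 MHz core clock -> 62us holdoff */
	printf("%u us\n", demo_core_ticks_to_us(31250, 500000));
	return 0;
}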
- */
-	err = t4vf_get_dev_params(adapter);
-	if (err) {
-		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
-			" device parameters: err=%d\n", err);
-		return err;
-	}
-	err = t4vf_get_vpd_params(adapter);
-	if (err) {
-		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
-			" VPD parameters: err=%d\n", err);
-		return err;
-	}
-	err = t4vf_get_sge_params(adapter);
-	if (err) {
-		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
-			" SGE parameters: err=%d\n", err);
-		return err;
-	}
-	err = t4vf_get_rss_glb_config(adapter);
-	if (err) {
-		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
-			" RSS parameters: err=%d\n", err);
-		return err;
-	}
-	if (adapter->params.rss.mode !=
-	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
-		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
-			" mode %d\n", adapter->params.rss.mode);
-		return -EINVAL;
-	}
-	err = t4vf_sge_init(adapter);
-	if (err) {
-		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
-			" err=%d\n", err);
-		return err;
-	}
-
-	/*
-	 * Retrieve our RX interrupt holdoff timer values and counter
-	 * threshold values from the SGE parameters.
-	 */
-	s->timer_val[0] = core_ticks_to_us(adapter,
-		TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
-	s->timer_val[1] = core_ticks_to_us(adapter,
-		TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
-	s->timer_val[2] = core_ticks_to_us(adapter,
-		TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
-	s->timer_val[3] = core_ticks_to_us(adapter,
-		TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
-	s->timer_val[4] = core_ticks_to_us(adapter,
-		TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
-	s->timer_val[5] = core_ticks_to_us(adapter,
-		TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
-
-	s->counter_val[0] =
-		THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
-	s->counter_val[1] =
-		THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
-	s->counter_val[2] =
-		THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
-	s->counter_val[3] =
-		THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
-
-	/*
-	 * Grab our Virtual Interface resource allocation, extract the
-	 * features that we're interested in and do a bit of sanity testing on
-	 * what we discover.
-	 */
-	err = t4vf_get_vfres(adapter);
-	if (err) {
-		dev_err(adapter->pdev_dev, "unable to get virtual interface"
-			" resources: err=%d\n", err);
-		return err;
-	}
-
-	/*
-	 * The number of "ports" which we support is equal to the number of
-	 * Virtual Interfaces with which we've been provisioned.
-	 */
-	adapter->params.nports = vfres->nvi;
-	if (adapter->params.nports > MAX_NPORTS) {
-		dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
-			 " virtual interfaces\n", MAX_NPORTS,
-			 adapter->params.nports);
-		adapter->params.nports = MAX_NPORTS;
-	}
-
-	/*
-	 * We need to reserve a number of the ingress queues with Free List
-	 * and Interrupt capabilities for special interrupt purposes (like
-	 * asynchronous firmware messages, or forwarded interrupts if we're
-	 * using MSI).  The rest of the FL/Intr-capable ingress queues will be
-	 * matched up one-for-one with Ethernet/Control egress queues in order
-	 * to form "Queue Sets" which will be apportioned between the "ports".
-	 * For each Queue Set, we'll need the ability to allocate two Egress
-	 * Contexts -- one for the Ingress Queue Free List and one for the TX
-	 * Ethernet Queue.
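The budgeting that follows clamps the Queue Set count three ways: by interrupt-capable ingress queues minus the reserved extras, by Ethernet control capabilities, and by Egress Contexts at two per set. A worked example with invented resource counts:

#include <stdio.h>

int main(void)
{
	/* hypothetical provisioning handed down by the PF driver */
	unsigned int niqflint = 34, nethctrl = 28, neq = 50;
	unsigned int ingq_extras = 2;	/* firmware event + forwarded intr */
	unsigned int ethqsets;

	ethqsets = niqflint - ingq_extras;	/* 32 after the extras */
	if (nethctrl < ethqsets)
		ethqsets = nethctrl;		/* 28, TX-side limit */
	if (neq < ethqsets * 2)
		ethqsets = neq / 2;		/* 25, two EQs per set */
	printf("Queue Sets supportable: %u\n", ethqsets);
	return 0;
}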
-	 */
-	ethqsets = vfres->niqflint - INGQ_EXTRAS;
-	if (vfres->nethctrl != ethqsets) {
-		dev_warn(adapter->pdev_dev, "unequal number of [available]"
-			 " ingress/egress queues (%d/%d); using minimum for"
-			 " number of Queue Sets\n", ethqsets, vfres->nethctrl);
-		ethqsets = min(vfres->nethctrl, ethqsets);
-	}
-	if (vfres->neq < ethqsets*2) {
-		dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
-			 " to support Queue Sets (%d); reducing allowed Queue"
-			 " Sets\n", vfres->neq, ethqsets);
-		ethqsets = vfres->neq/2;
-	}
-	if (ethqsets > MAX_ETH_QSETS) {
-		dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
-			 " Sets\n", MAX_ETH_QSETS, ethqsets);
-		ethqsets = MAX_ETH_QSETS;
-	}
-	if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
-		dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
-			 " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
-	}
-	adapter->sge.max_ethqsets = ethqsets;
-
-	/*
-	 * Check for various parameter sanity issues.  Most checks simply
-	 * result in us using fewer resources than our provisioning but we
-	 * do need at least one "port" with which to work ...
-	 */
-	if (adapter->sge.max_ethqsets < adapter->params.nports) {
-		dev_warn(adapter->pdev_dev, "only using %d of %d available"
-			 " virtual interfaces (too few Queue Sets)\n",
-			 adapter->sge.max_ethqsets, adapter->params.nports);
-		adapter->params.nports = adapter->sge.max_ethqsets;
-	}
-	if (adapter->params.nports == 0) {
-		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
-			"usable!\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
-			     u8 pkt_cnt_idx, unsigned int size,
-			     unsigned int iqe_size)
-{
-	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
-			     (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
-	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
-			    ? pkt_cnt_idx
-			    : 0);
-	rspq->iqe_len = iqe_size;
-	rspq->size = size;
-}
-
-/*
- * Perform default configuration of DMA queues depending on the number and
- * type of ports we found and the number of available CPUs.  Most settings can
- * be modified by the admin via ethtool and cxgbtool prior to the adapter
- * being brought up for the first time.
- */
-static void __devinit cfg_queues(struct adapter *adapter)
-{
-	struct sge *s = &adapter->sge;
-	int q10g, n10g, qidx, pidx, qs;
-	size_t iqe_size;
-
-	/*
-	 * We should not be called till we know how many Queue Sets we can
-	 * support.  In particular, this means that we need to know what kind
-	 * of interrupts we'll be using ...
-	 */
-	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
-
-	/*
-	 * Count the number of 10GbE Virtual Interfaces that we have.
-	 */
-	n10g = 0;
-	for_each_port(adapter, pidx)
-		n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
-
-	/*
-	 * We default to 1 queue per non-10G port and up to # of cores queues
-	 * per 10G port.
-	 */
-	if (n10g == 0)
-		q10g = 0;
-	else {
-		int n1g = (adapter->params.nports - n10g);
-		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
-		if (q10g > num_online_cpus())
-			q10g = num_online_cpus();
-	}
-
-	/*
-	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
-	 * The layout will be established in setup_sge_queues() when the
-	 * adapter is brought up for the first time.
-	 */
-	qidx = 0;
-	for_each_port(adapter, pidx) {
-		struct port_info *pi = adap2pinfo(adapter, pidx);
-
-		pi->first_qset = qidx;
-		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
-		qidx += pi->nqsets;
-	}
-	s->ethqsets = qidx;
-
-	/*
-	 * The Ingress Queue Entry Size for our various Response Queues needs
-	 * to be big enough to accommodate the largest message we can receive
-	 * from the chip/firmware; which is 64 bytes ...
-	 */
-	iqe_size = 64;
-
-	/*
-	 * Set up default Queue Set parameters ...  Start off with the
-	 * shortest interrupt holdoff timer.
-	 */
-	for (qs = 0; qs < s->max_ethqsets; qs++) {
-		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
-		struct sge_eth_txq *txq = &s->ethtxq[qs];
-
-		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
-		rxq->fl.size = 72;
-		txq->q.size = 1024;
-	}
-
-	/*
-	 * The firmware event queue is used for link state changes and
-	 * notifications of TX DMA completions.
-	 */
-	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
-
-	/*
-	 * The forwarded interrupt queue is used when we're in MSI interrupt
-	 * mode.  In this mode all interrupts associated with RX queues will
-	 * be forwarded to a single queue which we'll associate with our MSI
-	 * interrupt vector.  The messages dropped in the forwarded interrupt
-	 * queue will indicate which ingress queue needs servicing ...  This
-	 * queue needs to be large enough to accommodate all of the ingress
-	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
-	 * from equalling the CIDX if every ingress queue has an outstanding
-	 * interrupt).  The queue doesn't need to be any larger because no
-	 * ingress queue will ever have more than one outstanding interrupt at
-	 * any time ...
-	 */
-	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
-		  iqe_size);
-}
-
-/*
- * Reduce the number of Ethernet queues across all ports to at most n.
- * n provides at least one queue per port.
- */
-static void __devinit reduce_ethqs(struct adapter *adapter, int n)
-{
-	int i;
-	struct port_info *pi;
-
-	/*
-	 * While we have too many active Ether Queue Sets, iterate across the
-	 * "ports" and reduce their individual Queue Set allocations.
-	 */
-	BUG_ON(n < adapter->params.nports);
-	while (n < adapter->sge.ethqsets)
-		for_each_port(adapter, i) {
-			pi = adap2pinfo(adapter, i);
-			if (pi->nqsets > 1) {
-				pi->nqsets--;
-				adapter->sge.ethqsets--;
-				if (adapter->sge.ethqsets <= n)
-					break;
-			}
-		}
-
-	/*
-	 * Reassign the starting Queue Sets for each of the "ports" ...
-	 */
-	n = 0;
-	for_each_port(adapter, i) {
-		pi = adap2pinfo(adapter, i);
-		pi->first_qset = n;
-		n += pi->nqsets;
-	}
-}
-
-/*
- * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
- * we get a separate MSI-X vector for every "Queue Set" plus any extras we
- * need.  Minimally we need one for every Virtual Interface plus those needed
- * for our "extras".  Note that this process may lower the maximum number of
- * allowed Queue Sets ...
- */
-static int __devinit enable_msix(struct adapter *adapter)
-{
-	int i, err, want, need;
-	struct msix_entry entries[MSIX_ENTRIES];
-	struct sge *s = &adapter->sge;
-
-	for (i = 0; i < MSIX_ENTRIES; ++i)
-		entries[i].entry = i;
-
-	/*
-	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
-	 * plus those needed for our "extras" (for example, the firmware
-	 * message queue).  We _need_ at least one "Queue Set" per Virtual
-	 * Interface plus those needed for our "extras".  So now we get to see
-	 * if the song is right ...
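The want/need negotiation performed below leans on the return convention of the old pci_enable_msix(): zero on success, a positive count of vectors actually available, or a negative errno. A minimal sketch of that retry protocol, with the caller's counts left as parameters:

#include <linux/pci.h>

/* Keep retrying with whatever vector count the system says it can
 * give us, until we either succeed (0) or fall below 'need'. */
static int example_msix_negotiate(struct pci_dev *pdev,
				  struct msix_entry *entries,
				  int want, int need)
{
	int err;

	while ((err = pci_enable_msix(pdev, entries, want)) >= need)
		want = err;
	return err;	/* 0: got 'want' vectors; otherwise give up */
}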
- */ - want = s->max_ethqsets + MSIX_EXTRAS; - need = adapter->params.nports + MSIX_EXTRAS; - while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need) - want = err; - - if (err == 0) { - int nqsets = want - MSIX_EXTRAS; - if (nqsets < s->max_ethqsets) { - dev_warn(adapter->pdev_dev, "only enough MSI-X vectors" - " for %d Queue Sets\n", nqsets); - s->max_ethqsets = nqsets; - if (nqsets < s->ethqsets) - reduce_ethqs(adapter, nqsets); - } - for (i = 0; i < want; ++i) - adapter->msix_info[i].vec = entries[i].vector; - } else if (err > 0) { - pci_disable_msix(adapter->pdev); - dev_info(adapter->pdev_dev, "only %d MSI-X vectors left," - " not using MSI-X\n", err); - } - return err; -} - -static const struct net_device_ops cxgb4vf_netdev_ops = { - .ndo_open = cxgb4vf_open, - .ndo_stop = cxgb4vf_stop, - .ndo_start_xmit = t4vf_eth_xmit, - .ndo_get_stats = cxgb4vf_get_stats, - .ndo_set_rx_mode = cxgb4vf_set_rxmode, - .ndo_set_mac_address = cxgb4vf_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = cxgb4vf_do_ioctl, - .ndo_change_mtu = cxgb4vf_change_mtu, - .ndo_fix_features = cxgb4vf_fix_features, - .ndo_set_features = cxgb4vf_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = cxgb4vf_poll_controller, -#endif -}; - -/* - * "Probe" a device: initialize a device and construct all kernel and driver - * state needed to manage the device. This routine is called "init_one" in - * the PF Driver ... - */ -static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - static int version_printed; - - int pci_using_dac; - int err, pidx; - unsigned int pmask; - struct adapter *adapter; - struct port_info *pi; - struct net_device *netdev; - - /* - * Print our driver banner the first time we're called to initialize a - * device. - */ - if (version_printed == 0) { - printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); - version_printed = 1; - } - - /* - * Initialize generic PCI device state. - */ - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - return err; - } - - /* - * Reserve PCI resources for the device. If we can't get them some - * other driver may have already claimed the device ... - */ - err = pci_request_regions(pdev, KBUILD_MODNAME); - if (err) { - dev_err(&pdev->dev, "cannot obtain PCI resources\n"); - goto err_disable_device; - } - - /* - * Set up our DMA mask: try for 64-bit address masking first and - * fall back to 32-bit if we can't get 64 bits ... - */ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err == 0) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" - " coherent allocations\n"); - goto err_release_regions; - } - pci_using_dac = 1; - } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err != 0) { - dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto err_release_regions; - } - pci_using_dac = 0; - } - - /* - * Enable bus mastering for the device ... - */ - pci_set_master(pdev); - - /* - * Allocate our adapter data structure and attach it to the device. - */ - adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); - if (!adapter) { - err = -ENOMEM; - goto err_release_regions; - } - pci_set_drvdata(pdev, adapter); - adapter->pdev = pdev; - adapter->pdev_dev = &pdev->dev; - - /* - * Initialize SMP data synchronization resources. - */ - spin_lock_init(&adapter->stats_lock); - - /* - * Map our I/O registers in BAR0. 
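For reference, the pci_ioremap_bar() call at the top of the next block is essentially shorthand for mapping the whole BAR uncached. A rough open-coded equivalent; the real helper also validates that the BAR is a memory resource:

#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	/* Map all of BAR0's register space, uncached. */
	return ioremap_nocache(pci_resource_start(pdev, 0),
			       pci_resource_len(pdev, 0));
}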
- */ - adapter->regs = pci_ioremap_bar(pdev, 0); - if (!adapter->regs) { - dev_err(&pdev->dev, "cannot map device registers\n"); - err = -ENOMEM; - goto err_free_adapter; - } - - /* - * Initialize adapter level features. - */ - adapter->name = pci_name(pdev); - adapter->msg_enable = dflt_msg_enable; - err = adap_init0(adapter); - if (err) - goto err_unmap_bar; - - /* - * Allocate our "adapter ports" and stitch everything together. - */ - pmask = adapter->params.vfres.pmask; - for_each_port(adapter, pidx) { - int port_id, viid; - - /* - * We simplistically allocate our virtual interfaces - * sequentially across the port numbers to which we have - * access rights. This should be configurable in some manner - * ... - */ - if (pmask == 0) - break; - port_id = ffs(pmask) - 1; - pmask &= ~(1 << port_id); - viid = t4vf_alloc_vi(adapter, port_id); - if (viid < 0) { - dev_err(&pdev->dev, "cannot allocate VI for port %d:" - " err=%d\n", port_id, viid); - err = viid; - goto err_free_dev; - } - - /* - * Allocate our network device and stitch things together. - */ - netdev = alloc_etherdev_mq(sizeof(struct port_info), - MAX_PORT_QSETS); - if (netdev == NULL) { - dev_err(&pdev->dev, "cannot allocate netdev for" - " port %d\n", port_id); - t4vf_free_vi(adapter, viid); - err = -ENOMEM; - goto err_free_dev; - } - adapter->port[pidx] = netdev; - SET_NETDEV_DEV(netdev, &pdev->dev); - pi = netdev_priv(netdev); - pi->adapter = adapter; - pi->pidx = pidx; - pi->port_id = port_id; - pi->viid = viid; - - /* - * Initialize the starting state of our "port" and register - * it. - */ - pi->xact_addr_filt = -1; - netif_carrier_off(netdev); - netdev->irq = pdev->irq; - - netdev->hw_features = NETIF_F_SG | TSO_FLAGS | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; - netdev->vlan_features = NETIF_F_SG | TSO_FLAGS | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HIGHDMA; - netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX; - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; - - netdev->netdev_ops = &cxgb4vf_netdev_ops; - SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops); - - /* - * Initialize the hardware/software state for the port. - */ - err = t4vf_port_init(adapter, pidx); - if (err) { - dev_err(&pdev->dev, "cannot initialize port %d\n", - pidx); - goto err_free_dev; - } - } - - /* - * The "card" is now ready to go. If any errors occur during device - * registration we do not fail the whole "card" but rather proceed - * only with the ports we manage to register successfully. However we - * must register at least one net device. - */ - for_each_port(adapter, pidx) { - netdev = adapter->port[pidx]; - if (netdev == NULL) - continue; - - err = register_netdev(netdev); - if (err) { - dev_warn(&pdev->dev, "cannot register net device %s," - " skipping\n", netdev->name); - continue; - } - - set_bit(pidx, &adapter->registered_device_map); - } - if (adapter->registered_device_map == 0) { - dev_err(&pdev->dev, "could not register any net devices\n"); - goto err_free_dev; - } - - /* - * Set up our debugfs entries. - */ - if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { - adapter->debugfs_root = - debugfs_create_dir(pci_name(pdev), - cxgb4vf_debugfs_root); - if (IS_ERR_OR_NULL(adapter->debugfs_root)) - dev_warn(&pdev->dev, "could not create debugfs" - " directory"); - else - setup_debugfs(adapter); - } - - /* - * See what interrupts we'll be using. 
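The port-allocation loop above peels Virtual Interfaces off the VF's port access mask with ffs(). A small sketch of the same walk with an invented mask of 0x0a (binary 1010), which would yield port_id 1 and then 3:

#include <linux/bitops.h>

static void example_pmask_walk(unsigned int pmask)
{
	while (pmask) {
		int port_id = ffs(pmask) - 1;	/* lowest set bit */

		pmask &= ~(1U << port_id);
		/* ... allocate a Virtual Interface on port_id ... */
	}
}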
If we've been configured to - * use MSI-X interrupts, try to enable them but fall back to using - * MSI interrupts if we can't enable MSI-X interrupts. If we can't - * get MSI interrupts we bail with the error. - */ - if (msi == MSI_MSIX && enable_msix(adapter) == 0) - adapter->flags |= USING_MSIX; - else { - err = pci_enable_msi(pdev); - if (err) { - dev_err(&pdev->dev, "Unable to allocate %s interrupts;" - " err=%d\n", - msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err); - goto err_free_debugfs; - } - adapter->flags |= USING_MSI; - } - - /* - * Now that we know how many "ports" we have and what their types are, - * and how many Queue Sets we can support, we can configure our queue - * resources. - */ - cfg_queues(adapter); - - /* - * Print a short notice on the existence and configuration of the new - * VF network device ... - */ - for_each_port(adapter, pidx) { - dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n", - adapter->port[pidx]->name, - (adapter->flags & USING_MSIX) ? "MSI-X" : - (adapter->flags & USING_MSI) ? "MSI" : ""); - } - - /* - * Return success! - */ - return 0; - - /* - * Error recovery and exit code. Unwind state that's been created - * so far and return the error. - */ - -err_free_debugfs: - if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { - cleanup_debugfs(adapter); - debugfs_remove_recursive(adapter->debugfs_root); - } - -err_free_dev: - for_each_port(adapter, pidx) { - netdev = adapter->port[pidx]; - if (netdev == NULL) - continue; - pi = netdev_priv(netdev); - t4vf_free_vi(adapter, pi->viid); - if (test_bit(pidx, &adapter->registered_device_map)) - unregister_netdev(netdev); - free_netdev(netdev); - } - -err_unmap_bar: - iounmap(adapter->regs); - -err_free_adapter: - kfree(adapter); - pci_set_drvdata(pdev, NULL); - -err_release_regions: - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); - pci_clear_master(pdev); - -err_disable_device: - pci_disable_device(pdev); - - return err; -} - -/* - * "Remove" a device: tear down all kernel and driver state created in the - * "probe" routine and quiesce the device (disable interrupts, etc.). (Note - * that this is called "remove_one" in the PF Driver.) - */ -static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) -{ - struct adapter *adapter = pci_get_drvdata(pdev); - - /* - * Tear down driver state associated with device. - */ - if (adapter) { - int pidx; - - /* - * Stop all of our activity. Unregister network port, - * disable interrupts, etc. - */ - for_each_port(adapter, pidx) - if (test_bit(pidx, &adapter->registered_device_map)) - unregister_netdev(adapter->port[pidx]); - t4vf_sge_stop(adapter); - if (adapter->flags & USING_MSIX) { - pci_disable_msix(adapter->pdev); - adapter->flags &= ~USING_MSIX; - } else if (adapter->flags & USING_MSI) { - pci_disable_msi(adapter->pdev); - adapter->flags &= ~USING_MSI; - } - - /* - * Tear down our debugfs entries. - */ - if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { - cleanup_debugfs(adapter); - debugfs_remove_recursive(adapter->debugfs_root); - } - - /* - * Free all of the various resources which we've acquired ... - */ - t4vf_free_sge_resources(adapter); - for_each_port(adapter, pidx) { - struct net_device *netdev = adapter->port[pidx]; - struct port_info *pi; - - if (netdev == NULL) - continue; - - pi = netdev_priv(netdev); - t4vf_free_vi(adapter, pi->viid); - free_netdev(netdev); - } - iounmap(adapter->regs); - kfree(adapter); - pci_set_drvdata(pdev, NULL); - } - - /* - * Disable the device and release its PCI resources. 
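The error-recovery ladder above follows the usual kernel unwind idiom: every acquisition gets a label that releases it, and a failure jumps to the label covering everything acquired so far. Stripped to a skeleton, with a generic resource set rather than the driver's full list:

#include <linux/pci.h>

static int example_probe_skeleton(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_disable_device;
	/* ... further setup, each step adding its own err_* label ... */
	return 0;

err_disable_device:
	pci_disable_device(pdev);
	return err;
}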
-	 */
-	pci_disable_device(pdev);
-	pci_clear_master(pdev);
-	pci_release_regions(pdev);
-}
-
-/*
- * "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
- * delivery.
- */
-static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
-{
-	struct adapter *adapter;
-	int pidx;
-
-	adapter = pci_get_drvdata(pdev);
-	if (!adapter)
-		return;
-
-	/*
-	 * Disable all Virtual Interfaces.  This will shut down the
-	 * delivery of all ingress packets into the chip for these
-	 * Virtual Interfaces.
-	 */
-	for_each_port(adapter, pidx) {
-		struct net_device *netdev;
-		struct port_info *pi;
-
-		if (!test_bit(pidx, &adapter->registered_device_map))
-			continue;
-
-		netdev = adapter->port[pidx];
-		if (!netdev)
-			continue;
-
-		pi = netdev_priv(netdev);
-		t4vf_enable_vi(adapter, pi->viid, false, false);
-	}
-
-	/*
-	 * Free up all Queues which will prevent further DMA and
-	 * Interrupts allowing various internal pathways to drain.
-	 */
-	t4vf_free_sge_resources(adapter);
-}
-
-/*
- * PCI Device registration data structures.
- */
-#define CH_DEVICE(devid, idx) \
-	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
-
-static struct pci_device_id cxgb4vf_pci_tbl[] = {
-	CH_DEVICE(0xb000, 0),	/* PE10K FPGA */
-	CH_DEVICE(0x4800, 0),	/* T440-dbg */
-	CH_DEVICE(0x4801, 0),	/* T420-cr */
-	CH_DEVICE(0x4802, 0),	/* T422-cr */
-	CH_DEVICE(0x4803, 0),	/* T440-cr */
-	CH_DEVICE(0x4804, 0),	/* T420-bch */
-	CH_DEVICE(0x4805, 0),	/* T440-bch */
-	CH_DEVICE(0x4806, 0),	/* T460-ch */
-	CH_DEVICE(0x4807, 0),	/* T420-so */
-	CH_DEVICE(0x4808, 0),	/* T420-cx */
-	CH_DEVICE(0x4809, 0),	/* T420-bt */
-	CH_DEVICE(0x480a, 0),	/* T404-bt */
-	{ 0, }
-};
-
-MODULE_DESCRIPTION(DRV_DESC);
-MODULE_AUTHOR("Chelsio Communications");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
-MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
-
-static struct pci_driver cxgb4vf_driver = {
-	.name		= KBUILD_MODNAME,
-	.id_table	= cxgb4vf_pci_tbl,
-	.probe		= cxgb4vf_pci_probe,
-	.remove		= __devexit_p(cxgb4vf_pci_remove),
-	.shutdown	= __devexit_p(cxgb4vf_pci_shutdown),
-};
-
-/*
- * Initialize global driver state.
- */
-static int __init cxgb4vf_module_init(void)
-{
-	int ret;
-
-	/*
-	 * Vet our module parameters.
-	 */
-	if (msi != MSI_MSIX && msi != MSI_MSI) {
-		printk(KERN_WARNING KBUILD_MODNAME
-		       ": bad module parameter msi=%d; must be %d"
-		       " (MSI-X or MSI) or %d (MSI)\n",
-		       msi, MSI_MSIX, MSI_MSI);
-		return -EINVAL;
-	}
-
-	/* Debugfs support is optional, just warn if this fails */
-	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
-		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
-		       " debugfs entry, continuing\n");
-
-	ret = pci_register_driver(&cxgb4vf_driver);
-	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
-		debugfs_remove(cxgb4vf_debugfs_root);
-	return ret;
-}
-
-/*
- * Tear down global driver state.
- */
-static void __exit cxgb4vf_module_exit(void)
-{
-	pci_unregister_driver(&cxgb4vf_driver);
-	debugfs_remove(cxgb4vf_debugfs_root);
-}
-
-module_init(cxgb4vf_module_init);
-module_exit(cxgb4vf_module_exit);
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
deleted file mode 100644
index cffb328..0000000
--- a/drivers/net/cxgb4vf/sge.c
+++ /dev/null
@@ -1,2465 +0,0 @@
-/*
- * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
- * driver for Linux.
- *
- * Copyright (c) 2009-2010 Chelsio Communications, Inc.  All rights reserved.
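Expanding one entry of the device table above through the CH_DEVICE() macro makes the match rule explicit; by inspection, CH_DEVICE(0x4800, 0) becomes:

static const struct pci_device_id example_entry = {
	PCI_VENDOR_ID_CHELSIO,	/* vendor */
	0x4800,			/* device (T440-dbg) */
	PCI_ANY_ID,		/* any subsystem vendor */
	PCI_ANY_ID,		/* any subsystem device */
	0, 0,			/* class/class_mask: don't care */
	0			/* driver_data (idx) */
};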
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <net/ipv6.h>
-#include <net/tcp.h>
-#include <linux/dma-mapping.h>
-#include <linux/prefetch.h>
-
-#include "t4vf_common.h"
-#include "t4vf_defs.h"
-
-#include "../cxgb4/t4_regs.h"
-#include "../cxgb4/t4fw_api.h"
-#include "../cxgb4/t4_msg.h"
-
-/*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER;		/* large page allocation size */
-static u32 STAT_LEN;		/* length of status page at ring end */
-static u32 PKTSHIFT;		/* padding between CPL and packet data */
-static u32 FL_ALIGN;		/* response queue message alignment */
-
-/*
- * Constants ...
- */
-enum {
-	/*
-	 * Egress Queue sizes, producer and consumer indices are all in units
-	 * of Egress Context Units bytes.  Note that as far as the hardware is
-	 * concerned, the free list is an Egress Queue (the host produces free
-	 * buffers which the hardware consumes) and free list entries are
-	 * 64-bit PCI DMA addresses.
-	 */
-	EQ_UNIT = SGE_EQ_IDXSIZE,
-	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
-	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
-
-	/*
-	 * Max number of TX descriptors we clean up at a time.  Should be
-	 * modest as freeing skbs isn't cheap and it happens while holding
-	 * locks.  We just need to free packets faster than they arrive, we
-	 * eventually catch up and keep the amortized cost reasonable.
-	 */
-	MAX_TX_RECLAIM = 16,
-
-	/*
-	 * Max number of Rx buffers we replenish at a time.  Again keep this
-	 * modest, allocating buffers isn't cheap either.
-	 */
-	MAX_RX_REFILL = 16,
-
-	/*
-	 * Period of the Rx queue check timer.  This timer is infrequent as it
-	 * has something to do only when the system experiences severe memory
-	 * shortage.
-	 */
-	RX_QCHECK_PERIOD = (HZ / 2),
-
-	/*
-	 * Period of the TX queue check timer and the maximum number of TX
-	 * descriptors to be reclaimed by the TX timer.
-	 */
-	TX_QCHECK_PERIOD = (HZ / 2),
-	MAX_TIMER_TX_RECLAIM = 100,
-
-	/*
-	 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
-	 * timer will attempt to refill it.
-	 */
-	FL_STARVE_THRES = 4,
-
-	/*
-	 * Suspend an Ethernet TX queue with fewer available descriptors than
-	 * this.  We always want to have room for a maximum sized packet:
-	 * inline immediate data + MAX_SKB_FRAGS.
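To put numbers on the unit definitions above: if SGE_EQ_IDXSIZE is 64 bytes (an assumption made here for illustration; the real value comes from the hardware definitions in t4vf_defs.h), the units work out as follows:

enum {
	EXAMPLE_EQ_UNIT		= 64,		/* assumed SGE_EQ_IDXSIZE */
	EXAMPLE_FL_PER_EQ_UNIT	= 64 / 8,	/* 8 Free List addresses per unit */
	EXAMPLE_TXD_PER_EQ_UNIT	= 64 / 8,	/* 8 TX descriptor flits per unit */
};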
This is the same as - * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS - * (see that function and its helpers for a description of the - * calculation). - */ - ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1, - ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 + - ((ETHTXQ_MAX_FRAGS-1) & 1) + - 2), - ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + - sizeof(struct cpl_tx_pkt_lso_core) + - sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), - ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR, - - ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT), - - /* - * Max TX descriptor space we allow for an Ethernet packet to be - * inlined into a WR. This is limited by the maximum value which - * we can specify for immediate data in the firmware Ethernet TX - * Work Request. - */ - MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK, - - /* - * Max size of a WR sent through a control TX queue. - */ - MAX_CTRL_WR_LEN = 256, - - /* - * Maximum amount of data which we'll ever need to inline into a - * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN). - */ - MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN - ? MAX_IMM_TX_PKT_LEN - : MAX_CTRL_WR_LEN), - - /* - * For incoming packets less than RX_COPY_THRES, we copy the data into - * an skb rather than referencing the data. We allocate enough - * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes - * of the data (header). - */ - RX_COPY_THRES = 256, - RX_PULL_LEN = 128, - - /* - * Main body length for sk_buffs used for RX Ethernet packets with - * fragments. Should be >= RX_PULL_LEN but possibly bigger to give - * pskb_may_pull() some room. - */ - RX_SKB_LEN = 512, -}; - -/* - * Software state per TX descriptor. - */ -struct tx_sw_desc { - struct sk_buff *skb; /* socket buffer of TX data source */ - struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */ -}; - -/* - * Software state per RX Free List descriptor. We keep track of the allocated - * FL page, its size, and its PCI DMA address (if the page is mapped). The FL - * page size and its PCI DMA mapped state are stored in the low bits of the - * PCI DMA address as per below. - */ -struct rx_sw_desc { - struct page *page; /* Free List page buffer */ - dma_addr_t dma_addr; /* PCI DMA address (if mapped) */ - /* and flags (see below) */ -}; - -/* - * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the - * SGE also uses the low 4 bits to determine the size of the buffer. It uses - * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array. - * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4 - * bits can only contain a 0 or a 1 to indicate which size buffer we're giving - * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is - * maintained in an inverse sense so the hardware never sees that bit high. - */ -enum { - RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */ - RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ -}; - -/** - * get_buf_addr - return DMA buffer address of software descriptor - * @sdesc: pointer to the software buffer descriptor - * - * Return the DMA buffer address of a software descriptor (stripping out - * our low-order flag bits). - */ -static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc) -{ - return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); -} - -/** - * is_buf_mapped - is buffer mapped for DMA? 
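The flag scheme described above works because Free List buffers are page aligned, leaving the low bits of the bus address free for software state. A sketch of the encoding side, complementing the get_buf_addr() decode above:

/* Only the buffer-size bit travels to the hardware FL descriptor;
 * the "unmapped" bit lives purely in the software descriptor. */
static inline dma_addr_t example_encode_fl_addr(dma_addr_t addr,
						bool large, bool unmapped)
{
	return addr | (large ? RX_LARGE_BUF : 0) |
		      (unmapped ? RX_UNMAPPED_BUF : 0);
}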
- * @sdesc: pointer to the software buffer descriptor
- *
- * Determine whether the buffer associated with a software descriptor is
- * mapped for DMA or not.
- */
-static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
-{
-	return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
-}
-
-/**
- * need_skb_unmap - does the platform need unmapping of sk_buffs?
- *
- * Returns true if the platform needs sk_buff unmapping.  The compiler
- * optimizes away unnecessary code if this returns true.
- */
-static inline int need_skb_unmap(void)
-{
-#ifdef CONFIG_NEED_DMA_MAP_STATE
-	return 1;
-#else
-	return 0;
-#endif
-}
-
-/**
- * txq_avail - return the number of available slots in a TX queue
- * @tq: the TX queue
- *
- * Returns the number of available descriptors in a TX queue.
- */
-static inline unsigned int txq_avail(const struct sge_txq *tq)
-{
-	return tq->size - 1 - tq->in_use;
-}
-
-/**
- * fl_cap - return the capacity of a Free List
- * @fl: the Free List
- *
- * Returns the capacity of a Free List.  The capacity is less than the
- * size because an Egress Queue Index Unit worth of descriptors needs to
- * be left unpopulated, otherwise the Producer and Consumer indices PIDX
- * and CIDX will match and the hardware will think the FL is empty.
- */
-static inline unsigned int fl_cap(const struct sge_fl *fl)
-{
-	return fl->size - FL_PER_EQ_UNIT;
-}
-
-/**
- * fl_starving - return whether a Free List is starving.
- * @fl: the Free List
- *
- * Tests specified Free List to see whether the number of buffers
- * available to the hardware has fallen below our "starvation"
- * threshold.
- */
-static inline bool fl_starving(const struct sge_fl *fl)
-{
-	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
-}
-
-/**
- * map_skb - map an skb for DMA to the device
- * @dev: the egress net device
- * @skb: the packet to map
- * @addr: a pointer to the base of the DMA mapping array
- *
- * Map an skb for DMA to the device and return an array of DMA addresses.
- */ -static int map_skb(struct device *dev, const struct sk_buff *skb, - dma_addr_t *addr) -{ - const skb_frag_t *fp, *end; - const struct skb_shared_info *si; - - *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(dev, *addr)) - goto out_err; - - si = skb_shinfo(skb); - end = &si->frags[si->nr_frags]; - for (fp = si->frags; fp < end; fp++) { - *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, *addr)) - goto unwind; - } - return 0; - -unwind: - while (fp-- > si->frags) - dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); - dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); - -out_err: - return -ENOMEM; -} - -static void unmap_sgl(struct device *dev, const struct sk_buff *skb, - const struct ulptx_sgl *sgl, const struct sge_txq *tq) -{ - const struct ulptx_sge_pair *p; - unsigned int nfrags = skb_shinfo(skb)->nr_frags; - - if (likely(skb_headlen(skb))) - dma_unmap_single(dev, be64_to_cpu(sgl->addr0), - be32_to_cpu(sgl->len0), DMA_TO_DEVICE); - else { - dma_unmap_page(dev, be64_to_cpu(sgl->addr0), - be32_to_cpu(sgl->len0), DMA_TO_DEVICE); - nfrags--; - } - - /* - * the complexity below is because of the possibility of a wrap-around - * in the middle of an SGL - */ - for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { - if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) { -unmap: - dma_unmap_page(dev, be64_to_cpu(p->addr[0]), - be32_to_cpu(p->len[0]), DMA_TO_DEVICE); - dma_unmap_page(dev, be64_to_cpu(p->addr[1]), - be32_to_cpu(p->len[1]), DMA_TO_DEVICE); - p++; - } else if ((u8 *)p == (u8 *)tq->stat) { - p = (const struct ulptx_sge_pair *)tq->desc; - goto unmap; - } else if ((u8 *)p + 8 == (u8 *)tq->stat) { - const __be64 *addr = (const __be64 *)tq->desc; - - dma_unmap_page(dev, be64_to_cpu(addr[0]), - be32_to_cpu(p->len[0]), DMA_TO_DEVICE); - dma_unmap_page(dev, be64_to_cpu(addr[1]), - be32_to_cpu(p->len[1]), DMA_TO_DEVICE); - p = (const struct ulptx_sge_pair *)&addr[2]; - } else { - const __be64 *addr = (const __be64 *)tq->desc; - - dma_unmap_page(dev, be64_to_cpu(p->addr[0]), - be32_to_cpu(p->len[0]), DMA_TO_DEVICE); - dma_unmap_page(dev, be64_to_cpu(addr[0]), - be32_to_cpu(p->len[1]), DMA_TO_DEVICE); - p = (const struct ulptx_sge_pair *)&addr[1]; - } - } - if (nfrags) { - __be64 addr; - - if ((u8 *)p == (u8 *)tq->stat) - p = (const struct ulptx_sge_pair *)tq->desc; - addr = ((u8 *)p + 16 <= (u8 *)tq->stat - ? p->addr[0] - : *(const __be64 *)tq->desc); - dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]), - DMA_TO_DEVICE); - } -} - -/** - * free_tx_desc - reclaims TX descriptors and their buffers - * @adapter: the adapter - * @tq: the TX queue to reclaim descriptors from - * @n: the number of descriptors to reclaim - * @unmap: whether the buffers should be unmapped for DMA - * - * Reclaims TX descriptors from an SGE TX queue and frees the associated - * TX buffers. Called with the TX queue lock held. - */ -static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq, - unsigned int n, bool unmap) -{ - struct tx_sw_desc *sdesc; - unsigned int cidx = tq->cidx; - struct device *dev = adapter->pdev_dev; - - const int need_unmap = need_skb_unmap() && unmap; - - sdesc = &tq->sdesc[cidx]; - while (n--) { - /* - * If we kept a reference to the original TX skb, we need to - * unmap it from PCI DMA space (if required) and free it. 
-		 */
-		if (sdesc->skb) {
-			if (need_unmap)
-				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
-			kfree_skb(sdesc->skb);
-			sdesc->skb = NULL;
-		}
-
-		sdesc++;
-		if (++cidx == tq->size) {
-			cidx = 0;
-			sdesc = tq->sdesc;
-		}
-	}
-	tq->cidx = cidx;
-}
-
-/*
- * Return the number of reclaimable descriptors in a TX queue.
- */
-static inline int reclaimable(const struct sge_txq *tq)
-{
-	int hw_cidx = be16_to_cpu(tq->stat->cidx);
-	int reclaimable = hw_cidx - tq->cidx;
-	if (reclaimable < 0)
-		reclaimable += tq->size;
-	return reclaimable;
-}
-
-/**
- * reclaim_completed_tx - reclaims completed TX descriptors
- * @adapter: the adapter
- * @tq: the TX queue to reclaim completed descriptors from
- * @unmap: whether the buffers should be unmapped for DMA
- *
- * Reclaims TX descriptors that the SGE has indicated it has processed,
- * and frees the associated buffers if possible.  Called with the TX
- * queue locked.
- */
-static inline void reclaim_completed_tx(struct adapter *adapter,
-					struct sge_txq *tq,
-					bool unmap)
-{
-	int avail = reclaimable(tq);
-
-	if (avail) {
-		/*
-		 * Limit the amount of clean up work we do at a time to keep
-		 * the TX lock hold time O(1).
-		 */
-		if (avail > MAX_TX_RECLAIM)
-			avail = MAX_TX_RECLAIM;
-
-		free_tx_desc(adapter, tq, avail, unmap);
-		tq->in_use -= avail;
-	}
-}
-
-/**
- * get_buf_size - return the size of an RX Free List buffer.
- * @sdesc: pointer to the software buffer descriptor
- */
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
-{
-	return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
-		? (PAGE_SIZE << FL_PG_ORDER)
-		: PAGE_SIZE;
-}
-
-/**
- * free_rx_bufs - free RX buffers on an SGE Free List
- * @adapter: the adapter
- * @fl: the SGE Free List to free buffers from
- * @n: how many buffers to free
- *
- * Release the next @n buffers on an SGE Free List RX queue.  The
- * buffers must be made inaccessible to hardware before calling this
- * function.
- */
-static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
-{
-	while (n--) {
-		struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
-
-		if (is_buf_mapped(sdesc))
-			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-				       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
-		put_page(sdesc->page);
-		sdesc->page = NULL;
-		if (++fl->cidx == fl->size)
-			fl->cidx = 0;
-		fl->avail--;
-	}
-}
-
-/**
- * unmap_rx_buf - unmap the current RX buffer on an SGE Free List
- * @adapter: the adapter
- * @fl: the SGE Free List
- *
- * Unmap the current buffer on an SGE Free List RX queue.  The
- * buffer must be made inaccessible to HW before calling this function.
- *
- * This is similar to @free_rx_bufs above but does not free the buffer.
- * Do note that the FL still loses any further access to the buffer.
- * This is used predominantly to "transfer ownership" of an FL buffer
- * to another entity (typically an skb's fragment list).
- */
-static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
-{
-	struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
-
-	if (is_buf_mapped(sdesc))
-		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-			       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
-	sdesc->page = NULL;
-	if (++fl->cidx == fl->size)
-		fl->cidx = 0;
-	fl->avail--;
-}
-
-/**
- * ring_fl_db - ring doorbell on free list
- * @adapter: the adapter
- * @fl: the Free List whose doorbell should be rung ...
- *
- * Tell the Scatter Gather Engine that there are new free list entries
- * available.
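The hardware consumer index read by reclaimable() above can lag behind the software index and wrap around the ring. With invented numbers, a 1024-entry queue, software cidx 1020 and hardware cidx 4 gives 4 - 1020 = -1016, and adding the queue size recovers the 8 descriptors completed across the wrap. The same computation on its own:

static inline int example_ring_distance(int hw_cidx, int sw_cidx, int size)
{
	int d = hw_cidx - sw_cidx;

	return d < 0 ? d + size : d;	/* e.g. (4, 1020, 1024) -> 8 */
}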
- */
-static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
-{
-	/*
-	 * The SGE keeps track of its Producer and Consumer Indices in terms
-	 * of Egress Queue Units so we can only tell it about integral numbers
-	 * of multiples of Free List Entries per Egress Queue Units ...
-	 */
-	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
-		wmb();
-		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
-			     DBPRIO |
-			     QID(fl->cntxt_id) |
-			     PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
-		fl->pend_cred %= FL_PER_EQ_UNIT;
-	}
-}
-
-/**
- * set_rx_sw_desc - initialize software RX buffer descriptor
- * @sdesc: pointer to the software RX buffer descriptor
- * @page: pointer to the page data structure backing the RX buffer
- * @dma_addr: PCI DMA address (possibly with low-bit flags)
- */
-static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
-				  dma_addr_t dma_addr)
-{
-	sdesc->page = page;
-	sdesc->dma_addr = dma_addr;
-}
-
-/*
- * Support for poisoning RX buffers ...
- */
-#define POISON_BUF_VAL -1
-
-static inline void poison_buf(struct page *page, size_t sz)
-{
-#if POISON_BUF_VAL >= 0
-	memset(page_address(page), POISON_BUF_VAL, sz);
-#endif
-}
-
-/**
- * refill_fl - refill an SGE RX buffer ring
- * @adapter: the adapter
- * @fl: the Free List ring to refill
- * @n: the number of new buffers to allocate
- * @gfp: the gfp flags for the allocations
- *
- * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
- * allocated with the supplied gfp flags.  The caller must assure that
- * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
- * EGRESS QUEUE UNITS_ indicates an empty Free List!  Returns the number
- * of buffers allocated.  If afterwards the queue is found critically low,
- * mark it as starving in the bitmap of starving FLs.
- */
-static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
-			      int n, gfp_t gfp)
-{
-	struct page *page;
-	dma_addr_t dma_addr;
-	unsigned int cred = fl->avail;
-	__be64 *d = &fl->desc[fl->pidx];
-	struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
-
-	/*
-	 * Sanity: ensure that the result of adding n Free List buffers
-	 * won't result in wrapping the SGE's Producer Index around to
-	 * its Consumer Index thereby indicating an empty Free List ...
-	 */
-	BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
-
-	/*
-	 * If we support large pages, prefer large buffers and fail over to
-	 * small pages if we can't allocate large pages to satisfy the refill.
-	 * If we don't support large pages, drop directly into the small page
-	 * allocation code.
-	 */
-	if (FL_PG_ORDER == 0)
-		goto alloc_small_pages;
-
-	while (n) {
-		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-				   FL_PG_ORDER);
-		if (unlikely(!page)) {
-			/*
-			 * We've failed in our attempt to allocate a "large
-			 * page".  Fail over to the "small page" allocation
-			 * below.
-			 */
-			fl->large_alloc_failed++;
-			break;
-		}
-		poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
-
-		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
-					PAGE_SIZE << FL_PG_ORDER,
-					PCI_DMA_FROMDEVICE);
-		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
-			/*
-			 * We've run out of DMA mapping space.  Free up the
-			 * buffer and return with what we've managed to put
-			 * into the free list.  We don't want to fail over to
-			 * the small page allocation below in this case
-			 * because DMA mapping resources are typically
-			 * critical resources once they become scarce.
-			 */
-			__free_pages(page, FL_PG_ORDER);
-			goto out;
-		}
-		dma_addr |= RX_LARGE_BUF;
-		*d++ = cpu_to_be64(dma_addr);
-
-		set_rx_sw_desc(sdesc, page, dma_addr);
-		sdesc++;
-
-		fl->avail++;
-		if (++fl->pidx == fl->size) {
-			fl->pidx = 0;
-			sdesc = fl->sdesc;
-			d = fl->desc;
-		}
-		n--;
-	}
-
-alloc_small_pages:
-	while (n--) {
-		page = __netdev_alloc_page(adapter->port[0],
-					   gfp | __GFP_NOWARN);
-		if (unlikely(!page)) {
-			fl->alloc_failed++;
-			break;
-		}
-		poison_buf(page, PAGE_SIZE);
-
-		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
-					PCI_DMA_FROMDEVICE);
-		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
-			netdev_free_page(adapter->port[0], page);
-			break;
-		}
-		*d++ = cpu_to_be64(dma_addr);
-
-		set_rx_sw_desc(sdesc, page, dma_addr);
-		sdesc++;
-
-		fl->avail++;
-		if (++fl->pidx == fl->size) {
-			fl->pidx = 0;
-			sdesc = fl->sdesc;
-			d = fl->desc;
-		}
-	}
-
-out:
-	/*
-	 * Update our accounting state to incorporate the new Free List
-	 * buffers, tell the hardware about them and return the number of
-	 * buffers which we were able to allocate.
-	 */
-	cred = fl->avail - cred;
-	fl->pend_cred += cred;
-	ring_fl_db(adapter, fl);
-
-	if (unlikely(fl_starving(fl))) {
-		smp_wmb();
-		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
-	}
-
-	return cred;
-}
-
-/*
- * Refill a Free List to its capacity or the Maximum Refill Increment,
- * whichever is smaller ...
- */
-static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
-{
-	refill_fl(adapter, fl,
-		  min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
-		  GFP_ATOMIC);
-}
-
-/**
- * alloc_ring - allocate resources for an SGE descriptor ring
- * @dev: the PCI device's core device
- * @nelem: the number of descriptors
- * @hwsize: the size of each hardware descriptor
- * @swsize: the size of each software descriptor
- * @busaddrp: the physical PCI bus address of the allocated ring
- * @swringp: return address pointer for software ring
- * @stat_size: extra space in hardware ring for status information
- *
- * Allocates resources for an SGE descriptor ring, such as TX queues,
- * free buffer lists, response queues, etc.  Each SGE ring requires
- * space for its hardware descriptors plus, optionally, space for software
- * state associated with each hardware entry (the metadata).  The function
- * returns three values: the virtual address for the hardware ring (the
- * return value of the function), the PCI bus address of the hardware
- * ring (in *busaddrp), and the address of the software ring (in swringp).
- * Both the hardware and software rings are returned zeroed out.
- */
-static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
-			size_t swsize, dma_addr_t *busaddrp, void *swringp,
-			size_t stat_size)
-{
-	/*
-	 * Allocate the hardware ring and PCI DMA bus address space for it.
-	 */
-	size_t hwlen = nelem * hwsize + stat_size;
-	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
-
-	if (!hwring)
-		return NULL;
-
-	/*
-	 * If the caller wants a software ring, allocate it and return a
-	 * pointer to it in *swringp.
-	 */
-	BUG_ON((swsize != 0) != (swringp != NULL));
-	if (swsize) {
-		void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
-
-		if (!swring) {
-			dma_free_coherent(dev, hwlen, hwring, *busaddrp);
-			return NULL;
-		}
-		*(void **)swringp = swring;
-	}
-
-	/*
-	 * Zero out the hardware ring and return its address as our function
-	 * value.
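A hypothetical caller of alloc_ring() above, carving out a 1024-entry TX queue with a trailing status page and a parallel software ring. The tx_desc and STAT_LEN names follow the driver's usage elsewhere in this file, but the call itself is purely illustrative:

static void *example_alloc_txq(struct device *dev,
			       struct tx_sw_desc **sdescp, dma_addr_t *physp)
{
	/* hardware descriptors + status page, plus reclaim metadata */
	return alloc_ring(dev, 1024, sizeof(struct tx_desc),
			  sizeof(struct tx_sw_desc), physp, sdescp,
			  STAT_LEN);
}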
-	 */
-	memset(hwring, 0, hwlen);
-	return hwring;
-}
-
-/**
- * sgl_len - calculates the size of an SGL of the given capacity
- * @n: the number of SGL entries
- *
- * Calculates the number of flits (8-byte units) needed for a Direct
- * Scatter/Gather List that can hold the given number of entries.
- */
-static inline unsigned int sgl_len(unsigned int n)
-{
-	/*
-	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
-	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
-	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
-	 * repeated sequences of { Length[i], Length[i+1], Address[i],
-	 * Address[i+1] } (this ensures that all addresses are on 64-bit
-	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
-	 * Address[N+1] is omitted.
-	 *
-	 * The following calculation incorporates all of the above.  It's
-	 * somewhat hard to follow but, briefly: the "+2" accounts for the
-	 * first two flits which include the DSGL header, Length0 and
-	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
-	 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
-	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
-	 * (n-1) is odd ...
-	 */
-	n--;
-	return (3 * n) / 2 + (n & 1) + 2;
-}
-
-/**
- * flits_to_desc - returns the num of TX descriptors for the given flits
- * @flits: the number of flits
- *
- * Returns the number of TX descriptors needed for the supplied number
- * of flits.
- */
-static inline unsigned int flits_to_desc(unsigned int flits)
-{
-	BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
-	return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
-}
-
-/**
- * is_eth_imm - can an Ethernet packet be sent as immediate data?
- * @skb: the packet
- *
- * Returns whether an Ethernet packet is small enough to fit completely as
- * immediate data.
- */
-static inline int is_eth_imm(const struct sk_buff *skb)
-{
-	/*
-	 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
-	 * which does not accommodate immediate data.  We could dike out all
-	 * of the support code for immediate data but that would tie our hands
-	 * too much if we ever want to enhance the firmware.  It would also
-	 * create more differences between the PF and VF Drivers.
-	 */
-	return false;
-}
-
-/**
- * calc_tx_flits - calculate the number of flits for a packet TX WR
- * @skb: the packet
- *
- * Returns the number of flits needed for a TX Work Request for the
- * given Ethernet packet, including the needed WR and CPL headers.
- */
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
-{
-	unsigned int flits;
-
-	/*
-	 * If the skb is small enough, we can pump it out as a work request
-	 * with only immediate data.  In that case we just have to have the
-	 * TX Packet header plus the skb data in the Work Request.
-	 */
-	if (is_eth_imm(skb))
-		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
-				    sizeof(__be64));
-
-	/*
-	 * Otherwise, we're going to have to construct a Scatter/Gather List
-	 * of the skb body and fragments.  We also include the flits necessary
-	 * for the TX Packet Work Request and CPL.  We always have a firmware
-	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
-	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
-	 * message or, if we're doing a Large Send Offload, an LSO CPL message
-	 * with an embedded TX Packet Write CPL message.
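Tracing sgl_len() above for a packet with a linear head plus three page fragments (four SGL entries): after the n-- we have n = 3, so (3 * 3) / 2 = 4 flits cover the entry pairs, (3 & 1) = 1 pads the odd tail, and + 2 covers the header, Length0 and Address0, for 7 flits total. A couple of spot checks of that arithmetic:

static void example_sgl_len_checks(void)
{
	BUG_ON(sgl_len(2) != 4);	/* head + 1 fragment */
	BUG_ON(sgl_len(4) != 7);	/* head + 3 fragments */
}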
-	 */
-	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
-	if (skb_shinfo(skb)->gso_size)
-		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
-			  sizeof(struct cpl_tx_pkt_lso_core) +
-			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
-	else
-		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
-			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
-	return flits;
-}
-
-/**
- * write_sgl - populate a Scatter/Gather List for a packet
- * @skb: the packet
- * @tq: the TX queue we are writing into
- * @sgl: starting location for writing the SGL
- * @end: points right after the end of the SGL
- * @start: start offset into skb main-body data to include in the SGL
- * @addr: the list of DMA bus addresses for the SGL elements
- *
- * Generates a Scatter/Gather List for the buffers that make up a packet.
- * The caller must provide adequate space for the SGL that will be written.
- * The SGL includes all of the packet's page fragments and the data in its
- * main body except for the first @start bytes.  @sgl must be 16-byte
- * aligned and within a TX descriptor with available space.  @end points
- * right after the end of the SGL but does not account for any potential
- * wrap around, i.e., @end > @tq->stat.
- */
-static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
-		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
-		      const dma_addr_t *addr)
-{
-	unsigned int i, len;
-	struct ulptx_sge_pair *to;
-	const struct skb_shared_info *si = skb_shinfo(skb);
-	unsigned int nfrags = si->nr_frags;
-	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
-
-	len = skb_headlen(skb) - start;
-	if (likely(len)) {
-		sgl->len0 = htonl(len);
-		sgl->addr0 = cpu_to_be64(addr[0] + start);
-		nfrags++;
-	} else {
-		sgl->len0 = htonl(si->frags[0].size);
-		sgl->addr0 = cpu_to_be64(addr[1]);
-	}
-
-	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
-			      ULPTX_NSGE(nfrags));
-	if (likely(--nfrags == 0))
-		return;
-	/*
-	 * Most of the complexity below deals with the possibility we hit the
-	 * end of the queue in the middle of writing the SGL.  For this case
-	 * only we create the SGL in a temporary buffer and then copy it.
-	 */
-	to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
-
-	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
-		to->len[0] = cpu_to_be32(si->frags[i].size);
-		to->len[1] = cpu_to_be32(si->frags[++i].size);
-		to->addr[0] = cpu_to_be64(addr[i]);
-		to->addr[1] = cpu_to_be64(addr[++i]);
-	}
-	if (nfrags) {
-		to->len[0] = cpu_to_be32(si->frags[i].size);
-		to->len[1] = cpu_to_be32(0);
-		to->addr[0] = cpu_to_be64(addr[i + 1]);
-	}
-	if (unlikely((u8 *)end > (u8 *)tq->stat)) {
-		unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
-
-		if (likely(part0))
-			memcpy(sgl->sge, buf, part0);
-		part1 = (u8 *)end - (u8 *)tq->stat;
-		memcpy(tq->desc, (u8 *)buf + part0, part1);
-		end = (void *)tq->desc + part1;
-	}
-	if ((uintptr_t)end & 8)		/* 0-pad to multiple of 16 */
-		*(u64 *)end = 0;
-}
-
-/**
- * ring_tx_db - check and potentially ring a TX queue's doorbell
- * @adapter: the adapter
- * @tq: the TX queue
- * @n: number of new descriptors to give to HW
- *
- * Ring the doorbell for a TX queue.
- */
-static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
-			      int n)
-{
-	/*
-	 * Warn if we write doorbells with the wrong priority and write
-	 * descriptors before telling HW.
- */ - WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO); - wmb(); - t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, - QID(tq->cntxt_id) | PIDX(n)); -} - -/** - * inline_tx_skb - inline a packet's data into TX descriptors - * @skb: the packet - * @tq: the TX queue where the packet will be inlined - * @pos: starting position in the TX queue to inline the packet - * - * Inline a packet's contents directly into TX descriptors, starting at - * the given position within the TX DMA ring. - * Most of the complexity of this operation is dealing with wrap arounds - * in the middle of the packet we want to inline. - */ -static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq, - void *pos) -{ - u64 *p; - int left = (void *)tq->stat - pos; - - if (likely(skb->len <= left)) { - if (likely(!skb->data_len)) - skb_copy_from_linear_data(skb, pos, skb->len); - else - skb_copy_bits(skb, 0, pos, skb->len); - pos += skb->len; - } else { - skb_copy_bits(skb, 0, pos, left); - skb_copy_bits(skb, left, tq->desc, skb->len - left); - pos = (void *)tq->desc + (skb->len - left); - } - - /* 0-pad to multiple of 16 */ - p = PTR_ALIGN(pos, 8); - if ((uintptr_t)p & 8) - *p = 0; -} - -/* - * Figure out what HW csum a packet wants and return the appropriate control - * bits. - */ -static u64 hwcsum(const struct sk_buff *skb) -{ - int csum_type; - const struct iphdr *iph = ip_hdr(skb); - - if (iph->version == 4) { - if (iph->protocol == IPPROTO_TCP) - csum_type = TX_CSUM_TCPIP; - else if (iph->protocol == IPPROTO_UDP) - csum_type = TX_CSUM_UDPIP; - else { -nocsum: - /* - * unknown protocol, disable HW csum - * and hope a bad packet is detected - */ - return TXPKT_L4CSUM_DIS; - } - } else { - /* - * this doesn't work with extension headers - */ - const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; - - if (ip6h->nexthdr == IPPROTO_TCP) - csum_type = TX_CSUM_TCPIP6; - else if (ip6h->nexthdr == IPPROTO_UDP) - csum_type = TX_CSUM_UDPIP6; - else - goto nocsum; - } - - if (likely(csum_type >= TX_CSUM_TCPIP)) - return TXPKT_CSUM_TYPE(csum_type) | - TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | - TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); - else { - int start = skb_transport_offset(skb); - - return TXPKT_CSUM_TYPE(csum_type) | - TXPKT_CSUM_START(start) | - TXPKT_CSUM_LOC(start + skb->csum_offset); - } -} - -/* - * Stop an Ethernet TX queue and record that state change. - */ -static void txq_stop(struct sge_eth_txq *txq) -{ - netif_tx_stop_queue(txq->txq); - txq->q.stops++; -} - -/* - * Advance our software state for a TX queue by adding n in use descriptors. - */ -static inline void txq_advance(struct sge_txq *tq, unsigned int n) -{ - tq->in_use += n; - tq->pidx += n; - if (tq->pidx >= tq->size) - tq->pidx -= tq->size; -} - -/** - * t4vf_eth_xmit - add a packet to an Ethernet TX queue - * @skb: the packet - * @dev: the egress net device - * - * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. 
- */ -int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) -{ - u32 wr_mid; - u64 cntrl, *end; - int qidx, credits; - unsigned int flits, ndesc; - struct adapter *adapter; - struct sge_eth_txq *txq; - const struct port_info *pi; - struct fw_eth_tx_pkt_vm_wr *wr; - struct cpl_tx_pkt_core *cpl; - const struct skb_shared_info *ssi; - dma_addr_t addr[MAX_SKB_FRAGS + 1]; - const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + - sizeof(wr->ethmacsrc) + - sizeof(wr->ethtype) + - sizeof(wr->vlantci)); - - /* - * The chip minimum packet length is 10 octets but the firmware - * command that we are using requires that we copy the Ethernet header - * (including the VLAN tag) into the header so we reject anything - * smaller than that ... - */ - if (unlikely(skb->len < fw_hdr_copy_len)) - goto out_free; - - /* - * Figure out which TX Queue we're going to use. - */ - pi = netdev_priv(dev); - adapter = pi->adapter; - qidx = skb_get_queue_mapping(skb); - BUG_ON(qidx >= pi->nqsets); - txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; - - /* - * Take this opportunity to reclaim any TX Descriptors whose DMA - * transfers have completed. - */ - reclaim_completed_tx(adapter, &txq->q, true); - - /* - * Calculate the number of flits and TX Descriptors we're going to - * need along with how many TX Descriptors will be left over after - * we inject our Work Request. - */ - flits = calc_tx_flits(skb); - ndesc = flits_to_desc(flits); - credits = txq_avail(&txq->q) - ndesc; - - if (unlikely(credits < 0)) { - /* - * Not enough room for this packet's Work Request. Stop the - * TX Queue and return a "busy" condition. The queue will get - * started later on when the firmware informs us that space - * has opened up. - */ - txq_stop(txq); - dev_err(adapter->pdev_dev, - "%s: TX ring %u full while queue awake!\n", - dev->name, qidx); - return NETDEV_TX_BUSY; - } - - if (!is_eth_imm(skb) && - unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) { - /* - * We need to map the skb into PCI DMA space (because it can't - * be in-lined directly into the Work Request) and the mapping - * operation failed. Record the error and drop the packet. - */ - txq->mapping_err++; - goto out_free; - } - - wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); - if (unlikely(credits < ETHTXQ_STOP_THRES)) { - /* - * After we're done injecting the Work Request for this - * packet, we'll be below our "stop threshold" so stop the TX - * Queue now and schedule a request for an SGE Egress Queue - * Update message. The queue will get started later on when - * the firmware processes this Work Request and sends us an - * Egress Queue Status Update message indicating that space - * has opened up. - */ - txq_stop(txq); - wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; - } - - /* - * Start filling in our Work Request. Note that we do _not_ handle - * the WR Header wrapping around the TX Descriptor Ring. If our - * maximum header size ever exceeds one TX Descriptor, we'll need to - * do something else here. - */ - BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); - wr = (void *)&txq->q.desc[txq->q.pidx]; - wr->equiq_to_len16 = cpu_to_be32(wr_mid); - wr->r3[0] = cpu_to_be64(0); - wr->r3[1] = cpu_to_be64(0); - skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); - end = (u64 *)wr + flits; - - /* - * If this is a Large Send Offload packet we'll put in an LSO CPL - * message with an encapsulated TX Packet CPL message. Otherwise we - * just use a TX Packet CPL message. 
-	 */
-	ssi = skb_shinfo(skb);
-	if (ssi->gso_size) {
-		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
-		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
-		int l3hdr_len = skb_network_header_len(skb);
-		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
-
-		wr->op_immdlen =
-			cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
-				    FW_WR_IMMDLEN(sizeof(*lso) +
-						  sizeof(*cpl)));
-		/*
-		 * Fill in the LSO CPL message.
-		 */
-		lso->lso_ctrl =
-			cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
-				    LSO_FIRST_SLICE |
-				    LSO_LAST_SLICE |
-				    LSO_IPV6(v6) |
-				    LSO_ETHHDR_LEN(eth_xtra_len/4) |
-				    LSO_IPHDR_LEN(l3hdr_len/4) |
-				    LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
-		lso->ipid_ofst = cpu_to_be16(0);
-		lso->mss = cpu_to_be16(ssi->gso_size);
-		lso->seqno_offset = cpu_to_be32(0);
-		lso->len = cpu_to_be32(skb->len);
-
-		/*
-		 * Set up TX Packet CPL pointer, control word and perform
-		 * accounting.
-		 */
-		cpl = (void *)(lso + 1);
-		cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
-			 TXPKT_IPHDR_LEN(l3hdr_len) |
-			 TXPKT_ETHHDR_LEN(eth_xtra_len));
-		txq->tso++;
-		txq->tx_cso += ssi->gso_segs;
-	} else {
-		int len;
-
-		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
-		wr->op_immdlen =
-			cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
-				    FW_WR_IMMDLEN(len));
-
-		/*
-		 * Set up TX Packet CPL pointer, control word and perform
-		 * accounting.
-		 */
-		cpl = (void *)(wr + 1);
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
-			txq->tx_cso++;
-		} else
-			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
-	}
-
-	/*
-	 * If there's a VLAN tag present, add that to the list of things to
-	 * do in this Work Request.
-	 */
-	if (vlan_tx_tag_present(skb)) {
-		txq->vlan_ins++;
-		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
-	}
-
-	/*
-	 * Fill in the TX Packet CPL message header.
-	 */
-	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
-				 TXPKT_INTF(pi->port_id) |
-				 TXPKT_PF(0));
-	cpl->pack = cpu_to_be16(0);
-	cpl->len = cpu_to_be16(skb->len);
-	cpl->ctrl1 = cpu_to_be64(cntrl);
-
-#ifdef T4_TRACE
-	T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
-		  "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
-		  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
-#endif
-
-	/*
-	 * Fill in the body of the TX Packet CPL message with either in-lined
-	 * data or a Scatter/Gather List.
-	 */
-	if (is_eth_imm(skb)) {
-		/*
-		 * In-line the packet's data and free the skb since we don't
-		 * need it any longer.
-		 */
-		inline_tx_skb(skb, &txq->q, cpl + 1);
-		dev_kfree_skb(skb);
-	} else {
-		/*
-		 * Write the skb's Scatter/Gather list into the TX Packet CPL
-		 * message and retain a pointer to the skb so we can free it
-		 * later when its DMA completes.  (We store the skb pointer
-		 * in the Software Descriptor corresponding to the last TX
-		 * Descriptor used by the Work Request.)
-		 *
-		 * The retained skb will be freed when the corresponding TX
-		 * Descriptors are reclaimed after their DMAs complete.
-		 * However, this could take quite a while since, in general,
-		 * the hardware is set up to be lazy about sending DMA
-		 * completion notifications to us and we mostly perform TX
-		 * reclaims in the transmit routine.
-		 *
-		 * This is good for performance but means that we rely on new
-		 * TX packets arriving to run the destructors of completed
-		 * packets, which open up space in their sockets' send queues.
-		 * Sometimes we do not get such new packets causing TX to
-		 * stall.  A single UDP transmitter is a good example of this
-		 * situation.  We have a clean up timer that periodically
-		 * situation.  We have a cleanup timer that periodically
-		 * reclaims completed packets but it doesn't run often enough
-		 * (nor do we want it to) to prevent lengthy stalls.  A
-		 * solution to this problem is to run the destructor early,
-		 * after the packet is queued but before it's DMAd.  A con is
-		 * that we lie to socket memory accounting, but the amount of
-		 * extra memory is reasonable (limited by the number of TX
-		 * descriptors), the packets do actually get freed quickly by
-		 * new packets almost always, and for protocols like TCP that
-		 * wait for acks to really free up the data the extra memory
-		 * is even less.  On the positive side we run the destructors
-		 * on the sending CPU rather than on a potentially different
-		 * completing CPU, usually a good thing.
-		 *
-		 * Run the destructor before telling the DMA engine about the
-		 * packet to make sure it doesn't complete and get freed
-		 * prematurely.
-		 */
-		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
-		struct sge_txq *tq = &txq->q;
-		int last_desc;
-
-		/*
-		 * If the Work Request header was an exact multiple of our TX
-		 * Descriptor length, then it's possible that the starting SGL
-		 * pointer lines up exactly with the end of our TX Descriptor
-		 * ring.  If that's the case, wrap around to the beginning
-		 * here ...
-		 */
-		if (unlikely((void *)sgl == (void *)tq->stat)) {
-			sgl = (void *)tq->desc;
-			end = (void *)((void *)tq->desc +
-				       ((void *)end - (void *)tq->stat));
-		}
-
-		write_sgl(skb, tq, sgl, end, 0, addr);
-		skb_orphan(skb);
-
-		last_desc = tq->pidx + ndesc - 1;
-		if (last_desc >= tq->size)
-			last_desc -= tq->size;
-		tq->sdesc[last_desc].skb = skb;
-		tq->sdesc[last_desc].sgl = sgl;
-	}
-
-	/*
-	 * Advance our internal TX Queue state, tell the hardware about
-	 * the new TX descriptors and return success.
-	 */
-	txq_advance(&txq->q, ndesc);
-	dev->trans_start = jiffies;
-	ring_tx_db(adapter, &txq->q, ndesc);
-	return NETDEV_TX_OK;
-
-out_free:
-	/*
-	 * An error of some sort happened.  Free the TX skb and tell the
-	 * OS that we've "dealt" with the packet ...
-	 */
-	dev_kfree_skb(skb);
-	return NETDEV_TX_OK;
-}
-
-/**
- * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
- * @gl: the gather list
- * @skb_len: size of sk_buff main body if it carries fragments
- * @pull_len: amount of data to move to the sk_buff's main body
- *
- * Builds an sk_buff from the given packet gather list.  Returns the
- * sk_buff or %NULL if sk_buff allocation failed.
- */
-struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
-				  unsigned int skb_len, unsigned int pull_len)
-{
-	struct sk_buff *skb;
-	struct skb_shared_info *ssi;
-
-	/*
-	 * If the ingress packet is small enough, allocate an skb large enough
-	 * for all of the data and copy it inline.  Otherwise, allocate an skb
-	 * with enough room to pull in the header and reference the rest of
-	 * the data via the skb fragment list.
-	 *
-	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
-	 * buffer size, which is expected since buffers are at least
-	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
-	 * fragment.
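-	 *
-	 * As a worked illustration (assuming the driver's usual values of
-	 * RX_COPY_THRES = 256 and RX_PULL_LEN = 128), a 64-byte ARP reply
-	 * would be copied wholesale into the skb's linear data, while a
-	 * 1500-byte TCP segment would get a 128-byte linear header pulled
-	 * in plus page fragments referencing the remaining bytes.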
- */ - if (gl->tot_len <= RX_COPY_THRES) { - /* small packets have only one fragment */ - skb = alloc_skb(gl->tot_len, GFP_ATOMIC); - if (unlikely(!skb)) - goto out; - __skb_put(skb, gl->tot_len); - skb_copy_to_linear_data(skb, gl->va, gl->tot_len); - } else { - skb = alloc_skb(skb_len, GFP_ATOMIC); - if (unlikely(!skb)) - goto out; - __skb_put(skb, pull_len); - skb_copy_to_linear_data(skb, gl->va, pull_len); - - ssi = skb_shinfo(skb); - ssi->frags[0].page = gl->frags[0].page; - ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; - ssi->frags[0].size = gl->frags[0].size - pull_len; - if (gl->nfrags > 1) - memcpy(&ssi->frags[1], &gl->frags[1], - (gl->nfrags-1) * sizeof(skb_frag_t)); - ssi->nr_frags = gl->nfrags; - - skb->len = gl->tot_len; - skb->data_len = skb->len - pull_len; - skb->truesize += skb->data_len; - - /* Get a reference for the last page, we don't own it */ - get_page(gl->frags[gl->nfrags - 1].page); - } - -out: - return skb; -} - -/** - * t4vf_pktgl_free - free a packet gather list - * @gl: the gather list - * - * Releases the pages of a packet gather list. We do not own the last - * page on the list and do not free it. - */ -void t4vf_pktgl_free(const struct pkt_gl *gl) -{ - int frag; - - frag = gl->nfrags - 1; - while (frag--) - put_page(gl->frags[frag].page); -} - -/** - * copy_frags - copy fragments from gather list into skb_shared_info - * @si: destination skb shared info structure - * @gl: source internal packet gather list - * @offset: packet start offset in first page - * - * Copy an internal packet gather list into a Linux skb_shared_info - * structure. - */ -static inline void copy_frags(struct skb_shared_info *si, - const struct pkt_gl *gl, - unsigned int offset) -{ - unsigned int n; - - /* usually there's just one frag */ - si->frags[0].page = gl->frags[0].page; - si->frags[0].page_offset = gl->frags[0].page_offset + offset; - si->frags[0].size = gl->frags[0].size - offset; - si->nr_frags = gl->nfrags; - - n = gl->nfrags - 1; - if (n) - memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t)); - - /* get a reference to the last page, we don't own it */ - get_page(gl->frags[n].page); -} - -/** - * do_gro - perform Generic Receive Offload ingress packet processing - * @rxq: ingress RX Ethernet Queue - * @gl: gather list for ingress packet - * @pkt: CPL header for last packet fragment - * - * Perform Generic Receive Offload (GRO) ingress packet processing. - * We use the standard Linux GRO interfaces for this. 
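- *
- * The shape of that interface, as a minimal sketch (an illustration
- * only, not additional driver code): napi_get_frags() hands back the
- * per-NAPI fragment-list skb, the driver attaches its page fragments
- * and fixes up len/data_len/truesize, and napi_gro_frags() then feeds
- * the skb into the GRO coalescing machinery.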
- */
-static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
-		   const struct cpl_rx_pkt *pkt)
-{
-	int ret;
-	struct sk_buff *skb;
-
-	skb = napi_get_frags(&rxq->rspq.napi);
-	if (unlikely(!skb)) {
-		t4vf_pktgl_free(gl);
-		rxq->stats.rx_drops++;
-		return;
-	}
-
-	copy_frags(skb_shinfo(skb), gl, PKTSHIFT);
-	skb->len = gl->tot_len - PKTSHIFT;
-	skb->data_len = skb->len;
-	skb->truesize += skb->data_len;
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	skb_record_rx_queue(skb, rxq->rspq.idx);
-
-	if (pkt->vlan_ex)
-		__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
-	ret = napi_gro_frags(&rxq->rspq.napi);
-
-	if (ret == GRO_HELD)
-		rxq->stats.lro_pkts++;
-	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
-		rxq->stats.lro_merged++;
-	rxq->stats.pkts++;
-	rxq->stats.rx_cso++;
-}
-
-/**
- * t4vf_ethrx_handler - process an ingress ethernet packet
- * @rspq: the response queue that received the packet
- * @rsp: the response queue descriptor holding the RX_PKT message
- * @gl: the gather list of packet fragments
- *
- * Process an ingress ethernet packet and deliver it to the stack.
- */
-int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
-		       const struct pkt_gl *gl)
-{
-	struct sk_buff *skb;
-	const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
-	bool csum_ok = pkt->csum_calc && !pkt->err_vec;
-	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
-
-	/*
-	 * If this is a good TCP packet and we have Generic Receive Offload
-	 * enabled, handle the packet in the GRO path.
-	 */
-	if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
-	    (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
-	    !pkt->ip_frag) {
-		do_gro(rxq, gl, pkt);
-		return 0;
-	}
-
-	/*
-	 * Convert the Packet Gather List into an skb.
-	 */
-	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
-	if (unlikely(!skb)) {
-		t4vf_pktgl_free(gl);
-		rxq->stats.rx_drops++;
-		return 0;
-	}
-	__skb_pull(skb, PKTSHIFT);
-	skb->protocol = eth_type_trans(skb, rspq->netdev);
-	skb_record_rx_queue(skb, rspq->idx);
-	rxq->stats.pkts++;
-
-	if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
-	    !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
-		if (!pkt->ip_frag)
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-		else {
-			__sum16 c = (__force __sum16)pkt->csum;
-			skb->csum = csum_unfold(c);
-			skb->ip_summed = CHECKSUM_COMPLETE;
-		}
-		rxq->stats.rx_cso++;
-	} else
-		skb_checksum_none_assert(skb);
-
-	if (pkt->vlan_ex) {
-		rxq->stats.vlan_ex++;
-		__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
-	}
-
-	netif_receive_skb(skb);
-
-	return 0;
-}
-
-/**
- * is_new_response - check if a response is newly written
- * @rc: the response control descriptor
- * @rspq: the response queue
- *
- * Returns true if a response descriptor contains a yet unprocessed
- * response.
- */
-static inline bool is_new_response(const struct rsp_ctrl *rc,
-				   const struct sge_rspq *rspq)
-{
-	return RSPD_GEN(rc->type_gen) == rspq->gen;
-}
-
-/**
- * restore_rx_bufs - put back a packet's RX buffers
- * @gl: the packet gather list
- * @fl: the SGE Free List
- * @frags: how many fragments in @gl
- *
- * Called when we find out that the current packet, @gl, can't be
- * processed right away for some reason.  This is a very rare event and
- * there's no effort to make this suspension/resumption process
- * particularly efficient.
- *
- * We implement the suspension by putting all of the RX buffers associated
- * with the current packet back on the original Free List.  The buffers
- * have already been unmapped and are left unmapped; we mark them as
- * unmapped in order to prevent further unmapping attempts.  (Effectively
- * this function undoes the series of @unmap_rx_buf calls which were done
- * to create the current packet's gather list.)  This leaves us ready to
- * restart processing of the packet the next time we start processing the
- * RX Queue ...
- */
-static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
-			    int frags)
-{
-	struct rx_sw_desc *sdesc;
-
-	while (frags--) {
-		if (fl->cidx == 0)
-			fl->cidx = fl->size - 1;
-		else
-			fl->cidx--;
-		sdesc = &fl->sdesc[fl->cidx];
-		sdesc->page = gl->frags[frags].page;
-		sdesc->dma_addr |= RX_UNMAPPED_BUF;
-		fl->avail++;
-	}
-}
-
-/**
- * rspq_next - advance to the next entry in a response queue
- * @rspq: the queue
- *
- * Updates the state of a response queue to advance it to the next entry.
- */
-static inline void rspq_next(struct sge_rspq *rspq)
-{
-	rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
-	if (unlikely(++rspq->cidx == rspq->size)) {
-		rspq->cidx = 0;
-		rspq->gen ^= 1;
-		rspq->cur_desc = rspq->desc;
-	}
-}
-
-/**
- * process_responses - process responses from an SGE response queue
- * @rspq: the ingress response queue to process
- * @budget: how many responses can be processed in this round
- *
- * Process responses from a Scatter Gather Engine response queue up to
- * the supplied budget.  Responses include received packets as well as
- * control messages from firmware or hardware.
- *
- * Additionally choose the interrupt holdoff time for the next interrupt
- * on this queue.  If the system is under memory shortage use a fairly
- * long delay to help recovery.
- */
-int process_responses(struct sge_rspq *rspq, int budget)
-{
-	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
-	int budget_left = budget;
-
-	while (likely(budget_left)) {
-		int ret, rsp_type;
-		const struct rsp_ctrl *rc;
-
-		rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
-		if (!is_new_response(rc, rspq))
-			break;
-
-		/*
-		 * Figure out what kind of response we've received from the
-		 * SGE.
-		 */
-		rmb();
-		rsp_type = RSPD_TYPE(rc->type_gen);
-		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
-			skb_frag_t *fp;
-			struct pkt_gl gl;
-			const struct rx_sw_desc *sdesc;
-			u32 bufsz, frag;
-			u32 len = be32_to_cpu(rc->pldbuflen_qid);
-
-			/*
-			 * If we get a "new buffer" message from the SGE we
-			 * need to move on to the next Free List buffer.
-			 */
-			if (len & RSPD_NEWBUF) {
-				/*
-				 * We get one "new buffer" message when we
-				 * first start up a queue so we need to ignore
-				 * it when our offset into the buffer is 0.
-				 */
-				if (likely(rspq->offset > 0)) {
-					free_rx_bufs(rspq->adapter, &rxq->fl,
-						     1);
-					rspq->offset = 0;
-				}
-				len = RSPD_LEN(len);
-			}
-			gl.tot_len = len;
-
-			/*
-			 * Gather packet fragments.
-			 */
-			for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
-				BUG_ON(frag >= MAX_SKB_FRAGS);
-				BUG_ON(rxq->fl.avail == 0);
-				sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
-				bufsz = get_buf_size(sdesc);
-				fp->page = sdesc->page;
-				fp->page_offset = rspq->offset;
-				fp->size = min(bufsz, len);
-				len -= fp->size;
-				if (!len)
-					break;
-				unmap_rx_buf(rspq->adapter, &rxq->fl);
-			}
-			gl.nfrags = frag+1;
-
-			/*
-			 * Last buffer remains mapped so explicitly make it
-			 * coherent for CPU access and start preloading first
-			 * cache line ...
-			 */
-			dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
-						get_buf_addr(sdesc),
-						fp->size, DMA_FROM_DEVICE);
-			gl.va = (page_address(gl.frags[0].page) +
-				 gl.frags[0].page_offset);
-			prefetch(gl.va);
-
-			/*
-			 * Hand the new ingress packet to the handler for
-			 * this Response Queue.
-			 */
-			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
-			if (likely(ret == 0))
-				rspq->offset += ALIGN(fp->size, FL_ALIGN);
-			else
-				restore_rx_bufs(&gl, &rxq->fl, frag);
-		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
-			ret = rspq->handler(rspq, rspq->cur_desc, NULL);
-		} else {
-			WARN_ON(rsp_type > RSP_TYPE_CPL);
-			ret = 0;
-		}
-
-		if (unlikely(ret)) {
-			/*
-			 * Couldn't process descriptor, back off for recovery.
-			 * We use the SGE's last timer which has the longest
-			 * interrupt coalescing value ...
-			 */
-			const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
-			rspq->next_intr_params =
-				QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
-			break;
-		}
-
-		rspq_next(rspq);
-		budget_left--;
-	}
-
-	/*
-	 * If this is a Response Queue with an associated Free List and
-	 * at least two Egress Queue units available in the Free List
-	 * for new buffer pointers, refill the Free List.
-	 */
-	if (rspq->offset >= 0 &&
-	    rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
-		__refill_fl(rspq->adapter, &rxq->fl);
-	return budget - budget_left;
-}
-
-/**
- * napi_rx_handler - the NAPI handler for RX processing
- * @napi: the napi instance
- * @budget: how many packets we can process in this round
- *
- * Handler for new data events when using NAPI.  This does not need any
- * locking or protection from interrupts as data interrupts are off at
- * this point and other adapter interrupts do not interfere (the latter
- * is not a concern at all with MSI-X as non-data interrupts then have
- * a separate handler).
- */
-static int napi_rx_handler(struct napi_struct *napi, int budget)
-{
-	unsigned int intr_params;
-	struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
-	int work_done = process_responses(rspq, budget);
-
-	if (likely(work_done < budget)) {
-		napi_complete(napi);
-		intr_params = rspq->next_intr_params;
-		rspq->next_intr_params = rspq->intr_params;
-	} else
-		intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
-
-	if (unlikely(work_done == 0))
-		rspq->unhandled_irqs++;
-
-	t4_write_reg(rspq->adapter,
-		     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-		     CIDXINC(work_done) |
-		     INGRESSQID((u32)rspq->cntxt_id) |
-		     SEINTARM(intr_params));
-	return work_done;
-}
-
-/*
- * The MSI-X interrupt handler for an SGE response queue for the NAPI case
- * (i.e., response queue serviced by NAPI polling).
- */
-irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
-{
-	struct sge_rspq *rspq = cookie;
-
-	napi_schedule(&rspq->napi);
-	return IRQ_HANDLED;
-}
-
-/*
- * Process the indirect interrupt entries in the interrupt queue and kick off
- * NAPI for each queue that has generated an entry.
- */
-static unsigned int process_intrq(struct adapter *adapter)
-{
-	struct sge *s = &adapter->sge;
-	struct sge_rspq *intrq = &s->intrq;
-	unsigned int work_done;
-
-	spin_lock(&adapter->sge.intrq_lock);
-	for (work_done = 0; ; work_done++) {
-		const struct rsp_ctrl *rc;
-		unsigned int qid, iq_idx;
-		struct sge_rspq *rspq;
-
-		/*
-		 * Grab the next response from the interrupt queue and bail
-		 * out if it's not a new response.
-		 */
-		rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
-		if (!is_new_response(rc, intrq))
-			break;
-
-		/*
-		 * If the response isn't a forwarded interrupt message issue an
-		 * error and go on to the next response message.  This should
-		 * never happen ...
-		 */
-		rmb();
-		if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
-			dev_err(adapter->pdev_dev,
-				"Unexpected INTRQ response type %d\n",
-				RSPD_TYPE(rc->type_gen));
-			continue;
-		}
-
-		/*
-		 * Extract the Queue ID from the interrupt message and perform
-		 * sanity checking to make sure it really refers to one of our
-		 * Ingress Queues which is active and matches the queue's ID.
-		 * None of these error conditions should ever happen so we may
-		 * want to make them fatal and/or conditionalize them under
-		 * DEBUG.
-		 */
-		qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
-		iq_idx = IQ_IDX(s, qid);
-		if (unlikely(iq_idx >= MAX_INGQ)) {
-			dev_err(adapter->pdev_dev,
-				"Ingress QID %d out of range\n", qid);
-			continue;
-		}
-		rspq = s->ingr_map[iq_idx];
-		if (unlikely(rspq == NULL)) {
-			dev_err(adapter->pdev_dev,
-				"Ingress QID %d RSPQ=NULL\n", qid);
-			continue;
-		}
-		if (unlikely(rspq->abs_id != qid)) {
-			dev_err(adapter->pdev_dev,
-				"Ingress QID %d refers to RSPQ %d\n",
-				qid, rspq->abs_id);
-			continue;
-		}
-
-		/*
-		 * Schedule NAPI processing on the indicated Response Queue
-		 * and move on to the next entry in the Forwarded Interrupt
-		 * Queue.
-		 */
-		napi_schedule(&rspq->napi);
-		rspq_next(intrq);
-	}
-
-	t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
-		     CIDXINC(work_done) |
-		     INGRESSQID(intrq->cntxt_id) |
-		     SEINTARM(intrq->intr_params));
-
-	spin_unlock(&adapter->sge.intrq_lock);
-
-	return work_done;
-}
-
-/*
- * The MSI interrupt handler handles data events from SGE response queues as
- * well as error and other async events as they all use the same MSI vector.
- */
-irqreturn_t t4vf_intr_msi(int irq, void *cookie)
-{
-	struct adapter *adapter = cookie;
-
-	process_intrq(adapter);
-	return IRQ_HANDLED;
-}
-
-/**
- * t4vf_intr_handler - select the top-level interrupt handler
- * @adapter: the adapter
- *
- * Selects the top-level interrupt handler based on the type of interrupts
- * (MSI-X or MSI).
- */
-irq_handler_t t4vf_intr_handler(struct adapter *adapter)
-{
-	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
-	if (adapter->flags & USING_MSIX)
-		return t4vf_sge_intr_msix;
-	else
-		return t4vf_intr_msi;
-}
-
-/**
- * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
- * @data: the adapter
- *
- * Runs periodically from a timer to perform maintenance of SGE RX queues.
- *
- * a) Replenishes RX queues that have run out due to memory shortage.
- * Normally new RX buffers are added when existing ones are consumed but
- * when out of memory a queue can become empty.  We schedule NAPI to do
- * the actual refill.
- */
-static void sge_rx_timer_cb(unsigned long data)
-{
-	struct adapter *adapter = (struct adapter *)data;
-	struct sge *s = &adapter->sge;
-	unsigned int i;
-
-	/*
-	 * Scan the "Starving Free Lists" flag array looking for any Free
-	 * Lists in need of more free buffers.  If we find one and it's not
-	 * being actively polled, then bump its "starving" counter and attempt
-	 * to refill it.  If we're successful in adding enough buffers to push
-	 * the Free List over the starving threshold, then we can clear its
-	 * "starving" status.
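-	 *
-	 * The scan below uses the standard lowest-set-bit idiom: __ffs(m)
-	 * names the lowest set bit and "m &= m - 1" clears it.  As a worked
-	 * example (an illustration, not real driver state), a word m = 0x16
-	 * (bits 1, 2 and 4 set) is visited as bit 1, then bit 2, then bit 4,
-	 * after which m reaches zero and the inner loop terminates.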
-	 */
-	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
-		unsigned long m;
-
-		for (m = s->starving_fl[i]; m; m &= m - 1) {
-			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
-			struct sge_fl *fl = s->egr_map[id];
-
-			clear_bit(id, s->starving_fl);
-			smp_mb__after_clear_bit();
-
-			/*
-			 * Since we are accessing fl without a lock there's a
-			 * small probability of a false positive where we
-			 * schedule napi but the FL is no longer starving.
-			 * No biggie.
-			 */
-			if (fl_starving(fl)) {
-				struct sge_eth_rxq *rxq;
-
-				rxq = container_of(fl, struct sge_eth_rxq, fl);
-				if (napi_reschedule(&rxq->rspq.napi))
-					fl->starving++;
-				else
-					set_bit(id, s->starving_fl);
-			}
-		}
-	}
-
-	/*
-	 * Reschedule the next scan for starving Free Lists ...
-	 */
-	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
-}
-
-/**
- * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
- * @data: the adapter
- *
- * Runs periodically from a timer to perform maintenance of SGE TX queues.
- *
- * b) Reclaims completed Tx packets for the Ethernet queues.  Normally
- * packets are cleaned up by new Tx packets; this timer cleans up packets
- * when no new packets are being submitted.  This is essential for pktgen,
- * at least.
- */
-static void sge_tx_timer_cb(unsigned long data)
-{
-	struct adapter *adapter = (struct adapter *)data;
-	struct sge *s = &adapter->sge;
-	unsigned int i, budget;
-
-	budget = MAX_TIMER_TX_RECLAIM;
-	i = s->ethtxq_rover;
-	do {
-		struct sge_eth_txq *txq = &s->ethtxq[i];
-
-		if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
-			int avail = reclaimable(&txq->q);
-
-			if (avail > budget)
-				avail = budget;
-
-			free_tx_desc(adapter, &txq->q, avail, true);
-			txq->q.in_use -= avail;
-			__netif_tx_unlock(txq->txq);
-
-			budget -= avail;
-			if (!budget)
-				break;
-		}
-
-		i++;
-		if (i >= s->ethqsets)
-			i = 0;
-	} while (i != s->ethtxq_rover);
-	s->ethtxq_rover = i;
-
-	/*
-	 * If we found too many reclaimable packets schedule a timer in the
-	 * near future to continue where we left off.  Otherwise the next timer
-	 * will be at its normal interval.
-	 */
-	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
-}
-
-/**
- * t4vf_sge_alloc_rxq - allocate an SGE RX Queue
- * @adapter: the adapter
- * @rspq: pointer to the new rxq's Response Queue to be filled in
- * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
- * @dev: the network device associated with the new rspq
- * @intr_dest: MSI-X vector index (overridden in MSI mode)
- * @fl: pointer to the new rxq's Free List to be filled in
- * @hnd: the interrupt handler to invoke for the rspq
- */
-int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
-		       bool iqasynch, struct net_device *dev,
-		       int intr_dest,
-		       struct sge_fl *fl, rspq_handler_t hnd)
-{
-	struct port_info *pi = netdev_priv(dev);
-	struct fw_iq_cmd cmd, rpl;
-	int ret, iqandst, flsz = 0;
-
-	/*
-	 * If we're using MSI interrupts and we're not initializing the
-	 * Forwarded Interrupt Queue itself, then set up this queue for
-	 * indirect interrupts to the Forwarded Interrupt Queue.  Obviously
-	 * the Forwarded Interrupt Queue must be set up before any other
-	 * ingress queue ...
-	 */
-	if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
-		iqandst = SGE_INTRDST_IQ;
-		intr_dest = adapter->sge.intrq.abs_id;
-	} else
-		iqandst = SGE_INTRDST_PCI;
-
-	/*
-	 * Allocate the hardware ring for the Response Queue.
The size needs - * to be a multiple of 16 which includes the mandatory status entry - * (regardless of whether the Status Page capabilities are enabled or - * not). - */ - rspq->size = roundup(rspq->size, 16); - rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len, - 0, &rspq->phys_addr, NULL, 0); - if (!rspq->desc) - return -ENOMEM; - - /* - * Fill in the Ingress Queue Command. Note: Ideally this code would - * be in t4vf_hw.c but there are so many parameters and dependencies - * on our Linux SGE state that we would end up having to pass tons of - * parameters. We'll have to think about how this might be migrated - * into OS-independent common code ... - */ - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC | - FW_IQ_CMD_IQSTART(1) | - FW_LEN16(cmd)); - cmd.type_to_iqandstindex = - cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | - FW_IQ_CMD_IQASYNCH(iqasynch) | - FW_IQ_CMD_VIID(pi->viid) | - FW_IQ_CMD_IQANDST(iqandst) | - FW_IQ_CMD_IQANUS(1) | - FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) | - FW_IQ_CMD_IQANDSTINDEX(intr_dest)); - cmd.iqdroprss_to_iqesize = - cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) | - FW_IQ_CMD_IQGTSMODE | - FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) | - FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4)); - cmd.iqsize = cpu_to_be16(rspq->size); - cmd.iqaddr = cpu_to_be64(rspq->phys_addr); - - if (fl) { - /* - * Allocate the ring for the hardware free list (with space - * for its status page) along with the associated software - * descriptor ring. The free list size needs to be a multiple - * of the Egress Queue Unit. - */ - fl->size = roundup(fl->size, FL_PER_EQ_UNIT); - fl->desc = alloc_ring(adapter->pdev_dev, fl->size, - sizeof(__be64), sizeof(struct rx_sw_desc), - &fl->addr, &fl->sdesc, STAT_LEN); - if (!fl->desc) { - ret = -ENOMEM; - goto err; - } - - /* - * Calculate the size of the hardware free list ring plus - * Status Page (which the SGE will place after the end of the - * free list ring) in Egress Queue Units. - */ - flsz = (fl->size / FL_PER_EQ_UNIT + - STAT_LEN / EQ_UNIT); - - /* - * Fill in all the relevant firmware Ingress Queue Command - * fields for the free list. - */ - cmd.iqns_to_fl0congen = - cpu_to_be32( - FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) | - FW_IQ_CMD_FL0PACKEN | - FW_IQ_CMD_FL0PADEN); - cmd.fl0dcaen_to_fl0cidxfthresh = - cpu_to_be16( - FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) | - FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B)); - cmd.fl0size = cpu_to_be16(flsz); - cmd.fl0addr = cpu_to_be64(fl->addr); - } - - /* - * Issue the firmware Ingress Queue Command and extract the results if - * it completes successfully. - */ - ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); - if (ret) - goto err; - - netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64); - rspq->cur_desc = rspq->desc; - rspq->cidx = 0; - rspq->gen = 1; - rspq->next_intr_params = rspq->intr_params; - rspq->cntxt_id = be16_to_cpu(rpl.iqid); - rspq->abs_id = be16_to_cpu(rpl.physiqid); - rspq->size--; /* subtract status entry */ - rspq->adapter = adapter; - rspq->netdev = dev; - rspq->handler = hnd; - - /* set offset to -1 to distinguish ingress queues without FL */ - rspq->offset = fl ? 
0 : -1;
-
-	if (fl) {
-		fl->cntxt_id = be16_to_cpu(rpl.fl0id);
-		fl->avail = 0;
-		fl->pend_cred = 0;
-		fl->pidx = 0;
-		fl->cidx = 0;
-		fl->alloc_failed = 0;
-		fl->large_alloc_failed = 0;
-		fl->starving = 0;
-		refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
-	}
-
-	return 0;
-
-err:
-	/*
-	 * An error occurred.  Clean up our partial allocation state and
-	 * return the error.
-	 */
-	if (rspq->desc) {
-		dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
-				  rspq->desc, rspq->phys_addr);
-		rspq->desc = NULL;
-	}
-	if (fl && fl->desc) {
-		kfree(fl->sdesc);
-		fl->sdesc = NULL;
-		dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
-				  fl->desc, fl->addr);
-		fl->desc = NULL;
-	}
-	return ret;
-}
-
-/**
- * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
- * @adapter: the adapter
- * @txq: pointer to the new txq to be filled in
- * @dev: the network device associated with the new txq
- * @devq: the network TX queue associated with the new txq
- * @iqid: the relative ingress queue ID to which events relating to
- *	the new txq should be directed
- */
-int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
-			   struct net_device *dev, struct netdev_queue *devq,
-			   unsigned int iqid)
-{
-	int ret, nentries;
-	struct fw_eq_eth_cmd cmd, rpl;
-	struct port_info *pi = netdev_priv(dev);
-
-	/*
-	 * Calculate the size of the hardware TX Queue (including the Status
-	 * Page on the end of the TX Queue) in units of TX Descriptors.
-	 */
-	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
-
-	/*
-	 * Allocate the hardware ring for the TX ring (with space for its
-	 * status page) along with the associated software descriptor ring.
-	 */
-	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
-				 sizeof(struct tx_desc),
-				 sizeof(struct tx_sw_desc),
-				 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
-	if (!txq->q.desc)
-		return -ENOMEM;
-
-	/*
-	 * Fill in the Egress Queue Command.  Note: As with the direct use of
-	 * the firmware Ingress Queue Command above in our RXQ allocation
-	 * routine, ideally, this code would be in t4vf_hw.c.  Again, we'll
-	 * have to see if there's some reasonable way to parameterize it
-	 * into the common code ...
-	 */
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
-				    FW_CMD_REQUEST |
-				    FW_CMD_WRITE |
-				    FW_CMD_EXEC);
-	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
-					 FW_EQ_ETH_CMD_EQSTART |
-					 FW_LEN16(cmd));
-	cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
-	cmd.fetchszm_to_iqid =
-		cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
-			    FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
-			    FW_EQ_ETH_CMD_IQID(iqid));
-	cmd.dcaen_to_eqsize =
-		cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
-			    FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
-			    FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
-			    FW_EQ_ETH_CMD_EQSIZE(nentries));
-	cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
-
-	/*
-	 * Issue the firmware Egress Queue Command and extract the results if
-	 * it completes successfully.
-	 */
-	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
-	if (ret) {
-		/*
-		 * The firmware Egress Queue Command failed for some reason.
-		 * Free up our partial allocation state and return the error.
- */ - kfree(txq->q.sdesc); - txq->q.sdesc = NULL; - dma_free_coherent(adapter->pdev_dev, - nentries * sizeof(struct tx_desc), - txq->q.desc, txq->q.phys_addr); - txq->q.desc = NULL; - return ret; - } - - txq->q.in_use = 0; - txq->q.cidx = 0; - txq->q.pidx = 0; - txq->q.stat = (void *)&txq->q.desc[txq->q.size]; - txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd)); - txq->q.abs_id = - FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd)); - txq->txq = devq; - txq->tso = 0; - txq->tx_cso = 0; - txq->vlan_ins = 0; - txq->q.stops = 0; - txq->q.restarts = 0; - txq->mapping_err = 0; - return 0; -} - -/* - * Free the DMA map resources associated with a TX queue. - */ -static void free_txq(struct adapter *adapter, struct sge_txq *tq) -{ - dma_free_coherent(adapter->pdev_dev, - tq->size * sizeof(*tq->desc) + STAT_LEN, - tq->desc, tq->phys_addr); - tq->cntxt_id = 0; - tq->sdesc = NULL; - tq->desc = NULL; -} - -/* - * Free the resources associated with a response queue (possibly including a - * free list). - */ -static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, - struct sge_fl *fl) -{ - unsigned int flid = fl ? fl->cntxt_id : 0xffff; - - t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, - rspq->cntxt_id, flid, 0xffff); - dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len, - rspq->desc, rspq->phys_addr); - netif_napi_del(&rspq->napi); - rspq->netdev = NULL; - rspq->cntxt_id = 0; - rspq->abs_id = 0; - rspq->desc = NULL; - - if (fl) { - free_rx_bufs(adapter, fl, fl->avail); - dma_free_coherent(adapter->pdev_dev, - fl->size * sizeof(*fl->desc) + STAT_LEN, - fl->desc, fl->addr); - kfree(fl->sdesc); - fl->sdesc = NULL; - fl->cntxt_id = 0; - fl->desc = NULL; - } -} - -/** - * t4vf_free_sge_resources - free SGE resources - * @adapter: the adapter - * - * Frees resources used by the SGE queue sets. - */ -void t4vf_free_sge_resources(struct adapter *adapter) -{ - struct sge *s = &adapter->sge; - struct sge_eth_rxq *rxq = s->ethrxq; - struct sge_eth_txq *txq = s->ethtxq; - struct sge_rspq *evtq = &s->fw_evtq; - struct sge_rspq *intrq = &s->intrq; - int qs; - - for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) { - if (rxq->rspq.desc) - free_rspq_fl(adapter, &rxq->rspq, &rxq->fl); - if (txq->q.desc) { - t4vf_eth_eq_free(adapter, txq->q.cntxt_id); - free_tx_desc(adapter, &txq->q, txq->q.in_use, true); - kfree(txq->q.sdesc); - free_txq(adapter, &txq->q); - } - } - if (evtq->desc) - free_rspq_fl(adapter, evtq, NULL); - if (intrq->desc) - free_rspq_fl(adapter, intrq, NULL); -} - -/** - * t4vf_sge_start - enable SGE operation - * @adapter: the adapter - * - * Start tasklets and timers associated with the DMA engine. - */ -void t4vf_sge_start(struct adapter *adapter) -{ - adapter->sge.ethtxq_rover = 0; - mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); - mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); -} - -/** - * t4vf_sge_stop - disable SGE operation - * @adapter: the adapter - * - * Stop tasklets and timers associated with the DMA engine. Note that - * this is effective only if measures have been taken to disable any HW - * events that may restart them. - */ -void t4vf_sge_stop(struct adapter *adapter) -{ - struct sge *s = &adapter->sge; - - if (s->rx_timer.function) - del_timer_sync(&s->rx_timer); - if (s->tx_timer.function) - del_timer_sync(&s->tx_timer); -} - -/** - * t4vf_sge_init - initialize SGE - * @adapter: the adapter - * - * Performs SGE initialization needed every time after a chip reset. 
- * We do not initialize any of the queue sets here; instead the driver
- * top-level must request those individually.  We also do not enable DMA
- * here; that should be done after the queues have been set up.
- */
-int t4vf_sge_init(struct adapter *adapter)
-{
-	struct sge_params *sge_params = &adapter->params.sge;
-	u32 fl0 = sge_params->sge_fl_buffer_size[0];
-	u32 fl1 = sge_params->sge_fl_buffer_size[1];
-	struct sge *s = &adapter->sge;
-
-	/*
-	 * Start by vetting the basic SGE parameters which have been set up by
-	 * the Physical Function Driver.  Ideally we should be able to deal
-	 * with _any_ configuration.  Practice is different ...
-	 */
-	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
-		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
-			fl0, fl1);
-		return -EINVAL;
-	}
-	if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
-		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Now translate the adapter parameters into our internal forms.
-	 */
-	if (fl1)
-		FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
-	STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
-	PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
-	FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
-			 SGE_INGPADBOUNDARY_SHIFT);
-
-	/*
-	 * Set up tasklet timers.
-	 */
-	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
-	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
-
-	/*
-	 * Initialize Forwarded Interrupt Queue lock.
-	 */
-	spin_lock_init(&s->intrq_lock);
-
-	return 0;
-}
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
deleted file mode 100644
index a65c80ae..0000000
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
- * driver for Linux.
- *
- * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __T4VF_COMMON_H__
-#define __T4VF_COMMON_H__
-
-#include "../cxgb4/t4fw_api.h"
-
-/*
- * The "len16" field of a Firmware Command Structure ...
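- *
- * Firmware commands express their length in 16-byte units.  As a
- * worked example (assuming the usual 16-byte struct fw_reset_cmd),
- * FW_LEN16(cmd) in t4vf_fw_reset() later in this patch evaluates to
- * FW_CMD_LEN16(1).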
- */ -#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) - -/* - * Per-VF statistics. - */ -struct t4vf_port_stats { - /* - * TX statistics. - */ - u64 tx_bcast_bytes; /* broadcast */ - u64 tx_bcast_frames; - u64 tx_mcast_bytes; /* multicast */ - u64 tx_mcast_frames; - u64 tx_ucast_bytes; /* unicast */ - u64 tx_ucast_frames; - u64 tx_drop_frames; /* TX dropped frames */ - u64 tx_offload_bytes; /* offload */ - u64 tx_offload_frames; - - /* - * RX statistics. - */ - u64 rx_bcast_bytes; /* broadcast */ - u64 rx_bcast_frames; - u64 rx_mcast_bytes; /* multicast */ - u64 rx_mcast_frames; - u64 rx_ucast_bytes; - u64 rx_ucast_frames; /* unicast */ - - u64 rx_err_frames; /* RX error frames */ -}; - -/* - * Per-"port" (Virtual Interface) link configuration ... - */ -struct link_config { - unsigned int supported; /* link capabilities */ - unsigned int advertising; /* advertised capabilities */ - unsigned short requested_speed; /* speed user has requested */ - unsigned short speed; /* actual link speed */ - unsigned char requested_fc; /* flow control user has requested */ - unsigned char fc; /* actual link flow control */ - unsigned char autoneg; /* autonegotiating? */ - unsigned char link_ok; /* link up? */ -}; - -enum { - PAUSE_RX = 1 << 0, - PAUSE_TX = 1 << 1, - PAUSE_AUTONEG = 1 << 2 -}; - -/* - * General device parameters ... - */ -struct dev_params { - u32 fwrev; /* firmware version */ - u32 tprev; /* TP Microcode Version */ -}; - -/* - * Scatter Gather Engine parameters. These are almost all determined by the - * Physical Function Driver. We just need to grab them to see within which - * environment we're playing ... - */ -struct sge_params { - u32 sge_control; /* padding, boundaries, lengths, etc. */ - u32 sge_host_page_size; /* RDMA page sizes */ - u32 sge_queues_per_page; /* RDMA queues/page */ - u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ - u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ - u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ - u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ - u32 sge_timer_value_2_and_3; - u32 sge_timer_value_4_and_5; -}; - -/* - * Vital Product Data parameters. - */ -struct vpd_params { - u32 cclk; /* Core Clock (KHz) */ -}; - -/* - * Global Receive Side Scaling (RSS) parameters in host-native format. - */ -struct rss_params { - unsigned int mode; /* RSS mode */ - union { - struct { - unsigned int synmapen:1; /* SYN Map Enable */ - unsigned int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */ - unsigned int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */ - unsigned int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */ - unsigned int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */ - unsigned int ofdmapen:1; /* Offload Map Enable */ - unsigned int tnlmapen:1; /* Tunnel Map Enable */ - unsigned int tnlalllookup:1; /* Tunnel All Lookup */ - unsigned int hashtoeplitz:1; /* use Toeplitz hash */ - } basicvirtual; - } u; -}; - -/* - * Virtual Interface RSS Configuration in host-native format. 
- */ -union rss_vi_config { - struct { - u16 defaultq; /* Ingress Queue ID for !tnlalllookup */ - unsigned int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */ - unsigned int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */ - unsigned int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */ - unsigned int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */ - int udpen; /* hash 4-tuple UDP ingress packets */ - } basicvirtual; -}; - -/* - * Maximum resources provisioned for a PCI VF. - */ -struct vf_resources { - unsigned int nvi; /* N virtual interfaces */ - unsigned int neq; /* N egress Qs */ - unsigned int nethctrl; /* N egress ETH or CTRL Qs */ - unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ - unsigned int niq; /* N ingress Qs */ - unsigned int tc; /* PCI-E traffic class */ - unsigned int pmask; /* port access rights mask */ - unsigned int nexactf; /* N exact MPS filters */ - unsigned int r_caps; /* read capabilities */ - unsigned int wx_caps; /* write/execute capabilities */ -}; - -/* - * Per-"adapter" (Virtual Function) parameters. - */ -struct adapter_params { - struct dev_params dev; /* general device parameters */ - struct sge_params sge; /* Scatter Gather Engine */ - struct vpd_params vpd; /* Vital Product Data */ - struct rss_params rss; /* Receive Side Scaling */ - struct vf_resources vfres; /* Virtual Function Resource limits */ - u8 nports; /* # of Ethernet "ports" */ -}; - -#include "adapter.h" - -#ifndef PCI_VENDOR_ID_CHELSIO -# define PCI_VENDOR_ID_CHELSIO 0x1425 -#endif - -#define for_each_port(adapter, iter) \ - for (iter = 0; iter < (adapter)->params.nports; iter++) - -static inline bool is_10g_port(const struct link_config *lc) -{ - return (lc->supported & SUPPORTED_10000baseT_Full) != 0; -} - -static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) -{ - return adapter->params.vpd.cclk / 1000; -} - -static inline unsigned int us_to_core_ticks(const struct adapter *adapter, - unsigned int us) -{ - return (us * adapter->params.vpd.cclk) / 1000; -} - -static inline unsigned int core_ticks_to_us(const struct adapter *adapter, - unsigned int ticks) -{ - return (ticks * 1000) / adapter->params.vpd.cclk; -} - -int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool); - -static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd, - int size, void *rpl) -{ - return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true); -} - -static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, - int size, void *rpl) -{ - return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); -} - -int __devinit t4vf_wait_dev_ready(struct adapter *); -int __devinit t4vf_port_init(struct adapter *, int); - -int t4vf_fw_reset(struct adapter *); -int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); -int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); - -int t4vf_get_sge_params(struct adapter *); -int t4vf_get_vpd_params(struct adapter *); -int t4vf_get_dev_params(struct adapter *); -int t4vf_get_rss_glb_config(struct adapter *); -int t4vf_get_vfres(struct adapter *); - -int t4vf_read_rss_vi_config(struct adapter *, unsigned int, - union rss_vi_config *); -int t4vf_write_rss_vi_config(struct adapter *, unsigned int, - union rss_vi_config *); -int t4vf_config_rss_range(struct adapter *, unsigned int, int, int, - const u16 *, int); - -int t4vf_alloc_vi(struct adapter *, int); -int t4vf_free_vi(struct adapter *, int); -int t4vf_enable_vi(struct adapter *, unsigned 
int, bool, bool); -int t4vf_identify_port(struct adapter *, unsigned int, unsigned int); - -int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int, - bool); -int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int, - const u8 **, u16 *, u64 *, bool); -int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool); -int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool); -int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *); - -int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int, - unsigned int); -int t4vf_eth_eq_free(struct adapter *, unsigned int); - -int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); - -#endif /* __T4VF_COMMON_H__ */ diff --git a/drivers/net/cxgb4vf/t4vf_defs.h b/drivers/net/cxgb4vf/t4vf_defs.h deleted file mode 100644 index c7b127d..0000000 --- a/drivers/net/cxgb4vf/t4vf_defs.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet - * driver for Linux. - * - * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef __T4VF_DEFS_H__ -#define __T4VF_DEFS_H__ - -#include "../cxgb4/t4_regs.h" - -/* - * The VF Register Map. - * - * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local - * bus module (PL) and CPU Interface Module (CIM) components are mapped via - * the Slice to Module Map Table (see below) in the Physical Function Register - * Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base - * and Offset registers in the PF Register Map. The MBDATA base address is - * quite constrained as it determines the Mailbox Data addresses for both PFs - * and VFs, and therefore must fit in both the VF and PF Register Maps without - * overlapping other registers. 
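- *
- * As a quick sanity check on the layout (arithmetic drawn from the
- * #defines which follow): the eight 64-bit Mailbox Data registers
- * occupy 64 bytes starting at 0x240, fitting cleanly between the PL
- * window at 0x200 and the CIM window at 0x300 within the 0x400-byte
- * VF register map.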
- */
-#define T4VF_SGE_BASE_ADDR	0x0000
-#define T4VF_MPS_BASE_ADDR	0x0100
-#define T4VF_PL_BASE_ADDR	0x0200
-#define T4VF_MBDATA_BASE_ADDR	0x0240
-#define T4VF_CIM_BASE_ADDR	0x0300
-
-#define T4VF_REGMAP_START	0x0000
-#define T4VF_REGMAP_SIZE	0x0400
-
-/*
- * There's no hardware limitation which requires that the addresses of the
- * Mailbox Data in the fixed CIM PF map and the programmable VF map must
- * match.  However, it's a useful convention ...
- */
-#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
-#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
-#endif
-
-/*
- * Virtual Function "Slice to Module Map Table" definitions.
- *
- * This table allows us to map subsets of the various module register sets
- * into the T4VF Register Map.  Each table entry identifies the index of the
- * module whose registers are being mapped, the offset within the module's
- * register set that the mapping should start at, the limit of the mapping,
- * and the offset within the T4VF Register Map to which the module's registers
- * are being mapped.  All addresses and quantities are in terms of 32-bit
- * words.  The "limit" value is also in terms of 32-bit words and is equal to
- * the last address mapped in the T4VF Register Map (i.e. it's a "<="
- * relation rather than a "<").
- */
-#define T4VF_MOD_MAP(module, index, first, last) \
-	T4VF_MOD_MAP_##module##_INDEX = (index), \
-	T4VF_MOD_MAP_##module##_FIRST = (first), \
-	T4VF_MOD_MAP_##module##_LAST = (last), \
-	T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \
-	T4VF_MOD_MAP_##module##_BASE = \
-		(T4VF_##module##_BASE_ADDR/4 + (first)/4), \
-	T4VF_MOD_MAP_##module##_LIMIT = \
-		(T4VF_##module##_BASE_ADDR/4 + (last)/4),
-
-#define SGE_VF_KDOORBELL	0x0
-#define SGE_VF_GTS		0x4
-#define MPS_VF_CTL		0x0
-#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H	0xfc
-#define PL_VF_WHOAMI		0x0
-#define CIM_VF_EXT_MAILBOX_CTRL		0x0
-#define CIM_VF_EXT_MAILBOX_STATUS	0x4
-
-enum {
-	T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS)
-	T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H)
-	T4VF_MOD_MAP(PL, 3, PL_VF_WHOAMI, PL_VF_WHOAMI)
-	T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS)
-};
-
-/*
- * There isn't a Slice to Module Map Table entry for the Mailbox Data
- * registers, but it's convenient to use similar names as above.  There are 8
- * little-endian 64-bit Mailbox Data registers.  Note that the "instances"
- * value below is in terms of 32-bit words which matches the "word" addressing
- * space we use above for the Slice to Module Map Space.
- */
-#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16
-
-#define T4VF_MBDATA_FIRST	0
-#define T4VF_MBDATA_LAST	((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4)
-
-#endif /* __T4VF_DEFS_H__ */
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
deleted file mode 100644
index fe3fd3d..0000000
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ /dev/null
@@ -1,1387 +0,0 @@
-/*
- * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
- * driver for Linux.
- *
- * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include - -#include "t4vf_common.h" -#include "t4vf_defs.h" - -#include "../cxgb4/t4_regs.h" -#include "../cxgb4/t4fw_api.h" - -/* - * Wait for the device to become ready (signified by our "who am I" register - * returning a value other than all 1's). Return an error if it doesn't - * become ready ... - */ -int __devinit t4vf_wait_dev_ready(struct adapter *adapter) -{ - const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI; - const u32 notready1 = 0xffffffff; - const u32 notready2 = 0xeeeeeeee; - u32 val; - - val = t4_read_reg(adapter, whoami); - if (val != notready1 && val != notready2) - return 0; - msleep(500); - val = t4_read_reg(adapter, whoami); - if (val != notready1 && val != notready2) - return 0; - else - return -EIO; -} - -/* - * Get the reply to a mailbox command and store it in @rpl in big-endian order - * (since the firmware data structures are specified in a big-endian layout). - */ -static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size, - u32 mbox_data) -{ - for ( ; size; size -= 8, mbox_data += 8) - *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data)); -} - -/* - * Dump contents of mailbox with a leading tag. - */ -static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data) -{ - dev_err(adapter->pdev_dev, - "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag, - (unsigned long long)t4_read_reg64(adapter, mbox_data + 0), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 8), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 16), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 24), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 32), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 40), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 48), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 56)); -} - -/** - * t4vf_wr_mbox_core - send a command to FW through the mailbox - * @adapter: the adapter - * @cmd: the command to write - * @size: command length in bytes - * @rpl: where to optionally store the reply - * @sleep_ok: if true we may sleep while awaiting command completion - * - * Sends the given command to FW through the mailbox and waits for the - * FW to execute the command. 
If @rpl is not %NULL it is used to store
- *	the FW's reply to the command.  The command and its optional reply
- *	are of the same length.  FW can take up to 500 ms to respond.
- *	@sleep_ok determines whether we may sleep while awaiting the response.
- *	If sleeping is allowed we use progressive backoff; otherwise we spin.
- *
- *	The return value is 0 on success or a negative errno on failure.  A
- *	failure can happen either because we are not able to execute the
- *	command or FW executes it but signals an error.  In the latter case
- *	the return value is the error code indicated by FW (negated).
- */
-int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
-		      void *rpl, bool sleep_ok)
-{
-	static const int delay[] = {
-		1, 1, 3, 5, 10, 10, 20, 50, 100
-	};
-
-	u32 v;
-	int i, ms, delay_idx;
-	const __be64 *p;
-	u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
-	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
-
-	/*
-	 * Commands must be multiples of 16 bytes in length and may not be
-	 * larger than the size of the Mailbox Data register array.
-	 */
-	if ((size % 16) != 0 ||
-	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
-		return -EINVAL;
-
-	/*
-	 * Loop trying to get ownership of the mailbox.  Return an error
-	 * if we can't gain ownership.
-	 */
-	v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
-	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
-		v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
-	if (v != MBOX_OWNER_DRV)
-		return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
-
-	/*
-	 * Write the command array into the Mailbox Data register array and
-	 * transfer ownership of the mailbox to the firmware.
-	 *
-	 * For the VFs, the Mailbox Data "registers" are actually backed by
-	 * T4's "MA" interface rather than PL Registers (as is the case for
-	 * the PFs).  Because these are in different coherency domains, the
-	 * write to the VF's PL-register-backed Mailbox Control can race in
-	 * front of the writes to the MA-backed VF Mailbox Data "registers".
-	 * So we need to do a read-back on at least one byte of the VF Mailbox
-	 * Data registers before doing the write to the VF Mailbox Control
-	 * register.
-	 */
-	for (i = 0, p = cmd; i < size; i += 8)
-		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
-	t4_read_reg(adapter, mbox_data);         /* flush write */
-
-	t4_write_reg(adapter, mbox_ctl,
-		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
-	t4_read_reg(adapter, mbox_ctl);          /* flush write */
-
-	/*
-	 * Spin waiting for firmware to acknowledge processing our command.
-	 */
-	delay_idx = 0;
-	ms = delay[0];
-
-	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
-		if (sleep_ok) {
-			ms = delay[delay_idx];
-			if (delay_idx < ARRAY_SIZE(delay) - 1)
-				delay_idx++;
-			msleep(ms);
-		} else
-			mdelay(ms);
-
-		/*
-		 * If we're the owner, see if this is the reply we wanted.
-		 */
-		v = t4_read_reg(adapter, mbox_ctl);
-		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
-			/*
-			 * If the Message Valid bit isn't on, revoke ownership
-			 * of the mailbox and continue waiting for our reply.
-			 */
-			if ((v & MBMSGVALID) == 0) {
-				t4_write_reg(adapter, mbox_ctl,
-					     MBOWNER(MBOX_OWNER_NONE));
-				continue;
-			}
-
-			/*
-			 * We now have our reply.  Extract the command return
-			 * value, copy the reply back to our caller's buffer
-			 * (if specified) and revoke ownership of the mailbox.
-			 * We return the (negated) firmware command return
-			 * code (this depends on FW_SUCCESS == 0).
- */ - - /* return value in low-order little-endian word */ - v = t4_read_reg(adapter, mbox_data); - if (FW_CMD_RETVAL_GET(v)) - dump_mbox(adapter, "FW Error", mbox_data); - - if (rpl) { - /* request bit in high-order BE word */ - WARN_ON((be32_to_cpu(*(const u32 *)cmd) - & FW_CMD_REQUEST) == 0); - get_mbox_rpl(adapter, rpl, size, mbox_data); - WARN_ON((be32_to_cpu(*(u32 *)rpl) - & FW_CMD_REQUEST) != 0); - } - t4_write_reg(adapter, mbox_ctl, - MBOWNER(MBOX_OWNER_NONE)); - return -FW_CMD_RETVAL_GET(v); - } - } - - /* - * We timed out. Return the error ... - */ - dump_mbox(adapter, "FW Timeout", mbox_data); - return -ETIMEDOUT; -} - -/** - * hash_mac_addr - return the hash value of a MAC address - * @addr: the 48-bit Ethernet MAC address - * - * Hashes a MAC address according to the hash function used by hardware - * inexact (hash) address matching. - */ -static int hash_mac_addr(const u8 *addr) -{ - u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; - u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; - a ^= b; - a ^= (a >> 12); - a ^= (a >> 6); - return a & 0x3f; -} - -/** - * init_link_config - initialize a link's SW state - * @lc: structure holding the link state - * @caps: link capabilities - * - * Initializes the SW state maintained for each link, including the link's - * capabilities and default speed/flow-control/autonegotiation settings. - */ -static void __devinit init_link_config(struct link_config *lc, - unsigned int caps) -{ - lc->supported = caps; - lc->requested_speed = 0; - lc->speed = 0; - lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; - if (lc->supported & SUPPORTED_Autoneg) { - lc->advertising = lc->supported; - lc->autoneg = AUTONEG_ENABLE; - lc->requested_fc |= PAUSE_AUTONEG; - } else { - lc->advertising = 0; - lc->autoneg = AUTONEG_DISABLE; - } -} - -/** - * t4vf_port_init - initialize port hardware/software state - * @adapter: the adapter - * @pidx: the adapter port index - */ -int __devinit t4vf_port_init(struct adapter *adapter, int pidx) -{ - struct port_info *pi = adap2pinfo(adapter, pidx); - struct fw_vi_cmd vi_cmd, vi_rpl; - struct fw_port_cmd port_cmd, port_rpl; - int v; - u32 word; - - /* - * Execute a VI Read command to get our Virtual Interface information - * like MAC address, etc. - */ - memset(&vi_cmd, 0, sizeof(vi_cmd)); - vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); - vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); - vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid)); - v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); - if (v) - return v; - - BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd)); - pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd)); - t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac); - - /* - * If we don't have read access to our port information, we're done - * now. Otherwise, execute a PORT Read command to get it ... 
- */
- if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
- return 0;
-
- memset(&port_cmd, 0, sizeof(port_cmd));
- port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ |
- FW_PORT_CMD_PORTID(pi->port_id));
- port_cmd.action_to_len16 =
- cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
- FW_LEN16(port_cmd));
- v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
- if (v)
- return v;
-
- v = 0;
- word = be16_to_cpu(port_rpl.u.info.pcap);
- if (word & FW_PORT_CAP_SPEED_100M)
- v |= SUPPORTED_100baseT_Full;
- if (word & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (word & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- if (word & FW_PORT_CAP_ANEG)
- v |= SUPPORTED_Autoneg;
- init_link_config(&pi->link_cfg, v);
-
- return 0;
-}
-
-/**
- * t4vf_fw_reset - issue a reset to FW
- * @adapter: the adapter
- *
- * Issues a reset command to FW. For a Physical Function this would
- * result in the Firmware resetting all of its state. For a Virtual
- * Function this just resets the state associated with the VF.
- */
-int t4vf_fw_reset(struct adapter *adapter)
-{
- struct fw_reset_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
- FW_CMD_WRITE);
- cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
- return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
-}
-
-/**
- * t4vf_query_params - query FW or device parameters
- * @adapter: the adapter
- * @nparams: the number of parameters
- * @params: the parameter names
- * @vals: the parameter values
- *
- * Reads the values of firmware or device parameters. Up to 7 parameters
- * can be queried at once.
- */
-int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
- const u32 *params, u32 *vals)
-{
- int i, ret;
- struct fw_params_cmd cmd, rpl;
- struct fw_params_param *p;
- size_t len16;
-
- if (nparams > 7)
- return -EINVAL;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
- len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
- param[nparams].mnem), 16);
- cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
- for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
- p->mnem = htonl(*params++);
-
- ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
- if (ret == 0)
- for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
- *vals++ = be32_to_cpu(p->val);
- return ret;
-}
-
-/**
- * t4vf_set_params - sets FW or device parameters
- * @adapter: the adapter
- * @nparams: the number of parameters
- * @params: the parameter names
- * @vals: the parameter values
- *
- * Sets the values of firmware or device parameters. Up to 7 parameters
- * can be specified at once.
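- *
- * Illustrative usage (a sketch only; whether a particular parameter
- * is actually writable is firmware policy, and the value below is
- * hypothetical):
- *
- * u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- * FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);
- * u32 val = 62500;
- * err = t4vf_set_params(adapter, 1, &param, &val);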
- */
-int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
- const u32 *params, const u32 *vals)
-{
- int i;
- struct fw_params_cmd cmd;
- struct fw_params_param *p;
- size_t len16;
-
- if (nparams > 7)
- return -EINVAL;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE);
- len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
- param[nparams]), 16);
- cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
- for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
- p->mnem = cpu_to_be32(*params++);
- p->val = cpu_to_be32(*vals++);
- }
-
- return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
-}
-
-/**
- * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
- * @adapter: the adapter
- *
- * Retrieves various core SGE parameters in the form of hardware SGE
- * register values. The caller is responsible for decoding these as
- * needed. The SGE parameters are stored in @adapter->params.sge.
- */
-int t4vf_get_sge_params(struct adapter *adapter)
-{
- struct sge_params *sge_params = &adapter->params.sge;
- u32 params[7], vals[7];
- int v;
-
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_CONTROL));
- params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE));
- params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0));
- params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1));
- params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1));
- params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3));
- params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5));
- v = t4vf_query_params(adapter, 7, params, vals);
- if (v)
- return v;
- sge_params->sge_control = vals[0];
- sge_params->sge_host_page_size = vals[1];
- sge_params->sge_fl_buffer_size[0] = vals[2];
- sge_params->sge_fl_buffer_size[1] = vals[3];
- sge_params->sge_timer_value_0_and_1 = vals[4];
- sge_params->sge_timer_value_2_and_3 = vals[5];
- sge_params->sge_timer_value_4_and_5 = vals[6];
-
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
- FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
- v = t4vf_query_params(adapter, 1, params, vals);
- if (v)
- return v;
- sge_params->sge_ingress_rx_threshold = vals[0];
-
- return 0;
-}
-
-/**
- * t4vf_get_vpd_params - retrieve device VPD parameters
- * @adapter: the adapter
- *
- * Retrieves various device Vital Product Data parameters. The parameters
- * are stored in @adapter->params.vpd.
- */
-int t4vf_get_vpd_params(struct adapter *adapter)
-{
- struct vpd_params *vpd_params = &adapter->params.vpd;
- u32 params[7], vals[7];
- int v;
-
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
- v = t4vf_query_params(adapter, 1, params, vals);
- if (v)
- return v;
- vpd_params->cclk = vals[0];
-
- return 0;
-}
-
-/**
- * t4vf_get_dev_params - retrieve device parameters
- * @adapter: the adapter
- *
- * Retrieves various device parameters. The parameters are stored in
- * @adapter->params.dev.
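- *
- * Example of consuming the cached result after a successful call
- * (an illustrative sketch; "fwrev" is a hypothetical local):
- *
- * if (t4vf_get_dev_params(adapter) == 0)
- * fwrev = adapter->params.dev.fwrev;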
- */
-int t4vf_get_dev_params(struct adapter *adapter)
-{
- struct dev_params *dev_params = &adapter->params.dev;
- u32 params[7], vals[7];
- int v;
-
- params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
- params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
- v = t4vf_query_params(adapter, 2, params, vals);
- if (v)
- return v;
- dev_params->fwrev = vals[0];
- dev_params->tprev = vals[1];
-
- return 0;
-}
-
-/**
- * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
- * @adapter: the adapter
- *
- * Retrieves global RSS mode and parameters with which we have to live
- * and stores them in the @adapter's RSS parameters.
- */
-int t4vf_get_rss_glb_config(struct adapter *adapter)
-{
- struct rss_params *rss = &adapter->params.rss;
- struct fw_rss_glb_config_cmd cmd, rpl;
- int v;
-
- /*
- * Execute an RSS Global Configuration read command to retrieve
- * our RSS configuration.
- */
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_READ);
- cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
- v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
- if (v)
- return v;
-
- /*
- * Translate the big-endian RSS Global Configuration into our
- * cpu-endian format based on the RSS mode. We also do first level
- * filtering at this point to weed out modes which don't support
- * VF Drivers ...
- */
- rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET(
- be32_to_cpu(rpl.u.manual.mode_pkd));
- switch (rss->mode) {
- case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
- u32 word = be32_to_cpu(
- rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
-
- rss->u.basicvirtual.synmapen =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
- rss->u.basicvirtual.syn4tupenipv6 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
- rss->u.basicvirtual.syn2tupenipv6 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
- rss->u.basicvirtual.syn4tupenipv4 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
- rss->u.basicvirtual.syn2tupenipv4 =
- ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
-
- rss->u.basicvirtual.ofdmapen =
- ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
-
- rss->u.basicvirtual.tnlmapen =
- ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
- rss->u.basicvirtual.tnlalllookup =
- ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
-
- rss->u.basicvirtual.hashtoeplitz =
- ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
-
- /* we need at least Tunnel Map Enable to be set */
- if (!rss->u.basicvirtual.tnlmapen)
- return -EINVAL;
- break;
- }
-
- default:
- /* all unknown/unsupported RSS modes result in an error */
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * t4vf_get_vfres - retrieve VF resource limits
- * @adapter: the adapter
- *
- * Retrieves configured resource limits and capabilities for a virtual
- * function. The results are stored in @adapter->vfres.
- */
-int t4vf_get_vfres(struct adapter *adapter)
-{
- struct vf_resources *vfres = &adapter->params.vfres;
- struct fw_pfvf_cmd cmd, rpl;
- int v;
- u32 word;
-
- /*
- * Execute PFVF Read command to get VF resource limits; bail out early
- * with error on command failure.
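- * (The reply packs several limits into each 32-bit word; the
- * FW_PFVF_CMD_*_GET() accessors below unpack them into the
- * host-native vf_resources structure.)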
- */ - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ); - cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); - v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); - if (v) - return v; - - /* - * Extract VF resource limits and return success. - */ - word = be32_to_cpu(rpl.niqflint_niq); - vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word); - vfres->niq = FW_PFVF_CMD_NIQ_GET(word); - - word = be32_to_cpu(rpl.type_to_neq); - vfres->neq = FW_PFVF_CMD_NEQ_GET(word); - vfres->pmask = FW_PFVF_CMD_PMASK_GET(word); - - word = be32_to_cpu(rpl.tc_to_nexactf); - vfres->tc = FW_PFVF_CMD_TC_GET(word); - vfres->nvi = FW_PFVF_CMD_NVI_GET(word); - vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word); - - word = be32_to_cpu(rpl.r_caps_to_nethctrl); - vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word); - vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word); - vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word); - - return 0; -} - -/** - * t4vf_read_rss_vi_config - read a VI's RSS configuration - * @adapter: the adapter - * @viid: Virtual Interface ID - * @config: pointer to host-native VI RSS Configuration buffer - * - * Reads the Virtual Interface's RSS configuration information and - * translates it into CPU-native format. - */ -int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, - union rss_vi_config *config) -{ - struct fw_rss_vi_config_cmd cmd, rpl; - int v; - - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST | - FW_CMD_READ | - FW_RSS_VI_CONFIG_CMD_VIID(viid)); - cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); - v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); - if (v) - return v; - - switch (adapter->params.rss.mode) { - case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { - u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen); - - config->basicvirtual.ip6fourtupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0); - config->basicvirtual.ip6twotupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0); - config->basicvirtual.ip4fourtupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0); - config->basicvirtual.ip4twotupen = - ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0); - config->basicvirtual.udpen = - ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0); - config->basicvirtual.defaultq = - FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word); - break; - } - - default: - return -EINVAL; - } - - return 0; -} - -/** - * t4vf_write_rss_vi_config - write a VI's RSS configuration - * @adapter: the adapter - * @viid: Virtual Interface ID - * @config: pointer to host-native VI RSS Configuration buffer - * - * Write the Virtual Interface's RSS configuration information - * (translating it into firmware-native format before writing). 
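- *
- * Typical read-modify-write usage (an illustrative sketch):
- *
- * union rss_vi_config config;
- *
- * if (t4vf_read_rss_vi_config(adapter, viid, &config) == 0) {
- * config.basicvirtual.ip4fourtupen = 1;
- * err = t4vf_write_rss_vi_config(adapter, viid, &config);
- * }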
- */
-int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
- union rss_vi_config *config)
-{
- struct fw_rss_vi_config_cmd cmd, rpl;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_RSS_VI_CONFIG_CMD_VIID(viid));
- cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
- switch (adapter->params.rss.mode) {
- case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
- u32 word = 0;
-
- if (config->basicvirtual.ip6fourtupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
- if (config->basicvirtual.ip6twotupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
- if (config->basicvirtual.ip4fourtupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
- if (config->basicvirtual.ip4twotupen)
- word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
- if (config->basicvirtual.udpen)
- word |= FW_RSS_VI_CONFIG_CMD_UDPEN;
- word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ(
- config->basicvirtual.defaultq);
- cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
- break;
- }
-
- default:
- return -EINVAL;
- }
-
- return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
-}
-
-/**
- * t4vf_config_rss_range - configure a portion of the RSS mapping table
- * @adapter: the adapter
- * @viid: Virtual Interface of RSS Table Slice
- * @start: starting entry in the table to write
- * @n: how many table entries to write
- * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
- * @nrspq: number of values in @rspq
- *
- * Programs the selected part of the VI's RSS mapping table with the
- * provided values. If @nrspq < @n the supplied values are used repeatedly
- * until the full table range is populated.
- *
- * The caller must ensure the values in @rspq are in the range 0..1023.
- */
-int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
- int start, int n, const u16 *rspq, int nrspq)
-{
- const u16 *rsp = rspq;
- const u16 *rsp_end = rspq+nrspq;
- struct fw_rss_ind_tbl_cmd cmd;
-
- /*
- * Initialize firmware command template to write the RSS table.
- */
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_RSS_IND_TBL_CMD_VIID(viid));
- cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
-
- /*
- * Each firmware RSS command can accommodate up to 32 RSS Ingress
- * Queue Identifiers. These Ingress Queue IDs are packed three to
- * a 32-bit word as 10-bit values with the upper remaining 2 bits
- * reserved.
- */
- while (n > 0) {
- __be32 *qp = &cmd.iq0_to_iq2;
- int nq = min(n, 32);
- int ret;
-
- /*
- * Set up the firmware RSS command header to send the next
- * "nq" Ingress Queue IDs to the firmware.
- */
- cmd.niqid = cpu_to_be16(nq);
- cmd.startidx = cpu_to_be16(start);
-
- /*
- * "nq" more entries are now accounted for; set up for the
- * start of the next loop iteration.
- */
- start += nq;
- n -= nq;
-
- /*
- * While there are still Ingress Queue IDs to stuff into the
- * current firmware RSS command, retrieve them from the
- * Ingress Queue ID array and insert them into the command.
- */
- while (nq > 0) {
- /*
- * Grab up to the next 3 Ingress Queue IDs (wrapping
- * around the Ingress Queue ID array if necessary) and
- * insert them into the firmware RSS command at the
- * current 3-tuple position within the command.
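- * E.g. (illustrative): IDs 5, 9 and 12 would be packed
- * into a single word as FW_RSS_IND_TBL_CMD_IQ0(5) |
- * FW_RSS_IND_TBL_CMD_IQ1(9) | FW_RSS_IND_TBL_CMD_IQ2(12).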
- */
- u16 qbuf[3];
- u16 *qbp = qbuf;
- int nqbuf = min(3, nq);
-
- nq -= nqbuf;
- qbuf[0] = qbuf[1] = qbuf[2] = 0;
- while (nqbuf) {
- nqbuf--;
- *qbp++ = *rsp++;
- if (rsp >= rsp_end)
- rsp = rspq;
- }
- *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
- FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
- FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
- }
-
- /*
- * Send this portion of the RSS table update to the firmware;
- * bail out on any errors.
- */
- ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-/**
- * t4vf_alloc_vi - allocate a virtual interface on a port
- * @adapter: the adapter
- * @port_id: physical port associated with the VI
- *
- * Allocate a new Virtual Interface and bind it to the indicated
- * physical port. Return the new Virtual Interface Identifier on
- * success, or a [negative] error number on failure.
- */
-int t4vf_alloc_vi(struct adapter *adapter, int port_id)
-{
- struct fw_vi_cmd cmd, rpl;
- int v;
-
- /*
- * Execute a VI command to allocate a Virtual Interface and return
- * its VIID.
- */
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
- FW_VI_CMD_ALLOC);
- cmd.portid_pkd = FW_VI_CMD_PORTID(port_id);
- v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
- if (v)
- return v;
-
- return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid));
-}
-
-/**
- * t4vf_free_vi -- free a virtual interface
- * @adapter: the adapter
- * @viid: the virtual interface identifier
- *
- * Free a previously allocated Virtual Interface. Return an error on
- * failure.
- */
-int t4vf_free_vi(struct adapter *adapter, int viid)
-{
- struct fw_vi_cmd cmd;
-
- /*
- * Execute a VI command to free the Virtual Interface.
- */
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC);
- cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
- FW_VI_CMD_FREE);
- cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid));
- return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
-}
-
-/**
- * t4vf_enable_vi - enable/disable a virtual interface
- * @adapter: the adapter
- * @viid: the Virtual Interface ID
- * @rx_en: 1=enable Rx, 0=disable Rx
- * @tx_en: 1=enable Tx, 0=disable Tx
- *
- * Enables/disables a virtual interface.
- */
-int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
- bool rx_en, bool tx_en)
-{
- struct fw_vi_enable_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_EXEC |
- FW_VI_ENABLE_CMD_VIID(viid));
- cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) |
- FW_VI_ENABLE_CMD_EEN(tx_en) |
- FW_LEN16(cmd));
- return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
-}
-
-/**
- * t4vf_identify_port - identify a VI's port by blinking its LED
- * @adapter: the adapter
- * @viid: the Virtual Interface ID
- * @nblinks: how many times to blink LED at 2.5 Hz
- *
- * Identifies a VI's port by blinking its LED.
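- * For example, t4vf_identify_port(adapter, pi->viid, 5) would blink
- * the LED five times, roughly two seconds at 2.5 Hz; an illustrative
- * call, not from the original source.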
- */ -int t4vf_identify_port(struct adapter *adapter, unsigned int viid, - unsigned int nblinks) -{ - struct fw_vi_enable_cmd cmd; - - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED | - FW_LEN16(cmd)); - cmd.blinkdur = cpu_to_be16(nblinks); - return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); -} - -/** - * t4vf_set_rxmode - set Rx properties of a virtual interface - * @adapter: the adapter - * @viid: the VI id - * @mtu: the new MTU or -1 for no change - * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change - * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change - * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change - * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it, - * -1 no change - * - * Sets Rx properties of a virtual interface. - */ -int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid, - int mtu, int promisc, int all_multi, int bcast, int vlanex, - bool sleep_ok) -{ - struct fw_vi_rxmode_cmd cmd; - - /* convert to FW values */ - if (mtu < 0) - mtu = FW_VI_RXMODE_CMD_MTU_MASK; - if (promisc < 0) - promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; - if (all_multi < 0) - all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; - if (bcast < 0) - bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; - if (vlanex < 0) - vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; - - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_RXMODE_CMD_VIID(viid)); - cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); - cmd.mtu_to_vlanexen = - cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) | - FW_VI_RXMODE_CMD_PROMISCEN(promisc) | - FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | - FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | - FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); - return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); -} - -/** - * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses - * @adapter: the adapter - * @viid: the Virtual Interface Identifier - * @free: if true any existing filters for this VI id are first removed - * @naddr: the number of MAC addresses to allocate filters for (up to 7) - * @addr: the MAC address(es) - * @idx: where to store the index of each allocated filter - * @hash: pointer to hash address filter bitmap - * @sleep_ok: call is allowed to sleep - * - * Allocates an exact-match filter for each of the supplied addresses and - * sets it to the corresponding address. If @idx is not %NULL it should - * have at least @naddr entries, each of which will be set to the index of - * the filter allocated for the corresponding MAC address. If a filter - * could not be allocated for an address its index is set to 0xffff. - * If @hash is not %NULL addresses that fail to allocate an exact filter - * are hashed and update the hash filter bitmap pointed at by @hash. - * - * Returns a negative error number or the number of filters allocated. - */ -int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, - unsigned int naddr, const u8 **addr, u16 *idx, - u64 *hash, bool sleep_ok) -{ - int offset, ret = 0; - unsigned nfilters = 0; - unsigned int rem = naddr; - struct fw_vi_mac_cmd cmd, rpl; - - if (naddr > FW_CLS_TCAM_NUM_ENTRIES) - return -EINVAL; - - for (offset = 0; offset < naddr; /**/) { - unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) - ? 
rem - : ARRAY_SIZE(cmd.u.exact)); - size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, - u.exact[fw_naddr]), 16); - struct fw_vi_mac_exact *p; - int i; - - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - (free ? FW_CMD_EXEC : 0) | - FW_VI_MAC_CMD_VIID(viid)); - cmd.freemacs_to_len16 = - cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | - FW_CMD_LEN16(len16)); - - for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { - p->valid_to_idx = cpu_to_be16( - FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); - memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); - } - - - ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, - sleep_ok); - if (ret && ret != -ENOMEM) - break; - - for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_GET( - be16_to_cpu(p->valid_to_idx)); - - if (idx) - idx[offset+i] = - (index >= FW_CLS_TCAM_NUM_ENTRIES - ? 0xffff - : index); - if (index < FW_CLS_TCAM_NUM_ENTRIES) - nfilters++; - else if (hash) - *hash |= (1ULL << hash_mac_addr(addr[offset+i])); - } - - free = false; - offset += fw_naddr; - rem -= fw_naddr; - } - - /* - * If there were no errors or we merely ran out of room in our MAC - * address arena, return the number of filters actually written. - */ - if (ret == 0 || ret == -ENOMEM) - ret = nfilters; - return ret; -} - -/** - * t4vf_change_mac - modifies the exact-match filter for a MAC address - * @adapter: the adapter - * @viid: the Virtual Interface ID - * @idx: index of existing filter for old value of MAC address, or -1 - * @addr: the new MAC address value - * @persist: if idx < 0, the new MAC allocation should be persistent - * - * Modifies an exact-match filter and sets it to the new MAC address. - * Note that in general it is not possible to modify the value of a given - * filter so the generic way to modify an address filter is to free the - * one being used by the old address value and allocate a new filter for - * the new address value. @idx can be -1 if the address is a new - * addition. - * - * Returns a negative error number or the index of the filter with the new - * MAC value. - */ -int t4vf_change_mac(struct adapter *adapter, unsigned int viid, - int idx, const u8 *addr, bool persist) -{ - int ret; - struct fw_vi_mac_cmd cmd, rpl; - struct fw_vi_mac_exact *p = &cmd.u.exact[0]; - size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, - u.exact[1]), 16); - - /* - * If this is a new allocation, determine whether it should be - * persistent (across a "freemacs" operation) or not. - */ - if (idx < 0) - idx = persist ? 
FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; - - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_MAC_CMD_VIID(viid)); - cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); - p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(idx)); - memcpy(p->macaddr, addr, sizeof(p->macaddr)); - - ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); - if (ret == 0) { - p = &rpl.u.exact[0]; - ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); - if (ret >= FW_CLS_TCAM_NUM_ENTRIES) - ret = -ENOMEM; - } - return ret; -} - -/** - * t4vf_set_addr_hash - program the MAC inexact-match hash filter - * @adapter: the adapter - * @viid: the Virtual Interface Identifier - * @ucast: whether the hash filter should also match unicast addresses - * @vec: the value to be written to the hash filter - * @sleep_ok: call is allowed to sleep - * - * Sets the 64-bit inexact-match hash filter for a virtual interface. - */ -int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid, - bool ucast, u64 vec, bool sleep_ok) -{ - struct fw_vi_mac_cmd cmd; - size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, - u.exact[0]), 16); - - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_VI_ENABLE_CMD_VIID(viid)); - cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN | - FW_VI_MAC_CMD_HASHUNIEN(ucast) | - FW_CMD_LEN16(len16)); - cmd.u.hash.hashvec = cpu_to_be64(vec); - return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); -} - -/** - * t4vf_get_port_stats - collect "port" statistics - * @adapter: the adapter - * @pidx: the port index - * @s: the stats structure to fill - * - * Collect statistics for the "port"'s Virtual Interface. - */ -int t4vf_get_port_stats(struct adapter *adapter, int pidx, - struct t4vf_port_stats *s) -{ - struct port_info *pi = adap2pinfo(adapter, pidx); - struct fw_vi_stats_vf fwstats; - unsigned int rem = VI_VF_NUM_STATS; - __be64 *fwsp = (__be64 *)&fwstats; - - /* - * Grab the Virtual Interface statistics a chunk at a time via mailbox - * commands. We could use a Work Request and get all of them at once - * but that's an asynchronous interface which is awkward to use. - */ - while (rem) { - unsigned int ix = VI_VF_NUM_STATS - rem; - unsigned int nstats = min(6U, rem); - struct fw_vi_stats_cmd cmd, rpl; - size_t len = (offsetof(struct fw_vi_stats_cmd, u) + - sizeof(struct fw_vi_stats_ctl)); - size_t len16 = DIV_ROUND_UP(len, 16); - int ret; - - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) | - FW_VI_STATS_CMD_VIID(pi->viid) | - FW_CMD_REQUEST | - FW_CMD_READ); - cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); - cmd.u.ctl.nstats_ix = - cpu_to_be16(FW_VI_STATS_CMD_IX(ix) | - FW_VI_STATS_CMD_NSTATS(nstats)); - ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); - if (ret) - return ret; - - memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats); - - rem -= nstats; - fwsp += nstats; - } - - /* - * Translate firmware statistics into host native statistics. 
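- * (Arithmetic sketch: assuming VI_VF_NUM_STATS == 16, the loop
- * above fetches the statistics in three mailbox commands of
- * 6 + 6 + 4 values respectively.)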
- */ - s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes); - s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); - s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes); - s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); - s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes); - s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); - s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames); - s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes); - s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames); - - s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes); - s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); - s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes); - s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); - s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes); - s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); - - s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames); - - return 0; -} - -/** - * t4vf_iq_free - free an ingress queue and its free lists - * @adapter: the adapter - * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) - * @iqid: ingress queue ID - * @fl0id: FL0 queue ID or 0xffff if no attached FL0 - * @fl1id: FL1 queue ID or 0xffff if no attached FL1 - * - * Frees an ingress queue and its associated free lists, if any. - */ -int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype, - unsigned int iqid, unsigned int fl0id, unsigned int fl1id) -{ - struct fw_iq_cmd cmd; - - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE | - FW_LEN16(cmd)); - cmd.type_to_iqandstindex = - cpu_to_be32(FW_IQ_CMD_TYPE(iqtype)); - - cmd.iqid = cpu_to_be16(iqid); - cmd.fl0id = cpu_to_be16(fl0id); - cmd.fl1id = cpu_to_be16(fl1id); - return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); -} - -/** - * t4vf_eth_eq_free - free an Ethernet egress queue - * @adapter: the adapter - * @eqid: egress queue ID - * - * Frees an Ethernet egress queue. - */ -int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) -{ - struct fw_eq_eth_cmd cmd; - - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | - FW_CMD_REQUEST | - FW_CMD_EXEC); - cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE | - FW_LEN16(cmd)); - cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid)); - return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); -} - -/** - * t4vf_handle_fw_rpl - process a firmware reply message - * @adapter: the adapter - * @rpl: start of the firmware message - * - * Processes a firmware message, such as link state change messages. - */ -int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) -{ - const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl; - u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi)); - - switch (opcode) { - case FW_PORT_CMD: { - /* - * Link/module state change message. - */ - const struct fw_port_cmd *port_cmd = - (const struct fw_port_cmd *)rpl; - u32 word; - int action, port_id, link_ok, speed, fc, pidx; - - /* - * Extract various fields from port status change message. 
- */ - action = FW_PORT_CMD_ACTION_GET( - be32_to_cpu(port_cmd->action_to_len16)); - if (action != FW_PORT_ACTION_GET_PORT_INFO) { - dev_err(adapter->pdev_dev, - "Unknown firmware PORT reply action %x\n", - action); - break; - } - - port_id = FW_PORT_CMD_PORTID_GET( - be32_to_cpu(port_cmd->op_to_portid)); - - word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); - link_ok = (word & FW_PORT_CMD_LSTATUS) != 0; - speed = 0; - fc = 0; - if (word & FW_PORT_CMD_RXPAUSE) - fc |= PAUSE_RX; - if (word & FW_PORT_CMD_TXPAUSE) - fc |= PAUSE_TX; - if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) - speed = SPEED_100; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) - speed = SPEED_1000; - else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) - speed = SPEED_10000; - - /* - * Scan all of our "ports" (Virtual Interfaces) looking for - * those bound to the physical port which has changed. If - * our recorded state doesn't match the current state, - * signal that change to the OS code. - */ - for_each_port(adapter, pidx) { - struct port_info *pi = adap2pinfo(adapter, pidx); - struct link_config *lc; - - if (pi->port_id != port_id) - continue; - - lc = &pi->link_cfg; - if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc) { - /* something changed */ - lc->link_ok = link_ok; - lc->speed = speed; - lc->fc = fc; - t4vf_os_link_changed(adapter, pidx, link_ok); - } - } - break; - } - - default: - dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n", - opcode); - } - return 0; -} diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 772fb49..69d6403 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -15,5 +15,6 @@ source "drivers/net/ethernet/3com/Kconfig" source "drivers/net/ethernet/8390/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" +source "drivers/net/ethernet/chelsio/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 3ef49a2..470e5d8 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ obj-$(CONFIG_NET_VENDOR_8390) += 8390/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ +obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig new file mode 100644 index 0000000..7b54574 --- /dev/null +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -0,0 +1,106 @@ +# +# Chelsio device configuration +# + +config NET_VENDOR_CHELSIO + bool "Chelsio devices" + depends on PCI || INET + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Chelsio devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_CHELSIO + +config CHELSIO_T1 + tristate "Chelsio 10Gb Ethernet support" + depends on PCI + select CRC32 + select MDIO + ---help--- + This driver supports Chelsio gigabit and 10-gigabit + Ethernet cards. More information about adapter features and + performance tuning is in . + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . 
+ + To compile this driver as a module, choose M here: the module + will be called cxgb. + +config CHELSIO_T1_1G + bool "Chelsio gigabit Ethernet support" + depends on CHELSIO_T1 + ---help--- + Enables support for Chelsio's gigabit Ethernet PCI cards. If you + are using only 10G cards say 'N' here. + +config CHELSIO_T3 + tristate "Chelsio Communications T3 10Gb Ethernet support" + depends on PCI && INET + select FW_LOADER + select MDIO + ---help--- + This driver supports Chelsio T3-based gigabit and 10Gb Ethernet + adapters. + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . + + To compile this driver as a module, choose M here: the module + will be called cxgb3. + +config CHELSIO_T4 + tristate "Chelsio Communications T4 Ethernet support" + depends on PCI + select FW_LOADER + select MDIO + ---help--- + This driver supports Chelsio T4-based gigabit and 10Gb Ethernet + adapters. + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . + + To compile this driver as a module choose M here; the module + will be called cxgb4. + +config CHELSIO_T4VF + tristate "Chelsio Communications T4 Virtual Function Ethernet support" + depends on PCI + ---help--- + This driver supports Chelsio T4-based gigabit and 10Gb Ethernet + adapters with PCI-E SR-IOV Virtual Functions. + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . + + To compile this driver as a module choose M here; the module + will be called cxgb4vf. + +endif # NET_VENDOR_CHELSIO diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile new file mode 100644 index 0000000..390510b --- /dev/null +++ b/drivers/net/ethernet/chelsio/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Chelsio network device drivers. +# + +obj-$(CONFIG_CHELSIO_T1) += cxgb/ +obj-$(CONFIG_CHELSIO_T3) += cxgb3/ +obj-$(CONFIG_CHELSIO_T4) += cxgb4/ +obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/ diff --git a/drivers/net/ethernet/chelsio/cxgb/Makefile b/drivers/net/ethernet/chelsio/cxgb/Makefile new file mode 100644 index 0000000..57a4b26 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/Makefile @@ -0,0 +1,9 @@ +# +# Chelsio T1 driver +# + +obj-$(CONFIG_CHELSIO_T1) += cxgb.o + +cxgb-$(CONFIG_CHELSIO_T1_1G) += mv88e1xxx.o vsc7326.o +cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \ + mv88x201x.o my3126.o $(cxgb-y) diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h new file mode 100644 index 0000000..5ccbed1 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/common.h @@ -0,0 +1,353 @@ +/***************************************************************************** + * * + * File: common.h * + * $Revision: 1.21 $ * + * $Date: 2005/06/22 00:43:25 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. 
* + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#define pr_fmt(fmt) "cxgb: " fmt + +#ifndef _CXGB_COMMON_H_ +#define _CXGB_COMMON_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver" +#define DRV_NAME "cxgb" +#define DRV_VERSION "2.2" + +#define CH_DEVICE(devid, ssid, idx) \ + { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx } + +#define SUPPORTED_PAUSE (1 << 13) +#define SUPPORTED_LOOPBACK (1 << 15) + +#define ADVERTISED_PAUSE (1 << 13) +#define ADVERTISED_ASYM_PAUSE (1 << 14) + +typedef struct adapter adapter_t; + +struct t1_rx_mode { + struct net_device *dev; +}; + +#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC) +#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI) +#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev)) +#define t1_get_netdev(rm) (rm->dev) + +#define MAX_NPORTS 4 +#define PORT_MASK ((1 << MAX_NPORTS) - 1) +#define NMTUS 8 +#define TCB_SIZE 128 + +#define SPEED_INVALID 0xffff +#define DUPLEX_INVALID 0xff + +enum { + CHBT_BOARD_N110, + CHBT_BOARD_N210, + CHBT_BOARD_7500, + CHBT_BOARD_8000, + CHBT_BOARD_CHT101, + CHBT_BOARD_CHT110, + CHBT_BOARD_CHT210, + CHBT_BOARD_CHT204, + CHBT_BOARD_CHT204V, + CHBT_BOARD_CHT204E, + CHBT_BOARD_CHN204, + CHBT_BOARD_COUGAR, + CHBT_BOARD_6800, + CHBT_BOARD_SIMUL, +}; + +enum { + CHBT_TERM_FPGA, + CHBT_TERM_T1, + CHBT_TERM_T2, + CHBT_TERM_T3 +}; + +enum { + CHBT_MAC_CHELSIO_A, + CHBT_MAC_IXF1010, + CHBT_MAC_PM3393, + CHBT_MAC_VSC7321, + CHBT_MAC_DUMMY +}; + +enum { + CHBT_PHY_88E1041, + CHBT_PHY_88E1111, + CHBT_PHY_88X2010, + CHBT_PHY_XPAK, + CHBT_PHY_MY3126, + CHBT_PHY_8244, + CHBT_PHY_DUMMY +}; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +/* Revisions of T1 chip */ +enum { + TERM_T1A = 0, + TERM_T1B = 1, + TERM_T2 = 3 +}; + +struct sge_params { + unsigned int cmdQ_size[2]; + unsigned int freelQ_size[2]; + unsigned int large_buf_capacity; + unsigned int rx_coalesce_usecs; + unsigned int last_rx_coalesce_raw; + unsigned int default_rx_coalesce_usecs; + unsigned int sample_interval_usecs; + unsigned int coalesce_enable; + unsigned int polling; +}; + +struct chelsio_pci_params { + unsigned short speed; + unsigned char width; + unsigned char is_pcix; +}; + +struct tp_params { + unsigned int pm_size; + unsigned int cm_size; + unsigned int pm_rx_base; + unsigned int pm_tx_base; + unsigned int pm_rx_pg_size; + unsigned int pm_tx_pg_size; + unsigned int pm_rx_num_pgs; + unsigned int pm_tx_num_pgs; + unsigned int rx_coalescing_size; + unsigned int use_5tuple_mode; +}; + +struct mc5_params { + unsigned int mode; /* selects MC5 
width */ + unsigned int nservers; /* size of server region */ + unsigned int nroutes; /* size of routing region */ +}; + +/* Default MC5 region sizes */ +#define DEFAULT_SERVER_REGION_LEN 256 +#define DEFAULT_RT_REGION_LEN 1024 + +struct adapter_params { + struct sge_params sge; + struct mc5_params mc5; + struct tp_params tp; + struct chelsio_pci_params pci; + + const struct board_info *brd_info; + + unsigned short mtus[NMTUS]; + unsigned int nports; /* # of ethernet ports */ + unsigned int stats_update_period; + unsigned short chip_revision; + unsigned char chip_version; + unsigned char is_asic; + unsigned char has_msi; +}; + +struct link_config { + unsigned int supported; /* link capabilities */ + unsigned int advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_duplex; /* duplex user has requested */ + unsigned char duplex; /* actual link duplex */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ +}; + +struct cmac; +struct cphy; + +struct port_info { + struct net_device *dev; + struct cmac *mac; + struct cphy *phy; + struct link_config link_config; + struct net_device_stats netstats; +}; + +struct sge; +struct peespi; + +struct adapter { + u8 __iomem *regs; + struct pci_dev *pdev; + unsigned long registered_device_map; + unsigned long open_device_map; + unsigned long flags; + + const char *name; + int msg_enable; + u32 mmio_len; + + struct work_struct ext_intr_handler_task; + struct adapter_params params; + + /* Terminator modules. */ + struct sge *sge; + struct peespi *espi; + struct petp *tp; + + struct napi_struct napi; + struct port_info port[MAX_NPORTS]; + struct delayed_work stats_update_task; + struct timer_list stats_update_timer; + + spinlock_t tpi_lock; + spinlock_t work_lock; + spinlock_t mac_lock; + + /* guards async operations */ + spinlock_t async_lock ____cacheline_aligned; + u32 slow_intr_mask; + int t1powersave; +}; + +enum { /* adapter flags */ + FULL_INIT_DONE = 1 << 0, +}; + +struct mdio_ops; +struct gmac; +struct gphy; + +struct board_info { + unsigned char board; + unsigned char port_number; + unsigned long caps; + unsigned char chip_term; + unsigned char chip_mac; + unsigned char chip_phy; + unsigned int clock_core; + unsigned int clock_mc3; + unsigned int clock_mc4; + unsigned int espi_nports; + unsigned int clock_elmer0; + unsigned char mdio_mdien; + unsigned char mdio_mdiinv; + unsigned char mdio_mdc; + unsigned char mdio_phybaseaddr; + const struct gmac *gmac; + const struct gphy *gphy; + const struct mdio_ops *mdio_ops; + const char *desc; +}; + +static inline int t1_is_asic(const adapter_t *adapter) +{ + return adapter->params.is_asic; +} + +extern const struct pci_device_id t1_pci_tbl[]; + +static inline int adapter_matches_type(const adapter_t *adapter, + int version, int revision) +{ + return adapter->params.chip_version == version && + adapter->params.chip_revision == revision; +} + +#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B) +#define is_T2(adap) adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2) + +/* Returns true if an adapter supports VLAN acceleration and TSO */ +static inline int vlan_tso_capable(const adapter_t *adapter) +{ + return !t1_is_T1B(adapter); +} + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +#define 
board_info(adapter) ((adapter)->params.brd_info) +#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full) + +static inline unsigned int core_ticks_per_usec(const adapter_t *adap) +{ + return board_info(adap)->clock_core / 1000000; +} + +extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp); +extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); +extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); +extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value); + +extern void t1_interrupts_enable(adapter_t *adapter); +extern void t1_interrupts_disable(adapter_t *adapter); +extern void t1_interrupts_clear(adapter_t *adapter); +extern int t1_elmer0_ext_intr_handler(adapter_t *adapter); +extern void t1_elmer0_ext_intr(adapter_t *adapter); +extern int t1_slow_intr_handler(adapter_t *adapter); + +extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); +extern const struct board_info *t1_get_board_info(unsigned int board_id); +extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid, + unsigned short ssid); +extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data); +extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, + struct adapter_params *p); +extern int t1_init_hw_modules(adapter_t *adapter); +extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi); +extern void t1_free_sw_modules(adapter_t *adapter); +extern void t1_fatal_err(adapter_t *adapter); +extern void t1_link_changed(adapter_t *adapter, int port_id); +extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat, + int speed, int duplex, int pause); +#endif /* _CXGB_COMMON_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h new file mode 100644 index 0000000..1f095a9 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h @@ -0,0 +1,175 @@ +/***************************************************************************** + * * + * File: cphy.h * + * $Revision: 1.7 $ * + * $Date: 2005/06/21 18:29:47 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. 
* + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_CPHY_H_ +#define _CXGB_CPHY_H_ + +#include "common.h" + +struct mdio_ops { + void (*init)(adapter_t *adapter, const struct board_info *bi); + int (*read)(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr); + int (*write)(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr, u16 val); + unsigned mode_support; +}; + +/* PHY interrupt types */ +enum { + cphy_cause_link_change = 0x1, + cphy_cause_error = 0x2, + cphy_cause_fifo_error = 0x3 +}; + +enum { + PHY_LINK_UP = 0x1, + PHY_AUTONEG_RDY = 0x2, + PHY_AUTONEG_EN = 0x4 +}; + +struct cphy; + +/* PHY operations */ +struct cphy_ops { + void (*destroy)(struct cphy *); + int (*reset)(struct cphy *, int wait); + + int (*interrupt_enable)(struct cphy *); + int (*interrupt_disable)(struct cphy *); + int (*interrupt_clear)(struct cphy *); + int (*interrupt_handler)(struct cphy *); + + int (*autoneg_enable)(struct cphy *); + int (*autoneg_disable)(struct cphy *); + int (*autoneg_restart)(struct cphy *); + + int (*advertise)(struct cphy *phy, unsigned int advertise_map); + int (*set_loopback)(struct cphy *, int on); + int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex); + int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed, + int *duplex, int *fc); + + u32 mmds; +}; + +/* A PHY instance */ +struct cphy { + int state; /* Link status state machine */ + adapter_t *adapter; /* associated adapter */ + + struct delayed_work phy_update; + + u16 bmsr; + int count; + int act_count; + int act_on; + + u32 elmer_gpo; + + const struct cphy_ops *ops; /* PHY operations */ + struct mdio_if_info mdio; + struct cphy_instance *instance; +}; + +/* Convenience MDIO read/write wrappers */ +static inline int cphy_mdio_read(struct cphy *cphy, int mmd, int reg, + unsigned int *valp) +{ + int rc = cphy->mdio.mdio_read(cphy->mdio.dev, cphy->mdio.prtad, mmd, + reg); + *valp = (rc >= 0) ? rc : -1; + return (rc >= 0) ? 0 : rc; +} + +static inline int cphy_mdio_write(struct cphy *cphy, int mmd, int reg, + unsigned int val) +{ + return cphy->mdio.mdio_write(cphy->mdio.dev, cphy->mdio.prtad, mmd, + reg, val); +} + +static inline int simple_mdio_read(struct cphy *cphy, int reg, + unsigned int *valp) +{ + return cphy_mdio_read(cphy, MDIO_DEVAD_NONE, reg, valp); +} + +static inline int simple_mdio_write(struct cphy *cphy, int reg, + unsigned int val) +{ + return cphy_mdio_write(cphy, MDIO_DEVAD_NONE, reg, val); +} + +/* Convenience initializer */ +static inline void cphy_init(struct cphy *phy, struct net_device *dev, + int phy_addr, struct cphy_ops *phy_ops, + const struct mdio_ops *mdio_ops) +{ + struct adapter *adapter = netdev_priv(dev); + phy->adapter = adapter; + phy->ops = phy_ops; + if (mdio_ops) { + phy->mdio.prtad = phy_addr; + phy->mdio.mmds = phy_ops->mmds; + phy->mdio.mode_support = mdio_ops->mode_support; + phy->mdio.mdio_read = mdio_ops->read; + phy->mdio.mdio_write = mdio_ops->write; + } + phy->mdio.dev = dev; +} + +/* Operations of the PHY-instance factory */ +struct gphy { + /* Construct a PHY instance with the given PHY address */ + struct cphy *(*create)(struct net_device *dev, int phy_addr, + const struct mdio_ops *mdio_ops); + + /* + * Reset the PHY chip. 
This resets the whole PHY chip, not individual + * ports. + */ + int (*reset)(adapter_t *adapter); +}; + +extern const struct gphy t1_my3126_ops; +extern const struct gphy t1_mv88e1xxx_ops; +extern const struct gphy t1_vsc8244_ops; +extern const struct gphy t1_mv88x201x_ops; + +#endif /* _CXGB_CPHY_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h new file mode 100644 index 0000000..e36d45b --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h @@ -0,0 +1,639 @@ +/***************************************************************************** + * * + * File: cpl5_cmd.h * + * $Revision: 1.6 $ * + * $Date: 2005/06/21 18:29:47 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_CPL5_CMD_H_ +#define _CXGB_CPL5_CMD_H_ + +#include + +#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD) +#error "Adjust your defines" +#endif + +enum CPL_opcode { + CPL_PASS_OPEN_REQ = 0x1, + CPL_PASS_OPEN_RPL = 0x2, + CPL_PASS_ESTABLISH = 0x3, + CPL_PASS_ACCEPT_REQ = 0xE, + CPL_PASS_ACCEPT_RPL = 0x4, + CPL_ACT_OPEN_REQ = 0x5, + CPL_ACT_OPEN_RPL = 0x6, + CPL_CLOSE_CON_REQ = 0x7, + CPL_CLOSE_CON_RPL = 0x8, + CPL_CLOSE_LISTSRV_REQ = 0x9, + CPL_CLOSE_LISTSRV_RPL = 0xA, + CPL_ABORT_REQ = 0xB, + CPL_ABORT_RPL = 0xC, + CPL_PEER_CLOSE = 0xD, + CPL_ACT_ESTABLISH = 0x17, + + CPL_GET_TCB = 0x24, + CPL_GET_TCB_RPL = 0x25, + CPL_SET_TCB = 0x26, + CPL_SET_TCB_FIELD = 0x27, + CPL_SET_TCB_RPL = 0x28, + CPL_PCMD = 0x29, + + CPL_PCMD_READ = 0x31, + CPL_PCMD_READ_RPL = 0x32, + + + CPL_RX_DATA = 0xA0, + CPL_RX_DATA_DDP = 0xA1, + CPL_RX_DATA_ACK = 0xA3, + CPL_RX_PKT = 0xAD, + CPL_RX_ISCSI_HDR = 0xAF, + CPL_TX_DATA_ACK = 0xB0, + CPL_TX_DATA = 0xB1, + CPL_TX_PKT = 0xB2, + CPL_TX_PKT_LSO = 0xB6, + + CPL_RTE_DELETE_REQ = 0xC0, + CPL_RTE_DELETE_RPL = 0xC1, + CPL_RTE_WRITE_REQ = 0xC2, + CPL_RTE_WRITE_RPL = 0xD3, + CPL_RTE_READ_REQ = 0xC3, + CPL_RTE_READ_RPL = 0xC4, + CPL_L2T_WRITE_REQ = 0xC5, + CPL_L2T_WRITE_RPL = 0xD4, + CPL_L2T_READ_REQ = 0xC6, + CPL_L2T_READ_RPL = 0xC7, + CPL_SMT_WRITE_REQ = 0xC8, + CPL_SMT_WRITE_RPL = 0xD5, + CPL_SMT_READ_REQ = 0xC9, + CPL_SMT_READ_RPL = 0xCA, + CPL_ARP_MISS_REQ = 0xCD, + CPL_ARP_MISS_RPL = 0xCE, + CPL_MIGRATE_C2T_REQ = 0xDC, + CPL_MIGRATE_C2T_RPL = 0xDD, + CPL_ERROR = 0xD7, + + /* internal: driver -> TOM */ + CPL_MSS_CHANGE = 0xE1 +}; + +#define NUM_CPL_CMDS 256 + +enum CPL_error { + CPL_ERR_NONE = 0, + CPL_ERR_TCAM_PARITY = 1, + 
CPL_ERR_TCAM_FULL = 3, + CPL_ERR_CONN_RESET = 20, + CPL_ERR_CONN_EXIST = 22, + CPL_ERR_ARP_MISS = 23, + CPL_ERR_BAD_SYN = 24, + CPL_ERR_CONN_TIMEDOUT = 30, + CPL_ERR_XMIT_TIMEDOUT = 31, + CPL_ERR_PERSIST_TIMEDOUT = 32, + CPL_ERR_FINWAIT2_TIMEDOUT = 33, + CPL_ERR_KEEPALIVE_TIMEDOUT = 34, + CPL_ERR_ABORT_FAILED = 42, + CPL_ERR_GENERAL = 99 +}; + +enum { + CPL_CONN_POLICY_AUTO = 0, + CPL_CONN_POLICY_ASK = 1, + CPL_CONN_POLICY_DENY = 3 +}; + +enum { + ULP_MODE_NONE = 0, + ULP_MODE_TCPDDP = 1, + ULP_MODE_ISCSI = 2, + ULP_MODE_IWARP = 3, + ULP_MODE_SSL = 4 +}; + +enum { + CPL_PASS_OPEN_ACCEPT, + CPL_PASS_OPEN_REJECT +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, + CPL_ABORT_POST_CLOSE_REQ = 2 +}; + +enum { // TX_PKT_LSO ethernet types + CPL_ETH_II, + CPL_ETH_II_VLAN, + CPL_ETH_802_3, + CPL_ETH_802_3_VLAN +}; + +union opcode_tid { + u32 opcode_tid; + u8 opcode; +}; + +#define S_OPCODE 24 +#define V_OPCODE(x) ((x) << S_OPCODE) +#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) +#define G_TID(x) ((x) & 0xFFFFFF) + +/* tid is assumed to be 24-bits */ +#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid)) + +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) + +/* extract the TID from a CPL command */ +#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd)))) + +struct tcp_options { + u16 mss; + u8 wsf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd:4; + u8 ecn:1; + u8 sack:1; + u8 tstamp:1; +#else + u8 tstamp:1; + u8 sack:1; + u8 ecn:1; + u8 rsvd:4; +#endif +}; + +struct cpl_pass_open_req { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 opt0h; + u32 opt0l; + u32 peer_netmask; + u32 opt1; +}; + +struct cpl_pass_open_rpl { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u8 resvd[7]; + u8 status; +}; + +struct cpl_pass_establish { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 tos_tid; + u8 l2t_idx; + u8 rsvd[3]; + u32 snd_isn; + u32 rcv_isn; +}; + +struct cpl_pass_accept_req { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 tos_tid; + struct tcp_options tcp_options; + u8 dst_mac[6]; + u16 vlan_tag; + u8 src_mac[6]; + u8 rsvd[2]; + u32 rcv_isn; + u32 unknown_tcp_options; +}; + +struct cpl_pass_accept_rpl { + union opcode_tid ot; + u32 rsvd0; + u32 rsvd1; + u32 peer_ip; + u32 opt0h; + union { + u32 opt0l; + struct { + u8 rsvd[3]; + u8 status; + }; + }; +}; + +struct cpl_act_open_req { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 opt0h; + u32 opt0l; + u32 iff_vlantag; + u32 rsvd; +}; + +struct cpl_act_open_rpl { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 new_tid; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_act_establish { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 tos_tid; + u32 rsvd; + u32 snd_isn; + u32 rcv_isn; +}; + +struct cpl_get_tcb { + union opcode_tid ot; + u32 rsvd; +}; + +struct cpl_get_tcb_rpl { + union opcode_tid ot; + u16 len; + u8 rsvd; + u8 status; +}; + +struct cpl_set_tcb { + union opcode_tid ot; + u16 len; + u16 rsvd; +}; + +struct cpl_set_tcb_field { + union opcode_tid ot; + u8 rsvd[3]; + u8 offset; + u32 mask; + u32 val; +}; + +struct cpl_set_tcb_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_pcmd { + union opcode_tid ot; + u16 dlen_in; + u16 dlen_out; + u32 pcmd_parm[2]; +}; + +struct cpl_pcmd_read { + union 
opcode_tid ot; + u32 rsvd1; + u16 rsvd2; + u32 addr; + u16 len; +}; + +struct cpl_pcmd_read_rpl { + union opcode_tid ot; + u16 len; +}; + +struct cpl_close_con_req { + union opcode_tid ot; + u32 rsvd; +}; + +struct cpl_close_con_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; + u32 snd_nxt; + u32 rcv_nxt; +}; + +struct cpl_close_listserv_req { + union opcode_tid ot; + u32 rsvd; +}; + +struct cpl_close_listserv_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req { + union opcode_tid ot; + u32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_abort_rpl { + union opcode_tid ot; + u32 rsvd0; + u8 rsvd1; + u8 status; + u8 rsvd2[6]; +}; + +struct cpl_peer_close { + union opcode_tid ot; + u32 rsvd; +}; + +struct cpl_tx_data { + union opcode_tid ot; + u32 len; + u32 rsvd0; + u16 urg; + u16 flags; +}; + +struct cpl_tx_data_ack { + union opcode_tid ot; + u32 ack_seq; +}; + +struct cpl_rx_data { + union opcode_tid ot; + u32 len; + u32 seq; + u16 urg; + u8 rsvd; + u8 status; +}; + +struct cpl_rx_data_ack { + union opcode_tid ot; + u32 credit; +}; + +struct cpl_rx_data_ddp { + union opcode_tid ot; + u32 len; + u32 seq; + u32 nxt_seq; + u32 ulp_crc; + u16 ddp_status; + u8 rsvd; + u8 status; +}; + +/* + * We want this header's alignment to be no more stringent than 2-byte aligned. + * All fields are u8 or u16 except for the length. However that field is not + * used so we break it into 2 16-bit parts to easily meet our alignment needs. + */ +struct cpl_tx_pkt { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 ip_csum_dis:1; + u8 l4_csum_dis:1; + u8 vlan_valid:1; + u8 rsvd:1; +#else + u8 rsvd:1; + u8 vlan_valid:1; + u8 l4_csum_dis:1; + u8 ip_csum_dis:1; + u8 iff:4; +#endif + u16 vlan; + u16 len_hi; + u16 len_lo; +}; + +struct cpl_tx_pkt_lso { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 ip_csum_dis:1; + u8 l4_csum_dis:1; + u8 vlan_valid:1; + u8 :1; +#else + u8 :1; + u8 vlan_valid:1; + u8 l4_csum_dis:1; + u8 ip_csum_dis:1; + u8 iff:4; +#endif + u16 vlan; + __be32 len; + + u8 rsvd[5]; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 tcp_hdr_words:4; + u8 ip_hdr_words:4; +#else + u8 ip_hdr_words:4; + u8 tcp_hdr_words:4; +#endif + __be16 eth_type_mss; +}; + +struct cpl_rx_pkt { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 csum_valid:1; + u8 bad_pkt:1; + u8 vlan_valid:1; + u8 rsvd:1; +#else + u8 rsvd:1; + u8 vlan_valid:1; + u8 bad_pkt:1; + u8 csum_valid:1; + u8 iff:4; +#endif + u16 csum; + u16 vlan; + u16 len; +}; + +struct cpl_l2t_write_req { + union opcode_tid ot; + u32 params; + u8 rsvd1[2]; + u8 dst_mac[6]; +}; + +struct cpl_l2t_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_l2t_read_req { + union opcode_tid ot; + u8 rsvd[3]; + u8 l2t_idx; +}; + +struct cpl_l2t_read_rpl { + union opcode_tid ot; + u32 params; + u8 rsvd1[2]; + u8 dst_mac[6]; +}; + +struct cpl_smt_write_req { + union opcode_tid ot; + u8 rsvd0; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd1:1; + u8 mtu_idx:3; + u8 iff:4; +#else + u8 iff:4; + u8 mtu_idx:3; + u8 rsvd1:1; +#endif + u16 rsvd2; + u16 rsvd3; + u8 src_mac1[6]; + u16 rsvd4; + u8 src_mac0[6]; +}; + +struct cpl_smt_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_smt_read_req { + union opcode_tid ot; + u8 rsvd0; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd1:4; + u8 iff:4; +#else + u8 iff:4; + u8 rsvd1:4; +#endif + u16 rsvd2; +}; + +struct cpl_smt_read_rpl { + union opcode_tid ot; + u8 status; +#if 
defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd1:1; + u8 mtu_idx:3; + u8 rsvd0:4; +#else + u8 rsvd0:4; + u8 mtu_idx:3; + u8 rsvd1:1; +#endif + u16 rsvd2; + u16 rsvd3; + u8 src_mac1[6]; + u16 rsvd4; + u8 src_mac0[6]; +}; + +struct cpl_rte_delete_req { + union opcode_tid ot; + u32 params; +}; + +struct cpl_rte_delete_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_rte_write_req { + union opcode_tid ot; + u32 params; + u32 netmask; + u32 faddr; +}; + +struct cpl_rte_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_rte_read_req { + union opcode_tid ot; + u32 params; +}; + +struct cpl_rte_read_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd0[2]; + u8 l2t_idx; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd1:7; + u8 select:1; +#else + u8 select:1; + u8 rsvd1:7; +#endif + u8 rsvd2[3]; + u32 addr; +}; + +struct cpl_mss_change { + union opcode_tid ot; + u32 mss; +}; + +#endif /* _CXGB_CPL5_CMD_H_ */ + diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c new file mode 100644 index 0000000..3edbbc4 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -0,0 +1,1379 @@ +/***************************************************************************** + * * + * File: cxgb2.c * + * $Revision: 1.25 $ * + * $Date: 2005/06/22 00:43:25 $ * + * Description: * + * Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. 
*
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis *
+ * Tina Yang *
+ * Felix Marti *
+ * Scott Bardone *
+ * Kurt Ottaway *
+ * Frank DiMambro *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#include "common.h"
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/sockios.h>
+#include <linux/dma-mapping.h>
+#include <asm/uaccess.h>
+
+#include "cpl5_cmd.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "tp.h"
+#include "espi.h"
+#include "elmer0.h"
+
+#include <linux/workqueue.h>
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+	cancel_delayed_work(&ap->stats_update_task);
+}
+
+#define MAX_CMDQ_ENTRIES 16384
+#define MAX_CMDQ1_ENTRIES 1024
+#define MAX_RX_BUFFERS 16384
+#define MAX_RX_JUMBO_BUFFERS 16384
+#define MAX_TX_BUFFERS_HIGH 16384U
+#define MAX_TX_BUFFERS_LOW 1536U
+#define MAX_TX_BUFFERS 1460U
+#define MIN_FL_ENTRIES 32
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+/*
+ * The EEPROM is actually bigger but only the first few bytes are used so we
+ * only report those.
+ */
+#define EEPROM_SIZE 32
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("GPL");
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
+
+#define HCLOCK 0x0
+#define LCLOCK 0x1
+
+/* T1 cards powersave mode */
+static int t1_clock(struct adapter *adapter, int mode);
+static int t1powersave = 1;	/* HW default is powersave mode. */
+
+module_param(t1powersave, int, 0);
+MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
+
+static int disable_msi = 0;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+static const char pci_speed[][4] = {
+	"33", "66", "100", "133"
+};
+
+/*
+ * Setup MAC to receive the types of packets we want.
+ */
+static void t1_set_rxmode(struct net_device *dev)
+{
+	struct adapter *adapter = dev->ml_priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	struct t1_rx_mode rm;
+
+	rm.dev = dev;
+	mac->ops->set_rx_mode(mac, &rm);
+}
+
+static void link_report(struct port_info *p)
+{
+	if (!netif_carrier_ok(p->dev))
+		printk(KERN_INFO "%s: link down\n", p->dev->name);
+	else {
+		const char *s = "10Mbps";
+
+		switch (p->link_config.speed) {
+		case SPEED_10000: s = "10Gbps"; break;
+		case SPEED_1000: s = "1000Mbps"; break;
+		case SPEED_100: s = "100Mbps"; break;
+		}
+
+		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+		       p->dev->name, s,
+		       p->link_config.duplex == DUPLEX_FULL ?
"full" : "half"); + } +} + +void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat, + int speed, int duplex, int pause) +{ + struct port_info *p = &adapter->port[port_id]; + + if (link_stat != netif_carrier_ok(p->dev)) { + if (link_stat) + netif_carrier_on(p->dev); + else + netif_carrier_off(p->dev); + link_report(p); + + /* multi-ports: inform toe */ + if ((speed > 0) && (adapter->params.nports > 1)) { + unsigned int sched_speed = 10; + switch (speed) { + case SPEED_1000: + sched_speed = 1000; + break; + case SPEED_100: + sched_speed = 100; + break; + case SPEED_10: + sched_speed = 10; + break; + } + t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed); + } + } +} + +static void link_start(struct port_info *p) +{ + struct cmac *mac = p->mac; + + mac->ops->reset(mac); + if (mac->ops->macaddress_set) + mac->ops->macaddress_set(mac, p->dev->dev_addr); + t1_set_rxmode(p->dev); + t1_link_start(p->phy, mac, &p->link_config); + mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); +} + +static void enable_hw_csum(struct adapter *adapter) +{ + if (adapter->port[0].dev->hw_features & NETIF_F_TSO) + t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */ + t1_tp_set_tcp_checksum_offload(adapter->tp, 1); +} + +/* + * Things to do upon first use of a card. + * This must run with the rtnl lock held. + */ +static int cxgb_up(struct adapter *adapter) +{ + int err = 0; + + if (!(adapter->flags & FULL_INIT_DONE)) { + err = t1_init_hw_modules(adapter); + if (err) + goto out_err; + + enable_hw_csum(adapter); + adapter->flags |= FULL_INIT_DONE; + } + + t1_interrupts_clear(adapter); + + adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev); + err = request_irq(adapter->pdev->irq, t1_interrupt, + adapter->params.has_msi ? 0 : IRQF_SHARED, + adapter->name, adapter); + if (err) { + if (adapter->params.has_msi) + pci_disable_msi(adapter->pdev); + + goto out_err; + } + + t1_sge_start(adapter->sge); + t1_interrupts_enable(adapter); +out_err: + return err; +} + +/* + * Release resources when all the ports have been stopped. + */ +static void cxgb_down(struct adapter *adapter) +{ + t1_sge_stop(adapter->sge); + t1_interrupts_disable(adapter); + free_irq(adapter->pdev->irq, adapter); + if (adapter->params.has_msi) + pci_disable_msi(adapter->pdev); +} + +static int cxgb_open(struct net_device *dev) +{ + int err; + struct adapter *adapter = dev->ml_priv; + int other_ports = adapter->open_device_map & PORT_MASK; + + napi_enable(&adapter->napi); + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { + napi_disable(&adapter->napi); + return err; + } + + __set_bit(dev->if_port, &adapter->open_device_map); + link_start(&adapter->port[dev->if_port]); + netif_start_queue(dev); + if (!other_ports && adapter->params.stats_update_period) + schedule_mac_stats_update(adapter, + adapter->params.stats_update_period); + + t1_vlan_mode(adapter, dev->features); + return 0; +} + +static int cxgb_close(struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + struct cmac *mac = p->mac; + + netif_stop_queue(dev); + napi_disable(&adapter->napi); + mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); + netif_carrier_off(dev); + + clear_bit(dev->if_port, &adapter->open_device_map); + if (adapter->params.stats_update_period && + !(adapter->open_device_map & PORT_MASK)) { + /* Stop statistics accumulation. 
*/ + smp_mb__after_clear_bit(); + spin_lock(&adapter->work_lock); /* sync with update task */ + spin_unlock(&adapter->work_lock); + cancel_mac_stats_update(adapter); + } + + if (!adapter->open_device_map) + cxgb_down(adapter); + return 0; +} + +static struct net_device_stats *t1_get_stats(struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + struct net_device_stats *ns = &p->netstats; + const struct cmac_statistics *pstats; + + /* Do a full update of the MAC stats */ + pstats = p->mac->ops->statistics_update(p->mac, + MAC_STATS_UPDATE_FULL); + + ns->tx_packets = pstats->TxUnicastFramesOK + + pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK; + + ns->rx_packets = pstats->RxUnicastFramesOK + + pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK; + + ns->tx_bytes = pstats->TxOctetsOK; + ns->rx_bytes = pstats->RxOctetsOK; + + ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors + + pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions; + ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors + + pstats->RxFCSErrors + pstats->RxAlignErrors + + pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors + + pstats->RxSymbolErrors + pstats->RxRuntErrors; + + ns->multicast = pstats->RxMulticastFramesOK; + ns->collisions = pstats->TxTotalCollisions; + + /* detailed rx_errors */ + ns->rx_length_errors = pstats->RxFrameTooLongErrors + + pstats->RxJabberErrors; + ns->rx_over_errors = 0; + ns->rx_crc_errors = pstats->RxFCSErrors; + ns->rx_frame_errors = pstats->RxAlignErrors; + ns->rx_fifo_errors = 0; + ns->rx_missed_errors = 0; + + /* detailed tx_errors */ + ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions; + ns->tx_carrier_errors = 0; + ns->tx_fifo_errors = pstats->TxUnderrun; + ns->tx_heartbeat_errors = 0; + ns->tx_window_errors = pstats->TxLateCollisions; + return ns; +} + +static u32 get_msglevel(struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + + return adapter->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + struct adapter *adapter = dev->ml_priv; + + adapter->msg_enable = val; +} + +static char stats_strings[][ETH_GSTRING_LEN] = { + "TxOctetsOK", + "TxOctetsBad", + "TxUnicastFramesOK", + "TxMulticastFramesOK", + "TxBroadcastFramesOK", + "TxPauseFrames", + "TxFramesWithDeferredXmissions", + "TxLateCollisions", + "TxTotalCollisions", + "TxFramesAbortedDueToXSCollisions", + "TxUnderrun", + "TxLengthErrors", + "TxInternalMACXmitError", + "TxFramesWithExcessiveDeferral", + "TxFCSErrors", + "TxJumboFramesOk", + "TxJumboOctetsOk", + + "RxOctetsOK", + "RxOctetsBad", + "RxUnicastFramesOK", + "RxMulticastFramesOK", + "RxBroadcastFramesOK", + "RxPauseFrames", + "RxFCSErrors", + "RxAlignErrors", + "RxSymbolErrors", + "RxDataErrors", + "RxSequenceErrors", + "RxRuntErrors", + "RxJabberErrors", + "RxInternalMACRcvError", + "RxInRangeLengthErrors", + "RxOutOfRangeLengthField", + "RxFrameTooLongErrors", + "RxJumboFramesOk", + "RxJumboOctetsOk", + + /* Port stats */ + "RxCsumGood", + "TxCsumOffload", + "TxTso", + "RxVlan", + "TxVlan", + "TxNeedHeadroom", + + /* Interrupt stats */ + "rx drops", + "pure_rsps", + "unhandled irqs", + "respQ_empty", + "respQ_overflow", + "freelistQ_empty", + "pkt_too_big", + "pkt_mismatch", + "cmdQ_full0", + "cmdQ_full1", + + "espi_DIP2ParityErr", + "espi_DIP4Err", + "espi_RxDrops", + "espi_TxDrops", + "espi_RxOvfl", + "espi_ParityErr" +}; + +#define T2_REGMAP_SIZE (3 * 1024) + +static int 
get_regs_len(struct net_device *dev) +{ + return T2_REGMAP_SIZE; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct adapter *adapter = dev->ml_priv; + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); + strcpy(info->bus_info, pci_name(adapter->pdev)); +} + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } +} + +static void get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, stats_strings, sizeof(stats_strings)); +} + +static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + u64 *data) +{ + struct adapter *adapter = dev->ml_priv; + struct cmac *mac = adapter->port[dev->if_port].mac; + const struct cmac_statistics *s; + const struct sge_intr_counts *t; + struct sge_port_stats ss; + + s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); + t = t1_sge_get_intr_counts(adapter->sge); + t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); + + *data++ = s->TxOctetsOK; + *data++ = s->TxOctetsBad; + *data++ = s->TxUnicastFramesOK; + *data++ = s->TxMulticastFramesOK; + *data++ = s->TxBroadcastFramesOK; + *data++ = s->TxPauseFrames; + *data++ = s->TxFramesWithDeferredXmissions; + *data++ = s->TxLateCollisions; + *data++ = s->TxTotalCollisions; + *data++ = s->TxFramesAbortedDueToXSCollisions; + *data++ = s->TxUnderrun; + *data++ = s->TxLengthErrors; + *data++ = s->TxInternalMACXmitError; + *data++ = s->TxFramesWithExcessiveDeferral; + *data++ = s->TxFCSErrors; + *data++ = s->TxJumboFramesOK; + *data++ = s->TxJumboOctetsOK; + + *data++ = s->RxOctetsOK; + *data++ = s->RxOctetsBad; + *data++ = s->RxUnicastFramesOK; + *data++ = s->RxMulticastFramesOK; + *data++ = s->RxBroadcastFramesOK; + *data++ = s->RxPauseFrames; + *data++ = s->RxFCSErrors; + *data++ = s->RxAlignErrors; + *data++ = s->RxSymbolErrors; + *data++ = s->RxDataErrors; + *data++ = s->RxSequenceErrors; + *data++ = s->RxRuntErrors; + *data++ = s->RxJabberErrors; + *data++ = s->RxInternalMACRcvError; + *data++ = s->RxInRangeLengthErrors; + *data++ = s->RxOutOfRangeLengthField; + *data++ = s->RxFrameTooLongErrors; + *data++ = s->RxJumboFramesOK; + *data++ = s->RxJumboOctetsOK; + + *data++ = ss.rx_cso_good; + *data++ = ss.tx_cso; + *data++ = ss.tx_tso; + *data++ = ss.vlan_xtract; + *data++ = ss.vlan_insert; + *data++ = ss.tx_need_hdrroom; + + *data++ = t->rx_drops; + *data++ = t->pure_rsps; + *data++ = t->unhandled_irqs; + *data++ = t->respQ_empty; + *data++ = t->respQ_overflow; + *data++ = t->freelistQ_empty; + *data++ = t->pkt_too_big; + *data++ = t->pkt_mismatch; + *data++ = t->cmdQ_full[0]; + *data++ = t->cmdQ_full[1]; + + if (adapter->espi) { + const struct espi_intr_counts *e; + + e = t1_espi_get_intr_counts(adapter->espi); + *data++ = e->DIP2_parity_err; + *data++ = e->DIP4_err; + *data++ = e->rx_drops; + *data++ = e->tx_drops; + *data++ = e->rx_ovflw; + *data++ = e->parity_err; + } +} + +static inline void reg_block_dump(struct adapter *ap, void *buf, + unsigned int start, unsigned int end) +{ + u32 *p = buf + start; + + for ( ; start <= end; start += sizeof(u32)) + *p++ = readl(ap->regs + start); +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct adapter *ap = dev->ml_priv; + + /* + * Version scheme: bits 0..9: chip version, bits 10..15: chip revision + */ + regs->version = 2; 
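+	/*
+	 * Illustration (not from the original code): under the scheme
+	 * above, a version-2 chip at revision 1 would be encoded as
+	 * (1 << 10) | 2 = 0x402; here only the chip-version bits are set.
+	 */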
+ + memset(buf, 0, T2_REGMAP_SIZE); + reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER); + reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE); + reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR); + reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT); + reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE); + reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE); + reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT); + reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL); + reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE); + reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD); +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + + cmd->supported = p->link_config.supported; + cmd->advertising = p->link_config.advertising; + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, p->link_config.speed); + cmd->duplex = p->link_config.duplex; + } else { + ethtool_cmd_speed_set(cmd, -1); + cmd->duplex = -1; + } + + cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; + cmd->phy_address = p->phy->mdio.prtad; + cmd->transceiver = XCVR_EXTERNAL; + cmd->autoneg = p->link_config.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static int speed_duplex_to_caps(int speed, int duplex) +{ + int cap = 0; + + switch (speed) { + case SPEED_10: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_10baseT_Full; + else + cap = SUPPORTED_10baseT_Half; + break; + case SPEED_100: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_100baseT_Full; + else + cap = SUPPORTED_100baseT_Half; + break; + case SPEED_1000: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_1000baseT_Full; + else + cap = SUPPORTED_1000baseT_Half; + break; + case SPEED_10000: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_10000baseT_Full; + } + return cap; +} + +#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ + ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ + ADVERTISED_10000baseT_Full) + +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + struct link_config *lc = &p->link_config; + + if (!(lc->supported & SUPPORTED_Autoneg)) + return -EOPNOTSUPP; /* can't change speed/duplex */ + + if (cmd->autoneg == AUTONEG_DISABLE) { + u32 speed = ethtool_cmd_speed(cmd); + int cap = speed_duplex_to_caps(speed, cmd->duplex); + + if (!(lc->supported & cap) || (speed == SPEED_1000)) + return -EINVAL; + lc->requested_speed = speed; + lc->requested_duplex = cmd->duplex; + lc->advertising = 0; + } else { + cmd->advertising &= ADVERTISED_MASK; + if (cmd->advertising & (cmd->advertising - 1)) + cmd->advertising = lc->supported; + cmd->advertising &= lc->supported; + if (!cmd->advertising) + return -EINVAL; + lc->requested_speed = SPEED_INVALID; + lc->requested_duplex = DUPLEX_INVALID; + lc->advertising = cmd->advertising | ADVERTISED_Autoneg; + } + lc->autoneg = cmd->autoneg; + if (netif_running(dev)) + t1_link_start(p->phy, p->mac, lc); + return 0; +} + +static void get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + + epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; + epause->rx_pause = (p->link_config.fc & PAUSE_RX) 
!= 0; + epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; +} + +static int set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + struct link_config *lc = &p->link_config; + + if (epause->autoneg == AUTONEG_DISABLE) + lc->requested_fc = 0; + else if (lc->supported & SUPPORTED_Autoneg) + lc->requested_fc = PAUSE_AUTONEG; + else + return -EINVAL; + + if (epause->rx_pause) + lc->requested_fc |= PAUSE_RX; + if (epause->tx_pause) + lc->requested_fc |= PAUSE_TX; + if (lc->autoneg == AUTONEG_ENABLE) { + if (netif_running(dev)) + t1_link_start(p->phy, p->mac, lc); + } else { + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + if (netif_running(dev)) + p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1, + lc->fc); + } + return 0; +} + +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + struct adapter *adapter = dev->ml_priv; + int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; + + e->rx_max_pending = MAX_RX_BUFFERS; + e->rx_mini_max_pending = 0; + e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; + e->tx_max_pending = MAX_CMDQ_ENTRIES; + + e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl]; + e->rx_mini_pending = 0; + e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl]; + e->tx_pending = adapter->params.sge.cmdQ_size[0]; +} + +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + struct adapter *adapter = dev->ml_priv; + int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; + + if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending || + e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || + e->tx_pending > MAX_CMDQ_ENTRIES || + e->rx_pending < MIN_FL_ENTRIES || + e->rx_jumbo_pending < MIN_FL_ENTRIES || + e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1)) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; + adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; + adapter->params.sge.cmdQ_size[0] = e->tx_pending; + adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ? + MAX_CMDQ1_ENTRIES : e->tx_pending; + return 0; +} + +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + struct adapter *adapter = dev->ml_priv; + + adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; + adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; + adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; + t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge); + return 0; +} + +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + struct adapter *adapter = dev->ml_priv; + + c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs; + c->rate_sample_interval = adapter->params.sge.sample_interval_usecs; + c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable; + return 0; +} + +static int get_eeprom_len(struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + + return t1_is_asic(adapter) ? 
EEPROM_SIZE : 0; +} + +#define EEPROM_MAGIC(ap) \ + (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16)) + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, + u8 *data) +{ + int i; + u8 buf[EEPROM_SIZE] __attribute__((aligned(4))); + struct adapter *adapter = dev->ml_priv; + + e->magic = EEPROM_MAGIC(adapter); + for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32)) + t1_seeprom_read(adapter, i, (__le32 *)&buf[i]); + memcpy(data, buf + e->offset, e->len); + return 0; +} + +static const struct ethtool_ops t1_ethtool_ops = { + .get_settings = get_settings, + .set_settings = set_settings, + .get_drvinfo = get_drvinfo, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_ringparam = get_sge_param, + .set_ringparam = set_sge_param, + .get_coalesce = get_coalesce, + .set_coalesce = set_coalesce, + .get_eeprom_len = get_eeprom_len, + .get_eeprom = get_eeprom, + .get_pauseparam = get_pauseparam, + .set_pauseparam = set_pauseparam, + .get_link = ethtool_op_get_link, + .get_strings = get_strings, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_stats, + .get_regs_len = get_regs_len, + .get_regs = get_regs, +}; + +static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct adapter *adapter = dev->ml_priv; + struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio; + + return mdio_mii_ioctl(mdio, if_mii(req), cmd); +} + +static int t1_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + struct adapter *adapter = dev->ml_priv; + struct cmac *mac = adapter->port[dev->if_port].mac; + + if (!mac->ops->set_mtu) + return -EOPNOTSUPP; + if (new_mtu < 68) + return -EINVAL; + if ((ret = mac->ops->set_mtu(mac, new_mtu))) + return ret; + dev->mtu = new_mtu; + return 0; +} + +static int t1_set_mac_addr(struct net_device *dev, void *p) +{ + struct adapter *adapter = dev->ml_priv; + struct cmac *mac = adapter->port[dev->if_port].mac; + struct sockaddr *addr = p; + + if (!mac->ops->macaddress_set) + return -EOPNOTSUPP; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + mac->ops->macaddress_set(mac, dev->dev_addr); + return 0; +} + +static u32 t1_fix_features(struct net_device *dev, u32 features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_RX) + features |= NETIF_F_HW_VLAN_TX; + else + features &= ~NETIF_F_HW_VLAN_TX; + + return features; +} + +static int t1_set_features(struct net_device *dev, u32 features) +{ + u32 changed = dev->features ^ features; + struct adapter *adapter = dev->ml_priv; + + if (changed & NETIF_F_HW_VLAN_RX) + t1_vlan_mode(adapter, features); + + return 0; +} +#ifdef CONFIG_NET_POLL_CONTROLLER +static void t1_netpoll(struct net_device *dev) +{ + unsigned long flags; + struct adapter *adapter = dev->ml_priv; + + local_irq_save(flags); + t1_interrupt(adapter->pdev->irq, adapter); + local_irq_restore(flags); +} +#endif + +/* + * Periodic accumulation of MAC statistics. This is used only if the MAC + * does not have any other way to prevent stats counter overflow. 
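+ * Each run does a MAC_STATS_UPDATE_FAST pass over the ports that are up
+ * and then, under work_lock, re-arms itself while any port remains open.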
+ */ +static void mac_stats_task(struct work_struct *work) +{ + int i; + struct adapter *adapter = + container_of(work, struct adapter, stats_update_task.work); + + for_each_port(adapter, i) { + struct port_info *p = &adapter->port[i]; + + if (netif_running(p->dev)) + p->mac->ops->statistics_update(p->mac, + MAC_STATS_UPDATE_FAST); + } + + /* Schedule the next statistics update if any port is active. */ + spin_lock(&adapter->work_lock); + if (adapter->open_device_map & PORT_MASK) + schedule_mac_stats_update(adapter, + adapter->params.stats_update_period); + spin_unlock(&adapter->work_lock); +} + +/* + * Processes elmer0 external interrupts in process context. + */ +static void ext_intr_task(struct work_struct *work) +{ + struct adapter *adapter = + container_of(work, struct adapter, ext_intr_handler_task); + + t1_elmer0_ext_intr_handler(adapter); + + /* Now reenable external interrupts */ + spin_lock_irq(&adapter->async_lock); + adapter->slow_intr_mask |= F_PL_INTR_EXT; + writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE); + writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, + adapter->regs + A_PL_ENABLE); + spin_unlock_irq(&adapter->async_lock); +} + +/* + * Interrupt-context handler for elmer0 external interrupts. + */ +void t1_elmer0_ext_intr(struct adapter *adapter) +{ + /* + * Schedule a task to handle external interrupts as we require + * a process context. We disable EXT interrupts in the interim + * and let the task reenable them when it's done. + */ + adapter->slow_intr_mask &= ~F_PL_INTR_EXT; + writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, + adapter->regs + A_PL_ENABLE); + schedule_work(&adapter->ext_intr_handler_task); +} + +void t1_fatal_err(struct adapter *adapter) +{ + if (adapter->flags & FULL_INIT_DONE) { + t1_sge_stop(adapter->sge); + t1_interrupts_disable(adapter); + } + pr_alert("%s: encountered fatal error, operation suspended\n", + adapter->name); +} + +static const struct net_device_ops cxgb_netdev_ops = { + .ndo_open = cxgb_open, + .ndo_stop = cxgb_close, + .ndo_start_xmit = t1_start_xmit, + .ndo_get_stats = t1_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = t1_set_rxmode, + .ndo_do_ioctl = t1_ioctl, + .ndo_change_mtu = t1_change_mtu, + .ndo_set_mac_address = t1_set_mac_addr, + .ndo_fix_features = t1_fix_features, + .ndo_set_features = t1_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = t1_netpoll, +#endif +}; + +static int __devinit init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + static int version_printed; + + int i, err, pci_using_dac = 0; + unsigned long mmio_start, mmio_len; + const struct board_info *bi; + struct adapter *adapter = NULL; + struct port_info *pi; + + if (!version_printed) { + printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION, + DRV_VERSION); + ++version_printed; + } + + err = pci_enable_device(pdev); + if (err) + return err; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + pr_err("%s: cannot find PCI device memory base address\n", + pci_name(pdev)); + err = -ENODEV; + goto out_disable_pdev; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + pr_err("%s: unable to obtain 64-bit DMA for " + "consistent allocations\n", pci_name(pdev)); + err = -ENODEV; + goto out_disable_pdev; + } + + } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + pr_err("%s: no usable DMA configuration\n", pci_name(pdev)); + goto out_disable_pdev; + } + + err = 
pci_request_regions(pdev, DRV_NAME); + if (err) { + pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev)); + goto out_disable_pdev; + } + + pci_set_master(pdev); + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + bi = t1_get_board_info(ent->driver_data); + + for (i = 0; i < bi->port_number; ++i) { + struct net_device *netdev; + + netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter)); + if (!netdev) { + err = -ENOMEM; + goto out_free_dev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (!adapter) { + adapter = netdev_priv(netdev); + adapter->pdev = pdev; + adapter->port[0].dev = netdev; /* so we don't leak it */ + + adapter->regs = ioremap(mmio_start, mmio_len); + if (!adapter->regs) { + pr_err("%s: cannot map device registers\n", + pci_name(pdev)); + err = -ENOMEM; + goto out_free_dev; + } + + if (t1_get_board_rev(adapter, bi, &adapter->params)) { + err = -ENODEV; /* Can't handle this chip rev */ + goto out_free_dev; + } + + adapter->name = pci_name(pdev); + adapter->msg_enable = dflt_msg_enable; + adapter->mmio_len = mmio_len; + + spin_lock_init(&adapter->tpi_lock); + spin_lock_init(&adapter->work_lock); + spin_lock_init(&adapter->async_lock); + spin_lock_init(&adapter->mac_lock); + + INIT_WORK(&adapter->ext_intr_handler_task, + ext_intr_task); + INIT_DELAYED_WORK(&adapter->stats_update_task, + mac_stats_task); + + pci_set_drvdata(pdev, netdev); + } + + pi = &adapter->port[i]; + pi->dev = netdev; + netif_carrier_off(netdev); + netdev->irq = pdev->irq; + netdev->if_port = i; + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len - 1; + netdev->ml_priv = adapter; + netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_RXCSUM; + netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_RXCSUM | NETIF_F_LLTX; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + if (vlan_tso_capable(adapter)) { + netdev->features |= + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + netdev->hw_features |= NETIF_F_HW_VLAN_RX; + + /* T204: disable TSO */ + if (!(is_T2(adapter)) || bi->port_number != 4) { + netdev->hw_features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO; + } + } + + netdev->netdev_ops = &cxgb_netdev_ops; + netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ? + sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt); + + netif_napi_add(netdev, &adapter->napi, t1_poll, 64); + + SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); + } + + if (t1_init_sw_modules(adapter, bi) < 0) { + err = -ENODEV; + goto out_free_dev; + } + + /* + * The card is now ready to go. If any errors occur during device + * registration we do not fail the whole card but rather proceed only + * with the ports we manage to register successfully. However we must + * register at least one net device. + */ + for (i = 0; i < bi->port_number; ++i) { + err = register_netdev(adapter->port[i].dev); + if (err) + pr_warning("%s: cannot register net device %s, skipping\n", + pci_name(pdev), adapter->port[i].dev->name); + else { + /* + * Change the name we use for messages to the name of + * the first successfully registered interface. 
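+			 * Until this point adapter->name still holds the
+			 * PCI device name assigned earlier in this function.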
+ */ + if (!adapter->registered_device_map) + adapter->name = adapter->port[i].dev->name; + + __set_bit(i, &adapter->registered_device_map); + } + } + if (!adapter->registered_device_map) { + pr_err("%s: could not register any net devices\n", + pci_name(pdev)); + goto out_release_adapter_res; + } + + printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name, + bi->desc, adapter->params.chip_revision, + adapter->params.pci.is_pcix ? "PCIX" : "PCI", + adapter->params.pci.speed, adapter->params.pci.width); + + /* + * Set the T1B ASIC and memory clocks. + */ + if (t1powersave) + adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */ + else + adapter->t1powersave = HCLOCK; + if (t1_is_T1B(adapter)) + t1_clock(adapter, t1powersave); + + return 0; + +out_release_adapter_res: + t1_free_sw_modules(adapter); +out_free_dev: + if (adapter) { + if (adapter->regs) + iounmap(adapter->regs); + for (i = bi->port_number - 1; i >= 0; --i) + if (adapter->port[i].dev) + free_netdev(adapter->port[i].dev); + } + pci_release_regions(pdev); +out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void bit_bang(struct adapter *adapter, int bitdata, int nbits) +{ + int data; + int i; + u32 val; + + enum { + S_CLOCK = 1 << 3, + S_DATA = 1 << 4 + }; + + for (i = (nbits - 1); i > -1; i--) { + + udelay(50); + + data = ((bitdata >> i) & 0x1); + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + + if (data) + val |= S_DATA; + else + val &= ~S_DATA; + + udelay(50); + + /* Set SCLOCK low */ + val &= ~S_CLOCK; + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + + udelay(50); + + /* Write SCLOCK high */ + val |= S_CLOCK; + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + + } +} + +static int t1_clock(struct adapter *adapter, int mode) +{ + u32 val; + int M_CORE_VAL; + int M_MEM_VAL; + + enum { + M_CORE_BITS = 9, + T_CORE_VAL = 0, + T_CORE_BITS = 2, + N_CORE_VAL = 0, + N_CORE_BITS = 2, + M_MEM_BITS = 9, + T_MEM_VAL = 0, + T_MEM_BITS = 2, + N_MEM_VAL = 0, + N_MEM_BITS = 2, + NP_LOAD = 1 << 17, + S_LOAD_MEM = 1 << 5, + S_LOAD_CORE = 1 << 6, + S_CLOCK = 1 << 3 + }; + + if (!t1_is_T1B(adapter)) + return -ENODEV; /* Can't re-clock this chip. */ + + if (mode & 2) + return 0; /* show current mode. */ + + if ((adapter->t1powersave & 1) == (mode & 1)) + return -EALREADY; /* ASIC already running in mode. */ + + if ((mode & 1) == HCLOCK) { + M_CORE_VAL = 0x14; + M_MEM_VAL = 0x18; + adapter->t1powersave = HCLOCK; /* overclock */ + } else { + M_CORE_VAL = 0xe; + M_MEM_VAL = 0x10; + adapter->t1powersave = LCLOCK; /* underclock */ + } + + /* Don't interrupt this serial stream! 
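+	 * The synthesizers are programmed by bit-banging GPO pins with
+	 * 50us settling delays (see bit_bang() above), so tpi_lock is
+	 * held across the whole sequence to keep other TPI accesses
+	 * from interleaving with it.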
*/ + spin_lock(&adapter->tpi_lock); + + /* Initialize for ASIC core */ + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= NP_LOAD; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~S_LOAD_CORE; + val &= ~S_CLOCK; + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + + /* Serial program the ASIC clock synthesizer */ + bit_bang(adapter, T_CORE_VAL, T_CORE_BITS); + bit_bang(adapter, N_CORE_VAL, N_CORE_BITS); + bit_bang(adapter, M_CORE_VAL, M_CORE_BITS); + udelay(50); + + /* Finish ASIC core */ + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= S_LOAD_CORE; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~S_LOAD_CORE; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + + /* Initialize for memory */ + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= NP_LOAD; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~S_LOAD_MEM; + val &= ~S_CLOCK; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + + /* Serial program the memory clock synthesizer */ + bit_bang(adapter, T_MEM_VAL, T_MEM_BITS); + bit_bang(adapter, N_MEM_VAL, N_MEM_BITS); + bit_bang(adapter, M_MEM_VAL, M_MEM_BITS); + udelay(50); + + /* Finish memory */ + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= S_LOAD_MEM; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~S_LOAD_MEM; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + + spin_unlock(&adapter->tpi_lock); + + return 0; +} + +static inline void t1_sw_reset(struct pci_dev *pdev) +{ + pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3); + pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0); +} + +static void __devexit remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct adapter *adapter = dev->ml_priv; + int i; + + for_each_port(adapter, i) { + if (test_bit(i, &adapter->registered_device_map)) + unregister_netdev(adapter->port[i].dev); + } + + t1_free_sw_modules(adapter); + iounmap(adapter->regs); + + while (--i >= 0) { + if (adapter->port[i].dev) + free_netdev(adapter->port[i].dev); + } + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + t1_sw_reset(pdev); +} + +static struct pci_driver driver = { + .name = DRV_NAME, + .id_table = t1_pci_tbl, + .probe = init_one, + .remove = __devexit_p(remove_one), +}; + +static int __init t1_init_module(void) +{ + return pci_register_driver(&driver); +} + +static void __exit t1_cleanup_module(void) +{ + pci_unregister_driver(&driver); +} + +module_init(t1_init_module); +module_exit(t1_cleanup_module); diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h new file mode 100644 index 0000000..eef655c --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h @@ -0,0 +1,158 @@ +/***************************************************************************** + * * + * File: elmer0.h * + * $Revision: 1.6 $ * + * $Date: 2005/06/21 22:49:43 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. 
* + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_ELMER0_H_ +#define _CXGB_ELMER0_H_ + +/* ELMER0 flavors */ +enum { + ELMER0_XC2S300E_6FT256_C, + ELMER0_XC2S100E_6TQ144_C +}; + +/* ELMER0 registers */ +#define A_ELMER0_VERSION 0x100000 +#define A_ELMER0_PHY_CFG 0x100004 +#define A_ELMER0_INT_ENABLE 0x100008 +#define A_ELMER0_INT_CAUSE 0x10000c +#define A_ELMER0_GPI_CFG 0x100010 +#define A_ELMER0_GPI_STAT 0x100014 +#define A_ELMER0_GPO 0x100018 +#define A_ELMER0_PORT0_MI1_CFG 0x400000 + +#define S_MI1_MDI_ENABLE 0 +#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE) +#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U) + +#define S_MI1_MDI_INVERT 1 +#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT) +#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U) + +#define S_MI1_PREAMBLE_ENABLE 2 +#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE) +#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U) + +#define S_MI1_SOF 3 +#define M_MI1_SOF 0x3 +#define V_MI1_SOF(x) ((x) << S_MI1_SOF) +#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF) + +#define S_MI1_CLK_DIV 5 +#define M_MI1_CLK_DIV 0xff +#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV) +#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV) + +#define A_ELMER0_PORT0_MI1_ADDR 0x400004 + +#define S_MI1_REG_ADDR 0 +#define M_MI1_REG_ADDR 0x1f +#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR) +#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR) + +#define S_MI1_PHY_ADDR 5 +#define M_MI1_PHY_ADDR 0x1f +#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR) +#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR) + +#define A_ELMER0_PORT0_MI1_DATA 0x400008 + +#define S_MI1_DATA 0 +#define M_MI1_DATA 0xffff +#define V_MI1_DATA(x) ((x) << S_MI1_DATA) +#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA) + +#define A_ELMER0_PORT0_MI1_OP 0x40000c + +#define S_MI1_OP 0 +#define M_MI1_OP 0x3 +#define V_MI1_OP(x) ((x) << S_MI1_OP) +#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP) + +#define S_MI1_ADDR_AUTOINC 2 +#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC) +#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U) + +#define S_MI1_OP_BUSY 31 +#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY) +#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U) + +#define A_ELMER0_PORT1_MI1_CFG 0x500000 +#define A_ELMER0_PORT1_MI1_ADDR 0x500004 +#define A_ELMER0_PORT1_MI1_DATA 0x500008 +#define A_ELMER0_PORT1_MI1_OP 0x50000c +#define A_ELMER0_PORT2_MI1_CFG 0x600000 +#define A_ELMER0_PORT2_MI1_ADDR 0x600004 +#define A_ELMER0_PORT2_MI1_DATA 0x600008 +#define A_ELMER0_PORT2_MI1_OP 0x60000c +#define A_ELMER0_PORT3_MI1_CFG 0x700000 +#define A_ELMER0_PORT3_MI1_ADDR 0x700004 +#define A_ELMER0_PORT3_MI1_DATA 0x700008 
+#define A_ELMER0_PORT3_MI1_OP 0x70000c + +/* Simple bit definition for GPI and GP0 registers. */ +#define ELMER0_GP_BIT0 0x0001 +#define ELMER0_GP_BIT1 0x0002 +#define ELMER0_GP_BIT2 0x0004 +#define ELMER0_GP_BIT3 0x0008 +#define ELMER0_GP_BIT4 0x0010 +#define ELMER0_GP_BIT5 0x0020 +#define ELMER0_GP_BIT6 0x0040 +#define ELMER0_GP_BIT7 0x0080 +#define ELMER0_GP_BIT8 0x0100 +#define ELMER0_GP_BIT9 0x0200 +#define ELMER0_GP_BIT10 0x0400 +#define ELMER0_GP_BIT11 0x0800 +#define ELMER0_GP_BIT12 0x1000 +#define ELMER0_GP_BIT13 0x2000 +#define ELMER0_GP_BIT14 0x4000 +#define ELMER0_GP_BIT15 0x8000 +#define ELMER0_GP_BIT16 0x10000 +#define ELMER0_GP_BIT17 0x20000 +#define ELMER0_GP_BIT18 0x40000 +#define ELMER0_GP_BIT19 0x80000 + +#define MI1_OP_DIRECT_WRITE 1 +#define MI1_OP_DIRECT_READ 2 + +#define MI1_OP_INDIRECT_ADDRESS 0 +#define MI1_OP_INDIRECT_WRITE 1 +#define MI1_OP_INDIRECT_READ_INC 2 +#define MI1_OP_INDIRECT_READ 3 + +#endif /* _CXGB_ELMER0_H_ */ + diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c new file mode 100644 index 0000000..639ff19 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/espi.c @@ -0,0 +1,373 @@ +/***************************************************************************** + * * + * File: espi.c * + * $Revision: 1.14 $ * + * $Date: 2005/05/14 00:59:32 $ * + * Description: * + * Ethernet SPI functionality. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. 
* + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#include "common.h" +#include "regs.h" +#include "espi.h" + +struct peespi { + adapter_t *adapter; + struct espi_intr_counts intr_cnt; + u32 misc_ctrl; + spinlock_t lock; +}; + +#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \ + F_RAMPARITYERR | F_DIP2PARITYERR) +#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \ + | F_MONITORED_INTERFACE) + +#define TRICN_CNFG 14 +#define TRICN_CMD_READ 0x11 +#define TRICN_CMD_WRITE 0x21 +#define TRICN_CMD_ATTEMPTS 10 + +static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr, + int ch_addr, int reg_offset, u32 wr_data) +{ + int busy, attempts = TRICN_CMD_ATTEMPTS; + + writel(V_WRITE_DATA(wr_data) | + V_REGISTER_OFFSET(reg_offset) | + V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) | + V_BUNDLE_ADDR(bundle_addr) | + V_SPI4_COMMAND(TRICN_CMD_WRITE), + adapter->regs + A_ESPI_CMD_ADDR); + writel(0, adapter->regs + A_ESPI_GOSTAT); + + do { + busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY; + } while (busy && --attempts); + + if (busy) + pr_err("%s: TRICN write timed out\n", adapter->name); + + return busy; +} + +static int tricn_init(adapter_t *adapter) +{ + int i, sme = 1; + + if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) { + pr_err("%s: ESPI clock not ready\n", adapter->name); + return -1; + } + + writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET); + + if (sme) { + tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); + tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); + tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); + } + for (i = 1; i <= 8; i++) + tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); + for (i = 1; i <= 2; i++) + tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); + for (i = 1; i <= 3; i++) + tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); + tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1); + tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1); + tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1); + tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80); + tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1); + + writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST, + adapter->regs + A_ESPI_RX_RESET); + + return 0; +} + +void t1_espi_intr_enable(struct peespi *espi) +{ + u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE); + + /* + * Cannot enable ESPI interrupts on T1B because HW asserts the + * interrupt incorrectly, namely the driver gets ESPI interrupts + * but no data is actually dropped (can verify this reading the ESPI + * drop registers). Also, once the ESPI interrupt is asserted it + * cannot be cleared (HW bug). + */ + enable = t1_is_T1B(espi->adapter) ? 
0 : ESPI_INTR_MASK; + writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE); + writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE); +} + +void t1_espi_intr_clear(struct peespi *espi) +{ + readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); + writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS); + writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE); +} + +void t1_espi_intr_disable(struct peespi *espi) +{ + u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE); + + writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE); + writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE); +} + +int t1_espi_intr_handler(struct peespi *espi) +{ + u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS); + + if (status & F_DIP4ERR) + espi->intr_cnt.DIP4_err++; + if (status & F_RXDROP) + espi->intr_cnt.rx_drops++; + if (status & F_TXDROP) + espi->intr_cnt.tx_drops++; + if (status & F_RXOVERFLOW) + espi->intr_cnt.rx_ovflw++; + if (status & F_RAMPARITYERR) + espi->intr_cnt.parity_err++; + if (status & F_DIP2PARITYERR) { + espi->intr_cnt.DIP2_parity_err++; + + /* + * Must read the error count to clear the interrupt + * that it causes. + */ + readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); + } + + /* + * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we + * write the status as is. + */ + if (status && t1_is_T1B(espi->adapter)) + status = 1; + writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS); + return 0; +} + +const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi) +{ + return &espi->intr_cnt; +} + +static void espi_setup_for_pm3393(adapter_t *adapter) +{ + u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200; + + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1); + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3); + writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); + writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); + writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH); + writel(0x08000008, adapter->regs + A_ESPI_TRAIN); + writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG); +} + +static void espi_setup_for_vsc7321(adapter_t *adapter) +{ + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); + writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1); + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); + writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); + writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); + writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); + writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG); + + writel(0x08000008, adapter->regs + A_ESPI_TRAIN); +} + +/* + * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug. 
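+ * The RX FIFO watermarks chosen below likewise depend on the port
+ * count, with separate values for T2 in the four-port case.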
+ */ +static void espi_setup_for_ixf1010(adapter_t *adapter, int nports) +{ + writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); + if (nports == 4) { + if (is_T2(adapter)) { + writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); + writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); + } else { + writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); + writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); + } + } else { + writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); + writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); + } + writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG); + +} + +int t1_espi_init(struct peespi *espi, int mac_type, int nports) +{ + u32 status_enable_extra = 0; + adapter_t *adapter = espi->adapter; + + /* Disable ESPI training. MACs that can handle it enable it below. */ + writel(0, adapter->regs + A_ESPI_TRAIN); + + if (is_T2(adapter)) { + writel(V_OUT_OF_SYNC_COUNT(4) | + V_DIP2_PARITY_ERR_THRES(3) | + V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL); + writel(nports == 4 ? 0x200040 : 0x1000080, + adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); + } else + writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); + + if (mac_type == CHBT_MAC_PM3393) + espi_setup_for_pm3393(adapter); + else if (mac_type == CHBT_MAC_VSC7321) + espi_setup_for_vsc7321(adapter); + else if (mac_type == CHBT_MAC_IXF1010) { + status_enable_extra = F_INTEL1010MODE; + espi_setup_for_ixf1010(adapter, nports); + } else + return -1; + + writel(status_enable_extra | F_RXSTATUSENABLE, + adapter->regs + A_ESPI_FIFO_STATUS_ENABLE); + + if (is_T2(adapter)) { + tricn_init(adapter); + /* + * Always position the control at the 1st port egress IN + * (sop,eop) counter to reduce PIOs for T/N210 workaround. + */ + espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL); + espi->misc_ctrl &= ~MON_MASK; + espi->misc_ctrl |= F_MONITORED_DIRECTION; + if (adapter->params.nports == 1) + espi->misc_ctrl |= F_MONITORED_INTERFACE; + writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); + spin_lock_init(&espi->lock); + } + + return 0; +} + +void t1_espi_destroy(struct peespi *espi) +{ + kfree(espi); +} + +struct peespi *t1_espi_create(adapter_t *adapter) +{ + struct peespi *espi = kzalloc(sizeof(*espi), GFP_KERNEL); + + if (espi) + espi->adapter = adapter; + return espi; +} + +#if 0 +void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val) +{ + struct peespi *espi = adapter->espi; + + if (!is_T2(adapter)) + return; + spin_lock(&espi->lock); + espi->misc_ctrl = (val & ~MON_MASK) | + (espi->misc_ctrl & MON_MASK); + writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); + spin_unlock(&espi->lock); +} +#endif /* 0 */ + +u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait) +{ + struct peespi *espi = adapter->espi; + u32 sel; + + if (!is_T2(adapter)) + return 0; + + sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2); + if (!wait) { + if (!spin_trylock(&espi->lock)) + return 0; + } else + spin_lock(&espi->lock); + + if ((sel != (espi->misc_ctrl & MON_MASK))) { + writel(((espi->misc_ctrl & ~MON_MASK) | sel), + adapter->regs + A_ESPI_MISC_CONTROL); + sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); + writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); + } else + sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); + spin_unlock(&espi->lock); + return sel; +} + +/* + * This function is for T204 only. 
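+ * (T204 is the four-port T2 board, hence the per-port loop below.)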
+ * Compared with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in
+ * one shot, since there is no per-port counter on the output side.
+ */
+int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
+{
+	struct peespi *espi = adapter->espi;
+	u8 i, nport = (u8)adapter->params.nports;
+
+	if (!wait) {
+		if (!spin_trylock(&espi->lock))
+			return -1;
+	} else
+		spin_lock(&espi->lock);
+
+	if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
+		espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
+				  F_MONITORED_DIRECTION;
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	}
+	for (i = 0; i < nport; i++, valp++) {
+		if (i) {
+			writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
+			       adapter->regs + A_ESPI_MISC_CONTROL);
+		}
+		*valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+	}
+
+	writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	spin_unlock(&espi->lock);
+	return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
new file mode 100644
index 0000000..5694aad
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -0,0 +1,68 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: espi.h                                                              *
+ * $Revision: 1.7 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.
* + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_ESPI_H_ +#define _CXGB_ESPI_H_ + +#include "common.h" + +struct espi_intr_counts { + unsigned int DIP4_err; + unsigned int rx_drops; + unsigned int tx_drops; + unsigned int rx_ovflw; + unsigned int parity_err; + unsigned int DIP2_parity_err; +}; + +struct peespi; + +struct peespi *t1_espi_create(adapter_t *adapter); +void t1_espi_destroy(struct peespi *espi); +int t1_espi_init(struct peespi *espi, int mac_type, int nports); + +void t1_espi_intr_enable(struct peespi *); +void t1_espi_intr_clear(struct peespi *); +void t1_espi_intr_disable(struct peespi *); +int t1_espi_intr_handler(struct peespi *); +const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi); + +u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait); +int t1_espi_get_mon_t204(adapter_t *, u32 *, u8); + +#endif /* _CXGB_ESPI_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h new file mode 100644 index 0000000..ccdb2bc --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h @@ -0,0 +1,232 @@ +/* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */ + +/* + * FPGA specific definitions + */ + +#ifndef __CHELSIO_FPGA_DEFS_H__ +#define __CHELSIO_FPGA_DEFS_H__ + +#define FPGA_PCIX_ADDR_VERSION 0xA08 +#define FPGA_PCIX_ADDR_STAT 0xA0C + +/* FPGA master interrupt Cause/Enable bits */ +#define FPGA_PCIX_INTERRUPT_SGE_ERROR 0x1 +#define FPGA_PCIX_INTERRUPT_SGE_DATA 0x2 +#define FPGA_PCIX_INTERRUPT_TP 0x4 +#define FPGA_PCIX_INTERRUPT_MC3 0x8 +#define FPGA_PCIX_INTERRUPT_GMAC 0x10 +#define FPGA_PCIX_INTERRUPT_PCIX 0x20 + +/* TP interrupt register addresses */ +#define FPGA_TP_ADDR_INTERRUPT_ENABLE 0xA10 +#define FPGA_TP_ADDR_INTERRUPT_CAUSE 0xA14 +#define FPGA_TP_ADDR_VERSION 0xA18 + +/* TP interrupt Cause/Enable bits */ +#define FPGA_TP_INTERRUPT_MC4 0x1 +#define FPGA_TP_INTERRUPT_MC5 0x2 + +/* + * PM interrupt register addresses + */ +#define FPGA_MC3_REG_INTRENABLE 0xA20 +#define FPGA_MC3_REG_INTRCAUSE 0xA24 +#define FPGA_MC3_REG_VERSION 0xA28 + +/* + * GMAC interrupt register addresses + */ +#define FPGA_GMAC_ADDR_INTERRUPT_ENABLE 0xA30 +#define FPGA_GMAC_ADDR_INTERRUPT_CAUSE 0xA34 +#define FPGA_GMAC_ADDR_VERSION 0xA38 + +/* GMAC Cause/Enable bits */ +#define FPGA_GMAC_INTERRUPT_PORT0 0x1 +#define FPGA_GMAC_INTERRUPT_PORT1 0x2 +#define FPGA_GMAC_INTERRUPT_PORT2 0x4 +#define FPGA_GMAC_INTERRUPT_PORT3 0x8 + +/* MI0 registers */ +#define A_MI0_CLK 0xb00 + +#define S_MI0_CLK_DIV 0 +#define M_MI0_CLK_DIV 0xff +#define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV) +#define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV) + +#define S_MI0_CLK_CNT 8 +#define M_MI0_CLK_CNT 0xff +#define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT) +#define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT) + +#define A_MI0_CSR 0xb04 + +#define S_MI0_CSR_POLL 0 +#define V_MI0_CSR_POLL(x) ((x) << S_MI0_CSR_POLL) +#define F_MI0_CSR_POLL V_MI0_CSR_POLL(1U) + +#define S_MI0_PREAMBLE 1 +#define V_MI0_PREAMBLE(x) ((x) << S_MI0_PREAMBLE) +#define F_MI0_PREAMBLE V_MI0_PREAMBLE(1U) + +#define S_MI0_INTR_ENABLE 2 +#define V_MI0_INTR_ENABLE(x) ((x) << S_MI0_INTR_ENABLE) +#define F_MI0_INTR_ENABLE V_MI0_INTR_ENABLE(1U) + +#define S_MI0_BUSY 3 
+#define V_MI0_BUSY(x) ((x) << S_MI0_BUSY) +#define F_MI0_BUSY V_MI0_BUSY(1U) + +#define S_MI0_MDIO 4 +#define V_MI0_MDIO(x) ((x) << S_MI0_MDIO) +#define F_MI0_MDIO V_MI0_MDIO(1U) + +#define A_MI0_ADDR 0xb08 + +#define S_MI0_PHY_REG_ADDR 0 +#define M_MI0_PHY_REG_ADDR 0x1f +#define V_MI0_PHY_REG_ADDR(x) ((x) << S_MI0_PHY_REG_ADDR) +#define G_MI0_PHY_REG_ADDR(x) (((x) >> S_MI0_PHY_REG_ADDR) & M_MI0_PHY_REG_ADDR) + +#define S_MI0_PHY_ADDR 5 +#define M_MI0_PHY_ADDR 0x1f +#define V_MI0_PHY_ADDR(x) ((x) << S_MI0_PHY_ADDR) +#define G_MI0_PHY_ADDR(x) (((x) >> S_MI0_PHY_ADDR) & M_MI0_PHY_ADDR) + +#define A_MI0_DATA_EXT 0xb0c +#define A_MI0_DATA_INT 0xb10 + +/* GMAC registers */ +#define A_GMAC_MACID_LO 0x28 +#define A_GMAC_MACID_HI 0x2c +#define A_GMAC_CSR 0x30 + +#define S_INTERFACE 0 +#define M_INTERFACE 0x3 +#define V_INTERFACE(x) ((x) << S_INTERFACE) +#define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE) + +#define S_MAC_TX_ENABLE 2 +#define V_MAC_TX_ENABLE(x) ((x) << S_MAC_TX_ENABLE) +#define F_MAC_TX_ENABLE V_MAC_TX_ENABLE(1U) + +#define S_MAC_RX_ENABLE 3 +#define V_MAC_RX_ENABLE(x) ((x) << S_MAC_RX_ENABLE) +#define F_MAC_RX_ENABLE V_MAC_RX_ENABLE(1U) + +#define S_MAC_LB_ENABLE 4 +#define V_MAC_LB_ENABLE(x) ((x) << S_MAC_LB_ENABLE) +#define F_MAC_LB_ENABLE V_MAC_LB_ENABLE(1U) + +#define S_MAC_SPEED 5 +#define M_MAC_SPEED 0x3 +#define V_MAC_SPEED(x) ((x) << S_MAC_SPEED) +#define G_MAC_SPEED(x) (((x) >> S_MAC_SPEED) & M_MAC_SPEED) + +#define S_MAC_HD_FC_ENABLE 7 +#define V_MAC_HD_FC_ENABLE(x) ((x) << S_MAC_HD_FC_ENABLE) +#define F_MAC_HD_FC_ENABLE V_MAC_HD_FC_ENABLE(1U) + +#define S_MAC_HALF_DUPLEX 8 +#define V_MAC_HALF_DUPLEX(x) ((x) << S_MAC_HALF_DUPLEX) +#define F_MAC_HALF_DUPLEX V_MAC_HALF_DUPLEX(1U) + +#define S_MAC_PROMISC 9 +#define V_MAC_PROMISC(x) ((x) << S_MAC_PROMISC) +#define F_MAC_PROMISC V_MAC_PROMISC(1U) + +#define S_MAC_MC_ENABLE 10 +#define V_MAC_MC_ENABLE(x) ((x) << S_MAC_MC_ENABLE) +#define F_MAC_MC_ENABLE V_MAC_MC_ENABLE(1U) + +#define S_MAC_RESET 11 +#define V_MAC_RESET(x) ((x) << S_MAC_RESET) +#define F_MAC_RESET V_MAC_RESET(1U) + +#define S_MAC_RX_PAUSE_ENABLE 12 +#define V_MAC_RX_PAUSE_ENABLE(x) ((x) << S_MAC_RX_PAUSE_ENABLE) +#define F_MAC_RX_PAUSE_ENABLE V_MAC_RX_PAUSE_ENABLE(1U) + +#define S_MAC_TX_PAUSE_ENABLE 13 +#define V_MAC_TX_PAUSE_ENABLE(x) ((x) << S_MAC_TX_PAUSE_ENABLE) +#define F_MAC_TX_PAUSE_ENABLE V_MAC_TX_PAUSE_ENABLE(1U) + +#define S_MAC_LWM_ENABLE 14 +#define V_MAC_LWM_ENABLE(x) ((x) << S_MAC_LWM_ENABLE) +#define F_MAC_LWM_ENABLE V_MAC_LWM_ENABLE(1U) + +#define S_MAC_MAGIC_PKT_ENABLE 15 +#define V_MAC_MAGIC_PKT_ENABLE(x) ((x) << S_MAC_MAGIC_PKT_ENABLE) +#define F_MAC_MAGIC_PKT_ENABLE V_MAC_MAGIC_PKT_ENABLE(1U) + +#define S_MAC_ISL_ENABLE 16 +#define V_MAC_ISL_ENABLE(x) ((x) << S_MAC_ISL_ENABLE) +#define F_MAC_ISL_ENABLE V_MAC_ISL_ENABLE(1U) + +#define S_MAC_JUMBO_ENABLE 17 +#define V_MAC_JUMBO_ENABLE(x) ((x) << S_MAC_JUMBO_ENABLE) +#define F_MAC_JUMBO_ENABLE V_MAC_JUMBO_ENABLE(1U) + +#define S_MAC_RX_PAD_ENABLE 18 +#define V_MAC_RX_PAD_ENABLE(x) ((x) << S_MAC_RX_PAD_ENABLE) +#define F_MAC_RX_PAD_ENABLE V_MAC_RX_PAD_ENABLE(1U) + +#define S_MAC_RX_CRC_ENABLE 19 +#define V_MAC_RX_CRC_ENABLE(x) ((x) << S_MAC_RX_CRC_ENABLE) +#define F_MAC_RX_CRC_ENABLE V_MAC_RX_CRC_ENABLE(1U) + +#define A_GMAC_IFS 0x34 + +#define S_MAC_IFS2 0 +#define M_MAC_IFS2 0x3f +#define V_MAC_IFS2(x) ((x) << S_MAC_IFS2) +#define G_MAC_IFS2(x) (((x) >> S_MAC_IFS2) & M_MAC_IFS2) + +#define S_MAC_IFS1 8 +#define M_MAC_IFS1 0x7f +#define V_MAC_IFS1(x) ((x) << S_MAC_IFS1) +#define 
G_MAC_IFS1(x) (((x) >> S_MAC_IFS1) & M_MAC_IFS1) + +#define A_GMAC_JUMBO_FRAME_LEN 0x38 +#define A_GMAC_LNK_DLY 0x3c +#define A_GMAC_PAUSETIME 0x40 +#define A_GMAC_MCAST_LO 0x44 +#define A_GMAC_MCAST_HI 0x48 +#define A_GMAC_MCAST_MASK_LO 0x4c +#define A_GMAC_MCAST_MASK_HI 0x50 +#define A_GMAC_RMT_CNT 0x54 +#define A_GMAC_RMT_DATA 0x58 +#define A_GMAC_BACKOFF_SEED 0x5c +#define A_GMAC_TXF_THRES 0x60 + +#define S_TXF_READ_THRESHOLD 0 +#define M_TXF_READ_THRESHOLD 0xff +#define V_TXF_READ_THRESHOLD(x) ((x) << S_TXF_READ_THRESHOLD) +#define G_TXF_READ_THRESHOLD(x) (((x) >> S_TXF_READ_THRESHOLD) & M_TXF_READ_THRESHOLD) + +#define S_TXF_WRITE_THRESHOLD 16 +#define M_TXF_WRITE_THRESHOLD 0xff +#define V_TXF_WRITE_THRESHOLD(x) ((x) << S_TXF_WRITE_THRESHOLD) +#define G_TXF_WRITE_THRESHOLD(x) (((x) >> S_TXF_WRITE_THRESHOLD) & M_TXF_WRITE_THRESHOLD) + +#define MAC_REG_BASE 0x600 +#define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg)) + +#define MAC_REG_IDLO(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_LO) +#define MAC_REG_IDHI(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_HI) +#define MAC_REG_CSR(idx) MAC_REG_ADDR(idx, A_GMAC_CSR) +#define MAC_REG_IFS(idx) MAC_REG_ADDR(idx, A_GMAC_IFS) +#define MAC_REG_LARGEFRAMELENGTH(idx) MAC_REG_ADDR(idx, A_GMAC_JUMBO_FRAME_LEN) +#define MAC_REG_LINKDLY(idx) MAC_REG_ADDR(idx, A_GMAC_LNK_DLY) +#define MAC_REG_PAUSETIME(idx) MAC_REG_ADDR(idx, A_GMAC_PAUSETIME) +#define MAC_REG_CASTLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_LO) +#define MAC_REG_MCASTHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_HI) +#define MAC_REG_CASTMASKLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_LO) +#define MAC_REG_MCASTMASKHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_HI) +#define MAC_REG_RMCNT(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_CNT) +#define MAC_REG_RMDATA(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_DATA) +#define MAC_REG_GMRANDBACKOFFSEED(idx) MAC_REG_ADDR(idx, A_GMAC_BACKOFF_SEED) +#define MAC_REG_TXFTHRESHOLDS(idx) MAC_REG_ADDR(idx, A_GMAC_TXF_THRES) + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h new file mode 100644 index 0000000..d423374 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h @@ -0,0 +1,142 @@ +/***************************************************************************** + * * + * File: gmac.h * + * $Revision: 1.6 $ * + * $Date: 2005/06/21 18:29:47 $ * + * Description: * + * Generic MAC functionality. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. 
* + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_GMAC_H_ +#define _CXGB_GMAC_H_ + +#include "common.h" + +enum { + MAC_STATS_UPDATE_FAST, + MAC_STATS_UPDATE_FULL +}; + +enum { + MAC_DIRECTION_RX = 1, + MAC_DIRECTION_TX = 2 +}; + +struct cmac_statistics { + /* Transmit */ + u64 TxOctetsOK; + u64 TxOctetsBad; + u64 TxUnicastFramesOK; + u64 TxMulticastFramesOK; + u64 TxBroadcastFramesOK; + u64 TxPauseFrames; + u64 TxFramesWithDeferredXmissions; + u64 TxLateCollisions; + u64 TxTotalCollisions; + u64 TxFramesAbortedDueToXSCollisions; + u64 TxUnderrun; + u64 TxLengthErrors; + u64 TxInternalMACXmitError; + u64 TxFramesWithExcessiveDeferral; + u64 TxFCSErrors; + u64 TxJumboFramesOK; + u64 TxJumboOctetsOK; + + /* Receive */ + u64 RxOctetsOK; + u64 RxOctetsBad; + u64 RxUnicastFramesOK; + u64 RxMulticastFramesOK; + u64 RxBroadcastFramesOK; + u64 RxPauseFrames; + u64 RxFCSErrors; + u64 RxAlignErrors; + u64 RxSymbolErrors; + u64 RxDataErrors; + u64 RxSequenceErrors; + u64 RxRuntErrors; + u64 RxJabberErrors; + u64 RxInternalMACRcvError; + u64 RxInRangeLengthErrors; + u64 RxOutOfRangeLengthField; + u64 RxFrameTooLongErrors; + u64 RxJumboFramesOK; + u64 RxJumboOctetsOK; +}; + +struct cmac_ops { + void (*destroy)(struct cmac *); + int (*reset)(struct cmac *); + int (*interrupt_enable)(struct cmac *); + int (*interrupt_disable)(struct cmac *); + int (*interrupt_clear)(struct cmac *); + int (*interrupt_handler)(struct cmac *); + + int (*enable)(struct cmac *, int); + int (*disable)(struct cmac *, int); + + int (*loopback_enable)(struct cmac *); + int (*loopback_disable)(struct cmac *); + + int (*set_mtu)(struct cmac *, int mtu); + int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm); + + int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc); + int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex, + int *fc); + + const struct cmac_statistics *(*statistics_update)(struct cmac *, int); + + int (*macaddress_get)(struct cmac *, u8 mac_addr[6]); + int (*macaddress_set)(struct cmac *, u8 mac_addr[6]); +}; + +typedef struct _cmac_instance cmac_instance; + +struct cmac { + struct cmac_statistics stats; + adapter_t *adapter; + const struct cmac_ops *ops; + cmac_instance *instance; +}; + +struct gmac { + unsigned int stats_update_period; + struct cmac *(*create)(adapter_t *adapter, int index); + int (*reset)(adapter_t *); +}; + +extern const struct gmac t1_pm3393_ops; +extern const struct gmac t1_vsc7326_ops; + +#endif /* _CXGB_GMAC_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c new file mode 100644 index 0000000..71018a4 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c @@ -0,0 +1,397 @@ +/* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */ +#include "common.h" +#include "mv88e1xxx.h" +#include "cphy.h" +#include "elmer0.h" + +/* MV88E1XXX MDI crossover register values */ +#define CROSSOVER_MDI 0 +#define CROSSOVER_MDIX 1 +#define CROSSOVER_AUTO 3 + +#define INTR_ENABLE_MASK 0x6CA0 + +/* + * Set the bits given by 'bitval' in PHY register 'reg'. 
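+ * This is a read-modify-write through simple_mdio_read() and
+ * simple_mdio_write(); their return values are deliberately ignored,
+ * hence the (void) casts below.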
+ */ +static void mdio_set_bit(struct cphy *cphy, int reg, u32 bitval) +{ + u32 val; + + (void) simple_mdio_read(cphy, reg, &val); + (void) simple_mdio_write(cphy, reg, val | bitval); +} + +/* + * Clear the bits given by 'bitval' in PHY register 'reg'. + */ +static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval) +{ + u32 val; + + (void) simple_mdio_read(cphy, reg, &val); + (void) simple_mdio_write(cphy, reg, val & ~bitval); +} + +/* + * NAME: phy_reset + * + * DESC: Reset the given PHY's port. NOTE: This is not a global + * chip reset. + * + * PARAMS: cphy - Pointer to PHY instance data. + * + * RETURN: 0 - Successful reset. + * -1 - Timeout. + */ +static int mv88e1xxx_reset(struct cphy *cphy, int wait) +{ + u32 ctl; + int time_out = 1000; + + mdio_set_bit(cphy, MII_BMCR, BMCR_RESET); + + do { + (void) simple_mdio_read(cphy, MII_BMCR, &ctl); + ctl &= BMCR_RESET; + if (ctl) + udelay(1); + } while (ctl && --time_out); + + return ctl ? -1 : 0; +} + +static int mv88e1xxx_interrupt_enable(struct cphy *cphy) +{ + /* Enable PHY interrupts. */ + (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, + INTR_ENABLE_MASK); + + /* Enable Marvell interrupts through Elmer0. */ + if (t1_is_asic(cphy->adapter)) { + u32 elmer; + + t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); + elmer |= ELMER0_GP_BIT1; + if (is_T2(cphy->adapter)) + elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4; + t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); + } + return 0; +} + +static int mv88e1xxx_interrupt_disable(struct cphy *cphy) +{ + /* Disable all phy interrupts. */ + (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, 0); + + /* Disable Marvell interrupts through Elmer0. */ + if (t1_is_asic(cphy->adapter)) { + u32 elmer; + + t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); + elmer &= ~ELMER0_GP_BIT1; + if (is_T2(cphy->adapter)) + elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); + t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); + } + return 0; +} + +static int mv88e1xxx_interrupt_clear(struct cphy *cphy) +{ + u32 elmer; + + /* Clear PHY interrupts by reading the register. */ + (void) simple_mdio_read(cphy, + MV88E1XXX_INTERRUPT_STATUS_REGISTER, &elmer); + + /* Clear Marvell interrupts through Elmer0. */ + if (t1_is_asic(cphy->adapter)) { + t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); + elmer |= ELMER0_GP_BIT1; + if (is_T2(cphy->adapter)) + elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; + t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); + } + return 0; +} + +/* + * Set the PHY speed and duplex. This also disables auto-negotiation, except + * for 1Gb/s, where auto-negotiation is mandatory. 
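+ * (1000BASE-T relies on auto-negotiation for master/slave resolution,
+ * which is why BMCR_ANENABLE is forced back on below whenever
+ * BMCR_SPEED1000 is selected.)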
+ */ +static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex) +{ + u32 ctl; + + (void) simple_mdio_read(phy, MII_BMCR, &ctl); + if (speed >= 0) { + ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + if (speed == SPEED_100) + ctl |= BMCR_SPEED100; + else if (speed == SPEED_1000) + ctl |= BMCR_SPEED1000; + } + if (duplex >= 0) { + ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE); + if (duplex == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + } + if (ctl & BMCR_SPEED1000) /* auto-negotiation required for 1Gb/s */ + ctl |= BMCR_ANENABLE; + (void) simple_mdio_write(phy, MII_BMCR, ctl); + return 0; +} + +static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover) +{ + u32 data32; + + (void) simple_mdio_read(cphy, + MV88E1XXX_SPECIFIC_CNTRL_REGISTER, &data32); + data32 &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE); + data32 |= V_PSCR_MDI_XOVER_MODE(crossover); + (void) simple_mdio_write(cphy, + MV88E1XXX_SPECIFIC_CNTRL_REGISTER, data32); + return 0; +} + +static int mv88e1xxx_autoneg_enable(struct cphy *cphy) +{ + u32 ctl; + + (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO); + + (void) simple_mdio_read(cphy, MII_BMCR, &ctl); + /* restart autoneg for change to take effect */ + ctl |= BMCR_ANENABLE | BMCR_ANRESTART; + (void) simple_mdio_write(cphy, MII_BMCR, ctl); + return 0; +} + +static int mv88e1xxx_autoneg_disable(struct cphy *cphy) +{ + u32 ctl; + + /* + * Crossover *must* be set to manual in order to disable auto-neg. + * The Alaska FAQs document highlights this point. + */ + (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_MDI); + + /* + * Must include autoneg reset when disabling auto-neg. This + * is described in the Alaska FAQ document. + */ + (void) simple_mdio_read(cphy, MII_BMCR, &ctl); + ctl &= ~BMCR_ANENABLE; + (void) simple_mdio_write(cphy, MII_BMCR, ctl | BMCR_ANRESTART); + return 0; +} + +static int mv88e1xxx_autoneg_restart(struct cphy *cphy) +{ + mdio_set_bit(cphy, MII_BMCR, BMCR_ANRESTART); + return 0; +} + +static int mv88e1xxx_advertise(struct cphy *phy, unsigned int advertise_map) +{ + u32 val = 0; + + if (advertise_map & + (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { + (void) simple_mdio_read(phy, MII_GBCR, &val); + val &= ~(GBCR_ADV_1000HALF | GBCR_ADV_1000FULL); + if (advertise_map & ADVERTISED_1000baseT_Half) + val |= GBCR_ADV_1000HALF; + if (advertise_map & ADVERTISED_1000baseT_Full) + val |= GBCR_ADV_1000FULL; + } + (void) simple_mdio_write(phy, MII_GBCR, val); + + val = 1; + if (advertise_map & ADVERTISED_10baseT_Half) + val |= ADVERTISE_10HALF; + if (advertise_map & ADVERTISED_10baseT_Full) + val |= ADVERTISE_10FULL; + if (advertise_map & ADVERTISED_100baseT_Half) + val |= ADVERTISE_100HALF; + if (advertise_map & ADVERTISED_100baseT_Full) + val |= ADVERTISE_100FULL; + if (advertise_map & ADVERTISED_PAUSE) + val |= ADVERTISE_PAUSE; + if (advertise_map & ADVERTISED_ASYM_PAUSE) + val |= ADVERTISE_PAUSE_ASYM; + (void) simple_mdio_write(phy, MII_ADVERTISE, val); + return 0; +} + +static int mv88e1xxx_set_loopback(struct cphy *cphy, int on) +{ + if (on) + mdio_set_bit(cphy, MII_BMCR, BMCR_LOOPBACK); + else + mdio_clear_bit(cphy, MII_BMCR, BMCR_LOOPBACK); + return 0; +} + +static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + u32 status; + int sp = -1, dplx = -1, pause = 0; + + (void) simple_mdio_read(cphy, + MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); + if ((status & V_PSSR_STATUS_RESOLVED) != 0) { + if (status & V_PSSR_RX_PAUSE) + pause |= PAUSE_RX; + if (status & 
V_PSSR_TX_PAUSE) + pause |= PAUSE_TX; + dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; + sp = G_PSSR_SPEED(status); + if (sp == 0) + sp = SPEED_10; + else if (sp == 1) + sp = SPEED_100; + else + sp = SPEED_1000; + } + if (link_ok) + *link_ok = (status & V_PSSR_LINK) != 0; + if (speed) + *speed = sp; + if (duplex) + *duplex = dplx; + if (fc) + *fc = pause; + return 0; +} + +static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable) +{ + u32 val; + + (void) simple_mdio_read(cphy, + MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, &val); + + /* + * Set the downshift counter to 2 so we try to establish Gb link + * twice before downshifting. + */ + val &= ~(V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT)); + + if (downshift_enable) + val |= V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2); + (void) simple_mdio_write(cphy, + MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, val); + return 0; +} + +static int mv88e1xxx_interrupt_handler(struct cphy *cphy) +{ + int cphy_cause = 0; + u32 status; + + /* + * Loop until cause reads zero. Need to handle bouncing interrupts. + */ + while (1) { + u32 cause; + + (void) simple_mdio_read(cphy, + MV88E1XXX_INTERRUPT_STATUS_REGISTER, + &cause); + cause &= INTR_ENABLE_MASK; + if (!cause) + break; + + if (cause & MV88E1XXX_INTR_LINK_CHNG) { + (void) simple_mdio_read(cphy, + MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); + + if (status & MV88E1XXX_INTR_LINK_CHNG) + cphy->state |= PHY_LINK_UP; + else { + cphy->state &= ~PHY_LINK_UP; + if (cphy->state & PHY_AUTONEG_EN) + cphy->state &= ~PHY_AUTONEG_RDY; + cphy_cause |= cphy_cause_link_change; + } + } + + if (cause & MV88E1XXX_INTR_AUTONEG_DONE) + cphy->state |= PHY_AUTONEG_RDY; + + if ((cphy->state & (PHY_LINK_UP | PHY_AUTONEG_RDY)) == + (PHY_LINK_UP | PHY_AUTONEG_RDY)) + cphy_cause |= cphy_cause_link_change; + } + return cphy_cause; +} + +static void mv88e1xxx_destroy(struct cphy *cphy) +{ + kfree(cphy); +} + +static struct cphy_ops mv88e1xxx_ops = { + .destroy = mv88e1xxx_destroy, + .reset = mv88e1xxx_reset, + .interrupt_enable = mv88e1xxx_interrupt_enable, + .interrupt_disable = mv88e1xxx_interrupt_disable, + .interrupt_clear = mv88e1xxx_interrupt_clear, + .interrupt_handler = mv88e1xxx_interrupt_handler, + .autoneg_enable = mv88e1xxx_autoneg_enable, + .autoneg_disable = mv88e1xxx_autoneg_disable, + .autoneg_restart = mv88e1xxx_autoneg_restart, + .advertise = mv88e1xxx_advertise, + .set_loopback = mv88e1xxx_set_loopback, + .set_speed_duplex = mv88e1xxx_set_speed_duplex, + .get_link_status = mv88e1xxx_get_link_status, +}; + +static struct cphy *mv88e1xxx_phy_create(struct net_device *dev, int phy_addr, + const struct mdio_ops *mdio_ops) +{ + struct adapter *adapter = netdev_priv(dev); + struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); + + if (!cphy) + return NULL; + + cphy_init(cphy, dev, phy_addr, &mv88e1xxx_ops, mdio_ops); + + /* Configure particular PHY's to run in a different mode. */ + if ((board_info(adapter)->caps & SUPPORTED_TP) && + board_info(adapter)->chip_phy == CHBT_PHY_88E1111) { + /* + * Configure the PHY transmitter as class A to reduce EMI. 
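+		 * This is done below through the extended address/data
+		 * register pair (page 0xB, value 0x8004).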
+ */ + (void) simple_mdio_write(cphy, + MV88E1XXX_EXTENDED_ADDR_REGISTER, 0xB); + (void) simple_mdio_write(cphy, + MV88E1XXX_EXTENDED_REGISTER, 0x8004); + } + (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */ + + /* LED */ + if (is_T2(adapter)) { + (void) simple_mdio_write(cphy, + MV88E1XXX_LED_CONTROL_REGISTER, 0x1); + } + + return cphy; +} + +static int mv88e1xxx_phy_reset(adapter_t* adapter) +{ + return 0; +} + +const struct gphy t1_mv88e1xxx_ops = { + .create = mv88e1xxx_phy_create, + .reset = mv88e1xxx_phy_reset +}; diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h new file mode 100644 index 0000000..967cc42 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h @@ -0,0 +1,127 @@ +/* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */ +#ifndef CHELSIO_MV8E1XXX_H +#define CHELSIO_MV8E1XXX_H + +#ifndef BMCR_SPEED1000 +# define BMCR_SPEED1000 0x40 +#endif + +#ifndef ADVERTISE_PAUSE +# define ADVERTISE_PAUSE 0x400 +#endif +#ifndef ADVERTISE_PAUSE_ASYM +# define ADVERTISE_PAUSE_ASYM 0x800 +#endif + +/* Gigabit MII registers */ +#define MII_GBCR 9 /* 1000Base-T control register */ +#define MII_GBSR 10 /* 1000Base-T status register */ + +/* 1000Base-T control register fields */ +#define GBCR_ADV_1000HALF 0x100 +#define GBCR_ADV_1000FULL 0x200 +#define GBCR_PREFER_MASTER 0x400 +#define GBCR_MANUAL_AS_MASTER 0x800 +#define GBCR_MANUAL_CONFIG_ENABLE 0x1000 + +/* 1000Base-T status register fields */ +#define GBSR_LP_1000HALF 0x400 +#define GBSR_LP_1000FULL 0x800 +#define GBSR_REMOTE_OK 0x1000 +#define GBSR_LOCAL_OK 0x2000 +#define GBSR_LOCAL_MASTER 0x4000 +#define GBSR_MASTER_FAULT 0x8000 + +/* Marvell PHY interrupt status bits. */ +#define MV88E1XXX_INTR_JABBER 0x0001 +#define MV88E1XXX_INTR_POLARITY_CHNG 0x0002 +#define MV88E1XXX_INTR_ENG_DETECT_CHNG 0x0010 +#define MV88E1XXX_INTR_DOWNSHIFT 0x0020 +#define MV88E1XXX_INTR_MDI_XOVER_CHNG 0x0040 +#define MV88E1XXX_INTR_FIFO_OVER_UNDER 0x0080 +#define MV88E1XXX_INTR_FALSE_CARRIER 0x0100 +#define MV88E1XXX_INTR_SYMBOL_ERROR 0x0200 +#define MV88E1XXX_INTR_LINK_CHNG 0x0400 +#define MV88E1XXX_INTR_AUTONEG_DONE 0x0800 +#define MV88E1XXX_INTR_PAGE_RECV 0x1000 +#define MV88E1XXX_INTR_DUPLEX_CHNG 0x2000 +#define MV88E1XXX_INTR_SPEED_CHNG 0x4000 +#define MV88E1XXX_INTR_AUTONEG_ERR 0x8000 + +/* Marvell PHY specific registers. 
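+ * These sit in the vendor-specific register space (16 and up), beyond
+ * the IEEE-defined registers 0-15.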
*/ +#define MV88E1XXX_SPECIFIC_CNTRL_REGISTER 16 +#define MV88E1XXX_SPECIFIC_STATUS_REGISTER 17 +#define MV88E1XXX_INTERRUPT_ENABLE_REGISTER 18 +#define MV88E1XXX_INTERRUPT_STATUS_REGISTER 19 +#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER 20 +#define MV88E1XXX_RECV_ERR_CNTR_REGISTER 21 +#define MV88E1XXX_RES_REGISTER 22 +#define MV88E1XXX_GLOBAL_STATUS_REGISTER 23 +#define MV88E1XXX_LED_CONTROL_REGISTER 24 +#define MV88E1XXX_MANUAL_LED_OVERRIDE_REGISTER 25 +#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER 26 +#define MV88E1XXX_EXT_PHY_SPECIFIC_STATUS_REGISTER 27 +#define MV88E1XXX_VIRTUAL_CABLE_TESTER_REGISTER 28 +#define MV88E1XXX_EXTENDED_ADDR_REGISTER 29 +#define MV88E1XXX_EXTENDED_REGISTER 30 + +/* PHY specific control register fields */ +#define S_PSCR_MDI_XOVER_MODE 5 +#define M_PSCR_MDI_XOVER_MODE 0x3 +#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE) +#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE) + +/* Extended PHY specific control register fields */ +#define S_DOWNSHIFT_ENABLE 8 +#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE) + +#define S_DOWNSHIFT_CNT 9 +#define M_DOWNSHIFT_CNT 0x7 +#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT) +#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT) + +/* PHY specific status register fields */ +#define S_PSSR_JABBER 0 +#define V_PSSR_JABBER (1 << S_PSSR_JABBER) + +#define S_PSSR_POLARITY 1 +#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY) + +#define S_PSSR_RX_PAUSE 2 +#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE) + +#define S_PSSR_TX_PAUSE 3 +#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE) + +#define S_PSSR_ENERGY_DETECT 4 +#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT) + +#define S_PSSR_DOWNSHIFT_STATUS 5 +#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS) + +#define S_PSSR_MDI 6 +#define V_PSSR_MDI (1 << S_PSSR_MDI) + +#define S_PSSR_CABLE_LEN 7 +#define M_PSSR_CABLE_LEN 0x7 +#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN) +#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN) + +#define S_PSSR_LINK 10 +#define V_PSSR_LINK (1 << S_PSSR_LINK) + +#define S_PSSR_STATUS_RESOLVED 11 +#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED) + +#define S_PSSR_PAGE_RECEIVED 12 +#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED) + +#define S_PSSR_DUPLEX 13 +#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX) + +#define S_PSSR_SPEED 14 +#define M_PSSR_SPEED 0x3 +#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED) +#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED) + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c new file mode 100644 index 0000000..f7136b2 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c @@ -0,0 +1,260 @@ +/***************************************************************************** + * * + * File: mv88x201x.c * + * $Revision: 1.12 $ * + * $Date: 2005/04/15 19:27:14 $ * + * Description: * + * Marvell PHY (mv88x201x) functionality. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis                                            *
+ *          Tina Yang                                                        *
+ *          Felix Marti                                                      *
+ *          Scott Bardone                                                    *
+ *          Kurt Ottaway                                                     *
+ *          Frank DiMambro                                                   *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "cphy.h"
+#include "elmer0.h"
+
+/*
+ * The 88x2010 Rev C. requires some link status registers to be read
+ * twice in order to get the right values. Future revisions will fix
+ * this problem and then this macro can disappear.
+ */
+#define MV88x2010_LINK_STATUS_BUGS 1
+
+static int led_init(struct cphy *cphy)
+{
+	/* Setup the LED registers so we can turn on/off.
+	 * Writing these bits maps control to another
+	 * register. mmd(0x1) addr(0x7)
+	 */
+	cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd);
+	return 0;
+}
+
+static int led_link(struct cphy *cphy, u32 do_enable)
+{
+	u32 led = 0;
+#define LINK_ENABLE_BIT 0x1
+
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led);
+
+	if (do_enable & LINK_ENABLE_BIT) {
+		led |= LINK_ENABLE_BIT;
+		cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
+	} else {
+		led &= ~LINK_ENABLE_BIT;
+		cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
+	}
+	return 0;
+}
+
+/* Port Reset */
+static int mv88x201x_reset(struct cphy *cphy, int wait)
+{
+	/* This can be done through registers. It is not required since
+	 * a full chip reset is used.
+	 */
+	return 0;
+}
+
+static int mv88x201x_interrupt_enable(struct cphy *cphy)
+{
+	/* Enable PHY LASI interrupts. */
+	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+			MDIO_PMA_LASI_LSALARM);
+
+	/* Enable Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer |= ELMER0_GP_BIT6;
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
+	return 0;
+}
+
+static int mv88x201x_interrupt_disable(struct cphy *cphy)
+{
+	/* Disable PHY LASI interrupts. */
+	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0);
+
+	/* Disable Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer &= ~ELMER0_GP_BIT6;
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
+	return 0;
+}
+
+static int mv88x201x_interrupt_clear(struct cphy *cphy)
+{
+	u32 elmer;
+	u32 val;
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+	/* Required to read twice before clear takes effect. */
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
+
+	/* Read this register after the others above it, else
+	 * the register doesn't clear correctly.
+	 */
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+#endif
+
+	/* Clear link status. */
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+	/* Clear PHY LASI interrupts. */
+	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+	/* Do it again.
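+	 * (i.e. re-read the RX/TX LASI status registers, per the Rev C.
+	 * double-read erratum noted above)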
*/ + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val); + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val); +#endif + + /* Clear Marvell interrupts through Elmer0. */ + if (t1_is_asic(cphy->adapter)) { + t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); + elmer |= ELMER0_GP_BIT6; + t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); + } + return 0; +} + +static int mv88x201x_interrupt_handler(struct cphy *cphy) +{ + /* Clear interrupts */ + mv88x201x_interrupt_clear(cphy); + + /* We have only enabled link change interrupts and so + * cphy_cause must be a link change interrupt. + */ + return cphy_cause_link_change; +} + +static int mv88x201x_set_loopback(struct cphy *cphy, int on) +{ + return 0; +} + +static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + u32 val = 0; + + if (link_ok) { + /* Read link status. */ + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); + val &= MDIO_STAT1_LSTATUS; + *link_ok = (val == MDIO_STAT1_LSTATUS); + /* Turn on/off Link LED */ + led_link(cphy, *link_ok); + } + if (speed) + *speed = SPEED_10000; + if (duplex) + *duplex = DUPLEX_FULL; + if (fc) + *fc = PAUSE_RX | PAUSE_TX; + return 0; +} + +static void mv88x201x_destroy(struct cphy *cphy) +{ + kfree(cphy); +} + +static struct cphy_ops mv88x201x_ops = { + .destroy = mv88x201x_destroy, + .reset = mv88x201x_reset, + .interrupt_enable = mv88x201x_interrupt_enable, + .interrupt_disable = mv88x201x_interrupt_disable, + .interrupt_clear = mv88x201x_interrupt_clear, + .interrupt_handler = mv88x201x_interrupt_handler, + .get_link_status = mv88x201x_get_link_status, + .set_loopback = mv88x201x_set_loopback, + .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | + MDIO_DEVS_PHYXS | MDIO_DEVS_WIS), +}; + +static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr, + const struct mdio_ops *mdio_ops) +{ + u32 val; + struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); + + if (!cphy) + return NULL; + + cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops); + + /* Commands the PHY to enable XFP's clock. */ + cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val); + cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1); + + /* Clear link status. Required because of a bug in the PHY. */ + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val); + cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val); + + /* Allows for Link,Ack LED turn on/off */ + led_init(cphy); + return cphy; +} + +/* Chip Reset */ +static int mv88x201x_phy_reset(adapter_t *adapter) +{ + u32 val; + + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~4; + t1_tpi_write(adapter, A_ELMER0_GPO, val); + msleep(100); + + t1_tpi_write(adapter, A_ELMER0_GPO, val | 4); + msleep(1000); + + /* Now lets enable the Laser. 
Delay 100us */ + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= 0x8000; + t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(100); + return 0; +} + +const struct gphy t1_mv88x201x_ops = { + .create = mv88x201x_phy_create, + .reset = mv88x201x_phy_reset +}; diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c new file mode 100644 index 0000000..a683fd3 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c @@ -0,0 +1,209 @@ +/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */ +#include "cphy.h" +#include "elmer0.h" +#include "suni1x10gexp_regs.h" + +/* Port Reset */ +static int my3126_reset(struct cphy *cphy, int wait) +{ + /* + * This can be done through registers. It is not required since + * a full chip reset is used. + */ + return 0; +} + +static int my3126_interrupt_enable(struct cphy *cphy) +{ + schedule_delayed_work(&cphy->phy_update, HZ/30); + t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo); + return 0; +} + +static int my3126_interrupt_disable(struct cphy *cphy) +{ + cancel_delayed_work_sync(&cphy->phy_update); + return 0; +} + +static int my3126_interrupt_clear(struct cphy *cphy) +{ + return 0; +} + +#define OFFSET(REG_ADDR) (REG_ADDR << 2) + +static int my3126_interrupt_handler(struct cphy *cphy) +{ + u32 val; + u16 val16; + u16 status; + u32 act_count; + adapter_t *adapter; + adapter = cphy->adapter; + + if (cphy->count == 50) { + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); + val16 = (u16) val; + status = cphy->bmsr ^ val16; + + if (status & MDIO_STAT1_LSTATUS) + t1_link_changed(adapter, 0); + cphy->bmsr = val16; + + /* We have only enabled link change interrupts so it + must be that + */ + cphy->count = 0; + } + + t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL), + SUNI1x10GEXP_BITMSK_MSTAT_SNAP); + t1_tpi_read(adapter, + OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count); + t1_tpi_read(adapter, + OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val); + act_count += val; + + /* Populate elmer_gpo with the register value */ + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + cphy->elmer_gpo = val; + + if ( (val & (1 << 8)) || (val & (1 << 19)) || + (cphy->act_count == act_count) || cphy->act_on ) { + if (is_T2(adapter)) + val |= (1 << 9); + else if (t1_is_T1B(adapter)) + val |= (1 << 20); + cphy->act_on = 0; + } else { + if (is_T2(adapter)) + val &= ~(1 << 9); + else if (t1_is_T1B(adapter)) + val &= ~(1 << 20); + cphy->act_on = 1; + } + + t1_tpi_write(adapter, A_ELMER0_GPO, val); + + cphy->elmer_gpo = val; + cphy->act_count = act_count; + cphy->count++; + + return cphy_cause_link_change; +} + +static void my3216_poll(struct work_struct *work) +{ + struct cphy *cphy = container_of(work, struct cphy, phy_update.work); + + my3126_interrupt_handler(cphy); +} + +static int my3126_set_loopback(struct cphy *cphy, int on) +{ + return 0; +} + +/* To check the activity LED */ +static int my3126_get_link_status(struct cphy *cphy, + int *link_ok, int *speed, int *duplex, int *fc) +{ + u32 val; + u16 val16; + adapter_t *adapter; + + adapter = cphy->adapter; + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); + val16 = (u16) val; + + /* Populate elmer_gpo with the register value */ + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + cphy->elmer_gpo = val; + + *link_ok = (val16 & MDIO_STAT1_LSTATUS); + + if (*link_ok) { + /* Turn on the LED. */ + if (is_T2(adapter)) + val &= ~(1 << 8); + else if (t1_is_T1B(adapter)) + val &= ~(1 << 19); + } else { + /* Turn off the LED. 
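+		 * (bit 8 of ELMER0 GPO on T2, bit 19 on T1B; the turn-on
+		 * branch above clears the same bit)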
*/
+		if (is_T2(adapter))
+			val |= (1 << 8);
+		else if (t1_is_T1B(adapter))
+			val |= (1 << 19);
+	}
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	cphy->elmer_gpo = val;
+	*speed = SPEED_10000;
+	*duplex = DUPLEX_FULL;
+
+	/* need to add flow control */
+	if (fc)
+		*fc = PAUSE_RX | PAUSE_TX;
+
+	return 0;
+}
+
+static void my3126_destroy(struct cphy *cphy)
+{
+	kfree(cphy);
+}
+
+static struct cphy_ops my3126_ops = {
+	.destroy		= my3126_destroy,
+	.reset			= my3126_reset,
+	.interrupt_enable	= my3126_interrupt_enable,
+	.interrupt_disable	= my3126_interrupt_disable,
+	.interrupt_clear	= my3126_interrupt_clear,
+	.interrupt_handler	= my3126_interrupt_handler,
+	.get_link_status	= my3126_get_link_status,
+	.set_loopback		= my3126_set_loopback,
+	.mmds			= (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
+				   MDIO_DEVS_PHYXS),
+};
+
+static struct cphy *my3126_phy_create(struct net_device *dev,
+			int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+
+	if (!cphy)
+		return NULL;
+
+	cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops);
+	INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
+	cphy->bmsr = 0;
+
+	return cphy;
+}
+
+/* Chip Reset */
+static int my3126_phy_reset(adapter_t *adapter)
+{
+	u32 val;
+
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~4;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	msleep(100);
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
+	msleep(1000);
+
+	/* Now let's enable the laser. Delay 100us. */
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= 0x8000;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(100);
+	return 0;
+}
+
+const struct gphy t1_my3126_ops = {
+	.create = my3126_phy_create,
+	.reset = my3126_phy_reset
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
new file mode 100644
index 0000000..40c7b93
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -0,0 +1,796 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: pm3393.c                                                            *
+ * $Revision: 1.16 $                                                         *
+ * $Date: 2005/05/14 00:59:32 $                                              *
+ * Description:                                                              *
+ *  PMC/SIERRA (pm3393) MAC-PHY functionality.                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis                                            *
+ *          Tina Yang                                                        *
+ *          Felix Marti                                                      *
+ *          Scott Bardone                                                    *
+ *          Kurt Ottaway                                                     *
+ *          Frank DiMambro                                                   *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "gmac.h"
+#include "elmer0.h"
+#include "suni1x10gexp_regs.h"
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+
+#define OFFSET(REG_ADDR)    ((REG_ADDR) << 2)
+
+/* Max frame size PM3393 can handle.
Includes Ethernet header and CRC. */ +#define MAX_FRAME_SIZE 9600 + +#define IPG 12 +#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \ + SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \ + SUNI1x10GEXP_BITMSK_TXXG_PADEN) +#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \ + SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP) + +/* Update statistics every 15 minutes */ +#define STATS_TICK_SECS (15 * 60) + +enum { /* RMON registers */ + RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW, + RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW, + RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW, + RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW, + RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW, + RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW, + RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW, + RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW, + RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW, + RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW, + RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW, + RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW, + RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW, + RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW, + RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW, + + TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW, + TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW, + TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW, + TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW, + TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW, + TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW, + TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW, + TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW, + TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW +}; + +struct _cmac_instance { + u8 enabled; + u8 fc; + u8 mac_addr[6]; +}; + +static int pmread(struct cmac *cmac, u32 reg, u32 * data32) +{ + t1_tpi_read(cmac->adapter, OFFSET(reg), data32); + return 0; +} + +static int pmwrite(struct cmac *cmac, u32 reg, u32 data32) +{ + t1_tpi_write(cmac->adapter, OFFSET(reg), data32); + return 0; +} + +/* Port reset. */ +static int pm3393_reset(struct cmac *cmac) +{ + return 0; +} + +/* + * Enable interrupts for the PM3393 + * + * 1. Enable PM3393 BLOCK interrupts. + * 2. Enable PM3393 Master Interrupt bit(INTE) + * 3. Enable ELMER's PM3393 bit. + * 4. Enable Terminator external interrupt. + */ +static int pm3393_interrupt_enable(struct cmac *cmac) +{ + u32 pl_intr; + + /* PM3393 - Enabling all hardware block interrupts. 
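+	 * The chip's global INTE bit itself is deliberately left clear
+	 * further down until the spurious error interrupts are understood.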
+	 */
+	pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
+
+	/* Don't interrupt on statistics overflow, we are polling */
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
+
+	/* PM3393 - Global interrupt enable
+	 */
+	/* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
+	pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
+		0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
+
+	/* TERMINATOR - PL_INTERUPTS_EXT */
+	pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
+	pl_intr |= F_PL_INTR_EXT;
+	writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
+	return 0;
+}
+
+static int pm3393_interrupt_disable(struct cmac *cmac)
+{
+	u32 elmer;
+
+	/* PM3393 - Disabling HW interrupt blocks. */
+	pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
+
+	/* PM3393 - Global interrupt enable */
+	pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
+
+	/* ELMER - External chip interrupts. */
+	t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
+	elmer &= ~ELMER0_GP_BIT1;
+	t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
+
+	/* TERMINATOR - PL_INTERUPTS_EXT */
+	/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
+	 * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
+	 */
+
+	return 0;
+}
+
+static int pm3393_interrupt_clear(struct cmac *cmac)
+{
+	u32 elmer;
+	u32 pl_intr;
+	u32 val32;
+
+	/* PM3393 - Clearing HW interrupt blocks. Note, this assumes
+	 * bit WCIMODE=0 for a clear-on-read.
+ */ + pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32); + pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32); + pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32); + pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32); + pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32); + pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION, + &val32); + pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32); + + /* PM3393 - Global interrupt status + */ + pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32); + + /* ELMER - External chip interrupts. + */ + t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer); + elmer |= ELMER0_GP_BIT1; + t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer); + + /* TERMINATOR - PL_INTERUPTS_EXT + */ + pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE); + pl_intr |= F_PL_INTR_EXT; + writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE); + + return 0; +} + +/* Interrupt handler */ +static int pm3393_interrupt_handler(struct cmac *cmac) +{ + u32 master_intr_status; + + /* Read the master interrupt status register. */ + pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, + &master_intr_status); + if (netif_msg_intr(cmac->adapter)) + dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n", + master_intr_status); + + /* TBD XXX Lets just clear everything for now */ + pm3393_interrupt_clear(cmac); + + return 0; +} + +static int pm3393_enable(struct cmac *cmac, int which) +{ + if (which & MAC_DIRECTION_RX) + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, + (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN)); + + if (which & MAC_DIRECTION_TX) { + u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0; + + if (cmac->instance->fc & PAUSE_RX) + val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX; + if (cmac->instance->fc & PAUSE_TX) + val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX; + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val); + } + + cmac->instance->enabled |= which; + return 0; +} + +static int pm3393_enable_port(struct cmac *cmac, int which) +{ + /* Clear port statistics */ + pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL, + SUNI1x10GEXP_BITMSK_MSTAT_CLEAR); + udelay(2); + memset(&cmac->stats, 0, sizeof(struct cmac_statistics)); + + pm3393_enable(cmac, which); + + /* + * XXX This should be done by the PHY and preferably not at all. + * The PHY doesn't give us link status indication on its own so have + * the link management code query it instead. + */ + t1_link_changed(cmac->adapter, 0); + return 0; +} + +static int pm3393_disable(struct cmac *cmac, int which) +{ + if (which & MAC_DIRECTION_RX) + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL); + if (which & MAC_DIRECTION_TX) + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL); + + /* + * The disable is graceful. Give the PM3393 time. Can't wait very + * long here, we may be holding locks. 
+ */ + udelay(20); + + cmac->instance->enabled &= ~which; + return 0; +} + +static int pm3393_loopback_enable(struct cmac *cmac) +{ + return 0; +} + +static int pm3393_loopback_disable(struct cmac *cmac) +{ + return 0; +} + +static int pm3393_set_mtu(struct cmac *cmac, int mtu) +{ + int enabled = cmac->instance->enabled; + + /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */ + mtu += 14 + 4; + if (mtu > MAX_FRAME_SIZE) + return -EINVAL; + + /* Disable Rx/Tx MAC before configuring it. */ + if (enabled) + pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); + + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu); + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu); + + if (enabled) + pm3393_enable(cmac, enabled); + return 0; +} + +static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm) +{ + int enabled = cmac->instance->enabled & MAC_DIRECTION_RX; + u32 rx_mode; + + /* Disable MAC RX before reconfiguring it */ + if (enabled) + pm3393_disable(cmac, MAC_DIRECTION_RX); + + pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode); + rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE | + SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, + (u16)rx_mode); + + if (t1_rx_mode_promisc(rm)) { + /* Promiscuous mode. */ + rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE; + } + if (t1_rx_mode_allmulti(rm)) { + /* Accept all multicast. */ + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff); + rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; + } else if (t1_rx_mode_mc_cnt(rm)) { + /* Accept one or more multicast(s). 
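+		 * Each address is hashed with ether_crc(); CRC bits 23-28
+		 * select one of 64 bucket bits spread across the four 16-bit
+		 * MULTICAST_HASH registers programmed below.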
*/ + struct netdev_hw_addr *ha; + int bit; + u16 mc_filter[4] = { 0, }; + + netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) { + /* bit[23:28] */ + bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f; + mc_filter[bit >> 4] |= 1 << (bit & 0xf); + } + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]); + rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; + } + + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode); + + if (enabled) + pm3393_enable(cmac, MAC_DIRECTION_RX); + + return 0; +} + +static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed, + int *duplex, int *fc) +{ + if (speed) + *speed = SPEED_10000; + if (duplex) + *duplex = DUPLEX_FULL; + if (fc) + *fc = cmac->instance->fc; + return 0; +} + +static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex, + int fc) +{ + if (speed >= 0 && speed != SPEED_10000) + return -1; + if (duplex >= 0 && duplex != DUPLEX_FULL) + return -1; + if (fc & ~(PAUSE_TX | PAUSE_RX)) + return -1; + + if (fc != cmac->instance->fc) { + cmac->instance->fc = (u8) fc; + if (cmac->instance->enabled & MAC_DIRECTION_TX) + pm3393_enable(cmac, MAC_DIRECTION_TX); + } + return 0; +} + +#define RMON_UPDATE(mac, name, stat_name) \ +{ \ + t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \ + t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \ + t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \ + (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \ + ((u64)(val1 & 0xffff) << 16) | \ + ((u64)(val2 & 0xff) << 32) | \ + ((mac)->stats.stat_name & \ + 0xffffff0000000000ULL); \ + if (ro & \ + (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \ + (mac)->stats.stat_name += 1ULL << 40; \ +} + +static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, + int flag) +{ + u64 ro; + u32 val0, val1, val2, val3; + + /* Snap the counters */ + pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, + SUNI1x10GEXP_BITMSK_MSTAT_SNAP); + + /* Counter rollover, clear on read */ + pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0); + pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1); + pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2); + pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3); + ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | + (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); + + /* Rx stats */ + RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); + RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); + RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); + RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); + RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); + RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); + RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, + RxInternalMACRcvError); + RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors); + RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors); + RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors); + RMON_UPDATE(mac, RxJabbers, RxJabberErrors); + RMON_UPDATE(mac, RxFragments, RxRuntErrors); + RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors); + RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK); + RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK); 
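+
+	/*
+	 * Each RMON_UPDATE above reads one 40-bit hardware counter as
+	 * three 16/16/8-bit words, preserves the driver-maintained bits
+	 * above bit 39, and adds 2^40 when the counter's flag was set in
+	 * the rollover bitmap.  The Tx counters below are accumulated the
+	 * same way.
+	 */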
+ + /* Tx stats */ + RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK); + RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError, + TxInternalMACXmitError); + RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors); + RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK); + RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK); + RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK); + RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames); + RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK); + RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK); + + return &mac->stats; +} + +static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6]) +{ + memcpy(mac_addr, cmac->instance->mac_addr, 6); + return 0; +} + +static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6]) +{ + u32 val, lo, mid, hi, enabled = cmac->instance->enabled; + + /* + * MAC addr: 00:07:43:00:13:09 + * + * ma[5] = 0x09 + * ma[4] = 0x13 + * ma[3] = 0x00 + * ma[2] = 0x43 + * ma[1] = 0x07 + * ma[0] = 0x00 + * + * The PM3393 requires byte swapping and reverse order entry + * when programming MAC addresses: + * + * low_bits[15:0] = ma[1]:ma[0] + * mid_bits[31:16] = ma[3]:ma[2] + * high_bits[47:32] = ma[5]:ma[4] + */ + + /* Store local copy */ + memcpy(cmac->instance->mac_addr, ma, 6); + + lo = ((u32) ma[1] << 8) | (u32) ma[0]; + mid = ((u32) ma[3] << 8) | (u32) ma[2]; + hi = ((u32) ma[5] << 8) | (u32) ma[4]; + + /* Disable Rx/Tx MAC before configuring it. */ + if (enabled) + pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); + + /* Set RXXG Station Address */ + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi); + + /* Set TXXG Station Address */ + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo); + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid); + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi); + + /* Setup Exact Match Filter 1 with our MAC address + * + * Must disable exact match filter before configuring it. 
+ */ + pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val); + val &= 0xff0f; + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val); + + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi); + + val |= 0x0090; + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val); + + if (enabled) + pm3393_enable(cmac, enabled); + return 0; +} + +static void pm3393_destroy(struct cmac *cmac) +{ + kfree(cmac); +} + +static struct cmac_ops pm3393_ops = { + .destroy = pm3393_destroy, + .reset = pm3393_reset, + .interrupt_enable = pm3393_interrupt_enable, + .interrupt_disable = pm3393_interrupt_disable, + .interrupt_clear = pm3393_interrupt_clear, + .interrupt_handler = pm3393_interrupt_handler, + .enable = pm3393_enable_port, + .disable = pm3393_disable, + .loopback_enable = pm3393_loopback_enable, + .loopback_disable = pm3393_loopback_disable, + .set_mtu = pm3393_set_mtu, + .set_rx_mode = pm3393_set_rx_mode, + .get_speed_duplex_fc = pm3393_get_speed_duplex_fc, + .set_speed_duplex_fc = pm3393_set_speed_duplex_fc, + .statistics_update = pm3393_update_statistics, + .macaddress_get = pm3393_macaddress_get, + .macaddress_set = pm3393_macaddress_set +}; + +static struct cmac *pm3393_mac_create(adapter_t *adapter, int index) +{ + struct cmac *cmac; + + cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL); + if (!cmac) + return NULL; + + cmac->ops = &pm3393_ops; + cmac->instance = (cmac_instance *) (cmac + 1); + cmac->adapter = adapter; + cmac->instance->fc = PAUSE_TX | PAUSE_RX; + + t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000); + t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000); + t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800); + t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */ + t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00); + t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */ + + t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */ + t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */ + t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */ + t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */ + t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */ + t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */ + t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */ + t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */ + 
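t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */

Before the remaining FIFO-provisioning writes, a brief aside: the lo/mid/hi packing that pm3393_macaddress_set documents above is easy to sanity-check in isolation. A stand-alone sketch using the example address 00:07:43:00:13:09 from that comment:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t ma[6] = { 0x00, 0x07, 0x43, 0x00, 0x13, 0x09 };

		/* Byte-swapped, reverse-order entry, as the PM3393 expects. */
		uint32_t lo  = ((uint32_t)ma[1] << 8) | ma[0];	/* 0x0700 */
		uint32_t mid = ((uint32_t)ma[3] << 8) | ma[2];	/* 0x0043 */
		uint32_t hi  = ((uint32_t)ma[5] << 8) | ma[4];	/* 0x0913 */

		/* These are the values written to the RXXG/TXXG SA registers. */
		printf("lo=0x%04x mid=0x%04x hi=0x%04x\n", lo, mid, hi);
		return 0;
	}

The initialization sequence continues: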
+	t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
+
+	t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
+	t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
+	t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
+	t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
+	t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
+	t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
+	t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
+	t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
+	t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */
+
+	t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
+	t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
+	t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
+	t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
+
+	t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
+	t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
+
+	t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
+	t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable DIP4 check error interrupts */
+
+	t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
+	/* For T1, use timer-based MAC flow control. */
+	t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
+	t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
+	t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
+	t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
+
+	/* Setup Exact Match Filter 0 to allow broadcast packets.
+	 */
+	t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
+	t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
+	t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
+	t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
+	t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
+
+	t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
+	t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
+	t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
+
+	return cmac;
+}
+
+static int pm3393_mac_reset(adapter_t * adapter)
+{
+	u32 val;
+	u32 x;
+	u32 is_pl4_reset_finished;
+	u32 is_pl4_outof_lock;
+	u32 is_xaui_mabc_pll_locked;
+	u32 successful_reset;
+	int i;
+
+	/* The following steps are required to properly reset
+	 * the PM3393. This information is provided in the
+	 * PM3393 datasheet (Issue 2: November 2002),
+	 * section 13.1 -- Device Reset.
+	 *
+	 * The PM3393 has three types of components that are
+	 * individually reset:
+	 *
+	 * DRESETB      - Digital circuitry
+	 * PL4_ARESETB  - PL4 analog circuitry
+	 * XAUI_ARESETB - XAUI bus analog circuitry
+	 *
+	 * Steps to reset the PM3393 using the RSTB pin:
+	 *
+	 * 1. Assert RSTB pin low (write 0).
+	 * 2. Wait at least 1ms to initiate a complete initialization of the device.
+	 * 3. Wait until all external clocks and REFSEL are stable.
+	 * 4. Wait a minimum of 1ms (after external clocks and REFSEL are stable).
+	 * 5. De-assert RSTB (write 1).
+	 * 6. Wait until the internal timers expire, after ~14ms.
+	 *    - Allows the analog clock synthesizer (PL4CSU) to stabilize to the
+	 *      selected reference frequency before allowing the digital
+	 *      portion of the device to operate.
+	 * 7. Wait at least 200us for the XAUI interface to stabilize.
+	 * 8. Verify the PM3393 came out of reset successfully.
+	 *    Set the successful-reset flag if everything worked, else try
+	 *    again a few more times.
+	 */
+
+	successful_reset = 0;
+	for (i = 0; i < 3 && !successful_reset; i++) {
+		/* 1 */
+		t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+		val &= ~1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		/* 2 */
+		msleep(1);
+
+		/* 3 */
+		msleep(1);
+
+		/* 4 */
+		msleep(2 /* 1 extra ms for safety */);
+
+		/* 5 */
+		val |= 1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		/* 6 */
+		msleep(15 /* 1 extra ms for safety */);
+
+		/* 7 */
+		msleep(1);
+
+		/* 8 */
+
+		/* Has the PL4 analog block come out of reset correctly? */
+		t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
+		is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
+
+		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later
+		 * in the init sequence -- figure out why. */
+
+		/* Have all PL4 block clocks locked? */
+		x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
+		     /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
+		is_pl4_outof_lock = (val & x);
+
+		/* ??? If this fails, we might be able to software-reset the XAUI
+		 * part and try to recover -- thus saving us from doing another
+		 * HW reset. */
+		/* Has the XAUI MABC PLL circuitry stabilized?
*/ + is_xaui_mabc_pll_locked = + (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED); + + successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock + && is_xaui_mabc_pll_locked); + + if (netif_msg_hw(adapter)) + dev_dbg(&adapter->pdev->dev, + "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, " + "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n", + i, is_pl4_reset_finished, val, + is_pl4_outof_lock, is_xaui_mabc_pll_locked); + } + return successful_reset ? 0 : 1; +} + +const struct gmac t1_pm3393_ops = { + .stats_update_period = STATS_TICK_SECS, + .create = pm3393_mac_create, + .reset = pm3393_mac_reset, +}; diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h new file mode 100644 index 0000000..c80bf4d --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/regs.h @@ -0,0 +1,2168 @@ +/***************************************************************************** + * * + * File: regs.h * + * $Revision: 1.8 $ * + * $Date: 2005/06/21 18:29:48 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. 
* + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_REGS_H_ +#define _CXGB_REGS_H_ + +/* SGE registers */ +#define A_SG_CONTROL 0x0 + +#define S_CMDQ0_ENABLE 0 +#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE) +#define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U) + +#define S_CMDQ1_ENABLE 1 +#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE) +#define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U) + +#define S_FL0_ENABLE 2 +#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE) +#define F_FL0_ENABLE V_FL0_ENABLE(1U) + +#define S_FL1_ENABLE 3 +#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE) +#define F_FL1_ENABLE V_FL1_ENABLE(1U) + +#define S_CPL_ENABLE 4 +#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE) +#define F_CPL_ENABLE V_CPL_ENABLE(1U) + +#define S_RESPONSE_QUEUE_ENABLE 5 +#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE) +#define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U) + +#define S_CMDQ_PRIORITY 6 +#define M_CMDQ_PRIORITY 0x3 +#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY) +#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY) + +#define S_DISABLE_CMDQ0_GTS 8 +#define V_DISABLE_CMDQ0_GTS(x) ((x) << S_DISABLE_CMDQ0_GTS) +#define F_DISABLE_CMDQ0_GTS V_DISABLE_CMDQ0_GTS(1U) + +#define S_DISABLE_CMDQ1_GTS 9 +#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS) +#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U) + +#define S_DISABLE_FL0_GTS 10 +#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS) +#define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U) + +#define S_DISABLE_FL1_GTS 11 +#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS) +#define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U) + +#define S_ENABLE_BIG_ENDIAN 12 +#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN) +#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U) + +#define S_FL_SELECTION_CRITERIA 13 +#define V_FL_SELECTION_CRITERIA(x) ((x) << S_FL_SELECTION_CRITERIA) +#define F_FL_SELECTION_CRITERIA V_FL_SELECTION_CRITERIA(1U) + +#define S_ISCSI_COALESCE 14 +#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE) +#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U) + +#define S_RX_PKT_OFFSET 15 +#define M_RX_PKT_OFFSET 0x7 +#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET) +#define G_RX_PKT_OFFSET(x) (((x) >> S_RX_PKT_OFFSET) & M_RX_PKT_OFFSET) + +#define S_VLAN_XTRACT 18 +#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT) +#define F_VLAN_XTRACT V_VLAN_XTRACT(1U) + +#define A_SG_DOORBELL 0x4 +#define A_SG_CMD0BASELWR 0x8 +#define A_SG_CMD0BASEUPR 0xc +#define A_SG_CMD1BASELWR 0x10 +#define A_SG_CMD1BASEUPR 0x14 +#define A_SG_FL0BASELWR 0x18 +#define A_SG_FL0BASEUPR 0x1c +#define A_SG_FL1BASELWR 0x20 +#define A_SG_FL1BASEUPR 0x24 +#define A_SG_CMD0SIZE 0x28 + +#define S_CMDQ0_SIZE 0 +#define M_CMDQ0_SIZE 0x1ffff +#define V_CMDQ0_SIZE(x) ((x) << S_CMDQ0_SIZE) +#define G_CMDQ0_SIZE(x) (((x) >> S_CMDQ0_SIZE) & M_CMDQ0_SIZE) + +#define A_SG_FL0SIZE 0x2c + +#define S_FL0_SIZE 0 +#define M_FL0_SIZE 0x1ffff +#define V_FL0_SIZE(x) ((x) << S_FL0_SIZE) +#define G_FL0_SIZE(x) (((x) >> S_FL0_SIZE) & M_FL0_SIZE) + +#define A_SG_RSPSIZE 0x30 + +#define S_RESPQ_SIZE 0 +#define M_RESPQ_SIZE 0x1ffff +#define V_RESPQ_SIZE(x) ((x) << S_RESPQ_SIZE) +#define G_RESPQ_SIZE(x) (((x) >> S_RESPQ_SIZE) & M_RESPQ_SIZE) + +#define A_SG_RSPBASELWR 0x34 +#define A_SG_RSPBASEUPR 0x38 
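Every entry in this header follows one naming convention: A_* is a register address, S_* a field's bit offset, M_* the mask of a multi-bit field, V_* shifts a value into place, F_* is the one-bit flag form, and G_* extracts a field from a register image. A stand-alone sketch of how the pieces compose, reusing the A_SG_CONTROL field macros defined above (the register value itself is invented for the demo):

	#include <stdio.h>

	/* Copied verbatim from the SGE section above. */
	#define S_CMDQ_PRIORITY    6
	#define M_CMDQ_PRIORITY    0x3
	#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
	#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)

	#define S_CMDQ0_ENABLE     0
	#define V_CMDQ0_ENABLE(x)  ((x) << S_CMDQ0_ENABLE)
	#define F_CMDQ0_ENABLE     V_CMDQ0_ENABLE(1U)

	int main(void)
	{
		/* Build an A_SG_CONTROL image: CMDQ0 enabled, priority field = 2. */
		unsigned int ctrl = F_CMDQ0_ENABLE | V_CMDQ_PRIORITY(2);

		printf("image 0x%x, priority %u\n", ctrl, G_CMDQ_PRIORITY(ctrl));
		return 0;
	}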
+#define A_SG_FLTHRESHOLD 0x3c + +#define S_FL_THRESHOLD 0 +#define M_FL_THRESHOLD 0xffff +#define V_FL_THRESHOLD(x) ((x) << S_FL_THRESHOLD) +#define G_FL_THRESHOLD(x) (((x) >> S_FL_THRESHOLD) & M_FL_THRESHOLD) + +#define A_SG_RSPQUEUECREDIT 0x40 + +#define S_RESPQ_CREDIT 0 +#define M_RESPQ_CREDIT 0x1ffff +#define V_RESPQ_CREDIT(x) ((x) << S_RESPQ_CREDIT) +#define G_RESPQ_CREDIT(x) (((x) >> S_RESPQ_CREDIT) & M_RESPQ_CREDIT) + +#define A_SG_SLEEPING 0x48 + +#define S_SLEEPING 0 +#define M_SLEEPING 0xffff +#define V_SLEEPING(x) ((x) << S_SLEEPING) +#define G_SLEEPING(x) (((x) >> S_SLEEPING) & M_SLEEPING) + +#define A_SG_INTRTIMER 0x4c + +#define S_INTERRUPT_TIMER_COUNT 0 +#define M_INTERRUPT_TIMER_COUNT 0xffffff +#define V_INTERRUPT_TIMER_COUNT(x) ((x) << S_INTERRUPT_TIMER_COUNT) +#define G_INTERRUPT_TIMER_COUNT(x) (((x) >> S_INTERRUPT_TIMER_COUNT) & M_INTERRUPT_TIMER_COUNT) + +#define A_SG_CMD0PTR 0x50 + +#define S_CMDQ0_POINTER 0 +#define M_CMDQ0_POINTER 0xffff +#define V_CMDQ0_POINTER(x) ((x) << S_CMDQ0_POINTER) +#define G_CMDQ0_POINTER(x) (((x) >> S_CMDQ0_POINTER) & M_CMDQ0_POINTER) + +#define S_CURRENT_GENERATION_BIT 16 +#define V_CURRENT_GENERATION_BIT(x) ((x) << S_CURRENT_GENERATION_BIT) +#define F_CURRENT_GENERATION_BIT V_CURRENT_GENERATION_BIT(1U) + +#define A_SG_CMD1PTR 0x54 + +#define S_CMDQ1_POINTER 0 +#define M_CMDQ1_POINTER 0xffff +#define V_CMDQ1_POINTER(x) ((x) << S_CMDQ1_POINTER) +#define G_CMDQ1_POINTER(x) (((x) >> S_CMDQ1_POINTER) & M_CMDQ1_POINTER) + +#define A_SG_FL0PTR 0x58 + +#define S_FL0_POINTER 0 +#define M_FL0_POINTER 0xffff +#define V_FL0_POINTER(x) ((x) << S_FL0_POINTER) +#define G_FL0_POINTER(x) (((x) >> S_FL0_POINTER) & M_FL0_POINTER) + +#define A_SG_FL1PTR 0x5c + +#define S_FL1_POINTER 0 +#define M_FL1_POINTER 0xffff +#define V_FL1_POINTER(x) ((x) << S_FL1_POINTER) +#define G_FL1_POINTER(x) (((x) >> S_FL1_POINTER) & M_FL1_POINTER) + +#define A_SG_VERSION 0x6c + +#define S_DAY 0 +#define M_DAY 0x1f +#define V_DAY(x) ((x) << S_DAY) +#define G_DAY(x) (((x) >> S_DAY) & M_DAY) + +#define S_MONTH 5 +#define M_MONTH 0xf +#define V_MONTH(x) ((x) << S_MONTH) +#define G_MONTH(x) (((x) >> S_MONTH) & M_MONTH) + +#define A_SG_CMD1SIZE 0xb0 + +#define S_CMDQ1_SIZE 0 +#define M_CMDQ1_SIZE 0x1ffff +#define V_CMDQ1_SIZE(x) ((x) << S_CMDQ1_SIZE) +#define G_CMDQ1_SIZE(x) (((x) >> S_CMDQ1_SIZE) & M_CMDQ1_SIZE) + +#define A_SG_FL1SIZE 0xb4 + +#define S_FL1_SIZE 0 +#define M_FL1_SIZE 0x1ffff +#define V_FL1_SIZE(x) ((x) << S_FL1_SIZE) +#define G_FL1_SIZE(x) (((x) >> S_FL1_SIZE) & M_FL1_SIZE) + +#define A_SG_INT_ENABLE 0xb8 + +#define S_RESPQ_EXHAUSTED 0 +#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED) +#define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U) + +#define S_RESPQ_OVERFLOW 1 +#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW) +#define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U) + +#define S_FL_EXHAUSTED 2 +#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED) +#define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U) + +#define S_PACKET_TOO_BIG 3 +#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG) +#define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U) + +#define S_PACKET_MISMATCH 4 +#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH) +#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U) + +#define A_SG_INT_CAUSE 0xbc +#define A_SG_RESPACCUTIMER 0xc0 + +/* MC3 registers */ +#define A_MC3_CFG 0x100 + +#define S_CLK_ENABLE 0 +#define V_CLK_ENABLE(x) ((x) << S_CLK_ENABLE) +#define F_CLK_ENABLE V_CLK_ENABLE(1U) + +#define S_READY 1 +#define V_READY(x) ((x) << S_READY) +#define F_READY V_READY(1U) + 
+#define S_READ_TO_WRITE_DELAY 2 +#define M_READ_TO_WRITE_DELAY 0x7 +#define V_READ_TO_WRITE_DELAY(x) ((x) << S_READ_TO_WRITE_DELAY) +#define G_READ_TO_WRITE_DELAY(x) (((x) >> S_READ_TO_WRITE_DELAY) & M_READ_TO_WRITE_DELAY) + +#define S_WRITE_TO_READ_DELAY 5 +#define M_WRITE_TO_READ_DELAY 0x7 +#define V_WRITE_TO_READ_DELAY(x) ((x) << S_WRITE_TO_READ_DELAY) +#define G_WRITE_TO_READ_DELAY(x) (((x) >> S_WRITE_TO_READ_DELAY) & M_WRITE_TO_READ_DELAY) + +#define S_MC3_BANK_CYCLE 8 +#define M_MC3_BANK_CYCLE 0xf +#define V_MC3_BANK_CYCLE(x) ((x) << S_MC3_BANK_CYCLE) +#define G_MC3_BANK_CYCLE(x) (((x) >> S_MC3_BANK_CYCLE) & M_MC3_BANK_CYCLE) + +#define S_REFRESH_CYCLE 12 +#define M_REFRESH_CYCLE 0xf +#define V_REFRESH_CYCLE(x) ((x) << S_REFRESH_CYCLE) +#define G_REFRESH_CYCLE(x) (((x) >> S_REFRESH_CYCLE) & M_REFRESH_CYCLE) + +#define S_PRECHARGE_CYCLE 16 +#define M_PRECHARGE_CYCLE 0x3 +#define V_PRECHARGE_CYCLE(x) ((x) << S_PRECHARGE_CYCLE) +#define G_PRECHARGE_CYCLE(x) (((x) >> S_PRECHARGE_CYCLE) & M_PRECHARGE_CYCLE) + +#define S_ACTIVE_TO_READ_WRITE_DELAY 18 +#define V_ACTIVE_TO_READ_WRITE_DELAY(x) ((x) << S_ACTIVE_TO_READ_WRITE_DELAY) +#define F_ACTIVE_TO_READ_WRITE_DELAY V_ACTIVE_TO_READ_WRITE_DELAY(1U) + +#define S_ACTIVE_TO_PRECHARGE_DELAY 19 +#define M_ACTIVE_TO_PRECHARGE_DELAY 0x7 +#define V_ACTIVE_TO_PRECHARGE_DELAY(x) ((x) << S_ACTIVE_TO_PRECHARGE_DELAY) +#define G_ACTIVE_TO_PRECHARGE_DELAY(x) (((x) >> S_ACTIVE_TO_PRECHARGE_DELAY) & M_ACTIVE_TO_PRECHARGE_DELAY) + +#define S_WRITE_RECOVERY_DELAY 22 +#define M_WRITE_RECOVERY_DELAY 0x3 +#define V_WRITE_RECOVERY_DELAY(x) ((x) << S_WRITE_RECOVERY_DELAY) +#define G_WRITE_RECOVERY_DELAY(x) (((x) >> S_WRITE_RECOVERY_DELAY) & M_WRITE_RECOVERY_DELAY) + +#define S_DENSITY 24 +#define M_DENSITY 0x3 +#define V_DENSITY(x) ((x) << S_DENSITY) +#define G_DENSITY(x) (((x) >> S_DENSITY) & M_DENSITY) + +#define S_ORGANIZATION 26 +#define V_ORGANIZATION(x) ((x) << S_ORGANIZATION) +#define F_ORGANIZATION V_ORGANIZATION(1U) + +#define S_BANKS 27 +#define V_BANKS(x) ((x) << S_BANKS) +#define F_BANKS V_BANKS(1U) + +#define S_UNREGISTERED 28 +#define V_UNREGISTERED(x) ((x) << S_UNREGISTERED) +#define F_UNREGISTERED V_UNREGISTERED(1U) + +#define S_MC3_WIDTH 29 +#define M_MC3_WIDTH 0x3 +#define V_MC3_WIDTH(x) ((x) << S_MC3_WIDTH) +#define G_MC3_WIDTH(x) (((x) >> S_MC3_WIDTH) & M_MC3_WIDTH) + +#define S_MC3_SLOW 31 +#define V_MC3_SLOW(x) ((x) << S_MC3_SLOW) +#define F_MC3_SLOW V_MC3_SLOW(1U) + +#define A_MC3_MODE 0x104 + +#define S_MC3_MODE 0 +#define M_MC3_MODE 0x3fff +#define V_MC3_MODE(x) ((x) << S_MC3_MODE) +#define G_MC3_MODE(x) (((x) >> S_MC3_MODE) & M_MC3_MODE) + +#define S_BUSY 31 +#define V_BUSY(x) ((x) << S_BUSY) +#define F_BUSY V_BUSY(1U) + +#define A_MC3_EXT_MODE 0x108 + +#define S_MC3_EXTENDED_MODE 0 +#define M_MC3_EXTENDED_MODE 0x3fff +#define V_MC3_EXTENDED_MODE(x) ((x) << S_MC3_EXTENDED_MODE) +#define G_MC3_EXTENDED_MODE(x) (((x) >> S_MC3_EXTENDED_MODE) & M_MC3_EXTENDED_MODE) + +#define A_MC3_PRECHARG 0x10c +#define A_MC3_REFRESH 0x110 + +#define S_REFRESH_ENABLE 0 +#define V_REFRESH_ENABLE(x) ((x) << S_REFRESH_ENABLE) +#define F_REFRESH_ENABLE V_REFRESH_ENABLE(1U) + +#define S_REFRESH_DIVISOR 1 +#define M_REFRESH_DIVISOR 0x3fff +#define V_REFRESH_DIVISOR(x) ((x) << S_REFRESH_DIVISOR) +#define G_REFRESH_DIVISOR(x) (((x) >> S_REFRESH_DIVISOR) & M_REFRESH_DIVISOR) + +#define A_MC3_STROBE 0x114 + +#define S_MASTER_DLL_RESET 0 +#define V_MASTER_DLL_RESET(x) ((x) << S_MASTER_DLL_RESET) +#define F_MASTER_DLL_RESET V_MASTER_DLL_RESET(1U) + +#define 
S_MASTER_DLL_TAP_COUNT 1 +#define M_MASTER_DLL_TAP_COUNT 0xff +#define V_MASTER_DLL_TAP_COUNT(x) ((x) << S_MASTER_DLL_TAP_COUNT) +#define G_MASTER_DLL_TAP_COUNT(x) (((x) >> S_MASTER_DLL_TAP_COUNT) & M_MASTER_DLL_TAP_COUNT) + +#define S_MASTER_DLL_LOCKED 9 +#define V_MASTER_DLL_LOCKED(x) ((x) << S_MASTER_DLL_LOCKED) +#define F_MASTER_DLL_LOCKED V_MASTER_DLL_LOCKED(1U) + +#define S_MASTER_DLL_MAX_TAP_COUNT 10 +#define V_MASTER_DLL_MAX_TAP_COUNT(x) ((x) << S_MASTER_DLL_MAX_TAP_COUNT) +#define F_MASTER_DLL_MAX_TAP_COUNT V_MASTER_DLL_MAX_TAP_COUNT(1U) + +#define S_MASTER_DLL_TAP_COUNT_OFFSET 11 +#define M_MASTER_DLL_TAP_COUNT_OFFSET 0x3f +#define V_MASTER_DLL_TAP_COUNT_OFFSET(x) ((x) << S_MASTER_DLL_TAP_COUNT_OFFSET) +#define G_MASTER_DLL_TAP_COUNT_OFFSET(x) (((x) >> S_MASTER_DLL_TAP_COUNT_OFFSET) & M_MASTER_DLL_TAP_COUNT_OFFSET) + +#define S_SLAVE_DLL_RESET 11 +#define V_SLAVE_DLL_RESET(x) ((x) << S_SLAVE_DLL_RESET) +#define F_SLAVE_DLL_RESET V_SLAVE_DLL_RESET(1U) + +#define S_SLAVE_DLL_DELTA 12 +#define M_SLAVE_DLL_DELTA 0xf +#define V_SLAVE_DLL_DELTA(x) ((x) << S_SLAVE_DLL_DELTA) +#define G_SLAVE_DLL_DELTA(x) (((x) >> S_SLAVE_DLL_DELTA) & M_SLAVE_DLL_DELTA) + +#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 17 +#define M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 0x3f +#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) +#define G_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) & M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) + +#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE 23 +#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE) +#define F_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(1U) + +#define S_SLAVE_DELAY_LINE_TAP_COUNT 24 +#define M_SLAVE_DELAY_LINE_TAP_COUNT 0x3f +#define V_SLAVE_DELAY_LINE_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_TAP_COUNT) +#define G_SLAVE_DELAY_LINE_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_TAP_COUNT) & M_SLAVE_DELAY_LINE_TAP_COUNT) + +#define A_MC3_ECC_CNTL 0x118 + +#define S_ECC_GENERATION_ENABLE 0 +#define V_ECC_GENERATION_ENABLE(x) ((x) << S_ECC_GENERATION_ENABLE) +#define F_ECC_GENERATION_ENABLE V_ECC_GENERATION_ENABLE(1U) + +#define S_ECC_CHECK_ENABLE 1 +#define V_ECC_CHECK_ENABLE(x) ((x) << S_ECC_CHECK_ENABLE) +#define F_ECC_CHECK_ENABLE V_ECC_CHECK_ENABLE(1U) + +#define S_CORRECTABLE_ERROR_COUNT 2 +#define M_CORRECTABLE_ERROR_COUNT 0xff +#define V_CORRECTABLE_ERROR_COUNT(x) ((x) << S_CORRECTABLE_ERROR_COUNT) +#define G_CORRECTABLE_ERROR_COUNT(x) (((x) >> S_CORRECTABLE_ERROR_COUNT) & M_CORRECTABLE_ERROR_COUNT) + +#define S_UNCORRECTABLE_ERROR_COUNT 10 +#define M_UNCORRECTABLE_ERROR_COUNT 0xff +#define V_UNCORRECTABLE_ERROR_COUNT(x) ((x) << S_UNCORRECTABLE_ERROR_COUNT) +#define G_UNCORRECTABLE_ERROR_COUNT(x) (((x) >> S_UNCORRECTABLE_ERROR_COUNT) & M_UNCORRECTABLE_ERROR_COUNT) + +#define A_MC3_CE_ADDR 0x11c + +#define S_MC3_CE_ADDR 4 +#define M_MC3_CE_ADDR 0xfffffff +#define V_MC3_CE_ADDR(x) ((x) << S_MC3_CE_ADDR) +#define G_MC3_CE_ADDR(x) (((x) >> S_MC3_CE_ADDR) & M_MC3_CE_ADDR) + +#define A_MC3_CE_DATA0 0x120 +#define A_MC3_CE_DATA1 0x124 +#define A_MC3_CE_DATA2 0x128 +#define A_MC3_CE_DATA3 0x12c +#define A_MC3_CE_DATA4 0x130 +#define A_MC3_UE_ADDR 0x134 + +#define S_MC3_UE_ADDR 4 +#define M_MC3_UE_ADDR 0xfffffff +#define V_MC3_UE_ADDR(x) ((x) << S_MC3_UE_ADDR) +#define G_MC3_UE_ADDR(x) (((x) >> S_MC3_UE_ADDR) & M_MC3_UE_ADDR) + +#define A_MC3_UE_DATA0 0x138 +#define A_MC3_UE_DATA1 0x13c +#define 
A_MC3_UE_DATA2 0x140 +#define A_MC3_UE_DATA3 0x144 +#define A_MC3_UE_DATA4 0x148 +#define A_MC3_BD_ADDR 0x14c +#define A_MC3_BD_DATA0 0x150 +#define A_MC3_BD_DATA1 0x154 +#define A_MC3_BD_DATA2 0x158 +#define A_MC3_BD_DATA3 0x15c +#define A_MC3_BD_DATA4 0x160 +#define A_MC3_BD_OP 0x164 + +#define S_BACK_DOOR_OPERATION 0 +#define V_BACK_DOOR_OPERATION(x) ((x) << S_BACK_DOOR_OPERATION) +#define F_BACK_DOOR_OPERATION V_BACK_DOOR_OPERATION(1U) + +#define A_MC3_BIST_ADDR_BEG 0x168 +#define A_MC3_BIST_ADDR_END 0x16c +#define A_MC3_BIST_DATA 0x170 +#define A_MC3_BIST_OP 0x174 + +#define S_OP 0 +#define V_OP(x) ((x) << S_OP) +#define F_OP V_OP(1U) + +#define S_DATA_PATTERN 1 +#define M_DATA_PATTERN 0x3 +#define V_DATA_PATTERN(x) ((x) << S_DATA_PATTERN) +#define G_DATA_PATTERN(x) (((x) >> S_DATA_PATTERN) & M_DATA_PATTERN) + +#define S_CONTINUOUS 3 +#define V_CONTINUOUS(x) ((x) << S_CONTINUOUS) +#define F_CONTINUOUS V_CONTINUOUS(1U) + +#define A_MC3_INT_ENABLE 0x178 + +#define S_MC3_CORR_ERR 0 +#define V_MC3_CORR_ERR(x) ((x) << S_MC3_CORR_ERR) +#define F_MC3_CORR_ERR V_MC3_CORR_ERR(1U) + +#define S_MC3_UNCORR_ERR 1 +#define V_MC3_UNCORR_ERR(x) ((x) << S_MC3_UNCORR_ERR) +#define F_MC3_UNCORR_ERR V_MC3_UNCORR_ERR(1U) + +#define S_MC3_PARITY_ERR 2 +#define M_MC3_PARITY_ERR 0xff +#define V_MC3_PARITY_ERR(x) ((x) << S_MC3_PARITY_ERR) +#define G_MC3_PARITY_ERR(x) (((x) >> S_MC3_PARITY_ERR) & M_MC3_PARITY_ERR) + +#define S_MC3_ADDR_ERR 10 +#define V_MC3_ADDR_ERR(x) ((x) << S_MC3_ADDR_ERR) +#define F_MC3_ADDR_ERR V_MC3_ADDR_ERR(1U) + +#define A_MC3_INT_CAUSE 0x17c + +/* MC4 registers */ +#define A_MC4_CFG 0x180 + +#define S_POWER_UP 0 +#define V_POWER_UP(x) ((x) << S_POWER_UP) +#define F_POWER_UP V_POWER_UP(1U) + +#define S_MC4_BANK_CYCLE 8 +#define M_MC4_BANK_CYCLE 0x7 +#define V_MC4_BANK_CYCLE(x) ((x) << S_MC4_BANK_CYCLE) +#define G_MC4_BANK_CYCLE(x) (((x) >> S_MC4_BANK_CYCLE) & M_MC4_BANK_CYCLE) + +#define S_MC4_NARROW 24 +#define V_MC4_NARROW(x) ((x) << S_MC4_NARROW) +#define F_MC4_NARROW V_MC4_NARROW(1U) + +#define S_MC4_SLOW 25 +#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW) +#define F_MC4_SLOW V_MC4_SLOW(1U) + +#define S_MC4A_WIDTH 24 +#define M_MC4A_WIDTH 0x3 +#define V_MC4A_WIDTH(x) ((x) << S_MC4A_WIDTH) +#define G_MC4A_WIDTH(x) (((x) >> S_MC4A_WIDTH) & M_MC4A_WIDTH) + +#define S_MC4A_SLOW 26 +#define V_MC4A_SLOW(x) ((x) << S_MC4A_SLOW) +#define F_MC4A_SLOW V_MC4A_SLOW(1U) + +#define A_MC4_MODE 0x184 + +#define S_MC4_MODE 0 +#define M_MC4_MODE 0x7fff +#define V_MC4_MODE(x) ((x) << S_MC4_MODE) +#define G_MC4_MODE(x) (((x) >> S_MC4_MODE) & M_MC4_MODE) + +#define A_MC4_EXT_MODE 0x188 + +#define S_MC4_EXTENDED_MODE 0 +#define M_MC4_EXTENDED_MODE 0x7fff +#define V_MC4_EXTENDED_MODE(x) ((x) << S_MC4_EXTENDED_MODE) +#define G_MC4_EXTENDED_MODE(x) (((x) >> S_MC4_EXTENDED_MODE) & M_MC4_EXTENDED_MODE) + +#define A_MC4_REFRESH 0x190 +#define A_MC4_STROBE 0x194 +#define A_MC4_ECC_CNTL 0x198 +#define A_MC4_CE_ADDR 0x19c + +#define S_MC4_CE_ADDR 4 +#define M_MC4_CE_ADDR 0xffffff +#define V_MC4_CE_ADDR(x) ((x) << S_MC4_CE_ADDR) +#define G_MC4_CE_ADDR(x) (((x) >> S_MC4_CE_ADDR) & M_MC4_CE_ADDR) + +#define A_MC4_CE_DATA0 0x1a0 +#define A_MC4_CE_DATA1 0x1a4 +#define A_MC4_CE_DATA2 0x1a8 +#define A_MC4_CE_DATA3 0x1ac +#define A_MC4_CE_DATA4 0x1b0 +#define A_MC4_UE_ADDR 0x1b4 + +#define S_MC4_UE_ADDR 4 +#define M_MC4_UE_ADDR 0xffffff +#define V_MC4_UE_ADDR(x) ((x) << S_MC4_UE_ADDR) +#define G_MC4_UE_ADDR(x) (((x) >> S_MC4_UE_ADDR) & M_MC4_UE_ADDR) + +#define A_MC4_UE_DATA0 0x1b8 +#define A_MC4_UE_DATA1 0x1bc +#define 
A_MC4_UE_DATA2 0x1c0 +#define A_MC4_UE_DATA3 0x1c4 +#define A_MC4_UE_DATA4 0x1c8 +#define A_MC4_BD_ADDR 0x1cc + +#define S_MC4_BACK_DOOR_ADDR 0 +#define M_MC4_BACK_DOOR_ADDR 0xfffffff +#define V_MC4_BACK_DOOR_ADDR(x) ((x) << S_MC4_BACK_DOOR_ADDR) +#define G_MC4_BACK_DOOR_ADDR(x) (((x) >> S_MC4_BACK_DOOR_ADDR) & M_MC4_BACK_DOOR_ADDR) + +#define A_MC4_BD_DATA0 0x1d0 +#define A_MC4_BD_DATA1 0x1d4 +#define A_MC4_BD_DATA2 0x1d8 +#define A_MC4_BD_DATA3 0x1dc +#define A_MC4_BD_DATA4 0x1e0 +#define A_MC4_BD_OP 0x1e4 + +#define S_OPERATION 0 +#define V_OPERATION(x) ((x) << S_OPERATION) +#define F_OPERATION V_OPERATION(1U) + +#define A_MC4_BIST_ADDR_BEG 0x1e8 +#define A_MC4_BIST_ADDR_END 0x1ec +#define A_MC4_BIST_DATA 0x1f0 +#define A_MC4_BIST_OP 0x1f4 +#define A_MC4_INT_ENABLE 0x1f8 + +#define S_MC4_CORR_ERR 0 +#define V_MC4_CORR_ERR(x) ((x) << S_MC4_CORR_ERR) +#define F_MC4_CORR_ERR V_MC4_CORR_ERR(1U) + +#define S_MC4_UNCORR_ERR 1 +#define V_MC4_UNCORR_ERR(x) ((x) << S_MC4_UNCORR_ERR) +#define F_MC4_UNCORR_ERR V_MC4_UNCORR_ERR(1U) + +#define S_MC4_ADDR_ERR 2 +#define V_MC4_ADDR_ERR(x) ((x) << S_MC4_ADDR_ERR) +#define F_MC4_ADDR_ERR V_MC4_ADDR_ERR(1U) + +#define A_MC4_INT_CAUSE 0x1fc + +/* TPI registers */ +#define A_TPI_ADDR 0x280 + +#define S_TPI_ADDRESS 0 +#define M_TPI_ADDRESS 0xffffff +#define V_TPI_ADDRESS(x) ((x) << S_TPI_ADDRESS) +#define G_TPI_ADDRESS(x) (((x) >> S_TPI_ADDRESS) & M_TPI_ADDRESS) + +#define A_TPI_WR_DATA 0x284 +#define A_TPI_RD_DATA 0x288 +#define A_TPI_CSR 0x28c + +#define S_TPIWR 0 +#define V_TPIWR(x) ((x) << S_TPIWR) +#define F_TPIWR V_TPIWR(1U) + +#define S_TPIRDY 1 +#define V_TPIRDY(x) ((x) << S_TPIRDY) +#define F_TPIRDY V_TPIRDY(1U) + +#define S_INT_DIR 31 +#define V_INT_DIR(x) ((x) << S_INT_DIR) +#define F_INT_DIR V_INT_DIR(1U) + +#define A_TPI_PAR 0x29c + +#define S_TPIPAR 0 +#define M_TPIPAR 0x7f +#define V_TPIPAR(x) ((x) << S_TPIPAR) +#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR) + + +/* TP registers */ +#define A_TP_IN_CONFIG 0x300 + +#define S_TP_IN_CSPI_TUNNEL 0 +#define V_TP_IN_CSPI_TUNNEL(x) ((x) << S_TP_IN_CSPI_TUNNEL) +#define F_TP_IN_CSPI_TUNNEL V_TP_IN_CSPI_TUNNEL(1U) + +#define S_TP_IN_CSPI_ETHERNET 1 +#define V_TP_IN_CSPI_ETHERNET(x) ((x) << S_TP_IN_CSPI_ETHERNET) +#define F_TP_IN_CSPI_ETHERNET V_TP_IN_CSPI_ETHERNET(1U) + +#define S_TP_IN_CSPI_CPL 3 +#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL) +#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U) + +#define S_TP_IN_CSPI_POS 4 +#define V_TP_IN_CSPI_POS(x) ((x) << S_TP_IN_CSPI_POS) +#define F_TP_IN_CSPI_POS V_TP_IN_CSPI_POS(1U) + +#define S_TP_IN_CSPI_CHECK_IP_CSUM 5 +#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM) +#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U) + +#define S_TP_IN_CSPI_CHECK_TCP_CSUM 6 +#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM) +#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U) + +#define S_TP_IN_ESPI_TUNNEL 7 +#define V_TP_IN_ESPI_TUNNEL(x) ((x) << S_TP_IN_ESPI_TUNNEL) +#define F_TP_IN_ESPI_TUNNEL V_TP_IN_ESPI_TUNNEL(1U) + +#define S_TP_IN_ESPI_ETHERNET 8 +#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET) +#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U) + +#define S_TP_IN_ESPI_CPL 10 +#define V_TP_IN_ESPI_CPL(x) ((x) << S_TP_IN_ESPI_CPL) +#define F_TP_IN_ESPI_CPL V_TP_IN_ESPI_CPL(1U) + +#define S_TP_IN_ESPI_POS 11 +#define V_TP_IN_ESPI_POS(x) ((x) << S_TP_IN_ESPI_POS) +#define F_TP_IN_ESPI_POS V_TP_IN_ESPI_POS(1U) + +#define S_TP_IN_ESPI_CHECK_IP_CSUM 12 +#define 
V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM) +#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U) + +#define S_TP_IN_ESPI_CHECK_TCP_CSUM 13 +#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM) +#define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U) + +#define S_OFFLOAD_DISABLE 14 +#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE) +#define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U) + +#define A_TP_OUT_CONFIG 0x304 + +#define S_TP_OUT_C_ETH 0 +#define V_TP_OUT_C_ETH(x) ((x) << S_TP_OUT_C_ETH) +#define F_TP_OUT_C_ETH V_TP_OUT_C_ETH(1U) + +#define S_TP_OUT_CSPI_CPL 2 +#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL) +#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U) + +#define S_TP_OUT_CSPI_POS 3 +#define V_TP_OUT_CSPI_POS(x) ((x) << S_TP_OUT_CSPI_POS) +#define F_TP_OUT_CSPI_POS V_TP_OUT_CSPI_POS(1U) + +#define S_TP_OUT_CSPI_GENERATE_IP_CSUM 4 +#define V_TP_OUT_CSPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_IP_CSUM) +#define F_TP_OUT_CSPI_GENERATE_IP_CSUM V_TP_OUT_CSPI_GENERATE_IP_CSUM(1U) + +#define S_TP_OUT_CSPI_GENERATE_TCP_CSUM 5 +#define V_TP_OUT_CSPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_TCP_CSUM) +#define F_TP_OUT_CSPI_GENERATE_TCP_CSUM V_TP_OUT_CSPI_GENERATE_TCP_CSUM(1U) + +#define S_TP_OUT_ESPI_ETHERNET 6 +#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET) +#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U) + +#define S_TP_OUT_ESPI_TAG_ETHERNET 7 +#define V_TP_OUT_ESPI_TAG_ETHERNET(x) ((x) << S_TP_OUT_ESPI_TAG_ETHERNET) +#define F_TP_OUT_ESPI_TAG_ETHERNET V_TP_OUT_ESPI_TAG_ETHERNET(1U) + +#define S_TP_OUT_ESPI_CPL 8 +#define V_TP_OUT_ESPI_CPL(x) ((x) << S_TP_OUT_ESPI_CPL) +#define F_TP_OUT_ESPI_CPL V_TP_OUT_ESPI_CPL(1U) + +#define S_TP_OUT_ESPI_POS 9 +#define V_TP_OUT_ESPI_POS(x) ((x) << S_TP_OUT_ESPI_POS) +#define F_TP_OUT_ESPI_POS V_TP_OUT_ESPI_POS(1U) + +#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10 +#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM) +#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U) + +#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11 +#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM) +#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U) + +#define A_TP_GLOBAL_CONFIG 0x308 + +#define S_IP_TTL 0 +#define M_IP_TTL 0xff +#define V_IP_TTL(x) ((x) << S_IP_TTL) +#define G_IP_TTL(x) (((x) >> S_IP_TTL) & M_IP_TTL) + +#define S_TCAM_SERVER_REGION_USAGE 8 +#define M_TCAM_SERVER_REGION_USAGE 0x3 +#define V_TCAM_SERVER_REGION_USAGE(x) ((x) << S_TCAM_SERVER_REGION_USAGE) +#define G_TCAM_SERVER_REGION_USAGE(x) (((x) >> S_TCAM_SERVER_REGION_USAGE) & M_TCAM_SERVER_REGION_USAGE) + +#define S_QOS_MAPPING 10 +#define V_QOS_MAPPING(x) ((x) << S_QOS_MAPPING) +#define F_QOS_MAPPING V_QOS_MAPPING(1U) + +#define S_TCP_CSUM 11 +#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM) +#define F_TCP_CSUM V_TCP_CSUM(1U) + +#define S_UDP_CSUM 12 +#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM) +#define F_UDP_CSUM V_UDP_CSUM(1U) + +#define S_IP_CSUM 13 +#define V_IP_CSUM(x) ((x) << S_IP_CSUM) +#define F_IP_CSUM V_IP_CSUM(1U) + +#define S_IP_ID_SPLIT 14 +#define V_IP_ID_SPLIT(x) ((x) << S_IP_ID_SPLIT) +#define F_IP_ID_SPLIT V_IP_ID_SPLIT(1U) + +#define S_PATH_MTU 15 +#define V_PATH_MTU(x) ((x) << S_PATH_MTU) +#define F_PATH_MTU V_PATH_MTU(1U) + +#define S_5TUPLE_LOOKUP 17 +#define M_5TUPLE_LOOKUP 0x3 +#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP) +#define G_5TUPLE_LOOKUP(x) (((x) >> 
S_5TUPLE_LOOKUP) & M_5TUPLE_LOOKUP) + +#define S_IP_FRAGMENT_DROP 19 +#define V_IP_FRAGMENT_DROP(x) ((x) << S_IP_FRAGMENT_DROP) +#define F_IP_FRAGMENT_DROP V_IP_FRAGMENT_DROP(1U) + +#define S_PING_DROP 20 +#define V_PING_DROP(x) ((x) << S_PING_DROP) +#define F_PING_DROP V_PING_DROP(1U) + +#define S_PROTECT_MODE 21 +#define V_PROTECT_MODE(x) ((x) << S_PROTECT_MODE) +#define F_PROTECT_MODE V_PROTECT_MODE(1U) + +#define S_SYN_COOKIE_ALGORITHM 22 +#define V_SYN_COOKIE_ALGORITHM(x) ((x) << S_SYN_COOKIE_ALGORITHM) +#define F_SYN_COOKIE_ALGORITHM V_SYN_COOKIE_ALGORITHM(1U) + +#define S_ATTACK_FILTER 23 +#define V_ATTACK_FILTER(x) ((x) << S_ATTACK_FILTER) +#define F_ATTACK_FILTER V_ATTACK_FILTER(1U) + +#define S_INTERFACE_TYPE 24 +#define V_INTERFACE_TYPE(x) ((x) << S_INTERFACE_TYPE) +#define F_INTERFACE_TYPE V_INTERFACE_TYPE(1U) + +#define S_DISABLE_RX_FLOW_CONTROL 25 +#define V_DISABLE_RX_FLOW_CONTROL(x) ((x) << S_DISABLE_RX_FLOW_CONTROL) +#define F_DISABLE_RX_FLOW_CONTROL V_DISABLE_RX_FLOW_CONTROL(1U) + +#define S_SYN_COOKIE_PARAMETER 26 +#define M_SYN_COOKIE_PARAMETER 0x3f +#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER) +#define G_SYN_COOKIE_PARAMETER(x) (((x) >> S_SYN_COOKIE_PARAMETER) & M_SYN_COOKIE_PARAMETER) + +#define A_TP_GLOBAL_RX_CREDITS 0x30c +#define A_TP_CM_SIZE 0x310 +#define A_TP_CM_MM_BASE 0x314 + +#define S_CM_MEMMGR_BASE 0 +#define M_CM_MEMMGR_BASE 0xfffffff +#define V_CM_MEMMGR_BASE(x) ((x) << S_CM_MEMMGR_BASE) +#define G_CM_MEMMGR_BASE(x) (((x) >> S_CM_MEMMGR_BASE) & M_CM_MEMMGR_BASE) + +#define A_TP_CM_TIMER_BASE 0x318 + +#define S_CM_TIMER_BASE 0 +#define M_CM_TIMER_BASE 0xfffffff +#define V_CM_TIMER_BASE(x) ((x) << S_CM_TIMER_BASE) +#define G_CM_TIMER_BASE(x) (((x) >> S_CM_TIMER_BASE) & M_CM_TIMER_BASE) + +#define A_TP_PM_SIZE 0x31c +#define A_TP_PM_TX_BASE 0x320 +#define A_TP_PM_DEFRAG_BASE 0x324 +#define A_TP_PM_RX_BASE 0x328 +#define A_TP_PM_RX_PG_SIZE 0x32c +#define A_TP_PM_RX_MAX_PGS 0x330 +#define A_TP_PM_TX_PG_SIZE 0x334 +#define A_TP_PM_TX_MAX_PGS 0x338 +#define A_TP_TCP_OPTIONS 0x340 + +#define S_TIMESTAMP 0 +#define M_TIMESTAMP 0x3 +#define V_TIMESTAMP(x) ((x) << S_TIMESTAMP) +#define G_TIMESTAMP(x) (((x) >> S_TIMESTAMP) & M_TIMESTAMP) + +#define S_WINDOW_SCALE 2 +#define M_WINDOW_SCALE 0x3 +#define V_WINDOW_SCALE(x) ((x) << S_WINDOW_SCALE) +#define G_WINDOW_SCALE(x) (((x) >> S_WINDOW_SCALE) & M_WINDOW_SCALE) + +#define S_SACK 4 +#define M_SACK 0x3 +#define V_SACK(x) ((x) << S_SACK) +#define G_SACK(x) (((x) >> S_SACK) & M_SACK) + +#define S_ECN 6 +#define M_ECN 0x3 +#define V_ECN(x) ((x) << S_ECN) +#define G_ECN(x) (((x) >> S_ECN) & M_ECN) + +#define S_SACK_ALGORITHM 8 +#define M_SACK_ALGORITHM 0x3 +#define V_SACK_ALGORITHM(x) ((x) << S_SACK_ALGORITHM) +#define G_SACK_ALGORITHM(x) (((x) >> S_SACK_ALGORITHM) & M_SACK_ALGORITHM) + +#define S_MSS 10 +#define V_MSS(x) ((x) << S_MSS) +#define F_MSS V_MSS(1U) + +#define S_DEFAULT_PEER_MSS 16 +#define M_DEFAULT_PEER_MSS 0xffff +#define V_DEFAULT_PEER_MSS(x) ((x) << S_DEFAULT_PEER_MSS) +#define G_DEFAULT_PEER_MSS(x) (((x) >> S_DEFAULT_PEER_MSS) & M_DEFAULT_PEER_MSS) + +#define A_TP_DACK_CONFIG 0x344 + +#define S_DACK_MODE 0 +#define V_DACK_MODE(x) ((x) << S_DACK_MODE) +#define F_DACK_MODE V_DACK_MODE(1U) + +#define S_DACK_AUTO_MGMT 1 +#define V_DACK_AUTO_MGMT(x) ((x) << S_DACK_AUTO_MGMT) +#define F_DACK_AUTO_MGMT V_DACK_AUTO_MGMT(1U) + +#define S_DACK_AUTO_CAREFUL 2 +#define V_DACK_AUTO_CAREFUL(x) ((x) << S_DACK_AUTO_CAREFUL) +#define F_DACK_AUTO_CAREFUL V_DACK_AUTO_CAREFUL(1U) + +#define 
S_DACK_MSS_SELECTOR 3 +#define M_DACK_MSS_SELECTOR 0x3 +#define V_DACK_MSS_SELECTOR(x) ((x) << S_DACK_MSS_SELECTOR) +#define G_DACK_MSS_SELECTOR(x) (((x) >> S_DACK_MSS_SELECTOR) & M_DACK_MSS_SELECTOR) + +#define S_DACK_BYTE_THRESHOLD 5 +#define M_DACK_BYTE_THRESHOLD 0xfffff +#define V_DACK_BYTE_THRESHOLD(x) ((x) << S_DACK_BYTE_THRESHOLD) +#define G_DACK_BYTE_THRESHOLD(x) (((x) >> S_DACK_BYTE_THRESHOLD) & M_DACK_BYTE_THRESHOLD) + +#define A_TP_PC_CONFIG 0x348 + +#define S_TP_ACCESS_LATENCY 0 +#define M_TP_ACCESS_LATENCY 0xf +#define V_TP_ACCESS_LATENCY(x) ((x) << S_TP_ACCESS_LATENCY) +#define G_TP_ACCESS_LATENCY(x) (((x) >> S_TP_ACCESS_LATENCY) & M_TP_ACCESS_LATENCY) + +#define S_HELD_FIN_DISABLE 4 +#define V_HELD_FIN_DISABLE(x) ((x) << S_HELD_FIN_DISABLE) +#define F_HELD_FIN_DISABLE V_HELD_FIN_DISABLE(1U) + +#define S_DDP_FC_ENABLE 5 +#define V_DDP_FC_ENABLE(x) ((x) << S_DDP_FC_ENABLE) +#define F_DDP_FC_ENABLE V_DDP_FC_ENABLE(1U) + +#define S_RDMA_ERR_ENABLE 6 +#define V_RDMA_ERR_ENABLE(x) ((x) << S_RDMA_ERR_ENABLE) +#define F_RDMA_ERR_ENABLE V_RDMA_ERR_ENABLE(1U) + +#define S_FAST_PDU_DELIVERY 7 +#define V_FAST_PDU_DELIVERY(x) ((x) << S_FAST_PDU_DELIVERY) +#define F_FAST_PDU_DELIVERY V_FAST_PDU_DELIVERY(1U) + +#define S_CLEAR_FIN 8 +#define V_CLEAR_FIN(x) ((x) << S_CLEAR_FIN) +#define F_CLEAR_FIN V_CLEAR_FIN(1U) + +#define S_DIS_TX_FILL_WIN_PUSH 12 +#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH) +#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U) + +#define S_TP_PC_REV 30 +#define M_TP_PC_REV 0x3 +#define V_TP_PC_REV(x) ((x) << S_TP_PC_REV) +#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV) + +#define A_TP_BACKOFF0 0x350 + +#define S_ELEMENT0 0 +#define M_ELEMENT0 0xff +#define V_ELEMENT0(x) ((x) << S_ELEMENT0) +#define G_ELEMENT0(x) (((x) >> S_ELEMENT0) & M_ELEMENT0) + +#define S_ELEMENT1 8 +#define M_ELEMENT1 0xff +#define V_ELEMENT1(x) ((x) << S_ELEMENT1) +#define G_ELEMENT1(x) (((x) >> S_ELEMENT1) & M_ELEMENT1) + +#define S_ELEMENT2 16 +#define M_ELEMENT2 0xff +#define V_ELEMENT2(x) ((x) << S_ELEMENT2) +#define G_ELEMENT2(x) (((x) >> S_ELEMENT2) & M_ELEMENT2) + +#define S_ELEMENT3 24 +#define M_ELEMENT3 0xff +#define V_ELEMENT3(x) ((x) << S_ELEMENT3) +#define G_ELEMENT3(x) (((x) >> S_ELEMENT3) & M_ELEMENT3) + +#define A_TP_BACKOFF1 0x354 +#define A_TP_BACKOFF2 0x358 +#define A_TP_BACKOFF3 0x35c +#define A_TP_PARA_REG0 0x360 + +#define S_VAR_MULT 0 +#define M_VAR_MULT 0xf +#define V_VAR_MULT(x) ((x) << S_VAR_MULT) +#define G_VAR_MULT(x) (((x) >> S_VAR_MULT) & M_VAR_MULT) + +#define S_VAR_GAIN 4 +#define M_VAR_GAIN 0xf +#define V_VAR_GAIN(x) ((x) << S_VAR_GAIN) +#define G_VAR_GAIN(x) (((x) >> S_VAR_GAIN) & M_VAR_GAIN) + +#define S_SRTT_GAIN 8 +#define M_SRTT_GAIN 0xf +#define V_SRTT_GAIN(x) ((x) << S_SRTT_GAIN) +#define G_SRTT_GAIN(x) (((x) >> S_SRTT_GAIN) & M_SRTT_GAIN) + +#define S_RTTVAR_INIT 12 +#define M_RTTVAR_INIT 0xf +#define V_RTTVAR_INIT(x) ((x) << S_RTTVAR_INIT) +#define G_RTTVAR_INIT(x) (((x) >> S_RTTVAR_INIT) & M_RTTVAR_INIT) + +#define S_DUP_THRESH 20 +#define M_DUP_THRESH 0xf +#define V_DUP_THRESH(x) ((x) << S_DUP_THRESH) +#define G_DUP_THRESH(x) (((x) >> S_DUP_THRESH) & M_DUP_THRESH) + +#define S_INIT_CONG_WIN 24 +#define M_INIT_CONG_WIN 0x7 +#define V_INIT_CONG_WIN(x) ((x) << S_INIT_CONG_WIN) +#define G_INIT_CONG_WIN(x) (((x) >> S_INIT_CONG_WIN) & M_INIT_CONG_WIN) + +#define A_TP_PARA_REG1 0x364 + +#define S_INITIAL_SLOW_START_THRESHOLD 0 +#define M_INITIAL_SLOW_START_THRESHOLD 0xffff +#define V_INITIAL_SLOW_START_THRESHOLD(x) ((x) << 
S_INITIAL_SLOW_START_THRESHOLD) +#define G_INITIAL_SLOW_START_THRESHOLD(x) (((x) >> S_INITIAL_SLOW_START_THRESHOLD) & M_INITIAL_SLOW_START_THRESHOLD) + +#define S_RECEIVE_BUFFER_SIZE 16 +#define M_RECEIVE_BUFFER_SIZE 0xffff +#define V_RECEIVE_BUFFER_SIZE(x) ((x) << S_RECEIVE_BUFFER_SIZE) +#define G_RECEIVE_BUFFER_SIZE(x) (((x) >> S_RECEIVE_BUFFER_SIZE) & M_RECEIVE_BUFFER_SIZE) + +#define A_TP_PARA_REG2 0x368 + +#define S_RX_COALESCE_SIZE 0 +#define M_RX_COALESCE_SIZE 0xffff +#define V_RX_COALESCE_SIZE(x) ((x) << S_RX_COALESCE_SIZE) +#define G_RX_COALESCE_SIZE(x) (((x) >> S_RX_COALESCE_SIZE) & M_RX_COALESCE_SIZE) + +#define S_MAX_RX_SIZE 16 +#define M_MAX_RX_SIZE 0xffff +#define V_MAX_RX_SIZE(x) ((x) << S_MAX_RX_SIZE) +#define G_MAX_RX_SIZE(x) (((x) >> S_MAX_RX_SIZE) & M_MAX_RX_SIZE) + +#define A_TP_PARA_REG3 0x36c + +#define S_RX_COALESCING_PSH_DELIVER 0 +#define V_RX_COALESCING_PSH_DELIVER(x) ((x) << S_RX_COALESCING_PSH_DELIVER) +#define F_RX_COALESCING_PSH_DELIVER V_RX_COALESCING_PSH_DELIVER(1U) + +#define S_RX_COALESCING_ENABLE 1 +#define V_RX_COALESCING_ENABLE(x) ((x) << S_RX_COALESCING_ENABLE) +#define F_RX_COALESCING_ENABLE V_RX_COALESCING_ENABLE(1U) + +#define S_TAHOE_ENABLE 2 +#define V_TAHOE_ENABLE(x) ((x) << S_TAHOE_ENABLE) +#define F_TAHOE_ENABLE V_TAHOE_ENABLE(1U) + +#define S_MAX_REORDER_FRAGMENTS 12 +#define M_MAX_REORDER_FRAGMENTS 0x7 +#define V_MAX_REORDER_FRAGMENTS(x) ((x) << S_MAX_REORDER_FRAGMENTS) +#define G_MAX_REORDER_FRAGMENTS(x) (((x) >> S_MAX_REORDER_FRAGMENTS) & M_MAX_REORDER_FRAGMENTS) + +#define A_TP_TIMER_RESOLUTION 0x390 + +#define S_DELAYED_ACK_TIMER_RESOLUTION 0 +#define M_DELAYED_ACK_TIMER_RESOLUTION 0x3f +#define V_DELAYED_ACK_TIMER_RESOLUTION(x) ((x) << S_DELAYED_ACK_TIMER_RESOLUTION) +#define G_DELAYED_ACK_TIMER_RESOLUTION(x) (((x) >> S_DELAYED_ACK_TIMER_RESOLUTION) & M_DELAYED_ACK_TIMER_RESOLUTION) + +#define S_GENERIC_TIMER_RESOLUTION 16 +#define M_GENERIC_TIMER_RESOLUTION 0x3f +#define V_GENERIC_TIMER_RESOLUTION(x) ((x) << S_GENERIC_TIMER_RESOLUTION) +#define G_GENERIC_TIMER_RESOLUTION(x) (((x) >> S_GENERIC_TIMER_RESOLUTION) & M_GENERIC_TIMER_RESOLUTION) + +#define A_TP_2MSL 0x394 + +#define S_2MSL 0 +#define M_2MSL 0x3fffffff +#define V_2MSL(x) ((x) << S_2MSL) +#define G_2MSL(x) (((x) >> S_2MSL) & M_2MSL) + +#define A_TP_RXT_MIN 0x398 + +#define S_RETRANSMIT_TIMER_MIN 0 +#define M_RETRANSMIT_TIMER_MIN 0xffff +#define V_RETRANSMIT_TIMER_MIN(x) ((x) << S_RETRANSMIT_TIMER_MIN) +#define G_RETRANSMIT_TIMER_MIN(x) (((x) >> S_RETRANSMIT_TIMER_MIN) & M_RETRANSMIT_TIMER_MIN) + +#define A_TP_RXT_MAX 0x39c + +#define S_RETRANSMIT_TIMER_MAX 0 +#define M_RETRANSMIT_TIMER_MAX 0x3fffffff +#define V_RETRANSMIT_TIMER_MAX(x) ((x) << S_RETRANSMIT_TIMER_MAX) +#define G_RETRANSMIT_TIMER_MAX(x) (((x) >> S_RETRANSMIT_TIMER_MAX) & M_RETRANSMIT_TIMER_MAX) + +#define A_TP_PERS_MIN 0x3a0 + +#define S_PERSIST_TIMER_MIN 0 +#define M_PERSIST_TIMER_MIN 0xffff +#define V_PERSIST_TIMER_MIN(x) ((x) << S_PERSIST_TIMER_MIN) +#define G_PERSIST_TIMER_MIN(x) (((x) >> S_PERSIST_TIMER_MIN) & M_PERSIST_TIMER_MIN) + +#define A_TP_PERS_MAX 0x3a4 + +#define S_PERSIST_TIMER_MAX 0 +#define M_PERSIST_TIMER_MAX 0x3fffffff +#define V_PERSIST_TIMER_MAX(x) ((x) << S_PERSIST_TIMER_MAX) +#define G_PERSIST_TIMER_MAX(x) (((x) >> S_PERSIST_TIMER_MAX) & M_PERSIST_TIMER_MAX) + +#define A_TP_KEEP_IDLE 0x3ac + +#define S_KEEP_ALIVE_IDLE_TIME 0 +#define M_KEEP_ALIVE_IDLE_TIME 0x3fffffff +#define V_KEEP_ALIVE_IDLE_TIME(x) ((x) << S_KEEP_ALIVE_IDLE_TIME) +#define G_KEEP_ALIVE_IDLE_TIME(x) (((x) >> 
S_KEEP_ALIVE_IDLE_TIME) & M_KEEP_ALIVE_IDLE_TIME) + +#define A_TP_KEEP_INTVL 0x3b0 + +#define S_KEEP_ALIVE_INTERVAL_TIME 0 +#define M_KEEP_ALIVE_INTERVAL_TIME 0x3fffffff +#define V_KEEP_ALIVE_INTERVAL_TIME(x) ((x) << S_KEEP_ALIVE_INTERVAL_TIME) +#define G_KEEP_ALIVE_INTERVAL_TIME(x) (((x) >> S_KEEP_ALIVE_INTERVAL_TIME) & M_KEEP_ALIVE_INTERVAL_TIME) + +#define A_TP_INIT_SRTT 0x3b4 + +#define S_INITIAL_SRTT 0 +#define M_INITIAL_SRTT 0xffff +#define V_INITIAL_SRTT(x) ((x) << S_INITIAL_SRTT) +#define G_INITIAL_SRTT(x) (((x) >> S_INITIAL_SRTT) & M_INITIAL_SRTT) + +#define A_TP_DACK_TIME 0x3b8 + +#define S_DELAYED_ACK_TIME 0 +#define M_DELAYED_ACK_TIME 0x7ff +#define V_DELAYED_ACK_TIME(x) ((x) << S_DELAYED_ACK_TIME) +#define G_DELAYED_ACK_TIME(x) (((x) >> S_DELAYED_ACK_TIME) & M_DELAYED_ACK_TIME) + +#define A_TP_FINWAIT2_TIME 0x3bc + +#define S_FINWAIT2_TIME 0 +#define M_FINWAIT2_TIME 0x3fffffff +#define V_FINWAIT2_TIME(x) ((x) << S_FINWAIT2_TIME) +#define G_FINWAIT2_TIME(x) (((x) >> S_FINWAIT2_TIME) & M_FINWAIT2_TIME) + +#define A_TP_FAST_FINWAIT2_TIME 0x3c0 + +#define S_FAST_FINWAIT2_TIME 0 +#define M_FAST_FINWAIT2_TIME 0x3fffffff +#define V_FAST_FINWAIT2_TIME(x) ((x) << S_FAST_FINWAIT2_TIME) +#define G_FAST_FINWAIT2_TIME(x) (((x) >> S_FAST_FINWAIT2_TIME) & M_FAST_FINWAIT2_TIME) + +#define A_TP_SHIFT_CNT 0x3c4 + +#define S_KEEPALIVE_MAX 0 +#define M_KEEPALIVE_MAX 0xff +#define V_KEEPALIVE_MAX(x) ((x) << S_KEEPALIVE_MAX) +#define G_KEEPALIVE_MAX(x) (((x) >> S_KEEPALIVE_MAX) & M_KEEPALIVE_MAX) + +#define S_WINDOWPROBE_MAX 8 +#define M_WINDOWPROBE_MAX 0xff +#define V_WINDOWPROBE_MAX(x) ((x) << S_WINDOWPROBE_MAX) +#define G_WINDOWPROBE_MAX(x) (((x) >> S_WINDOWPROBE_MAX) & M_WINDOWPROBE_MAX) + +#define S_RETRANSMISSION_MAX 16 +#define M_RETRANSMISSION_MAX 0xff +#define V_RETRANSMISSION_MAX(x) ((x) << S_RETRANSMISSION_MAX) +#define G_RETRANSMISSION_MAX(x) (((x) >> S_RETRANSMISSION_MAX) & M_RETRANSMISSION_MAX) + +#define S_SYN_MAX 24 +#define M_SYN_MAX 0xff +#define V_SYN_MAX(x) ((x) << S_SYN_MAX) +#define G_SYN_MAX(x) (((x) >> S_SYN_MAX) & M_SYN_MAX) + +#define A_TP_QOS_REG0 0x3e0 + +#define S_L3_VALUE 0 +#define M_L3_VALUE 0x3f +#define V_L3_VALUE(x) ((x) << S_L3_VALUE) +#define G_L3_VALUE(x) (((x) >> S_L3_VALUE) & M_L3_VALUE) + +#define A_TP_QOS_REG1 0x3e4 +#define A_TP_QOS_REG2 0x3e8 +#define A_TP_QOS_REG3 0x3ec +#define A_TP_QOS_REG4 0x3f0 +#define A_TP_QOS_REG5 0x3f4 +#define A_TP_QOS_REG6 0x3f8 +#define A_TP_QOS_REG7 0x3fc +#define A_TP_MTU_REG0 0x404 +#define A_TP_MTU_REG1 0x408 +#define A_TP_MTU_REG2 0x40c +#define A_TP_MTU_REG3 0x410 +#define A_TP_MTU_REG4 0x414 +#define A_TP_MTU_REG5 0x418 +#define A_TP_MTU_REG6 0x41c +#define A_TP_MTU_REG7 0x420 +#define A_TP_RESET 0x44c + +#define S_TP_RESET 0 +#define V_TP_RESET(x) ((x) << S_TP_RESET) +#define F_TP_RESET V_TP_RESET(1U) + +#define S_CM_MEMMGR_INIT 1 +#define V_CM_MEMMGR_INIT(x) ((x) << S_CM_MEMMGR_INIT) +#define F_CM_MEMMGR_INIT V_CM_MEMMGR_INIT(1U) + +#define A_TP_MIB_INDEX 0x450 +#define A_TP_MIB_DATA 0x454 +#define A_TP_SYNC_TIME_HI 0x458 +#define A_TP_SYNC_TIME_LO 0x45c +#define A_TP_CM_MM_RX_FLST_BASE 0x460 + +#define S_CM_MEMMGR_RX_FREE_LIST_BASE 0 +#define M_CM_MEMMGR_RX_FREE_LIST_BASE 0xfffffff +#define V_CM_MEMMGR_RX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_RX_FREE_LIST_BASE) +#define G_CM_MEMMGR_RX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_RX_FREE_LIST_BASE) & M_CM_MEMMGR_RX_FREE_LIST_BASE) + +#define A_TP_CM_MM_TX_FLST_BASE 0x464 + +#define S_CM_MEMMGR_TX_FREE_LIST_BASE 0 +#define M_CM_MEMMGR_TX_FREE_LIST_BASE 0xfffffff +#define 
V_CM_MEMMGR_TX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_TX_FREE_LIST_BASE) +#define G_CM_MEMMGR_TX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_TX_FREE_LIST_BASE) & M_CM_MEMMGR_TX_FREE_LIST_BASE) + +#define A_TP_CM_MM_P_FLST_BASE 0x468 + +#define S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0 +#define M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0xfffffff +#define V_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) +#define G_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) & M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) + +#define A_TP_CM_MM_MAX_P 0x46c + +#define S_CM_MEMMGR_MAX_PSTRUCT 0 +#define M_CM_MEMMGR_MAX_PSTRUCT 0xfffffff +#define V_CM_MEMMGR_MAX_PSTRUCT(x) ((x) << S_CM_MEMMGR_MAX_PSTRUCT) +#define G_CM_MEMMGR_MAX_PSTRUCT(x) (((x) >> S_CM_MEMMGR_MAX_PSTRUCT) & M_CM_MEMMGR_MAX_PSTRUCT) + +#define A_TP_INT_ENABLE 0x470 + +#define S_TX_FREE_LIST_EMPTY 0 +#define V_TX_FREE_LIST_EMPTY(x) ((x) << S_TX_FREE_LIST_EMPTY) +#define F_TX_FREE_LIST_EMPTY V_TX_FREE_LIST_EMPTY(1U) + +#define S_RX_FREE_LIST_EMPTY 1 +#define V_RX_FREE_LIST_EMPTY(x) ((x) << S_RX_FREE_LIST_EMPTY) +#define F_RX_FREE_LIST_EMPTY V_RX_FREE_LIST_EMPTY(1U) + +#define A_TP_INT_CAUSE 0x474 +#define A_TP_TIMER_SEPARATOR 0x4a4 + +#define S_DISABLE_PAST_TIMER_INSERTION 0 +#define V_DISABLE_PAST_TIMER_INSERTION(x) ((x) << S_DISABLE_PAST_TIMER_INSERTION) +#define F_DISABLE_PAST_TIMER_INSERTION V_DISABLE_PAST_TIMER_INSERTION(1U) + +#define S_MODULATION_TIMER_SEPARATOR 1 +#define M_MODULATION_TIMER_SEPARATOR 0x7fff +#define V_MODULATION_TIMER_SEPARATOR(x) ((x) << S_MODULATION_TIMER_SEPARATOR) +#define G_MODULATION_TIMER_SEPARATOR(x) (((x) >> S_MODULATION_TIMER_SEPARATOR) & M_MODULATION_TIMER_SEPARATOR) + +#define S_GLOBAL_TIMER_SEPARATOR 16 +#define M_GLOBAL_TIMER_SEPARATOR 0xffff +#define V_GLOBAL_TIMER_SEPARATOR(x) ((x) << S_GLOBAL_TIMER_SEPARATOR) +#define G_GLOBAL_TIMER_SEPARATOR(x) (((x) >> S_GLOBAL_TIMER_SEPARATOR) & M_GLOBAL_TIMER_SEPARATOR) + +#define A_TP_CM_FC_MODE 0x4b0 +#define A_TP_PC_CONGESTION_CNTL 0x4b4 +#define A_TP_TX_DROP_CONFIG 0x4b8 + +#define S_ENABLE_TX_DROP 31 +#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP) +#define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U) + +#define S_ENABLE_TX_ERROR 30 +#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR) +#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U) + +#define S_DROP_TICKS_CNT 4 +#define M_DROP_TICKS_CNT 0x3ffffff +#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT) +#define G_DROP_TICKS_CNT(x) (((x) >> S_DROP_TICKS_CNT) & M_DROP_TICKS_CNT) + +#define S_NUM_PKTS_DROPPED 0 +#define M_NUM_PKTS_DROPPED 0xf +#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED) +#define G_NUM_PKTS_DROPPED(x) (((x) >> S_NUM_PKTS_DROPPED) & M_NUM_PKTS_DROPPED) + +#define A_TP_TX_DROP_COUNT 0x4bc + +/* RAT registers */ +#define A_RAT_ROUTE_CONTROL 0x580 + +#define S_USE_ROUTE_TABLE 0 +#define V_USE_ROUTE_TABLE(x) ((x) << S_USE_ROUTE_TABLE) +#define F_USE_ROUTE_TABLE V_USE_ROUTE_TABLE(1U) + +#define S_ENABLE_CSPI 1 +#define V_ENABLE_CSPI(x) ((x) << S_ENABLE_CSPI) +#define F_ENABLE_CSPI V_ENABLE_CSPI(1U) + +#define S_ENABLE_PCIX 2 +#define V_ENABLE_PCIX(x) ((x) << S_ENABLE_PCIX) +#define F_ENABLE_PCIX V_ENABLE_PCIX(1U) + +#define A_RAT_ROUTE_TABLE_INDEX 0x584 + +#define S_ROUTE_TABLE_INDEX 0 +#define M_ROUTE_TABLE_INDEX 0xf +#define V_ROUTE_TABLE_INDEX(x) ((x) << S_ROUTE_TABLE_INDEX) +#define G_ROUTE_TABLE_INDEX(x) (((x) >> S_ROUTE_TABLE_INDEX) & M_ROUTE_TABLE_INDEX) + +#define A_RAT_ROUTE_TABLE_DATA 0x588 +#define A_RAT_NO_ROUTE 0x58c + +#define 
S_CPL_OPCODE 0 +#define M_CPL_OPCODE 0xff +#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE) +#define G_CPL_OPCODE(x) (((x) >> S_CPL_OPCODE) & M_CPL_OPCODE) + +#define A_RAT_INTR_ENABLE 0x590 + +#define S_ZEROROUTEERROR 0 +#define V_ZEROROUTEERROR(x) ((x) << S_ZEROROUTEERROR) +#define F_ZEROROUTEERROR V_ZEROROUTEERROR(1U) + +#define S_CSPIFRAMINGERROR 1 +#define V_CSPIFRAMINGERROR(x) ((x) << S_CSPIFRAMINGERROR) +#define F_CSPIFRAMINGERROR V_CSPIFRAMINGERROR(1U) + +#define S_SGEFRAMINGERROR 2 +#define V_SGEFRAMINGERROR(x) ((x) << S_SGEFRAMINGERROR) +#define F_SGEFRAMINGERROR V_SGEFRAMINGERROR(1U) + +#define S_TPFRAMINGERROR 3 +#define V_TPFRAMINGERROR(x) ((x) << S_TPFRAMINGERROR) +#define F_TPFRAMINGERROR V_TPFRAMINGERROR(1U) + +#define A_RAT_INTR_CAUSE 0x594 + +/* CSPI registers */ +#define A_CSPI_RX_AE_WM 0x810 +#define A_CSPI_RX_AF_WM 0x814 +#define A_CSPI_CALENDAR_LEN 0x818 + +#define S_CALENDARLENGTH 0 +#define M_CALENDARLENGTH 0xffff +#define V_CALENDARLENGTH(x) ((x) << S_CALENDARLENGTH) +#define G_CALENDARLENGTH(x) (((x) >> S_CALENDARLENGTH) & M_CALENDARLENGTH) + +#define A_CSPI_FIFO_STATUS_ENABLE 0x820 + +#define S_FIFOSTATUSENABLE 0 +#define V_FIFOSTATUSENABLE(x) ((x) << S_FIFOSTATUSENABLE) +#define F_FIFOSTATUSENABLE V_FIFOSTATUSENABLE(1U) + +#define A_CSPI_MAXBURST1_MAXBURST2 0x828 + +#define S_MAXBURST1 0 +#define M_MAXBURST1 0xffff +#define V_MAXBURST1(x) ((x) << S_MAXBURST1) +#define G_MAXBURST1(x) (((x) >> S_MAXBURST1) & M_MAXBURST1) + +#define S_MAXBURST2 16 +#define M_MAXBURST2 0xffff +#define V_MAXBURST2(x) ((x) << S_MAXBURST2) +#define G_MAXBURST2(x) (((x) >> S_MAXBURST2) & M_MAXBURST2) + +#define A_CSPI_TRAIN 0x82c + +#define S_CSPI_TRAIN_ALPHA 0 +#define M_CSPI_TRAIN_ALPHA 0xffff +#define V_CSPI_TRAIN_ALPHA(x) ((x) << S_CSPI_TRAIN_ALPHA) +#define G_CSPI_TRAIN_ALPHA(x) (((x) >> S_CSPI_TRAIN_ALPHA) & M_CSPI_TRAIN_ALPHA) + +#define S_CSPI_TRAIN_DATA_MAXT 16 +#define M_CSPI_TRAIN_DATA_MAXT 0xffff +#define V_CSPI_TRAIN_DATA_MAXT(x) ((x) << S_CSPI_TRAIN_DATA_MAXT) +#define G_CSPI_TRAIN_DATA_MAXT(x) (((x) >> S_CSPI_TRAIN_DATA_MAXT) & M_CSPI_TRAIN_DATA_MAXT) + +#define A_CSPI_INTR_STATUS 0x848 + +#define S_DIP4ERR 0 +#define V_DIP4ERR(x) ((x) << S_DIP4ERR) +#define F_DIP4ERR V_DIP4ERR(1U) + +#define S_RXDROP 1 +#define V_RXDROP(x) ((x) << S_RXDROP) +#define F_RXDROP V_RXDROP(1U) + +#define S_TXDROP 2 +#define V_TXDROP(x) ((x) << S_TXDROP) +#define F_TXDROP V_TXDROP(1U) + +#define S_RXOVERFLOW 3 +#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW) +#define F_RXOVERFLOW V_RXOVERFLOW(1U) + +#define S_RAMPARITYERR 4 +#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR) +#define F_RAMPARITYERR V_RAMPARITYERR(1U) + +#define A_CSPI_INTR_ENABLE 0x84c + +/* ESPI registers */ +#define A_ESPI_SCH_TOKEN0 0x880 + +#define S_SCHTOKEN0 0 +#define M_SCHTOKEN0 0xffff +#define V_SCHTOKEN0(x) ((x) << S_SCHTOKEN0) +#define G_SCHTOKEN0(x) (((x) >> S_SCHTOKEN0) & M_SCHTOKEN0) + +#define A_ESPI_SCH_TOKEN1 0x884 + +#define S_SCHTOKEN1 0 +#define M_SCHTOKEN1 0xffff +#define V_SCHTOKEN1(x) ((x) << S_SCHTOKEN1) +#define G_SCHTOKEN1(x) (((x) >> S_SCHTOKEN1) & M_SCHTOKEN1) + +#define A_ESPI_SCH_TOKEN2 0x888 + +#define S_SCHTOKEN2 0 +#define M_SCHTOKEN2 0xffff +#define V_SCHTOKEN2(x) ((x) << S_SCHTOKEN2) +#define G_SCHTOKEN2(x) (((x) >> S_SCHTOKEN2) & M_SCHTOKEN2) + +#define A_ESPI_SCH_TOKEN3 0x88c + +#define S_SCHTOKEN3 0 +#define M_SCHTOKEN3 0xffff +#define V_SCHTOKEN3(x) ((x) << S_SCHTOKEN3) +#define G_SCHTOKEN3(x) (((x) >> S_SCHTOKEN3) & M_SCHTOKEN3) + +#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890 + +#define 
S_ALMOSTEMPTY 0 +#define M_ALMOSTEMPTY 0xffff +#define V_ALMOSTEMPTY(x) ((x) << S_ALMOSTEMPTY) +#define G_ALMOSTEMPTY(x) (((x) >> S_ALMOSTEMPTY) & M_ALMOSTEMPTY) + +#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894 + +#define S_ALMOSTFULL 0 +#define M_ALMOSTFULL 0xffff +#define V_ALMOSTFULL(x) ((x) << S_ALMOSTFULL) +#define G_ALMOSTFULL(x) (((x) >> S_ALMOSTFULL) & M_ALMOSTFULL) + +#define A_ESPI_CALENDAR_LENGTH 0x898 +#define A_PORT_CONFIG 0x89c + +#define S_RX_NPORTS 0 +#define M_RX_NPORTS 0xff +#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS) +#define G_RX_NPORTS(x) (((x) >> S_RX_NPORTS) & M_RX_NPORTS) + +#define S_TX_NPORTS 8 +#define M_TX_NPORTS 0xff +#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS) +#define G_TX_NPORTS(x) (((x) >> S_TX_NPORTS) & M_TX_NPORTS) + +#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0 + +#define S_RXSTATUSENABLE 0 +#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE) +#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U) + +#define S_TXDROPENABLE 1 +#define V_TXDROPENABLE(x) ((x) << S_TXDROPENABLE) +#define F_TXDROPENABLE V_TXDROPENABLE(1U) + +#define S_RXENDIANMODE 2 +#define V_RXENDIANMODE(x) ((x) << S_RXENDIANMODE) +#define F_RXENDIANMODE V_RXENDIANMODE(1U) + +#define S_TXENDIANMODE 3 +#define V_TXENDIANMODE(x) ((x) << S_TXENDIANMODE) +#define F_TXENDIANMODE V_TXENDIANMODE(1U) + +#define S_INTEL1010MODE 4 +#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE) +#define F_INTEL1010MODE V_INTEL1010MODE(1U) + +#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8 +#define A_ESPI_TRAIN 0x8ac + +#define S_MAXTRAINALPHA 0 +#define M_MAXTRAINALPHA 0xffff +#define V_MAXTRAINALPHA(x) ((x) << S_MAXTRAINALPHA) +#define G_MAXTRAINALPHA(x) (((x) >> S_MAXTRAINALPHA) & M_MAXTRAINALPHA) + +#define S_MAXTRAINDATA 16 +#define M_MAXTRAINDATA 0xffff +#define V_MAXTRAINDATA(x) ((x) << S_MAXTRAINDATA) +#define G_MAXTRAINDATA(x) (((x) >> S_MAXTRAINDATA) & M_MAXTRAINDATA) + +#define A_RAM_STATUS 0x8b0 + +#define S_RXFIFOPARITYERROR 0 +#define M_RXFIFOPARITYERROR 0x3ff +#define V_RXFIFOPARITYERROR(x) ((x) << S_RXFIFOPARITYERROR) +#define G_RXFIFOPARITYERROR(x) (((x) >> S_RXFIFOPARITYERROR) & M_RXFIFOPARITYERROR) + +#define S_TXFIFOPARITYERROR 10 +#define M_TXFIFOPARITYERROR 0x3ff +#define V_TXFIFOPARITYERROR(x) ((x) << S_TXFIFOPARITYERROR) +#define G_TXFIFOPARITYERROR(x) (((x) >> S_TXFIFOPARITYERROR) & M_TXFIFOPARITYERROR) + +#define S_RXFIFOOVERFLOW 20 +#define M_RXFIFOOVERFLOW 0x3ff +#define V_RXFIFOOVERFLOW(x) ((x) << S_RXFIFOOVERFLOW) +#define G_RXFIFOOVERFLOW(x) (((x) >> S_RXFIFOOVERFLOW) & M_RXFIFOOVERFLOW) + +#define A_TX_DROP_COUNT0 0x8b4 + +#define S_TXPORT0DROPCNT 0 +#define M_TXPORT0DROPCNT 0xffff +#define V_TXPORT0DROPCNT(x) ((x) << S_TXPORT0DROPCNT) +#define G_TXPORT0DROPCNT(x) (((x) >> S_TXPORT0DROPCNT) & M_TXPORT0DROPCNT) + +#define S_TXPORT1DROPCNT 16 +#define M_TXPORT1DROPCNT 0xffff +#define V_TXPORT1DROPCNT(x) ((x) << S_TXPORT1DROPCNT) +#define G_TXPORT1DROPCNT(x) (((x) >> S_TXPORT1DROPCNT) & M_TXPORT1DROPCNT) + +#define A_TX_DROP_COUNT1 0x8b8 + +#define S_TXPORT2DROPCNT 0 +#define M_TXPORT2DROPCNT 0xffff +#define V_TXPORT2DROPCNT(x) ((x) << S_TXPORT2DROPCNT) +#define G_TXPORT2DROPCNT(x) (((x) >> S_TXPORT2DROPCNT) & M_TXPORT2DROPCNT) + +#define S_TXPORT3DROPCNT 16 +#define M_TXPORT3DROPCNT 0xffff +#define V_TXPORT3DROPCNT(x) ((x) << S_TXPORT3DROPCNT) +#define G_TXPORT3DROPCNT(x) (((x) >> S_TXPORT3DROPCNT) & M_TXPORT3DROPCNT) + +#define A_RX_DROP_COUNT0 0x8bc + +#define S_RXPORT0DROPCNT 0 +#define M_RXPORT0DROPCNT 0xffff +#define V_RXPORT0DROPCNT(x) ((x) << S_RXPORT0DROPCNT) +#define G_RXPORT0DROPCNT(x) 
(((x) >> S_RXPORT0DROPCNT) & M_RXPORT0DROPCNT) + +#define S_RXPORT1DROPCNT 16 +#define M_RXPORT1DROPCNT 0xffff +#define V_RXPORT1DROPCNT(x) ((x) << S_RXPORT1DROPCNT) +#define G_RXPORT1DROPCNT(x) (((x) >> S_RXPORT1DROPCNT) & M_RXPORT1DROPCNT) + +#define A_RX_DROP_COUNT1 0x8c0 + +#define S_RXPORT2DROPCNT 0 +#define M_RXPORT2DROPCNT 0xffff +#define V_RXPORT2DROPCNT(x) ((x) << S_RXPORT2DROPCNT) +#define G_RXPORT2DROPCNT(x) (((x) >> S_RXPORT2DROPCNT) & M_RXPORT2DROPCNT) + +#define S_RXPORT3DROPCNT 16 +#define M_RXPORT3DROPCNT 0xffff +#define V_RXPORT3DROPCNT(x) ((x) << S_RXPORT3DROPCNT) +#define G_RXPORT3DROPCNT(x) (((x) >> S_RXPORT3DROPCNT) & M_RXPORT3DROPCNT) + +#define A_DIP4_ERROR_COUNT 0x8c4 + +#define S_DIP4ERRORCNT 0 +#define M_DIP4ERRORCNT 0xfff +#define V_DIP4ERRORCNT(x) ((x) << S_DIP4ERRORCNT) +#define G_DIP4ERRORCNT(x) (((x) >> S_DIP4ERRORCNT) & M_DIP4ERRORCNT) + +#define S_DIP4ERRORCNTSHADOW 12 +#define M_DIP4ERRORCNTSHADOW 0xfff +#define V_DIP4ERRORCNTSHADOW(x) ((x) << S_DIP4ERRORCNTSHADOW) +#define G_DIP4ERRORCNTSHADOW(x) (((x) >> S_DIP4ERRORCNTSHADOW) & M_DIP4ERRORCNTSHADOW) + +#define S_TRICN_RX_TRAIN_ERR 24 +#define V_TRICN_RX_TRAIN_ERR(x) ((x) << S_TRICN_RX_TRAIN_ERR) +#define F_TRICN_RX_TRAIN_ERR V_TRICN_RX_TRAIN_ERR(1U) + +#define S_TRICN_RX_TRAINING 25 +#define V_TRICN_RX_TRAINING(x) ((x) << S_TRICN_RX_TRAINING) +#define F_TRICN_RX_TRAINING V_TRICN_RX_TRAINING(1U) + +#define S_TRICN_RX_TRAIN_OK 26 +#define V_TRICN_RX_TRAIN_OK(x) ((x) << S_TRICN_RX_TRAIN_OK) +#define F_TRICN_RX_TRAIN_OK V_TRICN_RX_TRAIN_OK(1U) + +#define A_ESPI_INTR_STATUS 0x8c8 + +#define S_DIP2PARITYERR 5 +#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR) +#define F_DIP2PARITYERR V_DIP2PARITYERR(1U) + +#define A_ESPI_INTR_ENABLE 0x8cc +#define A_RX_DROP_THRESHOLD 0x8d0 +#define A_ESPI_RX_RESET 0x8ec + +#define S_ESPI_RX_LNK_RST 0 +#define V_ESPI_RX_LNK_RST(x) ((x) << S_ESPI_RX_LNK_RST) +#define F_ESPI_RX_LNK_RST V_ESPI_RX_LNK_RST(1U) + +#define S_ESPI_RX_CORE_RST 1 +#define V_ESPI_RX_CORE_RST(x) ((x) << S_ESPI_RX_CORE_RST) +#define F_ESPI_RX_CORE_RST V_ESPI_RX_CORE_RST(1U) + +#define S_RX_CLK_STATUS 2 +#define V_RX_CLK_STATUS(x) ((x) << S_RX_CLK_STATUS) +#define F_RX_CLK_STATUS V_RX_CLK_STATUS(1U) + +#define A_ESPI_MISC_CONTROL 0x8f0 + +#define S_OUT_OF_SYNC_COUNT 0 +#define M_OUT_OF_SYNC_COUNT 0xf +#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT) +#define G_OUT_OF_SYNC_COUNT(x) (((x) >> S_OUT_OF_SYNC_COUNT) & M_OUT_OF_SYNC_COUNT) + +#define S_DIP2_COUNT_MODE_ENABLE 4 +#define V_DIP2_COUNT_MODE_ENABLE(x) ((x) << S_DIP2_COUNT_MODE_ENABLE) +#define F_DIP2_COUNT_MODE_ENABLE V_DIP2_COUNT_MODE_ENABLE(1U) + +#define S_DIP2_PARITY_ERR_THRES 5 +#define M_DIP2_PARITY_ERR_THRES 0xf +#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES) +#define G_DIP2_PARITY_ERR_THRES(x) (((x) >> S_DIP2_PARITY_ERR_THRES) & M_DIP2_PARITY_ERR_THRES) + +#define S_DIP4_THRES 9 +#define M_DIP4_THRES 0xfff +#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES) +#define G_DIP4_THRES(x) (((x) >> S_DIP4_THRES) & M_DIP4_THRES) + +#define S_DIP4_THRES_ENABLE 21 +#define V_DIP4_THRES_ENABLE(x) ((x) << S_DIP4_THRES_ENABLE) +#define F_DIP4_THRES_ENABLE V_DIP4_THRES_ENABLE(1U) + +#define S_FORCE_DISABLE_STATUS 22 +#define V_FORCE_DISABLE_STATUS(x) ((x) << S_FORCE_DISABLE_STATUS) +#define F_FORCE_DISABLE_STATUS V_FORCE_DISABLE_STATUS(1U) + +#define S_DYNAMIC_DESKEW 23 +#define V_DYNAMIC_DESKEW(x) ((x) << S_DYNAMIC_DESKEW) +#define F_DYNAMIC_DESKEW V_DYNAMIC_DESKEW(1U) + +#define S_MONITORED_PORT_NUM 25 +#define 
M_MONITORED_PORT_NUM 0x3 +#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM) +#define G_MONITORED_PORT_NUM(x) (((x) >> S_MONITORED_PORT_NUM) & M_MONITORED_PORT_NUM) + +#define S_MONITORED_DIRECTION 27 +#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION) +#define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U) + +#define S_MONITORED_INTERFACE 28 +#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE) +#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U) + +#define A_ESPI_DIP2_ERR_COUNT 0x8f4 + +#define S_DIP2_ERR_CNT 0 +#define M_DIP2_ERR_CNT 0xf +#define V_DIP2_ERR_CNT(x) ((x) << S_DIP2_ERR_CNT) +#define G_DIP2_ERR_CNT(x) (((x) >> S_DIP2_ERR_CNT) & M_DIP2_ERR_CNT) + +#define A_ESPI_CMD_ADDR 0x8f8 + +#define S_WRITE_DATA 0 +#define M_WRITE_DATA 0xff +#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA) +#define G_WRITE_DATA(x) (((x) >> S_WRITE_DATA) & M_WRITE_DATA) + +#define S_REGISTER_OFFSET 8 +#define M_REGISTER_OFFSET 0xf +#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET) +#define G_REGISTER_OFFSET(x) (((x) >> S_REGISTER_OFFSET) & M_REGISTER_OFFSET) + +#define S_CHANNEL_ADDR 12 +#define M_CHANNEL_ADDR 0xf +#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR) +#define G_CHANNEL_ADDR(x) (((x) >> S_CHANNEL_ADDR) & M_CHANNEL_ADDR) + +#define S_MODULE_ADDR 16 +#define M_MODULE_ADDR 0x3 +#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR) +#define G_MODULE_ADDR(x) (((x) >> S_MODULE_ADDR) & M_MODULE_ADDR) + +#define S_BUNDLE_ADDR 20 +#define M_BUNDLE_ADDR 0x3 +#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR) +#define G_BUNDLE_ADDR(x) (((x) >> S_BUNDLE_ADDR) & M_BUNDLE_ADDR) + +#define S_SPI4_COMMAND 24 +#define M_SPI4_COMMAND 0xff +#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND) +#define G_SPI4_COMMAND(x) (((x) >> S_SPI4_COMMAND) & M_SPI4_COMMAND) + +#define A_ESPI_GOSTAT 0x8fc + +#define S_READ_DATA 0 +#define M_READ_DATA 0xff +#define V_READ_DATA(x) ((x) << S_READ_DATA) +#define G_READ_DATA(x) (((x) >> S_READ_DATA) & M_READ_DATA) + +#define S_ESPI_CMD_BUSY 8 +#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY) +#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U) + +#define S_ERROR_ACK 9 +#define V_ERROR_ACK(x) ((x) << S_ERROR_ACK) +#define F_ERROR_ACK V_ERROR_ACK(1U) + +#define S_UNMAPPED_ERR 10 +#define V_UNMAPPED_ERR(x) ((x) << S_UNMAPPED_ERR) +#define F_UNMAPPED_ERR V_UNMAPPED_ERR(1U) + +#define S_TRANSACTION_TIMER 16 +#define M_TRANSACTION_TIMER 0xff +#define V_TRANSACTION_TIMER(x) ((x) << S_TRANSACTION_TIMER) +#define G_TRANSACTION_TIMER(x) (((x) >> S_TRANSACTION_TIMER) & M_TRANSACTION_TIMER) + + +/* ULP registers */ +#define A_ULP_ULIMIT 0x980 +#define A_ULP_TAGMASK 0x984 +#define A_ULP_HREG_INDEX 0x988 +#define A_ULP_HREG_DATA 0x98c +#define A_ULP_INT_ENABLE 0x990 +#define A_ULP_INT_CAUSE 0x994 + +#define S_HREG_PAR_ERR 0 +#define V_HREG_PAR_ERR(x) ((x) << S_HREG_PAR_ERR) +#define F_HREG_PAR_ERR V_HREG_PAR_ERR(1U) + +#define S_EGRS_DATA_PAR_ERR 1 +#define V_EGRS_DATA_PAR_ERR(x) ((x) << S_EGRS_DATA_PAR_ERR) +#define F_EGRS_DATA_PAR_ERR V_EGRS_DATA_PAR_ERR(1U) + +#define S_INGRS_DATA_PAR_ERR 2 +#define V_INGRS_DATA_PAR_ERR(x) ((x) << S_INGRS_DATA_PAR_ERR) +#define F_INGRS_DATA_PAR_ERR V_INGRS_DATA_PAR_ERR(1U) + +#define S_PM_INTR 3 +#define V_PM_INTR(x) ((x) << S_PM_INTR) +#define F_PM_INTR V_PM_INTR(1U) + +#define S_PM_E2C_SYNC_ERR 4 +#define V_PM_E2C_SYNC_ERR(x) ((x) << S_PM_E2C_SYNC_ERR) +#define F_PM_E2C_SYNC_ERR V_PM_E2C_SYNC_ERR(1U) + +#define S_PM_C2E_SYNC_ERR 5 +#define V_PM_C2E_SYNC_ERR(x) ((x) << S_PM_C2E_SYNC_ERR) +#define F_PM_C2E_SYNC_ERR 
V_PM_C2E_SYNC_ERR(1U) + +#define S_PM_E2C_EMPTY_ERR 6 +#define V_PM_E2C_EMPTY_ERR(x) ((x) << S_PM_E2C_EMPTY_ERR) +#define F_PM_E2C_EMPTY_ERR V_PM_E2C_EMPTY_ERR(1U) + +#define S_PM_C2E_EMPTY_ERR 7 +#define V_PM_C2E_EMPTY_ERR(x) ((x) << S_PM_C2E_EMPTY_ERR) +#define F_PM_C2E_EMPTY_ERR V_PM_C2E_EMPTY_ERR(1U) + +#define S_PM_PAR_ERR 8 +#define M_PM_PAR_ERR 0xffff +#define V_PM_PAR_ERR(x) ((x) << S_PM_PAR_ERR) +#define G_PM_PAR_ERR(x) (((x) >> S_PM_PAR_ERR) & M_PM_PAR_ERR) + +#define S_PM_E2C_WRT_FULL 24 +#define V_PM_E2C_WRT_FULL(x) ((x) << S_PM_E2C_WRT_FULL) +#define F_PM_E2C_WRT_FULL V_PM_E2C_WRT_FULL(1U) + +#define S_PM_C2E_WRT_FULL 25 +#define V_PM_C2E_WRT_FULL(x) ((x) << S_PM_C2E_WRT_FULL) +#define F_PM_C2E_WRT_FULL V_PM_C2E_WRT_FULL(1U) + +#define A_ULP_PIO_CTRL 0x998 + +/* PL registers */ +#define A_PL_ENABLE 0xa00 + +#define S_PL_INTR_SGE_ERR 0 +#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR) +#define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U) + +#define S_PL_INTR_SGE_DATA 1 +#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA) +#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U) + +#define S_PL_INTR_MC3 2 +#define V_PL_INTR_MC3(x) ((x) << S_PL_INTR_MC3) +#define F_PL_INTR_MC3 V_PL_INTR_MC3(1U) + +#define S_PL_INTR_MC4 3 +#define V_PL_INTR_MC4(x) ((x) << S_PL_INTR_MC4) +#define F_PL_INTR_MC4 V_PL_INTR_MC4(1U) + +#define S_PL_INTR_MC5 4 +#define V_PL_INTR_MC5(x) ((x) << S_PL_INTR_MC5) +#define F_PL_INTR_MC5 V_PL_INTR_MC5(1U) + +#define S_PL_INTR_RAT 5 +#define V_PL_INTR_RAT(x) ((x) << S_PL_INTR_RAT) +#define F_PL_INTR_RAT V_PL_INTR_RAT(1U) + +#define S_PL_INTR_TP 6 +#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP) +#define F_PL_INTR_TP V_PL_INTR_TP(1U) + +#define S_PL_INTR_ULP 7 +#define V_PL_INTR_ULP(x) ((x) << S_PL_INTR_ULP) +#define F_PL_INTR_ULP V_PL_INTR_ULP(1U) + +#define S_PL_INTR_ESPI 8 +#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI) +#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U) + +#define S_PL_INTR_CSPI 9 +#define V_PL_INTR_CSPI(x) ((x) << S_PL_INTR_CSPI) +#define F_PL_INTR_CSPI V_PL_INTR_CSPI(1U) + +#define S_PL_INTR_PCIX 10 +#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX) +#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U) + +#define S_PL_INTR_EXT 11 +#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT) +#define F_PL_INTR_EXT V_PL_INTR_EXT(1U) + +#define A_PL_CAUSE 0xa04 + +/* MC5 registers */ +#define A_MC5_CONFIG 0xc04 + +#define S_MODE 0 +#define V_MODE(x) ((x) << S_MODE) +#define F_MODE V_MODE(1U) + +#define S_TCAM_RESET 1 +#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET) +#define F_TCAM_RESET V_TCAM_RESET(1U) + +#define S_TCAM_READY 2 +#define V_TCAM_READY(x) ((x) << S_TCAM_READY) +#define F_TCAM_READY V_TCAM_READY(1U) + +#define S_DBGI_ENABLE 4 +#define V_DBGI_ENABLE(x) ((x) << S_DBGI_ENABLE) +#define F_DBGI_ENABLE V_DBGI_ENABLE(1U) + +#define S_M_BUS_ENABLE 5 +#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE) +#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U) + +#define S_PARITY_ENABLE 6 +#define V_PARITY_ENABLE(x) ((x) << S_PARITY_ENABLE) +#define F_PARITY_ENABLE V_PARITY_ENABLE(1U) + +#define S_SYN_ISSUE_MODE 7 +#define M_SYN_ISSUE_MODE 0x3 +#define V_SYN_ISSUE_MODE(x) ((x) << S_SYN_ISSUE_MODE) +#define G_SYN_ISSUE_MODE(x) (((x) >> S_SYN_ISSUE_MODE) & M_SYN_ISSUE_MODE) + +#define S_BUILD 16 +#define V_BUILD(x) ((x) << S_BUILD) +#define F_BUILD V_BUILD(1U) + +#define S_COMPRESSION_ENABLE 17 +#define V_COMPRESSION_ENABLE(x) ((x) << S_COMPRESSION_ENABLE) +#define F_COMPRESSION_ENABLE V_COMPRESSION_ENABLE(1U) + +#define S_NUM_LIP 18 +#define M_NUM_LIP 0x3f +#define V_NUM_LIP(x) ((x) << 
S_NUM_LIP) +#define G_NUM_LIP(x) (((x) >> S_NUM_LIP) & M_NUM_LIP) + +#define S_TCAM_PART_CNT 24 +#define M_TCAM_PART_CNT 0x3 +#define V_TCAM_PART_CNT(x) ((x) << S_TCAM_PART_CNT) +#define G_TCAM_PART_CNT(x) (((x) >> S_TCAM_PART_CNT) & M_TCAM_PART_CNT) + +#define S_TCAM_PART_TYPE 26 +#define M_TCAM_PART_TYPE 0x3 +#define V_TCAM_PART_TYPE(x) ((x) << S_TCAM_PART_TYPE) +#define G_TCAM_PART_TYPE(x) (((x) >> S_TCAM_PART_TYPE) & M_TCAM_PART_TYPE) + +#define S_TCAM_PART_SIZE 28 +#define M_TCAM_PART_SIZE 0x3 +#define V_TCAM_PART_SIZE(x) ((x) << S_TCAM_PART_SIZE) +#define G_TCAM_PART_SIZE(x) (((x) >> S_TCAM_PART_SIZE) & M_TCAM_PART_SIZE) + +#define S_TCAM_PART_TYPE_HI 30 +#define V_TCAM_PART_TYPE_HI(x) ((x) << S_TCAM_PART_TYPE_HI) +#define F_TCAM_PART_TYPE_HI V_TCAM_PART_TYPE_HI(1U) + +#define A_MC5_SIZE 0xc08 + +#define S_SIZE 0 +#define M_SIZE 0x3fffff +#define V_SIZE(x) ((x) << S_SIZE) +#define G_SIZE(x) (((x) >> S_SIZE) & M_SIZE) + +#define A_MC5_ROUTING_TABLE_INDEX 0xc0c + +#define S_START_OF_ROUTING_TABLE 0 +#define M_START_OF_ROUTING_TABLE 0x3fffff +#define V_START_OF_ROUTING_TABLE(x) ((x) << S_START_OF_ROUTING_TABLE) +#define G_START_OF_ROUTING_TABLE(x) (((x) >> S_START_OF_ROUTING_TABLE) & M_START_OF_ROUTING_TABLE) + +#define A_MC5_SERVER_INDEX 0xc14 + +#define S_START_OF_SERVER_INDEX 0 +#define M_START_OF_SERVER_INDEX 0x3fffff +#define V_START_OF_SERVER_INDEX(x) ((x) << S_START_OF_SERVER_INDEX) +#define G_START_OF_SERVER_INDEX(x) (((x) >> S_START_OF_SERVER_INDEX) & M_START_OF_SERVER_INDEX) + +#define A_MC5_LIP_RAM_ADDR 0xc18 + +#define S_LOCAL_IP_RAM_ADDR 0 +#define M_LOCAL_IP_RAM_ADDR 0x3f +#define V_LOCAL_IP_RAM_ADDR(x) ((x) << S_LOCAL_IP_RAM_ADDR) +#define G_LOCAL_IP_RAM_ADDR(x) (((x) >> S_LOCAL_IP_RAM_ADDR) & M_LOCAL_IP_RAM_ADDR) + +#define S_RAM_WRITE_ENABLE 8 +#define V_RAM_WRITE_ENABLE(x) ((x) << S_RAM_WRITE_ENABLE) +#define F_RAM_WRITE_ENABLE V_RAM_WRITE_ENABLE(1U) + +#define A_MC5_LIP_RAM_DATA 0xc1c +#define A_MC5_RSP_LATENCY 0xc20 + +#define S_SEARCH_RESPONSE_LATENCY 0 +#define M_SEARCH_RESPONSE_LATENCY 0x1f +#define V_SEARCH_RESPONSE_LATENCY(x) ((x) << S_SEARCH_RESPONSE_LATENCY) +#define G_SEARCH_RESPONSE_LATENCY(x) (((x) >> S_SEARCH_RESPONSE_LATENCY) & M_SEARCH_RESPONSE_LATENCY) + +#define S_LEARN_RESPONSE_LATENCY 8 +#define M_LEARN_RESPONSE_LATENCY 0x1f +#define V_LEARN_RESPONSE_LATENCY(x) ((x) << S_LEARN_RESPONSE_LATENCY) +#define G_LEARN_RESPONSE_LATENCY(x) (((x) >> S_LEARN_RESPONSE_LATENCY) & M_LEARN_RESPONSE_LATENCY) + +#define A_MC5_PARITY_LATENCY 0xc24 + +#define S_SRCHLAT 0 +#define M_SRCHLAT 0x1f +#define V_SRCHLAT(x) ((x) << S_SRCHLAT) +#define G_SRCHLAT(x) (((x) >> S_SRCHLAT) & M_SRCHLAT) + +#define S_PARLAT 8 +#define M_PARLAT 0x1f +#define V_PARLAT(x) ((x) << S_PARLAT) +#define G_PARLAT(x) (((x) >> S_PARLAT) & M_PARLAT) + +#define A_MC5_WR_LRN_VERIFY 0xc28 + +#define S_POVEREN 0 +#define V_POVEREN(x) ((x) << S_POVEREN) +#define F_POVEREN V_POVEREN(1U) + +#define S_LRNVEREN 1 +#define V_LRNVEREN(x) ((x) << S_LRNVEREN) +#define F_LRNVEREN V_LRNVEREN(1U) + +#define S_VWVEREN 2 +#define V_VWVEREN(x) ((x) << S_VWVEREN) +#define F_VWVEREN V_VWVEREN(1U) + +#define A_MC5_PART_ID_INDEX 0xc2c + +#define S_IDINDEX 0 +#define M_IDINDEX 0xf +#define V_IDINDEX(x) ((x) << S_IDINDEX) +#define G_IDINDEX(x) (((x) >> S_IDINDEX) & M_IDINDEX) + +#define A_MC5_RESET_MAX 0xc30 + +#define S_RSTMAX 0 +#define M_RSTMAX 0x1ff +#define V_RSTMAX(x) ((x) << S_RSTMAX) +#define G_RSTMAX(x) (((x) >> S_RSTMAX) & M_RSTMAX) + +#define A_MC5_INT_ENABLE 0xc40 + +#define 
S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR 0 +#define V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR) +#define F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(1U) + +#define S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR 1 +#define V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR) +#define F_MC5_INT_HIT_IN_ACTIVE_REGION_ERR V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(1U) + +#define S_MC5_INT_HIT_IN_RT_REGION_ERR 2 +#define V_MC5_INT_HIT_IN_RT_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_RT_REGION_ERR) +#define F_MC5_INT_HIT_IN_RT_REGION_ERR V_MC5_INT_HIT_IN_RT_REGION_ERR(1U) + +#define S_MC5_INT_MISS_ERR 3 +#define V_MC5_INT_MISS_ERR(x) ((x) << S_MC5_INT_MISS_ERR) +#define F_MC5_INT_MISS_ERR V_MC5_INT_MISS_ERR(1U) + +#define S_MC5_INT_LIP0_ERR 4 +#define V_MC5_INT_LIP0_ERR(x) ((x) << S_MC5_INT_LIP0_ERR) +#define F_MC5_INT_LIP0_ERR V_MC5_INT_LIP0_ERR(1U) + +#define S_MC5_INT_LIP_MISS_ERR 5 +#define V_MC5_INT_LIP_MISS_ERR(x) ((x) << S_MC5_INT_LIP_MISS_ERR) +#define F_MC5_INT_LIP_MISS_ERR V_MC5_INT_LIP_MISS_ERR(1U) + +#define S_MC5_INT_PARITY_ERR 6 +#define V_MC5_INT_PARITY_ERR(x) ((x) << S_MC5_INT_PARITY_ERR) +#define F_MC5_INT_PARITY_ERR V_MC5_INT_PARITY_ERR(1U) + +#define S_MC5_INT_ACTIVE_REGION_FULL 7 +#define V_MC5_INT_ACTIVE_REGION_FULL(x) ((x) << S_MC5_INT_ACTIVE_REGION_FULL) +#define F_MC5_INT_ACTIVE_REGION_FULL V_MC5_INT_ACTIVE_REGION_FULL(1U) + +#define S_MC5_INT_NFA_SRCH_ERR 8 +#define V_MC5_INT_NFA_SRCH_ERR(x) ((x) << S_MC5_INT_NFA_SRCH_ERR) +#define F_MC5_INT_NFA_SRCH_ERR V_MC5_INT_NFA_SRCH_ERR(1U) + +#define S_MC5_INT_SYN_COOKIE 9 +#define V_MC5_INT_SYN_COOKIE(x) ((x) << S_MC5_INT_SYN_COOKIE) +#define F_MC5_INT_SYN_COOKIE V_MC5_INT_SYN_COOKIE(1U) + +#define S_MC5_INT_SYN_COOKIE_BAD 10 +#define V_MC5_INT_SYN_COOKIE_BAD(x) ((x) << S_MC5_INT_SYN_COOKIE_BAD) +#define F_MC5_INT_SYN_COOKIE_BAD V_MC5_INT_SYN_COOKIE_BAD(1U) + +#define S_MC5_INT_SYN_COOKIE_OFF 11 +#define V_MC5_INT_SYN_COOKIE_OFF(x) ((x) << S_MC5_INT_SYN_COOKIE_OFF) +#define F_MC5_INT_SYN_COOKIE_OFF V_MC5_INT_SYN_COOKIE_OFF(1U) + +#define S_MC5_INT_UNKNOWN_CMD 15 +#define V_MC5_INT_UNKNOWN_CMD(x) ((x) << S_MC5_INT_UNKNOWN_CMD) +#define F_MC5_INT_UNKNOWN_CMD V_MC5_INT_UNKNOWN_CMD(1U) + +#define S_MC5_INT_REQUESTQ_PARITY_ERR 16 +#define V_MC5_INT_REQUESTQ_PARITY_ERR(x) ((x) << S_MC5_INT_REQUESTQ_PARITY_ERR) +#define F_MC5_INT_REQUESTQ_PARITY_ERR V_MC5_INT_REQUESTQ_PARITY_ERR(1U) + +#define S_MC5_INT_DISPATCHQ_PARITY_ERR 17 +#define V_MC5_INT_DISPATCHQ_PARITY_ERR(x) ((x) << S_MC5_INT_DISPATCHQ_PARITY_ERR) +#define F_MC5_INT_DISPATCHQ_PARITY_ERR V_MC5_INT_DISPATCHQ_PARITY_ERR(1U) + +#define S_MC5_INT_DEL_ACT_EMPTY 18 +#define V_MC5_INT_DEL_ACT_EMPTY(x) ((x) << S_MC5_INT_DEL_ACT_EMPTY) +#define F_MC5_INT_DEL_ACT_EMPTY V_MC5_INT_DEL_ACT_EMPTY(1U) + +#define A_MC5_INT_CAUSE 0xc44 +#define A_MC5_INT_TID 0xc48 +#define A_MC5_INT_PTID 0xc4c +#define A_MC5_DBGI_CONFIG 0xc74 +#define A_MC5_DBGI_REQ_CMD 0xc78 + +#define S_CMDMODE 0 +#define M_CMDMODE 0x7 +#define V_CMDMODE(x) ((x) << S_CMDMODE) +#define G_CMDMODE(x) (((x) >> S_CMDMODE) & M_CMDMODE) + +#define S_SADRSEL 4 +#define V_SADRSEL(x) ((x) << S_SADRSEL) +#define F_SADRSEL V_SADRSEL(1U) + +#define S_WRITE_BURST_SIZE 22 +#define M_WRITE_BURST_SIZE 0x3ff +#define V_WRITE_BURST_SIZE(x) ((x) << S_WRITE_BURST_SIZE) +#define G_WRITE_BURST_SIZE(x) (((x) >> S_WRITE_BURST_SIZE) & M_WRITE_BURST_SIZE) + +#define A_MC5_DBGI_REQ_ADDR0 0xc7c +#define A_MC5_DBGI_REQ_ADDR1 0xc80 +#define A_MC5_DBGI_REQ_ADDR2 0xc84 +#define 
A_MC5_DBGI_REQ_DATA0 0xc88 +#define A_MC5_DBGI_REQ_DATA1 0xc8c +#define A_MC5_DBGI_REQ_DATA2 0xc90 +#define A_MC5_DBGI_REQ_DATA3 0xc94 +#define A_MC5_DBGI_REQ_DATA4 0xc98 +#define A_MC5_DBGI_REQ_MASK0 0xc9c +#define A_MC5_DBGI_REQ_MASK1 0xca0 +#define A_MC5_DBGI_REQ_MASK2 0xca4 +#define A_MC5_DBGI_REQ_MASK3 0xca8 +#define A_MC5_DBGI_REQ_MASK4 0xcac +#define A_MC5_DBGI_RSP_STATUS 0xcb0 + +#define S_DBGI_RSP_VALID 0 +#define V_DBGI_RSP_VALID(x) ((x) << S_DBGI_RSP_VALID) +#define F_DBGI_RSP_VALID V_DBGI_RSP_VALID(1U) + +#define S_DBGI_RSP_HIT 1 +#define V_DBGI_RSP_HIT(x) ((x) << S_DBGI_RSP_HIT) +#define F_DBGI_RSP_HIT V_DBGI_RSP_HIT(1U) + +#define S_DBGI_RSP_ERR 2 +#define V_DBGI_RSP_ERR(x) ((x) << S_DBGI_RSP_ERR) +#define F_DBGI_RSP_ERR V_DBGI_RSP_ERR(1U) + +#define S_DBGI_RSP_ERR_REASON 8 +#define M_DBGI_RSP_ERR_REASON 0x7 +#define V_DBGI_RSP_ERR_REASON(x) ((x) << S_DBGI_RSP_ERR_REASON) +#define G_DBGI_RSP_ERR_REASON(x) (((x) >> S_DBGI_RSP_ERR_REASON) & M_DBGI_RSP_ERR_REASON) + +#define A_MC5_DBGI_RSP_DATA0 0xcb4 +#define A_MC5_DBGI_RSP_DATA1 0xcb8 +#define A_MC5_DBGI_RSP_DATA2 0xcbc +#define A_MC5_DBGI_RSP_DATA3 0xcc0 +#define A_MC5_DBGI_RSP_DATA4 0xcc4 +#define A_MC5_DBGI_RSP_LAST_CMD 0xcc8 +#define A_MC5_POPEN_DATA_WR_CMD 0xccc +#define A_MC5_POPEN_MASK_WR_CMD 0xcd0 +#define A_MC5_AOPEN_SRCH_CMD 0xcd4 +#define A_MC5_AOPEN_LRN_CMD 0xcd8 +#define A_MC5_SYN_SRCH_CMD 0xcdc +#define A_MC5_SYN_LRN_CMD 0xce0 +#define A_MC5_ACK_SRCH_CMD 0xce4 +#define A_MC5_ACK_LRN_CMD 0xce8 +#define A_MC5_ILOOKUP_CMD 0xcec +#define A_MC5_ELOOKUP_CMD 0xcf0 +#define A_MC5_DATA_WRITE_CMD 0xcf4 +#define A_MC5_DATA_READ_CMD 0xcf8 +#define A_MC5_MASK_WRITE_CMD 0xcfc + +/* PCICFG registers */ +#define A_PCICFG_PM_CSR 0x44 +#define A_PCICFG_VPD_ADDR 0x4a + +#define S_VPD_ADDR 0 +#define M_VPD_ADDR 0x7fff +#define V_VPD_ADDR(x) ((x) << S_VPD_ADDR) +#define G_VPD_ADDR(x) (((x) >> S_VPD_ADDR) & M_VPD_ADDR) + +#define S_VPD_OP_FLAG 15 +#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG) +#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U) + +#define A_PCICFG_VPD_DATA 0x4c +#define A_PCICFG_PCIX_CMD 0x60 +#define A_PCICFG_INTR_ENABLE 0xf4 + +#define S_MASTER_PARITY_ERR 0 +#define V_MASTER_PARITY_ERR(x) ((x) << S_MASTER_PARITY_ERR) +#define F_MASTER_PARITY_ERR V_MASTER_PARITY_ERR(1U) + +#define S_SIG_TARGET_ABORT 1 +#define V_SIG_TARGET_ABORT(x) ((x) << S_SIG_TARGET_ABORT) +#define F_SIG_TARGET_ABORT V_SIG_TARGET_ABORT(1U) + +#define S_RCV_TARGET_ABORT 2 +#define V_RCV_TARGET_ABORT(x) ((x) << S_RCV_TARGET_ABORT) +#define F_RCV_TARGET_ABORT V_RCV_TARGET_ABORT(1U) + +#define S_RCV_MASTER_ABORT 3 +#define V_RCV_MASTER_ABORT(x) ((x) << S_RCV_MASTER_ABORT) +#define F_RCV_MASTER_ABORT V_RCV_MASTER_ABORT(1U) + +#define S_SIG_SYS_ERR 4 +#define V_SIG_SYS_ERR(x) ((x) << S_SIG_SYS_ERR) +#define F_SIG_SYS_ERR V_SIG_SYS_ERR(1U) + +#define S_DET_PARITY_ERR 5 +#define V_DET_PARITY_ERR(x) ((x) << S_DET_PARITY_ERR) +#define F_DET_PARITY_ERR V_DET_PARITY_ERR(1U) + +#define S_PIO_PARITY_ERR 6 +#define V_PIO_PARITY_ERR(x) ((x) << S_PIO_PARITY_ERR) +#define F_PIO_PARITY_ERR V_PIO_PARITY_ERR(1U) + +#define S_WF_PARITY_ERR 7 +#define V_WF_PARITY_ERR(x) ((x) << S_WF_PARITY_ERR) +#define F_WF_PARITY_ERR V_WF_PARITY_ERR(1U) + +#define S_RF_PARITY_ERR 8 +#define M_RF_PARITY_ERR 0x3 +#define V_RF_PARITY_ERR(x) ((x) << S_RF_PARITY_ERR) +#define G_RF_PARITY_ERR(x) (((x) >> S_RF_PARITY_ERR) & M_RF_PARITY_ERR) + +#define S_CF_PARITY_ERR 10 +#define M_CF_PARITY_ERR 0x3 +#define V_CF_PARITY_ERR(x) ((x) << S_CF_PARITY_ERR) +#define G_CF_PARITY_ERR(x) (((x) >> 
S_CF_PARITY_ERR) & M_CF_PARITY_ERR)
+
+#define A_PCICFG_INTR_CAUSE 0xf8
+#define A_PCICFG_MODE 0xfc
+
+#define S_PCI_MODE_64BIT 0
+#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
+#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U)
+
+#define S_PCI_MODE_66MHZ 1
+#define V_PCI_MODE_66MHZ(x) ((x) << S_PCI_MODE_66MHZ)
+#define F_PCI_MODE_66MHZ V_PCI_MODE_66MHZ(1U)
+
+#define S_PCI_MODE_PCIX_INITPAT 2
+#define M_PCI_MODE_PCIX_INITPAT 0x7
+#define V_PCI_MODE_PCIX_INITPAT(x) ((x) << S_PCI_MODE_PCIX_INITPAT)
+#define G_PCI_MODE_PCIX_INITPAT(x) (((x) >> S_PCI_MODE_PCIX_INITPAT) & M_PCI_MODE_PCIX_INITPAT)
+
+#define S_PCI_MODE_PCIX 5
+#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
+#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U)
+
+#define S_PCI_MODE_CLK 6
+#define M_PCI_MODE_CLK 0x3
+#define V_PCI_MODE_CLK(x) ((x) << S_PCI_MODE_CLK)
+#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
+
+#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
new file mode 100644
index 0000000..e9a03ff
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -0,0 +1,2140 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: sge.c                                                               *
+ * $Revision: 1.26 $                                                         *
+ * $Date: 2005/06/21 18:29:48 $                                              *
+ * Description:                                                              *
+ *  DMA engine.                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify     *
+ * it under the terms of the GNU General Public License, version 2, as      *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along  *
+ * with this program; if not, write to the Free Software Foundation, Inc.,  *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis                                            *
+ *          Tina Yang                                                        *
+ *          Felix Marti                                                      *
+ *          Scott Bardone                                                    *
+ *          Kurt Ottaway                                                     *
+ *          Frank DiMambro                                                   *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/ktime.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+
+#include "cpl5_cmd.h"
+#include "sge.h"
+#include "regs.h"
+#include "espi.h"
+
+/* This belongs in if_ether.h */
+#define ETH_P_CPL5 0xf
+
+#define SGE_CMDQ_N 2
+#define SGE_FREELQ_N 2
+#define SGE_CMDQ0_E_N 1024
+#define SGE_CMDQ1_E_N 128
+#define SGE_FREEL_SIZE 4096
+#define SGE_JUMBO_FREEL_SIZE 512
+#define SGE_FREEL_REFILL_THRESH 16
+#define SGE_RESPQ_E_N 1024
+#define SGE_INTRTIMER_NRES 1000
+#define SGE_RX_SM_BUF_SIZE 1536
+#define SGE_TX_DESC_MAX_PLEN 16384
+
+#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
+
+/*
+ * Period of the TX buffer reclaim timer.  This timer does not need to run
+ * frequently as TX buffers are usually reclaimed by new TX packets.
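 *
 * (Editor's aside, not part of the original patch: the register file that
 * ends above is built from one idiom -- S_* is a field's bit offset, M_* its
 * mask, V_*() places a value and G_*() extracts one, with F_* for single-bit
 * flags.  A self-contained sketch with a hypothetical EX_FIELD:)
 */

#include <stdio.h>

#define S_EX_FIELD    8
#define M_EX_FIELD    0xf
#define V_EX_FIELD(x) ((x) << S_EX_FIELD)
#define G_EX_FIELD(x) (((x) >> S_EX_FIELD) & M_EX_FIELD)

int main(void)
{
	unsigned int reg = 0;

	reg |= V_EX_FIELD(0x5);                    /* place 0x5 into bits 11:8 */
	printf("%#x %#x\n", reg, G_EX_FIELD(reg)); /* prints 0x500 0x5 */
	return 0;
}

/*
 * (end of editor's sketch)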
+ */ +#define TX_RECLAIM_PERIOD (HZ / 4) + +#define M_CMD_LEN 0x7fffffff +#define V_CMD_LEN(v) (v) +#define G_CMD_LEN(v) ((v) & M_CMD_LEN) +#define V_CMD_GEN1(v) ((v) << 31) +#define V_CMD_GEN2(v) (v) +#define F_CMD_DATAVALID (1 << 1) +#define F_CMD_SOP (1 << 2) +#define V_CMD_EOP(v) ((v) << 3) + +/* + * Command queue, receive buffer list, and response queue descriptors. + */ +#if defined(__BIG_ENDIAN_BITFIELD) +struct cmdQ_e { + u32 addr_lo; + u32 len_gen; + u32 flags; + u32 addr_hi; +}; + +struct freelQ_e { + u32 addr_lo; + u32 len_gen; + u32 gen2; + u32 addr_hi; +}; + +struct respQ_e { + u32 Qsleeping : 4; + u32 Cmdq1CreditReturn : 5; + u32 Cmdq1DmaComplete : 5; + u32 Cmdq0CreditReturn : 5; + u32 Cmdq0DmaComplete : 5; + u32 FreelistQid : 2; + u32 CreditValid : 1; + u32 DataValid : 1; + u32 Offload : 1; + u32 Eop : 1; + u32 Sop : 1; + u32 GenerationBit : 1; + u32 BufferLength; +}; +#elif defined(__LITTLE_ENDIAN_BITFIELD) +struct cmdQ_e { + u32 len_gen; + u32 addr_lo; + u32 addr_hi; + u32 flags; +}; + +struct freelQ_e { + u32 len_gen; + u32 addr_lo; + u32 addr_hi; + u32 gen2; +}; + +struct respQ_e { + u32 BufferLength; + u32 GenerationBit : 1; + u32 Sop : 1; + u32 Eop : 1; + u32 Offload : 1; + u32 DataValid : 1; + u32 CreditValid : 1; + u32 FreelistQid : 2; + u32 Cmdq0DmaComplete : 5; + u32 Cmdq0CreditReturn : 5; + u32 Cmdq1DmaComplete : 5; + u32 Cmdq1CreditReturn : 5; + u32 Qsleeping : 4; +} ; +#endif + +/* + * SW Context Command and Freelist Queue Descriptors + */ +struct cmdQ_ce { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +struct freelQ_ce { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* + * SW command, freelist and response rings + */ +struct cmdQ { + unsigned long status; /* HW DMA fetch status */ + unsigned int in_use; /* # of in-use command descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int processed; /* total # of descs HW has processed */ + unsigned int cleaned; /* total # of descs SW has reclaimed */ + unsigned int stop_thres; /* SW TX queue suspend threshold */ + u16 pidx; /* producer index (SW) */ + u16 cidx; /* consumer index (HW) */ + u8 genbit; /* current generation (=valid) bit */ + u8 sop; /* is next entry start of packet? 
*/
+	struct cmdQ_e *entries;		/* HW command descriptor Q */
+	struct cmdQ_ce *centries;	/* SW command context descriptor Q */
+	dma_addr_t dma_addr;		/* DMA addr HW command descriptor Q */
+	spinlock_t lock;		/* Lock to protect cmdQ enqueuing */
+};
+
+struct freelQ {
+	unsigned int credits;		/* # of available RX buffers */
+	unsigned int size;		/* free list capacity */
+	u16 pidx;			/* producer index (SW) */
+	u16 cidx;			/* consumer index (HW) */
+	u16 rx_buffer_size;		/* Buffer size on this free list */
+	u16 dma_offset;			/* DMA offset to align IP headers */
+	u16 recycleq_idx;		/* skb recycle q to use */
+	u8 genbit;			/* current generation (=valid) bit */
+	struct freelQ_e *entries;	/* HW freelist descriptor Q */
+	struct freelQ_ce *centries;	/* SW freelist context descriptor Q */
+	dma_addr_t dma_addr;		/* DMA addr HW freelist descriptor Q */
+};
+
+struct respQ {
+	unsigned int credits;		/* credits to be returned to SGE */
+	unsigned int size;		/* # of response Q descriptors */
+	u16 cidx;			/* consumer index (SW) */
+	u8 genbit;			/* current generation (=valid) bit */
+	struct respQ_e *entries;	/* HW response descriptor Q */
+	dma_addr_t dma_addr;		/* DMA addr HW response descriptor Q */
+};
+
+/* Bit flags for cmdQ.status */
+enum {
+	CMDQ_STAT_RUNNING = 1,		/* fetch engine is running */
+	CMDQ_STAT_LAST_PKT_DB = 2	/* last packet rung the doorbell */
+};
+
+/* T204 TX SW scheduler */
+
+/* Per T204 TX port */
+struct sched_port {
+	unsigned int avail;		/* available bits - quota */
+	unsigned int drain_bits_per_1024ns; /* drain rate */
+	unsigned int speed;		/* drain rate, mbps */
+	unsigned int mtu;		/* mtu size */
+	struct sk_buff_head skbq;	/* pending skbs */
+};
+
+/* Per T204 device */
+struct sched {
+	ktime_t last_updated;		/* last time quotas were computed */
+	unsigned int max_avail;		/* max bits to be sent to any port */
+	unsigned int port;		/* port index (round robin ports) */
+	unsigned int num;		/* num skbs in per port queues */
+	struct sched_port p[MAX_NPORTS];
+	struct tasklet_struct sched_tsk; /* tasklet used to run scheduler */
+};
+static void restart_sched(unsigned long);
+
+
+/*
+ * Main SGE data structure
+ *
+ * Interrupts are handled by a single CPU and it is likely that on an MP
+ * system the application is migrated to another CPU.  In that scenario,
+ * we try to separate the RX (in IRQ context) and TX state in order to
+ * decrease memory contention.
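 *
 * (Editor's aside, not part of the original patch: the genbit fields in the
 * queue structures above implement descriptor ownership without any shared
 * index register -- a slot is valid only while its generation matches the
 * reader's, and the expected generation flips on every wrap.  A toy,
 * single-threaded model of that handshake:)
 */

#include <stdio.h>

#define RING_SIZE 4

struct toy_desc {
	unsigned int len;
	unsigned int gen;	/* valid iff it matches the reader's genbit */
};

int main(void)
{
	struct toy_desc ring[RING_SIZE] = { { 0, 0 } };
	unsigned int pidx = 0, cidx = 0, genbit = 1;
	unsigned int i;

	for (i = 0; i < 3; i++) {	/* producer posts three descriptors */
		ring[pidx].len = 64 + i;
		ring[pidx].gen = genbit;
		if (++pidx == RING_SIZE)
			pidx = 0;	/* a real producer flips its genbit here */
	}
	while (ring[cidx].gen == genbit) {	/* consumer stops at a stale gen */
		printf("desc %u: len %u\n", cidx, ring[cidx].len);
		if (++cidx == RING_SIZE) {
			cidx = 0;
			genbit ^= 1;
		}
	}
	return 0;
}

/*
 * (end of editor's sketch)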
+ */
+struct sge {
+	struct adapter *adapter;	/* adapter backpointer */
+	struct net_device *netdev;	/* netdevice backpointer */
+	struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
+	struct respQ respQ;		/* response Q */
+	unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
+	unsigned int rx_pkt_pad;	/* RX padding for L2 packets */
+	unsigned int jumbo_fl;		/* jumbo freelist Q index */
+	unsigned int intrtimer_nres;	/* no-resource interrupt timer */
+	unsigned int fixed_intrtimer;	/* non-adaptive interrupt timer */
+	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+	struct timer_list espibug_timer;
+	unsigned long espibug_timeout;
+	struct sk_buff *espibug_skb[MAX_NPORTS];
+	u32 sge_control;		/* shadow value of sge control reg */
+	struct sge_intr_counts stats;
+	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
+	struct sched *tx_sched;
+	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
+};
+
+static const u8 ch_mac_addr[ETH_ALEN] = {
+	0x0, 0x7, 0x43, 0x0, 0x0, 0x0
+};
+
+/*
+ * stop the tasklet and free all pending skbs
+ */
+static void tx_sched_stop(struct sge *sge)
+{
+	struct sched *s = sge->tx_sched;
+	int i;
+
+	tasklet_kill(&s->sched_tsk);
+
+	for (i = 0; i < MAX_NPORTS; i++)
+		__skb_queue_purge(&s->p[i].skbq);
+}
+
+/*
+ * t1_sched_update_parms() is called when the MTU or link speed changes.  It
+ * re-computes scheduler parameters to cope with the change.
+ */
+unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
+				   unsigned int mtu, unsigned int speed)
+{
+	struct sched *s = sge->tx_sched;
+	struct sched_port *p = &s->p[port];
+	unsigned int max_avail_segs;
+
+	pr_debug("t1_sched_update_parms mtu=%u speed=%u\n", mtu, speed);
+	if (speed)
+		p->speed = speed;
+	if (mtu)
+		p->mtu = mtu;
+
+	if (speed || mtu) {
+		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
+		do_div(drain, (p->mtu + 50) * 1000);
+		p->drain_bits_per_1024ns = (unsigned int) drain;
+
+		if (p->speed < 1000)
+			p->drain_bits_per_1024ns =
+				90 * p->drain_bits_per_1024ns / 100;
+	}
+
+	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
+		p->drain_bits_per_1024ns -= 16;
+		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
+		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
+	} else {
+		s->max_avail = 16384;
+		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
+	}
+
+	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
+		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
+		 p->speed, s->max_avail, max_avail_segs,
+		 p->drain_bits_per_1024ns);
+
+	return max_avail_segs * (p->mtu - 40);
+}
+
+#if 0
+
+/*
+ * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
+ * data that can be pushed per port.
+ */
+void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
+{
+	struct sched *s = sge->tx_sched;
+	unsigned int i;
+
+	s->max_avail = val;
+	for (i = 0; i < MAX_NPORTS; i++)
+		t1_sched_update_parms(sge, i, 0, 0);
+}
+
+/*
+ * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
+ * is draining.
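 *
 * (Editor's worked example, not part of the original patch: for mtu 1500 and
 * speed 1000 Mbps, the drain-rate formula in t1_sched_update_parms() above
 * reduces to the following.  The 40 bytes are TCP/IP headers, the extra 50
 * approximate wire overhead:)
 */

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500, speed = 1000;	/* speed in Mbps */
	unsigned long long drain;

	drain = 1024ULL * speed * (mtu - 40);	/* payload bits per 1024 ns... */
	drain /= (mtu + 50) * 1000;		/* ...after Mbps -> bits/ns scaling */
	printf("drain_bits_per_1024ns = %llu\n", drain);	/* prints 964 */
	return 0;
}

/*
 * (end of editor's sketch)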
+ */
+void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
+				    unsigned int val)
+{
+	struct sched *s = sge->tx_sched;
+	struct sched_port *p = &s->p[port];
+	p->drain_bits_per_1024ns = val * 1024 / 1000;
+	t1_sched_update_parms(sge, port, 0, 0);
+}
+
+#endif  /* 0 */
+
+
+/*
+ * get_clock() implements a ns clock (see ktime_get)
+ */
+static inline ktime_t get_clock(void)
+{
+	struct timespec ts;
+
+	ktime_get_ts(&ts);
+	return timespec_to_ktime(ts);
+}
+
+/*
+ * tx_sched_init() allocates resources and does basic initialization.
+ */
+static int tx_sched_init(struct sge *sge)
+{
+	struct sched *s;
+	int i;
+
+	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	pr_debug("tx_sched_init\n");
+	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+	sge->tx_sched = s;
+
+	for (i = 0; i < MAX_NPORTS; i++) {
+		skb_queue_head_init(&s->p[i].skbq);
+		t1_sched_update_parms(sge, i, 1500, 1000);
+	}
+
+	return 0;
+}
+
+/*
+ * sched_update_avail() computes the delta since the last time it was called
+ * and updates the per port quota (number of bits that can be sent to any
+ * port).
+ */
+static inline int sched_update_avail(struct sge *sge)
+{
+	struct sched *s = sge->tx_sched;
+	ktime_t now = get_clock();
+	unsigned int i;
+	long long delta_time_ns;
+
+	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
+
+	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
+	if (delta_time_ns < 15000)
+		return 0;
+
+	for (i = 0; i < MAX_NPORTS; i++) {
+		struct sched_port *p = &s->p[i];
+		unsigned int delta_avail;
+
+		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
+		p->avail = min(p->avail + delta_avail, s->max_avail);
+	}
+
+	s->last_updated = now;
+
+	return 1;
+}
+
+/*
+ * sched_skb() is called from two different places.  In the tx path, any
+ * packet generating load on an output port will call sched_skb()
+ * (skb != NULL).  In addition, sched_skb() is called from IRQ/softirq
+ * context (skb == NULL).
+ * The scheduler only returns a skb (which will then be sent) if the
+ * length of the skb is <= the current quota of the output port.
+ */
+static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
+				 unsigned int credits)
+{
+	struct sched *s = sge->tx_sched;
+	struct sk_buff_head *skbq;
+	unsigned int i, len, update = 1;
+
+	pr_debug("sched_skb %p\n", skb);
+	if (!skb) {
+		if (!s->num)
+			return NULL;
+	} else {
+		skbq = &s->p[skb->dev->if_port].skbq;
+		__skb_queue_tail(skbq, skb);
+		s->num++;
+		skb = NULL;
+	}
+
+	if (credits < MAX_SKB_FRAGS + 1)
+		goto out;
+
+again:
+	for (i = 0; i < MAX_NPORTS; i++) {
+		s->port = (s->port + 1) & (MAX_NPORTS - 1);
+		skbq = &s->p[s->port].skbq;
+
+		skb = skb_peek(skbq);
+
+		if (!skb)
+			continue;
+
+		len = skb->len;
+		if (len <= s->p[s->port].avail) {
+			s->p[s->port].avail -= len;
+			s->num--;
+			__skb_unlink(skb, skbq);
+			goto out;
+		}
+		skb = NULL;
+	}
+
+	if (update-- && sched_update_avail(sge))
+		goto again;
+
+out:
+	/* If there are more pending skbs, we use the hardware to schedule us
+	 * again.
+	 */
+	if (s->num && !skb) {
+		struct cmdQ *q = &sge->cmdQ[0];
+		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+		}
+	}
+	pr_debug("sched_skb ret %p\n", skb);
+
+	return skb;
+}
+
+/*
+ * PIO to indicate that memory mapped Q contains valid descriptor(s).
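 *
 * (Editor's aside, not part of the original patch: the '>> 13' in
 * sched_update_avail() above folds two divisions -- by 1024 to apply the
 * per-1024ns rate to a nanosecond delta, and by 8 to turn bits into bytes,
 * which appears to be what sched_skb() compares against skb->len.  The
 * struct comments say "bits", so treat this reading as hedged:)
 */

#include <stdio.h>

int main(void)
{
	unsigned int drain_bits_per_1024ns = 964;	/* from the sketch above */
	long long delta_time_ns = 20000;		/* 20 us since last update */
	unsigned int delta_avail;

	delta_avail = (drain_bits_per_1024ns * delta_time_ns) >> 13;
	printf("quota grows by %u bytes\n", delta_avail);	/* prints 2353 */
	return 0;
}

/*
 * (end of editor's sketch)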
+ */ +static inline void doorbell_pio(struct adapter *adapter, u32 val) +{ + wmb(); + writel(val, adapter->regs + A_SG_DOORBELL); +} + +/* + * Frees all RX buffers on the freelist Q. The caller must make sure that + * the SGE is turned off before calling this function. + */ +static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) +{ + unsigned int cidx = q->cidx; + + while (q->credits--) { + struct freelQ_ce *ce = &q->centries[cidx]; + + pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(ce->skb); + ce->skb = NULL; + if (++cidx == q->size) + cidx = 0; + } +} + +/* + * Free RX free list and response queue resources. + */ +static void free_rx_resources(struct sge *sge) +{ + struct pci_dev *pdev = sge->adapter->pdev; + unsigned int size, i; + + if (sge->respQ.entries) { + size = sizeof(struct respQ_e) * sge->respQ.size; + pci_free_consistent(pdev, size, sge->respQ.entries, + sge->respQ.dma_addr); + } + + for (i = 0; i < SGE_FREELQ_N; i++) { + struct freelQ *q = &sge->freelQ[i]; + + if (q->centries) { + free_freelQ_buffers(pdev, q); + kfree(q->centries); + } + if (q->entries) { + size = sizeof(struct freelQ_e) * q->size; + pci_free_consistent(pdev, size, q->entries, + q->dma_addr); + } + } +} + +/* + * Allocates basic RX resources, consisting of memory mapped freelist Qs and a + * response queue. + */ +static int alloc_rx_resources(struct sge *sge, struct sge_params *p) +{ + struct pci_dev *pdev = sge->adapter->pdev; + unsigned int size, i; + + for (i = 0; i < SGE_FREELQ_N; i++) { + struct freelQ *q = &sge->freelQ[i]; + + q->genbit = 1; + q->size = p->freelQ_size[i]; + q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; + size = sizeof(struct freelQ_e) * q->size; + q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); + if (!q->entries) + goto err_no_mem; + + size = sizeof(struct freelQ_ce) * q->size; + q->centries = kzalloc(size, GFP_KERNEL); + if (!q->centries) + goto err_no_mem; + } + + /* + * Calculate the buffer sizes for the two free lists. FL0 accommodates + * regular sized Ethernet frames, FL1 is sized not to exceed 16K, + * including all the sk_buff overhead. + * + * Note: For T2 FL0 and FL1 are reversed. + */ + sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + + sizeof(struct cpl_rx_data) + + sge->freelQ[!sge->jumbo_fl].dma_offset; + + size = (16 * 1024) - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; + + /* + * Setup which skb recycle Q should be used when recycling buffers from + * each free list. + */ + sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; + sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; + + sge->respQ.genbit = 1; + sge->respQ.size = SGE_RESPQ_E_N; + sge->respQ.credits = 0; + size = sizeof(struct respQ_e) * sge->respQ.size; + sge->respQ.entries = + pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); + if (!sge->respQ.entries) + goto err_no_mem; + return 0; + +err_no_mem: + free_rx_resources(sge); + return -ENOMEM; +} + +/* + * Reclaims n TX descriptors and frees the buffers associated with them. 
+ */
+static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
+{
+	struct cmdQ_ce *ce;
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int cidx = q->cidx;
+
+	q->in_use -= n;
+	ce = &q->centries[cidx];
+	while (n--) {
+		if (likely(dma_unmap_len(ce, dma_len))) {
+			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+					 dma_unmap_len(ce, dma_len),
+					 PCI_DMA_TODEVICE);
+			if (q->sop)
+				q->sop = 0;
+		}
+		if (ce->skb) {
+			dev_kfree_skb_any(ce->skb);
+			q->sop = 1;
+		}
+		ce++;
+		if (++cidx == q->size) {
+			cidx = 0;
+			ce = q->centries;
+		}
+	}
+	q->cidx = cidx;
+}
+
+/*
+ * Free TX resources.
+ *
+ * Assumes that SGE is stopped and all interrupts are disabled.
+ */
+static void free_tx_resources(struct sge *sge)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_CMDQ_N; i++) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		if (q->centries) {
+			if (q->in_use)
+				free_cmdQ_buffers(sge, q, q->in_use);
+			kfree(q->centries);
+		}
+		if (q->entries) {
+			size = sizeof(struct cmdQ_e) * q->size;
+			pci_free_consistent(pdev, size, q->entries,
+					    q->dma_addr);
+		}
+	}
+}
+
+/*
+ * Allocates basic TX resources, consisting of memory mapped command Qs.
+ */
+static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_CMDQ_N; i++) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		q->genbit = 1;
+		q->sop = 1;
+		q->size = p->cmdQ_size[i];
+		q->in_use = 0;
+		q->status = 0;
+		q->processed = q->cleaned = 0;
+		q->stop_thres = 0;
+		spin_lock_init(&q->lock);
+		size = sizeof(struct cmdQ_e) * q->size;
+		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+		if (!q->entries)
+			goto err_no_mem;
+
+		size = sizeof(struct cmdQ_ce) * q->size;
+		q->centries = kzalloc(size, GFP_KERNEL);
+		if (!q->centries)
+			goto err_no_mem;
+	}
+
+	/*
+	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
+	 * only.  For queue 0 set the stop threshold so we can handle one more
+	 * packet from each port, plus reserve an additional 24 entries for
+	 * Ethernet packets only.  Queue 1 never suspends nor do we reserve
+	 * space for Ethernet packets.
+	 */
+	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
+		(MAX_SKB_FRAGS + 1);
+	return 0;
+
+err_no_mem:
+	free_tx_resources(sge);
+	return -ENOMEM;
+}
+
+static inline void setup_ring_params(struct adapter *adapter, u64 addr,
+				     u32 size, int base_reg_lo,
+				     int base_reg_hi, int size_reg)
+{
+	writel((u32)addr, adapter->regs + base_reg_lo);
+	writel(addr >> 32, adapter->regs + base_reg_hi);
+	writel(size, adapter->regs + size_reg);
+}
+
+/*
+ * Enable/disable VLAN acceleration.
+ */
+void t1_vlan_mode(struct adapter *adapter, u32 features)
+{
+	struct sge *sge = adapter->sge;
+
+	if (features & NETIF_F_HW_VLAN_RX)
+		sge->sge_control |= F_VLAN_XTRACT;
+	else
+		sge->sge_control &= ~F_VLAN_XTRACT;
+	if (adapter->open_device_map) {
+		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
+		readl(adapter->regs + A_SG_CONTROL); /* flush */
+	}
+}
+
+/*
+ * Programs the various SGE registers.  The engine is not yet enabled,
+ * but sge->sge_control is set up and ready to go.
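 *
 * (Editor's aside, not part of the original patch: configure_sge() below
 * leans on setup_ring_params() above, which splits a 64-bit ring base
 * address across two 32-bit registers.  A userspace sketch with a mock
 * writel; toy_* names and the address are hypothetical:)
 */

#include <stdio.h>
#include <stdint.h>

static void toy_writel(uint32_t val, const char *reg)
{
	printf("writel %#x -> %s\n", val, reg);
}

int main(void)
{
	uint64_t addr = 0x00000001fffff000ULL;	/* hypothetical ring base */

	toy_writel((uint32_t)addr, "BASELWR");	/* low 32 bits */
	toy_writel(addr >> 32, "BASEUPR");	/* high 32 bits */
	toy_writel(1024, "SIZE");		/* ring size in descriptors */
	return 0;
}

/*
 * (end of editor's sketch)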
+ */ +static void configure_sge(struct sge *sge, struct sge_params *p) +{ + struct adapter *ap = sge->adapter; + + writel(0, ap->regs + A_SG_CONTROL); + setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, + A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); + setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size, + A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE); + setup_ring_params(ap, sge->freelQ[0].dma_addr, + sge->freelQ[0].size, A_SG_FL0BASELWR, + A_SG_FL0BASEUPR, A_SG_FL0SIZE); + setup_ring_params(ap, sge->freelQ[1].dma_addr, + sge->freelQ[1].size, A_SG_FL1BASELWR, + A_SG_FL1BASEUPR, A_SG_FL1SIZE); + + /* The threshold comparison uses <. */ + writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD); + + setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size, + A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE); + writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT); + + sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | + F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | + V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | + V_RX_PKT_OFFSET(sge->rx_pkt_pad); + +#if defined(__BIG_ENDIAN_BITFIELD) + sge->sge_control |= F_ENABLE_BIG_ENDIAN; +#endif + + /* Initialize no-resource timer */ + sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap); + + t1_sge_set_coalesce_params(sge, p); +} + +/* + * Return the payload capacity of the jumbo free-list buffers. + */ +static inline unsigned int jumbo_payload_capacity(const struct sge *sge) +{ + return sge->freelQ[sge->jumbo_fl].rx_buffer_size - + sge->freelQ[sge->jumbo_fl].dma_offset - + sizeof(struct cpl_rx_data); +} + +/* + * Frees all SGE related resources and the sge structure itself + */ +void t1_sge_destroy(struct sge *sge) +{ + int i; + + for_each_port(sge->adapter, i) + free_percpu(sge->port_stats[i]); + + kfree(sge->tx_sched); + free_tx_resources(sge); + free_rx_resources(sge); + kfree(sge); +} + +/* + * Allocates new RX buffers on the freelist Q (and tracks them on the freelist + * context Q) until the Q is full or alloc_skb fails. + * + * It is possible that the generation bits already match, indicating that the + * buffer is already valid and nothing needs to be done. This happens when we + * copied a received buffer into a new sk_buff during the interrupt processing. + * + * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad), + * we specify a RX_OFFSET in order to make sure that the IP header is 4B + * aligned. 
+ */ +static void refill_free_list(struct sge *sge, struct freelQ *q) +{ + struct pci_dev *pdev = sge->adapter->pdev; + struct freelQ_ce *ce = &q->centries[q->pidx]; + struct freelQ_e *e = &q->entries[q->pidx]; + unsigned int dma_len = q->rx_buffer_size - q->dma_offset; + + while (q->credits < q->size) { + struct sk_buff *skb; + dma_addr_t mapping; + + skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC); + if (!skb) + break; + + skb_reserve(skb, q->dma_offset); + mapping = pci_map_single(pdev, skb->data, dma_len, + PCI_DMA_FROMDEVICE); + skb_reserve(skb, sge->rx_pkt_pad); + + ce->skb = skb; + dma_unmap_addr_set(ce, dma_addr, mapping); + dma_unmap_len_set(ce, dma_len, dma_len); + e->addr_lo = (u32)mapping; + e->addr_hi = (u64)mapping >> 32; + e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); + wmb(); + e->gen2 = V_CMD_GEN2(q->genbit); + + e++; + ce++; + if (++q->pidx == q->size) { + q->pidx = 0; + q->genbit ^= 1; + ce = q->centries; + e = q->entries; + } + q->credits++; + } +} + +/* + * Calls refill_free_list for both free lists. If we cannot fill at least 1/4 + * of both rings, we go into 'few interrupt mode' in order to give the system + * time to free up resources. + */ +static void freelQs_empty(struct sge *sge) +{ + struct adapter *adapter = sge->adapter; + u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE); + u32 irqholdoff_reg; + + refill_free_list(sge, &sge->freelQ[0]); + refill_free_list(sge, &sge->freelQ[1]); + + if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && + sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { + irq_reg |= F_FL_EXHAUSTED; + irqholdoff_reg = sge->fixed_intrtimer; + } else { + /* Clear the F_FL_EXHAUSTED interrupts for now */ + irq_reg &= ~F_FL_EXHAUSTED; + irqholdoff_reg = sge->intrtimer_nres; + } + writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER); + writel(irq_reg, adapter->regs + A_SG_INT_ENABLE); + + /* We reenable the Qs to force a freelist GTS interrupt later */ + doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE); +} + +#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA) +#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) +#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \ + F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) + +/* + * Disable SGE Interrupts + */ +void t1_sge_intr_disable(struct sge *sge) +{ + u32 val = readl(sge->adapter->regs + A_PL_ENABLE); + + writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); + writel(0, sge->adapter->regs + A_SG_INT_ENABLE); +} + +/* + * Enable SGE interrupts. + */ +void t1_sge_intr_enable(struct sge *sge) +{ + u32 en = SGE_INT_ENABLE; + u32 val = readl(sge->adapter->regs + A_PL_ENABLE); + + if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO) + en &= ~F_PACKET_TOO_BIG; + writel(en, sge->adapter->regs + A_SG_INT_ENABLE); + writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); +} + +/* + * Clear SGE interrupts. 
+ */ +void t1_sge_intr_clear(struct sge *sge) +{ + writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); + writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); +} + +/* + * SGE 'Error' interrupt handler + */ +int t1_sge_intr_error_handler(struct sge *sge) +{ + struct adapter *adapter = sge->adapter; + u32 cause = readl(adapter->regs + A_SG_INT_CAUSE); + + if (adapter->port[0].dev->hw_features & NETIF_F_TSO) + cause &= ~F_PACKET_TOO_BIG; + if (cause & F_RESPQ_EXHAUSTED) + sge->stats.respQ_empty++; + if (cause & F_RESPQ_OVERFLOW) { + sge->stats.respQ_overflow++; + pr_alert("%s: SGE response queue overflow\n", + adapter->name); + } + if (cause & F_FL_EXHAUSTED) { + sge->stats.freelistQ_empty++; + freelQs_empty(sge); + } + if (cause & F_PACKET_TOO_BIG) { + sge->stats.pkt_too_big++; + pr_alert("%s: SGE max packet size exceeded\n", + adapter->name); + } + if (cause & F_PACKET_MISMATCH) { + sge->stats.pkt_mismatch++; + pr_alert("%s: SGE packet mismatch\n", adapter->name); + } + if (cause & SGE_INT_FATAL) + t1_fatal_err(adapter); + + writel(cause, adapter->regs + A_SG_INT_CAUSE); + return 0; +} + +const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) +{ + return &sge->stats; +} + +void t1_sge_get_port_stats(const struct sge *sge, int port, + struct sge_port_stats *ss) +{ + int cpu; + + memset(ss, 0, sizeof(*ss)); + for_each_possible_cpu(cpu) { + struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); + + ss->rx_cso_good += st->rx_cso_good; + ss->tx_cso += st->tx_cso; + ss->tx_tso += st->tx_tso; + ss->tx_need_hdrroom += st->tx_need_hdrroom; + ss->vlan_xtract += st->vlan_xtract; + ss->vlan_insert += st->vlan_insert; + } +} + +/** + * recycle_fl_buf - recycle a free list buffer + * @fl: the free list + * @idx: index of buffer to recycle + * + * Recycles the specified buffer on the given free list by adding it at + * the next available slot on the list. + */ +static void recycle_fl_buf(struct freelQ *fl, int idx) +{ + struct freelQ_e *from = &fl->entries[idx]; + struct freelQ_e *to = &fl->entries[fl->pidx]; + + fl->centries[fl->pidx] = fl->centries[idx]; + to->addr_lo = from->addr_lo; + to->addr_hi = from->addr_hi; + to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); + wmb(); + to->gen2 = V_CMD_GEN2(fl->genbit); + fl->credits++; + + if (++fl->pidx == fl->size) { + fl->pidx = 0; + fl->genbit ^= 1; + } +} + +static int copybreak __read_mostly = 256; +module_param(copybreak, int, 0); +MODULE_PARM_DESC(copybreak, "Receive copy threshold"); + +/** + * get_packet - return the next ingress packet buffer + * @pdev: the PCI device that received the packet + * @fl: the SGE free list holding the packet + * @len: the actual packet length, excluding any SGE padding + * + * Get the next packet from a free list and complete setup of the + * sk_buff. If the packet is small we make a copy and recycle the + * original buffer, otherwise we use the original buffer itself. If a + * positive drop threshold is supplied packets are dropped and their + * buffers recycled if (a) the number of remaining buffers is under the + * threshold and the packet is too big to copy, or (b) the packet should + * be copied but there is no memory for the copy. 
+ */
+static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+					 struct freelQ *fl, unsigned int len)
+{
+	struct sk_buff *skb;
+	const struct freelQ_ce *ce = &fl->centries[fl->cidx];
+
+	if (len < copybreak) {
+		skb = alloc_skb(len + 2, GFP_ATOMIC);
+		if (!skb)
+			goto use_orig_buf;
+
+		skb_reserve(skb, 2);	/* align IP header */
+		skb_put(skb, len);
+		pci_dma_sync_single_for_cpu(pdev,
+					    dma_unmap_addr(ce, dma_addr),
+					    dma_unmap_len(ce, dma_len),
+					    PCI_DMA_FROMDEVICE);
+		skb_copy_from_linear_data(ce->skb, skb->data, len);
+		pci_dma_sync_single_for_device(pdev,
+					       dma_unmap_addr(ce, dma_addr),
+					       dma_unmap_len(ce, dma_len),
+					       PCI_DMA_FROMDEVICE);
+		recycle_fl_buf(fl, fl->cidx);
+		return skb;
+	}
+
+use_orig_buf:
+	if (fl->credits < 2) {
+		recycle_fl_buf(fl, fl->cidx);
+		return NULL;
+	}
+
+	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	skb = ce->skb;
+	prefetch(skb->data);
+
+	skb_put(skb, len);
+	return skb;
+}
+
+/**
+ *	unexpected_offload - handle an unexpected offload packet
+ *	@adapter: the adapter
+ *	@fl: the free list that received the packet
+ *
+ *	Called when we receive an unexpected offload packet (e.g., the TOE
+ *	function is disabled or the card is a NIC).  Prints a message and
+ *	recycles the buffer.
+ */
+static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
+{
+	struct freelQ_ce *ce = &fl->centries[fl->cidx];
+	struct sk_buff *skb = ce->skb;
+
+	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
+				    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	pr_err("%s: unexpected offload packet, cmd %u\n",
+	       adapter->name, *skb->data);
+	recycle_fl_buf(fl, fl->cidx);
+}
+
+/*
+ * T1/T2 SGE limits the maximum DMA size per TX descriptor to
+ * SGE_TX_DESC_MAX_PLEN (16KB).  If the PAGE_SIZE is larger than 16KB, the
+ * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
+ * Note that the *_large_page_tx_descs stuff will be optimized out when
+ * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
+ *
+ * compute_large_page_tx_descs() computes how many additional descriptors are
+ * required to break down the stack's request.
+ */
+static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
+{
+	unsigned int count = 0;
+
+	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+		unsigned int i, len = skb_headlen(skb);
+		while (len > SGE_TX_DESC_MAX_PLEN) {
+			count++;
+			len -= SGE_TX_DESC_MAX_PLEN;
+		}
+		for (i = 0; nfrags--; i++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			len = frag->size;
+			while (len > SGE_TX_DESC_MAX_PLEN) {
+				count++;
+				len -= SGE_TX_DESC_MAX_PLEN;
+			}
+		}
+	}
+	return count;
+}
+
+/*
+ * Write a cmdQ entry.
+ *
+ * Since this function writes the 'flags' field, it must not be used to
+ * write the first cmdQ entry.
+ */
+static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
+				 unsigned int len, unsigned int gen,
+				 unsigned int eop)
+{
+	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
+
+	e->addr_lo = (u32)mapping;
+	e->addr_hi = (u64)mapping >> 32;
+	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
+	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
+}
+
+/*
+ * See comment for previous function.
+ *
+ * write_large_page_tx_descs() writes additional SGE tx descriptors if
+ * *desc_len exceeds HW's capability.
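 *
 * (Editor's worked example, not part of the original patch: for a 64KB
 * contiguous chunk, the loop shape shared by compute_large_page_tx_descs()
 * above and the writer below yields three extra descriptors plus one for
 * the final 16KB:)
 */

#include <stdio.h>

#define TOY_MAX_PLEN 16384	/* stand-in for SGE_TX_DESC_MAX_PLEN */

int main(void)
{
	unsigned int len = 65536;	/* e.g. one 64KB PAGE_SIZE fragment */
	unsigned int extra = 0;

	while (len > TOY_MAX_PLEN) {	/* same loop as in the driver */
		extra++;
		len -= TOY_MAX_PLEN;
	}
	printf("extra descriptors: %u, final chunk: %u bytes\n", extra, len);
	return 0;	/* prints: extra descriptors: 3, final chunk: 16384 bytes */
}

/*
 * (end of editor's sketch)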
+ */ +static inline unsigned int write_large_page_tx_descs(unsigned int pidx, + struct cmdQ_e **e, + struct cmdQ_ce **ce, + unsigned int *gen, + dma_addr_t *desc_mapping, + unsigned int *desc_len, + unsigned int nfrags, + struct cmdQ *q) +{ + if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { + struct cmdQ_e *e1 = *e; + struct cmdQ_ce *ce1 = *ce; + + while (*desc_len > SGE_TX_DESC_MAX_PLEN) { + *desc_len -= SGE_TX_DESC_MAX_PLEN; + write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN, + *gen, nfrags == 0 && *desc_len == 0); + ce1->skb = NULL; + dma_unmap_len_set(ce1, dma_len, 0); + *desc_mapping += SGE_TX_DESC_MAX_PLEN; + if (*desc_len) { + ce1++; + e1++; + if (++pidx == q->size) { + pidx = 0; + *gen ^= 1; + ce1 = q->centries; + e1 = q->entries; + } + } + } + *e = e1; + *ce = ce1; + } + return pidx; +} + +/* + * Write the command descriptors to transmit the given skb starting at + * descriptor pidx with the given generation. + */ +static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, + unsigned int pidx, unsigned int gen, + struct cmdQ *q) +{ + dma_addr_t mapping, desc_mapping; + struct cmdQ_e *e, *e1; + struct cmdQ_ce *ce; + unsigned int i, flags, first_desc_len, desc_len, + nfrags = skb_shinfo(skb)->nr_frags; + + e = e1 = &q->entries[pidx]; + ce = &q->centries[pidx]; + + mapping = pci_map_single(adapter->pdev, skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + + desc_mapping = mapping; + desc_len = skb_headlen(skb); + + flags = F_CMD_DATAVALID | F_CMD_SOP | + V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | + V_CMD_GEN2(gen); + first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ? + desc_len : SGE_TX_DESC_MAX_PLEN; + e->addr_lo = (u32)desc_mapping; + e->addr_hi = (u64)desc_mapping >> 32; + e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen); + ce->skb = NULL; + dma_unmap_len_set(ce, dma_len, 0); + + if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN && + desc_len > SGE_TX_DESC_MAX_PLEN) { + desc_mapping += first_desc_len; + desc_len -= first_desc_len; + e1++; + ce++; + if (++pidx == q->size) { + pidx = 0; + gen ^= 1; + e1 = q->entries; + ce = q->centries; + } + pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, + &desc_mapping, &desc_len, + nfrags, q); + + if (likely(desc_len)) + write_tx_desc(e1, desc_mapping, desc_len, gen, + nfrags == 0); + } + + ce->skb = NULL; + dma_unmap_addr_set(ce, dma_addr, mapping); + dma_unmap_len_set(ce, dma_len, skb_headlen(skb)); + + for (i = 0; nfrags--; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + e1++; + ce++; + if (++pidx == q->size) { + pidx = 0; + gen ^= 1; + e1 = q->entries; + ce = q->centries; + } + + mapping = pci_map_page(adapter->pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + desc_mapping = mapping; + desc_len = frag->size; + + pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, + &desc_mapping, &desc_len, + nfrags, q); + if (likely(desc_len)) + write_tx_desc(e1, desc_mapping, desc_len, gen, + nfrags == 0); + ce->skb = NULL; + dma_unmap_addr_set(ce, dma_addr, mapping); + dma_unmap_len_set(ce, dma_len, frag->size); + } + ce->skb = skb; + wmb(); + e->flags = flags; +} + +/* + * Clean up completed Tx buffers. + */ +static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) +{ + unsigned int reclaim = q->processed - q->cleaned; + + if (reclaim) { + pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n", + q->processed, q->cleaned); + free_cmdQ_buffers(sge, q, reclaim); + q->cleaned += reclaim; + } +} + +/* + * Called from tasklet. 
Checks the scheduler for any
+ * pending skbs that can be sent.
+ */
+static void restart_sched(unsigned long arg)
+{
+ struct sge *sge = (struct sge *) arg;
+ struct adapter *adapter = sge->adapter;
+ struct cmdQ *q = &sge->cmdQ[0];
+ struct sk_buff *skb;
+ unsigned int credits, queued_skb = 0;
+
+ spin_lock(&q->lock);
+ reclaim_completed_tx(sge, q);
+
+ credits = q->size - q->in_use;
+ pr_debug("restart_sched credits=%d\n", credits);
+ while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
+ unsigned int genbit, pidx, count;
+ count = 1 + skb_shinfo(skb)->nr_frags;
+ count += compute_large_page_tx_descs(skb);
+ q->in_use += count;
+ genbit = q->genbit;
+ pidx = q->pidx;
+ q->pidx += count;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->genbit ^= 1;
+ }
+ write_tx_descs(adapter, skb, pidx, genbit, q);
+ credits = q->size - q->in_use;
+ queued_skb = 1;
+ }
+
+ if (queued_skb) {
+ clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+ set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+ }
+ }
+ spin_unlock(&q->lock);
+}
+
+/**
+ * sge_rx - process an ingress ethernet packet
+ * @sge: the sge structure
+ * @fl: the free list that contains the packet buffer
+ * @len: the packet length
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ */
+static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
+{
+ struct sk_buff *skb;
+ const struct cpl_rx_pkt *p;
+ struct adapter *adapter = sge->adapter;
+ struct sge_port_stats *st;
+ struct net_device *dev;
+
+ skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
+ if (unlikely(!skb)) {
+ sge->stats.rx_drops++;
+ return;
+ }
+
+ p = (const struct cpl_rx_pkt *) skb->data;
+ if (p->iff >= adapter->params.nports) {
+ kfree_skb(skb);
+ return;
+ }
+ __skb_pull(skb, sizeof(*p));
+
+ st = this_cpu_ptr(sge->port_stats[p->iff]);
+ dev = adapter->port[p->iff].dev;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
+ skb->protocol == htons(ETH_P_IP) &&
+ (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
+ ++st->rx_cso_good;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb_checksum_none_assert(skb);
+
+ if (p->vlan_valid) {
+ st->vlan_xtract++;
+ __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+ }
+ netif_receive_skb(skb);
+}
+
+/*
+ * Returns true if a command queue has enough available descriptors that
+ * we can resume Tx operation after temporarily disabling its packet queue.
+ */
+static inline int enough_free_Tx_descs(const struct cmdQ *q)
+{
+ unsigned int r = q->processed - q->cleaned;
+
+ return q->in_use - r < (q->size >> 1);
+}
+
+/*
+ * Called when sufficient space has become available in the SGE command queues
+ * after the Tx packet schedulers have been suspended to restart the Tx path.
+ */
+static void restart_tx_queues(struct sge *sge)
+{
+ struct adapter *adap = sge->adapter;
+ int i;
+
+ if (!enough_free_Tx_descs(&sge->cmdQ[0]))
+ return;
+
+ for_each_port(adap, i) {
+ struct net_device *nd = adap->port[i].dev;
+
+ if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
+ netif_running(nd)) {
+ sge->stats.cmdQ_restarted[2]++;
+ netif_wake_queue(nd);
+ }
+ }
+}
+
+/*
+ * update_tx_info is called from the interrupt handler/NAPI to process the
+ * returned cmdQ0 credits and restart the Tx path when needed.
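+ *
+ * The CMDQ_STAT_RUNNING/CMDQ_STAT_LAST_PKT_DB bits form a small
+ * handshake with the transmit path; roughly (a sketch, not the full
+ * locking story):
+ *
+ * if work is still pending and LAST_PKT_DB was not already set:
+ * set RUNNING and ring the F_CMDQ0_ENABLE doorbell again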
+ */
+static unsigned int update_tx_info(struct adapter *adapter,
+ unsigned int flags,
+ unsigned int pr0)
+{
+ struct sge *sge = adapter->sge;
+ struct cmdQ *cmdq = &sge->cmdQ[0];
+
+ cmdq->processed += pr0;
+ if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
+ freelQs_empty(sge);
+ flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
+ }
+ if (flags & F_CMDQ0_ENABLE) {
+ clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+
+ if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
+ !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
+ set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+ writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+ }
+ if (sge->tx_sched)
+ tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
+
+ flags &= ~F_CMDQ0_ENABLE;
+ }
+
+ if (unlikely(sge->stopped_tx_queues != 0))
+ restart_tx_queues(sge);
+
+ return flags;
+}
+
+/*
+ * Process SGE responses, up to the supplied budget. Returns the number of
+ * responses processed. A negative budget is effectively unlimited.
+ */
+static int process_responses(struct adapter *adapter, int budget)
+{
+ struct sge *sge = adapter->sge;
+ struct respQ *q = &sge->respQ;
+ struct respQ_e *e = &q->entries[q->cidx];
+ int done = 0;
+ unsigned int flags = 0;
+ unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+ while (done < budget && e->GenerationBit == q->genbit) {
+ flags |= e->Qsleeping;
+
+ cmdq_processed[0] += e->Cmdq0CreditReturn;
+ cmdq_processed[1] += e->Cmdq1CreditReturn;
+
+ /* We batch updates to the TX side to avoid cacheline
+ * ping-pong of TX state information on MP where the sender
+ * might run on a different CPU than this function...
+ */
+ if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
+ flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+ cmdq_processed[0] = 0;
+ }
+
+ if (unlikely(cmdq_processed[1] > 16)) {
+ sge->cmdQ[1].processed += cmdq_processed[1];
+ cmdq_processed[1] = 0;
+ }
+
+ if (likely(e->DataValid)) {
+ struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+
+ BUG_ON(!e->Sop || !e->Eop);
+ if (unlikely(e->Offload))
+ unexpected_offload(adapter, fl);
+ else
+ sge_rx(sge, fl, e->BufferLength);
+
+ ++done;
+
+ /*
+ * Note: this depends on each packet consuming a
+ * single free-list buffer; cf. the BUG above.
+ */
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ prefetch(fl->centries[fl->cidx].skb);
+
+ if (unlikely(--fl->credits <
+ fl->size - SGE_FREEL_REFILL_THRESH))
+ refill_free_list(sge, fl);
+ } else
+ sge->stats.pure_rsps++;
+
+ e++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->genbit ^= 1;
+ e = q->entries;
+ }
+ prefetch(e);
+
+ if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+ writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+ q->credits = 0;
+ }
+ }
+
+ flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+ sge->cmdQ[1].processed += cmdq_processed[1];
+
+ return done;
+}
+
+static inline int responses_pending(const struct adapter *adapter)
+{
+ const struct respQ *Q = &adapter->sge->respQ;
+ const struct respQ_e *e = &Q->entries[Q->cidx];
+
+ return e->GenerationBit == Q->genbit;
+}
+
+/*
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non data-carrying) responses. Such responses are too lightweight to
+ * justify calling a softirq when using NAPI, so we handle them specially
+ * in hard interrupt context. The caller must ensure that at least one
+ * response is pending; if the first response carries data the function
+ * returns immediately. Returns 1 if it encounters a valid data-carrying
+ * response, 0 otherwise.
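+ *
+ * The interrupt path therefore splits roughly as follows (see
+ * t1_interrupt() below):
+ *
+ * pure responses -> consumed right here, in hard interrupt context
+ * data responses -> NAPI is scheduled and process_responses() runs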
+ */
+static int process_pure_responses(struct adapter *adapter)
+{
+ struct sge *sge = adapter->sge;
+ struct respQ *q = &sge->respQ;
+ struct respQ_e *e = &q->entries[q->cidx];
+ const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+ unsigned int flags = 0;
+ unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+ prefetch(fl->centries[fl->cidx].skb);
+ if (e->DataValid)
+ return 1;
+
+ do {
+ flags |= e->Qsleeping;
+
+ cmdq_processed[0] += e->Cmdq0CreditReturn;
+ cmdq_processed[1] += e->Cmdq1CreditReturn;
+
+ e++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->genbit ^= 1;
+ e = q->entries;
+ }
+ prefetch(e);
+
+ if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+ writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+ q->credits = 0;
+ }
+ sge->stats.pure_rsps++;
+ } while (e->GenerationBit == q->genbit && !e->DataValid);
+
+ flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+ sge->cmdQ[1].processed += cmdq_processed[1];
+
+ return e->GenerationBit == q->genbit;
+}
+
+/*
+ * Handler for new data events when using NAPI. This does not need any locking
+ * or protection from interrupts as data interrupts are off at this point and
+ * other adapter interrupts do not interfere.
+ */
+int t1_poll(struct napi_struct *napi, int budget)
+{
+ struct adapter *adapter = container_of(napi, struct adapter, napi);
+ int work_done = process_responses(adapter, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+ writel(adapter->sge->respQ.cidx,
+ adapter->regs + A_SG_SLEEPING);
+ }
+ return work_done;
+}
+
+irqreturn_t t1_interrupt(int irq, void *data)
+{
+ struct adapter *adapter = data;
+ struct sge *sge = adapter->sge;
+ int handled;
+
+ if (likely(responses_pending(adapter))) {
+ writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ if (process_pure_responses(adapter))
+ __napi_schedule(&adapter->napi);
+ else {
+ /* no data, no NAPI needed */
+ writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+ /* undo schedule_prep */
+ napi_enable(&adapter->napi);
+ }
+ }
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&adapter->async_lock);
+ handled = t1_slow_intr_handler(adapter);
+ spin_unlock(&adapter->async_lock);
+
+ if (!handled)
+ sge->stats.unhandled_irqs++;
+
+ return IRQ_RETVAL(handled != 0);
+}
+
+/*
+ * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
+ *
+ * The code figures out how many entries the sk_buff will require in the
+ * cmdQ and updates the cmdQ data structure with the state once the enqueue
+ * has completed. Then, it doesn't access the global structure anymore, but
+ * uses the corresponding fields on the stack. In conjunction with a spinlock
+ * around that code, we can make the function reentrant without holding the
+ * lock when we actually enqueue (which might be expensive, especially on
+ * architectures with IO MMUs).
+ *
+ * This runs with softirqs disabled.
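+ *
+ * For example (sizes illustrative), an skb with a linear area and two
+ * page fragments needs count = 1 + nr_frags = 3 descriptors, plus
+ * whatever compute_large_page_tx_descs() adds when PAGE_SIZE exceeds
+ * SGE_TX_DESC_MAX_PLEN; the credit check below compares this count
+ * against q->size - q->in_use.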
+ */ +static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, + unsigned int qid, struct net_device *dev) +{ + struct sge *sge = adapter->sge; + struct cmdQ *q = &sge->cmdQ[qid]; + unsigned int credits, pidx, genbit, count, use_sched_skb = 0; + + if (!spin_trylock(&q->lock)) + return NETDEV_TX_LOCKED; + + reclaim_completed_tx(sge, q); + + pidx = q->pidx; + credits = q->size - q->in_use; + count = 1 + skb_shinfo(skb)->nr_frags; + count += compute_large_page_tx_descs(skb); + + /* Ethernet packet */ + if (unlikely(credits < count)) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + set_bit(dev->if_port, &sge->stopped_tx_queues); + sge->stats.cmdQ_full[2]++; + pr_err("%s: Tx ring full while queue awake!\n", + adapter->name); + } + spin_unlock(&q->lock); + return NETDEV_TX_BUSY; + } + + if (unlikely(credits - count < q->stop_thres)) { + netif_stop_queue(dev); + set_bit(dev->if_port, &sge->stopped_tx_queues); + sge->stats.cmdQ_full[2]++; + } + + /* T204 cmdQ0 skbs that are destined for a certain port have to go + * through the scheduler. + */ + if (sge->tx_sched && !qid && skb->dev) { +use_sched: + use_sched_skb = 1; + /* Note that the scheduler might return a different skb than + * the one passed in. + */ + skb = sched_skb(sge, skb, credits); + if (!skb) { + spin_unlock(&q->lock); + return NETDEV_TX_OK; + } + pidx = q->pidx; + count = 1 + skb_shinfo(skb)->nr_frags; + count += compute_large_page_tx_descs(skb); + } + + q->in_use += count; + genbit = q->genbit; + pidx = q->pidx; + q->pidx += count; + if (q->pidx >= q->size) { + q->pidx -= q->size; + q->genbit ^= 1; + } + spin_unlock(&q->lock); + + write_tx_descs(adapter, skb, pidx, genbit, q); + + /* + * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring + * the doorbell if the Q is asleep. There is a natural race, where + * the hardware is going to sleep just after we checked, however, + * then the interrupt handler will detect the outstanding TX packet + * and ring the doorbell for us. + */ + if (qid) + doorbell_pio(adapter, F_CMDQ1_ENABLE); + else { + clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); + if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { + set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); + writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); + } + } + + if (use_sched_skb) { + if (spin_trylock(&q->lock)) { + credits = q->size - q->in_use; + skb = NULL; + goto use_sched; + } + } + return NETDEV_TX_OK; +} + +#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) + +/* + * eth_hdr_len - return the length of an Ethernet header + * @data: pointer to the start of the Ethernet header + * + * Returns the length of an Ethernet header, including optional VLAN tag. + */ +static inline int eth_hdr_len(const void *data) +{ + const struct ethhdr *e = data; + + return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; +} + +/* + * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. + */ +netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + struct sge *sge = adapter->sge; + struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); + struct cpl_tx_pkt *cpl; + struct sk_buff *orig_skb = skb; + int ret; + + if (skb->protocol == htons(ETH_P_CPL5)) + goto send; + + /* + * We are using a non-standard hard_header_len. + * Allocate more header room in the rare cases it is not big enough. 
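+ * (The CPL header pushed below is what makes hard_header_len
+ * non-standard; reallocating sizeof(struct cpl_tx_pkt_lso) bytes of
+ * headroom, the larger of the two CPL forms, covers both cases.)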
+ */
+ if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
+ skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
+ ++st->tx_need_hdrroom;
+ dev_kfree_skb_any(orig_skb);
+ if (!skb)
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ int eth_type;
+ struct cpl_tx_pkt_lso *hdr;
+
+ ++st->tx_tso;
+
+ eth_type = skb_network_offset(skb) == ETH_HLEN ?
+ CPL_ETH_II : CPL_ETH_II_VLAN;
+
+ hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
+ hdr->opcode = CPL_TX_PKT_LSO;
+ hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
+ hdr->ip_hdr_words = ip_hdr(skb)->ihl;
+ hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
+ hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
+ skb_shinfo(skb)->gso_size));
+ hdr->len = htonl(skb->len - sizeof(*hdr));
+ cpl = (struct cpl_tx_pkt *)hdr;
+ } else {
+ /*
+ * Packets shorter than ETH_HLEN can break the MAC, so drop
+ * them early. Also, we may get oversized packets because some
+ * parts of the kernel don't handle our unusual hard_header_len
+ * right; drop those too.
+ */
+ if (unlikely(skb->len < ETH_HLEN ||
+ skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+ pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
+ skb->len, eth_hdr_len(skb->data), dev->mtu);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ if (unlikely(skb_checksum_help(skb))) {
+ pr_debug("%s: unable to do udp checksum\n", dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* We assume this catches the gratuitous ARP... and we'll use
+ * it to flush out stuck ESPI packets...
+ */
+ if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
+ if (skb->protocol == htons(ETH_P_ARP) &&
+ arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
+ adapter->sge->espibug_skb[dev->if_port] = skb;
+ /* We want to re-use this skb later. We
+ * simply bump the reference count and it
+ * will not be freed...
+ */
+ skb = skb_get(skb);
+ }
+ }
+
+ cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
+ cpl->opcode = CPL_TX_PKT;
+ cpl->ip_csum_dis = 1; /* SW calculates IP csum */
+ cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
+ /* the length field isn't used so don't bother setting it */
+
+ st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
+ }
+ cpl->iff = dev->if_port;
+
+ if (vlan_tx_tag_present(skb)) {
+ cpl->vlan_valid = 1;
+ cpl->vlan = htons(vlan_tx_tag_get(skb));
+ st->vlan_insert++;
+ } else
+ cpl->vlan_valid = 0;
+
+send:
+ ret = t1_sge_tx(skb, adapter, 0, dev);
+
+ /* If the transmit was busy and we reallocated the skb due to the
+ * headroom limit, silently discard it to avoid a leak.
+ */
+ if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
+ dev_kfree_skb_any(skb);
+ ret = NETDEV_TX_OK;
+ }
+ return ret;
+}
+
+/*
+ * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
+ */
+static void sge_tx_reclaim_cb(unsigned long data)
+{
+ int i;
+ struct sge *sge = (struct sge *)data;
+
+ for (i = 0; i < SGE_CMDQ_N; ++i) {
+ struct cmdQ *q = &sge->cmdQ[i];
+
+ if (!spin_trylock(&q->lock))
+ continue;
+
+ reclaim_completed_tx(sge, q);
+ if (i == 0 && q->in_use) { /* flush pending credits */
+ writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+ }
+ spin_unlock(&q->lock);
+ }
+ mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+}
+
+/*
+ * Propagate changes of the SGE coalescing parameters to the HW.
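+ *
+ * For example, at the 125MHz core clock of the 10G boards listed later
+ * in this patch (roughly 125 core ticks per usec), rx_coalesce_usecs = 50
+ * programs about 50 * 125 = 6250 core ticks into A_SG_INTRTIMER.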
+ */
+int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
+{
+ sge->fixed_intrtimer = p->rx_coalesce_usecs *
+ core_ticks_per_usec(sge->adapter);
+ writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
+ return 0;
+}
+
+/*
+ * Allocates both RX and TX resources and configures the SGE. However,
+ * the hardware is not enabled yet.
+ */
+int t1_sge_configure(struct sge *sge, struct sge_params *p)
+{
+ if (alloc_rx_resources(sge, p))
+ return -ENOMEM;
+ if (alloc_tx_resources(sge, p)) {
+ free_rx_resources(sge);
+ return -ENOMEM;
+ }
+ configure_sge(sge, p);
+
+ /*
+ * Now that we have sized the free lists calculate the payload
+ * capacity of the large buffers. Other parts of the driver use
+ * this to set the max offload coalescing size so that RX packets
+ * do not overflow our large buffers.
+ */
+ p->large_buf_capacity = jumbo_payload_capacity(sge);
+ return 0;
+}
+
+/*
+ * Disables the DMA engine.
+ */
+void t1_sge_stop(struct sge *sge)
+{
+ int i;
+ writel(0, sge->adapter->regs + A_SG_CONTROL);
+ readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+ if (is_T2(sge->adapter))
+ del_timer_sync(&sge->espibug_timer);
+
+ del_timer_sync(&sge->tx_reclaim_timer);
+ if (sge->tx_sched)
+ tx_sched_stop(sge);
+
+ for (i = 0; i < MAX_NPORTS; i++)
+ kfree_skb(sge->espibug_skb[i]);
+}
+
+/*
+ * Enables the DMA engine.
+ */
+void t1_sge_start(struct sge *sge)
+{
+ refill_free_list(sge, &sge->freelQ[0]);
+ refill_free_list(sge, &sge->freelQ[1]);
+
+ writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
+ doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+ readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+ mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+ if (is_T2(sge->adapter))
+ mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Callback for the T2 ESPI 'stuck packet feature' workaround.
+ */
+static void espibug_workaround_t204(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *sge = adapter->sge;
+ unsigned int nports = adapter->params.nports;
+ u32 seop[MAX_NPORTS];
+
+ if (adapter->open_device_map & PORT_MASK) {
+ int i;
+
+ if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
+ return;
+
+ for (i = 0; i < nports; i++) {
+ struct sk_buff *skb = sge->espibug_skb[i];
+
+ if (!netif_running(adapter->port[i].dev) ||
+ netif_queue_stopped(adapter->port[i].dev) ||
+ !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
+ continue;
+
+ if (!skb->cb[0]) {
+ skb_copy_to_linear_data_offset(skb,
+ sizeof(struct cpl_tx_pkt),
+ ch_mac_addr,
+ ETH_ALEN);
+ skb_copy_to_linear_data_offset(skb,
+ skb->len - 10,
+ ch_mac_addr,
+ ETH_ALEN);
+ skb->cb[0] = 0xff;
+ }
+
+ /* bump the reference count to avoid freeing of
+ * the skb once the DMA has completed.
+ */ + skb = skb_get(skb); + t1_sge_tx(skb, adapter, 0, adapter->port[i].dev); + } + } + mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); +} + +static void espibug_workaround(unsigned long data) +{ + struct adapter *adapter = (struct adapter *)data; + struct sge *sge = adapter->sge; + + if (netif_running(adapter->port[0].dev)) { + struct sk_buff *skb = sge->espibug_skb[0]; + u32 seop = t1_espi_get_mon(adapter, 0x930, 0); + + if ((seop & 0xfff0fff) == 0xfff && skb) { + if (!skb->cb[0]) { + skb_copy_to_linear_data_offset(skb, + sizeof(struct cpl_tx_pkt), + ch_mac_addr, + ETH_ALEN); + skb_copy_to_linear_data_offset(skb, + skb->len - 10, + ch_mac_addr, + ETH_ALEN); + skb->cb[0] = 0xff; + } + + /* bump the reference count to avoid freeing of the + * skb once the DMA has completed. + */ + skb = skb_get(skb); + t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); + } + } + mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); +} + +/* + * Creates a t1_sge structure and returns suggested resource parameters. + */ +struct sge * __devinit t1_sge_create(struct adapter *adapter, + struct sge_params *p) +{ + struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); + int i; + + if (!sge) + return NULL; + + sge->adapter = adapter; + sge->netdev = adapter->port[0].dev; + sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; + sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; + + for_each_port(adapter, i) { + sge->port_stats[i] = alloc_percpu(struct sge_port_stats); + if (!sge->port_stats[i]) + goto nomem_port; + } + + init_timer(&sge->tx_reclaim_timer); + sge->tx_reclaim_timer.data = (unsigned long)sge; + sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; + + if (is_T2(sge->adapter)) { + init_timer(&sge->espibug_timer); + + if (adapter->params.nports > 1) { + tx_sched_init(sge); + sge->espibug_timer.function = espibug_workaround_t204; + } else + sge->espibug_timer.function = espibug_workaround; + sge->espibug_timer.data = (unsigned long)sge->adapter; + + sge->espibug_timeout = 1; + /* for T204, every 10ms */ + if (adapter->params.nports > 1) + sge->espibug_timeout = HZ/100; + } + + + p->cmdQ_size[0] = SGE_CMDQ0_E_N; + p->cmdQ_size[1] = SGE_CMDQ1_E_N; + p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; + p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; + if (sge->tx_sched) { + if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) + p->rx_coalesce_usecs = 15; + else + p->rx_coalesce_usecs = 50; + } else + p->rx_coalesce_usecs = 50; + + p->coalesce_enable = 0; + p->sample_interval_usecs = 0; + + return sge; +nomem_port: + while (i >= 0) { + free_percpu(sge->port_stats[i]); + --i; + } + kfree(sge); + return NULL; + +} diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h new file mode 100644 index 0000000..e03980b --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/sge.h @@ -0,0 +1,94 @@ +/***************************************************************************** + * * + * File: sge.h * + * $Revision: 1.11 $ * + * $Date: 2005/06/21 22:10:55 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis *
+ * Tina Yang *
+ * Felix Marti *
+ * Scott Bardone *
+ * Kurt Ottaway *
+ * Frank DiMambro *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#ifndef _CXGB_SGE_H_
+#define _CXGB_SGE_H_
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+struct sge_intr_counts {
+ unsigned int rx_drops; /* # of packets dropped due to no mem */
+ unsigned int pure_rsps; /* # of non-payload responses */
+ unsigned int unhandled_irqs; /* # of unhandled interrupts */
+ unsigned int respQ_empty; /* # times respQ empty */
+ unsigned int respQ_overflow; /* # respQ overflow (fatal) */
+ unsigned int freelistQ_empty; /* # times freelist empty */
+ unsigned int pkt_too_big; /* packet too large (fatal) */
+ unsigned int pkt_mismatch;
+ unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */
+ unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
+};
+
+struct sge_port_stats {
+ u64 rx_cso_good; /* # of successful RX csum offloads */
+ u64 tx_cso; /* # of TX checksum offloads */
+ u64 tx_tso; /* # of TSO requests */
+ u64 vlan_xtract; /* # of VLAN tag extractions */
+ u64 vlan_insert; /* # of VLAN tag insertions */
+ u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */
+};
+
+struct sk_buff;
+struct net_device;
+struct adapter;
+struct sge_params;
+struct sge;
+
+struct sge *t1_sge_create(struct adapter *, struct sge_params *);
+int t1_sge_configure(struct sge *, struct sge_params *);
+int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
+void t1_sge_destroy(struct sge *);
+irqreturn_t t1_interrupt(int irq, void *cookie);
+int t1_poll(struct napi_struct *, int);
+
+netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void t1_vlan_mode(struct adapter *adapter, u32 features);
+void t1_sge_start(struct sge *);
+void t1_sge_stop(struct sge *);
+int t1_sge_intr_error_handler(struct sge *);
+void t1_sge_intr_enable(struct sge *);
+void t1_sge_intr_disable(struct sge *);
+void t1_sge_intr_clear(struct sge *);
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge);
+void t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *);
+unsigned int t1_sched_update_parms(struct sge *, unsigned int, unsigned int,
+ unsigned int);
+
+#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
new file mode 100644
index 0000000..8a43c7e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -0,0 +1,1130 @@
+/*****************************************************************************
+ * *
+ * File: subr.c *
+ * $Revision: 1.27 $ *
+ * $Date: 2005/06/22 01:08:36 $ *
+ * Description: *
+ * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation.
* + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#include "common.h" +#include "elmer0.h" +#include "regs.h" +#include "gmac.h" +#include "cphy.h" +#include "sge.h" +#include "tp.h" +#include "espi.h" + +/** + * t1_wait_op_done - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. Returns %0 if the operation completes and %1 + * otherwise. + */ +static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity, + int attempts, int delay) +{ + while (1) { + u32 val = readl(adapter->regs + reg) & mask; + + if (!!val == polarity) + return 0; + if (--attempts == 0) + return 1; + if (delay) + udelay(delay); + } +} + +#define TPI_ATTEMPTS 50 + +/* + * Write a register over the TPI interface (unlocked and locked versions). + */ +int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) +{ + int tpi_busy; + + writel(addr, adapter->regs + A_TPI_ADDR); + writel(value, adapter->regs + A_TPI_WR_DATA); + writel(F_TPIWR, adapter->regs + A_TPI_CSR); + + tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, + TPI_ATTEMPTS, 3); + if (tpi_busy) + pr_alert("%s: TPI write to 0x%x failed\n", + adapter->name, addr); + return tpi_busy; +} + +int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) +{ + int ret; + + spin_lock(&adapter->tpi_lock); + ret = __t1_tpi_write(adapter, addr, value); + spin_unlock(&adapter->tpi_lock); + return ret; +} + +/* + * Read a register over the TPI interface (unlocked and locked versions). + */ +int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) +{ + int tpi_busy; + + writel(addr, adapter->regs + A_TPI_ADDR); + writel(0, adapter->regs + A_TPI_CSR); + + tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, + TPI_ATTEMPTS, 3); + if (tpi_busy) + pr_alert("%s: TPI read from 0x%x failed\n", + adapter->name, addr); + else + *valp = readl(adapter->regs + A_TPI_RD_DATA); + return tpi_busy; +} + +int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) +{ + int ret; + + spin_lock(&adapter->tpi_lock); + ret = __t1_tpi_read(adapter, addr, valp); + spin_unlock(&adapter->tpi_lock); + return ret; +} + +/* + * Set a TPI parameter. 
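+ * Board bring-up writes a fixed value through this helper, e.g.
+ * t1_tpi_par(adapter, 0xf) in board_init() below.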
+ */ +static void t1_tpi_par(adapter_t *adapter, u32 value) +{ + writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR); +} + +/* + * Called when a port's link settings change to propagate the new values to the + * associated PHY and MAC. After performing the common tasks it invokes an + * OS-specific handler. + */ +void t1_link_changed(adapter_t *adapter, int port_id) +{ + int link_ok, speed, duplex, fc; + struct cphy *phy = adapter->port[port_id].phy; + struct link_config *lc = &adapter->port[port_id].link_config; + + phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); + + lc->speed = speed < 0 ? SPEED_INVALID : speed; + lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; + if (!(lc->requested_fc & PAUSE_AUTONEG)) + fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + + if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) { + /* Set MAC speed, duplex, and flow control to match PHY. */ + struct cmac *mac = adapter->port[port_id].mac; + + mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc); + lc->fc = (unsigned char)fc; + } + t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc); +} + +static int t1_pci_intr_handler(adapter_t *adapter) +{ + u32 pcix_cause; + + pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause); + + if (pcix_cause) { + pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, + pcix_cause); + t1_fatal_err(adapter); /* PCI errors are fatal */ + } + return 0; +} + +#ifdef CONFIG_CHELSIO_T1_1G +#include "fpga_defs.h" + +/* + * PHY interrupt handler for FPGA boards. + */ +static int fpga_phy_intr_handler(adapter_t *adapter) +{ + int p; + u32 cause = readl(adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); + + for_each_port(adapter, p) + if (cause & (1 << p)) { + struct cphy *phy = adapter->port[p].phy; + int phy_cause = phy->ops->interrupt_handler(phy); + + if (phy_cause & cphy_cause_link_change) + t1_link_changed(adapter, p); + } + writel(cause, adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); + return 0; +} + +/* + * Slow path interrupt handler for FPGAs. + */ +static int fpga_slow_intr(adapter_t *adapter) +{ + u32 cause = readl(adapter->regs + A_PL_CAUSE); + + cause &= ~F_PL_INTR_SGE_DATA; + if (cause & F_PL_INTR_SGE_ERR) + t1_sge_intr_error_handler(adapter->sge); + + if (cause & FPGA_PCIX_INTERRUPT_GMAC) + fpga_phy_intr_handler(adapter); + + if (cause & FPGA_PCIX_INTERRUPT_TP) { + /* + * FPGA doesn't support MC4 interrupts and it requires + * this odd layer of indirection for MC5. + */ + u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); + + /* Clear TP interrupt */ + writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); + } + if (cause & FPGA_PCIX_INTERRUPT_PCIX) + t1_pci_intr_handler(adapter); + + /* Clear the interrupts just processed. */ + if (cause) + writel(cause, adapter->regs + A_PL_CAUSE); + + return cause != 0; +} +#endif + +/* + * Wait until Elmer's MI1 interface is ready for new operations. + */ +static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg) +{ + int attempts = 100, busy; + + do { + u32 val; + + __t1_tpi_read(adapter, mi1_reg, &val); + busy = val & F_MI1_OP_BUSY; + if (busy) + udelay(10); + } while (busy && --attempts); + if (busy) + pr_alert("%s: MDIO operation timed out\n", adapter->name); + return busy; +} + +/* + * MI1 MDIO initialization. 
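+ *
+ * The MDC clock divider comes straight from the board parameters; e.g.
+ * with clock_elmer0 = 44 and mdio_mdc = 1 as in the board table below,
+ * clkdiv = 44 / (2 * 1) - 1 = 21.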
+ */ +static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi) +{ + u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1; + u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) | + V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv); + + if (!(bi->caps & SUPPORTED_10000baseT_Full)) + val |= V_MI1_SOF(1); + t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); +} + +#if defined(CONFIG_CHELSIO_T1_1G) +/* + * Elmer MI1 MDIO read/write operations. + */ +static int mi1_mdio_read(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr) +{ + struct adapter *adapter = dev->ml_priv; + u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); + unsigned int val; + + spin_lock(&adapter->tpi_lock); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); + __t1_tpi_write(adapter, + A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val); + spin_unlock(&adapter->tpi_lock); + return val; +} + +static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr, u16 val) +{ + struct adapter *adapter = dev->ml_priv; + u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); + + spin_lock(&adapter->tpi_lock); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); + __t1_tpi_write(adapter, + A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_WRITE); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + spin_unlock(&adapter->tpi_lock); + return 0; +} + +static const struct mdio_ops mi1_mdio_ops = { + .init = mi1_mdio_init, + .read = mi1_mdio_read, + .write = mi1_mdio_write, + .mode_support = MDIO_SUPPORTS_C22 +}; + +#endif + +static int mi1_mdio_ext_read(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr) +{ + struct adapter *adapter = dev->ml_priv; + u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); + unsigned int val; + + spin_lock(&adapter->tpi_lock); + + /* Write the address we want. */ + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, + MI1_OP_INDIRECT_ADDRESS); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + + /* Write the operation we want. */ + __t1_tpi_write(adapter, + A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + + /* Read the data. */ + __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val); + spin_unlock(&adapter->tpi_lock); + return val; +} + +static int mi1_mdio_ext_write(struct net_device *dev, int phy_addr, + int mmd_addr, u16 reg_addr, u16 val) +{ + struct adapter *adapter = dev->ml_priv; + u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); + + spin_lock(&adapter->tpi_lock); + + /* Write the address we want. */ + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, + MI1_OP_INDIRECT_ADDRESS); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + + /* Write the data. 
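+ * This completes the indirect (clause 45) sequence used throughout:
+ * write the register address, issue MI1_OP_INDIRECT_ADDRESS, then
+ * write the data and issue MI1_OP_INDIRECT_WRITE, waiting for MI1
+ * to go idle between steps.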
*/ + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + spin_unlock(&adapter->tpi_lock); + return 0; +} + +static const struct mdio_ops mi1_mdio_ext_ops = { + .init = mi1_mdio_init, + .read = mi1_mdio_ext_read, + .write = mi1_mdio_ext_write, + .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22 +}; + +enum { + CH_BRD_T110_1CU, + CH_BRD_N110_1F, + CH_BRD_N210_1F, + CH_BRD_T210_1F, + CH_BRD_T210_1CU, + CH_BRD_N204_4CU, +}; + +static const struct board_info t1_board[] = { + { + .board = CHBT_BOARD_CHT110, + .port_number = 1, + .caps = SUPPORTED_10000baseT_Full, + .chip_term = CHBT_TERM_T1, + .chip_mac = CHBT_MAC_PM3393, + .chip_phy = CHBT_PHY_MY3126, + .clock_core = 125000000, + .clock_mc3 = 150000000, + .clock_mc4 = 125000000, + .espi_nports = 1, + .clock_elmer0 = 44, + .mdio_mdien = 1, + .mdio_mdiinv = 1, + .mdio_mdc = 1, + .mdio_phybaseaddr = 1, + .gmac = &t1_pm3393_ops, + .gphy = &t1_my3126_ops, + .mdio_ops = &mi1_mdio_ext_ops, + .desc = "Chelsio T110 1x10GBase-CX4 TOE", + }, + + { + .board = CHBT_BOARD_N110, + .port_number = 1, + .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE, + .chip_term = CHBT_TERM_T1, + .chip_mac = CHBT_MAC_PM3393, + .chip_phy = CHBT_PHY_88X2010, + .clock_core = 125000000, + .espi_nports = 1, + .clock_elmer0 = 44, + .mdio_mdien = 0, + .mdio_mdiinv = 0, + .mdio_mdc = 1, + .mdio_phybaseaddr = 0, + .gmac = &t1_pm3393_ops, + .gphy = &t1_mv88x201x_ops, + .mdio_ops = &mi1_mdio_ext_ops, + .desc = "Chelsio N110 1x10GBaseX NIC", + }, + + { + .board = CHBT_BOARD_N210, + .port_number = 1, + .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE, + .chip_term = CHBT_TERM_T2, + .chip_mac = CHBT_MAC_PM3393, + .chip_phy = CHBT_PHY_88X2010, + .clock_core = 125000000, + .espi_nports = 1, + .clock_elmer0 = 44, + .mdio_mdien = 0, + .mdio_mdiinv = 0, + .mdio_mdc = 1, + .mdio_phybaseaddr = 0, + .gmac = &t1_pm3393_ops, + .gphy = &t1_mv88x201x_ops, + .mdio_ops = &mi1_mdio_ext_ops, + .desc = "Chelsio N210 1x10GBaseX NIC", + }, + + { + .board = CHBT_BOARD_CHT210, + .port_number = 1, + .caps = SUPPORTED_10000baseT_Full, + .chip_term = CHBT_TERM_T2, + .chip_mac = CHBT_MAC_PM3393, + .chip_phy = CHBT_PHY_88X2010, + .clock_core = 125000000, + .clock_mc3 = 133000000, + .clock_mc4 = 125000000, + .espi_nports = 1, + .clock_elmer0 = 44, + .mdio_mdien = 0, + .mdio_mdiinv = 0, + .mdio_mdc = 1, + .mdio_phybaseaddr = 0, + .gmac = &t1_pm3393_ops, + .gphy = &t1_mv88x201x_ops, + .mdio_ops = &mi1_mdio_ext_ops, + .desc = "Chelsio T210 1x10GBaseX TOE", + }, + + { + .board = CHBT_BOARD_CHT210, + .port_number = 1, + .caps = SUPPORTED_10000baseT_Full, + .chip_term = CHBT_TERM_T2, + .chip_mac = CHBT_MAC_PM3393, + .chip_phy = CHBT_PHY_MY3126, + .clock_core = 125000000, + .clock_mc3 = 133000000, + .clock_mc4 = 125000000, + .espi_nports = 1, + .clock_elmer0 = 44, + .mdio_mdien = 1, + .mdio_mdiinv = 1, + .mdio_mdc = 1, + .mdio_phybaseaddr = 1, + .gmac = &t1_pm3393_ops, + .gphy = &t1_my3126_ops, + .mdio_ops = &mi1_mdio_ext_ops, + .desc = "Chelsio T210 1x10GBase-CX4 TOE", + }, + +#ifdef CONFIG_CHELSIO_T1_1G + { + .board = CHBT_BOARD_CHN204, + .port_number = 4, + .caps = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full + | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + SUPPORTED_PAUSE | SUPPORTED_TP, + .chip_term = CHBT_TERM_T2, + .chip_mac = CHBT_MAC_VSC7321, + .chip_phy = CHBT_PHY_88E1111, + .clock_core = 100000000, + 
.espi_nports = 4, + .clock_elmer0 = 44, + .mdio_mdien = 0, + .mdio_mdiinv = 0, + .mdio_mdc = 0, + .mdio_phybaseaddr = 4, + .gmac = &t1_vsc7326_ops, + .gphy = &t1_mv88e1xxx_ops, + .mdio_ops = &mi1_mdio_ops, + .desc = "Chelsio N204 4x100/1000BaseT NIC", + }, +#endif + +}; + +DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = { + CH_DEVICE(8, 0, CH_BRD_T110_1CU), + CH_DEVICE(8, 1, CH_BRD_T110_1CU), + CH_DEVICE(7, 0, CH_BRD_N110_1F), + CH_DEVICE(10, 1, CH_BRD_N210_1F), + CH_DEVICE(11, 1, CH_BRD_T210_1F), + CH_DEVICE(14, 1, CH_BRD_T210_1CU), + CH_DEVICE(16, 1, CH_BRD_N204_4CU), + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, t1_pci_tbl); + +/* + * Return the board_info structure with a given index. Out-of-range indices + * return NULL. + */ +const struct board_info *t1_get_board_info(unsigned int board_id) +{ + return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL; +} + +struct chelsio_vpd_t { + u32 format_version; + u8 serial_number[16]; + u8 mac_base_address[6]; + u8 pad[2]; /* make multiple-of-4 size requirement explicit */ +}; + +#define EEPROMSIZE (8 * 1024) +#define EEPROM_MAX_POLL 4 + +/* + * Read SEEPROM. A zero is written to the flag register when the address is + * written to the Control register. The hardware device will set the flag to a + * one when 4B have been transferred to the Data register. + */ +int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data) +{ + int i = EEPROM_MAX_POLL; + u16 val; + u32 v; + + if (addr >= EEPROMSIZE || (addr & 3)) + return -EINVAL; + + pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr); + do { + udelay(50); + pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val); + } while (!(val & F_VPD_OP_FLAG) && --i); + + if (!(val & F_VPD_OP_FLAG)) { + pr_err("%s: reading EEPROM address 0x%x failed\n", + adapter->name, addr); + return -EIO; + } + pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, &v); + *data = cpu_to_le32(v); + return 0; +} + +static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd) +{ + int addr, ret = 0; + + for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32)) + ret = t1_seeprom_read(adapter, addr, + (__le32 *)((u8 *)vpd + addr)); + + return ret; +} + +/* + * Read a port's MAC address from the VPD ROM. + */ +static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[]) +{ + struct chelsio_vpd_t vpd; + + if (t1_eeprom_vpd_get(adapter, &vpd)) + return 1; + memcpy(mac_addr, vpd.mac_base_address, 5); + mac_addr[5] = vpd.mac_base_address[5] + index; + return 0; +} + +/* + * Set up the MAC/PHY according to the requested link settings. + * + * If the PHY can auto-negotiate first decide what to advertise, then + * enable/disable auto-negotiation as desired and reset. + * + * If the PHY does not auto-negotiate we just reset it. + * + * If auto-negotiation is off set the MAC to the proper speed/duplex/FC, + * otherwise do it later based on the outcome of auto-negotiation. 
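+ *
+ * The advertising bits below approximate the usual 802.3 pause scheme:
+ * symmetric RX|TX pause advertises PAUSE, RX-only advertises
+ * PAUSE|ASYM_PAUSE, and TX-only advertises ASYM_PAUSE alone.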
+ */ +int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) +{ + unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + + if (lc->supported & SUPPORTED_Autoneg) { + lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE); + if (fc) { + if (fc == ((PAUSE_RX | PAUSE_TX) & + (mac->adapter->params.nports < 2))) + lc->advertising |= ADVERTISED_PAUSE; + else { + lc->advertising |= ADVERTISED_ASYM_PAUSE; + if (fc == PAUSE_RX) + lc->advertising |= ADVERTISED_PAUSE; + } + } + phy->ops->advertise(phy, lc->advertising); + + if (lc->autoneg == AUTONEG_DISABLE) { + lc->speed = lc->requested_speed; + lc->duplex = lc->requested_duplex; + lc->fc = (unsigned char)fc; + mac->ops->set_speed_duplex_fc(mac, lc->speed, + lc->duplex, fc); + /* Also disables autoneg */ + phy->state = PHY_AUTONEG_RDY; + phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); + phy->ops->reset(phy, 0); + } else { + phy->state = PHY_AUTONEG_EN; + phy->ops->autoneg_enable(phy); /* also resets PHY */ + } + } else { + phy->state = PHY_AUTONEG_RDY; + mac->ops->set_speed_duplex_fc(mac, -1, -1, fc); + lc->fc = (unsigned char)fc; + phy->ops->reset(phy, 0); + } + return 0; +} + +/* + * External interrupt handler for boards using elmer0. + */ +int t1_elmer0_ext_intr_handler(adapter_t *adapter) +{ + struct cphy *phy; + int phy_cause; + u32 cause; + + t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause); + + switch (board_info(adapter)->board) { +#ifdef CONFIG_CHELSIO_T1_1G + case CHBT_BOARD_CHT204: + case CHBT_BOARD_CHT204E: + case CHBT_BOARD_CHN204: + case CHBT_BOARD_CHT204V: { + int i, port_bit; + for_each_port(adapter, i) { + port_bit = i + 1; + if (!(cause & (1 << port_bit))) + continue; + + phy = adapter->port[i].phy; + phy_cause = phy->ops->interrupt_handler(phy); + if (phy_cause & cphy_cause_link_change) + t1_link_changed(adapter, i); + } + break; + } + case CHBT_BOARD_CHT101: + if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ + phy = adapter->port[0].phy; + phy_cause = phy->ops->interrupt_handler(phy); + if (phy_cause & cphy_cause_link_change) + t1_link_changed(adapter, 0); + } + break; + case CHBT_BOARD_7500: { + int p; + /* + * Elmer0's interrupt cause isn't useful here because there is + * only one bit that can be set for all 4 ports. This means + * we are forced to check every PHY's interrupt status + * register to see who initiated the interrupt. + */ + for_each_port(adapter, p) { + phy = adapter->port[p].phy; + phy_cause = phy->ops->interrupt_handler(phy); + if (phy_cause & cphy_cause_link_change) + t1_link_changed(adapter, p); + } + break; + } +#endif + case CHBT_BOARD_CHT210: + case CHBT_BOARD_N210: + case CHBT_BOARD_N110: + if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */ + phy = adapter->port[0].phy; + phy_cause = phy->ops->interrupt_handler(phy); + if (phy_cause & cphy_cause_link_change) + t1_link_changed(adapter, 0); + } + break; + case CHBT_BOARD_8000: + case CHBT_BOARD_CHT110: + if (netif_msg_intr(adapter)) + dev_dbg(&adapter->pdev->dev, + "External interrupt cause 0x%x\n", cause); + if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ + struct cmac *mac = adapter->port[0].mac; + + mac->ops->interrupt_handler(mac); + } + if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */ + u32 mod_detect; + + t1_tpi_read(adapter, + A_ELMER0_GPI_STAT, &mod_detect); + if (netif_msg_link(adapter)) + dev_info(&adapter->pdev->dev, "XPAK %s\n", + mod_detect ? 
"removed" : "inserted"); + } + break; + } + t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); + return 0; +} + +/* Enables all interrupts. */ +void t1_interrupts_enable(adapter_t *adapter) +{ + unsigned int i; + + adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP; + + t1_sge_intr_enable(adapter->sge); + t1_tp_intr_enable(adapter->tp); + if (adapter->espi) { + adapter->slow_intr_mask |= F_PL_INTR_ESPI; + t1_espi_intr_enable(adapter->espi); + } + + /* Enable MAC/PHY interrupts for each port. */ + for_each_port(adapter, i) { + adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac); + adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy); + } + + /* Enable PCIX & external chip interrupts on ASIC boards. */ + if (t1_is_asic(adapter)) { + u32 pl_intr = readl(adapter->regs + A_PL_ENABLE); + + /* PCI-X interrupts */ + pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, + 0xffffffff); + + adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX; + pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX; + writel(pl_intr, adapter->regs + A_PL_ENABLE); + } +} + +/* Disables all interrupts. */ +void t1_interrupts_disable(adapter_t* adapter) +{ + unsigned int i; + + t1_sge_intr_disable(adapter->sge); + t1_tp_intr_disable(adapter->tp); + if (adapter->espi) + t1_espi_intr_disable(adapter->espi); + + /* Disable MAC/PHY interrupts for each port. */ + for_each_port(adapter, i) { + adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac); + adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy); + } + + /* Disable PCIX & external chip interrupts. */ + if (t1_is_asic(adapter)) + writel(0, adapter->regs + A_PL_ENABLE); + + /* PCI-X interrupts */ + pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); + + adapter->slow_intr_mask = 0; +} + +/* Clears all interrupts */ +void t1_interrupts_clear(adapter_t* adapter) +{ + unsigned int i; + + t1_sge_intr_clear(adapter->sge); + t1_tp_intr_clear(adapter->tp); + if (adapter->espi) + t1_espi_intr_clear(adapter->espi); + + /* Clear MAC/PHY interrupts for each port. */ + for_each_port(adapter, i) { + adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac); + adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy); + } + + /* Enable interrupts for external devices. */ + if (t1_is_asic(adapter)) { + u32 pl_intr = readl(adapter->regs + A_PL_CAUSE); + + writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX, + adapter->regs + A_PL_CAUSE); + } + + /* PCI-X interrupts */ + pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff); +} + +/* + * Slow path interrupt handler for ASICs. + */ +static int asic_slow_intr(adapter_t *adapter) +{ + u32 cause = readl(adapter->regs + A_PL_CAUSE); + + cause &= adapter->slow_intr_mask; + if (!cause) + return 0; + if (cause & F_PL_INTR_SGE_ERR) + t1_sge_intr_error_handler(adapter->sge); + if (cause & F_PL_INTR_TP) + t1_tp_intr_handler(adapter->tp); + if (cause & F_PL_INTR_ESPI) + t1_espi_intr_handler(adapter->espi); + if (cause & F_PL_INTR_PCIX) + t1_pci_intr_handler(adapter); + if (cause & F_PL_INTR_EXT) + t1_elmer0_ext_intr(adapter); + + /* Clear the interrupts just processed. */ + writel(cause, adapter->regs + A_PL_CAUSE); + readl(adapter->regs + A_PL_CAUSE); /* flush writes */ + return 1; +} + +int t1_slow_intr_handler(adapter_t *adapter) +{ +#ifdef CONFIG_CHELSIO_T1_1G + if (!t1_is_asic(adapter)) + return fpga_slow_intr(adapter); +#endif + return asic_slow_intr(adapter); +} + +/* Power sequencing is a work-around for Intel's XPAKs. 
*/ +static void power_sequence_xpak(adapter_t* adapter) +{ + u32 mod_detect; + u32 gpo; + + /* Check for XPAK */ + t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); + if (!(ELMER0_GP_BIT5 & mod_detect)) { + /* XPAK is present */ + t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); + gpo |= ELMER0_GP_BIT18; + t1_tpi_write(adapter, A_ELMER0_GPO, gpo); + } +} + +int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, + struct adapter_params *p) +{ + p->chip_version = bi->chip_term; + p->is_asic = (p->chip_version != CHBT_TERM_FPGA); + if (p->chip_version == CHBT_TERM_T1 || + p->chip_version == CHBT_TERM_T2 || + p->chip_version == CHBT_TERM_FPGA) { + u32 val = readl(adapter->regs + A_TP_PC_CONFIG); + + val = G_TP_PC_REV(val); + if (val == 2) + p->chip_revision = TERM_T1B; + else if (val == 3) + p->chip_revision = TERM_T2; + else + return -1; + } else + return -1; + return 0; +} + +/* + * Enable board components other than the Chelsio chip, such as external MAC + * and PHY. + */ +static int board_init(adapter_t *adapter, const struct board_info *bi) +{ + switch (bi->board) { + case CHBT_BOARD_8000: + case CHBT_BOARD_N110: + case CHBT_BOARD_N210: + case CHBT_BOARD_CHT210: + t1_tpi_par(adapter, 0xf); + t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); + break; + case CHBT_BOARD_CHT110: + t1_tpi_par(adapter, 0xf); + t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); + + /* TBD XXX Might not need. This fixes a problem + * described in the Intel SR XPAK errata. + */ + power_sequence_xpak(adapter); + break; +#ifdef CONFIG_CHELSIO_T1_1G + case CHBT_BOARD_CHT204E: + /* add config space write here */ + case CHBT_BOARD_CHT204: + case CHBT_BOARD_CHT204V: + case CHBT_BOARD_CHN204: + t1_tpi_par(adapter, 0xf); + t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); + break; + case CHBT_BOARD_CHT101: + case CHBT_BOARD_7500: + t1_tpi_par(adapter, 0xf); + t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); + break; +#endif + } + return 0; +} + +/* + * Initialize and configure the Terminator HW modules. Note that external + * MAC and PHYs are initialized separately. + */ +int t1_init_hw_modules(adapter_t *adapter) +{ + int err = -EIO; + const struct board_info *bi = board_info(adapter); + + if (!bi->clock_mc4) { + u32 val = readl(adapter->regs + A_MC4_CFG); + + writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG); + writel(F_M_BUS_ENABLE | F_TCAM_RESET, + adapter->regs + A_MC5_CONFIG); + } + + if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, + bi->espi_nports)) + goto out_err; + + if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core)) + goto out_err; + + err = t1_sge_configure(adapter->sge, &adapter->params.sge); + if (err) + goto out_err; + + err = 0; +out_err: + return err; +} + +/* + * Determine a card's PCI mode. + */ +static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) +{ + static const unsigned short speed_map[] = { 33, 66, 100, 133 }; + u32 pci_mode; + + pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); + p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)]; + p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32; + p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0; +} + +/* + * Release the structures holding the SW per-Terminator-HW-module state. 
+ */ +void t1_free_sw_modules(adapter_t *adapter) +{ + unsigned int i; + + for_each_port(adapter, i) { + struct cmac *mac = adapter->port[i].mac; + struct cphy *phy = adapter->port[i].phy; + + if (mac) + mac->ops->destroy(mac); + if (phy) + phy->ops->destroy(phy); + } + + if (adapter->sge) + t1_sge_destroy(adapter->sge); + if (adapter->tp) + t1_tp_destroy(adapter->tp); + if (adapter->espi) + t1_espi_destroy(adapter->espi); +} + +static void __devinit init_link_config(struct link_config *lc, + const struct board_info *bi) +{ + lc->supported = bi->caps; + lc->requested_speed = lc->speed = SPEED_INVALID; + lc->requested_duplex = lc->duplex = DUPLEX_INVALID; + lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; + if (lc->supported & SUPPORTED_Autoneg) { + lc->advertising = lc->supported; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->advertising = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +/* + * Allocate and initialize the data structures that hold the SW state of + * the Terminator HW modules. + */ +int __devinit t1_init_sw_modules(adapter_t *adapter, + const struct board_info *bi) +{ + unsigned int i; + + adapter->params.brd_info = bi; + adapter->params.nports = bi->port_number; + adapter->params.stats_update_period = bi->gmac->stats_update_period; + + adapter->sge = t1_sge_create(adapter, &adapter->params.sge); + if (!adapter->sge) { + pr_err("%s: SGE initialization failed\n", + adapter->name); + goto error; + } + + if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) { + pr_err("%s: ESPI initialization failed\n", + adapter->name); + goto error; + } + + adapter->tp = t1_tp_create(adapter, &adapter->params.tp); + if (!adapter->tp) { + pr_err("%s: TP initialization failed\n", + adapter->name); + goto error; + } + + board_init(adapter, bi); + bi->mdio_ops->init(adapter, bi); + if (bi->gphy->reset) + bi->gphy->reset(adapter); + if (bi->gmac->reset) + bi->gmac->reset(adapter); + + for_each_port(adapter, i) { + u8 hw_addr[6]; + struct cmac *mac; + int phy_addr = bi->mdio_phybaseaddr + i; + + adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev, + phy_addr, bi->mdio_ops); + if (!adapter->port[i].phy) { + pr_err("%s: PHY %d initialization failed\n", + adapter->name, i); + goto error; + } + + adapter->port[i].mac = mac = bi->gmac->create(adapter, i); + if (!mac) { + pr_err("%s: MAC %d initialization failed\n", + adapter->name, i); + goto error; + } + + /* + * Get the port's MAC addresses either from the EEPROM if one + * exists or the one hardcoded in the MAC. 
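+ *
+ * Each port's address is derived from the VPD base address by adding
+ * the port index to the last octet, so a (hypothetical) base of
+ * 00:07:43:00:00:10 yields ...:10, ...:11, ...:12, ...:13 for ports
+ * 0-3; see vpd_macaddress_get() above.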
+ */ + if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY) + mac->ops->macaddress_get(mac, hw_addr); + else if (vpd_macaddress_get(adapter, i, hw_addr)) { + pr_err("%s: could not read MAC address from VPD ROM\n", + adapter->port[i].dev->name); + goto error; + } + memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN); + init_link_config(&adapter->port[i].link_config, bi); + } + + get_pci_mode(adapter, &adapter->params.pci); + t1_interrupts_clear(adapter); + return 0; + +error: + t1_free_sw_modules(adapter); + return -1; +} diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h new file mode 100644 index 0000000..d0f87d8 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h @@ -0,0 +1,1643 @@ +/***************************************************************************** + * * + * File: suni1x10gexp_regs.h * + * $Revision: 1.9 $ * + * $Date: 2005/06/22 00:17:04 $ * + * Description: * + * PMC/SIERRA (pm3393) MAC-PHY functionality. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: PMC/SIERRA * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_SUNI1x10GEXP_REGS_H_ +#define _CXGB_SUNI1x10GEXP_REGS_H_ + +/* +** Space allocated for each Exact Match Filter +** There are 8 filter configurations +*/ +#define SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER 0x0003 + +#define mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER ) + +/* +** Space allocated for VLAN-Id Filter +** There are 8 filter configurations +*/ +#define SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER 0x0001 + +#define mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER ) + +/* +** Space allocated for each MSTAT Counter +*/ +#define SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT 0x0004 + +#define mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId) ( (countId) * SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT ) + + +/******************************************************************************/ +/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/ +/******************************************************************************/ +/* Refer to the Register Bit Masks below for the naming of each register and */ +/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit */ +/******************************************************************************/ + + +#define SUNI1x10GEXP_REG_IDENTIFICATION 0x0000 +#define SUNI1x10GEXP_REG_PRODUCT_REVISION 0x0001 +#define SUNI1x10GEXP_REG_CONFIG_AND_RESET_CONTROL 0x0002 +#define SUNI1x10GEXP_REG_LOOPBACK_MISC_CTRL 0x0003 +#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004 +#define SUNI1x10GEXP_REG_GLOBAL_PERFORMANCE_MONITOR_UPDATE 0x0005 + +#define 
SUNI1x10GEXP_REG_MDIO_COMMAND 0x0006 +#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_ENABLE 0x0007 +#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_STATUS 0x0008 +#define SUNI1x10GEXP_REG_MMD_PHY_ADDRESS 0x0009 +#define SUNI1x10GEXP_REG_MMD_CONTROL_ADDRESS_DATA 0x000A +#define SUNI1x10GEXP_REG_MDIO_READ_STATUS_DATA 0x000B + +#define SUNI1x10GEXP_REG_OAM_INTF_CTRL 0x000C +#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D +#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E +#define SUNI1x10GEXP_REG_FREE 0x000F + +#define SUNI1x10GEXP_REG_XTEF_MISC_CTRL 0x0010 +#define SUNI1x10GEXP_REG_XRF_MISC_CTRL 0x0011 + +#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_1 0x0100 +#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_2 0x0101 +#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102 +#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_VISIBLE 0x0103 +#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104 +#define SUNI1x10GEXP_REG_SERDES_3125_TEST_CONFIG 0x0107 + +#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040 +#define SUNI1x10GEXP_REG_RXXG_CONFIG_2 0x2041 +#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042 +#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043 +#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045 +#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046 +#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047 +#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048 +#define SUNI1x10GEXP_REG_RXXG_RECEIVE_FIFO_THRESHOLD 0x2049 +#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(filterId) (0x204A + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) +#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_MID(filterId) (0x204B + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) +#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_HIGH(filterId)(0x204C + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) +#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID(filterId) (0x2062 + mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId)) +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_LOW 0x204A +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_MID 0x204B +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_HIGH 0x204C +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_LOW 0x2050 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_MID 0x2051 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_HIGH 0x2052 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_LOW 0x2053 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_MID 0x2054 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_HIGH 0x2055 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_LOW 0x2056 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_MID 0x2057 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_HIGH 0x2058 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_LOW 0x2059 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_MID 0x205A +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_HIGH 0x205B +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_LOW 0x205C +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_MID 0x205D +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_HIGH 0x205E +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_LOW 0x205F +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_MID 0x2060 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_HIGH 0x2061 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_0 0x2062 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_1 0x2063 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_2 0x2064 +#define 
SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_3 0x2065 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_4 0x2066 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_5 0x2067 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_6 0x2068 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_7 0x2069 +#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A +#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B +#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C +#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D +#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E +#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_1 0x206F +#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070 + +#define SUNI1x10GEXP_REG_XRF_PATTERN_GEN_CTRL 0x2081 +#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_0 0x2084 +#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_1 0x2085 +#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_2 0x2086 +#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_3 0x2087 +#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088 +#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089 +#define SUNI1x10GEXP_REG_XRF_ERR_STATUS 0x208A +#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B +#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C +#define SUNI1x10GEXP_REG_XRF_CODE_ERR_THRES 0x2092 + +#define SUNI1x10GEXP_REG_RXOAM_CONFIG 0x20C0 +#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_CONFIG 0x20C1 +#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_CONFIG 0x20C2 +#define SUNI1x10GEXP_REG_RXOAM_CONFIG_2 0x20C3 +#define SUNI1x10GEXP_REG_RXOAM_HEC_CONFIG 0x20C4 +#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_THRES 0x20C5 +#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7 +#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8 +#define SUNI1x10GEXP_REG_RXOAM_STATUS 0x20C9 +#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_COUNT 0x20CA +#define SUNI1x10GEXP_REG_RXOAM_FIFO_OVERFLOW_COUNT 0x20CB +#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_LSB 0x20CC +#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_MSB 0x20CD +#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_LSB 0x20CE +#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_MSB 0x20CF +#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_LSB 0x20D0 +#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_MSB 0x20D1 +#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_LSB 0x20D2 +#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_MSB 0x20D3 +#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_LSB 0x20D4 +#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_MSB 0x20D5 +#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_LSB 0x20D6 +#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_MSB 0x20D7 + +#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104 +#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105 +#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106 +#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107 +#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_ADDRESS 0x2109 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_LOW 0x210A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_MIDDLE 0x210B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_HIGH 0x210C +#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId) (0x2110 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) +#define 
mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId) (0x2111 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) +#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId) (0x2112 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_MID 0x2111 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_HIGH 0x2112 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_RESVD 0x2113 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_MID 0x2115 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_HIGH 0x2116 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_RESVD 0x2117 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_LOW 0x2118 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_MID 0x2119 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_HIGH 0x211A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_RESVD 0x211B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_LOW 0x211C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_MID 0x211D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_HIGH 0x211E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_RESVD 0x211F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_MID 0x2121 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_HIGH 0x2122 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_RESVD 0x2123 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_MID 0x2125 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_HIGH 0x2126 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_RESVD 0x2127 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_MID 0x2129 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_HIGH 0x212A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_RESVD 0x212B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_LOW 0x212C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_MID 0x212D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_HIGH 0x212E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_RESVD 0x212F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_MID 0x2131 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_HIGH 0x2132 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_RESVD 0x2133 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_LOW 0x2134 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_MID 0x2135 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_HIGH 0x2136 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_RESVD 0x2137 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_MID 0x2139 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_HIGH 0x213A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_RESVD 0x213B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_MID 0x213D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_HIGH 0x213E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_RESVD 0x213F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_MID 0x2141 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_HIGH 0x2142 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_RESVD 0x2143 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_MID 0x2145 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_HIGH 0x2146 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_RESVD 0x2147 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_LOW 0x2148 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_MID 0x2149 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_HIGH 0x214A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_RESVD 0x214B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_MID 
0x214D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_HIGH 0x214E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_RESVD 0x214F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_MID 0x2151 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_HIGH 0x2152 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_RESVD 0x2153 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_MID 0x2155 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_HIGH 0x2156 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_RESVD 0x2157 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_MID 0x2159 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_HIGH 0x215A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_RESVD 0x215B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_LOW 0x215C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_MID 0x215D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_HIGH 0x215E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_RESVD 0x215F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_LOW 0x2160 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_MID 0x2161 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_HIGH 0x2162 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_RESVD 0x2163 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_LOW 0x2164 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_MID 0x2165 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_HIGH 0x2166 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_RESVD 0x2167 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_LOW 0x2168 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_MID 0x2169 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_HIGH 0x216A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_RESVD 0x216B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_LOW 0x216C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_MID 0x216D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_HIGH 0x216E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_RESVD 0x216F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_LOW 0x2170 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_MID 0x2171 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_HIGH 0x2172 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_RESVD 0x2173 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW 0x2174 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_MID 0x2175 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_HIGH 0x2176 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_RESVD 0x2177 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW 0x2178 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_MID 0x2179 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_HIGH 0x217a +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_RESVD 0x217b +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_LOW 0x217c +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_MID 0x217d +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_HIGH 0x217e +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_RESVD 0x217f +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_LOW 0x2180 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_MID 0x2181 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_HIGH 0x2182 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_RESVD 0x2183 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_LOW 0x2184 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_MID 0x2185 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_HIGH 0x2186 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_RESVD 0x2187 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_LOW 0x2188 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_MID 0x2189 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_HIGH 0x218A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_RESVD 0x218B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_LOW 0x218C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_MID 0x218D +#define 
SUNI1x10GEXP_REG_MSTAT_COUNTER_31_HIGH 0x218E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_RESVD 0x218F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_LOW 0x2190 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_MID 0x2191 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_HIGH 0x2192 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_RESVD 0x2193 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_MID 0x2195 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_HIGH 0x2196 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_RESVD 0x2197 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_LOW 0x2198 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_MID 0x2199 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_HIGH 0x219A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_RESVD 0x219B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_MID 0x219D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_HIGH 0x219E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_RESVD 0x219F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_MID 0x21A1 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_HIGH 0x21A2 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_RESVD 0x21A3 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_LOW 0x21A4 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_MID 0x21A5 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_HIGH 0x21A6 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_RESVD 0x21A7 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_MID 0x21A9 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_HIGH 0x21AA +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_RESVD 0x21AB +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_LOW 0x21AC +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_MID 0x21AD +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_HIGH 0x21AE +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_RESVD 0x21AF +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_MID 0x21B1 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_HIGH 0x21B2 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_RESVD 0x21B3 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_LOW 0x21B4 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_MID 0x21B5 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_HIGH 0x21B6 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_RESVD 0x21B7 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_MID 0x21B9 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_HIGH 0x21BA +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_RESVD 0x21BB +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_MID 0x21BD +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_HIGH 0x21BE +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_RESVD 0x21BF +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_LOW 0x21C0 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_MID 0x21C1 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_HIGH 0x21C2 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_RESVD 0x21C3 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_LOW 0x21C4 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_MID 0x21C5 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_HIGH 0x21C6 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_RESVD 0x21C7 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_LOW 0x21C8 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_MID 0x21C9 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_HIGH 0x21CA +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_RESVD 0x21CB +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_LOW 0x21CC +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_MID 0x21CD +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_HIGH 0x21CE 
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_RESVD 0x21CF +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_LOW 0x21D0 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_MID 0x21D1 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_HIGH 0x21D2 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_RESVD 0x21D3 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_LOW 0x21D4 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_MID 0x21D5 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_HIGH 0x21D6 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_RESVD 0x21D7 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_LOW 0x21D8 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_MID 0x21D9 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_HIGH 0x21DA +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_RESVD 0x21DB +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW 0x21DC +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_MID 0x21DD +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_HIGH 0x21DE +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_RESVD 0x21DF +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW 0x21E0 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_MID 0x21E1 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_HIGH 0x21E2 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_RESVD 0x21E3 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_LOW 0x21E4 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_MID 0x21E5 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_HIGH 0x21E6 +#define SUNI1x10GEXP_CNTR_MAC_ETHERNET_NUM 51 + +#define SUNI1x10GEXP_REG_IFLX_GLOBAL_CONFIG 0x2200 +#define SUNI1x10GEXP_REG_IFLX_CHANNEL_PROVISION 0x2201 +#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209 +#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A +#define SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS 0x220D +#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_LOW_LIMIT_PROVISION 0x220E +#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_HIGH_LIMIT 0x220F +#define SUNI1x10GEXP_REG_IFLX_INDIR_FULL_ALMOST_FULL_STATUS_LIMIT 0x2210 +#define SUNI1x10GEXP_REG_IFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_LIMIT 0x2211 + +#define SUNI1x10GEXP_REG_PL4MOS_CONFIG 0x2240 +#define SUNI1x10GEXP_REG_PL4MOS_MASK 0x2241 +#define SUNI1x10GEXP_REG_PL4MOS_FAIRNESS_MASKING 0x2242 +#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST1 0x2243 +#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST2 0x2244 +#define SUNI1x10GEXP_REG_PL4MOS_TRANSFER_SIZE 0x2245 + +#define SUNI1x10GEXP_REG_PL4ODP_CONFIG 0x2280 +#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282 +#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283 +#define SUNI1x10GEXP_REG_PL4ODP_CONFIG_MAX_T 0x2284 + +#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300 +#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301 +#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302 +#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_LIMITS 0x2303 +#define SUNI1x10GEXP_REG_PL4IO_CALENDAR_REPETITIONS 0x2304 +#define SUNI1x10GEXP_REG_PL4IO_CONFIG 0x2305 + +#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040 +#define SUNI1x10GEXP_REG_TXXG_CONFIG_2 0x3041 +#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042 +#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043 +#define SUNI1x10GEXP_REG_TXXG_STATUS 0x3044 +#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045 +#define SUNI1x10GEXP_REG_TXXG_MIN_FRAME_SIZE 0x3046 +#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047 +#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048 +#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049 +#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER 0x304D +#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER_INTERVAL 0x304E +#define SUNI1x10GEXP_REG_TXXG_FILTER_ERROR_COUNTER 0x3051 +#define SUNI1x10GEXP_REG_TXXG_PAUSE_QUANTUM_CONFIG 0x3052 + +#define SUNI1x10GEXP_REG_XTEF_CTRL 0x3080 
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084 +#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085 +#define SUNI1x10GEXP_REG_XTEF_VISIBILITY 0x3086 + +#define SUNI1x10GEXP_REG_TXOAM_OAM_CONFIG 0x30C0 +#define SUNI1x10GEXP_REG_TXOAM_MINI_RATE_CONFIG 0x30C1 +#define SUNI1x10GEXP_REG_TXOAM_MINI_GAP_FIFO_CONFIG 0x30C2 +#define SUNI1x10GEXP_REG_TXOAM_P1P2_STATIC_VALUES 0x30C3 +#define SUNI1x10GEXP_REG_TXOAM_P3P4_STATIC_VALUES 0x30C4 +#define SUNI1x10GEXP_REG_TXOAM_P5P6_STATIC_VALUES 0x30C5 +#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6 +#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7 +#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_LSB 0x30C8 +#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_MSB 0x30C9 +#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_LSB 0x30CA +#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_MSB 0x30CB +#define SUNI1x10GEXP_REG_TXOAM_P1P2_MINI_MASK 0x30CC +#define SUNI1x10GEXP_REG_TXOAM_P3P4_MINI_MASK 0x30CD +#define SUNI1x10GEXP_REG_TXOAM_P5P6_MINI_MASK 0x30CE +#define SUNI1x10GEXP_REG_TXOAM_COSET 0x30CF +#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_LSB 0x30D0 +#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_MSB 0x30D1 +#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_LSB 0x30D2 +#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_MSB 0x30D3 + + +#define SUNI1x10GEXP_REG_EFLX_GLOBAL_CONFIG 0x3200 +#define SUNI1x10GEXP_REG_EFLX_ERCU_GLOBAL_STATUS 0x3201 +#define SUNI1x10GEXP_REG_EFLX_INDIR_CHANNEL_ADDRESS 0x3202 +#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_LOW_LIMIT 0x3203 +#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_HIGH_LIMIT 0x3204 +#define SUNI1x10GEXP_REG_EFLX_INDIR_FULL_ALMOST_FULL_STATUS_AND_LIMIT 0x3205 +#define SUNI1x10GEXP_REG_EFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_AND_LIMIT 0x3206 +#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_CUT_THROUGH_THRESHOLD 0x3207 +#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C +#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D +#define SUNI1x10GEXP_REG_EFLX_CHANNEL_PROVISION 0x3210 + +#define SUNI1x10GEXP_REG_PL4IDU_CONFIG 0x3280 +#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282 +#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283 + + +/*----------------------------------------*/ +#define SUNI1x10GEXP_REG_MAX_OFFSET 0x3480 + +/******************************************************************************/ +/* -- End register offset definitions -- */ +/******************************************************************************/ + +/******************************************************************************/ +/** SUNI-1x10GE-XP REGISTER BIT MASKS **/ +/******************************************************************************/ + +#define SUNI1x10GEXP_BITMSK_BITS_1 0x00001 +#define SUNI1x10GEXP_BITMSK_BITS_2 0x00003 +#define SUNI1x10GEXP_BITMSK_BITS_3 0x00007 +#define SUNI1x10GEXP_BITMSK_BITS_4 0x0000f +#define SUNI1x10GEXP_BITMSK_BITS_5 0x0001f +#define SUNI1x10GEXP_BITMSK_BITS_6 0x0003f +#define SUNI1x10GEXP_BITMSK_BITS_7 0x0007f +#define SUNI1x10GEXP_BITMSK_BITS_8 0x000ff +#define SUNI1x10GEXP_BITMSK_BITS_9 0x001ff +#define SUNI1x10GEXP_BITMSK_BITS_10 0x003ff +#define SUNI1x10GEXP_BITMSK_BITS_11 0x007ff +#define SUNI1x10GEXP_BITMSK_BITS_12 0x00fff +#define SUNI1x10GEXP_BITMSK_BITS_13 0x01fff +#define SUNI1x10GEXP_BITMSK_BITS_14 0x03fff +#define SUNI1x10GEXP_BITMSK_BITS_15 0x07fff +#define SUNI1x10GEXP_BITMSK_BITS_16 0x0ffff + +#define mSUNI1x10GEXP_CLR_MSBITS_1(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_15) +#define mSUNI1x10GEXP_CLR_MSBITS_2(v) ((v) & 
SUNI1x10GEXP_BITMSK_BITS_14) +#define mSUNI1x10GEXP_CLR_MSBITS_3(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_13) +#define mSUNI1x10GEXP_CLR_MSBITS_4(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_12) +#define mSUNI1x10GEXP_CLR_MSBITS_5(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_11) +#define mSUNI1x10GEXP_CLR_MSBITS_6(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_10) +#define mSUNI1x10GEXP_CLR_MSBITS_7(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_9) +#define mSUNI1x10GEXP_CLR_MSBITS_8(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_8) +#define mSUNI1x10GEXP_CLR_MSBITS_9(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_7) +#define mSUNI1x10GEXP_CLR_MSBITS_10(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_6) +#define mSUNI1x10GEXP_CLR_MSBITS_11(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_5) +#define mSUNI1x10GEXP_CLR_MSBITS_12(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_4) +#define mSUNI1x10GEXP_CLR_MSBITS_13(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_3) +#define mSUNI1x10GEXP_CLR_MSBITS_14(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_2) +#define mSUNI1x10GEXP_CLR_MSBITS_15(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_1) + +#define mSUNI1x10GEXP_GET_BIT(val, bitMsk) (((val)&(bitMsk)) ? 1:0) + + + +/*---------------------------------------------------------------------------- + * Register 0x0001: S/UNI-1x10GE-XP Product Revision + * Bit 3-0 REVISION + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_REVISION 0x000F + +/*---------------------------------------------------------------------------- + * Register 0x0002: S/UNI-1x10GE-XP Configuration and Reset Control + * Bit 2 XAUI_ARESETB + * Bit 1 PL4_ARESETB + * Bit 0 DRESETB + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_XAUI_ARESET 0x0004 +#define SUNI1x10GEXP_BITMSK_PL4_ARESET 0x0002 +#define SUNI1x10GEXP_BITMSK_DRESETB 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0003: S/UNI-1x10GE-XP Loop Back and Miscellaneous Control + * Bit 11 PL4IO_OUTCLKSEL + * Bit 9 SYSPCSLB + * Bit 8 LINEPCSLB + * Bit 7 MSTAT_BYPASS + * Bit 6 RXXG_BYPASS + * Bit 5 TXXG_BYPASS + * Bit 4 SOP_PAD_EN + * Bit 1 LOS_INV + * Bit 0 OVERRIDE_LOS + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_OUTCLKSEL 0x0800 +#define SUNI1x10GEXP_BITMSK_SYSPCSLB 0x0200 +#define SUNI1x10GEXP_BITMSK_LINEPCSLB 0x0100 +#define SUNI1x10GEXP_BITMSK_MSTAT_BYPASS 0x0080 +#define SUNI1x10GEXP_BITMSK_RXXG_BYPASS 0x0040 +#define SUNI1x10GEXP_BITMSK_TXXG_BYPASS 0x0020 +#define SUNI1x10GEXP_BITMSK_SOP_PAD_EN 0x0010 +#define SUNI1x10GEXP_BITMSK_LOS_INV 0x0002 +#define SUNI1x10GEXP_BITMSK_OVERRIDE_LOS 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0004: S/UNI-1x10GE-XP Device Status + * Bit 9 TOP_SXRA_EXPIRED + * Bit 8 TOP_MDIO_BUSY + * Bit 7 TOP_DTRB + * Bit 6 TOP_EXPIRED + * Bit 5 TOP_PAUSED + * Bit 4 TOP_PL4_ID_DOOL + * Bit 3 TOP_PL4_IS_DOOL + * Bit 2 TOP_PL4_ID_ROOL + * Bit 1 TOP_PL4_IS_ROOL + * Bit 0 TOP_PL4_OUT_ROOL + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200 +#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY 0x0100 +#define SUNI1x10GEXP_BITMSK_TOP_DTRB 0x0080 +#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040 +#define SUNI1x10GEXP_BITMSK_TOP_PAUSED 0x0020 +#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010 +#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008 +#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004 +#define 
SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002 +#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0005: Global Performance Update and Clock Monitors + * Bit 15 TIP + * Bit 8 XAUI_REF_CLKA + * Bit 7 RXLANE3CLKA + * Bit 6 RXLANE2CLKA + * Bit 5 RXLANE1CLKA + * Bit 4 RXLANE0CLKA + * Bit 3 CSUCLKA + * Bit 2 TDCLKA + * Bit 1 RSCLKA + * Bit 0 RDCLKA + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TIP 0x8000 +#define SUNI1x10GEXP_BITMSK_XAUI_REF_CLKA 0x0100 +#define SUNI1x10GEXP_BITMSK_RXLANE3CLKA 0x0080 +#define SUNI1x10GEXP_BITMSK_RXLANE2CLKA 0x0040 +#define SUNI1x10GEXP_BITMSK_RXLANE1CLKA 0x0020 +#define SUNI1x10GEXP_BITMSK_RXLANE0CLKA 0x0010 +#define SUNI1x10GEXP_BITMSK_CSUCLKA 0x0008 +#define SUNI1x10GEXP_BITMSK_TDCLKA 0x0004 +#define SUNI1x10GEXP_BITMSK_RSCLKA 0x0002 +#define SUNI1x10GEXP_BITMSK_RDCLKA 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0006: MDIO Command + * Bit 4 MDIO_RDINC + * Bit 3 MDIO_RSTAT + * Bit 2 MDIO_LCTLD + * Bit 1 MDIO_LCTLA + * Bit 0 MDIO_SPRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MDIO_RDINC 0x0010 +#define SUNI1x10GEXP_BITMSK_MDIO_RSTAT 0x0008 +#define SUNI1x10GEXP_BITMSK_MDIO_LCTLD 0x0004 +#define SUNI1x10GEXP_BITMSK_MDIO_LCTLA 0x0002 +#define SUNI1x10GEXP_BITMSK_MDIO_SPRE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0007: MDIO Interrupt Enable + * Bit 0 MDIO_BUSY_EN + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MDIO_BUSY_EN 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0008: MDIO Interrupt Status + * Bit 0 MDIO_BUSYI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MDIO_BUSYI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0009: MMD PHY Address + * Bit 12-8 MDIO_DEVADR + * Bit 4-0 MDIO_PRTADR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MDIO_DEVADR 0x1F00 +#define SUNI1x10GEXP_BITOFF_MDIO_DEVADR 8 +#define SUNI1x10GEXP_BITMSK_MDIO_PRTADR 0x001F +#define SUNI1x10GEXP_BITOFF_MDIO_PRTADR 0 + +/*---------------------------------------------------------------------------- + * Register 0x000C: OAM Interface Control + * Bit 6 MDO_OD_ENB + * Bit 5 MDI_INV + * Bit 4 MDI_SEL + * Bit 3 RXOAMEN + * Bit 2 RXOAMCLKEN + * Bit 1 TXOAMEN + * Bit 0 TXOAMCLKEN + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MDO_OD_ENB 0x0040 +#define SUNI1x10GEXP_BITMSK_MDI_INV 0x0020 +#define SUNI1x10GEXP_BITMSK_MDI_SEL 0x0010 +#define SUNI1x10GEXP_BITMSK_RXOAMEN 0x0008 +#define SUNI1x10GEXP_BITMSK_RXOAMCLKEN 0x0004 +#define SUNI1x10GEXP_BITMSK_TXOAMEN 0x0002 +#define SUNI1x10GEXP_BITMSK_TXOAMCLKEN 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x000D: S/UNI-1x10GE-XP Master Interrupt Status + * Bit 15 TOP_PL4IO_INT + * Bit 14 TOP_IRAM_INT + * Bit 13 TOP_ERAM_INT + * Bit 12 TOP_XAUI_INT + * Bit 11 TOP_MSTAT_INT + * Bit 10 TOP_RXXG_INT + * Bit 9 TOP_TXXG_INT + * Bit 8 TOP_XRF_INT + * Bit 7 TOP_XTEF_INT + * Bit 6 TOP_MDIO_BUSY_INT + * Bit 5 
TOP_RXOAM_INT + * Bit 4 TOP_TXOAM_INT + * Bit 3 TOP_IFLX_INT + * Bit 2 TOP_EFLX_INT + * Bit 1 TOP_PL4ODP_INT + * Bit 0 TOP_PL4IDU_INT + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TOP_PL4IO_INT 0x8000 +#define SUNI1x10GEXP_BITMSK_TOP_IRAM_INT 0x4000 +#define SUNI1x10GEXP_BITMSK_TOP_ERAM_INT 0x2000 +#define SUNI1x10GEXP_BITMSK_TOP_XAUI_INT 0x1000 +#define SUNI1x10GEXP_BITMSK_TOP_MSTAT_INT 0x0800 +#define SUNI1x10GEXP_BITMSK_TOP_RXXG_INT 0x0400 +#define SUNI1x10GEXP_BITMSK_TOP_TXXG_INT 0x0200 +#define SUNI1x10GEXP_BITMSK_TOP_XRF_INT 0x0100 +#define SUNI1x10GEXP_BITMSK_TOP_XTEF_INT 0x0080 +#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY_INT 0x0040 +#define SUNI1x10GEXP_BITMSK_TOP_RXOAM_INT 0x0020 +#define SUNI1x10GEXP_BITMSK_TOP_TXOAM_INT 0x0010 +#define SUNI1x10GEXP_BITMSK_TOP_IFLX_INT 0x0008 +#define SUNI1x10GEXP_BITMSK_TOP_EFLX_INT 0x0004 +#define SUNI1x10GEXP_BITMSK_TOP_PL4ODP_INT 0x0002 +#define SUNI1x10GEXP_BITMSK_TOP_PL4IDU_INT 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x000E:PM3393 Global interrupt enable + * Bit 15 TOP_INTE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000 + +/*---------------------------------------------------------------------------- + * Register 0x0010: XTEF Miscellaneous Control + * Bit 7 RF_VAL + * Bit 6 RF_OVERRIDE + * Bit 5 LF_VAL + * Bit 4 LF_OVERRIDE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RF_VAL 0x0080 +#define SUNI1x10GEXP_BITMSK_RF_OVERRIDE 0x0040 +#define SUNI1x10GEXP_BITMSK_LF_VAL 0x0020 +#define SUNI1x10GEXP_BITMSK_LF_OVERRIDE 0x0010 +#define SUNI1x10GEXP_BITMSK_LFRF_OVERRIDE_VAL 0x00F0 + +/*---------------------------------------------------------------------------- + * Register 0x0011: XRF Miscellaneous Control + * Bit 6-4 EN_IDLE_REP + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EN_IDLE_REP 0x0070 + +/*---------------------------------------------------------------------------- + * Register 0x0100: SERDES 3125 Configuration Register 1 + * Bit 10 RXEQB_3 + * Bit 8 RXEQB_2 + * Bit 6 RXEQB_1 + * Bit 4 RXEQB_0 + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXEQB 0x0FF0 +#define SUNI1x10GEXP_BITOFF_RXEQB_3 10 +#define SUNI1x10GEXP_BITOFF_RXEQB_2 8 +#define SUNI1x10GEXP_BITOFF_RXEQB_1 6 +#define SUNI1x10GEXP_BITOFF_RXEQB_0 4 + +/*---------------------------------------------------------------------------- + * Register 0x0101: SERDES 3125 Configuration Register 2 + * Bit 12 YSEL + * Bit 7 PRE_EMPH_3 + * Bit 6 PRE_EMPH_2 + * Bit 5 PRE_EMPH_1 + * Bit 4 PRE_EMPH_0 + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_YSEL 0x1000 +#define SUNI1x10GEXP_BITMSK_PRE_EMPH 0x00F0 +#define SUNI1x10GEXP_BITMSK_PRE_EMPH_3 0x0080 +#define SUNI1x10GEXP_BITMSK_PRE_EMPH_2 0x0040 +#define SUNI1x10GEXP_BITMSK_PRE_EMPH_1 0x0020 +#define SUNI1x10GEXP_BITMSK_PRE_EMPH_0 0x0010 + +/*---------------------------------------------------------------------------- + * Register 0x0102: SERDES 3125 Interrupt Enable Register + * Bit 3 LASIE + * Bit 2 SPLL_RAE + * Bit 1 MPLL_RAE + * Bit 0 PLL_LOCKE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LASIE 0x0008 +#define 
SUNI1x10GEXP_BITMSK_SPLL_RAE 0x0004 +#define SUNI1x10GEXP_BITMSK_MPLL_RAE 0x0002 +#define SUNI1x10GEXP_BITMSK_PLL_LOCKE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0103: SERDES 3125 Interrupt Visibility Register + * Bit 3 LASIV + * Bit 2 SPLL_RAV + * Bit 1 MPLL_RAV + * Bit 0 PLL_LOCKV + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LASIV 0x0008 +#define SUNI1x10GEXP_BITMSK_SPLL_RAV 0x0004 +#define SUNI1x10GEXP_BITMSK_MPLL_RAV 0x0002 +#define SUNI1x10GEXP_BITMSK_PLL_LOCKV 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0104: SERDES 3125 Interrupt Status Register + * Bit 3 LASII + * Bit 2 SPLL_RAI + * Bit 1 MPLL_RAI + * Bit 0 PLL_LOCKI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LASII 0x0008 +#define SUNI1x10GEXP_BITMSK_SPLL_RAI 0x0004 +#define SUNI1x10GEXP_BITMSK_MPLL_RAI 0x0002 +#define SUNI1x10GEXP_BITMSK_PLL_LOCKI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0107: SERDES 3125 Test Configuration + * Bit 12 DUALTX + * Bit 10 HC_1 + * Bit 9 HC_0 + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_DUALTX 0x1000 +#define SUNI1x10GEXP_BITMSK_HC 0x0600 +#define SUNI1x10GEXP_BITOFF_HC_0 9 + +/*---------------------------------------------------------------------------- + * Register 0x2040: RXXG Configuration 1 + * Bit 15 RXXG_RXEN + * Bit 14 RXXG_ROCF + * Bit 13 RXXG_PAD_STRIP + * Bit 10 RXXG_PUREP + * Bit 9 RXXG_LONGP + * Bit 8 RXXG_PARF + * Bit 7 RXXG_FLCHK + * Bit 5 RXXG_PASS_CTRL + * Bit 3 RXXG_CRC_STRIP + * Bit 2-0 RXXG_MIFG + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000 +#define SUNI1x10GEXP_BITMSK_RXXG_ROCF 0x4000 +#define SUNI1x10GEXP_BITMSK_RXXG_PAD_STRIP 0x2000 +#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400 +#define SUNI1x10GEXP_BITMSK_RXXG_LONGP 0x0200 +#define SUNI1x10GEXP_BITMSK_RXXG_PARF 0x0100 +#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080 +#define SUNI1x10GEXP_BITMSK_RXXG_PASS_CTRL 0x0020 +#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008 + +/*---------------------------------------------------------------------------- + * Register 0x02041: RXXG Configuration 2 + * Bit 7-0 RXXG_HDRSIZE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_HDRSIZE 0x00FF + +/*---------------------------------------------------------------------------- + * Register 0x2042: RXXG Configuration 3 + * Bit 15 RXXG_MIN_LERRE + * Bit 14 RXXG_MAX_LERRE + * Bit 12 RXXG_LINE_ERRE + * Bit 10 RXXG_RX_OVRE + * Bit 9 RXXG_ADR_FILTERE + * Bit 8 RXXG_ERR_FILTERE + * Bit 5 RXXG_PRMB_ERRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRE 0x8000 +#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRE 0x4000 +#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRE 0x1000 +#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRE 0x0400 +#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERE 0x0200 +#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERRE 0x0100 +#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020 + +/*---------------------------------------------------------------------------- + * Register 0x2043: RXXG Interrupt + * Bit 15 RXXG_MIN_LERRI + * Bit 14 RXXG_MAX_LERRI + * Bit 12 RXXG_LINE_ERRI + * 
Bit 10 RXXG_RX_OVRI + * Bit 9 RXXG_ADR_FILTERI + * Bit 8 RXXG_ERR_FILTERI + * Bit 5 RXXG_PRMB_ERRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRI 0x8000 +#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRI 0x4000 +#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRI 0x1000 +#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRI 0x0400 +#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERI 0x0200 +#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERI 0x0100 +#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020 + +/*---------------------------------------------------------------------------- + * Register 0x2049: RXXG Receive FIFO Threshold + * Bit 2-0 RXXG_CUT_THRU + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_CUT_THRU 0x0007 +#define SUNI1x10GEXP_BITOFF_RXXG_CUT_THRU 0 + +/*---------------------------------------------------------------------------- + * Register 0x2062H - 0x2069: RXXG Exact Match VID + * Bit 11-0 RXXG_VID_MATCH + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_VID_MATCH 0x0FFF +#define SUNI1x10GEXP_BITOFF_RXXG_VID_MATCH 0 + +/*---------------------------------------------------------------------------- + * Register 0x206EH - 0x206F: RXXG Address Filter Control + * Bit 3 RXXG_FORWARD_ENABLE + * Bit 2 RXXG_VLAN_ENABLE + * Bit 1 RXXG_SRC_ADDR + * Bit 0 RXXG_MATCH_ENABLE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_FORWARD_ENABLE 0x0008 +#define SUNI1x10GEXP_BITMSK_RXXG_VLAN_ENABLE 0x0004 +#define SUNI1x10GEXP_BITMSK_RXXG_SRC_ADDR 0x0002 +#define SUNI1x10GEXP_BITMSK_RXXG_MATCH_ENABLE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2070: RXXG Address Filter Control 2 + * Bit 1 RXXG_PMODE + * Bit 0 RXXG_MHASH_EN + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002 +#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2081: XRF Control Register 2 + * Bit 6 EN_PKT_GEN + * Bit 4-2 PATT + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EN_PKT_GEN 0x0040 +#define SUNI1x10GEXP_BITMSK_PATT 0x001C +#define SUNI1x10GEXP_BITOFF_PATT 2 + +/*---------------------------------------------------------------------------- + * Register 0x2088: XRF Interrupt Enable + * Bit 12-9 LANE_HICERE + * Bit 8-5 HS_SD_LANEE + * Bit 4 ALIGN_STATUS_ERRE + * Bit 3-0 LANE_SYNC_STAT_ERRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LANE_HICERE 0x1E00 +#define SUNI1x10GEXP_BITOFF_LANE_HICERE 9 +#define SUNI1x10GEXP_BITMSK_HS_SD_LANEE 0x01E0 +#define SUNI1x10GEXP_BITOFF_HS_SD_LANEE 5 +#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRE 0x0010 +#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRE 0x000F +#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRE 0 + +/*---------------------------------------------------------------------------- + * Register 0x2089: XRF Interrupt Status + * Bit 12-9 LANE_HICERI + * Bit 8-5 HS_SD_LANEI + * Bit 4 ALIGN_STATUS_ERRI + * Bit 3-0 LANE_SYNC_STAT_ERRI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LANE_HICERI 0x1E00 +#define SUNI1x10GEXP_BITOFF_LANE_HICERI 9 
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANEI 0x01E0 +#define SUNI1x10GEXP_BITOFF_HS_SD_LANEI 5 +#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRI 0x0010 +#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRI 0x000F +#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRI 0 + +/*---------------------------------------------------------------------------- + * Register 0x208A: XRF Error Status + * Bit 8-5 HS_SD_LANE + * Bit 4 ALIGN_STATUS_ERR + * Bit 3-0 LANE_SYNC_STAT_ERR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_HS_SD_LANE3 0x0100 +#define SUNI1x10GEXP_BITMSK_HS_SD_LANE2 0x0080 +#define SUNI1x10GEXP_BITMSK_HS_SD_LANE1 0x0040 +#define SUNI1x10GEXP_BITMSK_HS_SD_LANE0 0x0020 +#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERR 0x0010 +#define SUNI1x10GEXP_BITMSK_LANE3_SYNC_STAT_ERR 0x0008 +#define SUNI1x10GEXP_BITMSK_LANE2_SYNC_STAT_ERR 0x0004 +#define SUNI1x10GEXP_BITMSK_LANE1_SYNC_STAT_ERR 0x0002 +#define SUNI1x10GEXP_BITMSK_LANE0_SYNC_STAT_ERR 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x208B: XRF Diagnostic Interrupt Enable + * Bit 7-4 LANE_OVERRUNE + * Bit 3-0 LANE_UNDERRUNE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNE 0x00F0 +#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNE 4 +#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNE 0x000F +#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNE 0 + +/*---------------------------------------------------------------------------- + * Register 0x208C: XRF Diagnostic Interrupt Status + * Bit 7-4 LANE_OVERRUNI + * Bit 3-0 LANE_UNDERRUNI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNI 0x00F0 +#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNI 4 +#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNI 0x000F +#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNI 0 + +/*---------------------------------------------------------------------------- + * Register 0x20C0: RXOAM Configuration + * Bit 15 RXOAM_BUSY + * Bit 14-12 RXOAM_F2_SEL + * Bit 10-8 RXOAM_F1_SEL + * Bit 7-6 RXOAM_FILTER_CTRL + * Bit 5-0 RXOAM_PX_EN + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_BUSY 0x8000 +#define SUNI1x10GEXP_BITMSK_RXOAM_F2_SEL 0x7000 +#define SUNI1x10GEXP_BITOFF_RXOAM_F2_SEL 12 +#define SUNI1x10GEXP_BITMSK_RXOAM_F1_SEL 0x0700 +#define SUNI1x10GEXP_BITOFF_RXOAM_F1_SEL 8 +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_CTRL 0x00C0 +#define SUNI1x10GEXP_BITOFF_RXOAM_FILTER_CTRL 6 +#define SUNI1x10GEXP_BITMSK_RXOAM_PX_EN 0x003F +#define SUNI1x10GEXP_BITOFF_RXOAM_PX_EN 0 + +/*---------------------------------------------------------------------------- + * Register 0x20C1,0x20C2: RXOAM Filter Configuration + * Bit 15-8 RXOAM_FX_MASK + * Bit 7-0 RXOAM_FX_VAL + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_FX_MASK 0xFF00 +#define SUNI1x10GEXP_BITOFF_RXOAM_FX_MASK 8 +#define SUNI1x10GEXP_BITMSK_RXOAM_FX_VAL 0x00FF +#define SUNI1x10GEXP_BITOFF_RXOAM_FX_VAl 0 + +/*---------------------------------------------------------------------------- + * Register 0x20C3: RXOAM Configuration Register 2 + * Bit 13 RXOAM_REC_BYTE_VAL + * Bit 11-10 RXOAM_BYPASS_MODE + * Bit 5-0 RXOAM_PX_CLEAR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_REC_BYTE_VAL 0x2000 +#define 
SUNI1x10GEXP_BITMSK_RXOAM_BYPASS_MODE 0x0C00 +#define SUNI1x10GEXP_BITOFF_RXOAM_BYPASS_MODE 10 +#define SUNI1x10GEXP_BITMSK_RXOAM_PX_CLEAR 0x003F +#define SUNI1x10GEXP_BITOFF_RXOAM_PX_CLEAR 0 + +/*---------------------------------------------------------------------------- + * Register 0x20C4: RXOAM HEC Configuration + * Bit 15-8 RXOAM_COSET + * Bit 2 RXOAM_HEC_ERR_PKT + * Bit 0 RXOAM_HEC_EN + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_COSET 0xFF00 +#define SUNI1x10GEXP_BITOFF_RXOAM_COSET 8 +#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_ERR_PKT 0x0004 +#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_EN 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x20C7: RXOAM Interrupt Enable + * Bit 10 RXOAM_FILTER_THRSHE + * Bit 9 RXOAM_OAM_ERRE + * Bit 8 RXOAM_HECE_THRSHE + * Bit 7 RXOAM_SOPE + * Bit 6 RXOAM_RFE + * Bit 5 RXOAM_LFE + * Bit 4 RXOAM_DV_ERRE + * Bit 3 RXOAM_DATA_INVALIDE + * Bit 2 RXOAM_FILTER_DROPE + * Bit 1 RXOAM_HECE + * Bit 0 RXOAM_OFLE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHE 0x0400 +#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRE 0x0200 +#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHE 0x0100 +#define SUNI1x10GEXP_BITMSK_RXOAM_SOPE 0x0080 +#define SUNI1x10GEXP_BITMSK_RXOAM_RFE 0x0040 +#define SUNI1x10GEXP_BITMSK_RXOAM_LFE 0x0020 +#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRE 0x0010 +#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDE 0x0008 +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPE 0x0004 +#define SUNI1x10GEXP_BITMSK_RXOAM_HECE 0x0002 +#define SUNI1x10GEXP_BITMSK_RXOAM_OFLE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x20C8: RXOAM Interrupt Status + * Bit 10 RXOAM_FILTER_THRSHI + * Bit 9 RXOAM_OAM_ERRI + * Bit 8 RXOAM_HECE_THRSHI + * Bit 7 RXOAM_SOPI + * Bit 6 RXOAM_RFI + * Bit 5 RXOAM_LFI + * Bit 4 RXOAM_DV_ERRI + * Bit 3 RXOAM_DATA_INVALIDI + * Bit 2 RXOAM_FILTER_DROPI + * Bit 1 RXOAM_HECI + * Bit 0 RXOAM_OFLI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHI 0x0400 +#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRI 0x0200 +#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHI 0x0100 +#define SUNI1x10GEXP_BITMSK_RXOAM_SOPI 0x0080 +#define SUNI1x10GEXP_BITMSK_RXOAM_RFI 0x0040 +#define SUNI1x10GEXP_BITMSK_RXOAM_LFI 0x0020 +#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRI 0x0010 +#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDI 0x0008 +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPI 0x0004 +#define SUNI1x10GEXP_BITMSK_RXOAM_HECI 0x0002 +#define SUNI1x10GEXP_BITMSK_RXOAM_OFLI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x20C9: RXOAM Status + * Bit 10 RXOAM_FILTER_THRSHV + * Bit 8 RXOAM_HECE_THRSHV + * Bit 6 RXOAM_RFV + * Bit 5 RXOAM_LFV + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHV 0x0400 +#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHV 0x0100 +#define SUNI1x10GEXP_BITMSK_RXOAM_RFV 0x0040 +#define SUNI1x10GEXP_BITMSK_RXOAM_LFV 0x0020 + +/*---------------------------------------------------------------------------- + * Register 0x2100: MSTAT Control + * Bit 2 MSTAT_WRITE + * Bit 1 MSTAT_CLEAR + * Bit 0 MSTAT_SNAP + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE 0x0004 
+#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002 +#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2109: MSTAT Counter Write Address + * Bit 5-0 MSTAT_WRITE_ADDRESS + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE_ADDRESS 0x003F +#define SUNI1x10GEXP_BITOFF_MSTAT_WRITE_ADDRESS 0 + +/*---------------------------------------------------------------------------- + * Register 0x2200: IFLX Global Configuration Register + * Bit 15 IFLX_IRCU_ENABLE + * Bit 14 IFLX_IDSWT_ENABLE + * Bit 13-0 IFLX_IFD_CNT + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_IRCU_ENABLE 0x8000 +#define SUNI1x10GEXP_BITMSK_IFLX_IDSWT_ENABLE 0x4000 +#define SUNI1x10GEXP_BITMSK_IFLX_IFD_CNT 0x3FFF +#define SUNI1x10GEXP_BITOFF_IFLX_IFD_CNT 0 + +/*---------------------------------------------------------------------------- + * Register 0x2209: IFLX FIFO Overflow Enable + * Bit 0 IFLX_OVFE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_OVFE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x220A: IFLX FIFO Overflow Interrupt + * Bit 0 IFLX_OVFI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_OVFI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x220D: IFLX Indirect Channel Address + * Bit 15 IFLX_BUSY + * Bit 14 IFLX_RWB + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_BUSY 0x8000 +#define SUNI1x10GEXP_BITMSK_IFLX_RWB 0x4000 + +/*---------------------------------------------------------------------------- + * Register 0x220E: IFLX Indirect Logical FIFO Low Limit & Provision + * Bit 9-0 IFLX_LOLIM + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_LOLIM 0x03FF +#define SUNI1x10GEXP_BITOFF_IFLX_LOLIM 0 + +/*---------------------------------------------------------------------------- + * Register 0x220F: IFLX Indirect Logical FIFO High Limit + * Bit 9-0 IFLX_HILIM + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_HILIM 0x03FF +#define SUNI1x10GEXP_BITOFF_IFLX_HILIM 0 + +/*---------------------------------------------------------------------------- + * Register 0x2210: IFLX Indirect Full/Almost Full Status & Limit + * Bit 15 IFLX_FULL + * Bit 14 IFLX_AFULL + * Bit 13-0 IFLX_AFTH + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_FULL 0x8000 +#define SUNI1x10GEXP_BITMSK_IFLX_AFULL 0x4000 +#define SUNI1x10GEXP_BITMSK_IFLX_AFTH 0x3FFF +#define SUNI1x10GEXP_BITOFF_IFLX_AFTH 0 + +/*---------------------------------------------------------------------------- + * Register 0x2211: IFLX Indirect Empty/Almost Empty Status & Limit + * Bit 15 IFLX_EMPTY + * Bit 14 IFLX_AEMPTY + * Bit 13-0 IFLX_AETH + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_EMPTY 0x8000 +#define SUNI1x10GEXP_BITMSK_IFLX_AEMPTY 0x4000 +#define SUNI1x10GEXP_BITMSK_IFLX_AETH 0x3FFF +#define SUNI1x10GEXP_BITOFF_IFLX_AETH 0 + 
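The MSTAT definitions above encode the access pattern this block supports: each statistics counter is wider than the chip's 16-bit registers, so it is split across LOW/MID/HIGH words spaced SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT (4) addresses apart, and the MSTAT_SNAP bit of the MSTAT Control register (0x2100) latches the counters so a multi-word read stays coherent. A minimal sketch of that read sequence, assuming hypothetical pmread()/pmwrite() 16-bit register accessors that are not part of this patch:

	/*
	 * Illustrative sketch only -- pmread()/pmwrite() are assumed 16-bit
	 * register accessors and are not defined by this patch.
	 */
	static u64 mstat_read_counter(adapter_t *adapter, int countId)
	{
		u32 lo, mid, hi;

		/* Take a snapshot of the statistics counters before reading. */
		pmwrite(adapter, SUNI1x10GEXP_REG_MSTAT_CONTROL,
			SUNI1x10GEXP_BITMSK_MSTAT_SNAP);

		/* Assemble one counter from its three 16-bit words. */
		pmread(adapter, mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId), &lo);
		pmread(adapter, mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId), &mid);
		pmread(adapter, mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId), &hi);

		return ((u64)hi << 32) | ((u64)mid << 16) | lo;
	}

SUNI1x10GEXP_CNTR_MAC_ETHERNET_NUM (51) above gives the number of Ethernet MAC counters laid out this way.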
+/*---------------------------------------------------------------------------- + * Register 0x2240: PL4MOS Configuration Register + * Bit 3 PL4MOS_RE_INIT + * Bit 2 PL4MOS_EN + * Bit 1 PL4MOS_NO_STATUS + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4MOS_RE_INIT 0x0008 +#define SUNI1x10GEXP_BITMSK_PL4MOS_EN 0x0004 +#define SUNI1x10GEXP_BITMSK_PL4MOS_NO_STATUS 0x0002 + +/*---------------------------------------------------------------------------- + * Register 0x2243: PL4MOS MaxBurst1 Register + * Bit 11-0 PL4MOS_MAX_BURST1 + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST1 0x0FFF +#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST1 0 + +/*---------------------------------------------------------------------------- + * Register 0x2244: PL4MOS MaxBurst2 Register + * Bit 11-0 PL4MOS_MAX_BURST2 + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST2 0x0FFF +#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST2 0 + +/*---------------------------------------------------------------------------- + * Register 0x2245: PL4MOS Transfer Size Register + * Bit 7-0 PL4MOS_MAX_TRANSFER + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_TRANSFER 0x00FF +#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_TRANSFER 0 + +/*---------------------------------------------------------------------------- + * Register 0x2280: PL4ODP Configuration + * Bit 15-12 PL4ODP_REPEAT_T + * Bit 8 PL4ODP_SOP_RULE + * Bit 1 PL4ODP_EN_PORTS + * Bit 0 PL4ODP_EN_DFWD + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4ODP_REPEAT_T 0xF000 +#define SUNI1x10GEXP_BITOFF_PL4ODP_REPEAT_T 12 +#define SUNI1x10GEXP_BITMSK_PL4ODP_SOP_RULE 0x0100 +#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_PORTS 0x0002 +#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_DFWD 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2282: PL4ODP Interrupt Mask + * Bit 0 PL4ODP_OUT_DISE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISE 0x0001 + + + +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBE 0x0080 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPE 0x0040 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPE 0x0008 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPE 0x0004 +#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRE 0x0002 + + +/*---------------------------------------------------------------------------- + * Register 0x2283: PL4ODP Interrupt + * Bit 0 PL4ODP_OUT_DISI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISI 0x0001 + + + +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBI 0x0080 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPI 0x0040 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPI 0x0008 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPI 0x0004 +#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRI 0x0002 + +/*---------------------------------------------------------------------------- + * Register 0x2300: PL4IO Lock Detect Status + * Bit 15 PL4IO_OUT_ROOLV + * Bit 12 PL4IO_IS_ROOLV + * Bit 11 PL4IO_DIP2_ERRV + * Bit 8 PL4IO_ID_ROOLV + * Bit 4 PL4IO_IS_DOOLV + * Bit 0 PL4IO_ID_DOOLV + *----------------------------------------------------------------------------*/ +#define 
SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLV 0x8000 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLV 0x1000 +#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRV 0x0800 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLV 0x0100 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLV 0x0010 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLV 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2301: PL4IO Lock Detect Change + * Bit 15 PL4IO_OUT_ROOLI + * Bit 12 PL4IO_IS_ROOLI + * Bit 11 PL4IO_DIP2_ERRI + * Bit 8 PL4IO_ID_ROOLI + * Bit 4 PL4IO_IS_DOOLI + * Bit 0 PL4IO_ID_DOOLI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLI 0x8000 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLI 0x1000 +#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRI 0x0800 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLI 0x0100 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLI 0x0010 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2302: PL4IO Lock Detect Mask + * Bit 15 PL4IO_OUT_ROOLE + * Bit 12 PL4IO_IS_ROOLE + * Bit 11 PL4IO_DIP2_ERRE + * Bit 8 PL4IO_ID_ROOLE + * Bit 4 PL4IO_IS_DOOLE + * Bit 0 PL4IO_ID_DOOLE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLE 0x8000 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLE 0x1000 +#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRE 0x0800 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLE 0x0100 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLE 0x0010 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2303: PL4IO Lock Detect Limits + * Bit 15-8 PL4IO_REF_LIMIT + * Bit 7-0 PL4IO_TRAN_LIMIT + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_REF_LIMIT 0xFF00 +#define SUNI1x10GEXP_BITOFF_PL4IO_REF_LIMIT 8 +#define SUNI1x10GEXP_BITMSK_PL4IO_TRAN_LIMIT 0x00FF +#define SUNI1x10GEXP_BITOFF_PL4IO_TRAN_LIMIT 0 + +/*---------------------------------------------------------------------------- + * Register 0x2304: PL4IO Calendar Repetitions + * Bit 15-8 PL4IO_IN_MUL + * Bit 7-0 PL4IO_OUT_MUL + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_IN_MUL 0xFF00 +#define SUNI1x10GEXP_BITOFF_PL4IO_IN_MUL 8 +#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_MUL 0x00FF +#define SUNI1x10GEXP_BITOFF_PL4IO_OUT_MUL 0 + +/*---------------------------------------------------------------------------- + * Register 0x2305: PL4IO Configuration + * Bit 15 PL4IO_DIP2_ERR_CHK + * Bit 11 PL4IO_ODAT_DIS + * Bit 10 PL4IO_TRAIN_DIS + * Bit 9 PL4IO_OSTAT_DIS + * Bit 8 PL4IO_ISTAT_DIS + * Bit 7 PL4IO_NO_ISTAT + * Bit 6 PL4IO_STAT_OUTSEL + * Bit 5 PL4IO_INSEL + * Bit 4 PL4IO_DLSEL + * Bit 1-0 PL4IO_OUTSEL + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERR_CHK 0x8000 +#define SUNI1x10GEXP_BITMSK_PL4IO_ODAT_DIS 0x0800 +#define SUNI1x10GEXP_BITMSK_PL4IO_TRAIN_DIS 0x0400 +#define SUNI1x10GEXP_BITMSK_PL4IO_OSTAT_DIS 0x0200 +#define SUNI1x10GEXP_BITMSK_PL4IO_ISTAT_DIS 0x0100 +#define SUNI1x10GEXP_BITMSK_PL4IO_NO_ISTAT 0x0080 +#define SUNI1x10GEXP_BITMSK_PL4IO_STAT_OUTSEL 0x0040 +#define SUNI1x10GEXP_BITMSK_PL4IO_INSEL 0x0020 +#define SUNI1x10GEXP_BITMSK_PL4IO_DLSEL 0x0010 +#define SUNI1x10GEXP_BITMSK_PL4IO_OUTSEL 0x0003 +#define 
SUNI1x10GEXP_BITOFF_PL4IO_OUTSEL 0 + +/*---------------------------------------------------------------------------- + * Register 0x3040: TXXG Configuration Register 1 + * Bit 15 TXXG_TXEN0 + * Bit 13 TXXG_HOSTPAUSE + * Bit 12-7 TXXG_IPGT + * Bit 5 TXXG_32BIT_ALIGN + * Bit 4 TXXG_CRCEN + * Bit 3 TXXG_FCTX + * Bit 2 TXXG_FCRX + * Bit 1 TXXG_PADEN + * Bit 0 TXXG_SPRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000 +#define SUNI1x10GEXP_BITMSK_TXXG_HOSTPAUSE 0x2000 +#define SUNI1x10GEXP_BITMSK_TXXG_IPGT 0x1F80 +#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7 +#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020 +#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010 +#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008 +#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004 +#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002 +#define SUNI1x10GEXP_BITMSK_TXXG_SPRE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3041: TXXG Configuration Register 2 + * Bit 7-0 TXXG_HDRSIZE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_HDRSIZE 0x00FF + +/*---------------------------------------------------------------------------- + * Register 0x3042: TXXG Configuration Register 3 + * Bit 15 TXXG_FIFO_ERRE + * Bit 14 TXXG_FIFO_UDRE + * Bit 13 TXXG_MAX_LERRE + * Bit 12 TXXG_MIN_LERRE + * Bit 11 TXXG_XFERE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRE 0x8000 +#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRE 0x4000 +#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRE 0x2000 +#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRE 0x1000 +#define SUNI1x10GEXP_BITMSK_TXXG_XFERE 0x0800 + +/*---------------------------------------------------------------------------- + * Register 0x3043: TXXG Interrupt + * Bit 15 TXXG_FIFO_ERRI + * Bit 14 TXXG_FIFO_UDRI + * Bit 13 TXXG_MAX_LERRI + * Bit 12 TXXG_MIN_LERRI + * Bit 11 TXXG_XFERI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRI 0x8000 +#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRI 0x4000 +#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRI 0x2000 +#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRI 0x1000 +#define SUNI1x10GEXP_BITMSK_TXXG_XFERI 0x0800 + +/*---------------------------------------------------------------------------- + * Register 0x3044: TXXG Status Register + * Bit 1 TXXG_TXACTIVE + * Bit 0 TXXG_PAUSED + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_TXACTIVE 0x0002 +#define SUNI1x10GEXP_BITMSK_TXXG_PAUSED 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3046: TXXG TX_MINFR - Transmit Min Frame Size Register + * Bit 7-0 TXXG_TX_MINFR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_TX_MINFR 0x00FF +#define SUNI1x10GEXP_BITOFF_TXXG_TX_MINFR 0 + +/*---------------------------------------------------------------------------- + * Register 0x3052: TXXG Pause Quantum Value Configuration Register + * Bit 7-0 TXXG_FC_PAUSE_QNTM + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_FC_PAUSE_QNTM 0x00FF +#define SUNI1x10GEXP_BITOFF_TXXG_FC_PAUSE_QNTM 0 + +/*---------------------------------------------------------------------------- + * Register 0x3080: 
XTEF Control + * Bit 3-0 XTEF_FORCE_PARITY_ERR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_XTEF_FORCE_PARITY_ERR 0x000F +#define SUNI1x10GEXP_BITOFF_XTEF_FORCE_PARITY_ERR 0 + +/*---------------------------------------------------------------------------- + * Register 0x3084: XTEF Interrupt Event Register + * Bit 0 XTEF_LOST_SYNCI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3085: XTEF Interrupt Enable Register + * Bit 0 XTEF_LOST_SYNCE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3086: XTEF Visibility Register + * Bit 0 XTEF_LOST_SYNCV + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCV 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x30C0: TXOAM OAM Configuration + * Bit 15 TXOAM_HEC_EN + * Bit 14 TXOAM_EMPTYCODE_EN + * Bit 13 TXOAM_FORCE_IDLE + * Bit 12 TXOAM_IGNORE_IDLE + * Bit 11-6 TXOAM_PX_OVERWRITE + * Bit 5-0 TXOAM_PX_SEL + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_HEC_EN 0x8000 +#define SUNI1x10GEXP_BITMSK_TXOAM_EMPTYCODE_EN 0x4000 +#define SUNI1x10GEXP_BITMSK_TXOAM_FORCE_IDLE 0x2000 +#define SUNI1x10GEXP_BITMSK_TXOAM_IGNORE_IDLE 0x1000 +#define SUNI1x10GEXP_BITMSK_TXOAM_PX_OVERWRITE 0x0FC0 +#define SUNI1x10GEXP_BITOFF_TXOAM_PX_OVERWRITE 6 +#define SUNI1x10GEXP_BITMSK_TXOAM_PX_SEL 0x003F +#define SUNI1x10GEXP_BITOFF_TXOAM_PX_SEL 0 + +/*---------------------------------------------------------------------------- + * Register 0x30C1: TXOAM Mini-Packet Rate Configuration + * Bit 15 TXOAM_MINIDIS + * Bit 14 TXOAM_BUSY + * Bit 13 TXOAM_TRANS_EN + * Bit 10-0 TXOAM_MINIRATE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_MINIDIS 0x8000 +#define SUNI1x10GEXP_BITMSK_TXOAM_BUSY 0x4000 +#define SUNI1x10GEXP_BITMSK_TXOAM_TRANS_EN 0x2000 +#define SUNI1x10GEXP_BITMSK_TXOAM_MINIRATE 0x07FF + +/*---------------------------------------------------------------------------- + * Register 0x30C2: TXOAM Mini-Packet Gap and FIFO Configuration + * Bit 13-10 TXOAM_FTHRESH + * Bit 9-6 TXOAM_MINIPOST + * Bit 5-0 TXOAM_MINIPRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_FTHRESH 0x3C00 +#define SUNI1x10GEXP_BITOFF_TXOAM_FTHRESH 10 +#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPOST 0x03C0 +#define SUNI1x10GEXP_BITOFF_TXOAM_MINIPOST 6 +#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPRE 0x003F + +/*---------------------------------------------------------------------------- + * Register 0x30C6: TXOAM Interrupt Enable + * Bit 2 TXOAM_SOP_ERRE + * Bit 1 TXOAM_OFLE + * Bit 0 TXOAM_ERRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRE 0x0004 +#define SUNI1x10GEXP_BITMSK_TXOAM_OFLE 0x0002 +#define SUNI1x10GEXP_BITMSK_TXOAM_ERRE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x30C7: TXOAM Interrupt Status + * Bit 2 TXOAM_SOP_ERRI + * 
Bit 1 TXOAM_OFLI + * Bit 0 TXOAM_ERRI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRI 0x0004 +#define SUNI1x10GEXP_BITMSK_TXOAM_OFLI 0x0002 +#define SUNI1x10GEXP_BITMSK_TXOAM_ERRI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x30CF: TXOAM Coset + * Bit 7-0 TXOAM_COSET + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_COSET 0x00FF + +/*---------------------------------------------------------------------------- + * Register 0x3200: EFLX Global Configuration + * Bit 15 EFLX_ERCU_EN + * Bit 7 EFLX_EN_EDSWT + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_ERCU_EN 0x8000 +#define SUNI1x10GEXP_BITMSK_EFLX_EN_EDSWT 0x0080 + +/*---------------------------------------------------------------------------- + * Register 0x3201: EFLX ERCU Global Status + * Bit 13 EFLX_OVF_ERR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_OVF_ERR 0x2000 + +/*---------------------------------------------------------------------------- + * Register 0x3202: EFLX Indirect Channel Address + * Bit 15 EFLX_BUSY + * Bit 14 EFLX_RDWRB + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_BUSY 0x8000 +#define SUNI1x10GEXP_BITMSK_EFLX_RDWRB 0x4000 + +/*---------------------------------------------------------------------------- + * Register 0x3203: EFLX Indirect Logical FIFO Low Limit + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_LOLIM 0x03FF +#define SUNI1x10GEXP_BITOFF_EFLX_LOLIM 0 + +/*---------------------------------------------------------------------------- + * Register 0x3204: EFLX Indirect Logical FIFO High Limit + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_HILIM 0x03FF +#define SUNI1x10GEXP_BITOFF_EFLX_HILIM 0 + +/*---------------------------------------------------------------------------- + * Register 0x3205: EFLX Indirect Full/Almost-Full Status and Limit + * Bit 15 EFLX_FULL + * Bit 14 EFLX_AFULL + * Bit 13-0 EFLX_AFTH + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_FULL 0x8000 +#define SUNI1x10GEXP_BITMSK_EFLX_AFULL 0x4000 +#define SUNI1x10GEXP_BITMSK_EFLX_AFTH 0x3FFF +#define SUNI1x10GEXP_BITOFF_EFLX_AFTH 0 + +/*---------------------------------------------------------------------------- + * Register 0x3206: EFLX Indirect Empty/Almost-Empty Status and Limit + * Bit 15 EFLX_EMPTY + * Bit 14 EFLX_AEMPTY + * Bit 13-0 EFLX_AETH + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_EMPTY 0x8000 +#define SUNI1x10GEXP_BITMSK_EFLX_AEMPTY 0x4000 +#define SUNI1x10GEXP_BITMSK_EFLX_AETH 0x3FFF +#define SUNI1x10GEXP_BITOFF_EFLX_AETH 0 + +/*---------------------------------------------------------------------------- + * Register 0x3207: EFLX Indirect FIFO Cut-Through Threshold + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_CUT_THRU 0x3FFF +#define SUNI1x10GEXP_BITOFF_EFLX_CUT_THRU 0 + +/*---------------------------------------------------------------------------- + * Register 0x320C: EFLX FIFO Overflow 
Error Enable + * Bit 0 EFLX_OVFE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_OVFE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x320D: EFLX FIFO Overflow Error Indication + * Bit 0 EFLX_OVFI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_OVFI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3210: EFLX Channel Provision + * Bit 0 EFLX_PROV + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_PROV 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3280: PL4IDU Configuration + * Bit 2 PL4IDU_SYNCH_ON_TRAIN + * Bit 1 PL4IDU_EN_PORTS + * Bit 0 PL4IDU_EN_DFWD + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IDU_SYNCH_ON_TRAIN 0x0004 +#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_PORTS 0x0002 +#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_DFWD 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3282: PL4IDU Interrupt Mask + * Bit 1 PL4IDU_DIP4E + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4E 0x0002 + +/*---------------------------------------------------------------------------- + * Register 0x3283: PL4IDU Interrupt + * Bit 1 PL4IDU_DIP4I + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I 0x0002 + +#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */ + diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.c b/drivers/net/ethernet/chelsio/cxgb/tp.c new file mode 100644 index 0000000..8bed4a5 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/tp.c @@ -0,0 +1,171 @@ +/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */ +#include "common.h" +#include "regs.h" +#include "tp.h" +#ifdef CONFIG_CHELSIO_T1_1G +#include "fpga_defs.h" +#endif + +struct petp { + adapter_t *adapter; +}; + +/* Pause deadlock avoidance parameters */ +#define DROP_MSEC 16 +#define DROP_PKTS_CNT 1 + +static void tp_init(adapter_t * ap, const struct tp_params *p, + unsigned int tp_clk) +{ + u32 val; + + if (!t1_is_asic(ap)) + return; + + val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM | + F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET; + if (!p->pm_size) + val |= F_OFFLOAD_DISABLE; + else + val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM; + writel(val, ap->regs + A_TP_IN_CONFIG); + writel(F_TP_OUT_CSPI_CPL | + F_TP_OUT_ESPI_ETHERNET | + F_TP_OUT_ESPI_GENERATE_IP_CSUM | + F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG); + writel(V_IP_TTL(64) | + F_PATH_MTU /* IP DF bit */ | + V_5TUPLE_LOOKUP(p->use_5tuple_mode) | + V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG); + /* + * Enable pause frame deadlock prevention. 
+ */ + if (is_T2(ap) && ap->params.nports > 1) { + u32 drop_ticks = DROP_MSEC * (tp_clk / 1000); + + writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR | + V_DROP_TICKS_CNT(drop_ticks) | + V_NUM_PKTS_DROPPED(DROP_PKTS_CNT), + ap->regs + A_TP_TX_DROP_CONFIG); + } +} + +void t1_tp_destroy(struct petp *tp) +{ + kfree(tp); +} + +struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p) +{ + struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); + + if (!tp) + return NULL; + + tp->adapter = adapter; + + return tp; +} + +void t1_tp_intr_enable(struct petp *tp) +{ + u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE); + +#ifdef CONFIG_CHELSIO_T1_1G + if (!t1_is_asic(tp->adapter)) { + /* FPGA */ + writel(0xffffffff, + tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE); + writel(tp_intr | FPGA_PCIX_INTERRUPT_TP, + tp->adapter->regs + A_PL_ENABLE); + } else +#endif + { + /* We don't use any TP interrupts */ + writel(0, tp->adapter->regs + A_TP_INT_ENABLE); + writel(tp_intr | F_PL_INTR_TP, + tp->adapter->regs + A_PL_ENABLE); + } +} + +void t1_tp_intr_disable(struct petp *tp) +{ + u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE); + +#ifdef CONFIG_CHELSIO_T1_1G + if (!t1_is_asic(tp->adapter)) { + /* FPGA */ + writel(0, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE); + writel(tp_intr & ~FPGA_PCIX_INTERRUPT_TP, + tp->adapter->regs + A_PL_ENABLE); + } else +#endif + { + writel(0, tp->adapter->regs + A_TP_INT_ENABLE); + writel(tp_intr & ~F_PL_INTR_TP, + tp->adapter->regs + A_PL_ENABLE); + } +} + +void t1_tp_intr_clear(struct petp *tp) +{ +#ifdef CONFIG_CHELSIO_T1_1G + if (!t1_is_asic(tp->adapter)) { + writel(0xffffffff, + tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); + writel(FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_CAUSE); + return; + } +#endif + writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE); + writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE); +} + +int t1_tp_intr_handler(struct petp *tp) +{ + u32 cause; + +#ifdef CONFIG_CHELSIO_T1_1G + /* FPGA doesn't support TP interrupts. */ + if (!t1_is_asic(tp->adapter)) + return 1; +#endif + + cause = readl(tp->adapter->regs + A_TP_INT_CAUSE); + writel(cause, tp->adapter->regs + A_TP_INT_CAUSE); + return 0; +} + +static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable) +{ + u32 val = readl(tp->adapter->regs + A_TP_GLOBAL_CONFIG); + + if (enable) + val |= csum_bit; + else + val &= ~csum_bit; + writel(val, tp->adapter->regs + A_TP_GLOBAL_CONFIG); +} + +void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable) +{ + set_csum_offload(tp, F_IP_CSUM, enable); +} + +void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable) +{ + set_csum_offload(tp, F_TCP_CSUM, enable); +} + +/* + * Initialize TP state. tp_params contains initial settings for some TP + * parameters, particularly the one-time PM and CM settings. 
+ */ +int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk) +{ + adapter_t *adapter = tp->adapter; + + tp_init(adapter, p, tp_clk); + writel(F_TP_RESET, adapter->regs + A_TP_RESET); + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h new file mode 100644 index 0000000..dfd8ce2 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/tp.h @@ -0,0 +1,72 @@ +/* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */ +#ifndef CHELSIO_TP_H +#define CHELSIO_TP_H + +#include "common.h" + +#define TP_MAX_RX_COALESCING_SIZE 16224U + +struct tp_mib_statistics { + + /* IP */ + u32 ipInReceive_hi; + u32 ipInReceive_lo; + u32 ipInHdrErrors_hi; + u32 ipInHdrErrors_lo; + u32 ipInAddrErrors_hi; + u32 ipInAddrErrors_lo; + u32 ipInUnknownProtos_hi; + u32 ipInUnknownProtos_lo; + u32 ipInDiscards_hi; + u32 ipInDiscards_lo; + u32 ipInDelivers_hi; + u32 ipInDelivers_lo; + u32 ipOutRequests_hi; + u32 ipOutRequests_lo; + u32 ipOutDiscards_hi; + u32 ipOutDiscards_lo; + u32 ipOutNoRoutes_hi; + u32 ipOutNoRoutes_lo; + u32 ipReasmTimeout; + u32 ipReasmReqds; + u32 ipReasmOKs; + u32 ipReasmFails; + + u32 reserved[8]; + + /* TCP */ + u32 tcpActiveOpens; + u32 tcpPassiveOpens; + u32 tcpAttemptFails; + u32 tcpEstabResets; + u32 tcpOutRsts; + u32 tcpCurrEstab; + u32 tcpInSegs_hi; + u32 tcpInSegs_lo; + u32 tcpOutSegs_hi; + u32 tcpOutSegs_lo; + u32 tcpRetransSeg_hi; + u32 tcpRetransSeg_lo; + u32 tcpInErrs_hi; + u32 tcpInErrs_lo; + u32 tcpRtoMin; + u32 tcpRtoMax; +}; + +struct petp; +struct tp_params; + +struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p); +void t1_tp_destroy(struct petp *tp); + +void t1_tp_intr_disable(struct petp *tp); +void t1_tp_intr_enable(struct petp *tp); +void t1_tp_intr_clear(struct petp *tp); +int t1_tp_intr_handler(struct petp *tp); + +void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps); +void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable); +void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable); +int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size); +int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk); +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c new file mode 100644 index 0000000..b0cb388 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c @@ -0,0 +1,730 @@ +/* $Date: 2006/04/28 19:20:06 $ $RCSfile: vsc7326.c,v $ $Revision: 1.19 $ */ + +/* Driver for Vitesse VSC7326 (Schaumburg) MAC */ + +#include "gmac.h" +#include "elmer0.h" +#include "vsc7326_reg.h" + +/* Update fast changing statistics every 15 seconds */ +#define STATS_TICK_SECS 15 +/* 30 minutes for full statistics update */ +#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS) + +#define MAX_MTU 9600 + +/* The egress WM value 0x01a01fff should be used only when the + * interface is down (MAC port disabled). This is a workaround + * for disabling the T2/MAC flow-control. When the interface is + * enabled, the WM value should be set to 0x014a03F0. 
+ */
+#define WM_DISABLE	0x01a01fff
+#define WM_ENABLE	0x014a03F0
+
+struct init_table {
+	u32 addr;
+	u32 data;
+};
+
+struct _cmac_instance {
+	u32 index;
+	u32 ticks;
+};
+
+#define INITBLOCK_SLEEP	0xffffffff
+
+static void vsc_read(adapter_t *adapter, u32 addr, u32 *val)
+{
+	u32 status, vlo, vhi;
+	int i;
+
+	spin_lock_bh(&adapter->mac_lock);
+	t1_tpi_read(adapter, (addr << 2) + 4, &vlo);
+	i = 0;
+	do {
+		t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
+		t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
+		status = (vhi << 16) | vlo;
+		i++;
+	} while (((status & 1) == 0) && (i < 50));
+	if (i == 50)
+		pr_err("Invalid tpi read from MAC, breaking loop.\n");
+
+	t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo);
+	t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi);
+
+	*val = (vhi << 16) | vlo;
+
+	/* pr_err("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
+		((addr&0xe000)>>13), ((addr&0x1e00)>>9),
+		((addr&0x01fe)>>1), *val); */
+	spin_unlock_bh(&adapter->mac_lock);
+}
+
+static void vsc_write(adapter_t *adapter, u32 addr, u32 data)
+{
+	spin_lock_bh(&adapter->mac_lock);
+	t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF);
+	t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF);
+	/* pr_err("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
+		((addr&0xe000)>>13), ((addr&0x1e00)>>9),
+		((addr&0x01fe)>>1), data); */
+	spin_unlock_bh(&adapter->mac_lock);
+}
+
+/* Hard reset the MAC. This wipes out *all* configuration. */
+static void vsc7326_full_reset(adapter_t *adapter)
+{
+	u32 val;
+	u32 result = 0xffff;
+
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~1;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(2);
+	val |= 0x1;	/* Enable the MAC itself */
+	val |= 0x800;	/* Turn off the red LED */
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	mdelay(1);
+	vsc_write(adapter, REG_SW_RESET, 0x80000001);
+	do {
+		mdelay(1);
+		vsc_read(adapter, REG_SW_RESET, &result);
+	} while (result != 0x0);
+}
+
+static struct init_table vsc7326_reset[] = {
+	{ REG_IFACE_MODE, 0x00000000 },
+	{ REG_CRC_CFG, 0x00000020 },
+	{ REG_PLL_CLK_SPEED, 0x00050c00 },
+	{ REG_PLL_CLK_SPEED, 0x00050c00 },
+	{ REG_MSCH, 0x00002f14 },
+	{ REG_SPI4_MISC, 0x00040409 },
+	{ REG_SPI4_DESKEW, 0x00080000 },
+	{ REG_SPI4_ING_SETUP2, 0x08080004 },
+	{ REG_SPI4_ING_SETUP0, 0x04111004 },
+	{ REG_SPI4_EGR_SETUP0, 0x80001a04 },
+	{ REG_SPI4_ING_SETUP1, 0x02010000 },
+	{ REG_AGE_INC(0), 0x00000000 },
+	{ REG_AGE_INC(1), 0x00000000 },
+	{ REG_ING_CONTROL, 0x0a200011 },
+	{ REG_EGR_CONTROL, 0xa0010091 },
+};
+
+static struct init_table vsc7326_portinit[4][22] = {
+	{ /* Port 0 */
+		/* FIFO setup */
+		{ REG_DBG(0), 0x000004f0 },
+		{ REG_HDX(0), 0x00073101 },
+		{ REG_TEST(0,0), 0x00000022 },
+		{ REG_TEST(1,0), 0x00000022 },
+		{ REG_TOP_BOTTOM(0,0), 0x003f0000 },
+		{ REG_TOP_BOTTOM(1,0), 0x00120000 },
+		{ REG_HIGH_LOW_WM(0,0), 0x07460757 },
+		{ REG_HIGH_LOW_WM(1,0), WM_DISABLE },
+		{ REG_CT_THRHLD(0,0), 0x00000000 },
+		{ REG_CT_THRHLD(1,0), 0x00000000 },
+		{ REG_BUCKE(0), 0x0002ffff },
+		{ REG_BUCKI(0), 0x0002ffff },
+		{ REG_TEST(0,0), 0x00000020 },
+		{ REG_TEST(1,0), 0x00000020 },
+		/* Port config */
+		{ REG_MAX_LEN(0), 0x00002710 },
+		{ REG_PORT_FAIL(0), 0x00000002 },
+		{ REG_NORMALIZER(0), 0x00000a64 },
+		{ REG_DENORM(0), 0x00000010 },
+		{ REG_STICK_BIT(0), 0x03baa370 },
+		{ REG_DEV_SETUP(0), 0x00000083 },
+		{ REG_DEV_SETUP(0), 0x00000082 },
+		{ REG_MODE_CFG(0), 0x0200259f },
+	},
+	{ /* Port 1 */
+		/* FIFO setup */
+		{ REG_DBG(1), 0x000004f0 },
+		{ REG_HDX(1), 0x00073101 },
+		{ REG_TEST(0,1),
0x00000022 }, + { REG_TEST(1,1), 0x00000022 }, + { REG_TOP_BOTTOM(0,1), 0x007e003f }, + { REG_TOP_BOTTOM(1,1), 0x00240012 }, + { REG_HIGH_LOW_WM(0,1), 0x07460757 }, + { REG_HIGH_LOW_WM(1,1), WM_DISABLE }, + { REG_CT_THRHLD(0,1), 0x00000000 }, + { REG_CT_THRHLD(1,1), 0x00000000 }, + { REG_BUCKE(1), 0x0002ffff }, + { REG_BUCKI(1), 0x0002ffff }, + { REG_TEST(0,1), 0x00000020 }, + { REG_TEST(1,1), 0x00000020 }, + /* Port config */ + { REG_MAX_LEN(1), 0x00002710 }, + { REG_PORT_FAIL(1), 0x00000002 }, + { REG_NORMALIZER(1), 0x00000a64 }, + { REG_DENORM(1), 0x00000010 }, + { REG_STICK_BIT(1), 0x03baa370 }, + { REG_DEV_SETUP(1), 0x00000083 }, + { REG_DEV_SETUP(1), 0x00000082 }, + { REG_MODE_CFG(1), 0x0200259f }, + }, + { /* Port 2 */ + /* FIFO setup */ + { REG_DBG(2), 0x000004f0 }, + { REG_HDX(2), 0x00073101 }, + { REG_TEST(0,2), 0x00000022 }, + { REG_TEST(1,2), 0x00000022 }, + { REG_TOP_BOTTOM(0,2), 0x00bd007e }, + { REG_TOP_BOTTOM(1,2), 0x00360024 }, + { REG_HIGH_LOW_WM(0,2), 0x07460757 }, + { REG_HIGH_LOW_WM(1,2), WM_DISABLE }, + { REG_CT_THRHLD(0,2), 0x00000000 }, + { REG_CT_THRHLD(1,2), 0x00000000 }, + { REG_BUCKE(2), 0x0002ffff }, + { REG_BUCKI(2), 0x0002ffff }, + { REG_TEST(0,2), 0x00000020 }, + { REG_TEST(1,2), 0x00000020 }, + /* Port config */ + { REG_MAX_LEN(2), 0x00002710 }, + { REG_PORT_FAIL(2), 0x00000002 }, + { REG_NORMALIZER(2), 0x00000a64 }, + { REG_DENORM(2), 0x00000010 }, + { REG_STICK_BIT(2), 0x03baa370 }, + { REG_DEV_SETUP(2), 0x00000083 }, + { REG_DEV_SETUP(2), 0x00000082 }, + { REG_MODE_CFG(2), 0x0200259f }, + }, + { /* Port 3 */ + /* FIFO setup */ + { REG_DBG(3), 0x000004f0 }, + { REG_HDX(3), 0x00073101 }, + { REG_TEST(0,3), 0x00000022 }, + { REG_TEST(1,3), 0x00000022 }, + { REG_TOP_BOTTOM(0,3), 0x00fc00bd }, + { REG_TOP_BOTTOM(1,3), 0x00480036 }, + { REG_HIGH_LOW_WM(0,3), 0x07460757 }, + { REG_HIGH_LOW_WM(1,3), WM_DISABLE }, + { REG_CT_THRHLD(0,3), 0x00000000 }, + { REG_CT_THRHLD(1,3), 0x00000000 }, + { REG_BUCKE(3), 0x0002ffff }, + { REG_BUCKI(3), 0x0002ffff }, + { REG_TEST(0,3), 0x00000020 }, + { REG_TEST(1,3), 0x00000020 }, + /* Port config */ + { REG_MAX_LEN(3), 0x00002710 }, + { REG_PORT_FAIL(3), 0x00000002 }, + { REG_NORMALIZER(3), 0x00000a64 }, + { REG_DENORM(3), 0x00000010 }, + { REG_STICK_BIT(3), 0x03baa370 }, + { REG_DEV_SETUP(3), 0x00000083 }, + { REG_DEV_SETUP(3), 0x00000082 }, + { REG_MODE_CFG(3), 0x0200259f }, + }, +}; + +static void run_table(adapter_t *adapter, struct init_table *ib, int len) +{ + int i; + + for (i = 0; i < len; i++) { + if (ib[i].addr == INITBLOCK_SLEEP) { + udelay( ib[i].data ); + pr_err("sleep %d us\n",ib[i].data); + } else + vsc_write( adapter, ib[i].addr, ib[i].data ); + } +} + +static int bist_rd(adapter_t *adapter, int moduleid, int address) +{ + int data = 0; + u32 result = 0; + + if ((address != 0x0) && + (address != 0x1) && + (address != 0x2) && + (address != 0xd) && + (address != 0xe)) + pr_err("No bist address: 0x%x\n", address); + + data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) | + ((moduleid & 0xff) << 0)); + vsc_write(adapter, REG_RAM_BIST_CMD, data); + + udelay(10); + + vsc_read(adapter, REG_RAM_BIST_RESULT, &result); + if ((result & (1 << 9)) != 0x0) + pr_err("Still in bist read: 0x%x\n", result); + else if ((result & (1 << 8)) != 0x0) + pr_err("bist read error: 0x%x\n", result); + + return result & 0xff; +} + +static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) +{ + int data = 0; + u32 result = 0; + + if ((address != 0x0) && + (address != 0x1) && + (address != 0x2) && + (address != 
0xd) && + (address != 0xe)) + pr_err("No bist address: 0x%x\n", address); + + if (value > 255) + pr_err("Suspicious write out of range value: 0x%x\n", value); + + data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) | + ((moduleid & 0xff) << 0)); + vsc_write(adapter, REG_RAM_BIST_CMD, data); + + udelay(5); + + vsc_read(adapter, REG_RAM_BIST_CMD, &result); + if ((result & (1 << 27)) != 0x0) + pr_err("Still in bist write: 0x%x\n", result); + else if ((result & (1 << 26)) != 0x0) + pr_err("bist write error: 0x%x\n", result); + + return 0; +} + +static int run_bist(adapter_t *adapter, int moduleid) +{ + /*run bist*/ + (void) bist_wr(adapter,moduleid, 0x00, 0x02); + (void) bist_wr(adapter,moduleid, 0x01, 0x01); + + return 0; +} + +static int check_bist(adapter_t *adapter, int moduleid) +{ + int result=0; + int column=0; + /*check bist*/ + result = bist_rd(adapter,moduleid, 0x02); + column = ((bist_rd(adapter,moduleid, 0x0e)<<8) + + (bist_rd(adapter,moduleid, 0x0d))); + if ((result & 3) != 0x3) + pr_err("Result: 0x%x BIST error in ram %d, column: 0x%04x\n", + result, moduleid, column); + return 0; +} + +static int enable_mem(adapter_t *adapter, int moduleid) +{ + /*enable mem*/ + (void) bist_wr(adapter,moduleid, 0x00, 0x00); + return 0; +} + +static int run_bist_all(adapter_t *adapter) +{ + int port = 0; + u32 val = 0; + + vsc_write(adapter, REG_MEM_BIST, 0x5); + vsc_read(adapter, REG_MEM_BIST, &val); + + for (port = 0; port < 12; port++) + vsc_write(adapter, REG_DEV_SETUP(port), 0x0); + + udelay(300); + vsc_write(adapter, REG_SPI4_MISC, 0x00040409); + udelay(300); + + (void) run_bist(adapter,13); + (void) run_bist(adapter,14); + (void) run_bist(adapter,20); + (void) run_bist(adapter,21); + mdelay(200); + (void) check_bist(adapter,13); + (void) check_bist(adapter,14); + (void) check_bist(adapter,20); + (void) check_bist(adapter,21); + udelay(100); + (void) enable_mem(adapter,13); + (void) enable_mem(adapter,14); + (void) enable_mem(adapter,20); + (void) enable_mem(adapter,21); + udelay(300); + vsc_write(adapter, REG_SPI4_MISC, 0x60040400); + udelay(300); + for (port = 0; port < 12; port++) + vsc_write(adapter, REG_DEV_SETUP(port), 0x1); + + udelay(300); + vsc_write(adapter, REG_MEM_BIST, 0x0); + mdelay(10); + return 0; +} + +static int mac_intr_handler(struct cmac *mac) +{ + return 0; +} + +static int mac_intr_enable(struct cmac *mac) +{ + return 0; +} + +static int mac_intr_disable(struct cmac *mac) +{ + return 0; +} + +static int mac_intr_clear(struct cmac *mac) +{ + return 0; +} + +/* Expect MAC address to be in network byte order. 
*/ +static int mac_set_address(struct cmac* mac, u8 addr[6]) +{ + u32 val; + int port = mac->instance->index; + + vsc_write(mac->adapter, REG_MAC_LOW_ADDR(port), + (addr[3] << 16) | (addr[4] << 8) | addr[5]); + vsc_write(mac->adapter, REG_MAC_HIGH_ADDR(port), + (addr[0] << 16) | (addr[1] << 8) | addr[2]); + + vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &val); + val &= ~0xf0000000; + vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, val | (port << 28)); + + vsc_write(mac->adapter, REG_ING_FFILT_MASK0, + 0xffff0000 | (addr[4] << 8) | addr[5]); + vsc_write(mac->adapter, REG_ING_FFILT_MASK1, + 0xffff0000 | (addr[2] << 8) | addr[3]); + vsc_write(mac->adapter, REG_ING_FFILT_MASK2, + 0xffff0000 | (addr[0] << 8) | addr[1]); + return 0; +} + +static int mac_get_address(struct cmac *mac, u8 addr[6]) +{ + u32 addr_lo, addr_hi; + int port = mac->instance->index; + + vsc_read(mac->adapter, REG_MAC_LOW_ADDR(port), &addr_lo); + vsc_read(mac->adapter, REG_MAC_HIGH_ADDR(port), &addr_hi); + + addr[0] = (u8) (addr_hi >> 16); + addr[1] = (u8) (addr_hi >> 8); + addr[2] = (u8) addr_hi; + addr[3] = (u8) (addr_lo >> 16); + addr[4] = (u8) (addr_lo >> 8); + addr[5] = (u8) addr_lo; + return 0; +} + +/* This is intended to reset a port, not the whole MAC */ +static int mac_reset(struct cmac *mac) +{ + int index = mac->instance->index; + + run_table(mac->adapter, vsc7326_portinit[index], + ARRAY_SIZE(vsc7326_portinit[index])); + + return 0; +} + +static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm) +{ + u32 v; + int port = mac->instance->index; + + vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &v); + v |= 1 << 12; + + if (t1_rx_mode_promisc(rm)) + v &= ~(1 << (port + 16)); + else + v |= 1 << (port + 16); + + vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, v); + return 0; +} + +static int mac_set_mtu(struct cmac *mac, int mtu) +{ + int port = mac->instance->index; + + if (mtu > MAX_MTU) + return -EINVAL; + + /* max_len includes header and FCS */ + vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4); + return 0; +} + +static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, + int fc) +{ + u32 v; + int enable, port = mac->instance->index; + + if (speed >= 0 && speed != SPEED_10 && speed != SPEED_100 && + speed != SPEED_1000) + return -1; + if (duplex > 0 && duplex != DUPLEX_FULL) + return -1; + + if (speed >= 0) { + vsc_read(mac->adapter, REG_MODE_CFG(port), &v); + enable = v & 3; /* save tx/rx enables */ + v &= ~0xf; + v |= 4; /* full duplex */ + if (speed == SPEED_1000) + v |= 8; /* GigE */ + enable |= v; + vsc_write(mac->adapter, REG_MODE_CFG(port), v); + + if (speed == SPEED_1000) + v = 0x82; + else if (speed == SPEED_100) + v = 0x84; + else /* SPEED_10 */ + v = 0x86; + vsc_write(mac->adapter, REG_DEV_SETUP(port), v | 1); /* reset */ + vsc_write(mac->adapter, REG_DEV_SETUP(port), v); + vsc_read(mac->adapter, REG_DBG(port), &v); + v &= ~0xff00; + if (speed == SPEED_1000) + v |= 0x400; + else if (speed == SPEED_100) + v |= 0x2000; + else /* SPEED_10 */ + v |= 0xff00; + vsc_write(mac->adapter, REG_DBG(port), v); + + vsc_write(mac->adapter, REG_TX_IFG(port), + speed == SPEED_1000 ? 
5 : 0x11);
+		if (duplex == DUPLEX_HALF)
+			enable = 0x0;	/* 100 or 10 */
+		else if (speed == SPEED_1000)
+			enable = 0xc;
+		else	/* SPEED_100 or 10 */
+			enable = 0x4;
+		enable |= 0x9 << 10;	/* IFG1 */
+		enable |= 0x6 << 6;	/* IFG2 */
+		enable |= 0x1 << 4;	/* VLAN */
+		enable |= 0x3;	/* RX/TX EN */
+		vsc_write(mac->adapter, REG_MODE_CFG(port), enable);
+
+	}
+
+	vsc_read(mac->adapter, REG_PAUSE_CFG(port), &v);
+	v &= 0xfff0ffff;
+	v |= 0x20000;	/* xon/xoff */
+	if (fc & PAUSE_RX)
+		v |= 0x40000;
+	if (fc & PAUSE_TX)
+		v |= 0x80000;
+	if (fc == (PAUSE_RX | PAUSE_TX))
+		v |= 0x10000;
+	vsc_write(mac->adapter, REG_PAUSE_CFG(port), v);
+	return 0;
+}
+
+static int mac_enable(struct cmac *mac, int which)
+{
+	u32 val;
+	int port = mac->instance->index;
+
+	/* Write the correct WM value when the port is enabled. */
+	vsc_write(mac->adapter, REG_HIGH_LOW_WM(1,port), WM_ENABLE);
+
+	vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+	if (which & MAC_DIRECTION_RX)
+		val |= 0x2;
+	if (which & MAC_DIRECTION_TX)
+		val |= 1;
+	vsc_write(mac->adapter, REG_MODE_CFG(port), val);
+	return 0;
+}
+
+static int mac_disable(struct cmac *mac, int which)
+{
+	u32 val;
+	int i, port = mac->instance->index;
+
+	/* Reset the port, this also writes the correct WM value */
+	mac_reset(mac);
+
+	vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+	if (which & MAC_DIRECTION_RX)
+		val &= ~0x2;
+	if (which & MAC_DIRECTION_TX)
+		val &= ~0x1;
+	vsc_write(mac->adapter, REG_MODE_CFG(port), val);
+	vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+
+	/* Clear stats */
+	for (i = 0; i <= 0x3a; ++i)
+		vsc_write(mac->adapter, CRA(4, port, i), 0);
+
+	/* Clear software counters */
+	memset(&mac->stats, 0, sizeof(struct cmac_statistics));
+
+	return 0;
+}
+
+/* Fold the 32-bit HW counter at @addr into the 64-bit running total @stat,
+ * adding 2^32 when the HW counter has wrapped since the last read. */
+static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat)
+{
+	u32 v, lo;
+
+	vsc_read(mac->adapter, addr, &v);
+	lo = *stat;
+	*stat = *stat - lo + v;
+
+	if (v == 0)
+		return;
+
+	if (v < lo)
+		*stat += (1ULL << 32);
+}
+
+static void port_stats_update(struct cmac *mac)
+{
+	struct {
+		unsigned int reg;
+		unsigned int offset;
+	} hw_stats[] = {
+
+#define HW_STAT(reg, stat_name) \
+	{ reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+
+		/* Rx stats */
+		HW_STAT(RxUnicast, RxUnicastFramesOK),
+		HW_STAT(RxMulticast, RxMulticastFramesOK),
+		HW_STAT(RxBroadcast, RxBroadcastFramesOK),
+		HW_STAT(Crc, RxFCSErrors),
+		HW_STAT(RxAlignment, RxAlignErrors),
+		HW_STAT(RxOversize, RxFrameTooLongErrors),
+		HW_STAT(RxPause, RxPauseFrames),
+		HW_STAT(RxJabbers, RxJabberErrors),
+		HW_STAT(RxFragments, RxRuntErrors),
+		HW_STAT(RxUndersize, RxRuntErrors),
+		HW_STAT(RxSymbolCarrier, RxSymbolErrors),
+		HW_STAT(RxSize1519ToMax, RxJumboFramesOK),
+
+		/* Tx stats (skip collision stats as we are full-duplex only) */
+		HW_STAT(TxUnicast, TxUnicastFramesOK),
+		HW_STAT(TxMulticast, TxMulticastFramesOK),
+		HW_STAT(TxBroadcast, TxBroadcastFramesOK),
+		HW_STAT(TxPause, TxPauseFrames),
+		HW_STAT(TxUnderrun, TxUnderrun),
+		HW_STAT(TxSize1519ToMax, TxJumboFramesOK),
+	}, *p = hw_stats;
+	unsigned int port = mac->instance->index;
+	u64 *stats = (u64 *)&mac->stats;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(hw_stats); i++, p++)
+		rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset);
+
+	rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
+	rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK);
+	rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad);
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics. Since the counters are only 32 bits + * some of them can overflow in less than a minute at GigE speeds, so this + * function should be called every 30 seconds or so. + * + * To cut down on reading costs we update only the octet counters at each tick + * and do a full update at major ticks, which can be every 30 minutes or more. + */ +static const struct cmac_statistics *mac_update_statistics(struct cmac *mac, + int flag) +{ + if (flag == MAC_STATS_UPDATE_FULL || + mac->instance->ticks >= MAJOR_UPDATE_TICKS) { + port_stats_update(mac); + mac->instance->ticks = 0; + } else { + int port = mac->instance->index; + + rmon_update(mac, REG_RX_OK_BYTES(port), + &mac->stats.RxOctetsOK); + rmon_update(mac, REG_RX_BAD_BYTES(port), + &mac->stats.RxOctetsBad); + rmon_update(mac, REG_TX_OK_BYTES(port), + &mac->stats.TxOctetsOK); + mac->instance->ticks++; + } + return &mac->stats; +} + +static void mac_destroy(struct cmac *mac) +{ + kfree(mac); +} + +static struct cmac_ops vsc7326_ops = { + .destroy = mac_destroy, + .reset = mac_reset, + .interrupt_handler = mac_intr_handler, + .interrupt_enable = mac_intr_enable, + .interrupt_disable = mac_intr_disable, + .interrupt_clear = mac_intr_clear, + .enable = mac_enable, + .disable = mac_disable, + .set_mtu = mac_set_mtu, + .set_rx_mode = mac_set_rx_mode, + .set_speed_duplex_fc = mac_set_speed_duplex_fc, + .statistics_update = mac_update_statistics, + .macaddress_get = mac_get_address, + .macaddress_set = mac_set_address, +}; + +static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index) +{ + struct cmac *mac; + u32 val; + int i; + + mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); + if (!mac) + return NULL; + + mac->ops = &vsc7326_ops; + mac->instance = (cmac_instance *)(mac + 1); + mac->adapter = adapter; + + mac->instance->index = index; + mac->instance->ticks = 0; + + i = 0; + do { + u32 vhi, vlo; + + vhi = vlo = 0; + t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo); + udelay(1); + t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi); + udelay(5); + val = (vhi << 16) | vlo; + } while ((++i < 10000) && (val == 0xffffffff)); + + return mac; +} + +static int vsc7326_mac_reset(adapter_t *adapter) +{ + vsc7326_full_reset(adapter); + (void) run_bist_all(adapter); + run_table(adapter, vsc7326_reset, ARRAY_SIZE(vsc7326_reset)); + return 0; +} + +const struct gmac t1_vsc7326_ops = { + .stats_update_period = STATS_TICK_SECS, + .create = vsc7326_mac_create, + .reset = vsc7326_mac_reset, +}; diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h new file mode 100644 index 0000000..479edbc --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h @@ -0,0 +1,297 @@ +/* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */ +#ifndef _VSC7321_REG_H_ +#define _VSC7321_REG_H_ + +/* Register definitions for Vitesse VSC7321 (Meigs II) MAC + * + * Straight off the data sheet, VMDS-10038 Rev 2.0 and + * PD0011-01-14-Meigs-II 2002-12-12 + */ + +/* Just 'cause it's in here doesn't mean it's used. 
*/ + +#define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1)) + +/* System and CPU comm's registers */ +#define REG_CHIP_ID CRA(0x7,0xf,0x00) /* Chip ID */ +#define REG_BLADE_ID CRA(0x7,0xf,0x01) /* Blade ID */ +#define REG_SW_RESET CRA(0x7,0xf,0x02) /* Global Soft Reset */ +#define REG_MEM_BIST CRA(0x7,0xf,0x04) /* mem */ +#define REG_IFACE_MODE CRA(0x7,0xf,0x07) /* Interface mode */ +#define REG_MSCH CRA(0x7,0x2,0x06) /* CRC error count */ +#define REG_CRC_CNT CRA(0x7,0x2,0x0a) /* CRC error count */ +#define REG_CRC_CFG CRA(0x7,0x2,0x0b) /* CRC config */ +#define REG_SI_TRANSFER_SEL CRA(0x7,0xf,0x18) /* SI Transfer Select */ +#define REG_PLL_CLK_SPEED CRA(0x7,0xf,0x19) /* Clock Speed Selection */ +#define REG_SYS_CLK_SELECT CRA(0x7,0xf,0x1c) /* System Clock Select */ +#define REG_GPIO_CTRL CRA(0x7,0xf,0x1d) /* GPIO Control */ +#define REG_GPIO_OUT CRA(0x7,0xf,0x1e) /* GPIO Out */ +#define REG_GPIO_IN CRA(0x7,0xf,0x1f) /* GPIO In */ +#define REG_CPU_TRANSFER_SEL CRA(0x7,0xf,0x20) /* CPU Transfer Select */ +#define REG_LOCAL_DATA CRA(0x7,0xf,0xfe) /* Local CPU Data Register */ +#define REG_LOCAL_STATUS CRA(0x7,0xf,0xff) /* Local CPU Status Register */ + +/* Aggregator registers */ +#define REG_AGGR_SETUP CRA(0x7,0x1,0x00) /* Aggregator Setup */ +#define REG_PMAP_TABLE CRA(0x7,0x1,0x01) /* Port map table */ +#define REG_MPLS_BIT0 CRA(0x7,0x1,0x08) /* MPLS bit0 position */ +#define REG_MPLS_BIT1 CRA(0x7,0x1,0x09) /* MPLS bit1 position */ +#define REG_MPLS_BIT2 CRA(0x7,0x1,0x0a) /* MPLS bit2 position */ +#define REG_MPLS_BIT3 CRA(0x7,0x1,0x0b) /* MPLS bit3 position */ +#define REG_MPLS_BITMASK CRA(0x7,0x1,0x0c) /* MPLS bit mask */ +#define REG_PRE_BIT0POS CRA(0x7,0x1,0x10) /* Preamble bit0 position */ +#define REG_PRE_BIT1POS CRA(0x7,0x1,0x11) /* Preamble bit1 position */ +#define REG_PRE_BIT2POS CRA(0x7,0x1,0x12) /* Preamble bit2 position */ +#define REG_PRE_BIT3POS CRA(0x7,0x1,0x13) /* Preamble bit3 position */ +#define REG_PRE_ERR_CNT CRA(0x7,0x1,0x14) /* Preamble parity error count */ + +/* BIST registers */ +/*#define REG_RAM_BIST_CMD CRA(0x7,0x2,0x00)*/ /* RAM BIST Command Register */ +/*#define REG_RAM_BIST_RESULT CRA(0x7,0x2,0x01)*/ /* RAM BIST Read Status/Result */ +#define REG_RAM_BIST_CMD CRA(0x7,0x1,0x00) /* RAM BIST Command Register */ +#define REG_RAM_BIST_RESULT CRA(0x7,0x1,0x01) /* RAM BIST Read Status/Result */ +#define BIST_PORT_SELECT 0x00 /* BIST port select */ +#define BIST_COMMAND 0x01 /* BIST enable/disable */ +#define BIST_STATUS 0x02 /* BIST operation status */ +#define BIST_ERR_CNT_LSB 0x03 /* BIST error count lo 8b */ +#define BIST_ERR_CNT_MSB 0x04 /* BIST error count hi 8b */ +#define BIST_ERR_SEL_LSB 0x05 /* BIST error select lo 8b */ +#define BIST_ERR_SEL_MSB 0x06 /* BIST error select hi 8b */ +#define BIST_ERROR_STATE 0x07 /* BIST engine internal state */ +#define BIST_ERR_ADR0 0x08 /* BIST error address lo 8b */ +#define BIST_ERR_ADR1 0x09 /* BIST error address lomid 8b */ +#define BIST_ERR_ADR2 0x0a /* BIST error address himid 8b */ +#define BIST_ERR_ADR3 0x0b /* BIST error address hi 8b */ + +/* FIFO registers + * ie = 0 for ingress, 1 for egress + * fn = FIFO number, 0-9 + */ +#define REG_TEST(ie,fn) CRA(0x2,ie&1,0x00+fn) /* Mode & Test Register */ +#define REG_TOP_BOTTOM(ie,fn) CRA(0x2,ie&1,0x10+fn) /* FIFO Buffer Top & Bottom */ +#define REG_TAIL(ie,fn) CRA(0x2,ie&1,0x20+fn) /* FIFO Write Pointer */ +#define REG_HEAD(ie,fn) CRA(0x2,ie&1,0x30+fn) /* FIFO Read Pointer */ +#define REG_HIGH_LOW_WM(ie,fn) 
CRA(0x2,ie&1,0x40+fn)	/* Flow Control Water Marks */
+#define REG_CT_THRHLD(ie,fn)	CRA(0x2,ie&1,0x50+fn)	/* Cut Through Threshold */
+#define REG_FIFO_DROP_CNT(ie,fn) CRA(0x2,ie&1,0x60+fn)	/* Drop & CRC Error Counter */
+#define REG_DEBUG_BUF_CNT(ie,fn) CRA(0x2,ie&1,0x70+fn)	/* Input Side Debug Counter */
+#define REG_BUCKI(fn)	CRA(0x2,2,0x20+fn)	/* Input Side Debug Counter */
+#define REG_BUCKE(fn)	CRA(0x2,3,0x20+fn)	/* Input Side Debug Counter */
+
+/* Traffic shaper buckets
+ * ie = 0 for ingress, 1 for egress
+ * bn = bucket number 0-10 (yes, 11 buckets)
+ */
+/* OK, this one's kinda ugly. Some hardware designers are perverse. */
+#define REG_TRAFFIC_SHAPER_BUCKET(ie,bn) CRA(0x2,ie&1,0x0a + (bn>7) | ((bn&7)<<4))
+#define REG_TRAFFIC_SHAPER_CONTROL(ie)	CRA(0x2,ie&1,0x3b)
+
+#define REG_SRAM_ADR(ie)	CRA(0x2,ie&1,0x0e)	/* FIFO SRAM address */
+#define REG_SRAM_WR_STRB(ie)	CRA(0x2,ie&1,0x1e)	/* FIFO SRAM write strobe */
+#define REG_SRAM_RD_STRB(ie)	CRA(0x2,ie&1,0x2e)	/* FIFO SRAM read strobe */
+#define REG_SRAM_DATA_0(ie)	CRA(0x2,ie&1,0x3e)	/* FIFO SRAM data lo 8b */
+#define REG_SRAM_DATA_1(ie)	CRA(0x2,ie&1,0x4e)	/* FIFO SRAM data lomid 8b */
+#define REG_SRAM_DATA_2(ie)	CRA(0x2,ie&1,0x5e)	/* FIFO SRAM data himid 8b */
+#define REG_SRAM_DATA_3(ie)	CRA(0x2,ie&1,0x6e)	/* FIFO SRAM data hi 8b */
+#define REG_SRAM_DATA_BLK_TYPE(ie) CRA(0x2,ie&1,0x7e)	/* FIFO SRAM tag */
+/* REG_ING_CONTROL equals REG_CONTROL with ie = 0, likewise REG_EGR_CONTROL is ie = 1 */
+#define REG_CONTROL(ie)	CRA(0x2,ie&1,0x0f)	/* FIFO control */
+#define REG_ING_CONTROL	CRA(0x2,0x0,0x0f)	/* Ingress control (alias) */
+#define REG_EGR_CONTROL	CRA(0x2,0x1,0x0f)	/* Egress control (alias) */
+#define REG_AGE_TIMER(ie)	CRA(0x2,ie&1,0x1f)	/* Aging timer */
+#define REG_AGE_INC(ie)	CRA(0x2,ie&1,0x2f)	/* Aging increment */
+#define DEBUG_OUT(ie)	CRA(0x2,ie&1,0x3f)	/* Output debug counter control */
+#define DEBUG_CNT(ie)	CRA(0x2,ie&1,0x4f)	/* Output debug counter */
+
+/* SPI4 interface */
+#define REG_SPI4_MISC		CRA(0x5,0x0,0x00)	/* Misc Register */
+#define REG_SPI4_STATUS		CRA(0x5,0x0,0x01)	/* CML Status */
+#define REG_SPI4_ING_SETUP0	CRA(0x5,0x0,0x02)	/* Ingress Status Channel Setup */
+#define REG_SPI4_ING_SETUP1	CRA(0x5,0x0,0x03)	/* Ingress Data Training Setup */
+#define REG_SPI4_ING_SETUP2	CRA(0x5,0x0,0x04)	/* Ingress Data Burst Size Setup */
+#define REG_SPI4_EGR_SETUP0	CRA(0x5,0x0,0x05)	/* Egress Status Channel Setup */
+#define REG_SPI4_DBG_CNT(n)	CRA(0x5,0x0,0x10+n)	/* Debug counters 0-9 */
+#define REG_SPI4_DBG_SETUP	CRA(0x5,0x0,0x1A)	/* Debug counters setup */
+#define REG_SPI4_TEST		CRA(0x5,0x0,0x20)	/* Test Setup Register */
+#define REG_TPGEN_UP0		CRA(0x5,0x0,0x21)	/* Test Pattern generator user pattern 0 */
+#define REG_TPGEN_UP1		CRA(0x5,0x0,0x22)	/* Test Pattern generator user pattern 1 */
+#define REG_TPCHK_UP0		CRA(0x5,0x0,0x23)	/* Test Pattern checker user pattern 0 */
+#define REG_TPCHK_UP1		CRA(0x5,0x0,0x24)	/* Test Pattern checker user pattern 1 */
+#define REG_TPSAM_P0		CRA(0x5,0x0,0x25)	/* Sampled pattern 0 */
+#define REG_TPSAM_P1		CRA(0x5,0x0,0x26)	/* Sampled pattern 1 */
+#define REG_TPERR_CNT		CRA(0x5,0x0,0x27)	/* Pattern checker error counter */
+#define REG_SPI4_STICKY		CRA(0x5,0x0,0x30)	/* Sticky bits register */
+#define REG_SPI4_DBG_INH	CRA(0x5,0x0,0x31)	/* Core egress & ingress inhibit */
+#define REG_SPI4_DBG_STATUS	CRA(0x5,0x0,0x32)	/* Sampled ingress status */
+#define REG_SPI4_DBG_GRANT	CRA(0x5,0x0,0x33)	/* Ingress granted credit value */
+
+#define REG_SPI4_DESKEW		CRA(0x5,0x0,0x43)	/* SPI4 deskew */
+
+/* 10GbE MAC Block Registers */
+/* Note that those registers that are exactly the same for 10GbE as for
+ * tri-speed are only defined with the version that needs a port number.
+ * Pass 0xa in those cases.
+ *
+ * Also note that despite the presence of a MAC address register, this part
+ * does no ingress MAC address filtering. That register is used only for
+ * pause frame detection and generation.
+ */
+/* 10GbE specific, and different from tri-speed */
+#define REG_MISC_10G		CRA(0x1,0xa,0x00)	/* Misc 10GbE setup */
+#define REG_PAUSE_10G		CRA(0x1,0xa,0x01)	/* Pause register */
+#define REG_NORMALIZER_10G	CRA(0x1,0xa,0x05)	/* 10G normalizer */
+#define REG_STICKY_RX		CRA(0x1,0xa,0x06)	/* RX debug register */
+#define REG_DENORM_10G		CRA(0x1,0xa,0x07)	/* Denormalizer */
+#define REG_STICKY_TX		CRA(0x1,0xa,0x08)	/* TX sticky bits */
+#define REG_MAX_RXHIGH		CRA(0x1,0xa,0x0a)	/* XGMII lane 0-3 debug */
+#define REG_MAX_RXLOW		CRA(0x1,0xa,0x0b)	/* XGMII lane 4-7 debug */
+#define REG_MAC_TX_STICKY	CRA(0x1,0xa,0x0c)	/* MAC Tx state sticky debug */
+#define REG_MAC_TX_RUNNING	CRA(0x1,0xa,0x0d)	/* MAC Tx state running debug */
+#define REG_TX_ABORT_AGE	CRA(0x1,0xa,0x14)	/* Aged Tx frames discarded */
+#define REG_TX_ABORT_SHORT	CRA(0x1,0xa,0x15)	/* Short Tx frames discarded */
+#define REG_TX_ABORT_TAXI	CRA(0x1,0xa,0x16)	/* Taxi error frames discarded */
+#define REG_TX_ABORT_UNDERRUN	CRA(0x1,0xa,0x17)	/* Tx Underrun abort counter */
+#define REG_TX_DENORM_DISCARD	CRA(0x1,0xa,0x18)	/* Tx denormalizer discards */
+#define REG_XAUI_STAT_A		CRA(0x1,0xa,0x20)	/* XAUI status A */
+#define REG_XAUI_STAT_B		CRA(0x1,0xa,0x21)	/* XAUI status B */
+#define REG_XAUI_STAT_C		CRA(0x1,0xa,0x22)	/* XAUI status C */
+#define REG_XAUI_CONF_A		CRA(0x1,0xa,0x23)	/* XAUI configuration A */
+#define REG_XAUI_CONF_B		CRA(0x1,0xa,0x24)	/* XAUI configuration B */
+#define REG_XAUI_CODE_GRP_CNT	CRA(0x1,0xa,0x25)	/* XAUI code group error count */
+#define REG_XAUI_CONF_TEST_A	CRA(0x1,0xa,0x26)	/* XAUI test register A */
+#define REG_PDERRCNT		CRA(0x1,0xa,0x27)	/* XAUI test register B */
+
+/* pn = port number 0-9 for tri-speed, 10 for 10GbE */
+/* Both tri-speed and 10GbE */
+#define REG_MAX_LEN(pn)		CRA(0x1,pn,0x02)	/* Max length */
+#define REG_MAC_HIGH_ADDR(pn)	CRA(0x1,pn,0x03)	/* Upper 24 bits of MAC addr */
+#define REG_MAC_LOW_ADDR(pn)	CRA(0x1,pn,0x04)	/* Lower 24 bits of MAC addr */
+
+/* tri-speed only
+ * pn = port number, 0-9
+ */
+#define REG_MODE_CFG(pn)	CRA(0x1,pn,0x00)	/* Mode configuration */
+#define REG_PAUSE_CFG(pn)	CRA(0x1,pn,0x01)	/* Pause configuration */
+#define REG_NORMALIZER(pn)	CRA(0x1,pn,0x05)	/* Normalizer */
+#define REG_TBI_STATUS(pn)	CRA(0x1,pn,0x06)	/* TBI status */
+#define REG_PCS_STATUS_DBG(pn)	CRA(0x1,pn,0x07)	/* PCS status debug */
+#define REG_PCS_CTRL(pn)	CRA(0x1,pn,0x08)	/* PCS control */
+#define REG_TBI_CONFIG(pn)	CRA(0x1,pn,0x09)	/* TBI configuration */
+#define REG_STICK_BIT(pn)	CRA(0x1,pn,0x0a)	/* Sticky bits */
+#define REG_DEV_SETUP(pn)	CRA(0x1,pn,0x0b)	/* MAC clock/reset setup */
+#define REG_DROP_CNT(pn)	CRA(0x1,pn,0x0c)	/* Drop counter */
+#define REG_PORT_POS(pn)	CRA(0x1,pn,0x0d)	/* Preamble port position */
+#define REG_PORT_FAIL(pn)	CRA(0x1,pn,0x0e)	/* Port fail */
+#define REG_SERDES_CONF(pn)	CRA(0x1,pn,0x0f)	/* SerDes configuration */
+#define REG_SERDES_TEST(pn)	CRA(0x1,pn,0x10)	/* SerDes test */
+#define REG_SERDES_STAT(pn)	CRA(0x1,pn,0x11)	/* SerDes status */
+#define REG_SERDES_COM_CNT(pn)	CRA(0x1,pn,0x12)	/* SerDes comma
counter */ +#define REG_DENORM(pn) CRA(0x1,pn,0x15) /* Frame denormalization */ +#define REG_DBG(pn) CRA(0x1,pn,0x16) /* Device 1G debug */ +#define REG_TX_IFG(pn) CRA(0x1,pn,0x18) /* Tx IFG config */ +#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */ + +/* Statistics */ +/* CRA(0x4,pn,reg) */ +/* reg below */ +/* pn = port number, 0-a, a = 10GbE */ + +enum { + RxInBytes = 0x00, // # Rx in octets + RxSymbolCarrier = 0x01, // Frames w/ symbol errors + RxPause = 0x02, // # pause frames received + RxUnsupOpcode = 0x03, // # control frames with unsupported opcode + RxOkBytes = 0x04, // # octets in good frames + RxBadBytes = 0x05, // # octets in bad frames + RxUnicast = 0x06, // # good unicast frames + RxMulticast = 0x07, // # good multicast frames + RxBroadcast = 0x08, // # good broadcast frames + Crc = 0x09, // # frames w/ bad CRC only + RxAlignment = 0x0a, // # frames w/ alignment err + RxUndersize = 0x0b, // # frames undersize + RxFragments = 0x0c, // # frames undersize w/ crc err + RxInRangeLengthError = 0x0d, // # frames with length error + RxOutOfRangeError = 0x0e, // # frames with illegal length field + RxOversize = 0x0f, // # frames oversize + RxJabbers = 0x10, // # frames oversize w/ crc err + RxSize64 = 0x11, // # frames 64 octets long + RxSize65To127 = 0x12, // # frames 65-127 octets + RxSize128To255 = 0x13, // # frames 128-255 + RxSize256To511 = 0x14, // # frames 256-511 + RxSize512To1023 = 0x15, // # frames 512-1023 + RxSize1024To1518 = 0x16, // # frames 1024-1518 + RxSize1519ToMax = 0x17, // # frames 1519-max + + TxOutBytes = 0x18, // # octets tx + TxPause = 0x19, // # pause frames sent + TxOkBytes = 0x1a, // # octets tx OK + TxUnicast = 0x1b, // # frames unicast + TxMulticast = 0x1c, // # frames multicast + TxBroadcast = 0x1d, // # frames broadcast + TxMultipleColl = 0x1e, // # frames tx after multiple collisions + TxLateColl = 0x1f, // # late collisions detected + TxXcoll = 0x20, // # frames lost, excessive collisions + TxDefer = 0x21, // # frames deferred on first tx attempt + TxXdefer = 0x22, // # frames excessively deferred + TxCsense = 0x23, // carrier sense errors at frame end + TxSize64 = 0x24, // # frames 64 octets long + TxSize65To127 = 0x25, // # frames 65-127 octets + TxSize128To255 = 0x26, // # frames 128-255 + TxSize256To511 = 0x27, // # frames 256-511 + TxSize512To1023 = 0x28, // # frames 512-1023 + TxSize1024To1518 = 0x29, // # frames 1024-1518 + TxSize1519ToMax = 0x2a, // # frames 1519-max + TxSingleColl = 0x2b, // # frames tx after single collision + TxBackoff2 = 0x2c, // # frames tx ok after 2 backoffs/collisions + TxBackoff3 = 0x2d, // after 3 backoffs/collisions + TxBackoff4 = 0x2e, // after 4 + TxBackoff5 = 0x2f, // after 5 + TxBackoff6 = 0x30, // after 6 + TxBackoff7 = 0x31, // after 7 + TxBackoff8 = 0x32, // after 8 + TxBackoff9 = 0x33, // after 9 + TxBackoff10 = 0x34, // after 10 + TxBackoff11 = 0x35, // after 11 + TxBackoff12 = 0x36, // after 12 + TxBackoff13 = 0x37, // after 13 + TxBackoff14 = 0x38, // after 14 + TxBackoff15 = 0x39, // after 15 + TxUnderrun = 0x3a, // # frames dropped from underrun + // Hole. See REG_RX_XGMII_PROT_ERR below. + RxIpgShrink = 0x3c, // # of IPG shrinks detected + // Duplicate. See REG_STAT_STICKY10G below. 
+ StatSticky1G = 0x3e, // tri-speed sticky bits + StatInit = 0x3f // Clear all statistics +}; + +#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */ +#define REG_STAT_STICKY10G CRA(0x4,0xa,StatSticky1G) /* 10GbE sticky bits */ + +#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,RxOkBytes) +#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,RxBadBytes) +#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,TxOkBytes) + +/* MII-Management Block registers */ +/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If + * we hooked up to the one with separate directions, the middle 0x0 needs to + * change to 0x1. And the current errata states that MII-M 1 doesn't work. + */ + +#define REG_MIIM_STATUS CRA(0x3,0x0,0x00) /* MII-M Status */ +#define REG_MIIM_CMD CRA(0x3,0x0,0x01) /* MII-M Command */ +#define REG_MIIM_DATA CRA(0x3,0x0,0x02) /* MII-M Data */ +#define REG_MIIM_PRESCALE CRA(0x3,0x0,0x03) /* MII-M MDC Prescale */ + +#define REG_ING_FFILT_UM_EN CRA(0x2, 0, 0xd) +#define REG_ING_FFILT_BE_EN CRA(0x2, 0, 0x1d) +#define REG_ING_FFILT_VAL0 CRA(0x2, 0, 0x2d) +#define REG_ING_FFILT_VAL1 CRA(0x2, 0, 0x3d) +#define REG_ING_FFILT_MASK0 CRA(0x2, 0, 0x4d) +#define REG_ING_FFILT_MASK1 CRA(0x2, 0, 0x5d) +#define REG_ING_FFILT_MASK2 CRA(0x2, 0, 0x6d) +#define REG_ING_FFILT_ETYPE CRA(0x2, 0, 0x7d) + + +/* Whew. */ + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/Makefile b/drivers/net/ethernet/chelsio/cxgb3/Makefile new file mode 100644 index 0000000..29aff78 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/Makefile @@ -0,0 +1,8 @@ +# +# Chelsio T3 driver +# + +obj-$(CONFIG_CHELSIO_T3) += cxgb3.o + +cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \ + xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h new file mode 100644 index 0000000..8b395b5 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* This file should not be included directly. Include common.h instead. 
*/ + +#ifndef __T3_ADAPTER_H__ +#define __T3_ADAPTER_H__ + +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/cache.h> +#include <linux/mutex.h> +#include <linux/bitops.h> +#include "t3cdev.h" +#include <asm/io.h> + +struct adapter; +struct sge_qset; +struct port_info; + +enum mac_idx_types { + LAN_MAC_IDX = 0, + SAN_MAC_IDX, + + MAX_MAC_IDX +}; + +struct iscsi_config { + __u8 mac_addr[ETH_ALEN]; + __u32 flags; + int (*send)(struct port_info *pi, struct sk_buff **skb); + int (*recv)(struct port_info *pi, struct sk_buff *skb); +}; + +struct port_info { + struct adapter *adapter; + struct sge_qset *qs; + u8 port_id; + u8 nqsets; + u8 first_qset; + struct cphy phy; + struct cmac mac; + struct link_config link_config; + struct net_device_stats netstats; + int activity; + __be32 iscsi_ipv4addr; + struct iscsi_config iscsic; + + int link_fault; /* link fault was detected */ +}; + +enum { /* adapter flags */ + FULL_INIT_DONE = (1 << 0), + USING_MSI = (1 << 1), + USING_MSIX = (1 << 2), + QUEUES_BOUND = (1 << 3), + TP_PARITY_INIT = (1 << 4), + NAPI_INIT = (1 << 5), +}; + +struct fl_pg_chunk { + struct page *page; + void *va; + unsigned int offset; + unsigned long *p_cnt; + dma_addr_t mapping; +}; + +struct rx_desc; +struct rx_sw_desc; + +struct sge_fl { /* SGE per free-buffer list state */ + unsigned int buf_size; /* size of each Rx buffer */ + unsigned int credits; /* # of available Rx buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int size; /* capacity of free list */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned int gen; /* free list generation */ + struct fl_pg_chunk pg_chunk;/* page chunk cache */ + unsigned int use_pages; /* whether FL uses pages or sk_buffs */ + unsigned int order; /* order of page allocations */ + unsigned int alloc_size; /* size of allocated buffer */ + struct rx_desc *desc; /* address of HW Rx descriptor ring */ + struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ + dma_addr_t phys_addr; /* physical address of HW ring start */ + unsigned int cntxt_id; /* SGE context id for the free list */ + unsigned long empty; /* # of times queue ran out of buffers */ + unsigned long alloc_failed; /* # of times buffer allocation failed */ +}; + +/* + * Bundle size for grouping offload RX packets for delivery to the stack. + * Don't make this too big as we do prefetch on each packet in a bundle. + */ +# define RX_BUNDLE_SIZE 8 + +struct rsp_desc; + +struct sge_rspq { /* state for an SGE response queue */ + unsigned int credits; /* # of pending response credits */ + unsigned int size; /* capacity of response queue */ + unsigned int cidx; /* consumer index */ + unsigned int gen; /* current generation bit */ + unsigned int polling; /* is the queue serviced through NAPI?
*/ + unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */ + unsigned int next_holdoff; /* holdoff time for next interrupt */ + unsigned int rx_recycle_buf; /* whether recycling occurred + within current sop-eop */ + struct rsp_desc *desc; /* address of HW response ring */ + dma_addr_t phys_addr; /* physical address of the ring */ + unsigned int cntxt_id; /* SGE context id for the response q */ + spinlock_t lock; /* guards response processing */ + struct sk_buff_head rx_queue; /* offload packet receive queue */ + struct sk_buff *pg_skb; /* used to build frag list in napi handler */ + + unsigned long offload_pkts; + unsigned long offload_bundles; + unsigned long eth_pkts; /* # of ethernet packets */ + unsigned long pure_rsps; /* # of pure (non-data) responses */ + unsigned long imm_data; /* responses with immediate data */ + unsigned long rx_drops; /* # of packets dropped due to no mem */ + unsigned long async_notif; /* # of asynchronous notification events */ + unsigned long empty; /* # of times queue ran out of credits */ + unsigned long nomem; /* # of responses deferred due to no mem */ + unsigned long unhandled_irqs; /* # of spurious intrs */ + unsigned long starved; + unsigned long restarted; +}; + +struct tx_desc; +struct tx_sw_desc; + +struct sge_txq { /* state for an SGE Tx queue */ + unsigned long flags; /* HW DMA fetch status */ + unsigned int in_use; /* # of in-use Tx descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int processed; /* total # of descs HW has processed */ + unsigned int cleaned; /* total # of descs SW has reclaimed */ + unsigned int stop_thres; /* SW TX queue suspend threshold */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned int gen; /* current value of generation bit */ + unsigned int unacked; /* Tx descriptors used since last COMPL */ + struct tx_desc *desc; /* address of HW Tx descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ + spinlock_t lock; /* guards enqueueing of new packets */ + unsigned int token; /* WR token */ + dma_addr_t phys_addr; /* physical address of the ring */ + struct sk_buff_head sendq; /* List of backpressured offload packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + unsigned int cntxt_id; /* SGE context id for the Tx q */ + unsigned long stops; /* # of times q has been stopped */ + unsigned long restarts; /* # of queue restarts */ +}; + +enum { /* per port SGE statistics */ + SGE_PSTAT_TSO, /* # of TSO requests */ + SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */ + SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */ + SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */ + SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */ + + SGE_PSTAT_MAX /* must be last */ +}; + +struct napi_gro_fraginfo; + +struct sge_qset { /* an SGE queue set */ + struct adapter *adap; + struct napi_struct napi; + struct sge_rspq rspq; + struct sge_fl fl[SGE_RXQ_PER_SET]; + struct sge_txq txq[SGE_TXQ_PER_SET]; + int nomem; + void *lro_va; + struct net_device *netdev; + struct netdev_queue *tx_q; /* associated netdev TX queue */ + unsigned long txq_stopped; /* which Tx queues are stopped */ + struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ + struct timer_list rx_reclaim_timer; /* reclaims RX buffers */ + unsigned long port_stats[SGE_PSTAT_MAX]; +} ____cacheline_aligned; + +struct sge { + struct sge_qset qs[SGE_QSETS]; + spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */ +}; + +struct 
adapter { + struct t3cdev tdev; + struct list_head adapter_list; + void __iomem *regs; + struct pci_dev *pdev; + unsigned long registered_device_map; + unsigned long open_device_map; + unsigned long flags; + + const char *name; + int msg_enable; + unsigned int mmio_len; + + struct adapter_params params; + unsigned int slow_intr_mask; + unsigned long irq_stats[IRQ_NUM_STATS]; + + int msix_nvectors; + struct { + unsigned short vec; + char desc[22]; + } msix_info[SGE_QSETS + 1]; + + /* T3 modules */ + struct sge sge; + struct mc7 pmrx; + struct mc7 pmtx; + struct mc7 cm; + struct mc5 mc5; + + struct net_device *port[MAX_NPORTS]; + unsigned int check_task_cnt; + struct delayed_work adap_check_task; + struct work_struct ext_intr_handler_task; + struct work_struct fatal_error_handler_task; + struct work_struct link_fault_handler_task; + + struct work_struct db_full_task; + struct work_struct db_empty_task; + struct work_struct db_drop_task; + + struct dentry *debugfs_root; + + struct mutex mdio_lock; + spinlock_t stats_lock; + spinlock_t work_lock; + + struct sk_buff *nofail_skb; +}; + +static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr) +{ + u32 val = readl(adapter->regs + reg_addr); + + CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val); + return val; +} + +static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val) +{ + CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val); + writel(val, adapter->regs + reg_addr); +} + +static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) +{ + return netdev_priv(adap->port[idx]); +} + +static inline int phy2portid(struct cphy *phy) +{ + struct adapter *adap = phy->adapter; + struct port_info *port0 = adap2pinfo(adap, 0); + + return &port0->phy == phy ? 
0 : 1; +} + +#define OFFLOAD_DEVMAP_BIT 15 + +#define tdev2adap(d) container_of(d, struct adapter, tdev) + +static inline int offload_running(struct adapter *adapter) +{ + return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); +} + +int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb); + +void t3_os_ext_intr_handler(struct adapter *adapter); +void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status, + int speed, int duplex, int fc); +void t3_os_phymod_changed(struct adapter *adap, int port_id); +void t3_os_link_fault(struct adapter *adapter, int port_id, int state); +void t3_os_link_fault_handler(struct adapter *adapter, int port_id); + +void t3_sge_start(struct adapter *adap); +void t3_sge_stop(struct adapter *adap); +void t3_start_sge_timers(struct adapter *adap); +void t3_stop_sge_timers(struct adapter *adap); +void t3_free_sge_resources(struct adapter *adap); +void t3_sge_err_intr_handler(struct adapter *adapter); +irq_handler_t t3_intr_handler(struct adapter *adap, int polling); +netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev); +int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb); +void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p); +int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, + int irq_vec_idx, const struct qset_params *p, + int ntxq, struct net_device *dev, + struct netdev_queue *netdevq); +extern struct workqueue_struct *cxgb3_wq; + +int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size); + +#endif /* __T3_ADAPTER_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c new file mode 100644 index 0000000..2028da9 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c @@ -0,0 +1,941 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "common.h" +#include "regs.h" + +enum { + AEL100X_TX_CONFIG1 = 0xc002, + AEL1002_PWR_DOWN_HI = 0xc011, + AEL1002_PWR_DOWN_LO = 0xc012, + AEL1002_XFI_EQL = 0xc015, + AEL1002_LB_EN = 0xc017, + AEL_OPT_SETTINGS = 0xc017, + AEL_I2C_CTRL = 0xc30a, + AEL_I2C_DATA = 0xc30b, + AEL_I2C_STAT = 0xc30c, + AEL2005_GPIO_CTRL = 0xc214, + AEL2005_GPIO_STAT = 0xc215, + + AEL2020_GPIO_INTR = 0xc103, /* Latch High (LH) */ + AEL2020_GPIO_CTRL = 0xc108, /* Store Clear (SC) */ + AEL2020_GPIO_STAT = 0xc10c, /* Read Only (RO) */ + AEL2020_GPIO_CFG = 0xc110, /* Read Write (RW) */ + + AEL2020_GPIO_SDA = 0, /* IN: i2c serial data */ + AEL2020_GPIO_MODDET = 1, /* IN: Module Detect */ + AEL2020_GPIO_0 = 3, /* IN: unassigned */ + AEL2020_GPIO_1 = 2, /* OUT: unassigned */ + AEL2020_GPIO_LSTAT = AEL2020_GPIO_1, /* wired to link status LED */ +}; + +enum { edc_none, edc_sr, edc_twinax }; + +/* PHY module I2C device address */ +enum { + MODULE_DEV_ADDR = 0xa0, + SFF_DEV_ADDR = 0xa2, +}; + +/* PHY transceiver type */ +enum { + phy_transtype_unknown = 0, + phy_transtype_sfp = 3, + phy_transtype_xfp = 6, +}; + +#define AEL2005_MODDET_IRQ 4 + +struct reg_val { + unsigned short mmd_addr; + unsigned short reg_addr; + unsigned short clear_bits; + unsigned short set_bits; +}; + +static int set_phy_regs(struct cphy *phy, const struct reg_val *rv) +{ + int err; + + for (err = 0; rv->mmd_addr && !err; rv++) { + if (rv->clear_bits == 0xffff) + err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr, + rv->set_bits); + else + err = t3_mdio_change_bits(phy, rv->mmd_addr, + rv->reg_addr, rv->clear_bits, + rv->set_bits); + } + return err; +} + +static void ael100x_txon(struct cphy *phy) +{ + int tx_on_gpio = + phy->mdio.prtad == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL; + + msleep(100); + t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio); + msleep(30); +} + +/* + * Read an 8-bit word from a device attached to the PHY's i2c bus. + */ +static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr) +{ + int i, err; + unsigned int stat, data; + + err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL, + (dev_addr << 8) | (1 << 8) | word_addr); + if (err) + return err; + + for (i = 0; i < 200; i++) { + msleep(1); + err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat); + if (err) + return err; + if ((stat & 3) == 1) { + err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA, + &data); + if (err) + return err; + return data >> 8; + } + } + CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n", + phy->mdio.prtad, dev_addr, word_addr); + return -ETIMEDOUT; +} + +static int ael1002_power_down(struct cphy *phy, int enable) +{ + int err; + + err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, !!enable); + if (!err) + err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, + MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_CTRL1_LPOWER, enable); + return err; +} + +static int ael1002_reset(struct cphy *phy, int wait) +{ + int err; + + if ((err = ael1002_power_down(phy, 0)) || + (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL100X_TX_CONFIG1, 1)) || + (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_HI, 0)) || + (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_LO, 0)) || + (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_XFI_EQL, 0x18)) || + (err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL1002_LB_EN, + 0, 1 << 5))) + return err; + return 0; +} + +static int ael1002_intr_noop(struct cphy *phy) +{ + return 0; +} + +/* + * Get link status for a 10GBASE-R device. 
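+ *
+ * The link is reported up only when all three layers agree: PMA signal
+ * detect (1.10 bit 0), 10GBASE-R PCS block lock (3.32 bit 0) and XGXS
+ * lane alignment (4.24 bit 12). An illustrative evaluation with made-up
+ * register values:
+ *
+ *	stat0 = 0x0001;		PMA: signal detected
+ *	stat1 = 0x0001;		PCS: block lock
+ *	stat2 = 0x1000;		PHY XS: lanes aligned
+ *
+ * then (stat0 & stat1 & (stat2 >> 12)) & 1 == 1, i.e. link up.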
+ */ +static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed, + int *duplex, int *fc) +{ + if (link_ok) { + unsigned int stat0, stat1, stat2; + int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, + MDIO_PMA_RXDET, &stat0); + + if (!err) + err = t3_mdio_read(phy, MDIO_MMD_PCS, + MDIO_PCS_10GBRT_STAT1, &stat1); + if (!err) + err = t3_mdio_read(phy, MDIO_MMD_PHYXS, + MDIO_PHYXS_LNSTAT, &stat2); + if (err) + return err; + *link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1; + } + if (speed) + *speed = SPEED_10000; + if (duplex) + *duplex = DUPLEX_FULL; + return 0; +} + +static struct cphy_ops ael1002_ops = { + .reset = ael1002_reset, + .intr_enable = ael1002_intr_noop, + .intr_disable = ael1002_intr_noop, + .intr_clear = ael1002_intr_noop, + .intr_handler = ael1002_intr_noop, + .get_link_status = get_link_status_r, + .power_down = ael1002_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE, + "10GBASE-R"); + ael100x_txon(phy); + return 0; +} + +static int ael1006_reset(struct cphy *phy, int wait) +{ + return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait); +} + +static struct cphy_ops ael1006_ops = { + .reset = ael1006_reset, + .intr_enable = t3_phy_lasi_intr_enable, + .intr_disable = t3_phy_lasi_intr_disable, + .intr_clear = t3_phy_lasi_intr_clear, + .intr_handler = t3_phy_lasi_intr_handler, + .get_link_status = get_link_status_r, + .power_down = ael1002_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE, + "10GBASE-SR"); + ael100x_txon(phy); + return 0; +} + +/* + * Decode our module type. + */ +static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms) +{ + int v; + + if (delay_ms) + msleep(delay_ms); + + /* see SFF-8472 for below */ + v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3); + if (v < 0) + return v; + + if (v == 0x10) + return phy_modtype_sr; + if (v == 0x20) + return phy_modtype_lr; + if (v == 0x40) + return phy_modtype_lrm; + + v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6); + if (v < 0) + return v; + if (v != 4) + goto unknown; + + v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10); + if (v < 0) + return v; + + if (v & 0x80) { + v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12); + if (v < 0) + return v; + return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax; + } +unknown: + return phy_modtype_unknown; +} + +/* + * Code to support the Aeluros/NetLogic 2005 10Gb PHY. 
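+ *
+ * Register initialization for these PHYs is table-driven through
+ * set_phy_regs() above: an entry with clear_bits == 0xffff asks for a
+ * plain write of set_bits, anything else for a read-modify-write, and
+ * an all-zero entry terminates the table. A minimal sketch (values
+ * borrowed from tables in this file):
+ *
+ *	static const struct reg_val example[] = {
+ *		{ MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 },	plain write
+ *		{ MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 },		set bit 5
+ *		{ 0, 0, 0, 0 }					terminator
+ *	};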
+ */ +static int ael2005_setup_sr_edc(struct cphy *phy) +{ + static const struct reg_val regs[] = { + { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 }, + { MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a }, + { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 }, + { 0, 0, 0, 0 } + }; + + int i, err; + + err = set_phy_regs(phy, regs); + if (err) + return err; + + msleep(50); + + if (phy->priv != edc_sr) + err = t3_get_edc_fw(phy, EDC_OPT_AEL2005, + EDC_OPT_AEL2005_SIZE); + if (err) + return err; + + for (i = 0; i < EDC_OPT_AEL2005_SIZE / sizeof(u16) && !err; i += 2) + err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, + phy->phy_cache[i], + phy->phy_cache[i + 1]); + if (!err) + phy->priv = edc_sr; + return err; +} + +static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype) +{ + static const struct reg_val regs[] = { + { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 }, + { 0, 0, 0, 0 } + }; + static const struct reg_val preemphasis[] = { + { MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 }, + { MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 }, + { 0, 0, 0, 0 } + }; + int i, err; + + err = set_phy_regs(phy, regs); + if (!err && modtype == phy_modtype_twinax_long) + err = set_phy_regs(phy, preemphasis); + if (err) + return err; + + msleep(50); + + if (phy->priv != edc_twinax) + err = t3_get_edc_fw(phy, EDC_TWX_AEL2005, + EDC_TWX_AEL2005_SIZE); + if (err) + return err; + + for (i = 0; i < EDC_TWX_AEL2005_SIZE / sizeof(u16) && !err; i += 2) + err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, + phy->phy_cache[i], + phy->phy_cache[i + 1]); + if (!err) + phy->priv = edc_twinax; + return err; +} + +static int ael2005_get_module_type(struct cphy *phy, int delay_ms) +{ + int v; + unsigned int stat; + + v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, &stat); + if (v) + return v; + + if (stat & (1 << 8)) /* module absent */ + return phy_modtype_none; + + return ael2xxx_get_module_type(phy, delay_ms); +} + +static int ael2005_intr_enable(struct cphy *phy) +{ + int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x200); + return err ? err : t3_phy_lasi_intr_enable(phy); +} + +static int ael2005_intr_disable(struct cphy *phy) +{ + int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x100); + return err ? err : t3_phy_lasi_intr_disable(phy); +} + +static int ael2005_intr_clear(struct cphy *phy) +{ + int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00); + return err ? 
err : t3_phy_lasi_intr_clear(phy); +} + +static int ael2005_reset(struct cphy *phy, int wait) +{ + static const struct reg_val regs0[] = { + { MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 }, + { MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 }, + { MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 }, + { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 }, + { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8100 }, + { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 }, + { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 }, + { 0, 0, 0, 0 } + }; + static const struct reg_val regs1[] = { + { MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 }, + { MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 }, + { 0, 0, 0, 0 } + }; + + int err; + unsigned int lasi_ctrl; + + err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, + &lasi_ctrl); + if (err) + return err; + + err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 0); + if (err) + return err; + + msleep(125); + phy->priv = edc_none; + err = set_phy_regs(phy, regs0); + if (err) + return err; + + msleep(50); + + err = ael2005_get_module_type(phy, 0); + if (err < 0) + return err; + phy->modtype = err; + + if (err == phy_modtype_twinax || err == phy_modtype_twinax_long) + err = ael2005_setup_twinax_edc(phy, err); + else + err = ael2005_setup_sr_edc(phy); + if (err) + return err; + + err = set_phy_regs(phy, regs1); + if (err) + return err; + + /* reset wipes out interrupts, reenable them if they were on */ + if (lasi_ctrl & 1) + err = ael2005_intr_enable(phy); + return err; +} + +static int ael2005_intr_handler(struct cphy *phy) +{ + unsigned int stat; + int ret, edc_needed, cause = 0; + + ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_STAT, &stat); + if (ret) + return ret; + + if (stat & AEL2005_MODDET_IRQ) { + ret = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, + 0xd00); + if (ret) + return ret; + + /* modules have max 300 ms init time after hot plug */ + ret = ael2005_get_module_type(phy, 300); + if (ret < 0) + return ret; + + phy->modtype = ret; + if (ret == phy_modtype_none) + edc_needed = phy->priv; /* on unplug retain EDC */ + else if (ret == phy_modtype_twinax || + ret == phy_modtype_twinax_long) + edc_needed = edc_twinax; + else + edc_needed = edc_sr; + + if (edc_needed != phy->priv) { + ret = ael2005_reset(phy, 0); + return ret ? ret : cphy_cause_module_change; + } + cause = cphy_cause_module_change; + } + + ret = t3_phy_lasi_intr_handler(phy); + if (ret < 0) + return ret; + + ret |= cause; + return ret ? ret : cphy_cause_link_change; +} + +static struct cphy_ops ael2005_ops = { + .reset = ael2005_reset, + .intr_enable = ael2005_intr_enable, + .intr_disable = ael2005_intr_disable, + .intr_clear = ael2005_intr_clear, + .intr_handler = ael2005_intr_handler, + .get_link_status = get_link_status_r, + .power_down = ael1002_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE | + SUPPORTED_IRQ, "10GBASE-R"); + msleep(125); + return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS, 0, + 1 << 5); +} + +/* + * Setup EDC and other parameters for operation with an optical module. 
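+ *
+ * EDC microcode is fetched at most once per module type: t3_get_edc_fw()
+ * leaves (register, value) word pairs in phy->phy_cache, the pairs are
+ * replayed over MDIO as below, and phy->priv remembers which EDC image
+ * is loaded so replugging the same module type skips the reload:
+ *
+ *	for (i = 0; i < size / sizeof(u16) && !err; i += 2)
+ *		err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
+ *				    phy->phy_cache[i],
+ *				    phy->phy_cache[i + 1]);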
*/ +static int ael2020_setup_sr_edc(struct cphy *phy) +{ + static const struct reg_val regs[] = { + /* set CDR offset to 10 */ + { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a }, + + /* adjust 10G RX bias current */ + { MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 }, + { MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 }, + { MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 }, + + /* end */ + { 0, 0, 0, 0 } + }; + int err; + + err = set_phy_regs(phy, regs); + msleep(50); + if (err) + return err; + + phy->priv = edc_sr; + return 0; +} + +/* + * Setup EDC and other parameters for operation with a TWINAX module. + */ +static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype) +{ + /* set uC to 40MHz */ + static const struct reg_val uCclock40MHz[] = { + { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 }, + { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 }, + { 0, 0, 0, 0 } + }; + + /* activate uC clock */ + static const struct reg_val uCclockActivate[] = { + { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 }, + { 0, 0, 0, 0 } + }; + + /* set PC to start of SRAM and activate uC */ + static const struct reg_val uCactivate[] = { + { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 }, + { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 }, + { 0, 0, 0, 0 } + }; + int i, err; + + /* set uC clock and activate it */ + err = set_phy_regs(phy, uCclock40MHz); + msleep(500); + if (err) + return err; + err = set_phy_regs(phy, uCclockActivate); + msleep(500); + if (err) + return err; + + if (phy->priv != edc_twinax) + err = t3_get_edc_fw(phy, EDC_TWX_AEL2020, + EDC_TWX_AEL2020_SIZE); + if (err) + return err; + + for (i = 0; i < EDC_TWX_AEL2020_SIZE / sizeof(u16) && !err; i += 2) + err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, + phy->phy_cache[i], + phy->phy_cache[i + 1]); + /* activate uC */ + err = set_phy_regs(phy, uCactivate); + if (!err) + phy->priv = edc_twinax; + return err; +} + +/* + * Return the module type. + */ +static int ael2020_get_module_type(struct cphy *phy, int delay_ms) +{ + int v; + unsigned int stat; + + v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat); + if (v) + return v; + + if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) { + /* module absent */ + return phy_modtype_none; + } + + return ael2xxx_get_module_type(phy, delay_ms); +} + +/* + * Enable PHY interrupts. We enable "Module Detection" interrupts (on any + * state transition) and then generic Link Alarm Status Interrupt (LASI). + */ +static int ael2020_intr_enable(struct cphy *phy) +{ + static const struct reg_val regs[] = { + /* output Module's Loss Of Signal (LOS) to LED */ + { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT, + 0xffff, 0x4 }, + { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, + 0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) }, + + /* enable module detect status change interrupts */ + { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, + 0xffff, 0x2 << (AEL2020_GPIO_MODDET*4) }, + + /* end */ + { 0, 0, 0, 0 } + }; + int err, link_ok = 0; + + /* set up "link status" LED and enable module change interrupts */ + err = set_phy_regs(phy, regs); + if (err) + return err; + + err = get_link_status_r(phy, &link_ok, NULL, NULL, NULL); + if (err) + return err; + if (link_ok) + t3_link_changed(phy->adapter, + phy2portid(phy)); + + err = t3_phy_lasi_intr_enable(phy); + if (err) + return err; + + return 0; +} + +/* + * Disable PHY interrupts. The mirror of the above ...
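+ *
+ * AEL2020_GPIO_CTRL appears to take one control nibble per GPIO pin,
+ * hence the "value << (pin * 4)" writes used throughout, e.g.:
+ *
+ *	0x2 << (AEL2020_GPIO_MODDET * 4)	arm MODDET for interrupts
+ *	0x1 << (AEL2020_GPIO_MODDET * 4)	disarm MODDET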
+ */ +static int ael2020_intr_disable(struct cphy *phy) +{ + static const struct reg_val regs[] = { + /* reset "link status" LED to "off" */ + { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, + 0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) }, + + /* disable module detect status change interrupts */ + { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, + 0xffff, 0x1 << (AEL2020_GPIO_MODDET*4) }, + + /* end */ + { 0, 0, 0, 0 } + }; + int err; + + /* turn off "link status" LED and disable module change interrupts */ + err = set_phy_regs(phy, regs); + if (err) + return err; + + return t3_phy_lasi_intr_disable(phy); +} + +/* + * Clear PHY interrupt state. + */ +static int ael2020_intr_clear(struct cphy *phy) +{ + /* + * The GPIO Interrupt register on the AEL2020 is a "Latching High" + * (LH) register which is cleared to the current state when it's read. + * Thus, we simply read the register and discard the result. + */ + unsigned int stat; + int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat); + return err ? err : t3_phy_lasi_intr_clear(phy); +} + +static const struct reg_val ael2020_reset_regs[] = { + /* Erratum #2: CDRLOL asserted, causing PMA link down status */ + { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 }, + + /* force XAUI to send LF when RX_LOS is asserted */ + { MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 }, + + /* allow writes to transceiver module EEPROM on i2c bus */ + { MDIO_MMD_PMAPMD, 0xff02, 0xffff, 0x0023 }, + { MDIO_MMD_PMAPMD, 0xff03, 0xffff, 0x0000 }, + { MDIO_MMD_PMAPMD, 0xff04, 0xffff, 0x0000 }, + + /* end */ + { 0, 0, 0, 0 } +}; +/* + * Reset the PHY and put it into a canonical operating state. + */ +static int ael2020_reset(struct cphy *phy, int wait) +{ + int err; + unsigned int lasi_ctrl; + + /* grab current interrupt state */ + err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, + &lasi_ctrl); + if (err) + return err; + + err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125); + if (err) + return err; + msleep(100); + + /* basic initialization for all module types */ + phy->priv = edc_none; + err = set_phy_regs(phy, ael2020_reset_regs); + if (err) + return err; + + /* determine module type and perform appropriate initialization */ + err = ael2020_get_module_type(phy, 0); + if (err < 0) + return err; + phy->modtype = (u8)err; + if (err == phy_modtype_twinax || err == phy_modtype_twinax_long) + err = ael2020_setup_twinax_edc(phy, err); + else + err = ael2020_setup_sr_edc(phy); + if (err) + return err; + + /* reset wipes out interrupts, reenable them if they were on */ + if (lasi_ctrl & 1) + err = ael2005_intr_enable(phy); + return err; +} + +/* + * Handle a PHY interrupt. + */ +static int ael2020_intr_handler(struct cphy *phy) +{ + unsigned int stat; + int ret, edc_needed, cause = 0; + + ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat); + if (ret) + return ret; + + if (stat & (0x1 << AEL2020_GPIO_MODDET)) { + /* modules have max 300 ms init time after hot plug */ + ret = ael2020_get_module_type(phy, 300); + if (ret < 0) + return ret; + + phy->modtype = (u8)ret; + if (ret == phy_modtype_none) + edc_needed = phy->priv; /* on unplug retain EDC */ + else if (ret == phy_modtype_twinax || + ret == phy_modtype_twinax_long) + edc_needed = edc_twinax; + else + edc_needed = edc_sr; + + if (edc_needed != phy->priv) { + ret = ael2020_reset(phy, 0); + return ret ? ret : cphy_cause_module_change; + } + cause = cphy_cause_module_change; + } + + ret = t3_phy_lasi_intr_handler(phy); + if (ret < 0) + return ret; + + ret |= cause; + return ret ? 
ret : cphy_cause_link_change; +} + +static struct cphy_ops ael2020_ops = { + .reset = ael2020_reset, + .intr_enable = ael2020_intr_enable, + .intr_disable = ael2020_intr_disable, + .intr_clear = ael2020_intr_clear, + .intr_handler = ael2020_intr_handler, + .get_link_status = get_link_status_r, + .power_down = ael1002_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, + const struct mdio_ops *mdio_ops) +{ + int err; + + cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE | + SUPPORTED_IRQ, "10GBASE-R"); + msleep(125); + + err = set_phy_regs(phy, ael2020_reset_regs); + if (err) + return err; + return 0; +} + +/* + * Get link status for a 10GBASE-X device. + */ +static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed, + int *duplex, int *fc) +{ + if (link_ok) { + unsigned int stat0, stat1, stat2; + int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, + MDIO_PMA_RXDET, &stat0); + + if (!err) + err = t3_mdio_read(phy, MDIO_MMD_PCS, + MDIO_PCS_10GBX_STAT1, &stat1); + if (!err) + err = t3_mdio_read(phy, MDIO_MMD_PHYXS, + MDIO_PHYXS_LNSTAT, &stat2); + if (err) + return err; + *link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1; + } + if (speed) + *speed = SPEED_10000; + if (duplex) + *duplex = DUPLEX_FULL; + return 0; +} + +static struct cphy_ops qt2045_ops = { + .reset = ael1006_reset, + .intr_enable = t3_phy_lasi_intr_enable, + .intr_disable = t3_phy_lasi_intr_disable, + .intr_clear = t3_phy_lasi_intr_clear, + .intr_handler = t3_phy_lasi_intr_handler, + .get_link_status = get_link_status_x, + .power_down = ael1002_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + unsigned int stat; + + cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, + "10GBASE-CX4"); + + /* + * Some cards where the PHY is supposed to be at address 0 actually + * have it at 1. 
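+	 * An MDIO read of a vacant address floats to all-ones, so a
+	 * MDIO_STAT1 value of 0xffff at address 0 is taken to mean the
+	 * PHY really answers at address 1 and prtad is patched up below.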
+ */ + if (!phy_addr && + !t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat) && + stat == 0xffff) + phy->mdio.prtad = 1; + return 0; +} + +static int xaui_direct_reset(struct cphy *phy, int wait) +{ + return 0; +} + +static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + if (link_ok) { + unsigned int status; + int prtad = phy->mdio.prtad; + + status = t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT0, prtad)) | + t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT1, prtad)) | + t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT2, prtad)) | + t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT3, prtad)); + *link_ok = !(status & F_LOWSIG0); + } + if (speed) + *speed = SPEED_10000; + if (duplex) + *duplex = DUPLEX_FULL; + return 0; +} + +static int xaui_direct_power_down(struct cphy *phy, int enable) +{ + return 0; +} + +static struct cphy_ops xaui_direct_ops = { + .reset = xaui_direct_reset, + .intr_enable = ael1002_intr_noop, + .intr_disable = ael1002_intr_noop, + .intr_clear = ael1002_intr_noop, + .intr_handler = ael1002_intr_noop, + .get_link_status = xaui_direct_get_link_status, + .power_down = xaui_direct_power_down, +}; + +int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, + "10GBASE-CX4"); + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c new file mode 100644 index 0000000..341b7ef --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "common.h" +#include "regs.h" + +enum { + /* MDIO_DEV_PMA_PMD registers */ + AQ_LINK_STAT = 0xe800, + AQ_IMASK_PMA = 0xf000, + + /* MDIO_DEV_XGXS registers */ + AQ_XAUI_RX_CFG = 0xc400, + AQ_XAUI_TX_CFG = 0xe400, + + /* MDIO_DEV_ANEG registers */ + AQ_1G_CTRL = 0xc400, + AQ_ANEG_STAT = 0xc800, + + /* MDIO_DEV_VEND1 registers */ + AQ_FW_VERSION = 0x0020, + AQ_IFLAG_GLOBAL = 0xfc00, + AQ_IMASK_GLOBAL = 0xff00, +}; + +enum { + IMASK_PMA = 1 << 2, + IMASK_GLOBAL = 1 << 15, + ADV_1G_FULL = 1 << 15, + ADV_1G_HALF = 1 << 14, + ADV_10G_FULL = 1 << 12, + AQ_RESET = (1 << 14) | (1 << 15), + AQ_LOWPOWER = 1 << 12, +}; + +static int aq100x_reset(struct cphy *phy, int wait) +{ + /* + * Ignore the caller specified wait time; always wait for the reset to + * complete. Can take up to 3s. + */ + int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000); + + if (err) + CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n", + phy->mdio.prtad, err); + + return err; +} + +static int aq100x_intr_enable(struct cphy *phy) +{ + int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL); + return err; +} + +static int aq100x_intr_disable(struct cphy *phy) +{ + return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0); +} + +static int aq100x_intr_clear(struct cphy *phy) +{ + unsigned int v; + + t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v); + t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v); + + return 0; +} + +static int aq100x_intr_handler(struct cphy *phy) +{ + int err; + unsigned int cause, v; + + err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause); + if (err) + return err; + + /* Read (and reset) the latching version of the status */ + t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v); + + return cphy_cause_link_change; +} + +static int aq100x_power_down(struct cphy *phy, int off) +{ + return mdio_set_flag(&phy->mdio, phy->mdio.prtad, + MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_CTRL1_LPOWER, off); +} + +static int aq100x_autoneg_enable(struct cphy *phy) +{ + int err; + + err = aq100x_power_down(phy, 0); + if (!err) + err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, + MDIO_MMD_AN, MDIO_CTRL1, + BMCR_ANENABLE | BMCR_ANRESTART, 1); + + return err; +} + +static int aq100x_autoneg_restart(struct cphy *phy) +{ + int err; + + err = aq100x_power_down(phy, 0); + if (!err) + err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, + MDIO_MMD_AN, MDIO_CTRL1, + BMCR_ANENABLE | BMCR_ANRESTART, 1); + + return err; +} + +static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map) +{ + unsigned int adv; + int err; + + /* 10G advertisement */ + adv = 0; + if (advertise_map & ADVERTISED_10000baseT_Full) + adv |= ADV_10G_FULL; + err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, + ADV_10G_FULL, adv); + if (err) + return err; + + /* 1G advertisement */ + adv = 0; + if (advertise_map & ADVERTISED_1000baseT_Full) + adv |= ADV_1G_FULL; + if (advertise_map & ADVERTISED_1000baseT_Half) + adv |= ADV_1G_HALF; + err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL, + ADV_1G_FULL | ADV_1G_HALF, adv); + if (err) + return err; + + /* 100M, pause advertisement */ + adv = 0; + if (advertise_map & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise_map & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + if (advertise_map & ADVERTISED_Pause) + adv |= ADVERTISE_PAUSE_CAP; + if (advertise_map & ADVERTISED_Asym_Pause) + adv |= ADVERTISE_PAUSE_ASYM; + err = 
t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE, + 0xfe0, adv); + + return err; +} + +static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable) +{ + return mdio_set_flag(&phy->mdio, phy->mdio.prtad, + MDIO_MMD_PMAPMD, MDIO_CTRL1, + BMCR_LOOPBACK, enable); +} + +static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex) +{ + /* no can do */ + return -1; +} + +static int aq100x_get_link_status(struct cphy *phy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + int err; + unsigned int v; + + if (link_ok) { + err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v); + if (err) + return err; + + *link_ok = v & 1; + if (!*link_ok) + return 0; + } + + err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v); + if (err) + return err; + + if (speed) { + switch (v & 0x6) { + case 0x6: + *speed = SPEED_10000; + break; + case 0x4: + *speed = SPEED_1000; + break; + case 0x2: + *speed = SPEED_100; + break; + case 0x0: + *speed = SPEED_10; + break; + } + } + + if (duplex) + *duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF; + + return 0; +} + +static struct cphy_ops aq100x_ops = { + .reset = aq100x_reset, + .intr_enable = aq100x_intr_enable, + .intr_disable = aq100x_intr_disable, + .intr_clear = aq100x_intr_clear, + .intr_handler = aq100x_intr_handler, + .autoneg_enable = aq100x_autoneg_enable, + .autoneg_restart = aq100x_autoneg_restart, + .advertise = aq100x_advertise, + .set_loopback = aq100x_set_loopback, + .set_speed_duplex = aq100x_set_speed_duplex, + .get_link_status = aq100x_get_link_status, + .power_down = aq100x_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, + const struct mdio_ops *mdio_ops) +{ + unsigned int v, v2, gpio, wait; + int err; + + cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops, + SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | + SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_AUI, + "1000/10GBASE-T"); + + /* + * The PHY has been out of reset ever since the system powered up. So + * we do a hard reset over here. + */ + gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL; + t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0); + msleep(1); + t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio); + + /* + * Give it enough time to load the firmware and get ready for mdio. + */ + msleep(1000); + wait = 500; /* in 10ms increments */ + do { + err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v); + if (err || v == 0xffff) { + + /* Allow prep_adapter to succeed when ffff is read */ + + CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n", + phy_addr, err, v); + goto done; + } + + v &= AQ_RESET; + if (v) + msleep(10); + } while (v && --wait); + if (v) { + CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n", + phy_addr, v); + + goto done; /* let prep_adapter succeed */ + } + + /* Datasheet says 3s max but this has been observed */ + wait = (500 - wait) * 10 + 1000; + if (wait > 3000) + CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait); + + /* Firmware version check. */ + t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v); + if (v != 101) + CH_WARN(adapter, "PHY%d: unsupported firmware %d\n", + phy_addr, v); + + /* + * The PHY should start in really-low-power mode. Prepare it for normal + * operations. 
+ */ + err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v); + if (err) + return err; + if (v & AQ_LOWPOWER) { + err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1, + AQ_LOWPOWER, 0); + if (err) + return err; + msleep(10); + } else + CH_WARN(adapter, "PHY%d does not start in low power mode.\n", + phy_addr); + + /* + * Verify XAUI settings, but let prep succeed no matter what. + */ + v = v2 = 0; + t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v); + t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2); + if (v != 0x1b || v2 != 0x1b) + CH_WARN(adapter, + "PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n", + phy_addr, v, v2); + +done: + return err; +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h new file mode 100644 index 0000000..df01b63 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/common.h @@ -0,0 +1,775 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __CHELSIO_COMMON_H +#define __CHELSIO_COMMON_H + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/ctype.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/mdio.h> +#include "version.h" + +#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__) +#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__) +#define CH_ALERT(adap, fmt, ...) \ + dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__) + +/* + * More powerful macro that selectively prints messages based on msg_enable. + * For info and debugging messages. + */ +#define CH_MSG(adapter, level, category, fmt, ...) do { \ + if ((adapter)->msg_enable & NETIF_MSG_##category) \ + dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \ + ## __VA_ARGS__); \ +} while (0) + +#ifdef DEBUG +# define CH_DBG(adapter, category, fmt, ...) \ + CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__) +#else +# define CH_DBG(adapter, category, fmt, ...)
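+
+/*
+ * Example (hypothetical call site): with NETIF_MSG_HW set in
+ * adapter->msg_enable the line below prints at KERN_INFO through
+ * dev_printk(); with the bit clear nothing is printed.
+ *
+ *	CH_MSG(adapter, INFO, HW, "port %d reset done\n", port_id);
+ */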
+#endif + +/* Additional NETIF_MSG_* categories */ +#define NETIF_MSG_MMIO 0x8000000 + +enum { + MAX_NPORTS = 2, /* max # of ports */ + MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */ + EEPROMSIZE = 8192, /* Serial EEPROM size */ + SERNUM_LEN = 16, /* Serial # length */ + RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */ + TCB_SIZE = 128, /* TCB size */ + NMTUS = 16, /* size of MTU table */ + NCCTRL_WIN = 32, /* # of congestion control windows */ + PROTO_SRAM_LINES = 128, /* size of TP sram */ +}; + +#define MAX_RX_COALESCING_LEN 12288U + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +enum { + SUPPORTED_IRQ = 1 << 24 +}; + +enum { /* adapter interrupt-maintained statistics */ + STAT_ULP_CH0_PBL_OOB, + STAT_ULP_CH1_PBL_OOB, + STAT_PCI_CORR_ECC, + + IRQ_NUM_STATS /* keep last */ +}; + +#define TP_VERSION_MAJOR 1 +#define TP_VERSION_MINOR 1 +#define TP_VERSION_MICRO 0 + +#define S_TP_VERSION_MAJOR 16 +#define M_TP_VERSION_MAJOR 0xFF +#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR) +#define G_TP_VERSION_MAJOR(x) \ + (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR) + +#define S_TP_VERSION_MINOR 8 +#define M_TP_VERSION_MINOR 0xFF +#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR) +#define G_TP_VERSION_MINOR(x) \ + (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR) + +#define S_TP_VERSION_MICRO 0 +#define M_TP_VERSION_MICRO 0xFF +#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO) +#define G_TP_VERSION_MICRO(x) \ + (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO) + +enum { + SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */ + SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */ + SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */ +}; + +enum sge_context_type { /* SGE egress context types */ + SGE_CNTXT_RDMA = 0, + SGE_CNTXT_ETH = 2, + SGE_CNTXT_OFLD = 4, + SGE_CNTXT_CTRL = 5 +}; + +enum { + AN_PKT_SIZE = 32, /* async notification packet size */ + IMMED_PKT_SIZE = 48 /* packet size for immediate data */ +}; + +struct sg_ent { /* SGE scatter/gather entry */ + __be32 len[2]; + __be64 addr[2]; +}; + +#ifndef SGE_NUM_GENBITS +/* Must be 1 or 2 */ +# define SGE_NUM_GENBITS 2 +#endif + +#define TX_DESC_FLITS 16U +#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS) + +struct cphy; +struct adapter; + +struct mdio_ops { + int (*read)(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr); + int (*write)(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr, u16 val); + unsigned mode_support; +}; + +struct adapter_info { + unsigned char nports0; /* # of ports on channel 0 */ + unsigned char nports1; /* # of ports on channel 1 */ + unsigned char phy_base_addr; /* MDIO PHY base address */ + unsigned int gpio_out; /* GPIO output settings */ + unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */ + unsigned long caps; /* adapter capabilities */ + const struct mdio_ops *mdio_ops; /* MDIO operations */ + const char *desc; /* product description */ +}; + +struct mc5_stats { + unsigned long parity_err; + unsigned long active_rgn_full; + unsigned long nfa_srch_err; + unsigned long unknown_cmd; + unsigned long reqq_parity_err; + unsigned long dispq_parity_err; + unsigned long del_act_empty; +}; + +struct mc7_stats { + unsigned long corr_err; + unsigned long uncorr_err; + unsigned long parity_err; + unsigned long addr_err; +}; + +struct mac_stats { + u64 tx_octets; /* total # of octets in good frames */ + u64 tx_octets_bad; /* total # of octets in error frames */ + u64 tx_frames; /* all 
good frames */ + u64 tx_mcast_frames; /* good multicast frames */ + u64 tx_bcast_frames; /* good broadcast frames */ + u64 tx_pause; /* # of transmitted pause frames */ + u64 tx_deferred; /* frames with deferred transmissions */ + u64 tx_late_collisions; /* # of late collisions */ + u64 tx_total_collisions; /* # of total collisions */ + u64 tx_excess_collisions; /* frame errors from excessive collisions */ + u64 tx_underrun; /* # of Tx FIFO underruns */ + u64 tx_len_errs; /* # of Tx length errors */ + u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */ + u64 tx_excess_deferral; /* # of frames with excessive deferral */ + u64 tx_fcs_errs; /* # of frames with bad FCS */ + + u64 tx_frames_64; /* # of Tx frames in a particular range */ + u64 tx_frames_65_127; + u64 tx_frames_128_255; + u64 tx_frames_256_511; + u64 tx_frames_512_1023; + u64 tx_frames_1024_1518; + u64 tx_frames_1519_max; + + u64 rx_octets; /* total # of octets in good frames */ + u64 rx_octets_bad; /* total # of octets in error frames */ + u64 rx_frames; /* all good frames */ + u64 rx_mcast_frames; /* good multicast frames */ + u64 rx_bcast_frames; /* good broadcast frames */ + u64 rx_pause; /* # of received pause frames */ + u64 rx_fcs_errs; /* # of received frames with bad FCS */ + u64 rx_align_errs; /* alignment errors */ + u64 rx_symbol_errs; /* symbol errors */ + u64 rx_data_errs; /* data errors */ + u64 rx_sequence_errs; /* sequence errors */ + u64 rx_runt; /* # of runt frames */ + u64 rx_jabber; /* # of jabber frames */ + u64 rx_short; /* # of short frames */ + u64 rx_too_long; /* # of oversized frames */ + u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */ + + u64 rx_frames_64; /* # of Rx frames in a particular range */ + u64 rx_frames_65_127; + u64 rx_frames_128_255; + u64 rx_frames_256_511; + u64 rx_frames_512_1023; + u64 rx_frames_1024_1518; + u64 rx_frames_1519_max; + + u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */ + + unsigned long tx_fifo_parity_err; + unsigned long rx_fifo_parity_err; + unsigned long tx_fifo_urun; + unsigned long rx_fifo_ovfl; + unsigned long serdes_signal_loss; + unsigned long xaui_pcs_ctc_err; + unsigned long xaui_pcs_align_change; + + unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */ + unsigned long num_resets; /* # times reset due to stuck TX */ + + unsigned long link_faults; /* # detected link faults */ +}; + +struct tp_mib_stats { + u32 ipInReceive_hi; + u32 ipInReceive_lo; + u32 ipInHdrErrors_hi; + u32 ipInHdrErrors_lo; + u32 ipInAddrErrors_hi; + u32 ipInAddrErrors_lo; + u32 ipInUnknownProtos_hi; + u32 ipInUnknownProtos_lo; + u32 ipInDiscards_hi; + u32 ipInDiscards_lo; + u32 ipInDelivers_hi; + u32 ipInDelivers_lo; + u32 ipOutRequests_hi; + u32 ipOutRequests_lo; + u32 ipOutDiscards_hi; + u32 ipOutDiscards_lo; + u32 ipOutNoRoutes_hi; + u32 ipOutNoRoutes_lo; + u32 ipReasmTimeout; + u32 ipReasmReqds; + u32 ipReasmOKs; + u32 ipReasmFails; + + u32 reserved[8]; + + u32 tcpActiveOpens; + u32 tcpPassiveOpens; + u32 tcpAttemptFails; + u32 tcpEstabResets; + u32 tcpOutRsts; + u32 tcpCurrEstab; + u32 tcpInSegs_hi; + u32 tcpInSegs_lo; + u32 tcpOutSegs_hi; + u32 tcpOutSegs_lo; + u32 tcpRetransSeg_hi; + u32 tcpRetransSeg_lo; + u32 tcpInErrs_hi; + u32 tcpInErrs_lo; + u32 tcpRtoMin; + u32 tcpRtoMax; +}; + +struct tp_params { + unsigned int nchan; /* # of channels */ + unsigned int pmrx_size; /* total PMRX capacity */ + unsigned int pmtx_size; /* total PMTX capacity */ + unsigned int cm_size; /* total CM capacity */ + unsigned int chan_rx_size; /* per
channel Rx size */ + unsigned int chan_tx_size; /* per channel Tx size */ + unsigned int rx_pg_size; /* Rx page size */ + unsigned int tx_pg_size; /* Tx page size */ + unsigned int rx_num_pgs; /* # of Rx pages */ + unsigned int tx_num_pgs; /* # of Tx pages */ + unsigned int ntimer_qs; /* # of timer queues */ +}; + +struct qset_params { /* SGE queue set parameters */ + unsigned int polling; /* polling/interrupt service for rspq */ + unsigned int coalesce_usecs; /* irq coalescing timer */ + unsigned int rspq_size; /* # of entries in response queue */ + unsigned int fl_size; /* # of entries in regular free list */ + unsigned int jumbo_size; /* # of entries in jumbo free list */ + unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */ + unsigned int cong_thres; /* FL congestion threshold */ + unsigned int vector; /* Interrupt (line or vector) number */ +}; + +struct sge_params { + unsigned int max_pkt_size; /* max offload pkt size */ + struct qset_params qset[SGE_QSETS]; +}; + +struct mc5_params { + unsigned int mode; /* selects MC5 width */ + unsigned int nservers; /* size of server region */ + unsigned int nfilters; /* size of filter region */ + unsigned int nroutes; /* size of routing region */ +}; + +/* Default MC5 region sizes */ +enum { + DEFAULT_NSERVERS = 512, + DEFAULT_NFILTERS = 128 +}; + +/* MC5 modes, these must be non-0 */ +enum { + MC5_MODE_144_BIT = 1, + MC5_MODE_72_BIT = 2 +}; + +/* MC5 min active region size */ +enum { MC5_MIN_TIDS = 16 }; + +struct vpd_params { + unsigned int cclk; + unsigned int mclk; + unsigned int uclk; + unsigned int mdc; + unsigned int mem_timing; + u8 sn[SERNUM_LEN + 1]; + u8 eth_base[6]; + u8 port_type[MAX_NPORTS]; + unsigned short xauicfg[2]; +}; + +struct pci_params { + unsigned int vpd_cap_addr; + unsigned short speed; + unsigned char width; + unsigned char variant; +}; + +enum { + PCI_VARIANT_PCI, + PCI_VARIANT_PCIX_MODE1_PARITY, + PCI_VARIANT_PCIX_MODE1_ECC, + PCI_VARIANT_PCIX_266_MODE2, + PCI_VARIANT_PCIE +}; + +struct adapter_params { + struct sge_params sge; + struct mc5_params mc5; + struct tp_params tp; + struct vpd_params vpd; + struct pci_params pci; + + const struct adapter_info *info; + + unsigned short mtus[NMTUS]; + unsigned short a_wnd[NCCTRL_WIN]; + unsigned short b_wnd[NCCTRL_WIN]; + + unsigned int nports; /* # of ethernet ports */ + unsigned int chan_map; /* bitmap of in-use Tx channels */ + unsigned int stats_update_period; /* MAC stats accumulation period */ + unsigned int linkpoll_period; /* link poll period in 0.1s */ + unsigned int rev; /* chip revision */ + unsigned int offload; +}; + +enum { /* chip revisions */ + T3_REV_A = 0, + T3_REV_B = 2, + T3_REV_B2 = 3, + T3_REV_C = 4, +}; + +struct trace_params { + u32 sip; + u32 sip_mask; + u32 dip; + u32 dip_mask; + u16 sport; + u16 sport_mask; + u16 dport; + u16 dport_mask; + u32 vlan:12; + u32 vlan_mask:12; + u32 intf:4; + u32 intf_mask:4; + u8 proto; + u8 proto_mask; +}; + +struct link_config { + unsigned int supported; /* link capabilities */ + unsigned int advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_duplex; /* duplex user has requested */ + unsigned char duplex; /* actual link duplex */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned int link_ok; /* link up? 
*/ +}; + +#define SPEED_INVALID 0xffff +#define DUPLEX_INVALID 0xff + +struct mc5 { + struct adapter *adapter; + unsigned int tcam_size; + unsigned char part_type; + unsigned char parity_enabled; + unsigned char mode; + struct mc5_stats stats; +}; + +static inline unsigned int t3_mc5_size(const struct mc5 *p) +{ + return p->tcam_size; +} + +struct mc7 { + struct adapter *adapter; /* backpointer to adapter */ + unsigned int size; /* memory size in bytes */ + unsigned int width; /* MC7 interface width */ + unsigned int offset; /* register address offset for MC7 instance */ + const char *name; /* name of MC7 instance */ + struct mc7_stats stats; /* MC7 statistics */ +}; + +static inline unsigned int t3_mc7_size(const struct mc7 *p) +{ + return p->size; +} + +struct cmac { + struct adapter *adapter; + unsigned int offset; + unsigned int nucast; /* # of address filters for unicast MACs */ + unsigned int tx_tcnt; + unsigned int tx_xcnt; + u64 tx_mcnt; + unsigned int rx_xcnt; + unsigned int rx_ocnt; + u64 rx_mcnt; + unsigned int toggle_cnt; + unsigned int txen; + u64 rx_pause; + struct mac_stats stats; +}; + +enum { + MAC_DIRECTION_RX = 1, + MAC_DIRECTION_TX = 2, + MAC_RXFIFO_SIZE = 32768 +}; + +/* PHY loopback direction */ +enum { + PHY_LOOPBACK_TX = 1, + PHY_LOOPBACK_RX = 2 +}; + +/* PHY interrupt types */ +enum { + cphy_cause_link_change = 1, + cphy_cause_fifo_error = 2, + cphy_cause_module_change = 4, +}; + +/* PHY module types */ +enum { + phy_modtype_none, + phy_modtype_sr, + phy_modtype_lr, + phy_modtype_lrm, + phy_modtype_twinax, + phy_modtype_twinax_long, + phy_modtype_unknown +}; + +/* PHY operations */ +struct cphy_ops { + int (*reset)(struct cphy *phy, int wait); + + int (*intr_enable)(struct cphy *phy); + int (*intr_disable)(struct cphy *phy); + int (*intr_clear)(struct cphy *phy); + int (*intr_handler)(struct cphy *phy); + + int (*autoneg_enable)(struct cphy *phy); + int (*autoneg_restart)(struct cphy *phy); + + int (*advertise)(struct cphy *phy, unsigned int advertise_map); + int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable); + int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex); + int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed, + int *duplex, int *fc); + int (*power_down)(struct cphy *phy, int enable); + + u32 mmds; +}; +enum { + EDC_OPT_AEL2005 = 0, + EDC_OPT_AEL2005_SIZE = 1084, + EDC_TWX_AEL2005 = 1, + EDC_TWX_AEL2005_SIZE = 1464, + EDC_TWX_AEL2020 = 2, + EDC_TWX_AEL2020_SIZE = 1628, + EDC_MAX_SIZE = EDC_TWX_AEL2020_SIZE, /* Max cache size */ +}; + +/* A PHY instance */ +struct cphy { + u8 modtype; /* PHY module type */ + short priv; /* scratch pad */ + unsigned int caps; /* PHY capabilities */ + struct adapter *adapter; /* associated adapter */ + const char *desc; /* PHY description */ + unsigned long fifo_errors; /* FIFO over/under-flows */ + const struct cphy_ops *ops; /* PHY operations */ + struct mdio_if_info mdio; + u16 phy_cache[EDC_MAX_SIZE]; /* EDC cache */ +}; + +/* Convenience MDIO read/write wrappers */ +static inline int t3_mdio_read(struct cphy *phy, int mmd, int reg, + unsigned int *valp) +{ + int rc = phy->mdio.mdio_read(phy->mdio.dev, phy->mdio.prtad, mmd, reg); + *valp = (rc >= 0) ? rc : -1; + return (rc >= 0) ? 
0 : rc; +} + +static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg, + unsigned int val) +{ + return phy->mdio.mdio_write(phy->mdio.dev, phy->mdio.prtad, mmd, + reg, val); +} + +/* Convenience initializer */ +static inline void cphy_init(struct cphy *phy, struct adapter *adapter, + int phy_addr, struct cphy_ops *phy_ops, + const struct mdio_ops *mdio_ops, + unsigned int caps, const char *desc) +{ + phy->caps = caps; + phy->adapter = adapter; + phy->desc = desc; + phy->ops = phy_ops; + if (mdio_ops) { + phy->mdio.prtad = phy_addr; + phy->mdio.mmds = phy_ops->mmds; + phy->mdio.mode_support = mdio_ops->mode_support; + phy->mdio.mdio_read = mdio_ops->read; + phy->mdio.mdio_write = mdio_ops->write; + } +} + +/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */ +#define MAC_STATS_ACCUM_SECS 180 + +#define XGM_REG(reg_addr, idx) \ + ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR)) + +struct addr_val_pair { + unsigned int reg_addr; + unsigned int val; +}; + +#include "adapter.h" + +#ifndef PCI_VENDOR_ID_CHELSIO +# define PCI_VENDOR_ID_CHELSIO 0x1425 +#endif + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +#define adapter_info(adap) ((adap)->params.info) + +static inline int uses_xaui(const struct adapter *adap) +{ + return adapter_info(adap)->caps & SUPPORTED_AUI; +} + +static inline int is_10G(const struct adapter *adap) +{ + return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full; +} + +static inline int is_offload(const struct adapter *adap) +{ + return adap->params.offload; +} + +static inline unsigned int core_ticks_per_usec(const struct adapter *adap) +{ + return adap->params.vpd.cclk / 1000; +} + +static inline unsigned int is_pcie(const struct adapter *adap) +{ + return adap->params.pci.variant == PCI_VARIANT_PCIE; +} + +void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, + u32 val); +void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p, + int n, unsigned int offset); +int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp); +static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay) +{ + return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts, + delay, NULL); +} +int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear, + unsigned int set); +int t3_phy_reset(struct cphy *phy, int mmd, int wait); +int t3_phy_advertise(struct cphy *phy, unsigned int advert); +int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert); +int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex); +int t3_phy_lasi_intr_enable(struct cphy *phy); +int t3_phy_lasi_intr_disable(struct cphy *phy); +int t3_phy_lasi_intr_clear(struct cphy *phy); +int t3_phy_lasi_intr_handler(struct cphy *phy); + +void t3_intr_enable(struct adapter *adapter); +void t3_intr_disable(struct adapter *adapter); +void t3_intr_clear(struct adapter *adapter); +void t3_xgm_intr_enable(struct adapter *adapter, int idx); +void t3_xgm_intr_disable(struct adapter *adapter, int idx); +void t3_port_intr_enable(struct adapter *adapter, int idx); +void t3_port_intr_disable(struct adapter *adapter, int idx); +int t3_slow_intr_handler(struct adapter *adapter); +int t3_phy_intr_handler(struct adapter *adapter); + +void t3_link_changed(struct adapter *adapter, int port_id); +void t3_link_fault(struct adapter *adapter, int port_id); 
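As a quick illustration of how the accessors and macros above compose, here is a minimal usage sketch; announce_adapter is a hypothetical helper invented for this note, not part of the patch, and it relies only on the definitions from this header and the adapter.h it includes:

	/* Log a one-line adapter summary, then list the port indices */
	static void announce_adapter(struct adapter *adap)
	{
		int i;

		/* These predicates describe the whole adapter, not one port;
		 * core_ticks_per_usec() is by definition the core MHz.
		 */
		printk(KERN_DEBUG "%s: %s, %s, core clock %u MHz, offload %s\n",
		       adap->name,
		       is_pcie(adap) ? "PCI Express" : "PCI/PCI-X",
		       is_10G(adap) ? "10G" : "1G",
		       core_ticks_per_usec(adap),
		       is_offload(adap) ? "enabled" : "disabled");

		/* for_each_port() iterates over the params.nports indices */
		for_each_port(adap, i)
			printk(KERN_DEBUG "%s: port %d present\n",
			       adap->name, i);
	}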
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); +const struct adapter_info *t3_get_adapter_info(unsigned int board_id); +int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data); +int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data); +int t3_seeprom_wp(struct adapter *adapter, int enable); +int t3_get_tp_version(struct adapter *adapter, u32 *vers); +int t3_check_tpsram_version(struct adapter *adapter); +int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram, + unsigned int size); +int t3_set_proto_sram(struct adapter *adap, const u8 *data); +int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size); +int t3_get_fw_version(struct adapter *adapter, u32 *vers); +int t3_check_fw_version(struct adapter *adapter); +int t3_init_hw(struct adapter *adapter, u32 fw_params); +int t3_reset_adapter(struct adapter *adapter); +int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, + int reset); +int t3_replay_prep_adapter(struct adapter *adapter); +void t3_led_ready(struct adapter *adapter); +void t3_fatal_err(struct adapter *adapter); +void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on); +void t3_config_rss(struct adapter *adapter, unsigned int rss_config, + const u8 * cpus, const u16 *rspq); +int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr, + unsigned int n, unsigned int *valp); +int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, + u64 *buf); + +int t3_mac_reset(struct cmac *mac); +void t3b_pcs_reset(struct cmac *mac); +void t3_mac_disable_exact_filters(struct cmac *mac); +void t3_mac_enable_exact_filters(struct cmac *mac); +int t3_mac_enable(struct cmac *mac, int which); +int t3_mac_disable(struct cmac *mac, int which); +int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu); +int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev); +int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]); +int t3_mac_set_num_ucast(struct cmac *mac, int n); +const struct mac_stats *t3_mac_update_stats(struct cmac *mac); +int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc); +int t3b2_mac_watchdog_task(struct cmac *mac); + +void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode); +int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, + unsigned int nroutes); +void t3_mc5_intr_handler(struct mc5 *mc5); + +void t3_tp_set_offload_mode(struct adapter *adap, int enable); +void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps); +void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS], + unsigned short alpha[NCCTRL_WIN], + unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap); +void t3_config_trace_filter(struct adapter *adapter, + const struct trace_params *tp, int filter_index, + int invert, int enable); +int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched); + +void t3_sge_prep(struct adapter *adap, struct sge_params *p); +void t3_sge_init(struct adapter *adap, struct sge_params *p); +int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable, + enum sge_context_type type, int respq, u64 base_addr, + unsigned int size, unsigned int token, int gen, + unsigned int cidx); +int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id, + int gts_enable, u64 base_addr, unsigned int size, + unsigned int esize, unsigned int cong_thres, int gen, + unsigned int cidx); +int t3_sge_init_rspcntxt(struct 
adapter *adapter, unsigned int id, + int irq_vec_idx, u64 base_addr, unsigned int size, + unsigned int fl_thres, int gen, unsigned int cidx); +int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr, + unsigned int size, int rspq, int ovfl_mode, + unsigned int credits, unsigned int credit_thres); +int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable); +int t3_sge_disable_fl(struct adapter *adapter, unsigned int id); +int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id); +int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id); +int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, + unsigned int credits); + +int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, + const struct mdio_ops *mdio_ops); +int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +#endif /* __CHELSIO_COMMON_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h new file mode 100644 index 0000000..369fe71 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H +#define _CXGB3_OFFLOAD_CTL_DEFS_H + +enum { + GET_MAX_OUTSTANDING_WR = 0, + GET_TX_MAX_CHUNK = 1, + GET_TID_RANGE = 2, + GET_STID_RANGE = 3, + GET_RTBL_RANGE = 4, + GET_L2T_CAPACITY = 5, + GET_MTUS = 6, + GET_WR_LEN = 7, + GET_IFF_FROM_MAC = 8, + GET_DDP_PARAMS = 9, + GET_PORTS = 10, + + ULP_ISCSI_GET_PARAMS = 11, + ULP_ISCSI_SET_PARAMS = 12, + + RDMA_GET_PARAMS = 13, + RDMA_CQ_OP = 14, + RDMA_CQ_SETUP = 15, + RDMA_CQ_DISABLE = 16, + RDMA_CTRL_QP_SETUP = 17, + RDMA_GET_MEM = 18, + RDMA_GET_MIB = 19, + + GET_RX_PAGE_INFO = 50, + GET_ISCSI_IPV4ADDR = 51, + + GET_EMBEDDED_INFO = 70, +}; + +/* + * Structure used to describe a TID range. Valid TIDs are [base, base+num). + */ +struct tid_range { + unsigned int base; /* first TID */ + unsigned int num; /* number of TIDs in range */ +}; + +/* + * Structure used to request the size and contents of the MTU table. + */ +struct mtutab { + unsigned int size; /* # of entries in the MTU table */ + const unsigned short *mtus; /* the MTU table values */ +}; + +struct net_device; + +/* + * Structure used to request the adapter net_device owning a given MAC address. + */ +struct iff_mac { + struct net_device *dev; /* the net_device */ + const unsigned char *mac_addr; /* MAC address to lookup */ + u16 vlan_tag; +}; + +/* Structure used to request a port's iSCSI IPv4 address */ +struct iscsi_ipv4addr { + struct net_device *dev; /* the net_device */ + __be32 ipv4addr; /* the return iSCSI IPv4 address */ +}; + +struct pci_dev; + +/* + * Structure used to request the TCP DDP parameters. + */ +struct ddp_params { + unsigned int llimit; /* TDDP region start address */ + unsigned int ulimit; /* TDDP region end address */ + unsigned int tag_mask; /* TDDP tag mask */ + struct pci_dev *pdev; +}; + +struct adap_ports { + unsigned int nports; /* number of ports on this adapter */ + struct net_device *lldevs[2]; +}; + +/* + * Structure used to return information to the iscsi layer. + */ +struct ulp_iscsi_info { + unsigned int offset; + unsigned int llimit; + unsigned int ulimit; + unsigned int tagmask; + u8 pgsz_factor[4]; + unsigned int max_rxsz; + unsigned int max_txsz; + struct pci_dev *pdev; +}; + +/* + * Structure used to return information to the RDMA layer. + */ +struct rdma_info { + unsigned int tpt_base; /* TPT base address */ + unsigned int tpt_top; /* TPT last entry address */ + unsigned int pbl_base; /* PBL base address */ + unsigned int pbl_top; /* PBL last entry address */ + unsigned int rqt_base; /* RQT base address */ + unsigned int rqt_top; /* RQT last entry address */ + unsigned int udbell_len; /* user doorbell region length */ + unsigned long udbell_physbase; /* user doorbell physical start addr */ + void __iomem *kdb_addr; /* kernel doorbell register address */ + struct pci_dev *pdev; /* associated PCI device */ +}; + +/* + * Structure used to request an operation on an RDMA completion queue. + */ +struct rdma_cq_op { + unsigned int id; + unsigned int op; + unsigned int credits; +}; + +/* + * Structure used to setup RDMA completion queues. + */ +struct rdma_cq_setup { + unsigned int id; + unsigned long long base_addr; + unsigned int size; + unsigned int credits; + unsigned int credit_thres; + unsigned int ovfl_mode; +}; + +/* + * Structure used to setup the RDMA control egress context. + */ +struct rdma_ctrlqp_setup { + unsigned long long base_addr; + unsigned int size; +}; + +/* + * Offload TX/RX page information. 
+ */
+struct ofld_page_info {
+	unsigned int page_size;	/* Page size, should be a power of 2 */
+	unsigned int num;	/* Number of pages */
+};
+
+/*
+ * Structure used to get firmware and protocol engine versions.
+ */
+struct ch_embedded_info {
+	u32 fw_vers;
+	u32 tp_vers;
+};
+#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
new file mode 100644
index 0000000..920d918
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CHELSIO_DEFS_H
+#define _CHELSIO_DEFS_H
+
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include "t3cdev.h"
+
+#include "cxgb3_offload.h"
+
+#define VALIDATE_TID 1
+
+void *cxgb_alloc_mem(unsigned long size);
+void cxgb_free_mem(void *addr);
+
+/*
+ * Map an ATID or STID to its entry in the corresponding TID table.
+ */
+static inline union active_open_entry *atid2entry(const struct tid_info *t,
+						  unsigned int atid)
+{
+	return &t->atid_tab[atid - t->atid_base];
+}
+
+static inline union listen_entry *stid2entry(const struct tid_info *t,
+					     unsigned int stid)
+{
+	return &t->stid_tab[stid - t->stid_base];
+}
+
+/*
+ * Find the connection corresponding to a TID.
+ */
+static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
+					       unsigned int tid)
+{
+	struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+	    &(t->tid_tab[tid]) : NULL;
+
+	return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
+}
+
+/*
+ * Find the connection corresponding to a server TID.
+ */
+static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
+						unsigned int tid)
+{
+	union listen_entry *e;
+
+	if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
+		return NULL;
+
+	e = stid2entry(t, tid);
+	if ((void *)e->next >= (void *)t->tid_tab &&
+	    (void *)e->next < (void *)&t->atid_tab[t->natids])
+		return NULL;
+
+	return &e->t3c_tid;
+}
+
+/*
+ * Find the connection corresponding to an active-open TID.
+ */ +static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t, + unsigned int tid) +{ + union active_open_entry *e; + + if (tid < t->atid_base || tid >= t->atid_base + t->natids) + return NULL; + + e = atid2entry(t, tid); + if ((void *)e->next >= (void *)t->tid_tab && + (void *)e->next < (void *)&t->atid_tab[t->natids]) + return NULL; + + return &e->t3c_tid; +} + +int attach_t3cdev(struct t3cdev *dev); +void detach_t3cdev(struct t3cdev *dev); +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h new file mode 100644 index 0000000..b19e437 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __CHIOCTL_H__ +#define __CHIOCTL_H__ + +/* + * Ioctl commands specific to this driver. 
+ */ +enum { + CHELSIO_GETMTUTAB = 1029, + CHELSIO_SETMTUTAB = 1030, + CHELSIO_SET_PM = 1032, + CHELSIO_GET_PM = 1033, + CHELSIO_GET_MEM = 1038, + CHELSIO_LOAD_FW = 1041, + CHELSIO_SET_TRACE_FILTER = 1044, + CHELSIO_SET_QSET_PARAMS = 1045, + CHELSIO_GET_QSET_PARAMS = 1046, + CHELSIO_SET_QSET_NUM = 1047, + CHELSIO_GET_QSET_NUM = 1048, +}; + +struct ch_reg { + uint32_t cmd; + uint32_t addr; + uint32_t val; +}; + +struct ch_cntxt { + uint32_t cmd; + uint32_t cntxt_type; + uint32_t cntxt_id; + uint32_t data[4]; +}; + +/* context types */ +enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ }; + +struct ch_desc { + uint32_t cmd; + uint32_t queue_num; + uint32_t idx; + uint32_t size; + uint8_t data[128]; +}; + +struct ch_mem_range { + uint32_t cmd; + uint32_t mem_id; + uint32_t addr; + uint32_t len; + uint32_t version; + uint8_t buf[0]; +}; + +struct ch_qset_params { + uint32_t cmd; + uint32_t qset_idx; + int32_t txq_size[3]; + int32_t rspq_size; + int32_t fl_size[2]; + int32_t intr_lat; + int32_t polling; + int32_t lro; + int32_t cong_thres; + int32_t vector; + int32_t qnum; +}; + +struct ch_pktsched_params { + uint32_t cmd; + uint8_t sched; + uint8_t idx; + uint8_t min; + uint8_t max; + uint8_t binding; +}; + +#ifndef TCB_SIZE +# define TCB_SIZE 128 +#endif + +/* TCB size in 32-bit words */ +#define TCB_WORDS (TCB_SIZE / 4) + +enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */ + +struct ch_mtus { + uint32_t cmd; + uint32_t nmtus; + uint16_t mtus[NMTUS]; +}; + +struct ch_pm { + uint32_t cmd; + uint32_t tx_pg_sz; + uint32_t tx_num_pg; + uint32_t rx_pg_sz; + uint32_t rx_num_pg; + uint32_t pm_total; +}; + +struct ch_tcam { + uint32_t cmd; + uint32_t tcam_size; + uint32_t nservers; + uint32_t nroutes; + uint32_t nfilters; +}; + +struct ch_tcb { + uint32_t cmd; + uint32_t tcb_index; + uint32_t tcb_data[TCB_WORDS]; +}; + +struct ch_tcam_word { + uint32_t cmd; + uint32_t addr; + uint32_t buf[3]; +}; + +struct ch_trace { + uint32_t cmd; + uint32_t sip; + uint32_t sip_mask; + uint32_t dip; + uint32_t dip_mask; + uint16_t sport; + uint16_t sport_mask; + uint16_t dport; + uint16_t dport_mask; + uint32_t vlan:12; + uint32_t vlan_mask:12; + uint32_t intf:4; + uint32_t intf_mask:4; + uint8_t proto; + uint8_t proto_mask; + uint8_t invert_match:1; + uint8_t config_tx:1; + uint8_t config_rx:1; + uint8_t trace_tx:1; + uint8_t trace_rx:1; +}; + +#define SIOCCHIOCTL SIOCDEVPRIVATE + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c new file mode 100644 index 0000000..93b41a7 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -0,0 +1,3448 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/sockios.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <linux/rtnetlink.h>
+#include <linux/firmware.h>
+#include <linux/log2.h>
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+
+#include "common.h"
+#include "cxgb3_ioctl.h"
+#include "regs.h"
+#include "cxgb3_offload.h"
+#include "version.h"
+
+#include "cxgb3_ctl_defs.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+enum {
+	MAX_TXQ_ENTRIES = 16384,
+	MAX_CTRL_TXQ_ENTRIES = 1024,
+	MAX_RSPQ_ENTRIES = 16384,
+	MAX_RX_BUFFERS = 16384,
+	MAX_RX_JUMBO_BUFFERS = 16384,
+	MIN_TXQ_ENTRIES = 4,
+	MIN_CTRL_TXQ_ENTRIES = 4,
+	MIN_RSPQ_ENTRIES = 32,
+	MIN_FL_ENTRIES = 32
+};
+
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+#define CH_DEVICE(devid, idx) \
+	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
+
+static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
+	CH_DEVICE(0x20, 0),	/* PE9000 */
+	CH_DEVICE(0x21, 1),	/* T302E */
+	CH_DEVICE(0x22, 2),	/* T310E */
+	CH_DEVICE(0x23, 3),	/* T320X */
+	CH_DEVICE(0x24, 1),	/* T302X */
+	CH_DEVICE(0x25, 3),	/* T320E */
+	CH_DEVICE(0x26, 2),	/* T310X */
+	CH_DEVICE(0x30, 2),	/* T3B10 */
+	CH_DEVICE(0x31, 3),	/* T3B20 */
+	CH_DEVICE(0x32, 1),	/* T3B02 */
+	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
+	CH_DEVICE(0x36, 3),	/* S320E-CR */
+	CH_DEVICE(0x37, 7),	/* N320E-G2 */
+	{0,}
+};
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and pin interrupts
+ * msi = 0: force pin interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
+
+/*
+ * The driver enables offload by default.
+ * To disable it, use ofld_disable = 1.
+ */
+
+static int ofld_disable = 0;
+
+module_param(ofld_disable, int, 0644);
+MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
+
+/*
+ * We have work elements that we need to cancel when an interface is taken
+ * down. Normally the work elements would be executed by keventd but that
+ * can deadlock because of linkwatch.
+ * If our close method takes the rtnl
+ * lock and linkwatch is ahead of our work elements in keventd, linkwatch
+ * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
+ * for our work to complete. Get our own work queue to solve this.
+ */
+struct workqueue_struct *cxgb3_wq;
+
+/**
+ * link_report - show link status and link speed/duplex
+ * @dev: the port whose settings are to be reported
+ *
+ * Shows the link status, speed, and duplex of a port.
+ */
+static void link_report(struct net_device *dev)
+{
+	if (!netif_carrier_ok(dev))
+		printk(KERN_INFO "%s: link down\n", dev->name);
+	else {
+		const char *s = "10Mbps";
+		const struct port_info *p = netdev_priv(dev);
+
+		switch (p->link_config.speed) {
+		case SPEED_10000:
+			s = "10Gbps";
+			break;
+		case SPEED_1000:
+			s = "1000Mbps";
+			break;
+		case SPEED_100:
+			s = "100Mbps";
+			break;
+		}
+
+		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
+		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+	}
+}
+
+static void enable_tx_fifo_drain(struct adapter *adapter,
+				 struct port_info *pi)
+{
+	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
+			 F_ENDROPPKT);
+	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
+	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
+	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
+}
+
+static void disable_tx_fifo_drain(struct adapter *adapter,
+				  struct port_info *pi)
+{
+	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
+			 F_ENDROPPKT, 0);
+}
+
+void t3_os_link_fault(struct adapter *adap, int port_id, int state)
+{
+	struct net_device *dev = adap->port[port_id];
+	struct port_info *pi = netdev_priv(dev);
+
+	if (state == netif_carrier_ok(dev))
+		return;
+
+	if (state) {
+		struct cmac *mac = &pi->mac;
+
+		netif_carrier_on(dev);
+
+		disable_tx_fifo_drain(adap, pi);
+
+		/* Clear local faults */
+		t3_xgm_intr_disable(adap, pi->port_id);
+		t3_read_reg(adap, A_XGM_INT_STATUS + pi->mac.offset);
+		t3_write_reg(adap,
+			     A_XGM_INT_CAUSE + pi->mac.offset,
+			     F_XGM_INT);
+
+		t3_set_reg_field(adap,
+				 A_XGM_INT_ENABLE + pi->mac.offset,
+				 F_XGM_INT, F_XGM_INT);
+		t3_xgm_intr_enable(adap, pi->port_id);
+
+		t3_mac_enable(mac, MAC_DIRECTION_TX);
+	} else {
+		netif_carrier_off(dev);
+
+		/* Flush TX FIFO */
+		enable_tx_fifo_drain(adap, pi);
+	}
+	link_report(dev);
+}
+
+/**
+ * t3_os_link_changed - handle link status changes
+ * @adapter: the adapter associated with the link change
+ * @port_id: the port index whose link status has changed
+ * @link_stat: the new status of the link
+ * @speed: the new speed setting
+ * @duplex: the new duplex setting
+ * @pause: the new flow-control setting
+ *
+ * This is the OS-dependent handler for link status changes. The OS
+ * neutral handler takes care of most of the processing for these events,
+ * then calls this handler for any OS-specific processing.
+ */
+void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
+			int speed, int duplex, int pause)
+{
+	struct net_device *dev = adapter->port[port_id];
+	struct port_info *pi = netdev_priv(dev);
+	struct cmac *mac = &pi->mac;
+
+	/* Skip changes from disabled ports. */
+	if (!netif_running(dev))
+		return;
+
+	if (link_stat != netif_carrier_ok(dev)) {
+		if (link_stat) {
+			disable_tx_fifo_drain(adapter, pi);
+
+			t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+			/* Clear local faults */
+			t3_xgm_intr_disable(adapter, pi->port_id);
+			t3_read_reg(adapter, A_XGM_INT_STATUS +
+				    pi->mac.offset);
+			t3_write_reg(adapter,
+				     A_XGM_INT_CAUSE + pi->mac.offset,
+				     F_XGM_INT);
+
+			t3_set_reg_field(adapter,
+					 A_XGM_INT_ENABLE + pi->mac.offset,
+					 F_XGM_INT, F_XGM_INT);
+			t3_xgm_intr_enable(adapter, pi->port_id);
+
+			netif_carrier_on(dev);
+		} else {
+			netif_carrier_off(dev);
+
+			t3_xgm_intr_disable(adapter, pi->port_id);
+			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+			t3_set_reg_field(adapter,
+					 A_XGM_INT_ENABLE + pi->mac.offset,
+					 F_XGM_INT, 0);
+
+			if (is_10G(adapter))
+				pi->phy.ops->power_down(&pi->phy, 1);
+
+			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+			t3_mac_disable(mac, MAC_DIRECTION_RX);
+			t3_link_start(&pi->phy, mac, &pi->link_config);
+
+			/* Flush TX FIFO */
+			enable_tx_fifo_drain(adapter, pi);
+		}
+
+		link_report(dev);
+	}
+}
+
+/**
+ * t3_os_phymod_changed - handle PHY module changes
+ * @adap: the adapter whose PHY module changed
+ * @port_id: the index of the port with the changed PHY module
+ *
+ * This is the OS-dependent handler for PHY module changes. It is
+ * invoked when a PHY module is removed or inserted for any OS-specific
+ * processing.
+ */
+void t3_os_phymod_changed(struct adapter *adap, int port_id)
+{
+	static const char *mod_str[] = {
+		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
+	};
+
+	const struct net_device *dev = adap->port[port_id];
+	const struct port_info *pi = netdev_priv(dev);
+
+	if (pi->phy.modtype == phy_modtype_none)
+		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
+	else
+		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
+		       mod_str[pi->phy.modtype]);
+}
+
+static void cxgb_set_rxmode(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	t3_mac_set_rx_mode(&pi->mac, dev);
+}
+
+/**
+ * link_start - enable a port
+ * @dev: the device to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+static void link_start(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct cmac *mac = &pi->mac;
+
+	t3_mac_reset(mac);
+	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
+	t3_mac_set_mtu(mac, dev->mtu);
+	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
+	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
+	t3_mac_set_rx_mode(mac, dev);
+	t3_link_start(&pi->phy, mac, &pi->link_config);
+	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static inline void cxgb_disable_msi(struct adapter *adapter)
+{
+	if (adapter->flags & USING_MSIX) {
+		pci_disable_msix(adapter->pdev);
+		adapter->flags &= ~USING_MSIX;
+	} else if (adapter->flags & USING_MSI) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~USING_MSI;
+	}
+}
+
+/*
+ * Interrupt handler for asynchronous events used with MSI-X.
+ */
+static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
+{
+	t3_slow_intr_handler(cookie);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */ +static void name_msix_vecs(struct adapter *adap) +{ + int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1; + + snprintf(adap->msix_info[0].desc, n, "%s", adap->name); + adap->msix_info[0].desc[n] = 0; + + for_each_port(adap, j) { + struct net_device *d = adap->port[j]; + const struct port_info *pi = netdev_priv(d); + + for (i = 0; i < pi->nqsets; i++, msi_idx++) { + snprintf(adap->msix_info[msi_idx].desc, n, + "%s-%d", d->name, pi->first_qset + i); + adap->msix_info[msi_idx].desc[n] = 0; + } + } +} + +static int request_msix_data_irqs(struct adapter *adap) +{ + int i, j, err, qidx = 0; + + for_each_port(adap, i) { + int nqsets = adap2pinfo(adap, i)->nqsets; + + for (j = 0; j < nqsets; ++j) { + err = request_irq(adap->msix_info[qidx + 1].vec, + t3_intr_handler(adap, + adap->sge.qs[qidx]. + rspq.polling), 0, + adap->msix_info[qidx + 1].desc, + &adap->sge.qs[qidx]); + if (err) { + while (--qidx >= 0) + free_irq(adap->msix_info[qidx + 1].vec, + &adap->sge.qs[qidx]); + return err; + } + qidx++; + } + } + return 0; +} + +static void free_irq_resources(struct adapter *adapter) +{ + if (adapter->flags & USING_MSIX) { + int i, n = 0; + + free_irq(adapter->msix_info[0].vec, adapter); + for_each_port(adapter, i) + n += adap2pinfo(adapter, i)->nqsets; + + for (i = 0; i < n; ++i) + free_irq(adapter->msix_info[i + 1].vec, + &adapter->sge.qs[i]); + } else + free_irq(adapter->pdev->irq, adapter); +} + +static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, + unsigned long n) +{ + int attempts = 10; + + while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { + if (!--attempts) + return -ETIMEDOUT; + msleep(10); + } + return 0; +} + +static int init_tp_parity(struct adapter *adap) +{ + int i; + struct sk_buff *skb; + struct cpl_set_tcb_field *greq; + unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; + + t3_tp_set_offload_mode(adap, 1); + + for (i = 0; i < 16; i++) { + struct cpl_smt_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + skb = adap->nofail_skb; + if (!skb) + goto alloc_skb_fail; + + req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req)); + memset(req, 0, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i)); + req->mtu_idx = NMTUS - 1; + req->iff = i; + t3_mgmt_tx(adap, skb); + if (skb == adap->nofail_skb) { + await_mgmt_replies(adap, cnt, i + 1); + adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); + if (!adap->nofail_skb) + goto alloc_skb_fail; + } + } + + for (i = 0; i < 2048; i++) { + struct cpl_l2t_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + skb = adap->nofail_skb; + if (!skb) + goto alloc_skb_fail; + + req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + memset(req, 0, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i)); + req->params = htonl(V_L2T_W_IDX(i)); + t3_mgmt_tx(adap, skb); + if (skb == adap->nofail_skb) { + await_mgmt_replies(adap, cnt, 16 + i + 1); + adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); + if (!adap->nofail_skb) + goto alloc_skb_fail; + } + } + + for (i = 0; i < 2048; i++) { + struct cpl_rte_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + skb = adap->nofail_skb; + if (!skb) + goto alloc_skb_fail; + + req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req)); + memset(req, 0, sizeof(*req)); + req->wr.wr_hi = 
htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i)); + req->l2t_idx = htonl(V_L2T_W_IDX(i)); + t3_mgmt_tx(adap, skb); + if (skb == adap->nofail_skb) { + await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1); + adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); + if (!adap->nofail_skb) + goto alloc_skb_fail; + } + } + + skb = alloc_skb(sizeof(*greq), GFP_KERNEL); + if (!skb) + skb = adap->nofail_skb; + if (!skb) + goto alloc_skb_fail; + + greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq)); + memset(greq, 0, sizeof(*greq)); + greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0)); + greq->mask = cpu_to_be64(1); + t3_mgmt_tx(adap, skb); + + i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); + if (skb == adap->nofail_skb) { + i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); + adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); + } + + t3_tp_set_offload_mode(adap, 0); + return i; + +alloc_skb_fail: + t3_tp_set_offload_mode(adap, 0); + return -ENOMEM; +} + +/** + * setup_rss - configure RSS + * @adap: the adapter + * + * Sets up RSS to distribute packets to multiple receive queues. We + * configure the RSS CPU lookup table to distribute to the number of HW + * receive queues, and the response queue lookup table to narrow that + * down to the response queues actually configured for each port. + * We always configure the RSS mapping for two ports since the mapping + * table has plenty of entries. + */ +static void setup_rss(struct adapter *adap) +{ + int i; + unsigned int nq0 = adap2pinfo(adap, 0)->nqsets; + unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1; + u8 cpus[SGE_QSETS + 1]; + u16 rspq_map[RSS_TABLE_SIZE]; + + for (i = 0; i < SGE_QSETS; ++i) + cpus[i] = i; + cpus[SGE_QSETS] = 0xff; /* terminator */ + + for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) { + rspq_map[i] = i % nq0; + rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0; + } + + t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | + F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | + V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); +} + +static void ring_dbs(struct adapter *adap) +{ + int i, j; + + for (i = 0; i < SGE_QSETS; i++) { + struct sge_qset *qs = &adap->sge.qs[i]; + + if (qs->adap) + for (j = 0; j < SGE_TXQ_PER_SET; j++) + t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id)); + } +} + +static void init_napi(struct adapter *adap) +{ + int i; + + for (i = 0; i < SGE_QSETS; i++) { + struct sge_qset *qs = &adap->sge.qs[i]; + + if (qs->adap) + netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll, + 64); + } + + /* + * netif_napi_add() can be called only once per napi_struct because it + * adds each new napi_struct to a list. Be careful not to call it a + * second time, e.g., during EEH recovery, by making a note of it. + */ + adap->flags |= NAPI_INIT; +} + +/* + * Wait until all NAPI handlers are descheduled. This includes the handlers of + * both netdevices representing interfaces and the dummy ones for the extra + * queues. 
+ */ +static void quiesce_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < SGE_QSETS; i++) + if (adap->sge.qs[i].adap) + napi_disable(&adap->sge.qs[i].napi); +} + +static void enable_all_napi(struct adapter *adap) +{ + int i; + for (i = 0; i < SGE_QSETS; i++) + if (adap->sge.qs[i].adap) + napi_enable(&adap->sge.qs[i].napi); +} + +/** + * setup_sge_qsets - configure SGE Tx/Rx/response queues + * @adap: the adapter + * + * Determines how many sets of SGE queues to use and initializes them. + * We support multiple queue sets per port if we have MSI-X, otherwise + * just one queue set per port. + */ +static int setup_sge_qsets(struct adapter *adap) +{ + int i, j, err, irq_idx = 0, qset_idx = 0; + unsigned int ntxq = SGE_TXQ_PER_SET; + + if (adap->params.rev > 0 && !(adap->flags & USING_MSI)) + irq_idx = -1; + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + struct port_info *pi = netdev_priv(dev); + + pi->qs = &adap->sge.qs[pi->first_qset]; + for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { + err = t3_sge_alloc_qset(adap, qset_idx, 1, + (adap->flags & USING_MSIX) ? qset_idx + 1 : + irq_idx, + &adap->params.sge.qset[qset_idx], ntxq, dev, + netdev_get_tx_queue(dev, j)); + if (err) { + t3_free_sge_resources(adap); + return err; + } + } + } + + return 0; +} + +static ssize_t attr_show(struct device *d, char *buf, + ssize_t(*format) (struct net_device *, char *)) +{ + ssize_t len; + + /* Synchronize with ioctls that may shut down the device */ + rtnl_lock(); + len = (*format) (to_net_dev(d), buf); + rtnl_unlock(); + return len; +} + +static ssize_t attr_store(struct device *d, + const char *buf, size_t len, + ssize_t(*set) (struct net_device *, unsigned int), + unsigned int min_val, unsigned int max_val) +{ + char *endp; + ssize_t ret; + unsigned int val; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + val = simple_strtoul(buf, &endp, 0); + if (endp == buf || val < min_val || val > max_val) + return -EINVAL; + + rtnl_lock(); + ret = (*set) (to_net_dev(d), val); + if (!ret) + ret = len; + rtnl_unlock(); + return ret; +} + +#define CXGB3_SHOW(name, val_expr) \ +static ssize_t format_##name(struct net_device *dev, char *buf) \ +{ \ + struct port_info *pi = netdev_priv(dev); \ + struct adapter *adap = pi->adapter; \ + return sprintf(buf, "%u\n", val_expr); \ +} \ +static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ + char *buf) \ +{ \ + return attr_show(d, buf, format_##name); \ +} + +static ssize_t set_nfilters(struct net_device *dev, unsigned int val) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + int min_tids = is_offload(adap) ? 
MC5_MIN_TIDS : 0; + + if (adap->flags & FULL_INIT_DONE) + return -EBUSY; + if (val && adap->params.rev == 0) + return -EINVAL; + if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers - + min_tids) + return -EINVAL; + adap->params.mc5.nfilters = val; + return 0; +} + +static ssize_t store_nfilters(struct device *d, struct device_attribute *attr, + const char *buf, size_t len) +{ + return attr_store(d, buf, len, set_nfilters, 0, ~0); +} + +static ssize_t set_nservers(struct net_device *dev, unsigned int val) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + if (adap->flags & FULL_INIT_DONE) + return -EBUSY; + if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters - + MC5_MIN_TIDS) + return -EINVAL; + adap->params.mc5.nservers = val; + return 0; +} + +static ssize_t store_nservers(struct device *d, struct device_attribute *attr, + const char *buf, size_t len) +{ + return attr_store(d, buf, len, set_nservers, 0, ~0); +} + +#define CXGB3_ATTR_R(name, val_expr) \ +CXGB3_SHOW(name, val_expr) \ +static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) + +#define CXGB3_ATTR_RW(name, val_expr, store_method) \ +CXGB3_SHOW(name, val_expr) \ +static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method) + +CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5)); +CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters); +CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers); + +static struct attribute *cxgb3_attrs[] = { + &dev_attr_cam_size.attr, + &dev_attr_nfilters.attr, + &dev_attr_nservers.attr, + NULL +}; + +static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs }; + +static ssize_t tm_attr_show(struct device *d, + char *buf, int sched) +{ + struct port_info *pi = netdev_priv(to_net_dev(d)); + struct adapter *adap = pi->adapter; + unsigned int v, addr, bpt, cpt; + ssize_t len; + + addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; + rtnl_lock(); + t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr); + v = t3_read_reg(adap, A_TP_TM_PIO_DATA); + if (sched & 1) + v >>= 16; + bpt = (v >> 8) & 0xff; + cpt = v & 0xff; + if (!cpt) + len = sprintf(buf, "disabled\n"); + else { + v = (adap->params.vpd.cclk * 1000) / cpt; + len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125); + } + rtnl_unlock(); + return len; +} + +static ssize_t tm_attr_store(struct device *d, + const char *buf, size_t len, int sched) +{ + struct port_info *pi = netdev_priv(to_net_dev(d)); + struct adapter *adap = pi->adapter; + unsigned int val; + char *endp; + ssize_t ret; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + val = simple_strtoul(buf, &endp, 0); + if (endp == buf || val > 10000000) + return -EINVAL; + + rtnl_lock(); + ret = t3_config_sched(adap, val, sched); + if (!ret) + ret = len; + rtnl_unlock(); + return ret; +} + +#define TM_ATTR(name, sched) \ +static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ + char *buf) \ +{ \ + return tm_attr_show(d, buf, sched); \ +} \ +static ssize_t store_##name(struct device *d, struct device_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + return tm_attr_store(d, buf, len, sched); \ +} \ +static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name) + +TM_ATTR(sched0, 0); +TM_ATTR(sched1, 1); +TM_ATTR(sched2, 2); +TM_ATTR(sched3, 3); +TM_ATTR(sched4, 4); +TM_ATTR(sched5, 5); +TM_ATTR(sched6, 6); +TM_ATTR(sched7, 7); + +static struct attribute *offload_attrs[] = { + &dev_attr_sched0.attr, + &dev_attr_sched1.attr, + &dev_attr_sched2.attr, + &dev_attr_sched3.attr, + 
&dev_attr_sched4.attr, + &dev_attr_sched5.attr, + &dev_attr_sched6.attr, + &dev_attr_sched7.attr, + NULL +}; + +static struct attribute_group offload_attr_group = {.attrs = offload_attrs }; + +/* + * Sends an sk_buff to an offload queue driver + * after dealing with any active network taps. + */ +static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = t3_offload_tx(tdev, skb); + local_bh_enable(); + return ret; +} + +static int write_smt_entry(struct adapter *adapter, int idx) +{ + struct cpl_smt_write_req *req; + struct port_info *pi = netdev_priv(adapter->port[idx]); + struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL); + + if (!skb) + return -ENOMEM; + + req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx)); + req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */ + req->iff = idx; + memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN); + memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN); + skb->priority = 1; + offload_tx(&adapter->tdev, skb); + return 0; +} + +static int init_smt(struct adapter *adapter) +{ + int i; + + for_each_port(adapter, i) + write_smt_entry(adapter, i); + return 0; +} + +static void init_port_mtus(struct adapter *adapter) +{ + unsigned int mtus = adapter->port[0]->mtu; + + if (adapter->port[1]) + mtus |= adapter->port[1]->mtu << 16; + t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus); +} + +static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, + int hi, int port) +{ + struct sk_buff *skb; + struct mngt_pktsched_wr *req; + int ret; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + skb = adap->nofail_skb; + if (!skb) + return -ENOMEM; + + req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req)); + req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); + req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; + req->sched = sched; + req->idx = qidx; + req->min = lo; + req->max = hi; + req->binding = port; + ret = t3_mgmt_tx(adap, skb); + if (skb == adap->nofail_skb) { + adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field), + GFP_KERNEL); + if (!adap->nofail_skb) + ret = -ENOMEM; + } + + return ret; +} + +static int bind_qsets(struct adapter *adap) +{ + int i, j, err = 0; + + for_each_port(adap, i) { + const struct port_info *pi = adap2pinfo(adap, i); + + for (j = 0; j < pi->nqsets; ++j) { + int ret = send_pktsched_cmd(adap, 1, + pi->first_qset + j, -1, + -1, i); + if (ret) + err = ret; + } + } + + return err; +} + +#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \ + __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO) +#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin" +#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \ + __stringify(TP_VERSION_MINOR) "." 
__stringify(TP_VERSION_MICRO) +#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin" +#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin" +#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin" +#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin" +MODULE_FIRMWARE(FW_FNAME); +MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin"); +MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin"); +MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME); +MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME); +MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME); + +static inline const char *get_edc_fw_name(int edc_idx) +{ + const char *fw_name = NULL; + + switch (edc_idx) { + case EDC_OPT_AEL2005: + fw_name = AEL2005_OPT_EDC_NAME; + break; + case EDC_TWX_AEL2005: + fw_name = AEL2005_TWX_EDC_NAME; + break; + case EDC_TWX_AEL2020: + fw_name = AEL2020_TWX_EDC_NAME; + break; + } + return fw_name; +} + +int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size) +{ + struct adapter *adapter = phy->adapter; + const struct firmware *fw; + char buf[64]; + u32 csum; + const __be32 *p; + u16 *cache = phy->phy_cache; + int i, ret; + + snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx)); + + ret = request_firmware(&fw, buf, &adapter->pdev->dev); + if (ret < 0) { + dev_err(&adapter->pdev->dev, + "could not upgrade firmware: unable to load %s\n", + buf); + return ret; + } + + /* check size, take checksum in account */ + if (fw->size > size + 4) { + CH_ERR(adapter, "firmware image too large %u, expected %d\n", + (unsigned int)fw->size, size + 4); + ret = -EINVAL; + } + + /* compute checksum */ + p = (const __be32 *)fw->data; + for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++) + csum += ntohl(p[i]); + + if (csum != 0xffffffff) { + CH_ERR(adapter, "corrupted firmware image, checksum %u\n", + csum); + ret = -EINVAL; + } + + for (i = 0; i < size / 4 ; i++) { + *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16; + *cache++ = be32_to_cpu(p[i]) & 0xffff; + } + + release_firmware(fw); + + return ret; +} + +static int upgrade_fw(struct adapter *adap) +{ + int ret; + const struct firmware *fw; + struct device *dev = &adap->pdev->dev; + + ret = request_firmware(&fw, FW_FNAME, dev); + if (ret < 0) { + dev_err(dev, "could not upgrade firmware: unable to load %s\n", + FW_FNAME); + return ret; + } + ret = t3_load_fw(adap, fw->data, fw->size); + release_firmware(fw); + + if (ret == 0) + dev_info(dev, "successful upgrade to firmware %d.%d.%d\n", + FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); + else + dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n", + FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); + + return ret; +} + +static inline char t3rev2char(struct adapter *adapter) +{ + char rev = 0; + + switch(adapter->params.rev) { + case T3_REV_B: + case T3_REV_B2: + rev = 'b'; + break; + case T3_REV_C: + rev = 'c'; + break; + } + return rev; +} + +static int update_tpsram(struct adapter *adap) +{ + const struct firmware *tpsram; + char buf[64]; + struct device *dev = &adap->pdev->dev; + int ret; + char rev; + + rev = t3rev2char(adap); + if (!rev) + return 0; + + snprintf(buf, sizeof(buf), TPSRAM_NAME, rev); + + ret = request_firmware(&tpsram, buf, dev); + if (ret < 0) { + dev_err(dev, "could not load TP SRAM: unable to load %s\n", + buf); + return ret; + } + + ret = t3_check_tpsram(adap, tpsram->data, tpsram->size); + if (ret) + goto release_tpsram; + + ret = t3_set_proto_sram(adap, tpsram->data); + if (ret == 0) + dev_info(dev, + "successful update of protocol engine " + "to %d.%d.%d\n", + TP_VERSION_MAJOR, TP_VERSION_MINOR, 
TP_VERSION_MICRO);
+	else
+		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
+			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+	if (ret)
+		dev_err(dev, "loading protocol SRAM failed\n");
+
+release_tpsram:
+	release_firmware(tpsram);
+
+	return ret;
+}
+
+/**
+ * cxgb_up - enable the adapter
+ * @adap: adapter being enabled
+ *
+ * Called when the first port is enabled, this function performs the
+ * actions necessary to make an adapter operational, such as completing
+ * the initialization of HW modules, and enabling interrupts.
+ *
+ * Must be called with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adap)
+{
+	int err;
+
+	if (!(adap->flags & FULL_INIT_DONE)) {
+		err = t3_check_fw_version(adap);
+		if (err == -EINVAL) {
+			err = upgrade_fw(adap);
+			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
+				FW_VERSION_MAJOR, FW_VERSION_MINOR,
+				FW_VERSION_MICRO, err ? "failed" : "succeeded");
+		}
+
+		err = t3_check_tpsram_version(adap);
+		if (err == -EINVAL) {
+			err = update_tpsram(adap);
+			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
+				TP_VERSION_MAJOR, TP_VERSION_MINOR,
+				TP_VERSION_MICRO, err ? "failed" : "succeeded");
+		}
+
+		/*
+		 * Clear interrupts now to catch errors if t3_init_hw fails.
+		 * We clear them again later as initialization may trigger
+		 * conditions that can interrupt.
+		 */
+		t3_intr_clear(adap);
+
+		err = t3_init_hw(adap, 0);
+		if (err)
+			goto out;
+
+		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
+		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
+
+		err = setup_sge_qsets(adap);
+		if (err)
+			goto out;
+
+		setup_rss(adap);
+		if (!(adap->flags & NAPI_INIT))
+			init_napi(adap);
+
+		t3_start_sge_timers(adap);
+		adap->flags |= FULL_INIT_DONE;
+	}
+
+	t3_intr_clear(adap);
+
+	if (adap->flags & USING_MSIX) {
+		name_msix_vecs(adap);
+		err = request_irq(adap->msix_info[0].vec,
+				  t3_async_intr_handler, 0,
+				  adap->msix_info[0].desc, adap);
+		if (err)
+			goto irq_err;
+
+		err = request_msix_data_irqs(adap);
+		if (err) {
+			free_irq(adap->msix_info[0].vec, adap);
+			goto irq_err;
+		}
+	} else if ((err = request_irq(adap->pdev->irq,
+				      t3_intr_handler(adap,
+						      adap->sge.qs[0].rspq.
+						      polling),
+				      (adap->flags & USING_MSI) ?
+				      0 : IRQF_SHARED,
+				      adap->name, adap)))
+		goto irq_err;
+
+	enable_all_napi(adap);
+	t3_sge_start(adap);
+	t3_intr_enable(adap);
+
+	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
+	    is_offload(adap) && init_tp_parity(adap) == 0)
+		adap->flags |= TP_PARITY_INIT;
+
+	if (adap->flags & TP_PARITY_INIT) {
+		t3_write_reg(adap, A_TP_INT_CAUSE,
+			     F_CMCACHEPERR | F_ARPLUTPERR);
+		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
+	}
+
+	if (!(adap->flags & QUEUES_BOUND)) {
+		int ret = bind_qsets(adap);
+
+		if (ret < 0) {
+			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
+			t3_intr_disable(adap);
+			free_irq_resources(adap);
+			err = ret;
+			goto out;
+		}
+		adap->flags |= QUEUES_BOUND;
+	}
+
+out:
+	return err;
+irq_err:
+	CH_ERR(adap, "request_irq failed, err %d\n", err);
+	goto out;
+}
+
+/*
+ * Release resources when all the ports and offloading have been stopped.
+ */ +static void cxgb_down(struct adapter *adapter, int on_wq) +{ + t3_sge_stop(adapter); + spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */ + t3_intr_disable(adapter); + spin_unlock_irq(&adapter->work_lock); + + free_irq_resources(adapter); + quiesce_rx(adapter); + t3_sge_stop(adapter); + if (!on_wq) + flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */ +} + +static void schedule_chk_task(struct adapter *adap) +{ + unsigned int timeo; + + timeo = adap->params.linkpoll_period ? + (HZ * adap->params.linkpoll_period) / 10 : + adap->params.stats_update_period * HZ; + if (timeo) + queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo); +} + +static int offload_open(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct t3cdev *tdev = dev2t3cdev(dev); + int adap_up = adapter->open_device_map & PORT_MASK; + int err; + + if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) + return 0; + + if (!adap_up && (err = cxgb_up(adapter)) < 0) + goto out; + + t3_tp_set_offload_mode(adapter, 1); + tdev->lldev = adapter->port[0]; + err = cxgb3_offload_activate(adapter); + if (err) + goto out; + + init_port_mtus(adapter); + t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd, + adapter->params.b_wnd, + adapter->params.rev == 0 ? + adapter->port[0]->mtu : 0xffff); + init_smt(adapter); + + if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group)) + dev_dbg(&dev->dev, "cannot create sysfs group\n"); + + /* Call back all registered clients */ + cxgb3_add_clients(tdev); + +out: + /* restore them in case the offload module has changed them */ + if (err) { + t3_tp_set_offload_mode(adapter, 0); + clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); + cxgb3_set_dummy_ops(tdev); + } + return err; +} + +static int offload_close(struct t3cdev *tdev) +{ + struct adapter *adapter = tdev2adap(tdev); + struct t3c_data *td = T3C_DATA(tdev); + + if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) + return 0; + + /* Call back all registered clients */ + cxgb3_remove_clients(tdev); + + sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group); + + /* Flush work scheduled while releasing TIDs */ + flush_work_sync(&td->tid_release_task); + + tdev->lldev = NULL; + cxgb3_set_dummy_ops(tdev); + t3_tp_set_offload_mode(adapter, 0); + clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); + + if (!adapter->open_device_map) + cxgb_down(adapter, 0); + + cxgb3_offload_deactivate(adapter); + return 0; +} + +static int cxgb_open(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int other_ports = adapter->open_device_map & PORT_MASK; + int err; + + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) + return err; + + set_bit(pi->port_id, &adapter->open_device_map); + if (is_offload(adapter) && !ofld_disable) { + err = offload_open(dev); + if (err) + printk(KERN_WARNING + "Could not initialize offload capabilities\n"); + } + + netif_set_real_num_tx_queues(dev, pi->nqsets); + err = netif_set_real_num_rx_queues(dev, pi->nqsets); + if (err) + return err; + link_start(dev); + t3_port_intr_enable(adapter, pi->port_id); + netif_tx_start_all_queues(dev); + if (!other_ports) + schedule_chk_task(adapter); + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id); + return 0; +} + +static int __cxgb_close(struct net_device *dev, int on_wq) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter 
= pi->adapter; + + + if (!adapter->open_device_map) + return 0; + + /* Stop link fault interrupts */ + t3_xgm_intr_disable(adapter, pi->port_id); + t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset); + + t3_port_intr_disable(adapter, pi->port_id); + netif_tx_stop_all_queues(dev); + pi->phy.ops->power_down(&pi->phy, 1); + netif_carrier_off(dev); + t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); + + spin_lock_irq(&adapter->work_lock); /* sync with update task */ + clear_bit(pi->port_id, &adapter->open_device_map); + spin_unlock_irq(&adapter->work_lock); + + if (!(adapter->open_device_map & PORT_MASK)) + cancel_delayed_work_sync(&adapter->adap_check_task); + + if (!adapter->open_device_map) + cxgb_down(adapter, on_wq); + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id); + return 0; +} + +static int cxgb_close(struct net_device *dev) +{ + return __cxgb_close(dev, 0); +} + +static struct net_device_stats *cxgb_get_stats(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct net_device_stats *ns = &pi->netstats; + const struct mac_stats *pstats; + + spin_lock(&adapter->stats_lock); + pstats = t3_mac_update_stats(&pi->mac); + spin_unlock(&adapter->stats_lock); + + ns->tx_bytes = pstats->tx_octets; + ns->tx_packets = pstats->tx_frames; + ns->rx_bytes = pstats->rx_octets; + ns->rx_packets = pstats->rx_frames; + ns->multicast = pstats->rx_mcast_frames; + + ns->tx_errors = pstats->tx_underrun; + ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs + + pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short + + pstats->rx_fifo_ovfl; + + /* detailed rx_errors */ + ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long; + ns->rx_over_errors = 0; + ns->rx_crc_errors = pstats->rx_fcs_errs; + ns->rx_frame_errors = pstats->rx_symbol_errs; + ns->rx_fifo_errors = pstats->rx_fifo_ovfl; + ns->rx_missed_errors = pstats->rx_cong_drops; + + /* detailed tx_errors */ + ns->tx_aborted_errors = 0; + ns->tx_carrier_errors = 0; + ns->tx_fifo_errors = pstats->tx_underrun; + ns->tx_heartbeat_errors = 0; + ns->tx_window_errors = 0; + return ns; +} + +static u32 get_msglevel(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + return adapter->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + adapter->msg_enable = val; +} + +static char stats_strings[][ETH_GSTRING_LEN] = { + "TxOctetsOK ", + "TxFramesOK ", + "TxMulticastFramesOK", + "TxBroadcastFramesOK", + "TxPauseFrames ", + "TxUnderrun ", + "TxExtUnderrun ", + + "TxFrames64 ", + "TxFrames65To127 ", + "TxFrames128To255 ", + "TxFrames256To511 ", + "TxFrames512To1023 ", + "TxFrames1024To1518 ", + "TxFrames1519ToMax ", + + "RxOctetsOK ", + "RxFramesOK ", + "RxMulticastFramesOK", + "RxBroadcastFramesOK", + "RxPauseFrames ", + "RxFCSErrors ", + "RxSymbolErrors ", + "RxShortErrors ", + "RxJabberErrors ", + "RxLengthErrors ", + "RxFIFOoverflow ", + + "RxFrames64 ", + "RxFrames65To127 ", + "RxFrames128To255 ", + "RxFrames256To511 ", + "RxFrames512To1023 ", + "RxFrames1024To1518 ", + "RxFrames1519ToMax ", + + "PhyFIFOErrors ", + "TSO ", + "VLANextractions ", + "VLANinsertions ", + "TxCsumOffload ", + "RxCsumGood ", + "LroAggregated ", + "LroFlushed ", + "LroNoDesc ", + "RxDrops ", + + "CheckTXEnToggled ", + "CheckResets ", + + "LinkFaults ", +}; + +static int get_sset_count(struct 
net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } +} + +#define T3_REGMAP_SIZE (3 * 1024) + +static int get_regs_len(struct net_device *dev) +{ + return T3_REGMAP_SIZE; +} + +static int get_eeprom_len(struct net_device *dev) +{ + return EEPROMSIZE; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + u32 fw_vers = 0; + u32 tp_vers = 0; + + spin_lock(&adapter->stats_lock); + t3_get_fw_version(adapter, &fw_vers); + t3_get_tp_version(adapter, &tp_vers); + spin_unlock(&adapter->stats_lock); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(adapter->pdev)); + if (!fw_vers) + strcpy(info->fw_version, "N/A"); + else { + snprintf(info->fw_version, sizeof(info->fw_version), + "%s %u.%u.%u TP %u.%u.%u", + G_FW_VERSION_TYPE(fw_vers) ? "T" : "N", + G_FW_VERSION_MAJOR(fw_vers), + G_FW_VERSION_MINOR(fw_vers), + G_FW_VERSION_MICRO(fw_vers), + G_TP_VERSION_MAJOR(tp_vers), + G_TP_VERSION_MINOR(tp_vers), + G_TP_VERSION_MICRO(tp_vers)); + } +} + +static void get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, stats_strings, sizeof(stats_strings)); +} + +static unsigned long collect_sge_port_stats(struct adapter *adapter, + struct port_info *p, int idx) +{ + int i; + unsigned long tot = 0; + + for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i) + tot += adapter->sge.qs[i].port_stats[idx]; + return tot; +} + +static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + const struct mac_stats *s; + + spin_lock(&adapter->stats_lock); + s = t3_mac_update_stats(&pi->mac); + spin_unlock(&adapter->stats_lock); + + *data++ = s->tx_octets; + *data++ = s->tx_frames; + *data++ = s->tx_mcast_frames; + *data++ = s->tx_bcast_frames; + *data++ = s->tx_pause; + *data++ = s->tx_underrun; + *data++ = s->tx_fifo_urun; + + *data++ = s->tx_frames_64; + *data++ = s->tx_frames_65_127; + *data++ = s->tx_frames_128_255; + *data++ = s->tx_frames_256_511; + *data++ = s->tx_frames_512_1023; + *data++ = s->tx_frames_1024_1518; + *data++ = s->tx_frames_1519_max; + + *data++ = s->rx_octets; + *data++ = s->rx_frames; + *data++ = s->rx_mcast_frames; + *data++ = s->rx_bcast_frames; + *data++ = s->rx_pause; + *data++ = s->rx_fcs_errs; + *data++ = s->rx_symbol_errs; + *data++ = s->rx_short; + *data++ = s->rx_jabber; + *data++ = s->rx_too_long; + *data++ = s->rx_fifo_ovfl; + + *data++ = s->rx_frames_64; + *data++ = s->rx_frames_65_127; + *data++ = s->rx_frames_128_255; + *data++ = s->rx_frames_256_511; + *data++ = s->rx_frames_512_1023; + *data++ = s->rx_frames_1024_1518; + *data++ = s->rx_frames_1519_max; + + *data++ = pi->phy.fifo_errors; + + *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO); + *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX); + *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); + *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); + *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); + *data++ = 0; + *data++ = 0; + *data++ = 0; + *data++ = s->rx_cong_drops; + + *data++ = s->num_toggled; + *data++ = s->num_resets; + + *data++ = s->link_faults; +} + +static inline void reg_block_dump(struct adapter *ap, void 
*buf, + unsigned int start, unsigned int end) +{ + u32 *p = buf + start; + + for (; start <= end; start += sizeof(u32)) + *p++ = t3_read_reg(ap, start); +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *ap = pi->adapter; + + /* + * Version scheme: + * bits 0..9: chip version + * bits 10..15: chip revision + * bit 31: set for PCIe cards + */ + regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31); + + /* + * We skip the MAC statistics registers because they are clear-on-read. + * Also reading multi-register stats would need to synchronize with the + * periodic mac stats accumulation. Hard to justify the complexity. + */ + memset(buf, 0, T3_REGMAP_SIZE); + reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN); + reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT); + reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE); + reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA); + reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3); + reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0, + XGM_REG(A_XGM_SERDES_STAT3, 1)); + reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1), + XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1)); +} + +static int restart_autoneg(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + if (p->link_config.autoneg != AUTONEG_ENABLE) + return -EINVAL; + p->phy.ops->autoneg_restart(&p->phy); + return 0; +} + +static int set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_OFF: + t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0); + break; + + case ETHTOOL_ID_ON: + case ETHTOOL_ID_INACTIVE: + t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, + F_GPIO0_OUT_VAL); + } + + return 0; +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct port_info *p = netdev_priv(dev); + + cmd->supported = p->link_config.supported; + cmd->advertising = p->link_config.advertising; + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, p->link_config.speed); + cmd->duplex = p->link_config.duplex; + } else { + ethtool_cmd_speed_set(cmd, -1); + cmd->duplex = -1; + } + + cmd->port = (cmd->supported & SUPPORTED_TP) ? 
PORT_TP : PORT_FIBRE; + cmd->phy_address = p->phy.mdio.prtad; + cmd->transceiver = XCVR_EXTERNAL; + cmd->autoneg = p->link_config.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static int speed_duplex_to_caps(int speed, int duplex) +{ + int cap = 0; + + switch (speed) { + case SPEED_10: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_10baseT_Full; + else + cap = SUPPORTED_10baseT_Half; + break; + case SPEED_100: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_100baseT_Full; + else + cap = SUPPORTED_100baseT_Half; + break; + case SPEED_1000: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_1000baseT_Full; + else + cap = SUPPORTED_1000baseT_Half; + break; + case SPEED_10000: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_10000baseT_Full; + } + return cap; +} + +#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ + ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ + ADVERTISED_10000baseT_Full) + +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_config; + + if (!(lc->supported & SUPPORTED_Autoneg)) { + /* + * PHY offers a single speed/duplex. See if that's what's + * being requested. + */ + if (cmd->autoneg == AUTONEG_DISABLE) { + u32 speed = ethtool_cmd_speed(cmd); + int cap = speed_duplex_to_caps(speed, cmd->duplex); + if (lc->supported & cap) + return 0; + } + return -EINVAL; + } + + if (cmd->autoneg == AUTONEG_DISABLE) { + u32 speed = ethtool_cmd_speed(cmd); + int cap = speed_duplex_to_caps(speed, cmd->duplex); + + if (!(lc->supported & cap) || (speed == SPEED_1000)) + return -EINVAL; + lc->requested_speed = speed; + lc->requested_duplex = cmd->duplex; + lc->advertising = 0; + } else { + cmd->advertising &= ADVERTISED_MASK; + cmd->advertising &= lc->supported; + if (!cmd->advertising) + return -EINVAL; + lc->requested_speed = SPEED_INVALID; + lc->requested_duplex = DUPLEX_INVALID; + lc->advertising = cmd->advertising | ADVERTISED_Autoneg; + } + lc->autoneg = cmd->autoneg; + if (netif_running(dev)) + t3_link_start(&p->phy, &p->mac, lc); + return 0; +} + +static void get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + + epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; + epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; +} + +static int set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_config; + + if (epause->autoneg == AUTONEG_DISABLE) + lc->requested_fc = 0; + else if (lc->supported & SUPPORTED_Autoneg) + lc->requested_fc = PAUSE_AUTONEG; + else + return -EINVAL; + + if (epause->rx_pause) + lc->requested_fc |= PAUSE_RX; + if (epause->tx_pause) + lc->requested_fc |= PAUSE_TX; + if (lc->autoneg == AUTONEG_ENABLE) { + if (netif_running(dev)) + t3_link_start(&p->phy, &p->mac, lc); + } else { + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + if (netif_running(dev)) + t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc); + } + return 0; +} + +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset]; + + e->rx_max_pending = MAX_RX_BUFFERS; + 
e->rx_mini_max_pending = 0; + e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; + e->tx_max_pending = MAX_TXQ_ENTRIES; + + e->rx_pending = q->fl_size; + e->rx_mini_pending = q->rspq_size; + e->rx_jumbo_pending = q->jumbo_size; + e->tx_pending = q->txq_size[0]; +} + +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct qset_params *q; + int i; + + if (e->rx_pending > MAX_RX_BUFFERS || + e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || + e->tx_pending > MAX_TXQ_ENTRIES || + e->rx_mini_pending > MAX_RSPQ_ENTRIES || + e->rx_mini_pending < MIN_RSPQ_ENTRIES || + e->rx_pending < MIN_FL_ENTRIES || + e->rx_jumbo_pending < MIN_FL_ENTRIES || + e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + q = &adapter->params.sge.qset[pi->first_qset]; + for (i = 0; i < pi->nqsets; ++i, ++q) { + q->rspq_size = e->rx_mini_pending; + q->fl_size = e->rx_pending; + q->jumbo_size = e->rx_jumbo_pending; + q->txq_size[0] = e->tx_pending; + q->txq_size[1] = e->tx_pending; + q->txq_size[2] = e->tx_pending; + } + return 0; +} + +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct qset_params *qsp; + struct sge_qset *qs; + int i; + + if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) + return -EINVAL; + + for (i = 0; i < pi->nqsets; i++) { + qsp = &adapter->params.sge.qset[i]; + qs = &adapter->sge.qs[i]; + qsp->coalesce_usecs = c->rx_coalesce_usecs; + t3_update_qset_coalesce(qs, qsp); + } + + return 0; +} + +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct qset_params *q = adapter->params.sge.qset; + + c->rx_coalesce_usecs = q->coalesce_usecs; + return 0; +} + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, + u8 * data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int i, err = 0; + + u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + e->magic = EEPROM_MAGIC; + for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) + err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]); + + if (!err) + memcpy(data, buf + e->offset, e->len); + kfree(buf); + return err; +} + +static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 * data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + u32 aligned_offset, aligned_len; + __le32 *p; + u8 *buf; + int err; + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + aligned_offset = eeprom->offset & ~3; + aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; + + if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { + buf = kmalloc(aligned_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf); + if (!err && aligned_len > 4) + err = t3_seeprom_read(adapter, + aligned_offset + aligned_len - 4, + (__le32 *) & buf[aligned_len - 4]); + if (err) + goto out; + memcpy(buf + (eeprom->offset & 3), data, eeprom->len); + } else + buf = data; + + err = t3_seeprom_wp(adapter, 0); + if (err) + goto out; + + for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) { + err = t3_seeprom_write(adapter, aligned_offset, *p); 
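+		/* t3_seeprom_write() has programmed one 32-bit word; step the
+		 * EEPROM address to the next word for the following pass.
+		 */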
+ aligned_offset += 4; + } + + if (!err) + err = t3_seeprom_wp(adapter, 1); +out: + if (buf != data) + kfree(buf); + return err; +} + +static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static const struct ethtool_ops cxgb_ethtool_ops = { + .get_settings = get_settings, + .set_settings = set_settings, + .get_drvinfo = get_drvinfo, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_ringparam = get_sge_param, + .set_ringparam = set_sge_param, + .get_coalesce = get_coalesce, + .set_coalesce = set_coalesce, + .get_eeprom_len = get_eeprom_len, + .get_eeprom = get_eeprom, + .set_eeprom = set_eeprom, + .get_pauseparam = get_pauseparam, + .set_pauseparam = set_pauseparam, + .get_link = ethtool_op_get_link, + .get_strings = get_strings, + .set_phys_id = set_phys_id, + .nway_reset = restart_autoneg, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_stats, + .get_regs_len = get_regs_len, + .get_regs = get_regs, + .get_wol = get_wol, +}; + +static int in_range(int val, int lo, int hi) +{ + return val < 0 || (val <= hi && val >= lo); +} + +static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + u32 cmd; + int ret; + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + + switch (cmd) { + case CHELSIO_SET_QSET_PARAMS:{ + int i; + struct qset_params *q; + struct ch_qset_params t; + int q1 = pi->first_qset; + int nqsets = pi->nqsets; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&t, useraddr, sizeof(t))) + return -EFAULT; + if (t.qset_idx >= SGE_QSETS) + return -EINVAL; + if (!in_range(t.intr_lat, 0, M_NEWTIMER) || + !in_range(t.cong_thres, 0, 255) || + !in_range(t.txq_size[0], MIN_TXQ_ENTRIES, + MAX_TXQ_ENTRIES) || + !in_range(t.txq_size[1], MIN_TXQ_ENTRIES, + MAX_TXQ_ENTRIES) || + !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES, + MAX_CTRL_TXQ_ENTRIES) || + !in_range(t.fl_size[0], MIN_FL_ENTRIES, + MAX_RX_BUFFERS) || + !in_range(t.fl_size[1], MIN_FL_ENTRIES, + MAX_RX_JUMBO_BUFFERS) || + !in_range(t.rspq_size, MIN_RSPQ_ENTRIES, + MAX_RSPQ_ENTRIES)) + return -EINVAL; + + if ((adapter->flags & FULL_INIT_DONE) && + (t.rspq_size >= 0 || t.fl_size[0] >= 0 || + t.fl_size[1] >= 0 || t.txq_size[0] >= 0 || + t.txq_size[1] >= 0 || t.txq_size[2] >= 0 || + t.polling >= 0 || t.cong_thres >= 0)) + return -EBUSY; + + /* Allow setting of any available qset when offload enabled */ + if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { + q1 = 0; + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + nqsets += pi->first_qset + pi->nqsets; + } + } + + if (t.qset_idx < q1) + return -EINVAL; + if (t.qset_idx > q1 + nqsets - 1) + return -EINVAL; + + q = &adapter->params.sge.qset[t.qset_idx]; + + if (t.rspq_size >= 0) + q->rspq_size = t.rspq_size; + if (t.fl_size[0] >= 0) + q->fl_size = t.fl_size[0]; + if (t.fl_size[1] >= 0) + q->jumbo_size = t.fl_size[1]; + if (t.txq_size[0] >= 0) + q->txq_size[0] = t.txq_size[0]; + if (t.txq_size[1] >= 0) + q->txq_size[1] = t.txq_size[1]; + if (t.txq_size[2] >= 0) + q->txq_size[2] = t.txq_size[2]; + if (t.cong_thres >= 0) + q->cong_thres = t.cong_thres; + if (t.intr_lat >= 0) { + struct sge_qset *qs = + &adapter->sge.qs[t.qset_idx]; + + q->coalesce_usecs = t.intr_lat; + t3_update_qset_coalesce(qs, q); + } + if (t.polling >= 0) { + if (adapter->flags & USING_MSIX) + 
q->polling = t.polling; + else { + /* No polling with INTx for T3A */ + if (adapter->params.rev == 0 && + !(adapter->flags & USING_MSI)) + t.polling = 0; + + for (i = 0; i < SGE_QSETS; i++) { + q = &adapter->params.sge. + qset[i]; + q->polling = t.polling; + } + } + } + + if (t.lro >= 0) { + if (t.lro) + dev->wanted_features |= NETIF_F_GRO; + else + dev->wanted_features &= ~NETIF_F_GRO; + netdev_update_features(dev); + } + + break; + } + case CHELSIO_GET_QSET_PARAMS:{ + struct qset_params *q; + struct ch_qset_params t; + int q1 = pi->first_qset; + int nqsets = pi->nqsets; + int i; + + if (copy_from_user(&t, useraddr, sizeof(t))) + return -EFAULT; + + /* Display qsets for all ports when offload enabled */ + if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { + q1 = 0; + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + nqsets = pi->first_qset + pi->nqsets; + } + } + + if (t.qset_idx >= nqsets) + return -EINVAL; + + q = &adapter->params.sge.qset[q1 + t.qset_idx]; + t.rspq_size = q->rspq_size; + t.txq_size[0] = q->txq_size[0]; + t.txq_size[1] = q->txq_size[1]; + t.txq_size[2] = q->txq_size[2]; + t.fl_size[0] = q->fl_size; + t.fl_size[1] = q->jumbo_size; + t.polling = q->polling; + t.lro = !!(dev->features & NETIF_F_GRO); + t.intr_lat = q->coalesce_usecs; + t.cong_thres = q->cong_thres; + t.qnum = q1; + + if (adapter->flags & USING_MSIX) + t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec; + else + t.vector = adapter->pdev->irq; + + if (copy_to_user(useraddr, &t, sizeof(t))) + return -EFAULT; + break; + } + case CHELSIO_SET_QSET_NUM:{ + struct ch_reg edata; + unsigned int i, first_qset = 0, other_qsets = 0; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + if (edata.val < 1 || + (edata.val > 1 && !(adapter->flags & USING_MSIX))) + return -EINVAL; + + for_each_port(adapter, i) + if (adapter->port[i] && adapter->port[i] != dev) + other_qsets += adap2pinfo(adapter, i)->nqsets; + + if (edata.val + other_qsets > SGE_QSETS) + return -EINVAL; + + pi->nqsets = edata.val; + + for_each_port(adapter, i) + if (adapter->port[i]) { + pi = adap2pinfo(adapter, i); + pi->first_qset = first_qset; + first_qset += pi->nqsets; + } + break; + } + case CHELSIO_GET_QSET_NUM:{ + struct ch_reg edata; + + memset(&edata, 0, sizeof(struct ch_reg)); + + edata.cmd = CHELSIO_GET_QSET_NUM; + edata.val = pi->nqsets; + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + break; + } + case CHELSIO_LOAD_FW:{ + u8 *fw_data; + struct ch_mem_range t; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + if (copy_from_user(&t, useraddr, sizeof(t))) + return -EFAULT; + /* Check t.len sanity ? 
*/ + fw_data = memdup_user(useraddr + sizeof(t), t.len); + if (IS_ERR(fw_data)) + return PTR_ERR(fw_data); + + ret = t3_load_fw(adapter, fw_data, t.len); + kfree(fw_data); + if (ret) + return ret; + break; + } + case CHELSIO_SETMTUTAB:{ + struct ch_mtus m; + int i; + + if (!is_offload(adapter)) + return -EOPNOTSUPP; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (offload_running(adapter)) + return -EBUSY; + if (copy_from_user(&m, useraddr, sizeof(m))) + return -EFAULT; + if (m.nmtus != NMTUS) + return -EINVAL; + if (m.mtus[0] < 81) /* accommodate SACK */ + return -EINVAL; + + /* MTUs must be in ascending order */ + for (i = 1; i < NMTUS; ++i) + if (m.mtus[i] < m.mtus[i - 1]) + return -EINVAL; + + memcpy(adapter->params.mtus, m.mtus, + sizeof(adapter->params.mtus)); + break; + } + case CHELSIO_GET_PM:{ + struct tp_params *p = &adapter->params.tp; + struct ch_pm m = {.cmd = CHELSIO_GET_PM }; + + if (!is_offload(adapter)) + return -EOPNOTSUPP; + m.tx_pg_sz = p->tx_pg_size; + m.tx_num_pg = p->tx_num_pgs; + m.rx_pg_sz = p->rx_pg_size; + m.rx_num_pg = p->rx_num_pgs; + m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan; + if (copy_to_user(useraddr, &m, sizeof(m))) + return -EFAULT; + break; + } + case CHELSIO_SET_PM:{ + struct ch_pm m; + struct tp_params *p = &adapter->params.tp; + + if (!is_offload(adapter)) + return -EOPNOTSUPP; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + if (copy_from_user(&m, useraddr, sizeof(m))) + return -EFAULT; + if (!is_power_of_2(m.rx_pg_sz) || + !is_power_of_2(m.tx_pg_sz)) + return -EINVAL; /* not power of 2 */ + if (!(m.rx_pg_sz & 0x14000)) + return -EINVAL; /* not 16KB or 64KB */ + if (!(m.tx_pg_sz & 0x1554000)) + return -EINVAL; + if (m.tx_num_pg == -1) + m.tx_num_pg = p->tx_num_pgs; + if (m.rx_num_pg == -1) + m.rx_num_pg = p->rx_num_pgs; + if (m.tx_num_pg % 24 || m.rx_num_pg % 24) + return -EINVAL; + if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size || + m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size) + return -EINVAL; + p->rx_pg_size = m.rx_pg_sz; + p->tx_pg_size = m.tx_pg_sz; + p->rx_num_pgs = m.rx_num_pg; + p->tx_num_pgs = m.tx_num_pg; + break; + } + case CHELSIO_GET_MEM:{ + struct ch_mem_range t; + struct mc7 *mem; + u64 buf[32]; + + if (!is_offload(adapter)) + return -EOPNOTSUPP; + if (!(adapter->flags & FULL_INIT_DONE)) + return -EIO; /* need the memory controllers */ + if (copy_from_user(&t, useraddr, sizeof(t))) + return -EFAULT; + if ((t.addr & 7) || (t.len & 7)) + return -EINVAL; + if (t.mem_id == MEM_CM) + mem = &adapter->cm; + else if (t.mem_id == MEM_PMRX) + mem = &adapter->pmrx; + else if (t.mem_id == MEM_PMTX) + mem = &adapter->pmtx; + else + return -EINVAL; + + /* + * Version scheme: + * bits 0..9: chip version + * bits 10..15: chip revision + */ + t.version = 3 | (adapter->params.rev << 10); + if (copy_to_user(useraddr, &t, sizeof(t))) + return -EFAULT; + + /* + * Read 256 bytes at a time as len can be large and we don't + * want to use huge intermediate buffers. 
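+		 * buf above holds 32 u64s (256 bytes); each pass of the loop
+		 * below reads chunk/8 64-bit words via t3_mc7_bd_read() and
+		 * then copies them out to user space.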
+ */ + useraddr += sizeof(t); /* advance to start of buffer */ + while (t.len) { + unsigned int chunk = + min_t(unsigned int, t.len, sizeof(buf)); + + ret = + t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, + buf); + if (ret) + return ret; + if (copy_to_user(useraddr, buf, chunk)) + return -EFAULT; + useraddr += chunk; + t.addr += chunk; + t.len -= chunk; + } + break; + } + case CHELSIO_SET_TRACE_FILTER:{ + struct ch_trace t; + const struct trace_params *tp; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (!offload_running(adapter)) + return -EAGAIN; + if (copy_from_user(&t, useraddr, sizeof(t))) + return -EFAULT; + + tp = (const struct trace_params *)&t.sip; + if (t.config_tx) + t3_config_trace_filter(adapter, tp, 0, + t.invert_match, + t.trace_tx); + if (t.config_rx) + t3_config_trace_filter(adapter, tp, 1, + t.invert_match, + t.trace_rx); + break; + } + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct mii_ioctl_data *data = if_mii(req); + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + switch (cmd) { + case SIOCGMIIREG: + case SIOCSMIIREG: + /* Convert phy_id from older PRTAD/DEVAD format */ + if (is_10G(adapter) && + !mdio_phy_id_is_c45(data->phy_id) && + (data->phy_id & 0x1f00) && + !(data->phy_id & 0xe0e0)) + data->phy_id = mdio_phy_id_c45(data->phy_id >> 8, + data->phy_id & 0x1f); + /* FALLTHRU */ + case SIOCGMIIPHY: + return mdio_mii_ioctl(&pi->phy.mdio, data, cmd); + case SIOCCHIOCTL: + return cxgb_extension_ioctl(dev, req->ifr_data); + default: + return -EOPNOTSUPP; + } +} + +static int cxgb_change_mtu(struct net_device *dev, int new_mtu) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; + + if (new_mtu < 81) /* accommodate SACK */ + return -EINVAL; + if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu))) + return ret; + dev->mtu = new_mtu; + init_port_mtus(adapter); + if (adapter->params.rev == 0 && offload_running(adapter)) + t3_load_mtus(adapter, adapter->params.mtus, + adapter->params.a_wnd, adapter->params.b_wnd, + adapter->port[0]->mtu); + return 0; +} + +static int cxgb_set_mac_addr(struct net_device *dev, void *p) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr); + if (offload_running(adapter)) + write_smt_entry(adapter, pi->port_id); + return 0; +} + +/** + * t3_synchronize_rx - wait for current Rx processing on a port to complete + * @adap: the adapter + * @p: the port + * + * Ensures that current Rx processing on any of the queues associated with + * the given port completes before returning. We do this by acquiring and + * releasing the locks of the response queues associated with the port. 
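+ *
+ * Taking and immediately dropping each lock is enough: a handler that is
+ * currently processing a response queue holds that queue's lock, so once
+ * we can acquire it the pass in progress has completed.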
+ */ +static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) +{ + int i; + + for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { + struct sge_rspq *q = &adap->sge.qs[i].rspq; + + spin_lock_irq(&q->lock); + spin_unlock_irq(&q->lock); + } +} + +static void cxgb_vlan_mode(struct net_device *dev, u32 features) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (adapter->params.rev > 0) { + t3_set_vlan_accel(adapter, 1 << pi->port_id, + features & NETIF_F_HW_VLAN_RX); + } else { + /* single control for all ports */ + unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX; + + for_each_port(adapter, i) + have_vlans |= + adapter->port[i]->features & NETIF_F_HW_VLAN_RX; + + t3_set_vlan_accel(adapter, 1, have_vlans); + } + t3_synchronize_rx(adapter, pi); +} + +static u32 cxgb_fix_features(struct net_device *dev, u32 features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_RX) + features |= NETIF_F_HW_VLAN_TX; + else + features &= ~NETIF_F_HW_VLAN_TX; + + return features; +} + +static int cxgb_set_features(struct net_device *dev, u32 features) +{ + u32 changed = dev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_RX) + cxgb_vlan_mode(dev, features); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cxgb_netpoll(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int qidx; + + for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) { + struct sge_qset *qs = &adapter->sge.qs[qidx]; + void *source; + + if (adapter->flags & USING_MSIX) + source = qs; + else + source = adapter; + + t3_intr_handler(adapter, qs->rspq.polling) (0, source); + } +} +#endif + +/* + * Periodic accumulation of MAC statistics. 
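+ * The hardware counters are presumably narrower than the 64-bit software
+ * totals, so they must be folded in often enough that they cannot wrap
+ * between two runs of the check task.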
+ */ +static void mac_stats_update(struct adapter *adapter) +{ + int i; + + for_each_port(adapter, i) { + struct net_device *dev = adapter->port[i]; + struct port_info *p = netdev_priv(dev); + + if (netif_running(dev)) { + spin_lock(&adapter->stats_lock); + t3_mac_update_stats(&p->mac); + spin_unlock(&adapter->stats_lock); + } + } +} + +static void check_link_status(struct adapter *adapter) +{ + int i; + + for_each_port(adapter, i) { + struct net_device *dev = adapter->port[i]; + struct port_info *p = netdev_priv(dev); + int link_fault; + + spin_lock_irq(&adapter->work_lock); + link_fault = p->link_fault; + spin_unlock_irq(&adapter->work_lock); + + if (link_fault) { + t3_link_fault(adapter, i); + continue; + } + + if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) { + t3_xgm_intr_disable(adapter, i); + t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset); + + t3_link_changed(adapter, i); + t3_xgm_intr_enable(adapter, i); + } + } +} + +static void check_t3b2_mac(struct adapter *adapter) +{ + int i; + + if (!rtnl_trylock()) /* synchronize with ifdown */ + return; + + for_each_port(adapter, i) { + struct net_device *dev = adapter->port[i]; + struct port_info *p = netdev_priv(dev); + int status; + + if (!netif_running(dev)) + continue; + + status = 0; + if (netif_running(dev) && netif_carrier_ok(dev)) + status = t3b2_mac_watchdog_task(&p->mac); + if (status == 1) + p->mac.stats.num_toggled++; + else if (status == 2) { + struct cmac *mac = &p->mac; + + t3_mac_set_mtu(mac, dev->mtu); + t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr); + cxgb_set_rxmode(dev); + t3_link_start(&p->phy, mac, &p->link_config); + t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); + t3_port_intr_enable(adapter, p->port_id); + p->mac.stats.num_resets++; + } + } + rtnl_unlock(); +} + + +static void t3_adap_check_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + adap_check_task.work); + const struct adapter_params *p = &adapter->params; + int port; + unsigned int v, status, reset; + + adapter->check_task_cnt++; + + check_link_status(adapter); + + /* Accumulate MAC stats if needed */ + if (!p->linkpoll_period || + (adapter->check_task_cnt * p->linkpoll_period) / 10 >= + p->stats_update_period) { + mac_stats_update(adapter); + adapter->check_task_cnt = 0; + } + + if (p->rev == T3_REV_B2) + check_t3b2_mac(adapter); + + /* + * Scan the XGMAC's to check for various conditions which we want to + * monitor in a periodic polling manner rather than via an interrupt + * condition. This is used for conditions which would otherwise flood + * the system with interrupts and we only really need to know that the + * conditions are "happening" ... For each condition we count the + * detection of the condition and reset it for the next polling loop. + */ + for_each_port(adapter, port) { + struct cmac *mac = &adap2pinfo(adapter, port)->mac; + u32 cause; + + cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset); + reset = 0; + if (cause & F_RXFIFO_OVERFLOW) { + mac->stats.rx_fifo_ovfl++; + reset |= F_RXFIFO_OVERFLOW; + } + + t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset); + } + + /* + * We do the same as above for FL_EMPTY interrupts. 
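+	 * The bits read from A_SG_RSPQ_FL_STATUS below alternate between the
+	 * two free lists of each queue set, which is why the walk toggles i
+	 * and advances qs on every other bit.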
+ */ + status = t3_read_reg(adapter, A_SG_INT_CAUSE); + reset = 0; + + if (status & F_FLEMPTY) { + struct sge_qset *qs = &adapter->sge.qs[0]; + int i = 0; + + reset |= F_FLEMPTY; + + v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) & + 0xffff; + + while (v) { + qs->fl[i].empty += (v & 1); + if (i) + qs++; + i ^= 1; + v >>= 1; + } + } + + t3_write_reg(adapter, A_SG_INT_CAUSE, reset); + + /* Schedule the next check update if any port is active. */ + spin_lock_irq(&adapter->work_lock); + if (adapter->open_device_map & PORT_MASK) + schedule_chk_task(adapter); + spin_unlock_irq(&adapter->work_lock); +} + +static void db_full_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + db_full_task); + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0); +} + +static void db_empty_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + db_empty_task); + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0); +} + +static void db_drop_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + db_drop_task); + unsigned long delay = 1000; + unsigned short r; + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0); + + /* + * Sleep a while before ringing the driver qset dbs. + * The delay is between 1000-2023 usecs. + */ + get_random_bytes(&r, 2); + delay += r & 1023; + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(delay)); + ring_dbs(adapter); +} + +/* + * Processes external (PHY) interrupts in process context. + */ +static void ext_intr_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + ext_intr_handler_task); + int i; + + /* Disable link fault interrupts */ + for_each_port(adapter, i) { + struct net_device *dev = adapter->port[i]; + struct port_info *p = netdev_priv(dev); + + t3_xgm_intr_disable(adapter, i); + t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset); + } + + /* Re-enable link fault interrupts */ + t3_phy_intr_handler(adapter); + + for_each_port(adapter, i) + t3_xgm_intr_enable(adapter, i); + + /* Now reenable external interrupts */ + spin_lock_irq(&adapter->work_lock); + if (adapter->slow_intr_mask) { + adapter->slow_intr_mask |= F_T3DBG; + t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG); + t3_write_reg(adapter, A_PL_INT_ENABLE0, + adapter->slow_intr_mask); + } + spin_unlock_irq(&adapter->work_lock); +} + +/* + * Interrupt-context handler for external (PHY) interrupts. + */ +void t3_os_ext_intr_handler(struct adapter *adapter) +{ + /* + * Schedule a task to handle external interrupts as they may be slow + * and we use a mutex to protect MDIO registers. We disable PHY + * interrupts in the meantime and let the task reenable them when + * it's done. 
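+	 * work_lock also guards slow_intr_mask, so masking F_T3DBG here
+	 * cannot race with ext_intr_task() turning it back on.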
+ */ + spin_lock(&adapter->work_lock); + if (adapter->slow_intr_mask) { + adapter->slow_intr_mask &= ~F_T3DBG; + t3_write_reg(adapter, A_PL_INT_ENABLE0, + adapter->slow_intr_mask); + queue_work(cxgb3_wq, &adapter->ext_intr_handler_task); + } + spin_unlock(&adapter->work_lock); +} + +void t3_os_link_fault_handler(struct adapter *adapter, int port_id) +{ + struct net_device *netdev = adapter->port[port_id]; + struct port_info *pi = netdev_priv(netdev); + + spin_lock(&adapter->work_lock); + pi->link_fault = 1; + spin_unlock(&adapter->work_lock); +} + +static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq) +{ + int i, ret = 0; + + if (is_offload(adapter) && + test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { + cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); + offload_close(&adapter->tdev); + } + + /* Stop all ports */ + for_each_port(adapter, i) { + struct net_device *netdev = adapter->port[i]; + + if (netif_running(netdev)) + __cxgb_close(netdev, on_wq); + } + + /* Stop SGE timers */ + t3_stop_sge_timers(adapter); + + adapter->flags &= ~FULL_INIT_DONE; + + if (reset) + ret = t3_reset_adapter(adapter); + + pci_disable_device(adapter->pdev); + + return ret; +} + +static int t3_reenable_adapter(struct adapter *adapter) +{ + if (pci_enable_device(adapter->pdev)) { + dev_err(&adapter->pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + goto err; + } + pci_set_master(adapter->pdev); + pci_restore_state(adapter->pdev); + pci_save_state(adapter->pdev); + + /* Free sge resources */ + t3_free_sge_resources(adapter); + + if (t3_replay_prep_adapter(adapter)) + goto err; + + return 0; +err: + return -1; +} + +static void t3_resume_ports(struct adapter *adapter) +{ + int i; + + /* Restart the ports */ + for_each_port(adapter, i) { + struct net_device *netdev = adapter->port[i]; + + if (netif_running(netdev)) { + if (cxgb_open(netdev)) { + dev_err(&adapter->pdev->dev, + "can't bring device back up" + " after reset\n"); + continue; + } + } + } + + if (is_offload(adapter) && !ofld_disable) + cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); +} + +/* + * processes a fatal error. + * Bring the ports down, reset the chip, bring the ports back up. + */ +static void fatal_error_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + fatal_error_handler_task); + int err = 0; + + rtnl_lock(); + err = t3_adapter_error(adapter, 1, 1); + if (!err) + err = t3_reenable_adapter(adapter); + if (!err) + t3_resume_ports(adapter); + + CH_ALERT(adapter, "adapter reset %s\n", err ? 
"failed" : "succeeded"); + rtnl_unlock(); +} + +void t3_fatal_err(struct adapter *adapter) +{ + unsigned int fw_status[4]; + + if (adapter->flags & FULL_INIT_DONE) { + t3_sge_stop(adapter); + t3_write_reg(adapter, A_XGM_TX_CTRL, 0); + t3_write_reg(adapter, A_XGM_RX_CTRL, 0); + t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0); + t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0); + + spin_lock(&adapter->work_lock); + t3_intr_disable(adapter); + queue_work(cxgb3_wq, &adapter->fatal_error_handler_task); + spin_unlock(&adapter->work_lock); + } + CH_ALERT(adapter, "encountered fatal error, operation suspended\n"); + if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status)) + CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n", + fw_status[0], fw_status[1], + fw_status[2], fw_status[3]); +} + +/** + * t3_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + t3_adapter_error(adapter, 0, 0); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * t3_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + if (!t3_reenable_adapter(adapter)) + return PCI_ERS_RESULT_RECOVERED; + + return PCI_ERS_RESULT_DISCONNECT; +} + +/** + * t3_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. + */ +static void t3_io_resume(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n", + t3_read_reg(adapter, A_PCIE_PEX_ERR)); + + t3_resume_ports(adapter); +} + +static struct pci_error_handlers t3_err_handler = { + .error_detected = t3_io_error_detected, + .slot_reset = t3_io_slot_reset, + .resume = t3_io_resume, +}; + +/* + * Set the number of qsets based on the number of CPUs and the number of ports, + * not to exceed the number of available qsets, assuming there are enough qsets + * per port in HW. 
+ */ +static void set_nqsets(struct adapter *adap) +{ + int i, j = 0; + int num_cpus = num_online_cpus(); + int hwports = adap->params.nports; + int nqsets = adap->msix_nvectors - 1; + + if (adap->params.rev > 0 && adap->flags & USING_MSIX) { + if (hwports == 2 && + (hwports * nqsets > SGE_QSETS || + num_cpus >= nqsets / hwports)) + nqsets /= hwports; + if (nqsets > num_cpus) + nqsets = num_cpus; + if (nqsets < 1 || hwports == 4) + nqsets = 1; + } else + nqsets = 1; + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = j; + pi->nqsets = nqsets; + j = pi->first_qset + nqsets; + + dev_info(&adap->pdev->dev, + "Port %d using %d queue sets.\n", i, nqsets); + } +} + +static int __devinit cxgb_enable_msix(struct adapter *adap) +{ + struct msix_entry entries[SGE_QSETS + 1]; + int vectors; + int i, err; + + vectors = ARRAY_SIZE(entries); + for (i = 0; i < vectors; ++i) + entries[i].entry = i; + + while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0) + vectors = err; + + if (err < 0) + pci_disable_msix(adap->pdev); + + if (!err && vectors < (adap->params.nports + 1)) { + pci_disable_msix(adap->pdev); + err = -1; + } + + if (!err) { + for (i = 0; i < vectors; ++i) + adap->msix_info[i].vec = entries[i].vector; + adap->msix_nvectors = vectors; + } + + return err; +} + +static void __devinit print_port_info(struct adapter *adap, + const struct adapter_info *ai) +{ + static const char *pci_variant[] = { + "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express" + }; + + int i; + char buf[80]; + + if (is_pcie(adap)) + snprintf(buf, sizeof(buf), "%s x%d", + pci_variant[adap->params.pci.variant], + adap->params.pci.width); + else + snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit", + pci_variant[adap->params.pci.variant], + adap->params.pci.speed, adap->params.pci.width); + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + const struct port_info *pi = netdev_priv(dev); + + if (!test_bit(i, &adap->registered_device_map)) + continue; + printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n", + dev->name, ai->desc, pi->phy.desc, + is_offload(adap) ? "R" : "", adap->params.rev, buf, + (adap->flags & USING_MSIX) ? " MSI-X" : + (adap->flags & USING_MSI) ? 
" MSI" : ""); + if (adap->name == dev->name && adap->params.vpd.mclk) + printk(KERN_INFO + "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n", + adap->name, t3_mc7_size(&adap->cm) >> 20, + t3_mc7_size(&adap->pmtx) >> 20, + t3_mc7_size(&adap->pmrx) >> 20, + adap->params.vpd.sn); + } +} + +static const struct net_device_ops cxgb_netdev_ops = { + .ndo_open = cxgb_open, + .ndo_stop = cxgb_close, + .ndo_start_xmit = t3_eth_xmit, + .ndo_get_stats = cxgb_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = cxgb_set_rxmode, + .ndo_do_ioctl = cxgb_ioctl, + .ndo_change_mtu = cxgb_change_mtu, + .ndo_set_mac_address = cxgb_set_mac_addr, + .ndo_fix_features = cxgb_fix_features, + .ndo_set_features = cxgb_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cxgb_netpoll, +#endif +}; + +static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + + memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN); + pi->iscsic.mac_addr[3] |= 0x80; +} + +static int __devinit init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + static int version_printed; + + int i, err, pci_using_dac = 0; + resource_size_t mmio_start, mmio_len; + const struct adapter_info *ai; + struct adapter *adapter = NULL; + struct port_info *pi; + + if (!version_printed) { + printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); + ++version_printed; + } + + if (!cxgb3_wq) { + cxgb3_wq = create_singlethread_workqueue(DRV_NAME); + if (!cxgb3_wq) { + printk(KERN_ERR DRV_NAME + ": cannot initialize work queue\n"); + return -ENOMEM; + } + } + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto out; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + /* Just info, some other driver may have claimed the device. 
*/ + dev_info(&pdev->dev, "cannot obtain PCI resources\n"); + goto out_disable_device; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " + "coherent allocations\n"); + goto out_release_regions; + } + } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto out_release_regions; + } + + pci_set_master(pdev); + pci_save_state(pdev); + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + ai = t3_get_adapter_info(ent->driver_data); + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto out_release_regions; + } + + adapter->nofail_skb = + alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL); + if (!adapter->nofail_skb) { + dev_err(&pdev->dev, "cannot allocate nofail buffer\n"); + err = -ENOMEM; + goto out_free_adapter; + } + + adapter->regs = ioremap_nocache(mmio_start, mmio_len); + if (!adapter->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto out_free_adapter; + } + + adapter->pdev = pdev; + adapter->name = pci_name(pdev); + adapter->msg_enable = dflt_msg_enable; + adapter->mmio_len = mmio_len; + + mutex_init(&adapter->mdio_lock); + spin_lock_init(&adapter->work_lock); + spin_lock_init(&adapter->stats_lock); + + INIT_LIST_HEAD(&adapter->adapter_list); + INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); + INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); + + INIT_WORK(&adapter->db_full_task, db_full_task); + INIT_WORK(&adapter->db_empty_task, db_empty_task); + INIT_WORK(&adapter->db_drop_task, db_drop_task); + + INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); + + for (i = 0; i < ai->nports0 + ai->nports1; ++i) { + struct net_device *netdev; + + netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS); + if (!netdev) { + err = -ENOMEM; + goto out_free_dev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter->port[i] = netdev; + pi = netdev_priv(netdev); + pi->adapter = adapter; + pi->port_id = i; + netif_carrier_off(netdev); + netdev->irq = pdev->irq; + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len - 1; + netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; + netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->netdev_ops = &cxgb_netdev_ops; + SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); + } + + pci_set_drvdata(pdev, adapter); + if (t3_prep_adapter(adapter, ai, 1) < 0) { + err = -ENODEV; + goto out_free_dev; + } + + /* + * The card is now ready to go. If any errors occur during device + * registration we do not fail the whole card but rather proceed only + * with the ports we manage to register successfully. However we must + * register at least one net device. + */ + for_each_port(adapter, i) { + err = register_netdev(adapter->port[i]); + if (err) + dev_warn(&pdev->dev, + "cannot register net device %s, skipping\n", + adapter->port[i]->name); + else { + /* + * Change the name we use for messages to the name of + * the first successfully registered interface. 
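+			 * (adapter->name was initialized to pci_name(pdev)
+			 * earlier in this function.)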
+ */ + if (!adapter->registered_device_map) + adapter->name = adapter->port[i]->name; + + __set_bit(i, &adapter->registered_device_map); + } + } + if (!adapter->registered_device_map) { + dev_err(&pdev->dev, "could not register any net devices\n"); + goto out_free_dev; + } + + for_each_port(adapter, i) + cxgb3_init_iscsi_mac(adapter->port[i]); + + /* Driver's ready. Reflect it on LEDs */ + t3_led_ready(adapter); + + if (is_offload(adapter)) { + __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map); + cxgb3_adapter_ofld(adapter); + } + + /* See what interrupts we'll be using */ + if (msi > 1 && cxgb_enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else if (msi > 0 && pci_enable_msi(pdev) == 0) + adapter->flags |= USING_MSI; + + set_nqsets(adapter); + + err = sysfs_create_group(&adapter->port[0]->dev.kobj, + &cxgb3_attr_group); + + for_each_port(adapter, i) + cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features); + + print_port_info(adapter, ai); + return 0; + +out_free_dev: + iounmap(adapter->regs); + for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i) + if (adapter->port[i]) + free_netdev(adapter->port[i]); + +out_free_adapter: + kfree(adapter); + +out_release_regions: + pci_release_regions(pdev); +out_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +out: + return err; +} + +static void __devexit remove_one(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + if (adapter) { + int i; + + t3_sge_stop(adapter); + sysfs_remove_group(&adapter->port[0]->dev.kobj, + &cxgb3_attr_group); + + if (is_offload(adapter)) { + cxgb3_adapter_unofld(adapter); + if (test_bit(OFFLOAD_DEVMAP_BIT, + &adapter->open_device_map)) + offload_close(&adapter->tdev); + } + + for_each_port(adapter, i) + if (test_bit(i, &adapter->registered_device_map)) + unregister_netdev(adapter->port[i]); + + t3_stop_sge_timers(adapter); + t3_free_sge_resources(adapter); + cxgb_disable_msi(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]) + free_netdev(adapter->port[i]); + + iounmap(adapter->regs); + if (adapter->nofail_skb) + kfree_skb(adapter->nofail_skb); + kfree(adapter); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } +} + +static struct pci_driver driver = { + .name = DRV_NAME, + .id_table = cxgb3_pci_tbl, + .probe = init_one, + .remove = __devexit_p(remove_one), + .err_handler = &t3_err_handler, +}; + +static int __init cxgb3_init_module(void) +{ + int ret; + + cxgb3_offload_init(); + + ret = pci_register_driver(&driver); + return ret; +} + +static void __exit cxgb3_cleanup_module(void) +{ + pci_unregister_driver(&driver); + if (cxgb3_wq) + destroy_workqueue(cxgb3_wq); +} + +module_init(cxgb3_init_module); +module_exit(cxgb3_cleanup_module); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c new file mode 100644 index 0000000..805076c --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -0,0 +1,1418 @@ +/* + * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/neighbour.h>
+#include <linux/notifier.h>
+#include <linux/atomic.h>
+#include <linux/proc_fs.h>
+#include <linux/if_vlan.h>
+#include <net/netevent.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+
+#include "common.h"
+#include "regs.h"
+#include "cxgb3_ioctl.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "firmware_exports.h"
+#include "cxgb3_offload.h"
+
+static LIST_HEAD(client_list);
+static LIST_HEAD(ofld_dev_list);
+static DEFINE_MUTEX(cxgb3_db_lock);
+
+static DEFINE_RWLOCK(adapter_list_lock);
+static LIST_HEAD(adapter_list);
+
+static const unsigned int MAX_ATIDS = 64 * 1024;
+static const unsigned int ATID_BASE = 0x10000;
+
+static void cxgb_neigh_update(struct neighbour *neigh);
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
+
+static inline int offload_activated(struct t3cdev *tdev)
+{
+	const struct adapter *adapter = tdev2adap(tdev);
+
+	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+}
+
+/**
+ * cxgb3_register_client - register an offload client
+ * @client: the client
+ *
+ * Add the client to the client list and call back the client for each
+ * activated offload device.
+ */
+void cxgb3_register_client(struct cxgb3_client *client)
+{
+	struct t3cdev *tdev;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_add_tail(&client->client_list, &client_list);
+
+	if (client->add) {
+		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+			if (offload_activated(tdev))
+				client->add(tdev);
+		}
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_register_client);
+
+/**
+ * cxgb3_unregister_client - unregister an offload client
+ * @client: the client
+ *
+ * Remove the client from the client list and call back the client for
+ * each activated offload device.
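+ *
+ * Like registration, this runs under cxgb3_db_lock, so it cannot race
+ * with clients being called back for device activation or deactivation.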
+ */
+void cxgb3_unregister_client(struct cxgb3_client *client)
+{
+	struct t3cdev *tdev;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_del(&client->client_list);
+
+	if (client->remove) {
+		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+			if (offload_activated(tdev))
+				client->remove(tdev);
+		}
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_unregister_client);
+
+/**
+ * cxgb3_add_clients - activate registered clients for an offload device
+ * @tdev: the offload device
+ *
+ * Calls back all registered clients once an offload device is activated.
+ */
+void cxgb3_add_clients(struct t3cdev *tdev)
+{
+	struct cxgb3_client *client;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_for_each_entry(client, &client_list, client_list) {
+		if (client->add)
+			client->add(tdev);
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+/**
+ * cxgb3_remove_clients - deactivate registered clients for an offload device
+ * @tdev: the offload device
+ *
+ * Calls back all registered clients once an offload device is deactivated.
+ */
+void cxgb3_remove_clients(struct t3cdev *tdev)
+{
+	struct cxgb3_client *client;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_for_each_entry(client, &client_list, client_list) {
+		if (client->remove)
+			client->remove(tdev);
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
+{
+	struct cxgb3_client *client;
+
+	mutex_lock(&cxgb3_db_lock);
+	list_for_each_entry(client, &client_list, client_list) {
+		if (client->event_handler)
+			client->event_handler(tdev, event, port);
+	}
+	mutex_unlock(&cxgb3_db_lock);
+}
+
+static struct net_device *get_iff_from_mac(struct adapter *adapter,
+					   const unsigned char *mac,
+					   unsigned int vlan)
+{
+	int i;
+
+	for_each_port(adapter, i) {
+		struct net_device *dev = adapter->port[i];
+
+		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+			if (vlan && vlan != VLAN_VID_MASK) {
+				rcu_read_lock();
+				dev = __vlan_find_dev_deep(dev, vlan);
+				rcu_read_unlock();
+			} else if (netif_is_bond_slave(dev)) {
+				while (dev->master)
+					dev = dev->master;
+			}
+			return dev;
+		}
+	}
+	return NULL;
+}
+
+static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
+			      void *data)
+{
+	int i;
+	int ret = 0;
+	unsigned int val = 0;
+	struct ulp_iscsi_info *uiip = data;
+
+	switch (req) {
+	case ULP_ISCSI_GET_PARAMS:
+		uiip->pdev = adapter->pdev;
+		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
+		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
+		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
+
+		val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
+		for (i = 0; i < 4; i++, val >>= 8)
+			uiip->pgsz_factor[i] = val & 0xFF;
+
+		val = t3_read_reg(adapter, A_TP_PARA_REG7);
+		uiip->max_txsz =
+		uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
+				     (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
+		/*
+		 * On tx, the iscsi pdu has to be <= tx page size and has to
+		 * fit into the Tx PM FIFO.
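+		 * The A_PM1_TX_CFG read below presumably yields that FIFO
+		 * limit.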
+ */ + val = min(adapter->params.tp.tx_pg_size, + t3_read_reg(adapter, A_PM1_TX_CFG) >> 17); + uiip->max_txsz = min(val, uiip->max_txsz); + + /* set MaxRxData to 16224 */ + val = t3_read_reg(adapter, A_TP_PARA_REG2); + if ((val >> S_MAXRXDATA) != 0x3f60) { + val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE); + val |= V_MAXRXDATA(0x3f60); + printk(KERN_INFO + "%s, iscsi set MaxRxData to 16224 (0x%x).\n", + adapter->name, val); + t3_write_reg(adapter, A_TP_PARA_REG2, val); + } + + /* + * on rx, the iscsi pdu has to be < rx page size and + * the max rx data length programmed in TP + */ + val = min(adapter->params.tp.rx_pg_size, + ((t3_read_reg(adapter, A_TP_PARA_REG2)) >> + S_MAXRXDATA) & M_MAXRXDATA); + uiip->max_rxsz = min(val, uiip->max_rxsz); + break; + case ULP_ISCSI_SET_PARAMS: + t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); + /* program the ddp page sizes */ + for (i = 0; i < 4; i++) + val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i); + if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) { + printk(KERN_INFO + "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n", + adapter->name, val, uiip->pgsz_factor[0], + uiip->pgsz_factor[1], uiip->pgsz_factor[2], + uiip->pgsz_factor[3]); + t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val); + } + break; + default: + ret = -EOPNOTSUPP; + } + return ret; +} + +/* Response queue used for RDMA events. */ +#define ASYNC_NOTIF_RSPQ 0 + +static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) +{ + int ret = 0; + + switch (req) { + case RDMA_GET_PARAMS: { + struct rdma_info *rdma = data; + struct pci_dev *pdev = adapter->pdev; + + rdma->udbell_physbase = pci_resource_start(pdev, 2); + rdma->udbell_len = pci_resource_len(pdev, 2); + rdma->tpt_base = + t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT); + rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); + rdma->pbl_base = + t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT); + rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); + rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); + rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); + rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL; + rdma->pdev = pdev; + break; + } + case RDMA_CQ_OP:{ + unsigned long flags; + struct rdma_cq_op *rdma = data; + + /* may be called in any context */ + spin_lock_irqsave(&adapter->sge.reg_lock, flags); + ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op, + rdma->credits); + spin_unlock_irqrestore(&adapter->sge.reg_lock, flags); + break; + } + case RDMA_GET_MEM:{ + struct ch_mem_range *t = data; + struct mc7 *mem; + + if ((t->addr & 7) || (t->len & 7)) + return -EINVAL; + if (t->mem_id == MEM_CM) + mem = &adapter->cm; + else if (t->mem_id == MEM_PMRX) + mem = &adapter->pmrx; + else if (t->mem_id == MEM_PMTX) + mem = &adapter->pmtx; + else + return -EINVAL; + + ret = + t3_mc7_bd_read(mem, t->addr / 8, t->len / 8, + (u64 *) t->buf); + if (ret) + return ret; + break; + } + case RDMA_CQ_SETUP:{ + struct rdma_cq_setup *rdma = data; + + spin_lock_irq(&adapter->sge.reg_lock); + ret = + t3_sge_init_cqcntxt(adapter, rdma->id, + rdma->base_addr, rdma->size, + ASYNC_NOTIF_RSPQ, + rdma->ovfl_mode, rdma->credits, + rdma->credit_thres); + spin_unlock_irq(&adapter->sge.reg_lock); + break; + } + case RDMA_CQ_DISABLE: + spin_lock_irq(&adapter->sge.reg_lock); + ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data); + spin_unlock_irq(&adapter->sge.reg_lock); + break; + case RDMA_CTRL_QP_SETUP:{ + struct rdma_ctrlqp_setup *rdma = data; + + spin_lock_irq(&adapter->sge.reg_lock); + ret =
t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0, + SGE_CNTXT_RDMA, + ASYNC_NOTIF_RSPQ, + rdma->base_addr, rdma->size, + FW_RI_TID_START, 1, 0); + spin_unlock_irq(&adapter->sge.reg_lock); + break; + } + case RDMA_GET_MIB: { + spin_lock(&adapter->stats_lock); + t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data); + spin_unlock(&adapter->stats_lock); + break; + } + default: + ret = -EOPNOTSUPP; + } + return ret; +} + +static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) +{ + struct adapter *adapter = tdev2adap(tdev); + struct tid_range *tid; + struct mtutab *mtup; + struct iff_mac *iffmacp; + struct ddp_params *ddpp; + struct adap_ports *ports; + struct ofld_page_info *rx_page_info; + struct tp_params *tp = &adapter->params.tp; + int i; + + switch (req) { + case GET_MAX_OUTSTANDING_WR: + *(unsigned int *)data = FW_WR_NUM; + break; + case GET_WR_LEN: + *(unsigned int *)data = WR_FLITS; + break; + case GET_TX_MAX_CHUNK: + *(unsigned int *)data = 1 << 20; /* 1MB */ + break; + case GET_TID_RANGE: + tid = data; + tid->num = t3_mc5_size(&adapter->mc5) - + adapter->params.mc5.nroutes - + adapter->params.mc5.nfilters - adapter->params.mc5.nservers; + tid->base = 0; + break; + case GET_STID_RANGE: + tid = data; + tid->num = adapter->params.mc5.nservers; + tid->base = t3_mc5_size(&adapter->mc5) - tid->num - + adapter->params.mc5.nfilters - adapter->params.mc5.nroutes; + break; + case GET_L2T_CAPACITY: + *(unsigned int *)data = 2048; + break; + case GET_MTUS: + mtup = data; + mtup->size = NMTUS; + mtup->mtus = adapter->params.mtus; + break; + case GET_IFF_FROM_MAC: + iffmacp = data; + iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr, + iffmacp->vlan_tag & + VLAN_VID_MASK); + break; + case GET_DDP_PARAMS: + ddpp = data; + ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT); + ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT); + ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK); + break; + case GET_PORTS: + ports = data; + ports->nports = adapter->params.nports; + for_each_port(adapter, i) + ports->lldevs[i] = adapter->port[i]; + break; + case ULP_ISCSI_GET_PARAMS: + case ULP_ISCSI_SET_PARAMS: + if (!offload_running(adapter)) + return -EAGAIN; + return cxgb_ulp_iscsi_ctl(adapter, req, data); + case RDMA_GET_PARAMS: + case RDMA_CQ_OP: + case RDMA_CQ_SETUP: + case RDMA_CQ_DISABLE: + case RDMA_CTRL_QP_SETUP: + case RDMA_GET_MEM: + case RDMA_GET_MIB: + if (!offload_running(adapter)) + return -EAGAIN; + return cxgb_rdma_ctl(adapter, req, data); + case GET_RX_PAGE_INFO: + rx_page_info = data; + rx_page_info->page_size = tp->rx_pg_size; + rx_page_info->num = tp->rx_num_pgs; + break; + case GET_ISCSI_IPV4ADDR: { + struct iscsi_ipv4addr *p = data; + struct port_info *pi = netdev_priv(p->dev); + p->ipv4addr = pi->iscsi_ipv4addr; + break; + } + case GET_EMBEDDED_INFO: { + struct ch_embedded_info *e = data; + + spin_lock(&adapter->stats_lock); + t3_get_fw_version(adapter, &e->fw_vers); + t3_get_tp_version(adapter, &e->tp_vers); + spin_unlock(&adapter->stats_lock); + break; + } + default: + return -EOPNOTSUPP; + } + return 0; +} + +/* + * Dummy handler for Rx offload packets in case we get an offload packet before + * proper processing is setup. This complains and drops the packet as it isn't + * normal to get offload packets at this stage. 
+ */ +static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs, + int n) +{ + while (n--) + dev_kfree_skb_any(skbs[n]); + return 0; +} + +static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh) +{ +} + +void cxgb3_set_dummy_ops(struct t3cdev *dev) +{ + dev->recv = rx_offload_blackhole; + dev->neigh_update = dummy_neigh_update; +} + +/* + * Free an active-open TID. + */ +void *cxgb3_free_atid(struct t3cdev *tdev, int atid) +{ + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + union active_open_entry *p = atid2entry(t, atid); + void *ctx = p->t3c_tid.ctx; + + spin_lock_bh(&t->atid_lock); + p->next = t->afree; + t->afree = p; + t->atids_in_use--; + spin_unlock_bh(&t->atid_lock); + + return ctx; +} + +EXPORT_SYMBOL(cxgb3_free_atid); + +/* + * Free a server TID and return it to the free pool. + */ +void cxgb3_free_stid(struct t3cdev *tdev, int stid) +{ + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + union listen_entry *p = stid2entry(t, stid); + + spin_lock_bh(&t->stid_lock); + p->next = t->sfree; + t->sfree = p; + t->stids_in_use--; + spin_unlock_bh(&t->stid_lock); +} + +EXPORT_SYMBOL(cxgb3_free_stid); + +void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client, + void *ctx, unsigned int tid) +{ + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + + t->tid_tab[tid].client = client; + t->tid_tab[tid].ctx = ctx; + atomic_inc(&t->tids_in_use); +} + +EXPORT_SYMBOL(cxgb3_insert_tid); + +/* + * Populate a TID_RELEASE WR. The skb must already be properly sized. + */ +static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid) +{ + struct cpl_tid_release *req; + + skb->priority = CPL_PRIORITY_SETUP; + req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); +} + +static void t3_process_tid_release_list(struct work_struct *work) +{ + struct t3c_data *td = container_of(work, struct t3c_data, + tid_release_task); + struct sk_buff *skb; + struct t3cdev *tdev = td->dev; + + + spin_lock_bh(&td->tid_release_lock); + while (td->tid_release_list) { + struct t3c_tid_entry *p = td->tid_release_list; + + td->tid_release_list = p->ctx; + spin_unlock_bh(&td->tid_release_lock); + + skb = alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL); + if (!skb) + skb = td->nofail_skb; + if (!skb) { + spin_lock_bh(&td->tid_release_lock); + p->ctx = (void *)td->tid_release_list; + td->tid_release_list = (struct t3c_tid_entry *)p; + break; + } + mk_tid_release(skb, p - td->tid_maps.tid_tab); + cxgb3_ofld_send(tdev, skb); + p->ctx = NULL; + if (skb == td->nofail_skb) + td->nofail_skb = + alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL); + spin_lock_bh(&td->tid_release_lock); + } + td->release_list_incomplete = (td->tid_release_list == NULL) ?
0 : 1; + spin_unlock_bh(&td->tid_release_lock); + + if (!td->nofail_skb) + td->nofail_skb = + alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL); +} + +/* use ctx as a next pointer in the tid release list */ +void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid) +{ + struct t3c_data *td = T3C_DATA(tdev); + struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid]; + + spin_lock_bh(&td->tid_release_lock); + p->ctx = (void *)td->tid_release_list; + p->client = NULL; + td->tid_release_list = p; + if (!p->ctx || td->release_list_incomplete) + schedule_work(&td->tid_release_task); + spin_unlock_bh(&td->tid_release_lock); +} + +EXPORT_SYMBOL(cxgb3_queue_tid_release); + +/* + * Remove a tid from the TID table. A client may defer processing its last + * CPL message if it is locked at the time it arrives, and while the message + * sits in the client's backlog the TID may be reused for another connection. + * To handle this we atomically switch the TID association if it still points + * to the original client context. + */ +void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid) +{ + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + + BUG_ON(tid >= t->ntids); + if (tdev->type == T3A) + (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL); + else { + struct sk_buff *skb; + + skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); + if (likely(skb)) { + mk_tid_release(skb, tid); + cxgb3_ofld_send(tdev, skb); + t->tid_tab[tid].ctx = NULL; + } else + cxgb3_queue_tid_release(tdev, tid); + } + atomic_dec(&t->tids_in_use); +} + +EXPORT_SYMBOL(cxgb3_remove_tid); + +int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client, + void *ctx) +{ + int atid = -1; + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + + spin_lock_bh(&t->atid_lock); + if (t->afree && + t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <= + t->ntids) { + union active_open_entry *p = t->afree; + + atid = (p - t->atid_tab) + t->atid_base; + t->afree = p->next; + p->t3c_tid.ctx = ctx; + p->t3c_tid.client = client; + t->atids_in_use++; + } + spin_unlock_bh(&t->atid_lock); + return atid; +} + +EXPORT_SYMBOL(cxgb3_alloc_atid); + +int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client, + void *ctx) +{ + int stid = -1; + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + + spin_lock_bh(&t->stid_lock); + if (t->sfree) { + union listen_entry *p = t->sfree; + + stid = (p - t->stid_tab) + t->stid_base; + t->sfree = p->next; + p->t3c_tid.ctx = ctx; + p->t3c_tid.client = client; + t->stids_in_use++; + } + spin_unlock_bh(&t->stid_lock); + return stid; +} + +EXPORT_SYMBOL(cxgb3_alloc_stid); + +/* Get the t3cdev associated with a net_device */ +struct t3cdev *dev2t3cdev(struct net_device *dev) +{ + const struct port_info *pi = netdev_priv(dev); + + return (struct t3cdev *)pi->adapter; +} + +EXPORT_SYMBOL(dev2t3cdev); + +static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_smt_write_rpl *rpl = cplhdr(skb); + + if (rpl->status != CPL_ERR_NONE) + printk(KERN_ERR + "Unexpected SMT_WRITE_RPL status %u for entry %u\n", + rpl->status, GET_TID(rpl)); + + return CPL_RET_BUF_DONE; +} + +static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_l2t_write_rpl *rpl = cplhdr(skb); + + if (rpl->status != CPL_ERR_NONE) + printk(KERN_ERR + "Unexpected L2T_WRITE_RPL status %u for entry %u\n", + rpl->status, GET_TID(rpl)); + + return CPL_RET_BUF_DONE; +} + +static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + struct 
cpl_rte_write_rpl *rpl = cplhdr(skb); + + if (rpl->status != CPL_ERR_NONE) + printk(KERN_ERR + "Unexpected RTE_WRITE_RPL status %u for entry %u\n", + rpl->status, GET_TID(rpl)); + + return CPL_RET_BUF_DONE; +} + +static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_act_open_rpl *rpl = cplhdr(skb); + unsigned int atid = G_TID(ntohl(rpl->atid)); + struct t3c_tid_entry *t3c_tid; + + t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client && + t3c_tid->client->handlers && + t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) { + return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb, + t3c_tid-> + ctx); + } else { + printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + dev->name, CPL_ACT_OPEN_RPL); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + union opcode_tid *p = cplhdr(skb); + unsigned int stid = G_TID(ntohl(p->opcode_tid)); + struct t3c_tid_entry *t3c_tid; + + t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[p->opcode]) { + return t3c_tid->client->handlers[p->opcode] (dev, skb, + t3c_tid->ctx); + } else { + printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + dev->name, p->opcode); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + union opcode_tid *p = cplhdr(skb); + unsigned int hwtid = G_TID(ntohl(p->opcode_tid)); + struct t3c_tid_entry *t3c_tid; + + t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[p->opcode]) { + return t3c_tid->client->handlers[p->opcode] + (dev, skb, t3c_tid->ctx); + } else { + printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + dev->name, p->opcode); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +static int do_cr(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_pass_accept_req *req = cplhdr(skb); + unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); + struct tid_info *t = &(T3C_DATA(dev))->tid_maps; + struct t3c_tid_entry *t3c_tid; + unsigned int tid = GET_TID(req); + + if (unlikely(tid >= t->ntids)) { + printk("%s: passive open TID %u too large\n", + dev->name, tid); + t3_fatal_err(tdev2adap(dev)); + return CPL_RET_BUF_DONE; + } + + t3c_tid = lookup_stid(t, stid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) { + return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ] + (dev, skb, t3c_tid->ctx); + } else { + printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + dev->name, CPL_PASS_ACCEPT_REQ); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +/* + * Returns an sk_buff for a reply CPL message of size len. If the input + * sk_buff has no other users it is trimmed and reused, otherwise a new buffer + * is allocated. The input skb must be of size at least len. Note that this + * operation does not destroy the original skb data even if it decides to reuse + * the buffer. 
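 *
 * Editor's sketch (not part of this patch) of the trim-and-reuse
 * contract from a caller's side, building a reply in place of the
 * request it answers:
 *
 *	reply = cxgb3_get_cpl_reply_skb(skb, sizeof(struct cpl_abort_rpl),
 *					GFP_ATOMIC);
 *	if (reply) {
 *		struct cpl_abort_rpl *rpl = cplhdr(reply);
 *		// fill in *rpl and send; on the reuse path skb_get()
 *		// has taken an extra reference for the caller
 *	}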
+ */ +static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len, + gfp_t gfp) +{ + if (likely(!skb_cloned(skb))) { + BUG_ON(skb->len < len); + __skb_trim(skb, len); + skb_get(skb); + } else { + skb = alloc_skb(len, gfp); + if (skb) + __skb_put(skb, len); + } + return skb; +} + +static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) +{ + union opcode_tid *p = cplhdr(skb); + unsigned int hwtid = G_TID(ntohl(p->opcode_tid)); + struct t3c_tid_entry *t3c_tid; + + t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[p->opcode]) { + return t3c_tid->client->handlers[p->opcode] + (dev, skb, t3c_tid->ctx); + } else { + struct cpl_abort_req_rss *req = cplhdr(skb); + struct cpl_abort_rpl *rpl; + struct sk_buff *reply_skb; + unsigned int tid = GET_TID(req); + u8 cmd = req->status; + + if (req->status == CPL_ERR_RTX_NEG_ADVICE || + req->status == CPL_ERR_PERSIST_NEG_ADVICE) + goto out; + + reply_skb = cxgb3_get_cpl_reply_skb(skb, + sizeof(struct + cpl_abort_rpl), + GFP_ATOMIC); + + if (!reply_skb) { + printk("do_abort_req_rss: couldn't get skb!\n"); + goto out; + } + reply_skb->priority = CPL_PRIORITY_DATA; + __skb_put(reply_skb, sizeof(struct cpl_abort_rpl)); + rpl = cplhdr(reply_skb); + rpl->wr.wr_hi = + htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); + rpl->wr.wr_lo = htonl(V_WR_TID(tid)); + OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); + rpl->cmd = cmd; + cxgb3_ofld_send(dev, reply_skb); +out: + return CPL_RET_BUF_DONE; + } +} + +static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_act_establish *req = cplhdr(skb); + unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); + struct tid_info *t = &(T3C_DATA(dev))->tid_maps; + struct t3c_tid_entry *t3c_tid; + unsigned int tid = GET_TID(req); + + if (unlikely(tid >= t->ntids)) { + printk("%s: active establish TID %u too large\n", + dev->name, tid); + t3_fatal_err(tdev2adap(dev)); + return CPL_RET_BUF_DONE; + } + + t3c_tid = lookup_atid(t, atid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { + return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] + (dev, skb, t3c_tid->ctx); + } else { + printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + dev->name, CPL_ACT_ESTABLISH); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +static int do_trace(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_trace_pkt *p = cplhdr(skb); + + skb->protocol = htons(0xffff); + skb->dev = dev->lldev; + skb_pull(skb, sizeof(*p)); + skb_reset_mac_header(skb); + netif_receive_skb(skb); + return 0; +} + +/* + * That skb would better have come from process_responses() where we abuse + * ->priority and ->csum to carry our data. NB: if we get to per-arch + * ->csum, the things might get really interesting here. 
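 *
 * Editor's note (sketch, not in this patch): the assumed encoding that
 * the accessors below rely on is
 *
 *	hwtid  = ntohl(skb->priority) >> 8 & 0xfffff;	// 20-bit TID
 *	opcode = G_OPCODE(ntohl(skb->csum));		// CPL opcode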
+ */ + +static inline u32 get_hwtid(struct sk_buff *skb) +{ + return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff; +} + +static inline u32 get_opcode(struct sk_buff *skb) +{ + return G_OPCODE(ntohl((__force __be32)skb->csum)); +} + +static int do_term(struct t3cdev *dev, struct sk_buff *skb) +{ + unsigned int hwtid = get_hwtid(skb); + unsigned int opcode = get_opcode(skb); + struct t3c_tid_entry *t3c_tid; + + t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[opcode]) { + return t3c_tid->client->handlers[opcode] (dev, skb, + t3c_tid->ctx); + } else { + printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", + dev->name, opcode); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +static int nb_callback(struct notifier_block *self, unsigned long event, + void *ctx) +{ + switch (event) { + case (NETEVENT_NEIGH_UPDATE):{ + cxgb_neigh_update((struct neighbour *)ctx); + break; + } + case (NETEVENT_REDIRECT):{ + struct netevent_redirect *nr = ctx; + cxgb_redirect(nr->old, nr->new); + cxgb_neigh_update(dst_get_neighbour(nr->new)); + break; + } + default: + break; + } + return 0; +} + +static struct notifier_block nb = { + .notifier_call = nb_callback +}; + +/* + * Process a received packet with an unknown/unexpected CPL opcode. + */ +static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb) +{ + printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name, + *skb->data); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; +} + +/* + * Handlers for each CPL opcode + */ +static cpl_handler_func cpl_handlers[NUM_CPL_CMDS]; + +/* + * Add a new handler to the CPL dispatch table. A NULL handler may be supplied + * to unregister an existing handler. + */ +void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h) +{ + if (opcode < NUM_CPL_CMDS) + cpl_handlers[opcode] = h ? h : do_bad_cpl; + else + printk(KERN_ERR "T3C: handler registration for " + "opcode %x failed\n", opcode); +} + +EXPORT_SYMBOL(t3_register_cpl_handler); + +/* + * T3CDEV's receive method. + */ +static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n) +{ + while (n--) { + struct sk_buff *skb = *skbs++; + unsigned int opcode = get_opcode(skb); + int ret = cpl_handlers[opcode] (dev, skb); + +#if VALIDATE_TID + if (ret & CPL_RET_UNKNOWN_TID) { + union opcode_tid *p = cplhdr(skb); + + printk(KERN_ERR "%s: CPL message (opcode %u) had " + "unknown TID %u\n", dev->name, opcode, + G_TID(ntohl(p->opcode_tid))); + } +#endif + if (ret & CPL_RET_BUF_DONE) + kfree_skb(skb); + } + return 0; +} + +/* + * Sends an sk_buff to a T3C driver after dealing with any active network taps. 
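 *
 * Illustrative use (editor's sketch, mirroring the TID-release path
 * earlier in this file):
 *
 *	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
 *	if (skb) {
 *		mk_tid_release(skb, tid);
 *		cxgb3_ofld_send(tdev, skb);	// BHs held off around dev->send()
 *	}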
+ */ +int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb) +{ + int r; + + local_bh_disable(); + r = dev->send(dev, skb); + local_bh_enable(); + return r; +} + +EXPORT_SYMBOL(cxgb3_ofld_send); + +static int is_offloading(struct net_device *dev) +{ + struct adapter *adapter; + int i; + + read_lock_bh(&adapter_list_lock); + list_for_each_entry(adapter, &adapter_list, adapter_list) { + for_each_port(adapter, i) { + if (dev == adapter->port[i]) { + read_unlock_bh(&adapter_list_lock); + return 1; + } + } + } + read_unlock_bh(&adapter_list_lock); + return 0; +} + +static void cxgb_neigh_update(struct neighbour *neigh) +{ + struct net_device *dev = neigh->dev; + + if (dev && (is_offloading(dev))) { + struct t3cdev *tdev = dev2t3cdev(dev); + + BUG_ON(!tdev); + t3_l2t_update(tdev, neigh); + } +} + +static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e) +{ + struct sk_buff *skb; + struct cpl_set_tcb_field *req; + + skb = alloc_skb(sizeof(*req), GFP_ATOMIC); + if (!skb) { + printk(KERN_ERR "%s: cannot allocate skb!\n", __func__); + return; + } + skb->priority = CPL_PRIORITY_CONTROL; + req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); + req->reply = 0; + req->cpu_idx = 0; + req->word = htons(W_TCB_L2T_IX); + req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX)); + req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx)); + tdev->send(tdev, skb); +} + +static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) +{ + struct net_device *olddev, *newdev; + struct tid_info *ti; + struct t3cdev *tdev; + u32 tid; + int update_tcb; + struct l2t_entry *e; + struct t3c_tid_entry *te; + + olddev = dst_get_neighbour(old)->dev; + newdev = dst_get_neighbour(new)->dev; + if (!is_offloading(olddev)) + return; + if (!is_offloading(newdev)) { + printk(KERN_WARNING "%s: Redirect to non-offload " + "device ignored.\n", __func__); + return; + } + tdev = dev2t3cdev(olddev); + BUG_ON(!tdev); + if (tdev != dev2t3cdev(newdev)) { + printk(KERN_WARNING "%s: Redirect to different " + "offload device ignored.\n", __func__); + return; + } + + /* Add new L2T entry */ + e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev); + if (!e) { + printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", + __func__); + return; + } + + /* Walk tid table and notify clients of dst change. */ + ti = &(T3C_DATA(tdev))->tid_maps; + for (tid = 0; tid < ti->ntids; tid++) { + te = lookup_tid(ti, tid); + BUG_ON(!te); + if (te && te->ctx && te->client && te->client->redirect) { + update_tcb = te->client->redirect(te->ctx, old, new, e); + if (update_tcb) { + l2t_hold(L2DATA(tdev), e); + set_l2t_ix(tdev, tid, e); + } + } + } + l2t_release(L2DATA(tdev), e); +} + +/* + * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. + * The allocated memory is cleared. + */ +void *cxgb_alloc_mem(unsigned long size) +{ + void *p = kzalloc(size, GFP_KERNEL); + + if (!p) + p = vzalloc(size); + return p; +} + +/* + * Free memory allocated through cxgb_alloc_mem(). + */ +void cxgb_free_mem(void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +/* + * Allocate and initialize the TID tables. Returns 0 on success.
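 *
 * Editor's note (layout sketch, not in this patch): all three tables are
 * carved out of a single cxgb_alloc_mem() block, so one free releases
 * them all:
 *
 *	[ tid_tab: ntids ][ stid_tab: nstids ][ atid_tab: natids ]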
+ */ +static int init_tid_tabs(struct tid_info *t, unsigned int ntids, + unsigned int natids, unsigned int nstids, + unsigned int atid_base, unsigned int stid_base) +{ + unsigned long size = ntids * sizeof(*t->tid_tab) + + natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab); + + t->tid_tab = cxgb_alloc_mem(size); + if (!t->tid_tab) + return -ENOMEM; + + t->stid_tab = (union listen_entry *)&t->tid_tab[ntids]; + t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids]; + t->ntids = ntids; + t->nstids = nstids; + t->stid_base = stid_base; + t->sfree = NULL; + t->natids = natids; + t->atid_base = atid_base; + t->afree = NULL; + t->stids_in_use = t->atids_in_use = 0; + atomic_set(&t->tids_in_use, 0); + spin_lock_init(&t->stid_lock); + spin_lock_init(&t->atid_lock); + + /* + * Setup the free lists for stid_tab and atid_tab. + */ + if (nstids) { + while (--nstids) + t->stid_tab[nstids - 1].next = &t->stid_tab[nstids]; + t->sfree = t->stid_tab; + } + if (natids) { + while (--natids) + t->atid_tab[natids - 1].next = &t->atid_tab[natids]; + t->afree = t->atid_tab; + } + return 0; +} + +static void free_tid_maps(struct tid_info *t) +{ + cxgb_free_mem(t->tid_tab); +} + +static inline void add_adapter(struct adapter *adap) +{ + write_lock_bh(&adapter_list_lock); + list_add_tail(&adap->adapter_list, &adapter_list); + write_unlock_bh(&adapter_list_lock); +} + +static inline void remove_adapter(struct adapter *adap) +{ + write_lock_bh(&adapter_list_lock); + list_del(&adap->adapter_list); + write_unlock_bh(&adapter_list_lock); +} + +int cxgb3_offload_activate(struct adapter *adapter) +{ + struct t3cdev *dev = &adapter->tdev; + int natids, err; + struct t3c_data *t; + struct tid_range stid_range, tid_range; + struct mtutab mtutab; + unsigned int l2t_capacity; + + t = kzalloc(sizeof(*t), GFP_KERNEL); + if (!t) + return -ENOMEM; + + err = -EOPNOTSUPP; + if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 || + dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 || + dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 || + dev->ctl(dev, GET_MTUS, &mtutab) < 0 || + dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 || + dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0) + goto out_free; + + err = -ENOMEM; + L2DATA(dev) = t3_init_l2t(l2t_capacity); + if (!L2DATA(dev)) + goto out_free; + + natids = min(tid_range.num / 2, MAX_ATIDS); + err = init_tid_tabs(&t->tid_maps, tid_range.num, natids, + stid_range.num, ATID_BASE, stid_range.base); + if (err) + goto out_free_l2t; + + t->mtus = mtutab.mtus; + t->nmtus = mtutab.size; + + INIT_WORK(&t->tid_release_task, t3_process_tid_release_list); + spin_lock_init(&t->tid_release_lock); + INIT_LIST_HEAD(&t->list_node); + t->dev = dev; + + T3C_DATA(dev) = t; + dev->recv = process_rx; + dev->neigh_update = t3_l2t_update; + + /* Register netevent handler once */ + if (list_empty(&adapter_list)) + register_netevent_notifier(&nb); + + t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL); + t->release_list_incomplete = 0; + + add_adapter(adapter); + return 0; + +out_free_l2t: + t3_free_l2t(L2DATA(dev)); + L2DATA(dev) = NULL; +out_free: + kfree(t); + return err; +} + +void cxgb3_offload_deactivate(struct adapter *adapter) +{ + struct t3cdev *tdev = &adapter->tdev; + struct t3c_data *t = T3C_DATA(tdev); + + remove_adapter(adapter); + if (list_empty(&adapter_list)) + unregister_netevent_notifier(&nb); + + free_tid_maps(&t->tid_maps); + T3C_DATA(tdev) = NULL; + t3_free_l2t(L2DATA(tdev)); + L2DATA(tdev) = NULL; + if (t->nofail_skb) + 
kfree_skb(t->nofail_skb); + kfree(t); +} + +static inline void register_tdev(struct t3cdev *tdev) +{ + static int unit; + + mutex_lock(&cxgb3_db_lock); + snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++); + list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list); + mutex_unlock(&cxgb3_db_lock); +} + +static inline void unregister_tdev(struct t3cdev *tdev) +{ + mutex_lock(&cxgb3_db_lock); + list_del(&tdev->ofld_dev_list); + mutex_unlock(&cxgb3_db_lock); +} + +static inline int adap2type(struct adapter *adapter) +{ + int type = 0; + + switch (adapter->params.rev) { + case T3_REV_A: + type = T3A; + break; + case T3_REV_B: + case T3_REV_B2: + type = T3B; + break; + case T3_REV_C: + type = T3C; + break; + } + return type; +} + +void __devinit cxgb3_adapter_ofld(struct adapter *adapter) +{ + struct t3cdev *tdev = &adapter->tdev; + + INIT_LIST_HEAD(&tdev->ofld_dev_list); + + cxgb3_set_dummy_ops(tdev); + tdev->send = t3_offload_tx; + tdev->ctl = cxgb_offload_ctl; + tdev->type = adap2type(adapter); + + register_tdev(tdev); +} + +void __devexit cxgb3_adapter_unofld(struct adapter *adapter) +{ + struct t3cdev *tdev = &adapter->tdev; + + tdev->recv = NULL; + tdev->neigh_update = NULL; + + unregister_tdev(tdev); +} + +void __init cxgb3_offload_init(void) +{ + int i; + + for (i = 0; i < NUM_CPL_CMDS; ++i) + cpl_handlers[i] = do_bad_cpl; + + t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl); + t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl); + t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl); + t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl); + t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl); + t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr); + t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl); + t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl); + t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl); + t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl); + t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl); + t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl); + t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl); + t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl); + t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl); + t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl); + t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss); + t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish); + t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl); + t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl); + t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term); + t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl); + t3_register_cpl_handler(CPL_TRACE_PKT, do_trace); + t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl); + t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl); + t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl); +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h new file mode 100644 index 0000000..929c298 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _CXGB3_OFFLOAD_H +#define _CXGB3_OFFLOAD_H + +#include +#include + +#include "l2t.h" + +#include "t3cdev.h" +#include "t3_cpl.h" + +struct adapter; + +void cxgb3_offload_init(void); + +void cxgb3_adapter_ofld(struct adapter *adapter); +void cxgb3_adapter_unofld(struct adapter *adapter); +int cxgb3_offload_activate(struct adapter *adapter); +void cxgb3_offload_deactivate(struct adapter *adapter); + +void cxgb3_set_dummy_ops(struct t3cdev *dev); + +struct t3cdev *dev2t3cdev(struct net_device *dev); + +/* + * Client registration. Users of T3 driver must register themselves. + * The T3 driver will call the add function of every client for each T3 + * adapter activated, passing up the t3cdev ptr. Each client fills out an + * array of callback functions to process CPL messages. + */ + +void cxgb3_register_client(struct cxgb3_client *client); +void cxgb3_unregister_client(struct cxgb3_client *client); +void cxgb3_add_clients(struct t3cdev *tdev); +void cxgb3_remove_clients(struct t3cdev *tdev); +void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port); + +typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, + struct sk_buff *skb, void *ctx); + +enum { + OFFLOAD_STATUS_UP, + OFFLOAD_STATUS_DOWN, + OFFLOAD_PORT_DOWN, + OFFLOAD_PORT_UP, + OFFLOAD_DB_FULL, + OFFLOAD_DB_EMPTY, + OFFLOAD_DB_DROP +}; + +struct cxgb3_client { + char *name; + void (*add) (struct t3cdev *); + void (*remove) (struct t3cdev *); + cxgb3_cpl_handler_func *handlers; + int (*redirect)(void *ctx, struct dst_entry *old, + struct dst_entry *new, struct l2t_entry *l2t); + struct list_head client_list; + void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port); +}; + +/* + * TID allocation services. 
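 *
 * Editor's sketch (not part of this patch): a typical active-open flow,
 * where "ep" stands in for a client's per-connection context:
 *
 *	atid = cxgb3_alloc_atid(tdev, &my_client, ep);
 *	if (atid < 0)
 *		goto fail;		// atid table exhausted
 *	// ... send CPL_ACT_OPEN_REQ carrying atid ...
 *	ep = cxgb3_free_atid(tdev, atid);	// returns the stored ctx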
+ */ +int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client, + void *ctx); +int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client, + void *ctx); +void *cxgb3_free_atid(struct t3cdev *dev, int atid); +void cxgb3_free_stid(struct t3cdev *dev, int stid); +void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client, + void *ctx, unsigned int tid); +void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid); +void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid); + +struct t3c_tid_entry { + struct cxgb3_client *client; + void *ctx; +}; + +/* CPL message priority levels */ +enum { + CPL_PRIORITY_DATA = 0, /* data messages */ + CPL_PRIORITY_SETUP = 1, /* connection setup messages */ + CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */ + CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */ + CPL_PRIORITY_ACK = 1, /* RX ACK messages */ + CPL_PRIORITY_CONTROL = 1 /* offload control messages */ +}; + +/* Flags for return value of CPL message handlers */ +enum { + CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */ + CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */ + CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */ +}; + +typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb); + +/* + * Returns a pointer to the first byte of the CPL header in an sk_buff that + * contains a CPL message. + */ +static inline void *cplhdr(struct sk_buff *skb) +{ + return skb->data; +} + +void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h); + +union listen_entry { + struct t3c_tid_entry t3c_tid; + union listen_entry *next; +}; + +union active_open_entry { + struct t3c_tid_entry t3c_tid; + union active_open_entry *next; +}; + +/* + * Holds the size, base address, free list start, etc. of the TID, server TID, + * and active-open TID tables for an offload device. + * The tables themselves are allocated dynamically. + */ +struct tid_info { + struct t3c_tid_entry *tid_tab; + unsigned int ntids; + atomic_t tids_in_use; + + union listen_entry *stid_tab; + unsigned int nstids; + unsigned int stid_base; + + union active_open_entry *atid_tab; + unsigned int natids; + unsigned int atid_base; + + /* + * The following members are accessed R/W so we put them in their own + * cache lines. + * + * XXX We could combine the atid fields above with the lock here since + * atids are used once (unlike other tids). OTOH the above fields are + * usually in cache due to tid_tab.
+ */ + spinlock_t atid_lock ____cacheline_aligned_in_smp; + union active_open_entry *afree; + unsigned int atids_in_use; + + spinlock_t stid_lock ____cacheline_aligned; + union listen_entry *sfree; + unsigned int stids_in_use; +}; + +struct t3c_data { + struct list_head list_node; + struct t3cdev *dev; + unsigned int tx_max_chunk; /* max payload for TX_DATA */ + unsigned int max_wrs; /* max in-flight WRs per connection */ + unsigned int nmtus; + const unsigned short *mtus; + struct tid_info tid_maps; + + struct t3c_tid_entry *tid_release_list; + spinlock_t tid_release_lock; + struct work_struct tid_release_task; + + struct sk_buff *nofail_skb; + unsigned int release_list_incomplete; +}; + +/* + * t3cdev -> t3c_data accessor + */ +#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt) + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h new file mode 100644 index 0000000..0d9b0e6 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _FIRMWARE_EXPORTS_H_ +#define _FIRMWARE_EXPORTS_H_ + +/* WR OPCODES supported by the firmware. 
+ */ +#define FW_WROPCODE_FORWARD 0x01 +#define FW_WROPCODE_BYPASS 0x05 + +#define FW_WROPCODE_TUNNEL_TX_PKT 0x03 + +#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00 +#define FW_WROPCODE_ULPTX_MEM_READ 0x02 +#define FW_WROPCODE_ULPTX_PKT 0x04 +#define FW_WROPCODE_ULPTX_INVALIDATE 0x06 + +#define FW_WROPCODE_TUNNEL_RX_PKT 0x07 + +#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08 +#define FW_WROPCODE_OFLD_CLOSE_CON 0x09 +#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A +#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F +#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B +#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C +#define FW_WROPCODE_OFLD_TX_DATA 0x0D +#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E + +#define FW_WROPCODE_RI_RDMA_INIT 0x10 +#define FW_WROPCODE_RI_RDMA_WRITE 0x11 +#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12 +#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13 +#define FW_WROPCODE_RI_SEND 0x14 +#define FW_WROPCODE_RI_TERMINATE 0x15 +#define FW_WROPCODE_RI_RDMA_READ 0x16 +#define FW_WROPCODE_RI_RECEIVE 0x17 +#define FW_WROPCODE_RI_BIND_MW 0x18 +#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19 +#define FW_WROPCODE_RI_LOCAL_INV 0x1A +#define FW_WROPCODE_RI_MODIFY_QP 0x1B +#define FW_WROPCODE_RI_BYPASS 0x1C + +#define FW_WROPOCDE_RSVD 0x1E + +#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F + +#define FW_WROPCODE_MNGT 0x1D +#define FW_MNGTOPCODE_PKTSCHED_SET 0x00 + +/* Maximum size of a WR sent from the host, limited by the SGE. + * + * Note: WRs coming from ULP or TP are only limited by CIM. + */ +#define FW_WR_SIZE 128 + +/* Maximum number of outstanding WRs sent from the host. Value must be + * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by + * offload modules to limit the number of WRs per connection. + */ +#define FW_T3_WR_NUM 16 +#define FW_N3_WR_NUM 7 + +#ifndef N3 +# define FW_WR_NUM FW_T3_WR_NUM +#else +# define FW_WR_NUM FW_N3_WR_NUM +#endif + +/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These + * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must + * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START. + * + * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent + * to RESP Queue[i]. + */ +#define FW_TUNNEL_NUM 8 +#define FW_TUNNEL_SGEEC_START 8 +#define FW_TUNNEL_TID_START 65544 + +/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues + * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID' + * (or 'uP Token') FW_CTRL_TID_START. + * + * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i]. + */ +#define FW_CTRL_NUM 8 +#define FW_CTRL_SGEEC_START 65528 +#define FW_CTRL_TID_START 65536 + +/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These + * queues must start at SGE Egress Context FW_OFLD_SGEEC_START. + * + * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for + * OFFLOAD Queues, as the host is responsible for providing the correct TID in + * every WR. + * + * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i]. + */ +#define FW_OFLD_NUM 8 +#define FW_OFLD_SGEEC_START 0 + +/* + * + */ +#define FW_RI_NUM 1 +#define FW_RI_SGEEC_START 65527 +#define FW_RI_TID_START 65552 + +/* + * The RX_PKT_TID + */ +#define FW_RX_PKT_NUM 1 +#define FW_RX_PKT_TID_START 65553 + +/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported + * by the firmware. + */ +#define FW_WRC_NUM \ + (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM) + +/* + * FW type and version.
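 *
 * Editor's example (not in this patch): the S_/M_/V_/G_ quadruples pack
 * and unpack bit fields, e.g. for a hypothetical firmware 7.12.0:
 *
 *	u32 vers = V_FW_VERSION_MAJOR(7) | V_FW_VERSION_MINOR(12) |
 *		   V_FW_VERSION_MICRO(0);
 *	unsigned int major = G_FW_VERSION_MAJOR(vers);	// == 7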
+ */ +#define S_FW_VERSION_TYPE 28 +#define M_FW_VERSION_TYPE 0xF +#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE) +#define G_FW_VERSION_TYPE(x) \ + (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE) + +#define S_FW_VERSION_MAJOR 16 +#define M_FW_VERSION_MAJOR 0xFFF +#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR) +#define G_FW_VERSION_MAJOR(x) \ + (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR) + +#define S_FW_VERSION_MINOR 8 +#define M_FW_VERSION_MINOR 0xFF +#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR) +#define G_FW_VERSION_MINOR(x) \ + (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR) + +#define S_FW_VERSION_MICRO 0 +#define M_FW_VERSION_MICRO 0xFF +#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO) +#define G_FW_VERSION_MICRO(x) \ + (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO) + +#endif /* _FIRMWARE_EXPORTS_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c new file mode 100644 index 0000000..f452c40 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -0,0 +1,445 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include +#include "common.h" +#include "t3cdev.h" +#include "cxgb3_defs.h" +#include "l2t.h" +#include "t3_cpl.h" +#include "firmware_exports.h" + +#define VLAN_NONE 0xfff + +/* + * Module locking notes: There is a RW lock protecting the L2 table as a + * whole plus a spinlock per L2T entry. Entry lookups and allocations happen + * under the protection of the table lock, individual entry changes happen + * while holding that entry's spinlock. The table lock nests outside the + * entry locks. Allocations of new entries take the table lock as writers so + * no other lookups can happen while allocating new entries. Entry updates + * take the table lock as readers so multiple entries can be updated in + * parallel. An L2T entry can be dropped by decrementing its reference count + * and therefore can happen in parallel with entry allocation but no entry + * can change state or increment its ref count during allocation as both of + * these perform lookups. 
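 *
 * Editor's sketch of the resulting pattern (not in this patch; compare
 * t3_l2t_get() below):
 *
 *	write_lock_bh(&d->lock);	// allocation: exclusive table access
 *	e = alloc_l2e(d);
 *	spin_lock(&e->lock);		// per-entry state changes
 *	// ... initialize the entry ...
 *	spin_unlock(&e->lock);
 *	write_unlock_bh(&d->lock);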
+ */ + +static inline unsigned int vlan_prio(const struct l2t_entry *e) +{ + return e->vlan >> 13; +} + +static inline unsigned int arp_hash(u32 key, int ifindex, + const struct l2t_data *d) +{ + return jhash_2words(key, ifindex, 0) & (d->nentries - 1); +} + +static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n) +{ + neigh_hold(n); + if (e->neigh) + neigh_release(e->neigh); + e->neigh = n; +} + +/* + * Set up an L2T entry and send any packets waiting in the arp queue. The + * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the + * entry locked. + */ +static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct cpl_l2t_write_req *req; + struct sk_buff *tmp; + + if (!skb) { + skb = alloc_skb(sizeof(*req), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + } + + req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx)); + req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) | + V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) | + V_L2T_W_PRIO(vlan_prio(e))); + memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); + memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); + skb->priority = CPL_PRIORITY_CONTROL; + cxgb3_ofld_send(dev, skb); + + skb_queue_walk_safe(&e->arpq, skb, tmp) { + __skb_unlink(skb, &e->arpq); + cxgb3_ofld_send(dev, skb); + } + e->state = L2T_STATE_VALID; + + return 0; +} + +/* + * Add a packet to an L2T entry's queue of packets awaiting resolution. + * Must be called with the entry's lock held. + */ +static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) +{ + __skb_queue_tail(&e->arpq, skb); +} + +int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, + struct l2t_entry *e) +{ +again: + switch (e->state) { + case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ + neigh_event_send(e->neigh, NULL); + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_STALE) + e->state = L2T_STATE_VALID; + spin_unlock_bh(&e->lock); + case L2T_STATE_VALID: /* fast-path, send the packet on */ + return cxgb3_ofld_send(dev, skb); + case L2T_STATE_RESOLVING: + spin_lock_bh(&e->lock); + if (e->state != L2T_STATE_RESOLVING) { + /* ARP already completed */ + spin_unlock_bh(&e->lock); + goto again; + } + arpq_enqueue(e, skb); + spin_unlock_bh(&e->lock); + + /* + * Only the first packet added to the arpq should kick off + * resolution. However, because the alloc_skb below can fail, + * we allow each packet added to the arpq to retry resolution + * as a way of recovering from transient memory exhaustion. + * A better way would be to use a work request to retry L2T + * entries when there's no memory.
+ */ + if (!neigh_event_send(e->neigh, NULL)) { + skb = alloc_skb(sizeof(struct cpl_l2t_write_req), + GFP_ATOMIC); + if (!skb) + break; + + spin_lock_bh(&e->lock); + if (!skb_queue_empty(&e->arpq)) + setup_l2e_send_pending(dev, skb, e); + else /* we lost the race */ + __kfree_skb(skb); + spin_unlock_bh(&e->lock); + } + } + return 0; +} + +EXPORT_SYMBOL(t3_l2t_send_slow); + +void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e) +{ +again: + switch (e->state) { + case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ + neigh_event_send(e->neigh, NULL); + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_STALE) { + e->state = L2T_STATE_VALID; + } + spin_unlock_bh(&e->lock); + return; + case L2T_STATE_VALID: /* fast-path, send the packet on */ + return; + case L2T_STATE_RESOLVING: + spin_lock_bh(&e->lock); + if (e->state != L2T_STATE_RESOLVING) { + /* ARP already completed */ + spin_unlock_bh(&e->lock); + goto again; + } + spin_unlock_bh(&e->lock); + + /* + * Only the first packet added to the arpq should kick off + * resolution. However, because the alloc_skb below can fail, + * we allow each packet added to the arpq to retry resolution + * as a way of recovering from transient memory exhaustion. + * A better way would be to use a work request to retry L2T + * entries when there's no memory. + */ + neigh_event_send(e->neigh, NULL); + } +} + +EXPORT_SYMBOL(t3_l2t_send_event); + +/* + * Allocate a free L2T entry. Must be called with l2t_data.lock held. + */ +static struct l2t_entry *alloc_l2e(struct l2t_data *d) +{ + struct l2t_entry *end, *e, **p; + + if (!atomic_read(&d->nfree)) + return NULL; + + /* there's definitely a free entry */ + for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e) + if (atomic_read(&e->refcnt) == 0) + goto found; + + for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ; +found: + d->rover = e + 1; + atomic_dec(&d->nfree); + + /* + * The entry we found may be an inactive entry that is + * presently in the hash table. We need to remove it. + */ + if (e->state != L2T_STATE_UNUSED) { + int hash = arp_hash(e->addr, e->ifindex, d); + + for (p = &d->l2tab[hash].first; *p; p = &(*p)->next) + if (*p == e) { + *p = e->next; + break; + } + e->state = L2T_STATE_UNUSED; + } + return e; +} + +/* + * Called when an L2T entry has no more users. The entry is left in the hash + * table since it is likely to be reused but we also bump nfree to indicate + * that the entry can be reallocated for a different neighbor. We also drop + * the existing neighbor reference in case the neighbor is going away and is + * waiting on our reference. + * + * Because entries can be reallocated to other neighbors once their ref count + * drops to 0 we need to take the entry's lock to avoid races with a new + * incarnation. + */ +void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e) +{ + spin_lock_bh(&e->lock); + if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + if (e->neigh) { + neigh_release(e->neigh); + e->neigh = NULL; + } + } + spin_unlock_bh(&e->lock); + atomic_inc(&d->nfree); +} + +EXPORT_SYMBOL(t3_l2e_free); + +/* + * Update an L2T entry that was previously used for the same next hop as neigh. + * Must be called with softirqs disabled. 
+ */ +static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) +{ + unsigned int nud_state; + + spin_lock(&e->lock); /* avoid race with t3_l2t_free */ + + if (neigh != e->neigh) + neigh_replace(e, neigh); + nud_state = neigh->nud_state; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || + !(nud_state & NUD_VALID)) + e->state = L2T_STATE_RESOLVING; + else if (nud_state & NUD_CONNECTED) + e->state = L2T_STATE_VALID; + else + e->state = L2T_STATE_STALE; + spin_unlock(&e->lock); +} + +struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, + struct net_device *dev) +{ + struct l2t_entry *e; + struct l2t_data *d = L2DATA(cdev); + u32 addr = *(u32 *) neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = arp_hash(addr, ifidx, d); + struct port_info *p = netdev_priv(dev); + int smt_idx = p->port_id; + + write_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (e->addr == addr && e->ifindex == ifidx && + e->smt_idx == smt_idx) { + l2t_hold(d, e); + if (atomic_read(&e->refcnt) == 1) + reuse_entry(e, neigh); + goto done; + } + + /* Need to allocate a new entry */ + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t3_l2t_free */ + e->next = d->l2tab[hash].first; + d->l2tab[hash].first = e; + e->state = L2T_STATE_RESOLVING; + e->addr = addr; + e->ifindex = ifidx; + e->smt_idx = smt_idx; + atomic_set(&e->refcnt, 1); + neigh_replace(e, neigh); + if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + e->vlan = vlan_dev_vlan_id(neigh->dev); + else + e->vlan = VLAN_NONE; + spin_unlock(&e->lock); + } +done: + write_unlock_bh(&d->lock); + return e; +} + +EXPORT_SYMBOL(t3_l2t_get); + +/* + * Called when address resolution fails for an L2T entry to handle packets + * on the arpq head. If a packet specifies a failure handler it is invoked, + * otherwise the packet is sent to the offload device. + * + * XXX: maybe we should abandon the latter behavior and just require a failure + * handler. + */ +static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq) +{ + struct sk_buff *skb, *tmp; + + skb_queue_walk_safe(arpq, skb, tmp) { + struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + __skb_unlink(skb, arpq); + if (cb->arp_failure_handler) + cb->arp_failure_handler(dev, skb); + else + cxgb3_ofld_send(dev, skb); + } +} + +/* + * Called when the host's ARP layer makes a change to some entry that is + * loaded into the HW L2 table. + */ +void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh) +{ + struct sk_buff_head arpq; + struct l2t_entry *e; + struct l2t_data *d = L2DATA(dev); + u32 addr = *(u32 *) neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = arp_hash(addr, ifidx, d); + + read_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (e->addr == addr && e->ifindex == ifidx) { + spin_lock(&e->lock); + goto found; + } + read_unlock_bh(&d->lock); + return; + +found: + __skb_queue_head_init(&arpq); + + read_unlock(&d->lock); + if (atomic_read(&e->refcnt)) { + if (neigh != e->neigh) + neigh_replace(e, neigh); + + if (e->state == L2T_STATE_RESOLVING) { + if (neigh->nud_state & NUD_FAILED) { + skb_queue_splice_init(&e->arpq, &arpq); + } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE)) + setup_l2e_send_pending(dev, NULL, e); + } else { + e->state = neigh->nud_state & NUD_CONNECTED ?
+ L2T_STATE_VALID : L2T_STATE_STALE; + if (memcmp(e->dmac, neigh->ha, 6)) + setup_l2e_send_pending(dev, NULL, e); + } + } + spin_unlock_bh(&e->lock); + + if (!skb_queue_empty(&arpq)) + handle_failed_resolution(dev, &arpq); +} + +struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) +{ + struct l2t_data *d; + int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry); + + d = cxgb_alloc_mem(size); + if (!d) + return NULL; + + d->nentries = l2t_capacity; + d->rover = &d->l2tab[1]; /* entry 0 is not used */ + atomic_set(&d->nfree, l2t_capacity - 1); + rwlock_init(&d->lock); + + for (i = 0; i < l2t_capacity; ++i) { + d->l2tab[i].idx = i; + d->l2tab[i].state = L2T_STATE_UNUSED; + __skb_queue_head_init(&d->l2tab[i].arpq); + spin_lock_init(&d->l2tab[i].lock); + atomic_set(&d->l2tab[i].refcnt, 0); + } + return d; +} + +void t3_free_l2t(struct l2t_data *d) +{ + cxgb_free_mem(d); +} + diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h new file mode 100644 index 0000000..7a12d52 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _CHELSIO_L2T_H +#define _CHELSIO_L2T_H + +#include +#include "t3cdev.h" +#include + +enum { + L2T_STATE_VALID, /* entry is up to date */ + L2T_STATE_STALE, /* entry may be used but needs revalidation */ + L2T_STATE_RESOLVING, /* entry needs address resolution */ + L2T_STATE_UNUSED /* entry not in use */ +}; + +struct neighbour; +struct sk_buff; + +/* + * Each L2T entry plays multiple roles. First of all, it keeps state for the + * corresponding entry of the HW L2 table and maintains a queue of offload + * packets awaiting address resolution. Second, it is a node of a hash table + * chain, where the nodes of the chain are linked together through their next + * pointer. Finally, each node is a bucket of a hash table, pointing to the + * first element in its chain through its first pointer. 
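 *
 * Editor's sketch (not in this patch): a lookup therefore walks the
 * chain hanging off the bucket's own "first" pointer:
 *
 *	for (e = d->l2tab[hash].first; e; e = e->next)
 *		if (e->addr == addr && e->ifindex == ifidx)
 *			break;		// e is the match, or NULL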
+
+typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
+					 struct sk_buff * skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+	arp_failure_handler_func arp_failure_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void set_arp_failure_handler(struct sk_buff *skb,
+					   arp_failure_handler_func hnd)
+{
+	L2T_SKB_CB(skb)->arp_failure_handler = hnd;
+}
+
+/*
+ * Getting to the L2 data from an offload device.
+ */
+#define L2DATA(dev) ((dev)->l2opt)
+
+#define W_TCB_L2T_IX    0
+#define S_TCB_L2T_IX    7
+#define M_TCB_L2T_IX    0x7ffULL
+#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
+
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+			     struct net_device *dev);
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+		     struct l2t_entry *e);
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
+void t3_free_l2t(struct l2t_data *d);
+
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
+
+static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
+			   struct l2t_entry *e)
+{
+	if (likely(e->state == L2T_STATE_VALID))
+		return cxgb3_ofld_send(dev, skb);
+	return t3_l2t_send_slow(dev, skb, e);
+}
+
+static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
+{
+	if (atomic_dec_and_test(&e->refcnt))
+		t3_l2e_free(d, e);
+}
+
+static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+	if (atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */
+		atomic_dec(&d->nfree);
+}
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/mc5.c b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
new file mode 100644
index 0000000..e13b7fe
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "common.h" +#include "regs.h" + +enum { + IDT75P52100 = 4, + IDT75N43102 = 5 +}; + +/* DBGI command mode */ +enum { + DBGI_MODE_MBUS = 0, + DBGI_MODE_IDT52100 = 5 +}; + +/* IDT 75P52100 commands */ +#define IDT_CMD_READ 0 +#define IDT_CMD_WRITE 1 +#define IDT_CMD_SEARCH 2 +#define IDT_CMD_LEARN 3 + +/* IDT LAR register address and value for 144-bit mode (low 32 bits) */ +#define IDT_LAR_ADR0 0x180006 +#define IDT_LAR_MODE144 0xffff0000 + +/* IDT SCR and SSR addresses (low 32 bits) */ +#define IDT_SCR_ADR0 0x180000 +#define IDT_SSR0_ADR0 0x180002 +#define IDT_SSR1_ADR0 0x180004 + +/* IDT GMR base address (low 32 bits) */ +#define IDT_GMR_BASE_ADR0 0x180020 + +/* IDT data and mask array base addresses (low 32 bits) */ +#define IDT_DATARY_BASE_ADR0 0 +#define IDT_MSKARY_BASE_ADR0 0x80000 + +/* IDT 75N43102 commands */ +#define IDT4_CMD_SEARCH144 3 +#define IDT4_CMD_WRITE 4 +#define IDT4_CMD_READ 5 + +/* IDT 75N43102 SCR address (low 32 bits) */ +#define IDT4_SCR_ADR0 0x3 + +/* IDT 75N43102 GMR base addresses (low 32 bits) */ +#define IDT4_GMR_BASE0 0x10 +#define IDT4_GMR_BASE1 0x20 +#define IDT4_GMR_BASE2 0x30 + +/* IDT 75N43102 data and mask array base addresses (low 32 bits) */ +#define IDT4_DATARY_BASE_ADR0 0x1000000 +#define IDT4_MSKARY_BASE_ADR0 0x2000000 + +#define MAX_WRITE_ATTEMPTS 5 + +#define MAX_ROUTES 2048 + +/* + * Issue a command to the TCAM and wait for its completion. The address and + * any data required by the command must have been setup by the caller. + */ +static int mc5_cmd_write(struct adapter *adapter, u32 cmd) +{ + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd); + return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS, + F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1); +} + +static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2, + u32 v3) +{ + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1); + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2); + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3); +} + +static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2, + u32 v3) +{ + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1); + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2); + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3); +} + +static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2, + u32 *v3) +{ + *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0); + *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1); + *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2); +} + +/* + * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM + * command cmd. The data to be written must have been set up by the caller. + * Returns -1 on failure, 0 on success. 
+ */ +static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd) +{ + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo); + if (mc5_cmd_write(adapter, cmd) == 0) + return 0; + CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n", + addr_lo); + return -1; +} + +static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base, + u32 data_array_base, u32 write_cmd, + int addr_shift) +{ + unsigned int i; + struct adapter *adap = mc5->adapter; + + /* + * We need the size of the TCAM data and mask arrays in terms of + * 72-bit entries. + */ + unsigned int size72 = mc5->tcam_size; + unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX); + + if (mc5->mode == MC5_MODE_144_BIT) { + size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */ + server_base *= 2; + } + + /* Clear the data array */ + dbgi_wr_data3(adap, 0, 0, 0); + for (i = 0; i < size72; i++) + if (mc5_write(adap, data_array_base + (i << addr_shift), + write_cmd)) + return -1; + + /* Initialize the mask array. */ + dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); + for (i = 0; i < size72; i++) { + if (i == server_base) /* entering server or routing region */ + t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0, + mc5->mode == MC5_MODE_144_BIT ? + 0xfffffff9 : 0xfffffffd); + if (mc5_write(adap, mask_array_base + (i << addr_shift), + write_cmd)) + return -1; + } + return 0; +} + +static int init_idt52100(struct mc5 *mc5) +{ + int i; + struct adapter *adap = mc5->adapter; + + t3_write_reg(adap, A_MC5_DB_RSP_LATENCY, + V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15)); + t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2); + + /* + * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and + * GMRs 8-9 for ACK- and AOPEN searches. + */ + t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH); + t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN); + t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000); + t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN); + t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH); + t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN); + t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH); + t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000); + t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ); + + /* Set DBGI command mode for IDT TCAM. 
*/ + t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100); + + /* Set up LAR */ + dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0); + if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE)) + goto err; + + /* Set up SSRs */ + dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0); + if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) || + mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE)) + goto err; + + /* Set up GMRs */ + for (i = 0; i < 32; ++i) { + if (i >= 12 && i < 15) + dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff); + else if (i == 15) + dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff); + else + dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); + + if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE)) + goto err; + } + + /* Set up SCR */ + dbgi_wr_data3(adap, 1, 0, 0); + if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE)) + goto err; + + return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0, + IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0); +err: + return -EIO; +} + +static int init_idt43102(struct mc5 *mc5) +{ + int i; + struct adapter *adap = mc5->adapter; + + t3_write_reg(adap, A_MC5_DB_RSP_LATENCY, + adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) : + V_RDLAT(0xd) | V_SRCHLAT(0x12)); + + /* + * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask + * for ACK- and AOPEN searches. + */ + t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, + IDT4_CMD_SEARCH144 | 0x3800); + t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144); + t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800); + t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800); + t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800); + t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ); + + t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3); + + /* Set DBGI command mode for IDT TCAM. */ + t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100); + + /* Set up GMRs */ + dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); + for (i = 0; i < 7; ++i) + if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE)) + goto err; + + for (i = 0; i < 4; ++i) + if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE)) + goto err; + + dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff); + if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) || + mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) || + mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE)) + goto err; + + dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff); + if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE)) + goto err; + + /* Set up SCR */ + dbgi_wr_data3(adap, 0xf0000000, 0, 0); + if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE)) + goto err; + + return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0, + IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1); +err: + return -EIO; +} + +/* Put MC5 in DBGI mode. */ +static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5) +{ + t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG, + V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN); +} + +/* Put MC5 in M-Bus mode. 
*/ +static void mc5_dbgi_mode_disable(const struct mc5 *mc5) +{ + t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG, + V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | + V_COMPEN(mc5->mode == MC5_MODE_72_BIT) | + V_PRTYEN(mc5->parity_enabled) | F_MBUSEN); +} + +/* + * Initialization that requires the OS and protocol layers to already + * be initialized goes here. + */ +int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, + unsigned int nroutes) +{ + u32 cfg; + int err; + unsigned int tcam_size = mc5->tcam_size; + struct adapter *adap = mc5->adapter; + + if (!tcam_size) + return 0; + + if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size) + return -EINVAL; + + /* Reset the TCAM */ + cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE; + cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST; + t3_write_reg(adap, A_MC5_DB_CONFIG, cfg); + if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) { + CH_ERR(adap, "TCAM reset timed out\n"); + return -1; + } + + t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes); + t3_write_reg(adap, A_MC5_DB_FILTER_TABLE, + tcam_size - nroutes - nfilters); + t3_write_reg(adap, A_MC5_DB_SERVER_INDEX, + tcam_size - nroutes - nfilters - nservers); + + mc5->parity_enabled = 1; + + /* All the TCAM addresses we access have only the low 32 bits non 0 */ + t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0); + t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0); + + mc5_dbgi_mode_enable(mc5); + + switch (mc5->part_type) { + case IDT75P52100: + err = init_idt52100(mc5); + break; + case IDT75N43102: + err = init_idt43102(mc5); + break; + default: + CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type); + err = -EINVAL; + break; + } + + mc5_dbgi_mode_disable(mc5); + return err; +} + + +#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR) + +/* + * MC5 interrupt handler + */ +void t3_mc5_intr_handler(struct mc5 *mc5) +{ + struct adapter *adap = mc5->adapter; + u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE); + + if ((cause & F_PARITYERR) && mc5->parity_enabled) { + CH_ALERT(adap, "MC5 parity error\n"); + mc5->stats.parity_err++; + } + + if (cause & F_REQQPARERR) { + CH_ALERT(adap, "MC5 request queue parity error\n"); + mc5->stats.reqq_parity_err++; + } + + if (cause & F_DISPQPARERR) { + CH_ALERT(adap, "MC5 dispatch queue parity error\n"); + mc5->stats.dispq_parity_err++; + } + + if (cause & F_ACTRGNFULL) + mc5->stats.active_rgn_full++; + if (cause & F_NFASRCHFAIL) + mc5->stats.nfa_srch_err++; + if (cause & F_UNKNOWNCMD) + mc5->stats.unknown_cmd++; + if (cause & F_DELACTEMPTY) + mc5->stats.del_act_empty++; + if (cause & MC5_INT_FATAL) + t3_fatal_err(adap); + + t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause); +} + +void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode) +{ +#define K * 1024 + + static unsigned int tcam_part_size[] = { /* in K 72-bit entries */ + 64 K, 128 K, 256 K, 32 K + }; + +#undef K + + u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG); + + mc5->adapter = adapter; + mc5->mode = (unsigned char)mode; + mc5->part_type = (unsigned char)G_TMTYPE(cfg); + if (cfg & F_TMTYPEHI) + mc5->part_type |= 4; + + mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)]; + if (mode == MC5_MODE_144_BIT) + mc5->tcam_size /= 2; +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h new file mode 100644 index 0000000..6990f6c --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h @@ -0,0 +1,2598 @@ +#define A_SG_CONTROL 0x0 + +#define S_CONGMODE 29 
+#define V_CONGMODE(x) ((x) << S_CONGMODE) +#define F_CONGMODE V_CONGMODE(1U) + +#define S_TNLFLMODE 28 +#define V_TNLFLMODE(x) ((x) << S_TNLFLMODE) +#define F_TNLFLMODE V_TNLFLMODE(1U) + +#define S_FATLPERREN 27 +#define V_FATLPERREN(x) ((x) << S_FATLPERREN) +#define F_FATLPERREN V_FATLPERREN(1U) + +#define S_DROPPKT 20 +#define V_DROPPKT(x) ((x) << S_DROPPKT) +#define F_DROPPKT V_DROPPKT(1U) + +#define S_EGRGENCTRL 19 +#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL) +#define F_EGRGENCTRL V_EGRGENCTRL(1U) + +#define S_USERSPACESIZE 14 +#define M_USERSPACESIZE 0x1f +#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE) + +#define S_HOSTPAGESIZE 11 +#define M_HOSTPAGESIZE 0x7 +#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE) + +#define S_FLMODE 9 +#define V_FLMODE(x) ((x) << S_FLMODE) +#define F_FLMODE V_FLMODE(1U) + +#define S_PKTSHIFT 6 +#define M_PKTSHIFT 0x7 +#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT) + +#define S_ONEINTMULTQ 5 +#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ) +#define F_ONEINTMULTQ V_ONEINTMULTQ(1U) + +#define S_BIGENDIANINGRESS 2 +#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS) +#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U) + +#define S_ISCSICOALESCING 1 +#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING) +#define F_ISCSICOALESCING V_ISCSICOALESCING(1U) + +#define S_GLOBALENABLE 0 +#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE) +#define F_GLOBALENABLE V_GLOBALENABLE(1U) + +#define S_AVOIDCQOVFL 24 +#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL) +#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U) + +#define S_OPTONEINTMULTQ 23 +#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ) +#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U) + +#define S_CQCRDTCTRL 22 +#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL) +#define F_CQCRDTCTRL V_CQCRDTCTRL(1U) + +#define A_SG_KDOORBELL 0x4 + +#define S_SELEGRCNTX 31 +#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX) +#define F_SELEGRCNTX V_SELEGRCNTX(1U) + +#define S_EGRCNTX 0 +#define M_EGRCNTX 0xffff +#define V_EGRCNTX(x) ((x) << S_EGRCNTX) + +#define A_SG_GTS 0x8 + +#define S_RSPQ 29 +#define M_RSPQ 0x7 +#define V_RSPQ(x) ((x) << S_RSPQ) +#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ) + +#define S_NEWTIMER 16 +#define M_NEWTIMER 0x1fff +#define V_NEWTIMER(x) ((x) << S_NEWTIMER) + +#define S_NEWINDEX 0 +#define M_NEWINDEX 0xffff +#define V_NEWINDEX(x) ((x) << S_NEWINDEX) + +#define A_SG_CONTEXT_CMD 0xc + +#define S_CONTEXT_CMD_OPCODE 28 +#define M_CONTEXT_CMD_OPCODE 0xf +#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE) + +#define S_CONTEXT_CMD_BUSY 27 +#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY) +#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U) + +#define S_CQ_CREDIT 20 + +#define M_CQ_CREDIT 0x7f + +#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT) + +#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT) + +#define S_CQ 19 + +#define V_CQ(x) ((x) << S_CQ) +#define F_CQ V_CQ(1U) + +#define S_RESPONSEQ 18 +#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ) +#define F_RESPONSEQ V_RESPONSEQ(1U) + +#define S_EGRESS 17 +#define V_EGRESS(x) ((x) << S_EGRESS) +#define F_EGRESS V_EGRESS(1U) + +#define S_FREELIST 16 +#define V_FREELIST(x) ((x) << S_FREELIST) +#define F_FREELIST V_FREELIST(1U) + +#define S_CONTEXT 0 +#define M_CONTEXT 0xffff +#define V_CONTEXT(x) ((x) << S_CONTEXT) + +#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT) + +#define A_SG_CONTEXT_DATA0 0x10 + +#define A_SG_CONTEXT_DATA1 0x14 + +#define A_SG_CONTEXT_DATA2 0x18 + +#define A_SG_CONTEXT_DATA3 0x1c + +#define A_SG_CONTEXT_MASK0 0x20 + +#define 
A_SG_CONTEXT_MASK1 0x24 + +#define A_SG_CONTEXT_MASK2 0x28 + +#define A_SG_CONTEXT_MASK3 0x2c + +#define A_SG_RSPQ_CREDIT_RETURN 0x30 + +#define S_CREDITS 0 +#define M_CREDITS 0xffff +#define V_CREDITS(x) ((x) << S_CREDITS) + +#define A_SG_DATA_INTR 0x34 + +#define S_ERRINTR 31 +#define V_ERRINTR(x) ((x) << S_ERRINTR) +#define F_ERRINTR V_ERRINTR(1U) + +#define A_SG_HI_DRB_HI_THRSH 0x38 + +#define A_SG_HI_DRB_LO_THRSH 0x3c + +#define A_SG_LO_DRB_HI_THRSH 0x40 + +#define A_SG_LO_DRB_LO_THRSH 0x44 + +#define A_SG_RSPQ_FL_STATUS 0x4c + +#define S_RSPQ0DISABLED 8 + +#define S_FL0EMPTY 16 +#define V_FL0EMPTY(x) ((x) << S_FL0EMPTY) +#define F_FL0EMPTY V_FL0EMPTY(1U) + +#define A_SG_EGR_RCQ_DRB_THRSH 0x54 + +#define S_HIRCQDRBTHRSH 16 +#define M_HIRCQDRBTHRSH 0x7ff +#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH) + +#define S_LORCQDRBTHRSH 0 +#define M_LORCQDRBTHRSH 0x7ff +#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH) + +#define A_SG_EGR_CNTX_BADDR 0x58 + +#define A_SG_INT_CAUSE 0x5c + +#define S_HIRCQPARITYERROR 31 +#define V_HIRCQPARITYERROR(x) ((x) << S_HIRCQPARITYERROR) +#define F_HIRCQPARITYERROR V_HIRCQPARITYERROR(1U) + +#define S_LORCQPARITYERROR 30 +#define V_LORCQPARITYERROR(x) ((x) << S_LORCQPARITYERROR) +#define F_LORCQPARITYERROR V_LORCQPARITYERROR(1U) + +#define S_HIDRBPARITYERROR 29 +#define V_HIDRBPARITYERROR(x) ((x) << S_HIDRBPARITYERROR) +#define F_HIDRBPARITYERROR V_HIDRBPARITYERROR(1U) + +#define S_LODRBPARITYERROR 28 +#define V_LODRBPARITYERROR(x) ((x) << S_LODRBPARITYERROR) +#define F_LODRBPARITYERROR V_LODRBPARITYERROR(1U) + +#define S_FLPARITYERROR 22 +#define M_FLPARITYERROR 0x3f +#define V_FLPARITYERROR(x) ((x) << S_FLPARITYERROR) +#define G_FLPARITYERROR(x) (((x) >> S_FLPARITYERROR) & M_FLPARITYERROR) + +#define S_ITPARITYERROR 20 +#define M_ITPARITYERROR 0x3 +#define V_ITPARITYERROR(x) ((x) << S_ITPARITYERROR) +#define G_ITPARITYERROR(x) (((x) >> S_ITPARITYERROR) & M_ITPARITYERROR) + +#define S_IRPARITYERROR 19 +#define V_IRPARITYERROR(x) ((x) << S_IRPARITYERROR) +#define F_IRPARITYERROR V_IRPARITYERROR(1U) + +#define S_RCPARITYERROR 18 +#define V_RCPARITYERROR(x) ((x) << S_RCPARITYERROR) +#define F_RCPARITYERROR V_RCPARITYERROR(1U) + +#define S_OCPARITYERROR 17 +#define V_OCPARITYERROR(x) ((x) << S_OCPARITYERROR) +#define F_OCPARITYERROR V_OCPARITYERROR(1U) + +#define S_CPPARITYERROR 16 +#define V_CPPARITYERROR(x) ((x) << S_CPPARITYERROR) +#define F_CPPARITYERROR V_CPPARITYERROR(1U) + +#define S_R_REQ_FRAMINGERROR 15 +#define V_R_REQ_FRAMINGERROR(x) ((x) << S_R_REQ_FRAMINGERROR) +#define F_R_REQ_FRAMINGERROR V_R_REQ_FRAMINGERROR(1U) + +#define S_UC_REQ_FRAMINGERROR 14 +#define V_UC_REQ_FRAMINGERROR(x) ((x) << S_UC_REQ_FRAMINGERROR) +#define F_UC_REQ_FRAMINGERROR V_UC_REQ_FRAMINGERROR(1U) + +#define S_HICTLDRBDROPERR 13 +#define V_HICTLDRBDROPERR(x) ((x) << S_HICTLDRBDROPERR) +#define F_HICTLDRBDROPERR V_HICTLDRBDROPERR(1U) + +#define S_LOCTLDRBDROPERR 12 +#define V_LOCTLDRBDROPERR(x) ((x) << S_LOCTLDRBDROPERR) +#define F_LOCTLDRBDROPERR V_LOCTLDRBDROPERR(1U) + +#define S_HIPIODRBDROPERR 11 +#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR) +#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U) + +#define S_LOPIODRBDROPERR 10 +#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR) +#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) + +#define S_HIPRIORITYDBFULL 7 +#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL) +#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U) + +#define S_HIPRIORITYDBEMPTY 6 +#define V_HIPRIORITYDBEMPTY(x) ((x) << 
S_HIPRIORITYDBEMPTY) +#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U) + +#define S_LOPRIORITYDBFULL 5 +#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL) +#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U) + +#define S_LOPRIORITYDBEMPTY 4 +#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY) +#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U) + +#define S_RSPQDISABLED 3 +#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) +#define F_RSPQDISABLED V_RSPQDISABLED(1U) + +#define S_RSPQCREDITOVERFOW 2 +#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW) +#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U) + +#define S_FLEMPTY 1 +#define V_FLEMPTY(x) ((x) << S_FLEMPTY) +#define F_FLEMPTY V_FLEMPTY(1U) + +#define A_SG_INT_ENABLE 0x60 + +#define A_SG_CMDQ_CREDIT_TH 0x64 + +#define S_TIMEOUT 8 +#define M_TIMEOUT 0xffffff +#define V_TIMEOUT(x) ((x) << S_TIMEOUT) + +#define S_THRESHOLD 0 +#define M_THRESHOLD 0xff +#define V_THRESHOLD(x) ((x) << S_THRESHOLD) + +#define A_SG_TIMER_TICK 0x68 + +#define A_SG_CQ_CONTEXT_BADDR 0x6c + +#define A_SG_OCO_BASE 0x70 + +#define S_BASE1 16 +#define M_BASE1 0xffff +#define V_BASE1(x) ((x) << S_BASE1) + +#define A_SG_DRB_PRI_THRESH 0x74 + +#define A_PCIX_INT_ENABLE 0x80 + +#define S_MSIXPARERR 22 +#define M_MSIXPARERR 0x7 + +#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR) + +#define S_CFPARERR 18 +#define M_CFPARERR 0xf + +#define V_CFPARERR(x) ((x) << S_CFPARERR) + +#define S_RFPARERR 14 +#define M_RFPARERR 0xf + +#define V_RFPARERR(x) ((x) << S_RFPARERR) + +#define S_WFPARERR 12 +#define M_WFPARERR 0x3 + +#define V_WFPARERR(x) ((x) << S_WFPARERR) + +#define S_PIOPARERR 11 +#define V_PIOPARERR(x) ((x) << S_PIOPARERR) +#define F_PIOPARERR V_PIOPARERR(1U) + +#define S_DETUNCECCERR 10 +#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR) +#define F_DETUNCECCERR V_DETUNCECCERR(1U) + +#define S_DETCORECCERR 9 +#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR) +#define F_DETCORECCERR V_DETCORECCERR(1U) + +#define S_RCVSPLCMPERR 8 +#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR) +#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U) + +#define S_UNXSPLCMP 7 +#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP) +#define F_UNXSPLCMP V_UNXSPLCMP(1U) + +#define S_SPLCMPDIS 6 +#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS) +#define F_SPLCMPDIS V_SPLCMPDIS(1U) + +#define S_DETPARERR 5 +#define V_DETPARERR(x) ((x) << S_DETPARERR) +#define F_DETPARERR V_DETPARERR(1U) + +#define S_SIGSYSERR 4 +#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR) +#define F_SIGSYSERR V_SIGSYSERR(1U) + +#define S_RCVMSTABT 3 +#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT) +#define F_RCVMSTABT V_RCVMSTABT(1U) + +#define S_RCVTARABT 2 +#define V_RCVTARABT(x) ((x) << S_RCVTARABT) +#define F_RCVTARABT V_RCVTARABT(1U) + +#define S_SIGTARABT 1 +#define V_SIGTARABT(x) ((x) << S_SIGTARABT) +#define F_SIGTARABT V_SIGTARABT(1U) + +#define S_MSTDETPARERR 0 +#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR) +#define F_MSTDETPARERR V_MSTDETPARERR(1U) + +#define A_PCIX_INT_CAUSE 0x84 + +#define A_PCIX_CFG 0x88 + +#define S_DMASTOPEN 19 +#define V_DMASTOPEN(x) ((x) << S_DMASTOPEN) +#define F_DMASTOPEN V_DMASTOPEN(1U) + +#define S_CLIDECEN 18 +#define V_CLIDECEN(x) ((x) << S_CLIDECEN) +#define F_CLIDECEN V_CLIDECEN(1U) + +#define A_PCIX_MODE 0x8c + +#define S_PCLKRANGE 6 +#define M_PCLKRANGE 0x3 +#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE) +#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE) + +#define S_PCIXINITPAT 2 +#define M_PCIXINITPAT 0xf +#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT) +#define G_PCIXINITPAT(x) 
(((x) >> S_PCIXINITPAT) & M_PCIXINITPAT) + +#define S_64BIT 0 +#define V_64BIT(x) ((x) << S_64BIT) +#define F_64BIT V_64BIT(1U) + +#define A_PCIE_INT_ENABLE 0x80 + +#define S_BISTERR 15 +#define M_BISTERR 0xff + +#define V_BISTERR(x) ((x) << S_BISTERR) + +#define S_TXPARERR 18 +#define V_TXPARERR(x) ((x) << S_TXPARERR) +#define F_TXPARERR V_TXPARERR(1U) + +#define S_RXPARERR 17 +#define V_RXPARERR(x) ((x) << S_RXPARERR) +#define F_RXPARERR V_RXPARERR(1U) + +#define S_RETRYLUTPARERR 16 +#define V_RETRYLUTPARERR(x) ((x) << S_RETRYLUTPARERR) +#define F_RETRYLUTPARERR V_RETRYLUTPARERR(1U) + +#define S_RETRYBUFPARERR 15 +#define V_RETRYBUFPARERR(x) ((x) << S_RETRYBUFPARERR) +#define F_RETRYBUFPARERR V_RETRYBUFPARERR(1U) + +#define S_PCIE_MSIXPARERR 12 +#define M_PCIE_MSIXPARERR 0x7 + +#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR) + +#define S_PCIE_CFPARERR 11 +#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR) +#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U) + +#define S_PCIE_RFPARERR 10 +#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR) +#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U) + +#define S_PCIE_WFPARERR 9 +#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR) +#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U) + +#define S_PCIE_PIOPARERR 8 +#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR) +#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U) + +#define S_UNXSPLCPLERRC 7 +#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC) +#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U) + +#define S_UNXSPLCPLERRR 6 +#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR) +#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U) + +#define S_PEXERR 0 +#define V_PEXERR(x) ((x) << S_PEXERR) +#define F_PEXERR V_PEXERR(1U) + +#define A_PCIE_INT_CAUSE 0x84 + +#define S_PCIE_DMASTOPEN 24 +#define V_PCIE_DMASTOPEN(x) ((x) << S_PCIE_DMASTOPEN) +#define F_PCIE_DMASTOPEN V_PCIE_DMASTOPEN(1U) + +#define A_PCIE_CFG 0x88 + +#define S_ENABLELINKDWNDRST 21 +#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST) +#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U) + +#define S_ENABLELINKDOWNRST 20 +#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST) +#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U) + +#define S_PCIE_CLIDECEN 16 +#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN) +#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U) + +#define S_CRSTWRMMODE 0 +#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE) +#define F_CRSTWRMMODE V_CRSTWRMMODE(1U) + +#define A_PCIE_MODE 0x8c + +#define S_NUMFSTTRNSEQRX 10 +#define M_NUMFSTTRNSEQRX 0xff +#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX) +#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX) + +#define A_PCIE_PEX_CTRL0 0x98 + +#define S_NUMFSTTRNSEQ 22 +#define M_NUMFSTTRNSEQ 0xff +#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ) +#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ) + +#define S_REPLAYLMT 2 +#define M_REPLAYLMT 0xfffff + +#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT) + +#define A_PCIE_PEX_CTRL1 0x9c + +#define S_T3A_ACKLAT 0 +#define M_T3A_ACKLAT 0x7ff + +#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT) + +#define S_ACKLAT 0 +#define M_ACKLAT 0x1fff + +#define V_ACKLAT(x) ((x) << S_ACKLAT) + +#define A_PCIE_PEX_ERR 0xa4 + +#define A_T3DBG_GPIO_EN 0xd0 + +#define S_GPIO11_OEN 27 +#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN) +#define F_GPIO11_OEN V_GPIO11_OEN(1U) + +#define S_GPIO10_OEN 26 +#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN) +#define F_GPIO10_OEN V_GPIO10_OEN(1U) + +#define S_GPIO7_OEN 23 +#define V_GPIO7_OEN(x) ((x) << 
S_GPIO7_OEN) +#define F_GPIO7_OEN V_GPIO7_OEN(1U) + +#define S_GPIO6_OEN 22 +#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN) +#define F_GPIO6_OEN V_GPIO6_OEN(1U) + +#define S_GPIO5_OEN 21 +#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN) +#define F_GPIO5_OEN V_GPIO5_OEN(1U) + +#define S_GPIO4_OEN 20 +#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN) +#define F_GPIO4_OEN V_GPIO4_OEN(1U) + +#define S_GPIO2_OEN 18 +#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN) +#define F_GPIO2_OEN V_GPIO2_OEN(1U) + +#define S_GPIO1_OEN 17 +#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN) +#define F_GPIO1_OEN V_GPIO1_OEN(1U) + +#define S_GPIO0_OEN 16 +#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN) +#define F_GPIO0_OEN V_GPIO0_OEN(1U) + +#define S_GPIO10_OUT_VAL 10 +#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL) +#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U) + +#define S_GPIO7_OUT_VAL 7 +#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL) +#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U) + +#define S_GPIO6_OUT_VAL 6 +#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL) +#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U) + +#define S_GPIO5_OUT_VAL 5 +#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL) +#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U) + +#define S_GPIO4_OUT_VAL 4 +#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL) +#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U) + +#define S_GPIO2_OUT_VAL 2 +#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL) +#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U) + +#define S_GPIO1_OUT_VAL 1 +#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL) +#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U) + +#define S_GPIO0_OUT_VAL 0 +#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL) +#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U) + +#define A_T3DBG_INT_ENABLE 0xd8 + +#define S_GPIO11 11 +#define V_GPIO11(x) ((x) << S_GPIO11) +#define F_GPIO11 V_GPIO11(1U) + +#define S_GPIO10 10 +#define V_GPIO10(x) ((x) << S_GPIO10) +#define F_GPIO10 V_GPIO10(1U) + +#define S_GPIO9 9 +#define V_GPIO9(x) ((x) << S_GPIO9) +#define F_GPIO9 V_GPIO9(1U) + +#define S_GPIO7 7 +#define V_GPIO7(x) ((x) << S_GPIO7) +#define F_GPIO7 V_GPIO7(1U) + +#define S_GPIO6 6 +#define V_GPIO6(x) ((x) << S_GPIO6) +#define F_GPIO6 V_GPIO6(1U) + +#define S_GPIO5 5 +#define V_GPIO5(x) ((x) << S_GPIO5) +#define F_GPIO5 V_GPIO5(1U) + +#define S_GPIO4 4 +#define V_GPIO4(x) ((x) << S_GPIO4) +#define F_GPIO4 V_GPIO4(1U) + +#define S_GPIO3 3 +#define V_GPIO3(x) ((x) << S_GPIO3) +#define F_GPIO3 V_GPIO3(1U) + +#define S_GPIO2 2 +#define V_GPIO2(x) ((x) << S_GPIO2) +#define F_GPIO2 V_GPIO2(1U) + +#define S_GPIO1 1 +#define V_GPIO1(x) ((x) << S_GPIO1) +#define F_GPIO1 V_GPIO1(1U) + +#define S_GPIO0 0 +#define V_GPIO0(x) ((x) << S_GPIO0) +#define F_GPIO0 V_GPIO0(1U) + +#define A_T3DBG_INT_CAUSE 0xdc + +#define A_T3DBG_GPIO_ACT_LOW 0xf0 + +#define MC7_PMRX_BASE_ADDR 0x100 + +#define A_MC7_CFG 0x100 + +#define S_IFEN 13 +#define V_IFEN(x) ((x) << S_IFEN) +#define F_IFEN V_IFEN(1U) + +#define S_TERM150 11 +#define V_TERM150(x) ((x) << S_TERM150) +#define F_TERM150 V_TERM150(1U) + +#define S_SLOW 10 +#define V_SLOW(x) ((x) << S_SLOW) +#define F_SLOW V_SLOW(1U) + +#define S_WIDTH 8 +#define M_WIDTH 0x3 +#define V_WIDTH(x) ((x) << S_WIDTH) +#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH) + +#define S_BKS 6 +#define V_BKS(x) ((x) << S_BKS) +#define F_BKS V_BKS(1U) + +#define S_ORG 5 +#define V_ORG(x) ((x) << S_ORG) +#define F_ORG V_ORG(1U) + +#define S_DEN 2 +#define M_DEN 0x7 +#define V_DEN(x) ((x) << S_DEN) +#define G_DEN(x) (((x) >> S_DEN) & M_DEN) + +#define S_RDY 1 +#define V_RDY(x) 
((x) << S_RDY) +#define F_RDY V_RDY(1U) + +#define S_CLKEN 0 +#define V_CLKEN(x) ((x) << S_CLKEN) +#define F_CLKEN V_CLKEN(1U) + +#define A_MC7_MODE 0x104 + +#define S_BUSY 31 +#define V_BUSY(x) ((x) << S_BUSY) +#define F_BUSY V_BUSY(1U) + +#define S_BUSY 31 +#define V_BUSY(x) ((x) << S_BUSY) +#define F_BUSY V_BUSY(1U) + +#define A_MC7_EXT_MODE1 0x108 + +#define A_MC7_EXT_MODE2 0x10c + +#define A_MC7_EXT_MODE3 0x110 + +#define A_MC7_PRE 0x114 + +#define A_MC7_REF 0x118 + +#define S_PREREFDIV 1 +#define M_PREREFDIV 0x3fff +#define V_PREREFDIV(x) ((x) << S_PREREFDIV) + +#define S_PERREFEN 0 +#define V_PERREFEN(x) ((x) << S_PERREFEN) +#define F_PERREFEN V_PERREFEN(1U) + +#define A_MC7_DLL 0x11c + +#define S_DLLENB 1 +#define V_DLLENB(x) ((x) << S_DLLENB) +#define F_DLLENB V_DLLENB(1U) + +#define S_DLLRST 0 +#define V_DLLRST(x) ((x) << S_DLLRST) +#define F_DLLRST V_DLLRST(1U) + +#define A_MC7_PARM 0x120 + +#define S_ACTTOPREDLY 26 +#define M_ACTTOPREDLY 0xf +#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY) + +#define S_ACTTORDWRDLY 23 +#define M_ACTTORDWRDLY 0x7 +#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY) + +#define S_PRECYC 20 +#define M_PRECYC 0x7 +#define V_PRECYC(x) ((x) << S_PRECYC) + +#define S_REFCYC 13 +#define M_REFCYC 0x7f +#define V_REFCYC(x) ((x) << S_REFCYC) + +#define S_BKCYC 8 +#define M_BKCYC 0x1f +#define V_BKCYC(x) ((x) << S_BKCYC) + +#define S_WRTORDDLY 4 +#define M_WRTORDDLY 0xf +#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY) + +#define S_RDTOWRDLY 0 +#define M_RDTOWRDLY 0xf +#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY) + +#define A_MC7_CAL 0x128 + +#define S_BUSY 31 +#define V_BUSY(x) ((x) << S_BUSY) +#define F_BUSY V_BUSY(1U) + +#define S_BUSY 31 +#define V_BUSY(x) ((x) << S_BUSY) +#define F_BUSY V_BUSY(1U) + +#define S_CAL_FAULT 30 +#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT) +#define F_CAL_FAULT V_CAL_FAULT(1U) + +#define S_SGL_CAL_EN 20 +#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN) +#define F_SGL_CAL_EN V_SGL_CAL_EN(1U) + +#define A_MC7_ERR_ADDR 0x12c + +#define A_MC7_ECC 0x130 + +#define S_ECCCHKEN 1 +#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN) +#define F_ECCCHKEN V_ECCCHKEN(1U) + +#define S_ECCGENEN 0 +#define V_ECCGENEN(x) ((x) << S_ECCGENEN) +#define F_ECCGENEN V_ECCGENEN(1U) + +#define A_MC7_CE_ADDR 0x134 + +#define A_MC7_CE_DATA0 0x138 + +#define A_MC7_CE_DATA1 0x13c + +#define A_MC7_CE_DATA2 0x140 + +#define S_DATA 0 +#define M_DATA 0xff + +#define G_DATA(x) (((x) >> S_DATA) & M_DATA) + +#define A_MC7_UE_ADDR 0x144 + +#define A_MC7_UE_DATA0 0x148 + +#define A_MC7_UE_DATA1 0x14c + +#define A_MC7_UE_DATA2 0x150 + +#define A_MC7_BD_ADDR 0x154 + +#define S_ADDR 3 + +#define M_ADDR 0x1fffffff + +#define A_MC7_BD_DATA0 0x158 + +#define A_MC7_BD_DATA1 0x15c + +#define A_MC7_BD_OP 0x164 + +#define S_OP 0 + +#define V_OP(x) ((x) << S_OP) +#define F_OP V_OP(1U) + +#define F_OP V_OP(1U) +#define A_SF_OP 0x6dc + +#define A_MC7_BIST_ADDR_BEG 0x168 + +#define A_MC7_BIST_ADDR_END 0x16c + +#define A_MC7_BIST_DATA 0x170 + +#define A_MC7_BIST_OP 0x174 + +#define S_CONT 3 +#define V_CONT(x) ((x) << S_CONT) +#define F_CONT V_CONT(1U) + +#define F_CONT V_CONT(1U) + +#define A_MC7_INT_ENABLE 0x178 + +#define S_AE 17 +#define V_AE(x) ((x) << S_AE) +#define F_AE V_AE(1U) + +#define S_PE 2 +#define M_PE 0x7fff + +#define V_PE(x) ((x) << S_PE) + +#define G_PE(x) (((x) >> S_PE) & M_PE) + +#define S_UE 1 +#define V_UE(x) ((x) << S_UE) +#define F_UE V_UE(1U) + +#define S_CE 0 +#define V_CE(x) ((x) << S_CE) +#define F_CE V_CE(1U) + +#define A_MC7_INT_CAUSE 0x17c + +#define 
MC7_PMTX_BASE_ADDR 0x180 + +#define MC7_CM_BASE_ADDR 0x200 + +#define A_CIM_BOOT_CFG 0x280 + +#define S_BOOTADDR 2 +#define M_BOOTADDR 0x3fffffff +#define V_BOOTADDR(x) ((x) << S_BOOTADDR) + +#define A_CIM_SDRAM_BASE_ADDR 0x28c + +#define A_CIM_SDRAM_ADDR_SIZE 0x290 + +#define A_CIM_HOST_INT_ENABLE 0x298 + +#define S_DTAGPARERR 28 +#define V_DTAGPARERR(x) ((x) << S_DTAGPARERR) +#define F_DTAGPARERR V_DTAGPARERR(1U) + +#define S_ITAGPARERR 27 +#define V_ITAGPARERR(x) ((x) << S_ITAGPARERR) +#define F_ITAGPARERR V_ITAGPARERR(1U) + +#define S_IBQTPPARERR 26 +#define V_IBQTPPARERR(x) ((x) << S_IBQTPPARERR) +#define F_IBQTPPARERR V_IBQTPPARERR(1U) + +#define S_IBQULPPARERR 25 +#define V_IBQULPPARERR(x) ((x) << S_IBQULPPARERR) +#define F_IBQULPPARERR V_IBQULPPARERR(1U) + +#define S_IBQSGEHIPARERR 24 +#define V_IBQSGEHIPARERR(x) ((x) << S_IBQSGEHIPARERR) +#define F_IBQSGEHIPARERR V_IBQSGEHIPARERR(1U) + +#define S_IBQSGELOPARERR 23 +#define V_IBQSGELOPARERR(x) ((x) << S_IBQSGELOPARERR) +#define F_IBQSGELOPARERR V_IBQSGELOPARERR(1U) + +#define S_OBQULPLOPARERR 22 +#define V_OBQULPLOPARERR(x) ((x) << S_OBQULPLOPARERR) +#define F_OBQULPLOPARERR V_OBQULPLOPARERR(1U) + +#define S_OBQULPHIPARERR 21 +#define V_OBQULPHIPARERR(x) ((x) << S_OBQULPHIPARERR) +#define F_OBQULPHIPARERR V_OBQULPHIPARERR(1U) + +#define S_OBQSGEPARERR 20 +#define V_OBQSGEPARERR(x) ((x) << S_OBQSGEPARERR) +#define F_OBQSGEPARERR V_OBQSGEPARERR(1U) + +#define S_DCACHEPARERR 19 +#define V_DCACHEPARERR(x) ((x) << S_DCACHEPARERR) +#define F_DCACHEPARERR V_DCACHEPARERR(1U) + +#define S_ICACHEPARERR 18 +#define V_ICACHEPARERR(x) ((x) << S_ICACHEPARERR) +#define F_ICACHEPARERR V_ICACHEPARERR(1U) + +#define S_DRAMPARERR 17 +#define V_DRAMPARERR(x) ((x) << S_DRAMPARERR) +#define F_DRAMPARERR V_DRAMPARERR(1U) + +#define A_CIM_HOST_INT_CAUSE 0x29c + +#define S_BLKWRPLINT 12 +#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT) +#define F_BLKWRPLINT V_BLKWRPLINT(1U) + +#define S_BLKRDPLINT 11 +#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT) +#define F_BLKRDPLINT V_BLKRDPLINT(1U) + +#define S_BLKWRCTLINT 10 +#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT) +#define F_BLKWRCTLINT V_BLKWRCTLINT(1U) + +#define S_BLKRDCTLINT 9 +#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT) +#define F_BLKRDCTLINT V_BLKRDCTLINT(1U) + +#define S_BLKWRFLASHINT 8 +#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT) +#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U) + +#define S_BLKRDFLASHINT 7 +#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT) +#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U) + +#define S_SGLWRFLASHINT 6 +#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT) +#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U) + +#define S_WRBLKFLASHINT 5 +#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT) +#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U) + +#define S_BLKWRBOOTINT 4 +#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT) +#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U) + +#define S_FLASHRANGEINT 2 +#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT) +#define F_FLASHRANGEINT V_FLASHRANGEINT(1U) + +#define S_SDRAMRANGEINT 1 +#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT) +#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U) + +#define S_RSVDSPACEINT 0 +#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT) +#define F_RSVDSPACEINT V_RSVDSPACEINT(1U) + +#define A_CIM_HOST_ACC_CTRL 0x2b0 + +#define S_HOSTBUSY 17 +#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY) +#define F_HOSTBUSY V_HOSTBUSY(1U) + +#define A_CIM_HOST_ACC_DATA 0x2b4 + +#define A_CIM_IBQ_DBG_CFG 0x2c0 + +#define S_IBQDBGADDR 16 +#define M_IBQDBGADDR 
0x1ff +#define V_IBQDBGADDR(x) ((x) << S_IBQDBGADDR) +#define G_IBQDBGADDR(x) (((x) >> S_IBQDBGADDR) & M_IBQDBGADDR) + +#define S_IBQDBGQID 3 +#define M_IBQDBGQID 0x3 +#define V_IBQDBGQID(x) ((x) << S_IBQDBGQID) +#define G_IBQDBGQID(x) (((x) >> S_IBQDBGQID) & M_IBQDBGQID) + +#define S_IBQDBGWR 2 +#define V_IBQDBGWR(x) ((x) << S_IBQDBGWR) +#define F_IBQDBGWR V_IBQDBGWR(1U) + +#define S_IBQDBGBUSY 1 +#define V_IBQDBGBUSY(x) ((x) << S_IBQDBGBUSY) +#define F_IBQDBGBUSY V_IBQDBGBUSY(1U) + +#define S_IBQDBGEN 0 +#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN) +#define F_IBQDBGEN V_IBQDBGEN(1U) + +#define A_CIM_IBQ_DBG_DATA 0x2c8 + +#define A_TP_IN_CONFIG 0x300 + +#define S_RXFBARBPRIO 25 +#define V_RXFBARBPRIO(x) ((x) << S_RXFBARBPRIO) +#define F_RXFBARBPRIO V_RXFBARBPRIO(1U) + +#define S_TXFBARBPRIO 24 +#define V_TXFBARBPRIO(x) ((x) << S_TXFBARBPRIO) +#define F_TXFBARBPRIO V_TXFBARBPRIO(1U) + +#define S_NICMODE 14 +#define V_NICMODE(x) ((x) << S_NICMODE) +#define F_NICMODE V_NICMODE(1U) + +#define F_NICMODE V_NICMODE(1U) + +#define S_IPV6ENABLE 15 +#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE) +#define F_IPV6ENABLE V_IPV6ENABLE(1U) + +#define A_TP_OUT_CONFIG 0x304 + +#define S_VLANEXTRACTIONENABLE 12 + +#define A_TP_GLOBAL_CONFIG 0x308 + +#define S_TXPACINGENABLE 24 +#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE) +#define F_TXPACINGENABLE V_TXPACINGENABLE(1U) + +#define S_PATHMTU 15 +#define V_PATHMTU(x) ((x) << S_PATHMTU) +#define F_PATHMTU V_PATHMTU(1U) + +#define S_IPCHECKSUMOFFLOAD 13 +#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD) +#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U) + +#define S_UDPCHECKSUMOFFLOAD 12 +#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD) +#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U) + +#define S_TCPCHECKSUMOFFLOAD 11 +#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD) +#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U) + +#define S_IPTTL 0 +#define M_IPTTL 0xff +#define V_IPTTL(x) ((x) << S_IPTTL) + +#define A_TP_CMM_MM_BASE 0x314 + +#define A_TP_CMM_TIMER_BASE 0x318 + +#define S_CMTIMERMAXNUM 28 +#define M_CMTIMERMAXNUM 0x3 +#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM) + +#define A_TP_PMM_SIZE 0x31c + +#define A_TP_PMM_TX_BASE 0x320 + +#define A_TP_PMM_RX_BASE 0x328 + +#define A_TP_PMM_RX_PAGE_SIZE 0x32c + +#define A_TP_PMM_RX_MAX_PAGE 0x330 + +#define A_TP_PMM_TX_PAGE_SIZE 0x334 + +#define A_TP_PMM_TX_MAX_PAGE 0x338 + +#define A_TP_TCP_OPTIONS 0x340 + +#define S_MTUDEFAULT 16 +#define M_MTUDEFAULT 0xffff +#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT) + +#define S_MTUENABLE 10 +#define V_MTUENABLE(x) ((x) << S_MTUENABLE) +#define F_MTUENABLE V_MTUENABLE(1U) + +#define S_SACKRX 8 +#define V_SACKRX(x) ((x) << S_SACKRX) +#define F_SACKRX V_SACKRX(1U) + +#define S_SACKMODE 4 + +#define M_SACKMODE 0x3 + +#define V_SACKMODE(x) ((x) << S_SACKMODE) + +#define S_WINDOWSCALEMODE 2 +#define M_WINDOWSCALEMODE 0x3 +#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE) + +#define S_TIMESTAMPSMODE 0 + +#define M_TIMESTAMPSMODE 0x3 + +#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE) + +#define A_TP_DACK_CONFIG 0x344 + +#define S_AUTOSTATE3 30 +#define M_AUTOSTATE3 0x3 +#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3) + +#define S_AUTOSTATE2 28 +#define M_AUTOSTATE2 0x3 +#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2) + +#define S_AUTOSTATE1 26 +#define M_AUTOSTATE1 0x3 +#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1) + +#define S_BYTETHRESHOLD 5 +#define M_BYTETHRESHOLD 0xfffff +#define V_BYTETHRESHOLD(x) ((x) << 
S_BYTETHRESHOLD) + +#define S_MSSTHRESHOLD 3 +#define M_MSSTHRESHOLD 0x3 +#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD) + +#define S_AUTOCAREFUL 2 +#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL) +#define F_AUTOCAREFUL V_AUTOCAREFUL(1U) + +#define S_AUTOENABLE 1 +#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE) +#define F_AUTOENABLE V_AUTOENABLE(1U) + +#define S_DACK_MODE 0 +#define V_DACK_MODE(x) ((x) << S_DACK_MODE) +#define F_DACK_MODE V_DACK_MODE(1U) + +#define A_TP_PC_CONFIG 0x348 + +#define S_TXTOSQUEUEMAPMODE 26 +#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE) +#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U) + +#define S_ENABLEEPCMDAFULL 23 +#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL) +#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U) + +#define S_MODULATEUNIONMODE 22 +#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE) +#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U) + +#define S_TXDEFERENABLE 20 +#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE) +#define F_TXDEFERENABLE V_TXDEFERENABLE(1U) + +#define S_RXCONGESTIONMODE 19 +#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE) +#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U) + +#define S_HEARBEATDACK 16 +#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK) +#define F_HEARBEATDACK V_HEARBEATDACK(1U) + +#define S_TXCONGESTIONMODE 15 +#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE) +#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U) + +#define S_ENABLEOCSPIFULL 30 +#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL) +#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U) + +#define S_LOCKTID 28 +#define V_LOCKTID(x) ((x) << S_LOCKTID) +#define F_LOCKTID V_LOCKTID(1U) + +#define S_TABLELATENCYDELTA 0 +#define M_TABLELATENCYDELTA 0xf +#define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA) +#define G_TABLELATENCYDELTA(x) \ + (((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA) + +#define A_TP_PC_CONFIG2 0x34c + +#define S_DISBLEDAPARBIT0 15 +#define V_DISBLEDAPARBIT0(x) ((x) << S_DISBLEDAPARBIT0) +#define F_DISBLEDAPARBIT0 V_DISBLEDAPARBIT0(1U) + +#define S_ENABLEARPMISS 13 +#define V_ENABLEARPMISS(x) ((x) << S_ENABLEARPMISS) +#define F_ENABLEARPMISS V_ENABLEARPMISS(1U) + +#define S_ENABLENONOFDTNLSYN 12 +#define V_ENABLENONOFDTNLSYN(x) ((x) << S_ENABLENONOFDTNLSYN) +#define F_ENABLENONOFDTNLSYN V_ENABLENONOFDTNLSYN(1U) + +#define S_ENABLEIPV6RSS 11 +#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS) +#define F_ENABLEIPV6RSS V_ENABLEIPV6RSS(1U) + +#define S_CHDRAFULL 4 +#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL) +#define F_CHDRAFULL V_CHDRAFULL(1U) + +#define A_TP_TCP_BACKOFF_REG0 0x350 + +#define A_TP_TCP_BACKOFF_REG1 0x354 + +#define A_TP_TCP_BACKOFF_REG2 0x358 + +#define A_TP_TCP_BACKOFF_REG3 0x35c + +#define A_TP_PARA_REG2 0x368 + +#define S_MAXRXDATA 16 +#define M_MAXRXDATA 0xffff +#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA) + +#define S_RXCOALESCESIZE 0 +#define M_RXCOALESCESIZE 0xffff +#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE) + +#define A_TP_PARA_REG3 0x36c + +#define S_TXDATAACKIDX 16 +#define M_TXDATAACKIDX 0xf + +#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX) + +#define S_TXPACEAUTOSTRICT 10 +#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT) +#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U) + +#define S_TXPACEFIXED 9 +#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED) +#define F_TXPACEFIXED V_TXPACEFIXED(1U) + +#define S_TXPACEAUTO 8 +#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO) +#define F_TXPACEAUTO V_TXPACEAUTO(1U) + +#define 
S_RXCOALESCEENABLE 1 +#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE) +#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U) + +#define S_RXCOALESCEPSHEN 0 +#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN) +#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U) + +#define A_TP_PARA_REG4 0x370 + +#define A_TP_PARA_REG5 0x374 + +#define S_RXDDPOFFINIT 3 +#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT) +#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U) + +#define A_TP_PARA_REG6 0x378 + +#define S_T3A_ENABLEESND 13 +#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND) +#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U) + +#define S_ENABLEESND 11 +#define V_ENABLEESND(x) ((x) << S_ENABLEESND) +#define F_ENABLEESND V_ENABLEESND(1U) + +#define A_TP_PARA_REG7 0x37c + +#define S_PMMAXXFERLEN1 16 +#define M_PMMAXXFERLEN1 0xffff +#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1) + +#define S_PMMAXXFERLEN0 0 +#define M_PMMAXXFERLEN0 0xffff +#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0) + +#define A_TP_TIMER_RESOLUTION 0x390 + +#define S_TIMERRESOLUTION 16 +#define M_TIMERRESOLUTION 0xff +#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION) + +#define S_TIMESTAMPRESOLUTION 8 +#define M_TIMESTAMPRESOLUTION 0xff +#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION) + +#define S_DELAYEDACKRESOLUTION 0 +#define M_DELAYEDACKRESOLUTION 0xff +#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION) + +#define A_TP_MSL 0x394 + +#define A_TP_RXT_MIN 0x398 + +#define A_TP_RXT_MAX 0x39c + +#define A_TP_PERS_MIN 0x3a0 + +#define A_TP_PERS_MAX 0x3a4 + +#define A_TP_KEEP_IDLE 0x3a8 + +#define A_TP_KEEP_INTVL 0x3ac + +#define A_TP_INIT_SRTT 0x3b0 + +#define A_TP_DACK_TIMER 0x3b4 + +#define A_TP_FINWAIT2_TIMER 0x3b8 + +#define A_TP_SHIFT_CNT 0x3c0 + +#define S_SYNSHIFTMAX 24 + +#define M_SYNSHIFTMAX 0xff + +#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX) + +#define S_RXTSHIFTMAXR1 20 + +#define M_RXTSHIFTMAXR1 0xf + +#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1) + +#define S_RXTSHIFTMAXR2 16 + +#define M_RXTSHIFTMAXR2 0xf + +#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2) + +#define S_PERSHIFTBACKOFFMAX 12 +#define M_PERSHIFTBACKOFFMAX 0xf +#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX) + +#define S_PERSHIFTMAX 8 +#define M_PERSHIFTMAX 0xf +#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX) + +#define S_KEEPALIVEMAX 0 + +#define M_KEEPALIVEMAX 0xff + +#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX) + +#define A_TP_MTU_PORT_TABLE 0x3d0 + +#define A_TP_CCTRL_TABLE 0x3dc + +#define A_TP_MTU_TABLE 0x3e4 + +#define A_TP_RSS_MAP_TABLE 0x3e8 + +#define A_TP_RSS_LKP_TABLE 0x3ec + +#define A_TP_RSS_CONFIG 0x3f0 + +#define S_TNL4TUPEN 29 +#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN) +#define F_TNL4TUPEN V_TNL4TUPEN(1U) + +#define S_TNL2TUPEN 28 +#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN) +#define F_TNL2TUPEN V_TNL2TUPEN(1U) + +#define S_TNLPRTEN 26 +#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN) +#define F_TNLPRTEN V_TNLPRTEN(1U) + +#define S_TNLMAPEN 25 +#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN) +#define F_TNLMAPEN V_TNLMAPEN(1U) + +#define S_TNLLKPEN 24 +#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN) +#define F_TNLLKPEN V_TNLLKPEN(1U) + +#define S_RRCPLMAPEN 7 +#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN) +#define F_RRCPLMAPEN V_RRCPLMAPEN(1U) + +#define S_RRCPLCPUSIZE 4 +#define M_RRCPLCPUSIZE 0x7 +#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE) + +#define S_RQFEEDBACKENABLE 3 +#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE) +#define F_RQFEEDBACKENABLE 
V_RQFEEDBACKENABLE(1U) + +#define S_HASHTOEPLITZ 2 +#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ) +#define F_HASHTOEPLITZ V_HASHTOEPLITZ(1U) + +#define S_DISABLE 0 + +#define A_TP_TM_PIO_ADDR 0x418 + +#define A_TP_TM_PIO_DATA 0x41c + +#define A_TP_TX_MOD_QUE_TABLE 0x420 + +#define A_TP_TX_RESOURCE_LIMIT 0x424 + +#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428 + +#define S_TX_MOD_QUEUE_REQ_MAP 0 +#define M_TX_MOD_QUEUE_REQ_MAP 0xff +#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP) + +#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c + +#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430 + +#define A_TP_MOD_CHANNEL_WEIGHT 0x434 + +#define A_TP_MOD_RATE_LIMIT 0x438 + +#define A_TP_PIO_ADDR 0x440 + +#define A_TP_PIO_DATA 0x444 + +#define A_TP_RESET 0x44c + +#define S_FLSTINITENABLE 1 +#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE) +#define F_FLSTINITENABLE V_FLSTINITENABLE(1U) + +#define S_TPRESET 0 +#define V_TPRESET(x) ((x) << S_TPRESET) +#define F_TPRESET V_TPRESET(1U) + +#define A_TP_CMM_MM_RX_FLST_BASE 0x460 + +#define A_TP_CMM_MM_TX_FLST_BASE 0x464 + +#define A_TP_CMM_MM_PS_FLST_BASE 0x468 + +#define A_TP_MIB_INDEX 0x450 + +#define A_TP_MIB_RDATA 0x454 + +#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c + +#define A_TP_INT_ENABLE 0x470 + +#define S_FLMTXFLSTEMPTY 30 +#define V_FLMTXFLSTEMPTY(x) ((x) << S_FLMTXFLSTEMPTY) +#define F_FLMTXFLSTEMPTY V_FLMTXFLSTEMPTY(1U) + +#define S_FLMRXFLSTEMPTY 29 +#define V_FLMRXFLSTEMPTY(x) ((x) << S_FLMRXFLSTEMPTY) +#define F_FLMRXFLSTEMPTY V_FLMRXFLSTEMPTY(1U) + +#define S_ARPLUTPERR 26 +#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR) +#define F_ARPLUTPERR V_ARPLUTPERR(1U) + +#define S_CMCACHEPERR 24 +#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR) +#define F_CMCACHEPERR V_CMCACHEPERR(1U) + +#define A_TP_INT_CAUSE 0x474 + +#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8 + +#define A_TP_TX_DROP_CFG_CH0 0x12b + +#define A_TP_TX_DROP_MODE 0x12f + +#define A_TP_EGRESS_CONFIG 0x145 + +#define S_REWRITEFORCETOSIZE 0 +#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE) +#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U) + +#define A_TP_TX_TRC_KEY0 0x20 + +#define A_TP_RX_TRC_KEY0 0x120 + +#define A_TP_TX_DROP_CNT_CH0 0x12d + +#define S_TXDROPCNTCH0RCVD 0 +#define M_TXDROPCNTCH0RCVD 0xffff +#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD) +#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \ + M_TXDROPCNTCH0RCVD) + +#define A_TP_PROXY_FLOW_CNTL 0x4b0 + +#define A_TP_EMBED_OP_FIELD0 0x4e8 +#define A_TP_EMBED_OP_FIELD1 0x4ec +#define A_TP_EMBED_OP_FIELD2 0x4f0 +#define A_TP_EMBED_OP_FIELD3 0x4f4 +#define A_TP_EMBED_OP_FIELD4 0x4f8 +#define A_TP_EMBED_OP_FIELD5 0x4fc + +#define A_ULPRX_CTL 0x500 + +#define S_ROUND_ROBIN 4 +#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN) +#define F_ROUND_ROBIN V_ROUND_ROBIN(1U) + +#define A_ULPRX_INT_ENABLE 0x504 + +#define S_DATASELFRAMEERR0 7 +#define V_DATASELFRAMEERR0(x) ((x) << S_DATASELFRAMEERR0) +#define F_DATASELFRAMEERR0 V_DATASELFRAMEERR0(1U) + +#define S_DATASELFRAMEERR1 6 +#define V_DATASELFRAMEERR1(x) ((x) << S_DATASELFRAMEERR1) +#define F_DATASELFRAMEERR1 V_DATASELFRAMEERR1(1U) + +#define S_PCMDMUXPERR 5 +#define V_PCMDMUXPERR(x) ((x) << S_PCMDMUXPERR) +#define F_PCMDMUXPERR V_PCMDMUXPERR(1U) + +#define S_ARBFPERR 4 +#define V_ARBFPERR(x) ((x) << S_ARBFPERR) +#define F_ARBFPERR V_ARBFPERR(1U) + +#define S_ARBPF0PERR 3 +#define V_ARBPF0PERR(x) ((x) << S_ARBPF0PERR) +#define F_ARBPF0PERR V_ARBPF0PERR(1U) + +#define S_ARBPF1PERR 2 +#define V_ARBPF1PERR(x) ((x) << S_ARBPF1PERR) +#define F_ARBPF1PERR 
V_ARBPF1PERR(1U) + +#define S_PARERRPCMD 1 +#define V_PARERRPCMD(x) ((x) << S_PARERRPCMD) +#define F_PARERRPCMD V_PARERRPCMD(1U) + +#define S_PARERRDATA 0 +#define V_PARERRDATA(x) ((x) << S_PARERRDATA) +#define F_PARERRDATA V_PARERRDATA(1U) + +#define A_ULPRX_INT_CAUSE 0x508 + +#define A_ULPRX_ISCSI_LLIMIT 0x50c + +#define A_ULPRX_ISCSI_ULIMIT 0x510 + +#define A_ULPRX_ISCSI_TAGMASK 0x514 + +#define A_ULPRX_ISCSI_PSZ 0x518 + +#define A_ULPRX_TDDP_LLIMIT 0x51c + +#define A_ULPRX_TDDP_ULIMIT 0x520 +#define A_ULPRX_TDDP_PSZ 0x528 + +#define S_HPZ0 0 +#define M_HPZ0 0xf +#define V_HPZ0(x) ((x) << S_HPZ0) +#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0) + +#define A_ULPRX_STAG_LLIMIT 0x52c + +#define A_ULPRX_STAG_ULIMIT 0x530 + +#define A_ULPRX_RQ_LLIMIT 0x534 +#define A_ULPRX_RQ_LLIMIT 0x534 + +#define A_ULPRX_RQ_ULIMIT 0x538 +#define A_ULPRX_RQ_ULIMIT 0x538 + +#define A_ULPRX_PBL_LLIMIT 0x53c + +#define A_ULPRX_PBL_ULIMIT 0x540 +#define A_ULPRX_PBL_ULIMIT 0x540 + +#define A_ULPRX_TDDP_TAGMASK 0x524 + +#define A_ULPRX_RQ_LLIMIT 0x534 +#define A_ULPRX_RQ_LLIMIT 0x534 + +#define A_ULPRX_RQ_ULIMIT 0x538 +#define A_ULPRX_RQ_ULIMIT 0x538 + +#define A_ULPRX_PBL_ULIMIT 0x540 +#define A_ULPRX_PBL_ULIMIT 0x540 + +#define A_ULPTX_CONFIG 0x580 + +#define S_CFG_CQE_SOP_MASK 1 +#define V_CFG_CQE_SOP_MASK(x) ((x) << S_CFG_CQE_SOP_MASK) +#define F_CFG_CQE_SOP_MASK V_CFG_CQE_SOP_MASK(1U) + +#define S_CFG_RR_ARB 0 +#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB) +#define F_CFG_RR_ARB V_CFG_RR_ARB(1U) + +#define A_ULPTX_INT_ENABLE 0x584 + +#define S_PBL_BOUND_ERR_CH1 1 +#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1) +#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U) + +#define S_PBL_BOUND_ERR_CH0 0 +#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0) +#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U) + +#define A_ULPTX_INT_CAUSE 0x588 + +#define A_ULPTX_TPT_LLIMIT 0x58c + +#define A_ULPTX_TPT_ULIMIT 0x590 + +#define A_ULPTX_PBL_LLIMIT 0x594 + +#define A_ULPTX_PBL_ULIMIT 0x598 + +#define A_ULPTX_DMA_WEIGHT 0x5ac + +#define S_D1_WEIGHT 16 +#define M_D1_WEIGHT 0xffff +#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT) + +#define S_D0_WEIGHT 0 +#define M_D0_WEIGHT 0xffff +#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT) + +#define A_PM1_RX_CFG 0x5c0 +#define A_PM1_RX_MODE 0x5c4 + +#define A_PM1_RX_INT_ENABLE 0x5d8 + +#define S_ZERO_E_CMD_ERROR 18 +#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR) +#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U) + +#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17 +#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR) +#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U) + +#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16 +#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR) +#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U) + +#define S_IESPI0_RX_FRAMING_ERROR 15 +#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR) +#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U) + +#define S_IESPI1_RX_FRAMING_ERROR 14 +#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR) +#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U) + +#define S_IESPI0_TX_FRAMING_ERROR 13 +#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR) +#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U) + +#define S_IESPI1_TX_FRAMING_ERROR 12 +#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR) 
+#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U) + +#define S_OCSPI0_RX_FRAMING_ERROR 11 +#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR) +#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U) + +#define S_OCSPI1_RX_FRAMING_ERROR 10 +#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR) +#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U) + +#define S_OCSPI0_TX_FRAMING_ERROR 9 +#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR) +#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U) + +#define S_OCSPI1_TX_FRAMING_ERROR 8 +#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR) +#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U) + +#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7 +#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR) +#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U) + +#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6 +#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR) +#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U) + +#define S_IESPI_PAR_ERROR 3 +#define M_IESPI_PAR_ERROR 0x7 + +#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR) + +#define S_OCSPI_PAR_ERROR 0 +#define M_OCSPI_PAR_ERROR 0x7 + +#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR) + +#define A_PM1_RX_INT_CAUSE 0x5dc + +#define A_PM1_TX_CFG 0x5e0 +#define A_PM1_TX_MODE 0x5e4 + +#define A_PM1_TX_INT_ENABLE 0x5f8 + +#define S_ZERO_C_CMD_ERROR 18 +#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR) +#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U) + +#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17 +#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR) +#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U) + +#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16 +#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR) +#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U) + +#define S_ICSPI0_RX_FRAMING_ERROR 15 +#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR) +#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U) + +#define S_ICSPI1_RX_FRAMING_ERROR 14 +#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR) +#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U) + +#define S_ICSPI0_TX_FRAMING_ERROR 13 +#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR) +#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U) + +#define S_ICSPI1_TX_FRAMING_ERROR 12 +#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR) +#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U) + +#define S_OESPI0_RX_FRAMING_ERROR 11 +#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR) +#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U) + +#define S_OESPI1_RX_FRAMING_ERROR 10 +#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR) +#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U) + +#define S_OESPI0_TX_FRAMING_ERROR 9 +#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR) +#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U) + +#define S_OESPI1_TX_FRAMING_ERROR 8 +#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR) +#define F_OESPI1_TX_FRAMING_ERROR 
V_OESPI1_TX_FRAMING_ERROR(1U) + +#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7 +#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR) +#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U) + +#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6 +#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR) +#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U) + +#define S_ICSPI_PAR_ERROR 3 +#define M_ICSPI_PAR_ERROR 0x7 + +#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR) + +#define S_OESPI_PAR_ERROR 0 +#define M_OESPI_PAR_ERROR 0x7 + +#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR) + +#define A_PM1_TX_INT_CAUSE 0x5fc + +#define A_MPS_CFG 0x600 + +#define S_TPRXPORTEN 4 +#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN) +#define F_TPRXPORTEN V_TPRXPORTEN(1U) + +#define S_TPTXPORT1EN 3 +#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN) +#define F_TPTXPORT1EN V_TPTXPORT1EN(1U) + +#define S_TPTXPORT0EN 2 +#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN) +#define F_TPTXPORT0EN V_TPTXPORT0EN(1U) + +#define S_PORT1ACTIVE 1 +#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE) +#define F_PORT1ACTIVE V_PORT1ACTIVE(1U) + +#define S_PORT0ACTIVE 0 +#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE) +#define F_PORT0ACTIVE V_PORT0ACTIVE(1U) + +#define S_ENFORCEPKT 11 +#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT) +#define F_ENFORCEPKT V_ENFORCEPKT(1U) + +#define A_MPS_INT_ENABLE 0x61c + +#define S_MCAPARERRENB 6 +#define M_MCAPARERRENB 0x7 + +#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB) + +#define S_RXTPPARERRENB 4 +#define M_RXTPPARERRENB 0x3 + +#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB) + +#define S_TX1TPPARERRENB 2 +#define M_TX1TPPARERRENB 0x3 + +#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB) + +#define S_TX0TPPARERRENB 0 +#define M_TX0TPPARERRENB 0x3 + +#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB) + +#define A_MPS_INT_CAUSE 0x620 + +#define S_MCAPARERR 6 +#define M_MCAPARERR 0x7 + +#define V_MCAPARERR(x) ((x) << S_MCAPARERR) + +#define S_RXTPPARERR 4 +#define M_RXTPPARERR 0x3 + +#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR) + +#define S_TX1TPPARERR 2 +#define M_TX1TPPARERR 0x3 + +#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR) + +#define S_TX0TPPARERR 0 +#define M_TX0TPPARERR 0x3 + +#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR) + +#define A_CPL_SWITCH_CNTRL 0x640 + +#define A_CPL_INTR_ENABLE 0x650 + +#define S_CIM_OP_MAP_PERR 5 +#define V_CIM_OP_MAP_PERR(x) ((x) << S_CIM_OP_MAP_PERR) +#define F_CIM_OP_MAP_PERR V_CIM_OP_MAP_PERR(1U) + +#define S_CIM_OVFL_ERROR 4 +#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR) +#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U) + +#define S_TP_FRAMING_ERROR 3 +#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR) +#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U) + +#define S_SGE_FRAMING_ERROR 2 +#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR) +#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U) + +#define S_CIM_FRAMING_ERROR 1 +#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR) +#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U) + +#define S_ZERO_SWITCH_ERROR 0 +#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR) +#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U) + +#define A_CPL_INTR_CAUSE 0x654 + +#define A_CPL_MAP_TBL_DATA 0x65c + +#define A_SMB_GLOBAL_TIME_CFG 0x660 + +#define A_I2C_CFG 0x6a0 + +#define S_I2C_CLKDIV 0 +#define M_I2C_CLKDIV 0xfff +#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV) + 
+#define A_MI1_CFG 0x6b0 + +#define S_CLKDIV 5 +#define M_CLKDIV 0xff +#define V_CLKDIV(x) ((x) << S_CLKDIV) + +#define S_ST 3 + +#define M_ST 0x3 + +#define V_ST(x) ((x) << S_ST) + +#define G_ST(x) (((x) >> S_ST) & M_ST) + +#define S_PREEN 2 +#define V_PREEN(x) ((x) << S_PREEN) +#define F_PREEN V_PREEN(1U) + +#define S_MDIINV 1 +#define V_MDIINV(x) ((x) << S_MDIINV) +#define F_MDIINV V_MDIINV(1U) + +#define S_MDIEN 0 +#define V_MDIEN(x) ((x) << S_MDIEN) +#define F_MDIEN V_MDIEN(1U) + +#define A_MI1_ADDR 0x6b4 + +#define S_PHYADDR 5 +#define M_PHYADDR 0x1f +#define V_PHYADDR(x) ((x) << S_PHYADDR) + +#define S_REGADDR 0 +#define M_REGADDR 0x1f +#define V_REGADDR(x) ((x) << S_REGADDR) + +#define A_MI1_DATA 0x6b8 + +#define A_MI1_OP 0x6bc + +#define S_MDI_OP 0 +#define M_MDI_OP 0x3 +#define V_MDI_OP(x) ((x) << S_MDI_OP) + +#define A_SF_DATA 0x6d8 + +#define A_SF_OP 0x6dc + +#define S_BYTECNT 1 +#define M_BYTECNT 0x3 +#define V_BYTECNT(x) ((x) << S_BYTECNT) + +#define A_PL_INT_ENABLE0 0x6e0 + +#define S_T3DBG 23 +#define V_T3DBG(x) ((x) << S_T3DBG) +#define F_T3DBG V_T3DBG(1U) + +#define S_XGMAC0_1 20 +#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1) +#define F_XGMAC0_1 V_XGMAC0_1(1U) + +#define S_XGMAC0_0 19 +#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0) +#define F_XGMAC0_0 V_XGMAC0_0(1U) + +#define S_MC5A 18 +#define V_MC5A(x) ((x) << S_MC5A) +#define F_MC5A V_MC5A(1U) + +#define S_CPL_SWITCH 12 +#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH) +#define F_CPL_SWITCH V_CPL_SWITCH(1U) + +#define S_MPS0 11 +#define V_MPS0(x) ((x) << S_MPS0) +#define F_MPS0 V_MPS0(1U) + +#define S_PM1_TX 10 +#define V_PM1_TX(x) ((x) << S_PM1_TX) +#define F_PM1_TX V_PM1_TX(1U) + +#define S_PM1_RX 9 +#define V_PM1_RX(x) ((x) << S_PM1_RX) +#define F_PM1_RX V_PM1_RX(1U) + +#define S_ULP2_TX 8 +#define V_ULP2_TX(x) ((x) << S_ULP2_TX) +#define F_ULP2_TX V_ULP2_TX(1U) + +#define S_ULP2_RX 7 +#define V_ULP2_RX(x) ((x) << S_ULP2_RX) +#define F_ULP2_RX V_ULP2_RX(1U) + +#define S_TP1 6 +#define V_TP1(x) ((x) << S_TP1) +#define F_TP1 V_TP1(1U) + +#define S_CIM 5 +#define V_CIM(x) ((x) << S_CIM) +#define F_CIM V_CIM(1U) + +#define S_MC7_CM 4 +#define V_MC7_CM(x) ((x) << S_MC7_CM) +#define F_MC7_CM V_MC7_CM(1U) + +#define S_MC7_PMTX 3 +#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX) +#define F_MC7_PMTX V_MC7_PMTX(1U) + +#define S_MC7_PMRX 2 +#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX) +#define F_MC7_PMRX V_MC7_PMRX(1U) + +#define S_PCIM0 1 +#define V_PCIM0(x) ((x) << S_PCIM0) +#define F_PCIM0 V_PCIM0(1U) + +#define S_SGE3 0 +#define V_SGE3(x) ((x) << S_SGE3) +#define F_SGE3 V_SGE3(1U) + +#define A_PL_INT_CAUSE0 0x6e4 + +#define A_PL_RST 0x6f0 + +#define S_FATALPERREN 4 +#define V_FATALPERREN(x) ((x) << S_FATALPERREN) +#define F_FATALPERREN V_FATALPERREN(1U) + +#define S_CRSTWRM 1 +#define V_CRSTWRM(x) ((x) << S_CRSTWRM) +#define F_CRSTWRM V_CRSTWRM(1U) + +#define A_PL_REV 0x6f4 + +#define A_PL_CLI 0x6f8 + +#define A_MC5_DB_CONFIG 0x704 + +#define S_TMTYPEHI 30 +#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI) +#define F_TMTYPEHI V_TMTYPEHI(1U) + +#define S_TMPARTSIZE 28 +#define M_TMPARTSIZE 0x3 +#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE) +#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE) + +#define S_TMTYPE 26 +#define M_TMTYPE 0x3 +#define V_TMTYPE(x) ((x) << S_TMTYPE) +#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE) + +#define S_COMPEN 17 +#define V_COMPEN(x) ((x) << S_COMPEN) +#define F_COMPEN V_COMPEN(1U) + +#define S_PRTYEN 6 +#define V_PRTYEN(x) ((x) << S_PRTYEN) +#define F_PRTYEN V_PRTYEN(1U) + +#define S_MBUSEN 5 +#define 
V_MBUSEN(x) ((x) << S_MBUSEN) +#define F_MBUSEN V_MBUSEN(1U) + +#define S_DBGIEN 4 +#define V_DBGIEN(x) ((x) << S_DBGIEN) +#define F_DBGIEN V_DBGIEN(1U) + +#define S_TMRDY 2 +#define V_TMRDY(x) ((x) << S_TMRDY) +#define F_TMRDY V_TMRDY(1U) + +#define S_TMRST 1 +#define V_TMRST(x) ((x) << S_TMRST) +#define F_TMRST V_TMRST(1U) + +#define S_TMMODE 0 +#define V_TMMODE(x) ((x) << S_TMMODE) +#define F_TMMODE V_TMMODE(1U) + +#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c + +#define A_MC5_DB_FILTER_TABLE 0x710 + +#define A_MC5_DB_SERVER_INDEX 0x714 + +#define A_MC5_DB_RSP_LATENCY 0x720 + +#define S_RDLAT 16 +#define M_RDLAT 0x1f +#define V_RDLAT(x) ((x) << S_RDLAT) + +#define S_LRNLAT 8 +#define M_LRNLAT 0x1f +#define V_LRNLAT(x) ((x) << S_LRNLAT) + +#define S_SRCHLAT 0 +#define M_SRCHLAT 0x1f +#define V_SRCHLAT(x) ((x) << S_SRCHLAT) + +#define A_MC5_DB_PART_ID_INDEX 0x72c + +#define A_MC5_DB_INT_ENABLE 0x740 + +#define S_DELACTEMPTY 18 +#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY) +#define F_DELACTEMPTY V_DELACTEMPTY(1U) + +#define S_DISPQPARERR 17 +#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR) +#define F_DISPQPARERR V_DISPQPARERR(1U) + +#define S_REQQPARERR 16 +#define V_REQQPARERR(x) ((x) << S_REQQPARERR) +#define F_REQQPARERR V_REQQPARERR(1U) + +#define S_UNKNOWNCMD 15 +#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD) +#define F_UNKNOWNCMD V_UNKNOWNCMD(1U) + +#define S_NFASRCHFAIL 8 +#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL) +#define F_NFASRCHFAIL V_NFASRCHFAIL(1U) + +#define S_ACTRGNFULL 7 +#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL) +#define F_ACTRGNFULL V_ACTRGNFULL(1U) + +#define S_PARITYERR 6 +#define V_PARITYERR(x) ((x) << S_PARITYERR) +#define F_PARITYERR V_PARITYERR(1U) + +#define A_MC5_DB_INT_CAUSE 0x744 + +#define A_MC5_DB_DBGI_CONFIG 0x774 + +#define A_MC5_DB_DBGI_REQ_CMD 0x778 + +#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c + +#define A_MC5_DB_DBGI_REQ_ADDR1 0x780 + +#define A_MC5_DB_DBGI_REQ_ADDR2 0x784 + +#define A_MC5_DB_DBGI_REQ_DATA0 0x788 + +#define A_MC5_DB_DBGI_REQ_DATA1 0x78c + +#define A_MC5_DB_DBGI_REQ_DATA2 0x790 + +#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0 + +#define S_DBGIRSPVALID 0 +#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID) +#define F_DBGIRSPVALID V_DBGIRSPVALID(1U) + +#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4 + +#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8 + +#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc + +#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc + +#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0 + +#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4 + +#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8 + +#define A_MC5_DB_SYN_SRCH_CMD 0x7dc + +#define A_MC5_DB_SYN_LRN_CMD 0x7e0 + +#define A_MC5_DB_ACK_SRCH_CMD 0x7e4 + +#define A_MC5_DB_ACK_LRN_CMD 0x7e8 + +#define A_MC5_DB_ILOOKUP_CMD 0x7ec + +#define A_MC5_DB_ELOOKUP_CMD 0x7f0 + +#define A_MC5_DB_DATA_WRITE_CMD 0x7f4 + +#define A_MC5_DB_DATA_READ_CMD 0x7f8 + +#define XGMAC0_0_BASE_ADDR 0x800 + +#define A_XGM_TX_CTRL 0x800 + +#define S_TXEN 0 +#define V_TXEN(x) ((x) << S_TXEN) +#define F_TXEN V_TXEN(1U) + +#define A_XGM_TX_CFG 0x804 + +#define S_TXPAUSEEN 0 +#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN) +#define F_TXPAUSEEN V_TXPAUSEEN(1U) + +#define A_XGM_TX_PAUSE_QUANTA 0x808 + +#define A_XGM_RX_CTRL 0x80c + +#define S_RXEN 0 +#define V_RXEN(x) ((x) << S_RXEN) +#define F_RXEN V_RXEN(1U) + +#define A_XGM_RX_CFG 0x810 + +#define S_DISPAUSEFRAMES 9 +#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES) +#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U) + +#define S_EN1536BFRAMES 8 +#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
+#define F_EN1536BFRAMES V_EN1536BFRAMES(1U) + +#define S_ENJUMBO 7 +#define V_ENJUMBO(x) ((x) << S_ENJUMBO) +#define F_ENJUMBO V_ENJUMBO(1U) + +#define S_RMFCS 6 +#define V_RMFCS(x) ((x) << S_RMFCS) +#define F_RMFCS V_RMFCS(1U) + +#define S_ENHASHMCAST 2 +#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST) +#define F_ENHASHMCAST V_ENHASHMCAST(1U) + +#define S_COPYALLFRAMES 0 +#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES) +#define F_COPYALLFRAMES V_COPYALLFRAMES(1U) + +#define S_DISBCAST 1 +#define V_DISBCAST(x) ((x) << S_DISBCAST) +#define F_DISBCAST V_DISBCAST(1U) + +#define A_XGM_RX_HASH_LOW 0x814 + +#define A_XGM_RX_HASH_HIGH 0x818 + +#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c + +#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820 + +#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824 + +#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c + +#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834 + +#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c + +#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844 + +#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c + +#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854 + +#define A_XGM_INT_STATUS 0x86c + +#define S_LINKFAULTCHANGE 9 +#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE) +#define F_LINKFAULTCHANGE V_LINKFAULTCHANGE(1U) + +#define A_XGM_XGM_INT_ENABLE 0x874 +#define A_XGM_XGM_INT_DISABLE 0x878 + +#define A_XGM_STAT_CTRL 0x880 + +#define S_CLRSTATS 2 +#define V_CLRSTATS(x) ((x) << S_CLRSTATS) +#define F_CLRSTATS V_CLRSTATS(1U) + +#define A_XGM_RXFIFO_CFG 0x884 + +#define S_RXFIFO_EMPTY 31 +#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY) +#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U) + +#define S_RXFIFOPAUSEHWM 17 +#define M_RXFIFOPAUSEHWM 0xfff + +#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM) + +#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM) + +#define S_RXFIFOPAUSELWM 5 +#define M_RXFIFOPAUSELWM 0xfff + +#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM) + +#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM) + +#define S_RXSTRFRWRD 1 +#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD) +#define F_RXSTRFRWRD V_RXSTRFRWRD(1U) + +#define S_DISERRFRAMES 0 +#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES) +#define F_DISERRFRAMES V_DISERRFRAMES(1U) + +#define A_XGM_TXFIFO_CFG 0x888 + +#define S_UNDERUNFIX 22 +#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX) +#define F_UNDERUNFIX V_UNDERUNFIX(1U) + +#define S_TXIPG 13 +#define M_TXIPG 0xff +#define V_TXIPG(x) ((x) << S_TXIPG) +#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG) + +#define S_TXFIFOTHRESH 4 +#define M_TXFIFOTHRESH 0x1ff + +#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH) + +#define S_ENDROPPKT 21 +#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT) +#define F_ENDROPPKT V_ENDROPPKT(1U) + +#define A_XGM_SERDES_CTRL 0x890 + +#define S_SERDESRESET_ 24 +#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_) +#define F_SERDESRESET_ V_SERDESRESET_(1U) + +#define S_RXENABLE 4 +#define V_RXENABLE(x) ((x) << S_RXENABLE) +#define F_RXENABLE V_RXENABLE(1U) + +#define S_TXENABLE 3 +#define V_TXENABLE(x) ((x) << S_TXENABLE) +#define F_TXENABLE V_TXENABLE(1U) + +#define A_XGM_PAUSE_TIMER 0x890 + +#define A_XGM_RGMII_IMP 0x89c + +#define S_XGM_IMPSETUPDATE 6 +#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE) +#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U) + +#define S_RGMIIIMPPD 3 +#define M_RGMIIIMPPD 0x7 +#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD) + +#define S_RGMIIIMPPU 0 +#define M_RGMIIIMPPU 0x7 +#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU) + +#define S_CALRESET 8 +#define
V_CALRESET(x) ((x) << S_CALRESET) +#define F_CALRESET V_CALRESET(1U) + +#define S_CALUPDATE 7 +#define V_CALUPDATE(x) ((x) << S_CALUPDATE) +#define F_CALUPDATE V_CALUPDATE(1U) + +#define A_XGM_XAUI_IMP 0x8a0 + +#define S_CALBUSY 31 +#define V_CALBUSY(x) ((x) << S_CALBUSY) +#define F_CALBUSY V_CALBUSY(1U) + +#define S_XGM_CALFAULT 29 +#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT) +#define F_XGM_CALFAULT V_XGM_CALFAULT(1U) + +#define S_CALIMP 24 +#define M_CALIMP 0x1f +#define V_CALIMP(x) ((x) << S_CALIMP) +#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP) + +#define S_XAUIIMP 0 +#define M_XAUIIMP 0x7 +#define V_XAUIIMP(x) ((x) << S_XAUIIMP) + +#define A_XGM_RX_MAX_PKT_SIZE 0x8a8 + +#define S_RXMAXFRAMERSIZE 17 +#define M_RXMAXFRAMERSIZE 0x3fff +#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE) +#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE) + +#define S_RXENFRAMER 14 +#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER) +#define F_RXENFRAMER V_RXENFRAMER(1U) + +#define S_RXMAXPKTSIZE 0 +#define M_RXMAXPKTSIZE 0x3fff +#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE) +#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE) + +#define A_XGM_RESET_CTRL 0x8ac + +#define S_XGMAC_STOP_EN 4 +#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN) +#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U) + +#define S_XG2G_RESET_ 3 +#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_) +#define F_XG2G_RESET_ V_XG2G_RESET_(1U) + +#define S_RGMII_RESET_ 2 +#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_) +#define F_RGMII_RESET_ V_RGMII_RESET_(1U) + +#define S_PCS_RESET_ 1 +#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_) +#define F_PCS_RESET_ V_PCS_RESET_(1U) + +#define S_MAC_RESET_ 0 +#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_) +#define F_MAC_RESET_ V_MAC_RESET_(1U) + +#define A_XGM_PORT_CFG 0x8b8 + +#define S_CLKDIVRESET_ 3 +#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_) +#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U) + +#define S_PORTSPEED 1 +#define M_PORTSPEED 0x3 + +#define V_PORTSPEED(x) ((x) << S_PORTSPEED) + +#define S_ENRGMII 0 +#define V_ENRGMII(x) ((x) << S_ENRGMII) +#define F_ENRGMII V_ENRGMII(1U) + +#define A_XGM_INT_ENABLE 0x8d4 + +#define S_TXFIFO_PRTY_ERR 17 +#define M_TXFIFO_PRTY_ERR 0x7 + +#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR) + +#define S_RXFIFO_PRTY_ERR 14 +#define M_RXFIFO_PRTY_ERR 0x7 + +#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR) + +#define S_TXFIFO_UNDERRUN 13 +#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN) +#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U) + +#define S_RXFIFO_OVERFLOW 12 +#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW) +#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U) + +#define S_SERDES_LOS 4 +#define M_SERDES_LOS 0xf + +#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS) + +#define S_XAUIPCSCTCERR 3 +#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR) +#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U) + +#define S_XAUIPCSALIGNCHANGE 2 +#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE) +#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U) + +#define S_XGM_INT 0 +#define V_XGM_INT(x) ((x) << S_XGM_INT) +#define F_XGM_INT V_XGM_INT(1U) + +#define A_XGM_INT_CAUSE 0x8d8 + +#define A_XGM_XAUI_ACT_CTRL 0x8dc + +#define S_TXACTENABLE 1 +#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE) +#define F_TXACTENABLE V_TXACTENABLE(1U) + +#define A_XGM_SERDES_CTRL0 0x8e0 + +#define S_RESET3 23 +#define V_RESET3(x) ((x) << S_RESET3) +#define F_RESET3 V_RESET3(1U) + +#define S_RESET2 22 +#define V_RESET2(x) ((x) 
<< S_RESET2) +#define F_RESET2 V_RESET2(1U) + +#define S_RESET1 21 +#define V_RESET1(x) ((x) << S_RESET1) +#define F_RESET1 V_RESET1(1U) + +#define S_RESET0 20 +#define V_RESET0(x) ((x) << S_RESET0) +#define F_RESET0 V_RESET0(1U) + +#define S_PWRDN3 19 +#define V_PWRDN3(x) ((x) << S_PWRDN3) +#define F_PWRDN3 V_PWRDN3(1U) + +#define S_PWRDN2 18 +#define V_PWRDN2(x) ((x) << S_PWRDN2) +#define F_PWRDN2 V_PWRDN2(1U) + +#define S_PWRDN1 17 +#define V_PWRDN1(x) ((x) << S_PWRDN1) +#define F_PWRDN1 V_PWRDN1(1U) + +#define S_PWRDN0 16 +#define V_PWRDN0(x) ((x) << S_PWRDN0) +#define F_PWRDN0 V_PWRDN0(1U) + +#define S_RESETPLL23 15 +#define V_RESETPLL23(x) ((x) << S_RESETPLL23) +#define F_RESETPLL23 V_RESETPLL23(1U) + +#define S_RESETPLL01 14 +#define V_RESETPLL01(x) ((x) << S_RESETPLL01) +#define F_RESETPLL01 V_RESETPLL01(1U) + +#define A_XGM_SERDES_STAT0 0x8f0 +#define A_XGM_SERDES_STAT1 0x8f4 +#define A_XGM_SERDES_STAT2 0x8f8 + +#define S_LOWSIG0 0 +#define V_LOWSIG0(x) ((x) << S_LOWSIG0) +#define F_LOWSIG0 V_LOWSIG0(1U) + +#define A_XGM_SERDES_STAT3 0x8fc + +#define A_XGM_STAT_TX_BYTE_LOW 0x900 + +#define A_XGM_STAT_TX_BYTE_HIGH 0x904 + +#define A_XGM_STAT_TX_FRAME_LOW 0x908 + +#define A_XGM_STAT_TX_FRAME_HIGH 0x90c + +#define A_XGM_STAT_TX_BCAST 0x910 + +#define A_XGM_STAT_TX_MCAST 0x914 + +#define A_XGM_STAT_TX_PAUSE 0x918 + +#define A_XGM_STAT_TX_64B_FRAMES 0x91c + +#define A_XGM_STAT_TX_65_127B_FRAMES 0x920 + +#define A_XGM_STAT_TX_128_255B_FRAMES 0x924 + +#define A_XGM_STAT_TX_256_511B_FRAMES 0x928 + +#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c + +#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930 + +#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934 + +#define A_XGM_STAT_TX_ERR_FRAMES 0x938 + +#define A_XGM_STAT_RX_BYTES_LOW 0x93c + +#define A_XGM_STAT_RX_BYTES_HIGH 0x940 + +#define A_XGM_STAT_RX_FRAMES_LOW 0x944 + +#define A_XGM_STAT_RX_FRAMES_HIGH 0x948 + +#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c + +#define A_XGM_STAT_RX_MCAST_FRAMES 0x950 + +#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954 + +#define A_XGM_STAT_RX_64B_FRAMES 0x958 + +#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c + +#define A_XGM_STAT_RX_128_255B_FRAMES 0x960 + +#define A_XGM_STAT_RX_256_511B_FRAMES 0x964 + +#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968 + +#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c + +#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970 + +#define A_XGM_STAT_RX_SHORT_FRAMES 0x974 + +#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978 + +#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c + +#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980 + +#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984 + +#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988 + +#define A_XGM_SERDES_STATUS0 0x98c + +#define A_XGM_SERDES_STATUS1 0x990 + +#define S_CMULOCK 31 +#define V_CMULOCK(x) ((x) << S_CMULOCK) +#define F_CMULOCK V_CMULOCK(1U) + +#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4 + +#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8 + +#define S_TXSPI4SOPCNT 16 +#define M_TXSPI4SOPCNT 0xffff +#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT) +#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT) + +#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac + +#define XGMAC0_1_BASE_ADDR 0xa00 diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c new file mode 100644 index 0000000..d6fa177 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -0,0 +1,3303 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/prefetch.h> +#include <net/arp.h> +#include "common.h" +#include "regs.h" +#include "sge_defs.h" +#include "t3_cpl.h" +#include "firmware_exports.h" +#include "cxgb3_offload.h" + +#define USE_GTS 0 + +#define SGE_RX_SM_BUF_SIZE 1536 + +#define SGE_RX_COPY_THRES 256 +#define SGE_RX_PULL_LEN 128 + +#define SGE_PG_RSVD SMP_CACHE_BYTES +/* + * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks. + * It must be a divisor of PAGE_SIZE. If set to 0, FL0 will use sk_buffs + * directly. + */ +#define FL0_PG_CHUNK_SIZE 2048 +#define FL0_PG_ORDER 0 +#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER) +#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192) +#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1) +#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER) + +#define SGE_RX_DROP_THRES 16 +#define RX_RECLAIM_PERIOD (HZ/4) + +/* + * Max number of Rx buffers we replenish at a time. + */ +#define MAX_RX_REFILL 16U +/* + * Period of the Tx buffer reclaim timer. This timer does not need to run + * frequently as Tx buffers are usually reclaimed by new Tx packets. + */ +#define TX_RECLAIM_PERIOD (HZ / 4) +#define TX_RECLAIM_TIMER_CHUNK 64U +#define TX_RECLAIM_CHUNK 16U + +/* WR size in bytes */ +#define WR_LEN (WR_FLITS * 8) + +/* + * Types of Tx queues in each queue set. Order here matters, do not change.
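+ *
+ * Editorial note (not part of the original patch): the fixed order is
+ * assumed elsewhere, e.g. init_qset_cntxt() below maps TXQ_ETH into the
+ * FW_TUNNEL_SGEEC_START context range and t3_eth_xmit() indexes
+ * qs->txq[TXQ_ETH] directly, so reordering these values would break
+ * the HW context assignment.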
+ */ +enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL }; + +/* Values for sge_txq.flags */ +enum { + TXQ_RUNNING = 1 << 0, /* fetch engine is running */ + TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */ +}; + +struct tx_desc { + __be64 flit[TX_DESC_FLITS]; +}; + +struct rx_desc { + __be32 addr_lo; + __be32 len_gen; + __be32 gen2; + __be32 addr_hi; +}; + +struct tx_sw_desc { /* SW state per Tx descriptor */ + struct sk_buff *skb; + u8 eop; /* set if last descriptor for packet */ + u8 addr_idx; /* buffer index of first SGL entry in descriptor */ + u8 fragidx; /* first page fragment associated with descriptor */ + s8 sflit; /* start flit of first SGL entry in descriptor */ +}; + +struct rx_sw_desc { /* SW state per Rx descriptor */ + union { + struct sk_buff *skb; + struct fl_pg_chunk pg_chunk; + }; + DEFINE_DMA_UNMAP_ADDR(dma_addr); +}; + +struct rsp_desc { /* response queue descriptor */ + struct rss_header rss_hdr; + __be32 flags; + __be32 len_cq; + u8 imm_data[47]; + u8 intr_gen; +}; + +/* + * Holds unmapping information for Tx packets that need deferred unmapping. + * This structure lives at skb->head and must be allocated by callers. + */ +struct deferred_unmap_info { + struct pci_dev *pdev; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; +}; + +/* + * Maps a number of flits to the number of Tx descriptors that can hold them. + * The formula is + * + * desc = 1 + (flits - 2) / (WR_FLITS - 1). + * + * HW allows up to 4 descriptors to be combined into a WR. + */ +static u8 flit_desc_map[] = { + 0, +#if SGE_NUM_GENBITS == 1 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 +#elif SGE_NUM_GENBITS == 2 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, +#else +# error "SGE_NUM_GENBITS must be 1 or 2" +#endif +}; + +static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) +{ + return container_of(q, struct sge_qset, fl[qidx]); +} + +static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) +{ + return container_of(q, struct sge_qset, rspq); +} + +static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) +{ + return container_of(q, struct sge_qset, txq[qidx]); +} + +/** + * refill_rspq - replenish an SGE response queue + * @adapter: the adapter + * @q: the response queue to replenish + * @credits: how many new responses to make available + * + * Replenishes a response queue by making the supplied number of responses + * available to HW. + */ +static inline void refill_rspq(struct adapter *adapter, + const struct sge_rspq *q, unsigned int credits) +{ + rmb(); + t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN, + V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); +} + +/** + * need_skb_unmap - does the platform need unmapping of sk_buffs? + * + * Returns true if the platform needs sk_buff unmapping. The result is a + * compile-time constant, so the compiler optimizes the unmapping code + * away entirely when this returns false. + */ +static inline int need_skb_unmap(void) +{ +#ifdef CONFIG_NEED_DMA_MAP_STATE + return 1; +#else + return 0; +#endif +} + +/** + * unmap_skb - unmap a packet main body and its page fragments + * @skb: the packet + * @q: the Tx queue containing Tx descriptors for the packet + * @cidx: index of Tx descriptor + * @pdev: the PCI device + * + * Unmap the main body of an sk_buff and its page fragments, if any.
+ * Because of the fairly complicated structure of our SGLs and the desire + * to conserve space for metadata, the information necessary to unmap an + * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx + * descriptors (the physical addresses of the various data buffers), and + * the SW descriptor state (assorted indices). The send functions + * initialize the indices for the first packet descriptor so we can unmap + * the buffers held in the first Tx descriptor here, and we have enough + * information at this point to set the state for the next Tx descriptor. + * + * Note that it is possible to clean up the first descriptor of a packet + * before the send routines have written the next descriptors, but this + * race does not cause any problem. We just end up writing the unmapping + * info for the descriptor first. + */ +static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, + unsigned int cidx, struct pci_dev *pdev) +{ + const struct sg_ent *sgp; + struct tx_sw_desc *d = &q->sdesc[cidx]; + int nfrags, frag_idx, curflit, j = d->addr_idx; + + sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; + frag_idx = d->fragidx; + + if (frag_idx == 0 && skb_headlen(skb)) { + pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), + skb_headlen(skb), PCI_DMA_TODEVICE); + j = 1; + } + + curflit = d->sflit + 1 + j; + nfrags = skb_shinfo(skb)->nr_frags; + + while (frag_idx < nfrags && curflit < WR_FLITS) { + pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), + skb_shinfo(skb)->frags[frag_idx].size, + PCI_DMA_TODEVICE); + j ^= 1; + if (j == 0) { + sgp++; + curflit++; + } + curflit++; + frag_idx++; + } + + if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */ + d = cidx + 1 == q->size ? q->sdesc : d + 1; + d->fragidx = frag_idx; + d->addr_idx = j; + d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */ + } +} + +/** + * free_tx_desc - reclaims Tx descriptors and their buffers + * @adapter: the adapter + * @q: the Tx queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * + * Reclaims Tx descriptors from an SGE Tx queue and frees the associated + * Tx buffers. Called with the Tx queue lock held. + */ +static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, + unsigned int n) +{ + struct tx_sw_desc *d; + struct pci_dev *pdev = adapter->pdev; + unsigned int cidx = q->cidx; + + const int need_unmap = need_skb_unmap() && + q->cntxt_id >= FW_TUNNEL_SGEEC_START; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->skb) { /* an SGL is present */ + if (need_unmap) + unmap_skb(d->skb, q, cidx, pdev); + if (d->eop) { + kfree_skb(d->skb); + d->skb = NULL; + } + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + } + q->cidx = cidx; +} + +/** + * reclaim_completed_tx - reclaims completed Tx descriptors + * @adapter: the adapter + * @q: the Tx queue to reclaim completed descriptors from + * @chunk: maximum number of descriptors to reclaim + * + * Reclaims Tx descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the Tx + * queue's lock held. 
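+ *
+ * Worked example (editorial sketch, not in the original patch): with
+ * q->processed == 100 and q->cleaned == 90, a call with chunk == 16
+ * frees min(16, 10) == 10 descriptors, advances q->cleaned to 100, and
+ * returns q->processed - q->cleaned == 0, telling the caller that no
+ * completed work remains.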
+ */ +static inline unsigned int reclaim_completed_tx(struct adapter *adapter, + struct sge_txq *q, + unsigned int chunk) +{ + unsigned int reclaim = q->processed - q->cleaned; + + reclaim = min(chunk, reclaim); + if (reclaim) { + free_tx_desc(adapter, q, reclaim); + q->cleaned += reclaim; + q->in_use -= reclaim; + } + return q->processed - q->cleaned; +} + +/** + * should_restart_tx - are there enough resources to restart a Tx queue? + * @q: the Tx queue + * + * Checks if there are enough descriptors to restart a suspended Tx queue. + */ +static inline int should_restart_tx(const struct sge_txq *q) +{ + unsigned int r = q->processed - q->cleaned; + + return q->in_use - r < (q->size >> 1); +} + +static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, + struct rx_sw_desc *d) +{ + if (q->use_pages && d->pg_chunk.page) { + (*d->pg_chunk.p_cnt)--; + if (!*d->pg_chunk.p_cnt) + pci_unmap_page(pdev, + d->pg_chunk.mapping, + q->alloc_size, PCI_DMA_FROMDEVICE); + + put_page(d->pg_chunk.page); + d->pg_chunk.page = NULL; + } else { + pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr), + q->buf_size, PCI_DMA_FROMDEVICE); + kfree_skb(d->skb); + d->skb = NULL; + } +} + +/** + * free_rx_bufs - free the Rx buffers on an SGE free list + * @pdev: the PCI device associated with the adapter + * @q: the SGE free list to clean up + * + * Release the buffers on an SGE free-buffer Rx queue. HW fetching from + * this queue should be stopped before calling this function. + */ +static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) +{ + unsigned int cidx = q->cidx; + + while (q->credits--) { + struct rx_sw_desc *d = &q->sdesc[cidx]; + + clear_rx_desc(pdev, q, d); + if (++cidx == q->size) + cidx = 0; + } + + if (q->pg_chunk.page) { + __free_pages(q->pg_chunk.page, q->order); + q->pg_chunk.page = NULL; + } +} + +/** + * add_one_rx_buf - add a packet buffer to a free-buffer list + * @va: buffer start VA + * @len: the buffer length + * @d: the HW Rx descriptor to write + * @sd: the SW Rx descriptor to write + * @gen: the generation bit value + * @pdev: the PCI device associated with the adapter + * + * Add a buffer of the given length to the supplied HW and SW Rx + * descriptors.
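+ *
+ * Editorial note (not in the original patch): the 64-bit bus address is
+ * split across addr_lo/addr_hi, and the wmb() below makes sure both
+ * halves are visible before len_gen/gen2 carry the generation bits that
+ * mark the descriptor as valid to the SGE.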
+ */ +static inline int add_one_rx_buf(void *va, unsigned int len, + struct rx_desc *d, struct rx_sw_desc *sd, + unsigned int gen, struct pci_dev *pdev) +{ + dma_addr_t mapping; + + mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(pdev, mapping))) + return -ENOMEM; + + dma_unmap_addr_set(sd, dma_addr, mapping); + + d->addr_lo = cpu_to_be32(mapping); + d->addr_hi = cpu_to_be32((u64) mapping >> 32); + wmb(); + d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); + d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); + return 0; +} + +static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d, + unsigned int gen) +{ + d->addr_lo = cpu_to_be32(mapping); + d->addr_hi = cpu_to_be32((u64) mapping >> 32); + wmb(); + d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); + d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); + return 0; +} + +static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, + struct rx_sw_desc *sd, gfp_t gfp, + unsigned int order) +{ + if (!q->pg_chunk.page) { + dma_addr_t mapping; + + q->pg_chunk.page = alloc_pages(gfp, order); + if (unlikely(!q->pg_chunk.page)) + return -ENOMEM; + q->pg_chunk.va = page_address(q->pg_chunk.page); + q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - + SGE_PG_RSVD; + q->pg_chunk.offset = 0; + mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, + 0, q->alloc_size, PCI_DMA_FROMDEVICE); + q->pg_chunk.mapping = mapping; + } + sd->pg_chunk = q->pg_chunk; + + prefetch(sd->pg_chunk.p_cnt); + + q->pg_chunk.offset += q->buf_size; + if (q->pg_chunk.offset == (PAGE_SIZE << order)) + q->pg_chunk.page = NULL; + else { + q->pg_chunk.va += q->buf_size; + get_page(q->pg_chunk.page); + } + + if (sd->pg_chunk.offset == 0) + *sd->pg_chunk.p_cnt = 1; + else + *sd->pg_chunk.p_cnt += 1; + + return 0; +} + +static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) +{ + if (q->pend_cred >= q->credits / 4) { + q->pend_cred = 0; + wmb(); + t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); + } +} + +/** + * refill_fl - refill an SGE free-buffer list + * @adap: the adapter + * @q: the free-list to refill + * @n: the number of new buffers to allocate + * @gfp: the gfp flags for allocating new buffers + * + * (Re)populate an SGE free-buffer list with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must ensure that + * @n does not exceed the queue's capacity.
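+ *
+ * Usage sketch (editorial, mirroring __refill_fl() below):
+ *
+ *	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
+ *		  GFP_ATOMIC | __GFP_COMP);
+ *
+ * tops the list up by at most MAX_RX_REFILL buffers and by construction
+ * keeps @n within the queue's capacity.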
+ */ +static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) +{ + struct rx_sw_desc *sd = &q->sdesc[q->pidx]; + struct rx_desc *d = &q->desc[q->pidx]; + unsigned int count = 0; + + while (n--) { + dma_addr_t mapping; + int err; + + if (q->use_pages) { + if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, + q->order))) { +nomem: q->alloc_failed++; + break; + } + mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; + dma_unmap_addr_set(sd, dma_addr, mapping); + + add_one_rx_chunk(mapping, d, q->gen); + pci_dma_sync_single_for_device(adap->pdev, mapping, + q->buf_size - SGE_PG_RSVD, + PCI_DMA_FROMDEVICE); + } else { + void *buf_start; + + struct sk_buff *skb = alloc_skb(q->buf_size, gfp); + if (!skb) + goto nomem; + + sd->skb = skb; + buf_start = skb->data; + err = add_one_rx_buf(buf_start, q->buf_size, d, sd, + q->gen, adap->pdev); + if (unlikely(err)) { + clear_rx_desc(adap->pdev, q, sd); + break; + } + } + + d++; + sd++; + if (++q->pidx == q->size) { + q->pidx = 0; + q->gen ^= 1; + sd = q->sdesc; + d = q->desc; + } + count++; + } + + q->credits += count; + q->pend_cred += count; + ring_fl_db(adap, q); + + return count; +} + +static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) +{ + refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits), + GFP_ATOMIC | __GFP_COMP); +} + +/** + * recycle_rx_buf - recycle a receive buffer + * @adap: the adapter + * @q: the SGE free list + * @idx: index of buffer to recycle + * + * Recycles the specified buffer on the given free list by adding it at + * the next available slot on the list. + */ +static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, + unsigned int idx) +{ + struct rx_desc *from = &q->desc[idx]; + struct rx_desc *to = &q->desc[q->pidx]; + + q->sdesc[q->pidx] = q->sdesc[idx]; + to->addr_lo = from->addr_lo; /* already big endian */ + to->addr_hi = from->addr_hi; /* likewise */ + wmb(); + to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); + to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); + + if (++q->pidx == q->size) { + q->pidx = 0; + q->gen ^= 1; + } + + q->credits++; + q->pend_cred++; + ring_fl_db(adap, q); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @pdev: the PCI device + * @nelem: the number of descriptors + * @elem_size: the size of each descriptor + * @sw_size: the size of the SW state associated with each ring element + * @phys: the physical address of the allocated ring + * @metadata: address of the array holding the SW state for the ring + * + * Allocates resources for an SGE descriptor ring, such as Tx queues, + * free buffer lists, or response queues. Each SGE ring requires + * space for its HW descriptors plus, optionally, space for the SW state + * associated with each HW entry (the metadata). The function returns + * three values: the virtual address for the HW ring (the return value + * of the function), the physical address of the HW ring, and the address + * of the SW ring.
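+ *
+ * Usage sketch (editorial; the variable names are illustrative, not
+ * from the original patch):
+ *
+ *	struct rx_desc *ring;
+ *	struct rx_sw_desc *sw_ring;
+ *	dma_addr_t phys;
+ *
+ *	ring = alloc_ring(pdev, nelem, sizeof(struct rx_desc),
+ *			  sizeof(struct rx_sw_desc), &phys, &sw_ring);
+ *
+ * On success @ring points at the zeroed HW descriptors, @phys holds
+ * their bus address and @sw_ring at the kcalloc()ed metadata array.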
+ */ +static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t *phys, void *metadata) +{ + size_t len = nelem * elem_size; + void *s = NULL; + void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); + + if (!p) + return NULL; + if (sw_size && metadata) { + s = kcalloc(nelem, sw_size, GFP_KERNEL); + + if (!s) { + dma_free_coherent(&pdev->dev, len, p, *phys); + return NULL; + } + *(void **)metadata = s; + } + memset(p, 0, len); + return p; +} + +/** + * t3_reset_qset - reset an SGE queue set + * @q: the queue set + * + * Reset the qset structure. The NAPI structure is preserved in the + * event of the qset's reincarnation, for example during EEH recovery. + */ +static void t3_reset_qset(struct sge_qset *q) +{ + if (q->adap && + !(q->adap->flags & NAPI_INIT)) { + memset(q, 0, sizeof(*q)); + return; + } + + q->adap = NULL; + memset(&q->rspq, 0, sizeof(q->rspq)); + memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); + memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); + q->txq_stopped = 0; + q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ + q->rx_reclaim_timer.function = NULL; + q->nomem = 0; + napi_free_frags(&q->napi); +} + +/** + * t3_free_qset - free the resources of an SGE queue set + * @adapter: the adapter owning the queue set + * @q: the queue set + * + * Release the HW and SW resources associated with an SGE queue set, such + * as HW contexts, packet buffers, and descriptor rings. Traffic to the + * queue set must be quiesced prior to calling this. + */ +static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) +{ + int i; + struct pci_dev *pdev = adapter->pdev; + + for (i = 0; i < SGE_RXQ_PER_SET; ++i) + if (q->fl[i].desc) { + spin_lock_irq(&adapter->sge.reg_lock); + t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); + spin_unlock_irq(&adapter->sge.reg_lock); + free_rx_bufs(pdev, &q->fl[i]); + kfree(q->fl[i].sdesc); + dma_free_coherent(&pdev->dev, + q->fl[i].size * + sizeof(struct rx_desc), q->fl[i].desc, + q->fl[i].phys_addr); + } + + for (i = 0; i < SGE_TXQ_PER_SET; ++i) + if (q->txq[i].desc) { + spin_lock_irq(&adapter->sge.reg_lock); + t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); + spin_unlock_irq(&adapter->sge.reg_lock); + if (q->txq[i].sdesc) { + free_tx_desc(adapter, &q->txq[i], + q->txq[i].in_use); + kfree(q->txq[i].sdesc); + } + dma_free_coherent(&pdev->dev, + q->txq[i].size * + sizeof(struct tx_desc), + q->txq[i].desc, q->txq[i].phys_addr); + __skb_queue_purge(&q->txq[i].sendq); + } + + if (q->rspq.desc) { + spin_lock_irq(&adapter->sge.reg_lock); + t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); + spin_unlock_irq(&adapter->sge.reg_lock); + dma_free_coherent(&pdev->dev, + q->rspq.size * sizeof(struct rsp_desc), + q->rspq.desc, q->rspq.phys_addr); + } + + t3_reset_qset(q); +} + +/** + * init_qset_cntxt - initialize an SGE queue set context info + * @qs: the queue set + * @id: the queue set id + * + * Initializes the TIDs and context ids for the queues of a queue set.
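+ *
+ * Editorial example (not in the original patch): queue set 1 gets
+ * response-queue context 1, free-list contexts 2 and 3, and Tx contexts
+ * FW_TUNNEL_SGEEC_START + 1, FW_OFLD_SGEEC_START + 1 and
+ * FW_CTRL_SGEEC_START + 1.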
+ */ +static void init_qset_cntxt(struct sge_qset *qs, unsigned int id) +{ + qs->rspq.cntxt_id = id; + qs->fl[0].cntxt_id = 2 * id; + qs->fl[1].cntxt_id = 2 * id + 1; + qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; + qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; + qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; + qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; + qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + /* alternatively: 3 * (n / 2) + 2 * (n & 1) */ + return (3 * n) / 2 + (n & 1); +} + +/** + * flits_to_desc - returns the number of Tx descriptors for the given flits + * @n: the number of flits + * + * Calculates the number of Tx descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int n) +{ + BUG_ON(n >= ARRAY_SIZE(flit_desc_map)); + return flit_desc_map[n]; +} + +/** + * get_packet - return the next ingress packet buffer from a free list + * @adap: the adapter that received the packet + * @fl: the SGE free list holding the packet + * @len: the packet length including any SGE padding + * @drop_thres: # of remaining buffers before we start dropping packets + * + * Get the next packet from a free list and complete setup of the + * sk_buff. If the packet is small we make a copy and recycle the + * original buffer, otherwise we use the original buffer itself. If a + * positive drop threshold is supplied packets are dropped and their + * buffers recycled if (a) the number of remaining buffers is under the + * threshold and the packet is too big to copy, or (b) the packet should + * be copied but there is no memory for the copy. + */ +static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, + unsigned int len, unsigned int drop_thres) +{ + struct sk_buff *skb = NULL; + struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; + + prefetch(sd->skb->data); + fl->credits--; + + if (len <= SGE_RX_COPY_THRES) { + skb = alloc_skb(len, GFP_ATOMIC); + if (likely(skb != NULL)) { + __skb_put(skb, len); + pci_dma_sync_single_for_cpu(adap->pdev, + dma_unmap_addr(sd, dma_addr), len, + PCI_DMA_FROMDEVICE); + memcpy(skb->data, sd->skb->data, len); + pci_dma_sync_single_for_device(adap->pdev, + dma_unmap_addr(sd, dma_addr), len, + PCI_DMA_FROMDEVICE); + } else if (!drop_thres) + goto use_orig_buf; +recycle: + recycle_rx_buf(adap, fl, fl->cidx); + return skb; + } + + if (unlikely(fl->credits < drop_thres) && + refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), + GFP_ATOMIC | __GFP_COMP) == 0) + goto recycle; + +use_orig_buf: + pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), + fl->buf_size, PCI_DMA_FROMDEVICE); + skb = sd->skb; + skb_put(skb, len); + __refill_fl(adap, fl); + return skb; +} + +/** + * get_packet_pg - return the next ingress packet buffer from a free list + * @adap: the adapter that received the packet + * @fl: the SGE free list holding the packet + * @q: the response queue + * @len: the packet length including any SGE padding + * @drop_thres: # of remaining buffers before we start dropping packets + * + * Get the next packet from a free list populated with page chunks.
+ * If the packet is small we make a copy and recycle the original buffer, + * otherwise we attach the original buffer as a page fragment to a fresh + * sk_buff. If a positive drop threshold is supplied packets are dropped + * and their buffers recycled if (a) the number of remaining buffers is + * under the threshold and the packet is too big to copy, or (b) there's + * no system memory. + * + * Note: this function is similar to @get_packet but deals with Rx buffers + * that are page chunks rather than sk_buffs. + */ +static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, + struct sge_rspq *q, unsigned int len, + unsigned int drop_thres) +{ + struct sk_buff *newskb, *skb; + struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; + + dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr); + + newskb = skb = q->pg_skb; + if (!skb && (len <= SGE_RX_COPY_THRES)) { + newskb = alloc_skb(len, GFP_ATOMIC); + if (likely(newskb != NULL)) { + __skb_put(newskb, len); + pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, + PCI_DMA_FROMDEVICE); + memcpy(newskb->data, sd->pg_chunk.va, len); + pci_dma_sync_single_for_device(adap->pdev, dma_addr, + len, + PCI_DMA_FROMDEVICE); + } else if (!drop_thres) + return NULL; +recycle: + fl->credits--; + recycle_rx_buf(adap, fl, fl->cidx); + q->rx_recycle_buf++; + return newskb; + } + + if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) + goto recycle; + + prefetch(sd->pg_chunk.p_cnt); + + if (!skb) + newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); + + if (unlikely(!newskb)) { + if (!drop_thres) + return NULL; + goto recycle; + } + + pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, + PCI_DMA_FROMDEVICE); + (*sd->pg_chunk.p_cnt)--; + if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) + pci_unmap_page(adap->pdev, + sd->pg_chunk.mapping, + fl->alloc_size, + PCI_DMA_FROMDEVICE); + if (!skb) { + __skb_put(newskb, SGE_RX_PULL_LEN); + memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); + skb_fill_page_desc(newskb, 0, sd->pg_chunk.page, + sd->pg_chunk.offset + SGE_RX_PULL_LEN, + len - SGE_RX_PULL_LEN); + newskb->len = len; + newskb->data_len = len - SGE_RX_PULL_LEN; + newskb->truesize += newskb->data_len; + } else { + skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags, + sd->pg_chunk.page, + sd->pg_chunk.offset, len); + newskb->len += len; + newskb->data_len += len; + newskb->truesize += len; + } + + fl->credits--; + /* + * We do not refill FLs here, we let the caller do it to overlap a + * prefetch. + */ + return newskb; +} + +/** + * get_imm_packet - return the next ingress packet buffer from a response + * @resp: the response descriptor containing the packet data + * + * Return a packet containing the immediate data of the given response. + */ +static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp) +{ + struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC); + + if (skb) { + __skb_put(skb, IMMED_PKT_SIZE); + skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); + } + return skb; +} + +/** + * calc_tx_descs - calculate the number of Tx descriptors for a packet + * @skb: the packet + * + * Returns the number of Tx descriptors needed for the given Ethernet + * packet. Ethernet packets require addition of WR and CPL headers. 
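+ *
+ * Worked example (editorial sketch, not in the original patch): a
+ * non-GSO packet with a linear area and three page fragments needs
+ * sgl_len(3 + 1) + 2 = 8 flits, and flits_to_desc(8) yields a single
+ * descriptor for either supported SGE_NUM_GENBITS value; only when the
+ * flit count exceeds WR_FLITS does the WR span extra descriptors.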
+ */ +static inline unsigned int calc_tx_descs(const struct sk_buff *skb) +{ + unsigned int flits; + + if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt)) + return 1; + + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2; + if (skb_shinfo(skb)->gso_size) + flits++; + return flits_to_desc(flits); +} + +/** + * make_sgl - populate a scatter/gather list for a packet + * @skb: the packet + * @sgp: the SGL to populate + * @start: start address of skb main body data to include in the SGL + * @len: length of skb main body data to include in the SGL + * @pdev: the PCI device + * + * Generates a scatter/gather list for the buffers that make up a packet + * and returns the SGL size in 8-byte words. The caller must size the SGL + * appropriately. + */ +static inline unsigned int make_sgl(const struct sk_buff *skb, + struct sg_ent *sgp, unsigned char *start, + unsigned int len, struct pci_dev *pdev) +{ + dma_addr_t mapping; + unsigned int i, j = 0, nfrags; + + if (len) { + mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); + sgp->len[0] = cpu_to_be32(len); + sgp->addr[0] = cpu_to_be64(mapping); + j = 1; + } + + nfrags = skb_shinfo(skb)->nr_frags; + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + mapping = pci_map_page(pdev, frag->page, frag->page_offset, + frag->size, PCI_DMA_TODEVICE); + sgp->len[j] = cpu_to_be32(frag->size); + sgp->addr[j] = cpu_to_be64(mapping); + j ^= 1; + if (j == 0) + ++sgp; + } + if (j) + sgp->len[j] = 0; + return ((nfrags + (len != 0)) * 3) / 2 + j; +} + +/** + * check_ring_tx_db - check and potentially ring a Tx queue's doorbell + * @adap: the adapter + * @q: the Tx queue + * + * Ring the doorbell if a Tx queue is asleep. There is a natural race + * here: the HW may go to sleep just after we check, but in that case + * the interrupt handler will detect the outstanding TX packet and ring + * the doorbell for us. + * + * When GTS is disabled we unconditionally ring the doorbell. + */ +static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) +{ +#if USE_GTS + clear_bit(TXQ_LAST_PKT_DB, &q->flags); + if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { + set_bit(TXQ_LAST_PKT_DB, &q->flags); + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); + } +#else + wmb(); /* write descriptors before telling HW */ + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); +#endif +} + +static inline void wr_gen2(struct tx_desc *d, unsigned int gen) +{ +#if SGE_NUM_GENBITS == 2 + d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen); +#endif +} + +/** + * write_wr_hdr_sgl - write a WR header and, optionally, SGL + * @ndesc: number of Tx descriptors spanned by the SGL + * @skb: the packet corresponding to the WR + * @d: first Tx descriptor to be written + * @pidx: index of above descriptors + * @q: the SGE Tx queue + * @sgl: the SGL + * @flits: number of flits to the start of the SGL in the first descriptor + * @sgl_flits: the SGL size in flits + * @gen: the Tx descriptor generation + * @wr_hi: top 32 bits of WR header based on WR type (big endian) + * @wr_lo: low 32 bits of WR header based on WR type (big endian) + * + * Write a work request header and an associated SGL. If the SGL is + * small enough to fit into one Tx descriptor it has already been written + * and we just need to write the WR header. Otherwise we distribute the + * SGL across the number of descriptors it spans.
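+ *
+ * Editorial note (not in the original patch): in the multi-descriptor
+ * case each continuation descriptor restates the WR header with
+ * V_WR_SGLSFLT(1) and takes up to WR_FLITS - 1 further SGL flits; only
+ * after the last fragment is written does the first descriptor's wr_lo
+ * receive its generation bits, committing the whole WR to the HW.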
+ */ +static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb, + struct tx_desc *d, unsigned int pidx, + const struct sge_txq *q, + const struct sg_ent *sgl, + unsigned int flits, unsigned int sgl_flits, + unsigned int gen, __be32 wr_hi, + __be32 wr_lo) +{ + struct work_request_hdr *wrp = (struct work_request_hdr *)d; + struct tx_sw_desc *sd = &q->sdesc[pidx]; + + sd->skb = skb; + if (need_skb_unmap()) { + sd->fragidx = 0; + sd->addr_idx = 0; + sd->sflit = flits; + } + + if (likely(ndesc == 1)) { + sd->eop = 1; + wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | + V_WR_SGLSFLT(flits)) | wr_hi; + wmb(); + wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) | + V_WR_GEN(gen)) | wr_lo; + wr_gen2(d, gen); + } else { + unsigned int ogen = gen; + const u64 *fp = (const u64 *)sgl; + struct work_request_hdr *wp = wrp; + + wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | + V_WR_SGLSFLT(flits)) | wr_hi; + + while (sgl_flits) { + unsigned int avail = WR_FLITS - flits; + + if (avail > sgl_flits) + avail = sgl_flits; + memcpy(&d->flit[flits], fp, avail * sizeof(*fp)); + sgl_flits -= avail; + ndesc--; + if (!sgl_flits) + break; + + fp += avail; + d++; + sd->eop = 0; + sd++; + if (++pidx == q->size) { + pidx = 0; + gen ^= 1; + d = q->desc; + sd = q->sdesc; + } + + sd->skb = skb; + wrp = (struct work_request_hdr *)d; + wrp->wr_hi = htonl(V_WR_DATATYPE(1) | + V_WR_SGLSFLT(1)) | wr_hi; + wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS, + sgl_flits + 1)) | + V_WR_GEN(gen)) | wr_lo; + wr_gen2(d, gen); + flits = 1; + } + sd->eop = 1; + wrp->wr_hi |= htonl(F_WR_EOP); + wmb(); + wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo; + wr_gen2((struct tx_desc *)wp, ogen); + WARN_ON(ndesc != 0); + } +} + +/** + * write_tx_pkt_wr - write a TX_PKT work request + * @adap: the adapter + * @skb: the packet to send + * @pi: the egress interface + * @pidx: index of the first Tx descriptor to write + * @gen: the generation value to use + * @q: the Tx queue + * @ndesc: number of descriptors the packet will occupy + * @compl: the value of the COMPL bit to use + * + * Generate a TX_PKT work request to send the supplied packet. + */ +static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, + const struct port_info *pi, + unsigned int pidx, unsigned int gen, + struct sge_txq *q, unsigned int ndesc, + unsigned int compl) +{ + unsigned int flits, sgl_flits, cntrl, tso_info; + struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; + struct tx_desc *d = &q->desc[pidx]; + struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d; + + cpl->len = htonl(skb->len); + cntrl = V_TXPKT_INTF(pi->port_id); + + if (vlan_tx_tag_present(skb)) + cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb)); + + tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); + if (tso_info) { + int eth_type; + struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl; + + d->flit[2] = 0; + cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); + hdr->cntrl = htonl(cntrl); + eth_type = skb_network_offset(skb) == ETH_HLEN ? 
+ CPL_ETH_II : CPL_ETH_II_VLAN; + tso_info |= V_LSO_ETH_TYPE(eth_type) | + V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | + V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff); + hdr->lso_info = htonl(tso_info); + flits = 3; + } else { + cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); + cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */ + cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL); + cpl->cntrl = htonl(cntrl); + + if (skb->len <= WR_LEN - sizeof(*cpl)) { + q->sdesc[pidx].skb = NULL; + if (!skb->data_len) + skb_copy_from_linear_data(skb, &d->flit[2], + skb->len); + else + skb_copy_bits(skb, 0, &d->flit[2], skb->len); + + flits = (skb->len + 7) / 8 + 2; + cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) | + V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) + | F_WR_SOP | F_WR_EOP | compl); + wmb(); + cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) | + V_WR_TID(q->token)); + wr_gen2(d, gen); + kfree_skb(skb); + return; + } + + flits = 2; + } + + sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; + sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); + + write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, + htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), + htonl(V_WR_TID(q->token))); +} + +static inline void t3_stop_tx_queue(struct netdev_queue *txq, + struct sge_qset *qs, struct sge_txq *q) +{ + netif_tx_stop_queue(txq); + set_bit(TXQ_ETH, &qs->txq_stopped); + q->stops++; +} + +/** + * eth_xmit - add a packet to the Ethernet Tx queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Tx queue. Runs with softirqs disabled. + */ +netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int qidx; + unsigned int ndesc, pidx, credits, gen, compl; + const struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct netdev_queue *txq; + struct sge_qset *qs; + struct sge_txq *q; + + /* + * The chip min packet length is 9 octets but play safe and reject + * anything shorter than an Ethernet header. + */ + if (unlikely(skb->len < ETH_HLEN)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + qidx = skb_get_queue_mapping(skb); + qs = &pi->qs[qidx]; + q = &qs->txq[TXQ_ETH]; + txq = netdev_get_tx_queue(dev, qidx); + + reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); + + credits = q->size - q->in_use; + ndesc = calc_tx_descs(skb); + + if (unlikely(credits < ndesc)) { + t3_stop_tx_queue(txq, qs, q); + dev_err(&adap->pdev->dev, + "%s: Tx ring %u full while queue awake!\n", + dev->name, q->cntxt_id & 7); + return NETDEV_TX_BUSY; + } + + q->in_use += ndesc; + if (unlikely(credits - ndesc < q->stop_thres)) { + t3_stop_tx_queue(txq, qs, q); + + if (should_restart_tx(q) && + test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { + q->restarts++; + netif_tx_start_queue(txq); + } + } + + gen = q->gen; + q->unacked += ndesc; + compl = (q->unacked & 8) << (S_WR_COMPL - 3); + q->unacked &= 7; + pidx = q->pidx; + q->pidx += ndesc; + if (q->pidx >= q->size) { + q->pidx -= q->size; + q->gen ^= 1; + } + + /* update port statistics */ + if (skb->ip_summed == CHECKSUM_COMPLETE) + qs->port_stats[SGE_PSTAT_TX_CSUM]++; + if (skb_shinfo(skb)->gso_size) + qs->port_stats[SGE_PSTAT_TSO]++; + if (vlan_tx_tag_present(skb)) + qs->port_stats[SGE_PSTAT_VLANINS]++; + + /* + * We do not use Tx completion interrupts to free DMAd Tx packets. + * This is good for performance but means that we rely on new Tx + * packets arriving to run the destructors of completed packets, + * which open up space in their sockets' send queues. 
Sometimes + * we do not get such new packets causing Tx to stall. A single + * UDP transmitter is a good example of this situation. We have + * a clean up timer that periodically reclaims completed packets + * but it doesn't run often enough (nor do we want it to) to prevent + * lengthy stalls. A solution to this problem is to run the + * destructor early, after the packet is queued but before it's DMAd. + * A cons is that we lie to socket memory accounting, but the amount + * of extra memory is reasonable (limited by the number of Tx + * descriptors), the packets do actually get freed quickly by new + * packets almost always, and for protocols like TCP that wait for + * acks to really free up the data the extra memory is even less. + * On the positive side we run the destructors on the sending CPU + * rather than on a potentially different completing CPU, usually a + * good thing. We also run them without holding our Tx queue lock, + * unlike what reclaim_completed_tx() would otherwise do. + * + * Run the destructor before telling the DMA engine about the packet + * to make sure it doesn't complete and get freed prematurely. + */ + if (likely(!skb_shared(skb))) + skb_orphan(skb); + + write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); + check_ring_tx_db(adap, q); + return NETDEV_TX_OK; +} + +/** + * write_imm - write a packet into a Tx descriptor as immediate data + * @d: the Tx descriptor to write + * @skb: the packet + * @len: the length of packet data to write as immediate data + * @gen: the generation bit value to write + * + * Writes a packet as immediate data into a Tx descriptor. The packet + * contains a work request at its beginning. We must write the packet + * carefully so the SGE doesn't read it accidentally before it's written + * in its entirety. + */ +static inline void write_imm(struct tx_desc *d, struct sk_buff *skb, + unsigned int len, unsigned int gen) +{ + struct work_request_hdr *from = (struct work_request_hdr *)skb->data; + struct work_request_hdr *to = (struct work_request_hdr *)d; + + if (likely(!skb->data_len)) + memcpy(&to[1], &from[1], len - sizeof(*from)); + else + skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); + + to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | + V_WR_BCNTLFLT(len & 7)); + wmb(); + to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) | + V_WR_LEN((len + 7) / 8)); + wr_gen2(d, gen); + kfree_skb(skb); +} + +/** + * check_desc_avail - check descriptor availability on a send queue + * @adap: the adapter + * @q: the send queue + * @skb: the packet needing the descriptors + * @ndesc: the number of Tx descriptors needed + * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL) + * + * Checks if the requested number of Tx descriptors is available on an + * SGE send queue. If the queue is already suspended or not enough + * descriptors are available the packet is queued for later transmission. + * Must be called with the Tx queue locked. + * + * Returns 0 if enough descriptors are available, 1 if there aren't + * enough descriptors and the packet has been queued, and 2 if the caller + * needs to retry because there weren't enough descriptors at the + * beginning of the call but some freed up in the mean time. 
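+ *
+ * A sketch of the calling pattern used by ctrl_xmit() and ofld_xmit()
+ * below (names as in this file):
+ *
+ *   again: reclaim completed descriptors;
+ *   ret = check_desc_avail(adap, q, skb, ndesc, qid);
+ *   if (ret == 1)
+ *           return NET_XMIT_CN;  (the packet was queued)
+ *   if (ret == 2)
+ *           goto again;          (space freed up, retry)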
+ */
+static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb, unsigned int ndesc,
+ unsigned int qid)
+{
+ if (unlikely(!skb_queue_empty(&q->sendq))) {
+ addq_exit:__skb_queue_tail(&q->sendq, skb);
+ return 1;
+ }
+ if (unlikely(q->size - q->in_use < ndesc)) {
+ struct sge_qset *qs = txq_to_qset(q, qid);
+
+ set_bit(qid, &qs->txq_stopped);
+ smp_mb__after_clear_bit();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(qid, &qs->txq_stopped))
+ return 2;
+
+ q->stops++;
+ goto addq_exit;
+ }
+ return 0;
+}
+
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ unsigned int reclaim = q->processed - q->cleaned;
+
+ q->in_use -= reclaim;
+ q->cleaned += reclaim;
+}
+
+static inline int immediate(const struct sk_buff *skb)
+{
+ return skb->len <= WR_LEN;
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @adap: the adapter
+ * @q: the control queue
+ * @skb: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data in a single Tx
+ * descriptor and have no page fragments.
+ */
+static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb)
+{
+ int ret;
+ struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
+
+ if (unlikely(!immediate(skb))) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
+ wrp->wr_lo = htonl(V_WR_TID(q->token));
+
+ spin_lock(&q->lock);
+ again:reclaim_completed_tx_imm(q);
+
+ ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
+ if (unlikely(ret)) {
+ if (ret == 1) {
+ spin_unlock(&q->lock);
+ return NET_XMIT_CN;
+ }
+ goto again;
+ }
+
+ write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+ q->in_use++;
+ if (++q->pidx >= q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+ spin_unlock(&q->lock);
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_ctrlq - restart a suspended control queue
+ * @qs: the queue set containing the control queue
+ *
+ * Resumes transmission on a suspended Tx control queue.
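+ *
+ * Note: this runs as the control queue's qresume tasklet (set up in
+ * t3_sge_alloc_qset() below), so it can race only with ctrl_xmit(),
+ * with which it shares the queue lock; the doorbell is written once,
+ * after the lock has been dropped.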
+ */ +static void restart_ctrlq(unsigned long data) +{ + struct sk_buff *skb; + struct sge_qset *qs = (struct sge_qset *)data; + struct sge_txq *q = &qs->txq[TXQ_CTRL]; + + spin_lock(&q->lock); + again:reclaim_completed_tx_imm(q); + + while (q->in_use < q->size && + (skb = __skb_dequeue(&q->sendq)) != NULL) { + + write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); + + if (++q->pidx >= q->size) { + q->pidx = 0; + q->gen ^= 1; + } + q->in_use++; + } + + if (!skb_queue_empty(&q->sendq)) { + set_bit(TXQ_CTRL, &qs->txq_stopped); + smp_mb__after_clear_bit(); + + if (should_restart_tx(q) && + test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) + goto again; + q->stops++; + } + + spin_unlock(&q->lock); + wmb(); + t3_write_reg(qs->adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); +} + +/* + * Send a management message through control queue 0 + */ +int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + local_bh_disable(); + ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); + local_bh_enable(); + + return ret; +} + +/** + * deferred_unmap_destructor - unmap a packet when it is freed + * @skb: the packet + * + * This is the packet destructor used for Tx packets that need to remain + * mapped until they are freed rather than until their Tx descriptors are + * freed. + */ +static void deferred_unmap_destructor(struct sk_buff *skb) +{ + int i; + const dma_addr_t *p; + const struct skb_shared_info *si; + const struct deferred_unmap_info *dui; + + dui = (struct deferred_unmap_info *)skb->head; + p = dui->addr; + + if (skb->tail - skb->transport_header) + pci_unmap_single(dui->pdev, *p++, + skb->tail - skb->transport_header, + PCI_DMA_TODEVICE); + + si = skb_shinfo(skb); + for (i = 0; i < si->nr_frags; i++) + pci_unmap_page(dui->pdev, *p++, si->frags[i].size, + PCI_DMA_TODEVICE); +} + +static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, + const struct sg_ent *sgl, int sgl_flits) +{ + dma_addr_t *p; + struct deferred_unmap_info *dui; + + dui = (struct deferred_unmap_info *)skb->head; + dui->pdev = pdev; + for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) { + *p++ = be64_to_cpu(sgl->addr[0]); + *p++ = be64_to_cpu(sgl->addr[1]); + } + if (sgl_flits) + *p = be64_to_cpu(sgl->addr[0]); +} + +/** + * write_ofld_wr - write an offload work request + * @adap: the adapter + * @skb: the packet to send + * @q: the Tx queue + * @pidx: index of the first Tx descriptor to write + * @gen: the generation value to use + * @ndesc: number of descriptors the packet will occupy + * + * Write an offload work request to send the supplied packet. The packet + * data already carry the work request with most fields populated. + */ +static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, + struct sge_txq *q, unsigned int pidx, + unsigned int gen, unsigned int ndesc) +{ + unsigned int sgl_flits, flits; + struct work_request_hdr *from; + struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; + struct tx_desc *d = &q->desc[pidx]; + + if (immediate(skb)) { + q->sdesc[pidx].skb = NULL; + write_imm(d, skb, skb->len, gen); + return; + } + + /* Only TX_DATA builds SGLs */ + + from = (struct work_request_hdr *)skb->data; + memcpy(&d->flit[1], &from[1], + skb_transport_offset(skb) - sizeof(*from)); + + flits = skb_transport_offset(skb) / 8; + sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl;
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+ skb->tail - skb->transport_header,
+ adap->pdev);
+ if (need_skb_unmap()) {
+ setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+ skb->destructor = deferred_unmap_destructor;
+ }
+
+ write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
+ gen, from->wr_hi, from->wr_lo);
+}
+
+/**
+ * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of Tx descriptors needed for the given offload
+ * packet. These packets are already fully constructed.
+ */
+static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (skb->len <= WR_LEN)
+ return 1; /* packet fits as immediate data */
+
+ flits = skb_transport_offset(skb) / 8; /* headers */
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb->tail != skb->transport_header)
+ cnt++;
+ return flits_to_desc(flits + sgl_len(cnt));
+}
+
+/**
+ * ofld_xmit - send a packet through an offload queue
+ * @adap: the adapter
+ * @q: the Tx offload queue
+ * @skb: the packet
+ *
+ * Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb)
+{
+ int ret;
+ unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
+
+ spin_lock(&q->lock);
+again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
+ if (unlikely(ret)) {
+ if (ret == 1) {
+ skb->priority = ndesc; /* save for restart */
+ spin_unlock(&q->lock);
+ return NET_XMIT_CN;
+ }
+ goto again;
+ }
+
+ gen = q->gen;
+ q->in_use += ndesc;
+ pidx = q->pidx;
+ q->pidx += ndesc;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+ spin_unlock(&q->lock);
+
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+ check_ring_tx_db(adap, q);
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_offloadq - restart a suspended offload queue
+ * @qs: the queue set containing the offload queue
+ *
+ * Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_offloadq(unsigned long data)
+{
+ struct sk_buff *skb;
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_txq *q = &qs->txq[TXQ_OFLD];
+ const struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+
+ spin_lock(&q->lock);
+again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ while ((skb = skb_peek(&q->sendq)) != NULL) {
+ unsigned int gen, pidx;
+ unsigned int ndesc = skb->priority;
+
+ if (unlikely(q->size - q->in_use < ndesc)) {
+ set_bit(TXQ_OFLD, &qs->txq_stopped);
+ smp_mb__after_clear_bit();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
+ goto again;
+ q->stops++;
+ break;
+ }
+
+ gen = q->gen;
+ q->in_use += ndesc;
+ pidx = q->pidx;
+ q->pidx += ndesc;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+ __skb_unlink(skb, &q->sendq);
+ spin_unlock(&q->lock);
+
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+ spin_lock(&q->lock);
+ }
+ spin_unlock(&q->lock);
+
+#if USE_GTS
+ set_bit(TXQ_RUNNING, &q->flags);
+ set_bit(TXQ_LAST_PKT_DB, &q->flags);
+#endif
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/**
+ * queue_set - return the queue set a packet should use
+ * @skb: the packet
+ *
+ * Maps a packet to the SGE queue set it should use. The desired queue
+ * set is carried in bits 1-3 in the packet's priority.
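+ *
+ * For example (an illustrative encoding, matching is_ctrl_pkt() below):
+ * a regular offload packet aimed at queue set 2 would use
+ * skb->priority = 2 << 1, and the same packet aimed at the control
+ * queue would also set bit 0, i.e. (2 << 1) | 1.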
+ */ +static inline int queue_set(const struct sk_buff *skb) +{ + return skb->priority >> 1; +} + +/** + * is_ctrl_pkt - return whether an offload packet is a control packet + * @skb: the packet + * + * Determines whether an offload packet should use an OFLD or a CTRL + * Tx queue. This is indicated by bit 0 in the packet's priority. + */ +static inline int is_ctrl_pkt(const struct sk_buff *skb) +{ + return skb->priority & 1; +} + +/** + * t3_offload_tx - send an offload packet + * @tdev: the offload device to send to + * @skb: the packet + * + * Sends an offload packet. We use the packet priority to select the + * appropriate Tx queue as follows: bit 0 indicates whether the packet + * should be sent as regular or control, bits 1-3 select the queue set. + */ +int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb) +{ + struct adapter *adap = tdev2adap(tdev); + struct sge_qset *qs = &adap->sge.qs[queue_set(skb)]; + + if (unlikely(is_ctrl_pkt(skb))) + return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb); + + return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb); +} + +/** + * offload_enqueue - add an offload packet to an SGE offload receive queue + * @q: the SGE response queue + * @skb: the packet + * + * Add a new offload packet to an SGE response queue's offload packet + * queue. If the packet is the first on the queue it schedules the RX + * softirq to process the queue. + */ +static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) +{ + int was_empty = skb_queue_empty(&q->rx_queue); + + __skb_queue_tail(&q->rx_queue, skb); + + if (was_empty) { + struct sge_qset *qs = rspq_to_qset(q); + + napi_schedule(&qs->napi); + } +} + +/** + * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts + * @tdev: the offload device that will be receiving the packets + * @q: the SGE response queue that assembled the bundle + * @skbs: the partial bundle + * @n: the number of packets in the bundle + * + * Delivers a (partial) bundle of Rx offload packets to an offload device. + */ +static inline void deliver_partial_bundle(struct t3cdev *tdev, + struct sge_rspq *q, + struct sk_buff *skbs[], int n) +{ + if (n) { + q->offload_bundles++; + tdev->recv(tdev, skbs, n); + } +} + +/** + * ofld_poll - NAPI handler for offload packets in interrupt mode + * @dev: the network device doing the polling + * @budget: polling budget + * + * The NAPI handler for offload packets when a response queue is serviced + * by the hard interrupt handler, i.e., when it's operating in non-polling + * mode. Creates small packet batches and sends them through the offload + * receive handler. Batches need to be of modest size as we do prefetches + * on the packets in each. 
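+ *
+ * In outline: up to @budget packets are unlinked from the queue's
+ * rx_queue, gathered RX_BUNDLE_SIZE at a time, and passed to the
+ * offload device's recv handler; whatever is left when the budget runs
+ * out is spliced back onto rx_queue for the next poll.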
+ */
+static int ofld_poll(struct napi_struct *napi, int budget)
+{
+ struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+ struct sge_rspq *q = &qs->rspq;
+ struct adapter *adapter = qs->adap;
+ int work_done = 0;
+
+ while (work_done < budget) {
+ struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+ struct sk_buff_head queue;
+ int ngathered;
+
+ spin_lock_irq(&q->lock);
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&q->rx_queue, &queue);
+ if (skb_queue_empty(&queue)) {
+ napi_complete(napi);
+ spin_unlock_irq(&q->lock);
+ return work_done;
+ }
+ spin_unlock_irq(&q->lock);
+
+ ngathered = 0;
+ skb_queue_walk_safe(&queue, skb, tmp) {
+ if (work_done >= budget)
+ break;
+ work_done++;
+
+ __skb_unlink(skb, &queue);
+ prefetch(skb->data);
+ skbs[ngathered] = skb;
+ if (++ngathered == RX_BUNDLE_SIZE) {
+ q->offload_bundles++;
+ adapter->tdev.recv(&adapter->tdev, skbs,
+ ngathered);
+ ngathered = 0;
+ }
+ }
+ if (!skb_queue_empty(&queue)) {
+ /* splice remaining packets back onto Rx queue */
+ spin_lock_irq(&q->lock);
+ skb_queue_splice(&queue, &q->rx_queue);
+ spin_unlock_irq(&q->lock);
+ }
+ deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
+ }
+
+ return work_done;
+}
+
+/**
+ * rx_offload - process a received offload packet
+ * @tdev: the offload device receiving the packet
+ * @rq: the response queue that received the packet
+ * @skb: the packet
+ * @rx_gather: a gather list of packets if we are building a bundle
+ * @gather_idx: index of the next available slot in the bundle
+ *
+ * Process an ingress offload packet and add it to the offload ingress
+ * queue. Returns the index of the next available slot in the bundle.
+ */
+static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
+ struct sk_buff *skb, struct sk_buff *rx_gather[],
+ unsigned int gather_idx)
+{
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ if (rq->polling) {
+ rx_gather[gather_idx++] = skb;
+ if (gather_idx == RX_BUNDLE_SIZE) {
+ tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
+ gather_idx = 0;
+ rq->offload_bundles++;
+ }
+ } else
+ offload_enqueue(rq, skb);
+
+ return gather_idx;
+}
+
+/**
+ * restart_tx - check whether to restart suspended Tx queues
+ * @qs: the queue set to resume
+ *
+ * Restarts suspended Tx queues of an SGE queue set if they have enough
+ * free resources to resume operation.
+ */
+static void restart_tx(struct sge_qset *qs)
+{
+ if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_ETH]) &&
+ test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+ qs->txq[TXQ_ETH].restarts++;
+ if (netif_running(qs->netdev))
+ netif_tx_wake_queue(qs->tx_q);
+ }
+
+ if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_OFLD]) &&
+ test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
+ qs->txq[TXQ_OFLD].restarts++;
+ tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
+ }
+ if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_CTRL]) &&
+ test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
+ qs->txq[TXQ_CTRL].restarts++;
+ tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
+ }
+}
+
+/**
+ * cxgb3_arp_process - process an ARP request probing a private IP address
+ * @pi: the port information
+ * @skb: the skbuff containing the ARP request
+ *
+ * Check if the ARP request is probing the private IP address
+ * dedicated to iSCSI, and generate an ARP reply if so.
+ */
+static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+ unsigned char *sha;
+ __be32 sip, tip;
+
+ if (!dev)
+ return;
+
+ skb_reset_network_header(skb);
+ arp = arp_hdr(skb);
+
+ if (arp->ar_op != htons(ARPOP_REQUEST))
+ return;
+
+ arp_ptr = (unsigned char *)(arp + 1);
+ sha = arp_ptr;
+ arp_ptr += dev->addr_len;
+ memcpy(&sip, arp_ptr, sizeof(sip));
+ arp_ptr += sizeof(sip);
+ arp_ptr += dev->addr_len;
+ memcpy(&tip, arp_ptr, sizeof(tip));
+
+ if (tip != pi->iscsi_ipv4addr)
+ return;
+
+ arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
+ pi->iscsic.mac_addr, sha);
+
+}
+
+static inline int is_arp(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_ARP);
+}
+
+static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
+ struct sk_buff *skb)
+{
+ if (is_arp(skb)) {
+ cxgb3_arp_process(pi, skb);
+ return;
+ }
+
+ if (pi->iscsic.recv)
+ pi->iscsic.recv(pi, skb);
+
+}
+
+/**
+ * rx_eth - process an ingress Ethernet packet
+ * @adap: the adapter
+ * @rq: the response queue that received the packet
+ * @skb: the packet
+ * @pad: amount of padding at the start of the buffer
+ * @lro: whether the packet may be handed to GRO
+ *
+ * Process an ingress Ethernet packet and deliver it to the stack.
+ * The padding is 2 if the packet was delivered in an Rx buffer and 0
+ * if it was immediate data in a response.
+ */
+static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
+ struct sk_buff *skb, int pad, int lro)
+{
+ struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
+ struct sge_qset *qs = rspq_to_qset(rq);
+ struct port_info *pi;
+
+ skb_pull(skb, sizeof(*p) + pad);
+ skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
+ pi = netdev_priv(skb->dev);
+ if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
+ p->csum == htons(0xffff) && !p->fragment) {
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb_checksum_none_assert(skb);
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+ if (p->vlan_valid) {
+ qs->port_stats[SGE_PSTAT_VLANEX]++;
+ __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+ }
+ if (rq->polling) {
+ if (lro)
+ napi_gro_receive(&qs->napi, skb);
+ else {
+ if (unlikely(pi->iscsic.flags))
+ cxgb3_process_iscsi_prov_pack(pi, skb);
+ netif_receive_skb(skb);
+ }
+ } else
+ netif_rx(skb);
+}
+
+static inline int is_eth_tcp(u32 rss)
+{
+ return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
+}
+
+/**
+ * lro_add_page - add a page chunk to an LRO session
+ * @adap: the adapter
+ * @qs: the associated queue set
+ * @fl: the free list containing the page chunk to add
+ * @len: packet length
+ * @complete: indicates the last fragment of a frame
+ *
+ * Add a received packet contained in a page chunk to an existing LRO
+ * session.
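+ *
+ * In outline: the chunk is attached to the skb obtained from
+ * napi_get_frags() as one more page fragment, skb->len, data_len and
+ * truesize grow by @len, and only when @complete is set is the
+ * assembled frame handed to napi_gro_frags().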
+ */ +static void lro_add_page(struct adapter *adap, struct sge_qset *qs, + struct sge_fl *fl, int len, int complete) +{ + struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; + struct port_info *pi = netdev_priv(qs->netdev); + struct sk_buff *skb = NULL; + struct cpl_rx_pkt *cpl; + struct skb_frag_struct *rx_frag; + int nr_frags; + int offset = 0; + + if (!qs->nomem) { + skb = napi_get_frags(&qs->napi); + qs->nomem = !skb; + } + + fl->credits--; + + pci_dma_sync_single_for_cpu(adap->pdev, + dma_unmap_addr(sd, dma_addr), + fl->buf_size - SGE_PG_RSVD, + PCI_DMA_FROMDEVICE); + + (*sd->pg_chunk.p_cnt)--; + if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) + pci_unmap_page(adap->pdev, + sd->pg_chunk.mapping, + fl->alloc_size, + PCI_DMA_FROMDEVICE); + + if (!skb) { + put_page(sd->pg_chunk.page); + if (complete) + qs->nomem = 0; + return; + } + + rx_frag = skb_shinfo(skb)->frags; + nr_frags = skb_shinfo(skb)->nr_frags; + + if (!nr_frags) { + offset = 2 + sizeof(struct cpl_rx_pkt); + cpl = qs->lro_va = sd->pg_chunk.va + 2; + + if ((qs->netdev->features & NETIF_F_RXCSUM) && + cpl->csum_valid && cpl->csum == htons(0xffff)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; + } else + skb->ip_summed = CHECKSUM_NONE; + } else + cpl = qs->lro_va; + + len -= offset; + + rx_frag += nr_frags; + rx_frag->page = sd->pg_chunk.page; + rx_frag->page_offset = sd->pg_chunk.offset + offset; + rx_frag->size = len; + + skb->len += len; + skb->data_len += len; + skb->truesize += len; + skb_shinfo(skb)->nr_frags++; + + if (!complete) + return; + + skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); + + if (cpl->vlan_valid) + __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan)); + napi_gro_frags(&qs->napi); +} + +/** + * handle_rsp_cntrl_info - handles control information in a response + * @qs: the queue set corresponding to the response + * @flags: the response control flags + * + * Handles the control information of an SGE response, such as GTS + * indications and completion credits for the queue set's Tx queues. + * HW coalesces credits, we don't do any extra SW coalescing. + */ +static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags) +{ + unsigned int credits; + +#if USE_GTS + if (flags & F_RSPD_TXQ0_GTS) + clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); +#endif + + credits = G_RSPD_TXQ0_CR(flags); + if (credits) + qs->txq[TXQ_ETH].processed += credits; + + credits = G_RSPD_TXQ2_CR(flags); + if (credits) + qs->txq[TXQ_CTRL].processed += credits; + +# if USE_GTS + if (flags & F_RSPD_TXQ1_GTS) + clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); +# endif + credits = G_RSPD_TXQ1_CR(flags); + if (credits) + qs->txq[TXQ_OFLD].processed += credits; +} + +/** + * check_ring_db - check if we need to ring any doorbells + * @adapter: the adapter + * @qs: the queue set whose Tx queues are to be examined + * @sleeping: indicates which Tx queue sent GTS + * + * Checks if some of a queue set's Tx queues need to ring their doorbells + * to resume transmission after idling while they still have unprocessed + * descriptors. 
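+ *
+ * The check relies on the invariant that an idle queue satisfies
+ * cleaned + in_use == processed; when the two sides differ, descriptors
+ * remain that the HW has not processed, so the doorbell is rung again
+ * unless TXQ_LAST_PKT_DB shows that has already been done.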
+ */ +static void check_ring_db(struct adapter *adap, struct sge_qset *qs, + unsigned int sleeping) +{ + if (sleeping & F_RSPD_TXQ0_GTS) { + struct sge_txq *txq = &qs->txq[TXQ_ETH]; + + if (txq->cleaned + txq->in_use != txq->processed && + !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { + set_bit(TXQ_RUNNING, &txq->flags); + t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | + V_EGRCNTX(txq->cntxt_id)); + } + } + + if (sleeping & F_RSPD_TXQ1_GTS) { + struct sge_txq *txq = &qs->txq[TXQ_OFLD]; + + if (txq->cleaned + txq->in_use != txq->processed && + !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { + set_bit(TXQ_RUNNING, &txq->flags); + t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | + V_EGRCNTX(txq->cntxt_id)); + } + } +} + +/** + * is_new_response - check if a response is newly written + * @r: the response descriptor + * @q: the response queue + * + * Returns true if a response descriptor contains a yet unprocessed + * response. + */ +static inline int is_new_response(const struct rsp_desc *r, + const struct sge_rspq *q) +{ + return (r->intr_gen & F_RSPD_GEN2) == q->gen; +} + +static inline void clear_rspq_bufstate(struct sge_rspq * const q) +{ + q->pg_skb = NULL; + q->rx_recycle_buf = 0; +} + +#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) +#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ + V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ + V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \ + V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR)) + +/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */ +#define NOMEM_INTR_DELAY 2500 + +/** + * process_responses - process responses from an SGE response queue + * @adap: the adapter + * @qs: the queue set to which the response queue belongs + * @budget: how many responses can be processed in this round + * + * Process responses from an SGE response queue up to the supplied budget. + * Responses include received packets as well as credits and other events + * for the queues that belong to the response queue's queue set. + * A negative budget is effectively unlimited. + * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. 
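+ *
+ * The value returned is the number of responses actually consumed, so
+ * a NAPI caller can compare it against @budget, while the pure
+ * interrupt-mode handlers simply pass a negative (unlimited) budget;
+ * see napi_rx_handler() and t3_sge_intr_msix() below.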
+ */ +static int process_responses(struct adapter *adap, struct sge_qset *qs, + int budget) +{ + struct sge_rspq *q = &qs->rspq; + struct rsp_desc *r = &q->desc[q->cidx]; + int budget_left = budget; + unsigned int sleeping = 0; + struct sk_buff *offload_skbs[RX_BUNDLE_SIZE]; + int ngathered = 0; + + q->next_holdoff = q->holdoff_tmr; + + while (likely(budget_left && is_new_response(r, q))) { + int packet_complete, eth, ethpad = 2; + int lro = !!(qs->netdev->features & NETIF_F_GRO); + struct sk_buff *skb = NULL; + u32 len, flags; + __be32 rss_hi, rss_lo; + + rmb(); + eth = r->rss_hdr.opcode == CPL_RX_PKT; + rss_hi = *(const __be32 *)r; + rss_lo = r->rss_hdr.rss_hash_val; + flags = ntohl(r->flags); + + if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) { + skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC); + if (!skb) + goto no_mem; + + memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE); + skb->data[0] = CPL_ASYNC_NOTIF; + rss_hi = htonl(CPL_ASYNC_NOTIF << 24); + q->async_notif++; + } else if (flags & F_RSPD_IMM_DATA_VALID) { + skb = get_imm_packet(r); + if (unlikely(!skb)) { +no_mem: + q->next_holdoff = NOMEM_INTR_DELAY; + q->nomem++; + /* consume one credit since we tried */ + budget_left--; + break; + } + q->imm_data++; + ethpad = 0; + } else if ((len = ntohl(r->len_cq)) != 0) { + struct sge_fl *fl; + + lro &= eth && is_eth_tcp(rss_hi); + + fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; + if (fl->use_pages) { + void *addr = fl->sdesc[fl->cidx].pg_chunk.va; + + prefetch(addr); +#if L1_CACHE_BYTES < 128 + prefetch(addr + L1_CACHE_BYTES); +#endif + __refill_fl(adap, fl); + if (lro > 0) { + lro_add_page(adap, qs, fl, + G_RSPD_LEN(len), + flags & F_RSPD_EOP); + goto next_fl; + } + + skb = get_packet_pg(adap, fl, q, + G_RSPD_LEN(len), + eth ? + SGE_RX_DROP_THRES : 0); + q->pg_skb = skb; + } else + skb = get_packet(adap, fl, G_RSPD_LEN(len), + eth ? 
SGE_RX_DROP_THRES : 0);
+ if (unlikely(!skb)) {
+ if (!eth)
+ goto no_mem;
+ q->rx_drops++;
+ } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
+ __skb_pull(skb, 2);
+next_fl:
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ } else
+ q->pure_rsps++;
+
+ if (flags & RSPD_CTRL_MASK) {
+ sleeping |= flags & RSPD_GTS_MASK;
+ handle_rsp_cntrl_info(qs, flags);
+ }
+
+ r++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ r = q->desc;
+ }
+ prefetch(r);
+
+ if (++q->credits >= (q->size / 4)) {
+ refill_rspq(adap, q, q->credits);
+ q->credits = 0;
+ }
+
+ packet_complete = flags &
+ (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
+ F_RSPD_ASYNC_NOTIF);
+
+ if (skb != NULL && packet_complete) {
+ if (eth)
+ rx_eth(adap, q, skb, ethpad, lro);
+ else {
+ q->offload_pkts++;
+ /* Preserve the RSS info in csum & priority */
+ skb->csum = rss_hi;
+ skb->priority = rss_lo;
+ ngathered = rx_offload(&adap->tdev, q, skb,
+ offload_skbs,
+ ngathered);
+ }
+
+ if (flags & F_RSPD_EOP)
+ clear_rspq_bufstate(q);
+ }
+ --budget_left;
+ }
+
+ deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+
+ if (sleeping)
+ check_ring_db(adap, qs, sleeping);
+
+ smp_mb(); /* commit Tx queue .processed updates */
+ if (unlikely(qs->txq_stopped != 0))
+ restart_tx(qs);
+
+ budget -= budget_left;
+ return budget;
+}
+
+static inline int is_pure_response(const struct rsp_desc *r)
+{
+ __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
+
+ return (n | r->len_cq) == 0;
+}
+
+/**
+ * napi_rx_handler - the NAPI handler for Rx processing
+ * @napi: the napi instance
+ * @budget: how many packets we can process in this round
+ *
+ * Handler for new data events when using NAPI.
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+ struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+ struct adapter *adap = qs->adap;
+ int work_done = process_responses(adap, qs, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+
+ /*
+ * Because we don't atomically flush the following
+ * write it is possible that in very rare cases it can
+ * reach the device in a way that races with a new
+ * response being written plus an error interrupt
+ * causing the NAPI interrupt handler below to return
+ * unhandled status to the OS. To protect against
+ * this would require flushing the write and doing
+ * both the write and the flush with interrupts off.
+ * Way too expensive and unjustifiable given the
+ * rarity of the race.
+ *
+ * The race cannot happen at all with MSI-X.
+ */
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
+ V_NEWTIMER(qs->rspq.next_holdoff) |
+ V_NEWINDEX(qs->rspq.cidx));
+ }
+ return work_done;
+}
+
+/*
+ * Returns true if the device is already scheduled for polling.
+ */
+static inline int napi_is_scheduled(struct napi_struct *napi)
+{
+ return test_bit(NAPI_STATE_SCHED, &napi->state);
+}
+
+/**
+ * process_pure_responses - process pure responses from a response queue
+ * @adap: the adapter
+ * @qs: the queue set owning the response queue
+ * @r: the first pure response to process
+ *
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non-data-carrying) responses. Such responses are too lightweight to
+ * justify calling a softirq under NAPI, so we handle them specially in
+ * the interrupt handler. The function is called with a pointer to a
+ * response, which the caller must ensure is a valid pure response.
+ *
+ * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
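+ *
+ * See handle_responses() below for the intended calling pattern: it
+ * calls this function only on a response it has already verified to be
+ * pure and uses the return value to choose between rearming the queue
+ * with a GTS write and scheduling NAPI.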
+ */ +static int process_pure_responses(struct adapter *adap, struct sge_qset *qs, + struct rsp_desc *r) +{ + struct sge_rspq *q = &qs->rspq; + unsigned int sleeping = 0; + + do { + u32 flags = ntohl(r->flags); + + r++; + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->gen ^= 1; + r = q->desc; + } + prefetch(r); + + if (flags & RSPD_CTRL_MASK) { + sleeping |= flags & RSPD_GTS_MASK; + handle_rsp_cntrl_info(qs, flags); + } + + q->pure_rsps++; + if (++q->credits >= (q->size / 4)) { + refill_rspq(adap, q, q->credits); + q->credits = 0; + } + if (!is_new_response(r, q)) + break; + rmb(); + } while (is_pure_response(r)); + + if (sleeping) + check_ring_db(adap, qs, sleeping); + + smp_mb(); /* commit Tx queue .processed updates */ + if (unlikely(qs->txq_stopped != 0)) + restart_tx(qs); + + return is_new_response(r, q); +} + +/** + * handle_responses - decide what to do with new responses in NAPI mode + * @adap: the adapter + * @q: the response queue + * + * This is used by the NAPI interrupt handlers to decide what to do with + * new SGE responses. If there are no new responses it returns -1. If + * there are new responses and they are pure (i.e., non-data carrying) + * it handles them straight in hard interrupt context as they are very + * cheap and don't deliver any packets. Finally, if there are any data + * signaling responses it schedules the NAPI handler. Returns 1 if it + * schedules NAPI, 0 if all new responses were pure. + * + * The caller must ascertain NAPI is not already running. + */ +static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) +{ + struct sge_qset *qs = rspq_to_qset(q); + struct rsp_desc *r = &q->desc[q->cidx]; + + if (!is_new_response(r, q)) + return -1; + rmb(); + if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { + t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | + V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); + return 0; + } + napi_schedule(&qs->napi); + return 1; +} + +/* + * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case + * (i.e., response queue serviced in hard interrupt). + */ +static irqreturn_t t3_sge_intr_msix(int irq, void *cookie) +{ + struct sge_qset *qs = cookie; + struct adapter *adap = qs->adap; + struct sge_rspq *q = &qs->rspq; + + spin_lock(&q->lock); + if (process_responses(adap, qs, -1) == 0) + q->unhandled_irqs++; + t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | + V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); + spin_unlock(&q->lock); + return IRQ_HANDLED; +} + +/* + * The MSI-X interrupt handler for an SGE response queue for the NAPI case + * (i.e., response queue serviced by NAPI polling). + */ +static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) +{ + struct sge_qset *qs = cookie; + struct sge_rspq *q = &qs->rspq; + + spin_lock(&q->lock); + + if (handle_responses(qs->adap, q) < 0) + q->unhandled_irqs++; + spin_unlock(&q->lock); + return IRQ_HANDLED; +} + +/* + * The non-NAPI MSI interrupt handler. This needs to handle data events from + * SGE response queues as well as error and other async events as they all use + * the same MSI vector. We use one SGE response queue per port in this mode + * and protect all response queues with queue 0's lock. 
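+ *
+ * If neither queue set had any work, the interrupt must have been
+ * raised for an error or other async event, so it is passed on to
+ * t3_slow_intr_handler(); if that reports nothing either, the
+ * interrupt is counted as unhandled.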
+ */ +static irqreturn_t t3_intr_msi(int irq, void *cookie) +{ + int new_packets = 0; + struct adapter *adap = cookie; + struct sge_rspq *q = &adap->sge.qs[0].rspq; + + spin_lock(&q->lock); + + if (process_responses(adap, &adap->sge.qs[0], -1)) { + t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | + V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); + new_packets = 1; + } + + if (adap->params.nports == 2 && + process_responses(adap, &adap->sge.qs[1], -1)) { + struct sge_rspq *q1 = &adap->sge.qs[1].rspq; + + t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) | + V_NEWTIMER(q1->next_holdoff) | + V_NEWINDEX(q1->cidx)); + new_packets = 1; + } + + if (!new_packets && t3_slow_intr_handler(adap) == 0) + q->unhandled_irqs++; + + spin_unlock(&q->lock); + return IRQ_HANDLED; +} + +static int rspq_check_napi(struct sge_qset *qs) +{ + struct sge_rspq *q = &qs->rspq; + + if (!napi_is_scheduled(&qs->napi) && + is_new_response(&q->desc[q->cidx], q)) { + napi_schedule(&qs->napi); + return 1; + } + return 0; +} + +/* + * The MSI interrupt handler for the NAPI case (i.e., response queues serviced + * by NAPI polling). Handles data events from SGE response queues as well as + * error and other async events as they all use the same MSI vector. We use + * one SGE response queue per port in this mode and protect all response + * queues with queue 0's lock. + */ +static irqreturn_t t3_intr_msi_napi(int irq, void *cookie) +{ + int new_packets; + struct adapter *adap = cookie; + struct sge_rspq *q = &adap->sge.qs[0].rspq; + + spin_lock(&q->lock); + + new_packets = rspq_check_napi(&adap->sge.qs[0]); + if (adap->params.nports == 2) + new_packets += rspq_check_napi(&adap->sge.qs[1]); + if (!new_packets && t3_slow_intr_handler(adap) == 0) + q->unhandled_irqs++; + + spin_unlock(&q->lock); + return IRQ_HANDLED; +} + +/* + * A helper function that processes responses and issues GTS. + */ +static inline int process_responses_gts(struct adapter *adap, + struct sge_rspq *rq) +{ + int work; + + work = process_responses(adap, rspq_to_qset(rq), -1); + t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | + V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx)); + return work; +} + +/* + * The legacy INTx interrupt handler. This needs to handle data events from + * SGE response queues as well as error and other async events as they all use + * the same interrupt pin. We use one SGE response queue per port in this mode + * and protect all response queues with queue 0's lock. + */ +static irqreturn_t t3_intr(int irq, void *cookie) +{ + int work_done, w0, w1; + struct adapter *adap = cookie; + struct sge_rspq *q0 = &adap->sge.qs[0].rspq; + struct sge_rspq *q1 = &adap->sge.qs[1].rspq; + + spin_lock(&q0->lock); + + w0 = is_new_response(&q0->desc[q0->cidx], q0); + w1 = adap->params.nports == 2 && + is_new_response(&q1->desc[q1->cidx], q1); + + if (likely(w0 | w1)) { + t3_write_reg(adap, A_PL_CLI, 0); + t3_read_reg(adap, A_PL_CLI); /* flush */ + + if (likely(w0)) + process_responses_gts(adap, q0); + + if (w1) + process_responses_gts(adap, q1); + + work_done = w0 | w1; + } else + work_done = t3_slow_intr_handler(adap); + + spin_unlock(&q0->lock); + return IRQ_RETVAL(work_done != 0); +} + +/* + * Interrupt handler for legacy INTx interrupts for T3B-based cards. + * Handles data events from SGE response queues as well as error and other + * async events as they all use the same interrupt pin. We use one SGE + * response queue per port in this mode and protect all response queues with + * queue 0's lock. 
+ */ +static irqreturn_t t3b_intr(int irq, void *cookie) +{ + u32 map; + struct adapter *adap = cookie; + struct sge_rspq *q0 = &adap->sge.qs[0].rspq; + + t3_write_reg(adap, A_PL_CLI, 0); + map = t3_read_reg(adap, A_SG_DATA_INTR); + + if (unlikely(!map)) /* shared interrupt, most likely */ + return IRQ_NONE; + + spin_lock(&q0->lock); + + if (unlikely(map & F_ERRINTR)) + t3_slow_intr_handler(adap); + + if (likely(map & 1)) + process_responses_gts(adap, q0); + + if (map & 2) + process_responses_gts(adap, &adap->sge.qs[1].rspq); + + spin_unlock(&q0->lock); + return IRQ_HANDLED; +} + +/* + * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards. + * Handles data events from SGE response queues as well as error and other + * async events as they all use the same interrupt pin. We use one SGE + * response queue per port in this mode and protect all response queues with + * queue 0's lock. + */ +static irqreturn_t t3b_intr_napi(int irq, void *cookie) +{ + u32 map; + struct adapter *adap = cookie; + struct sge_qset *qs0 = &adap->sge.qs[0]; + struct sge_rspq *q0 = &qs0->rspq; + + t3_write_reg(adap, A_PL_CLI, 0); + map = t3_read_reg(adap, A_SG_DATA_INTR); + + if (unlikely(!map)) /* shared interrupt, most likely */ + return IRQ_NONE; + + spin_lock(&q0->lock); + + if (unlikely(map & F_ERRINTR)) + t3_slow_intr_handler(adap); + + if (likely(map & 1)) + napi_schedule(&qs0->napi); + + if (map & 2) + napi_schedule(&adap->sge.qs[1].napi); + + spin_unlock(&q0->lock); + return IRQ_HANDLED; +} + +/** + * t3_intr_handler - select the top-level interrupt handler + * @adap: the adapter + * @polling: whether using NAPI to service response queues + * + * Selects the top-level interrupt handler based on the type of interrupts + * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the + * response queues. + */ +irq_handler_t t3_intr_handler(struct adapter *adap, int polling) +{ + if (adap->flags & USING_MSIX) + return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix; + if (adap->flags & USING_MSI) + return polling ? t3_intr_msi_napi : t3_intr_msi; + if (adap->params.rev > 0) + return polling ? t3b_intr_napi : t3b_intr; + return t3_intr; +} + +#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ + F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ + V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ + F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ + F_HIRCQPARITYERROR) +#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR) +#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \ + F_RSPQDISABLED) + +/** + * t3_sge_err_intr_handler - SGE async event interrupt handler + * @adapter: the adapter + * + * Interrupt handler for SGE asynchronous (non-data) events. 
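+ *
+ * Parity, framing, credit-overflow and disabled-queue conditions are
+ * logged, doorbell drop/full/empty conditions are deferred to
+ * workqueue tasks, and any bit in SGE_FATALERR additionally takes the
+ * adapter down via t3_fatal_err().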
+ */ +void t3_sge_err_intr_handler(struct adapter *adapter) +{ + unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) & + ~F_FLEMPTY; + + if (status & SGE_PARERR) + CH_ALERT(adapter, "SGE parity error (0x%x)\n", + status & SGE_PARERR); + if (status & SGE_FRAMINGERR) + CH_ALERT(adapter, "SGE framing error (0x%x)\n", + status & SGE_FRAMINGERR); + + if (status & F_RSPQCREDITOVERFOW) + CH_ALERT(adapter, "SGE response queue credit overflow\n"); + + if (status & F_RSPQDISABLED) { + v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS); + + CH_ALERT(adapter, + "packet delivered to disabled response queue " + "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff); + } + + if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) + queue_work(cxgb3_wq, &adapter->db_drop_task); + + if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL)) + queue_work(cxgb3_wq, &adapter->db_full_task); + + if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY)) + queue_work(cxgb3_wq, &adapter->db_empty_task); + + t3_write_reg(adapter, A_SG_INT_CAUSE, status); + if (status & SGE_FATALERR) + t3_fatal_err(adapter); +} + +/** + * sge_timer_tx - perform periodic maintenance of an SGE qset + * @data: the SGE queue set to maintain + * + * Runs periodically from a timer to perform maintenance of an SGE queue + * set. It performs two tasks: + * + * Cleans up any completed Tx descriptors that may still be pending. + * Normal descriptor cleanup happens when new packets are added to a Tx + * queue so this timer is relatively infrequent and does any cleanup only + * if the Tx queue has not seen any new packets in a while. We make a + * best effort attempt to reclaim descriptors, in that we don't wait + * around if we cannot get a queue's lock (which most likely is because + * someone else is queueing new packets and so will also handle the clean + * up). Since control queues use immediate data exclusively we don't + * bother cleaning them up here. + * + */ +static void sge_timer_tx(unsigned long data) +{ + struct sge_qset *qs = (struct sge_qset *)data; + struct port_info *pi = netdev_priv(qs->netdev); + struct adapter *adap = pi->adapter; + unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0}; + unsigned long next_period; + + if (__netif_tx_trylock(qs->tx_q)) { + tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], + TX_RECLAIM_TIMER_CHUNK); + __netif_tx_unlock(qs->tx_q); + } + + if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) { + tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD], + TX_RECLAIM_TIMER_CHUNK); + spin_unlock(&qs->txq[TXQ_OFLD].lock); + } + + next_period = TX_RECLAIM_PERIOD >> + (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) / + TX_RECLAIM_TIMER_CHUNK); + mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); +} + +/* + * sge_timer_rx - perform periodic maintenance of an SGE qset + * @data: the SGE queue set to maintain + * + * a) Replenishes Rx queues that have run out due to memory shortage. + * Normally new Rx buffers are added when existing ones are consumed but + * when out of memory a queue can become empty. We try to add only a few + * buffers here, the queue will be replenished fully as these new buffers + * are used up if memory shortage has subsided. + * + * b) Return coalesced response queue credits in case a response queue is + * starved. + * + */ +static void sge_timer_rx(unsigned long data) +{ + spinlock_t *lock; + struct sge_qset *qs = (struct sge_qset *)data; + struct port_info *pi = netdev_priv(qs->netdev); + struct adapter *adap = pi->adapter; + u32 status; + + lock = adap->params.rev > 0 ? 
+ &qs->rspq.lock : &adap->sge.qs[0].rspq.lock; + + if (!spin_trylock_irq(lock)) + goto out; + + if (napi_is_scheduled(&qs->napi)) + goto unlock; + + if (adap->params.rev < 4) { + status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); + + if (status & (1 << qs->rspq.cntxt_id)) { + qs->rspq.starved++; + if (qs->rspq.credits) { + qs->rspq.credits--; + refill_rspq(adap, &qs->rspq, 1); + qs->rspq.restarted++; + t3_write_reg(adap, A_SG_RSPQ_FL_STATUS, + 1 << qs->rspq.cntxt_id); + } + } + } + + if (qs->fl[0].credits < qs->fl[0].size) + __refill_fl(adap, &qs->fl[0]); + if (qs->fl[1].credits < qs->fl[1].size) + __refill_fl(adap, &qs->fl[1]); + +unlock: + spin_unlock_irq(lock); +out: + mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); +} + +/** + * t3_update_qset_coalesce - update coalescing settings for a queue set + * @qs: the SGE queue set + * @p: new queue set parameters + * + * Update the coalescing settings for an SGE queue set. Nothing is done + * if the queue set is not initialized yet. + */ +void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) +{ + qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ + qs->rspq.polling = p->polling; + qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll; +} + +/** + * t3_sge_alloc_qset - initialize an SGE queue set + * @adapter: the adapter + * @id: the queue set id + * @nports: how many Ethernet ports will be using this queue set + * @irq_vec_idx: the IRQ vector index for response queue interrupts + * @p: configuration parameters for this queue set + * @ntxq: number of Tx queues for the queue set + * @netdev: net device associated with this queue set + * @netdevq: net device TX queue associated with this queue set + * + * Allocate resources and initialize an SGE queue set. A queue set + * comprises a response queue, two Rx free-buffer queues, and up to 3 + * Tx queues. The Tx queues are assigned roles in the order Ethernet + * queue, offload queue, and control queue. + */ +int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, + int irq_vec_idx, const struct qset_params *p, + int ntxq, struct net_device *dev, + struct netdev_queue *netdevq) +{ + int i, avail, ret = -ENOMEM; + struct sge_qset *q = &adapter->sge.qs[id]; + + init_qset_cntxt(q, id); + setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q); + setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q); + + q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, + sizeof(struct rx_desc), + sizeof(struct rx_sw_desc), + &q->fl[0].phys_addr, &q->fl[0].sdesc); + if (!q->fl[0].desc) + goto err; + + q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, + sizeof(struct rx_desc), + sizeof(struct rx_sw_desc), + &q->fl[1].phys_addr, &q->fl[1].sdesc); + if (!q->fl[1].desc) + goto err; + + q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, + sizeof(struct rsp_desc), 0, + &q->rspq.phys_addr, NULL); + if (!q->rspq.desc) + goto err; + + for (i = 0; i < ntxq; ++i) { + /* + * The control queue always uses immediate data so does not + * need to keep track of any sk_buffs. + */ + size_t sz = i == TXQ_CTRL ? 
0 : sizeof(struct tx_sw_desc); + + q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], + sizeof(struct tx_desc), sz, + &q->txq[i].phys_addr, + &q->txq[i].sdesc); + if (!q->txq[i].desc) + goto err; + + q->txq[i].gen = 1; + q->txq[i].size = p->txq_size[i]; + spin_lock_init(&q->txq[i].lock); + skb_queue_head_init(&q->txq[i].sendq); + } + + tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq, + (unsigned long)q); + tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq, + (unsigned long)q); + + q->fl[0].gen = q->fl[1].gen = 1; + q->fl[0].size = p->fl_size; + q->fl[1].size = p->jumbo_size; + + q->rspq.gen = 1; + q->rspq.size = p->rspq_size; + spin_lock_init(&q->rspq.lock); + skb_queue_head_init(&q->rspq.rx_queue); + + q->txq[TXQ_ETH].stop_thres = nports * + flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); + +#if FL0_PG_CHUNK_SIZE > 0 + q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; +#else + q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); +#endif +#if FL1_PG_CHUNK_SIZE > 0 + q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; +#else + q->fl[1].buf_size = is_offload(adapter) ? + (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : + MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt); +#endif + + q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; + q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; + q->fl[0].order = FL0_PG_ORDER; + q->fl[1].order = FL1_PG_ORDER; + q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; + q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; + + spin_lock_irq(&adapter->sge.reg_lock); + + /* FL threshold comparison uses < */ + ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, + q->rspq.phys_addr, q->rspq.size, + q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); + if (ret) + goto err_unlock; + + for (i = 0; i < SGE_RXQ_PER_SET; ++i) { + ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, + q->fl[i].phys_addr, q->fl[i].size, + q->fl[i].buf_size - SGE_PG_RSVD, + p->cong_thres, 1, 0); + if (ret) + goto err_unlock; + } + + ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, + SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, + q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, + 1, 0); + if (ret) + goto err_unlock; + + if (ntxq > 1) { + ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, + USE_GTS, SGE_CNTXT_OFLD, id, + q->txq[TXQ_OFLD].phys_addr, + q->txq[TXQ_OFLD].size, 0, 1, 0); + if (ret) + goto err_unlock; + } + + if (ntxq > 2) { + ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, + SGE_CNTXT_CTRL, id, + q->txq[TXQ_CTRL].phys_addr, + q->txq[TXQ_CTRL].size, + q->txq[TXQ_CTRL].token, 1, 0); + if (ret) + goto err_unlock; + } + + spin_unlock_irq(&adapter->sge.reg_lock); + + q->adap = adapter; + q->netdev = dev; + q->tx_q = netdevq; + t3_update_qset_coalesce(q, p); + + avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, + GFP_KERNEL | __GFP_COMP); + if (!avail) { + CH_ALERT(adapter, "free list queue 0 initialization failed\n"); + goto err; + } + if (avail < q->fl[0].size) + CH_WARN(adapter, "free list queue 0 enabled with %d credits\n", + avail); + + avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, + GFP_KERNEL | __GFP_COMP); + if (avail < q->fl[1].size) + CH_WARN(adapter, "free list queue 1 enabled with %d credits\n", + avail); + refill_rspq(adapter, &q->rspq, q->rspq.size - 1); + + t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | + V_NEWTIMER(q->rspq.holdoff_tmr)); + + return 0; + +err_unlock: + spin_unlock_irq(&adapter->sge.reg_lock); +err: + t3_free_qset(adapter, q); + return ret; +} + +/** + * t3_start_sge_timers - start SGE 
timer callbacks
+ * @adap: the adapter
+ *
+ * Starts each SGE queue set's timer callback
+ */
+void t3_start_sge_timers(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *q = &adap->sge.qs[i];
+
+ if (q->tx_reclaim_timer.function)
+ mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+ if (q->rx_reclaim_timer.function)
+ mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+ }
+}
+
+/**
+ * t3_stop_sge_timers - stop SGE timer callbacks
+ * @adap: the adapter
+ *
+ * Stops each SGE queue set's timer callback
+ */
+void t3_stop_sge_timers(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *q = &adap->sge.qs[i];
+
+ if (q->tx_reclaim_timer.function)
+ del_timer_sync(&q->tx_reclaim_timer);
+ if (q->rx_reclaim_timer.function)
+ del_timer_sync(&q->rx_reclaim_timer);
+ }
+}
+
+/**
+ * t3_free_sge_resources - free SGE resources
+ * @adap: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t3_free_sge_resources(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i)
+ t3_free_qset(adap, &adap->sge.qs[i]);
+}
+
+/**
+ * t3_sge_start - enable SGE
+ * @adap: the adapter
+ *
+ * Enables the SGE for DMAs. This is the last step in starting packet
+ * transfers.
+ */
+void t3_sge_start(struct adapter *adap)
+{
+ t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
+}
+
+/**
+ * t3_sge_stop - disable SGE operation
+ * @adap: the adapter
+ *
+ * Disables the DMA engine. This can be called in emergencies (e.g.,
+ * from error interrupts) or from normal process context. In the latter
+ * case it also disables any pending queue restart tasklets. Note that
+ * if it is called in interrupt context it cannot disable the restart
+ * tasklets as it cannot wait; however, the tasklets will have no effect
+ * since the doorbells are disabled and the driver will call this again
+ * later from process context, at which time the tasklets will be stopped
+ * if they are still running.
+ */
+void t3_sge_stop(struct adapter *adap)
+{
+ t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
+ if (!in_interrupt()) {
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
+ tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
+ }
+ }
+}
+
+/**
+ * t3_sge_init - initialize SGE
+ * @adap: the adapter
+ * @p: the SGE parameters
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queue sets here, instead the driver
+ * top-level must request those individually. We also do not enable DMA
+ * here, that should be done after the queues have been set up.
+ */
+void t3_sge_init(struct adapter *adap, struct sge_params *p)
+{
+ unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
+
+ ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
+ F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
+ V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
+ V_USERSPACESIZE(ups ?
ups - 1 : 0) | F_ISCSICOALESCING; +#if SGE_NUM_GENBITS == 1 + ctrl |= F_EGRGENCTRL; +#endif + if (adap->params.rev > 0) { + if (!(adap->flags & (USING_MSIX | USING_MSI))) + ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ; + } + t3_write_reg(adap, A_SG_CONTROL, ctrl); + t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | + V_LORCQDRBTHRSH(512)); + t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); + t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | + V_TIMEOUT(200 * core_ticks_per_usec(adap))); + t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, + adap->params.rev < T3_REV_C ? 1000 : 500); + t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); + t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); + t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); + t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff)); + t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024); +} + +/** + * t3_sge_prep - one-time SGE initialization + * @adap: the associated adapter + * @p: SGE parameters + * + * Performs one-time initialization of SGE SW state. Includes determining + * defaults for the assorted SGE parameters, which admins can change until + * they are used to initialize the SGE. + */ +void t3_sge_prep(struct adapter *adap, struct sge_params *p) +{ + int i; + + p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + for (i = 0; i < SGE_QSETS; ++i) { + struct qset_params *q = p->qset + i; + + q->polling = adap->params.rev > 0; + q->coalesce_usecs = 5; + q->rspq_size = 1024; + q->fl_size = 1024; + q->jumbo_size = 512; + q->txq_size[TXQ_ETH] = 1024; + q->txq_size[TXQ_OFLD] = 1024; + q->txq_size[TXQ_CTRL] = 256; + q->cong_thres = 0; + } + + spin_lock_init(&adap->sge.reg_lock); +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h new file mode 100644 index 0000000..29b6c80 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h @@ -0,0 +1,255 @@ +/* + * This file is automatically generated --- any changes will be lost. 
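+ * + * Naming convention used throughout this file: for a field FOO, S_FOO is + * its bit offset, M_FOO its unshifted mask, V_FOO(x) shifts a value into + * the field, G_FOO(x) extracts it, and F_FOO is the shifted mask of a + * single-bit flag.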
+ */ + +#ifndef _SGE_DEFS_H +#define _SGE_DEFS_H + +#define S_EC_CREDITS 0 +#define M_EC_CREDITS 0x7FFF +#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS) +#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS) + +#define S_EC_GTS 15 +#define V_EC_GTS(x) ((x) << S_EC_GTS) +#define F_EC_GTS V_EC_GTS(1U) + +#define S_EC_INDEX 16 +#define M_EC_INDEX 0xFFFF +#define V_EC_INDEX(x) ((x) << S_EC_INDEX) +#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX) + +#define S_EC_SIZE 0 +#define M_EC_SIZE 0xFFFF +#define V_EC_SIZE(x) ((x) << S_EC_SIZE) +#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE) + +#define S_EC_BASE_LO 16 +#define M_EC_BASE_LO 0xFFFF +#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO) +#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO) + +#define S_EC_BASE_HI 0 +#define M_EC_BASE_HI 0xF +#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI) +#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI) + +#define S_EC_RESPQ 4 +#define M_EC_RESPQ 0x7 +#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ) +#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ) + +#define S_EC_TYPE 7 +#define M_EC_TYPE 0x7 +#define V_EC_TYPE(x) ((x) << S_EC_TYPE) +#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE) + +#define S_EC_GEN 10 +#define V_EC_GEN(x) ((x) << S_EC_GEN) +#define F_EC_GEN V_EC_GEN(1U) + +#define S_EC_UP_TOKEN 11 +#define M_EC_UP_TOKEN 0xFFFFF +#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN) +#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN) + +#define S_EC_VALID 31 +#define V_EC_VALID(x) ((x) << S_EC_VALID) +#define F_EC_VALID V_EC_VALID(1U) + +#define S_RQ_MSI_VEC 20 +#define M_RQ_MSI_VEC 0x3F +#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC) +#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC) + +#define S_RQ_INTR_EN 26 +#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN) +#define F_RQ_INTR_EN V_RQ_INTR_EN(1U) + +#define S_RQ_GEN 28 +#define V_RQ_GEN(x) ((x) << S_RQ_GEN) +#define F_RQ_GEN V_RQ_GEN(1U) + +#define S_CQ_INDEX 0 +#define M_CQ_INDEX 0xFFFF +#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX) +#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX) + +#define S_CQ_SIZE 16 +#define M_CQ_SIZE 0xFFFF +#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE) +#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE) + +#define S_CQ_BASE_HI 0 +#define M_CQ_BASE_HI 0xFFFFF +#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI) +#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI) + +#define S_CQ_RSPQ 20 +#define M_CQ_RSPQ 0x3F +#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ) +#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ) + +#define S_CQ_ASYNC_NOTIF 26 +#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF) +#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U) + +#define S_CQ_ARMED 27 +#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED) +#define F_CQ_ARMED V_CQ_ARMED(1U) + +#define S_CQ_ASYNC_NOTIF_SOL 28 +#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL) +#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U) + +#define S_CQ_GEN 29 +#define V_CQ_GEN(x) ((x) << S_CQ_GEN) +#define F_CQ_GEN V_CQ_GEN(1U) + +#define S_CQ_ERR 30 +#define V_CQ_ERR(x) ((x) << S_CQ_ERR) +#define F_CQ_ERR V_CQ_ERR(1U) + +#define S_CQ_OVERFLOW_MODE 31 +#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE) +#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U) + +#define S_CQ_CREDITS 0 +#define M_CQ_CREDITS 0xFFFF +#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS) +#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS) + +#define S_CQ_CREDIT_THRES 16 +#define M_CQ_CREDIT_THRES 0x1FFF +#define 
V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES) +#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES) + +#define S_FL_BASE_HI 0 +#define M_FL_BASE_HI 0xFFFFF +#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI) +#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI) + +#define S_FL_INDEX_LO 20 +#define M_FL_INDEX_LO 0xFFF +#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO) +#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO) + +#define S_FL_INDEX_HI 0 +#define M_FL_INDEX_HI 0xF +#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI) +#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI) + +#define S_FL_SIZE 4 +#define M_FL_SIZE 0xFFFF +#define V_FL_SIZE(x) ((x) << S_FL_SIZE) +#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE) + +#define S_FL_GEN 20 +#define V_FL_GEN(x) ((x) << S_FL_GEN) +#define F_FL_GEN V_FL_GEN(1U) + +#define S_FL_ENTRY_SIZE_LO 21 +#define M_FL_ENTRY_SIZE_LO 0x7FF +#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO) +#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO) + +#define S_FL_ENTRY_SIZE_HI 0 +#define M_FL_ENTRY_SIZE_HI 0x1FFFFF +#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI) +#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI) + +#define S_FL_CONG_THRES 21 +#define M_FL_CONG_THRES 0x3FF +#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES) +#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES) + +#define S_FL_GTS 31 +#define V_FL_GTS(x) ((x) << S_FL_GTS) +#define F_FL_GTS V_FL_GTS(1U) + +#define S_FLD_GEN1 31 +#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1) +#define F_FLD_GEN1 V_FLD_GEN1(1U) + +#define S_FLD_GEN2 0 +#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2) +#define F_FLD_GEN2 V_FLD_GEN2(1U) + +#define S_RSPD_TXQ1_CR 0 +#define M_RSPD_TXQ1_CR 0x7F +#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR) +#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR) + +#define S_RSPD_TXQ1_GTS 7 +#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS) +#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U) + +#define S_RSPD_TXQ2_CR 8 +#define M_RSPD_TXQ2_CR 0x7F +#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR) +#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR) + +#define S_RSPD_TXQ2_GTS 15 +#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS) +#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U) + +#define S_RSPD_TXQ0_CR 16 +#define M_RSPD_TXQ0_CR 0x7F +#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR) +#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR) + +#define S_RSPD_TXQ0_GTS 23 +#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS) +#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U) + +#define S_RSPD_EOP 24 +#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP) +#define F_RSPD_EOP V_RSPD_EOP(1U) + +#define S_RSPD_SOP 25 +#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP) +#define F_RSPD_SOP V_RSPD_SOP(1U) + +#define S_RSPD_ASYNC_NOTIF 26 +#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF) +#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U) + +#define S_RSPD_FL0_GTS 27 +#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS) +#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U) + +#define S_RSPD_FL1_GTS 28 +#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS) +#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U) + +#define S_RSPD_IMM_DATA_VALID 29 +#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID) +#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U) + +#define S_RSPD_OFFLOAD 30 +#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD) +#define F_RSPD_OFFLOAD 
V_RSPD_OFFLOAD(1U) + +#define S_RSPD_GEN1 31 +#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1) +#define F_RSPD_GEN1 V_RSPD_GEN1(1U) + +#define S_RSPD_LEN 0 +#define M_RSPD_LEN 0x7FFFFFFF +#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN) +#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN) + +#define S_RSPD_FLQ 31 +#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ) +#define F_RSPD_FLQ V_RSPD_FLQ(1U) + +#define S_RSPD_GEN2 0 +#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2) +#define F_RSPD_GEN2 V_RSPD_GEN2(1U) + +#define S_RSPD_INR_VEC 1 +#define M_RSPD_INR_VEC 0x7F +#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC) +#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC) + +#endif /* _SGE_DEFS_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h new file mode 100644 index 0000000..852c399 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h @@ -0,0 +1,1495 @@ +/* + * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef T3_CPL_H +#define T3_CPL_H + +#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD) +# include <asm/byteorder.h> +#endif + +enum CPL_opcode { + CPL_PASS_OPEN_REQ = 0x1, + CPL_PASS_ACCEPT_RPL = 0x2, + CPL_ACT_OPEN_REQ = 0x3, + CPL_SET_TCB = 0x4, + CPL_SET_TCB_FIELD = 0x5, + CPL_GET_TCB = 0x6, + CPL_PCMD = 0x7, + CPL_CLOSE_CON_REQ = 0x8, + CPL_CLOSE_LISTSRV_REQ = 0x9, + CPL_ABORT_REQ = 0xA, + CPL_ABORT_RPL = 0xB, + CPL_TX_DATA = 0xC, + CPL_RX_DATA_ACK = 0xD, + CPL_TX_PKT = 0xE, + CPL_RTE_DELETE_REQ = 0xF, + CPL_RTE_WRITE_REQ = 0x10, + CPL_RTE_READ_REQ = 0x11, + CPL_L2T_WRITE_REQ = 0x12, + CPL_L2T_READ_REQ = 0x13, + CPL_SMT_WRITE_REQ = 0x14, + CPL_SMT_READ_REQ = 0x15, + CPL_TX_PKT_LSO = 0x16, + CPL_PCMD_READ = 0x17, + CPL_BARRIER = 0x18, + CPL_TID_RELEASE = 0x1A, + + CPL_CLOSE_LISTSRV_RPL = 0x20, + CPL_ERROR = 0x21, + CPL_GET_TCB_RPL = 0x22, + CPL_L2T_WRITE_RPL = 0x23, + CPL_PCMD_READ_RPL = 0x24, + CPL_PCMD_RPL = 0x25, + CPL_PEER_CLOSE = 0x26, + CPL_RTE_DELETE_RPL = 0x27, + CPL_RTE_WRITE_RPL = 0x28, + CPL_RX_DDP_COMPLETE = 0x29, + CPL_RX_PHYS_ADDR = 0x2A, + CPL_RX_PKT = 0x2B, + CPL_RX_URG_NOTIFY = 0x2C, + CPL_SET_TCB_RPL = 0x2D, + CPL_SMT_WRITE_RPL = 0x2E, + CPL_TX_DATA_ACK = 0x2F, + + CPL_ABORT_REQ_RSS = 0x30, + CPL_ABORT_RPL_RSS = 0x31, + CPL_CLOSE_CON_RPL = 0x32, + CPL_ISCSI_HDR = 0x33, + CPL_L2T_READ_RPL = 0x34, + CPL_RDMA_CQE = 0x35, + CPL_RDMA_CQE_READ_RSP = 0x36, + CPL_RDMA_CQE_ERR = 0x37, + CPL_RTE_READ_RPL = 0x38, + CPL_RX_DATA = 0x39, + + CPL_ACT_OPEN_RPL = 0x40, + CPL_PASS_OPEN_RPL = 0x41, + CPL_RX_DATA_DDP = 0x42, + CPL_SMT_READ_RPL = 0x43, + + CPL_ACT_ESTABLISH = 0x50, + CPL_PASS_ESTABLISH = 0x51, + + CPL_PASS_ACCEPT_REQ = 0x70, + + CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */ + + CPL_TX_DMA_ACK = 0xA0, + CPL_RDMA_READ_REQ = 0xA1, + CPL_RDMA_TERMINATE = 0xA2, + CPL_TRACE_PKT = 0xA3, + CPL_RDMA_EC_STATUS = 0xA5, + + NUM_CPL_CMDS /* must be last and previous entries must be sorted */ +}; + +enum CPL_error { + CPL_ERR_NONE = 0, + CPL_ERR_TCAM_PARITY = 1, + CPL_ERR_TCAM_FULL = 3, + CPL_ERR_CONN_RESET = 20, + CPL_ERR_CONN_EXIST = 22, + CPL_ERR_ARP_MISS = 23, + CPL_ERR_BAD_SYN = 24, + CPL_ERR_CONN_TIMEDOUT = 30, + CPL_ERR_XMIT_TIMEDOUT = 31, + CPL_ERR_PERSIST_TIMEDOUT = 32, + CPL_ERR_FINWAIT2_TIMEDOUT = 33, + CPL_ERR_KEEPALIVE_TIMEDOUT = 34, + CPL_ERR_RTX_NEG_ADVICE = 35, + CPL_ERR_PERSIST_NEG_ADVICE = 36, + CPL_ERR_ABORT_FAILED = 42, + CPL_ERR_GENERAL = 99 +}; + +enum { + CPL_CONN_POLICY_AUTO = 0, + CPL_CONN_POLICY_ASK = 1, + CPL_CONN_POLICY_DENY = 3 +}; + +enum { + ULP_MODE_NONE = 0, + ULP_MODE_ISCSI = 2, + ULP_MODE_RDMA = 4, + ULP_MODE_TCPDDP = 5 +}; + +enum { + ULP_CRC_HEADER = 1 << 0, + ULP_CRC_DATA = 1 << 1 +}; + +enum { + CPL_PASS_OPEN_ACCEPT, + CPL_PASS_OPEN_REJECT +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, + CPL_ABORT_POST_CLOSE_REQ = 2 +}; + +enum { /* TX_PKT_LSO ethernet types */ + CPL_ETH_II, + CPL_ETH_II_VLAN, + CPL_ETH_802_3, + CPL_ETH_802_3_VLAN +}; + +enum { /* TCP congestion control algorithms */ + CONG_ALG_RENO, + CONG_ALG_TAHOE, + CONG_ALG_NEWRENO, + CONG_ALG_HIGHSPEED +}; + +enum { /* RSS hash type */ + RSS_HASH_NONE = 0, + RSS_HASH_2_TUPLE = 1, + RSS_HASH_4_TUPLE = 2, + RSS_HASH_TCPV6 = 3 +}; + +union opcode_tid { + __be32 opcode_tid; + __u8 opcode; +}; + +#define S_OPCODE 24 +#define V_OPCODE(x) ((x) << S_OPCODE) +#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) +#define G_TID(x) ((x) & 0xFFFFFF) + +#define S_QNUM 0 +#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF) + +#define S_HASHTYPE 22 +#define M_HASHTYPE 0x3
+#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE) + +/* tid is assumed to be 24-bits */ +#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid)) + +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) + +/* extract the TID from a CPL command */ +#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd)))) + +struct tcp_options { + __be16 mss; + __u8 wsf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:5; + __u8 ecn:1; + __u8 sack:1; + __u8 tstamp:1; +#else + __u8 tstamp:1; + __u8 sack:1; + __u8 ecn:1; + __u8:5; +#endif +}; + +struct rss_header { + __u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 cpu_idx:6; + __u8 hash_type:2; +#else + __u8 hash_type:2; + __u8 cpu_idx:6; +#endif + __be16 cq_idx; + __be32 rss_hash_val; +}; + +#ifndef CHELSIO_FW +struct work_request_hdr { + __be32 wr_hi; + __be32 wr_lo; +}; + +/* wr_hi fields */ +#define S_WR_SGE_CREDITS 0 +#define M_WR_SGE_CREDITS 0xFF +#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS) +#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS) + +#define S_WR_SGLSFLT 8 +#define M_WR_SGLSFLT 0xFF +#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT) +#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT) + +#define S_WR_BCNTLFLT 16 +#define M_WR_BCNTLFLT 0xF +#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT) +#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT) + +#define S_WR_DATATYPE 20 +#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE) +#define F_WR_DATATYPE V_WR_DATATYPE(1U) + +#define S_WR_COMPL 21 +#define V_WR_COMPL(x) ((x) << S_WR_COMPL) +#define F_WR_COMPL V_WR_COMPL(1U) + +#define S_WR_EOP 22 +#define V_WR_EOP(x) ((x) << S_WR_EOP) +#define F_WR_EOP V_WR_EOP(1U) + +#define S_WR_SOP 23 +#define V_WR_SOP(x) ((x) << S_WR_SOP) +#define F_WR_SOP V_WR_SOP(1U) + +#define S_WR_OP 24 +#define M_WR_OP 0xFF +#define V_WR_OP(x) ((x) << S_WR_OP) +#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP) + +/* wr_lo fields */ +#define S_WR_LEN 0 +#define M_WR_LEN 0xFF +#define V_WR_LEN(x) ((x) << S_WR_LEN) +#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN) + +#define S_WR_TID 8 +#define M_WR_TID 0xFFFFF +#define V_WR_TID(x) ((x) << S_WR_TID) +#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID) + +#define S_WR_CR_FLUSH 30 +#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH) +#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U) + +#define S_WR_GEN 31 +#define V_WR_GEN(x) ((x) << S_WR_GEN) +#define F_WR_GEN V_WR_GEN(1U) + +# define WR_HDR struct work_request_hdr wr +# define RSS_HDR +#else +# define WR_HDR +# define RSS_HDR struct rss_header rss_hdr; +#endif + +/* option 0 lower-half fields */ +#define S_CPL_STATUS 0 +#define M_CPL_STATUS 0xFF +#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS) +#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS) + +#define S_INJECT_TIMER 6 +#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER) +#define F_INJECT_TIMER V_INJECT_TIMER(1U) + +#define S_NO_OFFLOAD 7 +#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD) +#define F_NO_OFFLOAD V_NO_OFFLOAD(1U) + +#define S_ULP_MODE 8 +#define M_ULP_MODE 0xF +#define V_ULP_MODE(x) ((x) << S_ULP_MODE) +#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE) + +#define S_RCV_BUFSIZ 12 +#define M_RCV_BUFSIZ 0x3FFF +#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ) +#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ) + +#define S_TOS 26 +#define M_TOS 0x3F +#define V_TOS(x) ((x) << S_TOS) +#define G_TOS(x) (((x) >> S_TOS) & M_TOS) + +/* option 0 upper-half fields */ +#define S_DELACK 0 +#define V_DELACK(x) ((x) << S_DELACK) +#define F_DELACK V_DELACK(1U) + 
+#define S_NO_CONG 1 +#define V_NO_CONG(x) ((x) << S_NO_CONG) +#define F_NO_CONG V_NO_CONG(1U) + +#define S_SRC_MAC_SEL 2 +#define M_SRC_MAC_SEL 0x3 +#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL) +#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL) + +#define S_L2T_IDX 4 +#define M_L2T_IDX 0x7FF +#define V_L2T_IDX(x) ((x) << S_L2T_IDX) +#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX) + +#define S_TX_CHANNEL 15 +#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL) +#define F_TX_CHANNEL V_TX_CHANNEL(1U) + +#define S_TCAM_BYPASS 16 +#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS) +#define F_TCAM_BYPASS V_TCAM_BYPASS(1U) + +#define S_NAGLE 17 +#define V_NAGLE(x) ((x) << S_NAGLE) +#define F_NAGLE V_NAGLE(1U) + +#define S_WND_SCALE 18 +#define M_WND_SCALE 0xF +#define V_WND_SCALE(x) ((x) << S_WND_SCALE) +#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE) + +#define S_KEEP_ALIVE 22 +#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE) +#define F_KEEP_ALIVE V_KEEP_ALIVE(1U) + +#define S_MAX_RETRANS 23 +#define M_MAX_RETRANS 0xF +#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS) +#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS) + +#define S_MAX_RETRANS_OVERRIDE 27 +#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE) +#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U) + +#define S_MSS_IDX 28 +#define M_MSS_IDX 0xF +#define V_MSS_IDX(x) ((x) << S_MSS_IDX) +#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX) + +/* option 1 fields */ +#define S_RSS_ENABLE 0 +#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE) +#define F_RSS_ENABLE V_RSS_ENABLE(1U) + +#define S_RSS_MASK_LEN 1 +#define M_RSS_MASK_LEN 0x7 +#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN) +#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN) + +#define S_CPU_IDX 4 +#define M_CPU_IDX 0x3F +#define V_CPU_IDX(x) ((x) << S_CPU_IDX) +#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX) + +#define S_MAC_MATCH_VALID 18 +#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID) +#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U) + +#define S_CONN_POLICY 19 +#define M_CONN_POLICY 0x3 +#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY) +#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY) + +#define S_SYN_DEFENSE 21 +#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE) +#define F_SYN_DEFENSE V_SYN_DEFENSE(1U) + +#define S_VLAN_PRI 22 +#define M_VLAN_PRI 0x3 +#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI) +#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI) + +#define S_VLAN_PRI_VALID 24 +#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID) +#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U) + +#define S_PKT_TYPE 25 +#define M_PKT_TYPE 0x3 +#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE) +#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE) + +#define S_MAC_MATCH 27 +#define M_MAC_MATCH 0x1F +#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH) +#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH) + +/* option 2 fields */ +#define S_CPU_INDEX 0 +#define M_CPU_INDEX 0x7F +#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX) +#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX) + +#define S_CPU_INDEX_VALID 7 +#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID) +#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U) + +#define S_RX_COALESCE 8 +#define M_RX_COALESCE 0x3 +#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE) +#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE) + +#define S_RX_COALESCE_VALID 10 +#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID) +#define 
F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U) + +#define S_CONG_CONTROL_FLAVOR 11 +#define M_CONG_CONTROL_FLAVOR 0x3 +#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR) +#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR) + +#define S_PACING_FLAVOR 13 +#define M_PACING_FLAVOR 0x3 +#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR) +#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR) + +#define S_FLAVORS_VALID 15 +#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID) +#define F_FLAVORS_VALID V_FLAVORS_VALID(1U) + +#define S_RX_FC_DISABLE 16 +#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE) +#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U) + +#define S_RX_FC_VALID 17 +#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID) +#define F_RX_FC_VALID V_RX_FC_VALID(1U) + +struct cpl_pass_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 opt0h; + __be32 opt0l; + __be32 peer_netmask; + __be32 opt1; +}; + +struct cpl_pass_open_rpl { + RSS_HDR union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __u8 resvd[7]; + __u8 status; +}; + +struct cpl_pass_establish { + RSS_HDR union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 tos_tid; + __be16 l2t_idx; + __be16 tcp_opt; + __be32 snd_isn; + __be32 rcv_isn; +}; + +/* cpl_pass_establish.tos_tid fields */ +#define S_PASS_OPEN_TID 0 +#define M_PASS_OPEN_TID 0xFFFFFF +#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID) +#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID) + +#define S_PASS_OPEN_TOS 24 +#define M_PASS_OPEN_TOS 0xFF +#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS) +#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS) + +/* cpl_pass_establish.l2t_idx fields */ +#define S_L2T_IDX16 5 +#define M_L2T_IDX16 0x7FF +#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16) +#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16) + +/* cpl_pass_establish.tcp_opt fields (also applies act_open_establish) */ +#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) +#define G_TCPOPT_SACK(x) (((x) >> 6) & 1) +#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) +#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) +#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf) + +struct cpl_pass_accept_req { + RSS_HDR union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 tos_tid; + struct tcp_options tcp_options; + __u8 dst_mac[6]; + __be16 vlan_tag; + __u8 src_mac[6]; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:3; + __u8 addr_idx:3; + __u8 port_idx:1; + __u8 exact_match:1; +#else + __u8 exact_match:1; + __u8 port_idx:1; + __u8 addr_idx:3; + __u8:3; +#endif + __u8 rsvd; + __be32 rcv_isn; + __be32 rsvd2; +}; + +struct cpl_pass_accept_rpl { + WR_HDR; + union opcode_tid ot; + __be32 opt2; + __be32 rsvd; + __be32 peer_ip; + __be32 opt0h; + __be32 opt0l_status; +}; + +struct cpl_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 opt0h; + __be32 opt0l; + __be32 params; + __be32 opt2; +}; + +/* cpl_act_open_req.params fields */ +#define S_AOPEN_VLAN_PRI 9 +#define M_AOPEN_VLAN_PRI 0x3 +#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI) +#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI) + +#define S_AOPEN_VLAN_PRI_VALID 11 +#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << 
S_AOPEN_VLAN_PRI_VALID) +#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U) + +#define S_AOPEN_PKT_TYPE 12 +#define M_AOPEN_PKT_TYPE 0x3 +#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE) +#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE) + +#define S_AOPEN_MAC_MATCH 14 +#define M_AOPEN_MAC_MATCH 0x1F +#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH) +#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH) + +#define S_AOPEN_MAC_MATCH_VALID 19 +#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID) +#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U) + +#define S_AOPEN_IFF_VLAN 20 +#define M_AOPEN_IFF_VLAN 0xFFF +#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN) +#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN) + +struct cpl_act_open_rpl { + RSS_HDR union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 atid; + __u8 rsvd[3]; + __u8 status; +}; + +struct cpl_act_establish { + RSS_HDR union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 tos_tid; + __be16 l2t_idx; + __be16 tcp_opt; + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_get_tcb { + WR_HDR; + union opcode_tid ot; + __be16 cpuno; + __be16 rsvd; +}; + +struct cpl_get_tcb_rpl { + RSS_HDR union opcode_tid ot; + __u8 rsvd; + __u8 status; + __be16 len; +}; + +struct cpl_set_tcb { + WR_HDR; + union opcode_tid ot; + __u8 reply; + __u8 cpu_idx; + __be16 len; +}; + +/* cpl_set_tcb.reply fields */ +#define S_NO_REPLY 7 +#define V_NO_REPLY(x) ((x) << S_NO_REPLY) +#define F_NO_REPLY V_NO_REPLY(1U) + +struct cpl_set_tcb_field { + WR_HDR; + union opcode_tid ot; + __u8 reply; + __u8 cpu_idx; + __be16 word; + __be64 mask; + __be64 val; +}; + +struct cpl_set_tcb_rpl { + RSS_HDR union opcode_tid ot; + __u8 rsvd[3]; + __u8 status; +}; + +struct cpl_pcmd { + WR_HDR; + union opcode_tid ot; + __u8 rsvd[3]; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 src:1; + __u8 bundle:1; + __u8 channel:1; + __u8:5; +#else + __u8:5; + __u8 channel:1; + __u8 bundle:1; + __u8 src:1; +#endif + __be32 pcmd_parm[2]; +}; + +struct cpl_pcmd_reply { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd; + __be16 len; +}; + +struct cpl_close_con_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_close_con_rpl { + RSS_HDR union opcode_tid ot; + __u8 rsvd[3]; + __u8 status; + __be32 snd_nxt; + __be32 rcv_nxt; +}; + +struct cpl_close_listserv_req { + WR_HDR; + union opcode_tid ot; + __u8 rsvd0; + __u8 cpu_idx; + __be16 rsvd1; +}; + +struct cpl_close_listserv_rpl { + RSS_HDR union opcode_tid ot; + __u8 rsvd[3]; + __u8 status; +}; + +struct cpl_abort_req_rss { + RSS_HDR union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 status; + __u8 rsvd2[6]; +}; + +struct cpl_abort_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 cmd; + __u8 rsvd2[6]; +}; + +struct cpl_abort_rpl_rss { + RSS_HDR union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 status; + __u8 rsvd2[6]; +}; + +struct cpl_abort_rpl { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 cmd; + __u8 rsvd2[6]; +}; + +struct cpl_peer_close { + RSS_HDR union opcode_tid ot; + __be32 rcv_nxt; +}; + +struct tx_data_wr { + __be32 wr_hi; + __be32 wr_lo; + __be32 len; + __be32 flags; + __be32 sndseq; + __be32 param; +}; + +/* tx_data_wr.flags fields */ +#define S_TX_ACK_PAGES 21 +#define M_TX_ACK_PAGES 0x7 +#define V_TX_ACK_PAGES(x) ((x) 
<< S_TX_ACK_PAGES) +#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES) + +/* tx_data_wr.param fields */ +#define S_TX_PORT 0 +#define M_TX_PORT 0x7 +#define V_TX_PORT(x) ((x) << S_TX_PORT) +#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT) + +#define S_TX_MSS 4 +#define M_TX_MSS 0xF +#define V_TX_MSS(x) ((x) << S_TX_MSS) +#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS) + +#define S_TX_QOS 8 +#define M_TX_QOS 0xFF +#define V_TX_QOS(x) ((x) << S_TX_QOS) +#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS) + +#define S_TX_SNDBUF 16 +#define M_TX_SNDBUF 0xFFFF +#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF) +#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF) + +struct cpl_tx_data { + union opcode_tid ot; + __be32 len; + __be32 rsvd; + __be16 urg; + __be16 flags; +}; + +/* cpl_tx_data.flags fields */ +#define S_TX_ULP_SUBMODE 6 +#define M_TX_ULP_SUBMODE 0xF +#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE) +#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE) + +#define S_TX_ULP_MODE 10 +#define M_TX_ULP_MODE 0xF +#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE) +#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE) + +#define S_TX_SHOVE 14 +#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE) +#define F_TX_SHOVE V_TX_SHOVE(1U) + +#define S_TX_MORE 15 +#define V_TX_MORE(x) ((x) << S_TX_MORE) +#define F_TX_MORE V_TX_MORE(1U) + +/* additional tx_data_wr.flags fields */ +#define S_TX_CPU_IDX 0 +#define M_TX_CPU_IDX 0x3F +#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX) +#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX) + +#define S_TX_URG 16 +#define V_TX_URG(x) ((x) << S_TX_URG) +#define F_TX_URG V_TX_URG(1U) + +#define S_TX_CLOSE 17 +#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE) +#define F_TX_CLOSE V_TX_CLOSE(1U) + +#define S_TX_INIT 18 +#define V_TX_INIT(x) ((x) << S_TX_INIT) +#define F_TX_INIT V_TX_INIT(1U) + +#define S_TX_IMM_ACK 19 +#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK) +#define F_TX_IMM_ACK V_TX_IMM_ACK(1U) + +#define S_TX_IMM_DMA 20 +#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA) +#define F_TX_IMM_DMA V_TX_IMM_DMA(1U) + +struct cpl_tx_data_ack { + RSS_HDR union opcode_tid ot; + __be32 ack_seq; +}; + +struct cpl_wr_ack { + RSS_HDR union opcode_tid ot; + __be16 credits; + __be16 rsvd; + __be32 snd_nxt; + __be32 snd_una; +}; + +struct cpl_rdma_ec_status { + RSS_HDR union opcode_tid ot; + __u8 rsvd[3]; + __u8 status; +}; + +struct mngt_pktsched_wr { + __be32 wr_hi; + __be32 wr_lo; + __u8 mngt_opcode; + __u8 rsvd[7]; + __u8 sched; + __u8 idx; + __u8 min; + __u8 max; + __u8 binding; + __u8 rsvd1[3]; +}; + +struct cpl_iscsi_hdr { + RSS_HDR union opcode_tid ot; + __be16 pdu_len_ddp; + __be16 len; + __be32 seq; + __be16 urg; + __u8 rsvd; + __u8 status; +}; + +/* cpl_iscsi_hdr.pdu_len_ddp fields */ +#define S_ISCSI_PDU_LEN 0 +#define M_ISCSI_PDU_LEN 0x7FFF +#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN) +#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN) + +#define S_ISCSI_DDP 15 +#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP) +#define F_ISCSI_DDP V_ISCSI_DDP(1U) + +struct cpl_rx_data { + RSS_HDR union opcode_tid ot; + __be16 rsvd; + __be16 len; + __be32 seq; + __be16 urg; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 dack_mode:2; + __u8 psh:1; + __u8 heartbeat:1; + __u8:4; +#else + __u8:4; + __u8 heartbeat:1; + __u8 psh:1; + __u8 dack_mode:2; +#endif + __u8 status; +}; + +struct cpl_rx_data_ack { + WR_HDR; + union opcode_tid ot; + __be32 credit_dack; +}; + +/* cpl_rx_data_ack.ack_seq fields */ +#define 
S_RX_CREDITS 0 +#define M_RX_CREDITS 0x7FFFFFF +#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS) +#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS) + +#define S_RX_MODULATE 27 +#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE) +#define F_RX_MODULATE V_RX_MODULATE(1U) + +#define S_RX_FORCE_ACK 28 +#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK) +#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U) + +#define S_RX_DACK_MODE 29 +#define M_RX_DACK_MODE 0x3 +#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE) +#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE) + +#define S_RX_DACK_CHANGE 31 +#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) +#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) + +struct cpl_rx_urg_notify { + RSS_HDR union opcode_tid ot; + __be32 seq; +}; + +struct cpl_rx_ddp_complete { + RSS_HDR union opcode_tid ot; + __be32 ddp_report; +}; + +struct cpl_rx_data_ddp { + RSS_HDR union opcode_tid ot; + __be16 urg; + __be16 len; + __be32 seq; + union { + __be32 nxt_seq; + __be32 ddp_report; + }; + __be32 ulp_crc; + __be32 ddpvld_status; +}; + +/* cpl_rx_data_ddp.ddpvld_status fields */ +#define S_DDP_STATUS 0 +#define M_DDP_STATUS 0xFF +#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS) +#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS) + +#define S_DDP_VALID 15 +#define M_DDP_VALID 0x1FFFF +#define V_DDP_VALID(x) ((x) << S_DDP_VALID) +#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID) + +#define S_DDP_PPOD_MISMATCH 15 +#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH) +#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U) + +#define S_DDP_PDU 16 +#define V_DDP_PDU(x) ((x) << S_DDP_PDU) +#define F_DDP_PDU V_DDP_PDU(1U) + +#define S_DDP_LLIMIT_ERR 17 +#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR) +#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U) + +#define S_DDP_PPOD_PARITY_ERR 18 +#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR) +#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U) + +#define S_DDP_PADDING_ERR 19 +#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR) +#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U) + +#define S_DDP_HDRCRC_ERR 20 +#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR) +#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U) + +#define S_DDP_DATACRC_ERR 21 +#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR) +#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U) + +#define S_DDP_INVALID_TAG 22 +#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG) +#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U) + +#define S_DDP_ULIMIT_ERR 23 +#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR) +#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U) + +#define S_DDP_OFFSET_ERR 24 +#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR) +#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U) + +#define S_DDP_COLOR_ERR 25 +#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR) +#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U) + +#define S_DDP_TID_MISMATCH 26 +#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH) +#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U) + +#define S_DDP_INVALID_PPOD 27 +#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD) +#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U) + +#define S_DDP_ULP_MODE 28 +#define M_DDP_ULP_MODE 0xF +#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE) +#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE) + +/* cpl_rx_data_ddp.ddp_report fields */ +#define S_DDP_OFFSET 0 +#define M_DDP_OFFSET 0x3FFFFF +#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET) +#define 
G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET) + +#define S_DDP_URG 24 +#define V_DDP_URG(x) ((x) << S_DDP_URG) +#define F_DDP_URG V_DDP_URG(1U) + +#define S_DDP_PSH 25 +#define V_DDP_PSH(x) ((x) << S_DDP_PSH) +#define F_DDP_PSH V_DDP_PSH(1U) + +#define S_DDP_BUF_COMPLETE 26 +#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE) +#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U) + +#define S_DDP_BUF_TIMED_OUT 27 +#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT) +#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U) + +#define S_DDP_BUF_IDX 28 +#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX) +#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U) + +struct cpl_tx_pkt { + WR_HDR; + __be32 cntrl; + __be32 len; +}; + +struct cpl_tx_pkt_lso { + WR_HDR; + __be32 cntrl; + __be32 len; + + __be32 rsvd; + __be32 lso_info; +}; + +/* cpl_tx_pkt*.cntrl fields */ +#define S_TXPKT_VLAN 0 +#define M_TXPKT_VLAN 0xFFFF +#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN) +#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN) + +#define S_TXPKT_INTF 16 +#define M_TXPKT_INTF 0xF +#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF) +#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF) + +#define S_TXPKT_IPCSUM_DIS 20 +#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS) +#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U) + +#define S_TXPKT_L4CSUM_DIS 21 +#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS) +#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U) + +#define S_TXPKT_VLAN_VLD 22 +#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD) +#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U) + +#define S_TXPKT_LOOPBACK 23 +#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK) +#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U) + +#define S_TXPKT_OPCODE 24 +#define M_TXPKT_OPCODE 0xFF +#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE) +#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE) + +/* cpl_tx_pkt_lso.lso_info fields */ +#define S_LSO_MSS 0 +#define M_LSO_MSS 0x3FFF +#define V_LSO_MSS(x) ((x) << S_LSO_MSS) +#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS) + +#define S_LSO_ETH_TYPE 14 +#define M_LSO_ETH_TYPE 0x3 +#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE) +#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE) + +#define S_LSO_TCPHDR_WORDS 16 +#define M_LSO_TCPHDR_WORDS 0xF +#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS) +#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS) + +#define S_LSO_IPHDR_WORDS 20 +#define M_LSO_IPHDR_WORDS 0xF +#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS) +#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS) + +#define S_LSO_IPV6 24 +#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6) +#define F_LSO_IPV6 V_LSO_IPV6(1U) + +struct cpl_trace_pkt { +#ifdef CHELSIO_FW + __u8 rss_opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 err:1; + __u8:7; +#else + __u8:7; + __u8 err:1; +#endif + __u8 rsvd0; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 qid:4; + __u8:4; +#else + __u8:4; + __u8 qid:4; +#endif + __be32 tstamp; +#endif /* CHELSIO_FW */ + + __u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 iff:4; + __u8:4; +#else + __u8:4; + __u8 iff:4; +#endif + __u8 rsvd[4]; + __be16 len; +}; + +struct cpl_rx_pkt { + RSS_HDR __u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 iff:4; + __u8 csum_valid:1; + __u8 ipmi_pkt:1; + __u8 vlan_valid:1; + __u8 fragment:1; +#else + __u8 fragment:1; + __u8 vlan_valid:1; + __u8 ipmi_pkt:1; + __u8 csum_valid:1; + __u8 iff:4; 
+#endif + __be16 csum; + __be16 vlan; + __be16 len; +}; + +struct cpl_l2t_write_req { + WR_HDR; + union opcode_tid ot; + __be32 params; + __u8 rsvd[2]; + __u8 dst_mac[6]; +}; + +/* cpl_l2t_write_req.params fields */ +#define S_L2T_W_IDX 0 +#define M_L2T_W_IDX 0x7FF +#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX) +#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX) + +#define S_L2T_W_VLAN 11 +#define M_L2T_W_VLAN 0xFFF +#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN) +#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN) + +#define S_L2T_W_IFF 23 +#define M_L2T_W_IFF 0xF +#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF) +#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF) + +#define S_L2T_W_PRIO 27 +#define M_L2T_W_PRIO 0x7 +#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO) +#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO) + +struct cpl_l2t_write_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd[3]; +}; + +struct cpl_l2t_read_req { + WR_HDR; + union opcode_tid ot; + __be16 rsvd; + __be16 l2t_idx; +}; + +struct cpl_l2t_read_rpl { + RSS_HDR union opcode_tid ot; + __be32 params; + __u8 rsvd[2]; + __u8 dst_mac[6]; +}; + +/* cpl_l2t_read_rpl.params fields */ +#define S_L2T_R_PRIO 0 +#define M_L2T_R_PRIO 0x7 +#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO) +#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO) + +#define S_L2T_R_VLAN 8 +#define M_L2T_R_VLAN 0xFFF +#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN) +#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN) + +#define S_L2T_R_IFF 20 +#define M_L2T_R_IFF 0xF +#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF) +#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF) + +#define S_L2T_STATUS 24 +#define M_L2T_STATUS 0xFF +#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS) +#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS) + +struct cpl_smt_write_req { + WR_HDR; + union opcode_tid ot; + __u8 rsvd0; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 mtu_idx:4; + __u8 iff:4; +#else + __u8 iff:4; + __u8 mtu_idx:4; +#endif + __be16 rsvd2; + __be16 rsvd3; + __u8 src_mac1[6]; + __be16 rsvd4; + __u8 src_mac0[6]; +}; + +struct cpl_smt_write_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd[3]; +}; + +struct cpl_smt_read_req { + WR_HDR; + union opcode_tid ot; + __u8 rsvd0; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:4; + __u8 iff:4; +#else + __u8 iff:4; + __u8:4; +#endif + __be16 rsvd2; +}; + +struct cpl_smt_read_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 mtu_idx:4; + __u8:4; +#else + __u8:4; + __u8 mtu_idx:4; +#endif + __be16 rsvd2; + __be16 rsvd3; + __u8 src_mac1[6]; + __be16 rsvd4; + __u8 src_mac0[6]; +}; + +struct cpl_rte_delete_req { + WR_HDR; + union opcode_tid ot; + __be32 params; +}; + +/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */ +#define S_RTE_REQ_LUT_IX 8 +#define M_RTE_REQ_LUT_IX 0x7FF +#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX) +#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX) + +#define S_RTE_REQ_LUT_BASE 19 +#define M_RTE_REQ_LUT_BASE 0x7FF +#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE) +#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE) + +#define S_RTE_READ_REQ_SELECT 31 +#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT) +#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U) + +struct cpl_rte_delete_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd[3]; +}; + +struct cpl_rte_write_req { + 
WR_HDR; + union opcode_tid ot; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:6; + __u8 write_tcam:1; + __u8 write_l2t_lut:1; +#else + __u8 write_l2t_lut:1; + __u8 write_tcam:1; + __u8:6; +#endif + __u8 rsvd[3]; + __be32 lut_params; + __be16 rsvd2; + __be16 l2t_idx; + __be32 netmask; + __be32 faddr; +}; + +/* cpl_rte_write_req.lut_params fields */ +#define S_RTE_WRITE_REQ_LUT_IX 10 +#define M_RTE_WRITE_REQ_LUT_IX 0x7FF +#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX) +#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX) + +#define S_RTE_WRITE_REQ_LUT_BASE 21 +#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF +#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE) +#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE) + +struct cpl_rte_write_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd[3]; +}; + +struct cpl_rte_read_req { + WR_HDR; + union opcode_tid ot; + __be32 params; +}; + +struct cpl_rte_read_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd0; + __be16 l2t_idx; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:7; + __u8 select:1; +#else + __u8 select:1; + __u8:7; +#endif + __u8 rsvd2[3]; + __be32 addr; +}; + +struct cpl_tid_release { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_barrier { + WR_HDR; + __u8 opcode; + __u8 rsvd[7]; +}; + +struct cpl_rdma_read_req { + __u8 opcode; + __u8 rsvd[15]; +}; + +struct cpl_rdma_terminate { +#ifdef CHELSIO_FW + __u8 opcode; + __u8 rsvd[2]; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 rspq:3; + __u8:5; +#else + __u8:5; + __u8 rspq:3; +#endif + __be32 tid_len; +#endif + __be32 msn; + __be32 mo; + __u8 data[0]; +}; + +/* cpl_rdma_terminate.tid_len fields */ +#define S_FLIT_CNT 0 +#define M_FLIT_CNT 0xFF +#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT) +#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT) + +#define S_TERM_TID 8 +#define M_TERM_TID 0xFFFFF +#define V_TERM_TID(x) ((x) << S_TERM_TID) +#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID) + +/* ULP_TX opcodes */ +enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 }; + +#define S_ULPTX_CMD 28 +#define M_ULPTX_CMD 0xF +#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD) + +#define S_ULPTX_NFLITS 0 +#define M_ULPTX_NFLITS 0xFF +#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS) + +struct ulp_mem_io { + WR_HDR; + __be32 cmd_lock_addr; + __be32 len; +}; + +/* ulp_mem_io.cmd_lock_addr fields */ +#define S_ULP_MEMIO_ADDR 0 +#define M_ULP_MEMIO_ADDR 0x7FFFFFF +#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR) +#define S_ULP_MEMIO_LOCK 27 +#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK) +#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U) + +/* ulp_mem_io.len fields */ +#define S_ULP_MEMIO_DATA_LEN 28 +#define M_ULP_MEMIO_DATA_LEN 0xF +#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN) + +#endif /* T3_CPL_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c new file mode 100644 index 0000000..44ac2f4 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -0,0 +1,3785 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "common.h" +#include "regs.h" +#include "sge_defs.h" +#include "firmware_exports.h" + +static void t3_port_intr_clear(struct adapter *adapter, int idx); + +/** + * t3_wait_op_done_val - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. + */ + +int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp) +{ + while (1) { + u32 val = t3_read_reg(adapter, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +/** + * t3_write_regs - write a bunch of registers + * @adapter: the adapter to program + * @p: an array of register address/register value pairs + * @n: the number of address/value pairs + * @offset: register address offset + * + * Takes an array of register address/register value pairs and writes each + * value to the corresponding register. Register addresses are adjusted + * by the supplied offset. + */ +void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p, + int n, unsigned int offset) +{ + while (n--) { + t3_write_reg(adapter, p->reg_addr + offset, p->val); + p++; + } +} + +/** + * t3_set_reg_field - set a register field to a value + * @adapter: the adapter to program + * @addr: the register address + * @mask: specifies the portion of the register to modify + * @val: the new value for the register field + * + * Sets a register field specified by the supplied mask to the + * given value. 
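+ * + * For example, t3_sge_start() in sge.c uses this to set a single bit in + * place: t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, + * F_GLOBALENABLE) enables the SGE without disturbing the rest of the + * control register.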
+ */ +void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, + u32 val) +{ + u32 v = t3_read_reg(adapter, addr) & ~mask; + + t3_write_reg(adapter, addr, v | val); + t3_read_reg(adapter, addr); /* flush */ +} + +/** + * t3_read_indirect - read indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect address + * @data_reg: register holding the value of the indirect register + * @vals: where the read register values are stored + * @start_idx: index of first indirect register to read + * @nregs: how many indirect registers to read + * + * Reads registers that are accessed indirectly through an address/data + * register pair. + */ +static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t3_write_reg(adap, addr_reg, start_idx); + *vals++ = t3_read_reg(adap, data_reg); + start_idx++; + } +} + +/** + * t3_mc7_bd_read - read from MC7 through backdoor accesses + * @mc7: identifies MC7 to read from + * @start: index of first 64-bit word to read + * @n: number of 64-bit words to read + * @buf: where to store the read result + * + * Read n 64-bit words from MC7 starting at word start, using backdoor + * accesses. + */ +int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, + u64 *buf) +{ + static const int shift[] = { 0, 0, 16, 24 }; + static const int step[] = { 0, 32, 16, 8 }; + + unsigned int size64 = mc7->size / 8; /* # of 64-bit words */ + struct adapter *adap = mc7->adapter; + + if (start >= size64 || start + n > size64) + return -EINVAL; + + start *= (8 << mc7->width); + while (n--) { + int i; + u64 val64 = 0; + + for (i = (1 << mc7->width) - 1; i >= 0; --i) { + int attempts = 10; + u32 val; + + t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start); + t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0); + val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP); + while ((val & F_BUSY) && attempts--) + val = t3_read_reg(adap, + mc7->offset + A_MC7_BD_OP); + if (val & F_BUSY) + return -EIO; + + val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1); + if (mc7->width == 0) { + val64 = t3_read_reg(adap, + mc7->offset + + A_MC7_BD_DATA0); + val64 |= (u64) val << 32; + } else { + if (mc7->width > 1) + val >>= shift[mc7->width]; + val64 |= (u64) val << (step[mc7->width] * i); + } + start += 8; + } + *buf++ = val64; + } + return 0; +} + +/* + * Initialize MI1. + */ +static void mi1_init(struct adapter *adap, const struct adapter_info *ai) +{ + u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1; + u32 val = F_PREEN | V_CLKDIV(clkdiv); + + t3_write_reg(adap, A_MI1_CFG, val); +} + +#define MDIO_ATTEMPTS 20 + +/* + * MI1 read/write operations for clause 22 PHYs. 
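+ * Clause 22 carries the 5-bit register address in the frame itself, so a + * read or write completes in a single MI1 operation; the clause 45 + * helpers further below need a separate address cycle first.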
+ */ +static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; + u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr); + + mutex_lock(&adapter->mdio_lock); + t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1)); + t3_write_reg(adapter, A_MI1_ADDR, addr); + t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2)); + ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); + if (!ret) + ret = t3_read_reg(adapter, A_MI1_DATA); + mutex_unlock(&adapter->mdio_lock); + return ret; +} + +static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr, u16 val) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; + u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr); + + mutex_lock(&adapter->mdio_lock); + t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1)); + t3_write_reg(adapter, A_MI1_ADDR, addr); + t3_write_reg(adapter, A_MI1_DATA, val); + t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1)); + ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); + mutex_unlock(&adapter->mdio_lock); + return ret; +} + +static const struct mdio_ops mi1_mdio_ops = { + .read = t3_mi1_read, + .write = t3_mi1_write, + .mode_support = MDIO_SUPPORTS_C22 +}; + +/* + * Performs the address cycle for clause 45 PHYs. + * Must be called with the MDIO_LOCK held. + */ +static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr, + int reg_addr) +{ + u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr); + + t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0); + t3_write_reg(adapter, A_MI1_ADDR, addr); + t3_write_reg(adapter, A_MI1_DATA, reg_addr); + t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0)); + return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, + MDIO_ATTEMPTS, 10); +} + +/* + * MI1 read/write operations for indirect-addressed PHYs. 
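+ * mi1_wr_addr() latches the clause 45 register address first; a second + * MI1 operation, issued under the same mdio_lock, then moves the data.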
+ */ +static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; + + mutex_lock(&adapter->mdio_lock); + ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr); + if (!ret) { + t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3)); + ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, + MDIO_ATTEMPTS, 10); + if (!ret) + ret = t3_read_reg(adapter, A_MI1_DATA); + } + mutex_unlock(&adapter->mdio_lock); + return ret; +} + +static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr, u16 val) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; + + mutex_lock(&adapter->mdio_lock); + ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr); + if (!ret) { + t3_write_reg(adapter, A_MI1_DATA, val); + t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1)); + ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, + MDIO_ATTEMPTS, 10); + } + mutex_unlock(&adapter->mdio_lock); + return ret; +} + +static const struct mdio_ops mi1_mdio_ext_ops = { + .read = mi1_ext_read, + .write = mi1_ext_write, + .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22 +}; + +/** + * t3_mdio_change_bits - modify the value of a PHY register + * @phy: the PHY to operate on + * @mmd: the device address + * @reg: the register address + * @clear: what part of the register value to mask off + * @set: what part of the register value to set + * + * Changes the value of a PHY register by applying a mask to its current + * value and ORing the result with a new value. + */ +int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear, + unsigned int set) +{ + int ret; + unsigned int val; + + ret = t3_mdio_read(phy, mmd, reg, &val); + if (!ret) { + val &= ~clear; + ret = t3_mdio_write(phy, mmd, reg, val | set); + } + return ret; +} + +/** + * t3_phy_reset - reset a PHY block + * @phy: the PHY to operate on + * @mmd: the device address of the PHY block to reset + * @wait: how long to wait for the reset to complete in 1ms increments + * + * Resets a PHY block and optionally waits for the reset to complete. + * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset + * for 10G PHYs. + */ +int t3_phy_reset(struct cphy *phy, int mmd, int wait) +{ + int err; + unsigned int ctl; + + err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER, + MDIO_CTRL1_RESET); + if (err || !wait) + return err; + + do { + err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl); + if (err) + return err; + ctl &= MDIO_CTRL1_RESET; + if (ctl) + msleep(1); + } while (ctl && --wait); + + return ctl ? -1 : 0; +} + +/** + * t3_phy_advertise - set the PHY advertisement registers for autoneg + * @phy: the PHY to operate on + * @advert: bitmap of capabilities the PHY should advertise + * + * Sets a 10/100/1000 PHY's advertisement registers to advertise the + * requested capabilities. 
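+ * + * A hypothetical caller wanting 100/1000 full duplex plus pause would + * pass something like ADVERTISED_100baseT_Full | + * ADVERTISED_1000baseT_Full | ADVERTISED_Pause in @advert.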
+ */ +int t3_phy_advertise(struct cphy *phy, unsigned int advert) +{ + int err; + unsigned int val = 0; + + err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val); + if (err) + return err; + + val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + if (advert & ADVERTISED_1000baseT_Half) + val |= ADVERTISE_1000HALF; + if (advert & ADVERTISED_1000baseT_Full) + val |= ADVERTISE_1000FULL; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val); + if (err) + return err; + + val = 1; + if (advert & ADVERTISED_10baseT_Half) + val |= ADVERTISE_10HALF; + if (advert & ADVERTISED_10baseT_Full) + val |= ADVERTISE_10FULL; + if (advert & ADVERTISED_100baseT_Half) + val |= ADVERTISE_100HALF; + if (advert & ADVERTISED_100baseT_Full) + val |= ADVERTISE_100FULL; + if (advert & ADVERTISED_Pause) + val |= ADVERTISE_PAUSE_CAP; + if (advert & ADVERTISED_Asym_Pause) + val |= ADVERTISE_PAUSE_ASYM; + return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val); +} + +/** + * t3_phy_advertise_fiber - set fiber PHY advertisement register + * @phy: the PHY to operate on + * @advert: bitmap of capabilities the PHY should advertise + * + * Sets a fiber PHY's advertisement register to advertise the + * requested capabilities. + */ +int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert) +{ + unsigned int val = 0; + + if (advert & ADVERTISED_1000baseT_Half) + val |= ADVERTISE_1000XHALF; + if (advert & ADVERTISED_1000baseT_Full) + val |= ADVERTISE_1000XFULL; + if (advert & ADVERTISED_Pause) + val |= ADVERTISE_1000XPAUSE; + if (advert & ADVERTISED_Asym_Pause) + val |= ADVERTISE_1000XPSE_ASYM; + return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val); +} + +/** + * t3_set_phy_speed_duplex - force PHY speed and duplex + * @phy: the PHY to operate on + * @speed: requested PHY speed + * @duplex: requested PHY duplex + * + * Force a 10/100/1000 PHY's speed and duplex. This also disables + * auto-negotiation except for GigE, where auto-negotiation is mandatory. + */ +int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex) +{ + int err; + unsigned int ctl; + + err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl); + if (err) + return err; + + if (speed >= 0) { + ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + if (speed == SPEED_100) + ctl |= BMCR_SPEED100; + else if (speed == SPEED_1000) + ctl |= BMCR_SPEED1000; + } + if (duplex >= 0) { + ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE); + if (duplex == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + } + if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */ + ctl |= BMCR_ANENABLE; + return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl); +} + +int t3_phy_lasi_intr_enable(struct cphy *phy) +{ + return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, + MDIO_PMA_LASI_LSALARM); +} + +int t3_phy_lasi_intr_disable(struct cphy *phy) +{ + return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0); +} + +int t3_phy_lasi_intr_clear(struct cphy *phy) +{ + u32 val; + + return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val); +} + +int t3_phy_lasi_intr_handler(struct cphy *phy) +{ + unsigned int status; + int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, + &status); + + if (err) + return err; + return (status & MDIO_PMA_LASI_LSALARM) ? 
cphy_cause_link_change : 0; +} + +static const struct adapter_info t3_adap_info[] = { + {1, 1, 0, + F_GPIO2_OEN | F_GPIO4_OEN | + F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, + &mi1_mdio_ops, "Chelsio PE9000"}, + {1, 1, 0, + F_GPIO2_OEN | F_GPIO4_OEN | + F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, + &mi1_mdio_ops, "Chelsio T302"}, + {1, 0, 0, + F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | + F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, + { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, + &mi1_mdio_ext_ops, "Chelsio T310"}, + {1, 1, 0, + F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN | + F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL | + F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, + { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, + &mi1_mdio_ext_ops, "Chelsio T320"}, + {}, + {}, + {1, 0, 0, + F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN | + F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, + { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, + &mi1_mdio_ext_ops, "Chelsio T310" }, + {1, 0, 0, + F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | + F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL, + { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, + &mi1_mdio_ext_ops, "Chelsio N320E-G2" }, +}; + +/* + * Return the adapter_info structure with a given index. Out-of-range indices + * return NULL. + */ +const struct adapter_info *t3_get_adapter_info(unsigned int id) +{ + return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL; +} + +struct port_type_info { + int (*phy_prep)(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *ops); +}; + +static const struct port_type_info port_types[] = { + { NULL }, + { t3_ael1002_phy_prep }, + { t3_vsc8211_phy_prep }, + { NULL}, + { t3_xaui_direct_phy_prep }, + { t3_ael2005_phy_prep }, + { t3_qt2045_phy_prep }, + { t3_ael1006_phy_prep }, + { NULL }, + { t3_aq100x_phy_prep }, + { t3_ael2020_phy_prep }, +}; + +#define VPD_ENTRY(name, len) \ + u8 name##_kword[2]; u8 name##_len; u8 name##_data[len] + +/* + * Partial EEPROM Vital Product Data structure. Includes only the ID and + * VPD-R sections. + */ +struct t3_vpd { + u8 id_tag; + u8 id_len[2]; + u8 id_data[16]; + u8 vpdr_tag; + u8 vpdr_len[2]; + VPD_ENTRY(pn, 16); /* part number */ + VPD_ENTRY(ec, 16); /* EC level */ + VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ + VPD_ENTRY(na, 12); /* MAC address base */ + VPD_ENTRY(cclk, 6); /* core clock */ + VPD_ENTRY(mclk, 6); /* mem clock */ + VPD_ENTRY(uclk, 6); /* uP clk */ + VPD_ENTRY(mdc, 6); /* MDIO clk */ + VPD_ENTRY(mt, 2); /* mem timing */ + VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */ + VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */ + VPD_ENTRY(port0, 2); /* PHY0 complex */ + VPD_ENTRY(port1, 2); /* PHY1 complex */ + VPD_ENTRY(port2, 2); /* PHY2 complex */ + VPD_ENTRY(port3, 2); /* PHY3 complex */ + VPD_ENTRY(rv, 1); /* csum */ + u32 pad; /* for multiple-of-4 sizing and alignment */ +}; + +#define EEPROM_MAX_POLL 40 +#define EEPROM_STAT_ADDR 0x4000 +#define VPD_BASE 0xc00 + +/** + * t3_seeprom_read - read a VPD EEPROM location + * @adapter: adapter to read + * @addr: EEPROM address + * @data: where to store the read data + * + * Read a 32-bit word from a location in VPD EEPROM using the card's PCI + * VPD ROM capability. A zero is written to the flag bit when the + * address is written to the control register. 
The hardware device will
+ * set the flag to 1 when 4 bytes have been read into the data register.
+ */
+int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
+{
+ u16 val;
+ int attempts = EEPROM_MAX_POLL;
+ u32 v;
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+ if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
+ return -EINVAL;
+
+ pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
+ do {
+ udelay(10);
+ pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
+ } while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (!(val & PCI_VPD_ADDR_F)) {
+ CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
+ return -EIO;
+ }
+ pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
+ *data = cpu_to_le32(v);
+ return 0;
+}
+
+/**
+ * t3_seeprom_write - write a VPD EEPROM location
+ * @adapter: adapter to write
+ * @addr: EEPROM address
+ * @data: value to write
+ *
+ * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
+ * VPD ROM capability.
+ */
+int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
+{
+ u16 val;
+ int attempts = EEPROM_MAX_POLL;
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+ if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
+ return -EINVAL;
+
+ pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
+ le32_to_cpu(data));
+ pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
+ addr | PCI_VPD_ADDR_F);
+ do {
+ msleep(1);
+ pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
+ } while ((val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (val & PCI_VPD_ADDR_F) {
+ CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * t3_seeprom_wp - enable/disable EEPROM write protection
+ * @adapter: the adapter
+ * @enable: 1 to enable write protection, 0 to disable it
+ *
+ * Enables or disables write protection on the serial EEPROM.
+ */
+int t3_seeprom_wp(struct adapter *adapter, int enable)
+{
+ return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+}
+
+/**
+ * get_vpd_params - read VPD parameters from VPD EEPROM
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+ int i, addr, ret;
+ struct t3_vpd vpd;
+
+ /*
+ * Card information is normally at VPD_BASE but some early cards had
+ * it at 0.
+ */
+ ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
+ if (ret)
+ return ret;
+ addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
+
+ for (i = 0; i < sizeof(vpd); i += 4) {
+ ret = t3_seeprom_read(adapter, addr + i,
+ (__le32 *)((u8 *)&vpd + i));
+ if (ret)
+ return ret;
+ }
+
+ p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
+ p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
+ p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
+ p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
+ p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
+ memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
+
+ /* Old eeproms didn't have port information */
+ if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
+ p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
+ p->port_type[1] = uses_xaui(adapter) ?
6 : 2; + } else { + p->port_type[0] = hex_to_bin(vpd.port0_data[0]); + p->port_type[1] = hex_to_bin(vpd.port1_data[0]); + p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); + p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); + } + + for (i = 0; i < 6; i++) + p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 + + hex_to_bin(vpd.na_data[2 * i + 1]); + return 0; +} + +/* serial flash and firmware constants */ +enum { + SF_ATTEMPTS = 5, /* max retries for SF1 operations */ + SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ + SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ + + FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */ + FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */ + FW_MIN_SIZE = 8 /* at least version and csum */ +}; + +/** + * sf1_read - read data from the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, + u32 *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t3_read_reg(adapter, A_SF_OP) & F_BUSY) + return -EBUSY; + t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); + ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10); + if (!ret) + *valp = t3_read_reg(adapter, A_SF_DATA); + return ret; +} + +/** + * sf1_write - write data to the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, + u32 val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t3_read_reg(adapter, A_SF_OP) & F_BUSY) + return -EBUSY; + t3_write_reg(adapter, A_SF_DATA, val); + t3_write_reg(adapter, A_SF_OP, + V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); + return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10); +} + +/** + * flash_wait_op - wait for a flash operation to complete + * @adapter: the adapter + * @attempts: max number of polls of the status register + * @delay: delay between polls in ms + * + * Wait for a flash operation to complete by polling the status register. 
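+ * Bit 0 of the status returned by SF_RD_STATUS is treated as the usual
+ * SPI-flash write-in-progress flag: the loop exits once it clears, and
+ * gives up with -EAGAIN after @attempts polls.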
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+ int ret;
+ u32 status;
+
+ while (1) {
+ if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
+ (ret = sf1_read(adapter, 1, 0, &status)) != 0)
+ return ret;
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ msleep(delay);
+ }
+}
+
+/**
+ * t3_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+static int t3_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+ return -EINVAL;
+
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
+ (ret = sf1_read(adapter, 1, 1, data)) != 0)
+ return ret;
+
+ for (; nwords; nwords--, data++) {
+ ret = sf1_read(adapter, 4, nwords > 1, data);
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = htonl(*data);
+ }
+ return 0;
+}
+
+/**
+ * t3_write_flash - write up to a page of data to the serial flash
+ * @adapter: the adapter
+ * @addr: the start address to write
+ * @n: length of data to write
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address.
+ */
+static int t3_write_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int n, const u8 *data)
+{
+ int ret;
+ u32 buf[64];
+ unsigned int i, c, left, val, offset = addr & 0xff;
+
+ if (addr + n > SF_SIZE || offset + n > 256)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 1, val)) != 0)
+ return ret;
+
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = sf1_write(adapter, c, c != left, val);
+ if (ret)
+ return ret;
+ }
+ if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
+ return ret;
+
+ /* Read the page to verify the write succeeded */
+ ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (u8 *) buf + offset, n))
+ return -EIO;
+ return 0;
+}
+
+/**
+ * t3_get_tp_version - read the tp sram version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the protocol sram version from sram.
+ */
+int t3_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ int ret;
+
+ /* Get version loaded in SRAM */
+ t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
+ ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
+ 1, 1, 5, 1);
+ if (ret)
+ return ret;
+
+ *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
+
+ return 0;
+}
+
+/**
+ * t3_check_tpsram_version - check the tp sram version
+ * @adapter: the adapter
+ *
+ * Reads the protocol SRAM version loaded in the adapter and checks it
+ * against the version the driver was compiled for.
+ */
+int t3_check_tpsram_version(struct adapter *adapter)
+{
+ int ret;
+ u32 vers;
+ unsigned int major, minor;
+
+ if (adapter->params.rev == T3_REV_A)
+ return 0;
+
+ ret = t3_get_tp_version(adapter, &vers);
+ if (ret)
+ return ret;
+
+ major = G_TP_VERSION_MAJOR(vers);
+ minor = G_TP_VERSION_MINOR(vers);
+
+ if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
+ return 0;
+
+ CH_ERR(adapter, "found wrong TP version (%u.%u), "
+ "driver compiled for version %d.%d\n", major, minor,
+ TP_VERSION_MAJOR, TP_VERSION_MINOR);
+ return -EINVAL;
+}
+
+/**
+ * t3_check_tpsram - check if provided protocol SRAM
+ * is compatible with this driver
+ * @adapter: the adapter
+ * @tp_sram: the protocol SRAM image to check
+ * @size: image size
+ *
+ * Checks if an adapter's tp sram is compatible with the driver.
+ * Returns 0 if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
+ unsigned int size)
+{
+ u32 csum;
+ unsigned int i;
+ const __be32 *p = (const __be32 *)tp_sram;
+
+ /* Verify checksum */
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+ if (csum != 0xffffffff) {
+ CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
+ csum);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum fw_version_type {
+ FW_VERSION_N3,
+ FW_VERSION_T3
+};
+
+/**
+ * t3_get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+int t3_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+ return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
+}
+
+/**
+ * t3_check_fw_version - check if the FW is compatible with this driver
+ * @adapter: the adapter
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_fw_version(struct adapter *adapter)
+{
+ int ret;
+ u32 vers;
+ unsigned int type, major, minor;
+
+ ret = t3_get_fw_version(adapter, &vers);
+ if (ret)
+ return ret;
+
+ type = G_FW_VERSION_TYPE(vers);
+ major = G_FW_VERSION_MAJOR(vers);
+ minor = G_FW_VERSION_MINOR(vers);
+
+ if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
+ minor == FW_VERSION_MINOR)
+ return 0;
+ else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
+ CH_WARN(adapter, "found old FW minor version (%u.%u), "
+ "driver compiled for version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ else {
+ CH_WARN(adapter, "found newer FW version (%u.%u), "
+ "driver compiled for version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * t3_flash_erase_sectors - erase a range of flash sectors
+ * @adapter: the adapter
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given range.
+ */
+static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+ while (start <= end) {
+ int ret;
+
+ if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 0,
+ SF_ERASE_SECTOR | (start << 8))) != 0 ||
+ (ret = flash_wait_op(adapter, 5, 500)) != 0)
+ return ret;
+ start++;
+ }
+ return 0;
+}
+
+/**
+ * t3_load_fw - download firmware
+ * @adapter: the adapter
+ * @fw_data: the firmware image to write
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ * The FW image has the following sections: @size - 8 bytes of code and + * data, followed by 4 bytes of FW version, followed by the 32-bit + * 1's complement checksum of the whole image. + */ +int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size) +{ + u32 csum; + unsigned int i; + const __be32 *p = (const __be32 *)fw_data; + int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16; + + if ((size & 3) || size < FW_MIN_SIZE) + return -EINVAL; + if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR) + return -EFBIG; + + for (csum = 0, i = 0; i < size / sizeof(csum); i++) + csum += ntohl(p[i]); + if (csum != 0xffffffff) { + CH_ERR(adapter, "corrupted firmware image, checksum %u\n", + csum); + return -EINVAL; + } + + ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector); + if (ret) + goto out; + + size -= 8; /* trim off version and checksum */ + for (addr = FW_FLASH_BOOT_ADDR; size;) { + unsigned int chunk_size = min(size, 256U); + + ret = t3_write_flash(adapter, addr, chunk_size, fw_data); + if (ret) + goto out; + + addr += chunk_size; + fw_data += chunk_size; + size -= chunk_size; + } + + ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data); +out: + if (ret) + CH_ERR(adapter, "firmware download failed, error %d\n", ret); + return ret; +} + +#define CIM_CTL_BASE 0x2000 + +/** + * t3_cim_ctl_blk_read - read a block from CIM control region + * + * @adap: the adapter + * @addr: the start address within the CIM control region + * @n: number of words to read + * @valp: where to store the result + * + * Reads a block of 4-byte words from the CIM control region. + */ +int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr, + unsigned int n, unsigned int *valp) +{ + int ret = 0; + + if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) + return -EBUSY; + + for ( ; !ret && n--; addr += 4) { + t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr); + ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, + 0, 5, 2); + if (!ret) + *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA); + } + return ret; +} + +static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg, + u32 *rx_hash_high, u32 *rx_hash_low) +{ + /* stop Rx unicast traffic */ + t3_mac_disable_exact_filters(mac); + + /* stop broadcast, multicast, promiscuous mode traffic */ + *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG); + t3_set_reg_field(mac->adapter, A_XGM_RX_CFG, + F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES, + F_DISBCAST); + + *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH); + t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0); + + *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW); + t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0); + + /* Leave time to drain max RX fifo */ + msleep(1); +} + +static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg, + u32 rx_hash_high, u32 rx_hash_low) +{ + t3_mac_enable_exact_filters(mac); + t3_set_reg_field(mac->adapter, A_XGM_RX_CFG, + F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES, + rx_cfg); + t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high); + t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low); +} + +/** + * t3_link_changed - handle interface link changes + * @adapter: the adapter + * @port_id: the port index that changed link state + * + * Called when a port's link settings change to propagate the new values + * to the associated PHY and MAC. After performing the common tasks it + * invokes an OS-specific handler. 
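+ *
+ * On a link-up transition Rx traffic is briefly gated (see
+ * t3_gate_rx_traffic() above) so that a latched link-fault indication
+ * can be sampled and accounted before traffic is re-opened.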
+ */ +void t3_link_changed(struct adapter *adapter, int port_id) +{ + int link_ok, speed, duplex, fc; + struct port_info *pi = adap2pinfo(adapter, port_id); + struct cphy *phy = &pi->phy; + struct cmac *mac = &pi->mac; + struct link_config *lc = &pi->link_config; + + phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); + + if (!lc->link_ok && link_ok) { + u32 rx_cfg, rx_hash_high, rx_hash_low; + u32 status; + + t3_xgm_intr_enable(adapter, port_id); + t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low); + t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0); + t3_mac_enable(mac, MAC_DIRECTION_RX); + + status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset); + if (status & F_LINKFAULTCHANGE) { + mac->stats.link_faults++; + pi->link_fault = 1; + } + t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low); + } + + if (lc->requested_fc & PAUSE_AUTONEG) + fc &= lc->requested_fc; + else + fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + + if (link_ok == lc->link_ok && speed == lc->speed && + duplex == lc->duplex && fc == lc->fc) + return; /* nothing changed */ + + if (link_ok != lc->link_ok && adapter->params.rev > 0 && + uses_xaui(adapter)) { + if (link_ok) + t3b_pcs_reset(mac); + t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, + link_ok ? F_TXACTENABLE | F_RXEN : 0); + } + lc->link_ok = link_ok; + lc->speed = speed < 0 ? SPEED_INVALID : speed; + lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; + + if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) { + /* Set MAC speed, duplex, and flow control to match PHY. */ + t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc); + lc->fc = fc; + } + + t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault, + speed, duplex, fc); +} + +void t3_link_fault(struct adapter *adapter, int port_id) +{ + struct port_info *pi = adap2pinfo(adapter, port_id); + struct cmac *mac = &pi->mac; + struct cphy *phy = &pi->phy; + struct link_config *lc = &pi->link_config; + int link_ok, speed, duplex, fc, link_fault; + u32 rx_cfg, rx_hash_high, rx_hash_low; + + t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low); + + if (adapter->params.rev > 0 && uses_xaui(adapter)) + t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0); + + t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0); + t3_mac_enable(mac, MAC_DIRECTION_RX); + + t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low); + + link_fault = t3_read_reg(adapter, + A_XGM_INT_STATUS + mac->offset); + link_fault &= F_LINKFAULTCHANGE; + + link_ok = lc->link_ok; + speed = lc->speed; + duplex = lc->duplex; + fc = lc->fc; + + phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); + + if (link_fault) { + lc->link_ok = 0; + lc->speed = SPEED_INVALID; + lc->duplex = DUPLEX_INVALID; + + t3_os_link_fault(adapter, port_id, 0); + + /* Account link faults only when the phy reports a link up */ + if (link_ok) + mac->stats.link_faults++; + } else { + if (link_ok) + t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, + F_TXACTENABLE | F_RXEN); + + pi->link_fault = 0; + lc->link_ok = (unsigned char)link_ok; + lc->speed = speed < 0 ? SPEED_INVALID : speed; + lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; + t3_os_link_fault(adapter, port_id, link_ok); + } +} + +/** + * t3_link_start - apply link configuration to MAC/PHY + * @phy: the PHY to setup + * @mac: the MAC to setup + * @lc: the requested link configuration + * + * Set up a port's MAC and PHY according to a desired link configuration. 
+ * - If the PHY can auto-negotiate first decide what to advertise, then + * enable/disable auto-negotiation as desired, and reset. + * - If the PHY does not auto-negotiate just reset it. + * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, + * otherwise do it later based on the outcome of auto-negotiation. + */ +int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) +{ + unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + + lc->link_ok = 0; + if (lc->supported & SUPPORTED_Autoneg) { + lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause); + if (fc) { + lc->advertising |= ADVERTISED_Asym_Pause; + if (fc & PAUSE_RX) + lc->advertising |= ADVERTISED_Pause; + } + phy->ops->advertise(phy, lc->advertising); + + if (lc->autoneg == AUTONEG_DISABLE) { + lc->speed = lc->requested_speed; + lc->duplex = lc->requested_duplex; + lc->fc = (unsigned char)fc; + t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex, + fc); + /* Also disables autoneg */ + phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); + } else + phy->ops->autoneg_enable(phy); + } else { + t3_mac_set_speed_duplex_fc(mac, -1, -1, fc); + lc->fc = (unsigned char)fc; + phy->ops->reset(phy, 0); + } + return 0; +} + +/** + * t3_set_vlan_accel - control HW VLAN extraction + * @adapter: the adapter + * @ports: bitmap of adapter ports to operate on + * @on: enable (1) or disable (0) HW VLAN extraction + * + * Enables or disables HW extraction of VLAN tags for the given port. + */ +void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on) +{ + t3_set_reg_field(adapter, A_TP_OUT_CONFIG, + ports << S_VLANEXTRACTIONENABLE, + on ? (ports << S_VLANEXTRACTIONENABLE) : 0); +} + +struct intr_info { + unsigned int mask; /* bits to check in interrupt status */ + const char *msg; /* message to print or NULL */ + short stat_idx; /* stat counter to increment or -1 */ + unsigned short fatal; /* whether the condition reported is fatal */ +}; + +/** + * t3_handle_intr_status - table driven interrupt handler + * @adapter: the adapter that generated the interrupt + * @reg: the interrupt status register to process + * @mask: a mask to apply to the interrupt status + * @acts: table of interrupt actions + * @stats: statistics counters tracking interrupt occurrences + * + * A table driven interrupt handler that applies a set of masks to an + * interrupt status word and performs the corresponding actions if the + * interrupts described by the mask have occurred. The actions include + * optionally printing a warning or alert message, and optionally + * incrementing a stat counter. The table is terminated by an entry + * specifying mask 0. Returns the number of fatal interrupt conditions. 
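+ *
+ * A minimal, purely illustrative action table would look like:
+ *
+ *	static const struct intr_info example_intr_info[] = {
+ *		{ F_SOME_FATAL_ERR, "module fatal error", -1, 1 },
+ *		{ F_SOME_COUNTED_EVENT, NULL, STAT_SOME_EVENT, 0 },
+ *		{0}
+ *	};
+ *
+ * where F_SOME_FATAL_ERR, F_SOME_COUNTED_EVENT and STAT_SOME_EVENT are
+ * hypothetical names rather than symbols defined by this driver.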
+ */ +static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg, + unsigned int mask, + const struct intr_info *acts, + unsigned long *stats) +{ + int fatal = 0; + unsigned int status = t3_read_reg(adapter, reg) & mask; + + for (; acts->mask; ++acts) { + if (!(status & acts->mask)) + continue; + if (acts->fatal) { + fatal++; + CH_ALERT(adapter, "%s (0x%x)\n", + acts->msg, status & acts->mask); + status &= ~acts->mask; + } else if (acts->msg) + CH_WARN(adapter, "%s (0x%x)\n", + acts->msg, status & acts->mask); + if (acts->stat_idx >= 0) + stats[acts->stat_idx]++; + } + if (status) /* clear processed interrupts */ + t3_write_reg(adapter, reg, status); + return fatal; +} + +#define SGE_INTR_MASK (F_RSPQDISABLED | \ + F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \ + F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ + F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ + V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ + F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ + F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \ + F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \ + F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \ + F_LOPIODRBDROPERR) +#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ + F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ + F_NFASRCHFAIL) +#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE)) +#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \ + V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \ + F_TXFIFO_UNDERRUN) +#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \ + F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \ + F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \ + F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \ + V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \ + V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */) +#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\ + F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \ + /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \ + F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \ + F_TXPARERR | V_BISTERR(M_BISTERR)) +#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \ + F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \ + F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0) +#define ULPTX_INTR_MASK 0xfc +#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \ + F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \ + F_ZERO_SWITCH_ERROR) +#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \ + F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \ + F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \ + F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \ + F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \ + F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \ + F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \ + F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR) +#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \ + V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \ + V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR)) +#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \ + V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \ + V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR)) +#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \ + V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \ + V_RXTPPARERRENB(M_RXTPPARERRENB) | \ + V_MCAPARERRENB(M_MCAPARERRENB)) +#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE) +#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \ + F_PM1_RX | 
F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \ + F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \ + F_MPS0 | F_CPL_SWITCH) +/* + * Interrupt handler for the PCIX1 module. + */ +static void pci_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pcix1_intr_info[] = { + {F_MSTDETPARERR, "PCI master detected parity error", -1, 1}, + {F_SIGTARABT, "PCI signaled target abort", -1, 1}, + {F_RCVTARABT, "PCI received target abort", -1, 1}, + {F_RCVMSTABT, "PCI received master abort", -1, 1}, + {F_SIGSYSERR, "PCI signaled system error", -1, 1}, + {F_DETPARERR, "PCI detected parity error", -1, 1}, + {F_SPLCMPDIS, "PCI split completion discarded", -1, 1}, + {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1}, + {F_RCVSPLCMPERR, "PCI received split completion error", -1, + 1}, + {F_DETCORECCERR, "PCI correctable ECC error", + STAT_PCI_CORR_ECC, 0}, + {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1}, + {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1}, + {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1, + 1}, + {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1, + 1}, + {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1, + 1}, + {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity " + "error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK, + pcix1_intr_info, adapter->irq_stats)) + t3_fatal_err(adapter); +} + +/* + * Interrupt handler for the PCIE module. + */ +static void pcie_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pcie_intr_info[] = { + {F_PEXERR, "PCI PEX error", -1, 1}, + {F_UNXSPLCPLERRR, + "PCI unexpected split completion DMA read error", -1, 1}, + {F_UNXSPLCPLERRC, + "PCI unexpected split completion DMA command error", -1, 1}, + {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1}, + {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1}, + {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1}, + {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1}, + {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR), + "PCI MSI-X table/PBA parity error", -1, 1}, + {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1}, + {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1}, + {F_RXPARERR, "PCI Rx parity error", -1, 1}, + {F_TXPARERR, "PCI Tx parity error", -1, 1}, + {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1}, + {0} + }; + + if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR) + CH_ALERT(adapter, "PEX error code 0x%x\n", + t3_read_reg(adapter, A_PCIE_PEX_ERR)); + + if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK, + pcie_intr_info, adapter->irq_stats)) + t3_fatal_err(adapter); +} + +/* + * TP interrupt handler. + */ +static void tp_intr_handler(struct adapter *adapter) +{ + static const struct intr_info tp_intr_info[] = { + {0xffffff, "TP parity error", -1, 1}, + {0x1000000, "TP out of Rx pages", -1, 1}, + {0x2000000, "TP out of Tx pages", -1, 1}, + {0} + }; + + static const struct intr_info tp_intr_info_t3c[] = { + {0x1fffffff, "TP parity error", -1, 1}, + {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1}, + {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff, + adapter->params.rev < T3_REV_C ? + tp_intr_info : tp_intr_info_t3c, NULL)) + t3_fatal_err(adapter); +} + +/* + * CIM interrupt handler. 
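+ * The CIM block fronts the adapter's embedded microprocessor, which is
+ * presumably why the causes below include icache/dcache parity errors
+ * and boot/flash address-space violations; all of them are fatal.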
+ */ +static void cim_intr_handler(struct adapter *adapter) +{ + static const struct intr_info cim_intr_info[] = { + {F_RSVDSPACEINT, "CIM reserved space write", -1, 1}, + {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1}, + {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1}, + {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1}, + {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1}, + {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1}, + {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1}, + {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1}, + {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1}, + {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1}, + {F_BLKRDPLINT, "CIM block read from PL space", -1, 1}, + {F_BLKWRPLINT, "CIM block write to PL space", -1, 1}, + {F_DRAMPARERR, "CIM DRAM parity error", -1, 1}, + {F_ICACHEPARERR, "CIM icache parity error", -1, 1}, + {F_DCACHEPARERR, "CIM dcache parity error", -1, 1}, + {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1}, + {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1}, + {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1}, + {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1}, + {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1}, + {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1}, + {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1}, + {F_ITAGPARERR, "CIM itag parity error", -1, 1}, + {F_DTAGPARERR, "CIM dtag parity error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff, + cim_intr_info, NULL)) + t3_fatal_err(adapter); +} + +/* + * ULP RX interrupt handler. + */ +static void ulprx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info ulprx_intr_info[] = { + {F_PARERRDATA, "ULP RX data parity error", -1, 1}, + {F_PARERRPCMD, "ULP RX command parity error", -1, 1}, + {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1}, + {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1}, + {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1}, + {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1}, + {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1}, + {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff, + ulprx_intr_info, NULL)) + t3_fatal_err(adapter); +} + +/* + * ULP TX interrupt handler. + */ +static void ulptx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info ulptx_intr_info[] = { + {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds", + STAT_ULP_CH0_PBL_OOB, 0}, + {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds", + STAT_ULP_CH1_PBL_OOB, 0}, + {0xfc, "ULP TX parity error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff, + ulptx_intr_info, adapter->irq_stats)) + t3_fatal_err(adapter); +} + +#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \ + F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \ + F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \ + F_ICSPI1_TX_FRAMING_ERROR) +#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \ + F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \ + F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \ + F_OESPI1_OFIFO2X_TX_FRAMING_ERROR) + +/* + * PM TX interrupt handler. 
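+ * Reports 0-length pcmds plus framing and parity errors on the PM1 Tx
+ * SPI interfaces (the ICSPI/OESPI cause bits collected in the masks
+ * above).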
+ */ +static void pmtx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pmtx_intr_info[] = { + {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1}, + {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1}, + {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1}, + {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR), + "PMTX ispi parity error", -1, 1}, + {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR), + "PMTX ospi parity error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff, + pmtx_intr_info, NULL)) + t3_fatal_err(adapter); +} + +#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \ + F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \ + F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \ + F_IESPI1_TX_FRAMING_ERROR) +#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \ + F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \ + F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \ + F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR) + +/* + * PM RX interrupt handler. + */ +static void pmrx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pmrx_intr_info[] = { + {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1}, + {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1}, + {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1}, + {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR), + "PMRX ispi parity error", -1, 1}, + {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR), + "PMRX ospi parity error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff, + pmrx_intr_info, NULL)) + t3_fatal_err(adapter); +} + +/* + * CPL switch interrupt handler. + */ +static void cplsw_intr_handler(struct adapter *adapter) +{ + static const struct intr_info cplsw_intr_info[] = { + {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1}, + {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1}, + {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1}, + {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1}, + {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1}, + {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff, + cplsw_intr_info, NULL)) + t3_fatal_err(adapter); +} + +/* + * MPS interrupt handler. + */ +static void mps_intr_handler(struct adapter *adapter) +{ + static const struct intr_info mps_intr_info[] = { + {0x1ff, "MPS parity error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff, + mps_intr_info, NULL)) + t3_fatal_err(adapter); +} + +#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE) + +/* + * MC7 interrupt handler. 
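+ * Shared by all three MC7 memory controller instances (PMRX, PMTX and
+ * CM, see t3_slow_intr_handler() below); mc7->offset selects the
+ * register block of the instance being serviced.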
+ */ +static void mc7_intr_handler(struct mc7 *mc7) +{ + struct adapter *adapter = mc7->adapter; + u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE); + + if (cause & F_CE) { + mc7->stats.corr_err++; + CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, " + "data 0x%x 0x%x 0x%x\n", mc7->name, + t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR), + t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0), + t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1), + t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2)); + } + + if (cause & F_UE) { + mc7->stats.uncorr_err++; + CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, " + "data 0x%x 0x%x 0x%x\n", mc7->name, + t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR), + t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0), + t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1), + t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2)); + } + + if (G_PE(cause)) { + mc7->stats.parity_err++; + CH_ALERT(adapter, "%s MC7 parity error 0x%x\n", + mc7->name, G_PE(cause)); + } + + if (cause & F_AE) { + u32 addr = 0; + + if (adapter->params.rev > 0) + addr = t3_read_reg(adapter, + mc7->offset + A_MC7_ERR_ADDR); + mc7->stats.addr_err++; + CH_ALERT(adapter, "%s MC7 address error: 0x%x\n", + mc7->name, addr); + } + + if (cause & MC7_INTR_FATAL) + t3_fatal_err(adapter); + + t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause); +} + +#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \ + V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) +/* + * XGMAC interrupt handler. + */ +static int mac_intr_handler(struct adapter *adap, unsigned int idx) +{ + struct cmac *mac = &adap2pinfo(adap, idx)->mac; + /* + * We mask out interrupt causes for which we're not taking interrupts. + * This allows us to use polling logic to monitor some of the other + * conditions when taking interrupts would impose too much load on the + * system. + */ + u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) & + ~F_RXFIFO_OVERFLOW; + + if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) { + mac->stats.tx_fifo_parity_err++; + CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx); + } + if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) { + mac->stats.rx_fifo_parity_err++; + CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx); + } + if (cause & F_TXFIFO_UNDERRUN) + mac->stats.tx_fifo_urun++; + if (cause & F_RXFIFO_OVERFLOW) + mac->stats.rx_fifo_ovfl++; + if (cause & V_SERDES_LOS(M_SERDES_LOS)) + mac->stats.serdes_signal_loss++; + if (cause & F_XAUIPCSCTCERR) + mac->stats.xaui_pcs_ctc_err++; + if (cause & F_XAUIPCSALIGNCHANGE) + mac->stats.xaui_pcs_align_change++; + if (cause & F_XGM_INT) { + t3_set_reg_field(adap, + A_XGM_INT_ENABLE + mac->offset, + F_XGM_INT, 0); + mac->stats.link_faults++; + + t3_os_link_fault_handler(adap, idx); + } + + if (cause & XGM_INTR_FATAL) + t3_fatal_err(adap); + + t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause); + return cause != 0; +} + +/* + * Interrupt handler for PHY events. 
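+ * Walks every port; for each PHY that advertises SUPPORTED_IRQ, the
+ * matching GPIO cause bit is checked and the PHY's own intr_handler is
+ * invoked, with link, FIFO and module changes propagated from the
+ * returned cause mask.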
+ */ +int t3_phy_intr_handler(struct adapter *adapter) +{ + u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); + + for_each_port(adapter, i) { + struct port_info *p = adap2pinfo(adapter, i); + + if (!(p->phy.caps & SUPPORTED_IRQ)) + continue; + + if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) { + int phy_cause = p->phy.ops->intr_handler(&p->phy); + + if (phy_cause & cphy_cause_link_change) + t3_link_changed(adapter, i); + if (phy_cause & cphy_cause_fifo_error) + p->phy.fifo_errors++; + if (phy_cause & cphy_cause_module_change) + t3_os_phymod_changed(adapter, i); + } + } + + t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause); + return 0; +} + +/* + * T3 slow path (non-data) interrupt handler. + */ +int t3_slow_intr_handler(struct adapter *adapter) +{ + u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0); + + cause &= adapter->slow_intr_mask; + if (!cause) + return 0; + if (cause & F_PCIM0) { + if (is_pcie(adapter)) + pcie_intr_handler(adapter); + else + pci_intr_handler(adapter); + } + if (cause & F_SGE3) + t3_sge_err_intr_handler(adapter); + if (cause & F_MC7_PMRX) + mc7_intr_handler(&adapter->pmrx); + if (cause & F_MC7_PMTX) + mc7_intr_handler(&adapter->pmtx); + if (cause & F_MC7_CM) + mc7_intr_handler(&adapter->cm); + if (cause & F_CIM) + cim_intr_handler(adapter); + if (cause & F_TP1) + tp_intr_handler(adapter); + if (cause & F_ULP2_RX) + ulprx_intr_handler(adapter); + if (cause & F_ULP2_TX) + ulptx_intr_handler(adapter); + if (cause & F_PM1_RX) + pmrx_intr_handler(adapter); + if (cause & F_PM1_TX) + pmtx_intr_handler(adapter); + if (cause & F_CPL_SWITCH) + cplsw_intr_handler(adapter); + if (cause & F_MPS0) + mps_intr_handler(adapter); + if (cause & F_MC5A) + t3_mc5_intr_handler(&adapter->mc5); + if (cause & F_XGMAC0_0) + mac_intr_handler(adapter, 0); + if (cause & F_XGMAC0_1) + mac_intr_handler(adapter, 1); + if (cause & F_T3DBG) + t3_os_ext_intr_handler(adapter); + + /* Clear the interrupts just processed. */ + t3_write_reg(adapter, A_PL_INT_CAUSE0, cause); + t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ + return 1; +} + +static unsigned int calc_gpio_intr(struct adapter *adap) +{ + unsigned int i, gpi_intr = 0; + + for_each_port(adap, i) + if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) && + adapter_info(adap)->gpio_intr[i]) + gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i]; + return gpi_intr; +} + +/** + * t3_intr_enable - enable interrupts + * @adapter: the adapter whose interrupts should be enabled + * + * Enable interrupts by setting the interrupt enable registers of the + * various HW modules and then enabling the top-level interrupt + * concentrator. + */ +void t3_intr_enable(struct adapter *adapter) +{ + static const struct addr_val_pair intr_en_avp[] = { + {A_SG_INT_ENABLE, SGE_INTR_MASK}, + {A_MC7_INT_ENABLE, MC7_INTR_MASK}, + {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR, + MC7_INTR_MASK}, + {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR, + MC7_INTR_MASK}, + {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK}, + {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK}, + {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK}, + {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK}, + {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK}, + {A_MPS_INT_ENABLE, MPS_INTR_MASK}, + }; + + adapter->slow_intr_mask = PL_INTR_MASK; + + t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0); + t3_write_reg(adapter, A_TP_INT_ENABLE, + adapter->params.rev >= T3_REV_C ? 
0x2bfffff : 0x3bfffff); + + if (adapter->params.rev > 0) { + t3_write_reg(adapter, A_CPL_INTR_ENABLE, + CPLSW_INTR_MASK | F_CIM_OVFL_ERROR); + t3_write_reg(adapter, A_ULPTX_INT_ENABLE, + ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 | + F_PBL_BOUND_ERR_CH1); + } else { + t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK); + t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK); + } + + t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter)); + + if (is_pcie(adapter)) + t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK); + else + t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK); + t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask); + t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */ +} + +/** + * t3_intr_disable - disable a card's interrupts + * @adapter: the adapter whose interrupts should be disabled + * + * Disable interrupts. We only disable the top-level interrupt + * concentrator and the SGE data interrupts. + */ +void t3_intr_disable(struct adapter *adapter) +{ + t3_write_reg(adapter, A_PL_INT_ENABLE0, 0); + t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */ + adapter->slow_intr_mask = 0; +} + +/** + * t3_intr_clear - clear all interrupts + * @adapter: the adapter whose interrupts should be cleared + * + * Clears all interrupts. + */ +void t3_intr_clear(struct adapter *adapter) +{ + static const unsigned int cause_reg_addr[] = { + A_SG_INT_CAUSE, + A_SG_RSPQ_FL_STATUS, + A_PCIX_INT_CAUSE, + A_MC7_INT_CAUSE, + A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR, + A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR, + A_CIM_HOST_INT_CAUSE, + A_TP_INT_CAUSE, + A_MC5_DB_INT_CAUSE, + A_ULPRX_INT_CAUSE, + A_ULPTX_INT_CAUSE, + A_CPL_INTR_CAUSE, + A_PM1_TX_INT_CAUSE, + A_PM1_RX_INT_CAUSE, + A_MPS_INT_CAUSE, + A_T3DBG_INT_CAUSE, + }; + unsigned int i; + + /* Clear PHY and MAC interrupts for each port. */ + for_each_port(adapter, i) + t3_port_intr_clear(adapter, i); + + for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i) + t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff); + + if (is_pcie(adapter)) + t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff); + t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff); + t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ +} + +void t3_xgm_intr_enable(struct adapter *adapter, int idx) +{ + struct port_info *pi = adap2pinfo(adapter, idx); + + t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset, + XGM_EXTRA_INTR_MASK); +} + +void t3_xgm_intr_disable(struct adapter *adapter, int idx) +{ + struct port_info *pi = adap2pinfo(adapter, idx); + + t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset, + 0x7ff); +} + +/** + * t3_port_intr_enable - enable port-specific interrupts + * @adapter: associated adapter + * @idx: index of port whose interrupts should be enabled + * + * Enable port-specific (i.e., MAC and PHY) interrupts for the given + * adapter port. + */ +void t3_port_intr_enable(struct adapter *adapter, int idx) +{ + struct cphy *phy = &adap2pinfo(adapter, idx)->phy; + + t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK); + t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */ + phy->ops->intr_enable(phy); +} + +/** + * t3_port_intr_disable - disable port-specific interrupts + * @adapter: associated adapter + * @idx: index of port whose interrupts should be disabled + * + * Disable port-specific (i.e., MAC and PHY) interrupts for the given + * adapter port. 
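+ *
+ * The XGM enable register is read back to flush the write before the
+ * PHY's own interrupt-disable hook runs.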
+ */ +void t3_port_intr_disable(struct adapter *adapter, int idx) +{ + struct cphy *phy = &adap2pinfo(adapter, idx)->phy; + + t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0); + t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */ + phy->ops->intr_disable(phy); +} + +/** + * t3_port_intr_clear - clear port-specific interrupts + * @adapter: associated adapter + * @idx: index of port whose interrupts to clear + * + * Clear port-specific (i.e., MAC and PHY) interrupts for the given + * adapter port. + */ +static void t3_port_intr_clear(struct adapter *adapter, int idx) +{ + struct cphy *phy = &adap2pinfo(adapter, idx)->phy; + + t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff); + t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */ + phy->ops->intr_clear(phy); +} + +#define SG_CONTEXT_CMD_ATTEMPTS 100 + +/** + * t3_sge_write_context - write an SGE context + * @adapter: the adapter + * @id: the context id + * @type: the context type + * + * Program an SGE context with the values already loaded in the + * CONTEXT_DATA? registers. + */ +static int t3_sge_write_context(struct adapter *adapter, unsigned int id, + unsigned int type) +{ + if (type == F_RESPONSEQ) { + /* + * Can't write the Response Queue Context bits for + * Interrupt Armed or the Reserve bits after the chip + * has been initialized out of reset. Writing to these + * bits can confuse the hardware. + */ + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); + } else { + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); + } + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); + return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * clear_sge_ctxt - completely clear an SGE context + * @adapter: the adapter + * @id: the context id + * @type: the context type + * + * Completely clear an SGE context. Used predominantly at post-reset + * initialization. Note in particular that we don't skip writing to any + * "sensitive bits" in the contexts the way that t3_sge_write_context() + * does ... 
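+ * here all four SG_CONTEXT_MASK registers are simply set to all-ones,
+ * so every bit of the context is overwritten with zeroes.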
+ */ +static int clear_sge_ctxt(struct adapter *adap, unsigned int id, + unsigned int type) +{ + t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0); + t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0); + t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0); + t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0); + t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); + return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * t3_sge_init_ecntxt - initialize an SGE egress context + * @adapter: the adapter to configure + * @id: the context id + * @gts_enable: whether to enable GTS for the context + * @type: the egress context type + * @respq: associated response queue + * @base_addr: base address of queue + * @size: number of queue entries + * @token: uP token + * @gen: initial generation value for the context + * @cidx: consumer pointer + * + * Initialize an SGE egress context and make it ready for use. If the + * platform allows concurrent context operations, the caller is + * responsible for appropriate locking. + */ +int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable, + enum sge_context_type type, int respq, u64 base_addr, + unsigned int size, unsigned int token, int gen, + unsigned int cidx) +{ + unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM; + + if (base_addr & 0xfff) /* must be 4K aligned */ + return -EINVAL; + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + base_addr >>= 12; + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) | + V_EC_CREDITS(credits) | V_EC_GTS(gts_enable)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) | + V_EC_BASE_LO(base_addr & 0xffff)); + base_addr >>= 16; + t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr); + base_addr >>= 32; + t3_write_reg(adapter, A_SG_CONTEXT_DATA3, + V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) | + V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) | + F_EC_VALID); + return t3_sge_write_context(adapter, id, F_EGRESS); +} + +/** + * t3_sge_init_flcntxt - initialize an SGE free-buffer list context + * @adapter: the adapter to configure + * @id: the context id + * @gts_enable: whether to enable GTS for the context + * @base_addr: base address of queue + * @size: number of queue entries + * @bsize: size of each buffer for this queue + * @cong_thres: threshold to signal congestion to upstream producers + * @gen: initial generation value for the context + * @cidx: consumer pointer + * + * Initialize an SGE free list context and make it ready for use. The + * caller is responsible for ensuring only one context operation occurs + * at a time. 
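+ *
+ * As with the other context writers, @base_addr must be 4KB aligned:
+ * only address bits [63:12] are programmed into the context.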
+ */ +int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id, + int gts_enable, u64 base_addr, unsigned int size, + unsigned int bsize, unsigned int cong_thres, int gen, + unsigned int cidx) +{ + if (base_addr & 0xfff) /* must be 4K aligned */ + return -EINVAL; + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + base_addr >>= 12; + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr); + base_addr >>= 32; + t3_write_reg(adapter, A_SG_CONTEXT_DATA1, + V_FL_BASE_HI((u32) base_addr) | + V_FL_INDEX_LO(cidx & M_FL_INDEX_LO)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) | + V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) | + V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA3, + V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) | + V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable)); + return t3_sge_write_context(adapter, id, F_FREELIST); +} + +/** + * t3_sge_init_rspcntxt - initialize an SGE response queue context + * @adapter: the adapter to configure + * @id: the context id + * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ + * @base_addr: base address of queue + * @size: number of queue entries + * @fl_thres: threshold for selecting the normal or jumbo free list + * @gen: initial generation value for the context + * @cidx: consumer pointer + * + * Initialize an SGE response queue context and make it ready for use. + * The caller is responsible for ensuring only one context operation + * occurs at a time. + */ +int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id, + int irq_vec_idx, u64 base_addr, unsigned int size, + unsigned int fl_thres, int gen, unsigned int cidx) +{ + unsigned int intr = 0; + + if (base_addr & 0xfff) /* must be 4K aligned */ + return -EINVAL; + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + base_addr >>= 12; + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) | + V_CQ_INDEX(cidx)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr); + base_addr >>= 32; + if (irq_vec_idx >= 0) + intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN; + t3_write_reg(adapter, A_SG_CONTEXT_DATA2, + V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres); + return t3_sge_write_context(adapter, id, F_RESPONSEQ); +} + +/** + * t3_sge_init_cqcntxt - initialize an SGE completion queue context + * @adapter: the adapter to configure + * @id: the context id + * @base_addr: base address of queue + * @size: number of queue entries + * @rspq: response queue for async notifications + * @ovfl_mode: CQ overflow mode + * @credits: completion queue credits + * @credit_thres: the credit threshold + * + * Initialize an SGE completion queue context and make it ready for use. + * The caller is responsible for ensuring only one context operation + * occurs at a time. 
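+ *
+ * One detail worth noting in the body below: @ovfl_mode is programmed
+ * into both the overflow-mode and the error field of the context
+ * (V_CQ_OVERFLOW_MODE() and V_CQ_ERR()), and the generation bit always
+ * starts out at 1.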
+ */ +int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr, + unsigned int size, int rspq, int ovfl_mode, + unsigned int credits, unsigned int credit_thres) +{ + if (base_addr & 0xfff) /* must be 4K aligned */ + return -EINVAL; + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + base_addr >>= 12; + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr); + base_addr >>= 32; + t3_write_reg(adapter, A_SG_CONTEXT_DATA2, + V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) | + V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) | + V_CQ_ERR(ovfl_mode)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) | + V_CQ_CREDIT_THRES(credit_thres)); + return t3_sge_write_context(adapter, id, F_CQ); +} + +/** + * t3_sge_enable_ecntxt - enable/disable an SGE egress context + * @adapter: the adapter + * @id: the egress context id + * @enable: enable (1) or disable (0) the context + * + * Enable or disable an SGE egress context. The caller is responsible for + * ensuring only one context operation occurs at a time. + */ +int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable) +{ + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID); + t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable)); + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id)); + return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * t3_sge_disable_fl - disable an SGE free-buffer list + * @adapter: the adapter + * @id: the free list context id + * + * Disable an SGE free-buffer list. The caller is responsible for + * ensuring only one context operation occurs at a time. + */ +int t3_sge_disable_fl(struct adapter *adapter, unsigned int id) +{ + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE)); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); + t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0); + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id)); + return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * t3_sge_disable_rspcntxt - disable an SGE response queue + * @adapter: the adapter + * @id: the response queue context id + * + * Disable an SGE response queue. The caller is responsible for + * ensuring only one context operation occurs at a time. 
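+ *
+ * Like the other disable helpers here, this works by a masked context
+ * write: only the CQ_SIZE field is unmasked and CONTEXT_DATA0 is 0, so
+ * the queue is disabled by programming its size to zero while every
+ * other field is left untouched.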
+ */ +int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id) +{ + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE)); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0); + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id)); + return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * t3_sge_disable_cqcntxt - disable an SGE completion queue + * @adapter: the adapter + * @id: the completion queue context id + * + * Disable an SGE completion queue. The caller is responsible for + * ensuring only one context operation occurs at a time. + */ +int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id) +{ + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE)); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0); + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id)); + return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * t3_sge_cqcntxt_op - perform an operation on a completion queue context + * @adapter: the adapter + * @id: the context id + * @op: the operation to perform + * + * Perform the selected operation on an SGE completion queue context. + * The caller is responsible for ensuring only one context operation + * occurs at a time. + */ +int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, + unsigned int credits) +{ + u32 val; + + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16); + t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) | + V_CONTEXT(id) | F_CQ); + if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val)) + return -EIO; + + if (op >= 2 && op < 7) { + if (adapter->params.rev > 0) + return G_CQ_INDEX(val); + + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id)); + if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, + F_CONTEXT_CMD_BUSY, 0, + SG_CONTEXT_CMD_ATTEMPTS, 1)) + return -EIO; + return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0)); + } + return 0; +} + +/** + * t3_config_rss - configure Rx packet steering + * @adapter: the adapter + * @rss_config: RSS settings (written to TP_RSS_CONFIG) + * @cpus: values for the CPU lookup table (0xff terminated) + * @rspq: values for the response queue lookup table (0xffff terminated) + * + * Programs the receive packet steering logic. @cpus and @rspq provide + * the values for the CPU and response queue lookup tables. If they + * provide fewer values than the size of the tables the supplied values + * are used repeatedly until the tables are fully populated. 
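+ *
+ * A minimal caller sketch (hypothetical values; real tables are built
+ * from the number of configured queue sets, and rss_config is assembled
+ * from the RSS F_*/V_* flags):
+ *
+ *	u8 cpus[] = { 0, 1, 2, 3, 0xff };	/* 0xff terminator */
+ *	u16 rspq[] = { 0, 1, 2, 3, 0xffff };	/* 0xffff terminator */
+ *
+ *	t3_config_rss(adap, rss_config, cpus, rspq);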
+ */ +void t3_config_rss(struct adapter *adapter, unsigned int rss_config, + const u8 * cpus, const u16 *rspq) +{ + int i, j, cpu_idx = 0, q_idx = 0; + + if (cpus) + for (i = 0; i < RSS_TABLE_SIZE; ++i) { + u32 val = i << 16; + + for (j = 0; j < 2; ++j) { + val |= (cpus[cpu_idx++] & 0x3f) << (8 * j); + if (cpus[cpu_idx] == 0xff) + cpu_idx = 0; + } + t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val); + } + + if (rspq) + for (i = 0; i < RSS_TABLE_SIZE; ++i) { + t3_write_reg(adapter, A_TP_RSS_MAP_TABLE, + (i << 16) | rspq[q_idx++]); + if (rspq[q_idx] == 0xffff) + q_idx = 0; + } + + t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config); +} + +/** + * t3_tp_set_offload_mode - put TP in NIC/offload mode + * @adap: the adapter + * @enable: 1 to select offload mode, 0 for regular NIC + * + * Switches TP to NIC/offload mode. + */ +void t3_tp_set_offload_mode(struct adapter *adap, int enable) +{ + if (is_offload(adap) || !enable) + t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, + V_NICMODE(!enable)); +} + +/** + * pm_num_pages - calculate the number of pages of the payload memory + * @mem_size: the size of the payload memory + * @pg_size: the size of each payload memory page + * + * Calculate the number of pages, each of the given size, that fit in a + * memory of the specified size, respecting the HW requirement that the + * number of pages must be a multiple of 24. + */ +static inline unsigned int pm_num_pages(unsigned int mem_size, + unsigned int pg_size) +{ + unsigned int n = mem_size / pg_size; + + return n - n % 24; +} + +#define mem_region(adap, start, size, reg) \ + t3_write_reg((adap), A_ ## reg, (start)); \ + start += size + +/** + * partition_mem - partition memory and configure TP memory settings + * @adap: the adapter + * @p: the TP parameters + * + * Partitions context and payload memory and configures TP's memory + * registers. 
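+ *
+ * The mem_region() helper used by this function is a two-statement macro
+ * (write the base register, then advance the running offset), so it is
+ * only safe as a standalone statement; e.g.
+ *
+ *	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
+ *
+ * expands to a t3_write_reg() followed by "m += pstructs * 64".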
+ */ +static void partition_mem(struct adapter *adap, const struct tp_params *p) +{ + unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5); + unsigned int timers = 0, timers_shift = 22; + + if (adap->params.rev > 0) { + if (tids <= 16 * 1024) { + timers = 1; + timers_shift = 16; + } else if (tids <= 64 * 1024) { + timers = 2; + timers_shift = 18; + } else if (tids <= 256 * 1024) { + timers = 3; + timers_shift = 20; + } + } + + t3_write_reg(adap, A_TP_PMM_SIZE, + p->chan_rx_size | (p->chan_tx_size >> 16)); + + t3_write_reg(adap, A_TP_PMM_TX_BASE, 0); + t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size); + t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs); + t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX), + V_TXDATAACKIDX(fls(p->tx_pg_size) - 12)); + + t3_write_reg(adap, A_TP_PMM_RX_BASE, 0); + t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size); + t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs); + + pstructs = p->rx_num_pgs + p->tx_num_pgs; + /* Add a bit of headroom and make multiple of 24 */ + pstructs += 48; + pstructs -= pstructs % 24; + t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs); + + m = tids * TCB_SIZE; + mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR); + mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR); + t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m); + m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22); + mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE); + mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE); + mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE); + mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE); + + m = (m + 4095) & ~0xfff; + t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m); + t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m); + + tids = (p->cm_size - m - (3 << 20)) / 3072 - 32; + m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers - + adap->params.mc5.nfilters - adap->params.mc5.nroutes; + if (tids < m) + adap->params.mc5.nservers += m - tids; +} + +static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr, + u32 val) +{ + t3_write_reg(adap, A_TP_PIO_ADDR, addr); + t3_write_reg(adap, A_TP_PIO_DATA, val); +} + +static void tp_config(struct adapter *adap, const struct tp_params *p) +{ + t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU | + F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD | + F_TCPCHECKSUMOFFLOAD | V_IPTTL(64)); + t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) | + F_MTUENABLE | V_WINDOWSCALEMODE(1) | + V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1)); + t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) | + V_AUTOSTATE2(1) | V_AUTOSTATE1(0) | + V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) | + F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1)); + t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO, + F_IPV6ENABLE | F_NICMODE); + t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814); + t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105); + t3_set_reg_field(adap, A_TP_PARA_REG6, 0, + adap->params.rev > 0 ? 
F_ENABLEESND : + F_T3A_ENABLEESND); + + t3_set_reg_field(adap, A_TP_PC_CONFIG, + F_ENABLEEPCMDAFULL, + F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK | + F_TXCONGESTIONMODE | F_RXCONGESTIONMODE); + t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, + F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN | + F_ENABLEARPMISS | F_DISBLEDAPARBIT0); + t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080); + t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000); + + if (adap->params.rev > 0) { + tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE); + t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO, + F_TXPACEAUTO); + t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID); + t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT); + } else + t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED); + + if (adap->params.rev == T3_REV_C) + t3_set_reg_field(adap, A_TP_PC_CONFIG, + V_TABLELATENCYDELTA(M_TABLELATENCYDELTA), + V_TABLELATENCYDELTA(4)); + + t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0); + t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0); + t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0); + t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000); +} + +/* Desired TP timer resolution in usec */ +#define TP_TMR_RES 50 + +/* TCP timer values in ms */ +#define TP_DACK_TIMER 50 +#define TP_RTO_MIN 250 + +/** + * tp_set_timers - set TP timing parameters + * @adap: the adapter to set + * @core_clk: the core clock frequency in Hz + * + * Set TP's timing parameters, such as the various timer resolutions and + * the TCP timer values. + */ +static void tp_set_timers(struct adapter *adap, unsigned int core_clk) +{ + unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1; + unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */ + unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */ + unsigned int tps = core_clk >> tre; + + t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) | + V_DELAYEDACKRESOLUTION(dack_re) | + V_TIMESTAMPRESOLUTION(tstamp_re)); + t3_write_reg(adap, A_TP_DACK_TIMER, + (core_clk >> dack_re) / (1000 / TP_DACK_TIMER)); + t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100); + t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504); + t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908); + t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c); + t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | + V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) | + V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) | + V_KEEPALIVEMAX(9)); + +#define SECONDS * tps + + t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS); + t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN)); + t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS); + t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS); + t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS); + t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS); + t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS); + t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS); + t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS); + +#undef SECONDS +} + +/** + * t3_tp_set_coalescing_size - set receive coalescing size + * @adap: the adapter + * @size: the receive coalescing size + * @psh: whether a set PSH bit should deliver coalesced data + * + * Set the receive coalescing size and PSH bit handling. 
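+ *
+ * For reference, t3_init_hw() below calls this as
+ *
+ *	t3_tp_set_coalescing_size(adapter,
+ *				  min(adapter->params.sge.max_pkt_size,
+ *				      MAX_RX_COALESCING_LEN), 1);
+ *
+ * i.e. coalescing is enabled up to the hardware limit with PSH delivery
+ * turned on.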
+ */
+static int t3_tp_set_coalescing_size(struct adapter *adap,
+				     unsigned int size, int psh)
+{
+	u32 val;
+
+	if (size > MAX_RX_COALESCING_LEN)
+		return -EINVAL;
+
+	val = t3_read_reg(adap, A_TP_PARA_REG3);
+	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
+
+	if (size) {
+		val |= F_RXCOALESCEENABLE;
+		if (psh)
+			val |= F_RXCOALESCEPSHEN;
+		size = min(MAX_RX_COALESCING_LEN, size);
+		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
+			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
+	}
+	t3_write_reg(adap, A_TP_PARA_REG3, val);
+	return 0;
+}
+
+/**
+ * t3_tp_set_max_rxsize - set the max receive size
+ * @adap: the adapter
+ * @size: the max receive size
+ *
+ * Set TP's max receive size. This is the limit that applies when
+ * receive coalescing is disabled.
+ */
+static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
+{
+	t3_write_reg(adap, A_TP_PARA_REG7,
+		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
+}
+
+static void init_mtus(unsigned short mtus[])
+{
+	/*
+	 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
+	 * it can accommodate max size TCP/IP headers when SACK and timestamps
+	 * are enabled and still have at least 8 bytes of payload.
+	 */
+	mtus[0] = 88;
+	mtus[1] = 88;
+	mtus[2] = 256;
+	mtus[3] = 512;
+	mtus[4] = 576;
+	mtus[5] = 1024;
+	mtus[6] = 1280;
+	mtus[7] = 1492;
+	mtus[8] = 1500;
+	mtus[9] = 2002;
+	mtus[10] = 2048;
+	mtus[11] = 4096;
+	mtus[12] = 4352;
+	mtus[13] = 8192;
+	mtus[14] = 9000;
+	mtus[15] = 9600;
+}
+
+/*
+ * Initial congestion control parameters.
+ */
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+	a[9] = 2;
+	a[10] = 3;
+	a[11] = 4;
+	a[12] = 5;
+	a[13] = 6;
+	a[14] = 7;
+	a[15] = 8;
+	a[16] = 9;
+	a[17] = 10;
+	a[18] = 14;
+	a[19] = 17;
+	a[20] = 21;
+	a[21] = 25;
+	a[22] = 30;
+	a[23] = 35;
+	a[24] = 45;
+	a[25] = 60;
+	a[26] = 80;
+	a[27] = 100;
+	a[28] = 200;
+	a[29] = 300;
+	a[30] = 400;
+	a[31] = 500;
+
+	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+	b[9] = b[10] = 1;
+	b[11] = b[12] = 2;
+	b[13] = b[14] = b[15] = b[16] = 3;
+	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+	b[28] = b[29] = 6;
+	b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ * t3_load_mtus - write the MTU and congestion control HW tables
+ * @adap: the adapter
+ * @mtus: the unrestricted values for the MTU table
+ * @alpha: the values for the congestion control alpha parameter
+ * @beta: the values for the congestion control beta parameter
+ * @mtu_cap: the maximum permitted effective MTU
+ *
+ * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
+ * Update the high-speed congestion control table with the supplied alpha,
+ * beta, and MTUs.
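+ *
+ * A worked example of the additive-increment formula used below, with
+ * illustrative numbers: for mtu = 1500 and a window with alpha = 2 and
+ * avg_pkts = 10,
+ *
+ *	inc = max((1500 - 40) * 2 / 10, CC_MIN_INCR) = 292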
+ */ +void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS], + unsigned short alpha[NCCTRL_WIN], + unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap) +{ + static const unsigned int avg_pkts[NCCTRL_WIN] = { + 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, + 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, + 28672, 40960, 57344, 81920, 114688, 163840, 229376 + }; + + unsigned int i, w; + + for (i = 0; i < NMTUS; ++i) { + unsigned int mtu = min(mtus[i], mtu_cap); + unsigned int log2 = fls(mtu); + + if (!(mtu & ((1 << log2) >> 2))) /* round */ + log2--; + t3_write_reg(adap, A_TP_MTU_TABLE, + (i << 24) | (log2 << 16) | mtu); + + for (w = 0; w < NCCTRL_WIN; ++w) { + unsigned int inc; + + inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], + CC_MIN_INCR); + + t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | + (w << 16) | (beta[w] << 13) | inc); + } + } +} + +/** + * t3_tp_get_mib_stats - read TP's MIB counters + * @adap: the adapter + * @tps: holds the returned counter values + * + * Returns the values of TP's MIB counters. + */ +void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps) +{ + t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps, + sizeof(*tps) / sizeof(u32), 0); +} + +#define ulp_region(adap, name, start, len) \ + t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \ + t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \ + (start) + (len) - 1); \ + start += len + +#define ulptx_region(adap, name, start, len) \ + t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \ + t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \ + (start) + (len) - 1) + +static void ulp_config(struct adapter *adap, const struct tp_params *p) +{ + unsigned int m = p->chan_rx_size; + + ulp_region(adap, ISCSI, m, p->chan_rx_size / 8); + ulp_region(adap, TDDP, m, p->chan_rx_size / 8); + ulptx_region(adap, TPT, m, p->chan_rx_size / 4); + ulp_region(adap, STAG, m, p->chan_rx_size / 4); + ulp_region(adap, RQ, m, p->chan_rx_size / 4); + ulptx_region(adap, PBL, m, p->chan_rx_size / 4); + ulp_region(adap, PBL, m, p->chan_rx_size / 4); + t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff); +} + +/** + * t3_set_proto_sram - set the contents of the protocol sram + * @adapter: the adapter + * @data: the protocol image + * + * Write the contents of the protocol SRAM. 
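+ *
+ * The loop below consumes five 32-bit words per SRAM line, so @data is
+ * expected to be PROTO_SRAM_LINES * 5 * 4 bytes of big-endian image.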
+ */ +int t3_set_proto_sram(struct adapter *adap, const u8 *data) +{ + int i; + const __be32 *buf = (const __be32 *)data; + + for (i = 0; i < PROTO_SRAM_LINES; i++) { + t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++)); + t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++)); + t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++)); + t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++)); + t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++)); + + t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31); + if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1)) + return -EIO; + } + t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0); + + return 0; +} + +void t3_config_trace_filter(struct adapter *adapter, + const struct trace_params *tp, int filter_index, + int invert, int enable) +{ + u32 addr, key[4], mask[4]; + + key[0] = tp->sport | (tp->sip << 16); + key[1] = (tp->sip >> 16) | (tp->dport << 16); + key[2] = tp->dip; + key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20); + + mask[0] = tp->sport_mask | (tp->sip_mask << 16); + mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16); + mask[2] = tp->dip_mask; + mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20); + + if (invert) + key[3] |= (1 << 29); + if (enable) + key[3] |= (1 << 28); + + addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0; + tp_wr_indirect(adapter, addr++, key[0]); + tp_wr_indirect(adapter, addr++, mask[0]); + tp_wr_indirect(adapter, addr++, key[1]); + tp_wr_indirect(adapter, addr++, mask[1]); + tp_wr_indirect(adapter, addr++, key[2]); + tp_wr_indirect(adapter, addr++, mask[2]); + tp_wr_indirect(adapter, addr++, key[3]); + tp_wr_indirect(adapter, addr, mask[3]); + t3_read_reg(adapter, A_TP_PIO_DATA); +} + +/** + * t3_config_sched - configure a HW traffic scheduler + * @adap: the adapter + * @kbps: target rate in Kbps + * @sched: the scheduler index + * + * Configure a HW scheduler for the target rate + */ +int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched) +{ + unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; + unsigned int clk = adap->params.vpd.cclk * 1000; + unsigned int selected_cpt = 0, selected_bpt = 0; + + if (kbps > 0) { + kbps *= 125; /* -> bytes */ + for (cpt = 1; cpt <= 255; cpt++) { + tps = clk / cpt; + bpt = (kbps + tps / 2) / tps; + if (bpt > 0 && bpt <= 255) { + v = bpt * tps; + delta = v >= kbps ? 
v - kbps : kbps - v; + if (delta <= mindelta) { + mindelta = delta; + selected_cpt = cpt; + selected_bpt = bpt; + } + } else if (selected_cpt) + break; + } + if (!selected_cpt) + return -EINVAL; + } + t3_write_reg(adap, A_TP_TM_PIO_ADDR, + A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); + v = t3_read_reg(adap, A_TP_TM_PIO_DATA); + if (sched & 1) + v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24); + else + v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8); + t3_write_reg(adap, A_TP_TM_PIO_DATA, v); + return 0; +} + +static int tp_init(struct adapter *adap, const struct tp_params *p) +{ + int busy = 0; + + tp_config(adap, p); + t3_set_vlan_accel(adap, 3, 0); + + if (is_offload(adap)) { + tp_set_timers(adap, adap->params.vpd.cclk * 1000); + t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE); + busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE, + 0, 1000, 5); + if (busy) + CH_ERR(adap, "TP initialization timed out\n"); + } + + if (!busy) + t3_write_reg(adap, A_TP_RESET, F_TPRESET); + return busy; +} + +/* + * Perform the bits of HW initialization that are dependent on the Tx + * channels being used. + */ +static void chan_init_hw(struct adapter *adap, unsigned int chan_map) +{ + int i; + + if (chan_map != 3) { /* one channel */ + t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0); + t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0); + t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT | + (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE : + F_TPTXPORT1EN | F_PORT1ACTIVE)); + t3_write_reg(adap, A_PM1_TX_CFG, + chan_map == 1 ? 0xffffffff : 0); + } else { /* two channels */ + t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN); + t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB); + t3_write_reg(adap, A_ULPTX_DMA_WEIGHT, + V_D1_WEIGHT(16) | V_D0_WEIGHT(16)); + t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN | + F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE | + F_ENFORCEPKT); + t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000); + t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE); + t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP, + V_TX_MOD_QUEUE_REQ_MAP(0xaa)); + for (i = 0; i < 16; i++) + t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, + (i << 16) | 0x1010); + } +} + +static int calibrate_xgm(struct adapter *adapter) +{ + if (uses_xaui(adapter)) { + unsigned int v, i; + + for (i = 0; i < 5; ++i) { + t3_write_reg(adapter, A_XGM_XAUI_IMP, 0); + t3_read_reg(adapter, A_XGM_XAUI_IMP); + msleep(1); + v = t3_read_reg(adapter, A_XGM_XAUI_IMP); + if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) { + t3_write_reg(adapter, A_XGM_XAUI_IMP, + V_XAUIIMP(G_CALIMP(v) >> 2)); + return 0; + } + } + CH_ERR(adapter, "MAC calibration failed\n"); + return -1; + } else { + t3_write_reg(adapter, A_XGM_RGMII_IMP, + V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3)); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, + F_XGM_IMPSETUPDATE); + } + return 0; +} + +static void calibrate_xgm_t3b(struct adapter *adapter) +{ + if (!uses_xaui(adapter)) { + t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET | + F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3)); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, + F_XGM_IMPSETUPDATE); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, + 0); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE); + } +} + +struct mc7_timing_params { + unsigned char ActToPreDly; + unsigned char ActToRdWrDly; + unsigned 
char PreCyc; + unsigned char RefCyc[5]; + unsigned char BkCyc; + unsigned char WrToRdDly; + unsigned char RdToWrDly; +}; + +/* + * Write a value to a register and check that the write completed. These + * writes normally complete in a cycle or two, so one read should suffice. + * The very first read exists to flush the posted write to the device. + */ +static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val) +{ + t3_write_reg(adapter, addr, val); + t3_read_reg(adapter, addr); /* flush */ + if (!(t3_read_reg(adapter, addr) & F_BUSY)) + return 0; + CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr); + return -EIO; +} + +static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type) +{ + static const unsigned int mc7_mode[] = { + 0x632, 0x642, 0x652, 0x432, 0x442 + }; + static const struct mc7_timing_params mc7_timings[] = { + {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4}, + {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4}, + {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4}, + {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4}, + {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4} + }; + + u32 val; + unsigned int width, density, slow, attempts; + struct adapter *adapter = mc7->adapter; + const struct mc7_timing_params *p = &mc7_timings[mem_type]; + + if (!mc7->size) + return 0; + + val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); + slow = val & F_SLOW; + width = G_WIDTH(val); + density = G_DEN(val); + + t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN); + val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */ + msleep(1); + + if (!slow) { + t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN); + t3_read_reg(adapter, mc7->offset + A_MC7_CAL); + msleep(1); + if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) & + (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) { + CH_ERR(adapter, "%s MC7 calibration timed out\n", + mc7->name); + goto out_fail; + } + } + + t3_write_reg(adapter, mc7->offset + A_MC7_PARM, + V_ACTTOPREDLY(p->ActToPreDly) | + V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) | + V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) | + V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly)); + + t3_write_reg(adapter, mc7->offset + A_MC7_CFG, + val | F_CLKEN | F_TERM150); + t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */ + + if (!slow) + t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB, + F_DLLENB); + udelay(1); + + val = slow ? 
3 : 6; + if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val)) + goto out_fail; + + if (!slow) { + t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100); + t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0); + udelay(5); + } + + if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_MODE, + mc7_mode[mem_type]) || + wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) || + wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val)) + goto out_fail; + + /* clock value is in KHz */ + mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */ + mc7_clock /= 1000000; /* KHz->MHz, ns->us */ + + t3_write_reg(adapter, mc7->offset + A_MC7_REF, + F_PERREFEN | V_PREREFDIV(mc7_clock)); + t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */ + + t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN); + t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0); + t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0); + t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END, + (mc7->size << width) - 1); + t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1)); + t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */ + + attempts = 50; + do { + msleep(250); + val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); + } while ((val & F_BUSY) && --attempts); + if (val & F_BUSY) { + CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name); + goto out_fail; + } + + /* Enable normal memory accesses. */ + t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY); + return 0; + +out_fail: + return -1; +} + +static void config_pcie(struct adapter *adap) +{ + static const u16 ack_lat[4][6] = { + {237, 416, 559, 1071, 2095, 4143}, + {128, 217, 289, 545, 1057, 2081}, + {73, 118, 154, 282, 538, 1050}, + {67, 107, 86, 150, 278, 534} + }; + static const u16 rpl_tmr[4][6] = { + {711, 1248, 1677, 3213, 6285, 12429}, + {384, 651, 867, 1635, 3171, 6243}, + {219, 354, 462, 846, 1614, 3150}, + {201, 321, 258, 450, 834, 1602} + }; + + u16 val, devid; + unsigned int log2_width, pldsize; + unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt; + + pci_read_config_word(adap->pdev, + adap->pdev->pcie_cap + PCI_EXP_DEVCTL, + &val); + pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5; + + pci_read_config_word(adap->pdev, 0x2, &devid); + if (devid == 0x37) { + pci_write_config_word(adap->pdev, + adap->pdev->pcie_cap + PCI_EXP_DEVCTL, + val & ~PCI_EXP_DEVCTL_READRQ & + ~PCI_EXP_DEVCTL_PAYLOAD); + pldsize = 0; + } + + pci_read_config_word(adap->pdev, adap->pdev->pcie_cap + PCI_EXP_LNKCTL, + &val); + + fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0)); + fst_trn_rx = adap->params.rev == 0 ? 
fst_trn_tx :
+		G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
+	log2_width = fls(adap->params.pci.width) - 1;
+	acklat = ack_lat[log2_width][pldsize];
+	if (val & 1)		/* check LOsEnable */
+		acklat += fst_trn_tx * 4;
+	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
+
+	if (adap->params.rev == 0)
+		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
+				 V_T3A_ACKLAT(M_T3A_ACKLAT),
+				 V_T3A_ACKLAT(acklat));
+	else
+		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
+				 V_ACKLAT(acklat));
+
+	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
+			 V_REPLAYLMT(rpllmt));
+
+	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
+	t3_set_reg_field(adap, A_PCIE_CFG, 0,
+			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
+			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
+}
+
+/*
+ * Initialize and configure T3 HW modules. This performs the
+ * initialization steps that need to be done once after a card is reset.
+ * MAC and PHY initialization is handled separately whenever a port is enabled.
+ *
+ * fw_params are passed to FW and their value is platform dependent. Only the
+ * top 8 bits are available for use, the rest must be 0.
+ */
+int t3_init_hw(struct adapter *adapter, u32 fw_params)
+{
+	int err = -EIO, attempts, i;
+	const struct vpd_params *vpd = &adapter->params.vpd;
+
+	if (adapter->params.rev > 0)
+		calibrate_xgm_t3b(adapter);
+	else if (calibrate_xgm(adapter))
+		goto out_err;
+
+	if (vpd->mclk) {
+		partition_mem(adapter, &adapter->params.tp);
+
+		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
+		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
+		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
+		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
+				adapter->params.mc5.nfilters,
+				adapter->params.mc5.nroutes))
+			goto out_err;
+
+		for (i = 0; i < 32; i++)
+			if (clear_sge_ctxt(adapter, i, F_CQ))
+				goto out_err;
+	}
+
+	if (tp_init(adapter, &adapter->params.tp))
+		goto out_err;
+
+	t3_tp_set_coalescing_size(adapter,
+				  min(adapter->params.sge.max_pkt_size,
+				      MAX_RX_COALESCING_LEN), 1);
+	t3_tp_set_max_rxsize(adapter,
+			     min(adapter->params.sge.max_pkt_size, 16384U));
+	ulp_config(adapter, &adapter->params.tp);
+
+	if (is_pcie(adapter))
+		config_pcie(adapter);
+	else
+		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
+				 F_DMASTOPEN | F_CLIDECEN);
+
+	if (adapter->params.rev == T3_REV_C)
+		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
+				 F_CFG_CQE_SOP_MASK);
+
+	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
+	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
+	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
+	chan_init_hw(adapter, adapter->params.chan_map);
+	t3_sge_init(adapter, &adapter->params.sge);
+	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
+
+	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
+
+	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
+	t3_write_reg(adapter, A_CIM_BOOT_CFG,
+		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
+	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */
+
+	attempts = 100;
+	do {			/* wait for uP to initialize */
+		msleep(20);
+	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
+	if (!attempts) {
+		CH_ERR(adapter, "uP initialization timed out\n");
+		goto out_err;
+	}
+
+	err = 0;
+out_err:
+	return err;
+}
+
+/**
+ * get_pci_mode - determine a card's PCI mode
+ * @adapter: the adapter
+ * @p: where to store the PCI settings
+ *
+ * Determines a card's PCI mode and associated parameters, such as speed
+ * and width.
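+ *
+ * Illustration: for PCIe the negotiated link width is read from the Link
+ * Status register (bits 9:4), so an x8 link yields p->width == 8, which
+ * config_pcie() above turns into log2_width = fls(8) - 1 = 3 to index
+ * its latency tables.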
+ */ +static void get_pci_mode(struct adapter *adapter, struct pci_params *p) +{ + static unsigned short speed_map[] = { 33, 66, 100, 133 }; + u32 pci_mode, pcie_cap; + + pcie_cap = pci_pcie_cap(adapter->pdev); + if (pcie_cap) { + u16 val; + + p->variant = PCI_VARIANT_PCIE; + pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, + &val); + p->width = (val >> 4) & 0x3f; + return; + } + + pci_mode = t3_read_reg(adapter, A_PCIX_MODE); + p->speed = speed_map[G_PCLKRANGE(pci_mode)]; + p->width = (pci_mode & F_64BIT) ? 64 : 32; + pci_mode = G_PCIXINITPAT(pci_mode); + if (pci_mode == 0) + p->variant = PCI_VARIANT_PCI; + else if (pci_mode < 4) + p->variant = PCI_VARIANT_PCIX_MODE1_PARITY; + else if (pci_mode < 8) + p->variant = PCI_VARIANT_PCIX_MODE1_ECC; + else + p->variant = PCI_VARIANT_PCIX_266_MODE2; +} + +/** + * init_link_config - initialize a link's SW state + * @lc: structure holding the link state + * @ai: information about the current card + * + * Initializes the SW state maintained for each link, including the link's + * capabilities and default speed/duplex/flow-control/autonegotiation + * settings. + */ +static void init_link_config(struct link_config *lc, unsigned int caps) +{ + lc->supported = caps; + lc->requested_speed = lc->speed = SPEED_INVALID; + lc->requested_duplex = lc->duplex = DUPLEX_INVALID; + lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; + if (lc->supported & SUPPORTED_Autoneg) { + lc->advertising = lc->supported; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->advertising = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +/** + * mc7_calc_size - calculate MC7 memory size + * @cfg: the MC7 configuration + * + * Calculates the size of an MC7 memory in bytes from the value of its + * configuration register. + */ +static unsigned int mc7_calc_size(u32 cfg) +{ + unsigned int width = G_WIDTH(cfg); + unsigned int banks = !!(cfg & F_BKS) + 1; + unsigned int org = !!(cfg & F_ORG) + 1; + unsigned int density = G_DEN(cfg); + unsigned int MBs = ((256 << density) * banks) / (org << width); + + return MBs << 20; +} + +static void mc7_prep(struct adapter *adapter, struct mc7 *mc7, + unsigned int base_addr, const char *name) +{ + u32 cfg; + + mc7->adapter = adapter; + mc7->name = name; + mc7->offset = base_addr - MC7_PMRX_BASE_ADDR; + cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); + mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg); + mc7->width = G_WIDTH(cfg); +} + +static void mac_prep(struct cmac *mac, struct adapter *adapter, int index) +{ + u16 devid; + + mac->adapter = adapter; + pci_read_config_word(adapter->pdev, 0x2, &devid); + + if (devid == 0x37 && !adapter->params.vpd.xauicfg[1]) + index = 0; + mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index; + mac->nucast = 1; + + if (adapter->params.rev == 0 && uses_xaui(adapter)) { + t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset, + is_10G(adapter) ? 0x2901c04 : 0x2301c04); + t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset, + F_ENRGMII, 0); + } +} + +static void early_hw_init(struct adapter *adapter, + const struct adapter_info *ai) +{ + u32 val = V_PORTSPEED(is_10G(adapter) ? 
3 : 2); + + mi1_init(adapter, ai); + t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */ + V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1)); + t3_write_reg(adapter, A_T3DBG_GPIO_EN, + ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL); + t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0); + t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff)); + + if (adapter->params.rev == 0 || !uses_xaui(adapter)) + val |= F_ENRGMII; + + /* Enable MAC clocks so we can access the registers */ + t3_write_reg(adapter, A_XGM_PORT_CFG, val); + t3_read_reg(adapter, A_XGM_PORT_CFG); + + val |= F_CLKDIVRESET_; + t3_write_reg(adapter, A_XGM_PORT_CFG, val); + t3_read_reg(adapter, A_XGM_PORT_CFG); + t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val); + t3_read_reg(adapter, A_XGM_PORT_CFG); +} + +/* + * Reset the adapter. + * Older PCIe cards lose their config space during reset, PCI-X + * ones don't. + */ +int t3_reset_adapter(struct adapter *adapter) +{ + int i, save_and_restore_pcie = + adapter->params.rev < T3_REV_B2 && is_pcie(adapter); + uint16_t devid = 0; + + if (save_and_restore_pcie) + pci_save_state(adapter->pdev); + t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE); + + /* + * Delay. Give Some time to device to reset fully. + * XXX The delay time should be modified. + */ + for (i = 0; i < 10; i++) { + msleep(50); + pci_read_config_word(adapter->pdev, 0x00, &devid); + if (devid == 0x1425) + break; + } + + if (devid != 0x1425) + return -1; + + if (save_and_restore_pcie) + pci_restore_state(adapter->pdev); + return 0; +} + +static int init_parity(struct adapter *adap) +{ + int i, err, addr; + + if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + for (err = i = 0; !err && i < 16; i++) + err = clear_sge_ctxt(adap, i, F_EGRESS); + for (i = 0xfff0; !err && i <= 0xffff; i++) + err = clear_sge_ctxt(adap, i, F_EGRESS); + for (i = 0; !err && i < SGE_QSETS; i++) + err = clear_sge_ctxt(adap, i, F_RESPONSEQ); + if (err) + return err; + + t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0); + for (i = 0; i < 4; i++) + for (addr = 0; addr <= M_IBQDBGADDR; addr++) { + t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN | + F_IBQDBGWR | V_IBQDBGQID(i) | + V_IBQDBGADDR(addr)); + err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, + F_IBQDBGBUSY, 0, 2, 1); + if (err) + return err; + } + return 0; +} + +/* + * Initialize adapter SW state for the various HW modules, set initial values + * for some adapter tunables, take PHYs out of reset, and initialize the MDIO + * interface. + */ +int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, + int reset) +{ + int ret; + unsigned int i, j = -1; + + get_pci_mode(adapter, &adapter->params.pci); + + adapter->params.info = ai; + adapter->params.nports = ai->nports0 + ai->nports1; + adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1); + adapter->params.rev = t3_read_reg(adapter, A_PL_REV); + /* + * We used to only run the "adapter check task" once a second if + * we had PHYs which didn't support interrupts (we would check + * their link status once a second). Now we check other conditions + * in that routine which could potentially impose a very high + * interrupt load on the system. As such, we now always scan the + * adapter state once a second ... + */ + adapter->params.linkpoll_period = 10; + adapter->params.stats_update_period = is_10G(adapter) ? 
+ MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10); + adapter->params.pci.vpd_cap_addr = + pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD); + ret = get_vpd_params(adapter, &adapter->params.vpd); + if (ret < 0) + return ret; + + if (reset && t3_reset_adapter(adapter)) + return -1; + + t3_sge_prep(adapter, &adapter->params.sge); + + if (adapter->params.vpd.mclk) { + struct tp_params *p = &adapter->params.tp; + + mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX"); + mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX"); + mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM"); + + p->nchan = adapter->params.chan_map == 3 ? 2 : 1; + p->pmrx_size = t3_mc7_size(&adapter->pmrx); + p->pmtx_size = t3_mc7_size(&adapter->pmtx); + p->cm_size = t3_mc7_size(&adapter->cm); + p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */ + p->chan_tx_size = p->pmtx_size / p->nchan; + p->rx_pg_size = 64 * 1024; + p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024; + p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size); + p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size); + p->ntimer_qs = p->cm_size >= (128 << 20) || + adapter->params.rev > 0 ? 12 : 6; + } + + adapter->params.offload = t3_mc7_size(&adapter->pmrx) && + t3_mc7_size(&adapter->pmtx) && + t3_mc7_size(&adapter->cm); + + if (is_offload(adapter)) { + adapter->params.mc5.nservers = DEFAULT_NSERVERS; + adapter->params.mc5.nfilters = adapter->params.rev > 0 ? + DEFAULT_NFILTERS : 0; + adapter->params.mc5.nroutes = 0; + t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT); + + init_mtus(adapter->params.mtus); + init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); + } + + early_hw_init(adapter, ai); + ret = init_parity(adapter); + if (ret) + return ret; + + for_each_port(adapter, i) { + u8 hw_addr[6]; + const struct port_type_info *pti; + struct port_info *p = adap2pinfo(adapter, i); + + while (!adapter->params.vpd.port_type[++j]) + ; + + pti = &port_types[adapter->params.vpd.port_type[j]]; + if (!pti->phy_prep) { + CH_ALERT(adapter, "Invalid port type index %d\n", + adapter->params.vpd.port_type[j]); + return -EINVAL; + } + + p->phy.mdio.dev = adapter->port[i]; + ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j, + ai->mdio_ops); + if (ret) + return ret; + mac_prep(&p->mac, adapter, j); + + /* + * The VPD EEPROM stores the base Ethernet address for the + * card. A port's address is derived from the base by adding + * the port's index to the base's low octet. + */ + memcpy(hw_addr, adapter->params.vpd.eth_base, 5); + hw_addr[5] = adapter->params.vpd.eth_base[5] + i; + + memcpy(adapter->port[i]->dev_addr, hw_addr, + ETH_ALEN); + memcpy(adapter->port[i]->perm_addr, hw_addr, + ETH_ALEN); + init_link_config(&p->link_config, p->phy.caps); + p->phy.ops->power_down(&p->phy, 1); + + /* + * If the PHY doesn't support interrupts for link status + * changes, schedule a scan of the adapter links at least + * once a second. 
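+ * (linkpoll_period is kept in tenths of a second, so the cap of 10
+ * applied below corresponds to the once-a-second scan mentioned above.)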
+ */ + if (!(p->phy.caps & SUPPORTED_IRQ) && + adapter->params.linkpoll_period > 10) + adapter->params.linkpoll_period = 10; + } + + return 0; +} + +void t3_led_ready(struct adapter *adapter) +{ + t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, + F_GPIO0_OUT_VAL); +} + +int t3_replay_prep_adapter(struct adapter *adapter) +{ + const struct adapter_info *ai = adapter->params.info; + unsigned int i, j = -1; + int ret; + + early_hw_init(adapter, ai); + ret = init_parity(adapter); + if (ret) + return ret; + + for_each_port(adapter, i) { + const struct port_type_info *pti; + struct port_info *p = adap2pinfo(adapter, i); + + while (!adapter->params.vpd.port_type[++j]) + ; + + pti = &port_types[adapter->params.vpd.port_type[j]]; + ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL); + if (ret) + return ret; + p->phy.ops->power_down(&p->phy, 1); + } + +return 0; +} + diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h new file mode 100644 index 0000000..705713b --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2006-2008 Chelsio Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+#ifndef _T3CDEV_H_
+#define _T3CDEV_H_
+
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <net/neighbour.h>
+
+#define T3CNAMSIZ 16
+
+struct cxgb3_client;
+
+enum t3ctype {
+	T3A = 0,
+	T3B,
+	T3C,
+};
+
+struct t3cdev {
+	char name[T3CNAMSIZ];	/* T3C device name */
+	enum t3ctype type;
+	struct list_head ofld_dev_list;	/* for list linking */
+	struct net_device *lldev;	/* LL dev associated with T3C messages */
+	struct proc_dir_entry *proc_dir;	/* root of proc dir for this T3C */
+	int (*send)(struct t3cdev *dev, struct sk_buff *skb);
+	int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
+	int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
+	void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
+	void *priv;		/* driver private data */
+	void *l2opt;		/* optional layer 2 data */
+	void *l3opt;		/* optional layer 3 data */
+	void *l4opt;		/* optional layer 4 data */
+	void *ulp;		/* ulp stuff */
+	void *ulp_iscsi;	/* ulp iscsi */
+};
+
+#endif /* _T3CDEV_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/version.h b/drivers/net/ethernet/chelsio/cxgb3/version.h
new file mode 100644
index 0000000..8bda06e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/version.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
+#ifndef __CHELSIO_VERSION_H
+#define __CHELSIO_VERSION_H
+#define DRV_DESC "Chelsio T3 Network Driver"
+#define DRV_NAME "cxgb3"
+/* Driver version */
+#define DRV_VERSION "1.1.4-ko"
+
+/* Firmware version */
+#define FW_VERSION_MAJOR 7
+#define FW_VERSION_MINOR 10
+#define FW_VERSION_MICRO 0
+#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
new file mode 100644
index 0000000..4f9a1c2
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "common.h" + +/* VSC8211 PHY specific registers. */ +enum { + VSC8211_SIGDET_CTRL = 19, + VSC8211_EXT_CTRL = 23, + VSC8211_INTR_ENABLE = 25, + VSC8211_INTR_STATUS = 26, + VSC8211_LED_CTRL = 27, + VSC8211_AUX_CTRL_STAT = 28, + VSC8211_EXT_PAGE_AXS = 31, +}; + +enum { + VSC_INTR_RX_ERR = 1 << 0, + VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */ + VSC_INTR_CABLE = 1 << 2, /* cable impairment */ + VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */ + VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */ + VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */ + VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */ + VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */ + VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */ + VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */ + VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */ + VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */ + VSC_INTR_LINK_CHG = 1 << 13, /* link change */ + VSC_INTR_SPD_CHG = 1 << 14, /* speed change */ + VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */ +}; + +enum { + VSC_CTRL_CLAUSE37_VIEW = 1 << 4, /* Switch to Clause 37 view */ + VSC_CTRL_MEDIA_MODE_HI = 0xf000 /* High part of media mode select */ +}; + +#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \ + VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \ + VSC_INTR_NEG_DONE) +#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \ + VSC_INTR_ENABLE) + +/* PHY specific auxiliary control & status register fields */ +#define S_ACSR_ACTIPHY_TMR 0 +#define M_ACSR_ACTIPHY_TMR 0x3 +#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR) + +#define S_ACSR_SPEED 3 +#define M_ACSR_SPEED 0x3 +#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED) + +#define S_ACSR_DUPLEX 5 +#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX) + +#define S_ACSR_ACTIPHY 6 +#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY) + +/* + * Reset the PHY. This PHY completes reset immediately so we never wait. 
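+ * The @wait argument is therefore ignored; callers in this file, e.g.
+ * t3_vsc8211_phy_prep(), simply pass 0.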
+ */ +static int vsc8211_reset(struct cphy *cphy, int wait) +{ + return t3_phy_reset(cphy, MDIO_DEVAD_NONE, 0); +} + +static int vsc8211_intr_enable(struct cphy *cphy) +{ + return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, + INTR_MASK); +} + +static int vsc8211_intr_disable(struct cphy *cphy) +{ + return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, 0); +} + +static int vsc8211_intr_clear(struct cphy *cphy) +{ + u32 val; + + /* Clear PHY interrupts by reading the register. */ + return t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &val); +} + +static int vsc8211_autoneg_enable(struct cphy *cphy) +{ + return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR, + BMCR_PDOWN | BMCR_ISOLATE, + BMCR_ANENABLE | BMCR_ANRESTART); +} + +static int vsc8211_autoneg_restart(struct cphy *cphy) +{ + return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR, + BMCR_PDOWN | BMCR_ISOLATE, + BMCR_ANRESTART); +} + +static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + unsigned int bmcr, status, lpa, adv; + int err, sp = -1, dplx = -1, pause = 0; + + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr); + if (!err) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status); + if (err) + return err; + + if (link_ok) { + /* + * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it + * once more to get the current link state. + */ + if (!(status & BMSR_LSTATUS)) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, + &status); + if (err) + return err; + *link_ok = (status & BMSR_LSTATUS) != 0; + } + if (!(bmcr & BMCR_ANENABLE)) { + dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; + if (bmcr & BMCR_SPEED1000) + sp = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + sp = SPEED_100; + else + sp = SPEED_10; + } else if (status & BMSR_ANEGCOMPLETE) { + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_AUX_CTRL_STAT, + &status); + if (err) + return err; + + dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; + sp = G_ACSR_SPEED(status); + if (sp == 0) + sp = SPEED_10; + else if (sp == 1) + sp = SPEED_100; + else + sp = SPEED_1000; + + if (fc && dplx == DUPLEX_FULL) { + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, + &lpa); + if (!err) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, + MII_ADVERTISE, &adv); + if (err) + return err; + + if (lpa & adv & ADVERTISE_PAUSE_CAP) + pause = PAUSE_RX | PAUSE_TX; + else if ((lpa & ADVERTISE_PAUSE_CAP) && + (lpa & ADVERTISE_PAUSE_ASYM) && + (adv & ADVERTISE_PAUSE_ASYM)) + pause = PAUSE_TX; + else if ((lpa & ADVERTISE_PAUSE_ASYM) && + (adv & ADVERTISE_PAUSE_CAP)) + pause = PAUSE_RX; + } + } + if (speed) + *speed = sp; + if (duplex) + *duplex = dplx; + if (fc) + *fc = pause; + return 0; +} + +static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + unsigned int bmcr, status, lpa, adv; + int err, sp = -1, dplx = -1, pause = 0; + + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr); + if (!err) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status); + if (err) + return err; + + if (link_ok) { + /* + * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it + * once more to get the current link state. + */ + if (!(status & BMSR_LSTATUS)) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, + &status); + if (err) + return err; + *link_ok = (status & BMSR_LSTATUS) != 0; + } + if (!(bmcr & BMCR_ANENABLE)) { + dplx = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; + if (bmcr & BMCR_SPEED1000) + sp = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + sp = SPEED_100; + else + sp = SPEED_10; + } else if (status & BMSR_ANEGCOMPLETE) { + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, &lpa); + if (!err) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_ADVERTISE, + &adv); + if (err) + return err; + + if (adv & lpa & ADVERTISE_1000XFULL) { + dplx = DUPLEX_FULL; + sp = SPEED_1000; + } else if (adv & lpa & ADVERTISE_1000XHALF) { + dplx = DUPLEX_HALF; + sp = SPEED_1000; + } + + if (fc && dplx == DUPLEX_FULL) { + if (lpa & adv & ADVERTISE_1000XPAUSE) + pause = PAUSE_RX | PAUSE_TX; + else if ((lpa & ADVERTISE_1000XPAUSE) && + (adv & lpa & ADVERTISE_1000XPSE_ASYM)) + pause = PAUSE_TX; + else if ((lpa & ADVERTISE_1000XPSE_ASYM) && + (adv & ADVERTISE_1000XPAUSE)) + pause = PAUSE_RX; + } + } + if (speed) + *speed = sp; + if (duplex) + *duplex = dplx; + if (fc) + *fc = pause; + return 0; +} + +#ifdef UNUSED +/* + * Enable/disable auto MDI/MDI-X in forced link speed mode. + */ +static int vsc8211_set_automdi(struct cphy *phy, int enable) +{ + int err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0x52b5); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 18, 0x12); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 17, enable ? 0x2803 : 0x3003); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 16, 0x87fa); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0); + if (err) + return err; + + return 0; +} + +int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex) +{ + int err; + + err = t3_set_phy_speed_duplex(phy, speed, duplex); + if (!err) + err = vsc8211_set_automdi(phy, 1); + return err; +} +#endif /* UNUSED */ + +static int vsc8211_power_down(struct cphy *cphy, int enable) +{ + return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN, + enable ? 
BMCR_PDOWN : 0); +} + +static int vsc8211_intr_handler(struct cphy *cphy) +{ + unsigned int cause; + int err, cphy_cause = 0; + + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &cause); + if (err) + return err; + + cause &= INTR_MASK; + if (cause & CFG_CHG_INTR_MASK) + cphy_cause |= cphy_cause_link_change; + if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO)) + cphy_cause |= cphy_cause_fifo_error; + return cphy_cause; +} + +static struct cphy_ops vsc8211_ops = { + .reset = vsc8211_reset, + .intr_enable = vsc8211_intr_enable, + .intr_disable = vsc8211_intr_disable, + .intr_clear = vsc8211_intr_clear, + .intr_handler = vsc8211_intr_handler, + .autoneg_enable = vsc8211_autoneg_enable, + .autoneg_restart = vsc8211_autoneg_restart, + .advertise = t3_phy_advertise, + .set_speed_duplex = t3_set_phy_speed_duplex, + .get_link_status = vsc8211_get_link_status, + .power_down = vsc8211_power_down, +}; + +static struct cphy_ops vsc8211_fiber_ops = { + .reset = vsc8211_reset, + .intr_enable = vsc8211_intr_enable, + .intr_disable = vsc8211_intr_disable, + .intr_clear = vsc8211_intr_clear, + .intr_handler = vsc8211_intr_handler, + .autoneg_enable = vsc8211_autoneg_enable, + .autoneg_restart = vsc8211_autoneg_restart, + .advertise = t3_phy_advertise_fiber, + .set_speed_duplex = t3_set_phy_speed_duplex, + .get_link_status = vsc8211_get_link_status_fiber, + .power_down = vsc8211_power_down, +}; + +int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + int err; + unsigned int val; + + cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops, + SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | + SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T"); + msleep(20); /* PHY needs ~10ms to start responding to MDIO */ + + err = t3_mdio_read(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, &val); + if (err) + return err; + if (val & VSC_CTRL_MEDIA_MODE_HI) { + /* copper interface, just need to configure the LEDs */ + return t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_LED_CTRL, + 0x100); + } + + phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ; + phy->desc = "1000BASE-X"; + phy->ops = &vsc8211_fiber_ops; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 1); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_SIGDET_CTRL, 1); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, + val | VSC_CTRL_CLAUSE37_VIEW); + if (err) + return err; + + err = vsc8211_reset(phy, 0); + if (err) + return err; + + udelay(5); /* delay after reset before next SMI */ + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c new file mode 100644 index 0000000..3af19a5 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c @@ -0,0 +1,657 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "common.h" +#include "regs.h" + +/* + * # of exact address filters. The first one is used for the station address, + * the rest are available for multicast addresses. + */ +#define EXACT_ADDR_FILTERS 8 + +static inline int macidx(const struct cmac *mac) +{ + return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR); +} + +static void xaui_serdes_reset(struct cmac *mac) +{ + static const unsigned int clear[] = { + F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1, + F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3 + }; + + int i; + struct adapter *adap = mac->adapter; + u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset; + + t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] | + F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 | + F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 | + F_RESETPLL23 | F_RESETPLL01); + t3_read_reg(adap, ctrl); + udelay(15); + + for (i = 0; i < ARRAY_SIZE(clear); i++) { + t3_set_reg_field(adap, ctrl, clear[i], 0); + udelay(15); + } +} + +void t3b_pcs_reset(struct cmac *mac) +{ + t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, + F_PCS_RESET_, 0); + udelay(20); + t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0, + F_PCS_RESET_); +} + +int t3_mac_reset(struct cmac *mac) +{ + static const struct addr_val_pair mac_reset_avp[] = { + {A_XGM_TX_CTRL, 0}, + {A_XGM_RX_CTRL, 0}, + {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES | + F_RMFCS | F_ENJUMBO | F_ENHASHMCAST}, + {A_XGM_RX_HASH_LOW, 0}, + {A_XGM_RX_HASH_HIGH, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_1, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_2, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_3, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_4, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_5, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_6, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_7, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_8, 0}, + {A_XGM_STAT_CTRL, F_CLRSTATS} + }; + u32 val; + struct adapter *adap = mac->adapter; + unsigned int oft = mac->offset; + + t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_); + t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ + + t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft); + t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft, + F_RXSTRFRWRD | F_DISERRFRAMES, + uses_xaui(adap) ? 
0 : F_RXSTRFRWRD);
+	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);
+
+	if (uses_xaui(adap)) {
+		if (adap->params.rev == 0) {
+			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+					 F_RXENABLE | F_TXENABLE);
+			if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
+					    F_CMULOCK, 1, 5, 2)) {
+				CH_ERR(adap,
+				       "MAC %d XAUI SERDES CMU lock failed\n",
+				       macidx(mac));
+				return -1;
+			}
+			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+					 F_SERDESRESET_);
+		} else
+			xaui_serdes_reset(mac);
+	}
+
+	t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
+			 V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
+			 V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
+	val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
+
+	if (is_10G(adap))
+		val |= F_PCS_RESET_;
+	else if (uses_xaui(adap))
+		val |= F_PCS_RESET_ | F_XG2G_RESET_;
+	else
+		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
+	if ((val & F_PCS_RESET_) && adap->params.rev) {
+		msleep(1);
+		t3b_pcs_reset(mac);
+	}
+
+	memset(&mac->stats, 0, sizeof(mac->stats));
+	return 0;
+}
+
+static int t3b2_mac_reset(struct cmac *mac)
+{
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset, store;
+	int idx = macidx(mac);
+	u32 val;
+
+	if (!macidx(mac))
+		t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
+	else
+		t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
+
+	/* Stop NIC traffic to reduce the number of TXTOGGLES */
+	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
+	/* Ensure TX drains */
+	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);
+
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
+
+	/* Store A_TP_TX_DROP_CFG_CH0 */
+	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+	store = t3_read_reg(adap, A_TP_TX_DROP_CFG_CH0 + idx);
+
+	msleep(10);
+
+	/* Change DROP_CFG to 0xc0000011 */
+	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+	t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);
+
+	/* Check for xgm Rx fifo empty */
+	/* Increased loop count to 1000 from 5 to cover 1G and 100Mbps case */
+	if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
+			    0x80000000, 1, 1000, 2)) {
+		CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
+		       macidx(mac));
+		return -1;
+	}
+
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
+
+	val = F_MAC_RESET_;
+	if (is_10G(adap))
+		val |= F_PCS_RESET_;
+	else if (uses_xaui(adap))
+		val |= F_PCS_RESET_ | F_XG2G_RESET_;
+	else
+		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
+	if ((val & F_PCS_RESET_) && adap->params.rev) {
+		msleep(1);
+		t3b_pcs_reset(mac);
+	}
+	t3_write_reg(adap, A_XGM_RX_CFG + oft,
+		     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+		     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
+
+	/* Restore the DROP_CFG */
+	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+	t3_write_reg(adap, A_TP_PIO_DATA, store);
+
+	if (!idx)
+		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
+	else
+		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
+
+	/* Re-enable NIC traffic */
+	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);
+
+	return 0;
+}
+
+/*
+ * Set the exact match register 'idx' to recognize the given Ethernet address.
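+ * Each filter is a LOW/HIGH register pair spaced 8 bytes apart; octets 0-3
+ * of the address land in the LOW register and octets 4-5 in the HIGH
+ * register, as assembled by set_addr_filter() below.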
+ */
+static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
+{
+	u32 addr_lo, addr_hi;
+	unsigned int oft = mac->offset + idx * 8;
+
+	addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	addr_hi = (addr[5] << 8) | addr[4];
+
+	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
+	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
+}
+
+/* Set one of the station's unicast MAC addresses. */
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
+{
+	if (idx >= mac->nucast)
+		return -EINVAL;
+	set_addr_filter(mac, idx, addr);
+	return 0;
+}
+
+/*
+ * Specify the number of exact address filters that should be reserved for
+ * unicast addresses.  Caller should reload the unicast and multicast addresses
+ * after calling this.
+ */
+int t3_mac_set_num_ucast(struct cmac *mac, int n)
+{
+	if (n > EXACT_ADDR_FILTERS)
+		return -EINVAL;
+	mac->nucast = n;
+	return 0;
+}
+
+void t3_mac_disable_exact_filters(struct cmac *mac)
+{
+	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;
+
+	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
+		u32 v = t3_read_reg(mac->adapter, reg);
+		t3_write_reg(mac->adapter, reg, v);
+	}
+	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);	/* flush */
+}
+
+void t3_mac_enable_exact_filters(struct cmac *mac)
+{
+	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;
+
+	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
+		u32 v = t3_read_reg(mac->adapter, reg);
+		t3_write_reg(mac->adapter, reg, v);
+	}
+	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);	/* flush */
+}
+
+/* Calculate the RX hash filter index of an Ethernet address */
+static int hash_hw_addr(const u8 * addr)
+{
+	int hash = 0, octet, bit, i = 0, c;
+
+	for (octet = 0; octet < 6; ++octet)
+		for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
+			hash ^= (c & 1) << i;
+			if (++i == 6)
+				i = 0;
+		}
+	return hash;
+}
+
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
+{
+	u32 val, hash_lo, hash_hi;
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset;
+
+	val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
+	if (dev->flags & IFF_PROMISC)
+		val |= F_COPYALLFRAMES;
+	t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
+
+	if (dev->flags & IFF_ALLMULTI)
+		hash_lo = hash_hi = 0xffffffff;
+	else {
+		struct netdev_hw_addr *ha;
+		int exact_addr_idx = mac->nucast;
+
+		hash_lo = hash_hi = 0;
+		netdev_for_each_mc_addr(ha, dev)
+			if (exact_addr_idx < EXACT_ADDR_FILTERS)
+				set_addr_filter(mac, exact_addr_idx++,
+						ha->addr);
+			else {
+				int hash = hash_hw_addr(ha->addr);
+
+				if (hash < 32)
+					hash_lo |= (1 << hash);
+				else
+					hash_hi |= (1 << (hash - 32));
+			}
+	}
+
+	t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
+	t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
+	return 0;
+}
+
+static int rx_fifo_hwm(int mtu)
+{
+	int hwm;
+
+	hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
+	return min(hwm, MAC_RXFIFO_SIZE - 8192);
+}
+
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
+{
+	int hwm, lwm, divisor;
+	int ipg;
+	unsigned int thres, v, reg;
+	struct adapter *adap = mac->adapter;
+
+	/*
+	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
+	 * packet size register includes header, but not FCS.
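+	 * Hence the adjustments below: add the 14-byte Ethernet header to
+	 * the MTU, plus 4 more bytes for a VLAN tag once the frame exceeds
+	 * 1536 bytes.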
+ */ + mtu += 14; + if (mtu > 1536) + mtu += 4; + + if (mtu > MAX_FRAME_SIZE - 4) + return -EINVAL; + t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); + + if (adap->params.rev >= T3_REV_B2 && + (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) { + t3_mac_disable_exact_filters(mac); + v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset); + t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset, + F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST); + + reg = adap->params.rev == T3_REV_B2 ? + A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG; + + /* drain RX FIFO */ + if (t3_wait_op_done(adap, reg + mac->offset, + F_RXFIFO_EMPTY, 1, 20, 5)) { + t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v); + t3_mac_enable_exact_filters(mac); + return -EIO; + } + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, + V_RXMAXPKTSIZE(M_RXMAXPKTSIZE), + V_RXMAXPKTSIZE(mtu)); + t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v); + t3_mac_enable_exact_filters(mac); + } else + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, + V_RXMAXPKTSIZE(M_RXMAXPKTSIZE), + V_RXMAXPKTSIZE(mtu)); + + /* + * Adjust the PAUSE frame watermarks. We always set the LWM, and the + * HWM only if flow-control is enabled. + */ + hwm = rx_fifo_hwm(mtu); + lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4); + v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset); + v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM); + v |= V_RXFIFOPAUSELWM(lwm / 8); + if (G_RXFIFOPAUSEHWM(v)) + v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) | + V_RXFIFOPAUSEHWM(hwm / 8); + + t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v); + + /* Adjust the TX FIFO threshold based on the MTU */ + thres = (adap->params.vpd.cclk * 1000) / 15625; + thres = (thres * mtu) / 1000; + if (is_10G(adap)) + thres /= 10; + thres = mtu > thres ? (mtu - thres + 7) / 8 : 0; + thres = max(thres, 8U); /* need at least 8 */ + ipg = (adap->params.rev == T3_REV_C) ? 0 : 1; + t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset, + V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG), + V_TXFIFOTHRESH(thres) | V_TXIPG(ipg)); + + if (adap->params.rev > 0) { + divisor = (adap->params.rev == T3_REV_C) ? 64 : 8; + t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset, + (hwm - lwm) * 4 / divisor); + } + t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset, + MAC_RXFIFO_SIZE * 4 * 8 / 512); + return 0; +} + +int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc) +{ + u32 val; + struct adapter *adap = mac->adapter; + unsigned int oft = mac->offset; + + if (duplex >= 0 && duplex != DUPLEX_FULL) + return -EINVAL; + if (speed >= 0) { + if (speed == SPEED_10) + val = V_PORTSPEED(0); + else if (speed == SPEED_100) + val = V_PORTSPEED(1); + else if (speed == SPEED_1000) + val = V_PORTSPEED(2); + else if (speed == SPEED_10000) + val = V_PORTSPEED(3); + else + return -EINVAL; + + t3_set_reg_field(adap, A_XGM_PORT_CFG + oft, + V_PORTSPEED(M_PORTSPEED), val); + } + + val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft); + val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM); + if (fc & PAUSE_TX) { + u32 rx_max_pkt_size = + G_RXMAXPKTSIZE(t3_read_reg(adap, + A_XGM_RX_MAX_PKT_SIZE + oft)); + val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size) / 8); + } + t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val); + + t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, + (fc & PAUSE_RX) ? 
F_TXPAUSEEN : 0); + return 0; +} + +int t3_mac_enable(struct cmac *mac, int which) +{ + int idx = macidx(mac); + struct adapter *adap = mac->adapter; + unsigned int oft = mac->offset; + struct mac_stats *s = &mac->stats; + + if (which & MAC_DIRECTION_TX) { + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); + t3_write_reg(adap, A_TP_PIO_DATA, + adap->params.rev == T3_REV_C ? + 0xc4ffff01 : 0xc0ede401); + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE); + t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, + adap->params.rev == T3_REV_C ? 0 : 1 << idx); + + t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN); + + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx); + mac->tx_mcnt = s->tx_frames; + mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, + A_TP_PIO_DATA))); + mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, + A_XGM_TX_SPI4_SOP_EOP_CNT + + oft))); + mac->rx_mcnt = s->rx_frames; + mac->rx_pause = s->rx_pause; + mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, + A_XGM_RX_SPI4_SOP_EOP_CNT + + oft))); + mac->rx_ocnt = s->rx_fifo_ovfl; + mac->txen = F_TXEN; + mac->toggle_cnt = 0; + } + if (which & MAC_DIRECTION_RX) + t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN); + return 0; +} + +int t3_mac_disable(struct cmac *mac, int which) +{ + struct adapter *adap = mac->adapter; + + if (which & MAC_DIRECTION_TX) { + t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); + mac->txen = 0; + } + if (which & MAC_DIRECTION_RX) { + int val = F_MAC_RESET_; + + t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, + F_PCS_RESET_, 0); + msleep(100); + t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0); + if (is_10G(adap)) + val |= F_PCS_RESET_; + else if (uses_xaui(adap)) + val |= F_PCS_RESET_ | F_XG2G_RESET_; + else + val |= F_RGMII_RESET_ | F_XG2G_RESET_; + t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val); + } + return 0; +} + +int t3b2_mac_watchdog_task(struct cmac *mac) +{ + struct adapter *adap = mac->adapter; + struct mac_stats *s = &mac->stats; + unsigned int tx_tcnt, tx_xcnt; + u64 tx_mcnt = s->tx_frames; + int status; + + status = 0; + tx_xcnt = 1; /* By default tx_xcnt is making progress */ + tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */ + if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) { + tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, + A_XGM_TX_SPI4_SOP_EOP_CNT + + mac->offset))); + if (tx_xcnt == 0) { + t3_write_reg(adap, A_TP_PIO_ADDR, + A_TP_TX_DROP_CNT_CH0 + macidx(mac)); + tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, + A_TP_PIO_DATA))); + } else { + goto out; + } + } else { + mac->toggle_cnt = 0; + goto out; + } + + if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) { + if (mac->toggle_cnt > 4) { + status = 2; + goto out; + } else { + status = 1; + goto out; + } + } else { + mac->toggle_cnt = 0; + goto out; + } + +out: + mac->tx_tcnt = tx_tcnt; + mac->tx_xcnt = tx_xcnt; + mac->tx_mcnt = s->tx_frames; + mac->rx_pause = s->rx_pause; + if (status == 1) { + t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); + t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */ + t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen); + t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */ + mac->toggle_cnt++; + } else if (status == 2) { + t3b2_mac_reset(mac); + mac->toggle_cnt = 0; + } + return status; +} + +/* + * This function is called periodically to accumulate the current values of the + * RMON counters into the port statistics. 
Since the packet counters are only + * 32 bits they can overflow in ~286 secs at 10G, so the function should be + * called more frequently than that. The byte counters are 45-bit wide, they + * would overflow in ~7.8 hours. + */ +const struct mac_stats *t3_mac_update_stats(struct cmac *mac) +{ +#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset) +#define RMON_UPDATE(mac, name, reg) \ + (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg) +#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \ + (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \ + ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32) + + u32 v, lo; + + RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH); + RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH); + RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES); + RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES); + RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES); + RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES); + RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES); + RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES); + RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES); + + RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES); + + v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT); + if (mac->adapter->params.rev == T3_REV_B2) + v &= 0x7fffffff; + mac->stats.rx_too_long += v; + + RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES); + RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES); + RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES); + RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES); + RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES); + RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES); + RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES); + + RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH); + RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH); + RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST); + RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST); + RMON_UPDATE(mac, tx_pause, TX_PAUSE); + /* This counts error frames in general (bad FCS, underrun, etc). */ + RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES); + + RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES); + RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES); + RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES); + RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES); + RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES); + RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES); + RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES); + + /* The next stat isn't clear-on-read. */ + t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50); + v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA); + lo = (u32) mac->stats.rx_cong_drops; + mac->stats.rx_cong_drops += (u64) (v - lo); + + return &mac->stats; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile new file mode 100644 index 0000000..4986674 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -0,0 +1,7 @@ +# +# Chelsio T4 driver +# + +obj-$(CONFIG_CHELSIO_T4) += cxgb4.o + +cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h new file mode 100644 index 0000000..223a7f7 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -0,0 +1,722 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_H__ +#define __CXGB4_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cxgb4_uld.h" +#include "t4_hw.h" + +#define FW_VERSION_MAJOR 1 +#define FW_VERSION_MINOR 1 +#define FW_VERSION_MICRO 0 + +enum { + MAX_NPORTS = 4, /* max # of ports */ + SERNUM_LEN = 24, /* Serial # length */ + EC_LEN = 16, /* E/C length */ + ID_LEN = 16, /* ID length */ +}; + +enum { + MEM_EDC0, + MEM_EDC1, + MEM_MC +}; + +enum dev_master { + MASTER_CANT, + MASTER_MAY, + MASTER_MUST +}; + +enum dev_state { + DEV_STATE_UNINIT, + DEV_STATE_INIT, + DEV_STATE_ERR +}; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +struct port_stats { + u64 tx_octets; /* total # of octets in good frames */ + u64 tx_frames; /* all good frames */ + u64 tx_bcast_frames; /* all broadcast frames */ + u64 tx_mcast_frames; /* all multicast frames */ + u64 tx_ucast_frames; /* all unicast frames */ + u64 tx_error_frames; /* all error frames */ + + u64 tx_frames_64; /* # of Tx frames in a particular range */ + u64 tx_frames_65_127; + u64 tx_frames_128_255; + u64 tx_frames_256_511; + u64 tx_frames_512_1023; + u64 tx_frames_1024_1518; + u64 tx_frames_1519_max; + + u64 tx_drop; /* # of dropped Tx frames */ + u64 tx_pause; /* # of transmitted pause frames */ + u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ + u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ + u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ + u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ + u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ + u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ + u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ + u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ + + u64 rx_octets; /* total # of octets in good frames */ + u64 rx_frames; /* all good frames */ + u64 rx_bcast_frames; /* all broadcast frames */ + u64 rx_mcast_frames; /* all multicast frames */ + u64 rx_ucast_frames; /* all unicast frames */ + u64 rx_too_long; /* # of frames exceeding MTU */ + u64 rx_jabber; /* # of jabber frames */ + u64 rx_fcs_err; /* # of received frames with bad FCS */ + u64 rx_len_err; /* # of received frames with 
length error */ + u64 rx_symbol_err; /* symbol errors */ + u64 rx_runt; /* # of short frames */ + + u64 rx_frames_64; /* # of Rx frames in a particular range */ + u64 rx_frames_65_127; + u64 rx_frames_128_255; + u64 rx_frames_256_511; + u64 rx_frames_512_1023; + u64 rx_frames_1024_1518; + u64 rx_frames_1519_max; + + u64 rx_pause; /* # of received pause frames */ + u64 rx_ppp0; /* # of received PPP prio 0 frames */ + u64 rx_ppp1; /* # of received PPP prio 1 frames */ + u64 rx_ppp2; /* # of received PPP prio 2 frames */ + u64 rx_ppp3; /* # of received PPP prio 3 frames */ + u64 rx_ppp4; /* # of received PPP prio 4 frames */ + u64 rx_ppp5; /* # of received PPP prio 5 frames */ + u64 rx_ppp6; /* # of received PPP prio 6 frames */ + u64 rx_ppp7; /* # of received PPP prio 7 frames */ + + u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ + u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */ + u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ + u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ + u64 rx_trunc0; /* buffer-group 0 truncated packets */ + u64 rx_trunc1; /* buffer-group 1 truncated packets */ + u64 rx_trunc2; /* buffer-group 2 truncated packets */ + u64 rx_trunc3; /* buffer-group 3 truncated packets */ +}; + +struct lb_port_stats { + u64 octets; + u64 frames; + u64 bcast_frames; + u64 mcast_frames; + u64 ucast_frames; + u64 error_frames; + + u64 frames_64; + u64 frames_65_127; + u64 frames_128_255; + u64 frames_256_511; + u64 frames_512_1023; + u64 frames_1024_1518; + u64 frames_1519_max; + + u64 drop; + + u64 ovflow0; + u64 ovflow1; + u64 ovflow2; + u64 ovflow3; + u64 trunc0; + u64 trunc1; + u64 trunc2; + u64 trunc3; +}; + +struct tp_tcp_stats { + u32 tcpOutRsts; + u64 tcpInSegs; + u64 tcpOutSegs; + u64 tcpRetransSegs; +}; + +struct tp_err_stats { + u32 macInErrs[4]; + u32 hdrInErrs[4]; + u32 tcpInErrs[4]; + u32 tnlCongDrops[4]; + u32 ofldChanDrops[4]; + u32 tnlTxDrops[4]; + u32 ofldVlanDrops[4]; + u32 tcp6InErrs[4]; + u32 ofldNoNeigh; + u32 ofldCongDefer; +}; + +struct tp_params { + unsigned int ntxchan; /* # of Tx channels */ + unsigned int tre; /* log2 of core clocks per TP tick */ +}; + +struct vpd_params { + unsigned int cclk; + u8 ec[EC_LEN + 1]; + u8 sn[SERNUM_LEN + 1]; + u8 id[ID_LEN + 1]; +}; + +struct pci_params { + unsigned char speed; + unsigned char width; +}; + +struct adapter_params { + struct tp_params tp; + struct vpd_params vpd; + struct pci_params pci; + + unsigned int sf_size; /* serial flash size in bytes */ + unsigned int sf_nsec; /* # of flash sectors */ + unsigned int sf_fw_start; /* start of FW image in flash */ + + unsigned int fw_vers; + unsigned int tp_vers; + u8 api_vers[7]; + + unsigned short mtus[NMTUS]; + unsigned short a_wnd[NCCTRL_WIN]; + unsigned short b_wnd[NCCTRL_WIN]; + + unsigned char nports; /* # of ethernet ports */ + unsigned char portvec; + unsigned char rev; /* chip revision */ + unsigned char offload; + + unsigned int ofldq_wr_cred; +}; + +struct trace_params { + u32 data[TRACE_LEN / 4]; + u32 mask[TRACE_LEN / 4]; + unsigned short snap_len; + unsigned short min_len; + unsigned char skip_ofst; + unsigned char skip_len; + unsigned char invert; + unsigned char port; +}; + +struct link_config { + unsigned short supported; /* link capabilities */ + unsigned short advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* 
actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? */ +}; + +#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) + +enum { + MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ + MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ + MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ + MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ +}; + +enum { + MAX_EGRQ = 128, /* max # of egress queues, including FLs */ + MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */ +}; + +struct adapter; +struct sge_rspq; + +struct port_info { + struct adapter *adapter; + u16 viid; + s16 xact_addr_filt; /* index of exact MAC address filter */ + u16 rss_size; /* size of VI's RSS table slice */ + s8 mdio_addr; + u8 port_type; + u8 mod_type; + u8 port_id; + u8 tx_chan; + u8 lport; /* associated offload logical port */ + u8 nqsets; /* # of qsets */ + u8 first_qset; /* index of first qset */ + u8 rss_mode; + struct link_config link_cfg; + u16 *rss; +}; + +struct dentry; +struct work_struct; + +enum { /* adapter flags */ + FULL_INIT_DONE = (1 << 0), + USING_MSI = (1 << 1), + USING_MSIX = (1 << 2), + FW_OK = (1 << 4), +}; + +struct rx_sw_desc; + +struct sge_fl { /* SGE free-buffer queue state */ + unsigned int avail; /* # of available Rx buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned long alloc_failed; /* # of times buffer allocation failed */ + unsigned long large_alloc_failed; + unsigned long starving; + /* RO fields */ + unsigned int cntxt_id; /* SGE context id for the free list */ + unsigned int size; /* capacity of free list */ + struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ + __be64 *desc; /* address of HW Rx descriptor ring */ + dma_addr_t addr; /* bus address of HW ring start */ +}; + +/* A packet gather list */ +struct pkt_gl { + skb_frag_t frags[MAX_SKB_FRAGS]; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); + +struct sge_rspq { /* state for an SGE response queue */ + struct napi_struct napi; + const __be64 *cur_desc; /* current descriptor in queue */ + unsigned int cidx; /* consumer index */ + u8 gen; /* current generation bit */ + u8 intr_params; /* interrupt holdoff parameters */ + u8 next_intr_params; /* holdoff params for next interrupt */ + u8 pktcnt_idx; /* interrupt packet threshold */ + u8 uld; /* ULD handling this queue */ + u8 idx; /* queue index within its group */ + int offset; /* offset into current Rx buffer */ + u16 cntxt_id; /* SGE context id for the response q */ + u16 abs_id; /* absolute SGE id for the response q */ + __be64 *desc; /* address of HW response ring */ + dma_addr_t phys_addr; /* physical address of the ring */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capacity of response queue */ + struct adapter *adap; + struct net_device *netdev; /* associated net device */ + rspq_handler_t handler; +}; + +struct sge_eth_stats { /* Ethernet queue statistics */ + unsigned long pkts; /* # of ethernet packets */ + unsigned long lro_pkts; /* # of LRO super packets */ + unsigned long lro_merged; /* # of wire packets merged by LRO */ + unsigned long rx_cso; /* # of Rx checksum offloads */ + unsigned long vlan_ex; /* # of Rx 
VLAN extractions */ + unsigned long rx_drops; /* # of packets dropped due to no mem */ +}; + +struct sge_eth_rxq { /* SW Ethernet Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_eth_stats stats; +} ____cacheline_aligned_in_smp; + +struct sge_ofld_stats { /* offload queue statistics */ + unsigned long pkts; /* # of packets */ + unsigned long imm; /* # of immediate-data packets */ + unsigned long an; /* # of asynchronous notifications */ + unsigned long nomem; /* # of responses deferred due to no mem */ +}; + +struct sge_ofld_rxq { /* SW offload Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_ofld_stats stats; +} ____cacheline_aligned_in_smp; + +struct tx_desc { + __be64 flit[8]; +}; + +struct tx_sw_desc; + +struct sge_txq { + unsigned int in_use; /* # of in-use Tx descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned long stops; /* # of times q has been stopped */ + unsigned long restarts; /* # of queue restarts */ + unsigned int cntxt_id; /* SGE context id for the Tx q */ + struct tx_desc *desc; /* address of HW Tx descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + dma_addr_t phys_addr; /* physical address of the ring */ +}; + +struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ + struct sge_txq q; + struct netdev_queue *txq; /* associated netdev TX queue */ + unsigned long tso; /* # of TSO requests */ + unsigned long tx_cso; /* # of Tx checksum offloads */ + unsigned long vlan_ins; /* # of Tx VLAN insertions */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +} ____cacheline_aligned_in_smp; + +struct sge_ofld_txq { /* state for an SGE offload Tx queue */ + struct sge_txq q; + struct adapter *adap; + struct sk_buff_head sendq; /* list of backpressured packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + u8 full; /* the Tx ring is full */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +} ____cacheline_aligned_in_smp; + +struct sge_ctrl_txq { /* state for an SGE control Tx queue */ + struct sge_txq q; + struct adapter *adap; + struct sk_buff_head sendq; /* list of backpressured packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + u8 full; /* the Tx ring is full */ +} ____cacheline_aligned_in_smp; + +struct sge { + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; + struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; + + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; + struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; + struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + + struct sge_rspq intrq ____cacheline_aligned_in_smp; + spinlock_t intrq_lock; + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u16 ethqsets; /* # of active Ethernet queue sets */ + u16 ethtxq_rover; /* Tx queue to clean up next */ + u16 ofldqsets; /* # of active offload queue sets */ + u16 rdmaqs; /* # of available RDMA Rx queues */ + u16 ofld_rxq[MAX_OFLD_QSETS]; + u16 rdma_rxq[NCHAN]; + u16 timer_val[SGE_NTIMERS]; + u8 counter_val[SGE_NCOUNTERS]; + unsigned int starve_thres; + u8 idma_state[2]; + unsigned int egr_start; + unsigned int ingr_start; + void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ + struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ + DECLARE_BITMAP(starving_fl, 
MAX_EGRQ); + DECLARE_BITMAP(txq_maperr, MAX_EGRQ); + struct timer_list rx_timer; /* refills starving FLs */ + struct timer_list tx_timer; /* checks Tx queues */ +}; + +#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) +#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) +#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) + +struct l2t_data; + +struct adapter { + void __iomem *regs; + struct pci_dev *pdev; + struct device *pdev_dev; + unsigned int fn; + unsigned int flags; + + int msg_enable; + + struct adapter_params params; + struct cxgb4_virt_res vres; + unsigned int swintr; + + unsigned int wol; + + struct { + unsigned short vec; + char desc[IFNAMSIZ + 10]; + } msix_info[MAX_INGQ + 1]; + + struct sge sge; + + struct net_device *port[MAX_NPORTS]; + u8 chan_map[NCHAN]; /* channel -> port map */ + + struct l2t_data *l2t; + void *uld_handle[CXGB4_ULD_MAX]; + struct list_head list_node; + + struct tid_info tids; + void **tid_release_head; + spinlock_t tid_release_lock; + struct work_struct tid_release_task; + bool tid_release_task_busy; + + struct dentry *debugfs_root; + + spinlock_t stats_lock; +}; + +static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) +{ + return readl(adap->regs + reg_addr); +} + +static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val) +{ + writel(val, adap->regs + reg_addr); +} + +#ifndef readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr) +{ + return readq(adap->regs + reg_addr); +} + +static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) +{ + writeq(val, adap->regs + reg_addr); +} + +/** + * netdev2pinfo - return the port_info structure associated with a net_device + * @dev: the netdev + * + * Return the struct port_info associated with a net_device + */ +static inline struct port_info *netdev2pinfo(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @idx: the port index + * + * Return the port_info structure for the port of the given index. 
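+ * The port's net_device private area holds its port_info, so this is
+ * simply netdev_priv() on the adapter's idx-th port.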
+ */ +static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) +{ + return netdev_priv(adap->port[idx]); +} + +/** + * netdev2adap - return the adapter structure associated with a net_device + * @dev: the netdev + * + * Return the struct adapter associated with a net_device + */ +static inline struct adapter *netdev2adap(const struct net_device *dev) +{ + return netdev2pinfo(dev)->adapter; +} + +void t4_os_portmod_changed(const struct adapter *adap, int port_id); +void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); + +void *t4_alloc_mem(size_t size); + +void t4_free_sge_resources(struct adapter *adap); +irq_handler_t t4_intr_handler(struct adapter *adap); +netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); +int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); +int t4_ofld_send(struct adapter *adap, struct sk_buff *skb); +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct net_device *dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd); +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *netdevq, + unsigned int iqid); +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int cmplqid); +int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, + struct net_device *dev, unsigned int iqid); +irqreturn_t t4_sge_intr_msix(int irq, void *cookie); +void t4_sge_init(struct adapter *adap); +void t4_sge_start(struct adapter *adap); +void t4_sge_stop(struct adapter *adap); + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +static inline unsigned int core_ticks_per_usec(const struct adapter *adap) +{ + return adap->params.vpd.cclk / 1000; +} + +static inline unsigned int us_to_core_ticks(const struct adapter *adap, + unsigned int us) +{ + return (us * adap->params.vpd.cclk) / 1000; +} + +void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, + u32 val); + +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok); + +static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); +} + +static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); +} + +void t4_intr_enable(struct adapter *adapter); +void t4_intr_disable(struct adapter *adapter); +int t4_slow_intr_handler(struct adapter *adapter); + +int t4_wait_dev_ready(struct adapter *adap); +int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc); +int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); +int t4_seeprom_wp(struct adapter *adapter, bool enable); +int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); +int t4_check_fw_version(struct adapter *adapter); +int t4_prep_adapter(struct adapter *adapter); +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +void t4_fatal_err(struct adapter *adapter); +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq); +int t4_config_glbl_rss(struct 
adapter *adapter, int mbox, unsigned int mode, + unsigned int flags); +int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity); +int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, + u64 *parity); + +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); +void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6); +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta); + +void t4_wol_magic_enable(struct adapter *adap, unsigned int port, + const u8 *addr); +int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, + u64 mask0, u64 mask1, unsigned int crc, bool enable); + +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state); +int t4_fw_bye(struct adapter *adap, unsigned int mbox); +int t4_early_init(struct adapter *adap, unsigned int mbox); +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val); +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val); +int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, + unsigned int rxqi, unsigned int rxq, unsigned int tc, + unsigned int vi, unsigned int cmask, unsigned int pmask, + unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size); +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok); +int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool free, unsigned int naddr, + const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt); +int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok); +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en); +int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, + unsigned int nblinks); +int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 *valp); +int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 val); +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int 
t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); +#endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c new file mode 100644 index 0000000..c9957b7 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -0,0 +1,3806 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "t4fw_api.h" +#include "l2t.h" + +#define DRV_VERSION "1.3.0-ko" +#define DRV_DESC "Chelsio T4 Network Driver" + +/* + * Max interrupt hold-off timer value in us. Queues fall back to this value + * under extreme memory pressure so it's largish to give the system time to + * recover. + */ +#define MAX_SGE_TIMERVAL 200U + +#ifdef CONFIG_PCI_IOV +/* + * Virtual Function provisioning constants. We need two extra Ingress Queues + * with Interrupt capability to serve as the VF's Firmware Event Queue and + * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free + * Lists associated with them). For each Ethernet/Control Egress Queue and + * for each Free List, we need an Egress Context. 
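+ * With VFRES_NQSETS = 2 this works out below to 2 + 2 = 4 interrupt-capable
+ * ingress queues (VFRES_NIQFLINT) and 2 * 2 = 4 egress contexts (VFRES_NEQ,
+ * one Ethernet Tx queue plus one Free List per queue set) for each VF.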
+ */ +enum { + VFRES_NPORTS = 1, /* # of "ports" per VF */ + VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */ + + VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */ + VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */ + VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */ + VFRES_NIQ = 0, /* # of non-fl/int ingress queues */ + VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */ + VFRES_TC = 0, /* PCI-E traffic class */ + VFRES_NEXACTF = 16, /* # of exact MPS filters */ + + VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT, + VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF, +}; + +/* + * Provide a Port Access Rights Mask for the specified PF/VF. This is very + * static and likely not to be useful in the long run. We really need to + * implement some form of persistent configuration which the firmware + * controls. + */ +static unsigned int pfvfres_pmask(struct adapter *adapter, + unsigned int pf, unsigned int vf) +{ + unsigned int portn, portvec; + + /* + * Give PF's access to all of the ports. + */ + if (vf == 0) + return FW_PFVF_CMD_PMASK_MASK; + + /* + * For VFs, we'll assign them access to the ports based purely on the + * PF. We assign active ports in order, wrapping around if there are + * fewer active ports than PFs: e.g. active port[pf % nports]. + * Unfortunately the adapter's port_info structs haven't been + * initialized yet so we have to compute this. + */ + if (adapter->params.nports == 0) + return 0; + + portn = pf % adapter->params.nports; + portvec = adapter->params.portvec; + for (;;) { + /* + * Isolate the lowest set bit in the port vector. If we're at + * the port number that we want, return that as the pmask. + * otherwise mask that bit out of the port vector and + * decrement our port number ... 
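+		 * (portvec & (portvec - 1) clears the lowest set bit, so
+		 * XORing it back with portvec leaves exactly that bit: for
+		 * portvec = 0b0110 the first pass isolates 0b0010.)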
+ */ + unsigned int pmask = portvec ^ (portvec & (portvec-1)); + if (portn == 0) + return pmask; + portn--; + portvec &= ~pmask; + } + /*NOTREACHED*/ +} +#endif + +enum { + MEMWIN0_APERTURE = 65536, + MEMWIN0_BASE = 0x30000, + MEMWIN1_APERTURE = 32768, + MEMWIN1_BASE = 0x28000, + MEMWIN2_APERTURE = 2048, + MEMWIN2_BASE = 0x1b800, +}; + +enum { + MAX_TXQ_ENTRIES = 16384, + MAX_CTRL_TXQ_ENTRIES = 1024, + MAX_RSPQ_ENTRIES = 16384, + MAX_RX_BUFFERS = 16384, + MIN_TXQ_ENTRIES = 32, + MIN_CTRL_TXQ_ENTRIES = 32, + MIN_RSPQ_ENTRIES = 128, + MIN_FL_ENTRIES = 16 +}; + +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) } + +static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { + CH_DEVICE(0xa000, 0), /* PE10K */ + CH_DEVICE(0x4001, -1), + CH_DEVICE(0x4002, -1), + CH_DEVICE(0x4003, -1), + CH_DEVICE(0x4004, -1), + CH_DEVICE(0x4005, -1), + CH_DEVICE(0x4006, -1), + CH_DEVICE(0x4007, -1), + CH_DEVICE(0x4008, -1), + CH_DEVICE(0x4009, -1), + CH_DEVICE(0x400a, -1), + CH_DEVICE(0x4401, 4), + CH_DEVICE(0x4402, 4), + CH_DEVICE(0x4403, 4), + CH_DEVICE(0x4404, 4), + CH_DEVICE(0x4405, 4), + CH_DEVICE(0x4406, 4), + CH_DEVICE(0x4407, 4), + CH_DEVICE(0x4408, 4), + CH_DEVICE(0x4409, 4), + CH_DEVICE(0x440a, 4), + { 0, } +}; + +#define FW_FNAME "cxgb4/t4fw.bin" + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); +MODULE_FIRMWARE(FW_FNAME); + +static int dflt_msg_enable = DFLT_MSG_ENABLE; + +module_param(dflt_msg_enable, int, 0644); +MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); + +/* + * The driver uses the best interrupt scheme available on a platform in the + * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which + * of these schemes the driver may consider as follows: + * + * msi = 2: choose from among all three options + * msi = 1: only consider MSI and INTx interrupts + * msi = 0: force INTx interrupts + */ +static int msi = 2; + +module_param(msi, int, 0644); +MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)"); + +/* + * Queue interrupt hold-off timer values. Queues default to the first of these + * upon creation. 
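+ * The values are in microseconds and can be tuned at load time through the
+ * intr_holdoff module parameter below.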
+ */ +static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 }; + +module_param_array(intr_holdoff, uint, NULL, 0644); +MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " + "0..4 in microseconds"); + +static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; + +module_param_array(intr_cnt, uint, NULL, 0644); +MODULE_PARM_DESC(intr_cnt, + "thresholds 1..3 for queue interrupt packet counters"); + +static int vf_acls; + +#ifdef CONFIG_PCI_IOV +module_param(vf_acls, bool, 0644); +MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); + +static unsigned int num_vf[4]; + +module_param_array(num_vf, uint, NULL, 0644); +MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); +#endif + +static struct dentry *cxgb4_debugfs_root; + +static LIST_HEAD(adapter_list); +static DEFINE_MUTEX(uld_mutex); +static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX]; +static const char *uld_str[] = { "RDMA", "iSCSI" }; + +static void link_report(struct net_device *dev) +{ + if (!netif_carrier_ok(dev)) + netdev_info(dev, "link down\n"); + else { + static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" }; + + const char *s = "10Mbps"; + const struct port_info *p = netdev_priv(dev); + + switch (p->link_cfg.speed) { + case SPEED_10000: + s = "10Gbps"; + break; + case SPEED_1000: + s = "1000Mbps"; + break; + case SPEED_100: + s = "100Mbps"; + break; + } + + netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, + fc[p->link_cfg.fc]); + } +} + +void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) +{ + struct net_device *dev = adapter->port[port_id]; + + /* Skip changes from disabled ports. */ + if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) { + if (link_stat) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + link_report(dev); + } +} + +void t4_os_portmod_changed(const struct adapter *adap, int port_id) +{ + static const char *mod_str[] = { + NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" + }; + + const struct net_device *dev = adap->port[port_id]; + const struct port_info *pi = netdev_priv(dev); + + if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) + netdev_info(dev, "port module unplugged\n"); + else if (pi->mod_type < ARRAY_SIZE(mod_str)) + netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); +} + +/* + * Configure the exact and hash address filters to handle a port's multicast + * and secondary unicast MAC addresses. 
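+ * Addresses are batched in groups of up to 7 (the size of the addr[] array
+ * below) per t4_alloc_mac_filt() mailbox call; whatever the exact filters
+ * cannot hold is folded into the hash filter via t4_set_addr_hash().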
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+ u64 mhash = 0;
+ u64 uhash = 0;
+ bool free = true;
+ u16 filt_idx[7];
+ const u8 *addr[7];
+ int ret, naddr = 0;
+ const struct netdev_hw_addr *ha;
+ int uc_cnt = netdev_uc_count(dev);
+ int mc_cnt = netdev_mc_count(dev);
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int mb = pi->adapter->fn;
+
+ /* first do the secondary unicast addresses */
+ netdev_for_each_uc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+ ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
+ naddr, addr, filt_idx, &uhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ naddr = 0;
+ }
+ }
+
+ /* next set up the multicast addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+ ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
+ naddr, addr, filt_idx, &mhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ naddr = 0;
+ }
+ }
+
+ return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
+ uhash | mhash, sleep);
+}
+
+/*
+ * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ ret = set_addr_filters(dev, sleep_ok);
+ if (ret == 0)
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
+ (dev->flags & IFF_PROMISC) ? 1 : 0,
+ (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
+ sleep_ok);
+ return ret;
+}
+
+/**
+ * link_start - enable a port
+ * @dev: the port to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+static int link_start(struct net_device *dev)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+ unsigned int mb = pi->adapter->fn;
+
+ /*
+ * We do not set address filters and promiscuity here, the stack does
+ * that step explicitly.
+ */
+ ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
+ !!(dev->features & NETIF_F_HW_VLAN_RX), true);
+ if (ret == 0) {
+ ret = t4_change_mac(pi->adapter, mb, pi->viid,
+ pi->xact_addr_filt, dev->dev_addr, true,
+ true);
+ if (ret >= 0) {
+ pi->xact_addr_filt = ret;
+ ret = 0;
+ }
+ }
+ if (ret == 0)
+ ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+ &pi->link_cfg);
+ if (ret == 0)
+ ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
+ return ret;
+}
+
+/*
+ * Response queue handler for the FW event queue.
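+ *
+ * This queue carries control-plane traffic only: egress-queue update
+ * notifications used to restart stalled Tx queues, FW mailbox replies,
+ * and L2T write completions; anything else is flagged as unexpected.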
+ */ +static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + u8 opcode = ((const struct rss_header *)rsp)->opcode; + + rsp++; /* skip RSS header */ + if (likely(opcode == CPL_SGE_EGR_UPDATE)) { + const struct cpl_sge_egr_update *p = (void *)rsp; + unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); + struct sge_txq *txq; + + txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; + txq->restarts++; + if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) { + struct sge_eth_txq *eq; + + eq = container_of(txq, struct sge_eth_txq, q); + netif_tx_wake_queue(eq->txq); + } else { + struct sge_ofld_txq *oq; + + oq = container_of(txq, struct sge_ofld_txq, q); + tasklet_schedule(&oq->qresume_tsk); + } + } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { + const struct cpl_fw6_msg *p = (void *)rsp; + + if (p->type == 0) + t4_handle_fw_rpl(q->adap, p->data); + } else if (opcode == CPL_L2T_WRITE_RPL) { + const struct cpl_l2t_write_rpl *p = (void *)rsp; + + do_l2t_write_rpl(q->adap, p); + } else + dev_err(q->adap->pdev_dev, + "unexpected CPL %#x on FW event queue\n", opcode); + return 0; +} + +/** + * uldrx_handler - response queue handler for ULD queues + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the offload message + * @gl: the gather list of packet fragments + * + * Deliver an ingress offload packet to a ULD. All processing is done by + * the ULD, we just maintain statistics. + */ +static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); + + if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { + rxq->stats.nomem++; + return -1; + } + if (gl == NULL) + rxq->stats.imm++; + else if (gl == CXGB4_MSG_AN) + rxq->stats.an++; + else + rxq->stats.pkts++; + return 0; +} + +static void disable_msi(struct adapter *adapter) +{ + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; + } +} + +/* + * Interrupt handler for non-data events used with MSI-X. + */ +static irqreturn_t t4_nondata_intr(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE)); + if (v & PFSW) { + adap->swintr = 1; + t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v); + } + t4_slow_intr_handler(adap); + return IRQ_HANDLED; +} + +/* + * Name the MSI-X interrupts. 
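+ *
+ * Vector 0 carries the non-data interrupts and keeps the bare interface
+ * name, vector 1 is the FW event queue, and the rest are named per Rx
+ * queue, e.g. "eth0-FWeventq" and "eth0-Rx0" (interface names
+ * illustrative).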
+ */ +static void name_msix_vecs(struct adapter *adap) +{ + int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc); + + /* non-data interrupts */ + snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name); + + /* FW events */ + snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", + adap->port[0]->name); + + /* Ethernet queues */ + for_each_port(adap, j) { + struct net_device *d = adap->port[j]; + const struct port_info *pi = netdev_priv(d); + + for (i = 0; i < pi->nqsets; i++, msi_idx++) + snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", + d->name, i); + } + + /* offload queues */ + for_each_ofldrxq(&adap->sge, i) + snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d", + adap->port[0]->name, i); + + for_each_rdmarxq(&adap->sge, i) + snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", + adap->port[0]->name, i); +} + +static int request_msix_queue_irqs(struct adapter *adap) +{ + struct sge *s = &adap->sge; + int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; + + err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, + adap->msix_info[1].desc, &s->fw_evtq); + if (err) + return err; + + for_each_ethrxq(s, ethqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->ethrxq[ethqidx].rspq); + if (err) + goto unwind; + msi++; + } + for_each_ofldrxq(s, ofldqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->ofldrxq[ofldqidx].rspq); + if (err) + goto unwind; + msi++; + } + for_each_rdmarxq(s, rdmaqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->rdmarxq[rdmaqidx].rspq); + if (err) + goto unwind; + msi++; + } + return 0; + +unwind: + while (--rdmaqidx >= 0) + free_irq(adap->msix_info[--msi].vec, + &s->rdmarxq[rdmaqidx].rspq); + while (--ofldqidx >= 0) + free_irq(adap->msix_info[--msi].vec, + &s->ofldrxq[ofldqidx].rspq); + while (--ethqidx >= 0) + free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); + free_irq(adap->msix_info[1].vec, &s->fw_evtq); + return err; +} + +static void free_msix_queue_irqs(struct adapter *adap) +{ + int i, msi = 2; + struct sge *s = &adap->sge; + + free_irq(adap->msix_info[1].vec, &s->fw_evtq); + for_each_ethrxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); + for_each_ofldrxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); + for_each_rdmarxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); +} + +/** + * write_rss - write the RSS table for a given port + * @pi: the port + * @queues: array of queue indices for RSS + * + * Sets up the portion of the HW RSS table for the port's VI to distribute + * packets to the Rx queues in @queues. + */ +static int write_rss(const struct port_info *pi, const u16 *queues) +{ + u16 *rss; + int i, err; + const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset]; + + rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL); + if (!rss) + return -ENOMEM; + + /* map the queue indices to queue ids */ + for (i = 0; i < pi->rss_size; i++, queues++) + rss[i] = q[*queues].rspq.abs_id; + + err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0, + pi->rss_size, rss, pi->rss_size); + kfree(rss); + return err; +} + +/** + * setup_rss - configure RSS + * @adap: the adapter + * + * Sets up RSS for each port. 
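+ * For each port this fills the VI's RSS indirection table with the
+ * absolute ids of the port's Rx queues, via write_rss() above.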
+ */ +static int setup_rss(struct adapter *adap) +{ + int i, err; + + for_each_port(adap, i) { + const struct port_info *pi = adap2pinfo(adap, i); + + err = write_rss(pi, pi->rss); + if (err) + return err; + } + return 0; +} + +/* + * Return the channel of the ingress queue with the given qid. + */ +static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid) +{ + qid -= p->ingr_start; + return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan; +} + +/* + * Wait until all NAPI handlers are descheduled. + */ +static void quiesce_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { + struct sge_rspq *q = adap->sge.ingr_map[i]; + + if (q && q->handler) + napi_disable(&q->napi); + } +} + +/* + * Enable NAPI scheduling and interrupt generation for all Rx queues. + */ +static void enable_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { + struct sge_rspq *q = adap->sge.ingr_map[i]; + + if (!q) + continue; + if (q->handler) + napi_enable(&q->napi); + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), + SEINTARM(q->intr_params) | + INGRESSQID(q->cntxt_id)); + } +} + +/** + * setup_sge_queues - configure SGE Tx/Rx/response queues + * @adap: the adapter + * + * Determines how many sets of SGE queues to use and initializes them. + * We support multiple queue sets per port if we have MSI-X, otherwise + * just one queue set per port. + */ +static int setup_sge_queues(struct adapter *adap) +{ + int err, msi_idx, i, j; + struct sge *s = &adap->sge; + + bitmap_zero(s->starving_fl, MAX_EGRQ); + bitmap_zero(s->txq_maperr, MAX_EGRQ); + + if (adap->flags & USING_MSIX) + msi_idx = 1; /* vector 0 is for non-queue interrupts */ + else { + err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, + NULL, NULL); + if (err) + return err; + msi_idx = -((int)s->intrq.abs_id + 1); + } + + err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], + msi_idx, NULL, fwevtq_handler); + if (err) { +freeout: t4_free_sge_resources(adap); + return err; + } + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; + + for (j = 0; j < pi->nqsets; j++, q++) { + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, + msi_idx, &q->fl, + t4_ethrx_handler); + if (err) + goto freeout; + q->rspq.idx = j; + memset(&q->stats, 0, sizeof(q->stats)); + } + for (j = 0; j < pi->nqsets; j++, t++) { + err = t4_sge_alloc_eth_txq(adap, t, dev, + netdev_get_tx_queue(dev, j), + s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + } + + j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ + for_each_ofldrxq(s, i) { + struct sge_ofld_rxq *q = &s->ofldrxq[i]; + struct net_device *dev = adap->port[i / j]; + + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, + &q->fl, uldrx_handler); + if (err) + goto freeout; + memset(&q->stats, 0, sizeof(q->stats)); + s->ofld_rxq[i] = q->rspq.abs_id; + err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev, + s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + + for_each_rdmarxq(s, i) { + struct sge_ofld_rxq *q = &s->rdmarxq[i]; + + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], + msi_idx, &q->fl, uldrx_handler); + if (err) + goto freeout; + memset(&q->stats, 
0, sizeof(q->stats)); + s->rdma_rxq[i] = q->rspq.abs_id; + } + + for_each_port(adap, i) { + /* + * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't + * have RDMA queues, and that's the right value. + */ + err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], + s->fw_evtq.cntxt_id, + s->rdmarxq[i].rspq.cntxt_id); + if (err) + goto freeout; + } + + t4_write_reg(adap, MPS_TRC_RSS_CONTROL, + RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | + QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); + return 0; +} + +/* + * Returns 0 if new FW was successfully loaded, a positive errno if a load was + * started but failed, and a negative errno if flash load couldn't start. + */ +static int upgrade_fw(struct adapter *adap) +{ + int ret; + u32 vers; + const struct fw_hdr *hdr; + const struct firmware *fw; + struct device *dev = adap->pdev_dev; + + ret = request_firmware(&fw, FW_FNAME, dev); + if (ret < 0) { + dev_err(dev, "unable to load firmware image " FW_FNAME + ", error %d\n", ret); + return ret; + } + + hdr = (const struct fw_hdr *)fw->data; + vers = ntohl(hdr->fw_ver); + if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { + ret = -EINVAL; /* wrong major version, won't do */ + goto out; + } + + /* + * If the flash FW is unusable or we found something newer, load it. + */ + if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || + vers > adap->params.fw_vers) { + ret = -t4_load_fw(adap, fw->data, fw->size); + if (!ret) + dev_info(dev, "firmware upgraded to version %pI4 from " + FW_FNAME "\n", &hdr->fw_ver); + } +out: release_firmware(fw); + return ret; +} + +/* + * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. + * The allocated memory is cleared. + */ +void *t4_alloc_mem(size_t size) +{ + void *p = kzalloc(size, GFP_KERNEL); + + if (!p) + p = vzalloc(size); + return p; +} + +/* + * Free memory allocated through alloc_mem(). + */ +static void t4_free_mem(void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +static inline int is_offload(const struct adapter *adap) +{ + return adap->params.offload; +} + +/* + * Implementation of ethtool operations. 
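+ *
+ * These handlers are wired into cxgb_ethtool_ops further below; the
+ * msg_enable value they get and set is the standard NETIF_MSG_* bitmap,
+ * defaulting to dflt_msg_enable above.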
+ */ + +static u32 get_msglevel(struct net_device *dev) +{ + return netdev2adap(dev)->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + netdev2adap(dev)->msg_enable = val; +} + +static char stats_strings[][ETH_GSTRING_LEN] = { + "TxOctetsOK ", + "TxFramesOK ", + "TxBroadcastFrames ", + "TxMulticastFrames ", + "TxUnicastFrames ", + "TxErrorFrames ", + + "TxFrames64 ", + "TxFrames65To127 ", + "TxFrames128To255 ", + "TxFrames256To511 ", + "TxFrames512To1023 ", + "TxFrames1024To1518 ", + "TxFrames1519ToMax ", + + "TxFramesDropped ", + "TxPauseFrames ", + "TxPPP0Frames ", + "TxPPP1Frames ", + "TxPPP2Frames ", + "TxPPP3Frames ", + "TxPPP4Frames ", + "TxPPP5Frames ", + "TxPPP6Frames ", + "TxPPP7Frames ", + + "RxOctetsOK ", + "RxFramesOK ", + "RxBroadcastFrames ", + "RxMulticastFrames ", + "RxUnicastFrames ", + + "RxFramesTooLong ", + "RxJabberErrors ", + "RxFCSErrors ", + "RxLengthErrors ", + "RxSymbolErrors ", + "RxRuntFrames ", + + "RxFrames64 ", + "RxFrames65To127 ", + "RxFrames128To255 ", + "RxFrames256To511 ", + "RxFrames512To1023 ", + "RxFrames1024To1518 ", + "RxFrames1519ToMax ", + + "RxPauseFrames ", + "RxPPP0Frames ", + "RxPPP1Frames ", + "RxPPP2Frames ", + "RxPPP3Frames ", + "RxPPP4Frames ", + "RxPPP5Frames ", + "RxPPP6Frames ", + "RxPPP7Frames ", + + "RxBG0FramesDropped ", + "RxBG1FramesDropped ", + "RxBG2FramesDropped ", + "RxBG3FramesDropped ", + "RxBG0FramesTrunc ", + "RxBG1FramesTrunc ", + "RxBG2FramesTrunc ", + "RxBG3FramesTrunc ", + + "TSO ", + "TxCsumOffload ", + "RxCsumGood ", + "VLANextractions ", + "VLANinsertions ", + "GROpackets ", + "GROmerged ", +}; + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } +} + +#define T4_REGMAP_SIZE (160 * 1024) + +static int get_regs_len(struct net_device *dev) +{ + return T4_REGMAP_SIZE; +} + +static int get_eeprom_len(struct net_device *dev) +{ + return EEPROMSIZE; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct adapter *adapter = netdev2adap(dev); + + strcpy(info->driver, KBUILD_MODNAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(adapter->pdev)); + + if (!adapter->params.fw_vers) + strcpy(info->fw_version, "N/A"); + else + snprintf(info->fw_version, sizeof(info->fw_version), + "%u.%u.%u.%u, TP %u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers)); +} + +static void get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, stats_strings, sizeof(stats_strings)); +} + +/* + * port stats maintained per queue of the port. They should be in the same + * order as in stats_strings above. 
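+ *
+ * get_stats() emits the hardware struct port_stats first and appends
+ * these seven software counters, matching the tail of stats_strings[]
+ * from "TSO" onwards.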
+ */ +struct queue_port_stats { + u64 tso; + u64 tx_csum; + u64 rx_csum; + u64 vlan_ex; + u64 vlan_ins; + u64 gro_pkts; + u64 gro_merged; +}; + +static void collect_sge_port_stats(const struct adapter *adap, + const struct port_info *p, struct queue_port_stats *s) +{ + int i; + const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; + const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; + + memset(s, 0, sizeof(*s)); + for (i = 0; i < p->nqsets; i++, rx++, tx++) { + s->tso += tx->tso; + s->tx_csum += tx->tx_cso; + s->rx_csum += rx->stats.rx_cso; + s->vlan_ex += rx->stats.vlan_ex; + s->vlan_ins += tx->vlan_ins; + s->gro_pkts += rx->stats.lro_pkts; + s->gro_merged += rx->stats.lro_merged; + } +} + +static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); + + data += sizeof(struct port_stats) / sizeof(u64); + collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); +} + +/* + * Return a version number to identify the type of adapter. The scheme is: + * - bits 0..9: chip version + * - bits 10..15: chip revision + * - bits 16..23: register dump version + */ +static inline unsigned int mk_adap_vers(const struct adapter *ap) +{ + return 4 | (ap->params.rev << 10) | (1 << 16); +} + +static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, + unsigned int end) +{ + u32 *p = buf + start; + + for ( ; start <= end; start += sizeof(u32)) + *p++ = t4_read_reg(ap, start); +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + static const unsigned int reg_ranges[] = { + 0x1008, 0x1108, + 0x1180, 0x11b4, + 0x11fc, 0x123c, + 0x1300, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x30d8, + 0x30e0, 0x5924, + 0x5960, 0x59d4, + 0x5a00, 0x5af8, + 0x6000, 0x6098, + 0x6100, 0x6150, + 0x6200, 0x6208, + 0x6240, 0x6248, + 0x6280, 0x6338, + 0x6370, 0x638c, + 0x6400, 0x643c, + 0x6500, 0x6524, + 0x6a00, 0x6a38, + 0x6a60, 0x6a78, + 0x6b00, 0x6b84, + 0x6bf0, 0x6c84, + 0x6cf0, 0x6d84, + 0x6df0, 0x6e84, + 0x6ef0, 0x6f84, + 0x6ff0, 0x7084, + 0x70f0, 0x7184, + 0x71f0, 0x7284, + 0x72f0, 0x7384, + 0x73f0, 0x7450, + 0x7500, 0x7530, + 0x7600, 0x761c, + 0x7680, 0x76cc, + 0x7700, 0x7798, + 0x77c0, 0x77fc, + 0x7900, 0x79fc, + 0x7b00, 0x7c38, + 0x7d00, 0x7efc, + 0x8dc0, 0x8e1c, + 0x8e30, 0x8e78, + 0x8ea0, 0x8f6c, + 0x8fc0, 0x9074, + 0x90fc, 0x90fc, + 0x9400, 0x9458, + 0x9600, 0x96bc, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0x9fec, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0xea7c, + 0xf000, 0x11190, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x1924c, + 0x193f8, 0x19474, + 0x19490, 0x194f8, + 0x19800, 0x19f30, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e040, 0x1e04c, + 0x1e284, 0x1e28c, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e440, 0x1e44c, + 0x1e684, 0x1e68c, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e840, 0x1e84c, + 0x1ea84, 0x1ea8c, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec40, 0x1ec4c, + 0x1ee84, 0x1ee8c, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 
0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f040, 0x1f04c, + 0x1f284, 0x1f28c, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f440, 0x1f44c, + 0x1f684, 0x1f68c, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f840, 0x1f84c, + 0x1fa84, 0x1fa8c, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc40, 0x1fc4c, + 0x1fe84, 0x1fe8c, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x20000, 0x2002c, + 0x20100, 0x2013c, + 0x20190, 0x201c8, + 0x20200, 0x20318, + 0x20400, 0x20528, + 0x20540, 0x20614, + 0x21000, 0x21040, + 0x2104c, 0x21060, + 0x210c0, 0x210ec, + 0x21200, 0x21268, + 0x21270, 0x21284, + 0x212fc, 0x21388, + 0x21400, 0x21404, + 0x21500, 0x21518, + 0x2152c, 0x2153c, + 0x21550, 0x21554, + 0x21600, 0x21600, + 0x21608, 0x21628, + 0x21630, 0x2163c, + 0x21700, 0x2171c, + 0x21780, 0x2178c, + 0x21800, 0x21c38, + 0x21c80, 0x21d7c, + 0x21e00, 0x21e04, + 0x22000, 0x2202c, + 0x22100, 0x2213c, + 0x22190, 0x221c8, + 0x22200, 0x22318, + 0x22400, 0x22528, + 0x22540, 0x22614, + 0x23000, 0x23040, + 0x2304c, 0x23060, + 0x230c0, 0x230ec, + 0x23200, 0x23268, + 0x23270, 0x23284, + 0x232fc, 0x23388, + 0x23400, 0x23404, + 0x23500, 0x23518, + 0x2352c, 0x2353c, + 0x23550, 0x23554, + 0x23600, 0x23600, + 0x23608, 0x23628, + 0x23630, 0x2363c, + 0x23700, 0x2371c, + 0x23780, 0x2378c, + 0x23800, 0x23c38, + 0x23c80, 0x23d7c, + 0x23e00, 0x23e04, + 0x24000, 0x2402c, + 0x24100, 0x2413c, + 0x24190, 0x241c8, + 0x24200, 0x24318, + 0x24400, 0x24528, + 0x24540, 0x24614, + 0x25000, 0x25040, + 0x2504c, 0x25060, + 0x250c0, 0x250ec, + 0x25200, 0x25268, + 0x25270, 0x25284, + 0x252fc, 0x25388, + 0x25400, 0x25404, + 0x25500, 0x25518, + 0x2552c, 0x2553c, + 0x25550, 0x25554, + 0x25600, 0x25600, + 0x25608, 0x25628, + 0x25630, 0x2563c, + 0x25700, 0x2571c, + 0x25780, 0x2578c, + 0x25800, 0x25c38, + 0x25c80, 0x25d7c, + 0x25e00, 0x25e04, + 0x26000, 0x2602c, + 0x26100, 0x2613c, + 0x26190, 0x261c8, + 0x26200, 0x26318, + 0x26400, 0x26528, + 0x26540, 0x26614, + 0x27000, 0x27040, + 0x2704c, 0x27060, + 0x270c0, 0x270ec, + 0x27200, 0x27268, + 0x27270, 0x27284, + 0x272fc, 0x27388, + 0x27400, 0x27404, + 0x27500, 0x27518, + 0x2752c, 0x2753c, + 0x27550, 0x27554, + 0x27600, 0x27600, + 0x27608, 0x27628, + 0x27630, 0x2763c, + 0x27700, 0x2771c, + 0x27780, 0x2778c, + 0x27800, 0x27c38, + 0x27c80, 0x27d7c, + 0x27e00, 0x27e04 + }; + + int i; + struct adapter *ap = netdev2adap(dev); + + regs->version = mk_adap_vers(ap); + + memset(buf, 0, T4_REGMAP_SIZE); + for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) + reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); +} + +static int restart_autoneg(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + if (p->link_cfg.autoneg != AUTONEG_ENABLE) + return -EINVAL; + t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan); + return 0; +} + +static int identify_port(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + unsigned int val; + struct adapter *adap = netdev2adap(dev); + + if (state == ETHTOOL_ID_ACTIVE) + val = 0xffff; + else if (state == ETHTOOL_ID_INACTIVE) + val = 0; + else + return -EINVAL; + + return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val); +} + +static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) +{ + unsigned int v = 0; + + if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || + type == FW_PORT_TYPE_BT_XAUI) { + v |= 
SUPPORTED_TP; + if (caps & FW_PORT_CAP_SPEED_100M) + v |= SUPPORTED_100baseT_Full; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { + v |= SUPPORTED_Backplane; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseKX_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseKX4_Full; + } else if (type == FW_PORT_TYPE_KR) + v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; + else if (type == FW_PORT_TYPE_BP_AP) + v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | + SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; + else if (type == FW_PORT_TYPE_BP4_AP) + v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | + SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | + SUPPORTED_10000baseKX4_Full; + else if (type == FW_PORT_TYPE_FIBER_XFI || + type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) + v |= SUPPORTED_FIBRE; + + if (caps & FW_PORT_CAP_ANEG) + v |= SUPPORTED_Autoneg; + return v; +} + +static unsigned int to_fw_linkcaps(unsigned int caps) +{ + unsigned int v = 0; + + if (caps & ADVERTISED_100baseT_Full) + v |= FW_PORT_CAP_SPEED_100M; + if (caps & ADVERTISED_1000baseT_Full) + v |= FW_PORT_CAP_SPEED_1G; + if (caps & ADVERTISED_10000baseT_Full) + v |= FW_PORT_CAP_SPEED_10G; + return v; +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + const struct port_info *p = netdev_priv(dev); + + if (p->port_type == FW_PORT_TYPE_BT_SGMII || + p->port_type == FW_PORT_TYPE_BT_XFI || + p->port_type == FW_PORT_TYPE_BT_XAUI) + cmd->port = PORT_TP; + else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || + p->port_type == FW_PORT_TYPE_FIBER_XAUI) + cmd->port = PORT_FIBRE; + else if (p->port_type == FW_PORT_TYPE_SFP) { + if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || + p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) + cmd->port = PORT_DA; + else + cmd->port = PORT_FIBRE; + } else + cmd->port = PORT_OTHER; + + if (p->mdio_addr >= 0) { + cmd->phy_address = p->mdio_addr; + cmd->transceiver = XCVR_EXTERNAL; + cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? + MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + } else { + cmd->phy_address = 0; /* not really, but no better option */ + cmd->transceiver = XCVR_INTERNAL; + cmd->mdio_support = 0; + } + + cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); + cmd->advertising = from_fw_linkcaps(p->port_type, + p->link_cfg.advertising); + ethtool_cmd_speed_set(cmd, + netif_carrier_ok(dev) ? p->link_cfg.speed : 0); + cmd->duplex = DUPLEX_FULL; + cmd->autoneg = p->link_cfg.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static unsigned int speed_to_caps(int speed) +{ + if (speed == SPEED_100) + return FW_PORT_CAP_SPEED_100M; + if (speed == SPEED_1000) + return FW_PORT_CAP_SPEED_1G; + if (speed == SPEED_10000) + return FW_PORT_CAP_SPEED_10G; + return 0; +} + +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + unsigned int cap; + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + u32 speed = ethtool_cmd_speed(cmd); + + if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ + return -EINVAL; + + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + /* + * PHY offers a single speed. See if that's what's + * being requested. 
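+ * E.g. a fixed-speed 10G module can only be "set" to autoneg off at
+ * its one supported speed; any other request fails with -EINVAL.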
+ */ + if (cmd->autoneg == AUTONEG_DISABLE && + (lc->supported & speed_to_caps(speed))) + return 0; + return -EINVAL; + } + + if (cmd->autoneg == AUTONEG_DISABLE) { + cap = speed_to_caps(speed); + + if (!(lc->supported & cap) || (speed == SPEED_1000) || + (speed == SPEED_10000)) + return -EINVAL; + lc->requested_speed = cap; + lc->advertising = 0; + } else { + cap = to_fw_linkcaps(cmd->advertising); + if (!(lc->supported & cap)) + return -EINVAL; + lc->requested_speed = 0; + lc->advertising = cap | FW_PORT_CAP_ANEG; + } + lc->autoneg = cmd->autoneg; + + if (netif_running(dev)) + return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + lc); + return 0; +} + +static void get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + + epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; + epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; +} + +static int set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + + if (epause->autoneg == AUTONEG_DISABLE) + lc->requested_fc = 0; + else if (lc->supported & FW_PORT_CAP_ANEG) + lc->requested_fc = PAUSE_AUTONEG; + else + return -EINVAL; + + if (epause->rx_pause) + lc->requested_fc |= PAUSE_RX; + if (epause->tx_pause) + lc->requested_fc |= PAUSE_TX; + if (netif_running(dev)) + return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + lc); + return 0; +} + +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + const struct port_info *pi = netdev_priv(dev); + const struct sge *s = &pi->adapter->sge; + + e->rx_max_pending = MAX_RX_BUFFERS; + e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; + e->rx_jumbo_max_pending = 0; + e->tx_max_pending = MAX_TXQ_ENTRIES; + + e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; + e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; + e->rx_jumbo_pending = 0; + e->tx_pending = s->ethtxq[pi->first_qset].q.size; +} + +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + int i; + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + + if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || + e->tx_pending > MAX_TXQ_ENTRIES || + e->rx_mini_pending > MAX_RSPQ_ENTRIES || + e->rx_mini_pending < MIN_RSPQ_ENTRIES || + e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + for (i = 0; i < pi->nqsets; ++i) { + s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; + s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; + s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; + } + return 0; +} + +static int closest_timer(const struct sge *s, int time) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + delta = time - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static int closest_thres(const struct sge *s, int thres) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = thres - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +/* + * Return a 
queue's interrupt hold-off time in us. 0 means no timer. + */ +static unsigned int qtimer_val(const struct adapter *adap, + const struct sge_rspq *q) +{ + unsigned int idx = q->intr_params >> 1; + + return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; +} + +/** + * set_rxq_intr_params - set a queue's interrupt holdoff parameters + * @adap: the adapter + * @q: the Rx queue + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Sets an Rx queue's interrupt hold-off time and packet count. At least + * one of the two needs to be enabled for the queue to generate interrupts. + */ +static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt) +{ + if ((us | cnt) == 0) + cnt = 1; + + if (cnt) { + int err; + u32 v, new_idx; + + new_idx = closest_thres(&adap->sge, cnt); + if (q->desc && q->pktcnt_idx != new_idx) { + /* the queue has already been created, update it */ + v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + FW_PARAMS_PARAM_YZ(q->cntxt_id); + err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, + &new_idx); + if (err) + return err; + } + q->pktcnt_idx = new_idx; + } + + us = us == 0 ? 6 : closest_timer(&adap->sge, us); + q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0); + return 0; +} + +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, + c->rx_coalesce_usecs, c->rx_max_coalesced_frames); +} + +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adap = pi->adapter; + const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; + + c->rx_coalesce_usecs = qtimer_val(adap, rq); + c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? + adap->sge.counter_val[rq->pktcnt_idx] : 0; + return 0; +} + +/** + * eeprom_ptov - translate a physical EEPROM address to virtual + * @phys_addr: the physical EEPROM address + * @fn: the PCI function number + * @sz: size of function-specific area + * + * Translate a physical EEPROM address to virtual. The first 1K is + * accessed through virtual addresses starting at 31K, the rest is + * accessed through virtual addresses starting at 0. + * + * The mapping is as follows: + * [0..1K) -> [31K..32K) + * [1K..1K+A) -> [31K-A..31K) + * [1K+A..ES) -> [0..ES-A-1K) + * + * where A = @fn * @sz, and ES = EEPROM size. + */ +static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) +{ + fn *= sz; + if (phys_addr < 1024) + return phys_addr + (31 << 10); + if (phys_addr < 1024 + fn) + return 31744 - fn + phys_addr - 1024; + if (phys_addr < EEPROMSIZE) + return phys_addr - 1024 - fn; + return -EINVAL; +} + +/* + * The next two routines implement eeprom read/write from physical addresses. + */ +static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) +{ + int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); + + if (vaddr >= 0) + vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); + return vaddr < 0 ? 
vaddr : 0; +} + +static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) +{ + int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); + + if (vaddr >= 0) + vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); + return vaddr < 0 ? vaddr : 0; +} + +#define EEPROM_MAGIC 0x38E2F10C + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, + u8 *data) +{ + int i, err = 0; + struct adapter *adapter = netdev2adap(dev); + + u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + e->magic = EEPROM_MAGIC; + for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) + err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); + + if (!err) + memcpy(data, buf + e->offset, e->len); + kfree(buf); + return err; +} + +static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + u8 *buf; + int err = 0; + u32 aligned_offset, aligned_len, *p; + struct adapter *adapter = netdev2adap(dev); + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + aligned_offset = eeprom->offset & ~3; + aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; + + if (adapter->fn > 0) { + u32 start = 1024 + adapter->fn * EEPROMPFSIZE; + + if (aligned_offset < start || + aligned_offset + aligned_len > start + EEPROMPFSIZE) + return -EPERM; + } + + if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { + /* + * RMW possibly needed for first or last words. + */ + buf = kmalloc(aligned_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); + if (!err && aligned_len > 4) + err = eeprom_rd_phys(adapter, + aligned_offset + aligned_len - 4, + (u32 *)&buf[aligned_len - 4]); + if (err) + goto out; + memcpy(buf + (eeprom->offset & 3), data, eeprom->len); + } else + buf = data; + + err = t4_seeprom_wp(adapter, false); + if (err) + goto out; + + for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { + err = eeprom_wr_phys(adapter, aligned_offset, *p); + aligned_offset += 4; + } + + if (!err) + err = t4_seeprom_wp(adapter, true); +out: + if (buf != data) + kfree(buf); + return err; +} + +static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct adapter *adap = netdev2adap(netdev); + + ef->data[sizeof(ef->data) - 1] = '\0'; + ret = request_firmware(&fw, ef->data, adap->pdev_dev); + if (ret < 0) + return ret; + + ret = t4_load_fw(adap, fw->data, fw->size); + release_firmware(fw); + if (!ret) + dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); + return ret; +} + +#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) +#define BCAST_CRC 0xa0ccc1a6 + +static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + wol->supported = WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = netdev2adap(dev)->wol; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + int err = 0; + struct port_info *pi = netdev_priv(dev); + + if (wol->wolopts & ~WOL_SUPPORTED) + return -EINVAL; + t4_wol_magic_enable(pi->adapter, pi->tx_chan, + (wol->wolopts & WAKE_MAGIC) ? 
dev->dev_addr : NULL); + if (wol->wolopts & WAKE_BCAST) { + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, + ~0ULL, 0, false); + if (!err) + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, + ~6ULL, ~0ULL, BCAST_CRC, true); + } else + t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); + return err; +} + +static int cxgb_set_features(struct net_device *dev, u32 features) +{ + const struct port_info *pi = netdev_priv(dev); + u32 changed = dev->features ^ features; + int err; + + if (!(changed & NETIF_F_HW_VLAN_RX)) + return 0; + + err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, + -1, -1, -1, + !!(features & NETIF_F_HW_VLAN_RX), true); + if (unlikely(err)) + dev->features = features ^ NETIF_F_HW_VLAN_RX; + return err; +} + +static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p) +{ + const struct port_info *pi = netdev_priv(dev); + unsigned int n = min_t(unsigned int, p->size, pi->rss_size); + + p->size = pi->rss_size; + while (n--) + p->ring_index[n] = pi->rss[n]; + return 0; +} + +static int set_rss_table(struct net_device *dev, + const struct ethtool_rxfh_indir *p) +{ + unsigned int i; + struct port_info *pi = netdev_priv(dev); + + if (p->size != pi->rss_size) + return -EINVAL; + for (i = 0; i < p->size; i++) + if (p->ring_index[i] >= pi->nqsets) + return -EINVAL; + for (i = 0; i < p->size; i++) + pi->rss[i] = p->ring_index[i]; + if (pi->adapter->flags & FULL_INIT_DONE) + return write_rss(pi, pi->rss); + return 0; +} + +static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + void *rules) +{ + const struct port_info *pi = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXFH: { + unsigned int v = pi->rss_mode; + + info->data = 0; + switch (info->flow_type) { + case TCP_V4_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V4_FLOW: + if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + } + return 0; + } + case ETHTOOL_GRXRINGS: + info->data = pi->nqsets; + return 0; + } + return -EOPNOTSUPP; +} + +static struct ethtool_ops cxgb_ethtool_ops = { + .get_settings = get_settings, + .set_settings = set_settings, + .get_drvinfo = get_drvinfo, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_ringparam = get_sge_param, + .set_ringparam = set_sge_param, + .get_coalesce = get_coalesce, + .set_coalesce 
= set_coalesce, + .get_eeprom_len = get_eeprom_len, + .get_eeprom = get_eeprom, + .set_eeprom = set_eeprom, + .get_pauseparam = get_pauseparam, + .set_pauseparam = set_pauseparam, + .get_link = ethtool_op_get_link, + .get_strings = get_strings, + .set_phys_id = identify_port, + .nway_reset = restart_autoneg, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_stats, + .get_regs_len = get_regs_len, + .get_regs = get_regs, + .get_wol = get_wol, + .set_wol = set_wol, + .get_rxnfc = get_rxnfc, + .get_rxfh_indir = get_rss_table, + .set_rxfh_indir = set_rss_table, + .flash_device = set_flash, +}; + +/* + * debugfs support + */ + +static int mem_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t mem_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file->f_path.dentry->d_inode->i_size; + unsigned int mem = (uintptr_t)file->private_data & 3; + struct adapter *adap = file->private_data - mem; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + while (count) { + size_t len; + int ret, ofst; + __be32 data[16]; + + if (mem == MEM_MC) + ret = t4_mc_read(adap, pos, data, NULL); + else + ret = t4_edc_read(adap, mem, pos, data, NULL); + if (ret) + return ret; + + ofst = pos % sizeof(data); + len = min(count, sizeof(data) - ofst); + if (copy_to_user(buf, (u8 *)data + ofst, len)) + return -EFAULT; + + buf += len; + pos += len; + count -= len; + } + count = pos - *ppos; + *ppos = pos; + return count; +} + +static const struct file_operations mem_debugfs_fops = { + .owner = THIS_MODULE, + .open = mem_open, + .read = mem_read, + .llseek = default_llseek, +}; + +static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, + unsigned int idx, unsigned int size_mb) +{ + struct dentry *de; + + de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, + (void *)adap + idx, &mem_debugfs_fops); + if (de && de->d_inode) + de->d_inode->i_size = size_mb << 20; +} + +static int __devinit setup_debugfs(struct adapter *adap) +{ + int i; + + if (IS_ERR_OR_NULL(adap->debugfs_root)) + return -1; + + i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); + if (i & EDRAM0_ENABLE) + add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); + if (i & EDRAM1_ENABLE) + add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); + if (i & EXT_MEM_ENABLE) + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); + if (adap->l2t) + debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, + &t4_l2t_fops); + return 0; +} + +/* + * upper-layer driver support + */ + +/* + * Allocate an active-open TID and set it to the supplied value. + */ +int cxgb4_alloc_atid(struct tid_info *t, void *data) +{ + int atid = -1; + + spin_lock_bh(&t->atid_lock); + if (t->afree) { + union aopen_entry *p = t->afree; + + atid = p - t->atid_tab; + t->afree = p->next; + p->data = data; + t->atids_in_use++; + } + spin_unlock_bh(&t->atid_lock); + return atid; +} +EXPORT_SYMBOL(cxgb4_alloc_atid); + +/* + * Release an active-open TID. + */ +void cxgb4_free_atid(struct tid_info *t, unsigned int atid) +{ + union aopen_entry *p = &t->atid_tab[atid]; + + spin_lock_bh(&t->atid_lock); + p->next = t->afree; + t->afree = p; + t->atids_in_use--; + spin_unlock_bh(&t->atid_lock); +} +EXPORT_SYMBOL(cxgb4_free_atid); + +/* + * Allocate a server TID and set it to the supplied value. 
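+ *
+ * PF_INET servers take a single stid from the bitmap; other families
+ * (e.g. IPv6) reserve an order-2 region of four consecutive stids via
+ * bitmap_find_free_region(), matching the release path below.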
+ */ +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) +{ + int stid; + + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) { + stid = find_first_zero_bit(t->stid_bmap, t->nstids); + if (stid < t->nstids) + __set_bit(stid, t->stid_bmap); + else + stid = -1; + } else { + stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); + if (stid < 0) + stid = -1; + } + if (stid >= 0) { + t->stid_tab[stid].data = data; + stid += t->stid_base; + t->stids_in_use++; + } + spin_unlock_bh(&t->stid_lock); + return stid; +} +EXPORT_SYMBOL(cxgb4_alloc_stid); + +/* + * Release a server TID. + */ +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) +{ + stid -= t->stid_base; + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) + __clear_bit(stid, t->stid_bmap); + else + bitmap_release_region(t->stid_bmap, stid, 2); + t->stid_tab[stid].data = NULL; + t->stids_in_use--; + spin_unlock_bh(&t->stid_lock); +} +EXPORT_SYMBOL(cxgb4_free_stid); + +/* + * Populate a TID_RELEASE WR. Caller must properly size the skb. + */ +static void mk_tid_release(struct sk_buff *skb, unsigned int chan, + unsigned int tid) +{ + struct cpl_tid_release *req; + + set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); + req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); +} + +/* + * Queue a TID release request and if necessary schedule a work queue to + * process it. + */ +static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, + unsigned int tid) +{ + void **p = &t->tid_tab[tid]; + struct adapter *adap = container_of(t, struct adapter, tids); + + spin_lock_bh(&adap->tid_release_lock); + *p = adap->tid_release_head; + /* Low 2 bits encode the Tx channel number */ + adap->tid_release_head = (void **)((uintptr_t)p | chan); + if (!adap->tid_release_task_busy) { + adap->tid_release_task_busy = true; + schedule_work(&adap->tid_release_task); + } + spin_unlock_bh(&adap->tid_release_lock); +} + +/* + * Process the list of pending TID release requests. + */ +static void process_tid_release_list(struct work_struct *work) +{ + struct sk_buff *skb; + struct adapter *adap; + + adap = container_of(work, struct adapter, tid_release_task); + + spin_lock_bh(&adap->tid_release_lock); + while (adap->tid_release_head) { + void **p = adap->tid_release_head; + unsigned int chan = (uintptr_t)p & 3; + p = (void *)p - chan; + + adap->tid_release_head = *p; + *p = NULL; + spin_unlock_bh(&adap->tid_release_lock); + + while (!(skb = alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL))) + schedule_timeout_uninterruptible(1); + + mk_tid_release(skb, chan, p - adap->tids.tid_tab); + t4_ofld_send(adap, skb); + spin_lock_bh(&adap->tid_release_lock); + } + adap->tid_release_task_busy = false; + spin_unlock_bh(&adap->tid_release_lock); +} + +/* + * Release a TID and inform HW. If we are unable to allocate the release + * message we defer to a work queue. + */ +void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) +{ + void *old; + struct sk_buff *skb; + struct adapter *adap = container_of(t, struct adapter, tids); + + old = t->tid_tab[tid]; + skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); + if (likely(skb)) { + t->tid_tab[tid] = NULL; + mk_tid_release(skb, chan, tid); + t4_ofld_send(adap, skb); + } else + cxgb4_queue_tid_release(t, chan, tid); + if (old) + atomic_dec(&t->tids_in_use); +} +EXPORT_SYMBOL(cxgb4_remove_tid); + +/* + * Allocate and initialize the TID tables. 
Returns 0 on success. + */ +static int tid_init(struct tid_info *t) +{ + size_t size; + unsigned int natids = t->natids; + + size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + + t->nstids * sizeof(*t->stid_tab) + + BITS_TO_LONGS(t->nstids) * sizeof(long); + t->tid_tab = t4_alloc_mem(size); + if (!t->tid_tab) + return -ENOMEM; + + t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; + t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; + t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; + spin_lock_init(&t->stid_lock); + spin_lock_init(&t->atid_lock); + + t->stids_in_use = 0; + t->afree = NULL; + t->atids_in_use = 0; + atomic_set(&t->tids_in_use, 0); + + /* Setup the free list for atid_tab and clear the stid bitmap. */ + if (natids) { + while (--natids) + t->atid_tab[natids - 1].next = &t->atid_tab[natids]; + t->afree = t->atid_tab; + } + bitmap_zero(t->stid_bmap, t->nstids); + return 0; +} + +/** + * cxgb4_create_server - create an IP server + * @dev: the device + * @stid: the server TID + * @sip: local IP address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IP server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. + */ +int cxgb4_create_server(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip = sip; + req->peer_ip = htonl(0); + chan = rxq_to_chan(&adap->sge, queue); + req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_ASK | + SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); + return t4_mgmt_tx(adap, skb); +} +EXPORT_SYMBOL(cxgb4_create_server); + +/** + * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU + * @mtus: the HW MTU table + * @mtu: the target MTU + * @idx: index of selected entry in the MTU table + * + * Returns the index and the value in the HW MTU table that is closest to + * but does not exceed @mtu, unless @mtu is smaller than any value in the + * table, in which case that smallest available value is selected. + */ +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx) +{ + unsigned int i = 0; + + while (i < NMTUS - 1 && mtus[i + 1] <= mtu) + ++i; + if (idx) + *idx = i; + return mtus[i]; +} +EXPORT_SYMBOL(cxgb4_best_mtu); + +/** + * cxgb4_port_chan - get the HW channel of a port + * @dev: the net device for the port + * + * Return the HW Tx channel of the given port. + */ +unsigned int cxgb4_port_chan(const struct net_device *dev) +{ + return netdev2pinfo(dev)->tx_chan; +} +EXPORT_SYMBOL(cxgb4_port_chan); + +/** + * cxgb4_port_viid - get the VI id of a port + * @dev: the net device for the port + * + * Return the VI id of the given port. + */ +unsigned int cxgb4_port_viid(const struct net_device *dev) +{ + return netdev2pinfo(dev)->viid; +} +EXPORT_SYMBOL(cxgb4_port_viid); + +/** + * cxgb4_port_idx - get the index of a port + * @dev: the net device for the port + * + * Return the index of the given port. 
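+ *
+ * Exported, like cxgb4_port_chan() and cxgb4_port_viid() above, for use
+ * by upper-layer drivers such as the RDMA and iSCSI ULDs.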
+ */ +unsigned int cxgb4_port_idx(const struct net_device *dev) +{ + return netdev2pinfo(dev)->port_id; +} +EXPORT_SYMBOL(cxgb4_port_idx); + +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6) +{ + struct adapter *adap = pci_get_drvdata(pdev); + + spin_lock(&adap->stats_lock); + t4_tp_get_tcp_stats(adap, v4, v6); + spin_unlock(&adap->stats_lock); +} +EXPORT_SYMBOL(cxgb4_get_tcp_stats); + +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, + const unsigned int *pgsz_order) +{ + struct adapter *adap = netdev2adap(dev); + + t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); + t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | + HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | + HPZ3(pgsz_order[3])); +} +EXPORT_SYMBOL(cxgb4_iscsi_init); + +static struct pci_driver cxgb4_driver; + +static void check_neigh_update(struct neighbour *neigh) +{ + const struct device *parent; + const struct net_device *netdev = neigh->dev; + + if (netdev->priv_flags & IFF_802_1Q_VLAN) + netdev = vlan_dev_real_dev(netdev); + parent = netdev->dev.parent; + if (parent && parent->driver == &cxgb4_driver.driver) + t4_l2t_update(dev_get_drvdata(parent), neigh); +} + +static int netevent_cb(struct notifier_block *nb, unsigned long event, + void *data) +{ + switch (event) { + case NETEVENT_NEIGH_UPDATE: + check_neigh_update(data); + break; + case NETEVENT_REDIRECT: + default: + break; + } + return 0; +} + +static bool netevent_registered; +static struct notifier_block cxgb4_netevent_nb = { + .notifier_call = netevent_cb +}; + +static void uld_attach(struct adapter *adap, unsigned int uld) +{ + void *handle; + struct cxgb4_lld_info lli; + + lli.pdev = adap->pdev; + lli.l2t = adap->l2t; + lli.tids = &adap->tids; + lli.ports = adap->port; + lli.vr = &adap->vres; + lli.mtus = adap->params.mtus; + if (uld == CXGB4_ULD_RDMA) { + lli.rxq_ids = adap->sge.rdma_rxq; + lli.nrxq = adap->sge.rdmaqs; + } else if (uld == CXGB4_ULD_ISCSI) { + lli.rxq_ids = adap->sge.ofld_rxq; + lli.nrxq = adap->sge.ofldqsets; + } + lli.ntxq = adap->sge.ofldqsets; + lli.nchan = adap->params.nports; + lli.nports = adap->params.nports; + lli.wr_cred = adap->params.ofldq_wr_cred; + lli.adapter_type = adap->params.rev; + lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); + lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( + t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> + (adap->fn * 4)); + lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( + t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> + (adap->fn * 4)); + lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); + lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); + lli.fw_vers = adap->params.fw_vers; + + handle = ulds[uld].add(&lli); + if (IS_ERR(handle)) { + dev_warn(adap->pdev_dev, + "could not attach to the %s driver, error %ld\n", + uld_str[uld], PTR_ERR(handle)); + return; + } + + adap->uld_handle[uld] = handle; + + if (!netevent_registered) { + register_netevent_notifier(&cxgb4_netevent_nb); + netevent_registered = true; + } + + if (adap->flags & FULL_INIT_DONE) + ulds[uld].state_change(handle, CXGB4_STATE_UP); +} + +static void attach_ulds(struct adapter *adap) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + list_add_tail(&adap->list_node, &adapter_list); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (ulds[i].add) + uld_attach(adap, i); + mutex_unlock(&uld_mutex); +} + +static void detach_ulds(struct adapter *adap) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + list_del(&adap->list_node); + for (i = 0; i < 
CXGB4_ULD_MAX; i++) + if (adap->uld_handle[i]) { + ulds[i].state_change(adap->uld_handle[i], + CXGB4_STATE_DETACH); + adap->uld_handle[i] = NULL; + } + if (netevent_registered && list_empty(&adapter_list)) { + unregister_netevent_notifier(&cxgb4_netevent_nb); + netevent_registered = false; + } + mutex_unlock(&uld_mutex); +} + +static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (adap->uld_handle[i]) + ulds[i].state_change(adap->uld_handle[i], new_state); + mutex_unlock(&uld_mutex); +} + +/** + * cxgb4_register_uld - register an upper-layer driver + * @type: the ULD type + * @p: the ULD methods + * + * Registers an upper-layer driver with this driver and notifies the ULD + * about any presently available devices that support its type. Returns + * %-EBUSY if a ULD of the same type is already registered. + */ +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) +{ + int ret = 0; + struct adapter *adap; + + if (type >= CXGB4_ULD_MAX) + return -EINVAL; + mutex_lock(&uld_mutex); + if (ulds[type].add) { + ret = -EBUSY; + goto out; + } + ulds[type] = *p; + list_for_each_entry(adap, &adapter_list, list_node) + uld_attach(adap, type); +out: mutex_unlock(&uld_mutex); + return ret; +} +EXPORT_SYMBOL(cxgb4_register_uld); + +/** + * cxgb4_unregister_uld - unregister an upper-layer driver + * @type: the ULD type + * + * Unregisters an existing upper-layer driver. + */ +int cxgb4_unregister_uld(enum cxgb4_uld type) +{ + struct adapter *adap; + + if (type >= CXGB4_ULD_MAX) + return -EINVAL; + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) + adap->uld_handle[type] = NULL; + ulds[type].add = NULL; + mutex_unlock(&uld_mutex); + return 0; +} +EXPORT_SYMBOL(cxgb4_unregister_uld); + +/** + * cxgb_up - enable the adapter + * @adap: adapter being enabled + * + * Called when the first port is enabled, this function performs the + * actions necessary to make an adapter operational, such as completing + * the initialization of HW modules, and enabling interrupts. + * + * Must be called with the rtnl lock held. + */ +static int cxgb_up(struct adapter *adap) +{ + int err; + + err = setup_sge_queues(adap); + if (err) + goto out; + err = setup_rss(adap); + if (err) + goto freeq; + + if (adap->flags & USING_MSIX) { + name_msix_vecs(adap); + err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, + adap->msix_info[0].desc, adap); + if (err) + goto irq_err; + + err = request_msix_queue_irqs(adap); + if (err) { + free_irq(adap->msix_info[0].vec, adap); + goto irq_err; + } + } else { + err = request_irq(adap->pdev->irq, t4_intr_handler(adap), + (adap->flags & USING_MSI) ? 
0 : IRQF_SHARED, + adap->port[0]->name, adap); + if (err) + goto irq_err; + } + enable_rx(adap); + t4_sge_start(adap); + t4_intr_enable(adap); + adap->flags |= FULL_INIT_DONE; + notify_ulds(adap, CXGB4_STATE_UP); + out: + return err; + irq_err: + dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); + freeq: + t4_free_sge_resources(adap); + goto out; +} + +static void cxgb_down(struct adapter *adapter) +{ + t4_intr_disable(adapter); + cancel_work_sync(&adapter->tid_release_task); + adapter->tid_release_task_busy = false; + adapter->tid_release_head = NULL; + + if (adapter->flags & USING_MSIX) { + free_msix_queue_irqs(adapter); + free_irq(adapter->msix_info[0].vec, adapter); + } else + free_irq(adapter->pdev->irq, adapter); + quiesce_rx(adapter); + t4_sge_stop(adapter); + t4_free_sge_resources(adapter); + adapter->flags &= ~FULL_INIT_DONE; +} + +/* + * net_device operations + */ +static int cxgb_open(struct net_device *dev) +{ + int err; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + netif_carrier_off(dev); + + if (!(adapter->flags & FULL_INIT_DONE)) { + err = cxgb_up(adapter); + if (err < 0) + return err; + } + + err = link_start(dev); + if (!err) + netif_tx_start_all_queues(dev); + return err; +} + +static int cxgb_close(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); +} + +static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *ns) +{ + struct port_stats stats; + struct port_info *p = netdev_priv(dev); + struct adapter *adapter = p->adapter; + + spin_lock(&adapter->stats_lock); + t4_get_port_stats(adapter, p->tx_chan, &stats); + spin_unlock(&adapter->stats_lock); + + ns->tx_bytes = stats.tx_octets; + ns->tx_packets = stats.tx_frames; + ns->rx_bytes = stats.rx_octets; + ns->rx_packets = stats.rx_frames; + ns->multicast = stats.rx_mcast_frames; + + /* detailed rx_errors */ + ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + + stats.rx_runt; + ns->rx_over_errors = 0; + ns->rx_crc_errors = stats.rx_fcs_err; + ns->rx_frame_errors = stats.rx_symbol_err; + ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + + stats.rx_ovflow2 + stats.rx_ovflow3 + + stats.rx_trunc0 + stats.rx_trunc1 + + stats.rx_trunc2 + stats.rx_trunc3; + ns->rx_missed_errors = 0; + + /* detailed tx_errors */ + ns->tx_aborted_errors = 0; + ns->tx_carrier_errors = 0; + ns->tx_fifo_errors = 0; + ns->tx_heartbeat_errors = 0; + ns->tx_window_errors = 0; + + ns->tx_errors = stats.tx_error_frames; + ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + + ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; + return ns; +} + +static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + unsigned int mbox; + int ret = 0, prtad, devad; + struct port_info *pi = netdev_priv(dev); + struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; + + switch (cmd) { + case SIOCGMIIPHY: + if (pi->mdio_addr < 0) + return -EOPNOTSUPP; + data->phy_id = pi->mdio_addr; + break; + case SIOCGMIIREG: + case SIOCSMIIREG: + if (mdio_phy_id_is_c45(data->phy_id)) { + prtad = mdio_phy_id_prtad(data->phy_id); + devad = mdio_phy_id_devad(data->phy_id); + } else if (data->phy_id < 32) { + prtad = data->phy_id; + devad = 0; + data->reg_num &= 0x1f; + } else + return -EINVAL; + + mbox = pi->adapter->fn; + if (cmd == 
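/*
 * (For reference: the clause-45 phy_id decoded in cxgb_ioctl() packs both
 * addresses into one word, phy_id = MDIO_PHY_ID_C45 | (prtad << 5) | devad,
 * so e.g. port address 3 with MMD 1 encodes as 0x8061; the
 * mdio_phy_id_prtad() and mdio_phy_id_devad() helpers from
 * include/linux/mdio.h mask the two fields back out.)
 */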
SIOCGMIIREG) + ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, + data->reg_num, &data->val_out); + else + ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, + data->reg_num, data->val_in); + break; + default: + return -EOPNOTSUPP; + } + return ret; +} + +static void cxgb_set_rxmode(struct net_device *dev) +{ + /* unfortunately we can't return errors to the stack */ + set_rxmode(dev, -1, false); +} + +static int cxgb_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ + return -EINVAL; + ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1, + -1, -1, -1, true); + if (!ret) + dev->mtu = new_mtu; + return ret; +} + +static int cxgb_set_mac_addr(struct net_device *dev, void *p) +{ + int ret; + struct sockaddr *addr = p; + struct port_info *pi = netdev_priv(dev); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid, + pi->xact_addr_filt, addr->sa_data, true, true); + if (ret < 0) + return ret; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + pi->xact_addr_filt = ret; + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cxgb_netpoll(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + if (adap->flags & USING_MSIX) { + int i; + struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; + + for (i = pi->nqsets; i; i--, rx++) + t4_sge_intr_msix(0, &rx->rspq); + } else + t4_intr_handler(adap)(0, adap); +} +#endif + +static const struct net_device_ops cxgb4_netdev_ops = { + .ndo_open = cxgb_open, + .ndo_stop = cxgb_close, + .ndo_start_xmit = t4_eth_xmit, + .ndo_get_stats64 = cxgb_get_stats, + .ndo_set_rx_mode = cxgb_set_rxmode, + .ndo_set_mac_address = cxgb_set_mac_addr, + .ndo_set_features = cxgb_set_features, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = cxgb_ioctl, + .ndo_change_mtu = cxgb_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cxgb_netpoll, +#endif +}; + +void t4_fatal_err(struct adapter *adap) +{ + t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); + t4_intr_disable(adap); + dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); +} + +static void setup_memwin(struct adapter *adap) +{ + u32 bar0; + + bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), + (bar0 + MEMWIN0_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), + (bar0 + MEMWIN1_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), + (bar0 + MEMWIN2_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); + if (adap->vres.ocq.size) { + unsigned int start, sz_kb; + + start = pci_resource_start(adap->pdev, 2) + + OCQ_WIN_OFFSET(adap->pdev, &adap->vres); + sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3), + start | BIR(1) | WINDOW(ilog2(sz_kb))); + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3), + adap->vres.ocq.start); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3)); + } +} + +static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) +{ + u32 v; + int ret; + + /* get device capabilities */ + memset(c, 0, 
sizeof(*c)); + c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + c->retval_len16 = htonl(FW_LEN16(*c)); + ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); + if (ret < 0) + return ret; + + /* select capabilities we'll be using */ + if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { + if (!vf_acls) + c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); + else + c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM); + } else if (vf_acls) { + dev_err(adap->pdev_dev, "virtualization ACLs not supported"); + return ret; + } + c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); + if (ret < 0) + return ret; + + ret = t4_config_glbl_rss(adap, adap->fn, + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); + if (ret < 0) + return ret; + + ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, + 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); + if (ret < 0) + return ret; + + t4_sge_init(adap); + + /* tweak some settings */ + t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); + t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); + t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); + v = t4_read_reg(adap, TP_PIO_DATA); + t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); + + /* get basic stuff going */ + return t4_early_init(adap, adap->fn); +} + +/* + * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. + */ +#define MAX_ATIDS 8192U + +/* + * Phase 0 of initialization: contact FW, obtain config, perform basic init. + */ +static int adap_init0(struct adapter *adap) +{ + int ret; + u32 v, port_vec; + enum dev_state state; + u32 params[7], val[7]; + struct fw_caps_config_cmd c; + + ret = t4_check_fw_version(adap); + if (ret == -EINVAL || ret > 0) { + if (upgrade_fw(adap) >= 0) /* recache FW version */ + ret = t4_check_fw_version(adap); + } + if (ret < 0) + return ret; + + /* contact FW, request master */ + ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state); + if (ret < 0) { + dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", + ret); + return ret; + } + + /* reset device */ + ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST); + if (ret < 0) + goto bye; + + for (v = 0; v < SGE_NTIMERS - 1; v++) + adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); + adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; + adap->sge.counter_val[0] = 1; + for (v = 1; v < SGE_NCOUNTERS; v++) + adap->sge.counter_val[v] = min(intr_cnt[v - 1], + THRESHOLD_3_MASK); +#define FW_PARAM_DEV(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + + params[0] = FW_PARAM_DEV(CCLK); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val); + if (ret < 0) + goto bye; + adap->params.vpd.cclk = val[0]; + + ret = adap_init1(adap, &c); + if (ret < 0) + goto bye; + +#define FW_PARAM_PFVF(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ + FW_PARAMS_PARAM_Y(adap->fn)) + + params[0] = FW_PARAM_DEV(PORTVEC); + params[1] = FW_PARAM_PFVF(L2T_START); + params[2] = FW_PARAM_PFVF(L2T_END); + params[3] = FW_PARAM_PFVF(FILTER_START); + params[4] = FW_PARAM_PFVF(FILTER_END); + params[5] = FW_PARAM_PFVF(IQFLINT_START); + params[6] = FW_PARAM_PFVF(EQ_START); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val); + if (ret < 0) + goto bye; + port_vec = val[0]; + 
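/*
 * (How the FW_PARAM_* macros above compose a query: each 32-bit word packs
 * a mnemonic selecting the device or PF/VF namespace via FW_PARAMS_MNEM(),
 * the parameter index via FW_PARAMS_PARAM_X(), and, for per-function
 * queries, the function number via FW_PARAMS_PARAM_Y(); t4_query_params()
 * then fills one val[] slot per packed request, as with port_vec above.)
 */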
adap->tids.ftid_base = val[3]; + adap->tids.nftids = val[4] - val[3] + 1; + adap->sge.ingr_start = val[5]; + adap->sge.egr_start = val[6]; + + if (c.ofldcaps) { + /* query offload-related parameters */ + params[0] = FW_PARAM_DEV(NTID); + params[1] = FW_PARAM_PFVF(SERVER_START); + params[2] = FW_PARAM_PFVF(SERVER_END); + params[3] = FW_PARAM_PFVF(TDDP_START); + params[4] = FW_PARAM_PFVF(TDDP_END); + params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, + val); + if (ret < 0) + goto bye; + adap->tids.ntids = val[0]; + adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); + adap->tids.stid_base = val[1]; + adap->tids.nstids = val[2] - val[1] + 1; + adap->vres.ddp.start = val[3]; + adap->vres.ddp.size = val[4] - val[3] + 1; + adap->params.ofldq_wr_cred = val[5]; + adap->params.offload = 1; + } + if (c.rdmacaps) { + params[0] = FW_PARAM_PFVF(STAG_START); + params[1] = FW_PARAM_PFVF(STAG_END); + params[2] = FW_PARAM_PFVF(RQ_START); + params[3] = FW_PARAM_PFVF(RQ_END); + params[4] = FW_PARAM_PFVF(PBL_START); + params[5] = FW_PARAM_PFVF(PBL_END); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, + val); + if (ret < 0) + goto bye; + adap->vres.stag.start = val[0]; + adap->vres.stag.size = val[1] - val[0] + 1; + adap->vres.rq.start = val[2]; + adap->vres.rq.size = val[3] - val[2] + 1; + adap->vres.pbl.start = val[4]; + adap->vres.pbl.size = val[5] - val[4] + 1; + + params[0] = FW_PARAM_PFVF(SQRQ_START); + params[1] = FW_PARAM_PFVF(SQRQ_END); + params[2] = FW_PARAM_PFVF(CQ_START); + params[3] = FW_PARAM_PFVF(CQ_END); + params[4] = FW_PARAM_PFVF(OCQ_START); + params[5] = FW_PARAM_PFVF(OCQ_END); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, + val); + if (ret < 0) + goto bye; + adap->vres.qp.start = val[0]; + adap->vres.qp.size = val[1] - val[0] + 1; + adap->vres.cq.start = val[2]; + adap->vres.cq.size = val[3] - val[2] + 1; + adap->vres.ocq.start = val[4]; + adap->vres.ocq.size = val[5] - val[4] + 1; + } + if (c.iscsicaps) { + params[0] = FW_PARAM_PFVF(ISCSI_START); + params[1] = FW_PARAM_PFVF(ISCSI_END); + ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params, + val); + if (ret < 0) + goto bye; + adap->vres.iscsi.start = val[0]; + adap->vres.iscsi.size = val[1] - val[0] + 1; + } +#undef FW_PARAM_PFVF +#undef FW_PARAM_DEV + + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + adap->flags |= FW_OK; + + /* These are finalized by FW initialization, load their values now */ + v = t4_read_reg(adap, TP_TIMER_RESOLUTION); + adap->params.tp.tre = TIMERRESOLUTION_GET(v); + t4_read_mtu_tbl(adap, adap->params.mtus, NULL); + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + +#ifdef CONFIG_PCI_IOV + /* + * Provision resource limits for Virtual Functions. We currently + * grant them all the same static resource limits except for the Port + * Access Rights Mask which we're assigning based on the PF. All of + * the static provisioning stuff for both the PF and VF really needs + * to be managed in a persistent manner for each device which the + * firmware controls. + */ + { + int pf, vf; + + for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) { + if (num_vf[pf] <= 0) + continue; + + /* VF numbering starts at 1! 
*/ + for (vf = 1; vf <= num_vf[pf]; vf++) { + ret = t4_cfg_pfvf(adap, adap->fn, pf, vf, + VFRES_NEQ, VFRES_NETHCTRL, + VFRES_NIQFLINT, VFRES_NIQ, + VFRES_TC, VFRES_NVI, + FW_PFVF_CMD_CMASK_MASK, + pfvfres_pmask(adap, pf, vf), + VFRES_NEXACTF, + VFRES_R_CAPS, VFRES_WX_CAPS); + if (ret < 0) + dev_warn(adap->pdev_dev, "failed to " + "provision pf/vf=%d/%d; " + "err=%d\n", pf, vf, ret); + } + } + } +#endif + + setup_memwin(adap); + return 0; + + /* + * If a command timed out or failed with EIO FW does not operate within + * its spec or something catastrophic happened to HW/FW, stop issuing + * commands. + */ +bye: if (ret != -ETIMEDOUT && ret != -EIO) + t4_fw_bye(adap, adap->fn); + return ret; +} + +/* EEH callbacks */ + +static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + int i; + struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap) + goto out; + + rtnl_lock(); + adap->flags &= ~FW_OK; + notify_ulds(adap, CXGB4_STATE_START_RECOVERY); + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + + netif_device_detach(dev); + netif_carrier_off(dev); + } + if (adap->flags & FULL_INIT_DONE) + cxgb_down(adap); + rtnl_unlock(); + pci_disable_device(pdev); +out: return state == pci_channel_io_perm_failure ? + PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) +{ + int i, ret; + struct fw_caps_config_cmd c; + struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap) { + pci_restore_state(pdev); + pci_save_state(pdev); + return PCI_ERS_RESULT_RECOVERED; + } + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "cannot reenable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + pci_cleanup_aer_uncorrect_error_status(pdev); + + if (t4_wait_dev_ready(adap) < 0) + return PCI_ERS_RESULT_DISCONNECT; + if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL)) + return PCI_ERS_RESULT_DISCONNECT; + adap->flags |= FW_OK; + if (adap_init1(adap, &c)) + return PCI_ERS_RESULT_DISCONNECT; + + for_each_port(adap, i) { + struct port_info *p = adap2pinfo(adap, i); + + ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1, + NULL, NULL); + if (ret < 0) + return PCI_ERS_RESULT_DISCONNECT; + p->viid = ret; + p->xact_addr_filt = -1; + } + + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + setup_memwin(adap); + if (cxgb_up(adap)) + return PCI_ERS_RESULT_DISCONNECT; + return PCI_ERS_RESULT_RECOVERED; +} + +static void eeh_resume(struct pci_dev *pdev) +{ + int i; + struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap) + return; + + rtnl_lock(); + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + + if (netif_running(dev)) { + link_start(dev); + cxgb_set_rxmode(dev); + } + netif_device_attach(dev); + } + rtnl_unlock(); +} + +static struct pci_error_handlers cxgb4_eeh = { + .error_detected = eeh_err_detected, + .slot_reset = eeh_slot_reset, + .resume = eeh_resume, +}; + +static inline bool is_10g_port(const struct link_config *lc) +{ + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; +} + +static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, + unsigned int size, unsigned int iqe_size) +{ + q->intr_params = QINTR_TIMER_IDX(timer_idx) | + (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); + q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? 
pkt_cnt_idx : 0; + q->iqe_len = iqe_size; + q->size = size; +} + +/* + * Perform default configuration of DMA queues depending on the number and type + * of ports we found and the number of available CPUs. Most settings can be + * modified by the admin prior to actual use. + */ +static void __devinit cfg_queues(struct adapter *adap) +{ + struct sge *s = &adap->sge; + int i, q10g = 0, n10g = 0, qidx = 0; + + for_each_port(adap, i) + n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); + + /* + * We default to 1 queue per non-10G port and up to # of cores queues + * per 10G port. + */ + if (n10g) + q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; + if (q10g > num_online_cpus()) + q10g = num_online_cpus(); + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = qidx; + pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1; + qidx += pi->nqsets; + } + + s->ethqsets = qidx; + s->max_ethqsets = qidx; /* MSI-X may lower it later */ + + if (is_offload(adap)) { + /* + * For offload we use 1 queue/channel if all ports are up to 1G, + * otherwise we divide all available queues amongst the channels + * capped by the number of available cores. + */ + if (n10g) { + i = min_t(int, ARRAY_SIZE(s->ofldrxq), + num_online_cpus()); + s->ofldqsets = roundup(i, adap->params.nports); + } else + s->ofldqsets = adap->params.nports; + /* For RDMA one Rx queue per channel suffices */ + s->rdmaqs = adap->params.nports; + } + + for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { + struct sge_eth_rxq *r = &s->ethrxq[i]; + + init_rspq(&r->rspq, 0, 0, 1024, 64); + r->fl.size = 72; + } + + for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) + s->ethtxq[i].q.size = 1024; + + for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) + s->ctrlq[i].q.size = 512; + + for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) + s->ofldtxq[i].q.size = 1024; + + for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { + struct sge_ofld_rxq *r = &s->ofldrxq[i]; + + init_rspq(&r->rspq, 0, 0, 1024, 64); + r->rspq.uld = CXGB4_ULD_ISCSI; + r->fl.size = 72; + } + + for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { + struct sge_ofld_rxq *r = &s->rdmarxq[i]; + + init_rspq(&r->rspq, 0, 0, 511, 64); + r->rspq.uld = CXGB4_ULD_RDMA; + r->fl.size = 72; + } + + init_rspq(&s->fw_evtq, 6, 0, 512, 64); + init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); +} + +/* + * Reduce the number of Ethernet queues across all ports to at most n. + * n provides at least one queue per port. 
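 *
 * As a worked example of the cfg_queues() heuristic above (numbers,
 * including MAX_ETH_QSETS = 32, are illustrative): a 4-port adapter with
 * two 10G and two 1G ports and 8 online CPUs gives
 *     q10g = (32 - (4 - 2)) / 2 = 15, capped to 8 by num_online_cpus(),
 * so the ports receive 8, 8, 1 and 1 queue sets and ethqsets = 18;
 * reduce_ethqs() below walks those counts back down whenever MSI-X cannot
 * supply that many vectors.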
+ */ +static void __devinit reduce_ethqs(struct adapter *adap, int n) +{ + int i; + struct port_info *pi; + + while (n < adap->sge.ethqsets) + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + if (pi->nqsets > 1) { + pi->nqsets--; + adap->sge.ethqsets--; + if (adap->sge.ethqsets <= n) + break; + } + } + + n = 0; + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + pi->first_qset = n; + n += pi->nqsets; + } +} + +/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ +#define EXTRA_VECS 2 + +static int __devinit enable_msix(struct adapter *adap) +{ + int ofld_need = 0; + int i, err, want, need; + struct sge *s = &adap->sge; + unsigned int nchan = adap->params.nports; + struct msix_entry entries[MAX_INGQ + 1]; + + for (i = 0; i < ARRAY_SIZE(entries); ++i) + entries[i].entry = i; + + want = s->max_ethqsets + EXTRA_VECS; + if (is_offload(adap)) { + want += s->rdmaqs + s->ofldqsets; + /* need nchan for each possible ULD */ + ofld_need = 2 * nchan; + } + need = adap->params.nports + EXTRA_VECS + ofld_need; + + while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need) + want = err; + + if (!err) { + /* + * Distribute available vectors to the various queue groups. + * Every group gets its minimum requirement and NIC gets top + * priority for leftovers. + */ + i = want - EXTRA_VECS - ofld_need; + if (i < s->max_ethqsets) { + s->max_ethqsets = i; + if (i < s->ethqsets) + reduce_ethqs(adap, i); + } + if (is_offload(adap)) { + i = want - EXTRA_VECS - s->max_ethqsets; + i -= ofld_need - nchan; + s->ofldqsets = (i / nchan) * nchan; /* round down */ + } + for (i = 0; i < want; ++i) + adap->msix_info[i].vec = entries[i].vector; + } else if (err > 0) + dev_info(adap->pdev_dev, + "only %d MSI-X vectors left, not using MSI-X\n", err); + return err; +} + +#undef EXTRA_VECS + +static int __devinit init_rss(struct adapter *adap) +{ + unsigned int i, j; + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); + if (!pi->rss) + return -ENOMEM; + for (j = 0; j < pi->rss_size; j++) + pi->rss[j] = j % pi->nqsets; + } + return 0; +} + +static void __devinit print_port_info(const struct net_device *dev) +{ + static const char *base[] = { + "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4", + "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4" + }; + + char buf[80]; + char *bufp = buf; + const char *spd = ""; + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adap = pi->adapter; + + if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) + spd = " 2.5 GT/s"; + else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) + spd = " 5 GT/s"; + + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) + bufp += sprintf(bufp, "100/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) + bufp += sprintf(bufp, "1000/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) + bufp += sprintf(bufp, "10G/"); + if (bufp != buf) + --bufp; + sprintf(bufp, "BASE-%s", base[pi->port_type]); + + netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", + adap->params.vpd.id, adap->params.rev, buf, + is_offload(adap) ? "R" : "", adap->params.pci.width, spd, + (adap->flags & USING_MSIX) ? " MSI-X" : + (adap->flags & USING_MSI) ? 
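/*
 * (A worked trace of the enable_msix() negotiation above, illustrative
 * numbers: with want = 18 and need = 6, a host able to supply only 11
 * vectors makes pci_enable_msix() return 11 -- that API's "available
 * count" convention -- so the loop retries with want = 11, succeeds with
 * a return of 0, and the leftover-distribution code then trims
 * max_ethqsets/ofldqsets to fit the 11 vectors actually granted.)
 */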
" MSI" : ""); + netdev_info(dev, "S/N: %s, E/C: %s\n", + adap->params.vpd.sn, adap->params.vpd.ec); +} + +static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev) +{ + u16 v; + int pos; + + pos = pci_pcie_cap(dev); + if (pos > 0) { + pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v); + v |= PCI_EXP_DEVCTL_RELAX_EN; + pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v); + } +} + +/* + * Free the following resources: + * - memory used for tables + * - MSI/MSI-X + * - net devices + * - resources FW is holding for us + */ +static void free_some_resources(struct adapter *adapter) +{ + unsigned int i; + + t4_free_mem(adapter->l2t); + t4_free_mem(adapter->tids.tid_tab); + disable_msi(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]) { + kfree(adap2pinfo(adapter, i)->rss); + free_netdev(adapter->port[i]); + } + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, adapter->fn); +} + +#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) +#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ + NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) + +static int __devinit init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int func, i, err; + struct port_info *pi; + unsigned int highdma = 0; + struct adapter *adapter = NULL; + + printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); + + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + /* Just info, some other driver may have claimed the device. */ + dev_info(&pdev->dev, "cannot obtain PCI resources\n"); + return err; + } + + /* We control everything through one PF */ + func = PCI_FUNC(pdev->devfn); + if (func != ent->driver_data) { + pci_save_state(pdev); /* to restore SR-IOV later */ + goto sriov; + } + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto out_release_regions; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + highdma = NETIF_F_HIGHDMA; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " + "coherent allocations\n"); + goto out_disable_device; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto out_disable_device; + } + } + + pci_enable_pcie_error_reporting(pdev); + enable_pcie_relaxed_ordering(pdev); + pci_set_master(pdev); + pci_save_state(pdev); + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto out_disable_device; + } + + adapter->regs = pci_ioremap_bar(pdev, 0); + if (!adapter->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto out_free_adapter; + } + + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + adapter->fn = func; + adapter->msg_enable = dflt_msg_enable; + memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); + + spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->tid_release_lock); + + INIT_WORK(&adapter->tid_release_task, process_tid_release_list); + + err = t4_prep_adapter(adapter); + if (err) + goto out_unmap_bar; + err = adap_init0(adapter); + if (err) + goto out_unmap_bar; + + for_each_port(adapter, i) { + struct net_device *netdev; + + netdev = alloc_etherdev_mq(sizeof(struct port_info), + MAX_ETH_QSETS); + if (!netdev) { + err = -ENOMEM; + goto out_free_dev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter->port[i] = netdev; + pi = netdev_priv(netdev); + pi->adapter = adapter; + pi->xact_addr_filt = 
-1; + pi->port_id = i; + netdev->irq = pdev->irq; + + netdev->hw_features = NETIF_F_SG | TSO_FLAGS | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_RXHASH | + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + netdev->features |= netdev->hw_features | highdma; + netdev->vlan_features = netdev->features & VLAN_FEAT; + + netdev->netdev_ops = &cxgb4_netdev_ops; + SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); + } + + pci_set_drvdata(pdev, adapter); + + if (adapter->flags & FW_OK) { + err = t4_port_init(adapter, func, func, 0); + if (err) + goto out_free_dev; + } + + /* + * Configure queues and allocate tables now, they can be needed as + * soon as the first register_netdev completes. + */ + cfg_queues(adapter); + + adapter->l2t = t4_init_l2t(); + if (!adapter->l2t) { + /* We tolerate a lack of L2T, giving up some functionality */ + dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); + adapter->params.offload = 0; + } + + if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { + dev_warn(&pdev->dev, "could not allocate TID table, " + "continuing\n"); + adapter->params.offload = 0; + } + + /* See what interrupts we'll be using */ + if (msi > 1 && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else if (msi > 0 && pci_enable_msi(pdev) == 0) + adapter->flags |= USING_MSI; + + err = init_rss(adapter); + if (err) + goto out_free_dev; + + /* + * The card is now ready to go. If any errors occur during device + * registration we do not fail the whole card but rather proceed only + * with the ports we manage to register successfully. However we must + * register at least one net device. + */ + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); + netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); + + err = register_netdev(adapter->port[i]); + if (err) + break; + adapter->chan_map[pi->tx_chan] = i; + print_port_info(adapter->port[i]); + } + if (i == 0) { + dev_err(&pdev->dev, "could not register any net devices\n"); + goto out_free_dev; + } + if (err) { + dev_warn(&pdev->dev, "only %d net devices registered\n", i); + err = 0; + } + + if (cxgb4_debugfs_root) { + adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), + cxgb4_debugfs_root); + setup_debugfs(adapter); + } + + if (is_offload(adapter)) + attach_ulds(adapter); + +sriov: +#ifdef CONFIG_PCI_IOV + if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) + if (pci_enable_sriov(pdev, num_vf[func]) == 0) + dev_info(&pdev->dev, + "instantiated %u virtual functions\n", + num_vf[func]); +#endif + return 0; + + out_free_dev: + free_some_resources(adapter); + out_unmap_bar: + iounmap(adapter->regs); + out_free_adapter: + kfree(adapter); + out_disable_device: + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + out_release_regions: + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void __devexit remove_one(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + pci_disable_sriov(pdev); + + if (adapter) { + int i; + + if (is_offload(adapter)) + detach_ulds(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]->reg_state == NETREG_REGISTERED) + unregister_netdev(adapter->port[i]); + + if (adapter->debugfs_root) + debugfs_remove_recursive(adapter->debugfs_root); + + if (adapter->flags & FULL_INIT_DONE) + cxgb_down(adapter); + + free_some_resources(adapter); + iounmap(adapter->regs); + kfree(adapter); + pci_disable_pcie_error_reporting(pdev); + 
pci_disable_device(pdev); + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + } else + pci_release_regions(pdev); +} + +static struct pci_driver cxgb4_driver = { + .name = KBUILD_MODNAME, + .id_table = cxgb4_pci_tbl, + .probe = init_one, + .remove = __devexit_p(remove_one), + .err_handler = &cxgb4_eeh, +}; + +static int __init cxgb4_init_module(void) +{ + int ret; + + /* Debugfs support is optional, just warn if this fails */ + cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!cxgb4_debugfs_root) + pr_warning("could not create debugfs entry, continuing\n"); + + ret = pci_register_driver(&cxgb4_driver); + if (ret < 0) + debugfs_remove(cxgb4_debugfs_root); + return ret; +} + +static void __exit cxgb4_cleanup_module(void) +{ + pci_unregister_driver(&cxgb4_driver); + debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ +} + +module_init(cxgb4_init_module); +module_exit(cxgb4_cleanup_module); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h new file mode 100644 index 0000000..b1d39b8 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -0,0 +1,239 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_OFLD_H +#define __CXGB4_OFLD_H + +#include +#include +#include +#include + +/* CPL message priority levels */ +enum { + CPL_PRIORITY_DATA = 0, /* data messages */ + CPL_PRIORITY_SETUP = 1, /* connection setup messages */ + CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */ + CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */ + CPL_PRIORITY_ACK = 1, /* RX ACK messages */ + CPL_PRIORITY_CONTROL = 1 /* control messages */ +}; + +#define INIT_TP_WR(w, tid) do { \ + (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \ + FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ + FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +#define INIT_TP_WR_CPL(w, cpl, tid) do { \ + INIT_TP_WR(w, tid); \ + OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \ +} while (0) + +#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ + (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ + FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +/* Special asynchronous notification message */ +#define CXGB4_MSG_AN ((void *)1) + +struct serv_entry { + void *data; +}; + +union aopen_entry { + void *data; + union aopen_entry *next; +}; + +/* + * Holds the size, base address, free list start, etc of the TID, server TID, + * and active-open TID tables. The tables themselves are allocated dynamically. + */ +struct tid_info { + void **tid_tab; + unsigned int ntids; + + struct serv_entry *stid_tab; + unsigned long *stid_bmap; + unsigned int nstids; + unsigned int stid_base; + + union aopen_entry *atid_tab; + unsigned int natids; + + unsigned int nftids; + unsigned int ftid_base; + + spinlock_t atid_lock ____cacheline_aligned_in_smp; + union aopen_entry *afree; + unsigned int atids_in_use; + + spinlock_t stid_lock; + unsigned int stids_in_use; + + atomic_t tids_in_use; +}; + +static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) +{ + return tid < t->ntids ? t->tid_tab[tid] : NULL; +} + +static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) +{ + return atid < t->natids ? t->atid_tab[atid].data : NULL; +} + +static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) +{ + stid -= t->stid_base; + return stid < t->nstids ? 
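/*
 * (Worked example for lookup_stid(), illustrative numbers: with
 * stid_base = 128 and nstids = 64, server TID 130 lands in stid_tab[2],
 * while any TID outside [128, 192) falls through to NULL.)
 */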
t->stid_tab[stid].data : NULL; +} + +static inline void cxgb4_insert_tid(struct tid_info *t, void *data, + unsigned int tid) +{ + t->tid_tab[tid] = data; + atomic_inc(&t->tids_in_use); +} + +int cxgb4_alloc_atid(struct tid_info *t, void *data); +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); +void cxgb4_free_atid(struct tid_info *t, unsigned int atid); +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); +void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); + +struct in6_addr; + +int cxgb4_create_server(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, unsigned int queue); + +static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) +{ + skb_set_queue_mapping(skb, (queue << 1) | prio); +} + +enum cxgb4_uld { + CXGB4_ULD_RDMA, + CXGB4_ULD_ISCSI, + CXGB4_ULD_MAX +}; + +enum cxgb4_state { + CXGB4_STATE_UP, + CXGB4_STATE_START_RECOVERY, + CXGB4_STATE_DOWN, + CXGB4_STATE_DETACH +}; + +struct pci_dev; +struct l2t_data; +struct net_device; +struct pkt_gl; +struct tp_tcp_stats; + +struct cxgb4_range { + unsigned int start; + unsigned int size; +}; + +struct cxgb4_virt_res { /* virtualized HW resources */ + struct cxgb4_range ddp; + struct cxgb4_range iscsi; + struct cxgb4_range stag; + struct cxgb4_range rq; + struct cxgb4_range pbl; + struct cxgb4_range qp; + struct cxgb4_range cq; + struct cxgb4_range ocq; +}; + +#define OCQ_WIN_OFFSET(pdev, vres) \ + (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size)) + +/* + * Block of information the LLD provides to ULDs attaching to a device. + */ +struct cxgb4_lld_info { + struct pci_dev *pdev; /* associated PCI device */ + struct l2t_data *l2t; /* L2 table */ + struct tid_info *tids; /* TID table */ + struct net_device **ports; /* device ports */ + const struct cxgb4_virt_res *vr; /* assorted HW resources */ + const unsigned short *mtus; /* MTU table */ + const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ + unsigned short nrxq; /* # of Rx queues */ + unsigned short ntxq; /* # of Tx queues */ + unsigned char nchan:4; /* # of channels */ + unsigned char nports:4; /* # of ports */ + unsigned char wr_cred; /* WR 16-byte credits */ + unsigned char adapter_type; /* type of adapter */ + unsigned char fw_api_ver; /* FW API version */ + unsigned int fw_vers; /* FW version */ + unsigned int iscsi_iolen; /* iSCSI max I/O length */ + unsigned short udb_density; /* # of user DB/page */ + unsigned short ucq_density; /* # of user CQs/page */ + void __iomem *gts_reg; /* address of GTS register */ + void __iomem *db_reg; /* address of kernel doorbell */ +}; + +struct cxgb4_uld_info { + const char *name; + void *(*add)(const struct cxgb4_lld_info *p); + int (*rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl); + int (*state_change)(void *handle, enum cxgb4_state new_state); +}; + +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); +int cxgb4_unregister_uld(enum cxgb4_uld type); +int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); +unsigned int cxgb4_port_chan(const struct net_device *dev); +unsigned int cxgb4_port_viid(const struct net_device *dev); +unsigned int cxgb4_port_idx(const struct net_device *dev); +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx); +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6); +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, + const 
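/*
 * (Worked example of the set_wr_txq() encoding above: queue_mapping is
 * (queue << 1) | priority, so a WR aimed at control queue 2 with
 * CPL_PRIORITY_CONTROL (1) stores (2 << 1) | 1 = 5 in the skb, from
 * which the transmit path recovers queue 2, priority 1.)
 */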
unsigned int *pgsz_order); +struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len); +#endif /* !__CXGB4_OFLD_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c new file mode 100644 index 0000000..a2d323c --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -0,0 +1,597 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include "cxgb4.h" +#include "l2t.h" +#include "t4_msg.h" +#include "t4fw_api.h" + +#define VLAN_NONE 0xfff + +/* identifies sync vs async L2T_WRITE_REQs */ +#define F_SYNC_WR (1 << 12) + +enum { + L2T_STATE_VALID, /* entry is up to date */ + L2T_STATE_STALE, /* entry may be used but needs revalidation */ + L2T_STATE_RESOLVING, /* entry needs address resolution */ + L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ + + /* when state is one of the below the entry is not hashed */ + L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ + L2T_STATE_UNUSED /* entry not in use */ +}; + +struct l2t_data { + rwlock_t lock; + atomic_t nfree; /* number of free entries */ + struct l2t_entry *rover; /* starting point for next allocation */ + struct l2t_entry l2tab[L2T_SIZE]; +}; + +static inline unsigned int vlan_prio(const struct l2t_entry *e) +{ + return e->vlan >> 13; +} + +static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) +{ + if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ + atomic_dec(&d->nfree); +} + +/* + * To avoid having to check address families we do not allow v4 and v6 + * neighbors to be on the same hash chain. We keep v4 entries in the first + * half of available hash buckets and v6 in the second. 
+ */ +enum { + L2T_SZ_HALF = L2T_SIZE / 2, + L2T_HASH_MASK = L2T_SZ_HALF - 1 +}; + +static inline unsigned int arp_hash(const u32 *key, int ifindex) +{ + return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK; +} + +static inline unsigned int ipv6_hash(const u32 *key, int ifindex) +{ + u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3]; + + return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK); +} + +static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex) +{ + return addr_len == 4 ? arp_hash(addr, ifindex) : + ipv6_hash(addr, ifindex); +} + +/* + * Checks if an L2T entry is for the given IP/IPv6 address. It does not check + * whether the L2T entry and the address are of the same address family. + * Callers ensure an address is only checked against L2T entries of the same + * family, something made trivial by the separation of IP and IPv6 hash chains + * mentioned above. Returns 0 if there's a match, non-zero otherwise. + */ +static int addreq(const struct l2t_entry *e, const u32 *addr) +{ + if (e->v6) + return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) | + (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]); + return e->addr[0] ^ addr[0]; +} + +static void neigh_replace(struct l2t_entry *e, struct neighbour *n) +{ + neigh_hold(n); + if (e->neigh) + neigh_release(e->neigh); + e->neigh = n; +} + +/* + * Write an L2T entry. Must be called with the entry locked. + * The write may be synchronous or asynchronous. + */ +static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) +{ + struct sk_buff *skb; + struct cpl_l2t_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, + e->idx | (sync ? F_SYNC_WR : 0) | + TID_QID(adap->sge.fw_evtq.abs_id))); + req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); + req->l2t_idx = htons(e->idx); + req->vlan = htons(e->vlan); + if (e->neigh) + memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); + memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); + + set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); + t4_ofld_send(adap, skb); + + if (sync && e->state != L2T_STATE_SWITCHING) + e->state = L2T_STATE_SYNC_WRITE; + return 0; +} + +/* + * Send packets waiting in an L2T entry's ARP queue. Must be called with the + * entry locked. + */ +static void send_pending(struct adapter *adap, struct l2t_entry *e) +{ + while (e->arpq_head) { + struct sk_buff *skb = e->arpq_head; + + e->arpq_head = skb->next; + skb->next = NULL; + t4_ofld_send(adap, skb); + } + e->arpq_tail = NULL; +} + +/* + * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a + * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T + * index it refers to. + */ +void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) +{ + unsigned int tid = GET_TID(rpl); + unsigned int idx = tid & (L2T_SIZE - 1); + + if (unlikely(rpl->status != CPL_ERR_NONE)) { + dev_err(adap->pdev_dev, + "Unexpected L2T_WRITE_RPL status %u for entry %u\n", + rpl->status, idx); + return; + } + + if (tid & F_SYNC_WR) { + struct l2t_entry *e = &adap->l2t->l2tab[idx]; + + spin_lock(&e->lock); + if (e->state != L2T_STATE_SWITCHING) { + send_pending(adap, e); + e->state = (e->neigh->nud_state & NUD_STALE) ? + L2T_STATE_STALE : L2T_STATE_VALID; + } + spin_unlock(&e->lock); + } +} + +/* + * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held. + */ +static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) +{ + skb->next = NULL; + if (e->arpq_head) + e->arpq_tail->next = skb; + else + e->arpq_head = skb; + e->arpq_tail = skb; +} + +int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct adapter *adap = netdev2adap(dev); + +again: + switch (e->state) { + case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ + neigh_event_send(e->neigh, NULL); + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_STALE) + e->state = L2T_STATE_VALID; + spin_unlock_bh(&e->lock); + case L2T_STATE_VALID: /* fast-path, send the packet on */ + return t4_ofld_send(adap, skb); + case L2T_STATE_RESOLVING: + case L2T_STATE_SYNC_WRITE: + spin_lock_bh(&e->lock); + if (e->state != L2T_STATE_SYNC_WRITE && + e->state != L2T_STATE_RESOLVING) { + spin_unlock_bh(&e->lock); + goto again; + } + arpq_enqueue(e, skb); + spin_unlock_bh(&e->lock); + + if (e->state == L2T_STATE_RESOLVING && + !neigh_event_send(e->neigh, NULL)) { + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_RESOLVING && e->arpq_head) + write_l2e(adap, e, 1); + spin_unlock_bh(&e->lock); + } + } + return 0; +} +EXPORT_SYMBOL(cxgb4_l2t_send); + +/* + * Allocate a free L2T entry. Must be called with l2t_data.lock held. + */ +static struct l2t_entry *alloc_l2e(struct l2t_data *d) +{ + struct l2t_entry *end, *e, **p; + + if (!atomic_read(&d->nfree)) + return NULL; + + /* there's definitely a free entry */ + for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e) + if (atomic_read(&e->refcnt) == 0) + goto found; + + for (e = d->l2tab; atomic_read(&e->refcnt); ++e) + ; +found: + d->rover = e + 1; + atomic_dec(&d->nfree); + + /* + * The entry we found may be an inactive entry that is + * presently in the hash table. We need to remove it. + */ + if (e->state < L2T_STATE_SWITCHING) + for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) + if (*p == e) { + *p = e->next; + e->next = NULL; + break; + } + + e->state = L2T_STATE_UNUSED; + return e; +} + +/* + * Called when an L2T entry has no more users. + */ +static void t4_l2e_free(struct l2t_entry *e) +{ + struct l2t_data *d; + + spin_lock_bh(&e->lock); + if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + if (e->neigh) { + neigh_release(e->neigh); + e->neigh = NULL; + } + while (e->arpq_head) { + struct sk_buff *skb = e->arpq_head; + + e->arpq_head = skb->next; + kfree_skb(skb); + } + e->arpq_tail = NULL; + } + spin_unlock_bh(&e->lock); + + d = container_of(e, struct l2t_data, l2tab[e->idx]); + atomic_inc(&d->nfree); +} + +void cxgb4_l2t_release(struct l2t_entry *e) +{ + if (atomic_dec_and_test(&e->refcnt)) + t4_l2e_free(e); +} +EXPORT_SYMBOL(cxgb4_l2t_release); + +/* + * Update an L2T entry that was previously used for the same next hop as neigh. + * Must be called with softirqs disabled. 
+ */ +static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) +{ + unsigned int nud_state; + + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + if (neigh != e->neigh) + neigh_replace(e, neigh); + nud_state = neigh->nud_state; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || + !(nud_state & NUD_VALID)) + e->state = L2T_STATE_RESOLVING; + else if (nud_state & NUD_CONNECTED) + e->state = L2T_STATE_VALID; + else + e->state = L2T_STATE_STALE; + spin_unlock(&e->lock); +} + +struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, + const struct net_device *physdev, + unsigned int priority) +{ + u8 lport; + u16 vlan; + struct l2t_entry *e; + int addr_len = neigh->tbl->key_len; + u32 *addr = (u32 *)neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = addr_hash(addr, addr_len, ifidx); + + if (neigh->dev->flags & IFF_LOOPBACK) + lport = netdev2pinfo(physdev)->tx_chan + 4; + else + lport = netdev2pinfo(physdev)->lport; + + if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + vlan = vlan_dev_vlan_id(neigh->dev); + else + vlan = VLAN_NONE; + + write_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (!addreq(e, addr) && e->ifindex == ifidx && + e->vlan == vlan && e->lport == lport) { + l2t_hold(d, e); + if (atomic_read(&e->refcnt) == 1) + reuse_entry(e, neigh); + goto done; + } + + /* Need to allocate a new entry */ + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + e->state = L2T_STATE_RESOLVING; + memcpy(e->addr, addr, addr_len); + e->ifindex = ifidx; + e->hash = hash; + e->lport = lport; + e->v6 = addr_len == 16; + atomic_set(&e->refcnt, 1); + neigh_replace(e, neigh); + e->vlan = vlan; + e->next = d->l2tab[hash].first; + d->l2tab[hash].first = e; + spin_unlock(&e->lock); + } +done: + write_unlock_bh(&d->lock); + return e; +} +EXPORT_SYMBOL(cxgb4_l2t_get); + +/* + * Called when address resolution fails for an L2T entry to handle packets + * on the arpq head. If a packet specifies a failure handler it is invoked, + * otherwise the packet is sent to the device. + */ +static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) +{ + while (arpq) { + struct sk_buff *skb = arpq; + const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + arpq = skb->next; + skb->next = NULL; + if (cb->arp_err_handler) + cb->arp_err_handler(cb->handle, skb); + else + t4_ofld_send(adap, skb); + } +} + +/* + * Called when the host's neighbor layer makes a change to some entry that is + * loaded into the HW L2 table. + */ +void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) +{ + struct l2t_entry *e; + struct sk_buff *arpq = NULL; + struct l2t_data *d = adap->l2t; + int addr_len = neigh->tbl->key_len; + u32 *addr = (u32 *) neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = addr_hash(addr, addr_len, ifidx); + + read_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (!addreq(e, addr) && e->ifindex == ifidx) { + spin_lock(&e->lock); + if (atomic_read(&e->refcnt)) + goto found; + spin_unlock(&e->lock); + break; + } + read_unlock_bh(&d->lock); + return; + + found: + read_unlock(&d->lock); + + if (neigh != e->neigh) + neigh_replace(e, neigh); + + if (e->state == L2T_STATE_RESOLVING) { + if (neigh->nud_state & NUD_FAILED) { + arpq = e->arpq_head; + e->arpq_head = e->arpq_tail = NULL; + } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) && + e->arpq_head) { + write_l2e(adap, e, 1); + } + } else { + e->state = neigh->nud_state & NUD_CONNECTED ? 
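/*
 * A hypothetical end-to-end use of the L2T API implemented above: a ULD
 * resolves the next hop once per connection, sends any number of offload
 * packets through the entry, and drops its reference at teardown.  Only
 * the cxgb4_l2t_* calls and netdev2adap() come from this driver; the
 * my_conn structure and functions are illustrative.
 */
struct my_conn {
	struct l2t_entry *l2t;		/* cached next-hop entry */
};

static int my_conn_open(struct my_conn *c, struct net_device *dev,
			struct neighbour *neigh)
{
	struct adapter *adap = netdev2adap(dev);

	c->l2t = cxgb4_l2t_get(adap->l2t, neigh, dev, 0);
	return c->l2t ? 0 : -ENOMEM;	/* NULL means the table is full */
}

static int my_conn_xmit(struct my_conn *c, struct net_device *dev,
			struct sk_buff *skb)
{
	/* queues the skb on the entry while the address is resolving */
	return cxgb4_l2t_send(dev, skb, c->l2t);
}

static void my_conn_close(struct my_conn *c)
{
	cxgb4_l2t_release(c->l2t);	/* entry is recyclable at refcnt 0 */
}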
+ L2T_STATE_VALID : L2T_STATE_STALE; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac))) + write_l2e(adap, e, 0); + } + + spin_unlock_bh(&e->lock); + + if (arpq) + handle_failed_resolution(adap, arpq); +} + +struct l2t_data *t4_init_l2t(void) +{ + int i; + struct l2t_data *d; + + d = t4_alloc_mem(sizeof(*d)); + if (!d) + return NULL; + + d->rover = d->l2tab; + atomic_set(&d->nfree, L2T_SIZE); + rwlock_init(&d->lock); + + for (i = 0; i < L2T_SIZE; ++i) { + d->l2tab[i].idx = i; + d->l2tab[i].state = L2T_STATE_UNUSED; + spin_lock_init(&d->l2tab[i].lock); + atomic_set(&d->l2tab[i].refcnt, 0); + } + return d; +} + +#include +#include +#include + +static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) +{ + struct l2t_entry *l2tab = seq->private; + + return pos >= L2T_SIZE ? NULL : &l2tab[pos]; +} + +static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; +} + +static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + v = l2t_get_idx(seq, *pos); + if (v) + ++*pos; + return v; +} + +static void l2t_seq_stop(struct seq_file *seq, void *v) +{ +} + +static char l2e_state(const struct l2t_entry *e) +{ + switch (e->state) { + case L2T_STATE_VALID: return 'V'; + case L2T_STATE_STALE: return 'S'; + case L2T_STATE_SYNC_WRITE: return 'W'; + case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R'; + case L2T_STATE_SWITCHING: return 'X'; + default: + return 'U'; + } +} + +static int l2t_seq_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) + seq_puts(seq, " Idx IP address " + "Ethernet address VLAN/P LP State Users Port\n"); + else { + char ip[60]; + struct l2t_entry *e = v; + + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_SWITCHING) + ip[0] = '\0'; + else + sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr); + seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", + e->idx, ip, e->dmac, + e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, + l2e_state(e), atomic_read(&e->refcnt), + e->neigh ? e->neigh->dev->name : ""); + spin_unlock_bh(&e->lock); + } + return 0; +} + +static const struct seq_operations l2t_seq_ops = { + .start = l2t_seq_start, + .next = l2t_seq_next, + .stop = l2t_seq_stop, + .show = l2t_seq_show +}; + +static int l2t_seq_open(struct inode *inode, struct file *file) +{ + int rc = seq_open(file, &l2t_seq_ops); + + if (!rc) { + struct adapter *adap = inode->i_private; + struct seq_file *seq = file->private_data; + + seq->private = adap->l2t->l2tab; + } + return rc; +} + +const struct file_operations t4_l2t_fops = { + .owner = THIS_MODULE, + .open = l2t_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h new file mode 100644 index 0000000..02b31d0 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h @@ -0,0 +1,107 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_L2T_H +#define __CXGB4_L2T_H + +#include +#include +#include + +struct adapter; +struct l2t_data; +struct neighbour; +struct net_device; +struct file_operations; +struct cpl_l2t_write_rpl; + +/* + * Each L2T entry plays multiple roles. First of all, it keeps state for the + * corresponding entry of the HW L2 table and maintains a queue of offload + * packets awaiting address resolution. Second, it is a node of a hash table + * chain, where the nodes of the chain are linked together through their next + * pointer. Finally, each node is a bucket of a hash table, pointing to the + * first element in its chain through its first pointer. + */ +struct l2t_entry { + u16 state; /* entry state */ + u16 idx; /* entry index */ + u32 addr[4]; /* next hop IP or IPv6 address */ + int ifindex; /* neighbor's net_device's ifindex */ + struct neighbour *neigh; /* associated neighbour */ + struct l2t_entry *first; /* start of hash chain */ + struct l2t_entry *next; /* next l2t_entry on chain */ + struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ + struct sk_buff *arpq_tail; + spinlock_t lock; + atomic_t refcnt; /* entry reference count */ + u16 hash; /* hash bucket the entry is on */ + u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */ + u8 v6; /* whether entry is for IPv6 */ + u8 lport; /* associated offload logical interface */ + u8 dmac[ETH_ALEN]; /* neighbour's MAC address */ +}; + +typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb); + +/* + * Callback stored in an skb to handle address resolution failure.
+ */ +struct l2t_skb_cb { + void *handle; + arp_err_handler_t arp_err_handler; +}; + +#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) + +static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle, + arp_err_handler_t handler) +{ + L2T_SKB_CB(skb)->handle = handle; + L2T_SKB_CB(skb)->arp_err_handler = handler; +} + +void cxgb4_l2t_release(struct l2t_entry *e); +int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, + struct l2t_entry *e); +struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, + const struct net_device *physdev, + unsigned int priority); + +void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); +struct l2t_data *t4_init_l2t(void); +void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); + +extern const struct file_operations t4_l2t_fops; +#endif /* __CXGB4_L2T_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c new file mode 100644 index 0000000..56adf44 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -0,0 +1,2442 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "t4fw_api.h" + +/* + * Rx buffer size. We use largish buffers if possible but settle for single + * pages under memory shortage. + */ +#if PAGE_SHIFT >= 16 +# define FL_PG_ORDER 0 +#else +# define FL_PG_ORDER (16 - PAGE_SHIFT) +#endif + +/* RX_PULL_LEN should be <= RX_COPY_THRES */ +#define RX_COPY_THRES 256 +#define RX_PULL_LEN 128 + +/* + * Main body length for sk_buffs used for Rx Ethernet packets with fragments. + * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. + */ +#define RX_PKT_SKB_LEN 512 + +/* Ethernet header padding prepended to RX_PKTs */ +#define RX_PKT_PAD 2 + +/* + * Max number of Tx descriptors we clean up at a time. Should be modest as + * freeing skbs isn't cheap and it happens while holding locks. 
We just need + * to free packets faster than they arrive, we eventually catch up and keep + * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. + */ +#define MAX_TX_RECLAIM 16 + +/* + * Max number of Rx buffers we replenish at a time. Again keep this modest, + * allocating buffers isn't cheap either. + */ +#define MAX_RX_REFILL 16U + +/* + * Period of the Rx queue check timer. This timer is infrequent as it has + * something to do only when the system experiences severe memory shortage. + */ +#define RX_QCHECK_PERIOD (HZ / 2) + +/* + * Period of the Tx queue check timer. + */ +#define TX_QCHECK_PERIOD (HZ / 2) + +/* + * Max number of Tx descriptors to be reclaimed by the Tx timer. + */ +#define MAX_TIMER_TX_RECLAIM 100 + +/* + * Timer index used when backing off due to memory shortage. + */ +#define NOMEM_TMR_IDX (SGE_NTIMERS - 1) + +/* + * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will + * attempt to refill it. + */ +#define FL_STARVE_THRES 4 + +/* + * Suspend an Ethernet Tx queue with fewer available descriptors than this. + * This is the same as calc_tx_descs() for a TSO packet with + * nr_frags == MAX_SKB_FRAGS. + */ +#define ETHTXQ_STOP_THRES \ + (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) + +/* + * Suspension threshold for non-Ethernet Tx queues. We require enough room + * for a full sized WR. + */ +#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc)) + +/* + * Max Tx descriptor space we allow for an Ethernet packet to be inlined + * into a WR. + */ +#define MAX_IMM_TX_PKT_LEN 128 + +/* + * Max size of a WR sent through a control Tx queue. + */ +#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN + +enum { + /* packet alignment in FL buffers */ + FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES, + /* egress status entry size */ + STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64 +}; + +struct tx_sw_desc { /* SW state per Tx descriptor */ + struct sk_buff *skb; + struct ulptx_sgl *sgl; +}; + +struct rx_sw_desc { /* SW state per Rx descriptor */ + struct page *page; + dma_addr_t dma_addr; +}; + +/* + * The low bits of rx_sw_desc.dma_addr have special meaning. + */ +enum { + RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */ + RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ +}; + +static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) +{ + return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); +} + +static inline bool is_buf_mapped(const struct rx_sw_desc *d) +{ + return !(d->dma_addr & RX_UNMAPPED_BUF); +} + +/** + * txq_avail - return the number of available slots in a Tx queue + * @q: the Tx queue + * + * Returns the number of descriptors in a Tx queue available to write new + * packets. + */ +static inline unsigned int txq_avail(const struct sge_txq *q) +{ + return q->size - 1 - q->in_use; +} + +/** + * fl_cap - return the capacity of a free-buffer list + * @fl: the FL + * + * Returns the capacity of a free-buffer list. The capacity is less than + * the size because one descriptor needs to be left unpopulated, otherwise + * HW will think the FL is empty. 
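+ * A worked example: since one descriptor covers 8 buffers, an FL
+ * created with size 1024 has a usable capacity of 1016 buffers.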
+ */ +static inline unsigned int fl_cap(const struct sge_fl *fl) +{ + return fl->size - 8; /* 1 descriptor = 8 buffers */ +} + +static inline bool fl_starving(const struct sge_fl *fl) +{ + return fl->avail - fl->pend_cred <= FL_STARVE_THRES; +} + +static int map_skb(struct device *dev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto out_err; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + + for (fp = si->frags; fp < end; fp++) { + *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); + + dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); +out_err: + return -ENOMEM; +} + +#ifdef CONFIG_NEED_DMA_MAP_STATE +static void unmap_skb(struct device *dev, const struct sk_buff *skb, + const dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + for (fp = si->frags; fp < end; fp++) + dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE); +} + +/** + * deferred_unmap_destructor - unmap a packet when it is freed + * @skb: the packet + * + * This is the packet destructor used for Tx packets that need to remain + * mapped until they are freed rather than until their Tx descriptors are + * freed. + */ +static void deferred_unmap_destructor(struct sk_buff *skb) +{ + unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); +} +#endif + +static void unmap_sgl(struct device *dev, const struct sk_buff *skb, + const struct ulptx_sgl *sgl, const struct sge_txq *q) +{ + const struct ulptx_sge_pair *p; + unsigned int nfrags = skb_shinfo(skb)->nr_frags; + + if (likely(skb_headlen(skb))) + dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), + DMA_TO_DEVICE); + else { + dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), + DMA_TO_DEVICE); + nfrags--; + } + + /* + * the complexity below is because of the possibility of a wrap-around + * in the middle of an SGL + */ + for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { + if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { +unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(p->addr[1]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p++; + } else if ((u8 *)p == (u8 *)q->stat) { + p = (const struct ulptx_sge_pair *)q->desc; + goto unmap; + } else if ((u8 *)p + 8 == (u8 *)q->stat) { + const __be64 *addr = (const __be64 *)q->desc; + + dma_unmap_page(dev, be64_to_cpu(addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[1]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[2]; + } else { + const __be64 *addr = (const __be64 *)q->desc; + + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[0]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[1]; + } + } + if (nfrags) { + __be64 addr; + + if ((u8 *)p == (u8 *)q->stat) + p = (const struct ulptx_sge_pair *)q->desc; + addr = (u8 *)p + 16 <= (u8 *)q->stat ? 
p->addr[0] : + *(const __be64 *)q->desc; + dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), + DMA_TO_DEVICE); + } +} + +/** + * free_tx_desc - reclaims Tx descriptors and their buffers + * @adapter: the adapter + * @q: the Tx queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors from an SGE Tx queue and frees the associated + * Tx buffers. Called with the Tx queue lock held. + */ +static void free_tx_desc(struct adapter *adap, struct sge_txq *q, + unsigned int n, bool unmap) +{ + struct tx_sw_desc *d; + unsigned int cidx = q->cidx; + struct device *dev = adap->pdev_dev; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->skb) { /* an SGL is present */ + if (unmap) + unmap_sgl(dev, d->skb, d->sgl, q); + kfree_skb(d->skb); + d->skb = NULL; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + } + q->cidx = cidx; +} + +/* + * Return the number of reclaimable descriptors in a Tx queue. + */ +static inline int reclaimable(const struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + hw_cidx -= q->cidx; + return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; +} + +/** + * reclaim_completed_tx - reclaims completed Tx descriptors + * @adap: the adapter + * @q: the Tx queue to reclaim completed descriptors from + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the Tx + * queue locked. + */ +static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, + bool unmap) +{ + int avail = reclaimable(q); + + if (avail) { + /* + * Limit the amount of clean up work we do at a time to keep + * the Tx lock hold time O(1). + */ + if (avail > MAX_TX_RECLAIM) + avail = MAX_TX_RECLAIM; + + free_tx_desc(adap, q, avail, unmap); + q->in_use -= avail; + } +} + +static inline int get_buf_size(const struct rx_sw_desc *d) +{ +#if FL_PG_ORDER > 0 + return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) : + PAGE_SIZE; +#else + return PAGE_SIZE; +#endif +} + +/** + * free_rx_bufs - free the Rx buffers on an SGE free list + * @adap: the adapter + * @q: the SGE free list to free buffers from + * @n: how many buffers to free + * + * Release the next @n buffers on an SGE free-buffer Rx queue. The + * buffers must be made inaccessible to HW before calling this function. + */ +static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) +{ + while (n--) { + struct rx_sw_desc *d = &q->sdesc[q->cidx]; + + if (is_buf_mapped(d)) + dma_unmap_page(adap->pdev_dev, get_buf_addr(d), + get_buf_size(d), PCI_DMA_FROMDEVICE); + put_page(d->page); + d->page = NULL; + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; + } +} + +/** + * unmap_rx_buf - unmap the current Rx buffer on an SGE free list + * @adap: the adapter + * @q: the SGE free list + * + * Unmap the current buffer on an SGE free-buffer Rx queue. The + * buffer must be made inaccessible to HW before calling this function. + * + * This is similar to @free_rx_bufs above but does not free the buffer. + * Do note that the FL still loses any further access to the buffer. 
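+ * In particular no put_page() is done here, so the page reference is
+ * handed over to the caller along with the now-unmapped buffer.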
+ */ +static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) +{ + struct rx_sw_desc *d = &q->sdesc[q->cidx]; + + if (is_buf_mapped(d)) + dma_unmap_page(adap->pdev_dev, get_buf_addr(d), + get_buf_size(d), PCI_DMA_FROMDEVICE); + d->page = NULL; + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; +} + +static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) +{ + if (q->pend_cred >= 8) { + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO | + QID(q->cntxt_id) | PIDX(q->pend_cred / 8)); + q->pend_cred &= 7; + } +} + +static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, + dma_addr_t mapping) +{ + sd->page = pg; + sd->dma_addr = mapping; /* includes size low bits */ +} + +/** + * refill_fl - refill an SGE Rx buffer ring + * @adap: the adapter + * @q: the ring to refill + * @n: the number of new buffers to allocate + * @gfp: the gfp flags for the allocations + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity. If afterwards the queue is + * found critically low mark it as starving in the bitmap of starving FLs. + * + * Returns the number of buffers allocated. + */ +static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, + gfp_t gfp) +{ + struct page *pg; + dma_addr_t mapping; + unsigned int cred = q->avail; + __be64 *d = &q->desc[q->pidx]; + struct rx_sw_desc *sd = &q->sdesc[q->pidx]; + + gfp |= __GFP_NOWARN; /* failures are expected */ + +#if FL_PG_ORDER > 0 + /* + * Prefer large buffers + */ + while (n) { + pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER); + if (unlikely(!pg)) { + q->large_alloc_failed++; + break; /* fall back to single pages */ + } + + mapping = dma_map_page(adap->pdev_dev, pg, 0, + PAGE_SIZE << FL_PG_ORDER, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { + __free_pages(pg, FL_PG_ORDER); + goto out; /* do not try small pages for this error */ + } + mapping |= RX_LARGE_BUF; + *d++ = cpu_to_be64(mapping); + + set_rx_sw_desc(sd, pg, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + n--; + } +#endif + + while (n--) { + pg = __netdev_alloc_page(adap->port[0], gfp); + if (unlikely(!pg)) { + q->alloc_failed++; + break; + } + + mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { + netdev_free_page(adap->port[0], pg); + goto out; + } + *d++ = cpu_to_be64(mapping); + + set_rx_sw_desc(sd, pg, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + } + +out: cred = q->avail - cred; + q->pend_cred += cred; + ring_fl_db(adap, q); + + if (unlikely(fl_starving(q))) { + smp_wmb(); + set_bit(q->cntxt_id - adap->sge.egr_start, + adap->sge.starving_fl); + } + + return cred; +} + +static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) +{ + refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), + GFP_ATOMIC); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @elem_size: the size of each descriptor + * @sw_size: the size of the SW state associated with each ring element + * @phys: the physical address of the allocated ring + * @metadata: address of the array holding the SW state for the ring + * 
@stat_size: extra space in HW ring for status information + * @node: preferred node for memory allocations + * + * Allocates resources for an SGE descriptor ring, such as Tx queues, + * free buffer lists, or response queues. Each SGE ring requires + * space for its HW descriptors plus, optionally, space for the SW state + * associated with each HW entry (the metadata). The function returns + * three values: the virtual address for the HW ring (the return value + * of the function), the bus address of the HW ring, and the address + * of the SW ring. + */ +static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t *phys, void *metadata, + size_t stat_size, int node) +{ + size_t len = nelem * elem_size + stat_size; + void *s = NULL; + void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); + + if (!p) + return NULL; + if (sw_size) { + s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node); + + if (!s) { + dma_free_coherent(dev, len, p, *phys); + return NULL; + } + } + if (metadata) + *(void **)metadata = s; + memset(p, 0, len); + return p; +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/** + * flits_to_desc - returns the num of Tx descriptors for the given flits + * @n: the number of flits + * + * Returns the number of Tx descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int n) +{ + BUG_ON(n > SGE_MAX_WR_LEN / 8); + return DIV_ROUND_UP(n, 8); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit as + * immediate data. + */ +static inline int is_eth_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt); +} + +/** + * calc_tx_flits - calculate the number of flits for a packet Tx WR + * @skb: the packet + * + * Returns the number of flits needed for a Tx WR for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + if (is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8); + + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; + if (skb_shinfo(skb)->gso_size) + flits += 2; + return flits; +} + +/** + * calc_tx_descs - calculate the number of Tx descriptors for a packet + * @skb: the packet + * + * Returns the number of Tx descriptors needed for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_descs(const struct sk_buff *skb) +{ + return flits_to_desc(calc_tx_flits(skb)); +} + +/** + * write_sgl - populate a scatter/gather list for a packet + * @skb: the packet + * @q: the Tx queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into skb main-body data to include in the SGL + * @addr: the list of bus addresses for the SGL elements + * + * Generates a gather list for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. 
+ * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. @sgl must be 16-byte + * aligned and within a Tx descriptor with available space. @end points + * right after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @sgl. + */ +static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + const struct skb_shared_info *si = skb_shinfo(skb); + unsigned int nfrags = si->nr_frags; + struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; + + len = skb_headlen(skb) - start; + if (likely(len)) { + sgl->len0 = htonl(len); + sgl->addr0 = cpu_to_be64(addr[0] + start); + nfrags++; + } else { + sgl->len0 = htonl(si->frags[0].size); + sgl->addr0 = cpu_to_be64(addr[1]); + } + + sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; + + for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(si->frags[++i].size); + to->addr[0] = cpu_to_be64(addr[i]); + to->addr[1] = cpu_to_be64(addr[++i]); + } + if (nfrags) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(0); + to->addr[0] = cpu_to_be64(addr[i + 1]); + } + if (unlikely((u8 *)end > (u8 *)q->stat)) { + unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = (u8 *)end - (u8 *)q->stat; + memcpy(q->desc, (u8 *)buf + part0, part1); + end = (void *)q->desc + part1; + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *(u64 *)end = 0; +} + +/** + * ring_tx_db - check and potentially ring a Tx queue's doorbell + * @adap: the adapter + * @q: the Tx queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbel for a Tx queue. + */ +static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) +{ + wmb(); /* write descriptors before telling HW */ + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(n)); +} + +/** + * inline_tx_skb - inline a packet's data into Tx descriptors + * @skb: the packet + * @q: the Tx queue where the packet will be inlined + * @pos: starting position in the Tx queue where to inline the packet + * + * Inline a packet's contents directly into Tx descriptors, starting at + * the given position within the Tx DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. 
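+ * Sketch of the wrap-around case: if only 'left' bytes remain before
+ * the status page at q->stat, the first 'left' bytes are copied there
+ * and the remaining skb->len - left bytes continue at q->desc.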
+ */ +static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, + void *pos) +{ + u64 *p; + int left = (void *)q->stat - pos; + + if (likely(skb->len <= left)) { + if (likely(!skb->data_len)) + skb_copy_from_linear_data(skb, pos, skb->len); + else + skb_copy_bits(skb, 0, pos, skb->len); + pos += skb->len; + } else { + skb_copy_bits(skb, 0, pos, left); + skb_copy_bits(skb, left, q->desc, skb->len - left); + pos = (void *)q->desc + (skb->len - left); + } + + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) + *p = 0; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits. + */ +static u64 hwcsum(const struct sk_buff *skb) +{ + int csum_type; + const struct iphdr *iph = ip_hdr(skb); + + if (iph->version == 4) { + if (iph->protocol == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP; + else if (iph->protocol == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP; + else { +nocsum: /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return TXPKT_L4CSUM_DIS; + } + } else { + /* + * this doesn't work with extension headers + */ + const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; + + if (ip6h->nexthdr == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP6; + else if (ip6h->nexthdr == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP6; + else + goto nocsum; + } + + if (likely(csum_type >= TX_CSUM_TCPIP)) + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | + TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); + else { + int start = skb_transport_offset(skb); + + return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) | + TXPKT_CSUM_LOC(start + skb->csum_offset); + } +} + +static void eth_txq_stop(struct sge_eth_txq *q) +{ + netif_tx_stop_queue(q->txq); + q->q.stops++; +} + +static inline void txq_advance(struct sge_txq *q, unsigned int n) +{ + q->in_use += n; + q->pidx += n; + if (q->pidx >= q->size) + q->pidx -= q->size; +} + +/** + * t4_eth_xmit - add a packet to an Ethernet Tx queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. + */ +netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u32 wr_mid; + u64 cntrl, *end; + int qidx, credits; + unsigned int flits, ndesc; + struct adapter *adap; + struct sge_eth_txq *q; + const struct port_info *pi; + struct fw_eth_tx_pkt_wr *wr; + struct cpl_tx_pkt_core *cpl; + const struct skb_shared_info *ssi; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + + /* + * The chip min packet length is 10 octets but play safe and reject + * anything shorter than an Ethernet header. 
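+ * (ETH_HLEN is 14 bytes; such runts are freed and reported as sent.)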
+ */ + if (unlikely(skb->len < ETH_HLEN)) { +out_free: dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + pi = netdev_priv(dev); + adap = pi->adapter; + qidx = skb_get_queue_mapping(skb); + q = &adap->sge.ethtxq[qidx + pi->first_qset]; + + reclaim_completed_tx(adap, &q->q, true); + + flits = calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&q->q) - ndesc; + + if (unlikely(credits < 0)) { + eth_txq_stop(q); + dev_err(adap->pdev_dev, + "%s: Tx ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!is_eth_imm(skb) && + unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { + q->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + eth_txq_stop(q); + wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + } + + wr = (void *)&q->q.desc[q->q.pidx]; + wr->equiq_to_len16 = htonl(wr_mid); + wr->r3 = cpu_to_be64(0); + end = (u64 *)wr + flits; + + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso *lso = (void *)wr; + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN(sizeof(*lso))); + lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE | LSO_LAST_SLICE | + LSO_IPV6(v6) | + LSO_ETHHDR_LEN(eth_xtra_len / 4) | + LSO_IPHDR_LEN(l3hdr_len / 4) | + LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); + lso->c.ipid_ofst = htons(0); + lso->c.mss = htons(ssi->gso_size); + lso->c.seqno_offset = htonl(0); + lso->c.len = htonl(skb->len); + cpl = (void *)(lso + 1); + cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN(l3hdr_len) | + TXPKT_ETHHDR_LEN(eth_xtra_len); + q->tso++; + q->tx_cso += ssi->gso_segs; + } else { + int len; + + len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); + wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN(len)); + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; + q->tx_cso++; + } else + cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; + } + + if (vlan_tx_tag_present(skb)) { + q->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); + } + + cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | + TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn)); + cpl->pack = htons(0); + cpl->len = htons(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + if (is_eth_imm(skb)) { + inline_tx_skb(skb, &q->q, cpl + 1); + dev_kfree_skb(skb); + } else { + int last_desc; + + write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, + addr); + skb_orphan(skb); + + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + q->q.sdesc[last_desc].skb = skb; + q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); + } + + txq_advance(&q->q, ndesc); + + ring_tx_db(adap, &q->q, ndesc); + return NETDEV_TX_OK; +} + +/** + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue + * + * This is a variant of reclaim_completed_tx() that is used for Tx queues + * that send only immediate data (presently just the control queues) and + * thus do not have any sk_buffs to release. 
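+ * A worked example of the wrap-around handling below: with q->size = 64,
+ * hw_cidx = 3 and q->cidx = 60, reclaim = 3 - 60 + 64 = 7 descriptors.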
+ */ +static inline void reclaim_completed_tx_imm(struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + int reclaim = hw_cidx - q->cidx; + + if (reclaim < 0) + reclaim += q->size; + + q->in_use -= reclaim; + q->cidx = hw_cidx; +} + +/** + * is_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as a WR with immediate data. + */ +static inline int is_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_CTRL_WR_LEN; +} + +/** + * ctrlq_check_stop - check if a control queue is full and should stop + * @q: the queue + * @wr: most recent WR written to the queue + * + * Check if a control queue has become full and should be stopped. + * We clean up control queue descriptors very lazily, only when we are out. + * If the queue is still full after reclaiming any completed descriptors + * we suspend it and have the last WR wake it up. + */ +static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) +{ + reclaim_completed_tx_imm(&q->q); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { + wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + q->q.stops++; + q->full = 1; + } +} + +/** + * ctrl_xmit - send a packet through an SGE control Tx queue + * @q: the control queue + * @skb: the packet + * + * Send a packet through an SGE control Tx queue. Packets sent through + * a control queue must fit entirely as immediate data. + */ +static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) +{ + unsigned int ndesc; + struct fw_wr_hdr *wr; + + if (unlikely(!is_imm(skb))) { + WARN_ON(1); + dev_kfree_skb(skb); + return NET_XMIT_DROP; + } + + ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); + spin_lock(&q->sendq.lock); + + if (unlikely(q->full)) { + skb->priority = ndesc; /* save for restart */ + __skb_queue_tail(&q->sendq, skb); + spin_unlock(&q->sendq.lock); + return NET_XMIT_CN; + } + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + inline_tx_skb(skb, &q->q, wr); + + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) + ctrlq_check_stop(q, wr); + + ring_tx_db(q->adap, &q->q, ndesc); + spin_unlock(&q->sendq.lock); + + kfree_skb(skb); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ctrlq - restart a suspended control queue + * @data: the control queue to restart + * + * Resumes transmission on a suspended Tx control queue. + */ +static void restart_ctrlq(unsigned long data) +{ + struct sk_buff *skb; + unsigned int written = 0; + struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; + + spin_lock(&q->sendq.lock); + reclaim_completed_tx_imm(&q->q); + BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ + + while ((skb = __skb_dequeue(&q->sendq)) != NULL) { + struct fw_wr_hdr *wr; + unsigned int ndesc = skb->priority; /* previously saved */ + + /* + * Write descriptors and free skbs outside the lock to limit + * wait times. q->full is still set so new skbs will be queued. 
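+ * (ctrl_xmit() checks q->full under sendq.lock, so while it is set
+ * every new skb is appended to the sendq instead of the HW ring.)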
+ */ + spin_unlock(&q->sendq.lock); + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + inline_tx_skb(skb, &q->q, wr); + kfree_skb(skb); + + written += ndesc; + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { + unsigned long old = q->q.stops; + + ctrlq_check_stop(q, wr); + if (q->q.stops != old) { /* suspended anew */ + spin_lock(&q->sendq.lock); + goto ringdb; + } + } + if (written > 16) { + ring_tx_db(q->adap, &q->q, written); + written = 0; + } + spin_lock(&q->sendq.lock); + } + q->full = 0; +ringdb: if (written) + ring_tx_db(q->adap, &q->q, written); + spin_unlock(&q->sendq.lock); +} + +/** + * t4_mgmt_tx - send a management message + * @adap: the adapter + * @skb: the packet containing the management message + * + * Send a management message through control queue 0. + */ +int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); + local_bh_enable(); + return ret; +} + +/** + * is_ofld_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as an offload WR with immediate + * data. We currently use the same limit as for Ethernet packets. + */ +static inline int is_ofld_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_IMM_TX_PKT_LEN; +} + +/** + * calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. + */ +static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + + flits = skb_transport_offset(skb) / 8U; /* headers */ + cnt = skb_shinfo(skb)->nr_frags; + if (skb->tail != skb->transport_header) + cnt++; + return flits + sgl_len(cnt); +} + +/** + * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion + * @adap: the adapter + * @q: the queue to stop + * + * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting + * inability to map packets. A periodic timer attempts to restart + * queues so marked. + */ +static void txq_stop_maperr(struct sge_ofld_txq *q) +{ + q->mapping_err++; + q->q.stops++; + set_bit(q->q.cntxt_id - q->adap->sge.egr_start, + q->adap->sge.txq_maperr); +} + +/** + * ofldtxq_stop - stop an offload Tx queue that has become full + * @q: the queue to stop + * @skb: the packet causing the queue to become full + * + * Stops an offload Tx queue that has become full and modifies the packet + * being written to request a wakeup. + */ +static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) +{ + struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; + + wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + q->q.stops++; + q->full = 1; +} + +/** + * service_ofldq - restart a suspended offload queue + * @q: the offload queue + * + * Services an offload Tx queue by moving packets from its packet queue + * to the HW Tx ring. The function starts and ends with the queue locked. + */ +static void service_ofldq(struct sge_ofld_txq *q) +{ + u64 *pos; + int credits; + struct sk_buff *skb; + unsigned int written = 0; + unsigned int flits, ndesc; + + while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { + /* + * We drop the lock but leave skb on sendq, thus retaining + * exclusive access to the state of the queue. 
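+ * (ofld_xmit() only calls service_ofldq() when the skb it queued is
+ * the sole entry on the sendq, so a non-empty sendq keeps other
+ * senders from re-entering.)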
+ */ + spin_unlock(&q->sendq.lock); + + reclaim_completed_tx(q->adap, &q->q, false); + + flits = skb->priority; /* previously saved */ + ndesc = flits_to_desc(flits); + credits = txq_avail(&q->q) - ndesc; + BUG_ON(credits < 0); + if (unlikely(credits < TXQ_STOP_THRES)) + ofldtxq_stop(q, skb); + + pos = (u64 *)&q->q.desc[q->q.pidx]; + if (is_ofld_imm(skb)) + inline_tx_skb(skb, &q->q, pos); + else if (map_skb(q->adap->pdev_dev, skb, + (dma_addr_t *)skb->head)) { + txq_stop_maperr(q); + spin_lock(&q->sendq.lock); + break; + } else { + int last_desc, hdr_len = skb_transport_offset(skb); + + memcpy(pos, skb->data, hdr_len); + write_sgl(skb, &q->q, (void *)pos + hdr_len, + pos + flits, hdr_len, + (dma_addr_t *)skb->head); +#ifdef CONFIG_NEED_DMA_MAP_STATE + skb->dev = q->adap->port[0]; + skb->destructor = deferred_unmap_destructor; +#endif + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + q->q.sdesc[last_desc].skb = skb; + } + + txq_advance(&q->q, ndesc); + written += ndesc; + if (unlikely(written > 32)) { + ring_tx_db(q->adap, &q->q, written); + written = 0; + } + + spin_lock(&q->sendq.lock); + __skb_unlink(skb, &q->sendq); + if (is_ofld_imm(skb)) + kfree_skb(skb); + } + if (likely(written)) + ring_tx_db(q->adap, &q->q, written); +} + +/** + * ofld_xmit - send a packet through an offload queue + * @q: the Tx offload queue + * @skb: the packet + * + * Send an offload packet through an SGE offload queue. + */ +static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) +{ + skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ + spin_lock(&q->sendq.lock); + __skb_queue_tail(&q->sendq, skb); + if (q->sendq.qlen == 1) + service_ofldq(q); + spin_unlock(&q->sendq.lock); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ofldq - restart a suspended offload queue + * @data: the offload queue to restart + * + * Resumes transmission on a suspended Tx offload queue. + */ +static void restart_ofldq(unsigned long data) +{ + struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; + + spin_lock(&q->sendq.lock); + q->full = 0; /* the queue actually is completely empty now */ + service_ofldq(q); + spin_unlock(&q->sendq.lock); +} + +/** + * skb_txq - return the Tx queue an offload packet should use + * @skb: the packet + * + * Returns the Tx queue an offload packet should use as indicated by bits + * 1-15 in the packet's queue_mapping. + */ +static inline unsigned int skb_txq(const struct sk_buff *skb) +{ + return skb->queue_mapping >> 1; +} + +/** + * is_ctrl_pkt - return whether an offload packet is a control packet + * @skb: the packet + * + * Returns whether an offload packet should use an OFLD or a CTRL + * Tx queue as indicated by bit 0 in the packet's queue_mapping. + */ +static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) +{ + return skb->queue_mapping & 1; +} + +static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) +{ + unsigned int idx = skb_txq(skb); + + if (unlikely(is_ctrl_pkt(skb))) + return ctrl_xmit(&adap->sge.ctrlq[idx], skb); + return ofld_xmit(&adap->sge.ofldtxq[idx], skb); +} + +/** + * t4_ofld_send - send an offload packet + * @adap: the adapter + * @skb: the packet + * + * Sends an offload packet. We use the packet queue_mapping to select the + * appropriate Tx queue as follows: bit 0 indicates whether the packet + * should be sent as regular or control, bits 1-15 select the queue. 
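+ * For example, queue_mapping 5 (binary 101) is a control packet for
+ * CTRL queue 2, while queue_mapping 4 selects OFLD queue 2.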
+ */ +int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = ofld_send(adap, skb); + local_bh_enable(); + return ret; +} + +/** + * cxgb4_ofld_send - send an offload packet + * @dev: the net device + * @skb: the packet + * + * Sends an offload packet. This is an exported version of @t4_ofld_send, + * intended for ULDs. + */ +int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) +{ + return t4_ofld_send(netdev2adap(dev), skb); +} +EXPORT_SYMBOL(cxgb4_ofld_send); + +static inline void copy_frags(struct skb_shared_info *ssi, + const struct pkt_gl *gl, unsigned int offset) +{ + unsigned int n; + + /* usually there's just one frag */ + ssi->frags[0].page = gl->frags[0].page; + ssi->frags[0].page_offset = gl->frags[0].page_offset + offset; + ssi->frags[0].size = gl->frags[0].size - offset; + ssi->nr_frags = gl->nfrags; + n = gl->nfrags - 1; + if (n) + memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t)); + + /* get a reference to the last page, we don't own it */ + get_page(gl->frags[n].page); +} + +/** + * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list + * @gl: the gather list + * @skb_len: size of sk_buff main body if it carries fragments + * @pull_len: amount of data to move to the sk_buff's main body + * + * Builds an sk_buff from the given packet gather list. Returns the + * sk_buff or %NULL if sk_buff allocation failed. + */ +struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len) +{ + struct sk_buff *skb; + + /* + * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer + * size, which is expected since buffers are at least PAGE_SIZEd. + * In this case packets up to RX_COPY_THRES have only one fragment. + */ + if (gl->tot_len <= RX_COPY_THRES) { + skb = dev_alloc_skb(gl->tot_len); + if (unlikely(!skb)) + goto out; + __skb_put(skb, gl->tot_len); + skb_copy_to_linear_data(skb, gl->va, gl->tot_len); + } else { + skb = dev_alloc_skb(skb_len); + if (unlikely(!skb)) + goto out; + __skb_put(skb, pull_len); + skb_copy_to_linear_data(skb, gl->va, pull_len); + + copy_frags(skb_shinfo(skb), gl, pull_len); + skb->len = gl->tot_len; + skb->data_len = skb->len - pull_len; + skb->truesize += skb->data_len; + } +out: return skb; +} +EXPORT_SYMBOL(cxgb4_pktgl_to_skb); + +/** + * t4_pktgl_free - free a packet gather list + * @gl: the gather list + * + * Releases the pages of a packet gather list. We do not own the last + * page on the list and do not free it. + */ +static void t4_pktgl_free(const struct pkt_gl *gl) +{ + int n; + const skb_frag_t *p; + + for (p = gl->frags, n = gl->nfrags - 1; n--; p++) + put_page(p->page); +} + +/* + * Process an MPS trace packet. Give it an unused protocol number so it won't + * be delivered to anyone and send it to the stack for capture. 
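+ * (With protocol 0xffff only packet taps, e.g. tcpdump, will see it.)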
+ */ +static noinline int handle_trace_pkt(struct adapter *adap, + const struct pkt_gl *gl) +{ + struct sk_buff *skb; + struct cpl_trace_pkt *p; + + skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4_pktgl_free(gl); + return 0; + } + + p = (struct cpl_trace_pkt *)skb->data; + __skb_pull(skb, sizeof(*p)); + skb_reset_mac_header(skb); + skb->protocol = htons(0xffff); + skb->dev = adap->port[0]; + netif_receive_skb(skb); + return 0; +} + +static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, + const struct cpl_rx_pkt *pkt) +{ + int ret; + struct sk_buff *skb; + + skb = napi_get_frags(&rxq->rspq.napi); + if (unlikely(!skb)) { + t4_pktgl_free(gl); + rxq->stats.rx_drops++; + return; + } + + copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD); + skb->len = gl->tot_len - RX_PKT_PAD; + skb->data_len = skb->len; + skb->truesize += skb->data_len; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rxq->rspq.idx); + if (rxq->rspq.netdev->features & NETIF_F_RXHASH) + skb->rxhash = (__force u32)pkt->rsshdr.hash_val; + + if (unlikely(pkt->vlan_ex)) { + __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan)); + rxq->stats.vlan_ex++; + } + ret = napi_gro_frags(&rxq->rspq.napi); + if (ret == GRO_HELD) + rxq->stats.lro_pkts++; + else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) + rxq->stats.lro_merged++; + rxq->stats.pkts++; + rxq->stats.rx_cso++; +} + +/** + * t4_ethrx_handler - process an ingress ethernet packet + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the RX_PKT message + * @si: the gather list of packet fragments + * + * Process an ingress ethernet packet and deliver it to the stack. + */ +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *si) +{ + bool csum_ok; + struct sk_buff *skb; + const struct cpl_rx_pkt *pkt; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + + if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) + return handle_trace_pkt(q->adap, si); + + pkt = (const struct cpl_rx_pkt *)rsp; + csum_ok = pkt->csum_calc && !pkt->err_vec; + if ((pkt->l2info & htonl(RXF_TCP)) && + (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { + do_gro(rxq, si, pkt); + return 0; + } + + skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4_pktgl_free(si); + rxq->stats.rx_drops++; + return 0; + } + + __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ + skb->protocol = eth_type_trans(skb, q->netdev); + skb_record_rx_queue(skb, q->idx); + if (skb->dev->features & NETIF_F_RXHASH) + skb->rxhash = (__force u32)pkt->rsshdr.hash_val; + + rxq->stats.pkts++; + + if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) && + (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { + if (!pkt->ip_frag) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + rxq->stats.rx_cso++; + } else if (pkt->l2info & htonl(RXF_IP)) { + __sum16 c = (__force __sum16)pkt->csum; + skb->csum = csum_unfold(c); + skb->ip_summed = CHECKSUM_COMPLETE; + rxq->stats.rx_cso++; + } + } else + skb_checksum_none_assert(skb); + + if (unlikely(pkt->vlan_ex)) { + __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan)); + rxq->stats.vlan_ex++; + } + netif_receive_skb(skb); + return 0; +} + +/** + * restore_rx_bufs - put back a packet's Rx buffers + * @si: the packet gather list + * @q: the SGE free list + * @frags: number of FL buffers to restore + * + * Puts back on an FL the Rx buffers associated with @si. 
The buffers + * have already been unmapped and are left unmapped, we mark them so to + * prevent further unmapping attempts. + * + * This function undoes a series of @unmap_rx_buf calls when we find out + * that the current packet can't be processed right away afterall and we + * need to come back to it later. This is a very rare event and there's + * no effort to make this particularly efficient. + */ +static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, + int frags) +{ + struct rx_sw_desc *d; + + while (frags--) { + if (q->cidx == 0) + q->cidx = q->size - 1; + else + q->cidx--; + d = &q->sdesc[q->cidx]; + d->page = si->frags[frags].page; + d->dma_addr |= RX_UNMAPPED_BUF; + q->avail++; + } +} + +/** + * is_new_response - check if a response is newly written + * @r: the response descriptor + * @q: the response queue + * + * Returns true if a response descriptor contains a yet unprocessed + * response. + */ +static inline bool is_new_response(const struct rsp_ctrl *r, + const struct sge_rspq *q) +{ + return RSPD_GEN(r->type_gen) == q->gen; +} + +/** + * rspq_next - advance to the next entry in a response queue + * @q: the queue + * + * Updates the state of a response queue to advance it to the next entry. + */ +static inline void rspq_next(struct sge_rspq *q) +{ + q->cur_desc = (void *)q->cur_desc + q->iqe_len; + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->gen ^= 1; + q->cur_desc = q->desc; + } +} + +/** + * process_responses - process responses from an SGE response queue + * @q: the ingress queue to process + * @budget: how many responses can be processed in this round + * + * Process responses from an SGE response queue up to the supplied budget. + * Responses include received packets as well as control messages from FW + * or HW. + * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. + */ +static int process_responses(struct sge_rspq *q, int budget) +{ + int ret, rsp_type; + int budget_left = budget; + const struct rsp_ctrl *rc; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + + while (likely(budget_left)) { + rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, q)) + break; + + rmb(); + rsp_type = RSPD_TYPE(rc->type_gen); + if (likely(rsp_type == RSP_TYPE_FLBUF)) { + skb_frag_t *fp; + struct pkt_gl si; + const struct rx_sw_desc *rsd; + u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; + + if (len & RSPD_NEWBUF) { + if (likely(q->offset > 0)) { + free_rx_bufs(q->adap, &rxq->fl, 1); + q->offset = 0; + } + len = RSPD_LEN(len); + } + si.tot_len = len; + + /* gather packet fragments */ + for (frags = 0, fp = si.frags; ; frags++, fp++) { + rsd = &rxq->fl.sdesc[rxq->fl.cidx]; + bufsz = get_buf_size(rsd); + fp->page = rsd->page; + fp->page_offset = q->offset; + fp->size = min(bufsz, len); + len -= fp->size; + if (!len) + break; + unmap_rx_buf(q->adap, &rxq->fl); + } + + /* + * Last buffer remains mapped so explicitly make it + * coherent for CPU access. 
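+ * (It stays mapped because q->offset merely advances within the
+ * page on success, so the rest of it may hold further packets.)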
+ */ + dma_sync_single_for_cpu(q->adap->pdev_dev, + get_buf_addr(rsd), + fp->size, DMA_FROM_DEVICE); + + si.va = page_address(si.frags[0].page) + + si.frags[0].page_offset; + prefetch(si.va); + + si.nfrags = frags + 1; + ret = q->handler(q, q->cur_desc, &si); + if (likely(ret == 0)) + q->offset += ALIGN(fp->size, FL_ALIGN); + else + restore_rx_bufs(&si, &rxq->fl, frags); + } else if (likely(rsp_type == RSP_TYPE_CPL)) { + ret = q->handler(q, q->cur_desc, NULL); + } else { + ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); + } + + if (unlikely(ret)) { + /* couldn't process descriptor, back off for recovery */ + q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX); + break; + } + + rspq_next(q); + budget_left--; + } + + if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) + __refill_fl(q->adap, &rxq->fl); + return budget - budget_left; +} + +/** + * napi_rx_handler - the NAPI handler for Rx processing + * @napi: the napi instance + * @budget: how many packets we can process in this round + * + * Handler for new data events when using NAPI. This does not need any + * locking or protection from interrupts as data interrupts are off at + * this point and other adapter interrupts do not interfere (the latter + * in not a concern at all with MSI-X as non-data interrupts then have + * a separate handler). + */ +static int napi_rx_handler(struct napi_struct *napi, int budget) +{ + unsigned int params; + struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); + int work_done = process_responses(q, budget); + + if (likely(work_done < budget)) { + napi_complete(napi); + params = q->next_intr_params; + q->next_intr_params = q->intr_params; + } else + params = QINTR_TIMER_IDX(7); + + t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) | + INGRESSQID((u32)q->cntxt_id) | SEINTARM(params)); + return work_done; +} + +/* + * The MSI-X interrupt handler for an SGE response queue. + */ +irqreturn_t t4_sge_intr_msix(int irq, void *cookie) +{ + struct sge_rspq *q = cookie; + + napi_schedule(&q->napi); + return IRQ_HANDLED; +} + +/* + * Process the indirect interrupt entries in the interrupt queue and kick off + * NAPI for each queue that has generated an entry. + */ +static unsigned int process_intrq(struct adapter *adap) +{ + unsigned int credits; + const struct rsp_ctrl *rc; + struct sge_rspq *q = &adap->sge.intrq; + + spin_lock(&adap->sge.intrq_lock); + for (credits = 0; ; credits++) { + rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, q)) + break; + + rmb(); + if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) { + unsigned int qid = ntohl(rc->pldbuflen_qid); + + qid -= adap->sge.ingr_start; + napi_schedule(&adap->sge.ingr_map[qid]->napi); + } + + rspq_next(q); + } + + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) | + INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params)); + spin_unlock(&adap->sge.intrq_lock); + return credits; +} + +/* + * The MSI interrupt handler, which handles data events from SGE response queues + * as well as error and other async events as they all use the same MSI vector. + */ +static irqreturn_t t4_intr_msi(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + t4_slow_intr_handler(adap); + process_intrq(adap); + return IRQ_HANDLED; +} + +/* + * Interrupt handler for legacy INTx interrupts. + * Handles data events from SGE response queues as well as error and other + * async events as they all use the same interrupt line. 
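+ * Unlike the MSI handler it returns IRQ_NONE when neither source had
+ * any work, since a legacy INTx line may be shared with other devices.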
+ */ +static irqreturn_t t4_intr_intx(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0); + if (t4_slow_intr_handler(adap) | process_intrq(adap)) + return IRQ_HANDLED; + return IRQ_NONE; /* probably shared interrupt */ +} + +/** + * t4_intr_handler - select the top-level interrupt handler + * @adap: the adapter + * + * Selects the top-level interrupt handler based on the type of interrupts + * (MSI-X, MSI, or INTx). + */ +irq_handler_t t4_intr_handler(struct adapter *adap) +{ + if (adap->flags & USING_MSIX) + return t4_sge_intr_msix; + if (adap->flags & USING_MSI) + return t4_intr_msi; + return t4_intr_intx; +} + +static void sge_rx_timer_cb(unsigned long data) +{ + unsigned long m; + unsigned int i, cnt[2]; + struct adapter *adap = (struct adapter *)data; + struct sge *s = &adap->sge; + + for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) + for (m = s->starving_fl[i]; m; m &= m - 1) { + struct sge_eth_rxq *rxq; + unsigned int id = __ffs(m) + i * BITS_PER_LONG; + struct sge_fl *fl = s->egr_map[id]; + + clear_bit(id, s->starving_fl); + smp_mb__after_clear_bit(); + + if (fl_starving(fl)) { + rxq = container_of(fl, struct sge_eth_rxq, fl); + if (napi_reschedule(&rxq->rspq.napi)) + fl->starving++; + else + set_bit(id, s->starving_fl); + } + } + + t4_write_reg(adap, SGE_DEBUG_INDEX, 13); + cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); + cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); + + for (i = 0; i < 2; i++) + if (cnt[i] >= s->starve_thres) { + if (s->idma_state[i] || cnt[i] == 0xffffffff) + continue; + s->idma_state[i] = 1; + t4_write_reg(adap, SGE_DEBUG_INDEX, 11); + m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16); + dev_warn(adap->pdev_dev, + "SGE idma%u starvation detected for " + "queue %lu\n", i, m & 0xffff); + } else if (s->idma_state[i]) + s->idma_state[i] = 0; + + mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); +} + +static void sge_tx_timer_cb(unsigned long data) +{ + unsigned long m; + unsigned int i, budget; + struct adapter *adap = (struct adapter *)data; + struct sge *s = &adap->sge; + + for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) + for (m = s->txq_maperr[i]; m; m &= m - 1) { + unsigned long id = __ffs(m) + i * BITS_PER_LONG; + struct sge_ofld_txq *txq = s->egr_map[id]; + + clear_bit(id, s->txq_maperr); + tasklet_schedule(&txq->qresume_tsk); + } + + budget = MAX_TIMER_TX_RECLAIM; + i = s->ethtxq_rover; + do { + struct sge_eth_txq *q = &s->ethtxq[i]; + + if (q->q.in_use && + time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && + __netif_tx_trylock(q->txq)) { + int avail = reclaimable(&q->q); + + if (avail) { + if (avail > budget) + avail = budget; + + free_tx_desc(adap, &q->q, avail, true); + q->q.in_use -= avail; + budget -= avail; + } + __netif_tx_unlock(q->txq); + } + + if (++i >= s->ethqsets) + i = 0; + } while (budget && i != s->ethtxq_rover); + s->ethtxq_rover = i; + mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); +} + +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct net_device *dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd) +{ + int ret, flsz = 0; + struct fw_iq_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Size needs to be multiple of 16, including status entry. 
*/ + iq->size = roundup(iq->size, 16); + + iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, + &iq->phys_addr, NULL, 0, NUMA_NO_NODE); + if (!iq->desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | + FW_LEN16(c)); + c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) | + FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) | + FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : + -intr_idx - 1)); + c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) | + FW_IQ_CMD_IQGTSMODE | + FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); + c.iqsize = htons(iq->size); + c.iqaddr = cpu_to_be64(iq->phys_addr); + + if (fl) { + fl->size = roundup(fl->size, 8); + fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), + sizeof(struct rx_sw_desc), &fl->addr, + &fl->sdesc, STAT_LEN, NUMA_NO_NODE); + if (!fl->desc) + goto fl_nomem; + + flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | + FW_IQ_CMD_FL0FETCHRO(1) | + FW_IQ_CMD_FL0DATARO(1) | + FW_IQ_CMD_FL0PADEN); + c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | + FW_IQ_CMD_FL0FBMAX(3)); + c.fl0size = htons(flsz); + c.fl0addr = cpu_to_be64(fl->addr); + } + + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + if (ret) + goto err; + + netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); + iq->cur_desc = iq->desc; + iq->cidx = 0; + iq->gen = 1; + iq->next_intr_params = iq->intr_params; + iq->cntxt_id = ntohs(c.iqid); + iq->abs_id = ntohs(c.physiqid); + iq->size--; /* subtract status entry */ + iq->adap = adap; + iq->netdev = dev; + iq->handler = hnd; + + /* set offset to -1 to distinguish ingress queues without FL */ + iq->offset = fl ? 
0 : -1; + + adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; + + if (fl) { + fl->cntxt_id = ntohs(c.fl0id); + fl->avail = fl->pend_cred = 0; + fl->pidx = fl->cidx = 0; + fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; + adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; + refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); + } + return 0; + +fl_nomem: + ret = -ENOMEM; +err: + if (iq->desc) { + dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, + iq->desc, iq->phys_addr); + iq->desc = NULL; + } + if (fl && fl->desc) { + kfree(fl->sdesc); + fl->sdesc = NULL; + dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), + fl->desc, fl->addr); + fl->desc = NULL; + } + return ret; +} + +static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) +{ + q->in_use = 0; + q->cidx = q->pidx = 0; + q->stops = q->restarts = 0; + q->stat = (void *)&q->desc[q->size]; + q->cntxt_id = id; + adap->sge.egr_map[id - adap->sge.egr_start] = q; +} + +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *netdevq, + unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_eth_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, + sizeof(struct tx_desc), sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN, + netdev_queue_numa_node_read(netdevq)); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | + FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); + c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); + c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | + FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_ETH_CMD_FETCHRO(1) | + FW_EQ_ETH_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | + FW_EQ_ETH_CMD_FBMAX(3) | + FW_EQ_ETH_CMD_CIDXFTHRESH(5) | + FW_EQ_ETH_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + if (ret) { + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd))); + txq->txq = netdevq; + txq->tso = txq->tx_cso = txq->vlan_ins = 0; + txq->mapping_err = 0; + return 0; +} + +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int cmplqid) +{ + int ret, nentries; + struct fw_eq_ctrl_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, nentries, + sizeof(struct tx_desc), 0, &txq->q.phys_addr, + NULL, 0, NUMA_NO_NODE); + if (!txq->q.desc) + return -ENOMEM; + + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_CTRL_CMD_PFN(adap->fn) | + FW_EQ_CTRL_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | + FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); + c.physeqid_pkd = htonl(0); + c.fetchszm_to_iqid = 
htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | + FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_CTRL_CMD_FETCHRO | + FW_EQ_CTRL_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | + FW_EQ_CTRL_CMD_FBMAX(3) | + FW_EQ_CTRL_CMD_CIDXFTHRESH(5) | + FW_EQ_CTRL_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + if (ret) { + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid))); + txq->adap = adap; + skb_queue_head_init(&txq->sendq); + tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); + txq->full = 0; + return 0; +} + +int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, + struct net_device *dev, unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_ofld_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, + sizeof(struct tx_desc), sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN, + NUMA_NO_NODE); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_OFLD_CMD_PFN(adap->fn) | + FW_EQ_OFLD_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | + FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); + c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | + FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_OFLD_CMD_FETCHRO(1) | + FW_EQ_OFLD_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | + FW_EQ_OFLD_CMD_FBMAX(3) | + FW_EQ_OFLD_CMD_CIDXFTHRESH(5) | + FW_EQ_OFLD_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + if (ret) { + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd))); + txq->adap = adap; + skb_queue_head_init(&txq->sendq); + tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); + txq->full = 0; + txq->mapping_err = 0; + return 0; +} + +static void free_txq(struct adapter *adap, struct sge_txq *q) +{ + dma_free_coherent(adap->pdev_dev, + q->size * sizeof(struct tx_desc) + STAT_LEN, + q->desc, q->phys_addr); + q->cntxt_id = 0; + q->sdesc = NULL; + q->desc = NULL; +} + +static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, + struct sge_fl *fl) +{ + unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; + + adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; + t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP, + rq->cntxt_id, fl_id, 0xffff); + dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, + rq->desc, rq->phys_addr); + netif_napi_del(&rq->napi); + rq->netdev = NULL; + rq->cntxt_id = rq->abs_id = 0; + rq->desc = NULL; + + if (fl) { + free_rx_bufs(adap, fl, fl->avail); + dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN, + fl->desc, fl->addr); + kfree(fl->sdesc); + fl->sdesc = NULL; + fl->cntxt_id = 0; + fl->desc = NULL; + } +} + +/** + * t4_free_sge_resources - free SGE resources + * @adap: the adapter + * + * Frees resources used by the SGE queue sets. 
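+ *
+ * This is safe to call on a partially initialized SGE: each queue
+ * below is torn down only if its descriptor ring was allocated
+ * (->desc is non-NULL), so error paths need not track which queues
+ * actually came up.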
+ */
+void t4_free_sge_resources(struct adapter *adap)
+{
+	int i;
+	struct sge_eth_rxq *eq = adap->sge.ethrxq;
+	struct sge_eth_txq *etq = adap->sge.ethtxq;
+	struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
+
+	/* clean up Ethernet Tx/Rx queues */
+	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+		if (eq->rspq.desc)
+			free_rspq_fl(adap, &eq->rspq, &eq->fl);
+		if (etq->q.desc) {
+			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+				       etq->q.cntxt_id);
+			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+			kfree(etq->q.sdesc);
+			free_txq(adap, &etq->q);
+		}
+	}
+
+	/* clean up RDMA and iSCSI Rx queues */
+	for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
+		if (oq->rspq.desc)
+			free_rspq_fl(adap, &oq->rspq, &oq->fl);
+	}
+	for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
+		if (oq->rspq.desc)
+			free_rspq_fl(adap, &oq->rspq, &oq->fl);
+	}
+
+	/* clean up offload Tx queues */
+	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
+		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
+
+		if (q->q.desc) {
+			tasklet_kill(&q->qresume_tsk);
+			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+					q->q.cntxt_id);
+			free_tx_desc(adap, &q->q, q->q.in_use, false);
+			kfree(q->q.sdesc);
+			__skb_queue_purge(&q->sendq);
+			free_txq(adap, &q->q);
+		}
+	}
+
+	/* clean up control Tx queues */
+	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+		if (cq->q.desc) {
+			tasklet_kill(&cq->qresume_tsk);
+			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+					cq->q.cntxt_id);
+			__skb_queue_purge(&cq->sendq);
+			free_txq(adap, &cq->q);
+		}
+	}
+
+	if (adap->sge.fw_evtq.desc)
+		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+
+	if (adap->sge.intrq.desc)
+		free_rspq_fl(adap, &adap->sge.intrq, NULL);
+
+	/* clear the reverse egress queue map */
+	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
+}
+
+void t4_sge_start(struct adapter *adap)
+{
+	adap->sge.ethtxq_rover = 0;
+	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ * t4_sge_stop - disable SGE operation
+ * @adap: the adapter
+ *
+ * Stop tasklets and timers associated with the DMA engine. Note that
+ * this is effective only if measures have been taken to disable any HW
+ * events that may restart them.
+ */
+void t4_sge_stop(struct adapter *adap)
+{
+	int i;
+	struct sge *s = &adap->sge;
+
+	if (in_interrupt())  /* actions below require waiting */
+		return;
+
+	if (s->rx_timer.function)
+		del_timer_sync(&s->rx_timer);
+	if (s->tx_timer.function)
+		del_timer_sync(&s->tx_timer);
+
+	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
+		struct sge_ofld_txq *q = &s->ofldtxq[i];
+
+		if (q->q.desc)
+			tasklet_kill(&q->qresume_tsk);
+	}
+	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
+		struct sge_ctrl_txq *cq = &s->ctrlq[i];
+
+		if (cq->q.desc)
+			tasklet_kill(&cq->qresume_tsk);
+	}
+}
+
+/**
+ * t4_sge_init - initialize SGE
+ * @adap: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queues here; instead the driver's
+ * top level must request them individually.
+ */
+void t4_sge_init(struct adapter *adap)
+{
+	unsigned int i, v;
+	struct sge *s = &adap->sge;
+	unsigned int fl_align_log = ilog2(FL_ALIGN);
+
+	t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
+			 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
+			 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
+			 RXPKTCPLMODE |
+			 (STAT_LEN == 128 ?
EGRSTATUSPAGESIZE : 0)); + + for (i = v = 0; i < 32; i += 4) + v |= (PAGE_SHIFT - 10) << i; + t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v); + t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); +#if FL_PG_ORDER > 0 + t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); +#endif + t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, + THRESHOLD_0(s->counter_val[0]) | + THRESHOLD_1(s->counter_val[1]) | + THRESHOLD_2(s->counter_val[2]) | + THRESHOLD_3(s->counter_val[3])); + t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); + t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3]))); + t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5]))); + setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); + setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); + s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ + s->idma_state[0] = s->idma_state[1] = 0; + spin_lock_init(&s->intrq_lock); +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c new file mode 100644 index 0000000..d1ec111 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -0,0 +1,2856 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+
+/**
+ * t4_wait_op_done_val - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+			       int polarity, int attempts, int delay, u32 *valp)
+{
+	while (1) {
+		u32 val = t4_read_reg(adapter, reg);
+
+		if (!!(val & mask) == polarity) {
+			if (valp)
+				*valp = val;
+			return 0;
+		}
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			udelay(delay);
+	}
+}
+
+static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+				  int polarity, int attempts, int delay)
+{
+	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+				   delay, NULL);
+}
+
+/**
+ * t4_set_reg_field - set a register field to a value
+ * @adapter: the adapter to program
+ * @addr: the register address
+ * @mask: specifies the portion of the register to modify
+ * @val: the new value for the register field
+ *
+ * Sets a register field specified by the supplied mask to the
+ * given value.
+ */
+void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+		      u32 val)
+{
+	u32 v = t4_read_reg(adapter, addr) & ~mask;
+
+	t4_write_reg(adapter, addr, v | val);
+	(void) t4_read_reg(adapter, addr);      /* flush */
+}
+
+/**
+ * t4_read_indirect - read indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value of the indirect register
+ * @vals: where the read register values are stored
+ * @nregs: how many indirect registers to read
+ * @start_idx: index of first indirect register to read
+ *
+ * Reads registers that are accessed indirectly through an address/data
+ * register pair.
+ */
+static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+			     unsigned int data_reg, u32 *vals,
+			     unsigned int nregs, unsigned int start_idx)
+{
+	while (nregs--) {
+		t4_write_reg(adap, addr_reg, start_idx);
+		*vals++ = t4_read_reg(adap, data_reg);
+		start_idx++;
+	}
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+			 u32 mbox_addr)
+{
+	for ( ; nflit; nflit--, mbox_addr += 8)
+		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
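+ * The reply is copied out in big-endian flit order by get_mbox_rpl()
+ * above and then decoded as a struct fw_debug_cmd to recover the
+ * firmware source file, line number and the two assertion values.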
+ */
+static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+	struct fw_debug_cmd asrt;
+
+	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
+	dev_alert(adap->pdev_dev,
+		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
+		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+}
+
+static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
+{
+	dev_err(adap->pdev_dev,
+		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
+		(unsigned long long)t4_read_reg64(adap, data_reg),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
+}
+
+/**
+ * t4_wr_mbox_meat - send a command to FW through the given mailbox
+ * @adap: the adapter
+ * @mbox: index of the mailbox to use
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the selected mailbox and waits
+ * for the FW to execute the command. If @rpl is not %NULL it is used to
+ * store the FW's reply to the command. The command and its optional
+ * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
+ * to respond. @sleep_ok determines whether we may sleep while awaiting
+ * the response. If sleeping is allowed we use progressive backoff;
+ * otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+		    void *rpl, bool sleep_ok)
+{
+	static const int delay[] = {
+		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
+	};
+
+	u32 v;
+	u64 res;
+	int i, ms, delay_idx;
+	const __be64 *p = cmd;
+	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
+	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
+
+	if ((size & 15) || size > MBOX_LEN)
+		return -EINVAL;
+
+	/*
+	 * If the device is off-line, as in EEH, commands will time out.
+	 * Fail them early so we don't waste time waiting.
+	 */
+	if (adap->pdev->error_state != pci_channel_io_normal)
+		return -EIO;
+
+	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
+		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+
+	if (v != MBOX_OWNER_DRV)
+		return v ?
+			-EBUSY : -ETIMEDOUT;
+
+	for (i = 0; i < size; i += 8)
+		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
+
+	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+	t4_read_reg(adap, ctl_reg);          /* flush write */
+
+	delay_idx = 0;
+	ms = delay[0];
+
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+		if (sleep_ok) {
+			ms = delay[delay_idx];  /* last element may repeat */
+			if (delay_idx < ARRAY_SIZE(delay) - 1)
+				delay_idx++;
+			msleep(ms);
+		} else
+			mdelay(ms);
+
+		v = t4_read_reg(adap, ctl_reg);
+		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+			if (!(v & MBMSGVALID)) {
+				t4_write_reg(adap, ctl_reg, 0);
+				continue;
+			}
+
+			res = t4_read_reg64(adap, data_reg);
+			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
+				fw_asrt(adap, data_reg);
+				res = FW_CMD_RETVAL(EIO);
+			} else if (rpl)
+				get_mbox_rpl(adap, rpl, size / 8, data_reg);
+
+			if (FW_CMD_RETVAL_GET((int)res))
+				dump_mbox(adap, mbox, data_reg);
+			t4_write_reg(adap, ctl_reg, 0);
+			return -FW_CMD_RETVAL_GET((int)res);
+		}
+	}
+
+	dump_mbox(adap, mbox, data_reg);
+	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
+		*(const u8 *)cmd, mbox);
+	return -ETIMEDOUT;
+}
+
+/**
+ * t4_mc_read - read from MC through backdoor accesses
+ * @adap: the adapter
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+{
+	int i;
+
+	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
+		return -EBUSY;
+	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
+	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
+	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
+	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
+		     BIST_CMD_GAP(1));
+	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
+	if (i)
+		return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+	for (i = 15; i >= 0; i--)
+		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
+	if (ecc)
+		*ecc = t4_read_reg64(adap, MC_DATA(16));
+#undef MC_DATA
+	return 0;
+}
+
+/**
+ * t4_edc_read - read from EDC through backdoor accesses
+ * @adap: the adapter
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
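+ *
+ * A usage sketch (the caller and buffer shown are hypothetical):
+ *
+ *	__be32 buf[16];
+ *	u64 ecc;
+ *	int err = t4_edc_read(adap, 0, addr, buf, &ecc);
+ *
+ * which fills buf[] with the aligned 64-byte window covering @addr.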
+ */ +int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) +{ + int i; + + idx *= EDC_STRIDE; + if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST) + return -EBUSY; + t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); + t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); + t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); + t4_write_reg(adap, EDC_BIST_CMD + idx, + BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); + i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); + if (i) + return i; + +#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) + + for (i = 15; i >= 0; i--) + *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); + if (ecc) + *ecc = t4_read_reg64(adap, EDC_DATA(16)); +#undef EDC_DATA + return 0; +} + +#define EEPROM_STAT_ADDR 0x7bfc +#define VPD_BASE 0 +#define VPD_LEN 512 + +/** + * t4_seeprom_wp - enable/disable EEPROM write protection + * @adapter: the adapter + * @enable: whether to enable or disable write protection + * + * Enables or disables write protection on the serial EEPROM. + */ +int t4_seeprom_wp(struct adapter *adapter, bool enable) +{ + unsigned int v = enable ? 0xc : 0; + int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); + return ret < 0 ? ret : 0; +} + +/** + * get_vpd_params - read VPD parameters from VPD EEPROM + * @adapter: adapter to read + * @p: where to store the parameters + * + * Reads card parameters stored in VPD EEPROM. + */ +static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) +{ + int i, ret; + int ec, sn; + u8 vpd[VPD_LEN], csum; + unsigned int vpdr_len, kw_offset, id_len; + + ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd); + if (ret < 0) + return ret; + + if (vpd[0] != PCI_VPD_LRDT_ID_STRING) { + dev_err(adapter->pdev_dev, "missing VPD ID string\n"); + return -EINVAL; + } + + id_len = pci_vpd_lrdt_size(vpd); + if (id_len > ID_LEN) + id_len = ID_LEN; + + i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA); + if (i < 0) { + dev_err(adapter->pdev_dev, "missing VPD-R section\n"); + return -EINVAL; + } + + vpdr_len = pci_vpd_lrdt_size(&vpd[i]); + kw_offset = i + PCI_VPD_LRDT_TAG_SIZE; + if (vpdr_len + kw_offset > VPD_LEN) { + dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len); + return -EINVAL; + } + +#define FIND_VPD_KW(var, name) do { \ + var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \ + if (var < 0) { \ + dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \ + return -EINVAL; \ + } \ + var += PCI_VPD_INFO_FLD_HDR_SIZE; \ +} while (0) + + FIND_VPD_KW(i, "RV"); + for (csum = 0; i >= 0; i--) + csum += vpd[i]; + + if (csum) { + dev_err(adapter->pdev_dev, + "corrupted VPD EEPROM, actual csum %u\n", csum); + return -EINVAL; + } + + FIND_VPD_KW(ec, "EC"); + FIND_VPD_KW(sn, "SN"); +#undef FIND_VPD_KW + + memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); + strim(p->id); + memcpy(p->ec, vpd + ec, EC_LEN); + strim(p->ec); + i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); + memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); + strim(p->sn); + return 0; +} + +/* serial flash and firmware constants */ +enum { + SF_ATTEMPTS = 10, /* max retries for SF operations */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_RD_ID = 0x9f, /* read ID */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ + 
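+	/* The opcodes above follow the de facto standard SPI NOR
+	 * command set (write enable, read status, page program, fast
+	 * read, read ID, sector erase), so the same values work across
+	 * the usual serial flash parts.
+	 */
+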
+	FW_MAX_SIZE = 512 * 1024,
+};
+
+/**
+ * sf1_read - read data from the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+		    int lock, u32 *valp)
+{
+	int ret;
+
+	if (!byte_cnt || byte_cnt > 4)
+		return -EINVAL;
+	if (t4_read_reg(adapter, SF_OP) & BUSY)
+		return -EBUSY;
+	cont = cont ? SF_CONT : 0;
+	lock = lock ? SF_LOCK : 0;
+	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
+	ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+	if (!ret)
+		*valp = t4_read_reg(adapter, SF_DATA);
+	return ret;
+}
+
+/**
+ * sf1_write - write data to the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+		     int lock, u32 val)
+{
+	if (!byte_cnt || byte_cnt > 4)
+		return -EINVAL;
+	if (t4_read_reg(adapter, SF_OP) & BUSY)
+		return -EBUSY;
+	cont = cont ? SF_CONT : 0;
+	lock = lock ? SF_LOCK : 0;
+	t4_write_reg(adapter, SF_DATA, val);
+	t4_write_reg(adapter, SF_OP, lock |
+		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
+	return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+}
+
+/**
+ * flash_wait_op - wait for a flash operation to complete
+ * @adapter: the adapter
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+	int ret;
+	u32 status;
+
+	while (1) {
+		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
+		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
+			return ret;
+		if (!(status & 1))
+			return 0;
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			msleep(delay);
+	}
+}
+
+/**
+ * t4_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
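+ *
+ * get_fw_version() below is the simplest caller: it reads the single
+ * word holding fw_ver out of the flash-resident struct fw_hdr.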
+ */
+static int t4_read_flash(struct adapter *adapter, unsigned int addr,
+			 unsigned int nwords, u32 *data, int byte_oriented)
+{
+	int ret;
+
+	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
+		return -EINVAL;
+
+	addr = swab32(addr) | SF_RD_DATA_FAST;
+
+	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
+	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
+		return ret;
+
+	for ( ; nwords; nwords--, data++) {
+		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+		if (nwords == 1)
+			t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
+		if (ret)
+			return ret;
+		if (byte_oriented)
+			*data = htonl(*data);
+	}
+	return 0;
+}
+
+/**
+ * t4_write_flash - write up to a page of data to the serial flash
+ * @adapter: the adapter
+ * @addr: the start address to write
+ * @n: length of data to write in bytes
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address. All the data must be written to the same page.
+ */
+static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+			  unsigned int n, const u8 *data)
+{
+	int ret;
+	u32 buf[64];
+	unsigned int i, c, left, val, offset = addr & 0xff;
+
+	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
+		return -EINVAL;
+
+	val = swab32(addr) | SF_PROG_PAGE;
+
+	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+		goto unlock;
+
+	for (left = n; left; left -= c) {
+		c = min(left, 4U);
+		for (val = 0, i = 0; i < c; ++i)
+			val = (val << 8) + *data++;
+
+		ret = sf1_write(adapter, c, c != left, 1, val);
+		if (ret)
+			goto unlock;
+	}
+	ret = flash_wait_op(adapter, 8, 1);
+	if (ret)
+		goto unlock;
+
+	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
+
+	/* Read the page to verify the write succeeded */
+	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+	if (ret)
+		return ret;
+
+	if (memcmp(data - n, (u8 *)buf + offset, n)) {
+		dev_err(adapter->pdev_dev,
+			"failed to correctly write the flash page at %#x\n",
+			addr);
+		return -EIO;
+	}
+	return 0;
+
+unlock:
+	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
+	return ret;
+}
+
+/**
+ * get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+static int get_fw_version(struct adapter *adapter, u32 *vers)
+{
+	return t4_read_flash(adapter, adapter->params.sf_fw_start +
+			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
+}
+
+/**
+ * get_tp_version - read the TP microcode version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+static int get_tp_version(struct adapter *adapter, u32 *vers)
+{
+	return t4_read_flash(adapter, adapter->params.sf_fw_start +
+			     offsetof(struct fw_hdr, tp_microcode_ver),
+			     1, vers, 0);
+}
+
+/**
+ * t4_check_fw_version - check if the FW is compatible with this driver
+ * @adapter: the adapter
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if there's an exact match, a negative error if the version could not
+ * be read or there's a major version mismatch, and a positive value if
+ * the expected major version is found but there's a minor version
+ * mismatch.
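+ *
+ * A sketch of the intended caller-side handling (illustrative only):
+ *
+ *	ret = t4_check_fw_version(adap);
+ *	if (ret < 0)
+ *		return ret;	/* unreadable or major mismatch */
+ *	if (ret > 0)
+ *		dev_warn(adap->pdev_dev, "FW minor version mismatch\n");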
+ */ +int t4_check_fw_version(struct adapter *adapter) +{ + u32 api_vers[2]; + int ret, major, minor, micro; + + ret = get_fw_version(adapter, &adapter->params.fw_vers); + if (!ret) + ret = get_tp_version(adapter, &adapter->params.tp_vers); + if (!ret) + ret = t4_read_flash(adapter, adapter->params.sf_fw_start + + offsetof(struct fw_hdr, intfver_nic), + 2, api_vers, 1); + if (ret) + return ret; + + major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); + minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); + micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); + memcpy(adapter->params.api_vers, api_vers, + sizeof(adapter->params.api_vers)); + + if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ + dev_err(adapter->pdev_dev, + "card FW has major version %u, driver wants %u\n", + major, FW_VERSION_MAJOR); + return -EINVAL; + } + + if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) + return 0; /* perfect match */ + + /* Minor/micro version mismatch. Report it but often it's OK. */ + return 1; +} + +/** + * t4_flash_erase_sectors - erase a range of flash sectors + * @adapter: the adapter + * @start: the first sector to erase + * @end: the last sector to erase + * + * Erases the sectors in the given inclusive range. + */ +static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) +{ + int ret = 0; + + while (start <= end) { + if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || + (ret = sf1_write(adapter, 4, 0, 1, + SF_ERASE_SECTOR | (start << 8))) != 0 || + (ret = flash_wait_op(adapter, 14, 500)) != 0) { + dev_err(adapter->pdev_dev, + "erase of flash sector %d failed, error %d\n", + start, ret); + break; + } + start++; + } + t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ + return ret; +} + +/** + * t4_load_fw - download firmware + * @adap: the adapter + * @fw_data: the firmware image to write + * @size: image size + * + * Write the supplied firmware image to the card's serial flash. + */ +int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) +{ + u32 csum; + int ret, addr; + unsigned int i; + u8 first_page[SF_PAGE_SIZE]; + const u32 *p = (const u32 *)fw_data; + const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; + unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; + unsigned int fw_img_start = adap->params.sf_fw_start; + unsigned int fw_start_sec = fw_img_start / sf_sec_size; + + if (!size) { + dev_err(adap->pdev_dev, "FW image has no data\n"); + return -EINVAL; + } + if (size & 511) { + dev_err(adap->pdev_dev, + "FW image size not multiple of 512 bytes\n"); + return -EINVAL; + } + if (ntohs(hdr->len512) * 512 != size) { + dev_err(adap->pdev_dev, + "FW image size differs from size in FW header\n"); + return -EINVAL; + } + if (size > FW_MAX_SIZE) { + dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", + FW_MAX_SIZE); + return -EFBIG; + } + + for (csum = 0, i = 0; i < size / sizeof(csum); i++) + csum += ntohl(p[i]); + + if (csum != 0xffffffff) { + dev_err(adap->pdev_dev, + "corrupted firmware image, checksum %#x\n", csum); + return -EINVAL; + } + + i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ + ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); + if (ret) + goto out; + + /* + * We write the correct version at the end so the driver can see a bad + * version if the FW write fails. Start by writing a copy of the + * first page with a bad version. 
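+	 * Only after every page has been written is the real fw_ver
+	 * copied back over the 0xffffffff placeholder, making the final
+	 * t4_write_flash() call below the commit point of the download.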
+	 */
+	memcpy(first_page, fw_data, SF_PAGE_SIZE);
+	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
+	if (ret)
+		goto out;
+
+	addr = fw_img_start;
+	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+		addr += SF_PAGE_SIZE;
+		fw_data += SF_PAGE_SIZE;
+		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+		if (ret)
+			goto out;
+	}
+
+	ret = t4_write_flash(adap,
+			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
+			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+out:
+	if (ret)
+		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
+			ret);
+	return ret;
+}
+
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+
+/**
+ * t4_link_start - apply link configuration to MAC/PHY
+ * @adap: the adapter
+ * @mbox: mbox to use for the FW command
+ * @port: the port id
+ * @lc: the requested link configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ *   enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ *   otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+		  struct link_config *lc)
+{
+	struct fw_port_cmd c;
+	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+
+	lc->link_ok = 0;
+	if (lc->requested_fc & PAUSE_RX)
+		fc |= FW_PORT_CAP_FC_RX;
+	if (lc->requested_fc & PAUSE_TX)
+		fc |= FW_PORT_CAP_FC_TX;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
+			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
+	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+				  FW_LEN16(c));
+
+	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+	} else if (lc->autoneg == AUTONEG_DISABLE) {
+		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+	} else
+		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_restart_aneg - restart autonegotiation
+ * @adap: the adapter
+ * @mbox: mbox to use for the FW command
+ * @port: the port id
+ *
+ * Restarts autonegotiation for the selected port.
+ */ +int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) +{ + struct fw_port_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + FW_LEN16(c)); + c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +struct intr_info { + unsigned int mask; /* bits to check in interrupt status */ + const char *msg; /* message to print or NULL */ + short stat_idx; /* stat counter to increment or -1 */ + unsigned short fatal; /* whether the condition reported is fatal */ +}; + +/** + * t4_handle_intr_status - table driven interrupt handler + * @adapter: the adapter that generated the interrupt + * @reg: the interrupt status register to process + * @acts: table of interrupt actions + * + * A table driven interrupt handler that applies a set of masks to an + * interrupt status word and performs the corresponding actions if the + * interrupts described by the mask have occurred. The actions include + * optionally emitting a warning or alert message. The table is terminated + * by an entry specifying mask 0. Returns the number of fatal interrupt + * conditions. + */ +static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, + const struct intr_info *acts) +{ + int fatal = 0; + unsigned int mask = 0; + unsigned int status = t4_read_reg(adapter, reg); + + for ( ; acts->mask; ++acts) { + if (!(status & acts->mask)) + continue; + if (acts->fatal) { + fatal++; + dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, + status & acts->mask); + } else if (acts->msg && printk_ratelimit()) + dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, + status & acts->mask); + mask |= acts->mask; + } + status &= mask; + if (status) /* clear processed interrupts */ + t4_write_reg(adapter, reg, status); + return fatal; +} + +/* + * Interrupt handler for the PCIE module. 
+ */ +static void pcie_intr_handler(struct adapter *adapter) +{ + static const struct intr_info sysbus_intr_info[] = { + { RNPP, "RXNP array parity error", -1, 1 }, + { RPCP, "RXPC array parity error", -1, 1 }, + { RCIP, "RXCIF array parity error", -1, 1 }, + { RCCP, "Rx completions control array parity error", -1, 1 }, + { RFTP, "RXFT array parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info pcie_port_intr_info[] = { + { TPCP, "TXPC array parity error", -1, 1 }, + { TNPP, "TXNP array parity error", -1, 1 }, + { TFTP, "TXFT array parity error", -1, 1 }, + { TCAP, "TXCA array parity error", -1, 1 }, + { TCIP, "TXCIF array parity error", -1, 1 }, + { RCAP, "RXCA array parity error", -1, 1 }, + { OTDD, "outbound request TLP discarded", -1, 1 }, + { RDPE, "Rx data parity error", -1, 1 }, + { TDUE, "Tx uncorrectable data error", -1, 1 }, + { 0 } + }; + static const struct intr_info pcie_intr_info[] = { + { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, + { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, + { MSIDATAPERR, "MSI data parity error", -1, 1 }, + { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, + { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, + { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, + { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, + { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, + { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, + { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, + { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, + { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, + { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR, "PCI FID parity error", -1, 1 }, + { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, + { MATAGPERR, "PCI MA tag parity error", -1, 1 }, + { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, + { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, + { RXWRPERR, "PCI Rx write parity error", -1, 1 }, + { RPLPERR, "PCI replay buffer parity error", -1, 1 }, + { PCIESINT, "PCI core secondary fault", -1, 1 }, + { PCIEPINT, "PCI core primary fault", -1, 1 }, + { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + sysbus_intr_info) + + t4_handle_intr_status(adapter, + PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + pcie_port_intr_info) + + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); + if (fat) + t4_fatal_err(adapter); +} + +/* + * TP interrupt handler. + */ +static void tp_intr_handler(struct adapter *adapter) +{ + static const struct intr_info tp_intr_info[] = { + { 0x3fffffff, "TP parity error", -1, 1 }, + { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) + t4_fatal_err(adapter); +} + +/* + * SGE interrupt handler. 
+ */ +static void sge_intr_handler(struct adapter *adapter) +{ + u64 v; + + static const struct intr_info sge_intr_info[] = { + { ERR_CPL_EXCEED_IQE_SIZE, + "SGE received CPL exceeding IQE size", -1, 1 }, + { ERR_INVALID_CIDX_INC, + "SGE GTS CIDX increment too large", -1, 0 }, + { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, + { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, + { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, + "SGE IQID > 1023 received CPL for FL", -1, 0 }, + { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, + 0 }, + { ERR_ING_CTXT_PRIO, + "SGE too many priority ingress contexts", -1, 0 }, + { ERR_EGR_CTXT_PRIO, + "SGE too many priority egress contexts", -1, 0 }, + { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, + { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, + { 0 } + }; + + v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | + ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); + if (v) { + dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", + (unsigned long long)v); + t4_write_reg(adapter, SGE_INT_CAUSE1, v); + t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); + } + + if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || + v != 0) + t4_fatal_err(adapter); +} + +/* + * CIM interrupt handler. + */ +static void cim_intr_handler(struct adapter *adapter) +{ + static const struct intr_info cim_intr_info[] = { + { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, + { OBQPARERR, "CIM OBQ parity error", -1, 1 }, + { IBQPARERR, "CIM IBQ parity error", -1, 1 }, + { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, + { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, + { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, + { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info cim_upintr_info[] = { + { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, + { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, + { ILLWRINT, "CIM illegal write", -1, 1 }, + { ILLRDINT, "CIM illegal read", -1, 1 }, + { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, + { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, + { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, + { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, + { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, + { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, + { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, + { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, + { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, + { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, + { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, + { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, + { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, + { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, + { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, + { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, + { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, + { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, + { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, + { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, + { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, + { RSPOVRLOOKUPINT , "CIM response FIFO 
overwrite", -1, 1 }, + { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, + { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, + cim_intr_info) + + t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, + cim_upintr_info); + if (fat) + t4_fatal_err(adapter); +} + +/* + * ULP RX interrupt handler. + */ +static void ulprx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info ulprx_intr_info[] = { + { 0x1800000, "ULPRX context error", -1, 1 }, + { 0x7fffff, "ULPRX parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * ULP TX interrupt handler. + */ +static void ulptx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info ulptx_intr_info[] = { + { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, + 0 }, + { 0xfffffff, "ULPTX parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * PM TX interrupt handler. + */ +static void pmtx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pmtx_intr_info[] = { + { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, + { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, + { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, + { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, + { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, + { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, + { 0 } + }; + + if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * PM RX interrupt handler. + */ +static void pmrx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pmrx_intr_info[] = { + { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, + { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, + { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, + { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, + { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, + { 0 } + }; + + if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * CPL switch interrupt handler. + */ +static void cplsw_intr_handler(struct adapter *adapter) +{ + static const struct intr_info cplsw_intr_info[] = { + { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, + { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, + { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, + { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, + { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, + { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) + t4_fatal_err(adapter); +} + +/* + * LE interrupt handler. 
+ */ +static void le_intr_handler(struct adapter *adap) +{ + static const struct intr_info le_intr_info[] = { + { LIPMISS, "LE LIP miss", -1, 0 }, + { LIP0, "LE 0 LIP error", -1, 0 }, + { PARITYERR, "LE parity error", -1, 1 }, + { UNKNOWNCMD, "LE unknown command", -1, 1 }, + { REQQPARERR, "LE request queue parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) + t4_fatal_err(adap); +} + +/* + * MPS interrupt handler. + */ +static void mps_intr_handler(struct adapter *adapter) +{ + static const struct intr_info mps_rx_intr_info[] = { + { 0xffffff, "MPS Rx parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_tx_intr_info[] = { + { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, + { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, + { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, + { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, + { BUBBLE, "MPS Tx underflow", -1, 1 }, + { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, + { FRMERR, "MPS Tx framing error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_trc_intr_info[] = { + { FILTMEM, "MPS TRC filter parity error", -1, 1 }, + { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, + { MISCPERR, "MPS TRC misc parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_stat_sram_intr_info[] = { + { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_stat_tx_intr_info[] = { + { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_stat_rx_intr_info[] = { + { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_cls_intr_info[] = { + { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, + { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, + { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, + mps_rx_intr_info) + + t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, + mps_tx_intr_info) + + t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, + mps_trc_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, + mps_stat_sram_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, + mps_stat_tx_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, + mps_stat_rx_intr_info) + + t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, + mps_cls_intr_info); + + t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT | + RXINT | TXINT | STATINT); + t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */ + if (fat) + t4_fatal_err(adapter); +} + +#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) + +/* + * EDC/MC interrupt handler. 
+ */ +static void mem_intr_handler(struct adapter *adapter, int idx) +{ + static const char name[3][5] = { "EDC0", "EDC1", "MC" }; + + unsigned int addr, cnt_addr, v; + + if (idx <= MEM_EDC1) { + addr = EDC_REG(EDC_INT_CAUSE, idx); + cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); + } else { + addr = MC_INT_CAUSE; + cnt_addr = MC_ECC_STATUS; + } + + v = t4_read_reg(adapter, addr) & MEM_INT_MASK; + if (v & PERR_INT_CAUSE) + dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", + name[idx]); + if (v & ECC_CE_INT_CAUSE) { + u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); + + t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); + if (printk_ratelimit()) + dev_warn(adapter->pdev_dev, + "%u %s correctable ECC data error%s\n", + cnt, name[idx], cnt > 1 ? "s" : ""); + } + if (v & ECC_UE_INT_CAUSE) + dev_alert(adapter->pdev_dev, + "%s uncorrectable ECC data error\n", name[idx]); + + t4_write_reg(adapter, addr, v); + if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) + t4_fatal_err(adapter); +} + +/* + * MA interrupt handler. + */ +static void ma_intr_handler(struct adapter *adap) +{ + u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); + + if (status & MEM_PERR_INT_CAUSE) + dev_alert(adap->pdev_dev, + "MA parity error, parity status %#x\n", + t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); + if (status & MEM_WRAP_INT_CAUSE) { + v = t4_read_reg(adap, MA_INT_WRAP_STATUS); + dev_alert(adap->pdev_dev, "MA address wrap-around error by " + "client %u to address %#x\n", + MEM_WRAP_CLIENT_NUM_GET(v), + MEM_WRAP_ADDRESS_GET(v) << 4); + } + t4_write_reg(adap, MA_INT_CAUSE, status); + t4_fatal_err(adap); +} + +/* + * SMB interrupt handler. + */ +static void smb_intr_handler(struct adapter *adap) +{ + static const struct intr_info smb_intr_info[] = { + { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, + { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, + { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) + t4_fatal_err(adap); +} + +/* + * NC-SI interrupt handler. + */ +static void ncsi_intr_handler(struct adapter *adap) +{ + static const struct intr_info ncsi_intr_info[] = { + { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, + { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, + { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, + { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) + t4_fatal_err(adap); +} + +/* + * XGMAC interrupt handler. + */ +static void xgmac_intr_handler(struct adapter *adap, int port) +{ + u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); + + v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; + if (!v) + return; + + if (v & TXFIFO_PRTY_ERR) + dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", + port); + if (v & RXFIFO_PRTY_ERR) + dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", + port); + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); + t4_fatal_err(adap); +} + +/* + * PL interrupt handler. 
+ */ +static void pl_intr_handler(struct adapter *adap) +{ + static const struct intr_info pl_intr_info[] = { + { FATALPERR, "T4 fatal parity error", -1, 1 }, + { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) + t4_fatal_err(adap); +} + +#define PF_INTR_MASK (PFSW) +#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ + EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ + CPL_SWITCH | SGE | ULP_TX) + +/** + * t4_slow_intr_handler - control path interrupt handler + * @adapter: the adapter + * + * T4 interrupt handler for non-data global interrupt events, e.g., errors. + * The designation 'slow' is because it involves register reads, while + * data interrupts typically don't involve any MMIOs. + */ +int t4_slow_intr_handler(struct adapter *adapter) +{ + u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); + + if (!(cause & GLBL_INTR_MASK)) + return 0; + if (cause & CIM) + cim_intr_handler(adapter); + if (cause & MPS) + mps_intr_handler(adapter); + if (cause & NCSI) + ncsi_intr_handler(adapter); + if (cause & PL) + pl_intr_handler(adapter); + if (cause & SMB) + smb_intr_handler(adapter); + if (cause & XGMAC0) + xgmac_intr_handler(adapter, 0); + if (cause & XGMAC1) + xgmac_intr_handler(adapter, 1); + if (cause & XGMAC_KR0) + xgmac_intr_handler(adapter, 2); + if (cause & XGMAC_KR1) + xgmac_intr_handler(adapter, 3); + if (cause & PCIE) + pcie_intr_handler(adapter); + if (cause & MC) + mem_intr_handler(adapter, MEM_MC); + if (cause & EDC0) + mem_intr_handler(adapter, MEM_EDC0); + if (cause & EDC1) + mem_intr_handler(adapter, MEM_EDC1); + if (cause & LE) + le_intr_handler(adapter); + if (cause & TP) + tp_intr_handler(adapter); + if (cause & MA) + ma_intr_handler(adapter); + if (cause & PM_TX) + pmtx_intr_handler(adapter); + if (cause & PM_RX) + pmrx_intr_handler(adapter); + if (cause & ULP_RX) + ulprx_intr_handler(adapter); + if (cause & CPL_SWITCH) + cplsw_intr_handler(adapter); + if (cause & SGE) + sge_intr_handler(adapter); + if (cause & ULP_TX) + ulptx_intr_handler(adapter); + + /* Clear the interrupts just processed for which we are the master. */ + t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); + (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ + return 1; +} + +/** + * t4_intr_enable - enable interrupts + * @adapter: the adapter whose interrupts should be enabled + * + * Enable PF-specific interrupts for the calling function and the top-level + * interrupt concentrator for global interrupts. Interrupts are already + * enabled at each module, here we just enable the roots of the interrupt + * hierarchies. + * + * Note: this function should be called only when the driver manages + * non PF-specific interrupts from the various HW modules. Only one PCI + * function at a time should be doing this. 
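+ *
+ * A typical bring-up order (a sketch, assuming the usual probe flow)
+ * is: allocate the queues, install the handler returned by
+ * t4_intr_handler(), then call t4_intr_enable(); teardown runs in
+ * reverse, starting with t4_intr_disable().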
+ */ +void t4_intr_enable(struct adapter *adapter) +{ + u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); + + t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | + ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | + ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | + ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | + ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | + ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | + ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | + EGRESS_SIZE_ERR); + t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); + t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); +} + +/** + * t4_intr_disable - disable interrupts + * @adapter: the adapter whose interrupts should be disabled + * + * Disable interrupts. We only disable the top-level interrupt + * concentrators. The caller must be a PCI function managing global + * interrupts. + */ +void t4_intr_disable(struct adapter *adapter) +{ + u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); + + t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); + t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); +} + +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by HW inexact + * (hash) address matching. + */ +static int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + +/** + * t4_config_rss_range - configure a portion of the RSS mapping table + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: virtual interface whose RSS subtable is to be written + * @start: start entry in the table to write + * @n: how many table entries to write + * @rspq: values for the response queue lookup table + * @nrspq: number of values in @rspq + * + * Programs the selected part of the VI's RSS mapping table with the + * provided values. If @nrspq < @n the supplied values are used repeatedly + * until the full table range is populated. + * + * The caller must ensure the values in @rspq are in the range allowed for + * @viid. + */ +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq) +{ + int ret; + const u16 *rsp = rspq; + const u16 *rsp_end = rspq + nrspq; + struct fw_rss_ind_tbl_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE | + FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.retval_len16 = htonl(FW_LEN16(cmd)); + + /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ + while (n > 0) { + int nq = min(n, 32); + __be32 *qp = &cmd.iq0_to_iq2; + + cmd.niqid = htons(nq); + cmd.startidx = htons(start); + + start += nq; + n -= nq; + + while (nq > 0) { + unsigned int v; + + v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + + *qp++ = htonl(v); + nq -= 3; + } + + ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); + if (ret) + return ret; + } + return 0; +} + +/** + * t4_config_glbl_rss - configure the global RSS mode + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @mode: global RSS mode + * @flags: mode-specific flags + * + * Sets the global RSS mode. 
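+ *
+ * Illustrative call for basic-virtual mode (the TNLMAPEN flag macro is
+ * assumed from the FW API header; the flag choice is not prescriptive):
+ *
+ *	t4_config_glbl_rss(adap, mbox,
+ *			   FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ *			   FW_RSS_GLB_CONFIG_CMD_TNLMAPEN);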
+ */ +int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, + unsigned int flags) +{ + struct fw_rss_glb_config_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + c.retval_len16 = htonl(FW_LEN16(c)); + if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { + c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { + c.u.basicvirtual.mode_pkd = + htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); + } else + return -EINVAL; + return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_tp_get_tcp_stats - read TP's TCP MIB counters + * @adap: the adapter + * @v4: holds the TCP/IP counter values + * @v6: holds the TCP/IPv6 counter values + * + * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. + * Either @v4 or @v6 may be %NULL to skip the corresponding stats. + */ +void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6) +{ + u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; + +#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) +#define STAT(x) val[STAT_IDX(x)] +#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) + + if (v4) { + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, + ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); + v4->tcpOutRsts = STAT(OUT_RST); + v4->tcpInSegs = STAT64(IN_SEG); + v4->tcpOutSegs = STAT64(OUT_SEG); + v4->tcpRetransSegs = STAT64(RXT_SEG); + } + if (v6) { + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, + ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); + v6->tcpOutRsts = STAT(OUT_RST); + v6->tcpInSegs = STAT64(IN_SEG); + v6->tcpOutSegs = STAT64(OUT_SEG); + v6->tcpRetransSegs = STAT64(RXT_SEG); + } +#undef STAT64 +#undef STAT +#undef STAT_IDX +} + +/** + * t4_read_mtu_tbl - returns the values in the HW path MTU table + * @adap: the adapter + * @mtus: where to store the MTU values + * @mtu_log: where to store the MTU base-2 log (may be %NULL) + * + * Reads the HW path MTU table. + */ +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) +{ + u32 v; + int i; + + for (i = 0; i < NMTUS; ++i) { + t4_write_reg(adap, TP_MTU_TABLE, + MTUINDEX(0xff) | MTUVALUE(i)); + v = t4_read_reg(adap, TP_MTU_TABLE); + mtus[i] = MTUVALUE_GET(v); + if (mtu_log) + mtu_log[i] = MTUWIDTH_GET(v); + } +} + +/** + * init_cong_ctrl - initialize congestion control parameters + * @a: the alpha values for congestion control + * @b: the beta values for congestion control + * + * Initialize the congestion control parameters. 
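+ *
+ * The two tables end up in adap->params.a_wnd and adap->params.b_wnd
+ * (see t4_prep_adapter()) and are consumed by t4_load_mtus() below,
+ * where alpha scales each window's additive increment and beta is
+ * programmed as a 3-bit backoff shift.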
+ */ +static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) +{ + a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; + a[9] = 2; + a[10] = 3; + a[11] = 4; + a[12] = 5; + a[13] = 6; + a[14] = 7; + a[15] = 8; + a[16] = 9; + a[17] = 10; + a[18] = 14; + a[19] = 17; + a[20] = 21; + a[21] = 25; + a[22] = 30; + a[23] = 35; + a[24] = 45; + a[25] = 60; + a[26] = 80; + a[27] = 100; + a[28] = 200; + a[29] = 300; + a[30] = 400; + a[31] = 500; + + b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; + b[9] = b[10] = 1; + b[11] = b[12] = 2; + b[13] = b[14] = b[15] = b[16] = 3; + b[17] = b[18] = b[19] = b[20] = b[21] = 4; + b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; + b[28] = b[29] = 6; + b[30] = b[31] = 7; +} + +/* The minimum additive increment value for the congestion control table */ +#define CC_MIN_INCR 2U + +/** + * t4_load_mtus - write the MTU and congestion control HW tables + * @adap: the adapter + * @mtus: the values for the MTU table + * @alpha: the values for the congestion control alpha parameter + * @beta: the values for the congestion control beta parameter + * + * Write the HW MTU table with the supplied MTUs and the high-speed + * congestion control table with the supplied alpha, beta, and MTUs. + * We write the two tables together because the additive increments + * depend on the MTUs. + */ +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta) +{ + static const unsigned int avg_pkts[NCCTRL_WIN] = { + 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, + 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, + 28672, 40960, 57344, 81920, 114688, 163840, 229376 + }; + + unsigned int i, w; + + for (i = 0; i < NMTUS; ++i) { + unsigned int mtu = mtus[i]; + unsigned int log2 = fls(mtu); + + if (!(mtu & ((1 << log2) >> 2))) /* round */ + log2--; + t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | + MTUWIDTH(log2) | MTUVALUE(mtu)); + + for (w = 0; w < NCCTRL_WIN; ++w) { + unsigned int inc; + + inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], + CC_MIN_INCR); + + t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | + (w << 16) | (beta[w] << 13) | inc); + } + } +} + +/** + * get_mps_bg_map - return the buffer groups associated with a port + * @adap: the adapter + * @idx: the port index + * + * Returns a bitmap indicating which MPS buffer groups are associated + * with the given port. Bit i is set if buffer group i is used by the + * port. + */ +static unsigned int get_mps_bg_map(struct adapter *adap, int idx) +{ + u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); + + if (n == 0) + return idx == 0 ? 0xf : 0; + if (n == 1) + return idx < 2 ? (3 << (2 * idx)) : 0; + return 1 << idx; +} + +/** + * t4_get_port_stats - collect port statistics + * @adap: the adapter + * @idx: the port index + * @p: the stats structure to fill + * + * Collect statistics related to the given port from HW. 
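+ *
+ * Usage sketch (illustrative; the physical port index is assumed to be
+ * the port's tx_chan, as assigned in t4_port_init()):
+ *
+ *	struct port_stats s;
+ *
+ *	t4_get_port_stats(adap, pi->tx_chan, &s);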
+ */ +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) +{ + u32 bgmap = get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) +#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) + + p->tx_octets = GET_STAT(TX_PORT_BYTES); + p->tx_frames = GET_STAT(TX_PORT_FRAMES); + p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); + p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); + p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); + p->tx_error_frames = GET_STAT(TX_PORT_ERROR); + p->tx_frames_64 = GET_STAT(TX_PORT_64B); + p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); + p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); + p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); + p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); + p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); + p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); + p->tx_drop = GET_STAT(TX_PORT_DROP); + p->tx_pause = GET_STAT(TX_PORT_PAUSE); + p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); + p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); + p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); + p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); + p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); + p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); + p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); + p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + + p->rx_octets = GET_STAT(RX_PORT_BYTES); + p->rx_frames = GET_STAT(RX_PORT_FRAMES); + p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); + p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); + p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); + p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); + p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); + p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); + p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); + p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); + p->rx_runt = GET_STAT(RX_PORT_LESS_64B); + p->rx_frames_64 = GET_STAT(RX_PORT_64B); + p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); + p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); + p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); + p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); + p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); + p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); + p->rx_pause = GET_STAT(RX_PORT_PAUSE); + p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); + p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); + p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); + p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); + p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); + p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); + p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); + p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; + p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; + p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; + p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; + p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; + p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; + p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; + p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_wol_magic_enable - enable/disable magic packet WoL + * @adap: the adapter + * @port: the physical port index + * @addr: MAC address expected in magic packets, %NULL to disable + * + * Enables/disables magic packet wake-on-LAN for the selected port. 
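+ *
+ * Usage sketch (the port's own MAC address is a typical wake address;
+ * passing NULL instead disables magic packet WoL):
+ *
+ *	t4_wol_magic_enable(adap, pi->tx_chan, netdev->dev_addr);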
+ */ +void t4_wol_magic_enable(struct adapter *adap, unsigned int port, + const u8 *addr) +{ + if (addr) { + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), + (addr[2] << 24) | (addr[3] << 16) | + (addr[4] << 8) | addr[5]); + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), + (addr[0] << 8) | addr[1]); + } + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, + addr ? MAGICEN : 0); +} + +/** + * t4_wol_pat_enable - enable/disable pattern-based WoL + * @adap: the adapter + * @port: the physical port index + * @map: bitmap of which HW pattern filters to set + * @mask0: byte mask for bytes 0-63 of a packet + * @mask1: byte mask for bytes 64-127 of a packet + * @crc: Ethernet CRC for selected bytes + * @enable: enable/disable switch + * + * Sets the pattern filters indicated in @map to mask out the bytes + * specified in @mask0/@mask1 in received packets and compare the CRC of + * the resulting packet against @crc. If @enable is %true pattern-based + * WoL is enabled, otherwise disabled. + */ +int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, + u64 mask0, u64 mask1, unsigned int crc, bool enable) +{ + int i; + + if (!enable) { + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), + PATEN, 0); + return 0; + } + if (map > 0xff) + return -EINVAL; + +#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) + + t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); + t4_write_reg(adap, EPIO_REG(DATA2), mask1); + t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); + + for (i = 0; i < NWOL_PAT; i++, map >>= 1) { + if (!(map & 1)) + continue; + + /* write byte masks */ + t4_write_reg(adap, EPIO_REG(DATA0), mask0); + t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); + t4_read_reg(adap, EPIO_REG(OP)); /* flush */ + if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) + return -ETIMEDOUT; + + /* write CRC */ + t4_write_reg(adap, EPIO_REG(DATA0), crc); + t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); + t4_read_reg(adap, EPIO_REG(OP)); /* flush */ + if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) + return -ETIMEDOUT; + } +#undef EPIO_REG + + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); + return 0; +} + +#define INIT_CMD(var, cmd, rd_wr) do { \ + (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ + FW_CMD_REQUEST | FW_CMD_##rd_wr); \ + (var).retval_len16 = htonl(FW_LEN16(var)); \ +} while (0) + +/** + * t4_mdio_rd - read a PHY register through MDIO + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @phy_addr: the PHY address + * @mmd: the PHY MMD to access (0 for clause 22 PHYs) + * @reg: the register to read + * @valp: where to store the value + * + * Issues a FW command through the given mailbox to read a PHY register. 
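+ *
+ * Usage sketch for a clause-45 read (the MDIO_* constants are assumed
+ * from <linux/mdio.h>, and the PHY address is illustrative):
+ *
+ *	u16 val;
+ *
+ *	ret = t4_mdio_rd(adap, mbox, phy_addr, MDIO_MMD_PMAPMD,
+ *			 MDIO_DEVID1, &val);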
+ */
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 *valp)
+{
+	int ret;
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+	c.cycles_to_len16 = htonl(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+				   FW_LDST_CMD_MMD(mmd));
+	c.u.mdio.raddr = htons(reg);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0)
+		*valp = ntohs(c.u.mdio.rval);
+	return ret;
+}
+
+/**
+ * t4_mdio_wr - write a PHY register through MDIO
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @phy_addr: the PHY address
+ * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ * @reg: the register to write
+ * @val: value to write
+ *
+ * Issues a FW command through the given mailbox to write a PHY register.
+ */
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 val)
+{
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+	c.cycles_to_len16 = htonl(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+				   FW_LDST_CMD_MMD(mmd));
+	c.u.mdio.raddr = htons(reg);
+	c.u.mdio.rval = htons(val);
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state
+ *
+ * Issues a command to establish communication with FW.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+		enum dev_master master, enum dev_state *state)
+{
+	int ret;
+	struct fw_hello_cmd c;
+
+	INIT_CMD(c, HELLO, WRITE);
+	c.err_to_mbasyncnot = htonl(
+		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
+		FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0 && state) {
+		u32 v = ntohl(c.err_to_mbasyncnot);
+		if (v & FW_HELLO_CMD_INIT)
+			*state = DEV_STATE_INIT;
+		else if (v & FW_HELLO_CMD_ERR)
+			*state = DEV_STATE_ERR;
+		else
+			*state = DEV_STATE_UNINIT;
+	}
+	return ret;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_bye_cmd c;
+
+	INIT_CMD(c, BYE, WRITE);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_early_init - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device. This
+ * performs initialization that generally doesn't depend on user input.
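+ *
+ * Typically issued once by the master function after a successful
+ * t4_fw_hello(); the sequence below is illustrative (MASTER_MAY is
+ * assumed from the dev_master enum):
+ *
+ *	ret = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
+ *	if (ret == 0 && state == DEV_STATE_UNINIT)
+ *		ret = t4_early_init(adap, mbox);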
+ */ +int t4_early_init(struct adapter *adap, unsigned int mbox) +{ + struct fw_initialize_cmd c; + + INIT_CMD(c, INITIALIZE, WRITE); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_fw_reset - issue a reset to FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @reset: specifies the type of reset to perform + * + * Issues a reset command of the specified type to FW. + */ +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) +{ + struct fw_reset_cmd c; + + INIT_CMD(c, RESET, WRITE); + c.val = htonl(reset); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_query_params - query FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * + * Reads the value of FW or device parameters. Up to 7 parameters can be + * queried at once. + */ +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val) +{ + int i, ret; + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | + FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | + FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + for (i = 0; i < nparams; i++, p += 2) + *p = htonl(*params++); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) + *val++ = ntohl(*p); + return ret; +} + +/** + * t4_set_params - sets FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * + * Sets the value of FW or device parameters. Up to 7 parameters can be + * specified at once. + */ +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val) +{ + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | + FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + while (nparams--) { + *p++ = htonl(*params++); + *p++ = htonl(*val++); + } + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_cfg_pfvf - configure PF/VF resource limits + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF being configured + * @vf: the VF being configured + * @txq: the max number of egress queues + * @txq_eth_ctrl: the max number of egress Ethernet or control queues + * @rxqi: the max number of interrupt-capable ingress queues + * @rxq: the max number of interruptless ingress queues + * @tc: the PCI traffic class + * @vi: the max number of virtual interfaces + * @cmask: the channel access rights mask for the PF/VF + * @pmask: the port access rights mask for the PF/VF + * @nexact: the maximum number of exact MPS filters + * @rcaps: read capabilities + * @wxcaps: write/execute capabilities + * + * Configures resource limits and capabilities for a physical or virtual + * function. 
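+ *
+ * Illustrative call (every limit below is a made-up example value, and
+ * FW_CMD_CAP_PF is assumed from the FW API header):
+ *
+ *	t4_cfg_pfvf(adap, mbox, pf, 0, 64, 32, 64, 64, 0, 4,
+ *		    0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);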
+ */ +int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, + unsigned int rxqi, unsigned int rxq, unsigned int tc, + unsigned int vi, unsigned int cmask, unsigned int pmask, + unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) +{ + struct fw_pfvf_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | + FW_PFVF_CMD_VFN(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | + FW_PFVF_CMD_NIQ(rxq)); + c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | + FW_PFVF_CMD_PMASK(pmask) | + FW_PFVF_CMD_NEQ(txq)); + c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | + FW_PFVF_CMD_NEXACTF(nexact)); + c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | + FW_PFVF_CMD_WX_CAPS(wxcaps) | + FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_alloc_vi - allocate a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @port: physical port associated with the VI + * @pf: the PF owning the VI + * @vf: the VF owning the VI + * @nmac: number of MAC addresses needed (1 to 5) + * @mac: the MAC addresses of the VI + * @rss_size: size of RSS table slice associated with this VI + * + * Allocates a virtual interface for the given physical port. If @mac is + * not %NULL it contains the MAC addresses of the VI as assigned by FW. + * @mac should be large enough to hold @nmac Ethernet addresses, they are + * stored consecutively so the space needed is @nmac * 6 bytes. + * Returns a negative error number or the non-negative VI id. + */ +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size) +{ + int ret; + struct fw_vi_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); + c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c)); + c.portid_pkd = FW_VI_CMD_PORTID(port); + c.nmac = nmac - 1; + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret) + return ret; + + if (mac) { + memcpy(mac, c.mac, sizeof(c.mac)); + switch (nmac) { + case 5: + memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); + case 4: + memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); + case 3: + memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); + case 2: + memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); + } + } + if (rss_size) + *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); + return FW_VI_CMD_VIID_GET(ntohs(c.type_viid)); +} + +/** + * t4_set_rxmode - set Rx properties of a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @mtu: the new MTU or -1 + * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change + * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change + * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change + * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sets Rx properties of a virtual interface. 
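+ *
+ * Usage sketch: enable promiscuous mode and leave the MTU and all
+ * other properties unchanged (-1 means "no change" as above):
+ *
+ *	t4_set_rxmode(adap, mbox, pi->viid, -1, 1, -1, -1, -1, true);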
+ */ +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok) +{ + struct fw_vi_rxmode_cmd c; + + /* convert to FW values */ + if (mtu < 0) + mtu = FW_RXMODE_MTU_NO_CHG; + if (promisc < 0) + promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + if (all_multi < 0) + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + if (bcast < 0) + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + if (vlanex < 0) + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); + c.retval_len16 = htonl(FW_LEN16(c)); + c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @free: if true any existing filters for this VI id are first removed + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @idx: where to store the index of each allocated filter + * @hash: pointer to hash address filter bitmap + * @sleep_ok: call is allowed to sleep + * + * Allocates an exact-match filter for each of the supplied addresses and + * sets it to the corresponding address. If @idx is not %NULL it should + * have at least @naddr entries, each of which will be set to the index of + * the filter allocated for the corresponding MAC address. If a filter + * could not be allocated for an address its index is set to 0xffff. + * If @hash is not %NULL addresses that fail to allocate an exact filter + * are hashed and update the hash filter bitmap pointed at by @hash. + * + * Returns a negative error number or the number of filters allocated. + */ +int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool free, unsigned int naddr, + const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) +{ + int i, ret; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p; + + if (naddr > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) | + FW_VI_MAC_CMD_VIID(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | + FW_CMD_LEN16((naddr + 2) / 2)); + + for (i = 0, p = c.u.exact; i < naddr; i++, p++) { + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); + } + + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); + if (ret) + return ret; + + for (i = 0, p = c.u.exact; i < naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + + if (idx) + idx[i] = index >= NEXACT_MAC ? 
0xffff : index; + if (index < NEXACT_MAC) + ret++; + else if (hash) + *hash |= (1ULL << hash_mac_addr(addr[i])); + } + return ret; +} + +/** + * t4_change_mac - modifies the exact-match filter for a MAC address + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @idx: index of existing filter for old value of MAC address, or -1 + * @addr: the new MAC address value + * @persist: whether a new MAC allocation should be persistent + * @add_smt: if true also add the address to the HW SMT + * + * Modifies an exact-match filter and sets it to the new MAC address. + * Note that in general it is not possible to modify the value of a given + * filter so the generic way to modify an address filter is to free the one + * being used by the old address value and allocate a new filter for the + * new address value. @idx can be -1 if the address is a new addition. + * + * Returns a negative error number or the index of the filter with the new + * MAC value. + */ +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt) +{ + int ret, mode; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p = c.u.exact; + + if (idx < 0) /* new allocation */ + idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; + mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); + c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_SMAC_RESULT(mode) | + FW_VI_MAC_CMD_IDX(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) { + ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + if (ret >= NEXACT_MAC) + ret = -ENOMEM; + } + return ret; +} + +/** + * t4_set_addr_hash - program the MAC inexact-match hash filter + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @ucast: whether the hash filter should also match unicast addresses + * @vec: the value to be written to the hash filter + * @sleep_ok: call is allowed to sleep + * + * Sets the 64-bit inexact-match hash filter for a virtual interface. + */ +int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok) +{ + struct fw_vi_mac_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | + FW_VI_MAC_CMD_HASHUNIEN(ucast) | + FW_CMD_LEN16(1)); + c.u.hash.hashvec = cpu_to_be64(vec); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_enable_vi - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a virtual interface. 
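+ *
+ * Usage sketch (bring a VI's Rx and Tx paths up together; the viid is
+ * the one returned by t4_alloc_vi()):
+ *
+ *	t4_enable_vi(adap, mbox, pi->viid, true, true);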
+ */ +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en) +{ + struct fw_vi_enable_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | + FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_identify_port - identify a VI's port by blinking its LED + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @nblinks: how many times to blink LED at 2.5 Hz + * + * Identifies a VI's port by blinking its LED. + */ +int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, + unsigned int nblinks) +{ + struct fw_vi_enable_cmd c; + + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); + c.blinkdur = htons(nblinks); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_iq_free - free an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqtype: the ingress queue type + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Frees an ingress queue and its associated FLs, if any. + */ +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | + FW_IQ_CMD_VFN(vf)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c)); + c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype)); + c.iqid = htons(iqid); + c.fl0id = htons(fl0id); + c.fl1id = htons(fl1id); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_eth_eq_free - free an Ethernet egress queue + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queue + * @vf: the VF owning the queue + * @eqid: egress queue id + * + * Frees an Ethernet egress queue. + */ +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid) +{ + struct fw_eq_eth_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) | + FW_EQ_ETH_CMD_VFN(vf)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); + c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_ctrl_eq_free - free a control egress queue + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queue + * @vf: the VF owning the queue + * @eqid: egress queue id + * + * Frees a control egress queue. 
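+ *
+ * Usage sketch (the egress queue id is the one FW returned when the
+ * control queue was allocated; the cntxt_id field name is an assumed
+ * piece of driver bookkeeping, not defined in this file):
+ *
+ *	t4_ctrl_eq_free(adap, mbox, pf, vf, q->cntxt_id);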
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ctrl_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
+			    FW_EQ_CTRL_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ofld_eq_free - free an offload egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an offload egress queue.
+ */
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ofld_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
+			    FW_EQ_OFLD_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
+	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+	u8 opcode = *(const u8 *)rpl;
+
+	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
+		int speed = 0, fc = 0;
+		const struct fw_port_cmd *p = (void *)rpl;
+		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
+		int port = adap->chan_map[chan];
+		struct port_info *pi = adap2pinfo(adap, port);
+		struct link_config *lc = &pi->link_cfg;
+		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
+		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
+
+		if (stat & FW_PORT_CMD_RXPAUSE)
+			fc |= PAUSE_RX;
+		if (stat & FW_PORT_CMD_TXPAUSE)
+			fc |= PAUSE_TX;
+		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+			speed = SPEED_100;
+		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+			speed = SPEED_1000;
+		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+			speed = SPEED_10000;
+
+		if (link_ok != lc->link_ok || speed != lc->speed ||
+		    fc != lc->fc) {                    /* something changed */
+			lc->link_ok = link_ok;
+			lc->speed = speed;
+			lc->fc = fc;
+			t4_os_link_changed(adap, port, link_ok);
+		}
+		if (mod != pi->mod_type) {
+			pi->mod_type = mod;
+			t4_os_portmod_changed(adap, port);
+		}
+	}
+	return 0;
+}
+
+static void __devinit get_pci_mode(struct adapter *adapter,
+				   struct pci_params *p)
+{
+	u16 val;
+	u32 pcie_cap = pci_pcie_cap(adapter->pdev);
+
+	if (pcie_cap) {
+		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
+				     &val);
+		p->speed = val & PCI_EXP_LNKSTA_CLS;
+		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
+	}
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
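+ *
+ * Called once per port, e.g. from t4_port_init() below with the port's
+ * FW capability word:
+ *
+ *	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));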
+ */
+static void __devinit init_link_config(struct link_config *lc,
+				       unsigned int caps)
+{
+	lc->supported = caps;
+	lc->requested_speed = 0;
+	lc->speed = 0;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & FW_PORT_CAP_ANEG) {
+		lc->advertising = lc->supported & ADVERT_MASK;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+int t4_wait_dev_ready(struct adapter *adap)
+{
+	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
+		return 0;
+	msleep(500);
+	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
+}
+
+static int __devinit get_flash_params(struct adapter *adap)
+{
+	int ret;
+	u32 info;
+
+	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
+	if (!ret)
+		ret = sf1_read(adap, 3, 0, 1, &info);
+	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
+	if (ret)
+		return ret;
+
+	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
+		return -EINVAL;
+	info >>= 16;                           /* log2 of size */
+	if (info >= 0x14 && info < 0x18)
+		adap->params.sf_nsec = 1 << (info - 16);
+	else if (info == 0x18)
+		adap->params.sf_nsec = 64;
+	else
+		return -EINVAL;
+	adap->params.sf_size = 1 << info;
+	adap->params.sf_fw_start =
+		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
+	return 0;
+}
+
+/**
+ * t4_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int __devinit t4_prep_adapter(struct adapter *adapter)
+{
+	int ret;
+
+	ret = t4_wait_dev_ready(adapter);
+	if (ret < 0)
+		return ret;
+
+	get_pci_mode(adapter, &adapter->params.pci);
+	adapter->params.rev = t4_read_reg(adapter, PL_REV);
+
+	ret = get_flash_params(adapter);
+	if (ret < 0) {
+		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
+		return ret;
+	}
+
+	ret = get_vpd_params(adapter, &adapter->params.vpd);
+	if (ret < 0)
+		return ret;
+
+	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+	/*
+	 * Default port for debugging in case we can't reach FW.
+	 */
+	adapter->params.nports = 1;
+	adapter->params.portvec = 1;
+	return 0;
+}
+
+int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+	u8 addr[6];
+	int ret, i, j = 0;
+	struct fw_port_cmd c;
+	struct fw_rss_vi_config_cmd rvc;
+
+	memset(&c, 0, sizeof(c));
+	memset(&rvc, 0, sizeof(rvc));
+
+	for_each_port(adap, i) {
+		unsigned int rss_size;
+		struct port_info *p = adap2pinfo(adap, i);
+
+		while ((adap->params.portvec & (1 << j)) == 0)
+			j++;
+
+		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
+				       FW_CMD_REQUEST | FW_CMD_READ |
+				       FW_PORT_CMD_PORTID(j));
+		c.action_to_len16 = htonl(
+			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+			FW_LEN16(c));
+		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+		if (ret)
+			return ret;
+
+		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
+		if (ret < 0)
+			return ret;
+
+		p->viid = ret;
+		p->tx_chan = j;
+		p->lport = j;
+		p->rss_size = rss_size;
+		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+		memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
+		adap->port[i]->dev_id = j;
+
+		ret = ntohl(c.u.info.lstatus_to_modtype);
+		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
+ FW_PORT_CMD_MDIOADDR_GET(ret) : -1; + p->port_type = FW_PORT_CMD_PTYPE_GET(ret); + p->mod_type = FW_PORT_MOD_TYPE_NA; + + rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ | + FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); + rvc.retval_len16 = htonl(FW_LEN16(rvc)); + ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); + if (ret) + return ret; + p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen); + + init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); + j++; + } + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h new file mode 100644 index 0000000..c26b455 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -0,0 +1,140 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#ifndef __T4_HW_H
+#define __T4_HW_H
+
+#include <linux/types.h>
+
+enum {
+	NCHAN = 4,            /* # of HW channels */
+	MAX_MTU = 9600,       /* max MAC MTU, excluding header + FCS */
+	EEPROMSIZE = 17408,   /* Serial EEPROM physical size */
+	EEPROMVSIZE = 32768,  /* Serial EEPROM virtual address space size */
+	EEPROMPFSIZE = 1024,  /* EEPROM writable area size for PFn, n>0 */
+	RSS_NENTRIES = 2048,  /* # of entries in RSS mapping table */
+	TCB_SIZE = 128,       /* TCB size */
+	NMTUS = 16,           /* size of MTU table */
+	NCCTRL_WIN = 32,      /* # of congestion control windows */
+	NEXACT_MAC = 336,     /* # of exact MAC address filters */
+	L2T_SIZE = 4096,      /* # of L2T entries */
+	MBOX_LEN = 64,        /* mailbox size in bytes */
+	TRACE_LEN = 112,      /* length of trace data and mask */
+	FILTER_OPT_LEN = 36,  /* filter tuple width for optional components */
+	NWOL_PAT = 8,         /* # of WoL patterns */
+	WOL_PAT_LEN = 128,    /* length of WoL patterns */
+};
+
+enum {
+	SF_PAGE_SIZE = 256,   /* serial flash page size */
+};
+
+enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
+
+enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV };    /* mailbox owners */
+
+enum {
+	SGE_MAX_WR_LEN = 512,     /* max WR size in bytes */
+	SGE_NTIMERS = 6,          /* # of interrupt holdoff timer values */
+	SGE_NCOUNTERS = 4,        /* # of interrupt packet counter values */
+
+	SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
+	SGE_TIMER_UPD_CIDX = 7,   /* update cidx only */
+
+	SGE_EQ_IDXSIZE = 64,      /* egress queue pidx/cidx unit size */
+
+	SGE_INTRDST_PCI = 0,      /* interrupt destination is PCI-E */
+	SGE_INTRDST_IQ = 1,       /* destination is an ingress queue */
+
+	SGE_UPDATEDEL_NONE = 0,   /* ingress queue pidx update delivery */
+	SGE_UPDATEDEL_INTR = 1,   /* interrupt */
+	SGE_UPDATEDEL_STPG = 2,   /* status page */
+	SGE_UPDATEDEL_BOTH = 3,   /* interrupt and status page */
+
+	SGE_HOSTFCMODE_NONE = 0,  /* egress queue cidx updates */
+	SGE_HOSTFCMODE_IQ = 1,    /* sent to ingress queue */
+	SGE_HOSTFCMODE_STPG = 2,  /* sent to status page */
+	SGE_HOSTFCMODE_BOTH = 3,  /* ingress queue and status page */
+
+	SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */
+	SGE_FETCHBURSTMIN_32B = 1,
+	SGE_FETCHBURSTMIN_64B = 2,
+	SGE_FETCHBURSTMIN_128B = 3,
+
+	SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */
+	SGE_FETCHBURSTMAX_128B = 1,
+	SGE_FETCHBURSTMAX_256B = 2,
+	SGE_FETCHBURSTMAX_512B = 3,
+
+	SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */
+	SGE_CIDXFLUSHTHRESH_2 = 1,
+	SGE_CIDXFLUSHTHRESH_4 = 2,
+	SGE_CIDXFLUSHTHRESH_8 = 3,
+	SGE_CIDXFLUSHTHRESH_16 = 4,
+	SGE_CIDXFLUSHTHRESH_32 = 5,
+	SGE_CIDXFLUSHTHRESH_64 = 6,
+	SGE_CIDXFLUSHTHRESH_128 = 7,
+
+	SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
+};
+
+struct sge_qstat {              /* data written to SGE queue status entries */
+	__be32 qid;
+	__be16 cidx;
+	__be16 pidx;
+};
+
+/*
+ * Structure for last 128 bits of response descriptors
+ */
+struct rsp_ctrl {
+	__be32 hdrbuflen_pidx;
+	__be32 pldbuflen_qid;
+	union {
+		u8 type_gen;
+		__be64 last_flit;
+	};
+};
+
+#define RSPD_NEWBUF 0x80000000U
+#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
+#define RSPD_QID(x) RSPD_LEN(x)
+
+#define RSPD_GEN(x) ((x) >> 7)
+#define RSPD_TYPE(x) (((x) >> 4) & 3)
+
+#define QINTR_CNT_EN 0x1
+#define QINTR_TIMER_IDX(x) ((x) << 1)
+#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
+#endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
new file mode 100644
index 0000000..eb71b82
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -0,0 +1,678 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_MSG_H
+#define __T4_MSG_H
+
+#include <linux/types.h>
+
+enum {
+	CPL_PASS_OPEN_REQ = 0x1,
+	CPL_PASS_ACCEPT_RPL = 0x2,
+	CPL_ACT_OPEN_REQ = 0x3,
+	CPL_SET_TCB_FIELD = 0x5,
+	CPL_GET_TCB = 0x6,
+	CPL_CLOSE_CON_REQ = 0x8,
+	CPL_CLOSE_LISTSRV_REQ = 0x9,
+	CPL_ABORT_REQ = 0xA,
+	CPL_ABORT_RPL = 0xB,
+	CPL_RX_DATA_ACK = 0xD,
+	CPL_TX_PKT = 0xE,
+	CPL_L2T_WRITE_REQ = 0x12,
+	CPL_TID_RELEASE = 0x1A,
+
+	CPL_CLOSE_LISTSRV_RPL = 0x20,
+	CPL_L2T_WRITE_RPL = 0x23,
+	CPL_PASS_OPEN_RPL = 0x24,
+	CPL_ACT_OPEN_RPL = 0x25,
+	CPL_PEER_CLOSE = 0x26,
+	CPL_ABORT_REQ_RSS = 0x2B,
+	CPL_ABORT_RPL_RSS = 0x2D,
+
+	CPL_CLOSE_CON_RPL = 0x32,
+	CPL_ISCSI_HDR = 0x33,
+	CPL_RDMA_CQE = 0x35,
+	CPL_RDMA_CQE_READ_RSP = 0x36,
+	CPL_RDMA_CQE_ERR = 0x37,
+	CPL_RX_DATA = 0x39,
+	CPL_SET_TCB_RPL = 0x3A,
+	CPL_RX_PKT = 0x3B,
+	CPL_RX_DDP_COMPLETE = 0x3F,
+
+	CPL_ACT_ESTABLISH = 0x40,
+	CPL_PASS_ESTABLISH = 0x41,
+	CPL_RX_DATA_DDP = 0x42,
+	CPL_PASS_ACCEPT_REQ = 0x44,
+
+	CPL_RDMA_READ_REQ = 0x60,
+
+	CPL_PASS_OPEN_REQ6 = 0x81,
+	CPL_ACT_OPEN_REQ6 = 0x83,
+
+	CPL_RDMA_TERMINATE = 0xA2,
+	CPL_RDMA_WRITE = 0xA4,
+	CPL_SGE_EGR_UPDATE = 0xA5,
+
+	CPL_TRACE_PKT = 0xB0,
+
+	CPL_FW4_MSG = 0xC0,
+	CPL_FW4_PLD = 0xC1,
+	CPL_FW4_ACK = 0xC3,
+
+	CPL_FW6_MSG = 0xE0,
+	CPL_FW6_PLD = 0xE1,
+	CPL_TX_PKT_LSO = 0xED,
+	CPL_TX_PKT_XT = 0xEE,
+
+	NUM_CPL_CMDS
+};
+
+enum CPL_error {
+	CPL_ERR_NONE = 0,
+	CPL_ERR_TCAM_FULL = 3,
+	CPL_ERR_BAD_LENGTH = 15,
+	CPL_ERR_BAD_ROUTE = 18,
+	CPL_ERR_CONN_RESET = 20,
+	CPL_ERR_CONN_EXIST_SYNRECV = 21,
+	CPL_ERR_CONN_EXIST = 22,
+	CPL_ERR_ARP_MISS = 23,
+	CPL_ERR_BAD_SYN = 24,
+	CPL_ERR_CONN_TIMEDOUT = 30,
+	CPL_ERR_XMIT_TIMEDOUT = 31,
+	CPL_ERR_PERSIST_TIMEDOUT = 32,
+	CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+	CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+	CPL_ERR_RTX_NEG_ADVICE = 35,
+	CPL_ERR_PERSIST_NEG_ADVICE = 36,
+	CPL_ERR_ABORT_FAILED = 42,
+	CPL_ERR_IWARP_FLM = 50,
+};
+
+enum {
+	ULP_MODE_NONE = 0,
+	ULP_MODE_ISCSI = 2,
+	ULP_MODE_RDMA = 4,
+	ULP_MODE_TCPDDP = 5,
+	ULP_MODE_FCOE = 6,
+};
+
+enum
{ + ULP_CRC_HEADER = 1 << 0, + ULP_CRC_DATA = 1 << 1 +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, +}; + +enum { /* TX_PKT_XT checksum types */ + TX_CSUM_TCP = 0, + TX_CSUM_UDP = 1, + TX_CSUM_CRC16 = 4, + TX_CSUM_CRC32 = 5, + TX_CSUM_CRC32C = 6, + TX_CSUM_FCOE = 7, + TX_CSUM_TCPIP = 8, + TX_CSUM_UDPIP = 9, + TX_CSUM_TCPIP6 = 10, + TX_CSUM_UDPIP6 = 11, + TX_CSUM_IP = 12, +}; + +union opcode_tid { + __be32 opcode_tid; + u8 opcode; +}; + +#define CPL_OPCODE(x) ((x) << 24) +#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid)) +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) +#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF) + +/* partitioning of TID fields that also carry a queue id */ +#define GET_TID_TID(x) ((x) & 0x3fff) +#define GET_TID_QID(x) (((x) >> 14) & 0x3ff) +#define TID_QID(x) ((x) << 14) + +struct rss_header { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 channel:2; + u8 filter_hit:1; + u8 filter_tid:1; + u8 hash_type:2; + u8 ipv6:1; + u8 send2fw:1; +#else + u8 send2fw:1; + u8 ipv6:1; + u8 hash_type:2; + u8 filter_tid:1; + u8 filter_hit:1; + u8 channel:2; +#endif + __be16 qid; + __be32 hash_val; +}; + +struct work_request_hdr { + __be32 wr_hi; + __be32 wr_mid; + __be64 wr_lo; +}; + +#define WR_HDR struct work_request_hdr wr + +struct cpl_pass_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; +#define TX_CHAN(x) ((x) << 2) +#define DELACK(x) ((x) << 5) +#define ULP_MODE(x) ((x) << 8) +#define RCV_BUFSIZ(x) ((x) << 12) +#define DSCP(x) ((x) << 22) +#define SMAC_SEL(x) ((u64)(x) << 28) +#define L2T_IDX(x) ((u64)(x) << 36) +#define NAGLE(x) ((u64)(x) << 49) +#define WND_SCALE(x) ((u64)(x) << 50) +#define KEEP_ALIVE(x) ((u64)(x) << 54) +#define MSS_IDX(x) ((u64)(x) << 60) + __be64 opt1; +#define SYN_RSS_ENABLE (1 << 0) +#define SYN_RSS_QUEUE(x) ((x) << 2) +#define CONN_POLICY_ASK (1 << 22) +}; + +struct cpl_pass_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be64 opt1; +}; + +struct cpl_pass_open_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_pass_accept_rpl { + WR_HDR; + union opcode_tid ot; + __be32 opt2; +#define RSS_QUEUE(x) ((x) << 0) +#define RSS_QUEUE_VALID (1 << 10) +#define RX_COALESCE_VALID(x) ((x) << 11) +#define RX_COALESCE(x) ((x) << 12) +#define TX_QUEUE(x) ((x) << 23) +#define RX_CHANNEL(x) ((x) << 26) +#define WND_SCALE_EN(x) ((x) << 28) +#define TSTAMPS_EN(x) ((x) << 29) +#define SACK_EN(x) ((x) << 30) + __be64 opt0; +}; + +struct cpl_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be32 params; + __be32 opt2; +}; + +struct cpl_act_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be32 params; + __be32 opt2; +}; + +struct cpl_act_open_rpl { + union opcode_tid ot; + __be32 atid_status; +#define GET_AOPEN_STATUS(x) ((x) & 0xff) +#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff) +}; + +struct cpl_pass_establish { + union opcode_tid ot; + __be32 rsvd; + __be32 tos_stid; +#define GET_POPEN_TID(x) ((x) & 0xffffff) +#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff) + __be16 mac_idx; + __be16 tcp_opt; +#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) 
+#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1) +#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) +#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) +#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf) + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_act_establish { + union opcode_tid ot; + __be32 rsvd; + __be32 tos_atid; + __be16 mac_idx; + __be16 tcp_opt; + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_get_tcb { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; +#define QUEUENO(x) ((x) << 0) +#define REPLY_CHAN(x) ((x) << 14) +#define NO_REPLY(x) ((x) << 15) + __be16 cookie; +}; + +struct cpl_set_tcb_field { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; + __be16 word_cookie; +#define TCB_WORD(x) ((x) << 0) +#define TCB_COOKIE(x) ((x) << 5) + __be64 mask; + __be64 val; +}; + +struct cpl_set_tcb_rpl { + union opcode_tid ot; + __be16 rsvd; + u8 cookie; + u8 status; + __be64 oldval; +}; + +struct cpl_close_con_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_close_con_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; + __be32 snd_nxt; + __be32 rcv_nxt; +}; + +struct cpl_close_listsvr_req { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; +#define LISTSVR_IPV6 (1 << 14) + __be16 rsvd; +}; + +struct cpl_close_listsvr_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req_rss { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_abort_rpl_rss { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_rpl { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_peer_close { + union opcode_tid ot; + __be32 rcv_nxt; +}; + +struct cpl_tid_release { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_tx_pkt_core { + __be32 ctrl0; +#define TXPKT_VF(x) ((x) << 0) +#define TXPKT_PF(x) ((x) << 8) +#define TXPKT_VF_VLD (1 << 11) +#define TXPKT_OVLAN_IDX(x) ((x) << 12) +#define TXPKT_INTF(x) ((x) << 16) +#define TXPKT_INS_OVLAN (1 << 21) +#define TXPKT_OPCODE(x) ((x) << 24) + __be16 pack; + __be16 len; + __be64 ctrl1; +#define TXPKT_CSUM_END(x) ((x) << 12) +#define TXPKT_CSUM_START(x) ((x) << 20) +#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20) +#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30) +#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34) +#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40) +#define TXPKT_VLAN(x) ((u64)(x) << 44) +#define TXPKT_VLAN_VLD (1ULL << 60) +#define TXPKT_IPCSUM_DIS (1ULL << 62) +#define TXPKT_L4CSUM_DIS (1ULL << 63) +}; + +struct cpl_tx_pkt { + WR_HDR; + struct cpl_tx_pkt_core c; +}; + +#define cpl_tx_pkt_xt cpl_tx_pkt + +struct cpl_tx_pkt_lso_core { + __be32 lso_ctrl; +#define LSO_TCPHDR_LEN(x) ((x) << 0) +#define LSO_IPHDR_LEN(x) ((x) << 4) +#define LSO_ETHHDR_LEN(x) ((x) << 16) +#define LSO_IPV6(x) ((x) << 20) +#define LSO_LAST_SLICE (1 << 22) +#define LSO_FIRST_SLICE (1 << 23) +#define LSO_OPCODE(x) ((x) << 24) + __be16 ipid_ofst; + __be16 mss; + __be32 seqno_offset; + __be32 len; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +struct cpl_tx_pkt_lso { + WR_HDR; + struct cpl_tx_pkt_lso_core c; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +struct cpl_iscsi_hdr { + union opcode_tid ot; + __be16 pdu_len_ddp; +#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF) +#define ISCSI_DDP (1 << 15) + __be16 len; + __be32 seq; + __be16 urg; + u8 rsvd; + u8 status; +}; + +struct 
cpl_rx_data { + union opcode_tid ot; + __be16 rsvd; + __be16 len; + __be32 seq; + __be16 urg; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 dack_mode:2; + u8 psh:1; + u8 heartbeat:1; + u8 ddp_off:1; + u8 :3; +#else + u8 :3; + u8 ddp_off:1; + u8 heartbeat:1; + u8 psh:1; + u8 dack_mode:2; +#endif + u8 status; +}; + +struct cpl_rx_data_ack { + WR_HDR; + union opcode_tid ot; + __be32 credit_dack; +#define RX_CREDITS(x) ((x) << 0) +#define RX_FORCE_ACK(x) ((x) << 28) +}; + +struct cpl_rx_pkt { + struct rss_header rsshdr; + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 csum_calc:1; + u8 ipmi_pkt:1; + u8 vlan_ex:1; + u8 ip_frag:1; +#else + u8 ip_frag:1; + u8 vlan_ex:1; + u8 ipmi_pkt:1; + u8 csum_calc:1; + u8 iff:4; +#endif + __be16 csum; + __be16 vlan; + __be16 len; + __be32 l2info; +#define RXF_UDP (1 << 22) +#define RXF_TCP (1 << 23) +#define RXF_IP (1 << 24) +#define RXF_IP6 (1 << 25) + __be16 hdr_len; + __be16 err_vec; +}; + +struct cpl_trace_pkt { + u8 opcode; + u8 intf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 runt:4; + u8 filter_hit:4; + u8 :6; + u8 err:1; + u8 trunc:1; +#else + u8 filter_hit:4; + u8 runt:4; + u8 trunc:1; + u8 err:1; + u8 :6; +#endif + __be16 rsvd; + __be16 len; + __be64 tstamp; +}; + +struct cpl_l2t_write_req { + WR_HDR; + union opcode_tid ot; + __be16 params; +#define L2T_W_INFO(x) ((x) << 2) +#define L2T_W_PORT(x) ((x) << 8) +#define L2T_W_NOREPLY(x) ((x) << 15) + __be16 l2t_idx; + __be16 vlan; + u8 dst_mac[6]; +}; + +struct cpl_l2t_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_rdma_terminate { + union opcode_tid ot; + __be16 rsvd; + __be16 len; +}; + +struct cpl_sge_egr_update { + __be32 opcode_qid; +#define EGR_QID(x) ((x) & 0x1FFFF) + __be16 cidx; + __be16 pidx; +}; + +struct cpl_fw4_pld { + u8 opcode; + u8 rsvd0[3]; + u8 type; + u8 rsvd1; + __be16 len; + __be64 data; + __be64 rsvd2; +}; + +struct cpl_fw6_pld { + u8 opcode; + u8 rsvd[5]; + __be16 len; + __be64 data[4]; +}; + +struct cpl_fw4_msg { + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[2]; +}; + +struct cpl_fw4_ack { + union opcode_tid ot; + u8 credits; + u8 rsvd0[2]; + u8 seq_vld; + __be32 snd_nxt; + __be32 snd_una; + __be64 rsvd1; +}; + +struct cpl_fw6_msg { + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[4]; +}; + +/* cpl_fw6_msg.type values */ +enum { + FW6_TYPE_CMD_RPL = 0, +}; + +enum { + ULP_TX_MEM_READ = 2, + ULP_TX_MEM_WRITE = 3, + ULP_TX_PKT = 4 +}; + +enum { + ULP_TX_SC_NOOP = 0x80, + ULP_TX_SC_IMM = 0x81, + ULP_TX_SC_DSGL = 0x82, + ULP_TX_SC_ISGL = 0x83 +}; + +struct ulptx_sge_pair { + __be32 len[2]; + __be64 addr[2]; +}; + +struct ulptx_sgl { + __be32 cmd_nsge; +#define ULPTX_CMD(x) ((x) << 24) +#define ULPTX_NSGE(x) ((x) << 0) + __be32 len0; + __be64 addr0; + struct ulptx_sge_pair sge[0]; +}; + +struct ulp_mem_io { + WR_HDR; + __be32 cmd; +#define ULP_MEMIO_ORDER(x) ((x) << 23) + __be32 len16; /* command length */ + __be32 dlen; /* data length in 32-byte units */ +#define ULP_MEMIO_DATA_LEN(x) ((x) << 0) + __be32 lock_addr; +#define ULP_MEMIO_ADDR(x) ((x) << 0) +#define ULP_MEMIO_LOCK(x) ((x) << 31) +}; + +#endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h new file mode 100644 index 0000000..0adc5bc --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -0,0 +1,885 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2010 Chelsio Communications, Inc. 
All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4_REGS_H +#define __T4_REGS_H + +#define MYPF_BASE 0x1b000 +#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) + +#define PF0_BASE 0x1e000 +#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) + +#define PF_STRIDE 0x400 +#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) +#define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) + +#define MYPORT_BASE 0x1c000 +#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) + +#define PORT0_BASE 0x20000 +#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) + +#define PORT_STRIDE 0x2000 +#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) +#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) + +#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR) +#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx) + +#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) + +#define SGE_PF_KDOORBELL 0x0 +#define QID_MASK 0xffff8000U +#define QID_SHIFT 15 +#define QID(x) ((x) << QID_SHIFT) +#define DBPRIO 0x00004000U +#define PIDX_MASK 0x00003fffU +#define PIDX_SHIFT 0 +#define PIDX(x) ((x) << PIDX_SHIFT) + +#define SGE_PF_GTS 0x4 +#define INGRESSQID_MASK 0xffff0000U +#define INGRESSQID_SHIFT 16 +#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT) +#define TIMERREG_MASK 0x0000e000U +#define TIMERREG_SHIFT 13 +#define TIMERREG(x) ((x) << TIMERREG_SHIFT) +#define SEINTARM_MASK 0x00001000U +#define SEINTARM_SHIFT 12 +#define SEINTARM(x) ((x) << SEINTARM_SHIFT) +#define CIDXINC_MASK 0x00000fffU +#define CIDXINC_SHIFT 0 +#define CIDXINC(x) ((x) << CIDXINC_SHIFT) + +#define SGE_CONTROL 0x1008 +#define DCASYSTYPE 0x00080000U +#define RXPKTCPLMODE 0x00040000U +#define EGRSTATUSPAGESIZE 0x00020000U +#define PKTSHIFT_MASK 0x00001c00U +#define PKTSHIFT_SHIFT 10 +#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) +#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) +#define INGPCIEBOUNDARY_MASK 0x00000380U +#define INGPCIEBOUNDARY_SHIFT 7 +#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) +#define INGPADBOUNDARY_MASK 
0x00000070U +#define INGPADBOUNDARY_SHIFT 4 +#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) +#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ + >> INGPADBOUNDARY_SHIFT) +#define EGRPCIEBOUNDARY_MASK 0x0000000eU +#define EGRPCIEBOUNDARY_SHIFT 1 +#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) +#define GLOBALENABLE 0x00000001U + +#define SGE_HOST_PAGE_SIZE 0x100c +#define HOSTPAGESIZEPF0_MASK 0x0000000fU +#define HOSTPAGESIZEPF0_SHIFT 0 +#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) + +#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 +#define QUEUESPERPAGEPF0_MASK 0x0000000fU +#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) + +#define SGE_INT_CAUSE1 0x1024 +#define SGE_INT_CAUSE2 0x1030 +#define SGE_INT_CAUSE3 0x103c +#define ERR_FLM_DBP 0x80000000U +#define ERR_FLM_IDMA1 0x40000000U +#define ERR_FLM_IDMA0 0x20000000U +#define ERR_FLM_HINT 0x10000000U +#define ERR_PCIE_ERROR3 0x08000000U +#define ERR_PCIE_ERROR2 0x04000000U +#define ERR_PCIE_ERROR1 0x02000000U +#define ERR_PCIE_ERROR0 0x01000000U +#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U +#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U +#define ERR_INVALID_CIDX_INC 0x00200000U +#define ERR_ITP_TIME_PAUSED 0x00100000U +#define ERR_CPL_OPCODE_0 0x00080000U +#define ERR_DROPPED_DB 0x00040000U +#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U +#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U +#define ERR_BAD_DB_PIDX3 0x00008000U +#define ERR_BAD_DB_PIDX2 0x00004000U +#define ERR_BAD_DB_PIDX1 0x00002000U +#define ERR_BAD_DB_PIDX0 0x00001000U +#define ERR_ING_PCIE_CHAN 0x00000800U +#define ERR_ING_CTXT_PRIO 0x00000400U +#define ERR_EGR_CTXT_PRIO 0x00000200U +#define DBFIFO_HP_INT 0x00000100U +#define DBFIFO_LP_INT 0x00000080U +#define REG_ADDRESS_ERR 0x00000040U +#define INGRESS_SIZE_ERR 0x00000020U +#define EGRESS_SIZE_ERR 0x00000010U +#define ERR_INV_CTXT3 0x00000008U +#define ERR_INV_CTXT2 0x00000004U +#define ERR_INV_CTXT1 0x00000002U +#define ERR_INV_CTXT0 0x00000001U + +#define SGE_INT_ENABLE3 0x1040 +#define SGE_FL_BUFFER_SIZE0 0x1044 +#define SGE_FL_BUFFER_SIZE1 0x1048 +#define SGE_INGRESS_RX_THRESHOLD 0x10a0 +#define THRESHOLD_0_MASK 0x3f000000U +#define THRESHOLD_0_SHIFT 24 +#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT) +#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT) +#define THRESHOLD_1_MASK 0x003f0000U +#define THRESHOLD_1_SHIFT 16 +#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT) +#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT) +#define THRESHOLD_2_MASK 0x00003f00U +#define THRESHOLD_2_SHIFT 8 +#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT) +#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT) +#define THRESHOLD_3_MASK 0x0000003fU +#define THRESHOLD_3_SHIFT 0 +#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) +#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) + +#define SGE_TIMER_VALUE_0_AND_1 0x10b8 +#define TIMERVALUE0_MASK 0xffff0000U +#define TIMERVALUE0_SHIFT 16 +#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT) +#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT) +#define TIMERVALUE1_MASK 0x0000ffffU +#define TIMERVALUE1_SHIFT 0 +#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT) +#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) + +#define SGE_TIMER_VALUE_2_AND_3 0x10bc +#define SGE_TIMER_VALUE_4_AND_5 0x10c0 +#define SGE_DEBUG_INDEX 0x10cc +#define SGE_DEBUG_DATA_HIGH 0x10d0 +#define SGE_DEBUG_DATA_LOW 0x10d4 +#define 
SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 + +#define PCIE_PF_CLI 0x44 +#define PCIE_INT_CAUSE 0x3004 +#define UNXSPLCPLERR 0x20000000U +#define PCIEPINT 0x10000000U +#define PCIESINT 0x08000000U +#define RPLPERR 0x04000000U +#define RXWRPERR 0x02000000U +#define RXCPLPERR 0x01000000U +#define PIOTAGPERR 0x00800000U +#define MATAGPERR 0x00400000U +#define INTXCLRPERR 0x00200000U +#define FIDPERR 0x00100000U +#define CFGSNPPERR 0x00080000U +#define HRSPPERR 0x00040000U +#define HREQPERR 0x00020000U +#define HCNTPERR 0x00010000U +#define DRSPPERR 0x00008000U +#define DREQPERR 0x00004000U +#define DCNTPERR 0x00002000U +#define CRSPPERR 0x00001000U +#define CREQPERR 0x00000800U +#define CCNTPERR 0x00000400U +#define TARTAGPERR 0x00000200U +#define PIOREQPERR 0x00000100U +#define PIOCPLPERR 0x00000080U +#define MSIXDIPERR 0x00000040U +#define MSIXDATAPERR 0x00000020U +#define MSIXADDRHPERR 0x00000010U +#define MSIXADDRLPERR 0x00000008U +#define MSIDATAPERR 0x00000004U +#define MSIADDRHPERR 0x00000002U +#define MSIADDRLPERR 0x00000001U + +#define PCIE_NONFAT_ERR 0x3010 +#define PCIE_MEM_ACCESS_BASE_WIN 0x3068 +#define PCIEOFST_MASK 0xfffffc00U +#define BIR_MASK 0x00000300U +#define BIR_SHIFT 8 +#define BIR(x) ((x) << BIR_SHIFT) +#define WINDOW_MASK 0x000000ffU +#define WINDOW_SHIFT 0 +#define WINDOW(x) ((x) << WINDOW_SHIFT) +#define PCIE_MEM_ACCESS_OFFSET 0x306c + +#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 +#define RNPP 0x80000000U +#define RPCP 0x20000000U +#define RCIP 0x08000000U +#define RCCP 0x04000000U +#define RFTP 0x00800000U +#define PTRP 0x00100000U + +#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4 +#define TPCP 0x40000000U +#define TNPP 0x20000000U +#define TFTP 0x10000000U +#define TCAP 0x08000000U +#define TCIP 0x04000000U +#define RCAP 0x02000000U +#define PLUP 0x00800000U +#define PLDN 0x00400000U +#define OTDD 0x00200000U +#define GTRP 0x00100000U +#define RDPE 0x00040000U +#define TDCE 0x00020000U +#define TDUE 0x00010000U + +#define MC_INT_CAUSE 0x7518 +#define ECC_UE_INT_CAUSE 0x00000004U +#define ECC_CE_INT_CAUSE 0x00000002U +#define PERR_INT_CAUSE 0x00000001U + +#define MC_ECC_STATUS 0x751c +#define ECC_CECNT_MASK 0xffff0000U +#define ECC_CECNT_SHIFT 16 +#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT) +#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT) +#define ECC_UECNT_MASK 0x0000ffffU +#define ECC_UECNT_SHIFT 0 +#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT) +#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT) + +#define MC_BIST_CMD 0x7600 +#define START_BIST 0x80000000U +#define BIST_CMD_GAP_MASK 0x0000ff00U +#define BIST_CMD_GAP_SHIFT 8 +#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT) +#define BIST_OPCODE_MASK 0x00000003U +#define BIST_OPCODE_SHIFT 0 +#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT) + +#define MC_BIST_CMD_ADDR 0x7604 +#define MC_BIST_CMD_LEN 0x7608 +#define MC_BIST_DATA_PATTERN 0x760c +#define BIST_DATA_TYPE_MASK 0x0000000fU +#define BIST_DATA_TYPE_SHIFT 0 +#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT) + +#define MC_BIST_STATUS_RDATA 0x7688 + +#define MA_EXT_MEMORY_BAR 0x77c8 +#define EXT_MEM_SIZE_MASK 0x00000fffU +#define EXT_MEM_SIZE_SHIFT 0 +#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) + +#define MA_TARGET_MEM_ENABLE 0x77d8 +#define EXT_MEM_ENABLE 0x00000004U +#define EDRAM1_ENABLE 0x00000002U +#define EDRAM0_ENABLE 0x00000001U + +#define MA_INT_CAUSE 0x77e0 +#define MEM_PERR_INT_CAUSE 0x00000002U +#define MEM_WRAP_INT_CAUSE 0x00000001U + +#define 
MA_INT_WRAP_STATUS 0x77e4 +#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U +#define MEM_WRAP_ADDRESS_SHIFT 4 +#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT) +#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU +#define MEM_WRAP_CLIENT_NUM_SHIFT 0 +#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) + +#define MA_PARITY_ERROR_STATUS 0x77f4 + +#define EDC_0_BASE_ADDR 0x7900 + +#define EDC_BIST_CMD 0x7904 +#define EDC_BIST_CMD_ADDR 0x7908 +#define EDC_BIST_CMD_LEN 0x790c +#define EDC_BIST_DATA_PATTERN 0x7910 +#define EDC_BIST_STATUS_RDATA 0x7928 +#define EDC_INT_CAUSE 0x7978 +#define ECC_UE_PAR 0x00000020U +#define ECC_CE_PAR 0x00000010U +#define PERR_PAR_CAUSE 0x00000008U + +#define EDC_ECC_STATUS 0x797c + +#define EDC_1_BASE_ADDR 0x7980 + +#define CIM_BOOT_CFG 0x7b00 +#define BOOTADDR_MASK 0xffffff00U + +#define CIM_PF_MAILBOX_DATA 0x240 +#define CIM_PF_MAILBOX_CTRL 0x280 +#define MBMSGVALID 0x00000008U +#define MBINTREQ 0x00000004U +#define MBOWNER_MASK 0x00000003U +#define MBOWNER_SHIFT 0 +#define MBOWNER(x) ((x) << MBOWNER_SHIFT) +#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT) + +#define CIM_PF_HOST_INT_CAUSE 0x28c +#define MBMSGRDYINT 0x00080000U + +#define CIM_HOST_INT_CAUSE 0x7b2c +#define TIEQOUTPARERRINT 0x00100000U +#define TIEQINPARERRINT 0x00080000U +#define MBHOSTPARERR 0x00040000U +#define MBUPPARERR 0x00020000U +#define IBQPARERR 0x0001f800U +#define IBQTP0PARERR 0x00010000U +#define IBQTP1PARERR 0x00008000U +#define IBQULPPARERR 0x00004000U +#define IBQSGELOPARERR 0x00002000U +#define IBQSGEHIPARERR 0x00001000U +#define IBQNCSIPARERR 0x00000800U +#define OBQPARERR 0x000007e0U +#define OBQULP0PARERR 0x00000400U +#define OBQULP1PARERR 0x00000200U +#define OBQULP2PARERR 0x00000100U +#define OBQULP3PARERR 0x00000080U +#define OBQSGEPARERR 0x00000040U +#define OBQNCSIPARERR 0x00000020U +#define PREFDROPINT 0x00000002U +#define UPACCNONZERO 0x00000001U + +#define CIM_HOST_UPACC_INT_CAUSE 0x7b34 +#define EEPROMWRINT 0x40000000U +#define TIMEOUTMAINT 0x20000000U +#define TIMEOUTINT 0x10000000U +#define RSPOVRLOOKUPINT 0x08000000U +#define REQOVRLOOKUPINT 0x04000000U +#define BLKWRPLINT 0x02000000U +#define BLKRDPLINT 0x01000000U +#define SGLWRPLINT 0x00800000U +#define SGLRDPLINT 0x00400000U +#define BLKWRCTLINT 0x00200000U +#define BLKRDCTLINT 0x00100000U +#define SGLWRCTLINT 0x00080000U +#define SGLRDCTLINT 0x00040000U +#define BLKWREEPROMINT 0x00020000U +#define BLKRDEEPROMINT 0x00010000U +#define SGLWREEPROMINT 0x00008000U +#define SGLRDEEPROMINT 0x00004000U +#define BLKWRFLASHINT 0x00002000U +#define BLKRDFLASHINT 0x00001000U +#define SGLWRFLASHINT 0x00000800U +#define SGLRDFLASHINT 0x00000400U +#define BLKWRBOOTINT 0x00000200U +#define BLKRDBOOTINT 0x00000100U +#define SGLWRBOOTINT 0x00000080U +#define SGLRDBOOTINT 0x00000040U +#define ILLWRBEINT 0x00000020U +#define ILLRDBEINT 0x00000010U +#define ILLRDINT 0x00000008U +#define ILLWRINT 0x00000004U +#define ILLTRANSINT 0x00000002U +#define RSVDSPACEINT 0x00000001U + +#define TP_OUT_CONFIG 0x7d04 +#define VLANEXTENABLE_MASK 0x0000f000U +#define VLANEXTENABLE_SHIFT 12 + +#define TP_PARA_REG2 0x7d68 +#define MAXRXDATA_MASK 0xffff0000U +#define MAXRXDATA_SHIFT 16 +#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT) + +#define TP_TIMER_RESOLUTION 0x7d90 +#define TIMERRESOLUTION_MASK 0x00ff0000U +#define TIMERRESOLUTION_SHIFT 16 +#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) + +#define 
TP_SHIFT_CNT 0x7dc0 + +#define TP_CCTRL_TABLE 0x7ddc +#define TP_MTU_TABLE 0x7de4 +#define MTUINDEX_MASK 0xff000000U +#define MTUINDEX_SHIFT 24 +#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT) +#define MTUWIDTH_MASK 0x000f0000U +#define MTUWIDTH_SHIFT 16 +#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT) +#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT) +#define MTUVALUE_MASK 0x00003fffU +#define MTUVALUE_SHIFT 0 +#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT) +#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT) + +#define TP_RSS_LKP_TABLE 0x7dec +#define LKPTBLROWVLD 0x80000000U +#define LKPTBLQUEUE1_MASK 0x000ffc00U +#define LKPTBLQUEUE1_SHIFT 10 +#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT) +#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT) +#define LKPTBLQUEUE0_MASK 0x000003ffU +#define LKPTBLQUEUE0_SHIFT 0 +#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT) +#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT) + +#define TP_PIO_ADDR 0x7e40 +#define TP_PIO_DATA 0x7e44 +#define TP_MIB_INDEX 0x7e50 +#define TP_MIB_DATA 0x7e54 +#define TP_INT_CAUSE 0x7e74 +#define FLMTXFLSTEMPTY 0x40000000U + +#define TP_INGRESS_CONFIG 0x141 +#define VNIC 0x00000800U +#define CSUM_HAS_PSEUDO_HDR 0x00000400U +#define RM_OVLAN 0x00000200U +#define LOOKUPEVERYPKT 0x00000100U + +#define TP_MIB_MAC_IN_ERR_0 0x0 +#define TP_MIB_TCP_OUT_RST 0xc +#define TP_MIB_TCP_IN_SEG_HI 0x10 +#define TP_MIB_TCP_IN_SEG_LO 0x11 +#define TP_MIB_TCP_OUT_SEG_HI 0x12 +#define TP_MIB_TCP_OUT_SEG_LO 0x13 +#define TP_MIB_TCP_RXT_SEG_HI 0x14 +#define TP_MIB_TCP_RXT_SEG_LO 0x15 +#define TP_MIB_TNL_CNG_DROP_0 0x18 +#define TP_MIB_TCP_V6IN_ERR_0 0x28 +#define TP_MIB_TCP_V6OUT_RST 0x2c +#define TP_MIB_OFD_ARP_DROP 0x36 +#define TP_MIB_TNL_DROP_0 0x44 +#define TP_MIB_OFD_VLN_DROP_0 0x58 + +#define ULP_TX_INT_CAUSE 0x8dcc +#define PBL_BOUND_ERR_CH3 0x80000000U +#define PBL_BOUND_ERR_CH2 0x40000000U +#define PBL_BOUND_ERR_CH1 0x20000000U +#define PBL_BOUND_ERR_CH0 0x10000000U + +#define PM_RX_INT_CAUSE 0x8fdc +#define ZERO_E_CMD_ERROR 0x00400000U +#define PMRX_FRAMING_ERROR 0x003ffff0U +#define OCSPI_PAR_ERROR 0x00000008U +#define DB_OPTIONS_PAR_ERROR 0x00000004U +#define IESPI_PAR_ERROR 0x00000002U +#define E_PCMD_PAR_ERROR 0x00000001U + +#define PM_TX_INT_CAUSE 0x8ffc +#define PCMD_LEN_OVFL0 0x80000000U +#define PCMD_LEN_OVFL1 0x40000000U +#define PCMD_LEN_OVFL2 0x20000000U +#define ZERO_C_CMD_ERROR 0x10000000U +#define PMTX_FRAMING_ERROR 0x0ffffff0U +#define OESPI_PAR_ERROR 0x00000008U +#define ICSPI_PAR_ERROR 0x00000002U +#define C_PCMD_PAR_ERROR 0x00000001U + +#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 +#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 +#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 +#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c +#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 +#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 +#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 +#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c +#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 +#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 +#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 +#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c +#define MPS_PORT_STAT_TX_PORT_64B_L 0x430 +#define MPS_PORT_STAT_TX_PORT_64B_H 0x434 +#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 +#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c +#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 +#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 +#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 +#define MPS_PORT_STAT_TX_PORT_256B_511B_H 
0x44c +#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 +#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 +#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 +#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c +#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 +#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 +#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468 +#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c +#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 +#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 +#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 +#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c +#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 +#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 +#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 +#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c +#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 +#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 +#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 +#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c +#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 +#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 +#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 +#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac +#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 +#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 +#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 +#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 +#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 +#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc +#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 +#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 +#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 +#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc +#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 +#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 +#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 +#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec +#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 +#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 +#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 +#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc +#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 +#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 +#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 +#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c +#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 +#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 +#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 +#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c +#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 +#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 +#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 +#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 +#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 +#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 +#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c +#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 +#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 +#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 +#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c +#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 +#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 +#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 +#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c +#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 +#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 +#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 +#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c +#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 +#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 +#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 +#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c +#define MPS_PORT_STAT_RX_PORT_64B_L 0x590 +#define MPS_PORT_STAT_RX_PORT_64B_H 0x594 +#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 +#define 
MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c +#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 +#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 +#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 +#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac +#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 +#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 +#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 +#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc +#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 +#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 +#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 +#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc +#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 +#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 +#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 +#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc +#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 +#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 +#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 +#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec +#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 +#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 +#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 +#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc +#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 +#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 +#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 +#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c +#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 +#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 +#define MPS_CMN_CTL 0x9000 +#define NUMPORTS_MASK 0x00000003U +#define NUMPORTS_SHIFT 0 +#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT) + +#define MPS_INT_CAUSE 0x9008 +#define STATINT 0x00000020U +#define TXINT 0x00000010U +#define RXINT 0x00000008U +#define TRCINT 0x00000004U +#define CLSINT 0x00000002U +#define PLINT 0x00000001U + +#define MPS_TX_INT_CAUSE 0x9408 +#define PORTERR 0x00010000U +#define FRMERR 0x00008000U +#define SECNTERR 0x00004000U +#define BUBBLE 0x00002000U +#define TXDESCFIFO 0x00001e00U +#define TXDATAFIFO 0x000001e0U +#define NCSIFIFO 0x00000010U +#define TPFIFO 0x0000000fU + +#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614 +#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620 +#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c + +#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 +#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 +#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 +#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c +#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 +#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 +#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 +#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c +#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 +#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 +#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 +#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c +#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 +#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 +#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 +#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c +#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 +#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 +#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 +#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c +#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 +#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 +#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 +#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c +#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 +#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 +#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 +#define 
MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac +#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 +#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 +#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 +#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc +#define MPS_TRC_CFG 0x9800 +#define TRCFIFOEMPTY 0x00000010U +#define TRCIGNOREDROPINPUT 0x00000008U +#define TRCKEEPDUPLICATES 0x00000004U +#define TRCEN 0x00000002U +#define TRCMULTIFILTER 0x00000001U + +#define MPS_TRC_RSS_CONTROL 0x9808 +#define RSSCONTROL_MASK 0x00ff0000U +#define RSSCONTROL_SHIFT 16 +#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) +#define QUEUENUMBER_MASK 0x0000ffffU +#define QUEUENUMBER_SHIFT 0 +#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT) + +#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810 +#define TFINVERTMATCH 0x01000000U +#define TFPKTTOOLARGE 0x00800000U +#define TFEN 0x00400000U +#define TFPORT_MASK 0x003c0000U +#define TFPORT_SHIFT 18 +#define TFPORT(x) ((x) << TFPORT_SHIFT) +#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT) +#define TFDROP 0x00020000U +#define TFSOPEOPERR 0x00010000U +#define TFLENGTH_MASK 0x00001f00U +#define TFLENGTH_SHIFT 8 +#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT) +#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT) +#define TFOFFSET_MASK 0x0000001fU +#define TFOFFSET_SHIFT 0 +#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT) +#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT) + +#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820 +#define TFMINPKTSIZE_MASK 0x01ff0000U +#define TFMINPKTSIZE_SHIFT 16 +#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT) +#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT) +#define TFCAPTUREMAX_MASK 0x00003fffU +#define TFCAPTUREMAX_SHIFT 0 +#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT) +#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT) + +#define MPS_TRC_INT_CAUSE 0x985c +#define MISCPERR 0x00000100U +#define PKTFIFO 0x000000f0U +#define FILTMEM 0x0000000fU + +#define MPS_TRC_FILTER0_MATCH 0x9c00 +#define MPS_TRC_FILTER0_DONT_CARE 0x9c80 +#define MPS_TRC_FILTER1_MATCH 0x9d00 +#define MPS_CLS_INT_CAUSE 0xd028 +#define PLERRENB 0x00000008U +#define HASHSRAM 0x00000004U +#define MATCHTCAM 0x00000002U +#define MATCHSRAM 0x00000001U + +#define MPS_RX_PERR_INT_CAUSE 0x11074 + +#define CPL_INTR_CAUSE 0x19054 +#define CIM_OP_MAP_PERR 0x00000020U +#define CIM_OVFL_ERROR 0x00000010U +#define TP_FRAMING_ERROR 0x00000008U +#define SGE_FRAMING_ERROR 0x00000004U +#define CIM_FRAMING_ERROR 0x00000002U +#define ZERO_SWITCH_ERROR 0x00000001U + +#define SMB_INT_CAUSE 0x19090 +#define MSTTXFIFOPARINT 0x00200000U +#define MSTRXFIFOPARINT 0x00100000U +#define SLVFIFOPARINT 0x00080000U + +#define ULP_RX_INT_CAUSE 0x19158 +#define ULP_RX_ISCSI_TAGMASK 0x19164 +#define ULP_RX_ISCSI_PSZ 0x19168 +#define HPZ3_MASK 0x0f000000U +#define HPZ3_SHIFT 24 +#define HPZ3(x) ((x) << HPZ3_SHIFT) +#define HPZ2_MASK 0x000f0000U +#define HPZ2_SHIFT 16 +#define HPZ2(x) ((x) << HPZ2_SHIFT) +#define HPZ1_MASK 0x00000f00U +#define HPZ1_SHIFT 8 +#define HPZ1(x) ((x) << HPZ1_SHIFT) +#define HPZ0_MASK 0x0000000fU +#define HPZ0_SHIFT 0 +#define HPZ0(x) ((x) << HPZ0_SHIFT) + +#define ULP_RX_TDDP_PSZ 0x19178 + +#define SF_DATA 0x193f8 +#define SF_OP 0x193fc +#define BUSY 0x80000000U +#define SF_LOCK 0x00000010U +#define SF_CONT 0x00000008U +#define BYTECNT_MASK 0x00000006U +#define BYTECNT_SHIFT 1 +#define BYTECNT(x) ((x) << BYTECNT_SHIFT) +#define OP_WR 0x00000001U + +#define PL_PF_INT_CAUSE 0x3c0 +#define PFSW 0x00000008U +#define 
PFSGE 0x00000004U +#define PFCIM 0x00000002U +#define PFMPS 0x00000001U + +#define PL_PF_INT_ENABLE 0x3c4 +#define PL_PF_CTL 0x3c8 +#define SWINT 0x00000001U + +#define PL_WHOAMI 0x19400 +#define SOURCEPF_MASK 0x00000700U +#define SOURCEPF_SHIFT 8 +#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT) +#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT) +#define ISVF 0x00000080U +#define VFID_MASK 0x0000007fU +#define VFID_SHIFT 0 +#define VFID(x) ((x) << VFID_SHIFT) +#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT) + +#define PL_INT_CAUSE 0x1940c +#define ULP_TX 0x08000000U +#define SGE 0x04000000U +#define HMA 0x02000000U +#define CPL_SWITCH 0x01000000U +#define ULP_RX 0x00800000U +#define PM_RX 0x00400000U +#define PM_TX 0x00200000U +#define MA 0x00100000U +#define TP 0x00080000U +#define LE 0x00040000U +#define EDC1 0x00020000U +#define EDC0 0x00010000U +#define MC 0x00008000U +#define PCIE 0x00004000U +#define PMU 0x00002000U +#define XGMAC_KR1 0x00001000U +#define XGMAC_KR0 0x00000800U +#define XGMAC1 0x00000400U +#define XGMAC0 0x00000200U +#define SMB 0x00000100U +#define SF 0x00000080U +#define PL 0x00000040U +#define NCSI 0x00000020U +#define MPS 0x00000010U +#define MI 0x00000008U +#define DBG 0x00000004U +#define I2CM 0x00000002U +#define CIM 0x00000001U + +#define PL_INT_MAP0 0x19414 +#define PL_RST 0x19428 +#define PIORST 0x00000002U +#define PIORSTMODE 0x00000001U + +#define PL_PL_INT_CAUSE 0x19430 +#define FATALPERR 0x00000010U +#define PERRVFID 0x00000001U + +#define PL_REV 0x1943c + +#define LE_DB_CONFIG 0x19c04 +#define HASHEN 0x00100000U + +#define LE_DB_SERVER_INDEX 0x19c18 +#define LE_DB_ACT_CNT_IPV4 0x19c20 +#define LE_DB_ACT_CNT_IPV6 0x19c24 + +#define LE_DB_INT_CAUSE 0x19c3c +#define REQQPARERR 0x00010000U +#define UNKNOWNCMD 0x00008000U +#define PARITYERR 0x00000040U +#define LIPMISS 0x00000020U +#define LIP0 0x00000010U + +#define LE_DB_TID_HASHBASE 0x19df8 + +#define NCSI_INT_CAUSE 0x1a0d8 +#define CIM_DM_PRTY_ERR 0x00000100U +#define MPS_DM_PRTY_ERR 0x00000080U +#define TXFIFO_PRTY_ERR 0x00000002U +#define RXFIFO_PRTY_ERR 0x00000001U + +#define XGMAC_PORT_CFG2 0x1018 +#define PATEN 0x00040000U +#define MAGICEN 0x00020000U + +#define XGMAC_PORT_MAGIC_MACID_LO 0x1024 +#define XGMAC_PORT_MAGIC_MACID_HI 0x1028 + +#define XGMAC_PORT_EPIO_DATA0 0x10c0 +#define XGMAC_PORT_EPIO_DATA1 0x10c4 +#define XGMAC_PORT_EPIO_DATA2 0x10c8 +#define XGMAC_PORT_EPIO_DATA3 0x10cc +#define XGMAC_PORT_EPIO_OP 0x10d0 +#define EPIOWR 0x00000100U +#define ADDRESS_MASK 0x000000ffU +#define ADDRESS_SHIFT 0 +#define ADDRESS(x) ((x) << ADDRESS_SHIFT) + +#define XGMAC_PORT_INT_CAUSE 0x10dc +#endif /* __T4_REGS_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h new file mode 100644 index 0000000..edcfd7e --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -0,0 +1,1623 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _T4FW_INTERFACE_H_ +#define _T4FW_INTERFACE_H_ + +#define FW_T4VF_SGE_BASE_ADDR 0x0000 +#define FW_T4VF_MPS_BASE_ADDR 0x0100 +#define FW_T4VF_PL_BASE_ADDR 0x0200 +#define FW_T4VF_MBDATA_BASE_ADDR 0x0240 +#define FW_T4VF_CIM_BASE_ADDR 0x0300 + +enum fw_wr_opcodes { + FW_FILTER_WR = 0x02, + FW_ULPTX_WR = 0x04, + FW_TP_WR = 0x05, + FW_ETH_TX_PKT_WR = 0x08, + FW_FLOWC_WR = 0x0a, + FW_OFLD_TX_DATA_WR = 0x0b, + FW_CMD_WR = 0x10, + FW_ETH_TX_PKT_VM_WR = 0x11, + FW_RI_RES_WR = 0x0c, + FW_RI_INIT_WR = 0x0d, + FW_RI_RDMA_WRITE_WR = 0x14, + FW_RI_SEND_WR = 0x15, + FW_RI_RDMA_READ_WR = 0x16, + FW_RI_RECV_WR = 0x17, + FW_RI_BIND_MW_WR = 0x18, + FW_RI_FR_NSMR_WR = 0x19, + FW_RI_INV_LSTAG_WR = 0x1a, + FW_LASTC2E_WR = 0x40 +}; + +struct fw_wr_hdr { + __be32 hi; + __be32 lo; +}; + +#define FW_WR_OP(x) ((x) << 24) +#define FW_WR_ATOMIC(x) ((x) << 23) +#define FW_WR_FLUSH(x) ((x) << 22) +#define FW_WR_COMPL(x) ((x) << 21) +#define FW_WR_IMMDLEN_MASK 0xff +#define FW_WR_IMMDLEN(x) ((x) << 0) + +#define FW_WR_EQUIQ (1U << 31) +#define FW_WR_EQUEQ (1U << 30) +#define FW_WR_FLOWID(x) ((x) << 8) +#define FW_WR_LEN16(x) ((x) << 0) + +struct fw_ulptx_wr { + __be32 op_to_compl; + __be32 flowid_len16; + u64 cookie; +}; + +struct fw_tp_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + u64 cookie; +}; + +struct fw_eth_tx_pkt_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be64 r3; +}; + +enum fw_flowc_mnem { + FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ + FW_FLOWC_MNEM_CH, + FW_FLOWC_MNEM_PORT, + FW_FLOWC_MNEM_IQID, + FW_FLOWC_MNEM_SNDNXT, + FW_FLOWC_MNEM_RCVNXT, + FW_FLOWC_MNEM_SNDBUF, + FW_FLOWC_MNEM_MSS, +}; + +struct fw_flowc_mnemval { + u8 mnemonic; + u8 r4[3]; + __be32 val; +}; + +struct fw_flowc_wr { + __be32 op_to_nparams; +#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0) + __be32 flowid_len16; + struct fw_flowc_mnemval mnemval[0]; +}; + +struct fw_ofld_tx_data_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + __be32 plen; + __be32 tunnel_to_proxy; +#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19) +#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18) +#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17) +#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16) +#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15) +#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14) +#define 
FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10) +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6) +}; + +struct fw_cmd_wr { + __be32 op_dma; +#define FW_CMD_WR_DMA (1U << 17) + __be32 len16_pkd; + __be64 cookie_daddr; +}; + +struct fw_eth_tx_pkt_vm_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be32 r3[2]; + u8 ethmacdst[6]; + u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + +#define FW_CMD_MAX_TIMEOUT 3000 + +enum fw_cmd_opcodes { + FW_LDST_CMD = 0x01, + FW_RESET_CMD = 0x03, + FW_HELLO_CMD = 0x04, + FW_BYE_CMD = 0x05, + FW_INITIALIZE_CMD = 0x06, + FW_CAPS_CONFIG_CMD = 0x07, + FW_PARAMS_CMD = 0x08, + FW_PFVF_CMD = 0x09, + FW_IQ_CMD = 0x10, + FW_EQ_MNGT_CMD = 0x11, + FW_EQ_ETH_CMD = 0x12, + FW_EQ_CTRL_CMD = 0x13, + FW_EQ_OFLD_CMD = 0x21, + FW_VI_CMD = 0x14, + FW_VI_MAC_CMD = 0x15, + FW_VI_RXMODE_CMD = 0x16, + FW_VI_ENABLE_CMD = 0x17, + FW_ACL_MAC_CMD = 0x18, + FW_ACL_VLAN_CMD = 0x19, + FW_VI_STATS_CMD = 0x1a, + FW_PORT_CMD = 0x1b, + FW_PORT_STATS_CMD = 0x1c, + FW_PORT_LB_STATS_CMD = 0x1d, + FW_PORT_TRACE_CMD = 0x1e, + FW_PORT_TRACE_MMAP_CMD = 0x1f, + FW_RSS_IND_TBL_CMD = 0x20, + FW_RSS_GLB_CONFIG_CMD = 0x22, + FW_RSS_VI_CONFIG_CMD = 0x23, + FW_LASTC2E_CMD = 0x40, + FW_ERROR_CMD = 0x80, + FW_DEBUG_CMD = 0x81, +}; + +enum fw_cmd_cap { + FW_CMD_CAP_PF = 0x01, + FW_CMD_CAP_DMAQ = 0x02, + FW_CMD_CAP_PORT = 0x04, + FW_CMD_CAP_PORTPROMISC = 0x08, + FW_CMD_CAP_PORTSTATS = 0x10, + FW_CMD_CAP_VF = 0x80, +}; + +/* + * Generic command header flit0 + */ +struct fw_cmd_hdr { + __be32 hi; + __be32 lo; +}; + +#define FW_CMD_OP(x) ((x) << 24) +#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff) +#define FW_CMD_REQUEST (1U << 23) +#define FW_CMD_READ (1U << 22) +#define FW_CMD_WRITE (1U << 21) +#define FW_CMD_EXEC (1U << 20) +#define FW_CMD_RAMASK(x) ((x) << 20) +#define FW_CMD_RETVAL(x) ((x) << 8) +#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff) +#define FW_CMD_LEN16(x) ((x) << 0) + +enum fw_ldst_addrspc { + FW_LDST_ADDRSPC_FIRMWARE = 0x0001, + FW_LDST_ADDRSPC_SGE_EGRC = 0x0008, + FW_LDST_ADDRSPC_SGE_INGC = 0x0009, + FW_LDST_ADDRSPC_SGE_FLMC = 0x000a, + FW_LDST_ADDRSPC_SGE_CONMC = 0x000b, + FW_LDST_ADDRSPC_TP_PIO = 0x0010, + FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011, + FW_LDST_ADDRSPC_TP_MIB = 0x0012, + FW_LDST_ADDRSPC_MDIO = 0x0018, + FW_LDST_ADDRSPC_MPS = 0x0020, + FW_LDST_ADDRSPC_FUNC = 0x0028 +}; + +enum fw_ldst_mps_fid { + FW_LDST_MPS_ATRB, + FW_LDST_MPS_RPLC +}; + +enum fw_ldst_func_access_ctl { + FW_LDST_FUNC_ACC_CTL_VIID, + FW_LDST_FUNC_ACC_CTL_FID +}; + +enum fw_ldst_func_mod_index { + FW_LDST_FUNC_MPS +}; + +struct fw_ldst_cmd { + __be32 op_to_addrspace; +#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0) + __be32 cycles_to_len16; + union fw_ldst { + struct fw_ldst_addrval { + __be32 addr; + __be32 val; + } addrval; + struct fw_ldst_idctxt { + __be32 physid; + __be32 msg_pkd; + __be32 ctxt_data7; + __be32 ctxt_data6; + __be32 ctxt_data5; + __be32 ctxt_data4; + __be32 ctxt_data3; + __be32 ctxt_data2; + __be32 ctxt_data1; + __be32 ctxt_data0; + } idctxt; + struct fw_ldst_mdio { + __be16 paddr_mmd; + __be16 raddr; + __be16 vctl; + __be16 rval; + } mdio; + struct fw_ldst_mps { + __be16 fid_ctl; + __be16 rplcpf_pkd; + __be32 rplc127_96; + __be32 rplc95_64; + __be32 rplc63_32; + __be32 rplc31_0; + __be32 atrb; + __be16 vlan[16]; + } mps; + struct fw_ldst_func { + u8 access_ctl; + u8 mod_index; + __be16 ctl_id; + __be32 offset; + __be64 data0; + __be64 data1; + } func; + } u; +}; + +#define FW_LDST_CMD_MSG(x) ((x) << 31) +#define FW_LDST_CMD_PADDR(x) ((x) << 8) +#define FW_LDST_CMD_MMD(x) ((x) << 0) +#define 
FW_LDST_CMD_FID(x) ((x) << 15) +#define FW_LDST_CMD_CTL(x) ((x) << 0) +#define FW_LDST_CMD_RPLCPF(x) ((x) << 0) + +struct fw_reset_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 val; + __be32 r3; +}; + +struct fw_hello_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 err_to_mbasyncnot; +#define FW_HELLO_CMD_ERR (1U << 31) +#define FW_HELLO_CMD_INIT (1U << 30) +#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) +#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) +#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24) +#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) + __be32 fwrev; +}; + +struct fw_bye_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +struct fw_initialize_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +enum fw_caps_config_hm { + FW_CAPS_CONFIG_HM_PCIE = 0x00000001, + FW_CAPS_CONFIG_HM_PL = 0x00000002, + FW_CAPS_CONFIG_HM_SGE = 0x00000004, + FW_CAPS_CONFIG_HM_CIM = 0x00000008, + FW_CAPS_CONFIG_HM_ULPTX = 0x00000010, + FW_CAPS_CONFIG_HM_TP = 0x00000020, + FW_CAPS_CONFIG_HM_ULPRX = 0x00000040, + FW_CAPS_CONFIG_HM_PMRX = 0x00000080, + FW_CAPS_CONFIG_HM_PMTX = 0x00000100, + FW_CAPS_CONFIG_HM_MC = 0x00000200, + FW_CAPS_CONFIG_HM_LE = 0x00000400, + FW_CAPS_CONFIG_HM_MPS = 0x00000800, + FW_CAPS_CONFIG_HM_XGMAC = 0x00001000, + FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000, + FW_CAPS_CONFIG_HM_T4DBG = 0x00004000, + FW_CAPS_CONFIG_HM_MI = 0x00008000, + FW_CAPS_CONFIG_HM_I2CM = 0x00010000, + FW_CAPS_CONFIG_HM_NCSI = 0x00020000, + FW_CAPS_CONFIG_HM_SMB = 0x00040000, + FW_CAPS_CONFIG_HM_MA = 0x00080000, + FW_CAPS_CONFIG_HM_EDRAM = 0x00100000, + FW_CAPS_CONFIG_HM_PMU = 0x00200000, + FW_CAPS_CONFIG_HM_UART = 0x00400000, + FW_CAPS_CONFIG_HM_SF = 0x00800000, +}; + +enum fw_caps_config_nbm { + FW_CAPS_CONFIG_NBM_IPMI = 0x00000001, + FW_CAPS_CONFIG_NBM_NCSI = 0x00000002, +}; + +enum fw_caps_config_link { + FW_CAPS_CONFIG_LINK_PPP = 0x00000001, + FW_CAPS_CONFIG_LINK_QFC = 0x00000002, + FW_CAPS_CONFIG_LINK_DCBX = 0x00000004, +}; + +enum fw_caps_config_switch { + FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001, + FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002, +}; + +enum fw_caps_config_nic { + FW_CAPS_CONFIG_NIC = 0x00000001, + FW_CAPS_CONFIG_NIC_VM = 0x00000002, +}; + +enum fw_caps_config_ofld { + FW_CAPS_CONFIG_OFLD = 0x00000001, +}; + +enum fw_caps_config_rdma { + FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001, + FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002, +}; + +enum fw_caps_config_iscsi { + FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001, + FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002, + FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004, + FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, +}; + +enum fw_caps_config_fcoe { + FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, + FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, +}; + +struct fw_caps_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 r2; + __be32 hwmbitmap; + __be16 nbmcaps; + __be16 linkcaps; + __be16 switchcaps; + __be16 r3; + __be16 niccaps; + __be16 ofldcaps; + __be16 rdmacaps; + __be16 r4; + __be16 iscsicaps; + __be16 fcoecaps; + __be32 r5; + __be64 r6; +}; + +/* + * params command mnemonics + */ +enum fw_params_mnem { + FW_PARAMS_MNEM_DEV = 1, /* device params */ + FW_PARAMS_MNEM_PFVF = 2, /* function params */ + FW_PARAMS_MNEM_REG = 3, /* limited register access */ + FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ + FW_PARAMS_MNEM_LAST +}; + +/* + * device parameters + */ +enum fw_params_param_dev { + FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ + FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, 
/* the port vector */ + FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs + * allocated by the device's + * Lookup Engine + */ + FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03, + FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04, + FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05, + FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06, + FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07, + FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08, + FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09, + FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A, + FW_PARAMS_PARAM_DEV_FWREV = 0x0B, + FW_PARAMS_PARAM_DEV_TPREV = 0x0C, +}; + +/* + * physical and virtual function parameters + */ +enum fw_params_param_pfvf { + FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00, + FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01, + FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02, + FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, + FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, + FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, + FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, + FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07, + FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08, + FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09, + FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A, + FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B, + FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C, + FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D, + FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E, + FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F, + FW_PARAMS_PARAM_PFVF_RQ_END = 0x10, + FW_PARAMS_PARAM_PFVF_PBL_START = 0x11, + FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, + FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, + FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, + FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15, + FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16, + FW_PARAMS_PARAM_PFVF_CQ_START = 0x17, + FW_PARAMS_PARAM_PFVF_CQ_END = 0x18, + FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, + FW_PARAMS_PARAM_PFVF_VIID = 0x24, + FW_PARAMS_PARAM_PFVF_CPMASK = 0x25, + FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26, + FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27, + FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28, + FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29, + FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A, + FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B, + FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C, +}; + +/* + * dma queue parameters + */ +enum fw_params_param_dmaq { + FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00, + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, + FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10, + FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, + FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, +}; + +#define FW_PARAMS_MNEM(x) ((x) << 24) +#define FW_PARAMS_PARAM_X(x) ((x) << 16) +#define FW_PARAMS_PARAM_Y(x) ((x) << 8) +#define FW_PARAMS_PARAM_Z(x) ((x) << 0) +#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) +#define FW_PARAMS_PARAM_YZ(x) ((x) << 0) + +struct fw_params_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + struct fw_params_param { + __be32 mnem; + __be32 val; + } param[7]; +}; + +#define FW_PARAMS_CMD_PFN(x) ((x) << 8) +#define FW_PARAMS_CMD_VFN(x) ((x) << 0) + +struct fw_pfvf_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + __be32 niqflint_niq; + __be32 type_to_neq; + __be32 tc_to_nexactf; + __be32 r_caps_to_nethctrl; + __be16 nricq; + __be16 nriqp; + __be32 r4; +}; + +#define FW_PFVF_CMD_PFN(x) ((x) << 8) +#define FW_PFVF_CMD_VFN(x) ((x) << 0) + +#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20) +#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff) + +#define FW_PFVF_CMD_NIQ(x) ((x) << 0) +#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_PFVF_CMD_TYPE (1 << 31) +#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1) + +#define FW_PFVF_CMD_CMASK(x) ((x) << 24) +#define FW_PFVF_CMD_CMASK_MASK 0xf +#define 
FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK) + +#define FW_PFVF_CMD_PMASK(x) ((x) << 20) +#define FW_PFVF_CMD_PMASK_MASK 0xf +#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK) + +#define FW_PFVF_CMD_NEQ(x) ((x) << 0) +#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_PFVF_CMD_TC(x) ((x) << 24) +#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff) + +#define FW_PFVF_CMD_NVI(x) ((x) << 16) +#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff) + +#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0) +#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff) + +#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24) +#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff) + +#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16) +#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff) + +#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0) +#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff) + +enum fw_iq_type { + FW_IQ_TYPE_FL_INT_CAP, + FW_IQ_TYPE_NO_FL_INT_CAP +}; + +struct fw_iq_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 physiqid; + __be16 iqid; + __be16 fl0id; + __be16 fl1id; + __be32 type_to_iqandstindex; + __be16 iqdroprss_to_iqesize; + __be16 iqsize; + __be64 iqaddr; + __be32 iqns_to_fl0congen; + __be16 fl0dcaen_to_fl0cidxfthresh; + __be16 fl0size; + __be64 fl0addr; + __be32 fl1cngchmap_to_fl1congen; + __be16 fl1dcaen_to_fl1cidxfthresh; + __be16 fl1size; + __be64 fl1addr; +}; + +#define FW_IQ_CMD_PFN(x) ((x) << 8) +#define FW_IQ_CMD_VFN(x) ((x) << 0) + +#define FW_IQ_CMD_ALLOC (1U << 31) +#define FW_IQ_CMD_FREE (1U << 30) +#define FW_IQ_CMD_MODIFY (1U << 29) +#define FW_IQ_CMD_IQSTART(x) ((x) << 28) +#define FW_IQ_CMD_IQSTOP(x) ((x) << 27) + +#define FW_IQ_CMD_TYPE(x) ((x) << 29) +#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28) +#define FW_IQ_CMD_VIID(x) ((x) << 16) +#define FW_IQ_CMD_IQANDST(x) ((x) << 15) +#define FW_IQ_CMD_IQANUS(x) ((x) << 14) +#define FW_IQ_CMD_IQANUD(x) ((x) << 12) +#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0) + +#define FW_IQ_CMD_IQDROPRSS (1U << 15) +#define FW_IQ_CMD_IQGTSMODE (1U << 14) +#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12) +#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11) +#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6) +#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4) +#define FW_IQ_CMD_IQO (1U << 3) +#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2) +#define FW_IQ_CMD_IQESIZE(x) ((x) << 0) + +#define FW_IQ_CMD_IQNS(x) ((x) << 31) +#define FW_IQ_CMD_IQRO(x) ((x) << 30) +#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28) +#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27) +#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26) +#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20) +#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15) +#define FW_IQ_CMD_FL0DBP(x) ((x) << 14) +#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13) +#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12) +#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11) +#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10) +#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9) +#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8) +#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7) +#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6) +#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4) +#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3) +#define FW_IQ_CMD_FL0PADEN (1U << 2) +#define FW_IQ_CMD_FL0PACKEN (1U << 1) +#define FW_IQ_CMD_FL0CONGEN (1U << 0) + +#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15) +#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10) +#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7) +#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4) +#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3) +#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0) 
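
[Editor's note: not part of the original patch.] The FW_IQ_CMD field macros above all follow the same driver-wide pattern: each 32-bit command word is built by OR-ing together shifted field values and converting to big-endian before it goes to the firmware. As a hedged illustration of how a caller might use them, the sketch below fills in the first words of a struct fw_iq_cmd to allocate and start an ingress queue. The function name and its particular flag choices are hypothetical; memset(), cpu_to_be32() and cpu_to_be16() are the usual kernel helpers, and only macros defined earlier in this header are used.

	/* Sketch only: compose an FW_IQ_CMD from the field macros above.
	 * Every multi-byte word is big-endian on the wire, hence the
	 * cpu_to_be32()/cpu_to_be16() conversions.
	 */
	static void example_alloc_iq_cmd(struct fw_iq_cmd *c, unsigned int pf,
					 unsigned int vf, unsigned int iqsize)
	{
		memset(c, 0, sizeof(*c));
		/* word 0: opcode plus request/write/exec flags and the
		 * issuing PF/VF
		 */
		c->op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
					   FW_CMD_REQUEST | FW_CMD_WRITE |
					   FW_CMD_EXEC |
					   FW_IQ_CMD_PFN(pf) |
					   FW_IQ_CMD_VFN(vf));
		/* word 1: allocate a new queue, start it, and give the
		 * command length in 16-byte units
		 */
		c->alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
						FW_IQ_CMD_IQSTART(1) |
						FW_CMD_LEN16(sizeof(*c) / 16));
		/* an interrupt-capable queue with a free list */
		c->type_to_iqandstindex =
			cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP));
		c->iqsize = cpu_to_be16(iqsize);
	}

The same compose-then-byteswap idiom applies to the FW_EQ_*, FW_VI_* and FW_PFVF command structures that follow; only the field macros differ.
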
+ +#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20) +#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15) +#define FW_IQ_CMD_FL1DBP(x) ((x) << 14) +#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13) +#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12) +#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11) +#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10) +#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9) +#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8) +#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7) +#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6) +#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4) +#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3) +#define FW_IQ_CMD_FL1PADEN (1U << 2) +#define FW_IQ_CMD_FL1PACKEN (1U << 1) +#define FW_IQ_CMD_FL1CONGEN (1U << 0) + +#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15) +#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10) +#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7) +#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4) +#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3) +#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0) + +struct fw_eq_eth_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; + __be32 viid_pkd; + __be32 r8_lo; + __be64 r9; +}; + +#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8) +#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0) +#define FW_EQ_ETH_CMD_ALLOC (1U << 31) +#define FW_EQ_ETH_CMD_FREE (1U << 30) +#define FW_EQ_ETH_CMD_MODIFY (1U << 29) +#define FW_EQ_ETH_CMD_EQSTART (1U << 28) +#define FW_EQ_ETH_CMD_EQSTOP (1U << 27) + +#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) +#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) +#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) +#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) +#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24) +#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23) +#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22) +#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16) +#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) + +struct fw_eq_ctrl_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 cmpliqid_eqid; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8) +#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0) + +#define FW_EQ_CTRL_CMD_ALLOC (1U << 31) +#define FW_EQ_CTRL_CMD_FREE (1U << 30) +#define FW_EQ_CTRL_CMD_MODIFY (1U << 29) +#define FW_EQ_CTRL_CMD_EQSTART (1U << 28) +#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27) + +#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0) +#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26) +#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25) +#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24) +#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23) +#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22) +#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19) 
+#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16) +#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0) + +struct fw_eq_ofld_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8) +#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0) + +#define FW_EQ_OFLD_CMD_ALLOC (1U << 31) +#define FW_EQ_OFLD_CMD_FREE (1U << 30) +#define FW_EQ_OFLD_CMD_MODIFY (1U << 29) +#define FW_EQ_OFLD_CMD_EQSTART (1U << 28) +#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27) + +#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0) +#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26) +#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25) +#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24) +#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23) +#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22) +#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16) +#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0) + +/* + * Macros for VIID parsing: + * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number + */ +#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) +#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) +#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) + +struct fw_vi_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 type_viid; + u8 mac[6]; + u8 portid_pkd; + u8 nmac; + u8 nmac0[6]; + __be16 rsssize_pkd; + u8 nmac1[6]; + __be16 idsiiq_pkd; + u8 nmac2[6]; + __be16 idseiq_pkd; + u8 nmac3[6]; + __be64 r9; + __be64 r10; +}; + +#define FW_VI_CMD_PFN(x) ((x) << 8) +#define FW_VI_CMD_VFN(x) ((x) << 0) +#define FW_VI_CMD_ALLOC (1U << 31) +#define FW_VI_CMD_FREE (1U << 30) +#define FW_VI_CMD_VIID(x) ((x) << 0) +#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff) +#define FW_VI_CMD_PORTID(x) ((x) << 4) +#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf) +#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) + +/* Special VI_MAC command index ids */ +#define FW_VI_MAC_ADD_MAC 0x3FF +#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE +#define FW_VI_MAC_MAC_BASED_FREE 0x3FD +#define FW_CLS_TCAM_NUM_ENTRIES 336 + +enum fw_vi_mac_smac { + FW_VI_MAC_MPS_TCAM_ENTRY, + FW_VI_MAC_MPS_TCAM_ONLY, + FW_VI_MAC_SMT_ONLY, + FW_VI_MAC_SMT_AND_MPSTCAM +}; + +enum fw_vi_mac_result { + FW_VI_MAC_R_SUCCESS, + FW_VI_MAC_R_F_NONEXISTENT_NOMEM, + FW_VI_MAC_R_SMAC_FAIL, + FW_VI_MAC_R_F_ACL_CHECK +}; + +struct fw_vi_mac_cmd { + __be32 op_to_viid; + __be32 freemacs_to_len16; + union fw_vi_mac { + struct fw_vi_mac_exact { + __be16 valid_to_idx; + u8 macaddr[6]; + } exact[7]; + struct fw_vi_mac_hash { + __be64 hashvec; + } hash; + } u; +}; + +#define 
FW_VI_MAC_CMD_VIID(x) ((x) << 0) +#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31) +#define FW_VI_MAC_CMD_HASHVECEN (1U << 23) +#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22) +#define FW_VI_MAC_CMD_VALID (1U << 15) +#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12) +#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10) +#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3) +#define FW_VI_MAC_CMD_IDX(x) ((x) << 0) +#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff) + +#define FW_RXMODE_MTU_NO_CHG 65535 + +struct fw_vi_rxmode_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be32 mtu_to_vlanexen; + __be32 r4_lo; +}; + +#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) +#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff +#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) +#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) +#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) +#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) +#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8) + +struct fw_vi_enable_cmd { + __be32 op_to_viid; + __be32 ien_to_len16; + __be16 blinkdur; + __be16 r3; + __be32 r4; +}; + +#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0) +#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31) +#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30) +#define FW_VI_ENABLE_CMD_LED (1U << 29) + +/* VI VF stats offset definitions */ +#define VI_VF_NUM_STATS 16 +enum fw_vi_stats_vf_index { + FW_VI_VF_STAT_TX_BCAST_BYTES_IX, + FW_VI_VF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_MCAST_BYTES_IX, + FW_VI_VF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_UCAST_BYTES_IX, + FW_VI_VF_STAT_TX_UCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_DROP_FRAMES_IX, + FW_VI_VF_STAT_TX_OFLD_BYTES_IX, + FW_VI_VF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_VF_STAT_RX_BCAST_BYTES_IX, + FW_VI_VF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_MCAST_BYTES_IX, + FW_VI_VF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_UCAST_BYTES_IX, + FW_VI_VF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_ERR_FRAMES_IX +}; + +/* VI PF stats offset definitions */ +#define VI_PF_NUM_STATS 17 +enum fw_vi_stats_pf_index { + FW_VI_PF_STAT_TX_BCAST_BYTES_IX, + FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_MCAST_BYTES_IX, + FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_UCAST_BYTES_IX, + FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_OFLD_BYTES_IX, + FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_PF_STAT_RX_BYTES_IX, + FW_VI_PF_STAT_RX_FRAMES_IX, + FW_VI_PF_STAT_RX_BCAST_BYTES_IX, + FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_MCAST_BYTES_IX, + FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_UCAST_BYTES_IX, + FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_ERR_FRAMES_IX +}; + +struct fw_vi_stats_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_vi_stats { + struct fw_vi_stats_ctl { + __be16 nstats_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_vi_stats_pf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_pf_bytes; + __be64 rx_pf_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } pf; + struct fw_vi_stats_vf { 
+ __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_drop_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } vf; + } u; +}; + +#define FW_VI_STATS_CMD_VIID(x) ((x) << 0) +#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12) +#define FW_VI_STATS_CMD_IX(x) ((x) << 0) + +struct fw_acl_mac_cmd { + __be32 op_to_vfn; + __be32 en_to_len16; + u8 nmac; + u8 r3[7]; + __be16 r4; + u8 macaddr0[6]; + __be16 r5; + u8 macaddr1[6]; + __be16 r6; + u8 macaddr2[6]; + __be16 r7; + u8 macaddr3[6]; +}; + +#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8) +#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0) +#define FW_ACL_MAC_CMD_EN(x) ((x) << 31) + +struct fw_acl_vlan_cmd { + __be32 op_to_vfn; + __be32 en_to_len16; + u8 nvlan; + u8 dropnovlan_fm; + u8 r3_lo[6]; + __be16 vlanid[16]; +}; + +#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8) +#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0) +#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31) +#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7) +#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6) + +enum fw_port_cap { + FW_PORT_CAP_SPEED_100M = 0x0001, + FW_PORT_CAP_SPEED_1G = 0x0002, + FW_PORT_CAP_SPEED_2_5G = 0x0004, + FW_PORT_CAP_SPEED_10G = 0x0008, + FW_PORT_CAP_SPEED_40G = 0x0010, + FW_PORT_CAP_SPEED_100G = 0x0020, + FW_PORT_CAP_FC_RX = 0x0040, + FW_PORT_CAP_FC_TX = 0x0080, + FW_PORT_CAP_ANEG = 0x0100, + FW_PORT_CAP_MDI_0 = 0x0200, + FW_PORT_CAP_MDI_1 = 0x0400, + FW_PORT_CAP_BEAN = 0x0800, + FW_PORT_CAP_PMA_LPBK = 0x1000, + FW_PORT_CAP_PCS_LPBK = 0x2000, + FW_PORT_CAP_PHYXS_LPBK = 0x4000, + FW_PORT_CAP_FAR_END_LPBK = 0x8000, +}; + +enum fw_port_mdi { + FW_PORT_MDI_UNCHANGED, + FW_PORT_MDI_AUTO, + FW_PORT_MDI_F_STRAIGHT, + FW_PORT_MDI_F_CROSSOVER +}; + +#define FW_PORT_MDI(x) ((x) << 9) + +enum fw_port_action { + FW_PORT_ACTION_L1_CFG = 0x0001, + FW_PORT_ACTION_L2_CFG = 0x0002, + FW_PORT_ACTION_GET_PORT_INFO = 0x0003, + FW_PORT_ACTION_L2_PPP_CFG = 0x0004, + FW_PORT_ACTION_L2_DCB_CFG = 0x0005, + FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, + FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, + FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, + FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020, + FW_PORT_ACTION_L1_LPBK = 0x0021, + FW_PORT_ACTION_L1_PMA_LPBK = 0x0022, + FW_PORT_ACTION_L1_PCS_LPBK = 0x0023, + FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024, + FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025, + FW_PORT_ACTION_PHY_RESET = 0x0040, + FW_PORT_ACTION_PMA_RESET = 0x0041, + FW_PORT_ACTION_PCS_RESET = 0x0042, + FW_PORT_ACTION_PHYXS_RESET = 0x0043, + FW_PORT_ACTION_DTEXS_REEST = 0x0044, + FW_PORT_ACTION_AN_RESET = 0x0045 +}; + +enum fw_port_l2cfg_ctlbf { + FW_PORT_L2_CTLBF_OVLAN0 = 0x01, + FW_PORT_L2_CTLBF_OVLAN1 = 0x02, + FW_PORT_L2_CTLBF_OVLAN2 = 0x04, + FW_PORT_L2_CTLBF_OVLAN3 = 0x08, + FW_PORT_L2_CTLBF_IVLAN = 0x10, + FW_PORT_L2_CTLBF_TXIPG = 0x20 +}; + +enum fw_port_dcb_cfg { + FW_PORT_DCB_CFG_PG = 0x01, + FW_PORT_DCB_CFG_PFC = 0x02, + FW_PORT_DCB_CFG_APPL = 0x04 +}; + +enum fw_port_dcb_cfg_rc { + FW_PORT_DCB_CFG_SUCCESS = 0x0, + FW_PORT_DCB_CFG_ERROR = 0x1 +}; + +struct fw_port_cmd { + __be32 op_to_portid; + __be32 action_to_len16; + union fw_port { + struct fw_port_l1cfg { + __be32 rcap; + __be32 r; + } l1cfg; + struct fw_port_l2cfg { + __be16 ctlbf_to_ivlan0; + __be16 ivlantype; + __be32 txipg_pkd; + __be16 ovlan0mask; + __be16 ovlan0type; + __be16 
ovlan1mask; + __be16 ovlan1type; + __be16 ovlan2mask; + __be16 ovlan2type; + __be16 ovlan3mask; + __be16 ovlan3type; + } l2cfg; + struct fw_port_info { + __be32 lstatus_to_modtype; + __be16 pcap; + __be16 acap; + __be16 mtu; + __u8 cbllen; + __u8 r9; + __be32 r10; + __be64 r11; + } info; + struct fw_port_ppp { + __be32 pppen_to_ncsich; + __be32 r11; + } ppp; + struct fw_port_dcb { + __be16 cfg; + u8 up_map; + u8 sf_cfgrc; + __be16 prot_ix; + u8 pe7_to_pe0; + u8 numTCPFCs; + __be32 pgid0_to_pgid7; + __be32 numTCs_oui; + u8 pgpc[8]; + } dcb; + } u; +}; + +#define FW_PORT_CMD_READ (1U << 22) + +#define FW_PORT_CMD_PORTID(x) ((x) << 0) +#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) + +#define FW_PORT_CMD_ACTION(x) ((x) << 16) +#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff) + +#define FW_PORT_CMD_CTLBF(x) ((x) << 10) +#define FW_PORT_CMD_OVLAN3(x) ((x) << 7) +#define FW_PORT_CMD_OVLAN2(x) ((x) << 6) +#define FW_PORT_CMD_OVLAN1(x) ((x) << 5) +#define FW_PORT_CMD_OVLAN0(x) ((x) << 4) +#define FW_PORT_CMD_IVLAN0(x) ((x) << 3) + +#define FW_PORT_CMD_TXIPG(x) ((x) << 19) + +#define FW_PORT_CMD_LSTATUS (1U << 31) +#define FW_PORT_CMD_LSPEED(x) ((x) << 24) +#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f) +#define FW_PORT_CMD_TXPAUSE (1U << 23) +#define FW_PORT_CMD_RXPAUSE (1U << 22) +#define FW_PORT_CMD_MDIOCAP (1U << 21) +#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f) +#define FW_PORT_CMD_LPTXPAUSE (1U << 15) +#define FW_PORT_CMD_LPRXPAUSE (1U << 14) +#define FW_PORT_CMD_PTYPE_MASK 0x1f +#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK) +#define FW_PORT_CMD_MODTYPE_MASK 0x1f +#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK) + +#define FW_PORT_CMD_PPPEN(x) ((x) << 31) +#define FW_PORT_CMD_TPSRC(x) ((x) << 28) +#define FW_PORT_CMD_NCSISRC(x) ((x) << 24) + +#define FW_PORT_CMD_CH0(x) ((x) << 20) +#define FW_PORT_CMD_CH1(x) ((x) << 16) +#define FW_PORT_CMD_CH2(x) ((x) << 12) +#define FW_PORT_CMD_CH3(x) ((x) << 8) +#define FW_PORT_CMD_NCSICH(x) ((x) << 4) + +enum fw_port_type { + FW_PORT_TYPE_FIBER_XFI, + FW_PORT_TYPE_FIBER_XAUI, + FW_PORT_TYPE_BT_SGMII, + FW_PORT_TYPE_BT_XFI, + FW_PORT_TYPE_BT_XAUI, + FW_PORT_TYPE_KX4, + FW_PORT_TYPE_CX4, + FW_PORT_TYPE_KX, + FW_PORT_TYPE_KR, + FW_PORT_TYPE_SFP, + FW_PORT_TYPE_BP_AP, + FW_PORT_TYPE_BP4_AP, + + FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK +}; + +enum fw_port_module_type { + FW_PORT_MOD_TYPE_NA, + FW_PORT_MOD_TYPE_LR, + FW_PORT_MOD_TYPE_SR, + FW_PORT_MOD_TYPE_ER, + FW_PORT_MOD_TYPE_TWINAX_PASSIVE, + FW_PORT_MOD_TYPE_TWINAX_ACTIVE, + FW_PORT_MOD_TYPE_LRM, + + FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK +}; + +/* port stats */ +#define FW_NUM_PORT_STATS 50 +#define FW_NUM_PORT_TX_STATS 23 +#define FW_NUM_PORT_RX_STATS 27 + +enum fw_port_stats_tx_index { + FW_STAT_TX_PORT_BYTES_IX, + FW_STAT_TX_PORT_FRAMES_IX, + FW_STAT_TX_PORT_BCAST_IX, + FW_STAT_TX_PORT_MCAST_IX, + FW_STAT_TX_PORT_UCAST_IX, + FW_STAT_TX_PORT_ERROR_IX, + FW_STAT_TX_PORT_64B_IX, + FW_STAT_TX_PORT_65B_127B_IX, + FW_STAT_TX_PORT_128B_255B_IX, + FW_STAT_TX_PORT_256B_511B_IX, + FW_STAT_TX_PORT_512B_1023B_IX, + FW_STAT_TX_PORT_1024B_1518B_IX, + FW_STAT_TX_PORT_1519B_MAX_IX, + FW_STAT_TX_PORT_DROP_IX, + FW_STAT_TX_PORT_PAUSE_IX, + FW_STAT_TX_PORT_PPP0_IX, + FW_STAT_TX_PORT_PPP1_IX, + FW_STAT_TX_PORT_PPP2_IX, + FW_STAT_TX_PORT_PPP3_IX, + FW_STAT_TX_PORT_PPP4_IX, + FW_STAT_TX_PORT_PPP5_IX, + FW_STAT_TX_PORT_PPP6_IX, + FW_STAT_TX_PORT_PPP7_IX +}; + +enum fw_port_stat_rx_index { + FW_STAT_RX_PORT_BYTES_IX, + 
FW_STAT_RX_PORT_FRAMES_IX, + FW_STAT_RX_PORT_BCAST_IX, + FW_STAT_RX_PORT_MCAST_IX, + FW_STAT_RX_PORT_UCAST_IX, + FW_STAT_RX_PORT_MTU_ERROR_IX, + FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, + FW_STAT_RX_PORT_CRC_ERROR_IX, + FW_STAT_RX_PORT_LEN_ERROR_IX, + FW_STAT_RX_PORT_SYM_ERROR_IX, + FW_STAT_RX_PORT_64B_IX, + FW_STAT_RX_PORT_65B_127B_IX, + FW_STAT_RX_PORT_128B_255B_IX, + FW_STAT_RX_PORT_256B_511B_IX, + FW_STAT_RX_PORT_512B_1023B_IX, + FW_STAT_RX_PORT_1024B_1518B_IX, + FW_STAT_RX_PORT_1519B_MAX_IX, + FW_STAT_RX_PORT_PAUSE_IX, + FW_STAT_RX_PORT_PPP0_IX, + FW_STAT_RX_PORT_PPP1_IX, + FW_STAT_RX_PORT_PPP2_IX, + FW_STAT_RX_PORT_PPP3_IX, + FW_STAT_RX_PORT_PPP4_IX, + FW_STAT_RX_PORT_PPP5_IX, + FW_STAT_RX_PORT_PPP6_IX, + FW_STAT_RX_PORT_PPP7_IX, + FW_STAT_RX_PORT_LESS_64B_IX +}; + +struct fw_port_stats_cmd { + __be32 op_to_portid; + __be32 retval_len16; + union fw_port_stats { + struct fw_port_stats_ctl { + u8 nstats_bg_bm; + u8 tx_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 tx_drop; + __be64 tx_pause; + __be64 tx_ppp0; + __be64 tx_ppp1; + __be64 tx_ppp2; + __be64 tx_ppp3; + __be64 tx_ppp4; + __be64 tx_ppp5; + __be64 tx_ppp6; + __be64 tx_ppp7; + __be64 rx_bytes; + __be64 rx_frames; + __be64 rx_bcast; + __be64 rx_mcast; + __be64 rx_ucast; + __be64 rx_mtu_error; + __be64 rx_mtu_crc_error; + __be64 rx_crc_error; + __be64 rx_len_error; + __be64 rx_sym_error; + __be64 rx_64b; + __be64 rx_65b_127b; + __be64 rx_128b_255b; + __be64 rx_256b_511b; + __be64 rx_512b_1023b; + __be64 rx_1024b_1518b; + __be64 rx_1519b_max; + __be64 rx_pause; + __be64 rx_ppp0; + __be64 rx_ppp1; + __be64 rx_ppp2; + __be64 rx_ppp3; + __be64 rx_ppp4; + __be64 rx_ppp5; + __be64 rx_ppp6; + __be64 rx_ppp7; + __be64 rx_less_64b; + __be64 rx_bg_drop; + __be64 rx_bg_trunc; + } all; + } u; +}; + +#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4) +#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0) +#define FW_PORT_STATS_CMD_TX(x) ((x) << 7) +#define FW_PORT_STATS_CMD_IX(x) ((x) << 0) + +/* port loopback stats */ +#define FW_NUM_LB_STATS 16 +enum fw_port_lb_stats_index { + FW_STAT_LB_PORT_BYTES_IX, + FW_STAT_LB_PORT_FRAMES_IX, + FW_STAT_LB_PORT_BCAST_IX, + FW_STAT_LB_PORT_MCAST_IX, + FW_STAT_LB_PORT_UCAST_IX, + FW_STAT_LB_PORT_ERROR_IX, + FW_STAT_LB_PORT_64B_IX, + FW_STAT_LB_PORT_65B_127B_IX, + FW_STAT_LB_PORT_128B_255B_IX, + FW_STAT_LB_PORT_256B_511B_IX, + FW_STAT_LB_PORT_512B_1023B_IX, + FW_STAT_LB_PORT_1024B_1518B_IX, + FW_STAT_LB_PORT_1519B_MAX_IX, + FW_STAT_LB_PORT_DROP_FRAMES_IX +}; + +struct fw_port_lb_stats_cmd { + __be32 op_to_lbport; + __be32 retval_len16; + union fw_port_lb_stats { + struct fw_port_lb_stats_ctl { + u8 nstats_bg_bm; + u8 ix_pkd; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_lb_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 rx_lb_drop; + __be64 rx_lb_trunc; + } all; + } u; +}; + +#define 
FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0) +#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4) +#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0) +#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0) + +struct fw_rss_ind_tbl_cmd { + __be32 op_to_viid; +#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0) + __be32 retval_len16; + __be16 niqid; + __be16 startidx; + __be32 r3; + __be32 iq0_to_iq2; +#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20) +#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10) +#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0) + __be32 iq3_to_iq5; + __be32 iq6_to_iq8; + __be32 iq9_to_iq11; + __be32 iq12_to_iq14; + __be32 iq15_to_iq17; + __be32 iq18_to_iq20; + __be32 iq21_to_iq23; + __be32 iq24_to_iq26; + __be32 iq27_to_iq29; + __be32 iq30_iq31; + __be32 r15_lo; +}; + +struct fw_rss_glb_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + union fw_rss_glb_config { + struct fw_rss_glb_config_manual { + __be32 mode_pkd; + __be32 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_glb_config_basicvirtual { + __be32 mode_pkd; + __be32 synmapen_to_hashtoeplitz; +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4) +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3) +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2) +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1) +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0) + __be64 r8; + __be64 r9; + } basicvirtual; + } u; +}; + +#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) +#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf) + +#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 +#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 + +struct fw_rss_vi_config_cmd { + __be32 op_to_viid; +#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) + __be32 retval_len16; + union fw_rss_vi_config { + struct fw_rss_vi_config_manual { + __be64 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_vi_config_basicvirtual { + __be32 r6; + __be32 defaultq_to_udpen; +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff) +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) +#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0) + __be64 r9; + __be64 r10; + } basicvirtual; + } u; +}; + +enum fw_error_type { + FW_ERROR_TYPE_EXCEPTION = 0x0, + FW_ERROR_TYPE_HWMODULE = 0x1, + FW_ERROR_TYPE_WR = 0x2, + FW_ERROR_TYPE_ACL = 0x3, +}; + +struct fw_error_cmd { + __be32 op_to_type; + __be32 len16_pkd; + union fw_error { + struct fw_error_exception { + __be32 info[6]; + } exception; + struct fw_error_hwmodule { + __be32 regaddr; + __be32 regval; + } hwmodule; + struct fw_error_wr { + __be16 cidx; + __be16 pfn_vfn; + __be32 eqid; + u8 wrhdr[16]; + } wr; + struct fw_error_acl { + __be16 cidx; + __be16 pfn_vfn; + __be32 eqid; + __be16 mv_pkd; + u8 val[6]; + __be64 r4; + } acl; + } u; +}; + +struct fw_debug_cmd { + __be32 op_type; +#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff) + __be32 len16_pkd; + union fw_debug { + struct fw_debug_assert { + __be32 fcid; + __be32 line; + __be32 x; + __be32 y; + u8 filename_0_7[8]; + u8 filename_8_15[8]; + __be64 r3; + } assert; + struct fw_debug_prt { + __be16 dprtstridx; + __be16 r3[3]; + __be32 dprtstrparam0; + __be32 
dprtstrparam1; + __be32 dprtstrparam2; + __be32 dprtstrparam3; + } prt; + } u; +}; + +struct fw_hdr { + u8 ver; + u8 reserved1; + __be16 len512; /* bin length in units of 512-bytes */ + __be32 fw_ver; /* firmware version */ + __be32 tp_microcode_ver; + u8 intfver_nic; + u8 intfver_vnic; + u8 intfver_ofld; + u8 intfver_ri; + u8 intfver_iscsipdu; + u8 intfver_iscsi; + u8 intfver_fcoe; + u8 reserved2; + __be32 reserved3[27]; +}; + +#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) +#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) +#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) +#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) +#endif /* _T4FW_INTERFACE_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/Makefile b/drivers/net/ethernet/chelsio/cxgb4vf/Makefile new file mode 100644 index 0000000..d72ee26 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/Makefile @@ -0,0 +1,7 @@ +# +# Chelsio T4 SR-IOV Virtual Function Driver +# + +obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o + +cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h new file mode 100644 index 0000000..594334d --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -0,0 +1,534 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * This file should not be included directly. Include t4vf_common.h instead. + */ + +#ifndef __CXGB4VF_ADAPTER_H__ +#define __CXGB4VF_ADAPTER_H__ + +#include +#include +#include +#include +#include +#include + +#include "../cxgb4/t4_hw.h" + +/* + * Constants of the implementation. + */ +enum { + MAX_NPORTS = 1, /* max # of "ports" */ + MAX_PORT_QSETS = 8, /* max # of Queue Sets / "port" */ + MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS, + + /* + * MSI-X interrupt index usage. 
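+ *
+ * For example, vector MSIX_FW is reserved for the firmware event
+ * queue, and the ingress queue of Queue Set "qs" of a port lands on
+ * vector MSIX_IQFLINT + pi->first_qset + qs (where pi is the port's
+ * port_info), as wired up later in cxgb4vf_main.c.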
+ */ + MSIX_FW = 0, /* MSI-X index for firmware Q */ + MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */ + MSIX_EXTRAS = 1, + MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS, + + /* + * The maximum number of Ingress and Egress Queues is determined by + * the maximum number of "Queue Sets" which we support plus any + * ancillary queues. Each "Queue Set" requires one Ingress Queue + * for RX Packet Ingress Event notifications and two Egress Queues for + * a Free List and an Ethernet TX list. + */ + INGQ_EXTRAS = 2, /* firmware event queue and */ + /* forwarded interrupts */ + MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS, + MAX_EGRQ = MAX_ETH_QSETS*2, +}; + +/* + * Forward structure definition references. + */ +struct adapter; +struct sge_eth_rxq; +struct sge_rspq; + +/* + * Per-"port" information. This is really per-Virtual Interface information + * but the use of the "port" nomenclature makes it easier to go back and forth + * between the PF and VF drivers ... + */ +struct port_info { + struct adapter *adapter; /* our adapter */ + u16 viid; /* virtual interface ID */ + s16 xact_addr_filt; /* index of our MAC address filter */ + u16 rss_size; /* size of VI's RSS table slice */ + u8 pidx; /* index into adapter port[] */ + u8 port_id; /* physical port ID */ + u8 nqsets; /* # of "Queue Sets" */ + u8 first_qset; /* index of first "Queue Set" */ + struct link_config link_cfg; /* physical port configuration */ +}; + +/* + * Scatter Gather Engine resources for the "adapter". Our ingress and egress + * queues are organized into "Queue Sets" with one ingress and one egress + * queue per Queue Set. These Queue Sets are apportionable between the "ports" + * (Virtual Interfaces). One extra ingress queue is used to receive + * asynchronous messages from the firmware. Note that the "Queue IDs" that we + * use here are really "Relative Queue IDs" which are returned as part of the + * firmware command to allocate queues. These queue IDs are relative to the + * absolute Queue ID base of the section of the Queue ID space allocated to + * the PF/VF. + */ + +/* + * SGE free-list queue state. + */ +struct rx_sw_desc; +struct sge_fl { + unsigned int avail; /* # of available RX buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned long alloc_failed; /* # of buffer allocation failures */ + unsigned long large_alloc_failed; + unsigned long starving; /* # of times FL was found starving */ + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + unsigned int cntxt_id; /* SGE relative QID for the free list */ + unsigned int abs_id; /* SGE absolute QID for the free list */ + unsigned int size; /* capacity of free list */ + struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */ + __be64 *desc; /* address of HW RX descriptor ring */ + dma_addr_t addr; /* PCI bus address of hardware ring */ +}; + +/* + * An ingress packet gather list. + */ +struct pkt_gl { + skb_frag_t frags[MAX_SKB_FRAGS]; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *, + const struct pkt_gl *); + +/* + * State for an SGE Response Queue.
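+ *
+ * (Handlers stored in the "handler" field below must match the
+ * rspq_handler_t typedef above.  A minimal sketch, with the actual
+ * CPL message decode elided:
+ *
+ *	static int my_rspq_handler(struct sge_rspq *rspq,
+ *				   const __be64 *rsp,
+ *				   const struct pkt_gl *gl)
+ *	{
+ *		return 0;
+ *	}
+ *
+ * fwevtq_handler() in cxgb4vf_main.c is a real instance.)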
+ */ +struct sge_rspq { + struct napi_struct napi; /* NAPI scheduling control */ + const __be64 *cur_desc; /* current descriptor in queue */ + unsigned int cidx; /* consumer index */ + u8 gen; /* current generation bit */ + u8 next_intr_params; /* holdoff params for next interrupt */ + int offset; /* offset into current FL buffer */ + + unsigned int unhandled_irqs; /* bogus interrupts */ + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + u8 intr_params; /* interrupt holdoff parameters */ + u8 pktcnt_idx; /* interrupt packet threshold */ + u8 idx; /* queue index within its group */ + u16 cntxt_id; /* SGE rel QID for the response Q */ + u16 abs_id; /* SGE abs QID for the response Q */ + __be64 *desc; /* address of hardware response ring */ + dma_addr_t phys_addr; /* PCI bus address of ring */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capacity of response Q */ + struct adapter *adapter; /* our adapter */ + struct net_device *netdev; /* associated net device */ + rspq_handler_t handler; /* the handler for this response Q */ +}; + +/* + * Ethernet queue statistics + */ +struct sge_eth_stats { + unsigned long pkts; /* # of ethernet packets */ + unsigned long lro_pkts; /* # of LRO super packets */ + unsigned long lro_merged; /* # of wire packets merged by LRO */ + unsigned long rx_cso; /* # of Rx checksum offloads */ + unsigned long vlan_ex; /* # of Rx VLAN extractions */ + unsigned long rx_drops; /* # of packets dropped due to no mem */ +}; + +/* + * State for an Ethernet Receive Queue. + */ +struct sge_eth_rxq { + struct sge_rspq rspq; /* Response Queue */ + struct sge_fl fl; /* Free List */ + struct sge_eth_stats stats; /* receive statistics */ +}; + +/* + * SGE Transmit Queue state. This contains all of the resources associated + * with the hardware status of a TX Queue which is a circular ring of hardware + * TX Descriptors. For convenience, it also contains a pointer to a parallel + * "Software Descriptor" array but we don't know anything about it here other + * than its type name. + */ +struct tx_desc { + /* + * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the + * hardware: Sizes, Producer and Consumer indices, etc. + */ + __be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)]; +}; +struct tx_sw_desc; +struct sge_txq { + unsigned int in_use; /* # of in-use TX descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned long stops; /* # of times queue has been stopped */ + unsigned long restarts; /* # of queue restarts */ + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + unsigned int cntxt_id; /* SGE relative QID for the TX Q */ + unsigned int abs_id; /* SGE absolute QID for the TX Q */ + struct tx_desc *desc; /* address of HW TX descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + dma_addr_t phys_addr; /* PCI bus address of hardware ring */ +}; + +/* + * State for an Ethernet Transmit Queue. + */ +struct sge_eth_txq { + struct sge_txq q; /* SGE TX Queue */ + struct netdev_queue *txq; /* associated netdev TX queue */ + unsigned long tso; /* # of TSO requests */ + unsigned long tx_cso; /* # of TX checksum offloads */ + unsigned long vlan_ins; /* # of TX VLAN insertions */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +}; + +/* + * The complete set of Scatter/Gather Engine resources.
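+ *
+ * (Sizing note on the TX rings above: each struct tx_desc is exactly
+ * one Egress Queue Index Size unit of flits, so a ring of "size"
+ * descriptors occupies size * SGE_EQ_IDXSIZE bytes of DMA memory.)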
+ */ +struct sge { + /* + * Our "Queue Sets" ... + */ + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + + /* + * Extra ingress queues for asynchronous firmware events and + * forwarded interrupts (when in MSI mode). + */ + struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + + struct sge_rspq intrq ____cacheline_aligned_in_smp; + spinlock_t intrq_lock; + + /* + * State for managing "starving Free Lists" -- Free Lists which have + * fallen below a certain threshold of buffers available to the + * hardware and attempts to refill them up to that threshold have + * failed. We have a regular "slow tick" timer process which will + * make periodic attempts to refill these starving Free Lists ... + */ + DECLARE_BITMAP(starving_fl, MAX_EGRQ); + struct timer_list rx_timer; + + /* + * State for cleaning up completed TX descriptors. + */ + struct timer_list tx_timer; + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u16 ethqsets; /* # of active Ethernet queue sets */ + u16 ethtxq_rover; /* Tx queue to clean up next */ + u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */ + u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */ + + /* + * Reverse maps from Absolute Queue IDs to associated queue pointers. + * The absolute Queue IDs are in a compact range which start at a + * [potentially large] Base Queue ID. We perform the reverse map by + * first converting the Absolute Queue ID into a Relative Queue ID by + * subtracting off the Base Queue ID and then use a Relative Queue ID + * indexed table to get the pointer to the corresponding software + * queue structure. + */ + unsigned int egr_base; + unsigned int ingr_base; + void *egr_map[MAX_EGRQ]; + struct sge_rspq *ingr_map[MAX_INGQ]; +}; + +/* + * Utility macros to convert Absolute- to Relative-Queue indices and Egress- + * and Ingress-Queues. The EQ_MAP() and IQ_MAP() macros which provide + * pointers to Ingress- and Egress-Queues can be used as both L- and R-values + */ +#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base)) +#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base)) + +#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)]) +#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)]) + +/* + * Macro to iterate across Queue Sets ("rxq" is a historic misnomer). + */ +#define for_each_ethrxq(sge, iter) \ + for (iter = 0; iter < (sge)->ethqsets; iter++) + +/* + * Per-"adapter" (Virtual Function) information. + */ +struct adapter { + /* PCI resources */ + void __iomem *regs; + struct pci_dev *pdev; + struct device *pdev_dev; + + /* "adapter" resources */ + unsigned long registered_device_map; + unsigned long open_device_map; + unsigned long flags; + struct adapter_params params; + + /* queue and interrupt resources */ + struct { + unsigned short vec; + char desc[22]; + } msix_info[MSIX_ENTRIES]; + struct sge sge; + + /* Linux network device resources */ + struct net_device *port[MAX_NPORTS]; + const char *name; + unsigned int msg_enable; + + /* debugfs resources */ + struct dentry *debugfs_root; + + /* various locks */ + spinlock_t stats_lock; +}; + +enum { /* adapter flags */ + FULL_INIT_DONE = (1UL << 0), + USING_MSI = (1UL << 1), + USING_MSIX = (1UL << 2), + QUEUES_BOUND = (1UL << 3), +}; + +/* + * The following register read/write routine definitions are required by + * the common code. 
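+ *
+ * (A typical read-modify-write through these accessors looks like the
+ * sketch below; SOME_REG and SOME_FLAG are placeholder names, not
+ * registers defined by this patch:
+ *
+ *	u32 v = t4_read_reg(adapter, SOME_REG);
+ *	t4_write_reg(adapter, SOME_REG, v | SOME_FLAG);
+ *
+ * The 64-bit variants build on readq()/writeq(), with a two-readl()
+ * fallback supplied below for 32-bit platforms.)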
+ */ + +/** + * t4_read_reg - read a HW register + * @adapter: the adapter + * @reg_addr: the register address + * + * Returns the 32-bit value of the given HW register. + */ +static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr) +{ + return readl(adapter->regs + reg_addr); +} + +/** + * t4_write_reg - write a HW register + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 32-bit value into the given HW register. + */ +static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val) +{ + writel(val, adapter->regs + reg_addr); +} + +#ifndef readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +/** + * t4_read_reg64 - read a 64-bit HW register + * @adapter: the adapter + * @reg_addr: the register address + * + * Returns the 64-bit value of the given HW register. + */ +static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr) +{ + return readq(adapter->regs + reg_addr); +} + +/** + * t4_write_reg64 - write a 64-bit HW register + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 64-bit value into the given HW register. + */ +static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr, + u64 val) +{ + writeq(val, adapter->regs + reg_addr); +} + +/** + * port_name - return the string name of a port + * @adapter: the adapter + * @pidx: the port index + * + * Return the string name of the selected port. + */ +static inline const char *port_name(struct adapter *adapter, int pidx) +{ + return adapter->port[pidx]->name; +} + +/** + * t4_os_set_hw_addr - store a port's MAC address in SW + * @adapter: the adapter + * @pidx: the port index + * @hw_addr: the Ethernet address + * + * Store the Ethernet address of the given port in SW. Called by the common + * code when it retrieves a port's Ethernet address from EEPROM. + */ +static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx, + u8 hw_addr[]) +{ + memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN); + memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN); +} + +/** + * netdev2pinfo - return the port_info structure associated with a net_device + * @dev: the netdev + * + * Return the struct port_info associated with a net_device + */ +static inline struct port_info *netdev2pinfo(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @pidx: the port index + * + * Return the port_info structure for the adapter. + */ +static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx) +{ + return netdev_priv(adapter->port[pidx]); +} + +/** + * netdev2adap - return the adapter structure associated with a net_device + * @dev: the netdev + * + * Return the struct adapter associated with a net_device + */ +static inline struct adapter *netdev2adap(const struct net_device *dev) +{ + return netdev2pinfo(dev)->adapter; +} + +/* + * OS "Callback" function declarations. These are functions that the OS code + * is "contracted" to provide for the common code. + */ +void t4vf_os_link_changed(struct adapter *, int, int); + +/* + * SGE function prototype declarations. 
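+ *
+ * (For example, the firmware event queue is allocated with a call of
+ * the form
+ *
+ *	t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
+ *			   MSIX_FW, NULL, fwevtq_handler);
+ *
+ * as done by setup_sge_queues() in cxgb4vf_main.c.)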
+ */ +int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool, + struct net_device *, int, + struct sge_fl *, rspq_handler_t); +int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *, + struct net_device *, struct netdev_queue *, + unsigned int); +void t4vf_free_sge_resources(struct adapter *); + +int t4vf_eth_xmit(struct sk_buff *, struct net_device *); +int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *, + const struct pkt_gl *); + +irq_handler_t t4vf_intr_handler(struct adapter *); +irqreturn_t t4vf_sge_intr_msix(int, void *); + +int t4vf_sge_init(struct adapter *); +void t4vf_sge_start(struct adapter *); +void t4vf_sge_stop(struct adapter *); + +#endif /* __CXGB4VF_ADAPTER_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c new file mode 100644 index 0000000..ec79913 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -0,0 +1,2947 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t4vf_common.h" +#include "t4vf_defs.h" + +#include "../cxgb4/t4_regs.h" +#include "../cxgb4/t4_msg.h" + +/* + * Generic information about the driver. + */ +#define DRV_VERSION "1.0.0" +#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver" + +/* + * Module Parameters. + * ================== + */ + +/* + * Default ethtool "message level" for adapters. + */ +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +static int dflt_msg_enable = DFLT_MSG_ENABLE; + +module_param(dflt_msg_enable, int, 0644); +MODULE_PARM_DESC(dflt_msg_enable, + "default adapter ethtool message level bitmap"); + +/* + * The driver uses the best interrupt scheme available on a platform in the + * order MSI-X then MSI. 
This parameter determines which of these schemes the + * driver may consider as follows: + * + * msi = 2: choose from among MSI-X and MSI + * msi = 1: only consider MSI interrupts + * + * Note that unlike the Physical Function driver, this Virtual Function driver + * does _not_ support legacy INTx interrupts (this limitation is mandated by + * the PCI-E SR-IOV standard). + */ +#define MSI_MSIX 2 +#define MSI_MSI 1 +#define MSI_DEFAULT MSI_MSIX + +static int msi = MSI_DEFAULT; + +module_param(msi, int, 0644); +MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI"); + +/* + * Fundamental constants. + * ====================== + */ + +enum { + MAX_TXQ_ENTRIES = 16384, + MAX_RSPQ_ENTRIES = 16384, + MAX_RX_BUFFERS = 16384, + + MIN_TXQ_ENTRIES = 32, + MIN_RSPQ_ENTRIES = 128, + MIN_FL_ENTRIES = 16, + + /* + * For purposes of manipulating the Free List size we need to + * recognize that Free Lists are actually Egress Queues (the host + * produces free buffers which the hardware consumes), Egress Queues + * indices are all in units of Egress Context Units bytes, and free + * list entries are 64-bit PCI DMA addresses. And since the state of + * the Producer Index == the Consumer Index implies an EMPTY list, we + * always have at least one Egress Unit's worth of Free List entries + * unused. See sge.c for more details ... + */ + EQ_UNIT = SGE_EQ_IDXSIZE, + FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + MIN_FL_RESID = FL_PER_EQ_UNIT, +}; + +/* + * Global driver state. + * ==================== + */ + +static struct dentry *cxgb4vf_debugfs_root; + +/* + * OS "Callback" functions. + * ======================== + */ + +/* + * The link status has changed on the indicated "port" (Virtual Interface). + */ +void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) +{ + struct net_device *dev = adapter->port[pidx]; + + /* + * If the port is disabled or the current recorded "link up" + * status matches the new status, just return. + */ + if (!netif_running(dev) || link_ok == netif_carrier_ok(dev)) + return; + + /* + * Tell the OS that the link status has changed and print a short + * informative message on the console about the event. + */ + if (link_ok) { + const char *s; + const char *fc; + const struct port_info *pi = netdev_priv(dev); + + netif_carrier_on(dev); + + switch (pi->link_cfg.speed) { + case SPEED_10000: + s = "10Gbps"; + break; + + case SPEED_1000: + s = "1000Mbps"; + break; + + case SPEED_100: + s = "100Mbps"; + break; + + default: + s = "unknown"; + break; + } + + switch (pi->link_cfg.fc) { + case PAUSE_RX: + fc = "RX"; + break; + + case PAUSE_TX: + fc = "TX"; + break; + + case PAUSE_RX|PAUSE_TX: + fc = "RX/TX"; + break; + + default: + fc = "no"; + break; + } + + printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n", + dev->name, s, fc); + } else { + netif_carrier_off(dev); + printk(KERN_INFO "%s: link down\n", dev->name); + } +} + +/* + * Net device operations. + * ====================== + */ + + + + +/* + * Perform the MAC and PHY actions needed to enable a "port" (Virtual + * Interface). + */ +static int link_start(struct net_device *dev) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + /* + * We do not set address filters and promiscuity here, the stack does + * that step explicitly. Enable vlan accel. 
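+ * The -1 arguments passed to t4vf_set_rxmode() below mean "leave that
+ * setting unchanged".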
+ */ + ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1, + true); + if (ret == 0) { + ret = t4vf_change_mac(pi->adapter, pi->viid, + pi->xact_addr_filt, dev->dev_addr, true); + if (ret >= 0) { + pi->xact_addr_filt = ret; + ret = 0; + } + } + + /* + * We don't need to actually "start the link" itself since the + * firmware will do that for us when the first Virtual Interface + * is enabled on a port. + */ + if (ret == 0) + ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true); + return ret; +} + +/* + * Name the MSI-X interrupts. + */ +static void name_msix_vecs(struct adapter *adapter) +{ + int namelen = sizeof(adapter->msix_info[0].desc) - 1; + int pidx; + + /* + * Firmware events. + */ + snprintf(adapter->msix_info[MSIX_FW].desc, namelen, + "%s-FWeventq", adapter->name); + adapter->msix_info[MSIX_FW].desc[namelen] = 0; + + /* + * Ethernet queues. + */ + for_each_port(adapter, pidx) { + struct net_device *dev = adapter->port[pidx]; + const struct port_info *pi = netdev_priv(dev); + int qs, msi; + + for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) { + snprintf(adapter->msix_info[msi].desc, namelen, + "%s-%d", dev->name, qs); + adapter->msix_info[msi].desc[namelen] = 0; + } + } +} + +/* + * Request all of our MSI-X resources. + */ +static int request_msix_queue_irqs(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int rxq, msi, err; + + /* + * Firmware events. + */ + err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix, + 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq); + if (err) + return err; + + /* + * Ethernet queues. + */ + msi = MSIX_IQFLINT; + for_each_ethrxq(s, rxq) { + err = request_irq(adapter->msix_info[msi].vec, + t4vf_sge_intr_msix, 0, + adapter->msix_info[msi].desc, + &s->ethrxq[rxq].rspq); + if (err) + goto err_free_irqs; + msi++; + } + return 0; + +err_free_irqs: + while (--rxq >= 0) + free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq); + free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); + return err; +} + +/* + * Free our MSI-X resources. + */ +static void free_msix_queue_irqs(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int rxq, msi; + + free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); + msi = MSIX_IQFLINT; + for_each_ethrxq(s, rxq) + free_irq(adapter->msix_info[msi++].vec, + &s->ethrxq[rxq].rspq); +} + +/* + * Turn on NAPI and start up interrupts on a response queue. + */ +static void qenable(struct sge_rspq *rspq) +{ + napi_enable(&rspq->napi); + + /* + * 0-increment the Going To Sleep register to start the timer and + * enable interrupts. + */ + t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + CIDXINC(0) | + SEINTARM(rspq->intr_params) | + INGRESSQID(rspq->cntxt_id)); +} + +/* + * Enable NAPI scheduling and interrupt generation for all Receive Queues. + */ +static void enable_rx(struct adapter *adapter) +{ + int rxq; + struct sge *s = &adapter->sge; + + for_each_ethrxq(s, rxq) + qenable(&s->ethrxq[rxq].rspq); + qenable(&s->fw_evtq); + + /* + * The interrupt queue doesn't use NAPI so we do the 0-increment of + * its Going To Sleep register here to get it started. + */ + if (adapter->flags & USING_MSI) + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + CIDXINC(0) | + SEINTARM(s->intrq.intr_params) | + INGRESSQID(s->intrq.cntxt_id)); + +} + +/* + * Wait until all NAPI handlers are descheduled. 
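+ * napi_disable() blocks until any poll of the queue that is already in
+ * progress has completed, so once this returns no NAPI handler can
+ * still be running.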
+ */ +static void quiesce_rx(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int rxq; + + for_each_ethrxq(s, rxq) + napi_disable(&s->ethrxq[rxq].rspq.napi); + napi_disable(&s->fw_evtq.napi); +} + +/* + * Response queue handler for the firmware event queue. + */ +static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, + const struct pkt_gl *gl) +{ + /* + * Extract response opcode and get pointer to CPL message body. + */ + struct adapter *adapter = rspq->adapter; + u8 opcode = ((const struct rss_header *)rsp)->opcode; + void *cpl = (void *)(rsp + 1); + + switch (opcode) { + case CPL_FW6_MSG: { + /* + * We've received an asynchronous message from the firmware. + */ + const struct cpl_fw6_msg *fw_msg = cpl; + if (fw_msg->type == FW6_TYPE_CMD_RPL) + t4vf_handle_fw_rpl(adapter, fw_msg->data); + break; + } + + case CPL_SGE_EGR_UPDATE: { + /* + * We've received an Egress Queue Status Update message. We + * get these, if the SGE is configured to send these when the + * firmware passes certain points in processing our TX + * Ethernet Queue or if we make an explicit request for one. + * We use these updates to determine when we may need to + * restart a TX Ethernet Queue which was stopped for lack of + * free TX Queue Descriptors ... + */ + const struct cpl_sge_egr_update *p = (void *)cpl; + unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid)); + struct sge *s = &adapter->sge; + struct sge_txq *tq; + struct sge_eth_txq *txq; + unsigned int eq_idx; + + /* + * Perform sanity checking on the Queue ID to make sure it + * really refers to one of our TX Ethernet Egress Queues which + * is active and matches the queue's ID. None of these error + * conditions should ever happen so we may want to either make + * them fatal and/or conditionalized under DEBUG. + */ + eq_idx = EQ_IDX(s, qid); + if (unlikely(eq_idx >= MAX_EGRQ)) { + dev_err(adapter->pdev_dev, + "Egress Update QID %d out of range\n", qid); + break; + } + tq = s->egr_map[eq_idx]; + if (unlikely(tq == NULL)) { + dev_err(adapter->pdev_dev, + "Egress Update QID %d TXQ=NULL\n", qid); + break; + } + txq = container_of(tq, struct sge_eth_txq, q); + if (unlikely(tq->abs_id != qid)) { + dev_err(adapter->pdev_dev, + "Egress Update QID %d refers to TXQ %d\n", + qid, tq->abs_id); + break; + } + + /* + * Restart a stopped TX Queue which has less than half of its + * TX ring in use ... + */ + txq->q.restarts++; + netif_tx_wake_queue(txq->txq); + break; + } + + default: + dev_err(adapter->pdev_dev, + "unexpected CPL %#x on FW event queue\n", opcode); + } + + return 0; +} + +/* + * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues + * to use and initializes them. We support multiple "Queue Sets" per port if + * we have MSI-X, otherwise just one queue set per port. + */ +static int setup_sge_queues(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int err, pidx, msix; + + /* + * Clear "Queue Set" Free List Starving and TX Queue Mapping Error + * state. + */ + bitmap_zero(s->starving_fl, MAX_EGRQ); + + /* + * If we're using MSI interrupt mode we need to set up a "forwarded + * interrupt" queue which we'll set up with our MSI vector. The rest + * of the ingress queues will be set up to forward their interrupts to + * this queue ... This must be first since t4vf_sge_alloc_rxq() uses + * the intrq's queue ID as the interrupt forwarding queue for the + * subsequent calls ... 
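+ * (In other words, in MSI mode s->intrq must be allocated before any
+ * other ingress queue, since each subsequent t4vf_sge_alloc_rxq() call
+ * points its queue's interrupt forwarding at s->intrq.)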
+ */ + if (adapter->flags & USING_MSI) { + err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false, + adapter->port[0], 0, NULL, NULL); + if (err) + goto err_free_queues; + } + + /* + * Allocate our ingress queue for asynchronous firmware messages. + */ + err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0], + MSIX_FW, NULL, fwevtq_handler); + if (err) + goto err_free_queues; + + /* + * Allocate each "port"'s initial Queue Sets. These can be changed + * later on ... up to the point where any interface on the adapter is + * brought up at which point lots of things get nailed down + * permanently ... + */ + msix = MSIX_IQFLINT; + for_each_port(adapter, pidx) { + struct net_device *dev = adapter->port[pidx]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; + int qs; + + for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { + err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, + dev, msix++, + &rxq->fl, t4vf_ethrx_handler); + if (err) + goto err_free_queues; + + err = t4vf_sge_alloc_eth_txq(adapter, txq, dev, + netdev_get_tx_queue(dev, qs), + s->fw_evtq.cntxt_id); + if (err) + goto err_free_queues; + + rxq->rspq.idx = qs; + memset(&rxq->stats, 0, sizeof(rxq->stats)); + } + } + + /* + * Create the reverse mappings for the queues. + */ + s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id; + s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id; + IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq; + for_each_port(adapter, pidx) { + struct net_device *dev = adapter->port[pidx]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; + int qs; + + for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { + IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq; + EQ_MAP(s, txq->q.abs_id) = &txq->q; + + /* + * The FW_IQ_CMD doesn't return the Absolute Queue IDs + * for Free Lists but since all of the Egress Queues + * (including Free Lists) have Relative Queue IDs + * which are computed as Absolute - Base Queue ID, we + * can synthesize the Absolute Queue IDs for the Free + * Lists. This is useful for debugging purposes when + * we want to dump Queue Contexts via the PF Driver. + */ + rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base; + EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl; + } + } + return 0; + +err_free_queues: + t4vf_free_sge_resources(adapter); + return err; +} + +/* + * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive + * queues. We configure the RSS CPU lookup table to distribute to the number + * of HW receive queues, and the response queue lookup table to narrow that + * down to the response queues actually configured for each "port" (Virtual + * Interface). We always configure the RSS mapping for all ports since the + * mapping table has plenty of entries. + */ +static int setup_rss(struct adapter *adapter) +{ + int pidx; + + for_each_port(adapter, pidx) { + struct port_info *pi = adap2pinfo(adapter, pidx); + struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; + u16 rss[MAX_PORT_QSETS]; + int qs, err; + + for (qs = 0; qs < pi->nqsets; qs++) + rss[qs] = rxq[qs].rspq.abs_id; + + err = t4vf_config_rss_range(adapter, pi->viid, + 0, pi->rss_size, rss, pi->nqsets); + if (err) + return err; + + /* + * Perform Global RSS Mode-specific initialization. 
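+ * (At this point the VI's slice of the RSS indirection table has been
+ * filled by cycling through the rss[] Absolute Queue IDs written
+ * above; what remains is mode-specific, e.g. choosing a default queue
+ * for unhashed packets in Basic-Virtual mode.)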
+ */ + switch (adapter->params.rss.mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: + /* + * If Tunnel All Lookup isn't specified in the global + * RSS Configuration, then we need to specify a + * default Ingress Queue for any ingress packets which + * aren't hashed. We'll use our first ingress queue + * ... + */ + if (!adapter->params.rss.u.basicvirtual.tnlalllookup) { + union rss_vi_config config; + err = t4vf_read_rss_vi_config(adapter, + pi->viid, + &config); + if (err) + return err; + config.basicvirtual.defaultq = + rxq[0].rspq.abs_id; + err = t4vf_write_rss_vi_config(adapter, + pi->viid, + &config); + if (err) + return err; + } + break; + } + } + + return 0; +} + +/* + * Bring the adapter up. Called whenever we go from no "ports" open to having + * one open. This function performs the actions necessary to make an adapter + * operational, such as completing the initialization of HW modules, and + * enabling interrupts. Must be called with the rtnl lock held. (Note that + * this is called "cxgb_up" in the PF Driver.) + */ +static int adapter_up(struct adapter *adapter) +{ + int err; + + /* + * If this is the first time we've been called, perform basic + * adapter setup. Once we've done this, many of our adapter + * parameters can no longer be changed ... + */ + if ((adapter->flags & FULL_INIT_DONE) == 0) { + err = setup_sge_queues(adapter); + if (err) + return err; + err = setup_rss(adapter); + if (err) { + t4vf_free_sge_resources(adapter); + return err; + } + + if (adapter->flags & USING_MSIX) + name_msix_vecs(adapter); + adapter->flags |= FULL_INIT_DONE; + } + + /* + * Acquire our interrupt resources. We only support MSI-X and MSI. + */ + BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); + if (adapter->flags & USING_MSIX) + err = request_msix_queue_irqs(adapter); + else + err = request_irq(adapter->pdev->irq, + t4vf_intr_handler(adapter), 0, + adapter->name, adapter); + if (err) { + dev_err(adapter->pdev_dev, "request_irq failed, err %d\n", + err); + return err; + } + + /* + * Enable NAPI ingress processing and return success. + */ + enable_rx(adapter); + t4vf_sge_start(adapter); + return 0; +} + +/* + * Bring the adapter down. Called whenever the last "port" (Virtual + * Interface) closed. (Note that this routine is called "cxgb_down" in the PF + * Driver.) + */ +static void adapter_down(struct adapter *adapter) +{ + /* + * Free interrupt resources. + */ + if (adapter->flags & USING_MSIX) + free_msix_queue_irqs(adapter); + else + free_irq(adapter->pdev->irq, adapter); + + /* + * Wait for NAPI handlers to finish. + */ + quiesce_rx(adapter); +} + +/* + * Start up a net device. + */ +static int cxgb4vf_open(struct net_device *dev) +{ + int err; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + /* + * If this is the first interface that we're opening on the "adapter", + * bring the "adapter" up now. + */ + if (adapter->open_device_map == 0) { + err = adapter_up(adapter); + if (err) + return err; + } + + /* + * Note that this interface is up and start everything up ... + */ + netif_set_real_num_tx_queues(dev, pi->nqsets); + err = netif_set_real_num_rx_queues(dev, pi->nqsets); + if (err) + goto err_unwind; + err = link_start(dev); + if (err) + goto err_unwind; + + netif_tx_start_all_queues(dev); + set_bit(pi->port_id, &adapter->open_device_map); + return 0; + +err_unwind: + if (adapter->open_device_map == 0) + adapter_down(adapter); + return err; +} + +/* + * Shut down a net device. 
This routine is called "cxgb_close" in the PF + * Driver ... + */ +static int cxgb4vf_stop(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + t4vf_enable_vi(adapter, pi->viid, false, false); + pi->link_cfg.link_ok = 0; + + clear_bit(pi->port_id, &adapter->open_device_map); + if (adapter->open_device_map == 0) + adapter_down(adapter); + return 0; +} + +/* + * Translate our basic statistics into the standard "ifconfig" statistics. + */ +static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev) +{ + struct t4vf_port_stats stats; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adapter = pi->adapter; + struct net_device_stats *ns = &dev->stats; + int err; + + spin_lock(&adapter->stats_lock); + err = t4vf_get_port_stats(adapter, pi->pidx, &stats); + spin_unlock(&adapter->stats_lock); + + memset(ns, 0, sizeof(*ns)); + if (err) + return ns; + + ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes + + stats.tx_ucast_bytes + stats.tx_offload_bytes); + ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames + + stats.tx_ucast_frames + stats.tx_offload_frames); + ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes + + stats.rx_ucast_bytes); + ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames + + stats.rx_ucast_frames); + ns->multicast = stats.rx_mcast_frames; + ns->tx_errors = stats.tx_drop_frames; + ns->rx_errors = stats.rx_err_frames; + + return ns; +} + +/* + * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting + * at a specified offset within the list, into an array of address pointers and + * return the number collected. + */ +static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev, + const u8 **addr, + unsigned int offset, + unsigned int maxaddrs) +{ + unsigned int index = 0; + unsigned int naddr = 0; + const struct netdev_hw_addr *ha; + + for_each_dev_addr(dev, ha) + if (index++ >= offset) { + addr[naddr++] = ha->addr; + if (naddr >= maxaddrs) + break; + } + return naddr; +} + +/* + * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting + * at a specified offset within the list, into an array of address pointers and + * return the number collected. + */ +static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev, + const u8 **addr, + unsigned int offset, + unsigned int maxaddrs) +{ + unsigned int index = 0; + unsigned int naddr = 0; + const struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) + if (index++ >= offset) { + addr[naddr++] = ha->addr; + if (naddr >= maxaddrs) + break; + } + return naddr; +} + +/* + * Configure the exact and hash address filters to handle a port's multicast + * and secondary unicast MAC addresses.
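+ *
+ * (The collect_netdev_*_list_addrs() helpers above feed addresses to
+ * t4vf_alloc_mac_filt() in ARRAY_SIZE(addr)-sized chunks; addresses
+ * the firmware hashes rather than matches exactly are returned in the
+ * uhash/mhash vectors and committed via t4vf_set_addr_hash() at the
+ * end.)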
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+	u64 mhash = 0;
+	u64 uhash = 0;
+	bool free = true;
+	unsigned int offset, naddr;
+	const u8 *addr[7];
+	int ret;
+	const struct port_info *pi = netdev_priv(dev);
+
+	/* first do the secondary unicast addresses */
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
+		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+					  naddr, addr, NULL, &uhash, sleep);
+		if (ret < 0)
+			return ret;
+
+		free = false;
+	}
+
+	/* next set up the multicast addresses */
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
+		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+					  naddr, addr, NULL, &mhash, sleep);
+		if (ret < 0)
+			return ret;
+		free = false;
+	}
+
+	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
+				  uhash | mhash, sleep);
+}
+
+/*
+ * Set RX properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	ret = set_addr_filters(dev, sleep_ok);
+	if (ret == 0)
+		ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
+				      (dev->flags & IFF_PROMISC) != 0,
+				      (dev->flags & IFF_ALLMULTI) != 0,
+				      1, -1, sleep_ok);
+	return ret;
+}
+
+/*
+ * Set the current receive modes on the device.
+ */
+static void cxgb4vf_set_rxmode(struct net_device *dev)
+{
+	/* unfortunately we can't return errors to the stack */
+	set_rxmode(dev, -1, false);
+}
+
+/*
+ * Find the entry in the interrupt holdoff timer value array which comes
+ * closest to the specified interrupt holdoff value.
+ */
+static int closest_timer(const struct sge *s, int us)
+{
+	int i, timer_idx = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+		int delta = us - s->timer_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			timer_idx = i;
+		}
+	}
+	return timer_idx;
+}
+
+static int closest_thres(const struct sge *s, int thres)
+{
+	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+		delta = thres - s->counter_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			pktcnt_idx = i;
+		}
+	}
+	return pktcnt_idx;
+}
+
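Both helpers perform a plain nearest-value search over a small table. A quick illustrative run, assuming a hypothetical holdoff timer table of {5, 10, 20, 50, 100, 200} microseconds (the real values are decoded from SGE registers in adap_init0() further down):

#include <limits.h>
#include <stdio.h>

/* Hypothetical holdoff timer table in microseconds. */
static const int timer_val[] = { 5, 10, 20, 50, 100, 200 };

/* Same nearest-match search as closest_timer() above. */
static int closest(int us)
{
	int i, idx = 0, min_delta = INT_MAX;

	for (i = 0; i < (int)(sizeof(timer_val) / sizeof(timer_val[0])); i++) {
		int delta = us - timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			idx = i;
		}
	}
	return idx;
}

int main(void)
{
	/* 64us is closest to 50us (index 3); ties resolve to the lower index. */
	printf("closest(64)  -> index %d (%dus)\n", closest(64),
	       timer_val[closest(64)]);
	printf("closest(150) -> index %d (%dus)\n", closest(150),
	       timer_val[closest(150)]);
	return 0;
}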
+/*
+ * Return a queue's interrupt hold-off time in us.  0 means no timer.
+ */
+static unsigned int qtimer_val(const struct adapter *adapter,
+			       const struct sge_rspq *rspq)
+{
+	unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
+
+	return timer_idx < SGE_NTIMERS
+		? adapter->sge.timer_val[timer_idx]
+		: 0;
+}
+
+/**
+ *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
+ *	@adapter: the adapter
+ *	@rspq: the RX response queue
+ *	@us: the hold-off time in us, or 0 to disable timer
+ *	@cnt: the hold-off packet count, or 0 to disable counter
+ *
+ *	Sets an RX response queue's interrupt hold-off time and packet count.
+ *	At least one of the two needs to be enabled for the queue to generate
+ *	interrupts.
+ */
+static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
+			       unsigned int us, unsigned int cnt)
+{
+	unsigned int timer_idx;
+
+	/*
+	 * If both the interrupt holdoff timer and count are specified as
+	 * zero, default to a holdoff count of 1 ...
+	 */
+	if ((us | cnt) == 0)
+		cnt = 1;
+
+	/*
+	 * If an interrupt holdoff count has been specified, then find the
+	 * closest configured holdoff count and use that.  If the response
+	 * queue has already been created, then update its queue context
+	 * parameters ...
+	 */
+	if (cnt) {
+		int err;
+		u32 v, pktcnt_idx;
+
+		pktcnt_idx = closest_thres(&adapter->sge, cnt);
+		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
+			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+			    FW_PARAMS_PARAM_X(
+					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+			    FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
+			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
+			if (err)
+				return err;
+		}
+		rspq->pktcnt_idx = pktcnt_idx;
+	}
+
+	/*
+	 * Compute the closest holdoff timer index from the supplied holdoff
+	 * timer value.
+	 */
+	timer_idx = (us == 0
+		     ? SGE_TIMER_RSTRT_CNTR
+		     : closest_timer(&adapter->sge, us));
+
+	/*
+	 * Update the response queue's interrupt coalescing parameters and
+	 * return success.
+	 */
+	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
+			     (cnt > 0 ? QINTR_CNT_EN : 0));
+	return 0;
+}
+
+/*
+ * Return a version number to identify the type of adapter.  The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ */
+static inline unsigned int mk_adap_vers(const struct adapter *adapter)
+{
+	/*
+	 * Chip version 4, revision 0x3f (cxgb4vf).
+	 */
+	return 4 | (0x3f << 10);
+}
+
+/*
+ * Execute the specified ioctl command.
+ */
+static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	    /*
+	     * The VF Driver doesn't have access to any of the other
+	     * common Ethernet device ioctl()'s (like reading/writing
+	     * PHY registers, etc.)
+	     */
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * Change the device's MTU.
+ */
+static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* accommodate SACK */
+	if (new_mtu < 81)
+		return -EINVAL;
+
+	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
+			      -1, -1, -1, -1, true);
+	if (!ret)
+		dev->mtu = new_mtu;
+	return ret;
+}
+
+static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+{
+	/*
+	 * Since there is no support for separate rx/tx vlan accel
+	 * enable/disable make sure tx flag is always in same state as rx.
+	 */
+	if (features & NETIF_F_HW_VLAN_RX)
+		features |= NETIF_F_HW_VLAN_TX;
+	else
+		features &= ~NETIF_F_HW_VLAN_TX;
+
+	return features;
+}
+
+static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+{
+	struct port_info *pi = netdev_priv(dev);
+	u32 changed = dev->features ^ features;
+
+	if (changed & NETIF_F_HW_VLAN_RX)
+		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
+				features & NETIF_F_HW_VLAN_TX, 0);
+
+	return 0;
+}
+
+/*
+ * Change the device's MAC address.
+ */
+static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
+{
+	int ret;
+	struct sockaddr *addr = _addr;
+	struct port_info *pi = netdev_priv(dev);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
+			      addr->sa_data, true);
+	if (ret < 0)
+		return ret;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	pi->xact_addr_filt = ret;
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Poll all of our receive queues.  This is called outside of normal interrupt
+ * context.
+ */ +static void cxgb4vf_poll_controller(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (adapter->flags & USING_MSIX) { + struct sge_eth_rxq *rxq; + int nqsets; + + rxq = &adapter->sge.ethrxq[pi->first_qset]; + for (nqsets = pi->nqsets; nqsets; nqsets--) { + t4vf_sge_intr_msix(0, &rxq->rspq); + rxq++; + } + } else + t4vf_intr_handler(adapter)(0, adapter); +} +#endif + +/* + * Ethtool operations. + * =================== + * + * Note that we don't support any ethtool operations which change the physical + * state of the port to which we're linked. + */ + +/* + * Return current port link settings. + */ +static int cxgb4vf_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + const struct port_info *pi = netdev_priv(dev); + + cmd->supported = pi->link_cfg.supported; + cmd->advertising = pi->link_cfg.advertising; + ethtool_cmd_speed_set(cmd, + netif_carrier_ok(dev) ? pi->link_cfg.speed : -1); + cmd->duplex = DUPLEX_FULL; + + cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; + cmd->phy_address = pi->port_id; + cmd->transceiver = XCVR_EXTERNAL; + cmd->autoneg = pi->link_cfg.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +/* + * Return our driver information. + */ +static void cxgb4vf_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct adapter *adapter = netdev2adap(dev); + + strcpy(drvinfo->driver, KBUILD_MODNAME); + strcpy(drvinfo->version, DRV_VERSION); + strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent))); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u.%u.%u, TP %u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev), + FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev), + FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev), + FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev), + FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev)); +} + +/* + * Return current adapter message level. + */ +static u32 cxgb4vf_get_msglevel(struct net_device *dev) +{ + return netdev2adap(dev)->msg_enable; +} + +/* + * Set current adapter message level. + */ +static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel) +{ + netdev2adap(dev)->msg_enable = msglevel; +} + +/* + * Return the device's current Queue Set ring size parameters along with the + * allowed maximum values. Since ethtool doesn't understand the concept of + * multi-queue devices, we just return the current values associated with the + * first Queue Set. + */ +static void cxgb4vf_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *rp) +{ + const struct port_info *pi = netdev_priv(dev); + const struct sge *s = &pi->adapter->sge; + + rp->rx_max_pending = MAX_RX_BUFFERS; + rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES; + rp->rx_jumbo_max_pending = 0; + rp->tx_max_pending = MAX_TXQ_ENTRIES; + + rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID; + rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; + rp->rx_jumbo_pending = 0; + rp->tx_pending = s->ethtxq[pi->first_qset].q.size; +} + +/* + * Set the Queue Set ring size parameters for the device. Again, since + * ethtool doesn't allow for the concept of multiple queues per device, we'll + * apply these new values across all of the Queue Sets associated with the + * device -- after vetting them of course! 
+ */ +static int cxgb4vf_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *rp) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + int qs; + + if (rp->rx_pending > MAX_RX_BUFFERS || + rp->rx_jumbo_pending || + rp->tx_pending > MAX_TXQ_ENTRIES || + rp->rx_mini_pending > MAX_RSPQ_ENTRIES || + rp->rx_mini_pending < MIN_RSPQ_ENTRIES || + rp->rx_pending < MIN_FL_ENTRIES || + rp->tx_pending < MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) { + s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID; + s->ethrxq[qs].rspq.size = rp->rx_mini_pending; + s->ethtxq[qs].q.size = rp->tx_pending; + } + return 0; +} + +/* + * Return the interrupt holdoff timer and count for the first Queue Set on the + * device. Our extension ioctl() (the cxgbtool interface) allows the + * interrupt holdoff timer to be read on all of the device's Queue Sets. + */ +static int cxgb4vf_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coalesce) +{ + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adapter = pi->adapter; + const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq; + + coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq); + coalesce->rx_max_coalesced_frames = + ((rspq->intr_params & QINTR_CNT_EN) + ? adapter->sge.counter_val[rspq->pktcnt_idx] + : 0); + return 0; +} + +/* + * Set the RX interrupt holdoff timer and count for the first Queue Set on the + * interface. Our extension ioctl() (the cxgbtool interface) allows us to set + * the interrupt holdoff timer on any of the device's Queue Sets. + */ +static int cxgb4vf_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coalesce) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + return set_rxq_intr_params(adapter, + &adapter->sge.ethrxq[pi->first_qset].rspq, + coalesce->rx_coalesce_usecs, + coalesce->rx_max_coalesced_frames); +} + +/* + * Report current port link pause parameter settings. + */ +static void cxgb4vf_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pauseparam) +{ + struct port_info *pi = netdev_priv(dev); + + pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; + pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0; + pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0; +} + +/* + * Identify the port by blinking the port's LED. + */ +static int cxgb4vf_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + unsigned int val; + struct port_info *pi = netdev_priv(dev); + + if (state == ETHTOOL_ID_ACTIVE) + val = 0xffff; + else if (state == ETHTOOL_ID_INACTIVE) + val = 0; + else + return -EINVAL; + + return t4vf_identify_port(pi->adapter, pi->viid, val); +} + +/* + * Port stats maintained per queue of the port. + */ +struct queue_port_stats { + u64 tso; + u64 tx_csum; + u64 rx_csum; + u64 vlan_ex; + u64 vlan_ins; + u64 lro_pkts; + u64 lro_merged; +}; + +/* + * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that + * these need to match the order of statistics returned by + * t4vf_get_port_stats(). + */ +static const char stats_strings[][ETH_GSTRING_LEN] = { + /* + * These must match the layout of the t4vf_port_stats structure. 
+ */ + "TxBroadcastBytes ", + "TxBroadcastFrames ", + "TxMulticastBytes ", + "TxMulticastFrames ", + "TxUnicastBytes ", + "TxUnicastFrames ", + "TxDroppedFrames ", + "TxOffloadBytes ", + "TxOffloadFrames ", + "RxBroadcastBytes ", + "RxBroadcastFrames ", + "RxMulticastBytes ", + "RxMulticastFrames ", + "RxUnicastBytes ", + "RxUnicastFrames ", + "RxErrorFrames ", + + /* + * These are accumulated per-queue statistics and must match the + * order of the fields in the queue_port_stats structure. + */ + "TSO ", + "TxCsumOffload ", + "RxCsumGood ", + "VLANextractions ", + "VLANinsertions ", + "GROPackets ", + "GROMerged ", +}; + +/* + * Return the number of statistics in the specified statistics set. + */ +static int cxgb4vf_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } + /*NOTREACHED*/ +} + +/* + * Return the strings for the specified statistics set. + */ +static void cxgb4vf_get_strings(struct net_device *dev, + u32 sset, + u8 *data) +{ + switch (sset) { + case ETH_SS_STATS: + memcpy(data, stats_strings, sizeof(stats_strings)); + break; + } +} + +/* + * Small utility routine to accumulate queue statistics across the queues of + * a "port". + */ +static void collect_sge_port_stats(const struct adapter *adapter, + const struct port_info *pi, + struct queue_port_stats *stats) +{ + const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset]; + const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; + int qs; + + memset(stats, 0, sizeof(*stats)); + for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { + stats->tso += txq->tso; + stats->tx_csum += txq->tx_cso; + stats->rx_csum += rxq->stats.rx_cso; + stats->vlan_ex += rxq->stats.vlan_ex; + stats->vlan_ins += txq->vlan_ins; + stats->lro_pkts += rxq->stats.lro_pkts; + stats->lro_merged += rxq->stats.lro_merged; + } +} + +/* + * Return the ETH_SS_STATS statistics set. + */ +static void cxgb4vf_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adapter = pi->adapter; + int err = t4vf_get_port_stats(adapter, pi->pidx, + (struct t4vf_port_stats *)data); + if (err) + memset(data, 0, sizeof(struct t4vf_port_stats)); + + data += sizeof(struct t4vf_port_stats) / sizeof(u64); + collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); +} + +/* + * Return the size of our register map. + */ +static int cxgb4vf_get_regs_len(struct net_device *dev) +{ + return T4VF_REGMAP_SIZE; +} + +/* + * Dump a block of registers, start to end inclusive, into a buffer. + */ +static void reg_block_dump(struct adapter *adapter, void *regbuf, + unsigned int start, unsigned int end) +{ + u32 *bp = regbuf + start - T4VF_REGMAP_START; + + for ( ; start <= end; start += sizeof(u32)) { + /* + * Avoid reading the Mailbox Control register since that + * can trigger a Mailbox Ownership Arbitration cycle and + * interfere with communication with the firmware. + */ + if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL) + *bp++ = 0xffff; + else + *bp++ = t4_read_reg(adapter, start); + } +} + +/* + * Copy our entire register map into the provided buffer. + */ +static void cxgb4vf_get_regs(struct net_device *dev, + struct ethtool_regs *regs, + void *regbuf) +{ + struct adapter *adapter = netdev2adap(dev); + + regs->version = mk_adap_vers(adapter); + + /* + * Fill in register buffer with our register map. 
+ */
+	memset(regbuf, 0, T4VF_REGMAP_SIZE);
+
+	reg_block_dump(adapter, regbuf,
+		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
+		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
+	reg_block_dump(adapter, regbuf,
+		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
+		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
+	reg_block_dump(adapter, regbuf,
+		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
+		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
+	reg_block_dump(adapter, regbuf,
+		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
+		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
+
+	reg_block_dump(adapter, regbuf,
+		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
+		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
+}
+
+/*
+ * Report current Wake On LAN settings.
+ */
+static void cxgb4vf_get_wol(struct net_device *dev,
+			    struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/*
+ * TCP Segmentation Offload flags which we support.
+ */
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+static struct ethtool_ops cxgb4vf_ethtool_ops = {
+	.get_settings		= cxgb4vf_get_settings,
+	.get_drvinfo		= cxgb4vf_get_drvinfo,
+	.get_msglevel		= cxgb4vf_get_msglevel,
+	.set_msglevel		= cxgb4vf_set_msglevel,
+	.get_ringparam		= cxgb4vf_get_ringparam,
+	.set_ringparam		= cxgb4vf_set_ringparam,
+	.get_coalesce		= cxgb4vf_get_coalesce,
+	.set_coalesce		= cxgb4vf_set_coalesce,
+	.get_pauseparam		= cxgb4vf_get_pauseparam,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= cxgb4vf_get_strings,
+	.set_phys_id		= cxgb4vf_phys_id,
+	.get_sset_count		= cxgb4vf_get_sset_count,
+	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
+	.get_regs_len		= cxgb4vf_get_regs_len,
+	.get_regs		= cxgb4vf_get_regs,
+	.get_wol		= cxgb4vf_get_wol,
+};
+
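These callbacks are reached from user space through the SIOCETHTOOL ioctl. As a rough sketch of that plumbing, this is how a tool could query the coalescing parameters serviced by cxgb4vf_get_coalesce(); the interface name is illustrative and error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket will do */

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative name */

	memset(&ec, 0, sizeof(ec));
	ec.cmd = ETHTOOL_GCOALESCE;	/* routed to the driver's .get_coalesce */
	ifr.ifr_data = (void *)&ec;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-usecs=%u rx-frames=%u\n",
		       ec.rx_coalesce_usecs, ec.rx_max_coalesced_frames);
	close(fd);
	return 0;
}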
+/*
+ * /sys/kernel/debug/cxgb4vf support code and data.
+ * ================================================
+ */
+
+/*
+ * Show SGE Queue Set information.  We display QPL Queue Sets per line.
+ */
+#define QPL	4
+
+static int sge_qinfo_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
+	int qs, r = (uintptr_t)v - 1;
+
+	if (r)
+		seq_putc(seq, '\n');
+
+	#define S3(fmt_spec, s, v) \
+	do {\
+		seq_printf(seq, "%-12s", s); \
+		for (qs = 0; qs < n; ++qs) \
+			seq_printf(seq, " %16" fmt_spec, v); \
+		seq_putc(seq, '\n'); \
+	} while (0)
+	#define S(s, v)		S3("s", s, v)
+	#define T(s, v)		S3("u", s, txq[qs].v)
+	#define R(s, v)		S3("u", s, rxq[qs].v)
+
+	if (r < eth_entries) {
+		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
+		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
+		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
+
+		S("QType:", "Ethernet");
+		S("Interface:",
+		  (rxq[qs].rspq.netdev
+		   ? rxq[qs].rspq.netdev->name
+		   : "N/A"));
+		S3("d", "Port:",
+		   (rxq[qs].rspq.netdev
+		    ? ((struct port_info *)
+		       netdev_priv(rxq[qs].rspq.netdev))->port_id
+		    : -1));
+		T("TxQ ID:", q.abs_id);
+		T("TxQ size:", q.size);
+		T("TxQ inuse:", q.in_use);
+		T("TxQ PIdx:", q.pidx);
+		T("TxQ CIdx:", q.cidx);
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
+		S3("u", "Intr pktcnt:",
+		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
+		R("RspQ CIdx:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		R("FL ID:", fl.abs_id);
+		R("FL size:", fl.size - MIN_FL_RESID);
+		R("FL avail:", fl.avail);
+		R("FL PIdx:", fl.pidx);
+		R("FL CIdx:", fl.cidx);
+		return 0;
+	}
+
+	r -= eth_entries;
+	if (r == 0) {
+		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
+
+		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
+		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
+		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+			   qtimer_val(adapter, evtq));
+		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+			   adapter->sge.counter_val[evtq->pktcnt_idx]);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
+	} else if (r == 1) {
+		const struct sge_rspq *intrq = &adapter->sge.intrq;
+
+		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
+		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
+		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+			   qtimer_val(adapter, intrq));
+		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+			   adapter->sge.counter_val[intrq->pktcnt_idx]);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
+		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
+	}
+
+	#undef R
+	#undef T
+	#undef S
+	#undef S3
+
+	return 0;
+}
+
+/*
+ * Return the number of "entries" in our "file".  We group the multi-Queue
+ * sections with QPL Queue Sets per "entry".  The sections of the output are:
+ *
+ *     Ethernet RX/TX Queue Sets
+ *     Firmware Event Queue
+ *     Forwarded Interrupt Queue (if in MSI mode)
+ */
+static int sge_queue_entries(const struct adapter *adapter)
+{
+	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
+		((adapter->flags & USING_MSI) != 0);
+}
+
+static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
+{
+	int entries = sge_queue_entries(seq->private);
+
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_queue_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int entries = sge_queue_entries(seq->private);
+
+	++*pos;
+	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qinfo_seq_ops = {
+	.start = sge_queue_start,
+	.next  = sge_queue_next,
+	.stop  = sge_queue_stop,
+	.show  = sge_qinfo_show
+};
+
+static int sge_qinfo_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &sge_qinfo_seq_ops);
+
+	if (!res) {
+		struct seq_file *seq = file->private_data;
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations sge_qinfo_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = sge_qinfo_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * Show SGE Queue Set statistics.  We display QPL Queue Sets per line.
+ */ +#define QPL 4 + +static int sge_qstats_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); + int qs, r = (uintptr_t)v - 1; + + if (r) + seq_putc(seq, '\n'); + + #define S3(fmt, s, v) \ + do { \ + seq_printf(seq, "%-16s", s); \ + for (qs = 0; qs < n; ++qs) \ + seq_printf(seq, " %8" fmt, v); \ + seq_putc(seq, '\n'); \ + } while (0) + #define S(s, v) S3("s", s, v) + + #define T3(fmt, s, v) S3(fmt, s, txq[qs].v) + #define T(s, v) T3("lu", s, v) + + #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v) + #define R(s, v) R3("lu", s, v) + + if (r < eth_entries) { + const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; + const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; + int n = min(QPL, adapter->sge.ethqsets - QPL * r); + + S("QType:", "Ethernet"); + S("Interface:", + (rxq[qs].rspq.netdev + ? rxq[qs].rspq.netdev->name + : "N/A")); + R3("u", "RspQNullInts:", rspq.unhandled_irqs); + R("RxPackets:", stats.pkts); + R("RxCSO:", stats.rx_cso); + R("VLANxtract:", stats.vlan_ex); + R("LROmerged:", stats.lro_merged); + R("LROpackets:", stats.lro_pkts); + R("RxDrops:", stats.rx_drops); + T("TSO:", tso); + T("TxCSO:", tx_cso); + T("VLANins:", vlan_ins); + T("TxQFull:", q.stops); + T("TxQRestarts:", q.restarts); + T("TxMapErr:", mapping_err); + R("FLAllocErr:", fl.alloc_failed); + R("FLLrgAlcErr:", fl.large_alloc_failed); + R("FLStarving:", fl.starving); + return 0; + } + + r -= eth_entries; + if (r == 0) { + const struct sge_rspq *evtq = &adapter->sge.fw_evtq; + + seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue"); + seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", + evtq->unhandled_irqs); + seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx); + seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen); + } else if (r == 1) { + const struct sge_rspq *intrq = &adapter->sge.intrq; + + seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue"); + seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", + intrq->unhandled_irqs); + seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx); + seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen); + } + + #undef R + #undef T + #undef S + #undef R3 + #undef T3 + #undef S3 + + return 0; +} + +/* + * Return the number of "entries" in our "file". We group the multi-Queue + * sections with QPL Queue Sets per "entry". The sections of the output are: + * + * Ethernet RX/TX Queue Sets + * Firmware Event Queue + * Forwarded Interrupt Queue (if in MSI mode) + */ +static int sge_qstats_entries(const struct adapter *adapter) +{ + return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + + ((adapter->flags & USING_MSI) != 0); +} + +static void *sge_qstats_start(struct seq_file *seq, loff_t *pos) +{ + int entries = sge_qstats_entries(seq->private); + + return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; +} + +static void sge_qstats_stop(struct seq_file *seq, void *v) +{ +} + +static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos) +{ + int entries = sge_qstats_entries(seq->private); + + (*pos)++; + return *pos < entries ? 
(void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qstats_seq_ops = {
+	.start = sge_qstats_start,
+	.next  = sge_qstats_next,
+	.stop  = sge_qstats_stop,
+	.show  = sge_qstats_show
+};
+
+static int sge_qstats_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &sge_qstats_seq_ops);
+
+	if (res == 0) {
+		struct seq_file *seq = file->private_data;
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations sge_qstats_proc_fops = {
+	.owner   = THIS_MODULE,
+	.open    = sge_qstats_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * Show PCI-E SR-IOV Virtual Function Resource Limits.
+ */
+static int resources_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	struct vf_resources *vfres = &adapter->params.vfres;
+
+	#define S(desc, fmt, var) \
+		seq_printf(seq, "%-60s " fmt "\n", \
+			   desc " (" #var "):", vfres->var)
+
+	S("Virtual Interfaces", "%d", nvi);
+	S("Egress Queues", "%d", neq);
+	S("Ethernet Control", "%d", nethctrl);
+	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
+	S("Ingress Queues", "%d", niq);
+	S("Traffic Class", "%d", tc);
+	S("Port Access Rights Mask", "%#x", pmask);
+	S("MAC Address Filters", "%d", nexactf);
+	S("Firmware Command Read Capabilities", "%#x", r_caps);
+	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
+
+	#undef S
+
+	return 0;
+}
+
+static int resources_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, resources_show, inode->i_private);
+}
+
+static const struct file_operations resources_proc_fops = {
+	.owner   = THIS_MODULE,
+	.open    = resources_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * Show Virtual Interfaces.
+ */
+static int interfaces_show(struct seq_file *seq, void *v)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Interface  Port   VIID\n");
+	} else {
+		struct adapter *adapter = seq->private;
+		int pidx = (uintptr_t)v - 2;
+		struct net_device *dev = adapter->port[pidx];
+		struct port_info *pi = netdev_priv(dev);
+
+		seq_printf(seq, "%9s  %4d  %#5x\n",
+			   dev->name, pi->port_id, pi->viid);
+	}
+	return 0;
+}
+
+static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
+{
+	return pos <= adapter->params.nports
+		? (void *)(uintptr_t)(pos + 1)
+		: NULL;
+}
+
+static void *interfaces_start(struct seq_file *seq, loff_t *pos)
+{
+	return *pos
+		? interfaces_get_idx(seq->private, *pos)
+		: SEQ_START_TOKEN;
+}
+
+static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return interfaces_get_idx(seq->private, *pos);
+}
+
+static void interfaces_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations interfaces_seq_ops = {
+	.start = interfaces_start,
+	.next  = interfaces_next,
+	.stop  = interfaces_stop,
+	.show  = interfaces_show
+};
+
+static int interfaces_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &interfaces_seq_ops);
+
+	if (res == 0) {
+		struct seq_file *seq = file->private_data;
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations interfaces_proc_fops = {
+	.owner   = THIS_MODULE,
+	.open    = interfaces_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
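These debugfs tables all lean on the same seq_file idiom: the iterator cookie is the row index biased by a small constant (+1 here, +2 where a SEQ_START_TOKEN header row is involved) so that row 0 never becomes a NULL pointer, which the seq_file core would take as end-of-iteration. A user-space sketch of the biased-cookie walk, with the kernel callbacks reduced to plain functions (names illustrative):

#include <stdio.h>
#include <stdint.h>

#define NROWS 3	/* stand-in for sge_queue_entries() */

/* Return an opaque, non-NULL cookie for row 'pos', or NULL at EOF. */
static void *start(long pos)
{
	return pos < NROWS ? (void *)((uintptr_t)pos + 1) : NULL;
}

static void *next(long *pos)
{
	++*pos;
	return start(*pos);
}

static void show(void *v)
{
	long row = (uintptr_t)v - 1;	/* undo the +1 bias */
	printf("row %ld\n", row);
}

int main(void)
{
	long pos = 0;
	void *v;

	/* The seq_file core drives start/show/next in exactly this shape. */
	for (v = start(pos); v != NULL; v = next(&pos))
		show(v);
	return 0;
}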
+/*
+ * /sys/kernel/debug/cxgb4vf/ files list.
+ */
+struct cxgb4vf_debugfs_entry {
+	const char *name;		/* name of debugfs node */
+	mode_t mode;			/* file system mode */
+	const struct file_operations *fops;
+};
+
+static struct cxgb4vf_debugfs_entry debugfs_files[] = {
+	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
+	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
+	{ "resources",  S_IRUGO, &resources_proc_fops },
+	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
+};
+
+/*
+ * Module and device initialization and cleanup code.
+ * ==================================================
+ */
+
+/*
+ * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
+ * directory (debugfs_root) has already been set up.
+ */
+static int __devinit setup_debugfs(struct adapter *adapter)
+{
+	int i;
+
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
+
+	/*
+	 * Debugfs support is best effort.
+	 */
+	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+		(void)debugfs_create_file(debugfs_files[i].name,
+				  debugfs_files[i].mode,
+				  adapter->debugfs_root,
+				  (void *)adapter,
+				  debugfs_files[i].fops);
+
+	return 0;
+}
+
+/*
+ * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
+ * it to our caller to tear down the directory (debugfs_root).
+ */
+static void cleanup_debugfs(struct adapter *adapter)
+{
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
+
+	/*
+	 * Unlike our sister routine cleanup_proc(), we don't need to remove
+	 * individual entries because a call will be made to
+	 * debugfs_remove_recursive().  We just need to clean up any ancillary
+	 * persistent state.
+	 */
+	/* nothing to do */
+}
+
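The IS_ERR_OR_NULL() checks around the debugfs handling cover two distinct failure encodings: a NULL return (for example, when debugfs is compiled out) and an error code folded into the top of the pointer range. A stripped-down user-space rendering of that encoding, mirroring the kernel's err.h convention (MAX_ERRNO and the casts follow that convention; treat the exact details as an assumption in this sketch):

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO	4095

/* Encode a small negative errno in the top of the address space. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	void *ok = &ok;			/* any real pointer */
	void *err = ERR_PTR(-12);	/* -ENOMEM */
	void *off = NULL;		/* subsystem compiled out */

	printf("ok:  %d\n", IS_ERR_OR_NULL(ok));		/* 0 */
	printf("err: %d (%ld)\n", IS_ERR_OR_NULL(err), PTR_ERR(err));
	printf("off: %d\n", IS_ERR_OR_NULL(off));		/* 1 */
	return 0;
}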
+/*
+ * Perform early "adapter" initialization.  This is where we discover what
+ * adapter parameters we're going to be using and initialize basic adapter
+ * hardware support.
+ */
+static int __devinit adap_init0(struct adapter *adapter)
+{
+	struct vf_resources *vfres = &adapter->params.vfres;
+	struct sge_params *sge_params = &adapter->params.sge;
+	struct sge *s = &adapter->sge;
+	unsigned int ethqsets;
+	int err;
+
+	/*
+	 * Wait for the device to become ready before proceeding ...
+	 */
+	err = t4vf_wait_dev_ready(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "device didn't become ready:"
+			" err=%d\n", err);
+		return err;
+	}
+
+	/*
+	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+	 * 2.6.31 and later we can't call pci_reset_function() in order to
+	 * issue an FLR because of a self-deadlock on the device semaphore.
+	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+	 * cases where they're needed -- for instance, some versions of KVM
+	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
+	 * use the firmware based reset in order to reset any per function
+	 * state.
+	 */
+	err = t4vf_fw_reset(adapter);
+	if (err < 0) {
+		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+		return err;
+	}
+
+	/*
+	 * Grab basic operational parameters.  These will predominantly have
+	 * been set up by the Physical Function Driver or will be hard coded
+	 * into the adapter.  We just have to live with them ...  Note that
+	 * we _must_ get our VPD parameters before our SGE parameters because
+	 * we need to know the adapter's core clock from the VPD in order to
+	 * properly decode the SGE Timer Values.
+	 */
+	err = t4vf_get_dev_params(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" device parameters: err=%d\n", err);
+		return err;
+	}
+	err = t4vf_get_vpd_params(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" VPD parameters: err=%d\n", err);
+		return err;
+	}
+	err = t4vf_get_sge_params(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" SGE parameters: err=%d\n", err);
+		return err;
+	}
+	err = t4vf_get_rss_glb_config(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+			" RSS parameters: err=%d\n", err);
+		return err;
+	}
+	if (adapter->params.rss.mode !=
+	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
+			" mode %d\n", adapter->params.rss.mode);
+		return -EINVAL;
+	}
+	err = t4vf_sge_init(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
+			" err=%d\n", err);
+		return err;
+	}
+
+	/*
+	 * Retrieve our RX interrupt holdoff timer values and counter
+	 * threshold values from the SGE parameters.
+	 */
+	s->timer_val[0] = core_ticks_to_us(adapter,
+		TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
+	s->timer_val[1] = core_ticks_to_us(adapter,
+		TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
+	s->timer_val[2] = core_ticks_to_us(adapter,
+		TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
+	s->timer_val[3] = core_ticks_to_us(adapter,
+		TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
+	s->timer_val[4] = core_ticks_to_us(adapter,
+		TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
+	s->timer_val[5] = core_ticks_to_us(adapter,
+		TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
+
+	s->counter_val[0] =
+		THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[1] =
+		THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[2] =
+		THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
+	s->counter_val[3] =
+		THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
+
+	/*
+	 * Grab our Virtual Interface resource allocation, extract the
+	 * features that we're interested in and do a bit of sanity testing on
+	 * what we discover.
+	 */
+	err = t4vf_get_vfres(adapter);
+	if (err) {
+		dev_err(adapter->pdev_dev, "unable to get virtual interface"
+			" resources: err=%d\n", err);
+		return err;
+	}
+
+	/*
+	 * The number of "ports" which we support is equal to the number of
+	 * Virtual Interfaces with which we've been provisioned.
+	 */
+	adapter->params.nports = vfres->nvi;
+	if (adapter->params.nports > MAX_NPORTS) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
+			 " virtual interfaces\n", MAX_NPORTS,
+			 adapter->params.nports);
+		adapter->params.nports = MAX_NPORTS;
+	}
+
+	/*
+	 * We need to reserve a number of the ingress queues with Free List
+	 * and Interrupt capabilities for special interrupt purposes (like
+	 * asynchronous firmware messages, or forwarded interrupts if we're
+	 * using MSI).  The rest of the FL/Intr-capable ingress queues will be
+	 * matched up one-for-one with Ethernet/Control egress queues in order
+	 * to form "Queue Sets" which will be apportioned between the "ports".
+	 * For each Queue Set, we'll need the ability to allocate two Egress
+	 * Contexts -- one for the Ingress Queue Free List and one for the TX
+	 * Ethernet Queue.
+	 */
+	ethqsets = vfres->niqflint - INGQ_EXTRAS;
+	if (vfres->nethctrl != ethqsets) {
+		dev_warn(adapter->pdev_dev, "unequal number of [available]"
+			 " ingress/egress queues (%d/%d); using minimum for"
+			 " number of Queue Sets\n", ethqsets, vfres->nethctrl);
+		ethqsets = min(vfres->nethctrl, ethqsets);
+	}
+	if (vfres->neq < ethqsets*2) {
+		dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
+			 " to support Queue Sets (%d); reducing allowed Queue"
+			 " Sets\n", vfres->neq, ethqsets);
+		ethqsets = vfres->neq/2;
+	}
+	if (ethqsets > MAX_ETH_QSETS) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
+			 " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets);
+		ethqsets = MAX_ETH_QSETS;
+	}
+	if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
+		dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
+			 " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
+	}
+	adapter->sge.max_ethqsets = ethqsets;
+
+	/*
+	 * Check for various parameter sanity issues.  Most checks simply
+	 * result in us using fewer resources than our provisioning but we
+	 * do need at least one "port" with which to work ...
+	 */
+	if (adapter->sge.max_ethqsets < adapter->params.nports) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d available"
+			 " virtual interfaces (too few Queue Sets)\n",
+			 adapter->sge.max_ethqsets, adapter->params.nports);
+		adapter->params.nports = adapter->sge.max_ethqsets;
+	}
+	if (adapter->params.nports == 0) {
+		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
+			"usable!\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
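The net effect of the clamping in adap_init0() is ethqsets = min(niqflint - INGQ_EXTRAS, nethctrl, neq / 2, MAX_ETH_QSETS). A tiny worked sketch with illustrative resource numbers (not real provisioning values; the INGQ_EXTRAS and MAX_ETH_QSETS definitions here are placeholders):

#include <stdio.h>

#define INGQ_EXTRAS	2	/* illustrative: firmware event + forwarded-intr queues */
#define MAX_ETH_QSETS	32	/* illustrative compile-time cap */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical VF provisioning. */
	unsigned int niqflint = 10;	/* FL/interrupt-capable ingress queues */
	unsigned int nethctrl = 8;	/* Ethernet control (egress) queues */
	unsigned int neq = 14;		/* egress contexts (2 per Queue Set) */

	unsigned int ethqsets = niqflint - INGQ_EXTRAS;	/* 8 */
	ethqsets = min_u(ethqsets, nethctrl);		/* still 8 */
	ethqsets = min_u(ethqsets, neq / 2);		/* 7: egress contexts limit */
	ethqsets = min_u(ethqsets, MAX_ETH_QSETS);
	printf("usable Queue Sets: %u\n", ethqsets);	/* 7 */
	return 0;
}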
+static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
+			     u8 pkt_cnt_idx, unsigned int size,
+			     unsigned int iqe_size)
+{
+	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
+			     (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
+	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
+			    ? pkt_cnt_idx
+			    : 0);
+	rspq->iqe_len = iqe_size;
+	rspq->size = size;
+}
+
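init_rspq() packs the holdoff configuration into a single intr_params value. A toy rendering of that packing, assuming the layout implied by the QINTR_* accessors used elsewhere in this patch (count-enable in bit 0, a 3-bit timer index starting at bit 1); the exact bit positions are an assumption here, not a datasheet fact:

#include <stdio.h>

/* Assumed layout: bit 0 = count enable, bits 1..3 = holdoff timer index. */
#define QINTR_CNT_EN		0x1
#define QINTR_TIMER_IDX(x)	((x) << 1)
#define QINTR_TIMER_IDX_GET(x)	(((x) >> 1) & 0x7)

int main(void)
{
	unsigned int timer_idx = 5, cnt_en = 1;
	unsigned int intr_params = QINTR_TIMER_IDX(timer_idx) |
				   (cnt_en ? QINTR_CNT_EN : 0);

	printf("intr_params = %#x, timer_idx back = %u\n",
	       intr_params, QINTR_TIMER_IDX_GET(intr_params));	/* 0xb, 5 */
	return 0;
}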
+/*
+ * Perform default configuration of DMA queues depending on the number and
+ * type of ports we found and the number of available CPUs.  Most settings can
+ * be modified by the admin via ethtool and cxgbtool prior to the adapter
+ * being brought up for the first time.
+ */
+static void __devinit cfg_queues(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	int q10g, n10g, qidx, pidx, qs;
+	size_t iqe_size;
+
+	/*
+	 * We should not be called till we know how many Queue Sets we can
+	 * support.  In particular, this means that we need to know what kind
+	 * of interrupts we'll be using ...
+	 */
+	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+
+	/*
+	 * Count the number of 10GbE Virtual Interfaces that we have.
+	 */
+	n10g = 0;
+	for_each_port(adapter, pidx)
+		n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+
+	/*
+	 * We default to 1 queue per non-10G port and up to # of cores queues
+	 * per 10G port.
+	 */
+	if (n10g == 0)
+		q10g = 0;
+	else {
+		int n1g = (adapter->params.nports - n10g);
+		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
+		if (q10g > num_online_cpus())
+			q10g = num_online_cpus();
+	}
+
+	/*
+	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
+	 * The layout will be established in setup_sge_queues() when the
+	 * adapter is brought up for the first time.
+	 */
+	qidx = 0;
+	for_each_port(adapter, pidx) {
+		struct port_info *pi = adap2pinfo(adapter, pidx);
+
+		pi->first_qset = qidx;
+		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+		qidx += pi->nqsets;
+	}
+	s->ethqsets = qidx;
+
+	/*
+	 * The Ingress Queue Entry Size for our various Response Queues needs
+	 * to be big enough to accommodate the largest message we can receive
+	 * from the chip/firmware; which is 64 bytes ...
+	 */
+	iqe_size = 64;
+
+	/*
+	 * Set up default Queue Set parameters ...  Start off with the
+	 * shortest interrupt holdoff timer.
+	 */
+	for (qs = 0; qs < s->max_ethqsets; qs++) {
+		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
+		struct sge_eth_txq *txq = &s->ethtxq[qs];
+
+		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
+		rxq->fl.size = 72;
+		txq->q.size = 1024;
+	}
+
+	/*
+	 * The firmware event queue is used for link state changes and
+	 * notifications of TX DMA completions.
+	 */
+	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
+
+	/*
+	 * The forwarded interrupt queue is used when we're in MSI interrupt
+	 * mode.  In this mode all interrupts associated with RX queues will
+	 * be forwarded to a single queue which we'll associate with our MSI
+	 * interrupt vector.  The messages dropped in the forwarded interrupt
+	 * queue will indicate which ingress queue needs servicing ...  This
+	 * queue needs to be large enough to accommodate all of the ingress
+	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
+	 * from equalling the CIDX if every ingress queue has an outstanding
+	 * interrupt).  The queue doesn't need to be any larger because no
+	 * ingress queue will ever have more than one outstanding interrupt at
+	 * any time ...
+	 */
+	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
+		  iqe_size);
+}
+
+/*
+ * Reduce the number of Ethernet queues across all ports to at most n.
+ * n provides at least one queue per port.
+ */
+static void __devinit reduce_ethqs(struct adapter *adapter, int n)
+{
+	int i;
+	struct port_info *pi;
+
+	/*
+	 * While we have too many active Ether Queue Sets, iterate across the
+	 * "ports" and reduce their individual Queue Set allocations.
+	 */
+	BUG_ON(n < adapter->params.nports);
+	while (n < adapter->sge.ethqsets)
+		for_each_port(adapter, i) {
+			pi = adap2pinfo(adapter, i);
+			if (pi->nqsets > 1) {
+				pi->nqsets--;
+				adapter->sge.ethqsets--;
+				if (adapter->sge.ethqsets <= n)
+					break;
+			}
+		}
+
+	/*
+	 * Reassign the starting Queue Sets for each of the "ports" ...
+	 */
+	n = 0;
+	for_each_port(adapter, i) {
+		pi = adap2pinfo(adapter, i);
+		pi->first_qset = n;
+		n += pi->nqsets;
+	}
+}
+
+/*
+ * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
+ * we get a separate MSI-X vector for every "Queue Set" plus any extras we
+ * need.  Minimally we need one for every Virtual Interface plus those needed
+ * for our "extras".  Note that this process may lower the maximum number of
+ * allowed Queue Sets ...
+ */
+static int __devinit enable_msix(struct adapter *adapter)
+{
+	int i, err, want, need;
+	struct msix_entry entries[MSIX_ENTRIES];
+	struct sge *s = &adapter->sge;
+
+	for (i = 0; i < MSIX_ENTRIES; ++i)
+		entries[i].entry = i;
+
+	/*
+	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
+	 * plus those needed for our "extras" (for example, the firmware
+	 * message queue).  We _need_ at least one "Queue Set" per Virtual
+	 * Interface plus those needed for our "extras".  So now we get to see
+	 * if the song is right ...
+ */ + want = s->max_ethqsets + MSIX_EXTRAS; + need = adapter->params.nports + MSIX_EXTRAS; + while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need) + want = err; + + if (err == 0) { + int nqsets = want - MSIX_EXTRAS; + if (nqsets < s->max_ethqsets) { + dev_warn(adapter->pdev_dev, "only enough MSI-X vectors" + " for %d Queue Sets\n", nqsets); + s->max_ethqsets = nqsets; + if (nqsets < s->ethqsets) + reduce_ethqs(adapter, nqsets); + } + for (i = 0; i < want; ++i) + adapter->msix_info[i].vec = entries[i].vector; + } else if (err > 0) { + pci_disable_msix(adapter->pdev); + dev_info(adapter->pdev_dev, "only %d MSI-X vectors left," + " not using MSI-X\n", err); + } + return err; +} + +static const struct net_device_ops cxgb4vf_netdev_ops = { + .ndo_open = cxgb4vf_open, + .ndo_stop = cxgb4vf_stop, + .ndo_start_xmit = t4vf_eth_xmit, + .ndo_get_stats = cxgb4vf_get_stats, + .ndo_set_rx_mode = cxgb4vf_set_rxmode, + .ndo_set_mac_address = cxgb4vf_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = cxgb4vf_do_ioctl, + .ndo_change_mtu = cxgb4vf_change_mtu, + .ndo_fix_features = cxgb4vf_fix_features, + .ndo_set_features = cxgb4vf_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cxgb4vf_poll_controller, +#endif +}; + +/* + * "Probe" a device: initialize a device and construct all kernel and driver + * state needed to manage the device. This routine is called "init_one" in + * the PF Driver ... + */ +static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + static int version_printed; + + int pci_using_dac; + int err, pidx; + unsigned int pmask; + struct adapter *adapter; + struct port_info *pi; + struct net_device *netdev; + + /* + * Print our driver banner the first time we're called to initialize a + * device. + */ + if (version_printed == 0) { + printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); + version_printed = 1; + } + + /* + * Initialize generic PCI device state. + */ + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + return err; + } + + /* + * Reserve PCI resources for the device. If we can't get them some + * other driver may have already claimed the device ... + */ + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_disable_device; + } + + /* + * Set up our DMA mask: try for 64-bit address masking first and + * fall back to 32-bit if we can't get 64 bits ... + */ + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err == 0) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" + " coherent allocations\n"); + goto err_release_regions; + } + pci_using_dac = 1; + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err != 0) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_release_regions; + } + pci_using_dac = 0; + } + + /* + * Enable bus mastering for the device ... + */ + pci_set_master(pdev); + + /* + * Allocate our adapter data structure and attach it to the device. + */ + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto err_release_regions; + } + pci_set_drvdata(pdev, adapter); + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + + /* + * Initialize SMP data synchronization resources. + */ + spin_lock_init(&adapter->stats_lock); + + /* + * Map our I/O registers in BAR0. 
+ */ + adapter->regs = pci_ioremap_bar(pdev, 0); + if (!adapter->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto err_free_adapter; + } + + /* + * Initialize adapter level features. + */ + adapter->name = pci_name(pdev); + adapter->msg_enable = dflt_msg_enable; + err = adap_init0(adapter); + if (err) + goto err_unmap_bar; + + /* + * Allocate our "adapter ports" and stitch everything together. + */ + pmask = adapter->params.vfres.pmask; + for_each_port(adapter, pidx) { + int port_id, viid; + + /* + * We simplistically allocate our virtual interfaces + * sequentially across the port numbers to which we have + * access rights. This should be configurable in some manner + * ... + */ + if (pmask == 0) + break; + port_id = ffs(pmask) - 1; + pmask &= ~(1 << port_id); + viid = t4vf_alloc_vi(adapter, port_id); + if (viid < 0) { + dev_err(&pdev->dev, "cannot allocate VI for port %d:" + " err=%d\n", port_id, viid); + err = viid; + goto err_free_dev; + } + + /* + * Allocate our network device and stitch things together. + */ + netdev = alloc_etherdev_mq(sizeof(struct port_info), + MAX_PORT_QSETS); + if (netdev == NULL) { + dev_err(&pdev->dev, "cannot allocate netdev for" + " port %d\n", port_id); + t4vf_free_vi(adapter, viid); + err = -ENOMEM; + goto err_free_dev; + } + adapter->port[pidx] = netdev; + SET_NETDEV_DEV(netdev, &pdev->dev); + pi = netdev_priv(netdev); + pi->adapter = adapter; + pi->pidx = pidx; + pi->port_id = port_id; + pi->viid = viid; + + /* + * Initialize the starting state of our "port" and register + * it. + */ + pi->xact_addr_filt = -1; + netif_carrier_off(netdev); + netdev->irq = pdev->irq; + + netdev->hw_features = NETIF_F_SG | TSO_FLAGS | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; + netdev->vlan_features = NETIF_F_SG | TSO_FLAGS | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HIGHDMA; + netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->netdev_ops = &cxgb4vf_netdev_ops; + SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops); + + /* + * Initialize the hardware/software state for the port. + */ + err = t4vf_port_init(adapter, pidx); + if (err) { + dev_err(&pdev->dev, "cannot initialize port %d\n", + pidx); + goto err_free_dev; + } + } + + /* + * The "card" is now ready to go. If any errors occur during device + * registration we do not fail the whole "card" but rather proceed + * only with the ports we manage to register successfully. However we + * must register at least one net device. + */ + for_each_port(adapter, pidx) { + netdev = adapter->port[pidx]; + if (netdev == NULL) + continue; + + err = register_netdev(netdev); + if (err) { + dev_warn(&pdev->dev, "cannot register net device %s," + " skipping\n", netdev->name); + continue; + } + + set_bit(pidx, &adapter->registered_device_map); + } + if (adapter->registered_device_map == 0) { + dev_err(&pdev->dev, "could not register any net devices\n"); + goto err_free_dev; + } + + /* + * Set up our debugfs entries. + */ + if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { + adapter->debugfs_root = + debugfs_create_dir(pci_name(pdev), + cxgb4vf_debugfs_root); + if (IS_ERR_OR_NULL(adapter->debugfs_root)) + dev_warn(&pdev->dev, "could not create debugfs" + " directory"); + else + setup_debugfs(adapter); + } + + /* + * See what interrupts we'll be using. 
If we've been configured to + * use MSI-X interrupts, try to enable them but fall back to using + * MSI interrupts if we can't enable MSI-X interrupts. If we can't + * get MSI interrupts we bail with the error. + */ + if (msi == MSI_MSIX && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else { + err = pci_enable_msi(pdev); + if (err) { + dev_err(&pdev->dev, "Unable to allocate %s interrupts;" + " err=%d\n", + msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err); + goto err_free_debugfs; + } + adapter->flags |= USING_MSI; + } + + /* + * Now that we know how many "ports" we have and what their types are, + * and how many Queue Sets we can support, we can configure our queue + * resources. + */ + cfg_queues(adapter); + + /* + * Print a short notice on the existence and configuration of the new + * VF network device ... + */ + for_each_port(adapter, pidx) { + dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n", + adapter->port[pidx]->name, + (adapter->flags & USING_MSIX) ? "MSI-X" : + (adapter->flags & USING_MSI) ? "MSI" : ""); + } + + /* + * Return success! + */ + return 0; + + /* + * Error recovery and exit code. Unwind state that's been created + * so far and return the error. + */ + +err_free_debugfs: + if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { + cleanup_debugfs(adapter); + debugfs_remove_recursive(adapter->debugfs_root); + } + +err_free_dev: + for_each_port(adapter, pidx) { + netdev = adapter->port[pidx]; + if (netdev == NULL) + continue; + pi = netdev_priv(netdev); + t4vf_free_vi(adapter, pi->viid); + if (test_bit(pidx, &adapter->registered_device_map)) + unregister_netdev(netdev); + free_netdev(netdev); + } + +err_unmap_bar: + iounmap(adapter->regs); + +err_free_adapter: + kfree(adapter); + pci_set_drvdata(pdev, NULL); + +err_release_regions: + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + pci_clear_master(pdev); + +err_disable_device: + pci_disable_device(pdev); + + return err; +} + +/* + * "Remove" a device: tear down all kernel and driver state created in the + * "probe" routine and quiesce the device (disable interrupts, etc.). (Note + * that this is called "remove_one" in the PF Driver.) + */ +static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + /* + * Tear down driver state associated with device. + */ + if (adapter) { + int pidx; + + /* + * Stop all of our activity. Unregister network port, + * disable interrupts, etc. + */ + for_each_port(adapter, pidx) + if (test_bit(pidx, &adapter->registered_device_map)) + unregister_netdev(adapter->port[pidx]); + t4vf_sge_stop(adapter); + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; + } + + /* + * Tear down our debugfs entries. + */ + if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { + cleanup_debugfs(adapter); + debugfs_remove_recursive(adapter->debugfs_root); + } + + /* + * Free all of the various resources which we've acquired ... + */ + t4vf_free_sge_resources(adapter); + for_each_port(adapter, pidx) { + struct net_device *netdev = adapter->port[pidx]; + struct port_info *pi; + + if (netdev == NULL) + continue; + + pi = netdev_priv(netdev); + t4vf_free_vi(adapter, pi->viid); + free_netdev(netdev); + } + iounmap(adapter->regs); + kfree(adapter); + pci_set_drvdata(pdev, NULL); + } + + /* + * Disable the device and release its PCI resources. 
+ */ + pci_disable_device(pdev); + pci_clear_master(pdev); + pci_release_regions(pdev); +} + +/* + * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt + * delivery. + */ +static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev) +{ + struct adapter *adapter; + int pidx; + + adapter = pci_get_drvdata(pdev); + if (!adapter) + return; + + /* + * Disable all Virtual Interfaces. This will shut down the + * delivery of all ingress packets into the chip for these + * Virtual Interfaces. + */ + for_each_port(adapter, pidx) { + struct net_device *netdev; + struct port_info *pi; + + if (!test_bit(pidx, &adapter->registered_device_map)) + continue; + + netdev = adapter->port[pidx]; + if (!netdev) + continue; + + pi = netdev_priv(netdev); + t4vf_enable_vi(adapter, pi->viid, false, false); + } + + /* + * Free up all Queues which will prevent further DMA and + * Interrupts allowing various internal pathways to drain. + */ + t4vf_free_sge_resources(adapter); +} + +/* + * PCI Device registration data structures. + */ +#define CH_DEVICE(devid, idx) \ + { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } + +static struct pci_device_id cxgb4vf_pci_tbl[] = { + CH_DEVICE(0xb000, 0), /* PE10K FPGA */ + CH_DEVICE(0x4800, 0), /* T440-dbg */ + CH_DEVICE(0x4801, 0), /* T420-cr */ + CH_DEVICE(0x4802, 0), /* T422-cr */ + CH_DEVICE(0x4803, 0), /* T440-cr */ + CH_DEVICE(0x4804, 0), /* T420-bch */ + CH_DEVICE(0x4805, 0), /* T440-bch */ + CH_DEVICE(0x4806, 0), /* T460-ch */ + CH_DEVICE(0x4807, 0), /* T420-so */ + CH_DEVICE(0x4808, 0), /* T420-cx */ + CH_DEVICE(0x4809, 0), /* T420-bt */ + CH_DEVICE(0x480a, 0), /* T404-bt */ + { 0, } +}; + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl); + +static struct pci_driver cxgb4vf_driver = { + .name = KBUILD_MODNAME, + .id_table = cxgb4vf_pci_tbl, + .probe = cxgb4vf_pci_probe, + .remove = __devexit_p(cxgb4vf_pci_remove), + .shutdown = __devexit_p(cxgb4vf_pci_shutdown), +}; + +/* + * Initialize global driver state. + */ +static int __init cxgb4vf_module_init(void) +{ + int ret; + + /* + * Vet our module parameters. + */ + if (msi != MSI_MSIX && msi != MSI_MSI) { + printk(KERN_WARNING KBUILD_MODNAME + ": bad module parameter msi=%d; must be %d" + " (MSI-X or MSI) or %d (MSI)\n", + msi, MSI_MSIX, MSI_MSI); + return -EINVAL; + } + + /* Debugfs support is optional, just warn if this fails */ + cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) + printk(KERN_WARNING KBUILD_MODNAME ": could not create" + " debugfs entry, continuing\n"); + + ret = pci_register_driver(&cxgb4vf_driver); + if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) + debugfs_remove(cxgb4vf_debugfs_root); + return ret; +} + +/* + * Tear down global driver state. + */ +static void __exit cxgb4vf_module_exit(void) +{ + pci_unregister_driver(&cxgb4vf_driver); + debugfs_remove(cxgb4vf_debugfs_root); +} + +module_init(cxgb4vf_module_init); +module_exit(cxgb4vf_module_exit); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c new file mode 100644 index 0000000..cffb328 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -0,0 +1,2465 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <net/ipv6.h> +#include <net/tcp.h> +#include <linux/dma-mapping.h> +#include <linux/prefetch.h> + +#include "t4vf_common.h" +#include "t4vf_defs.h" + +#include "../cxgb4/t4_regs.h" +#include "../cxgb4/t4fw_api.h" +#include "../cxgb4/t4_msg.h" + +/* + * Decoded Adapter Parameters. + */ +static u32 FL_PG_ORDER; /* large page allocation size */ +static u32 STAT_LEN; /* length of status page at ring end */ +static u32 PKTSHIFT; /* padding between CPL and packet data */ +static u32 FL_ALIGN; /* response queue message alignment */ + +/* + * Constants ... + */ +enum { + /* + * Egress Queue sizes, producer and consumer indices are all in units + * of Egress Context Units bytes. Note that as far as the hardware is + * concerned, the free list is an Egress Queue (the host produces free + * buffers which the hardware consumes) and free list entries are + * 64-bit PCI DMA addresses. + */ + EQ_UNIT = SGE_EQ_IDXSIZE, + FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + + /* + * Max number of TX descriptors we clean up at a time. Should be + * modest as freeing skbs isn't cheap and it happens while holding + * locks. We just need to free packets faster than they arrive, we + * eventually catch up and keep the amortized cost reasonable. + */ + MAX_TX_RECLAIM = 16, + + /* + * Max number of Rx buffers we replenish at a time. Again keep this + * modest, allocating buffers isn't cheap either. + */ + MAX_RX_REFILL = 16, + + /* + * Period of the Rx queue check timer. This timer is infrequent as it + * has something to do only when the system experiences severe memory + * shortage. + */ + RX_QCHECK_PERIOD = (HZ / 2), + + /* + * Period of the TX queue check timer and the maximum number of TX + * descriptors to be reclaimed by the TX timer. + */ + TX_QCHECK_PERIOD = (HZ / 2), + MAX_TIMER_TX_RECLAIM = 100, + + /* + * An FL with <= FL_STARVE_THRES buffers is starving and a periodic + * timer will attempt to refill it. + */ + FL_STARVE_THRES = 4, + + /* + * Suspend an Ethernet TX queue with fewer available descriptors than + * this. We always want to have room for a maximum sized packet: + * inline immediate data + MAX_SKB_FRAGS.
This is the same as + * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS + * (see that function and its helpers for a description of the + * calculation). + */ + ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1, + ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 + + ((ETHTXQ_MAX_FRAGS-1) & 1) + + 2), + ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), + ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR, + + ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT), + + /* + * Max TX descriptor space we allow for an Ethernet packet to be + * inlined into a WR. This is limited by the maximum value which + * we can specify for immediate data in the firmware Ethernet TX + * Work Request. + */ + MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK, + + /* + * Max size of a WR sent through a control TX queue. + */ + MAX_CTRL_WR_LEN = 256, + + /* + * Maximum amount of data which we'll ever need to inline into a + * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN). + */ + MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN + ? MAX_IMM_TX_PKT_LEN + : MAX_CTRL_WR_LEN), + + /* + * For incoming packets less than RX_COPY_THRES, we copy the data into + * an skb rather than referencing the data. We allocate enough + * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes + * of the data (header). + */ + RX_COPY_THRES = 256, + RX_PULL_LEN = 128, + + /* + * Main body length for sk_buffs used for RX Ethernet packets with + * fragments. Should be >= RX_PULL_LEN but possibly bigger to give + * pskb_may_pull() some room. + */ + RX_SKB_LEN = 512, +}; + +/* + * Software state per TX descriptor. + */ +struct tx_sw_desc { + struct sk_buff *skb; /* socket buffer of TX data source */ + struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */ +}; + +/* + * Software state per RX Free List descriptor. We keep track of the allocated + * FL page, its size, and its PCI DMA address (if the page is mapped). The FL + * page size and its PCI DMA mapped state are stored in the low bits of the + * PCI DMA address as per below. + */ +struct rx_sw_desc { + struct page *page; /* Free List page buffer */ + dma_addr_t dma_addr; /* PCI DMA address (if mapped) */ + /* and flags (see below) */ +}; + +/* + * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the + * SGE also uses the low 4 bits to determine the size of the buffer. It uses + * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array. + * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4 + * bits can only contain a 0 or a 1 to indicate which size buffer we're giving + * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is + * maintained in an inverse sense so the hardware never sees that bit high. + */ +enum { + RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */ + RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ +}; + +/** + * get_buf_addr - return DMA buffer address of software descriptor + * @sdesc: pointer to the software buffer descriptor + * + * Return the DMA buffer address of a software descriptor (stripping out + * our low-order flag bits). + */ +static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc) +{ + return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); +} + +/** + * is_buf_mapped - is buffer mapped for DMA? 
+ * @sdesc: pointer to the software buffer descriptor + * + * Determine whether the buffer associated with a software descriptor is + * mapped for DMA or not. + */ +static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc) +{ + return !(sdesc->dma_addr & RX_UNMAPPED_BUF); +} + +/** + * need_skb_unmap - does the platform need unmapping of sk_buffs? + * + * Returns true if the platform needs sk_buff unmapping. The compiler + * optimizes away unnecessary code if this returns true. + */ +static inline int need_skb_unmap(void) +{ +#ifdef CONFIG_NEED_DMA_MAP_STATE + return 1; +#else + return 0; +#endif +} + +/** + * txq_avail - return the number of available slots in a TX queue + * @tq: the TX queue + * + * Returns the number of available descriptors in a TX queue. + */ +static inline unsigned int txq_avail(const struct sge_txq *tq) +{ + return tq->size - 1 - tq->in_use; +} + +/** + * fl_cap - return the capacity of a Free List + * @fl: the Free List + * + * Returns the capacity of a Free List. The capacity is less than the + * size because an Egress Queue Index Unit worth of descriptors needs to + * be left unpopulated, otherwise the Producer and Consumer indices PIDX + * and CIDX will match and the hardware will think the FL is empty. + */ +static inline unsigned int fl_cap(const struct sge_fl *fl) +{ + return fl->size - FL_PER_EQ_UNIT; +} + +/** + * fl_starving - return whether a Free List is starving. + * @fl: the Free List + * + * Tests specified Free List to see whether the number of buffers + * available to the hardware has fallen below our "starvation" + * threshold. + */ +static inline bool fl_starving(const struct sge_fl *fl) +{ + return fl->avail - fl->pend_cred <= FL_STARVE_THRES; +} + +/** + * map_skb - map an skb for DMA to the device + * @dev: the egress net device + * @skb: the packet to map + * @addr: a pointer to the base of the DMA mapping array + * + * Map an skb for DMA to the device and return an array of DMA addresses.
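The flag bits folded into rx_sw_desc.dma_addr above work only because page-aligned DMA addresses have zeroed low bits. A standalone sketch of the packing idiom used by get_buf_addr() and is_buf_mapped() (flag names and the sample address are illustrative, not the driver's types):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BUF_LARGE    (1u << 0)  /* low bit 0: buffer size selector */
    #define BUF_UNMAPPED (1u << 1)  /* low bit 1: not DMA-mapped */
    #define FLAG_MASK    ((uintptr_t)(BUF_LARGE | BUF_UNMAPPED))

    static uintptr_t pack(uintptr_t addr, unsigned int flags)
    {
            assert((addr & FLAG_MASK) == 0);        /* address must be aligned */
            return addr | flags;
    }

    int main(void)
    {
            uintptr_t addr = 0x10000;               /* page-aligned "DMA address" */
            uintptr_t packed = pack(addr, BUF_LARGE | BUF_UNMAPPED);

            printf("addr=0x%lx large=%d mapped=%d\n",
                   (unsigned long)(packed & ~FLAG_MASK),
                   (int)(packed & BUF_LARGE),
                   !(packed & BUF_UNMAPPED));       /* mapped iff flag clear */
            return 0;
    }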
+ */ +static int map_skb(struct device *dev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto out_err; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + for (fp = si->frags; fp < end; fp++) { + *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); + dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); + +out_err: + return -ENOMEM; +} + +static void unmap_sgl(struct device *dev, const struct sk_buff *skb, + const struct ulptx_sgl *sgl, const struct sge_txq *tq) +{ + const struct ulptx_sge_pair *p; + unsigned int nfrags = skb_shinfo(skb)->nr_frags; + + if (likely(skb_headlen(skb))) + dma_unmap_single(dev, be64_to_cpu(sgl->addr0), + be32_to_cpu(sgl->len0), DMA_TO_DEVICE); + else { + dma_unmap_page(dev, be64_to_cpu(sgl->addr0), + be32_to_cpu(sgl->len0), DMA_TO_DEVICE); + nfrags--; + } + + /* + * the complexity below is because of the possibility of a wrap-around + * in the middle of an SGL + */ + for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { + if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) { +unmap: + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + be32_to_cpu(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(p->addr[1]), + be32_to_cpu(p->len[1]), DMA_TO_DEVICE); + p++; + } else if ((u8 *)p == (u8 *)tq->stat) { + p = (const struct ulptx_sge_pair *)tq->desc; + goto unmap; + } else if ((u8 *)p + 8 == (u8 *)tq->stat) { + const __be64 *addr = (const __be64 *)tq->desc; + + dma_unmap_page(dev, be64_to_cpu(addr[0]), + be32_to_cpu(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[1]), + be32_to_cpu(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[2]; + } else { + const __be64 *addr = (const __be64 *)tq->desc; + + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + be32_to_cpu(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[0]), + be32_to_cpu(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[1]; + } + } + if (nfrags) { + __be64 addr; + + if ((u8 *)p == (u8 *)tq->stat) + p = (const struct ulptx_sge_pair *)tq->desc; + addr = ((u8 *)p + 16 <= (u8 *)tq->stat + ? p->addr[0] + : *(const __be64 *)tq->desc); + dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]), + DMA_TO_DEVICE); + } +} + +/** + * free_tx_desc - reclaims TX descriptors and their buffers + * @adapter: the adapter + * @tq: the TX queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims TX descriptors from an SGE TX queue and frees the associated + * TX buffers. Called with the TX queue lock held. + */ +static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq, + unsigned int n, bool unmap) +{ + struct tx_sw_desc *sdesc; + unsigned int cidx = tq->cidx; + struct device *dev = adapter->pdev_dev; + + const int need_unmap = need_skb_unmap() && unmap; + + sdesc = &tq->sdesc[cidx]; + while (n--) { + /* + * If we kept a reference to the original TX skb, we need to + * unmap it from PCI DMA space (if required) and free it. 
+ */ + if (sdesc->skb) { + if (need_unmap) + unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq); + kfree_skb(sdesc->skb); + sdesc->skb = NULL; + } + + sdesc++; + if (++cidx == tq->size) { + cidx = 0; + sdesc = tq->sdesc; + } + } + tq->cidx = cidx; +} + +/* + * Return the number of reclaimable descriptors in a TX queue. + */ +static inline int reclaimable(const struct sge_txq *tq) +{ + int hw_cidx = be16_to_cpu(tq->stat->cidx); + int reclaimable = hw_cidx - tq->cidx; + if (reclaimable < 0) + reclaimable += tq->size; + return reclaimable; +} + +/** + * reclaim_completed_tx - reclaims completed TX descriptors + * @adapter: the adapter + * @tq: the TX queue to reclaim completed descriptors from + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims TX descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the TX + * queue locked. + */ +static inline void reclaim_completed_tx(struct adapter *adapter, + struct sge_txq *tq, + bool unmap) +{ + int avail = reclaimable(tq); + + if (avail) { + /* + * Limit the amount of clean up work we do at a time to keep + * the TX lock hold time O(1). + */ + if (avail > MAX_TX_RECLAIM) + avail = MAX_TX_RECLAIM; + + free_tx_desc(adapter, tq, avail, unmap); + tq->in_use -= avail; + } +} + +/** + * get_buf_size - return the size of an RX Free List buffer. + * @sdesc: pointer to the software buffer descriptor + */ +static inline int get_buf_size(const struct rx_sw_desc *sdesc) +{ + return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF) + ? (PAGE_SIZE << FL_PG_ORDER) + : PAGE_SIZE; +} + +/** + * free_rx_bufs - free RX buffers on an SGE Free List + * @adapter: the adapter + * @fl: the SGE Free List to free buffers from + * @n: how many buffers to free + * + * Release the next @n buffers on an SGE Free List RX queue. The + * buffers must be made inaccessible to hardware before calling this + * function. + */ +static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) +{ + while (n--) { + struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; + + if (is_buf_mapped(sdesc)) + dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), + get_buf_size(sdesc), PCI_DMA_FROMDEVICE); + put_page(sdesc->page); + sdesc->page = NULL; + if (++fl->cidx == fl->size) + fl->cidx = 0; + fl->avail--; + } +} + +/** + * unmap_rx_buf - unmap the current RX buffer on an SGE Free List + * @adapter: the adapter + * @fl: the SGE Free List + * + * Unmap the current buffer on an SGE Free List RX queue. The + * buffer must be made inaccessible to HW before calling this function. + * + * This is similar to @free_rx_bufs above but does not free the buffer. + * Do note that the FL still loses any further access to the buffer. + * This is used predominantly to "transfer ownership" of an FL buffer + * to another entity (typically an skb's fragment list). + */ +static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) +{ + struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; + + if (is_buf_mapped(sdesc)) + dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), + get_buf_size(sdesc), PCI_DMA_FROMDEVICE); + sdesc->page = NULL; + if (++fl->cidx == fl->size) + fl->cidx = 0; + fl->avail--; +} + +/** + * ring_fl_db - ring doorbell on free list + * @adapter: the adapter + * @fl: the Free List whose doorbell should be rung ... + * + * Tell the Scatter Gather Engine that there are new free list entries + * available.
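The index arithmetic in reclaimable() above is ordinary circular-buffer distance: subtract the software consumer index from the hardware one and add the ring size back on wrap. Checked in isolation:

    #include <assert.h>
    #include <stdio.h>

    /* Entries between a software consumer index and a hardware consumer
     * index in a ring of 'size' slots, handling wrap-around as in
     * reclaimable() above. */
    static int ring_distance(int hw_cidx, int sw_cidx, int size)
    {
            int d = hw_cidx - sw_cidx;
            return d < 0 ? d + size : d;
    }

    int main(void)
    {
            assert(ring_distance(10, 4, 16) == 6);  /* no wrap */
            assert(ring_distance(2, 14, 16) == 4);  /* wrapped */
            assert(ring_distance(5, 5, 16) == 0);   /* nothing to reclaim */
            printf("ring distance checks passed\n");
            return 0;
    }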
+ */ +static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) +{ + /* + * The SGE keeps track of its Producer and Consumer Indices in terms + * of Egress Queue Units so we can only tell it about integral numbers + * of multiples of Free List Entries per Egress Queue Units ... + */ + if (fl->pend_cred >= FL_PER_EQ_UNIT) { + wmb(); + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + DBPRIO | + QID(fl->cntxt_id) | + PIDX(fl->pend_cred / FL_PER_EQ_UNIT)); + fl->pend_cred %= FL_PER_EQ_UNIT; + } +} + +/** + * set_rx_sw_desc - initialize software RX buffer descriptor + * @sdesc: pointer to the software RX buffer descriptor + * @page: pointer to the page data structure backing the RX buffer + * @dma_addr: PCI DMA address (possibly with low-bit flags) + */ +static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page, + dma_addr_t dma_addr) +{ + sdesc->page = page; + sdesc->dma_addr = dma_addr; +} + +/* + * Support for poisoning RX buffers ... + */ +#define POISON_BUF_VAL -1 + +static inline void poison_buf(struct page *page, size_t sz) +{ +#if POISON_BUF_VAL >= 0 + memset(page_address(page), POISON_BUF_VAL, sz); +#endif +} + +/** + * refill_fl - refill an SGE RX buffer ring + * @adapter: the adapter + * @fl: the Free List ring to refill + * @n: the number of new buffers to allocate + * @gfp: the gfp flags for the allocations + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN + * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number + * of buffers allocated. If afterwards the queue is found critically low, + * mark it as starving in the bitmap of starving FLs. + */ +static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, + int n, gfp_t gfp) +{ + struct page *page; + dma_addr_t dma_addr; + unsigned int cred = fl->avail; + __be64 *d = &fl->desc[fl->pidx]; + struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx]; + + /* + * Sanity: ensure that the result of adding n Free List buffers + * won't result in wrapping the SGE's Producer Index around to + * its Consumer Index thereby indicating an empty Free List ... + */ + BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); + + /* + * If we support large pages, prefer large buffers and fail over to + * small pages if we can't allocate large pages to satisfy the refill. + * If we don't support large pages, drop directly into the small page + * allocation code. + */ + if (FL_PG_ORDER == 0) + goto alloc_small_pages; + + while (n) { + page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, + FL_PG_ORDER); + if (unlikely(!page)) { + /* + * We've failed in our attempt to allocate a "large + * page". Fail over to the "small page" allocation + * below. + */ + fl->large_alloc_failed++; + break; + } + poison_buf(page, PAGE_SIZE << FL_PG_ORDER); + + dma_addr = dma_map_page(adapter->pdev_dev, page, 0, + PAGE_SIZE << FL_PG_ORDER, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { + /* + * We've run out of DMA mapping space. Free up the + * buffer and return with what we've managed to put + * into the free list. We don't want to fail over to + * the small page allocation below in this case + * because DMA mapping resources are typically + * critical resources once they become scarce.
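ring_fl_db() above batches doorbell writes: credits accumulate in pend_cred and only whole Egress Queue Units are ever advertised, with the remainder carried forward. A userspace model of that bookkeeping (the unit size here is illustrative):

    #include <stdio.h>

    #define FL_PER_EQ_UNIT 8        /* free-list entries per Egress Queue Unit */

    struct fl_state {
            unsigned int pend_cred; /* buffers added but not yet advertised */
            unsigned int doorbells; /* doorbell writes issued */
            unsigned int advertised;/* entries the hardware knows about */
    };

    /* Publish pending credits in whole EQ units, as ring_fl_db() does. */
    static void maybe_ring(struct fl_state *fl)
    {
            if (fl->pend_cred >= FL_PER_EQ_UNIT) {
                    unsigned int units = fl->pend_cred / FL_PER_EQ_UNIT;

                    fl->doorbells++;
                    fl->advertised += units * FL_PER_EQ_UNIT;
                    fl->pend_cred %= FL_PER_EQ_UNIT;
            }
    }

    int main(void)
    {
            struct fl_state fl = { 0, 0, 0 };
            int i;

            for (i = 0; i < 20; i++) {      /* add one buffer at a time */
                    fl.pend_cred++;
                    maybe_ring(&fl);
            }
            printf("doorbells=%u advertised=%u pending=%u\n",
                   fl.doorbells, fl.advertised, fl.pend_cred);
            /* 20 buffers -> 2 doorbells, 16 advertised, 4 still pending */
            return 0;
    }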
+ */ + __free_pages(page, FL_PG_ORDER); + goto out; + } + dma_addr |= RX_LARGE_BUF; + *d++ = cpu_to_be64(dma_addr); + + set_rx_sw_desc(sdesc, page, dma_addr); + sdesc++; + + fl->avail++; + if (++fl->pidx == fl->size) { + fl->pidx = 0; + sdesc = fl->sdesc; + d = fl->desc; + } + n--; + } + +alloc_small_pages: + while (n--) { + page = __netdev_alloc_page(adapter->port[0], + gfp | __GFP_NOWARN); + if (unlikely(!page)) { + fl->alloc_failed++; + break; + } + poison_buf(page, PAGE_SIZE); + + dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { + netdev_free_page(adapter->port[0], page); + break; + } + *d++ = cpu_to_be64(dma_addr); + + set_rx_sw_desc(sdesc, page, dma_addr); + sdesc++; + + fl->avail++; + if (++fl->pidx == fl->size) { + fl->pidx = 0; + sdesc = fl->sdesc; + d = fl->desc; + } + } + +out: + /* + * Update our accounting state to incorporate the new Free List + * buffers, tell the hardware about them and return the number of + * buffers which we were able to allocate. + */ + cred = fl->avail - cred; + fl->pend_cred += cred; + ring_fl_db(adapter, fl); + + if (unlikely(fl_starving(fl))) { + smp_wmb(); + set_bit(fl->cntxt_id, adapter->sge.starving_fl); + } + + return cred; +} + +/* + * Refill a Free List to its capacity or the Maximum Refill Increment, + * whichever is smaller ... + */ +static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl) +{ + refill_fl(adapter, fl, + min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail), + GFP_ATOMIC); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @hwsize: the size of each hardware descriptor + * @swsize: the size of each software descriptor + * @busaddrp: the physical PCI bus address of the allocated ring + * @swringp: return address pointer for software ring + * @stat_size: extra space in hardware ring for status information + * + * Allocates resources for an SGE descriptor ring, such as TX queues, + * free buffer lists, response queues, etc. Each SGE ring requires + * space for its hardware descriptors plus, optionally, space for software + * state associated with each hardware entry (the metadata). The function + * returns three values: the virtual address for the hardware ring (the + * return value of the function), the PCI bus address of the hardware + * ring (in *busaddrp), and the address of the software ring (in swringp). + * Both the hardware and software rings are returned zeroed out. + */ +static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, + size_t swsize, dma_addr_t *busaddrp, void *swringp, + size_t stat_size) +{ + /* + * Allocate the hardware ring and PCI DMA bus address space for said. + */ + size_t hwlen = nelem * hwsize + stat_size; + void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); + + if (!hwring) + return NULL; + + /* + * If the caller wants a software ring, allocate it and return a + * pointer to it in *swringp. + */ + BUG_ON((swsize != 0) != (swringp != NULL)); + if (swsize) { + void *swring = kcalloc(nelem, swsize, GFP_KERNEL); + + if (!swring) { + dma_free_coherent(dev, hwlen, hwring, *busaddrp); + return NULL; + } + *(void **)swringp = swring; + } + + /* + * Zero out the hardware ring and return its address as our function + * value.
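alloc_ring() couples one DMA-coherent hardware ring with an optional software metadata ring, undoing the first allocation if the second fails. A rough userspace analog of that contract, with calloc standing in for dma_alloc_coherent() and kcalloc():

    #include <stdlib.h>

    /* Allocate an 'nelem'-entry hardware ring plus an optional software
     * metadata ring; free the first if the second allocation fails. */
    static void *alloc_ring_pair(size_t nelem, size_t hwsize,
                                 size_t swsize, void **swringp)
    {
            void *hwring = calloc(nelem, hwsize);

            if (!hwring)
                    return NULL;
            if (swsize) {
                    void *swring = calloc(nelem, swsize);

                    if (!swring) {
                            free(hwring);
                            return NULL;
                    }
                    *swringp = swring;
            }
            return hwring;
    }

    int main(void)
    {
            void *sw = NULL;
            void *hw = alloc_ring_pair(512, 8, 16, &sw);

            if (!hw)
                    return EXIT_FAILURE;
            free(sw);
            free(hw);
            return EXIT_SUCCESS;
    }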
+ */ + memset(hwring, 0, hwlen); + return hwring; +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits (8-byte units) needed for a Direct + * Scatter/Gather List that can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + /* + * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA + * addresses. The DSGL Work Request starts off with a 32-bit DSGL + * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, + * repeated sequences of { Length[i], Length[i+1], Address[i], + * Address[i+1] } (this ensures that all addresses are on 64-bit + * boundaries). If N is even, then Length[N+1] should be set to 0 and + * Address[N+1] is omitted. + * + * The following calculation incorporates all of the above. It's + * somewhat hard to follow but, briefly: the "+2" accounts for the + * first two flits which include the DSGL header, Length0 and + * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 + * flits for every pair of the remaining N) +1 if (n-1) is odd; and + * finally the "+((n-1)&1)" adds the one remaining flit needed if + * (n-1) is odd ... + */ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/** + * flits_to_desc - returns the num of TX descriptors for the given flits + * @flits: the number of flits + * + * Returns the number of TX descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int flits) +{ + BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64)); + return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit completely as + * immediate data. + */ +static inline int is_eth_imm(const struct sk_buff *skb) +{ + /* + * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request + * which does not accommodate immediate data. We could dike out all + * of the support code for immediate data but that would tie our hands + * too much if we ever want to enhance the firmware. It would also + * create more differences between the PF and VF Drivers. + */ + return false; +} + +/** + * calc_tx_flits - calculate the number of flits for a packet TX WR + * @skb: the packet + * + * Returns the number of flits needed for a TX Work Request for the + * given Ethernet packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + /* + * If the skb is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the skb data in the Work Request. + */ + if (is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), + sizeof(__be64)); + + /* + * Otherwise, we're going to have to construct a Scatter gather list + * of the skb body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embedded TX Packet Write CPL message.
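The closed form in sgl_len() can be cross-checked against a literal count of the layout its comment describes: two flits for the header, Length0 and Address0, three flits per remaining pair of entries, and two flits for a trailing odd entry. The sketch below verifies the two agree for 1 through 64 entries:

    #include <assert.h>
    #include <stdio.h>

    /* Closed form from sgl_len() above. */
    static unsigned int sgl_len(unsigned int n)
    {
            n--;
            return 3 * n / 2 + (n & 1) + 2;
    }

    /* Direct count of the DSGL layout: 2 flits for the ULPTX header,
     * Length0 and Address0; 3 flits per remaining pair of entries
     * ({Len[i],Len[i+1]} plus two addresses); 2 flits for a trailing
     * odd entry ({Len[N],0} plus one address). */
    static unsigned int sgl_len_direct(unsigned int n)
    {
            unsigned int rest = n - 1;
            return 2 + 3 * (rest / 2) + (rest & 1 ? 2 : 0);
    }

    int main(void)
    {
            unsigned int n;

            for (n = 1; n <= 64; n++)
                    assert(sgl_len(n) == sgl_len_direct(n));
            printf("sgl_len(1..64) matches the direct layout count\n");
            return 0;
    }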
+ */ + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); + if (skb_shinfo(skb)->gso_size) + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * write_sgl - populate a Scatter/Gather List for a packet + * @skb: the packet + * @tq: the TX queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into skb main-body data to include in the SGL + * @addr: the list of DMA bus addresses for the SGL elements + * + * Generates a Scatter/Gather List for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. + * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. @sgl must be 16-byte + * aligned and within a TX descriptor with available space. @end points + * right after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @tq->stat. + */ +static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + const struct skb_shared_info *si = skb_shinfo(skb); + unsigned int nfrags = si->nr_frags; + struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; + + len = skb_headlen(skb) - start; + if (likely(len)) { + sgl->len0 = htonl(len); + sgl->addr0 = cpu_to_be64(addr[0] + start); + nfrags++; + } else { + sgl->len0 = htonl(si->frags[0].size); + sgl->addr0 = cpu_to_be64(addr[1]); + } + + sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | + ULPTX_NSGE(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge; + + for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(si->frags[++i].size); + to->addr[0] = cpu_to_be64(addr[i]); + to->addr[1] = cpu_to_be64(addr[++i]); + } + if (nfrags) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(0); + to->addr[0] = cpu_to_be64(addr[i + 1]); + } + if (unlikely((u8 *)end > (u8 *)tq->stat)) { + unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = (u8 *)end - (u8 *)tq->stat; + memcpy(tq->desc, (u8 *)buf + part0, part1); + end = (void *)tq->desc + part1; + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *(u64 *)end = 0; +} + +/** + * ring_tx_db - check and potentially ring a TX queue's doorbell + * @adapter: the adapter + * @tq: the TX queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbell for a TX queue. + */ +static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, + int n) +{ + /* + * Warn if we write doorbells with the wrong priority and write + * descriptors before telling HW.
+ */ + WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO); + wmb(); + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + QID(tq->cntxt_id) | PIDX(n)); +} + +/** + * inline_tx_skb - inline a packet's data into TX descriptors + * @skb: the packet + * @tq: the TX queue where the packet will be inlined + * @pos: starting position in the TX queue to inline the packet + * + * Inline a packet's contents directly into TX descriptors, starting at + * the given position within the TX DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. + */ +static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq, + void *pos) +{ + u64 *p; + int left = (void *)tq->stat - pos; + + if (likely(skb->len <= left)) { + if (likely(!skb->data_len)) + skb_copy_from_linear_data(skb, pos, skb->len); + else + skb_copy_bits(skb, 0, pos, skb->len); + pos += skb->len; + } else { + skb_copy_bits(skb, 0, pos, left); + skb_copy_bits(skb, left, tq->desc, skb->len - left); + pos = (void *)tq->desc + (skb->len - left); + } + + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) + *p = 0; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits. + */ +static u64 hwcsum(const struct sk_buff *skb) +{ + int csum_type; + const struct iphdr *iph = ip_hdr(skb); + + if (iph->version == 4) { + if (iph->protocol == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP; + else if (iph->protocol == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP; + else { +nocsum: + /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return TXPKT_L4CSUM_DIS; + } + } else { + /* + * this doesn't work with extension headers + */ + const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; + + if (ip6h->nexthdr == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP6; + else if (ip6h->nexthdr == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP6; + else + goto nocsum; + } + + if (likely(csum_type >= TX_CSUM_TCPIP)) + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | + TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); + else { + int start = skb_transport_offset(skb); + + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_CSUM_START(start) | + TXPKT_CSUM_LOC(start + skb->csum_offset); + } +} + +/* + * Stop an Ethernet TX queue and record that state change. + */ +static void txq_stop(struct sge_eth_txq *txq) +{ + netif_tx_stop_queue(txq->txq); + txq->q.stops++; +} + +/* + * Advance our software state for a TX queue by adding n in use descriptors. + */ +static inline void txq_advance(struct sge_txq *tq, unsigned int n) +{ + tq->in_use += n; + tq->pidx += n; + if (tq->pidx >= tq->size) + tq->pidx -= tq->size; +} + +/** + * t4vf_eth_xmit - add a packet to an Ethernet TX queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. 
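The descriptor accounting at the top of t4vf_eth_xmit() below boils down to two comparisons: reject the packet outright if its ndesc descriptors do not fit, and stop the queue pre-emptively when the remainder would fall under the stop threshold. The decision logic in isolation (the threshold value is illustrative, not the driver's ETHTXQ_STOP_THRES):

    #include <stdio.h>

    #define STOP_THRES 9    /* illustrative stand-in for ETHTXQ_STOP_THRES */

    enum tx_verdict { TX_OK, TX_OK_STOP, TX_BUSY };

    /* Decide what to do with a packet needing 'ndesc' descriptors when
     * 'avail' descriptors are free, mirroring the checks in the xmit path. */
    static enum tx_verdict tx_admit(int avail, int ndesc)
    {
            int credits = avail - ndesc;

            if (credits < 0)
                    return TX_BUSY;         /* no room: stop queue, retry later */
            if (credits < STOP_THRES)
                    return TX_OK_STOP;      /* send, but stop the queue after */
            return TX_OK;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   tx_admit(100, 4),        /* TX_OK (0) */
                   tx_admit(12, 4),         /* TX_OK_STOP (1) */
                   tx_admit(3, 4));         /* TX_BUSY (2) */
            return 0;
    }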
+ */ +int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u32 wr_mid; + u64 cntrl, *end; + int qidx, credits; + unsigned int flits, ndesc; + struct adapter *adapter; + struct sge_eth_txq *txq; + const struct port_info *pi; + struct fw_eth_tx_pkt_vm_wr *wr; + struct cpl_tx_pkt_core *cpl; + const struct skb_shared_info *ssi; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci)); + + /* + * The chip minimum packet length is 10 octets but the firmware + * command that we are using requires that we copy the Ethernet header + * (including the VLAN tag) into the header so we reject anything + * smaller than that ... + */ + if (unlikely(skb->len < fw_hdr_copy_len)) + goto out_free; + + /* + * Figure out which TX Queue we're going to use. + */ + pi = netdev_priv(dev); + adapter = pi->adapter; + qidx = skb_get_queue_mapping(skb); + BUG_ON(qidx >= pi->nqsets); + txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; + + /* + * Take this opportunity to reclaim any TX Descriptors whose DMA + * transfers have completed. + */ + reclaim_completed_tx(adapter, &txq->q, true); + + /* + * Calculate the number of flits and TX Descriptors we're going to + * need along with how many TX Descriptors will be left over after + * we inject our Work Request. + */ + flits = calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&txq->q) - ndesc; + + if (unlikely(credits < 0)) { + /* + * Not enough room for this packet's Work Request. Stop the + * TX Queue and return a "busy" condition. The queue will get + * started later on when the firmware informs us that space + * has opened up. + */ + txq_stop(txq); + dev_err(adapter->pdev_dev, + "%s: TX ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!is_eth_imm(skb) && + unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) { + /* + * We need to map the skb into PCI DMA space (because it can't + * be in-lined directly into the Work Request) and the mapping + * operation failed. Record the error and drop the packet. + */ + txq->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + /* + * After we're done injecting the Work Request for this + * packet, we'll be below our "stop threshold" so stop the TX + * Queue now and schedule a request for an SGE Egress Queue + * Update message. The queue will get started later on when + * the firmware processes this Work Request and sends us an + * Egress Queue Status Update message indicating that space + * has opened up. + */ + txq_stop(txq); + wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + } + + /* + * Start filling in our Work Request. Note that we do _not_ handle + * the WR Header wrapping around the TX Descriptor Ring. If our + * maximum header size ever exceeds one TX Descriptor, we'll need to + * do something else here. + */ + BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); + wr = (void *)&txq->q.desc[txq->q.pidx]; + wr->equiq_to_len16 = cpu_to_be32(wr_mid); + wr->r3[0] = cpu_to_be64(0); + wr->r3[1] = cpu_to_be64(0); + skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); + end = (u64 *)wr + flits; + + /* + * If this is a Large Send Offload packet we'll put in an LSO CPL + * message with an encapsulated TX Packet CPL message. Otherwise we + * just use a TX Packet CPL message. 
+ */ + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = + cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN(sizeof(*lso) + + sizeof(*cpl))); + /* + * Fill in the LSO CPL message. + */ + lso->lso_ctrl = + cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE | + LSO_LAST_SLICE | + LSO_IPV6(v6) | + LSO_ETHHDR_LEN(eth_xtra_len/4) | + LSO_IPHDR_LEN(l3hdr_len/4) | + LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); + lso->ipid_ofst = cpu_to_be16(0); + lso->mss = cpu_to_be16(ssi->gso_size); + lso->seqno_offset = cpu_to_be32(0); + lso->len = cpu_to_be32(skb->len); + + /* + * Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(lso + 1); + cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN(l3hdr_len) | + TXPKT_ETHHDR_LEN(eth_xtra_len)); + txq->tso++; + txq->tx_cso += ssi->gso_segs; + } else { + int len; + + len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); + wr->op_immdlen = + cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN(len)); + + /* + * Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; + txq->tx_cso++; + } else + cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; + } + + /* + * If there's a VLAN tag present, add that to the list of things to + * do in this Work Request. + */ + if (vlan_tx_tag_present(skb)) { + txq->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); + } + + /* + * Fill in the TX Packet CPL message header. + */ + cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) | + TXPKT_INTF(pi->port_id) | + TXPKT_PF(0)); + cpl->pack = cpu_to_be16(0); + cpl->len = cpu_to_be16(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + +#ifdef T4_TRACE + T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7], + "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u", + ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags); +#endif + + /* + * Fill in the body of the TX Packet CPL message with either in-lined + * data or a Scatter/Gather List. + */ + if (is_eth_imm(skb)) { + /* + * In-line the packet's data and free the skb since we don't + * need it any longer. + */ + inline_tx_skb(skb, &txq->q, cpl + 1); + dev_kfree_skb(skb); + } else { + /* + * Write the skb's Scatter/Gather list into the TX Packet CPL + * message and retain a pointer to the skb so we can free it + * later when its DMA completes. (We store the skb pointer + * in the Software Descriptor corresponding to the last TX + * Descriptor used by the Work Request.) + * + * The retained skb will be freed when the corresponding TX + * Descriptors are reclaimed after their DMAs complete. + * However, this could take quite a while since, in general, + * the hardware is set up to be lazy about sending DMA + * completion notifications to us and we mostly perform TX + * reclaims in the transmit routine. + * + * This is good for performance but means that we rely on new + * TX packets arriving to run the destructors of completed + * packets, which open up space in their sockets' send queues. + * Sometimes we do not get such new packets causing TX to + * stall. A single UDP transmitter is a good example of this + * situation.
We have a clean up timer that periodically + * reclaims completed packets but it doesn't run often enough + * (nor do we want it to) to prevent lengthy stalls. A + * solution to this problem is to run the destructor early, + * after the packet is queued but before it's DMAd. A con is + * that we lie to socket memory accounting, but the amount of + * extra memory is reasonable (limited by the number of TX + * descriptors), the packets do actually get freed quickly by + * new packets almost always, and for protocols like TCP that + * wait for acks to really free up the data the extra memory + * is even less. On the positive side we run the destructors + * on the sending CPU rather than on a potentially different + * completing CPU, usually a good thing. + * + * Run the destructor before telling the DMA engine about the + * packet to make sure it doesn't complete and get freed + * prematurely. + */ + struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); + struct sge_txq *tq = &txq->q; + int last_desc; + + /* + * If the Work Request header was an exact multiple of our TX + * Descriptor length, then it's possible that the starting SGL + * pointer lines up exactly with the end of our TX Descriptor + * ring. If that's the case, wrap around to the beginning + * here ... + */ + if (unlikely((void *)sgl == (void *)tq->stat)) { + sgl = (void *)tq->desc; + end = (void *)((void *)tq->desc + + ((void *)end - (void *)tq->stat)); + } + + write_sgl(skb, tq, sgl, end, 0, addr); + skb_orphan(skb); + + last_desc = tq->pidx + ndesc - 1; + if (last_desc >= tq->size) + last_desc -= tq->size; + tq->sdesc[last_desc].skb = skb; + tq->sdesc[last_desc].sgl = sgl; + } + + /* + * Advance our internal TX Queue state, tell the hardware about + * the new TX descriptors and return success. + */ + txq_advance(&txq->q, ndesc); + dev->trans_start = jiffies; + ring_tx_db(adapter, &txq->q, ndesc); + return NETDEV_TX_OK; + +out_free: + /* + * An error of some sort happened. Free the TX skb and tell the + * OS that we've "dealt" with the packet ... + */ + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +/** + * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list + * @gl: the gather list + * @skb_len: size of sk_buff main body if it carries fragments + * @pull_len: amount of data to move to the sk_buff's main body + * + * Builds an sk_buff from the given packet gather list. Returns the + * sk_buff or %NULL if sk_buff allocation failed. + */ +struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len) +{ + struct sk_buff *skb; + struct skb_shared_info *ssi; + + /* + * If the ingress packet is small enough, allocate an skb large enough + * for all of the data and copy it inline. Otherwise, allocate an skb + * with enough room to pull in the header and reference the rest of + * the data via the skb fragment list. + * + * Below we rely on RX_COPY_THRES being less than the smallest Rx + * buffer size, which is expected since buffers are at least + * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one + * fragment.
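The copy-versus-reference choice just described is a plain threshold test: packets at or below RX_COPY_THRES are copied into the skb body wholesale, while larger ones only have pull_len header bytes copied and reference the rest as page fragments. A toy model (the helper is hypothetical; the constants mirror the driver's):

    #include <stdio.h>

    #define COPY_THRES 256  /* copy whole packets at or below this size */
    #define PULL_LEN   128  /* header bytes pulled into the main body */

    /* Return how many bytes would be copied for a packet of 'len' bytes:
     * everything for small packets, only the pulled header otherwise
     * (the rest stays in page fragments). */
    static unsigned int bytes_copied(unsigned int len)
    {
            return len <= COPY_THRES ? len : PULL_LEN;
    }

    int main(void)
    {
            printf("64B pkt copies %u bytes\n", bytes_copied(64));     /* 64 */
            printf("1500B pkt copies %u bytes\n", bytes_copied(1500)); /* 128 */
            return 0;
    }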
+ */ + if (gl->tot_len <= RX_COPY_THRES) { + /* small packets have only one fragment */ + skb = alloc_skb(gl->tot_len, GFP_ATOMIC); + if (unlikely(!skb)) + goto out; + __skb_put(skb, gl->tot_len); + skb_copy_to_linear_data(skb, gl->va, gl->tot_len); + } else { + skb = alloc_skb(skb_len, GFP_ATOMIC); + if (unlikely(!skb)) + goto out; + __skb_put(skb, pull_len); + skb_copy_to_linear_data(skb, gl->va, pull_len); + + ssi = skb_shinfo(skb); + ssi->frags[0].page = gl->frags[0].page; + ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; + ssi->frags[0].size = gl->frags[0].size - pull_len; + if (gl->nfrags > 1) + memcpy(&ssi->frags[1], &gl->frags[1], + (gl->nfrags-1) * sizeof(skb_frag_t)); + ssi->nr_frags = gl->nfrags; + + skb->len = gl->tot_len; + skb->data_len = skb->len - pull_len; + skb->truesize += skb->data_len; + + /* Get a reference for the last page, we don't own it */ + get_page(gl->frags[gl->nfrags - 1].page); + } + +out: + return skb; +} + +/** + * t4vf_pktgl_free - free a packet gather list + * @gl: the gather list + * + * Releases the pages of a packet gather list. We do not own the last + * page on the list and do not free it. + */ +void t4vf_pktgl_free(const struct pkt_gl *gl) +{ + int frag; + + frag = gl->nfrags - 1; + while (frag--) + put_page(gl->frags[frag].page); +} + +/** + * copy_frags - copy fragments from gather list into skb_shared_info + * @si: destination skb shared info structure + * @gl: source internal packet gather list + * @offset: packet start offset in first page + * + * Copy an internal packet gather list into a Linux skb_shared_info + * structure. + */ +static inline void copy_frags(struct skb_shared_info *si, + const struct pkt_gl *gl, + unsigned int offset) +{ + unsigned int n; + + /* usually there's just one frag */ + si->frags[0].page = gl->frags[0].page; + si->frags[0].page_offset = gl->frags[0].page_offset + offset; + si->frags[0].size = gl->frags[0].size - offset; + si->nr_frags = gl->nfrags; + + n = gl->nfrags - 1; + if (n) + memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t)); + + /* get a reference to the last page, we don't own it */ + get_page(gl->frags[n].page); +} + +/** + * do_gro - perform Generic Receive Offload ingress packet processing + * @rxq: ingress RX Ethernet Queue + * @gl: gather list for ingress packet + * @pkt: CPL header for last packet fragment + * + * Perform Generic Receive Offload (GRO) ingress packet processing. + * We use the standard Linux GRO interfaces for this. 
+ */ +static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, + const struct cpl_rx_pkt *pkt) +{ + int ret; + struct sk_buff *skb; + + skb = napi_get_frags(&rxq->rspq.napi); + if (unlikely(!skb)) { + t4vf_pktgl_free(gl); + rxq->stats.rx_drops++; + return; + } + + copy_frags(skb_shinfo(skb), gl, PKTSHIFT); + skb->len = gl->tot_len - PKTSHIFT; + skb->data_len = skb->len; + skb->truesize += skb->data_len; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rxq->rspq.idx); + + if (pkt->vlan_ex) + __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); + ret = napi_gro_frags(&rxq->rspq.napi); + + if (ret == GRO_HELD) + rxq->stats.lro_pkts++; + else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) + rxq->stats.lro_merged++; + rxq->stats.pkts++; + rxq->stats.rx_cso++; +} + +/** + * t4vf_ethrx_handler - process an ingress ethernet packet + * @rspq: the response queue that received the packet + * @rsp: the response queue descriptor holding the RX_PKT message + * @gl: the gather list of packet fragments + * + * Process an ingress ethernet packet and deliver it to the stack. + */ +int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct sk_buff *skb; + const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; + bool csum_ok = pkt->csum_calc && !pkt->err_vec; + struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + + /* + * If this is a good TCP packet and we have Generic Receive Offload + * enabled, handle the packet in the GRO path. + */ + if ((pkt->l2info & cpu_to_be32(RXF_TCP)) && + (rspq->netdev->features & NETIF_F_GRO) && csum_ok && + !pkt->ip_frag) { + do_gro(rxq, gl, pkt); + return 0; + } + + /* + * Convert the Packet Gather List into an skb. + */ + skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4vf_pktgl_free(gl); + rxq->stats.rx_drops++; + return 0; + } + __skb_pull(skb, PKTSHIFT); + skb->protocol = eth_type_trans(skb, rspq->netdev); + skb_record_rx_queue(skb, rspq->idx); + rxq->stats.pkts++; + + if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) && + !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { + if (!pkt->ip_frag) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else { + __sum16 c = (__force __sum16)pkt->csum; + skb->csum = csum_unfold(c); + skb->ip_summed = CHECKSUM_COMPLETE; + } + rxq->stats.rx_cso++; + } else + skb_checksum_none_assert(skb); + + if (pkt->vlan_ex) { + rxq->stats.vlan_ex++; + __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); + } + + netif_receive_skb(skb); + + return 0; +} + +/** + * is_new_response - check if a response is newly written + * @rc: the response control descriptor + * @rspq: the response queue + * + * Returns true if a response descriptor contains a yet unprocessed + * response. + */ +static inline bool is_new_response(const struct rsp_ctrl *rc, + const struct sge_rspq *rspq) +{ + return RSPD_GEN(rc->type_gen) == rspq->gen; +} + +/** + * restore_rx_bufs - put back a packet's RX buffers + * @gl: the packet gather list + * @fl: the SGE Free List + * @frags: how many fragments in @gl + * + * Called when we find out that the current packet, @gl, can't be + * processed right away for some reason. This is a very rare event and + * there's no effort to make this suspension/resumption process + * particularly efficient. + * + * We implement the suspension by putting all of the RX buffers associated + * with the current packet back on the original Free List.
The buffers + * have already been unmapped and are left unmapped, we mark them as + * unmapped in order to prevent further unmapping attempts. (Effectively + * this function undoes the series of @unmap_rx_buf calls which were done + * to create the current packet's gather list.) This leaves us ready to + * restart processing of the packet the next time we start processing the + * RX Queue ... + */ +static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl, + int frags) +{ + struct rx_sw_desc *sdesc; + + while (frags--) { + if (fl->cidx == 0) + fl->cidx = fl->size - 1; + else + fl->cidx--; + sdesc = &fl->sdesc[fl->cidx]; + sdesc->page = gl->frags[frags].page; + sdesc->dma_addr |= RX_UNMAPPED_BUF; + fl->avail++; + } +} + +/** + * rspq_next - advance to the next entry in a response queue + * @rspq: the queue + * + * Updates the state of a response queue to advance it to the next entry. + */ +static inline void rspq_next(struct sge_rspq *rspq) +{ + rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len; + if (unlikely(++rspq->cidx == rspq->size)) { + rspq->cidx = 0; + rspq->gen ^= 1; + rspq->cur_desc = rspq->desc; + } +} + +/** + * process_responses - process responses from an SGE response queue + * @rspq: the ingress response queue to process + * @budget: how many responses can be processed in this round + * + * Process responses from a Scatter Gather Engine response queue up to + * the supplied budget. Responses include received packets as well as + * control messages from firmware or hardware. + * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. + */ +int process_responses(struct sge_rspq *rspq, int budget) +{ + struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + int budget_left = budget; + + while (likely(budget_left)) { + int ret, rsp_type; + const struct rsp_ctrl *rc; + + rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, rspq)) + break; + + /* + * Figure out what kind of response we've received from the + * SGE. + */ + rmb(); + rsp_type = RSPD_TYPE(rc->type_gen); + if (likely(rsp_type == RSP_TYPE_FLBUF)) { + skb_frag_t *fp; + struct pkt_gl gl; + const struct rx_sw_desc *sdesc; + u32 bufsz, frag; + u32 len = be32_to_cpu(rc->pldbuflen_qid); + + /* + * If we get a "new buffer" message from the SGE we + * need to move on to the next Free List buffer. + */ + if (len & RSPD_NEWBUF) { + /* + * We get one "new buffer" message when we + * first start up a queue so we need to ignore + * it when our offset into the buffer is 0. + */ + if (likely(rspq->offset > 0)) { + free_rx_bufs(rspq->adapter, &rxq->fl, + 1); + rspq->offset = 0; + } + len = RSPD_LEN(len); + } + gl.tot_len = len; + + /* + * Gather packet fragments. + */ + for (frag = 0, fp = gl.frags; /**/; frag++, fp++) { + BUG_ON(frag >= MAX_SKB_FRAGS); + BUG_ON(rxq->fl.avail == 0); + sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; + bufsz = get_buf_size(sdesc); + fp->page = sdesc->page; + fp->page_offset = rspq->offset; + fp->size = min(bufsz, len); + len -= fp->size; + if (!len) + break; + unmap_rx_buf(rspq->adapter, &rxq->fl); + } + gl.nfrags = frag+1; + + /* + * Last buffer remains mapped so explicitly make it + * coherent for CPU access and start preloading first + * cache line ... 
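is_new_response() and rspq_next() above implement a classic generation-bit ring: the consumer flips the generation it expects each time it wraps, so a slot is new exactly when its stored bit matches. A self-contained model of the scheme (ring size illustrative):

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 4

    struct slot { int gen; int data; };

    int main(void)
    {
            struct slot ring[RING_SIZE] = { { 0 } };
            int prod = 0, prod_gen = 1;     /* producer index and generation */
            int cons = 0, cons_gen = 1;     /* consumer expects this generation */
            int i, consumed = 0;

            for (i = 0; i < 10; i++) {      /* produce ten entries */
                    ring[prod].data = i;
                    ring[prod].gen = prod_gen;      /* mark slot valid */
                    if (++prod == RING_SIZE) {
                            prod = 0;
                            prod_gen ^= 1;  /* new lap, new generation */
                    }
                    /* consume while the current slot looks new */
                    while (ring[cons].gen == cons_gen) {
                            consumed++;
                            if (++cons == RING_SIZE) {
                                    cons = 0;
                                    cons_gen ^= 1;
                            }
                    }
            }
            assert(consumed == 10);
            printf("consumed %d entries via generation matching\n", consumed);
            return 0;
    }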
+ */ + dma_sync_single_for_cpu(rspq->adapter->pdev_dev, + get_buf_addr(sdesc), + fp->size, DMA_FROM_DEVICE); + gl.va = (page_address(gl.frags[0].page) + + gl.frags[0].page_offset); + prefetch(gl.va); + + /* + * Hand the new ingress packet to the handler for + * this Response Queue. + */ + ret = rspq->handler(rspq, rspq->cur_desc, &gl); + if (likely(ret == 0)) + rspq->offset += ALIGN(fp->size, FL_ALIGN); + else + restore_rx_bufs(&gl, &rxq->fl, frag); + } else if (likely(rsp_type == RSP_TYPE_CPL)) { + ret = rspq->handler(rspq, rspq->cur_desc, NULL); + } else { + WARN_ON(rsp_type > RSP_TYPE_CPL); + ret = 0; + } + + if (unlikely(ret)) { + /* + * Couldn't process descriptor, back off for recovery. + * We use the SGE's last timer which has the longest + * interrupt coalescing value ... + */ + const int NOMEM_TIMER_IDX = SGE_NTIMERS-1; + rspq->next_intr_params = + QINTR_TIMER_IDX(NOMEM_TIMER_IDX); + break; + } + + rspq_next(rspq); + budget_left--; + } + + /* + * If this is a Response Queue with an associated Free List and + * at least two Egress Queue units available in the Free List + * for new buffer pointers, refill the Free List. + */ + if (rspq->offset >= 0 && + rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) + __refill_fl(rspq->adapter, &rxq->fl); + return budget - budget_left; +} + +/** + * napi_rx_handler - the NAPI handler for RX processing + * @napi: the napi instance + * @budget: how many packets we can process in this round + * + * Handler for new data events when using NAPI. This does not need any + * locking or protection from interrupts as data interrupts are off at + * this point and other adapter interrupts do not interfere (the latter + * is not a concern at all with MSI-X as non-data interrupts then have + * a separate handler). + */ +static int napi_rx_handler(struct napi_struct *napi, int budget) +{ + unsigned int intr_params; + struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); + int work_done = process_responses(rspq, budget); + + if (likely(work_done < budget)) { + napi_complete(napi); + intr_params = rspq->next_intr_params; + rspq->next_intr_params = rspq->intr_params; + } else + intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX); + + if (unlikely(work_done == 0)) + rspq->unhandled_irqs++; + + t4_write_reg(rspq->adapter, + T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + CIDXINC(work_done) | + INGRESSQID((u32)rspq->cntxt_id) | + SEINTARM(intr_params)); + return work_done; +} + +/* + * The MSI-X interrupt handler for an SGE response queue for the NAPI case + * (i.e., response queue serviced by NAPI polling). + */ +irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie) +{ + struct sge_rspq *rspq = cookie; + + napi_schedule(&rspq->napi); + return IRQ_HANDLED; +} + +/* + * Process the indirect interrupt entries in the interrupt queue and kick off + * NAPI for each queue that has generated an entry. + */ +static unsigned int process_intrq(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + struct sge_rspq *intrq = &s->intrq; + unsigned int work_done; + + spin_lock(&adapter->sge.intrq_lock); + for (work_done = 0; ; work_done++) { + const struct rsp_ctrl *rc; + unsigned int qid, iq_idx; + struct sge_rspq *rspq; + + /* + * Grab the next response from the interrupt queue and bail + * out if it's not a new response. + */ + rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, intrq)) + break; + + /* + * If the response isn't a forwarded interrupt message issue an + * error and go on to the next response message.
This should + * never happen ... + */ + rmb(); + if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) { + dev_err(adapter->pdev_dev, + "Unexpected INTRQ response type %d\n", + RSPD_TYPE(rc->type_gen)); + continue; + } + + /* + * Extract the Queue ID from the interrupt message and perform + * sanity checking to make sure it really refers to one of our + * Ingress Queues which is active and matches the queue's ID. + * None of these error conditions should ever happen so we may + * want to either make them fatal and/or conditionalized under + * DEBUG. + */ + qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid)); + iq_idx = IQ_IDX(s, qid); + if (unlikely(iq_idx >= MAX_INGQ)) { + dev_err(adapter->pdev_dev, + "Ingress QID %d out of range\n", qid); + continue; + } + rspq = s->ingr_map[iq_idx]; + if (unlikely(rspq == NULL)) { + dev_err(adapter->pdev_dev, + "Ingress QID %d RSPQ=NULL\n", qid); + continue; + } + if (unlikely(rspq->abs_id != qid)) { + dev_err(adapter->pdev_dev, + "Ingress QID %d refers to RSPQ %d\n", + qid, rspq->abs_id); + continue; + } + + /* + * Schedule NAPI processing on the indicated Response Queue + * and move on to the next entry in the Forwarded Interrupt + * Queue. + */ + napi_schedule(&rspq->napi); + rspq_next(intrq); + } + + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + CIDXINC(work_done) | + INGRESSQID(intrq->cntxt_id) | + SEINTARM(intrq->intr_params)); + + spin_unlock(&adapter->sge.intrq_lock); + + return work_done; +} + +/* + * The MSI interrupt handler handles data events from SGE response queues as + * well as error and other async events as they all use the same MSI vector. + */ +irqreturn_t t4vf_intr_msi(int irq, void *cookie) +{ + struct adapter *adapter = cookie; + + process_intrq(adapter); + return IRQ_HANDLED; +} + +/** + * t4vf_intr_handler - select the top-level interrupt handler + * @adapter: the adapter + * + * Selects the top-level interrupt handler based on the type of interrupts + * (MSI-X or MSI). + */ +irq_handler_t t4vf_intr_handler(struct adapter *adapter) +{ + BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); + if (adapter->flags & USING_MSIX) + return t4vf_sge_intr_msix; + else + return t4vf_intr_msi; +} + +/** + * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues + * @data: the adapter + * + * Runs periodically from a timer to perform maintenance of SGE RX queues. + * + * a) Replenishes RX queues that have run out due to memory shortage. + * Normally new RX buffers are added when existing ones are consumed but + * when out of memory a queue can become empty. We schedule NAPI to do + * the actual refill. + */ +static void sge_rx_timer_cb(unsigned long data) +{ + struct adapter *adapter = (struct adapter *)data; + struct sge *s = &adapter->sge; + unsigned int i; + + /* + * Scan the "Starving Free Lists" flag array looking for any Free + * Lists in need of more free buffers. If we find one and it's not + * being actively polled, then bump its "starving" counter and attempt + * to refill it. If we're successful in adding enough buffers to push + * the Free List over the starving threshold, then we can clear its + * "starving" status. 
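The scan below peels one set bit per iteration off each bitmap word with m &= m - 1 and turns it into a queue ID with __ffs(). The same idiom in portable C, using GCC's __builtin_ctzl in place of the kernel's __ffs:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    int main(void)
    {
            unsigned long bitmap[2] = { 0x12UL, 0x5UL };    /* bits 1,4 and 0,2 */
            int i;

            for (i = 0; i < 2; i++) {
                    unsigned long m;

                    /* clear the lowest set bit each iteration */
                    for (m = bitmap[i]; m; m &= m - 1) {
                            int id = __builtin_ctzl(m) + i * BITS_PER_LONG;
                            printf("starving queue id %d\n", id);
                    }
            }
            return 0;       /* prints 1, 4, 64, 66 on LP64 systems */
    }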
+	 */
+	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
+		unsigned long m;
+
+		for (m = s->starving_fl[i]; m; m &= m - 1) {
+			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+			struct sge_fl *fl = s->egr_map[id];
+
+			clear_bit(id, s->starving_fl);
+			smp_mb__after_clear_bit();
+
+			/*
+			 * Since we are accessing fl without a lock there's a
+			 * small probability of a false positive where we
+			 * schedule napi but the FL is no longer starving.
+			 * No biggie.
+			 */
+			if (fl_starving(fl)) {
+				struct sge_eth_rxq *rxq;
+
+				rxq = container_of(fl, struct sge_eth_rxq, fl);
+				if (napi_reschedule(&rxq->rspq.napi))
+					fl->starving++;
+				else
+					set_bit(id, s->starving_fl);
+			}
+		}
+	}
+
+	/*
+	 * Reschedule the next scan for starving Free Lists ...
+	 */
+	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
+
+/**
+ * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
+ * @data: the adapter
+ *
+ * Runs periodically from a timer to perform maintenance of SGE TX queues.
+ *
+ * Reclaims completed Tx packets for the Ethernet queues.  Normally
+ * packets are cleaned up by new Tx packets; this timer cleans up packets
+ * when no new packets are being submitted.  This is essential for pktgen,
+ * at least.
+ */
+static void sge_tx_timer_cb(unsigned long data)
+{
+	struct adapter *adapter = (struct adapter *)data;
+	struct sge *s = &adapter->sge;
+	unsigned int i, budget;
+
+	budget = MAX_TIMER_TX_RECLAIM;
+	i = s->ethtxq_rover;
+	do {
+		struct sge_eth_txq *txq = &s->ethtxq[i];
+
+		if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
+			int avail = reclaimable(&txq->q);
+
+			if (avail > budget)
+				avail = budget;
+
+			free_tx_desc(adapter, &txq->q, avail, true);
+			txq->q.in_use -= avail;
+			__netif_tx_unlock(txq->txq);
+
+			budget -= avail;
+			if (!budget)
+				break;
+		}
+
+		i++;
+		if (i >= s->ethqsets)
+			i = 0;
+	} while (i != s->ethtxq_rover);
+	s->ethtxq_rover = i;
+
+	/*
+	 * If we found too many reclaimable packets, schedule a timer in the
+	 * near future to continue where we left off.  Otherwise the next
+	 * timer will be at its normal interval.
+	 */
+	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
+/**
+ * t4vf_sge_alloc_rxq - allocate an SGE RX Queue
+ * @adapter: the adapter
+ * @rspq: pointer to the new rxq's Response Queue to be filled in
+ * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
+ * @dev: the network device associated with the new rspq
+ * @intr_dest: MSI-X vector index (overridden in MSI mode)
+ * @fl: pointer to the new rxq's Free List to be filled in
+ * @hnd: the interrupt handler to invoke for the rspq
+ */
+int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
+		       bool iqasynch, struct net_device *dev,
+		       int intr_dest,
+		       struct sge_fl *fl, rspq_handler_t hnd)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct fw_iq_cmd cmd, rpl;
+	int ret, iqandst, flsz = 0;
+
+	/*
+	 * If we're using MSI interrupts and we're not initializing the
+	 * Forwarded Interrupt Queue itself, then set up this queue for
+	 * indirect interrupts to the Forwarded Interrupt Queue.  Obviously
+	 * the Forwarded Interrupt Queue must be set up before any other
+	 * ingress queue ...
+	 */
+	if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
+		iqandst = SGE_INTRDST_IQ;
+		intr_dest = adapter->sge.intrq.abs_id;
+	} else
+		iqandst = SGE_INTRDST_PCI;
+
+	/*
+	 * Allocate the hardware ring for the Response Queue.
The size needs + * to be a multiple of 16 which includes the mandatory status entry + * (regardless of whether the Status Page capabilities are enabled or + * not). + */ + rspq->size = roundup(rspq->size, 16); + rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len, + 0, &rspq->phys_addr, NULL, 0); + if (!rspq->desc) + return -ENOMEM; + + /* + * Fill in the Ingress Queue Command. Note: Ideally this code would + * be in t4vf_hw.c but there are so many parameters and dependencies + * on our Linux SGE state that we would end up having to pass tons of + * parameters. We'll have to think about how this might be migrated + * into OS-independent common code ... + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC | + FW_IQ_CMD_IQSTART(1) | + FW_LEN16(cmd)); + cmd.type_to_iqandstindex = + cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH(iqasynch) | + FW_IQ_CMD_VIID(pi->viid) | + FW_IQ_CMD_IQANDST(iqandst) | + FW_IQ_CMD_IQANUS(1) | + FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) | + FW_IQ_CMD_IQANDSTINDEX(intr_dest)); + cmd.iqdroprss_to_iqesize = + cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) | + FW_IQ_CMD_IQGTSMODE | + FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4)); + cmd.iqsize = cpu_to_be16(rspq->size); + cmd.iqaddr = cpu_to_be64(rspq->phys_addr); + + if (fl) { + /* + * Allocate the ring for the hardware free list (with space + * for its status page) along with the associated software + * descriptor ring. The free list size needs to be a multiple + * of the Egress Queue Unit. + */ + fl->size = roundup(fl->size, FL_PER_EQ_UNIT); + fl->desc = alloc_ring(adapter->pdev_dev, fl->size, + sizeof(__be64), sizeof(struct rx_sw_desc), + &fl->addr, &fl->sdesc, STAT_LEN); + if (!fl->desc) { + ret = -ENOMEM; + goto err; + } + + /* + * Calculate the size of the hardware free list ring plus + * Status Page (which the SGE will place after the end of the + * free list ring) in Egress Queue Units. + */ + flsz = (fl->size / FL_PER_EQ_UNIT + + STAT_LEN / EQ_UNIT); + + /* + * Fill in all the relevant firmware Ingress Queue Command + * fields for the free list. + */ + cmd.iqns_to_fl0congen = + cpu_to_be32( + FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) | + FW_IQ_CMD_FL0PACKEN | + FW_IQ_CMD_FL0PADEN); + cmd.fl0dcaen_to_fl0cidxfthresh = + cpu_to_be16( + FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) | + FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B)); + cmd.fl0size = cpu_to_be16(flsz); + cmd.fl0addr = cpu_to_be64(fl->addr); + } + + /* + * Issue the firmware Ingress Queue Command and extract the results if + * it completes successfully. + */ + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (ret) + goto err; + + netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64); + rspq->cur_desc = rspq->desc; + rspq->cidx = 0; + rspq->gen = 1; + rspq->next_intr_params = rspq->intr_params; + rspq->cntxt_id = be16_to_cpu(rpl.iqid); + rspq->abs_id = be16_to_cpu(rpl.physiqid); + rspq->size--; /* subtract status entry */ + rspq->adapter = adapter; + rspq->netdev = dev; + rspq->handler = hnd; + + /* set offset to -1 to distinguish ingress queues without FL */ + rspq->offset = fl ? 
0 : -1;
+
+	if (fl) {
+		fl->cntxt_id = be16_to_cpu(rpl.fl0id);
+		fl->avail = 0;
+		fl->pend_cred = 0;
+		fl->pidx = 0;
+		fl->cidx = 0;
+		fl->alloc_failed = 0;
+		fl->large_alloc_failed = 0;
+		fl->starving = 0;
+		refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
+	}
+
+	return 0;
+
+err:
+	/*
+	 * An error occurred.  Clean up our partial allocation state and
+	 * return the error.
+	 */
+	if (rspq->desc) {
+		dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
+				  rspq->desc, rspq->phys_addr);
+		rspq->desc = NULL;
+	}
+	if (fl && fl->desc) {
+		kfree(fl->sdesc);
+		fl->sdesc = NULL;
+		dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
+				  fl->desc, fl->addr);
+		fl->desc = NULL;
+	}
+	return ret;
+}
+
+/**
+ * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
+ * @adapter: the adapter
+ * @txq: pointer to the new txq to be filled in
+ * @devq: the network TX queue associated with the new txq
+ * @iqid: the relative ingress queue ID to which events relating to
+ *	the new txq should be directed
+ */
+int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
+			   struct net_device *dev, struct netdev_queue *devq,
+			   unsigned int iqid)
+{
+	int ret, nentries;
+	struct fw_eq_eth_cmd cmd, rpl;
+	struct port_info *pi = netdev_priv(dev);
+
+	/*
+	 * Calculate the size of the hardware TX Queue (including the Status
+	 * Page on the end of the TX Queue) in units of TX Descriptors.
+	 */
+	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+	/*
+	 * Allocate the hardware ring for the TX ring (with space for its
+	 * status page) along with the associated software descriptor ring.
+	 */
+	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
+				 sizeof(struct tx_desc),
+				 sizeof(struct tx_sw_desc),
+				 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+	if (!txq->q.desc)
+		return -ENOMEM;
+
+	/*
+	 * Fill in the Egress Queue Command.  Note: As with the direct use of
+	 * the firmware Ingress Queue Command above in our RXQ allocation
+	 * routine, ideally, this code would be in t4vf_hw.c.  Again, we'll
+	 * have to see if there's some reasonable way to parameterize it
+	 * into the common code ...
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_WRITE |
+				    FW_CMD_EXEC);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
+					 FW_EQ_ETH_CMD_EQSTART |
+					 FW_LEN16(cmd));
+	cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
+	cmd.fetchszm_to_iqid =
+		cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
+			    FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
+			    FW_EQ_ETH_CMD_IQID(iqid));
+	cmd.dcaen_to_eqsize =
+		cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
+			    FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
+			    FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
+			    FW_EQ_ETH_CMD_EQSIZE(nentries));
+	cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	/*
+	 * Issue the firmware Egress Queue Command and extract the results if
+	 * it completes successfully.
+	 */
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret) {
+		/*
+		 * The firmware Egress Queue Command failed for some reason.
+		 * Free up our partial allocation state and return the error.
+ */ + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adapter->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + txq->q.in_use = 0; + txq->q.cidx = 0; + txq->q.pidx = 0; + txq->q.stat = (void *)&txq->q.desc[txq->q.size]; + txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd)); + txq->q.abs_id = + FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd)); + txq->txq = devq; + txq->tso = 0; + txq->tx_cso = 0; + txq->vlan_ins = 0; + txq->q.stops = 0; + txq->q.restarts = 0; + txq->mapping_err = 0; + return 0; +} + +/* + * Free the DMA map resources associated with a TX queue. + */ +static void free_txq(struct adapter *adapter, struct sge_txq *tq) +{ + dma_free_coherent(adapter->pdev_dev, + tq->size * sizeof(*tq->desc) + STAT_LEN, + tq->desc, tq->phys_addr); + tq->cntxt_id = 0; + tq->sdesc = NULL; + tq->desc = NULL; +} + +/* + * Free the resources associated with a response queue (possibly including a + * free list). + */ +static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, + struct sge_fl *fl) +{ + unsigned int flid = fl ? fl->cntxt_id : 0xffff; + + t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, + rspq->cntxt_id, flid, 0xffff); + dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len, + rspq->desc, rspq->phys_addr); + netif_napi_del(&rspq->napi); + rspq->netdev = NULL; + rspq->cntxt_id = 0; + rspq->abs_id = 0; + rspq->desc = NULL; + + if (fl) { + free_rx_bufs(adapter, fl, fl->avail); + dma_free_coherent(adapter->pdev_dev, + fl->size * sizeof(*fl->desc) + STAT_LEN, + fl->desc, fl->addr); + kfree(fl->sdesc); + fl->sdesc = NULL; + fl->cntxt_id = 0; + fl->desc = NULL; + } +} + +/** + * t4vf_free_sge_resources - free SGE resources + * @adapter: the adapter + * + * Frees resources used by the SGE queue sets. + */ +void t4vf_free_sge_resources(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + struct sge_eth_rxq *rxq = s->ethrxq; + struct sge_eth_txq *txq = s->ethtxq; + struct sge_rspq *evtq = &s->fw_evtq; + struct sge_rspq *intrq = &s->intrq; + int qs; + + for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) { + if (rxq->rspq.desc) + free_rspq_fl(adapter, &rxq->rspq, &rxq->fl); + if (txq->q.desc) { + t4vf_eth_eq_free(adapter, txq->q.cntxt_id); + free_tx_desc(adapter, &txq->q, txq->q.in_use, true); + kfree(txq->q.sdesc); + free_txq(adapter, &txq->q); + } + } + if (evtq->desc) + free_rspq_fl(adapter, evtq, NULL); + if (intrq->desc) + free_rspq_fl(adapter, intrq, NULL); +} + +/** + * t4vf_sge_start - enable SGE operation + * @adapter: the adapter + * + * Start tasklets and timers associated with the DMA engine. + */ +void t4vf_sge_start(struct adapter *adapter) +{ + adapter->sge.ethtxq_rover = 0; + mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); + mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); +} + +/** + * t4vf_sge_stop - disable SGE operation + * @adapter: the adapter + * + * Stop tasklets and timers associated with the DMA engine. Note that + * this is effective only if measures have been taken to disable any HW + * events that may restart them. + */ +void t4vf_sge_stop(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + + if (s->rx_timer.function) + del_timer_sync(&s->rx_timer); + if (s->tx_timer.function) + del_timer_sync(&s->tx_timer); +} + +/** + * t4vf_sge_init - initialize SGE + * @adapter: the adapter + * + * Performs SGE initialization needed every time after a chip reset. 
+ * We do not initialize any of the queue sets here; instead, the top-level
+ * driver must request those individually.  We also do not enable DMA here;
+ * that should be done after the queues have been set up.
+ */
+int t4vf_sge_init(struct adapter *adapter)
+{
+	struct sge_params *sge_params = &adapter->params.sge;
+	u32 fl0 = sge_params->sge_fl_buffer_size[0];
+	u32 fl1 = sge_params->sge_fl_buffer_size[1];
+	struct sge *s = &adapter->sge;
+
+	/*
+	 * Start by vetting the basic SGE parameters which have been set up by
+	 * the Physical Function Driver.  Ideally we should be able to deal
+	 * with _any_ configuration.  Practice is different ...
+	 */
+	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+			fl0, fl1);
+		return -EINVAL;
+	}
+	if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
+		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Now translate the adapter parameters into our internal forms.
+	 */
+	if (fl1)
+		FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
+	STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
+	PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
+	FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+			 SGE_INGPADBOUNDARY_SHIFT);
+
+	/*
+	 * Set up tasklet timers.
+	 */
+	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
+	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
+
+	/*
+	 * Initialize Forwarded Interrupt Queue lock.
+	 */
+	spin_lock_init(&s->intrq_lock);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
new file mode 100644
index 0000000..a65c80ae
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -0,0 +1,274 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4VF_COMMON_H__
+#define __T4VF_COMMON_H__
+
+#include "../cxgb4/t4fw_api.h"
+
+/*
+ * The "len16" field of a Firmware Command Structure ...
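+ *
+ * (Firmware commands carry their length as a count of 16-byte units;
+ * e.g. for a hypothetical 32-byte command structure, FW_LEN16(cmd)
+ * below expands to FW_CMD_LEN16(2).)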
+ */ +#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) + +/* + * Per-VF statistics. + */ +struct t4vf_port_stats { + /* + * TX statistics. + */ + u64 tx_bcast_bytes; /* broadcast */ + u64 tx_bcast_frames; + u64 tx_mcast_bytes; /* multicast */ + u64 tx_mcast_frames; + u64 tx_ucast_bytes; /* unicast */ + u64 tx_ucast_frames; + u64 tx_drop_frames; /* TX dropped frames */ + u64 tx_offload_bytes; /* offload */ + u64 tx_offload_frames; + + /* + * RX statistics. + */ + u64 rx_bcast_bytes; /* broadcast */ + u64 rx_bcast_frames; + u64 rx_mcast_bytes; /* multicast */ + u64 rx_mcast_frames; + u64 rx_ucast_bytes; + u64 rx_ucast_frames; /* unicast */ + + u64 rx_err_frames; /* RX error frames */ +}; + +/* + * Per-"port" (Virtual Interface) link configuration ... + */ +struct link_config { + unsigned int supported; /* link capabilities */ + unsigned int advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? */ +}; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +/* + * General device parameters ... + */ +struct dev_params { + u32 fwrev; /* firmware version */ + u32 tprev; /* TP Microcode Version */ +}; + +/* + * Scatter Gather Engine parameters. These are almost all determined by the + * Physical Function Driver. We just need to grab them to see within which + * environment we're playing ... + */ +struct sge_params { + u32 sge_control; /* padding, boundaries, lengths, etc. */ + u32 sge_host_page_size; /* RDMA page sizes */ + u32 sge_queues_per_page; /* RDMA queues/page */ + u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ + u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ + u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ + u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ + u32 sge_timer_value_2_and_3; + u32 sge_timer_value_4_and_5; +}; + +/* + * Vital Product Data parameters. + */ +struct vpd_params { + u32 cclk; /* Core Clock (KHz) */ +}; + +/* + * Global Receive Side Scaling (RSS) parameters in host-native format. + */ +struct rss_params { + unsigned int mode; /* RSS mode */ + union { + struct { + unsigned int synmapen:1; /* SYN Map Enable */ + unsigned int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */ + unsigned int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */ + unsigned int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */ + unsigned int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */ + unsigned int ofdmapen:1; /* Offload Map Enable */ + unsigned int tnlmapen:1; /* Tunnel Map Enable */ + unsigned int tnlalllookup:1; /* Tunnel All Lookup */ + unsigned int hashtoeplitz:1; /* use Toeplitz hash */ + } basicvirtual; + } u; +}; + +/* + * Virtual Interface RSS Configuration in host-native format. 
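+ *
+ * (The intended read-modify-write usage, as a sketch built on the
+ * accessors declared later in this header, where 'adapter' and 'viid'
+ * identify the VF and its Virtual Interface:
+ *
+ *	union rss_vi_config config;
+ *
+ *	t4vf_read_rss_vi_config(adapter, viid, &config);
+ *	config.basicvirtual.udpen = 1;
+ *	t4vf_write_rss_vi_config(adapter, viid, &config);
+ * )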
+ */ +union rss_vi_config { + struct { + u16 defaultq; /* Ingress Queue ID for !tnlalllookup */ + unsigned int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */ + unsigned int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */ + unsigned int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */ + unsigned int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */ + int udpen; /* hash 4-tuple UDP ingress packets */ + } basicvirtual; +}; + +/* + * Maximum resources provisioned for a PCI VF. + */ +struct vf_resources { + unsigned int nvi; /* N virtual interfaces */ + unsigned int neq; /* N egress Qs */ + unsigned int nethctrl; /* N egress ETH or CTRL Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ + unsigned int niq; /* N ingress Qs */ + unsigned int tc; /* PCI-E traffic class */ + unsigned int pmask; /* port access rights mask */ + unsigned int nexactf; /* N exact MPS filters */ + unsigned int r_caps; /* read capabilities */ + unsigned int wx_caps; /* write/execute capabilities */ +}; + +/* + * Per-"adapter" (Virtual Function) parameters. + */ +struct adapter_params { + struct dev_params dev; /* general device parameters */ + struct sge_params sge; /* Scatter Gather Engine */ + struct vpd_params vpd; /* Vital Product Data */ + struct rss_params rss; /* Receive Side Scaling */ + struct vf_resources vfres; /* Virtual Function Resource limits */ + u8 nports; /* # of Ethernet "ports" */ +}; + +#include "adapter.h" + +#ifndef PCI_VENDOR_ID_CHELSIO +# define PCI_VENDOR_ID_CHELSIO 0x1425 +#endif + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; iter++) + +static inline bool is_10g_port(const struct link_config *lc) +{ + return (lc->supported & SUPPORTED_10000baseT_Full) != 0; +} + +static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) +{ + return adapter->params.vpd.cclk / 1000; +} + +static inline unsigned int us_to_core_ticks(const struct adapter *adapter, + unsigned int us) +{ + return (us * adapter->params.vpd.cclk) / 1000; +} + +static inline unsigned int core_ticks_to_us(const struct adapter *adapter, + unsigned int ticks) +{ + return (ticks * 1000) / adapter->params.vpd.cclk; +} + +int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool); + +static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true); +} + +static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); +} + +int __devinit t4vf_wait_dev_ready(struct adapter *); +int __devinit t4vf_port_init(struct adapter *, int); + +int t4vf_fw_reset(struct adapter *); +int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); +int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); + +int t4vf_get_sge_params(struct adapter *); +int t4vf_get_vpd_params(struct adapter *); +int t4vf_get_dev_params(struct adapter *); +int t4vf_get_rss_glb_config(struct adapter *); +int t4vf_get_vfres(struct adapter *); + +int t4vf_read_rss_vi_config(struct adapter *, unsigned int, + union rss_vi_config *); +int t4vf_write_rss_vi_config(struct adapter *, unsigned int, + union rss_vi_config *); +int t4vf_config_rss_range(struct adapter *, unsigned int, int, int, + const u16 *, int); + +int t4vf_alloc_vi(struct adapter *, int); +int t4vf_free_vi(struct adapter *, int); +int t4vf_enable_vi(struct adapter *, unsigned 
int, bool, bool); +int t4vf_identify_port(struct adapter *, unsigned int, unsigned int); + +int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int, + bool); +int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int, + const u8 **, u16 *, u64 *, bool); +int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool); +int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool); +int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *); + +int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int, + unsigned int); +int t4vf_eth_eq_free(struct adapter *, unsigned int); + +int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); + +#endif /* __T4VF_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h new file mode 100644 index 0000000..c7b127d --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h @@ -0,0 +1,121 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4VF_DEFS_H__ +#define __T4VF_DEFS_H__ + +#include "../cxgb4/t4_regs.h" + +/* + * The VF Register Map. + * + * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local + * bus module (PL) and CPU Interface Module (CIM) components are mapped via + * the Slice to Module Map Table (see below) in the Physical Function Register + * Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base + * and Offset registers in the PF Register Map. The MBDATA base address is + * quite constrained as it determines the Mailbox Data addresses for both PFs + * and VFs, and therefore must fit in both the VF and PF Register Maps without + * overlapping other registers. 
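+ *
+ * (A VF register address is simply a module's base address plus the
+ * register's offset within that module; e.g. the SGE "Going To Sleep"
+ * doorbell write used by the SGE code above,
+ *
+ *	t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, val);
+ *
+ * resolves with the constants below to address 0x004.  'val' here is
+ * just a placeholder for the doorbell value being written.)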
+ */
+#define T4VF_SGE_BASE_ADDR	0x0000
+#define T4VF_MPS_BASE_ADDR	0x0100
+#define T4VF_PL_BASE_ADDR	0x0200
+#define T4VF_MBDATA_BASE_ADDR	0x0240
+#define T4VF_CIM_BASE_ADDR	0x0300
+
+#define T4VF_REGMAP_START	0x0000
+#define T4VF_REGMAP_SIZE	0x0400
+
+/*
+ * There's no hardware limitation which requires that the addresses of the
+ * Mailbox Data in the fixed CIM PF map and the programmable VF map must
+ * match.  However, it's a useful convention ...
+ */
+#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
+#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
+#endif
+
+/*
+ * Virtual Function "Slice to Module Map Table" definitions.
+ *
+ * This table allows us to map subsets of the various module register sets
+ * into the T4VF Register Map.  Each table entry identifies the index of the
+ * module whose registers are being mapped, the offset within the module's
+ * register set that the mapping should start at, the limit of the mapping,
+ * and the offset within the T4VF Register Map to which the module's registers
+ * are being mapped.  All addresses and quantities are in terms of 32-bit
+ * words.  The "limit" value is also in terms of 32-bit words and is equal to
+ * the last address mapped in the T4VF Register Map (i.e. it's a "<="
+ * relation rather than a "<").
+ */
+#define T4VF_MOD_MAP(module, index, first, last) \
+	T4VF_MOD_MAP_##module##_INDEX = (index), \
+	T4VF_MOD_MAP_##module##_FIRST = (first), \
+	T4VF_MOD_MAP_##module##_LAST = (last), \
+	T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \
+	T4VF_MOD_MAP_##module##_BASE = \
+		(T4VF_##module##_BASE_ADDR/4 + (first)/4), \
+	T4VF_MOD_MAP_##module##_LIMIT = \
+		(T4VF_##module##_BASE_ADDR/4 + (last)/4),
+
+#define SGE_VF_KDOORBELL	0x0
+#define SGE_VF_GTS		0x4
+#define MPS_VF_CTL		0x0
+#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H	0xfc
+#define PL_VF_WHOAMI		0x0
+#define CIM_VF_EXT_MAILBOX_CTRL		0x0
+#define CIM_VF_EXT_MAILBOX_STATUS	0x4
+
+enum {
+	T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS)
+	T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H)
+	T4VF_MOD_MAP(PL, 3, PL_VF_WHOAMI, PL_VF_WHOAMI)
+	T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS)
+};
+
+/*
+ * There isn't a Slice to Module Map Table entry for the Mailbox Data
+ * registers, but it's convenient to use similar names as above.  There are 8
+ * little-endian 64-bit Mailbox Data registers.  Note that the "instances"
+ * value below is in terms of 32-bit words which matches the "word" addressing
+ * space we use above for the Slice to Module Map Space.
+ */
+#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16
+
+#define T4VF_MBDATA_FIRST	0
+#define T4VF_MBDATA_LAST	((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4)
+
+#endif /* __T4VF_DEFS_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
new file mode 100644
index 0000000..fe3fd3d
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -0,0 +1,1387 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.
+ * You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4fw_api.h"
+
+/*
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's).  Return an error if it doesn't
+ * become ready ...
+ */
+int __devinit t4vf_wait_dev_ready(struct adapter *adapter)
+{
+	const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
+	const u32 notready1 = 0xffffffff;
+	const u32 notready2 = 0xeeeeeeee;
+	u32 val;
+
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+	msleep(500);
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+	else
+		return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order
+ * (since the firmware data structures are specified in a big-endian layout).
+ */
+static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
+			 u32 mbox_data)
+{
+	for ( ; size; size -= 8, mbox_data += 8)
+		*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
+}
+
+/*
+ * Dump contents of mailbox with a leading tag.
+ */
+static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
+{
+	dev_err(adapter->pdev_dev,
+		"mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
+		(unsigned long long)t4_read_reg64(adapter, mbox_data +  0),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data +  8),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
+		(unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
+}
+
+/**
+ * t4vf_wr_mbox_core - send a command to FW through the mailbox
+ * @adapter: the adapter
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the mailbox and waits for the
+ * FW to execute the command.
If @rpl is not %NULL it is used to store + * the FW's reply to the command. The command and its optional reply + * are of the same length. FW can take up to 500 ms to respond. + * @sleep_ok determines whether we may sleep while awaiting the response. + * If sleeping is allowed we use progressive backoff otherwise we spin. + * + * The return value is 0 on success or a negative errno on failure. A + * failure can happen either because we are not able to execute the + * command or FW executes it but signals an error. In the latter case + * the return value is the error code indicated by FW (negated). + */ +int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, + void *rpl, bool sleep_ok) +{ + static const int delay[] = { + 1, 1, 3, 5, 10, 10, 20, 50, 100 + }; + + u32 v; + int i, ms, delay_idx; + const __be64 *p; + u32 mbox_data = T4VF_MBDATA_BASE_ADDR; + u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; + + /* + * Commands must be multiples of 16 bytes in length and may not be + * larger than the size of the Mailbox Data register array. + */ + if ((size % 16) != 0 || + size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4) + return -EINVAL; + + /* + * Loop trying to get ownership of the mailbox. Return an error + * if we can't gain ownership. + */ + v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl)); + for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) + v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl)); + if (v != MBOX_OWNER_DRV) + return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT; + + /* + * Write the command array into the Mailbox Data register array and + * transfer ownership of the mailbox to the firmware. + * + * For the VFs, the Mailbox Data "registers" are actually backed by + * T4's "MA" interface rather than PL Registers (as is the case for + * the PFs). Because these are in different coherency domains, the + * write to the VF's PL-register-backed Mailbox Control can race in + * front of the writes to the MA-backed VF Mailbox Data "registers". + * So we need to do a read-back on at least one byte of the VF Mailbox + * Data registers before doing the write to the VF Mailbox Control + * register. + */ + for (i = 0, p = cmd; i < size; i += 8) + t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); + t4_read_reg(adapter, mbox_data); /* flush write */ + + t4_write_reg(adapter, mbox_ctl, + MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); + t4_read_reg(adapter, mbox_ctl); /* flush write */ + + /* + * Spin waiting for firmware to acknowledge processing our command. + */ + delay_idx = 0; + ms = delay[0]; + + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { + if (sleep_ok) { + ms = delay[delay_idx]; + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else + mdelay(ms); + + /* + * If we're the owner, see if this is the reply we wanted. + */ + v = t4_read_reg(adapter, mbox_ctl); + if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { + /* + * If the Message Valid bit isn't on, revoke ownership + * of the mailbox and continue waiting for our reply. + */ + if ((v & MBMSGVALID) == 0) { + t4_write_reg(adapter, mbox_ctl, + MBOWNER(MBOX_OWNER_NONE)); + continue; + } + + /* + * We now have our reply. Extract the command return + * value, copy the reply back to our caller's buffer + * (if specified) and revoke ownership of the mailbox. + * We return the (negated) firmware command return + * code (this depends on FW_SUCCESS == 0). 
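+			 *
+			 * (So a firmware status of FW_SUCCESS comes back to
+			 * our caller as 0 and, for example, a firmware
+			 * FW_EINVAL status comes back as -EINVAL, assuming
+			 * the fw_retval codes mirror the kernel's errno
+			 * values.)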
+ */ + + /* return value in low-order little-endian word */ + v = t4_read_reg(adapter, mbox_data); + if (FW_CMD_RETVAL_GET(v)) + dump_mbox(adapter, "FW Error", mbox_data); + + if (rpl) { + /* request bit in high-order BE word */ + WARN_ON((be32_to_cpu(*(const u32 *)cmd) + & FW_CMD_REQUEST) == 0); + get_mbox_rpl(adapter, rpl, size, mbox_data); + WARN_ON((be32_to_cpu(*(u32 *)rpl) + & FW_CMD_REQUEST) != 0); + } + t4_write_reg(adapter, mbox_ctl, + MBOWNER(MBOX_OWNER_NONE)); + return -FW_CMD_RETVAL_GET(v); + } + } + + /* + * We timed out. Return the error ... + */ + dump_mbox(adapter, "FW Timeout", mbox_data); + return -ETIMEDOUT; +} + +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by hardware + * inexact (hash) address matching. + */ +static int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + +/** + * init_link_config - initialize a link's SW state + * @lc: structure holding the link state + * @caps: link capabilities + * + * Initializes the SW state maintained for each link, including the link's + * capabilities and default speed/flow-control/autonegotiation settings. + */ +static void __devinit init_link_config(struct link_config *lc, + unsigned int caps) +{ + lc->supported = caps; + lc->requested_speed = 0; + lc->speed = 0; + lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; + if (lc->supported & SUPPORTED_Autoneg) { + lc->advertising = lc->supported; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->advertising = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +/** + * t4vf_port_init - initialize port hardware/software state + * @adapter: the adapter + * @pidx: the adapter port index + */ +int __devinit t4vf_port_init(struct adapter *adapter, int pidx) +{ + struct port_info *pi = adap2pinfo(adapter, pidx); + struct fw_vi_cmd vi_cmd, vi_rpl; + struct fw_port_cmd port_cmd, port_rpl; + int v; + u32 word; + + /* + * Execute a VI Read command to get our Virtual Interface information + * like MAC address, etc. + */ + memset(&vi_cmd, 0, sizeof(vi_cmd)); + vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); + vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid)); + v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); + if (v) + return v; + + BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd)); + pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd)); + t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac); + + /* + * If we don't have read access to our port information, we're done + * now. Otherwise, execute a PORT Read command to get it ... 
+	 */
+	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+		return 0;
+
+	memset(&port_cmd, 0, sizeof(port_cmd));
+	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) |
+					    FW_CMD_REQUEST |
+					    FW_CMD_READ |
+					    FW_PORT_CMD_PORTID(pi->port_id));
+	port_cmd.action_to_len16 =
+		cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+			    FW_LEN16(port_cmd));
+	v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
+	if (v)
+		return v;
+
+	v = 0;
+	word = be16_to_cpu(port_rpl.u.info.pcap);
+	if (word & FW_PORT_CAP_SPEED_100M)
+		v |= SUPPORTED_100baseT_Full;
+	if (word & FW_PORT_CAP_SPEED_1G)
+		v |= SUPPORTED_1000baseT_Full;
+	if (word & FW_PORT_CAP_SPEED_10G)
+		v |= SUPPORTED_10000baseT_Full;
+	if (word & FW_PORT_CAP_ANEG)
+		v |= SUPPORTED_Autoneg;
+	init_link_config(&pi->link_cfg, v);
+
+	return 0;
+}
+
+/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW.  For a Physical Function this would
+ * result in the Firmware resetting all of its state.  For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+	struct fw_reset_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+				      FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_query_params - query FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Reads the values of firmware or device parameters.  Up to 7 parameters
+ * can be queried at once.
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+		      const u32 *params, u32 *vals)
+{
+	int i, ret;
+	struct fw_params_cmd cmd, rpl;
+	struct fw_params_param *p;
+	size_t len16;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_READ);
+	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+				      param[nparams].mnem), 16);
+	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+		p->mnem = htonl(*params++);
+
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret == 0)
+		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+			*vals++ = be32_to_cpu(p->val);
+	return ret;
+}
+
+/**
+ * t4vf_set_params - sets FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Sets the values of firmware or device parameters.  Up to 7 parameters
+ * can be specified at once.
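+ *
+ * (A typical single-parameter query, as done by the helpers further
+ * below, looks like this sketch:
+ *
+ *	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ *		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV);
+ *	u32 val;
+ *	int ret = t4vf_query_params(adapter, 1, &param, &val);
+ *
+ * t4vf_set_params() takes the same parameter mnemonics plus the values
+ * to be written.)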
+ */
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+		    const u32 *params, const u32 *vals)
+{
+	int i;
+	struct fw_params_cmd cmd;
+	struct fw_params_param *p;
+	size_t len16;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_WRITE);
+	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+				      param[nparams]), 16);
+	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
+		p->mnem = cpu_to_be32(*params++);
+		p->val = cpu_to_be32(*vals++);
+	}
+
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various core SGE parameters in the form of hardware SGE
+ * register values.  The caller is responsible for decoding these as
+ * needed.  The SGE parameters are stored in @adapter->params.sge.
+ */
+int t4vf_get_sge_params(struct adapter *adapter)
+{
+	struct sge_params *sge_params = &adapter->params.sge;
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_CONTROL));
+	params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE));
+	params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0));
+	params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1));
+	params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1));
+	params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3));
+	params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5));
+	v = t4vf_query_params(adapter, 7, params, vals);
+	if (v)
+		return v;
+	sge_params->sge_control = vals[0];
+	sge_params->sge_host_page_size = vals[1];
+	sge_params->sge_fl_buffer_size[0] = vals[2];
+	sge_params->sge_fl_buffer_size[1] = vals[3];
+	sge_params->sge_timer_value_0_and_1 = vals[4];
+	sge_params->sge_timer_value_2_and_3 = vals[5];
+	sge_params->sge_timer_value_4_and_5 = vals[6];
+
+	params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
+	v = t4vf_query_params(adapter, 1, params, vals);
+	if (v)
+		return v;
+	sge_params->sge_ingress_rx_threshold = vals[0];
+
+	return 0;
+}
+
+/**
+ * t4vf_get_vpd_params - retrieve device VPD parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device Vital Product Data parameters.  The parameters
+ * are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+	struct vpd_params *vpd_params = &adapter->params.vpd;
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+	v = t4vf_query_params(adapter, 1, params, vals);
+	if (v)
+		return v;
+	vpd_params->cclk = vals[0];
+
+	return 0;
+}
+
+/**
+ * t4vf_get_dev_params - retrieve device parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device parameters.  The parameters are stored in
+ * @adapter->params.dev.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+	struct dev_params *dev_params = &adapter->params.dev;
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+	params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+	v = t4vf_query_params(adapter, 2, params, vals);
+	if (v)
+		return v;
+	dev_params->fwrev = vals[0];
+	dev_params->tprev = vals[1];
+
+	return 0;
+}
+
+/**
+ * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ * @adapter: the adapter
+ *
+ * Retrieves global RSS mode and parameters with which we have to live
+ * and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+	struct rss_params *rss = &adapter->params.rss;
+	struct fw_rss_glb_config_cmd cmd, rpl;
+	int v;
+
+	/*
+	 * Execute an RSS Global Configuration read command to retrieve
+	 * our RSS configuration.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+				      FW_CMD_REQUEST |
+				      FW_CMD_READ);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v)
+		return v;
+
+	/*
+	 * Translate the big-endian RSS Global Configuration into our
+	 * CPU-native format based on the RSS mode.  We also do first level
+	 * filtering at this point to weed out modes which don't support
+	 * VF Drivers ...
+	 */
+	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET(
+		be32_to_cpu(rpl.u.manual.mode_pkd));
+	switch (rss->mode) {
+	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+		u32 word = be32_to_cpu(
+			rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+		rss->u.basicvirtual.synmapen =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+		rss->u.basicvirtual.syn4tupenipv6 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+		rss->u.basicvirtual.syn2tupenipv6 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+		rss->u.basicvirtual.syn4tupenipv4 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+		rss->u.basicvirtual.syn2tupenipv4 =
+			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+
+		rss->u.basicvirtual.ofdmapen =
+			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+
+		rss->u.basicvirtual.tnlmapen =
+			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+		rss->u.basicvirtual.tnlalllookup =
+			((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+
+		rss->u.basicvirtual.hashtoeplitz =
+			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+
+		/* we need at least Tunnel Map Enable to be set */
+		if (!rss->u.basicvirtual.tnlmapen)
+			return -EINVAL;
+		break;
+	}
+
+	default:
+		/* all unknown/unsupported RSS modes result in an error */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * t4vf_get_vfres - retrieve VF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a virtual
+ * function.  The results are stored in @adapter->params.vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+	struct vf_resources *vfres = &adapter->params.vfres;
+	struct fw_pfvf_cmd cmd, rpl;
+	int v;
+	u32 word;
+
+	/*
+	 * Execute PFVF Read command to get VF resource limits; bail out early
+	 * with error on command failure.
+ */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v) + return v; + + /* + * Extract VF resource limits and return success. + */ + word = be32_to_cpu(rpl.niqflint_niq); + vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word); + vfres->niq = FW_PFVF_CMD_NIQ_GET(word); + + word = be32_to_cpu(rpl.type_to_neq); + vfres->neq = FW_PFVF_CMD_NEQ_GET(word); + vfres->pmask = FW_PFVF_CMD_PMASK_GET(word); + + word = be32_to_cpu(rpl.tc_to_nexactf); + vfres->tc = FW_PFVF_CMD_TC_GET(word); + vfres->nvi = FW_PFVF_CMD_NVI_GET(word); + vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word); + + word = be32_to_cpu(rpl.r_caps_to_nethctrl); + vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word); + vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word); + vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word); + + return 0; +} + +/** + * t4vf_read_rss_vi_config - read a VI's RSS configuration + * @adapter: the adapter + * @viid: Virtual Interface ID + * @config: pointer to host-native VI RSS Configuration buffer + * + * Reads the Virtual Interface's RSS configuration information and + * translates it into CPU-native format. + */ +int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, + union rss_vi_config *config) +{ + struct fw_rss_vi_config_cmd cmd, rpl; + int v; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ | + FW_RSS_VI_CONFIG_CMD_VIID(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v) + return v; + + switch (adapter->params.rss.mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { + u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen); + + config->basicvirtual.ip6fourtupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0); + config->basicvirtual.ip6twotupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0); + config->basicvirtual.ip4fourtupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0); + config->basicvirtual.ip4twotupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0); + config->basicvirtual.udpen = + ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0); + config->basicvirtual.defaultq = + FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word); + break; + } + + default: + return -EINVAL; + } + + return 0; +} + +/** + * t4vf_write_rss_vi_config - write a VI's RSS configuration + * @adapter: the adapter + * @viid: Virtual Interface ID + * @config: pointer to host-native VI RSS Configuration buffer + * + * Write the Virtual Interface's RSS configuration information + * (translating it into firmware-native format before writing). 
+ */
+int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
+			     union rss_vi_config *config)
+{
+	struct fw_rss_vi_config_cmd cmd, rpl;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_WRITE |
+				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	switch (adapter->params.rss.mode) {
+	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+		u32 word = 0;
+
+		if (config->basicvirtual.ip6fourtupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+		if (config->basicvirtual.ip6twotupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+		if (config->basicvirtual.ip4fourtupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+		if (config->basicvirtual.ip4twotupen)
+			word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+		if (config->basicvirtual.udpen)
+			word |= FW_RSS_VI_CONFIG_CMD_UDPEN;
+		word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ(
+				config->basicvirtual.defaultq);
+		cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
+		break;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+}
+
+/**
+ * t4vf_config_rss_range - configure a portion of the RSS mapping table
+ * @adapter: the adapter
+ * @viid: Virtual Interface of RSS Table Slice
+ * @start: starting entry in the table to write
+ * @n: how many table entries to write
+ * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
+ * @nrspq: number of values in @rspq
+ *
+ * Programs the selected part of the VI's RSS mapping table with the
+ * provided values.  If @nrspq < @n the supplied values are used repeatedly
+ * until the full table range is populated.
+ *
+ * The caller must ensure the values in @rspq are in the range 0..1023.
+ */
+int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
+			  int start, int n, const u16 *rspq, int nrspq)
+{
+	const u16 *rsp = rspq;
+	const u16 *rsp_end = rspq+nrspq;
+	struct fw_rss_ind_tbl_cmd cmd;
+
+	/*
+	 * Initialize firmware command template to write the RSS table.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_WRITE |
+				     FW_RSS_IND_TBL_CMD_VIID(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+	/*
+	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
+	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
+	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
+	 * reserved.
+	 */
+	while (n > 0) {
+		__be32 *qp = &cmd.iq0_to_iq2;
+		int nq = min(n, 32);
+		int ret;
+
+		/*
+		 * Set up the firmware RSS command header to send the next
+		 * "nq" Ingress Queue IDs to the firmware.
+		 */
+		cmd.niqid = cpu_to_be16(nq);
+		cmd.startidx = cpu_to_be16(start);
+
+		/*
+		 * "nq" more done for the start of the next loop.
+		 */
+		start += nq;
+		n -= nq;
+
+		/*
+		 * While there are still Ingress Queue IDs to stuff into the
+		 * current firmware RSS command, retrieve them from the
+		 * Ingress Queue ID array and insert them into the command.
+		 */
+		while (nq > 0) {
+			/*
+			 * Grab up to the next 3 Ingress Queue IDs (wrapping
+			 * around the Ingress Queue ID array if necessary) and
+			 * insert them into the firmware RSS command at the
+			 * current 3-tuple position within the command.
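+			 *
+			 * (For example, with nrspq == 2 and rspq[] == {5, 6},
+			 * the IDs streamed into successive 3-tuples are
+			 * 5, 6, 5, 6, ... since the 'rsp' pointer simply
+			 * wraps back to the start of the caller's array,
+			 * implementing the repetition promised in the
+			 * function comment above.)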
+			 */
+			u16 qbuf[3];
+			u16 *qbp = qbuf;
+			int nqbuf = min(3, nq);
+
+			nq -= nqbuf;
+			qbuf[0] = qbuf[1] = qbuf[2] = 0;
+			while (nqbuf) {
+				nqbuf--;
+				*qbp++ = *rsp++;
+				if (rsp >= rsp_end)
+					rsp = rspq;
+			}
+			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
+					    FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
+					    FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
+		}
+
+		/*
+		 * Send this portion of the RSS table update to the firmware;
+		 * bail out on any errors.
+		 */
+		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/**
+ * t4vf_alloc_vi - allocate a virtual interface on a port
+ * @adapter: the adapter
+ * @port_id: physical port associated with the VI
+ *
+ * Allocate a new Virtual Interface and bind it to the indicated
+ * physical port.  Return the new Virtual Interface Identifier on
+ * success, or a [negative] error number on failure.
+ */
+int t4vf_alloc_vi(struct adapter *adapter, int port_id)
+{
+	struct fw_vi_cmd cmd, rpl;
+	int v;
+
+	/*
+	 * Execute a VI command to allocate a Virtual Interface and return
+	 * its VIID.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_WRITE |
+				    FW_CMD_EXEC);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+					 FW_VI_CMD_ALLOC);
+	cmd.portid_pkd = FW_VI_CMD_PORTID(port_id);
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v)
+		return v;
+
+	return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid));
+}
+
+/**
+ * t4vf_free_vi -- free a virtual interface
+ * @adapter: the adapter
+ * @viid: the virtual interface identifier
+ *
+ * Free a previously allocated Virtual Interface.  Return an error on
+ * failure.
+ */
+int t4vf_free_vi(struct adapter *adapter, int viid)
+{
+	struct fw_vi_cmd cmd;
+
+	/*
+	 * Execute a VI command to free the Virtual Interface.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+				    FW_CMD_REQUEST |
+				    FW_CMD_EXEC);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+					 FW_VI_CMD_FREE);
+	cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_enable_vi - enable/disable a virtual interface
+ * @adapter: the adapter
+ * @viid: the Virtual Interface ID
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a virtual interface.
+ */
+int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
+		   bool rx_en, bool tx_en)
+{
+	struct fw_vi_enable_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
+				     FW_CMD_REQUEST |
+				     FW_CMD_EXEC |
+				     FW_VI_ENABLE_CMD_VIID(viid));
+	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) |
+				       FW_VI_ENABLE_CMD_EEN(tx_en) |
+				       FW_LEN16(cmd));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_identify_port - identify a VI's port by blinking its LED
+ * @adapter: the adapter
+ * @viid: the Virtual Interface ID
+ * @nblinks: how many times to blink LED at 2.5 Hz
+ *
+ * Identifies a VI's port by blinking its LED.
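+ *
+ * (E.g. t4vf_identify_port(adapter, pi->viid, 5) asks the firmware to
+ * blink the port's LED five times at 2.5 Hz, i.e. for about two
+ * seconds; 'pi' here stands in for the relevant port_info.)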
+ */ +int t4vf_identify_port(struct adapter *adapter, unsigned int viid, + unsigned int nblinks) +{ + struct fw_vi_enable_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST | + FW_CMD_EXEC | + FW_VI_ENABLE_CMD_VIID(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED | + FW_LEN16(cmd)); + cmd.blinkdur = cpu_to_be16(nblinks); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_set_rxmode - set Rx properties of a virtual interface + * @adapter: the adapter + * @viid: the VI id + * @mtu: the new MTU or -1 for no change + * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change + * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change + * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change + * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it, + * -1 no change + * + * Sets Rx properties of a virtual interface. + */ +int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok) +{ + struct fw_vi_rxmode_cmd cmd; + + /* convert to FW values */ + if (mtu < 0) + mtu = FW_VI_RXMODE_CMD_MTU_MASK; + if (promisc < 0) + promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + if (all_multi < 0) + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + if (bcast < 0) + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + if (vlanex < 0) + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_VI_RXMODE_CMD_VIID(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + cmd.mtu_to_vlanexen = + cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); +} + +/** + * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses + * @adapter: the adapter + * @viid: the Virtual Interface Identifier + * @free: if true any existing filters for this VI id are first removed + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @idx: where to store the index of each allocated filter + * @hash: pointer to hash address filter bitmap + * @sleep_ok: call is allowed to sleep + * + * Allocates an exact-match filter for each of the supplied addresses and + * sets it to the corresponding address. If @idx is not %NULL it should + * have at least @naddr entries, each of which will be set to the index of + * the filter allocated for the corresponding MAC address. If a filter + * could not be allocated for an address its index is set to 0xffff. + * If @hash is not %NULL addresses that fail to allocate an exact filter + * are hashed and update the hash filter bitmap pointed at by @hash. + * + * Returns a negative error number or the number of filters allocated. + */ +int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, + unsigned int naddr, const u8 **addr, u16 *idx, + u64 *hash, bool sleep_ok) +{ + int offset, ret = 0; + unsigned nfilters = 0; + unsigned int rem = naddr; + struct fw_vi_mac_cmd cmd, rpl; + + if (naddr > FW_CLS_TCAM_NUM_ENTRIES) + return -EINVAL; + + for (offset = 0; offset < naddr; /**/) { + unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) + ? 
rem + : ARRAY_SIZE(cmd.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + (free ? FW_CMD_EXEC : 0) | + FW_VI_MAC_CMD_VIID(viid)); + cmd.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | + FW_CMD_LEN16(len16)); + + for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { + p->valid_to_idx = cpu_to_be16( + FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); + } + + + ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, + sleep_ok); + if (ret && ret != -ENOMEM) + break; + + for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_GET( + be16_to_cpu(p->valid_to_idx)); + + if (idx) + idx[offset+i] = + (index >= FW_CLS_TCAM_NUM_ENTRIES + ? 0xffff + : index); + if (index < FW_CLS_TCAM_NUM_ENTRIES) + nfilters++; + else if (hash) + *hash |= (1ULL << hash_mac_addr(addr[offset+i])); + } + + free = false; + offset += fw_naddr; + rem -= fw_naddr; + } + + /* + * If there were no errors or we merely ran out of room in our MAC + * address arena, return the number of filters actually written. + */ + if (ret == 0 || ret == -ENOMEM) + ret = nfilters; + return ret; +} + +/** + * t4vf_change_mac - modifies the exact-match filter for a MAC address + * @adapter: the adapter + * @viid: the Virtual Interface ID + * @idx: index of existing filter for old value of MAC address, or -1 + * @addr: the new MAC address value + * @persist: if idx < 0, the new MAC allocation should be persistent + * + * Modifies an exact-match filter and sets it to the new MAC address. + * Note that in general it is not possible to modify the value of a given + * filter so the generic way to modify an address filter is to free the + * one being used by the old address value and allocate a new filter for + * the new address value. @idx can be -1 if the address is a new + * addition. + * + * Returns a negative error number or the index of the filter with the new + * MAC value. + */ +int t4vf_change_mac(struct adapter *adapter, unsigned int viid, + int idx, const u8 *addr, bool persist) +{ + int ret; + struct fw_vi_mac_cmd cmd, rpl; + struct fw_vi_mac_exact *p = &cmd.u.exact[0]; + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[1]), 16); + + /* + * If this is a new allocation, determine whether it should be + * persistent (across a "freemacs" operation) or not. + */ + if (idx < 0) + idx = persist ? 
FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_VI_MAC_CMD_VIID(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_IDX(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (ret == 0) { + p = &rpl.u.exact[0]; + ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); + if (ret >= FW_CLS_TCAM_NUM_ENTRIES) + ret = -ENOMEM; + } + return ret; +} + +/** + * t4vf_set_addr_hash - program the MAC inexact-match hash filter + * @adapter: the adapter + * @viid: the Virtual Interface Identifier + * @ucast: whether the hash filter should also match unicast addresses + * @vec: the value to be written to the hash filter + * @sleep_ok: call is allowed to sleep + * + * Sets the 64-bit inexact-match hash filter for a virtual interface. + */ +int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok) +{ + struct fw_vi_mac_cmd cmd; + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[0]), 16); + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_VI_ENABLE_CMD_VIID(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN | + FW_VI_MAC_CMD_HASHUNIEN(ucast) | + FW_CMD_LEN16(len16)); + cmd.u.hash.hashvec = cpu_to_be64(vec); + return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); +} + +/** + * t4vf_get_port_stats - collect "port" statistics + * @adapter: the adapter + * @pidx: the port index + * @s: the stats structure to fill + * + * Collect statistics for the "port"'s Virtual Interface. + */ +int t4vf_get_port_stats(struct adapter *adapter, int pidx, + struct t4vf_port_stats *s) +{ + struct port_info *pi = adap2pinfo(adapter, pidx); + struct fw_vi_stats_vf fwstats; + unsigned int rem = VI_VF_NUM_STATS; + __be64 *fwsp = (__be64 *)&fwstats; + + /* + * Grab the Virtual Interface statistics a chunk at a time via mailbox + * commands. We could use a Work Request and get all of them at once + * but that's an asynchronous interface which is awkward to use. + */ + while (rem) { + unsigned int ix = VI_VF_NUM_STATS - rem; + unsigned int nstats = min(6U, rem); + struct fw_vi_stats_cmd cmd, rpl; + size_t len = (offsetof(struct fw_vi_stats_cmd, u) + + sizeof(struct fw_vi_stats_ctl)); + size_t len16 = DIV_ROUND_UP(len, 16); + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) | + FW_VI_STATS_CMD_VIID(pi->viid) | + FW_CMD_REQUEST | + FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16)); + cmd.u.ctl.nstats_ix = + cpu_to_be16(FW_VI_STATS_CMD_IX(ix) | + FW_VI_STATS_CMD_NSTATS(nstats)); + ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); + if (ret) + return ret; + + memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats); + + rem -= nstats; + fwsp += nstats; + } + + /* + * Translate firmware statistics into host native statistics. 
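The statistics loop above is an instance of a general pattern: pull a fixed set of 64-bit counters through a narrow command interface, at most six per mailbox round trip, tracking progress with an index/remainder pair. The same pattern in isolation (standalone C; the fetch callback is a hypothetical stand-in for one FW_VI_STATS_CMD exchange):

#include <stdint.h>

/* Fetch "total" 64-bit counters in chunks of at most "max_chunk" through
 * a caller-supplied callback; returns 0 or the callback's error code. */
static int fetch_counters_chunked(uint64_t *out, unsigned int total,
                                  unsigned int max_chunk,
                                  int (*fetch)(unsigned int ix, unsigned int n,
                                               uint64_t *dst))
{
    unsigned int rem = total;

    while (rem) {
        unsigned int ix = total - rem;   /* index of the next counter */
        unsigned int n = rem < max_chunk ? rem : max_chunk;
        int ret = fetch(ix, n, out + ix);

        if (ret)
            return ret;
        rem -= n;
    }
    return 0;
}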
+ */ + s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes); + s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); + s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes); + s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); + s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes); + s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); + s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames); + s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes); + s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames); + + s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes); + s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); + s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes); + s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); + s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes); + s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); + + s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames); + + return 0; +} + +/** + * t4vf_iq_free - free an ingress queue and its free lists + * @adapter: the adapter + * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) + * @iqid: ingress queue ID + * @fl0id: FL0 queue ID or 0xffff if no attached FL0 + * @fl1id: FL1 queue ID or 0xffff if no attached FL1 + * + * Frees an ingress queue and its associated free lists, if any. + */ +int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype, + unsigned int iqid, unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | + FW_CMD_REQUEST | + FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE | + FW_LEN16(cmd)); + cmd.type_to_iqandstindex = + cpu_to_be32(FW_IQ_CMD_TYPE(iqtype)); + + cmd.iqid = cpu_to_be16(iqid); + cmd.fl0id = cpu_to_be16(fl0id); + cmd.fl1id = cpu_to_be16(fl1id); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_eth_eq_free - free an Ethernet egress queue + * @adapter: the adapter + * @eqid: egress queue ID + * + * Frees an Ethernet egress queue. + */ +int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) +{ + struct fw_eq_eth_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | + FW_CMD_REQUEST | + FW_CMD_EXEC); + cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE | + FW_LEN16(cmd)); + cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid)); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_handle_fw_rpl - process a firmware reply message + * @adapter: the adapter + * @rpl: start of the firmware message + * + * Processes a firmware message, such as link state change messages. + */ +int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) +{ + const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl; + u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi)); + + switch (opcode) { + case FW_PORT_CMD: { + /* + * Link/module state change message. + */ + const struct fw_port_cmd *port_cmd = + (const struct fw_port_cmd *)rpl; + u32 word; + int action, port_id, link_ok, speed, fc, pidx; + + /* + * Extract various fields from port status change message. 
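The FW_PORT reply handling that follows boils down to translating firmware status bits into the link speed and flow-control settings the OS layer expects. Stripped of the driver types, the decode looks like this (standalone C; the *_BIT values are illustrative placeholders for the FW_PORT_CMD pause bits and FW_PORT_CAP_SPEED_* definitions):

#include <stdint.h>

#define SPEED_100M_BIT  (1u << 2)   /* placeholder bit positions */
#define SPEED_1G_BIT    (1u << 3)
#define SPEED_10G_BIT   (1u << 4)
#define RXPAUSE_BIT     (1u << 5)
#define TXPAUSE_BIT     (1u << 6)

struct link_state {
    int speed_mbps;     /* 0 = unknown or link down */
    unsigned int fc;    /* pause flags: bit 0 = Rx, bit 1 = Tx */
};

static struct link_state decode_port_status(uint32_t word)
{
    struct link_state ls = { 0, 0 };

    if (word & RXPAUSE_BIT)
        ls.fc |= 1u;
    if (word & TXPAUSE_BIT)
        ls.fc |= 2u;
    if (word & SPEED_100M_BIT)
        ls.speed_mbps = 100;
    else if (word & SPEED_1G_BIT)
        ls.speed_mbps = 1000;
    else if (word & SPEED_10G_BIT)
        ls.speed_mbps = 10000;
    return ls;
}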
+ */ + action = FW_PORT_CMD_ACTION_GET( + be32_to_cpu(port_cmd->action_to_len16)); + if (action != FW_PORT_ACTION_GET_PORT_INFO) { + dev_err(adapter->pdev_dev, + "Unknown firmware PORT reply action %x\n", + action); + break; + } + + port_id = FW_PORT_CMD_PORTID_GET( + be32_to_cpu(port_cmd->op_to_portid)); + + word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); + link_ok = (word & FW_PORT_CMD_LSTATUS) != 0; + speed = 0; + fc = 0; + if (word & FW_PORT_CMD_RXPAUSE) + fc |= PAUSE_RX; + if (word & FW_PORT_CMD_TXPAUSE) + fc |= PAUSE_TX; + if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + speed = SPEED_100; + else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + speed = SPEED_1000; + else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + speed = SPEED_10000; + + /* + * Scan all of our "ports" (Virtual Interfaces) looking for + * those bound to the physical port which has changed. If + * our recorded state doesn't match the current state, + * signal that change to the OS code. + */ + for_each_port(adapter, pidx) { + struct port_info *pi = adap2pinfo(adapter, pidx); + struct link_config *lc; + + if (pi->port_id != port_id) + continue; + + lc = &pi->link_cfg; + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc) { + /* something changed */ + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + t4vf_os_link_changed(adapter, pidx, link_ok); + } + } + break; + } + + default: + dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n", + opcode); + } + return 0; +} diff --git a/drivers/scsi/cxgbi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild index 09dbf9e..6f095e2 100644 --- a/drivers/scsi/cxgbi/cxgb3i/Kbuild +++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild @@ -1,3 +1,3 @@ -EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3 +EXTRA_CFLAGS += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb3 obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild index b9f4af7..8290cda 100644 --- a/drivers/scsi/cxgbi/cxgb4i/Kbuild +++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild @@ -1,3 +1,3 @@ -EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb4 +EXTRA_CFLAGS += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4 obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o -- cgit v0.10.2 From dee1ad47f2ee75f5146d83ca757c1b7861c34c3b Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 7 Apr 2011 07:42:33 -0700 Subject: intel: Move the Intel wired LAN drivers Moves the Intel wired LAN drivers into drivers/net/ethernet/intel/ and the necessary Kconfig and Makefile changes. 
Signed-off-by: Jeff Kirsher diff --git a/MAINTAINERS b/MAINTAINERS index 0853d00..f4838da 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3340,13 +3340,13 @@ M: Bruce Allan M: Carolyn Wyborny M: Don Skidmore M: Greg Rose -M: PJ Waskiewicz +M: Peter P Waskiewicz Jr M: Alex Duyck M: John Ronciak L: e1000-devel@lists.sourceforge.net W: http://e1000.sourceforge.net/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-2.6.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next-2.6.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git S: Supported F: Documentation/networking/e100.txt F: Documentation/networking/e1000.txt @@ -3356,14 +3356,7 @@ F: Documentation/networking/igbvf.txt F: Documentation/networking/ixgb.txt F: Documentation/networking/ixgbe.txt F: Documentation/networking/ixgbevf.txt -F: drivers/net/e100.c -F: drivers/net/e1000/ -F: drivers/net/e1000e/ -F: drivers/net/igb/ -F: drivers/net/igbvf/ -F: drivers/net/ixgb/ -F: drivers/net/ixgbe/ -F: drivers/net/ixgbevf/ +F: drivers/net/ethernet/intel/ INTEL MRST PMU DRIVER M: Len Brown diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 77ab2e1..e649116 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1032,32 +1032,6 @@ config TC35815 depends on NET_PCI && PCI && MIPS select PHYLIB -config E100 - tristate "Intel(R) PRO/100+ support" - depends on NET_PCI && PCI - select MII - ---help--- - This driver supports Intel(R) PRO/100 family of adapters. - To verify that your adapter is supported, find the board ID number - on the adapter. Look for a label that has a barcode and a number - in the format 123456-001 (six digits hyphen three digits). - - Use the above information and the Adapter & Driver ID Guide at: - - - - to identify the adapter. - - For the latest Intel PRO/100 network driver for Linux, see: - - - - More specific information on configuring the driver is in - . - - To compile this driver as a module, choose M here. The module - will be called e100. - config FEALNX tristate "Myson MTD-8xx PCI Ethernet support" depends on NET_PCI && PCI @@ -1490,47 +1464,6 @@ config DL2K To compile this driver as a module, choose M here: the module will be called dl2k. -config E1000 - tristate "Intel(R) PRO/1000 Gigabit Ethernet support" - depends on PCI - ---help--- - This driver supports Intel(R) PRO/1000 gigabit ethernet family of - adapters. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: - - - - More specific information on configuring the driver is in - . - - To compile this driver as a module, choose M here. The module - will be called e1000. - -config E1000E - tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" - depends on PCI && (!SPARC32 || BROKEN) - select CRC32 - ---help--- - This driver supports the PCI-Express Intel(R) PRO/1000 gigabit - ethernet family of adapters. For PCI or PCI-X e1000 adapters, - use the regular e1000 driver For more information on how to - identify your adapter, go to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: - - - - To compile this driver as a module, choose M here. The module - will be called e1000e. 
- config IP1000 tristate "IP1000 Gigabit Ethernet support" depends on PCI && EXPERIMENTAL @@ -1541,57 +1474,6 @@ config IP1000 To compile this driver as a module, choose M here: the module will be called ipg. This is recommended. -config IGB - tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" - depends on PCI - ---help--- - This driver supports Intel(R) 82575/82576 gigabit ethernet family of - adapters. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: - - - - More specific information on configuring the driver is in - . - - To compile this driver as a module, choose M here. The module - will be called igb. - -config IGB_DCA - bool "Direct Cache Access (DCA) Support" - default y - depends on IGB && DCA && !(IGB=y && DCA=m) - ---help--- - Say Y here if you want to use Direct Cache Access (DCA) in the - driver. DCA is a method for warming the CPU cache before data - is used, with the intent of lessening the impact of cache misses. - -config IGBVF - tristate "Intel(R) 82576 Virtual Function Ethernet support" - depends on PCI - ---help--- - This driver supports Intel(R) 82576 virtual functions. For more - information on how to identify your adapter, go to the Adapter & - Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: - - - - More specific information on configuring the driver is in - . - - To compile this driver as a module, choose M here. The module - will be called igbvf. - source "drivers/net/ixp2000/Kconfig" config NS83820 @@ -1958,88 +1840,6 @@ config ENIC help This enables the support for the Cisco VIC Ethernet card. -config IXGBE - tristate "Intel(R) 10GbE PCI Express adapters support" - depends on PCI && INET - select MDIO - ---help--- - This driver supports Intel(R) 10GbE PCI Express family of - adapters. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: - - - - To compile this driver as a module, choose M here. The module - will be called ixgbe. - -config IXGBE_DCA - bool "Direct Cache Access (DCA) Support" - default y - depends on IXGBE && DCA && !(IXGBE=y && DCA=m) - ---help--- - Say Y here if you want to use Direct Cache Access (DCA) in the - driver. DCA is a method for warming the CPU cache before data - is used, with the intent of lessening the impact of cache misses. - -config IXGBE_DCB - bool "Data Center Bridging (DCB) Support" - default n - depends on IXGBE && DCB - ---help--- - Say Y here if you want to use Data Center Bridging (DCB) in the - driver. - - If unsure, say N. - -config IXGBEVF - tristate "Intel(R) 82599 Virtual Function Ethernet support" - depends on PCI_MSI - ---help--- - This driver supports Intel(R) 82599 virtual functions. For more - information on how to identify your adapter, go to the Adapter & - Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: - - - - More specific information on configuring the driver is in - . - - To compile this driver as a module, choose M here. The module - will be called ixgbevf. MSI-X interrupt support is required - for this driver to work correctly. - -config IXGB - tristate "Intel(R) PRO/10GbE support" - depends on PCI - ---help--- - This driver supports Intel(R) PRO/10GbE family of adapters for - PCI-X type cards. 
For PCI-E type cards, use the "ixgbe" driver - instead. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - - - For general information and support, go to the Intel support - website at: - - - - More specific information on configuring the driver is in - . - - To compile this driver as a module, choose M here. The module - will be called ixgb. - config S2IO tristate "Exar Xframe 10Gb Ethernet Adapter" depends on PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index a987d46..84b9860 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -10,14 +10,7 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o -obj-$(CONFIG_E1000) += e1000/ -obj-$(CONFIG_E1000E) += e1000e/ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/ -obj-$(CONFIG_IGB) += igb/ -obj-$(CONFIG_IGBVF) += igbvf/ -obj-$(CONFIG_IXGBE) += ixgbe/ -obj-$(CONFIG_IXGBEVF) += ixgbevf/ -obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_IP1000) += ipg.o obj-$(CONFIG_EHEA) += ehea/ obj-$(CONFIG_CAN) += can/ @@ -61,7 +54,6 @@ obj-$(CONFIG_SUNVNET) += sunvnet.o obj-$(CONFIG_MACE) += mace.o obj-$(CONFIG_BMAC) += bmac.o -obj-$(CONFIG_E100) += e100.o obj-$(CONFIG_TLAN) += tlan.o obj-$(CONFIG_EPIC100) += epic100.o obj-$(CONFIG_SMSC9420) += smsc9420.o diff --git a/drivers/net/e100.c b/drivers/net/e100.c deleted file mode 100644 index c1352c6..0000000 --- a/drivers/net/e100.c +++ /dev/null @@ -1,3109 +0,0 @@ -/******************************************************************************* - - Intel PRO/100 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* - * e100.c: Intel(R) PRO/100 ethernet driver - * - * (Re)written 2003 by scott.feldman@intel.com. Based loosely on - * original e100 driver, but better described as a munging of - * e100, e1000, eepro100, tg3, 8139cp, and other drivers. - * - * References: - * Intel 8255x 10/100 Mbps Ethernet Controller Family, - * Open Source Software Developers Manual, - * http://sourceforge.net/projects/e1000 - * - * - * Theory of Operation - * - * I. General - * - * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet - * controller family, which includes the 82557, 82558, 82559, 82550, - * 82551, and 82562 devices. 82558 and greater controllers - * integrate the Intel 82555 PHY. The controllers are used in - * server and client network interface cards, as well as in - * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx - * configurations. 
8255x supports a 32-bit linear addressing - * mode and operates at 33Mhz PCI clock rate. - * - * II. Driver Operation - * - * Memory-mapped mode is used exclusively to access the device's - * shared-memory structure, the Control/Status Registers (CSR). All - * setup, configuration, and control of the device, including queuing - * of Tx, Rx, and configuration commands is through the CSR. - * cmd_lock serializes accesses to the CSR command register. cb_lock - * protects the shared Command Block List (CBL). - * - * 8255x is highly MII-compliant and all access to the PHY go - * through the Management Data Interface (MDI). Consequently, the - * driver leverages the mii.c library shared with other MII-compliant - * devices. - * - * Big- and Little-Endian byte order as well as 32- and 64-bit - * archs are supported. Weak-ordered memory and non-cache-coherent - * archs are supported. - * - * III. Transmit - * - * A Tx skb is mapped and hangs off of a TCB. TCBs are linked - * together in a fixed-size ring (CBL) thus forming the flexible mode - * memory structure. A TCB marked with the suspend-bit indicates - * the end of the ring. The last TCB processed suspends the - * controller, and the controller can be restarted by issue a CU - * resume command to continue from the suspend point, or a CU start - * command to start at a given position in the ring. - * - * Non-Tx commands (config, multicast setup, etc) are linked - * into the CBL ring along with Tx commands. The common structure - * used for both Tx and non-Tx commands is the Command Block (CB). - * - * cb_to_use is the next CB to use for queuing a command; cb_to_clean - * is the next CB to check for completion; cb_to_send is the first - * CB to start on in case of a previous failure to resume. CB clean - * up happens in interrupt context in response to a CU interrupt. - * cbs_avail keeps track of number of free CB resources available. - * - * Hardware padding of short packets to minimum packet size is - * enabled. 82557 pads with 7Eh, while the later controllers pad - * with 00h. - * - * IV. Receive - * - * The Receive Frame Area (RFA) comprises a ring of Receive Frame - * Descriptors (RFD) + data buffer, thus forming the simplified mode - * memory structure. Rx skbs are allocated to contain both the RFD - * and the data buffer, but the RFD is pulled off before the skb is - * indicated. The data buffer is aligned such that encapsulated - * protocol headers are u32-aligned. Since the RFD is part of the - * mapped shared memory, and completion status is contained within - * the RFD, the RFD must be dma_sync'ed to maintain a consistent - * view from software and hardware. - * - * In order to keep updates to the RFD link field from colliding with - * hardware writes to mark packets complete, we use the feature that - * hardware will not write to a size 0 descriptor and mark the previous - * packet as end-of-list (EL). After updating the link, we remove EL - * and only then restore the size such that hardware may use the - * previous-to-end RFD. - * - * Under typical operation, the receive unit (RU) is start once, - * and the controller happily fills RFDs as frames arrive. If - * replacement RFDs cannot be allocated, or the RU goes non-active, - * the RU must be restarted. Frame arrival generates an interrupt, - * and Rx indication and re-allocation happen in the same context, - * therefore no locking is required. 
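The Transmit section above describes three cursors walking one fixed ring — cb_to_use for queuing, cb_to_send for (re)starting the CU, cb_to_clean for completion — plus a free count. Reduced to a standalone producer/consumer model (plain C with illustrative types; locking and hardware interaction omitted), the bookkeeping is:

#include <stddef.h>

struct cb_model {
    struct cb_model *next;  /* fixed circular list */
    int busy;
};

struct ring_model {
    struct cb_model *to_use;    /* next block to queue a command into */
    struct cb_model *to_clean;  /* next block to check for completion */
    unsigned int avail;         /* free command blocks remaining */
};

/* Claim the next command block, or NULL if the ring is full. */
static struct cb_model *ring_claim(struct ring_model *r)
{
    struct cb_model *cb;

    if (!r->avail)
        return NULL;
    cb = r->to_use;
    r->to_use = cb->next;
    r->avail--;
    cb->busy = 1;
    return cb;
}

/* Retire one completed block (models CU-interrupt cleanup). */
static void ring_retire(struct ring_model *r)
{
    r->to_clean->busy = 0;
    r->to_clean = r->to_clean->next;
    r->avail++;
}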
A software-generated interrupt - * is generated from the watchdog to recover from a failed allocation - * scenario where all Rx resources have been indicated and none re- - * placed. - * - * V. Miscellaneous - * - * VLAN offloading of tagging, stripping and filtering is not - * supported, but driver will accommodate the extra 4-byte VLAN tag - * for processing by upper layers. Tx/Rx Checksum offloading is not - * supported. Tx Scatter/Gather is not supported. Jumbo Frames is - * not supported (hardware limitation). - * - * MagicPacket(tm) WoL support is enabled/disabled via ethtool. - * - * Thanks to JC (jchapman@katalix.com) for helping with - * testing/troubleshooting the development driver. - * - * TODO: - * o several entry points race with dev->close - * o check for tx-no-resources/stop Q races with tx clean/wake Q - * - * FIXES: - * 2005/12/02 - Michael O'Donnell - * - Stratus87247: protect MDI control register manipulations - * 2009/06/01 - Andreas Mohr - * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#define DRV_NAME "e100" -#define DRV_EXT "-NAPI" -#define DRV_VERSION "3.5.24-k2"DRV_EXT -#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" -#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" - -#define E100_WATCHDOG_PERIOD (2 * HZ) -#define E100_NAPI_WEIGHT 16 - -#define FIRMWARE_D101M "e100/d101m_ucode.bin" -#define FIRMWARE_D101S "e100/d101s_ucode.bin" -#define FIRMWARE_D102E "e100/d102e_ucode.bin" - -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_COPYRIGHT); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); -MODULE_FIRMWARE(FIRMWARE_D101M); -MODULE_FIRMWARE(FIRMWARE_D101S); -MODULE_FIRMWARE(FIRMWARE_D102E); - -static int debug = 3; -static int eeprom_bad_csum_allow = 0; -static int use_io = 0; -module_param(debug, int, 0); -module_param(eeprom_bad_csum_allow, int, 0); -module_param(use_io, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); -MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); - -#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ - PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ - PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } -static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = { - INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), - INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), - INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), - INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), - INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), - INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), - INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), - INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), - INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), - INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), - INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), - INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), - INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), - INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), - INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), - INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), - INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), - INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), - INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), - INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), - INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), - INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), - 
INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), - INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), - INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), - INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), - INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), - INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), - INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), - INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), - INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), - INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), - INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), - INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), - INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), - INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), - INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), - INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), - INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), - INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), - INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), - INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), - { 0, } -}; -MODULE_DEVICE_TABLE(pci, e100_id_table); - -enum mac { - mac_82557_D100_A = 0, - mac_82557_D100_B = 1, - mac_82557_D100_C = 2, - mac_82558_D101_A4 = 4, - mac_82558_D101_B0 = 5, - mac_82559_D101M = 8, - mac_82559_D101S = 9, - mac_82550_D102 = 12, - mac_82550_D102_C = 13, - mac_82551_E = 14, - mac_82551_F = 15, - mac_82551_10 = 16, - mac_unknown = 0xFF, -}; - -enum phy { - phy_100a = 0x000003E0, - phy_100c = 0x035002A8, - phy_82555_tx = 0x015002A8, - phy_nsc_tx = 0x5C002000, - phy_82562_et = 0x033002A8, - phy_82562_em = 0x032002A8, - phy_82562_ek = 0x031002A8, - phy_82562_eh = 0x017002A8, - phy_82552_v = 0xd061004d, - phy_unknown = 0xFFFFFFFF, -}; - -/* CSR (Control/Status Registers) */ -struct csr { - struct { - u8 status; - u8 stat_ack; - u8 cmd_lo; - u8 cmd_hi; - u32 gen_ptr; - } scb; - u32 port; - u16 flash_ctrl; - u8 eeprom_ctrl_lo; - u8 eeprom_ctrl_hi; - u32 mdi_ctrl; - u32 rx_dma_count; -}; - -enum scb_status { - rus_no_res = 0x08, - rus_ready = 0x10, - rus_mask = 0x3C, -}; - -enum ru_state { - RU_SUSPENDED = 0, - RU_RUNNING = 1, - RU_UNINITIALIZED = -1, -}; - -enum scb_stat_ack { - stat_ack_not_ours = 0x00, - stat_ack_sw_gen = 0x04, - stat_ack_rnr = 0x10, - stat_ack_cu_idle = 0x20, - stat_ack_frame_rx = 0x40, - stat_ack_cu_cmd_done = 0x80, - stat_ack_not_present = 0xFF, - stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), - stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), -}; - -enum scb_cmd_hi { - irq_mask_none = 0x00, - irq_mask_all = 0x01, - irq_sw_gen = 0x02, -}; - -enum scb_cmd_lo { - cuc_nop = 0x00, - ruc_start = 0x01, - ruc_load_base = 0x06, - cuc_start = 0x10, - cuc_resume = 0x20, - cuc_dump_addr = 0x40, - cuc_dump_stats = 0x50, - cuc_load_base = 0x60, - cuc_dump_reset = 0x70, -}; - -enum cuc_dump { - cuc_dump_complete = 0x0000A005, - cuc_dump_reset_complete = 0x0000A007, -}; - -enum port { - software_reset = 0x0000, - selftest = 0x0001, - selective_reset = 0x0002, -}; - -enum eeprom_ctrl_lo { - eesk = 0x01, - eecs = 0x02, - eedi = 0x04, - eedo = 0x08, -}; - -enum mdi_ctrl { - mdi_write = 0x04000000, - mdi_read = 0x08000000, - mdi_ready = 0x10000000, -}; - -enum eeprom_op { - op_write = 0x05, - op_read = 0x06, - op_ewds = 0x10, - op_ewen = 0x13, -}; - -enum eeprom_offsets { - eeprom_cnfg_mdix = 0x03, - eeprom_phy_iface = 0x06, - eeprom_id = 0x0A, - eeprom_config_asf = 0x0D, - eeprom_smbus_addr = 0x90, -}; - -enum eeprom_cnfg_mdix { - eeprom_mdix_enabled = 0x0080, -}; - -enum eeprom_phy_iface { - NoSuchPhy = 0, - I82553AB, - I82553C, - I82503, - DP83840, - S80C240, - S80C24, - I82555, - DP83840A = 10, -}; - -enum eeprom_id { - eeprom_id_wol = 0x0020, -}; - -enum eeprom_config_asf { - eeprom_asf = 0x8000, - eeprom_gcl 
= 0x4000, -}; - -enum cb_status { - cb_complete = 0x8000, - cb_ok = 0x2000, -}; - -enum cb_command { - cb_nop = 0x0000, - cb_iaaddr = 0x0001, - cb_config = 0x0002, - cb_multi = 0x0003, - cb_tx = 0x0004, - cb_ucode = 0x0005, - cb_dump = 0x0006, - cb_tx_sf = 0x0008, - cb_cid = 0x1f00, - cb_i = 0x2000, - cb_s = 0x4000, - cb_el = 0x8000, -}; - -struct rfd { - __le16 status; - __le16 command; - __le32 link; - __le32 rbd; - __le16 actual_size; - __le16 size; -}; - -struct rx { - struct rx *next, *prev; - struct sk_buff *skb; - dma_addr_t dma_addr; -}; - -#if defined(__BIG_ENDIAN_BITFIELD) -#define X(a,b) b,a -#else -#define X(a,b) a,b -#endif -struct config { -/*0*/ u8 X(byte_count:6, pad0:2); -/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); -/*2*/ u8 adaptive_ifs; -/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), - term_write_cache_line:1), pad3:4); -/*4*/ u8 X(rx_dma_max_count:7, pad4:1); -/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); -/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), - tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), - rx_discard_overruns:1), rx_save_bad_frames:1); -/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), - pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), - tx_dynamic_tbd:1); -/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); -/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), - link_status_wake:1), arp_wake:1), mcmatch_wake:1); -/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), - loopback:2); -/*11*/ u8 X(linear_priority:3, pad11:5); -/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); -/*13*/ u8 ip_addr_lo; -/*14*/ u8 ip_addr_hi; -/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), - wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), - pad15_2:1), crs_or_cdt:1); -/*16*/ u8 fc_delay_lo; -/*17*/ u8 fc_delay_hi; -/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), - rx_long_ok:1), fc_priority_threshold:3), pad18:1); -/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), - fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), - full_duplex_force:1), full_duplex_pin:1); -/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); -/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); -/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); - u8 pad_d102[9]; -}; - -#define E100_MAX_MULTICAST_ADDRS 64 -struct multi { - __le16 count; - u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; -}; - -/* Important: keep total struct u32-aligned */ -#define UCODE_SIZE 134 -struct cb { - __le16 status; - __le16 command; - __le32 link; - union { - u8 iaaddr[ETH_ALEN]; - __le32 ucode[UCODE_SIZE]; - struct config config; - struct multi multi; - struct { - u32 tbd_array; - u16 tcb_byte_count; - u8 threshold; - u8 tbd_count; - struct { - __le32 buf_addr; - __le16 size; - u16 eol; - } tbd; - } tcb; - __le32 dump_buffer_addr; - } u; - struct cb *next, *prev; - dma_addr_t dma_addr; - struct sk_buff *skb; -}; - -enum loopback { - lb_none = 0, lb_mac = 1, lb_phy = 3, -}; - -struct stats { - __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, - tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, - tx_multiple_collisions, tx_total_collisions; - __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, - rx_resource_errors, rx_overrun_errors, rx_cdt_errors, - rx_short_frame_errors; - __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; - __le16 
xmt_tco_frames, rcv_tco_frames; - __le32 complete; -}; - -struct mem { - struct { - u32 signature; - u32 result; - } selftest; - struct stats stats; - u8 dump_buf[596]; -}; - -struct param_range { - u32 min; - u32 max; - u32 count; -}; - -struct params { - struct param_range rfds; - struct param_range cbs; -}; - -struct nic { - /* Begin: frequently used values: keep adjacent for cache effect */ - u32 msg_enable ____cacheline_aligned; - struct net_device *netdev; - struct pci_dev *pdev; - u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); - - struct rx *rxs ____cacheline_aligned; - struct rx *rx_to_use; - struct rx *rx_to_clean; - struct rfd blank_rfd; - enum ru_state ru_running; - - spinlock_t cb_lock ____cacheline_aligned; - spinlock_t cmd_lock; - struct csr __iomem *csr; - enum scb_cmd_lo cuc_cmd; - unsigned int cbs_avail; - struct napi_struct napi; - struct cb *cbs; - struct cb *cb_to_use; - struct cb *cb_to_send; - struct cb *cb_to_clean; - __le16 tx_command; - /* End: frequently used values: keep adjacent for cache effect */ - - enum { - ich = (1 << 0), - promiscuous = (1 << 1), - multicast_all = (1 << 2), - wol_magic = (1 << 3), - ich_10h_workaround = (1 << 4), - } flags ____cacheline_aligned; - - enum mac mac; - enum phy phy; - struct params params; - struct timer_list watchdog; - struct mii_if_info mii; - struct work_struct tx_timeout_task; - enum loopback loopback; - - struct mem *mem; - dma_addr_t dma_addr; - - struct pci_pool *cbs_pool; - dma_addr_t cbs_dma_addr; - u8 adaptive_ifs; - u8 tx_threshold; - u32 tx_frames; - u32 tx_collisions; - u32 tx_deferred; - u32 tx_single_collisions; - u32 tx_multiple_collisions; - u32 tx_fc_pause; - u32 tx_tco_frames; - - u32 rx_fc_pause; - u32 rx_fc_unsupported; - u32 rx_tco_frames; - u32 rx_over_length_errors; - - u16 eeprom_wc; - __le16 eeprom[256]; - spinlock_t mdio_lock; - const struct firmware *fw; -}; - -static inline void e100_write_flush(struct nic *nic) -{ - /* Flush previous PCI writes through intermediate bridges - * by doing a benign read */ - (void)ioread8(&nic->csr->scb.status); -} - -static void e100_enable_irq(struct nic *nic) -{ - unsigned long flags; - - spin_lock_irqsave(&nic->cmd_lock, flags); - iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); - e100_write_flush(nic); - spin_unlock_irqrestore(&nic->cmd_lock, flags); -} - -static void e100_disable_irq(struct nic *nic) -{ - unsigned long flags; - - spin_lock_irqsave(&nic->cmd_lock, flags); - iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); - e100_write_flush(nic); - spin_unlock_irqrestore(&nic->cmd_lock, flags); -} - -static void e100_hw_reset(struct nic *nic) -{ - /* Put CU and RU into idle with a selective reset to get - * device off of PCI bus */ - iowrite32(selective_reset, &nic->csr->port); - e100_write_flush(nic); udelay(20); - - /* Now fully reset device */ - iowrite32(software_reset, &nic->csr->port); - e100_write_flush(nic); udelay(20); - - /* Mask off our interrupt line - it's unmasked after reset */ - e100_disable_irq(nic); -} - -static int e100_self_test(struct nic *nic) -{ - u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); - - /* Passing the self-test is a pretty good indication - * that the device can DMA to/from host memory */ - - nic->mem->selftest.signature = 0; - nic->mem->selftest.result = 0xFFFFFFFF; - - iowrite32(selftest | dma_addr, &nic->csr->port); - e100_write_flush(nic); - /* Wait 10 msec for self-test to complete */ - msleep(10); - - /* Interrupts are enabled after self-test */ - e100_disable_irq(nic); - - /* Check 
results of self-test */ - if (nic->mem->selftest.result != 0) { - netif_err(nic, hw, nic->netdev, - "Self-test failed: result=0x%08X\n", - nic->mem->selftest.result); - return -ETIMEDOUT; - } - if (nic->mem->selftest.signature == 0) { - netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); - return -ETIMEDOUT; - } - - return 0; -} - -static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) -{ - u32 cmd_addr_data[3]; - u8 ctrl; - int i, j; - - /* Three cmds: write/erase enable, write data, write/erase disable */ - cmd_addr_data[0] = op_ewen << (addr_len - 2); - cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | - le16_to_cpu(data); - cmd_addr_data[2] = op_ewds << (addr_len - 2); - - /* Bit-bang cmds to write word to eeprom */ - for (j = 0; j < 3; j++) { - - /* Chip select */ - iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); - e100_write_flush(nic); udelay(4); - - for (i = 31; i >= 0; i--) { - ctrl = (cmd_addr_data[j] & (1 << i)) ? - eecs | eedi : eecs; - iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); - e100_write_flush(nic); udelay(4); - - iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); - e100_write_flush(nic); udelay(4); - } - /* Wait 10 msec for cmd to complete */ - msleep(10); - - /* Chip deselect */ - iowrite8(0, &nic->csr->eeprom_ctrl_lo); - e100_write_flush(nic); udelay(4); - } -}; - -/* General technique stolen from the eepro100 driver - very clever */ -static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) -{ - u32 cmd_addr_data; - u16 data = 0; - u8 ctrl; - int i; - - cmd_addr_data = ((op_read << *addr_len) | addr) << 16; - - /* Chip select */ - iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); - e100_write_flush(nic); udelay(4); - - /* Bit-bang to read word from eeprom */ - for (i = 31; i >= 0; i--) { - ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; - iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); - e100_write_flush(nic); udelay(4); - - iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); - e100_write_flush(nic); udelay(4); - - /* Eeprom drives a dummy zero to EEDO after receiving - * complete address. Use this to adjust addr_len. */ - ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); - if (!(ctrl & eedo) && i > 16) { - *addr_len -= (i - 16); - i = 17; - } - - data = (data << 1) | (ctrl & eedo ? 
1 : 0); - } - - /* Chip deselect */ - iowrite8(0, &nic->csr->eeprom_ctrl_lo); - e100_write_flush(nic); udelay(4); - - return cpu_to_le16(data); -}; - -/* Load entire EEPROM image into driver cache and validate checksum */ -static int e100_eeprom_load(struct nic *nic) -{ - u16 addr, addr_len = 8, checksum = 0; - - /* Try reading with an 8-bit addr len to discover actual addr len */ - e100_eeprom_read(nic, &addr_len, 0); - nic->eeprom_wc = 1 << addr_len; - - for (addr = 0; addr < nic->eeprom_wc; addr++) { - nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); - if (addr < nic->eeprom_wc - 1) - checksum += le16_to_cpu(nic->eeprom[addr]); - } - - /* The checksum, stored in the last word, is calculated such that - * the sum of words should be 0xBABA */ - if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { - netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); - if (!eeprom_bad_csum_allow) - return -EAGAIN; - } - - return 0; -} - -/* Save (portion of) driver EEPROM cache to device and update checksum */ -static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) -{ - u16 addr, addr_len = 8, checksum = 0; - - /* Try reading with an 8-bit addr len to discover actual addr len */ - e100_eeprom_read(nic, &addr_len, 0); - nic->eeprom_wc = 1 << addr_len; - - if (start + count >= nic->eeprom_wc) - return -EINVAL; - - for (addr = start; addr < start + count; addr++) - e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); - - /* The checksum, stored in the last word, is calculated such that - * the sum of words should be 0xBABA */ - for (addr = 0; addr < nic->eeprom_wc - 1; addr++) - checksum += le16_to_cpu(nic->eeprom[addr]); - nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); - e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, - nic->eeprom[nic->eeprom_wc - 1]); - - return 0; -} - -#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ -#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ -static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) -{ - unsigned long flags; - unsigned int i; - int err = 0; - - spin_lock_irqsave(&nic->cmd_lock, flags); - - /* Previous command is accepted when SCB clears */ - for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { - if (likely(!ioread8(&nic->csr->scb.cmd_lo))) - break; - cpu_relax(); - if (unlikely(i > E100_WAIT_SCB_FAST)) - udelay(5); - } - if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { - err = -EAGAIN; - goto err_unlock; - } - - if (unlikely(cmd != cuc_resume)) - iowrite32(dma_addr, &nic->csr->scb.gen_ptr); - iowrite8(cmd, &nic->csr->scb.cmd_lo); - -err_unlock: - spin_unlock_irqrestore(&nic->cmd_lock, flags); - - return err; -} - -static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, - void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) -{ - struct cb *cb; - unsigned long flags; - int err = 0; - - spin_lock_irqsave(&nic->cb_lock, flags); - - if (unlikely(!nic->cbs_avail)) { - err = -ENOMEM; - goto err_unlock; - } - - cb = nic->cb_to_use; - nic->cb_to_use = cb->next; - nic->cbs_avail--; - cb->skb = skb; - - if (unlikely(!nic->cbs_avail)) - err = -ENOSPC; - - cb_prepare(nic, cb, skb); - - /* Order is important otherwise we'll be in a race with h/w: - * set S-bit in current first, then clear S-bit in previous. 
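Stepping back to the EEPROM helpers above: the convention used by e100_eeprom_load() and e100_eeprom_save() is that the 16-bit sum of every word, including the checksum stored in the last word, equals 0xBABA. Verification and update under that convention, as a standalone sketch:

#include <stdint.h>
#include <stddef.h>

/* The 16-bit sum of all words, checksum word included, should be 0xBABA. */
static int eeprom_csum_ok(const uint16_t *words, size_t nwords)
{
    uint16_t sum = 0;
    size_t i;

    for (i = 0; i < nwords; i++)
        sum += words[i];    /* wraps mod 2^16, as intended */
    return sum == 0xBABA;
}

/* Recompute the checksum word after modifying the image. */
static void eeprom_csum_update(uint16_t *words, size_t nwords)
{
    uint16_t sum = 0;
    size_t i;

    for (i = 0; i < nwords - 1; i++)
        sum += words[i];
    words[nwords - 1] = 0xBABA - sum;
}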
*/ - cb->command |= cpu_to_le16(cb_s); - wmb(); - cb->prev->command &= cpu_to_le16(~cb_s); - - while (nic->cb_to_send != nic->cb_to_use) { - if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, - nic->cb_to_send->dma_addr))) { - /* Ok, here's where things get sticky. It's - * possible that we can't schedule the command - * because the controller is too busy, so - * let's just queue the command and try again - * when another command is scheduled. */ - if (err == -ENOSPC) { - //request a reset - schedule_work(&nic->tx_timeout_task); - } - break; - } else { - nic->cuc_cmd = cuc_resume; - nic->cb_to_send = nic->cb_to_send->next; - } - } - -err_unlock: - spin_unlock_irqrestore(&nic->cb_lock, flags); - - return err; -} - -static int mdio_read(struct net_device *netdev, int addr, int reg) -{ - struct nic *nic = netdev_priv(netdev); - return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); -} - -static void mdio_write(struct net_device *netdev, int addr, int reg, int data) -{ - struct nic *nic = netdev_priv(netdev); - - nic->mdio_ctrl(nic, addr, mdi_write, reg, data); -} - -/* the standard mdio_ctrl() function for usual MII-compliant hardware */ -static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) -{ - u32 data_out = 0; - unsigned int i; - unsigned long flags; - - - /* - * Stratus87247: we shouldn't be writing the MDI control - * register until the Ready bit shows True. Also, since - * manipulation of the MDI control registers is a multi-step - * procedure it should be done under lock. - */ - spin_lock_irqsave(&nic->mdio_lock, flags); - for (i = 100; i; --i) { - if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) - break; - udelay(20); - } - if (unlikely(!i)) { - netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); - spin_unlock_irqrestore(&nic->mdio_lock, flags); - return 0; /* No way to indicate timeout error */ - } - iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); - - for (i = 0; i < 100; i++) { - udelay(20); - if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) - break; - } - spin_unlock_irqrestore(&nic->mdio_lock, flags); - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", - dir == mdi_read ? "READ" : "WRITE", - addr, reg, data, data_out); - return (u16)data_out; -} - -/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ -static u16 mdio_ctrl_phy_82552_v(struct nic *nic, - u32 addr, - u32 dir, - u32 reg, - u16 data) -{ - if ((reg == MII_BMCR) && (dir == mdi_write)) { - if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { - u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, - MII_ADVERTISE); - - /* - * Workaround Si issue where sometimes the part will not - * autoneg to 100Mbps even when advertised. - */ - if (advert & ADVERTISE_100FULL) - data |= BMCR_SPEED100 | BMCR_FULLDPLX; - else if (advert & ADVERTISE_100HALF) - data |= BMCR_SPEED100; - } - } - return mdio_ctrl_hw(nic, addr, dir, reg, data); -} - -/* Fully software-emulated mdio_ctrl() function for cards without - * MII-compliant PHYs. - * For now, this is mainly geared towards 80c24 support; in case of further - * requirements for other types (i82503, ...?) either extend this mechanism - * or split it, whichever is cleaner. - */ -static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, - u32 addr, - u32 dir, - u32 reg, - u16 data) -{ - /* might need to allocate a netdev_priv'ed register array eventually - * to be able to record state changes, but for now - * some fully hardcoded register handling ought to be ok I guess. 
*/ - - if (dir == mdi_read) { - switch (reg) { - case MII_BMCR: - /* Auto-negotiation, right? */ - return BMCR_ANENABLE | - BMCR_FULLDPLX; - case MII_BMSR: - return BMSR_LSTATUS /* for mii_link_ok() */ | - BMSR_ANEGCAPABLE | - BMSR_10FULL; - case MII_ADVERTISE: - /* 80c24 is a "combo card" PHY, right? */ - return ADVERTISE_10HALF | - ADVERTISE_10FULL; - default: - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", - dir == mdi_read ? "READ" : "WRITE", - addr, reg, data); - return 0xFFFF; - } - } else { - switch (reg) { - default: - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", - dir == mdi_read ? "READ" : "WRITE", - addr, reg, data); - return 0xFFFF; - } - } -} -static inline int e100_phy_supports_mii(struct nic *nic) -{ - /* for now, just check it by comparing whether we - are using MII software emulation. - */ - return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); -} - -static void e100_get_defaults(struct nic *nic) -{ - struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; - struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; - - /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ - nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; - if (nic->mac == mac_unknown) - nic->mac = mac_82557_D100_A; - - nic->params.rfds = rfds; - nic->params.cbs = cbs; - - /* Quadwords to DMA into FIFO before starting frame transmit */ - nic->tx_threshold = 0xE0; - - /* no interrupt for every tx completion, delay = 256us if not 557 */ - nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | - ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); - - /* Template for a freshly allocated RFD */ - nic->blank_rfd.command = 0; - nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); - nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN); - - /* MII setup */ - nic->mii.phy_id_mask = 0x1F; - nic->mii.reg_num_mask = 0x1F; - nic->mii.dev = nic->netdev; - nic->mii.mdio_read = mdio_read; - nic->mii.mdio_write = mdio_write; -} - -static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) -{ - struct config *config = &cb->u.config; - u8 *c = (u8 *)config; - - cb->command = cpu_to_le16(cb_config); - - memset(config, 0, sizeof(struct config)); - - config->byte_count = 0x16; /* bytes in this struct */ - config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ - config->direct_rx_dma = 0x1; /* reserved */ - config->standard_tcb = 0x1; /* 1=standard, 0=extended */ - config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ - config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ - config->tx_underrun_retry = 0x3; /* # of underrun retries */ - if (e100_phy_supports_mii(nic)) - config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ - config->pad10 = 0x6; - config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ - config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ - config->ifs = 0x6; /* x16 = inter frame spacing */ - config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ - config->pad15_1 = 0x1; - config->pad15_2 = 0x1; - config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ - config->fc_delay_hi = 0x40; /* time delay for fc frame */ - config->tx_padding = 0x1; /* 1=pad short frames */ - config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ - config->pad18 = 0x1; - config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ - config->pad20_1 = 0x1F; - config->fc_priority_location = 0x1; 
/* 1=byte#31, 0=byte#19 */ - config->pad21_1 = 0x5; - - config->adaptive_ifs = nic->adaptive_ifs; - config->loopback = nic->loopback; - - if (nic->mii.force_media && nic->mii.full_duplex) - config->full_duplex_force = 0x1; /* 1=force, 0=auto */ - - if (nic->flags & promiscuous || nic->loopback) { - config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ - config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ - config->promiscuous_mode = 0x1; /* 1=on, 0=off */ - } - - if (nic->flags & multicast_all) - config->multicast_all = 0x1; /* 1=accept, 0=no */ - - /* disable WoL when up */ - if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) - config->magic_packet_disable = 0x1; /* 1=off, 0=on */ - - if (nic->mac >= mac_82558_D101_A4) { - config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ - config->mwi_enable = 0x1; /* 1=enable, 0=disable */ - config->standard_tcb = 0x0; /* 1=standard, 0=extended */ - config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ - if (nic->mac >= mac_82559_D101M) { - config->tno_intr = 0x1; /* TCO stats enable */ - /* Enable TCO in extended config */ - if (nic->mac >= mac_82551_10) { - config->byte_count = 0x20; /* extended bytes */ - config->rx_d102_mode = 0x1; /* GMRC for TCO */ - } - } else { - config->standard_stat_counter = 0x0; - } - } - - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]); - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); -} - -/************************************************************************* -* CPUSaver parameters -* -* All CPUSaver parameters are 16-bit literals that are part of a -* "move immediate value" instruction. By changing the value of -* the literal in the instruction before the code is loaded, the -* driver can change the algorithm. -* -* INTDELAY - This loads the dead-man timer with its initial value. -* When this timer expires the interrupt is asserted, and the -* timer is reset each time a new packet is received. (see -* BUNDLEMAX below to set the limit on number of chained packets) -* The current default is 0x600 or 1536. Experiments show that -* the value should probably stay within the 0x200 - 0x1000. -* -* BUNDLEMAX - -* This sets the maximum number of frames that will be bundled. In -* some situations, such as the TCP windowing algorithm, it may be -* better to limit the growth of the bundle size than let it go as -* high as it can, because that could cause too much added latency. -* The default is six, because this is the number of packets in the -* default TCP window size. A value of 1 would make CPUSaver indicate -* an interrupt for every frame received. If you do not want to put -* a limit on the bundle size, set this value to xFFFF. -* -* BUNDLESMALL - -* This contains a bit-mask describing the minimum size frame that -* will be bundled. The default masks the lower 7 bits, which means -* that any frame less than 128 bytes in length will not be bundled, -* but will instead immediately generate an interrupt. This does -* not affect the current bundle in any way. Any frame that is 128 -* bytes or large will be bundled normally. This feature is meant -* to provide immediate indication of ACK frames in a TCP environment. 
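The BUNDLESMALL parameter described above works because frame lengths are tested against a bit mask rather than compared numerically: with the default mask of 0xFF80, any length whose high bits are all zero — that is, anything under 128 bytes — bypasses bundling and interrupts immediately. As a minimal check in standalone C (the predicate models how the microcode is described as using the mask):

#include <stdint.h>
#include <stdbool.h>

#define BUNDLESMALL_MASK 0xFF80u    /* default: low 7 bits masked out */

/* True when a frame of this length skips CPUSaver bundling. */
static bool frame_interrupts_immediately(uint16_t len)
{
    return (len & BUNDLESMALL_MASK) == 0;
}

This also explains why only a few mask values are valid: the boundary between "small" and "bundled" frames can only sit at a power of two.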
-* Customers were seeing poor performance when a machine with CPUSaver
-* enabled was sending but not receiving. The delay introduced when
-* the ACKs were received was enough to reduce total throughput, because
-* the sender would sit idle until the ACK was finally seen.
-*
-* The current default is 0xFF80, which masks out the lower 7 bits.
-* This means that any frame which is 0x7F (127) bytes or smaller
-* will cause an immediate interrupt. Because this value must be a
-* bit mask, there are only a few valid values that can be used. To
-* turn this feature off, the driver can write the value 0xFFFF to the
-* lower word of this instruction (in the same way that the other
-* parameters are used). Likewise, a value of 0xF800 (2047) would
-* cause an interrupt to be generated for every frame, because all
-* standard Ethernet frames are <= 2047 bytes in length.
-*************************************************************************/
-
-/* if you wish to disable the ucode functionality, while maintaining the
- * workarounds it provides, set the following defines to:
- * BUNDLESMALL 0
- * BUNDLEMAX 1
- * INTDELAY 1
- */
-#define BUNDLESMALL 1
-#define BUNDLEMAX (u16)6
-#define INTDELAY (u16)1536 /* 0x600 */
-
-/* Initialize firmware */
-static const struct firmware *e100_request_firmware(struct nic *nic)
-{
-	const char *fw_name;
-	const struct firmware *fw = nic->fw;
-	u8 timer, bundle, min_size;
-	int err = 0;
-
-	/* do not load u-code for ICH devices */
-	if (nic->flags & ich)
-		return NULL;
-
-	/* Search for ucode match against h/w revision */
-	if (nic->mac == mac_82559_D101M)
-		fw_name = FIRMWARE_D101M;
-	else if (nic->mac == mac_82559_D101S)
-		fw_name = FIRMWARE_D101S;
-	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
-		fw_name = FIRMWARE_D102E;
-	else /* No ucode on other devices */
-		return NULL;
-
-	/* If the firmware has not previously been loaded, request a pointer
-	 * to it. If it was previously loaded, we are reinitializing the
-	 * adapter, possibly in a resume from hibernate, in which case
-	 * request_firmware() cannot be used.
-	 */
-	if (!fw)
-		err = request_firmware(&fw, fw_name, &nic->pdev->dev);
-
-	if (err) {
-		netif_err(nic, probe, nic->netdev,
-			  "Failed to load firmware \"%s\": %d\n",
-			  fw_name, err);
-		return ERR_PTR(err);
-	}
-
-	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
-	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
-	if (fw->size != UCODE_SIZE * 4 + 3) {
-		netif_err(nic, probe, nic->netdev,
-			  "Firmware \"%s\" has wrong size %zu\n",
-			  fw_name, fw->size);
-		release_firmware(fw);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Read timer, bundle and min_size from end of firmware blob */
-	timer = fw->data[UCODE_SIZE * 4];
-	bundle = fw->data[UCODE_SIZE * 4 + 1];
-	min_size = fw->data[UCODE_SIZE * 4 + 2];
-
-	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
-	    min_size >= UCODE_SIZE) {
-		netif_err(nic, probe, nic->netdev,
-			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
-			  fw_name, timer, bundle, min_size);
-		release_firmware(fw);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* OK, firmware is validated and ready to use. Save a pointer
-	 * to it in the nic */
-	nic->fw = fw;
-	return fw;
-}
-
-static void e100_setup_ucode(struct nic *nic, struct cb *cb,
-	struct sk_buff *skb)
-{
-	const struct firmware *fw = (void *)skb;
-	u8 timer, bundle, min_size;
-
-	/* It's not a real skb; we just abused the fact that e100_exec_cb
-	   will pass it through to here...
*/ - cb->skb = NULL; - - /* firmware is stored as little endian already */ - memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); - - /* Read timer, bundle and min_size from end of firmware blob */ - timer = fw->data[UCODE_SIZE * 4]; - bundle = fw->data[UCODE_SIZE * 4 + 1]; - min_size = fw->data[UCODE_SIZE * 4 + 2]; - - /* Insert user-tunable settings in cb->u.ucode */ - cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); - cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); - cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); - cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); - cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); - cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); - - cb->command = cpu_to_le16(cb_ucode | cb_el); -} - -static inline int e100_load_ucode_wait(struct nic *nic) -{ - const struct firmware *fw; - int err = 0, counter = 50; - struct cb *cb = nic->cb_to_clean; - - fw = e100_request_firmware(nic); - /* If it's NULL, then no ucode is required */ - if (!fw || IS_ERR(fw)) - return PTR_ERR(fw); - - if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) - netif_err(nic, probe, nic->netdev, - "ucode cmd failed with error %d\n", err); - - /* must restart cuc */ - nic->cuc_cmd = cuc_start; - - /* wait for completion */ - e100_write_flush(nic); - udelay(10); - - /* wait for possibly (ouch) 500ms */ - while (!(cb->status & cpu_to_le16(cb_complete))) { - msleep(10); - if (!--counter) break; - } - - /* ack any interrupts, something could have been set */ - iowrite8(~0, &nic->csr->scb.stat_ack); - - /* if the command failed, or is not OK, notify and return */ - if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { - netif_err(nic, probe, nic->netdev, "ucode load failed\n"); - err = -EPERM; - } - - return err; -} - -static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, - struct sk_buff *skb) -{ - cb->command = cpu_to_le16(cb_iaaddr); - memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); -} - -static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) -{ - cb->command = cpu_to_le16(cb_dump); - cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + - offsetof(struct mem, dump_buf)); -} - -static int e100_phy_check_without_mii(struct nic *nic) -{ - u8 phy_type; - int without_mii; - - phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f; - - switch (phy_type) { - case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ - case I82503: /* Non-MII PHY; UNTESTED! */ - case S80C24: /* Non-MII PHY; tested and working */ - /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": - * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter - * doesn't have a programming interface of any sort. The - * media is sensed automatically based on how the link partner - * is configured. This is, in essence, manual configuration. - */ - netif_info(nic, probe, nic->netdev, - "found MII-less i82503 or 80c24 or other PHY\n"); - - nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; - nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ - - /* these might be needed for certain MII-less cards... 
- * nic->flags |= ich; - * nic->flags |= ich_10h_workaround; */ - - without_mii = 1; - break; - default: - without_mii = 0; - break; - } - return without_mii; -} - -#define NCONFIG_AUTO_SWITCH 0x0080 -#define MII_NSC_CONG MII_RESV1 -#define NSC_CONG_ENABLE 0x0100 -#define NSC_CONG_TXREADY 0x0400 -#define ADVERTISE_FC_SUPPORTED 0x0400 -static int e100_phy_init(struct nic *nic) -{ - struct net_device *netdev = nic->netdev; - u32 addr; - u16 bmcr, stat, id_lo, id_hi, cong; - - /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ - for (addr = 0; addr < 32; addr++) { - nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; - bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); - stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); - stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); - if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) - break; - } - if (addr == 32) { - /* uhoh, no PHY detected: check whether we seem to be some - * weird, rare variant which is *known* to not have any MII. - * But do this AFTER MII checking only, since this does - * lookup of EEPROM values which may easily be unreliable. */ - if (e100_phy_check_without_mii(nic)) - return 0; /* simply return and hope for the best */ - else { - /* for unknown cases log a fatal error */ - netif_err(nic, hw, nic->netdev, - "Failed to locate any known PHY, aborting\n"); - return -EAGAIN; - } - } else - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "phy_addr = %d\n", nic->mii.phy_id); - - /* Get phy ID */ - id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); - id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); - nic->phy = (u32)id_hi << 16 | (u32)id_lo; - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "phy ID = 0x%08X\n", nic->phy); - - /* Select the phy and isolate the rest */ - for (addr = 0; addr < 32; addr++) { - if (addr != nic->mii.phy_id) { - mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); - } else if (nic->phy != phy_82552_v) { - bmcr = mdio_read(netdev, addr, MII_BMCR); - mdio_write(netdev, addr, MII_BMCR, - bmcr & ~BMCR_ISOLATE); - } - } - /* - * Workaround for 82552: - * Clear the ISOLATE bit on selected phy_id last (mirrored on all - * other phy_id's) using bmcr value from addr discovery loop above. - */ - if (nic->phy == phy_82552_v) - mdio_write(netdev, nic->mii.phy_id, MII_BMCR, - bmcr & ~BMCR_ISOLATE); - - /* Handle National tx phys */ -#define NCS_PHY_MODEL_MASK 0xFFF0FFFF - if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { - /* Disable congestion control */ - cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); - cong |= NSC_CONG_TXREADY; - cong &= ~NSC_CONG_ENABLE; - mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); - } - - if (nic->phy == phy_82552_v) { - u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); - - /* assign special tweaked mdio_ctrl() function */ - nic->mdio_ctrl = mdio_ctrl_phy_82552_v; - - /* Workaround Si not advertising flow-control during autoneg */ - advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); - - /* Reset for the above changes to take effect */ - bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); - bmcr |= BMCR_RESET; - mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); - } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && - (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && - !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { - /* enable/disable MDI/MDI-X auto-switching. 
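
/*
 * [Illustrative sketch; not from e100.c. The discovery loop at the top
 * of e100_phy_init() above walks the 32 MII addresses in the order
 * 1, 0, 2, 3, ..., 31 (address 1 is the most common strapping, so it
 * is tried first) and treats an all-ones BMCR, or an all-zeros
 * BMCR/BMSR pair, as "no PHY at this address".]
 */
#include <stdio.h>

static int probe_order(int i)
{
	return (i == 0) ? 1 : (i == 1) ? 0 : i;
}

static int phy_responds(unsigned bmcr, unsigned bmsr)
{
	/* mirrors the break condition of the discovery loop */
	return !(bmcr == 0xFFFF || (bmcr == 0 && bmsr == 0));
}

int main(void)
{
	for (int i = 0; i < 32; i++)
		printf("%d ", probe_order(i));	/* 1 0 2 3 4 ... 31 */
	printf("\n");
	return phy_responds(0xFFFF, 0) ? 1 : 0;	/* floating bus: no PHY */
}
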
*/ - mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, - nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); - } - - return 0; -} - -static int e100_hw_init(struct nic *nic) -{ - int err = 0; - - e100_hw_reset(nic); - - netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); - if (!in_interrupt() && (err = e100_self_test(nic))) - return err; - - if ((err = e100_phy_init(nic))) - return err; - if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) - return err; - if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) - return err; - if ((err = e100_load_ucode_wait(nic))) - return err; - if ((err = e100_exec_cb(nic, NULL, e100_configure))) - return err; - if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) - return err; - if ((err = e100_exec_cmd(nic, cuc_dump_addr, - nic->dma_addr + offsetof(struct mem, stats)))) - return err; - if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) - return err; - - e100_disable_irq(nic); - - return 0; -} - -static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) -{ - struct net_device *netdev = nic->netdev; - struct netdev_hw_addr *ha; - u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); - - cb->command = cpu_to_le16(cb_multi); - cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); - i = 0; - netdev_for_each_mc_addr(ha, netdev) { - if (i == count) - break; - memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, - ETH_ALEN); - } -} - -static void e100_set_multicast_list(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "mc_count=%d, flags=0x%04X\n", - netdev_mc_count(netdev), netdev->flags); - - if (netdev->flags & IFF_PROMISC) - nic->flags |= promiscuous; - else - nic->flags &= ~promiscuous; - - if (netdev->flags & IFF_ALLMULTI || - netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) - nic->flags |= multicast_all; - else - nic->flags &= ~multicast_all; - - e100_exec_cb(nic, NULL, e100_configure); - e100_exec_cb(nic, NULL, e100_multi); -} - -static void e100_update_stats(struct nic *nic) -{ - struct net_device *dev = nic->netdev; - struct net_device_stats *ns = &dev->stats; - struct stats *s = &nic->mem->stats; - __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : - (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : - &s->complete; - - /* Device's stats reporting may take several microseconds to - * complete, so we're always waiting for results of the - * previous command. 
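
/*
 * [Illustrative sketch; not from e100.c. The handshake described above,
 * reduced to its core: counters are consumed only after the device has
 * written a completion marker into the shared dump buffer; the marker
 * is then cleared before the next dump+reset command is issued. The
 * marker value below is an assumption for illustration; the driver
 * compares against its cuc_dump_reset_complete constant.]
 */
#include <stdint.h>

#define DUMP_RESET_COMPLETE 0xA007	/* assumed marker value */

static int stats_ready(volatile uint32_t *complete)
{
	if (*complete != DUMP_RESET_COMPLETE)
		return 0;	/* previous dump still in flight; skip */
	*complete = 0;		/* re-arm before reading the counters */
	return 1;
}
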
*/ - - if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { - *complete = 0; - nic->tx_frames = le32_to_cpu(s->tx_good_frames); - nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); - ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); - ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); - ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); - ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); - ns->collisions += nic->tx_collisions; - ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + - le32_to_cpu(s->tx_lost_crs); - ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) + - nic->rx_over_length_errors; - ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); - ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); - ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); - ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); - ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); - ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + - le32_to_cpu(s->rx_alignment_errors) + - le32_to_cpu(s->rx_short_frame_errors) + - le32_to_cpu(s->rx_cdt_errors); - nic->tx_deferred += le32_to_cpu(s->tx_deferred); - nic->tx_single_collisions += - le32_to_cpu(s->tx_single_collisions); - nic->tx_multiple_collisions += - le32_to_cpu(s->tx_multiple_collisions); - if (nic->mac >= mac_82558_D101_A4) { - nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); - nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); - nic->rx_fc_unsupported += - le32_to_cpu(s->fc_rcv_unsupported); - if (nic->mac >= mac_82559_D101M) { - nic->tx_tco_frames += - le16_to_cpu(s->xmt_tco_frames); - nic->rx_tco_frames += - le16_to_cpu(s->rcv_tco_frames); - } - } - } - - - if (e100_exec_cmd(nic, cuc_dump_reset, 0)) - netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, - "exec cuc_dump_reset failed\n"); -} - -static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) -{ - /* Adjust inter-frame-spacing (IFS) between two transmits if - * we're getting collisions on a half-duplex connection. */ - - if (duplex == DUPLEX_HALF) { - u32 prev = nic->adaptive_ifs; - u32 min_frames = (speed == SPEED_100) ? 1000 : 100; - - if ((nic->tx_frames / 32 < nic->tx_collisions) && - (nic->tx_frames > min_frames)) { - if (nic->adaptive_ifs < 60) - nic->adaptive_ifs += 5; - } else if (nic->tx_frames < min_frames) { - if (nic->adaptive_ifs >= 5) - nic->adaptive_ifs -= 5; - } - if (nic->adaptive_ifs != prev) - e100_exec_cb(nic, NULL, e100_configure); - } -} - -static void e100_watchdog(unsigned long data) -{ - struct nic *nic = (struct nic *)data; - struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; - u32 speed; - - netif_printk(nic, timer, KERN_DEBUG, nic->netdev, - "right now = %ld\n", jiffies); - - /* mii library handles link maintenance tasks */ - - mii_ethtool_gset(&nic->mii, &cmd); - speed = ethtool_cmd_speed(&cmd); - - if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { - netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", - speed == SPEED_100 ? 100 : 10, - cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); - } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { - netdev_info(nic->netdev, "NIC Link is Down\n"); - } - - mii_check_link(&nic->mii); - - /* Software generated interrupt to recover from (rare) Rx - * allocation failure. 
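
/*
 * [Illustrative sketch; not from e100.c. e100_adjust_adaptive_ifs()
 * above is a small control loop: on a busy half-duplex link, widen the
 * inter-frame spacing in steps of 5 whenever collisions exceed one per
 * 32 transmitted frames, and narrow it again once the link goes quiet.
 * The same policy with the driver state stripped away:]
 */
struct ifs_model { unsigned ifs, tx_frames, tx_collisions; };

static void adjust_ifs(struct ifs_model *m, int is_100mbit)
{
	unsigned min_frames = is_100mbit ? 1000 : 100;

	if (m->tx_frames / 32 < m->tx_collisions &&
	    m->tx_frames > min_frames) {
		if (m->ifs < 60)
			m->ifs += 5;	/* collisions dominate: back off */
	} else if (m->tx_frames < min_frames) {
		if (m->ifs >= 5)
			m->ifs -= 5;	/* light traffic: relax again */
	}
}
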
- * Unfortunately have to use a spinlock to not re-enable interrupts - * accidentally, due to hardware that shares a register between the - * interrupt mask bit and the SW Interrupt generation bit */ - spin_lock_irq(&nic->cmd_lock); - iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); - e100_write_flush(nic); - spin_unlock_irq(&nic->cmd_lock); - - e100_update_stats(nic); - e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); - - if (nic->mac <= mac_82557_D100_C) - /* Issue a multicast command to workaround a 557 lock up */ - e100_set_multicast_list(nic->netdev); - - if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) - /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ - nic->flags |= ich_10h_workaround; - else - nic->flags &= ~ich_10h_workaround; - - mod_timer(&nic->watchdog, - round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); -} - -static void e100_xmit_prepare(struct nic *nic, struct cb *cb, - struct sk_buff *skb) -{ - cb->command = nic->tx_command; - /* interrupt every 16 packets regardless of delay */ - if ((nic->cbs_avail & ~15) == nic->cbs_avail) - cb->command |= cpu_to_le16(cb_i); - cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); - cb->u.tcb.tcb_byte_count = 0; - cb->u.tcb.threshold = nic->tx_threshold; - cb->u.tcb.tbd_count = 1; - cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, - skb->data, skb->len, PCI_DMA_TODEVICE)); - /* check for mapping failure? */ - cb->u.tcb.tbd.size = cpu_to_le16(skb->len); -} - -static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - int err; - - if (nic->flags & ich_10h_workaround) { - /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. - Issue a NOP command followed by a 1us delay before - issuing the Tx command. */ - if (e100_exec_cmd(nic, cuc_nop, 0)) - netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, - "exec cuc_nop failed\n"); - udelay(1); - } - - err = e100_exec_cb(nic, skb, e100_xmit_prepare); - - switch (err) { - case -ENOSPC: - /* We queued the skb, but now we're out of space. */ - netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, - "No space for CB\n"); - netif_stop_queue(netdev); - break; - case -ENOMEM: - /* This is a hard error - log it. 
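
/*
 * [Illustrative sketch; not from e100.c. The "interrupt every 16
 * packets" test in e100_xmit_prepare() above relies on a bit trick:
 * (x & ~15) == x holds exactly when the low four bits of x are clear,
 * i.e. when x is a multiple of 16.]
 */
#include <assert.h>

static int is_multiple_of_16(unsigned cbs_avail)
{
	return (cbs_avail & ~15u) == cbs_avail;
}

int main(void)
{
	assert(is_multiple_of_16(0) && is_multiple_of_16(16) &&
	       is_multiple_of_16(256));
	assert(!is_multiple_of_16(1) && !is_multiple_of_16(15) &&
	       !is_multiple_of_16(17));
	return 0;
}
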
*/ - netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, - "Out of Tx resources, returning skb\n"); - netif_stop_queue(netdev); - return NETDEV_TX_BUSY; - } - - return NETDEV_TX_OK; -} - -static int e100_tx_clean(struct nic *nic) -{ - struct net_device *dev = nic->netdev; - struct cb *cb; - int tx_cleaned = 0; - - spin_lock(&nic->cb_lock); - - /* Clean CBs marked complete */ - for (cb = nic->cb_to_clean; - cb->status & cpu_to_le16(cb_complete); - cb = nic->cb_to_clean = cb->next) { - rmb(); /* read skb after status */ - netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, - "cb[%d]->status = 0x%04X\n", - (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), - cb->status); - - if (likely(cb->skb != NULL)) { - dev->stats.tx_packets++; - dev->stats.tx_bytes += cb->skb->len; - - pci_unmap_single(nic->pdev, - le32_to_cpu(cb->u.tcb.tbd.buf_addr), - le16_to_cpu(cb->u.tcb.tbd.size), - PCI_DMA_TODEVICE); - dev_kfree_skb_any(cb->skb); - cb->skb = NULL; - tx_cleaned = 1; - } - cb->status = 0; - nic->cbs_avail++; - } - - spin_unlock(&nic->cb_lock); - - /* Recover from running out of Tx resources in xmit_frame */ - if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) - netif_wake_queue(nic->netdev); - - return tx_cleaned; -} - -static void e100_clean_cbs(struct nic *nic) -{ - if (nic->cbs) { - while (nic->cbs_avail != nic->params.cbs.count) { - struct cb *cb = nic->cb_to_clean; - if (cb->skb) { - pci_unmap_single(nic->pdev, - le32_to_cpu(cb->u.tcb.tbd.buf_addr), - le16_to_cpu(cb->u.tcb.tbd.size), - PCI_DMA_TODEVICE); - dev_kfree_skb(cb->skb); - } - nic->cb_to_clean = nic->cb_to_clean->next; - nic->cbs_avail++; - } - pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); - nic->cbs = NULL; - nic->cbs_avail = 0; - } - nic->cuc_cmd = cuc_start; - nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = - nic->cbs; -} - -static int e100_alloc_cbs(struct nic *nic) -{ - struct cb *cb; - unsigned int i, count = nic->params.cbs.count; - - nic->cuc_cmd = cuc_start; - nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; - nic->cbs_avail = 0; - - nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL, - &nic->cbs_dma_addr); - if (!nic->cbs) - return -ENOMEM; - memset(nic->cbs, 0, count * sizeof(struct cb)); - - for (cb = nic->cbs, i = 0; i < count; cb++, i++) { - cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; - cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; - - cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); - cb->link = cpu_to_le32(nic->cbs_dma_addr + - ((i+1) % count) * sizeof(struct cb)); - } - - nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; - nic->cbs_avail = count; - - return 0; -} - -static inline void e100_start_receiver(struct nic *nic, struct rx *rx) -{ - if (!nic->rxs) return; - if (RU_SUSPENDED != nic->ru_running) return; - - /* handle init time starts */ - if (!rx) rx = nic->rxs; - - /* (Re)start RU if suspended or idle and RFA is non-NULL */ - if (rx->skb) { - e100_exec_cmd(nic, ruc_start, rx->dma_addr); - nic->ru_running = RU_RUNNING; - } -} - -#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) -static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) -{ - if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) - return -ENOMEM; - - /* Init, and map the RFD. 
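
/*
 * [Illustrative sketch; not from e100.c. e100_alloc_cbs() above builds
 * one contiguous array that is threaded three ways: virtual next/prev
 * pointers for the driver, plus a device-visible bus address in ->link
 * so the hardware can chain from one command block to the next. A
 * minimal model of that wiring, using a caller-supplied bus base:]
 */
#include <stdint.h>

struct cb_model {
	struct cb_model *next, *prev;
	uint32_t link;		/* bus address of the following entry */
};

static void cb_ring_init(struct cb_model *cbs, unsigned count,
			 uint32_t dma_base)
{
	for (unsigned i = 0; i < count; i++) {
		cbs[i].next = &cbs[(i + 1) % count];
		cbs[i].prev = &cbs[(i + count - 1) % count];
		cbs[i].link = dma_base +
			      ((i + 1) % count) * sizeof(struct cb_model);
	}
}
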
*/ - skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); - rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); - - if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { - dev_kfree_skb_any(rx->skb); - rx->skb = NULL; - rx->dma_addr = 0; - return -ENOMEM; - } - - /* Link the RFD to end of RFA by linking previous RFD to - * this one. We are safe to touch the previous RFD because - * it is protected by the before last buffer's el bit being set */ - if (rx->prev->skb) { - struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; - put_unaligned_le32(rx->dma_addr, &prev_rfd->link); - pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, - sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); - } - - return 0; -} - -static int e100_rx_indicate(struct nic *nic, struct rx *rx, - unsigned int *work_done, unsigned int work_to_do) -{ - struct net_device *dev = nic->netdev; - struct sk_buff *skb = rx->skb; - struct rfd *rfd = (struct rfd *)skb->data; - u16 rfd_status, actual_size; - - if (unlikely(work_done && *work_done >= work_to_do)) - return -EAGAIN; - - /* Need to sync before taking a peek at cb_complete bit */ - pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, - sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); - rfd_status = le16_to_cpu(rfd->status); - - netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, - "status=0x%04X\n", rfd_status); - rmb(); /* read size after status bit */ - - /* If data isn't ready, nothing to indicate */ - if (unlikely(!(rfd_status & cb_complete))) { - /* If the next buffer has the el bit, but we think the receiver - * is still running, check to see if it really stopped while - * we had interrupts off. - * This allows for a fast restart without re-enabling - * interrupts */ - if ((le16_to_cpu(rfd->command) & cb_el) && - (RU_RUNNING == nic->ru_running)) - - if (ioread8(&nic->csr->scb.status) & rus_no_res) - nic->ru_running = RU_SUSPENDED; - pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, - sizeof(struct rfd), - PCI_DMA_FROMDEVICE); - return -ENODATA; - } - - /* Get actual data size */ - actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; - if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) - actual_size = RFD_BUF_LEN - sizeof(struct rfd); - - /* Get data */ - pci_unmap_single(nic->pdev, rx->dma_addr, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); - - /* If this buffer has the el bit, but we think the receiver - * is still running, check to see if it really stopped while - * we had interrupts off. - * This allows for a fast restart without re-enabling interrupts. - * This can happen when the RU sees the size change but also sees - * the el bit set. 
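
/*
 * [Illustrative sketch; not from e100.c. In e100_rx_indicate() above,
 * only the low 14 bits of the RFD count field are a byte length (the
 * high bits carry completion flags), and even that length is clamped
 * to the payload area actually mapped for DMA. The 1518-byte limit
 * assumes VLAN_ETH_FRAME_LEN, as in the driver's RFD_BUF_LEN.]
 */
#include <stdint.h>

#define RFD_DATA_MAX 1518	/* VLAN_ETH_FRAME_LEN */

static uint16_t rfd_payload_len(uint16_t raw_count)
{
	uint16_t len = raw_count & 0x3FFF;	/* strip status bits */

	return len > RFD_DATA_MAX ? RFD_DATA_MAX : len;
}
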
*/ - if ((le16_to_cpu(rfd->command) & cb_el) && - (RU_RUNNING == nic->ru_running)) { - - if (ioread8(&nic->csr->scb.status) & rus_no_res) - nic->ru_running = RU_SUSPENDED; - } - - /* Pull off the RFD and put the actual data (minus eth hdr) */ - skb_reserve(skb, sizeof(struct rfd)); - skb_put(skb, actual_size); - skb->protocol = eth_type_trans(skb, nic->netdev); - - if (unlikely(!(rfd_status & cb_ok))) { - /* Don't indicate if hardware indicates errors */ - dev_kfree_skb_any(skb); - } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) { - /* Don't indicate oversized frames */ - nic->rx_over_length_errors++; - dev_kfree_skb_any(skb); - } else { - dev->stats.rx_packets++; - dev->stats.rx_bytes += actual_size; - netif_receive_skb(skb); - if (work_done) - (*work_done)++; - } - - rx->skb = NULL; - - return 0; -} - -static void e100_rx_clean(struct nic *nic, unsigned int *work_done, - unsigned int work_to_do) -{ - struct rx *rx; - int restart_required = 0, err = 0; - struct rx *old_before_last_rx, *new_before_last_rx; - struct rfd *old_before_last_rfd, *new_before_last_rfd; - - /* Indicate newly arrived packets */ - for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { - err = e100_rx_indicate(nic, rx, work_done, work_to_do); - /* Hit quota or no more to clean */ - if (-EAGAIN == err || -ENODATA == err) - break; - } - - - /* On EAGAIN, hit quota so have more work to do, restart once - * cleanup is complete. - * Else, are we already rnr? then pay attention!!! this ensures that - * the state machine progression never allows a start with a - * partially cleaned list, avoiding a race between hardware - * and rx_to_clean when in NAPI mode */ - if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) - restart_required = 1; - - old_before_last_rx = nic->rx_to_use->prev->prev; - old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; - - /* Alloc new skbs to refill list */ - for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { - if (unlikely(e100_rx_alloc_skb(nic, rx))) - break; /* Better luck next time (see watchdog) */ - } - - new_before_last_rx = nic->rx_to_use->prev->prev; - if (new_before_last_rx != old_before_last_rx) { - /* Set the el-bit on the buffer that is before the last buffer. - * This lets us update the next pointer on the last buffer - * without worrying about hardware touching it. - * We set the size to 0 to prevent hardware from touching this - * buffer. - * When the hardware hits the before last buffer with el-bit - * and size of 0, it will RNR interrupt, the RUS will go into - * the No Resources state. It will not complete nor write to - * this buffer. */ - new_before_last_rfd = - (struct rfd *)new_before_last_rx->skb->data; - new_before_last_rfd->size = 0; - new_before_last_rfd->command |= cpu_to_le16(cb_el); - pci_dma_sync_single_for_device(nic->pdev, - new_before_last_rx->dma_addr, sizeof(struct rfd), - PCI_DMA_BIDIRECTIONAL); - - /* Now that we have a new stopping point, we can clear the old - * stopping point. We must sync twice to get the proper - * ordering on the hardware side of things. */ - old_before_last_rfd->command &= ~cpu_to_le16(cb_el); - pci_dma_sync_single_for_device(nic->pdev, - old_before_last_rx->dma_addr, sizeof(struct rfd), - PCI_DMA_BIDIRECTIONAL); - old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN); - pci_dma_sync_single_for_device(nic->pdev, - old_before_last_rx->dma_addr, sizeof(struct rfd), - PCI_DMA_BIDIRECTIONAL); - } - - if (restart_required) { - // ack the rnr? 
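
/*
 * [Illustrative sketch; not from e100.c. The block above moves the
 * ring's "stop marker": the entry before the last one carries size 0
 * plus the EL (end-of-list) bit, so the receive unit parks there with
 * an RNR instead of chasing a link pointer the CPU may still be
 * rewriting. The bit value below is an assumption for illustration,
 * and the real driver syncs each store to the device between steps.]
 */
#include <stdint.h>

#define EL_BIT 0x8000		/* assumed end-of-list command bit */

struct rfd_model { uint16_t command, size; };

static void move_stop_marker(struct rfd_model *old_stop,
			     struct rfd_model *new_stop, uint16_t full_size)
{
	new_stop->size = 0;		/* device must not fill it... */
	new_stop->command |= EL_BIT;	/* ...and must stop here */
	old_stop->command &= ~EL_BIT;	/* only then release the old stop */
	old_stop->size = full_size;
}
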
- iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); - e100_start_receiver(nic, nic->rx_to_clean); - if (work_done) - (*work_done)++; - } -} - -static void e100_rx_clean_list(struct nic *nic) -{ - struct rx *rx; - unsigned int i, count = nic->params.rfds.count; - - nic->ru_running = RU_UNINITIALIZED; - - if (nic->rxs) { - for (rx = nic->rxs, i = 0; i < count; rx++, i++) { - if (rx->skb) { - pci_unmap_single(nic->pdev, rx->dma_addr, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); - dev_kfree_skb(rx->skb); - } - } - kfree(nic->rxs); - nic->rxs = NULL; - } - - nic->rx_to_use = nic->rx_to_clean = NULL; -} - -static int e100_rx_alloc_list(struct nic *nic) -{ - struct rx *rx; - unsigned int i, count = nic->params.rfds.count; - struct rfd *before_last; - - nic->rx_to_use = nic->rx_to_clean = NULL; - nic->ru_running = RU_UNINITIALIZED; - - if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC))) - return -ENOMEM; - - for (rx = nic->rxs, i = 0; i < count; rx++, i++) { - rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; - rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; - if (e100_rx_alloc_skb(nic, rx)) { - e100_rx_clean_list(nic); - return -ENOMEM; - } - } - /* Set the el-bit on the buffer that is before the last buffer. - * This lets us update the next pointer on the last buffer without - * worrying about hardware touching it. - * We set the size to 0 to prevent hardware from touching this buffer. - * When the hardware hits the before last buffer with el-bit and size - * of 0, it will RNR interrupt, the RU will go into the No Resources - * state. It will not complete nor write to this buffer. */ - rx = nic->rxs->prev->prev; - before_last = (struct rfd *)rx->skb->data; - before_last->command |= cpu_to_le16(cb_el); - before_last->size = 0; - pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, - sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); - - nic->rx_to_use = nic->rx_to_clean = nic->rxs; - nic->ru_running = RU_SUSPENDED; - - return 0; -} - -static irqreturn_t e100_intr(int irq, void *dev_id) -{ - struct net_device *netdev = dev_id; - struct nic *nic = netdev_priv(netdev); - u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); - - netif_printk(nic, intr, KERN_DEBUG, nic->netdev, - "stat_ack = 0x%02X\n", stat_ack); - - if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ - stat_ack == stat_ack_not_present) /* Hardware is ejected */ - return IRQ_NONE; - - /* Ack interrupt(s) */ - iowrite8(stat_ack, &nic->csr->scb.stat_ack); - - /* We hit Receive No Resource (RNR); restart RU after cleaning */ - if (stat_ack & stat_ack_rnr) - nic->ru_running = RU_SUSPENDED; - - if (likely(napi_schedule_prep(&nic->napi))) { - e100_disable_irq(nic); - __napi_schedule(&nic->napi); - } - - return IRQ_HANDLED; -} - -static int e100_poll(struct napi_struct *napi, int budget) -{ - struct nic *nic = container_of(napi, struct nic, napi); - unsigned int work_done = 0; - - e100_rx_clean(nic, &work_done, budget); - e100_tx_clean(nic); - - /* If budget not fully consumed, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - e100_enable_irq(nic); - } - - return work_done; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void e100_netpoll(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - - e100_disable_irq(nic); - e100_intr(nic->pdev->irq, netdev); - e100_tx_clean(nic); - e100_enable_irq(nic); -} -#endif - -static int e100_set_mac_address(struct net_device *netdev, void *p) -{ - struct nic *nic = netdev_priv(netdev); - struct sockaddr *addr = p; - - if 
(!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - e100_exec_cb(nic, NULL, e100_setup_iaaddr); - - return 0; -} - -static int e100_change_mtu(struct net_device *netdev, int new_mtu) -{ - if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN) - return -EINVAL; - netdev->mtu = new_mtu; - return 0; -} - -static int e100_asf(struct nic *nic) -{ - /* ASF can be enabled from eeprom */ - return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && - (nic->eeprom[eeprom_config_asf] & eeprom_asf) && - !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) && - ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE); -} - -static int e100_up(struct nic *nic) -{ - int err; - - if ((err = e100_rx_alloc_list(nic))) - return err; - if ((err = e100_alloc_cbs(nic))) - goto err_rx_clean_list; - if ((err = e100_hw_init(nic))) - goto err_clean_cbs; - e100_set_multicast_list(nic->netdev); - e100_start_receiver(nic, NULL); - mod_timer(&nic->watchdog, jiffies); - if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, - nic->netdev->name, nic->netdev))) - goto err_no_irq; - netif_wake_queue(nic->netdev); - napi_enable(&nic->napi); - /* enable ints _after_ enabling poll, preventing a race between - * disable ints+schedule */ - e100_enable_irq(nic); - return 0; - -err_no_irq: - del_timer_sync(&nic->watchdog); -err_clean_cbs: - e100_clean_cbs(nic); -err_rx_clean_list: - e100_rx_clean_list(nic); - return err; -} - -static void e100_down(struct nic *nic) -{ - /* wait here for poll to complete */ - napi_disable(&nic->napi); - netif_stop_queue(nic->netdev); - e100_hw_reset(nic); - free_irq(nic->pdev->irq, nic->netdev); - del_timer_sync(&nic->watchdog); - netif_carrier_off(nic->netdev); - e100_clean_cbs(nic); - e100_rx_clean_list(nic); -} - -static void e100_tx_timeout(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - - /* Reset outside of interrupt context, to avoid request_irq - * in interrupt context */ - schedule_work(&nic->tx_timeout_task); -} - -static void e100_tx_timeout_task(struct work_struct *work) -{ - struct nic *nic = container_of(work, struct nic, tx_timeout_task); - struct net_device *netdev = nic->netdev; - - netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, - "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); - - rtnl_lock(); - if (netif_running(netdev)) { - e100_down(netdev_priv(netdev)); - e100_up(netdev_priv(netdev)); - } - rtnl_unlock(); -} - -static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) -{ - int err; - struct sk_buff *skb; - - /* Use driver resources to perform internal MAC or PHY - * loopback test. A single packet is prepared and transmitted - * in loopback mode, and the test passes if the received - * packet compares byte-for-byte to the transmitted packet. 
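
/*
 * [Illustrative sketch; not from e100.c. The self-test above, with the
 * command-block and DMA plumbing stripped away: transmit one frame of
 * a known fill pattern while the MAC (or PHY) is looped back on
 * itself, then demand a byte-for-byte match on the receive side.]
 */
#include <string.h>

#define TEST_LEN 1500		/* ETH_DATA_LEN, as the driver uses */

static int loopback_ok(const unsigned char *rx_payload)
{
	unsigned char sent[TEST_LEN];

	memset(sent, 0xFF, sizeof(sent));	/* same pattern as above */
	return memcmp(rx_payload, sent, sizeof(sent)) == 0;
}
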
*/ - - if ((err = e100_rx_alloc_list(nic))) - return err; - if ((err = e100_alloc_cbs(nic))) - goto err_clean_rx; - - /* ICH PHY loopback is broken so do MAC loopback instead */ - if (nic->flags & ich && loopback_mode == lb_phy) - loopback_mode = lb_mac; - - nic->loopback = loopback_mode; - if ((err = e100_hw_init(nic))) - goto err_loopback_none; - - if (loopback_mode == lb_phy) - mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, - BMCR_LOOPBACK); - - e100_start_receiver(nic, NULL); - - if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { - err = -ENOMEM; - goto err_loopback_none; - } - skb_put(skb, ETH_DATA_LEN); - memset(skb->data, 0xFF, ETH_DATA_LEN); - e100_xmit_frame(skb, nic->netdev); - - msleep(10); - - pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); - - if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), - skb->data, ETH_DATA_LEN)) - err = -EAGAIN; - -err_loopback_none: - mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); - nic->loopback = lb_none; - e100_clean_cbs(nic); - e100_hw_reset(nic); -err_clean_rx: - e100_rx_clean_list(nic); - return err; -} - -#define MII_LED_CONTROL 0x1B -#define E100_82552_LED_OVERRIDE 0x19 -#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ -#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ - -static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) -{ - struct nic *nic = netdev_priv(netdev); - return mii_ethtool_gset(&nic->mii, cmd); -} - -static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) -{ - struct nic *nic = netdev_priv(netdev); - int err; - - mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); - err = mii_ethtool_sset(&nic->mii, cmd); - e100_exec_cb(nic, NULL, e100_configure); - - return err; -} - -static void e100_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *info) -{ - struct nic *nic = netdev_priv(netdev); - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - strcpy(info->fw_version, "N/A"); - strcpy(info->bus_info, pci_name(nic->pdev)); -} - -#define E100_PHY_REGS 0x1C -static int e100_get_regs_len(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf); -} - -static void e100_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) -{ - struct nic *nic = netdev_priv(netdev); - u32 *buff = p; - int i; - - regs->version = (1 << 24) | nic->pdev->revision; - buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | - ioread8(&nic->csr->scb.cmd_lo) << 16 | - ioread16(&nic->csr->scb.status); - for (i = E100_PHY_REGS; i >= 0; i--) - buff[1 + E100_PHY_REGS - i] = - mdio_read(netdev, nic->mii.phy_id, i); - memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); - e100_exec_cb(nic, NULL, e100_dump); - msleep(10); - memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf, - sizeof(nic->mem->dump_buf)); -} - -static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct nic *nic = netdev_priv(netdev); - wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; - wol->wolopts = (nic->flags & wol_magic) ? 
WAKE_MAGIC : 0; -} - -static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct nic *nic = netdev_priv(netdev); - - if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || - !device_can_wakeup(&nic->pdev->dev)) - return -EOPNOTSUPP; - - if (wol->wolopts) - nic->flags |= wol_magic; - else - nic->flags &= ~wol_magic; - - device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); - - e100_exec_cb(nic, NULL, e100_configure); - - return 0; -} - -static u32 e100_get_msglevel(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - return nic->msg_enable; -} - -static void e100_set_msglevel(struct net_device *netdev, u32 value) -{ - struct nic *nic = netdev_priv(netdev); - nic->msg_enable = value; -} - -static int e100_nway_reset(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - return mii_nway_restart(&nic->mii); -} - -static u32 e100_get_link(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - return mii_link_ok(&nic->mii); -} - -static int e100_get_eeprom_len(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - return nic->eeprom_wc << 1; -} - -#define E100_EEPROM_MAGIC 0x1234 -static int e100_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct nic *nic = netdev_priv(netdev); - - eeprom->magic = E100_EEPROM_MAGIC; - memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); - - return 0; -} - -static int e100_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct nic *nic = netdev_priv(netdev); - - if (eeprom->magic != E100_EEPROM_MAGIC) - return -EINVAL; - - memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); - - return e100_eeprom_save(nic, eeprom->offset >> 1, - (eeprom->len >> 1) + 1); -} - -static void e100_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct nic *nic = netdev_priv(netdev); - struct param_range *rfds = &nic->params.rfds; - struct param_range *cbs = &nic->params.cbs; - - ring->rx_max_pending = rfds->max; - ring->tx_max_pending = cbs->max; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = rfds->count; - ring->tx_pending = cbs->count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} - -static int e100_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct nic *nic = netdev_priv(netdev); - struct param_range *rfds = &nic->params.rfds; - struct param_range *cbs = &nic->params.cbs; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - if (netif_running(netdev)) - e100_down(nic); - rfds->count = max(ring->rx_pending, rfds->min); - rfds->count = min(rfds->count, rfds->max); - cbs->count = max(ring->tx_pending, cbs->min); - cbs->count = min(cbs->count, cbs->max); - netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", - rfds->count, cbs->count); - if (netif_running(netdev)) - e100_up(nic); - - return 0; -} - -static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { - "Link test (on/offline)", - "Eeprom test (on/offline)", - "Self test (offline)", - "Mac loopback (offline)", - "Phy loopback (offline)", -}; -#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) - -static void e100_diag_test(struct net_device *netdev, - struct ethtool_test *test, u64 *data) -{ - struct ethtool_cmd cmd; - struct nic *nic = netdev_priv(netdev); - int i, err; - - memset(data, 0, E100_TEST_LEN * sizeof(u64)); - data[0] = 
!mii_link_ok(&nic->mii); - data[1] = e100_eeprom_load(nic); - if (test->flags & ETH_TEST_FL_OFFLINE) { - - /* save speed, duplex & autoneg settings */ - err = mii_ethtool_gset(&nic->mii, &cmd); - - if (netif_running(netdev)) - e100_down(nic); - data[2] = e100_self_test(nic); - data[3] = e100_loopback_test(nic, lb_mac); - data[4] = e100_loopback_test(nic, lb_phy); - - /* restore speed, duplex & autoneg settings */ - err = mii_ethtool_sset(&nic->mii, &cmd); - - if (netif_running(netdev)) - e100_up(nic); - } - for (i = 0; i < E100_TEST_LEN; i++) - test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; - - msleep_interruptible(4 * 1000); -} - -static int e100_set_phys_id(struct net_device *netdev, - enum ethtool_phys_id_state state) -{ - struct nic *nic = netdev_priv(netdev); - enum led_state { - led_on = 0x01, - led_off = 0x04, - led_on_559 = 0x05, - led_on_557 = 0x07, - }; - u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : - MII_LED_CONTROL; - u16 leds = 0; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - return 2; - - case ETHTOOL_ID_ON: - leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : - (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; - break; - - case ETHTOOL_ID_OFF: - leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; - break; - - case ETHTOOL_ID_INACTIVE: - break; - } - - mdio_write(netdev, nic->mii.phy_id, led_reg, leds); - return 0; -} - -static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { - "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", - "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", - "rx_length_errors", "rx_over_errors", "rx_crc_errors", - "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", - "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", - "tx_heartbeat_errors", "tx_window_errors", - /* device-specific stats */ - "tx_deferred", "tx_single_collisions", "tx_multi_collisions", - "tx_flow_control_pause", "rx_flow_control_pause", - "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", -}; -#define E100_NET_STATS_LEN 21 -#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) - -static int e100_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return E100_TEST_LEN; - case ETH_SS_STATS: - return E100_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static void e100_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct nic *nic = netdev_priv(netdev); - int i; - - for (i = 0; i < E100_NET_STATS_LEN; i++) - data[i] = ((unsigned long *)&netdev->stats)[i]; - - data[i++] = nic->tx_deferred; - data[i++] = nic->tx_single_collisions; - data[i++] = nic->tx_multiple_collisions; - data[i++] = nic->tx_fc_pause; - data[i++] = nic->rx_fc_pause; - data[i++] = nic->rx_fc_unsupported; - data[i++] = nic->tx_tco_frames; - data[i++] = nic->rx_tco_frames; -} - -static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -{ - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); - break; - case ETH_SS_STATS: - memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); - break; - } -} - -static const struct ethtool_ops e100_ethtool_ops = { - .get_settings = e100_get_settings, - .set_settings = e100_set_settings, - .get_drvinfo = e100_get_drvinfo, - .get_regs_len = e100_get_regs_len, - .get_regs = e100_get_regs, - .get_wol = e100_get_wol, - .set_wol = e100_set_wol, - .get_msglevel = e100_get_msglevel, - .set_msglevel = 
e100_set_msglevel, - .nway_reset = e100_nway_reset, - .get_link = e100_get_link, - .get_eeprom_len = e100_get_eeprom_len, - .get_eeprom = e100_get_eeprom, - .set_eeprom = e100_set_eeprom, - .get_ringparam = e100_get_ringparam, - .set_ringparam = e100_set_ringparam, - .self_test = e100_diag_test, - .get_strings = e100_get_strings, - .set_phys_id = e100_set_phys_id, - .get_ethtool_stats = e100_get_ethtool_stats, - .get_sset_count = e100_get_sset_count, -}; - -static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - struct nic *nic = netdev_priv(netdev); - - return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); -} - -static int e100_alloc(struct nic *nic) -{ - nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem), - &nic->dma_addr); - return nic->mem ? 0 : -ENOMEM; -} - -static void e100_free(struct nic *nic) -{ - if (nic->mem) { - pci_free_consistent(nic->pdev, sizeof(struct mem), - nic->mem, nic->dma_addr); - nic->mem = NULL; - } -} - -static int e100_open(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - int err = 0; - - netif_carrier_off(netdev); - if ((err = e100_up(nic))) - netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); - return err; -} - -static int e100_close(struct net_device *netdev) -{ - e100_down(netdev_priv(netdev)); - return 0; -} - -static const struct net_device_ops e100_netdev_ops = { - .ndo_open = e100_open, - .ndo_stop = e100_close, - .ndo_start_xmit = e100_xmit_frame, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = e100_set_multicast_list, - .ndo_set_mac_address = e100_set_mac_address, - .ndo_change_mtu = e100_change_mtu, - .ndo_do_ioctl = e100_do_ioctl, - .ndo_tx_timeout = e100_tx_timeout, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = e100_netpoll, -#endif -}; - -static int __devinit e100_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct nic *nic; - int err; - - if (!(netdev = alloc_etherdev(sizeof(struct nic)))) { - if (((1 << debug) - 1) & NETIF_MSG_PROBE) - pr_err("Etherdev alloc failed, aborting\n"); - return -ENOMEM; - } - - netdev->netdev_ops = &e100_netdev_ops; - SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); - netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - - nic = netdev_priv(netdev); - netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); - nic->netdev = netdev; - nic->pdev = pdev; - nic->msg_enable = (1 << debug) - 1; - nic->mdio_ctrl = mdio_ctrl_hw; - pci_set_drvdata(pdev, netdev); - - if ((err = pci_enable_device(pdev))) { - netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); - goto err_out_free_dev; - } - - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); - err = -ENODEV; - goto err_out_disable_pdev; - } - - if ((err = pci_request_regions(pdev, DRV_NAME))) { - netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); - goto err_out_disable_pdev; - } - - if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { - netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); - goto err_out_free_res; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - if (use_io) - netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); - - nic->csr = pci_iomap(pdev, (use_io ? 
1 : 0), sizeof(struct csr)); - if (!nic->csr) { - netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); - err = -ENOMEM; - goto err_out_free_res; - } - - if (ent->driver_data) - nic->flags |= ich; - else - nic->flags &= ~ich; - - e100_get_defaults(nic); - - /* locks must be initialized before calling hw_reset */ - spin_lock_init(&nic->cb_lock); - spin_lock_init(&nic->cmd_lock); - spin_lock_init(&nic->mdio_lock); - - /* Reset the device before pci_set_master() in case device is in some - * funky state and has an interrupt pending - hint: we don't have the - * interrupt handler registered yet. */ - e100_hw_reset(nic); - - pci_set_master(pdev); - - init_timer(&nic->watchdog); - nic->watchdog.function = e100_watchdog; - nic->watchdog.data = (unsigned long)nic; - - INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); - - if ((err = e100_alloc(nic))) { - netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); - goto err_out_iounmap; - } - - if ((err = e100_eeprom_load(nic))) - goto err_out_free; - - e100_phy_init(nic); - - memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); - memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN); - if (!is_valid_ether_addr(netdev->perm_addr)) { - if (!eeprom_bad_csum_allow) { - netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); - err = -EAGAIN; - goto err_out_free; - } else { - netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); - } - } - - /* Wol magic packet can be enabled from eeprom */ - if ((nic->mac >= mac_82558_D101_A4) && - (nic->eeprom[eeprom_id] & eeprom_id_wol)) { - nic->flags |= wol_magic; - device_set_wakeup_enable(&pdev->dev, true); - } - - /* ack any pending wake events, disable PME */ - pci_pme_active(pdev, false); - - strcpy(netdev->name, "eth%d"); - if ((err = register_netdev(netdev))) { - netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); - goto err_out_free; - } - nic->cbs_pool = pci_pool_create(netdev->name, - nic->pdev, - nic->params.cbs.max * sizeof(struct cb), - sizeof(u32), - 0); - netif_info(nic, probe, nic->netdev, - "addr 0x%llx, irq %d, MAC addr %pM\n", - (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), - pdev->irq, netdev->dev_addr); - - return 0; - -err_out_free: - e100_free(nic); -err_out_iounmap: - pci_iounmap(pdev, nic->csr); -err_out_free_res: - pci_release_regions(pdev); -err_out_disable_pdev: - pci_disable_device(pdev); -err_out_free_dev: - pci_set_drvdata(pdev, NULL); - free_netdev(netdev); - return err; -} - -static void __devexit e100_remove(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - - if (netdev) { - struct nic *nic = netdev_priv(netdev); - unregister_netdev(netdev); - e100_free(nic); - pci_iounmap(pdev, nic->csr); - pci_pool_destroy(nic->cbs_pool); - free_netdev(netdev); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - } -} - -#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ -#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ -#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ -static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct nic *nic = netdev_priv(netdev); - - if (netif_running(netdev)) - e100_down(nic); - netif_device_detach(netdev); - - pci_save_state(pdev); - - if ((nic->flags & wol_magic) | e100_asf(nic)) { - /* enable reverse auto-negotiation */ - if (nic->phy == phy_82552_v) { - u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, - E100_82552_SMARTSPEED); - - mdio_write(netdev, nic->mii.phy_id, - E100_82552_SMARTSPEED, smartspeed | - E100_82552_REV_ANEG | E100_82552_ANEG_NOW); - } - *enable_wake = true; - } else { - *enable_wake = false; - } - - pci_disable_device(pdev); -} - -static int __e100_power_off(struct pci_dev *pdev, bool wake) -{ - if (wake) - return pci_prepare_to_sleep(pdev); - - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - - return 0; -} - -#ifdef CONFIG_PM -static int e100_suspend(struct pci_dev *pdev, pm_message_t state) -{ - bool wake; - __e100_shutdown(pdev, &wake); - return __e100_power_off(pdev, wake); -} - -static int e100_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct nic *nic = netdev_priv(netdev); - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* ack any pending wake events, disable PME */ - pci_enable_wake(pdev, 0, 0); - - /* disable reverse auto-negotiation */ - if (nic->phy == phy_82552_v) { - u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, - E100_82552_SMARTSPEED); - - mdio_write(netdev, nic->mii.phy_id, - E100_82552_SMARTSPEED, - smartspeed & ~(E100_82552_REV_ANEG)); - } - - netif_device_attach(netdev); - if (netif_running(netdev)) - e100_up(nic); - - return 0; -} -#endif /* CONFIG_PM */ - -static void e100_shutdown(struct pci_dev *pdev) -{ - bool wake; - __e100_shutdown(pdev, &wake); - if (system_state == SYSTEM_POWER_OFF) - __e100_power_off(pdev, wake); -} - -/* ------------------ PCI Error Recovery infrastructure -------------- */ -/** - * e100_io_error_detected - called when PCI error is detected. - * @pdev: Pointer to PCI device - * @state: The current pci connection state - */ -static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct nic *nic = netdev_priv(netdev); - - netif_device_detach(netdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (netif_running(netdev)) - e100_down(nic); - pci_disable_device(pdev); - - /* Request a slot reset. 
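
/*
 * [Illustrative sketch; not from e100.c. The callbacks that follow
 * implement the usual three-step PCI error-recovery contract; this
 * driver-agnostic outline only summarizes who does what at each step.]
 */
enum recovery_step { ERROR_DETECTED, SLOT_RESET, IO_RESUME };

static const char *recovery_action(enum recovery_step step)
{
	switch (step) {
	case ERROR_DETECTED:	/* quiesce */
		return "detach netdev, stop traffic, disable PCI device";
	case SLOT_RESET:	/* bus reset done; bring hardware back */
		return "re-enable device, restore bus mastering, reset hw";
	case IO_RESUME:		/* all clear; restart traffic */
		return "re-attach netdev and reopen if it was running";
	}
	return "";
}
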
*/ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * e100_io_slot_reset - called after the pci bus has been reset. - * @pdev: Pointer to PCI device - * - * Restart the card from scratch. - */ -static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct nic *nic = netdev_priv(netdev); - - if (pci_enable_device(pdev)) { - pr_err("Cannot re-enable PCI device after reset\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - pci_set_master(pdev); - - /* Only one device per card can do a reset */ - if (0 != PCI_FUNC(pdev->devfn)) - return PCI_ERS_RESULT_RECOVERED; - e100_hw_reset(nic); - e100_phy_init(nic); - - return PCI_ERS_RESULT_RECOVERED; -} - -/** - * e100_io_resume - resume normal operations - * @pdev: Pointer to PCI device - * - * Resume normal operations after an error recovery - * sequence has been completed. - */ -static void e100_io_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct nic *nic = netdev_priv(netdev); - - /* ack any pending wake events, disable PME */ - pci_enable_wake(pdev, 0, 0); - - netif_device_attach(netdev); - if (netif_running(netdev)) { - e100_open(netdev); - mod_timer(&nic->watchdog, jiffies); - } -} - -static struct pci_error_handlers e100_err_handler = { - .error_detected = e100_io_error_detected, - .slot_reset = e100_io_slot_reset, - .resume = e100_io_resume, -}; - -static struct pci_driver e100_driver = { - .name = DRV_NAME, - .id_table = e100_id_table, - .probe = e100_probe, - .remove = __devexit_p(e100_remove), -#ifdef CONFIG_PM - /* Power Management hooks */ - .suspend = e100_suspend, - .resume = e100_resume, -#endif - .shutdown = e100_shutdown, - .err_handler = &e100_err_handler, -}; - -static int __init e100_init_module(void) -{ - if (((1 << debug) - 1) & NETIF_MSG_DRV) { - pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); - pr_info("%s\n", DRV_COPYRIGHT); - } - return pci_register_driver(&e100_driver); -} - -static void __exit e100_cleanup_module(void) -{ - pci_unregister_driver(&e100_driver); -} - -module_init(e100_init_module); -module_exit(e100_cleanup_module); diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile deleted file mode 100644 index 4a6ab15..0000000 --- a/drivers/net/e1000/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -################################################################################ -# -# Intel PRO/1000 Linux driver -# Copyright(c) 1999 - 2006 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - -# -# Makefile for the Intel(R) PRO/1000 ethernet driver -# - -obj-$(CONFIG_E1000) += e1000.o - -e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h deleted file mode 100644 index 24f41da..0000000 --- a/drivers/net/e1000/e1000.h +++ /dev/null @@ -1,361 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -/* Linux PRO/1000 Ethernet Driver main header file */ - -#ifndef _E1000_H_ -#define _E1000_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define BAR_0 0 -#define BAR_1 1 -#define BAR_5 5 - -#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} - -struct e1000_adapter; - -#include "e1000_hw.h" - -#define E1000_MAX_INTR 10 - -/* TX/RX descriptor defines */ -#define E1000_DEFAULT_TXD 256 -#define E1000_MAX_TXD 256 -#define E1000_MIN_TXD 48 -#define E1000_MAX_82544_TXD 4096 - -#define E1000_DEFAULT_RXD 256 -#define E1000_MAX_RXD 256 -#define E1000_MIN_RXD 48 -#define E1000_MAX_82544_RXD 4096 - -#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ -#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ - -/* this is the size past which hardware will drop packets when setting LPE=0 */ -#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 - -/* Supported Rx Buffer Sizes */ -#define E1000_RXBUFFER_128 128 /* Used for packet split */ -#define E1000_RXBUFFER_256 256 /* Used for packet split */ -#define E1000_RXBUFFER_512 512 -#define E1000_RXBUFFER_1024 1024 -#define E1000_RXBUFFER_2048 2048 -#define E1000_RXBUFFER_4096 4096 -#define E1000_RXBUFFER_8192 8192 -#define E1000_RXBUFFER_16384 16384 - -/* SmartSpeed delimiters */ -#define E1000_SMARTSPEED_DOWNSHIFT 3 -#define E1000_SMARTSPEED_MAX 15 - -/* Packet Buffer allocations */ -#define E1000_PBA_BYTES_SHIFT 0xA -#define E1000_TX_HEAD_ADDR_SHIFT 7 -#define E1000_PBA_TX_MASK 0xFFFF0000 - -/* Flow Control Watermarks */ -#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ -#define 
E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ - -#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ - -/* How many Tx Descriptors do we need to call netif_wake_queue ? */ -#define E1000_TX_QUEUE_WAKE 16 -/* How many Rx Buffers do we bundle into one write to the hardware ? */ -#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ - -#define AUTO_ALL_MODES 0 -#define E1000_EEPROM_82544_APM 0x0004 -#define E1000_EEPROM_APME 0x0400 - -#ifndef E1000_MASTER_SLAVE -/* Switch to override PHY master/slave setting */ -#define E1000_MASTER_SLAVE e1000_ms_hw_default -#endif - -#define E1000_MNG_VLAN_NONE (-1) - -/* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ -struct e1000_buffer { - struct sk_buff *skb; - dma_addr_t dma; - struct page *page; - unsigned long time_stamp; - u16 length; - u16 next_to_watch; - u16 mapped_as_page; -}; - -struct e1000_tx_ring { - /* pointer to the descriptor ring memory */ - void *desc; - /* physical address of the descriptor ring */ - dma_addr_t dma; - /* length of descriptor ring in bytes */ - unsigned int size; - /* number of descriptors in the ring */ - unsigned int count; - /* next descriptor to associate a buffer with */ - unsigned int next_to_use; - /* next descriptor to check for DD status bit */ - unsigned int next_to_clean; - /* array of buffer information structs */ - struct e1000_buffer *buffer_info; - - u16 tdh; - u16 tdt; - bool last_tx_tso; -}; - -struct e1000_rx_ring { - /* pointer to the descriptor ring memory */ - void *desc; - /* physical address of the descriptor ring */ - dma_addr_t dma; - /* length of descriptor ring in bytes */ - unsigned int size; - /* number of descriptors in the ring */ - unsigned int count; - /* next descriptor to associate a buffer with */ - unsigned int next_to_use; - /* next descriptor to check for DD status bit */ - unsigned int next_to_clean; - /* array of buffer information structs */ - struct e1000_buffer *buffer_info; - struct sk_buff *rx_skb_top; - - /* cpu for rx queue */ - int cpu; - - u16 rdh; - u16 rdt; -}; - -#define E1000_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) \ - ? 
0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) - -#define E1000_RX_DESC_EXT(R, i) \ - (&(((union e1000_rx_desc_extended *)((R).desc))[i])) -#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) -#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) -#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) -#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) - -/* board specific private data structure */ - -struct e1000_adapter { - struct timer_list tx_fifo_stall_timer; - struct timer_list watchdog_timer; - struct timer_list phy_info_timer; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u16 mng_vlan_id; - u32 bd_number; - u32 rx_buffer_len; - u32 wol; - u32 smartspeed; - u32 en_mng_pt; - u16 link_speed; - u16 link_duplex; - spinlock_t stats_lock; - unsigned int total_tx_bytes; - unsigned int total_tx_packets; - unsigned int total_rx_bytes; - unsigned int total_rx_packets; - /* Interrupt Throttle Rate */ - u32 itr; - u32 itr_setting; - u16 tx_itr; - u16 rx_itr; - - struct work_struct reset_task; - u8 fc_autoneg; - - /* TX */ - struct e1000_tx_ring *tx_ring; /* One per active queue */ - unsigned int restart_queue; - u32 txd_cmd; - u32 tx_int_delay; - u32 tx_abs_int_delay; - u32 gotcl; - u64 gotcl_old; - u64 tpt_old; - u64 colc_old; - u32 tx_timeout_count; - u32 tx_fifo_head; - u32 tx_head_addr; - u32 tx_fifo_size; - u8 tx_timeout_factor; - atomic_t tx_fifo_stall; - bool pcix_82544; - bool detect_tx_hung; - - /* RX */ - bool (*clean_rx)(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int *work_done, int work_to_do); - void (*alloc_rx_buf)(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count); - struct e1000_rx_ring *rx_ring; /* One per active queue */ - struct napi_struct napi; - - int num_tx_queues; - int num_rx_queues; - - u64 hw_csum_err; - u64 hw_csum_good; - u32 alloc_rx_buff_failed; - u32 rx_int_delay; - u32 rx_abs_int_delay; - bool rx_csum; - u32 gorcl; - u64 gorcl_old; - - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - /* structs defined in e1000_hw.h */ - struct e1000_hw hw; - struct e1000_hw_stats stats; - struct e1000_phy_info phy_info; - struct e1000_phy_stats phy_stats; - - u32 test_icr; - struct e1000_tx_ring test_tx_ring; - struct e1000_rx_ring test_rx_ring; - - int msg_enable; - - /* to not mess up cache alignment, always add to the bottom */ - bool tso_force; - bool smart_power_down; /* phy smart power down */ - bool quad_port_a; - unsigned long flags; - u32 eeprom_wol; - - /* for ioport free */ - int bars; - int need_ioport; - - bool discarding; - - struct work_struct fifo_stall_task; - struct work_struct phy_info_task; -}; - -enum e1000_state_t { - __E1000_TESTING, - __E1000_RESETTING, - __E1000_DOWN -}; - -#undef pr_fmt -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); -#define e_dbg(format, arg...) \ - netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) -#define e_err(msglvl, format, arg...) \ - netif_err(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_info(msglvl, format, arg...) \ - netif_info(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_warn(msglvl, format, arg...) \ - netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_notice(msglvl, format, arg...) \ - netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_dev_info(format, arg...) 
\ - dev_info(&adapter->pdev->dev, format, ## arg) -#define e_dev_warn(format, arg...) \ - dev_warn(&adapter->pdev->dev, format, ## arg) -#define e_dev_err(format, arg...) \ - dev_err(&adapter->pdev->dev, format, ## arg) - -extern char e1000_driver_name[]; -extern const char e1000_driver_version[]; - -extern int e1000_up(struct e1000_adapter *adapter); -extern void e1000_down(struct e1000_adapter *adapter); -extern void e1000_reinit_locked(struct e1000_adapter *adapter); -extern void e1000_reset(struct e1000_adapter *adapter); -extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); -extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); -extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); -extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); -extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); -extern void e1000_update_stats(struct e1000_adapter *adapter); -extern bool e1000_has_link(struct e1000_adapter *adapter); -extern void e1000_power_up_phy(struct e1000_adapter *); -extern void e1000_set_ethtool_ops(struct net_device *netdev); -extern void e1000_check_options(struct e1000_adapter *adapter); -extern char *e1000_get_hw_dev_name(struct e1000_hw *hw); - -#endif /* _E1000_H_ */ diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c deleted file mode 100644 index 5548d46..0000000 --- a/drivers/net/e1000/e1000_ethtool.c +++ /dev/null @@ -1,1863 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* ethtool support for e1000 */ - -#include "e1000.h" -#include - -enum {NETDEV_STATS, E1000_STATS}; - -struct e1000_stats { - char stat_string[ETH_GSTRING_LEN]; - int type; - int sizeof_stat; - int stat_offset; -}; - -#define E1000_STAT(m) E1000_STATS, \ - sizeof(((struct e1000_adapter *)0)->m), \ - offsetof(struct e1000_adapter, m) -#define E1000_NETDEV_STAT(m) NETDEV_STATS, \ - sizeof(((struct net_device *)0)->m), \ - offsetof(struct net_device, m) - -static const struct e1000_stats e1000_gstrings_stats[] = { - { "rx_packets", E1000_STAT(stats.gprc) }, - { "tx_packets", E1000_STAT(stats.gptc) }, - { "rx_bytes", E1000_STAT(stats.gorcl) }, - { "tx_bytes", E1000_STAT(stats.gotcl) }, - { "rx_broadcast", E1000_STAT(stats.bprc) }, - { "tx_broadcast", E1000_STAT(stats.bptc) }, - { "rx_multicast", E1000_STAT(stats.mprc) }, - { "tx_multicast", E1000_STAT(stats.mptc) }, - { "rx_errors", E1000_STAT(stats.rxerrc) }, - { "tx_errors", E1000_STAT(stats.txerrc) }, - { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) }, - { "multicast", E1000_STAT(stats.mprc) }, - { "collisions", E1000_STAT(stats.colc) }, - { "rx_length_errors", E1000_STAT(stats.rlerrc) }, - { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) }, - { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, - { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) }, - { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, - { "rx_missed_errors", E1000_STAT(stats.mpc) }, - { "tx_aborted_errors", E1000_STAT(stats.ecol) }, - { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, - { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) }, - { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) }, - { "tx_window_errors", E1000_STAT(stats.latecol) }, - { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, - { "tx_deferred_ok", E1000_STAT(stats.dc) }, - { "tx_single_coll_ok", E1000_STAT(stats.scc) }, - { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, - { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, - { "tx_restart_queue", E1000_STAT(restart_queue) }, - { "rx_long_length_errors", E1000_STAT(stats.roc) }, - { "rx_short_length_errors", E1000_STAT(stats.ruc) }, - { "rx_align_errors", E1000_STAT(stats.algnerrc) }, - { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, - { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, - { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, - { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, - { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, - { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, - { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, - { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, - { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, - { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, - { "tx_smbus", E1000_STAT(stats.mgptc) }, - { "rx_smbus", E1000_STAT(stats.mgprc) }, - { "dropped_smbus", E1000_STAT(stats.mgpdc) }, -}; - -#define E1000_QUEUE_STATS_LEN 0 -#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) -#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) -static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", "Eeprom test (offline)", - "Interrupt test (offline)", "Loopback test (offline)", - "Link test (on/offline)" -}; -#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) - -static int e1000_get_settings(struct net_device *netdev, - struct ethtool_cmd 
*ecmd) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - if (hw->media_type == e1000_media_type_copper) { - - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full| - SUPPORTED_Autoneg | - SUPPORTED_TP); - ecmd->advertising = ADVERTISED_TP; - - if (hw->autoneg == 1) { - ecmd->advertising |= ADVERTISED_Autoneg; - /* the e1000 autoneg seems to match ethtool nicely */ - ecmd->advertising |= hw->autoneg_advertised; - } - - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy_addr; - - if (hw->mac_type == e1000_82543) - ecmd->transceiver = XCVR_EXTERNAL; - else - ecmd->transceiver = XCVR_INTERNAL; - - } else { - ecmd->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg); - - ecmd->advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE | - ADVERTISED_Autoneg); - - ecmd->port = PORT_FIBRE; - - if (hw->mac_type >= e1000_82545) - ecmd->transceiver = XCVR_INTERNAL; - else - ecmd->transceiver = XCVR_EXTERNAL; - } - - if (er32(STATUS) & E1000_STATUS_LU) { - - e1000_get_speed_and_duplex(hw, &adapter->link_speed, - &adapter->link_duplex); - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - - /* unfortunately FULL_DUPLEX != DUPLEX_FULL - * and HALF_DUPLEX != DUPLEX_HALF */ - - if (adapter->link_duplex == FULL_DUPLEX) - ecmd->duplex = DUPLEX_FULL; - else - ecmd->duplex = DUPLEX_HALF; - } else { - ethtool_cmd_speed_set(ecmd, -1); - ecmd->duplex = -1; - } - - ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || - hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; - return 0; -} - -static int e1000_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) - msleep(1); - - if (ecmd->autoneg == AUTONEG_ENABLE) { - hw->autoneg = 1; - if (hw->media_type == e1000_media_type_fiber) - hw->autoneg_advertised = ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE | - ADVERTISED_Autoneg; - else - hw->autoneg_advertised = ecmd->advertising | - ADVERTISED_TP | - ADVERTISED_Autoneg; - ecmd->advertising = hw->autoneg_advertised; - } else { - u32 speed = ethtool_cmd_speed(ecmd); - if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { - clear_bit(__E1000_RESETTING, &adapter->flags); - return -EINVAL; - } - } - - /* reset the link */ - - if (netif_running(adapter->netdev)) { - e1000_down(adapter); - e1000_up(adapter); - } else - e1000_reset(adapter); - - clear_bit(__E1000_RESETTING, &adapter->flags); - return 0; -} - -static u32 e1000_get_link(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - /* - * If the link is not reported up to netdev, interrupts are disabled, - * and so the physical link state may have changed since we last - * looked. Set get_link_status to make sure that the true link - * state is interrogated, rather than pulling a cached and possibly - * stale link state from the driver. - */ - if (!netif_carrier_ok(netdev)) - adapter->hw.get_link_status = 1; - - return e1000_has_link(adapter); -} - -static void e1000_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - pause->autoneg = - (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); - - if (hw->fc == E1000_FC_RX_PAUSE) - pause->rx_pause = 1; - else if (hw->fc == E1000_FC_TX_PAUSE) - pause->tx_pause = 1; - else if (hw->fc == E1000_FC_FULL) { - pause->rx_pause = 1; - pause->tx_pause = 1; - } -} - -static int e1000_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int retval = 0; - - adapter->fc_autoneg = pause->autoneg; - - while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) - msleep(1); - - if (pause->rx_pause && pause->tx_pause) - hw->fc = E1000_FC_FULL; - else if (pause->rx_pause && !pause->tx_pause) - hw->fc = E1000_FC_RX_PAUSE; - else if (!pause->rx_pause && pause->tx_pause) - hw->fc = E1000_FC_TX_PAUSE; - else if (!pause->rx_pause && !pause->tx_pause) - hw->fc = E1000_FC_NONE; - - hw->original_fc = hw->fc; - - if (adapter->fc_autoneg == AUTONEG_ENABLE) { - if (netif_running(adapter->netdev)) { - e1000_down(adapter); - e1000_up(adapter); - } else - e1000_reset(adapter); - } else - retval = ((hw->media_type == e1000_media_type_fiber) ? - e1000_setup_link(hw) : e1000_force_mac_fc(hw)); - - clear_bit(__E1000_RESETTING, &adapter->flags); - return retval; -} - -static u32 e1000_get_msglevel(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - return adapter->msg_enable; -} - -static void e1000_set_msglevel(struct net_device *netdev, u32 data) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = data; -} - -static int e1000_get_regs_len(struct net_device *netdev) -{ -#define E1000_REGS_LEN 32 - return E1000_REGS_LEN * sizeof(u32); -} - -static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, - void *p) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 *regs_buff = p; - u16 phy_data; - - memset(p, 0, E1000_REGS_LEN * sizeof(u32)); - - regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; - - regs_buff[0] = er32(CTRL); - regs_buff[1] = er32(STATUS); - - regs_buff[2] = er32(RCTL); - regs_buff[3] = er32(RDLEN); - regs_buff[4] = er32(RDH); - regs_buff[5] = er32(RDT); - regs_buff[6] = er32(RDTR); - - regs_buff[7] = er32(TCTL); - regs_buff[8] = er32(TDLEN); - regs_buff[9] = er32(TDH); - regs_buff[10] = er32(TDT); - regs_buff[11] = er32(TIDV); - - regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ - if (hw->phy_type == e1000_phy_igp) { - e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, - IGP01E1000_PHY_AGC_A); - e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & - IGP01E1000_PHY_PAGE_SELECT, &phy_data); - regs_buff[13] = (u32)phy_data; /* cable length */ - e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, - IGP01E1000_PHY_AGC_B); - e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & - IGP01E1000_PHY_PAGE_SELECT, &phy_data); - regs_buff[14] = (u32)phy_data; /* cable length */ - e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, - IGP01E1000_PHY_AGC_C); - e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & - IGP01E1000_PHY_PAGE_SELECT, &phy_data); - regs_buff[15] = (u32)phy_data; /* cable length */ - e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, - IGP01E1000_PHY_AGC_D); - e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & - IGP01E1000_PHY_PAGE_SELECT, &phy_data); - regs_buff[16] = (u32)phy_data; /* cable length */ - regs_buff[17] = 0; /* extended 10bt distance (not needed) */ - e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); - e1000_read_phy_reg(hw, 
IGP01E1000_PHY_PORT_STATUS & - IGP01E1000_PHY_PAGE_SELECT, &phy_data); - regs_buff[18] = (u32)phy_data; /* cable polarity */ - e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, - IGP01E1000_PHY_PCS_INIT_REG); - e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & - IGP01E1000_PHY_PAGE_SELECT, &phy_data); - regs_buff[19] = (u32)phy_data; /* cable polarity */ - regs_buff[20] = 0; /* polarity correction enabled (always) */ - regs_buff[22] = 0; /* phy receive errors (unavailable) */ - regs_buff[23] = regs_buff[18]; /* mdix mode */ - e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); - } else { - e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - regs_buff[13] = (u32)phy_data; /* cable length */ - regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ - regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ - regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ - e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ - regs_buff[18] = regs_buff[13]; /* cable polarity */ - regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ - regs_buff[20] = regs_buff[17]; /* polarity correction */ - /* phy receive errors */ - regs_buff[22] = adapter->phy_stats.receive_errors; - regs_buff[23] = regs_buff[13]; /* mdix mode */ - } - regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ - e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); - regs_buff[24] = (u32)phy_data; /* phy local receiver status */ - regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ - if (hw->mac_type >= e1000_82540 && - hw->media_type == e1000_media_type_copper) { - regs_buff[26] = er32(MANC); - } -} - -static int e1000_get_eeprom_len(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - return hw->eeprom.word_size * 2; -} - -static int e1000_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u16 *eeprom_buff; - int first_word, last_word; - int ret_val = 0; - u16 i; - - if (eeprom->len == 0) - return -EINVAL; - - eeprom->magic = hw->vendor_id | (hw->device_id << 16); - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - - eeprom_buff = kmalloc(sizeof(u16) * - (last_word - first_word + 1), GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - if (hw->eeprom.type == e1000_eeprom_spi) - ret_val = e1000_read_eeprom(hw, first_word, - last_word - first_word + 1, - eeprom_buff); - else { - for (i = 0; i < last_word - first_word + 1; i++) { - ret_val = e1000_read_eeprom(hw, first_word + i, 1, - &eeprom_buff[i]); - if (ret_val) - break; - } - } - - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < last_word - first_word + 1; i++) - le16_to_cpus(&eeprom_buff[i]); - - memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), - eeprom->len); - kfree(eeprom_buff); - - return ret_val; -} - -static int e1000_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u16 *eeprom_buff; - void *ptr; - int max_len, first_word, last_word, ret_val = 0; - u16 i; - - if (eeprom->len == 0) - return -EOPNOTSUPP; - - if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) - return -EFAULT; - - max_len = hw->eeprom.word_size * 
2; - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_buff = kmalloc(max_len, GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - ptr = (void *)eeprom_buff; - - if (eeprom->offset & 1) { - /* need read/modify/write of first changed EEPROM word */ - /* only the second byte of the word is being modified */ - ret_val = e1000_read_eeprom(hw, first_word, 1, - &eeprom_buff[0]); - ptr++; - } - if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { - /* need read/modify/write of last changed EEPROM word */ - /* only the first byte of the word is being modified */ - ret_val = e1000_read_eeprom(hw, last_word, 1, - &eeprom_buff[last_word - first_word]); - } - - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < last_word - first_word + 1; i++) - le16_to_cpus(&eeprom_buff[i]); - - memcpy(ptr, bytes, eeprom->len); - - for (i = 0; i < last_word - first_word + 1; i++) - eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); - - ret_val = e1000_write_eeprom(hw, first_word, - last_word - first_word + 1, eeprom_buff); - - /* Update the checksum over the first part of the EEPROM if needed */ - if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) - e1000_update_eeprom_checksum(hw); - - kfree(eeprom_buff); - return ret_val; -} - -static void e1000_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - char firmware_version[32]; - - strncpy(drvinfo->driver, e1000_driver_name, 32); - strncpy(drvinfo->version, e1000_driver_version, 32); - - sprintf(firmware_version, "N/A"); - strncpy(drvinfo->fw_version, firmware_version, 32); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->regdump_len = e1000_get_regs_len(netdev); - drvinfo->eedump_len = e1000_get_eeprom_len(netdev); -} - -static void e1000_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - e1000_mac_type mac_type = hw->mac_type; - struct e1000_tx_ring *txdr = adapter->tx_ring; - struct e1000_rx_ring *rxdr = adapter->rx_ring; - - ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : - E1000_MAX_82544_RXD; - ring->tx_max_pending = (mac_type < e1000_82544) ? 
E1000_MAX_TXD : - E1000_MAX_82544_TXD; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = rxdr->count; - ring->tx_pending = txdr->count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} - -static int e1000_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - e1000_mac_type mac_type = hw->mac_type; - struct e1000_tx_ring *txdr, *tx_old; - struct e1000_rx_ring *rxdr, *rx_old; - int i, err; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) - msleep(1); - - if (netif_running(adapter->netdev)) - e1000_down(adapter); - - tx_old = adapter->tx_ring; - rx_old = adapter->rx_ring; - - err = -ENOMEM; - txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL); - if (!txdr) - goto err_alloc_tx; - - rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL); - if (!rxdr) - goto err_alloc_rx; - - adapter->tx_ring = txdr; - adapter->rx_ring = rxdr; - - rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); - rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ? - E1000_MAX_RXD : E1000_MAX_82544_RXD)); - rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); - - txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); - txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ? - E1000_MAX_TXD : E1000_MAX_82544_TXD)); - txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); - - for (i = 0; i < adapter->num_tx_queues; i++) - txdr[i].count = txdr->count; - for (i = 0; i < adapter->num_rx_queues; i++) - rxdr[i].count = rxdr->count; - - if (netif_running(adapter->netdev)) { - /* Try to get new resources before deleting old */ - err = e1000_setup_all_rx_resources(adapter); - if (err) - goto err_setup_rx; - err = e1000_setup_all_tx_resources(adapter); - if (err) - goto err_setup_tx; - - /* save the new, restore the old in order to free it, - * then restore the new back again */ - - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - e1000_free_all_rx_resources(adapter); - e1000_free_all_tx_resources(adapter); - kfree(tx_old); - kfree(rx_old); - adapter->rx_ring = rxdr; - adapter->tx_ring = txdr; - err = e1000_up(adapter); - if (err) - goto err_setup; - } - - clear_bit(__E1000_RESETTING, &adapter->flags); - return 0; -err_setup_tx: - e1000_free_all_rx_resources(adapter); -err_setup_rx: - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - kfree(rxdr); -err_alloc_rx: - kfree(txdr); -err_alloc_tx: - e1000_up(adapter); -err_setup: - clear_bit(__E1000_RESETTING, &adapter->flags); - return err; -} - -static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, - u32 mask, u32 write) -{ - struct e1000_hw *hw = &adapter->hw; - static const u32 test[] = - {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; - u8 __iomem *address = hw->hw_addr + reg; - u32 read; - int i; - - for (i = 0; i < ARRAY_SIZE(test); i++) { - writel(write & test[i], address); - read = readl(address); - if (read != (write & test[i] & mask)) { - e_err(drv, "pattern test reg %04X failed: " - "got 0x%08X expected 0x%08X\n", - reg, read, (write & test[i] & mask)); - *data = reg; - return true; - } - } - return false; -} - -static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, - u32 mask, u32 write) -{ - struct e1000_hw *hw = &adapter->hw; - u8 __iomem *address = 
hw->hw_addr + reg; - u32 read; - - writel(write & mask, address); - read = readl(address); - if ((read & mask) != (write & mask)) { - e_err(drv, "set/check reg %04X test failed: " - "got 0x%08X expected 0x%08X\n", - reg, (read & mask), (write & mask)); - *data = reg; - return true; - } - return false; -} - -#define REG_PATTERN_TEST(reg, mask, write) \ - do { \ - if (reg_pattern_test(adapter, data, \ - (hw->mac_type >= e1000_82543) \ - ? E1000_##reg : E1000_82542_##reg, \ - mask, write)) \ - return 1; \ - } while (0) - -#define REG_SET_AND_CHECK(reg, mask, write) \ - do { \ - if (reg_set_and_check(adapter, data, \ - (hw->mac_type >= e1000_82543) \ - ? E1000_##reg : E1000_82542_##reg, \ - mask, write)) \ - return 1; \ - } while (0) - -static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) -{ - u32 value, before, after; - u32 i, toggle; - struct e1000_hw *hw = &adapter->hw; - - /* The status register is Read Only, so a write should fail. - * Some bits that get toggled are ignored. - */ - - /* there are several bits on newer hardware that are r/w */ - toggle = 0xFFFFF833; - - before = er32(STATUS); - value = (er32(STATUS) & toggle); - ew32(STATUS, toggle); - after = er32(STATUS) & toggle; - if (value != after) { - e_err(drv, "failed STATUS register test got: " - "0x%08X expected: 0x%08X\n", after, value); - *data = 1; - return 1; - } - /* restore previous status */ - ew32(STATUS, before); - - REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); - - REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); - REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); - REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); - REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); - - REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); - - before = 0x06DFB3FE; - REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); - REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); - - if (hw->mac_type >= e1000_82543) { - - REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); - REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); - REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); - REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); - value = E1000_RAR_ENTRIES; - for (i = 0; i < value; i++) { - REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, - 0xFFFFFFFF); - } - - } else { - - REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); - REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); - REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); - - } - - value = E1000_MC_TBL_SIZE; - for (i = 0; i < value; i++) - REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); - - *data = 0; - return 0; -} - -static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) -{ - struct e1000_hw *hw = &adapter->hw; - u16 temp; - u16 checksum = 0; - u16 i; - - *data = 0; - /* Read and add up the contents of the EEPROM */ - for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { - if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { - *data = 1; - break; - } - checksum += temp; - } - - /* If Checksum is not Correct return error else test passed */ - if 
((checksum != (u16)EEPROM_SUM) && !(*data)) - *data = 2; - - return *data; -} - -static irqreturn_t e1000_test_intr(int irq, void *data) -{ - struct net_device *netdev = (struct net_device *)data; - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - adapter->test_icr |= er32(ICR); - - return IRQ_HANDLED; -} - -static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) -{ - struct net_device *netdev = adapter->netdev; - u32 mask, i = 0; - bool shared_int = true; - u32 irq = adapter->pdev->irq; - struct e1000_hw *hw = &adapter->hw; - - *data = 0; - - /* NOTE: we don't test MSI interrupts here, yet */ - /* Hook up test interrupt handler just for this test */ - if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, - netdev)) - shared_int = false; - else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, - netdev->name, netdev)) { - *data = 1; - return -1; - } - e_info(hw, "testing %s interrupt\n", (shared_int ? - "shared" : "unshared")); - - /* Disable all the interrupts */ - ew32(IMC, 0xFFFFFFFF); - E1000_WRITE_FLUSH(); - msleep(10); - - /* Test each interrupt */ - for (; i < 10; i++) { - - /* Interrupt to test */ - mask = 1 << i; - - if (!shared_int) { - /* Disable the interrupt to be reported in - * the cause register and then force the same - * interrupt and see if one gets posted. If - * an interrupt was posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - ew32(IMC, mask); - ew32(ICS, mask); - E1000_WRITE_FLUSH(); - msleep(10); - - if (adapter->test_icr & mask) { - *data = 3; - break; - } - } - - /* Enable the interrupt to be reported in - * the cause register and then force the same - * interrupt and see if one gets posted. If - * an interrupt was not posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - ew32(IMS, mask); - ew32(ICS, mask); - E1000_WRITE_FLUSH(); - msleep(10); - - if (!(adapter->test_icr & mask)) { - *data = 4; - break; - } - - if (!shared_int) { - /* Disable the other interrupts to be reported in - * the cause register and then force the other - * interrupts and see if any get posted. If - * an interrupt was posted to the bus, the - * test failed. 
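The masked and unmasked probes in this test share one force-and-verify shape. A minimal sketch of that shape, using hypothetical mmio_write()/mmio_flush() helpers and register names standing in for the driver's ew32()/E1000_WRITE_FLUSH() macros:

        /* Probe one interrupt cause while it is masked off. */
        u32 mask = 1u << i;
        mmio_write(REG_IMC, mask);      /* mask reporting of this cause */
        mmio_write(REG_ICS, mask);      /* software-trigger the same cause */
        mmio_flush();                   /* post the writes, then wait ~10 ms */
        if (test_icr & mask)
                failed = true;          /* fired while masked: test fails */

The unmasked probe inverts the check: after writing IMS and ICS, the bit must show up in the recorded cause register, or the test fails.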
- */ - adapter->test_icr = 0; - ew32(IMC, ~mask & 0x00007FFF); - ew32(ICS, ~mask & 0x00007FFF); - E1000_WRITE_FLUSH(); - msleep(10); - - if (adapter->test_icr) { - *data = 5; - break; - } - } - } - - /* Disable all the interrupts */ - ew32(IMC, 0xFFFFFFFF); - E1000_WRITE_FLUSH(); - msleep(10); - - /* Unhook test interrupt handler */ - free_irq(irq, netdev); - - return *data; -} - -static void e1000_free_desc_rings(struct e1000_adapter *adapter) -{ - struct e1000_tx_ring *txdr = &adapter->test_tx_ring; - struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; - struct pci_dev *pdev = adapter->pdev; - int i; - - if (txdr->desc && txdr->buffer_info) { - for (i = 0; i < txdr->count; i++) { - if (txdr->buffer_info[i].dma) - dma_unmap_single(&pdev->dev, - txdr->buffer_info[i].dma, - txdr->buffer_info[i].length, - DMA_TO_DEVICE); - if (txdr->buffer_info[i].skb) - dev_kfree_skb(txdr->buffer_info[i].skb); - } - } - - if (rxdr->desc && rxdr->buffer_info) { - for (i = 0; i < rxdr->count; i++) { - if (rxdr->buffer_info[i].dma) - dma_unmap_single(&pdev->dev, - rxdr->buffer_info[i].dma, - rxdr->buffer_info[i].length, - DMA_FROM_DEVICE); - if (rxdr->buffer_info[i].skb) - dev_kfree_skb(rxdr->buffer_info[i].skb); - } - } - - if (txdr->desc) { - dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, - txdr->dma); - txdr->desc = NULL; - } - if (rxdr->desc) { - dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, - rxdr->dma); - rxdr->desc = NULL; - } - - kfree(txdr->buffer_info); - txdr->buffer_info = NULL; - kfree(rxdr->buffer_info); - rxdr->buffer_info = NULL; -} - -static int e1000_setup_desc_rings(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_tx_ring *txdr = &adapter->test_tx_ring; - struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; - struct pci_dev *pdev = adapter->pdev; - u32 rctl; - int i, ret_val; - - /* Setup Tx descriptor ring and Tx buffers */ - - if (!txdr->count) - txdr->count = E1000_DEFAULT_TXD; - - txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer), - GFP_KERNEL); - if (!txdr->buffer_info) { - ret_val = 1; - goto err_nomem; - } - - txdr->size = txdr->count * sizeof(struct e1000_tx_desc); - txdr->size = ALIGN(txdr->size, 4096); - txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, - GFP_KERNEL); - if (!txdr->desc) { - ret_val = 2; - goto err_nomem; - } - memset(txdr->desc, 0, txdr->size); - txdr->next_to_use = txdr->next_to_clean = 0; - - ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); - ew32(TDBAH, ((u64)txdr->dma >> 32)); - ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); - ew32(TDH, 0); - ew32(TDT, 0); - ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | - E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | - E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); - - for (i = 0; i < txdr->count; i++) { - struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); - struct sk_buff *skb; - unsigned int size = 1024; - - skb = alloc_skb(size, GFP_KERNEL); - if (!skb) { - ret_val = 3; - goto err_nomem; - } - skb_put(skb, size); - txdr->buffer_info[i].skb = skb; - txdr->buffer_info[i].length = skb->len; - txdr->buffer_info[i].dma = - dma_map_single(&pdev->dev, skb->data, skb->len, - DMA_TO_DEVICE); - tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); - tx_desc->lower.data = cpu_to_le32(skb->len); - tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | - E1000_TXD_CMD_IFCS | - E1000_TXD_CMD_RPS); - tx_desc->upper.data = 0; - } - - /* Setup Rx descriptor ring and Rx buffers */ - - if (!rxdr->count) - rxdr->count = 
E1000_DEFAULT_RXD; - - rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), - GFP_KERNEL); - if (!rxdr->buffer_info) { - ret_val = 4; - goto err_nomem; - } - - rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); - rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, - GFP_KERNEL); - if (!rxdr->desc) { - ret_val = 5; - goto err_nomem; - } - memset(rxdr->desc, 0, rxdr->size); - rxdr->next_to_use = rxdr->next_to_clean = 0; - - rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); - ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); - ew32(RDBAH, ((u64)rxdr->dma >> 32)); - ew32(RDLEN, rxdr->size); - ew32(RDH, 0); - ew32(RDT, 0); - rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | - E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | - (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); - ew32(RCTL, rctl); - - for (i = 0; i < rxdr->count; i++) { - struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); - struct sk_buff *skb; - - skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); - if (!skb) { - ret_val = 6; - goto err_nomem; - } - skb_reserve(skb, NET_IP_ALIGN); - rxdr->buffer_info[i].skb = skb; - rxdr->buffer_info[i].length = E1000_RXBUFFER_2048; - rxdr->buffer_info[i].dma = - dma_map_single(&pdev->dev, skb->data, - E1000_RXBUFFER_2048, DMA_FROM_DEVICE); - rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); - memset(skb->data, 0x00, skb->len); - } - - return 0; - -err_nomem: - e1000_free_desc_rings(adapter); - return ret_val; -} - -static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - /* Write out to PHY registers 29 and 30 to disable the Receiver. */ - e1000_write_phy_reg(hw, 29, 0x001F); - e1000_write_phy_reg(hw, 30, 0x8FFC); - e1000_write_phy_reg(hw, 29, 0x001A); - e1000_write_phy_reg(hw, 30, 0x8FF0); -} - -static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u16 phy_reg; - - /* Because we reset the PHY above, we need to re-force TX_CLK in the - * Extended PHY Specific Control Register to 25MHz clock. This - * value defaults back to a 2.5MHz clock when the PHY is reset. - */ - e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); - phy_reg |= M88E1000_EPSCR_TX_CLK_25; - e1000_write_phy_reg(hw, - M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); - - /* In addition, because of the s/w reset above, we need to enable - * CRS on TX. This must be set for both full and half duplex - * operation. - */ - e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); - phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - e1000_write_phy_reg(hw, - M88E1000_PHY_SPEC_CTRL, phy_reg); -} - -static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl_reg; - u16 phy_reg; - - /* Setup the Device Control Register for PHY loopback test. */ - - ctrl_reg = er32(CTRL); - ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ - E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ - E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ - E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ - E1000_CTRL_FD); /* Force Duplex to FULL */ - - ew32(CTRL, ctrl_reg); - - /* Read the PHY Specific Control Register (0x10) */ - e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); - - /* Clear Auto-Crossover bits in PHY Specific Control Register - * (bits 6:5). 
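The clear itself is an ordinary read-modify-write of a two-bit field. In isolation, with hypothetical phy_read()/phy_write() helpers standing in for e1000_read_phy_reg()/e1000_write_phy_reg() (the mask below matches bits 6:5, i.e. 0x0060):

        #define PSCR_AUTO_X_MODE  (3u << 5)     /* bits 6:5 of the register */
        u16 reg;
        phy_read(PHY_SPEC_CTRL, &reg);          /* fetch current settings */
        reg &= ~PSCR_AUTO_X_MODE;               /* 00 = manual MDI, no auto-crossover */
        phy_write(PHY_SPEC_CTRL, reg);          /* write the field back */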
- */ - phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; - e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); - - /* Perform software reset on the PHY */ - e1000_phy_reset(hw); - - /* Have to setup TX_CLK and TX_CRS after software reset */ - e1000_phy_reset_clk_and_crs(adapter); - - e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); - - /* Wait for reset to complete. */ - udelay(500); - - /* Have to setup TX_CLK and TX_CRS after software reset */ - e1000_phy_reset_clk_and_crs(adapter); - - /* Write out to PHY registers 29 and 30 to disable the Receiver. */ - e1000_phy_disable_receiver(adapter); - - /* Set the loopback bit in the PHY control register. */ - e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); - phy_reg |= MII_CR_LOOPBACK; - e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); - - /* Setup TX_CLK and TX_CRS one more time. */ - e1000_phy_reset_clk_and_crs(adapter); - - /* Check Phy Configuration */ - e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); - if (phy_reg != 0x4100) - return 9; - - e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); - if (phy_reg != 0x0070) - return 10; - - e1000_read_phy_reg(hw, 29, &phy_reg); - if (phy_reg != 0x001A) - return 11; - - return 0; -} - -static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl_reg = 0; - u32 stat_reg = 0; - - hw->autoneg = false; - - if (hw->phy_type == e1000_phy_m88) { - /* Auto-MDI/MDIX Off */ - e1000_write_phy_reg(hw, - M88E1000_PHY_SPEC_CTRL, 0x0808); - /* reset to update Auto-MDI/MDIX */ - e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); - /* autoneg off */ - e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); - } - - ctrl_reg = er32(CTRL); - - /* force 1000, set loopback */ - e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); - - /* Now set up the MAC to the same speed/duplex as the PHY. */ - ctrl_reg = er32(CTRL); - ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ - ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ - E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ - E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ - E1000_CTRL_FD); /* Force Duplex to FULL */ - - if (hw->media_type == e1000_media_type_copper && - hw->phy_type == e1000_phy_m88) - ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ - else { - /* Set the ILOS bit on the fiber Nic is half - * duplex link is detected. */ - stat_reg = er32(STATUS); - if ((stat_reg & E1000_STATUS_FD) == 0) - ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); - } - - ew32(CTRL, ctrl_reg); - - /* Disable the receiver on the PHY so when a cable is plugged in, the - * PHY does not begin to autoneg when a cable is reconnected to the NIC. - */ - if (hw->phy_type == e1000_phy_m88) - e1000_phy_disable_receiver(adapter); - - udelay(500); - - return 0; -} - -static int e1000_set_phy_loopback(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u16 phy_reg = 0; - u16 count = 0; - - switch (hw->mac_type) { - case e1000_82543: - if (hw->media_type == e1000_media_type_copper) { - /* Attempt to setup Loopback mode on Non-integrated PHY. - * Some PHY registers get corrupted at random, so - * attempt this 10 times. 
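Spelled out as a plain bounded-retry loop, with setup_loopback() as a stand-in for the call below, returning 0 on success:

        int tries;
        for (tries = 0; tries < 10; tries++)
                if (setup_loopback() == 0)
                        return 0;       /* succeeded within the retry budget */
        /* retries exhausted: fall through and report failure */

The while/count form the driver uses is equivalent; its count < 11 check distinguishes "loop ended on success" from "all ten attempts failed".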
- */ - while (e1000_nonintegrated_phy_loopback(adapter) && - count++ < 10); - if (count < 11) - return 0; - } - break; - - case e1000_82544: - case e1000_82540: - case e1000_82545: - case e1000_82545_rev_3: - case e1000_82546: - case e1000_82546_rev_3: - case e1000_82541: - case e1000_82541_rev_2: - case e1000_82547: - case e1000_82547_rev_2: - return e1000_integrated_phy_loopback(adapter); - break; - default: - /* Default PHY loopback work is to read the MII - * control register and assert bit 14 (loopback mode). - */ - e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); - phy_reg |= MII_CR_LOOPBACK; - e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); - return 0; - break; - } - - return 8; -} - -static int e1000_setup_loopback_test(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - - if (hw->media_type == e1000_media_type_fiber || - hw->media_type == e1000_media_type_internal_serdes) { - switch (hw->mac_type) { - case e1000_82545: - case e1000_82546: - case e1000_82545_rev_3: - case e1000_82546_rev_3: - return e1000_set_phy_loopback(adapter); - break; - default: - rctl = er32(RCTL); - rctl |= E1000_RCTL_LBM_TCVR; - ew32(RCTL, rctl); - return 0; - } - } else if (hw->media_type == e1000_media_type_copper) - return e1000_set_phy_loopback(adapter); - - return 7; -} - -static void e1000_loopback_cleanup(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - u16 phy_reg; - - rctl = er32(RCTL); - rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); - ew32(RCTL, rctl); - - switch (hw->mac_type) { - case e1000_82545: - case e1000_82546: - case e1000_82545_rev_3: - case e1000_82546_rev_3: - default: - hw->autoneg = true; - e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); - if (phy_reg & MII_CR_LOOPBACK) { - phy_reg &= ~MII_CR_LOOPBACK; - e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); - e1000_phy_reset(hw); - } - break; - } -} - -static void e1000_create_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - memset(skb->data, 0xFF, frame_size); - frame_size &= ~1; - memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); -} - -static int e1000_check_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - frame_size &= ~1; - if (*(skb->data + 3) == 0xFF) { - if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) { - return 0; - } - } - return 13; -} - -static int e1000_run_loopback_test(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_tx_ring *txdr = &adapter->test_tx_ring; - struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; - struct pci_dev *pdev = adapter->pdev; - int i, j, k, l, lc, good_cnt, ret_val=0; - unsigned long time; - - ew32(RDT, rxdr->count - 1); - - /* Calculate the loop count based on the largest descriptor ring - * The idea is to wrap the largest ring a number of times using 64 - * send/receive pairs during each loop - */ - - if (rxdr->count <= txdr->count) - lc = ((txdr->count / 64) * 2) + 1; - else - lc = ((rxdr->count / 64) * 2) + 1; - - k = l = 0; - for (j = 0; j <= lc; j++) { /* loop count loop */ - for (i = 0; i < 64; i++) { /* send the packets */ - e1000_create_lbtest_frame(txdr->buffer_info[i].skb, - 1024); - dma_sync_single_for_device(&pdev->dev, - txdr->buffer_info[k].dma, - txdr->buffer_info[k].length, - DMA_TO_DEVICE); - if (unlikely(++k == txdr->count)) k = 0; - } - ew32(TDT, k); - E1000_WRITE_FLUSH(); - 
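        /*
         * Worked example of the loop count computed above: with the
         * default 256-entry rings, lc = ((256 / 64) * 2) + 1 = 9, and the
         * j <= lc loop therefore runs 10 passes of 64 frames (640 in
         * all), so each ring wraps two and a half times during the test.
         */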
msleep(200); - time = jiffies; /* set the start time for the receive */ - good_cnt = 0; - do { /* receive the sent packets */ - dma_sync_single_for_cpu(&pdev->dev, - rxdr->buffer_info[l].dma, - rxdr->buffer_info[l].length, - DMA_FROM_DEVICE); - - ret_val = e1000_check_lbtest_frame( - rxdr->buffer_info[l].skb, - 1024); - if (!ret_val) - good_cnt++; - if (unlikely(++l == rxdr->count)) l = 0; - /* time + 20 msecs (200 msecs on 2.4) is more than - * enough time to complete the receives, if it's - * exceeded, break and error off - */ - } while (good_cnt < 64 && jiffies < (time + 20)); - if (good_cnt != 64) { - ret_val = 13; /* ret_val is the same as mis-compare */ - break; - } - if (jiffies >= (time + 2)) { - ret_val = 14; /* error code for time out error */ - break; - } - } /* end loop count loop */ - return ret_val; -} - -static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) -{ - *data = e1000_setup_desc_rings(adapter); - if (*data) - goto out; - *data = e1000_setup_loopback_test(adapter); - if (*data) - goto err_loopback; - *data = e1000_run_loopback_test(adapter); - e1000_loopback_cleanup(adapter); - -err_loopback: - e1000_free_desc_rings(adapter); -out: - return *data; -} - -static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) -{ - struct e1000_hw *hw = &adapter->hw; - *data = 0; - if (hw->media_type == e1000_media_type_internal_serdes) { - int i = 0; - hw->serdes_has_link = false; - - /* On some blade server designs, link establishment - * could take as long as 2-3 minutes */ - do { - e1000_check_for_link(hw); - if (hw->serdes_has_link) - return *data; - msleep(20); - } while (i++ < 3750); - - *data = 1; - } else { - e1000_check_for_link(hw); - if (hw->autoneg) /* if auto_neg is set wait for it */ - msleep(4000); - - if (!(er32(STATUS) & E1000_STATUS_LU)) { - *data = 1; - } - } - return *data; -} - -static int e1000_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return E1000_TEST_LEN; - case ETH_SS_STATS: - return E1000_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static void e1000_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - bool if_running = netif_running(netdev); - - set_bit(__E1000_TESTING, &adapter->flags); - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - - /* save speed, duplex, autoneg settings */ - u16 autoneg_advertised = hw->autoneg_advertised; - u8 forced_speed_duplex = hw->forced_speed_duplex; - u8 autoneg = hw->autoneg; - - e_info(hw, "offline testing starting\n"); - - /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ - if (e1000_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - if (if_running) - /* indicate we're in test mode */ - dev_close(netdev); - else - e1000_reset(adapter); - - if (e1000_reg_test(adapter, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - e1000_reset(adapter); - if (e1000_eeprom_test(adapter, &data[1])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - e1000_reset(adapter); - if (e1000_intr_test(adapter, &data[2])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - e1000_reset(adapter); - /* make sure the phy is powered up */ - e1000_power_up_phy(adapter); - if (e1000_loopback_test(adapter, &data[3])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* restore speed, duplex, autoneg settings */ - hw->autoneg_advertised = autoneg_advertised; - 
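        /*
         * This restore block mirrors the save of autoneg_advertised,
         * forced_speed_duplex and autoneg at the top of the offline
         * path: every link parameter a sub-test forced is put back
         * before the final reset brings the interface up again.
         */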
hw->forced_speed_duplex = forced_speed_duplex; - hw->autoneg = autoneg; - - e1000_reset(adapter); - clear_bit(__E1000_TESTING, &adapter->flags); - if (if_running) - dev_open(netdev); - } else { - e_info(hw, "online testing starting\n"); - /* Online tests */ - if (e1000_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* Online tests aren't run; pass by default */ - data[0] = 0; - data[1] = 0; - data[2] = 0; - data[3] = 0; - - clear_bit(__E1000_TESTING, &adapter->flags); - } - msleep_interruptible(4 * 1000); -} - -static int e1000_wol_exclusion(struct e1000_adapter *adapter, - struct ethtool_wolinfo *wol) -{ - struct e1000_hw *hw = &adapter->hw; - int retval = 1; /* fail by default */ - - switch (hw->device_id) { - case E1000_DEV_ID_82542: - case E1000_DEV_ID_82543GC_FIBER: - case E1000_DEV_ID_82543GC_COPPER: - case E1000_DEV_ID_82544EI_FIBER: - case E1000_DEV_ID_82546EB_QUAD_COPPER: - case E1000_DEV_ID_82545EM_FIBER: - case E1000_DEV_ID_82545EM_COPPER: - case E1000_DEV_ID_82546GB_QUAD_COPPER: - case E1000_DEV_ID_82546GB_PCIE: - /* these don't support WoL at all */ - wol->supported = 0; - break; - case E1000_DEV_ID_82546EB_FIBER: - case E1000_DEV_ID_82546GB_FIBER: - /* Wake events not supported on port B */ - if (er32(STATUS) & E1000_STATUS_FUNC_1) { - wol->supported = 0; - break; - } - /* return success for non excluded adapter ports */ - retval = 0; - break; - case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: - /* quad port adapters only support WoL on port A */ - if (!adapter->quad_port_a) { - wol->supported = 0; - break; - } - /* return success for non excluded adapter ports */ - retval = 0; - break; - default: - /* dual port cards only support WoL on port A from now on - * unless it was enabled in the eeprom for port B - * so exclude FUNC_1 ports from having WoL enabled */ - if (er32(STATUS) & E1000_STATUS_FUNC_1 && - !adapter->eeprom_wol) { - wol->supported = 0; - break; - } - - retval = 0; - } - - return retval; -} - -static void e1000_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - wol->supported = WAKE_UCAST | WAKE_MCAST | - WAKE_BCAST | WAKE_MAGIC; - wol->wolopts = 0; - - /* this function will set ->supported = 0 and return 1 if wol is not - * supported by this hardware */ - if (e1000_wol_exclusion(adapter, wol) || - !device_can_wakeup(&adapter->pdev->dev)) - return; - - /* apply any specific unsupported masks here */ - switch (hw->device_id) { - case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: - /* KSP3 does not suppport UCAST wake-ups */ - wol->supported &= ~WAKE_UCAST; - - if (adapter->wol & E1000_WUFC_EX) - e_err(drv, "Interface does not support directed " - "(unicast) frame wake-up packets\n"); - break; - default: - break; - } - - if (adapter->wol & E1000_WUFC_EX) - wol->wolopts |= WAKE_UCAST; - if (adapter->wol & E1000_WUFC_MC) - wol->wolopts |= WAKE_MCAST; - if (adapter->wol & E1000_WUFC_BC) - wol->wolopts |= WAKE_BCAST; - if (adapter->wol & E1000_WUFC_MAG) - wol->wolopts |= WAKE_MAGIC; -} - -static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) - return -EOPNOTSUPP; - - if (e1000_wol_exclusion(adapter, wol) || - !device_can_wakeup(&adapter->pdev->dev)) - return wol->wolopts ? 
-EOPNOTSUPP : 0; - - switch (hw->device_id) { - case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: - if (wol->wolopts & WAKE_UCAST) { - e_err(drv, "Interface does not support directed " - "(unicast) frame wake-up packets\n"); - return -EOPNOTSUPP; - } - break; - default: - break; - } - - /* these settings will always override what we currently have */ - adapter->wol = 0; - - if (wol->wolopts & WAKE_UCAST) - adapter->wol |= E1000_WUFC_EX; - if (wol->wolopts & WAKE_MCAST) - adapter->wol |= E1000_WUFC_MC; - if (wol->wolopts & WAKE_BCAST) - adapter->wol |= E1000_WUFC_BC; - if (wol->wolopts & WAKE_MAGIC) - adapter->wol |= E1000_WUFC_MAG; - - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); - - return 0; -} - -static int e1000_set_phys_id(struct net_device *netdev, - enum ethtool_phys_id_state state) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - e1000_setup_led(hw); - return 2; - - case ETHTOOL_ID_ON: - e1000_led_on(hw); - break; - - case ETHTOOL_ID_OFF: - e1000_led_off(hw); - break; - - case ETHTOOL_ID_INACTIVE: - e1000_cleanup_led(hw); - } - - return 0; -} - -static int e1000_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (adapter->hw.mac_type < e1000_82545) - return -EOPNOTSUPP; - - if (adapter->itr_setting <= 4) - ec->rx_coalesce_usecs = adapter->itr_setting; - else - ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; - - return 0; -} - -static int e1000_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - if (hw->mac_type < e1000_82545) - return -EOPNOTSUPP; - - if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || - ((ec->rx_coalesce_usecs > 4) && - (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || - (ec->rx_coalesce_usecs == 2)) - return -EINVAL; - - if (ec->rx_coalesce_usecs == 4) { - adapter->itr = adapter->itr_setting = 4; - } else if (ec->rx_coalesce_usecs <= 3) { - adapter->itr = 20000; - adapter->itr_setting = ec->rx_coalesce_usecs; - } else { - adapter->itr = (1000000 / ec->rx_coalesce_usecs); - adapter->itr_setting = adapter->itr & ~3; - } - - if (adapter->itr_setting != 0) - ew32(ITR, 1000000000 / (adapter->itr * 256)); - else - ew32(ITR, 0); - - return 0; -} - -static int e1000_nway_reset(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - if (netif_running(netdev)) - e1000_reinit_locked(adapter); - return 0; -} - -static void e1000_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - int i; - char *p = NULL; - - e1000_update_stats(adapter); - for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { - switch (e1000_gstrings_stats[i].type) { - case NETDEV_STATS: - p = (char *) netdev + - e1000_gstrings_stats[i].stat_offset; - break; - case E1000_STATS: - p = (char *) adapter + - e1000_gstrings_stats[i].stat_offset; - break; - } - - data[i] = (e1000_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; - } -/* BUG_ON(i != E1000_STATS_LEN); */ -} - -static void e1000_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *e1000_gstrings_test, - sizeof(e1000_gstrings_test)); - break; - case ETH_SS_STATS: - for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { - memcpy(p, e1000_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } -/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ - break; - } -} - -static const struct ethtool_ops e1000_ethtool_ops = { - .get_settings = e1000_get_settings, - .set_settings = e1000_set_settings, - .get_drvinfo = e1000_get_drvinfo, - .get_regs_len = e1000_get_regs_len, - .get_regs = e1000_get_regs, - .get_wol = e1000_get_wol, - .set_wol = e1000_set_wol, - .get_msglevel = e1000_get_msglevel, - .set_msglevel = e1000_set_msglevel, - .nway_reset = e1000_nway_reset, - .get_link = e1000_get_link, - .get_eeprom_len = e1000_get_eeprom_len, - .get_eeprom = e1000_get_eeprom, - .set_eeprom = e1000_set_eeprom, - .get_ringparam = e1000_get_ringparam, - .set_ringparam = e1000_set_ringparam, - .get_pauseparam = e1000_get_pauseparam, - .set_pauseparam = e1000_set_pauseparam, - .self_test = e1000_diag_test, - .get_strings = e1000_get_strings, - .set_phys_id = e1000_set_phys_id, - .get_ethtool_stats = e1000_get_ethtool_stats, - .get_sset_count = e1000_get_sset_count, - .get_coalesce = e1000_get_coalesce, - .set_coalesce = e1000_set_coalesce, -}; - -void e1000_set_ethtool_ops(struct net_device *netdev) -{ - SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); -} diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c deleted file mode 100644 index 8545c7a..0000000 --- a/drivers/net/e1000/e1000_hw.c +++ /dev/null @@ -1,5824 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - - */ - -/* e1000_hw.c - * Shared functions for accessing and configuring the MAC - */ - -#include "e1000.h" - -static s32 e1000_check_downshift(struct e1000_hw *hw); -static s32 e1000_check_polarity(struct e1000_hw *hw, - e1000_rev_polarity *polarity); -static void e1000_clear_hw_cntrs(struct e1000_hw *hw); -static void e1000_clear_vfta(struct e1000_hw *hw); -static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, - bool link_up); -static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); -static s32 e1000_detect_gig_phy(struct e1000_hw *hw); -static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); -static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, - u16 *max_length); -static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); -static s32 e1000_id_led_init(struct e1000_hw *hw); -static void e1000_init_rx_addrs(struct e1000_hw *hw); -static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, - struct e1000_phy_info *phy_info); -static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, - struct e1000_phy_info *phy_info); -static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); -static s32 e1000_wait_autoneg(struct e1000_hw *hw); -static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); -static s32 e1000_set_phy_type(struct e1000_hw *hw); -static void e1000_phy_init_script(struct e1000_hw *hw); -static s32 e1000_setup_copper_link(struct e1000_hw *hw); -static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); -static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); -static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); -static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); -static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); -static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); -static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); -static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); -static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); -static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, - u16 words, u16 *data); -static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, - u16 words, u16 *data); -static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); -static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); -static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); -static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); -static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, - u16 phy_data); -static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, - u16 *phy_data); -static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); -static s32 e1000_acquire_eeprom(struct e1000_hw *hw); -static void e1000_release_eeprom(struct e1000_hw *hw); -static void e1000_standby_eeprom(struct e1000_hw *hw); -static s32 e1000_set_vco_speed(struct e1000_hw *hw); -static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); -static s32 e1000_set_phy_mode(struct e1000_hw *hw); -static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data); -static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data); - -/* IGP cable length table */ -static const -u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, - 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 
40, 40, 40, 40, 40, 40, - 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, - 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, - 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, - 100, - 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, - 110, 110, - 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, - 120, 120 -}; - -static DEFINE_SPINLOCK(e1000_eeprom_lock); - -/** - * e1000_set_phy_type - Set the phy type member in the hw struct. - * @hw: Struct containing variables accessed by shared code - */ -static s32 e1000_set_phy_type(struct e1000_hw *hw) -{ - e_dbg("e1000_set_phy_type"); - - if (hw->mac_type == e1000_undefined) - return -E1000_ERR_PHY_TYPE; - - switch (hw->phy_id) { - case M88E1000_E_PHY_ID: - case M88E1000_I_PHY_ID: - case M88E1011_I_PHY_ID: - case M88E1111_I_PHY_ID: - case M88E1118_E_PHY_ID: - hw->phy_type = e1000_phy_m88; - break; - case IGP01E1000_I_PHY_ID: - if (hw->mac_type == e1000_82541 || - hw->mac_type == e1000_82541_rev_2 || - hw->mac_type == e1000_82547 || - hw->mac_type == e1000_82547_rev_2) - hw->phy_type = e1000_phy_igp; - break; - case RTL8211B_PHY_ID: - hw->phy_type = e1000_phy_8211; - break; - case RTL8201N_PHY_ID: - hw->phy_type = e1000_phy_8201; - break; - default: - /* Should never have loaded on this device */ - hw->phy_type = e1000_phy_undefined; - return -E1000_ERR_PHY_TYPE; - } - - return E1000_SUCCESS; -} - -/** - * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY - * @hw: Struct containing variables accessed by shared code - */ -static void e1000_phy_init_script(struct e1000_hw *hw) -{ - u32 ret_val; - u16 phy_saved_data; - - e_dbg("e1000_phy_init_script"); - - if (hw->phy_init_script) { - msleep(20); - - /* Save off the current value of register 0x2F5B to be restored at - * the end of this routine. 
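The cable-length table above is consumed by e1000_get_cable_length() (declared earlier); conceptually it is a direct index-to-meters lookup. A sketch, assuming the index has already been derived from the IGP01 AGC registers (the helper name and the clamping policy are illustrative, not from the driver):

        static u16 igp_cable_length(u16 agc_index)
        {
                /* Clamp rather than fault on an out-of-range reading. */
                if (agc_index >= IGP01E1000_AGC_LENGTH_TABLE_SIZE)
                        agc_index = IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1;
                return e1000_igp_cable_length_table[agc_index];
        }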
*/ - ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); - - /* Disabled the PHY transmitter */ - e1000_write_phy_reg(hw, 0x2F5B, 0x0003); - msleep(20); - - e1000_write_phy_reg(hw, 0x0000, 0x0140); - msleep(5); - - switch (hw->mac_type) { - case e1000_82541: - case e1000_82547: - e1000_write_phy_reg(hw, 0x1F95, 0x0001); - e1000_write_phy_reg(hw, 0x1F71, 0xBD21); - e1000_write_phy_reg(hw, 0x1F79, 0x0018); - e1000_write_phy_reg(hw, 0x1F30, 0x1600); - e1000_write_phy_reg(hw, 0x1F31, 0x0014); - e1000_write_phy_reg(hw, 0x1F32, 0x161C); - e1000_write_phy_reg(hw, 0x1F94, 0x0003); - e1000_write_phy_reg(hw, 0x1F96, 0x003F); - e1000_write_phy_reg(hw, 0x2010, 0x0008); - break; - - case e1000_82541_rev_2: - case e1000_82547_rev_2: - e1000_write_phy_reg(hw, 0x1F73, 0x0099); - break; - default: - break; - } - - e1000_write_phy_reg(hw, 0x0000, 0x3300); - msleep(20); - - /* Now enable the transmitter */ - e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); - - if (hw->mac_type == e1000_82547) { - u16 fused, fine, coarse; - - /* Move to analog registers page */ - e1000_read_phy_reg(hw, - IGP01E1000_ANALOG_SPARE_FUSE_STATUS, - &fused); - - if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { - e1000_read_phy_reg(hw, - IGP01E1000_ANALOG_FUSE_STATUS, - &fused); - - fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; - coarse = - fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; - - if (coarse > - IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { - coarse -= - IGP01E1000_ANALOG_FUSE_COARSE_10; - fine -= IGP01E1000_ANALOG_FUSE_FINE_1; - } else if (coarse == - IGP01E1000_ANALOG_FUSE_COARSE_THRESH) - fine -= IGP01E1000_ANALOG_FUSE_FINE_10; - - fused = - (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | - (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | - (coarse & - IGP01E1000_ANALOG_FUSE_COARSE_MASK); - - e1000_write_phy_reg(hw, - IGP01E1000_ANALOG_FUSE_CONTROL, - fused); - e1000_write_phy_reg(hw, - IGP01E1000_ANALOG_FUSE_BYPASS, - IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); - } - } - } -} - -/** - * e1000_set_mac_type - Set the mac type member in the hw struct. 
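Stripped of the per-MAC magic values, e1000_phy_init_script() is a save/patch/restore sequence around PHY register 0x2F5B: the transmitter is held off while the undocumented registers are programmed, then the saved value is written back. In outline (error handling omitted):

        u16 saved;

        e1000_read_phy_reg(hw, 0x2F5B, &saved);    /* save transmitter ctrl */
        e1000_write_phy_reg(hw, 0x2F5B, 0x0003);   /* disable transmitter   */
        /* ... per-MAC init writes, as listed above ... */
        e1000_write_phy_reg(hw, 0x2F5B, saved);    /* re-enable transmitter */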
- * @hw: Struct containing variables accessed by shared code - */ -s32 e1000_set_mac_type(struct e1000_hw *hw) -{ - e_dbg("e1000_set_mac_type"); - - switch (hw->device_id) { - case E1000_DEV_ID_82542: - switch (hw->revision_id) { - case E1000_82542_2_0_REV_ID: - hw->mac_type = e1000_82542_rev2_0; - break; - case E1000_82542_2_1_REV_ID: - hw->mac_type = e1000_82542_rev2_1; - break; - default: - /* Invalid 82542 revision ID */ - return -E1000_ERR_MAC_TYPE; - } - break; - case E1000_DEV_ID_82543GC_FIBER: - case E1000_DEV_ID_82543GC_COPPER: - hw->mac_type = e1000_82543; - break; - case E1000_DEV_ID_82544EI_COPPER: - case E1000_DEV_ID_82544EI_FIBER: - case E1000_DEV_ID_82544GC_COPPER: - case E1000_DEV_ID_82544GC_LOM: - hw->mac_type = e1000_82544; - break; - case E1000_DEV_ID_82540EM: - case E1000_DEV_ID_82540EM_LOM: - case E1000_DEV_ID_82540EP: - case E1000_DEV_ID_82540EP_LOM: - case E1000_DEV_ID_82540EP_LP: - hw->mac_type = e1000_82540; - break; - case E1000_DEV_ID_82545EM_COPPER: - case E1000_DEV_ID_82545EM_FIBER: - hw->mac_type = e1000_82545; - break; - case E1000_DEV_ID_82545GM_COPPER: - case E1000_DEV_ID_82545GM_FIBER: - case E1000_DEV_ID_82545GM_SERDES: - hw->mac_type = e1000_82545_rev_3; - break; - case E1000_DEV_ID_82546EB_COPPER: - case E1000_DEV_ID_82546EB_FIBER: - case E1000_DEV_ID_82546EB_QUAD_COPPER: - hw->mac_type = e1000_82546; - break; - case E1000_DEV_ID_82546GB_COPPER: - case E1000_DEV_ID_82546GB_FIBER: - case E1000_DEV_ID_82546GB_SERDES: - case E1000_DEV_ID_82546GB_PCIE: - case E1000_DEV_ID_82546GB_QUAD_COPPER: - case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: - hw->mac_type = e1000_82546_rev_3; - break; - case E1000_DEV_ID_82541EI: - case E1000_DEV_ID_82541EI_MOBILE: - case E1000_DEV_ID_82541ER_LOM: - hw->mac_type = e1000_82541; - break; - case E1000_DEV_ID_82541ER: - case E1000_DEV_ID_82541GI: - case E1000_DEV_ID_82541GI_LF: - case E1000_DEV_ID_82541GI_MOBILE: - hw->mac_type = e1000_82541_rev_2; - break; - case E1000_DEV_ID_82547EI: - case E1000_DEV_ID_82547EI_MOBILE: - hw->mac_type = e1000_82547; - break; - case E1000_DEV_ID_82547GI: - hw->mac_type = e1000_82547_rev_2; - break; - case E1000_DEV_ID_INTEL_CE4100_GBE: - hw->mac_type = e1000_ce4100; - break; - default: - /* Should never have loaded on this device */ - return -E1000_ERR_MAC_TYPE; - } - - switch (hw->mac_type) { - case e1000_82541: - case e1000_82547: - case e1000_82541_rev_2: - case e1000_82547_rev_2: - hw->asf_firmware_present = true; - break; - default: - break; - } - - /* The 82543 chip does not count tx_carrier_errors properly in - * FD mode - */ - if (hw->mac_type == e1000_82543) - hw->bad_tx_carr_stats_fd = true; - - if (hw->mac_type > e1000_82544) - hw->has_smbus = true; - - return E1000_SUCCESS; -} - -/** - * e1000_set_media_type - Set media type and TBI compatibility. 
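e1000_set_mac_type() is the first classification step; every later branch keys off hw->mac_type rather than raw PCI device IDs. A minimal caller sketch (the device ID is picked arbitrarily for illustration):

        struct e1000_hw hw = { 0 };

        hw.device_id = E1000_DEV_ID_82545EM_COPPER;
        if (e1000_set_mac_type(&hw) != E1000_SUCCESS)
                return -EIO;            /* driver loaded on unknown silicon */
        /* hw.mac_type is now e1000_82545 */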
- * @hw: Struct containing variables accessed by shared code - */ -void e1000_set_media_type(struct e1000_hw *hw) -{ - u32 status; - - e_dbg("e1000_set_media_type"); - - if (hw->mac_type != e1000_82543) { - /* tbi_compatibility is only valid on 82543 */ - hw->tbi_compatibility_en = false; - } - - switch (hw->device_id) { - case E1000_DEV_ID_82545GM_SERDES: - case E1000_DEV_ID_82546GB_SERDES: - hw->media_type = e1000_media_type_internal_serdes; - break; - default: - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - hw->media_type = e1000_media_type_fiber; - break; - case e1000_ce4100: - hw->media_type = e1000_media_type_copper; - break; - default: - status = er32(STATUS); - if (status & E1000_STATUS_TBIMODE) { - hw->media_type = e1000_media_type_fiber; - /* tbi_compatibility not valid on fiber */ - hw->tbi_compatibility_en = false; - } else { - hw->media_type = e1000_media_type_copper; - } - break; - } - } -} - -/** - * e1000_reset_hw: reset the hardware completely - * @hw: Struct containing variables accessed by shared code - * - * Reset the transmit and receive units; mask and clear all interrupts. - */ -s32 e1000_reset_hw(struct e1000_hw *hw) -{ - u32 ctrl; - u32 ctrl_ext; - u32 icr; - u32 manc; - u32 led_ctrl; - s32 ret_val; - - e_dbg("e1000_reset_hw"); - - /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ - if (hw->mac_type == e1000_82542_rev2_0) { - e_dbg("Disabling MWI on 82542 rev 2.0\n"); - e1000_pci_clear_mwi(hw); - } - - /* Clear interrupt mask to stop board from generating interrupts */ - e_dbg("Masking off all interrupts\n"); - ew32(IMC, 0xffffffff); - - /* Disable the Transmit and Receive units. Then delay to allow - * any pending transactions to complete before we hit the MAC with - * the global reset. - */ - ew32(RCTL, 0); - ew32(TCTL, E1000_TCTL_PSP); - E1000_WRITE_FLUSH(); - - /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ - hw->tbi_compatibility_on = false; - - /* Delay to allow any outstanding PCI transactions to complete before - * resetting the device - */ - msleep(10); - - ctrl = er32(CTRL); - - /* Must reset the PHY before resetting the MAC */ - if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { - ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); - E1000_WRITE_FLUSH(); - msleep(5); - } - - /* Issue a global reset to the MAC. This will reset the chip's - * transmit, receive, DMA, and link units. It will not effect - * the current PCI configuration. The global reset bit is self- - * clearing, and should clear within a microsecond. - */ - e_dbg("Issuing a global reset to MAC\n"); - - switch (hw->mac_type) { - case e1000_82544: - case e1000_82540: - case e1000_82545: - case e1000_82546: - case e1000_82541: - case e1000_82541_rev_2: - /* These controllers can't ack the 64-bit write when issuing the - * reset, so use IO-mapping as a workaround to issue the reset */ - E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); - break; - case e1000_82545_rev_3: - case e1000_82546_rev_3: - /* Reset is performed on a shadow of the control register */ - ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); - break; - case e1000_ce4100: - default: - ew32(CTRL, (ctrl | E1000_CTRL_RST)); - break; - } - - /* After MAC reset, force reload of EEPROM to restore power-on settings to - * device. Later controllers reload the EEPROM automatically, so just wait - * for reload to complete. 
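The ordering in e1000_reset_hw() is the point: interrupts are masked and the Rx/Tx units quiesced before the global reset, with an explicit flush so the writes reach the device before the settle delay begins. Condensed:

        ew32(IMC, 0xffffffff);          /* mask every interrupt source  */
        ew32(RCTL, 0);                  /* stop the receive unit        */
        ew32(TCTL, E1000_TCTL_PSP);     /* let pending transmits drain  */
        E1000_WRITE_FLUSH();            /* post the writes              */
        msleep(10);                     /* outstanding PCI cycles done  */
        ew32(CTRL, er32(CTRL) | E1000_CTRL_RST);   /* global reset      */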
- */ - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - case e1000_82543: - case e1000_82544: - /* Wait for reset to complete */ - udelay(10); - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_EE_RST; - ew32(CTRL_EXT, ctrl_ext); - E1000_WRITE_FLUSH(); - /* Wait for EEPROM reload */ - msleep(2); - break; - case e1000_82541: - case e1000_82541_rev_2: - case e1000_82547: - case e1000_82547_rev_2: - /* Wait for EEPROM reload */ - msleep(20); - break; - default: - /* Auto read done will delay 5ms or poll based on mac type */ - ret_val = e1000_get_auto_rd_done(hw); - if (ret_val) - return ret_val; - break; - } - - /* Disable HW ARPs on ASF enabled adapters */ - if (hw->mac_type >= e1000_82540) { - manc = er32(MANC); - manc &= ~(E1000_MANC_ARP_EN); - ew32(MANC, manc); - } - - if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { - e1000_phy_init_script(hw); - - /* Configure activity LED after PHY reset */ - led_ctrl = er32(LEDCTL); - led_ctrl &= IGP_ACTIVITY_LED_MASK; - led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); - ew32(LEDCTL, led_ctrl); - } - - /* Clear interrupt mask to stop board from generating interrupts */ - e_dbg("Masking off all interrupts\n"); - ew32(IMC, 0xffffffff); - - /* Clear any pending interrupt events. */ - icr = er32(ICR); - - /* If MWI was previously enabled, reenable it. */ - if (hw->mac_type == e1000_82542_rev2_0) { - if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) - e1000_pci_set_mwi(hw); - } - - return E1000_SUCCESS; -} - -/** - * e1000_init_hw: Performs basic configuration of the adapter. - * @hw: Struct containing variables accessed by shared code - * - * Assumes that the controller has previously been reset and is in a - * post-reset uninitialized state. Initializes the receive address registers, - * multicast table, and VLAN filter table. Calls routines to setup link - * configuration and flow control settings. Clears all on-chip counters. Leaves - * the transmit and receive units disabled and uninitialized. - */ -s32 e1000_init_hw(struct e1000_hw *hw) -{ - u32 ctrl; - u32 i; - s32 ret_val; - u32 mta_size; - u32 ctrl_ext; - - e_dbg("e1000_init_hw"); - - /* Initialize Identification LED */ - ret_val = e1000_id_led_init(hw); - if (ret_val) { - e_dbg("Error Initializing Identification LED\n"); - return ret_val; - } - - /* Set the media type and TBI compatibility */ - e1000_set_media_type(hw); - - /* Disabling VLAN filtering. */ - e_dbg("Initializing the IEEE VLAN\n"); - if (hw->mac_type < e1000_82545_rev_3) - ew32(VET, 0); - e1000_clear_vfta(hw); - - /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ - if (hw->mac_type == e1000_82542_rev2_0) { - e_dbg("Disabling MWI on 82542 rev 2.0\n"); - e1000_pci_clear_mwi(hw); - ew32(RCTL, E1000_RCTL_RST); - E1000_WRITE_FLUSH(); - msleep(5); - } - - /* Setup the receive address. This involves initializing all of the Receive - * Address Registers (RARs 0 - 15). 
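Because e1000_init_hw() assumes a freshly reset, uninitialized controller, the two entry points always pair in this order:

        ret_val = e1000_reset_hw(hw);
        if (ret_val == E1000_SUCCESS)
                ret_val = e1000_init_hw(hw);    /* RARs, MTA, VFTA, link, FC */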
- */ - e1000_init_rx_addrs(hw); - - /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ - if (hw->mac_type == e1000_82542_rev2_0) { - ew32(RCTL, 0); - E1000_WRITE_FLUSH(); - msleep(1); - if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) - e1000_pci_set_mwi(hw); - } - - /* Zero out the Multicast HASH table */ - e_dbg("Zeroing the MTA\n"); - mta_size = E1000_MC_TBL_SIZE; - for (i = 0; i < mta_size; i++) { - E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); - /* use write flush to prevent Memory Write Block (MWB) from - * occurring when accessing our register space */ - E1000_WRITE_FLUSH(); - } - - /* Set the PCI priority bit correctly in the CTRL register. This - * determines if the adapter gives priority to receives, or if it - * gives equal priority to transmits and receives. Valid only on - * 82542 and 82543 silicon. - */ - if (hw->dma_fairness && hw->mac_type <= e1000_82543) { - ctrl = er32(CTRL); - ew32(CTRL, ctrl | E1000_CTRL_PRIOR); - } - - switch (hw->mac_type) { - case e1000_82545_rev_3: - case e1000_82546_rev_3: - break; - default: - /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ - if (hw->bus_type == e1000_bus_type_pcix - && e1000_pcix_get_mmrbc(hw) > 2048) - e1000_pcix_set_mmrbc(hw, 2048); - break; - } - - /* Call a subroutine to configure the link and setup flow control. */ - ret_val = e1000_setup_link(hw); - - /* Set the transmit descriptor write-back policy */ - if (hw->mac_type > e1000_82544) { - ctrl = er32(TXDCTL); - ctrl = - (ctrl & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB; - ew32(TXDCTL, ctrl); - } - - /* Clear all of the statistics registers (clear on read). It is - * important that we do this after we have tried to establish link - * because the symbol error count will increment wildly if there - * is no link. - */ - e1000_clear_hw_cntrs(hw); - - if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || - hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { - ctrl_ext = er32(CTRL_EXT); - /* Relaxed ordering must be disabled to avoid a parity - * error crash in a PCI slot. */ - ctrl_ext |= E1000_CTRL_EXT_RO_DIS; - ew32(CTRL_EXT, ctrl_ext); - } - - return ret_val; -} - -/** - * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. - * @hw: Struct containing variables accessed by shared code. - */ -static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) -{ - u16 eeprom_data; - s32 ret_val; - - e_dbg("e1000_adjust_serdes_amplitude"); - - if (hw->media_type != e1000_media_type_internal_serdes) - return E1000_SUCCESS; - - switch (hw->mac_type) { - case e1000_82545_rev_3: - case e1000_82546_rev_3: - break; - default: - return E1000_SUCCESS; - } - - ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, - &eeprom_data); - if (ret_val) { - return ret_val; - } - - if (eeprom_data != EEPROM_RESERVED_WORD) { - /* Adjust SERDES output amplitude only. */ - eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; - ret_val = - e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); - if (ret_val) - return ret_val; - } - - return E1000_SUCCESS; -} - -/** - * e1000_setup_link - Configures flow control and link settings. - * @hw: Struct containing variables accessed by shared code - * - * Determines which flow control settings to use. Calls the appropriate media- - * specific link configuration function. Configures the flow control settings. - * Assuming the adapter has a valid link partner, a valid link should be - * established. 
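The flush inside the MTA loop is not optional: as the in-line comment notes, it prevents the zeroing writes from being merged into a Memory Write Block while walking this register array. The idiom:

        for (i = 0; i < E1000_MC_TBL_SIZE; i++) {
                E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
                E1000_WRITE_FLUSH();    /* defeat write combining */
        }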
Assumes the hardware has previously been reset and the - * transmitter and receiver are not enabled. - */ -s32 e1000_setup_link(struct e1000_hw *hw) -{ - u32 ctrl_ext; - s32 ret_val; - u16 eeprom_data; - - e_dbg("e1000_setup_link"); - - /* Read and store word 0x0F of the EEPROM. This word contains bits - * that determine the hardware's default PAUSE (flow control) mode, - * a bit that determines whether the HW defaults to enabling or - * disabling auto-negotiation, and the direction of the - * SW defined pins. If there is no SW over-ride of the flow - * control setting, then the variable hw->fc will - * be initialized based on a value in the EEPROM. - */ - if (hw->fc == E1000_FC_DEFAULT) { - ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, - 1, &eeprom_data); - if (ret_val) { - e_dbg("EEPROM Read Error\n"); - return -E1000_ERR_EEPROM; - } - if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) - hw->fc = E1000_FC_NONE; - else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == - EEPROM_WORD0F_ASM_DIR) - hw->fc = E1000_FC_TX_PAUSE; - else - hw->fc = E1000_FC_FULL; - } - - /* We want to save off the original Flow Control configuration just - * in case we get disconnected and then reconnected into a different - * hub or switch with different Flow Control capabilities. - */ - if (hw->mac_type == e1000_82542_rev2_0) - hw->fc &= (~E1000_FC_TX_PAUSE); - - if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) - hw->fc &= (~E1000_FC_RX_PAUSE); - - hw->original_fc = hw->fc; - - e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc); - - /* Take the 4 bits from EEPROM word 0x0F that determine the initial - * polarity value for the SW controlled pins, and setup the - * Extended Device Control reg with that info. - * This is needed because one of the SW controlled pins is used for - * signal detection. So this should be done before e1000_setup_pcs_link() - * or e1000_phy_setup() is called. - */ - if (hw->mac_type == e1000_82543) { - ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, - 1, &eeprom_data); - if (ret_val) { - e_dbg("EEPROM Read Error\n"); - return -E1000_ERR_EEPROM; - } - ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << - SWDPIO__EXT_SHIFT); - ew32(CTRL_EXT, ctrl_ext); - } - - /* Call the necessary subroutine to configure the link. */ - ret_val = (hw->media_type == e1000_media_type_copper) ? - e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw); - - /* Initialize the flow control address, type, and PAUSE timer - * registers to their default values. This is done even if flow - * control is disabled, because it does not hurt anything to - * initialize these registers. - */ - e_dbg("Initializing the Flow Control address, type and timer regs\n"); - - ew32(FCT, FLOW_CONTROL_TYPE); - ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); - ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); - - ew32(FCTTV, hw->fc_pause_time); - - /* Set the flow control receive threshold registers. Normally, - * these registers will be set to a default threshold that may be - * adjusted later by the driver's runtime code. However, if the - * ability to transmit pause frames in not enabled, then these - * registers will be set to 0. - */ - if (!(hw->fc & E1000_FC_TX_PAUSE)) { - ew32(FCRTL, 0); - ew32(FCRTH, 0); - } else { - /* We need to set up the Receive Threshold high and low water marks - * as well as (optionally) enabling the transmission of XON frames. 
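The EEPROM default decode above collapses to a three-way test on the two PAUSE bits of word 0x0F; restated as a switch:

        switch (eeprom_data & EEPROM_WORD0F_PAUSE_MASK) {
        case 0:                         /* neither bit set           */
                hw->fc = E1000_FC_NONE;
                break;
        case EEPROM_WORD0F_ASM_DIR:     /* asymmetric direction only */
                hw->fc = E1000_FC_TX_PAUSE;
                break;
        default:                        /* PAUSE set, with or without ASM_DIR */
                hw->fc = E1000_FC_FULL;
        }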
- */ - if (hw->fc_send_xon) { - ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); - ew32(FCRTH, hw->fc_high_water); - } else { - ew32(FCRTL, hw->fc_low_water); - ew32(FCRTH, hw->fc_high_water); - } - } - return ret_val; -} - -/** - * e1000_setup_fiber_serdes_link - prepare fiber or serdes link - * @hw: Struct containing variables accessed by shared code - * - * Manipulates Physical Coding Sublayer functions in order to configure - * link. Assumes the hardware has been previously reset and the transmitter - * and receiver are not enabled. - */ -static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) -{ - u32 ctrl; - u32 status; - u32 txcw = 0; - u32 i; - u32 signal = 0; - s32 ret_val; - - e_dbg("e1000_setup_fiber_serdes_link"); - - /* On adapters with a MAC newer than 82544, SWDP 1 will be - * set when the optics detect a signal. On older adapters, it will be - * cleared when there is a signal. This applies to fiber media only. - * If we're on serdes media, adjust the output amplitude to value - * set in the EEPROM. - */ - ctrl = er32(CTRL); - if (hw->media_type == e1000_media_type_fiber) - signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; - - ret_val = e1000_adjust_serdes_amplitude(hw); - if (ret_val) - return ret_val; - - /* Take the link out of reset */ - ctrl &= ~(E1000_CTRL_LRST); - - /* Adjust VCO speed to improve BER performance */ - ret_val = e1000_set_vco_speed(hw); - if (ret_val) - return ret_val; - - e1000_config_collision_dist(hw); - - /* Check for a software override of the flow control settings, and setup - * the device accordingly. If auto-negotiation is enabled, then software - * will have to set the "PAUSE" bits to the correct value in the Tranmsit - * Config Word Register (TXCW) and re-start auto-negotiation. However, if - * auto-negotiation is disabled, then software will have to manually - * configure the two flow control enable bits in the CTRL register. - * - * The possible values of the "fc" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, but - * not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but we do - * not support receiving pause frames). - * 3: Both Rx and TX flow control (symmetric) are enabled. - */ - switch (hw->fc) { - case E1000_FC_NONE: - /* Flow control is completely disabled by a software over-ride. */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); - break; - case E1000_FC_RX_PAUSE: - /* RX Flow control is enabled and TX Flow control is disabled by a - * software over-ride. Since there really isn't a way to advertise - * that we are capable of RX Pause ONLY, we will advertise that we - * support both symmetric and asymmetric RX PAUSE. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); - break; - case E1000_FC_TX_PAUSE: - /* TX Flow control is enabled, and RX Flow control is disabled, by a - * software over-ride. - */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); - break; - case E1000_FC_FULL: - /* Flow control (both RX and TX) is enabled by a software over-ride. */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); - break; - default: - e_dbg("Flow control param set incorrectly\n"); - return -E1000_ERR_CONFIG; - break; - } - - /* Since auto-negotiation is enabled, take the link out of reset (the link - * will be in reset, because we previously reset the chip). This will - * restart auto-negotiation. 
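Note that E1000_FC_RX_PAUSE and E1000_FC_FULL advertise identical TXCW bits; the Rx-only case is narrowed after link-up by disabling the MAC's ability to send PAUSE frames. The switch therefore reduces to:

        txcw = E1000_TXCW_ANE | E1000_TXCW_FD;  /* always autoneg, full duplex */
        if (hw->fc == E1000_FC_TX_PAUSE)
                txcw |= E1000_TXCW_ASM_DIR;     /* asymmetric pause only       */
        else if (hw->fc != E1000_FC_NONE)
                txcw |= E1000_TXCW_PAUSE_MASK;  /* symmetric + asymmetric      */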
If auto-negotiation is successful then the - * link-up status bit will be set and the flow control enable bits (RFCE - * and TFCE) will be set according to their negotiated value. - */ - e_dbg("Auto-negotiation enabled\n"); - - ew32(TXCW, txcw); - ew32(CTRL, ctrl); - E1000_WRITE_FLUSH(); - - hw->txcw = txcw; - msleep(1); - - /* If we have a signal (the cable is plugged in) then poll for a "Link-Up" - * indication in the Device Status Register. Time-out if a link isn't - * seen in 500 milliseconds seconds (Auto-negotiation should complete in - * less than 500 milliseconds even if the other end is doing it in SW). - * For internal serdes, we just assume a signal is present, then poll. - */ - if (hw->media_type == e1000_media_type_internal_serdes || - (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { - e_dbg("Looking for Link\n"); - for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { - msleep(10); - status = er32(STATUS); - if (status & E1000_STATUS_LU) - break; - } - if (i == (LINK_UP_TIMEOUT / 10)) { - e_dbg("Never got a valid link from auto-neg!!!\n"); - hw->autoneg_failed = 1; - /* AutoNeg failed to achieve a link, so we'll call - * e1000_check_for_link. This routine will force the link up if - * we detect a signal. This will allow us to communicate with - * non-autonegotiating link partners. - */ - ret_val = e1000_check_for_link(hw); - if (ret_val) { - e_dbg("Error while checking for link\n"); - return ret_val; - } - hw->autoneg_failed = 0; - } else { - hw->autoneg_failed = 0; - e_dbg("Valid Link Found\n"); - } - } else { - e_dbg("No Signal Detected\n"); - } - return E1000_SUCCESS; -} - -/** - * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. - * @hw: Struct containing variables accessed by shared code - * - * Commits changes to PHY configuration by calling e1000_phy_reset(). - */ -static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) -{ - s32 ret_val; - - /* SW reset the PHY so all changes take effect */ - ret_val = e1000_phy_reset(hw); - if (ret_val) { - e_dbg("Error Resetting the PHY\n"); - return ret_val; - } - - return E1000_SUCCESS; -} - -static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) -{ - s32 ret_val; - u32 ctrl_aux; - - switch (hw->phy_type) { - case e1000_phy_8211: - ret_val = e1000_copper_link_rtl_setup(hw); - if (ret_val) { - e_dbg("e1000_copper_link_rtl_setup failed!\n"); - return ret_val; - } - break; - case e1000_phy_8201: - /* Set RMII mode */ - ctrl_aux = er32(CTL_AUX); - ctrl_aux |= E1000_CTL_AUX_RMII; - ew32(CTL_AUX, ctrl_aux); - E1000_WRITE_FLUSH(); - - /* Disable the J/K bits required for receive */ - ctrl_aux = er32(CTL_AUX); - ctrl_aux |= 0x4; - ctrl_aux &= ~0x2; - ew32(CTL_AUX, ctrl_aux); - E1000_WRITE_FLUSH(); - ret_val = e1000_copper_link_rtl_setup(hw); - - if (ret_val) { - e_dbg("e1000_copper_link_rtl_setup failed!\n"); - return ret_val; - } - break; - default: - e_dbg("Error Resetting the PHY\n"); - return E1000_ERR_PHY_TYPE; - } - - return E1000_SUCCESS; -} - -/** - * e1000_copper_link_preconfig - early configuration for copper - * @hw: Struct containing variables accessed by shared code - * - * Make sure we have a valid PHY and change PHY mode before link setup. - */ -static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) -{ - u32 ctrl; - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_copper_link_preconfig"); - - ctrl = er32(CTRL); - /* With 82543, we need to force speed and duplex on the MAC equal to what - * the PHY speed and duplex configuration is. 
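The fiber/serdes bring-up above is, in essence, "poll STATUS.LU for up to 500 ms, otherwise try to force the link":

        for (i = 0; i < LINK_UP_TIMEOUT / 10; i++) {
                msleep(10);
                if (er32(STATUS) & E1000_STATUS_LU)
                        break;                  /* autoneg brought link up */
        }
        if (i == LINK_UP_TIMEOUT / 10) {
                hw->autoneg_failed = 1;
                ret_val = e1000_check_for_link(hw);  /* may force link up */
        }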
In addition, we need to - * perform a hardware reset on the PHY to take it out of reset. - */ - if (hw->mac_type > e1000_82543) { - ctrl |= E1000_CTRL_SLU; - ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - ew32(CTRL, ctrl); - } else { - ctrl |= - (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); - ew32(CTRL, ctrl); - ret_val = e1000_phy_hw_reset(hw); - if (ret_val) - return ret_val; - } - - /* Make sure we have a valid PHY */ - ret_val = e1000_detect_gig_phy(hw); - if (ret_val) { - e_dbg("Error, did not detect valid phy.\n"); - return ret_val; - } - e_dbg("Phy ID = %x\n", hw->phy_id); - - /* Set PHY to class A mode (if necessary) */ - ret_val = e1000_set_phy_mode(hw); - if (ret_val) - return ret_val; - - if ((hw->mac_type == e1000_82545_rev_3) || - (hw->mac_type == e1000_82546_rev_3)) { - ret_val = - e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - phy_data |= 0x00000008; - ret_val = - e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - } - - if (hw->mac_type <= e1000_82543 || - hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || - hw->mac_type == e1000_82541_rev_2 - || hw->mac_type == e1000_82547_rev_2) - hw->phy_reset_disable = false; - - return E1000_SUCCESS; -} - -/** - * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. - * @hw: Struct containing variables accessed by shared code - */ -static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) -{ - u32 led_ctrl; - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_copper_link_igp_setup"); - - if (hw->phy_reset_disable) - return E1000_SUCCESS; - - ret_val = e1000_phy_reset(hw); - if (ret_val) { - e_dbg("Error Resetting the PHY\n"); - return ret_val; - } - - /* Wait 15ms for MAC to configure PHY from eeprom settings */ - msleep(15); - /* Configure activity LED after PHY reset */ - led_ctrl = er32(LEDCTL); - led_ctrl &= IGP_ACTIVITY_LED_MASK; - led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); - ew32(LEDCTL, led_ctrl); - - /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ - if (hw->phy_type == e1000_phy_igp) { - /* disable lplu d3 during driver init */ - ret_val = e1000_set_d3_lplu_state(hw, false); - if (ret_val) { - e_dbg("Error Disabling LPLU D3\n"); - return ret_val; - } - } - - /* Configure mdi-mdix settings */ - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); - if (ret_val) - return ret_val; - - if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { - hw->dsp_config_state = e1000_dsp_config_disabled; - /* Force MDI for earlier revs of the IGP PHY */ - phy_data &= - ~(IGP01E1000_PSCR_AUTO_MDIX | - IGP01E1000_PSCR_FORCE_MDI_MDIX); - hw->mdix = 1; - - } else { - hw->dsp_config_state = e1000_dsp_config_enabled; - phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; - - switch (hw->mdix) { - case 1: - phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; - break; - case 2: - phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; - break; - case 0: - default: - phy_data |= IGP01E1000_PSCR_AUTO_MDIX; - break; - } - } - ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); - if (ret_val) - return ret_val; - - /* set auto-master slave resolution settings */ - if (hw->autoneg) { - e1000_ms_type phy_ms_setting = hw->master_slave; - - if (hw->ffe_config_state == e1000_ffe_config_active) - hw->ffe_config_state = e1000_ffe_config_enabled; - - if (hw->dsp_config_state == e1000_dsp_config_activated) - hw->dsp_config_state = e1000_dsp_config_enabled; - - /* when autonegotiation advertisement is only 1000Mbps then we - * should disable 
SmartSpeed and enable Auto MasterSlave - * resolution as hardware default. */ - if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { - /* Disable SmartSpeed */ - ret_val = - e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &phy_data); - if (ret_val) - return ret_val; - phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = - e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - phy_data); - if (ret_val) - return ret_val; - /* Set auto Master/Slave resolution process */ - ret_val = - e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); - if (ret_val) - return ret_val; - phy_data &= ~CR_1000T_MS_ENABLE; - ret_val = - e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); - if (ret_val) - return ret_val; - } - - ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); - if (ret_val) - return ret_val; - - /* load defaults for future use */ - hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? - ((phy_data & CR_1000T_MS_VALUE) ? - e1000_ms_force_master : - e1000_ms_force_slave) : e1000_ms_auto; - - switch (phy_ms_setting) { - case e1000_ms_force_master: - phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); - break; - case e1000_ms_force_slave: - phy_data |= CR_1000T_MS_ENABLE; - phy_data &= ~(CR_1000T_MS_VALUE); - break; - case e1000_ms_auto: - phy_data &= ~CR_1000T_MS_ENABLE; - default: - break; - } - ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); - if (ret_val) - return ret_val; - } - - return E1000_SUCCESS; -} - -/** - * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. - * @hw: Struct containing variables accessed by shared code - */ -static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) -{ - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_copper_link_mgp_setup"); - - if (hw->phy_reset_disable) - return E1000_SUCCESS; - - /* Enable CRS on TX. This must be set for half-duplex operation. */ - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - - /* Options: - * MDI/MDI-X = 0 (default) - * 0 - Auto for all speeds - * 1 - MDI mode - * 2 - MDI-X mode - * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) - */ - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; - - switch (hw->mdix) { - case 1: - phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; - break; - case 2: - phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; - break; - case 3: - phy_data |= M88E1000_PSCR_AUTO_X_1000T; - break; - case 0: - default: - phy_data |= M88E1000_PSCR_AUTO_X_MODE; - break; - } - - /* Options: - * disable_polarity_correction = 0 (default) - * Automatic Correction for Reversed Cable Polarity - * 0 - Disabled - * 1 - Enabled - */ - phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; - if (hw->disable_polarity_correction == 1) - phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if (ret_val) - return ret_val; - - if (hw->phy_revision < M88E1011_I_REV_4) { - /* Force TX_CLK in the Extended PHY Specific Control Register - * to 25MHz clock. 
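The nested ternary above that records the pre-existing master/slave setting is easier to audit unrolled:

        if (!(phy_data & CR_1000T_MS_ENABLE))
                hw->original_master_slave = e1000_ms_auto;
        else if (phy_data & CR_1000T_MS_VALUE)
                hw->original_master_slave = e1000_ms_force_master;
        else
                hw->original_master_slave = e1000_ms_force_slave;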
- */ - ret_val = - e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - &phy_data); - if (ret_val) - return ret_val; - - phy_data |= M88E1000_EPSCR_TX_CLK_25; - - if ((hw->phy_revision == E1000_REVISION_2) && - (hw->phy_id == M88E1111_I_PHY_ID)) { - /* Vidalia Phy, set the downshift counter to 5x */ - phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); - phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; - ret_val = e1000_write_phy_reg(hw, - M88E1000_EXT_PHY_SPEC_CTRL, - phy_data); - if (ret_val) - return ret_val; - } else { - /* Configure Master and Slave downshift values */ - phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); - phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); - ret_val = e1000_write_phy_reg(hw, - M88E1000_EXT_PHY_SPEC_CTRL, - phy_data); - if (ret_val) - return ret_val; - } - } - - /* SW Reset the PHY so all changes take effect */ - ret_val = e1000_phy_reset(hw); - if (ret_val) { - e_dbg("Error Resetting the PHY\n"); - return ret_val; - } - - return E1000_SUCCESS; -} - -/** - * e1000_copper_link_autoneg - setup auto-neg - * @hw: Struct containing variables accessed by shared code - * - * Setup auto-negotiation and flow control advertisements, - * and then perform auto-negotiation. - */ -static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) -{ - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_copper_link_autoneg"); - - /* Perform some bounds checking on the hw->autoneg_advertised - * parameter. If this variable is zero, then set it to the default. - */ - hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; - - /* If autoneg_advertised is zero, we assume it was not defaulted - * by the calling code so we set to advertise full capability. - */ - if (hw->autoneg_advertised == 0) - hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; - - /* IFE/RTL8201N PHY only supports 10/100 */ - if (hw->phy_type == e1000_phy_8201) - hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; - - e_dbg("Reconfiguring auto-neg advertisement params\n"); - ret_val = e1000_phy_setup_autoneg(hw); - if (ret_val) { - e_dbg("Error Setting up Auto-Negotiation\n"); - return ret_val; - } - e_dbg("Restarting Auto-Neg\n"); - - /* Restart auto-negotiation by setting the Auto Neg Enable bit and - * the Auto Neg Restart bit in the PHY control register. - */ - ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); - ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); - if (ret_val) - return ret_val; - - /* Does the user want to wait for Auto-Neg to complete here, or - * check at a later time (for example, callback routine). - */ - if (hw->wait_autoneg_complete) { - ret_val = e1000_wait_autoneg(hw); - if (ret_val) { - e_dbg - ("Error while waiting for autoneg to complete\n"); - return ret_val; - } - } - - hw->get_link_status = true; - - return E1000_SUCCESS; -} - -/** - * e1000_copper_link_postconfig - post link setup - * @hw: Struct containing variables accessed by shared code - * - * Config the MAC and the PHY after link is up. - * 1) Set up the MAC to the current PHY speed/duplex - * if we are on 82543. If we - * are on newer silicon, we only need to configure - * collision distance in the Transmit Control Register. - * 2) Set up flow control on the MAC to that established with - * the link partner. - * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
- */ -static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) -{ - s32 ret_val; - e_dbg("e1000_copper_link_postconfig"); - - if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { - e1000_config_collision_dist(hw); - } else { - ret_val = e1000_config_mac_to_phy(hw); - if (ret_val) { - e_dbg("Error configuring MAC to PHY settings\n"); - return ret_val; - } - } - ret_val = e1000_config_fc_after_link_up(hw); - if (ret_val) { - e_dbg("Error Configuring Flow Control\n"); - return ret_val; - } - - /* Config DSP to improve Giga link quality */ - if (hw->phy_type == e1000_phy_igp) { - ret_val = e1000_config_dsp_after_link_change(hw, true); - if (ret_val) { - e_dbg("Error Configuring DSP after link up\n"); - return ret_val; - } - } - - return E1000_SUCCESS; -} - -/** - * e1000_setup_copper_link - phy/speed/duplex setting - * @hw: Struct containing variables accessed by shared code - * - * Detects which PHY is present and sets up the speed and duplex - */ -static s32 e1000_setup_copper_link(struct e1000_hw *hw) -{ - s32 ret_val; - u16 i; - u16 phy_data; - - e_dbg("e1000_setup_copper_link"); - - /* Check if it is a valid PHY and set PHY mode if necessary. */ - ret_val = e1000_copper_link_preconfig(hw); - if (ret_val) - return ret_val; - - if (hw->phy_type == e1000_phy_igp) { - ret_val = e1000_copper_link_igp_setup(hw); - if (ret_val) - return ret_val; - } else if (hw->phy_type == e1000_phy_m88) { - ret_val = e1000_copper_link_mgp_setup(hw); - if (ret_val) - return ret_val; - } else { - ret_val = gbe_dhg_phy_setup(hw); - if (ret_val) { - e_dbg("gbe_dhg_phy_setup failed!\n"); - return ret_val; - } - } - - if (hw->autoneg) { - /* Setup autoneg and flow control advertisement - * and perform autonegotiation */ - ret_val = e1000_copper_link_autoneg(hw); - if (ret_val) - return ret_val; - } else { - /* PHY will be set to 10H, 10F, 100H,or 100F - * depending on value from forced_speed_duplex. */ - e_dbg("Forcing speed and duplex\n"); - ret_val = e1000_phy_force_speed_duplex(hw); - if (ret_val) { - e_dbg("Error Forcing Speed and Duplex\n"); - return ret_val; - } - } - - /* Check link status. Wait up to 100 microseconds for link to become - * valid. - */ - for (i = 0; i < 10; i++) { - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if (ret_val) - return ret_val; - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if (ret_val) - return ret_val; - - if (phy_data & MII_SR_LINK_STATUS) { - /* Config the MAC and PHY after link is up */ - ret_val = e1000_copper_link_postconfig(hw); - if (ret_val) - return ret_val; - - e_dbg("Valid link established!!!\n"); - return E1000_SUCCESS; - } - udelay(10); - } - - e_dbg("Unable to establish link!!!\n"); - return E1000_SUCCESS; -} - -/** - * e1000_phy_setup_autoneg - phy settings - * @hw: Struct containing variables accessed by shared code - * - * Configures PHY autoneg and flow control advertisement settings - */ -s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) -{ - s32 ret_val; - u16 mii_autoneg_adv_reg; - u16 mii_1000t_ctrl_reg; - - e_dbg("e1000_phy_setup_autoneg"); - - /* Read the MII Auto-Neg Advertisement Register (Address 4). */ - ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); - if (ret_val) - return ret_val; - - /* Read the MII 1000Base-T Control Register (Address 9). 
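The back-to-back PHY_STATUS reads in the link-check loop of e1000_setup_copper_link() are deliberate: the MII link-status bit latches low once link has dropped and stays that way until read, so the first read clears the latch and only the second reflects the live state:

        e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);  /* clears the latch */
        e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);  /* current status   */
        if (phy_data & MII_SR_LINK_STATUS)
                ret_val = e1000_copper_link_postconfig(hw);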
*/ - ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); - if (ret_val) - return ret_val; - else if (hw->phy_type == e1000_phy_8201) - mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; - - /* Need to parse both autoneg_advertised and fc and set up - * the appropriate PHY registers. First we will parse for - * autoneg_advertised software override. Since we can advertise - * a plethora of combinations, we need to check each bit - * individually. - */ - - /* First we clear all the 10/100 mb speed bits in the Auto-Neg - * Advertisement Register (Address 4) and the 1000 mb speed bits in - * the 1000Base-T Control Register (Address 9). - */ - mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; - mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; - - e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); - - /* Do we want to advertise 10 Mb Half Duplex? */ - if (hw->autoneg_advertised & ADVERTISE_10_HALF) { - e_dbg("Advertise 10mb Half duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; - } - - /* Do we want to advertise 10 Mb Full Duplex? */ - if (hw->autoneg_advertised & ADVERTISE_10_FULL) { - e_dbg("Advertise 10mb Full duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; - } - - /* Do we want to advertise 100 Mb Half Duplex? */ - if (hw->autoneg_advertised & ADVERTISE_100_HALF) { - e_dbg("Advertise 100mb Half duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; - } - - /* Do we want to advertise 100 Mb Full Duplex? */ - if (hw->autoneg_advertised & ADVERTISE_100_FULL) { - e_dbg("Advertise 100mb Full duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; - } - - /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ - if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { - e_dbg - ("Advertise 1000mb Half duplex requested, request denied!\n"); - } - - /* Do we want to advertise 1000 Mb Full Duplex? */ - if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { - e_dbg("Advertise 1000mb Full duplex\n"); - mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; - } - - /* Check for a software override of the flow control settings, and - * setup the PHY advertisement registers accordingly. If - * auto-negotiation is enabled, then software will have to set the - * "PAUSE" bits to the correct value in the Auto-Negotiation - * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation. - * - * The possible values of the "fc" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames - * but we do not support receiving pause frames). - * 3: Both Rx and TX flow control (symmetric) are enabled. - * other: No software override. The flow control configuration - * in the EEPROM is used. - */ - switch (hw->fc) { - case E1000_FC_NONE: /* 0 */ - /* Flow control (RX & TX) is completely disabled by a - * software over-ride. - */ - mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - case E1000_FC_RX_PAUSE: /* 1 */ - /* RX Flow control is enabled, and TX Flow control is - * disabled, by a software over-ride. - */ - /* Since there really isn't a way to advertise that we are - * capable of RX Pause ONLY, we will advertise that we - * support both symmetric and asymmetric RX PAUSE. Later - * (in e1000_config_fc_after_link_up) we will disable the - *hw's ability to send PAUSE frames. 
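The advertisement if-chain above is a bit-for-bit translation from the driver's autoneg_advertised mask into MII register 4 (10/100 capabilities) and register 9 (1000BASE-T control); 1000 Mb half duplex is the one capability never advertised. With adv, reg4 and reg9 standing in for the locals above:

        if (adv & ADVERTISE_10_HALF)    reg4 |= NWAY_AR_10T_HD_CAPS;
        if (adv & ADVERTISE_10_FULL)    reg4 |= NWAY_AR_10T_FD_CAPS;
        if (adv & ADVERTISE_100_HALF)   reg4 |= NWAY_AR_100TX_HD_CAPS;
        if (adv & ADVERTISE_100_FULL)   reg4 |= NWAY_AR_100TX_FD_CAPS;
        if (adv & ADVERTISE_1000_FULL)  reg9 |= CR_1000T_FD_CAPS;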
- */ - mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - case E1000_FC_TX_PAUSE: /* 2 */ - /* TX Flow control is enabled, and RX Flow control is - * disabled, by a software over-ride. - */ - mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; - mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; - break; - case E1000_FC_FULL: /* 3 */ - /* Flow control (both RX and TX) is enabled by a software - * over-ride. - */ - mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - default: - e_dbg("Flow control param set incorrectly\n"); - return -E1000_ERR_CONFIG; - } - - ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); - if (ret_val) - return ret_val; - - e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); - - if (hw->phy_type == e1000_phy_8201) { - mii_1000t_ctrl_reg = 0; - } else { - ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, - mii_1000t_ctrl_reg); - if (ret_val) - return ret_val; - } - - return E1000_SUCCESS; -} - -/** - * e1000_phy_force_speed_duplex - force link settings - * @hw: Struct containing variables accessed by shared code - * - * Force PHY speed and duplex settings to hw->forced_speed_duplex - */ -static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) -{ - u32 ctrl; - s32 ret_val; - u16 mii_ctrl_reg; - u16 mii_status_reg; - u16 phy_data; - u16 i; - - e_dbg("e1000_phy_force_speed_duplex"); - - /* Turn off Flow control if we are forcing speed and duplex. */ - hw->fc = E1000_FC_NONE; - - e_dbg("hw->fc = %d\n", hw->fc); - - /* Read the Device Control Register. */ - ctrl = er32(CTRL); - - /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ - ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - ctrl &= ~(DEVICE_SPEED_MASK); - - /* Clear the Auto Speed Detect Enable bit. */ - ctrl &= ~E1000_CTRL_ASDE; - - /* Read the MII Control Register. */ - ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); - if (ret_val) - return ret_val; - - /* We need to disable autoneg in order to force link and duplex. */ - - mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; - - /* Are we forcing Full or Half Duplex? */ - if (hw->forced_speed_duplex == e1000_100_full || - hw->forced_speed_duplex == e1000_10_full) { - /* We want to force full duplex so we SET the full duplex bits in the - * Device and MII Control Registers. - */ - ctrl |= E1000_CTRL_FD; - mii_ctrl_reg |= MII_CR_FULL_DUPLEX; - e_dbg("Full Duplex\n"); - } else { - /* We want to force half duplex so we CLEAR the full duplex bits in - * the Device and MII Control Registers. - */ - ctrl &= ~E1000_CTRL_FD; - mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; - e_dbg("Half Duplex\n"); - } - - /* Are we forcing 100Mbps??? */ - if (hw->forced_speed_duplex == e1000_100_full || - hw->forced_speed_duplex == e1000_100_half) { - /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ - ctrl |= E1000_CTRL_SPD_100; - mii_ctrl_reg |= MII_CR_SPEED_100; - mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); - e_dbg("Forcing 100mb "); - } else { - /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ - ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); - mii_ctrl_reg |= MII_CR_SPEED_10; - mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); - e_dbg("Forcing 10mb "); - } - - e1000_config_collision_dist(hw); - - /* Write the configured values back to the Device Control Reg. */ - ew32(CTRL, ctrl); - - if (hw->phy_type == e1000_phy_m88) { - ret_val = - e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - /* Clear Auto-Crossover to force MDI manually. 
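The four flow-control modes map onto just two advertisement bits; as a truth table derived from the switch above:

        /*
         * hw->fc             NWAY_AR_PAUSE   NWAY_AR_ASM_DIR
         * E1000_FC_NONE            0                0
         * E1000_FC_RX_PAUSE        1                1   (narrowed after link-up)
         * E1000_FC_TX_PAUSE        0                1
         * E1000_FC_FULL            1                1
         */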
M88E1000 requires MDI - * forced whenever speed are duplex are forced. - */ - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; - ret_val = - e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if (ret_val) - return ret_val; - - e_dbg("M88E1000 PSCR: %x\n", phy_data); - - /* Need to reset the PHY or these changes will be ignored */ - mii_ctrl_reg |= MII_CR_RESET; - - /* Disable MDI-X support for 10/100 */ - } else { - /* Clear Auto-Crossover to force MDI manually. IGP requires MDI - * forced whenever speed or duplex are forced. - */ - ret_val = - e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; - phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; - - ret_val = - e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); - if (ret_val) - return ret_val; - } - - /* Write back the modified PHY MII control register. */ - ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); - if (ret_val) - return ret_val; - - udelay(1); - - /* The wait_autoneg_complete flag may be a little misleading here. - * Since we are forcing speed and duplex, Auto-Neg is not enabled. - * But we do want to delay for a period while forcing only so we - * don't generate false No Link messages. So we will wait here - * only if the user has set wait_autoneg_complete to 1, which is - * the default. - */ - if (hw->wait_autoneg_complete) { - /* We will wait for autoneg to complete. */ - e_dbg("Waiting for forced speed/duplex link.\n"); - mii_status_reg = 0; - - /* We will wait for autoneg to complete or 4.5 seconds to expire. */ - for (i = PHY_FORCE_TIME; i > 0; i--) { - /* Read the MII Status Register and wait for Auto-Neg Complete bit - * to be set. - */ - ret_val = - e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - - ret_val = - e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - - if (mii_status_reg & MII_SR_LINK_STATUS) - break; - msleep(100); - } - if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { - /* We didn't get link. Reset the DSP and wait again for link. */ - ret_val = e1000_phy_reset_dsp(hw); - if (ret_val) { - e_dbg("Error Resetting PHY DSP\n"); - return ret_val; - } - } - /* This loop will early-out if the link condition has been met. */ - for (i = PHY_FORCE_TIME; i > 0; i--) { - if (mii_status_reg & MII_SR_LINK_STATUS) - break; - msleep(100); - /* Read the MII Status Register and wait for Auto-Neg Complete bit - * to be set. - */ - ret_val = - e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - - ret_val = - e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - } - } - - if (hw->phy_type == e1000_phy_m88) { - /* Because we reset the PHY above, we need to re-force TX_CLK in the - * Extended PHY Specific Control Register to 25MHz clock. This value - * defaults back to a 2.5MHz clock when the PHY is reset. - */ - ret_val = - e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - &phy_data); - if (ret_val) - return ret_val; - - phy_data |= M88E1000_EPSCR_TX_CLK_25; - ret_val = - e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - phy_data); - if (ret_val) - return ret_val; - - /* In addition, because of the s/w reset above, we need to enable CRS on - * TX. This must be set for both full and half duplex operation. 
- */ - ret_val = - e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - ret_val = - e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if (ret_val) - return ret_val; - - if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) - && (!hw->autoneg) - && (hw->forced_speed_duplex == e1000_10_full - || hw->forced_speed_duplex == e1000_10_half)) { - ret_val = e1000_polarity_reversal_workaround(hw); - if (ret_val) - return ret_val; - } - } - return E1000_SUCCESS; -} - -/** - * e1000_config_collision_dist - set collision distance register - * @hw: Struct containing variables accessed by shared code - * - * Sets the collision distance in the Transmit Control register. - * Link should have been established previously. Reads the speed and duplex - * information from the Device Status register. - */ -void e1000_config_collision_dist(struct e1000_hw *hw) -{ - u32 tctl, coll_dist; - - e_dbg("e1000_config_collision_dist"); - - if (hw->mac_type < e1000_82543) - coll_dist = E1000_COLLISION_DISTANCE_82542; - else - coll_dist = E1000_COLLISION_DISTANCE; - - tctl = er32(TCTL); - - tctl &= ~E1000_TCTL_COLD; - tctl |= coll_dist << E1000_COLD_SHIFT; - - ew32(TCTL, tctl); - E1000_WRITE_FLUSH(); -} - -/** - * e1000_config_mac_to_phy - sync phy and mac settings - * @hw: Struct containing variables accessed by shared code - * @mii_reg: data to write to the MII control register - * - * Sets MAC speed and duplex settings to reflect the those in the PHY - * The contents of the PHY register containing the needed information need to - * be passed in. - */ -static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) -{ - u32 ctrl; - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_config_mac_to_phy"); - - /* 82544 or newer MAC, Auto Speed Detection takes care of - * MAC speed/duplex configuration.*/ - if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) - return E1000_SUCCESS; - - /* Read the Device Control Register and set the bits to Force Speed - * and Duplex. - */ - ctrl = er32(CTRL); - ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); - - switch (hw->phy_type) { - case e1000_phy_8201: - ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); - if (ret_val) - return ret_val; - - if (phy_data & RTL_PHY_CTRL_FD) - ctrl |= E1000_CTRL_FD; - else - ctrl &= ~E1000_CTRL_FD; - - if (phy_data & RTL_PHY_CTRL_SPD_100) - ctrl |= E1000_CTRL_SPD_100; - else - ctrl |= E1000_CTRL_SPD_10; - - e1000_config_collision_dist(hw); - break; - default: - /* Set up duplex in the Device Control and Transmit Control - * registers depending on negotiated values. - */ - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, - &phy_data); - if (ret_val) - return ret_val; - - if (phy_data & M88E1000_PSSR_DPLX) - ctrl |= E1000_CTRL_FD; - else - ctrl &= ~E1000_CTRL_FD; - - e1000_config_collision_dist(hw); - - /* Set up speed in the Device Control register depending on - * negotiated values. - */ - if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) - ctrl |= E1000_CTRL_SPD_1000; - else if ((phy_data & M88E1000_PSSR_SPEED) == - M88E1000_PSSR_100MBS) - ctrl |= E1000_CTRL_SPD_100; - } - - /* Write the configured values back to the Device Control Reg. */ - ew32(CTRL, ctrl); - return E1000_SUCCESS; -} - -/** - * e1000_force_mac_fc - force flow control settings - * @hw: Struct containing variables accessed by shared code - * - * Forces the MAC's flow control settings. 
- * Sets the TFCE and RFCE bits in the device control register to reflect - * the adapter settings. TFCE and RFCE need to be explicitly set by - * software when a Copper PHY is used because autonegotiation is managed - * by the PHY rather than the MAC. Software must also configure these - * bits when link is forced on a fiber connection. - */ -s32 e1000_force_mac_fc(struct e1000_hw *hw) -{ - u32 ctrl; - - e_dbg("e1000_force_mac_fc"); - - /* Get the current configuration of the Device Control Register */ - ctrl = er32(CTRL); - - /* Because we didn't get link via the internal auto-negotiation - * mechanism (we either forced link or we got link via PHY - * auto-neg), we have to manually enable/disable transmit an - * receive flow control. - * - * The "Case" statement below enables/disable flow control - * according to the "hw->fc" parameter. - * - * The possible values of the "fc" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause - * frames but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames - * frames but we do not receive pause frames). - * 3: Both Rx and TX flow control (symmetric) is enabled. - * other: No other values should be possible at this point. - */ - - switch (hw->fc) { - case E1000_FC_NONE: - ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); - break; - case E1000_FC_RX_PAUSE: - ctrl &= (~E1000_CTRL_TFCE); - ctrl |= E1000_CTRL_RFCE; - break; - case E1000_FC_TX_PAUSE: - ctrl &= (~E1000_CTRL_RFCE); - ctrl |= E1000_CTRL_TFCE; - break; - case E1000_FC_FULL: - ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); - break; - default: - e_dbg("Flow control param set incorrectly\n"); - return -E1000_ERR_CONFIG; - } - - /* Disable TX Flow Control for 82542 (rev 2.0) */ - if (hw->mac_type == e1000_82542_rev2_0) - ctrl &= (~E1000_CTRL_TFCE); - - ew32(CTRL, ctrl); - return E1000_SUCCESS; -} - -/** - * e1000_config_fc_after_link_up - configure flow control after autoneg - * @hw: Struct containing variables accessed by shared code - * - * Configures flow control settings after link is established - * Should be called immediately after a valid link has been established. - * Forces MAC flow control settings if link was forced. When in MII/GMII mode - * and autonegotiation is enabled, the MAC flow control settings will be set - * based on the flow control negotiated by the PHY. In TBI mode, the TFCE - * and RFCE bits will be automatically set to the negotiated flow control mode. - */ -static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) -{ - s32 ret_val; - u16 mii_status_reg; - u16 mii_nway_adv_reg; - u16 mii_nway_lp_ability_reg; - u16 speed; - u16 duplex; - - e_dbg("e1000_config_fc_after_link_up"); - - /* Check for the case where we have fiber media and auto-neg failed - * so we had to force link. In this case, we need to force the - * configuration of the MAC to match the "fc" parameter. - */ - if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) - || ((hw->media_type == e1000_media_type_internal_serdes) - && (hw->autoneg_failed)) - || ((hw->media_type == e1000_media_type_copper) - && (!hw->autoneg))) { - ret_val = e1000_force_mac_fc(hw); - if (ret_val) { - e_dbg("Error forcing flow control settings\n"); - return ret_val; - } - } - - /* Check for the case where we have copper media and auto-neg is - * enabled. In this case, we need to check and see if Auto-Neg - * has completed, and if so, how the PHY and link partner has - * flow control configured. 
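
The hw->fc handling above is a pure mapping from the requested mode to the TFCE and RFCE bits. A stand-alone model of the switch in e1000_force_mac_fc(); the two CTRL bit values (bits 28 and 27) are assumptions, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Assumed values for E1000_CTRL_TFCE and E1000_CTRL_RFCE. */
#define CTRL_TFCE 0x10000000u
#define CTRL_RFCE 0x08000000u

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Mirror of the switch in e1000_force_mac_fc(): TFCE tracks the
 * transmit-pause side of the mode, RFCE the receive-pause side. */
static uint32_t apply_fc(uint32_t ctrl, enum fc_mode fc)
{
        ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
        if (fc == FC_TX_PAUSE || fc == FC_FULL)
                ctrl |= CTRL_TFCE;
        if (fc == FC_RX_PAUSE || fc == FC_FULL)
                ctrl |= CTRL_RFCE;
        return ctrl;
}

int main(void)
{
        int fc;

        for (fc = FC_NONE; fc <= FC_FULL; fc++)
                printf("fc=%d -> CTRL=0x%08x\n", fc,
                       (unsigned)apply_fc(0, (enum fc_mode)fc));
        return 0;
}
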
- */ - if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { - /* Read the MII Status Register and check to see if AutoNeg - * has completed. We read this twice because this reg has - * some "sticky" (latched) bits. - */ - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - - if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { - /* The AutoNeg process has completed, so we now need to - * read both the Auto Negotiation Advertisement Register - * (Address 4) and the Auto_Negotiation Base Page Ability - * Register (Address 5) to determine how flow control was - * negotiated. - */ - ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, - &mii_nway_adv_reg); - if (ret_val) - return ret_val; - ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, - &mii_nway_lp_ability_reg); - if (ret_val) - return ret_val; - - /* Two bits in the Auto Negotiation Advertisement Register - * (Address 4) and two bits in the Auto Negotiation Base - * Page Ability Register (Address 5) determine flow control - * for both the PHY and the link partner. The following - * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, - * 1999, describes these PAUSE resolution bits and how flow - * control is determined based upon these settings. - * NOTE: DC = Don't Care - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution - *-------|---------|-------|---------|-------------------- - * 0 | 0 | DC | DC | E1000_FC_NONE - * 0 | 1 | 0 | DC | E1000_FC_NONE - * 0 | 1 | 1 | 0 | E1000_FC_NONE - * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE - * 1 | 0 | 0 | DC | E1000_FC_NONE - * 1 | DC | 1 | DC | E1000_FC_FULL - * 1 | 1 | 0 | 0 | E1000_FC_NONE - * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE - * - */ - /* Are both PAUSE bits set to 1? If so, this implies - * Symmetric Flow Control is enabled at both ends. The - * ASM_DIR bits are irrelevant per the spec. - * - * For Symmetric Flow Control: - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 1 | DC | 1 | DC | E1000_FC_FULL - * - */ - if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { - /* Now we need to check if the user selected RX ONLY - * of pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise RX - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. - */ - if (hw->original_fc == E1000_FC_FULL) { - hw->fc = E1000_FC_FULL; - e_dbg("Flow Control = FULL.\n"); - } else { - hw->fc = E1000_FC_RX_PAUSE; - e_dbg - ("Flow Control = RX PAUSE frames only.\n"); - } - } - /* For receiving PAUSE frames ONLY. - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE - * - */ - else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) - { - hw->fc = E1000_FC_TX_PAUSE; - e_dbg - ("Flow Control = TX PAUSE frames only.\n"); - } - /* For transmitting PAUSE frames ONLY. 
- * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE - * - */ - else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) - { - hw->fc = E1000_FC_RX_PAUSE; - e_dbg - ("Flow Control = RX PAUSE frames only.\n"); - } - /* Per the IEEE spec, at this point flow control should be - * disabled. However, we want to consider that we could - * be connected to a legacy switch that doesn't advertise - * desired flow control, but can be forced on the link - * partner. So if we advertised no flow control, that is - * what we will resolve to. If we advertised some kind of - * receive capability (Rx Pause Only or Full Flow Control) - * and the link partner advertised none, we will configure - * ourselves to enable Rx Flow Control only. We can do - * this safely for two reasons: If the link partner really - * didn't want flow control enabled, and we enable Rx, no - * harm done since we won't be receiving any PAUSE frames - * anyway. If the intent on the link partner was to have - * flow control enabled, then by us enabling RX only, we - * can at least receive pause frames and process them. - * This is a good idea because in most cases, since we are - * predominantly a server NIC, more times than not we will - * be asked to delay transmission of packets than asking - * our link partner to pause transmission of frames. - */ - else if ((hw->original_fc == E1000_FC_NONE || - hw->original_fc == E1000_FC_TX_PAUSE) || - hw->fc_strict_ieee) { - hw->fc = E1000_FC_NONE; - e_dbg("Flow Control = NONE.\n"); - } else { - hw->fc = E1000_FC_RX_PAUSE; - e_dbg - ("Flow Control = RX PAUSE frames only.\n"); - } - - /* Now we need to do one last check... If we auto- - * negotiated to HALF DUPLEX, flow control should not be - * enabled per IEEE 802.3 spec. - */ - ret_val = - e1000_get_speed_and_duplex(hw, &speed, &duplex); - if (ret_val) { - e_dbg - ("Error getting link speed and duplex\n"); - return ret_val; - } - - if (duplex == HALF_DUPLEX) - hw->fc = E1000_FC_NONE; - - /* Now we call a subroutine to actually force the MAC - * controller to use the correct flow control settings. - */ - ret_val = e1000_force_mac_fc(hw); - if (ret_val) { - e_dbg - ("Error forcing flow control settings\n"); - return ret_val; - } - } else { - e_dbg - ("Copper PHY and Auto Neg has not completed.\n"); - } - } - return E1000_SUCCESS; -} - -/** - * e1000_check_for_serdes_link_generic - Check for link (Serdes) - * @hw: pointer to the HW structure - * - * Checks for link up on the hardware. If link is not up and we have - * a signal, then we need to force link up. - */ -static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) -{ - u32 rxcw; - u32 ctrl; - u32 status; - s32 ret_val = E1000_SUCCESS; - - e_dbg("e1000_check_for_serdes_link_generic"); - - ctrl = er32(CTRL); - status = er32(STATUS); - rxcw = er32(RXCW); - - /* - * If we don't have link (auto-negotiation failed or link partner - * cannot auto-negotiate), and our link partner is not trying to - * auto-negotiate with us (we are receiving idles or data), - * we need to force link up. We also need to give auto-negotiation - * time to complete. 
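
The PAUSE/ASM_DIR table spelled out above reduces to a few tests. A stand-alone model of the resolution exactly as e1000_config_fc_after_link_up() implements it, including the deliberate deviation from the IEEE table for link partners that advertise nothing:

#include <stdio.h>

enum fc { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Pure model of the resolution in e1000_config_fc_after_link_up().
 * lp/la: local PAUSE and ASM_DIR advertisement bits,
 * pp/pa: link partner PAUSE and ASM_DIR ability bits,
 * requested: hw->original_fc, strict_ieee: hw->fc_strict_ieee. */
static enum fc resolve_fc(int lp, int la, int pp, int pa,
                          enum fc requested, int strict_ieee)
{
        if (lp && pp)   /* both ends PAUSE-capable: symmetric */
                return requested == FC_FULL ? FC_FULL : FC_RX_PAUSE;
        if (!lp && la && pp && pa)      /* we can only send pause */
                return FC_TX_PAUSE;
        if (lp && la && !pp && pa)      /* partner can only send pause */
                return FC_RX_PAUSE;
        /* No match: stay off unless we advertised a receive capability
         * and are not strict; receiving pause frames is harmless even
         * if the partner never sends any. */
        if (requested == FC_NONE || requested == FC_TX_PAUSE || strict_ieee)
                return FC_NONE;
        return FC_RX_PAUSE;
}

int main(void)
{
        printf("%d\n", resolve_fc(1, 1, 1, 1, FC_FULL, 0)); /* 3: full */
        printf("%d\n", resolve_fc(1, 1, 0, 1, FC_FULL, 0)); /* 1: rx only */
        printf("%d\n", resolve_fc(1, 1, 0, 0, FC_FULL, 0)); /* 1: legacy fallback */
        printf("%d\n", resolve_fc(1, 1, 0, 0, FC_FULL, 1)); /* 0: strict IEEE */
        return 0;
}
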
- */ - /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ - if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { - if (hw->autoneg_failed == 0) { - hw->autoneg_failed = 1; - goto out; - } - e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); - - /* Disable auto-negotiation in the TXCW register */ - ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); - - /* Force link-up and also force full-duplex. */ - ctrl = er32(CTRL); - ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); - ew32(CTRL, ctrl); - - /* Configure Flow Control after forcing link up. */ - ret_val = e1000_config_fc_after_link_up(hw); - if (ret_val) { - e_dbg("Error configuring flow control\n"); - goto out; - } - } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { - /* - * If we are forcing link and we are receiving /C/ ordered - * sets, re-enable auto-negotiation in the TXCW register - * and disable forced link in the Device Control register - * in an attempt to auto-negotiate with our link partner. - */ - e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); - ew32(TXCW, hw->txcw); - ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); - - hw->serdes_has_link = true; - } else if (!(E1000_TXCW_ANE & er32(TXCW))) { - /* - * If we force link for non-auto-negotiation switch, check - * link status based on MAC synchronization for internal - * serdes media type. - */ - /* SYNCH bit and IV bit are sticky. */ - udelay(10); - rxcw = er32(RXCW); - if (rxcw & E1000_RXCW_SYNCH) { - if (!(rxcw & E1000_RXCW_IV)) { - hw->serdes_has_link = true; - e_dbg("SERDES: Link up - forced.\n"); - } - } else { - hw->serdes_has_link = false; - e_dbg("SERDES: Link down - force failed.\n"); - } - } - - if (E1000_TXCW_ANE & er32(TXCW)) { - status = er32(STATUS); - if (status & E1000_STATUS_LU) { - /* SYNCH bit and IV bit are sticky, so reread rxcw. */ - udelay(10); - rxcw = er32(RXCW); - if (rxcw & E1000_RXCW_SYNCH) { - if (!(rxcw & E1000_RXCW_IV)) { - hw->serdes_has_link = true; - e_dbg("SERDES: Link up - autoneg " - "completed successfully.\n"); - } else { - hw->serdes_has_link = false; - e_dbg("SERDES: Link down - invalid" - "codewords detected in autoneg.\n"); - } - } else { - hw->serdes_has_link = false; - e_dbg("SERDES: Link down - no sync.\n"); - } - } else { - hw->serdes_has_link = false; - e_dbg("SERDES: Link down - autoneg failed\n"); - } - } - - out: - return ret_val; -} - -/** - * e1000_check_for_link - * @hw: Struct containing variables accessed by shared code - * - * Checks to see if the link status of the hardware has changed. - * Called by any function that needs to check the link status of the adapter. - */ -s32 e1000_check_for_link(struct e1000_hw *hw) -{ - u32 rxcw = 0; - u32 ctrl; - u32 status; - u32 rctl; - u32 icr; - u32 signal = 0; - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_check_for_link"); - - ctrl = er32(CTRL); - status = er32(STATUS); - - /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be - * set when the optics detect a signal. On older adapters, it will be - * cleared when there is a signal. This applies to fiber media only. - */ - if ((hw->media_type == e1000_media_type_fiber) || - (hw->media_type == e1000_media_type_internal_serdes)) { - rxcw = er32(RXCW); - - if (hw->media_type == e1000_media_type_fiber) { - signal = - (hw->mac_type > - e1000_82544) ? 
E1000_CTRL_SWDPIN1 : 0; - if (status & E1000_STATUS_LU) - hw->get_link_status = false; - } - } - - /* If we have a copper PHY then we only want to go out to the PHY - * registers to see if Auto-Neg has completed and/or if our link - * status has changed. The get_link_status flag will be set if we - * receive a Link Status Change interrupt or we have Rx Sequence - * Errors. - */ - if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { - /* First we want to see if the MII Status Register reports - * link. If so, then we want to get the current speed/duplex - * of the PHY. - * Read the register twice since the link bit is sticky. - */ - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if (ret_val) - return ret_val; - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if (ret_val) - return ret_val; - - if (phy_data & MII_SR_LINK_STATUS) { - hw->get_link_status = false; - /* Check if there was DownShift, must be checked immediately after - * link-up */ - e1000_check_downshift(hw); - - /* If we are on 82544 or 82543 silicon and speed/duplex - * are forced to 10H or 10F, then we will implement the polarity - * reversal workaround. We disable interrupts first, and upon - * returning, place the devices interrupt state to its previous - * value except for the link status change interrupt which will - * happen due to the execution of this workaround. - */ - - if ((hw->mac_type == e1000_82544 - || hw->mac_type == e1000_82543) && (!hw->autoneg) - && (hw->forced_speed_duplex == e1000_10_full - || hw->forced_speed_duplex == e1000_10_half)) { - ew32(IMC, 0xffffffff); - ret_val = - e1000_polarity_reversal_workaround(hw); - icr = er32(ICR); - ew32(ICS, (icr & ~E1000_ICS_LSC)); - ew32(IMS, IMS_ENABLE_MASK); - } - - } else { - /* No link detected */ - e1000_config_dsp_after_link_change(hw, false); - return 0; - } - - /* If we are forcing speed/duplex, then we simply return since - * we have already determined whether we have link or not. - */ - if (!hw->autoneg) - return -E1000_ERR_CONFIG; - - /* optimize the dsp settings for the igp phy */ - e1000_config_dsp_after_link_change(hw, true); - - /* We have a M88E1000 PHY and Auto-Neg is enabled. If we - * have Si on board that is 82544 or newer, Auto - * Speed Detection takes care of MAC speed/duplex - * configuration. So we only need to configure Collision - * Distance in the MAC. Otherwise, we need to force - * speed/duplex on the MAC to the current PHY speed/duplex - * settings. - */ - if ((hw->mac_type >= e1000_82544) && - (hw->mac_type != e1000_ce4100)) - e1000_config_collision_dist(hw); - else { - ret_val = e1000_config_mac_to_phy(hw); - if (ret_val) { - e_dbg - ("Error configuring MAC to PHY settings\n"); - return ret_val; - } - } - - /* Configure Flow Control now that Auto-Neg has completed. First, we - * need to restore the desired flow control settings because we may - * have had to re-autoneg with a different link partner. - */ - ret_val = e1000_config_fc_after_link_up(hw); - if (ret_val) { - e_dbg("Error configuring flow control\n"); - return ret_val; - } - - /* At this point we know that we are on copper and we have - * auto-negotiated link. These are conditions for checking the link - * partner capability register. We use the link speed to determine if - * TBI compatibility needs to be turned on or off. If the link is not - * at gigabit speed, then TBI compatibility is not needed. If we are - * at gigabit speed, we turn on TBI compatibility. 
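
The TBI-compatibility toggle below is a speed-conditioned update of one receive-control bit. A stand-alone model; the RCTL_SBP value (store bad packets, RCTL bit 2) is an assumption, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

#define RCTL_SBP 0x00000004u /* assumed E1000_RCTL_SBP value */

/* Model of the TBI-compatibility update below: accept "bad" frames only
 * while running at gigabit, where carrier extension can make valid
 * frames look like CRC errors. */
static uint32_t tbi_update_rctl(uint32_t rctl, int speed, int tbi_enabled)
{
        if (tbi_enabled && speed == 1000)
                return rctl | RCTL_SBP;
        return rctl & ~RCTL_SBP;
}

int main(void)
{
        printf("0x%08x\n", (unsigned)tbi_update_rctl(0, 1000, 1));       /* set */
        printf("0x%08x\n", (unsigned)tbi_update_rctl(RCTL_SBP, 100, 1)); /* cleared */
        return 0;
}
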
- */ - if (hw->tbi_compatibility_en) { - u16 speed, duplex; - ret_val = - e1000_get_speed_and_duplex(hw, &speed, &duplex); - if (ret_val) { - e_dbg - ("Error getting link speed and duplex\n"); - return ret_val; - } - if (speed != SPEED_1000) { - /* If link speed is not set to gigabit speed, we do not need - * to enable TBI compatibility. - */ - if (hw->tbi_compatibility_on) { - /* If we previously were in the mode, turn it off. */ - rctl = er32(RCTL); - rctl &= ~E1000_RCTL_SBP; - ew32(RCTL, rctl); - hw->tbi_compatibility_on = false; - } - } else { - /* If TBI compatibility is was previously off, turn it on. For - * compatibility with a TBI link partner, we will store bad - * packets. Some frames have an additional byte on the end and - * will look like CRC errors to to the hardware. - */ - if (!hw->tbi_compatibility_on) { - hw->tbi_compatibility_on = true; - rctl = er32(RCTL); - rctl |= E1000_RCTL_SBP; - ew32(RCTL, rctl); - } - } - } - } - - if ((hw->media_type == e1000_media_type_fiber) || - (hw->media_type == e1000_media_type_internal_serdes)) - e1000_check_for_serdes_link_generic(hw); - - return E1000_SUCCESS; -} - -/** - * e1000_get_speed_and_duplex - * @hw: Struct containing variables accessed by shared code - * @speed: Speed of the connection - * @duplex: Duplex setting of the connection - - * Detects the current speed and duplex settings of the hardware. - */ -s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) -{ - u32 status; - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_get_speed_and_duplex"); - - if (hw->mac_type >= e1000_82543) { - status = er32(STATUS); - if (status & E1000_STATUS_SPEED_1000) { - *speed = SPEED_1000; - e_dbg("1000 Mbs, "); - } else if (status & E1000_STATUS_SPEED_100) { - *speed = SPEED_100; - e_dbg("100 Mbs, "); - } else { - *speed = SPEED_10; - e_dbg("10 Mbs, "); - } - - if (status & E1000_STATUS_FD) { - *duplex = FULL_DUPLEX; - e_dbg("Full Duplex\n"); - } else { - *duplex = HALF_DUPLEX; - e_dbg(" Half Duplex\n"); - } - } else { - e_dbg("1000 Mbs, Full Duplex\n"); - *speed = SPEED_1000; - *duplex = FULL_DUPLEX; - } - - /* IGP01 PHY may advertise full duplex operation after speed downgrade even - * if it is operating at half duplex. Here we set the duplex settings to - * match the duplex in the link partner's capabilities. - */ - if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { - ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); - if (ret_val) - return ret_val; - - if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) - *duplex = HALF_DUPLEX; - else { - ret_val = - e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); - if (ret_val) - return ret_val; - if ((*speed == SPEED_100 - && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) - || (*speed == SPEED_10 - && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) - *duplex = HALF_DUPLEX; - } - } - - return E1000_SUCCESS; -} - -/** - * e1000_wait_autoneg - * @hw: Struct containing variables accessed by shared code - * - * Blocks until autoneg completes or times out (~4.5 seconds) - */ -static s32 e1000_wait_autoneg(struct e1000_hw *hw) -{ - s32 ret_val; - u16 i; - u16 phy_data; - - e_dbg("e1000_wait_autoneg"); - e_dbg("Waiting for Auto-Neg to complete.\n"); - - /* We will wait for autoneg to complete or 4.5 seconds to expire. */ - for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { - /* Read the MII Status Register and wait for Auto-Neg - * Complete bit to be set. 
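
The back-to-back PHY_STATUS reads used throughout this file exist because several MII status bits, link status among them, are latched: the first read reports the sticky state since the previous read, the second the live state. A toy model of that behavior; MII_SR_LINK_STATUS is the standard MII status bit 2, assumed to match e1000_hw.h:

#include <stdint.h>
#include <stdio.h>

#define MII_SR_LINK_STATUS 0x0004 /* assumed; standard MII status bit 2 */

/* Toy model of a latched-low status bit: a read reports the worst state
 * seen since the previous read, and reading re-arms the latch. */
struct latched_phy {
        int live_link;          /* current link state */
        int latched_down;       /* link dropped since last read? */
};

static uint16_t read_status(struct latched_phy *phy)
{
        uint16_t sr = (phy->live_link && !phy->latched_down) ?
                      MII_SR_LINK_STATUS : 0;

        phy->latched_down = 0;  /* the read clears the latch */
        return sr;
}

int main(void)
{
        struct latched_phy phy = { .live_link = 1, .latched_down = 1 };
        uint16_t first = read_status(&phy);     /* stale: reports down */
        uint16_t second = read_status(&phy);    /* live: reports up */

        printf("read1=0x%04x read2=0x%04x\n", (unsigned)first,
               (unsigned)second);
        return 0;
}
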
- */ - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if (ret_val) - return ret_val; - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if (ret_val) - return ret_val; - if (phy_data & MII_SR_AUTONEG_COMPLETE) { - return E1000_SUCCESS; - } - msleep(100); - } - return E1000_SUCCESS; -} - -/** - * e1000_raise_mdi_clk - Raises the Management Data Clock - * @hw: Struct containing variables accessed by shared code - * @ctrl: Device control register's current value - */ -static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) -{ - /* Raise the clock input to the Management Data Clock (by setting the MDC - * bit), and then delay 10 microseconds. - */ - ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); - E1000_WRITE_FLUSH(); - udelay(10); -} - -/** - * e1000_lower_mdi_clk - Lowers the Management Data Clock - * @hw: Struct containing variables accessed by shared code - * @ctrl: Device control register's current value - */ -static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) -{ - /* Lower the clock input to the Management Data Clock (by clearing the MDC - * bit), and then delay 10 microseconds. - */ - ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); - E1000_WRITE_FLUSH(); - udelay(10); -} - -/** - * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY - * @hw: Struct containing variables accessed by shared code - * @data: Data to send out to the PHY - * @count: Number of bits to shift out - * - * Bits are shifted out in MSB to LSB order. - */ -static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) -{ - u32 ctrl; - u32 mask; - - /* We need to shift "count" number of bits out to the PHY. So, the value - * in the "data" parameter will be shifted out to the PHY one bit at a - * time. In order to do this, "data" must be broken down into bits. - */ - mask = 0x01; - mask <<= (count - 1); - - ctrl = er32(CTRL); - - /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ - ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); - - while (mask) { - /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and - * then raising and lowering the Management Data Clock. A "0" is - * shifted out to the PHY by setting the MDIO bit to "0" and then - * raising and lowering the clock. - */ - if (data & mask) - ctrl |= E1000_CTRL_MDIO; - else - ctrl &= ~E1000_CTRL_MDIO; - - ew32(CTRL, ctrl); - E1000_WRITE_FLUSH(); - - udelay(10); - - e1000_raise_mdi_clk(hw, &ctrl); - e1000_lower_mdi_clk(hw, &ctrl); - - mask = mask >> 1; - } -} - -/** - * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY - * @hw: Struct containing variables accessed by shared code - * - * Bits are shifted in in MSB to LSB order. - */ -static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) -{ - u32 ctrl; - u16 data = 0; - u8 i; - - /* In order to read a register from the PHY, we need to shift in a total - * of 18 bits from the PHY. The first two bit (turnaround) times are used - * to avoid contention on the MDIO pin when a read operation is performed. - * These two bits are ignored by us and thrown away. Bits are "shifted in" - * by raising the input to the Management Data Clock (setting the MDC bit), - * and then reading the value of the MDIO bit. - */ - ctrl = er32(CTRL); - - /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ - ctrl &= ~E1000_CTRL_MDIO_DIR; - ctrl &= ~E1000_CTRL_MDIO; - - ew32(CTRL, ctrl); - E1000_WRITE_FLUSH(); - - /* Raise and Lower the clock before reading in the data. This accounts for - * the turnaround bits. 
The first clock occurred when we clocked out the - * last bit of the Register Address. - */ - e1000_raise_mdi_clk(hw, &ctrl); - e1000_lower_mdi_clk(hw, &ctrl); - - for (data = 0, i = 0; i < 16; i++) { - data = data << 1; - e1000_raise_mdi_clk(hw, &ctrl); - ctrl = er32(CTRL); - /* Check to see if we shifted in a "1". */ - if (ctrl & E1000_CTRL_MDIO) - data |= 1; - e1000_lower_mdi_clk(hw, &ctrl); - } - - e1000_raise_mdi_clk(hw, &ctrl); - e1000_lower_mdi_clk(hw, &ctrl); - - return data; -} - - -/** - * e1000_read_phy_reg - read a phy register - * @hw: Struct containing variables accessed by shared code - * @reg_addr: address of the PHY register to read - * - * Reads the value from a PHY register, if the value is on a specific non zero - * page, sets the page first. - */ -s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) -{ - u32 ret_val; - - e_dbg("e1000_read_phy_reg"); - - if ((hw->phy_type == e1000_phy_igp) && - (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { - ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, - (u16) reg_addr); - if (ret_val) - return ret_val; - } - - ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, - phy_data); - - return ret_val; -} - -static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, - u16 *phy_data) -{ - u32 i; - u32 mdic = 0; - const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; - - e_dbg("e1000_read_phy_reg_ex"); - - if (reg_addr > MAX_PHY_REG_ADDRESS) { - e_dbg("PHY Address %d is out of range\n", reg_addr); - return -E1000_ERR_PARAM; - } - - if (hw->mac_type > e1000_82543) { - /* Set up Op-code, Phy Address, and register address in the MDI - * Control register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. - */ - if (hw->mac_type == e1000_ce4100) { - mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | - (phy_addr << E1000_MDIC_PHY_SHIFT) | - (INTEL_CE_GBE_MDIC_OP_READ) | - (INTEL_CE_GBE_MDIC_GO)); - - writel(mdic, E1000_MDIO_CMD); - - /* Poll the ready bit to see if the MDI read - * completed - */ - for (i = 0; i < 64; i++) { - udelay(50); - mdic = readl(E1000_MDIO_CMD); - if (!(mdic & INTEL_CE_GBE_MDIC_GO)) - break; - } - - if (mdic & INTEL_CE_GBE_MDIC_GO) { - e_dbg("MDI Read did not complete\n"); - return -E1000_ERR_PHY; - } - - mdic = readl(E1000_MDIO_STS); - if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { - e_dbg("MDI Read Error\n"); - return -E1000_ERR_PHY; - } - *phy_data = (u16) mdic; - } else { - mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | - (phy_addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_READ)); - - ew32(MDIC, mdic); - - /* Poll the ready bit to see if the MDI read - * completed - */ - for (i = 0; i < 64; i++) { - udelay(50); - mdic = er32(MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { - e_dbg("MDI Read did not complete\n"); - return -E1000_ERR_PHY; - } - if (mdic & E1000_MDIC_ERROR) { - e_dbg("MDI Error\n"); - return -E1000_ERR_PHY; - } - *phy_data = (u16) mdic; - } - } else { - /* We must first send a preamble through the MDIO pin to signal the - * beginning of an MII instruction. This is done by sending 32 - * consecutive "1" bits. - */ - e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); - - /* Now combine the next few fields that are required for a read - * operation. We use this method instead of calling the - * e1000_shift_out_mdi_bits routine five different times. 
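
The 14-bit read frame assembled below packs four fields into one word before it is bit-banged out MSB first. A stand-alone version of the same expression; the PHY_SOF and PHY_OP_READ values are the IEEE 802.3 clause-22 start and read-opcode codes and are assumed to match e1000_hw.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match e1000_hw.h: start-of-frame 01b, read opcode 10b. */
#define PHY_SOF     0x01
#define PHY_OP_READ 0x02

/* Compose the 14-bit MII management read frame as
 * e1000_read_phy_reg_ex() does:
 * <2 SOF><2 opcode><5 phy addr><5 reg addr>. */
static uint32_t mii_read_frame(uint32_t phy_addr, uint32_t reg_addr)
{
        return reg_addr | (phy_addr << 5) | (PHY_OP_READ << 10) |
               (PHY_SOF << 12);
}

int main(void)
{
        /* PHY address 1, register 2 (PHY_ID1). */
        printf("frame=0x%04x\n", (unsigned)mii_read_frame(1, 2)); /* 0x1822 */
        return 0;
}
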
The format of - * a MII read instruction consists of a shift out of 14 bits and is - * defined as follows: - * - * followed by a shift in of 18 bits. This first two bits shifted in - * are TurnAround bits used to avoid contention on the MDIO pin when a - * READ operation is performed. These two bits are thrown away - * followed by a shift in of 16 bits which contains the desired data. - */ - mdic = ((reg_addr) | (phy_addr << 5) | - (PHY_OP_READ << 10) | (PHY_SOF << 12)); - - e1000_shift_out_mdi_bits(hw, mdic, 14); - - /* Now that we've shifted out the read command to the MII, we need to - * "shift in" the 16-bit value (18 total bits) of the requested PHY - * register address. - */ - *phy_data = e1000_shift_in_mdi_bits(hw); - } - return E1000_SUCCESS; -} - -/** - * e1000_write_phy_reg - write a phy register - * - * @hw: Struct containing variables accessed by shared code - * @reg_addr: address of the PHY register to write - * @data: data to write to the PHY - - * Writes a value to a PHY register - */ -s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) -{ - u32 ret_val; - - e_dbg("e1000_write_phy_reg"); - - if ((hw->phy_type == e1000_phy_igp) && - (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { - ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, - (u16) reg_addr); - if (ret_val) - return ret_val; - } - - ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, - phy_data); - - return ret_val; -} - -static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, - u16 phy_data) -{ - u32 i; - u32 mdic = 0; - const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; - - e_dbg("e1000_write_phy_reg_ex"); - - if (reg_addr > MAX_PHY_REG_ADDRESS) { - e_dbg("PHY Address %d is out of range\n", reg_addr); - return -E1000_ERR_PARAM; - } - - if (hw->mac_type > e1000_82543) { - /* Set up Op-code, Phy Address, register address, and data - * intended for the PHY register in the MDI Control register. - * The MAC will take care of interfacing with the PHY to send - * the desired data. - */ - if (hw->mac_type == e1000_ce4100) { - mdic = (((u32) phy_data) | - (reg_addr << E1000_MDIC_REG_SHIFT) | - (phy_addr << E1000_MDIC_PHY_SHIFT) | - (INTEL_CE_GBE_MDIC_OP_WRITE) | - (INTEL_CE_GBE_MDIC_GO)); - - writel(mdic, E1000_MDIO_CMD); - - /* Poll the ready bit to see if the MDI read - * completed - */ - for (i = 0; i < 640; i++) { - udelay(5); - mdic = readl(E1000_MDIO_CMD); - if (!(mdic & INTEL_CE_GBE_MDIC_GO)) - break; - } - if (mdic & INTEL_CE_GBE_MDIC_GO) { - e_dbg("MDI Write did not complete\n"); - return -E1000_ERR_PHY; - } - } else { - mdic = (((u32) phy_data) | - (reg_addr << E1000_MDIC_REG_SHIFT) | - (phy_addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_WRITE)); - - ew32(MDIC, mdic); - - /* Poll the ready bit to see if the MDI read - * completed - */ - for (i = 0; i < 641; i++) { - udelay(5); - mdic = er32(MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { - e_dbg("MDI Write did not complete\n"); - return -E1000_ERR_PHY; - } - } - } else { - /* We'll need to use the SW defined pins to shift the write command - * out to the PHY. We first send a preamble to the PHY to signal the - * beginning of the MII instruction. This is done by sending 32 - * consecutive "1" bits. - */ - e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); - - /* Now combine the remaining required fields that will indicate a - * write operation. 
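
The 32-bit write frame is packed the same way, with the turnaround bits and the data word folded in. A stand-alone version of the expression built just below; the constant values are assumptions matching the clause-22 codes:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match e1000_hw.h. */
#define PHY_SOF        0x01
#define PHY_OP_WRITE   0x01
#define PHY_TURNAROUND 0x02

/* The 32-bit MII write frame built in e1000_write_phy_reg_ex():
 * <2 SOF><2 opcode><5 phy addr><5 reg addr><2 turnaround><16 data>. */
static uint32_t mii_write_frame(uint32_t phy_addr, uint32_t reg_addr,
                                uint16_t data)
{
        uint32_t hi = PHY_TURNAROUND | (reg_addr << 2) | (phy_addr << 7) |
                      (PHY_OP_WRITE << 12) | (PHY_SOF << 14);

        return (hi << 16) | data;
}

int main(void)
{
        /* PHY reset (bit 15) via PHY 1, register 0 (PHY_CTRL). */
        printf("frame=0x%08x\n",
               (unsigned)mii_write_frame(1, 0, 0x8000)); /* 0x50828000 */
        return 0;
}
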
We use this method instead of calling the - * e1000_shift_out_mdi_bits routine for each field in the command. The - * format of a MII write instruction is as follows: - * . - */ - mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | - (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); - mdic <<= 16; - mdic |= (u32) phy_data; - - e1000_shift_out_mdi_bits(hw, mdic, 32); - } - - return E1000_SUCCESS; -} - -/** - * e1000_phy_hw_reset - reset the phy, hardware style - * @hw: Struct containing variables accessed by shared code - * - * Returns the PHY to the power-on reset state - */ -s32 e1000_phy_hw_reset(struct e1000_hw *hw) -{ - u32 ctrl, ctrl_ext; - u32 led_ctrl; - - e_dbg("e1000_phy_hw_reset"); - - e_dbg("Resetting Phy...\n"); - - if (hw->mac_type > e1000_82543) { - /* Read the device control register and assert the E1000_CTRL_PHY_RST - * bit. Then, take it out of reset. - * For e1000 hardware, we delay for 10ms between the assert - * and deassert. - */ - ctrl = er32(CTRL); - ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); - E1000_WRITE_FLUSH(); - - msleep(10); - - ew32(CTRL, ctrl); - E1000_WRITE_FLUSH(); - - } else { - /* Read the Extended Device Control Register, assert the PHY_RESET_DIR - * bit to put the PHY into reset. Then, take it out of reset. - */ - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; - ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; - ew32(CTRL_EXT, ctrl_ext); - E1000_WRITE_FLUSH(); - msleep(10); - ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; - ew32(CTRL_EXT, ctrl_ext); - E1000_WRITE_FLUSH(); - } - udelay(150); - - if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { - /* Configure activity LED after PHY reset */ - led_ctrl = er32(LEDCTL); - led_ctrl &= IGP_ACTIVITY_LED_MASK; - led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); - ew32(LEDCTL, led_ctrl); - } - - /* Wait for FW to finish PHY configuration. */ - return e1000_get_phy_cfg_done(hw); -} - -/** - * e1000_phy_reset - reset the phy to commit settings - * @hw: Struct containing variables accessed by shared code - * - * Resets the PHY - * Sets bit 15 of the MII Control register - */ -s32 e1000_phy_reset(struct e1000_hw *hw) -{ - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_phy_reset"); - - switch (hw->phy_type) { - case e1000_phy_igp: - ret_val = e1000_phy_hw_reset(hw); - if (ret_val) - return ret_val; - break; - default: - ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_data |= MII_CR_RESET; - ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); - if (ret_val) - return ret_val; - - udelay(1); - break; - } - - if (hw->phy_type == e1000_phy_igp) - e1000_phy_init_script(hw); - - return E1000_SUCCESS; -} - -/** - * e1000_detect_gig_phy - check the phy type - * @hw: Struct containing variables accessed by shared code - * - * Probes the expected PHY address for known PHY IDs - */ -static s32 e1000_detect_gig_phy(struct e1000_hw *hw) -{ - s32 phy_init_status, ret_val; - u16 phy_id_high, phy_id_low; - bool match = false; - - e_dbg("e1000_detect_gig_phy"); - - if (hw->phy_id != 0) - return E1000_SUCCESS; - - /* Read the PHY ID Registers to identify which PHY is onboard. 
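
The PHY identification that follows merges the two 16-bit ID registers and strips the revision nibble. A stand-alone sketch; the PHY_REVISION_MASK value is assumed to match e1000_hw.h:

#include <stdint.h>
#include <stdio.h>

#define PHY_REVISION_MASK 0xFFFFFFF0u /* assumed */

/* How e1000_detect_gig_phy() assembles the 32-bit PHY ID from the two
 * 16-bit ID registers: the revision nibble in ID2 is masked off. */
static void assemble_phy_id(uint16_t id1, uint16_t id2,
                            uint32_t *phy_id, uint32_t *phy_rev)
{
        *phy_id = ((uint32_t)id1 << 16) | (id2 & PHY_REVISION_MASK);
        *phy_rev = id2 & ~PHY_REVISION_MASK;
}

int main(void)
{
        uint32_t id, rev;

        /* Sample register values for illustration only. */
        assemble_phy_id(0x0141, 0x0C53, &id, &rev);
        printf("phy_id=0x%08x rev=%u\n", (unsigned)id,
               (unsigned)rev); /* 0x01410c50 rev=3 */
        return 0;
}
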
*/ - ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); - if (ret_val) - return ret_val; - - hw->phy_id = (u32) (phy_id_high << 16); - udelay(20); - ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); - if (ret_val) - return ret_val; - - hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK); - hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK; - - switch (hw->mac_type) { - case e1000_82543: - if (hw->phy_id == M88E1000_E_PHY_ID) - match = true; - break; - case e1000_82544: - if (hw->phy_id == M88E1000_I_PHY_ID) - match = true; - break; - case e1000_82540: - case e1000_82545: - case e1000_82545_rev_3: - case e1000_82546: - case e1000_82546_rev_3: - if (hw->phy_id == M88E1011_I_PHY_ID) - match = true; - break; - case e1000_ce4100: - if ((hw->phy_id == RTL8211B_PHY_ID) || - (hw->phy_id == RTL8201N_PHY_ID) || - (hw->phy_id == M88E1118_E_PHY_ID)) - match = true; - break; - case e1000_82541: - case e1000_82541_rev_2: - case e1000_82547: - case e1000_82547_rev_2: - if (hw->phy_id == IGP01E1000_I_PHY_ID) - match = true; - break; - default: - e_dbg("Invalid MAC type %d\n", hw->mac_type); - return -E1000_ERR_CONFIG; - } - phy_init_status = e1000_set_phy_type(hw); - - if ((match) && (phy_init_status == E1000_SUCCESS)) { - e_dbg("PHY ID 0x%X detected\n", hw->phy_id); - return E1000_SUCCESS; - } - e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); - return -E1000_ERR_PHY; -} - -/** - * e1000_phy_reset_dsp - reset DSP - * @hw: Struct containing variables accessed by shared code - * - * Resets the PHY's DSP - */ -static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) -{ - s32 ret_val; - e_dbg("e1000_phy_reset_dsp"); - - do { - ret_val = e1000_write_phy_reg(hw, 29, 0x001d); - if (ret_val) - break; - ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); - if (ret_val) - break; - ret_val = e1000_write_phy_reg(hw, 30, 0x0000); - if (ret_val) - break; - ret_val = E1000_SUCCESS; - } while (0); - - return ret_val; -} - -/** - * e1000_phy_igp_get_info - get igp specific registers - * @hw: Struct containing variables accessed by shared code - * @phy_info: PHY information structure - * - * Get PHY information from various PHY registers for igp PHY only. - */ -static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, - struct e1000_phy_info *phy_info) -{ - s32 ret_val; - u16 phy_data, min_length, max_length, average; - e1000_rev_polarity polarity; - - e_dbg("e1000_phy_igp_get_info"); - - /* The downshift status is checked only once, after link is established, - * and it stored in the hw->speed_downgraded parameter. */ - phy_info->downshift = (e1000_downshift) hw->speed_downgraded; - - /* IGP01E1000 does not need to support it. 
*/ - phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; - - /* IGP01E1000 always correct polarity reversal */ - phy_info->polarity_correction = e1000_polarity_reversal_enabled; - - /* Check polarity status */ - ret_val = e1000_check_polarity(hw, &polarity); - if (ret_val) - return ret_val; - - phy_info->cable_polarity = polarity; - - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); - if (ret_val) - return ret_val; - - phy_info->mdix_mode = - (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> - IGP01E1000_PSSR_MDIX_SHIFT); - - if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == - IGP01E1000_PSSR_SPEED_1000MBPS) { - /* Local/Remote Receiver Information are only valid at 1000 Mbps */ - ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); - if (ret_val) - return ret_val; - - phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> - SR_1000T_LOCAL_RX_STATUS_SHIFT) ? - e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> - SR_1000T_REMOTE_RX_STATUS_SHIFT) ? - e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - - /* Get cable length */ - ret_val = e1000_get_cable_length(hw, &min_length, &max_length); - if (ret_val) - return ret_val; - - /* Translate to old method */ - average = (max_length + min_length) / 2; - - if (average <= e1000_igp_cable_length_50) - phy_info->cable_length = e1000_cable_length_50; - else if (average <= e1000_igp_cable_length_80) - phy_info->cable_length = e1000_cable_length_50_80; - else if (average <= e1000_igp_cable_length_110) - phy_info->cable_length = e1000_cable_length_80_110; - else if (average <= e1000_igp_cable_length_140) - phy_info->cable_length = e1000_cable_length_110_140; - else - phy_info->cable_length = e1000_cable_length_140; - } - - return E1000_SUCCESS; -} - -/** - * e1000_phy_m88_get_info - get m88 specific registers - * @hw: Struct containing variables accessed by shared code - * @phy_info: PHY information structure - * - * Get PHY information from various PHY registers for m88 PHY only. - */ -static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, - struct e1000_phy_info *phy_info) -{ - s32 ret_val; - u16 phy_data; - e1000_rev_polarity polarity; - - e_dbg("e1000_phy_m88_get_info"); - - /* The downshift status is checked only once, after link is established, - * and it stored in the hw->speed_downgraded parameter. */ - phy_info->downshift = (e1000_downshift) hw->speed_downgraded; - - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_info->extended_10bt_distance = - ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> - M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? - e1000_10bt_ext_dist_enable_lower : - e1000_10bt_ext_dist_enable_normal; - - phy_info->polarity_correction = - ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> - M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? 
- e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; - - /* Check polarity status */ - ret_val = e1000_check_polarity(hw, &polarity); - if (ret_val) - return ret_val; - phy_info->cable_polarity = polarity; - - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if (ret_val) - return ret_val; - - phy_info->mdix_mode = - (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> - M88E1000_PSSR_MDIX_SHIFT); - - if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { - /* Cable Length Estimation and Local/Remote Receiver Information - * are only valid at 1000 Mbps. - */ - phy_info->cable_length = - (e1000_cable_length) ((phy_data & - M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT); - - ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); - if (ret_val) - return ret_val; - - phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> - SR_1000T_LOCAL_RX_STATUS_SHIFT) ? - e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> - SR_1000T_REMOTE_RX_STATUS_SHIFT) ? - e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; - - } - - return E1000_SUCCESS; -} - -/** - * e1000_phy_get_info - request phy info - * @hw: Struct containing variables accessed by shared code - * @phy_info: PHY information structure - * - * Get PHY information from various PHY registers - */ -s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) -{ - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_phy_get_info"); - - phy_info->cable_length = e1000_cable_length_undefined; - phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; - phy_info->cable_polarity = e1000_rev_polarity_undefined; - phy_info->downshift = e1000_downshift_undefined; - phy_info->polarity_correction = e1000_polarity_reversal_undefined; - phy_info->mdix_mode = e1000_auto_x_mode_undefined; - phy_info->local_rx = e1000_1000t_rx_status_undefined; - phy_info->remote_rx = e1000_1000t_rx_status_undefined; - - if (hw->media_type != e1000_media_type_copper) { - e_dbg("PHY info is only valid for copper media\n"); - return -E1000_ERR_CONFIG; - } - - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if (ret_val) - return ret_val; - - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if (ret_val) - return ret_val; - - if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { - e_dbg("PHY info is only valid if link is up\n"); - return -E1000_ERR_CONFIG; - } - - if (hw->phy_type == e1000_phy_igp) - return e1000_phy_igp_get_info(hw, phy_info); - else if ((hw->phy_type == e1000_phy_8211) || - (hw->phy_type == e1000_phy_8201)) - return E1000_SUCCESS; - else - return e1000_phy_m88_get_info(hw, phy_info); -} - -s32 e1000_validate_mdi_setting(struct e1000_hw *hw) -{ - e_dbg("e1000_validate_mdi_settings"); - - if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { - e_dbg("Invalid MDI setting detected\n"); - hw->mdix = 1; - return -E1000_ERR_CONFIG; - } - return E1000_SUCCESS; -} - -/** - * e1000_init_eeprom_params - initialize sw eeprom vars - * @hw: Struct containing variables accessed by shared code - * - * Sets up eeprom variables in the hw struct. Must be called after mac_type - * is configured. 
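
The SPI branch of the parameter setup below turns a small size code from the EEPROM_CFG word into a word count, bumping nonzero codes by one so the unsupported 256-byte size can never be the shift input. A stand-alone model; the mask and shift values are assumed to match e1000_hw.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match e1000_hw.h. */
#define EEPROM_SIZE_MASK       0x1C00
#define EEPROM_SIZE_SHIFT      10
#define EEPROM_WORD_SIZE_SHIFT 6

/* Model of the SPI size decode: the EEPROM_CFG field is an exponent;
 * nonzero codes are bumped past 1 (256 bytes, unsupported on older
 * parts) before being turned into a word count. */
static unsigned spi_word_size(uint16_t eeprom_cfg)
{
        unsigned size = (eeprom_cfg & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;

        if (size)
                size++;
        return 1u << (size + EEPROM_WORD_SIZE_SHIFT);
}

int main(void)
{
        printf("code 0 -> %u words\n", spi_word_size(0));        /* 64 */
        printf("code 2 -> %u words\n", spi_word_size(2 << 10));  /* 512 */
        return 0;
}
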
- */ -s32 e1000_init_eeprom_params(struct e1000_hw *hw) -{ - struct e1000_eeprom_info *eeprom = &hw->eeprom; - u32 eecd = er32(EECD); - s32 ret_val = E1000_SUCCESS; - u16 eeprom_size; - - e_dbg("e1000_init_eeprom_params"); - - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - case e1000_82543: - case e1000_82544: - eeprom->type = e1000_eeprom_microwire; - eeprom->word_size = 64; - eeprom->opcode_bits = 3; - eeprom->address_bits = 6; - eeprom->delay_usec = 50; - break; - case e1000_82540: - case e1000_82545: - case e1000_82545_rev_3: - case e1000_82546: - case e1000_82546_rev_3: - eeprom->type = e1000_eeprom_microwire; - eeprom->opcode_bits = 3; - eeprom->delay_usec = 50; - if (eecd & E1000_EECD_SIZE) { - eeprom->word_size = 256; - eeprom->address_bits = 8; - } else { - eeprom->word_size = 64; - eeprom->address_bits = 6; - } - break; - case e1000_82541: - case e1000_82541_rev_2: - case e1000_82547: - case e1000_82547_rev_2: - if (eecd & E1000_EECD_TYPE) { - eeprom->type = e1000_eeprom_spi; - eeprom->opcode_bits = 8; - eeprom->delay_usec = 1; - if (eecd & E1000_EECD_ADDR_BITS) { - eeprom->page_size = 32; - eeprom->address_bits = 16; - } else { - eeprom->page_size = 8; - eeprom->address_bits = 8; - } - } else { - eeprom->type = e1000_eeprom_microwire; - eeprom->opcode_bits = 3; - eeprom->delay_usec = 50; - if (eecd & E1000_EECD_ADDR_BITS) { - eeprom->word_size = 256; - eeprom->address_bits = 8; - } else { - eeprom->word_size = 64; - eeprom->address_bits = 6; - } - } - break; - default: - break; - } - - if (eeprom->type == e1000_eeprom_spi) { - /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to - * 32KB (incremented by powers of 2). - */ - /* Set to default value for initial eeprom read. */ - eeprom->word_size = 64; - ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); - if (ret_val) - return ret_val; - eeprom_size = - (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; - /* 256B eeprom size was not supported in earlier hardware, so we - * bump eeprom_size up one to ensure that "1" (which maps to 256B) - * is never the result used in the shifting logic below. */ - if (eeprom_size) - eeprom_size++; - - eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); - } - return ret_val; -} - -/** - * e1000_raise_ee_clk - Raises the EEPROM's clock input. - * @hw: Struct containing variables accessed by shared code - * @eecd: EECD's current value - */ -static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) -{ - /* Raise the clock input to the EEPROM (by setting the SK bit), and then - * wait microseconds. - */ - *eecd = *eecd | E1000_EECD_SK; - ew32(EECD, *eecd); - E1000_WRITE_FLUSH(); - udelay(hw->eeprom.delay_usec); -} - -/** - * e1000_lower_ee_clk - Lowers the EEPROM's clock input. - * @hw: Struct containing variables accessed by shared code - * @eecd: EECD's current value - */ -static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) -{ - /* Lower the clock input to the EEPROM (by clearing the SK bit), and then - * wait 50 microseconds. - */ - *eecd = *eecd & ~E1000_EECD_SK; - ew32(EECD, *eecd); - E1000_WRITE_FLUSH(); - udelay(hw->eeprom.delay_usec); -} - -/** - * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
- * @hw: Struct containing variables accessed by shared code - * @data: data to send to the EEPROM - * @count: number of bits to shift out - */ -static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) -{ - struct e1000_eeprom_info *eeprom = &hw->eeprom; - u32 eecd; - u32 mask; - - /* We need to shift "count" bits out to the EEPROM. So, value in the - * "data" parameter will be shifted out to the EEPROM one bit at a time. - * In order to do this, "data" must be broken down into bits. - */ - mask = 0x01 << (count - 1); - eecd = er32(EECD); - if (eeprom->type == e1000_eeprom_microwire) { - eecd &= ~E1000_EECD_DO; - } else if (eeprom->type == e1000_eeprom_spi) { - eecd |= E1000_EECD_DO; - } - do { - /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", - * and then raising and then lowering the clock (the SK bit controls - * the clock input to the EEPROM). A "0" is shifted out to the EEPROM - * by setting "DI" to "0" and then raising and then lowering the clock. - */ - eecd &= ~E1000_EECD_DI; - - if (data & mask) - eecd |= E1000_EECD_DI; - - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - - udelay(eeprom->delay_usec); - - e1000_raise_ee_clk(hw, &eecd); - e1000_lower_ee_clk(hw, &eecd); - - mask = mask >> 1; - - } while (mask); - - /* We leave the "DI" bit set to "0" when we leave this routine. */ - eecd &= ~E1000_EECD_DI; - ew32(EECD, eecd); -} - -/** - * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM - * @hw: Struct containing variables accessed by shared code - * @count: number of bits to shift in - */ -static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) -{ - u32 eecd; - u32 i; - u16 data; - - /* In order to read a register from the EEPROM, we need to shift 'count' - * bits in from the EEPROM. Bits are "shifted in" by raising the clock - * input to the EEPROM (setting the SK bit), and then reading the value of - * the "DO" bit. During this "shifting in" process the "DI" bit should - * always be clear. - */ - - eecd = er32(EECD); - - eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); - data = 0; - - for (i = 0; i < count; i++) { - data = data << 1; - e1000_raise_ee_clk(hw, &eecd); - - eecd = er32(EECD); - - eecd &= ~(E1000_EECD_DI); - if (eecd & E1000_EECD_DO) - data |= 1; - - e1000_lower_ee_clk(hw, &eecd); - } - - return data; -} - -/** - * e1000_acquire_eeprom - Prepares EEPROM for access - * @hw: Struct containing variables accessed by shared code - * - * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This - * function should be called before issuing a command to the EEPROM. 
- */ -static s32 e1000_acquire_eeprom(struct e1000_hw *hw) -{ - struct e1000_eeprom_info *eeprom = &hw->eeprom; - u32 eecd, i = 0; - - e_dbg("e1000_acquire_eeprom"); - - eecd = er32(EECD); - - /* Request EEPROM Access */ - if (hw->mac_type > e1000_82544) { - eecd |= E1000_EECD_REQ; - ew32(EECD, eecd); - eecd = er32(EECD); - while ((!(eecd & E1000_EECD_GNT)) && - (i < E1000_EEPROM_GRANT_ATTEMPTS)) { - i++; - udelay(5); - eecd = er32(EECD); - } - if (!(eecd & E1000_EECD_GNT)) { - eecd &= ~E1000_EECD_REQ; - ew32(EECD, eecd); - e_dbg("Could not acquire EEPROM grant\n"); - return -E1000_ERR_EEPROM; - } - } - - /* Setup EEPROM for Read/Write */ - - if (eeprom->type == e1000_eeprom_microwire) { - /* Clear SK and DI */ - eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); - ew32(EECD, eecd); - - /* Set CS */ - eecd |= E1000_EECD_CS; - ew32(EECD, eecd); - } else if (eeprom->type == e1000_eeprom_spi) { - /* Clear SK and CS */ - eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - udelay(1); - } - - return E1000_SUCCESS; -} - -/** - * e1000_standby_eeprom - Returns EEPROM to a "standby" state - * @hw: Struct containing variables accessed by shared code - */ -static void e1000_standby_eeprom(struct e1000_hw *hw) -{ - struct e1000_eeprom_info *eeprom = &hw->eeprom; - u32 eecd; - - eecd = er32(EECD); - - if (eeprom->type == e1000_eeprom_microwire) { - eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - udelay(eeprom->delay_usec); - - /* Clock high */ - eecd |= E1000_EECD_SK; - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - udelay(eeprom->delay_usec); - - /* Select EEPROM */ - eecd |= E1000_EECD_CS; - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - udelay(eeprom->delay_usec); - - /* Clock low */ - eecd &= ~E1000_EECD_SK; - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - udelay(eeprom->delay_usec); - } else if (eeprom->type == e1000_eeprom_spi) { - /* Toggle CS to flush commands */ - eecd |= E1000_EECD_CS; - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - udelay(eeprom->delay_usec); - eecd &= ~E1000_EECD_CS; - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - udelay(eeprom->delay_usec); - } -} - -/** - * e1000_release_eeprom - drop chip select - * @hw: Struct containing variables accessed by shared code - * - * Terminates a command by inverting the EEPROM's chip select pin - */ -static void e1000_release_eeprom(struct e1000_hw *hw) -{ - u32 eecd; - - e_dbg("e1000_release_eeprom"); - - eecd = er32(EECD); - - if (hw->eeprom.type == e1000_eeprom_spi) { - eecd |= E1000_EECD_CS; /* Pull CS high */ - eecd &= ~E1000_EECD_SK; /* Lower SCK */ - - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - - udelay(hw->eeprom.delay_usec); - } else if (hw->eeprom.type == e1000_eeprom_microwire) { - /* cleanup eeprom */ - - /* CS on Microwire is active-high */ - eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); - - ew32(EECD, eecd); - - /* Rising edge of clock */ - eecd |= E1000_EECD_SK; - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - udelay(hw->eeprom.delay_usec); - - /* Falling edge of clock */ - eecd &= ~E1000_EECD_SK; - ew32(EECD, eecd); - E1000_WRITE_FLUSH(); - udelay(hw->eeprom.delay_usec); - } - - /* Stop requesting EEPROM access */ - if (hw->mac_type > e1000_82544) { - eecd &= ~E1000_EECD_REQ; - ew32(EECD, eecd); - } -} - -/** - * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. 
- * @hw: Struct containing variables accessed by shared code - */ -static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) -{ - u16 retry_count = 0; - u8 spi_stat_reg; - - e_dbg("e1000_spi_eeprom_ready"); - - /* Read "Status Register" repeatedly until the LSB is cleared. The - * EEPROM will signal that the command has been completed by clearing - * bit 0 of the internal status register. If it's not cleared within - * 5 milliseconds, then error out. - */ - retry_count = 0; - do { - e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, - hw->eeprom.opcode_bits); - spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8); - if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) - break; - - udelay(5); - retry_count += 5; - - e1000_standby_eeprom(hw); - } while (retry_count < EEPROM_MAX_RETRY_SPI); - - /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and - * only 0-5mSec on 5V devices) - */ - if (retry_count >= EEPROM_MAX_RETRY_SPI) { - e_dbg("SPI EEPROM Status error\n"); - return -E1000_ERR_EEPROM; - } - - return E1000_SUCCESS; -} - -/** - * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. - * @hw: Struct containing variables accessed by shared code - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * @words: number of words to read - */ -s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - s32 ret; - spin_lock(&e1000_eeprom_lock); - ret = e1000_do_read_eeprom(hw, offset, words, data); - spin_unlock(&e1000_eeprom_lock); - return ret; -} - -static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) -{ - struct e1000_eeprom_info *eeprom = &hw->eeprom; - u32 i = 0; - - e_dbg("e1000_read_eeprom"); - - if (hw->mac_type == e1000_ce4100) { - GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, - data); - return E1000_SUCCESS; - } - - /* If eeprom is not yet detected, do so now */ - if (eeprom->word_size == 0) - e1000_init_eeprom_params(hw); - - /* A check for invalid values: offset too large, too many words, and not - * enough words. - */ - if ((offset >= eeprom->word_size) - || (words > eeprom->word_size - offset) || (words == 0)) { - e_dbg("\"words\" parameter out of bounds. Words = %d," - "size = %d\n", offset, eeprom->word_size); - return -E1000_ERR_EEPROM; - } - - /* EEPROM's that don't use EERD to read require us to bit-bang the SPI - * directly. In this case, we need to acquire the EEPROM so that - * FW or other port software does not interrupt. - */ - /* Prepare the EEPROM for bit-bang reading */ - if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) - return -E1000_ERR_EEPROM; - - /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have - * acquired the EEPROM at this point, so any returns should release it */ - if (eeprom->type == e1000_eeprom_spi) { - u16 word_in; - u8 read_opcode = EEPROM_READ_OPCODE_SPI; - - if (e1000_spi_eeprom_ready(hw)) { - e1000_release_eeprom(hw); - return -E1000_ERR_EEPROM; - } - - e1000_standby_eeprom(hw); - - /* Some SPI eeproms use the 8th address bit embedded in the opcode */ - if ((eeprom->address_bits == 8) && (offset >= 128)) - read_opcode |= EEPROM_A8_OPCODE_SPI; - - /* Send the READ command (opcode + addr) */ - e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset * 2), - eeprom->address_bits); - - /* Read the data. The address of the eeprom internally increments with - * each byte (spi) being read, saving on the overhead of eeprom setup - * and tear-down. 
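
Two quirks appear in the SPI read path above: small parts carry the ninth address bit inside the opcode, and addresses on the wire are byte addresses while the driver counts 16-bit words (each word read back is then byte-swapped, as in the loop that follows). A stand-alone sketch of the command setup; the opcode values are assumptions matching common SPI EEPROM parts:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_READ_OPCODE_SPI 0x03 /* assumed */
#define EEPROM_A8_OPCODE_SPI   0x08 /* assumed: A8 bit carried in opcode */

/* Command setup as in e1000_do_read_eeprom(): fold A8 into the opcode
 * for 8-bit-address parts, and convert the word offset to bytes. */
static void spi_read_cmd(uint16_t offset, uint8_t addr_bits,
                         uint8_t *opcode, uint16_t *byte_addr)
{
        *opcode = EEPROM_READ_OPCODE_SPI;
        if (addr_bits == 8 && offset >= 128)
                *opcode |= EEPROM_A8_OPCODE_SPI;
        *byte_addr = (uint16_t)(offset * 2);
}

int main(void)
{
        uint8_t op;
        uint16_t addr;

        spi_read_cmd(130, 8, &op, &addr);
        printf("opcode=0x%02x byte_addr=%u\n", op, addr); /* 0x0b, 260 */
        return 0;
}
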
The address counter will roll over if reading beyond - * the size of the eeprom, thus allowing the entire memory to be read - * starting from any offset. */ - for (i = 0; i < words; i++) { - word_in = e1000_shift_in_ee_bits(hw, 16); - data[i] = (word_in >> 8) | (word_in << 8); - } - } else if (eeprom->type == e1000_eeprom_microwire) { - for (i = 0; i < words; i++) { - /* Send the READ command (opcode + addr) */ - e1000_shift_out_ee_bits(hw, - EEPROM_READ_OPCODE_MICROWIRE, - eeprom->opcode_bits); - e1000_shift_out_ee_bits(hw, (u16) (offset + i), - eeprom->address_bits); - - /* Read the data. For microwire, each word requires the overhead - * of eeprom setup and tear-down. */ - data[i] = e1000_shift_in_ee_bits(hw, 16); - e1000_standby_eeprom(hw); - } - } - - /* End this read operation */ - e1000_release_eeprom(hw); - - return E1000_SUCCESS; -} - -/** - * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum - * @hw: Struct containing variables accessed by shared code - * - * Reads the first 64 16 bit words of the EEPROM and sums the values read. - * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is - * valid. - */ -s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) -{ - u16 checksum = 0; - u16 i, eeprom_data; - - e_dbg("e1000_validate_eeprom_checksum"); - - for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { - if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { - e_dbg("EEPROM Read Error\n"); - return -E1000_ERR_EEPROM; - } - checksum += eeprom_data; - } - - if (checksum == (u16) EEPROM_SUM) - return E1000_SUCCESS; - else { - e_dbg("EEPROM Checksum Invalid\n"); - return -E1000_ERR_EEPROM; - } -} - -/** - * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum - * @hw: Struct containing variables accessed by shared code - * - * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. - * Writes the difference to word offset 63 of the EEPROM. - */ -s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) -{ - u16 checksum = 0; - u16 i, eeprom_data; - - e_dbg("e1000_update_eeprom_checksum"); - - for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { - if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { - e_dbg("EEPROM Read Error\n"); - return -E1000_ERR_EEPROM; - } - checksum += eeprom_data; - } - checksum = (u16) EEPROM_SUM - checksum; - if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { - e_dbg("EEPROM Write Error\n"); - return -E1000_ERR_EEPROM; - } - return E1000_SUCCESS; -} - -/** - * e1000_write_eeprom - write words to the different EEPROM types. - * @hw: Struct containing variables accessed by shared code - * @offset: offset within the EEPROM to be written to - * @words: number of words to write - * @data: 16 bit word to be written to the EEPROM - * - * If e1000_update_eeprom_checksum is not called after this function, the - * EEPROM will most likely contain an invalid checksum. 
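/* A standalone sketch of the checksum arithmetic used by the two
 * routines above, assuming only that the EEPROM image is available in
 * memory; eeprom_checksum() and eeprom_checksum_ok() are hypothetical
 * helpers, not driver code.  The invariant: the first 64 words
 * (offsets 0x00-0x3F) sum to 0xBABA modulo 2^16, so the checksum word
 * at offset 0x3F is 0xBABA minus the sum of the first 63 words.
 */
#include <stdint.h>

#define EEPROM_SUM_SKETCH          0xBABA
#define EEPROM_CHECKSUM_REG_SKETCH 0x3F	/* word offset 63 */

static uint16_t eeprom_checksum(const uint16_t *image)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < EEPROM_CHECKSUM_REG_SKETCH; i++)
		sum += image[i];	/* wraps naturally at 16 bits */
	return (uint16_t)(EEPROM_SUM_SKETCH - sum);
}

static int eeprom_checksum_ok(const uint16_t *image)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < EEPROM_CHECKSUM_REG_SKETCH + 1; i++)
		sum += image[i];
	return sum == EEPROM_SUM_SKETCH;
}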
- */ -s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - s32 ret; - spin_lock(&e1000_eeprom_lock); - ret = e1000_do_write_eeprom(hw, offset, words, data); - spin_unlock(&e1000_eeprom_lock); - return ret; -} - -static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) -{ - struct e1000_eeprom_info *eeprom = &hw->eeprom; - s32 status = 0; - - e_dbg("e1000_write_eeprom"); - - if (hw->mac_type == e1000_ce4100) { - GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, - data); - return E1000_SUCCESS; - } - - /* If eeprom is not yet detected, do so now */ - if (eeprom->word_size == 0) - e1000_init_eeprom_params(hw); - - /* A check for invalid values: offset too large, too many words, and not - * enough words. - */ - if ((offset >= eeprom->word_size) - || (words > eeprom->word_size - offset) || (words == 0)) { - e_dbg("\"words\" parameter out of bounds\n"); - return -E1000_ERR_EEPROM; - } - - /* Prepare the EEPROM for writing */ - if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) - return -E1000_ERR_EEPROM; - - if (eeprom->type == e1000_eeprom_microwire) { - status = e1000_write_eeprom_microwire(hw, offset, words, data); - } else { - status = e1000_write_eeprom_spi(hw, offset, words, data); - msleep(10); - } - - /* Done with writing */ - e1000_release_eeprom(hw); - - return status; -} - -/** - * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. - * @hw: Struct containing variables accessed by shared code - * @offset: offset within the EEPROM to be written to - * @words: number of words to write - * @data: pointer to array of 8 bit words to be written to the EEPROM - */ -static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) -{ - struct e1000_eeprom_info *eeprom = &hw->eeprom; - u16 widx = 0; - - e_dbg("e1000_write_eeprom_spi"); - - while (widx < words) { - u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; - - if (e1000_spi_eeprom_ready(hw)) - return -E1000_ERR_EEPROM; - - e1000_standby_eeprom(hw); - - /* Send the WRITE ENABLE command (8 bit opcode ) */ - e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, - eeprom->opcode_bits); - - e1000_standby_eeprom(hw); - - /* Some SPI eeproms use the 8th address bit embedded in the opcode */ - if ((eeprom->address_bits == 8) && (offset >= 128)) - write_opcode |= EEPROM_A8_OPCODE_SPI; - - /* Send the Write command (8-bit opcode + addr) */ - e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); - - e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2), - eeprom->address_bits); - - /* Send the data */ - - /* Loop to allow for up to whole page write (32 bytes) of eeprom */ - while (widx < words) { - u16 word_out = data[widx]; - word_out = (word_out >> 8) | (word_out << 8); - e1000_shift_out_ee_bits(hw, word_out, 16); - widx++; - - /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE - * operation, while the smaller eeproms are capable of an 8-byte - * PAGE WRITE operation. Break the inner loop to pass new address - */ - if ((((offset + widx) * 2) % eeprom->page_size) == 0) { - e1000_standby_eeprom(hw); - break; - } - } - } - - return E1000_SUCCESS; -} - -/** - * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. 
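/* The inner loop of e1000_write_eeprom_spi() above ends a PAGE WRITE
 * burst whenever the next byte address is page-aligned.  A minimal
 * sketch of that boundary arithmetic, assuming page_size is in bytes
 * (8 or 32 here); words_until_page_end() is a hypothetical helper,
 * not driver code.
 */
#include <stdint.h>

static uint16_t words_until_page_end(uint16_t word_offset, uint16_t page_size)
{
	uint32_t byte_addr = (uint32_t)word_offset * 2;	/* 2 bytes per word */
	uint32_t room = page_size - (byte_addr % page_size);

	return (uint16_t)(room / 2);	/* whole words before the boundary */
}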
- * @hw: Struct containing variables accessed by shared code - * @offset: offset within the EEPROM to be written to - * @words: number of words to write - * @data: pointer to array of 8 bit words to be written to the EEPROM - */ -static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, - u16 words, u16 *data) -{ - struct e1000_eeprom_info *eeprom = &hw->eeprom; - u32 eecd; - u16 words_written = 0; - u16 i = 0; - - e_dbg("e1000_write_eeprom_microwire"); - - /* Send the write enable command to the EEPROM (3-bit opcode plus - * 6/8-bit dummy address beginning with 11). It's less work to include - * the 11 of the dummy address as part of the opcode than it is to shift - * it over the correct number of bits for the address. This puts the - * EEPROM into write/erase mode. - */ - e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, - (u16) (eeprom->opcode_bits + 2)); - - e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); - - /* Prepare the EEPROM */ - e1000_standby_eeprom(hw); - - while (words_written < words) { - /* Send the Write command (3-bit opcode + addr) */ - e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, - eeprom->opcode_bits); - - e1000_shift_out_ee_bits(hw, (u16) (offset + words_written), - eeprom->address_bits); - - /* Send the data */ - e1000_shift_out_ee_bits(hw, data[words_written], 16); - - /* Toggle the CS line. This in effect tells the EEPROM to execute - * the previous command. - */ - e1000_standby_eeprom(hw); - - /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will - * signal that the command has been completed by raising the DO signal. - * If DO does not go high in 10 milliseconds, then error out. - */ - for (i = 0; i < 200; i++) { - eecd = er32(EECD); - if (eecd & E1000_EECD_DO) - break; - udelay(50); - } - if (i == 200) { - e_dbg("EEPROM Write did not complete\n"); - return -E1000_ERR_EEPROM; - } - - /* Recover from write */ - e1000_standby_eeprom(hw); - - words_written++; - } - - /* Send the write disable command to the EEPROM (3-bit opcode plus - * 6/8-bit dummy address beginning with 10). It's less work to include - * the 10 of the dummy address as part of the opcode than it is to shift - * it over the correct number of bits for the address. This takes the - * EEPROM out of write/erase mode. 
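/* All of the e1000_shift_out_ee_bits() calls above follow the same
 * MSB-first bit-bang pattern: present a bit on DI, then pulse SK so
 * the part samples it on the rising edge.  A self-contained sketch
 * with a hypothetical pin-driver callback (not the driver's own
 * helper):
 */
#include <stdint.h>

typedef void (*set_pins_fn)(int di, int sk);

static void shift_out_bits(set_pins_fn set_pins, uint16_t data, uint16_t count)
{
	uint32_t mask = 1u << (count - 1);

	while (mask) {
		int di = (data & mask) != 0;

		set_pins(di, 0);	/* data valid before the rising edge */
		set_pins(di, 1);	/* clock high: EEPROM samples DI */
		set_pins(di, 0);	/* clock low */
		mask >>= 1;
	}
}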
- */ - e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, - (u16) (eeprom->opcode_bits + 2)); - - e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); - - return E1000_SUCCESS; -} - -/** - * e1000_read_mac_addr - read the adapters MAC from eeprom - * @hw: Struct containing variables accessed by shared code - * - * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the - * second function of dual function devices - */ -s32 e1000_read_mac_addr(struct e1000_hw *hw) -{ - u16 offset; - u16 eeprom_data, i; - - e_dbg("e1000_read_mac_addr"); - - for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { - offset = i >> 1; - if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { - e_dbg("EEPROM Read Error\n"); - return -E1000_ERR_EEPROM; - } - hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); - hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8); - } - - switch (hw->mac_type) { - default: - break; - case e1000_82546: - case e1000_82546_rev_3: - if (er32(STATUS) & E1000_STATUS_FUNC_1) - hw->perm_mac_addr[5] ^= 0x01; - break; - } - - for (i = 0; i < NODE_ADDRESS_SIZE; i++) - hw->mac_addr[i] = hw->perm_mac_addr[i]; - return E1000_SUCCESS; -} - -/** - * e1000_init_rx_addrs - Initializes receive address filters. - * @hw: Struct containing variables accessed by shared code - * - * Places the MAC address in receive address register 0 and clears the rest - * of the receive address registers. Clears the multicast table. Assumes - * the receiver is in reset when the routine is called. - */ -static void e1000_init_rx_addrs(struct e1000_hw *hw) -{ - u32 i; - u32 rar_num; - - e_dbg("e1000_init_rx_addrs"); - - /* Setup the receive address. */ - e_dbg("Programming MAC Address into RAR[0]\n"); - - e1000_rar_set(hw, hw->mac_addr, 0); - - rar_num = E1000_RAR_ENTRIES; - - /* Zero out the other 15 receive addresses. */ - e_dbg("Clearing RAR[1-15]\n"); - for (i = 1; i < rar_num; i++) { - E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); - E1000_WRITE_FLUSH(); - E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); - E1000_WRITE_FLUSH(); - } -} - -/** - * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table - * @hw: Struct containing variables accessed by shared code - * @mc_addr: the multicast address to hash - */ -u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) -{ - u32 hash_value = 0; - - /* The portion of the address that is used for the hash table is - * determined by the mc_filter_type setting. - */ - switch (hw->mc_filter_type) { - /* [0] [1] [2] [3] [4] [5] - * 01 AA 00 12 34 56 - * LSB MSB - */ - case 0: - /* [47:36] i.e. 0x563 for above example address */ - hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); - break; - case 1: - /* [46:35] i.e. 0xAC6 for above example address */ - hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); - break; - case 2: - /* [45:34] i.e. 0x5D8 for above example address */ - hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); - break; - case 3: - /* [43:32] i.e. 0x634 for above example address */ - hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); - break; - } - - hash_value &= 0xFFF; - return hash_value; -} - -/** - * e1000_rar_set - Puts an ethernet address into a receive address register. 
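/* A worked instance of the filter-type table above, assuming type 0:
 * the hash is bits [47:36] of the address as transmitted, which live
 * in the high parts of bytes 4 and 5.  mc_hash_type0() is a
 * hypothetical standalone helper; for 01:AA:00:12:34:56 it returns
 * 0x563.  The 12-bit result then indexes the 4096-bit multicast
 * table (upper bits select one of 128 registers, lower 5 bits the bit
 * within that register).
 */
#include <stdint.h>

static uint16_t mc_hash_type0(const uint8_t mc_addr[6])
{
	uint16_t hash = (uint16_t)((mc_addr[4] >> 4) |
				   ((uint16_t)mc_addr[5] << 4));

	return hash & 0xFFF;
}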
- * @hw: Struct containing variables accessed by shared code
- * @addr: Address to put into receive address register
- * @index: Receive address register to write
- */
-void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
-{
-	u32 rar_low, rar_high;
-
-	/* HW expects these in little endian so we reverse the byte order
-	 * from network order (big endian) to little endian
-	 */
-	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
-		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
-	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
-
-	/* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
-	 * unit hang.
-	 *
-	 * Description:
-	 * If there are any Rx frames queued up or otherwise present in the HW
-	 * before RSS is enabled, and then we enable RSS, the HW Rx unit will
-	 * hang.  To work around this issue, we have to disable receives and
-	 * flush out all Rx frames before we enable RSS.  To do so, we redirect
-	 * all Rx traffic to manageability and then reset the HW.
-	 * This flushes away Rx frames, and (since the redirections to
-	 * manageability persist across resets) keeps new ones from coming in
-	 * while we work.  Then, we clear the Address Valid AV bit for all MAC
-	 * addresses and undo the re-direction to manageability.
-	 * Now, frames are coming in again, but the MAC won't accept them, so
-	 * far so good.  We now proceed to initialize RSS (if necessary) and
-	 * configure the Rx unit.  Last, we re-enable the AV bits and continue
-	 * on our merry way.
-	 */
-	switch (hw->mac_type) {
-	default:
-		/* Indicate to hardware the Address is Valid. */
-		rar_high |= E1000_RAH_AV;
-		break;
-	}
-
-	E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
-	E1000_WRITE_FLUSH();
-	E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
-	E1000_WRITE_FLUSH();
-}
-
-/**
- * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
- * @hw: Struct containing variables accessed by shared code
- * @offset: Offset in VLAN filter table to write
- * @value: Value to write into VLAN filter table
- */
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
-{
-	u32 temp;
-
-	if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
-		temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
-		E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
-		E1000_WRITE_FLUSH();
-		E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
-		E1000_WRITE_FLUSH();
-	} else {
-		E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
-		E1000_WRITE_FLUSH();
-	}
-}
-
-/**
- * e1000_clear_vfta - Clears the VLAN filter table
- * @hw: Struct containing variables accessed by shared code
- */
-static void e1000_clear_vfta(struct e1000_hw *hw)
-{
-	u32 offset;
-	u32 vfta_value = 0;
-	u32 vfta_offset = 0;
-	u32 vfta_bit_in_reg = 0;
-
-	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
-		/* If the offset we want to clear is the same offset of the
-		 * manageability VLAN ID, then clear all bits except that of
-		 * the manageability unit */
-		vfta_value = (offset == vfta_offset) ?
vfta_bit_in_reg : 0; - E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); - E1000_WRITE_FLUSH(); - } -} - -static s32 e1000_id_led_init(struct e1000_hw *hw) -{ - u32 ledctl; - const u32 ledctl_mask = 0x000000FF; - const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; - const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; - u16 eeprom_data, i, temp; - const u16 led_mask = 0x0F; - - e_dbg("e1000_id_led_init"); - - if (hw->mac_type < e1000_82540) { - /* Nothing to do */ - return E1000_SUCCESS; - } - - ledctl = er32(LEDCTL); - hw->ledctl_default = ledctl; - hw->ledctl_mode1 = hw->ledctl_default; - hw->ledctl_mode2 = hw->ledctl_default; - - if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { - e_dbg("EEPROM Read Error\n"); - return -E1000_ERR_EEPROM; - } - - if ((eeprom_data == ID_LED_RESERVED_0000) || - (eeprom_data == ID_LED_RESERVED_FFFF)) { - eeprom_data = ID_LED_DEFAULT; - } - - for (i = 0; i < 4; i++) { - temp = (eeprom_data >> (i << 2)) & led_mask; - switch (temp) { - case ID_LED_ON1_DEF2: - case ID_LED_ON1_ON2: - case ID_LED_ON1_OFF2: - hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); - hw->ledctl_mode1 |= ledctl_on << (i << 3); - break; - case ID_LED_OFF1_DEF2: - case ID_LED_OFF1_ON2: - case ID_LED_OFF1_OFF2: - hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); - hw->ledctl_mode1 |= ledctl_off << (i << 3); - break; - default: - /* Do nothing */ - break; - } - switch (temp) { - case ID_LED_DEF1_ON2: - case ID_LED_ON1_ON2: - case ID_LED_OFF1_ON2: - hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); - hw->ledctl_mode2 |= ledctl_on << (i << 3); - break; - case ID_LED_DEF1_OFF2: - case ID_LED_ON1_OFF2: - case ID_LED_OFF1_OFF2: - hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); - hw->ledctl_mode2 |= ledctl_off << (i << 3); - break; - default: - /* Do nothing */ - break; - } - } - return E1000_SUCCESS; -} - -/** - * e1000_setup_led - * @hw: Struct containing variables accessed by shared code - * - * Prepares SW controlable LED for use and saves the current state of the LED. - */ -s32 e1000_setup_led(struct e1000_hw *hw) -{ - u32 ledctl; - s32 ret_val = E1000_SUCCESS; - - e_dbg("e1000_setup_led"); - - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - case e1000_82543: - case e1000_82544: - /* No setup necessary */ - break; - case e1000_82541: - case e1000_82547: - case e1000_82541_rev_2: - case e1000_82547_rev_2: - /* Turn off PHY Smart Power Down (if enabled) */ - ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, - &hw->phy_spd_default); - if (ret_val) - return ret_val; - ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, - (u16) (hw->phy_spd_default & - ~IGP01E1000_GMII_SPD)); - if (ret_val) - return ret_val; - /* Fall Through */ - default: - if (hw->media_type == e1000_media_type_fiber) { - ledctl = er32(LEDCTL); - /* Save current LEDCTL settings */ - hw->ledctl_default = ledctl; - /* Turn off LED0 */ - ledctl &= ~(E1000_LEDCTL_LED0_IVRT | - E1000_LEDCTL_LED0_BLINK | - E1000_LEDCTL_LED0_MODE_MASK); - ledctl |= (E1000_LEDCTL_MODE_LED_OFF << - E1000_LEDCTL_LED0_MODE_SHIFT); - ew32(LEDCTL, ledctl); - } else if (hw->media_type == e1000_media_type_copper) - ew32(LEDCTL, hw->ledctl_mode1); - break; - } - - return E1000_SUCCESS; -} - -/** - * e1000_cleanup_led - Restores the saved state of the SW controlable LED. 
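/* e1000_id_led_init() above is pure field arithmetic: nibble i of the
 * EEPROM word carries the 4-bit ID code for LED i, and byte i of
 * LEDCTL carries that LED's 8-bit mode.  A minimal sketch of the same
 * shifts ((i << 2) and (i << 3)); both helpers are hypothetical, not
 * driver code.
 */
#include <stdint.h>

static uint8_t id_led_nibble(uint16_t eeprom_word, int led)
{
	return (eeprom_word >> (led * 4)) & 0x0F;
}

static uint32_t ledctl_set_mode(uint32_t ledctl, int led, uint32_t mode)
{
	ledctl &= ~(0xFFu << (led * 8));	/* clear this LED's byte */
	ledctl |= (mode & 0xFFu) << (led * 8);	/* install the new mode */
	return ledctl;
}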
- * @hw: Struct containing variables accessed by shared code - */ -s32 e1000_cleanup_led(struct e1000_hw *hw) -{ - s32 ret_val = E1000_SUCCESS; - - e_dbg("e1000_cleanup_led"); - - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - case e1000_82543: - case e1000_82544: - /* No cleanup necessary */ - break; - case e1000_82541: - case e1000_82547: - case e1000_82541_rev_2: - case e1000_82547_rev_2: - /* Turn on PHY Smart Power Down (if previously enabled) */ - ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, - hw->phy_spd_default); - if (ret_val) - return ret_val; - /* Fall Through */ - default: - /* Restore LEDCTL settings */ - ew32(LEDCTL, hw->ledctl_default); - break; - } - - return E1000_SUCCESS; -} - -/** - * e1000_led_on - Turns on the software controllable LED - * @hw: Struct containing variables accessed by shared code - */ -s32 e1000_led_on(struct e1000_hw *hw) -{ - u32 ctrl = er32(CTRL); - - e_dbg("e1000_led_on"); - - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - case e1000_82543: - /* Set SW Defineable Pin 0 to turn on the LED */ - ctrl |= E1000_CTRL_SWDPIN0; - ctrl |= E1000_CTRL_SWDPIO0; - break; - case e1000_82544: - if (hw->media_type == e1000_media_type_fiber) { - /* Set SW Defineable Pin 0 to turn on the LED */ - ctrl |= E1000_CTRL_SWDPIN0; - ctrl |= E1000_CTRL_SWDPIO0; - } else { - /* Clear SW Defineable Pin 0 to turn on the LED */ - ctrl &= ~E1000_CTRL_SWDPIN0; - ctrl |= E1000_CTRL_SWDPIO0; - } - break; - default: - if (hw->media_type == e1000_media_type_fiber) { - /* Clear SW Defineable Pin 0 to turn on the LED */ - ctrl &= ~E1000_CTRL_SWDPIN0; - ctrl |= E1000_CTRL_SWDPIO0; - } else if (hw->media_type == e1000_media_type_copper) { - ew32(LEDCTL, hw->ledctl_mode2); - return E1000_SUCCESS; - } - break; - } - - ew32(CTRL, ctrl); - - return E1000_SUCCESS; -} - -/** - * e1000_led_off - Turns off the software controllable LED - * @hw: Struct containing variables accessed by shared code - */ -s32 e1000_led_off(struct e1000_hw *hw) -{ - u32 ctrl = er32(CTRL); - - e_dbg("e1000_led_off"); - - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - case e1000_82543: - /* Clear SW Defineable Pin 0 to turn off the LED */ - ctrl &= ~E1000_CTRL_SWDPIN0; - ctrl |= E1000_CTRL_SWDPIO0; - break; - case e1000_82544: - if (hw->media_type == e1000_media_type_fiber) { - /* Clear SW Defineable Pin 0 to turn off the LED */ - ctrl &= ~E1000_CTRL_SWDPIN0; - ctrl |= E1000_CTRL_SWDPIO0; - } else { - /* Set SW Defineable Pin 0 to turn off the LED */ - ctrl |= E1000_CTRL_SWDPIN0; - ctrl |= E1000_CTRL_SWDPIO0; - } - break; - default: - if (hw->media_type == e1000_media_type_fiber) { - /* Set SW Defineable Pin 0 to turn off the LED */ - ctrl |= E1000_CTRL_SWDPIN0; - ctrl |= E1000_CTRL_SWDPIO0; - } else if (hw->media_type == e1000_media_type_copper) { - ew32(LEDCTL, hw->ledctl_mode1); - return E1000_SUCCESS; - } - break; - } - - ew32(CTRL, ctrl); - - return E1000_SUCCESS; -} - -/** - * e1000_clear_hw_cntrs - Clears all hardware statistics counters. 
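/* e1000_led_on()/e1000_led_off() above differ per MAC and media only
 * in the LED's polarity on software-definable pin 0.  A sketch of that
 * decision, assuming illustrative bit positions for the two CTRL bits
 * (the real values are defined in e1000_hw.h); drive_led() is a
 * hypothetical helper:
 */
#include <stdint.h>

#define CTRL_SWDPIN0_SKETCH 0x00040000u	/* level driven on SW pin 0 */
#define CTRL_SWDPIO0_SKETCH 0x00400000u	/* direction: 1 = output */

static uint32_t drive_led(uint32_t ctrl, int on, int active_high)
{
	ctrl |= CTRL_SWDPIO0_SKETCH;		/* make the pin an output */
	if (!!on == !!active_high)
		ctrl |= CTRL_SWDPIN0_SKETCH;
	else
		ctrl &= ~CTRL_SWDPIN0_SKETCH;
	return ctrl;
}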
- * @hw: Struct containing variables accessed by shared code
- */
-static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
-{
-	volatile u32 temp;
-
-	temp = er32(CRCERRS);
-	temp = er32(SYMERRS);
-	temp = er32(MPC);
-	temp = er32(SCC);
-	temp = er32(ECOL);
-	temp = er32(MCC);
-	temp = er32(LATECOL);
-	temp = er32(COLC);
-	temp = er32(DC);
-	temp = er32(SEC);
-	temp = er32(RLEC);
-	temp = er32(XONRXC);
-	temp = er32(XONTXC);
-	temp = er32(XOFFRXC);
-	temp = er32(XOFFTXC);
-	temp = er32(FCRUC);
-
-	temp = er32(PRC64);
-	temp = er32(PRC127);
-	temp = er32(PRC255);
-	temp = er32(PRC511);
-	temp = er32(PRC1023);
-	temp = er32(PRC1522);
-
-	temp = er32(GPRC);
-	temp = er32(BPRC);
-	temp = er32(MPRC);
-	temp = er32(GPTC);
-	temp = er32(GORCL);
-	temp = er32(GORCH);
-	temp = er32(GOTCL);
-	temp = er32(GOTCH);
-	temp = er32(RNBC);
-	temp = er32(RUC);
-	temp = er32(RFC);
-	temp = er32(ROC);
-	temp = er32(RJC);
-	temp = er32(TORL);
-	temp = er32(TORH);
-	temp = er32(TOTL);
-	temp = er32(TOTH);
-	temp = er32(TPR);
-	temp = er32(TPT);
-
-	temp = er32(PTC64);
-	temp = er32(PTC127);
-	temp = er32(PTC255);
-	temp = er32(PTC511);
-	temp = er32(PTC1023);
-	temp = er32(PTC1522);
-
-	temp = er32(MPTC);
-	temp = er32(BPTC);
-
-	if (hw->mac_type < e1000_82543)
-		return;
-
-	temp = er32(ALGNERRC);
-	temp = er32(RXERRC);
-	temp = er32(TNCRS);
-	temp = er32(CEXTERR);
-	temp = er32(TSCTC);
-	temp = er32(TSCTFC);
-
-	if (hw->mac_type <= e1000_82544)
-		return;
-
-	temp = er32(MGTPRC);
-	temp = er32(MGTPDC);
-	temp = er32(MGTPTC);
-}
-
-/**
- * e1000_reset_adaptive - Resets Adaptive IFS to its default state.
- * @hw: Struct containing variables accessed by shared code
- *
- * Call this after e1000_init_hw. You may override the IFS defaults by setting
- * hw->ifs_params_forced to true. However, you must initialize
- * hw->current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
- * before calling this function.
- */
-void e1000_reset_adaptive(struct e1000_hw *hw)
-{
-	e_dbg("e1000_reset_adaptive");
-
-	if (hw->adaptive_ifs) {
-		if (!hw->ifs_params_forced) {
-			hw->current_ifs_val = 0;
-			hw->ifs_min_val = IFS_MIN;
-			hw->ifs_max_val = IFS_MAX;
-			hw->ifs_step_size = IFS_STEP;
-			hw->ifs_ratio = IFS_RATIO;
-		}
-		hw->in_ifs_mode = false;
-		ew32(AIT, 0);
-	} else {
-		e_dbg("Not in Adaptive IFS mode!\n");
-	}
-}
-
-/**
- * e1000_update_adaptive - update adaptive IFS
- * @hw: Struct containing variables accessed by shared code
- *
- * Called during the callback/watchdog routine to update IFS value based on
- * the ratio of transmits to collisions.
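/* The statistics registers read by e1000_clear_hw_cntrs() above are
 * clear-on-read, so "clearing" them is just reading and discarding
 * each one; the volatile qualifier keeps the compiler from eliding the
 * dead loads.  A generic sketch against a hypothetical mapped register
 * block:
 */
#include <stdint.h>

static void clear_read_to_clear_counters(volatile const uint32_t *regs,
					 const uint16_t *byte_offsets, int n)
{
	volatile uint32_t discard;
	int i;

	for (i = 0; i < n; i++)
		discard = regs[byte_offsets[i] / 4];	/* the read clears it */
	(void)discard;
}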
- */ -void e1000_update_adaptive(struct e1000_hw *hw) -{ - e_dbg("e1000_update_adaptive"); - - if (hw->adaptive_ifs) { - if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { - if (hw->tx_packet_delta > MIN_NUM_XMITS) { - hw->in_ifs_mode = true; - if (hw->current_ifs_val < hw->ifs_max_val) { - if (hw->current_ifs_val == 0) - hw->current_ifs_val = - hw->ifs_min_val; - else - hw->current_ifs_val += - hw->ifs_step_size; - ew32(AIT, hw->current_ifs_val); - } - } - } else { - if (hw->in_ifs_mode - && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { - hw->current_ifs_val = 0; - hw->in_ifs_mode = false; - ew32(AIT, 0); - } - } - } else { - e_dbg("Not in Adaptive IFS mode!\n"); - } -} - -/** - * e1000_tbi_adjust_stats - * @hw: Struct containing variables accessed by shared code - * @frame_len: The length of the frame in question - * @mac_addr: The Ethernet destination address of the frame in question - * - * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT - */ -void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, - u32 frame_len, u8 *mac_addr) -{ - u64 carry_bit; - - /* First adjust the frame length. */ - frame_len--; - /* We need to adjust the statistics counters, since the hardware - * counters overcount this packet as a CRC error and undercount - * the packet as a good packet - */ - /* This packet should not be counted as a CRC error. */ - stats->crcerrs--; - /* This packet does count as a Good Packet Received. */ - stats->gprc++; - - /* Adjust the Good Octets received counters */ - carry_bit = 0x80000000 & stats->gorcl; - stats->gorcl += frame_len; - /* If the high bit of Gorcl (the low 32 bits of the Good Octets - * Received Count) was one before the addition, - * AND it is zero after, then we lost the carry out, - * need to add one to Gorch (Good Octets Received Count High). - * This could be simplified if all environments supported - * 64-bit integers. - */ - if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) - stats->gorch++; - /* Is this a broadcast or multicast? Check broadcast first, - * since the test for a multicast frame will test positive on - * a broadcast frame. - */ - if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff)) - /* Broadcast packet */ - stats->bprc++; - else if (*mac_addr & 0x01) - /* Multicast packet */ - stats->mprc++; - - if (frame_len == hw->max_frame_size) { - /* In this case, the hardware has overcounted the number of - * oversize frames. - */ - if (stats->roc > 0) - stats->roc--; - } - - /* Adjust the bin counters when the extra byte put the frame in the - * wrong bin. Remember that the frame_len was adjusted above. 
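/* The gorcl/gorch update above emulates a 64-bit octet counter with
 * two 32-bit halves: if bit 31 of the low half was set before the add
 * and is clear after it, the add wrapped and the high half must be
 * bumped.  Valid for increments below 2^31, which frame lengths
 * always are.  add_to_split_counter() is a hypothetical standalone
 * helper:
 */
#include <stdint.h>

static void add_to_split_counter(uint32_t *lo, uint32_t *hi, uint32_t inc)
{
	uint32_t carry_bit = *lo & 0x80000000u;

	*lo += inc;
	if (carry_bit && !(*lo & 0x80000000u))
		(*hi)++;	/* carry out of the low half */
}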
- */ - if (frame_len == 64) { - stats->prc64++; - stats->prc127--; - } else if (frame_len == 127) { - stats->prc127++; - stats->prc255--; - } else if (frame_len == 255) { - stats->prc255++; - stats->prc511--; - } else if (frame_len == 511) { - stats->prc511++; - stats->prc1023--; - } else if (frame_len == 1023) { - stats->prc1023++; - stats->prc1522--; - } else if (frame_len == 1522) { - stats->prc1522++; - } -} - -/** - * e1000_get_bus_info - * @hw: Struct containing variables accessed by shared code - * - * Gets the current PCI bus type, speed, and width of the hardware - */ -void e1000_get_bus_info(struct e1000_hw *hw) -{ - u32 status; - - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - hw->bus_type = e1000_bus_type_pci; - hw->bus_speed = e1000_bus_speed_unknown; - hw->bus_width = e1000_bus_width_unknown; - break; - default: - status = er32(STATUS); - hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? - e1000_bus_type_pcix : e1000_bus_type_pci; - - if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { - hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? - e1000_bus_speed_66 : e1000_bus_speed_120; - } else if (hw->bus_type == e1000_bus_type_pci) { - hw->bus_speed = (status & E1000_STATUS_PCI66) ? - e1000_bus_speed_66 : e1000_bus_speed_33; - } else { - switch (status & E1000_STATUS_PCIX_SPEED) { - case E1000_STATUS_PCIX_SPEED_66: - hw->bus_speed = e1000_bus_speed_66; - break; - case E1000_STATUS_PCIX_SPEED_100: - hw->bus_speed = e1000_bus_speed_100; - break; - case E1000_STATUS_PCIX_SPEED_133: - hw->bus_speed = e1000_bus_speed_133; - break; - default: - hw->bus_speed = e1000_bus_speed_reserved; - break; - } - } - hw->bus_width = (status & E1000_STATUS_BUS64) ? - e1000_bus_width_64 : e1000_bus_width_32; - break; - } -} - -/** - * e1000_write_reg_io - * @hw: Struct containing variables accessed by shared code - * @offset: offset to write to - * @value: value to write - * - * Writes a value to one of the devices registers using port I/O (as opposed to - * memory mapped I/O). Only 82544 and newer devices support port I/O. - */ -static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) -{ - unsigned long io_addr = hw->io_base; - unsigned long io_data = hw->io_base + 4; - - e1000_io_write(hw, io_addr, offset); - e1000_io_write(hw, io_data, value); -} - -/** - * e1000_get_cable_length - Estimates the cable length. - * @hw: Struct containing variables accessed by shared code - * @min_length: The estimated minimum length - * @max_length: The estimated maximum length - * - * returns: - E1000_ERR_XXX - * E1000_SUCCESS - * - * This function always returns a ranged length (minimum & maximum). - * So for M88 phy's, this function interprets the one value returned from the - * register to the minimum and maximum range. - * For IGP phy's, the function calculates the range by the AGC registers. 
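/* e1000_get_bus_info() above is a straight decode of latched STATUS
 * bits: one flag distinguishes PCI from PCI-X, one reports a 64-bit
 * bus, and a two-bit field encodes the PCI-X clock.  A sketch with
 * illustrative placeholder bit positions (not asserted values):
 */
#include <stdint.h>

#define STATUS_PCIX_MODE_SKETCH 0x00002000u
#define STATUS_BUS64_SKETCH     0x00001000u

struct bus_info_sketch {
	int is_pcix;
	int is_64bit;
};

static struct bus_info_sketch decode_bus(uint32_t status)
{
	struct bus_info_sketch bi;

	bi.is_pcix = (status & STATUS_PCIX_MODE_SKETCH) != 0;
	bi.is_64bit = (status & STATUS_BUS64_SKETCH) != 0;
	return bi;
}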
- */ -static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, - u16 *max_length) -{ - s32 ret_val; - u16 agc_value = 0; - u16 i, phy_data; - u16 cable_length; - - e_dbg("e1000_get_cable_length"); - - *min_length = *max_length = 0; - - /* Use old method for Phy older than IGP */ - if (hw->phy_type == e1000_phy_m88) { - - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, - &phy_data); - if (ret_val) - return ret_val; - cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; - - /* Convert the enum value to ranged values */ - switch (cable_length) { - case e1000_cable_length_50: - *min_length = 0; - *max_length = e1000_igp_cable_length_50; - break; - case e1000_cable_length_50_80: - *min_length = e1000_igp_cable_length_50; - *max_length = e1000_igp_cable_length_80; - break; - case e1000_cable_length_80_110: - *min_length = e1000_igp_cable_length_80; - *max_length = e1000_igp_cable_length_110; - break; - case e1000_cable_length_110_140: - *min_length = e1000_igp_cable_length_110; - *max_length = e1000_igp_cable_length_140; - break; - case e1000_cable_length_140: - *min_length = e1000_igp_cable_length_140; - *max_length = e1000_igp_cable_length_170; - break; - default: - return -E1000_ERR_PHY; - break; - } - } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ - u16 cur_agc_value; - u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; - static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { - IGP01E1000_PHY_AGC_A, - IGP01E1000_PHY_AGC_B, - IGP01E1000_PHY_AGC_C, - IGP01E1000_PHY_AGC_D - }; - /* Read the AGC registers for all channels */ - for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { - - ret_val = - e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); - if (ret_val) - return ret_val; - - cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; - - /* Value bound check. */ - if ((cur_agc_value >= - IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) - || (cur_agc_value == 0)) - return -E1000_ERR_PHY; - - agc_value += cur_agc_value; - - /* Update minimal AGC value. */ - if (min_agc_value > cur_agc_value) - min_agc_value = cur_agc_value; - } - - /* Remove the minimal AGC result for length < 50m */ - if (agc_value < - IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { - agc_value -= min_agc_value; - - /* Get the average length of the remaining 3 channels */ - agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); - } else { - /* Get the average length of all the 4 channels. */ - agc_value /= IGP01E1000_PHY_CHANNEL_NUM; - } - - /* Set the range of the calculated length. */ - *min_length = ((e1000_igp_cable_length_table[agc_value] - - IGP01E1000_AGC_RANGE) > 0) ? - (e1000_igp_cable_length_table[agc_value] - - IGP01E1000_AGC_RANGE) : 0; - *max_length = e1000_igp_cable_length_table[agc_value] + - IGP01E1000_AGC_RANGE; - } - - return E1000_SUCCESS; -} - -/** - * e1000_check_polarity - Check the cable polarity - * @hw: Struct containing variables accessed by shared code - * @polarity: output parameter : 0 - Polarity is not reversed - * 1 - Polarity is reversed. - * - * returns: - E1000_ERR_XXX - * E1000_SUCCESS - * - * For phy's older than IGP, this function simply reads the polarity bit in the - * Phy Status register. For IGP phy's, this bit is valid only if link speed is - * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will - * return 0. If the link speed is 1000 Mbps the polarity status is in the - * IGP01E1000_PHY_PCS_INIT_REG. 
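/* The IGP branch above averages four AGC channel readings, but for
 * short cables it first discards the smallest reading, which is the
 * least reliable.  A sketch of that policy; short_sum corresponds to
 * 4 * e1000_igp_cable_length_50 in the code above, and the helper is
 * hypothetical:
 */
#include <stdint.h>

#define AGC_CHANNELS 4

static uint16_t igp_average_agc(const uint16_t agc[AGC_CHANNELS],
				uint32_t short_sum)
{
	uint32_t sum = 0;
	uint16_t min = 0xFFFF;
	int i;

	for (i = 0; i < AGC_CHANNELS; i++) {
		sum += agc[i];
		if (agc[i] < min)
			min = agc[i];
	}

	if (sum < short_sum)	/* short cable: drop the minimum channel */
		return (uint16_t)((sum - min) / (AGC_CHANNELS - 1));
	return (uint16_t)(sum / AGC_CHANNELS);
}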
- */ -static s32 e1000_check_polarity(struct e1000_hw *hw, - e1000_rev_polarity *polarity) -{ - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_check_polarity"); - - if (hw->phy_type == e1000_phy_m88) { - /* return the Polarity bit in the Status register. */ - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, - &phy_data); - if (ret_val) - return ret_val; - *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> - M88E1000_PSSR_REV_POLARITY_SHIFT) ? - e1000_rev_polarity_reversed : e1000_rev_polarity_normal; - - } else if (hw->phy_type == e1000_phy_igp) { - /* Read the Status register to check the speed */ - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, - &phy_data); - if (ret_val) - return ret_val; - - /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to - * find the polarity status */ - if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == - IGP01E1000_PSSR_SPEED_1000MBPS) { - - /* Read the GIG initialization PCS register (0x00B4) */ - ret_val = - e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, - &phy_data); - if (ret_val) - return ret_val; - - /* Check the polarity bits */ - *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? - e1000_rev_polarity_reversed : - e1000_rev_polarity_normal; - } else { - /* For 10 Mbps, read the polarity bit in the status register. (for - * 100 Mbps this bit is always 0) */ - *polarity = - (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? - e1000_rev_polarity_reversed : - e1000_rev_polarity_normal; - } - } - return E1000_SUCCESS; -} - -/** - * e1000_check_downshift - Check if Downshift occurred - * @hw: Struct containing variables accessed by shared code - * @downshift: output parameter : 0 - No Downshift occurred. - * 1 - Downshift occurred. - * - * returns: - E1000_ERR_XXX - * E1000_SUCCESS - * - * For phy's older than IGP, this function reads the Downshift bit in the Phy - * Specific Status register. For IGP phy's, it reads the Downgrade bit in the - * Link Health register. In IGP this bit is latched high, so the driver must - * read it immediately after link is established. - */ -static s32 e1000_check_downshift(struct e1000_hw *hw) -{ - s32 ret_val; - u16 phy_data; - - e_dbg("e1000_check_downshift"); - - if (hw->phy_type == e1000_phy_igp) { - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, - &phy_data); - if (ret_val) - return ret_val; - - hw->speed_downgraded = - (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0; - } else if (hw->phy_type == e1000_phy_m88) { - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, - &phy_data); - if (ret_val) - return ret_val; - - hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> - M88E1000_PSSR_DOWNSHIFT_SHIFT; - } - - return E1000_SUCCESS; -} - -/** - * e1000_config_dsp_after_link_change - * @hw: Struct containing variables accessed by shared code - * @link_up: was link up at the time this was called - * - * returns: - E1000_ERR_PHY if fail to read/write the PHY - * E1000_SUCCESS at any other case. - * - * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a - * gigabit link is achieved to improve link quality. 
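/* The IGP branch of e1000_check_polarity() above picks the register
 * that is valid at the current speed before testing the polarity bit.
 * A decision-only sketch with the MDIO access abstracted behind a
 * callback; every register number and mask is passed in rather than
 * asserted:
 */
#include <stdint.h>

typedef int (*phy_read_fn)(uint32_t reg, uint16_t *val);

static int igp_polarity_reversed(phy_read_fn rd, uint32_t status_reg,
				 uint16_t speed_mask, uint16_t speed_1000,
				 uint16_t pol_bit_10, uint32_t pcs_init_reg,
				 uint16_t pol_mask_1000)
{
	uint16_t v;

	if (rd(status_reg, &v))
		return -1;			/* MDIO read failed */

	if ((v & speed_mask) == speed_1000) {
		if (rd(pcs_init_reg, &v))
			return -1;
		return (v & pol_mask_1000) != 0;	/* 1000 Mb/s path */
	}
	/* 10 Mb/s uses the status register; at 100 Mb/s the bit is 0 */
	return (v & pol_bit_10) != 0;
}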
- */ - -static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) -{ - s32 ret_val; - u16 phy_data, phy_saved_data, speed, duplex, i; - static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { - IGP01E1000_PHY_AGC_PARAM_A, - IGP01E1000_PHY_AGC_PARAM_B, - IGP01E1000_PHY_AGC_PARAM_C, - IGP01E1000_PHY_AGC_PARAM_D - }; - u16 min_length, max_length; - - e_dbg("e1000_config_dsp_after_link_change"); - - if (hw->phy_type != e1000_phy_igp) - return E1000_SUCCESS; - - if (link_up) { - ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); - if (ret_val) { - e_dbg("Error getting link speed and duplex\n"); - return ret_val; - } - - if (speed == SPEED_1000) { - - ret_val = - e1000_get_cable_length(hw, &min_length, - &max_length); - if (ret_val) - return ret_val; - - if ((hw->dsp_config_state == e1000_dsp_config_enabled) - && min_length >= e1000_igp_cable_length_50) { - - for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { - ret_val = - e1000_read_phy_reg(hw, - dsp_reg_array[i], - &phy_data); - if (ret_val) - return ret_val; - - phy_data &= - ~IGP01E1000_PHY_EDAC_MU_INDEX; - - ret_val = - e1000_write_phy_reg(hw, - dsp_reg_array - [i], phy_data); - if (ret_val) - return ret_val; - } - hw->dsp_config_state = - e1000_dsp_config_activated; - } - - if ((hw->ffe_config_state == e1000_ffe_config_enabled) - && (min_length < e1000_igp_cable_length_50)) { - - u16 ffe_idle_err_timeout = - FFE_IDLE_ERR_COUNT_TIMEOUT_20; - u32 idle_errs = 0; - - /* clear previous idle error counts */ - ret_val = - e1000_read_phy_reg(hw, PHY_1000T_STATUS, - &phy_data); - if (ret_val) - return ret_val; - - for (i = 0; i < ffe_idle_err_timeout; i++) { - udelay(1000); - ret_val = - e1000_read_phy_reg(hw, - PHY_1000T_STATUS, - &phy_data); - if (ret_val) - return ret_val; - - idle_errs += - (phy_data & - SR_1000T_IDLE_ERROR_CNT); - if (idle_errs > - SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) - { - hw->ffe_config_state = - e1000_ffe_config_active; - - ret_val = - e1000_write_phy_reg(hw, - IGP01E1000_PHY_DSP_FFE, - IGP01E1000_PHY_DSP_FFE_CM_CP); - if (ret_val) - return ret_val; - break; - } - - if (idle_errs) - ffe_idle_err_timeout = - FFE_IDLE_ERR_COUNT_TIMEOUT_100; - } - } - } - } else { - if (hw->dsp_config_state == e1000_dsp_config_activated) { - /* Save off the current value of register 0x2F5B to be restored at - * the end of the routines. 
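/* The FFE branch above accumulates the PHY's idle-error count once per
 * millisecond, trips when a threshold is exceeded, and stretches the
 * observation window as soon as any errors appear (20 ms -> 100 ms in
 * the code).  A sketch of just that polling policy; the callback and
 * all thresholds are parameters, not the driver's constants:
 */
#include <stdint.h>

static int excessive_idle_errors(uint16_t (*read_status)(void),
				 unsigned int short_window,
				 unsigned int long_window,
				 unsigned int trip_count, uint16_t err_mask)
{
	unsigned int window = short_window, i;
	unsigned int idle_errs = 0;

	for (i = 0; i < window; i++) {
		/* a 1 ms delay would sit here in real code */
		idle_errs += read_status() & err_mask;
		if (idle_errs > trip_count)
			return 1;	/* give up: reconfigure the FFE */
		if (idle_errs)
			window = long_window;	/* errors seen: watch longer */
	}
	return 0;
}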
*/ - ret_val = - e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); - - if (ret_val) - return ret_val; - - /* Disable the PHY transmitter */ - ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); - - if (ret_val) - return ret_val; - - mdelay(20); - - ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_FORCE_GIGA); - if (ret_val) - return ret_val; - for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { - ret_val = - e1000_read_phy_reg(hw, dsp_reg_array[i], - &phy_data); - if (ret_val) - return ret_val; - - phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; - phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; - - ret_val = - e1000_write_phy_reg(hw, dsp_reg_array[i], - phy_data); - if (ret_val) - return ret_val; - } - - ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_RESTART_AUTONEG); - if (ret_val) - return ret_val; - - mdelay(20); - - /* Now enable the transmitter */ - ret_val = - e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); - - if (ret_val) - return ret_val; - - hw->dsp_config_state = e1000_dsp_config_enabled; - } - - if (hw->ffe_config_state == e1000_ffe_config_active) { - /* Save off the current value of register 0x2F5B to be restored at - * the end of the routines. */ - ret_val = - e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); - - if (ret_val) - return ret_val; - - /* Disable the PHY transmitter */ - ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); - - if (ret_val) - return ret_val; - - mdelay(20); - - ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_FORCE_GIGA); - if (ret_val) - return ret_val; - ret_val = - e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, - IGP01E1000_PHY_DSP_FFE_DEFAULT); - if (ret_val) - return ret_val; - - ret_val = e1000_write_phy_reg(hw, 0x0000, - IGP01E1000_IEEE_RESTART_AUTONEG); - if (ret_val) - return ret_val; - - mdelay(20); - - /* Now enable the transmitter */ - ret_val = - e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); - - if (ret_val) - return ret_val; - - hw->ffe_config_state = e1000_ffe_config_enabled; - } - } - return E1000_SUCCESS; -} - -/** - * e1000_set_phy_mode - Set PHY to class A mode - * @hw: Struct containing variables accessed by shared code - * - * Assumes the following operations will follow to enable the new class mode. - * 1. Do a PHY soft reset - * 2. Restart auto-negotiation or force link. - */ -static s32 e1000_set_phy_mode(struct e1000_hw *hw) -{ - s32 ret_val; - u16 eeprom_data; - - e_dbg("e1000_set_phy_mode"); - - if ((hw->mac_type == e1000_82545_rev_3) && - (hw->media_type == e1000_media_type_copper)) { - ret_val = - e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, - &eeprom_data); - if (ret_val) { - return ret_val; - } - - if ((eeprom_data != EEPROM_RESERVED_WORD) && - (eeprom_data & EEPROM_PHY_CLASS_A)) { - ret_val = - e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, - 0x000B); - if (ret_val) - return ret_val; - ret_val = - e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, - 0x8104); - if (ret_val) - return ret_val; - - hw->phy_reset_disable = false; - } - } - - return E1000_SUCCESS; -} - -/** - * e1000_set_d3_lplu_state - set d3 link power state - * @hw: Struct containing variables accessed by shared code - * @active: true to enable lplu false to disable lplu. - * - * This function sets the lplu state according to the active flag. When - * activating lplu this function also disables smart speed and vise versa. - * lplu will not be activated unless the device autonegotiation advertisement - * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 
- * - * returns: - E1000_ERR_PHY if fail to read/write the PHY - * E1000_SUCCESS at any other case. - */ -static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) -{ - s32 ret_val; - u16 phy_data; - e_dbg("e1000_set_d3_lplu_state"); - - if (hw->phy_type != e1000_phy_igp) - return E1000_SUCCESS; - - /* During driver activity LPLU should not be used or it will attain link - * from the lowest speeds starting from 10Mbps. The capability is used for - * Dx transitions and states */ - if (hw->mac_type == e1000_82541_rev_2 - || hw->mac_type == e1000_82547_rev_2) { - ret_val = - e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); - if (ret_val) - return ret_val; - } - - if (!active) { - if (hw->mac_type == e1000_82541_rev_2 || - hw->mac_type == e1000_82547_rev_2) { - phy_data &= ~IGP01E1000_GMII_FLEX_SPD; - ret_val = - e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, - phy_data); - if (ret_val) - return ret_val; - } - - /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during - * Dx states where the power conservation is most important. During - * driver activity we should enable SmartSpeed, so performance is - * maintained. */ - if (hw->smart_speed == e1000_smart_speed_on) { - ret_val = - e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &phy_data); - if (ret_val) - return ret_val; - - phy_data |= IGP01E1000_PSCFR_SMART_SPEED; - ret_val = - e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - phy_data); - if (ret_val) - return ret_val; - } else if (hw->smart_speed == e1000_smart_speed_off) { - ret_val = - e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &phy_data); - if (ret_val) - return ret_val; - - phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = - e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - phy_data); - if (ret_val) - return ret_val; - } - } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) - || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) - || (hw->autoneg_advertised == - AUTONEG_ADVERTISE_10_100_ALL)) { - - if (hw->mac_type == e1000_82541_rev_2 || - hw->mac_type == e1000_82547_rev_2) { - phy_data |= IGP01E1000_GMII_FLEX_SPD; - ret_val = - e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, - phy_data); - if (ret_val) - return ret_val; - } - - /* When LPLU is enabled we should disable SmartSpeed */ - ret_val = - e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &phy_data); - if (ret_val) - return ret_val; - - phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = - e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - phy_data); - if (ret_val) - return ret_val; - - } - return E1000_SUCCESS; -} - -/** - * e1000_set_vco_speed - * @hw: Struct containing variables accessed by shared code - * - * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
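/* e1000_set_d3_lplu_state() above encodes one policy: LPLU and
 * SmartSpeed are mutually exclusive.  Entering a low-power Dx state
 * turns LPLU on and SmartSpeed off; resuming turns LPLU off and
 * restores the user's SmartSpeed preference.  A sketch of that state
 * decision (hypothetical types, not driver code):
 */
#include <stdbool.h>

struct phy_power_cfg {
	bool lplu;
	bool smart_speed;
};

static struct phy_power_cfg phy_power_policy(bool lplu_active,
					     bool user_smart_speed)
{
	struct phy_power_cfg cfg;

	cfg.lplu = lplu_active;
	/* SmartSpeed may only be enabled while LPLU is off */
	cfg.smart_speed = lplu_active ? false : user_smart_speed;
	return cfg;
}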
- */ -static s32 e1000_set_vco_speed(struct e1000_hw *hw) -{ - s32 ret_val; - u16 default_page = 0; - u16 phy_data; - - e_dbg("e1000_set_vco_speed"); - - switch (hw->mac_type) { - case e1000_82545_rev_3: - case e1000_82546_rev_3: - break; - default: - return E1000_SUCCESS; - } - - /* Set PHY register 30, page 5, bit 8 to 0 */ - - ret_val = - e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); - if (ret_val) - return ret_val; - - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); - if (ret_val) - return ret_val; - - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); - if (ret_val) - return ret_val; - - phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); - if (ret_val) - return ret_val; - - /* Set PHY register 30, page 4, bit 11 to 1 */ - - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); - if (ret_val) - return ret_val; - - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); - if (ret_val) - return ret_val; - - phy_data |= M88E1000_PHY_VCO_REG_BIT11; - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); - if (ret_val) - return ret_val; - - ret_val = - e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); - if (ret_val) - return ret_val; - - return E1000_SUCCESS; -} - - -/** - * e1000_enable_mng_pass_thru - check for bmc pass through - * @hw: Struct containing variables accessed by shared code - * - * Verifies the hardware needs to allow ARPs to be processed by the host - * returns: - true/false - */ -u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) -{ - u32 manc; - - if (hw->asf_firmware_present) { - manc = er32(MANC); - - if (!(manc & E1000_MANC_RCV_TCO_EN) || - !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) - return false; - if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) - return true; - } - return false; -} - -static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) -{ - s32 ret_val; - u16 mii_status_reg; - u16 i; - - /* Polarity reversal workaround for forced 10F/10H links. */ - - /* Disable the transmitter on the PHY */ - - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); - if (ret_val) - return ret_val; - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); - if (ret_val) - return ret_val; - - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); - if (ret_val) - return ret_val; - - /* This loop will early-out if the NO link condition has been met. */ - for (i = PHY_FORCE_TIME; i > 0; i--) { - /* Read the MII Status Register and wait for Link Status bit - * to be clear. 
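/* e1000_enable_mng_pass_thru() above distills to one predicate on the
 * MANC register: the BMC must be receiving TCO traffic through the MAC
 * address filters, over SMBus rather than ASF.  A sketch with
 * illustrative placeholder bits (the real values live in e1000_hw.h):
 */
#include <stdbool.h>
#include <stdint.h>

#define MANC_SMBUS_EN_SKETCH           (1u << 0)
#define MANC_ASF_EN_SKETCH             (1u << 1)
#define MANC_RCV_TCO_EN_SKETCH         (1u << 2)
#define MANC_EN_MAC_ADDR_FILTER_SKETCH (1u << 3)

static bool mng_pass_thru(uint32_t manc)
{
	if (!(manc & MANC_RCV_TCO_EN_SKETCH) ||
	    !(manc & MANC_EN_MAC_ADDR_FILTER_SKETCH))
		return false;
	return (manc & MANC_SMBUS_EN_SKETCH) &&
	       !(manc & MANC_ASF_EN_SKETCH);
}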
- */ - - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - - if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) - break; - mdelay(100); - } - - /* Recommended delay time after link has been lost */ - mdelay(1000); - - /* Now we will re-enable th transmitter on the PHY */ - - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); - if (ret_val) - return ret_val; - mdelay(50); - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); - if (ret_val) - return ret_val; - mdelay(50); - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); - if (ret_val) - return ret_val; - mdelay(50); - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); - if (ret_val) - return ret_val; - - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); - if (ret_val) - return ret_val; - - /* This loop will early-out if the link condition has been met. */ - for (i = PHY_FORCE_TIME; i > 0; i--) { - /* Read the MII Status Register and wait for Link Status bit - * to be set. - */ - - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - - if (mii_status_reg & MII_SR_LINK_STATUS) - break; - mdelay(100); - } - return E1000_SUCCESS; -} - -/** - * e1000_get_auto_rd_done - * @hw: Struct containing variables accessed by shared code - * - * Check for EEPROM Auto Read bit done. - * returns: - E1000_ERR_RESET if fail to reset MAC - * E1000_SUCCESS at any other case. - */ -static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) -{ - e_dbg("e1000_get_auto_rd_done"); - msleep(5); - return E1000_SUCCESS; -} - -/** - * e1000_get_phy_cfg_done - * @hw: Struct containing variables accessed by shared code - * - * Checks if the PHY configuration is done - * returns: - E1000_ERR_RESET if fail to reset MAC - * E1000_SUCCESS at any other case. - */ -static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) -{ - e_dbg("e1000_get_phy_cfg_done"); - mdelay(10); - return E1000_SUCCESS; -} diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h deleted file mode 100644 index 5c9a840..0000000 --- a/drivers/net/e1000/e1000_hw.h +++ /dev/null @@ -1,3103 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* e1000_hw.h - * Structures, enums, and macros for the MAC - */ - -#ifndef _E1000_HW_H_ -#define _E1000_HW_H_ - -#include "e1000_osdep.h" - - -/* Forward declarations of structures used by the shared code */ -struct e1000_hw; -struct e1000_hw_stats; - -/* Enumerated types specific to the e1000 hardware */ -/* Media Access Controllers */ -typedef enum { - e1000_undefined = 0, - e1000_82542_rev2_0, - e1000_82542_rev2_1, - e1000_82543, - e1000_82544, - e1000_82540, - e1000_82545, - e1000_82545_rev_3, - e1000_82546, - e1000_ce4100, - e1000_82546_rev_3, - e1000_82541, - e1000_82541_rev_2, - e1000_82547, - e1000_82547_rev_2, - e1000_num_macs -} e1000_mac_type; - -typedef enum { - e1000_eeprom_uninitialized = 0, - e1000_eeprom_spi, - e1000_eeprom_microwire, - e1000_eeprom_flash, - e1000_eeprom_none, /* No NVM support */ - e1000_num_eeprom_types -} e1000_eeprom_type; - -/* Media Types */ -typedef enum { - e1000_media_type_copper = 0, - e1000_media_type_fiber = 1, - e1000_media_type_internal_serdes = 2, - e1000_num_media_types -} e1000_media_type; - -typedef enum { - e1000_10_half = 0, - e1000_10_full = 1, - e1000_100_half = 2, - e1000_100_full = 3 -} e1000_speed_duplex_type; - -/* Flow Control Settings */ -typedef enum { - E1000_FC_NONE = 0, - E1000_FC_RX_PAUSE = 1, - E1000_FC_TX_PAUSE = 2, - E1000_FC_FULL = 3, - E1000_FC_DEFAULT = 0xFF -} e1000_fc_type; - -struct e1000_shadow_ram { - u16 eeprom_word; - bool modified; -}; - -/* PCI bus types */ -typedef enum { - e1000_bus_type_unknown = 0, - e1000_bus_type_pci, - e1000_bus_type_pcix, - e1000_bus_type_reserved -} e1000_bus_type; - -/* PCI bus speeds */ -typedef enum { - e1000_bus_speed_unknown = 0, - e1000_bus_speed_33, - e1000_bus_speed_66, - e1000_bus_speed_100, - e1000_bus_speed_120, - e1000_bus_speed_133, - e1000_bus_speed_reserved -} e1000_bus_speed; - -/* PCI bus widths */ -typedef enum { - e1000_bus_width_unknown = 0, - e1000_bus_width_32, - e1000_bus_width_64, - e1000_bus_width_reserved -} e1000_bus_width; - -/* PHY status info structure and supporting enums */ -typedef enum { - e1000_cable_length_50 = 0, - e1000_cable_length_50_80, - e1000_cable_length_80_110, - e1000_cable_length_110_140, - e1000_cable_length_140, - e1000_cable_length_undefined = 0xFF -} e1000_cable_length; - -typedef enum { - e1000_gg_cable_length_60 = 0, - e1000_gg_cable_length_60_115 = 1, - e1000_gg_cable_length_115_150 = 2, - e1000_gg_cable_length_150 = 4 -} e1000_gg_cable_length; - -typedef enum { - e1000_igp_cable_length_10 = 10, - e1000_igp_cable_length_20 = 20, - e1000_igp_cable_length_30 = 30, - e1000_igp_cable_length_40 = 40, - e1000_igp_cable_length_50 = 50, - e1000_igp_cable_length_60 = 60, - e1000_igp_cable_length_70 = 70, - e1000_igp_cable_length_80 = 80, - e1000_igp_cable_length_90 = 90, - e1000_igp_cable_length_100 = 100, - e1000_igp_cable_length_110 = 110, - e1000_igp_cable_length_115 = 115, - e1000_igp_cable_length_120 = 120, - e1000_igp_cable_length_130 = 130, - e1000_igp_cable_length_140 = 140, - e1000_igp_cable_length_150 = 150, - e1000_igp_cable_length_160 = 160, - e1000_igp_cable_length_170 = 170, - e1000_igp_cable_length_180 = 180 -} e1000_igp_cable_length; - -typedef enum { - e1000_10bt_ext_dist_enable_normal = 0, - e1000_10bt_ext_dist_enable_lower, - e1000_10bt_ext_dist_enable_undefined = 0xFF -} e1000_10bt_ext_dist_enable; - -typedef enum { - e1000_rev_polarity_normal = 0, - e1000_rev_polarity_reversed, - 
e1000_rev_polarity_undefined = 0xFF -} e1000_rev_polarity; - -typedef enum { - e1000_downshift_normal = 0, - e1000_downshift_activated, - e1000_downshift_undefined = 0xFF -} e1000_downshift; - -typedef enum { - e1000_smart_speed_default = 0, - e1000_smart_speed_on, - e1000_smart_speed_off -} e1000_smart_speed; - -typedef enum { - e1000_polarity_reversal_enabled = 0, - e1000_polarity_reversal_disabled, - e1000_polarity_reversal_undefined = 0xFF -} e1000_polarity_reversal; - -typedef enum { - e1000_auto_x_mode_manual_mdi = 0, - e1000_auto_x_mode_manual_mdix, - e1000_auto_x_mode_auto1, - e1000_auto_x_mode_auto2, - e1000_auto_x_mode_undefined = 0xFF -} e1000_auto_x_mode; - -typedef enum { - e1000_1000t_rx_status_not_ok = 0, - e1000_1000t_rx_status_ok, - e1000_1000t_rx_status_undefined = 0xFF -} e1000_1000t_rx_status; - -typedef enum { - e1000_phy_m88 = 0, - e1000_phy_igp, - e1000_phy_8211, - e1000_phy_8201, - e1000_phy_undefined = 0xFF -} e1000_phy_type; - -typedef enum { - e1000_ms_hw_default = 0, - e1000_ms_force_master, - e1000_ms_force_slave, - e1000_ms_auto -} e1000_ms_type; - -typedef enum { - e1000_ffe_config_enabled = 0, - e1000_ffe_config_active, - e1000_ffe_config_blocked -} e1000_ffe_config; - -typedef enum { - e1000_dsp_config_disabled = 0, - e1000_dsp_config_enabled, - e1000_dsp_config_activated, - e1000_dsp_config_undefined = 0xFF -} e1000_dsp_config; - -struct e1000_phy_info { - e1000_cable_length cable_length; - e1000_10bt_ext_dist_enable extended_10bt_distance; - e1000_rev_polarity cable_polarity; - e1000_downshift downshift; - e1000_polarity_reversal polarity_correction; - e1000_auto_x_mode mdix_mode; - e1000_1000t_rx_status local_rx; - e1000_1000t_rx_status remote_rx; -}; - -struct e1000_phy_stats { - u32 idle_errors; - u32 receive_errors; -}; - -struct e1000_eeprom_info { - e1000_eeprom_type type; - u16 word_size; - u16 opcode_bits; - u16 address_bits; - u16 delay_usec; - u16 page_size; -}; - -/* Flex ASF Information */ -#define E1000_HOST_IF_MAX_SIZE 2048 - -typedef enum { - e1000_byte_align = 0, - e1000_word_align = 1, - e1000_dword_align = 2 -} e1000_align_type; - -/* Error Codes */ -#define E1000_SUCCESS 0 -#define E1000_ERR_EEPROM 1 -#define E1000_ERR_PHY 2 -#define E1000_ERR_CONFIG 3 -#define E1000_ERR_PARAM 4 -#define E1000_ERR_MAC_TYPE 5 -#define E1000_ERR_PHY_TYPE 6 -#define E1000_ERR_RESET 9 -#define E1000_ERR_MASTER_REQUESTS_PENDING 10 -#define E1000_ERR_HOST_INTERFACE_COMMAND 11 -#define E1000_BLK_PHY_RESET 12 - -#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ - (((_value) & 0xff00) >> 8)) - -/* Function prototypes */ -/* Initialization */ -s32 e1000_reset_hw(struct e1000_hw *hw); -s32 e1000_init_hw(struct e1000_hw *hw); -s32 e1000_set_mac_type(struct e1000_hw *hw); -void e1000_set_media_type(struct e1000_hw *hw); - -/* Link Configuration */ -s32 e1000_setup_link(struct e1000_hw *hw); -s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); -void e1000_config_collision_dist(struct e1000_hw *hw); -s32 e1000_check_for_link(struct e1000_hw *hw); -s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); -s32 e1000_force_mac_fc(struct e1000_hw *hw); - -/* PHY */ -s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); -s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); -s32 e1000_phy_hw_reset(struct e1000_hw *hw); -s32 e1000_phy_reset(struct e1000_hw *hw); -s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); -s32 e1000_validate_mdi_setting(struct e1000_hw *hw); - -/* 
EEPROM Functions */
-s32 e1000_init_eeprom_params(struct e1000_hw *hw);
-
-/* MNG HOST IF functions */
-u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
-
-#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
-
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
-#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
-#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
-#define E1000_MNG_IAMT_MODE 0x3
-#define E1000_MNG_ICH_IAMT_MODE 0x2
-#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
-
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP VLAN support enabled */
-#define E1000_VFTA_ENTRY_SHIFT 0x5
-#define E1000_VFTA_ENTRY_MASK 0x7F
-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
-
-struct e1000_host_mng_command_header {
- u8 command_id;
- u8 checksum;
- u16 reserved1;
- u16 reserved2;
- u16 command_length;
-};
-
-struct e1000_host_mng_command_info {
- struct e1000_host_mng_command_header command_header; /* Command header / command result header */
- u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data, length 0..0x658 */
-};
-#ifdef __BIG_ENDIAN
-struct e1000_host_mng_dhcp_cookie {
- u32 signature;
- u16 vlan_id;
- u8 reserved0;
- u8 status;
- u32 reserved1;
- u8 checksum;
- u8 reserved3;
- u16 reserved2;
-};
-#else
-struct e1000_host_mng_dhcp_cookie {
- u32 signature;
- u8 status;
- u8 reserved0;
- u16 vlan_id;
- u32 reserved1;
- u16 reserved2;
- u8 reserved3;
- u8 checksum;
-};
-#endif
-
-bool e1000_check_mng_mode(struct e1000_hw *hw);
-s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
-s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
-s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
-s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
-s32 e1000_read_mac_addr(struct e1000_hw *hw);
-
-/* Filters (multicast, vlan, receive) */
-u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
-void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
-void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
-
-/* LED functions */
-s32 e1000_setup_led(struct e1000_hw *hw);
-s32 e1000_cleanup_led(struct e1000_hw *hw);
-s32 e1000_led_on(struct e1000_hw *hw);
-s32 e1000_led_off(struct e1000_hw *hw);
-s32 e1000_blink_led_start(struct e1000_hw *hw);
-
-/* Adaptive IFS Functions */
-
-/* Everything else */
-void e1000_reset_adaptive(struct e1000_hw *hw);
-void e1000_update_adaptive(struct e1000_hw *hw);
-void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
- u32 frame_len, u8 * mac_addr);
-void e1000_get_bus_info(struct e1000_hw *hw);
-void e1000_pci_set_mwi(struct e1000_hw *hw);
-void e1000_pci_clear_mwi(struct e1000_hw *hw);
-void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
-int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
-/* Port I/O is only supported on 82544 and newer */
-void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
-
-#define E1000_READ_REG_IO(a, reg) \
- e1000_read_reg_io((a), E1000_##reg)
-#define E1000_WRITE_REG_IO(a, reg, val) \
- e1000_write_reg_io((a), E1000_##reg, val)
-
-/* PCI Device IDs */
-#define E1000_DEV_ID_82542 0x1000
-#define E1000_DEV_ID_82543GC_FIBER 0x1001
-#define E1000_DEV_ID_82543GC_COPPER 0x1004
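/*
 * Editor's illustrative sketch, not part of the original header: device
 * IDs from this list are what a driver's PCI probe table is built from.
 * The table name below is hypothetical and <linux/module.h> plus
 * <linux/pci.h> are assumed; the driver's real table lives in
 * e1000_main.c.
 */
static const struct pci_device_id example_e1000_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82542) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82543GC_COPPER) },
	{ 0, }	/* table must end with an all-zero terminator entry */
};
MODULE_DEVICE_TABLE(pci, example_e1000_pci_tbl);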
-#define E1000_DEV_ID_82544EI_COPPER 0x1008
-#define E1000_DEV_ID_82544EI_FIBER 0x1009
-#define E1000_DEV_ID_82544GC_COPPER 0x100C
-#define E1000_DEV_ID_82544GC_LOM 0x100D
-#define E1000_DEV_ID_82540EM 0x100E
-#define E1000_DEV_ID_82540EM_LOM 0x1015
-#define E1000_DEV_ID_82540EP_LOM 0x1016
-#define E1000_DEV_ID_82540EP 0x1017
-#define E1000_DEV_ID_82540EP_LP 0x101E
-#define E1000_DEV_ID_82545EM_COPPER 0x100F
-#define E1000_DEV_ID_82545EM_FIBER 0x1011
-#define E1000_DEV_ID_82545GM_COPPER 0x1026
-#define E1000_DEV_ID_82545GM_FIBER 0x1027
-#define E1000_DEV_ID_82545GM_SERDES 0x1028
-#define E1000_DEV_ID_82546EB_COPPER 0x1010
-#define E1000_DEV_ID_82546EB_FIBER 0x1012
-#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
-#define E1000_DEV_ID_82541EI 0x1013
-#define E1000_DEV_ID_82541EI_MOBILE 0x1018
-#define E1000_DEV_ID_82541ER_LOM 0x1014
-#define E1000_DEV_ID_82541ER 0x1078
-#define E1000_DEV_ID_82547GI 0x1075
-#define E1000_DEV_ID_82541GI 0x1076
-#define E1000_DEV_ID_82541GI_MOBILE 0x1077
-#define E1000_DEV_ID_82541GI_LF 0x107C
-#define E1000_DEV_ID_82546GB_COPPER 0x1079
-#define E1000_DEV_ID_82546GB_FIBER 0x107A
-#define E1000_DEV_ID_82546GB_SERDES 0x107B
-#define E1000_DEV_ID_82546GB_PCIE 0x108A
-#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
-#define E1000_DEV_ID_82547EI 0x1019
-#define E1000_DEV_ID_82547EI_MOBILE 0x101A
-#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
-#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
-
-#define NODE_ADDRESS_SIZE 6
-#define ETH_LENGTH_OF_ADDRESS 6
-
-/* MAC decode size is 128K - This is the size of BAR0 */
-#define MAC_DECODE_SIZE (128 * 1024)
-
-#define E1000_82542_2_0_REV_ID 2
-#define E1000_82542_2_1_REV_ID 3
-#define E1000_REVISION_0 0
-#define E1000_REVISION_1 1
-#define E1000_REVISION_2 2
-#define E1000_REVISION_3 3
-
-#define SPEED_10 10
-#define SPEED_100 100
-#define SPEED_1000 1000
-#define HALF_DUPLEX 1
-#define FULL_DUPLEX 2
-
-/* The sizes (in bytes) of an Ethernet packet */
-#define ENET_HEADER_SIZE 14
-#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
-#define ETHERNET_FCS_SIZE 4
-#define MINIMUM_ETHERNET_PACKET_SIZE \
- (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
-#define CRC_LENGTH ETHERNET_FCS_SIZE
-#define MAX_JUMBO_FRAME_SIZE 0x3F00
-
-/* 802.1q VLAN Packet Sizes */
-#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
-
-/* Ethertype field values */
-#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
-#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
-#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
-
-/* Packet Header defines */
-#define IP_PROTOCOL_TCP 6
-#define IP_PROTOCOL_UDP 0x11
-
-/* This defines the bits that are set in the Interrupt Mask
- * Set/Read Register. Each bit is documented below:
- * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
- * o RXSEQ = Receive Sequence Error
- */
-#define POLL_IMS_ENABLE_MASK ( \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ)
-
-/* This defines the bits that are set in the Interrupt Mask
- * Set/Read Register. Each bit is documented below:
- * o RXT0 = Receiver Timer Interrupt (ring 0)
- * o TXDW = Transmit Descriptor Written Back
- * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
- * o RXSEQ = Receive Sequence Error
- * o LSC = Link Status Change
- */
-#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC)
-
-/* Number of high/low register pairs in the RAR.
The RAR (Receive Address - * Registers) holds the directed and multicast addresses that we monitor. We - * reserve one of these spots for our directed address, allowing us room for - * E1000_RAR_ENTRIES - 1 multicast addresses. - */ -#define E1000_RAR_ENTRIES 15 - -#define MIN_NUMBER_OF_DESCRIPTORS 8 -#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 - -/* Receive Descriptor */ -struct e1000_rx_desc { - __le64 buffer_addr; /* Address of the descriptor's data buffer */ - __le16 length; /* Length of data DMAed into data buffer */ - __le16 csum; /* Packet checksum */ - u8 status; /* Descriptor status */ - u8 errors; /* Descriptor Errors */ - __le16 special; -}; - -/* Receive Descriptor - Extended */ -union e1000_rx_desc_extended { - struct { - __le64 buffer_addr; - __le64 reserved; - } read; - struct { - struct { - __le32 mrq; /* Multiple Rx Queues */ - union { - __le32 rss; /* RSS Hash */ - struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - __le32 status_error; /* ext status/error */ - __le16 length; - __le16 vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ -}; - -#define MAX_PS_BUFFERS 4 -/* Receive Descriptor - Packet Split */ -union e1000_rx_desc_packet_split { - struct { - /* one buffer for protocol header(s), three data buffers */ - __le64 buffer_addr[MAX_PS_BUFFERS]; - } read; - struct { - struct { - __le32 mrq; /* Multiple Rx Queues */ - union { - __le32 rss; /* RSS Hash */ - struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - __le32 status_error; /* ext status/error */ - __le16 length0; /* length of buffer 0 */ - __le16 vlan; /* VLAN tag */ - } middle; - struct { - __le16 header_status; - __le16 length[3]; /* length of buffers 1-3 */ - } upper; - __le64 reserved; - } wb; /* writeback */ -}; - -/* Receive Descriptor bit definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ -#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ -#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ -#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ -#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ -#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ -#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ -#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ -#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define E1000_RXD_SPC_PRI_SHIFT 13 -#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define E1000_RXD_SPC_CFI_SHIFT 12 - -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define E1000_RXDEXT_STATERR_TCPE 0x20000000 -#define E1000_RXDEXT_STATERR_IPE 0x40000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 - -#define 
E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 -#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF - -/* mask to determine if packets should be dropped due to frame errors */ -#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ - E1000_RXD_ERR_CE | \ - E1000_RXD_ERR_SE | \ - E1000_RXD_ERR_SEQ | \ - E1000_RXD_ERR_CXE | \ - E1000_RXD_ERR_RXE) - -/* Same mask, but for extended and packet split descriptors */ -#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) - -/* Transmit Descriptor */ -struct e1000_tx_desc { - __le64 buffer_addr; /* Address of the descriptor's data buffer */ - union { - __le32 data; - struct { - __le16 length; /* Data buffer length */ - u8 cso; /* Checksum offset */ - u8 cmd; /* Descriptor control */ - } flags; - } lower; - union { - __le32 data; - struct { - u8 status; /* Descriptor status */ - u8 css; /* Checksum start */ - __le16 special; - } fields; - } upper; -}; - -/* Transmit Descriptor bit definitions */ -#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ -#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ -#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ -#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ -#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ -#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ -#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ -#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ -#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ - -/* Offload Context Descriptor */ -struct e1000_context_desc { - union { - __le32 ip_config; - struct { - u8 ipcss; /* IP checksum start */ - u8 ipcso; /* IP checksum offset */ - __le16 ipcse; /* IP checksum end */ - } ip_fields; - } lower_setup; - union { - __le32 tcp_config; - struct { - u8 tucss; /* TCP checksum start */ - u8 tucso; /* TCP checksum offset */ - __le16 tucse; /* TCP checksum end */ - } tcp_fields; - } upper_setup; - __le32 cmd_and_length; /* */ - union { - __le32 data; - struct { - u8 status; /* Descriptor status */ - u8 hdr_len; /* Header length */ - __le16 mss; /* Maximum segment size */ - } fields; - } tcp_seg_setup; -}; - -/* Offload data descriptor */ -struct e1000_data_desc { - __le64 buffer_addr; /* Address of the descriptor's buffer address */ - union { - __le32 data; - struct { - __le16 length; /* Data buffer length */ - u8 typ_len_ext; /* */ - u8 cmd; /* */ - } flags; - } lower; - union { - __le32 data; - struct { - u8 status; /* Descriptor status */ - u8 popts; /* Packet Options */ - __le16 special; /* */ - } fields; - } upper; -}; - -/* Filters */ -#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ -#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ -#define E1000_VLAN_FILTER_TBL_SIZE 128 
/* VLAN Filter Table (4096 bits) */ - -/* Receive Address Register */ -struct e1000_rar { - volatile __le32 low; /* receive address low */ - volatile __le32 high; /* receive address high */ -}; - -/* Number of entries in the Multicast Table Array (MTA). */ -#define E1000_NUM_MTA_REGISTERS 128 - -/* IPv4 Address Table Entry */ -struct e1000_ipv4_at_entry { - volatile u32 ipv4_addr; /* IP Address (RW) */ - volatile u32 reserved; -}; - -/* Four wakeup IP addresses are supported */ -#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 -#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX -#define E1000_IP6AT_SIZE 1 - -/* IPv6 Address Table Entry */ -struct e1000_ipv6_at_entry { - volatile u8 ipv6_addr[16]; -}; - -/* Flexible Filter Length Table Entry */ -struct e1000_fflt_entry { - volatile u32 length; /* Flexible Filter Length (RW) */ - volatile u32 reserved; -}; - -/* Flexible Filter Mask Table Entry */ -struct e1000_ffmt_entry { - volatile u32 mask; /* Flexible Filter Mask (RW) */ - volatile u32 reserved; -}; - -/* Flexible Filter Value Table Entry */ -struct e1000_ffvt_entry { - volatile u32 value; /* Flexible Filter Value (RW) */ - volatile u32 reserved; -}; - -/* Four Flexible Filters are supported */ -#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 - -/* Each Flexible Filter is at most 128 (0x80) bytes in length */ -#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 - -#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX -#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX -#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX - -#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 - -/* Register Set. (82543, 82544) - * - * Registers are defined to be 32 bits and should be accessed as 32 bit values. - * These registers are physically located on the NIC, but are mapped into the - * host memory address space. 
- * - * RW - register is both readable and writable - * RO - register is read only - * WO - register is write only - * R/clr - register is read only and is cleared when read - * A - register array - */ -#define E1000_CTRL 0x00000 /* Device Control - RW */ -#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ -#define E1000_STATUS 0x00008 /* Device Status - RO */ -#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ -#define E1000_EERD 0x00014 /* EEPROM Read - RW */ -#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ -#define E1000_FLA 0x0001C /* Flash Access - RW */ -#define E1000_MDIC 0x00020 /* MDI Control - RW */ - -extern void __iomem *ce4100_gbe_mdio_base_virt; -#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt) -#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) -#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) -#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) -#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) -#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) -#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) - -#define E1000_SCTL 0x00024 /* SerDes Control - RW */ -#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ -#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ -#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ -#define E1000_FCT 0x00030 /* Flow Control Type - RW */ -#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ -#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ -#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ -#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ -#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ -#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ - -/* Auxiliary Control Register. 
This register is CE4100 specific, - * RMII/RGMII function is switched by this register - RW - * Following are bits definitions of the Auxiliary Control Register - */ -#define E1000_CTL_AUX 0x000E0 -#define E1000_CTL_AUX_END_SEL_SHIFT 10 -#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 -#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 - -/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ -#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) -/* descriptor use CTL_AUX.ENDIANESS, packet use default */ -#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) -/* descriptor use default, packet use CTL_AUX.ENDIANESS */ -#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) -/* all use CTL_AUX.ENDIANESS */ -#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) - -#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) -#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) - -/* LW little endian, Byte big endian */ -#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) -#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) -#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) -#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) - -#define E1000_RCTL 0x00100 /* RX Control - RW */ -#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ -#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ -#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ -#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ -#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ -#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ -#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ -#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ -#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ -#define E1000_TCTL 0x00400 /* TX Control - RW */ -#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ -#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ -#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ -#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ -#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ -#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ -#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ -#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ -#define FEXTNVM_SW_CONFIG 0x0001 -#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ -#define E1000_PBS 0x01008 /* Packet Buffer Size */ -#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ -#define E1000_FLASH_UPDATES 1000 -#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ -#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ -#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ -#define E1000_FLSWCTL 0x01030 /* FLASH control register */ -#define E1000_FLSWDATA 0x01034 /* FLASH data register */ -#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ -#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ -#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ -#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ -#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ -#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ -#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ -#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ -#define 
E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
-#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
-#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
-#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
-#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
-#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
-#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
-#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
-#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
-#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
-#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
-#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
-#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
-#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
-#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
-#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
-#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
-#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
-#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
-#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
-#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
-#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
-#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */
-#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
-#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
-#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
-#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
-#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
-#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
-#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
-#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
-#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
-#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
-#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
-#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
-#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
-#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
-#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
-#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
-#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
-#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
-#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
-#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
-#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
-#define E1000_COLC 0x04028 /* Collision Count - R/clr */
-#define E1000_DC 0x04030 /* Defer Count - R/clr */
-#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
-#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
-#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
-#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
-#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
-#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
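/*
 * Editor's illustrative sketch, not from the patch: the head/tail
 * register pairs above (E1000_TDH/E1000_TDT, E1000_RDH/E1000_RDT)
 * delimit the hardware-owned span of a descriptor ring. Software
 * keeps one slot permanently unused so that head == tail always
 * means "empty", and computes the remaining room roughly as below
 * (the helper name is hypothetical):
 */
static inline u16 example_unused_descs(u16 head, u16 tail, u16 ring_count)
{
	/* slots between tail (next to fill) and head (next to clean),
	 * minus the one slot kept free to distinguish full from empty */
	return (head > tail) ? head - tail - 1
			     : ring_count + head - tail - 1;
}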
-#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
-#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
-#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count - R/clr */
-#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
-#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
-#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
-#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */
-#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
-#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
-#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
-#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
-#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
-#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
-#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
-#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
-#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
-#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
-#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
-#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
-#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
-#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
-#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
-#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
-#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
-#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
-#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
-#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
-#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
-#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
-#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
-#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
-#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
-#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
-#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
-#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
-#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
-#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 bytes) - R/clr */
-#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
-#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
-#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
-#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
-#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
-#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
-#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
-#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
-#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
-#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
-#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
-#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
-#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
-#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
-#define E1000_RFCTL
0x05008 /* Receive Filter Control */ -#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ -#define E1000_RA 0x05400 /* Receive Address - RW Array */ -#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ -#define E1000_WUC 0x05800 /* Wakeup Control - RW */ -#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ -#define E1000_WUS 0x05810 /* Wakeup Status - RO */ -#define E1000_MANC 0x05820 /* Management Control - RW */ -#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ -#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ -#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ -#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ -#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ -#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ -#define E1000_HOST_IF 0x08800 /* Host Interface */ -#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ -#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ - -#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ -#define E1000_MDPHYA 0x0003C /* PHY address - RW */ -#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ -#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ - -#define E1000_GCR 0x05B00 /* PCI-Ex Control */ -#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ -#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ -#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ -#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ -#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ -#define E1000_SWSM 0x05B50 /* SW Semaphore */ -#define E1000_FWSM 0x05B54 /* FW Semaphore */ -#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ -#define E1000_HICR 0x08F00 /* Host Interface Control */ - -/* RSS registers */ -#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ -#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ -#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ -#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ -#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ -#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ -/* Register Set (82542) - * - * Some of the 82542 registers are located at different offsets than they are - * in more current versions of the 8254x. Despite the difference in location, - * the registers function in the same manner. 
- */ -#define E1000_82542_CTL_AUX E1000_CTL_AUX -#define E1000_82542_CTRL E1000_CTRL -#define E1000_82542_CTRL_DUP E1000_CTRL_DUP -#define E1000_82542_STATUS E1000_STATUS -#define E1000_82542_EECD E1000_EECD -#define E1000_82542_EERD E1000_EERD -#define E1000_82542_CTRL_EXT E1000_CTRL_EXT -#define E1000_82542_FLA E1000_FLA -#define E1000_82542_MDIC E1000_MDIC -#define E1000_82542_SCTL E1000_SCTL -#define E1000_82542_FEXTNVM E1000_FEXTNVM -#define E1000_82542_FCAL E1000_FCAL -#define E1000_82542_FCAH E1000_FCAH -#define E1000_82542_FCT E1000_FCT -#define E1000_82542_VET E1000_VET -#define E1000_82542_RA 0x00040 -#define E1000_82542_ICR E1000_ICR -#define E1000_82542_ITR E1000_ITR -#define E1000_82542_ICS E1000_ICS -#define E1000_82542_IMS E1000_IMS -#define E1000_82542_IMC E1000_IMC -#define E1000_82542_RCTL E1000_RCTL -#define E1000_82542_RDTR 0x00108 -#define E1000_82542_RDBAL 0x00110 -#define E1000_82542_RDBAH 0x00114 -#define E1000_82542_RDLEN 0x00118 -#define E1000_82542_RDH 0x00120 -#define E1000_82542_RDT 0x00128 -#define E1000_82542_RDTR0 E1000_82542_RDTR -#define E1000_82542_RDBAL0 E1000_82542_RDBAL -#define E1000_82542_RDBAH0 E1000_82542_RDBAH -#define E1000_82542_RDLEN0 E1000_82542_RDLEN -#define E1000_82542_RDH0 E1000_82542_RDH -#define E1000_82542_RDT0 E1000_82542_RDT -#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication - * RX Control - RW */ -#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) -#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ -#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ -#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ -#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ -#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ -#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ -#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ -#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ -#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ -#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ -#define E1000_82542_RDTR1 0x00130 -#define E1000_82542_RDBAL1 0x00138 -#define E1000_82542_RDBAH1 0x0013C -#define E1000_82542_RDLEN1 0x00140 -#define E1000_82542_RDH1 0x00148 -#define E1000_82542_RDT1 0x00150 -#define E1000_82542_FCRTH 0x00160 -#define E1000_82542_FCRTL 0x00168 -#define E1000_82542_FCTTV E1000_FCTTV -#define E1000_82542_TXCW E1000_TXCW -#define E1000_82542_RXCW E1000_RXCW -#define E1000_82542_MTA 0x00200 -#define E1000_82542_TCTL E1000_TCTL -#define E1000_82542_TCTL_EXT E1000_TCTL_EXT -#define E1000_82542_TIPG E1000_TIPG -#define E1000_82542_TDBAL 0x00420 -#define E1000_82542_TDBAH 0x00424 -#define E1000_82542_TDLEN 0x00428 -#define E1000_82542_TDH 0x00430 -#define E1000_82542_TDT 0x00438 -#define E1000_82542_TIDV 0x00440 -#define E1000_82542_TBT E1000_TBT -#define E1000_82542_AIT E1000_AIT -#define E1000_82542_VFTA 0x00600 -#define E1000_82542_LEDCTL E1000_LEDCTL -#define E1000_82542_PBA E1000_PBA -#define E1000_82542_PBS E1000_PBS -#define E1000_82542_EEMNGCTL E1000_EEMNGCTL -#define E1000_82542_EEARBC E1000_EEARBC -#define E1000_82542_FLASHT E1000_FLASHT -#define E1000_82542_EEWR E1000_EEWR -#define E1000_82542_FLSWCTL E1000_FLSWCTL -#define E1000_82542_FLSWDATA E1000_FLSWDATA -#define E1000_82542_FLSWCNT E1000_FLSWCNT -#define E1000_82542_FLOP E1000_FLOP -#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL -#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE 
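/*
 * Editor's illustrative sketch, not from the patch: these 82542
 * aliases let the shared code select register offsets by MAC type
 * instead of scattering conditionals. Assuming the e1000_82542_rev2_*
 * values from the mac_type enum earlier in this header, the selection
 * looks roughly like this (the helper name is hypothetical):
 */
static inline u32 example_rdbal_offset(struct e1000_hw *hw)
{
	if (hw->mac_type == e1000_82542_rev2_0 ||
	    hw->mac_type == e1000_82542_rev2_1)
		return E1000_82542_RDBAL;	/* 0x00110 on the 82542 */
	return E1000_RDBAL;			/* 0x02800 on later parts */
}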
-#define E1000_82542_PHY_CTRL E1000_PHY_CTRL -#define E1000_82542_ERT E1000_ERT -#define E1000_82542_RXDCTL E1000_RXDCTL -#define E1000_82542_RXDCTL1 E1000_RXDCTL1 -#define E1000_82542_RADV E1000_RADV -#define E1000_82542_RSRPD E1000_RSRPD -#define E1000_82542_TXDMAC E1000_TXDMAC -#define E1000_82542_KABGTXD E1000_KABGTXD -#define E1000_82542_TDFHS E1000_TDFHS -#define E1000_82542_TDFTS E1000_TDFTS -#define E1000_82542_TDFPC E1000_TDFPC -#define E1000_82542_TXDCTL E1000_TXDCTL -#define E1000_82542_TADV E1000_TADV -#define E1000_82542_TSPMT E1000_TSPMT -#define E1000_82542_CRCERRS E1000_CRCERRS -#define E1000_82542_ALGNERRC E1000_ALGNERRC -#define E1000_82542_SYMERRS E1000_SYMERRS -#define E1000_82542_RXERRC E1000_RXERRC -#define E1000_82542_MPC E1000_MPC -#define E1000_82542_SCC E1000_SCC -#define E1000_82542_ECOL E1000_ECOL -#define E1000_82542_MCC E1000_MCC -#define E1000_82542_LATECOL E1000_LATECOL -#define E1000_82542_COLC E1000_COLC -#define E1000_82542_DC E1000_DC -#define E1000_82542_TNCRS E1000_TNCRS -#define E1000_82542_SEC E1000_SEC -#define E1000_82542_CEXTERR E1000_CEXTERR -#define E1000_82542_RLEC E1000_RLEC -#define E1000_82542_XONRXC E1000_XONRXC -#define E1000_82542_XONTXC E1000_XONTXC -#define E1000_82542_XOFFRXC E1000_XOFFRXC -#define E1000_82542_XOFFTXC E1000_XOFFTXC -#define E1000_82542_FCRUC E1000_FCRUC -#define E1000_82542_PRC64 E1000_PRC64 -#define E1000_82542_PRC127 E1000_PRC127 -#define E1000_82542_PRC255 E1000_PRC255 -#define E1000_82542_PRC511 E1000_PRC511 -#define E1000_82542_PRC1023 E1000_PRC1023 -#define E1000_82542_PRC1522 E1000_PRC1522 -#define E1000_82542_GPRC E1000_GPRC -#define E1000_82542_BPRC E1000_BPRC -#define E1000_82542_MPRC E1000_MPRC -#define E1000_82542_GPTC E1000_GPTC -#define E1000_82542_GORCL E1000_GORCL -#define E1000_82542_GORCH E1000_GORCH -#define E1000_82542_GOTCL E1000_GOTCL -#define E1000_82542_GOTCH E1000_GOTCH -#define E1000_82542_RNBC E1000_RNBC -#define E1000_82542_RUC E1000_RUC -#define E1000_82542_RFC E1000_RFC -#define E1000_82542_ROC E1000_ROC -#define E1000_82542_RJC E1000_RJC -#define E1000_82542_MGTPRC E1000_MGTPRC -#define E1000_82542_MGTPDC E1000_MGTPDC -#define E1000_82542_MGTPTC E1000_MGTPTC -#define E1000_82542_TORL E1000_TORL -#define E1000_82542_TORH E1000_TORH -#define E1000_82542_TOTL E1000_TOTL -#define E1000_82542_TOTH E1000_TOTH -#define E1000_82542_TPR E1000_TPR -#define E1000_82542_TPT E1000_TPT -#define E1000_82542_PTC64 E1000_PTC64 -#define E1000_82542_PTC127 E1000_PTC127 -#define E1000_82542_PTC255 E1000_PTC255 -#define E1000_82542_PTC511 E1000_PTC511 -#define E1000_82542_PTC1023 E1000_PTC1023 -#define E1000_82542_PTC1522 E1000_PTC1522 -#define E1000_82542_MPTC E1000_MPTC -#define E1000_82542_BPTC E1000_BPTC -#define E1000_82542_TSCTC E1000_TSCTC -#define E1000_82542_TSCTFC E1000_TSCTFC -#define E1000_82542_RXCSUM E1000_RXCSUM -#define E1000_82542_WUC E1000_WUC -#define E1000_82542_WUFC E1000_WUFC -#define E1000_82542_WUS E1000_WUS -#define E1000_82542_MANC E1000_MANC -#define E1000_82542_IPAV E1000_IPAV -#define E1000_82542_IP4AT E1000_IP4AT -#define E1000_82542_IP6AT E1000_IP6AT -#define E1000_82542_WUPL E1000_WUPL -#define E1000_82542_WUPM E1000_WUPM -#define E1000_82542_FFLT E1000_FFLT -#define E1000_82542_TDFH 0x08010 -#define E1000_82542_TDFT 0x08018 -#define E1000_82542_FFMT E1000_FFMT -#define E1000_82542_FFVT E1000_FFVT -#define E1000_82542_HOST_IF E1000_HOST_IF -#define E1000_82542_IAM E1000_IAM -#define E1000_82542_EEMNGCTL E1000_EEMNGCTL -#define E1000_82542_PSRCTL E1000_PSRCTL -#define 
E1000_82542_RAID E1000_RAID -#define E1000_82542_TARC0 E1000_TARC0 -#define E1000_82542_TDBAL1 E1000_TDBAL1 -#define E1000_82542_TDBAH1 E1000_TDBAH1 -#define E1000_82542_TDLEN1 E1000_TDLEN1 -#define E1000_82542_TDH1 E1000_TDH1 -#define E1000_82542_TDT1 E1000_TDT1 -#define E1000_82542_TXDCTL1 E1000_TXDCTL1 -#define E1000_82542_TARC1 E1000_TARC1 -#define E1000_82542_RFCTL E1000_RFCTL -#define E1000_82542_GCR E1000_GCR -#define E1000_82542_GSCL_1 E1000_GSCL_1 -#define E1000_82542_GSCL_2 E1000_GSCL_2 -#define E1000_82542_GSCL_3 E1000_GSCL_3 -#define E1000_82542_GSCL_4 E1000_GSCL_4 -#define E1000_82542_FACTPS E1000_FACTPS -#define E1000_82542_SWSM E1000_SWSM -#define E1000_82542_FWSM E1000_FWSM -#define E1000_82542_FFLT_DBG E1000_FFLT_DBG -#define E1000_82542_IAC E1000_IAC -#define E1000_82542_ICRXPTC E1000_ICRXPTC -#define E1000_82542_ICRXATC E1000_ICRXATC -#define E1000_82542_ICTXPTC E1000_ICTXPTC -#define E1000_82542_ICTXATC E1000_ICTXATC -#define E1000_82542_ICTXQEC E1000_ICTXQEC -#define E1000_82542_ICTXQMTC E1000_ICTXQMTC -#define E1000_82542_ICRXDMTC E1000_ICRXDMTC -#define E1000_82542_ICRXOC E1000_ICRXOC -#define E1000_82542_HICR E1000_HICR - -#define E1000_82542_CPUVEC E1000_CPUVEC -#define E1000_82542_MRQC E1000_MRQC -#define E1000_82542_RETA E1000_RETA -#define E1000_82542_RSSRK E1000_RSSRK -#define E1000_82542_RSSIM E1000_RSSIM -#define E1000_82542_RSSIR E1000_RSSIR -#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA -#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC - -/* Statistics counters collected by the MAC */ -struct e1000_hw_stats { - u64 crcerrs; - u64 algnerrc; - u64 symerrs; - u64 rxerrc; - u64 txerrc; - u64 mpc; - u64 scc; - u64 ecol; - u64 mcc; - u64 latecol; - u64 colc; - u64 dc; - u64 tncrs; - u64 sec; - u64 cexterr; - u64 rlec; - u64 xonrxc; - u64 xontxc; - u64 xoffrxc; - u64 xofftxc; - u64 fcruc; - u64 prc64; - u64 prc127; - u64 prc255; - u64 prc511; - u64 prc1023; - u64 prc1522; - u64 gprc; - u64 bprc; - u64 mprc; - u64 gptc; - u64 gorcl; - u64 gorch; - u64 gotcl; - u64 gotch; - u64 rnbc; - u64 ruc; - u64 rfc; - u64 roc; - u64 rlerrc; - u64 rjc; - u64 mgprc; - u64 mgpdc; - u64 mgptc; - u64 torl; - u64 torh; - u64 totl; - u64 toth; - u64 tpr; - u64 tpt; - u64 ptc64; - u64 ptc127; - u64 ptc255; - u64 ptc511; - u64 ptc1023; - u64 ptc1522; - u64 mptc; - u64 bptc; - u64 tsctc; - u64 tsctfc; - u64 iac; - u64 icrxptc; - u64 icrxatc; - u64 ictxptc; - u64 ictxatc; - u64 ictxqec; - u64 ictxqmtc; - u64 icrxdmtc; - u64 icrxoc; -}; - -/* Structure containing variables used by the shared code (e1000_hw.c) */ -struct e1000_hw { - u8 __iomem *hw_addr; - u8 __iomem *flash_address; - e1000_mac_type mac_type; - e1000_phy_type phy_type; - u32 phy_init_script; - e1000_media_type media_type; - void *back; - struct e1000_shadow_ram *eeprom_shadow_ram; - u32 flash_bank_size; - u32 flash_base_addr; - e1000_fc_type fc; - e1000_bus_speed bus_speed; - e1000_bus_width bus_width; - e1000_bus_type bus_type; - struct e1000_eeprom_info eeprom; - e1000_ms_type master_slave; - e1000_ms_type original_master_slave; - e1000_ffe_config ffe_config_state; - u32 asf_firmware_present; - u32 eeprom_semaphore_present; - unsigned long io_base; - u32 phy_id; - u32 phy_revision; - u32 phy_addr; - u32 original_fc; - u32 txcw; - u32 autoneg_failed; - u32 max_frame_size; - u32 min_frame_size; - u32 mc_filter_type; - u32 num_mc_addrs; - u32 collision_delta; - u32 tx_packet_delta; - u32 ledctl_default; - u32 ledctl_mode1; - u32 ledctl_mode2; - bool tx_pkt_filtering; - struct e1000_host_mng_dhcp_cookie mng_cookie; - u16 
phy_spd_default; - u16 autoneg_advertised; - u16 pci_cmd_word; - u16 fc_high_water; - u16 fc_low_water; - u16 fc_pause_time; - u16 current_ifs_val; - u16 ifs_min_val; - u16 ifs_max_val; - u16 ifs_step_size; - u16 ifs_ratio; - u16 device_id; - u16 vendor_id; - u16 subsystem_id; - u16 subsystem_vendor_id; - u8 revision_id; - u8 autoneg; - u8 mdix; - u8 forced_speed_duplex; - u8 wait_autoneg_complete; - u8 dma_fairness; - u8 mac_addr[NODE_ADDRESS_SIZE]; - u8 perm_mac_addr[NODE_ADDRESS_SIZE]; - bool disable_polarity_correction; - bool speed_downgraded; - e1000_smart_speed smart_speed; - e1000_dsp_config dsp_config_state; - bool get_link_status; - bool serdes_has_link; - bool tbi_compatibility_en; - bool tbi_compatibility_on; - bool laa_is_present; - bool phy_reset_disable; - bool initialize_hw_bits_disable; - bool fc_send_xon; - bool fc_strict_ieee; - bool report_tx_early; - bool adaptive_ifs; - bool ifs_params_forced; - bool in_ifs_mode; - bool mng_reg_access_disabled; - bool leave_av_bit_off; - bool bad_tx_carr_stats_fd; - bool has_smbus; -}; - -#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ -#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ -#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ -#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ -#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ -#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ -#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ -/* Register Bit Masks */ -/* Device Control */ -#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ -#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ -#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ -#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ -#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ -#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ -#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ -#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ -#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ -#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ -#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ -#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ -#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ -#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ -#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ -#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ -#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ -#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ -#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ -#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ -#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ -#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ -#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ -#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ -#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ -#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ -#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ -#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ -#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ -#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ -#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ -#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ -#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ -#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ -#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ - -/* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ -#define E1000_STATUS_FUNC_SHIFT 2 -#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ -#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ -#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ -#define E1000_STATUS_SPEED_MASK 0x000000C0 -#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ -#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion - by EEPROM/Flash */ -#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ -#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ -#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ -#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ -#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ -#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ -#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ -#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ -#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ -#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ -#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ -#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ -#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ -#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ -#define E1000_STATUS_FUSE_8 0x04000000 -#define E1000_STATUS_FUSE_9 0x08000000 -#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ -#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ - -/* Constants used to interpret the masked PCI-X bus speed. */ -#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ -#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ -#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ - -/* EEPROM/Flash Control */ -#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ -#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ -#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ -#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ -#define E1000_EECD_FWE_MASK 0x00000030 -#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ -#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ -#define E1000_EECD_FWE_SHIFT 4 -#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ -#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ -#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ -#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ -#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type - * (0-small, 1-large) */ -#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ -#ifndef E1000_EEPROM_GRANT_ATTEMPTS -#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ -#endif -#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ -#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ -#define E1000_EECD_SIZE_EX_SHIFT 11 -#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ -#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ -#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ -#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ -#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ -#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ -#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ -#define E1000_EECD_SECVAL_SHIFT 22 -#define E1000_STM_OPCODE 0xDB00 -#define E1000_HICR_FW_RESET 0xC0 - -#define E1000_SHADOW_RAM_WORDS 2048 -#define E1000_ICH_NVM_SIG_WORD 0x13 -#define E1000_ICH_NVM_SIG_MASK 0xC0 - -/* EEPROM Read */ -#define E1000_EERD_START 0x00000001 /* Start Read */ -#define E1000_EERD_DONE 0x00000010 /* Read Done */ -#define E1000_EERD_ADDR_SHIFT 8 -#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ -#define E1000_EERD_DATA_SHIFT 16 -#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ - -/* SPI EEPROM Status Register */ -#define EEPROM_STATUS_RDY_SPI 0x01 -#define EEPROM_STATUS_WEN_SPI 
0x02 -#define EEPROM_STATUS_BP0_SPI 0x04 -#define EEPROM_STATUS_BP1_SPI 0x08 -#define EEPROM_STATUS_WPEN_SPI 0x80 - -/* Extended Device Control */ -#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ -#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ -#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN -#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ -#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ -#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ -#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ -#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA -#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ -#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ -#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ -#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ -#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ -#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ -#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ -#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ -#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ -#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ -#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ -#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 -#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 -#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 -#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 -#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 -#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 -#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 -#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 -#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ -#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ -#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ -#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ -#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ -#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 - -/* MDI Control */ -#define E1000_MDIC_DATA_MASK 0x0000FFFF -#define E1000_MDIC_REG_MASK 0x001F0000 -#define E1000_MDIC_REG_SHIFT 16 -#define E1000_MDIC_PHY_MASK 0x03E00000 -#define E1000_MDIC_PHY_SHIFT 21 -#define E1000_MDIC_OP_WRITE 0x04000000 -#define E1000_MDIC_OP_READ 0x08000000 -#define E1000_MDIC_READY 0x10000000 -#define E1000_MDIC_INT_EN 0x20000000 -#define E1000_MDIC_ERROR 0x40000000 - -#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 -#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 -#define INTEL_CE_GBE_MDIC_GO 0x80000000 -#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 - -#define E1000_KUMCTRLSTA_MASK 0x0000FFFF -#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 -#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 -#define E1000_KUMCTRLSTA_REN 0x00200000 - -#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 -#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 -#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 -#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 -#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 -#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 -#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 -#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E -#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F - -/* FIFO Control */ -#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 -#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 - -/* In-Band Control */ -#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 -#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 - -/* Half-Duplex Control */ -#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 -#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 - -#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E - -#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 -#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 - -#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 -#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 -#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 - -#define E1000_KABGTXD_BGSQLBIAS 0x00050000 - -#define E1000_PHY_CTRL_SPD_EN 0x00000001 -#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 -#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 -#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 -#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 -#define E1000_PHY_CTRL_B2B_EN 0x00000080 - -/* LED Control */ -#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F -#define E1000_LEDCTL_LED0_MODE_SHIFT 0 -#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 -#define E1000_LEDCTL_LED0_IVRT 0x00000040 -#define E1000_LEDCTL_LED0_BLINK 0x00000080 -#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 -#define E1000_LEDCTL_LED1_MODE_SHIFT 8 -#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 -#define E1000_LEDCTL_LED1_IVRT 0x00004000 -#define E1000_LEDCTL_LED1_BLINK 0x00008000 -#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 -#define E1000_LEDCTL_LED2_MODE_SHIFT 16 -#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 -#define E1000_LEDCTL_LED2_IVRT 0x00400000 -#define E1000_LEDCTL_LED2_BLINK 0x00800000 -#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 -#define E1000_LEDCTL_LED3_MODE_SHIFT 24 -#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 -#define E1000_LEDCTL_LED3_IVRT 0x40000000 -#define E1000_LEDCTL_LED3_BLINK 0x80000000 - -#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 -#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 -#define E1000_LEDCTL_MODE_LINK_UP 0x2 -#define E1000_LEDCTL_MODE_ACTIVITY 0x3 -#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 -#define E1000_LEDCTL_MODE_LINK_10 0x5 -#define E1000_LEDCTL_MODE_LINK_100 0x6 -#define E1000_LEDCTL_MODE_LINK_1000 0x7 -#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 -#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 -#define E1000_LEDCTL_MODE_COLLISION 0xA -#define E1000_LEDCTL_MODE_BUS_SPEED 0xB -#define E1000_LEDCTL_MODE_BUS_SIZE 0xC -#define E1000_LEDCTL_MODE_PAUSED 0xD -#define E1000_LEDCTL_MODE_LED_ON 0xE -#define E1000_LEDCTL_MODE_LED_OFF 0xF - -/* Receive Address */ -#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ - -/* Interrupt Cause Read */ -#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ -#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ -#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ -#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ -#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ -#define E1000_ICR_RXO 0x00000040 /* rx overrun */ -#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ -#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ -#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ -#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ -#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ -#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ -#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ -#define E1000_ICR_TXD_LOW 0x00008000 -#define E1000_ICR_SRPD 0x00010000 -#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ -#define E1000_ICR_MNG 0x00040000 /* Manageability event */ -#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ -#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ -#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ -#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ -#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ -#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ -#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ -#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ -#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ -#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ -#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ -#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ - -/* Interrupt Cause Set */ -#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ -#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ -#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ -#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ -#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ -#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ -#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ -#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ -#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ -#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ -#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ -#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ -#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW -#define E1000_ICS_SRPD E1000_ICR_SRPD -#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ -#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ -#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ -#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ -#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ -#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ -#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ -#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ -#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ -#define E1000_ICS_DSW E1000_ICR_DSW -#define E1000_ICS_PHYINT E1000_ICR_PHYINT -#define E1000_ICS_EPRST E1000_ICR_EPRST - -/* Interrupt Mask Set */ -#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ -#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ -#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ -#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ -#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ -#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ -#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ -#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ -#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ -#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ -#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ -#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ -#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW -#define E1000_IMS_SRPD E1000_ICR_SRPD -#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ -#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ -#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ -#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ -#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ -#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ -#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ -#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ -#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ -#define E1000_IMS_DSW E1000_ICR_DSW -#define E1000_IMS_PHYINT E1000_ICR_PHYINT -#define E1000_IMS_EPRST E1000_ICR_EPRST - -/* Interrupt Mask Clear */ -#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ -#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ -#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ -#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ -#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ -#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ -#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ -#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ -#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ -#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ -#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ -#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ -#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW -#define E1000_IMC_SRPD E1000_ICR_SRPD -#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ -#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ -#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ -#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ -#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ -#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ -#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ -#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ -#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ -#define E1000_IMC_DSW E1000_ICR_DSW -#define E1000_IMC_PHYINT E1000_ICR_PHYINT -#define E1000_IMC_EPRST E1000_ICR_EPRST - -/* Receive Control */ -#define E1000_RCTL_RST 0x00000001 /* Software reset */ -#define E1000_RCTL_EN 0x00000002 /* enable */ -#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ -#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ -#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ -#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ -#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ -#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ -#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ -#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ -#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ -#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ -#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ -#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ -#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ -#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ -#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ -#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ -#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ -#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ -#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ -#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ -/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ -#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ -#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ -#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ -#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ -/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ -#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ -#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ -#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ -#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ -#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
-#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
-#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
-#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
-#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
-#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
-#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
-
-/* Use byte values for the following shift parameters
- * Usage:
- * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
- *             E1000_PSRCTL_BSIZE0_MASK) |
- *            ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
- *             E1000_PSRCTL_BSIZE1_MASK) |
- *            ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
- *             E1000_PSRCTL_BSIZE2_MASK) |
- *            ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
- *             E1000_PSRCTL_BSIZE3_MASK))
- * where value0 = [128..16256], default=256
- *       value1 = [1024..64512], default=4096
- *       value2 = [0..64512], default=4096
- *       value3 = [0..64512], default=0
- */
-
-#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
-#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
-#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
-#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
-
-#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
-#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
-#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
-#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
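As a worked instance of the usage note above, a minimal sketch that composes a PSRCTL value from the default sizes quoted in the comment. The ROUNDUP() helper is an assumption for the example (round up to a multiple of a power of two); it is not provided by this header, and u32 is the usual kernel type from <linux/types.h>:

/* Example helper only: round v up to a multiple of r (r a power of two). */
#define ROUNDUP(v, r) (((v) + (r) - 1) & ~((r) - 1))

static u32 example_psrctl(void)
{
	u32 psrctl = 0;	/* defaults from the comment: 256B, 4KB, 4KB, 0B */

	psrctl |= (ROUNDUP(256, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
		  E1000_PSRCTL_BSIZE0_MASK;
	psrctl |= (ROUNDUP(4096, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
		  E1000_PSRCTL_BSIZE1_MASK;
	psrctl |= (ROUNDUP(4096, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
		  E1000_PSRCTL_BSIZE2_MASK;
	psrctl |= (ROUNDUP(0, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
		  E1000_PSRCTL_BSIZE3_MASK;
	return psrctl;	/* 0x00040402 for these sizes */
}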
-
-/* SW_W_SYNC definitions */
-#define E1000_SWFW_EEP_SM 0x0001
-#define E1000_SWFW_PHY0_SM 0x0002
-#define E1000_SWFW_PHY1_SM 0x0004
-#define E1000_SWFW_MAC_CSR_SM 0x0008
-
-/* Receive Descriptor */
-#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
-#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */
-#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */
-#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */
-#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */
-
-/* Flow Control */
-#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
-#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
-#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
-#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
-
-/* Header split receive */
-#define E1000_RFCTL_ISCSI_DIS 0x00000001
-#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E
-#define E1000_RFCTL_ISCSI_DWC_SHIFT 1
-#define E1000_RFCTL_NFSW_DIS 0x00000040
-#define E1000_RFCTL_NFSR_DIS 0x00000080
-#define E1000_RFCTL_NFS_VER_MASK 0x00000300
-#define E1000_RFCTL_NFS_VER_SHIFT 8
-#define E1000_RFCTL_IPV6_DIS 0x00000400
-#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
-#define E1000_RFCTL_ACK_DIS 0x00001000
-#define E1000_RFCTL_ACKD_DIS 0x00002000
-#define E1000_RFCTL_IPFRSP_DIS 0x00004000
-#define E1000_RFCTL_EXTEN 0x00008000
-#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
-#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
-
-/* Receive Descriptor Control */
-#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
-#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
-#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
-#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
-
-/* Transmit Descriptor Control */
-#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
-#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
-#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
-#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
-#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
-#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
-#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
-                                              still to be processed. */
-/* Transmit Configuration Word */
-#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
-#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
-#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
-#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
-#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
-#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
-#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
-#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
-#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
-#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
-
-/* Receive Configuration Word */
-#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
-#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
-#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
-#define E1000_RXCW_CC 0x10000000 /* Receive config change */
-#define E1000_RXCW_C 0x20000000 /* Receive config */
-#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
-#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
-
-/* Transmit Control */
-#define E1000_TCTL_RST 0x00000001 /* software reset */
-#define E1000_TCTL_EN 0x00000002 /* enable tx */
-#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
-#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
-#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
-#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
-#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
-#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
-#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
-#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
-#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
-/* Extended Transmit Control */
-#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */
-#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
-
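For orientation, a minimal sketch of how the TCTL collision fields above are typically filled in. E1000_COLLISION_THRESHOLD, E1000_CT_SHIFT, E1000_COLLISION_DISTANCE and E1000_COLD_SHIFT are defined further down in this header; the function itself is illustrative rather than the driver's code:

static u32 example_tctl(void)
{
	/* enable transmit and pad short packets */
	u32 tctl = E1000_TCTL_EN | E1000_TCTL_PSP;

	/* collision threshold into bits 11:4, collision distance into 21:12 */
	tctl |= (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) & E1000_TCTL_CT;
	tctl |= (E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT) & E1000_TCTL_COLD;
	return tctl;	/* caller would write this to the TCTL register */
}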
-/* Receive Checksum Control */
-#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
-#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
-#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
-#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
-#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
-#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
-
-/* Multiple Receive Queue Control */
-#define E1000_MRQC_ENABLE_MASK 0x00000003
-#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
-#define E1000_MRQC_ENABLE_RSS_INT 0x00000004
-#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
-#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
-#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
-#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
-#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
-#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
-#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
-
-/* Definitions for power management and wakeup registers */
-/* Wake Up Control */
-#define E1000_WUC_APME 0x00000001 /* APM Enable */
-#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
-#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
-#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
-
-/* Wake Up Filter Control */
-#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
-#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
-#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
-#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
-#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
-#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
-#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
-#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
-#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
-#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
-#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
-#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
-
-/* Wake Up Status */
-#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
-#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */
-#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */
-#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */
-#define E1000_WUS_BC 0x00000010 /* Broadcast Received */
-#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */
-#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
-#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
-#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
-#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
-#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
-#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
-#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
-
-/* Management Control */
-#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
-#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
-#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
-#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */
-#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */
-#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
-#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
-#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
-#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
-#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
-                                           * Filtering */
-#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
-#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
-#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
-#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
-#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
-#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
-#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
-                                                  * filtering */
-#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
-                                           * memory */
-#define
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address - * filtering */ -#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ -#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ -#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ -#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ -#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ -#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ -#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ -#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ - -#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ -#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ - -/* SW Semaphore Register */ -#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ -#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ -#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ -#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ - -/* FW Semaphore Register */ -#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ -#define E1000_FWSM_MODE_SHIFT 1 -#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ - -#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ -#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ -#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ -#define E1000_FWSM_SKUEL_SHIFT 29 -#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ -#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ -#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ -#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ - -/* FFLT Debug Register */ -#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ - -typedef enum { - e1000_mng_mode_none = 0, - e1000_mng_mode_asf, - e1000_mng_mode_pt, - e1000_mng_mode_ipmi, - e1000_mng_mode_host_interface_only -} e1000_mng_mode; - -/* Host Interface Control Register */ -#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ -#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done - * to put command in RAM */ -#define E1000_HICR_SV 0x00000004 /* Status Validity */ -#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ - -/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ -#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ -#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ -#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ -#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ - -struct e1000_host_command_header { - u8 command_id; - u8 command_length; - u8 command_options; /* I/F bits for command, status for return */ - u8 checksum; -}; -struct e1000_host_command_info { - struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ - u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ -}; - -/* Host SMB register #0 */ -#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ -#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ -#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ -#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ - -/* Host SMB register #1 */ -#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN -#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN -#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT -#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT - -/* FW Status Register */ -#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ - -/* Wake Up Packet Length */ -#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ - -#define E1000_MDALIGN 4096 - -/* PCI-Ex registers*/ - -/* PCI-Ex Control Register */ -#define E1000_GCR_RXD_NO_SNOOP 0x00000001 -#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 -#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 -#define E1000_GCR_TXD_NO_SNOOP 0x00000008 -#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 -#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 - -#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ - E1000_GCR_RXDSCW_NO_SNOOP | \ - E1000_GCR_RXDSCR_NO_SNOOP | \ - E1000_GCR_TXD_NO_SNOOP | \ - E1000_GCR_TXDSCW_NO_SNOOP | \ - E1000_GCR_TXDSCR_NO_SNOOP) - -#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL - -#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 -/* Function Active and Power State to MNG */ -#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 -#define E1000_FACTPS_LAN0_VALID 0x00000004 -#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 -#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 -#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 -#define E1000_FACTPS_LAN1_VALID 0x00000100 -#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 -#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 -#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 -#define E1000_FACTPS_IDE_ENABLE 0x00004000 -#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 -#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 -#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 -#define E1000_FACTPS_SP_ENABLE 0x00100000 -#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 -#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 -#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 -#define E1000_FACTPS_IPMI_ENABLE 0x04000000 -#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 -#define E1000_FACTPS_MNGCG 0x20000000 -#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 -#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 - -/* PCI-Ex Config Space */ -#define PCI_EX_LINK_STATUS 0x12 -#define PCI_EX_LINK_WIDTH_MASK 0x3F0 -#define PCI_EX_LINK_WIDTH_SHIFT 4 - -/* EEPROM Commands - Microwire */ -#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ -#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ -#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ -#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ -#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ - -/* EEPROM Commands - SPI */ -#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ -#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ -#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ -#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ -#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ -#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ -#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ -#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ -#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ -#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ -#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ - -/* EEPROM Size definitions */ -#define EEPROM_WORD_SIZE_SHIFT 6 -#define EEPROM_SIZE_SHIFT 10 -#define EEPROM_SIZE_MASK 0x1C00 - -/* EEPROM Word Offsets */ -#define EEPROM_COMPAT 0x0003 -#define EEPROM_ID_LED_SETTINGS 0x0004 -#define EEPROM_VERSION 0x0005 -#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ -#define EEPROM_PHY_CLASS_WORD 0x0007 -#define EEPROM_INIT_CONTROL1_REG 0x000A -#define EEPROM_INIT_CONTROL2_REG 0x000F -#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 -#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 -#define EEPROM_INIT_3GIO_3 0x001A -#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 -#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 -#define EEPROM_CFG 0x0012 -#define EEPROM_FLASH_VERSION 0x0032 -#define EEPROM_CHECKSUM_REG 0x003F - -#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ -#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ - -/* Word definitions for ID LED Settings */ -#define ID_LED_RESERVED_0000 0x0000 -#define ID_LED_RESERVED_FFFF 0xFFFF -#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ - (ID_LED_OFF1_OFF2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) -#define ID_LED_DEF1_DEF2 0x1 -#define ID_LED_DEF1_ON2 0x2 -#define ID_LED_DEF1_OFF2 0x3 -#define ID_LED_ON1_DEF2 0x4 -#define ID_LED_ON1_ON2 0x5 -#define ID_LED_ON1_OFF2 0x6 -#define ID_LED_OFF1_DEF2 0x7 -#define ID_LED_OFF1_ON2 0x8 -#define ID_LED_OFF1_OFF2 0x9 - -#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF -#define IGP_ACTIVITY_LED_ENABLE 0x0300 -#define IGP_LED3_MODE 0x07000000 - -/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ -#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F - -/* Mask bit for PHY class in Word 7 of the EEPROM */ -#define EEPROM_PHY_CLASS_A 0x8000 - -/* Mask bits for fields in Word 0x0a of the EEPROM */ -#define EEPROM_WORD0A_ILOS 0x0010 -#define EEPROM_WORD0A_SWDPIO 0x01E0 -#define EEPROM_WORD0A_LRST 0x0200 -#define EEPROM_WORD0A_FD 0x0400 -#define EEPROM_WORD0A_66MHZ 0x0800 - -/* Mask bits for fields in Word 0x0f of the EEPROM */ -#define EEPROM_WORD0F_PAUSE_MASK 0x3000 -#define EEPROM_WORD0F_PAUSE 0x1000 -#define EEPROM_WORD0F_ASM_DIR 0x2000 -#define EEPROM_WORD0F_ANE 0x0800 -#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 -#define EEPROM_WORD0F_LPLU 0x0001 - -/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ -#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 -#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 - -/* Mask bits for fields in Word 0x1a of the EEPROM */ -#define EEPROM_WORD1A_ASPM_MASK 0x000C - 
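The Word 0x0f masks above carry the pause advertisement from which a driver can seed its initial flow-control mode. A minimal sketch of that decode, assuming the word (EEPROM_INIT_CONTROL2_REG) has already been read out of the EEPROM; the 0/1/3 return codes merely stand in for a flow-control enum and are not definitions from this header:

/* 0 = no pause, 1 = tx pause only, 3 = full; illustrative encoding */
static int example_fc_from_word0f(u16 eeprom_data)
{
	if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
		return 0;	/* neither PAUSE nor ASM_DIR advertised */
	if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == EEPROM_WORD0F_ASM_DIR)
		return 1;	/* ASM_DIR without PAUSE: asymmetric tx pause */
	return 3;		/* symmetric pause advertised */
}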
-/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ -#define EEPROM_SUM 0xBABA - -/* EEPROM Map defines (WORD OFFSETS)*/ -#define EEPROM_NODE_ADDRESS_BYTE_0 0 -#define EEPROM_PBA_BYTE_1 8 - -#define EEPROM_RESERVED_WORD 0xFFFF - -/* EEPROM Map Sizes (Byte Counts) */ -#define PBA_SIZE 4 - -/* Collision related configuration parameters */ -#define E1000_COLLISION_THRESHOLD 15 -#define E1000_CT_SHIFT 4 -/* Collision distance is a 0-based value that applies to - * half-duplex-capable hardware only. */ -#define E1000_COLLISION_DISTANCE 63 -#define E1000_COLLISION_DISTANCE_82542 64 -#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE -#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE -#define E1000_COLD_SHIFT 12 - -/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define REQ_RX_DESCRIPTOR_MULTIPLE 8 - -/* Default values for the transmit IPG register */ -#define DEFAULT_82542_TIPG_IPGT 10 -#define DEFAULT_82543_TIPG_IPGT_FIBER 9 -#define DEFAULT_82543_TIPG_IPGT_COPPER 8 - -#define E1000_TIPG_IPGT_MASK 0x000003FF -#define E1000_TIPG_IPGR1_MASK 0x000FFC00 -#define E1000_TIPG_IPGR2_MASK 0x3FF00000 - -#define DEFAULT_82542_TIPG_IPGR1 2 -#define DEFAULT_82543_TIPG_IPGR1 8 -#define E1000_TIPG_IPGR1_SHIFT 10 - -#define DEFAULT_82542_TIPG_IPGR2 10 -#define DEFAULT_82543_TIPG_IPGR2 6 -#define E1000_TIPG_IPGR2_SHIFT 20 - -#define E1000_TXDMAC_DPP 0x00000001 - -/* Adaptive IFS defines */ -#define TX_THRESHOLD_START 8 -#define TX_THRESHOLD_INCREMENT 10 -#define TX_THRESHOLD_DECREMENT 1 -#define TX_THRESHOLD_STOP 190 -#define TX_THRESHOLD_DISABLE 0 -#define TX_THRESHOLD_TIMER_MS 10000 -#define MIN_NUM_XMITS 1000 -#define IFS_MAX 80 -#define IFS_STEP 10 -#define IFS_MIN 40 -#define IFS_RATIO 4 - -/* Extended Configuration Control and Size */ -#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 -#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 -#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 -#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 -#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 -#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 -#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 -#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 - -#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF -#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 -#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 -#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 -#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 - -/* PBA constants */ -#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ -#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ -#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ -#define E1000_PBA_20K 0x0014 -#define E1000_PBA_22K 0x0016 -#define E1000_PBA_24K 0x0018 -#define E1000_PBA_30K 0x001E -#define E1000_PBA_32K 0x0020 -#define E1000_PBA_34K 0x0022 -#define E1000_PBA_38K 0x0026 -#define E1000_PBA_40K 0x0028 -#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ - -#define E1000_PBS_16K E1000_PBA_16K - -/* Flow Control Constants */ -#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 -#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 -#define FLOW_CONTROL_TYPE 0x8808 - -/* The historical defaults for the flow control values are given below. 
*/ -#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ -#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ -#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ - -/* PCIX Config space */ -#define PCIX_COMMAND_REGISTER 0xE6 -#define PCIX_STATUS_REGISTER_LO 0xE8 -#define PCIX_STATUS_REGISTER_HI 0xEA - -#define PCIX_COMMAND_MMRBC_MASK 0x000C -#define PCIX_COMMAND_MMRBC_SHIFT 0x2 -#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 -#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 -#define PCIX_STATUS_HI_MMRBC_4K 0x3 -#define PCIX_STATUS_HI_MMRBC_2K 0x2 - -/* Number of bits required to shift right the "pause" bits from the - * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. - */ -#define PAUSE_SHIFT 5 - -/* Number of bits required to shift left the "SWDPIO" bits from the - * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. - */ -#define SWDPIO_SHIFT 17 - -/* Number of bits required to shift left the "SWDPIO_EXT" bits from the - * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. - */ -#define SWDPIO__EXT_SHIFT 4 - -/* Number of bits required to shift left the "ILOS" bit from the EEPROM - * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. - */ -#define ILOS_SHIFT 3 - -#define RECEIVE_BUFFER_ALIGN_SIZE (256) - -/* Number of milliseconds we wait for auto-negotiation to complete */ -#define LINK_UP_TIMEOUT 500 - -/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ -#define AUTO_READ_DONE_TIMEOUT 10 -/* Number of milliseconds we wait for PHY configuration done after MAC reset */ -#define PHY_CFG_TIMEOUT 100 - -#define E1000_TX_BUFFER_SIZE ((u32)1514) - -/* The carrier extension symbol, as received by the NIC. */ -#define CARRIER_EXTENSION 0x0F - -/* TBI_ACCEPT macro definition: - * - * This macro requires: - * adapter = a pointer to struct e1000_hw - * status = the 8 bit status field of the RX descriptor with EOP set - * error = the 8 bit error field of the RX descriptor with EOP set - * length = the sum of all the length fields of the RX descriptors that - * make up the current frame - * last_byte = the last byte of the frame DMAed by the hardware - * max_frame_length = the maximum frame length we want to accept. - * min_frame_length = the minimum frame length we want to accept. - * - * This macro is a conditional that should be used in the interrupt - * handler's Rx processing routine when RxErrors have been detected. - * - * Typical use: - * ... - * if (TBI_ACCEPT) { - * accept_frame = true; - * e1000_tbi_adjust_stats(adapter, MacAddress); - * frame_length--; - * } else { - * accept_frame = false; - * } - * ... - */ - -#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ - ((adapter)->tbi_compatibility_on && \ - (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ - ((last_byte) == CARRIER_EXTENSION) && \ - (((status) & E1000_RXD_STAT_VP) ? \ - (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ - ((length) <= ((adapter)->max_frame_size + 1))) : \ - (((length) > (adapter)->min_frame_size) && \ - ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) - -/* Structures, enums, and macros for the PHY */ - -/* Bit definitions for the Management Data IO (MDIO) and Management Data - * Clock (MDC) pins in the Device Control Register. 
- */ -#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 -#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 -#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 -#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 -#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 -#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 -#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR -#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA - -/* PHY 1000 MII Register/Bit Definitions */ -/* PHY Registers defined by IEEE */ -#define PHY_CTRL 0x00 /* Control Register */ -#define PHY_STATUS 0x01 /* Status Register */ -#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ -#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ -#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ -#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ -#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ -#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ -#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ -#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ -#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ -#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ - -#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ -#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ - -/* M88E1000 Specific Registers */ -#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ -#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ -#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ -#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ -#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ -#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ - -#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ -#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ -#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ -#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ -#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ - -#define IGP01E1000_IEEE_REGS_PAGE 0x0000 -#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 -#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 - -/* IGP01E1000 Specific Registers */ -#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ -#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ -#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ -#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ -#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ -#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ -#define IGP02E1000_PHY_POWER_MGMT 0x19 -#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ - -/* IGP01E1000 AGC Registers - stores the cable length values*/ -#define IGP01E1000_PHY_AGC_A 0x1172 -#define IGP01E1000_PHY_AGC_B 0x1272 -#define IGP01E1000_PHY_AGC_C 0x1472 -#define IGP01E1000_PHY_AGC_D 0x1872 - -/* IGP02E1000 AGC Registers for cable length values */ -#define IGP02E1000_PHY_AGC_A 0x11B1 -#define IGP02E1000_PHY_AGC_B 0x12B1 -#define IGP02E1000_PHY_AGC_C 0x14B1 -#define IGP02E1000_PHY_AGC_D 0x18B1 - -/* IGP01E1000 DSP Reset Register */ -#define IGP01E1000_PHY_DSP_RESET 0x1F33 -#define IGP01E1000_PHY_DSP_SET 0x1F71 -#define IGP01E1000_PHY_DSP_FFE 0x1F35 - -#define IGP01E1000_PHY_CHANNEL_NUM 4 -#define IGP02E1000_PHY_CHANNEL_NUM 4 - -#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
-#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 -#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 -#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 - -#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 -#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 - -#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 -#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 -#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 -#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 - -#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A -/* IGP01E1000 PCS Initialization register - stores the polarity status when - * speed = 1000 Mbps. */ -#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 -#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 - -#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 - -/* PHY Control Register */ -#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ -#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ -#define MII_CR_POWER_DOWN 0x0800 /* Power down */ -#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ - -/* PHY Status Register */ -#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ -#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ -#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ -#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ -#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ -#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ -#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ -#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ -#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ -#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ -#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ -#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ - -/* Autoneg Advertisement Register */ -#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ -#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ -#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ -#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ -#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ -#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ -#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ -#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ -#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ -#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ - -/* Link Partner Ability Register (Base Page) */ -#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ -#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ -#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ -#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ -#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ -#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ -#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ -#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ -#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ -#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ -#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ - -/* Autoneg Expansion Register */ -#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ -#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ -#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ -#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ -#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ - -/* Next Page TX Register */ -#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ -#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges - * of different NP - */ -#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg - * 0 = cannot comply with msg - */ -#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ -#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow - * 0 = sending last NP - */ - -/* Link Partner Next Page Register */ -#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ -#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges - * of different NP - */ -#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg - * 0 = cannot comply with msg - */ -#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ -#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ -#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow - * 0 = sending last NP - */ - -/* 1000BASE-T Control Register */ -#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ -#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ -#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ -#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ - /* 0=DTE device */ -#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ - /* 0=Configure PHY as Slave */ -#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ - /* 0=Automatic Master/Slave config */ -#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ -#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ -#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ -#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ -#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ - -/* 1000BASE-T Status Register */ -#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ -#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ -#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ -#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ -#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ -#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ -#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ -#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ -#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 -#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 -#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 -#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 -#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 - -/* Extended Status Register */ -#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ -#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ -#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ -#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ - -#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ -#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ - -#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ - /* (0=enable, 1=disable) */ - -/* M88E1000 PHY Specific Control Register */ -#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ -#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ -#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ -#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, - * 0=CLK125 toggling - */ -#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ - /* Manual MDI configuration */ -#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ -#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, - * 100BASE-TX/10BASE-T: - * MDI Mode - */ -#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled - * all speeds. 
- */ -#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 - /* 1=Enable Extended 10BASE-T distance - * (Lower 10BASE-T RX Threshold) - * 0=Normal 10BASE-T RX Threshold */ -#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 - /* 1=5-Bit interface in 100BASE-TX - * 0=MII interface in 100BASE-TX */ -#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ -#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ -#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ - -#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 -#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 -#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 - -/* M88E1000 PHY Specific Status Register */ -#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ -#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ -#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ -#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ -#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; - * 3=110-140M;4=>140M */ -#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ -#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ -#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ -#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ -#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ -#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ -#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ -#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ - -#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 -#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 -#define M88E1000_PSSR_MDIX_SHIFT 6 -#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 - -/* M88E1000 Extended PHY Specific Control Register */ -#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ -#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
- * Will assert lost lock and bring - * link down if idle not seen - * within 1ms in 1000BASE-T - */ -/* Number of times we will attempt to autonegotiate before downshifting if we - * are the master */ -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 -/* Number of times we will attempt to autonegotiate before downshifting if we - * are the slave */ -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 -#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ -#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ -#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ - -/* M88EC018 Rev 2 specific DownShift settings */ -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 - -/* IGP01E1000 Specific Port Config Register - R/W */ -#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 -#define IGP01E1000_PSCFR_PRE_EN 0x0020 -#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 -#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 -#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 -#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 - -/* IGP01E1000 Specific Port Status Register - R/O */ -#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ -#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 -#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C -#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 -#define IGP01E1000_PSSR_LINK_UP 0x0400 -#define IGP01E1000_PSSR_MDIX 0x0800 -#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ -#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 -#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 -#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 -#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ -#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ - -/* IGP01E1000 Specific Port Control Register - R/W */ -#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 -#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 -#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 -#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 -#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 -#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ - -/* IGP01E1000 Specific Port Link Health Register */ -#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 -#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 -#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 -#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 -#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ -#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ -#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ -#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 -#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 -#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 -#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 -#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 -#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 -#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 - -/* IGP01E1000 Channel Quality Register */ -#define IGP01E1000_MSE_CHANNEL_D 0x000F -#define IGP01E1000_MSE_CHANNEL_C 0x00F0 -#define IGP01E1000_MSE_CHANNEL_B 0x0F00 -#define IGP01E1000_MSE_CHANNEL_A 0xF000 - -#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ -#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ -#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ - -/* IGP01E1000 DSP reset macros */ -#define DSP_RESET_ENABLE 0x0 -#define DSP_RESET_DISABLE 0x2 -#define E1000_MAX_DSP_RESETS 10 - -/* IGP01E1000 & IGP02E1000 AGC Registers */ - -#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ -#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ - -/* IGP02E1000 AGC Register Length 9-bit mask */ -#define IGP02E1000_AGC_LENGTH_MASK 0x7F - -/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ -#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 -#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 - -/* The precision error of the cable length is +/- 10 meters */ -#define IGP01E1000_AGC_RANGE 10 -#define IGP02E1000_AGC_RANGE 15 - -/* IGP01E1000 PCS Initialization register */ -/* bits 3:6 in the PCS registers stores the channels polarity */ -#define IGP01E1000_PHY_POLARITY_MASK 0x0078 - -/* IGP01E1000 GMII FIFO Register */ -#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed - * on Link-Up */ -#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ - -/* IGP01E1000 Analog Register */ -#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 -#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 -#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC -#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE - -#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 -#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 -#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 -#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 -#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 - -#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 -#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 -#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 -#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 - -/* Bit definitions for valid PHY IDs. */ -/* I = Integrated - * E = External - */ -#define M88_VENDOR 0x0141 -#define M88E1000_E_PHY_ID 0x01410C50 -#define M88E1000_I_PHY_ID 0x01410C30 -#define M88E1011_I_PHY_ID 0x01410C20 -#define IGP01E1000_I_PHY_ID 0x02A80380 -#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID -#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID -#define M88E1011_I_REV_4 0x04 -#define M88E1111_I_PHY_ID 0x01410CC0 -#define M88E1118_E_PHY_ID 0x01410E40 -#define L1LXT971A_PHY_ID 0x001378E0 - -#define RTL8211B_PHY_ID 0x001CC910 -#define RTL8201N_PHY_ID 0x8200 -#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ -#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ - -/* Bits... 
- * 15-5: page - * 4-0: register offset - */ -#define PHY_PAGE_SHIFT 5 -#define PHY_REG(page, reg) \ - (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) - -#define IGP3_PHY_PORT_CTRL \ - PHY_REG(769, 17) /* Port General Configuration */ -#define IGP3_PHY_RATE_ADAPT_CTRL \ - PHY_REG(769, 25) /* Rate Adapter Control Register */ - -#define IGP3_KMRN_FIFO_CTRL_STATS \ - PHY_REG(770, 16) /* KMRN FIFO's control/status register */ -#define IGP3_KMRN_POWER_MNG_CTRL \ - PHY_REG(770, 17) /* KMRN Power Management Control Register */ -#define IGP3_KMRN_INBAND_CTRL \ - PHY_REG(770, 18) /* KMRN Inband Control Register */ -#define IGP3_KMRN_DIAG \ - PHY_REG(770, 19) /* KMRN Diagnostic register */ -#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ -#define IGP3_KMRN_ACK_TIMEOUT \ - PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ - -#define IGP3_VR_CTRL \ - PHY_REG(776, 18) /* Voltage regulator control register */ -#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ -#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ - -#define IGP3_CAPABILITY \ - PHY_REG(776, 19) /* IGP3 Capability Register */ - -/* Capabilities for SKU Control */ -#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ -#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ -#define IGP3_CAP_ASF 0x0004 /* Support ASF */ -#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ -#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ -#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ -#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ -#define IGP3_CAP_RSS 0x0080 /* Support RSS */ -#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ -#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ - -#define IGP3_PPC_JORDAN_EN 0x0001 -#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 - -#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 -#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E -#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 -#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 - -#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ -#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ - -#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) -#define IGP3_KMRN_EC_DIS_INBAND 0x0080 - -#define IGP03E1000_E_PHY_ID 0x02A80390 -#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ -#define IFE_PLUS_E_PHY_ID 0x02A80320 -#define IFE_C_E_PHY_ID 0x02A80310 - -#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ -#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ -#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ -#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ -#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ -#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ -#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ -#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ -#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ -#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ -#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ -#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ -#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ - -#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ -#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ -#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ -#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ -#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ -#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ -#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ -#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 - -#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ -#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ -#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ -#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ -#define IFE_PSC_FORCE_POLARITY_SHIFT 5 -#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 - -#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ -#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ -#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ -#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ -#define IFE_PMC_MDIX_MODE_SHIFT 6 -#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ - -#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ -#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ -#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ -#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ -#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ -#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ -#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ -#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ -#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ -#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ -#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ - -#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ -#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ -#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ -#define ICH_FLASH_SEG_SIZE_256 256 -#define ICH_FLASH_SEG_SIZE_4K 4096 -#define ICH_FLASH_SEG_SIZE_64K 65536 - -#define ICH_CYCLE_READ 0x0 -#define ICH_CYCLE_RESERVED 0x1 -#define ICH_CYCLE_WRITE 0x2 -#define ICH_CYCLE_ERASE 0x3 - -#define ICH_FLASH_GFPREG 0x0000 -#define ICH_FLASH_HSFSTS 0x0004 -#define ICH_FLASH_HSFCTL 0x0006 -#define ICH_FLASH_FADDR 0x0008 -#define ICH_FLASH_FDATA0 0x0010 -#define ICH_FLASH_FRACC 0x0050 -#define ICH_FLASH_FREG0 0x0054 -#define ICH_FLASH_FREG1 0x0058 -#define ICH_FLASH_FREG2 0x005C -#define ICH_FLASH_FREG3 0x0060 -#define ICH_FLASH_FPR0 0x0074 -#define ICH_FLASH_FPR1 0x0078 -#define ICH_FLASH_SSFSTS 0x0090 -#define ICH_FLASH_SSFCTL 0x0092 -#define ICH_FLASH_PREOP 0x0094 -#define ICH_FLASH_OPTYPE 0x0096 -#define ICH_FLASH_OPMENU 0x0098 - -#define ICH_FLASH_REG_MAPSIZE 0x00A0 -#define ICH_FLASH_SECTOR_SIZE 4096 -#define ICH_GFPREG_BASE_MASK 0x1FFF -#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF - -/* Miscellaneous PHY bit definitions. */ -#define PHY_PREAMBLE 0xFFFFFFFF -#define PHY_SOF 0x01 -#define PHY_OP_READ 0x02 -#define PHY_OP_WRITE 0x01 -#define PHY_TURNAROUND 0x02 -#define PHY_PREAMBLE_SIZE 32 -#define MII_CR_SPEED_1000 0x0040 -#define MII_CR_SPEED_100 0x2000 -#define MII_CR_SPEED_10 0x0000 -#define E1000_PHY_ADDRESS 0x01 -#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ -#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ -#define PHY_REVISION_MASK 0xFFFFFFF0 -#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ -#define REG4_SPEED_MASK 0x01E0 -#define REG9_SPEED_MASK 0x0300 -#define ADVERTISE_10_HALF 0x0001 -#define ADVERTISE_10_FULL 0x0002 -#define ADVERTISE_100_HALF 0x0004 -#define ADVERTISE_100_FULL 0x0008 -#define ADVERTISE_1000_HALF 0x0010 -#define ADVERTISE_1000_FULL 0x0020 -#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ -#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ -#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ - -#endif /* _E1000_HW_H_ */ diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c deleted file mode 100644 index f97afda..0000000 --- a/drivers/net/e1000/e1000_main.c +++ /dev/null @@ -1,4974 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "e1000.h" -#include -#include -#include -#include -#include - -/* Intel Media SOC GbE MDIO physical base address */ -static unsigned long ce4100_gbe_mdio_base_phy; -/* Intel Media SOC GbE MDIO virtual base address */ -void __iomem *ce4100_gbe_mdio_base_virt; - -char e1000_driver_name[] = "e1000"; -static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; -#define DRV_VERSION "7.3.21-k8-NAPI" -const char e1000_driver_version[] = DRV_VERSION; -static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; - -/* e1000_pci_tbl - PCI Device ID Table - * - * Last entry must be all 0s - * - * Macro expands to... - * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} - */ -static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { - INTEL_E1000_ETHERNET_DEVICE(0x1000), - INTEL_E1000_ETHERNET_DEVICE(0x1001), - INTEL_E1000_ETHERNET_DEVICE(0x1004), - INTEL_E1000_ETHERNET_DEVICE(0x1008), - INTEL_E1000_ETHERNET_DEVICE(0x1009), - INTEL_E1000_ETHERNET_DEVICE(0x100C), - INTEL_E1000_ETHERNET_DEVICE(0x100D), - INTEL_E1000_ETHERNET_DEVICE(0x100E), - INTEL_E1000_ETHERNET_DEVICE(0x100F), - INTEL_E1000_ETHERNET_DEVICE(0x1010), - INTEL_E1000_ETHERNET_DEVICE(0x1011), - INTEL_E1000_ETHERNET_DEVICE(0x1012), - INTEL_E1000_ETHERNET_DEVICE(0x1013), - INTEL_E1000_ETHERNET_DEVICE(0x1014), - INTEL_E1000_ETHERNET_DEVICE(0x1015), - INTEL_E1000_ETHERNET_DEVICE(0x1016), - INTEL_E1000_ETHERNET_DEVICE(0x1017), - INTEL_E1000_ETHERNET_DEVICE(0x1018), - INTEL_E1000_ETHERNET_DEVICE(0x1019), - INTEL_E1000_ETHERNET_DEVICE(0x101A), - INTEL_E1000_ETHERNET_DEVICE(0x101D), - INTEL_E1000_ETHERNET_DEVICE(0x101E), - INTEL_E1000_ETHERNET_DEVICE(0x1026), - INTEL_E1000_ETHERNET_DEVICE(0x1027), - INTEL_E1000_ETHERNET_DEVICE(0x1028), - INTEL_E1000_ETHERNET_DEVICE(0x1075), - INTEL_E1000_ETHERNET_DEVICE(0x1076), - INTEL_E1000_ETHERNET_DEVICE(0x1077), - INTEL_E1000_ETHERNET_DEVICE(0x1078), - INTEL_E1000_ETHERNET_DEVICE(0x1079), - INTEL_E1000_ETHERNET_DEVICE(0x107A), - INTEL_E1000_ETHERNET_DEVICE(0x107B), - INTEL_E1000_ETHERNET_DEVICE(0x107C), - INTEL_E1000_ETHERNET_DEVICE(0x108A), - INTEL_E1000_ETHERNET_DEVICE(0x1099), - INTEL_E1000_ETHERNET_DEVICE(0x10B5), - INTEL_E1000_ETHERNET_DEVICE(0x2E6E), - /* required last entry */ - {0,} -}; - -MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); - -int e1000_up(struct e1000_adapter *adapter); -void e1000_down(struct e1000_adapter *adapter); -void e1000_reinit_locked(struct e1000_adapter *adapter); -void e1000_reset(struct e1000_adapter *adapter); -int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); -int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); -void e1000_free_all_tx_resources(struct e1000_adapter *adapter); -void e1000_free_all_rx_resources(struct e1000_adapter *adapter); -static int e1000_setup_tx_resources(struct e1000_adapter *adapter, - struct e1000_tx_ring *txdr); -static int e1000_setup_rx_resources(struct e1000_adapter *adapter, - struct e1000_rx_ring *rxdr); -static void e1000_free_tx_resources(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring); -static void e1000_free_rx_resources(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring); -void e1000_update_stats(struct e1000_adapter *adapter); - -static int e1000_init_module(void); -static void e1000_exit_module(void); -static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); -static void __devexit e1000_remove(struct 
pci_dev *pdev); -static int e1000_alloc_queues(struct e1000_adapter *adapter); -static int e1000_sw_init(struct e1000_adapter *adapter); -static int e1000_open(struct net_device *netdev); -static int e1000_close(struct net_device *netdev); -static void e1000_configure_tx(struct e1000_adapter *adapter); -static void e1000_configure_rx(struct e1000_adapter *adapter); -static void e1000_setup_rctl(struct e1000_adapter *adapter); -static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); -static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); -static void e1000_clean_tx_ring(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring); -static void e1000_clean_rx_ring(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring); -static void e1000_set_rx_mode(struct net_device *netdev); -static void e1000_update_phy_info(unsigned long data); -static void e1000_update_phy_info_task(struct work_struct *work); -static void e1000_watchdog(unsigned long data); -static void e1000_82547_tx_fifo_stall(unsigned long data); -static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); -static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, - struct net_device *netdev); -static struct net_device_stats * e1000_get_stats(struct net_device *netdev); -static int e1000_change_mtu(struct net_device *netdev, int new_mtu); -static int e1000_set_mac(struct net_device *netdev, void *p); -static irqreturn_t e1000_intr(int irq, void *data); -static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring); -static int e1000_clean(struct napi_struct *napi, int budget); -static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int *work_done, int work_to_do); -static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int *work_done, int work_to_do); -static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count); -static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count); -static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); -static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, - int cmd); -static void e1000_enter_82542_rst(struct e1000_adapter *adapter); -static void e1000_leave_82542_rst(struct e1000_adapter *adapter); -static void e1000_tx_timeout(struct net_device *dev); -static void e1000_reset_task(struct work_struct *work); -static void e1000_smartspeed(struct e1000_adapter *adapter); -static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, - struct sk_buff *skb); - -static bool e1000_vlan_used(struct e1000_adapter *adapter); -static void e1000_vlan_mode(struct net_device *netdev, u32 features); -static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); -static void e1000_restore_vlan(struct e1000_adapter *adapter); - -#ifdef CONFIG_PM -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); -static int e1000_resume(struct pci_dev *pdev); -#endif -static void e1000_shutdown(struct pci_dev *pdev); - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* for netdump / net console */ -static void e1000_netpoll (struct net_device *netdev); -#endif - -#define COPYBREAK_DEFAULT 256 -static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; -module_param(copybreak, uint, 0644); 
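/*
 * A note on the "copybreak" knob declared above: on receive, frames no
 * longer than this threshold are copied into a fresh, right-sized
 * buffer so the large DMA buffer can be recycled immediately; bigger
 * frames are handed up as-is. A minimal, self-contained sketch of that
 * decision (the rx_frame type and rx_copybreak() helper are
 * illustrative stand-ins, not the driver's sk_buff-based clean_rx
 * path):
 */
#include <stdlib.h>
#include <string.h>

struct rx_frame { unsigned char *dma_buf; unsigned int len; };

static unsigned int copybreak_limit = 256;	/* cf. COPYBREAK_DEFAULT */

/* Return the buffer to hand up the stack; small frames are copied. */
static unsigned char *rx_copybreak(struct rx_frame *f, int *recycled)
{
	unsigned char *copy;

	*recycled = 0;
	if (f->len > copybreak_limit)
		return f->dma_buf;	/* pass the DMA buffer up as-is */

	copy = malloc(f->len);
	if (!copy)
		return f->dma_buf;	/* allocation failed: fall back */

	memcpy(copy, f->dma_buf, f->len);
	*recycled = 1;			/* dma_buf is free for the next RX */
	return copy;
}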
-MODULE_PARM_DESC(copybreak, - "Maximum size of packet that is copied to a new buffer on receive"); - -static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state); -static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); -static void e1000_io_resume(struct pci_dev *pdev); - -static struct pci_error_handlers e1000_err_handler = { - .error_detected = e1000_io_error_detected, - .slot_reset = e1000_io_slot_reset, - .resume = e1000_io_resume, -}; - -static struct pci_driver e1000_driver = { - .name = e1000_driver_name, - .id_table = e1000_pci_tbl, - .probe = e1000_probe, - .remove = __devexit_p(e1000_remove), -#ifdef CONFIG_PM - /* Power Management Hooks */ - .suspend = e1000_suspend, - .resume = e1000_resume, -#endif - .shutdown = e1000_shutdown, - .err_handler = &e1000_err_handler -}; - -MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -/** - * e1000_get_hw_dev - return device - * used by hardware layer to print debugging information - * - **/ -struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) -{ - struct e1000_adapter *adapter = hw->back; - return adapter->netdev; -} - -/** - * e1000_init_module - Driver Registration Routine - * - * e1000_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - **/ - -static int __init e1000_init_module(void) -{ - int ret; - pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version); - - pr_info("%s\n", e1000_copyright); - - ret = pci_register_driver(&e1000_driver); - if (copybreak != COPYBREAK_DEFAULT) { - if (copybreak == 0) - pr_info("copybreak disabled\n"); - else - pr_info("copybreak enabled for " - "packets <= %u bytes\n", copybreak); - } - return ret; -} - -module_init(e1000_init_module); - -/** - * e1000_exit_module - Driver Exit Cleanup Routine - * - * e1000_exit_module is called just before the driver is removed - * from memory. 
- **/ - -static void __exit e1000_exit_module(void) -{ - pci_unregister_driver(&e1000_driver); -} - -module_exit(e1000_exit_module); - -static int e1000_request_irq(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - irq_handler_t handler = e1000_intr; - int irq_flags = IRQF_SHARED; - int err; - - err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, - netdev); - if (err) { - e_err(probe, "Unable to allocate interrupt Error: %d\n", err); - } - - return err; -} - -static void e1000_free_irq(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - free_irq(adapter->pdev->irq, netdev); -} - -/** - * e1000_irq_disable - Mask off interrupt generation on the NIC - * @adapter: board private structure - **/ - -static void e1000_irq_disable(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - ew32(IMC, ~0); - E1000_WRITE_FLUSH(); - synchronize_irq(adapter->pdev->irq); -} - -/** - * e1000_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - **/ - -static void e1000_irq_enable(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - ew32(IMS, IMS_ENABLE_MASK); - E1000_WRITE_FLUSH(); -} - -static void e1000_update_mng_vlan(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u16 vid = hw->mng_cookie.vlan_id; - u16 old_vid = adapter->mng_vlan_id; - - if (!e1000_vlan_used(adapter)) - return; - - if (!test_bit(vid, adapter->active_vlans)) { - if (hw->mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { - e1000_vlan_rx_add_vid(netdev, vid); - adapter->mng_vlan_id = vid; - } else { - adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; - } - if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && - (vid != old_vid) && - !test_bit(old_vid, adapter->active_vlans)) - e1000_vlan_rx_kill_vid(netdev, old_vid); - } else { - adapter->mng_vlan_id = vid; - } -} - -static void e1000_init_manageability(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - if (adapter->en_mng_pt) { - u32 manc = er32(MANC); - - /* disable hardware interception of ARP */ - manc &= ~(E1000_MANC_ARP_EN); - - ew32(MANC, manc); - } -} - -static void e1000_release_manageability(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - if (adapter->en_mng_pt) { - u32 manc = er32(MANC); - - /* re-enable hardware interception of ARP */ - manc |= E1000_MANC_ARP_EN; - - ew32(MANC, manc); - } -} - -/** - * e1000_configure - configure the hardware for RX and TX - * @adapter = private board structure - **/ -static void e1000_configure(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int i; - - e1000_set_rx_mode(netdev); - - e1000_restore_vlan(adapter); - e1000_init_manageability(adapter); - - e1000_configure_tx(adapter); - e1000_setup_rctl(adapter); - e1000_configure_rx(adapter); - /* call E1000_DESC_UNUSED which always leaves - * at least 1 descriptor unused to make sure - * next_to_use != next_to_clean */ - for (i = 0; i < adapter->num_rx_queues; i++) { - struct e1000_rx_ring *ring = &adapter->rx_ring[i]; - adapter->alloc_rx_buf(adapter, ring, - E1000_DESC_UNUSED(ring)); - } -} - -int e1000_up(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - /* hardware has been reset, we need to reload some things */ - e1000_configure(adapter); - - clear_bit(__E1000_DOWN, &adapter->flags); - - napi_enable(&adapter->napi); - - 
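/*
 * The E1000_DESC_UNUSED comment in e1000_configure() above relies on
 * the classic ring-buffer invariant: one slot is always left empty so
 * that next_to_use == next_to_clean can only mean "ring empty", never
 * "ring full". A self-contained sketch of that arithmetic (struct ring
 * is an illustrative stand-in; the in-tree macro computes the same
 * quantity from the ring's count/next_to_use/next_to_clean fields):
 */
#include <assert.h>

struct ring { unsigned int count, next_to_use, next_to_clean; };

/* Slots the producer may still fill, always reserving one. */
static unsigned int desc_unused(const struct ring *r)
{
	unsigned int used = (r->next_to_use + r->count -
			     r->next_to_clean) % r->count;

	return r->count - used - 1;
}

int main(void)
{
	struct ring r = { .count = 256, .next_to_use = 10,
			  .next_to_clean = 10 };

	assert(desc_unused(&r) == 255);	/* equal indices: empty ring */
	r.next_to_use = 9;		/* producer just behind the cleaner */
	assert(desc_unused(&r) == 0);	/* "full" without the indices
					   ever becoming equal */
	return 0;
}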
e1000_irq_enable(adapter); - - netif_wake_queue(adapter->netdev); - - /* fire a link change interrupt to start the watchdog */ - ew32(ICS, E1000_ICS_LSC); - return 0; -} - -/** - * e1000_power_up_phy - restore link in case the phy was powered down - * @adapter: address of board private structure - * - * The phy may be powered down to save power and turn off link when the - * driver is unloaded and wake on lan is not enabled (among others) - * *** this routine MUST be followed by a call to e1000_reset *** - * - **/ - -void e1000_power_up_phy(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u16 mii_reg = 0; - - /* Just clear the power down bit to wake the phy back up */ - if (hw->media_type == e1000_media_type_copper) { - /* according to the manual, the phy will retain its - * settings across a power-down/up cycle */ - e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); - mii_reg &= ~MII_CR_POWER_DOWN; - e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); - } -} - -static void e1000_power_down_phy(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - /* Power down the PHY so no link is implied when interface is down * - * The PHY cannot be powered down if any of the following is true * - * (a) WoL is enabled - * (b) AMT is active - * (c) SoL/IDER session is active */ - if (!adapter->wol && hw->mac_type >= e1000_82540 && - hw->media_type == e1000_media_type_copper) { - u16 mii_reg = 0; - - switch (hw->mac_type) { - case e1000_82540: - case e1000_82545: - case e1000_82545_rev_3: - case e1000_82546: - case e1000_ce4100: - case e1000_82546_rev_3: - case e1000_82541: - case e1000_82541_rev_2: - case e1000_82547: - case e1000_82547_rev_2: - if (er32(MANC) & E1000_MANC_SMBUS_EN) - goto out; - break; - default: - goto out; - } - e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); - mii_reg |= MII_CR_POWER_DOWN; - e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); - mdelay(1); - } -out: - return; -} - -void e1000_down(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 rctl, tctl; - - - /* disable receives in the hardware */ - rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); - /* flush and sleep below */ - - netif_tx_disable(netdev); - - /* disable transmits in the hardware */ - tctl = er32(TCTL); - tctl &= ~E1000_TCTL_EN; - ew32(TCTL, tctl); - /* flush both disables and wait for them to finish */ - E1000_WRITE_FLUSH(); - msleep(10); - - napi_disable(&adapter->napi); - - e1000_irq_disable(adapter); - - /* - * Setting DOWN must be after irq_disable to prevent - * a screaming interrupt. Setting DOWN also prevents - * timers and tasks from rescheduling. 
- */ - set_bit(__E1000_DOWN, &adapter->flags); - - del_timer_sync(&adapter->tx_fifo_stall_timer); - del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_info_timer); - - adapter->link_speed = 0; - adapter->link_duplex = 0; - netif_carrier_off(netdev); - - e1000_reset(adapter); - e1000_clean_all_tx_rings(adapter); - e1000_clean_all_rx_rings(adapter); -} - -static void e1000_reinit_safe(struct e1000_adapter *adapter) -{ - while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) - msleep(1); - rtnl_lock(); - e1000_down(adapter); - e1000_up(adapter); - rtnl_unlock(); - clear_bit(__E1000_RESETTING, &adapter->flags); -} - -void e1000_reinit_locked(struct e1000_adapter *adapter) -{ - /* if rtnl_lock is not held the call path is bogus */ - ASSERT_RTNL(); - WARN_ON(in_interrupt()); - while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) - msleep(1); - e1000_down(adapter); - e1000_up(adapter); - clear_bit(__E1000_RESETTING, &adapter->flags); -} - -void e1000_reset(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 pba = 0, tx_space, min_tx_space, min_rx_space; - bool legacy_pba_adjust = false; - u16 hwm; - - /* Repartition Pba for greater than 9k mtu - * To take effect CTRL.RST is required. - */ - - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - case e1000_82543: - case e1000_82544: - case e1000_82540: - case e1000_82541: - case e1000_82541_rev_2: - legacy_pba_adjust = true; - pba = E1000_PBA_48K; - break; - case e1000_82545: - case e1000_82545_rev_3: - case e1000_82546: - case e1000_ce4100: - case e1000_82546_rev_3: - pba = E1000_PBA_48K; - break; - case e1000_82547: - case e1000_82547_rev_2: - legacy_pba_adjust = true; - pba = E1000_PBA_30K; - break; - case e1000_undefined: - case e1000_num_macs: - break; - } - - if (legacy_pba_adjust) { - if (hw->max_frame_size > E1000_RXBUFFER_8192) - pba -= 8; /* allocate more FIFO for Tx */ - - if (hw->mac_type == e1000_82547) { - adapter->tx_fifo_head = 0; - adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; - adapter->tx_fifo_size = - (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; - atomic_set(&adapter->tx_fifo_stall, 0); - } - } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { - /* adjust PBA for jumbo frames */ - ew32(PBA, pba); - - /* To maintain wire speed transmits, the Tx FIFO should be - * large enough to accommodate two full transmit packets, - * rounded up to the next 1KB and expressed in KB. Likewise, - * the Rx FIFO should be large enough to accommodate at least - * one full receive packet and is similarly rounded up and - * expressed in KB. 
*/ - pba = er32(PBA); - /* upper 16 bits has Tx packet buffer allocation size in KB */ - tx_space = pba >> 16; - /* lower 16 bits has Rx packet buffer allocation size in KB */ - pba &= 0xffff; - /* - * the tx fifo also stores 16 bytes of information about the tx - * but don't include ethernet FCS because hardware appends it - */ - min_tx_space = (hw->max_frame_size + - sizeof(struct e1000_tx_desc) - - ETH_FCS_LEN) * 2; - min_tx_space = ALIGN(min_tx_space, 1024); - min_tx_space >>= 10; - /* software strips receive CRC, so leave room for it */ - min_rx_space = hw->max_frame_size; - min_rx_space = ALIGN(min_rx_space, 1024); - min_rx_space >>= 10; - - /* If current Tx allocation is less than the min Tx FIFO size, - * and the min Tx FIFO size is less than the current Rx FIFO - * allocation, take space away from current Rx allocation */ - if (tx_space < min_tx_space && - ((min_tx_space - tx_space) < pba)) { - pba = pba - (min_tx_space - tx_space); - - /* PCI/PCIx hardware has PBA alignment constraints */ - switch (hw->mac_type) { - case e1000_82545 ... e1000_82546_rev_3: - pba &= ~(E1000_PBA_8K - 1); - break; - default: - break; - } - - /* if short on rx space, rx wins and must trump tx - * adjustment or use Early Receive if available */ - if (pba < min_rx_space) - pba = min_rx_space; - } - } - - ew32(PBA, pba); - - /* - * flow control settings: - * The high water mark must be low enough to fit one full frame - * (or the size used for early receive) above it in the Rx FIFO. - * Set it to the lower of: - * - 90% of the Rx FIFO size, and - * - the full Rx FIFO size minus the early receive size (for parts - * with ERT support assuming ERT set to E1000_ERT_2048), or - * - the full Rx FIFO size minus one full frame - */ - hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - hw->max_frame_size)); - - hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ - hw->fc_low_water = hw->fc_high_water - 8; - hw->fc_pause_time = E1000_FC_PAUSE_TIME; - hw->fc_send_xon = 1; - hw->fc = hw->original_fc; - - /* Allow time for pending master requests to run */ - e1000_reset_hw(hw); - if (hw->mac_type >= e1000_82544) - ew32(WUC, 0); - - if (e1000_init_hw(hw)) - e_dev_err("Hardware Error\n"); - e1000_update_mng_vlan(adapter); - - /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ - if (hw->mac_type >= e1000_82544 && - hw->autoneg == 1 && - hw->autoneg_advertised == ADVERTISE_1000_FULL) { - u32 ctrl = er32(CTRL); - /* clear phy power management bit if we are in gig only mode, - * which if enabled will attempt negotiation to 100Mb, which - * can cause a loss of link at power off or driver unload */ - ctrl &= ~E1000_CTRL_SWDPIN3; - ew32(CTRL, ctrl); - } - - /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ - ew32(VET, ETHERNET_IEEE_VLAN_TYPE); - - e1000_reset_adaptive(hw); - e1000_phy_get_info(hw, &adapter->phy_info); - - e1000_release_manageability(adapter); -} - -/** - * Dump the eeprom for users having checksum issues - **/ -static void e1000_dump_eeprom(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ethtool_eeprom eeprom; - const struct ethtool_ops *ops = netdev->ethtool_ops; - u8 *data; - int i; - u16 csum_old, csum_new = 0; - - eeprom.len = ops->get_eeprom_len(netdev); - eeprom.offset = 0; - - data = kmalloc(eeprom.len, GFP_KERNEL); - if (!data) { - pr_err("Unable to allocate memory to dump EEPROM data\n"); - return; - } - - ops->get_eeprom(netdev, &eeprom, data); - - csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + - (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); - 
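/*
 * For reference while reading the checksum math above and below: the
 * EEPROM image is little-endian (hence the low/high byte pairing), and
 * the 16-bit words 0x00..0x3E plus the checksum word stored at
 * EEPROM_CHECKSUM_REG (0x3F) must total EEPROM_SUM (0xBABA) modulo
 * 2^16. A self-contained recalculation of the expected checksum word
 * (expected_checksum() is an illustrative helper, not driver code):
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define CHECKSUM_REG	0x3F		/* mirrors EEPROM_CHECKSUM_REG */
#define CHECKSUM_SUM	0xBABA		/* mirrors EEPROM_SUM */

/* data[] holds the raw EEPROM bytes, two per little-endian word. */
static uint16_t expected_checksum(const uint8_t *data)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < CHECKSUM_REG * 2; i += 2)
		sum += (uint16_t)(data[i] | (data[i + 1] << 8));

	return (uint16_t)(CHECKSUM_SUM - sum);	/* value word 0x3F must hold */
}

int main(void)
{
	uint8_t image[CHECKSUM_REG * 2];

	memset(image, 0, sizeof(image));	/* all payload words zero */
	assert(expected_checksum(image) == CHECKSUM_SUM);
	return 0;
}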
for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) - csum_new += data[i] + (data[i + 1] << 8); - csum_new = EEPROM_SUM - csum_new; - - pr_err("/*********************/\n"); - pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); - pr_err("Calculated : 0x%04x\n", csum_new); - - pr_err("Offset Values\n"); - pr_err("======== ======\n"); - print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); - - pr_err("Include this output when contacting your support provider.\n"); - pr_err("This is not a software error! Something bad happened to\n"); - pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); - pr_err("result in further problems, possibly loss of data,\n"); - pr_err("corruption or system hangs!\n"); - pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); - pr_err("which is invalid and requires you to set the proper MAC\n"); - pr_err("address manually before continuing to enable this network\n"); - pr_err("device. Please inspect the EEPROM dump and report the\n"); - pr_err("issue to your hardware vendor or Intel Customer Support.\n"); - pr_err("/*********************/\n"); - - kfree(data); -} - -/** - * e1000_is_need_ioport - determine if an adapter needs ioport resources or not - * @pdev: PCI device information struct - * - * Return true if an adapter needs ioport resources - **/ -static int e1000_is_need_ioport(struct pci_dev *pdev) -{ - switch (pdev->device) { - case E1000_DEV_ID_82540EM: - case E1000_DEV_ID_82540EM_LOM: - case E1000_DEV_ID_82540EP: - case E1000_DEV_ID_82540EP_LOM: - case E1000_DEV_ID_82540EP_LP: - case E1000_DEV_ID_82541EI: - case E1000_DEV_ID_82541EI_MOBILE: - case E1000_DEV_ID_82541ER: - case E1000_DEV_ID_82541ER_LOM: - case E1000_DEV_ID_82541GI: - case E1000_DEV_ID_82541GI_LF: - case E1000_DEV_ID_82541GI_MOBILE: - case E1000_DEV_ID_82544EI_COPPER: - case E1000_DEV_ID_82544EI_FIBER: - case E1000_DEV_ID_82544GC_COPPER: - case E1000_DEV_ID_82544GC_LOM: - case E1000_DEV_ID_82545EM_COPPER: - case E1000_DEV_ID_82545EM_FIBER: - case E1000_DEV_ID_82546EB_COPPER: - case E1000_DEV_ID_82546EB_FIBER: - case E1000_DEV_ID_82546EB_QUAD_COPPER: - return true; - default: - return false; - } -} - -static u32 e1000_fix_features(struct net_device *netdev, u32 features) -{ - /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. 
- */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; - else - features &= ~NETIF_F_HW_VLAN_TX; - - return features; -} - -static int e1000_set_features(struct net_device *netdev, u32 features) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - u32 changed = features ^ netdev->features; - - if (changed & NETIF_F_HW_VLAN_RX) - e1000_vlan_mode(netdev, features); - - if (!(changed & NETIF_F_RXCSUM)) - return 0; - - adapter->rx_csum = !!(features & NETIF_F_RXCSUM); - - if (netif_running(netdev)) - e1000_reinit_locked(adapter); - else - e1000_reset(adapter); - - return 0; -} - -static const struct net_device_ops e1000_netdev_ops = { - .ndo_open = e1000_open, - .ndo_stop = e1000_close, - .ndo_start_xmit = e1000_xmit_frame, - .ndo_get_stats = e1000_get_stats, - .ndo_set_rx_mode = e1000_set_rx_mode, - .ndo_set_mac_address = e1000_set_mac, - .ndo_tx_timeout = e1000_tx_timeout, - .ndo_change_mtu = e1000_change_mtu, - .ndo_do_ioctl = e1000_ioctl, - .ndo_validate_addr = eth_validate_addr, - .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = e1000_netpoll, -#endif - .ndo_fix_features = e1000_fix_features, - .ndo_set_features = e1000_set_features, -}; - -/** - * e1000_init_hw_struct - initialize members of hw struct - * @adapter: board private struct - * @hw: structure used by e1000_hw.c - * - * Factors out initialization of the e1000_hw struct to its own function - * that can be called very early at init (just after struct allocation). - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). - * Returns negative error codes if MAC type setup fails. - */ -static int e1000_init_hw_struct(struct e1000_adapter *adapter, - struct e1000_hw *hw) -{ - struct pci_dev *pdev = adapter->pdev; - - /* PCI config space info */ - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_id = pdev->subsystem_device; - hw->revision_id = pdev->revision; - - pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); - - hw->max_frame_size = adapter->netdev->mtu + - ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; - hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; - - /* identify the MAC */ - if (e1000_set_mac_type(hw)) { - e_err(probe, "Unknown MAC Type\n"); - return -EIO; - } - - switch (hw->mac_type) { - default: - break; - case e1000_82541: - case e1000_82547: - case e1000_82541_rev_2: - case e1000_82547_rev_2: - hw->phy_init_script = 1; - break; - } - - e1000_set_media_type(hw); - e1000_get_bus_info(hw); - - hw->wait_autoneg_complete = false; - hw->tbi_compatibility_en = true; - hw->adaptive_ifs = true; - - /* Copper options */ - - if (hw->media_type == e1000_media_type_copper) { - hw->mdix = AUTO_ALL_MODES; - hw->disable_polarity_correction = false; - hw->master_slave = E1000_MASTER_SLAVE; - } - - return 0; -} - -/** - * e1000_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in e1000_pci_tbl - * - * Returns 0 on success, negative on failure - * - * e1000_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. 
- **/ -static int __devinit e1000_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct e1000_adapter *adapter; - struct e1000_hw *hw; - - static int cards_found = 0; - static int global_quad_port_a = 0; /* global ksp3 port a indication */ - int i, err, pci_using_dac; - u16 eeprom_data = 0; - u16 tmp = 0; - u16 eeprom_apme_mask = E1000_EEPROM_APME; - int bars, need_ioport; - - /* do not allocate ioport bars when not needed */ - need_ioport = e1000_is_need_ioport(pdev); - if (need_ioport) { - bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); - err = pci_enable_device(pdev); - } else { - bars = pci_select_bars(pdev, IORESOURCE_MEM); - err = pci_enable_device_mem(pdev); - } - if (err) - return err; - - err = pci_request_selected_regions(pdev, bars, e1000_driver_name); - if (err) - goto err_pci_reg; - - pci_set_master(pdev); - err = pci_save_state(pdev); - if (err) - goto err_alloc_etherdev; - - err = -ENOMEM; - netdev = alloc_etherdev(sizeof(struct e1000_adapter)); - if (!netdev) - goto err_alloc_etherdev; - - SET_NETDEV_DEV(netdev, &pdev->dev); - - pci_set_drvdata(pdev, netdev); - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - adapter->msg_enable = (1 << debug) - 1; - adapter->bars = bars; - adapter->need_ioport = need_ioport; - - hw = &adapter->hw; - hw->back = adapter; - - err = -EIO; - hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); - if (!hw->hw_addr) - goto err_ioremap; - - if (adapter->need_ioport) { - for (i = BAR_1; i <= BAR_5; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { - hw->io_base = pci_resource_start(pdev, i); - break; - } - } - } - - /* make ready for any if (hw->...) below */ - err = e1000_init_hw_struct(adapter, hw); - if (err) - goto err_sw_init; - - /* - * there is a workaround being applied below that limits - * 64-bit DMA addresses to 64-bit hardware. 
There are some - * 32-bit adapters whose Tx hangs when given 64-bit DMA addresses - */ - pci_using_dac = 0; - if ((hw->bus_type == e1000_bus_type_pcix) && - !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { - /* - * according to DMA-API-HOWTO, coherent calls will always - * succeed if the set call did - */ - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - pci_using_dac = 1; - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - pr_err("No usable DMA config, aborting\n"); - goto err_dma; - } - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - } - - netdev->netdev_ops = &e1000_netdev_ops; - e1000_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); - - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - - adapter->bd_number = cards_found; - - /* setup the private structure */ - - err = e1000_sw_init(adapter); - if (err) - goto err_sw_init; - - err = -EIO; - if (hw->mac_type == e1000_ce4100) { - ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1); - ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy, - pci_resource_len(pdev, BAR_1)); - - if (!ce4100_gbe_mdio_base_virt) - goto err_mdio_ioremap; - } - - if (hw->mac_type >= e1000_82543) { - netdev->hw_features = NETIF_F_SG | - NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_RX; - netdev->features = NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_FILTER; - } - - if ((hw->mac_type >= e1000_82544) && - (hw->mac_type != e1000_82547)) - netdev->hw_features |= NETIF_F_TSO; - - netdev->features |= netdev->hw_features; - netdev->hw_features |= NETIF_F_RXCSUM; - - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } - - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_HW_CSUM; - netdev->vlan_features |= NETIF_F_SG; - - adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); - - /* initialize eeprom parameters */ - if (e1000_init_eeprom_params(hw)) { - e_err(probe, "EEPROM initialization failed\n"); - goto err_eeprom; - } - - /* before reading the EEPROM, reset the controller to - * put the device in a known good starting state */ - - e1000_reset_hw(hw); - - /* make sure the EEPROM is good */ - if (e1000_validate_eeprom_checksum(hw) < 0) { - e_err(probe, "The EEPROM Checksum Is Not Valid\n"); - e1000_dump_eeprom(adapter); - /* - * set MAC address to all zeroes to invalidate and temporarily - * disable this device for the user. 
This blocks regular - * traffic while still permitting ethtool ioctls from reaching - * the hardware as well as allowing the user to run the - * interface after manually setting a hw addr using - * `ip set address` - */ - memset(hw->mac_addr, 0, netdev->addr_len); - } else { - /* copy the MAC address out of the EEPROM */ - if (e1000_read_mac_addr(hw)) - e_err(probe, "EEPROM Read Error\n"); - } - /* don't block initialization here due to bad MAC address */ - memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); - memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); - - if (!is_valid_ether_addr(netdev->perm_addr)) - e_err(probe, "Invalid MAC Address\n"); - - init_timer(&adapter->tx_fifo_stall_timer); - adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall; - adapter->tx_fifo_stall_timer.data = (unsigned long)adapter; - - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = e1000_watchdog; - adapter->watchdog_timer.data = (unsigned long) adapter; - - init_timer(&adapter->phy_info_timer); - adapter->phy_info_timer.function = e1000_update_phy_info; - adapter->phy_info_timer.data = (unsigned long)adapter; - - INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task); - INIT_WORK(&adapter->reset_task, e1000_reset_task); - INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); - - e1000_check_options(adapter); - - /* Initial Wake on LAN setting - * If APM wake is enabled in the EEPROM, - * enable the ACPI Magic Packet filter - */ - - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - case e1000_82543: - break; - case e1000_82544: - e1000_read_eeprom(hw, - EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); - eeprom_apme_mask = E1000_EEPROM_82544_APM; - break; - case e1000_82546: - case e1000_82546_rev_3: - if (er32(STATUS) & E1000_STATUS_FUNC_1) { - e1000_read_eeprom(hw, - EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); - break; - } - /* Fall Through */ - default: - e1000_read_eeprom(hw, - EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); - break; - } - if (eeprom_data & eeprom_apme_mask) - adapter->eeprom_wol |= E1000_WUFC_MAG; - - /* now that we have the eeprom settings, apply the special cases - * where the eeprom may be wrong or the board simply won't support - * wake on lan on a particular port */ - switch (pdev->device) { - case E1000_DEV_ID_82546GB_PCIE: - adapter->eeprom_wol = 0; - break; - case E1000_DEV_ID_82546EB_FIBER: - case E1000_DEV_ID_82546GB_FIBER: - /* Wake events only supported on port A for dual fiber - * regardless of eeprom setting */ - if (er32(STATUS) & E1000_STATUS_FUNC_1) - adapter->eeprom_wol = 0; - break; - case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: - /* if quad port adapter, disable WoL on all but port A */ - if (global_quad_port_a != 0) - adapter->eeprom_wol = 0; - else - adapter->quad_port_a = 1; - /* Reset for multiple quad port adapters */ - if (++global_quad_port_a == 4) - global_quad_port_a = 0; - break; - } - - /* initialize the wol settings based on the eeprom settings */ - adapter->wol = adapter->eeprom_wol; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); - - /* Auto detect PHY address */ - if (hw->mac_type == e1000_ce4100) { - for (i = 0; i < 32; i++) { - hw->phy_addr = i; - e1000_read_phy_reg(hw, PHY_ID2, &tmp); - if (tmp == 0 || tmp == 0xFF) { - if (i == 31) - goto err_eeprom; - continue; - } else - break; - } - } - - /* reset the hardware with the new settings */ - e1000_reset(adapter); - - strcpy(netdev->name, "eth%d"); - err = register_netdev(netdev); - if 
(err) - goto err_register; - - e1000_vlan_mode(netdev, netdev->features); - - /* print bus type/speed/width info */ - e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", - ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), - ((hw->bus_speed == e1000_bus_speed_133) ? 133 : - (hw->bus_speed == e1000_bus_speed_120) ? 120 : - (hw->bus_speed == e1000_bus_speed_100) ? 100 : - (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), - ((hw->bus_width == e1000_bus_width_64) ? 64 : 32), - netdev->dev_addr); - - /* carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - - e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); - - cards_found++; - return 0; - -err_register: -err_eeprom: - e1000_phy_hw_reset(hw); - - if (hw->flash_address) - iounmap(hw->flash_address); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); -err_dma: -err_sw_init: -err_mdio_ioremap: - iounmap(ce4100_gbe_mdio_base_virt); - iounmap(hw->hw_addr); -err_ioremap: - free_netdev(netdev); -err_alloc_etherdev: - pci_release_selected_regions(pdev, bars); -err_pci_reg: - pci_disable_device(pdev); - return err; -} - -/** - * e1000_remove - Device Removal Routine - * @pdev: PCI device information struct - * - * e1000_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. This could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. - **/ - -static void __devexit e1000_remove(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - set_bit(__E1000_DOWN, &adapter->flags); - del_timer_sync(&adapter->tx_fifo_stall_timer); - del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_info_timer); - - cancel_work_sync(&adapter->reset_task); - - e1000_release_manageability(adapter); - - unregister_netdev(netdev); - - e1000_phy_hw_reset(hw); - - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); - - iounmap(hw->hw_addr); - if (hw->flash_address) - iounmap(hw->flash_address); - pci_release_selected_regions(pdev, adapter->bars); - - free_netdev(netdev); - - pci_disable_device(pdev); -} - -/** - * e1000_sw_init - Initialize general software structures (struct e1000_adapter) - * @adapter: board private structure to initialize - * - * e1000_sw_init initializes the Adapter private data structure. - * e1000_init_hw_struct MUST be called before this function - **/ - -static int __devinit e1000_sw_init(struct e1000_adapter *adapter) -{ - adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; - - adapter->num_tx_queues = 1; - adapter->num_rx_queues = 1; - - if (e1000_alloc_queues(adapter)) { - e_err(probe, "Unable to allocate memory for queues\n"); - return -ENOMEM; - } - - /* Explicitly disable IRQ since the NIC can be in any state. */ - e1000_irq_disable(adapter); - - spin_lock_init(&adapter->stats_lock); - - set_bit(__E1000_DOWN, &adapter->flags); - - return 0; -} - -/** - * e1000_alloc_queues - Allocate memory for all rings - * @adapter: board private structure to initialize - * - * We allocate one ring per queue at run-time since we don't know the - * number of queues at compile-time. 
- **/ - -static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) -{ - adapter->tx_ring = kcalloc(adapter->num_tx_queues, - sizeof(struct e1000_tx_ring), GFP_KERNEL); - if (!adapter->tx_ring) - return -ENOMEM; - - adapter->rx_ring = kcalloc(adapter->num_rx_queues, - sizeof(struct e1000_rx_ring), GFP_KERNEL); - if (!adapter->rx_ring) { - kfree(adapter->tx_ring); - return -ENOMEM; - } - - return E1000_SUCCESS; -} - -/** - * e1000_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. - **/ - -static int e1000_open(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int err; - - /* disallow open during test */ - if (test_bit(__E1000_TESTING, &adapter->flags)) - return -EBUSY; - - netif_carrier_off(netdev); - - /* allocate transmit descriptors */ - err = e1000_setup_all_tx_resources(adapter); - if (err) - goto err_setup_tx; - - /* allocate receive descriptors */ - err = e1000_setup_all_rx_resources(adapter); - if (err) - goto err_setup_rx; - - e1000_power_up_phy(adapter); - - adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; - if ((hw->mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { - e1000_update_mng_vlan(adapter); - } - - /* before we allocate an interrupt, we must be ready to handle it. - * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt - * as soon as we call pci_request_irq, so we have to setup our - * clean_rx handler before we do so. */ - e1000_configure(adapter); - - err = e1000_request_irq(adapter); - if (err) - goto err_req_irq; - - /* From here on the code is the same as e1000_up() */ - clear_bit(__E1000_DOWN, &adapter->flags); - - napi_enable(&adapter->napi); - - e1000_irq_enable(adapter); - - netif_start_queue(netdev); - - /* fire a link status change interrupt to start the watchdog */ - ew32(ICS, E1000_ICS_LSC); - - return E1000_SUCCESS; - -err_req_irq: - e1000_power_down_phy(adapter); - e1000_free_all_rx_resources(adapter); -err_setup_rx: - e1000_free_all_tx_resources(adapter); -err_setup_tx: - e1000_reset(adapter); - - return err; -} - -/** - * e1000_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. 
- **/ - -static int e1000_close(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); - e1000_down(adapter); - e1000_power_down_phy(adapter); - e1000_free_irq(adapter); - - e1000_free_all_tx_resources(adapter); - e1000_free_all_rx_resources(adapter); - - /* kill manageability vlan ID if supported, but not if a vlan with - * the same ID is registered on the host OS (let 8021q kill it) */ - if ((hw->mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && - !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { - e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); - } - - return 0; -} - -/** - * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary - * @adapter: address of board private structure - * @start: address of beginning of memory - * @len: length of memory - **/ -static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, - unsigned long len) -{ - struct e1000_hw *hw = &adapter->hw; - unsigned long begin = (unsigned long)start; - unsigned long end = begin + len; - - /* First rev 82545 and 82546 need to not allow any memory - * write location to cross 64k boundary due to errata 23 */ - if (hw->mac_type == e1000_82545 || - hw->mac_type == e1000_ce4100 || - hw->mac_type == e1000_82546) { - return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; - } - - return true; -} - -/** - * e1000_setup_tx_resources - allocate Tx resources (Descriptors) - * @adapter: board private structure - * @txdr: tx descriptor ring (for a specific queue) to setup - * - * Return 0 on success, negative on failure - **/ - -static int e1000_setup_tx_resources(struct e1000_adapter *adapter, - struct e1000_tx_ring *txdr) -{ - struct pci_dev *pdev = adapter->pdev; - int size; - - size = sizeof(struct e1000_buffer) * txdr->count; - txdr->buffer_info = vzalloc(size); - if (!txdr->buffer_info) { - e_err(probe, "Unable to allocate memory for the Tx descriptor " - "ring\n"); - return -ENOMEM; - } - - /* round up to nearest 4K */ - - txdr->size = txdr->count * sizeof(struct e1000_tx_desc); - txdr->size = ALIGN(txdr->size, 4096); - - txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, - GFP_KERNEL); - if (!txdr->desc) { -setup_tx_desc_die: - vfree(txdr->buffer_info); - e_err(probe, "Unable to allocate memory for the Tx descriptor " - "ring\n"); - return -ENOMEM; - } - - /* Fix for errata 23, can't cross 64kB boundary */ - if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { - void *olddesc = txdr->desc; - dma_addr_t olddma = txdr->dma; - e_err(tx_err, "txdr align check failed: %u bytes at %p\n", - txdr->size, txdr->desc); - /* Try again, without freeing the previous */ - txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, - &txdr->dma, GFP_KERNEL); - /* Failed allocation, critical failure */ - if (!txdr->desc) { - dma_free_coherent(&pdev->dev, txdr->size, olddesc, - olddma); - goto setup_tx_desc_die; - } - - if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { - /* give up */ - dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, - txdr->dma); - dma_free_coherent(&pdev->dev, txdr->size, olddesc, - olddma); - e_err(probe, "Unable to allocate aligned memory " - "for the transmit descriptor ring\n"); - vfree(txdr->buffer_info); - return -ENOMEM; - } else { - /* Free old allocation, new allocation was successful */ - dma_free_coherent(&pdev->dev, txdr->size, olddesc, - olddma); - } - } - memset(txdr->desc, 0, 
txdr->size); - - txdr->next_to_use = 0; - txdr->next_to_clean = 0; - - return 0; -} - -/** - * e1000_setup_all_tx_resources - wrapper to allocate Tx resources - * (Descriptors) for all queues - * @adapter: board private structure - * - * Return 0 on success, negative on failure - **/ - -int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_tx_queues; i++) { - err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); - if (err) { - e_err(probe, "Allocation for Tx Queue %u failed\n", i); - for (i-- ; i >= 0; i--) - e1000_free_tx_resources(adapter, - &adapter->tx_ring[i]); - break; - } - } - - return err; -} - -/** - * e1000_configure_tx - Configure 8254x Transmit Unit after Reset - * @adapter: board private structure - * - * Configure the Tx unit of the MAC after a reset. - **/ - -static void e1000_configure_tx(struct e1000_adapter *adapter) -{ - u64 tdba; - struct e1000_hw *hw = &adapter->hw; - u32 tdlen, tctl, tipg; - u32 ipgr1, ipgr2; - - /* Setup the HW Tx Head and Tail descriptor pointers */ - - switch (adapter->num_tx_queues) { - case 1: - default: - tdba = adapter->tx_ring[0].dma; - tdlen = adapter->tx_ring[0].count * - sizeof(struct e1000_tx_desc); - ew32(TDLEN, tdlen); - ew32(TDBAH, (tdba >> 32)); - ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); - ew32(TDT, 0); - ew32(TDH, 0); - adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); - adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); - break; - } - - /* Set the default values for the Tx Inter Packet Gap timer */ - if ((hw->media_type == e1000_media_type_fiber || - hw->media_type == e1000_media_type_internal_serdes)) - tipg = DEFAULT_82543_TIPG_IPGT_FIBER; - else - tipg = DEFAULT_82543_TIPG_IPGT_COPPER; - - switch (hw->mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - tipg = DEFAULT_82542_TIPG_IPGT; - ipgr1 = DEFAULT_82542_TIPG_IPGR1; - ipgr2 = DEFAULT_82542_TIPG_IPGR2; - break; - default: - ipgr1 = DEFAULT_82543_TIPG_IPGR1; - ipgr2 = DEFAULT_82543_TIPG_IPGR2; - break; - } - tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; - tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; - ew32(TIPG, tipg); - - /* Set the Tx Interrupt Delay register */ - - ew32(TIDV, adapter->tx_int_delay); - if (hw->mac_type >= e1000_82540) - ew32(TADV, adapter->tx_abs_int_delay); - - /* Program the Transmit Control Register */ - - tctl = er32(TCTL); - tctl &= ~E1000_TCTL_CT; - tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | - (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); - - e1000_config_collision_dist(hw); - - /* Setup Transmit Descriptor Settings for eop descriptor */ - adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; - - /* only set IDE if we are delaying interrupts using the timers */ - if (adapter->tx_int_delay) - adapter->txd_cmd |= E1000_TXD_CMD_IDE; - - if (hw->mac_type < e1000_82543) - adapter->txd_cmd |= E1000_TXD_CMD_RPS; - else - adapter->txd_cmd |= E1000_TXD_CMD_RS; - - /* Cache if we're 82544 running in PCI-X because we'll - * need this to apply a workaround later in the send path. 
*/ - if (hw->mac_type == e1000_82544 && - hw->bus_type == e1000_bus_type_pcix) - adapter->pcix_82544 = 1; - - ew32(TCTL, tctl); - -} - -/** - * e1000_setup_rx_resources - allocate Rx resources (Descriptors) - * @adapter: board private structure - * @rxdr: rx descriptor ring (for a specific queue) to setup - * - * Returns 0 on success, negative on failure - **/ - -static int e1000_setup_rx_resources(struct e1000_adapter *adapter, - struct e1000_rx_ring *rxdr) -{ - struct pci_dev *pdev = adapter->pdev; - int size, desc_len; - - size = sizeof(struct e1000_buffer) * rxdr->count; - rxdr->buffer_info = vzalloc(size); - if (!rxdr->buffer_info) { - e_err(probe, "Unable to allocate memory for the Rx descriptor " - "ring\n"); - return -ENOMEM; - } - - desc_len = sizeof(struct e1000_rx_desc); - - /* Round up to nearest 4K */ - - rxdr->size = rxdr->count * desc_len; - rxdr->size = ALIGN(rxdr->size, 4096); - - rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, - GFP_KERNEL); - - if (!rxdr->desc) { - e_err(probe, "Unable to allocate memory for the Rx descriptor " - "ring\n"); -setup_rx_desc_die: - vfree(rxdr->buffer_info); - return -ENOMEM; - } - - /* Fix for errata 23, can't cross 64kB boundary */ - if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { - void *olddesc = rxdr->desc; - dma_addr_t olddma = rxdr->dma; - e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", - rxdr->size, rxdr->desc); - /* Try again, without freeing the previous */ - rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, - &rxdr->dma, GFP_KERNEL); - /* Failed allocation, critical failure */ - if (!rxdr->desc) { - dma_free_coherent(&pdev->dev, rxdr->size, olddesc, - olddma); - e_err(probe, "Unable to allocate memory for the Rx " - "descriptor ring\n"); - goto setup_rx_desc_die; - } - - if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { - /* give up */ - dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, - rxdr->dma); - dma_free_coherent(&pdev->dev, rxdr->size, olddesc, - olddma); - e_err(probe, "Unable to allocate aligned memory for " - "the Rx descriptor ring\n"); - goto setup_rx_desc_die; - } else { - /* Free old allocation, new allocation was successful */ - dma_free_coherent(&pdev->dev, rxdr->size, olddesc, - olddma); - } - } - memset(rxdr->desc, 0, rxdr->size); - - rxdr->next_to_clean = 0; - rxdr->next_to_use = 0; - rxdr->rx_skb_top = NULL; - - return 0; -} - -/** - * e1000_setup_all_rx_resources - wrapper to allocate Rx resources - * (Descriptors) for all queues - * @adapter: board private structure - * - * Return 0 on success, negative on failure - **/ - -int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_rx_queues; i++) { - err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); - if (err) { - e_err(probe, "Allocation for Rx Queue %u failed\n", i); - for (i-- ; i >= 0; i--) - e1000_free_rx_resources(adapter, - &adapter->rx_ring[i]); - break; - } - } - - return err; -} - -/** - * e1000_setup_rctl - configure the receive control registers - * @adapter: Board private structure - **/ -static void e1000_setup_rctl(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - - rctl = er32(RCTL); - - rctl &= ~(3 << E1000_RCTL_MO_SHIFT); - - rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | - E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | - (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); - - if (hw->tbi_compatibility_on == 1) - rctl |= E1000_RCTL_SBP; - else - rctl &= ~E1000_RCTL_SBP; - - if 
(adapter->netdev->mtu <= ETH_DATA_LEN) - rctl &= ~E1000_RCTL_LPE; - else - rctl |= E1000_RCTL_LPE; - - /* Setup buffer sizes */ - rctl &= ~E1000_RCTL_SZ_4096; - rctl |= E1000_RCTL_BSEX; - switch (adapter->rx_buffer_len) { - case E1000_RXBUFFER_2048: - default: - rctl |= E1000_RCTL_SZ_2048; - rctl &= ~E1000_RCTL_BSEX; - break; - case E1000_RXBUFFER_4096: - rctl |= E1000_RCTL_SZ_4096; - break; - case E1000_RXBUFFER_8192: - rctl |= E1000_RCTL_SZ_8192; - break; - case E1000_RXBUFFER_16384: - rctl |= E1000_RCTL_SZ_16384; - break; - } - - ew32(RCTL, rctl); -} - -/** - * e1000_configure_rx - Configure 8254x Receive Unit after Reset - * @adapter: board private structure - * - * Configure the Rx unit of the MAC after a reset. - **/ - -static void e1000_configure_rx(struct e1000_adapter *adapter) -{ - u64 rdba; - struct e1000_hw *hw = &adapter->hw; - u32 rdlen, rctl, rxcsum; - - if (adapter->netdev->mtu > ETH_DATA_LEN) { - rdlen = adapter->rx_ring[0].count * - sizeof(struct e1000_rx_desc); - adapter->clean_rx = e1000_clean_jumbo_rx_irq; - adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; - } else { - rdlen = adapter->rx_ring[0].count * - sizeof(struct e1000_rx_desc); - adapter->clean_rx = e1000_clean_rx_irq; - adapter->alloc_rx_buf = e1000_alloc_rx_buffers; - } - - /* disable receives while setting up the descriptors */ - rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); - - /* set the Receive Delay Timer Register */ - ew32(RDTR, adapter->rx_int_delay); - - if (hw->mac_type >= e1000_82540) { - ew32(RADV, adapter->rx_abs_int_delay); - if (adapter->itr_setting != 0) - ew32(ITR, 1000000000 / (adapter->itr * 256)); - } - - /* Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring */ - switch (adapter->num_rx_queues) { - case 1: - default: - rdba = adapter->rx_ring[0].dma; - ew32(RDLEN, rdlen); - ew32(RDBAH, (rdba >> 32)); - ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); - ew32(RDT, 0); - ew32(RDH, 0); - adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); - adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
E1000_RDT : E1000_82542_RDT); - break; - } - - /* Enable 82543 Receive Checksum Offload for TCP and UDP */ - if (hw->mac_type >= e1000_82543) { - rxcsum = er32(RXCSUM); - if (adapter->rx_csum) - rxcsum |= E1000_RXCSUM_TUOFL; - else - /* don't need to clear IPPCSE as it defaults to 0 */ - rxcsum &= ~E1000_RXCSUM_TUOFL; - ew32(RXCSUM, rxcsum); - } - - /* Enable Receives */ - ew32(RCTL, rctl); -} - -/** - * e1000_free_tx_resources - Free Tx Resources per Queue - * @adapter: board private structure - * @tx_ring: Tx descriptor ring for a specific queue - * - * Free all transmit software resources - **/ - -static void e1000_free_tx_resources(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring) -{ - struct pci_dev *pdev = adapter->pdev; - - e1000_clean_tx_ring(adapter, tx_ring); - - vfree(tx_ring->buffer_info); - tx_ring->buffer_info = NULL; - - dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, - tx_ring->dma); - - tx_ring->desc = NULL; -} - -/** - * e1000_free_all_tx_resources - Free Tx Resources for All Queues - * @adapter: board private structure - * - * Free all transmit software resources - **/ - -void e1000_free_all_tx_resources(struct e1000_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); -} - -static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, - struct e1000_buffer *buffer_info) -{ - if (buffer_info->dma) { - if (buffer_info->mapped_as_page) - dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, - buffer_info->length, DMA_TO_DEVICE); - else - dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, - buffer_info->length, - DMA_TO_DEVICE); - buffer_info->dma = 0; - } - if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); - buffer_info->skb = NULL; - } - buffer_info->time_stamp = 0; - /* buffer_info must be completely set up in the transmit path */ -} - -/** - * e1000_clean_tx_ring - Free Tx Buffers - * @adapter: board private structure - * @tx_ring: ring to be cleaned - **/ - -static void e1000_clean_tx_ring(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_buffer *buffer_info; - unsigned long size; - unsigned int i; - - /* Free all the Tx ring sk_buffs */ - - for (i = 0; i < tx_ring->count; i++) { - buffer_info = &tx_ring->buffer_info[i]; - e1000_unmap_and_free_tx_resource(adapter, buffer_info); - } - - size = sizeof(struct e1000_buffer) * tx_ring->count; - memset(tx_ring->buffer_info, 0, size); - - /* Zero out the descriptor ring */ - - memset(tx_ring->desc, 0, tx_ring->size); - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - tx_ring->last_tx_tso = 0; - - writel(0, hw->hw_addr + tx_ring->tdh); - writel(0, hw->hw_addr + tx_ring->tdt); -} - -/** - * e1000_clean_all_tx_rings - Free Tx Buffers for all queues - * @adapter: board private structure - **/ - -static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); -} - -/** - * e1000_free_rx_resources - Free Rx Resources - * @adapter: board private structure - * @rx_ring: ring to clean the resources from - * - * Free all receive software resources - **/ - -static void e1000_free_rx_resources(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring) -{ - struct pci_dev *pdev = adapter->pdev; - - e1000_clean_rx_ring(adapter, rx_ring); - - vfree(rx_ring->buffer_info); - rx_ring->buffer_info = 
NULL; - - dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); - - rx_ring->desc = NULL; -} - -/** - * e1000_free_all_rx_resources - Free Rx Resources for All Queues - * @adapter: board private structure - * - * Free all receive software resources - **/ - -void e1000_free_all_rx_resources(struct e1000_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) - e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); -} - -/** - * e1000_clean_rx_ring - Free Rx Buffers per Queue - * @adapter: board private structure - * @rx_ring: ring to free buffers from - **/ - -static void e1000_clean_rx_ring(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; - unsigned long size; - unsigned int i; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - buffer_info = &rx_ring->buffer_info[i]; - if (buffer_info->dma && - adapter->clean_rx == e1000_clean_rx_irq) { - dma_unmap_single(&pdev->dev, buffer_info->dma, - buffer_info->length, - DMA_FROM_DEVICE); - } else if (buffer_info->dma && - adapter->clean_rx == e1000_clean_jumbo_rx_irq) { - dma_unmap_page(&pdev->dev, buffer_info->dma, - buffer_info->length, - DMA_FROM_DEVICE); - } - - buffer_info->dma = 0; - if (buffer_info->page) { - put_page(buffer_info->page); - buffer_info->page = NULL; - } - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; - } - } - - /* there also may be some cached data from a chained receive */ - if (rx_ring->rx_skb_top) { - dev_kfree_skb(rx_ring->rx_skb_top); - rx_ring->rx_skb_top = NULL; - } - - size = sizeof(struct e1000_buffer) * rx_ring->count; - memset(rx_ring->buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - - writel(0, hw->hw_addr + rx_ring->rdh); - writel(0, hw->hw_addr + rx_ring->rdt); -} - -/** - * e1000_clean_all_rx_rings - Free Rx Buffers for all queues - * @adapter: board private structure - **/ - -static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) - e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); -} - -/* The 82542 2.0 (revision 2) needs to have the receive unit in reset - * and memory write and invalidate disabled for certain operations - */ -static void e1000_enter_82542_rst(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 rctl; - - e1000_pci_clear_mwi(hw); - - rctl = er32(RCTL); - rctl |= E1000_RCTL_RST; - ew32(RCTL, rctl); - E1000_WRITE_FLUSH(); - mdelay(5); - - if (netif_running(netdev)) - e1000_clean_all_rx_rings(adapter); -} - -static void e1000_leave_82542_rst(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 rctl; - - rctl = er32(RCTL); - rctl &= ~E1000_RCTL_RST; - ew32(RCTL, rctl); - E1000_WRITE_FLUSH(); - mdelay(5); - - if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) - e1000_pci_set_mwi(hw); - - if (netif_running(netdev)) { - /* No need to loop, because 82542 supports only 1 queue */ - struct e1000_rx_ring *ring = &adapter->rx_ring[0]; - e1000_configure_rx(adapter); - adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); - } -} - -/** - * e1000_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface 
device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ - -static int e1000_set_mac(struct net_device *netdev, void *p) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - /* 82542 2.0 needs to be in reset to write receive address registers */ - - if (hw->mac_type == e1000_82542_rev2_0) - e1000_enter_82542_rst(adapter); - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); - - e1000_rar_set(hw, hw->mac_addr, 0); - - if (hw->mac_type == e1000_82542_rev2_0) - e1000_leave_82542_rst(adapter); - - return 0; -} - -/** - * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set - * @netdev: network interface device structure - * - * The set_rx_mode entry point is called whenever the unicast or multicast - * address lists or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper unicast, multicast, - * promiscuous mode, and all-multi behavior. - **/ - -static void e1000_set_rx_mode(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct netdev_hw_addr *ha; - bool use_uc = false; - u32 rctl; - u32 hash_value; - int i, rar_entries = E1000_RAR_ENTRIES; - int mta_reg_count = E1000_NUM_MTA_REGISTERS; - u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); - - if (!mcarray) { - e_err(probe, "memory allocation failed\n"); - return; - } - - /* Check for Promiscuous and All Multicast modes */ - - rctl = er32(RCTL); - - if (netdev->flags & IFF_PROMISC) { - rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); - rctl &= ~E1000_RCTL_VFE; - } else { - if (netdev->flags & IFF_ALLMULTI) - rctl |= E1000_RCTL_MPE; - else - rctl &= ~E1000_RCTL_MPE; - /* Enable VLAN filter if there is a VLAN */ - if (e1000_vlan_used(adapter)) - rctl |= E1000_RCTL_VFE; - } - - if (netdev_uc_count(netdev) > rar_entries - 1) { - rctl |= E1000_RCTL_UPE; - } else if (!(netdev->flags & IFF_PROMISC)) { - rctl &= ~E1000_RCTL_UPE; - use_uc = true; - } - - ew32(RCTL, rctl); - - /* 82542 2.0 needs to be in reset to write receive address registers */ - - if (hw->mac_type == e1000_82542_rev2_0) - e1000_enter_82542_rst(adapter); - - /* load the first 14 addresses into the exact filters 1-14. Unicast - * addresses take precedence to avoid disabling unicast filtering - * when possible. 
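The chain of RCTL updates above reduces to a small decision table. A condensed sketch, not the driver's own function; the IFF_* flags come from <linux/if.h> and the E1000_RCTL_* bits from the hardware header:

/* Sketch: receive-filter bits as a function of interface state. */
static u32 sketch_rctl_filter_bits(unsigned int flags, bool vlan_in_use,
                                   int uc_count, int rar_entries)
{
        u32 rctl = 0;

        if (flags & IFF_PROMISC)        /* accept everything, no VLAN filter */
                return E1000_RCTL_UPE | E1000_RCTL_MPE;
        if (flags & IFF_ALLMULTI)
                rctl |= E1000_RCTL_MPE;
        if (vlan_in_use)
                rctl |= E1000_RCTL_VFE;
        if (uc_count > rar_entries - 1) /* too many addresses for the RARs */
                rctl |= E1000_RCTL_UPE; /* fall back to unicast promiscuous */
        return rctl;
}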
- * - * RAR 0 is used for the station MAC address - * if there are not 14 addresses, go ahead and clear the filters - */ - i = 1; - if (use_uc) - netdev_for_each_uc_addr(ha, netdev) { - if (i == rar_entries) - break; - e1000_rar_set(hw, ha->addr, i++); - } - - netdev_for_each_mc_addr(ha, netdev) { - if (i == rar_entries) { - /* load any remaining addresses into the hash table */ - u32 hash_reg, hash_bit, mta; - hash_value = e1000_hash_mc_addr(hw, ha->addr); - hash_reg = (hash_value >> 5) & 0x7F; - hash_bit = hash_value & 0x1F; - mta = (1 << hash_bit); - mcarray[hash_reg] |= mta; - } else { - e1000_rar_set(hw, ha->addr, i++); - } - } - - for (; i < rar_entries; i++) { - E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); - E1000_WRITE_FLUSH(); - E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); - E1000_WRITE_FLUSH(); - } - - /* write the hash table completely, write from bottom to avoid - * both stupid write combining chipsets, and flushing each write */ - for (i = mta_reg_count - 1; i >= 0 ; i--) { - /* - * If we are on an 82544 has an errata where writing odd - * offsets overwrites the previous even offset, but writing - * backwards over the range solves the issue by always - * writing the odd offset first - */ - E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); - } - E1000_WRITE_FLUSH(); - - if (hw->mac_type == e1000_82542_rev2_0) - e1000_leave_82542_rst(adapter); - - kfree(mcarray); -} - -/* Need to wait a few seconds after link up to get diagnostic information from - * the phy */ - -static void e1000_update_phy_info(unsigned long data) -{ - struct e1000_adapter *adapter = (struct e1000_adapter *)data; - schedule_work(&adapter->phy_info_task); -} - -static void e1000_update_phy_info_task(struct work_struct *work) -{ - struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, - phy_info_task); - struct e1000_hw *hw = &adapter->hw; - - rtnl_lock(); - e1000_phy_get_info(hw, &adapter->phy_info); - rtnl_unlock(); -} - -/** - * e1000_82547_tx_fifo_stall - Timer Call-back - * @data: pointer to adapter cast into an unsigned long - **/ -static void e1000_82547_tx_fifo_stall(unsigned long data) -{ - struct e1000_adapter *adapter = (struct e1000_adapter *)data; - schedule_work(&adapter->fifo_stall_task); -} - -/** - * e1000_82547_tx_fifo_stall_task - task to complete work - * @work: work struct contained inside adapter struct - **/ -static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) -{ - struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, - fifo_stall_task); - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 tctl; - - rtnl_lock(); - if (atomic_read(&adapter->tx_fifo_stall)) { - if ((er32(TDT) == er32(TDH)) && - (er32(TDFT) == er32(TDFH)) && - (er32(TDFTS) == er32(TDFHS))) { - tctl = er32(TCTL); - ew32(TCTL, tctl & ~E1000_TCTL_EN); - ew32(TDFT, adapter->tx_head_addr); - ew32(TDFH, adapter->tx_head_addr); - ew32(TDFTS, adapter->tx_head_addr); - ew32(TDFHS, adapter->tx_head_addr); - ew32(TCTL, tctl); - E1000_WRITE_FLUSH(); - - adapter->tx_fifo_head = 0; - atomic_set(&adapter->tx_fifo_stall, 0); - netif_wake_queue(netdev); - } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { - mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); - } - } - rtnl_unlock(); -} - -bool e1000_has_link(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - bool link_active = false; - - /* get_link_status is set on LSC (link status) interrupt or rx - * sequence error interrupt (except on intel ce4100). 
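Worked example of the hash placement above: the upper bits of hash_value pick one of 128 32-bit MTA registers and the low five bits pick the bit within it. For an illustrative hash_value of 0x0f35, hash_reg = (0x0f35 >> 5) & 0x7f = 0x79 (121) and hash_bit = 0x0f35 & 0x1f = 0x15 (21), so bit 21 of MTA[121] is set:

/* Sketch: place one multicast hash into the shadow MTA array,
 * mirroring the hash_reg/hash_bit arithmetic in e1000_set_rx_mode().
 */
static void sketch_set_mta_bit(u32 *mcarray, u32 hash_value)
{
        u32 hash_reg = (hash_value >> 5) & 0x7f; /* which of 128 registers */
        u32 hash_bit = hash_value & 0x1f;        /* which bit inside it */

        mcarray[hash_reg] |= 1u << hash_bit;
}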
- * get_link_status will stay false until the - * e1000_check_for_link establishes link for copper adapters - * ONLY - */ - switch (hw->media_type) { - case e1000_media_type_copper: - if (hw->mac_type == e1000_ce4100) - hw->get_link_status = 1; - if (hw->get_link_status) { - e1000_check_for_link(hw); - link_active = !hw->get_link_status; - } else { - link_active = true; - } - break; - case e1000_media_type_fiber: - e1000_check_for_link(hw); - link_active = !!(er32(STATUS) & E1000_STATUS_LU); - break; - case e1000_media_type_internal_serdes: - e1000_check_for_link(hw); - link_active = hw->serdes_has_link; - break; - default: - break; - } - - return link_active; -} - -/** - * e1000_watchdog - Timer Call-back - * @data: pointer to adapter cast into an unsigned long - **/ -static void e1000_watchdog(unsigned long data) -{ - struct e1000_adapter *adapter = (struct e1000_adapter *)data; - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - struct e1000_tx_ring *txdr = adapter->tx_ring; - u32 link, tctl; - - link = e1000_has_link(adapter); - if ((netif_carrier_ok(netdev)) && link) - goto link_up; - - if (link) { - if (!netif_carrier_ok(netdev)) { - u32 ctrl; - bool txb2b = true; - /* update snapshot of PHY registers on LSC */ - e1000_get_speed_and_duplex(hw, - &adapter->link_speed, - &adapter->link_duplex); - - ctrl = er32(CTRL); - pr_info("%s NIC Link is Up %d Mbps %s, " - "Flow Control: %s\n", - netdev->name, - adapter->link_speed, - adapter->link_duplex == FULL_DUPLEX ? - "Full Duplex" : "Half Duplex", - ((ctrl & E1000_CTRL_TFCE) && (ctrl & - E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & - E1000_CTRL_RFCE) ? "RX" : ((ctrl & - E1000_CTRL_TFCE) ? "TX" : "None"))); - - /* adjust timeout factor according to speed/duplex */ - adapter->tx_timeout_factor = 1; - switch (adapter->link_speed) { - case SPEED_10: - txb2b = false; - adapter->tx_timeout_factor = 16; - break; - case SPEED_100: - txb2b = false; - /* maybe add some timeout factor ? */ - break; - } - - /* enable transmits in the hardware */ - tctl = er32(TCTL); - tctl |= E1000_TCTL_EN; - ew32(TCTL, tctl); - - netif_carrier_on(netdev); - if (!test_bit(__E1000_DOWN, &adapter->flags)) - mod_timer(&adapter->phy_info_timer, - round_jiffies(jiffies + 2 * HZ)); - adapter->smartspeed = 0; - } - } else { - if (netif_carrier_ok(netdev)) { - adapter->link_speed = 0; - adapter->link_duplex = 0; - pr_info("%s NIC Link is Down\n", - netdev->name); - netif_carrier_off(netdev); - - if (!test_bit(__E1000_DOWN, &adapter->flags)) - mod_timer(&adapter->phy_info_timer, - round_jiffies(jiffies + 2 * HZ)); - } - - e1000_smartspeed(adapter); - } - -link_up: - e1000_update_stats(adapter); - - hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; - adapter->tpt_old = adapter->stats.tpt; - hw->collision_delta = adapter->stats.colc - adapter->colc_old; - adapter->colc_old = adapter->stats.colc; - - adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; - adapter->gorcl_old = adapter->stats.gorcl; - adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; - adapter->gotcl_old = adapter->stats.gotcl; - - e1000_update_adaptive(hw); - - if (!netif_carrier_ok(netdev)) { - if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { - /* We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). 
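The E1000_DESC_UNUSED() test used here is plain ring arithmetic: next_to_use chases next_to_clean around a ring of count slots, and one slot is always left empty so that head == tail never becomes ambiguous. A sketch with the macro's logic spelled out as a function (field names assumed to match the e1000_tx_ring layout above):

/* Sketch: free slots in a producer/consumer descriptor ring. */
static unsigned int sketch_desc_unused(unsigned int next_to_clean,
                                       unsigned int next_to_use,
                                       unsigned int count)
{
        if (next_to_clean > next_to_use)
                return next_to_clean - next_to_use - 1;
        return count + next_to_clean - next_to_use - 1;
}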
*/ - adapter->tx_timeout_count++; - schedule_work(&adapter->reset_task); - /* return immediately since reset is imminent */ - return; - } - } - - /* Simple mode for Interrupt Throttle Rate (ITR) */ - if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { - /* - * Symmetric Tx/Rx gets a reduced ITR=2000; - * Total asymmetrical Tx or Rx gets ITR=8000; - * everyone else is between 2000-8000. - */ - u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; - u32 dif = (adapter->gotcl > adapter->gorcl ? - adapter->gotcl - adapter->gorcl : - adapter->gorcl - adapter->gotcl) / 10000; - u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; - - ew32(ITR, 1000000000 / (itr * 256)); - } - - /* Cause software interrupt to ensure rx ring is cleaned */ - ew32(ICS, E1000_ICS_RXDMT0); - - /* Force detection of hung controller every watchdog period */ - adapter->detect_tx_hung = true; - - /* Reset the timer */ - if (!test_bit(__E1000_DOWN, &adapter->flags)) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 2 * HZ)); -} - -enum latency_range { - lowest_latency = 0, - low_latency = 1, - bulk_latency = 2, - latency_invalid = 255 -}; - -/** - * e1000_update_itr - update the dynamic ITR value based on statistics - * @adapter: pointer to adapter - * @itr_setting: current adapter->itr - * @packets: the number of packets during this measurement interval - * @bytes: the number of bytes during this measurement interval - * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. 
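The ITR writes are easier to follow with the units spelled out: on 82540 and later the register holds the minimum gap between interrupts in 256 ns increments, so 1000000000 / (itr * 256) converts a target rate in interrupts per second into register units. For itr = 8000 that is 1e9 / 2048000 = 488 units, roughly 125 us between interrupts. A sketch:

/* Sketch: interrupts-per-second to ITR register units (256 ns each). */
static u32 sketch_itr_to_reg(u32 ints_per_sec)
{
        return 1000000000 / (ints_per_sec * 256);
}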
- * this functionality is controlled by the InterruptThrottleRate module - * parameter (see e1000_param.c) - **/ -static unsigned int e1000_update_itr(struct e1000_adapter *adapter, - u16 itr_setting, int packets, int bytes) -{ - unsigned int retval = itr_setting; - struct e1000_hw *hw = &adapter->hw; - - if (unlikely(hw->mac_type < e1000_82540)) - goto update_itr_done; - - if (packets == 0) - goto update_itr_done; - - switch (itr_setting) { - case lowest_latency: - /* jumbo frames get bulk treatment*/ - if (bytes/packets > 8000) - retval = bulk_latency; - else if ((packets < 5) && (bytes > 512)) - retval = low_latency; - break; - case low_latency: /* 50 usec aka 20000 ints/s */ - if (bytes > 10000) { - /* jumbo frames need bulk latency setting */ - if (bytes/packets > 8000) - retval = bulk_latency; - else if ((packets < 10) || ((bytes/packets) > 1200)) - retval = bulk_latency; - else if ((packets > 35)) - retval = lowest_latency; - } else if (bytes/packets > 2000) - retval = bulk_latency; - else if (packets <= 2 && bytes < 512) - retval = lowest_latency; - break; - case bulk_latency: /* 250 usec aka 4000 ints/s */ - if (bytes > 25000) { - if (packets > 35) - retval = low_latency; - } else if (bytes < 6000) { - retval = low_latency; - } - break; - } - -update_itr_done: - return retval; -} - -static void e1000_set_itr(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u16 current_itr; - u32 new_itr = adapter->itr; - - if (unlikely(hw->mac_type < e1000_82540)) - return; - - /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ - if (unlikely(adapter->link_speed != SPEED_1000)) { - current_itr = 0; - new_itr = 4000; - goto set_itr_now; - } - - adapter->tx_itr = e1000_update_itr(adapter, - adapter->tx_itr, - adapter->total_tx_packets, - adapter->total_tx_bytes); - /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) - adapter->tx_itr = low_latency; - - adapter->rx_itr = e1000_update_itr(adapter, - adapter->rx_itr, - adapter->total_rx_packets, - adapter->total_rx_bytes); - /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) - adapter->rx_itr = low_latency; - - current_itr = max(adapter->rx_itr, adapter->tx_itr); - - switch (current_itr) { - /* counts and packets in update_itr are dependent on these numbers */ - case lowest_latency: - new_itr = 70000; - break; - case low_latency: - new_itr = 20000; /* aka hwitr = ~200 */ - break; - case bulk_latency: - new_itr = 4000; - break; - default: - break; - } - -set_itr_now: - if (new_itr != adapter->itr) { - /* this attempts to bias the interrupt rate towards Bulk - * by adding intermediate steps when interrupt rate is - * increasing */ - new_itr = new_itr > adapter->itr ? 
- min(adapter->itr + (new_itr >> 2), new_itr) : - new_itr; - adapter->itr = new_itr; - ew32(ITR, 1000000000 / (new_itr * 256)); - } -} - -#define E1000_TX_FLAGS_CSUM 0x00000001 -#define E1000_TX_FLAGS_VLAN 0x00000002 -#define E1000_TX_FLAGS_TSO 0x00000004 -#define E1000_TX_FLAGS_IPV4 0x00000008 -#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 -#define E1000_TX_FLAGS_VLAN_SHIFT 16 - -static int e1000_tso(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring, struct sk_buff *skb) -{ - struct e1000_context_desc *context_desc; - struct e1000_buffer *buffer_info; - unsigned int i; - u32 cmd_length = 0; - u16 ipcse = 0, tucse, mss; - u8 ipcss, ipcso, tucss, tucso, hdr_len; - int err; - - if (skb_is_gso(skb)) { - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } - - hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - mss = skb_shinfo(skb)->gso_size; - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - cmd_length = E1000_TXD_CMD_IP; - ipcse = skb_transport_offset(skb) - 1; - } else if (skb->protocol == htons(ETH_P_IPV6)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - ipcse = 0; - } - ipcss = skb_network_offset(skb); - ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; - tucss = skb_transport_offset(skb); - tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; - tucse = 0; - - cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | - E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); - - i = tx_ring->next_to_use; - context_desc = E1000_CONTEXT_DESC(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - - context_desc->lower_setup.ip_fields.ipcss = ipcss; - context_desc->lower_setup.ip_fields.ipcso = ipcso; - context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); - context_desc->upper_setup.tcp_fields.tucss = tucss; - context_desc->upper_setup.tcp_fields.tucso = tucso; - context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); - context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); - context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; - context_desc->cmd_and_length = cpu_to_le32(cmd_length); - - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - - if (++i == tx_ring->count) i = 0; - tx_ring->next_to_use = i; - - return true; - } - return false; -} - -static bool e1000_tx_csum(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring, struct sk_buff *skb) -{ - struct e1000_context_desc *context_desc; - struct e1000_buffer *buffer_info; - unsigned int i; - u8 css; - u32 cmd_len = E1000_TXD_CMD_DEXT; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return false; - - switch (skb->protocol) { - case cpu_to_be16(ETH_P_IP): - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - cmd_len |= E1000_TXD_CMD_TCP; - break; - case cpu_to_be16(ETH_P_IPV6): - /* XXX not handling all IPV6 headers */ - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - cmd_len |= E1000_TXD_CMD_TCP; - break; - default: - if (unlikely(net_ratelimit())) - e_warn(drv, "checksum_partial proto=%x!\n", - skb->protocol); - break; - } - - css = skb_checksum_start_offset(skb); - - i = tx_ring->next_to_use; - buffer_info = &tx_ring->buffer_info[i]; - context_desc = E1000_CONTEXT_DESC(*tx_ring, i); - - context_desc->lower_setup.ip_config = 0; - 
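/* Aside: the offsets e1000_tso() computed above, made concrete for a
 * hypothetical 14-byte Ethernet + 20-byte IPv4 + 20-byte TCP frame
 * (all values are byte offsets from skb->data):
 *
 *   ipcss = 14                start of the IP header
 *   ipcso = 14 + 10 = 24      position of iph->check
 *   ipcse = 14 + 20 - 1 = 33  last byte of the IP header
 *   tucss = 34                start of the TCP header
 *   tucso = 34 + 16 = 50      position of tcph->check
 *   hdr_len = 54              bytes replicated before every segment
 *
 * The ~csum_tcpudp_magic(..., 0, IPPROTO_TCP, 0) seed deliberately
 * uses a zero length: the hardware folds each segment's own length
 * into the TCP checksum as it carves up the payload.
 */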
context_desc->upper_setup.tcp_fields.tucss = css;
- context_desc->upper_setup.tcp_fields.tucso =
- css + skb->csum_offset;
- context_desc->upper_setup.tcp_fields.tucse = 0;
- context_desc->tcp_seg_setup.data = 0;
- context_desc->cmd_and_length = cpu_to_le32(cmd_len);
-
- buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
-
- if (unlikely(++i == tx_ring->count)) i = 0;
- tx_ring->next_to_use = i;
-
- return true;
-}
-
-#define E1000_MAX_TXD_PWR 12
-#define E1000_MAX_DATA_PER_TXD (1 << E1000_MAX_TXD_PWR)
-
-static int e1000_tx_map(struct e1000_adapter *adapter,
- struct e1000_tx_ring *tx_ring,
- struct sk_buff *skb, unsigned int first,
- unsigned int max_per_txd, unsigned int nr_frags,
- unsigned int mss)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_buffer *buffer_info;
- unsigned int len = skb_headlen(skb);
- unsigned int offset = 0, size, count = 0, i;
- unsigned int f;
-
- i = tx_ring->next_to_use;
-
- while (len) {
- buffer_info = &tx_ring->buffer_info[i];
- size = min(len, max_per_txd);
- /* Workaround for Controller erratum --
- * descriptor for non-tso packet in a linear SKB that follows a
- * tso gets written back prematurely before the data is fully
- * DMA'd to the controller */
- if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_is_gso(skb)) {
- tx_ring->last_tx_tso = 0;
- size -= 4;
- }
-
- /* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && !nr_frags && size == len && size > 8))
- size -= 4;
- /* work-around for errata 10 and it applies
- * to all controllers in PCI-X mode
- * The fix is to make sure that the first descriptor of a
- * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
- */
- if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
- (size > 2015) && count == 0))
- size = 2015;
-
- /* Workaround for potential 82544 hang in PCI-X. Avoid
- * terminating buffers within evenly-aligned dwords. */
- if (unlikely(adapter->pcix_82544 &&
- !((unsigned long)(skb->data + offset + size - 1) & 4) &&
- size > 4))
- size -= 4;
-
- buffer_info->length = size;
- /* set time_stamp *before* dma to help avoid a possible race */
- buffer_info->time_stamp = jiffies;
- buffer_info->mapped_as_page = false;
- buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data + offset,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma))
- goto dma_error;
- buffer_info->next_to_watch = i;
-
- len -= size;
- offset += size;
- count++;
- if (len) {
- i++;
- if (unlikely(i == tx_ring->count))
- i = 0;
- }
- }
-
- for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
- offset = frag->page_offset;
-
- while (len) {
- i++;
- if (unlikely(i == tx_ring->count))
- i = 0;
-
- buffer_info = &tx_ring->buffer_info[i];
- size = min(len, max_per_txd);
- /* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
- size -= 4;
- /* Workaround for potential 82544 hang in PCI-X.
- * Avoid terminating buffers within evenly-aligned
- * dwords.
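The 82544 test above asks one question: would this buffer end inside an evenly-aligned dword? Bit 2 of the last byte's address encodes that, and shaving 4 bytes moves the boundary so the next descriptor absorbs the tail. A sketch of the predicate on its own:

/* Sketch: true when the 82544 PCI-X workaround must shorten a buffer,
 * mirroring the condition used twice in e1000_tx_map() above.
 */
static bool sketch_needs_82544_trim(unsigned long buf, unsigned int size)
{
        unsigned long last_byte = buf + size - 1;

        return size > 4 && !(last_byte & 4);
}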
*/ - if (unlikely(adapter->pcix_82544 && - !((unsigned long)(page_to_phys(frag->page) + offset - + size - 1) & 4) && - size > 4)) - size -= 4; - - buffer_info->length = size; - buffer_info->time_stamp = jiffies; - buffer_info->mapped_as_page = true; - buffer_info->dma = dma_map_page(&pdev->dev, frag->page, - offset, size, - DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, buffer_info->dma)) - goto dma_error; - buffer_info->next_to_watch = i; - - len -= size; - offset += size; - count++; - } - } - - tx_ring->buffer_info[i].skb = skb; - tx_ring->buffer_info[first].next_to_watch = i; - - return count; - -dma_error: - dev_err(&pdev->dev, "TX DMA map failed\n"); - buffer_info->dma = 0; - if (count) - count--; - - while (count--) { - if (i==0) - i += tx_ring->count; - i--; - buffer_info = &tx_ring->buffer_info[i]; - e1000_unmap_and_free_tx_resource(adapter, buffer_info); - } - - return 0; -} - -static void e1000_tx_queue(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring, int tx_flags, - int count) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_tx_desc *tx_desc = NULL; - struct e1000_buffer *buffer_info; - u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; - unsigned int i; - - if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { - txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | - E1000_TXD_CMD_TSE; - txd_upper |= E1000_TXD_POPTS_TXSM << 8; - - if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) - txd_upper |= E1000_TXD_POPTS_IXSM << 8; - } - - if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { - txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; - txd_upper |= E1000_TXD_POPTS_TXSM << 8; - } - - if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { - txd_lower |= E1000_TXD_CMD_VLE; - txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); - } - - i = tx_ring->next_to_use; - - while (count--) { - buffer_info = &tx_ring->buffer_info[i]; - tx_desc = E1000_TX_DESC(*tx_ring, i); - tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - tx_desc->lower.data = - cpu_to_le32(txd_lower | buffer_info->length); - tx_desc->upper.data = cpu_to_le32(txd_upper); - if (unlikely(++i == tx_ring->count)) i = 0; - } - - tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). */ - wmb(); - - tx_ring->next_to_use = i; - writel(i, hw->hw_addr + tx_ring->tdt); - /* we need this if more than one processor can write to our tail - * at a time, it syncronizes IO on IA64/Altix systems */ - mmiowb(); -} - -/** - * 82547 workaround to avoid controller hang in half-duplex environment. - * The workaround is to avoid queuing a large packet that would span - * the internal Tx FIFO ring boundary by notifying the stack to resend - * the packet at a later time. This gives the Tx FIFO an opportunity to - * flush all packets. When that occurs, we reset the Tx FIFO pointers - * to the beginning of the Tx FIFO. 
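For reference while reading e1000_tx_queue() above: the command word of a TSO IPv4 packet with a VLAN tag ends up as the OR of five bits, and only the last descriptor of the packet additionally picks up adapter->txd_cmd (EOP plus a report bit) so completion fires once per packet, not per buffer. A sketch of the composition, using the E1000_TXD_* constants from the hardware header:

/* Sketch: per-descriptor command bits for a TSO + VLAN transmit. */
static u32 sketch_tso_vlan_cmd(void)
{
        return E1000_TXD_CMD_IFCS |  /* insert Ethernet FCS */
               E1000_TXD_CMD_DEXT |  /* extended descriptor format */
               E1000_TXD_DTYP_D   |  /* data (not context) descriptor */
               E1000_TXD_CMD_TSE  |  /* TCP segmentation enable */
               E1000_TXD_CMD_VLE;    /* insert VLAN tag from the upper word */
}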
- **/ - -#define E1000_FIFO_HDR 0x10 -#define E1000_82547_PAD_LEN 0x3E0 - -static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, - struct sk_buff *skb) -{ - u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; - u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; - - skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); - - if (adapter->link_duplex != HALF_DUPLEX) - goto no_fifo_stall_required; - - if (atomic_read(&adapter->tx_fifo_stall)) - return 1; - - if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { - atomic_set(&adapter->tx_fifo_stall, 1); - return 1; - } - -no_fifo_stall_required: - adapter->tx_fifo_head += skb_fifo_len; - if (adapter->tx_fifo_head >= adapter->tx_fifo_size) - adapter->tx_fifo_head -= adapter->tx_fifo_size; - return 0; -} - -static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_tx_ring *tx_ring = adapter->tx_ring; - - netif_stop_queue(netdev); - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. */ - if (likely(E1000_DESC_UNUSED(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! */ - netif_start_queue(netdev); - ++adapter->restart_queue; - return 0; -} - -static int e1000_maybe_stop_tx(struct net_device *netdev, - struct e1000_tx_ring *tx_ring, int size) -{ - if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __e1000_maybe_stop_tx(netdev, size); -} - -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) -static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct e1000_tx_ring *tx_ring; - unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; - unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; - unsigned int tx_flags = 0; - unsigned int len = skb_headlen(skb); - unsigned int nr_frags; - unsigned int mss; - int count = 0; - int tso; - unsigned int f; - - /* This goes back to the question of how to logically map a tx queue - * to a flow. Right now, performance is impacted slightly negatively - * if using multiple tx queues. If the stack breaks away from a - * single qdisc implementation, we can look at this again. */ - tx_ring = adapter->tx_ring; - - if (unlikely(skb->len <= 0)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - mss = skb_shinfo(skb)->gso_size; - /* The controller does a simple calculation to - * make sure there is enough room in the FIFO before - * initiating the DMA for each buffer. The calc is: - * 4 = ceil(buffer len/mss). To make sure we don't - * overrun the FIFO, adjust the max buffer len if mss - * drops. 
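Worked example of the TXD_USE_COUNT() estimate above: with max_txd_pwr = 12 (4 KB per descriptor), a 9000-byte linear region costs (9000 >> 12) + 1 = 3 descriptors. The + 1 makes the estimate deliberately conservative; an exact 4096-byte buffer counts as 2 even though 1 would do, which only ever over-reserves:

/* Sketch: the same estimate as the TXD_USE_COUNT(S, X) macro. */
static unsigned int sketch_txd_use_count(unsigned int bytes,
                                         unsigned int pwr)
{
        return (bytes >> pwr) + 1;
}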
*/ - if (mss) { - u8 hdr_len; - max_per_txd = min(mss << 2, max_per_txd); - max_txd_pwr = fls(max_per_txd) - 1; - - hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - if (skb->data_len && hdr_len == len) { - switch (hw->mac_type) { - unsigned int pull_size; - case e1000_82544: - /* Make sure we have room to chop off 4 bytes, - * and that the end alignment will work out to - * this hardware's requirements - * NOTE: this is a TSO only workaround - * if end byte alignment not correct move us - * into the next dword */ - if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) - break; - /* fall through */ - pull_size = min((unsigned int)4, skb->data_len); - if (!__pskb_pull_tail(skb, pull_size)) { - e_err(drv, "__pskb_pull_tail " - "failed.\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - len = skb_headlen(skb); - break; - default: - /* do nothing */ - break; - } - } - } - - /* reserve a descriptor for the offload context */ - if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) - count++; - count++; - - /* Controller Erratum workaround */ - if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) - count++; - - count += TXD_USE_COUNT(len, max_txd_pwr); - - if (adapter->pcix_82544) - count++; - - /* work-around for errata 10 and it applies to all controllers - * in PCI-X mode, so add one more descriptor to the count - */ - if (unlikely((hw->bus_type == e1000_bus_type_pcix) && - (len > 2015))) - count++; - - nr_frags = skb_shinfo(skb)->nr_frags; - for (f = 0; f < nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, - max_txd_pwr); - if (adapter->pcix_82544) - count += nr_frags; - - /* need: count + 2 desc gap to keep tail from touching - * head, otherwise try next time */ - if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) - return NETDEV_TX_BUSY; - - if (unlikely(hw->mac_type == e1000_82547)) { - if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { - netif_stop_queue(netdev); - if (!test_bit(__E1000_DOWN, &adapter->flags)) - mod_timer(&adapter->tx_fifo_stall_timer, - jiffies + 1); - return NETDEV_TX_BUSY; - } - } - - if (vlan_tx_tag_present(skb)) { - tx_flags |= E1000_TX_FLAGS_VLAN; - tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); - } - - first = tx_ring->next_to_use; - - tso = e1000_tso(adapter, tx_ring, skb); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (likely(tso)) { - if (likely(hw->mac_type != e1000_82544)) - tx_ring->last_tx_tso = 1; - tx_flags |= E1000_TX_FLAGS_TSO; - } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) - tx_flags |= E1000_TX_FLAGS_CSUM; - - if (likely(skb->protocol == htons(ETH_P_IP))) - tx_flags |= E1000_TX_FLAGS_IPV4; - - count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, - nr_frags, mss); - - if (count) { - e1000_tx_queue(adapter, tx_ring, tx_flags, count); - /* Make sure there is space in the ring for the next send. 
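One subtlety in the 82544 branch above: the pull is skipped when bit 2 of the last payload byte's address is already set, i.e. the data ends at an alignment the hardware tolerates; otherwise up to four bytes are pulled from the first fragment into the linear area so a later 4-byte trim cannot leave an empty buffer. A sketch of that test (an interpretation of the check above, not a documented rule):

/* Sketch: does this skb's tail already satisfy the 82544 TSO case? */
static bool sketch_82544_tail_ok(const struct sk_buff *skb)
{
        return ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) != 0;
}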
*/ - e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); - - } else { - dev_kfree_skb_any(skb); - tx_ring->buffer_info[first].time_stamp = 0; - tx_ring->next_to_use = first; - } - - return NETDEV_TX_OK; -} - -/** - * e1000_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - **/ - -static void e1000_tx_timeout(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - /* Do the reset outside of interrupt context */ - adapter->tx_timeout_count++; - schedule_work(&adapter->reset_task); -} - -static void e1000_reset_task(struct work_struct *work) -{ - struct e1000_adapter *adapter = - container_of(work, struct e1000_adapter, reset_task); - - e1000_reinit_safe(adapter); -} - -/** - * e1000_get_stats - Get System Network Statistics - * @netdev: network interface device structure - * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. - **/ - -static struct net_device_stats *e1000_get_stats(struct net_device *netdev) -{ - /* only return the current stats */ - return &netdev->stats; -} - -/** - * e1000_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - **/ - -static int e1000_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; - - if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || - (max_frame > MAX_JUMBO_FRAME_SIZE)) { - e_err(probe, "Invalid MTU setting\n"); - return -EINVAL; - } - - /* Adapter-specific max frame size limits. */ - switch (hw->mac_type) { - case e1000_undefined ... e1000_82542_rev2_1: - if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { - e_err(probe, "Jumbo Frames not supported.\n"); - return -EINVAL; - } - break; - default: - /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ - break; - } - - while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) - msleep(1); - /* e1000_down has a dependency on max_frame_size */ - hw->max_frame_size = max_frame; - if (netif_running(netdev)) - e1000_down(adapter); - - /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN - * means we reserve 2 more, this pushes us to allocate from the next - * larger slab size. - * i.e. 
RXBUFFER_2048 --> size-4096 slab - * however with the new *_jumbo_rx* routines, jumbo receives will use - * fragmented skbs */ - - if (max_frame <= E1000_RXBUFFER_2048) - adapter->rx_buffer_len = E1000_RXBUFFER_2048; - else -#if (PAGE_SIZE >= E1000_RXBUFFER_16384) - adapter->rx_buffer_len = E1000_RXBUFFER_16384; -#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) - adapter->rx_buffer_len = PAGE_SIZE; -#endif - - /* adjust allocation if LPE protects us, and we aren't using SBP */ - if (!hw->tbi_compatibility_on && - ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || - (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) - adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; - - pr_info("%s changing MTU from %d to %d\n", - netdev->name, netdev->mtu, new_mtu); - netdev->mtu = new_mtu; - - if (netif_running(netdev)) - e1000_up(adapter); - else - e1000_reset(adapter); - - clear_bit(__E1000_RESETTING, &adapter->flags); - - return 0; -} - -/** - * e1000_update_stats - Update the board statistics counters - * @adapter: board private structure - **/ - -void e1000_update_stats(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - unsigned long flags; - u16 phy_tmp; - -#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF - - /* - * Prevent stats update while adapter is being reset, or if the pci - * connection is down. - */ - if (adapter->link_speed == 0) - return; - if (pci_channel_offline(pdev)) - return; - - spin_lock_irqsave(&adapter->stats_lock, flags); - - /* these counters are modified from e1000_tbi_adjust_stats, - * called from the interrupt context, so they must only - * be written while holding adapter->stats_lock - */ - - adapter->stats.crcerrs += er32(CRCERRS); - adapter->stats.gprc += er32(GPRC); - adapter->stats.gorcl += er32(GORCL); - adapter->stats.gorch += er32(GORCH); - adapter->stats.bprc += er32(BPRC); - adapter->stats.mprc += er32(MPRC); - adapter->stats.roc += er32(ROC); - - adapter->stats.prc64 += er32(PRC64); - adapter->stats.prc127 += er32(PRC127); - adapter->stats.prc255 += er32(PRC255); - adapter->stats.prc511 += er32(PRC511); - adapter->stats.prc1023 += er32(PRC1023); - adapter->stats.prc1522 += er32(PRC1522); - - adapter->stats.symerrs += er32(SYMERRS); - adapter->stats.mpc += er32(MPC); - adapter->stats.scc += er32(SCC); - adapter->stats.ecol += er32(ECOL); - adapter->stats.mcc += er32(MCC); - adapter->stats.latecol += er32(LATECOL); - adapter->stats.dc += er32(DC); - adapter->stats.sec += er32(SEC); - adapter->stats.rlec += er32(RLEC); - adapter->stats.xonrxc += er32(XONRXC); - adapter->stats.xontxc += er32(XONTXC); - adapter->stats.xoffrxc += er32(XOFFRXC); - adapter->stats.xofftxc += er32(XOFFTXC); - adapter->stats.fcruc += er32(FCRUC); - adapter->stats.gptc += er32(GPTC); - adapter->stats.gotcl += er32(GOTCL); - adapter->stats.gotch += er32(GOTCH); - adapter->stats.rnbc += er32(RNBC); - adapter->stats.ruc += er32(RUC); - adapter->stats.rfc += er32(RFC); - adapter->stats.rjc += er32(RJC); - adapter->stats.torl += er32(TORL); - adapter->stats.torh += er32(TORH); - adapter->stats.totl += er32(TOTL); - adapter->stats.toth += er32(TOTH); - adapter->stats.tpr += er32(TPR); - - adapter->stats.ptc64 += er32(PTC64); - adapter->stats.ptc127 += er32(PTC127); - adapter->stats.ptc255 += er32(PTC255); - adapter->stats.ptc511 += er32(PTC511); - adapter->stats.ptc1023 += er32(PTC1023); - adapter->stats.ptc1522 += er32(PTC1522); - - adapter->stats.mptc += er32(MPTC); - adapter->stats.bptc += er32(BPTC); - - /* 
used for adaptive IFS */ - - hw->tx_packet_delta = er32(TPT); - adapter->stats.tpt += hw->tx_packet_delta; - hw->collision_delta = er32(COLC); - adapter->stats.colc += hw->collision_delta; - - if (hw->mac_type >= e1000_82543) { - adapter->stats.algnerrc += er32(ALGNERRC); - adapter->stats.rxerrc += er32(RXERRC); - adapter->stats.tncrs += er32(TNCRS); - adapter->stats.cexterr += er32(CEXTERR); - adapter->stats.tsctc += er32(TSCTC); - adapter->stats.tsctfc += er32(TSCTFC); - } - - /* Fill out the OS statistics structure */ - netdev->stats.multicast = adapter->stats.mprc; - netdev->stats.collisions = adapter->stats.colc; - - /* Rx Errors */ - - /* RLEC on some newer hardware can be incorrect so build - * our own version based on RUC and ROC */ - netdev->stats.rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; - adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; - netdev->stats.rx_length_errors = adapter->stats.rlerrc; - netdev->stats.rx_crc_errors = adapter->stats.crcerrs; - netdev->stats.rx_frame_errors = adapter->stats.algnerrc; - netdev->stats.rx_missed_errors = adapter->stats.mpc; - - /* Tx Errors */ - adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; - netdev->stats.tx_errors = adapter->stats.txerrc; - netdev->stats.tx_aborted_errors = adapter->stats.ecol; - netdev->stats.tx_window_errors = adapter->stats.latecol; - netdev->stats.tx_carrier_errors = adapter->stats.tncrs; - if (hw->bad_tx_carr_stats_fd && - adapter->link_duplex == FULL_DUPLEX) { - netdev->stats.tx_carrier_errors = 0; - adapter->stats.tncrs = 0; - } - - /* Tx Dropped needs to be maintained elsewhere */ - - /* Phy Stats */ - if (hw->media_type == e1000_media_type_copper) { - if ((adapter->link_speed == SPEED_1000) && - (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { - phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; - adapter->phy_stats.idle_errors += phy_tmp; - } - - if ((hw->mac_type <= e1000_82546) && - (hw->phy_type == e1000_phy_m88) && - !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) - adapter->phy_stats.receive_errors += phy_tmp; - } - - /* Management Stats */ - if (hw->has_smbus) { - adapter->stats.mgptc += er32(MGTPTC); - adapter->stats.mgprc += er32(MGTPRC); - adapter->stats.mgpdc += er32(MGTPDC); - } - - spin_unlock_irqrestore(&adapter->stats_lock, flags); -} - -/** - * e1000_intr - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ - -static irqreturn_t e1000_intr(int irq, void *data) -{ - struct net_device *netdev = data; - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 icr = er32(ICR); - - if (unlikely((!icr))) - return IRQ_NONE; /* Not our interrupt */ - - /* - * we might have caused the interrupt, but the above - * read cleared it, and just in case the driver is - * down there is nothing to do so return handled - */ - if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) - return IRQ_HANDLED; - - if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { - hw->get_link_status = 1; - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->flags)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); - } - - /* disable interrupts, without the synchronize_irq bit */ - ew32(IMC, ~0); - E1000_WRITE_FLUSH(); - - if (likely(napi_schedule_prep(&adapter->napi))) { - adapter->total_tx_bytes = 0; - adapter->total_tx_packets = 0; - 
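/* Aside: this is the classic NAPI handoff. The single er32(ICR) read
 * above both identifies and acknowledges the interrupt causes, since
 * ICR is read-to-clear; a zero read means the interrupt belongs to
 * another device sharing the line. Writing ~0 to IMC then masks all
 * sources so the poll loop runs interrupt-free until e1000_clean()
 * calls e1000_irq_enable() again. The byte and packet counters being
 * zeroed here feed e1000_set_itr() at the end of that poll.
 */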
adapter->total_rx_bytes = 0; - adapter->total_rx_packets = 0; - __napi_schedule(&adapter->napi); - } else { - /* this really should not happen! if it does it is basically a - * bug, but not a hard error, so enable ints and continue */ - if (!test_bit(__E1000_DOWN, &adapter->flags)) - e1000_irq_enable(adapter); - } - - return IRQ_HANDLED; -} - -/** - * e1000_clean - NAPI Rx polling callback - * @adapter: board private structure - **/ -static int e1000_clean(struct napi_struct *napi, int budget) -{ - struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); - int tx_clean_complete = 0, work_done = 0; - - tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); - - adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); - - if (!tx_clean_complete) - work_done = budget; - - /* If budget not fully consumed, exit the polling mode */ - if (work_done < budget) { - if (likely(adapter->itr_setting & 3)) - e1000_set_itr(adapter); - napi_complete(napi); - if (!test_bit(__E1000_DOWN, &adapter->flags)) - e1000_irq_enable(adapter); - } - - return work_done; -} - -/** - * e1000_clean_tx_irq - Reclaim resources after transmit completes - * @adapter: board private structure - **/ -static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - struct e1000_tx_desc *tx_desc, *eop_desc; - struct e1000_buffer *buffer_info; - unsigned int i, eop; - unsigned int count = 0; - unsigned int total_tx_bytes=0, total_tx_packets=0; - - i = tx_ring->next_to_clean; - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = E1000_TX_DESC(*tx_ring, eop); - - while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && - (count < tx_ring->count)) { - bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ - for ( ; !cleaned; count++) { - tx_desc = E1000_TX_DESC(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - cleaned = (i == eop); - - if (cleaned) { - struct sk_buff *skb = buffer_info->skb; - unsigned int segs, bytecount; - segs = skb_shinfo(skb)->gso_segs ?: 1; - /* multiply data chunks by size of headers */ - bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; - total_tx_packets += segs; - total_tx_bytes += bytecount; - } - e1000_unmap_and_free_tx_resource(adapter, buffer_info); - tx_desc->upper.data = 0; - - if (unlikely(++i == tx_ring->count)) i = 0; - } - - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = E1000_TX_DESC(*tx_ring, eop); - } - - tx_ring->next_to_clean = i; - -#define TX_WAKE_THRESHOLD 32 - if (unlikely(count && netif_carrier_ok(netdev) && - E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. 
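Worked example of the byte accounting in e1000_clean_tx_irq() above, for a TSO skb with a 54-byte MAC+IP+TCP header (skb_headlen), gso_segs = 4 and skb->len = 54 + 4 * 1448 = 5846: the wire carried (4 - 1) * 54 + 5846 = 6008 bytes, because each of the four segments went out with its own copy of the header:

/* Sketch: wire bytes for a completed (possibly segmented) transmit. */
static unsigned int sketch_tx_bytecount(unsigned int segs,
                                        unsigned int headlen,
                                        unsigned int skb_len)
{
        return (segs - 1) * headlen + skb_len;
}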
- */ - smp_mb(); - - if (netif_queue_stopped(netdev) && - !(test_bit(__E1000_DOWN, &adapter->flags))) { - netif_wake_queue(netdev); - ++adapter->restart_queue; - } - } - - if (adapter->detect_tx_hung) { - /* Detect a transmit hang in hardware, this serializes the - * check with the clearing of time_stamp and movement of i */ - adapter->detect_tx_hung = false; - if (tx_ring->buffer_info[eop].time_stamp && - time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + - (adapter->tx_timeout_factor * HZ)) && - !(er32(STATUS) & E1000_STATUS_TXOFF)) { - - /* detected Tx unit hang */ - e_err(drv, "Detected Tx Unit Hang\n" - " Tx Queue <%lu>\n" - " TDH <%x>\n" - " TDT <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " next_to_watch <%x>\n" - " jiffies <%lx>\n" - " next_to_watch.status <%x>\n", - (unsigned long)((tx_ring - adapter->tx_ring) / - sizeof(struct e1000_tx_ring)), - readl(hw->hw_addr + tx_ring->tdh), - readl(hw->hw_addr + tx_ring->tdt), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, - eop, - jiffies, - eop_desc->upper.fields.status); - netif_stop_queue(netdev); - } - } - adapter->total_tx_bytes += total_tx_bytes; - adapter->total_tx_packets += total_tx_packets; - netdev->stats.tx_bytes += total_tx_bytes; - netdev->stats.tx_packets += total_tx_packets; - return count < tx_ring->count; -} - -/** - * e1000_rx_checksum - Receive Checksum Offload for 82543 - * @adapter: board private structure - * @status_err: receive descriptor status and error fields - * @csum: receive descriptor csum field - * @sk_buff: socket buffer with received data - **/ - -static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, - u32 csum, struct sk_buff *skb) -{ - struct e1000_hw *hw = &adapter->hw; - u16 status = (u16)status_err; - u8 errors = (u8)(status_err >> 24); - - skb_checksum_none_assert(skb); - - /* 82543 or newer only */ - if (unlikely(hw->mac_type < e1000_82543)) return; - /* Ignore Checksum bit is set */ - if (unlikely(status & E1000_RXD_STAT_IXSM)) return; - /* TCP/UDP checksum error bit is set */ - if (unlikely(errors & E1000_RXD_ERR_TCPE)) { - /* let the stack verify checksum errors */ - adapter->hw_csum_err++; - return; - } - /* TCP/UDP Checksum has not been calculated */ - if (!(status & E1000_RXD_STAT_TCPCS)) - return; - - /* It must be a TCP or UDP packet with a valid checksum */ - if (likely(status & E1000_RXD_STAT_TCPCS)) { - /* TCP checksum is good */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - } - adapter->hw_csum_good++; -} - -/** - * e1000_consume_page - helper function - **/ -static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, - u16 length) -{ - bi->page = NULL; - skb->len += length; - skb->data_len += length; - skb->truesize += length; -} - -/** - * e1000_receive_skb - helper function to handle rx indications - * @adapter: board private structure - * @status: descriptor status field as written by hardware - * @vlan: descriptor vlan field as written by hardware (no le/be conversion) - * @skb: pointer to sk_buff to be indicated to stack - */ -static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, - __le16 vlan, struct sk_buff *skb) -{ - skb->protocol = eth_type_trans(skb, adapter->netdev); - - if (status & E1000_RXD_STAT_VP) { - u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; - - __vlan_hwaccel_put_tag(skb, vid); - } - napi_gro_receive(&adapter->napi, skb); -} - -/** - * e1000_clean_jumbo_rx_irq - Send received data 
up the network stack; legacy - * @adapter: board private structure - * @rx_ring: ring to clean - * @work_done: amount of napi work completed this call - * @work_to_do: max amount of work allowed for this call to do - * - * the return value indicates whether actual cleaning was done, there - * is no guarantee that everything was cleaned - */ -static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int *work_done, int work_to_do) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_rx_desc *rx_desc, *next_rxd; - struct e1000_buffer *buffer_info, *next_buffer; - unsigned long irq_flags; - u32 length; - unsigned int i; - int cleaned_count = 0; - bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; - - i = rx_ring->next_to_clean; - rx_desc = E1000_RX_DESC(*rx_ring, i); - buffer_info = &rx_ring->buffer_info[i]; - - while (rx_desc->status & E1000_RXD_STAT_DD) { - struct sk_buff *skb; - u8 status; - - if (*work_done >= work_to_do) - break; - (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ - - status = rx_desc->status; - skb = buffer_info->skb; - buffer_info->skb = NULL; - - if (++i == rx_ring->count) i = 0; - next_rxd = E1000_RX_DESC(*rx_ring, i); - prefetch(next_rxd); - - next_buffer = &rx_ring->buffer_info[i]; - - cleaned = true; - cleaned_count++; - dma_unmap_page(&pdev->dev, buffer_info->dma, - buffer_info->length, DMA_FROM_DEVICE); - buffer_info->dma = 0; - - length = le16_to_cpu(rx_desc->length); - - /* errors is only valid for DD + EOP descriptors */ - if (unlikely((status & E1000_RXD_STAT_EOP) && - (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { - u8 last_byte = *(skb->data + length - 1); - if (TBI_ACCEPT(hw, status, rx_desc->errors, length, - last_byte)) { - spin_lock_irqsave(&adapter->stats_lock, - irq_flags); - e1000_tbi_adjust_stats(hw, &adapter->stats, - length, skb->data); - spin_unlock_irqrestore(&adapter->stats_lock, - irq_flags); - length--; - } else { - /* recycle both page and skb */ - buffer_info->skb = skb; - /* an error means any chain goes out the window - * too */ - if (rx_ring->rx_skb_top) - dev_kfree_skb(rx_ring->rx_skb_top); - rx_ring->rx_skb_top = NULL; - goto next_desc; - } - } - -#define rxtop rx_ring->rx_skb_top - if (!(status & E1000_RXD_STAT_EOP)) { - /* this descriptor is only the beginning (or middle) */ - if (!rxtop) { - /* this is the beginning of a chain */ - rxtop = skb; - skb_fill_page_desc(rxtop, 0, buffer_info->page, - 0, length); - } else { - /* this is the middle of a chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); - /* re-use the skb, only consumed the page */ - buffer_info->skb = skb; - } - e1000_consume_page(buffer_info, rxtop, length); - goto next_desc; - } else { - if (rxtop) { - /* end of the chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); - /* re-use the current skb, we only consumed the - * page */ - buffer_info->skb = skb; - skb = rxtop; - rxtop = NULL; - e1000_consume_page(buffer_info, skb, length); - } else { - /* no chain, got EOP, this buf is the packet - * copybreak to save the put_page/alloc_page */ - if (length <= copybreak && - skb_tailroom(skb) >= length) { - u8 *vaddr; - vaddr = kmap_atomic(buffer_info->page, - KM_SKB_DATA_SOFTIRQ); - memcpy(skb_tail_pointer(skb), vaddr, length); - kunmap_atomic(vaddr, - KM_SKB_DATA_SOFTIRQ); - /* re-use 
the page, so don't erase - * buffer_info->page */ - skb_put(skb, length); - } else { - skb_fill_page_desc(skb, 0, - buffer_info->page, 0, - length); - e1000_consume_page(buffer_info, skb, - length); - } - } - } - - /* Receive Checksum Offload XXX recompute due to CRC strip? */ - e1000_rx_checksum(adapter, - (u32)(status) | - ((u32)(rx_desc->errors) << 24), - le16_to_cpu(rx_desc->csum), skb); - - pskb_trim(skb, skb->len - 4); - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - total_rx_packets++; - - /* eth type trans needs skb->data to point to something */ - if (!pskb_may_pull(skb, ETH_HLEN)) { - e_err(drv, "pskb_may_pull failed.\n"); - dev_kfree_skb(skb); - goto next_desc; - } - - e1000_receive_skb(adapter, status, rx_desc->special, skb); - -next_desc: - rx_desc->status = 0; - - /* return some buffers to hardware, one at a time is too slow */ - if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - } - rx_ring->next_to_clean = i; - - cleaned_count = E1000_DESC_UNUSED(rx_ring); - if (cleaned_count) - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); - - adapter->total_rx_packets += total_rx_packets; - adapter->total_rx_bytes += total_rx_bytes; - netdev->stats.rx_bytes += total_rx_bytes; - netdev->stats.rx_packets += total_rx_packets; - return cleaned; -} - -/* - * this should improve performance for small packets with large amounts - * of reassembly being done in the stack - */ -static void e1000_check_copybreak(struct net_device *netdev, - struct e1000_buffer *buffer_info, - u32 length, struct sk_buff **skb) -{ - struct sk_buff *new_skb; - - if (length > copybreak) - return; - - new_skb = netdev_alloc_skb_ip_align(netdev, length); - if (!new_skb) - return; - - skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, - (*skb)->data - NET_IP_ALIGN, - length + NET_IP_ALIGN); - /* save the skb in buffer_info as good */ - buffer_info->skb = *skb; - *skb = new_skb; -} - -/** - * e1000_clean_rx_irq - Send received data up the network stack; legacy - * @adapter: board private structure - * @rx_ring: ring to clean - * @work_done: amount of napi work completed this call - * @work_to_do: max amount of work allowed for this call to do - */ -static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int *work_done, int work_to_do) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_rx_desc *rx_desc, *next_rxd; - struct e1000_buffer *buffer_info, *next_buffer; - unsigned long flags; - u32 length; - unsigned int i; - int cleaned_count = 0; - bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; - - i = rx_ring->next_to_clean; - rx_desc = E1000_RX_DESC(*rx_ring, i); - buffer_info = &rx_ring->buffer_info[i]; - - while (rx_desc->status & E1000_RXD_STAT_DD) { - struct sk_buff *skb; - u8 status; - - if (*work_done >= work_to_do) - break; - (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ - - status = rx_desc->status; - skb = buffer_info->skb; - buffer_info->skb = NULL; - - prefetch(skb->data - NET_IP_ALIGN); - - if (++i == rx_ring->count) i = 0; - next_rxd = E1000_RX_DESC(*rx_ring, i); - prefetch(next_rxd); - - next_buffer = &rx_ring->buffer_info[i]; - - cleaned = true; - cleaned_count++; - dma_unmap_single(&pdev->dev, 
buffer_info->dma, - buffer_info->length, DMA_FROM_DEVICE); - buffer_info->dma = 0; - - length = le16_to_cpu(rx_desc->length); - /* !EOP means multiple descriptors were used to store a single - * packet, if thats the case we need to toss it. In fact, we - * to toss every packet with the EOP bit clear and the next - * frame that _does_ have the EOP bit set, as it is by - * definition only a frame fragment - */ - if (unlikely(!(status & E1000_RXD_STAT_EOP))) - adapter->discarding = true; - - if (adapter->discarding) { - /* All receives must fit into a single buffer */ - e_dbg("Receive packet consumed multiple buffers\n"); - /* recycle */ - buffer_info->skb = skb; - if (status & E1000_RXD_STAT_EOP) - adapter->discarding = false; - goto next_desc; - } - - if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { - u8 last_byte = *(skb->data + length - 1); - if (TBI_ACCEPT(hw, status, rx_desc->errors, length, - last_byte)) { - spin_lock_irqsave(&adapter->stats_lock, flags); - e1000_tbi_adjust_stats(hw, &adapter->stats, - length, skb->data); - spin_unlock_irqrestore(&adapter->stats_lock, - flags); - length--; - } else { - /* recycle */ - buffer_info->skb = skb; - goto next_desc; - } - } - - /* adjust length to remove Ethernet CRC, this must be - * done after the TBI_ACCEPT workaround above */ - length -= 4; - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += length; - total_rx_packets++; - - e1000_check_copybreak(netdev, buffer_info, length, &skb); - - skb_put(skb, length); - - /* Receive Checksum Offload */ - e1000_rx_checksum(adapter, - (u32)(status) | - ((u32)(rx_desc->errors) << 24), - le16_to_cpu(rx_desc->csum), skb); - - e1000_receive_skb(adapter, status, rx_desc->special, skb); - -next_desc: - rx_desc->status = 0; - - /* return some buffers to hardware, one at a time is too slow */ - if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - } - rx_ring->next_to_clean = i; - - cleaned_count = E1000_DESC_UNUSED(rx_ring); - if (cleaned_count) - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); - - adapter->total_rx_packets += total_rx_packets; - adapter->total_rx_bytes += total_rx_bytes; - netdev->stats.rx_bytes += total_rx_bytes; - netdev->stats.rx_packets += total_rx_packets; - return cleaned; -} - -/** - * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers - * @adapter: address of board private structure - * @rx_ring: pointer to receive ring structure - * @cleaned_count: number of buffers to allocate this pass - **/ - -static void -e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, int cleaned_count) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_rx_desc *rx_desc; - struct e1000_buffer *buffer_info; - struct sk_buff *skb; - unsigned int i; - unsigned int bufsz = 256 - 16 /*for skb_reserve */ ; - - i = rx_ring->next_to_use; - buffer_info = &rx_ring->buffer_info[i]; - - while (cleaned_count--) { - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - goto check_page; - } - - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - if (unlikely(!skb)) { - /* Better luck next round */ - adapter->alloc_rx_buff_failed++; - break; - } - - /* Fix for errata 23, can't cross 64kB boundary */ - if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { - struct sk_buff *oldskb = skb; - 
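For reference, the errata-23 constraint that e1000_check_64k_bound() enforces in the allocation paths here is simply that a receive buffer must not straddle a 64 KiB-aligned address. A minimal sketch of that test (crosses_64k_boundary is a hypothetical name, not the driver's own helper):

#include <linux/types.h>

/* Hypothetical sketch: a buffer crosses a 64 KiB boundary iff its
 * first and last bytes fall into different 64 KiB-aligned regions. */
static inline bool crosses_64k_boundary(unsigned long start,
                                        unsigned long len)
{
        return (start >> 16) != ((start + len - 1) >> 16);
}

Note that when the check fails for a freshly allocated skb, the code above allocates the replacement before freeing the old one, so the allocator cannot simply hand back the same misaligned buffer.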
e_err(rx_err, "skb align check failed: %u bytes at " - "%p\n", bufsz, skb->data); - /* Try again, without freeing the previous */ - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - /* Failed allocation, critical failure */ - if (!skb) { - dev_kfree_skb(oldskb); - adapter->alloc_rx_buff_failed++; - break; - } - - if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { - /* give up */ - dev_kfree_skb(skb); - dev_kfree_skb(oldskb); - break; /* while (cleaned_count--) */ - } - - /* Use new allocation */ - dev_kfree_skb(oldskb); - } - buffer_info->skb = skb; - buffer_info->length = adapter->rx_buffer_len; -check_page: - /* allocate a new page if necessary */ - if (!buffer_info->page) { - buffer_info->page = alloc_page(GFP_ATOMIC); - if (unlikely(!buffer_info->page)) { - adapter->alloc_rx_buff_failed++; - break; - } - } - - if (!buffer_info->dma) { - buffer_info->dma = dma_map_page(&pdev->dev, - buffer_info->page, 0, - buffer_info->length, - DMA_FROM_DEVICE); - if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - put_page(buffer_info->page); - dev_kfree_skb(skb); - buffer_info->page = NULL; - buffer_info->skb = NULL; - buffer_info->dma = 0; - adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ - } - } - - rx_desc = E1000_RX_DESC(*rx_ring, i); - rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - - if (unlikely(++i == rx_ring->count)) - i = 0; - buffer_info = &rx_ring->buffer_info[i]; - } - - if (likely(rx_ring->next_to_use != i)) { - rx_ring->next_to_use = i; - if (unlikely(i-- == 0)) - i = (rx_ring->count - 1); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). */ - wmb(); - writel(i, adapter->hw.hw_addr + rx_ring->rdt); - } -} - -/** - * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended - * @adapter: address of board private structure - **/ - -static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_rx_desc *rx_desc; - struct e1000_buffer *buffer_info; - struct sk_buff *skb; - unsigned int i; - unsigned int bufsz = adapter->rx_buffer_len; - - i = rx_ring->next_to_use; - buffer_info = &rx_ring->buffer_info[i]; - - while (cleaned_count--) { - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - goto map_skb; - } - - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - if (unlikely(!skb)) { - /* Better luck next round */ - adapter->alloc_rx_buff_failed++; - break; - } - - /* Fix for errata 23, can't cross 64kB boundary */ - if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { - struct sk_buff *oldskb = skb; - e_err(rx_err, "skb align check failed: %u bytes at " - "%p\n", bufsz, skb->data); - /* Try again, without freeing the previous */ - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - /* Failed allocation, critical failure */ - if (!skb) { - dev_kfree_skb(oldskb); - adapter->alloc_rx_buff_failed++; - break; - } - - if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { - /* give up */ - dev_kfree_skb(skb); - dev_kfree_skb(oldskb); - adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ - } - - /* Use new allocation */ - dev_kfree_skb(oldskb); - } - buffer_info->skb = skb; - buffer_info->length = adapter->rx_buffer_len; -map_skb: - buffer_info->dma = dma_map_single(&pdev->dev, - 
skb->data, - buffer_info->length, - DMA_FROM_DEVICE); - if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - dev_kfree_skb(skb); - buffer_info->skb = NULL; - buffer_info->dma = 0; - adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ - } - - /* - * XXX if it was allocated cleanly it will never map to a - * boundary crossing - */ - - /* Fix for errata 23, can't cross 64kB boundary */ - if (!e1000_check_64k_bound(adapter, - (void *)(unsigned long)buffer_info->dma, - adapter->rx_buffer_len)) { - e_err(rx_err, "dma align check failed: %u bytes at " - "%p\n", adapter->rx_buffer_len, - (void *)(unsigned long)buffer_info->dma); - dev_kfree_skb(skb); - buffer_info->skb = NULL; - - dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - - adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ - } - rx_desc = E1000_RX_DESC(*rx_ring, i); - rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - - if (unlikely(++i == rx_ring->count)) - i = 0; - buffer_info = &rx_ring->buffer_info[i]; - } - - if (likely(rx_ring->next_to_use != i)) { - rx_ring->next_to_use = i; - if (unlikely(i-- == 0)) - i = (rx_ring->count - 1); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). */ - wmb(); - writel(i, hw->hw_addr + rx_ring->rdt); - } -} - -/** - * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. - * @adapter: - **/ - -static void e1000_smartspeed(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u16 phy_status; - u16 phy_ctrl; - - if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || - !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) - return; - - if (adapter->smartspeed == 0) { - /* If Master/Slave config fault is asserted twice, - * we assume back-to-back */ - e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); - if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; - e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); - if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; - e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); - if (phy_ctrl & CR_1000T_MS_ENABLE) { - phy_ctrl &= ~CR_1000T_MS_ENABLE; - e1000_write_phy_reg(hw, PHY_1000T_CTRL, - phy_ctrl); - adapter->smartspeed++; - if (!e1000_phy_setup_autoneg(hw) && - !e1000_read_phy_reg(hw, PHY_CTRL, - &phy_ctrl)) { - phy_ctrl |= (MII_CR_AUTO_NEG_EN | - MII_CR_RESTART_AUTO_NEG); - e1000_write_phy_reg(hw, PHY_CTRL, - phy_ctrl); - } - } - return; - } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { - /* If still no link, perhaps using 2/3 pair cable */ - e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); - phy_ctrl |= CR_1000T_MS_ENABLE; - e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); - if (!e1000_phy_setup_autoneg(hw) && - !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { - phy_ctrl |= (MII_CR_AUTO_NEG_EN | - MII_CR_RESTART_AUTO_NEG); - e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); - } - } - /* Restart process after E1000_SMARTSPEED_MAX iterations */ - if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) - adapter->smartspeed = 0; -} - -/** - * e1000_ioctl - - * @netdev: - * @ifreq: - * @cmd: - **/ - -static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return e1000_mii_ioctl(netdev, ifr, cmd); - default: - return -EOPNOTSUPP; - } -} - -/** - * e1000_mii_ioctl - 
- * @netdev: - * @ifreq: - * @cmd: - **/ - -static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, - int cmd) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct mii_ioctl_data *data = if_mii(ifr); - int retval; - u16 mii_reg; - unsigned long flags; - - if (hw->media_type != e1000_media_type_copper) - return -EOPNOTSUPP; - - switch (cmd) { - case SIOCGMIIPHY: - data->phy_id = hw->phy_addr; - break; - case SIOCGMIIREG: - spin_lock_irqsave(&adapter->stats_lock, flags); - if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, - &data->val_out)) { - spin_unlock_irqrestore(&adapter->stats_lock, flags); - return -EIO; - } - spin_unlock_irqrestore(&adapter->stats_lock, flags); - break; - case SIOCSMIIREG: - if (data->reg_num & ~(0x1F)) - return -EFAULT; - mii_reg = data->val_in; - spin_lock_irqsave(&adapter->stats_lock, flags); - if (e1000_write_phy_reg(hw, data->reg_num, - mii_reg)) { - spin_unlock_irqrestore(&adapter->stats_lock, flags); - return -EIO; - } - spin_unlock_irqrestore(&adapter->stats_lock, flags); - if (hw->media_type == e1000_media_type_copper) { - switch (data->reg_num) { - case PHY_CTRL: - if (mii_reg & MII_CR_POWER_DOWN) - break; - if (mii_reg & MII_CR_AUTO_NEG_EN) { - hw->autoneg = 1; - hw->autoneg_advertised = 0x2F; - } else { - u32 speed; - if (mii_reg & 0x40) - speed = SPEED_1000; - else if (mii_reg & 0x2000) - speed = SPEED_100; - else - speed = SPEED_10; - retval = e1000_set_spd_dplx( - adapter, speed, - ((mii_reg & 0x100) - ? DUPLEX_FULL : - DUPLEX_HALF)); - if (retval) - return retval; - } - if (netif_running(adapter->netdev)) - e1000_reinit_locked(adapter); - else - e1000_reset(adapter); - break; - case M88E1000_PHY_SPEC_CTRL: - case M88E1000_EXT_PHY_SPEC_CTRL: - if (e1000_phy_reset(hw)) - return -EIO; - break; - } - } else { - switch (data->reg_num) { - case PHY_CTRL: - if (mii_reg & MII_CR_POWER_DOWN) - break; - if (netif_running(adapter->netdev)) - e1000_reinit_locked(adapter); - else - e1000_reset(adapter); - break; - } - } - break; - default: - return -EOPNOTSUPP; - } - return E1000_SUCCESS; -} - -void e1000_pci_set_mwi(struct e1000_hw *hw) -{ - struct e1000_adapter *adapter = hw->back; - int ret_val = pci_set_mwi(adapter->pdev); - - if (ret_val) - e_err(probe, "Error in setting MWI\n"); -} - -void e1000_pci_clear_mwi(struct e1000_hw *hw) -{ - struct e1000_adapter *adapter = hw->back; - - pci_clear_mwi(adapter->pdev); -} - -int e1000_pcix_get_mmrbc(struct e1000_hw *hw) -{ - struct e1000_adapter *adapter = hw->back; - return pcix_get_mmrbc(adapter->pdev); -} - -void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) -{ - struct e1000_adapter *adapter = hw->back; - pcix_set_mmrbc(adapter->pdev, mmrbc); -} - -void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) -{ - outl(value, port); -} - -static bool e1000_vlan_used(struct e1000_adapter *adapter) -{ - u16 vid; - - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - return true; - return false; -} - -static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, - bool filter_on) -{ - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - - if (!test_bit(__E1000_DOWN, &adapter->flags)) - e1000_irq_disable(adapter); - - if (filter_on) { - /* enable VLAN receive filtering */ - rctl = er32(RCTL); - rctl &= ~E1000_RCTL_CFIEN; - if (!(adapter->netdev->flags & IFF_PROMISC)) - rctl |= E1000_RCTL_VFE; - ew32(RCTL, rctl); - e1000_update_mng_vlan(adapter); - } else { - /* disable VLAN receive filtering */ - rctl = er32(RCTL); - rctl 
&= ~E1000_RCTL_VFE; - ew32(RCTL, rctl); - } - - if (!test_bit(__E1000_DOWN, &adapter->flags)) - e1000_irq_enable(adapter); -} - -static void e1000_vlan_mode(struct net_device *netdev, u32 features) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 ctrl; - - if (!test_bit(__E1000_DOWN, &adapter->flags)) - e1000_irq_disable(adapter); - - ctrl = er32(CTRL); - if (features & NETIF_F_HW_VLAN_RX) { - /* enable VLAN tag insert/strip */ - ctrl |= E1000_CTRL_VME; - } else { - /* disable VLAN tag insert/strip */ - ctrl &= ~E1000_CTRL_VME; - } - ew32(CTRL, ctrl); - - if (!test_bit(__E1000_DOWN, &adapter->flags)) - e1000_irq_enable(adapter); -} - -static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 vfta, index; - - if ((hw->mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && - (vid == adapter->mng_vlan_id)) - return; - - if (!e1000_vlan_used(adapter)) - e1000_vlan_filter_on_off(adapter, true); - - /* add VID to filter table */ - index = (vid >> 5) & 0x7F; - vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); - vfta |= (1 << (vid & 0x1F)); - e1000_write_vfta(hw, index, vfta); - - set_bit(vid, adapter->active_vlans); -} - -static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 vfta, index; - - if (!test_bit(__E1000_DOWN, &adapter->flags)) - e1000_irq_disable(adapter); - if (!test_bit(__E1000_DOWN, &adapter->flags)) - e1000_irq_enable(adapter); - - /* remove VID from filter table */ - index = (vid >> 5) & 0x7F; - vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); - vfta &= ~(1 << (vid & 0x1F)); - e1000_write_vfta(hw, index, vfta); - - clear_bit(vid, adapter->active_vlans); - - if (!e1000_vlan_used(adapter)) - e1000_vlan_filter_on_off(adapter, false); -} - -static void e1000_restore_vlan(struct e1000_adapter *adapter) -{ - u16 vid; - - if (!e1000_vlan_used(adapter)) - return; - - e1000_vlan_filter_on_off(adapter, true); - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - e1000_vlan_rx_add_vid(adapter->netdev, vid); -} - -int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) -{ - struct e1000_hw *hw = &adapter->hw; - - hw->autoneg = 0; - - /* Make sure dplx is at most 1 bit and lsb of speed is not set - * for the switch() below to work */ - if ((spd & 1) || (dplx & ~1)) - goto err_inval; - - /* Fiber NICs only allow 1000 gbps Full duplex */ - if ((hw->media_type == e1000_media_type_fiber) && - spd != SPEED_1000 && - dplx != DUPLEX_FULL) - goto err_inval; - - switch (spd + dplx) { - case SPEED_10 + DUPLEX_HALF: - hw->forced_speed_duplex = e1000_10_half; - break; - case SPEED_10 + DUPLEX_FULL: - hw->forced_speed_duplex = e1000_10_full; - break; - case SPEED_100 + DUPLEX_HALF: - hw->forced_speed_duplex = e1000_100_half; - break; - case SPEED_100 + DUPLEX_FULL: - hw->forced_speed_duplex = e1000_100_full; - break; - case SPEED_1000 + DUPLEX_FULL: - hw->autoneg = 1; - hw->autoneg_advertised = ADVERTISE_1000_FULL; - break; - case SPEED_1000 + DUPLEX_HALF: /* not supported */ - default: - goto err_inval; - } - return 0; - -err_inval: - e_err(probe, "Unsupported Speed/Duplex configuration\n"); - return -EINVAL; -} - -static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - 
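The VFTA arithmetic in e1000_vlan_rx_add_vid()/e1000_vlan_rx_kill_vid() above spreads the 4096 possible VLAN IDs across 128 32-bit filter registers. A minimal standalone sketch of that mapping (vfta_set_bit is a hypothetical helper, shown on a plain array rather than the real register file):

#include <linux/types.h>

/* 4096 VLAN IDs mapped onto 128 x 32-bit VFTA registers. */
static void vfta_set_bit(u32 vfta[128], u16 vid, bool enable)
{
        u32 index = (vid >> 5) & 0x7F;  /* vid / 32: which register   */
        u32 mask  = 1u << (vid & 0x1F); /* vid % 32: which bit in it  */

        if (enable)
                vfta[index] |= mask;
        else
                vfta[index] &= ~mask;
}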
struct e1000_hw *hw = &adapter->hw; - u32 ctrl, ctrl_ext, rctl, status; - u32 wufc = adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif - - netif_device_detach(netdev); - - if (netif_running(netdev)) { - WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); - e1000_down(adapter); - } - -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; -#endif - - status = er32(STATUS); - if (status & E1000_STATUS_LU) - wufc &= ~E1000_WUFC_LNKC; - - if (wufc) { - e1000_setup_rctl(adapter); - e1000_set_rx_mode(netdev); - - /* turn on all-multi mode if wake on multicast is enabled */ - if (wufc & E1000_WUFC_MC) { - rctl = er32(RCTL); - rctl |= E1000_RCTL_MPE; - ew32(RCTL, rctl); - } - - if (hw->mac_type >= e1000_82540) { - ctrl = er32(CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 - ctrl |= E1000_CTRL_ADVD3WUC | - E1000_CTRL_EN_PHY_PWR_MGMT; - ew32(CTRL, ctrl); - } - - if (hw->media_type == e1000_media_type_fiber || - hw->media_type == e1000_media_type_internal_serdes) { - /* keep the laser running in D3 */ - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; - ew32(CTRL_EXT, ctrl_ext); - } - - ew32(WUC, E1000_WUC_PME_EN); - ew32(WUFC, wufc); - } else { - ew32(WUC, 0); - ew32(WUFC, 0); - } - - e1000_release_manageability(adapter); - - *enable_wake = !!wufc; - - /* make sure adapter isn't asleep if manageability is enabled */ - if (adapter->en_mng_pt) - *enable_wake = true; - - if (netif_running(netdev)) - e1000_free_irq(adapter); - - pci_disable_device(pdev); - - return 0; -} - -#ifdef CONFIG_PM -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) -{ - int retval; - bool wake; - - retval = __e1000_shutdown(pdev, &wake); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; -} - -static int e1000_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); - - if (adapter->need_ioport) - err = pci_enable_device(pdev); - else - err = pci_enable_device_mem(pdev); - if (err) { - pr_err("Cannot enable PCI device from suspend\n"); - return err; - } - pci_set_master(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - if (netif_running(netdev)) { - err = e1000_request_irq(adapter); - if (err) - return err; - } - - e1000_power_up_phy(adapter); - e1000_reset(adapter); - ew32(WUS, ~0); - - e1000_init_manageability(adapter); - - if (netif_running(netdev)) - e1000_up(adapter); - - netif_device_attach(netdev); - - return 0; -} -#endif - -static void e1000_shutdown(struct pci_dev *pdev) -{ - bool wake; - - __e1000_shutdown(pdev, &wake); - - if (system_state == SYSTEM_POWER_OFF) { - pci_wake_from_d3(pdev, wake); - pci_set_power_state(pdev, PCI_D3hot); - } -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. 
- */ -static void e1000_netpoll(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - disable_irq(adapter->pdev->irq); - e1000_intr(adapter->pdev->irq, netdev); - enable_irq(adapter->pdev->irq); -} -#endif - -/** - * e1000_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state - * - * This function is called after a PCI bus error affecting - * this device has been detected. - */ -static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - - netif_device_detach(netdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (netif_running(netdev)) - e1000_down(adapter); - pci_disable_device(pdev); - - /* Request a slot slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * e1000_io_slot_reset - called after the pci bus has been reset. - * @pdev: Pointer to PCI device - * - * Restart the card from scratch, as if from a cold-boot. Implementation - * resembles the first-half of the e1000_resume routine. - */ -static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int err; - - if (adapter->need_ioport) - err = pci_enable_device(pdev); - else - err = pci_enable_device_mem(pdev); - if (err) { - pr_err("Cannot re-enable PCI device after reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - pci_set_master(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - e1000_reset(adapter); - ew32(WUS, ~0); - - return PCI_ERS_RESULT_RECOVERED; -} - -/** - * e1000_io_resume - called when traffic can start flowing again. - * @pdev: Pointer to PCI device - * - * This callback is called when the error recovery driver tells us that - * its OK to resume normal operation. Implementation resembles the - * second-half of the e1000_resume routine. - */ -static void e1000_io_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - - e1000_init_manageability(adapter); - - if (netif_running(netdev)) { - if (e1000_up(adapter)) { - pr_info("can't bring device back up after reset\n"); - return; - } - } - - netif_device_attach(netdev); -} - -/* e1000_main.c */ diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h deleted file mode 100644 index 33e7c45a..0000000 --- a/drivers/net/e1000/e1000_osdep.h +++ /dev/null @@ -1,109 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
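The three callbacks above implement the standard PCI error-recovery (AER) sequence: e1000_io_error_detected() detaches the netdev and requests a slot reset, e1000_io_slot_reset() re-enables and re-initializes the device from scratch, and e1000_io_resume() brings traffic back up. They would be wired together roughly as below; the actual registration lives outside this hunk, so the sketch is illustrative only:

static struct pci_error_handlers e1000_err_handler = {
        .error_detected = e1000_io_error_detected,
        .slot_reset     = e1000_io_slot_reset,
        .resume         = e1000_io_resume,
};

/* referenced from the driver's struct pci_driver as
 *      .err_handler = &e1000_err_handler
 */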
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -/* glue for the OS independent part of e1000 - * includes register access macros - */ - -#ifndef _E1000_OSDEP_H_ -#define _E1000_OSDEP_H_ - -#include - -#define CONFIG_RAM_BASE 0x60000 -#define GBE_CONFIG_OFFSET 0x0 - -#define GBE_CONFIG_RAM_BASE \ - ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) - -#define GBE_CONFIG_BASE_VIRT \ - ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) - -#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ - (iowrite16_rep(base + offset, data, count)) - -#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ - (ioread16_rep(base + (offset << 1), data, count)) - -#define er32(reg) \ - (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ - ? E1000_##reg : E1000_82542_##reg))) - -#define ew32(reg, value) \ - (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ - ? E1000_##reg : E1000_82542_##reg)))) - -#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ - writel((value), ((a)->hw_addr + \ - (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ - ((offset) << 2)))) - -#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ - readl((a)->hw_addr + \ - (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ - ((offset) << 2))) - -#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY -#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY - -#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ - writew((value), ((a)->hw_addr + \ - (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ - ((offset) << 1)))) - -#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ - readw((a)->hw_addr + \ - (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ - ((offset) << 1))) - -#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ - writeb((value), ((a)->hw_addr + \ - (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ - (offset)))) - -#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ - readb((a)->hw_addr + \ - (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ - (offset))) - -#define E1000_WRITE_FLUSH() er32(STATUS) - -#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ - writel((value), ((a)->flash_address + reg))) - -#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ - readl((a)->flash_address + reg)) - -#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ - writew((value), ((a)->flash_address + reg))) - -#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ - readw((a)->flash_address + reg)) - -#endif /* _E1000_OSDEP_H_ */ diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c deleted file mode 100644 index 1301eba..0000000 --- a/drivers/net/e1000/e1000_param.c +++ /dev/null @@ -1,755 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "e1000.h" - -/* This is the only thing that needs to be changed to adjust the - * maximum number of ports that the driver can manage. - */ - -#define E1000_MAX_NIC 32 - -#define OPTION_UNSET -1 -#define OPTION_DISABLED 0 -#define OPTION_ENABLED 1 - -/* All parameters are treated the same, as an integer array of values. - * This macro just reduces the need to repeat the same declaration code - * over and over (plus this helps to avoid typo bugs). - */ - -#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } -#define E1000_PARAM(X, desc) \ - static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ - static unsigned int num_##X; \ - module_param_array_named(X, X, int, &num_##X, 0); \ - MODULE_PARM_DESC(X, desc); - -/* Transmit Descriptor Count - * - * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers - * Valid Range: 80-4096 for 82544 and newer - * - * Default Value: 256 - */ -E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); - -/* Receive Descriptor Count - * - * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers - * Valid Range: 80-4096 for 82544 and newer - * - * Default Value: 256 - */ -E1000_PARAM(RxDescriptors, "Number of receive descriptors"); - -/* User Specified Speed Override - * - * Valid Range: 0, 10, 100, 1000 - * - 0 - auto-negotiate at all supported speeds - * - 10 - only link at 10 Mbps - * - 100 - only link at 100 Mbps - * - 1000 - only link at 1000 Mbps - * - * Default Value: 0 - */ -E1000_PARAM(Speed, "Speed setting"); - -/* User Specified Duplex Override - * - * Valid Range: 0-2 - * - 0 - auto-negotiate for duplex - * - 1 - only link at half duplex - * - 2 - only link at full duplex - * - * Default Value: 0 - */ -E1000_PARAM(Duplex, "Duplex setting"); - -/* Auto-negotiation Advertisement Override - * - * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) - * - * The AutoNeg value is a bit mask describing which speed and duplex - * combinations should be advertised during auto-negotiation. 
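To make the parameter plumbing concrete: each E1000_PARAM(X, desc) invocation above expands to roughly the following (shown for TxDescriptors), producing one module-parameter array plus a count of how many entries the user actually supplied:

static int __devinitdata TxDescriptors[E1000_MAX_NIC + 1] = E1000_PARAM_INIT;
static unsigned int num_TxDescriptors;
module_param_array_named(TxDescriptors, TxDescriptors, int,
                         &num_TxDescriptors, 0);
MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");

Loading the module with, say, TxDescriptors=256,4096 then configures the first two boards individually, and num_TxDescriptors records that two values were given; this is why the option-checking code below always tests num_X > bd before reading X[bd].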
- * The supported speed and duplex modes are listed below - * - * Bit 7 6 5 4 3 2 1 0 - * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 - * Duplex Full Full Half Full Half - * - * Default Value: 0x2F (copper); 0x20 (fiber) - */ -E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); -#define AUTONEG_ADV_DEFAULT 0x2F -#define AUTONEG_ADV_MASK 0x2F - -/* User Specified Flow Control Override - * - * Valid Range: 0-3 - * - 0 - No Flow Control - * - 1 - Rx only, respond to PAUSE frames but do not generate them - * - 2 - Tx only, generate PAUSE frames but ignore them on receive - * - 3 - Full Flow Control Support - * - * Default Value: Read flow control settings from the EEPROM - */ -E1000_PARAM(FlowControl, "Flow Control setting"); -#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL - -/* XsumRX - Receive Checksum Offload Enable/Disable - * - * Valid Range: 0, 1 - * - 0 - disables all checksum offload - * - 1 - enables receive IP/TCP/UDP checksum offload - * on 82543 and newer -based NICs - * - * Default Value: 1 - */ -E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); - -/* Transmit Interrupt Delay in units of 1.024 microseconds - * Tx interrupt delay needs to typically be set to something non zero - * - * Valid Range: 0-65535 - */ -E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); -#define DEFAULT_TIDV 8 -#define MAX_TXDELAY 0xFFFF -#define MIN_TXDELAY 0 - -/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds - * - * Valid Range: 0-65535 - */ -E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); -#define DEFAULT_TADV 32 -#define MAX_TXABSDELAY 0xFFFF -#define MIN_TXABSDELAY 0 - -/* Receive Interrupt Delay in units of 1.024 microseconds - * hardware will likely hang if you set this to anything but zero. - * - * Valid Range: 0-65535 - */ -E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); -#define DEFAULT_RDTR 0 -#define MAX_RXDELAY 0xFFFF -#define MIN_RXDELAY 0 - -/* Receive Absolute Interrupt Delay in units of 1.024 microseconds - * - * Valid Range: 0-65535 - */ -E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); -#define DEFAULT_RADV 8 -#define MAX_RXABSDELAY 0xFFFF -#define MIN_RXABSDELAY 0 - -/* Interrupt Throttle Rate (interrupts/sec) - * - * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) - */ -E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); -#define DEFAULT_ITR 3 -#define MAX_ITR 100000 -#define MIN_ITR 100 - -/* Enable Smart Power Down of the PHY - * - * Valid Range: 0, 1 - * - * Default Value: 0 (disabled) - */ -E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); - -struct e1000_option { - enum { enable_option, range_option, list_option } type; - const char *name; - const char *err; - int def; - union { - struct { /* range_option info */ - int min; - int max; - } r; - struct { /* list_option info */ - int nr; - const struct e1000_opt_list { int i; char *str; } *p; - } l; - } arg; -}; - -static int __devinit e1000_validate_option(unsigned int *value, - const struct e1000_option *opt, - struct e1000_adapter *adapter) -{ - if (*value == OPTION_UNSET) { - *value = opt->def; - return 0; - } - - switch (opt->type) { - case enable_option: - switch (*value) { - case OPTION_ENABLED: - e_dev_info("%s Enabled\n", opt->name); - return 0; - case OPTION_DISABLED: - e_dev_info("%s Disabled\n", opt->name); - return 0; - } - break; - case range_option: - if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { - e_dev_info("%s set to %i\n", opt->name, *value); - return 0; - } - break; - 
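Decoding the AutoNeg defaults against the bit table above: with bit 0 = 10/Half (0x01), bit 1 = 10/Full (0x02), bit 2 = 100/Half (0x04), bit 3 = 100/Full (0x08) and bit 5 = 1000/Full (0x20), the default and mask work out as:

/* AUTONEG_ADV_DEFAULT == AUTONEG_ADV_MASK == 0x2F
 *      == 0x01 | 0x02 | 0x04 | 0x08 | 0x20
 * i.e. every supported combination except 1000/Half, which the
 * hardware does not support (compare the an_list table further
 * down, where 0x2f reads "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD").
 */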
case list_option: { - int i; - const struct e1000_opt_list *ent; - - for (i = 0; i < opt->arg.l.nr; i++) { - ent = &opt->arg.l.p[i]; - if (*value == ent->i) { - if (ent->str[0] != '\0') - e_dev_info("%s\n", ent->str); - return 0; - } - } - } - break; - default: - BUG(); - } - - e_dev_info("Invalid %s value specified (%i) %s\n", - opt->name, *value, opt->err); - *value = opt->def; - return -1; -} - -static void e1000_check_fiber_options(struct e1000_adapter *adapter); -static void e1000_check_copper_options(struct e1000_adapter *adapter); - -/** - * e1000_check_options - Range Checking for Command Line Parameters - * @adapter: board private structure - * - * This routine checks all command line parameters for valid user - * input. If an invalid value is given, or if no user specified - * value exists, a default value is used. The final value is stored - * in a variable in the adapter structure. - **/ - -void __devinit e1000_check_options(struct e1000_adapter *adapter) -{ - struct e1000_option opt; - int bd = adapter->bd_number; - - if (bd >= E1000_MAX_NIC) { - e_dev_warn("Warning: no configuration for board #%i " - "using defaults for all values\n", bd); - } - - { /* Transmit Descriptor Count */ - struct e1000_tx_ring *tx_ring = adapter->tx_ring; - int i; - e1000_mac_type mac_type = adapter->hw.mac_type; - - opt = (struct e1000_option) { - .type = range_option, - .name = "Transmit Descriptors", - .err = "using default of " - __MODULE_STRING(E1000_DEFAULT_TXD), - .def = E1000_DEFAULT_TXD, - .arg = { .r = { - .min = E1000_MIN_TXD, - .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD - }} - }; - - if (num_TxDescriptors > bd) { - tx_ring->count = TxDescriptors[bd]; - e1000_validate_option(&tx_ring->count, &opt, adapter); - tx_ring->count = ALIGN(tx_ring->count, - REQ_TX_DESCRIPTOR_MULTIPLE); - } else { - tx_ring->count = opt.def; - } - for (i = 0; i < adapter->num_tx_queues; i++) - tx_ring[i].count = tx_ring->count; - } - { /* Receive Descriptor Count */ - struct e1000_rx_ring *rx_ring = adapter->rx_ring; - int i; - e1000_mac_type mac_type = adapter->hw.mac_type; - - opt = (struct e1000_option) { - .type = range_option, - .name = "Receive Descriptors", - .err = "using default of " - __MODULE_STRING(E1000_DEFAULT_RXD), - .def = E1000_DEFAULT_RXD, - .arg = { .r = { - .min = E1000_MIN_RXD, - .max = mac_type < e1000_82544 ? 
E1000_MAX_RXD : E1000_MAX_82544_RXD - }} - }; - - if (num_RxDescriptors > bd) { - rx_ring->count = RxDescriptors[bd]; - e1000_validate_option(&rx_ring->count, &opt, adapter); - rx_ring->count = ALIGN(rx_ring->count, - REQ_RX_DESCRIPTOR_MULTIPLE); - } else { - rx_ring->count = opt.def; - } - for (i = 0; i < adapter->num_rx_queues; i++) - rx_ring[i].count = rx_ring->count; - } - { /* Checksum Offload Enable/Disable */ - opt = (struct e1000_option) { - .type = enable_option, - .name = "Checksum Offload", - .err = "defaulting to Enabled", - .def = OPTION_ENABLED - }; - - if (num_XsumRX > bd) { - unsigned int rx_csum = XsumRX[bd]; - e1000_validate_option(&rx_csum, &opt, adapter); - adapter->rx_csum = rx_csum; - } else { - adapter->rx_csum = opt.def; - } - } - { /* Flow Control */ - - static const struct e1000_opt_list fc_list[] = { - { E1000_FC_NONE, "Flow Control Disabled" }, - { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, - { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, - { E1000_FC_FULL, "Flow Control Enabled" }, - { E1000_FC_DEFAULT, "Flow Control Hardware Default" } - }; - - opt = (struct e1000_option) { - .type = list_option, - .name = "Flow Control", - .err = "reading default settings from EEPROM", - .def = E1000_FC_DEFAULT, - .arg = { .l = { .nr = ARRAY_SIZE(fc_list), - .p = fc_list }} - }; - - if (num_FlowControl > bd) { - unsigned int fc = FlowControl[bd]; - e1000_validate_option(&fc, &opt, adapter); - adapter->hw.fc = adapter->hw.original_fc = fc; - } else { - adapter->hw.fc = adapter->hw.original_fc = opt.def; - } - } - { /* Transmit Interrupt Delay */ - opt = (struct e1000_option) { - .type = range_option, - .name = "Transmit Interrupt Delay", - .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), - .def = DEFAULT_TIDV, - .arg = { .r = { .min = MIN_TXDELAY, - .max = MAX_TXDELAY }} - }; - - if (num_TxIntDelay > bd) { - adapter->tx_int_delay = TxIntDelay[bd]; - e1000_validate_option(&adapter->tx_int_delay, &opt, - adapter); - } else { - adapter->tx_int_delay = opt.def; - } - } - { /* Transmit Absolute Interrupt Delay */ - opt = (struct e1000_option) { - .type = range_option, - .name = "Transmit Absolute Interrupt Delay", - .err = "using default of " __MODULE_STRING(DEFAULT_TADV), - .def = DEFAULT_TADV, - .arg = { .r = { .min = MIN_TXABSDELAY, - .max = MAX_TXABSDELAY }} - }; - - if (num_TxAbsIntDelay > bd) { - adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; - e1000_validate_option(&adapter->tx_abs_int_delay, &opt, - adapter); - } else { - adapter->tx_abs_int_delay = opt.def; - } - } - { /* Receive Interrupt Delay */ - opt = (struct e1000_option) { - .type = range_option, - .name = "Receive Interrupt Delay", - .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), - .def = DEFAULT_RDTR, - .arg = { .r = { .min = MIN_RXDELAY, - .max = MAX_RXDELAY }} - }; - - if (num_RxIntDelay > bd) { - adapter->rx_int_delay = RxIntDelay[bd]; - e1000_validate_option(&adapter->rx_int_delay, &opt, - adapter); - } else { - adapter->rx_int_delay = opt.def; - } - } - { /* Receive Absolute Interrupt Delay */ - opt = (struct e1000_option) { - .type = range_option, - .name = "Receive Absolute Interrupt Delay", - .err = "using default of " __MODULE_STRING(DEFAULT_RADV), - .def = DEFAULT_RADV, - .arg = { .r = { .min = MIN_RXABSDELAY, - .max = MAX_RXABSDELAY }} - }; - - if (num_RxAbsIntDelay > bd) { - adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; - e1000_validate_option(&adapter->rx_abs_int_delay, &opt, - adapter); - } else { - adapter->rx_abs_int_delay = opt.def; - } - } - { /* Interrupt 
Throttling Rate */ - opt = (struct e1000_option) { - .type = range_option, - .name = "Interrupt Throttling Rate (ints/sec)", - .err = "using default of " __MODULE_STRING(DEFAULT_ITR), - .def = DEFAULT_ITR, - .arg = { .r = { .min = MIN_ITR, - .max = MAX_ITR }} - }; - - if (num_InterruptThrottleRate > bd) { - adapter->itr = InterruptThrottleRate[bd]; - switch (adapter->itr) { - case 0: - e_dev_info("%s turned off\n", opt.name); - break; - case 1: - e_dev_info("%s set to dynamic mode\n", - opt.name); - adapter->itr_setting = adapter->itr; - adapter->itr = 20000; - break; - case 3: - e_dev_info("%s set to dynamic conservative " - "mode\n", opt.name); - adapter->itr_setting = adapter->itr; - adapter->itr = 20000; - break; - case 4: - e_dev_info("%s set to simplified " - "(2000-8000) ints mode\n", opt.name); - adapter->itr_setting = adapter->itr; - break; - default: - e1000_validate_option(&adapter->itr, &opt, - adapter); - /* save the setting, because the dynamic bits - * change itr. - * clear the lower two bits because they are - * used as control */ - adapter->itr_setting = adapter->itr & ~3; - break; - } - } else { - adapter->itr_setting = opt.def; - adapter->itr = 20000; - } - } - { /* Smart Power Down */ - opt = (struct e1000_option) { - .type = enable_option, - .name = "PHY Smart Power Down", - .err = "defaulting to Disabled", - .def = OPTION_DISABLED - }; - - if (num_SmartPowerDownEnable > bd) { - unsigned int spd = SmartPowerDownEnable[bd]; - e1000_validate_option(&spd, &opt, adapter); - adapter->smart_power_down = spd; - } else { - adapter->smart_power_down = opt.def; - } - } - - switch (adapter->hw.media_type) { - case e1000_media_type_fiber: - case e1000_media_type_internal_serdes: - e1000_check_fiber_options(adapter); - break; - case e1000_media_type_copper: - e1000_check_copper_options(adapter); - break; - default: - BUG(); - } -} - -/** - * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version - * @adapter: board private structure - * - * Handles speed and duplex options on fiber adapters - **/ - -static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter) -{ - int bd = adapter->bd_number; - if (num_Speed > bd) { - e_dev_info("Speed not valid for fiber adapters, parameter " - "ignored\n"); - } - - if (num_Duplex > bd) { - e_dev_info("Duplex not valid for fiber adapters, parameter " - "ignored\n"); - } - - if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { - e_dev_info("AutoNeg other than 1000/Full is not valid for fiber" - "adapters, parameter ignored\n"); - } -} - -/** - * e1000_check_copper_options - Range Checking for Link Options, Copper Version - * @adapter: board private structure - * - * Handles speed and duplex options on copper adapters - **/ - -static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) -{ - struct e1000_option opt; - unsigned int speed, dplx, an; - int bd = adapter->bd_number; - - { /* Speed */ - static const struct e1000_opt_list speed_list[] = { - { 0, "" }, - { SPEED_10, "" }, - { SPEED_100, "" }, - { SPEED_1000, "" }}; - - opt = (struct e1000_option) { - .type = list_option, - .name = "Speed", - .err = "parameter ignored", - .def = 0, - .arg = { .l = { .nr = ARRAY_SIZE(speed_list), - .p = speed_list }} - }; - - if (num_Speed > bd) { - speed = Speed[bd]; - e1000_validate_option(&speed, &opt, adapter); - } else { - speed = opt.def; - } - } - { /* Duplex */ - static const struct e1000_opt_list dplx_list[] = { - { 0, "" }, - { HALF_DUPLEX, "" }, - { FULL_DUPLEX, "" }}; - - opt = (struct 
e1000_option) { - .type = list_option, - .name = "Duplex", - .err = "parameter ignored", - .def = 0, - .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), - .p = dplx_list }} - }; - - if (num_Duplex > bd) { - dplx = Duplex[bd]; - e1000_validate_option(&dplx, &opt, adapter); - } else { - dplx = opt.def; - } - } - - if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { - e_dev_info("AutoNeg specified along with Speed or Duplex, " - "parameter ignored\n"); - adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; - } else { /* Autoneg */ - static const struct e1000_opt_list an_list[] = - #define AA "AutoNeg advertising " - {{ 0x01, AA "10/HD" }, - { 0x02, AA "10/FD" }, - { 0x03, AA "10/FD, 10/HD" }, - { 0x04, AA "100/HD" }, - { 0x05, AA "100/HD, 10/HD" }, - { 0x06, AA "100/HD, 10/FD" }, - { 0x07, AA "100/HD, 10/FD, 10/HD" }, - { 0x08, AA "100/FD" }, - { 0x09, AA "100/FD, 10/HD" }, - { 0x0a, AA "100/FD, 10/FD" }, - { 0x0b, AA "100/FD, 10/FD, 10/HD" }, - { 0x0c, AA "100/FD, 100/HD" }, - { 0x0d, AA "100/FD, 100/HD, 10/HD" }, - { 0x0e, AA "100/FD, 100/HD, 10/FD" }, - { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, - { 0x20, AA "1000/FD" }, - { 0x21, AA "1000/FD, 10/HD" }, - { 0x22, AA "1000/FD, 10/FD" }, - { 0x23, AA "1000/FD, 10/FD, 10/HD" }, - { 0x24, AA "1000/FD, 100/HD" }, - { 0x25, AA "1000/FD, 100/HD, 10/HD" }, - { 0x26, AA "1000/FD, 100/HD, 10/FD" }, - { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, - { 0x28, AA "1000/FD, 100/FD" }, - { 0x29, AA "1000/FD, 100/FD, 10/HD" }, - { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, - { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, - { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, - { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, - { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, - { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; - - opt = (struct e1000_option) { - .type = list_option, - .name = "AutoNeg", - .err = "parameter ignored", - .def = AUTONEG_ADV_DEFAULT, - .arg = { .l = { .nr = ARRAY_SIZE(an_list), - .p = an_list }} - }; - - if (num_AutoNeg > bd) { - an = AutoNeg[bd]; - e1000_validate_option(&an, &opt, adapter); - } else { - an = opt.def; - } - adapter->hw.autoneg_advertised = an; - } - - switch (speed + dplx) { - case 0: - adapter->hw.autoneg = adapter->fc_autoneg = 1; - if ((num_Speed > bd) && (speed != 0 || dplx != 0)) - e_dev_info("Speed and duplex autonegotiation " - "enabled\n"); - break; - case HALF_DUPLEX: - e_dev_info("Half Duplex specified without Speed\n"); - e_dev_info("Using Autonegotiation at Half Duplex only\n"); - adapter->hw.autoneg = adapter->fc_autoneg = 1; - adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | - ADVERTISE_100_HALF; - break; - case FULL_DUPLEX: - e_dev_info("Full Duplex specified without Speed\n"); - e_dev_info("Using Autonegotiation at Full Duplex only\n"); - adapter->hw.autoneg = adapter->fc_autoneg = 1; - adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | - ADVERTISE_100_FULL | - ADVERTISE_1000_FULL; - break; - case SPEED_10: - e_dev_info("10 Mbps Speed specified without Duplex\n"); - e_dev_info("Using Autonegotiation at 10 Mbps only\n"); - adapter->hw.autoneg = adapter->fc_autoneg = 1; - adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | - ADVERTISE_10_FULL; - break; - case SPEED_10 + HALF_DUPLEX: - e_dev_info("Forcing to 10 Mbps Half Duplex\n"); - adapter->hw.autoneg = adapter->fc_autoneg = 0; - adapter->hw.forced_speed_duplex = e1000_10_half; - adapter->hw.autoneg_advertised = 0; - break; - case SPEED_10 + FULL_DUPLEX: - e_dev_info("Forcing to 10 Mbps Full Duplex\n"); - adapter->hw.autoneg = adapter->fc_autoneg = 0; - 
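A note on the switch (speed + dplx) dispatch used here: it works because the key values cannot collide. Assuming e1000's own HALF_DUPLEX/FULL_DUPLEX defines of 1 and 2 (their usual values in e1000_hw.h) and the standard SPEED_10/100/1000 of 10/100/1000:

/* duplex alone:   1 (half), 2 (full)
 * speed alone:    10, 100, 1000
 * speed + duplex: 11, 12, 101, 102, 1001, 1002
 * -- all distinct, so a single switch covers every combination.
 */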
adapter->hw.forced_speed_duplex = e1000_10_full; - adapter->hw.autoneg_advertised = 0; - break; - case SPEED_100: - e_dev_info("100 Mbps Speed specified without Duplex\n"); - e_dev_info("Using Autonegotiation at 100 Mbps only\n"); - adapter->hw.autoneg = adapter->fc_autoneg = 1; - adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | - ADVERTISE_100_FULL; - break; - case SPEED_100 + HALF_DUPLEX: - e_dev_info("Forcing to 100 Mbps Half Duplex\n"); - adapter->hw.autoneg = adapter->fc_autoneg = 0; - adapter->hw.forced_speed_duplex = e1000_100_half; - adapter->hw.autoneg_advertised = 0; - break; - case SPEED_100 + FULL_DUPLEX: - e_dev_info("Forcing to 100 Mbps Full Duplex\n"); - adapter->hw.autoneg = adapter->fc_autoneg = 0; - adapter->hw.forced_speed_duplex = e1000_100_full; - adapter->hw.autoneg_advertised = 0; - break; - case SPEED_1000: - e_dev_info("1000 Mbps Speed specified without Duplex\n"); - goto full_duplex_only; - case SPEED_1000 + HALF_DUPLEX: - e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); - /* fall through */ - case SPEED_1000 + FULL_DUPLEX: -full_duplex_only: - e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " - "only\n"); - adapter->hw.autoneg = adapter->fc_autoneg = 1; - adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; - break; - default: - BUG(); - } - - /* Speed, AutoNeg and MDI/MDI-X must all play nice */ - if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { - e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " - "Setting MDI-X to a compatible value.\n"); - } -} - diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c deleted file mode 100644 index 480f259..0000000 --- a/drivers/net/e1000e/82571.c +++ /dev/null @@ -1,2115 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* - * 82571EB Gigabit Ethernet Controller - * 82571EB Gigabit Ethernet Controller (Copper) - * 82571EB Gigabit Ethernet Controller (Fiber) - * 82571EB Dual Port Gigabit Mezzanine Adapter - * 82571EB Quad Port Gigabit Mezzanine Adapter - * 82571PT Gigabit PT Quad Port Server ExpressModule - * 82572EI Gigabit Ethernet Controller (Copper) - * 82572EI Gigabit Ethernet Controller (Fiber) - * 82572EI Gigabit Ethernet Controller - * 82573V Gigabit Ethernet Controller (Copper) - * 82573E Gigabit Ethernet Controller (Copper) - * 82573L Gigabit Ethernet Controller - * 82574L Gigabit Network Connection - * 82583V Gigabit Network Connection - */ - -#include "e1000.h" - -#define ID_LED_RESERVED_F746 0xF746 -#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ - (ID_LED_OFF1_ON2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) - -#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 -#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ -#define E1000_BASE1000T_STATUS 10 -#define E1000_IDLE_ERROR_COUNT_MASK 0xFF -#define E1000_RECEIVE_ERROR_COUNTER 21 -#define E1000_RECEIVE_ERROR_MAX 0xFFFF - -#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ - -static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); -static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); -static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); -static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); -static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, - u16 words, u16 *data); -static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); -static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); -static s32 e1000_setup_link_82571(struct e1000_hw *hw); -static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); -static void e1000_clear_vfta_82571(struct e1000_hw *hw); -static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); -static s32 e1000_led_on_82574(struct e1000_hw *hw); -static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); -static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); -static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); -static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); -static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); -static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); -static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); - -/** - * e1000_init_phy_params_82571 - Init PHY func ptrs. 
- * @hw: pointer to the HW structure - **/ -static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - - if (hw->phy.media_type != e1000_media_type_copper) { - phy->type = e1000_phy_none; - return 0; - } - - phy->addr = 1; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; - - phy->ops.power_up = e1000_power_up_phy_copper; - phy->ops.power_down = e1000_power_down_phy_copper_82571; - - switch (hw->mac.type) { - case e1000_82571: - case e1000_82572: - phy->type = e1000_phy_igp_2; - break; - case e1000_82573: - phy->type = e1000_phy_m88; - break; - case e1000_82574: - case e1000_82583: - phy->type = e1000_phy_bm; - phy->ops.acquire = e1000_get_hw_semaphore_82574; - phy->ops.release = e1000_put_hw_semaphore_82574; - phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; - phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; - break; - default: - return -E1000_ERR_PHY; - break; - } - - /* This can only be done after all function pointers are setup. */ - ret_val = e1000_get_phy_id_82571(hw); - if (ret_val) { - e_dbg("Error getting PHY ID\n"); - return ret_val; - } - - /* Verify phy id */ - switch (hw->mac.type) { - case e1000_82571: - case e1000_82572: - if (phy->id != IGP01E1000_I_PHY_ID) - ret_val = -E1000_ERR_PHY; - break; - case e1000_82573: - if (phy->id != M88E1111_I_PHY_ID) - ret_val = -E1000_ERR_PHY; - break; - case e1000_82574: - case e1000_82583: - if (phy->id != BME1000_E_PHY_ID_R2) - ret_val = -E1000_ERR_PHY; - break; - default: - ret_val = -E1000_ERR_PHY; - break; - } - - if (ret_val) - e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); - - return ret_val; -} - -/** - * e1000_init_nvm_params_82571 - Init NVM func ptrs. - * @hw: pointer to the HW structure - **/ -static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = er32(EECD); - u16 size; - - nvm->opcode_bits = 8; - nvm->delay_usec = 1; - switch (nvm->override) { - case e1000_nvm_override_spi_large: - nvm->page_size = 32; - nvm->address_bits = 16; - break; - case e1000_nvm_override_spi_small: - nvm->page_size = 8; - nvm->address_bits = 8; - break; - default: - nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; - nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; - break; - } - - switch (hw->mac.type) { - case e1000_82573: - case e1000_82574: - case e1000_82583: - if (((eecd >> 15) & 0x3) == 0x3) { - nvm->type = e1000_nvm_flash_hw; - nvm->word_size = 2048; - /* - * Autonomous Flash update bit must be cleared due - * to Flash update issue. - */ - eecd &= ~E1000_EECD_AUPDEN; - ew32(EECD, eecd); - break; - } - /* Fall Through */ - default: - nvm->type = e1000_nvm_eeprom_spi; - size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); - /* - * Added to a constant, "size" becomes the left-shift value - * for setting word_size. - */ - size += NVM_WORD_SIZE_BASE_SHIFT; - - /* EEPROM access above 16k is unsupported */ - if (size > 14) - size = 14; - nvm->word_size = 1 << size; - break; - } - - /* Function Pointers */ - switch (hw->mac.type) { - case e1000_82574: - case e1000_82583: - nvm->ops.acquire = e1000_get_hw_semaphore_82574; - nvm->ops.release = e1000_put_hw_semaphore_82574; - break; - default: - break; - } - - return 0; -} - -/** - * e1000_init_mac_params_82571 - Init MAC func ptrs. 
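The EEPROM sizing arithmetic in e1000_init_nvm_params_82571() above reads a size-exponent field out of EECD and turns it into a word count by shifting; a condensed recap of that computation (names as in the code, NVM_WORD_SIZE_BASE_SHIFT being the driver's base exponent):

/* size = SIZE_EX field from EECD + NVM_WORD_SIZE_BASE_SHIFT, then
 * word_size = 1 << size, with size capped at 14:
 *      1 << 14 == 16384 words -- the "16k" limit in the comment.
 */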
- * @hw: pointer to the HW structure - **/ -static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_mac_info *mac = &hw->mac; - struct e1000_mac_operations *func = &mac->ops; - u32 swsm = 0; - u32 swsm2 = 0; - bool force_clear_smbi = false; - - /* Set media type */ - switch (adapter->pdev->device) { - case E1000_DEV_ID_82571EB_FIBER: - case E1000_DEV_ID_82572EI_FIBER: - case E1000_DEV_ID_82571EB_QUAD_FIBER: - hw->phy.media_type = e1000_media_type_fiber; - break; - case E1000_DEV_ID_82571EB_SERDES: - case E1000_DEV_ID_82572EI_SERDES: - case E1000_DEV_ID_82571EB_SERDES_DUAL: - case E1000_DEV_ID_82571EB_SERDES_QUAD: - hw->phy.media_type = e1000_media_type_internal_serdes; - break; - default: - hw->phy.media_type = e1000_media_type_copper; - break; - } - - /* Set mta register count */ - mac->mta_reg_count = 128; - /* Set rar entry count */ - mac->rar_entry_count = E1000_RAR_ENTRIES; - /* Adaptive IFS supported */ - mac->adaptive_ifs = true; - - /* check for link */ - switch (hw->phy.media_type) { - case e1000_media_type_copper: - func->setup_physical_interface = e1000_setup_copper_link_82571; - func->check_for_link = e1000e_check_for_copper_link; - func->get_link_up_info = e1000e_get_speed_and_duplex_copper; - break; - case e1000_media_type_fiber: - func->setup_physical_interface = - e1000_setup_fiber_serdes_link_82571; - func->check_for_link = e1000e_check_for_fiber_link; - func->get_link_up_info = - e1000e_get_speed_and_duplex_fiber_serdes; - break; - case e1000_media_type_internal_serdes: - func->setup_physical_interface = - e1000_setup_fiber_serdes_link_82571; - func->check_for_link = e1000_check_for_serdes_link_82571; - func->get_link_up_info = - e1000e_get_speed_and_duplex_fiber_serdes; - break; - default: - return -E1000_ERR_CONFIG; - break; - } - - switch (hw->mac.type) { - case e1000_82573: - func->set_lan_id = e1000_set_lan_id_single_port; - func->check_mng_mode = e1000e_check_mng_mode_generic; - func->led_on = e1000e_led_on_generic; - func->blink_led = e1000e_blink_led_generic; - - /* FWSM register */ - mac->has_fwsm = true; - /* - * ARC supported; valid only if manageability features are - * enabled. - */ - mac->arc_subsystem_valid = - (er32(FWSM) & E1000_FWSM_MODE_MASK) - ? true : false; - break; - case e1000_82574: - case e1000_82583: - func->set_lan_id = e1000_set_lan_id_single_port; - func->check_mng_mode = e1000_check_mng_mode_82574; - func->led_on = e1000_led_on_82574; - break; - default: - func->check_mng_mode = e1000e_check_mng_mode_generic; - func->led_on = e1000e_led_on_generic; - func->blink_led = e1000e_blink_led_generic; - - /* FWSM register */ - mac->has_fwsm = true; - break; - } - - /* - * Ensure that the inter-port SWSM.SMBI lock bit is clear before - * first NVM or PHY access. This should be done for single-port - * devices, and for one port only on dual-port devices so that - * for those devices we can still use the SMBI lock to synchronize - * inter-port accesses to the PHY & NVM. 
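
Note: e1000_init_mac_params_82571 above classifies the part by PCI device ID once, then routes all later link handling through a function-pointer table. A minimal standalone sketch of that dispatch shape; the types and names here are illustrative, not the driver's own:

#include <stdio.h>

enum media_type { MEDIA_COPPER, MEDIA_FIBER, MEDIA_SERDES };

struct link_ops {
	int (*setup_physical_interface)(void);
	int (*check_for_link)(void);
};

static int setup_copper(void) { puts("setup copper"); return 0; }
static int check_copper(void) { puts("check copper"); return 0; }
static int setup_fiber(void)  { puts("setup fiber");  return 0; }
static int check_fiber(void)  { puts("check fiber");  return 0; }

/* Bind ops once at init; callers then invoke through the table
 * without ever re-testing the media type. */
static void bind_link_ops(enum media_type media, struct link_ops *ops)
{
	switch (media) {
	case MEDIA_COPPER:
		ops->setup_physical_interface = setup_copper;
		ops->check_for_link = check_copper;
		break;
	case MEDIA_FIBER:
	case MEDIA_SERDES:	/* fiber and serdes share setup here */
		ops->setup_physical_interface = setup_fiber;
		ops->check_for_link = check_fiber;
		break;
	}
}
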
- */ - switch (hw->mac.type) { - case e1000_82571: - case e1000_82572: - swsm2 = er32(SWSM2); - - if (!(swsm2 & E1000_SWSM2_LOCK)) { - /* Only do this for the first interface on this card */ - ew32(SWSM2, - swsm2 | E1000_SWSM2_LOCK); - force_clear_smbi = true; - } else - force_clear_smbi = false; - break; - default: - force_clear_smbi = true; - break; - } - - if (force_clear_smbi) { - /* Make sure SWSM.SMBI is clear */ - swsm = er32(SWSM); - if (swsm & E1000_SWSM_SMBI) { - /* This bit should not be set on a first interface, and - * indicates that the bootagent or EFI code has - * improperly left this bit enabled - */ - e_dbg("Please update your 82571 Bootagent\n"); - } - ew32(SWSM, swsm & ~E1000_SWSM_SMBI); - } - - /* - * Initialize device specific counter of SMBI acquisition - * timeouts. - */ - hw->dev_spec.e82571.smb_counter = 0; - - return 0; -} - -static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - static int global_quad_port_a; /* global port a indication */ - struct pci_dev *pdev = adapter->pdev; - int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; - s32 rc; - - rc = e1000_init_mac_params_82571(adapter); - if (rc) - return rc; - - rc = e1000_init_nvm_params_82571(hw); - if (rc) - return rc; - - rc = e1000_init_phy_params_82571(hw); - if (rc) - return rc; - - /* tag quad port adapters first, it's used below */ - switch (pdev->device) { - case E1000_DEV_ID_82571EB_QUAD_COPPER: - case E1000_DEV_ID_82571EB_QUAD_FIBER: - case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: - case E1000_DEV_ID_82571PT_QUAD_COPPER: - adapter->flags |= FLAG_IS_QUAD_PORT; - /* mark the first port */ - if (global_quad_port_a == 0) - adapter->flags |= FLAG_IS_QUAD_PORT_A; - /* Reset for multiple quad port adapters */ - global_quad_port_a++; - if (global_quad_port_a == 4) - global_quad_port_a = 0; - break; - default: - break; - } - - switch (adapter->hw.mac.type) { - case e1000_82571: - /* these dual ports don't have WoL on port B at all */ - if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || - (pdev->device == E1000_DEV_ID_82571EB_SERDES) || - (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && - (is_port_b)) - adapter->flags &= ~FLAG_HAS_WOL; - /* quad ports only support WoL on port A */ - if (adapter->flags & FLAG_IS_QUAD_PORT && - (!(adapter->flags & FLAG_IS_QUAD_PORT_A))) - adapter->flags &= ~FLAG_HAS_WOL; - /* Does not support WoL on any port */ - if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) - adapter->flags &= ~FLAG_HAS_WOL; - break; - case e1000_82573: - if (pdev->device == E1000_DEV_ID_82573L) { - adapter->flags |= FLAG_HAS_JUMBO_FRAMES; - adapter->max_hw_frame_size = DEFAULT_JUMBO; - } - break; - default: - break; - } - - return 0; -} - -/** - * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision - * @hw: pointer to the HW structure - * - * Reads the PHY registers and stores the PHY ID and possibly the PHY - * revision in the hardware structure. - **/ -static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_id = 0; - - switch (hw->mac.type) { - case e1000_82571: - case e1000_82572: - /* - * The 82571 firmware may still be configuring the PHY. - * In this case, we cannot access the PHY until the - * configuration is done. So we explicitly set the - * PHY ID. 
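
Note: the quad-port tagging in e1000_get_variants_82571 above relies on a single static counter shared across probes: the port that sees it at zero is port A, and it wraps every four functions so the next quad-port board starts fresh. A standalone sketch with hypothetical flag bits:

#define FLAG_IS_QUAD_PORT   (1u << 0)	/* hypothetical flag values */
#define FLAG_IS_QUAD_PORT_A (1u << 1)

static int quad_port_seq;	/* shared across ports, like global_quad_port_a */

static unsigned int tag_quad_port(unsigned int flags)
{
	flags |= FLAG_IS_QUAD_PORT;
	if (quad_port_seq == 0)		/* first function probed on this board */
		flags |= FLAG_IS_QUAD_PORT_A;
	if (++quad_port_seq == 4)	/* wrap for the next quad-port board */
		quad_port_seq = 0;
	return flags;
}

The sketch leans on the same assumption as the original: the functions of one board are probed consecutively.
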
- */ - phy->id = IGP01E1000_I_PHY_ID; - break; - case e1000_82573: - return e1000e_get_phy_id(hw); - break; - case e1000_82574: - case e1000_82583: - ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); - if (ret_val) - return ret_val; - - phy->id = (u32)(phy_id << 16); - udelay(20); - ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); - if (ret_val) - return ret_val; - - phy->id |= (u32)(phy_id); - phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); - break; - default: - return -E1000_ERR_PHY; - break; - } - - return 0; -} - -/** - * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore - * @hw: pointer to the HW structure - * - * Acquire the HW semaphore to access the PHY or NVM - **/ -static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) -{ - u32 swsm; - s32 sw_timeout = hw->nvm.word_size + 1; - s32 fw_timeout = hw->nvm.word_size + 1; - s32 i = 0; - - /* - * If we have timedout 3 times on trying to acquire - * the inter-port SMBI semaphore, there is old code - * operating on the other port, and it is not - * releasing SMBI. Modify the number of times that - * we try for the semaphore to interwork with this - * older code. - */ - if (hw->dev_spec.e82571.smb_counter > 2) - sw_timeout = 1; - - /* Get the SW semaphore */ - while (i < sw_timeout) { - swsm = er32(SWSM); - if (!(swsm & E1000_SWSM_SMBI)) - break; - - udelay(50); - i++; - } - - if (i == sw_timeout) { - e_dbg("Driver can't access device - SMBI bit is set.\n"); - hw->dev_spec.e82571.smb_counter++; - } - /* Get the FW semaphore. */ - for (i = 0; i < fw_timeout; i++) { - swsm = er32(SWSM); - ew32(SWSM, swsm | E1000_SWSM_SWESMBI); - - /* Semaphore acquired if bit latched */ - if (er32(SWSM) & E1000_SWSM_SWESMBI) - break; - - udelay(50); - } - - if (i == fw_timeout) { - /* Release semaphores */ - e1000_put_hw_semaphore_82571(hw); - e_dbg("Driver can't access the NVM\n"); - return -E1000_ERR_NVM; - } - - return 0; -} - -/** - * e1000_put_hw_semaphore_82571 - Release hardware semaphore - * @hw: pointer to the HW structure - * - * Release hardware semaphore used to access the PHY or NVM - **/ -static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) -{ - u32 swsm; - - swsm = er32(SWSM); - swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); - ew32(SWSM, swsm); -} -/** - * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore - * @hw: pointer to the HW structure - * - * Acquire the HW semaphore during reset. - * - **/ -static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) -{ - u32 extcnf_ctrl; - s32 ret_val = 0; - s32 i = 0; - - extcnf_ctrl = er32(EXTCNF_CTRL); - extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; - do { - ew32(EXTCNF_CTRL, extcnf_ctrl); - extcnf_ctrl = er32(EXTCNF_CTRL); - - if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) - break; - - extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; - - usleep_range(2000, 4000); - i++; - } while (i < MDIO_OWNERSHIP_TIMEOUT); - - if (i == MDIO_OWNERSHIP_TIMEOUT) { - /* Release semaphores */ - e1000_put_hw_semaphore_82573(hw); - e_dbg("Driver can't access the PHY\n"); - ret_val = -E1000_ERR_PHY; - goto out; - } - -out: - return ret_val; -} - -/** - * e1000_put_hw_semaphore_82573 - Release hardware semaphore - * @hw: pointer to the HW structure - * - * Release hardware semaphore used during reset. 
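
Note: both acquire routines above reduce to one shape: poll a busy bit with a fixed delay and give up after a bounded number of attempts, so a wedged peer can never hang the driver. A generic sketch of that shape, with a stub accessor standing in for er32() and an arbitrary bit position:

#include <stdbool.h>
#include <stdint.h>

#define SEM_BUSY_BIT (1u << 0)		/* illustrative bit position */

extern uint32_t sem_reg_read(void);	/* stand-in for real MMIO access */
extern void delay_us(unsigned int us);

/* Returns true once the busy bit clears, false after `attempts` tries. */
static bool acquire_polled(unsigned int attempts)
{
	while (attempts--) {
		if (!(sem_reg_read() & SEM_BUSY_BIT))
			return true;
		delay_us(50);		/* same 50us spacing the driver uses */
	}
	return false;
}

Note how e1000_get_hw_semaphore_82571 above also shrinks the budget (sw_timeout = 1) once earlier attempts have timed out, so a peer that never releases SMBI costs at most one extra poll per access.
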
- * - **/ -static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) -{ - u32 extcnf_ctrl; - - extcnf_ctrl = er32(EXTCNF_CTRL); - extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; - ew32(EXTCNF_CTRL, extcnf_ctrl); -} - -static DEFINE_MUTEX(swflag_mutex); - -/** - * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore - * @hw: pointer to the HW structure - * - * Acquire the HW semaphore to access the PHY or NVM. - * - **/ -static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) -{ - s32 ret_val; - - mutex_lock(&swflag_mutex); - ret_val = e1000_get_hw_semaphore_82573(hw); - if (ret_val) - mutex_unlock(&swflag_mutex); - return ret_val; -} - -/** - * e1000_put_hw_semaphore_82574 - Release hardware semaphore - * @hw: pointer to the HW structure - * - * Release hardware semaphore used to access the PHY or NVM - * - **/ -static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) -{ - e1000_put_hw_semaphore_82573(hw); - mutex_unlock(&swflag_mutex); -} - -/** - * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state - * @hw: pointer to the HW structure - * @active: true to enable LPLU, false to disable - * - * Sets the LPLU D0 state according to the active flag. - * LPLU will not be activated unless the - * device autonegotiation advertisement meets standards of - * either 10 or 10/100 or 10/100/1000 at all duplexes. - * This is a function pointer entry point only called by - * PHY setup routines. - **/ -static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) -{ - u16 data = er32(POEMB); - - if (active) - data |= E1000_PHY_CTRL_D0A_LPLU; - else - data &= ~E1000_PHY_CTRL_D0A_LPLU; - - ew32(POEMB, data); - return 0; -} - -/** - * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 - * @hw: pointer to the HW structure - * @active: boolean used to enable/disable lplu - * - * The low power link up (lplu) state is set to the power management level D3 - * when active is true, else clear lplu for D3. LPLU - * is used during Dx states where the power conservation is most important. - * During driver activity, SmartSpeed should be enabled so performance is - * maintained. - **/ -static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) -{ - u16 data = er32(POEMB); - - if (!active) { - data &= ~E1000_PHY_CTRL_NOND0A_LPLU; - } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || - (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || - (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { - data |= E1000_PHY_CTRL_NOND0A_LPLU; - } - - ew32(POEMB, data); - return 0; -} - -/** - * e1000_acquire_nvm_82571 - Request for access to the EEPROM - * @hw: pointer to the HW structure - * - * To gain access to the EEPROM, first we must obtain a hardware semaphore. - * Then for non-82573 hardware, set the EEPROM access request bit and wait - * for EEPROM access grant bit. If the access grant bit is not set, release - * hardware semaphore. - **/ -static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) -{ - s32 ret_val; - - ret_val = e1000_get_hw_semaphore_82571(hw); - if (ret_val) - return ret_val; - - switch (hw->mac.type) { - case e1000_82573: - break; - default: - ret_val = e1000e_acquire_nvm(hw); - break; - } - - if (ret_val) - e1000_put_hw_semaphore_82571(hw); - - return ret_val; -} - -/** - * e1000_release_nvm_82571 - Release exclusive access to EEPROM - * @hw: pointer to the HW structure - * - * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
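
Note: the 82574 acquire/release pair above layers a host-side mutex over the hardware ownership bit: the mutex serializes local threads, the EXTCNF_CTRL ownership bit arbitrates with firmware, and the mutex is dropped again if the hardware half fails so no lock is left held on the error path. The same layering in a pthread-based sketch (the hw_sem_* calls are stand-ins):

#include <pthread.h>

static pthread_mutex_t swflag_lock = PTHREAD_MUTEX_INITIALIZER;

extern int hw_sem_acquire(void);	/* stand-in: take the ownership bit */
extern void hw_sem_release(void);

static int acquire_layered(void)
{
	int err;

	pthread_mutex_lock(&swflag_lock);
	err = hw_sem_acquire();
	if (err)	/* never return with the mutex held on failure */
		pthread_mutex_unlock(&swflag_lock);
	return err;
}

static void release_layered(void)
{
	hw_sem_release();	/* release in reverse acquisition order */
	pthread_mutex_unlock(&swflag_lock);
}
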
- **/ -static void e1000_release_nvm_82571(struct e1000_hw *hw) -{ - e1000e_release_nvm(hw); - e1000_put_hw_semaphore_82571(hw); -} - -/** - * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface - * @hw: pointer to the HW structure - * @offset: offset within the EEPROM to be written to - * @words: number of words to write - * @data: 16 bit word(s) to be written to the EEPROM - * - * For non-82573 silicon, write data to EEPROM at offset using SPI interface. - * - * If e1000e_update_nvm_checksum is not called after this function, the - * EEPROM will most likely contain an invalid checksum. - **/ -static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) -{ - s32 ret_val; - - switch (hw->mac.type) { - case e1000_82573: - case e1000_82574: - case e1000_82583: - ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); - break; - case e1000_82571: - case e1000_82572: - ret_val = e1000e_write_nvm_spi(hw, offset, words, data); - break; - default: - ret_val = -E1000_ERR_NVM; - break; - } - - return ret_val; -} - -/** - * e1000_update_nvm_checksum_82571 - Update EEPROM checksum - * @hw: pointer to the HW structure - * - * Updates the EEPROM checksum by reading/adding each word of the EEPROM - * up to the checksum. Then calculates the EEPROM checksum and writes the - * value to the EEPROM. - **/ -static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) -{ - u32 eecd; - s32 ret_val; - u16 i; - - ret_val = e1000e_update_nvm_checksum_generic(hw); - if (ret_val) - return ret_val; - - /* - * If our nvm is an EEPROM, then we're done - * otherwise, commit the checksum to the flash NVM. - */ - if (hw->nvm.type != e1000_nvm_flash_hw) - return ret_val; - - /* Check for pending operations. */ - for (i = 0; i < E1000_FLASH_UPDATES; i++) { - usleep_range(1000, 2000); - if ((er32(EECD) & E1000_EECD_FLUPD) == 0) - break; - } - - if (i == E1000_FLASH_UPDATES) - return -E1000_ERR_NVM; - - /* Reset the firmware if using STM opcode. */ - if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { - /* - * The enabling of and the actual reset must be done - * in two write cycles. - */ - ew32(HICR, E1000_HICR_FW_RESET_ENABLE); - e1e_flush(); - ew32(HICR, E1000_HICR_FW_RESET); - } - - /* Commit the write to flash */ - eecd = er32(EECD) | E1000_EECD_FLUPD; - ew32(EECD, eecd); - - for (i = 0; i < E1000_FLASH_UPDATES; i++) { - usleep_range(1000, 2000); - if ((er32(EECD) & E1000_EECD_FLUPD) == 0) - break; - } - - if (i == E1000_FLASH_UPDATES) - return -E1000_ERR_NVM; - - return 0; -} - -/** - * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum - * @hw: pointer to the HW structure - * - * Calculates the EEPROM checksum by reading/adding each word of the EEPROM - * and then verifies that the sum of the EEPROM is equal to 0xBABA. - **/ -static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) -{ - if (hw->nvm.type == e1000_nvm_flash_hw) - e1000_fix_nvm_checksum_82571(hw); - - return e1000e_validate_nvm_checksum_generic(hw); -} - -/** - * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon - * @hw: pointer to the HW structure - * @offset: offset within the EEPROM to be written to - * @words: number of words to write - * @data: 16 bit word(s) to be written to the EEPROM - * - * After checking for invalid values, poll the EEPROM to ensure the previous - * command has completed before trying to write the next word. After write - * poll for completion. 
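
Note: setting aside the STM firmware-reset special case, the flash commit in e1000_update_nvm_checksum_82571 above is: wait for any pending update to drain, set the update bit, then wait for it to clear again, failing if either wait exhausts its budget. A sketch under those assumptions (bit position and retry budget are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define FLUPD_BIT   (1u << 19)	/* illustrative bit position */
#define FLASH_TRIES 100		/* illustrative retry budget */

extern uint32_t eecd_read(void);	/* stand-ins for er32/ew32(EECD) */
extern void eecd_write(uint32_t v);
extern void sleep_ms(unsigned int ms);

static bool flash_idle(unsigned int tries)
{
	while (tries--) {
		sleep_ms(1);
		if (!(eecd_read() & FLUPD_BIT))
			return true;
	}
	return false;
}

static int commit_to_flash(void)
{
	if (!flash_idle(FLASH_TRIES))		/* a prior update is stuck */
		return -1;
	eecd_write(eecd_read() | FLUPD_BIT);	/* kick off the commit */
	return flash_idle(FLASH_TRIES) ? 0 : -1;
}
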
- * - * If e1000e_update_nvm_checksum is not called after this function, the - * EEPROM will most likely contain an invalid checksum. - **/ -static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, - u16 words, u16 *data) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 i, eewr = 0; - s32 ret_val = 0; - - /* - * A check for invalid values: offset too large, too many words, - * and not enough words. - */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { - e_dbg("nvm parameter(s) out of bounds\n"); - return -E1000_ERR_NVM; - } - - for (i = 0; i < words; i++) { - eewr = (data[i] << E1000_NVM_RW_REG_DATA) | - ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | - E1000_NVM_RW_REG_START; - - ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); - if (ret_val) - break; - - ew32(EEWR, eewr); - - ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); - if (ret_val) - break; - } - - return ret_val; -} - -/** - * e1000_get_cfg_done_82571 - Poll for configuration done - * @hw: pointer to the HW structure - * - * Reads the management control register for the config done bit to be set. - **/ -static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) -{ - s32 timeout = PHY_CFG_TIMEOUT; - - while (timeout) { - if (er32(EEMNGCTL) & - E1000_NVM_CFG_DONE_PORT_0) - break; - usleep_range(1000, 2000); - timeout--; - } - if (!timeout) { - e_dbg("MNG configuration cycle has not completed.\n"); - return -E1000_ERR_RESET; - } - - return 0; -} - -/** - * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state - * @hw: pointer to the HW structure - * @active: true to enable LPLU, false to disable - * - * Sets the LPLU D0 state according to the active flag. When activating LPLU - * this function also disables smart speed and vice versa. LPLU will not be - * activated unless the device autonegotiation advertisement meets standards - * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function - * pointer entry point only called by PHY setup routines. - **/ -static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - - ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); - if (ret_val) - return ret_val; - - if (active) { - data |= IGP02E1000_PM_D0_LPLU; - ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); - if (ret_val) - return ret_val; - - /* When LPLU is enabled, we should disable SmartSpeed */ - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); - if (ret_val) - return ret_val; - } else { - data &= ~IGP02E1000_PM_D0_LPLU; - ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); - /* - * LPLU and SmartSpeed are mutually exclusive. LPLU is used - * during Dx states where the power conservation is most - * important. During driver activity we should enable - * SmartSpeed, so performance is maintained. 
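
Note: two details of the EEWR path above are worth isolating: the overflow-safe bounds test, and the packing of data, address, and start bit into one register word. A self-contained sketch; the shift values are illustrative, not the device's actual field layout:

#include <stdint.h>

#define NVM_RW_DATA_SHIFT 16	/* illustrative field positions */
#define NVM_RW_ADDR_SHIFT 2
#define NVM_RW_START      0x1u

/* Mirrors the driver's three checks: offset in range, a non-zero
 * word count, and no access past the end (written so it cannot wrap). */
static int nvm_args_ok(uint16_t word_size, uint16_t offset, uint16_t words)
{
	return offset < word_size && words != 0 &&
	       words <= (uint16_t)(word_size - offset);
}

static uint32_t pack_eewr(uint16_t offset, uint16_t word)
{
	return ((uint32_t)word << NVM_RW_DATA_SHIFT) |
	       ((uint32_t)offset << NVM_RW_ADDR_SHIFT) |
	       NVM_RW_START;
}

Each packed word is then written out between two completion polls, exactly as the loop above does with e1000e_poll_eerd_eewr_done().
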
- */ - if (phy->smart_speed == e1000_smart_speed_on) { - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - return ret_val; - - data |= IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - return ret_val; - } else if (phy->smart_speed == e1000_smart_speed_off) { - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - return ret_val; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - return ret_val; - } - } - - return 0; -} - -/** - * e1000_reset_hw_82571 - Reset hardware - * @hw: pointer to the HW structure - * - * This resets the hardware into a known state. - **/ -static s32 e1000_reset_hw_82571(struct e1000_hw *hw) -{ - u32 ctrl, ctrl_ext; - s32 ret_val; - - /* - * Prevent the PCI-E bus from sticking if there is no TLP connection - * on the last TLP read/write transaction when MAC is reset. - */ - ret_val = e1000e_disable_pcie_master(hw); - if (ret_val) - e_dbg("PCI-E Master disable polling has failed.\n"); - - e_dbg("Masking off all interrupts\n"); - ew32(IMC, 0xffffffff); - - ew32(RCTL, 0); - ew32(TCTL, E1000_TCTL_PSP); - e1e_flush(); - - usleep_range(10000, 20000); - - /* - * Must acquire the MDIO ownership before MAC reset. - * Ownership defaults to firmware after a reset. - */ - switch (hw->mac.type) { - case e1000_82573: - ret_val = e1000_get_hw_semaphore_82573(hw); - break; - case e1000_82574: - case e1000_82583: - ret_val = e1000_get_hw_semaphore_82574(hw); - break; - default: - break; - } - if (ret_val) - e_dbg("Cannot acquire MDIO ownership\n"); - - ctrl = er32(CTRL); - - e_dbg("Issuing a global reset to MAC\n"); - ew32(CTRL, ctrl | E1000_CTRL_RST); - - /* Must release MDIO ownership and mutex after MAC reset. */ - switch (hw->mac.type) { - case e1000_82574: - case e1000_82583: - e1000_put_hw_semaphore_82574(hw); - break; - default: - break; - } - - if (hw->nvm.type == e1000_nvm_flash_hw) { - udelay(10); - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_EE_RST; - ew32(CTRL_EXT, ctrl_ext); - e1e_flush(); - } - - ret_val = e1000e_get_auto_rd_done(hw); - if (ret_val) - /* We don't want to continue accessing MAC registers. */ - return ret_val; - - /* - * Phy configuration from NVM just starts after EECD_AUTO_RD is set. - * Need to wait for Phy configuration completion before accessing - * NVM and Phy. - */ - - switch (hw->mac.type) { - case e1000_82573: - case e1000_82574: - case e1000_82583: - msleep(25); - break; - default: - break; - } - - /* Clear any pending interrupt events. */ - ew32(IMC, 0xffffffff); - er32(ICR); - - if (hw->mac.type == e1000_82571) { - /* Install any alternate MAC address into RAR0 */ - ret_val = e1000_check_alt_mac_addr_generic(hw); - if (ret_val) - return ret_val; - - e1000e_set_laa_state_82571(hw, true); - } - - /* Reinitialize the 82571 serdes link state machine */ - if (hw->phy.media_type == e1000_media_type_internal_serdes) - hw->mac.serdes_link_state = e1000_serdes_link_down; - - return 0; -} - -/** - * e1000_init_hw_82571 - Initialize hardware - * @hw: pointer to the HW structure - * - * This inits the hardware readying it for operation. 
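
Note: most of e1000_reset_hw_82571 above is about ordering rather than computation. Reduced to its steps: quiesce bus mastering, mask interrupts, take MDIO ownership (it reverts to firmware across a reset), assert the global reset, release ownership, then gate all further NVM/PHY access on auto-read completion. A sketch of that ordering with each step stubbed out:

extern int  dma_quiesce(void);
extern void irqs_mask(void);
extern int  mdio_own_take(void);
extern void mdio_own_drop(void);
extern void mac_global_reset(void);
extern int  wait_auto_read_done(void);

static int reset_ordered(void)
{
	(void)dma_quiesce();	/* failure is logged; reset proceeds anyway */
	irqs_mask();
	(void)mdio_own_take();	/* ownership reverts to firmware on reset */
	mac_global_reset();
	mdio_own_drop();	/* release only after the reset is issued */
	return wait_auto_read_done();	/* gate later NVM/PHY access */
}
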
- **/ -static s32 e1000_init_hw_82571(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 reg_data; - s32 ret_val; - u16 i, rar_count = mac->rar_entry_count; - - e1000_initialize_hw_bits_82571(hw); - - /* Initialize identification LED */ - ret_val = e1000e_id_led_init(hw); - if (ret_val) - e_dbg("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ - - /* Disabling VLAN filtering */ - e_dbg("Initializing the IEEE VLAN\n"); - mac->ops.clear_vfta(hw); - - /* Setup the receive address. */ - /* - * If, however, a locally administered address was assigned to the - * 82571, we must reserve a RAR for it to work around an issue where - * resetting one port will reload the MAC on the other port. - */ - if (e1000e_get_laa_state_82571(hw)) - rar_count--; - e1000e_init_rx_addrs(hw, rar_count); - - /* Zero out the Multicast HASH table */ - e_dbg("Zeroing the MTA\n"); - for (i = 0; i < mac->mta_reg_count; i++) - E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); - - /* Setup link and flow control */ - ret_val = e1000_setup_link_82571(hw); - - /* Set the transmit descriptor write-back policy */ - reg_data = er32(TXDCTL(0)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | - E1000_TXDCTL_COUNT_DESC; - ew32(TXDCTL(0), reg_data); - - /* ...for both queues. */ - switch (mac->type) { - case e1000_82573: - e1000e_enable_tx_pkt_filtering(hw); - /* fall through */ - case e1000_82574: - case e1000_82583: - reg_data = er32(GCR); - reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; - ew32(GCR, reg_data); - break; - default: - reg_data = er32(TXDCTL(1)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | - E1000_TXDCTL_COUNT_DESC; - ew32(TXDCTL(1), reg_data); - break; - } - - /* - * Clear all of the statistics registers (clear on read). It is - * important that we do this after we have tried to establish link - * because the symbol error count will increment wildly if there - * is no link. - */ - e1000_clear_hw_cntrs_82571(hw); - - return ret_val; -} - -/** - * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits - * @hw: pointer to the HW structure - * - * Initializes required hardware-dependent bits needed for normal operation. 
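
Note: the TXDCTL update in e1000_init_hw_82571 above is the standard read-modify-write of one field inside a live register: clear the field with its mask, then OR in the new value. The generic form of that idiom:

#include <stdint.h>

/* Replace the `field_mask` bits of `reg` with `val`, leaving the rest. */
static inline uint32_t rmw_field(uint32_t reg, uint32_t field_mask,
				 uint32_t val)
{
	return (reg & ~field_mask) | (val & field_mask);
}

In the code above the mask is E1000_TXDCTL_WTHRESH and the new bits are the write-back policy flags; the same helper shape covers the TARC/CTRL/CTRL_EXT tweaks in the next function.
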
- **/ -static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) -{ - u32 reg; - - /* Transmit Descriptor Control 0 */ - reg = er32(TXDCTL(0)); - reg |= (1 << 22); - ew32(TXDCTL(0), reg); - - /* Transmit Descriptor Control 1 */ - reg = er32(TXDCTL(1)); - reg |= (1 << 22); - ew32(TXDCTL(1), reg); - - /* Transmit Arbitration Control 0 */ - reg = er32(TARC(0)); - reg &= ~(0xF << 27); /* 30:27 */ - switch (hw->mac.type) { - case e1000_82571: - case e1000_82572: - reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); - break; - default: - break; - } - ew32(TARC(0), reg); - - /* Transmit Arbitration Control 1 */ - reg = er32(TARC(1)); - switch (hw->mac.type) { - case e1000_82571: - case e1000_82572: - reg &= ~((1 << 29) | (1 << 30)); - reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); - if (er32(TCTL) & E1000_TCTL_MULR) - reg &= ~(1 << 28); - else - reg |= (1 << 28); - ew32(TARC(1), reg); - break; - default: - break; - } - - /* Device Control */ - switch (hw->mac.type) { - case e1000_82573: - case e1000_82574: - case e1000_82583: - reg = er32(CTRL); - reg &= ~(1 << 29); - ew32(CTRL, reg); - break; - default: - break; - } - - /* Extended Device Control */ - switch (hw->mac.type) { - case e1000_82573: - case e1000_82574: - case e1000_82583: - reg = er32(CTRL_EXT); - reg &= ~(1 << 23); - reg |= (1 << 22); - ew32(CTRL_EXT, reg); - break; - default: - break; - } - - if (hw->mac.type == e1000_82571) { - reg = er32(PBA_ECC); - reg |= E1000_PBA_ECC_CORR_EN; - ew32(PBA_ECC, reg); - } - /* - * Workaround for hardware errata. - * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 - */ - - if ((hw->mac.type == e1000_82571) || - (hw->mac.type == e1000_82572)) { - reg = er32(CTRL_EXT); - reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; - ew32(CTRL_EXT, reg); - } - - - /* PCI-Ex Control Registers */ - switch (hw->mac.type) { - case e1000_82574: - case e1000_82583: - reg = er32(GCR); - reg |= (1 << 22); - ew32(GCR, reg); - - /* - * Workaround for hardware errata. - * apply workaround for hardware errata documented in errata - * docs Fixes issue where some error prone or unreliable PCIe - * completions are occurring, particularly with ASPM enabled. - * Without fix, issue can cause Tx timeouts. - */ - reg = er32(GCR2); - reg |= 1; - ew32(GCR2, reg); - break; - default: - break; - } -} - -/** - * e1000_clear_vfta_82571 - Clear VLAN filter table - * @hw: pointer to the HW structure - * - * Clears the register array which contains the VLAN filter table by - * setting all the values to 0. - **/ -static void e1000_clear_vfta_82571(struct e1000_hw *hw) -{ - u32 offset; - u32 vfta_value = 0; - u32 vfta_offset = 0; - u32 vfta_bit_in_reg = 0; - - switch (hw->mac.type) { - case e1000_82573: - case e1000_82574: - case e1000_82583: - if (hw->mng_cookie.vlan_id != 0) { - /* - * The VFTA is a 4096b bit-field, each identifying - * a single VLAN ID. The following operations - * determine which 32b entry (i.e. offset) into the - * array we want to set the VLAN ID (i.e. bit) of - * the manageability unit. - */ - vfta_offset = (hw->mng_cookie.vlan_id >> - E1000_VFTA_ENTRY_SHIFT) & - E1000_VFTA_ENTRY_MASK; - vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & - E1000_VFTA_ENTRY_BIT_SHIFT_MASK); - } - break; - default: - break; - } - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - /* - * If the offset we want to clear is the same offset of the - * manageability VLAN ID, then clear all bits except that of - * the manageability unit. - */ - vfta_value = (offset == vfta_offset) ? 
vfta_bit_in_reg : 0; - E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); - e1e_flush(); - } -} - -/** - * e1000_check_mng_mode_82574 - Check manageability is enabled - * @hw: pointer to the HW structure - * - * Reads the NVM Initialization Control Word 2 and returns true - * (>0) if any manageability is enabled, else false (0). - **/ -static bool e1000_check_mng_mode_82574(struct e1000_hw *hw) -{ - u16 data; - - e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); - return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0; -} - -/** - * e1000_led_on_82574 - Turn LED on - * @hw: pointer to the HW structure - * - * Turn LED on. - **/ -static s32 e1000_led_on_82574(struct e1000_hw *hw) -{ - u32 ctrl; - u32 i; - - ctrl = hw->mac.ledctl_mode2; - if (!(E1000_STATUS_LU & er32(STATUS))) { - /* - * If no link, then turn LED on by setting the invert bit - * for each LED that's "on" (0x0E) in ledctl_mode2. - */ - for (i = 0; i < 4; i++) - if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == - E1000_LEDCTL_MODE_LED_ON) - ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8)); - } - ew32(LEDCTL, ctrl); - - return 0; -} - -/** - * e1000_check_phy_82574 - check 82574 phy hung state - * @hw: pointer to the HW structure - * - * Returns whether the phy is hung or not - **/ -bool e1000_check_phy_82574(struct e1000_hw *hw) -{ - u16 status_1kbt = 0; - u16 receive_errors = 0; - bool phy_hung = false; - s32 ret_val = 0; - - /* - * Read the PHY Receive Error counter first; if it is at max (all F's), - * then read the Base1000T status register. If both are at max, the PHY - * is hung. - */ - ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); - - if (ret_val) - goto out; - if (receive_errors == E1000_RECEIVE_ERROR_MAX) { - ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); - if (ret_val) - goto out; - if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == - E1000_IDLE_ERROR_COUNT_MASK) - phy_hung = true; - } -out: - return phy_hung; -} - -/** - * e1000_setup_link_82571 - Setup flow control and link settings - * @hw: pointer to the HW structure - * - * Determines which flow control settings to use, then configures flow - * control. Calls the appropriate media-specific link configuration - * function. Assuming the adapter has a valid link partner, a valid link - * should be established. Assumes the hardware has previously been reset - * and the transmitter and receiver are not enabled. - **/ -static s32 e1000_setup_link_82571(struct e1000_hw *hw) -{ - /* - * The 82573 does not have a word in the NVM to determine - * the default flow control setting, so we explicitly - * set it to full. - */ - switch (hw->mac.type) { - case e1000_82573: - case e1000_82574: - case e1000_82583: - if (hw->fc.requested_mode == e1000_fc_default) - hw->fc.requested_mode = e1000_fc_full; - break; - default: - break; - } - - return e1000e_setup_link(hw); -} - -/** - * e1000_setup_copper_link_82571 - Configure copper link settings - * @hw: pointer to the HW structure - * - * Configures the link for auto-neg or forced speed and duplex. Then we check - * for link; once link is established, calls are made to configure collision - * distance and flow control.
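
Note: the index arithmetic in e1000_clear_vfta_82571 above generalizes to any bit-per-ID table stored as 32-bit words: the upper bits of the VLAN ID select the word and the low five bits select the bit, giving offset = vid >> 5 and bit = vid & 31 for the 4096-entry table. A standalone sketch over an in-memory table:

#include <stdint.h>

#define VFTA_WORDS 128	/* 4096 VLAN IDs / 32 bits per word */

static uint32_t vfta[VFTA_WORDS];

static void vfta_set(uint16_t vid)
{
	vfta[(vid >> 5) & (VFTA_WORDS - 1)] |= 1u << (vid & 31);
}

static int vfta_test(uint16_t vid)
{
	return (vfta[(vid >> 5) & (VFTA_WORDS - 1)] >> (vid & 31)) & 1;
}

The driver's E1000_VFTA_ENTRY_SHIFT and E1000_VFTA_ENTRY_BIT_SHIFT_MASK encode the same split; clearing the table while preserving the manageability VLAN is then just writing vfta_bit_in_reg at one offset and zero everywhere else, as above.
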
- **/ -static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) -{ - u32 ctrl; - s32 ret_val; - - ctrl = er32(CTRL); - ctrl |= E1000_CTRL_SLU; - ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - ew32(CTRL, ctrl); - - switch (hw->phy.type) { - case e1000_phy_m88: - case e1000_phy_bm: - ret_val = e1000e_copper_link_setup_m88(hw); - break; - case e1000_phy_igp_2: - ret_val = e1000e_copper_link_setup_igp(hw); - break; - default: - return -E1000_ERR_PHY; - break; - } - - if (ret_val) - return ret_val; - - ret_val = e1000e_setup_copper_link(hw); - - return ret_val; -} - -/** - * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes - * @hw: pointer to the HW structure - * - * Configures collision distance and flow control for fiber and serdes links. - * Upon successful setup, poll for link. - **/ -static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) -{ - switch (hw->mac.type) { - case e1000_82571: - case e1000_82572: - /* - * If SerDes loopback mode is entered, there is no form - * of reset to take the adapter out of that mode. So we - * have to explicitly take the adapter out of loopback - * mode. This prevents drivers from twiddling their thumbs - * if another tool failed to take it out of loopback mode. - */ - ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); - break; - default: - break; - } - - return e1000e_setup_fiber_serdes_link(hw); -} - -/** - * e1000_check_for_serdes_link_82571 - Check for link (Serdes) - * @hw: pointer to the HW structure - * - * Reports the link state as up or down. - * - * If autonegotiation is supported by the link partner, the link state is - * determined by the result of autonegotiation. This is the most likely case. - * If autonegotiation is not supported by the link partner, and the link - * has a valid signal, force the link up. - * - * The link state is represented internally here by 4 states: - * - * 1) down - * 2) autoneg_progress - * 3) autoneg_complete (the link successfully autonegotiated) - * 4) forced_up (the link has been forced up, it did not autonegotiate) - * - **/ -static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 rxcw; - u32 ctrl; - u32 status; - u32 txcw; - u32 i; - s32 ret_val = 0; - - ctrl = er32(CTRL); - status = er32(STATUS); - rxcw = er32(RXCW); - - if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { - - /* Receiver is synchronized with no invalid bits. */ - switch (mac->serdes_link_state) { - case e1000_serdes_link_autoneg_complete: - if (!(status & E1000_STATUS_LU)) { - /* - * We have lost link, retry autoneg before - * reporting link failure - */ - mac->serdes_link_state = - e1000_serdes_link_autoneg_progress; - mac->serdes_has_link = false; - e_dbg("AN_UP -> AN_PROG\n"); - } else { - mac->serdes_has_link = true; - } - break; - - case e1000_serdes_link_forced_up: - /* - * If we are receiving /C/ ordered sets, re-enable - * auto-negotiation in the TXCW register and disable - * forced link in the Device Control register in an - * attempt to auto-negotiate with our link partner. - * If the partner code word is null, stop forcing - * and restart auto negotiation. 
- */ - if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { - /* Enable autoneg, and unforce link up */ - ew32(TXCW, mac->txcw); - ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); - mac->serdes_link_state = - e1000_serdes_link_autoneg_progress; - mac->serdes_has_link = false; - e_dbg("FORCED_UP -> AN_PROG\n"); - } else { - mac->serdes_has_link = true; - } - break; - - case e1000_serdes_link_autoneg_progress: - if (rxcw & E1000_RXCW_C) { - /* - * We received /C/ ordered sets, meaning the - * link partner has autonegotiated, and we can - * trust the Link Up (LU) status bit. - */ - if (status & E1000_STATUS_LU) { - mac->serdes_link_state = - e1000_serdes_link_autoneg_complete; - e_dbg("AN_PROG -> AN_UP\n"); - mac->serdes_has_link = true; - } else { - /* Autoneg completed, but failed. */ - mac->serdes_link_state = - e1000_serdes_link_down; - e_dbg("AN_PROG -> DOWN\n"); - } - } else { - /* - * The link partner did not autoneg. - * Force link up and full duplex, and change - * state to forced. - */ - ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); - ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); - ew32(CTRL, ctrl); - - /* Configure Flow Control after link up. */ - ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) { - e_dbg("Error config flow control\n"); - break; - } - mac->serdes_link_state = - e1000_serdes_link_forced_up; - mac->serdes_has_link = true; - e_dbg("AN_PROG -> FORCED_UP\n"); - } - break; - - case e1000_serdes_link_down: - default: - /* - * The link was down but the receiver has now gained - * valid sync, so lets see if we can bring the link - * up. - */ - ew32(TXCW, mac->txcw); - ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); - mac->serdes_link_state = - e1000_serdes_link_autoneg_progress; - mac->serdes_has_link = false; - e_dbg("DOWN -> AN_PROG\n"); - break; - } - } else { - if (!(rxcw & E1000_RXCW_SYNCH)) { - mac->serdes_has_link = false; - mac->serdes_link_state = e1000_serdes_link_down; - e_dbg("ANYSTATE -> DOWN\n"); - } else { - /* - * Check several times, if Sync and Config - * both are consistently 1 then simply ignore - * the Invalid bit and restart Autoneg - */ - for (i = 0; i < AN_RETRY_COUNT; i++) { - udelay(10); - rxcw = er32(RXCW); - if ((rxcw & E1000_RXCW_IV) && - !((rxcw & E1000_RXCW_SYNCH) && - (rxcw & E1000_RXCW_C))) { - mac->serdes_has_link = false; - mac->serdes_link_state = - e1000_serdes_link_down; - e_dbg("ANYSTATE -> DOWN\n"); - break; - } - } - - if (i == AN_RETRY_COUNT) { - txcw = er32(TXCW); - txcw |= E1000_TXCW_ANE; - ew32(TXCW, txcw); - mac->serdes_link_state = - e1000_serdes_link_autoneg_progress; - mac->serdes_has_link = false; - e_dbg("ANYSTATE -> AN_PROG\n"); - } - } - } - - return ret_val; -} - -/** - * e1000_valid_led_default_82571 - Verify a valid default LED config - * @hw: pointer to the HW structure - * @data: pointer to the NVM (EEPROM) - * - * Read the EEPROM for the current default LED configuration. If the - * LED configuration is not valid, set to a valid LED configuration. 
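
Note: ignoring the invalid-bit retry loop at the end, the serdes logic above is a four-state machine driven by two inputs: receiver sync and whether /C/ ordered sets (a negotiating partner) are seen. A condensed, self-contained skeleton of the common transitions — a sketch, not the full driver logic:

#include <stdbool.h>

enum serdes_state { SL_DOWN, SL_AN_PROGRESS, SL_AN_COMPLETE, SL_FORCED_UP };

/* One evaluation: returns the next state for the current inputs. */
static enum serdes_state serdes_step(enum serdes_state s, bool synced,
				     bool saw_config, bool link_up)
{
	if (!synced)
		return SL_DOWN;		/* lost sync: always go down */

	switch (s) {
	case SL_AN_COMPLETE:
		return link_up ? SL_AN_COMPLETE : SL_AN_PROGRESS;
	case SL_FORCED_UP:
		/* partner began negotiating: stop forcing, re-run AN */
		return saw_config ? SL_AN_PROGRESS : SL_FORCED_UP;
	case SL_AN_PROGRESS:
		if (saw_config)		/* partner negotiated: trust LU bit */
			return link_up ? SL_AN_COMPLETE : SL_DOWN;
		return SL_FORCED_UP;	/* partner won't AN: force link up */
	case SL_DOWN:
	default:
		return SL_AN_PROGRESS;	/* sync regained: retry autoneg */
	}
}

Each transition in the real code also has a side effect (rewriting TXCW/CTRL, configuring flow control after link up); the skeleton keeps only the state logic.
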
- **/ -static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) -{ - s32 ret_val; - - ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); - if (ret_val) { - e_dbg("NVM Read Error\n"); - return ret_val; - } - - switch (hw->mac.type) { - case e1000_82573: - case e1000_82574: - case e1000_82583: - if (*data == ID_LED_RESERVED_F746) - *data = ID_LED_DEFAULT_82573; - break; - default: - if (*data == ID_LED_RESERVED_0000 || - *data == ID_LED_RESERVED_FFFF) - *data = ID_LED_DEFAULT; - break; - } - - return 0; -} - -/** - * e1000e_get_laa_state_82571 - Get locally administered address state - * @hw: pointer to the HW structure - * - * Retrieve and return the current locally administered address state. - **/ -bool e1000e_get_laa_state_82571(struct e1000_hw *hw) -{ - if (hw->mac.type != e1000_82571) - return false; - - return hw->dev_spec.e82571.laa_is_present; -} - -/** - * e1000e_set_laa_state_82571 - Set locally administered address state - * @hw: pointer to the HW structure - * @state: enable/disable locally administered address - * - * Enable/Disable the current locally administered address state. - **/ -void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) -{ - if (hw->mac.type != e1000_82571) - return; - - hw->dev_spec.e82571.laa_is_present = state; - - /* If workaround is activated... */ - if (state) - /* - * Hold a copy of the LAA in RAR[14]. This is done so that - * between the time RAR[0] gets clobbered and the time it - * gets fixed, the actual LAA is in one of the RARs and no - * incoming packets directed to this port are dropped. - * Eventually the LAA will be in RAR[0] and RAR[14]. - */ - e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); -} - -/** - * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum - * @hw: pointer to the HW structure - * - * Verifies that the EEPROM has completed the update. After updating the - * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If - * the checksum fix is not implemented, we need to set the bit and update - * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, - * we need to return bad checksum. - **/ -static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - s32 ret_val; - u16 data; - - if (nvm->type != e1000_nvm_flash_hw) - return 0; - - /* - * Check bit 4 of word 10h. If it is 0, firmware is done updating - * 10h-12h. Checksum may need to be fixed. - */ - ret_val = e1000_read_nvm(hw, 0x10, 1, &data); - if (ret_val) - return ret_val; - - if (!(data & 0x10)) { - /* - * Read 0x23 and check bit 15. This bit is a 1 - * when the checksum has already been fixed. If - * the checksum is still wrong and this bit is a - * 1, we need to return bad checksum. Otherwise, - * we need to set this bit to a 1 and update the - * checksum. - */ - ret_val = e1000_read_nvm(hw, 0x23, 1, &data); - if (ret_val) - return ret_val; - - if (!(data & 0x8000)) { - data |= 0x8000; - ret_val = e1000_write_nvm(hw, 0x23, 1, &data); - if (ret_val) - return ret_val; - ret_val = e1000e_update_nvm_checksum(hw); - } - } - - return 0; -} - -/** - * e1000_read_mac_addr_82571 - Read device MAC address - * @hw: pointer to the HW structure - **/ -static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) -{ - s32 ret_val = 0; - - if (hw->mac.type == e1000_82571) { - /* - * If there's an alternate MAC address, place it in RAR0 - * so that it will override the Si installed default perm - * address.
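
Note: the checksum-fix routine above is a guarded one-time migration: proceed only if firmware has finished writing words 10h-12h (bit 4 of word 0x10 clear) and the "already fixed" flag (bit 15 of word 0x23) is still unset; then set the flag and recompute the checksum. The same guard structure over a plain array standing in for the NVM:

#include <stdint.h>

#define FW_BUSY_BIT 0x0010	/* bit 4 of word 0x10, per the code above */
#define FIXED_FLAG  0x8000	/* bit 15 of word 0x23 */

extern void recompute_checksum(uint16_t *nvm);	/* stand-in */

static void fix_checksum_once(uint16_t *nvm)
{
	if (nvm[0x10] & FW_BUSY_BIT)	/* firmware still updating 10h-12h */
		return;
	if (nvm[0x23] & FIXED_FLAG)	/* migration already applied */
		return;
	nvm[0x23] |= FIXED_FLAG;	/* mark as fixed, then re-checksum */
	recompute_checksum(nvm);
}
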
- */ - ret_val = e1000_check_alt_mac_addr_generic(hw); - if (ret_val) - goto out; - } - - ret_val = e1000_read_mac_addr_generic(hw); - -out: - return ret_val; -} - -/** - * e1000_power_down_phy_copper_82571 - Remove link during PHY power down - * @hw: pointer to the HW structure - * - * In the case of a PHY power down to save power, or to turn off link during a - * driver unload, or wake on lan is not enabled, remove the link. - **/ -static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - struct e1000_mac_info *mac = &hw->mac; - - if (!(phy->ops.check_reset_block)) - return; - - /* If the management interface is not enabled, then power down */ - if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) - e1000_power_down_phy_copper(hw); -} - -/** - * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters - * @hw: pointer to the HW structure - * - * Clears the hardware counters by reading the counter registers. - **/ -static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) -{ - e1000e_clear_hw_cntrs_base(hw); - - er32(PRC64); - er32(PRC127); - er32(PRC255); - er32(PRC511); - er32(PRC1023); - er32(PRC1522); - er32(PTC64); - er32(PTC127); - er32(PTC255); - er32(PTC511); - er32(PTC1023); - er32(PTC1522); - - er32(ALGNERRC); - er32(RXERRC); - er32(TNCRS); - er32(CEXTERR); - er32(TSCTC); - er32(TSCTFC); - - er32(MGTPRC); - er32(MGTPDC); - er32(MGTPTC); - - er32(IAC); - er32(ICRXOC); - - er32(ICRXPTC); - er32(ICRXATC); - er32(ICTXPTC); - er32(ICTXATC); - er32(ICTXQEC); - er32(ICTXQMTC); - er32(ICRXDMTC); -} - -static struct e1000_mac_operations e82571_mac_ops = { - /* .check_mng_mode: mac type dependent */ - /* .check_for_link: media type dependent */ - .id_led_init = e1000e_id_led_init, - .cleanup_led = e1000e_cleanup_led_generic, - .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, - .get_bus_info = e1000e_get_bus_info_pcie, - .set_lan_id = e1000_set_lan_id_multi_port_pcie, - /* .get_link_up_info: media type dependent */ - /* .led_on: mac type dependent */ - .led_off = e1000e_led_off_generic, - .update_mc_addr_list = e1000e_update_mc_addr_list_generic, - .write_vfta = e1000_write_vfta_generic, - .clear_vfta = e1000_clear_vfta_82571, - .reset_hw = e1000_reset_hw_82571, - .init_hw = e1000_init_hw_82571, - .setup_link = e1000_setup_link_82571, - /* .setup_physical_interface: media type dependent */ - .setup_led = e1000e_setup_led_generic, - .read_mac_addr = e1000_read_mac_addr_82571, -}; - -static struct e1000_phy_operations e82_phy_ops_igp = { - .acquire = e1000_get_hw_semaphore_82571, - .check_polarity = e1000_check_polarity_igp, - .check_reset_block = e1000e_check_reset_block_generic, - .commit = NULL, - .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, - .get_cfg_done = e1000_get_cfg_done_82571, - .get_cable_length = e1000e_get_cable_length_igp_2, - .get_info = e1000e_get_phy_info_igp, - .read_reg = e1000e_read_phy_reg_igp, - .release = e1000_put_hw_semaphore_82571, - .reset = e1000e_phy_hw_reset_generic, - .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, - .set_d3_lplu_state = e1000e_set_d3_lplu_state, - .write_reg = e1000e_write_phy_reg_igp, - .cfg_on_link_up = NULL, -}; - -static struct e1000_phy_operations e82_phy_ops_m88 = { - .acquire = e1000_get_hw_semaphore_82571, - .check_polarity = e1000_check_polarity_m88, - .check_reset_block = e1000e_check_reset_block_generic, - .commit = e1000e_phy_sw_reset, - .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, - .get_cfg_done = e1000e_get_cfg_done, - 
.get_cable_length = e1000e_get_cable_length_m88, - .get_info = e1000e_get_phy_info_m88, - .read_reg = e1000e_read_phy_reg_m88, - .release = e1000_put_hw_semaphore_82571, - .reset = e1000e_phy_hw_reset_generic, - .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, - .set_d3_lplu_state = e1000e_set_d3_lplu_state, - .write_reg = e1000e_write_phy_reg_m88, - .cfg_on_link_up = NULL, -}; - -static struct e1000_phy_operations e82_phy_ops_bm = { - .acquire = e1000_get_hw_semaphore_82571, - .check_polarity = e1000_check_polarity_m88, - .check_reset_block = e1000e_check_reset_block_generic, - .commit = e1000e_phy_sw_reset, - .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, - .get_cfg_done = e1000e_get_cfg_done, - .get_cable_length = e1000e_get_cable_length_m88, - .get_info = e1000e_get_phy_info_m88, - .read_reg = e1000e_read_phy_reg_bm2, - .release = e1000_put_hw_semaphore_82571, - .reset = e1000e_phy_hw_reset_generic, - .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, - .set_d3_lplu_state = e1000e_set_d3_lplu_state, - .write_reg = e1000e_write_phy_reg_bm2, - .cfg_on_link_up = NULL, -}; - -static struct e1000_nvm_operations e82571_nvm_ops = { - .acquire = e1000_acquire_nvm_82571, - .read = e1000e_read_nvm_eerd, - .release = e1000_release_nvm_82571, - .update = e1000_update_nvm_checksum_82571, - .valid_led_default = e1000_valid_led_default_82571, - .validate = e1000_validate_nvm_checksum_82571, - .write = e1000_write_nvm_82571, -}; - -struct e1000_info e1000_82571_info = { - .mac = e1000_82571, - .flags = FLAG_HAS_HW_VLAN_FILTER - | FLAG_HAS_JUMBO_FRAMES - | FLAG_HAS_WOL - | FLAG_APME_IN_CTRL3 - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_CTRLEXT_ON_LOAD - | FLAG_HAS_SMART_POWER_DOWN - | FLAG_RESET_OVERWRITES_LAA /* errata */ - | FLAG_TARC_SPEED_MODE_BIT /* errata */ - | FLAG_APME_CHECK_PORT_B, - .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ - | FLAG2_DMA_BURST, - .pba = 38, - .max_hw_frame_size = DEFAULT_JUMBO, - .get_variants = e1000_get_variants_82571, - .mac_ops = &e82571_mac_ops, - .phy_ops = &e82_phy_ops_igp, - .nvm_ops = &e82571_nvm_ops, -}; - -struct e1000_info e1000_82572_info = { - .mac = e1000_82572, - .flags = FLAG_HAS_HW_VLAN_FILTER - | FLAG_HAS_JUMBO_FRAMES - | FLAG_HAS_WOL - | FLAG_APME_IN_CTRL3 - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_CTRLEXT_ON_LOAD - | FLAG_TARC_SPEED_MODE_BIT, /* errata */ - .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ - | FLAG2_DMA_BURST, - .pba = 38, - .max_hw_frame_size = DEFAULT_JUMBO, - .get_variants = e1000_get_variants_82571, - .mac_ops = &e82571_mac_ops, - .phy_ops = &e82_phy_ops_igp, - .nvm_ops = &e82571_nvm_ops, -}; - -struct e1000_info e1000_82573_info = { - .mac = e1000_82573, - .flags = FLAG_HAS_HW_VLAN_FILTER - | FLAG_HAS_WOL - | FLAG_APME_IN_CTRL3 - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_SMART_POWER_DOWN - | FLAG_HAS_AMT - | FLAG_HAS_SWSM_ON_LOAD, - .flags2 = FLAG2_DISABLE_ASPM_L1 - | FLAG2_DISABLE_ASPM_L0S, - .pba = 20, - .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, - .get_variants = e1000_get_variants_82571, - .mac_ops = &e82571_mac_ops, - .phy_ops = &e82_phy_ops_m88, - .nvm_ops = &e82571_nvm_ops, -}; - -struct e1000_info e1000_82574_info = { - .mac = e1000_82574, - .flags = FLAG_HAS_HW_VLAN_FILTER - | FLAG_HAS_MSIX - | FLAG_HAS_JUMBO_FRAMES - | FLAG_HAS_WOL - | FLAG_APME_IN_CTRL3 - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_SMART_POWER_DOWN - | FLAG_HAS_AMT - | FLAG_HAS_CTRLEXT_ON_LOAD, - .flags2 = FLAG2_CHECK_PHY_HANG - | FLAG2_DISABLE_ASPM_L0S, - .pba = 32, - .max_hw_frame_size = DEFAULT_JUMBO, - .get_variants = e1000_get_variants_82571, - 
.mac_ops = &e82571_mac_ops, - .phy_ops = &e82_phy_ops_bm, - .nvm_ops = &e82571_nvm_ops, -}; - -struct e1000_info e1000_82583_info = { - .mac = e1000_82583, - .flags = FLAG_HAS_HW_VLAN_FILTER - | FLAG_HAS_WOL - | FLAG_APME_IN_CTRL3 - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_SMART_POWER_DOWN - | FLAG_HAS_AMT - | FLAG_HAS_JUMBO_FRAMES - | FLAG_HAS_CTRLEXT_ON_LOAD, - .flags2 = FLAG2_DISABLE_ASPM_L0S, - .pba = 32, - .max_hw_frame_size = DEFAULT_JUMBO, - .get_variants = e1000_get_variants_82571, - .mac_ops = &e82571_mac_ops, - .phy_ops = &e82_phy_ops_bm, - .nvm_ops = &e82571_nvm_ops, -}; - diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile deleted file mode 100644 index 28519ac..0000000 --- a/drivers/net/e1000e/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -################################################################################ -# -# Intel PRO/1000 Linux driver -# Copyright(c) 1999 - 2011 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - -# -# Makefile for the Intel(R) PRO/1000 ethernet driver -# - -obj-$(CONFIG_E1000E) += e1000e.o - -e1000e-objs := 82571.o ich8lan.o es2lan.o \ - lib.o phy.o param.o ethtool.o netdev.o - diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h deleted file mode 100644 index c516a74..0000000 --- a/drivers/net/e1000e/defines.h +++ /dev/null @@ -1,844 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_DEFINES_H_ -#define _E1000_DEFINES_H_ - -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ -#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ -#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ -#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ -#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ -#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ -#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ -#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ - -/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define REQ_RX_DESCRIPTOR_MULTIPLE 8 - -/* Definitions for power management and wakeup registers */ -/* Wake Up Control */ -#define E1000_WUC_APME 0x00000001 /* APM Enable */ -#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ -#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ - -/* Wake Up Filter Control */ -#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ -#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ -#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ - -/* Wake Up Status */ -#define E1000_WUS_LNKC E1000_WUFC_LNKC -#define E1000_WUS_MAG E1000_WUFC_MAG -#define E1000_WUS_EX E1000_WUFC_EX -#define E1000_WUS_MC E1000_WUFC_MC -#define E1000_WUS_BC E1000_WUFC_BC - -/* Extended Device Control */ -#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ -#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ -#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ -#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ -#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ -#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 -#define E1000_CTRL_EXT_EIAME 0x01000000 -#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ -#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ -#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ -#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ -#define E1000_CTRL_EXT_LSECCK 0x00001000 -#define E1000_CTRL_EXT_PHYPDEN 0x00100000 - -/* Receive Descriptor bit definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* 
IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ -#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ -#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ -#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ -#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ -#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ -#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ - -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 - -/* mask to determine if packets should be dropped due to frame errors */ -#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ - E1000_RXD_ERR_CE | \ - E1000_RXD_ERR_SE | \ - E1000_RXD_ERR_SEQ | \ - E1000_RXD_ERR_CXE | \ - E1000_RXD_ERR_RXE) - -/* Same mask, but for extended and packet split descriptors */ -#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) - -#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 - -/* Management Control */ -#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ -#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ -#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ -#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ -#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ -/* Enable MAC address filtering */ -#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 -/* Enable MNG packets to host memory */ -#define E1000_MANC_EN_MNG2HOST 0x00200000 - -#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ -#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ -#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ -#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ - -/* Receive Control */ -#define E1000_RCTL_EN 0x00000002 /* enable */ -#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ -#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ -#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ -#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ -#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ -#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ -#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ -#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ -#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ -#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ -#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ -#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ -/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ -#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ -#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ -#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ -#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ -/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ -#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ -#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ -#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ -#define E1000_RCTL_VFE 0x00040000 /* vlan filter 
enable */ -#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ -#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ -#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ -#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ -#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ - -/* - * Use byte values for the following shift parameters - * Usage: - * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & - * E1000_PSRCTL_BSIZE0_MASK) | - * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & - * E1000_PSRCTL_BSIZE1_MASK) | - * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & - * E1000_PSRCTL_BSIZE2_MASK) | - * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; - * E1000_PSRCTL_BSIZE3_MASK)) - * where value0 = [128..16256], default=256 - * value1 = [1024..64512], default=4096 - * value2 = [0..64512], default=4096 - * value3 = [0..64512], default=0 - */ - -#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F -#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 -#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 -#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 - -#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ -#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ -#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ -#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ - -/* SWFW_SYNC Definitions */ -#define E1000_SWFW_EEP_SM 0x1 -#define E1000_SWFW_PHY0_SM 0x2 -#define E1000_SWFW_PHY1_SM 0x4 -#define E1000_SWFW_CSR_SM 0x8 - -/* Device Control */ -#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ -#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ -#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ -#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ -#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ -#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ -#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ -#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ -#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ -#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ -#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ -#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ -#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ -#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ -#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ -#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ -#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ -#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ -#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ -#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ -#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ - -/* - * Bit definitions for the Management Data IO (MDIO) and Management Data - * Clock (MDC) pins in the Device Control Register. 
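The CTRL bits above are combined when software forces link parameters rather than autonegotiating them; a minimal sketch, assuming the driver's er32()/ew32() register accessors and a PHY already configured to match:

/* Sketch: force 100 Mb/s full duplex at the MAC. */
u32 ctrl = er32(CTRL);

ctrl &= ~E1000_CTRL_SPD_SEL;		/* clear the speed-select field */
ctrl |= E1000_CTRL_FRCSPD |		/* speed comes from CTRL, not PHY */
	E1000_CTRL_FRCDPX;		/* duplex comes from CTRL, not PHY */
ctrl |= E1000_CTRL_SPD_100 | E1000_CTRL_FD | E1000_CTRL_SLU;
ew32(CTRL, ctrl);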
- */ - -/* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ -#define E1000_STATUS_FUNC_SHIFT 2 -#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ -#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ -#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ -#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ -#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ - -/* Constants used to interpret the masked PCI-X bus speed. */ - -#define HALF_DUPLEX 1 -#define FULL_DUPLEX 2 - - -#define ADVERTISE_10_HALF 0x0001 -#define ADVERTISE_10_FULL 0x0002 -#define ADVERTISE_100_HALF 0x0004 -#define ADVERTISE_100_FULL 0x0008 -#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ -#define ADVERTISE_1000_FULL 0x0020 - -/* 1000/H is not supported, nor spec-compliant. */ -#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ - ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ - ADVERTISE_1000_FULL) -#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ - ADVERTISE_100_HALF | ADVERTISE_100_FULL) -#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) -#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) -#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) - -#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX - -/* LED Control */ -#define E1000_PHY_LED0_MODE_MASK 0x00000007 -#define E1000_PHY_LED0_IVRT 0x00000008 -#define E1000_PHY_LED0_MASK 0x0000001F - -#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F -#define E1000_LEDCTL_LED0_MODE_SHIFT 0 -#define E1000_LEDCTL_LED0_IVRT 0x00000040 -#define E1000_LEDCTL_LED0_BLINK 0x00000080 - -#define E1000_LEDCTL_MODE_LINK_UP 0x2 -#define E1000_LEDCTL_MODE_LED_ON 0xE -#define E1000_LEDCTL_MODE_LED_OFF 0xF - -/* Transmit Descriptor bit definitions */ -#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ -#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ -#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ -#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ -#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ -#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ -#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ -#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ - -/* Transmit Control */ -#define E1000_TCTL_EN 0x00000002 /* enable Tx */ -#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ -#define E1000_TCTL_CT 
0x00000ff0 /* collision threshold */ -#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ -#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ -#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ - -/* Transmit Arbitration Count */ - -/* SerDes Control */ -#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 - -/* Receive Checksum Control */ -#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ -#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ - -/* Header split receive */ -#define E1000_RFCTL_NFSW_DIS 0x00000040 -#define E1000_RFCTL_NFSR_DIS 0x00000080 -#define E1000_RFCTL_ACK_DIS 0x00001000 -#define E1000_RFCTL_EXTEN 0x00008000 -#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 -#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 - -/* Collision related configuration parameters */ -#define E1000_COLLISION_THRESHOLD 15 -#define E1000_CT_SHIFT 4 -#define E1000_COLLISION_DISTANCE 63 -#define E1000_COLD_SHIFT 12 - -/* Default values for the transmit IPG register */ -#define DEFAULT_82543_TIPG_IPGT_COPPER 8 - -#define E1000_TIPG_IPGT_MASK 0x000003FF - -#define DEFAULT_82543_TIPG_IPGR1 8 -#define E1000_TIPG_IPGR1_SHIFT 10 - -#define DEFAULT_82543_TIPG_IPGR2 6 -#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 -#define E1000_TIPG_IPGR2_SHIFT 20 - -#define MAX_JUMBO_FRAME_SIZE 0x3F00 - -/* Extended Configuration Control and Size */ -#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 -#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 -#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 -#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 -#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 -#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 -#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 -#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 -#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 - -#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 -#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 -#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 -#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 - -#define E1000_KABGTXD_BGSQLBIAS 0x00050000 - -/* PBA constants */ -#define E1000_PBA_8K 0x0008 /* 8KB */ -#define E1000_PBA_16K 0x0010 /* 16KB */ - -#define E1000_PBS_16K E1000_PBA_16K - -#define IFS_MAX 80 -#define IFS_MIN 40 -#define IFS_RATIO 4 -#define IFS_STEP 10 -#define MIN_NUM_XMITS 1000 - -/* SW Semaphore Register */ -#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ -#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ -#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ - -#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ - -/* Interrupt Cause Read */ -#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ -#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ -#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ -#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ -#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ -#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ -#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ -#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ -#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ -#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ -#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ - -/* PBA ECC Register */ -#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ -#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ -#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */ -#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ -#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */ - -/* - * This defines the bits that are set in the Interrupt Mask - * Set/Read Register. Each bit is documented below: - * o RXT0 = Receiver Timer Interrupt (ring 0) - * o TXDW = Transmit Descriptor Written Back - * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) - * o RXSEQ = Receive Sequence Error - * o LSC = Link Status Change - */ -#define IMS_ENABLE_MASK ( \ - E1000_IMS_RXT0 | \ - E1000_IMS_TXDW | \ - E1000_IMS_RXDMT0 | \ - E1000_IMS_RXSEQ | \ - E1000_IMS_LSC) - -/* Interrupt Mask Set */ -#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ -#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ -#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ -#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ -#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ -#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ -#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ -#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ -#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ - -/* Interrupt Cause Set */ -#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ -#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ - -/* Transmit Descriptor Control */ -#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ -#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ -#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ -#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ -#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ -#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ -/* Enable the counting of desc. still to be processed. */ -#define E1000_TXDCTL_COUNT_DESC 0x00400000 - -/* Flow Control Constants */ -#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 -#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 -#define FLOW_CONTROL_TYPE 0x8808 - -/* 802.1q VLAN Packet Size */ -#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ - -/* Receive Address */ -/* - * Number of high/low register pairs in the RAR. The RAR (Receive Address - * Registers) holds the directed and multicast addresses that we monitor. - * Technically, we have 16 spots. However, we reserve one of these spots - * (RAR[15]) for our directed address used by controllers with - * manageability enabled, allowing us room for 15 multicast addresses. 
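Concretely, each entry is a RAL/RAH register pair: the low register holds the first four bytes of the MAC address and the high register holds the remaining two plus the Address Valid bit (E1000_RAH_AV, defined just below). A minimal sketch of programming one entry, modeled on the driver's generic rar_set helper, with the ew32() accessor assumed:

/* Sketch: install a 6-byte MAC address into RAR[index]. */
static void e1000_rar_set_sketch(struct e1000_hw *hw, const u8 *addr, u32 index)
{
	u32 rar_low = (u32)addr[0] | ((u32)addr[1] << 8) |
		      ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
	u32 rar_high = (u32)addr[4] | ((u32)addr[5] << 8);

	ew32(RAL(index), rar_low);
	ew32(RAH(index), rar_high | E1000_RAH_AV);	/* mark entry valid */
}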
- */ -#define E1000_RAR_ENTRIES 15 -#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ -#define E1000_RAL_MAC_ADDR_LEN 4 -#define E1000_RAH_MAC_ADDR_LEN 2 - -/* Error Codes */ -#define E1000_ERR_NVM 1 -#define E1000_ERR_PHY 2 -#define E1000_ERR_CONFIG 3 -#define E1000_ERR_PARAM 4 -#define E1000_ERR_MAC_INIT 5 -#define E1000_ERR_PHY_TYPE 6 -#define E1000_ERR_RESET 9 -#define E1000_ERR_MASTER_REQUESTS_PENDING 10 -#define E1000_ERR_HOST_INTERFACE_COMMAND 11 -#define E1000_BLK_PHY_RESET 12 -#define E1000_ERR_SWFW_SYNC 13 -#define E1000_NOT_IMPLEMENTED 14 -#define E1000_ERR_INVALID_ARGUMENT 16 -#define E1000_ERR_NO_SPACE 17 -#define E1000_ERR_NVM_PBA_SECTION 18 - -/* Loop limit on how long we wait for auto-negotiation to complete */ -#define FIBER_LINK_UP_LIMIT 50 -#define COPPER_LINK_UP_LIMIT 10 -#define PHY_AUTO_NEG_LIMIT 45 -#define PHY_FORCE_LIMIT 20 -/* Number of 100 microseconds we wait for PCI Express master disable */ -#define MASTER_DISABLE_TIMEOUT 800 -/* Number of milliseconds we wait for PHY configuration done after MAC reset */ -#define PHY_CFG_TIMEOUT 100 -/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ -#define MDIO_OWNERSHIP_TIMEOUT 10 -/* Number of milliseconds for NVM auto read done after MAC reset. */ -#define AUTO_READ_DONE_TIMEOUT 10 - -/* Flow Control */ -#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ -#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ -#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ - -/* Transmit Configuration Word */ -#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ -#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ -#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ -#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ -#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ - -/* Receive Configuration Word */ -#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ -#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ -#define E1000_RXCW_C 0x20000000 /* Receive config */ -#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ - -/* PCI Express Control */ -#define E1000_GCR_RXD_NO_SNOOP 0x00000001 -#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 -#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 -#define E1000_GCR_TXD_NO_SNOOP 0x00000008 -#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 -#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 - -#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ - E1000_GCR_RXDSCW_NO_SNOOP | \ - E1000_GCR_RXDSCR_NO_SNOOP | \ - E1000_GCR_TXD_NO_SNOOP | \ - E1000_GCR_TXDSCW_NO_SNOOP | \ - E1000_GCR_TXDSCR_NO_SNOOP) - -/* PHY Control Register */ -#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -#define MII_CR_POWER_DOWN 0x0800 /* Power down */ -#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -#define MII_CR_SPEED_1000 0x0040 -#define MII_CR_SPEED_100 0x2000 -#define MII_CR_SPEED_10 0x0000 - -/* PHY Status Register */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ - -/* Autoneg Advertisement Register */ -#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ -#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ -#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half 
Duplex Capable */ -#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ -#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ -#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ - -/* Link Partner Ability Register (Base Page) */ -#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ -#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ - -/* Autoneg Expansion Register */ -#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ - -/* 1000BASE-T Control Register */ -#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ -#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ - /* 0=DTE device */ -#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ - /* 0=Configure PHY as Slave */ -#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ - /* 0=Automatic Master/Slave config */ - -/* 1000BASE-T Status Register */ -#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ -#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ - - -/* PHY 1000 MII Register/Bit Definitions */ -/* PHY Registers defined by IEEE */ -#define PHY_CONTROL 0x00 /* Control Register */ -#define PHY_STATUS 0x01 /* Status Register */ -#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ -#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ -#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ -#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ -#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ -#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ -#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ -#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ - -#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ - -/* NVM Control */ -#define E1000_EECD_SK 0x00000001 /* NVM Clock */ -#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ -#define E1000_EECD_DI 0x00000004 /* NVM Data In */ -#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ -#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ -#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ -#define E1000_EECD_PRES 0x00000100 /* NVM Present */ -#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ -/* NVM Addressing bits based on type (0-small, 1-large) */ -#define E1000_EECD_ADDR_BITS 0x00000400 -#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ -#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ -#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ -#define E1000_EECD_SIZE_EX_SHIFT 11 -#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ -#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ -#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ -#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) - -#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ -#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ -#define E1000_NVM_RW_REG_START 1 /* Start operation */ -#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ -#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ -#define E1000_FLASH_UPDATES 2000 - -/* NVM Word Offsets */ -#define NVM_COMPAT 0x0003 -#define NVM_ID_LED_SETTINGS 0x0004 -#define NVM_INIT_CONTROL2_REG 0x000F -#define NVM_INIT_CONTROL3_PORT_B 0x0014 -#define NVM_INIT_3GIO_3 0x001A -#define 
NVM_INIT_CONTROL3_PORT_A 0x0024 -#define NVM_CFG 0x0012 -#define NVM_ALT_MAC_ADDR_PTR 0x0037 -#define NVM_CHECKSUM_REG 0x003F - -#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ - -#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ -#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ - -/* Mask bits for fields in Word 0x0f of the NVM */ -#define NVM_WORD0F_PAUSE_MASK 0x3000 -#define NVM_WORD0F_PAUSE 0x1000 -#define NVM_WORD0F_ASM_DIR 0x2000 - -/* Mask bits for fields in Word 0x1a of the NVM */ -#define NVM_WORD1A_ASPM_MASK 0x000C - -/* Mask bits for fields in Word 0x03 of the EEPROM */ -#define NVM_COMPAT_LOM 0x0800 - -/* length of string needed to store PBA number */ -#define E1000_PBANUM_LENGTH 11 - -/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ -#define NVM_SUM 0xBABA - -/* PBA (printed board assembly) number words */ -#define NVM_PBA_OFFSET_0 8 -#define NVM_PBA_OFFSET_1 9 -#define NVM_PBA_PTR_GUARD 0xFAFA -#define NVM_WORD_SIZE_BASE_SHIFT 6 - -/* NVM Commands - SPI */ -#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ -#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ -#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ -#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ -#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ -#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ - -/* SPI NVM Status Register */ -#define NVM_STATUS_RDY_SPI 0x01 - -/* Word definitions for ID LED Settings */ -#define ID_LED_RESERVED_0000 0x0000 -#define ID_LED_RESERVED_FFFF 0xFFFF -#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ - (ID_LED_OFF1_OFF2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) -#define ID_LED_DEF1_DEF2 0x1 -#define ID_LED_DEF1_ON2 0x2 -#define ID_LED_DEF1_OFF2 0x3 -#define ID_LED_ON1_DEF2 0x4 -#define ID_LED_ON1_ON2 0x5 -#define ID_LED_ON1_OFF2 0x6 -#define ID_LED_OFF1_DEF2 0x7 -#define ID_LED_OFF1_ON2 0x8 -#define ID_LED_OFF1_OFF2 0x9 - -#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF -#define IGP_ACTIVITY_LED_ENABLE 0x0300 -#define IGP_LED3_MODE 0x07000000 - -/* PCI/PCI-X/PCI-EX Config space */ -#define PCI_HEADER_TYPE_REGISTER 0x0E -#define PCIE_LINK_STATUS 0x12 - -#define PCI_HEADER_TYPE_MULTIFUNC 0x80 -#define PCIE_LINK_WIDTH_MASK 0x3F0 -#define PCIE_LINK_WIDTH_SHIFT 4 - -#define PHY_REVISION_MASK 0xFFFFFFF0 -#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ -#define MAX_PHY_MULTI_PAGE_REG 0xF - -/* Bit definitions for valid PHY IDs. 
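Each ID below is matched after reading the two IEEE ID registers and discarding the silicon revision carried in the low four bits (the complement of PHY_REVISION_MASK above); a minimal sketch, using the e1e_rphy() wrapper from e1000.h with error checking omitted:

/* Sketch: assemble a 32-bit PHY ID from PHY_ID1/PHY_ID2 and compare it. */
static bool e1000_phy_is_m88e1000_sketch(struct e1000_hw *hw)
{
	u16 id1, id2;
	u32 phy_id;

	e1e_rphy(hw, PHY_ID1, &id1);
	e1e_rphy(hw, PHY_ID2, &id2);

	/* low 4 bits of ID2 are the revision; mask them off */
	phy_id = ((u32)id1 << 16) | ((u32)id2 & PHY_REVISION_MASK);

	return phy_id == M88E1000_E_PHY_ID;
}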
*/ -/* - * I = Integrated - * E = External - */ -#define M88E1000_E_PHY_ID 0x01410C50 -#define M88E1000_I_PHY_ID 0x01410C30 -#define M88E1011_I_PHY_ID 0x01410C20 -#define IGP01E1000_I_PHY_ID 0x02A80380 -#define M88E1111_I_PHY_ID 0x01410CC0 -#define GG82563_E_PHY_ID 0x01410CA0 -#define IGP03E1000_E_PHY_ID 0x02A80390 -#define IFE_E_PHY_ID 0x02A80330 -#define IFE_PLUS_E_PHY_ID 0x02A80320 -#define IFE_C_E_PHY_ID 0x02A80310 -#define BME1000_E_PHY_ID 0x01410CB0 -#define BME1000_E_PHY_ID_R2 0x01410CB1 -#define I82577_E_PHY_ID 0x01540050 -#define I82578_E_PHY_ID 0x004DD040 -#define I82579_E_PHY_ID 0x01540090 - -/* M88E1000 Specific Registers */ -#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ -#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ -#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ - -#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ -#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ - -/* M88E1000 PHY Specific Control Register */ -#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ -#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ - /* Manual MDI configuration */ -#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ -/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ -#define M88E1000_PSCR_AUTO_X_1000T 0x0040 -/* Auto crossover enabled all speeds */ -#define M88E1000_PSCR_AUTO_X_MODE 0x0060 -/* - * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold) - * 0=Normal 10BASE-T Rx Threshold - */ -#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ - -/* M88E1000 PHY Specific Status Register */ -#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ -#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ -#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ -/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */ -#define M88E1000_PSSR_CABLE_LENGTH 0x0380 -#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ -#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ - -#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 - -/* - * Number of times we will attempt to autonegotiate before downshifting if we - * are the master - */ -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 -/* - * Number of times we will attempt to autonegotiate before downshifting if we - * are the slave - */ -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 -#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ - -/* M88EC018 Rev 2 specific DownShift settings */ -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 - -#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 -#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C - -/* BME1000 PHY Specific Control Register */ -#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ - - -#define PHY_PAGE_SHIFT 5 -#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ - ((reg) & MAX_PHY_REG_ADDRESS)) - -/* - * Bits... 
- * 15-5: page - * 4-0: register offset - */ -#define GG82563_PAGE_SHIFT 5 -#define GG82563_REG(page, reg) \ - (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) -#define GG82563_MIN_ALT_REG 30 - -/* GG82563 Specific Registers */ -#define GG82563_PHY_SPEC_CTRL \ - GG82563_REG(0, 16) /* PHY Specific Control */ -#define GG82563_PHY_PAGE_SELECT \ - GG82563_REG(0, 22) /* Page Select */ -#define GG82563_PHY_SPEC_CTRL_2 \ - GG82563_REG(0, 26) /* PHY Specific Control 2 */ -#define GG82563_PHY_PAGE_SELECT_ALT \ - GG82563_REG(0, 29) /* Alternate Page Select */ - -#define GG82563_PHY_MAC_SPEC_CTRL \ - GG82563_REG(2, 21) /* MAC Specific Control Register */ - -#define GG82563_PHY_DSP_DISTANCE \ - GG82563_REG(5, 26) /* DSP Distance */ - -/* Page 193 - Port Control Registers */ -#define GG82563_PHY_KMRN_MODE_CTRL \ - GG82563_REG(193, 16) /* Kumeran Mode Control */ -#define GG82563_PHY_PWR_MGMT_CTRL \ - GG82563_REG(193, 20) /* Power Management Control */ - -/* Page 194 - KMRN Registers */ -#define GG82563_PHY_INBAND_CTRL \ - GG82563_REG(194, 18) /* Inband Control */ - -/* MDI Control */ -#define E1000_MDIC_REG_SHIFT 16 -#define E1000_MDIC_PHY_SHIFT 21 -#define E1000_MDIC_OP_WRITE 0x04000000 -#define E1000_MDIC_OP_READ 0x08000000 -#define E1000_MDIC_READY 0x10000000 -#define E1000_MDIC_ERROR 0x40000000 - -/* SerDes Control */ -#define E1000_GEN_POLL_TIMEOUT 640 - -#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h deleted file mode 100644 index 638d175..0000000 --- a/drivers/net/e1000e/e1000.h +++ /dev/null @@ -1,736 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* Linux PRO/1000 Ethernet Driver main header file */ - -#ifndef _E1000_H_ -#define _E1000_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hw.h" - -struct e1000_info; - -#define e_dbg(format, arg...) \ - netdev_dbg(hw->adapter->netdev, format, ## arg) -#define e_err(format, arg...) \ - netdev_err(adapter->netdev, format, ## arg) -#define e_info(format, arg...) \ - netdev_info(adapter->netdev, format, ## arg) -#define e_warn(format, arg...) \ - netdev_warn(adapter->netdev, format, ## arg) -#define e_notice(format, arg...) 
\ - netdev_notice(adapter->netdev, format, ## arg) - - -/* Interrupt modes, as used by the IntMode parameter */ -#define E1000E_INT_MODE_LEGACY 0 -#define E1000E_INT_MODE_MSI 1 -#define E1000E_INT_MODE_MSIX 2 - -/* Tx/Rx descriptor defines */ -#define E1000_DEFAULT_TXD 256 -#define E1000_MAX_TXD 4096 -#define E1000_MIN_TXD 64 - -#define E1000_DEFAULT_RXD 256 -#define E1000_MAX_RXD 4096 -#define E1000_MIN_RXD 64 - -#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ -#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ - -/* Early Receive defines */ -#define E1000_ERT_2048 0x100 - -#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ - -/* How many Tx Descriptors do we need to call netif_wake_queue ? */ -/* How many Rx Buffers do we bundle into one write to the hardware ? */ -#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ - -#define AUTO_ALL_MODES 0 -#define E1000_EEPROM_APME 0x0400 - -#define E1000_MNG_VLAN_NONE (-1) - -/* Number of packet split data buffers (not including the header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) - -#define DEFAULT_JUMBO 9234 - -/* BM/HV Specific Registers */ -#define BM_PORT_CTRL_PAGE 769 - -#define PHY_UPPER_SHIFT 21 -#define BM_PHY_REG(page, reg) \ - (((reg) & MAX_PHY_REG_ADDRESS) |\ - (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ - (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) - -/* PHY Wakeup Registers and defines */ -#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) -#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) -#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) -#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) -#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) -#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) -#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) -#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) -#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) -#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) - -#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ -#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ -#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ -#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ -#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ -#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ -#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ - -#define HV_STATS_PAGE 778 -#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */ -#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) -#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */ -#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) -#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. 
Count */ -#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) -#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */ -#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) -#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */ -#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) -#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ -#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) -#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */ -#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) - -#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ - -/* BM PHY Copper Specific Status */ -#define BM_CS_STATUS 17 -#define BM_CS_STATUS_LINK_UP 0x0400 -#define BM_CS_STATUS_RESOLVED 0x0800 -#define BM_CS_STATUS_SPEED_MASK 0xC000 -#define BM_CS_STATUS_SPEED_1000 0x8000 - -/* 82577 Mobile Phy Status Register */ -#define HV_M_STATUS 26 -#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 -#define HV_M_STATUS_SPEED_MASK 0x0300 -#define HV_M_STATUS_SPEED_1000 0x0200 -#define HV_M_STATUS_LINK_UP 0x0040 - -/* Time to wait before putting the device into D3 if there's no link (in ms). */ -#define LINK_TIMEOUT 100 - -#define DEFAULT_RDTR 0 -#define DEFAULT_RADV 8 -#define BURST_RDTR 0x20 -#define BURST_RADV 0x20 - -/* - * in the case of WTHRESH, it appears at least the 82571/2 hardware - * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when - * WTHRESH=4, and since we want 64 bytes at a time written back, set - * it to 5 - */ -#define E1000_TXDCTL_DMA_BURST_ENABLE \ - (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ - E1000_TXDCTL_COUNT_DESC | \ - (5 << 16) | /* wthresh must be +1 more than desired */\ - (1 << 8) | /* hthresh */ \ - 0x1f) /* pthresh */ - -#define E1000_RXDCTL_DMA_BURST_ENABLE \ - (0x01000000 | /* set descriptor granularity */ \ - (4 << 16) | /* set writeback threshold */ \ - (4 << 8) | /* set prefetch threshold */ \ - 0x20) /* set hthresh */ - -#define E1000_TIDV_FPD (1 << 31) -#define E1000_RDTR_FPD (1 << 31) - -enum e1000_boards { - board_82571, - board_82572, - board_82573, - board_82574, - board_82583, - board_80003es2lan, - board_ich8lan, - board_ich9lan, - board_ich10lan, - board_pchlan, - board_pch2lan, -}; - -struct e1000_ps_page { - struct page *page; - u64 dma; /* must be u64 - written to hw */ -}; - -/* - * wrappers around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer - */ -struct e1000_buffer { - dma_addr_t dma; - struct sk_buff *skb; - union { - /* Tx */ - struct { - unsigned long time_stamp; - u16 length; - u16 next_to_watch; - unsigned int segs; - unsigned int bytecount; - u16 mapped_as_page; - }; - /* Rx */ - struct { - /* arrays of page information for packet split */ - struct e1000_ps_page *ps_pages; - struct page *page; - }; - }; -}; - -struct e1000_ring { - void *desc; /* pointer to ring memory */ - dma_addr_t dma; /* phys address of ring */ - unsigned int size; /* length of ring in bytes */ - unsigned int count; /* number of desc. 
in ring */ - - u16 next_to_use; - u16 next_to_clean; - - u16 head; - u16 tail; - - /* array of buffer information structs */ - struct e1000_buffer *buffer_info; - - char name[IFNAMSIZ + 5]; - u32 ims_val; - u32 itr_val; - u16 itr_register; - int set_itr; - - struct sk_buff *rx_skb_top; -}; - -/* PHY register snapshot values */ -struct e1000_phy_regs { - u16 bmcr; /* basic mode control register */ - u16 bmsr; /* basic mode status register */ - u16 advertise; /* auto-negotiation advertisement */ - u16 lpa; /* link partner ability register */ - u16 expansion; /* auto-negotiation expansion reg */ - u16 ctrl1000; /* 1000BASE-T control register */ - u16 stat1000; /* 1000BASE-T status register */ - u16 estatus; /* extended status register */ -}; - -/* board specific private data structure */ -struct e1000_adapter { - struct timer_list watchdog_timer; - struct timer_list phy_info_timer; - struct timer_list blink_timer; - - struct work_struct reset_task; - struct work_struct watchdog_task; - - const struct e1000_info *ei; - - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u32 bd_number; - u32 rx_buffer_len; - u16 mng_vlan_id; - u16 link_speed; - u16 link_duplex; - u16 eeprom_vers; - - /* track device up/down/testing state */ - unsigned long state; - - /* Interrupt Throttle Rate */ - u32 itr; - u32 itr_setting; - u16 tx_itr; - u16 rx_itr; - - /* - * Tx - */ - struct e1000_ring *tx_ring /* One per active queue */ - ____cacheline_aligned_in_smp; - - struct napi_struct napi; - - unsigned int restart_queue; - u32 txd_cmd; - - bool detect_tx_hung; - u8 tx_timeout_factor; - - u32 tx_int_delay; - u32 tx_abs_int_delay; - - unsigned int total_tx_bytes; - unsigned int total_tx_packets; - unsigned int total_rx_bytes; - unsigned int total_rx_packets; - - /* Tx stats */ - u64 tpt_old; - u64 colc_old; - u32 gotc; - u64 gotc_old; - u32 tx_timeout_count; - u32 tx_fifo_head; - u32 tx_head_addr; - u32 tx_fifo_size; - u32 tx_dma_failed; - - /* - * Rx - */ - bool (*clean_rx) (struct e1000_adapter *adapter, - int *work_done, int work_to_do) - ____cacheline_aligned_in_smp; - void (*alloc_rx_buf) (struct e1000_adapter *adapter, - int cleaned_count, gfp_t gfp); - struct e1000_ring *rx_ring; - - u32 rx_int_delay; - u32 rx_abs_int_delay; - - /* Rx stats */ - u64 hw_csum_err; - u64 hw_csum_good; - u64 rx_hdr_split; - u32 gorc; - u64 gorc_old; - u32 alloc_rx_buff_failed; - u32 rx_dma_failed; - - unsigned int rx_ps_pages; - u16 rx_ps_bsize0; - u32 max_frame_size; - u32 min_frame_size; - - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - /* structs defined in e1000_hw.h */ - struct e1000_hw hw; - - spinlock_t stats64_lock; - struct e1000_hw_stats stats; - struct e1000_phy_info phy_info; - struct e1000_phy_stats phy_stats; - - /* Snapshot of PHY registers */ - struct e1000_phy_regs phy_regs; - - struct e1000_ring test_tx_ring; - struct e1000_ring test_rx_ring; - u32 test_icr; - - u32 msg_enable; - unsigned int num_vectors; - struct msix_entry *msix_entries; - int int_mode; - u32 eiac_mask; - - u32 eeprom_wol; - u32 wol; - u32 pba; - u32 max_hw_frame_size; - - bool fc_autoneg; - - unsigned int flags; - unsigned int flags2; - struct work_struct downshift_task; - struct work_struct update_phy_task; - struct work_struct print_hang_task; - - bool idle_check; - int phy_hang_count; -}; - -struct e1000_info { - enum e1000_mac_type mac; - unsigned int flags; - unsigned int flags2; - u32 pba; - u32 max_hw_frame_size; - s32 (*get_variants)(struct e1000_adapter *); - struct e1000_mac_operations 
*mac_ops; - struct e1000_phy_operations *phy_ops; - struct e1000_nvm_operations *nvm_ops; -}; - -/* hardware capability, feature, and workaround flags */ -#define FLAG_HAS_AMT (1 << 0) -#define FLAG_HAS_FLASH (1 << 1) -#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) -#define FLAG_HAS_WOL (1 << 3) -#define FLAG_HAS_ERT (1 << 4) -#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) -#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) -#define FLAG_HAS_JUMBO_FRAMES (1 << 7) -#define FLAG_READ_ONLY_NVM (1 << 8) -#define FLAG_IS_ICH (1 << 9) -#define FLAG_HAS_MSIX (1 << 10) -#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) -#define FLAG_IS_QUAD_PORT_A (1 << 12) -#define FLAG_IS_QUAD_PORT (1 << 13) -#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14) -#define FLAG_APME_IN_WUC (1 << 15) -#define FLAG_APME_IN_CTRL3 (1 << 16) -#define FLAG_APME_CHECK_PORT_B (1 << 17) -#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) -#define FLAG_NO_WAKE_UCAST (1 << 19) -#define FLAG_MNG_PT_ENABLED (1 << 20) -#define FLAG_RESET_OVERWRITES_LAA (1 << 21) -#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) -#define FLAG_TARC_SET_BIT_ZERO (1 << 23) -#define FLAG_RX_NEEDS_RESTART (1 << 24) -#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) -#define FLAG_SMART_POWER_DOWN (1 << 26) -#define FLAG_MSI_ENABLED (1 << 27) -#define FLAG_RX_CSUM_ENABLED (1 << 28) -#define FLAG_TSO_FORCE (1 << 29) -#define FLAG_RX_RESTART_NOW (1 << 30) -#define FLAG_MSI_TEST_FAILED (1 << 31) - -/* CRC Stripping defines */ -#define FLAG2_CRC_STRIPPING (1 << 0) -#define FLAG2_HAS_PHY_WAKEUP (1 << 1) -#define FLAG2_IS_DISCARDING (1 << 2) -#define FLAG2_DISABLE_ASPM_L1 (1 << 3) -#define FLAG2_HAS_PHY_STATS (1 << 4) -#define FLAG2_HAS_EEE (1 << 5) -#define FLAG2_DMA_BURST (1 << 6) -#define FLAG2_DISABLE_ASPM_L0S (1 << 7) -#define FLAG2_DISABLE_AIM (1 << 8) -#define FLAG2_CHECK_PHY_HANG (1 << 9) - -#define E1000_RX_DESC_PS(R, i) \ - (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) -#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) -#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) -#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) -#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) - -enum e1000_state_t { - __E1000_TESTING, - __E1000_RESETTING, - __E1000_DOWN -}; - -enum latency_range { - lowest_latency = 0, - low_latency = 1, - bulk_latency = 2, - latency_invalid = 255 -}; - -extern char e1000e_driver_name[]; -extern const char e1000e_driver_version[]; - -extern void e1000e_check_options(struct e1000_adapter *adapter); -extern void e1000e_set_ethtool_ops(struct net_device *netdev); - -extern int e1000e_up(struct e1000_adapter *adapter); -extern void e1000e_down(struct e1000_adapter *adapter); -extern void e1000e_reinit_locked(struct e1000_adapter *adapter); -extern void e1000e_reset(struct e1000_adapter *adapter); -extern void e1000e_power_up_phy(struct e1000_adapter *adapter); -extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); -extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); -extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); -extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); -extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 - *stats); -extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); -extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); -extern void e1000e_get_hw_control(struct e1000_adapter *adapter); -extern void 
e1000e_release_hw_control(struct e1000_adapter *adapter); - -extern unsigned int copybreak; - -extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); - -extern struct e1000_info e1000_82571_info; -extern struct e1000_info e1000_82572_info; -extern struct e1000_info e1000_82573_info; -extern struct e1000_info e1000_82574_info; -extern struct e1000_info e1000_82583_info; -extern struct e1000_info e1000_ich8_info; -extern struct e1000_info e1000_ich9_info; -extern struct e1000_info e1000_ich10_info; -extern struct e1000_info e1000_pch_info; -extern struct e1000_info e1000_pch2_info; -extern struct e1000_info e1000_es2_info; - -extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, - u32 pba_num_size); - -extern s32 e1000e_commit_phy(struct e1000_hw *hw); - -extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); - -extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); -extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); - -extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); -extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, - bool state); -extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); -extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); -extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); -extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); -extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); -extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); -extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); - -extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); -extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); -extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); -extern s32 e1000e_setup_led_generic(struct e1000_hw *hw); -extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); -extern s32 e1000e_led_on_generic(struct e1000_hw *hw); -extern s32 e1000e_led_off_generic(struct e1000_hw *hw); -extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); -extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); -extern void e1000_set_lan_id_single_port(struct e1000_hw *hw); -extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); -extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); -extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); -extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); -extern s32 e1000e_id_led_init(struct e1000_hw *hw); -extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); -extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); -extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); -extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); -extern s32 e1000e_setup_link(struct e1000_hw *hw); -extern void e1000_clear_vfta_generic(struct e1000_hw *hw); -extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); -extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, - u8 *mc_addr_list, - u32 mc_addr_count); -extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); -extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); -extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); -extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); -extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); -extern void 
e1000e_config_collision_dist(struct e1000_hw *hw); -extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); -extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); -extern s32 e1000e_blink_led_generic(struct e1000_hw *hw); -extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); -extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); -extern void e1000e_reset_adaptive(struct e1000_hw *hw); -extern void e1000e_update_adaptive(struct e1000_hw *hw); - -extern s32 e1000e_setup_copper_link(struct e1000_hw *hw); -extern s32 e1000e_get_phy_id(struct e1000_hw *hw); -extern void e1000e_put_hw_semaphore(struct e1000_hw *hw); -extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); -extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); -extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); -extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); -extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); -extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, - u16 *data); -extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); -extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); -extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, - u16 data); -extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); -extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); -extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); -extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); -extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); -extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); -extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); -extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); -extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, - u16 *phy_reg); -extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, - u16 *phy_reg); -extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); -extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); -extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, - u16 data); -extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, - u16 *data); -extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, - u32 usec_interval, bool *success); -extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); -extern void e1000_power_up_phy_copper(struct e1000_hw *hw); -extern void e1000_power_down_phy_copper(struct e1000_hw *hw); -extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000e_check_downshift(struct e1000_hw *hw); -extern s32 
e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); -extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, - u16 *data); -extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, - u16 *data); -extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); -extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, - u16 data); -extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, - u16 data); -extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); -extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); -extern s32 e1000_check_polarity_82577(struct e1000_hw *hw); -extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw); -extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); -extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw); - -extern s32 e1000_check_polarity_m88(struct e1000_hw *hw); -extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw); -extern s32 e1000_check_polarity_ife(struct e1000_hw *hw); -extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); -extern s32 e1000_check_polarity_igp(struct e1000_hw *hw); -extern bool e1000_check_phy_82574(struct e1000_hw *hw); - -static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) -{ - return hw->phy.ops.reset(hw); -} - -static inline s32 e1000_check_reset_block(struct e1000_hw *hw) -{ - return hw->phy.ops.check_reset_block(hw); -} - -static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) -{ - return hw->phy.ops.read_reg(hw, offset, data); -} - -static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) -{ - return hw->phy.ops.write_reg(hw, offset, data); -} - -static inline s32 e1000_get_cable_length(struct e1000_hw *hw) -{ - return hw->phy.ops.get_cable_length(hw); -} - -extern s32 e1000e_acquire_nvm(struct e1000_hw *hw); -extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); -extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); -extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); -extern void e1000e_release_nvm(struct e1000_hw *hw); -extern void e1000e_reload_nvm(struct e1000_hw *hw); -extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); - -static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) -{ - if (hw->mac.ops.read_mac_addr) - return hw->mac.ops.read_mac_addr(hw); - - return e1000_read_mac_addr_generic(hw); -} - -static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) -{ - return hw->nvm.ops.validate(hw); -} - -static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) -{ - return hw->nvm.ops.update(hw); -} - -static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - return hw->nvm.ops.read(hw, offset, words, data); -} - -static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - return hw->nvm.ops.write(hw, offset, words, data); -} - -static inline s32 e1000_get_phy_info(struct e1000_hw *hw) -{ - return hw->phy.ops.get_info(hw); -} - -static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw) -{ - return hw->mac.ops.check_mng_mode(hw); -} - -extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); -extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); -extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 
*buffer, u16 length); - -static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) -{ - return readl(hw->hw_addr + reg); -} - -static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) -{ - writel(val, hw->hw_addr + reg); -} - -#endif /* _E1000_H_ */ diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c deleted file mode 100644 index e4f4225..0000000 --- a/drivers/net/e1000e/es2lan.c +++ /dev/null @@ -1,1516 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* - * 80003ES2LAN Gigabit Ethernet Controller (Copper) - * 80003ES2LAN Gigabit Ethernet Controller (Serdes) - */ - -#include "e1000.h" - -#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 -#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 -#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 -#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F - -#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 -#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 -#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 - -#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 -#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 -#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 - -#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C -#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 - -#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ -#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 - -#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 -#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 - -/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ -#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. 
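Registers on this PHY are addressed as (page, register) pairs encoded by GG82563_REG() earlier in e1000_defines.h; before touching a register outside page 0 the driver first writes the page number to the page-select register. A minimal sketch, with semaphore handling and error checks omitted:

/* Sketch: read one paged GG82563 register over MDIC. */
u16 page = (u16)(offset >> GG82563_PAGE_SHIFT);
u16 data;

e1000e_write_phy_reg_mdic(hw, GG82563_PHY_PAGE_SELECT, page);
e1000e_read_phy_reg_mdic(hw, offset & MAX_PHY_REG_ADDRESS, &data);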
*/ -#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 -#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ -#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ -#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ - -/* PHY Specific Control Register 2 (Page 0, Register 26) */ -#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 - /* 1=Reverse Auto-Negotiation */ - -/* MAC Specific Control Register (Page 2, Register 21) */ -/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ -#define GG82563_MSCR_TX_CLK_MASK 0x0007 -#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 -#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 -#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 - -#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ - -/* DSP Distance Register (Page 5, Register 26) */ -#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M - 1 = 50-80M - 2 = 80-110M - 3 = 110-140M - 4 = >140M */ - -/* Kumeran Mode Control Register (Page 193, Register 16) */ -#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 - -/* Max number of times Kumeran read/write should be validated */ -#define GG82563_MAX_KMRN_RETRY 0x5 - -/* Power Management Control Register (Page 193, Register 20) */ -#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 - /* 1=Enable SERDES Electrical Idle */ - -/* In-Band Control Register (Page 194, Register 18) */ -#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ - -/* - * A table for the GG82563 cable length where the range is defined - * with a lower bound at "index" and the upper bound at - * "index + 5". - */ -static const u16 e1000_gg82563_cable_length_table[] = { - 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; -#define GG82563_CABLE_LENGTH_TABLE_SIZE \ - ARRAY_SIZE(e1000_gg82563_cable_length_table) - -static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); -static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); -static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); -static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); -static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); -static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); -static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); -static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw); -static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, - u16 *data); -static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, - u16 data); -static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); - -/** - * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. - * @hw: pointer to the HW structure - **/ -static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - - if (hw->phy.media_type != e1000_media_type_copper) { - phy->type = e1000_phy_none; - return 0; - } else { - phy->ops.power_up = e1000_power_up_phy_copper; - phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; - } - - phy->addr = 1; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; - phy->type = e1000_phy_gg82563; - - /* This can only be done after all function pointers are setup. */ - ret_val = e1000e_get_phy_id(hw); - - /* Verify phy id */ - if (phy->id != GG82563_E_PHY_ID) - return -E1000_ERR_PHY; - - return ret_val; -} - -/** - * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. 
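As the cable length table comment above explains, the hardware reports cable length as an index: the entry at that index is the lower bound in meters and the entry five slots later is the upper bound. A minimal sketch of the lookup, reading the DSP distance register with e1e_rphy() and omitting error handling:

/* Sketch: turn the DSP distance reading into a min/max length estimate. */
u16 dsp, index, min_length, max_length;

e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &dsp);
index = dsp & GG82563_DSPD_CABLE_LENGTH;

if (index < GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
	min_length = e1000_gg82563_cable_length_table[index];
	max_length = e1000_gg82563_cable_length_table[index + 5];
}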
- * @hw: pointer to the HW structure - **/ -static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = er32(EECD); - u16 size; - - nvm->opcode_bits = 8; - nvm->delay_usec = 1; - switch (nvm->override) { - case e1000_nvm_override_spi_large: - nvm->page_size = 32; - nvm->address_bits = 16; - break; - case e1000_nvm_override_spi_small: - nvm->page_size = 8; - nvm->address_bits = 8; - break; - default: - nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; - nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; - break; - } - - nvm->type = e1000_nvm_eeprom_spi; - - size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); - - /* - * Added to a constant, "size" becomes the left-shift value - * for setting word_size. - */ - size += NVM_WORD_SIZE_BASE_SHIFT; - - /* EEPROM access above 16k is unsupported */ - if (size > 14) - size = 14; - nvm->word_size = 1 << size; - - return 0; -} - -/** - * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. - * @hw: pointer to the HW structure - **/ -static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_mac_info *mac = &hw->mac; - struct e1000_mac_operations *func = &mac->ops; - - /* Set media type */ - switch (adapter->pdev->device) { - case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: - hw->phy.media_type = e1000_media_type_internal_serdes; - break; - default: - hw->phy.media_type = e1000_media_type_copper; - break; - } - - /* Set mta register count */ - mac->mta_reg_count = 128; - /* Set rar entry count */ - mac->rar_entry_count = E1000_RAR_ENTRIES; - /* FWSM register */ - mac->has_fwsm = true; - /* ARC supported; valid only if manageability features are enabled. */ - mac->arc_subsystem_valid = - (er32(FWSM) & E1000_FWSM_MODE_MASK) - ? true : false; - /* Adaptive IFS not supported */ - mac->adaptive_ifs = false; - - /* check for link */ - switch (hw->phy.media_type) { - case e1000_media_type_copper: - func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; - func->check_for_link = e1000e_check_for_copper_link; - break; - case e1000_media_type_fiber: - func->setup_physical_interface = e1000e_setup_fiber_serdes_link; - func->check_for_link = e1000e_check_for_fiber_link; - break; - case e1000_media_type_internal_serdes: - func->setup_physical_interface = e1000e_setup_fiber_serdes_link; - func->check_for_link = e1000e_check_for_serdes_link; - break; - default: - return -E1000_ERR_CONFIG; - break; - } - - /* set lan id for port to determine which phy lock to use */ - hw->mac.ops.set_lan_id(hw); - - return 0; -} - -static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - s32 rc; - - rc = e1000_init_mac_params_80003es2lan(adapter); - if (rc) - return rc; - - rc = e1000_init_nvm_params_80003es2lan(hw); - if (rc) - return rc; - - rc = e1000_init_phy_params_80003es2lan(hw); - if (rc) - return rc; - - return 0; -} - -/** - * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY - * @hw: pointer to the HW structure - * - * A wrapper to acquire access rights to the correct PHY. - **/ -static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) -{ - u16 mask; - - mask = hw->bus.func ? 
E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; - return e1000_acquire_swfw_sync_80003es2lan(hw, mask); -} - -/** - * e1000_release_phy_80003es2lan - Release rights to access PHY - * @hw: pointer to the HW structure - * - * A wrapper to release access rights to the correct PHY. - **/ -static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) -{ - u16 mask; - - mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; - e1000_release_swfw_sync_80003es2lan(hw, mask); -} - -/** - * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register - * @hw: pointer to the HW structure - * - * Acquire the semaphore to access the Kumeran interface. - * - **/ -static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) -{ - u16 mask; - - mask = E1000_SWFW_CSR_SM; - - return e1000_acquire_swfw_sync_80003es2lan(hw, mask); -} - -/** - * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register - * @hw: pointer to the HW structure - * - * Release the semaphore used to access the Kumeran interface - **/ -static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) -{ - u16 mask; - - mask = E1000_SWFW_CSR_SM; - - e1000_release_swfw_sync_80003es2lan(hw, mask); -} - -/** - * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM - * @hw: pointer to the HW structure - * - * Acquire the semaphore to access the EEPROM. - **/ -static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) -{ - s32 ret_val; - - ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); - if (ret_val) - return ret_val; - - ret_val = e1000e_acquire_nvm(hw); - - if (ret_val) - e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); - - return ret_val; -} - -/** - * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM - * @hw: pointer to the HW structure - * - * Release the semaphore used to access the EEPROM. - **/ -static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) -{ - e1000e_release_nvm(hw); - e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); -} - -/** - * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore - * @hw: pointer to the HW structure - * @mask: specifies which semaphore to acquire - * - * Acquire the SW/FW semaphore to access the PHY or NVM. The mask - * will also specify which port we're acquiring the lock for. - **/ -static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) -{ - u32 swfw_sync; - u32 swmask = mask; - u32 fwmask = mask << 16; - s32 i = 0; - s32 timeout = 50; - - while (i < timeout) { - if (e1000e_get_hw_semaphore(hw)) - return -E1000_ERR_SWFW_SYNC; - - swfw_sync = er32(SW_FW_SYNC); - if (!(swfw_sync & (fwmask | swmask))) - break; - - /* - * Firmware currently using resource (fwmask) - * or other software thread using resource (swmask) - */ - e1000e_put_hw_semaphore(hw); - mdelay(5); - i++; - } - - if (i == timeout) { - e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); - return -E1000_ERR_SWFW_SYNC; - } - - swfw_sync |= swmask; - ew32(SW_FW_SYNC, swfw_sync); - - e1000e_put_hw_semaphore(hw); - - return 0; -} - -/** - * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore - * @hw: pointer to the HW structure - * @mask: specifies which semaphore to acquire - * - * Release the SW/FW semaphore used to access the PHY or NVM. The mask - * will also specify which port we're releasing the lock for. 
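e1000_acquire_swfw_sync_80003es2lan() above layers two locks: the hardware semaphore serializes access to the SW_FW_SYNC register itself, and within that window the driver checks both its own bit (mask) and the firmware's mirror bit (mask << 16) before claiming the resource. A hedged sketch of the same pattern, with hypothetical helper names:

    #include <stdbool.h>
    #include <stdint.h>

    extern bool take_hw_semaphore(void);   /* assumed primitives */
    extern void put_hw_semaphore(void);
    extern uint32_t read_sw_fw_sync(void);
    extern void write_sw_fw_sync(uint32_t v);
    extern void sleep_ms(unsigned ms);

    static int acquire_resource(uint16_t mask)
    {
            uint32_t swmask = mask, fwmask = (uint32_t)mask << 16;

            for (int i = 0; i < 50; i++) {          /* 50 tries, as above */
                    if (!take_hw_semaphore())
                            return -1;
                    uint32_t sync = read_sw_fw_sync();
                    if (!(sync & (swmask | fwmask))) {
                            write_sw_fw_sync(sync | swmask);  /* claim it */
                            put_hw_semaphore();
                            return 0;
                    }
                    put_hw_semaphore();     /* FW or the other port holds it */
                    sleep_ms(5);
            }
            return -1;                      /* SW_FW_SYNC timeout */
    }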
- **/ -static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) -{ - u32 swfw_sync; - - while (e1000e_get_hw_semaphore(hw) != 0) - ; /* Empty */ - - swfw_sync = er32(SW_FW_SYNC); - swfw_sync &= ~mask; - ew32(SW_FW_SYNC, swfw_sync); - - e1000e_put_hw_semaphore(hw); -} - -/** - * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register - * @hw: pointer to the HW structure - * @offset: offset of the register to read - * @data: pointer to the data returned from the operation - * - * Read the GG82563 PHY register. - **/ -static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, - u32 offset, u16 *data) -{ - s32 ret_val; - u32 page_select; - u16 temp; - - ret_val = e1000_acquire_phy_80003es2lan(hw); - if (ret_val) - return ret_val; - - /* Select Configuration Page */ - if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { - page_select = GG82563_PHY_PAGE_SELECT; - } else { - /* - * Use Alternative Page Select register to access - * registers 30 and 31 - */ - page_select = GG82563_PHY_PAGE_SELECT_ALT; - } - - temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); - ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); - if (ret_val) { - e1000_release_phy_80003es2lan(hw); - return ret_val; - } - - if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { - /* - * The "ready" bit in the MDIC register may be incorrectly set - * before the device has completed the "Page Select" MDI - * transaction. So we wait 200us after each MDI command... - */ - udelay(200); - - /* ...and verify the command was successful. */ - ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); - - if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { - ret_val = -E1000_ERR_PHY; - e1000_release_phy_80003es2lan(hw); - return ret_val; - } - - udelay(200); - - ret_val = e1000e_read_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); - - udelay(200); - } else { - ret_val = e1000e_read_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); - } - - e1000_release_phy_80003es2lan(hw); - - return ret_val; -} - -/** - * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register - * @hw: pointer to the HW structure - * @offset: offset of the register to read - * @data: value to write to the register - * - * Write to the GG82563 PHY register. - **/ -static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, - u32 offset, u16 data) -{ - s32 ret_val; - u32 page_select; - u16 temp; - - ret_val = e1000_acquire_phy_80003es2lan(hw); - if (ret_val) - return ret_val; - - /* Select Configuration Page */ - if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { - page_select = GG82563_PHY_PAGE_SELECT; - } else { - /* - * Use Alternative Page Select register to access - * registers 30 and 31 - */ - page_select = GG82563_PHY_PAGE_SELECT_ALT; - } - - temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); - ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); - if (ret_val) { - e1000_release_phy_80003es2lan(hw); - return ret_val; - } - - if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { - /* - * The "ready" bit in the MDIC register may be incorrectly set - * before the device has completed the "Page Select" MDI - * transaction. So we wait 200us after each MDI command... - */ - udelay(200); - - /* ...and verify the command was successful. 
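Both GG82563 accessors work in two MDI transactions: first write the page number to a page-select register (the alternative one for registers 30 and 31), then access the register offset within that page. Assuming the 5-bit register field implied by MAX_PHY_REG_ADDRESS (and hence a page shift of 5), the offset encoding decomposes like this — constants here are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define REG_BITS  5     /* assumed GG82563 page shift */
    #define REG_MASK  0x1F  /* MAX_PHY_REG_ADDRESS-style 5-bit mask */

    int main(void)
    {
            uint32_t offset = (193u << REG_BITS) | 16;  /* page 193, reg 16 */

            printf("page-select value: %u\n",
                   (unsigned)((uint16_t)offset >> REG_BITS));
            printf("register within page: %u\n", (unsigned)(offset & REG_MASK));
            return 0;
    }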
*/ - ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); - - if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { - e1000_release_phy_80003es2lan(hw); - return -E1000_ERR_PHY; - } - - udelay(200); - - ret_val = e1000e_write_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); - - udelay(200); - } else { - ret_val = e1000e_write_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); - } - - e1000_release_phy_80003es2lan(hw); - - return ret_val; -} - -/** - * e1000_write_nvm_80003es2lan - Write to ESB2 NVM - * @hw: pointer to the HW structure - * @offset: offset of the register to read - * @words: number of words to write - * @data: buffer of data to write to the NVM - * - * Write "words" of data to the ESB2 NVM. - **/ -static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, - u16 words, u16 *data) -{ - return e1000e_write_nvm_spi(hw, offset, words, data); -} - -/** - * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete - * @hw: pointer to the HW structure - * - * Wait a specific amount of time for manageability processes to complete. - * This is a function pointer entry point called by the phy module. - **/ -static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) -{ - s32 timeout = PHY_CFG_TIMEOUT; - u32 mask = E1000_NVM_CFG_DONE_PORT_0; - - if (hw->bus.func == 1) - mask = E1000_NVM_CFG_DONE_PORT_1; - - while (timeout) { - if (er32(EEMNGCTL) & mask) - break; - usleep_range(1000, 2000); - timeout--; - } - if (!timeout) { - e_dbg("MNG configuration cycle has not completed.\n"); - return -E1000_ERR_RESET; - } - - return 0; -} - -/** - * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex - * @hw: pointer to the HW structure - * - * Force the speed and duplex settings onto the PHY. This is a - * function pointer entry point called by the phy module. - **/ -static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) -{ - s32 ret_val; - u16 phy_data; - bool link; - - /* - * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI - * forced whenever speed and duplex are forced. - */ - ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; - ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data); - if (ret_val) - return ret_val; - - e_dbg("GG82563 PSCR: %X\n", phy_data); - - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); - if (ret_val) - return ret_val; - - e1000e_phy_force_speed_duplex_setup(hw, &phy_data); - - /* Reset the phy to commit changes. */ - phy_data |= MII_CR_RESET; - - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); - if (ret_val) - return ret_val; - - udelay(1); - - if (hw->phy.autoneg_wait_to_complete) { - e_dbg("Waiting for forced speed/duplex link " - "on GG82563 phy.\n"); - - ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); - if (ret_val) - return ret_val; - - if (!link) { - /* - * We didn't get link. - * Reset the DSP and cross our fingers. - */ - ret_val = e1000e_phy_reset_dsp(hw); - if (ret_val) - return ret_val; - } - - /* Try once more */ - ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); - if (ret_val) - return ret_val; - } - - ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - /* - * Resetting the phy means we need to verify the TX_CLK corresponds - * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. 
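e1000_get_cfg_done_80003es2lan() above is a bounded poll: it watches the port's CFG_DONE bit in EEMNGCTL and gives up after PHY_CFG_TIMEOUT iterations rather than spinning forever. The idiom, reduced to its core with hypothetical helpers:

    #include <stdbool.h>

    extern bool cfg_done_set(void);  /* assumed: reads the status bit */
    extern void sleep_range_us(unsigned lo, unsigned hi);

    static int wait_cfg_done(int timeout)
    {
            while (timeout--) {
                    if (cfg_done_set())
                            return 0;   /* manageability cycle finished */
                    sleep_range_us(1000, 2000);
            }
            return -1;                  /* never completed: reset error */
    }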
- */ - phy_data &= ~GG82563_MSCR_TX_CLK_MASK; - if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) - phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; - else - phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; - - /* - * In addition, we must re-enable CRS on Tx for both half and full - * duplex. - */ - phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; - ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); - - return ret_val; -} - -/** - * e1000_get_cable_length_80003es2lan - Set approximate cable length - * @hw: pointer to the HW structure - * - * Find the approximate cable length as measured by the GG82563 PHY. - * This is a function pointer entry point called by the phy module. - **/ -static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; - u16 phy_data, index; - - ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); - if (ret_val) - goto out; - - index = phy_data & GG82563_DSPD_CABLE_LENGTH; - - if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) { - ret_val = -E1000_ERR_PHY; - goto out; - } - - phy->min_cable_length = e1000_gg82563_cable_length_table[index]; - phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; - - phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; - -out: - return ret_val; -} - -/** - * e1000_get_link_up_info_80003es2lan - Report speed and duplex - * @hw: pointer to the HW structure - * @speed: pointer to speed buffer - * @duplex: pointer to duplex buffer - * - * Retrieve the current speed and duplex configuration. - **/ -static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, - u16 *duplex) -{ - s32 ret_val; - - if (hw->phy.media_type == e1000_media_type_copper) { - ret_val = e1000e_get_speed_and_duplex_copper(hw, - speed, - duplex); - hw->phy.ops.cfg_on_link_up(hw); - } else { - ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, - speed, - duplex); - } - - return ret_val; -} - -/** - * e1000_reset_hw_80003es2lan - Reset the ESB2 controller - * @hw: pointer to the HW structure - * - * Perform a global reset to the ESB2 controller. - **/ -static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) -{ - u32 ctrl; - s32 ret_val; - - /* - * Prevent the PCI-E bus from sticking if there is no TLP connection - * on the last TLP read/write transaction when MAC is reset. - */ - ret_val = e1000e_disable_pcie_master(hw); - if (ret_val) - e_dbg("PCI-E Master disable polling has failed.\n"); - - e_dbg("Masking off all interrupts\n"); - ew32(IMC, 0xffffffff); - - ew32(RCTL, 0); - ew32(TCTL, E1000_TCTL_PSP); - e1e_flush(); - - usleep_range(10000, 20000); - - ctrl = er32(CTRL); - - ret_val = e1000_acquire_phy_80003es2lan(hw); - e_dbg("Issuing a global reset to MAC\n"); - ew32(CTRL, ctrl | E1000_CTRL_RST); - e1000_release_phy_80003es2lan(hw); - - ret_val = e1000e_get_auto_rd_done(hw); - if (ret_val) - /* We don't want to continue accessing MAC registers. */ - return ret_val; - - /* Clear any pending interrupt events. */ - ew32(IMC, 0xffffffff); - er32(ICR); - - ret_val = e1000_check_alt_mac_addr_generic(hw); - - return ret_val; -} - -/** - * e1000_init_hw_80003es2lan - Initialize the ESB2 controller - * @hw: pointer to the HW structure - * - * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. 
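The TX_CLK fixup above is a classic clear-then-set field update: after a PHY reset the 3-bit Tx clock field must match the forced link speed (2.5 MHz for 10 Mb/s, 25 MHz otherwise), and CRS-on-Tx must be re-asserted. In isolation, with constants copied from the GG82563_MSCR_* defines earlier in this file:

    #include <stdint.h>

    #define MSCR_TX_CLK_MASK       0x0007
    #define MSCR_TX_CLK_10_2_5     0x0004
    #define MSCR_TX_CLK_100_25     0x0005
    #define MSCR_ASSERT_CRS_ON_TX  0x0010

    static uint16_t fixup_tx_clk(uint16_t mscr, int forced_10mbps)
    {
            mscr &= ~MSCR_TX_CLK_MASK;  /* clear the 3-bit clock field */
            mscr |= forced_10mbps ? MSCR_TX_CLK_10_2_5 : MSCR_TX_CLK_100_25;
            return mscr | MSCR_ASSERT_CRS_ON_TX;
    }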
- **/ -static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 reg_data; - s32 ret_val; - u16 kum_reg_data; - u16 i; - - e1000_initialize_hw_bits_80003es2lan(hw); - - /* Initialize identification LED */ - ret_val = e1000e_id_led_init(hw); - if (ret_val) - e_dbg("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ - - /* Disabling VLAN filtering */ - e_dbg("Initializing the IEEE VLAN\n"); - mac->ops.clear_vfta(hw); - - /* Setup the receive address. */ - e1000e_init_rx_addrs(hw, mac->rar_entry_count); - - /* Zero out the Multicast HASH table */ - e_dbg("Zeroing the MTA\n"); - for (i = 0; i < mac->mta_reg_count; i++) - E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); - - /* Setup link and flow control */ - ret_val = e1000e_setup_link(hw); - - /* Disable IBIST slave mode (far-end loopback) */ - e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - &kum_reg_data); - kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; - e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - kum_reg_data); - - /* Set the transmit descriptor write-back policy */ - reg_data = er32(TXDCTL(0)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; - ew32(TXDCTL(0), reg_data); - - /* ...for both queues. */ - reg_data = er32(TXDCTL(1)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; - ew32(TXDCTL(1), reg_data); - - /* Enable retransmit on late collisions */ - reg_data = er32(TCTL); - reg_data |= E1000_TCTL_RTLC; - ew32(TCTL, reg_data); - - /* Configure Gigabit Carry Extend Padding */ - reg_data = er32(TCTL_EXT); - reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; - reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; - ew32(TCTL_EXT, reg_data); - - /* Configure Transmit Inter-Packet Gap */ - reg_data = er32(TIPG); - reg_data &= ~E1000_TIPG_IPGT_MASK; - reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; - ew32(TIPG, reg_data); - - reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); - reg_data &= ~0x00100000; - E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); - - /* default to true to enable the MDIC W/A */ - hw->dev_spec.e80003es2lan.mdic_wa_enable = true; - - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET >> - E1000_KMRNCTRLSTA_OFFSET_SHIFT, - &i); - if (!ret_val) { - if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == - E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) - hw->dev_spec.e80003es2lan.mdic_wa_enable = false; - } - - /* - * Clear all of the statistics registers (clear on read). It is - * important that we do this after we have tried to establish link - * because the symbol error count will increment wildly if there - * is no link. - */ - e1000_clear_hw_cntrs_80003es2lan(hw); - - return ret_val; -} - -/** - * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 - * @hw: pointer to the HW structure - * - * Initializes required hardware-dependent bits needed for normal operation. 
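Note how init_hw decides the MDIC workaround above: it defaults mdic_wa_enable to true and only clears it when the Kumeran opmode field reads back as in-band MDIO. The predicate, stated directly — constants are the E1000_KMRNCTRLSTA_OPMODE_* values defined near the top of this file:

    #include <stdbool.h>
    #include <stdint.h>

    #define OPMODE_MASK         0x000C
    #define OPMODE_INBAND_MDIO  0x0004

    static bool mdic_workaround_needed(uint16_t kmrn_opmode)
    {
            /* in-band MDIO makes the 200 us settle/verify dance unnecessary */
            return (kmrn_opmode & OPMODE_MASK) != OPMODE_INBAND_MDIO;
    }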
- **/ -static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) -{ - u32 reg; - - /* Transmit Descriptor Control 0 */ - reg = er32(TXDCTL(0)); - reg |= (1 << 22); - ew32(TXDCTL(0), reg); - - /* Transmit Descriptor Control 1 */ - reg = er32(TXDCTL(1)); - reg |= (1 << 22); - ew32(TXDCTL(1), reg); - - /* Transmit Arbitration Control 0 */ - reg = er32(TARC(0)); - reg &= ~(0xF << 27); /* 30:27 */ - if (hw->phy.media_type != e1000_media_type_copper) - reg &= ~(1 << 20); - ew32(TARC(0), reg); - - /* Transmit Arbitration Control 1 */ - reg = er32(TARC(1)); - if (er32(TCTL) & E1000_TCTL_MULR) - reg &= ~(1 << 28); - else - reg |= (1 << 28); - ew32(TARC(1), reg); -} - -/** - * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link - * @hw: pointer to the HW structure - * - * Setup some GG82563 PHY registers for obtaining link - **/ -static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u32 ctrl_ext; - u16 data; - - ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); - if (ret_val) - return ret_val; - - data |= GG82563_MSCR_ASSERT_CRS_ON_TX; - /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ - data |= GG82563_MSCR_TX_CLK_1000MBPS_25; - - ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data); - if (ret_val) - return ret_val; - - /* - * Options: - * MDI/MDI-X = 0 (default) - * 0 - Auto for all speeds - * 1 - MDI mode - * 2 - MDI-X mode - * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) - */ - ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data); - if (ret_val) - return ret_val; - - data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; - - switch (phy->mdix) { - case 1: - data |= GG82563_PSCR_CROSSOVER_MODE_MDI; - break; - case 2: - data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; - break; - case 0: - default: - data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; - break; - } - - /* - * Options: - * disable_polarity_correction = 0 (default) - * Automatic Correction for Reversed Cable Polarity - * 0 - Disabled - * 1 - Enabled - */ - data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; - if (phy->disable_polarity_correction) - data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; - - ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data); - if (ret_val) - return ret_val; - - /* SW Reset the PHY so all changes take effect */ - ret_val = e1000e_commit_phy(hw); - if (ret_val) { - e_dbg("Error Resetting the PHY\n"); - return ret_val; - } - - /* Bypass Rx and Tx FIFO's */ - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, - E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | - E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); - if (ret_val) - return ret_val; - - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, - &data); - if (ret_val) - return ret_val; - data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, - data); - if (ret_val) - return ret_val; - - ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); - if (ret_val) - return ret_val; - - data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; - ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data); - if (ret_val) - return ret_val; - - ctrl_ext = er32(CTRL_EXT); - ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); - ew32(CTRL_EXT, ctrl_ext); - - ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); - if (ret_val) - return ret_val; - - /* - * Do not init these registers when the HW is in IAMT mode, since the - * firmware will have already 
initialized them. We only initialize - * them if the HW is not in IAMT mode. - */ - if (!e1000e_check_mng_mode(hw)) { - /* Enable Electrical Idle on the PHY */ - data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; - ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data); - if (ret_val) - return ret_val; - - ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data); - if (ret_val) - return ret_val; - - data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; - ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data); - if (ret_val) - return ret_val; - } - - /* - * Workaround: Disable padding in Kumeran interface in the MAC - * and in the PHY to avoid CRC errors. - */ - ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); - if (ret_val) - return ret_val; - - data |= GG82563_ICR_DIS_PADDING; - ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data); - if (ret_val) - return ret_val; - - return 0; -} - -/** - * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2 - * @hw: pointer to the HW structure - * - * Essentially a wrapper for setting up all things "copper" related. - * This is a function pointer entry point called by the mac module. - **/ -static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) -{ - u32 ctrl; - s32 ret_val; - u16 reg_data; - - ctrl = er32(CTRL); - ctrl |= E1000_CTRL_SLU; - ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - ew32(CTRL, ctrl); - - /* - * Set the mac to wait the maximum time between each - * iteration and increase the max iterations when - * polling the phy; this fixes erroneous timeouts at 10Mbps. - */ - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), - 0xFFFF); - if (ret_val) - return ret_val; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), - ®_data); - if (ret_val) - return ret_val; - reg_data |= 0x3F; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), - reg_data); - if (ret_val) - return ret_val; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, - ®_data); - if (ret_val) - return ret_val; - reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, - reg_data); - if (ret_val) - return ret_val; - - ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw); - if (ret_val) - return ret_val; - - ret_val = e1000e_setup_copper_link(hw); - - return 0; -} - -/** - * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up - * @hw: pointer to the HW structure - * @duplex: current duplex setting - * - * Configure the KMRN interface by applying last minute quirks for - * 10/100 operation. - **/ -static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 speed; - u16 duplex; - - if (hw->phy.media_type == e1000_media_type_copper) { - ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed, - &duplex); - if (ret_val) - return ret_val; - - if (speed == SPEED_1000) - ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw); - else - ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex); - } - - return ret_val; -} - -/** - * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation - * @hw: pointer to the HW structure - * @duplex: current duplex setting - * - * Configure the KMRN interface by applying last minute quirks for - * 10/100 operation. 
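e1000_cfg_on_link_up_80003es2lan() above is a thin speed dispatcher: once copper link is up it reads back the negotiated speed and duplex and applies the matching set of Kumeran quirks. Reduced to the decision itself, with hypothetical helper names:

    #include <stdint.h>

    extern int cfg_kmrn_1000(void);              /* assumed quirk helpers */
    extern int cfg_kmrn_10_100(uint16_t duplex);

    static int cfg_on_link_up(uint16_t speed, uint16_t duplex)
    {
            /* SPEED_1000 is 1000 in the kernel's ethtool definitions */
            return speed == 1000 ? cfg_kmrn_1000() : cfg_kmrn_10_100(duplex);
    }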
- **/ -static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) -{ - s32 ret_val; - u32 tipg; - u32 i = 0; - u16 reg_data, reg_data2; - - reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, - reg_data); - if (ret_val) - return ret_val; - - /* Configure Transmit Inter-Packet Gap */ - tipg = er32(TIPG); - tipg &= ~E1000_TIPG_IPGT_MASK; - tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN; - ew32(TIPG, tipg); - - do { - ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); - if (ret_val) - return ret_val; - - ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data2); - if (ret_val) - return ret_val; - i++; - } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); - - if (duplex == HALF_DUPLEX) - reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; - else - reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; - - ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); - - return 0; -} - -/** - * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation - * @hw: pointer to the HW structure - * - * Configure the KMRN interface by applying last minute quirks for - * gigabit operation. - **/ -static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) -{ - s32 ret_val; - u16 reg_data, reg_data2; - u32 tipg; - u32 i = 0; - - reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, - reg_data); - if (ret_val) - return ret_val; - - /* Configure Transmit Inter-Packet Gap */ - tipg = er32(TIPG); - tipg &= ~E1000_TIPG_IPGT_MASK; - tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; - ew32(TIPG, tipg); - - do { - ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); - if (ret_val) - return ret_val; - - ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data2); - if (ret_val) - return ret_val; - i++; - } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); - - reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; - ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); - - return ret_val; -} - -/** - * e1000_read_kmrn_reg_80003es2lan - Read kumeran register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Acquire semaphore, then read the PHY register at offset - * using the kumeran interface. The information retrieved is stored in data. - * Release the semaphore before exiting. - **/ -static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, - u16 *data) -{ - u32 kmrnctrlsta; - s32 ret_val = 0; - - ret_val = e1000_acquire_mac_csr_80003es2lan(hw); - if (ret_val) - return ret_val; - - kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; - ew32(KMRNCTRLSTA, kmrnctrlsta); - e1e_flush(); - - udelay(2); - - kmrnctrlsta = er32(KMRNCTRLSTA); - *data = (u16)kmrnctrlsta; - - e1000_release_mac_csr_80003es2lan(hw); - - return ret_val; -} - -/** - * e1000_write_kmrn_reg_80003es2lan - Write kumeran register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Acquire semaphore, then write the data to PHY register - * at the offset using the kumeran interface. Release semaphore - * before exiting. 
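Both cfg_kmrn paths above use the same defensive read loop: the KMRN mode-control register is read twice until consecutive reads agree (or GG82563_MAX_KMRN_RETRY is hit), guarding against a value sampled mid-update. Generically, assuming a read_phy() helper:

    #include <stdint.h>

    #define MAX_KMRN_RETRY 5

    extern int read_phy(uint16_t *val);  /* assumed: returns 0 on success */

    static int read_stable(uint16_t *out)
    {
            uint16_t a, b;
            unsigned i = 0;

            do {
                    if (read_phy(&a) || read_phy(&b))
                            return -1;
                    i++;
            } while (a != b && i < MAX_KMRN_RETRY);

            *out = a;  /* the driver proceeds with the last value read */
            return 0;
    }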
- **/ -static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, - u16 data) -{ - u32 kmrnctrlsta; - s32 ret_val = 0; - - ret_val = e1000_acquire_mac_csr_80003es2lan(hw); - if (ret_val) - return ret_val; - - kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | data; - ew32(KMRNCTRLSTA, kmrnctrlsta); - e1e_flush(); - - udelay(2); - - e1000_release_mac_csr_80003es2lan(hw); - - return ret_val; -} - -/** - * e1000_read_mac_addr_80003es2lan - Read device MAC address - * @hw: pointer to the HW structure - **/ -static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) -{ - s32 ret_val = 0; - - /* - * If there's an alternate MAC address place it in RAR0 - * so that it will override the Si installed default perm - * address. - */ - ret_val = e1000_check_alt_mac_addr_generic(hw); - if (ret_val) - goto out; - - ret_val = e1000_read_mac_addr_generic(hw); - -out: - return ret_val; -} - -/** - * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down - * @hw: pointer to the HW structure - * - * In the case of a PHY power down to save power, or to turn off link during a - * driver unload, or wake on lan is not enabled, remove the link. - **/ -static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) -{ - /* If the management interface is not enabled, then power down */ - if (!(hw->mac.ops.check_mng_mode(hw) || - hw->phy.ops.check_reset_block(hw))) - e1000_power_down_phy_copper(hw); -} - -/** - * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters - * @hw: pointer to the HW structure - * - * Clears the hardware counters by reading the counter registers. - **/ -static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) -{ - e1000e_clear_hw_cntrs_base(hw); - - er32(PRC64); - er32(PRC127); - er32(PRC255); - er32(PRC511); - er32(PRC1023); - er32(PRC1522); - er32(PTC64); - er32(PTC127); - er32(PTC255); - er32(PTC511); - er32(PTC1023); - er32(PTC1522); - - er32(ALGNERRC); - er32(RXERRC); - er32(TNCRS); - er32(CEXTERR); - er32(TSCTC); - er32(TSCTFC); - - er32(MGTPRC); - er32(MGTPDC); - er32(MGTPTC); - - er32(IAC); - er32(ICRXOC); - - er32(ICRXPTC); - er32(ICRXATC); - er32(ICTXPTC); - er32(ICTXATC); - er32(ICTXQEC); - er32(ICTXQMTC); - er32(ICRXDMTC); -} - -static struct e1000_mac_operations es2_mac_ops = { - .read_mac_addr = e1000_read_mac_addr_80003es2lan, - .id_led_init = e1000e_id_led_init, - .blink_led = e1000e_blink_led_generic, - .check_mng_mode = e1000e_check_mng_mode_generic, - /* check_for_link dependent on media type */ - .cleanup_led = e1000e_cleanup_led_generic, - .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, - .get_bus_info = e1000e_get_bus_info_pcie, - .set_lan_id = e1000_set_lan_id_multi_port_pcie, - .get_link_up_info = e1000_get_link_up_info_80003es2lan, - .led_on = e1000e_led_on_generic, - .led_off = e1000e_led_off_generic, - .update_mc_addr_list = e1000e_update_mc_addr_list_generic, - .write_vfta = e1000_write_vfta_generic, - .clear_vfta = e1000_clear_vfta_generic, - .reset_hw = e1000_reset_hw_80003es2lan, - .init_hw = e1000_init_hw_80003es2lan, - .setup_link = e1000e_setup_link, - /* setup_physical_interface dependent on media type */ - .setup_led = e1000e_setup_led_generic, -}; - -static struct e1000_phy_operations es2_phy_ops = { - .acquire = e1000_acquire_phy_80003es2lan, - .check_polarity = e1000_check_polarity_m88, - .check_reset_block = e1000e_check_reset_block_generic, - .commit = e1000e_phy_sw_reset, - .force_speed_duplex = 
e1000_phy_force_speed_duplex_80003es2lan, - .get_cfg_done = e1000_get_cfg_done_80003es2lan, - .get_cable_length = e1000_get_cable_length_80003es2lan, - .get_info = e1000e_get_phy_info_m88, - .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, - .release = e1000_release_phy_80003es2lan, - .reset = e1000e_phy_hw_reset_generic, - .set_d0_lplu_state = NULL, - .set_d3_lplu_state = e1000e_set_d3_lplu_state, - .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, - .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, -}; - -static struct e1000_nvm_operations es2_nvm_ops = { - .acquire = e1000_acquire_nvm_80003es2lan, - .read = e1000e_read_nvm_eerd, - .release = e1000_release_nvm_80003es2lan, - .update = e1000e_update_nvm_checksum_generic, - .valid_led_default = e1000e_valid_led_default, - .validate = e1000e_validate_nvm_checksum_generic, - .write = e1000_write_nvm_80003es2lan, -}; - -struct e1000_info e1000_es2_info = { - .mac = e1000_80003es2lan, - .flags = FLAG_HAS_HW_VLAN_FILTER - | FLAG_HAS_JUMBO_FRAMES - | FLAG_HAS_WOL - | FLAG_APME_IN_CTRL3 - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_CTRLEXT_ON_LOAD - | FLAG_RX_NEEDS_RESTART /* errata */ - | FLAG_TARC_SET_BIT_ZERO /* errata */ - | FLAG_APME_CHECK_PORT_B - | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ - | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, - .flags2 = FLAG2_DMA_BURST, - .pba = 38, - .max_hw_frame_size = DEFAULT_JUMBO, - .get_variants = e1000_get_variants_80003es2lan, - .mac_ops = &es2_mac_ops, - .phy_ops = &es2_phy_ops, - .nvm_ops = &es2_nvm_ops, -}; - diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c deleted file mode 100644 index 06d88f3..0000000 --- a/drivers/net/e1000e/ethtool.c +++ /dev/null @@ -1,2081 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* ethtool support for e1000 */ - -#include -#include -#include -#include -#include -#include - -#include "e1000.h" - -enum {NETDEV_STATS, E1000_STATS}; - -struct e1000_stats { - char stat_string[ETH_GSTRING_LEN]; - int type; - int sizeof_stat; - int stat_offset; -}; - -#define E1000_STAT(str, m) { \ - .stat_string = str, \ - .type = E1000_STATS, \ - .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ - .stat_offset = offsetof(struct e1000_adapter, m) } -#define E1000_NETDEV_STAT(str, m) { \ - .stat_string = str, \ - .type = NETDEV_STATS, \ - .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \ - .stat_offset = offsetof(struct rtnl_link_stats64, m) } - -static const struct e1000_stats e1000_gstrings_stats[] = { - E1000_STAT("rx_packets", stats.gprc), - E1000_STAT("tx_packets", stats.gptc), - E1000_STAT("rx_bytes", stats.gorc), - E1000_STAT("tx_bytes", stats.gotc), - E1000_STAT("rx_broadcast", stats.bprc), - E1000_STAT("tx_broadcast", stats.bptc), - E1000_STAT("rx_multicast", stats.mprc), - E1000_STAT("tx_multicast", stats.mptc), - E1000_NETDEV_STAT("rx_errors", rx_errors), - E1000_NETDEV_STAT("tx_errors", tx_errors), - E1000_NETDEV_STAT("tx_dropped", tx_dropped), - E1000_STAT("multicast", stats.mprc), - E1000_STAT("collisions", stats.colc), - E1000_NETDEV_STAT("rx_length_errors", rx_length_errors), - E1000_NETDEV_STAT("rx_over_errors", rx_over_errors), - E1000_STAT("rx_crc_errors", stats.crcerrs), - E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors), - E1000_STAT("rx_no_buffer_count", stats.rnbc), - E1000_STAT("rx_missed_errors", stats.mpc), - E1000_STAT("tx_aborted_errors", stats.ecol), - E1000_STAT("tx_carrier_errors", stats.tncrs), - E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors), - E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors), - E1000_STAT("tx_window_errors", stats.latecol), - E1000_STAT("tx_abort_late_coll", stats.latecol), - E1000_STAT("tx_deferred_ok", stats.dc), - E1000_STAT("tx_single_coll_ok", stats.scc), - E1000_STAT("tx_multi_coll_ok", stats.mcc), - E1000_STAT("tx_timeout_count", tx_timeout_count), - E1000_STAT("tx_restart_queue", restart_queue), - E1000_STAT("rx_long_length_errors", stats.roc), - E1000_STAT("rx_short_length_errors", stats.ruc), - E1000_STAT("rx_align_errors", stats.algnerrc), - E1000_STAT("tx_tcp_seg_good", stats.tsctc), - E1000_STAT("tx_tcp_seg_failed", stats.tsctfc), - E1000_STAT("rx_flow_control_xon", stats.xonrxc), - E1000_STAT("rx_flow_control_xoff", stats.xoffrxc), - E1000_STAT("tx_flow_control_xon", stats.xontxc), - E1000_STAT("tx_flow_control_xoff", stats.xofftxc), - E1000_STAT("rx_long_byte_count", stats.gorc), - E1000_STAT("rx_csum_offload_good", hw_csum_good), - E1000_STAT("rx_csum_offload_errors", hw_csum_err), - E1000_STAT("rx_header_split", rx_hdr_split), - E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), - E1000_STAT("tx_smbus", stats.mgptc), - E1000_STAT("rx_smbus", stats.mgprc), - E1000_STAT("dropped_smbus", stats.mgpdc), - E1000_STAT("rx_dma_failed", rx_dma_failed), - E1000_STAT("tx_dma_failed", tx_dma_failed), -}; - -#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) -#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) -static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", "Eeprom test (offline)", - "Interrupt test (offline)", "Loopback test (offline)", - "Link test (on/offline)" -}; -#define E1000_TEST_LEN 
ARRAY_SIZE(e1000_gstrings_test) - -static int e1000_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 speed; - - if (hw->phy.media_type == e1000_media_type_copper) { - - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_TP); - if (hw->phy.type == e1000_phy_ife) - ecmd->supported &= ~SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_TP; - - if (hw->mac.autoneg == 1) { - ecmd->advertising |= ADVERTISED_Autoneg; - /* the e1000 autoneg seems to match ethtool nicely */ - ecmd->advertising |= hw->phy.autoneg_advertised; - } - - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy.addr; - ecmd->transceiver = XCVR_INTERNAL; - - } else { - ecmd->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg); - - ecmd->advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE | - ADVERTISED_Autoneg); - - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; - } - - speed = -1; - ecmd->duplex = -1; - - if (netif_running(netdev)) { - if (netif_carrier_ok(netdev)) { - speed = adapter->link_speed; - ecmd->duplex = adapter->link_duplex - 1; - } - } else { - u32 status = er32(STATUS); - if (status & E1000_STATUS_LU) { - if (status & E1000_STATUS_SPEED_1000) - speed = SPEED_1000; - else if (status & E1000_STATUS_SPEED_100) - speed = SPEED_100; - else - speed = SPEED_10; - - if (status & E1000_STATUS_FD) - ecmd->duplex = DUPLEX_FULL; - else - ecmd->duplex = DUPLEX_HALF; - } - } - - ethtool_cmd_speed_set(ecmd, speed); - ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || - hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; - - /* MDI-X => 2; MDI =>1; Invalid =>0 */ - if ((hw->phy.media_type == e1000_media_type_copper) && - netif_carrier_ok(netdev)) - ecmd->eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : - ETH_TP_MDI; - else - ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; - - return 0; -} - -static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) -{ - struct e1000_mac_info *mac = &adapter->hw.mac; - - mac->autoneg = 0; - - /* Make sure dplx is at most 1 bit and lsb of speed is not set - * for the switch() below to work */ - if ((spd & 1) || (dplx & ~1)) - goto err_inval; - - /* Fiber NICs only allow 1000 gbps Full duplex */ - if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && - spd != SPEED_1000 && - dplx != DUPLEX_FULL) { - goto err_inval; - } - - switch (spd + dplx) { - case SPEED_10 + DUPLEX_HALF: - mac->forced_speed_duplex = ADVERTISE_10_HALF; - break; - case SPEED_10 + DUPLEX_FULL: - mac->forced_speed_duplex = ADVERTISE_10_FULL; - break; - case SPEED_100 + DUPLEX_HALF: - mac->forced_speed_duplex = ADVERTISE_100_HALF; - break; - case SPEED_100 + DUPLEX_FULL: - mac->forced_speed_duplex = ADVERTISE_100_FULL; - break; - case SPEED_1000 + DUPLEX_FULL: - mac->autoneg = 1; - adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; - break; - case SPEED_1000 + DUPLEX_HALF: /* not supported */ - default: - goto err_inval; - } - return 0; - -err_inval: - e_err("Unsupported Speed/Duplex configuration\n"); - return -EINVAL; -} - -static int e1000_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - /* - * When SoL/IDER sessions are active, autoneg/speed/duplex - * cannot be changed - */ - if (e1000_check_reset_block(hw)) { - e_err("Cannot change link characteristics when SoL/IDER is " - "active.\n"); - return -EINVAL; - } - - while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - - if (ecmd->autoneg == AUTONEG_ENABLE) { - hw->mac.autoneg = 1; - if (hw->phy.media_type == e1000_media_type_fiber) - hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE | - ADVERTISED_Autoneg; - else - hw->phy.autoneg_advertised = ecmd->advertising | - ADVERTISED_TP | - ADVERTISED_Autoneg; - ecmd->advertising = hw->phy.autoneg_advertised; - if (adapter->fc_autoneg) - hw->fc.requested_mode = e1000_fc_default; - } else { - u32 speed = ethtool_cmd_speed(ecmd); - if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { - clear_bit(__E1000_RESETTING, &adapter->state); - return -EINVAL; - } - } - - /* reset the link */ - - if (netif_running(adapter->netdev)) { - e1000e_down(adapter); - e1000e_up(adapter); - } else { - e1000e_reset(adapter); - } - - clear_bit(__E1000_RESETTING, &adapter->state); - return 0; -} - -static void e1000_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - pause->autoneg = - (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); - - if (hw->fc.current_mode == e1000_fc_rx_pause) { - pause->rx_pause = 1; - } else if (hw->fc.current_mode == e1000_fc_tx_pause) { - pause->tx_pause = 1; - } else if (hw->fc.current_mode == e1000_fc_full) { - pause->rx_pause = 1; - pause->tx_pause = 1; - } -} - -static int e1000_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int retval = 0; - - adapter->fc_autoneg = pause->autoneg; - - while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - - if (adapter->fc_autoneg == AUTONEG_ENABLE) { - hw->fc.requested_mode = e1000_fc_default; - if (netif_running(adapter->netdev)) { - e1000e_down(adapter); - e1000e_up(adapter); - } else { - e1000e_reset(adapter); - } - } else { - if (pause->rx_pause && pause->tx_pause) - hw->fc.requested_mode = e1000_fc_full; - else if (pause->rx_pause && !pause->tx_pause) - hw->fc.requested_mode = e1000_fc_rx_pause; - else if (!pause->rx_pause && pause->tx_pause) - hw->fc.requested_mode = e1000_fc_tx_pause; - else if (!pause->rx_pause && !pause->tx_pause) - hw->fc.requested_mode = e1000_fc_none; - - hw->fc.current_mode = hw->fc.requested_mode; - - if (hw->phy.media_type == e1000_media_type_fiber) { - retval = hw->mac.ops.setup_link(hw); - /* implicit goto out */ - } else { - retval = e1000e_force_mac_fc(hw); - if (retval) - goto out; - e1000e_set_fc_watermarks(hw); - } - } - -out: - clear_bit(__E1000_RESETTING, &adapter->state); - return retval; -} - -static u32 e1000_get_rx_csum(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - return adapter->flags & FLAG_RX_CSUM_ENABLED; -} - -static int e1000_set_rx_csum(struct net_device *netdev, u32 data) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (data) - adapter->flags |= FLAG_RX_CSUM_ENABLED; - else - adapter->flags &= ~FLAG_RX_CSUM_ENABLED; - - if (netif_running(netdev)) - e1000e_reinit_locked(adapter); - else - e1000e_reset(adapter); - return 0; -} - -static u32 e1000_get_tx_csum(struct net_device *netdev) -{ - return (netdev->features & NETIF_F_HW_CSUM) != 0; -} - -static int e1000_set_tx_csum(struct net_device *netdev, u32 data) -{ - if (data) - netdev->features |= NETIF_F_HW_CSUM; - else - netdev->features &= ~NETIF_F_HW_CSUM; - - return 0; -} - -static int e1000_set_tso(struct net_device *netdev, u32 data) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (data) { - netdev->features |= NETIF_F_TSO; - netdev->features |= NETIF_F_TSO6; - } else { - netdev->features &= ~NETIF_F_TSO; - netdev->features &= ~NETIF_F_TSO6; - } - - adapter->flags |= FLAG_TSO_FORCE; - return 0; -} - -static u32 e1000_get_msglevel(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - return adapter->msg_enable; -} - -static void e1000_set_msglevel(struct net_device *netdev, u32 data) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = data; -} - -static int e1000_get_regs_len(struct net_device *netdev) -{ -#define E1000_REGS_LEN 32 /* overestimate */ - return E1000_REGS_LEN * sizeof(u32); -} - -static void e1000_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 *regs_buff = p; - u16 phy_data; - - memset(p, 0, E1000_REGS_LEN * sizeof(u32)); - - regs->version = (1 << 24) | 
(adapter->pdev->revision << 16) | - adapter->pdev->device; - - regs_buff[0] = er32(CTRL); - regs_buff[1] = er32(STATUS); - - regs_buff[2] = er32(RCTL); - regs_buff[3] = er32(RDLEN); - regs_buff[4] = er32(RDH); - regs_buff[5] = er32(RDT); - regs_buff[6] = er32(RDTR); - - regs_buff[7] = er32(TCTL); - regs_buff[8] = er32(TDLEN); - regs_buff[9] = er32(TDH); - regs_buff[10] = er32(TDT); - regs_buff[11] = er32(TIDV); - - regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ - - /* ethtool doesn't use anything past this point, so all this - * code is likely legacy junk for apps that may or may not - * exist */ - if (hw->phy.type == e1000_phy_m88) { - e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - regs_buff[13] = (u32)phy_data; /* cable length */ - regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ - regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ - regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ - e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ - regs_buff[18] = regs_buff[13]; /* cable polarity */ - regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ - regs_buff[20] = regs_buff[17]; /* polarity correction */ - /* phy receive errors */ - regs_buff[22] = adapter->phy_stats.receive_errors; - regs_buff[23] = regs_buff[13]; /* mdix mode */ - } - regs_buff[21] = 0; /* was idle_errors */ - e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); - regs_buff[24] = (u32)phy_data; /* phy local receiver status */ - regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ -} - -static int e1000_get_eeprom_len(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - return adapter->hw.nvm.word_size * 2; -} - -static int e1000_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u16 *eeprom_buff; - int first_word; - int last_word; - int ret_val = 0; - u16 i; - - if (eeprom->len == 0) - return -EINVAL; - - eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - - eeprom_buff = kmalloc(sizeof(u16) * - (last_word - first_word + 1), GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - if (hw->nvm.type == e1000_nvm_eeprom_spi) { - ret_val = e1000_read_nvm(hw, first_word, - last_word - first_word + 1, - eeprom_buff); - } else { - for (i = 0; i < last_word - first_word + 1; i++) { - ret_val = e1000_read_nvm(hw, first_word + i, 1, - &eeprom_buff[i]); - if (ret_val) - break; - } - } - - if (ret_val) { - /* a read error occurred, throw away the result */ - memset(eeprom_buff, 0xff, sizeof(u16) * - (last_word - first_word + 1)); - } else { - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < last_word - first_word + 1; i++) - le16_to_cpus(&eeprom_buff[i]); - } - - memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); - kfree(eeprom_buff); - - return ret_val; -} - -static int e1000_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u16 *eeprom_buff; - void *ptr; - int max_len; - int first_word; - int last_word; - int ret_val = 0; - u16 i; - - if (eeprom->len == 0) - return -EOPNOTSUPP; - - if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 
16))) - return -EFAULT; - - if (adapter->flags & FLAG_READ_ONLY_NVM) - return -EINVAL; - - max_len = hw->nvm.word_size * 2; - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_buff = kmalloc(max_len, GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - ptr = (void *)eeprom_buff; - - if (eeprom->offset & 1) { - /* need read/modify/write of first changed EEPROM word */ - /* only the second byte of the word is being modified */ - ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); - ptr++; - } - if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) - /* need read/modify/write of last changed EEPROM word */ - /* only the first byte of the word is being modified */ - ret_val = e1000_read_nvm(hw, last_word, 1, - &eeprom_buff[last_word - first_word]); - - if (ret_val) - goto out; - - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < last_word - first_word + 1; i++) - le16_to_cpus(&eeprom_buff[i]); - - memcpy(ptr, bytes, eeprom->len); - - for (i = 0; i < last_word - first_word + 1; i++) - eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); - - ret_val = e1000_write_nvm(hw, first_word, - last_word - first_word + 1, eeprom_buff); - - if (ret_val) - goto out; - - /* - * Update the checksum over the first part of the EEPROM if needed - * and flush shadow RAM for applicable controllers - */ - if ((first_word <= NVM_CHECKSUM_REG) || - (hw->mac.type == e1000_82583) || - (hw->mac.type == e1000_82574) || - (hw->mac.type == e1000_82573)) - ret_val = e1000e_update_nvm_checksum(hw); - -out: - kfree(eeprom_buff); - return ret_val; -} - -static void e1000_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - char firmware_version[32]; - - strncpy(drvinfo->driver, e1000e_driver_name, - sizeof(drvinfo->driver) - 1); - strncpy(drvinfo->version, e1000e_driver_version, - sizeof(drvinfo->version) - 1); - - /* - * EEPROM image version # is reported as firmware version # for - * PCI-E controllers - */ - snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", - (adapter->eeprom_vers & 0xF000) >> 12, - (adapter->eeprom_vers & 0x0FF0) >> 4, - (adapter->eeprom_vers & 0x000F)); - - strncpy(drvinfo->fw_version, firmware_version, - sizeof(drvinfo->fw_version) - 1); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info) - 1); - drvinfo->regdump_len = e1000_get_regs_len(netdev); - drvinfo->eedump_len = e1000_get_eeprom_len(netdev); -} - -static void e1000_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_ring *tx_ring = adapter->tx_ring; - struct e1000_ring *rx_ring = adapter->rx_ring; - - ring->rx_max_pending = E1000_MAX_RXD; - ring->tx_max_pending = E1000_MAX_TXD; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = rx_ring->count; - ring->tx_pending = tx_ring->count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} - -static int e1000_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_ring *tx_ring, *tx_old; - struct e1000_ring *rx_ring, *rx_old; - int err; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - - if (netif_running(adapter->netdev)) - 
e1000e_down(adapter); - - tx_old = adapter->tx_ring; - rx_old = adapter->rx_ring; - - err = -ENOMEM; - tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL); - if (!tx_ring) - goto err_alloc_tx; - - rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL); - if (!rx_ring) - goto err_alloc_rx; - - adapter->tx_ring = tx_ring; - adapter->rx_ring = rx_ring; - - rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); - rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD)); - rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); - - tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); - tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD)); - tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); - - if (netif_running(adapter->netdev)) { - /* Try to get new resources before deleting old */ - err = e1000e_setup_rx_resources(adapter); - if (err) - goto err_setup_rx; - err = e1000e_setup_tx_resources(adapter); - if (err) - goto err_setup_tx; - - /* - * restore the old in order to free it, - * then add in the new - */ - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - e1000e_free_rx_resources(adapter); - e1000e_free_tx_resources(adapter); - kfree(tx_old); - kfree(rx_old); - adapter->rx_ring = rx_ring; - adapter->tx_ring = tx_ring; - err = e1000e_up(adapter); - if (err) - goto err_setup; - } - - clear_bit(__E1000_RESETTING, &adapter->state); - return 0; -err_setup_tx: - e1000e_free_rx_resources(adapter); -err_setup_rx: - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - kfree(rx_ring); -err_alloc_rx: - kfree(tx_ring); -err_alloc_tx: - e1000e_up(adapter); -err_setup: - clear_bit(__E1000_RESETTING, &adapter->state); - return err; -} - -static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, - int reg, int offset, u32 mask, u32 write) -{ - u32 pat, val; - static const u32 test[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; - for (pat = 0; pat < ARRAY_SIZE(test); pat++) { - E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, - (test[pat] & write)); - val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); - if (val != (test[pat] & write & mask)) { - e_err("pattern test reg %04X failed: got 0x%08X " - "expected 0x%08X\n", reg + offset, val, - (test[pat] & write & mask)); - *data = reg; - return 1; - } - } - return 0; -} - -static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, - int reg, u32 mask, u32 write) -{ - u32 val; - __ew32(&adapter->hw, reg, write & mask); - val = __er32(&adapter->hw, reg); - if ((write & mask) != (val & mask)) { - e_err("set/check reg %04X test failed: got 0x%08X " - "expected 0x%08X\n", reg, (val & mask), (write & mask)); - *data = reg; - return 1; - } - return 0; -} -#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ - do { \ - if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ - return 1; \ - } while (0) -#define REG_PATTERN_TEST(reg, mask, write) \ - REG_PATTERN_TEST_ARRAY(reg, 0, mask, write) - -#define REG_SET_AND_CHECK(reg, mask, write) \ - do { \ - if (reg_set_and_check(adapter, data, reg, mask, write)) \ - return 1; \ - } while (0) - -static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_mac_info *mac = &adapter->hw.mac; - u32 value; - u32 before; - u32 after; - u32 i; - u32 toggle; - u32 mask; - - /* - * The status register is Read Only, so a write should fail. - * Some bits that get toggled are ignored. 
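reg_pattern_test() above exercises every writable bit with four complementary patterns; write is the writable-bit mask and mask the readable-bit mask, so the read-back comparison only judges bits the register actually implements. The core check, standalone with hypothetical reg_read()/reg_write() helpers:

    #include <stdbool.h>
    #include <stdint.h>

    extern void reg_write(uint32_t val);  /* assumed MMIO helpers */
    extern uint32_t reg_read(void);

    static bool pattern_test(uint32_t mask, uint32_t write)
    {
            static const uint32_t pats[] = {
                    0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF };

            for (unsigned p = 0; p < sizeof(pats) / sizeof(pats[0]); p++) {
                    reg_write(pats[p] & write);
                    if (reg_read() != (pats[p] & write & mask))
                            return false;  /* stuck or unimplemented bit */
            }
            return true;
    }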
- */ - switch (mac->type) { - /* there are several bits on newer hardware that are r/w */ - case e1000_82571: - case e1000_82572: - case e1000_80003es2lan: - toggle = 0x7FFFF3FF; - break; - default: - toggle = 0x7FFFF033; - break; - } - - before = er32(STATUS); - value = (er32(STATUS) & toggle); - ew32(STATUS, toggle); - after = er32(STATUS) & toggle; - if (value != after) { - e_err("failed STATUS register test got: 0x%08X expected: " - "0x%08X\n", after, value); - *data = 1; - return 1; - } - /* restore previous status */ - ew32(STATUS, before); - - if (!(adapter->flags & FLAG_IS_ICH)) { - REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); - } - - REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); - REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); - REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); - REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); - - REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); - - before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); - REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); - REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); - - REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); - if (!(adapter->flags & FLAG_IS_ICH)) - REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); - mask = 0x8003FFFF; - switch (mac->type) { - case e1000_ich10lan: - case e1000_pchlan: - case e1000_pch2lan: - mask |= (1 << 18); - break; - default: - break; - } - for (i = 0; i < mac->rar_entry_count; i++) - REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), - mask, 0xFFFFFFFF); - - for (i = 0; i < mac->mta_reg_count; i++) - REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); - - *data = 0; - return 0; -} - -static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) -{ - u16 temp; - u16 checksum = 0; - u16 i; - - *data = 0; - /* Read and add up the contents of the EEPROM */ - for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { - if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { - *data = 1; - return *data; - } - checksum += temp; - } - - /* If Checksum is not Correct return error else test passed */ - if ((checksum != (u16) NVM_SUM) && !(*data)) - *data = 2; - - return *data; -} - -static irqreturn_t e1000_test_intr(int irq, void *data) -{ - struct net_device *netdev = (struct net_device *) data; - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - adapter->test_icr |= er32(ICR); - - return IRQ_HANDLED; -} - -static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - u32 mask; - u32 shared_int = 1; - u32 irq = adapter->pdev->irq; - int i; - int ret_val = 0; - int int_mode = E1000E_INT_MODE_LEGACY; - - *data = 0; - - /* NOTE: we don't test MSI/MSI-X interrupts here, yet */ - if (adapter->int_mode == 
E1000E_INT_MODE_MSIX) { - int_mode = adapter->int_mode; - e1000e_reset_interrupt_capability(adapter); - adapter->int_mode = E1000E_INT_MODE_LEGACY; - e1000e_set_interrupt_capability(adapter); - } - /* Hook up test interrupt handler just for this test */ - if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, - netdev)) { - shared_int = 0; - } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, - netdev->name, netdev)) { - *data = 1; - ret_val = -1; - goto out; - } - e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); - - /* Disable all the interrupts */ - ew32(IMC, 0xFFFFFFFF); - e1e_flush(); - usleep_range(10000, 20000); - - /* Test each interrupt */ - for (i = 0; i < 10; i++) { - /* Interrupt to test */ - mask = 1 << i; - - if (adapter->flags & FLAG_IS_ICH) { - switch (mask) { - case E1000_ICR_RXSEQ: - continue; - case 0x00000100: - if (adapter->hw.mac.type == e1000_ich8lan || - adapter->hw.mac.type == e1000_ich9lan) - continue; - break; - default: - break; - } - } - - if (!shared_int) { - /* - * Disable the interrupt to be reported in - * the cause register and then force the same - * interrupt and see if one gets posted. If - * an interrupt was posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - ew32(IMC, mask); - ew32(ICS, mask); - e1e_flush(); - usleep_range(10000, 20000); - - if (adapter->test_icr & mask) { - *data = 3; - break; - } - } - - /* - * Enable the interrupt to be reported in - * the cause register and then force the same - * interrupt and see if one gets posted. If - * an interrupt was not posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - ew32(IMS, mask); - ew32(ICS, mask); - e1e_flush(); - usleep_range(10000, 20000); - - if (!(adapter->test_icr & mask)) { - *data = 4; - break; - } - - if (!shared_int) { - /* - * Disable the other interrupts to be reported in - * the cause register and then force the other - * interrupts and see if any get posted. If - * an interrupt was posted to the bus, the - * test failed. 
- */ - adapter->test_icr = 0; - ew32(IMC, ~mask & 0x00007FFF); - ew32(ICS, ~mask & 0x00007FFF); - e1e_flush(); - usleep_range(10000, 20000); - - if (adapter->test_icr) { - *data = 5; - break; - } - } - } - - /* Disable all the interrupts */ - ew32(IMC, 0xFFFFFFFF); - e1e_flush(); - usleep_range(10000, 20000); - - /* Unhook test interrupt handler */ - free_irq(irq, netdev); - -out: - if (int_mode == E1000E_INT_MODE_MSIX) { - e1000e_reset_interrupt_capability(adapter); - adapter->int_mode = int_mode; - e1000e_set_interrupt_capability(adapter); - } - - return ret_val; -} - -static void e1000_free_desc_rings(struct e1000_adapter *adapter) -{ - struct e1000_ring *tx_ring = &adapter->test_tx_ring; - struct e1000_ring *rx_ring = &adapter->test_rx_ring; - struct pci_dev *pdev = adapter->pdev; - int i; - - if (tx_ring->desc && tx_ring->buffer_info) { - for (i = 0; i < tx_ring->count; i++) { - if (tx_ring->buffer_info[i].dma) - dma_unmap_single(&pdev->dev, - tx_ring->buffer_info[i].dma, - tx_ring->buffer_info[i].length, - DMA_TO_DEVICE); - if (tx_ring->buffer_info[i].skb) - dev_kfree_skb(tx_ring->buffer_info[i].skb); - } - } - - if (rx_ring->desc && rx_ring->buffer_info) { - for (i = 0; i < rx_ring->count; i++) { - if (rx_ring->buffer_info[i].dma) - dma_unmap_single(&pdev->dev, - rx_ring->buffer_info[i].dma, - 2048, DMA_FROM_DEVICE); - if (rx_ring->buffer_info[i].skb) - dev_kfree_skb(rx_ring->buffer_info[i].skb); - } - } - - if (tx_ring->desc) { - dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, - tx_ring->dma); - tx_ring->desc = NULL; - } - if (rx_ring->desc) { - dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); - rx_ring->desc = NULL; - } - - kfree(tx_ring->buffer_info); - tx_ring->buffer_info = NULL; - kfree(rx_ring->buffer_info); - rx_ring->buffer_info = NULL; -} - -static int e1000_setup_desc_rings(struct e1000_adapter *adapter) -{ - struct e1000_ring *tx_ring = &adapter->test_tx_ring; - struct e1000_ring *rx_ring = &adapter->test_rx_ring; - struct pci_dev *pdev = adapter->pdev; - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - int i; - int ret_val; - - /* Setup Tx descriptor ring and Tx buffers */ - - if (!tx_ring->count) - tx_ring->count = E1000_DEFAULT_TXD; - - tx_ring->buffer_info = kcalloc(tx_ring->count, - sizeof(struct e1000_buffer), - GFP_KERNEL); - if (!(tx_ring->buffer_info)) { - ret_val = 1; - goto err_nomem; - } - - tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); - tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->desc) { - ret_val = 2; - goto err_nomem; - } - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - - ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); - ew32(TDBAH, ((u64) tx_ring->dma >> 32)); - ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); - ew32(TDH, 0); - ew32(TDT, 0); - ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | - E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | - E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); - - for (i = 0; i < tx_ring->count; i++) { - struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); - struct sk_buff *skb; - unsigned int skb_size = 1024; - - skb = alloc_skb(skb_size, GFP_KERNEL); - if (!skb) { - ret_val = 3; - goto err_nomem; - } - skb_put(skb, skb_size); - tx_ring->buffer_info[i].skb = skb; - tx_ring->buffer_info[i].length = skb->len; - tx_ring->buffer_info[i].dma = - dma_map_single(&pdev->dev, skb->data, skb->len, - DMA_TO_DEVICE); - if 
(dma_mapping_error(&pdev->dev, - tx_ring->buffer_info[i].dma)) { - ret_val = 4; - goto err_nomem; - } - tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); - tx_desc->lower.data = cpu_to_le32(skb->len); - tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | - E1000_TXD_CMD_IFCS | - E1000_TXD_CMD_RS); - tx_desc->upper.data = 0; - } - - /* Setup Rx descriptor ring and Rx buffers */ - - if (!rx_ring->count) - rx_ring->count = E1000_DEFAULT_RXD; - - rx_ring->buffer_info = kcalloc(rx_ring->count, - sizeof(struct e1000_buffer), - GFP_KERNEL); - if (!(rx_ring->buffer_info)) { - ret_val = 5; - goto err_nomem; - } - - rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); - rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - if (!rx_ring->desc) { - ret_val = 6; - goto err_nomem; - } - rx_ring->next_to_use = 0; - rx_ring->next_to_clean = 0; - - rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); - ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); - ew32(RDBAH, ((u64) rx_ring->dma >> 32)); - ew32(RDLEN, rx_ring->size); - ew32(RDH, 0); - ew32(RDT, 0); - rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | - E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | - E1000_RCTL_SBP | E1000_RCTL_SECRC | - E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | - (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); - ew32(RCTL, rctl); - - for (i = 0; i < rx_ring->count; i++) { - struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); - struct sk_buff *skb; - - skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); - if (!skb) { - ret_val = 7; - goto err_nomem; - } - skb_reserve(skb, NET_IP_ALIGN); - rx_ring->buffer_info[i].skb = skb; - rx_ring->buffer_info[i].dma = - dma_map_single(&pdev->dev, skb->data, 2048, - DMA_FROM_DEVICE); - if (dma_mapping_error(&pdev->dev, - rx_ring->buffer_info[i].dma)) { - ret_val = 8; - goto err_nomem; - } - rx_desc->buffer_addr = - cpu_to_le64(rx_ring->buffer_info[i].dma); - memset(skb->data, 0x00, skb->len); - } - - return 0; - -err_nomem: - e1000_free_desc_rings(adapter); - return ret_val; -} - -static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) -{ - /* Write out to PHY registers 29 and 30 to disable the Receiver. */ - e1e_wphy(&adapter->hw, 29, 0x001F); - e1e_wphy(&adapter->hw, 30, 0x8FFC); - e1e_wphy(&adapter->hw, 29, 0x001A); - e1e_wphy(&adapter->hw, 30, 0x8FF0); -} - -static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl_reg = 0; - u16 phy_reg = 0; - s32 ret_val = 0; - - hw->mac.autoneg = 0; - - if (hw->phy.type == e1000_phy_ife) { - /* force 100, set loopback */ - e1e_wphy(hw, PHY_CONTROL, 0x6100); - - /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ - ctrl_reg = er32(CTRL); - ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ - ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ - E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ - E1000_CTRL_SPD_100 |/* Force Speed to 100 */ - E1000_CTRL_FD); /* Force Duplex to FULL */ - - ew32(CTRL, ctrl_reg); - e1e_flush(); - udelay(500); - - return 0; - } - - /* Specific PHY configuration for loopback */ - switch (hw->phy.type) { - case e1000_phy_m88: - /* Auto-MDI/MDIX Off */ - e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); - /* reset to update Auto-MDI/MDIX */ - e1e_wphy(hw, PHY_CONTROL, 0x9140); - /* autoneg off */ - e1e_wphy(hw, PHY_CONTROL, 0x8140); - break; - case e1000_phy_gg82563: - e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); - break; - case e1000_phy_bm: - /* Set Default MAC Interface speed to 1GB */ - e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); - phy_reg &= ~0x0007; - phy_reg |= 0x006; - e1e_wphy(hw, PHY_REG(2, 21), phy_reg); - /* Assert SW reset for above settings to take effect */ - e1000e_commit_phy(hw); - mdelay(1); - /* Force Full Duplex */ - e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); - e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); - /* Set Link Up (in force link) */ - e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); - e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); - /* Force Link */ - e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); - e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); - /* Set Early Link Enable */ - e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); - e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); - break; - case e1000_phy_82577: - case e1000_phy_82578: - /* Workaround: K1 must be disabled for stable 1Gbps operation */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) { - e_err("Cannot setup 1Gbps loopback.\n"); - return ret_val; - } - e1000_configure_k1_ich8lan(hw, false); - hw->phy.ops.release(hw); - break; - case e1000_phy_82579: - /* Disable PHY energy detect power down */ - e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); - e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); - /* Disable full chip energy detect */ - e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); - e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); - /* Enable loopback on the PHY */ -#define I82577_PHY_LBK_CTRL 19 - e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); - break; - default: - break; - } - - /* force 1000, set loopback */ - e1e_wphy(hw, PHY_CONTROL, 0x4140); - mdelay(250); - - /* Now set up the MAC to the same speed/duplex as the PHY. */ - ctrl_reg = er32(CTRL); - ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ - ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ - E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ - E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ - E1000_CTRL_FD); /* Force Duplex to FULL */ - - if (adapter->flags & FLAG_IS_ICH) - ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ - - if (hw->phy.media_type == e1000_media_type_copper && - hw->phy.type == e1000_phy_m88) { - ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ - } else { - /* - * Set the ILOS bit on the fiber Nic if half duplex link is - * detected. - */ - if ((er32(STATUS) & E1000_STATUS_FD) == 0) - ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); - } - - ew32(CTRL, ctrl_reg); - - /* - * Disable the receiver on the PHY so when a cable is plugged in, the - * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
- */ - if (hw->phy.type == e1000_phy_m88) - e1000_phy_disable_receiver(adapter); - - udelay(500); - - return 0; -} - -static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl = er32(CTRL); - int link = 0; - - /* special requirements for 82571/82572 fiber adapters */ - - /* - * jump through hoops to make sure link is up because serdes - * link is hardwired up - */ - ctrl |= E1000_CTRL_SLU; - ew32(CTRL, ctrl); - - /* disable autoneg */ - ctrl = er32(TXCW); - ctrl &= ~(1 << 31); - ew32(TXCW, ctrl); - - link = (er32(STATUS) & E1000_STATUS_LU); - - if (!link) { - /* set invert loss of signal */ - ctrl = er32(CTRL); - ctrl |= E1000_CTRL_ILOS; - ew32(CTRL, ctrl); - } - - /* - * special write to serdes control register to enable SerDes analog - * loopback - */ -#define E1000_SERDES_LB_ON 0x410 - ew32(SCTL, E1000_SERDES_LB_ON); - e1e_flush(); - usleep_range(10000, 20000); - - return 0; -} - -/* only call this for fiber/serdes connections to es2lan */ -static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrlext = er32(CTRL_EXT); - u32 ctrl = er32(CTRL); - - /* - * save CTRL_EXT to restore later, reuse an empty variable (unused - * on mac_type 80003es2lan) - */ - adapter->tx_fifo_head = ctrlext; - - /* clear the serdes mode bits, putting the device into mac loopback */ - ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; - ew32(CTRL_EXT, ctrlext); - - /* force speed to 1000/FD, link up */ - ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); - ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | - E1000_CTRL_SPD_1000 | E1000_CTRL_FD); - ew32(CTRL, ctrl); - - /* set mac loopback */ - ctrl = er32(RCTL); - ctrl |= E1000_RCTL_LBM_MAC; - ew32(RCTL, ctrl); - - /* set testing mode parameters (no need to reset later) */ -#define KMRNCTRLSTA_OPMODE (0x1F << 16) -#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 - ew32(KMRNCTRLSTA, - (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); - - return 0; -} - -static int e1000_setup_loopback_test(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - - if (hw->phy.media_type == e1000_media_type_fiber || - hw->phy.media_type == e1000_media_type_internal_serdes) { - switch (hw->mac.type) { - case e1000_80003es2lan: - return e1000_set_es2lan_mac_loopback(adapter); - break; - case e1000_82571: - case e1000_82572: - return e1000_set_82571_fiber_loopback(adapter); - break; - default: - rctl = er32(RCTL); - rctl |= E1000_RCTL_LBM_TCVR; - ew32(RCTL, rctl); - return 0; - } - } else if (hw->phy.media_type == e1000_media_type_copper) { - return e1000_integrated_phy_loopback(adapter); - } - - return 7; -} - -static void e1000_loopback_cleanup(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - u16 phy_reg; - - rctl = er32(RCTL); - rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); - ew32(RCTL, rctl); - - switch (hw->mac.type) { - case e1000_80003es2lan: - if (hw->phy.media_type == e1000_media_type_fiber || - hw->phy.media_type == e1000_media_type_internal_serdes) { - /* restore CTRL_EXT, stealing space from tx_fifo_head */ - ew32(CTRL_EXT, adapter->tx_fifo_head); - adapter->tx_fifo_head = 0; - } - /* fall through */ - case e1000_82571: - case e1000_82572: - if (hw->phy.media_type == e1000_media_type_fiber || - hw->phy.media_type == e1000_media_type_internal_serdes) { -#define E1000_SERDES_LB_OFF 0x400 - ew32(SCTL, E1000_SERDES_LB_OFF); - e1e_flush(); - 
usleep_range(10000, 20000); - break; - } - /* Fall Through */ - default: - hw->mac.autoneg = 1; - if (hw->phy.type == e1000_phy_gg82563) - e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); - e1e_rphy(hw, PHY_CONTROL, &phy_reg); - if (phy_reg & MII_CR_LOOPBACK) { - phy_reg &= ~MII_CR_LOOPBACK; - e1e_wphy(hw, PHY_CONTROL, phy_reg); - e1000e_commit_phy(hw); - } - break; - } -} - -static void e1000_create_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - memset(skb->data, 0xFF, frame_size); - frame_size &= ~1; - memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); -} - -static int e1000_check_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - frame_size &= ~1; - if (*(skb->data + 3) == 0xFF) - if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) - return 0; - return 13; -} - -static int e1000_run_loopback_test(struct e1000_adapter *adapter) -{ - struct e1000_ring *tx_ring = &adapter->test_tx_ring; - struct e1000_ring *rx_ring = &adapter->test_rx_ring; - struct pci_dev *pdev = adapter->pdev; - struct e1000_hw *hw = &adapter->hw; - int i, j, k, l; - int lc; - int good_cnt; - int ret_val = 0; - unsigned long time; - - ew32(RDT, rx_ring->count - 1); - - /* - * Calculate the loop count based on the largest descriptor ring - * The idea is to wrap the largest ring a number of times using 64 - * send/receive pairs during each loop - */ - - if (rx_ring->count <= tx_ring->count) - lc = ((tx_ring->count / 64) * 2) + 1; - else - lc = ((rx_ring->count / 64) * 2) + 1; - - k = 0; - l = 0; - for (j = 0; j <= lc; j++) { /* loop count loop */ - for (i = 0; i < 64; i++) { /* send the packets */ - e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, - 1024); - dma_sync_single_for_device(&pdev->dev, - tx_ring->buffer_info[k].dma, - tx_ring->buffer_info[k].length, - DMA_TO_DEVICE); - k++; - if (k == tx_ring->count) - k = 0; - } - ew32(TDT, k); - e1e_flush(); - msleep(200); - time = jiffies; /* set the start time for the receive */ - good_cnt = 0; - do { /* receive the sent packets */ - dma_sync_single_for_cpu(&pdev->dev, - rx_ring->buffer_info[l].dma, 2048, - DMA_FROM_DEVICE); - - ret_val = e1000_check_lbtest_frame( - rx_ring->buffer_info[l].skb, 1024); - if (!ret_val) - good_cnt++; - l++; - if (l == rx_ring->count) - l = 0; - /* - * time + 20 msecs (200 msecs on 2.4) is more than - * enough time to complete the receives, if it's - * exceeded, break and error off - */ - } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); - if (good_cnt != 64) { - ret_val = 13; /* ret_val is the same as mis-compare */ - break; - } - if (jiffies >= (time + 20)) { - ret_val = 14; /* error code for time out error */ - break; - } - } /* end loop count loop */ - return ret_val; -} - -static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) -{ - /* - * PHY loopback cannot be performed if SoL/IDER - * sessions are active - */ - if (e1000_check_reset_block(&adapter->hw)) { - e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); - *data = 0; - goto out; - } - - *data = e1000_setup_desc_rings(adapter); - if (*data) - goto out; - - *data = e1000_setup_loopback_test(adapter); - if (*data) - goto err_loopback; - - *data = e1000_run_loopback_test(adapter); - e1000_loopback_cleanup(adapter); - -err_loopback: - e1000_free_desc_rings(adapter); -out: - return *data; -} - -static int e1000_link_test(struct e1000_adapter 
*adapter, u64 *data) -{ - struct e1000_hw *hw = &adapter->hw; - - *data = 0; - if (hw->phy.media_type == e1000_media_type_internal_serdes) { - int i = 0; - hw->mac.serdes_has_link = false; - - /* - * On some blade server designs, link establishment - * could take as long as 2-3 minutes - */ - do { - hw->mac.ops.check_for_link(hw); - if (hw->mac.serdes_has_link) - return *data; - msleep(20); - } while (i++ < 3750); - - *data = 1; - } else { - hw->mac.ops.check_for_link(hw); - if (hw->mac.autoneg) - /* - * On some Phy/switch combinations, link establishment - * can take a few seconds more than expected. - */ - msleep(5000); - - if (!(er32(STATUS) & E1000_STATUS_LU)) - *data = 1; - } - return *data; -} - -static int e1000e_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return E1000_TEST_LEN; - case ETH_SS_STATS: - return E1000_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static void e1000_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - u16 autoneg_advertised; - u8 forced_speed_duplex; - u8 autoneg; - bool if_running = netif_running(netdev); - - set_bit(__E1000_TESTING, &adapter->state); - - if (!if_running) { - /* Get control of and reset hardware */ - if (adapter->flags & FLAG_HAS_AMT) - e1000e_get_hw_control(adapter); - - e1000e_power_up_phy(adapter); - - adapter->hw.phy.autoneg_wait_to_complete = 1; - e1000e_reset(adapter); - adapter->hw.phy.autoneg_wait_to_complete = 0; - } - - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - - /* save speed, duplex, autoneg settings */ - autoneg_advertised = adapter->hw.phy.autoneg_advertised; - forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; - autoneg = adapter->hw.mac.autoneg; - - e_info("offline testing starting\n"); - - if (if_running) - /* indicate we're in test mode */ - dev_close(netdev); - - if (e1000_reg_test(adapter, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - e1000e_reset(adapter); - if (e1000_eeprom_test(adapter, &data[1])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - e1000e_reset(adapter); - if (e1000_intr_test(adapter, &data[2])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - e1000e_reset(adapter); - if (e1000_loopback_test(adapter, &data[3])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* force this routine to wait until autoneg complete/timeout */ - adapter->hw.phy.autoneg_wait_to_complete = 1; - e1000e_reset(adapter); - adapter->hw.phy.autoneg_wait_to_complete = 0; - - if (e1000_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* restore speed, duplex, autoneg settings */ - adapter->hw.phy.autoneg_advertised = autoneg_advertised; - adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; - adapter->hw.mac.autoneg = autoneg; - e1000e_reset(adapter); - - clear_bit(__E1000_TESTING, &adapter->state); - if (if_running) - dev_open(netdev); - } else { - /* Online tests */ - - e_info("online testing starting\n"); - - /* register, eeprom, intr and loopback tests not run online */ - data[0] = 0; - data[1] = 0; - data[2] = 0; - data[3] = 0; - - if (e1000_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - clear_bit(__E1000_TESTING, &adapter->state); - } - - if (!if_running) { - e1000e_reset(adapter); - - if (adapter->flags & FLAG_HAS_AMT) - e1000e_release_hw_control(adapter); - } - - msleep_interruptible(4 * 1000); -} - -static void e1000_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) -{ - 
struct e1000_adapter *adapter = netdev_priv(netdev); - - wol->supported = 0; - wol->wolopts = 0; - - if (!(adapter->flags & FLAG_HAS_WOL) || - !device_can_wakeup(&adapter->pdev->dev)) - return; - - wol->supported = WAKE_UCAST | WAKE_MCAST | - WAKE_BCAST | WAKE_MAGIC | WAKE_PHY; - - /* apply any specific unsupported masks here */ - if (adapter->flags & FLAG_NO_WAKE_UCAST) { - wol->supported &= ~WAKE_UCAST; - - if (adapter->wol & E1000_WUFC_EX) - e_err("Interface does not support directed (unicast) " - "frame wake-up packets\n"); - } - - if (adapter->wol & E1000_WUFC_EX) - wol->wolopts |= WAKE_UCAST; - if (adapter->wol & E1000_WUFC_MC) - wol->wolopts |= WAKE_MCAST; - if (adapter->wol & E1000_WUFC_BC) - wol->wolopts |= WAKE_BCAST; - if (adapter->wol & E1000_WUFC_MAG) - wol->wolopts |= WAKE_MAGIC; - if (adapter->wol & E1000_WUFC_LNKC) - wol->wolopts |= WAKE_PHY; -} - -static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (!(adapter->flags & FLAG_HAS_WOL) || - !device_can_wakeup(&adapter->pdev->dev) || - (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | - WAKE_MAGIC | WAKE_PHY))) - return -EOPNOTSUPP; - - /* these settings will always override what we currently have */ - adapter->wol = 0; - - if (wol->wolopts & WAKE_UCAST) - adapter->wol |= E1000_WUFC_EX; - if (wol->wolopts & WAKE_MCAST) - adapter->wol |= E1000_WUFC_MC; - if (wol->wolopts & WAKE_BCAST) - adapter->wol |= E1000_WUFC_BC; - if (wol->wolopts & WAKE_MAGIC) - adapter->wol |= E1000_WUFC_MAG; - if (wol->wolopts & WAKE_PHY) - adapter->wol |= E1000_WUFC_LNKC; - - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); - - return 0; -} - -static int e1000_set_phys_id(struct net_device *netdev, - enum ethtool_phys_id_state state) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - if (!hw->mac.ops.blink_led) - return 2; /* cycle on/off twice per second */ - - hw->mac.ops.blink_led(hw); - break; - - case ETHTOOL_ID_INACTIVE: - if (hw->phy.type == e1000_phy_ife) - e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); - hw->mac.ops.led_off(hw); - hw->mac.ops.cleanup_led(hw); - break; - - case ETHTOOL_ID_ON: - adapter->hw.mac.ops.led_on(&adapter->hw); - break; - - case ETHTOOL_ID_OFF: - adapter->hw.mac.ops.led_off(&adapter->hw); - break; - } - return 0; -} - -static int e1000_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (adapter->itr_setting <= 4) - ec->rx_coalesce_usecs = adapter->itr_setting; - else - ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; - - return 0; -} - -static int e1000_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || - ((ec->rx_coalesce_usecs > 4) && - (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || - (ec->rx_coalesce_usecs == 2)) - return -EINVAL; - - if (ec->rx_coalesce_usecs == 4) { - adapter->itr = adapter->itr_setting = 4; - } else if (ec->rx_coalesce_usecs <= 3) { - adapter->itr = 20000; - adapter->itr_setting = ec->rx_coalesce_usecs; - } else { - adapter->itr = (1000000 / ec->rx_coalesce_usecs); - adapter->itr_setting = adapter->itr & ~3; - } - - if (adapter->itr_setting != 0) - ew32(ITR, 1000000000 / (adapter->itr * 256)); - else - ew32(ITR, 0); - - return 0; -} 
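
A side note on the interrupt-coalescing math in e1000_set_coalesce() above:
adapter->itr holds an interrupt rate in interrupts/second (1000000 /
rx_coalesce_usecs), and the hardware ITR register is programmed in 256 ns
units, which is where the 1000000000 / (adapter->itr * 256) write comes
from. The stand-alone sketch below illustrates just that conversion; it is
not part of this patch, the helper name and printf harness are invented
here, and it assumes the general case of rx_coalesce_usecs > 4 (settings
0-4 are special-cased by the driver above).

	#include <stdio.h>

	/* ethtool rx-usecs -> interrupt rate -> ITR register (256 ns ticks) */
	static unsigned int itr_register_value(unsigned int rx_usecs)
	{
		unsigned int itr = 1000000 / rx_usecs;	/* interrupts per second */

		return 1000000000 / (itr * 256);	/* hardware counts 256 ns units */
	}

	int main(void)
	{
		/* 50 us between interrupts -> 20000 ints/s -> ITR value of 195 */
		printf("ITR = %u\n", itr_register_value(50));
		return 0;
	}
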
- -static int e1000_nway_reset(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (!netif_running(netdev)) - return -EAGAIN; - - if (!adapter->hw.mac.autoneg) - return -EINVAL; - - e1000e_reinit_locked(adapter); - - return 0; -} - -static void e1000_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, - u64 *data) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct rtnl_link_stats64 net_stats; - int i; - char *p = NULL; - - e1000e_get_stats64(netdev, &net_stats); - for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { - switch (e1000_gstrings_stats[i].type) { - case NETDEV_STATS: - p = (char *) &net_stats + - e1000_gstrings_stats[i].stat_offset; - break; - case E1000_STATS: - p = (char *) adapter + - e1000_gstrings_stats[i].stat_offset; - break; - default: - data[i] = 0; - continue; - } - - data[i] = (e1000_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } -} - -static void e1000_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); - break; - case ETH_SS_STATS: - for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { - memcpy(p, e1000_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - break; - } -} - -static int e1000e_set_flags(struct net_device *netdev, u32 data) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - bool need_reset = false; - int rc; - - need_reset = (data & ETH_FLAG_RXVLAN) != - (netdev->features & NETIF_F_HW_VLAN_RX); - - rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN | - ETH_FLAG_TXVLAN); - - if (rc) - return rc; - - if (need_reset) { - if (netif_running(netdev)) - e1000e_reinit_locked(adapter); - else - e1000e_reset(adapter); - } - - return 0; -} - -static const struct ethtool_ops e1000_ethtool_ops = { - .get_settings = e1000_get_settings, - .set_settings = e1000_set_settings, - .get_drvinfo = e1000_get_drvinfo, - .get_regs_len = e1000_get_regs_len, - .get_regs = e1000_get_regs, - .get_wol = e1000_get_wol, - .set_wol = e1000_set_wol, - .get_msglevel = e1000_get_msglevel, - .set_msglevel = e1000_set_msglevel, - .nway_reset = e1000_nway_reset, - .get_link = ethtool_op_get_link, - .get_eeprom_len = e1000_get_eeprom_len, - .get_eeprom = e1000_get_eeprom, - .set_eeprom = e1000_set_eeprom, - .get_ringparam = e1000_get_ringparam, - .set_ringparam = e1000_set_ringparam, - .get_pauseparam = e1000_get_pauseparam, - .set_pauseparam = e1000_set_pauseparam, - .get_rx_csum = e1000_get_rx_csum, - .set_rx_csum = e1000_set_rx_csum, - .get_tx_csum = e1000_get_tx_csum, - .set_tx_csum = e1000_set_tx_csum, - .get_sg = ethtool_op_get_sg, - .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, - .set_tso = e1000_set_tso, - .self_test = e1000_diag_test, - .get_strings = e1000_get_strings, - .set_phys_id = e1000_set_phys_id, - .get_ethtool_stats = e1000_get_ethtool_stats, - .get_sset_count = e1000e_get_sset_count, - .get_coalesce = e1000_get_coalesce, - .set_coalesce = e1000_set_coalesce, - .get_flags = ethtool_op_get_flags, - .set_flags = e1000e_set_flags, -}; - -void e1000e_set_ethtool_ops(struct net_device *netdev) -{ - SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); -} diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h deleted file mode 100644 index 2967039..0000000 --- a/drivers/net/e1000e/hw.h +++ /dev/null @@ -1,984 +0,0 @@ 
-/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_HW_H_ -#define _E1000_HW_H_ - -#include - -struct e1000_hw; -struct e1000_adapter; - -#include "defines.h" - -#define er32(reg) __er32(hw, E1000_##reg) -#define ew32(reg,val) __ew32(hw, E1000_##reg, (val)) -#define e1e_flush() er32(STATUS) - -#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ - (writel((value), ((a)->hw_addr + reg + ((offset) << 2)))) - -#define E1000_READ_REG_ARRAY(a, reg, offset) \ - (readl((a)->hw_addr + reg + ((offset) << 2))) - -enum e1e_registers { - E1000_CTRL = 0x00000, /* Device Control - RW */ - E1000_STATUS = 0x00008, /* Device Status - RO */ - E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */ - E1000_EERD = 0x00014, /* EEPROM Read - RW */ - E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */ - E1000_FLA = 0x0001C, /* Flash Access - RW */ - E1000_MDIC = 0x00020, /* MDI Control - RW */ - E1000_SCTL = 0x00024, /* SerDes Control - RW */ - E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ - E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ - E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */ - E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ - E1000_FCT = 0x00030, /* Flow Control Type - RW */ - E1000_VET = 0x00038, /* VLAN Ether Type - RW */ - E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ - E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ - E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ - E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ - E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ - E1000_EIAC_82574 = 0x000DC, /* Ext. 
Interrupt Auto Clear - RW */ - E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ - E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */ - E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */ -#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2)) - E1000_RCTL = 0x00100, /* Rx Control - RW */ - E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ - E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */ - E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */ - E1000_TCTL = 0x00400, /* Tx Control - RW */ - E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */ - E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */ - E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */ - E1000_LEDCTL = 0x00E00, /* LED Control - RW */ - E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ - E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ - E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ -#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ - E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ - E1000_PBS = 0x01008, /* Packet Buffer Size */ - E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ - E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ - E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ - E1000_PBA_ECC = 0x01100, /* PBA ECC Register */ - E1000_ERT = 0x02008, /* Early Rx Threshold - RW */ - E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ - E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ - E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ - E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ - E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ - E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */ - E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ - E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ - E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ - E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ -#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) - E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ - -/* Convenience macros - * - * Note: "_n" is the queue number of the register to be written to. 
- * - * Example usage: - * E1000_RDBAL_REG(current_rx_queue) - * - */ -#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) - E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ - E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ - E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ - E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ - E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ - E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ - E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ - E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ -#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) - E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */ - E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */ -#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8)) - E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ - E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ - E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ - E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */ - E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */ - E1000_SCC = 0x04014, /* Single Collision Count - R/clr */ - E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */ - E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */ - E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ - E1000_COLC = 0x04028, /* Collision Count - R/clr */ - E1000_DC = 0x04030, /* Defer Count - R/clr */ - E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */ - E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ - E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ - E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ - E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */ - E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */ - E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */ - E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */ - E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */ - E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */ - E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */ - E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */ - E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */ - E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */ - E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */ - E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */ - E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */ - E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */ - E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */ - E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */ - E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */ - E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */ - E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */ - E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */ - E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */ - E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */ - E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */ - E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */ - E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */ - E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ - E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */ - E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */ - E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */ - E1000_TOTL = 
0x040C8, /* Total Octets Tx Low - R/clr */ - E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */ - E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */ - E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */ - E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */ - E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */ - E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */ - E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */ - E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */ - E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */ - E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */ - E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */ - E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */ - E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */ - E1000_IAC = 0x04100, /* Interrupt Assertion Count */ - E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ - E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ - E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */ - E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */ - E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */ - E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ - E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ - E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ - E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */ - E1000_RFCTL = 0x05008, /* Receive Filter Control */ - E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ - E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */ -#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8)) -#define E1000_RA (E1000_RAL(0)) - E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ -#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) - E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ - E1000_WUC = 0x05800, /* Wakeup Control - RW */ - E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ - E1000_WUS = 0x05810, /* Wakeup Status - RO */ - E1000_MANC = 0x05820, /* Management Control - RW */ - E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */ - E1000_HOST_IF = 0x08800, /* Host Interface */ - - E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */ - E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ - E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */ -#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4)) - E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ - E1000_GCR = 0x05B00, /* PCI-Ex Control */ - E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */ - E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ - E1000_SWSM = 0x05B50, /* SW Semaphore */ - E1000_FWSM = 0x05B54, /* FW Semaphore */ - E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ - E1000_FFLT_DBG = 0x05F04, /* Debug Register */ - E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */ -#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4)) -#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE - E1000_HICR = 0x08F00, /* Host Interface Control */ -}; - -#define E1000_MAX_PHY_ADDR 4 - -/* IGP01E1000 Specific Registers */ -#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ -#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ -#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ -#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ -#define 
IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ -#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ -#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ -#define IGP_PAGE_SHIFT 5 -#define PHY_REG_MASK 0x1F - -#define BM_WUC_PAGE 800 -#define BM_WUC_ADDRESS_OPCODE 0x11 -#define BM_WUC_DATA_OPCODE 0x12 -#define BM_WUC_ENABLE_PAGE 769 -#define BM_WUC_ENABLE_REG 17 -#define BM_WUC_ENABLE_BIT (1 << 2) -#define BM_WUC_HOST_WU_BIT (1 << 4) -#define BM_WUC_ME_WU_BIT (1 << 5) - -#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) -#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) -#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) - -#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 -#define IGP01E1000_PHY_POLARITY_MASK 0x0078 - -#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 -#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ - -#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 - -#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ -#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ -#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ - -#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 - -#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 -#define IGP01E1000_PSSR_MDIX 0x0800 -#define IGP01E1000_PSSR_SPEED_MASK 0xC000 -#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 - -#define IGP02E1000_PHY_CHANNEL_NUM 4 -#define IGP02E1000_PHY_AGC_A 0x11B1 -#define IGP02E1000_PHY_AGC_B 0x12B1 -#define IGP02E1000_PHY_AGC_C 0x14B1 -#define IGP02E1000_PHY_AGC_D 0x18B1 - -#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ -#define IGP02E1000_AGC_LENGTH_MASK 0x7F -#define IGP02E1000_AGC_RANGE 15 - -/* manage.c */ -#define E1000_VFTA_ENTRY_SHIFT 5 -#define E1000_VFTA_ENTRY_MASK 0x7F -#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F - -#define E1000_HICR_EN 0x01 /* Enable bit - RO */ -/* Driver sets this bit when done to put command in RAM */ -#define E1000_HICR_C 0x02 -#define E1000_HICR_FW_RESET_ENABLE 0x40 -#define E1000_HICR_FW_RESET 0x80 - -#define E1000_FWSM_MODE_MASK 0xE -#define E1000_FWSM_MODE_SHIFT 1 - -#define E1000_MNG_IAMT_MODE 0x3 -#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 -#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 -#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 -#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 -#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 -#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 - -/* nvm.c */ -#define E1000_STM_OPCODE 0xDB00 - -#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 -#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 -#define E1000_KMRNCTRLSTA_REN 0x00200000 -#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ -#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ -#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ -#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ -#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ -#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ -#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 -#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 -#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ - -#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 -#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ -#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ -#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ - -/* IFE PHY Extended Status Control */ -#define IFE_PESC_POLARITY_REVERSED 0x0100 - -/* IFE PHY Special Control */ -#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 -#define IFE_PSC_FORCE_POLARITY 0x0020 - -/* IFE PHY 
Special Control and LED Control */ -#define IFE_PSCL_PROBE_MODE 0x0020 -#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ -#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ - -/* IFE PHY MDIX Control */ -#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ -#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ -#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ - -#define E1000_CABLE_LENGTH_UNDEFINED 0xFF - -#define E1000_DEV_ID_82571EB_COPPER 0x105E -#define E1000_DEV_ID_82571EB_FIBER 0x105F -#define E1000_DEV_ID_82571EB_SERDES 0x1060 -#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 -#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 -#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 -#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC -#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 -#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA -#define E1000_DEV_ID_82572EI_COPPER 0x107D -#define E1000_DEV_ID_82572EI_FIBER 0x107E -#define E1000_DEV_ID_82572EI_SERDES 0x107F -#define E1000_DEV_ID_82572EI 0x10B9 -#define E1000_DEV_ID_82573E 0x108B -#define E1000_DEV_ID_82573E_IAMT 0x108C -#define E1000_DEV_ID_82573L 0x109A -#define E1000_DEV_ID_82574L 0x10D3 -#define E1000_DEV_ID_82574LA 0x10F6 -#define E1000_DEV_ID_82583V 0x150C - -#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 -#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 -#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA -#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB - -#define E1000_DEV_ID_ICH8_82567V_3 0x1501 -#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 -#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A -#define E1000_DEV_ID_ICH8_IGP_C 0x104B -#define E1000_DEV_ID_ICH8_IFE 0x104C -#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 -#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 -#define E1000_DEV_ID_ICH8_IGP_M 0x104D -#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD -#define E1000_DEV_ID_ICH9_BM 0x10E5 -#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 -#define E1000_DEV_ID_ICH9_IGP_M 0x10BF -#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB -#define E1000_DEV_ID_ICH9_IGP_C 0x294C -#define E1000_DEV_ID_ICH9_IFE 0x10C0 -#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 -#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 -#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC -#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD -#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE -#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE -#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF -#define E1000_DEV_ID_ICH10_D_BM_V 0x1525 -#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA -#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB -#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF -#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 -#define E1000_DEV_ID_PCH2_LV_LM 0x1502 -#define E1000_DEV_ID_PCH2_LV_V 0x1503 - -#define E1000_REVISION_4 4 - -#define E1000_FUNC_1 1 - -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 - -enum e1000_mac_type { - e1000_82571, - e1000_82572, - e1000_82573, - e1000_82574, - e1000_82583, - e1000_80003es2lan, - e1000_ich8lan, - e1000_ich9lan, - e1000_ich10lan, - e1000_pchlan, - e1000_pch2lan, -}; - -enum e1000_media_type { - e1000_media_type_unknown = 0, - e1000_media_type_copper = 1, - e1000_media_type_fiber = 2, - e1000_media_type_internal_serdes = 3, - e1000_num_media_types -}; - -enum e1000_nvm_type { - e1000_nvm_unknown = 0, - e1000_nvm_none, - e1000_nvm_eeprom_spi, - e1000_nvm_flash_hw, - e1000_nvm_flash_sw -}; - -enum e1000_nvm_override { - e1000_nvm_override_none = 0, - e1000_nvm_override_spi_small, - e1000_nvm_override_spi_large -}; - -enum e1000_phy_type { - 
e1000_phy_unknown = 0, - e1000_phy_none, - e1000_phy_m88, - e1000_phy_igp, - e1000_phy_igp_2, - e1000_phy_gg82563, - e1000_phy_igp_3, - e1000_phy_ife, - e1000_phy_bm, - e1000_phy_82578, - e1000_phy_82577, - e1000_phy_82579, -}; - -enum e1000_bus_width { - e1000_bus_width_unknown = 0, - e1000_bus_width_pcie_x1, - e1000_bus_width_pcie_x2, - e1000_bus_width_pcie_x4 = 4, - e1000_bus_width_32, - e1000_bus_width_64, - e1000_bus_width_reserved -}; - -enum e1000_1000t_rx_status { - e1000_1000t_rx_status_not_ok = 0, - e1000_1000t_rx_status_ok, - e1000_1000t_rx_status_undefined = 0xFF -}; - -enum e1000_rev_polarity{ - e1000_rev_polarity_normal = 0, - e1000_rev_polarity_reversed, - e1000_rev_polarity_undefined = 0xFF -}; - -enum e1000_fc_mode { - e1000_fc_none = 0, - e1000_fc_rx_pause, - e1000_fc_tx_pause, - e1000_fc_full, - e1000_fc_default = 0xFF -}; - -enum e1000_ms_type { - e1000_ms_hw_default = 0, - e1000_ms_force_master, - e1000_ms_force_slave, - e1000_ms_auto -}; - -enum e1000_smart_speed { - e1000_smart_speed_default = 0, - e1000_smart_speed_on, - e1000_smart_speed_off -}; - -enum e1000_serdes_link_state { - e1000_serdes_link_down = 0, - e1000_serdes_link_autoneg_progress, - e1000_serdes_link_autoneg_complete, - e1000_serdes_link_forced_up -}; - -/* Receive Descriptor */ -struct e1000_rx_desc { - __le64 buffer_addr; /* Address of the descriptor's data buffer */ - __le16 length; /* Length of data DMAed into data buffer */ - __le16 csum; /* Packet checksum */ - u8 status; /* Descriptor status */ - u8 errors; /* Descriptor Errors */ - __le16 special; -}; - -/* Receive Descriptor - Extended */ -union e1000_rx_desc_extended { - struct { - __le64 buffer_addr; - __le64 reserved; - } read; - struct { - struct { - __le32 mrq; /* Multiple Rx Queues */ - union { - __le32 rss; /* RSS Hash */ - struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - __le32 status_error; /* ext status/error */ - __le16 length; - __le16 vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ -}; - -#define MAX_PS_BUFFERS 4 -/* Receive Descriptor - Packet Split */ -union e1000_rx_desc_packet_split { - struct { - /* one buffer for protocol header(s), three data buffers */ - __le64 buffer_addr[MAX_PS_BUFFERS]; - } read; - struct { - struct { - __le32 mrq; /* Multiple Rx Queues */ - union { - __le32 rss; /* RSS Hash */ - struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - __le32 status_error; /* ext status/error */ - __le16 length0; /* length of buffer 0 */ - __le16 vlan; /* VLAN tag */ - } middle; - struct { - __le16 header_status; - __le16 length[3]; /* length of buffers 1-3 */ - } upper; - __le64 reserved; - } wb; /* writeback */ -}; - -/* Transmit Descriptor */ -struct e1000_tx_desc { - __le64 buffer_addr; /* Address of the descriptor's data buffer */ - union { - __le32 data; - struct { - __le16 length; /* Data buffer length */ - u8 cso; /* Checksum offset */ - u8 cmd; /* Descriptor control */ - } flags; - } lower; - union { - __le32 data; - struct { - u8 status; /* Descriptor status */ - u8 css; /* Checksum start */ - __le16 special; - } fields; - } upper; -}; - -/* Offload Context Descriptor */ -struct e1000_context_desc { - union { - __le32 ip_config; - struct { - u8 ipcss; /* IP checksum start */ - u8 ipcso; /* IP checksum offset */ - __le16 ipcse; /* IP checksum end */ - } ip_fields; - } lower_setup; - union { - __le32 tcp_config; - struct { - u8 tucss; /* TCP checksum start */ - 
u8 tucso; /* TCP checksum offset */ - __le16 tucse; /* TCP checksum end */ - } tcp_fields; - } upper_setup; - __le32 cmd_and_length; - union { - __le32 data; - struct { - u8 status; /* Descriptor status */ - u8 hdr_len; /* Header length */ - __le16 mss; /* Maximum segment size */ - } fields; - } tcp_seg_setup; -}; - -/* Offload data descriptor */ -struct e1000_data_desc { - __le64 buffer_addr; /* Address of the descriptor's buffer address */ - union { - __le32 data; - struct { - __le16 length; /* Data buffer length */ - u8 typ_len_ext; - u8 cmd; - } flags; - } lower; - union { - __le32 data; - struct { - u8 status; /* Descriptor status */ - u8 popts; /* Packet Options */ - __le16 special; /* */ - } fields; - } upper; -}; - -/* Statistics counters collected by the MAC */ -struct e1000_hw_stats { - u64 crcerrs; - u64 algnerrc; - u64 symerrs; - u64 rxerrc; - u64 mpc; - u64 scc; - u64 ecol; - u64 mcc; - u64 latecol; - u64 colc; - u64 dc; - u64 tncrs; - u64 sec; - u64 cexterr; - u64 rlec; - u64 xonrxc; - u64 xontxc; - u64 xoffrxc; - u64 xofftxc; - u64 fcruc; - u64 prc64; - u64 prc127; - u64 prc255; - u64 prc511; - u64 prc1023; - u64 prc1522; - u64 gprc; - u64 bprc; - u64 mprc; - u64 gptc; - u64 gorc; - u64 gotc; - u64 rnbc; - u64 ruc; - u64 rfc; - u64 roc; - u64 rjc; - u64 mgprc; - u64 mgpdc; - u64 mgptc; - u64 tor; - u64 tot; - u64 tpr; - u64 tpt; - u64 ptc64; - u64 ptc127; - u64 ptc255; - u64 ptc511; - u64 ptc1023; - u64 ptc1522; - u64 mptc; - u64 bptc; - u64 tsctc; - u64 tsctfc; - u64 iac; - u64 icrxptc; - u64 icrxatc; - u64 ictxptc; - u64 ictxatc; - u64 ictxqec; - u64 ictxqmtc; - u64 icrxdmtc; - u64 icrxoc; -}; - -struct e1000_phy_stats { - u32 idle_errors; - u32 receive_errors; -}; - -struct e1000_host_mng_dhcp_cookie { - u32 signature; - u8 status; - u8 reserved0; - u16 vlan_id; - u32 reserved1; - u16 reserved2; - u8 reserved3; - u8 checksum; -}; - -/* Host Interface "Rev 1" */ -struct e1000_host_command_header { - u8 command_id; - u8 command_length; - u8 command_options; - u8 checksum; -}; - -#define E1000_HI_MAX_DATA_LENGTH 252 -struct e1000_host_command_info { - struct e1000_host_command_header command_header; - u8 command_data[E1000_HI_MAX_DATA_LENGTH]; -}; - -/* Host Interface "Rev 2" */ -struct e1000_host_mng_command_header { - u8 command_id; - u8 checksum; - u16 reserved1; - u16 reserved2; - u16 command_length; -}; - -#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 -struct e1000_host_mng_command_info { - struct e1000_host_mng_command_header command_header; - u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; -}; - -/* Function pointers and static data for the MAC. 
*/ -struct e1000_mac_operations { - s32 (*id_led_init)(struct e1000_hw *); - s32 (*blink_led)(struct e1000_hw *); - bool (*check_mng_mode)(struct e1000_hw *); - s32 (*check_for_link)(struct e1000_hw *); - s32 (*cleanup_led)(struct e1000_hw *); - void (*clear_hw_cntrs)(struct e1000_hw *); - void (*clear_vfta)(struct e1000_hw *); - s32 (*get_bus_info)(struct e1000_hw *); - void (*set_lan_id)(struct e1000_hw *); - s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); - s32 (*led_on)(struct e1000_hw *); - s32 (*led_off)(struct e1000_hw *); - void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); - s32 (*reset_hw)(struct e1000_hw *); - s32 (*init_hw)(struct e1000_hw *); - s32 (*setup_link)(struct e1000_hw *); - s32 (*setup_physical_interface)(struct e1000_hw *); - s32 (*setup_led)(struct e1000_hw *); - void (*write_vfta)(struct e1000_hw *, u32, u32); - s32 (*read_mac_addr)(struct e1000_hw *); -}; - -/* - * When to use various PHY register access functions: - * - * Func Caller - * Function Does Does When to use - * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * X_reg L,P,A n/a for simple PHY reg accesses - * X_reg_locked P,A L for multiple accesses of different regs - * on different pages - * X_reg_page A L,P for multiple accesses of different regs - * on the same page - * - * Where X=[read|write], L=locking, P=sets page, A=register access - * - */ -struct e1000_phy_operations { - s32 (*acquire)(struct e1000_hw *); - s32 (*cfg_on_link_up)(struct e1000_hw *); - s32 (*check_polarity)(struct e1000_hw *); - s32 (*check_reset_block)(struct e1000_hw *); - s32 (*commit)(struct e1000_hw *); - s32 (*force_speed_duplex)(struct e1000_hw *); - s32 (*get_cfg_done)(struct e1000_hw *hw); - s32 (*get_cable_length)(struct e1000_hw *); - s32 (*get_info)(struct e1000_hw *); - s32 (*set_page)(struct e1000_hw *, u16); - s32 (*read_reg)(struct e1000_hw *, u32, u16 *); - s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); - s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); - void (*release)(struct e1000_hw *); - s32 (*reset)(struct e1000_hw *); - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); - s32 (*write_reg)(struct e1000_hw *, u32, u16); - s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); - s32 (*write_reg_page)(struct e1000_hw *, u32, u16); - void (*power_up)(struct e1000_hw *); - void (*power_down)(struct e1000_hw *); -}; - -/* Function pointers for the NVM. 
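The L/P/A table above rewards a concrete reading: for several registers on one page, the caller takes the lock once, sets the page once, and then uses the access-only variant for each register. Below is a stand-alone sketch of that calling pattern; fake_hw and its acquire/set_page/read_reg_page helpers are toy stand-ins for illustration, not the driver's types.

#include <stdio.h>
#include <stdint.h>

/* Toy stand-ins for the driver's hw handle and PHY ops */
struct fake_hw {
	int locked;
	uint16_t page;
	uint16_t regs[32][32];	/* toy register file: pages folded modulo 32 */
};

static int acquire(struct fake_hw *hw)  { hw->locked = 1; return 0; }	/* "L" */
static void release(struct fake_hw *hw) { hw->locked = 0; }
static int set_page(struct fake_hw *hw, uint16_t page)			/* "P" */
{
	hw->page = page;
	return 0;
}
static int read_reg_page(struct fake_hw *hw, uint32_t reg, uint16_t *val) /* "A" only */
{
	*val = hw->regs[hw->page % 32][reg % 32];
	return 0;
}

int main(void)
{
	struct fake_hw hw = { 0 };
	uint16_t a, b;

	/* Several registers on one page: lock once, set the page once,
	 * then do access-only reads -- the X_reg_page row of the table */
	acquire(&hw);
	set_page(&hw, 769);
	read_reg_page(&hw, 16, &a);
	read_reg_page(&hw, 20, &b);
	release(&hw);
	printf("read 0x%04x 0x%04x\n", a, b);
	return 0;
}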
*/ -struct e1000_nvm_operations { - s32 (*acquire)(struct e1000_hw *); - s32 (*read)(struct e1000_hw *, u16, u16, u16 *); - void (*release)(struct e1000_hw *); - s32 (*update)(struct e1000_hw *); - s32 (*valid_led_default)(struct e1000_hw *, u16 *); - s32 (*validate)(struct e1000_hw *); - s32 (*write)(struct e1000_hw *, u16, u16, u16 *); -}; - -struct e1000_mac_info { - struct e1000_mac_operations ops; - u8 addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - - enum e1000_mac_type type; - - u32 collision_delta; - u32 ledctl_default; - u32 ledctl_mode1; - u32 ledctl_mode2; - u32 mc_filter_type; - u32 tx_packet_delta; - u32 txcw; - - u16 current_ifs_val; - u16 ifs_max_val; - u16 ifs_min_val; - u16 ifs_ratio; - u16 ifs_step_size; - u16 mta_reg_count; - - /* Maximum size of the MTA register table in all supported adapters */ - #define MAX_MTA_REG 128 - u32 mta_shadow[MAX_MTA_REG]; - u16 rar_entry_count; - - u8 forced_speed_duplex; - - bool adaptive_ifs; - bool has_fwsm; - bool arc_subsystem_valid; - bool autoneg; - bool autoneg_failed; - bool get_link_status; - bool in_ifs_mode; - bool serdes_has_link; - bool tx_pkt_filtering; - enum e1000_serdes_link_state serdes_link_state; -}; - -struct e1000_phy_info { - struct e1000_phy_operations ops; - - enum e1000_phy_type type; - - enum e1000_1000t_rx_status local_rx; - enum e1000_1000t_rx_status remote_rx; - enum e1000_ms_type ms_type; - enum e1000_ms_type original_ms_type; - enum e1000_rev_polarity cable_polarity; - enum e1000_smart_speed smart_speed; - - u32 addr; - u32 id; - u32 reset_delay_us; /* in usec */ - u32 revision; - - enum e1000_media_type media_type; - - u16 autoneg_advertised; - u16 autoneg_mask; - u16 cable_length; - u16 max_cable_length; - u16 min_cable_length; - - u8 mdix; - - bool disable_polarity_correction; - bool is_mdix; - bool polarity_correction; - bool speed_downgraded; - bool autoneg_wait_to_complete; -}; - -struct e1000_nvm_info { - struct e1000_nvm_operations ops; - - enum e1000_nvm_type type; - enum e1000_nvm_override override; - - u32 flash_bank_size; - u32 flash_base_addr; - - u16 word_size; - u16 delay_usec; - u16 address_bits; - u16 opcode_bits; - u16 page_size; -}; - -struct e1000_bus_info { - enum e1000_bus_width width; - - u16 func; -}; - -struct e1000_fc_info { - u32 high_water; /* Flow control high-water mark */ - u32 low_water; /* Flow control low-water mark */ - u16 pause_time; /* Flow control pause timer */ - u16 refresh_time; /* Flow control refresh timer */ - bool send_xon; /* Flow control send XON */ - bool strict_ieee; /* Strict IEEE mode */ - enum e1000_fc_mode current_mode; /* FC mode in effect */ - enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ -}; - -struct e1000_dev_spec_82571 { - bool laa_is_present; - u32 smb_counter; -}; - -struct e1000_dev_spec_80003es2lan { - bool mdic_wa_enable; -}; - -struct e1000_shadow_ram { - u16 value; - bool modified; -}; - -#define E1000_ICH8_SHADOW_RAM_WORDS 2048 - -struct e1000_dev_spec_ich8lan { - bool kmrn_lock_loss_workaround_enabled; - struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; - bool nvm_k1_enabled; - bool eee_disable; -}; - -struct e1000_hw { - struct e1000_adapter *adapter; - - u8 __iomem *hw_addr; - u8 __iomem *flash_address; - - struct e1000_mac_info mac; - struct e1000_fc_info fc; - struct e1000_phy_info phy; - struct e1000_nvm_info nvm; - struct e1000_bus_info bus; - struct e1000_host_mng_dhcp_cookie mng_cookie; - - union { - struct e1000_dev_spec_82571 e82571; - struct e1000_dev_spec_80003es2lan e80003es2lan; - struct 
e1000_dev_spec_ich8lan ich8lan; - } dev_spec; -}; - -#endif diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c deleted file mode 100644 index 4e36978..0000000 --- a/drivers/net/e1000e/ich8lan.c +++ /dev/null @@ -1,4111 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* - * 82562G 10/100 Network Connection - * 82562G-2 10/100 Network Connection - * 82562GT 10/100 Network Connection - * 82562GT-2 10/100 Network Connection - * 82562V 10/100 Network Connection - * 82562V-2 10/100 Network Connection - * 82566DC-2 Gigabit Network Connection - * 82566DC Gigabit Network Connection - * 82566DM-2 Gigabit Network Connection - * 82566DM Gigabit Network Connection - * 82566MC Gigabit Network Connection - * 82566MM Gigabit Network Connection - * 82567LM Gigabit Network Connection - * 82567LF Gigabit Network Connection - * 82567V Gigabit Network Connection - * 82567LM-2 Gigabit Network Connection - * 82567LF-2 Gigabit Network Connection - * 82567V-2 Gigabit Network Connection - * 82567LF-3 Gigabit Network Connection - * 82567LM-3 Gigabit Network Connection - * 82567LM-4 Gigabit Network Connection - * 82577LM Gigabit Network Connection - * 82577LC Gigabit Network Connection - * 82578DM Gigabit Network Connection - * 82578DC Gigabit Network Connection - * 82579LM Gigabit Network Connection - * 82579V Gigabit Network Connection - */ - -#include "e1000.h" - -#define ICH_FLASH_GFPREG 0x0000 -#define ICH_FLASH_HSFSTS 0x0004 -#define ICH_FLASH_HSFCTL 0x0006 -#define ICH_FLASH_FADDR 0x0008 -#define ICH_FLASH_FDATA0 0x0010 -#define ICH_FLASH_PR0 0x0074 - -#define ICH_FLASH_READ_COMMAND_TIMEOUT 500 -#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 -#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000 -#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF -#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 - -#define ICH_CYCLE_READ 0 -#define ICH_CYCLE_WRITE 2 -#define ICH_CYCLE_ERASE 3 - -#define FLASH_GFPREG_BASE_MASK 0x1FFF -#define FLASH_SECTOR_ADDR_SHIFT 12 - -#define ICH_FLASH_SEG_SIZE_256 256 -#define ICH_FLASH_SEG_SIZE_4K 4096 -#define ICH_FLASH_SEG_SIZE_8K 8192 -#define ICH_FLASH_SEG_SIZE_64K 65536 - - -#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ -/* FW established a valid mode */ -#define E1000_ICH_FWSM_FW_VALID 0x00008000 - -#define E1000_ICH_MNG_IAMT_MODE 0x2 - -#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ - (ID_LED_DEF1_OFF2 << 8) | \ - (ID_LED_DEF1_ON2 << 4) | \ - (ID_LED_DEF1_DEF2)) 
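The GFPREG-based flash layout implied by the constants above can be made concrete with a small worked example. This host-compilable sketch mirrors the sector arithmetic that e1000_init_nvm_params_ich8lan() performs later in this file; the GFPREG value is hypothetical.

#include <stdio.h>

#define FLASH_GFPREG_BASE_MASK  0x1FFF
#define FLASH_SECTOR_ADDR_SHIFT 12	/* 4 KB sectors */

int main(void)
{
	unsigned gfpreg = 0x00200001;	/* hypothetical GFPREG readout */
	unsigned base = gfpreg & FLASH_GFPREG_BASE_MASK;
	unsigned end  = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; /* limit sector is inclusive */
	unsigned flash_base_addr = base << FLASH_SECTOR_ADDR_SHIFT;    /* byte-aligned base */
	/* halve for the two NVM banks, then convert bytes to 16-bit words */
	unsigned bank_words = ((end - base) << FLASH_SECTOR_ADDR_SHIFT) / 2 / 2;

	/* prints: base=0x1000 bytes, bank=32768 words */
	printf("base=0x%x bytes, bank=%u words\n", flash_base_addr, bank_words);
	return 0;
}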
- -#define E1000_ICH_NVM_SIG_WORD 0x13 -#define E1000_ICH_NVM_SIG_MASK 0xC000 -#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 -#define E1000_ICH_NVM_SIG_VALUE 0x80 - -#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 - -#define E1000_FEXTNVM_SW_CONFIG 1 -#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ - -#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 -#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 -#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 - -#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL - -#define E1000_ICH_RAR_ENTRIES 7 - -#define PHY_PAGE_SHIFT 5 -#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ - ((reg) & MAX_PHY_REG_ADDRESS)) -#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ -#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ - -#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 -#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 -#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 - -#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ - -#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ - -/* SMBus Address Phy Register */ -#define HV_SMB_ADDR PHY_REG(768, 26) -#define HV_SMB_ADDR_MASK 0x007F -#define HV_SMB_ADDR_PEC_EN 0x0200 -#define HV_SMB_ADDR_VALID 0x0080 - -/* PHY Power Management Control */ -#define HV_PM_CTRL PHY_REG(770, 17) - -/* PHY Low Power Idle Control */ -#define I82579_LPI_CTRL PHY_REG(772, 20) -#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 - -/* EMI Registers */ -#define I82579_EMI_ADDR 0x10 -#define I82579_EMI_DATA 0x11 -#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ - -/* Strapping Option Register - RO */ -#define E1000_STRAP 0x0000C -#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 -#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 - -/* OEM Bits Phy Register */ -#define HV_OEM_BITS PHY_REG(768, 25) -#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ -#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ -#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ - -#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ -#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ - -/* KMRN Mode Control */ -#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) -#define HV_KMRN_MDIO_SLOW 0x0400 - -/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ -/* Offset 04h HSFSTS */ -union ich8_hws_flash_status { - struct ich8_hsfsts { - u16 flcdone :1; /* bit 0 Flash Cycle Done */ - u16 flcerr :1; /* bit 1 Flash Cycle Error */ - u16 dael :1; /* bit 2 Direct Access error Log */ - u16 berasesz :2; /* bit 4:3 Sector Erase Size */ - u16 flcinprog :1; /* bit 5 flash cycle in Progress */ - u16 reserved1 :2; /* bit 7:6 Reserved */ - u16 reserved2 :6; /* bit 13:8 Reserved */ - u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ - u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ - } hsf_status; - u16 regval; -}; - -/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ -/* Offset 06h FLCTL */ -union ich8_hws_flash_ctrl { - struct ich8_hsflctl { - u16 flcgo :1; /* 0 Flash Cycle Go */ - u16 flcycle :2; /* 2:1 Flash Cycle */ - u16 reserved :5; /* 7:3 Reserved */ - u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ - u16 flockdn :6; /* 15:10 Reserved */ - } hsf_ctrl; - u16 regval; -}; - -/* ICH Flash Region Access Permissions */ -union ich8_hws_flash_regacc { - struct ich8_flracc { - u32 grra :8; /* 0:7 GbE region Read Access */ - u32 grwa :8; /* 8:15 GbE region Write Access */ - u32 gmrag :8; /* 23:16 GbE Master 
Read Access Grant */ - u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ - } hsf_flregacc; - u16 regval; -}; - -/* ICH Flash Protected Region */ -union ich8_flash_protected_range { - struct ich8_pr { - u32 base:13; /* 0:12 Protected Range Base */ - u32 reserved1:2; /* 13:14 Reserved */ - u32 rpe:1; /* 15 Read Protection Enable */ - u32 limit:13; /* 16:28 Protected Range Limit */ - u32 reserved2:2; /* 29:30 Reserved */ - u32 wpe:1; /* 31 Write Protection Enable */ - } range; - u32 regval; -}; - -static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); -static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); -static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); -static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); -static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, - u32 offset, u8 byte); -static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, - u8 *data); -static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, - u16 *data); -static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, - u8 size, u16 *data); -static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); -static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); -static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); -static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); -static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); -static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); -static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); -static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); -static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); -static s32 e1000_led_on_pchlan(struct e1000_hw *hw); -static s32 e1000_led_off_pchlan(struct e1000_hw *hw); -static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); -static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); -static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); -static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); -static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); -static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); -static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); -static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); -static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); - -static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) -{ - return readw(hw->flash_address + reg); -} - -static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) -{ - return readl(hw->flash_address + reg); -} - -static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) -{ - writew(val, hw->flash_address + reg); -} - -static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) -{ - writel(val, hw->flash_address + reg); -} - -#define er16flash(reg) __er16flash(hw, (reg)) -#define er32flash(reg) __er32flash(hw, (reg)) -#define ew16flash(reg,val) __ew16flash(hw, (reg), (val)) -#define ew32flash(reg,val) __ew32flash(hw, (reg), (val)) - -static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) -{ - u32 ctrl; - - ctrl = er32(CTRL); - ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; - ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; - ew32(CTRL, ctrl); - e1e_flush(); - udelay(10); - ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; - ew32(CTRL, ctrl); -} - -/** - * e1000_init_phy_params_pchlan - Initialize PHY function pointers - * @hw: pointer to the HW structure - * - * Initialize 
family-specific PHY parameters and function pointers. - **/ -static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - u32 fwsm; - s32 ret_val = 0; - - phy->addr = 1; - phy->reset_delay_us = 100; - - phy->ops.set_page = e1000_set_page_igp; - phy->ops.read_reg = e1000_read_phy_reg_hv; - phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; - phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; - phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; - phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; - phy->ops.write_reg = e1000_write_phy_reg_hv; - phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; - phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; - phy->ops.power_up = e1000_power_up_phy_copper; - phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - - /* - * The MAC-PHY interconnect may still be in SMBus mode - * after Sx->S0. If the manageability engine (ME) is - * disabled, then toggle the LANPHYPC Value bit to force - * the interconnect to PCIe mode. - */ - fwsm = er32(FWSM); - if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) { - e1000_toggle_lanphypc_value_ich8lan(hw); - msleep(50); - - /* - * Gate automatic PHY configuration by hardware on - * non-managed 82579 - */ - if (hw->mac.type == e1000_pch2lan) - e1000_gate_hw_phy_config_ich8lan(hw, true); - } - - /* - * Reset the PHY before any access to it. Doing so, ensures that - * the PHY is in a known good state before we read/write PHY registers. - * The generic reset is sufficient here, because we haven't determined - * the PHY type yet. - */ - ret_val = e1000e_phy_hw_reset_generic(hw); - if (ret_val) - goto out; - - /* Ungate automatic PHY configuration on non-managed 82579 */ - if ((hw->mac.type == e1000_pch2lan) && - !(fwsm & E1000_ICH_FWSM_FW_VALID)) { - usleep_range(10000, 20000); - e1000_gate_hw_phy_config_ich8lan(hw, false); - } - - phy->id = e1000_phy_unknown; - switch (hw->mac.type) { - default: - ret_val = e1000e_get_phy_id(hw); - if (ret_val) - goto out; - if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) - break; - /* fall-through */ - case e1000_pch2lan: - /* - * In case the PHY needs to be in mdio slow mode, - * set slow mode and try to get the PHY id again. - */ - ret_val = e1000_set_mdio_slow_mode_hv(hw); - if (ret_val) - goto out; - ret_val = e1000e_get_phy_id(hw); - if (ret_val) - goto out; - break; - } - phy->type = e1000e_get_phy_type_from_id(phy->id); - - switch (phy->type) { - case e1000_phy_82577: - case e1000_phy_82579: - phy->ops.check_polarity = e1000_check_polarity_82577; - phy->ops.force_speed_duplex = - e1000_phy_force_speed_duplex_82577; - phy->ops.get_cable_length = e1000_get_cable_length_82577; - phy->ops.get_info = e1000_get_phy_info_82577; - phy->ops.commit = e1000e_phy_sw_reset; - break; - case e1000_phy_82578: - phy->ops.check_polarity = e1000_check_polarity_m88; - phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; - phy->ops.get_cable_length = e1000e_get_cable_length_m88; - phy->ops.get_info = e1000e_get_phy_info_m88; - break; - default: - ret_val = -E1000_ERR_PHY; - break; - } - -out: - return ret_val; -} - -/** - * e1000_init_phy_params_ich8lan - Initialize PHY function pointers - * @hw: pointer to the HW structure - * - * Initialize family-specific PHY parameters and function pointers. 
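The SMBus-to-PCIe recovery above hinges on one predicate: toggle LANPHYPC only when no firmware owns the PHY (FWSM FW_VALID clear) and a PHY reset is not blocked. A toy rendering of that decision; the inputs are hypothetical and only the FW_VALID bit value is taken from the defines earlier in this file.

#include <stdio.h>
#include <stdint.h>

#define FW_VALID 0x00008000	/* E1000_ICH_FWSM_FW_VALID */

/* Force the MAC-PHY interconnect back to PCIe mode only when the
 * manageability engine is absent and nothing blocks a PHY reset */
static int needs_lanphypc_toggle(uint32_t fwsm, int reset_blocked)
{
	return !(fwsm & FW_VALID) && !reset_blocked;
}

int main(void)
{
	printf("%d %d\n",
	       needs_lanphypc_toggle(0x00000000, 0),	/* ME disabled -> toggle */
	       needs_lanphypc_toggle(0x00008000, 0));	/* ME active -> leave it */
	return 0;
}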
- **/ -static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 i = 0; - - phy->addr = 1; - phy->reset_delay_us = 100; - - phy->ops.power_up = e1000_power_up_phy_copper; - phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; - - /* - * We may need to do this twice - once for IGP and if that fails, - * we'll set BM func pointers and try again - */ - ret_val = e1000e_determine_phy_address(hw); - if (ret_val) { - phy->ops.write_reg = e1000e_write_phy_reg_bm; - phy->ops.read_reg = e1000e_read_phy_reg_bm; - ret_val = e1000e_determine_phy_address(hw); - if (ret_val) { - e_dbg("Cannot determine PHY addr. Erroring out\n"); - return ret_val; - } - } - - phy->id = 0; - while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && - (i++ < 100)) { - usleep_range(1000, 2000); - ret_val = e1000e_get_phy_id(hw); - if (ret_val) - return ret_val; - } - - /* Verify phy id */ - switch (phy->id) { - case IGP03E1000_E_PHY_ID: - phy->type = e1000_phy_igp_3; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; - phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; - phy->ops.get_info = e1000e_get_phy_info_igp; - phy->ops.check_polarity = e1000_check_polarity_igp; - phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; - break; - case IFE_E_PHY_ID: - case IFE_PLUS_E_PHY_ID: - case IFE_C_E_PHY_ID: - phy->type = e1000_phy_ife; - phy->autoneg_mask = E1000_ALL_NOT_GIG; - phy->ops.get_info = e1000_get_phy_info_ife; - phy->ops.check_polarity = e1000_check_polarity_ife; - phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; - break; - case BME1000_E_PHY_ID: - phy->type = e1000_phy_bm; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->ops.read_reg = e1000e_read_phy_reg_bm; - phy->ops.write_reg = e1000e_write_phy_reg_bm; - phy->ops.commit = e1000e_phy_sw_reset; - phy->ops.get_info = e1000e_get_phy_info_m88; - phy->ops.check_polarity = e1000_check_polarity_m88; - phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; - break; - default: - return -E1000_ERR_PHY; - break; - } - - return 0; -} - -/** - * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers - * @hw: pointer to the HW structure - * - * Initialize family-specific NVM parameters and function - * pointers. - **/ -static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - u32 gfpreg, sector_base_addr, sector_end_addr; - u16 i; - - /* Can't read flash registers if the register set isn't mapped. */ - if (!hw->flash_address) { - e_dbg("ERROR: Flash registers not mapped\n"); - return -E1000_ERR_CONFIG; - } - - nvm->type = e1000_nvm_flash_sw; - - gfpreg = er32flash(ICH_FLASH_GFPREG); - - /* - * sector_X_addr is a "sector"-aligned address (4096 bytes) - * Add 1 to sector_end_addr since this sector is included in - * the overall size. - */ - sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; - sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; - - /* flash_base_addr is byte-aligned */ - nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; - - /* - * find total size of the NVM, then cut in half since the total - * size represents two separate NVM banks. 
- */ - nvm->flash_bank_size = (sector_end_addr - sector_base_addr) - << FLASH_SECTOR_ADDR_SHIFT; - nvm->flash_bank_size /= 2; - /* Adjust to word count */ - nvm->flash_bank_size /= sizeof(u16); - - nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; - - /* Clear shadow ram */ - for (i = 0; i < nvm->word_size; i++) { - dev_spec->shadow_ram[i].modified = false; - dev_spec->shadow_ram[i].value = 0xFFFF; - } - - return 0; -} - -/** - * e1000_init_mac_params_ich8lan - Initialize MAC function pointers - * @hw: pointer to the HW structure - * - * Initialize family-specific MAC parameters and function - * pointers. - **/ -static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_mac_info *mac = &hw->mac; - - /* Set media type function pointer */ - hw->phy.media_type = e1000_media_type_copper; - - /* Set mta register count */ - mac->mta_reg_count = 32; - /* Set rar entry count */ - mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; - if (mac->type == e1000_ich8lan) - mac->rar_entry_count--; - /* FWSM register */ - mac->has_fwsm = true; - /* ARC subsystem not supported */ - mac->arc_subsystem_valid = false; - /* Adaptive IFS supported */ - mac->adaptive_ifs = true; - - /* LED operations */ - switch (mac->type) { - case e1000_ich8lan: - case e1000_ich9lan: - case e1000_ich10lan: - /* check management mode */ - mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; - /* ID LED init */ - mac->ops.id_led_init = e1000e_id_led_init; - /* blink LED */ - mac->ops.blink_led = e1000e_blink_led_generic; - /* setup LED */ - mac->ops.setup_led = e1000e_setup_led_generic; - /* cleanup LED */ - mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; - /* turn on/off LED */ - mac->ops.led_on = e1000_led_on_ich8lan; - mac->ops.led_off = e1000_led_off_ich8lan; - break; - case e1000_pchlan: - case e1000_pch2lan: - /* check management mode */ - mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; - /* ID LED init */ - mac->ops.id_led_init = e1000_id_led_init_pchlan; - /* setup LED */ - mac->ops.setup_led = e1000_setup_led_pchlan; - /* cleanup LED */ - mac->ops.cleanup_led = e1000_cleanup_led_pchlan; - /* turn on/off LED */ - mac->ops.led_on = e1000_led_on_pchlan; - mac->ops.led_off = e1000_led_off_pchlan; - break; - default: - break; - } - - /* Enable PCS Lock-loss workaround for ICH8 */ - if (mac->type == e1000_ich8lan) - e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); - - /* Gate automatic PHY configuration by hardware on managed 82579 */ - if ((mac->type == e1000_pch2lan) && - (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) - e1000_gate_hw_phy_config_ich8lan(hw, true); - - return 0; -} - -/** - * e1000_set_eee_pchlan - Enable/disable EEE support - * @hw: pointer to the HW structure - * - * Enable/disable EEE based on the setting in the dev_spec structure. The bits - * in the LPI Control register will remain set only if/when link is up. - **/ -static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 phy_reg; - - if (hw->phy.type != e1000_phy_82579) - goto out; - - ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); - if (ret_val) - goto out; - - if (hw->dev_spec.ich8lan.eee_disable) - phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; - else - phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; - - ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); -out: - return ret_val; -} - -/** - * e1000_check_for_copper_link_ich8lan - Check for link (Copper) - * @hw: pointer to the HW structure - * - * Checks to see if the link status of the hardware has changed. 
If a - * change in link status has been detected, then we read the PHY registers - * to get the current speed/duplex if link exists. - **/ -static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; - bool link; - - /* - * We only want to go out to the PHY registers to see if Auto-Neg - * has completed and/or if our link status has changed. The - * get_link_status flag is set upon receiving a Link Status - * Change or Rx Sequence Error interrupt. - */ - if (!mac->get_link_status) { - ret_val = 0; - goto out; - } - - /* - * First we want to see if the MII Status Register reports - * link. If so, then we want to get the current speed/duplex - * of the PHY. - */ - ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) - goto out; - - if (hw->mac.type == e1000_pchlan) { - ret_val = e1000_k1_gig_workaround_hv(hw, link); - if (ret_val) - goto out; - } - - if (!link) - goto out; /* No link detected */ - - mac->get_link_status = false; - - if (hw->phy.type == e1000_phy_82578) { - ret_val = e1000_link_stall_workaround_hv(hw); - if (ret_val) - goto out; - } - - if (hw->mac.type == e1000_pch2lan) { - ret_val = e1000_k1_workaround_lv(hw); - if (ret_val) - goto out; - } - - /* - * Check if there was DownShift, must be checked - * immediately after link-up - */ - e1000e_check_downshift(hw); - - /* Enable/Disable EEE after link up */ - ret_val = e1000_set_eee_pchlan(hw); - if (ret_val) - goto out; - - /* - * If we are forcing speed/duplex, then we simply return since - * we have already determined whether we have link or not. - */ - if (!mac->autoneg) { - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - /* - * Auto-Neg is enabled. Auto Speed Detection takes care - * of MAC speed/duplex configuration. So we only need to - * configure Collision Distance in the MAC. - */ - e1000e_config_collision_dist(hw); - - /* - * Configure Flow Control now that Auto-Neg has completed. - * First, we need to restore the desired flow control - * settings because we may have had to re-autoneg with a - * different link partner. - */ - ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) - e_dbg("Error configuring flow control\n"); - -out: - return ret_val; -} - -static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - s32 rc; - - rc = e1000_init_mac_params_ich8lan(adapter); - if (rc) - return rc; - - rc = e1000_init_nvm_params_ich8lan(hw); - if (rc) - return rc; - - switch (hw->mac.type) { - case e1000_ich8lan: - case e1000_ich9lan: - case e1000_ich10lan: - rc = e1000_init_phy_params_ich8lan(hw); - break; - case e1000_pchlan: - case e1000_pch2lan: - rc = e1000_init_phy_params_pchlan(hw); - break; - default: - break; - } - if (rc) - return rc; - - /* - * Disable Jumbo Frame support on parts with Intel 10/100 PHY or - * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). 
- */ - if ((adapter->hw.phy.type == e1000_phy_ife) || - ((adapter->hw.mac.type >= e1000_pch2lan) && - (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { - adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; - adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; - - hw->mac.ops.blink_led = NULL; - } - - if ((adapter->hw.mac.type == e1000_ich8lan) && - (adapter->hw.phy.type == e1000_phy_igp_3)) - adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; - - /* Disable EEE by default until IEEE802.3az spec is finalized */ - if (adapter->flags2 & FLAG2_HAS_EEE) - adapter->hw.dev_spec.ich8lan.eee_disable = true; - - return 0; -} - -static DEFINE_MUTEX(nvm_mutex); - -/** - * e1000_acquire_nvm_ich8lan - Acquire NVM mutex - * @hw: pointer to the HW structure - * - * Acquires the mutex for performing NVM operations. - **/ -static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) -{ - mutex_lock(&nvm_mutex); - - return 0; -} - -/** - * e1000_release_nvm_ich8lan - Release NVM mutex - * @hw: pointer to the HW structure - * - * Releases the mutex used while performing NVM operations. - **/ -static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) -{ - mutex_unlock(&nvm_mutex); -} - -static DEFINE_MUTEX(swflag_mutex); - -/** - * e1000_acquire_swflag_ich8lan - Acquire software control flag - * @hw: pointer to the HW structure - * - * Acquires the software control flag for performing PHY and select - * MAC CSR accesses. - **/ -static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) -{ - u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; - s32 ret_val = 0; - - mutex_lock(&swflag_mutex); - - while (timeout) { - extcnf_ctrl = er32(EXTCNF_CTRL); - if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) - break; - - mdelay(1); - timeout--; - } - - if (!timeout) { - e_dbg("SW/FW/HW has locked the resource for too long.\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - timeout = SW_FLAG_TIMEOUT; - - extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; - ew32(EXTCNF_CTRL, extcnf_ctrl); - - while (timeout) { - extcnf_ctrl = er32(EXTCNF_CTRL); - if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) - break; - - mdelay(1); - timeout--; - } - - if (!timeout) { - e_dbg("Failed to acquire the semaphore.\n"); - extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; - ew32(EXTCNF_CTRL, extcnf_ctrl); - ret_val = -E1000_ERR_CONFIG; - goto out; - } - -out: - if (ret_val) - mutex_unlock(&swflag_mutex); - - return ret_val; -} - -/** - * e1000_release_swflag_ich8lan - Release software control flag - * @hw: pointer to the HW structure - * - * Releases the software control flag for performing PHY and select - * MAC CSR accesses. - **/ -static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) -{ - u32 extcnf_ctrl; - - extcnf_ctrl = er32(EXTCNF_CTRL); - - if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { - extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; - ew32(EXTCNF_CTRL, extcnf_ctrl); - } else { - e_dbg("Semaphore unexpectedly released by sw/fw/hw\n"); - } - - mutex_unlock(&swflag_mutex); -} - -/** - * e1000_check_mng_mode_ich8lan - Checks management mode - * @hw: pointer to the HW structure - * - * This checks if the adapter has any manageability enabled. - * This is a function pointer entry point only called by read/write - * routines for the PHY and NVM parts. 
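The two-phase protocol used by e1000_acquire_swflag_ich8lan() above is worth spelling out, since the read-back step is what catches a firmware refusal: wait for the flag to be free, set it, then read it back to confirm the hardware accepted the set, rolling back on failure. A toy user-space rendering in which the register is a plain variable and the 1 ms delays are elided:

#include <stdio.h>
#include <stdint.h>

#define SWFLAG 0x0020u
static uint32_t extcnf_ctrl;	/* stand-in for the EXTCNF_CTRL register */

static int acquire_swflag(void)
{
	int timeout;

	for (timeout = 1000; timeout; timeout--) {	/* phase 1: wait until free */
		if (!(extcnf_ctrl & SWFLAG))
			break;
	}
	if (!timeout)
		return -1;				/* someone holds it too long */

	extcnf_ctrl |= SWFLAG;				/* request ownership */

	for (timeout = 1000; timeout; timeout--) {	/* phase 2: confirm it stuck */
		if (extcnf_ctrl & SWFLAG)
			break;
	}
	if (!timeout) {
		extcnf_ctrl &= ~SWFLAG;			/* roll back on failure */
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("acquire -> %d\n", acquire_swflag());
	return 0;
}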
- **/ -static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) -{ - u32 fwsm; - - fwsm = er32(FWSM); - return (fwsm & E1000_ICH_FWSM_FW_VALID) && - ((fwsm & E1000_FWSM_MODE_MASK) == - (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); -} - -/** - * e1000_check_mng_mode_pchlan - Checks management mode - * @hw: pointer to the HW structure - * - * This checks if the adapter has iAMT enabled. - * This is a function pointer entry point only called by read/write - * routines for the PHY and NVM parts. - **/ -static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) -{ - u32 fwsm; - - fwsm = er32(FWSM); - return (fwsm & E1000_ICH_FWSM_FW_VALID) && - (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); -} - -/** - * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked - * @hw: pointer to the HW structure - * - * Checks if firmware is blocking the reset of the PHY. - * This is a function pointer entry point only called by - * reset routines. - **/ -static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) -{ - u32 fwsm; - - fwsm = er32(FWSM); - - return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET; -} - -/** - * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states - * @hw: pointer to the HW structure - * - * Assumes semaphore already acquired. - * - **/ -static s32 e1000_write_smbus_addr(struct e1000_hw *hw) -{ - u16 phy_data; - u32 strap = er32(STRAP); - s32 ret_val = 0; - - strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; - - ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); - if (ret_val) - goto out; - - phy_data &= ~HV_SMB_ADDR_MASK; - phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); - phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; - ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); - -out: - return ret_val; -} - -/** - * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration - * @hw: pointer to the HW structure - * - * SW should configure the LCD from the NVM extended configuration region - * as a workaround for certain parts. - **/ -static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; - s32 ret_val = 0; - u16 word_addr, reg_data, reg_addr, phy_page = 0; - - /* - * Initialize the PHY from the NVM on ICH platforms. This - * is needed due to an issue where the NVM configuration is - * not properly autoloaded after power transitions. - * Therefore, after each PHY reset, we will load the - * configuration data out of the NVM manually. 
- */ - switch (hw->mac.type) { - case e1000_ich8lan: - if (phy->type != e1000_phy_igp_3) - return ret_val; - - if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || - (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { - sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; - break; - } - /* Fall-thru */ - case e1000_pchlan: - case e1000_pch2lan: - sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; - break; - default: - return ret_val; - } - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - data = er32(FEXTNVM); - if (!(data & sw_cfg_mask)) - goto out; - - /* - * Make sure HW does not configure LCD from PHY - * extended configuration before SW configuration - */ - data = er32(EXTCNF_CTRL); - if (!(hw->mac.type == e1000_pch2lan)) { - if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) - goto out; - } - - cnf_size = er32(EXTCNF_SIZE); - cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; - cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; - if (!cnf_size) - goto out; - - cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; - cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; - - if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && - (hw->mac.type == e1000_pchlan)) || - (hw->mac.type == e1000_pch2lan)) { - /* - * HW configures the SMBus address and LEDs when the - * OEM and LCD Write Enable bits are set in the NVM. - * When both NVM bits are cleared, SW will configure - * them instead. - */ - ret_val = e1000_write_smbus_addr(hw); - if (ret_val) - goto out; - - data = er32(LEDCTL); - ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, - (u16)data); - if (ret_val) - goto out; - } - - /* Configure LCD from extended configuration region. */ - - /* cnf_base_addr is in DWORD */ - word_addr = (u16)(cnf_base_addr << 1); - - for (i = 0; i < cnf_size; i++) { - ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, - ®_data); - if (ret_val) - goto out; - - ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), - 1, ®_addr); - if (ret_val) - goto out; - - /* Save off the PHY page for future writes. */ - if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { - phy_page = reg_data; - continue; - } - - reg_addr &= PHY_REG_MASK; - reg_addr |= phy_page; - - ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, - reg_data); - if (ret_val) - goto out; - } - -out: - hw->phy.ops.release(hw); - return ret_val; -} - -/** - * e1000_k1_gig_workaround_hv - K1 Si workaround - * @hw: pointer to the HW structure - * @link: link up bool flag - * - * If K1 is enabled for 1Gbps, the MAC might stall when transitioning - * from a lower speed. This workaround disables K1 whenever link is at 1Gig - * If link is down, the function will restore the default K1 setting located - * in the NVM. 
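The decision rule buried in the status masking of this K1 workaround is compact enough to state on its own: K1 is forced off only when link is up, speed/duplex resolution is complete, and the resolved speed is exactly 1000 Mb/s. A sketch with hypothetical bit positions for the PHY status word; the real BM_CS_STATUS_*/HV_M_STATUS_* values live in the e1000e headers.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the status bits tested above */
#define LINK_UP    0x0400
#define RESOLVED   0x0800
#define SPEED_MASK 0xC000
#define SPEED_1000 0x8000

int main(void)
{
	uint16_t status_reg = 0x8C00;	/* hypothetical PHY status readout */
	int k1_enable = 1;		/* default comes from the NVM */

	/* K1 must be off at 1 Gb/s: require link up, speed/duplex resolved,
	 * and the resolved speed to be exactly 1000 Mb/s */
	status_reg &= LINK_UP | RESOLVED | SPEED_MASK;
	if (status_reg == (LINK_UP | RESOLVED | SPEED_1000))
		k1_enable = 0;

	printf("k1_enable=%d\n", k1_enable);
	return 0;
}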
- **/ -static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) -{ - s32 ret_val = 0; - u16 status_reg = 0; - bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; - - if (hw->mac.type != e1000_pchlan) - goto out; - - /* Wrap the whole flow with the sw flag */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - - /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ - if (link) { - if (hw->phy.type == e1000_phy_82578) { - ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, - &status_reg); - if (ret_val) - goto release; - - status_reg &= BM_CS_STATUS_LINK_UP | - BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_MASK; - - if (status_reg == (BM_CS_STATUS_LINK_UP | - BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_1000)) - k1_enable = false; - } - - if (hw->phy.type == e1000_phy_82577) { - ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, - &status_reg); - if (ret_val) - goto release; - - status_reg &= HV_M_STATUS_LINK_UP | - HV_M_STATUS_AUTONEG_COMPLETE | - HV_M_STATUS_SPEED_MASK; - - if (status_reg == (HV_M_STATUS_LINK_UP | - HV_M_STATUS_AUTONEG_COMPLETE | - HV_M_STATUS_SPEED_1000)) - k1_enable = false; - } - - /* Link stall fix for link up */ - ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), - 0x0100); - if (ret_val) - goto release; - - } else { - /* Link stall fix for link down */ - ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), - 0x4100); - if (ret_val) - goto release; - } - - ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); - -release: - hw->phy.ops.release(hw); -out: - return ret_val; -} - -/** - * e1000_configure_k1_ich8lan - Configure K1 power state - * @hw: pointer to the HW structure - * @enable: K1 state to configure - * - * Configure the K1 power state based on the provided parameter. - * Assumes semaphore already acquired. - * - * Success returns 0, Failure returns -E1000_ERR_PHY (-2) - **/ -s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) -{ - s32 ret_val = 0; - u32 ctrl_reg = 0; - u32 ctrl_ext = 0; - u32 reg = 0; - u16 kmrn_reg = 0; - - ret_val = e1000e_read_kmrn_reg_locked(hw, - E1000_KMRNCTRLSTA_K1_CONFIG, - &kmrn_reg); - if (ret_val) - goto out; - - if (k1_enable) - kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; - else - kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; - - ret_val = e1000e_write_kmrn_reg_locked(hw, - E1000_KMRNCTRLSTA_K1_CONFIG, - kmrn_reg); - if (ret_val) - goto out; - - udelay(20); - ctrl_ext = er32(CTRL_EXT); - ctrl_reg = er32(CTRL); - - reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); - reg |= E1000_CTRL_FRCSPD; - ew32(CTRL, reg); - - ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); - e1e_flush(); - udelay(20); - ew32(CTRL, ctrl_reg); - ew32(CTRL_EXT, ctrl_ext); - e1e_flush(); - udelay(20); - -out: - return ret_val; -} - -/** - * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration - * @hw: pointer to the HW structure - * @d0_state: boolean if entering d0 or d3 device state - * - * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are - * collectively called OEM bits. The OEM Write Enable bit and SW Config bit - * in NVM determines whether HW should configure LPLU and Gbe Disable. 
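The OEM-bits mapping just described is a straight translation of two PHY_CTRL policy bits into the two HV_OEM_BITS control bits, with separate source bits for the D0 and non-D0 cases. A minimal sketch using hypothetical values for the E1000_PHY_CTRL_* inputs; the HV_OEM_BITS_* values are the ones defined earlier in this file.

#include <stdio.h>
#include <stdint.h>

#define HV_OEM_BITS_LPLU       0x0004
#define HV_OEM_BITS_GBE_DIS    0x0040
#define HV_OEM_BITS_RESTART_AN 0x0400

/* Hypothetical stand-ins for the E1000_PHY_CTRL_* bits */
#define GBE_DISABLE        0x0040
#define D0A_LPLU           0x0002
#define NOND0A_GBE_DISABLE 0x0004
#define NOND0A_LPLU        0x0008

static uint16_t map_oem_bits(uint32_t mac_reg, uint16_t oem_reg, int d0_state)
{
	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	if (d0_state) {			/* use the D0 policy bits */
		if (mac_reg & GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;
		if (mac_reg & D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {			/* use the non-D0 (Dx) policy bits */
		if (mac_reg & NOND0A_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;
		if (mac_reg & NOND0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	}
	/* the driver restarts auto-neg only if no firmware reset block is set */
	return (uint16_t)(oem_reg | HV_OEM_BITS_RESTART_AN);
}

int main(void)
{
	printf("0x%04x\n", map_oem_bits(D0A_LPLU, 0, 1));
	return 0;
}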
- **/ -static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) -{ - s32 ret_val = 0; - u32 mac_reg; - u16 oem_reg; - - if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) - return ret_val; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - if (!(hw->mac.type == e1000_pch2lan)) { - mac_reg = er32(EXTCNF_CTRL); - if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) - goto out; - } - - mac_reg = er32(FEXTNVM); - if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) - goto out; - - mac_reg = er32(PHY_CTRL); - - ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); - if (ret_val) - goto out; - - oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); - - if (d0_state) { - if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) - oem_reg |= HV_OEM_BITS_GBE_DIS; - - if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) - oem_reg |= HV_OEM_BITS_LPLU; - } else { - if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE) - oem_reg |= HV_OEM_BITS_GBE_DIS; - - if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU) - oem_reg |= HV_OEM_BITS_LPLU; - } - /* Restart auto-neg to activate the bits */ - if (!e1000_check_reset_block(hw)) - oem_reg |= HV_OEM_BITS_RESTART_AN; - ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); - -out: - hw->phy.ops.release(hw); - - return ret_val; -} - - -/** - * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode - * @hw: pointer to the HW structure - **/ -static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) -{ - s32 ret_val; - u16 data; - - ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); - if (ret_val) - return ret_val; - - data |= HV_KMRN_MDIO_SLOW; - - ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); - - return ret_val; -} - -/** - * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be - * done after every PHY reset. - **/ -static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 phy_data; - - if (hw->mac.type != e1000_pchlan) - return ret_val; - - /* Set MDIO slow mode before any other MDIO access */ - if (hw->phy.type == e1000_phy_82577) { - ret_val = e1000_set_mdio_slow_mode_hv(hw); - if (ret_val) - goto out; - } - - if (((hw->phy.type == e1000_phy_82577) && - ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || - ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { - /* Disable generation of early preamble */ - ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); - if (ret_val) - return ret_val; - - /* Preamble tuning for SSC */ - ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); - if (ret_val) - return ret_val; - } - - if (hw->phy.type == e1000_phy_82578) { - /* - * Return registers to default by doing a soft reset then - * writing 0x3140 to the control register. - */ - if (hw->phy.revision < 2) { - e1000e_phy_sw_reset(hw); - ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140); - } - } - - /* Select page 0 */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - hw->phy.addr = 1; - ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); - hw->phy.ops.release(hw); - if (ret_val) - goto out; - - /* - * Configure the K1 Si workaround during phy reset assuming there is - * link so that it disables K1 if link is in 1Gbps. 
- */ - ret_val = e1000_k1_gig_workaround_hv(hw, true); - if (ret_val) - goto out; - - /* Workaround for link disconnects on a busy hub in half duplex */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); - if (ret_val) - goto release; - ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, - phy_data & 0x00FF); -release: - hw->phy.ops.release(hw); -out: - return ret_val; -} - -/** - * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY - * @hw: pointer to the HW structure - **/ -void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) -{ - u32 mac_reg; - u16 i, phy_reg = 0; - s32 ret_val; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return; - ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); - if (ret_val) - goto release; - - /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ - for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { - mac_reg = er32(RAL(i)); - hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), - (u16)(mac_reg & 0xFFFF)); - hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), - (u16)((mac_reg >> 16) & 0xFFFF)); - - mac_reg = er32(RAH(i)); - hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), - (u16)(mac_reg & 0xFFFF)); - hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), - (u16)((mac_reg & E1000_RAH_AV) - >> 16)); - } - - e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); - -release: - hw->phy.ops.release(hw); -} - -/** - * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation - * with 82579 PHY - * @hw: pointer to the HW structure - * @enable: flag to enable/disable workaround when enabling/disabling jumbos - **/ -s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) -{ - s32 ret_val = 0; - u16 phy_reg, data; - u32 mac_reg; - u16 i; - - if (hw->mac.type != e1000_pch2lan) - goto out; - - /* disable Rx path while enabling/disabling workaround */ - e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); - ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); - if (ret_val) - goto out; - - if (enable) { - /* - * Write Rx addresses (rar_entry_count for RAL/H, +4 for - * SHRAL/H) and initial CRC values to the MAC - */ - for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { - u8 mac_addr[ETH_ALEN] = {0}; - u32 addr_high, addr_low; - - addr_high = er32(RAH(i)); - if (!(addr_high & E1000_RAH_AV)) - continue; - addr_low = er32(RAL(i)); - mac_addr[0] = (addr_low & 0xFF); - mac_addr[1] = ((addr_low >> 8) & 0xFF); - mac_addr[2] = ((addr_low >> 16) & 0xFF); - mac_addr[3] = ((addr_low >> 24) & 0xFF); - mac_addr[4] = (addr_high & 0xFF); - mac_addr[5] = ((addr_high >> 8) & 0xFF); - - ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); - } - - /* Write Rx addresses to the PHY */ - e1000_copy_rx_addrs_to_phy_ich8lan(hw); - - /* Enable jumbo frame workaround in the MAC */ - mac_reg = er32(FFLT_DBG); - mac_reg &= ~(1 << 14); - mac_reg |= (7 << 15); - ew32(FFLT_DBG, mac_reg); - - mac_reg = er32(RCTL); - mac_reg |= E1000_RCTL_SECRC; - ew32(RCTL, mac_reg); - - ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_CTRL_OFFSET, - &data); - if (ret_val) - goto out; - ret_val = e1000e_write_kmrn_reg(hw, - E1000_KMRNCTRLSTA_CTRL_OFFSET, - data | (1 << 0)); - if (ret_val) - goto out; - ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_HD_CTRL, - &data); - if (ret_val) - goto out; - data &= ~(0xF << 8); - data |= (0xB << 8); - ret_val = e1000e_write_kmrn_reg(hw, - E1000_KMRNCTRLSTA_HD_CTRL, - data); - if (ret_val) - goto out; - - /* Enable 
jumbo frame workaround in the PHY */ - e1e_rphy(hw, PHY_REG(769, 23), &data); - data &= ~(0x7F << 5); - data |= (0x37 << 5); - ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); - if (ret_val) - goto out; - e1e_rphy(hw, PHY_REG(769, 16), &data); - data &= ~(1 << 13); - ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); - if (ret_val) - goto out; - e1e_rphy(hw, PHY_REG(776, 20), &data); - data &= ~(0x3FF << 2); - data |= (0x1A << 2); - ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); - if (ret_val) - goto out; - ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00); - if (ret_val) - goto out; - e1e_rphy(hw, HV_PM_CTRL, &data); - ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); - if (ret_val) - goto out; - } else { - /* Write MAC register values back to h/w defaults */ - mac_reg = er32(FFLT_DBG); - mac_reg &= ~(0xF << 14); - ew32(FFLT_DBG, mac_reg); - - mac_reg = er32(RCTL); - mac_reg &= ~E1000_RCTL_SECRC; - ew32(RCTL, mac_reg); - - ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_CTRL_OFFSET, - &data); - if (ret_val) - goto out; - ret_val = e1000e_write_kmrn_reg(hw, - E1000_KMRNCTRLSTA_CTRL_OFFSET, - data & ~(1 << 0)); - if (ret_val) - goto out; - ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_HD_CTRL, - &data); - if (ret_val) - goto out; - data &= ~(0xF << 8); - data |= (0xB << 8); - ret_val = e1000e_write_kmrn_reg(hw, - E1000_KMRNCTRLSTA_HD_CTRL, - data); - if (ret_val) - goto out; - - /* Write PHY register values back to h/w defaults */ - e1e_rphy(hw, PHY_REG(769, 23), &data); - data &= ~(0x7F << 5); - ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); - if (ret_val) - goto out; - e1e_rphy(hw, PHY_REG(769, 16), &data); - data |= (1 << 13); - ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); - if (ret_val) - goto out; - e1e_rphy(hw, PHY_REG(776, 20), &data); - data &= ~(0x3FF << 2); - data |= (0x8 << 2); - ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); - if (ret_val) - goto out; - ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); - if (ret_val) - goto out; - e1e_rphy(hw, HV_PM_CTRL, &data); - ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); - if (ret_val) - goto out; - } - - /* re-enable Rx path after enabling/disabling workaround */ - ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); - -out: - return ret_val; -} - -/** - * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be - * done after every PHY reset. 
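One property of the jumbo-frame workaround above that is easy to lose in the register noise: the enable and disable paths must mirror each other exactly, or disabling leaves stale workaround state in the MAC. The MAC-side pair reduces to the sketch below; E1000_RCTL_SECRC is assumed to carry its usual e1000 value, and the FFLT_DBG field positions are taken from the code above.

#include <stdio.h>
#include <stdint.h>

#define RCTL_SECRC 0x04000000u	/* strip Ethernet CRC of incoming packet */

int main(void)
{
	uint32_t fflt_dbg = 0, rctl = 0;
	int enable = 1;

	if (enable) {
		fflt_dbg &= ~(1u << 14);	/* clear, then... */
		fflt_dbg |= (7u << 15);		/* ...select the workaround mode */
		rctl |= RCTL_SECRC;		/* MAC strips the CRC */
	} else {
		fflt_dbg &= ~(0xFu << 14);	/* back to hardware defaults */
		rctl &= ~RCTL_SECRC;
	}
	printf("FFLT_DBG=0x%08x RCTL=0x%08x\n", fflt_dbg, rctl);
	return 0;
}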
- **/ -static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) -{ - s32 ret_val = 0; - - if (hw->mac.type != e1000_pch2lan) - goto out; - - /* Set MDIO slow mode before any other MDIO access */ - ret_val = e1000_set_mdio_slow_mode_hv(hw); - -out: - return ret_val; -} - -/** - * e1000_k1_workaround_lv - K1 Si workaround - * @hw: pointer to the HW structure - * - * Workaround to set the K1 beacon duration for 82579 parts - **/ -static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 status_reg = 0; - u32 mac_reg; - - if (hw->mac.type != e1000_pch2lan) - goto out; - - /* Set K1 beacon duration based on 1Gbps speed or otherwise */ - ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); - if (ret_val) - goto out; - - if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) - == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { - mac_reg = er32(FEXTNVM4); - mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; - - if (status_reg & HV_M_STATUS_SPEED_1000) - mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; - else - mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; - - ew32(FEXTNVM4, mac_reg); - } - -out: - return ret_val; -} - -/** - * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware - * @hw: pointer to the HW structure - * @gate: boolean set to true to gate, false to ungate - * - * Gate/ungate the automatic PHY configuration via hardware; perform - * the configuration via software instead. - **/ -static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) -{ - u32 extcnf_ctrl; - - if (hw->mac.type != e1000_pch2lan) - return; - - extcnf_ctrl = er32(EXTCNF_CTRL); - - if (gate) - extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; - else - extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; - - ew32(EXTCNF_CTRL, extcnf_ctrl); - return; -} - -/** - * e1000_lan_init_done_ich8lan - Check for PHY config completion - * @hw: pointer to the HW structure - * - * Check the appropriate indication that the MAC has finished configuring the - * PHY after a software reset. - **/ -static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) -{ - u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; - - /* Wait for basic configuration to complete before proceeding */ - do { - data = er32(STATUS); - data &= E1000_STATUS_LAN_INIT_DONE; - udelay(100); - } while ((!data) && --loop); - - /* - * If basic configuration is incomplete before the above loop - * count reaches 0, loading the configuration from NVM will - * leave the PHY in a bad state possibly resulting in no link. 
- */ - if (loop == 0) - e_dbg("LAN_INIT_DONE not set, increase timeout\n"); - - /* Clear the Init Done bit for the next init event */ - data = er32(STATUS); - data &= ~E1000_STATUS_LAN_INIT_DONE; - ew32(STATUS, data); -} - -/** - * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset - * @hw: pointer to the HW structure - **/ -static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 reg; - - if (e1000_check_reset_block(hw)) - goto out; - - /* Allow time for h/w to get to quiescent state after reset */ - usleep_range(10000, 20000); - - /* Perform any necessary post-reset workarounds */ - switch (hw->mac.type) { - case e1000_pchlan: - ret_val = e1000_hv_phy_workarounds_ich8lan(hw); - if (ret_val) - goto out; - break; - case e1000_pch2lan: - ret_val = e1000_lv_phy_workarounds_ich8lan(hw); - if (ret_val) - goto out; - break; - default: - break; - } - - /* Clear the host wakeup bit after lcd reset */ - if (hw->mac.type >= e1000_pchlan) { - e1e_rphy(hw, BM_PORT_GEN_CFG, ®); - reg &= ~BM_WUC_HOST_WU_BIT; - e1e_wphy(hw, BM_PORT_GEN_CFG, reg); - } - - /* Configure the LCD with the extended configuration region in NVM */ - ret_val = e1000_sw_lcd_config_ich8lan(hw); - if (ret_val) - goto out; - - /* Configure the LCD with the OEM bits in NVM */ - ret_val = e1000_oem_bits_config_ich8lan(hw, true); - - if (hw->mac.type == e1000_pch2lan) { - /* Ungate automatic PHY configuration on non-managed 82579 */ - if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { - usleep_range(10000, 20000); - e1000_gate_hw_phy_config_ich8lan(hw, false); - } - - /* Set EEE LPI Update Timer to 200usec */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, - I82579_LPI_UPDATE_TIMER); - if (ret_val) - goto release; - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, - 0x1387); -release: - hw->phy.ops.release(hw); - } - -out: - return ret_val; -} - -/** - * e1000_phy_hw_reset_ich8lan - Performs a PHY reset - * @hw: pointer to the HW structure - * - * Resets the PHY - * This is a function pointer entry point called by drivers - * or other shared routines. - **/ -static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) -{ - s32 ret_val = 0; - - /* Gate automatic PHY configuration by hardware on non-managed 82579 */ - if ((hw->mac.type == e1000_pch2lan) && - !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) - e1000_gate_hw_phy_config_ich8lan(hw, true); - - ret_val = e1000e_phy_hw_reset_generic(hw); - if (ret_val) - goto out; - - ret_val = e1000_post_phy_reset_ich8lan(hw); - -out: - return ret_val; -} - -/** - * e1000_set_lplu_state_pchlan - Set Low Power Link Up state - * @hw: pointer to the HW structure - * @active: true to enable LPLU, false to disable - * - * Sets the LPLU state according to the active flag. For PCH, if OEM write - * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set - * the phy speed. This function will manually set the LPLU bit and restart - * auto-neg as hw would do. D3 and D0 LPLU will call the same function - * since it configures the same bit. 
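The EMI write pair in the post-reset path above follows a simple indirection convention on the 82579: one PHY register (I82579_EMI_ADDR) selects an EMI-internal register and the next (I82579_EMI_DATA) carries the payload. A sketch of the 200 us LPI update-timer write, with a toy register-write stub in place of write_reg_locked():

#include <stdio.h>
#include <stdint.h>

#define I82579_EMI_ADDR         0x10
#define I82579_EMI_DATA         0x11
#define I82579_LPI_UPDATE_TIMER 0x4805

/* Toy PHY-register write, standing in for the locked write op */
static void wr(uint32_t reg, uint16_t val)
{
	printf("phy[0x%02x] <- 0x%04x\n", reg, val);
}

int main(void)
{
	/* EMI indirection: address first, then data. 0x1387 = 4999 units
	 * of 40 ns, i.e. the ~200 us update timer the code above programs. */
	wr(I82579_EMI_ADDR, I82579_LPI_UPDATE_TIMER);
	wr(I82579_EMI_DATA, 0x1387);
	return 0;
}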
- **/ -static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) -{ - s32 ret_val = 0; - u16 oem_reg; - - ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); - if (ret_val) - goto out; - - if (active) - oem_reg |= HV_OEM_BITS_LPLU; - else - oem_reg &= ~HV_OEM_BITS_LPLU; - - oem_reg |= HV_OEM_BITS_RESTART_AN; - ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg); - -out: - return ret_val; -} - -/** - * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state - * @hw: pointer to the HW structure - * @active: true to enable LPLU, false to disable - * - * Sets the LPLU D0 state according to the active flag. When - * activating LPLU this function also disables smart speed - * and vice versa. LPLU will not be activated unless the - * device autonegotiation advertisement meets standards of - * either 10 or 10/100 or 10/100/1000 at all duplexes. - * This is a function pointer entry point only called by - * PHY setup routines. - **/ -static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) -{ - struct e1000_phy_info *phy = &hw->phy; - u32 phy_ctrl; - s32 ret_val = 0; - u16 data; - - if (phy->type == e1000_phy_ife) - return ret_val; - - phy_ctrl = er32(PHY_CTRL); - - if (active) { - phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; - ew32(PHY_CTRL, phy_ctrl); - - if (phy->type != e1000_phy_igp_3) - return 0; - - /* - * Call gig speed drop workaround on LPLU before accessing - * any PHY registers - */ - if (hw->mac.type == e1000_ich8lan) - e1000e_gig_downshift_workaround_ich8lan(hw); - - /* When LPLU is enabled, we should disable SmartSpeed */ - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); - if (ret_val) - return ret_val; - } else { - phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; - ew32(PHY_CTRL, phy_ctrl); - - if (phy->type != e1000_phy_igp_3) - return 0; - - /* - * LPLU and SmartSpeed are mutually exclusive. LPLU is used - * during Dx states where the power conservation is most - * important. During driver activity we should enable - * SmartSpeed, so performance is maintained. - */ - if (phy->smart_speed == e1000_smart_speed_on) { - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - return ret_val; - - data |= IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - return ret_val; - } else if (phy->smart_speed == e1000_smart_speed_off) { - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - return ret_val; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - return ret_val; - } - } - - return 0; -} - -/** - * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state - * @hw: pointer to the HW structure - * @active: true to enable LPLU, false to disable - * - * Sets the LPLU D3 state according to the active flag. When - * activating LPLU this function also disables smart speed - * and vice versa. LPLU will not be activated unless the - * device autonegotiation advertisement meets standards of - * either 10 or 10/100 or 10/100/1000 at all duplexes. - * This is a function pointer entry point only called by - * PHY setup routines. 
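Both LPLU entry points share the policy stated above: enabling LPLU always clears SmartSpeed, while disabling LPLU restores SmartSpeed according to the configured preference. A compact sketch of that policy; smart_speed_bit() is a made-up helper name for illustration, not driver API.

#include <stdio.h>

enum smart_speed { SS_DEFAULT, SS_ON, SS_OFF };

/* LPLU and SmartSpeed are mutually exclusive: LPLU on forces SmartSpeed
 * off; LPLU off lets SmartSpeed follow the configured preference. */
static int smart_speed_bit(int lplu_active, enum smart_speed pref)
{
	if (lplu_active)
		return 0;
	if (pref == SS_ON)
		return 1;
	if (pref == SS_OFF)
		return 0;
	return -1;	/* SS_DEFAULT: leave the PHY register untouched */
}

int main(void)
{
	printf("%d %d %d\n",
	       smart_speed_bit(1, SS_ON),	/* 0: LPLU wins */
	       smart_speed_bit(0, SS_ON),	/* 1: preference honored */
	       smart_speed_bit(0, SS_DEFAULT));	/* -1: no write needed */
	return 0;
}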
- **/ -static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) -{ - struct e1000_phy_info *phy = &hw->phy; - u32 phy_ctrl; - s32 ret_val; - u16 data; - - phy_ctrl = er32(PHY_CTRL); - - if (!active) { - phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; - ew32(PHY_CTRL, phy_ctrl); - - if (phy->type != e1000_phy_igp_3) - return 0; - - /* - * LPLU and SmartSpeed are mutually exclusive. LPLU is used - * during Dx states where the power conservation is most - * important. During driver activity we should enable - * SmartSpeed, so performance is maintained. - */ - if (phy->smart_speed == e1000_smart_speed_on) { - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - return ret_val; - - data |= IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - return ret_val; - } else if (phy->smart_speed == e1000_smart_speed_off) { - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - return ret_val; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - return ret_val; - } - } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || - (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || - (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { - phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; - ew32(PHY_CTRL, phy_ctrl); - - if (phy->type != e1000_phy_igp_3) - return 0; - - /* - * Call gig speed drop workaround on LPLU before accessing - * any PHY registers - */ - if (hw->mac.type == e1000_ich8lan) - e1000e_gig_downshift_workaround_ich8lan(hw); - - /* When LPLU is enabled, we should disable SmartSpeed */ - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); - if (ret_val) - return ret_val; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); - } - - return 0; -} - -/** - * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 - * @hw: pointer to the HW structure - * @bank: pointer to the variable that returns the active bank - * - * Reads signature byte from the NVM using the flash access registers. - * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
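The signature test described here is pure arithmetic and can be checked in isolation. A small sketch, assuming the 0xC0 mask / 0x80 value pair implied by "bits 15:14 = 10b" and an illustrative 2048-word bank size:

#include <stdint.h>
#include <stdio.h>

#define NVM_SIG_WORD	0x13	/* word holding the bank signature  */
#define VALID_SIG_MASK	0xC0	/* bits 15:14 of the signature word */
#define SIG_VALUE	0x80	/* 10b == valid bank                */

/* byte offset of the signature byte within a bank: high byte of
 * word 0x13, two bytes per word */
static uint32_t sig_byte_offset(uint32_t bank_byte_offset)
{
	return bank_byte_offset + NVM_SIG_WORD * 2 + 1;
}

static int bank_is_valid(uint8_t sig_byte)
{
	return (sig_byte & VALID_SIG_MASK) == SIG_VALUE;
}

int main(void)
{
	uint32_t bank1 = 2048 * sizeof(uint16_t);	/* illustrative bank size */

	printf("bank0 sig @ 0x%x, bank1 sig @ 0x%x\n",
	       (unsigned)sig_byte_offset(0), (unsigned)sig_byte_offset(bank1));
	printf("0x83 -> %d, 0xC3 -> %d\n",
	       bank_is_valid(0x83), bank_is_valid(0xC3));
	return 0;
}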
- **/ -static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) -{ - u32 eecd; - struct e1000_nvm_info *nvm = &hw->nvm; - u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); - u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; - u8 sig_byte = 0; - s32 ret_val = 0; - - switch (hw->mac.type) { - case e1000_ich8lan: - case e1000_ich9lan: - eecd = er32(EECD); - if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == - E1000_EECD_SEC1VAL_VALID_MASK) { - if (eecd & E1000_EECD_SEC1VAL) - *bank = 1; - else - *bank = 0; - - return 0; - } - e_dbg("Unable to determine valid NVM bank via EEC - " - "reading flash signature\n"); - /* fall-thru */ - default: - /* set bank to 0 in case flash read fails */ - *bank = 0; - - /* Check bank 0 */ - ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, - &sig_byte); - if (ret_val) - return ret_val; - if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == - E1000_ICH_NVM_SIG_VALUE) { - *bank = 0; - return 0; - } - - /* Check bank 1 */ - ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + - bank1_offset, - &sig_byte); - if (ret_val) - return ret_val; - if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == - E1000_ICH_NVM_SIG_VALUE) { - *bank = 1; - return 0; - } - - e_dbg("ERROR: No valid NVM bank present\n"); - return -E1000_ERR_NVM; - } - - return 0; -} - -/** - * e1000_read_nvm_ich8lan - Read word(s) from the NVM - * @hw: pointer to the HW structure - * @offset: The offset (in bytes) of the word(s) to read. - * @words: Size of data to read in words - * @data: Pointer to the word(s) to read at offset. - * - * Reads a word(s) from the NVM using the flash access registers. - **/ -static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - u32 act_offset; - s32 ret_val = 0; - u32 bank = 0; - u16 i, word; - - if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || - (words == 0)) { - e_dbg("nvm parameter(s) out of bounds\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } - - nvm->ops.acquire(hw); - - ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); - if (ret_val) { - e_dbg("Could not detect valid bank, assuming bank 0\n"); - bank = 0; - } - - act_offset = (bank) ? nvm->flash_bank_size : 0; - act_offset += offset; - - ret_val = 0; - for (i = 0; i < words; i++) { - if (dev_spec->shadow_ram[offset+i].modified) { - data[i] = dev_spec->shadow_ram[offset+i].value; - } else { - ret_val = e1000_read_flash_word_ich8lan(hw, - act_offset + i, - &word); - if (ret_val) - break; - data[i] = word; - } - } - - nvm->ops.release(hw); - -out: - if (ret_val) - e_dbg("NVM read error: %d\n", ret_val); - - return ret_val; -} - -/** - * e1000_flash_cycle_init_ich8lan - Initialize flash - * @hw: pointer to the HW structure - * - * This function does initial flash setup so that a new read/write/erase cycle - * can be started. - **/ -static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) -{ - union ich8_hws_flash_status hsfsts; - s32 ret_val = -E1000_ERR_NVM; - - hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - - /* Check if the flash descriptor is valid */ - if (hsfsts.hsf_status.fldesvalid == 0) { - e_dbg("Flash descriptor invalid. 
" - "SW Sequencing must be used.\n"); - return -E1000_ERR_NVM; - } - - /* Clear FCERR and DAEL in hw status by writing 1 */ - hsfsts.hsf_status.flcerr = 1; - hsfsts.hsf_status.dael = 1; - - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); - - /* - * Either we should have a hardware SPI cycle in progress - * bit to check against, in order to start a new cycle or - * FDONE bit should be changed in the hardware so that it - * is 1 after hardware reset, which can then be used as an - * indication whether a cycle is in progress or has been - * completed. - */ - - if (hsfsts.hsf_status.flcinprog == 0) { - /* - * There is no cycle running at present, - * so we can start a cycle. - * Begin by setting Flash Cycle Done. - */ - hsfsts.hsf_status.flcdone = 1; - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); - ret_val = 0; - } else { - s32 i = 0; - - /* - * Otherwise poll for sometime so the current - * cycle has a chance to end before giving up. - */ - for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { - hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcinprog == 0) { - ret_val = 0; - break; - } - udelay(1); - } - if (ret_val == 0) { - /* - * Successful in waiting for previous cycle to timeout, - * now set the Flash Cycle Done. - */ - hsfsts.hsf_status.flcdone = 1; - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); - } else { - e_dbg("Flash controller busy, cannot get access\n"); - } - } - - return ret_val; -} - -/** - * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) - * @hw: pointer to the HW structure - * @timeout: maximum time to wait for completion - * - * This function starts a flash cycle and waits for its completion. - **/ -static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) -{ - union ich8_hws_flash_ctrl hsflctl; - union ich8_hws_flash_status hsfsts; - s32 ret_val = -E1000_ERR_NVM; - u32 i = 0; - - /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); - hsflctl.hsf_ctrl.flcgo = 1; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); - - /* wait till FDONE bit is set to 1 */ - do { - hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcdone == 1) - break; - udelay(1); - } while (i++ < timeout); - - if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) - return 0; - - return ret_val; -} - -/** - * e1000_read_flash_word_ich8lan - Read word from flash - * @hw: pointer to the HW structure - * @offset: offset to data location - * @data: pointer to the location for storing the data - * - * Reads the flash word at offset into data. Offset is converted - * to bytes before read. - **/ -static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, - u16 *data) -{ - /* Must convert offset into bytes. */ - offset <<= 1; - - return e1000_read_flash_data_ich8lan(hw, offset, 2, data); -} - -/** - * e1000_read_flash_byte_ich8lan - Read byte from flash - * @hw: pointer to the HW structure - * @offset: The offset of the byte to read. - * @data: Pointer to a byte to store the value read. - * - * Reads a single byte from the NVM using the flash access registers. 
- **/ -static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, - u8 *data) -{ - s32 ret_val; - u16 word = 0; - - ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); - if (ret_val) - return ret_val; - - *data = (u8)word; - - return 0; -} - -/** - * e1000_read_flash_data_ich8lan - Read byte or word from NVM - * @hw: pointer to the HW structure - * @offset: The offset (in bytes) of the byte or word to read. - * @size: Size of data to read, 1=byte 2=word - * @data: Pointer to the word to store the value read. - * - * Reads a byte or word from the NVM using the flash access registers. - **/ -static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, - u8 size, u16 *data) -{ - union ich8_hws_flash_status hsfsts; - union ich8_hws_flash_ctrl hsflctl; - u32 flash_linear_addr; - u32 flash_data = 0; - s32 ret_val = -E1000_ERR_NVM; - u8 count = 0; - - if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) - return -E1000_ERR_NVM; - - flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + - hw->nvm.flash_base_addr; - - do { - udelay(1); - /* Steps */ - ret_val = e1000_flash_cycle_init_ich8lan(hw); - if (ret_val != 0) - break; - - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); - /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ - hsflctl.hsf_ctrl.fldbcount = size - 1; - hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); - - ew32flash(ICH_FLASH_FADDR, flash_linear_addr); - - ret_val = e1000_flash_cycle_ich8lan(hw, - ICH_FLASH_READ_COMMAND_TIMEOUT); - - /* - * Check if FCERR is set to 1, if set to 1, clear it - * and try the whole sequence a few more times, else - * read in (shift in) the Flash Data0, the order is - * least significant byte first msb to lsb - */ - if (ret_val == 0) { - flash_data = er32flash(ICH_FLASH_FDATA0); - if (size == 1) - *data = (u8)(flash_data & 0x000000FF); - else if (size == 2) - *data = (u16)(flash_data & 0x0000FFFF); - break; - } else { - /* - * If we've gotten here, then things are probably - * completely hosed, but if the error condition is - * detected, it won't hurt to give it another try... - * ICH_FLASH_CYCLE_REPEAT_COUNT times. - */ - hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcerr == 1) { - /* Repeat for some time before giving up. */ - continue; - } else if (hsfsts.hsf_status.flcdone == 0) { - e_dbg("Timeout error - flash cycle " - "did not complete.\n"); - break; - } - } - } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); - - return ret_val; -} - -/** - * e1000_write_nvm_ich8lan - Write word(s) to the NVM - * @hw: pointer to the HW structure - * @offset: The offset (in bytes) of the word(s) to write. - * @words: Size of data to write in words - * @data: Pointer to the word(s) to write at offset. - * - * Writes a byte or word to the NVM using the flash access registers. 
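Both the read path above and the write path below share the same address setup: mask the byte offset into the linear-address field, add the NVM's flash base, and program fldbcount as the access size minus one. A compact sketch, assuming a 24-bit linear-address mask:

#include <stdint.h>
#include <stdio.h>

#define LINEAR_ADDR_MASK 0x00FFFFFF	/* assumed 24-bit linear field */

static uint32_t flash_linear_addr(uint32_t byte_offset, uint32_t flash_base)
{
	return (byte_offset & LINEAR_ADDR_MASK) + flash_base;
}

int main(void)
{
	uint8_t size = 2;			/* word access        */
	uint8_t fldbcount = size - 1;		/* 0 = byte, 1 = word */

	printf("addr 0x%08x, fldbcount %u\n",
	       (unsigned)flash_linear_addr(0x13 * 2, 0x1000),
	       (unsigned)fldbcount);
	return 0;
}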
- **/ -static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - u16 i; - - if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || - (words == 0)) { - e_dbg("nvm parameter(s) out of bounds\n"); - return -E1000_ERR_NVM; - } - - nvm->ops.acquire(hw); - - for (i = 0; i < words; i++) { - dev_spec->shadow_ram[offset+i].modified = true; - dev_spec->shadow_ram[offset+i].value = data[i]; - } - - nvm->ops.release(hw); - - return 0; -} - -/** - * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM - * @hw: pointer to the HW structure - * - * The NVM checksum is updated by calling the generic update_nvm_checksum, - * which writes the checksum to the shadow ram. The changes in the shadow - * ram are then committed to the EEPROM by processing each bank at a time - * checking for the modified bit and writing only the pending changes. - * After a successful commit, the shadow ram is cleared and is ready for - * future writes. - **/ -static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - u32 i, act_offset, new_bank_offset, old_bank_offset, bank; - s32 ret_val; - u16 data; - - ret_val = e1000e_update_nvm_checksum_generic(hw); - if (ret_val) - goto out; - - if (nvm->type != e1000_nvm_flash_sw) - goto out; - - nvm->ops.acquire(hw); - - /* - * We're writing to the opposite bank so if we're on bank 1, - * write to bank 0 etc. We also need to erase the segment that - * is going to be written - */ - ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); - if (ret_val) { - e_dbg("Could not detect valid bank, assuming bank 0\n"); - bank = 0; - } - - if (bank == 0) { - new_bank_offset = nvm->flash_bank_size; - old_bank_offset = 0; - ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); - if (ret_val) - goto release; - } else { - old_bank_offset = nvm->flash_bank_size; - new_bank_offset = 0; - ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); - if (ret_val) - goto release; - } - - for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { - /* - * Determine whether to write the value stored - * in the other NVM bank or a modified value stored - * in the shadow RAM - */ - if (dev_spec->shadow_ram[i].modified) { - data = dev_spec->shadow_ram[i].value; - } else { - ret_val = e1000_read_flash_word_ich8lan(hw, i + - old_bank_offset, - &data); - if (ret_val) - break; - } - - /* - * If the word is 0x13, then make sure the signature bits - * (15:14) are 11b until the commit has completed. - * This will allow us to write 10b which indicates the - * signature is valid. We want to do this after the write - * has completed so that we don't mark the segment valid - * while the write is still in progress - */ - if (i == E1000_ICH_NVM_SIG_WORD) - data |= E1000_ICH_NVM_SIG_MASK; - - /* Convert offset to bytes. */ - act_offset = (i + new_bank_offset) << 1; - - udelay(100); - /* Write the bytes to the new bank. */ - ret_val = e1000_retry_write_flash_byte_ich8lan(hw, - act_offset, - (u8)data); - if (ret_val) - break; - - udelay(100); - ret_val = e1000_retry_write_flash_byte_ich8lan(hw, - act_offset + 1, - (u8)(data >> 8)); - if (ret_val) - break; - } - - /* - * Don't bother writing the segment valid bits if sector - * programming failed. 
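The per-word decision in the commit loop above can be isolated: take the shadow-RAM value when the word was modified, otherwise carry over the old bank's value, and keep the signature word's bits 15:14 reading 11b until the commit completes. A sketch, assuming a 0xC000 value for the signature mask:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVM_SIG_WORD	0x13
#define SIG_MASK	0xC000		/* assumed: bits 15:14 */

struct shadow_word {
	bool modified;
	uint16_t value;
};

static uint16_t word_to_commit(const struct shadow_word *sr,
			       uint16_t old_bank_value, uint32_t i)
{
	uint16_t data = sr[i].modified ? sr[i].value : old_bank_value;

	/* keep the signature reading "invalid" (11b) during the commit */
	if (i == NVM_SIG_WORD)
		data |= SIG_MASK;
	return data;
}

int main(void)
{
	struct shadow_word sr[0x20] = { { false, 0 } };
	uint16_t w;

	sr[NVM_SIG_WORD].modified = true;
	sr[NVM_SIG_WORD].value = 0x1234;

	w = word_to_commit(sr, 0xFFFF, NVM_SIG_WORD);
	/* written to flash as two bytes, low byte first */
	printf("word 0x%04x -> low 0x%02x, high 0x%02x\n",
	       (unsigned)w, (unsigned)(w & 0xFF), (unsigned)(w >> 8));
	return 0;
}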
- */ - if (ret_val) { - /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ - e_dbg("Flash commit failed.\n"); - goto release; - } - - /* - * Finally validate the new segment by setting bit 15:14 - * to 10b in word 0x13 , this can be done without an - * erase as well since these bits are 11 to start with - * and we need to change bit 14 to 0b - */ - act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; - ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); - if (ret_val) - goto release; - - data &= 0xBFFF; - ret_val = e1000_retry_write_flash_byte_ich8lan(hw, - act_offset * 2 + 1, - (u8)(data >> 8)); - if (ret_val) - goto release; - - /* - * And invalidate the previously valid segment by setting - * its signature word (0x13) high_byte to 0b. This can be - * done without an erase because flash erase sets all bits - * to 1's. We can write 1's to 0's without an erase - */ - act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; - ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); - if (ret_val) - goto release; - - /* Great! Everything worked, we can now clear the cached entries. */ - for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { - dev_spec->shadow_ram[i].modified = false; - dev_spec->shadow_ram[i].value = 0xFFFF; - } - -release: - nvm->ops.release(hw); - - /* - * Reload the EEPROM, or else modifications will not appear - * until after the next adapter reset. - */ - if (!ret_val) { - e1000e_reload_nvm(hw); - usleep_range(10000, 20000); - } - -out: - if (ret_val) - e_dbg("NVM update error: %d\n", ret_val); - - return ret_val; -} - -/** - * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum - * @hw: pointer to the HW structure - * - * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. - * If the bit is 0, that the EEPROM had been modified, but the checksum was not - * calculated, in which case we need to calculate the checksum and set bit 6. - **/ -static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) -{ - s32 ret_val; - u16 data; - - /* - * Read 0x19 and check bit 6. If this bit is 0, the checksum - * needs to be fixed. This bit is an indication that the NVM - * was prepared by OEM software and did not calculate the - * checksum...a likely scenario. - */ - ret_val = e1000_read_nvm(hw, 0x19, 1, &data); - if (ret_val) - return ret_val; - - if ((data & 0x40) == 0) { - data |= 0x40; - ret_val = e1000_write_nvm(hw, 0x19, 1, &data); - if (ret_val) - return ret_val; - ret_val = e1000e_update_nvm_checksum(hw); - if (ret_val) - return ret_val; - } - - return e1000e_validate_nvm_checksum_generic(hw); -} - -/** - * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only - * @hw: pointer to the HW structure - * - * To prevent malicious write/erase of the NVM, set it to be read-only - * so that the hardware ignores all write/erase cycles of the NVM via - * the flash control registers. The shadow-ram copy of the NVM will - * still be updated, however any updates to this copy will not stick - * across driver reloads. 
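The protected-range programming below packs a base block, a limit block, and the write-protect enable into one register. A sketch of that packing; the 13-bit field widths and positions are assumptions for illustration, not the documented PR0 layout:

#include <stdint.h>
#include <stdio.h>

/* field widths/positions are assumptions for illustration only */
union flash_protected_range {
	struct {
		uint32_t base	: 13;	/* first protected block */
		uint32_t rsvd1	: 2;
		uint32_t rpe	: 1;	/* read-protect enable   */
		uint32_t limit	: 13;	/* last protected block  */
		uint32_t rsvd2	: 2;
		uint32_t wpe	: 1;	/* write-protect enable  */
	} range;
	uint32_t regval;
};

int main(void)
{
	uint32_t gfpreg = 0x00200010;	/* example: limit 0x20, base 0x10 */
	union flash_protected_range pr0 = { .regval = 0 };

	pr0.range.base = gfpreg & 0x1FFF;
	pr0.range.limit = (gfpreg >> 16) & 0x1FFF;
	pr0.range.wpe = 1;

	printf("PR0 = 0x%08x\n", (unsigned)pr0.regval);
	return 0;
}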
- **/ -void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - union ich8_flash_protected_range pr0; - union ich8_hws_flash_status hsfsts; - u32 gfpreg; - - nvm->ops.acquire(hw); - - gfpreg = er32flash(ICH_FLASH_GFPREG); - - /* Write-protect GbE Sector of NVM */ - pr0.regval = er32flash(ICH_FLASH_PR0); - pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK; - pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK); - pr0.range.wpe = true; - ew32flash(ICH_FLASH_PR0, pr0.regval); - - /* - * Lock down a subset of GbE Flash Control Registers, e.g. - * PR0 to prevent the write-protection from being lifted. - * Once FLOCKDN is set, the registers protected by it cannot - * be written until FLOCKDN is cleared by a hardware reset. - */ - hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - hsfsts.hsf_status.flockdn = true; - ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); - - nvm->ops.release(hw); -} - -/** - * e1000_write_flash_data_ich8lan - Writes bytes to the NVM - * @hw: pointer to the HW structure - * @offset: The offset (in bytes) of the byte/word to read. - * @size: Size of data to read, 1=byte 2=word - * @data: The byte(s) to write to the NVM. - * - * Writes one/two bytes to the NVM using the flash access registers. - **/ -static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, - u8 size, u16 data) -{ - union ich8_hws_flash_status hsfsts; - union ich8_hws_flash_ctrl hsflctl; - u32 flash_linear_addr; - u32 flash_data = 0; - s32 ret_val; - u8 count = 0; - - if (size < 1 || size > 2 || data > size * 0xff || - offset > ICH_FLASH_LINEAR_ADDR_MASK) - return -E1000_ERR_NVM; - - flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + - hw->nvm.flash_base_addr; - - do { - udelay(1); - /* Steps */ - ret_val = e1000_flash_cycle_init_ich8lan(hw); - if (ret_val) - break; - - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); - /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ - hsflctl.hsf_ctrl.fldbcount = size -1; - hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); - - ew32flash(ICH_FLASH_FADDR, flash_linear_addr); - - if (size == 1) - flash_data = (u32)data & 0x00FF; - else - flash_data = (u32)data; - - ew32flash(ICH_FLASH_FDATA0, flash_data); - - /* - * check if FCERR is set to 1 , if set to 1, clear it - * and try the whole sequence a few more times else done - */ - ret_val = e1000_flash_cycle_ich8lan(hw, - ICH_FLASH_WRITE_COMMAND_TIMEOUT); - if (!ret_val) - break; - - /* - * If we're here, then things are most likely - * completely hosed, but if the error condition - * is detected, it won't hurt to give it another - * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. - */ - hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcerr == 1) - /* Repeat for some time before giving up. */ - continue; - if (hsfsts.hsf_status.flcdone == 0) { - e_dbg("Timeout error - flash cycle " - "did not complete."); - break; - } - } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); - - return ret_val; -} - -/** - * e1000_write_flash_byte_ich8lan - Write a single byte to NVM - * @hw: pointer to the HW structure - * @offset: The index of the byte to read. - * @data: The byte to write to the NVM. - * - * Writes a single byte to the NVM using the flash access registers. 
- **/ -static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, - u8 data) -{ - u16 word = (u16)data; - - return e1000_write_flash_data_ich8lan(hw, offset, 1, word); -} - -/** - * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM - * @hw: pointer to the HW structure - * @offset: The offset of the byte to write. - * @byte: The byte to write to the NVM. - * - * Writes a single byte to the NVM using the flash access registers. - * Goes through a retry algorithm before giving up. - **/ -static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, - u32 offset, u8 byte) -{ - s32 ret_val; - u16 program_retries; - - ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); - if (!ret_val) - return ret_val; - - for (program_retries = 0; program_retries < 100; program_retries++) { - e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); - udelay(100); - ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); - if (!ret_val) - break; - } - if (program_retries == 100) - return -E1000_ERR_NVM; - - return 0; -} - -/** - * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM - * @hw: pointer to the HW structure - * @bank: 0 for first bank, 1 for second bank, etc. - * - * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. - * bank N is 4096 * N + flash_reg_addr. - **/ -static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - union ich8_hws_flash_status hsfsts; - union ich8_hws_flash_ctrl hsflctl; - u32 flash_linear_addr; - /* bank size is in 16bit words - adjust to bytes */ - u32 flash_bank_size = nvm->flash_bank_size * 2; - s32 ret_val; - s32 count = 0; - s32 j, iteration, sector_size; - - hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - - /* - * Determine HW Sector size: Read BERASE bits of hw flash status - * register - * 00: The Hw sector is 256 bytes, hence we need to erase 16 - * consecutive sectors. The start index for the nth Hw sector - * can be calculated as = bank * 4096 + n * 256 - * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. - * The start index for the nth Hw sector can be calculated - * as = bank * 4096 - * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 - * (ich9 only, otherwise error condition) - * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 - */ - switch (hsfsts.hsf_status.berasesz) { - case 0: - /* Hw sector size 256 */ - sector_size = ICH_FLASH_SEG_SIZE_256; - iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; - break; - case 1: - sector_size = ICH_FLASH_SEG_SIZE_4K; - iteration = 1; - break; - case 2: - sector_size = ICH_FLASH_SEG_SIZE_8K; - iteration = 1; - break; - case 3: - sector_size = ICH_FLASH_SEG_SIZE_64K; - iteration = 1; - break; - default: - return -E1000_ERR_NVM; - } - - /* Start with the base address, then add the sector offset. */ - flash_linear_addr = hw->nvm.flash_base_addr; - flash_linear_addr += (bank) ? flash_bank_size : 0; - - for (j = 0; j < iteration ; j++) { - do { - /* Steps */ - ret_val = e1000_flash_cycle_init_ich8lan(hw); - if (ret_val) - return ret_val; - - /* - * Write a value 11 (block Erase) in Flash - * Cycle field in hw flash control - */ - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); - hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); - - /* - * Write the last 24 bits of an index within the - * block into Flash Linear address field in Flash - * Address. 
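The BERASE decode in e1000_erase_flash_bank_ich8lan() above maps a 2-bit field onto a (sector size, iteration count) pair for one 4 KB bank. The same decode as a standalone helper:

#include <stdint.h>
#include <stdio.h>

static int berase_decode(uint8_t berasesz, uint32_t bank_bytes,
			 uint32_t *sector_size, uint32_t *iterations)
{
	switch (berasesz) {
	case 0:	/* 256-byte sectors: erase many per 4 KB bank */
		*sector_size = 256;
		*iterations = bank_bytes / 256;
		break;
	case 1:
		*sector_size = 4096;
		*iterations = 1;
		break;
	case 2:	/* 8 KB sectors, ich9 only */
		*sector_size = 8192;
		*iterations = 1;
		break;
	case 3:
		*sector_size = 65536;
		*iterations = 1;
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	uint32_t ss, it;

	if (!berase_decode(0, 4096, &ss, &it))
		printf("erase %u sectors of %u bytes\n",
		       (unsigned)it, (unsigned)ss);
	return 0;
}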
- */
- flash_linear_addr += (j * sector_size);
- ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
-
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_ERASE_COMMAND_TIMEOUT);
- if (ret_val == 0)
- break;
-
- /*
- * Check if FCERR is set to 1. If 1,
- * clear it and try the whole sequence
- * a few more times, else we are done.
- */
- hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
- if (hsfsts.hsf_status.flcerr == 1)
- /* repeat for some time before giving up */
- continue;
- else if (hsfsts.hsf_status.flcdone == 0)
- return ret_val;
- } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
- }
-
- return 0;
-}
-
-/**
- * e1000_valid_led_default_ich8lan - Set the default LED settings
- * @hw: pointer to the HW structure
- * @data: Pointer to the LED settings
- *
- * Reads the LED default settings from the NVM to data. If the NVM LED
- * setting is all 0's or F's, set the LED default to a valid LED default
- * setting.
- **/
-static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
-{
- s32 ret_val;
-
- ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
-
- if (*data == ID_LED_RESERVED_0000 ||
- *data == ID_LED_RESERVED_FFFF)
- *data = ID_LED_DEFAULT_ICH8LAN;
-
- return 0;
-}
-
-/**
- * e1000_id_led_init_pchlan - store LED configurations
- * @hw: pointer to the HW structure
- *
- * PCH does not control LEDs via the LEDCTL register, rather it uses
- * the PHY LED configuration register.
- *
- * PCH also does not have an "always on" or "always off" mode which
- * complicates the ID feature. Instead of using the "on" mode to indicate
- * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()),
- * use "link_up" mode. The LEDs will still ID on request if there is no
- * link based on logic in e1000_led_[on|off]_pchlan().
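The mode mapping described here works on 5-bit PHY LED fields: "on" becomes link_up, "off" becomes link_up with the invert bit set. A sketch of the unpack/repack arithmetic (the mode encodings are illustrative):

#include <stdint.h>
#include <stdio.h>

#define LED_MODE_LINK_UP 0x0		/* assumed mode encoding */
#define LED_IVRT	 (1 << 4)	/* assumed invert bit    */
#define LED_FIELD_MASK	 0x1F		/* one 5-bit LED field   */

static uint32_t set_led_field(uint32_t ledctl, int i, uint32_t mode)
{
	int shift = i * 5;

	ledctl &= ~((uint32_t)LED_FIELD_MASK << shift);
	ledctl |= mode << shift;
	return ledctl;
}

int main(void)
{
	uint16_t nvm_data = 0x4321;	/* four 4-bit ID-LED nibbles */
	uint32_t ledctl = 0;
	int i;

	for (i = 0; i < 4; i++) {
		uint8_t nibble = (nvm_data >> (i * 4)) & 0xF;
		/* for the demo, treat odd nibbles as "off" (inverted) */
		uint32_t mode = LED_MODE_LINK_UP |
				((nibble & 1) ? LED_IVRT : 0);

		ledctl = set_led_field(ledctl, i, mode);
	}
	printf("ledctl = 0x%08x\n", (unsigned)ledctl);
	return 0;
}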
- **/ -static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; - const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; - const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; - u16 data, i, temp, shift; - - /* Get default ID LED modes */ - ret_val = hw->nvm.ops.valid_led_default(hw, &data); - if (ret_val) - goto out; - - mac->ledctl_default = er32(LEDCTL); - mac->ledctl_mode1 = mac->ledctl_default; - mac->ledctl_mode2 = mac->ledctl_default; - - for (i = 0; i < 4; i++) { - temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; - shift = (i * 5); - switch (temp) { - case ID_LED_ON1_DEF2: - case ID_LED_ON1_ON2: - case ID_LED_ON1_OFF2: - mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); - mac->ledctl_mode1 |= (ledctl_on << shift); - break; - case ID_LED_OFF1_DEF2: - case ID_LED_OFF1_ON2: - case ID_LED_OFF1_OFF2: - mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); - mac->ledctl_mode1 |= (ledctl_off << shift); - break; - default: - /* Do nothing */ - break; - } - switch (temp) { - case ID_LED_DEF1_ON2: - case ID_LED_ON1_ON2: - case ID_LED_OFF1_ON2: - mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); - mac->ledctl_mode2 |= (ledctl_on << shift); - break; - case ID_LED_DEF1_OFF2: - case ID_LED_ON1_OFF2: - case ID_LED_OFF1_OFF2: - mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); - mac->ledctl_mode2 |= (ledctl_off << shift); - break; - default: - /* Do nothing */ - break; - } - } - -out: - return ret_val; -} - -/** - * e1000_get_bus_info_ich8lan - Get/Set the bus type and width - * @hw: pointer to the HW structure - * - * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability - * register, so the the bus width is hard coded. - **/ -static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) -{ - struct e1000_bus_info *bus = &hw->bus; - s32 ret_val; - - ret_val = e1000e_get_bus_info_pcie(hw); - - /* - * ICH devices are "PCI Express"-ish. They have - * a configuration space, but do not contain - * PCI Express Capability registers, so bus width - * must be hardcoded. - */ - if (bus->width == e1000_bus_width_unknown) - bus->width = e1000_bus_width_pcie_x1; - - return ret_val; -} - -/** - * e1000_reset_hw_ich8lan - Reset the hardware - * @hw: pointer to the HW structure - * - * Does a full reset of the hardware which includes a reset of the PHY and - * MAC. - **/ -static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) -{ - struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - u16 reg; - u32 ctrl, kab; - s32 ret_val; - - /* - * Prevent the PCI-E bus from sticking if there is no TLP connection - * on the last TLP read/write transaction when MAC is reset. - */ - ret_val = e1000e_disable_pcie_master(hw); - if (ret_val) - e_dbg("PCI-E Master disable polling has failed.\n"); - - e_dbg("Masking off all interrupts\n"); - ew32(IMC, 0xffffffff); - - /* - * Disable the Transmit and Receive units. Then delay to allow - * any pending transactions to complete before we hit the MAC - * with the global reset. - */ - ew32(RCTL, 0); - ew32(TCTL, E1000_TCTL_PSP); - e1e_flush(); - - usleep_range(10000, 20000); - - /* Workaround for ICH8 bit corruption issue in FIFO memory */ - if (hw->mac.type == e1000_ich8lan) { - /* Set Tx and Rx buffer allocation to 8k apiece. */ - ew32(PBA, E1000_PBA_8K); - /* Set Packet Buffer Size to 16k. 
*/ - ew32(PBS, E1000_PBS_16K); - } - - if (hw->mac.type == e1000_pchlan) { - /* Save the NVM K1 bit setting*/ - ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®); - if (ret_val) - return ret_val; - - if (reg & E1000_NVM_K1_ENABLE) - dev_spec->nvm_k1_enabled = true; - else - dev_spec->nvm_k1_enabled = false; - } - - ctrl = er32(CTRL); - - if (!e1000_check_reset_block(hw)) { - /* - * Full-chip reset requires MAC and PHY reset at the same - * time to make sure the interface between MAC and the - * external PHY is reset. - */ - ctrl |= E1000_CTRL_PHY_RST; - - /* - * Gate automatic PHY configuration by hardware on - * non-managed 82579 - */ - if ((hw->mac.type == e1000_pch2lan) && - !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) - e1000_gate_hw_phy_config_ich8lan(hw, true); - } - ret_val = e1000_acquire_swflag_ich8lan(hw); - e_dbg("Issuing a global reset to ich8lan\n"); - ew32(CTRL, (ctrl | E1000_CTRL_RST)); - /* cannot issue a flush here because it hangs the hardware */ - msleep(20); - - if (!ret_val) - mutex_unlock(&swflag_mutex); - - if (ctrl & E1000_CTRL_PHY_RST) { - ret_val = hw->phy.ops.get_cfg_done(hw); - if (ret_val) - goto out; - - ret_val = e1000_post_phy_reset_ich8lan(hw); - if (ret_val) - goto out; - } - - /* - * For PCH, this write will make sure that any noise - * will be detected as a CRC error and be dropped rather than show up - * as a bad packet to the DMA engine. - */ - if (hw->mac.type == e1000_pchlan) - ew32(CRC_OFFSET, 0x65656565); - - ew32(IMC, 0xffffffff); - er32(ICR); - - kab = er32(KABGTXD); - kab |= E1000_KABGTXD_BGSQLBIAS; - ew32(KABGTXD, kab); - -out: - return ret_val; -} - -/** - * e1000_init_hw_ich8lan - Initialize the hardware - * @hw: pointer to the HW structure - * - * Prepares the hardware for transmit and receive by doing the following: - * - initialize hardware bits - * - initialize LED identification - * - setup receive address registers - * - setup flow control - * - setup transmit descriptors - * - clear statistics - **/ -static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 ctrl_ext, txdctl, snoop; - s32 ret_val; - u16 i; - - e1000_initialize_hw_bits_ich8lan(hw); - - /* Initialize identification LED */ - ret_val = mac->ops.id_led_init(hw); - if (ret_val) - e_dbg("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ - - /* Setup the receive address. */ - e1000e_init_rx_addrs(hw, mac->rar_entry_count); - - /* Zero out the Multicast HASH table */ - e_dbg("Zeroing the MTA\n"); - for (i = 0; i < mac->mta_reg_count; i++) - E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); - - /* - * The 82578 Rx buffer will stall if wakeup is enabled in host and - * the ME. Disable wakeup by clearing the host wakeup bit. - * Reset the phy after disabling host wakeup to reset the Rx buffer. 
- */ - if (hw->phy.type == e1000_phy_82578) { - e1e_rphy(hw, BM_PORT_GEN_CFG, &i); - i &= ~BM_WUC_HOST_WU_BIT; - e1e_wphy(hw, BM_PORT_GEN_CFG, i); - ret_val = e1000_phy_hw_reset_ich8lan(hw); - if (ret_val) - return ret_val; - } - - /* Setup link and flow control */ - ret_val = e1000_setup_link_ich8lan(hw); - - /* Set the transmit descriptor write-back policy for both queues */ - txdctl = er32(TXDCTL(0)); - txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB; - txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | - E1000_TXDCTL_MAX_TX_DESC_PREFETCH; - ew32(TXDCTL(0), txdctl); - txdctl = er32(TXDCTL(1)); - txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB; - txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | - E1000_TXDCTL_MAX_TX_DESC_PREFETCH; - ew32(TXDCTL(1), txdctl); - - /* - * ICH8 has opposite polarity of no_snoop bits. - * By default, we should use snoop behavior. - */ - if (mac->type == e1000_ich8lan) - snoop = PCIE_ICH8_SNOOP_ALL; - else - snoop = (u32) ~(PCIE_NO_SNOOP_ALL); - e1000e_set_pcie_no_snoop(hw, snoop); - - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_RO_DIS; - ew32(CTRL_EXT, ctrl_ext); - - /* - * Clear all of the statistics registers (clear on read). It is - * important that we do this after we have tried to establish link - * because the symbol error count will increment wildly if there - * is no link. - */ - e1000_clear_hw_cntrs_ich8lan(hw); - - return 0; -} -/** - * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits - * @hw: pointer to the HW structure - * - * Sets/Clears required hardware bits necessary for correctly setting up the - * hardware for transmit and receive. - **/ -static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) -{ - u32 reg; - - /* Extended Device Control */ - reg = er32(CTRL_EXT); - reg |= (1 << 22); - /* Enable PHY low-power state when MAC is at D3 w/o WoL */ - if (hw->mac.type >= e1000_pchlan) - reg |= E1000_CTRL_EXT_PHYPDEN; - ew32(CTRL_EXT, reg); - - /* Transmit Descriptor Control 0 */ - reg = er32(TXDCTL(0)); - reg |= (1 << 22); - ew32(TXDCTL(0), reg); - - /* Transmit Descriptor Control 1 */ - reg = er32(TXDCTL(1)); - reg |= (1 << 22); - ew32(TXDCTL(1), reg); - - /* Transmit Arbitration Control 0 */ - reg = er32(TARC(0)); - if (hw->mac.type == e1000_ich8lan) - reg |= (1 << 28) | (1 << 29); - reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); - ew32(TARC(0), reg); - - /* Transmit Arbitration Control 1 */ - reg = er32(TARC(1)); - if (er32(TCTL) & E1000_TCTL_MULR) - reg &= ~(1 << 28); - else - reg |= (1 << 28); - reg |= (1 << 24) | (1 << 26) | (1 << 30); - ew32(TARC(1), reg); - - /* Device Status */ - if (hw->mac.type == e1000_ich8lan) { - reg = er32(STATUS); - reg &= ~(1 << 31); - ew32(STATUS, reg); - } - - /* - * work-around descriptor data corruption issue during nfs v2 udp - * traffic, just disable the nfs filtering capability - */ - reg = er32(RFCTL); - reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); - ew32(RFCTL, reg); -} - -/** - * e1000_setup_link_ich8lan - Setup flow control and link settings - * @hw: pointer to the HW structure - * - * Determines which flow control settings to use, then configures flow - * control. Calls the appropriate media-specific link configuration - * function. Assuming the adapter has a valid link partner, a valid link - * should be established. Assumes the hardware has previously been reset - * and the transmitter and receiver are not enabled. 
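The flow-control defaulting described here is a two-way choice, shown below in isolation: Rx pause only on PCH (to dodge the Tx flow-control hang noted in the workaround), full flow control everywhere else:

#include <stdio.h>

enum fc_mode { FC_DEFAULT, FC_RX_PAUSE, FC_FULL };

static enum fc_mode pick_fc(enum fc_mode requested, int is_pchlan)
{
	if (requested != FC_DEFAULT)
		return requested;
	/* Tx flow control hangs PCH parts, so only advertise Rx pause */
	return is_pchlan ? FC_RX_PAUSE : FC_FULL;
}

int main(void)
{
	printf("pch -> %d, ich9 -> %d\n",
	       pick_fc(FC_DEFAULT, 1), pick_fc(FC_DEFAULT, 0));
	return 0;
}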
- **/ -static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) -{ - s32 ret_val; - - if (e1000_check_reset_block(hw)) - return 0; - - /* - * ICH parts do not have a word in the NVM to determine - * the default flow control setting, so we explicitly - * set it to full. - */ - if (hw->fc.requested_mode == e1000_fc_default) { - /* Workaround h/w hang when Tx flow control enabled */ - if (hw->mac.type == e1000_pchlan) - hw->fc.requested_mode = e1000_fc_rx_pause; - else - hw->fc.requested_mode = e1000_fc_full; - } - - /* - * Save off the requested flow control mode for use later. Depending - * on the link partner's capabilities, we may or may not use this mode. - */ - hw->fc.current_mode = hw->fc.requested_mode; - - e_dbg("After fix-ups FlowControl is now = %x\n", - hw->fc.current_mode); - - /* Continue to configure the copper link. */ - ret_val = e1000_setup_copper_link_ich8lan(hw); - if (ret_val) - return ret_val; - - ew32(FCTTV, hw->fc.pause_time); - if ((hw->phy.type == e1000_phy_82578) || - (hw->phy.type == e1000_phy_82579) || - (hw->phy.type == e1000_phy_82577)) { - ew32(FCRTV_PCH, hw->fc.refresh_time); - - ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), - hw->fc.pause_time); - if (ret_val) - return ret_val; - } - - return e1000e_set_fc_watermarks(hw); -} - -/** - * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface - * @hw: pointer to the HW structure - * - * Configures the kumeran interface to the PHY to wait the appropriate time - * when polling the PHY, then call the generic setup_copper_link to finish - * configuring the copper link. - **/ -static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) -{ - u32 ctrl; - s32 ret_val; - u16 reg_data; - - ctrl = er32(CTRL); - ctrl |= E1000_CTRL_SLU; - ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - ew32(CTRL, ctrl); - - /* - * Set the mac to wait the maximum time between each iteration - * and increase the max iterations when polling the phy; - * this fixes erroneous timeouts at 10Mbps. 
- */ - ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF); - if (ret_val) - return ret_val; - ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - ®_data); - if (ret_val) - return ret_val; - reg_data |= 0x3F; - ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - reg_data); - if (ret_val) - return ret_val; - - switch (hw->phy.type) { - case e1000_phy_igp_3: - ret_val = e1000e_copper_link_setup_igp(hw); - if (ret_val) - return ret_val; - break; - case e1000_phy_bm: - case e1000_phy_82578: - ret_val = e1000e_copper_link_setup_m88(hw); - if (ret_val) - return ret_val; - break; - case e1000_phy_82577: - case e1000_phy_82579: - ret_val = e1000_copper_link_setup_82577(hw); - if (ret_val) - return ret_val; - break; - case e1000_phy_ife: - ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); - if (ret_val) - return ret_val; - - reg_data &= ~IFE_PMC_AUTO_MDIX; - - switch (hw->phy.mdix) { - case 1: - reg_data &= ~IFE_PMC_FORCE_MDIX; - break; - case 2: - reg_data |= IFE_PMC_FORCE_MDIX; - break; - case 0: - default: - reg_data |= IFE_PMC_AUTO_MDIX; - break; - } - ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); - if (ret_val) - return ret_val; - break; - default: - break; - } - return e1000e_setup_copper_link(hw); -} - -/** - * e1000_get_link_up_info_ich8lan - Get current link speed and duplex - * @hw: pointer to the HW structure - * @speed: pointer to store current link speed - * @duplex: pointer to store the current link duplex - * - * Calls the generic get_speed_and_duplex to retrieve the current link - * information and then calls the Kumeran lock loss workaround for links at - * gigabit speeds. - **/ -static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, - u16 *duplex) -{ - s32 ret_val; - - ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); - if (ret_val) - return ret_val; - - if ((hw->mac.type == e1000_ich8lan) && - (hw->phy.type == e1000_phy_igp_3) && - (*speed == SPEED_1000)) { - ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); - } - - return ret_val; -} - -/** - * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround - * @hw: pointer to the HW structure - * - * Work-around for 82566 Kumeran PCS lock loss: - * On link status change (i.e. PCI reset, speed change) and link is up and - * speed is gigabit- - * 0) if workaround is optionally disabled do nothing - * 1) wait 1ms for Kumeran link to come up - * 2) check Kumeran Diagnostic register PCS lock loss bit - * 3) if not set the link is locked (all is good), otherwise... - * 4) reset the PHY - * 5) repeat up to 10 times - * Note: this is only called for IGP3 copper when speed is 1gb. - **/ -static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) -{ - struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - u32 phy_ctrl; - s32 ret_val; - u16 i, data; - bool link; - - if (!dev_spec->kmrn_lock_loss_workaround_enabled) - return 0; - - /* - * Make sure link is up before proceeding. If not just return. 
- * Attempting this while link is negotiating fouled up link - * stability - */ - ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); - if (!link) - return 0; - - for (i = 0; i < 10; i++) { - /* read once to clear */ - ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); - if (ret_val) - return ret_val; - /* and again to get new status */ - ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); - if (ret_val) - return ret_val; - - /* check for PCS lock */ - if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) - return 0; - - /* Issue PHY reset */ - e1000_phy_hw_reset(hw); - mdelay(5); - } - /* Disable GigE link negotiation */ - phy_ctrl = er32(PHY_CTRL); - phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | - E1000_PHY_CTRL_NOND0A_GBE_DISABLE); - ew32(PHY_CTRL, phy_ctrl); - - /* - * Call gig speed drop workaround on Gig disable before accessing - * any PHY registers - */ - e1000e_gig_downshift_workaround_ich8lan(hw); - - /* unable to acquire PCS lock */ - return -E1000_ERR_PHY; -} - -/** - * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state - * @hw: pointer to the HW structure - * @state: boolean value used to set the current Kumeran workaround state - * - * If ICH8, set the current Kumeran workaround state (enabled - true - * /disabled - false). - **/ -void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, - bool state) -{ - struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - - if (hw->mac.type != e1000_ich8lan) { - e_dbg("Workaround applies to ICH8 only.\n"); - return; - } - - dev_spec->kmrn_lock_loss_workaround_enabled = state; -} - -/** - * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 - * @hw: pointer to the HW structure - * - * Workaround for 82566 power-down on D3 entry: - * 1) disable gigabit link - * 2) write VR power-down enable - * 3) read it back - * Continue if successful, else issue LCD reset and repeat - **/ -void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) -{ - u32 reg; - u16 data; - u8 retry = 0; - - if (hw->phy.type != e1000_phy_igp_3) - return; - - /* Try the workaround twice (if needed) */ - do { - /* Disable link */ - reg = er32(PHY_CTRL); - reg |= (E1000_PHY_CTRL_GBE_DISABLE | - E1000_PHY_CTRL_NOND0A_GBE_DISABLE); - ew32(PHY_CTRL, reg); - - /* - * Call gig speed drop workaround on Gig disable before - * accessing any PHY registers - */ - if (hw->mac.type == e1000_ich8lan) - e1000e_gig_downshift_workaround_ich8lan(hw); - - /* Write VR power-down enable */ - e1e_rphy(hw, IGP3_VR_CTRL, &data); - data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; - e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); - - /* Read it back and test */ - e1e_rphy(hw, IGP3_VR_CTRL, &data); - data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; - if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) - break; - - /* Issue PHY reset and repeat at most one more time */ - reg = er32(CTRL); - ew32(CTRL, reg | E1000_CTRL_PHY_RST); - retry++; - } while (retry); -} - -/** - * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working - * @hw: pointer to the HW structure - * - * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), - * LPLU, Gig disable, MDIC PHY reset): - * 1) Set Kumeran Near-end loopback - * 2) Clear Kumeran Near-end loopback - * Should only be called for ICH8[m] devices with IGP_3 Phy. 
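The workaround below is literally a set-then-clear pulse of the Kumeran near-end-loopback bit. A sketch with the register modeled as a plain variable (the bit position is an assumption; the real accesses go through e1000e_read_kmrn_reg()/e1000e_write_kmrn_reg()):

#include <stdint.h>
#include <stdio.h>

#define DIAG_NELPBK (1 << 2)	/* assumed bit: near-end loopback */

static uint16_t kmrn_diag;	/* stands in for the Kumeran register */

static uint16_t kmrn_read(void)		{ return kmrn_diag; }
static void kmrn_write(uint16_t v)	{ kmrn_diag = v; }

static void gig_downshift_pulse(void)
{
	uint16_t reg = kmrn_read();

	kmrn_write(reg | DIAG_NELPBK);	/* 1) set near-end loopback   */
	kmrn_write(reg & ~DIAG_NELPBK);	/* 2) clear it straight after */
}

int main(void)
{
	gig_downshift_pulse();
	printf("diag = 0x%04x\n", (unsigned)kmrn_read());
	return 0;
}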
- **/ -void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) -{ - s32 ret_val; - u16 reg_data; - - if ((hw->mac.type != e1000_ich8lan) || - (hw->phy.type != e1000_phy_igp_3)) - return; - - ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, - ®_data); - if (ret_val) - return; - reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; - ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, - reg_data); - if (ret_val) - return; - reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; - ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, - reg_data); -} - -/** - * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx - * @hw: pointer to the HW structure - * - * During S0 to Sx transition, it is possible the link remains at gig - * instead of negotiating to a lower speed. Before going to Sx, set - * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation - * to a lower speed. For PCH and newer parts, the OEM bits PHY register - * (LED, GbE disable and LPLU configurations) also needs to be written. - **/ -void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) -{ - u32 phy_ctrl; - s32 ret_val; - - phy_ctrl = er32(PHY_CTRL); - phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; - ew32(PHY_CTRL, phy_ctrl); - - if (hw->mac.type >= e1000_pchlan) { - e1000_oem_bits_config_ich8lan(hw, false); - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return; - e1000_write_smbus_addr(hw); - hw->phy.ops.release(hw); - } -} - -/** - * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 - * @hw: pointer to the HW structure - * - * During Sx to S0 transitions on non-managed devices or managed devices - * on which PHY resets are not blocked, if the PHY registers cannot be - * accessed properly by the s/w toggle the LANPHYPC value to power cycle - * the PHY. - **/ -void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) -{ - u32 fwsm; - - if (hw->mac.type != e1000_pch2lan) - return; - - fwsm = er32(FWSM); - if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) { - u16 phy_id1, phy_id2; - s32 ret_val; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) { - e_dbg("Failed to acquire PHY semaphore in resume\n"); - return; - } - - /* Test access to the PHY registers by reading the ID regs */ - ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); - if (ret_val) - goto release; - ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); - if (ret_val) - goto release; - - if (hw->phy.id == ((u32)(phy_id1 << 16) | - (u32)(phy_id2 & PHY_REVISION_MASK))) - goto release; - - e1000_toggle_lanphypc_value_ich8lan(hw); - - hw->phy.ops.release(hw); - msleep(50); - e1000_phy_hw_reset(hw); - msleep(50); - return; - } - -release: - hw->phy.ops.release(hw); - - return; -} - -/** - * e1000_cleanup_led_ich8lan - Restore the default LED operation - * @hw: pointer to the HW structure - * - * Return the LED back to the default configuration. - **/ -static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) -{ - if (hw->phy.type == e1000_phy_ife) - return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); - - ew32(LEDCTL, hw->mac.ledctl_default); - return 0; -} - -/** - * e1000_led_on_ich8lan - Turn LEDs on - * @hw: pointer to the HW structure - * - * Turn on the LEDs. 
- **/ -static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) -{ - if (hw->phy.type == e1000_phy_ife) - return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, - (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); - - ew32(LEDCTL, hw->mac.ledctl_mode2); - return 0; -} - -/** - * e1000_led_off_ich8lan - Turn LEDs off - * @hw: pointer to the HW structure - * - * Turn off the LEDs. - **/ -static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) -{ - if (hw->phy.type == e1000_phy_ife) - return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, - (IFE_PSCL_PROBE_MODE | - IFE_PSCL_PROBE_LEDS_OFF)); - - ew32(LEDCTL, hw->mac.ledctl_mode1); - return 0; -} - -/** - * e1000_setup_led_pchlan - Configures SW controllable LED - * @hw: pointer to the HW structure - * - * This prepares the SW controllable LED for use. - **/ -static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) -{ - return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); -} - -/** - * e1000_cleanup_led_pchlan - Restore the default LED operation - * @hw: pointer to the HW structure - * - * Return the LED back to the default configuration. - **/ -static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) -{ - return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); -} - -/** - * e1000_led_on_pchlan - Turn LEDs on - * @hw: pointer to the HW structure - * - * Turn on the LEDs. - **/ -static s32 e1000_led_on_pchlan(struct e1000_hw *hw) -{ - u16 data = (u16)hw->mac.ledctl_mode2; - u32 i, led; - - /* - * If no link, then turn LED on by setting the invert bit - * for each LED that's mode is "link_up" in ledctl_mode2. - */ - if (!(er32(STATUS) & E1000_STATUS_LU)) { - for (i = 0; i < 3; i++) { - led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; - if ((led & E1000_PHY_LED0_MODE_MASK) != - E1000_LEDCTL_MODE_LINK_UP) - continue; - if (led & E1000_PHY_LED0_IVRT) - data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); - else - data |= (E1000_PHY_LED0_IVRT << (i * 5)); - } - } - - return e1e_wphy(hw, HV_LED_CONFIG, data); -} - -/** - * e1000_led_off_pchlan - Turn LEDs off - * @hw: pointer to the HW structure - * - * Turn off the LEDs. - **/ -static s32 e1000_led_off_pchlan(struct e1000_hw *hw) -{ - u16 data = (u16)hw->mac.ledctl_mode1; - u32 i, led; - - /* - * If no link, then turn LED off by clearing the invert bit - * for each LED that's mode is "link_up" in ledctl_mode1. - */ - if (!(er32(STATUS) & E1000_STATUS_LU)) { - for (i = 0; i < 3; i++) { - led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; - if ((led & E1000_PHY_LED0_MODE_MASK) != - E1000_LEDCTL_MODE_LINK_UP) - continue; - if (led & E1000_PHY_LED0_IVRT) - data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); - else - data |= (E1000_PHY_LED0_IVRT << (i * 5)); - } - } - - return e1e_wphy(hw, HV_LED_CONFIG, data); -} - -/** - * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset - * @hw: pointer to the HW structure - * - * Read appropriate register for the config done bit for completion status - * and configure the PHY through s/w for EEPROM-less parts. - * - * NOTE: some silicon which is EEPROM-less will fail trying to read the - * config done bit, so only an error is logged and continues. If we were - * to return with error, EEPROM-less silicon would not be able to be reset - * or change link. 
- **/
-static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
-{
- s32 ret_val = 0;
- u32 bank = 0;
- u32 status;
-
- e1000e_get_cfg_done(hw);
-
- /* Wait for indication from h/w that it has completed basic config */
- if (hw->mac.type >= e1000_ich10lan) {
- e1000_lan_init_done_ich8lan(hw);
- } else {
- ret_val = e1000e_get_auto_rd_done(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no EEPROM, and failing here would
- * prevent getting link.
- */
- e_dbg("Auto Read Done did not complete\n");
- ret_val = 0;
- }
- }
-
- /* Clear PHY Reset Asserted bit */
- status = er32(STATUS);
- if (status & E1000_STATUS_PHYRA)
- ew32(STATUS, status & ~E1000_STATUS_PHYRA);
- else
- e_dbg("PHY Reset Asserted not set - needs delay\n");
-
- /* If EEPROM is not marked present, init the IGP 3 PHY manually */
- if (hw->mac.type <= e1000_ich9lan) {
- if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
- (hw->phy.type == e1000_phy_igp_3)) {
- e1000e_phy_init_script_igp3(hw);
- }
- } else {
- if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
- /* Maybe we should do a basic PHY config */
- e_dbg("EEPROM not present\n");
- ret_val = -E1000_ERR_CONFIG;
- }
- }
-
- return ret_val;
-}
-
-/**
- * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
- * @hw: pointer to the HW structure
- *
- * In the case of a PHY power down to save power, or to turn off link during a
- * driver unload, or when wake on LAN is not enabled, remove the link.
- **/
-static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
-{
- /* If the management interface is not enabled, then power down */
- if (!(hw->mac.ops.check_mng_mode(hw) ||
- hw->phy.ops.check_reset_block(hw)))
- e1000_power_down_phy_copper(hw);
-}
-
-/**
- * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
- * @hw: pointer to the HW structure
- *
- * Clears hardware counters specific to the silicon family and calls
- * clear_hw_cntrs_generic to clear all general purpose counters.
- **/ -static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) -{ - u16 phy_data; - s32 ret_val; - - e1000e_clear_hw_cntrs_base(hw); - - er32(ALGNERRC); - er32(RXERRC); - er32(TNCRS); - er32(CEXTERR); - er32(TSCTC); - er32(TSCTFC); - - er32(MGTPRC); - er32(MGTPDC); - er32(MGTPTC); - - er32(IAC); - er32(ICRXOC); - - /* Clear PHY statistics registers */ - if ((hw->phy.type == e1000_phy_82578) || - (hw->phy.type == e1000_phy_82579) || - (hw->phy.type == e1000_phy_82577)) { - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return; - ret_val = hw->phy.ops.set_page(hw, - HV_STATS_PAGE << IGP_PAGE_SHIFT); - if (ret_val) - goto release; - hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); - hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); -release: - hw->phy.ops.release(hw); - } -} - -static struct e1000_mac_operations ich8_mac_ops = { - .id_led_init = e1000e_id_led_init, - /* check_mng_mode dependent on mac type */ - .check_for_link = e1000_check_for_copper_link_ich8lan, - /* cleanup_led dependent on mac type */ - .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, - .get_bus_info = e1000_get_bus_info_ich8lan, - .set_lan_id = e1000_set_lan_id_single_port, - .get_link_up_info = e1000_get_link_up_info_ich8lan, - /* led_on dependent on mac type */ - /* led_off dependent on mac type */ - .update_mc_addr_list = e1000e_update_mc_addr_list_generic, - .reset_hw = e1000_reset_hw_ich8lan, - .init_hw = e1000_init_hw_ich8lan, - .setup_link = e1000_setup_link_ich8lan, - .setup_physical_interface= e1000_setup_copper_link_ich8lan, - /* id_led_init dependent on mac type */ -}; - -static struct e1000_phy_operations ich8_phy_ops = { - .acquire = e1000_acquire_swflag_ich8lan, - .check_reset_block = e1000_check_reset_block_ich8lan, - .commit = NULL, - .get_cfg_done = e1000_get_cfg_done_ich8lan, - .get_cable_length = e1000e_get_cable_length_igp_2, - .read_reg = e1000e_read_phy_reg_igp, - .release = e1000_release_swflag_ich8lan, - .reset = e1000_phy_hw_reset_ich8lan, - .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, - .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, - .write_reg = e1000e_write_phy_reg_igp, -}; - -static struct e1000_nvm_operations ich8_nvm_ops = { - .acquire = e1000_acquire_nvm_ich8lan, - .read = e1000_read_nvm_ich8lan, - .release = e1000_release_nvm_ich8lan, - .update = e1000_update_nvm_checksum_ich8lan, - .valid_led_default = e1000_valid_led_default_ich8lan, - .validate = e1000_validate_nvm_checksum_ich8lan, - .write = e1000_write_nvm_ich8lan, -}; - -struct e1000_info e1000_ich8_info = { - .mac = e1000_ich8lan, - .flags = FLAG_HAS_WOL - | FLAG_IS_ICH - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_CTRLEXT_ON_LOAD - | FLAG_HAS_AMT - | FLAG_HAS_FLASH - | FLAG_APME_IN_WUC, - .pba = 8, - .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, - .get_variants = e1000_get_variants_ich8lan, - 
.mac_ops = &ich8_mac_ops, - .phy_ops = &ich8_phy_ops, - .nvm_ops = &ich8_nvm_ops, -}; - -struct e1000_info e1000_ich9_info = { - .mac = e1000_ich9lan, - .flags = FLAG_HAS_JUMBO_FRAMES - | FLAG_IS_ICH - | FLAG_HAS_WOL - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_CTRLEXT_ON_LOAD - | FLAG_HAS_AMT - | FLAG_HAS_ERT - | FLAG_HAS_FLASH - | FLAG_APME_IN_WUC, - .pba = 10, - .max_hw_frame_size = DEFAULT_JUMBO, - .get_variants = e1000_get_variants_ich8lan, - .mac_ops = &ich8_mac_ops, - .phy_ops = &ich8_phy_ops, - .nvm_ops = &ich8_nvm_ops, -}; - -struct e1000_info e1000_ich10_info = { - .mac = e1000_ich10lan, - .flags = FLAG_HAS_JUMBO_FRAMES - | FLAG_IS_ICH - | FLAG_HAS_WOL - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_CTRLEXT_ON_LOAD - | FLAG_HAS_AMT - | FLAG_HAS_ERT - | FLAG_HAS_FLASH - | FLAG_APME_IN_WUC, - .pba = 10, - .max_hw_frame_size = DEFAULT_JUMBO, - .get_variants = e1000_get_variants_ich8lan, - .mac_ops = &ich8_mac_ops, - .phy_ops = &ich8_phy_ops, - .nvm_ops = &ich8_nvm_ops, -}; - -struct e1000_info e1000_pch_info = { - .mac = e1000_pchlan, - .flags = FLAG_IS_ICH - | FLAG_HAS_WOL - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_CTRLEXT_ON_LOAD - | FLAG_HAS_AMT - | FLAG_HAS_FLASH - | FLAG_HAS_JUMBO_FRAMES - | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ - | FLAG_APME_IN_WUC, - .flags2 = FLAG2_HAS_PHY_STATS, - .pba = 26, - .max_hw_frame_size = 4096, - .get_variants = e1000_get_variants_ich8lan, - .mac_ops = &ich8_mac_ops, - .phy_ops = &ich8_phy_ops, - .nvm_ops = &ich8_nvm_ops, -}; - -struct e1000_info e1000_pch2_info = { - .mac = e1000_pch2lan, - .flags = FLAG_IS_ICH - | FLAG_HAS_WOL - | FLAG_RX_CSUM_ENABLED - | FLAG_HAS_CTRLEXT_ON_LOAD - | FLAG_HAS_AMT - | FLAG_HAS_FLASH - | FLAG_HAS_JUMBO_FRAMES - | FLAG_APME_IN_WUC, - .flags2 = FLAG2_HAS_PHY_STATS - | FLAG2_HAS_EEE, - .pba = 26, - .max_hw_frame_size = DEFAULT_JUMBO, - .get_variants = e1000_get_variants_ich8lan, - .mac_ops = &ich8_mac_ops, - .phy_ops = &ich8_phy_ops, - .nvm_ops = &ich8_nvm_ops, -}; diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c deleted file mode 100644 index 7898a67..0000000 --- a/drivers/net/e1000e/lib.c +++ /dev/null @@ -1,2692 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "e1000.h" - -enum e1000_mng_mode { - e1000_mng_mode_none = 0, - e1000_mng_mode_asf, - e1000_mng_mode_pt, - e1000_mng_mode_ipmi, - e1000_mng_mode_host_if_only -}; - -#define E1000_FACTPS_MNGCG 0x20000000 - -/* Intel(R) Active Management Technology signature */ -#define E1000_IAMT_SIGNATURE 0x544D4149 - -/** - * e1000e_get_bus_info_pcie - Get PCIe bus information - * @hw: pointer to the HW structure - * - * Determines and stores the system bus information for a particular - * network interface. The following bus information is determined and stored: - * bus speed, bus width, type (PCIe), and PCIe function. - **/ -s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - struct e1000_bus_info *bus = &hw->bus; - struct e1000_adapter *adapter = hw->adapter; - u16 pcie_link_status, cap_offset; - - cap_offset = adapter->pdev->pcie_cap; - if (!cap_offset) { - bus->width = e1000_bus_width_unknown; - } else { - pci_read_config_word(adapter->pdev, - cap_offset + PCIE_LINK_STATUS, - &pcie_link_status); - bus->width = (enum e1000_bus_width)((pcie_link_status & - PCIE_LINK_WIDTH_MASK) >> - PCIE_LINK_WIDTH_SHIFT); - } - - mac->ops.set_lan_id(hw); - - return 0; -} - -/** - * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices - * - * @hw: pointer to the HW structure - * - * Determines the LAN function id by reading memory-mapped registers - * and swaps the port value if requested. - **/ -void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) -{ - struct e1000_bus_info *bus = &hw->bus; - u32 reg; - - /* - * The status register reports the correct function number - * for the device regardless of function swap state. - */ - reg = er32(STATUS); - bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; -} - -/** - * e1000_set_lan_id_single_port - Set LAN id for a single port device - * @hw: pointer to the HW structure - * - * Sets the LAN function id to zero for a single port device. - **/ -void e1000_set_lan_id_single_port(struct e1000_hw *hw) -{ - struct e1000_bus_info *bus = &hw->bus; - - bus->func = 0; -} - -/** - * e1000_clear_vfta_generic - Clear VLAN filter table - * @hw: pointer to the HW structure - * - * Clears the register array which contains the VLAN filter table by - * setting all the values to 0. - **/ -void e1000_clear_vfta_generic(struct e1000_hw *hw) -{ - u32 offset; - - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); - e1e_flush(); - } -} - -/** - * e1000_write_vfta_generic - Write value to VLAN filter table - * @hw: pointer to the HW structure - * @offset: register offset in VLAN filter table - * @value: register value written to VLAN filter table - * - * Writes value at the given offset in the register array which stores - * the VLAN filter table. - **/ -void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) -{ - E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); - e1e_flush(); -} - -/** - * e1000e_init_rx_addrs - Initialize receive address's - * @hw: pointer to the HW structure - * @rar_count: receive address registers - * - * Setup the receive address registers by setting the base receive address - * register to the devices MAC address and clearing all the other receive - * address registers to 0. 
- **/ -void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) -{ - u32 i; - u8 mac_addr[ETH_ALEN] = {0}; - - /* Setup the receive address */ - e_dbg("Programming MAC Address into RAR[0]\n"); - - e1000e_rar_set(hw, hw->mac.addr, 0); - - /* Zero out the other (rar_entry_count - 1) receive addresses */ - e_dbg("Clearing RAR[1-%u]\n", rar_count-1); - for (i = 1; i < rar_count; i++) - e1000e_rar_set(hw, mac_addr, i); -} - -/** - * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr - * @hw: pointer to the HW structure - * - * Checks the nvm for an alternate MAC address. An alternate MAC address - * can be setup by pre-boot software and must be treated like a permanent - * address and must override the actual permanent MAC address. If an - * alternate MAC address is found it is programmed into RAR0, replacing - * the permanent address that was installed into RAR0 by the Si on reset. - * This function will return SUCCESS unless it encounters an error while - * reading the EEPROM. - **/ -s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) -{ - u32 i; - s32 ret_val = 0; - u16 offset, nvm_alt_mac_addr_offset, nvm_data; - u8 alt_mac_addr[ETH_ALEN]; - - ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); - if (ret_val) - goto out; - - /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ - if (!((nvm_data & NVM_COMPAT_LOM) || - (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || - (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) - goto out; - - ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, - &nvm_alt_mac_addr_offset); - if (ret_val) { - e_dbg("NVM Read Error\n"); - goto out; - } - - if (nvm_alt_mac_addr_offset == 0xFFFF) { - /* There is no Alternate MAC Address */ - goto out; - } - - if (hw->bus.func == E1000_FUNC_1) - nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; - for (i = 0; i < ETH_ALEN; i += 2) { - offset = nvm_alt_mac_addr_offset + (i >> 1); - ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); - if (ret_val) { - e_dbg("NVM Read Error\n"); - goto out; - } - - alt_mac_addr[i] = (u8)(nvm_data & 0xFF); - alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); - } - - /* if multicast bit is set, the alternate address will not be used */ - if (is_multicast_ether_addr(alt_mac_addr)) { - e_dbg("Ignoring Alternate Mac Address with MC bit set\n"); - goto out; - } - - /* - * We have a valid alternate MAC address, and we want to treat it the - * same as the normal permanent MAC address stored by the HW into the - * RAR. Do this by mapping this address into RAR0. - */ - e1000e_rar_set(hw, alt_mac_addr, 0); - -out: - return ret_val; -} - -/** - * e1000e_rar_set - Set receive address register - * @hw: pointer to the HW structure - * @addr: pointer to the receive address - * @index: receive address array register - * - * Sets the receive address array register at index to the address passed - * in by addr. - **/ -void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) -{ - u32 rar_low, rar_high; - - /* - * HW expects these in little endian so we reverse the byte order - * from network order (big endian) to little endian - */ - rar_low = ((u32) addr[0] | - ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); - - /* If MAC address zero, no need to set the AV bit */ - if (rar_low || rar_high) - rar_high |= E1000_RAH_AV; - - /* - * Some bridges will combine consecutive 32-bit writes into - * a single burst write, which will malfunction on some parts. 
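The byte-order packing in e1000e_rar_set() above is easy to check in isolation. A minimal userspace sketch of the same fold; the AV bit value and the sample MAC are assumptions, not taken from this file:

#include <assert.h>
#include <stdint.h>

#define RAH_AV	0x80000000u	/* assumed bit position of the Address Valid bit */

static void pack_rar(const uint8_t *addr, uint32_t *ral, uint32_t *rah)
{
	/* network (big endian) byte order folded into little-endian words */
	*ral = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
	       ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	*rah = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

	if (*ral || *rah)	/* an all-zero address stays invalid */
		*rah |= RAH_AV;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1B, 0x21, 0xAB, 0xCD, 0xEF };
	uint32_t ral, rah;

	pack_rar(mac, &ral, &rah);
	assert(ral == 0xAB211B00);
	assert(rah == (RAH_AV | 0xEFCD));
	return 0;
}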
- * The flushes avoid this. - */ - ew32(RAL(index), rar_low); - e1e_flush(); - ew32(RAH(index), rar_high); - e1e_flush(); -} - -/** - * e1000_hash_mc_addr - Generate a multicast hash value - * @hw: pointer to the HW structure - * @mc_addr: pointer to a multicast address - * - * Generates a multicast address hash value which is used to determine - * the multicast filter table array address and new table value. See - * e1000_mta_set_generic() - **/ -static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) -{ - u32 hash_value, hash_mask; - u8 bit_shift = 0; - - /* Register count multiplied by bits per register */ - hash_mask = (hw->mac.mta_reg_count * 32) - 1; - - /* - * For a mc_filter_type of 0, bit_shift is the number of left-shifts - * where 0xFF would still fall within the hash mask. - */ - while (hash_mask >> bit_shift != 0xFF) - bit_shift++; - - /* - * The portion of the address that is used for the hash table - * is determined by the mc_filter_type setting. - * The algorithm is such that there is a total of 8 bits of shifting. - * The bit_shift for a mc_filter_type of 0 represents the number of - * left-shifts where the MSB of mc_addr[5] would still fall within - * the hash_mask. Case 0 does this exactly. Since there are a total - * of 8 bits of shifting, then mc_addr[4] will shift right the - * remaining number of bits. Thus 8 - bit_shift. The rest of the - * cases are a variation of this algorithm...essentially raising the - * number of bits to shift mc_addr[5] left, while still keeping the - * 8-bit shifting total. - * - * For example, given the following Destination MAC Address and an - * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), - * we can see that the bit_shift for case 0 is 4. These are the hash - * values resulting from each mc_filter_type... - * [0] [1] [2] [3] [4] [5] - * 01 AA 00 12 34 56 - * LSB MSB - * - * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 - * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 - * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 - * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 - */ - switch (hw->mac.mc_filter_type) { - default: - case 0: - break; - case 1: - bit_shift += 1; - break; - case 2: - bit_shift += 2; - break; - case 3: - bit_shift += 4; - break; - } - - hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | - (((u16) mc_addr[5]) << bit_shift))); - - return hash_value; -} - -/** - * e1000e_update_mc_addr_list_generic - Update Multicast addresses - * @hw: pointer to the HW structure - * @mc_addr_list: array of multicast addresses to program - * @mc_addr_count: number of multicast addresses to program - * - * Updates entire Multicast Table Array. - * The caller must have a packed mc_addr_list of multicast addresses. 
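The worked example in the hash comment above can be reproduced verbatim. A standalone sketch of the computation, checked against the documented case-0 and case-3 values for 01:AA:00:12:34:56 with 128 MTA registers:

#include <assert.h>
#include <stdint.h>

static uint32_t hash_mc_addr(const uint8_t *mc_addr, uint32_t mta_reg_count,
			     unsigned int mc_filter_type)
{
	uint32_t hash_mask = (mta_reg_count * 32) - 1;	/* 128 regs -> 0xFFF */
	uint8_t bit_shift = 0;

	/* number of left-shifts where 0xFF still falls within the mask */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	switch (mc_filter_type) {	/* cases 0..3, as in the table above */
	default:
	case 0: break;
	case 1: bit_shift += 1; break;
	case 2: bit_shift += 2; break;
	case 3: bit_shift += 4; break;
	}

	return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc_addr[5] << bit_shift));
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };

	assert(hash_mc_addr(mac, 128, 0) == 0x563);	/* case 0 from the table */
	assert(hash_mc_addr(mac, 128, 3) == 0x634);	/* case 3 from the table */
	return 0;
}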
- **/ -void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count) -{ - u32 hash_value, hash_bit, hash_reg; - int i; - - /* clear mta_shadow */ - memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); - - /* update mta_shadow from mc_addr_list */ - for (i = 0; (u32) i < mc_addr_count; i++) { - hash_value = e1000_hash_mc_addr(hw, mc_addr_list); - - hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); - hash_bit = hash_value & 0x1F; - - hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); - mc_addr_list += (ETH_ALEN); - } - - /* replace the entire MTA table */ - for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) - E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); - e1e_flush(); -} - -/** - * e1000e_clear_hw_cntrs_base - Clear base hardware counters - * @hw: pointer to the HW structure - * - * Clears the base hardware counters by reading the counter registers. - **/ -void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) -{ - er32(CRCERRS); - er32(SYMERRS); - er32(MPC); - er32(SCC); - er32(ECOL); - er32(MCC); - er32(LATECOL); - er32(COLC); - er32(DC); - er32(SEC); - er32(RLEC); - er32(XONRXC); - er32(XONTXC); - er32(XOFFRXC); - er32(XOFFTXC); - er32(FCRUC); - er32(GPRC); - er32(BPRC); - er32(MPRC); - er32(GPTC); - er32(GORCL); - er32(GORCH); - er32(GOTCL); - er32(GOTCH); - er32(RNBC); - er32(RUC); - er32(RFC); - er32(ROC); - er32(RJC); - er32(TORL); - er32(TORH); - er32(TOTL); - er32(TOTH); - er32(TPR); - er32(TPT); - er32(MPTC); - er32(BPTC); -} - -/** - * e1000e_check_for_copper_link - Check for link (Copper) - * @hw: pointer to the HW structure - * - * Checks to see of the link status of the hardware has changed. If a - * change in link status has been detected, then we read the PHY registers - * to get the current speed/duplex if link exists. - **/ -s32 e1000e_check_for_copper_link(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; - bool link; - - /* - * We only want to go out to the PHY registers to see if Auto-Neg - * has completed and/or if our link status has changed. The - * get_link_status flag is set upon receiving a Link Status - * Change or Rx Sequence Error interrupt. - */ - if (!mac->get_link_status) - return 0; - - /* - * First we want to see if the MII Status Register reports - * link. If so, then we want to get the current speed/duplex - * of the PHY. - */ - ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) - return ret_val; - - if (!link) - return ret_val; /* No link detected */ - - mac->get_link_status = false; - - /* - * Check if there was DownShift, must be checked - * immediately after link-up - */ - e1000e_check_downshift(hw); - - /* - * If we are forcing speed/duplex, then we simply return since - * we have already determined whether we have link or not. - */ - if (!mac->autoneg) { - ret_val = -E1000_ERR_CONFIG; - return ret_val; - } - - /* - * Auto-Neg is enabled. Auto Speed Detection takes care - * of MAC speed/duplex configuration. So we only need to - * configure Collision Distance in the MAC. - */ - e1000e_config_collision_dist(hw); - - /* - * Configure Flow Control now that Auto-Neg has completed. - * First, we need to restore the desired flow control - * settings because we may have had to re-autoneg with a - * different link partner. 
- */ - ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) - e_dbg("Error configuring flow control\n"); - - return ret_val; -} - -/** - * e1000e_check_for_fiber_link - Check for link (Fiber) - * @hw: pointer to the HW structure - * - * Checks for link up on the hardware. If link is not up and we have - * a signal, then we need to force link up. - **/ -s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 rxcw; - u32 ctrl; - u32 status; - s32 ret_val; - - ctrl = er32(CTRL); - status = er32(STATUS); - rxcw = er32(RXCW); - - /* - * If we don't have link (auto-negotiation failed or link partner - * cannot auto-negotiate), the cable is plugged in (we have signal), - * and our link partner is not trying to auto-negotiate with us (we - * are receiving idles or data), we need to force link up. We also - * need to give auto-negotiation time to complete, in case the cable - * was just plugged in. The autoneg_failed flag does this. - */ - /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ - if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && - (!(rxcw & E1000_RXCW_C))) { - if (mac->autoneg_failed == 0) { - mac->autoneg_failed = 1; - return 0; - } - e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); - - /* Disable auto-negotiation in the TXCW register */ - ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); - - /* Force link-up and also force full-duplex. */ - ctrl = er32(CTRL); - ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); - ew32(CTRL, ctrl); - - /* Configure Flow Control after forcing link up. */ - ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) { - e_dbg("Error configuring flow control\n"); - return ret_val; - } - } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { - /* - * If we are forcing link and we are receiving /C/ ordered - * sets, re-enable auto-negotiation in the TXCW register - * and disable forced link in the Device Control register - * in an attempt to auto-negotiate with our link partner. - */ - e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); - ew32(TXCW, mac->txcw); - ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); - - mac->serdes_has_link = true; - } - - return 0; -} - -/** - * e1000e_check_for_serdes_link - Check for link (Serdes) - * @hw: pointer to the HW structure - * - * Checks for link up on the hardware. If link is not up and we have - * a signal, then we need to force link up. - **/ -s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 rxcw; - u32 ctrl; - u32 status; - s32 ret_val; - - ctrl = er32(CTRL); - status = er32(STATUS); - rxcw = er32(RXCW); - - /* - * If we don't have link (auto-negotiation failed or link partner - * cannot auto-negotiate), and our link partner is not trying to - * auto-negotiate with us (we are receiving idles or data), - * we need to force link up. We also need to give auto-negotiation - * time to complete. - */ - /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ - if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { - if (mac->autoneg_failed == 0) { - mac->autoneg_failed = 1; - return 0; - } - e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); - - /* Disable auto-negotiation in the TXCW register */ - ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); - - /* Force link-up and also force full-duplex. */ - ctrl = er32(CTRL); - ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); - ew32(CTRL, ctrl); - - /* Configure Flow Control after forcing link up. 
*/ - ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) { - e_dbg("Error configuring flow control\n"); - return ret_val; - } - } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { - /* - * If we are forcing link and we are receiving /C/ ordered - * sets, re-enable auto-negotiation in the TXCW register - * and disable forced link in the Device Control register - * in an attempt to auto-negotiate with our link partner. - */ - e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); - ew32(TXCW, mac->txcw); - ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); - - mac->serdes_has_link = true; - } else if (!(E1000_TXCW_ANE & er32(TXCW))) { - /* - * If we force link for non-auto-negotiation switch, check - * link status based on MAC synchronization for internal - * serdes media type. - */ - /* SYNCH bit and IV bit are sticky. */ - udelay(10); - rxcw = er32(RXCW); - if (rxcw & E1000_RXCW_SYNCH) { - if (!(rxcw & E1000_RXCW_IV)) { - mac->serdes_has_link = true; - e_dbg("SERDES: Link up - forced.\n"); - } - } else { - mac->serdes_has_link = false; - e_dbg("SERDES: Link down - force failed.\n"); - } - } - - if (E1000_TXCW_ANE & er32(TXCW)) { - status = er32(STATUS); - if (status & E1000_STATUS_LU) { - /* SYNCH bit and IV bit are sticky, so reread rxcw. */ - udelay(10); - rxcw = er32(RXCW); - if (rxcw & E1000_RXCW_SYNCH) { - if (!(rxcw & E1000_RXCW_IV)) { - mac->serdes_has_link = true; - e_dbg("SERDES: Link up - autoneg " - "completed successfully.\n"); - } else { - mac->serdes_has_link = false; - e_dbg("SERDES: Link down - invalid" - "codewords detected in autoneg.\n"); - } - } else { - mac->serdes_has_link = false; - e_dbg("SERDES: Link down - no sync.\n"); - } - } else { - mac->serdes_has_link = false; - e_dbg("SERDES: Link down - autoneg failed\n"); - } - } - - return 0; -} - -/** - * e1000_set_default_fc_generic - Set flow control default values - * @hw: pointer to the HW structure - * - * Read the EEPROM for the default values for flow control and store the - * values. - **/ -static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) -{ - s32 ret_val; - u16 nvm_data; - - /* - * Read and store word 0x0F of the EEPROM. This word contains bits - * that determine the hardware's default PAUSE (flow control) mode, - * a bit that determines whether the HW defaults to enabling or - * disabling auto-negotiation, and the direction of the - * SW defined pins. If there is no SW over-ride of the flow - * control setting, then the variable hw->fc will - * be initialized based on a value in the EEPROM. - */ - ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); - - if (ret_val) { - e_dbg("NVM Read Error\n"); - return ret_val; - } - - if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) - hw->fc.requested_mode = e1000_fc_none; - else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == - NVM_WORD0F_ASM_DIR) - hw->fc.requested_mode = e1000_fc_tx_pause; - else - hw->fc.requested_mode = e1000_fc_full; - - return 0; -} - -/** - * e1000e_setup_link - Setup flow control and link settings - * @hw: pointer to the HW structure - * - * Determines which flow control settings to use, then configures flow - * control. Calls the appropriate media-specific link configuration - * function. Assuming the adapter has a valid link partner, a valid link - * should be established. Assumes the hardware has previously been reset - * and the transmitter and receiver are not enabled. 
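For readers tracing e1000_set_default_fc_generic() above: the word-0x0F decode reduces to three comparisons. A sketch, with the two mask values assumed for illustration (they live in the driver's headers, not in this file):

#include <assert.h>
#include <stdint.h>

#define NVM_WORD0F_PAUSE_MASK	0x3000	/* assumed value */
#define NVM_WORD0F_ASM_DIR	0x2000	/* assumed value */

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

static enum fc_mode default_fc_from_nvm(uint16_t nvm_data)
{
	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		return fc_none;		/* no PAUSE bits set in the EEPROM */
	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
		return fc_tx_pause;	/* asymmetric direction bit only */
	return fc_full;
}

int main(void)
{
	assert(default_fc_from_nvm(0x0000) == fc_none);
	assert(default_fc_from_nvm(0x2000) == fc_tx_pause);
	assert(default_fc_from_nvm(0x3000) == fc_full);
	return 0;
}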
- **/ -s32 e1000e_setup_link(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; - - /* - * In the case of the phy reset being blocked, we already have a link. - * We do not need to set it up again. - */ - if (e1000_check_reset_block(hw)) - return 0; - - /* - * If requested flow control is set to default, set flow control - * based on the EEPROM flow control settings. - */ - if (hw->fc.requested_mode == e1000_fc_default) { - ret_val = e1000_set_default_fc_generic(hw); - if (ret_val) - return ret_val; - } - - /* - * Save off the requested flow control mode for use later. Depending - * on the link partner's capabilities, we may or may not use this mode. - */ - hw->fc.current_mode = hw->fc.requested_mode; - - e_dbg("After fix-ups FlowControl is now = %x\n", - hw->fc.current_mode); - - /* Call the necessary media_type subroutine to configure the link. */ - ret_val = mac->ops.setup_physical_interface(hw); - if (ret_val) - return ret_val; - - /* - * Initialize the flow control address, type, and PAUSE timer - * registers to their default values. This is done even if flow - * control is disabled, because it does not hurt anything to - * initialize these registers. - */ - e_dbg("Initializing the Flow Control address, type and timer regs\n"); - ew32(FCT, FLOW_CONTROL_TYPE); - ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); - ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); - - ew32(FCTTV, hw->fc.pause_time); - - return e1000e_set_fc_watermarks(hw); -} - -/** - * e1000_commit_fc_settings_generic - Configure flow control - * @hw: pointer to the HW structure - * - * Write the flow control settings to the Transmit Config Word Register (TXCW) - * base on the flow control settings in e1000_mac_info. - **/ -static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 txcw; - - /* - * Check for a software override of the flow control settings, and - * setup the device accordingly. If auto-negotiation is enabled, then - * software will have to set the "PAUSE" bits to the correct value in - * the Transmit Config Word Register (TXCW) and re-start auto- - * negotiation. However, if auto-negotiation is disabled, then - * software will have to manually configure the two flow control enable - * bits in the CTRL register. - * - * The possible values of the "fc" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but we - * do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. - */ - switch (hw->fc.current_mode) { - case e1000_fc_none: - /* Flow control completely disabled by a software over-ride. */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); - break; - case e1000_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is disabled - * by a software over-ride. Since there really isn't a way to - * advertise that we are capable of Rx Pause ONLY, we will - * advertise that we support both symmetric and asymmetric Rx - * PAUSE. Later, we will disable the adapter's ability to send - * PAUSE frames. - */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); - break; - case e1000_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is disabled, - * by a software over-ride. 
- */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); - break; - case e1000_fc_full: - /* - * Flow control (both Rx and Tx) is enabled by a software - * over-ride. - */ - txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); - break; - default: - e_dbg("Flow control param set incorrectly\n"); - return -E1000_ERR_CONFIG; - break; - } - - ew32(TXCW, txcw); - mac->txcw = txcw; - - return 0; -} - -/** - * e1000_poll_fiber_serdes_link_generic - Poll for link up - * @hw: pointer to the HW structure - * - * Polls for link up by reading the status register, if link fails to come - * up with auto-negotiation, then the link is forced if a signal is detected. - **/ -static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 i, status; - s32 ret_val; - - /* - * If we have a signal (the cable is plugged in, or assumed true for - * serdes media) then poll for a "Link-Up" indication in the Device - * Status Register. Time-out if a link isn't seen in 500 milliseconds - * seconds (Auto-negotiation should complete in less than 500 - * milliseconds even if the other end is doing it in SW). - */ - for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { - usleep_range(10000, 20000); - status = er32(STATUS); - if (status & E1000_STATUS_LU) - break; - } - if (i == FIBER_LINK_UP_LIMIT) { - e_dbg("Never got a valid link from auto-neg!!!\n"); - mac->autoneg_failed = 1; - /* - * AutoNeg failed to achieve a link, so we'll call - * mac->check_for_link. This routine will force the - * link up if we detect a signal. This will allow us to - * communicate with non-autonegotiating link partners. - */ - ret_val = mac->ops.check_for_link(hw); - if (ret_val) { - e_dbg("Error while checking for link\n"); - return ret_val; - } - mac->autoneg_failed = 0; - } else { - mac->autoneg_failed = 0; - e_dbg("Valid Link Found\n"); - } - - return 0; -} - -/** - * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes - * @hw: pointer to the HW structure - * - * Configures collision distance and flow control for fiber and serdes - * links. Upon successful setup, poll for link. - **/ -s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) -{ - u32 ctrl; - s32 ret_val; - - ctrl = er32(CTRL); - - /* Take the link out of reset */ - ctrl &= ~E1000_CTRL_LRST; - - e1000e_config_collision_dist(hw); - - ret_val = e1000_commit_fc_settings_generic(hw); - if (ret_val) - return ret_val; - - /* - * Since auto-negotiation is enabled, take the link out of reset (the - * link will be in reset, because we previously reset the chip). This - * will restart auto-negotiation. If auto-negotiation is successful - * then the link-up status bit will be set and the flow control enable - * bits (RFCE and TFCE) will be set according to their negotiated value. - */ - e_dbg("Auto-negotiation enabled\n"); - - ew32(CTRL, ctrl); - e1e_flush(); - usleep_range(1000, 2000); - - /* - * For these adapters, the SW definable pin 1 is set when the optics - * detect a signal. If we have a signal, then poll for a "Link-Up" - * indication. - */ - if (hw->phy.media_type == e1000_media_type_internal_serdes || - (er32(CTRL) & E1000_CTRL_SWDPIN1)) { - ret_val = e1000_poll_fiber_serdes_link_generic(hw); - } else { - e_dbg("No signal detected\n"); - } - - return 0; -} - -/** - * e1000e_config_collision_dist - Configure collision distance - * @hw: pointer to the HW structure - * - * Configures the collision distance to the default value and is used - * during link setup. 
Currently no func pointer exists and all
- * implementations are handled in the generic version of this function.
- **/
-void e1000e_config_collision_dist(struct e1000_hw *hw)
-{
-	u32 tctl;
-
-	tctl = er32(TCTL);
-
-	tctl &= ~E1000_TCTL_COLD;
-	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
-
-	ew32(TCTL, tctl);
-	e1e_flush();
-}
-
-/**
- * e1000e_set_fc_watermarks - Set flow control high/low watermarks
- * @hw: pointer to the HW structure
- *
- * Sets the flow control high/low threshold (watermark) registers.  If
- * flow control XON frame transmission is enabled, then set XON frame
- * transmission as well.
- **/
-s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
-{
-	u32 fcrtl = 0, fcrth = 0;
-
-	/*
-	 * Set the flow control receive threshold registers.  Normally,
-	 * these registers will be set to a default threshold that may be
-	 * adjusted later by the driver's runtime code.  However, if the
-	 * ability to transmit pause frames is not enabled, then these
-	 * registers will be set to 0.
-	 */
-	if (hw->fc.current_mode & e1000_fc_tx_pause) {
-		/*
-		 * We need to set up the Receive Threshold high and low water
-		 * marks as well as (optionally) enabling the transmission of
-		 * XON frames.
-		 */
-		fcrtl = hw->fc.low_water;
-		fcrtl |= E1000_FCRTL_XONE;
-		fcrth = hw->fc.high_water;
-	}
-	ew32(FCRTL, fcrtl);
-	ew32(FCRTH, fcrth);
-
-	return 0;
-}
-
-/**
- * e1000e_force_mac_fc - Force the MAC's flow control settings
- * @hw: pointer to the HW structure
- *
- * Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
- * device control register to reflect the adapter settings.  TFCE and RFCE
- * need to be explicitly set by software when a copper PHY is used because
- * autonegotiation is managed by the PHY rather than the MAC.  Software must
- * also configure these bits when link is forced on a fiber connection.
- **/
-s32 e1000e_force_mac_fc(struct e1000_hw *hw)
-{
-	u32 ctrl;
-
-	ctrl = er32(CTRL);
-
-	/*
-	 * Because we didn't get link via the internal auto-negotiation
-	 * mechanism (we either forced link or we got link via PHY
-	 * auto-neg), we have to manually enable/disable transmit and
-	 * receive flow control.
-	 *
-	 * The "Case" statement below enables/disables flow control
-	 * according to the "hw->fc.current_mode" parameter.
-	 *
-	 * The possible values of the "fc" parameter are:
-	 *      0:  Flow control is completely disabled
-	 *      1:  Rx flow control is enabled (we can receive pause
-	 *          frames but not send pause frames).
-	 *      2:  Tx flow control is enabled (we can send pause frames
-	 *          but we do not receive pause frames).
-	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
-	 *  other:  No other values should be possible at this point.
-	 */
-	e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
-
-	switch (hw->fc.current_mode) {
-	case e1000_fc_none:
-		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
-		break;
-	case e1000_fc_rx_pause:
-		ctrl &= (~E1000_CTRL_TFCE);
-		ctrl |= E1000_CTRL_RFCE;
-		break;
-	case e1000_fc_tx_pause:
-		ctrl &= (~E1000_CTRL_RFCE);
-		ctrl |= E1000_CTRL_TFCE;
-		break;
-	case e1000_fc_full:
-		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
-		break;
-	default:
-		e_dbg("Flow control param set incorrectly\n");
-		return -E1000_ERR_CONFIG;
-	}
-
-	ew32(CTRL, ctrl);
-
-	return 0;
-}
-
-/**
- * e1000e_config_fc_after_link_up - Configures flow control after link
- * @hw: pointer to the HW structure
- *
- * Checks the status of auto-negotiation after link up to ensure that the
- * speed and duplex were not forced.
If the link needed to be forced, then - * flow control needs to be forced also. If auto-negotiation is enabled - * and did not fail, then we configure flow control based on our link - * partner. - **/ -s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val = 0; - u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; - u16 speed, duplex; - - /* - * Check for the case where we have fiber media and auto-neg failed - * so we had to force link. In this case, we need to force the - * configuration of the MAC to match the "fc" parameter. - */ - if (mac->autoneg_failed) { - if (hw->phy.media_type == e1000_media_type_fiber || - hw->phy.media_type == e1000_media_type_internal_serdes) - ret_val = e1000e_force_mac_fc(hw); - } else { - if (hw->phy.media_type == e1000_media_type_copper) - ret_val = e1000e_force_mac_fc(hw); - } - - if (ret_val) { - e_dbg("Error forcing flow control settings\n"); - return ret_val; - } - - /* - * Check for the case where we have copper media and auto-neg is - * enabled. In this case, we need to check and see if Auto-Neg - * has completed, and if so, how the PHY and link partner has - * flow control configured. - */ - if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { - /* - * Read the MII Status Register and check to see if AutoNeg - * has completed. We read this twice because this reg has - * some "sticky" (latched) bits. - */ - ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); - if (ret_val) - return ret_val; - - if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { - e_dbg("Copper PHY and Auto Neg " - "has not completed.\n"); - return ret_val; - } - - /* - * The AutoNeg process has completed, so we now need to - * read both the Auto Negotiation Advertisement - * Register (Address 4) and the Auto_Negotiation Base - * Page Ability Register (Address 5) to determine how - * flow control was negotiated. - */ - ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); - if (ret_val) - return ret_val; - ret_val = - e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); - if (ret_val) - return ret_val; - - /* - * Two bits in the Auto Negotiation Advertisement Register - * (Address 4) and two bits in the Auto Negotiation Base - * Page Ability Register (Address 5) determine flow control - * for both the PHY and the link partner. The following - * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, - * 1999, describes these PAUSE resolution bits and how flow - * control is determined based upon these settings. - * NOTE: DC = Don't Care - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution - *-------|---------|-------|---------|-------------------- - * 0 | 0 | DC | DC | e1000_fc_none - * 0 | 1 | 0 | DC | e1000_fc_none - * 0 | 1 | 1 | 0 | e1000_fc_none - * 0 | 1 | 1 | 1 | e1000_fc_tx_pause - * 1 | 0 | 0 | DC | e1000_fc_none - * 1 | DC | 1 | DC | e1000_fc_full - * 1 | 1 | 0 | 0 | e1000_fc_none - * 1 | 1 | 0 | 1 | e1000_fc_rx_pause - * - * Are both PAUSE bits set to 1? If so, this implies - * Symmetric Flow Control is enabled at both ends. The - * ASM_DIR bits are irrelevant per the spec. 
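The PAUSE/ASM_DIR resolution table above (continued just below) condenses to four tests applied in order. A standalone sketch, with req_rx_only standing in for the requested-mode special case handled in the code that follows:

#include <assert.h>
#include <stdbool.h>

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

static enum fc_mode resolve_fc(bool l_pause, bool l_asm, bool p_pause,
			       bool p_asm, bool req_rx_only)
{
	if (l_pause && p_pause)		/* symmetric: ASM_DIR is irrelevant */
		return req_rx_only ? fc_rx_pause : fc_full;
	if (!l_pause && l_asm && p_pause && p_asm)
		return fc_tx_pause;	/* we transmit PAUSE only */
	if (l_pause && l_asm && !p_pause && p_asm)
		return fc_rx_pause;	/* we receive PAUSE only */
	return fc_none;			/* per the IEEE spec, disabled */
}

int main(void)
{
	assert(resolve_fc(true,  true,  true,  false, false) == fc_full);
	assert(resolve_fc(true,  true,  true,  false, true)  == fc_rx_pause);
	assert(resolve_fc(false, true,  true,  true,  false) == fc_tx_pause);
	assert(resolve_fc(true,  true,  false, true,  false) == fc_rx_pause);
	assert(resolve_fc(false, false, true,  true,  false) == fc_none);
	return 0;
}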
- * - * For Symmetric Flow Control: - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 1 | DC | 1 | DC | E1000_fc_full - * - */ - if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { - /* - * Now we need to check if the user selected Rx ONLY - * of pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise Rx - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. - */ - if (hw->fc.requested_mode == e1000_fc_full) { - hw->fc.current_mode = e1000_fc_full; - e_dbg("Flow Control = FULL.\r\n"); - } else { - hw->fc.current_mode = e1000_fc_rx_pause; - e_dbg("Flow Control = " - "Rx PAUSE frames only.\r\n"); - } - } - /* - * For receiving PAUSE frames ONLY. - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 0 | 1 | 1 | 1 | e1000_fc_tx_pause - */ - else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { - hw->fc.current_mode = e1000_fc_tx_pause; - e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); - } - /* - * For transmitting PAUSE frames ONLY. - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 1 | 1 | 0 | 1 | e1000_fc_rx_pause - */ - else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { - hw->fc.current_mode = e1000_fc_rx_pause; - e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); - } else { - /* - * Per the IEEE spec, at this point flow control - * should be disabled. - */ - hw->fc.current_mode = e1000_fc_none; - e_dbg("Flow Control = NONE.\r\n"); - } - - /* - * Now we need to do one last check... If we auto- - * negotiated to HALF DUPLEX, flow control should not be - * enabled per IEEE 802.3 spec. - */ - ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); - if (ret_val) { - e_dbg("Error getting link speed and duplex\n"); - return ret_val; - } - - if (duplex == HALF_DUPLEX) - hw->fc.current_mode = e1000_fc_none; - - /* - * Now we call a subroutine to actually force the MAC - * controller to use the correct flow control settings. - */ - ret_val = e1000e_force_mac_fc(hw); - if (ret_val) { - e_dbg("Error forcing flow control settings\n"); - return ret_val; - } - } - - return 0; -} - -/** - * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex - * @hw: pointer to the HW structure - * @speed: stores the current speed - * @duplex: stores the current duplex - * - * Read the status register for the current speed/duplex and store the current - * speed and duplex for copper connections. - **/ -s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) -{ - u32 status; - - status = er32(STATUS); - if (status & E1000_STATUS_SPEED_1000) - *speed = SPEED_1000; - else if (status & E1000_STATUS_SPEED_100) - *speed = SPEED_100; - else - *speed = SPEED_10; - - if (status & E1000_STATUS_FD) - *duplex = FULL_DUPLEX; - else - *duplex = HALF_DUPLEX; - - e_dbg("%u Mbps, %s Duplex\n", - *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, - *duplex == FULL_DUPLEX ? 
"Full" : "Half"); - - return 0; -} - -/** - * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex - * @hw: pointer to the HW structure - * @speed: stores the current speed - * @duplex: stores the current duplex - * - * Sets the speed and duplex to gigabit full duplex (the only possible option) - * for fiber/serdes links. - **/ -s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex) -{ - *speed = SPEED_1000; - *duplex = FULL_DUPLEX; - - return 0; -} - -/** - * e1000e_get_hw_semaphore - Acquire hardware semaphore - * @hw: pointer to the HW structure - * - * Acquire the HW semaphore to access the PHY or NVM - **/ -s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) -{ - u32 swsm; - s32 timeout = hw->nvm.word_size + 1; - s32 i = 0; - - /* Get the SW semaphore */ - while (i < timeout) { - swsm = er32(SWSM); - if (!(swsm & E1000_SWSM_SMBI)) - break; - - udelay(50); - i++; - } - - if (i == timeout) { - e_dbg("Driver can't access device - SMBI bit is set.\n"); - return -E1000_ERR_NVM; - } - - /* Get the FW semaphore. */ - for (i = 0; i < timeout; i++) { - swsm = er32(SWSM); - ew32(SWSM, swsm | E1000_SWSM_SWESMBI); - - /* Semaphore acquired if bit latched */ - if (er32(SWSM) & E1000_SWSM_SWESMBI) - break; - - udelay(50); - } - - if (i == timeout) { - /* Release semaphores */ - e1000e_put_hw_semaphore(hw); - e_dbg("Driver can't access the NVM\n"); - return -E1000_ERR_NVM; - } - - return 0; -} - -/** - * e1000e_put_hw_semaphore - Release hardware semaphore - * @hw: pointer to the HW structure - * - * Release hardware semaphore used to access the PHY or NVM - **/ -void e1000e_put_hw_semaphore(struct e1000_hw *hw) -{ - u32 swsm; - - swsm = er32(SWSM); - swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); - ew32(SWSM, swsm); -} - -/** - * e1000e_get_auto_rd_done - Check for auto read completion - * @hw: pointer to the HW structure - * - * Check EEPROM for Auto Read done bit. - **/ -s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) -{ - s32 i = 0; - - while (i < AUTO_READ_DONE_TIMEOUT) { - if (er32(EECD) & E1000_EECD_AUTO_RD) - break; - usleep_range(1000, 2000); - i++; - } - - if (i == AUTO_READ_DONE_TIMEOUT) { - e_dbg("Auto read by HW from NVM has not completed.\n"); - return -E1000_ERR_RESET; - } - - return 0; -} - -/** - * e1000e_valid_led_default - Verify a valid default LED config - * @hw: pointer to the HW structure - * @data: pointer to the NVM (EEPROM) - * - * Read the EEPROM for the current default LED configuration. If the - * LED configuration is not valid, set to a valid LED configuration. 
- **/ -s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) -{ - s32 ret_val; - - ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); - if (ret_val) { - e_dbg("NVM Read Error\n"); - return ret_val; - } - - if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) - *data = ID_LED_DEFAULT; - - return 0; -} - -/** - * e1000e_id_led_init - - * @hw: pointer to the HW structure - * - **/ -s32 e1000e_id_led_init(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; - const u32 ledctl_mask = 0x000000FF; - const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; - const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; - u16 data, i, temp; - const u16 led_mask = 0x0F; - - ret_val = hw->nvm.ops.valid_led_default(hw, &data); - if (ret_val) - return ret_val; - - mac->ledctl_default = er32(LEDCTL); - mac->ledctl_mode1 = mac->ledctl_default; - mac->ledctl_mode2 = mac->ledctl_default; - - for (i = 0; i < 4; i++) { - temp = (data >> (i << 2)) & led_mask; - switch (temp) { - case ID_LED_ON1_DEF2: - case ID_LED_ON1_ON2: - case ID_LED_ON1_OFF2: - mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); - mac->ledctl_mode1 |= ledctl_on << (i << 3); - break; - case ID_LED_OFF1_DEF2: - case ID_LED_OFF1_ON2: - case ID_LED_OFF1_OFF2: - mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); - mac->ledctl_mode1 |= ledctl_off << (i << 3); - break; - default: - /* Do nothing */ - break; - } - switch (temp) { - case ID_LED_DEF1_ON2: - case ID_LED_ON1_ON2: - case ID_LED_OFF1_ON2: - mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); - mac->ledctl_mode2 |= ledctl_on << (i << 3); - break; - case ID_LED_DEF1_OFF2: - case ID_LED_ON1_OFF2: - case ID_LED_OFF1_OFF2: - mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); - mac->ledctl_mode2 |= ledctl_off << (i << 3); - break; - default: - /* Do nothing */ - break; - } - } - - return 0; -} - -/** - * e1000e_setup_led_generic - Configures SW controllable LED - * @hw: pointer to the HW structure - * - * This prepares the SW controllable LED for use and saves the current state - * of the LED so it can be later restored. - **/ -s32 e1000e_setup_led_generic(struct e1000_hw *hw) -{ - u32 ledctl; - - if (hw->mac.ops.setup_led != e1000e_setup_led_generic) - return -E1000_ERR_CONFIG; - - if (hw->phy.media_type == e1000_media_type_fiber) { - ledctl = er32(LEDCTL); - hw->mac.ledctl_default = ledctl; - /* Turn off LED0 */ - ledctl &= ~(E1000_LEDCTL_LED0_IVRT | - E1000_LEDCTL_LED0_BLINK | - E1000_LEDCTL_LED0_MODE_MASK); - ledctl |= (E1000_LEDCTL_MODE_LED_OFF << - E1000_LEDCTL_LED0_MODE_SHIFT); - ew32(LEDCTL, ledctl); - } else if (hw->phy.media_type == e1000_media_type_copper) { - ew32(LEDCTL, hw->mac.ledctl_mode1); - } - - return 0; -} - -/** - * e1000e_cleanup_led_generic - Set LED config to default operation - * @hw: pointer to the HW structure - * - * Remove the current LED configuration and set the LED configuration - * to the default value, saved from the EEPROM. - **/ -s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) -{ - ew32(LEDCTL, hw->mac.ledctl_default); - return 0; -} - -/** - * e1000e_blink_led_generic - Blink LED - * @hw: pointer to the HW structure - * - * Blink the LEDs which are set to be on. 
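e1000e_id_led_init() above fans a 16-bit NVM word out nibble by nibble (i << 2) into the byte-per-LED mode fields of LEDCTL (i << 3). A sketch of just that field arithmetic; LED_ON = 0x0E is taken from the blink comment below, LED_OFF = 0x0F is an assumption:

#include <assert.h>
#include <stdint.h>

#define LEDCTL_MODE_ON	0x0E
#define LEDCTL_MODE_OFF	0x0F	/* assumed value */

static uint32_t set_led_mode(uint32_t ledctl, unsigned int i, uint8_t mode)
{
	ledctl &= ~(0xFFu << (i * 8));		/* clear byte i (i << 3 bits) */
	ledctl |= (uint32_t)mode << (i * 8);	/* install the new mode */
	return ledctl;
}

int main(void)
{
	uint32_t ledctl = 0x12345678;

	ledctl = set_led_mode(ledctl, 1, LEDCTL_MODE_ON);
	assert(ledctl == 0x12340E78);
	ledctl = set_led_mode(ledctl, 3, LEDCTL_MODE_OFF);
	assert(ledctl == 0x0F340E78);
	return 0;
}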
- **/ -s32 e1000e_blink_led_generic(struct e1000_hw *hw) -{ - u32 ledctl_blink = 0; - u32 i; - - if (hw->phy.media_type == e1000_media_type_fiber) { - /* always blink LED0 for PCI-E fiber */ - ledctl_blink = E1000_LEDCTL_LED0_BLINK | - (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); - } else { - /* - * set the blink bit for each LED that's "on" (0x0E) - * in ledctl_mode2 - */ - ledctl_blink = hw->mac.ledctl_mode2; - for (i = 0; i < 4; i++) - if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == - E1000_LEDCTL_MODE_LED_ON) - ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << - (i * 8)); - } - - ew32(LEDCTL, ledctl_blink); - - return 0; -} - -/** - * e1000e_led_on_generic - Turn LED on - * @hw: pointer to the HW structure - * - * Turn LED on. - **/ -s32 e1000e_led_on_generic(struct e1000_hw *hw) -{ - u32 ctrl; - - switch (hw->phy.media_type) { - case e1000_media_type_fiber: - ctrl = er32(CTRL); - ctrl &= ~E1000_CTRL_SWDPIN0; - ctrl |= E1000_CTRL_SWDPIO0; - ew32(CTRL, ctrl); - break; - case e1000_media_type_copper: - ew32(LEDCTL, hw->mac.ledctl_mode2); - break; - default: - break; - } - - return 0; -} - -/** - * e1000e_led_off_generic - Turn LED off - * @hw: pointer to the HW structure - * - * Turn LED off. - **/ -s32 e1000e_led_off_generic(struct e1000_hw *hw) -{ - u32 ctrl; - - switch (hw->phy.media_type) { - case e1000_media_type_fiber: - ctrl = er32(CTRL); - ctrl |= E1000_CTRL_SWDPIN0; - ctrl |= E1000_CTRL_SWDPIO0; - ew32(CTRL, ctrl); - break; - case e1000_media_type_copper: - ew32(LEDCTL, hw->mac.ledctl_mode1); - break; - default: - break; - } - - return 0; -} - -/** - * e1000e_set_pcie_no_snoop - Set PCI-express capabilities - * @hw: pointer to the HW structure - * @no_snoop: bitmap of snoop events - * - * Set the PCI-express register to snoop for events enabled in 'no_snoop'. - **/ -void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) -{ - u32 gcr; - - if (no_snoop) { - gcr = er32(GCR); - gcr &= ~(PCIE_NO_SNOOP_ALL); - gcr |= no_snoop; - ew32(GCR, gcr); - } -} - -/** - * e1000e_disable_pcie_master - Disables PCI-express master access - * @hw: pointer to the HW structure - * - * Returns 0 if successful, else returns -10 - * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused - * the master requests to be disabled. - * - * Disables PCI-Express master access and verifies there are no pending - * requests. - **/ -s32 e1000e_disable_pcie_master(struct e1000_hw *hw) -{ - u32 ctrl; - s32 timeout = MASTER_DISABLE_TIMEOUT; - - ctrl = er32(CTRL); - ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; - ew32(CTRL, ctrl); - - while (timeout) { - if (!(er32(STATUS) & - E1000_STATUS_GIO_MASTER_ENABLE)) - break; - udelay(100); - timeout--; - } - - if (!timeout) { - e_dbg("Master requests are pending.\n"); - return -E1000_ERR_MASTER_REQUESTS_PENDING; - } - - return 0; -} - -/** - * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing - * @hw: pointer to the HW structure - * - * Reset the Adaptive Interframe Spacing throttle to default values. 
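The blink fan-out above is the same byte walk: every LED byte programmed to the "on" mode (0x0E, per the comment) picks up its blink bit. A sketch, with the blink bit position within each LED byte assumed:

#include <assert.h>
#include <stdint.h>

#define LEDCTL_MODE_ON	  0x0E	/* from the comment above */
#define LEDCTL_LED0_BLINK 0x80	/* assumed bit position */

static uint32_t blink_on_leds(uint32_t ledctl_mode2)
{
	uint32_t ledctl = ledctl_mode2;
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (((ledctl_mode2 >> (i * 8)) & 0xFF) == LEDCTL_MODE_ON)
			ledctl |= (uint32_t)LEDCTL_LED0_BLINK << (i * 8);
	return ledctl;
}

int main(void)
{
	/* LEDs 0 and 2 are "on"; both pick up the blink bit. */
	assert(blink_on_leds(0x010E0F0E) == 0x018E0F8E);
	return 0;
}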
- **/ -void e1000e_reset_adaptive(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - - if (!mac->adaptive_ifs) { - e_dbg("Not in Adaptive IFS mode!\n"); - goto out; - } - - mac->current_ifs_val = 0; - mac->ifs_min_val = IFS_MIN; - mac->ifs_max_val = IFS_MAX; - mac->ifs_step_size = IFS_STEP; - mac->ifs_ratio = IFS_RATIO; - - mac->in_ifs_mode = false; - ew32(AIT, 0); -out: - return; -} - -/** - * e1000e_update_adaptive - Update Adaptive Interframe Spacing - * @hw: pointer to the HW structure - * - * Update the Adaptive Interframe Spacing Throttle value based on the - * time between transmitted packets and time between collisions. - **/ -void e1000e_update_adaptive(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - - if (!mac->adaptive_ifs) { - e_dbg("Not in Adaptive IFS mode!\n"); - goto out; - } - - if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { - if (mac->tx_packet_delta > MIN_NUM_XMITS) { - mac->in_ifs_mode = true; - if (mac->current_ifs_val < mac->ifs_max_val) { - if (!mac->current_ifs_val) - mac->current_ifs_val = mac->ifs_min_val; - else - mac->current_ifs_val += - mac->ifs_step_size; - ew32(AIT, mac->current_ifs_val); - } - } - } else { - if (mac->in_ifs_mode && - (mac->tx_packet_delta <= MIN_NUM_XMITS)) { - mac->current_ifs_val = 0; - mac->in_ifs_mode = false; - ew32(AIT, 0); - } - } -out: - return; -} - -/** - * e1000_raise_eec_clk - Raise EEPROM clock - * @hw: pointer to the HW structure - * @eecd: pointer to the EEPROM - * - * Enable/Raise the EEPROM clock bit. - **/ -static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) -{ - *eecd = *eecd | E1000_EECD_SK; - ew32(EECD, *eecd); - e1e_flush(); - udelay(hw->nvm.delay_usec); -} - -/** - * e1000_lower_eec_clk - Lower EEPROM clock - * @hw: pointer to the HW structure - * @eecd: pointer to the EEPROM - * - * Clear/Lower the EEPROM clock bit. - **/ -static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) -{ - *eecd = *eecd & ~E1000_EECD_SK; - ew32(EECD, *eecd); - e1e_flush(); - udelay(hw->nvm.delay_usec); -} - -/** - * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM - * @hw: pointer to the HW structure - * @data: data to send to the EEPROM - * @count: number of bits to shift out - * - * We need to shift 'count' bits out to the EEPROM. So, the value in the - * "data" parameter will be shifted out to the EEPROM one bit at a time. - * In order to do this, "data" must be broken down into bits. - **/ -static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = er32(EECD); - u32 mask; - - mask = 0x01 << (count - 1); - if (nvm->type == e1000_nvm_eeprom_spi) - eecd |= E1000_EECD_DO; - - do { - eecd &= ~E1000_EECD_DI; - - if (data & mask) - eecd |= E1000_EECD_DI; - - ew32(EECD, eecd); - e1e_flush(); - - udelay(nvm->delay_usec); - - e1000_raise_eec_clk(hw, &eecd); - e1000_lower_eec_clk(hw, &eecd); - - mask >>= 1; - } while (mask); - - eecd &= ~E1000_EECD_DI; - ew32(EECD, eecd); -} - -/** - * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM - * @hw: pointer to the HW structure - * @count: number of bits to shift in - * - * In order to read a register from the EEPROM, we need to shift 'count' bits - * in from the EEPROM. Bits are "shifted in" by raising the clock input to - * the EEPROM (setting the SK bit), and then reading the value of the data out - * "DO" bit. During this "shifting in" process the data in "DI" bit should - * always be clear. 
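The adaptive-IFS policy in e1000e_update_adaptive() above reduces to one step rule: under sustained collisions the AIT value walks up from a floor, and when traffic quiets down it snaps back to zero. A sketch of that rule; the IFS_* constants are assumed values and the in_ifs_mode bookkeeping is folded away:

#include <assert.h>
#include <stdint.h>

#define IFS_MIN		40	/* assumed values */
#define IFS_MAX		80
#define IFS_STEP	10
#define IFS_RATIO	4
#define MIN_NUM_XMITS	1000

static uint16_t update_ifs(uint16_t cur, uint32_t collision_delta,
			   uint32_t tx_packet_delta)
{
	if (collision_delta * IFS_RATIO > tx_packet_delta) {
		if (tx_packet_delta > MIN_NUM_XMITS && cur < IFS_MAX)
			return cur ? cur + IFS_STEP : IFS_MIN;
		return cur;
	}
	return (tx_packet_delta <= MIN_NUM_XMITS) ? 0 : cur;
}

int main(void)
{
	uint16_t ifs = 0;

	ifs = update_ifs(ifs, 2000, 4000);	/* congested: start at the floor */
	assert(ifs == IFS_MIN);
	ifs = update_ifs(ifs, 2000, 4000);	/* still congested: step up */
	assert(ifs == IFS_MIN + IFS_STEP);
	ifs = update_ifs(ifs, 0, 500);		/* idle again: reset to 0 */
	assert(ifs == 0);
	return 0;
}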
- **/ -static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) -{ - u32 eecd; - u32 i; - u16 data; - - eecd = er32(EECD); - - eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); - data = 0; - - for (i = 0; i < count; i++) { - data <<= 1; - e1000_raise_eec_clk(hw, &eecd); - - eecd = er32(EECD); - - eecd &= ~E1000_EECD_DI; - if (eecd & E1000_EECD_DO) - data |= 1; - - e1000_lower_eec_clk(hw, &eecd); - } - - return data; -} - -/** - * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion - * @hw: pointer to the HW structure - * @ee_reg: EEPROM flag for polling - * - * Polls the EEPROM status bit for either read or write completion based - * upon the value of 'ee_reg'. - **/ -s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) -{ - u32 attempts = 100000; - u32 i, reg = 0; - - for (i = 0; i < attempts; i++) { - if (ee_reg == E1000_NVM_POLL_READ) - reg = er32(EERD); - else - reg = er32(EEWR); - - if (reg & E1000_NVM_RW_REG_DONE) - return 0; - - udelay(5); - } - - return -E1000_ERR_NVM; -} - -/** - * e1000e_acquire_nvm - Generic request for access to EEPROM - * @hw: pointer to the HW structure - * - * Set the EEPROM access request bit and wait for EEPROM access grant bit. - * Return successful if access grant bit set, else clear the request for - * EEPROM access and return -E1000_ERR_NVM (-1). - **/ -s32 e1000e_acquire_nvm(struct e1000_hw *hw) -{ - u32 eecd = er32(EECD); - s32 timeout = E1000_NVM_GRANT_ATTEMPTS; - - ew32(EECD, eecd | E1000_EECD_REQ); - eecd = er32(EECD); - - while (timeout) { - if (eecd & E1000_EECD_GNT) - break; - udelay(5); - eecd = er32(EECD); - timeout--; - } - - if (!timeout) { - eecd &= ~E1000_EECD_REQ; - ew32(EECD, eecd); - e_dbg("Could not acquire NVM grant\n"); - return -E1000_ERR_NVM; - } - - return 0; -} - -/** - * e1000_standby_nvm - Return EEPROM to standby state - * @hw: pointer to the HW structure - * - * Return the EEPROM to a standby state. - **/ -static void e1000_standby_nvm(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = er32(EECD); - - if (nvm->type == e1000_nvm_eeprom_spi) { - /* Toggle CS to flush commands */ - eecd |= E1000_EECD_CS; - ew32(EECD, eecd); - e1e_flush(); - udelay(nvm->delay_usec); - eecd &= ~E1000_EECD_CS; - ew32(EECD, eecd); - e1e_flush(); - udelay(nvm->delay_usec); - } -} - -/** - * e1000_stop_nvm - Terminate EEPROM command - * @hw: pointer to the HW structure - * - * Terminates the current command by inverting the EEPROM's chip select pin. - **/ -static void e1000_stop_nvm(struct e1000_hw *hw) -{ - u32 eecd; - - eecd = er32(EECD); - if (hw->nvm.type == e1000_nvm_eeprom_spi) { - /* Pull CS high */ - eecd |= E1000_EECD_CS; - e1000_lower_eec_clk(hw, &eecd); - } -} - -/** - * e1000e_release_nvm - Release exclusive access to EEPROM - * @hw: pointer to the HW structure - * - * Stop any current commands to the EEPROM and clear the EEPROM request bit. - **/ -void e1000e_release_nvm(struct e1000_hw *hw) -{ - u32 eecd; - - e1000_stop_nvm(hw); - - eecd = er32(EECD); - eecd &= ~E1000_EECD_REQ; - ew32(EECD, eecd); -} - -/** - * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write - * @hw: pointer to the HW structure - * - * Setups the EEPROM for reading and writing. 
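The EEPROM bit-banging above is plain MSB-first shifting. A software-only model that pairs the shift-out and shift-in orderings, without any of the clock raising/lowering or delays:

#include <assert.h>
#include <stdint.h>

static uint32_t wire;		/* captured bits, oldest bit highest */
static uint16_t wire_len;

static void shift_out(uint16_t data, uint16_t count)
{
	uint16_t mask = 1u << (count - 1);	/* MSB goes out first */

	do {
		wire = (wire << 1) | !!(data & mask);
		wire_len++;
		mask >>= 1;
	} while (mask);
}

static uint16_t shift_in(uint16_t count)
{
	uint16_t data = 0, i;

	for (i = 0; i < count; i++) {
		data <<= 1;			/* make room, then sample */
		data |= (wire >> --wire_len) & 1;
	}
	return data;
}

int main(void)
{
	shift_out(0x2D, 8);		/* e.g. an 8-bit opcode */
	assert(shift_in(8) == 0x2D);	/* comes back in the same order */
	return 0;
}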
- **/ -static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = er32(EECD); - u8 spi_stat_reg; - - if (nvm->type == e1000_nvm_eeprom_spi) { - u16 timeout = NVM_MAX_RETRY_SPI; - - /* Clear SK and CS */ - eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); - ew32(EECD, eecd); - e1e_flush(); - udelay(1); - - /* - * Read "Status Register" repeatedly until the LSB is cleared. - * The EEPROM will signal that the command has been completed - * by clearing bit 0 of the internal status register. If it's - * not cleared within 'timeout', then error out. - */ - while (timeout) { - e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, - hw->nvm.opcode_bits); - spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); - if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) - break; - - udelay(5); - e1000_standby_nvm(hw); - timeout--; - } - - if (!timeout) { - e_dbg("SPI NVM Status error\n"); - return -E1000_ERR_NVM; - } - } - - return 0; -} - -/** - * e1000e_read_nvm_eerd - Reads EEPROM using EERD register - * @hw: pointer to the HW structure - * @offset: offset of word in the EEPROM to read - * @words: number of words to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the EERD register. - **/ -s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 i, eerd = 0; - s32 ret_val = 0; - - /* - * A check for invalid values: offset too large, too many words, - * too many words for the offset, and not enough words. - */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { - e_dbg("nvm parameter(s) out of bounds\n"); - return -E1000_ERR_NVM; - } - - for (i = 0; i < words; i++) { - eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + - E1000_NVM_RW_REG_START; - - ew32(EERD, eerd); - ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); - if (ret_val) - break; - - data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); - } - - return ret_val; -} - -/** - * e1000e_write_nvm_spi - Write to EEPROM using SPI - * @hw: pointer to the HW structure - * @offset: offset within the EEPROM to be written to - * @words: number of words to write - * @data: 16 bit word(s) to be written to the EEPROM - * - * Writes data to EEPROM at offset using SPI interface. - * - * If e1000e_update_nvm_checksum is not called after this function , the - * EEPROM will most likely contain an invalid checksum. - **/ -s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - s32 ret_val; - u16 widx = 0; - - /* - * A check for invalid values: offset too large, too many words, - * and not enough words. 
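One EERD transaction in e1000e_read_nvm_eerd() above is: compose the address plus the START bit, poll for DONE, extract the data field. A sketch against a mock register that completes instantly; the field positions are assumptions made for the model:

#include <assert.h>
#include <stdint.h>

#define EERD_START	0x00000001	/* assumed layout */
#define EERD_DONE	0x00000002
#define EERD_ADDR_SHIFT	2
#define EERD_DATA_SHIFT	16

static const uint16_t fake_nvm[8] = { 0xBEEF, 0x1234, 0xBABA };
static uint32_t eerd;			/* mock EERD register */

static void wr_eerd(uint32_t v)		/* mock ew32(EERD): instant done */
{
	uint16_t offset = (v >> EERD_ADDR_SHIFT) & 0x7; /* bound to fake NVM */

	eerd = ((uint32_t)fake_nvm[offset] << EERD_DATA_SHIFT) | EERD_DONE;
}

static uint16_t read_nvm_word(uint16_t offset)
{
	wr_eerd(((uint32_t)offset << EERD_ADDR_SHIFT) | EERD_START);
	while (!(eerd & EERD_DONE))	/* poll for completion */
		;
	return eerd >> EERD_DATA_SHIFT;
}

int main(void)
{
	assert(read_nvm_word(0) == 0xBEEF);
	assert(read_nvm_word(2) == 0xBABA);
	return 0;
}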
- */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { - e_dbg("nvm parameter(s) out of bounds\n"); - return -E1000_ERR_NVM; - } - - ret_val = nvm->ops.acquire(hw); - if (ret_val) - return ret_val; - - while (widx < words) { - u8 write_opcode = NVM_WRITE_OPCODE_SPI; - - ret_val = e1000_ready_nvm_eeprom(hw); - if (ret_val) { - nvm->ops.release(hw); - return ret_val; - } - - e1000_standby_nvm(hw); - - /* Send the WRITE ENABLE command (8 bit opcode) */ - e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, - nvm->opcode_bits); - - e1000_standby_nvm(hw); - - /* - * Some SPI eeproms use the 8th address bit embedded in the - * opcode - */ - if ((nvm->address_bits == 8) && (offset >= 128)) - write_opcode |= NVM_A8_OPCODE_SPI; - - /* Send the Write command (8-bit opcode + addr) */ - e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); - e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), - nvm->address_bits); - - /* Loop to allow for up to whole page write of eeprom */ - while (widx < words) { - u16 word_out = data[widx]; - word_out = (word_out >> 8) | (word_out << 8); - e1000_shift_out_eec_bits(hw, word_out, 16); - widx++; - - if ((((offset + widx) * 2) % nvm->page_size) == 0) { - e1000_standby_nvm(hw); - break; - } - } - } - - usleep_range(10000, 20000); - nvm->ops.release(hw); - return 0; -} - -/** - * e1000_read_pba_string_generic - Read device part number - * @hw: pointer to the HW structure - * @pba_num: pointer to device part number - * @pba_num_size: size of part number buffer - * - * Reads the product board assembly (PBA) number from the EEPROM and stores - * the value in pba_num. - **/ -s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, - u32 pba_num_size) -{ - s32 ret_val; - u16 nvm_data; - u16 pba_ptr; - u16 offset; - u16 length; - - if (pba_num == NULL) { - e_dbg("PBA string buffer was null\n"); - ret_val = E1000_ERR_INVALID_ARGUMENT; - goto out; - } - - ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); - if (ret_val) { - e_dbg("NVM Read Error\n"); - goto out; - } - - ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); - if (ret_val) { - e_dbg("NVM Read Error\n"); - goto out; - } - - /* - * if nvm_data is not ptr guard the PBA must be in legacy format which - * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (nvm_data != NVM_PBA_PTR_GUARD) { - e_dbg("NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { - e_dbg("PBA string buffer too small\n"); - return E1000_ERR_NO_SPACE; - } - - /* extract hex string from data and pba_ptr */ - pba_num[0] = (nvm_data >> 12) & 0xF; - pba_num[1] = (nvm_data >> 8) & 0xF; - pba_num[2] = (nvm_data >> 4) & 0xF; - pba_num[3] = nvm_data & 0xF; - pba_num[4] = (pba_ptr >> 12) & 0xF; - pba_num[5] = (pba_ptr >> 8) & 0xF; - pba_num[6] = '-'; - pba_num[7] = 0; - pba_num[8] = (pba_ptr >> 4) & 0xF; - pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ - pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (pba_num[offset] < 0xA) - pba_num[offset] += '0'; - else if (pba_num[offset] < 0x10) - pba_num[offset] += 'A' - 0xA; - } - - goto out; - } - - ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); - if (ret_val) { - e_dbg("NVM Read Error\n"); - goto out; - } - - if (length == 0xFFFF || length == 0) { - e_dbg("NVM PBA number section 
invalid length\n"); - ret_val = E1000_ERR_NVM_PBA_SECTION; - goto out; - } - /* check if pba_num buffer is big enough */ - if (pba_num_size < (((u32)length * 2) - 1)) { - e_dbg("PBA string buffer too small\n"); - ret_val = E1000_ERR_NO_SPACE; - goto out; - } - - /* trim pba length from start of string */ - pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); - if (ret_val) { - e_dbg("NVM Read Error\n"); - goto out; - } - pba_num[offset * 2] = (u8)(nvm_data >> 8); - pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); - } - pba_num[offset * 2] = '\0'; - -out: - return ret_val; -} - -/** - * e1000_read_mac_addr_generic - Read device MAC address - * @hw: pointer to the HW structure - * - * Reads the device MAC address from the EEPROM and stores the value. - * Since devices with two ports use the same EEPROM, we increment the - * last bit in the MAC address for the second port. - **/ -s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) -{ - u32 rar_high; - u32 rar_low; - u16 i; - - rar_high = er32(RAH(0)); - rar_low = er32(RAL(0)); - - for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) - hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); - - for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) - hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); - - for (i = 0; i < ETH_ALEN; i++) - hw->mac.addr[i] = hw->mac.perm_addr[i]; - - return 0; -} - -/** - * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum - * @hw: pointer to the HW structure - * - * Calculates the EEPROM checksum by reading/adding each word of the EEPROM - * and then verifies that the sum of the EEPROM is equal to 0xBABA. - **/ -s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) -{ - s32 ret_val; - u16 checksum = 0; - u16 i, nvm_data; - - for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { - ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); - if (ret_val) { - e_dbg("NVM Read Error\n"); - return ret_val; - } - checksum += nvm_data; - } - - if (checksum != (u16) NVM_SUM) { - e_dbg("NVM Checksum Invalid\n"); - return -E1000_ERR_NVM; - } - - return 0; -} - -/** - * e1000e_update_nvm_checksum_generic - Update EEPROM checksum - * @hw: pointer to the HW structure - * - * Updates the EEPROM checksum by reading/adding each word of the EEPROM - * up to the checksum. Then calculates the EEPROM checksum and writes the - * value to the EEPROM. - **/ -s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) -{ - s32 ret_val; - u16 checksum = 0; - u16 i, nvm_data; - - for (i = 0; i < NVM_CHECKSUM_REG; i++) { - ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); - if (ret_val) { - e_dbg("NVM Read Error while updating checksum.\n"); - return ret_val; - } - checksum += nvm_data; - } - checksum = (u16) NVM_SUM - checksum; - ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); - if (ret_val) - e_dbg("NVM Write Error while updating checksum.\n"); - - return ret_val; -} - -/** - * e1000e_reload_nvm - Reloads EEPROM - * @hw: pointer to the HW structure - * - * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the - * extended control register. 
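
The legacy-format branch above builds the PBA string by converting each 4-bit nibble into an ASCII hex digit; in isolation the conversion is just (hypothetical helper, not part of this patch):

	/* Map one nibble (0x0..0xF) to its ASCII hex digit, as the loop above does. */
	static char pba_nibble_to_hex(u8 nibble)
	{
		return (nibble < 0xA) ? (nibble + '0') : (nibble - 0xA + 'A');
	}
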
- **/ -void e1000e_reload_nvm(struct e1000_hw *hw) -{ - u32 ctrl_ext; - - udelay(10); - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_EE_RST; - ew32(CTRL_EXT, ctrl_ext); - e1e_flush(); -} - -/** - * e1000_calculate_checksum - Calculate checksum for buffer - * @buffer: pointer to EEPROM - * @length: size of EEPROM to calculate a checksum for - * - * Calculates the checksum of the buffer over the specified length. The - * calculated checksum is returned. - **/ -static u8 e1000_calculate_checksum(u8 *buffer, u32 length) -{ - u32 i; - u8 sum = 0; - - if (!buffer) - return 0; - - for (i = 0; i < length; i++) - sum += buffer[i]; - - return (u8) (0 - sum); -} - -/** - * e1000_mng_enable_host_if - Checks host interface is enabled - * @hw: pointer to the HW structure - * - * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND - * - * This function checks whether the HOST IF is enabled for command operation - * and also checks whether the previous command is completed. It busy-waits - * while a previous command is still in progress. - **/ -static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) -{ - u32 hicr; - u8 i; - - if (!(hw->mac.arc_subsystem_valid)) { - e_dbg("ARC subsystem not valid.\n"); - return -E1000_ERR_HOST_INTERFACE_COMMAND; - } - - /* Check that the host interface is enabled. */ - hicr = er32(HICR); - if ((hicr & E1000_HICR_EN) == 0) { - e_dbg("E1000_HOST_EN bit disabled.\n"); - return -E1000_ERR_HOST_INTERFACE_COMMAND; - } - /* check the previous command is completed */ - for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { - hicr = er32(HICR); - if (!(hicr & E1000_HICR_C)) - break; - mdelay(1); - } - - if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { - e_dbg("Previous command timed out.\n"); - return -E1000_ERR_HOST_INTERFACE_COMMAND; - } - - return 0; -} - -/** - * e1000e_check_mng_mode_generic - check management mode - * @hw: pointer to the HW structure - * - * Reads the firmware semaphore register and returns true (>0) if - * manageability is enabled, else false (0). - **/ -bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) -{ - u32 fwsm = er32(FWSM); - - return (fwsm & E1000_FWSM_MODE_MASK) == - (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); -} - -/** - * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx - * @hw: pointer to the HW structure - * - * Enables packet filtering on transmit packets if manageability is enabled - * and host interface is enabled. - **/ -bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) -{ - struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; - u32 *buffer = (u32 *)&hw->mng_cookie; - u32 offset; - s32 ret_val, hdr_csum, csum; - u8 i, len; - - hw->mac.tx_pkt_filtering = true; - - /* No manageability, no filtering */ - if (!e1000e_check_mng_mode(hw)) { - hw->mac.tx_pkt_filtering = false; - goto out; - } - - /* - * If we can't read from the host interface for whatever - * reason, disable filtering. - */ - ret_val = e1000_mng_enable_host_if(hw); - if (ret_val) { - hw->mac.tx_pkt_filtering = false; - goto out; - } - - /* Read in the header. Length and offset are in dwords.
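
Since e1000_calculate_checksum() above returns the two's complement of the byte sum, a buffer that already carries its checksum byte must sum to zero modulo 256; a hypothetical verification sketch (not part of this patch):

	/* True when buffer[0..length-1], checksum byte included, is intact. */
	static bool mng_cookie_sum_ok(const u8 *buffer, u32 length)
	{
		u8 sum = 0;
		u32 i;

		for (i = 0; i < length; i++)
			sum += buffer[i];

		return sum == 0;	/* data + (0 - data) == 0 (mod 256) */
	}
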
*/ - len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; - offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; - for (i = 0; i < len; i++) - *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i); - hdr_csum = hdr->checksum; - hdr->checksum = 0; - csum = e1000_calculate_checksum((u8 *)hdr, - E1000_MNG_DHCP_COOKIE_LENGTH); - /* - * If either the checksums or signature don't match, then - * the cookie area isn't considered valid, in which case we - * take the safe route of assuming Tx filtering is enabled. - */ - if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { - hw->mac.tx_pkt_filtering = true; - goto out; - } - - /* Cookie area is valid, make the final check for filtering. */ - if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { - hw->mac.tx_pkt_filtering = false; - goto out; - } - -out: - return hw->mac.tx_pkt_filtering; -} - -/** - * e1000_mng_write_cmd_header - Writes manageability command header - * @hw: pointer to the HW structure - * @hdr: pointer to the host interface command header - * - * Writes the command header after computing its checksum. - **/ -static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, - struct e1000_host_mng_command_header *hdr) -{ - u16 i, length = sizeof(struct e1000_host_mng_command_header); - - /* Write the whole command header structure with new checksum. */ - - hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); - - length >>= 2; - /* Write the relevant command block into the ram area. */ - for (i = 0; i < length; i++) { - E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, - *((u32 *) hdr + i)); - e1e_flush(); - } - - return 0; -} - -/** - * e1000_mng_host_if_write - Write to the manageability host interface - * @hw: pointer to the HW structure - * @buffer: pointer to the host interface buffer - * @length: size of the buffer - * @offset: location in the buffer to write to - * @sum: sum of the data (not checksum) - * - * This function writes the buffer content at the given offset on the host - * interface. It handles alignment so the writes are done as efficiently as - * possible, and it accumulates the byte sum of the buffer into *sum. - **/ -static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, - u16 length, u16 offset, u8 *sum) -{ - u8 *tmp; - u8 *bufptr = buffer; - u32 data = 0; - u16 remaining, i, j, prev_bytes; - - /* sum is only the sum of the data, not a checksum */ - - if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) - return -E1000_ERR_PARAM; - - tmp = (u8 *)&data; - prev_bytes = offset & 0x3; - offset >>= 2; - - if (prev_bytes) { - data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); - for (j = prev_bytes; j < sizeof(u32); j++) { - *(tmp + j) = *bufptr++; - *sum += *(tmp + j); - } - E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); - length -= j - prev_bytes; - offset++; - } - - remaining = length & 0x3; - length -= remaining; - - /* Calculate length in DWORDs */ - length >>= 2; - - /* - * The device driver writes the relevant command block into the - * ram area.
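
The tail of e1000_mng_host_if_write() below packs leftover bytes into one zero-padded dword while accumulating the running byte sum; that packing step, isolated (hypothetical helper, not part of this patch):

	/* Pack up to four bytes into a host-interface dword, zero-padding the
	 * tail and adding each byte into the running sum, as the code does. */
	static u32 pack_host_if_dword(const u8 *src, u16 nbytes, u8 *sum)
	{
		u32 data = 0;
		u8 *tmp = (u8 *)&data;
		u16 j;

		for (j = 0; j < sizeof(u32); j++) {
			tmp[j] = (j < nbytes) ? src[j] : 0;
			*sum += tmp[j];
		}
		return data;
	}
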
- */ - for (i = 0; i < length; i++) { - for (j = 0; j < sizeof(u32); j++) { - *(tmp + j) = *bufptr++; - *sum += *(tmp + j); - } - - E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); - } - if (remaining) { - for (j = 0; j < sizeof(u32); j++) { - if (j < remaining) - *(tmp + j) = *bufptr++; - else - *(tmp + j) = 0; - - *sum += *(tmp + j); - } - E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); - } - - return 0; -} - -/** - * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface - * @hw: pointer to the HW structure - * @buffer: pointer to the host interface - * @length: size of the buffer - * - * Writes the DHCP information to the host interface. - **/ -s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) -{ - struct e1000_host_mng_command_header hdr; - s32 ret_val; - u32 hicr; - - hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; - hdr.command_length = length; - hdr.reserved1 = 0; - hdr.reserved2 = 0; - hdr.checksum = 0; - - /* Enable the host interface */ - ret_val = e1000_mng_enable_host_if(hw); - if (ret_val) - return ret_val; - - /* Populate the host interface with the contents of "buffer". */ - ret_val = e1000_mng_host_if_write(hw, buffer, length, - sizeof(hdr), &(hdr.checksum)); - if (ret_val) - return ret_val; - - /* Write the manageability command header */ - ret_val = e1000_mng_write_cmd_header(hw, &hdr); - if (ret_val) - return ret_val; - - /* Tell the ARC a new command is pending. */ - hicr = er32(HICR); - ew32(HICR, hicr | E1000_HICR_C); - - return 0; -} - -/** - * e1000e_enable_mng_pass_thru - Check if management passthrough is needed - * @hw: pointer to the HW structure - * - * Verifies the hardware needs to leave interface enabled so that frames can - * be directed to and from the management interface. - **/ -bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) -{ - u32 manc; - u32 fwsm, factps; - bool ret_val = false; - - manc = er32(MANC); - - if (!(manc & E1000_MANC_RCV_TCO_EN)) - goto out; - - if (hw->mac.has_fwsm) { - fwsm = er32(FWSM); - factps = er32(FACTPS); - - if (!(factps & E1000_FACTPS_MNGCG) && - ((fwsm & E1000_FWSM_MODE_MASK) == - (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { - ret_val = true; - goto out; - } - } else if ((hw->mac.type == e1000_82574) || - (hw->mac.type == e1000_82583)) { - u16 data; - - factps = er32(FACTPS); - e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); - - if (!(factps & E1000_FACTPS_MNGCG) && - ((data & E1000_NVM_INIT_CTRL2_MNGM) == - (e1000_mng_mode_pt << 13))) { - ret_val = true; - goto out; - } - } else if ((manc & E1000_MANC_SMBUS_EN) && - !(manc & E1000_MANC_ASF_EN)) { - ret_val = true; - goto out; - } - -out: - return ret_val; -} diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c deleted file mode 100644 index ab4be80..0000000 --- a/drivers/net/e1000e/netdev.c +++ /dev/null @@ -1,6312 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "e1000.h" - -#define DRV_EXTRAVERSION "-k" - -#define DRV_VERSION "1.3.16" DRV_EXTRAVERSION -char e1000e_driver_name[] = "e1000e"; -const char e1000e_driver_version[] = DRV_VERSION; - -static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); - -static const struct e1000_info *e1000_info_tbl[] = { - [board_82571] = &e1000_82571_info, - [board_82572] = &e1000_82572_info, - [board_82573] = &e1000_82573_info, - [board_82574] = &e1000_82574_info, - [board_82583] = &e1000_82583_info, - [board_80003es2lan] = &e1000_es2_info, - [board_ich8lan] = &e1000_ich8_info, - [board_ich9lan] = &e1000_ich9_info, - [board_ich10lan] = &e1000_ich10_info, - [board_pchlan] = &e1000_pch_info, - [board_pch2lan] = &e1000_pch2_info, -}; - -struct e1000_reg_info { - u32 ofs; - char *name; -}; - -#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ -#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ -#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ -#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ -#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ - -#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ -#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ -#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ -#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ -#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ - -static const struct e1000_reg_info e1000_reg_info_tbl[] = { - - /* General Registers */ - {E1000_CTRL, "CTRL"}, - {E1000_STATUS, "STATUS"}, - {E1000_CTRL_EXT, "CTRL_EXT"}, - - /* Interrupt Registers */ - {E1000_ICR, "ICR"}, - - /* Rx Registers */ - {E1000_RCTL, "RCTL"}, - {E1000_RDLEN, "RDLEN"}, - {E1000_RDH, "RDH"}, - {E1000_RDT, "RDT"}, - {E1000_RDTR, "RDTR"}, - {E1000_RXDCTL(0), "RXDCTL"}, - {E1000_ERT, "ERT"}, - {E1000_RDBAL, "RDBAL"}, - {E1000_RDBAH, "RDBAH"}, - {E1000_RDFH, "RDFH"}, - {E1000_RDFT, "RDFT"}, - {E1000_RDFHS, "RDFHS"}, - {E1000_RDFTS, "RDFTS"}, - {E1000_RDFPC, "RDFPC"}, - - /* Tx Registers */ - {E1000_TCTL, "TCTL"}, - {E1000_TDBAL, "TDBAL"}, - {E1000_TDBAH, "TDBAH"}, - {E1000_TDLEN, "TDLEN"}, - {E1000_TDH, "TDH"}, - {E1000_TDT, "TDT"}, - {E1000_TIDV, "TIDV"}, - {E1000_TXDCTL(0), "TXDCTL"}, - {E1000_TADV, "TADV"}, - {E1000_TARC(0), "TARC"}, - {E1000_TDFH, "TDFH"}, - {E1000_TDFT, "TDFT"}, - {E1000_TDFHS, "TDFHS"}, - {E1000_TDFTS, "TDFTS"}, - {E1000_TDFPC, "TDFPC"}, - - /* List Terminator */ - {} -}; - -/* - * e1000_regdump - register printout routine - */ -static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) -{ - int n = 0; - char rname[16]; - u32 regs[8]; - - switch (reginfo->ofs) { - case E1000_RXDCTL(0): - for (n = 0; n < 2; n++) - regs[n] = __er32(hw, 
E1000_RXDCTL(n)); - break; - case E1000_TXDCTL(0): - for (n = 0; n < 2; n++) - regs[n] = __er32(hw, E1000_TXDCTL(n)); - break; - case E1000_TARC(0): - for (n = 0; n < 2; n++) - regs[n] = __er32(hw, E1000_TARC(n)); - break; - default: - printk(KERN_INFO "%-15s %08x\n", - reginfo->name, __er32(hw, reginfo->ofs)); - return; - } - - snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); - printk(KERN_INFO "%-15s ", rname); - for (n = 0; n < 2; n++) - printk(KERN_CONT "%08x ", regs[n]); - printk(KERN_CONT "\n"); -} - -/* - * e1000e_dump - Print registers, Tx-ring and Rx-ring - */ -static void e1000e_dump(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - struct e1000_reg_info *reginfo; - struct e1000_ring *tx_ring = adapter->tx_ring; - struct e1000_tx_desc *tx_desc; - struct my_u0 { - u64 a; - u64 b; - } *u0; - struct e1000_buffer *buffer_info; - struct e1000_ring *rx_ring = adapter->rx_ring; - union e1000_rx_desc_packet_split *rx_desc_ps; - struct e1000_rx_desc *rx_desc; - struct my_u1 { - u64 a; - u64 b; - u64 c; - u64 d; - } *u1; - u32 staterr; - int i = 0; - - if (!netif_msg_hw(adapter)) - return; - - /* Print netdevice Info */ - if (netdev) { - dev_info(&adapter->pdev->dev, "Net device Info\n"); - printk(KERN_INFO "Device Name state " - "trans_start last_rx\n"); - printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", - netdev->name, netdev->state, netdev->trans_start, - netdev->last_rx); - } - - /* Print Registers */ - dev_info(&adapter->pdev->dev, "Register Dump\n"); - printk(KERN_INFO " Register Name Value\n"); - for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; - reginfo->name; reginfo++) { - e1000_regdump(hw, reginfo); - } - - /* Print Tx Ring Summary */ - if (!netdev || !netif_running(netdev)) - goto exit; - - dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); - printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" - " leng ntw timestamp\n"); - buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; - printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", - 0, tx_ring->next_to_use, tx_ring->next_to_clean, - (unsigned long long)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - (unsigned long long)buffer_info->time_stamp); - - /* Print Tx Ring */ - if (!netif_msg_tx_done(adapter)) - goto rx_ring_summary; - - dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); - - /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) - * - * Legacy Transmit Descriptor - * +--------------------------------------------------------------+ - * 0 | Buffer Address [63:0] (Reserved on Write Back) | - * +--------------------------------------------------------------+ - * 8 | Special | CSS | Status | CMD | CSO | Length | - * +--------------------------------------------------------------+ - * 63 48 47 36 35 32 31 24 23 16 15 0 - * - * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload - * 63 48 47 40 39 32 31 16 15 8 7 0 - * +----------------------------------------------------------------+ - * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | - * +----------------------------------------------------------------+ - * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | - * +----------------------------------------------------------------+ - * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 - * - * Extended Data Descriptor (DTYP=0x1) - * +----------------------------------------------------------------+ - * 0 | Buffer Address [63:0] | - * 
+----------------------------------------------------------------+ - * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | - * +----------------------------------------------------------------+ - * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 - */ - printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Legacy format\n"); - printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Ext Context format\n"); - printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" - " [bi->dma ] leng ntw timestamp bi->skb " - "<-- Ext Data format\n"); - for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - tx_desc = E1000_TX_DESC(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - u0 = (struct my_u0 *)tx_desc; - printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " - "%04X %3X %016llX %p", - (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : - ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i, - (unsigned long long)le64_to_cpu(u0->a), - (unsigned long long)le64_to_cpu(u0->b), - (unsigned long long)buffer_info->dma, - buffer_info->length, buffer_info->next_to_watch, - (unsigned long long)buffer_info->time_stamp, - buffer_info->skb); - if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) - printk(KERN_CONT " NTC/U\n"); - else if (i == tx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); - else if (i == tx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); - else - printk(KERN_CONT "\n"); - - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) - print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); - } - - /* Print Rx Ring Summary */ -rx_ring_summary: - dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); - printk(KERN_INFO "Queue [NTU] [NTC]\n"); - printk(KERN_INFO " %5d %5X %5X\n", 0, - rx_ring->next_to_use, rx_ring->next_to_clean); - - /* Print Rx Ring */ - if (!netif_msg_rx_status(adapter)) - goto exit; - - dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); - switch (adapter->rx_ps_pages) { - case 1: - case 2: - case 3: - /* [Extended] Packet Split Receive Descriptor Format - * - * +-----------------------------------------------------+ - * 0 | Buffer Address 0 [63:0] | - * +-----------------------------------------------------+ - * 8 | Buffer Address 1 [63:0] | - * +-----------------------------------------------------+ - * 16 | Buffer Address 2 [63:0] | - * +-----------------------------------------------------+ - * 24 | Buffer Address 3 [63:0] | - * +-----------------------------------------------------+ - */ - printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " - "[buffer 1 63:0 ] " - "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " - "[bi->skb] <-- Ext Pkt Split format\n"); - /* [Extended] Receive Descriptor (Write-Back) Format - * - * 63 48 47 32 31 13 12 8 7 4 3 0 - * +------------------------------------------------------+ - * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | - * | Checksum | Ident | | Queue | | Type | - * +------------------------------------------------------+ - * 8 | VLAN Tag | Length | Extended Error | Extended Status | - * +------------------------------------------------------+ - * 63 48 47 32 31 20 19 0 - */ - printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " - "[vl l0 ee es] " - "[ l3 l2 l1 hs] [reserved ] ---------------- " - "[bi->skb] <-- Ext Rx Write-Back format\n"); - for (i = 0; i < rx_ring->count; i++) { - buffer_info = &rx_ring->buffer_info[i]; - rx_desc_ps = 
E1000_RX_DESC_PS(*rx_ring, i); - u1 = (struct my_u1 *)rx_desc_ps; - staterr = - le32_to_cpu(rx_desc_ps->wb.middle.status_error); - if (staterr & E1000_RXD_STAT_DD) { - /* Descriptor Done */ - printk(KERN_INFO "RWB[0x%03X] %016llX " - "%016llX %016llX %016llX " - "---------------- %p", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)le64_to_cpu(u1->c), - (unsigned long long)le64_to_cpu(u1->d), - buffer_info->skb); - } else { - printk(KERN_INFO "R [0x%03X] %016llX " - "%016llX %016llX %016llX %016llX %p", i, - (unsigned long long)le64_to_cpu(u1->a), - (unsigned long long)le64_to_cpu(u1->b), - (unsigned long long)le64_to_cpu(u1->c), - (unsigned long long)le64_to_cpu(u1->d), - (unsigned long long)buffer_info->dma, - buffer_info->skb); - - if (netif_msg_pktdata(adapter)) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt(buffer_info->dma), - adapter->rx_ps_bsize0, true); - } - - if (i == rx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); - else if (i == rx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); - else - printk(KERN_CONT "\n"); - } - break; - default: - case 0: - /* Legacy Receive Descriptor Format - * - * +-----------------------------------------------------+ - * | Buffer Address [63:0] | - * +-----------------------------------------------------+ - * | VLAN Tag | Errors | Status 0 | Packet csum | Length | - * +-----------------------------------------------------+ - * 63 48 47 40 39 32 31 16 15 0 - */ - printk(KERN_INFO "Rl[desc] [address 63:0 ] " - "[vl er S cks ln] [bi->dma ] [bi->skb] " - "<-- Legacy format\n"); - for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { - rx_desc = E1000_RX_DESC(*rx_ring, i); - buffer_info = &rx_ring->buffer_info[i]; - u0 = (struct my_u0 *)rx_desc; - printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " - "%016llX %p", i, - (unsigned long long)le64_to_cpu(u0->a), - (unsigned long long)le64_to_cpu(u0->b), - (unsigned long long)buffer_info->dma, - buffer_info->skb); - if (i == rx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); - else if (i == rx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); - else - printk(KERN_CONT "\n"); - - if (netif_msg_pktdata(adapter)) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt(buffer_info->dma), - adapter->rx_buffer_len, true); - } - } - -exit: - return; -} - -/** - * e1000_desc_unused - calculate if we have unused descriptors - **/ -static int e1000_desc_unused(struct e1000_ring *ring) -{ - if (ring->next_to_clean > ring->next_to_use) - return ring->next_to_clean - ring->next_to_use - 1; - - return ring->count + ring->next_to_clean - ring->next_to_use - 1; -} - -/** - * e1000_receive_skb - helper function to handle Rx indications - * @adapter: board private structure - * @status: descriptor status field as written by hardware - * @vlan: descriptor vlan field as written by hardware (no le/be conversion) - * @skb: pointer to sk_buff to be indicated to stack - **/ -static void e1000_receive_skb(struct e1000_adapter *adapter, - struct net_device *netdev, struct sk_buff *skb, - u8 status, __le16 vlan) -{ - u16 tag = le16_to_cpu(vlan); - skb->protocol = eth_type_trans(skb, netdev); - - if (status & E1000_RXD_STAT_VP) - __vlan_hwaccel_put_tag(skb, tag); - - napi_gro_receive(&adapter->napi, skb); -} - -/** - * e1000_rx_checksum - Receive Checksum Offload - * @adapter: board private structure - * @status_err: receive descriptor status and error fields - * @csum: receive descriptor csum field - * @sk_buff: 
socket buffer with received data - **/ -static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, - u32 csum, struct sk_buff *skb) -{ - u16 status = (u16)status_err; - u8 errors = (u8)(status_err >> 24); - - skb_checksum_none_assert(skb); - - /* Ignore Checksum bit is set */ - if (status & E1000_RXD_STAT_IXSM) - return; - /* TCP/UDP checksum error bit is set */ - if (errors & E1000_RXD_ERR_TCPE) { - /* let the stack verify checksum errors */ - adapter->hw_csum_err++; - return; - } - - /* TCP/UDP Checksum has not been calculated */ - if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) - return; - - /* It must be a TCP or UDP packet with a valid checksum */ - if (status & E1000_RXD_STAT_TCPCS) { - /* TCP checksum is good */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { - /* - * IP fragment with UDP payload - * Hardware complements the payload checksum, so we undo it - * and then put the value in host order for further stack use. - */ - __sum16 sum = (__force __sum16)htons(csum); - skb->csum = csum_unfold(~sum); - skb->ip_summed = CHECKSUM_COMPLETE; - } - adapter->hw_csum_good++; -} - -/** - * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended - * @adapter: address of board private structure - **/ -static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, - int cleaned_count, gfp_t gfp) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_rx_desc *rx_desc; - struct e1000_buffer *buffer_info; - struct sk_buff *skb; - unsigned int i; - unsigned int bufsz = adapter->rx_buffer_len; - - i = rx_ring->next_to_use; - buffer_info = &rx_ring->buffer_info[i]; - - while (cleaned_count--) { - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - goto map_skb; - } - - skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); - if (!skb) { - /* Better luck next round */ - adapter->alloc_rx_buff_failed++; - break; - } - - buffer_info->skb = skb; -map_skb: - buffer_info->dma = dma_map_single(&pdev->dev, skb->data, - adapter->rx_buffer_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - dev_err(&pdev->dev, "Rx DMA map failed\n"); - adapter->rx_dma_failed++; - break; - } - - rx_desc = E1000_RX_DESC(*rx_ring, i); - rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - - if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
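
In the CHECKSUM_COMPLETE branch of e1000_rx_checksum() above, the hardware supplies the one's complement of the payload checksum; recovering the value the stack expects is just an undo of that complement (hypothetical helper mirroring the two lines above, not part of this patch):

	/* Turn the descriptor's complemented payload checksum into the
	 * __wsum value stored in skb->csum above. */
	static __wsum rx_csum_to_wsum(u32 csum)
	{
		__sum16 sum = (__force __sum16)htons(csum);

		return csum_unfold(~sum);
	}
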
- */ - wmb(); - writel(i, adapter->hw.hw_addr + rx_ring->tail); - } - i++; - if (i == rx_ring->count) - i = 0; - buffer_info = &rx_ring->buffer_info[i]; - } - - rx_ring->next_to_use = i; -} - -/** - * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split - * @adapter: address of board private structure - **/ -static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, - int cleaned_count, gfp_t gfp) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - union e1000_rx_desc_packet_split *rx_desc; - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_buffer *buffer_info; - struct e1000_ps_page *ps_page; - struct sk_buff *skb; - unsigned int i, j; - - i = rx_ring->next_to_use; - buffer_info = &rx_ring->buffer_info[i]; - - while (cleaned_count--) { - rx_desc = E1000_RX_DESC_PS(*rx_ring, i); - - for (j = 0; j < PS_PAGE_BUFFERS; j++) { - ps_page = &buffer_info->ps_pages[j]; - if (j >= adapter->rx_ps_pages) { - /* all unused desc entries get hw null ptr */ - rx_desc->read.buffer_addr[j + 1] = - ~cpu_to_le64(0); - continue; - } - if (!ps_page->page) { - ps_page->page = alloc_page(gfp); - if (!ps_page->page) { - adapter->alloc_rx_buff_failed++; - goto no_buffers; - } - ps_page->dma = dma_map_page(&pdev->dev, - ps_page->page, - 0, PAGE_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(&pdev->dev, - ps_page->dma)) { - dev_err(&adapter->pdev->dev, - "Rx DMA page map failed\n"); - adapter->rx_dma_failed++; - goto no_buffers; - } - } - /* - * Refresh the desc even if buffer_addrs - * didn't change because each write-back - * erases this info. - */ - rx_desc->read.buffer_addr[j + 1] = - cpu_to_le64(ps_page->dma); - } - - skb = __netdev_alloc_skb_ip_align(netdev, - adapter->rx_ps_bsize0, - gfp); - - if (!skb) { - adapter->alloc_rx_buff_failed++; - break; - } - - buffer_info->skb = skb; - buffer_info->dma = dma_map_single(&pdev->dev, skb->data, - adapter->rx_ps_bsize0, - DMA_FROM_DEVICE); - if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - dev_err(&pdev->dev, "Rx DMA map failed\n"); - adapter->rx_dma_failed++; - /* cleanup skb */ - dev_kfree_skb_any(skb); - buffer_info->skb = NULL; - break; - } - - rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); - - if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
- */ - wmb(); - writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); - } - - i++; - if (i == rx_ring->count) - i = 0; - buffer_info = &rx_ring->buffer_info[i]; - } - -no_buffers: - rx_ring->next_to_use = i; -} - -/** - * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers - * @adapter: address of board private structure - * @cleaned_count: number of buffers to allocate this pass - **/ - -static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, - int cleaned_count, gfp_t gfp) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_rx_desc *rx_desc; - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_buffer *buffer_info; - struct sk_buff *skb; - unsigned int i; - unsigned int bufsz = 256 - 16 /* for skb_reserve */; - - i = rx_ring->next_to_use; - buffer_info = &rx_ring->buffer_info[i]; - - while (cleaned_count--) { - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - goto check_page; - } - - skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); - if (unlikely(!skb)) { - /* Better luck next round */ - adapter->alloc_rx_buff_failed++; - break; - } - - buffer_info->skb = skb; -check_page: - /* allocate a new page if necessary */ - if (!buffer_info->page) { - buffer_info->page = alloc_page(gfp); - if (unlikely(!buffer_info->page)) { - adapter->alloc_rx_buff_failed++; - break; - } - } - - if (!buffer_info->dma) - buffer_info->dma = dma_map_page(&pdev->dev, - buffer_info->page, 0, - PAGE_SIZE, - DMA_FROM_DEVICE); - - rx_desc = E1000_RX_DESC(*rx_ring, i); - rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - - if (unlikely(++i == rx_ring->count)) - i = 0; - buffer_info = &rx_ring->buffer_info[i]; - } - - if (likely(rx_ring->next_to_use != i)) { - rx_ring->next_to_use = i; - if (unlikely(i-- == 0)) - i = (rx_ring->count - 1); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
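
All of the refill and clean loops in this file share the same circular-ring arithmetic, reserving one slot so that equal head and tail indices unambiguously mean an empty ring; e1000_desc_unused() earlier reduces to this sketch (hypothetical names, not part of this patch):

	/* Free slots in a ring of `count` entries, keeping one slot unused so
	 * a full ring is never mistaken for an empty one. */
	static int ring_slots_free(unsigned int count, unsigned int next_to_use,
				   unsigned int next_to_clean)
	{
		if (next_to_clean > next_to_use)
			return next_to_clean - next_to_use - 1;
		return count + next_to_clean - next_to_use - 1;
	}
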
*/ - wmb(); - writel(i, adapter->hw.hw_addr + rx_ring->tail); - } -} - -/** - * e1000_clean_rx_irq - Send received data up the network stack; legacy - * @adapter: board private structure - * - * the return value indicates whether actual cleaning was done, there - * is no guarantee that everything was cleaned - **/ -static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, - int *work_done, int work_to_do) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_hw *hw = &adapter->hw; - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_rx_desc *rx_desc, *next_rxd; - struct e1000_buffer *buffer_info, *next_buffer; - u32 length; - unsigned int i; - int cleaned_count = 0; - bool cleaned = 0; - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - - i = rx_ring->next_to_clean; - rx_desc = E1000_RX_DESC(*rx_ring, i); - buffer_info = &rx_ring->buffer_info[i]; - - while (rx_desc->status & E1000_RXD_STAT_DD) { - struct sk_buff *skb; - u8 status; - - if (*work_done >= work_to_do) - break; - (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ - - status = rx_desc->status; - skb = buffer_info->skb; - buffer_info->skb = NULL; - - prefetch(skb->data - NET_IP_ALIGN); - - i++; - if (i == rx_ring->count) - i = 0; - next_rxd = E1000_RX_DESC(*rx_ring, i); - prefetch(next_rxd); - - next_buffer = &rx_ring->buffer_info[i]; - - cleaned = 1; - cleaned_count++; - dma_unmap_single(&pdev->dev, - buffer_info->dma, - adapter->rx_buffer_len, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - - length = le16_to_cpu(rx_desc->length); - - /* - * !EOP means multiple descriptors were used to store a single - * packet, if that's the case we need to toss it. In fact, we - * need to toss every packet with the EOP bit clear and the - * next frame that _does_ have the EOP bit set, as it is by - * definition only a frame fragment - */ - if (unlikely(!(status & E1000_RXD_STAT_EOP))) - adapter->flags2 |= FLAG2_IS_DISCARDING; - - if (adapter->flags2 & FLAG2_IS_DISCARDING) { - /* All receives must fit into a single buffer */ - e_dbg("Receive packet consumed multiple buffers\n"); - /* recycle */ - buffer_info->skb = skb; - if (status & E1000_RXD_STAT_EOP) - adapter->flags2 &= ~FLAG2_IS_DISCARDING; - goto next_desc; - } - - if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { - /* recycle */ - buffer_info->skb = skb; - goto next_desc; - } - - /* adjust length to remove Ethernet CRC */ - if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) - length -= 4; - - total_rx_bytes += length; - total_rx_packets++; - - /* - * code added for copybreak, this should improve - * performance for small packets with large amounts - * of reassembly being done in the stack - */ - if (length < copybreak) { - struct sk_buff *new_skb = - netdev_alloc_skb_ip_align(netdev, length); - if (new_skb) { - skb_copy_to_linear_data_offset(new_skb, - -NET_IP_ALIGN, - (skb->data - - NET_IP_ALIGN), - (length + - NET_IP_ALIGN)); - /* save the skb in buffer_info as good */ - buffer_info->skb = skb; - skb = new_skb; - } - /* else just continue with the old one */ - } - /* end copybreak code */ - skb_put(skb, length); - - /* Receive Checksum Offload */ - e1000_rx_checksum(adapter, - (u32)(status) | - ((u32)(rx_desc->errors) << 24), - le16_to_cpu(rx_desc->csum), skb); - - e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special); - -next_desc: - rx_desc->status = 0; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= E1000_RX_BUFFER_WRITE) { - 
adapter->alloc_rx_buf(adapter, cleaned_count, - GFP_ATOMIC); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - } - rx_ring->next_to_clean = i; - - cleaned_count = e1000_desc_unused(rx_ring); - if (cleaned_count) - adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); - - adapter->total_rx_bytes += total_rx_bytes; - adapter->total_rx_packets += total_rx_packets; - return cleaned; -} - -static void e1000_put_txbuf(struct e1000_adapter *adapter, - struct e1000_buffer *buffer_info) -{ - if (buffer_info->dma) { - if (buffer_info->mapped_as_page) - dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, - buffer_info->length, DMA_TO_DEVICE); - else - dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, - buffer_info->length, DMA_TO_DEVICE); - buffer_info->dma = 0; - } - if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); - buffer_info->skb = NULL; - } - buffer_info->time_stamp = 0; -} - -static void e1000_print_hw_hang(struct work_struct *work) -{ - struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, - print_hang_task); - struct e1000_ring *tx_ring = adapter->tx_ring; - unsigned int i = tx_ring->next_to_clean; - unsigned int eop = tx_ring->buffer_info[i].next_to_watch; - struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); - struct e1000_hw *hw = &adapter->hw; - u16 phy_status, phy_1000t_status, phy_ext_status; - u16 pci_status; - - if (test_bit(__E1000_DOWN, &adapter->state)) - return; - - e1e_rphy(hw, PHY_STATUS, &phy_status); - e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); - e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); - - pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); - - /* detected Hardware unit hang */ - e_err("Detected Hardware Unit Hang:\n" - " TDH <%x>\n" - " TDT <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "buffer_info[next_to_clean]:\n" - " time_stamp <%lx>\n" - " next_to_watch <%x>\n" - " jiffies <%lx>\n" - " next_to_watch.status <%x>\n" - "MAC Status <%x>\n" - "PHY Status <%x>\n" - "PHY 1000BASE-T Status <%x>\n" - "PHY Extended Status <%x>\n" - "PCI Status <%x>\n", - readl(adapter->hw.hw_addr + tx_ring->head), - readl(adapter->hw.hw_addr + tx_ring->tail), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, - eop, - jiffies, - eop_desc->upper.fields.status, - er32(STATUS), - phy_status, - phy_1000t_status, - phy_ext_status, - pci_status); -} - -/** - * e1000_clean_tx_irq - Reclaim resources after transmit completes - * @adapter: board private structure - * - * the return value indicates whether actual cleaning was done, there - * is no guarantee that everything was cleaned - **/ -static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - struct e1000_ring *tx_ring = adapter->tx_ring; - struct e1000_tx_desc *tx_desc, *eop_desc; - struct e1000_buffer *buffer_info; - unsigned int i, eop; - unsigned int count = 0; - unsigned int total_tx_bytes = 0, total_tx_packets = 0; - - i = tx_ring->next_to_clean; - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = E1000_TX_DESC(*tx_ring, eop); - - while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && - (count < tx_ring->count)) { - bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ - for (; !cleaned; count++) { - tx_desc = E1000_TX_DESC(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - cleaned = (i == eop); - - if (cleaned) { - 
total_tx_packets += buffer_info->segs; - total_tx_bytes += buffer_info->bytecount; - } - - e1000_put_txbuf(adapter, buffer_info); - tx_desc->upper.data = 0; - - i++; - if (i == tx_ring->count) - i = 0; - } - - if (i == tx_ring->next_to_use) - break; - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = E1000_TX_DESC(*tx_ring, eop); - } - - tx_ring->next_to_clean = i; - -#define TX_WAKE_THRESHOLD 32 - if (count && netif_carrier_ok(netdev) && - e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. - */ - smp_mb(); - - if (netif_queue_stopped(netdev) && - !(test_bit(__E1000_DOWN, &adapter->state))) { - netif_wake_queue(netdev); - ++adapter->restart_queue; - } - } - - if (adapter->detect_tx_hung) { - /* - * Detect a transmit hang in hardware, this serializes the - * check with the clearing of time_stamp and movement of i - */ - adapter->detect_tx_hung = 0; - if (tx_ring->buffer_info[i].time_stamp && - time_after(jiffies, tx_ring->buffer_info[i].time_stamp - + (adapter->tx_timeout_factor * HZ)) && - !(er32(STATUS) & E1000_STATUS_TXOFF)) { - schedule_work(&adapter->print_hang_task); - netif_stop_queue(netdev); - } - } - adapter->total_tx_bytes += total_tx_bytes; - adapter->total_tx_packets += total_tx_packets; - return count < tx_ring->count; -} - -/** - * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split - * @adapter: board private structure - * - * the return value indicates whether actual cleaning was done, there - * is no guarantee that everything was cleaned - **/ -static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, - int *work_done, int work_to_do) -{ - struct e1000_hw *hw = &adapter->hw; - union e1000_rx_desc_packet_split *rx_desc, *next_rxd; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_buffer *buffer_info, *next_buffer; - struct e1000_ps_page *ps_page; - struct sk_buff *skb; - unsigned int i, j; - u32 length, staterr; - int cleaned_count = 0; - bool cleaned = 0; - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - - i = rx_ring->next_to_clean; - rx_desc = E1000_RX_DESC_PS(*rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.middle.status_error); - buffer_info = &rx_ring->buffer_info[i]; - - while (staterr & E1000_RXD_STAT_DD) { - if (*work_done >= work_to_do) - break; - (*work_done)++; - skb = buffer_info->skb; - rmb(); /* read descriptor and rx_buffer_info after status DD */ - - /* in the packet split case this is header only */ - prefetch(skb->data - NET_IP_ALIGN); - - i++; - if (i == rx_ring->count) - i = 0; - next_rxd = E1000_RX_DESC_PS(*rx_ring, i); - prefetch(next_rxd); - - next_buffer = &rx_ring->buffer_info[i]; - - cleaned = 1; - cleaned_count++; - dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_bsize0, DMA_FROM_DEVICE); - buffer_info->dma = 0; - - /* see !EOP comment in other Rx routine */ - if (!(staterr & E1000_RXD_STAT_EOP)) - adapter->flags2 |= FLAG2_IS_DISCARDING; - - if (adapter->flags2 & FLAG2_IS_DISCARDING) { - e_dbg("Packet Split buffers didn't pick up the full " - "packet\n"); - dev_kfree_skb_irq(skb); - if (staterr & E1000_RXD_STAT_EOP) - adapter->flags2 &= ~FLAG2_IS_DISCARDING; - goto next_desc; - } - - if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { - dev_kfree_skb_irq(skb); - goto next_desc; - } - - length = le16_to_cpu(rx_desc->wb.middle.length0); - - if (!length) { - e_dbg("Last part of the packet spanning 
multiple " - "descriptors\n"); - dev_kfree_skb_irq(skb); - goto next_desc; - } - - /* Good Receive */ - skb_put(skb, length); - - { - /* - * this looks ugly, but it seems compiler issues make it - * more efficient than reusing j - */ - int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); - - /* - * page alloc/put takes too long and effects small packet - * throughput, so unsplit small packets and save the alloc/put - * only valid in softirq (napi) context to call kmap_* - */ - if (l1 && (l1 <= copybreak) && - ((length + l1) <= adapter->rx_ps_bsize0)) { - u8 *vaddr; - - ps_page = &buffer_info->ps_pages[0]; - - /* - * there is no documentation about how to call - * kmap_atomic, so we can't hold the mapping - * very long - */ - dma_sync_single_for_cpu(&pdev->dev, ps_page->dma, - PAGE_SIZE, DMA_FROM_DEVICE); - vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); - memcpy(skb_tail_pointer(skb), vaddr, l1); - kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); - dma_sync_single_for_device(&pdev->dev, ps_page->dma, - PAGE_SIZE, DMA_FROM_DEVICE); - - /* remove the CRC */ - if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) - l1 -= 4; - - skb_put(skb, l1); - goto copydone; - } /* if */ - } - - for (j = 0; j < PS_PAGE_BUFFERS; j++) { - length = le16_to_cpu(rx_desc->wb.upper.length[j]); - if (!length) - break; - - ps_page = &buffer_info->ps_pages[j]; - dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, - DMA_FROM_DEVICE); - ps_page->dma = 0; - skb_fill_page_desc(skb, j, ps_page->page, 0, length); - ps_page->page = NULL; - skb->len += length; - skb->data_len += length; - skb->truesize += length; - } - - /* strip the ethernet crc, problem is we're using pages now so - * this whole operation can get a little cpu intensive - */ - if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) - pskb_trim(skb, skb->len - 4); - -copydone: - total_rx_bytes += skb->len; - total_rx_packets++; - - e1000_rx_checksum(adapter, staterr, le16_to_cpu( - rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); - - if (rx_desc->wb.upper.header_status & - cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) - adapter->rx_hdr_split++; - - e1000_receive_skb(adapter, netdev, skb, - staterr, rx_desc->wb.middle.vlan); - -next_desc: - rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); - buffer_info->skb = NULL; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= E1000_RX_BUFFER_WRITE) { - adapter->alloc_rx_buf(adapter, cleaned_count, - GFP_ATOMIC); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - - staterr = le32_to_cpu(rx_desc->wb.middle.status_error); - } - rx_ring->next_to_clean = i; - - cleaned_count = e1000_desc_unused(rx_ring); - if (cleaned_count) - adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); - - adapter->total_rx_bytes += total_rx_bytes; - adapter->total_rx_packets += total_rx_packets; - return cleaned; -} - -/** - * e1000_consume_page - helper function - **/ -static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, - u16 length) -{ - bi->page = NULL; - skb->len += length; - skb->data_len += length; - skb->truesize += length; -} - -/** - * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy - * @adapter: board private structure - * - * the return value indicates whether actual cleaning was done, there - * is no guarantee that everything was cleaned - **/ - -static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, - int *work_done, int work_to_do) -{ - struct net_device *netdev = adapter->netdev; 
- struct pci_dev *pdev = adapter->pdev; - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_rx_desc *rx_desc, *next_rxd; - struct e1000_buffer *buffer_info, *next_buffer; - u32 length; - unsigned int i; - int cleaned_count = 0; - bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; - - i = rx_ring->next_to_clean; - rx_desc = E1000_RX_DESC(*rx_ring, i); - buffer_info = &rx_ring->buffer_info[i]; - - while (rx_desc->status & E1000_RXD_STAT_DD) { - struct sk_buff *skb; - u8 status; - - if (*work_done >= work_to_do) - break; - (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ - - status = rx_desc->status; - skb = buffer_info->skb; - buffer_info->skb = NULL; - - ++i; - if (i == rx_ring->count) - i = 0; - next_rxd = E1000_RX_DESC(*rx_ring, i); - prefetch(next_rxd); - - next_buffer = &rx_ring->buffer_info[i]; - - cleaned = true; - cleaned_count++; - dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - - length = le16_to_cpu(rx_desc->length); - - /* errors is only valid for DD + EOP descriptors */ - if (unlikely((status & E1000_RXD_STAT_EOP) && - (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { - /* recycle both page and skb */ - buffer_info->skb = skb; - /* an error means any chain goes out the window - * too */ - if (rx_ring->rx_skb_top) - dev_kfree_skb_irq(rx_ring->rx_skb_top); - rx_ring->rx_skb_top = NULL; - goto next_desc; - } - -#define rxtop (rx_ring->rx_skb_top) - if (!(status & E1000_RXD_STAT_EOP)) { - /* this descriptor is only the beginning (or middle) */ - if (!rxtop) { - /* this is the beginning of a chain */ - rxtop = skb; - skb_fill_page_desc(rxtop, 0, buffer_info->page, - 0, length); - } else { - /* this is the middle of a chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); - /* re-use the skb, only consumed the page */ - buffer_info->skb = skb; - } - e1000_consume_page(buffer_info, rxtop, length); - goto next_desc; - } else { - if (rxtop) { - /* end of the chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); - /* re-use the current skb, we only consumed the - * page */ - buffer_info->skb = skb; - skb = rxtop; - rxtop = NULL; - e1000_consume_page(buffer_info, skb, length); - } else { - /* no chain, got EOP, this buf is the packet - * copybreak to save the put_page/alloc_page */ - if (length <= copybreak && - skb_tailroom(skb) >= length) { - u8 *vaddr; - vaddr = kmap_atomic(buffer_info->page, - KM_SKB_DATA_SOFTIRQ); - memcpy(skb_tail_pointer(skb), vaddr, - length); - kunmap_atomic(vaddr, - KM_SKB_DATA_SOFTIRQ); - /* re-use the page, so don't erase - * buffer_info->page */ - skb_put(skb, length); - } else { - skb_fill_page_desc(skb, 0, - buffer_info->page, 0, - length); - e1000_consume_page(buffer_info, skb, - length); - } - } - } - - /* Receive Checksum Offload XXX recompute due to CRC strip? 
*/ - e1000_rx_checksum(adapter, - (u32)(status) | - ((u32)(rx_desc->errors) << 24), - le16_to_cpu(rx_desc->csum), skb); - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - total_rx_packets++; - - /* eth type trans needs skb->data to point to something */ - if (!pskb_may_pull(skb, ETH_HLEN)) { - e_err("pskb_may_pull failed.\n"); - dev_kfree_skb_irq(skb); - goto next_desc; - } - - e1000_receive_skb(adapter, netdev, skb, status, - rx_desc->special); - -next_desc: - rx_desc->status = 0; - - /* return some buffers to hardware, one at a time is too slow */ - if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { - adapter->alloc_rx_buf(adapter, cleaned_count, - GFP_ATOMIC); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - } - rx_ring->next_to_clean = i; - - cleaned_count = e1000_desc_unused(rx_ring); - if (cleaned_count) - adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); - - adapter->total_rx_bytes += total_rx_bytes; - adapter->total_rx_packets += total_rx_packets; - return cleaned; -} - -/** - * e1000_clean_rx_ring - Free Rx Buffers per Queue - * @adapter: board private structure - **/ -static void e1000_clean_rx_ring(struct e1000_adapter *adapter) -{ - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_buffer *buffer_info; - struct e1000_ps_page *ps_page; - struct pci_dev *pdev = adapter->pdev; - unsigned int i, j; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - buffer_info = &rx_ring->buffer_info[i]; - if (buffer_info->dma) { - if (adapter->clean_rx == e1000_clean_rx_irq) - dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, - DMA_FROM_DEVICE); - else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) - dma_unmap_page(&pdev->dev, buffer_info->dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - else if (adapter->clean_rx == e1000_clean_rx_irq_ps) - dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_bsize0, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - } - - if (buffer_info->page) { - put_page(buffer_info->page); - buffer_info->page = NULL; - } - - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; - } - - for (j = 0; j < PS_PAGE_BUFFERS; j++) { - ps_page = &buffer_info->ps_pages[j]; - if (!ps_page->page) - break; - dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, - DMA_FROM_DEVICE); - ps_page->dma = 0; - put_page(ps_page->page); - ps_page->page = NULL; - } - } - - /* there also may be some cached data from a chained receive */ - if (rx_ring->rx_skb_top) { - dev_kfree_skb(rx_ring->rx_skb_top); - rx_ring->rx_skb_top = NULL; - } - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - adapter->flags2 &= ~FLAG2_IS_DISCARDING; - - writel(0, adapter->hw.hw_addr + rx_ring->head); - writel(0, adapter->hw.hw_addr + rx_ring->tail); -} - -static void e1000e_downshift_workaround(struct work_struct *work) -{ - struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, downshift_task); - - if (test_bit(__E1000_DOWN, &adapter->state)) - return; - - e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); -} - -/** - * e1000_intr_msi - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t e1000_intr_msi(int irq, void *data) -{ - struct net_device *netdev = data; - struct e1000_adapter *adapter = netdev_priv(netdev); - struct 
e1000_hw *hw = &adapter->hw; - u32 icr = er32(ICR); - - /* - * read ICR disables interrupts using IAM - */ - - if (icr & E1000_ICR_LSC) { - hw->mac.get_link_status = 1; - /* - * ICH8 workaround-- Call gig speed drop workaround on cable - * disconnect (LSC) before accessing any PHY registers - */ - if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && - (!(er32(STATUS) & E1000_STATUS_LU))) - schedule_work(&adapter->downshift_task); - - /* - * 80003ES2LAN workaround-- For packet buffer work-around on - * link down event; disable receives here in the ISR and reset - * adapter in watchdog - */ - if (netif_carrier_ok(netdev) && - adapter->flags & FLAG_RX_NEEDS_RESTART) { - /* disable receives */ - u32 rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); - adapter->flags |= FLAG_RX_RESTART_NOW; - } - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); - } - - if (napi_schedule_prep(&adapter->napi)) { - adapter->total_tx_bytes = 0; - adapter->total_tx_packets = 0; - adapter->total_rx_bytes = 0; - adapter->total_rx_packets = 0; - __napi_schedule(&adapter->napi); - } - - return IRQ_HANDLED; -} - -/** - * e1000_intr - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t e1000_intr(int irq, void *data) -{ - struct net_device *netdev = data; - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 rctl, icr = er32(ICR); - - if (!icr || test_bit(__E1000_DOWN, &adapter->state)) - return IRQ_NONE; /* Not our interrupt */ - - /* - * IMS will not auto-mask if INT_ASSERTED is not set, and if it is - * not set, then the adapter didn't send an interrupt - */ - if (!(icr & E1000_ICR_INT_ASSERTED)) - return IRQ_NONE; - - /* - * Interrupt Auto-Mask...upon reading ICR, - * interrupts are masked. 
No need for the - * IMC write - */ - - if (icr & E1000_ICR_LSC) { - hw->mac.get_link_status = 1; - /* - * ICH8 workaround-- Call gig speed drop workaround on cable - * disconnect (LSC) before accessing any PHY registers - */ - if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && - (!(er32(STATUS) & E1000_STATUS_LU))) - schedule_work(&adapter->downshift_task); - - /* - * 80003ES2LAN workaround-- - * For packet buffer work-around on link down event; - * disable receives here in the ISR and - * reset adapter in watchdog - */ - if (netif_carrier_ok(netdev) && - (adapter->flags & FLAG_RX_NEEDS_RESTART)) { - /* disable receives */ - rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); - adapter->flags |= FLAG_RX_RESTART_NOW; - } - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); - } - - if (napi_schedule_prep(&adapter->napi)) { - adapter->total_tx_bytes = 0; - adapter->total_tx_packets = 0; - adapter->total_rx_bytes = 0; - adapter->total_rx_packets = 0; - __napi_schedule(&adapter->napi); - } - - return IRQ_HANDLED; -} - -static irqreturn_t e1000_msix_other(int irq, void *data) -{ - struct net_device *netdev = data; - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 icr = er32(ICR); - - if (!(icr & E1000_ICR_INT_ASSERTED)) { - if (!test_bit(__E1000_DOWN, &adapter->state)) - ew32(IMS, E1000_IMS_OTHER); - return IRQ_NONE; - } - - if (icr & adapter->eiac_mask) - ew32(ICS, (icr & adapter->eiac_mask)); - - if (icr & E1000_ICR_OTHER) { - if (!(icr & E1000_ICR_LSC)) - goto no_link_interrupt; - hw->mac.get_link_status = 1; - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); - } - -no_link_interrupt: - if (!test_bit(__E1000_DOWN, &adapter->state)) - ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); - - return IRQ_HANDLED; -} - - -static irqreturn_t e1000_intr_msix_tx(int irq, void *data) -{ - struct net_device *netdev = data; - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct e1000_ring *tx_ring = adapter->tx_ring; - - - adapter->total_tx_bytes = 0; - adapter->total_tx_packets = 0; - - if (!e1000_clean_tx_irq(adapter)) - /* Ring was not completely cleaned, so fire another interrupt */ - ew32(ICS, tx_ring->ims_val); - - return IRQ_HANDLED; -} - -static irqreturn_t e1000_intr_msix_rx(int irq, void *data) -{ - struct net_device *netdev = data; - struct e1000_adapter *adapter = netdev_priv(netdev); - - /* Write the ITR value calculated at the end of the - * previous interrupt. - */ - if (adapter->rx_ring->set_itr) { - writel(1000000000 / (adapter->rx_ring->itr_val * 256), - adapter->hw.hw_addr + adapter->rx_ring->itr_register); - adapter->rx_ring->set_itr = 0; - } - - if (napi_schedule_prep(&adapter->napi)) { - adapter->total_rx_bytes = 0; - adapter->total_rx_packets = 0; - __napi_schedule(&adapter->napi); - } - return IRQ_HANDLED; -} - -/** - * e1000_configure_msix - Configure MSI-X hardware - * - * e1000_configure_msix sets up the hardware to properly - * generate MSI-X interrupts. 
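
The writel() calls above program the 82574 EITR registers, which count in 256-nanosecond units, from an interrupts-per-second target; the conversion used throughout is (hypothetical helper, not part of this patch):

	/* itr_val is a rate in interrupts/sec; EITR wants an interval in
	 * 256 ns units, hence 10^9 / (itr_val * 256), as written above. */
	static u32 itr_rate_to_eitr(u32 itr_val)
	{
		return 1000000000 / (itr_val * 256);
	}
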
- **/ -static void e1000_configure_msix(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_ring *tx_ring = adapter->tx_ring; - int vector = 0; - u32 ctrl_ext, ivar = 0; - - adapter->eiac_mask = 0; - - /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ - if (hw->mac.type == e1000_82574) { - u32 rfctl = er32(RFCTL); - rfctl |= E1000_RFCTL_ACK_DIS; - ew32(RFCTL, rfctl); - } - -#define E1000_IVAR_INT_ALLOC_VALID 0x8 - /* Configure Rx vector */ - rx_ring->ims_val = E1000_IMS_RXQ0; - adapter->eiac_mask |= rx_ring->ims_val; - if (rx_ring->itr_val) - writel(1000000000 / (rx_ring->itr_val * 256), - hw->hw_addr + rx_ring->itr_register); - else - writel(1, hw->hw_addr + rx_ring->itr_register); - ivar = E1000_IVAR_INT_ALLOC_VALID | vector; - - /* Configure Tx vector */ - tx_ring->ims_val = E1000_IMS_TXQ0; - vector++; - if (tx_ring->itr_val) - writel(1000000000 / (tx_ring->itr_val * 256), - hw->hw_addr + tx_ring->itr_register); - else - writel(1, hw->hw_addr + tx_ring->itr_register); - adapter->eiac_mask |= tx_ring->ims_val; - ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); - - /* set vector for Other Causes, e.g. link changes */ - vector++; - ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); - if (rx_ring->itr_val) - writel(1000000000 / (rx_ring->itr_val * 256), - hw->hw_addr + E1000_EITR_82574(vector)); - else - writel(1, hw->hw_addr + E1000_EITR_82574(vector)); - - /* Cause Tx interrupts on every write back */ - ivar |= (1 << 31); - - ew32(IVAR, ivar); - - /* enable MSI-X PBA support */ - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; - - /* Auto-Mask Other interrupts upon ICR read */ -#define E1000_EIAC_MASK_82574 0x01F00000 - ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); - ctrl_ext |= E1000_CTRL_EXT_EIAME; - ew32(CTRL_EXT, ctrl_ext); - e1e_flush(); -} - -void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) -{ - if (adapter->msix_entries) { - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } else if (adapter->flags & FLAG_MSI_ENABLED) { - pci_disable_msi(adapter->pdev); - adapter->flags &= ~FLAG_MSI_ENABLED; - } -} - -/** - * e1000e_set_interrupt_capability - set MSI or MSI-X if supported - * - * Attempt to configure interrupts using the best available - * capabilities of the hardware and kernel. - **/ -void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) -{ - int err; - int i; - - switch (adapter->int_mode) { - case E1000E_INT_MODE_MSIX: - if (adapter->flags & FLAG_HAS_MSIX) { - adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ - adapter->msix_entries = kcalloc(adapter->num_vectors, - sizeof(struct msix_entry), - GFP_KERNEL); - if (adapter->msix_entries) { - for (i = 0; i < adapter->num_vectors; i++) - adapter->msix_entries[i].entry = i; - - err = pci_enable_msix(adapter->pdev, - adapter->msix_entries, - adapter->num_vectors); - if (err == 0) - return; - } - /* MSI-X failed, so fall through and try MSI */ - e_err("Failed to initialize MSI-X interrupts. " - "Falling back to MSI interrupts.\n"); - e1000e_reset_interrupt_capability(adapter); - } - adapter->int_mode = E1000E_INT_MODE_MSI; - /* Fall through */ - case E1000E_INT_MODE_MSI: - if (!pci_enable_msi(adapter->pdev)) { - adapter->flags |= FLAG_MSI_ENABLED; - } else { - adapter->int_mode = E1000E_INT_MODE_LEGACY; - e_err("Failed to initialize MSI interrupts. 
Falling " - "back to legacy interrupts.\n"); - } - /* Fall through */ - case E1000E_INT_MODE_LEGACY: - /* Don't do anything; this is the system default */ - break; - } - - /* store the number of vectors being used */ - adapter->num_vectors = 1; -} - -/** - * e1000_request_msix - Initialize MSI-X interrupts - * - * e1000_request_msix allocates MSI-X vectors and requests interrupts from the - * kernel. - **/ -static int e1000_request_msix(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err = 0, vector = 0; - - if (strlen(netdev->name) < (IFNAMSIZ - 5)) - snprintf(adapter->rx_ring->name, - sizeof(adapter->rx_ring->name) - 1, - "%s-rx-0", netdev->name); - else - memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); - err = request_irq(adapter->msix_entries[vector].vector, - e1000_intr_msix_rx, 0, adapter->rx_ring->name, - netdev); - if (err) - goto out; - adapter->rx_ring->itr_register = E1000_EITR_82574(vector); - adapter->rx_ring->itr_val = adapter->itr; - vector++; - - if (strlen(netdev->name) < (IFNAMSIZ - 5)) - snprintf(adapter->tx_ring->name, - sizeof(adapter->tx_ring->name) - 1, - "%s-tx-0", netdev->name); - else - memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); - err = request_irq(adapter->msix_entries[vector].vector, - e1000_intr_msix_tx, 0, adapter->tx_ring->name, - netdev); - if (err) - goto out; - adapter->tx_ring->itr_register = E1000_EITR_82574(vector); - adapter->tx_ring->itr_val = adapter->itr; - vector++; - - err = request_irq(adapter->msix_entries[vector].vector, - e1000_msix_other, 0, netdev->name, netdev); - if (err) - goto out; - - e1000_configure_msix(adapter); - return 0; -out: - return err; -} - -/** - * e1000_request_irq - initialize interrupts - * - * Attempts to configure interrupts using the best available - * capabilities of the hardware and kernel. 
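 *
 * [Editorial note, not part of the original patch] The ladder below
 * degrades from MSI-X to MSI to legacy INTx. Only the legacy request
 * passes IRQF_SHARED, because an INTx line may be shared with other
 * devices, whereas message-signalled vectors are exclusive:
 *
 *	request_irq(pdev->irq, e1000_intr, IRQF_SHARED, name, netdev);
 *	request_irq(msix_vec, e1000_intr_msix_rx, 0, name, netdev);
 *
 * (name and msix_vec stand in for the actual arguments.)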
- **/ -static int e1000_request_irq(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err; - - if (adapter->msix_entries) { - err = e1000_request_msix(adapter); - if (!err) - return err; - /* fall back to MSI */ - e1000e_reset_interrupt_capability(adapter); - adapter->int_mode = E1000E_INT_MODE_MSI; - e1000e_set_interrupt_capability(adapter); - } - if (adapter->flags & FLAG_MSI_ENABLED) { - err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, - netdev->name, netdev); - if (!err) - return err; - - /* fall back to legacy interrupt */ - e1000e_reset_interrupt_capability(adapter); - adapter->int_mode = E1000E_INT_MODE_LEGACY; - } - - err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, - netdev->name, netdev); - if (err) - e_err("Unable to allocate interrupt, Error: %d\n", err); - - return err; -} - -static void e1000_free_irq(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - if (adapter->msix_entries) { - int vector = 0; - - free_irq(adapter->msix_entries[vector].vector, netdev); - vector++; - - free_irq(adapter->msix_entries[vector].vector, netdev); - vector++; - - /* Other Causes interrupt vector */ - free_irq(adapter->msix_entries[vector].vector, netdev); - return; - } - - free_irq(adapter->pdev->irq, netdev); -} - -/** - * e1000_irq_disable - Mask off interrupt generation on the NIC - **/ -static void e1000_irq_disable(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - ew32(IMC, ~0); - if (adapter->msix_entries) - ew32(EIAC_82574, 0); - e1e_flush(); - - if (adapter->msix_entries) { - int i; - for (i = 0; i < adapter->num_vectors; i++) - synchronize_irq(adapter->msix_entries[i].vector); - } else { - synchronize_irq(adapter->pdev->irq); - } -} - -/** - * e1000_irq_enable - Enable default interrupt generation settings - **/ -static void e1000_irq_enable(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - if (adapter->msix_entries) { - ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); - ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); - } else { - ew32(IMS, IMS_ENABLE_MASK); - } - e1e_flush(); -} - -/** - * e1000e_get_hw_control - get control of the h/w from f/w - * @adapter: address of board private structure - * - * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. - * For ASF and Pass Through versions of f/w this means that - * the driver is loaded. For AMT version (only with 82573) - * of the f/w this means that the network i/f is open. - **/ -void e1000e_get_hw_control(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl_ext; - u32 swsm; - - /* Let firmware know the driver has taken over */ - if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { - swsm = er32(SWSM); - ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); - } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { - ctrl_ext = er32(CTRL_EXT); - ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); - } -} - -/** - * e1000e_release_hw_control - release control of the h/w to f/w - * @adapter: address of board private structure - * - * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. - * For ASF and Pass Through versions of f/w this means that the - * driver is no longer loaded. For AMT version (only with 82573) i - * of the f/w this means that the network i/f is closed. 
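 *
 * [Editorial sketch, not part of the original patch] Both directions
 * of the handshake are a read-modify-write of a single bit, shown
 * here for the SWSM variant:
 *
 *	ew32(SWSM, er32(SWSM) |  E1000_SWSM_DRV_LOAD);	// driver owns h/w
 *	ew32(SWSM, er32(SWSM) & ~E1000_SWSM_DRV_LOAD);	// f/w owns h/w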
- * - **/ -void e1000e_release_hw_control(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl_ext; - u32 swsm; - - /* Let firmware taken over control of h/w */ - if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { - swsm = er32(SWSM); - ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); - } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { - ctrl_ext = er32(CTRL_EXT); - ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); - } -} - -/** - * @e1000_alloc_ring - allocate memory for a ring structure - **/ -static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, - struct e1000_ring *ring) -{ - struct pci_dev *pdev = adapter->pdev; - - ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, - GFP_KERNEL); - if (!ring->desc) - return -ENOMEM; - - return 0; -} - -/** - * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) - * @adapter: board private structure - * - * Return 0 on success, negative on failure - **/ -int e1000e_setup_tx_resources(struct e1000_adapter *adapter) -{ - struct e1000_ring *tx_ring = adapter->tx_ring; - int err = -ENOMEM, size; - - size = sizeof(struct e1000_buffer) * tx_ring->count; - tx_ring->buffer_info = vzalloc(size); - if (!tx_ring->buffer_info) - goto err; - - /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); - - err = e1000_alloc_ring_dma(adapter, tx_ring); - if (err) - goto err; - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - - return 0; -err: - vfree(tx_ring->buffer_info); - e_err("Unable to allocate memory for the transmit descriptor ring\n"); - return err; -} - -/** - * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) - * @adapter: board private structure - * - * Returns 0 on success, negative on failure - **/ -int e1000e_setup_rx_resources(struct e1000_adapter *adapter) -{ - struct e1000_ring *rx_ring = adapter->rx_ring; - struct e1000_buffer *buffer_info; - int i, size, desc_len, err = -ENOMEM; - - size = sizeof(struct e1000_buffer) * rx_ring->count; - rx_ring->buffer_info = vzalloc(size); - if (!rx_ring->buffer_info) - goto err; - - for (i = 0; i < rx_ring->count; i++) { - buffer_info = &rx_ring->buffer_info[i]; - buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, - sizeof(struct e1000_ps_page), - GFP_KERNEL); - if (!buffer_info->ps_pages) - goto err_pages; - } - - desc_len = sizeof(union e1000_rx_desc_packet_split); - - /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * desc_len; - rx_ring->size = ALIGN(rx_ring->size, 4096); - - err = e1000_alloc_ring_dma(adapter, rx_ring); - if (err) - goto err_pages; - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - rx_ring->rx_skb_top = NULL; - - return 0; - -err_pages: - for (i = 0; i < rx_ring->count; i++) { - buffer_info = &rx_ring->buffer_info[i]; - kfree(buffer_info->ps_pages); - } -err: - vfree(rx_ring->buffer_info); - e_err("Unable to allocate memory for the receive descriptor ring\n"); - return err; -} - -/** - * e1000_clean_tx_ring - Free Tx Buffers - * @adapter: board private structure - **/ -static void e1000_clean_tx_ring(struct e1000_adapter *adapter) -{ - struct e1000_ring *tx_ring = adapter->tx_ring; - struct e1000_buffer *buffer_info; - unsigned long size; - unsigned int i; - - for (i = 0; i < tx_ring->count; i++) { - buffer_info = &tx_ring->buffer_info[i]; - e1000_put_txbuf(adapter, buffer_info); - } - - size = sizeof(struct e1000_buffer) * tx_ring->count; - memset(tx_ring->buffer_info, 0, size); - - 
memset(tx_ring->desc, 0, tx_ring->size); - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - - writel(0, adapter->hw.hw_addr + tx_ring->head); - writel(0, adapter->hw.hw_addr + tx_ring->tail); -} - -/** - * e1000e_free_tx_resources - Free Tx Resources per Queue - * @adapter: board private structure - * - * Free all transmit software resources - **/ -void e1000e_free_tx_resources(struct e1000_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct e1000_ring *tx_ring = adapter->tx_ring; - - e1000_clean_tx_ring(adapter); - - vfree(tx_ring->buffer_info); - tx_ring->buffer_info = NULL; - - dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, - tx_ring->dma); - tx_ring->desc = NULL; -} - -/** - * e1000e_free_rx_resources - Free Rx Resources - * @adapter: board private structure - * - * Free all receive software resources - **/ - -void e1000e_free_rx_resources(struct e1000_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct e1000_ring *rx_ring = adapter->rx_ring; - int i; - - e1000_clean_rx_ring(adapter); - - for (i = 0; i < rx_ring->count; i++) - kfree(rx_ring->buffer_info[i].ps_pages); - - vfree(rx_ring->buffer_info); - rx_ring->buffer_info = NULL; - - dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); - rx_ring->desc = NULL; -} - -/** - * e1000_update_itr - update the dynamic ITR value based on statistics - * @adapter: pointer to adapter - * @itr_setting: current adapter->itr - * @packets: the number of packets during this measurement interval - * @bytes: the number of bytes during this measurement interval - * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. This functionality is controlled - * by the InterruptThrottleRate module parameter. 
- **/ -static unsigned int e1000_update_itr(struct e1000_adapter *adapter, - u16 itr_setting, int packets, - int bytes) -{ - unsigned int retval = itr_setting; - - if (packets == 0) - goto update_itr_done; - - switch (itr_setting) { - case lowest_latency: - /* handle TSO and jumbo frames */ - if (bytes/packets > 8000) - retval = bulk_latency; - else if ((packets < 5) && (bytes > 512)) - retval = low_latency; - break; - case low_latency: /* 50 usec aka 20000 ints/s */ - if (bytes > 10000) { - /* this if handles the TSO accounting */ - if (bytes/packets > 8000) - retval = bulk_latency; - else if ((packets < 10) || ((bytes/packets) > 1200)) - retval = bulk_latency; - else if ((packets > 35)) - retval = lowest_latency; - } else if (bytes/packets > 2000) { - retval = bulk_latency; - } else if (packets <= 2 && bytes < 512) { - retval = lowest_latency; - } - break; - case bulk_latency: /* 250 usec aka 4000 ints/s */ - if (bytes > 25000) { - if (packets > 35) - retval = low_latency; - } else if (bytes < 6000) { - retval = low_latency; - } - break; - } - -update_itr_done: - return retval; -} - -static void e1000_set_itr(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u16 current_itr; - u32 new_itr = adapter->itr; - - /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ - if (adapter->link_speed != SPEED_1000) { - current_itr = 0; - new_itr = 4000; - goto set_itr_now; - } - - if (adapter->flags2 & FLAG2_DISABLE_AIM) { - new_itr = 0; - goto set_itr_now; - } - - adapter->tx_itr = e1000_update_itr(adapter, - adapter->tx_itr, - adapter->total_tx_packets, - adapter->total_tx_bytes); - /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) - adapter->tx_itr = low_latency; - - adapter->rx_itr = e1000_update_itr(adapter, - adapter->rx_itr, - adapter->total_rx_packets, - adapter->total_rx_bytes); - /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) - adapter->rx_itr = low_latency; - - current_itr = max(adapter->rx_itr, adapter->tx_itr); - - switch (current_itr) { - /* counts and packets in update_itr are dependent on these numbers */ - case lowest_latency: - new_itr = 70000; - break; - case low_latency: - new_itr = 20000; /* aka hwitr = ~200 */ - break; - case bulk_latency: - new_itr = 4000; - break; - default: - break; - } - -set_itr_now: - if (new_itr != adapter->itr) { - /* - * this attempts to bias the interrupt rate towards Bulk - * by adding intermediate steps when interrupt rate is - * increasing - */ - new_itr = new_itr > adapter->itr ? 
- min(adapter->itr + (new_itr >> 2), new_itr) : - new_itr; - adapter->itr = new_itr; - adapter->rx_ring->itr_val = new_itr; - if (adapter->msix_entries) - adapter->rx_ring->set_itr = 1; - else - if (new_itr) - ew32(ITR, 1000000000 / (new_itr * 256)); - else - ew32(ITR, 0); - } -} - -/** - * e1000_alloc_queues - Allocate memory for all rings - * @adapter: board private structure to initialize - **/ -static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) -{ - adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); - if (!adapter->tx_ring) - goto err; - - adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); - if (!adapter->rx_ring) - goto err; - - return 0; -err: - e_err("Unable to allocate memory for queues\n"); - kfree(adapter->rx_ring); - kfree(adapter->tx_ring); - return -ENOMEM; -} - -/** - * e1000_clean - NAPI Rx polling callback - * @napi: struct associated with this polling callback - * @budget: amount of packets driver is allowed to process this poll - **/ -static int e1000_clean(struct napi_struct *napi, int budget) -{ - struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); - struct e1000_hw *hw = &adapter->hw; - struct net_device *poll_dev = adapter->netdev; - int tx_cleaned = 1, work_done = 0; - - adapter = netdev_priv(poll_dev); - - if (adapter->msix_entries && - !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) - goto clean_rx; - - tx_cleaned = e1000_clean_tx_irq(adapter); - -clean_rx: - adapter->clean_rx(adapter, &work_done, budget); - - if (!tx_cleaned) - work_done = budget; - - /* If budget not fully consumed, exit the polling mode */ - if (work_done < budget) { - if (adapter->itr_setting & 3) - e1000_set_itr(adapter); - napi_complete(napi); - if (!test_bit(__E1000_DOWN, &adapter->state)) { - if (adapter->msix_entries) - ew32(IMS, adapter->rx_ring->ims_val); - else - e1000_irq_enable(adapter); - } - } - - return work_done; -} - -static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 vfta, index; - - /* don't update vlan cookie if already programmed */ - if ((adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && - (vid == adapter->mng_vlan_id)) - return; - - /* add VID to filter table */ - if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { - index = (vid >> 5) & 0x7F; - vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); - vfta |= (1 << (vid & 0x1F)); - hw->mac.ops.write_vfta(hw, index, vfta); - } - - set_bit(vid, adapter->active_vlans); -} - -static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 vfta, index; - - if ((adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && - (vid == adapter->mng_vlan_id)) { - /* release control to f/w */ - e1000e_release_hw_control(adapter); - return; - } - - /* remove VID from filter table */ - if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { - index = (vid >> 5) & 0x7F; - vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); - vfta &= ~(1 << (vid & 0x1F)); - hw->mac.ops.write_vfta(hw, index, vfta); - } - - clear_bit(vid, adapter->active_vlans); -} - -/** - * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering - * @adapter: board private structure to initialize - **/ -static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct 
e1000_hw *hw = &adapter->hw; - u32 rctl; - - if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { - /* disable VLAN receive filtering */ - rctl = er32(RCTL); - rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); - ew32(RCTL, rctl); - - if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { - e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); - adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; - } - } -} - -/** - * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering - * @adapter: board private structure to initialize - **/ -static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - - if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { - /* enable VLAN receive filtering */ - rctl = er32(RCTL); - rctl |= E1000_RCTL_VFE; - rctl &= ~E1000_RCTL_CFIEN; - ew32(RCTL, rctl); - } -} - -/** - * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping - * @adapter: board private structure to initialize - **/ -static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl; - - /* disable VLAN tag insert/strip */ - ctrl = er32(CTRL); - ctrl &= ~E1000_CTRL_VME; - ew32(CTRL, ctrl); -} - -/** - * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping - * @adapter: board private structure to initialize - **/ -static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl; - - /* enable VLAN tag insert/strip */ - ctrl = er32(CTRL); - ctrl |= E1000_CTRL_VME; - ew32(CTRL, ctrl); -} - -static void e1000_update_mng_vlan(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - u16 vid = adapter->hw.mng_cookie.vlan_id; - u16 old_vid = adapter->mng_vlan_id; - - if (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { - e1000_vlan_rx_add_vid(netdev, vid); - adapter->mng_vlan_id = vid; - } - - if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) - e1000_vlan_rx_kill_vid(netdev, old_vid); -} - -static void e1000_restore_vlan(struct e1000_adapter *adapter) -{ - u16 vid; - - e1000_vlan_rx_add_vid(adapter->netdev, 0); - - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - e1000_vlan_rx_add_vid(adapter->netdev, vid); -} - -static void e1000_init_manageability_pt(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 manc, manc2h, mdef, i, j; - - if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) - return; - - manc = er32(MANC); - - /* - * enable receiving management packets to the host. this will probably - * generate destination unreachable messages from the host OS, but - * the packets will be handled on SMBUS - */ - manc |= E1000_MANC_EN_MNG2HOST; - manc2h = er32(MANC2H); - - switch (hw->mac.type) { - default: - manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); - break; - case e1000_82574: - case e1000_82583: - /* - * Check if IPMI pass-through decision filter already exists; - * if so, enable it. 
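 *
 * [Editorial sketch, not part of the original patch] A filter only
 * qualifies when it matches nothing beyond the two IPMI ports:
 *
 *	if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
 *		continue;	// filter serves some other purpose
 *
 * Note that the "create new decision filter" branch further down
 * sets bit (1 << 1) in MANC2H no matter which MDEF slot i was free;
 * (1 << i) looks like the intended value.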
- */ - for (i = 0, j = 0; i < 8; i++) { - mdef = er32(MDEF(i)); - - /* Ignore filters with anything other than IPMI ports */ - if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) - continue; - - /* Enable this decision filter in MANC2H */ - if (mdef) - manc2h |= (1 << i); - - j |= mdef; - } - - if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) - break; - - /* Create new decision filter in an empty filter */ - for (i = 0, j = 0; i < 8; i++) - if (er32(MDEF(i)) == 0) { - ew32(MDEF(i), (E1000_MDEF_PORT_623 | - E1000_MDEF_PORT_664)); - manc2h |= (1 << 1); - j++; - break; - } - - if (!j) - e_warn("Unable to create IPMI pass-through filter\n"); - break; - } - - ew32(MANC2H, manc2h); - ew32(MANC, manc); -} - -/** - * e1000_configure_tx - Configure Transmit Unit after Reset - * @adapter: board private structure - * - * Configure the Tx unit of the MAC after a reset. - **/ -static void e1000_configure_tx(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_ring *tx_ring = adapter->tx_ring; - u64 tdba; - u32 tdlen, tctl, tipg, tarc; - u32 ipgr1, ipgr2; - - /* Setup the HW Tx Head and Tail descriptor pointers */ - tdba = tx_ring->dma; - tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); - ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); - ew32(TDBAH, (tdba >> 32)); - ew32(TDLEN, tdlen); - ew32(TDH, 0); - ew32(TDT, 0); - tx_ring->head = E1000_TDH; - tx_ring->tail = E1000_TDT; - - /* Set the default values for the Tx Inter Packet Gap timer */ - tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ - ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ - ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ - - if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) - ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ - - tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; - tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; - ew32(TIPG, tipg); - - /* Set the Tx Interrupt Delay register */ - ew32(TIDV, adapter->tx_int_delay); - /* Tx irq moderation */ - ew32(TADV, adapter->tx_abs_int_delay); - - if (adapter->flags2 & FLAG2_DMA_BURST) { - u32 txdctl = er32(TXDCTL(0)); - txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | - E1000_TXDCTL_WTHRESH); - /* - * set up some performance related parameters to encourage the - * hardware to use the bus more efficiently in bursts, depends - * on the tx_int_delay to be enabled, - * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time - * hthresh = 1 ==> prefetch when one or more available - * pthresh = 0x1f ==> prefetch if internal cache 31 or less - * BEWARE: this seems to work but should be considered first if - * there are Tx hangs or other Tx related bugs - */ - txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; - ew32(TXDCTL(0), txdctl); - /* erratum work around: set txdctl the same for both queues */ - ew32(TXDCTL(1), txdctl); - } - - /* Program the Transmit Control Register */ - tctl = er32(TCTL); - tctl &= ~E1000_TCTL_CT; - tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | - (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); - - if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { - tarc = er32(TARC(0)); - /* - * set the speed mode bit, we'll clear it if we're not at - * gigabit link later - */ -#define SPEED_MODE_BIT (1 << 21) - tarc |= SPEED_MODE_BIT; - ew32(TARC(0), tarc); - } - - /* errata: program both queues to unweighted RR */ - if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { - tarc = er32(TARC(0)); - tarc |= 1; - ew32(TARC(0), tarc); - tarc = er32(TARC(1)); - tarc |= 1; - ew32(TARC(1), tarc); - } - - /* Setup Transmit Descriptor Settings for eop descriptor */ - adapter->txd_cmd = 
E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; - - /* only set IDE if we are delaying interrupts using the timers */ - if (adapter->tx_int_delay) - adapter->txd_cmd |= E1000_TXD_CMD_IDE; - - /* enable Report Status bit */ - adapter->txd_cmd |= E1000_TXD_CMD_RS; - - ew32(TCTL, tctl); - - e1000e_config_collision_dist(hw); -} - -/** - * e1000_setup_rctl - configure the receive control registers - * @adapter: Board private structure - **/ -#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ - (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) -static void e1000_setup_rctl(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 rctl, rfctl; - u32 pages = 0; - - /* Workaround Si errata on 82579 - configure jumbo frame flow */ - if (hw->mac.type == e1000_pch2lan) { - s32 ret_val; - - if (adapter->netdev->mtu > ETH_DATA_LEN) - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); - else - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); - - if (ret_val) - e_dbg("failed to enable jumbo frame workaround mode\n"); - } - - /* Program MC offset vector base */ - rctl = er32(RCTL); - rctl &= ~(3 << E1000_RCTL_MO_SHIFT); - rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | - E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | - (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); - - /* Do not Store bad packets */ - rctl &= ~E1000_RCTL_SBP; - - /* Enable Long Packet receive */ - if (adapter->netdev->mtu <= ETH_DATA_LEN) - rctl &= ~E1000_RCTL_LPE; - else - rctl |= E1000_RCTL_LPE; - - /* Some systems expect that the CRC is included in SMBUS traffic. The - * hardware strips the CRC before sending to both SMBUS (BMC) and to - * host memory when this is enabled - */ - if (adapter->flags2 & FLAG2_CRC_STRIPPING) - rctl |= E1000_RCTL_SECRC; - - /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ - if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { - u16 phy_data; - - e1e_rphy(hw, PHY_REG(770, 26), &phy_data); - phy_data &= 0xfff8; - phy_data |= (1 << 2); - e1e_wphy(hw, PHY_REG(770, 26), phy_data); - - e1e_rphy(hw, 22, &phy_data); - phy_data &= 0x0fff; - phy_data |= (1 << 14); - e1e_wphy(hw, 0x10, 0x2823); - e1e_wphy(hw, 0x11, 0x0003); - e1e_wphy(hw, 22, phy_data); - } - - /* Setup buffer sizes */ - rctl &= ~E1000_RCTL_SZ_4096; - rctl |= E1000_RCTL_BSEX; - switch (adapter->rx_buffer_len) { - case 2048: - default: - rctl |= E1000_RCTL_SZ_2048; - rctl &= ~E1000_RCTL_BSEX; - break; - case 4096: - rctl |= E1000_RCTL_SZ_4096; - break; - case 8192: - rctl |= E1000_RCTL_SZ_8192; - break; - case 16384: - rctl |= E1000_RCTL_SZ_16384; - break; - } - - /* - * 82571 and greater support packet-split where the protocol - * header is placed in skb->data and the packet data is - * placed in pages hanging off of skb_shinfo(skb)->nr_frags. - * In the case of a non-split, skb->data is linearly filled, - * followed by the page buffers. Therefore, skb->data is - * sized to hold the largest protocol header. - * - * allocations using alloc_page take too long for regular MTU - * so only enable packet split for jumbo frames - * - * Using pages when the page size is greater than 16k wastes - * a lot of memory, since we allocate 3 pages at all times - * per packet. 
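 *
 * [Editorial sketch, not part of the original patch] With 4 KiB
 * pages, PAGE_USE_COUNT() rounds the MTU up to whole pages; for a
 * 9000-byte jumbo MTU:
 *
 *	(9000 >> 12) + 1 = 3 pages
 *
 * which is why packet split is only enabled below when pages <= 3.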
- */ - pages = PAGE_USE_COUNT(adapter->netdev->mtu); - if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) && - (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) - adapter->rx_ps_pages = pages; - else - adapter->rx_ps_pages = 0; - - if (adapter->rx_ps_pages) { - u32 psrctl = 0; - - /* Configure extra packet-split registers */ - rfctl = er32(RFCTL); - rfctl |= E1000_RFCTL_EXTEN; - /* - * disable packet split support for IPv6 extension headers, - * because some malformed IPv6 headers can hang the Rx - */ - rfctl |= (E1000_RFCTL_IPV6_EX_DIS | - E1000_RFCTL_NEW_IPV6_EXT_DIS); - - ew32(RFCTL, rfctl); - - /* Enable Packet split descriptors */ - rctl |= E1000_RCTL_DTYP_PS; - - psrctl |= adapter->rx_ps_bsize0 >> - E1000_PSRCTL_BSIZE0_SHIFT; - - switch (adapter->rx_ps_pages) { - case 3: - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE3_SHIFT; - case 2: - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE2_SHIFT; - case 1: - psrctl |= PAGE_SIZE >> - E1000_PSRCTL_BSIZE1_SHIFT; - break; - } - - ew32(PSRCTL, psrctl); - } - - ew32(RCTL, rctl); - /* just started the receive unit, no need to restart */ - adapter->flags &= ~FLAG_RX_RESTART_NOW; -} - -/** - * e1000_configure_rx - Configure Receive Unit after Reset - * @adapter: board private structure - * - * Configure the Rx unit of the MAC after a reset. - **/ -static void e1000_configure_rx(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_ring *rx_ring = adapter->rx_ring; - u64 rdba; - u32 rdlen, rctl, rxcsum, ctrl_ext; - - if (adapter->rx_ps_pages) { - /* this is a 32 byte descriptor */ - rdlen = rx_ring->count * - sizeof(union e1000_rx_desc_packet_split); - adapter->clean_rx = e1000_clean_rx_irq_ps; - adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; - } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { - rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); - adapter->clean_rx = e1000_clean_jumbo_rx_irq; - adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; - } else { - rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); - adapter->clean_rx = e1000_clean_rx_irq; - adapter->alloc_rx_buf = e1000_alloc_rx_buffers; - } - - /* disable receives while setting up the descriptors */ - rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); - e1e_flush(); - usleep_range(10000, 20000); - - if (adapter->flags2 & FLAG2_DMA_BURST) { - /* - * set the writeback threshold (only takes effect if the RDTR - * is set). 
set GRAN=1 and write back up to 0x4 worth, and - * enable prefetching of 0x20 Rx descriptors - * granularity = 01 - * wthresh = 04, - * hthresh = 04, - * pthresh = 0x20 - */ - ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); - ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); - - /* - * override the delay timers for enabling bursting, only if - * the value was not set by the user via module options - */ - if (adapter->rx_int_delay == DEFAULT_RDTR) - adapter->rx_int_delay = BURST_RDTR; - if (adapter->rx_abs_int_delay == DEFAULT_RADV) - adapter->rx_abs_int_delay = BURST_RADV; - } - - /* set the Receive Delay Timer Register */ - ew32(RDTR, adapter->rx_int_delay); - - /* irq moderation */ - ew32(RADV, adapter->rx_abs_int_delay); - if ((adapter->itr_setting != 0) && (adapter->itr != 0)) - ew32(ITR, 1000000000 / (adapter->itr * 256)); - - ctrl_ext = er32(CTRL_EXT); - /* Auto-Mask interrupts upon ICR access */ - ctrl_ext |= E1000_CTRL_EXT_IAME; - ew32(IAM, 0xffffffff); - ew32(CTRL_EXT, ctrl_ext); - e1e_flush(); - - /* - * Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring - */ - rdba = rx_ring->dma; - ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); - ew32(RDBAH, (rdba >> 32)); - ew32(RDLEN, rdlen); - ew32(RDH, 0); - ew32(RDT, 0); - rx_ring->head = E1000_RDH; - rx_ring->tail = E1000_RDT; - - /* Enable Receive Checksum Offload for TCP and UDP */ - rxcsum = er32(RXCSUM); - if (adapter->flags & FLAG_RX_CSUM_ENABLED) { - rxcsum |= E1000_RXCSUM_TUOFL; - - /* - * IPv4 payload checksum for UDP fragments must be - * used in conjunction with packet-split. - */ - if (adapter->rx_ps_pages) - rxcsum |= E1000_RXCSUM_IPPCSE; - } else { - rxcsum &= ~E1000_RXCSUM_TUOFL; - /* no need to clear IPPCSE as it defaults to 0 */ - } - ew32(RXCSUM, rxcsum); - - /* - * Enable early receives on supported devices, only takes effect when - * packet size is equal or larger than the specified value (in 8 byte - * units), e.g. using jumbo frames when setting to E1000_ERT_2048 - */ - if ((adapter->flags & FLAG_HAS_ERT) || - (adapter->hw.mac.type == e1000_pch2lan)) { - if (adapter->netdev->mtu > ETH_DATA_LEN) { - u32 rxdctl = er32(RXDCTL(0)); - ew32(RXDCTL(0), rxdctl | 0x3); - if (adapter->flags & FLAG_HAS_ERT) - ew32(ERT, E1000_ERT_2048 | (1 << 13)); - /* - * With jumbo frames and early-receive enabled, - * excessive C-state transition latencies result in - * dropped transactions. - */ - pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); - } else { - pm_qos_update_request(&adapter->netdev->pm_qos_req, - PM_QOS_DEFAULT_VALUE); - } - } - - /* Enable Receives */ - ew32(RCTL, rctl); -} - -/** - * e1000_update_mc_addr_list - Update Multicast addresses - * @hw: pointer to the HW structure - * @mc_addr_list: array of multicast addresses to program - * @mc_addr_count: number of multicast addresses to program - * - * Updates the Multicast Table Array. - * The caller must have a packed mc_addr_list of multicast addresses. - **/ -static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count) -{ - hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); -} - -/** - * e1000_set_multi - Multicast and Promiscuous mode set - * @netdev: network interface device structure - * - * The set_multi entry point is called whenever the multicast address - * list or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper multicast, - * promiscuous mode, and all-multi behavior. 
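 *
 * [Editorial sketch, not part of the original patch] The interface
 * flags map onto the receive-control bits as follows:
 *
 *	IFF_PROMISC:	rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
 *	IFF_ALLMULTI:	rctl |= E1000_RCTL_MPE;
 *
 * with the hardware VLAN filter additionally disabled in promiscuous
 * mode.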
- **/ -static void e1000_set_multi(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct netdev_hw_addr *ha; - u8 *mta_list; - u32 rctl; - - /* Check for Promiscuous and All Multicast modes */ - - rctl = er32(RCTL); - - if (netdev->flags & IFF_PROMISC) { - rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); - rctl &= ~E1000_RCTL_VFE; - /* Do not hardware filter VLANs in promisc mode */ - e1000e_vlan_filter_disable(adapter); - } else { - if (netdev->flags & IFF_ALLMULTI) { - rctl |= E1000_RCTL_MPE; - rctl &= ~E1000_RCTL_UPE; - } else { - rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); - } - e1000e_vlan_filter_enable(adapter); - } - - ew32(RCTL, rctl); - - if (!netdev_mc_empty(netdev)) { - int i = 0; - - mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); - if (!mta_list) - return; - - /* prepare a packed array of only addresses. */ - netdev_for_each_mc_addr(ha, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); - - e1000_update_mc_addr_list(hw, mta_list, i); - kfree(mta_list); - } else { - /* - * if we're called from probe, we might not have - * anything to do here, so clear out the list - */ - e1000_update_mc_addr_list(hw, NULL, 0); - } - - if (netdev->features & NETIF_F_HW_VLAN_RX) - e1000e_vlan_strip_enable(adapter); - else - e1000e_vlan_strip_disable(adapter); -} - -/** - * e1000_configure - configure the hardware for Rx and Tx - * @adapter: private board structure - **/ -static void e1000_configure(struct e1000_adapter *adapter) -{ - e1000_set_multi(adapter->netdev); - - e1000_restore_vlan(adapter); - e1000_init_manageability_pt(adapter); - - e1000_configure_tx(adapter); - e1000_setup_rctl(adapter); - e1000_configure_rx(adapter); - adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring), - GFP_KERNEL); -} - -/** - * e1000e_power_up_phy - restore link in case the phy was powered down - * @adapter: address of board private structure - * - * The phy may be powered down to save power and turn off link when the - * driver is unloaded and wake on lan is not enabled (among others) - * *** this routine MUST be followed by a call to e1000e_reset *** - **/ -void e1000e_power_up_phy(struct e1000_adapter *adapter) -{ - if (adapter->hw.phy.ops.power_up) - adapter->hw.phy.ops.power_up(&adapter->hw); - - adapter->hw.mac.ops.setup_link(&adapter->hw); -} - -/** - * e1000_power_down_phy - Power down the PHY - * - * Power down the PHY so no link is implied when interface is down. - * The PHY cannot be powered down if management or WoL is active. - */ -static void e1000_power_down_phy(struct e1000_adapter *adapter) -{ - /* WoL is enabled */ - if (adapter->wol) - return; - - if (adapter->hw.phy.ops.power_down) - adapter->hw.phy.ops.power_down(&adapter->hw); -} - -/** - * e1000e_reset - bring the hardware into a known good state - * - * This function boots the hardware and enables some settings that - * require a configuration cycle of the hardware - those cannot be - * set/changed during runtime. After reset the device needs to be - * properly configured for Rx, Tx etc. 
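 *
 * [Editorial sketch, not part of the original patch] For jumbo
 * frames the Tx FIFO must hold two full frames plus their descriptor
 * info, rounded up to 1 KiB units:
 *
 *	min_tx_space = (adapter->max_frame_size +
 *			sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
 *	min_tx_space = ALIGN(min_tx_space, 1024) >> 10;	// in KB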
- */ -void e1000e_reset(struct e1000_adapter *adapter) -{ - struct e1000_mac_info *mac = &adapter->hw.mac; - struct e1000_fc_info *fc = &adapter->hw.fc; - struct e1000_hw *hw = &adapter->hw; - u32 tx_space, min_tx_space, min_rx_space; - u32 pba = adapter->pba; - u16 hwm; - - /* reset Packet Buffer Allocation to default */ - ew32(PBA, pba); - - if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { - /* - * To maintain wire speed transmits, the Tx FIFO should be - * large enough to accommodate two full transmit packets, - * rounded up to the next 1KB and expressed in KB. Likewise, - * the Rx FIFO should be large enough to accommodate at least - * one full receive packet and is similarly rounded up and - * expressed in KB. - */ - pba = er32(PBA); - /* upper 16 bits has Tx packet buffer allocation size in KB */ - tx_space = pba >> 16; - /* lower 16 bits has Rx packet buffer allocation size in KB */ - pba &= 0xffff; - /* - * the Tx fifo also stores 16 bytes of information about the Tx - * but don't include ethernet FCS because hardware appends it - */ - min_tx_space = (adapter->max_frame_size + - sizeof(struct e1000_tx_desc) - - ETH_FCS_LEN) * 2; - min_tx_space = ALIGN(min_tx_space, 1024); - min_tx_space >>= 10; - /* software strips receive CRC, so leave room for it */ - min_rx_space = adapter->max_frame_size; - min_rx_space = ALIGN(min_rx_space, 1024); - min_rx_space >>= 10; - - /* - * If current Tx allocation is less than the min Tx FIFO size, - * and the min Tx FIFO size is less than the current Rx FIFO - * allocation, take space away from current Rx allocation - */ - if ((tx_space < min_tx_space) && - ((min_tx_space - tx_space) < pba)) { - pba -= min_tx_space - tx_space; - - /* - * if short on Rx space, Rx wins and must trump Tx - * adjustment or use Early Receive if available - */ - if ((pba < min_rx_space) && - (!(adapter->flags & FLAG_HAS_ERT))) - /* ERT enabled in e1000_configure_rx */ - pba = min_rx_space; - } - - ew32(PBA, pba); - } - - /* - * flow control settings - * - * The high water mark must be low enough to fit one full frame - * (or the size used for early receive) above it in the Rx FIFO. - * Set it to the lower of: - * - 90% of the Rx FIFO size, and - * - the full Rx FIFO size minus the early receive size (for parts - * with ERT support assuming ERT set to E1000_ERT_2048), or - * - the full Rx FIFO size minus one full frame - */ - if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) - fc->pause_time = 0xFFFF; - else - fc->pause_time = E1000_FC_PAUSE_TIME; - fc->send_xon = 1; - fc->current_mode = fc->requested_mode; - - switch (hw->mac.type) { - default: - if ((adapter->flags & FLAG_HAS_ERT) && - (adapter->netdev->mtu > ETH_DATA_LEN)) - hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - (E1000_ERT_2048 << 3))); - else - hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - adapter->max_frame_size)); - - fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ - fc->low_water = fc->high_water - 8; - break; - case e1000_pchlan: - /* - * Workaround PCH LOM adapter hangs with certain network - * loads. If hangs persist, try disabling Tx flow control. 
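 *
 * [Editorial sketch, not part of the original patch] On parts without
 * fixed watermarks, the high-water mark is the lower of 90% of the
 * Rx FIFO and the FIFO size minus one full frame:
 *
 *	hwm = min((pba << 10) * 9 / 10,
 *		  (pba << 10) - adapter->max_frame_size);
 *	fc->high_water = hwm & E1000_FCRTH_RTH;	// 8-byte granularity
 *	fc->low_water = fc->high_water - 8;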
- */ - if (adapter->netdev->mtu > ETH_DATA_LEN) { - fc->high_water = 0x3500; - fc->low_water = 0x1500; - } else { - fc->high_water = 0x5000; - fc->low_water = 0x3000; - } - fc->refresh_time = 0x1000; - break; - case e1000_pch2lan: - fc->high_water = 0x05C20; - fc->low_water = 0x05048; - fc->pause_time = 0x0650; - fc->refresh_time = 0x0400; - if (adapter->netdev->mtu > ETH_DATA_LEN) { - pba = 14; - ew32(PBA, pba); - } - break; - } - - /* - * Disable Adaptive Interrupt Moderation if 2 full packets cannot - * fit in receive buffer and early-receive not supported. - */ - if (adapter->itr_setting & 0x3) { - if (((adapter->max_frame_size * 2) > (pba << 10)) && - !(adapter->flags & FLAG_HAS_ERT)) { - if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { - dev_info(&adapter->pdev->dev, - "Interrupt Throttle Rate turned off\n"); - adapter->flags2 |= FLAG2_DISABLE_AIM; - ew32(ITR, 0); - } - } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { - dev_info(&adapter->pdev->dev, - "Interrupt Throttle Rate turned on\n"); - adapter->flags2 &= ~FLAG2_DISABLE_AIM; - adapter->itr = 20000; - ew32(ITR, 1000000000 / (adapter->itr * 256)); - } - } - - /* Allow time for pending master requests to run */ - mac->ops.reset_hw(hw); - - /* - * For parts with AMT enabled, let the firmware know - * that the network interface is in control - */ - if (adapter->flags & FLAG_HAS_AMT) - e1000e_get_hw_control(adapter); - - ew32(WUC, 0); - - if (mac->ops.init_hw(hw)) - e_err("Hardware Error\n"); - - e1000_update_mng_vlan(adapter); - - /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ - ew32(VET, ETH_P_8021Q); - - e1000e_reset_adaptive(hw); - - if (!netif_running(adapter->netdev) && - !test_bit(__E1000_TESTING, &adapter->state)) { - e1000_power_down_phy(adapter); - return; - } - - e1000_get_phy_info(hw); - - if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && - !(adapter->flags & FLAG_SMART_POWER_DOWN)) { - u16 phy_data = 0; - /* - * speed up time to link by disabling smart power down, ignore - * the return value of this function because there is nothing - * different we would do if it failed - */ - e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); - phy_data &= ~IGP02E1000_PM_SPD; - e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); - } -} - -int e1000e_up(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - /* hardware has been reset, we need to reload some things */ - e1000_configure(adapter); - - clear_bit(__E1000_DOWN, &adapter->state); - - napi_enable(&adapter->napi); - if (adapter->msix_entries) - e1000_configure_msix(adapter); - e1000_irq_enable(adapter); - - netif_start_queue(adapter->netdev); - - /* fire a link change interrupt to start the watchdog */ - if (adapter->msix_entries) - ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); - else - ew32(ICS, E1000_ICS_LSC); - - return 0; -} - -static void e1000e_flush_descriptors(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - if (!(adapter->flags2 & FLAG2_DMA_BURST)) - return; - - /* flush pending descriptor writebacks to memory */ - ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); - ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); - - /* execute the writes immediately */ - e1e_flush(); -} - -static void e1000e_update_stats(struct e1000_adapter *adapter); - -void e1000e_down(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - u32 tctl, rctl; - - /* - * signal that we're down so the interrupt handler does not - * reschedule our watchdog timer - */ - 
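	/*
	 * [Editorial note, not part of the original patch] The bit is
	 * set before the hardware is touched so that every deferred
	 * path can bail out with the idiom used throughout this file:
	 *
	 *	if (!test_bit(__E1000_DOWN, &adapter->state))
	 *		mod_timer(&adapter->watchdog_timer, jiffies + 1);
	 */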
set_bit(__E1000_DOWN, &adapter->state); - - /* disable receives in the hardware */ - rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); - /* flush and sleep below */ - - netif_stop_queue(netdev); - - /* disable transmits in the hardware */ - tctl = er32(TCTL); - tctl &= ~E1000_TCTL_EN; - ew32(TCTL, tctl); - /* flush both disables and wait for them to finish */ - e1e_flush(); - usleep_range(10000, 20000); - - napi_disable(&adapter->napi); - e1000_irq_disable(adapter); - - del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_info_timer); - - netif_carrier_off(netdev); - - spin_lock(&adapter->stats64_lock); - e1000e_update_stats(adapter); - spin_unlock(&adapter->stats64_lock); - - e1000e_flush_descriptors(adapter); - e1000_clean_tx_ring(adapter); - e1000_clean_rx_ring(adapter); - - adapter->link_speed = 0; - adapter->link_duplex = 0; - - if (!pci_channel_offline(adapter->pdev)) - e1000e_reset(adapter); - - /* - * TODO: for power management, we could drop the link and - * pci_disable_device here. - */ -} - -void e1000e_reinit_locked(struct e1000_adapter *adapter) -{ - might_sleep(); - while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - e1000e_down(adapter); - e1000e_up(adapter); - clear_bit(__E1000_RESETTING, &adapter->state); -} - -/** - * e1000_sw_init - Initialize general software structures (struct e1000_adapter) - * @adapter: board private structure to initialize - * - * e1000_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). - **/ -static int __devinit e1000_sw_init(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; - adapter->rx_ps_bsize0 = 128; - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - - spin_lock_init(&adapter->stats64_lock); - - e1000e_set_interrupt_capability(adapter); - - if (e1000_alloc_queues(adapter)) - return -ENOMEM; - - /* Explicitly disable IRQ since the NIC can be in any state. 
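 * [Editorial note, not part of the original patch] No handler has
 * been installed at this point, so an interrupt left armed by the
 * firmware would go unhandled; e1000_irq_disable() masks every
 * source with
 *
 *	ew32(IMC, ~0);
 *
 * before request_irq() ever runs.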
*/ - e1000_irq_disable(adapter); - - set_bit(__E1000_DOWN, &adapter->state); - return 0; -} - -/** - * e1000_intr_msi_test - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t e1000_intr_msi_test(int irq, void *data) -{ - struct net_device *netdev = data; - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 icr = er32(ICR); - - e_dbg("icr is %08X\n", icr); - if (icr & E1000_ICR_RXSEQ) { - adapter->flags &= ~FLAG_MSI_TEST_FAILED; - wmb(); - } - - return IRQ_HANDLED; -} - -/** - * e1000_test_msi_interrupt - Returns 0 for successful test - * @adapter: board private struct - * - * code flow taken from tg3.c - **/ -static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - int err; - - /* poll_enable hasn't been called yet, so don't need disable */ - /* clear any pending events */ - er32(ICR); - - /* free the real vector and request a test handler */ - e1000_free_irq(adapter); - e1000e_reset_interrupt_capability(adapter); - - /* Assume that the test fails, if it succeeds then the test - * MSI irq handler will unset this flag */ - adapter->flags |= FLAG_MSI_TEST_FAILED; - - err = pci_enable_msi(adapter->pdev); - if (err) - goto msi_test_failed; - - err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, - netdev->name, netdev); - if (err) { - pci_disable_msi(adapter->pdev); - goto msi_test_failed; - } - - wmb(); - - e1000_irq_enable(adapter); - - /* fire an unusual interrupt on the test handler */ - ew32(ICS, E1000_ICS_RXSEQ); - e1e_flush(); - msleep(50); - - e1000_irq_disable(adapter); - - rmb(); - - if (adapter->flags & FLAG_MSI_TEST_FAILED) { - adapter->int_mode = E1000E_INT_MODE_LEGACY; - e_info("MSI interrupt test failed, using legacy interrupt.\n"); - } else - e_dbg("MSI interrupt test succeeded!\n"); - - free_irq(adapter->pdev->irq, netdev); - pci_disable_msi(adapter->pdev); - -msi_test_failed: - e1000e_set_interrupt_capability(adapter); - return e1000_request_irq(adapter); -} - -/** - * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored - * @adapter: board private struct - * - * code flow taken from tg3.c, called with e1000 interrupts disabled. - **/ -static int e1000_test_msi(struct e1000_adapter *adapter) -{ - int err; - u16 pci_cmd; - - if (!(adapter->flags & FLAG_MSI_ENABLED)) - return 0; - - /* disable SERR in case the MSI write causes a master abort */ - pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); - if (pci_cmd & PCI_COMMAND_SERR) - pci_write_config_word(adapter->pdev, PCI_COMMAND, - pci_cmd & ~PCI_COMMAND_SERR); - - err = e1000_test_msi_interrupt(adapter); - - /* re-enable SERR */ - if (pci_cmd & PCI_COMMAND_SERR) { - pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); - pci_cmd |= PCI_COMMAND_SERR; - pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); - } - - return err; -} - -/** - * e1000_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. 
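 *
 * [Editorial sketch, not part of the original patch] The PCIe MSI
 * errata test called from this function amounts to firing a
 * synthetic interrupt at a throwaway handler and checking that it
 * arrived:
 *
 *	adapter->flags |= FLAG_MSI_TEST_FAILED;	// handler clears this
 *	ew32(ICS, E1000_ICS_RXSEQ);
 *	e1e_flush();
 *	msleep(50);
 *	if (adapter->flags & FLAG_MSI_TEST_FAILED)
 *		adapter->int_mode = E1000E_INT_MODE_LEGACY;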
- **/ -static int e1000_open(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - int err; - - /* disallow open during test */ - if (test_bit(__E1000_TESTING, &adapter->state)) - return -EBUSY; - - pm_runtime_get_sync(&pdev->dev); - - netif_carrier_off(netdev); - - /* allocate transmit descriptors */ - err = e1000e_setup_tx_resources(adapter); - if (err) - goto err_setup_tx; - - /* allocate receive descriptors */ - err = e1000e_setup_rx_resources(adapter); - if (err) - goto err_setup_rx; - - /* - * If AMT is enabled, let the firmware know that the network - * interface is now open and reset the part to a known state. - */ - if (adapter->flags & FLAG_HAS_AMT) { - e1000e_get_hw_control(adapter); - e1000e_reset(adapter); - } - - e1000e_power_up_phy(adapter); - - adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; - if ((adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) - e1000_update_mng_vlan(adapter); - - /* DMA latency requirement to workaround early-receive/jumbo issue */ - if ((adapter->flags & FLAG_HAS_ERT) || - (adapter->hw.mac.type == e1000_pch2lan)) - pm_qos_add_request(&adapter->netdev->pm_qos_req, - PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); - - /* - * before we allocate an interrupt, we must be ready to handle it. - * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt - * as soon as we call pci_request_irq, so we have to setup our - * clean_rx handler before we do so. - */ - e1000_configure(adapter); - - err = e1000_request_irq(adapter); - if (err) - goto err_req_irq; - - /* - * Work around PCIe errata with MSI interrupts causing some chipsets to - * ignore e1000e MSI messages, which means we need to test our MSI - * interrupt now - */ - if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { - err = e1000_test_msi(adapter); - if (err) { - e_err("Interrupt allocation failed\n"); - goto err_req_irq; - } - } - - /* From here on the code is the same as e1000e_up() */ - clear_bit(__E1000_DOWN, &adapter->state); - - napi_enable(&adapter->napi); - - e1000_irq_enable(adapter); - - netif_start_queue(netdev); - - adapter->idle_check = true; - pm_runtime_put(&pdev->dev); - - /* fire a link status change interrupt to start the watchdog */ - if (adapter->msix_entries) - ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); - else - ew32(ICS, E1000_ICS_LSC); - - return 0; - -err_req_irq: - e1000e_release_hw_control(adapter); - e1000_power_down_phy(adapter); - e1000e_free_rx_resources(adapter); -err_setup_rx: - e1000e_free_tx_resources(adapter); -err_setup_tx: - e1000e_reset(adapter); - pm_runtime_put_sync(&pdev->dev); - - return err; -} - -/** - * e1000_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. 
- **/ -static int e1000_close(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct pci_dev *pdev = adapter->pdev; - - WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); - - pm_runtime_get_sync(&pdev->dev); - - if (!test_bit(__E1000_DOWN, &adapter->state)) { - e1000e_down(adapter); - e1000_free_irq(adapter); - } - e1000_power_down_phy(adapter); - - e1000e_free_tx_resources(adapter); - e1000e_free_rx_resources(adapter); - - /* - * kill manageability vlan ID if supported, but not if a vlan with - * the same ID is registered on the host OS (let 8021q kill it) - */ - if (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) - e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); - - /* - * If AMT is enabled, let the firmware know that the network - * interface is now closed - */ - if ((adapter->flags & FLAG_HAS_AMT) && - !test_bit(__E1000_TESTING, &adapter->state)) - e1000e_release_hw_control(adapter); - - if ((adapter->flags & FLAG_HAS_ERT) || - (adapter->hw.mac.type == e1000_pch2lan)) - pm_qos_remove_request(&adapter->netdev->pm_qos_req); - - pm_runtime_put_sync(&pdev->dev); - - return 0; -} -/** - * e1000_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int e1000_set_mac(struct net_device *netdev, void *p) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); - - e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); - - if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { - /* activate the work around */ - e1000e_set_laa_state_82571(&adapter->hw, 1); - - /* - * Hold a copy of the LAA in RAR[14] This is done so that - * between the time RAR[0] gets clobbered and the time it - * gets fixed (in e1000_watchdog), the actual LAA is in one - * of the RARs and no incoming packets directed to this port - * are dropped. Eventually the LAA will be in RAR[0] and - * RAR[14] - */ - e1000e_rar_set(&adapter->hw, - adapter->hw.mac.addr, - adapter->hw.mac.rar_entry_count - 1); - } - - return 0; -} - -/** - * e1000e_update_phy_task - work thread to update phy - * @work: pointer to our work struct - * - * this worker thread exists because we must acquire a - * semaphore to read the phy, which we could msleep while - * waiting for it, and we can't msleep in a timer. 
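 *
 * [Editorial note, not part of the original patch] The timer half of
 * the pattern is e1000_update_phy_info() just below: it runs in
 * softirq context and merely queues this work item,
 *
 *	schedule_work(&adapter->update_phy_task);
 *
 * deferring the sleepable PHY access to process context.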
- **/ -static void e1000e_update_phy_task(struct work_struct *work) -{ - struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, update_phy_task); - - if (test_bit(__E1000_DOWN, &adapter->state)) - return; - - e1000_get_phy_info(&adapter->hw); -} - -/* - * Need to wait a few seconds after link up to get diagnostic information from - * the phy - */ -static void e1000_update_phy_info(unsigned long data) -{ - struct e1000_adapter *adapter = (struct e1000_adapter *) data; - - if (test_bit(__E1000_DOWN, &adapter->state)) - return; - - schedule_work(&adapter->update_phy_task); -} - -/** - * e1000e_update_phy_stats - Update the PHY statistics counters - * @adapter: board private structure - * - * Read/clear the upper 16-bit PHY registers and read/accumulate lower - **/ -static void e1000e_update_phy_stats(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - s32 ret_val; - u16 phy_data; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return; - - /* - * A page set is expensive so check if already on desired page. - * If not, set to the page with the PHY status registers. - */ - hw->phy.addr = 1; - ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, - &phy_data); - if (ret_val) - goto release; - if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { - ret_val = hw->phy.ops.set_page(hw, - HV_STATS_PAGE << IGP_PAGE_SHIFT); - if (ret_val) - goto release; - } - - /* Single Collision Count */ - hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); - ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); - if (!ret_val) - adapter->stats.scc += phy_data; - - /* Excessive Collision Count */ - hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); - ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); - if (!ret_val) - adapter->stats.ecol += phy_data; - - /* Multiple Collision Count */ - hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); - ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); - if (!ret_val) - adapter->stats.mcc += phy_data; - - /* Late Collision Count */ - hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); - ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); - if (!ret_val) - adapter->stats.latecol += phy_data; - - /* Collision Count - also used for adaptive IFS */ - hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); - ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); - if (!ret_val) - hw->mac.collision_delta = phy_data; - - /* Defer Count */ - hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); - ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); - if (!ret_val) - adapter->stats.dc += phy_data; - - /* Transmit with no CRS */ - hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); - ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); - if (!ret_val) - adapter->stats.tncrs += phy_data; - -release: - hw->phy.ops.release(hw); -} - -/** - * e1000e_update_stats - Update the board statistics counters - * @adapter: board private structure - **/ -static void e1000e_update_stats(struct e1000_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - - /* - * Prevent stats update while adapter is being reset, or if the pci - * connection is down. 
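e1000e_update_phy_stats() above repeats one pattern per counter: per its own doc comment, the UPPER half of each register pair is read to clear the hardware counter, and the LOWER half is read and folded into the software total only when that read succeeds. The pattern in isolation, with read_reg_page() and struct my_hw as stand-ins for hw->phy.ops.read_reg_page and the hardware handle:

/* Clear-on-read counter pair, as in e1000e_update_phy_stats(). */
static void accumulate_counter(struct my_hw *hw, u32 upper, u32 lower,
			       u64 *total)
{
	u16 val;
	s32 err;

	read_reg_page(hw, upper, &val);		/* read-to-clear, value discarded */
	err = read_reg_page(hw, lower, &val);
	if (!err)
		*total += val;			/* accumulate only on success */
}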
- */ - if (adapter->link_speed == 0) - return; - if (pci_channel_offline(pdev)) - return; - - adapter->stats.crcerrs += er32(CRCERRS); - adapter->stats.gprc += er32(GPRC); - adapter->stats.gorc += er32(GORCL); - er32(GORCH); /* Clear gorc */ - adapter->stats.bprc += er32(BPRC); - adapter->stats.mprc += er32(MPRC); - adapter->stats.roc += er32(ROC); - - adapter->stats.mpc += er32(MPC); - - /* Half-duplex statistics */ - if (adapter->link_duplex == HALF_DUPLEX) { - if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { - e1000e_update_phy_stats(adapter); - } else { - adapter->stats.scc += er32(SCC); - adapter->stats.ecol += er32(ECOL); - adapter->stats.mcc += er32(MCC); - adapter->stats.latecol += er32(LATECOL); - adapter->stats.dc += er32(DC); - - hw->mac.collision_delta = er32(COLC); - - if ((hw->mac.type != e1000_82574) && - (hw->mac.type != e1000_82583)) - adapter->stats.tncrs += er32(TNCRS); - } - adapter->stats.colc += hw->mac.collision_delta; - } - - adapter->stats.xonrxc += er32(XONRXC); - adapter->stats.xontxc += er32(XONTXC); - adapter->stats.xoffrxc += er32(XOFFRXC); - adapter->stats.xofftxc += er32(XOFFTXC); - adapter->stats.gptc += er32(GPTC); - adapter->stats.gotc += er32(GOTCL); - er32(GOTCH); /* Clear gotc */ - adapter->stats.rnbc += er32(RNBC); - adapter->stats.ruc += er32(RUC); - - adapter->stats.mptc += er32(MPTC); - adapter->stats.bptc += er32(BPTC); - - /* used for adaptive IFS */ - - hw->mac.tx_packet_delta = er32(TPT); - adapter->stats.tpt += hw->mac.tx_packet_delta; - - adapter->stats.algnerrc += er32(ALGNERRC); - adapter->stats.rxerrc += er32(RXERRC); - adapter->stats.cexterr += er32(CEXTERR); - adapter->stats.tsctc += er32(TSCTC); - adapter->stats.tsctfc += er32(TSCTFC); - - /* Fill out the OS statistics structure */ - netdev->stats.multicast = adapter->stats.mprc; - netdev->stats.collisions = adapter->stats.colc; - - /* Rx Errors */ - - /* - * RLEC on some newer hardware can be incorrect so build - * our own version based on RUC and ROC - */ - netdev->stats.rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; - netdev->stats.rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; - netdev->stats.rx_crc_errors = adapter->stats.crcerrs; - netdev->stats.rx_frame_errors = adapter->stats.algnerrc; - netdev->stats.rx_missed_errors = adapter->stats.mpc; - - /* Tx Errors */ - netdev->stats.tx_errors = adapter->stats.ecol + - adapter->stats.latecol; - netdev->stats.tx_aborted_errors = adapter->stats.ecol; - netdev->stats.tx_window_errors = adapter->stats.latecol; - netdev->stats.tx_carrier_errors = adapter->stats.tncrs; - - /* Tx Dropped needs to be maintained elsewhere */ - - /* Management Stats */ - adapter->stats.mgptc += er32(MGTPTC); - adapter->stats.mgprc += er32(MGTPRC); - adapter->stats.mgpdc += er32(MGTPDC); -} - -/** - * e1000_phy_read_status - Update the PHY register status snapshot - * @adapter: board private structure - **/ -static void e1000_phy_read_status(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct e1000_phy_regs *phy = &adapter->phy_regs; - - if ((er32(STATUS) & E1000_STATUS_LU) && - (adapter->hw.phy.media_type == e1000_media_type_copper)) { - int ret_val; - - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); - ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); - ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); - ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); - ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, 
&phy->expansion); - ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); - ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); - ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); - if (ret_val) - e_warn("Error reading PHY register\n"); - } else { - /* - * Do not read PHY registers if link is not up - * Set values to typical power-on defaults - */ - phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); - phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | - BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | - BMSR_ERCAP); - phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | - ADVERTISE_ALL | ADVERTISE_CSMA); - phy->lpa = 0; - phy->expansion = EXPANSION_ENABLENPAGE; - phy->ctrl1000 = ADVERTISE_1000FULL; - phy->stat1000 = 0; - phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); - } -} - -static void e1000_print_link_info(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl = er32(CTRL); - - /* Link status message must follow this format for user tools */ - printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, " - "Flow Control: %s\n", - adapter->netdev->name, - adapter->link_speed, - (adapter->link_duplex == FULL_DUPLEX) ? - "Full Duplex" : "Half Duplex", - ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? - "Rx/Tx" : - ((ctrl & E1000_CTRL_RFCE) ? "Rx" : - ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); -} - -static bool e1000e_has_link(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - bool link_active = 0; - s32 ret_val = 0; - - /* - * get_link_status is set on LSC (link status) interrupt or - * Rx sequence error interrupt. get_link_status will stay - * false until the check_for_link establishes link - * for copper adapters ONLY - */ - switch (hw->phy.media_type) { - case e1000_media_type_copper: - if (hw->mac.get_link_status) { - ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - } else { - link_active = 1; - } - break; - case e1000_media_type_fiber: - ret_val = hw->mac.ops.check_for_link(hw); - link_active = !!(er32(STATUS) & E1000_STATUS_LU); - break; - case e1000_media_type_internal_serdes: - ret_val = hw->mac.ops.check_for_link(hw); - link_active = adapter->hw.mac.serdes_has_link; - break; - default: - case e1000_media_type_unknown: - break; - } - - if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && - (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { - /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ - e_info("Gigabit has been disabled, downgrading speed\n"); - } - - return link_active; -} - -static void e1000e_enable_receives(struct e1000_adapter *adapter) -{ - /* make sure the receive unit is started */ - if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && - (adapter->flags & FLAG_RX_RESTART_NOW)) { - struct e1000_hw *hw = &adapter->hw; - u32 rctl = er32(RCTL); - ew32(RCTL, rctl | E1000_RCTL_EN); - adapter->flags &= ~FLAG_RX_RESTART_NOW; - } -} - -static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - /* - * With 82574 controllers, PHY needs to be checked periodically - * for hung state and reset, if two calls return true - */ - if (e1000_check_phy_82574(hw)) - adapter->phy_hang_count++; - else - adapter->phy_hang_count = 0; - - if (adapter->phy_hang_count > 1) { - adapter->phy_hang_count = 0; - schedule_work(&adapter->reset_task); - } -} - -/** - * e1000_watchdog - Timer Call-back - * @data: pointer to adapter cast into an unsigned long - **/ -static void 
e1000_watchdog(unsigned long data) -{ - struct e1000_adapter *adapter = (struct e1000_adapter *) data; - - /* Do the rest outside of interrupt context */ - schedule_work(&adapter->watchdog_task); - - /* TODO: make this use queue_delayed_work() */ -} - -static void e1000_watchdog_task(struct work_struct *work) -{ - struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, watchdog_task); - struct net_device *netdev = adapter->netdev; - struct e1000_mac_info *mac = &adapter->hw.mac; - struct e1000_phy_info *phy = &adapter->hw.phy; - struct e1000_ring *tx_ring = adapter->tx_ring; - struct e1000_hw *hw = &adapter->hw; - u32 link, tctl; - - if (test_bit(__E1000_DOWN, &adapter->state)) - return; - - link = e1000e_has_link(adapter); - if ((netif_carrier_ok(netdev)) && link) { - /* Cancel scheduled suspend requests. */ - pm_runtime_resume(netdev->dev.parent); - - e1000e_enable_receives(adapter); - goto link_up; - } - - if ((e1000e_enable_tx_pkt_filtering(hw)) && - (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) - e1000_update_mng_vlan(adapter); - - if (link) { - if (!netif_carrier_ok(netdev)) { - bool txb2b = 1; - - /* Cancel scheduled suspend requests. */ - pm_runtime_resume(netdev->dev.parent); - - /* update snapshot of PHY registers on LSC */ - e1000_phy_read_status(adapter); - mac->ops.get_link_up_info(&adapter->hw, - &adapter->link_speed, - &adapter->link_duplex); - e1000_print_link_info(adapter); - /* - * On supported PHYs, check for duplex mismatch only - * if link has autonegotiated at 10/100 half - */ - if ((hw->phy.type == e1000_phy_igp_3 || - hw->phy.type == e1000_phy_bm) && - (hw->mac.autoneg == true) && - (adapter->link_speed == SPEED_10 || - adapter->link_speed == SPEED_100) && - (adapter->link_duplex == HALF_DUPLEX)) { - u16 autoneg_exp; - - e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); - - if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) - e_info("Autonegotiated half duplex but" - " link partner cannot autoneg. " - " Try forcing full duplex if " - "link gets many collisions.\n"); - } - - /* adjust timeout factor according to speed/duplex */ - adapter->tx_timeout_factor = 1; - switch (adapter->link_speed) { - case SPEED_10: - txb2b = 0; - adapter->tx_timeout_factor = 16; - break; - case SPEED_100: - txb2b = 0; - adapter->tx_timeout_factor = 10; - break; - } - - /* - * workaround: re-program speed mode bit after - * link-up event - */ - if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && - !txb2b) { - u32 tarc0; - tarc0 = er32(TARC(0)); - tarc0 &= ~SPEED_MODE_BIT; - ew32(TARC(0), tarc0); - } - - /* - * disable TSO for pcie and 10/100 speeds, to avoid - * some hardware issues - */ - if (!(adapter->flags & FLAG_TSO_FORCE)) { - switch (adapter->link_speed) { - case SPEED_10: - case SPEED_100: - e_info("10/100 speed: disabling TSO\n"); - netdev->features &= ~NETIF_F_TSO; - netdev->features &= ~NETIF_F_TSO6; - break; - case SPEED_1000: - netdev->features |= NETIF_F_TSO; - netdev->features |= NETIF_F_TSO6; - break; - default: - /* oops */ - break; - } - } - - /* - * enable transmits in the hardware, need to do this - * after setting TARC(0) - */ - tctl = er32(TCTL); - tctl |= E1000_TCTL_EN; - ew32(TCTL, tctl); - - /* - * Perform any post-link-up configuration before - * reporting link up. 
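The speed switch above feeds the Tx-hang logic: at 10 and 100 Mb/s frames drain far more slowly, so the hang watchdog gets a 16x or 10x longer leash and back-to-back descriptor mode (txb2b) is turned off. The same mapping as a standalone, compilable function, with values copied from that switch:

#include <stdbool.h>
#include <stdio.h>

static unsigned int timeout_factor(unsigned int speed_mbps, bool *txb2b)
{
	*txb2b = true;
	switch (speed_mbps) {
	case 10:
		*txb2b = false;
		return 16;
	case 100:
		*txb2b = false;
		return 10;
	default:
		return 1;	/* gigabit keeps the base timeout */
	}
}

int main(void)
{
	bool txb2b;

	printf("%u\n", timeout_factor(10, &txb2b));	/* prints 16 */
	return 0;
}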
- */ - if (phy->ops.cfg_on_link_up) - phy->ops.cfg_on_link_up(hw); - - netif_carrier_on(netdev); - - if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->phy_info_timer, - round_jiffies(jiffies + 2 * HZ)); - } - } else { - if (netif_carrier_ok(netdev)) { - adapter->link_speed = 0; - adapter->link_duplex = 0; - /* Link status message must follow this format */ - printk(KERN_INFO "e1000e: %s NIC Link is Down\n", - adapter->netdev->name); - netif_carrier_off(netdev); - if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->phy_info_timer, - round_jiffies(jiffies + 2 * HZ)); - - if (adapter->flags & FLAG_RX_NEEDS_RESTART) - schedule_work(&adapter->reset_task); - else - pm_schedule_suspend(netdev->dev.parent, - LINK_TIMEOUT); - } - } - -link_up: - spin_lock(&adapter->stats64_lock); - e1000e_update_stats(adapter); - - mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; - adapter->tpt_old = adapter->stats.tpt; - mac->collision_delta = adapter->stats.colc - adapter->colc_old; - adapter->colc_old = adapter->stats.colc; - - adapter->gorc = adapter->stats.gorc - adapter->gorc_old; - adapter->gorc_old = adapter->stats.gorc; - adapter->gotc = adapter->stats.gotc - adapter->gotc_old; - adapter->gotc_old = adapter->stats.gotc; - spin_unlock(&adapter->stats64_lock); - - e1000e_update_adaptive(&adapter->hw); - - if (!netif_carrier_ok(netdev) && - (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { - /* - * We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). - */ - schedule_work(&adapter->reset_task); - /* return immediately since reset is imminent */ - return; - } - - /* Simple mode for Interrupt Throttle Rate (ITR) */ - if (adapter->itr_setting == 4) { - /* - * Symmetric Tx/Rx gets a reduced ITR=2000; - * Total asymmetrical Tx or Rx gets ITR=8000; - * everyone else is between 2000-8000. - */ - u32 goc = (adapter->gotc + adapter->gorc) / 10000; - u32 dif = (adapter->gotc > adapter->gorc ? - adapter->gotc - adapter->gorc : - adapter->gorc - adapter->gotc) / 10000; - u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; - - ew32(ITR, 1000000000 / (itr * 256)); - } - - /* Cause software interrupt to ensure Rx ring is cleaned */ - if (adapter->msix_entries) - ew32(ICS, adapter->rx_ring->ims_val); - else - ew32(ICS, E1000_ICS_RXDMT0); - - /* flush pending descriptors to memory before detecting Tx hang */ - e1000e_flush_descriptors(adapter); - - /* Force detection of hung controller every watchdog period */ - adapter->detect_tx_hung = 1; - - /* - * With 82571 controllers, LAA may be overwritten due to controller - * reset from the other port. 
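The "simple mode" ITR block above interpolates an interrupt rate between 2000/s (fully symmetric Tx/Rx byte counts) and 8000/s (fully one-sided) from the per-interval deltas, then converts that rate into the ITR register's 256 ns units. The arithmetic restated as a compilable function, with illustrative byte counts:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the simple-mode ITR arithmetic in e1000_watchdog_task(). */
static uint32_t itr_register_value(uint32_t gotc, uint32_t gorc)
{
	uint32_t goc = (gotc + gorc) / 10000;
	uint32_t dif = (gotc > gorc ? gotc - gorc : gorc - gotc) / 10000;
	uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

	return 1000000000 / (itr * 256);	/* ITR counts 256 ns units */
}

int main(void)
{
	/* symmetric load: dif == 0, itr == 2000 -> 1953 register units */
	printf("%u\n", itr_register_value(50000000, 50000000));
	/* one-sided load: dif == goc, itr == 8000 -> 488 register units */
	printf("%u\n", itr_register_value(80000000, 0));
	return 0;
}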
Set the appropriate LAA in RAR[0] - */ - if (e1000e_get_laa_state_82571(hw)) - e1000e_rar_set(hw, adapter->hw.mac.addr, 0); - - if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) - e1000e_check_82574_phy_workaround(adapter); - - /* Reset the timer */ - if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 2 * HZ)); -} - -#define E1000_TX_FLAGS_CSUM 0x00000001 -#define E1000_TX_FLAGS_VLAN 0x00000002 -#define E1000_TX_FLAGS_TSO 0x00000004 -#define E1000_TX_FLAGS_IPV4 0x00000008 -#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 -#define E1000_TX_FLAGS_VLAN_SHIFT 16 - -static int e1000_tso(struct e1000_adapter *adapter, - struct sk_buff *skb) -{ - struct e1000_ring *tx_ring = adapter->tx_ring; - struct e1000_context_desc *context_desc; - struct e1000_buffer *buffer_info; - unsigned int i; - u32 cmd_length = 0; - u16 ipcse = 0, tucse, mss; - u8 ipcss, ipcso, tucss, tucso, hdr_len; - - if (!skb_is_gso(skb)) - return 0; - - if (skb_header_cloned(skb)) { - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - - if (err) - return err; - } - - hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - mss = skb_shinfo(skb)->gso_size; - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); - cmd_length = E1000_TXD_CMD_IP; - ipcse = skb_transport_offset(skb) - 1; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - ipcse = 0; - } - ipcss = skb_network_offset(skb); - ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; - tucss = skb_transport_offset(skb); - tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; - tucse = 0; - - cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | - E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); - - i = tx_ring->next_to_use; - context_desc = E1000_CONTEXT_DESC(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - - context_desc->lower_setup.ip_fields.ipcss = ipcss; - context_desc->lower_setup.ip_fields.ipcso = ipcso; - context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); - context_desc->upper_setup.tcp_fields.tucss = tucss; - context_desc->upper_setup.tcp_fields.tucso = tucso; - context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); - context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); - context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; - context_desc->cmd_and_length = cpu_to_le32(cmd_length); - - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; - - return 1; -} - -static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) -{ - struct e1000_ring *tx_ring = adapter->tx_ring; - struct e1000_context_desc *context_desc; - struct e1000_buffer *buffer_info; - unsigned int i; - u8 css; - u32 cmd_len = E1000_TXD_CMD_DEXT; - __be16 protocol; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) - protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; - else - protocol = skb->protocol; - - switch (protocol) { - case cpu_to_be16(ETH_P_IP): - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - cmd_len |= E1000_TXD_CMD_TCP; - break; - case cpu_to_be16(ETH_P_IPV6): - /* XXX not handling all IPV6 headers */ - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - cmd_len |= 
E1000_TXD_CMD_TCP; - break; - default: - if (unlikely(net_ratelimit())) - e_warn("checksum_partial proto=%x!\n", - be16_to_cpu(protocol)); - break; - } - - css = skb_checksum_start_offset(skb); - - i = tx_ring->next_to_use; - buffer_info = &tx_ring->buffer_info[i]; - context_desc = E1000_CONTEXT_DESC(*tx_ring, i); - - context_desc->lower_setup.ip_config = 0; - context_desc->upper_setup.tcp_fields.tucss = css; - context_desc->upper_setup.tcp_fields.tucso = - css + skb->csum_offset; - context_desc->upper_setup.tcp_fields.tucse = 0; - context_desc->tcp_seg_setup.data = 0; - context_desc->cmd_and_length = cpu_to_le32(cmd_len); - - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; - - return 1; -} - -#define E1000_MAX_PER_TXD 8192 -#define E1000_MAX_TXD_PWR 12 - -static int e1000_tx_map(struct e1000_adapter *adapter, - struct sk_buff *skb, unsigned int first, - unsigned int max_per_txd, unsigned int nr_frags, - unsigned int mss) -{ - struct e1000_ring *tx_ring = adapter->tx_ring; - struct pci_dev *pdev = adapter->pdev; - struct e1000_buffer *buffer_info; - unsigned int len = skb_headlen(skb); - unsigned int offset = 0, size, count = 0, i; - unsigned int f, bytecount, segs; - - i = tx_ring->next_to_use; - - while (len) { - buffer_info = &tx_ring->buffer_info[i]; - size = min(len, max_per_txd); - - buffer_info->length = size; - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->dma = dma_map_single(&pdev->dev, - skb->data + offset, - size, DMA_TO_DEVICE); - buffer_info->mapped_as_page = false; - if (dma_mapping_error(&pdev->dev, buffer_info->dma)) - goto dma_error; - - len -= size; - offset += size; - count++; - - if (len) { - i++; - if (i == tx_ring->count) - i = 0; - } - } - - for (f = 0; f < nr_frags; f++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; - len = frag->size; - offset = frag->page_offset; - - while (len) { - i++; - if (i == tx_ring->count) - i = 0; - - buffer_info = &tx_ring->buffer_info[i]; - size = min(len, max_per_txd); - - buffer_info->length = size; - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->dma = dma_map_page(&pdev->dev, frag->page, - offset, size, - DMA_TO_DEVICE); - buffer_info->mapped_as_page = true; - if (dma_mapping_error(&pdev->dev, buffer_info->dma)) - goto dma_error; - - len -= size; - offset += size; - count++; - } - } - - segs = skb_shinfo(skb)->gso_segs ? 
: 1; - /* multiply data chunks by size of headers */ - bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; - - tx_ring->buffer_info[i].skb = skb; - tx_ring->buffer_info[i].segs = segs; - tx_ring->buffer_info[i].bytecount = bytecount; - tx_ring->buffer_info[first].next_to_watch = i; - - return count; - -dma_error: - dev_err(&pdev->dev, "Tx DMA map failed\n"); - buffer_info->dma = 0; - if (count) - count--; - - while (count--) { - if (i == 0) - i += tx_ring->count; - i--; - buffer_info = &tx_ring->buffer_info[i]; - e1000_put_txbuf(adapter, buffer_info); - } - - return 0; -} - -static void e1000_tx_queue(struct e1000_adapter *adapter, - int tx_flags, int count) -{ - struct e1000_ring *tx_ring = adapter->tx_ring; - struct e1000_tx_desc *tx_desc = NULL; - struct e1000_buffer *buffer_info; - u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; - unsigned int i; - - if (tx_flags & E1000_TX_FLAGS_TSO) { - txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | - E1000_TXD_CMD_TSE; - txd_upper |= E1000_TXD_POPTS_TXSM << 8; - - if (tx_flags & E1000_TX_FLAGS_IPV4) - txd_upper |= E1000_TXD_POPTS_IXSM << 8; - } - - if (tx_flags & E1000_TX_FLAGS_CSUM) { - txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; - txd_upper |= E1000_TXD_POPTS_TXSM << 8; - } - - if (tx_flags & E1000_TX_FLAGS_VLAN) { - txd_lower |= E1000_TXD_CMD_VLE; - txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); - } - - i = tx_ring->next_to_use; - - do { - buffer_info = &tx_ring->buffer_info[i]; - tx_desc = E1000_TX_DESC(*tx_ring, i); - tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - tx_desc->lower.data = - cpu_to_le32(txd_lower | buffer_info->length); - tx_desc->upper.data = cpu_to_le32(txd_upper); - - i++; - if (i == tx_ring->count) - i = 0; - } while (--count > 0); - - tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); - - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - - tx_ring->next_to_use = i; - writel(i, adapter->hw.hw_addr + tx_ring->tail); - /* - * we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); -} - -#define MINIMUM_DHCP_PACKET_SIZE 282 -static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, - struct sk_buff *skb) -{ - struct e1000_hw *hw = &adapter->hw; - u16 length, offset; - - if (vlan_tx_tag_present(skb)) { - if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && - (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) - return 0; - } - - if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) - return 0; - - if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) - return 0; - - { - const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); - struct udphdr *udp; - - if (ip->protocol != IPPROTO_UDP) - return 0; - - udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); - if (ntohs(udp->dest) != 67) - return 0; - - offset = (u8 *)udp + 8 - skb->data; - length = skb->len - offset; - return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); - } - - return 0; -} - -static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - netif_stop_queue(netdev); - /* - * Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. 
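The open-coded smp_mb() above is one half of a lock-free handshake between the transmit and Tx-cleanup paths: xmit stops the queue, issues a full barrier, then re-checks for free descriptors in case the cleaner freed some (and sampled the queue state) in the window between the two steps. A condensed sketch of both halves, with desc_unused() and WAKE_THRESHOLD as hypothetical stand-ins; the cleanup side lives elsewhere in this driver:

/* Producer (xmit) side: stop first, barrier, then re-check. */
static int maybe_stop_tx_sketch(struct net_device *netdev, int needed)
{
	netif_stop_queue(netdev);
	smp_mb();			/* order the stop vs. the re-check */
	if (desc_unused(netdev) < needed)
		return -EBUSY;		/* really full: stay stopped */
	netif_start_queue(netdev);	/* cleaner raced with us: resume */
	return 0;
}

/* Consumer (cleanup) side: reclaim, barrier, then wake if stopped. */
static void clean_tx_sketch(struct net_device *netdev)
{
	/* ... reclaim completed descriptors ... */
	smp_mb();
	if (netif_queue_stopped(netdev) &&
	    desc_unused(netdev) > WAKE_THRESHOLD)
		netif_wake_queue(netdev);
}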
- */ - smp_mb(); - - /* - * We need to check again in a case another CPU has just - * made room available. - */ - if (e1000_desc_unused(adapter->tx_ring) < size) - return -EBUSY; - - /* A reprieve! */ - netif_start_queue(netdev); - ++adapter->restart_queue; - return 0; -} - -static int e1000_maybe_stop_tx(struct net_device *netdev, int size) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (e1000_desc_unused(adapter->tx_ring) >= size) - return 0; - return __e1000_maybe_stop_tx(netdev, size); -} - -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) -static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_ring *tx_ring = adapter->tx_ring; - unsigned int first; - unsigned int max_per_txd = E1000_MAX_PER_TXD; - unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; - unsigned int tx_flags = 0; - unsigned int len = skb_headlen(skb); - unsigned int nr_frags; - unsigned int mss; - int count = 0; - int tso; - unsigned int f; - - if (test_bit(__E1000_DOWN, &adapter->state)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (skb->len <= 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - mss = skb_shinfo(skb)->gso_size; - /* - * The controller does a simple calculation to - * make sure there is enough room in the FIFO before - * initiating the DMA for each buffer. The calc is: - * 4 = ceil(buffer len/mss). To make sure we don't - * overrun the FIFO, adjust the max buffer len if mss - * drops. - */ - if (mss) { - u8 hdr_len; - max_per_txd = min(mss << 2, max_per_txd); - max_txd_pwr = fls(max_per_txd) - 1; - - /* - * TSO Workaround for 82571/2/3 Controllers -- if skb->data - * points to just header, pull a few bytes of payload from - * frags into skb->data - */ - hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - /* - * we do this workaround for ES2LAN, but it is un-necessary, - * avoiding it could save a lot of cycles - */ - if (skb->data_len && (hdr_len == len)) { - unsigned int pull_size; - - pull_size = min((unsigned int)4, skb->data_len); - if (!__pskb_pull_tail(skb, pull_size)) { - e_err("__pskb_pull_tail failed.\n"); - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - len = skb_headlen(skb); - } - } - - /* reserve a descriptor for the offload context */ - if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) - count++; - count++; - - count += TXD_USE_COUNT(len, max_txd_pwr); - - nr_frags = skb_shinfo(skb)->nr_frags; - for (f = 0; f < nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, - max_txd_pwr); - - if (adapter->hw.mac.tx_pkt_filtering) - e1000_transfer_dhcp_info(adapter, skb); - - /* - * need: count + 2 desc gap to keep tail from touching - * head, otherwise try next time - */ - if (e1000_maybe_stop_tx(netdev, count + 2)) - return NETDEV_TX_BUSY; - - if (vlan_tx_tag_present(skb)) { - tx_flags |= E1000_TX_FLAGS_VLAN; - tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); - } - - first = tx_ring->next_to_use; - - tso = e1000_tso(adapter, skb); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (tso) - tx_flags |= E1000_TX_FLAGS_TSO; - else if (e1000_tx_csum(adapter, skb)) - tx_flags |= E1000_TX_FLAGS_CSUM; - - /* - * Old method was to assume IPv4 packet by default if TSO was enabled. - * 82571 hardware supports TSO capabilities for IPv6 as well... - * no longer assume, we must. 
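TXD_USE_COUNT(S, X) above is a deliberately conservative ceiling: with max_txd_pwr == 12 it charges (S >> 12) + 1 slots, one per complete 4 KB chunk plus one for the remainder (even when there is none), so the stop-queue check may overestimate but can never under-reserve ring space. Worked numbers:

#include <stdio.h>

/* Conservative per-buffer descriptor estimate, as in e1000_xmit_frame(). */
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)

int main(void)
{
	unsigned int max_txd_pwr = 12;	/* 4096-byte chunks */

	/* a 9000-byte linear buffer reserves (9000 >> 12) + 1 = 3 slots */
	printf("%u\n", TXD_USE_COUNT(9000u, max_txd_pwr));
	/* a 60-byte runt still reserves one slot */
	printf("%u\n", TXD_USE_COUNT(60u, max_txd_pwr));
	return 0;
}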
- */ - if (skb->protocol == htons(ETH_P_IP)) - tx_flags |= E1000_TX_FLAGS_IPV4; - - /* if count is 0 then mapping error has occurred */ - count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); - if (count) { - e1000_tx_queue(adapter, tx_flags, count); - /* Make sure there is space in the ring for the next send. */ - e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); - - } else { - dev_kfree_skb_any(skb); - tx_ring->buffer_info[first].time_stamp = 0; - tx_ring->next_to_use = first; - } - - return NETDEV_TX_OK; -} - -/** - * e1000_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - **/ -static void e1000_tx_timeout(struct net_device *netdev) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - /* Do the reset outside of interrupt context */ - adapter->tx_timeout_count++; - schedule_work(&adapter->reset_task); -} - -static void e1000_reset_task(struct work_struct *work) -{ - struct e1000_adapter *adapter; - adapter = container_of(work, struct e1000_adapter, reset_task); - - /* don't run the task if already down */ - if (test_bit(__E1000_DOWN, &adapter->state)) - return; - - if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && - (adapter->flags & FLAG_RX_RESTART_NOW))) { - e1000e_dump(adapter); - e_err("Reset adapter\n"); - } - e1000e_reinit_locked(adapter); -} - -/** - * e1000_get_stats64 - Get System Network Statistics - * @netdev: network interface device structure - * @stats: rtnl_link_stats64 pointer - * - * Returns the address of the device statistics structure. - **/ -struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - - memset(stats, 0, sizeof(struct rtnl_link_stats64)); - spin_lock(&adapter->stats64_lock); - e1000e_update_stats(adapter); - /* Fill out the OS statistics structure */ - stats->rx_bytes = adapter->stats.gorc; - stats->rx_packets = adapter->stats.gprc; - stats->tx_bytes = adapter->stats.gotc; - stats->tx_packets = adapter->stats.gptc; - stats->multicast = adapter->stats.mprc; - stats->collisions = adapter->stats.colc; - - /* Rx Errors */ - - /* - * RLEC on some newer hardware can be incorrect so build - * our own version based on RUC and ROC - */ - stats->rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; - stats->rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; - stats->rx_crc_errors = adapter->stats.crcerrs; - stats->rx_frame_errors = adapter->stats.algnerrc; - stats->rx_missed_errors = adapter->stats.mpc; - - /* Tx Errors */ - stats->tx_errors = adapter->stats.ecol + - adapter->stats.latecol; - stats->tx_aborted_errors = adapter->stats.ecol; - stats->tx_window_errors = adapter->stats.latecol; - stats->tx_carrier_errors = adapter->stats.tncrs; - - /* Tx Dropped needs to be maintained elsewhere */ - - spin_unlock(&adapter->stats64_lock); - return stats; -} - -/** - * e1000_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - **/ -static int e1000_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - - /* Jumbo frame support */ - if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && - !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { - e_err("Jumbo Frames 
not supported.\n"); - return -EINVAL; - } - - /* Supported frame sizes */ - if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || - (max_frame > adapter->max_hw_frame_size)) { - e_err("Unsupported MTU setting\n"); - return -EINVAL; - } - - /* Jumbo frame workaround on 82579 requires CRC be stripped */ - if ((adapter->hw.mac.type == e1000_pch2lan) && - !(adapter->flags2 & FLAG2_CRC_STRIPPING) && - (new_mtu > ETH_DATA_LEN)) { - e_err("Jumbo Frames not supported on 82579 when CRC " - "stripping is disabled.\n"); - return -EINVAL; - } - - /* 82573 Errata 17 */ - if (((adapter->hw.mac.type == e1000_82573) || - (adapter->hw.mac.type == e1000_82574)) && - (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { - adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; - e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); - } - - while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ - adapter->max_frame_size = max_frame; - e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; - if (netif_running(netdev)) - e1000e_down(adapter); - - /* - * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN - * means we reserve 2 more, this pushes us to allocate from the next - * larger slab size. - * i.e. RXBUFFER_2048 --> size-4096 slab - * However with the new *_jumbo_rx* routines, jumbo receives will use - * fragmented skbs - */ - - if (max_frame <= 2048) - adapter->rx_buffer_len = 2048; - else - adapter->rx_buffer_len = 4096; - - /* adjust allocation if LPE protects us, and we aren't using SBP */ - if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || - (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) - adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN - + ETH_FCS_LEN; - - if (netif_running(netdev)) - e1000e_up(adapter); - else - e1000e_reset(adapter); - - clear_bit(__E1000_RESETTING, &adapter->state); - - return 0; -} - -static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, - int cmd) -{ - struct e1000_adapter *adapter = netdev_priv(netdev); - struct mii_ioctl_data *data = if_mii(ifr); - - if (adapter->hw.phy.media_type != e1000_media_type_copper) - return -EOPNOTSUPP; - - switch (cmd) { - case SIOCGMIIPHY: - data->phy_id = adapter->hw.phy.addr; - break; - case SIOCGMIIREG: - e1000_phy_read_status(adapter); - - switch (data->reg_num & 0x1F) { - case MII_BMCR: - data->val_out = adapter->phy_regs.bmcr; - break; - case MII_BMSR: - data->val_out = adapter->phy_regs.bmsr; - break; - case MII_PHYSID1: - data->val_out = (adapter->hw.phy.id >> 16); - break; - case MII_PHYSID2: - data->val_out = (adapter->hw.phy.id & 0xFFFF); - break; - case MII_ADVERTISE: - data->val_out = adapter->phy_regs.advertise; - break; - case MII_LPA: - data->val_out = adapter->phy_regs.lpa; - break; - case MII_EXPANSION: - data->val_out = adapter->phy_regs.expansion; - break; - case MII_CTRL1000: - data->val_out = adapter->phy_regs.ctrl1000; - break; - case MII_STAT1000: - data->val_out = adapter->phy_regs.stat1000; - break; - case MII_ESTATUS: - data->val_out = adapter->phy_regs.estatus; - break; - default: - return -EIO; - } - break; - case SIOCSMIIREG: - default: - return -EOPNOTSUPP; - } - return 0; -} - -static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return e1000_mii_ioctl(netdev, ifr, cmd); - default: - return -EOPNOTSUPP; - } -} - -static int e1000_init_phy_wakeup(struct 
e1000_adapter *adapter, u32 wufc) -{ - struct e1000_hw *hw = &adapter->hw; - u32 i, mac_reg; - u16 phy_reg, wuc_enable; - int retval = 0; - - /* copy MAC RARs to PHY RARs */ - e1000_copy_rx_addrs_to_phy_ich8lan(hw); - - retval = hw->phy.ops.acquire(hw); - if (retval) { - e_err("Could not acquire PHY\n"); - return retval; - } - - /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ - retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); - if (retval) - goto out; - - /* copy MAC MTA to PHY MTA - only needed for pchlan */ - for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { - mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); - hw->phy.ops.write_reg_page(hw, BM_MTA(i), - (u16)(mac_reg & 0xFFFF)); - hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, - (u16)((mac_reg >> 16) & 0xFFFF)); - } - - /* configure PHY Rx Control register */ - hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); - mac_reg = er32(RCTL); - if (mac_reg & E1000_RCTL_UPE) - phy_reg |= BM_RCTL_UPE; - if (mac_reg & E1000_RCTL_MPE) - phy_reg |= BM_RCTL_MPE; - phy_reg &= ~(BM_RCTL_MO_MASK); - if (mac_reg & E1000_RCTL_MO_3) - phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) - << BM_RCTL_MO_SHIFT); - if (mac_reg & E1000_RCTL_BAM) - phy_reg |= BM_RCTL_BAM; - if (mac_reg & E1000_RCTL_PMCF) - phy_reg |= BM_RCTL_PMCF; - mac_reg = er32(CTRL); - if (mac_reg & E1000_CTRL_RFCE) - phy_reg |= BM_RCTL_RFCE; - hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); - - /* enable PHY wakeup in MAC register */ - ew32(WUFC, wufc); - ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); - - /* configure and enable PHY wakeup in PHY registers */ - hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); - hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); - - /* activate PHY wakeup */ - wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; - retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); - if (retval) - e_err("Could not set PHY Host Wakeup bit\n"); -out: - hw->phy.ops.release(hw); - - return retval; -} - -static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, - bool runtime) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 ctrl, ctrl_ext, rctl, status; - /* Runtime suspend should only enable wakeup for link changes */ - u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; - int retval = 0; - - netif_device_detach(netdev); - - if (netif_running(netdev)) { - WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); - e1000e_down(adapter); - e1000_free_irq(adapter); - } - e1000e_reset_interrupt_capability(adapter); - - retval = pci_save_state(pdev); - if (retval) - return retval; - - status = er32(STATUS); - if (status & E1000_STATUS_LU) - wufc &= ~E1000_WUFC_LNKC; - - if (wufc) { - e1000_setup_rctl(adapter); - e1000_set_multi(netdev); - - /* turn on all-multi mode if wake on multicast is enabled */ - if (wufc & E1000_WUFC_MC) { - rctl = er32(RCTL); - rctl |= E1000_RCTL_MPE; - ew32(RCTL, rctl); - } - - ctrl = er32(CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 - ctrl |= E1000_CTRL_ADVD3WUC; - if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) - ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; - ew32(CTRL, ctrl); - - if (adapter->hw.phy.media_type == e1000_media_type_fiber || - adapter->hw.phy.media_type == - e1000_media_type_internal_serdes) { - /* keep the laser running in D3 */ - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; - ew32(CTRL_EXT, ctrl_ext); - } - - if (adapter->flags & FLAG_IS_ICH) - e1000_suspend_workarounds_ich8lan(&adapter->hw); - - /* Allow time for pending master requests to run */ - e1000e_disable_pcie_master(&adapter->hw); - - if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { - /* enable wakeup by the PHY */ - retval = e1000_init_phy_wakeup(adapter, wufc); - if (retval) - return retval; - } else { - /* enable wakeup by the MAC */ - ew32(WUFC, wufc); - ew32(WUC, E1000_WUC_PME_EN); - } - } else { - ew32(WUC, 0); - ew32(WUFC, 0); - } - - *enable_wake = !!wufc; - - /* make sure adapter isn't asleep if manageability is enabled */ - if ((adapter->flags & FLAG_MNG_PT_ENABLED) || - (hw->mac.ops.check_mng_mode(hw))) - *enable_wake = true; - - if (adapter->hw.phy.type == e1000_phy_igp_3) - e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); - - /* - * Release control of h/w to f/w. If f/w is AMT enabled, this - * would have already happened in close and is redundant. - */ - e1000e_release_hw_control(adapter); - - pci_disable_device(pdev); - - return 0; -} - -static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) -{ - if (sleep && wake) { - pci_prepare_to_sleep(pdev); - return; - } - - pci_wake_from_d3(pdev, wake); - pci_set_power_state(pdev, PCI_D3hot); -} - -static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, - bool wake) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - - /* - * The pci-e switch on some quad port adapters will report a - * correctable error when the MAC transitions from D0 to D3. To - * prevent this we need to mask off the correctable errors on the - * downstream port of the pci-e switch. 
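wufc above is the wake-filter bitmask: runtime suspend narrows it to link-change wakes only, and a link that is already up drops the link-change bit before the mask is programmed. The narrowing logic in isolation; the bit values here are restated from the e1000 convention and should be treated as an assumption:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WUFC_LNKC 0x1	/* link status change (assumed value) */
#define WUFC_MAG  0x2	/* magic packet (assumed value) */

static uint32_t effective_wufc(uint32_t wol, bool runtime, bool link_up)
{
	/* runtime suspend only ever wakes on link changes */
	uint32_t wufc = runtime ? WUFC_LNKC : wol;

	/* with link already up, a link-change wake is dropped */
	if (link_up)
		wufc &= ~WUFC_LNKC;
	return wufc;
}

int main(void)
{
	/* prints 0x2: magic packet only */
	printf("%#x\n", effective_wufc(WUFC_MAG | WUFC_LNKC, false, true));
	return 0;
}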
- */ - if (adapter->flags & FLAG_IS_QUAD_PORT) { - struct pci_dev *us_dev = pdev->bus->self; - int pos = pci_pcie_cap(us_dev); - u16 devctl; - - pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); - pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, - (devctl & ~PCI_EXP_DEVCTL_CERE)); - - e1000_power_off(pdev, sleep, wake); - - pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); - } else { - e1000_power_off(pdev, sleep, wake); - } -} - -#ifdef CONFIG_PCIEASPM -static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) -{ - pci_disable_link_state_locked(pdev, state); -} -#else -static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) -{ - int pos; - u16 reg16; - - /* - * Both device and parent should have the same ASPM setting. - * Disable ASPM in downstream component first and then upstream. - */ - pos = pci_pcie_cap(pdev); - pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); - reg16 &= ~state; - pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); - - if (!pdev->bus->self) - return; - - pos = pci_pcie_cap(pdev->bus->self); - pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); - reg16 &= ~state; - pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); -} -#endif -static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) -{ - dev_info(&pdev->dev, "Disabling ASPM %s %s\n", - (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", - (state & PCIE_LINK_STATE_L1) ? "L1" : ""); - - __e1000e_disable_aspm(pdev, state); -} - -#ifdef CONFIG_PM -static bool e1000e_pm_ready(struct e1000_adapter *adapter) -{ - return !!adapter->tx_ring->buffer_info; -} - -static int __e1000_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u16 aspm_disable_flag = 0; - u32 err; - - if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) - aspm_disable_flag = PCIE_LINK_STATE_L0S; - if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) - aspm_disable_flag |= PCIE_LINK_STATE_L1; - if (aspm_disable_flag) - e1000e_disable_aspm(pdev, aspm_disable_flag); - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); - - e1000e_set_interrupt_capability(adapter); - if (netif_running(netdev)) { - err = e1000_request_irq(adapter); - if (err) - return err; - } - - if (hw->mac.type == e1000_pch2lan) - e1000_resume_workarounds_pchlan(&adapter->hw); - - e1000e_power_up_phy(adapter); - - /* report the system wakeup cause from S3/S4 */ - if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { - u16 phy_data; - - e1e_rphy(&adapter->hw, BM_WUS, &phy_data); - if (phy_data) { - e_info("PHY Wakeup cause - %s\n", - phy_data & E1000_WUS_EX ? "Unicast Packet" : - phy_data & E1000_WUS_MC ? "Multicast Packet" : - phy_data & E1000_WUS_BC ? "Broadcast Packet" : - phy_data & E1000_WUS_MAG ? "Magic Packet" : - phy_data & E1000_WUS_LNKC ? "Link Status " - " Change" : "other"); - } - e1e_wphy(&adapter->hw, BM_WUS, ~0); - } else { - u32 wus = er32(WUS); - if (wus) { - e_info("MAC Wakeup cause - %s\n", - wus & E1000_WUS_EX ? "Unicast Packet" : - wus & E1000_WUS_MC ? "Multicast Packet" : - wus & E1000_WUS_BC ? "Broadcast Packet" : - wus & E1000_WUS_MAG ? "Magic Packet" : - wus & E1000_WUS_LNKC ? 
"Link Status Change" : - "other"); - } - ew32(WUS, ~0); - } - - e1000e_reset(adapter); - - e1000_init_manageability_pt(adapter); - - if (netif_running(netdev)) - e1000e_up(adapter); - - netif_device_attach(netdev); - - /* - * If the controller has AMT, do not set DRV_LOAD until the interface - * is up. For all other cases, let the f/w know that the h/w is now - * under the control of the driver. - */ - if (!(adapter->flags & FLAG_HAS_AMT)) - e1000e_get_hw_control(adapter); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int e1000_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - int retval; - bool wake; - - retval = __e1000_shutdown(pdev, &wake, false); - if (!retval) - e1000_complete_shutdown(pdev, true, wake); - - return retval; -} - -static int e1000_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (e1000e_pm_ready(adapter)) - adapter->idle_check = true; - - return __e1000_resume(pdev); -} -#endif /* CONFIG_PM_SLEEP */ - -#ifdef CONFIG_PM_RUNTIME -static int e1000_runtime_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (e1000e_pm_ready(adapter)) { - bool wake; - - __e1000_shutdown(pdev, &wake, true); - } - - return 0; -} - -static int e1000_idle(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (!e1000e_pm_ready(adapter)) - return 0; - - if (adapter->idle_check) { - adapter->idle_check = false; - if (!e1000e_has_link(adapter)) - pm_schedule_suspend(dev, MSEC_PER_SEC); - } - - return -EBUSY; -} - -static int e1000_runtime_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (!e1000e_pm_ready(adapter)) - return 0; - - adapter->idle_check = !dev->power.runtime_auto; - return __e1000_resume(pdev); -} -#endif /* CONFIG_PM_RUNTIME */ -#endif /* CONFIG_PM */ - -static void e1000_shutdown(struct pci_dev *pdev) -{ - bool wake = false; - - __e1000_shutdown(pdev, &wake, false); - - if (system_state == SYSTEM_POWER_OFF) - e1000_complete_shutdown(pdev, false, wake); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER - -static irqreturn_t e1000_intr_msix(int irq, void *data) -{ - struct net_device *netdev = data; - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (adapter->msix_entries) { - int vector, msix_irq; - - vector = 0; - msix_irq = adapter->msix_entries[vector].vector; - disable_irq(msix_irq); - e1000_intr_msix_rx(msix_irq, netdev); - enable_irq(msix_irq); - - vector++; - msix_irq = adapter->msix_entries[vector].vector; - disable_irq(msix_irq); - e1000_intr_msix_tx(msix_irq, netdev); - enable_irq(msix_irq); - - vector++; - msix_irq = adapter->msix_entries[vector].vector; - disable_irq(msix_irq); - e1000_msix_other(msix_irq, netdev); - enable_irq(msix_irq); - } - - return IRQ_HANDLED; -} - -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. 
- */
-static void e1000_netpoll(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- switch (adapter->int_mode) {
- case E1000E_INT_MODE_MSIX:
- e1000_intr_msix(adapter->pdev->irq, netdev);
- break;
- case E1000E_INT_MODE_MSI:
- disable_irq(adapter->pdev->irq);
- e1000_intr_msi(adapter->pdev->irq, netdev);
- enable_irq(adapter->pdev->irq);
- break;
- default: /* E1000E_INT_MODE_LEGACY */
- disable_irq(adapter->pdev->irq);
- e1000_intr(adapter->pdev->irq, netdev);
- enable_irq(adapter->pdev->irq);
- break;
- }
-}
-#endif
-
-/**
- * e1000_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev))
- e1000e_down(adapter);
- pci_disable_device(pdev);
-
- /* Request a slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * e1000_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the e1000_resume routine.
- */
-static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u16 aspm_disable_flag = 0;
- int err;
- pci_ers_result_t result;
-
- if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
- aspm_disable_flag = PCIE_LINK_STATE_L0S;
- if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
- aspm_disable_flag |= PCIE_LINK_STATE_L1;
- if (aspm_disable_flag)
- e1000e_disable_aspm(pdev, aspm_disable_flag);
-
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "Cannot re-enable PCI device after reset.\n");
- result = PCI_ERS_RESULT_DISCONNECT;
- } else {
- pci_set_master(pdev);
- pdev->state_saved = true;
- pci_restore_state(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
- e1000e_reset(adapter);
- ew32(WUS, ~0);
- result = PCI_ERS_RESULT_RECOVERED;
- }
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- return result;
-}
-
-/**
- * e1000_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells us that
- * it's OK to resume normal operation. Implementation resembles the
- * second-half of the e1000_resume routine.
- */
-static void e1000_io_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- e1000_init_manageability_pt(adapter);
-
- if (netif_running(netdev)) {
- if (e1000e_up(adapter)) {
- dev_err(&pdev->dev,
- "can't bring device back up after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
-
- /*
- * If the controller has AMT, do not set DRV_LOAD until the interface
- * is up. For all other cases, let the f/w know that the h/w is now
- * under the control of the driver.
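The three callbacks above only take effect once they are collected into a struct pci_error_handlers that the pci_driver points at; the PCI core then invokes them in order (error_detected, slot_reset, resume) during recovery. A sketch of that registration, with the struct name here chosen for illustration:

static struct pci_error_handlers err_handler_sketch = {
	.error_detected	= e1000_io_error_detected,
	.slot_reset	= e1000_io_slot_reset,
	.resume		= e1000_io_resume,
};

/* referenced from the driver definition, roughly:
 *	static struct pci_driver e1000_driver = {
 *		...
 *		.err_handler = &err_handler_sketch,
 *	};
 */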
- */ - if (!(adapter->flags & FLAG_HAS_AMT)) - e1000e_get_hw_control(adapter); - -} - -static void e1000_print_device_info(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 ret_val; - u8 pba_str[E1000_PBANUM_LENGTH]; - - /* print bus type/speed/width info */ - e_info("(PCI Express:2.5GT/s:%s) %pM\n", - /* bus width */ - ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : - "Width x1"), - /* MAC address */ - netdev->dev_addr); - e_info("Intel(R) PRO/%s Network Connection\n", - (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); - ret_val = e1000_read_pba_string_generic(hw, pba_str, - E1000_PBANUM_LENGTH); - if (ret_val) - strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); - e_info("MAC: %d, PHY: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, pba_str); -} - -static void e1000_eeprom_checks(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - int ret_val; - u16 buf = 0; - - if (hw->mac.type != e1000_82573) - return; - - ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); - if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) { - /* Deep Smart Power Down (DSPD) */ - dev_warn(&adapter->pdev->dev, - "Warning: detected DSPD enabled in EEPROM\n"); - } -} - -static const struct net_device_ops e1000e_netdev_ops = { - .ndo_open = e1000_open, - .ndo_stop = e1000_close, - .ndo_start_xmit = e1000_xmit_frame, - .ndo_get_stats64 = e1000e_get_stats64, - .ndo_set_multicast_list = e1000_set_multi, - .ndo_set_mac_address = e1000_set_mac, - .ndo_change_mtu = e1000_change_mtu, - .ndo_do_ioctl = e1000_ioctl, - .ndo_tx_timeout = e1000_tx_timeout, - .ndo_validate_addr = eth_validate_addr, - - .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = e1000_netpoll, -#endif -}; - -/** - * e1000_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in e1000_pci_tbl - * - * Returns 0 on success, negative on failure - * - * e1000_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. 
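The .ndo_do_ioctl entry in the ops table above is what makes the earlier e1000_mii_ioctl() reachable from userspace: a SIOCGMIIPHY/SIOCGMIIREG pair on any socket fd walks exactly that switch. A self-contained userspace probe of the BMSR snapshot; the interface name is an example and CAP_NET_ADMIN is required:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */

	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills phy_id */
		mii->reg_num = MII_BMSR;
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("BMSR = %#06x\n", mii->val_out);
	}
	close(fd);
	return 0;
}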
- **/ -static int __devinit e1000_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct e1000_adapter *adapter; - struct e1000_hw *hw; - const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; - resource_size_t mmio_start, mmio_len; - resource_size_t flash_start, flash_len; - - static int cards_found; - u16 aspm_disable_flag = 0; - int i, err, pci_using_dac; - u16 eeprom_data = 0; - u16 eeprom_apme_mask = E1000_EEPROM_APME; - - if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) - aspm_disable_flag = PCIE_LINK_STATE_L0S; - if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) - aspm_disable_flag |= PCIE_LINK_STATE_L1; - if (aspm_disable_flag) - e1000e_disable_aspm(pdev, aspm_disable_flag); - - err = pci_enable_device_mem(pdev); - if (err) - return err; - - pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); - goto err_dma; - } - } - } - - err = pci_request_selected_regions_exclusive(pdev, - pci_select_bars(pdev, IORESOURCE_MEM), - e1000e_driver_name); - if (err) - goto err_pci_reg; - - /* AER (Advanced Error Reporting) hooks */ - pci_enable_pcie_error_reporting(pdev); - - pci_set_master(pdev); - /* PCI config space info */ - err = pci_save_state(pdev); - if (err) - goto err_alloc_etherdev; - - err = -ENOMEM; - netdev = alloc_etherdev(sizeof(struct e1000_adapter)); - if (!netdev) - goto err_alloc_etherdev; - - SET_NETDEV_DEV(netdev, &pdev->dev); - - netdev->irq = pdev->irq; - - pci_set_drvdata(pdev, netdev); - adapter = netdev_priv(netdev); - hw = &adapter->hw; - adapter->netdev = netdev; - adapter->pdev = pdev; - adapter->ei = ei; - adapter->pba = ei->pba; - adapter->flags = ei->flags; - adapter->flags2 = ei->flags2; - adapter->hw.adapter = adapter; - adapter->hw.mac.type = ei->mac; - adapter->max_hw_frame_size = ei->max_hw_frame_size; - adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; - - mmio_start = pci_resource_start(pdev, 0); - mmio_len = pci_resource_len(pdev, 0); - - err = -EIO; - adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); - if (!adapter->hw.hw_addr) - goto err_ioremap; - - if ((adapter->flags & FLAG_HAS_FLASH) && - (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { - flash_start = pci_resource_start(pdev, 1); - flash_len = pci_resource_len(pdev, 1); - adapter->hw.flash_address = ioremap(flash_start, flash_len); - if (!adapter->hw.flash_address) - goto err_flashmap; - } - - /* construct the net_device struct */ - netdev->netdev_ops = &e1000e_netdev_ops; - e1000e_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - - netdev->mem_start = mmio_start; - netdev->mem_end = mmio_start + mmio_len; - - adapter->bd_number = cards_found++; - - e1000e_check_options(adapter); - - /* setup adapter struct */ - err = e1000_sw_init(adapter); - if (err) - goto err_sw_init; - - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); - - err = ei->get_variants(adapter); - if (err) - goto err_hw_init; - - if ((adapter->flags & FLAG_IS_ICH) && - 
(adapter->flags & FLAG_READ_ONLY_NVM)) - e1000e_write_protect_nvm_ich8lan(&adapter->hw); - - hw->mac.ops.get_bus_info(&adapter->hw); - - adapter->hw.phy.autoneg_wait_to_complete = 0; - - /* Copper options */ - if (adapter->hw.phy.media_type == e1000_media_type_copper) { - adapter->hw.phy.mdix = AUTO_ALL_MODES; - adapter->hw.phy.disable_polarity_correction = 0; - adapter->hw.phy.ms_type = e1000_ms_hw_default; - } - - if (e1000_check_reset_block(&adapter->hw)) - e_info("PHY reset is blocked due to SOL/IDER session.\n"); - - netdev->features = NETIF_F_SG | - NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX; - - if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) - netdev->features |= NETIF_F_HW_VLAN_FILTER; - - netdev->features |= NETIF_F_TSO; - netdev->features |= NETIF_F_TSO6; - - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_HW_CSUM; - netdev->vlan_features |= NETIF_F_SG; - - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } - - if (e1000e_enable_mng_pass_thru(&adapter->hw)) - adapter->flags |= FLAG_MNG_PT_ENABLED; - - /* - * before reading the NVM, reset the controller to - * put the device in a known good starting state - */ - adapter->hw.mac.ops.reset_hw(&adapter->hw); - - /* - * systems with ASPM and others may see the checksum fail on the first - * attempt. Let's give it a few tries - */ - for (i = 0;; i++) { - if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) - break; - if (i == 2) { - e_err("The NVM Checksum Is Not Valid\n"); - err = -EIO; - goto err_eeprom; - } - } - - e1000_eeprom_checks(adapter); - - /* copy the MAC address */ - if (e1000e_read_mac_addr(&adapter->hw)) - e_err("NVM Read Error while reading MAC address\n"); - - memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); - - if (!is_valid_ether_addr(netdev->perm_addr)) { - e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); - err = -EIO; - goto err_eeprom; - } - - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = e1000_watchdog; - adapter->watchdog_timer.data = (unsigned long) adapter; - - init_timer(&adapter->phy_info_timer); - adapter->phy_info_timer.function = e1000_update_phy_info; - adapter->phy_info_timer.data = (unsigned long) adapter; - - INIT_WORK(&adapter->reset_task, e1000_reset_task); - INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); - INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); - INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); - INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); - - /* Initialize link parameters. 
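[Editor's note: the checksum loop above tolerates transient failures, since systems with ASPM may fail the first NVM read; three attempts are made before giving up. A standalone sketch of that retry shape, with a fake validator that fails once by construction.]

#include <stdio.h>

static int attempts;

static int validate_nvm_checksum(void)  /* pretend NVM read + sum */
{
        return ++attempts < 2 ? -1 : 0; /* fails once, then passes */
}

int main(void)
{
        int i, err = 0;

        for (i = 0;; i++) {
                if (validate_nvm_checksum() >= 0)
                        break;
                if (i == 2) {           /* third consecutive failure */
                        fprintf(stderr, "The NVM Checksum Is Not Valid\n");
                        err = -1;
                        break;
                }
        }
        printf("checksum ok after %d attempt(s), err=%d\n", attempts, err);
        return err;
}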
User can change them with ethtool */ - adapter->hw.mac.autoneg = 1; - adapter->fc_autoneg = 1; - adapter->hw.fc.requested_mode = e1000_fc_default; - adapter->hw.fc.current_mode = e1000_fc_default; - adapter->hw.phy.autoneg_advertised = 0x2f; - - /* ring size defaults */ - adapter->rx_ring->count = 256; - adapter->tx_ring->count = 256; - - /* - * Initial Wake on LAN setting - If APM wake is enabled in - * the EEPROM, enable the ACPI Magic Packet filter - */ - if (adapter->flags & FLAG_APME_IN_WUC) { - /* APME bit in EEPROM is mapped to WUC.APME */ - eeprom_data = er32(WUC); - eeprom_apme_mask = E1000_WUC_APME; - if ((hw->mac.type > e1000_ich10lan) && - (eeprom_data & E1000_WUC_PHY_WAKE)) - adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; - } else if (adapter->flags & FLAG_APME_IN_CTRL3) { - if (adapter->flags & FLAG_APME_CHECK_PORT_B && - (adapter->hw.bus.func == 1)) - e1000_read_nvm(&adapter->hw, - NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); - else - e1000_read_nvm(&adapter->hw, - NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); - } - - /* fetch WoL from EEPROM */ - if (eeprom_data & eeprom_apme_mask) - adapter->eeprom_wol |= E1000_WUFC_MAG; - - /* - * now that we have the eeprom settings, apply the special cases - * where the eeprom may be wrong or the board simply won't support - * wake on lan on a particular port - */ - if (!(adapter->flags & FLAG_HAS_WOL)) - adapter->eeprom_wol = 0; - - /* initialize the wol settings based on the eeprom settings */ - adapter->wol = adapter->eeprom_wol; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); - - /* save off EEPROM version number */ - e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); - - /* reset the hardware with the new settings */ - e1000e_reset(adapter); - - /* - * If the controller has AMT, do not set DRV_LOAD until the interface - * is up. For all other cases, let the f/w know that the h/w is now - * under the control of the driver. - */ - if (!(adapter->flags & FLAG_HAS_AMT)) - e1000e_get_hw_control(adapter); - - strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1); - err = register_netdev(netdev); - if (err) - goto err_register; - - /* carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - - e1000_print_device_info(adapter); - - if (pci_dev_run_wake(pdev)) - pm_runtime_put_noidle(&pdev->dev); - - return 0; - -err_register: - if (!(adapter->flags & FLAG_HAS_AMT)) - e1000e_release_hw_control(adapter); -err_eeprom: - if (!e1000_check_reset_block(&adapter->hw)) - e1000_phy_hw_reset(&adapter->hw); -err_hw_init: - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); -err_sw_init: - if (adapter->hw.flash_address) - iounmap(adapter->hw.flash_address); - e1000e_reset_interrupt_capability(adapter); -err_flashmap: - iounmap(adapter->hw.hw_addr); -err_ioremap: - free_netdev(netdev); -err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); -err_pci_reg: -err_dma: - pci_disable_device(pdev); - return err; -} - -/** - * e1000_remove - Device Removal Routine - * @pdev: PCI device information struct - * - * e1000_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. 
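[Editor's note: the err_* ladder that ends e1000_probe() above is the kernel's usual goto-unwind idiom: each label releases only what was acquired before the failing step, in reverse order. A compilable toy model follows; malloc stands in for the PCI/netdev resources, and the names are illustrative.]

#include <stdio.h>
#include <stdlib.h>

static int probe_model(void)
{
        char *regions, *netdev, *rings;
        int err = -1;

        regions = malloc(16);           /* pci_request_..._regions() */
        if (!regions)
                goto err_pci_reg;
        netdev = malloc(16);            /* alloc_etherdev() */
        if (!netdev)
                goto err_alloc_etherdev;
        rings = malloc(16);             /* tx/rx ring allocation */
        if (!rings)
                goto err_sw_init;

        /* success; in the driver these persist until remove() */
        free(rings);
        free(netdev);
        free(regions);
        return 0;

err_sw_init:
        free(netdev);
err_alloc_etherdev:
        free(regions);
err_pci_reg:
        return err;
}

int main(void)
{
        return probe_model() ? EXIT_FAILURE : EXIT_SUCCESS;
}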
- **/ -static void __devexit e1000_remove(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); - bool down = test_bit(__E1000_DOWN, &adapter->state); - - /* - * The timers may be rescheduled, so explicitly disable them - * from being rescheduled. - */ - if (!down) - set_bit(__E1000_DOWN, &adapter->state); - del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_info_timer); - - cancel_work_sync(&adapter->reset_task); - cancel_work_sync(&adapter->watchdog_task); - cancel_work_sync(&adapter->downshift_task); - cancel_work_sync(&adapter->update_phy_task); - cancel_work_sync(&adapter->print_hang_task); - - if (!(netdev->flags & IFF_UP)) - e1000_power_down_phy(adapter); - - /* Don't lie to e1000_close() down the road. */ - if (!down) - clear_bit(__E1000_DOWN, &adapter->state); - unregister_netdev(netdev); - - if (pci_dev_run_wake(pdev)) - pm_runtime_get_noresume(&pdev->dev); - - /* - * Release control of h/w to f/w. If f/w is AMT enabled, this - * would have already happened in close and is redundant. - */ - e1000e_release_hw_control(adapter); - - e1000e_reset_interrupt_capability(adapter); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); - - iounmap(adapter->hw.hw_addr); - if (adapter->hw.flash_address) - iounmap(adapter->hw.flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); - - free_netdev(netdev); - - /* AER disable */ - pci_disable_pcie_error_reporting(pdev); - - pci_disable_device(pdev); -} - -/* PCI Error Recovery (ERS) */ -static struct pci_error_handlers e1000_err_handler = { - .error_detected = e1000_io_error_detected, - .slot_reset = e1000_io_slot_reset, - .resume = e1000_io_resume, -}; - -static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), - board_80003es2lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), - board_80003es2lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), - board_80003es2lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), - board_80003es2lan }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), 
board_ich8lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, - - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, - - { } /* terminate list */ -}; -MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); - -#ifdef CONFIG_PM -static const struct dev_pm_ops e1000_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) - SET_RUNTIME_PM_OPS(e1000_runtime_suspend, - e1000_runtime_resume, e1000_idle) -}; -#endif - -/* PCI Device API Driver */ -static struct pci_driver e1000_driver = { - .name = e1000e_driver_name, - .id_table = e1000_pci_tbl, - .probe = e1000_probe, - .remove = __devexit_p(e1000_remove), -#ifdef CONFIG_PM - .driver.pm = &e1000_pm_ops, -#endif - .shutdown = e1000_shutdown, - .err_handler = &e1000_err_handler -}; - -/** - * e1000_init_module - Driver Registration Routine - * - * e1000_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - **/ -static int __init e1000_init_module(void) -{ - int ret; - pr_info("Intel(R) PRO/1000 Network Driver - %s\n", - e1000e_driver_version); - pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); - ret = pci_register_driver(&e1000_driver); - - return ret; -} -module_init(e1000_init_module); - -/** - * e1000_exit_module - Driver Exit Cleanup Routine - * - * e1000_exit_module is called just before the driver is removed - * from memory. 
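[Editor's note: the id table above maps each PCI device ID to a board type through driver_data, which e1000_probe() then uses to index e1000_info_tbl. A standalone model of that lookup; the device IDs here are placeholders, not authoritative.]

#include <stdio.h>

enum board { board_82571, board_82572, board_ich9lan };

struct pci_id { unsigned short device; enum board driver_data; };

static const struct pci_id id_tbl[] = {
        { 0x105E, board_82571 },        /* hypothetical device IDs */
        { 0x107D, board_82572 },
        { 0x10BD, board_ich9lan },
        { 0 }                           /* terminator, as in the table above */
};

static const char *board_name[] = { "82571", "82572", "ich9lan" };

int main(void)
{
        unsigned short dev = 0x107D;
        const struct pci_id *e;

        for (e = id_tbl; e->device; e++)
                if (e->device == dev)
                        printf("matched board %s\n", board_name[e->driver_data]);
        return 0;
}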
- **/ -static void __exit e1000_exit_module(void) -{ - pci_unregister_driver(&e1000_driver); -} -module_exit(e1000_exit_module); - - -MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -/* e1000_main.c */ diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c deleted file mode 100644 index 4dd9b63..0000000 --- a/drivers/net/e1000e/param.c +++ /dev/null @@ -1,478 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include - -#include "e1000.h" - -/* - * This is the only thing that needs to be changed to adjust the - * maximum number of ports that the driver can manage. - */ - -#define E1000_MAX_NIC 32 - -#define OPTION_UNSET -1 -#define OPTION_DISABLED 0 -#define OPTION_ENABLED 1 - -#define COPYBREAK_DEFAULT 256 -unsigned int copybreak = COPYBREAK_DEFAULT; -module_param(copybreak, uint, 0644); -MODULE_PARM_DESC(copybreak, - "Maximum size of packet that is copied to a new buffer on receive"); - -/* - * All parameters are treated the same, as an integer array of values. - * This macro just reduces the need to repeat the same declaration code - * over and over (plus this helps to avoid typo bugs). - */ - -#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } -#define E1000_PARAM(X, desc) \ - static int __devinitdata X[E1000_MAX_NIC+1] \ - = E1000_PARAM_INIT; \ - static unsigned int num_##X; \ - module_param_array_named(X, X, int, &num_##X, 0); \ - MODULE_PARM_DESC(X, desc); - -/* - * Transmit Interrupt Delay in units of 1.024 microseconds - * Tx interrupt delay needs to typically be set to something non-zero - * - * Valid Range: 0-65535 - */ -E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); -#define DEFAULT_TIDV 8 -#define MAX_TXDELAY 0xFFFF -#define MIN_TXDELAY 0 - -/* - * Transmit Absolute Interrupt Delay in units of 1.024 microseconds - * - * Valid Range: 0-65535 - */ -E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); -#define DEFAULT_TADV 32 -#define MAX_TXABSDELAY 0xFFFF -#define MIN_TXABSDELAY 0 - -/* - * Receive Interrupt Delay in units of 1.024 microseconds - * hardware will likely hang if you set this to anything but zero. 
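[Editor's note: the E1000_PARAM() macro above expands to one array slot per possible board, pre-filled with OPTION_UNSET so that boards the user did not configure fall back to their defaults at validation time. A standalone model of that layout, using the same GCC range-initializer extension the driver relies on.]

#include <stdio.h>

#define MAX_NIC      32
#define OPTION_UNSET (-1)
#define PARAM_INIT   { [0 ... MAX_NIC] = OPTION_UNSET }  /* GCC extension */

static int TxIntDelay[MAX_NIC + 1] = PARAM_INIT;
static unsigned int num_TxIntDelay;     /* how many values the user passed */

int main(void)
{
        int bd = 0;                     /* board number */
        int def = 8;                    /* DEFAULT_TIDV */
        int val = (num_TxIntDelay > (unsigned)bd &&
                   TxIntDelay[bd] != OPTION_UNSET) ? TxIntDelay[bd] : def;

        printf("tx_int_delay = %d\n", val);
        return 0;
}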
- * - * Valid Range: 0-65535 - */ -E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); -#define MAX_RXDELAY 0xFFFF -#define MIN_RXDELAY 0 - -/* - * Receive Absolute Interrupt Delay in units of 1.024 microseconds - * - * Valid Range: 0-65535 - */ -E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); -#define MAX_RXABSDELAY 0xFFFF -#define MIN_RXABSDELAY 0 - -/* - * Interrupt Throttle Rate (interrupts/sec) - * - * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) - */ -E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); -#define DEFAULT_ITR 3 -#define MAX_ITR 100000 -#define MIN_ITR 100 - -/* IntMode (Interrupt Mode) - * - * Valid Range: 0 - 2 - * - * Default Value: 2 (MSI-X) - */ -E1000_PARAM(IntMode, "Interrupt Mode"); -#define MAX_INTMODE 2 -#define MIN_INTMODE 0 - -/* - * Enable Smart Power Down of the PHY - * - * Valid Range: 0, 1 - * - * Default Value: 0 (disabled) - */ -E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); - -/* - * Enable Kumeran Lock Loss workaround - * - * Valid Range: 0, 1 - * - * Default Value: 1 (enabled) - */ -E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); - -/* - * Write Protect NVM - * - * Valid Range: 0, 1 - * - * Default Value: 1 (enabled) - */ -E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); - -/* - * Enable CRC Stripping - * - * Valid Range: 0, 1 - * - * Default Value: 1 (enabled) - */ -E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \ - "the CRC"); - -struct e1000_option { - enum { enable_option, range_option, list_option } type; - const char *name; - const char *err; - int def; - union { - struct { /* range_option info */ - int min; - int max; - } r; - struct { /* list_option info */ - int nr; - struct e1000_opt_list { int i; char *str; } *p; - } l; - } arg; -}; - -static int __devinit e1000_validate_option(unsigned int *value, - const struct e1000_option *opt, - struct e1000_adapter *adapter) -{ - if (*value == OPTION_UNSET) { - *value = opt->def; - return 0; - } - - switch (opt->type) { - case enable_option: - switch (*value) { - case OPTION_ENABLED: - e_info("%s Enabled\n", opt->name); - return 0; - case OPTION_DISABLED: - e_info("%s Disabled\n", opt->name); - return 0; - } - break; - case range_option: - if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { - e_info("%s set to %i\n", opt->name, *value); - return 0; - } - break; - case list_option: { - int i; - struct e1000_opt_list *ent; - - for (i = 0; i < opt->arg.l.nr; i++) { - ent = &opt->arg.l.p[i]; - if (*value == ent->i) { - if (ent->str[0] != '\0') - e_info("%s\n", ent->str); - return 0; - } - } - } - break; - default: - BUG(); - } - - e_info("Invalid %s value specified (%i) %s\n", opt->name, *value, - opt->err); - *value = opt->def; - return -1; -} - -/** - * e1000e_check_options - Range Checking for Command Line Parameters - * @adapter: board private structure - * - * This routine checks all command line parameters for valid user - * input. If an invalid value is given, or if no user specified - * value exists, a default value is used. The final value is stored - * in a variable in the adapter structure. 
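[Editor's note: e1000_validate_option() above funnels every module parameter through the same clamp-to-default logic. A standalone sketch of its range_option branch, with illustrative values.]

#include <stdio.h>

struct range_opt { const char *name; int def, min, max; };

static int validate_range(int *value, const struct range_opt *opt)
{
        if (*value == -1) {                     /* OPTION_UNSET */
                *value = opt->def;
                return 0;
        }
        if (*value >= opt->min && *value <= opt->max) {
                printf("%s set to %d\n", opt->name, *value);
                return 0;
        }
        printf("Invalid %s value (%d), using default %d\n",
               opt->name, *value, opt->def);
        *value = opt->def;
        return -1;
}

int main(void)
{
        struct range_opt opt = { "Transmit Interrupt Delay", 8, 0, 0xFFFF };
        int v = 70000;                          /* out of range on purpose */

        validate_range(&v, &opt);
        return 0;
}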
- **/ -void __devinit e1000e_check_options(struct e1000_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - int bd = adapter->bd_number; - - if (bd >= E1000_MAX_NIC) { - e_notice("Warning: no configuration for board #%i\n", bd); - e_notice("Using defaults for all values\n"); - } - - { /* Transmit Interrupt Delay */ - static const struct e1000_option opt = { - .type = range_option, - .name = "Transmit Interrupt Delay", - .err = "using default of " - __MODULE_STRING(DEFAULT_TIDV), - .def = DEFAULT_TIDV, - .arg = { .r = { .min = MIN_TXDELAY, - .max = MAX_TXDELAY } } - }; - - if (num_TxIntDelay > bd) { - adapter->tx_int_delay = TxIntDelay[bd]; - e1000_validate_option(&adapter->tx_int_delay, &opt, - adapter); - } else { - adapter->tx_int_delay = opt.def; - } - } - { /* Transmit Absolute Interrupt Delay */ - static const struct e1000_option opt = { - .type = range_option, - .name = "Transmit Absolute Interrupt Delay", - .err = "using default of " - __MODULE_STRING(DEFAULT_TADV), - .def = DEFAULT_TADV, - .arg = { .r = { .min = MIN_TXABSDELAY, - .max = MAX_TXABSDELAY } } - }; - - if (num_TxAbsIntDelay > bd) { - adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; - e1000_validate_option(&adapter->tx_abs_int_delay, &opt, - adapter); - } else { - adapter->tx_abs_int_delay = opt.def; - } - } - { /* Receive Interrupt Delay */ - static struct e1000_option opt = { - .type = range_option, - .name = "Receive Interrupt Delay", - .err = "using default of " - __MODULE_STRING(DEFAULT_RDTR), - .def = DEFAULT_RDTR, - .arg = { .r = { .min = MIN_RXDELAY, - .max = MAX_RXDELAY } } - }; - - if (num_RxIntDelay > bd) { - adapter->rx_int_delay = RxIntDelay[bd]; - e1000_validate_option(&adapter->rx_int_delay, &opt, - adapter); - } else { - adapter->rx_int_delay = opt.def; - } - } - { /* Receive Absolute Interrupt Delay */ - static const struct e1000_option opt = { - .type = range_option, - .name = "Receive Absolute Interrupt Delay", - .err = "using default of " - __MODULE_STRING(DEFAULT_RADV), - .def = DEFAULT_RADV, - .arg = { .r = { .min = MIN_RXABSDELAY, - .max = MAX_RXABSDELAY } } - }; - - if (num_RxAbsIntDelay > bd) { - adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; - e1000_validate_option(&adapter->rx_abs_int_delay, &opt, - adapter); - } else { - adapter->rx_abs_int_delay = opt.def; - } - } - { /* Interrupt Throttling Rate */ - static const struct e1000_option opt = { - .type = range_option, - .name = "Interrupt Throttling Rate (ints/sec)", - .err = "using default of " - __MODULE_STRING(DEFAULT_ITR), - .def = DEFAULT_ITR, - .arg = { .r = { .min = MIN_ITR, - .max = MAX_ITR } } - }; - - if (num_InterruptThrottleRate > bd) { - adapter->itr = InterruptThrottleRate[bd]; - switch (adapter->itr) { - case 0: - e_info("%s turned off\n", opt.name); - break; - case 1: - e_info("%s set to dynamic mode\n", opt.name); - adapter->itr_setting = adapter->itr; - adapter->itr = 20000; - break; - case 3: - e_info("%s set to dynamic conservative mode\n", - opt.name); - adapter->itr_setting = adapter->itr; - adapter->itr = 20000; - break; - case 4: - e_info("%s set to simplified (2000-8000 ints) " - "mode\n", opt.name); - adapter->itr_setting = 4; - break; - default: - /* - * Save the setting, because the dynamic bits - * change itr. - */ - if (e1000_validate_option(&adapter->itr, &opt, - adapter) && - (adapter->itr == 3)) { - /* - * In case of invalid user value, - * default to conservative mode. 
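[Editor's note: the InterruptThrottleRate switch above overloads small values as mode selectors (0 off, 1 dynamic, 3 dynamic conservative, 4 simplified) and treats anything else as a fixed rate whose low two bits must be masked off, because those bits double as mode flags. A standalone decoder modelling that logic.]

#include <stdio.h>

static void decode_itr(int itr, int *setting, int *rate)
{
        switch (itr) {
        case 0:                         /* throttling off */
                *setting = 0; *rate = 0;
                break;
        case 1:                         /* dynamic */
        case 3:                         /* dynamic conservative */
                *setting = itr; *rate = 20000;
                break;
        case 4:                         /* simplified balancing */
                *setting = 4; *rate = 0;
                break;
        default:                        /* fixed rate; strip control bits */
                *setting = itr & ~3; *rate = itr;
                break;
        }
}

int main(void)
{
        int setting, rate;

        decode_itr(10003, &setting, &rate);
        printf("itr_setting=%d rate=%d\n", setting, rate);
        return 0;
}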
- */ - adapter->itr_setting = adapter->itr; - adapter->itr = 20000; - } else { - /* - * Clear the lower two bits because - * they are used as control. - */ - adapter->itr_setting = - adapter->itr & ~3; - } - break; - } - } else { - adapter->itr_setting = opt.def; - adapter->itr = 20000; - } - } - { /* Interrupt Mode */ - static struct e1000_option opt = { - .type = range_option, - .name = "Interrupt Mode", - .err = "defaulting to 2 (MSI-X)", - .def = E1000E_INT_MODE_MSIX, - .arg = { .r = { .min = MIN_INTMODE, - .max = MAX_INTMODE } } - }; - - if (num_IntMode > bd) { - unsigned int int_mode = IntMode[bd]; - e1000_validate_option(&int_mode, &opt, adapter); - adapter->int_mode = int_mode; - } else { - adapter->int_mode = opt.def; - } - } - { /* Smart Power Down */ - static const struct e1000_option opt = { - .type = enable_option, - .name = "PHY Smart Power Down", - .err = "defaulting to Disabled", - .def = OPTION_DISABLED - }; - - if (num_SmartPowerDownEnable > bd) { - unsigned int spd = SmartPowerDownEnable[bd]; - e1000_validate_option(&spd, &opt, adapter); - if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) - && spd) - adapter->flags |= FLAG_SMART_POWER_DOWN; - } - } - { /* CRC Stripping */ - static const struct e1000_option opt = { - .type = enable_option, - .name = "CRC Stripping", - .err = "defaulting to Enabled", - .def = OPTION_ENABLED - }; - - if (num_CrcStripping > bd) { - unsigned int crc_stripping = CrcStripping[bd]; - e1000_validate_option(&crc_stripping, &opt, adapter); - if (crc_stripping == OPTION_ENABLED) - adapter->flags2 |= FLAG2_CRC_STRIPPING; - } else { - adapter->flags2 |= FLAG2_CRC_STRIPPING; - } - } - { /* Kumeran Lock Loss Workaround */ - static const struct e1000_option opt = { - .type = enable_option, - .name = "Kumeran Lock Loss Workaround", - .err = "defaulting to Enabled", - .def = OPTION_ENABLED - }; - - if (num_KumeranLockLoss > bd) { - unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; - e1000_validate_option(&kmrn_lock_loss, &opt, adapter); - if (hw->mac.type == e1000_ich8lan) - e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, - kmrn_lock_loss); - } else { - if (hw->mac.type == e1000_ich8lan) - e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, - opt.def); - } - } - { /* Write-protect NVM */ - static const struct e1000_option opt = { - .type = enable_option, - .name = "Write-protect NVM", - .err = "defaulting to Enabled", - .def = OPTION_ENABLED - }; - - if (adapter->flags & FLAG_IS_ICH) { - if (num_WriteProtectNVM > bd) { - unsigned int write_protect_nvm = WriteProtectNVM[bd]; - e1000_validate_option(&write_protect_nvm, &opt, - adapter); - if (write_protect_nvm) - adapter->flags |= FLAG_READ_ONLY_NVM; - } else { - if (opt.def) - adapter->flags |= FLAG_READ_ONLY_NVM; - } - } - } -} diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c deleted file mode 100644 index 8666476..0000000 --- a/drivers/net/e1000e/phy.c +++ /dev/null @@ -1,3377 +0,0 @@ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include - -#include "e1000.h" - -static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); -static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); -static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); -static s32 e1000_wait_autoneg(struct e1000_hw *hw); -static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); -static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, - u16 *data, bool read, bool page_set); -static u32 e1000_get_phy_addr_for_hv_page(u32 page); -static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, - u16 *data, bool read); - -/* Cable length tables */ -static const u16 e1000_m88_cable_length_table[] = { - 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; -#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ - ARRAY_SIZE(e1000_m88_cable_length_table) - -static const u16 e1000_igp_2_cable_length_table[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, - 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, - 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, - 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, - 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, - 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, - 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, - 124}; -#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ - ARRAY_SIZE(e1000_igp_2_cable_length_table) - -#define BM_PHY_REG_PAGE(offset) \ - ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) -#define BM_PHY_REG_NUM(offset) \ - ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ - (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ - ~MAX_PHY_REG_ADDRESS))) - -#define HV_INTC_FC_PAGE_START 768 -#define I82578_ADDR_REG 29 -#define I82577_ADDR_REG 16 -#define I82577_CFG_REG 22 -#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) -#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ -#define I82577_CTRL_REG 23 - -/* 82577 specific PHY registers */ -#define I82577_PHY_CTRL_2 18 -#define I82577_PHY_STATUS_2 26 -#define I82577_PHY_DIAG_STATUS 31 - -/* I82577 PHY Status 2 */ -#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 -#define I82577_PHY_STATUS2_MDIX 0x0800 -#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 -#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 - -/* I82577 PHY Control 2 */ -#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 -#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 - -/* I82577 PHY Diagnostics Status */ -#define I82577_DSTATUS_CABLE_LENGTH 0x03FC -#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 - -/* BM PHY Copper Specific Control 1 */ -#define BM_CS_CTRL1 16 - -#define HV_MUX_DATA_CTRL PHY_REG(776, 16) -#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 -#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 - -/** - * e1000e_check_reset_block_generic - Check if PHY reset is blocked - * @hw: pointer to the HW structure - * - * Read the PHY management control register and check whether a PHY reset - * is blocked. 
If a reset is not blocked return 0, otherwise - * return E1000_BLK_PHY_RESET (12). - **/ -s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) -{ - u32 manc; - - manc = er32(MANC); - - return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? - E1000_BLK_PHY_RESET : 0; -} - -/** - * e1000e_get_phy_id - Retrieve the PHY ID and revision - * @hw: pointer to the HW structure - * - * Reads the PHY registers and stores the PHY ID and possibly the PHY - * revision in the hardware structure. - **/ -s32 e1000e_get_phy_id(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; - u16 phy_id; - u16 retry_count = 0; - - if (!(phy->ops.read_reg)) - goto out; - - while (retry_count < 2) { - ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); - if (ret_val) - goto out; - - phy->id = (u32)(phy_id << 16); - udelay(20); - ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); - if (ret_val) - goto out; - - phy->id |= (u32)(phy_id & PHY_REVISION_MASK); - phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); - - if (phy->id != 0 && phy->id != PHY_REVISION_MASK) - goto out; - - retry_count++; - } -out: - return ret_val; -} - -/** - * e1000e_phy_reset_dsp - Reset PHY DSP - * @hw: pointer to the HW structure - * - * Reset the digital signal processor. - **/ -s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) -{ - s32 ret_val; - - ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); - if (ret_val) - return ret_val; - - return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); -} - -/** - * e1000e_read_phy_reg_mdic - Read MDI control register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Reads the MDI control register in the PHY at offset and stores the - * information read to data. - **/ -s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) -{ - struct e1000_phy_info *phy = &hw->phy; - u32 i, mdic = 0; - - if (offset > MAX_PHY_REG_ADDRESS) { - e_dbg("PHY Address %d is out of range\n", offset); - return -E1000_ERR_PARAM; - } - - /* - * Set up Op-code, Phy Address, and register offset in the MDI - * Control register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. - */ - mdic = ((offset << E1000_MDIC_REG_SHIFT) | - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_READ)); - - ew32(MDIC, mdic); - - /* - * Poll the ready bit to see if the MDI read completed - * Increasing the time out as testing showed failures with - * the lower time out - */ - for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - udelay(50); - mdic = er32(MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { - e_dbg("MDI Read did not complete\n"); - return -E1000_ERR_PHY; - } - if (mdic & E1000_MDIC_ERROR) { - e_dbg("MDI Error\n"); - return -E1000_ERR_PHY; - } - *data = (u16) mdic; - - /* - * Allow some time after each MDIC transaction to avoid - * reading duplicate data in the next MDIC transaction. - */ - if (hw->mac.type == e1000_pch2lan) - udelay(100); - - return 0; -} - -/** - * e1000e_write_phy_reg_mdic - Write MDI control register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write to register at offset - * - * Writes data to MDI control register in the PHY at offset. 
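[Editor's note: the MDIC accessors above compose an opcode/PHY-address/offset command word and then poll the READY bit with a bounded loop. A standalone model of the read side; the shift and bit values mirror the usual MDIC layout but should be treated as illustrative, and a plain variable stands in for the memory-mapped register.]

#include <stdint.h>
#include <stdio.h>

#define MDIC_REG_SHIFT   16
#define MDIC_PHY_SHIFT   21
#define MDIC_OP_READ     (2u << 26)
#define MDIC_READY       (1u << 28)
#define MDIC_ERROR       (1u << 30)

static uint32_t fake_mdic;              /* stands in for the MDIC register */

static int mdic_read(uint32_t phy_addr, uint32_t offset, uint16_t *data)
{
        uint32_t mdic = (offset << MDIC_REG_SHIFT) |
                        (phy_addr << MDIC_PHY_SHIFT) | MDIC_OP_READ;
        int i;

        fake_mdic = mdic | MDIC_READY | 0x796d; /* hw "completes" at once */

        for (i = 0; i < 1920; i++) {    /* the driver's extended bound */
                /* udelay(50) would sit here in the real code */
                mdic = fake_mdic;
                if (mdic & MDIC_READY)
                        break;
        }
        if (!(mdic & MDIC_READY) || (mdic & MDIC_ERROR))
                return -1;
        *data = (uint16_t)mdic;
        return 0;
}

int main(void)
{
        uint16_t v;

        if (!mdic_read(1, 2, &v))
                printf("PHY reg 2 = 0x%04x\n", v);
        return 0;
}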
- **/ -s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) -{ - struct e1000_phy_info *phy = &hw->phy; - u32 i, mdic = 0; - - if (offset > MAX_PHY_REG_ADDRESS) { - e_dbg("PHY Address %d is out of range\n", offset); - return -E1000_ERR_PARAM; - } - - /* - * Set up Op-code, Phy Address, and register offset in the MDI - * Control register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. - */ - mdic = (((u32)data) | - (offset << E1000_MDIC_REG_SHIFT) | - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_WRITE)); - - ew32(MDIC, mdic); - - /* - * Poll the ready bit to see if the MDI read completed - * Increasing the time out as testing showed failures with - * the lower time out - */ - for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - udelay(50); - mdic = er32(MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { - e_dbg("MDI Write did not complete\n"); - return -E1000_ERR_PHY; - } - if (mdic & E1000_MDIC_ERROR) { - e_dbg("MDI Error\n"); - return -E1000_ERR_PHY; - } - - /* - * Allow some time after each MDIC transaction to avoid - * reading duplicate data in the next MDIC transaction. - */ - if (hw->mac.type == e1000_pch2lan) - udelay(100); - - return 0; -} - -/** - * e1000e_read_phy_reg_m88 - Read m88 PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retrieved information in data. Release any acquired - * semaphores before exiting. - **/ -s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) -{ - s32 ret_val; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - - hw->phy.ops.release(hw); - - return ret_val; -} - -/** - * e1000e_write_phy_reg_m88 - Write m88 PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ -s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) -{ - s32 ret_val; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - - hw->phy.ops.release(hw); - - return ret_val; -} - -/** - * e1000_set_page_igp - Set page as on IGP-like PHY(s) - * @hw: pointer to the HW structure - * @page: page to set (shifted left when necessary) - * - * Sets PHY page required for PHY register access. Assumes semaphore is - * already acquired. Note, this function sets phy.addr to 1 so the caller - * must set it appropriately (if necessary) after this function returns. - **/ -s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) -{ - e_dbg("Setting page 0x%x\n", page); - - hw->phy.addr = 1; - - return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); -} - -/** - * __e1000e_read_phy_reg_igp - Read igp PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * @locked: semaphore has already been acquired or not - * - * Acquires semaphore, if necessary, then reads the PHY register at offset - * and stores the retrieved information in data. 
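[Editor's note: __e1000e_read_phy_reg_igp() above shows the paged-access pattern: offsets beyond the multi-page limit first hit a page-select register, and only the low address bits are then used on that page. A standalone model with illustrative constants.]

#include <stdio.h>

#define MAX_PHY_REG_ADDRESS    0x1F     /* 5 address bits per page */
#define MAX_PHY_MULTI_PAGE_REG 0xF

static int page_selected = -1;

static void write_page_select(int page)
{
        page_selected = page;           /* pretend MDIC write */
}

static int read_reg(unsigned int offset)
{
        if (offset > MAX_PHY_MULTI_PAGE_REG)
                write_page_select(offset);      /* full offset selects page */

        /* pretend read of (offset & MAX_PHY_REG_ADDRESS) on that page */
        return (int)(offset & MAX_PHY_REG_ADDRESS);
}

int main(void)
{
        printf("reg=%d page=%d\n", read_reg(0x2D), page_selected);
        return 0;
}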
Release any acquired - * semaphores before exiting. - **/ -static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, - bool locked) -{ - s32 ret_val = 0; - - if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - } - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - ret_val = e1000e_write_phy_reg_mdic(hw, - IGP01E1000_PHY_PAGE_SELECT, - (u16)offset); - if (ret_val) - goto release; - } - - ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - -release: - if (!locked) - hw->phy.ops.release(hw); -out: - return ret_val; -} - -/** - * e1000e_read_phy_reg_igp - Read igp PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Acquires semaphore then reads the PHY register at offset and stores the - * retrieved information in data. - * Release the acquired semaphore before exiting. - **/ -s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) -{ - return __e1000e_read_phy_reg_igp(hw, offset, data, false); -} - -/** - * e1000e_read_phy_reg_igp_locked - Read igp PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Reads the PHY register at offset and stores the retrieved information - * in data. Assumes semaphore already acquired. - **/ -s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) -{ - return __e1000e_read_phy_reg_igp(hw, offset, data, true); -} - -/** - * e1000e_write_phy_reg_igp - Write igp PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * @locked: semaphore has already been acquired or not - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ -static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, - bool locked) -{ - s32 ret_val = 0; - - if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - } - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - ret_val = e1000e_write_phy_reg_mdic(hw, - IGP01E1000_PHY_PAGE_SELECT, - (u16)offset); - if (ret_val) - goto release; - } - - ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - -release: - if (!locked) - hw->phy.ops.release(hw); - -out: - return ret_val; -} - -/** - * e1000e_write_phy_reg_igp - Write igp PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Acquires semaphore then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ -s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) -{ - return __e1000e_write_phy_reg_igp(hw, offset, data, false); -} - -/** - * e1000e_write_phy_reg_igp_locked - Write igp PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Writes the data to PHY register at the offset. - * Assumes semaphore already acquired. 
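[Editor's note: the __locked/unlocked split above is a recurring idiom in this file: one worker takes a 'locked' flag, and two thin entry points fix it, so callers that already hold the hardware semaphore skip the acquire/release. A standalone sketch, with a plain flag standing in for the semaphore.]

#include <stdio.h>

static int sem_held;                    /* stands in for the hw semaphore */

static int __read_reg(int offset, int *data, int locked)
{
        if (!locked)
                sem_held = 1;           /* phy.ops.acquire() */

        *data = offset * 2;             /* pretend register read */

        if (!locked)
                sem_held = 0;           /* phy.ops.release() */
        return 0;
}

static int read_reg(int offset, int *data)        /* takes the semaphore */
{
        return __read_reg(offset, data, 0);
}

static int read_reg_locked(int offset, int *data) /* caller already holds it */
{
        return __read_reg(offset, data, 1);
}

int main(void)
{
        int v;

        read_reg(4, &v);
        sem_held = 1;                   /* caller acquires... */
        read_reg_locked(5, &v);
        sem_held = 0;                   /* ...and releases */
        printf("ok, last read %d\n", v);
        return 0;
}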
- **/ -s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) -{ - return __e1000e_write_phy_reg_igp(hw, offset, data, true); -} - -/** - * __e1000_read_kmrn_reg - Read kumeran register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * @locked: semaphore has already been acquired or not - * - * Acquires semaphore, if necessary. Then reads the PHY register at offset - * using the kumeran interface. The information retrieved is stored in data. - * Release any acquired semaphores before exiting. - **/ -static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, - bool locked) -{ - u32 kmrnctrlsta; - s32 ret_val = 0; - - if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - } - - kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; - ew32(KMRNCTRLSTA, kmrnctrlsta); - e1e_flush(); - - udelay(2); - - kmrnctrlsta = er32(KMRNCTRLSTA); - *data = (u16)kmrnctrlsta; - - if (!locked) - hw->phy.ops.release(hw); - -out: - return ret_val; -} - -/** - * e1000e_read_kmrn_reg - Read kumeran register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Acquires semaphore then reads the PHY register at offset using the - * kumeran interface. The information retrieved is stored in data. - * Release the acquired semaphore before exiting. - **/ -s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) -{ - return __e1000_read_kmrn_reg(hw, offset, data, false); -} - -/** - * e1000e_read_kmrn_reg_locked - Read kumeran register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Reads the PHY register at offset using the kumeran interface. The - * information retrieved is stored in data. - * Assumes semaphore already acquired. - **/ -s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) -{ - return __e1000_read_kmrn_reg(hw, offset, data, true); -} - -/** - * __e1000_write_kmrn_reg - Write kumeran register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * @locked: semaphore has already been acquired or not - * - * Acquires semaphore, if necessary. Then write the data to PHY register - * at the offset using the kumeran interface. Release any acquired semaphores - * before exiting. - **/ -static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, - bool locked) -{ - u32 kmrnctrlsta; - s32 ret_val = 0; - - if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - } - - kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | data; - ew32(KMRNCTRLSTA, kmrnctrlsta); - e1e_flush(); - - udelay(2); - - if (!locked) - hw->phy.ops.release(hw); - -out: - return ret_val; -} - -/** - * e1000e_write_kmrn_reg - Write kumeran register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Acquires semaphore then writes the data to the PHY register at the offset - * using the kumeran interface. Release the acquired semaphore before exiting. 
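[Editor's note: the kumeran accessors above shift the register offset into the KMRNCTRLSTA word, set REN to mark a read, flush, wait briefly, and read the result back from the same register. A standalone model; the shift and mask values are illustrative.]

#include <stdint.h>
#include <stdio.h>

#define KMRN_OFFSET_SHIFT 16
#define KMRN_OFFSET_MASK  (0x3Fu << KMRN_OFFSET_SHIFT)
#define KMRN_REN          (1u << 21)

static uint32_t fake_kmrnctrlsta;       /* stands in for KMRNCTRLSTA */

static uint16_t kmrn_read(uint32_t offset)
{
        uint32_t v = ((offset << KMRN_OFFSET_SHIFT) & KMRN_OFFSET_MASK) |
                     KMRN_REN;

        fake_kmrnctrlsta = v | 0x1234;  /* hw fills the low 16 bits */
        /* e1e_flush(); udelay(2); in the real driver */
        return (uint16_t)fake_kmrnctrlsta;
}

int main(void)
{
        printf("kmrn=0x%04x\n", kmrn_read(3));
        return 0;
}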
- **/ -s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) -{ - return __e1000_write_kmrn_reg(hw, offset, data, false); -} - -/** - * e1000e_write_kmrn_reg_locked - Write kumeran register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Write the data to PHY register at the offset using the kumeran interface. - * Assumes semaphore already acquired. - **/ -s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) -{ - return __e1000_write_kmrn_reg(hw, offset, data, true); -} - -/** - * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link - * @hw: pointer to the HW structure - * - * Sets up Carrier-sense on Transmit and downshift values. - **/ -s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) -{ - s32 ret_val; - u16 phy_data; - - /* Enable CRS on Tx. This must be set for half-duplex operation. */ - ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); - if (ret_val) - goto out; - - phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; - - /* Enable downshift */ - phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; - - ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); - -out: - return ret_val; -} - -/** - * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link - * @hw: pointer to the HW structure - * - * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock - * and downshift values are set also. - **/ -s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - - /* Enable CRS on Tx. This must be set for half-duplex operation. */ - ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - /* For BM PHY this bit is downshift enable */ - if (phy->type != e1000_phy_bm) - phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - - /* - * Options: - * MDI/MDI-X = 0 (default) - * 0 - Auto for all speeds - * 1 - MDI mode - * 2 - MDI-X mode - * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) - */ - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; - - switch (phy->mdix) { - case 1: - phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; - break; - case 2: - phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; - break; - case 3: - phy_data |= M88E1000_PSCR_AUTO_X_1000T; - break; - case 0: - default: - phy_data |= M88E1000_PSCR_AUTO_X_MODE; - break; - } - - /* - * Options: - * disable_polarity_correction = 0 (default) - * Automatic Correction for Reversed Cable Polarity - * 0 - Disabled - * 1 - Enabled - */ - phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; - if (phy->disable_polarity_correction == 1) - phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; - - /* Enable downshift on BM (disabled by default) */ - if (phy->type == e1000_phy_bm) - phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; - - ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if (ret_val) - return ret_val; - - if ((phy->type == e1000_phy_m88) && - (phy->revision < E1000_REVISION_4) && - (phy->id != BME1000_E_PHY_ID_R2)) { - /* - * Force TX_CLK in the Extended PHY Specific Control Register - * to 25MHz clock. - */ - ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_data |= M88E1000_EPSCR_TX_CLK_25; - - if ((phy->revision == 2) && - (phy->id == M88E1111_I_PHY_ID)) { - /* 82573L PHY - set the downshift counter to 5x. 
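[Editor's note: the MDI/MDI-X switch in e1000e_copper_link_setup_m88() above always clears the auto-crossover field before OR-ing in the requested mode, so stale bits cannot survive. A standalone model; the bit positions are illustrative rather than the exact M88 register layout.]

#include <stdio.h>

#define PSCR_AUTO_X_MODE  (3u << 5)
#define PSCR_MDI_MANUAL   (0u << 5)
#define PSCR_MDIX_MANUAL  (1u << 5)
#define PSCR_AUTO_X_1000T (2u << 5)

static unsigned int apply_mdix(unsigned int phy_data, int mdix)
{
        phy_data &= ~PSCR_AUTO_X_MODE;  /* wipe the old setting first */

        switch (mdix) {
        case 1:  phy_data |= PSCR_MDI_MANUAL;   break;  /* force MDI */
        case 2:  phy_data |= PSCR_MDIX_MANUAL;  break;  /* force MDI-X */
        case 3:  phy_data |= PSCR_AUTO_X_1000T; break;  /* auto, 1000T only */
        case 0:                                         /* auto, all speeds */
        default: phy_data |= PSCR_AUTO_X_MODE;  break;
        }
        return phy_data;
}

int main(void)
{
        printf("pscr=0x%x\n", apply_mdix(0xffff, 2));
        return 0;
}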
*/ - phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; - phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; - } else { - /* Configure Master and Slave downshift values */ - phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); - phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); - } - ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); - if (ret_val) - return ret_val; - } - - if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { - /* Set PHY page 0, register 29 to 0x0003 */ - ret_val = e1e_wphy(hw, 29, 0x0003); - if (ret_val) - return ret_val; - - /* Set PHY page 0, register 30 to 0x0000 */ - ret_val = e1e_wphy(hw, 30, 0x0000); - if (ret_val) - return ret_val; - } - - /* Commit the changes. */ - ret_val = e1000e_commit_phy(hw); - if (ret_val) { - e_dbg("Error committing the PHY changes\n"); - return ret_val; - } - - if (phy->type == e1000_phy_82578) { - ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - /* 82578 PHY - set the downshift count to 1x. */ - phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; - phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; - ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); - if (ret_val) - return ret_val; - } - - return 0; -} - -/** - * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link - * @hw: pointer to the HW structure - * - * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for - * igp PHY's. - **/ -s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - - ret_val = e1000_phy_hw_reset(hw); - if (ret_val) { - e_dbg("Error resetting the PHY.\n"); - return ret_val; - } - - /* - * Wait 100ms for MAC to configure PHY from NVM settings, to avoid - * timeout issues when LFS is enabled. - */ - msleep(100); - - /* disable lplu d0 during driver init */ - ret_val = e1000_set_d0_lplu_state(hw, false); - if (ret_val) { - e_dbg("Error Disabling LPLU D0\n"); - return ret_val; - } - /* Configure mdi-mdix settings */ - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); - if (ret_val) - return ret_val; - - data &= ~IGP01E1000_PSCR_AUTO_MDIX; - - switch (phy->mdix) { - case 1: - data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; - break; - case 2: - data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; - break; - case 0: - default: - data |= IGP01E1000_PSCR_AUTO_MDIX; - break; - } - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); - if (ret_val) - return ret_val; - - /* set auto-master slave resolution settings */ - if (hw->mac.autoneg) { - /* - * when autonegotiation advertisement is only 1000Mbps then we - * should disable SmartSpeed and enable Auto MasterSlave - * resolution as hardware default. 
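[Editor's note: the master/slave block above distinguishes manual resolution (MS_ENABLE set, MS_VALUE picking master or slave) from hardware auto-resolution (MS_ENABLE clear). A standalone model using the conventional 1000BASE-T control register bit positions; treat them as illustrative here.]

#include <stdio.h>

#define CR_1000T_MS_VALUE  (1u << 11)   /* 1 = master when enabled */
#define CR_1000T_MS_ENABLE (1u << 12)   /* 1 = manual resolution */

enum ms_type { ms_auto, ms_force_master, ms_force_slave };

static unsigned int apply_ms(unsigned int ctrl, enum ms_type t)
{
        switch (t) {
        case ms_force_master:
                ctrl |= CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE;
                break;
        case ms_force_slave:
                ctrl |= CR_1000T_MS_ENABLE;
                ctrl &= ~CR_1000T_MS_VALUE;
                break;
        case ms_auto:
                ctrl &= ~CR_1000T_MS_ENABLE;
                break;
        }
        return ctrl;
}

int main(void)
{
        printf("1000T ctrl=0x%04x\n", apply_ms(0x0300, ms_force_slave));
        return 0;
}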
- */ - if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { - /* Disable SmartSpeed */ - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - return ret_val; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - return ret_val; - - /* Set auto Master/Slave resolution process */ - ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); - if (ret_val) - return ret_val; - - data &= ~CR_1000T_MS_ENABLE; - ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); - if (ret_val) - return ret_val; - } - - ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); - if (ret_val) - return ret_val; - - /* load defaults for future use */ - phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? - ((data & CR_1000T_MS_VALUE) ? - e1000_ms_force_master : - e1000_ms_force_slave) : - e1000_ms_auto; - - switch (phy->ms_type) { - case e1000_ms_force_master: - data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); - break; - case e1000_ms_force_slave: - data |= CR_1000T_MS_ENABLE; - data &= ~(CR_1000T_MS_VALUE); - break; - case e1000_ms_auto: - data &= ~CR_1000T_MS_ENABLE; - default: - break; - } - ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); - } - - return ret_val; -} - -/** - * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation - * @hw: pointer to the HW structure - * - * Reads the MII auto-neg advertisement register and/or the 1000T control - * register and if the PHY is already setup for auto-negotiation, then - * return successful. Otherwise, setup advertisement and flow control to - * the appropriate values for the wanted auto-negotiation. - **/ -static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 mii_autoneg_adv_reg; - u16 mii_1000t_ctrl_reg = 0; - - phy->autoneg_advertised &= phy->autoneg_mask; - - /* Read the MII Auto-Neg Advertisement Register (Address 4). */ - ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); - if (ret_val) - return ret_val; - - if (phy->autoneg_mask & ADVERTISE_1000_FULL) { - /* Read the MII 1000Base-T Control Register (Address 9). */ - ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); - if (ret_val) - return ret_val; - } - - /* - * Need to parse both autoneg_advertised and fc and set up - * the appropriate PHY registers. First we will parse for - * autoneg_advertised software override. Since we can advertise - * a plethora of combinations, we need to check each bit - * individually. - */ - - /* - * First we clear all the 10/100 mb speed bits in the Auto-Neg - * Advertisement Register (Address 4) and the 1000 mb speed bits in - * the 1000Base-T Control Register (Address 9). - */ - mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | - NWAY_AR_100TX_HD_CAPS | - NWAY_AR_10T_FD_CAPS | - NWAY_AR_10T_HD_CAPS); - mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); - - e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); - - /* Do we want to advertise 10 Mb Half Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_10_HALF) { - e_dbg("Advertise 10mb Half duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; - } - - /* Do we want to advertise 10 Mb Full Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_10_FULL) { - e_dbg("Advertise 10mb Full duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; - } - - /* Do we want to advertise 100 Mb Half Duplex? 
*/ - if (phy->autoneg_advertised & ADVERTISE_100_HALF) { - e_dbg("Advertise 100mb Half duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; - } - - /* Do we want to advertise 100 Mb Full Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_100_FULL) { - e_dbg("Advertise 100mb Full duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; - } - - /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ - if (phy->autoneg_advertised & ADVERTISE_1000_HALF) - e_dbg("Advertise 1000mb Half duplex request denied!\n"); - - /* Do we want to advertise 1000 Mb Full Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { - e_dbg("Advertise 1000mb Full duplex\n"); - mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; - } - - /* - * Check for a software override of the flow control settings, and - * setup the PHY advertisement registers accordingly. If - * auto-negotiation is enabled, then software will have to set the - * "PAUSE" bits to the correct value in the Auto-Negotiation - * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- - * negotiation. - * - * The possible values of the "fc" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames - * but we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. - * other: No software override. The flow control configuration - * in the EEPROM is used. - */ - switch (hw->fc.current_mode) { - case e1000_fc_none: - /* - * Flow control (Rx & Tx) is completely disabled by a - * software over-ride. - */ - mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - case e1000_fc_rx_pause: - /* - * Rx Flow control is enabled, and Tx Flow control is - * disabled, by a software over-ride. - * - * Since there really isn't a way to advertise that we are - * capable of Rx Pause ONLY, we will advertise that we - * support both symmetric and asymmetric Rx PAUSE. Later - * (in e1000e_config_fc_after_link_up) we will disable the - * hw's ability to send PAUSE frames. - */ - mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - case e1000_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is - * disabled, by a software over-ride. - */ - mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; - mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; - break; - case e1000_fc_full: - /* - * Flow control (both Rx and Tx) is enabled by a software - * over-ride. - */ - mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - default: - e_dbg("Flow control param set incorrectly\n"); - ret_val = -E1000_ERR_CONFIG; - return ret_val; - } - - ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); - if (ret_val) - return ret_val; - - e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); - - if (phy->autoneg_mask & ADVERTISE_1000_FULL) - ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); - - return ret_val; -} - -/** - * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link - * @hw: pointer to the HW structure - * - * Performs initial bounds checking on autoneg advertisement parameter, then - * configure to advertise the full capability. Setup the PHY to autoneg - * and restart the negotiation process between the link partner. If - * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. 
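[Editor's note: the flow-control switch above maps the four fc modes onto the PAUSE and ASM_DIR advertisement bits; the rx_pause case advertises both, because asymmetric-Rx-only cannot be expressed, and Tx pause is suppressed later in e1000e_config_fc_after_link_up(). A standalone model using the standard 802.3 bit positions.]

#include <stdio.h>

#define NWAY_AR_PAUSE   (1u << 10)
#define NWAY_AR_ASM_DIR (1u << 11)

enum fc_mode { fc_none, fc_rx_pause, fc_tx_pause, fc_full };

static unsigned int apply_fc(unsigned int adv, enum fc_mode fc)
{
        adv &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);

        switch (fc) {
        case fc_none:                   /* advertise no pause at all */
                break;
        case fc_rx_pause:               /* symmetric + asymmetric */
                adv |= NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
                break;
        case fc_tx_pause:               /* asymmetric Tx only */
                adv |= NWAY_AR_ASM_DIR;
                break;
        case fc_full:
                adv |= NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
                break;
        }
        return adv;
}

int main(void)
{
        printf("adv=0x%04x\n", apply_fc(0x01e1, fc_rx_pause));
        return 0;
}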
- **/ -static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_ctrl; - - /* - * Perform some bounds checking on the autoneg advertisement - * parameter. - */ - phy->autoneg_advertised &= phy->autoneg_mask; - - /* - * If autoneg_advertised is zero, we assume it was not defaulted - * by the calling code so we set to advertise full capability. - */ - if (phy->autoneg_advertised == 0) - phy->autoneg_advertised = phy->autoneg_mask; - - e_dbg("Reconfiguring auto-neg advertisement params\n"); - ret_val = e1000_phy_setup_autoneg(hw); - if (ret_val) { - e_dbg("Error Setting up Auto-Negotiation\n"); - return ret_val; - } - e_dbg("Restarting Auto-Neg\n"); - - /* - * Restart auto-negotiation by setting the Auto Neg Enable bit and - * the Auto Neg Restart bit in the PHY control register. - */ - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); - if (ret_val) - return ret_val; - - phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); - if (ret_val) - return ret_val; - - /* - * Does the user want to wait for Auto-Neg to complete here, or - * check at a later time (for example, callback routine). - */ - if (phy->autoneg_wait_to_complete) { - ret_val = e1000_wait_autoneg(hw); - if (ret_val) { - e_dbg("Error while waiting for " - "autoneg to complete\n"); - return ret_val; - } - } - - hw->mac.get_link_status = 1; - - return ret_val; -} - -/** - * e1000e_setup_copper_link - Configure copper link settings - * @hw: pointer to the HW structure - * - * Calls the appropriate function to configure the link for auto-neg or forced - * speed and duplex. Then we check for link, once link is established calls - * to configure collision distance and flow control are called. If link is - * not established, we return -E1000_ERR_PHY (-2). - **/ -s32 e1000e_setup_copper_link(struct e1000_hw *hw) -{ - s32 ret_val; - bool link; - - if (hw->mac.autoneg) { - /* - * Setup autoneg and flow control advertisement and perform - * autonegotiation. - */ - ret_val = e1000_copper_link_autoneg(hw); - if (ret_val) - return ret_val; - } else { - /* - * PHY will be set to 10H, 10F, 100H or 100F - * depending on user settings. - */ - e_dbg("Forcing Speed and Duplex\n"); - ret_val = e1000_phy_force_speed_duplex(hw); - if (ret_val) { - e_dbg("Error Forcing Speed and Duplex\n"); - return ret_val; - } - } - - /* - * Check link status. Wait up to 100 microseconds for link to become - * valid. - */ - ret_val = e1000e_phy_has_link_generic(hw, - COPPER_LINK_UP_LIMIT, - 10, - &link); - if (ret_val) - return ret_val; - - if (link) { - e_dbg("Valid link established!!!\n"); - e1000e_config_collision_dist(hw); - ret_val = e1000e_config_fc_after_link_up(hw); - } else { - e_dbg("Unable to establish link!!!\n"); - } - - return ret_val; -} - -/** - * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY - * @hw: pointer to the HW structure - * - * Calls the PHY setup function to force speed and duplex. Clears the - * auto-crossover to force MDI manually. Waits for link and returns - * successful if link up is successful, else -E1000_ERR_PHY (-2). 
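[Editor's note: e1000e_phy_has_link_generic(), called above with COPPER_LINK_UP_LIMIT iterations, is a bounded poll that reports link state through a bool rather than an error code. A standalone sketch of that shape, with a fake PHY that comes up on the third poll.]

#include <stdbool.h>
#include <stdio.h>

static int polls;

static bool phy_link_up(void)           /* pretend PHY status read */
{
        return ++polls >= 3;
}

static int phy_has_link(int iterations, bool *link)
{
        int i;

        *link = false;
        for (i = 0; i < iterations; i++) {
                /* a per-iteration delay would sit here in real code */
                if (phy_link_up()) {
                        *link = true;
                        break;
                }
        }
        return 0;
}

int main(void)
{
        bool link;

        phy_has_link(10, &link);        /* 10 mirrors COPPER_LINK_UP_LIMIT */
        printf("link %s after %d polls\n", link ? "up" : "down", polls);
        return 0;
}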
- **/ -s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); - if (ret_val) - return ret_val; - - e1000e_phy_force_speed_duplex_setup(hw, &phy_data); - - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); - if (ret_val) - return ret_val; - - /* - * Clear Auto-Crossover to force MDI manually. IGP requires MDI - * forced whenever speed and duplex are forced. - */ - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; - phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; - - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); - if (ret_val) - return ret_val; - - e_dbg("IGP PSCR: %X\n", phy_data); - - udelay(1); - - if (phy->autoneg_wait_to_complete) { - e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); - - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - return ret_val; - - if (!link) - e_dbg("Link taking longer than expected.\n"); - - /* Try once more */ - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - return ret_val; - } - - return ret_val; -} - -/** - * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY - * @hw: pointer to the HW structure - * - * Calls the PHY setup function to force speed and duplex. Clears the - * auto-crossover to force MDI manually. Resets the PHY to commit the - * changes. If time expires while waiting for link up, we reset the DSP. - * After reset, TX_CLK and CRS on Tx must be set. Return successful upon - * successful completion, else return corresponding error code. - **/ -s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - - /* - * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI - * forced whenever speed and duplex are forced. - */ - ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; - ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if (ret_val) - return ret_val; - - e_dbg("M88E1000 PSCR: %X\n", phy_data); - - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); - if (ret_val) - return ret_val; - - e1000e_phy_force_speed_duplex_setup(hw, &phy_data); - - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); - if (ret_val) - return ret_val; - - /* Reset the phy to commit changes. */ - ret_val = e1000e_commit_phy(hw); - if (ret_val) - return ret_val; - - if (phy->autoneg_wait_to_complete) { - e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); - - ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); - if (ret_val) - return ret_val; - - if (!link) { - if (hw->phy.type != e1000_phy_m88) { - e_dbg("Link taking longer than expected.\n"); - } else { - /* - * We didn't get link. - * Reset the DSP and cross our fingers. 
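Clearing auto-crossover before forcing speed/duplex is the same read-modify-write pattern on the PHY port-control register: both the auto-MDI-X and force-MDI-X bits come out. A sketch with hypothetical accessors and illustrative bit values:

#include <stdint.h>

#define PSCR_AUTO_MDIX		0x1000	/* illustrative, per IGP01E1000_PSCR_* */
#define PSCR_FORCE_MDI_MDIX	0x2000

extern int phy_read(uint16_t reg, uint16_t *val);	/* hypothetical */
extern int phy_write(uint16_t reg, uint16_t val);

/* Disable automatic and forced crossover so MDI is driven manually. */
static int clear_auto_mdix(uint16_t port_ctrl_reg)
{
	uint16_t val;
	int err = phy_read(port_ctrl_reg, &val);

	if (err)
		return err;
	val &= ~(PSCR_AUTO_MDIX | PSCR_FORCE_MDI_MDIX);
	return phy_write(port_ctrl_reg, val);
}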
- */ - ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, - 0x001d); - if (ret_val) - return ret_val; - ret_val = e1000e_phy_reset_dsp(hw); - if (ret_val) - return ret_val; - } - } - - /* Try once more */ - ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); - if (ret_val) - return ret_val; - } - - if (hw->phy.type != e1000_phy_m88) - return 0; - - ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - /* - * Resetting the phy means we need to re-force TX_CLK in the - * Extended PHY Specific Control Register to 25MHz clock from - * the reset value of 2.5MHz. - */ - phy_data |= M88E1000_EPSCR_TX_CLK_25; - ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); - if (ret_val) - return ret_val; - - /* - * In addition, we must re-enable CRS on Tx for both half and full - * duplex. - */ - ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - - return ret_val; -} - -/** - * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex - * @hw: pointer to the HW structure - * - * Forces the speed and duplex settings of the PHY. - * This is a function pointer entry point only called by - * PHY setup routines. - **/ -s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - bool link; - - ret_val = e1e_rphy(hw, PHY_CONTROL, &data); - if (ret_val) - goto out; - - e1000e_phy_force_speed_duplex_setup(hw, &data); - - ret_val = e1e_wphy(hw, PHY_CONTROL, data); - if (ret_val) - goto out; - - /* Disable MDI-X support for 10/100 */ - ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); - if (ret_val) - goto out; - - data &= ~IFE_PMC_AUTO_MDIX; - data &= ~IFE_PMC_FORCE_MDIX; - - ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); - if (ret_val) - goto out; - - e_dbg("IFE PMC: %X\n", data); - - udelay(1); - - if (phy->autoneg_wait_to_complete) { - e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); - - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; - - if (!link) - e_dbg("Link taking longer than expected.\n"); - - /* Try once more */ - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; - } - -out: - return ret_val; -} - -/** - * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex - * @hw: pointer to the HW structure - * @phy_ctrl: pointer to current value of PHY_CONTROL - * - * Forces speed and duplex on the PHY by doing the following: disable flow - * control, force speed/duplex on the MAC, disable auto speed detection, - * disable auto-negotiation, configure duplex, configure speed, configure - * the collision distance, write configuration to CTRL register. The - * caller must write to the PHY_CONTROL register for these settings to - * take affect. 
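On the PHY side, the step list above boils down to building one forced-mode control value: auto-negotiation off, exactly one speed bit and one duplex bit chosen. A sketch using the standard MII BMCR bit layout:

#include <stdint.h>

#define BMCR_SPEED100	0x2000
#define BMCR_FULLDPLX	0x0100
#define BMCR_ANENABLE	0x1000

/* Build a forced-mode BMCR value: autoneg off, one speed/duplex pair set. */
static uint16_t forced_bmcr(int speed100, int full_duplex)
{
	uint16_t bmcr = 0;		/* 10 Mb/s, half duplex by default */

	if (speed100)
		bmcr |= BMCR_SPEED100;
	if (full_duplex)
		bmcr |= BMCR_FULLDPLX;
	/* BMCR_ANENABLE deliberately left clear: forcing disables autoneg */
	return bmcr;
}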
- **/ -void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 ctrl; - - /* Turn off flow control when forcing speed/duplex */ - hw->fc.current_mode = e1000_fc_none; - - /* Force speed/duplex on the mac */ - ctrl = er32(CTRL); - ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - ctrl &= ~E1000_CTRL_SPD_SEL; - - /* Disable Auto Speed Detection */ - ctrl &= ~E1000_CTRL_ASDE; - - /* Disable autoneg on the phy */ - *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; - - /* Forcing Full or Half Duplex? */ - if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { - ctrl &= ~E1000_CTRL_FD; - *phy_ctrl &= ~MII_CR_FULL_DUPLEX; - e_dbg("Half Duplex\n"); - } else { - ctrl |= E1000_CTRL_FD; - *phy_ctrl |= MII_CR_FULL_DUPLEX; - e_dbg("Full Duplex\n"); - } - - /* Forcing 10mb or 100mb? */ - if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { - ctrl |= E1000_CTRL_SPD_100; - *phy_ctrl |= MII_CR_SPEED_100; - *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); - e_dbg("Forcing 100mb\n"); - } else { - ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); - *phy_ctrl |= MII_CR_SPEED_10; - *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); - e_dbg("Forcing 10mb\n"); - } - - e1000e_config_collision_dist(hw); - - ew32(CTRL, ctrl); -} - -/** - * e1000e_set_d3_lplu_state - Sets low power link up state for D3 - * @hw: pointer to the HW structure - * @active: boolean used to enable/disable lplu - * - * Success returns 0, Failure returns 1 - * - * The low power link up (lplu) state is set to the power management level D3 - * and SmartSpeed is disabled when active is true, else clear lplu for D3 - * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU - * is used during Dx states where the power conservation is most important. - * During driver activity, SmartSpeed should be enabled so performance is - * maintained. - **/ -s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - - ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); - if (ret_val) - return ret_val; - - if (!active) { - data &= ~IGP02E1000_PM_D3_LPLU; - ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); - if (ret_val) - return ret_val; - /* - * LPLU and SmartSpeed are mutually exclusive. LPLU is used - * during Dx states where the power conservation is most - * important. During driver activity we should enable - * SmartSpeed, so performance is maintained. 
- */ - if (phy->smart_speed == e1000_smart_speed_on) { - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - return ret_val; - - data |= IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - return ret_val; - } else if (phy->smart_speed == e1000_smart_speed_off) { - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - return ret_val; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - return ret_val; - } - } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || - (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || - (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { - data |= IGP02E1000_PM_D3_LPLU; - ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); - if (ret_val) - return ret_val; - - /* When LPLU is enabled, we should disable SmartSpeed */ - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); - if (ret_val) - return ret_val; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); - } - - return ret_val; -} - -/** - * e1000e_check_downshift - Checks whether a downshift in speed occurred - * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns 1 - * - * A downshift is detected by querying the PHY link health. - **/ -s32 e1000e_check_downshift(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data, offset, mask; - - switch (phy->type) { - case e1000_phy_m88: - case e1000_phy_gg82563: - case e1000_phy_bm: - case e1000_phy_82578: - offset = M88E1000_PHY_SPEC_STATUS; - mask = M88E1000_PSSR_DOWNSHIFT; - break; - case e1000_phy_igp_2: - case e1000_phy_igp_3: - offset = IGP01E1000_PHY_LINK_HEALTH; - mask = IGP01E1000_PLHR_SS_DOWNGRADE; - break; - default: - /* speed downshift not supported */ - phy->speed_downgraded = false; - return 0; - } - - ret_val = e1e_rphy(hw, offset, &phy_data); - - if (!ret_val) - phy->speed_downgraded = (phy_data & mask); - - return ret_val; -} - -/** - * e1000_check_polarity_m88 - Checks the polarity. - * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns -E1000_ERR_PHY (-2) - * - * Polarity is determined based on the PHY specific status register. - **/ -s32 e1000_check_polarity_m88(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - - ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); - - if (!ret_val) - phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; - - return ret_val; -} - -/** - * e1000_check_polarity_igp - Checks the polarity. - * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns -E1000_ERR_PHY (-2) - * - * Polarity is determined based on the PHY port status register, and the - * current speed (since there is no polarity at 100Mbps). - **/ -s32 e1000_check_polarity_igp(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data, offset, mask; - - /* - * Polarity is determined based on the speed of - * our connection. 
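The downshift check above is table-driven in spirit: each PHY family contributes a (status register, downshift bit) pair and the rest of the logic is shared. A sketch of that shape, with illustrative register values rather than the real M88/IGP ones:

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical per-PHY (status register, downshift bit) pairs. */
struct downshift_reg { uint16_t offset; uint16_t mask; };

enum phy_kind { PHY_M88, PHY_IGP, PHY_KIND_MAX };

static const struct downshift_reg downshift_tbl[PHY_KIND_MAX] = {
	[PHY_M88] = { .offset = 0x11, .mask = 0x0020 }, /* e.g. spec status */
	[PHY_IGP] = { .offset = 0x13, .mask = 0x0100 }, /* e.g. link health */
};

extern int mii_read(uint16_t reg, uint16_t *val);	/* hypothetical */

/* Report whether the PHY downshifted from its advertised speed. */
static int phy_downshifted(enum phy_kind kind, bool *downshifted)
{
	uint16_t val;
	int err = mii_read(downshift_tbl[kind].offset, &val);

	if (!err)
		*downshifted = val & downshift_tbl[kind].mask;
	return err;
}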
- */
-	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
-	if (ret_val)
-		return ret_val;
-
-	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
-	    IGP01E1000_PSSR_SPEED_1000MBPS) {
-		offset = IGP01E1000_PHY_PCS_INIT_REG;
-		mask = IGP01E1000_PHY_POLARITY_MASK;
-	} else {
-		/*
-		 * This really only applies to 10Mbps since
-		 * there is no polarity for 100Mbps (always 0).
-		 */
-		offset = IGP01E1000_PHY_PORT_STATUS;
-		mask = IGP01E1000_PSSR_POLARITY_REVERSED;
-	}
-
-	ret_val = e1e_rphy(hw, offset, &data);
-
-	if (!ret_val)
-		phy->cable_polarity = (data & mask)
-				      ? e1000_rev_polarity_reversed
-				      : e1000_rev_polarity_normal;
-
-	return ret_val;
-}
-
-/**
- * e1000_check_polarity_ife - Check cable polarity for IFE PHY
- * @hw: pointer to the HW structure
- *
- * Polarity is determined based on the polarity reversal feature being enabled.
- **/
-s32 e1000_check_polarity_ife(struct e1000_hw *hw)
-{
-	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val;
-	u16 phy_data, offset, mask;
-
-	/*
-	 * Polarity is determined based on the reversal feature being enabled.
-	 */
-	if (phy->polarity_correction) {
-		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
-		mask = IFE_PESC_POLARITY_REVERSED;
-	} else {
-		offset = IFE_PHY_SPECIAL_CONTROL;
-		mask = IFE_PSC_FORCE_POLARITY;
-	}
-
-	ret_val = e1e_rphy(hw, offset, &phy_data);
-
-	if (!ret_val)
-		phy->cable_polarity = (phy_data & mask)
-				      ? e1000_rev_polarity_reversed
-				      : e1000_rev_polarity_normal;
-
-	return ret_val;
-}
-
-/**
- * e1000_wait_autoneg - Wait for auto-neg completion
- * @hw: pointer to the HW structure
- *
- * Waits for auto-negotiation to complete or for the auto-negotiation time
- * limit to expire, whichever happens first.
- **/
-static s32 e1000_wait_autoneg(struct e1000_hw *hw)
-{
-	s32 ret_val = 0;
-	u16 i, phy_status;
-
-	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
-	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
-		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
-		if (ret_val)
-			break;
-		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
-		if (ret_val)
-			break;
-		if (phy_status & MII_SR_AUTONEG_COMPLETE)
-			break;
-		msleep(100);
-	}
-
-	/*
-	 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
-	 * has completed.
-	 */
-	return ret_val;
-}
-
-/**
- * e1000e_phy_has_link_generic - Polls PHY for link
- * @hw: pointer to the HW structure
- * @iterations: number of times to poll for link
- * @usec_interval: delay between polling attempts
- * @success: pointer to whether polling was successful or not
- *
- * Polls the PHY status register for link, 'iterations' number of times.
- **/
-s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
-				u32 usec_interval, bool *success)
-{
-	s32 ret_val = 0;
-	u16 i, phy_status;
-
-	for (i = 0; i < iterations; i++) {
-		/*
-		 * Some PHYs require the PHY_STATUS register to be read
-		 * twice due to the link bit being sticky.  No harm doing
-		 * it across the board.
-		 */
-		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
-		if (ret_val)
-			/*
-			 * If the first read fails, another entity may have
-			 * ownership of the resources, wait and try again to
-			 * see if they have relinquished the resources yet.
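The autoneg wait above is a bounded poll: read the status register, test the completion bit, sleep, repeat, and let the caller decide what a timeout means. A stand-alone sketch using standard MII BMSR bits and a hypothetical sleep_ms() helper:

#include <stdint.h>
#include <stdbool.h>

#define MII_BMSR		0x01
#define BMSR_ANEGCOMPLETE	0x0020

extern int mii_read(uint8_t reg, uint16_t *val);	/* hypothetical */
extern void sleep_ms(unsigned int ms);			/* stand-in for msleep() */

/* Poll BMSR until autoneg completes or `limit` 100 ms ticks elapse. */
static int wait_autoneg_done(unsigned int limit, bool *done)
{
	uint16_t bmsr = 0;

	*done = false;
	while (limit--) {
		int err = mii_read(MII_BMSR, &bmsr);

		if (err)
			return err;
		if (bmsr & BMSR_ANEGCOMPLETE) {
			*done = true;
			break;
		}
		sleep_ms(100);
	}
	return 0;	/* timeout is not an error; caller checks *done */
}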
- */ - udelay(usec_interval); - ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); - if (ret_val) - break; - if (phy_status & MII_SR_LINK_STATUS) - break; - if (usec_interval >= 1000) - mdelay(usec_interval/1000); - else - udelay(usec_interval); - } - - *success = (i < iterations); - - return ret_val; -} - -/** - * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY - * @hw: pointer to the HW structure - * - * Reads the PHY specific status register to retrieve the cable length - * information. The cable length is determined by averaging the minimum and - * maximum values to get the "average" cable length. The m88 PHY has four - * possible cable length values, which are: - * Register Value Cable Length - * 0 < 50 meters - * 1 50 - 80 meters - * 2 80 - 110 meters - * 3 110 - 140 meters - * 4 > 140 meters - **/ -s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data, index; - - ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if (ret_val) - goto out; - - index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { - ret_val = -E1000_ERR_PHY; - goto out; - } - - phy->min_cable_length = e1000_m88_cable_length_table[index]; - phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; - - phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; - -out: - return ret_val; -} - -/** - * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY - * @hw: pointer to the HW structure - * - * The automatic gain control (agc) normalizes the amplitude of the - * received signal, adjusting for the attenuation produced by the - * cable. By reading the AGC registers, which represent the - * combination of coarse and fine gain value, the value can be put - * into a lookup table to obtain the approximate cable length - * for each channel. - **/ -s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data, i, agc_value = 0; - u16 cur_agc_index, max_agc_index = 0; - u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; - static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { - IGP02E1000_PHY_AGC_A, - IGP02E1000_PHY_AGC_B, - IGP02E1000_PHY_AGC_C, - IGP02E1000_PHY_AGC_D - }; - - /* Read the AGC registers for all channels */ - for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { - ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); - if (ret_val) - return ret_val; - - /* - * Getting bits 15:9, which represent the combination of - * coarse and fine gain values. The result is a number - * that can be put into the lookup table to obtain the - * approximate cable length. - */ - cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & - IGP02E1000_AGC_LENGTH_MASK; - - /* Array index bound check. */ - if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || - (cur_agc_index == 0)) - return -E1000_ERR_PHY; - - /* Remove min & max AGC values from calculation. 
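Dropping the single best and worst channel before averaging, as the comment above describes, is a two-element trimmed mean across the four AGC channels. The arithmetic in isolation:

/* Average four per-channel length estimates, discarding the single
 * smallest and largest value, as the IGP2 routine above does. */
static unsigned int trimmed_mean4(const unsigned int len[4])
{
	unsigned int sum = 0, min = len[0], max = len[0];
	int i;

	for (i = 0; i < 4; i++) {
		sum += len[i];
		if (len[i] < min)
			min = len[i];
		if (len[i] > max)
			max = len[i];
	}
	return (sum - min - max) / 2;	/* 4 channels minus the 2 dropped */
}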
*/ - if (e1000_igp_2_cable_length_table[min_agc_index] > - e1000_igp_2_cable_length_table[cur_agc_index]) - min_agc_index = cur_agc_index; - if (e1000_igp_2_cable_length_table[max_agc_index] < - e1000_igp_2_cable_length_table[cur_agc_index]) - max_agc_index = cur_agc_index; - - agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; - } - - agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + - e1000_igp_2_cable_length_table[max_agc_index]); - agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); - - /* Calculate cable length with the error range of +/- 10 meters. */ - phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? - (agc_value - IGP02E1000_AGC_RANGE) : 0; - phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; - - phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; - - return ret_val; -} - -/** - * e1000e_get_phy_info_m88 - Retrieve PHY information - * @hw: pointer to the HW structure - * - * Valid for only copper links. Read the PHY status register (sticky read) - * to verify that link is up. Read the PHY special control register to - * determine the polarity and 10base-T extended distance. Read the PHY - * special status register to determine MDI/MDIx and current speed. If - * speed is 1000, then determine cable length, local and remote receiver. - **/ -s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - - if (phy->media_type != e1000_media_type_copper) { - e_dbg("Phy info is only valid for copper media\n"); - return -E1000_ERR_CONFIG; - } - - ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) - return ret_val; - - if (!link) { - e_dbg("Phy info is only valid if link is up\n"); - return -E1000_ERR_CONFIG; - } - - ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - return ret_val; - - phy->polarity_correction = (phy_data & - M88E1000_PSCR_POLARITY_REVERSAL); - - ret_val = e1000_check_polarity_m88(hw); - if (ret_val) - return ret_val; - - ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if (ret_val) - return ret_val; - - phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); - - if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { - ret_val = e1000_get_cable_length(hw); - if (ret_val) - return ret_val; - - ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); - if (ret_val) - return ret_val; - - phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - - phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - } else { - /* Set values to "undefined" */ - phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; - phy->local_rx = e1000_1000t_rx_status_undefined; - phy->remote_rx = e1000_1000t_rx_status_undefined; - } - - return ret_val; -} - -/** - * e1000e_get_phy_info_igp - Retrieve igp PHY information - * @hw: pointer to the HW structure - * - * Read PHY status to determine if link is up. If link is up, then - * set/determine 10base-T extended distance and polarity correction. Read - * PHY port status to determine MDI/MDIx and speed. Based on the speed, - * determine on the cable length, local and remote receiver. 
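Decoding PHY_1000T_STATUS follows the same pattern in the m88, igp and 82577 variants: below gigabit speed the receiver states are undefined; at gigabit, each status bit maps to ok/not-ok. A sketch with stand-in bit values for SR_1000T_LOCAL_RX_STATUS and SR_1000T_REMOTE_RX_STATUS:

enum rx_status { RX_NOT_OK, RX_OK, RX_UNDEFINED };

#define SR_LOCAL_RX_OK	0x2000	/* stand-in bit values */
#define SR_REMOTE_RX_OK	0x1000

/* Map the two 1000BASE-T status bits onto tri-state receiver status. */
static void decode_1000t_status(unsigned short status, int gig_link,
				enum rx_status *local, enum rx_status *remote)
{
	if (!gig_link) {		/* undefined below 1000 Mb/s */
		*local = *remote = RX_UNDEFINED;
		return;
	}
	*local  = (status & SR_LOCAL_RX_OK)  ? RX_OK : RX_NOT_OK;
	*remote = (status & SR_REMOTE_RX_OK) ? RX_OK : RX_NOT_OK;
}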
- **/ -s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - bool link; - - ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) - return ret_val; - - if (!link) { - e_dbg("Phy info is only valid if link is up\n"); - return -E1000_ERR_CONFIG; - } - - phy->polarity_correction = true; - - ret_val = e1000_check_polarity_igp(hw); - if (ret_val) - return ret_val; - - ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); - if (ret_val) - return ret_val; - - phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); - - if ((data & IGP01E1000_PSSR_SPEED_MASK) == - IGP01E1000_PSSR_SPEED_1000MBPS) { - ret_val = e1000_get_cable_length(hw); - if (ret_val) - return ret_val; - - ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); - if (ret_val) - return ret_val; - - phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - - phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - } else { - phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; - phy->local_rx = e1000_1000t_rx_status_undefined; - phy->remote_rx = e1000_1000t_rx_status_undefined; - } - - return ret_val; -} - -/** - * e1000_get_phy_info_ife - Retrieves various IFE PHY states - * @hw: pointer to the HW structure - * - * Populates "phy" structure with various feature states. - **/ -s32 e1000_get_phy_info_ife(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - bool link; - - ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) - goto out; - - if (!link) { - e_dbg("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); - if (ret_val) - goto out; - phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) - ? false : true; - - if (phy->polarity_correction) { - ret_val = e1000_check_polarity_ife(hw); - if (ret_val) - goto out; - } else { - /* Polarity is forced */ - phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; - } - - ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); - if (ret_val) - goto out; - - phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; - - /* The following parameters are undefined for 10/100 operation. */ - phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; - phy->local_rx = e1000_1000t_rx_status_undefined; - phy->remote_rx = e1000_1000t_rx_status_undefined; - -out: - return ret_val; -} - -/** - * e1000e_phy_sw_reset - PHY software reset - * @hw: pointer to the HW structure - * - * Does a software reset of the PHY by reading the PHY control register and - * setting/write the control register reset bit to the PHY. - **/ -s32 e1000e_phy_sw_reset(struct e1000_hw *hw) -{ - s32 ret_val; - u16 phy_ctrl; - - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); - if (ret_val) - return ret_val; - - phy_ctrl |= MII_CR_RESET; - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); - if (ret_val) - return ret_val; - - udelay(1); - - return ret_val; -} - -/** - * e1000e_phy_hw_reset_generic - PHY hardware reset - * @hw: pointer to the HW structure - * - * Verify the reset block is not blocking us from resetting. Acquire - * semaphore (if necessary) and read/set/write the device control reset - * bit in the PHY. Wait the appropriate delay time for the device to - * reset and release the semaphore (if necessary). 
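The reset itself, minus the semaphore and reset-block handling, is a pulse on the PHY reset bit of the device control register bracketed by two delays. A sketch with hypothetical MMIO accessors:

extern unsigned int reg_read(unsigned int reg);		/* hypothetical MMIO */
extern void reg_write(unsigned int reg, unsigned int val);
extern void delay_us(unsigned int us);

#define REG_CTRL	0x0000
#define CTRL_PHY_RST	(1u << 31)	/* illustrative bit position */

/* Pulse the PHY reset line through the device control register. */
static void phy_hw_reset(unsigned int reset_delay_us)
{
	unsigned int ctrl = reg_read(REG_CTRL);

	reg_write(REG_CTRL, ctrl | CTRL_PHY_RST);	/* assert reset */
	delay_us(reset_delay_us);			/* hold per PHY spec */
	reg_write(REG_CTRL, ctrl);			/* release reset */
	delay_us(150);					/* let the PHY come back */
}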
- **/ -s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u32 ctrl; - - ret_val = e1000_check_reset_block(hw); - if (ret_val) - return 0; - - ret_val = phy->ops.acquire(hw); - if (ret_val) - return ret_val; - - ctrl = er32(CTRL); - ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); - e1e_flush(); - - udelay(phy->reset_delay_us); - - ew32(CTRL, ctrl); - e1e_flush(); - - udelay(150); - - phy->ops.release(hw); - - return e1000_get_phy_cfg_done(hw); -} - -/** - * e1000e_get_cfg_done - Generic configuration done - * @hw: pointer to the HW structure - * - * Generic function to wait 10 milli-seconds for configuration to complete - * and return success. - **/ -s32 e1000e_get_cfg_done(struct e1000_hw *hw) -{ - mdelay(10); - return 0; -} - -/** - * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY - * @hw: pointer to the HW structure - * - * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. - **/ -s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) -{ - e_dbg("Running IGP 3 PHY init script\n"); - - /* PHY init IGP 3 */ - /* Enable rise/fall, 10-mode work in class-A */ - e1e_wphy(hw, 0x2F5B, 0x9018); - /* Remove all caps from Replica path filter */ - e1e_wphy(hw, 0x2F52, 0x0000); - /* Bias trimming for ADC, AFE and Driver (Default) */ - e1e_wphy(hw, 0x2FB1, 0x8B24); - /* Increase Hybrid poly bias */ - e1e_wphy(hw, 0x2FB2, 0xF8F0); - /* Add 4% to Tx amplitude in Gig mode */ - e1e_wphy(hw, 0x2010, 0x10B0); - /* Disable trimming (TTT) */ - e1e_wphy(hw, 0x2011, 0x0000); - /* Poly DC correction to 94.6% + 2% for all channels */ - e1e_wphy(hw, 0x20DD, 0x249A); - /* ABS DC correction to 95.9% */ - e1e_wphy(hw, 0x20DE, 0x00D3); - /* BG temp curve trim */ - e1e_wphy(hw, 0x28B4, 0x04CE); - /* Increasing ADC OPAMP stage 1 currents to max */ - e1e_wphy(hw, 0x2F70, 0x29E4); - /* Force 1000 ( required for enabling PHY regs configuration) */ - e1e_wphy(hw, 0x0000, 0x0140); - /* Set upd_freq to 6 */ - e1e_wphy(hw, 0x1F30, 0x1606); - /* Disable NPDFE */ - e1e_wphy(hw, 0x1F31, 0xB814); - /* Disable adaptive fixed FFE (Default) */ - e1e_wphy(hw, 0x1F35, 0x002A); - /* Enable FFE hysteresis */ - e1e_wphy(hw, 0x1F3E, 0x0067); - /* Fixed FFE for short cable lengths */ - e1e_wphy(hw, 0x1F54, 0x0065); - /* Fixed FFE for medium cable lengths */ - e1e_wphy(hw, 0x1F55, 0x002A); - /* Fixed FFE for long cable lengths */ - e1e_wphy(hw, 0x1F56, 0x002A); - /* Enable Adaptive Clip Threshold */ - e1e_wphy(hw, 0x1F72, 0x3FB0); - /* AHT reset limit to 1 */ - e1e_wphy(hw, 0x1F76, 0xC0FF); - /* Set AHT master delay to 127 msec */ - e1e_wphy(hw, 0x1F77, 0x1DEC); - /* Set scan bits for AHT */ - e1e_wphy(hw, 0x1F78, 0xF9EF); - /* Set AHT Preset bits */ - e1e_wphy(hw, 0x1F79, 0x0210); - /* Change integ_factor of channel A to 3 */ - e1e_wphy(hw, 0x1895, 0x0003); - /* Change prop_factor of channels BCD to 8 */ - e1e_wphy(hw, 0x1796, 0x0008); - /* Change cg_icount + enable integbp for channels BCD */ - e1e_wphy(hw, 0x1798, 0xD008); - /* - * Change cg_icount + enable integbp + change prop_factor_master - * to 8 for channel A - */ - e1e_wphy(hw, 0x1898, 0xD918); - /* Disable AHT in Slave mode on channel A */ - e1e_wphy(hw, 0x187A, 0x0800); - /* - * Enable LPLU and disable AN to 1000 in non-D0a states, - * Enable SPD+B2B - */ - e1e_wphy(hw, 0x0019, 0x008D); - /* Enable restart AN on an1000_dis change */ - e1e_wphy(hw, 0x001B, 0x2080); - /* Enable wh_fifo read clock in 10/100 modes */ - e1e_wphy(hw, 0x0014, 0x0045); - /* Restart AN, Speed selection is 1000 */ - e1e_wphy(hw, 0x0000, 
0x1340); - - return 0; -} - -/* Internal function pointers */ - -/** - * e1000_get_phy_cfg_done - Generic PHY configuration done - * @hw: pointer to the HW structure - * - * Return success if silicon family did not implement a family specific - * get_cfg_done function. - **/ -static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) -{ - if (hw->phy.ops.get_cfg_done) - return hw->phy.ops.get_cfg_done(hw); - - return 0; -} - -/** - * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex - * @hw: pointer to the HW structure - * - * When the silicon family has not implemented a forced speed/duplex - * function for the PHY, simply return 0. - **/ -static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) -{ - if (hw->phy.ops.force_speed_duplex) - return hw->phy.ops.force_speed_duplex(hw); - - return 0; -} - -/** - * e1000e_get_phy_type_from_id - Get PHY type from id - * @phy_id: phy_id read from the phy - * - * Returns the phy type from the id. - **/ -enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) -{ - enum e1000_phy_type phy_type = e1000_phy_unknown; - - switch (phy_id) { - case M88E1000_I_PHY_ID: - case M88E1000_E_PHY_ID: - case M88E1111_I_PHY_ID: - case M88E1011_I_PHY_ID: - phy_type = e1000_phy_m88; - break; - case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ - phy_type = e1000_phy_igp_2; - break; - case GG82563_E_PHY_ID: - phy_type = e1000_phy_gg82563; - break; - case IGP03E1000_E_PHY_ID: - phy_type = e1000_phy_igp_3; - break; - case IFE_E_PHY_ID: - case IFE_PLUS_E_PHY_ID: - case IFE_C_E_PHY_ID: - phy_type = e1000_phy_ife; - break; - case BME1000_E_PHY_ID: - case BME1000_E_PHY_ID_R2: - phy_type = e1000_phy_bm; - break; - case I82578_E_PHY_ID: - phy_type = e1000_phy_82578; - break; - case I82577_E_PHY_ID: - phy_type = e1000_phy_82577; - break; - case I82579_E_PHY_ID: - phy_type = e1000_phy_82579; - break; - default: - phy_type = e1000_phy_unknown; - break; - } - return phy_type; -} - -/** - * e1000e_determine_phy_address - Determines PHY address. - * @hw: pointer to the HW structure - * - * This uses a trial and error method to loop through possible PHY - * addresses. It tests each by reading the PHY ID registers and - * checking for a match. - **/ -s32 e1000e_determine_phy_address(struct e1000_hw *hw) -{ - s32 ret_val = -E1000_ERR_PHY_TYPE; - u32 phy_addr = 0; - u32 i; - enum e1000_phy_type phy_type = e1000_phy_unknown; - - hw->phy.id = phy_type; - - for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { - hw->phy.addr = phy_addr; - i = 0; - - do { - e1000e_get_phy_id(hw); - phy_type = e1000e_get_phy_type_from_id(hw->phy.id); - - /* - * If phy_type is valid, break - we found our - * PHY address - */ - if (phy_type != e1000_phy_unknown) { - ret_val = 0; - goto out; - } - usleep_range(1000, 2000); - i++; - } while (i < 10); - } - -out: - return ret_val; -} - -/** - * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address - * @page: page to access - * - * Returns the phy address for the page requested. - **/ -static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) -{ - u32 phy_addr = 2; - - if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) - phy_addr = 1; - - return phy_addr; -} - -/** - * e1000e_write_phy_reg_bm - Write BM PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. 
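The address-determination loop above is trial and error over the MDIO address space. A generic sketch of the same idea, probing each address for a plausible ID; the driver instead matches the ID against its known-PHY table and retries each address several times:

#include <stdint.h>

extern int mdio_read(uint8_t phy_addr, uint8_t reg, uint16_t *val);	/* hypothetical */

#define MII_PHYSID1	0x02
#define MII_PHYSID2	0x03

/* Probe all 32 MDIO addresses and return the first that answers with a
 * plausible ID (not all-zeros, not all-ones). */
static int probe_phy_addr(void)
{
	uint8_t addr;

	for (addr = 0; addr < 32; addr++) {
		uint16_t id1, id2;

		if (mdio_read(addr, MII_PHYSID1, &id1) ||
		    mdio_read(addr, MII_PHYSID2, &id2))
			continue;
		if ((id1 == 0x0000 && id2 == 0x0000) ||
		    (id1 == 0xFFFF && id2 == 0xFFFF))
			continue;	/* nothing at this address */
		return addr;
	}
	return -1;			/* mirrors -E1000_ERR_PHY_TYPE */
}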
- **/ -s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) -{ - s32 ret_val; - u32 page = offset >> IGP_PAGE_SHIFT; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - /* Page 800 works differently than the rest so it has its own func */ - if (page == BM_WUC_PAGE) { - ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, - false, false); - goto out; - } - - hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - u32 page_shift, page_select; - - /* - * Page select is register 31 for phy address 1 and 22 for - * phy address 2 and 3. Page select is shifted only for - * phy address 1. - */ - if (hw->phy.addr == 1) { - page_shift = IGP_PAGE_SHIFT; - page_select = IGP01E1000_PHY_PAGE_SELECT; - } else { - page_shift = 0; - page_select = BM_PHY_PAGE_SELECT; - } - - /* Page is shifted left, PHY expects (page x 32) */ - ret_val = e1000e_write_phy_reg_mdic(hw, page_select, - (page << page_shift)); - if (ret_val) - goto out; - } - - ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - -out: - hw->phy.ops.release(hw); - return ret_val; -} - -/** - * e1000e_read_phy_reg_bm - Read BM PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retrieved information in data. Release any acquired - * semaphores before exiting. - **/ -s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) -{ - s32 ret_val; - u32 page = offset >> IGP_PAGE_SHIFT; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - /* Page 800 works differently than the rest so it has its own func */ - if (page == BM_WUC_PAGE) { - ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, - true, false); - goto out; - } - - hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - u32 page_shift, page_select; - - /* - * Page select is register 31 for phy address 1 and 22 for - * phy address 2 and 3. Page select is shifted only for - * phy address 1. - */ - if (hw->phy.addr == 1) { - page_shift = IGP_PAGE_SHIFT; - page_select = IGP01E1000_PHY_PAGE_SELECT; - } else { - page_shift = 0; - page_select = BM_PHY_PAGE_SELECT; - } - - /* Page is shifted left, PHY expects (page x 32) */ - ret_val = e1000e_write_phy_reg_mdic(hw, page_select, - (page << page_shift)); - if (ret_val) - goto out; - } - - ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); -out: - hw->phy.ops.release(hw); - return ret_val; -} - -/** - * e1000e_read_phy_reg_bm2 - Read BM PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retrieved information in data. Release any acquired - * semaphores before exiting. 
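The paged access above encodes a flat offset as page:register, with the quirk that PHY address 1 uses register 31 for page select and wants the page pre-shifted, while addresses 2 and 3 use register 22 and take the raw page number. A sketch of just that split:

#include <stdint.h>

#define IGP_PAGE_SHIFT		5
#define MAX_PHY_REG_ADDRESS	0x1F
#define PAGE_SELECT_ADDR1	31	/* page select reg at PHY address 1 */
#define PAGE_SELECT_ADDR2	22	/* page select reg at PHY addresses 2/3 */

/* Split a flat "page:reg" offset into the page-select write and the
 * 5-bit register number, following the convention in the code above. */
static void split_bm_offset(uint32_t offset, uint8_t phy_addr,
			    uint16_t *page_sel_reg, uint16_t *page_sel_val,
			    uint16_t *reg)
{
	uint32_t page = offset >> IGP_PAGE_SHIFT;

	if (phy_addr == 1) {
		*page_sel_reg = PAGE_SELECT_ADDR1;
		*page_sel_val = page << IGP_PAGE_SHIFT;	/* PHY wants page x 32 */
	} else {
		*page_sel_reg = PAGE_SELECT_ADDR2;
		*page_sel_val = page;			/* no shift here */
	}
	*reg = offset & MAX_PHY_REG_ADDRESS;
}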
- **/ -s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) -{ - s32 ret_val; - u16 page = (u16)(offset >> IGP_PAGE_SHIFT); - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - /* Page 800 works differently than the rest so it has its own func */ - if (page == BM_WUC_PAGE) { - ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, - true, false); - goto out; - } - - hw->phy.addr = 1; - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - - /* Page is shifted left, PHY expects (page x 32) */ - ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, - page); - - if (ret_val) - goto out; - } - - ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); -out: - hw->phy.ops.release(hw); - return ret_val; -} - -/** - * e1000e_write_phy_reg_bm2 - Write BM PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ -s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) -{ - s32 ret_val; - u16 page = (u16)(offset >> IGP_PAGE_SHIFT); - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - - /* Page 800 works differently than the rest so it has its own func */ - if (page == BM_WUC_PAGE) { - ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, - false, false); - goto out; - } - - hw->phy.addr = 1; - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - /* Page is shifted left, PHY expects (page x 32) */ - ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, - page); - - if (ret_val) - goto out; - } - - ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - -out: - hw->phy.ops.release(hw); - return ret_val; -} - -/** - * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers - * @hw: pointer to the HW structure - * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG - * - * Assumes semaphore already acquired and phy_reg points to a valid memory - * address to store contents of the BM_WUC_ENABLE_REG register. - **/ -s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) -{ - s32 ret_val; - u16 temp; - - /* All page select, port ctrl and wakeup registers use phy address 1 */ - hw->phy.addr = 1; - - /* Select Port Control Registers page */ - ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); - if (ret_val) { - e_dbg("Could not set Port Control page\n"); - goto out; - } - - ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); - if (ret_val) { - e_dbg("Could not read PHY register %d.%d\n", - BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); - goto out; - } - - /* - * Enable both PHY wakeup mode and Wakeup register page writes. - * Prevent a power state change by disabling ME and Host PHY wakeup. 
- */ - temp = *phy_reg; - temp |= BM_WUC_ENABLE_BIT; - temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); - - ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); - if (ret_val) { - e_dbg("Could not write PHY register %d.%d\n", - BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); - goto out; - } - - /* Select Host Wakeup Registers page */ - ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); - - /* caller now able to write registers on the Wakeup registers page */ -out: - return ret_val; -} - -/** - * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs - * @hw: pointer to the HW structure - * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG - * - * Restore BM_WUC_ENABLE_REG to its original value. - * - * Assumes semaphore already acquired and *phy_reg is the contents of the - * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by - * caller. - **/ -s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) -{ - s32 ret_val = 0; - - /* Select Port Control Registers page */ - ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); - if (ret_val) { - e_dbg("Could not set Port Control page\n"); - goto out; - } - - /* Restore 769.17 to its original value */ - ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); - if (ret_val) - e_dbg("Could not restore PHY register %d.%d\n", - BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); -out: - return ret_val; -} - -/** - * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register - * @hw: pointer to the HW structure - * @offset: register offset to be read or written - * @data: pointer to the data to read or write - * @read: determines if operation is read or write - * @page_set: BM_WUC_PAGE already set and access enabled - * - * Read the PHY register at offset and store the retrieved information in - * data, or write data to PHY register at offset. Note the procedure to - * access the PHY wakeup registers is different than reading the other PHY - * registers. It works as such: - * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 - * 2) Set page to 800 for host (801 if we were manageability) - * 3) Write the address using the address opcode (0x11) - * 4) Read or write the data using the data opcode (0x12) - * 5) Restore 769.17.2 to its original value - * - * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and - * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm(). - * - * Assumes semaphore is already acquired. When page_set==true, assumes - * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack - * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()). 
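Steps 2 through 4 of the procedure above make a compact helper: select page 800, write the target register number through the address opcode, then move the data through the data opcode. A sketch with hypothetical accessors; steps 1 and 5 (enable/restore of 769.17.2) are left to the caller, as in the driver:

#include <stdint.h>
#include <stdbool.h>

extern int phy_write(uint16_t reg, uint16_t val);	/* hypothetical */
extern int phy_read(uint16_t reg, uint16_t *val);
extern int set_page(uint16_t page);

#define ADDR_OPCODE	0x11	/* write the target register number here */
#define DATA_OPCODE	0x12	/* then read/write the data through here */
#define WUC_PAGE	800

/* One wakeup-register access, following steps 2-4 described above. */
static int wuc_access(uint16_t reg, uint16_t *data, bool read)
{
	int err = set_page(WUC_PAGE);			/* step 2 */

	if (err)
		return err;
	err = phy_write(ADDR_OPCODE, reg);		/* step 3 */
	if (err)
		return err;
	return read ? phy_read(DATA_OPCODE, data)	/* step 4 */
		    : phy_write(DATA_OPCODE, *data);
}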
- **/ -static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, - u16 *data, bool read, bool page_set) -{ - s32 ret_val; - u16 reg = BM_PHY_REG_NUM(offset); - u16 page = BM_PHY_REG_PAGE(offset); - u16 phy_reg = 0; - - /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */ - if ((hw->mac.type == e1000_pchlan) && - (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) - e_dbg("Attempting to access page %d while gig enabled.\n", - page); - - if (!page_set) { - /* Enable access to PHY wakeup registers */ - ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); - if (ret_val) { - e_dbg("Could not enable PHY wakeup reg access\n"); - goto out; - } - } - - e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg); - - /* Write the Wakeup register page offset value using opcode 0x11 */ - ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); - if (ret_val) { - e_dbg("Could not write address opcode to page %d\n", page); - goto out; - } - - if (read) { - /* Read the Wakeup register page value using opcode 0x12 */ - ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, - data); - } else { - /* Write the Wakeup register page value using opcode 0x12 */ - ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, - *data); - } - - if (ret_val) { - e_dbg("Could not access PHY reg %d.%d\n", page, reg); - goto out; - } - - if (!page_set) - ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); - -out: - return ret_val; -} - -/** - * e1000_power_up_phy_copper - Restore copper link in case of PHY power down - * @hw: pointer to the HW structure - * - * In the case of a PHY power down to save power, or to turn off link during a - * driver unload, or wake on lan is not enabled, restore the link to previous - * settings. - **/ -void e1000_power_up_phy_copper(struct e1000_hw *hw) -{ - u16 mii_reg = 0; - - /* The PHY will retain its settings across a power down/up cycle */ - e1e_rphy(hw, PHY_CONTROL, &mii_reg); - mii_reg &= ~MII_CR_POWER_DOWN; - e1e_wphy(hw, PHY_CONTROL, mii_reg); -} - -/** - * e1000_power_down_phy_copper - Restore copper link in case of PHY power down - * @hw: pointer to the HW structure - * - * In the case of a PHY power down to save power, or to turn off link during a - * driver unload, or wake on lan is not enabled, restore the link to previous - * settings. - **/ -void e1000_power_down_phy_copper(struct e1000_hw *hw) -{ - u16 mii_reg = 0; - - /* The PHY will retain its settings across a power down/up cycle */ - e1e_rphy(hw, PHY_CONTROL, &mii_reg); - mii_reg |= MII_CR_POWER_DOWN; - e1e_wphy(hw, PHY_CONTROL, mii_reg); - usleep_range(1000, 2000); -} - -/** - * e1000e_commit_phy - Soft PHY reset - * @hw: pointer to the HW structure - * - * Performs a soft PHY reset on those that apply. This is a function pointer - * entry point called by drivers. - **/ -s32 e1000e_commit_phy(struct e1000_hw *hw) -{ - if (hw->phy.ops.commit) - return hw->phy.ops.commit(hw); - - return 0; -} - -/** - * e1000_set_d0_lplu_state - Sets low power link up state for D0 - * @hw: pointer to the HW structure - * @active: boolean used to enable/disable lplu - * - * Success returns 0, Failure returns 1 - * - * The low power link up (lplu) state is set to the power management level D0 - * and SmartSpeed is disabled when active is true, else clear lplu for D0 - * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU - * is used during Dx states where the power conservation is most important. 
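Both power helpers above are one-bit edits of the MII control register; every other setting survives the power transition, as their comments note. A combined sketch using the standard power-down bit:

#include <stdbool.h>
#include <stdint.h>

#define MII_BMCR	0x00
#define BMCR_PDOWN	0x0800

extern int mii_read(uint8_t reg, uint16_t *val);	/* hypothetical */
extern int mii_write(uint8_t reg, uint16_t val);

/* Toggle the IEEE power-down bit, leaving all other BMCR settings intact. */
static int phy_set_power(bool up)
{
	uint16_t bmcr;
	int err = mii_read(MII_BMCR, &bmcr);

	if (err)
		return err;
	if (up)
		bmcr &= ~BMCR_PDOWN;
	else
		bmcr |= BMCR_PDOWN;
	return mii_write(MII_BMCR, bmcr);
}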
- * During driver activity, SmartSpeed should be enabled so performance is - * maintained. This is a function pointer entry point called by drivers. - **/ -static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) -{ - if (hw->phy.ops.set_d0_lplu_state) - return hw->phy.ops.set_d0_lplu_state(hw, active); - - return 0; -} - -/** - * __e1000_read_phy_reg_hv - Read HV PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * @locked: semaphore has already been acquired or not - * - * Acquires semaphore, if necessary, then reads the PHY register at offset - * and stores the retrieved information in data. Release any acquired - * semaphore before exiting. - **/ -static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, - bool locked, bool page_set) -{ - s32 ret_val; - u16 page = BM_PHY_REG_PAGE(offset); - u16 reg = BM_PHY_REG_NUM(offset); - u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); - - if (!locked) { - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - } - - /* Page 800 works differently than the rest so it has its own func */ - if (page == BM_WUC_PAGE) { - ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, - true, page_set); - goto out; - } - - if (page > 0 && page < HV_INTC_FC_PAGE_START) { - ret_val = e1000_access_phy_debug_regs_hv(hw, offset, - data, true); - goto out; - } - - if (!page_set) { - if (page == HV_INTC_FC_PAGE_START) - page = 0; - - if (reg > MAX_PHY_MULTI_PAGE_REG) { - /* Page is shifted left, PHY expects (page x 32) */ - ret_val = e1000_set_page_igp(hw, - (page << IGP_PAGE_SHIFT)); - - hw->phy.addr = phy_addr; - - if (ret_val) - goto out; - } - } - - e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, - page << IGP_PAGE_SHIFT, reg); - - ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, - data); -out: - if (!locked) - hw->phy.ops.release(hw); - - return ret_val; -} - -/** - * e1000_read_phy_reg_hv - Read HV PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Acquires semaphore then reads the PHY register at offset and stores - * the retrieved information in data. Release the acquired semaphore - * before exiting. - **/ -s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) -{ - return __e1000_read_phy_reg_hv(hw, offset, data, false, false); -} - -/** - * e1000_read_phy_reg_hv_locked - Read HV PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Reads the PHY register at offset and stores the retrieved information - * in data. Assumes semaphore already acquired. - **/ -s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) -{ - return __e1000_read_phy_reg_hv(hw, offset, data, true, false); -} - -/** - * e1000_read_phy_reg_page_hv - Read HV PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Reads the PHY register at offset and stores the retrieved information - * in data. Assumes semaphore already acquired and page already set. 
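The __e1000_read_phy_reg_hv()/e1000_read_phy_reg_hv()/..._locked() trio that follows is the usual locked/unlocked split: one worker takes a "locked" flag, and thin wrappers pin it one way or the other. The shape in isolation, with hypothetical lock and worker functions:

#include <stdbool.h>

extern int lock_acquire(void);		/* hypothetical */
extern void lock_release(void);
extern int do_reg_read(unsigned int offset, unsigned short *data);

/* One worker, two entry points: callers that already hold the lock pass
 * locked=true and skip the acquire/release pair. */
static int reg_read_common(unsigned int offset, unsigned short *data,
			   bool locked)
{
	int err = 0;

	if (!locked) {
		err = lock_acquire();
		if (err)
			return err;
	}
	err = do_reg_read(offset, data);
	if (!locked)
		lock_release();
	return err;
}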
- **/ -s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data) -{ - return __e1000_read_phy_reg_hv(hw, offset, data, true, true); -} - -/** - * __e1000_write_phy_reg_hv - Write HV PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * @locked: semaphore has already been acquired or not - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ -static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, - bool locked, bool page_set) -{ - s32 ret_val; - u16 page = BM_PHY_REG_PAGE(offset); - u16 reg = BM_PHY_REG_NUM(offset); - u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); - - if (!locked) { - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; - } - - /* Page 800 works differently than the rest so it has its own func */ - if (page == BM_WUC_PAGE) { - ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, - false, page_set); - goto out; - } - - if (page > 0 && page < HV_INTC_FC_PAGE_START) { - ret_val = e1000_access_phy_debug_regs_hv(hw, offset, - &data, false); - goto out; - } - - if (!page_set) { - if (page == HV_INTC_FC_PAGE_START) - page = 0; - - /* - * Workaround MDIO accesses being disabled after entering IEEE - * Power Down (when bit 11 of the PHY Control register is set) - */ - if ((hw->phy.type == e1000_phy_82578) && - (hw->phy.revision >= 1) && - (hw->phy.addr == 2) && - ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { - u16 data2 = 0x7EFF; - ret_val = e1000_access_phy_debug_regs_hv(hw, - (1 << 6) | 0x3, - &data2, false); - if (ret_val) - goto out; - } - - if (reg > MAX_PHY_MULTI_PAGE_REG) { - /* Page is shifted left, PHY expects (page x 32) */ - ret_val = e1000_set_page_igp(hw, - (page << IGP_PAGE_SHIFT)); - - hw->phy.addr = phy_addr; - - if (ret_val) - goto out; - } - } - - e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, - page << IGP_PAGE_SHIFT, reg); - - ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, - data); - -out: - if (!locked) - hw->phy.ops.release(hw); - - return ret_val; -} - -/** - * e1000_write_phy_reg_hv - Write HV PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Acquires semaphore then writes the data to PHY register at the offset. - * Release the acquired semaphores before exiting. - **/ -s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) -{ - return __e1000_write_phy_reg_hv(hw, offset, data, false, false); -} - -/** - * e1000_write_phy_reg_hv_locked - Write HV PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Writes the data to PHY register at the offset. Assumes semaphore - * already acquired. - **/ -s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) -{ - return __e1000_write_phy_reg_hv(hw, offset, data, true, false); -} - -/** - * e1000_write_phy_reg_page_hv - Write HV PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Writes the data to PHY register at the offset. Assumes semaphore - * already acquired and page already set. 
- **/
-s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
-{
-	return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
-}
-
-/**
- * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
- * @page: page to be accessed
- **/
-static u32 e1000_get_phy_addr_for_hv_page(u32 page)
-{
-	u32 phy_addr = 2;
-
-	if (page >= HV_INTC_FC_PAGE_START)
-		phy_addr = 1;
-
-	return phy_addr;
-}
-
-/**
- * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
- * @hw: pointer to the HW structure
- * @offset: register offset to be read or written
- * @data: pointer to the data to be read or written
- * @read: determines if operation is read or write
- *
- * Reads the PHY register at offset and stores the retrieved information
- * in data.  Assumes semaphore already acquired.  Note that the procedure
- * to access these regs uses the address port and data port to read/write.
- * These accesses are done with PHY address 2 and without using pages.
- **/
-static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
-					  u16 *data, bool read)
-{
-	s32 ret_val;
-	u32 addr_reg = 0;
-	u32 data_reg = 0;
-
-	/* This takes care of the difference with desktop vs mobile phy */
-	addr_reg = (hw->phy.type == e1000_phy_82578) ?
-		   I82578_ADDR_REG : I82577_ADDR_REG;
-	data_reg = addr_reg + 1;
-
-	/* All operations in this function are phy address 2 */
-	hw->phy.addr = 2;
-
-	/* masking with 0x3F to remove the page from offset */
-	ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
-	if (ret_val) {
-		e_dbg("Could not write the Address Offset port register\n");
-		goto out;
-	}
-
-	/* Read or write the data value next */
-	if (read)
-		ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
-	else
-		ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
-
-	if (ret_val) {
-		e_dbg("Could not access the Data port register\n");
-		goto out;
-	}
-
-out:
-	return ret_val;
-}
-
-/**
- * e1000_link_stall_workaround_hv - Si workaround
- * @hw: pointer to the HW structure
- *
- * This function works around a Si bug where the link partner can get
- * a link up indication before the PHY does.  If small packets are sent
- * by the link partner they can be placed in the packet buffer without
- * being properly accounted for by the PHY and will stall preventing
- * further packets from being received.  The workaround is to clear the
- * packet buffer after the PHY detects link up.
- **/
-s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
-{
-	s32 ret_val = 0;
-	u16 data;
-
-	if (hw->phy.type != e1000_phy_82578)
-		goto out;
-
-	/* Do not apply workaround if PHY loopback (bit 14) is set */
-	e1e_rphy(hw, PHY_CONTROL, &data);
-	if (data & PHY_CONTROL_LB)
-		goto out;
-
-	/* check if link is up and at 1Gbps */
-	ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
-	if (ret_val)
-		goto out;
-
-	data &= BM_CS_STATUS_LINK_UP |
-		BM_CS_STATUS_RESOLVED |
-		BM_CS_STATUS_SPEED_MASK;
-
-	if (data != (BM_CS_STATUS_LINK_UP |
-		     BM_CS_STATUS_RESOLVED |
-		     BM_CS_STATUS_SPEED_1000))
-		goto out;
-
-	mdelay(200);
-
-	/* flush the packets in the fifo buffer */
-	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
-			   HV_MUX_DATA_CTRL_FORCE_SPEED);
-	if (ret_val)
-		goto out;
-
-	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
-
-out:
-	return ret_val;
-}
-
-/**
- * e1000_check_polarity_82577 - Checks the polarity.
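The debug-register helper above is classic indirect addressing: an address port, a data port one register later, and the page bits masked off the offset. A sketch with hypothetical accessors:

#include <stdint.h>
#include <stdbool.h>

extern int phy_write(uint16_t reg, uint16_t val);	/* hypothetical */
extern int phy_read(uint16_t reg, uint16_t *val);

/* Indirect access through an address port and the data port that
 * immediately follows it, as in the debug-register helper above. */
static int indirect_access(uint16_t addr_port, uint16_t offset,
			   uint16_t *data, bool read)
{
	int err = phy_write(addr_port, offset & 0x3F);	/* strip the page bits */

	if (err)
		return err;
	return read ? phy_read(addr_port + 1, data)
		    : phy_write(addr_port + 1, *data);
}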
- * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns -E1000_ERR_PHY (-2) - * - * Polarity is determined based on the PHY specific status register. - **/ -s32 e1000_check_polarity_82577(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - - ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); - - if (!ret_val) - phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; - - return ret_val; -} - -/** - * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY - * @hw: pointer to the HW structure - * - * Calls the PHY setup function to force speed and duplex. - **/ -s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - - ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); - if (ret_val) - goto out; - - e1000e_phy_force_speed_duplex_setup(hw, &phy_data); - - ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); - if (ret_val) - goto out; - - udelay(1); - - if (phy->autoneg_wait_to_complete) { - e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); - - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; - - if (!link) - e_dbg("Link taking longer than expected.\n"); - - /* Try once more */ - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; - } - -out: - return ret_val; -} - -/** - * e1000_get_phy_info_82577 - Retrieve I82577 PHY information - * @hw: pointer to the HW structure - * - * Read PHY status to determine if link is up. If link is up, then - * set/determine 10base-T extended distance and polarity correction. Read - * PHY port status to determine MDI/MDIx and speed. Based on the speed, - * determine on the cable length, local and remote receiver. - **/ -s32 e1000_get_phy_info_82577(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - bool link; - - ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); - if (ret_val) - goto out; - - if (!link) { - e_dbg("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - phy->polarity_correction = true; - - ret_val = e1000_check_polarity_82577(hw); - if (ret_val) - goto out; - - ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); - if (ret_val) - goto out; - - phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; - - if ((data & I82577_PHY_STATUS2_SPEED_MASK) == - I82577_PHY_STATUS2_SPEED_1000MBPS) { - ret_val = hw->phy.ops.get_cable_length(hw); - if (ret_val) - goto out; - - ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); - if (ret_val) - goto out; - - phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - - phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - } else { - phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; - phy->local_rx = e1000_1000t_rx_status_undefined; - phy->remote_rx = e1000_1000t_rx_status_undefined; - } - -out: - return ret_val; -} - -/** - * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY - * @hw: pointer to the HW structure - * - * Reads the diagnostic status register and verifies result is valid before - * placing it in the phy_cable_length field. 
- **/
-s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
-{
-	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val;
-	u16 phy_data, length;
-
-	ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
-	if (ret_val)
-		goto out;
-
-	length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
-		 I82577_DSTATUS_CABLE_LENGTH_SHIFT;
-
-	if (length == E1000_CABLE_LENGTH_UNDEFINED)
-		ret_val = -E1000_ERR_PHY;
-
-	phy->cable_length = length;
-
-out:
-	return ret_val;
-}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 69d6403..a2fd385 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -16,5 +16,6 @@ source "drivers/net/ethernet/8390/Kconfig"
 source "drivers/net/ethernet/amd/Kconfig"
 source "drivers/net/ethernet/broadcom/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/intel/Kconfig"
 
 endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 470e5d8..5265271 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_NET_VENDOR_8390) += 8390/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
+obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
new file mode 100644
index 0000000..5fe185b
--- /dev/null
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -0,0 +1,220 @@
+#
+# Intel network device configuration
+#
+
+config NET_VENDOR_INTEL
+	bool "Intel devices"
+	depends on PCI || PCI_MSI
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Intel cards. If you say Y, you will be asked for
+	  your specific card in the following questions.
+
+if NET_VENDOR_INTEL
+
+config E100
+	tristate "Intel(R) PRO/100+ support"
+	depends on PCI
+	select MII
+	---help---
+	  This driver supports the Intel(R) PRO/100 family of adapters.
+	  To verify that your adapter is supported, find the board ID number
+	  on the adapter. Look for a label that has a barcode and a number
+	  in the format 123456-001 (six digits hyphen three digits).
+
+	  Use the above information and the Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  to identify the adapter.
+
+	  For the latest Intel PRO/100 network driver for Linux, see:
+
+	  <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/e100.txt>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called e100.
+
+config E1000
+	tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
+	depends on PCI
+	---help---
+	  This driver supports the Intel(R) PRO/1000 gigabit ethernet family
+	  of adapters.  For more information on how to identify your adapter,
+	  go to the Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/e1000.txt>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called e1000.
+
+config E1000E
+	tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
+	depends on PCI && (!SPARC32 || BROKEN)
+	select CRC32
+	---help---
+	  This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
+	  ethernet family of adapters.
+	  For PCI or PCI-X e1000 adapters, use the regular e1000 driver.
+	  For more information on how to identify your adapter, go to the
+	  Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called e1000e.
+
+config IGB
+	tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
+	depends on PCI
+	---help---
+	  This driver supports the Intel(R) 82575/82576 gigabit ethernet
+	  family of adapters.  For more information on how to identify your
+	  adapter, go to the Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/e1000.txt>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called igb.
+
+config IGB_DCA
+	bool "Direct Cache Access (DCA) Support"
+	default y
+	depends on IGB && DCA && !(IGB=y && DCA=m)
+	---help---
+	  Say Y here if you want to use Direct Cache Access (DCA) in the
+	  driver.  DCA is a method for warming the CPU cache before data
+	  is used, with the intent of lessening the impact of cache misses.
+
+config IGBVF
+	tristate "Intel(R) 82576 Virtual Function Ethernet support"
+	depends on PCI
+	---help---
+	  This driver supports Intel(R) 82576 virtual functions.  For more
+	  information on how to identify your adapter, go to the Adapter &
+	  Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/e1000.txt>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called igbvf.
+
+config IXGB
+	tristate "Intel(R) PRO/10GbE support"
+	depends on PCI
+	---help---
+	  This driver supports the Intel(R) PRO/10GbE family of adapters for
+	  PCI-X type cards.  For PCI-E type cards, use the "ixgbe" driver
+	  instead.  For more information on how to identify your adapter, go
+	  to the Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/ixgb.txt>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ixgb.
+
+config IXGBE
+	tristate "Intel(R) 10GbE PCI Express adapters support"
+	depends on PCI && INET
+	select MDIO
+	---help---
+	  This driver supports the Intel(R) 10GbE PCI Express family of
+	  adapters.  For more information on how to identify your adapter, go
+	  to the Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ixgbe.
+
+config IXGBE_DCA
+	bool "Direct Cache Access (DCA) Support"
+	default y
+	depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
+	---help---
+	  Say Y here if you want to use Direct Cache Access (DCA) in the
+	  driver.  DCA is a method for warming the CPU cache before data
+	  is used, with the intent of lessening the impact of cache misses.
+
+config IXGBE_DCB
+	bool "Data Center Bridging (DCB) Support"
+	default n
+	depends on IXGBE && DCB
+	---help---
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
+
+	  If unsure, say N.
+
+config IXGBEVF
+	tristate "Intel(R) 82599 Virtual Function Ethernet support"
+	depends on PCI_MSI
+	---help---
+	  This driver supports Intel(R) 82599 virtual functions.
+	  For more information on how to identify your adapter, go to the
+	  Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/ixgbevf.txt>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ixgbevf.  MSI-X interrupt support is required
+	  for this driver to work correctly.
+
+endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
new file mode 100644
index 0000000..c8210e6
--- /dev/null
+++ b/drivers/net/ethernet/intel/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the Intel network device drivers.
+#
+
+obj-$(CONFIG_E100) += e100.o
+obj-$(CONFIG_E1000) += e1000/
+obj-$(CONFIG_E1000E) += e1000e/
+obj-$(CONFIG_IGB) += igb/
+obj-$(CONFIG_IGBVF) += igbvf/
+obj-$(CONFIG_IXGBE) += ixgbe/
+obj-$(CONFIG_IXGBEVF) += ixgbevf/
+obj-$(CONFIG_IXGB) += ixgb/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
new file mode 100644
index 0000000..c1352c6
--- /dev/null
+++ b/drivers/net/ethernet/intel/e100.c
@@ -0,0 +1,3109 @@
+/*******************************************************************************
+
+  Intel PRO/100 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ *	e100.c: Intel(R) PRO/100 ethernet driver
+ *
+ *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
+ *	original e100 driver, but better described as a munging of
+ *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
+ *
+ *	References:
+ *	Intel 8255x 10/100 Mbps Ethernet Controller Family,
+ *	Open Source Software Developers Manual,
+ *	http://sourceforge.net/projects/e1000
+ *
+ *
+ *	                     Theory of Operation
+ *
+ *	I.   General
+ *
+ *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
+ *	controller family, which includes the 82557, 82558, 82559, 82550,
+ *	82551, and 82562 devices.  82558 and greater controllers
+ *	integrate the Intel 82555 PHY.  The controllers are used in
+ *	server and client network interface cards, as well as in
+ *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
+ *	configurations.  8255x supports a 32-bit linear addressing
+ *	mode and operates at a 33 MHz PCI clock rate.
+ *
+ *	II.  Driver Operation
+ *
+ *	Memory-mapped mode is used exclusively to access the device's
+ *	shared-memory structure, the Control/Status Registers (CSR). All
+ *	setup, configuration, and control of the device, including queuing
+ *	of Tx, Rx, and configuration commands is through the CSR.
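+ *
+ *	For instance, issuing a command-unit command boils down to writing
+ *	the command block's bus address to the SCB general pointer and the
+ *	opcode to the SCB command register (a minimal sketch; the locked,
+ *	error-checked version is e100_exec_cmd() below):
+ *
+ *		iowrite32(cb->dma_addr, &nic->csr->scb.gen_ptr);
+ *		iowrite8(cuc_start, &nic->csr->scb.cmd_lo);
+ *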
+ *	cmd_lock serializes accesses to the CSR command register.  cb_lock
+ *	protects the shared Command Block List (CBL).
+ *
+ *	8255x is highly MII-compliant and all accesses to the PHY go
+ *	through the Management Data Interface (MDI).  Consequently, the
+ *	driver leverages the mii.c library shared with other MII-compliant
+ *	devices.
+ *
+ *	Big- and Little-Endian byte order as well as 32- and 64-bit
+ *	archs are supported.  Weak-ordered memory and non-cache-coherent
+ *	archs are supported.
+ *
+ *	III. Transmit
+ *
+ *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
+ *	together in a fixed-size ring (CBL) thus forming the flexible mode
+ *	memory structure.  A TCB marked with the suspend-bit indicates
+ *	the end of the ring.  The last TCB processed suspends the
+ *	controller, and the controller can be restarted by issuing a CU
+ *	resume command to continue from the suspend point, or a CU start
+ *	command to start at a given position in the ring.
+ *
+ *	Non-Tx commands (config, multicast setup, etc) are linked
+ *	into the CBL ring along with Tx commands.  The common structure
+ *	used for both Tx and non-Tx commands is the Command Block (CB).
+ *
+ *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
+ *	is the next CB to check for completion; cb_to_send is the first
+ *	CB to start on in case of a previous failure to resume.  CB clean
+ *	up happens in interrupt context in response to a CU interrupt.
+ *	cbs_avail keeps track of the number of free CB resources available.
+ *
+ *	Hardware padding of short packets to the minimum packet size is
+ *	enabled.  82557 pads with 7Eh, while the later controllers pad
+ *	with 00h.
+ *
+ *	IV.  Receive
+ *
+ *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
+ *	Descriptors (RFD) + data buffer, thus forming the simplified mode
+ *	memory structure.  Rx skbs are allocated to contain both the RFD
+ *	and the data buffer, but the RFD is pulled off before the skb is
+ *	indicated.  The data buffer is aligned such that encapsulated
+ *	protocol headers are u32-aligned.  Since the RFD is part of the
+ *	mapped shared memory, and completion status is contained within
+ *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
+ *	view from software and hardware.
+ *
+ *	In order to keep updates to the RFD link field from colliding with
+ *	hardware writes to mark packets complete, we use the feature that
+ *	hardware will not write to a size 0 descriptor and mark the previous
+ *	packet as end-of-list (EL).  After updating the link, we remove EL
+ *	and only then restore the size such that hardware may use the
+ *	previous-to-end RFD.
+ *
+ *	Under typical operation, the receive unit (RU) is started once,
+ *	and the controller happily fills RFDs as frames arrive.  If
+ *	replacement RFDs cannot be allocated, or the RU goes non-active,
+ *	the RU must be restarted.  Frame arrival generates an interrupt,
+ *	and Rx indication and re-allocation happen in the same context,
+ *	therefore no locking is required.  A software-generated interrupt
+ *	is generated from the watchdog to recover from a failed allocation
+ *	scenario where all Rx resources have been indicated and none
+ *	replaced.
+ *
+ *	V.   Miscellaneous
+ *
+ *	VLAN offloading of tagging, stripping and filtering is not
+ *	supported, but the driver will accommodate the extra 4-byte VLAN tag
+ *	for processing by upper layers.  Tx/Rx Checksum offloading is not
+ *	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
+ *	not supported (hardware limitation).
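+ *
+ *	(The VLAN accommodation shows up in the Rx buffer sizing below:
+ *	RFD_BUF_LEN is sizeof(struct rfd) + VLAN_ETH_FRAME_LEN, so a
+ *	maximum-size tagged frame still fits in a single buffer.)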
+ *
+ *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
+ *
+ *	Thanks to JC (jchapman@katalix.com) for helping with
+ *	testing/troubleshooting the development driver.
+ *
+ *	TODO:
+ *	o several entry points race with dev->close
+ *	o check for tx-no-resources/stop Q races with tx clean/wake Q
+ *
+ *	FIXES:
+ *	2005/12/02 - Michael O'Donnell
+ *	- Stratus87247: protect MDI control register manipulations
+ *	2009/06/01 - Andreas Mohr
+ *	- add clean lowlevel I/O emulation for cards with MII-lacking PHYs
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+#include <linux/rtnetlink.h>
+#include <asm/unaligned.h>
+
+
+#define DRV_NAME		"e100"
+#define DRV_EXT			"-NAPI"
+#define DRV_VERSION		"3.5.24-k2"DRV_EXT
+#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
+#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
+
+#define E100_WATCHDOG_PERIOD	(2 * HZ)
+#define E100_NAPI_WEIGHT	16
+
+#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
+#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
+#define FIRMWARE_D102E		"e100/d102e_ucode.bin"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_FIRMWARE(FIRMWARE_D101M);
+MODULE_FIRMWARE(FIRMWARE_D101S);
+MODULE_FIRMWARE(FIRMWARE_D102E);
+
+static int debug = 3;
+static int eeprom_bad_csum_allow = 0;
+static int use_io = 0;
+module_param(debug, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0);
+module_param(use_io, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
+MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
+
+#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
+	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
+	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
+static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
+	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
+	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
+	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
+	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
+	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
+	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
+	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
+	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
+	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
+	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
+	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
+	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
+	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
+	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
+	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
+	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
+	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
+	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
+	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
+	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
+	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
+	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
+	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
+	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
+	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
+	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
+	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
+	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
+	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
+	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
+	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; +MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id = 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t 
dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_discard_overruns:1), rx_save_bad_frames:1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, 
u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct pci_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_over_length_errors; + + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase 
disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, 
nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags; + unsigned int i; + int err = 0; + + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&nic->cb_lock, flags); + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + cb_prepare(nic, cb, skb); + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. */ + cb->command |= cpu_to_le16(cb_s); + wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + spin_unlock_irqrestore(&nic->cb_lock, flags); + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. 
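+	 *
+	 * The single 32-bit write below packs the whole MDI transaction:
+	 * data in bits 15:0, the PHY register number in bits 20:16, the
+	 * PHY address in bits 25:21, and the mdi_read/mdi_write opcode
+	 * above that.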
+ */ + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. + */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? 
mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + 
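+	/* The config block is now fully populated; byte_count set above
+	 * (0x16, or 0x20 when the extended TCO fields are used) tells the
+	 * device how many of these bytes to load, and the dumps below
+	 * print the block for debugging. */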
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+}
+
+/*************************************************************************
+* CPUSaver parameters
+*
+* All CPUSaver parameters are 16-bit literals that are part of a
+* "move immediate value" instruction.  By changing the value of
+* the literal in the instruction before the code is loaded, the
+* driver can change the algorithm.
+*
+* INTDELAY - This loads the dead-man timer with its initial value.
+*    When this timer expires the interrupt is asserted, and the
+*    timer is reset each time a new packet is received.  (see
+*    BUNDLEMAX below to set the limit on number of chained packets)
+*    The current default is 0x600 or 1536.  Experiments show that
+*    the value should probably stay within the 0x200 - 0x1000 range.
+*
+* BUNDLEMAX -
+*    This sets the maximum number of frames that will be bundled.  In
+*    some situations, such as the TCP windowing algorithm, it may be
+*    better to limit the growth of the bundle size than let it go as
+*    high as it can, because that could cause too much added latency.
+*    The default is six, because this is the number of packets in the
+*    default TCP window size.  A value of 1 would make CPUSaver indicate
+*    an interrupt for every frame received.  If you do not want to put
+*    a limit on the bundle size, set this value to 0xFFFF.
+*
+* BUNDLESMALL -
+*    This contains a bit-mask describing the minimum size frame that
+*    will be bundled.  The default masks the lower 7 bits, which means
+*    that any frame less than 128 bytes in length will not be bundled,
+*    but will instead immediately generate an interrupt.  This does
+*    not affect the current bundle in any way.  Any frame that is 128
+*    bytes or larger will be bundled normally.  This feature is meant
+*    to provide immediate indication of ACK frames in a TCP environment.
+*    Customers were seeing poor performance when a machine with CPUSaver
+*    enabled was sending but not receiving.  The delay introduced when
+*    the ACKs were received was enough to reduce total throughput, because
+*    the sender would sit idle until the ACK was finally seen.
+*
+*    The current default is 0xFF80, which masks out the lower 7 bits.
+*    This means that any frame which is 0x7F (127) bytes or smaller
+*    will cause an immediate interrupt.  Because this value must be a
+*    bit mask, there are only a few valid values that can be used.  To
+*    turn this feature off, the driver can write the value 0xFFFF to the
+*    lower word of this instruction (in the same way that the other
+*    parameters are used).  Likewise, a value of 0xF800 (2047) would
+*    cause an interrupt to be generated for every frame, because all
+*    standard Ethernet frames are <= 2047 bytes in length.
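+*
+*    Example: with the default 0xFF80 mask, a 64-byte TCP ACK gives
+*    64 & 0xFF80 == 0 and so interrupts immediately, while a 1500-byte
+*    data frame gives a non-zero result and is bundled as usual.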
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision */ + if (nic->mac == mac_82559_D101M) + fw_name = FIRMWARE_D101M; + else if (nic->mac == mac_82559_D101S) + fw_name = FIRMWARE_D101S; + else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) + fw_name = FIRMWARE_D102E; + else /* No ucode on other devices */ + return NULL; + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static void e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... */ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 
0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (!fw || IS_ERR(fw)) + return PTR_ERR(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); +} + +static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... + * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +#define ADVERTISE_FC_SUPPORTED 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. 
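+		 * (That lookup reads the interface-type nibble the EEPROM
+		 * stores in bits 11:8 of word eeprom_phy_iface; see
+		 * e100_phy_check_without_mii() above.)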
*/ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. */ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 
0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if (!in_interrupt() && (err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
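+	 *
+	 * The completion dword lands right after the last counter a given
+	 * MAC revision implements, which is why *complete above points at
+	 * fc_xmt_pause (82557), xmt_tco_frames (82558) or s->complete
+	 * (82559 and later).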
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog(unsigned long data) +{ + struct nic *nic = (struct nic *)data; + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
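+	 * (The watchdog runs every E100_WATCHDOG_PERIOD, i.e. every two
+	 * seconds, which bounds how long such a stall can last.)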
+ * Unfortunately we have to use a spinlock so as not to re-enable + * interrupts accidentally, due to hardware that shares a register + * between the interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static void e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = nic->tx_command; + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE)); + /* check for mapping failure? */ + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it.
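+ * Returning NETDEV_TX_BUSY below tells the stack to hold on to
+ * the skb and resubmit it once e100_tx_clean() wakes the queue.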
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + memset(nic->cbs, 0, count * sizeof(struct cb)); + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
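+ * A blank RFD template is copied to the head of the new skb so
+ * the hardware finds a ready descriptor in front of the packet
+ * data buffer.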
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), + PCI_DMA_FROMDEVICE); + return -ENODATA; + } + + /* Get actual data size */ + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. 
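+ * Checking rus_no_res in the SCB status tells us whether the RU
+ * really stopped; if so it is flagged RU_SUSPENDED and restarted
+ * from e100_rx_clean().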
*/ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + + if (unlikely(!(rfd_status & cb_ok))) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + dev_kfree_skb_any(skb); + } else { + dev->stats.rx_packets++; + dev->stats.rx_bytes += actual_size; + netif_receive_skb(skb); + if (work_done) + (*work_done)++; + } + + rx->skb = NULL; + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + /* Alloc new skbs to refill list */ + for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if (unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. */ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + new_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? 
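+ // Writing stat_ack_rnr back to scb.stat_ack clears the pending
+ // receive-not-ready event before the RU is restarted below.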
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. */ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + e100_enable_irq(nic); + } + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if 
(!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_change_mtu(struct net_device *netdev, int new_mtu) +{ + if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN) + return -EINVAL; + netdev->mtu = new_mtu; + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (nic->eeprom[eeprom_config_asf] & eeprom_asf) && + !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) && + ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + mod_timer(&nic->watchdog, jiffies); + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + return 0; + +err_no_irq: + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. 
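+ * (The 10 msec sleep after transmit is assumed to be long enough
+ * for the looped-back frame to land in the first RFD before it
+ * is synced and compared.)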
*/ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct nic *nic = netdev_priv(netdev); + return mii_ethtool_gset(&nic->mii, cmd); +} + +static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_sset(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); + strcpy(info->bus_info, pci_name(nic->pdev)); +} + +#define E100_PHY_REGS 0x1C +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = E100_PHY_REGS; i >= 0; i--) + buff[1 + E100_PHY_REGS - i] = + mdio_read(netdev, nic->mii.phy_id, i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? 
WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i, err; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = 
!mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + err = mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + err = mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_settings = e100_get_settings, + .set_settings = e100_set_settings, + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = 
e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem), + &nic->dma_addr); + return nic->mem ? 0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + pci_free_consistent(nic->pdev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_change_mtu = e100_change_mtu, + .ndo_do_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif +}; + +static int __devinit e100_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) { + if (((1 << debug) - 1) & NETIF_MSG_PROBE) + pr_err("Etherdev alloc failed, aborting\n"); + return -ENOMEM; + } + + netdev->netdev_ops = &e100_netdev_ops; + SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 
1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + init_timer(&nic->watchdog); + nic->watchdog.function = e100_watchdog; + nic->watchdog.data = (unsigned long)nic; + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN); + if (!is_valid_ether_addr(netdev->perm_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (nic->eeprom[eeprom_id] & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); + goto err_out_free; + } + nic->cbs_pool = pci_pool_create(netdev->name, + nic->pdev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + return 0; + +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + return err; +} + +static void __devexit e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + unregister_netdev(netdev); + e100_free(nic); + pci_iounmap(pdev, nic->csr); + pci_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (netif_running(netdev)) + e100_down(nic); + netif_device_detach(netdev); + + pci_save_state(pdev); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_disable_device(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +#ifdef CONFIG_PM +static int e100_suspend(struct pci_dev *pdev, pm_message_t state) +{ + bool wake; + __e100_shutdown(pdev, &wake); + return __e100_power_off(pdev, wake); +} + +static int e100_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, 0, 0); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + netif_device_attach(netdev); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} +#endif /* CONFIG_PM */ + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. + * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. 
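+ * The PCI error-recovery core will reset the slot and then call
+ * e100_io_slot_reset() below to bring the device back up.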
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, 0, 0); + + netif_device_attach(netdev); + if (netif_running(netdev)) { + e100_open(netdev); + mod_timer(&nic->watchdog, jiffies); + } +} + +static struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = __devexit_p(e100_remove), +#ifdef CONFIG_PM + /* Power Management hooks */ + .suspend = e100_suspend, + .resume = e100_resume, +#endif + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + pci_unregister_driver(&e100_driver); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile new file mode 100644 index 0000000..4a6ab15 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/Makefile @@ -0,0 +1,35 @@ +################################################################################ +# +# Intel PRO/1000 Linux driver +# Copyright(c) 1999 - 2006 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) PRO/1000 ethernet driver +# + +obj-$(CONFIG_E1000) += e1000.o + +e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h new file mode 100644 index 0000000..24f41da --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -0,0 +1,361 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw.h" + +#define E1000_MAX_INTR 10 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 
bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct e1000_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + u16 mapped_as_page; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) \ + ? 
0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + struct timer_list tx_fifo_stall_timer; + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + struct work_struct reset_task; + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct fifo_stall_task; + struct work_struct phy_info_task; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) \ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) 
\ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; +extern const char e1000_driver_version[]; + +extern int e1000_up(struct e1000_adapter *adapter); +extern void e1000_down(struct e1000_adapter *adapter); +extern void e1000_reinit_locked(struct e1000_adapter *adapter); +extern void e1000_reset(struct e1000_adapter *adapter); +extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +extern void e1000_update_stats(struct e1000_adapter *adapter); +extern bool e1000_has_link(struct e1000_adapter *adapter); +extern void e1000_power_up_phy(struct e1000_adapter *); +extern void e1000_set_ethtool_ops(struct net_device *netdev); +extern void e1000_check_options(struct e1000_adapter *adapter); +extern char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c new file mode 100644 index 0000000..5548d46 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -0,0 +1,1863 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for e1000 */ + +#include "e1000.h" +#include + +enum {NETDEV_STATS, E1000_STATS}; + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(m) E1000_STATS, \ + sizeof(((struct e1000_adapter *)0)->m), \ + offsetof(struct e1000_adapter, m) +#define E1000_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct net_device *)0)->m), \ + offsetof(struct net_device, m) + +static const struct e1000_stats e1000_gstrings_stats[] = { + { "rx_packets", E1000_STAT(stats.gprc) }, + { "tx_packets", E1000_STAT(stats.gptc) }, + { "rx_bytes", E1000_STAT(stats.gorcl) }, + { "tx_bytes", E1000_STAT(stats.gotcl) }, + { "rx_broadcast", E1000_STAT(stats.bprc) }, + { "tx_broadcast", E1000_STAT(stats.bptc) }, + { "rx_multicast", E1000_STAT(stats.mprc) }, + { "tx_multicast", E1000_STAT(stats.mptc) }, + { "rx_errors", E1000_STAT(stats.rxerrc) }, + { "tx_errors", E1000_STAT(stats.txerrc) }, + { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) }, + { "multicast", E1000_STAT(stats.mprc) }, + { "collisions", E1000_STAT(stats.colc) }, + { "rx_length_errors", E1000_STAT(stats.rlerrc) }, + { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) }, + { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, + { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) }, + { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, + { "rx_missed_errors", E1000_STAT(stats.mpc) }, + { "tx_aborted_errors", E1000_STAT(stats.ecol) }, + { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, + { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) }, + { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) }, + { "tx_window_errors", E1000_STAT(stats.latecol) }, + { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, + { "tx_deferred_ok", E1000_STAT(stats.dc) }, + { "tx_single_coll_ok", E1000_STAT(stats.scc) }, + { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, + { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, + { "tx_restart_queue", E1000_STAT(restart_queue) }, + { "rx_long_length_errors", E1000_STAT(stats.roc) }, + { "rx_short_length_errors", E1000_STAT(stats.ruc) }, + { "rx_align_errors", E1000_STAT(stats.algnerrc) }, + { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, + { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, + { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, + { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, + { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, + { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, + { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, + { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, + { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, + { "tx_smbus", E1000_STAT(stats.mgptc) }, + { "rx_smbus", E1000_STAT(stats.mgprc) }, + { "dropped_smbus", E1000_STAT(stats.mgpdc) }, +}; + +#define E1000_QUEUE_STATS_LEN 0 +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_settings(struct net_device *netdev, + struct ethtool_cmd 
*ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy_addr; + + if (hw->mac_type == e1000_82543) + ecmd->transceiver = XCVR_EXTERNAL; + else + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + + if (hw->mac_type >= e1000_82545) + ecmd->transceiver = XCVR_INTERNAL; + else + ecmd->transceiver = XCVR_EXTERNAL; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF */ + + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, -1); + ecmd->duplex = -1; + } + + ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + return 0; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + ecmd->advertising = hw->autoneg_advertised; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* + * If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) + pause->rx_pause = 1; + else if (hw->fc == E1000_FC_TX_PAUSE) + pause->tx_pause = 1; + else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else + e1000_reset(adapter); + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? + e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, 
IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 
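
e1000_get_eeprom above turns the byte-granular ethtool request into 16-bit word indices before reading the part, then copies out from an odd byte offset into the word buffer when needed. The index arithmetic in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned offset = 5, len = 6;               /* bytes 5..10 requested */
        unsigned first_word = offset >> 1;                       /* word 2 */
        unsigned last_word  = (offset + len - 1) >> 1;           /* word 5 */
        unsigned nwords     = last_word - first_word + 1;        /* 4 words */

        /* the requested bytes start (offset & 1) bytes into the buffer
         * read back from the device */
        printf("read words %u..%u (%u), copy out from byte %u\n",
               first_word, last_word, nwords, offset & 1);
        return 0;
    }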
2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; + + strncpy(drvinfo->driver, e1000_driver_name, 32); + strncpy(drvinfo->version, e1000_driver_version, 32); + + sprintf(firmware_version, "N/A"); + strncpy(drvinfo->fw_version, firmware_version, 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->regdump_len = e1000_get_regs_len(netdev); + drvinfo->eedump_len = e1000_get_eeprom_len(netdev); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? 
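
e1000_set_eeprom handles requests that start or end mid-word by reading the edge words first, so the bytes outside the requested range survive the write-back. The same read-modify-write discipline as a standalone sketch (read_word()/write_word() are hypothetical device accessors; bytes within a word are taken as little-endian, which is what the driver arranges with le16_to_cpus()):

    #include <stdint.h>
    #include <string.h>

    extern uint16_t read_word(unsigned idx);            /* hypothetical */
    extern void write_word(unsigned idx, uint16_t val); /* hypothetical */

    void write_bytes(unsigned off, const uint8_t *src, unsigned len)
    {
        unsigned first = off >> 1, last = (off + len - 1) >> 1;
        unsigned nwords = last - first + 1;
        uint16_t buf[nwords];

        if (off & 1)                 /* preserve low byte of first word */
            buf[0] = read_word(first);
        if ((off + len) & 1)         /* preserve high byte of last word */
            buf[nwords - 1] = read_word(last);

        memcpy((uint8_t *)buf + (off & 1), src, len);

        for (unsigned i = 0; i < nwords; i++)
            write_word(first + i, buf[i]);
    }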
E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + + txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); + txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ? + E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + kfree(tx_old); + kfree(rx_old); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + if (err) + goto err_setup; + } + + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + e1000_up(adapter); +err_setup: + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = + {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = 
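
Note the ordering in e1000_set_ringparam: the replacement rings are allocated and set up before the old ones are freed, so a failed resize leaves the interface running on its original descriptor rings. The shape of that swap, reduced to a single ring (ring_alloc()/ring_free() are hypothetical helpers):

    #include <stdlib.h>

    struct ring { void *desc; int count; };

    extern int  ring_alloc(struct ring *r, int count);  /* hypothetical */
    extern void ring_free(struct ring *r);              /* hypothetical */

    int resize_ring(struct ring **live, int new_count)
    {
        struct ring *nr = calloc(1, sizeof(*nr));
        struct ring *old = *live;

        if (!nr)
            return -1;
        if (ring_alloc(nr, new_count)) {
            free(nr);               /* old ring is still intact */
            return -1;
        }
        *live = nr;                 /* switch over first ... */
        ring_free(old);             /* ... then retire the old ring */
        free(old);
        return 0;
    }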
hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. + */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, + 0xFFFFFFFF); + } + + } else { + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if 
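
reg_pattern_test and the REG_PATTERN_TEST wrapper above probe a register with four alternating bit patterns and compare the read-back against the mask of bits the register actually implements as read/write. The same check in miniature against a plain memory word, which implements every bit, so a full mask passes:

    #include <stdint.h>
    #include <stdio.h>

    static int pattern_test(volatile uint32_t *reg, uint32_t mask,
                            uint32_t write)
    {
        static const uint32_t test[] = {
            0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
        };

        for (unsigned i = 0; i < sizeof(test) / sizeof(test[0]); i++) {
            *reg = write & test[i];
            uint32_t read = *reg;
            if (read != (write & test[i] & mask))
                return 1;           /* stuck or unimplemented bits */
        }
        return 0;
    }

    int main(void)
    {
        uint32_t fake_reg;          /* stands in for an MMIO register */

        printf("%s\n", pattern_test(&fake_reg, 0xFFFFFFFF, 0xFFFFFFFF)
                           ? "failed" : "passed");
        return 0;
    }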
((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet */ + /* Hook up test interrupt handler just for this test */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? + "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
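
e1000_eeprom_test relies on the Intel EEPROM convention that every word up to and including the checksum word sums to a fixed constant (EEPROM_SUM, 0xBABA on these parts), which means the checksum word is written as that constant minus the sum of the preceding words:

    #include <stdint.h>

    #define EEPROM_SUM 0xBABA

    /* value to store in the checksum word when provisioning an image */
    uint16_t checksum_word(const uint16_t *eeprom, unsigned checksum_reg)
    {
        uint16_t sum = 0;

        for (unsigned i = 0; i < checksum_reg; i++)
            sum += eeprom[i];
        return (uint16_t)(EEPROM_SUM - sum);
    }

    /* mirrors the verification loop in e1000_eeprom_test */
    int checksum_ok(const uint16_t *eeprom, unsigned checksum_reg)
    {
        uint16_t sum = 0;

        for (unsigned i = 0; i <= checksum_reg; i++)
            sum += eeprom[i];
        return sum == EEPROM_SUM;
    }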
+ */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + if (txdr->buffer_info[i].skb) + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + rxdr->buffer_info[i].length, + DMA_FROM_DEVICE); + if (rxdr->buffer_info[i].skb) + dev_kfree_skb(rxdr->buffer_info[i].skb); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + memset(txdr->desc, 0, txdr->size); + txdr->next_to_use = txdr->next_to_clean = 0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = 
E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 4; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 5; + goto err_nomem; + } + memset(rxdr->desc, 0, rxdr->size); + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + struct sk_buff *skb; + + skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); + if (!skb) { + ret_val = 6; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rxdr->buffer_info[i].skb = skb; + rxdr->buffer_info[i].length = E1000_RXBUFFER_2048; + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. + */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). 
+ */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber NIC if half + * duplex link is detected. */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times.
+ */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + break; + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). + */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + break; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + break; + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) + return e1000_set_phy_loopback(adapter); + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) { + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val=0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + 
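
e1000_create_lbtest_frame and e1000_check_lbtest_frame above deliberately verify only a few bytes: one byte from the 0xFF fill, and the 0xBE/0xAF markers planted inside the 0xAA run, which is enough to catch a corrupted or truncated loopback frame without comparing the whole payload. The pair extracted into plain C (frame_size must comfortably exceed the marker offsets, as the driver's 1024 does):

    #include <stdint.h>
    #include <string.h>

    static void create_frame(uint8_t *data, unsigned size)
    {
        memset(data, 0xFF, size);
        size &= ~1u;
        memset(&data[size / 2], 0xAA, size / 2 - 1);
        data[size / 2 + 10] = 0xBE;
        data[size / 2 + 12] = 0xAF;
    }

    static int check_frame(const uint8_t *data, unsigned size)
    {
        size &= ~1u;
        if (data[3] == 0xFF &&
            data[size / 2 + 10] == 0xBE &&
            data[size / 2 + 12] == 0xAF)
            return 0;               /* frame came back intact */
        return 13;                  /* the driver's mis-compare code */
    }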
msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + rxdr->buffer_info[l].length, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].skb, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while (good_cnt < 64 && jiffies < (time + 20)); + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (jiffies >= (time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) { + *data = 1; + } + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + 
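
The receive half of e1000_run_loopback_test polls under a jiffies deadline instead of sleeping per packet, counting good frames until either the quota is met or time runs out. The loop's shape in portable C, with time(NULL) standing in for jiffies and check_rx() a hypothetical poll:

    #include <stdbool.h>
    #include <time.h>

    extern bool check_rx(void);     /* hypothetical: one frame verified? */

    bool wait_for_frames(unsigned good_needed, unsigned timeout_s)
    {
        time_t deadline = time(NULL) + timeout_s;
        unsigned good = 0;

        do {
            if (check_rx())
                good++;
        } while (good < good_needed && time(NULL) < deadline);

        return good == good_needed;
    }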
hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + dev_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->flags); + } + msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} +
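
e1000_get_wol and e1000_set_wol translate bit-for-bit between ethtool's WAKE_* options and the hardware's Wake Up Filter Control (WUFC) bits. A table-driven version of the same translation; the bit positions below are placeholders, not the real WAKE_*/WUFC encodings:

    #include <stdint.h>

    struct wol_map { uint32_t wake_bit; uint32_t wufc_bit; };

    static const struct wol_map map[] = {   /* placeholder bit values */
        { 1u << 0 /* "WAKE_UCAST" */, 1u << 4 /* "WUFC_EX"  */ },
        { 1u << 1 /* "WAKE_MCAST" */, 1u << 5 /* "WUFC_MC"  */ },
        { 1u << 2 /* "WAKE_BCAST" */, 1u << 6 /* "WUFC_BC"  */ },
        { 1u << 3 /* "WAKE_MAGIC" */, 1u << 7 /* "WUFC_MAG" */ },
    };

    uint32_t wake_to_wufc(uint32_t wolopts)
    {
        uint32_t wufc = 0;

        for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++)
            if (wolopts & map[i].wake_bit)
                wufc |= map[i].wufc_bit;
        return wufc;
    }

+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ?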
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + char *p = NULL; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + switch (e1000_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) netdev + + e1000_gstrings_stats[i].stat_offset; + break; + case E1000_STATS: + p = (char *) adapter + + e1000_gstrings_stats[i].stat_offset; + break; + } + + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
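
e1000_set_coalesce stores the throttle internally as interrupts per second (itr), reports it back through ethtool as the reciprocal in microseconds, and programs the ITR register in 256 ns units. The arithmetic on its own:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned usecs = 100;                       /* user asks for 100 us */
        unsigned itr   = 1000000 / usecs;           /* 10000 ints/sec */
        uint32_t reg   = 1000000000u / (itr * 256); /* 390 * 256 ns ~ 99.8 us */

        printf("itr=%u ITR reg=%u\n", itr, reg);
        return 0;
    }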
*(u64 *)p : *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e1000_gstrings_test, + sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } +/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); +} diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c new file mode 100644 index 0000000..8545c7a --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -0,0 +1,5824 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + + */ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 
40, 40, 40, 40, 40, 40, + 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_SPINLOCK(e1000_eeprom_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + e_dbg("e1000_set_phy_type"); + + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u32 ret_val; + u16 phy_saved_data; + + e_dbg("e1000_phy_init_script"); + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored at + * the end of this routine. 
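
The IGP cable length table above is indexed by AGC (automatic gain control) readings taken from the PHY in e1000_get_cable_length: the more gain the receiver needs, the longer the estimated cable. A bounds-checked lookup sketch over an abbreviated copy of the table (the driver likewise validates the index before using it):

    #include <stdint.h>

    /* abbreviated; the real table has IGP01E1000_AGC_LENGTH_TABLE_SIZE
     * entries */
    static const uint16_t igp_cable_len[] = { 5, 5, 10, 20, 40, 60, 90, 120 };

    uint16_t agc_to_length(unsigned agc_reading)
    {
        unsigned last = sizeof(igp_cable_len) / sizeof(igp_cable_len[0]) - 1;

        if (agc_reading > last)     /* out-of-range reading: clamp here */
            agc_reading = last;
        return igp_cable_len[agc_reading];
    }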
*/ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + e_dbg("e1000_set_mac_type"); + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + e_dbg("e1000_set_media_type"); + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw: reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 icr; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + e_dbg("e1000_reset_hw"); + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not affect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings to + * device. Later controllers reload the EEPROM automatically, so just wait + * for reload to complete.
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + icr = er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw: Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + e_dbg("e1000_init_hw"); + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the Receive + * Address Registers (RARs 0 - 15). 
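+	 * RAR 0 receives the current station MAC address; the remaining
+	 * entries are cleared so that stale addresses cannot match
+	 * incoming traffic.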
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ + if (hw->bus_type == e1000_bus_type_pcix + && e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + e_dbg("e1000_adjust_serdes_amplitude"); + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) { + return ret_val; + } + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. 
Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ */
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+	s32 ret_val;
+	u16 eeprom_data;
+
+	e_dbg("e1000_setup_link");
+
+	/* Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	if (hw->fc == E1000_FC_DEFAULT) {
+		ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+					    1, &eeprom_data);
+		if (ret_val) {
+			e_dbg("EEPROM Read Error\n");
+			return -E1000_ERR_EEPROM;
+		}
+		if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+			hw->fc = E1000_FC_NONE;
+		else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+			 EEPROM_WORD0F_ASM_DIR)
+			hw->fc = E1000_FC_TX_PAUSE;
+		else
+			hw->fc = E1000_FC_FULL;
+	}
+
+	/* We want to save off the original Flow Control configuration just
+	 * in case we get disconnected and then reconnected into a different
+	 * hub or switch with different Flow Control capabilities.
+	 */
+	if (hw->mac_type == e1000_82542_rev2_0)
+		hw->fc &= (~E1000_FC_TX_PAUSE);
+
+	if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+		hw->fc &= (~E1000_FC_RX_PAUSE);
+
+	hw->original_fc = hw->fc;
+
+	e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+	/* Take the 4 bits from EEPROM word 0x0F that determine the initial
+	 * polarity value for the SW controlled pins, and setup the
+	 * Extended Device Control reg with that info.
+	 * This is needed because one of the SW controlled pins is used for
+	 * signal detection. So this should be done before e1000_setup_pcs_link()
+	 * or e1000_phy_setup() is called.
+	 */
+	if (hw->mac_type == e1000_82543) {
+		ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+					    1, &eeprom_data);
+		if (ret_val) {
+			e_dbg("EEPROM Read Error\n");
+			return -E1000_ERR_EEPROM;
+		}
+		ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+			    SWDPIO__EXT_SHIFT);
+		ew32(CTRL_EXT, ctrl_ext);
+	}
+
+	/* Call the necessary subroutine to configure the link. */
+	ret_val = (hw->media_type == e1000_media_type_copper) ?
+	    e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw);
+
+	/* Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values. This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	e_dbg("Initializing the Flow Control address, type and timer regs\n");
+
+	ew32(FCT, FLOW_CONTROL_TYPE);
+	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+	ew32(FCTTV, hw->fc_pause_time);
+
+	/* Set the flow control receive threshold registers. Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code. However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+		ew32(FCRTL, 0);
+		ew32(FCRTH, 0);
+	} else {
+		/* We need to set up the Receive Threshold high and low water marks
+		 * as well as (optionally) enabling the transmission of XON frames.
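+		 * An XON frame is a PAUSE frame with a timer value of zero;
+		 * with FCRTL.XONE set the hardware sends one as soon as the
+		 * receive buffer drains back below the low water mark.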
+		 */
+		if (hw->fc_send_xon) {
+			ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+			ew32(FCRTH, hw->fc_high_water);
+		} else {
+			ew32(FCRTL, hw->fc_low_water);
+			ew32(FCRTH, hw->fc_high_water);
+		}
+	}
+	return ret_val;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link - prepare fiber or serdes link
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ */
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	u32 status;
+	u32 txcw = 0;
+	u32 i;
+	u32 signal = 0;
+	s32 ret_val;
+
+	e_dbg("e1000_setup_fiber_serdes_link");
+
+	/* On adapters with a MAC newer than 82544, SWDP 1 will be
+	 * set when the optics detect a signal. On older adapters, it will be
+	 * cleared when there is a signal. This applies to fiber media only.
+	 * If we're on serdes media, adjust the output amplitude to value
+	 * set in the EEPROM.
+	 */
+	ctrl = er32(CTRL);
+	if (hw->media_type == e1000_media_type_fiber)
+		signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+	ret_val = e1000_adjust_serdes_amplitude(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Take the link out of reset */
+	ctrl &= ~(E1000_CTRL_LRST);
+
+	/* Adjust VCO speed to improve BER performance */
+	ret_val = e1000_set_vco_speed(hw);
+	if (ret_val)
+		return ret_val;
+
+	e1000_config_collision_dist(hw);
+
+	/* Check for a software override of the flow control settings, and setup
+	 * the device accordingly. If auto-negotiation is enabled, then software
+	 * will have to set the "PAUSE" bits to the correct value in the Transmit
+	 * Config Word Register (TXCW) and re-start auto-negotiation. However, if
+	 * auto-negotiation is disabled, then software will have to manually
+	 * configure the two flow control enable bits in the CTRL register.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0: Flow control is completely disabled
+	 *      1: Rx flow control is enabled (we can receive pause frames, but
+	 *         not send pause frames).
+	 *      2: Tx flow control is enabled (we can send pause frames but we do
+	 *         not support receiving pause frames).
+	 *      3: Both Rx and TX flow control (symmetric) are enabled.
+	 */
+	switch (hw->fc) {
+	case E1000_FC_NONE:
+		/* Flow control is completely disabled by a software over-ride. */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+		break;
+	case E1000_FC_RX_PAUSE:
+		/* RX Flow control is enabled and TX Flow control is disabled by a
+		 * software over-ride. Since there really isn't a way to advertise
+		 * that we are capable of RX Pause ONLY, we will advertise that we
+		 * support both symmetric and asymmetric RX PAUSE. Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	case E1000_FC_TX_PAUSE:
+		/* TX Flow control is enabled, and RX Flow control is disabled, by a
+		 * software over-ride.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+		break;
+	case E1000_FC_FULL:
+		/* Flow control (both RX and TX) is enabled by a software over-ride. */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	/* Since auto-negotiation is enabled, take the link out of reset (the link
+	 * will be in reset, because we previously reset the chip). This will
+	 * restart auto-negotiation.
If auto-negotiation is successful then the + * link-up status bit will be set and the flow control enable bits (RFCE + * and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a "Link-Up" + * indication in the Device Status Register. Time-out if a link isn't + * seen in 500 milliseconds seconds (Auto-negotiation should complete in + * less than 500 milliseconds even if the other end is doing it in SW). + * For internal serdes, we just assume a signal is present, then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the link up if + * we detect a signal. This will allow us to communicate with + * non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_copper_link_preconfig"); + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to what + * the PHY speed and duplex configuration is. 
In addition, we need to + * perform a hardware reset on the PHY to take it out of reset. + */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 + || hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_copper_link_igp_setup"); + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable 
SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_copper_link_mgp_setup"); + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
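+		 * (A PHY reset drops TX_CLK back to its 2.5MHz default, so
+		 * the same bit has to be forced again in
+		 * e1000_phy_force_speed_duplex() after the reset there.)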
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_copper_link_autoneg"); + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + e_dbg("e1000_copper_link_postconfig"); + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("e1000_setup_copper_link"); + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + e_dbg("e1000_phy_setup_autoneg"); + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). 
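+	 * The RTL8201 is a 10/100-only PHY, so for that PHY the gigabit
+	 * advertisement bits are simply cleared instead of being taken
+	 * from this register.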
*/ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + *hw's ability to send PAUSE frames. 
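+		 * (The IEEE 802.3 PAUSE/ASM_DIR encoding has no "receive
+		 * only" combination, hence the full advertisement here
+		 * followed by the transmit-side fix-up after the
+		 * negotiation resolves.)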
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + e_dbg("e1000_phy_force_speed_duplex"); + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits in the + * Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits in + * the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. 
M88E1000 requires MDI + * forced whenever speed are duplex are forced. + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg Complete bit + * to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again for link. */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg Complete bit + * to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This value + * defaults back to a 2.5MHz clock when the PHY is reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to enable CRS on + * TX. This must be set for both full and half duplex operation. 
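+		 * (e1000_copper_link_mgp_setup() set this bit originally;
+		 * the software reset above returned it to its power-on
+		 * default.)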
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) + && (!hw->autoneg) + && (hw->forced_speed_duplex == e1000_10_full + || hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + e_dbg("e1000_config_collision_dist"); + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * @mii_reg: data to write to the MII control register + * + * Sets MAC speed and duplex settings to reflect the those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_config_mac_to_phy"); + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration.*/ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings. 
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ */
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	e_dbg("e1000_force_mac_fc");
+
+	/* Get the current configuration of the Device Control Register */
+	ctrl = er32(CTRL);
+
+	/* Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disables flow control
+	 * according to the "hw->fc" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0: Flow control is completely disabled
+	 *      1: Rx flow control is enabled (we can receive pause
+	 *         frames but not send pause frames).
+	 *      2: Tx flow control is enabled (we can send pause frames
+	 *         but we do not receive pause frames).
+	 *      3: Both Rx and TX flow control (symmetric) are enabled.
+	 *  other: No other values should be possible at this point.
+	 */
+
+	switch (hw->fc) {
+	case E1000_FC_NONE:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case E1000_FC_RX_PAUSE:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case E1000_FC_TX_PAUSE:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case E1000_FC_FULL:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	/* Disable TX Flow Control for 82542 (rev 2.0) */
+	if (hw->mac_type == e1000_82542_rev2_0)
+		ctrl &= (~E1000_CTRL_TFCE);
+
+	ew32(CTRL, ctrl);
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_fc_after_link_up - configure flow control after autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures flow control settings after link is established.
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ */
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 mii_status_reg;
+	u16 mii_nway_adv_reg;
+	u16 mii_nway_lp_ability_reg;
+	u16 speed;
+	u16 duplex;
+
+	e_dbg("e1000_config_fc_after_link_up");
+
+	/* Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link. In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed))
+	    || ((hw->media_type == e1000_media_type_internal_serdes)
+		&& (hw->autoneg_failed))
+	    || ((hw->media_type == e1000_media_type_copper)
+		&& (!hw->autoneg))) {
+		ret_val = e1000_force_mac_fc(hw);
+		if (ret_val) {
+			e_dbg("Error forcing flow control settings\n");
+			return ret_val;
+		}
+	}
+
+	/* Check for the case where we have copper media and auto-neg is
+	 * enabled. In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner have
+	 * flow control configured.
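+	 * If auto-negotiation has not completed yet, nothing is forced
+	 * here; we only log the fact and leave the flow control registers
+	 * alone.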
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page Ability + * Register (Address 5) to determine how flow control was + * negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) + { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) + { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + e_dbg("e1000_check_for_serdes_link_generic"); + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
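+	 * The first pass through here only records the failure in
+	 * hw->autoneg_failed; link is forced on the next invocation so
+	 * that auto-negotiation gets one full interval to finish.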
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* + * If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 rxcw = 0; + u32 ctrl; + u32 status; + u32 rctl; + u32 icr; + u32 signal = 0; + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_check_for_link"); + + ctrl = er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + rxcw = er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + signal = + (hw->mac_type > + e1000_82544) ? 
E1000_CTRL_SWDPIN1 : 0; + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed. The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. + */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked immediately after + * link-up */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the polarity + * reversal workaround. We disable interrupts first, and upon + * returning, place the devices interrupt state to its previous + * value except for the link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 + || hw->mac_type == e1000_82543) && (!hw->autoneg) + && (hw->forced_speed_duplex == e1000_10_full + || hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. First, we + * need to restore the desired flow control settings because we may + * have had to re-autoneg with a different link partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the link + * partner capability register. We use the link speed to determine if + * TBI compatibility needs to be turned on or off. If the link is not + * at gigabit speed, then TBI compatibility is not needed. If we are + * at gigabit speed, we turn on TBI compatibility. 
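+ * In that mode the workaround below sets the Store Bad Packets bit
+ * (RCTL.SBP) so that frames carrying the extra trailing byte, which
+ * the hardware would otherwise drop as CRC errors, still reach the
+ * driver and can be corrected in software.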
+ */
+ if (hw->tbi_compatibility_en) {
+ u16 speed, duplex;
+ ret_val =
+ e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ e_dbg
+ ("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
+ /* If link speed is not set to gigabit speed, we do
+ * not need to enable TBI compatibility.
+ */
+ if (hw->tbi_compatibility_on) {
+ /* If we previously were in the mode, turn it
+ * off.
+ */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ hw->tbi_compatibility_on = false;
+ }
+ } else {
+ /* If TBI compatibility was previously off, turn it
+ * on. For compatibility with a TBI link partner, we
+ * will store bad packets. Some frames have an
+ * additional byte on the end and will look like CRC
+ * errors to the hardware.
+ */
+ if (!hw->tbi_compatibility_on) {
+ hw->tbi_compatibility_on = true;
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ }
+ }
+ }
+ }
+
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes))
+ e1000_check_for_serdes_link_generic(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_speed_and_duplex
+ * @hw: Struct containing variables accessed by shared code
+ * @speed: Speed of the connection
+ * @duplex: Duplex setting of the connection
+ *
+ * Detects the current speed and duplex settings of the hardware.
+ */
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ u32 status;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_get_speed_and_duplex");
+
+ if (hw->mac_type >= e1000_82543) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ e_dbg("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ e_dbg("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ e_dbg("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ e_dbg("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ e_dbg("Half Duplex\n");
+ }
+ } else {
+ e_dbg("1000 Mbs, Full Duplex\n");
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+ }
+
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade
+ * even if it is operating at half duplex. Here we set the duplex
+ * settings to match the duplex in the link partner's capabilities.
+ */
+ if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+ *duplex = HALF_DUPLEX;
+ else {
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if ((*speed == SPEED_100
+ && !(phy_data & NWAY_LPAR_100TX_FD_CAPS))
+ || (*speed == SPEED_10
+ && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+ *duplex = HALF_DUPLEX;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_wait_autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Blocks until autoneg completes or times out (~4.5 seconds)
+ */
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+
+ e_dbg("e1000_wait_autoneg");
+ e_dbg("Waiting for Auto-Neg to complete.\n");
+
+ /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
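+ * The register is read twice below since the status bits are
+ * latched; the second read returns the current state (the link
+ * check above does the same for the same reason).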
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) { + return E1000_SUCCESS; + } + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the MDC + * bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the MDC + * bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and + * then raising and lowering the Management Data Clock. A "0" is + * shifted out to the PHY by setting the MDIO bit to "0" and then + * raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are used + * to avoid contention on the MDIO pin when a read operation is performed. + * These two bits are ignored by us and thrown away. Bits are "shifted in" + * by raising the input to the Management Data Clock (setting the MDC bit), + * and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts for + * the turnaround bits. 
The first clock occurred when we clocked out the + * last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + + e_dbg("e1000_read_phy_reg"); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + e_dbg("e1000_read_phy_reg_ex"); + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16) mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16) mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal the + * beginning of an MII instruction. This is done by sending 32 + * consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
The format of
+ * a MII read instruction consists of a shift out of 14 bits and is
+ * defined as follows:
+ *	<Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+ * followed by a shift in of 18 bits. The first two bits shifted in
+ * are TurnAround bits used to avoid contention on the MDIO pin when a
+ * READ operation is performed. These two bits are thrown away,
+ * followed by a shift in of 16 bits which contains the desired data.
+ */
+ mdic = ((reg_addr) | (phy_addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+ e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+ /* Now that we've shifted out the read command to the MII, we need to
+ * "shift in" the 16-bit value (18 total bits) of the requested PHY
+ * register.
+ */
+ *phy_data = e1000_shift_in_mdi_bits(hw);
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg - write a phy register
+ * @hw: Struct containing variables accessed by shared code
+ * @reg_addr: address of the PHY register to write
+ * @data: data to write to the PHY
+ *
+ * Writes a value to a PHY register
+ */
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ u32 ret_val;
+
+ e_dbg("e1000_write_phy_reg");
+
+ if ((hw->phy_type == e1000_phy_igp) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16) reg_addr);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ return ret_val;
+}
+
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data)
+{
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
+
+ e_dbg("e1000_write_phy_reg_ex");
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, register address, and data
+ * intended for the PHY register in the MDI Control register.
+ * The MAC will take care of interfacing with the PHY to send
+ * the desired data.
+ */
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_WRITE) |
+ (INTEL_CE_GBE_MDIC_GO));
+
+ writel(mdic, E1000_MDIO_CMD);
+
+ /* Poll the ready bit to see if the MDI write
+ * completed
+ */
+ for (i = 0; i < 640; i++) {
+ udelay(5);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write
+ * completed
+ */
+ for (i = 0; i < 641; i++) {
+ udelay(5);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ }
+ } else {
+ /* We'll need to use the SW defined pins to shift the write
+ * command out to the PHY. We first send a preamble to the PHY
+ * to signal the beginning of the MII instruction. This is done
+ * by sending 32 consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the remaining required fields that will indicate
+ * a write operation.
We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the command. The
+ * format of a MII write instruction is as follows:
+ *	<Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (u32) phy_data;
+
+ e1000_shift_out_mdi_bits(hw, mdic, 32);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_hw_reset - reset the phy, hardware style
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Returns the PHY to the power-on reset state
+ */
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ u32 ctrl, ctrl_ext;
+ u32 led_ctrl;
+
+ e_dbg("e1000_phy_hw_reset");
+
+ e_dbg("Resetting Phy...\n");
+
+ if (hw->mac_type > e1000_82543) {
+ /* Read the device control register and assert the
+ * E1000_CTRL_PHY_RST bit. Then, take it out of reset.
+ * For e1000 hardware, we delay for 10ms between the assert
+ * and deassert.
+ */
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH();
+
+ msleep(10);
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ } else {
+ /* Read the Extended Device Control Register, assert the
+ * PHY_RESET_DIR bit to put the PHY into reset. Then, take it
+ * out of reset.
+ */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+ udelay(150);
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Wait for FW to finish PHY configuration. */
+ return e1000_get_phy_cfg_done(hw);
+}
+
+/**
+ * e1000_phy_reset - reset the phy to commit settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Resets the PHY
+ * Sets bit 15 of the MII Control register
+ */
+s32 e1000_phy_reset(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_phy_reset");
+
+ switch (hw->phy_type) {
+ case e1000_phy_igp:
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= MII_CR_RESET;
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+ break;
+ }
+
+ if (hw->phy_type == e1000_phy_igp)
+ e1000_phy_init_script(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_detect_gig_phy - check the phy type
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Probes the expected PHY address for known PHY IDs
+ */
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+ s32 phy_init_status, ret_val;
+ u16 phy_id_high, phy_id_low;
+ bool match = false;
+
+ e_dbg("e1000_detect_gig_phy");
+
+ if (hw->phy_id != 0)
+ return E1000_SUCCESS;
+
+ /* Read the PHY ID Registers to identify which PHY is onboard.
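+ * The 32-bit identifier is assembled from two 16-bit reads: PHY_ID1
+ * supplies the upper half, and the revision bits of PHY_ID2 are
+ * split out into hw->phy_revision below.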
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32) (phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + e_dbg("e1000_phy_reset_dsp"); + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + e_dbg("e1000_phy_igp_get_info"); + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. 
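+ * (that is, the extended 10BASE-T distance setting, which is
+ * therefore reported as "normal" unconditionally)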
*/ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid at 1000 Mbps */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + e_dbg("e1000_phy_m88_get_info"); + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? 
+ e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_phy_get_info"); + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + e_dbg("e1000_validate_mdi_settings"); + + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
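+ *
+ * Microwire parts have a fixed geometry per MAC type (64 or 256
+ * words); for SPI parts the word size is read back from the EEPROM
+ * itself once the controller-specific defaults are in place.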
+ */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd = er32(EECD);
+ s32 ret_val = E1000_SUCCESS;
+ u16 eeprom_size;
+
+ e_dbg("e1000_init_eeprom_params");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->word_size = 64;
+ eeprom->opcode_bits = 3;
+ eeprom->address_bits = 6;
+ eeprom->delay_usec = 50;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_SIZE) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (eecd & E1000_EECD_TYPE) {
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ } else {
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (eeprom->type == e1000_eeprom_spi) {
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes
+ * 128B to 32KB (incremented by powers of 2).
+ */
+ /* Set to default value for initial eeprom read. */
+ eeprom->word_size = 64;
+ ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+ if (ret_val)
+ return ret_val;
+ eeprom_size =
+ (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+ /* 256B eeprom size was not supported in earlier hardware, so
+ * we bump eeprom_size up one to ensure that "1" (which maps to
+ * 256B) is never the result used in the shifting logic below.
+ */
+ if (eeprom_size)
+ eeprom_size++;
+
+ eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_raise_ee_clk - Raises the EEPROM's clock input.
+ * @hw: Struct containing variables accessed by shared code
+ * @eecd: EECD's current value
+ */
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ /* Raise the clock input to the EEPROM (by setting the SK bit), and
+ * then wait <delay> microseconds.
+ */
+ *eecd = *eecd | E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+}
+
+/**
+ * e1000_lower_ee_clk - Lowers the EEPROM's clock input.
+ * @hw: Struct containing variables accessed by shared code
+ * @eecd: EECD's current value
+ */
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and
+ * then wait <delay> microseconds.
+ */
+ *eecd = *eecd & ~E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+}
+
+/**
+ * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM.
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~E1000_EECD_DO; + } else if (eeprom->type == e1000_eeprom_spi) { + eecd |= E1000_EECD_DO; + } + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", + * and then raising and then lowering the clock (the SK bit controls + * the clock input to the EEPROM). A "0" is shifted out to the EEPROM + * by setting "DI" to "0" and then raising and then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value of + * the "DO" bit. During this "shifting in" process the "DI" bit should + * always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
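+ *
+ * On MACs newer than the 82544, software must first request the
+ * interface by setting EECD.REQ and waiting for the grant bit
+ * (EECD.GNT) before driving the pins, as done below.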
+ */
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd, i = 0;
+
+ e_dbg("e1000_acquire_eeprom");
+
+ eecd = er32(EECD);
+
+ /* Request EEPROM Access */
+ if (hw->mac_type > e1000_82544) {
+ eecd |= E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ eecd = er32(EECD);
+ while ((!(eecd & E1000_EECD_GNT)) &&
+ (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+ i++;
+ udelay(5);
+ eecd = er32(EECD);
+ }
+ if (!(eecd & E1000_EECD_GNT)) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ e_dbg("Could not acquire EEPROM grant\n");
+ return -E1000_ERR_EEPROM;
+ }
+ }
+
+ /* Setup EEPROM for Read/Write */
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ /* Clear SK and DI */
+ eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+ ew32(EECD, eecd);
+
+ /* Set CS */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(1);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_standby_eeprom - Returns EEPROM to a "standby" state
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_standby_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+
+ eecd = er32(EECD);
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock high */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Select EEPROM */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock low */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ }
+}
+
+/**
+ * e1000_release_eeprom - drop chip select
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Terminates a command by inverting the EEPROM's chip select pin
+ */
+static void e1000_release_eeprom(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ e_dbg("e1000_release_eeprom");
+
+ eecd = er32(EECD);
+
+ if (hw->eeprom.type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_CS; /* Pull CS high */
+ eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+
+ udelay(hw->eeprom.delay_usec);
+ } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+ /* cleanup eeprom */
+
+ /* CS on Microwire is active-high */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+ ew32(EECD, eecd);
+
+ /* Rising edge of clock */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+
+ /* Falling edge of clock */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+ }
+
+ /* Stop requesting EEPROM access */
+ if (hw->mac_type > e1000_82544) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ }
+}
+
+/**
+ * e1000_spi_eeprom_ready - Waits for the SPI EEPROM to finish its current command.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+ u16 retry_count = 0;
+ u8 spi_stat_reg;
+
+ e_dbg("e1000_spi_eeprom_ready");
+
+ /* Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
+ retry_count = 0;
+ do {
+ e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+ hw->eeprom.opcode_bits);
+ spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8);
+ if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ retry_count += 5;
+
+ e1000_standby_eeprom(hw);
+ } while (retry_count < EEPROM_MAX_RETRY_SPI);
+
+ /* ATMEL SPI write time could vary from 0-20ms on 3.3V devices (and
+ * only 0-5ms on 5V devices)
+ */
+ if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+ e_dbg("SPI EEPROM Status error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_eeprom - Reads a 16 bit word from the EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: words read from the EEPROM
+ */
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_read_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 i = 0;
+
+ e_dbg("e1000_read_eeprom");
+
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
+ */
+ if ((offset >= eeprom->word_size)
+ || (words > eeprom->word_size - offset) || (words == 0)) {
+ e_dbg("\"words\" parameter out of bounds. Words = %d, "
+ "size = %d\n", words, eeprom->word_size);
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* EEPROMs that don't use EERD to read require us to bit-bang the SPI
+ * directly. In this case, we need to acquire the EEPROM so that
+ * FW or other port software does not interrupt.
+ */
+ /* Prepare the EEPROM for bit-bang reading */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
+ * acquired the EEPROM at this point, so any returns should release it
+ */
+ if (eeprom->type == e1000_eeprom_spi) {
+ u16 word_in;
+ u8 read_opcode = EEPROM_READ_OPCODE_SPI;
+
+ if (e1000_spi_eeprom_ready(hw)) {
+ e1000_release_eeprom(hw);
+ return -E1000_ERR_EEPROM;
+ }
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
+ eeprom->address_bits);
+
+ /* Read the data. The address of the eeprom internally
+ * increments with each byte (spi) being read, saving on the
+ * overhead of eeprom setup and tear-down.
The address counter will
+ * roll over if reading beyond the size of the eeprom, thus
+ * allowing the entire memory to be read starting from any
+ * offset.
+ */
+ for (i = 0; i < words; i++) {
+ word_in = e1000_shift_in_ee_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+ } else if (eeprom->type == e1000_eeprom_microwire) {
+ for (i = 0; i < words; i++) {
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw,
+ EEPROM_READ_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16) (offset + i),
+ eeprom->address_bits);
+
+ /* Read the data. For microwire, each word requires the
+ * overhead of eeprom setup and tear-down.
+ */
+ data[i] = e1000_shift_in_ee_bits(hw, 16);
+ e1000_standby_eeprom(hw);
+ }
+ }
+
+ /* End this read operation */
+ e1000_release_eeprom(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ */
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ e_dbg("e1000_validate_eeprom_checksum");
+
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+
+ if (checksum == (u16) EEPROM_SUM)
+ return E1000_SUCCESS;
+ else {
+ e_dbg("EEPROM Checksum Invalid\n");
+ return -E1000_ERR_EEPROM;
+ }
+}
+
+/**
+ * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ */
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ e_dbg("e1000_update_eeprom_checksum");
+
+ for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+ checksum = (u16) EEPROM_SUM - checksum;
+ if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+ e_dbg("EEPROM Write Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_eeprom - write words to the different EEPROM types.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit words to be written to the EEPROM
+ *
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
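+ *
+ * A minimal usage sketch (offset and value are hypothetical):
+ *
+ *	u16 word = 0x1234;
+ *	if (e1000_write_eeprom(hw, 0x10, 1, &word) == E1000_SUCCESS)
+ *		e1000_update_eeprom_checksum(hw);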
+ */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + spin_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + spin_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + e_dbg("e1000_write_eeprom"); + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* If eeprom is not yet detected, do so now */ + if (eeprom->word_size == 0) + e1000_init_eeprom_params(hw); + + /* A check for invalid values: offset too large, too many words, and not + * enough words. + */ + if ((offset >= eeprom->word_size) + || (words > eeprom->word_size - offset) || (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + e_dbg("e1000_write_eeprom_spi"); + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the opcode */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE + * operation, while the smaller eeproms are capable of an 8-byte + * PAGE WRITE operation. Break the inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. 
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to array of 16 bit words to be written to the EEPROM
+ */
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u16 words_written = 0;
+ u16 i = 0;
+
+ e_dbg("e1000_write_eeprom_microwire");
+
+ /* Send the write enable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 11). It's less work to include
+ * the 11 of the dummy address as part of the opcode than it is to
+ * shift it over the correct number of bits for the address. This puts
+ * the EEPROM into write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+ (u16) (eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+
+ /* Prepare the EEPROM */
+ e1000_standby_eeprom(hw);
+
+ while (words_written < words) {
+ /* Send the Write command (3-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (u16) (offset + words_written),
+ eeprom->address_bits);
+
+ /* Send the data */
+ e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+ /* Toggle the CS line. This in effect tells the EEPROM to
+ * execute the previous command.
+ */
+ e1000_standby_eeprom(hw);
+
+ /* Read DO repeatedly until it is high (equal to '1'). The
+ * EEPROM will signal that the command has been completed by
+ * raising the DO signal. If DO does not go high in 10
+ * milliseconds, then error out.
+ */
+ for (i = 0; i < 200; i++) {
+ eecd = er32(EECD);
+ if (eecd & E1000_EECD_DO)
+ break;
+ udelay(50);
+ }
+ if (i == 200) {
+ e_dbg("EEPROM Write did not complete\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Recover from write */
+ e1000_standby_eeprom(hw);
+
+ words_written++;
+ }
+
+ /* Send the write disable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 10). It's less work to include
+ * the 10 of the dummy address as part of the opcode than it is to
+ * shift it over the correct number of bits for the address. This
+ * takes the EEPROM out of write/erase mode.
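+ *
+ * For a 64-word part (6 address bits, 3 opcode bits) the disable
+ * sequence is thus nine bits on the wire: the opcode plus the
+ * leading "10" of the dummy address, then four more zero bits.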
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16) (eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + e_dbg("e1000_read_mac_addr"); + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + e_dbg("e1000_init_rx_addrs"); + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the other 15 receive addresses. */ + e_dbg("Clearing RAR[1-15]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register. 
+ * @hw: Struct containing variables accessed by shared code
+ * @addr: Address to put into receive address register
+ * @index: Receive address register to write
+ */
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+ * unit hang.
+ *
+ * Description:
+ * If there are any Rx frames queued up or otherwise present in the HW
+ * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+ * hang. To work around this issue, we have to disable receives and
+ * flush out all Rx frames before we enable RSS. To do so, we redirect
+ * all Rx traffic to manageability and then reset the HW.
+ * This flushes away Rx frames, and (since the redirections to
+ * manageability persist across resets) keeps new ones from coming in
+ * while we work. Then, we clear the Address Valid AV bit for all MAC
+ * addresses and undo the re-direction to manageability.
+ * Now, frames are coming in again, but the MAC won't accept them, so
+ * far so good. We now proceed to initialize RSS (if necessary) and
+ * configure the Rx unit. Last, we re-enable the AV bits and continue
+ * on our merry way.
+ */
+ switch (hw->mac_type) {
+ default:
+ /* Indicate to hardware the Address is Valid. */
+ rar_high |= E1000_RAH_AV;
+ break;
+ }
+
+ E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ E1000_WRITE_FLUSH();
+}
+
+/**
+ * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: Offset in VLAN filter table to write
+ * @value: Value to write into VLAN filter table
+ */
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ u32 temp;
+
+ if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+ E1000_WRITE_FLUSH();
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/**
+ * e1000_clear_vfta - Clears the VLAN filter table
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_clear_vfta(struct e1000_hw *hw)
+{
+ u32 offset;
+ u32 vfta_value = 0;
+ u32 vfta_offset = 0;
+ u32 vfta_bit_in_reg = 0;
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /* If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of
+ * the manageability unit.
+ */
+ vfta_value = (offset == vfta_offset) ?
vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + e_dbg("e1000_id_led_init"); + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controlable LED for use and saves the current state of the LED. + */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + e_dbg("e1000_setup_led"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16) (hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + /* Fall Through */ + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. 
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ e_dbg("e1000_cleanup_led");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No cleanup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn on PHY Smart Power Down (if previously enabled) */
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ /* Restore LEDCTL settings */
+ ew32(LEDCTL, hw->ledctl_default);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on - Turns on the software controllable LED
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL);
+
+ e_dbg("e1000_led_on");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode2);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off - Turns off the software controllable LED
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL);
+
+ e_dbg("e1000_led_off");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode1);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clear_hw_cntrs - Clears all hardware statistics counters.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+ volatile u32 temp;
+
+ temp = er32(CRCERRS);
+ temp = er32(SYMERRS);
+ temp = er32(MPC);
+ temp = er32(SCC);
+ temp = er32(ECOL);
+ temp = er32(MCC);
+ temp = er32(LATECOL);
+ temp = er32(COLC);
+ temp = er32(DC);
+ temp = er32(SEC);
+ temp = er32(RLEC);
+ temp = er32(XONRXC);
+ temp = er32(XONTXC);
+ temp = er32(XOFFRXC);
+ temp = er32(XOFFTXC);
+ temp = er32(FCRUC);
+
+ temp = er32(PRC64);
+ temp = er32(PRC127);
+ temp = er32(PRC255);
+ temp = er32(PRC511);
+ temp = er32(PRC1023);
+ temp = er32(PRC1522);
+
+ temp = er32(GPRC);
+ temp = er32(BPRC);
+ temp = er32(MPRC);
+ temp = er32(GPTC);
+ temp = er32(GORCL);
+ temp = er32(GORCH);
+ temp = er32(GOTCL);
+ temp = er32(GOTCH);
+ temp = er32(RNBC);
+ temp = er32(RUC);
+ temp = er32(RFC);
+ temp = er32(ROC);
+ temp = er32(RJC);
+ temp = er32(TORL);
+ temp = er32(TORH);
+ temp = er32(TOTL);
+ temp = er32(TOTH);
+ temp = er32(TPR);
+ temp = er32(TPT);
+
+ temp = er32(PTC64);
+ temp = er32(PTC127);
+ temp = er32(PTC255);
+ temp = er32(PTC511);
+ temp = er32(PTC1023);
+ temp = er32(PTC1522);
+
+ temp = er32(MPTC);
+ temp = er32(BPTC);
+
+ if (hw->mac_type < e1000_82543)
+ return;
+
+ temp = er32(ALGNERRC);
+ temp = er32(RXERRC);
+ temp = er32(TNCRS);
+ temp = er32(CEXTERR);
+ temp = er32(TSCTC);
+ temp = er32(TSCTFC);
+
+ if (hw->mac_type <= e1000_82544)
+ return;
+
+ temp = er32(MGTPRC);
+ temp = er32(MGTPDC);
+ temp = er32(MGTPTC);
+}
+
+/**
+ * e1000_reset_adaptive - Resets Adaptive IFS to its default state.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Call this after e1000_init_hw. You may override the IFS defaults by setting
+ * hw->ifs_params_forced to true. However, you must initialize
+ * hw->current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
+ * before calling this function.
+ */
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+ e_dbg("e1000_reset_adaptive");
+
+ if (hw->adaptive_ifs) {
+ if (!hw->ifs_params_forced) {
+ hw->current_ifs_val = 0;
+ hw->ifs_min_val = IFS_MIN;
+ hw->ifs_max_val = IFS_MAX;
+ hw->ifs_step_size = IFS_STEP;
+ hw->ifs_ratio = IFS_RATIO;
+ }
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ } else {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ }
+}
+
+/**
+ * e1000_update_adaptive - update adaptive IFS
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Called during the callback/watchdog routine to update the IFS value based
+ * on the ratio of transmits to collisions (hw->tx_packet_delta and
+ * hw->collision_delta).
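+ * For example, assuming hw->ifs_ratio is 4: with 100 transmits and more
+ * than 25 collisions in the interval, collision_delta * ifs_ratio exceeds
+ * tx_packet_delta and the AIT register is stepped up from ifs_min_val
+ * towards ifs_max_val in ifs_step_size increments.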
+ */
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+ e_dbg("e1000_update_adaptive");
+
+ if (hw->adaptive_ifs) {
+ if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+ if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+ hw->in_ifs_mode = true;
+ if (hw->current_ifs_val < hw->ifs_max_val) {
+ if (hw->current_ifs_val == 0)
+ hw->current_ifs_val =
+ hw->ifs_min_val;
+ else
+ hw->current_ifs_val +=
+ hw->ifs_step_size;
+ ew32(AIT, hw->current_ifs_val);
+ }
+ }
+ } else {
+ if (hw->in_ifs_mode
+ && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+ hw->current_ifs_val = 0;
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ }
+ }
+ } else {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ }
+}
+
+/**
+ * e1000_tbi_adjust_stats
+ * @hw: Struct containing variables accessed by shared code
+ * @stats: Struct containing the statistics counters to adjust
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
+ *
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ */
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+ u32 frame_len, u8 *mac_addr)
+{
+ u64 carry_bit;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /* We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ carry_bit = 0x80000000 & stats->gorcl;
+ stats->gorcl += frame_len;
+ /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+ * Received Count) was one before the addition AND is zero after,
+ * then we lost the carry out and need to add one to Gorch (Good
+ * Octets Received Count High).
+ * This could be simplified if all environments supported
+ * 64-bit integers.
+ */
+ if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+ stats->gorch++;
+ /* Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+ /* Broadcast packet */
+ stats->bprc++;
+ else if (*mac_addr & 0x01)
+ /* Multicast packet */
+ stats->mprc++;
+
+ if (frame_len == hw->max_frame_size) {
+ /* In this case, the hardware has overcounted the number of
+ * oversize frames.
+ */
+ if (stats->roc > 0)
+ stats->roc--;
+ }
+
+ /* Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
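+ * For example, a frame the hardware counted as 65 bytes has
+ * frame_len == 64 after the decrement above: it was binned in prc127
+ * but belongs in prc64, so prc64 is incremented and prc127 decremented.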
+ */
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
+}
+
+/**
+ * e1000_get_bus_info
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Gets the current PCI bus type, speed, and width of the hardware
+ */
+void e1000_get_bus_info(struct e1000_hw *hw)
+{
+ u32 status;
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->bus_type = e1000_bus_type_pci;
+ hw->bus_speed = e1000_bus_speed_unknown;
+ hw->bus_width = e1000_bus_width_unknown;
+ break;
+ default:
+ status = er32(STATUS);
+ hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+ e1000_bus_type_pcix : e1000_bus_type_pci;
+
+ if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+ hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+ e1000_bus_speed_66 : e1000_bus_speed_120;
+ } else if (hw->bus_type == e1000_bus_type_pci) {
+ hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+ e1000_bus_speed_66 : e1000_bus_speed_33;
+ } else {
+ switch (status & E1000_STATUS_PCIX_SPEED) {
+ case E1000_STATUS_PCIX_SPEED_66:
+ hw->bus_speed = e1000_bus_speed_66;
+ break;
+ case E1000_STATUS_PCIX_SPEED_100:
+ hw->bus_speed = e1000_bus_speed_100;
+ break;
+ case E1000_STATUS_PCIX_SPEED_133:
+ hw->bus_speed = e1000_bus_speed_133;
+ break;
+ default:
+ hw->bus_speed = e1000_bus_speed_reserved;
+ break;
+ }
+ }
+ hw->bus_width = (status & E1000_STATUS_BUS64) ?
+ e1000_bus_width_64 : e1000_bus_width_32;
+ break;
+ }
+}
+
+/**
+ * e1000_write_reg_io
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset to write to
+ * @value: value to write
+ *
+ * Writes a value to one of the device's registers using port I/O (as opposed
+ * to memory mapped I/O). Only 82544 and newer devices support port I/O.
+ */
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ unsigned long io_addr = hw->io_base;
+ unsigned long io_data = hw->io_base + 4;
+
+ e1000_io_write(hw, io_addr, offset);
+ e1000_io_write(hw, io_data, value);
+}
+
+/**
+ * e1000_get_cable_length - Estimates the cable length.
+ * @hw: Struct containing variables accessed by shared code
+ * @min_length: The estimated minimum length
+ * @max_length: The estimated maximum length
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * This function always returns a ranged length (minimum & maximum).
+ * For M88 phy's, it maps the single value read from the PHY register onto a
+ * minimum and maximum range. For IGP phy's, it calculates the range from the
+ * AGC registers.
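+ * For example, an M88 reading of e1000_cable_length_50_80 yields
+ * *min_length = e1000_igp_cable_length_50 (50 m) and
+ * *max_length = e1000_igp_cable_length_80 (80 m).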
+ */
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length)
+{
+ s32 ret_val;
+ u16 agc_value = 0;
+ u16 i, phy_data;
+ u16 cable_length;
+
+ e_dbg("e1000_get_cable_length");
+
+ *min_length = *max_length = 0;
+
+ /* Use old method for Phy older than IGP */
+ if (hw->phy_type == e1000_phy_m88) {
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+ /* Convert the enum value to ranged values */
+ switch (cable_length) {
+ case e1000_cable_length_50:
+ *min_length = 0;
+ *max_length = e1000_igp_cable_length_50;
+ break;
+ case e1000_cable_length_50_80:
+ *min_length = e1000_igp_cable_length_50;
+ *max_length = e1000_igp_cable_length_80;
+ break;
+ case e1000_cable_length_80_110:
+ *min_length = e1000_igp_cable_length_80;
+ *max_length = e1000_igp_cable_length_110;
+ break;
+ case e1000_cable_length_110_140:
+ *min_length = e1000_igp_cable_length_110;
+ *max_length = e1000_igp_cable_length_140;
+ break;
+ case e1000_cable_length_140:
+ *min_length = e1000_igp_cable_length_140;
+ *max_length = e1000_igp_cable_length_170;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+ } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+ u16 cur_agc_value;
+ u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+ static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D
+ };
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+ ret_val =
+ e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+ /* Value bound check. */
+ if ((cur_agc_value >=
+ IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1)
+ || (cur_agc_value == 0))
+ return -E1000_ERR_PHY;
+
+ agc_value += cur_agc_value;
+
+ /* Update minimal AGC value. */
+ if (min_agc_value > cur_agc_value)
+ min_agc_value = cur_agc_value;
+ }
+
+ /* Remove the minimal AGC result for length < 50m */
+ if (agc_value <
+ IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+ agc_value -= min_agc_value;
+
+ /* Get the average length of the remaining 3 channels */
+ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+ } else {
+ /* Get the average length of all the 4 channels. */
+ agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+ }
+
+ /* Set the range of the calculated length. */
+ *min_length = ((e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) > 0) ?
+ (e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) : 0;
+ *max_length = e1000_igp_cable_length_table[agc_value] +
+ IGP01E1000_AGC_RANGE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_polarity - Check the cable polarity
+ * @hw: Struct containing variables accessed by shared code
+ * @polarity: output parameter : 0 - Polarity is not reversed
+ * 1 - Polarity is reversed.
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For phy's older than IGP, this function simply reads the polarity bit in the
+ * Phy Status register. For IGP phy's, this bit is valid only if link speed is
+ * 10 Mbps. If the link speed is 100 Mbps there is no polarity, so this bit is
+ * always 0. If the link speed is 1000 Mbps the polarity status is in the
+ * IGP01E1000_PHY_PCS_INIT_REG.
+ */
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_check_polarity");
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* return the Polarity bit in the Status register. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
+ M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+
+ } else if (hw->phy_type == e1000_phy_igp) {
+ /* Read the Status register to check the speed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+ * find the polarity status */
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+ /* Read the GIG initialization PCS register (0x00B4) */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check the polarity bits */
+ *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
+ e1000_rev_polarity_reversed :
+ e1000_rev_polarity_normal;
+ } else {
+ /* For 10 Mbps, read the polarity bit in the status register. (for
+ * 100 Mbps this bit is always 0) */
+ *polarity =
+ (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
+ e1000_rev_polarity_reversed :
+ e1000_rev_polarity_normal;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_downshift - Check if Downshift occurred
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * The result is stored in hw->speed_downgraded (1 - Downshift occurred,
+ * 0 - No Downshift occurred).
+ * For phy's older than IGP, this function reads the Downshift bit in the Phy
+ * Specific Status register. For IGP phy's, it reads the Downgrade bit in the
+ * Link Health register. In IGP this bit is latched high, so the driver must
+ * read it immediately after link is established.
+ */
+static s32 e1000_check_downshift(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_check_downshift");
+
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ hw->speed_downgraded =
+ (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+ } else if (hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+ M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_dsp_after_link_change
+ * @hw: Struct containing variables accessed by shared code
+ * @link_up: was link up at the time this was called
+ *
+ * returns: - E1000_ERR_PHY if fail to read/write the PHY
+ * E1000_SUCCESS at any other case.
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
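+ * Two adjustments are made: for a cable length of 50 m or more the EDAC
+ * MU index is cleared in the per-channel AGC parameter registers, while
+ * shorter cables instead switch the FFE configuration based on the
+ * observed idle error count.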
+ */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D + }; + u16 min_length, max_length; + + e_dbg("e1000_config_dsp_after_link_change"); + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + + ret_val = + e1000_get_cable_length(hw, &min_length, + &max_length); + if (ret_val) + return ret_val; + + if ((hw->dsp_config_state == e1000_dsp_config_enabled) + && min_length >= e1000_igp_cable_length_50) { + + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, + dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= + ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = + e1000_write_phy_reg(hw, + dsp_reg_array + [i], phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = + e1000_dsp_config_activated; + } + + if ((hw->ffe_config_state == e1000_ffe_config_enabled) + && (min_length < e1000_igp_cable_length_50)) { + + u16 ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = + e1000_read_phy_reg(hw, + PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += + (phy_data & + SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > + SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) + { + hw->ffe_config_state = + e1000_ffe_config_active; + + ret_val = + e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be restored at + * the end of the routines. 
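+ * (Register 0x2F5B is a raw PHY register with no named define; writing
+ * 0x0003 to it below turns the PHY transmitter off while the DSP
+ * registers are reprogrammed, and restoring the saved value turns it
+ * back on.)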
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val =
+ e1000_read_phy_reg(hw, dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+ phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+ ret_val =
+ e1000_write_phy_reg(hw, dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ /* Now enable the transmitter */
+ ret_val =
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ }
+
+ if (hw->ffe_config_state == e1000_ffe_config_active) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of the routines. */
+ ret_val =
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_DEFAULT);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ /* Now enable the transmitter */
+ ret_val =
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_phy_mode - Set PHY to class A mode
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Assumes the following operations will follow to enable the new class mode.
+ * 1. Do a PHY soft reset
+ * 2. Restart auto-negotiation or force link.
+ */
+static s32 e1000_set_phy_mode(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 eeprom_data;
+
+ e_dbg("e1000_set_phy_mode");
+
+ if ((hw->mac_type == e1000_82545_rev_3) &&
+ (hw->media_type == e1000_media_type_copper)) {
+ ret_val =
+ e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1,
+ &eeprom_data);
+ if (ret_val) {
+ return ret_val;
+ }
+
+ if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+ (eeprom_data & EEPROM_PHY_CLASS_A)) {
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x000B);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL,
+ 0x8104);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_reset_disable = false;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state - set d3 link power state
+ * @hw: Struct containing variables accessed by shared code
+ * @active: true to enable lplu, false to disable lplu.
+ *
+ * This function sets the lplu state according to the active flag. When
+ * activating lplu this function also disables smart speed and vice versa.
+ * lplu will not be activated unless the device autonegotiation advertisement
+ * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
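+ * (So, for example, advertising only 100 Mbps, or only 1000 Mbps,
+ * leaves lplu disabled.)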
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + e_dbg("e1000_set_d3_lplu_state"); + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used for + * Dx transitions and states */ + if (hw->mac_type == e1000_82541_rev_2 + || hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during + * Dx states where the power conservation is most important. During + * driver activity we should enable SmartSpeed, so performance is + * maintained. */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) + || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) + || (hw->autoneg_advertised == + AUTONEG_ADVERTISE_10_100_ALL)) { + + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
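+ * Only the 82545 rev 3 and 82546 rev 3 MACs are affected; for all other
+ * MAC types the function returns E1000_SUCCESS without touching the PHY.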
+ */
+static s32 e1000_set_vco_speed(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 default_page = 0;
+ u16 phy_data;
+
+ e_dbg("e1000_set_vco_speed");
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ /* Set PHY register 30, page 5, bit 8 to 0 */
+
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set PHY register 30, page 4, bit 11 to 1 */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+
+/**
+ * e1000_enable_mng_pass_thru - check for bmc pass through
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Verifies whether the hardware needs to allow ARPs to be processed by the
+ * host.
+ * returns: - true/false
+ */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+
+ if (hw->asf_firmware_present) {
+ manc = er32(MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+ !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+ return false;
+ if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+ return true;
+ }
+ return false;
+}
+
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 i;
+
+ /* Polarity reversal workaround for forced 10F/10H links. */
+
+ /* Disable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the no-link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be clear.
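+ * (The Link Status bit is latched, which is why the register is
+ * read twice back-to-back below: the first read returns the
+ * latched value, the second the current link state.)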
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
+ break;
+ mdelay(100);
+ }
+
+ /* Recommended delay time after link has been lost */
+ mdelay(1000);
+
+ /* Now we will re-enable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be set.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS)
+ break;
+ mdelay(100);
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_auto_rd_done
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Check for EEPROM Auto Read bit done.
+ * returns: - E1000_ERR_RESET if fail to reset MAC
+ * E1000_SUCCESS at any other case.
+ */
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
+{
+ e_dbg("e1000_get_auto_rd_done");
+ msleep(5);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_cfg_done
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Checks if the PHY configuration is done
+ * returns: - E1000_ERR_RESET if fail to reset MAC
+ * E1000_SUCCESS at any other case.
+ */
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+ e_dbg("e1000_get_phy_cfg_done");
+ mdelay(10);
+ return E1000_SUCCESS;
+}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
new file mode 100644
index 0000000..5c9a840
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -0,0 +1,3103 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS
+ e1000-devel Mailing List
+ Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + 
e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + e1000_downshift_activated, + e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* 
EEPROM Functions */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw);
+
+/* MNG HOST IF functions */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
+
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_ICH_IAMT_MODE 0x2
+#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* VLAN support enabled */
+#define E1000_VFTA_ENTRY_SHIFT 0x5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+struct e1000_host_mng_command_header {
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
+};
+
+struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head is 4 bytes */
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can be 0..0x658 bytes long */
+};
+#ifdef __BIG_ENDIAN
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u16 vlan_id;
+ u8 reserved0;
+ u8 status;
+ u32 reserved1;
+ u8 checksum;
+ u8 reserved3;
+ u16 reserved2;
+};
+#else
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
+};
+#endif
+
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
+s32 e1000_read_mac_addr(struct e1000_hw *hw);
+
+/* Filters (multicast, vlan, receive) */
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+
+/* LED functions */
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_blink_led_start(struct e1000_hw *hw);
+
+/* Adaptive IFS Functions */
+
+/* Everything else */
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+ u32 frame_len, u8 * mac_addr);
+void e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
+/* Port I/O is only supported on 82544 and newer */
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
+
+#define E1000_READ_REG_IO(a, reg) \
+ e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+ e1000_write_reg_io((a), E1000_##reg, val)
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542 0x1000
+#define E1000_DEV_ID_82543GC_FIBER 0x1001
+#define E1000_DEV_ID_82543GC_COPPER 0x1004
+#define E1000_DEV_ID_82544EI_COPPER 0x1008
+#define E1000_DEV_ID_82544EI_FIBER 0x1009
+#define E1000_DEV_ID_82544GC_COPPER 0x100C
+#define E1000_DEV_ID_82544GC_LOM 0x100D
+#define E1000_DEV_ID_82540EM 0x100E
+#define E1000_DEV_ID_82540EM_LOM 0x1015
+#define E1000_DEV_ID_82540EP_LOM 0x1016
+#define E1000_DEV_ID_82540EP 0x1017
+#define E1000_DEV_ID_82540EP_LP 0x101E
+#define E1000_DEV_ID_82545EM_COPPER 0x100F
+#define E1000_DEV_ID_82545EM_FIBER 0x1011
+#define E1000_DEV_ID_82545GM_COPPER 0x1026
+#define E1000_DEV_ID_82545GM_FIBER 0x1027
+#define E1000_DEV_ID_82545GM_SERDES 0x1028
+#define E1000_DEV_ID_82546EB_COPPER 0x1010
+#define E1000_DEV_ID_82546EB_FIBER 0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI 0x1013
+#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
+#define E1000_DEV_ID_82541ER 0x1078
+#define E1000_DEV_ID_82547GI 0x1075
+#define E1000_DEV_ID_82541GI 0x1076
+#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82541GI_LF 0x107C
+#define E1000_DEV_ID_82546GB_COPPER 0x1079
+#define E1000_DEV_ID_82546GB_FIBER 0x107A
+#define E1000_DEV_ID_82546GB_SERDES 0x107B
+#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
+
+#define NODE_ADDRESS_SIZE 6
+#define ETH_LENGTH_OF_ADDRESS 6
+
+/* MAC decode size is 128K - This is the size of BAR0 */
+#define MAC_DECODE_SIZE (128 * 1024)
+
+#define E1000_82542_2_0_REV_ID 2
+#define E1000_82542_2_1_REV_ID 3
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* The sizes (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE 14
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define ETHERNET_FCS_SIZE 4
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+ (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define CRC_LENGTH ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* 802.1q VLAN Packet Sizes */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
+#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
+
+/* Packet Header defines */
+#define IP_PROTOCOL_TCP 6
+#define IP_PROTOCOL_UDP 0x11
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
+
+/* Number of high/low register pairs in the RAR.
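+ * Each pair is written as two consecutive 32-bit entries (the low dword at
+ * (index << 1), the high dword at (index << 1) + 1), as in e1000_rar_set().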
The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define 
E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 
/* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile __le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. 
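+ * They are read and written through the er32()/ew32() accessors and the
+ * E1000_READ_REG_ARRAY()/E1000_WRITE_REG_ARRAY() macros used throughout the
+ * shared code.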
+ * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +extern void __iomem *ce4100_gbe_mdio_base_virt; +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + +/* Auxiliary Control Register. 
This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define 
E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ 
+#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count - R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
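The counter block above is R/clr: the hardware zeroes each statistics register as part of the read, so every read returns a delta rather than a running total. A driver therefore has to accumulate the values (e.g. into struct e1000_hw_stats, defined later in this header). A minimal sketch of that pattern, assuming Linux MMIO accessors and a mapped register BAR; the helper name is illustrative and not part of this patch:

    /* Accumulate one R/clr counter: reading E1000_CRCERRS returns the
     * CRC errors seen since the previous read and clears the register. */
    static void update_crc_errors(u8 __iomem *hw_addr, u64 *crcerrs_total)
    {
            *crcerrs_total += readl(hw_addr + E1000_CRCERRS);
    }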
+#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE 
+#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define 
E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 
phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
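The Device Control bits above are what the driver uses to override autonegotiation. SPD_SEL is a two-bit field, so it must be cleared before a new speed is written, and FRCSPD/FRCDPX tell the MAC to ignore the speed and duplex indications coming from the PHY. A minimal sketch, assuming Linux MMIO accessors; the function name is illustrative and not part of this patch:

    /* Force 100 Mb/s full duplex through the Device Control register. */
    static void force_100_full(u8 __iomem *hw_addr)
    {
            u32 ctrl = readl(hw_addr + E1000_CTRL);

            ctrl &= ~E1000_CTRL_SPD_SEL;                   /* clear speed field */
            ctrl |= E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX; /* override the PHY */
            ctrl |= E1000_CTRL_SPD_100 | E1000_CTRL_FD;    /* 100 Mb/s, full */
            writel(ctrl, hw_addr + E1000_CTRL);
    }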
+ +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66 MHz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */
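The EERD bits above implement a simple request/poll protocol: software writes the word address together with START, spins until the hardware sets DONE, and then extracts the 16-bit data field. A minimal sketch, assuming Linux MMIO and delay helpers; the retry bound and function name are assumptions for the example, not part of this patch:

    /* Read one 16-bit EEPROM word through the EERD register. */
    static int eerd_read_word(u8 __iomem *hw_addr, u32 offset, u16 *data)
    {
            u32 eerd;
            int i;

            writel((offset << E1000_EERD_ADDR_SHIFT) | E1000_EERD_START,
                   hw_addr + E1000_EERD);

            for (i = 0; i < 1000; i++) {    /* polling budget: assumption */
                    eerd = readl(hw_addr + E1000_EERD);
                    if (eerd & E1000_EERD_DONE) {
                            *data = (eerd & E1000_EERD_DATA_MASK) >>
                                    E1000_EERD_DATA_SHIFT;
                            return 0;
                    }
                    udelay(5);
            }
            return -ETIMEDOUT;
    }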
+ +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive address valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_FW_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */
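The RXDCTL/TXDCTL masks above carve three 6-bit threshold fields out of one register: prefetch at bit 0, host at bit 8 and writeback at bit 16. A minimal sketch of packing a TXDCTL value from those fields; the helper is illustrative, not part of this patch, and the shift amounts follow directly from the masks:

    /* Compose TXDCTL from its threshold fields; each field is 6 bits. */
    static u32 make_txdctl(u32 pthresh, u32 hthresh, u32 wthresh)
    {
            return ((pthresh << 0)  & E1000_TXDCTL_PTHRESH) |
                   ((hthresh << 8)  & E1000_TXDCTL_HTHRESH) |
                   ((wthresh << 16) & E1000_TXDCTL_WTHRESH) |
                   E1000_TXDCTL_GRAN;  /* thresholds counted in descriptors */
    }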
+/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
+#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
+ * filtering */
+#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+/* FW Semaphore Register */
+#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */
+#define E1000_FWSM_MODE_SHIFT 1
+#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
+
+#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
+#define E1000_FWSM_SKUEL_SHIFT 29
+#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
+
+/* FFLT Debug Register */
+#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
+
+typedef enum {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_interface_only
+} e1000_mng_mode;
+
+/* Host Interface Control Register */
+#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */
+#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done
+ * to put command in RAM */
+#define E1000_HICR_SV 0x00000004 /* Status Validity */
+#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */
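Taken together, the HICR bits above describe a small doorbell protocol: the driver places a command in the shared RAM window covered by the next block of definitions, sets E1000_HICR_C, and polls until the hardware clears that bit, then checks E1000_HICR_SV for a valid status. What follows is only a hedged sketch of the polling step, not code from this patch: the function name is hypothetical, er32()/ew32() are assumed to be the driver's usual register accessors, and E1000_HI_COMMAND_TIMEOUT is the millisecond bound defined just below.

	/* Ring the host-interface doorbell and wait for the firmware to
	 * consume the command. A real routine would also verify that
	 * E1000_HICR_EN is set and copy the command into the 0x8800 RAM
	 * window beforehand. */
	static int e1000_hi_wait_for_command(struct e1000_hw *hw)
	{
		u32 i, hicr;

		ew32(HICR, er32(HICR) | E1000_HICR_C); /* command is in RAM */

		for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
			hicr = er32(HICR);
			if (!(hicr & E1000_HICR_C)) /* hardware is done */
				return (hicr & E1000_HICR_SV) ? 0 : -EIO;
			mdelay(1); /* the timeout is expressed in ms */
		}
		return -ETIMEDOUT;
	}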
+
+/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
+#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
+
+struct e1000_host_command_header {
+ u8 command_id;
+ u8 command_length;
+ u8 command_options; /* I/F bits for command, status for return */
+ u8 checksum;
+};
+struct e1000_host_command_info {
+ struct e1000_host_command_header command_header; /* Command header / result header is 4 bytes */
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can be 0..252 bytes long */
+};
+
+/* Host SMB register #0 */
+#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */
+#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */
+#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */
+#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */
+
+/* Host SMB register #1 */
+#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN
+#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN
+#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT
+#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT
+
+/* FW Status Register */
+#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
+
+#define E1000_MDALIGN 4096
+
+/* PCI-Ex registers */
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+ E1000_GCR_TXD_NO_SNOOP | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+/* Function Active and Power State to MNG */
+#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
+#define E1000_FACTPS_LAN0_VALID 0x00000004
+#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008
+#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0
+#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6
+#define E1000_FACTPS_LAN1_VALID 0x00000100
+#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200
+#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000
+#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12
+#define E1000_FACTPS_IDE_ENABLE 0x00004000
+#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000
+#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000
+#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18
+#define E1000_FACTPS_SP_ENABLE 0x00100000
+#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000
+#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000
+#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24
+#define E1000_FACTPS_IPMI_ENABLE 0x04000000
+#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000
+#define E1000_FACTPS_MNGCG 0x20000000
+#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000
+#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000
+
+/* PCI-Ex Config Space */
+#define PCI_EX_LINK_STATUS 0x12
+#define PCI_EX_LINK_WIDTH_MASK 0x3F0
+#define PCI_EX_LINK_WIDTH_SHIFT 4
+
+/* EEPROM Commands - Microwire */
+#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
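The word-0x0F masks above are consumed during flow-control setup: the pause advertisement sits in bits 13:12 of the EEPROM word but in bits 8:7 of the TXCW register, and the PAUSE_SHIFT constant defined further down in this header is exactly the distance between those two fields. A minimal sketch of the mapping (the helper name is hypothetical; the driver performs the equivalent shift inside its link-setup path):

	/* EEPROM word 0x0F bits 13:12 (PAUSE, ASM_DIR) land on TXCW bits 8:7.
	 * Sanity check: 0x3000 >> 5 == 0x0180 == E1000_TXCW_PAUSE_MASK. */
	static inline u32 eeprom_pause_to_txcw(u16 eeprom_word_0f)
	{
		return ((u32)(eeprom_word_0f & EEPROM_WORD0F_PAUSE_MASK)) >> PAUSE_SHIFT;
	}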
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* A new page has been received */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault */
+
+/* Next Page TX Register */
+#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE 0x8000 /* 1 = additional NP will follow
+ * 0 = sending last NP
+ */
+
+/* Link Partner Next Page Register */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = additional NP will follow
+ * 0 = sending last NP
+ */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c new file mode 100644 index 0000000..f97afda --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -0,0 +1,4974 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" +#include +#include +#include +#include +#include + +/* Intel Media SOC GbE MDIO physical base address */ +static unsigned long ce4100_gbe_mdio_base_phy; +/* Intel Media SOC GbE MDIO virtual base address */ +void __iomem *ce4100_gbe_mdio_base_virt; + +char e1000_driver_name[] = "e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; +#define DRV_VERSION "7.3.21-k8-NAPI" +const char e1000_driver_version[] = DRV_VERSION; +static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to... + * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct 
pci_dev *pdev, const struct pci_device_id *ent); +static void __devexit e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +static int e1000_open(struct net_device *netdev); +static int e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_set_rx_mode(struct net_device *netdev); +static void e1000_update_phy_info(unsigned long data); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(unsigned long data); +static void e1000_82547_tx_fifo_stall(unsigned long data); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static struct net_device_stats * e1000_get_stats(struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, u32 features); +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); +static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +#ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); +static int e1000_resume(struct pci_dev *pdev); +#endif +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int 
copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = __devexit_p(e1000_remove), +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = e1000_suspend, + .resume = e1000_resume, +#endif + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - return device + * used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ + +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. 
+ **/ + +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ + +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ + +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter = private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + 
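+	/* NAPI is enabled before the interrupt mask is lifted below, so an
+	 * interrupt arriving the moment e1000_irq_enable() runs can hand
+	 * its receive work straight to the polling context. */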
e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + * + **/ + +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + mdelay(1); + } +out: + return; +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_tx_disable(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + + /* + * Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * timers and tasks from rescheduling. 
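+	 * The timer and worker callbacks test this bit before rearming
+	 * or rescheduling themselves.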
+ */ + set_bit(__E1000_DOWN, &adapter->flags); + + del_timer_sync(&adapter->tx_fifo_stall_timer); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + netif_carrier_off(netdev); + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_reinit_safe(struct e1000_adapter *adapter) +{ + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + rtnl_lock(); + e1000_down(adapter); + e1000_up(adapter); + rtnl_unlock(); + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + /* if rtnl_lock is not held the call path is bogus */ + ASSERT_RTNL(); + WARN_ON(in_interrupt()); + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + e1000_down(adapter); + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. 
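+	 * For example, a 9000-byte MTU gives a 9018-byte max frame, so
+	 * the Tx side needs (9018 + 16 - 4) * 2 = 18060 bytes (16 is the
+	 * descriptor size, 4 the FCS), which rounds up to 18 KB.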
*/ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* + * the tx fifo also stores 16 bytes of information about the tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on rx space, rx wins and must trump tx + * adjustment or use Early Receive if available */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* + * flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/** + * Dump the eeprom for users having checksum issues + **/ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) { + pr_err("Unable to allocate memory to dump EEPROM data\n"); + return; + } + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + 
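+	/* Sum every EEPROM word before the checksum word; the stored
+	 * checksum should bring the 16-bit total to EEPROM_SUM (0xBABA),
+	 * so the expected value is EEPROM_SUM minus this sum.
+	 */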
for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static u32 e1000_fix_features(struct net_device *netdev, u32 features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. 
+ */ + if (features & NETIF_F_HW_VLAN_RX) + features |= NETIF_F_HW_VLAN_TX; + else + features &= ~NETIF_F_HW_VLAN_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, u32 features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u32 changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & NETIF_F_RXCSUM)) + return 0; + + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 0; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_get_stats = e1000_get_stats, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. + */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
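+ * The interface itself is not brought up here; that happens later in
+ * e1000_open() when the device is set IFF_UP.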
+ **/ +static int __devinit e1000_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + + static int cards_found = 0; + static int global_quad_port_a = 0; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = (1 << debug) - 1; + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i <= BAR_5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* + * there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. 
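+	 * Only PCI-X parts attempt the 64-bit mask below.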
There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { + /* + * according to DMA-API-HOWTO, coherent calls will always + * succeed if the set call did + */ + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1); + ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy, + pci_resource_len(pdev, BAR_1)); + + if (!ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_RX; + netdev->features = NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= NETIF_F_RXCSUM; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_HW_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* + * set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initalization here due to bad MAC address */ + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) + e_err(probe, "Invalid MAC Address\n"); + + init_timer(&adapter->tx_fifo_stall_timer); + adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall; + adapter->tx_fifo_stall_timer.data = (unsigned long)adapter; + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = e1000_watchdog; + adapter->watchdog_timer.data = (unsigned long) adapter; + + init_timer(&adapter->phy_info_timer); + adapter->phy_info_timer.function = e1000_update_phy_info; + adapter->phy_info_timer.data = (unsigned long)adapter; + + INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1){ + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + /* Fall Through */ + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = 1; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + if (tmp == 0 || tmp == 0xFF) { + if (i == 31) + goto err_eeprom; + continue; + } else + break; + } + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if 
(err) + goto err_register; + + e1000_vlan_mode(netdev, netdev->features); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 64 : 32), + netdev->dev_addr); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ + +static void __devexit e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + set_bit(__E1000_DOWN, &adapter->flags); + del_timer_sync(&adapter->tx_fifo_stall_timer); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + + e1000_release_manageability(adapter); + + unregister_netdev(netdev); + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ + +static int __devinit e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. 
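+ * This driver currently uses a single Tx and a single Rx queue.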
+ **/ + +static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ + +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
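+ * The stack calls this under the RTNL lock, which serializes it
+ * against the reset paths that also take that lock.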
+ **/ + +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ + +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) { + e_err(probe, "Unable to allocate memory for the Tx descriptor " + "ring\n"); + return -ENOMEM; + } + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + e_err(probe, "Unable to allocate memory for the Tx descriptor " + "ring\n"); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, 
txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ + +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ + +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
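+	 * (adapter->pcix_82544 is checked in the Tx mapping path)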
*/ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = 1; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ + +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) { + e_err(probe, "Unable to allocate memory for the Rx descriptor " + "ring\n"); + return -ENOMEM; + } + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + + if (!rxdr->desc) { + e_err(probe, "Unable to allocate memory for the Rx descriptor " + "ring\n"); +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate memory for the Rx " + "descriptor ring\n"); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ + +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if 
(adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ + +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ + +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ + +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ + +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = 0; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ + +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ + +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = 
NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ + +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ + +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma && + adapter->clean_rx == e1000_clean_rx_irq) { + dma_unmap_single(&pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); + } else if (buffer_info->dma && + adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + dma_unmap_page(&pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); + } + + buffer_info->dma = 0; + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } + + size = sizeof(struct e1000_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ + +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface 
device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ + +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ + +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) { + e_err(probe, "memory allocation failed\n"); + return; + } + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
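+	 * Multicast addresses that do not fit in the exact filters
+	 * overflow into the 4096-bit multicast hash table (MTA).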
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* + * If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy */ + +static void e1000_update_phy_info(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)data; + schedule_work(&adapter->phy_info_task); +} + +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task); + struct e1000_hw *hw = &adapter->hw; + + rtnl_lock(); + e1000_phy_get_info(hw, &adapter->phy_info); + rtnl_unlock(); +} + +/** + * e1000_82547_tx_fifo_stall - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void e1000_82547_tx_fifo_stall(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)data; + schedule_work(&adapter->fifo_stall_task); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + rtnl_lock(); + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); + } + } + rtnl_unlock(); +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
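+	 * On ce4100 the flag is forced on every call, so the link is
+	 * simply polled on each watchdog tick.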
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void e1000_watchdog(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + bool txb2b = true; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = false; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = false; + /* maybe add some timeout factor ? */ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + adapter->smartspeed = 0; + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
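+	 * The watchdog runs in timer (softirq) context and e1000_down()
+	 * sleeps, so the reset is deferred to the reset_task worker.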
*/ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* + * Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reset the timer */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, + adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, + adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb) +{ + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + int err; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) i = 0; + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb) +{ + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (skb->protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + 
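+	/* Checksum context fields: tucss is the offset at which the hardware
+	 * starts summing, tucso is where the computed checksum is written
+	 * back into the packet, and tucse == 0 means checksum through the
+	 * end of the packet. */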
+	context_desc->upper_setup.tcp_fields.tucss = css;
+	context_desc->upper_setup.tcp_fields.tucso =
+		css + skb->csum_offset;
+	context_desc->upper_setup.tcp_fields.tucse = 0;
+	context_desc->tcp_seg_setup.data = 0;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
+
+	if (unlikely(++i == tx_ring->count)) i = 0;
+	tx_ring->next_to_use = i;
+
+	return true;
+}
+
+#define E1000_MAX_TXD_PWR	12
+#define E1000_MAX_DATA_PER_TXD	(1 << E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring,
+			struct sk_buff *skb, unsigned int first,
+			unsigned int max_per_txd, unsigned int nr_frags,
+			unsigned int mss)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_buffer *buffer_info;
+	unsigned int len = skb_headlen(skb);
+	unsigned int offset = 0, size, count = 0, i;
+	unsigned int f;
+
+	i = tx_ring->next_to_use;
+
+	while (len) {
+		buffer_info = &tx_ring->buffer_info[i];
+		size = min(len, max_per_txd);
+		/* Workaround for Controller erratum --
+		 * descriptor for non-tso packet in a linear SKB that follows a
+		 * tso gets written back prematurely before the data is fully
+		 * DMA'd to the controller */
+		if (!skb->data_len && tx_ring->last_tx_tso &&
+		    !skb_is_gso(skb)) {
+			tx_ring->last_tx_tso = 0;
+			size -= 4;
+		}
+
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+		/* Workaround for errata 10; it applies to all controllers
+		 * in PCI-X mode.  The fix is to make sure that the first
+		 * descriptor of a packet is smaller than 2048 - 16 - 16
+		 * (or 2016) bytes
+		 */
+		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+			     (size > 2015) && count == 0))
+			size = 2015;
+
+		/* Workaround for potential 82544 hang in PCI-X.  Avoid
+		 * terminating buffers within evenly-aligned dwords. */
+		if (unlikely(adapter->pcix_82544 &&
+		    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+		    size > 4))
+			size -= 4;
+
+		buffer_info->length = size;
+		/* set time_stamp *before* dma to help avoid a possible race */
+		buffer_info->time_stamp = jiffies;
+		buffer_info->mapped_as_page = false;
+		buffer_info->dma = dma_map_single(&pdev->dev,
+						  skb->data + offset,
+						  size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+			goto dma_error;
+		buffer_info->next_to_watch = i;
+
+		len -= size;
+		offset += size;
+		count++;
+		if (len) {
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+		}
+	}
+
+	for (f = 0; f < nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+		len = frag->size;
+		offset = frag->page_offset;
+
+		while (len) {
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+
+			buffer_info = &tx_ring->buffer_info[i];
+			size = min(len, max_per_txd);
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc */
+			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+				size -= 4;
+			/* Workaround for potential 82544 hang in PCI-X.
+			 * Avoid terminating buffers within evenly-aligned
+			 * dwords.
+			 */
+			if (unlikely(adapter->pcix_82544 &&
+			    !((unsigned long)(page_to_phys(frag->page) + offset
+						+ size - 1) & 4) &&
+			    size > 4))
+				size -= 4;
+
+			buffer_info->length = size;
+			buffer_info->time_stamp = jiffies;
+			buffer_info->mapped_as_page = true;
+			buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
+							offset, size,
+							DMA_TO_DEVICE);
+			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+				goto dma_error;
+			buffer_info->next_to_watch = i;
+
+			len -= size;
+			offset += size;
+			count++;
+		}
+	}
+
+	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[first].next_to_watch = i;
+
+	return count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+	buffer_info->dma = 0;
+	if (count)
+		count--;
+
+	while (count--) {
+		if (i == 0)
+			i += tx_ring->count;
+		i--;
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+	}
+
+	return 0;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+			   struct e1000_tx_ring *tx_ring, int tx_flags,
+			   int count)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_tx_desc *tx_desc = NULL;
+	struct e1000_buffer *buffer_info;
+	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+	unsigned int i;
+
+	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+			     E1000_TXD_CMD_TSE;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
+			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+	}
+
+	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+	}
+
+	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+		txd_lower |= E1000_TXD_CMD_VLE;
+		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+	}
+
+	i = tx_ring->next_to_use;
+
+	while (count--) {
+		buffer_info = &tx_ring->buffer_info[i];
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		tx_desc->lower.data =
+			cpu_to_le32(txd_lower | buffer_info->length);
+		tx_desc->upper.data = cpu_to_le32(txd_upper);
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64). */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, hw->hw_addr + tx_ring->tdt);
+	/* we need this if more than one processor can write to our tail
+	 * at a time, it synchronizes IO on IA64/Altix systems */
+	mmiowb();
+}
+
+/**
+ * 82547 workaround to avoid controller hang in half-duplex environment.
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time.  This gives the Tx FIFO an opportunity to
+ * flush all packets.  When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + + /* This goes back to the question of how to logically map a tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. */ + tx_ring = adapter->tx_ring; + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
*/ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + unsigned int pull_size; + case e1000_82544: + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) + break; + /* fall through */ + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely(hw->mac_type == e1000_82547)) { + if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + mod_timer(&adapter->tx_fifo_stall_timer, + jiffies + 1); + return NETDEV_TX_BUSY; + } + } + + if (vlan_tx_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = 1; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (likely(skb->protocol == htons(ETH_P_IP))) + tx_flags |= E1000_TX_FLAGS_IPV4; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + /* Make sure there is space in the ring for the next send. 
*/ + e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); + + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ + +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e1000_reinit_safe(adapter); +} + +/** + * e1000_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ + +static struct net_device_stats *e1000_get_stats(struct net_device *netdev) +{ + /* only return the current stats */ + return &netdev->stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ + +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + + if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + e_err(probe, "Invalid MTU setting\n"); + return -EINVAL; + } + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) + e1000_down(adapter); + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + pr_info("%s changing MTU from %d to %d\n", + netdev->name, netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ + +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* + * Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + spin_lock_irqsave(&adapter->stats_lock, flags); + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* 
used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ + +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* + * we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + 
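+		/* adapter->total_* accumulate over the coming NAPI poll and
+		 * feed the dynamic ITR update in e1000_set_itr() once the
+		 * poll completes */
+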
adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! if it does it is basically a + * bug, but not a hard error, so enable ints and continue */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @adapter: board private structure + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete) + work_done = budget; + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + napi_complete(napi); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes=0, total_tx_packets=0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + struct sk_buff *skb = buffer_info->skb; + unsigned int segs, bytecount; + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + + skb->len; + total_tx_packets += segs; + total_tx_bytes += bytecount; + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD 32 + if (unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+	 */
+	smp_mb();
+
+	if (netif_queue_stopped(netdev) &&
+	    !(test_bit(__E1000_DOWN, &adapter->flags))) {
+		netif_wake_queue(netdev);
+		++adapter->restart_queue;
+	}
+	}
+
+	if (adapter->detect_tx_hung) {
+		/* Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i */
+		adapter->detect_tx_hung = false;
+		if (tx_ring->buffer_info[eop].time_stamp &&
+		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+			       (adapter->tx_timeout_factor * HZ)) &&
+		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+
+			/* detected Tx unit hang */
+			e_err(drv, "Detected Tx Unit Hang\n"
+			      "  Tx Queue             <%lu>\n"
+			      "  TDH                  <%x>\n"
+			      "  TDT                  <%x>\n"
+			      "  next_to_use          <%x>\n"
+			      "  next_to_clean        <%x>\n"
+			      "buffer_info[next_to_clean]\n"
+			      "  time_stamp           <%lx>\n"
+			      "  next_to_watch        <%x>\n"
+			      "  jiffies              <%lx>\n"
+			      "  next_to_watch.status <%x>\n",
+			      (unsigned long)(tx_ring - adapter->tx_ring),
+			      readl(hw->hw_addr + tx_ring->tdh),
+			      readl(hw->hw_addr + tx_ring->tdt),
+			      tx_ring->next_to_use,
+			      tx_ring->next_to_clean,
+			      tx_ring->buffer_info[eop].time_stamp,
+			      eop,
+			      jiffies,
+			      eop_desc->upper.fields.status);
+			netif_stop_queue(netdev);
+		}
+	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
+	netdev->stats.tx_bytes += total_tx_bytes;
+	netdev->stats.tx_packets += total_tx_packets;
+	return count < tx_ring->count;
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter:    board private structure
+ * @status_err: receive descriptor status and error fields
+ * @csum:       receive descriptor csum field
+ * @skb:        socket buffer with received data
+ **/
+
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+			      u32 csum, struct sk_buff *skb)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 status = (u16)status_err;
+	u8 errors = (u8)(status_err >> 24);
+
+	skb_checksum_none_assert(skb);
+
+	/* 82543 or newer only */
+	if (unlikely(hw->mac_type < e1000_82543)) return;
+	/* Ignore Checksum bit is set */
+	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	/* TCP/UDP checksum error bit is set */
+	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_err++;
+		return;
+	}
+	/* TCP/UDP Checksum has not been calculated */
+	if (!(status & E1000_RXD_STAT_TCPCS))
+		return;
+
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (likely(status & E1000_RXD_STAT_TCPCS)) {
+		/* TCP checksum is good */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+	adapter->hw_csum_good++;
+}
+
+/**
+ * e1000_consume_page - helper function
+ **/
+static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+			       u16 length)
+{
+	bi->page = NULL;
+	skb->len += length;
+	skb->data_len += length;
+	skb->truesize += length;
+}
+
+/**
+ * e1000_receive_skb - helper function to handle rx indications
+ * @adapter: board private structure
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @skb: pointer to sk_buff to be indicated to stack
+ */
+static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
+			      __le16 vlan, struct sk_buff *skb)
+{
+	skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+	if (status & E1000_RXD_STAT_VP) {
+		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+
+		__vlan_hwaccel_put_tag(skb, vid);
+	}
+	napi_gro_receive(&adapter->napi, skb);
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data
up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + unsigned long irq_flags; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + if (++i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + buffer_info->length, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 last_byte = *(skb->data + length - 1); + if (TBI_ACCEPT(hw, status, rx_desc->errors, length, + last_byte)) { + spin_lock_irqsave(&adapter->stats_lock, + irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, + length, skb->data); + spin_unlock_irqrestore(&adapter->stats_lock, + irq_flags); + length--; + } else { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window + * too */ + if (rx_ring->rx_skb_top) + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the current skb, we only consumed the + * page */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page */ + if (length <= copybreak && + skb_tailroom(skb) >= length) { + u8 *vaddr; + vaddr = kmap_atomic(buffer_info->page, + KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, length); + kunmap_atomic(vaddr, + KM_SKB_DATA_SOFTIRQ); + /* re-use 
the page, so don't erase + * buffer_info->page */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + pskb_trim(skb, skb->len - 4); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + e_err(drv, "pskb_may_pull failed.\n"); + dev_kfree_skb(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, status, rx_desc->special, skb); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* + * this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static void e1000_check_copybreak(struct net_device *netdev, + struct e1000_buffer *buffer_info, + u32 length, struct sk_buff **skb) +{ + struct sk_buff *new_skb; + + if (length > copybreak) + return; + + new_skb = netdev_alloc_skb_ip_align(netdev, length); + if (!new_skb) + return; + + skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, + (*skb)->data - NET_IP_ALIGN, + length + NET_IP_ALIGN); + /* save the skb in buffer_info as good */ + buffer_info->skb = *skb; + *skb = new_skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + unsigned long flags; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + if (++i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_single(&pdev->dev, 
buffer_info->dma,
+				 buffer_info->length, DMA_FROM_DEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+		/* !EOP means multiple descriptors were used to store a single
+		 * packet; if that's the case we need to toss it.  In fact, we
+		 * need to toss every packet with the EOP bit clear and the
+		 * next frame that _does_ have the EOP bit set, as it is by
+		 * definition only a frame fragment
+		 */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+			adapter->discarding = true;
+
+		if (adapter->discarding) {
+			/* All receives must fit into a single buffer */
+			e_dbg("Receive packet consumed multiple buffers\n");
+			/* recycle */
+			buffer_info->skb = skb;
+			if (status & E1000_RXD_STAT_EOP)
+				adapter->discarding = false;
+			goto next_desc;
+		}
+
+		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+			u8 last_byte = *(skb->data + length - 1);
+			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
+				       last_byte)) {
+				spin_lock_irqsave(&adapter->stats_lock, flags);
+				e1000_tbi_adjust_stats(hw, &adapter->stats,
+						       length, skb->data);
+				spin_unlock_irqrestore(&adapter->stats_lock,
+						       flags);
+				length--;
+			} else {
+				/* recycle */
+				buffer_info->skb = skb;
+				goto next_desc;
+			}
+		}
+
+		/* adjust length to remove Ethernet CRC, this must be
+		 * done after the TBI_ACCEPT workaround above */
+		length -= 4;
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += length;
+		total_rx_packets++;
+
+		e1000_check_copybreak(netdev, buffer_info, length, &skb);
+
+		skb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		e1000_receive_skb(adapter, status, rx_desc->special, skb);
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	netdev->stats.rx_bytes += total_rx_bytes;
+	netdev->stats.rx_packets += total_rx_packets;
+	return cleaned;
+}
+
+/**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to receive ring structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+
+static void
+e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rx_ring, int cleaned_count)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = 256 - 16 /* for skb_reserve */;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
+			skb_trim(skb, 0);
+			goto check_page;
+		}
+
+		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+			struct sk_buff *oldskb = skb;
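+			/* keep a reference to the misaligned skb so it is
+			 * freed only after the retry; freeing it first could
+			 * hand the allocator back the same 64 kB-crossing
+			 * buffer (errata 23) */
+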
e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, skb->data); + /* Try again, without freeing the previous */ + skb = netdev_alloc_skb_ip_align(netdev, bufsz); + /* Failed allocation, critical failure */ + if (!skb) { + dev_kfree_skb(oldskb); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + /* give up */ + dev_kfree_skb(skb); + dev_kfree_skb(oldskb); + break; /* while (cleaned_count--) */ + } + + /* Use new allocation */ + dev_kfree_skb(oldskb); + } + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->page, 0, + buffer_info->length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->page); + dev_kfree_skb(skb); + buffer_info->page = NULL; + buffer_info->skb = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + **/ + +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = netdev_alloc_skb_ip_align(netdev, bufsz); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + struct sk_buff *oldskb = skb; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, skb->data); + /* Try again, without freeing the previous */ + skb = netdev_alloc_skb_ip_align(netdev, bufsz); + /* Failed allocation, critical failure */ + if (!skb) { + dev_kfree_skb(oldskb); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + /* give up */ + dev_kfree_skb(skb); + dev_kfree_skb(oldskb); + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + + /* Use new allocation */ + dev_kfree_skb(oldskb); + } + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, + 
skb->data, + buffer_info->length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_kfree_skb(skb); + buffer_info->skb = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + + /* + * XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + dev_kfree_skb(skb); + buffer_info->skb = NULL; + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. + * @adapter: + **/ + +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ + +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - 
+ * @netdev: + * @ifreq: + * @cmd: + **/ + +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl 
&= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, u32 features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return; + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, adapter->active_vlans); +} + +static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 Mbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + 
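/* Shutdown helper: detach the netdev, stop the rings, program the wake-up filters (WUFC) when Wake-on-LAN is configured, and report the final wake decision back through *enable_wake. */ +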
struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = er32(RCTL); + rctl |= E1000_RCTL_MPE; + ew32(RCTL, rctl); + } + + if (hw->mac_type >= e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + +static int e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
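+ * + * disable_irq() waits for any handler that is already running to + * finish, so calling e1000_intr() directly here cannot race the + * hardware interrupt path.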
+ */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_osdep.h b/drivers/net/ethernet/intel/e1000/e1000_osdep.h new file mode 100644 index 0000000..33e7c45a --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_osdep.h @@ -0,0 +1,109 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include <asm/io.h> + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c new file mode 100644 index 0000000..1301eba --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_param.c @@ -0,0 +1,755 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. 
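+ * For example, AutoNeg = 0x29 advertises 1000/FD, 100/FD and 10/HD + * (0x20 | 0x08 | 0x01).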
+ * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int __devinit e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + 
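/* A list option must match one of the { value, description } pairs in its table; the matching entry's description is logged. */ +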
case list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ + +void __devinit e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_RXD : E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt 
Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ + +static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber " + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ + +static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct 
e1000_option) { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + 
adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + /* fall through */ + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c new file mode 100644 index 0000000..e4f4225 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -0,0 +1,1516 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 80003ES2LAN Gigabit Ethernet Controller (Copper) + * 80003ES2LAN Gigabit Ethernet Controller (Serdes) + */ + +#include "e1000.h" + +#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 +#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 +#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 +#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F + +#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 +#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 +#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 + +#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 +#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 +#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 + +#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C +#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 + +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ +#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 + +#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 +#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 + +/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */ +#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 +#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ +#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ +#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ + +/* PHY Specific Control Register 2 (Page 0, Register 26) */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 + /* 1=Reverse Auto-Negotiation */ + +/* MAC Specific Control Register (Page 2, Register 21) */ +/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ +#define GG82563_MSCR_TX_CLK_MASK 0x0007 +#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 +#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 +#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 + +#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ + +/* DSP Distance Register (Page 5, Register 26) */ +#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M + 1 = 50-80M + 2 = 80-110M + 3 = 110-140M + 4 = >140M */ + +/* Kumeran Mode Control Register (Page 193, Register 16) */ +#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 + +/* Max number of times Kumeran read/write should be validated */ +#define GG82563_MAX_KMRN_RETRY 0x5 + +/* Power Management Control Register (Page 193, Register 20) */ +#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 + /* 1=Enable SERDES Electrical Idle */ + +/* In-Band Control Register (Page 194, Register 18) */ +#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ + +/* + * A table for the GG82563 cable length where the range is defined + * with a lower bound at "index" and the upper bound at + * "index + 5". 
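+ * For example, a DSP distance reading of 1 selects the 60-115 metre + * range (table[1] = 60, table[1 + 5] = 115).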
+ */ +static const u16 e1000_gg82563_cable_length_table[] = { + 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; +#define GG82563_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_gg82563_cable_length_table) + +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); +static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw); +static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data); +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } else { + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000e_get_phy_id(hw); + + /* Verify phy id */ + if (phy->id != GG82563_E_PHY_ID) + return -E1000_ERR_PHY; + + return ret_val; +} + +/** + * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + + return 0; +} + +/** + * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. 
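+ * (the media type is keyed off the PCI device ID: the SERDES dual-port + * device gets the fiber/serdes link ops, everything else is copper)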
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_mac_operations *func = &mac->ops; + + /* Set media type */ + switch (adapter->pdev->device) { + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = + (er32(FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + /* Adaptive IFS not supported */ + mac->adaptive_ifs = false; + + /* check for link */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; + func->check_for_link = e1000e_check_for_copper_link; + break; + case e1000_media_type_fiber: + func->setup_physical_interface = e1000e_setup_fiber_serdes_link; + func->check_for_link = e1000e_check_for_fiber_link; + break; + case e1000_media_type_internal_serdes: + func->setup_physical_interface = e1000e_setup_fiber_serdes_link; + func->check_for_link = e1000e_check_for_serdes_link; + break; + default: + return -E1000_ERR_CONFIG; + break; + } + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return 0; +} + +static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_80003es2lan(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_80003es2lan(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_80003es2lan(hw); + if (rc) + return rc; + + return 0; +} + +/** + * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to acquire access rights to the correct PHY. + **/ +static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_phy_80003es2lan - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. + **/ +static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the Kumeran interface. 
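+ * Unlike the per-port PHY semaphores, a single E1000_SWFW_CSR_SM bit + * is used no matter which port asks.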
+ * + **/ +static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = E1000_SWFW_CSR_SM; + + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the Kumeran interface + **/ +static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = E1000_SWFW_CSR_SM; + + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the EEPROM. + **/ +static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + if (ret_val) + return ret_val; + + ret_val = e1000e_acquire_nvm(hw); + + if (ret_val) + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the EEPROM. + **/ +static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 i = 0; + s32 timeout = 50; + + while (i < timeout) { + if (e1000e_get_hw_semaphore(hw)) + return -E1000_ERR_SWFW_SYNC; + + swfw_sync = er32(SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000e_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + return -E1000_ERR_SWFW_SYNC; + } + + swfw_sync |= swmask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); + + return 0; +} + +/** + * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (e1000e_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = er32(SW_FW_SYNC); + swfw_sync &= ~mask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); +} + +/** + * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: pointer to the data returned from the operation + * + * Read the GG82563 PHY register. 
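+ * GG82563 registers are paged: the page number (offset >> + * GG82563_PAGE_SHIFT) is written to a page-select register first, and + * the low bits (offset & MAX_PHY_REG_ADDRESS) then address the + * register within the selected page.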
+ **/ +static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* + * Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { + /* + * The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + udelay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + ret_val = -E1000_ERR_PHY; + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + udelay(200); + + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + udelay(200); + } else { + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: value to write to the register + * + * Write to the GG82563 PHY register. + **/ +static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* + * Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { + /* + * The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + udelay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + udelay(200); + + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + udelay(200); + } else { + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_nvm_80003es2lan - Write to ESB2 NVM + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @words: number of words to write + * @data: buffer of data to write to the NVM + * + * Write "words" of data to the ESB2 NVM. 
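+ * This is a thin wrapper around the generic SPI write path, + * e1000e_write_nvm_spi().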
+ **/ +static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return e1000e_write_nvm_spi(hw, offset, words, data); +} + +/** + * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete + * @hw: pointer to the HW structure + * + * Wait a specific amount of time for manageability processes to complete. + * This is a function pointer entry point called by the phy module. + **/ +static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + + while (timeout) { + if (er32(EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex + * @hw: pointer to the HW structure + * + * Force the speed and duplex settings onto the PHY. This is a + * function pointer entry point called by the phy module. + **/ +static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + bool link; + + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("GG82563 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= MII_CR_RESET; + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (hw->phy.autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link " + "on GG82563 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* + * We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* + * Resetting the phy means we need to verify the TX_CLK corresponds + * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. + */ + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; + + /* + * In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_get_cable_length_80003es2lan - Set approximate cable length + * @hw: pointer to the HW structure + * + * Find the approximate cable length as measured by the GG82563 PHY. + * This is a function pointer entry point called by the phy module. 
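+ * The reported length is the midpoint of the table range; an index of + * 1, for example, yields (60 + 115) / 2 = 87 metres.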
+ **/ +static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); + if (ret_val) + goto out; + + index = phy_data & GG82563_DSPD_CABLE_LENGTH; + + if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_gg82563_cable_length_table[index]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000_get_link_up_info_80003es2lan - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + **/ +static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000e_get_speed_and_duplex_copper(hw, + speed, + duplex); + hw->phy.ops.cfg_on_link_up(hw); + } else { + ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, + speed, + duplex); + } + + return ret_val; +} + +/** + * e1000_reset_hw_80003es2lan - Reset the ESB2 controller + * @hw: pointer to the HW structure + * + * Perform a global reset to the ESB2 controller. + **/ +static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + ctrl = er32(CTRL); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + e1000_release_phy_80003es2lan(hw); + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + er32(ICR); + + ret_val = e1000_check_alt_mac_addr_generic(hw); + + return ret_val; +} + +/** + * e1000_init_hw_80003es2lan - Initialize the ESB2 controller + * @hw: pointer to the HW structure + * + * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. + **/ +static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 kum_reg_data; + u16 i; + + e1000_initialize_hw_bits_80003es2lan(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) + e_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. 
*/ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000e_setup_link(hw); + + /* Disable IBIST slave mode (far-end loopback) */ + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + reg_data = er32(TXDCTL(1)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL(1), reg_data); + + /* Enable retransmit on late collisions */ + reg_data = er32(TCTL); + reg_data |= E1000_TCTL_RTLC; + ew32(TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = er32(TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; + ew32(TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = er32(TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + ew32(TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + + /* default to true to enable the MDIC W/A */ + hw->dev_spec.e80003es2lan.mdic_wa_enable = true; + + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, + &i); + if (!ret_val) { + if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == + E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) + hw->dev_spec.e80003es2lan.mdic_wa_enable = false; + } + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
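The TXDCTL, TCTL, TCTL_EXT and TIPG updates in e1000_init_hw_80003es2lan() above all follow one read-modify-write pattern: read the register, clear the field being changed, OR in the new value, write it back. A generic sketch, with a plain pointer standing in for the er32()/ew32() accessors:

	#include <stdint.h>

	static void rmw_reg(volatile uint32_t *reg, uint32_t clear_mask,
			    uint32_t set_bits)
	{
		uint32_t val = *reg;	/* read */

		val &= ~clear_mask;	/* modify: drop the old field */
		val |= set_bits;	/*         install the new bits */
		*reg = val;		/* write back */
	}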
+ **/ +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + if (hw->phy.media_type != e1000_media_type_copper) + reg &= ~(1 << 20); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC(1), reg); +} + +/** + * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link + * @hw: pointer to the HW structure + * + * Setup some GG82563 PHY registers for obtaining link + **/ +static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl_ext; + u16 data; + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ + data |= GG82563_MSCR_TX_CLK_1000MBPS_25; + + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (phy->mdix) { + case 1: + data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (phy->disable_polarity_correction) + data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Bypass Rx and Tx FIFO's */ + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, + E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + if (ret_val) + return ret_val; + + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, + &data); + if (ret_val) + return ret_val; + data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, + data); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data); + if (ret_val) + return ret_val; + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); + ew32(CTRL_EXT, ctrl_ext); + + ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); + if (ret_val) + return ret_val; + + /* + * Do not init these registers when the HW is in IAMT mode, since the + * firmware will have already 
initialized them. We only initialize
+ * them if the HW is not in IAMT mode.
+ */
+	if (!e1000e_check_mng_mode(hw)) {
+		/* Enable Electrical Idle on the PHY */
+		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+		ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+		ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/*
+	 * Workaround: Disable padding in Kumeran interface in the MAC
+	 * and in the PHY to avoid CRC errors.
+	 */
+	ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= GG82563_ICR_DIS_PADDING;
+	ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Essentially a wrapper for setting up all things "copper" related.
+ * This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each
+	 * iteration and increase the max iterations when
+	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+						   0xFFFF);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+						  &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+						   reg_data);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+					&reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+					reg_data);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_setup_copper_link(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 or gigabit operation.
+ **/
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 speed;
+	u16 duplex;
+
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
+							     &duplex);
+		if (ret_val)
+			return ret_val;
+
+		if (speed == SPEED_1000)
+			ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+		else
+			ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
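e1000_cfg_on_link_up_80003es2lan() above only touches copper links, and picks between the gigabit and 10/100 KMRN quirk routines based on the negotiated speed. Reduced to its control flow, as a sketch with hypothetical names:

	#include <stdint.h>

	#define SPEED_1000 1000

	struct link_info { int is_copper; uint16_t speed; uint16_t duplex; };

	static int cfg_on_link_up(const struct link_info *li,
				  int (*cfg_1000)(void),
				  int (*cfg_10_100)(uint16_t duplex))
	{
		if (!li->is_copper)
			return 0;	/* fiber/serdes: nothing to do */

		return (li->speed == SPEED_1000) ? cfg_1000()
						 : cfg_10_100(li->duplex);
	}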
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+	s32 ret_val;
+	u32 tipg;
+	u32 i = 0;
+	u16 reg_data, reg_data2;
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+					reg_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = er32(TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+	ew32(TIPG, tipg);
+
+	do {
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+		if (ret_val)
+			return ret_val;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	if (duplex == HALF_DUPLEX)
+		reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+	else
+		reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+	ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+	return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 reg_data, reg_data2;
+	u32 tipg;
+	u32 i = 0;
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+					reg_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = er32(TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	ew32(TIPG, tipg);
+
+	do {
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+		if (ret_val)
+			return ret_val;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+	ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+	return ret_val;
+}
+
+/**
+ * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquire semaphore, then read the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release the semaphore before exiting.
+ **/
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+					   u16 *data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val = 0;
+
+	ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	kmrnctrlsta = er32(KMRNCTRLSTA);
+	*data = (u16)kmrnctrlsta;
+
+	e1000_release_mac_csr_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquire semaphore, then write the data to PHY register
+ * at the offset using the kumeran interface. Release semaphore
+ * before exiting.
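Both KMRN quirk routines above trust GG82563_PHY_KMRN_MODE_CTRL only once two back-to-back reads return the same value. The retry loop in isolation; read_phy() is a hypothetical stand-in for e1e_rphy(), and the bound mirrors GG82563_MAX_KMRN_RETRY:

	#include <stdint.h>

	#define MAX_KMRN_RETRY 0x5

	static int read_stable(int (*read_phy)(uint16_t *val), uint16_t *out)
	{
		uint16_t a, b;
		uint32_t i = 0;

		do {
			if (read_phy(&a) || read_phy(&b))
				return -1;	/* propagate the MDIC error */
		} while (a != b && ++i < MAX_KMRN_RETRY);

		*out = a;	/* caller tweaks this value and writes it back */
		return 0;
	}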
+ **/ +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val = 0; + + ret_val = e1000_acquire_mac_csr_80003es2lan(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + e1000_release_mac_csr_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_read_mac_addr_80003es2lan - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_mac_addr_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) +{ + e1000e_clear_hw_cntrs_base(hw); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + er32(ICRXPTC); + er32(ICRXATC); + er32(ICTXPTC); + er32(ICTXATC); + er32(ICTXQEC); + er32(ICTXQMTC); + er32(ICRXDMTC); +} + +static struct e1000_mac_operations es2_mac_ops = { + .read_mac_addr = e1000_read_mac_addr_80003es2lan, + .id_led_init = e1000e_id_led_init, + .blink_led = e1000e_blink_led_generic, + .check_mng_mode = e1000e_check_mng_mode_generic, + /* check_for_link dependent on media type */ + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, + .get_bus_info = e1000e_get_bus_info_pcie, + .set_lan_id = e1000_set_lan_id_multi_port_pcie, + .get_link_up_info = e1000_get_link_up_info_80003es2lan, + .led_on = e1000e_led_on_generic, + .led_off = e1000e_led_off_generic, + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .write_vfta = e1000_write_vfta_generic, + .clear_vfta = e1000_clear_vfta_generic, + .reset_hw = e1000_reset_hw_80003es2lan, + .init_hw = e1000_init_hw_80003es2lan, + .setup_link = e1000e_setup_link, + /* setup_physical_interface dependent on media type */ + .setup_led = e1000e_setup_led_generic, +}; + +static struct e1000_phy_operations es2_phy_ops = { + .acquire = e1000_acquire_phy_80003es2lan, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = 
e1000_phy_force_speed_duplex_80003es2lan, + .get_cfg_done = e1000_get_cfg_done_80003es2lan, + .get_cable_length = e1000_get_cable_length_80003es2lan, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, + .release = e1000_release_phy_80003es2lan, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = NULL, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, + .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, +}; + +static struct e1000_nvm_operations es2_nvm_ops = { + .acquire = e1000_acquire_nvm_80003es2lan, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_80003es2lan, + .update = e1000e_update_nvm_checksum_generic, + .valid_led_default = e1000e_valid_led_default, + .validate = e1000e_validate_nvm_checksum_generic, + .write = e1000_write_nvm_80003es2lan, +}; + +struct e1000_info e1000_es2_info = { + .mac = e1000_80003es2lan, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_RX_NEEDS_RESTART /* errata */ + | FLAG_TARC_SET_BIT_ZERO /* errata */ + | FLAG_APME_CHECK_PORT_B + | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ + | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, + .flags2 = FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_80003es2lan, + .mac_ops = &es2_mac_ops, + .phy_ops = &es2_phy_ops, + .nvm_ops = &es2_nvm_ops, +}; + diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c new file mode 100644 index 0000000..480f259 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -0,0 +1,2115 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82571EB Gigabit Ethernet Controller + * 82571EB Gigabit Ethernet Controller (Copper) + * 82571EB Gigabit Ethernet Controller (Fiber) + * 82571EB Dual Port Gigabit Mezzanine Adapter + * 82571EB Quad Port Gigabit Mezzanine Adapter + * 82571PT Gigabit PT Quad Port Server ExpressModule + * 82572EI Gigabit Ethernet Controller (Copper) + * 82572EI Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller + * 82573V Gigabit Ethernet Controller (Copper) + * 82573E Gigabit Ethernet Controller (Copper) + * 82573L Gigabit Ethernet Controller + * 82574L Gigabit Network Connection + * 82583V Gigabit Network Connection + */ + +#include "e1000.h" + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ +#define E1000_BASE1000T_STATUS 10 +#define E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF + +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ + +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +static s32 e1000_setup_link_82571(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); +static void e1000_clear_vfta_82571(struct e1000_hw *hw); +static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); +static s32 e1000_led_on_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82571; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + break; + case e1000_82573: + phy->type = e1000_phy_m88; + break; + case e1000_82574: + case e1000_82583: + phy->type = e1000_phy_bm; + phy->ops.acquire = e1000_get_hw_semaphore_82574; + phy->ops.release = e1000_put_hw_semaphore_82574; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; + break; + default: + return -E1000_ERR_PHY; + break; + } + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000_get_phy_id_82571(hw); + if (ret_val) { + e_dbg("Error getting PHY ID\n"); + return ret_val; + } + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + if (phy->id != IGP01E1000_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82573: + if (phy->id != M88E1111_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82574: + case e1000_82583: + if (phy->id != BME1000_E_PHY_ID_R2) + ret_val = -E1000_ERR_PHY; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); + + return ret_val; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* + * Autonomous Flash update bit must be cleared due + * to Flash update issue. + */ + eecd &= ~E1000_EECD_AUPDEN; + ew32(EECD, eecd); + break; + } + /* Fall Through */ + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + break; + } + + /* Function Pointers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + nvm->ops.acquire = e1000_get_hw_semaphore_82574; + nvm->ops.release = e1000_put_hw_semaphore_82574; + break; + default: + break; + } + + return 0; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. 
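In e1000_init_nvm_params_82571() above, the EECD size field is treated as a shift amount: it is added to NVM_WORD_SIZE_BASE_SHIFT, capped at 14, and the word count is 1 << size. A sketch of just that computation; the mask and shift constants here are illustrative, not authoritative:

	#include <stdint.h>

	#define EECD_SIZE_EX_MASK	0x00007800	/* illustrative */
	#define EECD_SIZE_EX_SHIFT	11		/* illustrative */
	#define NVM_WORD_SIZE_BASE_SHIFT 6

	static uint16_t nvm_word_size(uint32_t eecd)
	{
		uint16_t size = (uint16_t)((eecd & EECD_SIZE_EX_MASK) >>
					   EECD_SIZE_EX_SHIFT);

		size += NVM_WORD_SIZE_BASE_SHIFT; /* field is log2 minus base */
		if (size > 14)			  /* >16K words unsupported  */
			size = 14;

		return (uint16_t)(1 << size);	  /* e.g. 14 -> 16384 words  */
	}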
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_mac_operations *func = &mac->ops; + u32 swsm = 0; + u32 swsm2 = 0; + bool force_clear_smbi = false; + + /* Set media type */ + switch (adapter->pdev->device) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82572EI_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* check for link */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + func->setup_physical_interface = e1000_setup_copper_link_82571; + func->check_for_link = e1000e_check_for_copper_link; + func->get_link_up_info = e1000e_get_speed_and_duplex_copper; + break; + case e1000_media_type_fiber: + func->setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + func->check_for_link = e1000e_check_for_fiber_link; + func->get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + case e1000_media_type_internal_serdes: + func->setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + func->check_for_link = e1000_check_for_serdes_link_82571; + func->get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + default: + return -E1000_ERR_CONFIG; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + func->set_lan_id = e1000_set_lan_id_single_port; + func->check_mng_mode = e1000e_check_mng_mode_generic; + func->led_on = e1000e_led_on_generic; + func->blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + /* + * ARC supported; valid only if manageability features are + * enabled. + */ + mac->arc_subsystem_valid = + (er32(FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + break; + case e1000_82574: + case e1000_82583: + func->set_lan_id = e1000_set_lan_id_single_port; + func->check_mng_mode = e1000_check_mng_mode_82574; + func->led_on = e1000_led_on_82574; + break; + default: + func->check_mng_mode = e1000e_check_mng_mode_generic; + func->led_on = e1000e_led_on_generic; + func->blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + break; + } + + /* + * Ensure that the inter-port SWSM.SMBI lock bit is clear before + * first NVM or PHY access. This should be done for single-port + * devices, and for one port only on dual-port devices so that + * for those devices we can still use the SMBI lock to synchronize + * inter-port accesses to the PHY & NVM. 
+ */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + swsm2 = er32(SWSM2); + + if (!(swsm2 & E1000_SWSM2_LOCK)) { + /* Only do this for the first interface on this card */ + ew32(SWSM2, + swsm2 | E1000_SWSM2_LOCK); + force_clear_smbi = true; + } else + force_clear_smbi = false; + break; + default: + force_clear_smbi = true; + break; + } + + if (force_clear_smbi) { + /* Make sure SWSM.SMBI is clear */ + swsm = er32(SWSM); + if (swsm & E1000_SWSM_SMBI) { + /* This bit should not be set on a first interface, and + * indicates that the bootagent or EFI code has + * improperly left this bit enabled + */ + e_dbg("Please update your 82571 Bootagent\n"); + } + ew32(SWSM, swsm & ~E1000_SWSM_SMBI); + } + + /* + * Initialize device specific counter of SMBI acquisition + * timeouts. + */ + hw->dev_spec.e82571.smb_counter = 0; + + return 0; +} + +static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + static int global_quad_port_a; /* global port a indication */ + struct pci_dev *pdev = adapter->pdev; + int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; + s32 rc; + + rc = e1000_init_mac_params_82571(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_82571(hw); + if (rc) + return rc; + + /* tag quad port adapters first, it's used below */ + switch (pdev->device) { + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + adapter->flags |= FLAG_IS_QUAD_PORT; + /* mark the first port */ + if (global_quad_port_a == 0) + adapter->flags |= FLAG_IS_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + global_quad_port_a++; + if (global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + break; + } + + switch (adapter->hw.mac.type) { + case e1000_82571: + /* these dual ports don't have WoL on port B at all */ + if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || + (pdev->device == E1000_DEV_ID_82571EB_SERDES) || + (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && + (is_port_b)) + adapter->flags &= ~FLAG_HAS_WOL; + /* quad ports only support WoL on port A */ + if (adapter->flags & FLAG_IS_QUAD_PORT && + (!(adapter->flags & FLAG_IS_QUAD_PORT_A))) + adapter->flags &= ~FLAG_HAS_WOL; + /* Does not support WoL on any port */ + if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) + adapter->flags &= ~FLAG_HAS_WOL; + break; + case e1000_82573: + if (pdev->device == E1000_DEV_ID_82573L) { + adapter->flags |= FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = DEFAULT_JUMBO; + } + break; + default: + break; + } + + return 0; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_id = 0; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* + * The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. 
+ */
+		phy->id = IGP01E1000_I_PHY_ID;
+		break;
+	case e1000_82573:
+		return e1000e_get_phy_id(hw);
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+		if (ret_val)
+			return ret_val;
+
+		phy->id = (u32)(phy_id << 16);
+		udelay(20);
+		ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+		if (ret_val)
+			return ret_val;
+
+		phy->id |= (u32)(phy_id);
+		phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+		break;
+	default:
+		return -E1000_ERR_PHY;
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 sw_timeout = hw->nvm.word_size + 1;
+	s32 fw_timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	/*
+	 * If we have timed out 3 times on trying to acquire
+	 * the inter-port SMBI semaphore, there is old code
+	 * operating on the other port, and it is not
+	 * releasing SMBI. Modify the number of times that
+	 * we try for the semaphore to interwork with this
+	 * older code.
+	 */
+	if (hw->dev_spec.e82571.smb_counter > 2)
+		sw_timeout = 1;
+
+	/* Get the SW semaphore */
+	while (i < sw_timeout) {
+		swsm = er32(SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		udelay(50);
+		i++;
+	}
+
+	if (i == sw_timeout) {
+		e_dbg("Driver can't access device - SMBI bit is set.\n");
+		hw->dev_spec.e82571.smb_counter++;
+	}
+	/* Get the FW semaphore. */
+	for (i = 0; i < fw_timeout; i++) {
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (er32(SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == fw_timeout) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_82571(hw);
+		e_dbg("Driver can't access the NVM\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ * e1000_put_hw_semaphore_82571 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	swsm = er32(SWSM);
+	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+	ew32(SWSM, swsm);
+}
+/**
+ * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+	s32 ret_val = 0;
+	s32 i = 0;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+	extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+	do {
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+		extcnf_ctrl = er32(EXTCNF_CTRL);
+
+		if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+			break;
+
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+		usleep_range(2000, 4000);
+		i++;
+	} while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+	if (i == MDIO_OWNERSHIP_TIMEOUT) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_82573(hw);
+		e_dbg("Driver can't access the PHY\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used during reset.
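e1000_get_hw_semaphore_82571() above is a two-phase handshake: wait for the inter-port SMBI bit to come free, then set SWESMBI and read it back to confirm it latched, since firmware can win the race. A condensed sketch that drops the timeout-shortening heuristic; the bit values are illustrative and reg_read()/reg_write() stand in for er32(SWSM)/ew32(SWSM, ...):

	#include <stdbool.h>
	#include <stdint.h>

	#define SWSM_SMBI	0x01	/* illustrative bit positions */
	#define SWSM_SWESMBI	0x02

	static bool get_hw_semaphore(uint32_t (*reg_read)(void),
				     void (*reg_write)(uint32_t),
				     int sw_tries, int fw_tries)
	{
		int i;

		for (i = 0; i < sw_tries; i++)		/* phase 1: SMBI free? */
			if (!(reg_read() & SWSM_SMBI))
				break;

		for (i = 0; i < fw_tries; i++) {	/* phase 2: grab SWESMBI */
			reg_write(reg_read() | SWSM_SWESMBI);
			if (reg_read() & SWSM_SWESMBI)
				return true;		/* bit latched: ours */
		}
		return false;	/* caller releases and reports -E1000_ERR_NVM */
	}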
+ * + **/ +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +static DEFINE_MUTEX(swflag_mutex); + +/** + * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM. + * + **/ +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) +{ + s32 ret_val; + + mutex_lock(&swflag_mutex); + ret_val = e1000_get_hw_semaphore_82573(hw); + if (ret_val) + mutex_unlock(&swflag_mutex); + return ret_val; +} + +/** + * e1000_put_hw_semaphore_82574 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + * + **/ +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) +{ + e1000_put_hw_semaphore_82573(hw); + mutex_unlock(&swflag_mutex); +} + +/** + * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. + * LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u16 data = er32(POEMB); + + if (active) + data |= E1000_PHY_CTRL_D0A_LPLU; + else + data &= ~E1000_PHY_CTRL_D0A_LPLU; + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * The low power link up (lplu) state is set to the power management level D3 + * when active is true, else clear lplu for D3. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u16 data = er32(POEMB); + + if (!active) { + data &= ~E1000_PHY_CTRL_NOND0A_LPLU; + } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || + (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_PHY_CTRL_NOND0A_LPLU; + } + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_acquire_nvm_82571 - Request for access to the EEPROM + * @hw: pointer to the HW structure + * + * To gain access to the EEPROM, first we must obtain a hardware semaphore. + * Then for non-82573 hardware, set the EEPROM access request bit and wait + * for EEPROM access grant bit. If the access grant bit is not set, release + * hardware semaphore. + **/ +static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_get_hw_semaphore_82571(hw); + if (ret_val) + return ret_val; + + switch (hw->mac.type) { + case e1000_82573: + break; + default: + ret_val = e1000e_acquire_nvm(hw); + break; + } + + if (ret_val) + e1000_put_hw_semaphore_82571(hw); + + return ret_val; +} + +/** + * e1000_release_nvm_82571 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
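The 82574 acquire/release pair above layers a driver-global mutex over the 82573 hardware handshake, and unlocks again when the hardware step fails so the mutex can never leak. The same pairing in user-space terms; pthread is used purely for illustration, and hw_sem_get()/hw_sem_put() are hypothetical stand-ins for the EXTCNF_CTRL ownership handshake:

	#include <pthread.h>

	static pthread_mutex_t swflag_mutex = PTHREAD_MUTEX_INITIALIZER;

	static int acquire_82574(int (*hw_sem_get)(void))
	{
		int ret;

		pthread_mutex_lock(&swflag_mutex);
		ret = hw_sem_get();
		if (ret)
			pthread_mutex_unlock(&swflag_mutex); /* don't leak it */
		return ret;
	}

	static void release_82574(void (*hw_sem_put)(void))
	{
		hw_sem_put();
		pthread_mutex_unlock(&swflag_mutex);
	}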
+ **/ +static void e1000_release_nvm_82571(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_put_hw_semaphore_82571(hw); +} + +/** + * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * For non-82573 silicon, write data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val; + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); + break; + case e1000_82571: + case e1000_82572: + ret_val = e1000e_write_nvm_spi(hw, offset, words, data); + break; + default: + ret_val = -E1000_ERR_NVM; + break; + } + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82571 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) +{ + u32 eecd; + s32 ret_val; + u16 i; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + /* + * If our nvm is an EEPROM, then we're done + * otherwise, commit the checksum to the flash NVM. + */ + if (hw->nvm.type != e1000_nvm_flash_hw) + return ret_val; + + /* Check for pending operations. */ + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + /* Reset the firmware if using STM opcode. */ + if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { + /* + * The enabling of and the actual reset must be done + * in two write cycles. + */ + ew32(HICR, E1000_HICR_FW_RESET_ENABLE); + e1e_flush(); + ew32(HICR, E1000_HICR_FW_RESET); + } + + /* Commit the write to flash */ + eecd = er32(EECD) | E1000_EECD_FLUPD; + ew32(EECD, eecd); + + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) +{ + if (hw->nvm.type == e1000_nvm_flash_hw) + e1000_fix_nvm_checksum_82571(hw); + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * After checking for invalid values, poll the EEPROM to ensure the previous + * command has completed before trying to write the next word. After write + * poll for completion. 
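Each loop iteration of the EEWR write path documented above packs one 16-bit data word, the target address, and a start bit into a single register write, polling for completion before and after. The packing step in isolation; the shift constants are believed to match the e1000e definitions but should be treated as illustrative:

	#include <stdint.h>

	#define NVM_RW_REG_DATA		16	/* data in the high word     */
	#define NVM_RW_ADDR_SHIFT	2	/* illustrative addr shift   */
	#define NVM_RW_REG_START	1	/* write 1 to start command  */

	static uint32_t pack_eewr(uint16_t data, uint16_t offset, uint16_t i)
	{
		return ((uint32_t)data << NVM_RW_REG_DATA) |
		       ((uint32_t)(offset + i) << NVM_RW_ADDR_SHIFT) |
		       NVM_RW_REG_START;
	}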
+ * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eewr = 0; + s32 ret_val = 0; + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eewr = (data[i] << E1000_NVM_RW_REG_DATA) | + ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START; + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + ew32(EEWR, eewr); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. + **/ +static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + + while (timeout) { + if (er32(EEMNGCTL) & + E1000_NVM_CFG_DONE_PORT_0) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +static s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + s32 ret_val; + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + /* + * Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. + */ + switch (hw->mac.type) { + case e1000_82573: + ret_val = e1000_get_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = e1000_get_hw_semaphore_82574(hw); + break; + default: + break; + } + if (ret_val) + e_dbg("Cannot acquire MDIO ownership\n"); + + ctrl = er32(CTRL); + + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + /* Must release MDIO ownership and mutex after MAC reset. */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + e1000_put_hw_semaphore_82574(hw); + break; + default: + break; + } + + if (hw->nvm.type == e1000_nvm_flash_hw) { + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + } + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* + * Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + msleep(25); + break; + default: + break; + } + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + er32(ICR); + + if (hw->mac.type == e1000_82571) { + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + e1000e_set_laa_state_82571(hw, true); + } + + /* Reinitialize the 82571 serdes link state machine */ + if (hw->phy.media_type == e1000_media_type_internal_serdes) + hw->mac.serdes_link_state = e1000_serdes_link_down; + + return 0; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
+ **/ +static s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) + e_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. */ + /* + * If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. + */ + if (e1000e_get_laa_state_82571(hw)) + rar_count--; + e1000e_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000_setup_link_82571(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + switch (mac->type) { + case e1000_82573: + e1000e_enable_tx_pkt_filtering(hw); + /* fall through */ + case e1000_82574: + case e1000_82583: + reg_data = er32(GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + ew32(GCR, reg_data); + break; + default: + reg_data = er32(TXDCTL(1)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL(1), reg_data); + break; + } + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
+ **/
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	/* Transmit Descriptor Control 0 */
+	reg = er32(TXDCTL(0));
+	reg |= (1 << 22);
+	ew32(TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = er32(TXDCTL(1));
+	reg |= (1 << 22);
+	ew32(TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = er32(TARC(0));
+	reg &= ~(0xF << 27);	/* 30:27 */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+		break;
+	default:
+		break;
+	}
+	ew32(TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = er32(TARC(1));
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		reg &= ~((1 << 29) | (1 << 30));
+		reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+		if (er32(TCTL) & E1000_TCTL_MULR)
+			reg &= ~(1 << 28);
+		else
+			reg |= (1 << 28);
+		ew32(TARC(1), reg);
+		break;
+	default:
+		break;
+	}
+
+	/* Device Control */
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		reg = er32(CTRL);
+		reg &= ~(1 << 29);
+		ew32(CTRL, reg);
+		break;
+	default:
+		break;
+	}
+
+	/* Extended Device Control */
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		reg = er32(CTRL_EXT);
+		reg &= ~(1 << 23);
+		reg |= (1 << 22);
+		ew32(CTRL_EXT, reg);
+		break;
+	default:
+		break;
+	}
+
+	if (hw->mac.type == e1000_82571) {
+		reg = er32(PBA_ECC);
+		reg |= E1000_PBA_ECC_CORR_EN;
+		ew32(PBA_ECC, reg);
+	}
+	/*
+	 * Workaround for hardware errata.
+	 * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
+	 */
+	if ((hw->mac.type == e1000_82571) ||
+	    (hw->mac.type == e1000_82572)) {
+		reg = er32(CTRL_EXT);
+		reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+		ew32(CTRL_EXT, reg);
+	}
+
+	/* PCI-Ex Control Registers */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		reg = er32(GCR);
+		reg |= (1 << 22);
+		ew32(GCR, reg);
+
+		/*
+		 * Workaround for hardware errata documented in the errata
+		 * sheets: some error-prone or unreliable PCIe completions
+		 * can occur, particularly with ASPM enabled. Without this
+		 * fix, the issue can cause Tx timeouts.
+		 */
+		reg = er32(GCR2);
+		reg |= 1;
+		ew32(GCR2, reg);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_clear_vfta_82571 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
+{
+	u32 offset;
+	u32 vfta_value = 0;
+	u32 vfta_offset = 0;
+	u32 vfta_bit_in_reg = 0;
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (hw->mng_cookie.vlan_id != 0) {
+			/*
+			 * The VFTA is a 4096b bit-field, each identifying
+			 * a single VLAN ID. The following operations
+			 * determine which 32b entry (i.e. offset) into the
+			 * array we want to set the VLAN ID (i.e. bit) of
+			 * the manageability unit.
+			 */
+			vfta_offset = (hw->mng_cookie.vlan_id >>
+				       E1000_VFTA_ENTRY_SHIFT) &
+				      E1000_VFTA_ENTRY_MASK;
+			vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+					E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+		}
+		break;
+	default:
+		break;
+	}
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		/*
+		 * If the offset we want to clear is the same offset of the
+		 * manageability VLAN ID, then clear all bits except that of
+		 * the manageability unit.
+		 */
+		vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+		e1e_flush();
+	}
+}
+
+/**
+ * e1000_check_mng_mode_82574 - Check manageability is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Reads the NVM Initialization Control Word 2 and returns true
+ * (>0) if any manageability is enabled, else false (0).
+ **/
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
+{
+	u16 data;
+
+	e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+	return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
+}
+
+/**
+ * e1000_led_on_82574 - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+static s32 e1000_led_on_82574(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	u32 i;
+
+	ctrl = hw->mac.ledctl_mode2;
+	if (!(E1000_STATUS_LU & er32(STATUS))) {
+		/*
+		 * If no link, then turn LED on by setting the invert bit
+		 * for each LED that's "on" (0x0E) in ledctl_mode2.
+		 */
+		for (i = 0; i < 4; i++)
+			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+			    E1000_LEDCTL_MODE_LED_ON)
+				ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
+	}
+	ew32(LEDCTL, ctrl);
+
+	return 0;
+}
+
+/**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+	u16 status_1kbt = 0;
+	u16 receive_errors = 0;
+	bool phy_hung = false;
+	s32 ret_val = 0;
+
+	/*
+	 * Read the PHY Receive Error counter first. If it is at its max
+	 * (all F's), read the Base1000T status register. If both are at
+	 * their max, the PHY is hung.
+	 */
+	ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+
+	if (ret_val)
+		goto out;
+	if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+		ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+		if (ret_val)
+			goto out;
+		if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+		    E1000_IDLE_ERROR_COUNT_MASK)
+			phy_hung = true;
+	}
+out:
+	return phy_hung;
+}
+
+/**
+ * e1000_setup_link_82571 - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_82571(struct e1000_hw *hw)
+{
+	/*
+	 * 82573 does not have a word in the NVM to determine
+	 * the default flow control setting, so we explicitly
+	 * set it to full.
+	 */
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (hw->fc.requested_mode == e1000_fc_default)
+			hw->fc.requested_mode = e1000_fc_full;
+		break;
+	default:
+		break;
+	}
+
+	return e1000e_setup_link(hw);
+}
+
+/**
+ * e1000_setup_copper_link_82571 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link, once link is established calls to configure collision distance
+ * and flow control are called.
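e1000_check_phy_82574() above declares the PHY hung only when two independent counters are saturated: the receive-error counter pegged at E1000_RECEIVE_ERROR_MAX (0xFFFF) and the idle-error count field at its full mask. The test reduced to a sketch, with rd_phy() standing in for e1e_rphy():

	#include <stdbool.h>
	#include <stdint.h>

	#define RECEIVE_ERROR_MAX	0xFFFF
	#define IDLE_ERROR_COUNT_MASK	0x00FF

	static bool phy_hung(int (*rd_phy)(int reg, uint16_t *val),
			     int err_cnt_reg, int base1000t_reg)
	{
		uint16_t rx_errs, status_1kbt;

		if (rd_phy(err_cnt_reg, &rx_errs))
			return false;		/* read failed: assume OK */
		if (rx_errs != RECEIVE_ERROR_MAX)
			return false;		/* counter not saturated  */

		if (rd_phy(base1000t_reg, &status_1kbt))
			return false;

		return (status_1kbt & IDLE_ERROR_COUNT_MASK) ==
		       IDLE_ERROR_COUNT_MASK;
	}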
+ **/ +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + case e1000_phy_bm: + ret_val = e1000e_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000e_copper_link_setup_igp(hw); + break; + default: + return -E1000_ERR_PHY; + break; + } + + if (ret_val) + return ret_val; + + ret_val = e1000e_setup_copper_link(hw); + + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes links. + * Upon successful setup, poll for link. + **/ +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* + * If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twiddling their thumbs + * if another tool failed to take it out of loopback mode. + */ + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000e_setup_fiber_serdes_link(hw); +} + +/** + * e1000_check_for_serdes_link_82571 - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Reports the link state as up or down. + * + * If autonegotiation is supported by the link partner, the link state is + * determined by the result of autonegotiation. This is the most likely case. + * If autonegotiation is not supported by the link partner, and the link + * has a valid signal, force the link up. + * + * The link state is represented internally here by 4 states: + * + * 1) down + * 2) autoneg_progress + * 3) autoneg_complete (the link successfully autonegotiated) + * 4) forced_up (the link has been forced up, it did not autonegotiate) + * + **/ +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + u32 txcw; + u32 i; + s32 ret_val = 0; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { + + /* Receiver is synchronized with no invalid bits. */ + switch (mac->serdes_link_state) { + case e1000_serdes_link_autoneg_complete: + if (!(status & E1000_STATUS_LU)) { + /* + * We have lost link, retry autoneg before + * reporting link failure + */ + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("AN_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_forced_up: + /* + * If we are receiving /C/ ordered sets, re-enable + * auto-negotiation in the TXCW register and disable + * forced link in the Device Control register in an + * attempt to auto-negotiate with our link partner. + * If the partner code word is null, stop forcing + * and restart auto negotiation. 
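+			 *
+			 * Worked RXCW examples (sample values are
+			 * illustrative; the masks come from defines.h in
+			 * this patch): 0x60001234 has /C/ set (0x20000000),
+			 * so we fall back to autoneg; 0x40000000 carries a
+			 * null code word (low 16 bits clear), so we also
+			 * fall back; a value such as 0x40001234 keeps the
+			 * link forced.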
+ */ + if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { + /* Enable autoneg, and unforce link up */ + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("FORCED_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_autoneg_progress: + if (rxcw & E1000_RXCW_C) { + /* + * We received /C/ ordered sets, meaning the + * link partner has autonegotiated, and we can + * trust the Link Up (LU) status bit. + */ + if (status & E1000_STATUS_LU) { + mac->serdes_link_state = + e1000_serdes_link_autoneg_complete; + e_dbg("AN_PROG -> AN_UP\n"); + mac->serdes_has_link = true; + } else { + /* Autoneg completed, but failed. */ + mac->serdes_link_state = + e1000_serdes_link_down; + e_dbg("AN_PROG -> DOWN\n"); + } + } else { + /* + * The link partner did not autoneg. + * Force link up and full duplex, and change + * state to forced. + */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error config flow control\n"); + break; + } + mac->serdes_link_state = + e1000_serdes_link_forced_up; + mac->serdes_has_link = true; + e_dbg("AN_PROG -> FORCED_UP\n"); + } + break; + + case e1000_serdes_link_down: + default: + /* + * The link was down but the receiver has now gained + * valid sync, so lets see if we can bring the link + * up. + */ + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("DOWN -> AN_PROG\n"); + break; + } + } else { + if (!(rxcw & E1000_RXCW_SYNCH)) { + mac->serdes_has_link = false; + mac->serdes_link_state = e1000_serdes_link_down; + e_dbg("ANYSTATE -> DOWN\n"); + } else { + /* + * Check several times, if Sync and Config + * both are consistently 1 then simply ignore + * the Invalid bit and restart Autoneg + */ + for (i = 0; i < AN_RETRY_COUNT; i++) { + udelay(10); + rxcw = er32(RXCW); + if ((rxcw & E1000_RXCW_IV) && + !((rxcw & E1000_RXCW_SYNCH) && + (rxcw & E1000_RXCW_C))) { + mac->serdes_has_link = false; + mac->serdes_link_state = + e1000_serdes_link_down; + e_dbg("ANYSTATE -> DOWN\n"); + break; + } + } + + if (i == AN_RETRY_COUNT) { + txcw = er32(TXCW); + txcw |= E1000_TXCW_ANE; + ew32(TXCW, txcw); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("ANYSTATE -> AN_PROG\n"); + } + } + } + + return ret_val; +} + +/** + * e1000_valid_led_default_82571 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
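+ *
+ * For illustration, the fallback ID_LED_DEFAULT used below expands, with
+ * the ID_LED_* values from defines.h in this patch, to
+ * (0x8 << 12) | (0x9 << 8) | (0x1 << 4) | 0x1 = 0x8911,
+ * i.e. one 4-bit mode pair per LED.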
+ **/
+static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (*data == ID_LED_RESERVED_F746)
+			*data = ID_LED_DEFAULT_82573;
+		break;
+	default:
+		if (*data == ID_LED_RESERVED_0000 ||
+		    *data == ID_LED_RESERVED_FFFF)
+			*data = ID_LED_DEFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * e1000e_get_laa_state_82571 - Get locally administered address state
+ * @hw: pointer to the HW structure
+ *
+ * Retrieve and return the current locally administered address state.
+ **/
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
+{
+	if (hw->mac.type != e1000_82571)
+		return false;
+
+	return hw->dev_spec.e82571.laa_is_present;
+}
+
+/**
+ * e1000e_set_laa_state_82571 - Set locally administered address state
+ * @hw: pointer to the HW structure
+ * @state: enable/disable locally administered address
+ *
+ * Enable/Disable the current locally administered address state.
+ **/
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+	if (hw->mac.type != e1000_82571)
+		return;
+
+	hw->dev_spec.e82571.laa_is_present = state;
+
+	/* If workaround is activated... */
+	if (state)
+		/*
+		 * Hold a copy of the LAA in RAR[14].  This is done so that
+		 * between the time RAR[0] gets clobbered and the time it
+		 * gets fixed, the actual LAA is in one of the RARs and no
+		 * incoming packets directed to this port are dropped.
+		 * Eventually the LAA will be in RAR[0] and RAR[14].
+		 */
+		e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
+}
+
+/**
+ * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that the EEPROM has completed the update.  After updating the
+ * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix.  If
+ * the checksum fix is not implemented, we need to set the bit and update
+ * the checksum.  Otherwise, if bit 15 is set and the checksum is incorrect,
+ * we need to return bad checksum.
+ **/
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 data;
+
+	if (nvm->type != e1000_nvm_flash_hw)
+		return 0;
+
+	/*
+	 * Check bit 4 of word 10h.  If it is 0, firmware is done updating
+	 * 10h-12h.  Checksum may need to be fixed.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (!(data & 0x10)) {
+		/*
+		 * Read 0x23 and check bit 15.  This bit is a 1
+		 * when the checksum has already been fixed.  If
+		 * the checksum is still wrong and this bit is a
+		 * 1, we need to return bad checksum.  Otherwise,
+		 * we need to set this bit to a 1 and update the
+		 * checksum.
+		 */
+		ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
+		if (ret_val)
+			return ret_val;
+
+		if (!(data & 0x8000)) {
+			data |= 0x8000;
+			ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
+			if (ret_val)
+				return ret_val;
+			ret_val = e1000e_update_nvm_checksum(hw);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (hw->mac.type == e1000_82571) {
+		/*
+		 * If there's an alternate MAC address, place it in RAR0
+		 * so that it will override the Si installed default perm
+		 * address.
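+		 * (When present, the alternate address is located through
+		 * the NVM_ALT_MAC_ADDR_PTR word, 0x0037 in this patch's
+		 * defines.h, by e1000_check_alt_mac_addr_generic().)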
+ */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + } + + ret_val = e1000_read_mac_addr_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_power_down_phy_copper_82571 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_mac_info *mac = &hw->mac; + + if (!(phy->ops.check_reset_block)) + return; + + /* If the management interface is not enabled, then power down */ + if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) +{ + e1000e_clear_hw_cntrs_base(hw); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + er32(ICRXPTC); + er32(ICRXATC); + er32(ICTXPTC); + er32(ICTXATC); + er32(ICTXQEC); + er32(ICTXQMTC); + er32(ICRXDMTC); +} + +static struct e1000_mac_operations e82571_mac_ops = { + /* .check_mng_mode: mac type dependent */ + /* .check_for_link: media type dependent */ + .id_led_init = e1000e_id_led_init, + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, + .get_bus_info = e1000e_get_bus_info_pcie, + .set_lan_id = e1000_set_lan_id_multi_port_pcie, + /* .get_link_up_info: media type dependent */ + /* .led_on: mac type dependent */ + .led_off = e1000e_led_off_generic, + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .write_vfta = e1000_write_vfta_generic, + .clear_vfta = e1000_clear_vfta_82571, + .reset_hw = e1000_reset_hw_82571, + .init_hw = e1000_init_hw_82571, + .setup_link = e1000_setup_link_82571, + /* .setup_physical_interface: media type dependent */ + .setup_led = e1000e_setup_led_generic, + .read_mac_addr = e1000_read_mac_addr_82571, +}; + +static struct e1000_phy_operations e82_phy_ops_igp = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_igp, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = NULL, + .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, + .get_cfg_done = e1000_get_cfg_done_82571, + .get_cable_length = e1000e_get_cable_length_igp_2, + .get_info = e1000e_get_phy_info_igp, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_igp, + .cfg_on_link_up = NULL, +}; + +static struct e1000_phy_operations e82_phy_ops_m88 = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done, + 
.get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_m88, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_m88, + .cfg_on_link_up = NULL, +}; + +static struct e1000_phy_operations e82_phy_ops_bm = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done, + .get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_bm2, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_bm2, + .cfg_on_link_up = NULL, +}; + +static struct e1000_nvm_operations e82571_nvm_ops = { + .acquire = e1000_acquire_nvm_82571, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_82571, + .update = e1000_update_nvm_checksum_82571, + .valid_led_default = e1000_valid_led_default_82571, + .validate = e1000_validate_nvm_checksum_82571, + .write = e1000_write_nvm_82571, +}; + +struct e1000_info e1000_82571_info = { + .mac = e1000_82571, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_RESET_OVERWRITES_LAA /* errata */ + | FLAG_TARC_SPEED_MODE_BIT /* errata */ + | FLAG_APME_CHECK_PORT_B, + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82572_info = { + .mac = e1000_82572, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_TARC_SPEED_MODE_BIT, /* errata */ + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82573_info = { + .mac = e1000_82573, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_SWSM_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L1 + | FLAG2_DISABLE_ASPM_L0S, + .pba = 20, + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_m88, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82574_info = { + .mac = e1000_82574, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_MSIX + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_CHECK_PHY_HANG + | FLAG2_DISABLE_ASPM_L0S, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + 
.mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82583_info = { + .mac = e1000_82583, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L0S, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; + diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile new file mode 100644 index 0000000..948c05d --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/Makefile @@ -0,0 +1,37 @@ +################################################################################ +# +# Intel PRO/1000 Linux driver +# Copyright(c) 1999 - 2011 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) PRO/1000 ethernet driver +# + +obj-$(CONFIG_E1000E) += e1000e.o + +e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ + lib.o phy.o param.o ethtool.o netdev.o + diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h new file mode 100644 index 0000000..c516a74 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -0,0 +1,844 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC + +/* Extended Device Control */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_LSECCK 0x00001000 +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* 
IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter 
enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                   E1000_PSRCTL_BSIZE0_MASK) |
+ *                 ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                   E1000_PSRCTL_BSIZE1_MASK) |
+ *                 ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                   E1000_PSRCTL_BSIZE2_MASK) |
+ *                 ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                   E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7    /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2    /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6    /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT  14   /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM   0x1
+#define E1000_SWFW_PHY0_SM  0x2
+#define E1000_SWFW_PHY1_SM  0x4
+#define E1000_SWFW_CSR_SM   0x8
+
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE    0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+
+/*
+ * Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
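+ *
+ * On these PCIe MACs, management frames are normally driven through the
+ * MDIC register rather than by bit-banging these pins; e.g., assuming
+ * the MDI Control values defined near the end of this file, a read of
+ * PHY register 0x01 on PHY address 1 is started by writing
+ * (0x01 << E1000_MDIC_REG_SHIFT) | (1 << E1000_MDIC_PHY_SHIFT) |
+ * E1000_MDIC_OP_READ, i.e. 0x08210000, to MDIC and then polling for
+ * E1000_MDIC_READY.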
+ */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ + +/* Constants used to interpret the masked PCI-X bus speed. */ + +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_PHY_LED0_MODE_MASK 0x00000007 +#define E1000_PHY_LED0_IVRT 0x00000008 +#define E1000_PHY_LED0_MASK 0x0000001F + +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 
0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ + +/* Header split receive */ +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ + +#define E1000_PBS_16K E1000_PBA_16K + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ + +/* PBA ECC Register */ +#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ +#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ +#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */ +#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ +#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */ + +/* + * This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ +#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ +#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ +#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of desc. still to be processed. */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
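+ *
+ * Each entry is a RAL/RAH register pair.  Assuming the little-endian
+ * byte packing used by the generic e1000e_rar_set() helper, a made-up
+ * MAC address of 00:1b:21:aa:bb:cc would be programmed as
+ * RAL = 0xaa211b00 (bytes 3..0) and RAH = E1000_RAH_AV | 0x0000ccbb
+ * (address-valid bit plus bytes 5..4).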
+ */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half 
Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +/* NVM Addressing bits based on type (0-small, 1-large) */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define 
NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F + +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ + +#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 + +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
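+ * Each ID is the concatenation (PHY_ID1 << 16) | PHY_ID2 of the IEEE
+ * identifier registers (0x02/0x03 above), with the low revision nibble
+ * covered by PHY_REVISION_MASK; e.g. an M88E1000 reporting ID1 = 0x0141
+ * and ID2 = 0x0C5x matches M88E1000_E_PHY_ID (0x01410C50) once the
+ * revision bits are masked.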
*/ +/* + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define BME1000_E_PHY_ID 0x01410CB0 +#define BME1000_E_PHY_ID_R2 0x01410CB1 +#define I82577_E_PHY_ID 0x01540050 +#define I82578_E_PHY_ID 0x004DD040 +#define I82579_E_PHY_ID 0x01540090 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold) + * 0=Normal 10BASE-T Rx Threshold + */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 +#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C + +/* BME1000 PHY Specific Control Register */ +#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) + +/* + * Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ + +/* MDI Control */ +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* SerDes Control */ +#define E1000_GEN_POLL_TIMEOUT 640 + +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h new file mode 100644 index 0000000..638d175 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -0,0 +1,736 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hw.h" + +struct e1000_info; + +#define e_dbg(format, arg...) \ + netdev_dbg(hw->adapter->netdev, format, ## arg) +#define e_err(format, arg...) \ + netdev_err(adapter->netdev, format, ## arg) +#define e_info(format, arg...) \ + netdev_info(adapter->netdev, format, ## arg) +#define e_warn(format, arg...) \ + netdev_warn(adapter->netdev, format, ## arg) +#define e_notice(format, arg...) 
\ + netdev_notice(adapter->netdev, format, ## arg) + + +/* Interrupt modes, as used by the IntMode parameter */ +#define E1000E_INT_MODE_LEGACY 0 +#define E1000E_INT_MODE_MSI 1 +#define E1000E_INT_MODE_MSIX 2 + +/* Tx/Rx descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 4096 +#define E1000_MIN_TXD 64 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 4096 +#define E1000_MIN_RXD 64 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* Early Receive defines */ +#define E1000_ERT_2048 0x100 + +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_APME 0x0400 + +#define E1000_MNG_VLAN_NONE (-1) + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +#define DEFAULT_JUMBO 9234 + +/* BM/HV Specific Registers */ +#define BM_PORT_CTRL_PAGE 769 + +#define PHY_UPPER_SHIFT 21 +#define BM_PHY_REG(page, reg) \ + (((reg) & MAX_PHY_REG_ADDRESS) |\ + (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ + (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) + +/* PHY Wakeup Registers and defines */ +#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) +#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) +#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) +#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) +#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) +#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) +#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) + +#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ +#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ +#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ +#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ +#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ +#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ +#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ + +#define HV_STATS_PAGE 778 +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */ +#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */ +#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. 
Count */ +#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */ +#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */ +#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) +#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ +#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */ +#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) + +#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_LINK_UP 0x0040 + +/* Time to wait before putting the device into D3 if there's no link (in ms). */ +#define LINK_TIMEOUT 100 + +#define DEFAULT_RDTR 0 +#define DEFAULT_RADV 8 +#define BURST_RDTR 0x20 +#define BURST_RADV 0x20 + +/* + * in the case of WTHRESH, it appears at least the 82571/2 hardware + * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when + * WTHRESH=4, and since we want 64 bytes at a time written back, set + * it to 5 + */ +#define E1000_TXDCTL_DMA_BURST_ENABLE \ + (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ + E1000_TXDCTL_COUNT_DESC | \ + (5 << 16) | /* wthresh must be +1 more than desired */\ + (1 << 8) | /* hthresh */ \ + 0x1f) /* pthresh */ + +#define E1000_RXDCTL_DMA_BURST_ENABLE \ + (0x01000000 | /* set descriptor granularity */ \ + (4 << 16) | /* set writeback threshold */ \ + (4 << 8) | /* set prefetch threshold */ \ + 0x20) /* set hthresh */ + +#define E1000_TIDV_FPD (1 << 31) +#define E1000_RDTR_FPD (1 << 31) + +enum e1000_boards { + board_82571, + board_82572, + board_82573, + board_82574, + board_82583, + board_80003es2lan, + board_ich8lan, + board_ich9lan, + board_ich10lan, + board_pchlan, + board_pch2lan, +}; + +struct e1000_ps_page { + struct page *page; + u64 dma; /* must be u64 - written to hw */ +}; + +/* + * wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + /* Tx */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; + }; + /* Rx */ + struct { + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_pages; + struct page *page; + }; + }; +}; + +struct e1000_ring { + void *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. 
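
An illustrative decode of the threshold fields packed into E1000_TXDCTL_DMA_BURST_ENABLE above. The 6-bit field layout (PTHRESH 5:0, HTHRESH 13:8, WTHRESH 21:16) and the GRAN/COUNT_DESC constants are assumptions taken from the usual 8254x TXDCTL layout, not part of this patch:

#include <stdio.h>

/* Assumed hw.h values, for illustration only: */
#define E1000_TXDCTL_GRAN       0x01000000 /* granularity: descriptors */
#define E1000_TXDCTL_COUNT_DESC 0x00400000

int main(void)
{
	unsigned int txdctl = E1000_TXDCTL_GRAN | E1000_TXDCTL_COUNT_DESC |
			      (5 << 16) | (1 << 8) | 0x1f;

	/* TXDCTL packs three 6-bit thresholds: PTHRESH 5:0, HTHRESH 13:8,
	 * WTHRESH 21:16 (layout assumed from the 8254x datasheets). */
	printf("pthresh=%u hthresh=%u wthresh=%u\n",
	       txdctl & 0x3F, (txdctl >> 8) & 0x3F, (txdctl >> 16) & 0x3F);
	return 0; /* pthresh=31 hthresh=1 wthresh=5 */
}
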
in ring */ + + u16 next_to_use; + u16 next_to_clean; + + u16 head; + u16 tail; + + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + char name[IFNAMSIZ + 5]; + u32 ims_val; + u32 itr_val; + u16 itr_register; + int set_itr; + + struct sk_buff *rx_skb_top; +}; + +/* PHY register snapshot values */ +struct e1000_phy_regs { + u16 bmcr; /* basic mode control register */ + u16 bmsr; /* basic mode status register */ + u16 advertise; /* auto-negotiation advertisement */ + u16 lpa; /* link partner ability register */ + u16 expansion; /* auto-negotiation expansion reg */ + u16 ctrl1000; /* 1000BASE-T control register */ + u16 stat1000; /* 1000BASE-T status register */ + u16 estatus; /* extended status register */ +}; + +/* board specific private data structure */ +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct e1000_info *ei; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + u16 eeprom_vers; + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* + * Tx + */ + struct e1000_ring *tx_ring /* One per active queue */ + ____cacheline_aligned_in_smp; + + struct napi_struct napi; + + unsigned int restart_queue; + u32 txd_cmd; + + bool detect_tx_hung; + u8 tx_timeout_factor; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* Tx stats */ + u64 tpt_old; + u64 colc_old; + u32 gotc; + u64 gotc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + + /* + * Rx + */ + bool (*clean_rx) (struct e1000_adapter *adapter, + int *work_done, int work_to_do) + ____cacheline_aligned_in_smp; + void (*alloc_rx_buf) (struct e1000_adapter *adapter, + int cleaned_count, gfp_t gfp); + struct e1000_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* Rx stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 gorc; + u64 gorc_old; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + spinlock_t stats64_lock; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + /* Snapshot of PHY registers */ + struct e1000_phy_regs phy_regs; + + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + unsigned int num_vectors; + struct msix_entry *msix_entries; + int int_mode; + u32 eiac_mask; + + u32 eeprom_wol; + u32 wol; + u32 pba; + u32 max_hw_frame_size; + + bool fc_autoneg; + + unsigned int flags; + unsigned int flags2; + struct work_struct downshift_task; + struct work_struct update_phy_task; + struct work_struct print_hang_task; + + bool idle_check; + int phy_hang_count; +}; + +struct e1000_info { + enum e1000_mac_type mac; + unsigned int flags; + unsigned int flags2; + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); + struct e1000_mac_operations 
*mac_ops; + struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +/* hardware capability, feature, and workaround flags */ +#define FLAG_HAS_AMT (1 << 0) +#define FLAG_HAS_FLASH (1 << 1) +#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) +#define FLAG_HAS_WOL (1 << 3) +#define FLAG_HAS_ERT (1 << 4) +#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) +#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) +#define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_READ_ONLY_NVM (1 << 8) +#define FLAG_IS_ICH (1 << 9) +#define FLAG_HAS_MSIX (1 << 10) +#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) +#define FLAG_IS_QUAD_PORT_A (1 << 12) +#define FLAG_IS_QUAD_PORT (1 << 13) +#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14) +#define FLAG_APME_IN_WUC (1 << 15) +#define FLAG_APME_IN_CTRL3 (1 << 16) +#define FLAG_APME_CHECK_PORT_B (1 << 17) +#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) +#define FLAG_NO_WAKE_UCAST (1 << 19) +#define FLAG_MNG_PT_ENABLED (1 << 20) +#define FLAG_RESET_OVERWRITES_LAA (1 << 21) +#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) +#define FLAG_TARC_SET_BIT_ZERO (1 << 23) +#define FLAG_RX_NEEDS_RESTART (1 << 24) +#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) +#define FLAG_SMART_POWER_DOWN (1 << 26) +#define FLAG_MSI_ENABLED (1 << 27) +#define FLAG_RX_CSUM_ENABLED (1 << 28) +#define FLAG_TSO_FORCE (1 << 29) +#define FLAG_RX_RESTART_NOW (1 << 30) +#define FLAG_MSI_TEST_FAILED (1 << 31) + +/* CRC Stripping defines */ +#define FLAG2_CRC_STRIPPING (1 << 0) +#define FLAG2_HAS_PHY_WAKEUP (1 << 1) +#define FLAG2_IS_DISCARDING (1 << 2) +#define FLAG2_DISABLE_ASPM_L1 (1 << 3) +#define FLAG2_HAS_PHY_STATS (1 << 4) +#define FLAG2_HAS_EEE (1 << 5) +#define FLAG2_DMA_BURST (1 << 6) +#define FLAG2_DISABLE_ASPM_L0S (1 << 7) +#define FLAG2_DISABLE_AIM (1 << 8) +#define FLAG2_CHECK_PHY_HANG (1 << 9) + +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char e1000e_driver_name[]; +extern const char e1000e_driver_version[]; + +extern void e1000e_check_options(struct e1000_adapter *adapter); +extern void e1000e_set_ethtool_ops(struct net_device *netdev); + +extern int e1000e_up(struct e1000_adapter *adapter); +extern void e1000e_down(struct e1000_adapter *adapter); +extern void e1000e_reinit_locked(struct e1000_adapter *adapter); +extern void e1000e_reset(struct e1000_adapter *adapter); +extern void e1000e_power_up_phy(struct e1000_adapter *adapter); +extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); +extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); +extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); +extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); +extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 + *stats); +extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); +extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); +extern void e1000e_get_hw_control(struct e1000_adapter *adapter); +extern void 
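
A standalone sketch of the cast-and-index idiom behind E1000_GET_DESC() above: the ring keeps untyped descriptor memory in ->desc, and the macro overlays whichever descriptor type a call site needs. All demo_* names below are stand-ins, not driver types:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a hardware descriptor; layout is illustrative only. */
struct demo_tx_desc {
	unsigned long long buffer_addr;
	unsigned int lower;
	unsigned int upper;
};

/* The ring stores untyped descriptor memory, like e1000_ring.desc. */
struct demo_ring {
	void *desc;
	unsigned int count;
};

#define DEMO_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))

int main(void)
{
	struct demo_ring ring;

	ring.count = 8;
	ring.desc = calloc(ring.count, sizeof(struct demo_tx_desc));
	if (!ring.desc)
		return 1;

	/* Typed view of slot 3, as E1000_TX_DESC(*tx_ring, i) would give. */
	struct demo_tx_desc *txd = DEMO_GET_DESC(ring, 3, demo_tx_desc);

	txd->lower = 1024;
	printf("desc 3 length field = %u\n", txd->lower);
	free(ring.desc);
	return 0;
}
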
e1000e_release_hw_control(struct e1000_adapter *adapter); + +extern unsigned int copybreak; + +extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); + +extern struct e1000_info e1000_82571_info; +extern struct e1000_info e1000_82572_info; +extern struct e1000_info e1000_82573_info; +extern struct e1000_info e1000_82574_info; +extern struct e1000_info e1000_82583_info; +extern struct e1000_info e1000_ich8_info; +extern struct e1000_info e1000_ich9_info; +extern struct e1000_info e1000_ich10_info; +extern struct e1000_info e1000_pch_info; +extern struct e1000_info e1000_pch2_info; +extern struct e1000_info e1000_es2_info; + +extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); + +extern s32 e1000e_commit_phy(struct e1000_hw *hw); + +extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); + +extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); +extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); + +extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); +extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); +extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); +extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); +extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); + +extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); +extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); +extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); +extern s32 e1000e_setup_led_generic(struct e1000_hw *hw); +extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); +extern s32 e1000e_led_on_generic(struct e1000_hw *hw); +extern s32 e1000e_led_off_generic(struct e1000_hw *hw); +extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); +extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +extern void e1000_set_lan_id_single_port(struct e1000_hw *hw); +extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); +extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); +extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); +extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); +extern s32 e1000e_id_led_init(struct e1000_hw *hw); +extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); +extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); +extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); +extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); +extern s32 e1000e_setup_link(struct e1000_hw *hw); +extern void e1000_clear_vfta_generic(struct e1000_hw *hw); +extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count); +extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); +extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); +extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); +extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); +extern void 
e1000e_config_collision_dist(struct e1000_hw *hw); +extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); +extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); +extern s32 e1000e_blink_led_generic(struct e1000_hw *hw); +extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); +extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +extern void e1000e_reset_adaptive(struct e1000_hw *hw); +extern void e1000e_update_adaptive(struct e1000_hw *hw); + +extern s32 e1000e_setup_copper_link(struct e1000_hw *hw); +extern s32 e1000e_get_phy_id(struct e1000_hw *hw); +extern void e1000e_put_hw_semaphore(struct e1000_hw *hw); +extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); +extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); +extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); +extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); +extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); +extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); +extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); +extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); +extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); +extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); +extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); +extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); +extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); +extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, + u16 *phy_reg); +extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, + u16 *phy_reg); +extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); +extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); +extern void e1000_power_up_phy_copper(struct e1000_hw *hw); +extern void e1000_power_down_phy_copper(struct e1000_hw *hw); +extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_check_downshift(struct e1000_hw *hw); +extern s32 
e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); +extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +extern s32 e1000_check_polarity_82577(struct e1000_hw *hw); +extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw); + +extern s32 e1000_check_polarity_m88(struct e1000_hw *hw); +extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +extern s32 e1000_check_polarity_ife(struct e1000_hw *hw); +extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +extern s32 e1000_check_polarity_igp(struct e1000_hw *hw); +extern bool e1000_check_phy_82574(struct e1000_hw *hw); + +static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + return hw->phy.ops.reset(hw); +} + +static inline s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + return hw->phy.ops.check_reset_block(hw); +} + +static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg(hw, offset, data); +} + +static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg(hw, offset, data); +} + +static inline s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + return hw->phy.ops.get_cable_length(hw); +} + +extern s32 e1000e_acquire_nvm(struct e1000_hw *hw); +extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); +extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); +extern void e1000e_release_nvm(struct e1000_hw *hw); +extern void e1000e_reload_nvm(struct e1000_hw *hw); +extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); + +static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.validate(hw); +} + +static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.update(hw); +} + +static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + return hw->nvm.ops.read(hw, offset, words, data); +} + +static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + return hw->nvm.ops.write(hw, offset, words, data); +} + +static inline s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + return hw->phy.ops.get_info(hw); +} + +static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw) +{ + return hw->mac.ops.check_mng_mode(hw); +} + +extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); +extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); +extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 
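
The e1e_rphy()/e1e_wphy() wrappers above simply dispatch through the per-PHY ops table; here is a standalone imitation of the read-modify-write pattern they enable throughout this driver (all fake_*/demo_* names are invented for illustration):

#include <stdio.h>

typedef int s32;
typedef unsigned short u16;
typedef unsigned int u32;

/* Minimal stand-in for the ops-table indirection behind e1e_rphy/e1e_wphy. */
struct demo_phy_ops {
	s32 (*read_reg)(u32 offset, u16 *data);
	s32 (*write_reg)(u32 offset, u16 data);
};

static u16 fake_regs[32]; /* pretend PHY register file */

static s32 fake_read(u32 offset, u16 *data)  { *data = fake_regs[offset]; return 0; }
static s32 fake_write(u32 offset, u16 data)  { fake_regs[offset] = data;  return 0; }

int main(void)
{
	struct demo_phy_ops ops = { fake_read, fake_write };
	u16 reg;

	/* The usual read-modify-write pattern seen throughout this file: */
	ops.read_reg(0, &reg);      /* e1e_rphy(hw, PHY_CONTROL, &reg); */
	reg |= 0x4000;              /* e.g. set the loopback bit */
	ops.write_reg(0, reg);      /* e1e_wphy(hw, PHY_CONTROL, reg); */

	printf("PHY_CONTROL now 0x%04X\n", fake_regs[0]);
	return 0;
}
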
*buffer, u16 length); + +static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->hw_addr + reg); +} + +static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} + +#endif /* _E1000_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c new file mode 100644 index 0000000..06d88f3 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -0,0 +1,2081 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for e1000 */ + +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/ethtool.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/delay.h> + +#include "e1000.h" + +enum {NETDEV_STATS, E1000_STATS}; + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(str, m) { \ + .stat_string = str, \ + .type = E1000_STATS, \ + .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ + .stat_offset = offsetof(struct e1000_adapter, m) } +#define E1000_NETDEV_STAT(str, m) { \ + .stat_string = str, \ + .type = NETDEV_STATS, \ + .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \ + .stat_offset = offsetof(struct rtnl_link_stats64, m) } + +static const struct e1000_stats e1000_gstrings_stats[] = { + E1000_STAT("rx_packets", stats.gprc), + E1000_STAT("tx_packets", stats.gptc), + E1000_STAT("rx_bytes", stats.gorc), + E1000_STAT("tx_bytes", stats.gotc), + E1000_STAT("rx_broadcast", stats.bprc), + E1000_STAT("tx_broadcast", stats.bptc), + E1000_STAT("rx_multicast", stats.mprc), + E1000_STAT("tx_multicast", stats.mptc), + E1000_NETDEV_STAT("rx_errors", rx_errors), + E1000_NETDEV_STAT("tx_errors", tx_errors), + E1000_NETDEV_STAT("tx_dropped", tx_dropped), + E1000_STAT("multicast", stats.mprc), + E1000_STAT("collisions", stats.colc), + E1000_NETDEV_STAT("rx_length_errors", rx_length_errors), + E1000_NETDEV_STAT("rx_over_errors", rx_over_errors), + E1000_STAT("rx_crc_errors", stats.crcerrs), + E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors), + E1000_STAT("rx_no_buffer_count", stats.rnbc), + E1000_STAT("rx_missed_errors", stats.mpc), + E1000_STAT("tx_aborted_errors", stats.ecol), + E1000_STAT("tx_carrier_errors", stats.tncrs), + E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors), + E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors), + E1000_STAT("tx_window_errors", stats.latecol), + E1000_STAT("tx_abort_late_coll", stats.latecol), + 
E1000_STAT("tx_deferred_ok", stats.dc), + E1000_STAT("tx_single_coll_ok", stats.scc), + E1000_STAT("tx_multi_coll_ok", stats.mcc), + E1000_STAT("tx_timeout_count", tx_timeout_count), + E1000_STAT("tx_restart_queue", restart_queue), + E1000_STAT("rx_long_length_errors", stats.roc), + E1000_STAT("rx_short_length_errors", stats.ruc), + E1000_STAT("rx_align_errors", stats.algnerrc), + E1000_STAT("tx_tcp_seg_good", stats.tsctc), + E1000_STAT("tx_tcp_seg_failed", stats.tsctfc), + E1000_STAT("rx_flow_control_xon", stats.xonrxc), + E1000_STAT("rx_flow_control_xoff", stats.xoffrxc), + E1000_STAT("tx_flow_control_xon", stats.xontxc), + E1000_STAT("tx_flow_control_xoff", stats.xofftxc), + E1000_STAT("rx_long_byte_count", stats.gorc), + E1000_STAT("rx_csum_offload_good", hw_csum_good), + E1000_STAT("rx_csum_offload_errors", hw_csum_err), + E1000_STAT("rx_header_split", rx_hdr_split), + E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + E1000_STAT("tx_smbus", stats.mgptc), + E1000_STAT("rx_smbus", stats.mgprc), + E1000_STAT("dropped_smbus", stats.mgpdc), + E1000_STAT("rx_dma_failed", rx_dma_failed), + E1000_STAT("tx_dma_failed", tx_dma_failed), +}; + +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 speed; + + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->phy.type == e1000_phy_ife) + ecmd->supported &= ~SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + } + + speed = -1; + ecmd->duplex = -1; + + if (netif_running(netdev)) { + if (netif_carrier_ok(netdev)) { + speed = adapter->link_speed; + ecmd->duplex = adapter->link_duplex - 1; + } + } else { + u32 status = er32(STATUS); + if (status & E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + speed = SPEED_100; + else + speed = SPEED_10; + + if (status & E1000_STATUS_FD) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } + } + + ethtool_cmd_speed_set(ecmd, speed); + ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if ((hw->phy.media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = hw->phy.is_mdix ? 
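
The E1000_STAT()/E1000_NETDEV_STAT() entries above record a size and an offsetof() so that the ethtool stats code can copy counters generically, without naming each field. A self-contained sketch of that table-driven walk, with a stand-in adapter struct (all demo_* names are invented):

#include <stdio.h>
#include <stddef.h>

/* Stand-in adapter with two counters of different widths. */
struct demo_adapter {
	unsigned long long rx_bytes;
	unsigned int tx_timeouts;
};

struct demo_stat {
	const char *name;
	int size;
	int offset;
};

#define DEMO_STAT(str, m) \
	{ str, sizeof(((struct demo_adapter *)0)->m), \
	  offsetof(struct demo_adapter, m) }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT("rx_bytes", rx_bytes),
	DEMO_STAT("tx_timeout_count", tx_timeouts),
};

int main(void)
{
	struct demo_adapter a = { 123456789ULL, 7 };
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		/* The offset recorded at compile time locates the field. */
		char *p = (char *)&a + demo_stats[i].offset;
		unsigned long long v = (demo_stats[i].size == 8) ?
			*(unsigned long long *)p : *(unsigned int *)p;
		printf("%-20s %llu\n", demo_stats[i].name, v);
	}
	return 0;
}
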
ETH_TP_MDI_X : + ETH_TP_MDI; + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + return 0; +} + +static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 Mbps Full duplex */ + if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) { + goto err_inval; + } + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + return 0; + +err_inval: + e_err("Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* + * When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed + */ + if (e1000_check_reset_block(hw)) { + e_err("Cannot change link characteristics when SoL/IDER is " + "active.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) + hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + ecmd->advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + clear_bit(__E1000_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000e_down(adapter); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return 0; +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? 
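
e1000_set_spd_dplx() above can switch on spd + dplx only because the standard linux/ethtool.h constants make every valid sum unique: SPEED_* are the literal link rates and DUPLEX_HALF/DUPLEX_FULL are 0 and 1. A quick standalone check:

#include <stdio.h>

/* Standard linux/ethtool.h values */
#define SPEED_10     10
#define SPEED_100    100
#define SPEED_1000   1000
#define DUPLEX_HALF  0x00
#define DUPLEX_FULL  0x01

int main(void)
{
	/* Each case label in e1000_set_spd_dplx() is a distinct integer: */
	printf("10/half=%d 10/full=%d 100/half=%d 100/full=%d 1000/full=%d\n",
	       SPEED_10 + DUPLEX_HALF, SPEED_10 + DUPLEX_FULL,
	       SPEED_100 + DUPLEX_HALF, SPEED_100 + DUPLEX_FULL,
	       SPEED_1000 + DUPLEX_FULL);
	return 0; /* 10 11 100 101 1001 */
}
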
AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == e1000_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + e1000e_down(adapter); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + if (hw->phy.media_type == e1000_media_type_fiber) { + retval = hw->mac.ops.setup_link(hw); + /* implicit goto out */ + } else { + retval = e1000e_force_mac_fc(hw); + if (retval) + goto out; + e1000e_set_fc_watermarks(hw); + } + } + +out: + clear_bit(__E1000_RESETTING, &adapter->state); + return retval; +} + +static u32 e1000_get_rx_csum(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->flags & FLAG_RX_CSUM_ENABLED; +} + +static int e1000_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (data) + adapter->flags |= FLAG_RX_CSUM_ENABLED; + else + adapter->flags &= ~FLAG_RX_CSUM_ENABLED; + + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + return 0; +} + +static u32 e1000_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_HW_CSUM) != 0; +} + +static int e1000_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} + +static int e1000_set_tso(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } + + adapter->flags |= FLAG_TSO_FORCE; + return 0; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 /* overestimate */ + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | 
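
For reference, the rx_pause/tx_pause combinations map onto the four flow-control modes exactly as in e1000_set_pauseparam() above; a tiny standalone restatement (the demo_* enum is a stand-in for the driver's e1000_fc_* modes):

#include <stdio.h>

enum demo_fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Same mapping e1000_set_pauseparam() applies when autoneg is off. */
static enum demo_fc_mode fc_mode(int rx_pause, int tx_pause)
{
	if (rx_pause && tx_pause)
		return FC_FULL;
	if (rx_pause)
		return FC_RX_PAUSE;
	if (tx_pause)
		return FC_TX_PAUSE;
	return FC_NONE;
}

int main(void)
{
	printf("rx=1 tx=0 -> %d (FC_RX_PAUSE)\n", fc_mode(1, 0));
	printf("rx=1 tx=1 -> %d (FC_FULL)\n", fc_mode(1, 1));
	return 0;
}
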
(adapter->pdev->revision << 16) | + adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + + /* ethtool doesn't use anything past this point, so all this + * code is likely legacy junk for apps that may or may not + * exist */ + if (hw->phy.type == e1000_phy_m88) { + e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = 0; /* was idle_errors */ + e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + ret_val = e1000_read_nvm(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_nvm(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + if (ret_val) { + /* a read error occurred, throw away the result */ + memset(eeprom_buff, 0xff, sizeof(u16) * + (last_word - first_word + 1)); + } else { + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 
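
e1000_get_eeprom() above converts a byte range into a 16-bit word range before reading the NVM, since the EEPROM is word addressable. A standalone illustration of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* Byte range -> 16-bit word range, as in e1000_get_eeprom(): */
	unsigned int offset = 3, len = 4;
	unsigned int first_word = offset >> 1;
	unsigned int last_word = (offset + len - 1) >> 1;

	/* Bytes 3..6 straddle words 1..3; the (offset & 1) byte skew is
	 * then resolved by copying from (u8 *)eeprom_buff + 1. */
	printf("bytes %u..%u -> words %u..%u (%u words)\n",
	       offset, offset + len - 1, first_word, last_word,
	       last_word - first_word + 1);
	return 0; /* bytes 3..6 -> words 1..3 (3 words) */
}
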
16))) + return -EFAULT; + + if (adapter->flags & FLAG_READ_ONLY_NVM) + return -EINVAL; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + + if (ret_val) + goto out; + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_nvm(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + if (ret_val) + goto out; + + /* + * Update the checksum over the first part of the EEPROM if needed + * and flush shadow RAM for applicable controllers + */ + if ((first_word <= NVM_CHECKSUM_REG) || + (hw->mac.type == e1000_82583) || + (hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82573)) + ret_val = e1000e_update_nvm_checksum(hw); + +out: + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; + + strncpy(drvinfo->driver, e1000e_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, e1000e_driver_version, + sizeof(drvinfo->version) - 1); + + /* + * EEPROM image version # is reported as firmware version # for + * PCI-E controllers + */ + snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", + (adapter->eeprom_vers & 0xF000) >> 12, + (adapter->eeprom_vers & 0x0FF0) >> 4, + (adapter->eeprom_vers & 0x000F)); + + strncpy(drvinfo->fw_version, firmware_version, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); + drvinfo->regdump_len = e1000_get_regs_len(netdev); + drvinfo->eedump_len = e1000_get_eeprom_len(netdev); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = E1000_MAX_RXD; + ring->tx_max_pending = E1000_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring, *tx_old; + struct e1000_ring *rx_ring, *rx_old; + int err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(adapter->netdev)) + 
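
The firmware version string built in e1000_get_drvinfo() above unpacks one 16-bit EEPROM word into 4/8/4-bit fields; a minimal demonstration (the example word 0x1A2B is arbitrary):

#include <stdio.h>

int main(void)
{
	/* The EEPROM image version is packed 4.8.4 into one 16-bit word,
	 * exactly as e1000_get_drvinfo() unpacks it: */
	unsigned short eeprom_vers = 0x1A2B; /* arbitrary example value */

	printf("%d.%d-%d\n",
	       (eeprom_vers & 0xF000) >> 12, /* major: 0x1  -> 1   */
	       (eeprom_vers & 0x0FF0) >> 4,  /* minor: 0xA2 -> 162 */
	       (eeprom_vers & 0x000F));      /* build: 0xB  -> 11  */
	return 0; /* prints 1.162-11 */
}
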
e1000e_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL); + if (!tx_ring) + goto err_alloc_tx; + + rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL); + if (!rx_ring) + goto err_alloc_rx; + + adapter->tx_ring = tx_ring; + adapter->rx_ring = rx_ring; + + rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD)); + rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); + + tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD)); + tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000e_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000e_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* + * restore the old in order to free it, + * then add in the new + */ + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000e_free_rx_resources(adapter); + e1000e_free_tx_resources(adapter); + kfree(tx_old); + kfree(rx_old); + adapter->rx_ring = rx_ring; + adapter->tx_ring = tx_ring; + err = e1000e_up(adapter); + if (err) + goto err_setup; + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return 0; +err_setup_tx: + e1000e_free_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rx_ring); +err_alloc_rx: + kfree(tx_ring); +err_alloc_tx: + e1000e_up(adapter); +err_setup: + clear_bit(__E1000_RESETTING, &adapter->state); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, + int reg, int offset, u32 mask, u32 write) +{ + u32 pat, val; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + for (pat = 0; pat < ARRAY_SIZE(test); pat++) { + E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, + (test[pat] & write)); + val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); + if (val != (test[pat] & write & mask)) { + e_err("pattern test reg %04X failed: got 0x%08X " + "expected 0x%08X\n", reg + offset, val, + (test[pat] & write & mask)); + *data = reg; + return 1; + } + } + return 0; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 val; + __ew32(&adapter->hw, reg, write & mask); + val = __er32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err("set/check reg %04X test failed: got 0x%08X " + "expected 0x%08X\n", reg, (val & mask), (write & mask)); + *data = reg; + return 1; + } + return 0; +} +#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ + return 1; \ + } while (0) +#define REG_PATTERN_TEST(reg, mask, write) \ + REG_PATTERN_TEST_ARRAY(reg, 0, mask, write) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &adapter->hw.mac; + u32 value; + u32 before; + u32 after; + u32 i; + u32 toggle; + u32 mask; + + /* + * The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + switch (mac->type) { + /* there are several bits on newer hardware that are r/w */ + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + toggle = 0x7FFFF3FF; + break; + default: + toggle = 0x7FFFF033; + break; + } + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err("failed STATUS register test got: 0x%08X expected: " + "0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + if (!(adapter->flags & FLAG_IS_ICH)) { + REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); + } + + REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); + + before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); + REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); + + REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + if (!(adapter->flags & FLAG_IS_ICH)) + REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); + mask = 0x8003FFFF; + switch (mac->type) { + case e1000_ich10lan: + case e1000_pchlan: + case e1000_pch2lan: + mask |= (1 << 18); + break; + default: + break; + } + for (i = 0; i < mac->rar_entry_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), + mask, 0xFFFFFFFF); + + for (i = 0; i < mac->mta_reg_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { + *data = 1; + return *data; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16) NVM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 mask; + u32 shared_int = 1; + u32 irq = adapter->pdev->irq; + int i; + int ret_val = 0; + int int_mode = E1000E_INT_MODE_LEGACY; + + *data = 0; + + /* NOTE: we don't test MSI/MSI-X interrupts here, yet */ + if (adapter->int_mode == 
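
e1000_eeprom_test() above relies on the classic e1000 NVM rule that the words from 0x00 through the checksum word sum, modulo 16 bits, to a fixed signature. A standalone sketch assuming the usual hw.h values NVM_CHECKSUM_REG = 0x3F and NVM_SUM = 0xBABA (neither is shown in this hunk):

#include <stdio.h>

/* Assumed hw.h values (not in this hunk). */
#define NVM_CHECKSUM_REG 0x3F
#define NVM_SUM          0xBABA

int main(void)
{
	unsigned short nvm[NVM_CHECKSUM_REG + 1] = { 0 };
	unsigned short sum = 0;
	int i;

	nvm[0] = 0x1234; /* arbitrary contents */
	for (i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += nvm[i];
	nvm[NVM_CHECKSUM_REG] = NVM_SUM - sum; /* write the checksum word */

	/* Verify the way e1000_eeprom_test() does: total must hit NVM_SUM. */
	for (sum = 0, i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum += nvm[i];
	printf("checksum %s\n", sum == (unsigned short)NVM_SUM ? "ok" : "bad");
	return 0;
}
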
E1000E_INT_MODE_MSIX) { + int_mode = adapter->int_mode; + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e1000e_set_interrupt_capability(adapter); + } + /* Hook up test interrupt handler just for this test */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) { + shared_int = 0; + } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + ret_val = -1; + goto out; + } + e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + e1e_flush(); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (i = 0; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (adapter->flags & FLAG_IS_ICH) { + switch (mask) { + case E1000_ICR_RXSEQ: + continue; + case 0x00000100: + if (adapter->hw.mac.type == e1000_ich8lan || + adapter->hw.mac.type == e1000_ich9lan) + continue; + break; + default: + break; + } + } + + if (!shared_int) { + /* + * Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + e1e_flush(); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* + * Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + e1e_flush(); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + +out: + if (int_mode == E1000E_INT_MODE_MSIX) { + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = int_mode; + e1000e_set_interrupt_capability(adapter); + } + + return ret_val; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (tx_ring->desc && tx_ring->buffer_info) { + for (i = 0; i < tx_ring->count; i++) { + if (tx_ring->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + tx_ring->buffer_info[i].dma, + tx_ring->buffer_info[i].length, + DMA_TO_DEVICE); + if (tx_ring->buffer_info[i].skb) + dev_kfree_skb(tx_ring->buffer_info[i].skb); + } + } + + if (rx_ring->desc && rx_ring->buffer_info) { + for (i = 0; i < rx_ring->count; i++) { + if (rx_ring->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rx_ring->buffer_info[i].dma, + 2048, DMA_FROM_DEVICE); + if (rx_ring->buffer_info[i].skb) + dev_kfree_skb(rx_ring->buffer_info[i].skb); + } + } + + if (tx_ring->desc) { + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; + } + if (rx_ring->desc) { + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + + kfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + kfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + int i; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!tx_ring->count) + tx_ring->count = E1000_DEFAULT_TXD; + + tx_ring->buffer_info = kcalloc(tx_ring->count, + sizeof(struct e1000_buffer), + GFP_KERNEL); + if (!(tx_ring->buffer_info)) { + ret_val = 1; + goto err_nomem; + } + + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + ret_val = 2; + goto err_nomem; + } + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64) tx_ring->dma >> 32)); + ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < tx_ring->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct sk_buff *skb; + unsigned int skb_size = 1024; + + skb = alloc_skb(skb_size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, skb_size); + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].length = skb->len; + tx_ring->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if 
(dma_mapping_error(&pdev->dev, + tx_ring->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rx_ring->count) + rx_ring->count = E1000_DEFAULT_RXD; + + rx_ring->buffer_info = kcalloc(rx_ring->count, + sizeof(struct e1000_buffer), + GFP_KERNEL); + if (!(rx_ring->buffer_info)) { + ret_val = 5; + goto err_nomem; + } + + rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + ret_val = 6; + goto err_nomem; + } + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64) rx_ring->dma >> 32)); + ew32(RDLEN, rx_ring->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | + E1000_RCTL_SBP | E1000_RCTL_SECRC | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rx_ring->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct sk_buff *skb; + + skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); + if (!skb) { + ret_val = 7; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rx_ring->buffer_info[i].skb = skb; + rx_ring->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, 2048, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + rx_ring->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = + cpu_to_le64(rx_ring->buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1e_wphy(&adapter->hw, 29, 0x001F); + e1e_wphy(&adapter->hw, 30, 0x8FFC); + e1e_wphy(&adapter->hw, 29, 0x001A); + e1e_wphy(&adapter->hw, 30, 0x8FF0); +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u16 phy_reg = 0; + s32 ret_val = 0; + + hw->mac.autoneg = 0; + + if (hw->phy.type == e1000_phy_ife) { + /* force 100, set loopback */ + e1e_wphy(hw, PHY_CONTROL, 0x6100); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_100 |/* Force Speed to 100 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + e1e_flush(); + udelay(500); + + return 0; + } + + /* Specific PHY configuration for loopback */ + switch (hw->phy.type) { + case e1000_phy_m88: + /* Auto-MDI/MDIX Off */ + e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1e_wphy(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + e1e_wphy(hw, PHY_CONTROL, 0x8140); + break; + case e1000_phy_gg82563: + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); + break; + case e1000_phy_bm: + /* Set Default MAC Interface speed to 1GB */ + e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); + phy_reg &= ~0x0007; + phy_reg |= 0x006; + e1e_wphy(hw, PHY_REG(2, 21), phy_reg); + /* Assert SW reset for above settings to take effect */ + e1000e_commit_phy(hw); + mdelay(1); + /* Force Full Duplex */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); + /* Set Link Up (in force link) */ + e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); + /* Force Link */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); + /* Set Early Link Enable */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); + break; + case e1000_phy_82577: + case e1000_phy_82578: + /* Workaround: K1 must be disabled for stable 1Gbps operation */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_err("Cannot setup 1Gbps loopback.\n"); + return ret_val; + } + e1000_configure_k1_ich8lan(hw, false); + hw->phy.ops.release(hw); + break; + case e1000_phy_82579: + /* Disable PHY energy detect power down */ + e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); + e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); + /* Disable full chip energy detect */ + e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); + /* Enable loopback on the PHY */ +#define I82577_PHY_LBK_CTRL 19 + e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); + break; + default: + break; + } + + /* force 1000, set loopback */ + e1e_wphy(hw, PHY_CONTROL, 0x4140); + mdelay(250); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (adapter->flags & FLAG_IS_ICH) + ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ + + if (hw->phy.media_type == e1000_media_type_copper && + hw->phy.type == e1000_phy_m88) { + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + } else { + /* + * Set the ILOS bit on the fiber Nic if half duplex link is + * detected. + */ + if ((er32(STATUS) & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* + * Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
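
The raw PHY_CONTROL values written in e1000_integrated_phy_loopback() above (0x6100, 0x9140, 0x8140, 0x4140) are ordinary MII BMCR bit combinations; as a reading aid, a standalone decoder using the standard BMCR_* bit values from the MII specification:

#include <stdio.h>

/* Standard MII BMCR control bits (values as in linux/mii.h). */
#define BMCR_SPEED1000  0x0040 /* speed select MSB */
#define BMCR_FULLDPLX   0x0100
#define BMCR_ANENABLE   0x1000
#define BMCR_SPEED100   0x2000 /* speed select LSB */
#define BMCR_LOOPBACK   0x4000
#define BMCR_RESET      0x8000

static void decode(unsigned short v)
{
	printf("0x%04X =%s%s%s%s%s%s\n", v,
	       v & BMCR_RESET     ? " reset"    : "",
	       v & BMCR_LOOPBACK  ? " loopback" : "",
	       v & BMCR_ANENABLE  ? " autoneg"  : "",
	       v & BMCR_FULLDPLX  ? " full-dup" : "",
	       v & BMCR_SPEED1000 ? " 1000Mb"   : "",
	       v & BMCR_SPEED100  ? " 100Mb"    : "");
}

int main(void)
{
	decode(0x6100); /* "force 100, set loopback" */
	decode(0x9140); /* reset, autoneg on, 1000/full */
	decode(0x8140); /* reset, autoneg off, 1000/full */
	decode(0x4140); /* "force 1000, set loopback" */
	return 0;
}
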
+ */ + if (hw->phy.type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + int link = 0; + + /* special requirements for 82571/82572 fiber adapters */ + + /* + * jump through hoops to make sure link is up because serdes + * link is hardwired up + */ + ctrl |= E1000_CTRL_SLU; + ew32(CTRL, ctrl); + + /* disable autoneg */ + ctrl = er32(TXCW); + ctrl &= ~(1 << 31); + ew32(TXCW, ctrl); + + link = (er32(STATUS) & E1000_STATUS_LU); + + if (!link) { + /* set invert loss of signal */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_ILOS; + ew32(CTRL, ctrl); + } + + /* + * special write to serdes control register to enable SerDes analog + * loopback + */ +#define E1000_SERDES_LB_ON 0x410 + ew32(SCTL, E1000_SERDES_LB_ON); + e1e_flush(); + usleep_range(10000, 20000); + + return 0; +} + +/* only call this for fiber/serdes connections to es2lan */ +static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrlext = er32(CTRL_EXT); + u32 ctrl = er32(CTRL); + + /* + * save CTRL_EXT to restore later, reuse an empty variable (unused + * on mac_type 80003es2lan) + */ + adapter->tx_fifo_head = ctrlext; + + /* clear the serdes mode bits, putting the device into mac loopback */ + ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + ew32(CTRL_EXT, ctrlext); + + /* force speed to 1000/FD, link up */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | + E1000_CTRL_SPD_1000 | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* set mac loopback */ + ctrl = er32(RCTL); + ctrl |= E1000_RCTL_LBM_MAC; + ew32(RCTL, ctrl); + + /* set testing mode parameters (no need to reset later) */ +#define KMRNCTRLSTA_OPMODE (0x1F << 16) +#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 + ew32(KMRNCTRLSTA, + (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); + + return 0; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + switch (hw->mac.type) { + case e1000_80003es2lan: + return e1000_set_es2lan_mac_loopback(adapter); + break; + case e1000_82571: + case e1000_82572: + return e1000_set_82571_fiber_loopback(adapter); + break; + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->phy.media_type == e1000_media_type_copper) { + return e1000_integrated_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac.type) { + case e1000_80003es2lan: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + /* restore CTRL_EXT, stealing space from tx_fifo_head */ + ew32(CTRL_EXT, adapter->tx_fifo_head); + adapter->tx_fifo_head = 0; + } + /* fall through */ + case e1000_82571: + case e1000_82572: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { +#define E1000_SERDES_LB_OFF 0x400 + ew32(SCTL, E1000_SERDES_LB_OFF); + e1e_flush(); + 
usleep_range(10000, 20000); + break; + } + /* Fall Through */ + default: + hw->mac.autoneg = 1; + if (hw->phy.type == e1000_phy_gg82563) + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); + e1e_rphy(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1e_wphy(hw, PHY_CONTROL, phy_reg); + e1000e_commit_phy(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) + return 0; + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + int i, j, k, l; + int lc; + int good_cnt; + int ret_val = 0; + unsigned long time; + + ew32(RDT, rx_ring->count - 1); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + k = 0; + l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + tx_ring->buffer_info[k].dma, + tx_ring->buffer_info[k].length, + DMA_TO_DEVICE); + k++; + if (k == tx_ring->count) + k = 0; + } + ew32(TDT, k); + e1e_flush(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rx_ring->buffer_info[l].dma, 2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rx_ring->buffer_info[l].skb, 1024); + if (!ret_val) + good_cnt++; + l++; + if (l == rx_ring->count) + l = 0; + /* + * time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (jiffies >= (time + 20)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + /* + * PHY loopback cannot be performed if SoL/IDER + * sessions are active + */ + if (e1000_check_reset_block(&adapter->hw)) { + e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter 
*adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + hw->mac.serdes_has_link = false; + + /* + * On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + hw->mac.ops.check_for_link(hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(hw); + if (hw->mac.autoneg) + /* + * On some Phy/switch combinations, link establishment + * can take a few seconds more than expected. + */ + msleep(5000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000e_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex; + u8 autoneg; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->state); + + if (!if_running) { + /* Get control of and reset hardware */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + e1000e_power_up_phy(adapter); + + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + } + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + e_info("offline testing starting\n"); + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + e1000e_reset(adapter); + + clear_bit(__E1000_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + /* Online tests */ + + e_info("online testing starting\n"); + + /* register, eeprom, intr and loopback tests not run online */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__E1000_TESTING, &adapter->state); + } + + if (!if_running) { + e1000e_reset(adapter); + + if (adapter->flags & FLAG_HAS_AMT) + e1000e_release_hw_control(adapter); + } + + msleep_interruptible(4 * 1000); +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + 
struct e1000_adapter *adapter = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (!(adapter->flags & FLAG_HAS_WOL) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | WAKE_PHY; + + /* apply any specific unsupported masks here */ + if (adapter->flags & FLAG_NO_WAKE_UCAST) { + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err("Interface does not support directed (unicast) " + "frame wake-up packets\n"); + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!(adapter->flags & FLAG_HAS_WOL) || + !device_can_wakeup(&adapter->pdev->dev) || + (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | + WAKE_MAGIC | WAKE_PHY))) + return -EOPNOTSUPP; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (!hw->mac.ops.blink_led) + return 2; /* cycle on/off twice per second */ + + hw->mac.ops.blink_led(hw); + break; + + case ETHTOOL_ID_INACTIVE: + if (hw->phy.type == e1000_phy_ife) + e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + hw->mac.ops.led_off(hw); + hw->mac.ops.cleanup_led(hw); + break; + + case ETHTOOL_ID_ON: + adapter->hw.mac.ops.led_on(&adapter->hw); + break; + + case ETHTOOL_ID_OFF: + adapter->hw.mac.ops.led_off(&adapter->hw); + break; + } + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} 
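The coalescing code above folds the single ethtool knob into two driver
values: adapter->itr_setting records the requested policy (0 disables
interrupt throttling, 1 and 3 select the adaptive modes), while adapter->itr
holds a target rate in interrupts/second. The ITR register itself counts the
minimum gap between interrupts in 256 ns units, which is where the
1000000000 / (itr * 256) write comes from. A minimal sketch of that
conversion, using a free-standing helper whose name (itr_reg_from_usecs) is
illustrative and not part of the driver:

	static unsigned int itr_reg_from_usecs(unsigned int usecs)
	{
		/* target rate: 10 usecs between interrupts -> 100000 ints/s */
		unsigned int ints_per_sec = 1000000 / usecs;

		/* ITR counts 256 ns increments: 100000 ints/s -> 39 */
		return 1000000000 / (ints_per_sec * 256);
	}

So a request for 10 us of Rx coalescing programs ITR to 39, and 39 * 256 ns
is roughly the requested 10 us between interrupts.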
+ +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EAGAIN; + + if (!adapter->hw.mac.autoneg) + return -EINVAL; + + e1000e_reinit_locked(adapter); + + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 net_stats; + int i; + char *p = NULL; + + e1000e_get_stats64(netdev, &net_stats); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + switch (e1000_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) &net_stats + + e1000_gstrings_stats[i].stat_offset; + break; + case E1000_STATS: + p = (char *) adapter + + e1000_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int e1000e_set_flags(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + bool need_reset = false; + int rc; + + need_reset = (data & ETH_FLAG_RXVLAN) != + (netdev->features & NETIF_F_HW_VLAN_RX); + + rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN | + ETH_FLAG_TXVLAN); + + if (rc) + return rc; + + if (need_reset) { + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + } + + return 0; +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .get_rx_csum = e1000_get_rx_csum, + .set_rx_csum = e1000_set_rx_csum, + .get_tx_csum = e1000_get_tx_csum, + .set_tx_csum = e1000_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = e1000_set_tso, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000e_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_flags = ethtool_op_get_flags, + .set_flags = e1000e_set_flags, +}; + +void e1000e_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); +} diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h new file mode 100644 index 0000000..2967039 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -0,0 +1,984 @@ 
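The new hw.h below collects the driver's shared hardware definitions: the CSR
register map, PHY/NVM constants, device IDs, descriptor layouts, and the
per-family function-pointer tables. Register access elsewhere in the driver
goes through the er32()/ew32() macros defined near the top of the header,
which paste the short register name onto the E1000_ prefix and expect a
struct e1000_hw *hw to be in scope. A sketch of the usual read-modify-write
idiom under those macros (illustrative, not a quote from the patch):

	u32 ctrl = er32(CTRL);		/* readl() of the mapped CTRL offset */
	ctrl |= E1000_CTRL_SLU;		/* set link up */
	ew32(CTRL, ctrl);		/* writel() it back */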
+/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include + +struct e1000_hw; +struct e1000_adapter; + +#include "defines.h" + +#define er32(reg) __er32(hw, E1000_##reg) +#define ew32(reg,val) __ew32(hw, E1000_##reg, (val)) +#define e1e_flush() er32(STATUS) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ + (writel((value), ((a)->hw_addr + reg + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) \ + (readl((a)->hw_addr + reg + ((offset) << 2))) + +enum e1e_registers { + E1000_CTRL = 0x00000, /* Device Control - RW */ + E1000_STATUS = 0x00008, /* Device Status - RO */ + E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */ + E1000_EERD = 0x00014, /* EEPROM Read - RW */ + E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */ + E1000_FLA = 0x0001C, /* Flash Access - RW */ + E1000_MDIC = 0x00020, /* MDI Control - RW */ + E1000_SCTL = 0x00024, /* SerDes Control - RW */ + E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ + E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ + E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */ + E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ + E1000_FCT = 0x00030, /* Flow Control Type - RW */ + E1000_VET = 0x00038, /* VLAN Ether Type - RW */ + E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ + E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ + E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ + E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ + E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ + E1000_EIAC_82574 = 0x000DC, /* Ext. 
Interrupt Auto Clear - RW */ + E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ + E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */ + E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */ +#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2)) + E1000_RCTL = 0x00100, /* Rx Control - RW */ + E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ + E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */ + E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */ + E1000_TCTL = 0x00400, /* Tx Control - RW */ + E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */ + E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */ + E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */ + E1000_LEDCTL = 0x00E00, /* LED Control - RW */ + E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ + E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ + E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ +#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ + E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ + E1000_PBS = 0x01008, /* Packet Buffer Size */ + E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ + E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ + E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ + E1000_PBA_ECC = 0x01100, /* PBA ECC Register */ + E1000_ERT = 0x02008, /* Early Rx Threshold - RW */ + E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ + E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ + E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ + E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ + E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ + E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */ + E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ + E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ + E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ + E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ +#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) + E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. 
+ * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + * + */ +#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) + E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ + E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ + E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ + E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ + E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ + E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ + E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ + E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ +#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) + E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */ + E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */ +#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8)) + E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ + E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ + E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ + E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */ + E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */ + E1000_SCC = 0x04014, /* Single Collision Count - R/clr */ + E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */ + E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */ + E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ + E1000_COLC = 0x04028, /* Collision Count - R/clr */ + E1000_DC = 0x04030, /* Defer Count - R/clr */ + E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */ + E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ + E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ + E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ + E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */ + E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */ + E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */ + E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */ + E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */ + E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */ + E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */ + E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */ + E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */ + E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */ + E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */ + E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */ + E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */ + E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */ + E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */ + E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */ + E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */ + E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */ + E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */ + E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */ + E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */ + E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */ + E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */ + E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */ + E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */ + E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ + E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */ + E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */ + E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */ + E1000_TOTL = 
0x040C8, /* Total Octets Tx Low - R/clr */ + E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */ + E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */ + E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */ + E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */ + E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */ + E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */ + E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */ + E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */ + E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */ + E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */ + E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */ + E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */ + E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */ + E1000_IAC = 0x04100, /* Interrupt Assertion Count */ + E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ + E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ + E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */ + E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */ + E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */ + E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ + E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ + E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ + E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */ + E1000_RFCTL = 0x05008, /* Receive Filter Control */ + E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ + E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */ +#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8)) +#define E1000_RA (E1000_RAL(0)) + E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ +#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) + E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ + E1000_WUC = 0x05800, /* Wakeup Control - RW */ + E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ + E1000_WUS = 0x05810, /* Wakeup Status - RO */ + E1000_MANC = 0x05820, /* Management Control - RW */ + E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */ + E1000_HOST_IF = 0x08800, /* Host Interface */ + + E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */ + E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ + E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */ +#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4)) + E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ + E1000_GCR = 0x05B00, /* PCI-Ex Control */ + E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */ + E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ + E1000_SWSM = 0x05B50, /* SW Semaphore */ + E1000_FWSM = 0x05B54, /* FW Semaphore */ + E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ + E1000_FFLT_DBG = 0x05F04, /* Debug Register */ + E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */ +#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4)) +#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE + E1000_HICR = 0x08F00, /* Host Interface Control */ +}; + +#define E1000_MAX_PHY_ADDR 4 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define 
IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +#define BM_WUC_PAGE 800 +#define BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE 769 +#define BM_WUC_ENABLE_REG 17 +#define BM_WUC_ENABLE_BIT (1 << 2) +#define BM_WUC_HOST_WU_BIT (1 << 4) +#define BM_WUC_ME_WU_BIT (1 << 5) + +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +/* manage.c */ +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +/* nvm.c */ +#define E1000_STM_OPCODE 0xDB00 + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ +#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 +#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY 
Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_82574L 0x10D3 +#define E1000_DEV_ID_82574LA 0x10F6 +#define E1000_DEV_ID_82583V 0x150C + +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB + +#define E1000_DEV_ID_ICH8_82567V_3 0x1501 +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D +#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_BM 0x10E5 +#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 +#define E1000_DEV_ID_ICH9_IGP_M 0x10BF +#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB +#define E1000_DEV_ID_ICH9_IGP_C 0x294C +#define E1000_DEV_ID_ICH9_IFE 0x10C0 +#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 +#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 +#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC +#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD +#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE +#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE +#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF +#define E1000_DEV_ID_ICH10_D_BM_V 0x1525 +#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA +#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB +#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF +#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 +#define E1000_DEV_ID_PCH2_LV_LM 0x1502 +#define E1000_DEV_ID_PCH2_LV_V 0x1503 + +#define E1000_REVISION_4 4 + +#define E1000_FUNC_1 1 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 + +enum e1000_mac_type { + e1000_82571, + e1000_82572, + e1000_82573, + e1000_82574, + e1000_82583, + e1000_80003es2lan, + e1000_ich8lan, + e1000_ich9lan, + e1000_ich10lan, + e1000_pchlan, + e1000_pch2lan, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large +}; + +enum e1000_phy_type { + 
e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_bm, + e1000_phy_82578, + e1000_phy_82577, + e1000_phy_82579, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity{ + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + 
u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +/* Function pointers and static data for the MAC. 
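+ * Each family file (82571, es2lan, ich8lan) fills in this table during
+ * init, so most core code can call through the pointers rather than
+ * switching on hw->mac.type.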
*/ +struct e1000_mac_operations { + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + s32 (*read_mac_addr)(struct e1000_hw *); +}; + +/* + * When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*cfg_on_link_up)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); +}; + +/* Function pointers for the NVM. 
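+ * Implementations are NVM-type specific: SPI EEPROM parts use the generic
+ * NVM routines, while ICH/PCH parts go through the flash-sequencing
+ * helpers in ich8lan.c below.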
*/ +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; + enum e1000_serdes_link_state serdes_link_state; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_width width; + + u16 func; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +#define E1000_ICH8_SHADOW_RAM_WORDS 2048 + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; + bool nvm_k1_enabled; + bool eee_disable; +}; + +struct e1000_hw { + struct e1000_adapter *adapter; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_80003es2lan e80003es2lan; + struct 
e1000_dev_spec_ich8lan ich8lan; + } dev_spec; +}; + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c new file mode 100644 index 0000000..4e36978 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -0,0 +1,4111 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82562G 10/100 Network Connection + * 82562G-2 10/100 Network Connection + * 82562GT 10/100 Network Connection + * 82562GT-2 10/100 Network Connection + * 82562V 10/100 Network Connection + * 82562V-2 10/100 Network Connection + * 82566DC-2 Gigabit Network Connection + * 82566DC Gigabit Network Connection + * 82566DM-2 Gigabit Network Connection + * 82566DM Gigabit Network Connection + * 82566MC Gigabit Network Connection + * 82566MM Gigabit Network Connection + * 82567LM Gigabit Network Connection + * 82567LF Gigabit Network Connection + * 82567V Gigabit Network Connection + * 82567LM-2 Gigabit Network Connection + * 82567LF-2 Gigabit Network Connection + * 82567V-2 Gigabit Network Connection + * 82567LF-3 Gigabit Network Connection + * 82567LM-3 Gigabit Network Connection + * 82567LM-4 Gigabit Network Connection + * 82577LM Gigabit Network Connection + * 82577LC Gigabit Network Connection + * 82578DM Gigabit Network Connection + * 82578DC Gigabit Network Connection + * 82579LM Gigabit Network Connection + * 82579V Gigabit Network Connection + */ + +#include "e1000.h" + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_PR0 0x0074 + +#define ICH_FLASH_READ_COMMAND_TIMEOUT 500 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +/* FW established a valid mode */ +#define E1000_ICH_FWSM_FW_VALID 0x00008000 + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_OFF2 << 8) | \ + 
(ID_LED_DEF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 +#define E1000_ICH_NVM_SIG_VALUE 0x80 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ + +#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ + +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 + +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) + +/* PHY Low Power Idle Control */ +#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 + +/* EMI Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ + +/* Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ + +/* KMRN Mode Control */ +#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) +#define HV_KMRN_MDIO_SLOW 0x0400 + +/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ +/* Offset 04h HSFSTS */ +union ich8_hws_flash_status { + struct ich8_hsfsts { + u16 flcdone :1; /* bit 0 Flash Cycle Done */ + u16 flcerr :1; /* bit 1 Flash Cycle Error */ + u16 dael :1; /* bit 2 Direct Access error Log */ + u16 berasesz :2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog :1; /* bit 5 flash cycle in Progress */ + u16 reserved1 :2; /* bit 13:6 Reserved */ + u16 reserved2 :6; /* bit 13:6 Reserved */ + u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ + } hsf_status; + u16 regval; +}; + +/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ +/* Offset 06h FLCTL */ +union ich8_hws_flash_ctrl { + struct ich8_hsflctl { + u16 flcgo :1; /* 0 Flash Cycle Go */ + u16 flcycle :2; /* 2:1 Flash Cycle */ + u16 reserved :5; /* 7:3 Reserved */ + u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ + u16 flockdn :6; /* 15:10 Reserved */ + } hsf_ctrl; + u16 regval; +}; + +/* ICH Flash Region Access Permissions */ +union ich8_hws_flash_regacc { + struct ich8_flracc { + u32 grra :8; /* 0:7 GbE region Read Access */ + u32 grwa :8; /* 8:15 GbE region Write 
Access */ + u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ + } hsf_flregacc; + u16 regval; +}; + +/* ICH Flash Protected Region */ +union ich8_flash_protected_range { + struct ich8_pr { + u32 base:13; /* 0:12 Protected Range Base */ + u32 reserved1:2; /* 13:14 Reserved */ + u32 rpe:1; /* 15 Read Protection Enable */ + u32 limit:13; /* 16:28 Protected Range Limit */ + u32 reserved2:2; /* 29:30 Reserved */ + u32 wpe:1; /* 31 Write Protection Enable */ + } range; + u32 regval; +}; + +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte); +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data); +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data); +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); +static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); +static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); +static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); +static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); +static s32 e1000_led_on_pchlan(struct e1000_hw *hw); +static s32 e1000_led_off_pchlan(struct e1000_hw *hw); +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); + +static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) +{ + return readw(hw->flash_address + reg); +} + +static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->flash_address + reg); +} + +static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) +{ + writew(val, hw->flash_address + reg); +} + +static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->flash_address + reg); +} + +#define er16flash(reg) __er16flash(hw, (reg)) +#define er32flash(reg) __er32flash(hw, (reg)) +#define ew16flash(reg,val) __ew16flash(hw, (reg), (val)) +#define ew32flash(reg,val) __ew32flash(hw, (reg), (val)) + +static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; + ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; + ew32(CTRL, ctrl); + e1e_flush(); + udelay(10); + ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + ew32(CTRL, ctrl); +} + +/** + * e1000_init_phy_params_pchlan - Initialize PHY function pointers + * @hw: pointer to 
the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 fwsm; + s32 ret_val = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.set_page = e1000_set_page_igp; + phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; + phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* + * The MAC-PHY interconnect may still be in SMBus mode + * after Sx->S0. If the manageability engine (ME) is + * disabled, then toggle the LANPHYPC Value bit to force + * the interconnect to PCIe mode. + */ + fwsm = er32(FWSM); + if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) { + e1000_toggle_lanphypc_value_ich8lan(hw); + msleep(50); + + /* + * Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if (hw->mac.type == e1000_pch2lan) + e1000_gate_hw_phy_config_ich8lan(hw, true); + } + + /* + * Reset the PHY before any access to it. Doing so, ensures that + * the PHY is in a known good state before we read/write PHY registers. + * The generic reset is sufficient here, because we haven't determined + * the PHY type yet. + */ + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + phy->id = e1000_phy_unknown; + switch (hw->mac.type) { + default: + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + goto out; + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + break; + /* fall-through */ + case e1000_pch2lan: + /* + * In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + goto out; + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + goto out; + break; + } + phy->type = e1000e_get_phy_type_from_id(phy->id); + + switch (phy->type) { + case e1000_phy_82577: + case e1000_phy_82579: + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.commit = e1000e_phy_sw_reset; + break; + case e1000_phy_82578: + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000e_get_cable_length_m88; + phy->ops.get_info = e1000e_get_phy_info_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + +out: + return ret_val; +} + +/** + * e1000_init_phy_params_ich8lan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. 
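+ *
+ * A minimal usage sketch of the resulting ops table (the call site is
+ * hypothetical; PHY_CONTROL and MII_CR_RESTART_AUTO_NEG are existing
+ * driver defines):
+ *
+ *	u16 data;
+ *	s32 ret = hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+ *	if (!ret)
+ *		ret = hw->phy.ops.write_reg(hw, PHY_CONTROL,
+ *					    data | MII_CR_RESTART_AUTO_NEG);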
+ **/ +static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 i = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + + /* + * We may need to do this twice - once for IGP and if that fails, + * we'll set BM func pointers and try again + */ + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + e_dbg("Cannot determine PHY addr. Erroring out\n"); + return ret_val; + } + } + + phy->id = 0; + while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + usleep_range(1000, 2000); + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; + phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; + phy->ops.get_info = e1000e_get_phy_info_igp; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + phy->ops.get_info = e1000_get_phy_info_ife; + phy->ops.check_polarity = e1000_check_polarity_ife; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; + break; + case BME1000_E_PHY_ID: + phy->type = e1000_phy_bm; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.commit = e1000e_phy_sw_reset; + phy->ops.get_info = e1000e_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. + **/ +static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 gfpreg, sector_base_addr, sector_end_addr; + u16 i; + + /* Can't read flash registers if the register set isn't mapped. */ + if (!hw->flash_address) { + e_dbg("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } + + nvm->type = e1000_nvm_flash_sw; + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* + * sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + + /* + * find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. 
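+	 *
+	 * Worked example with a hypothetical GFPREG value: a base of
+	 * sector 0x000 and an end of sector 0x040 span 64 sectors of
+	 * 4096 bytes, i.e. 256 KiB; halved for the two banks that is
+	 * 128 KiB, and divided by sizeof(u16) it gives 65536 words
+	 * per bank.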
+ */ + nvm->flash_bank_size = (sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT; + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + + nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + return 0; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. + **/ +static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type function pointer */ + hw->phy.media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = false; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* LED operations */ + switch (mac->type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; + /* ID LED init */ + mac->ops.id_led_init = e1000e_id_led_init; + /* blink LED */ + mac->ops.blink_led = e1000e_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000e_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_ich8lan; + mac->ops.led_off = e1000_led_off_ich8lan; + break; + case e1000_pchlan: + case e1000_pch2lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_pchlan; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_pchlan; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_pchlan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_pchlan; + mac->ops.led_off = e1000_led_off_pchlan; + break; + default: + break; + } + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); + + /* Gate automatic PHY configuration by hardware on managed 82579 */ + if ((mac->type == e1000_pch2lan) && + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + return 0; +} + +/** + * e1000_set_eee_pchlan - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure. The bits in + * the LPI Control register will remain set only if/when link is up. + **/ +static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_reg; + + if (hw->phy.type != e1000_phy_82579) + goto out; + + ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); + if (ret_val) + goto out; + + if (hw->dev_spec.ich8lan.eee_disable) + phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; + else + phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; + + ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); +out: + return ret_val; +} + +/** + * e1000_check_for_copper_link_ich8lan - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. 
If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_k1_gig_workaround_hv(hw, link); + if (ret_val) + goto out; + } + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1000_link_stall_workaround_hv(hw); + if (ret_val) + goto out; + } + + if (hw->mac.type == e1000_pch2lan) { + ret_val = e1000_k1_workaround_lv(hw); + if (ret_val) + goto out; + } + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* Enable/Disable EEE after link up */ + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + goto out; + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000e_config_collision_dist(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + e_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_ich8lan(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_ich8lan(hw); + if (rc) + return rc; + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + rc = e1000_init_phy_params_ich8lan(hw); + break; + case e1000_pchlan: + case e1000_pch2lan: + rc = e1000_init_phy_params_pchlan(hw); + break; + default: + break; + } + if (rc) + return rc; + + /* + * Disable Jumbo Frame support on parts with Intel 10/100 PHY or + * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). 
+ */ + if ((adapter->hw.phy.type == e1000_phy_ife) || + ((adapter->hw.mac.type >= e1000_pch2lan) && + (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { + adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; + + hw->mac.ops.blink_led = NULL; + } + + if ((adapter->hw.mac.type == e1000_ich8lan) && + (adapter->hw.phy.type == e1000_phy_igp_3)) + adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; + + /* Disable EEE by default until IEEE802.3az spec is finalized */ + if (adapter->flags2 & FLAG2_HAS_EEE) + adapter->hw.dev_spec.ich8lan.eee_disable = true; + + return 0; +} + +static DEFINE_MUTEX(nvm_mutex); + +/** + * e1000_acquire_nvm_ich8lan - Acquire NVM mutex + * @hw: pointer to the HW structure + * + * Acquires the mutex for performing NVM operations. + **/ +static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) +{ + mutex_lock(&nvm_mutex); + + return 0; +} + +/** + * e1000_release_nvm_ich8lan - Release NVM mutex + * @hw: pointer to the HW structure + * + * Releases the mutex used while performing NVM operations. + **/ +static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) +{ + mutex_unlock(&nvm_mutex); +} + +static DEFINE_MUTEX(swflag_mutex); + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing PHY and select + * MAC CSR accesses. + **/ +static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; + s32 ret_val = 0; + + mutex_lock(&swflag_mutex); + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("SW/FW/HW has locked the resource for too long.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + timeout = SW_FLAG_TIMEOUT; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("Failed to acquire the semaphore.\n"); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + if (ret_val) + mutex_unlock(&swflag_mutex); + + return ret_val; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing PHY and select + * MAC CSR accesses. + **/ +static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + } else { + e_dbg("Semaphore unexpectedly released by sw/fw/hw\n"); + } + + mutex_unlock(&swflag_mutex); +} + +/** + * e1000_check_mng_mode_ich8lan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has any manageability enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. 
+ **/ +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_mng_mode_pchlan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has iAMT enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. + **/ +static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + + return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET; +} + +/** + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states + * @hw: pointer to the HW structure + * + * Assumes semaphore already acquired. + * + **/ +static s32 e1000_write_smbus_addr(struct e1000_hw *hw) +{ + u16 phy_data; + u32 strap = er32(STRAP); + s32 ret_val = 0; + + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; + + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~HV_SMB_ADDR_MASK; + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); + +out: + return ret_val; +} + +/** + * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * + * SW should configure the LCD from the NVM extended configuration region + * as a workaround for certain parts. + **/ +static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val = 0; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + /* + * Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. 
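+	 *
+	 * Layout assumed by the copy loop below: the extended
+	 * configuration region holds word pairs, data word first and
+	 * PHY register address second; an IGP01E1000_PHY_PAGE_SELECT
+	 * record only updates the page applied to subsequent writes.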
+ */ + switch (hw->mac.type) { + case e1000_ich8lan: + if (phy->type != e1000_phy_igp_3) + return ret_val; + + if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || + (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + break; + } + /* Fall-thru */ + case e1000_pchlan: + case e1000_pch2lan: + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + break; + default: + return ret_val; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + data = er32(FEXTNVM); + if (!(data & sw_cfg_mask)) + goto out; + + /* + * Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration + */ + data = er32(EXTCNF_CTRL); + if (!(hw->mac.type == e1000_pch2lan)) { + if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) + goto out; + } + + cnf_size = er32(EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + goto out; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && + (hw->mac.type == e1000_pchlan)) || + (hw->mac.type == e1000_pch2lan)) { + /* + * HW configures the SMBus address and LEDs when the + * OEM and LCD Write Enable bits are set in the NVM. + * When both NVM bits are cleared, SW will configure + * them instead. + */ + ret_val = e1000_write_smbus_addr(hw); + if (ret_val) + goto out; + + data = er32(LEDCTL); + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, + (u16)data); + if (ret_val) + goto out; + } + + /* Configure LCD from extended configuration region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, + ®_data); + if (ret_val) + goto out; + + ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), + 1, ®_addr); + if (ret_val) + goto out; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr &= PHY_REG_MASK; + reg_addr |= phy_page; + + ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, + reg_data); + if (ret_val) + goto out; + } + +out: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_k1_gig_workaround_hv - K1 Si workaround + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig + * If link is down, the function will restore the default K1 setting located + * in the NVM. 
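+ *
+ * Called from the copper link check in this file with the live link
+ * state:
+ *
+ *	ret_val = e1000_k1_gig_workaround_hv(hw, link);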
+ **/ +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) +{ + s32 ret_val = 0; + u16 status_reg = 0; + bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; + + if (hw->mac.type != e1000_pchlan) + goto out; + + /* Wrap the whole flow with the sw flag */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ + if (link) { + if (hw->phy.type == e1000_phy_82578) { + ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK; + + if (status_reg == (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + k1_enable = false; + } + + if (hw->phy.type == e1000_phy_82577) { + ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK; + + if (status_reg == (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) + k1_enable = false; + } + + /* Link stall fix for link up */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x0100); + if (ret_val) + goto release; + + } else { + /* Link stall fix for link down */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x4100); + if (ret_val) + goto release; + } + + ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); + +release: + hw->phy.ops.release(hw); +out: + return ret_val; +} + +/** + * e1000_configure_k1_ich8lan - Configure K1 power state + * @hw: pointer to the HW structure + * @enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + **/ +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) +{ + s32 ret_val = 0; + u32 ctrl_reg = 0; + u32 ctrl_ext = 0; + u32 reg = 0; + u16 kmrn_reg = 0; + + ret_val = e1000e_read_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + goto out; + + if (k1_enable) + kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; + else + kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; + + ret_val = e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); + if (ret_val) + goto out; + + udelay(20); + ctrl_ext = er32(CTRL_EXT); + ctrl_reg = er32(CTRL); + + reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + reg |= E1000_CTRL_FRCSPD; + ew32(CTRL, reg); + + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + e1e_flush(); + udelay(20); + ew32(CTRL, ctrl_reg); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + udelay(20); + +out: + return ret_val; +} + +/** + * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determines whether HW should configure LPLU and Gbe Disable. 
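+ *
+ * Mapping applied below from MAC PHY_CTRL bits to PHY HV_OEM_BITS:
+ *
+ *	D0: GBE_DISABLE        -> HV_OEM_BITS_GBE_DIS
+ *	    D0A_LPLU           -> HV_OEM_BITS_LPLU
+ *	Dx: NOND0A_GBE_DISABLE -> HV_OEM_BITS_GBE_DIS
+ *	    NOND0A_LPLU        -> HV_OEM_BITS_LPLU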
+ **/ +static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 oem_reg; + + if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (!(hw->mac.type == e1000_pch2lan)) { + mac_reg = er32(EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto out; + } + + mac_reg = er32(FEXTNVM); + if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) + goto out; + + mac_reg = er32(PHY_CTRL); + + ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto out; + + oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } else { + if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } + /* Restart auto-neg to activate the bits */ + if (!e1000_check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); + +out: + hw->phy.ops.release(hw); + + return ret_val; +} + + +/** + * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode + * @hw: pointer to the HW structure + **/ +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data |= HV_KMRN_MDIO_SLOW; + + ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); + + return ret_val; +} + +/** + * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + **/ +static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_data; + + if (hw->mac.type != e1000_pchlan) + return ret_val; + + /* Set MDIO slow mode before any other MDIO access */ + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + goto out; + } + + if (((hw->phy.type == e1000_phy_82577) && + ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || + ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { + /* Disable generation of early preamble */ + ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); + if (ret_val) + return ret_val; + + /* Preamble tuning for SSC */ + ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); + if (ret_val) + return ret_val; + } + + if (hw->phy.type == e1000_phy_82578) { + /* + * Return registers to default by doing a soft reset then + * writing 0x3140 to the control register. + */ + if (hw->phy.revision < 2) { + e1000e_phy_sw_reset(hw); + ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140); + } + } + + /* Select page 0 */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + hw->phy.addr = 1; + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + hw->phy.ops.release(hw); + if (ret_val) + goto out; + + /* + * Configure the K1 Si workaround during phy reset assuming there is + * link so that it disables K1 if link is in 1Gbps. 
+ */ + ret_val = e1000_k1_gig_workaround_hv(hw, true); + if (ret_val) + goto out; + + /* Workaround for link disconnects on a busy hub in half duplex */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, + phy_data & 0x00FF); +release: + hw->phy.ops.release(hw); +out: + return ret_val; +} + +/** + * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY + * @hw: pointer to the HW structure + **/ +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) +{ + u32 mac_reg; + u16 i, phy_reg = 0; + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) + goto release; + + /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ + for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + mac_reg = er32(RAL(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), + (u16)((mac_reg >> 16) & 0xFFFF)); + + mac_reg = er32(RAH(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), + (u16)((mac_reg & E1000_RAH_AV) + >> 16)); + } + + e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation + * with 82579 PHY + * @hw: pointer to the HW structure + * @enable: flag to enable/disable workaround when enabling/disabling jumbos + **/ +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) +{ + s32 ret_val = 0; + u16 phy_reg, data; + u32 mac_reg; + u16 i; + + if (hw->mac.type != e1000_pch2lan) + goto out; + + /* disable Rx path while enabling/disabling workaround */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); + if (ret_val) + goto out; + + if (enable) { + /* + * Write Rx addresses (rar_entry_count for RAL/H, +4 for + * SHRAL/H) and initial CRC values to the MAC + */ + for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + u8 mac_addr[ETH_ALEN] = {0}; + u32 addr_high, addr_low; + + addr_high = er32(RAH(i)); + if (!(addr_high & E1000_RAH_AV)) + continue; + addr_low = er32(RAL(i)); + mac_addr[0] = (addr_low & 0xFF); + mac_addr[1] = ((addr_low >> 8) & 0xFF); + mac_addr[2] = ((addr_low >> 16) & 0xFF); + mac_addr[3] = ((addr_low >> 24) & 0xFF); + mac_addr[4] = (addr_high & 0xFF); + mac_addr[5] = ((addr_high >> 8) & 0xFF); + + ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); + } + + /* Write Rx addresses to the PHY */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + /* Enable jumbo frame workaround in the MAC */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(1 << 14); + mac_reg |= (7 << 15); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg |= E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + goto out; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data | (1 << 0)); + if (ret_val) + goto out; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + goto out; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + goto out; + + /* Enable 
jumbo frame workaround in the PHY */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + data |= (0x37 << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data &= ~(1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x1A << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + goto out; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00); + if (ret_val) + goto out; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); + if (ret_val) + goto out; + } else { + /* Write MAC register values back to h/w defaults */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(0xF << 14); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg &= ~E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + goto out; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data & ~(1 << 0)); + if (ret_val) + goto out; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + goto out; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + goto out; + + /* Write PHY register values back to h/w defaults */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data |= (1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x8 << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + goto out; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); + if (ret_val) + goto out; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); + if (ret_val) + goto out; + } + + /* re-enable Rx path after enabling/disabling workaround */ + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); + +out: + return ret_val; +} + +/** + * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. 
+ **/
+static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (hw->mac.type != e1000_pch2lan)
+		goto out;
+
+	/* Set MDIO slow mode before any other MDIO access */
+	ret_val = e1000_set_mdio_slow_mode_hv(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_k1_workaround_lv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * Workaround to set the K1 beacon duration for 82579 parts
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 status_reg = 0;
+	u32 mac_reg;
+
+	if (hw->mac.type != e1000_pch2lan)
+		goto out;
+
+	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
+	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
+	if (ret_val)
+		goto out;
+
+	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+		mac_reg = er32(FEXTNVM4);
+		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+		if (status_reg & HV_M_STATUS_SPEED_1000)
+			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+		else
+			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+
+		ew32(FEXTNVM4, mac_reg);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ * @hw: pointer to the HW structure
+ * @gate: boolean set to true to gate, false to ungate
+ *
+ * Gate/ungate the automatic PHY configuration via hardware; perform
+ * the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+	u32 extcnf_ctrl;
+
+	if (hw->mac.type != e1000_pch2lan)
+		return;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+
+	if (gate)
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+	else
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+	ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+/**
+ * e1000_lan_init_done_ich8lan - Check for PHY config completion
+ * @hw: pointer to the HW structure
+ *
+ * Check the appropriate indication that the MAC has finished configuring
+ * the PHY after a software reset.
+ **/
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+
+	/* Wait for basic configuration to complete before proceeding */
+	do {
+		data = er32(STATUS);
+		data &= E1000_STATUS_LAN_INIT_DONE;
+		udelay(100);
+	} while ((!data) && --loop);
+
+	/*
+	 * If basic configuration is incomplete before the above loop
+	 * count reaches 0, loading the configuration from NVM will
+	 * leave the PHY in a bad state, possibly resulting in no link.
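+	 *
+	 * Each pass of the poll above delays 100 us, so the worst-case
+	 * wait is E1000_ICH8_LAN_INIT_TIMEOUT * 100 us (roughly 150 ms,
+	 * assuming the header defines the timeout as 1500 iterations).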
+ */ + if (loop == 0) + e_dbg("LAN_INIT_DONE not set, increase timeout\n"); + + /* Clear the Init Done bit for the next init event */ + data = er32(STATUS); + data &= ~E1000_STATUS_LAN_INIT_DONE; + ew32(STATUS, data); +} + +/** + * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset + * @hw: pointer to the HW structure + **/ +static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 reg; + + if (e1000_check_reset_block(hw)) + goto out; + + /* Allow time for h/w to get to quiescent state after reset */ + usleep_range(10000, 20000); + + /* Perform any necessary post-reset workarounds */ + switch (hw->mac.type) { + case e1000_pchlan: + ret_val = e1000_hv_phy_workarounds_ich8lan(hw); + if (ret_val) + goto out; + break; + case e1000_pch2lan: + ret_val = e1000_lv_phy_workarounds_ich8lan(hw); + if (ret_val) + goto out; + break; + default: + break; + } + + /* Clear the host wakeup bit after lcd reset */ + if (hw->mac.type >= e1000_pchlan) { + e1e_rphy(hw, BM_PORT_GEN_CFG, ®); + reg &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, reg); + } + + /* Configure the LCD with the extended configuration region in NVM */ + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + goto out; + + /* Configure the LCD with the OEM bits in NVM */ + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + + if (hw->mac.type == e1000_pch2lan) { + /* Ungate automatic PHY configuration on non-managed 82579 */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + /* Set EEE LPI Update Timer to 200usec */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, + I82579_LPI_UPDATE_TIMER); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, + 0x1387); +release: + hw->phy.ops.release(hw); + } + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_ich8lan - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY + * This is a function pointer entry point called by drivers + * or other shared routines. + **/ +static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* Gate automatic PHY configuration by hardware on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + ret_val = e1000_post_phy_reset_ich8lan(hw); + +out: + return ret_val; +} + +/** + * e1000_set_lplu_state_pchlan - Set Low Power Link Up state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU state according to the active flag. For PCH, if OEM write + * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set + * the phy speed. This function will manually set the LPLU bit and restart + * auto-neg as hw would do. D3 and D0 LPLU will call the same function + * since it configures the same bit. 
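+ *
+ * Both LPLU entry points are wired to this routine in
+ * e1000_init_phy_params_pchlan() above:
+ *
+ *	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
+ *	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;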
+ **/ +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) +{ + s32 ret_val = 0; + u16 oem_reg; + + ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto out; + + if (active) + oem_reg |= HV_OEM_BITS_LPLU; + else + oem_reg &= ~HV_OEM_BITS_LPLU; + + oem_reg |= HV_OEM_BITS_RESTART_AN; + ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg); + +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + if (phy->type == e1000_phy_ife) + return ret_val; + + phy_ctrl = er32(PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val; + u16 data; + + phy_ctrl = er32(PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return 0; +} + +/** + * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 + * @hw: pointer to the HW structure + * @bank: pointer to the variable that returns the active bank + * + * Reads signature byte from the NVM using the flash access registers. + * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
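+ *
+ * Offset math used below: with the signature in word 0x13, its high
+ * byte sits at flash byte offset 0x13 * 2 + 1 = 0x27 of each bank,
+ * and bank 1 starts flash_bank_size words (twice as many bytes)
+ * after bank 0.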
+ **/ +static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) +{ + u32 eecd; + struct e1000_nvm_info *nvm = &hw->nvm; + u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); + u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u8 sig_byte = 0; + s32 ret_val = 0; + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + eecd = er32(EECD); + if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == + E1000_EECD_SEC1VAL_VALID_MASK) { + if (eecd & E1000_EECD_SEC1VAL) + *bank = 1; + else + *bank = 0; + + return 0; + } + e_dbg("Unable to determine valid NVM bank via EEC - " + "reading flash signature\n"); + /* fall-thru */ + default: + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; + return 0; + } + + /* Check bank 1 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + + bank1_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return 0; + } + + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. + **/ +static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u16 i, word; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + for (i = 0; i < words; i++) { + if (dev_spec->shadow_ram[offset+i].modified) { + data[i] = dev_spec->shadow_ram[offset+i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, + act_offset + i, + &word); + if (ret_val) + break; + data[i] = word; + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_flash_cycle_init_ich8lan - Initialize flash + * @hw: pointer to the HW structure + * + * This function does initial flash setup so that a new read/write/erase cycle + * can be started. + **/ +static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) +{ + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Check if the flash descriptor is valid */ + if (hsfsts.hsf_status.fldesvalid == 0) { + e_dbg("Flash descriptor invalid. 
" + "SW Sequencing must be used.\n"); + return -E1000_ERR_NVM; + } + + /* Clear FCERR and DAEL in hw status by writing 1 */ + hsfsts.hsf_status.flcerr = 1; + hsfsts.hsf_status.dael = 1; + + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + /* + * Either we should have a hardware SPI cycle in progress + * bit to check against, in order to start a new cycle or + * FDONE bit should be changed in the hardware so that it + * is 1 after hardware reset, which can then be used as an + * indication whether a cycle is in progress or has been + * completed. + */ + + if (hsfsts.hsf_status.flcinprog == 0) { + /* + * There is no cycle running at present, + * so we can start a cycle. + * Begin by setting Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + ret_val = 0; + } else { + s32 i = 0; + + /* + * Otherwise poll for sometime so the current + * cycle has a chance to end before giving up. + */ + for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { + hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcinprog == 0) { + ret_val = 0; + break; + } + udelay(1); + } + if (ret_val == 0) { + /* + * Successful in waiting for previous cycle to timeout, + * now set the Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + } else { + e_dbg("Flash controller busy, cannot get access\n"); + } + } + + return ret_val; +} + +/** + * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) + * @hw: pointer to the HW structure + * @timeout: maximum time to wait for completion + * + * This function starts a flash cycle and waits for its completion. + **/ +static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + u32 i = 0; + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone == 1) + break; + udelay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) + return 0; + + return ret_val; +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + /* Must convert offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data_ich8lan(hw, offset, 2, data); +} + +/** + * e1000_read_flash_byte_ich8lan - Read byte from flash + * @hw: pointer to the HW structure + * @offset: The offset of the byte to read. + * @data: Pointer to a byte to store the value read. + * + * Reads a single byte from the NVM using the flash access registers. 
+ **/ +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data) +{ + s32 ret_val; + u16 word = 0; + + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + if (ret_val) + return ret_val; + + *data = (u8)word; + + return 0; +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr; + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != 0) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* + * Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (ret_val == 0) { + flash_data = er32flash(ICH_FLASH_FDATA0); + if (size == 1) + *data = (u8)(flash_data & 0x000000FF); + else if (size == 2) + *data = (u16)(flash_data & 0x0000FFFF); + break; + } else { + /* + * If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) { + /* Repeat for some time before giving up. */ + continue; + } else if (hsfsts.hsf_status.flcdone == 0) { + e_dbg("Timeout error - flash cycle " + "did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_nvm_ich8lan - Write word(s) to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to write. + * @words: Size of data to write in words + * @data: Pointer to the word(s) to write at offset. + * + * Writes a byte or word to the NVM using the flash access registers. 
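+ *
+ * Note that the words are only staged in the shadow RAM here; they
+ * are committed to the flash by e1000_update_nvm_checksum_ich8lan()
+ * below.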
+ **/ +static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + nvm->ops.acquire(hw); + + for (i = 0; i < words; i++) { + dev_spec->shadow_ram[offset+i].modified = true; + dev_spec->shadow_ram[offset+i].value = data[i]; + } + + nvm->ops.release(hw); + + return 0; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* + * We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + /* + * Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM + */ + if (dev_spec->shadow_ram[i].modified) { + data = dev_spec->shadow_ram[i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, i + + old_bank_offset, + &data); + if (ret_val) + break; + } + + /* + * If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD) + data |= E1000_ICH_NVM_SIG_MASK; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + udelay(100); + /* Write the bytes to the new bank. */ + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset, + (u8)data); + if (ret_val) + break; + + udelay(100); + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset + 1, + (u8)(data >> 8)); + if (ret_val) + break; + } + + /* + * Don't bother writing the segment valid bits if sector + * programming failed. 
+	 */
+	if (ret_val) {
+		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
+		e_dbg("Flash commit failed.\n");
+		goto release;
+	}
+
+	/*
+	 * Finally validate the new segment by setting bits 15:14 of its
+	 * signature word (0x13) to 10b; this can be done without an erase
+	 * because these bits start out as 11b and we only need to clear
+	 * bit 14 to 0b.
+	 */
+	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
+	if (ret_val)
+		goto release;
+
+	data &= 0xBFFF;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+						       act_offset * 2 + 1,
+						       (u8)(data >> 8));
+	if (ret_val)
+		goto release;
+
+	/*
+	 * And invalidate the previously valid segment by setting the high
+	 * byte of its signature word (0x13) to 0b. This can be done
+	 * without an erase because flash erase sets all bits to 1's;
+	 * we can write 1's to 0's without an erase.
+	 */
+	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+	if (ret_val)
+		goto release;
+
+	/* Great! Everything worked, we can now clear the cached entries. */
+	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+		dev_spec->shadow_ram[i].modified = false;
+		dev_spec->shadow_ram[i].value = 0xFFFF;
+	}
+
+release:
+	nvm->ops.release(hw);
+
+	/*
+	 * Reload the EEPROM, or else modifications will not appear
+	 * until after the next adapter reset.
+	 */
+	if (!ret_val) {
+		e1000e_reload_nvm(hw);
+		usleep_range(10000, 20000);
+	}
+
+out:
+	if (ret_val)
+		e_dbg("NVM update error: %d\n", ret_val);
+
+	return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Check to see if the checksum needs to be fixed by reading bit 6 in
+ * word 0x19. If the bit is 0, the EEPROM has been modified but the
+ * checksum was not recalculated, in which case we need to calculate
+ * the checksum and set bit 6.
+ **/
+static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 data;
+
+	/*
+	 * Read 0x19 and check bit 6. If this bit is 0, the checksum
+	 * needs to be fixed. This bit is an indication that the NVM
+	 * was prepared by OEM software and did not calculate the
+	 * checksum...a likely scenario.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
+	if (ret_val)
+		return ret_val;
+
+	if ((data & 0x40) == 0) {
+		data |= 0x40;
+		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1000e_update_nvm_checksum(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return e1000e_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
+ * @hw: pointer to the HW structure
+ *
+ * To prevent malicious write/erase of the NVM, set it to be read-only
+ * so that the hardware ignores all write/erase cycles of the NVM via
+ * the flash control registers. The shadow-ram copy of the NVM will
+ * still be updated; however, any updates to this copy will not stick
+ * across driver reloads.
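+ *
+ * The protected window is taken straight from the GbE region boundaries
+ * that the flash controller reports in GFPREG: the base comes from the
+ * low bits and the limit from bits 28:16 (both masked with
+ * FLASH_GFPREG_BASE_MASK below), and setting WPE in PR0 makes that
+ * whole range read-only.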
+ **/
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	union ich8_flash_protected_range pr0;
+	union ich8_hws_flash_status hsfsts;
+	u32 gfpreg;
+
+	nvm->ops.acquire(hw);
+
+	gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+	/* Write-protect GbE Sector of NVM */
+	pr0.regval = er32flash(ICH_FLASH_PR0);
+	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
+	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
+	pr0.range.wpe = true;
+	ew32flash(ICH_FLASH_PR0, pr0.regval);
+
+	/*
+	 * Lock down a subset of GbE Flash Control Registers, e.g.
+	 * PR0 to prevent the write-protection from being lifted.
+	 * Once FLOCKDN is set, the registers protected by it cannot
+	 * be written until FLOCKDN is cleared by a hardware reset.
+	 */
+	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+	hsfsts.hsf_status.flockdn = true;
+	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+	nvm->ops.release(hw);
+}
+
+/**
+ * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte/word to write.
+ * @size: Size of data to write, 1=byte 2=word
+ * @data: The byte(s) to write to the NVM.
+ *
+ * Writes one/two bytes to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					  u8 size, u16 data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	u32 flash_data = 0;
+	s32 ret_val;
+	u8 count = 0;
+
+	if (size < 1 || size > 2 || data > size * 0xff ||
+	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
+		return -E1000_ERR_NVM;
+
+	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+			    hw->nvm.flash_base_addr;
+
+	do {
+		udelay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val)
+			break;
+
+		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+		if (size == 1)
+			flash_data = (u32)data & 0x00FF;
+		else
+			flash_data = (u32)data;
+
+		ew32flash(ICH_FLASH_FDATA0, flash_data);
+
+		/*
+		 * Check if FCERR is set to 1; if so, clear it and try
+		 * the whole sequence a few more times, else we are done.
+		 */
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+		if (!ret_val)
+			break;
+
+		/*
+		 * If we're here, then things are most likely
+		 * completely hosed, but if the error condition
+		 * is detected, it won't hurt to give it another
+		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+		 */
+		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+		if (hsfsts.hsf_status.flcerr == 1)
+			/* Repeat for some time before giving up. */
+			continue;
+		if (hsfsts.hsf_status.flcdone == 0) {
+			e_dbg("Timeout error - flash cycle "
+			      "did not complete.\n");
+			break;
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+	return ret_val;
+}
+
+/**
+ * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The index of the byte to write.
+ * @data: The byte to write to the NVM.
+ *
+ * Writes a single byte to the NVM using the flash access registers.
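+ *
+ * This is just the two-byte path with size 1; e.g. (illustrative)
+ * writing 0xAB at @offset is equivalent to
+ *
+ *	e1000_write_flash_data_ich8lan(hw, offset, 1, 0x00AB);
+ *
+ * which programs FDATA0 with the byte in its low 8 bits and a one-byte
+ * FLDBCOUNT (size - 1 == 0).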
+ **/ +static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. + **/ +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + return ret_val; + + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); + udelay(100); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. + **/ +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val; + s32 count = 0; + s32 j, iteration, sector_size; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* + * Determine HW Sector size: Read BERASE bits of hw flash status + * register + * 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. + * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = 1; + break; + case 2: + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = 1; + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = 1; + break; + default: + return -E1000_ERR_NVM; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? flash_bank_size : 0; + + for (j = 0; j < iteration ; j++) { + do { + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + return ret_val; + + /* + * Write a value 11 (block Erase) in Flash + * Cycle field in hw flash control + */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* + * Write the last 24 bits of an index within the + * block into Flash Linear address field in Flash + * Address. 
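+			 *
+			 * Worked example: with BERASE = 00b (256-byte
+			 * sectors) and a 4 KiB bank, iteration is 16 and
+			 * the j-th erase targets
+			 * flash_base_addr + bank * 4096 + j * 256.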
+ */ + flash_linear_addr += (j * sector_size); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_ERASE_COMMAND_TIMEOUT); + if (ret_val == 0) + break; + + /* + * Check if FCERR is set to 1. If 1, + * clear it and try the whole sequence + * a few more times else Done + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) + /* repeat for some time before giving up */ + continue; + else if (hsfsts.hsf_status.flcdone == 0) + return ret_val; + } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); + } + + return 0; +} + +/** + * e1000_valid_led_default_ich8lan - Set the default LED settings + * @hw: pointer to the HW structure + * @data: Pointer to the LED settings + * + * Reads the LED default settings from the NVM to data. If the NVM LED + * settings is all 0's or F's, set the LED default to a valid LED default + * setting. + **/ +static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT_ICH8LAN; + + return 0; +} + +/** + * e1000_id_led_init_pchlan - store LED configurations + * @hw: pointer to the HW structure + * + * PCH does not control LEDs via the LEDCTL register, rather it uses + * the PHY LED configuration register. + * + * PCH also does not have an "always on" or "always off" mode which + * complicates the ID feature. Instead of using the "on" mode to indicate + * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()), + * use "link_up" mode. The LEDs will still ID on request if there is no + * link based on logic in e1000_led_[on|off]_pchlan(). 
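+ *
+ * Decode sketch (matching the loop below): LED i takes its 4-bit mode
+ * from NVM nibble i (data >> (i * 4)) and maps it into the 5-bit field
+ * at bit i * 5 of the PHY LED configuration, using "link_up" for on and
+ * "link_up" plus the invert bit for off.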
+ **/
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+	u16 data, i, temp, shift;
+
+	/* Get default ID LED modes */
+	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+	if (ret_val)
+		goto out;
+
+	mac->ledctl_default = er32(LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+		shift = (i * 5);
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode1 |= (ledctl_on << shift);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode1 |= (ledctl_off << shift);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode2 |= (ledctl_on << shift);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode2 |= (ledctl_off << shift);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ * @hw: pointer to the HW structure
+ *
+ * ICH8 uses the PCI Express bus, but does not contain a PCI Express
+ * Capability register, so the bus width is hardcoded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+
+	ret_val = e1000e_get_bus_info_pcie(hw);
+
+	/*
+	 * ICH devices are "PCI Express"-ish. They have
+	 * a configuration space, but do not contain
+	 * PCI Express Capability registers, so bus width
+	 * must be hardcoded.
+	 */
+	if (bus->width == e1000_bus_width_unknown)
+		bus->width = e1000_bus_width_pcie_x1;
+
+	return ret_val;
+}
+
+/**
+ * e1000_reset_hw_ich8lan - Reset the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Does a full reset of the hardware which includes a reset of the PHY and
+ * MAC.
+ **/
+static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u16 reg;
+	u32 ctrl, kab;
+	s32 ret_val;
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000e_disable_pcie_master(hw);
+	if (ret_val)
+		e_dbg("PCI-E Master disable polling has failed.\n");
+
+	e_dbg("Masking off all interrupts\n");
+	ew32(IMC, 0xffffffff);
+
+	/*
+	 * Disable the Transmit and Receive units. Then delay to allow
+	 * any pending transactions to complete before we hit the MAC
+	 * with the global reset.
+	 */
+	ew32(RCTL, 0);
+	ew32(TCTL, E1000_TCTL_PSP);
+	e1e_flush();
+
+	usleep_range(10000, 20000);
+
+	/* Workaround for ICH8 bit corruption issue in FIFO memory */
+	if (hw->mac.type == e1000_ich8lan) {
+		/* Set Tx and Rx buffer allocation to 8k apiece. */
+		ew32(PBA, E1000_PBA_8K);
+		/* Set Packet Buffer Size to 16k.
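+		 * The 8k/8k Tx/Rx split in PBA together with the 16k PBS
+		 * value is part of the same ICH8 FIFO workaround above.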
*/ + ew32(PBS, E1000_PBS_16K); + } + + if (hw->mac.type == e1000_pchlan) { + /* Save the NVM K1 bit setting*/ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®); + if (ret_val) + return ret_val; + + if (reg & E1000_NVM_K1_ENABLE) + dev_spec->nvm_k1_enabled = true; + else + dev_spec->nvm_k1_enabled = false; + } + + ctrl = er32(CTRL); + + if (!e1000_check_reset_block(hw)) { + /* + * Full-chip reset requires MAC and PHY reset at the same + * time to make sure the interface between MAC and the + * external PHY is reset. + */ + ctrl |= E1000_CTRL_PHY_RST; + + /* + * Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + } + ret_val = e1000_acquire_swflag_ich8lan(hw); + e_dbg("Issuing a global reset to ich8lan\n"); + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + /* cannot issue a flush here because it hangs the hardware */ + msleep(20); + + if (!ret_val) + mutex_unlock(&swflag_mutex); + + if (ctrl & E1000_CTRL_PHY_RST) { + ret_val = hw->phy.ops.get_cfg_done(hw); + if (ret_val) + goto out; + + ret_val = e1000_post_phy_reset_ich8lan(hw); + if (ret_val) + goto out; + } + + /* + * For PCH, this write will make sure that any noise + * will be detected as a CRC error and be dropped rather than show up + * as a bad packet to the DMA engine. + */ + if (hw->mac.type == e1000_pchlan) + ew32(CRC_OFFSET, 0x65656565); + + ew32(IMC, 0xffffffff); + er32(ICR); + + kab = er32(KABGTXD); + kab |= E1000_KABGTXD_BGSQLBIAS; + ew32(KABGTXD, kab); + +out: + return ret_val; +} + +/** + * e1000_init_hw_ich8lan - Initialize the hardware + * @hw: pointer to the HW structure + * + * Prepares the hardware for transmit and receive by doing the following: + * - initialize hardware bits + * - initialize LED identification + * - setup receive address registers + * - setup flow control + * - setup transmit descriptors + * - clear statistics + **/ +static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl_ext, txdctl, snoop; + s32 ret_val; + u16 i; + + e1000_initialize_hw_bits_ich8lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) + e_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + + /* Setup the receive address. */ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* + * The 82578 Rx buffer will stall if wakeup is enabled in host and + * the ME. Disable wakeup by clearing the host wakeup bit. + * Reset the phy after disabling host wakeup to reset the Rx buffer. 
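+	 *
+	 * (The wakeup bit lives in BM_PORT_GEN_CFG and is cleared via
+	 * BM_WUC_HOST_WU_BIT just below; the PHY reset that follows is
+	 * what actually flushes the stalled Rx buffer.)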
+ */ + if (hw->phy.type == e1000_phy_82578) { + e1e_rphy(hw, BM_PORT_GEN_CFG, &i); + i &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, i); + ret_val = e1000_phy_hw_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* Setup link and flow control */ + ret_val = e1000_setup_link_ich8lan(hw); + + /* Set the transmit descriptor write-back policy for both queues */ + txdctl = er32(TXDCTL(0)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + ew32(TXDCTL(0), txdctl); + txdctl = er32(TXDCTL(1)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + ew32(TXDCTL(1), txdctl); + + /* + * ICH8 has opposite polarity of no_snoop bits. + * By default, we should use snoop behavior. + */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32) ~(PCIE_NO_SNOOP_ALL); + e1000e_set_pcie_no_snoop(hw, snoop); + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return 0; +} +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive. + **/ +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Extended Device Control */ + reg = er32(CTRL_EXT); + reg |= (1 << 22); + /* Enable PHY low-power state when MAC is at D3 w/o WoL */ + if (hw->mac.type >= e1000_pchlan) + reg |= E1000_CTRL_EXT_PHYPDEN; + ew32(CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + ew32(TARC(1), reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = er32(STATUS); + reg &= ~(1 << 31); + ew32(STATUS, reg); + } + + /* + * work-around descriptor data corruption issue during nfs v2 udp + * traffic, just disable the nfs filtering capability + */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + ew32(RFCTL, reg); +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. 
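+ *
+ * As a rough summary of the modes chosen below: e1000_fc_full advertises
+ * and honors PAUSE in both directions, while e1000_fc_rx_pause only
+ * honors received PAUSE frames; PCH parts default to the latter to work
+ * around a h/w hang with Tx flow control enabled.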
+ **/ +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + + if (e1000_check_reset_block(hw)) + return 0; + + /* + * ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + /* Workaround h/w hang when Tx flow control enabled */ + if (hw->mac.type == e1000_pchlan) + hw->fc.requested_mode = e1000_fc_rx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + } + + /* + * Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Continue to configure the copper link. */ + ret_val = e1000_setup_copper_link_ich8lan(hw); + if (ret_val) + return ret_val; + + ew32(FCTTV, hw->fc.pause_time); + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_82577)) { + ew32(FCRTV_PCH, hw->fc.refresh_time); + + ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), + hw->fc.pause_time); + if (ret_val) + return ret_val; + } + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Configures the kumeran interface to the PHY to wait the appropriate time + * when polling the PHY, then call the generic setup_copper_link to finish + * configuring the copper link. + **/ +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* + * Set the mac to wait the maximum time between each iteration + * and increase the max iterations when polling the phy; + * this fixes erroneous timeouts at 10Mbps. 
+ */ + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + ®_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + reg_data); + if (ret_val) + return ret_val; + + switch (hw->phy.type) { + case e1000_phy_igp_3: + ret_val = e1000e_copper_link_setup_igp(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_bm: + case e1000_phy_82578: + ret_val = e1000e_copper_link_setup_m88(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_82577: + case e1000_phy_82579: + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_ife: + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); + if (ret_val) + return ret_val; + + reg_data &= ~IFE_PMC_AUTO_MDIX; + + switch (hw->phy.mdix) { + case 1: + reg_data &= ~IFE_PMC_FORCE_MDIX; + break; + case 2: + reg_data |= IFE_PMC_FORCE_MDIX; + break; + case 0: + default: + reg_data |= IFE_PMC_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); + if (ret_val) + return ret_val; + break; + default: + break; + } + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_get_link_up_info_ich8lan - Get current link speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to store current link speed + * @duplex: pointer to store the current link duplex + * + * Calls the generic get_speed_and_duplex to retrieve the current link + * information and then calls the Kumeran lock loss workaround for links at + * gigabit speeds. + **/ +static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); + if (ret_val) + return ret_val; + + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3) && + (*speed == SPEED_1000)) { + ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); + } + + return ret_val; +} + +/** + * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround + * @hw: pointer to the HW structure + * + * Work-around for 82566 Kumeran PCS lock loss: + * On link status change (i.e. PCI reset, speed change) and link is up and + * speed is gigabit- + * 0) if workaround is optionally disabled do nothing + * 1) wait 1ms for Kumeran link to come up + * 2) check Kumeran Diagnostic register PCS lock loss bit + * 3) if not set the link is locked (all is good), otherwise... + * 4) reset the PHY + * 5) repeat up to 10 times + * Note: this is only called for IGP3 copper when speed is 1gb. + **/ +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + u16 i, data; + bool link; + + if (!dev_spec->kmrn_lock_loss_workaround_enabled) + return 0; + + /* + * Make sure link is up before proceeding. If not just return. 
+	 * Attempting this while the link is negotiating fouls up link
+	 * stability.
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (!link)
+		return 0;
+
+	for (i = 0; i < 10; i++) {
+		/* read once to clear */
+		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			return ret_val;
+		/* and again to get new status */
+		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			return ret_val;
+
+		/* check for PCS lock */
+		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+			return 0;
+
+		/* Issue PHY reset */
+		e1000_phy_hw_reset(hw);
+		mdelay(5);
+	}
+	/* Disable GigE link negotiation */
+	phy_ctrl = er32(PHY_CTRL);
+	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+	ew32(PHY_CTRL, phy_ctrl);
+
+	/*
+	 * Call gig speed drop workaround on Gig disable before accessing
+	 * any PHY registers
+	 */
+	e1000e_gig_downshift_workaround_ich8lan(hw);
+
+	/* unable to acquire PCS lock */
+	return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ * @hw: pointer to the HW structure
+ * @state: boolean value used to set the current Kumeran workaround state
+ *
+ * If ICH8, set the current Kumeran workaround state (enabled - true /
+ * disabled - false).
+ **/
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+						  bool state)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+
+	if (hw->mac.type != e1000_ich8lan) {
+		e_dbg("Workaround applies to ICH8 only.\n");
+		return;
+	}
+
+	dev_spec->kmrn_lock_loss_workaround_enabled = state;
+}
+
+/**
+ * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ * @hw: pointer to the HW structure
+ *
+ * Workaround for 82566 power-down on D3 entry:
+ * 1) disable gigabit link
+ * 2) write VR power-down enable
+ * 3) read it back
+ * Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+	u16 data;
+	u8 retry = 0;
+
+	if (hw->phy.type != e1000_phy_igp_3)
+		return;
+
+	/* Try the workaround twice (if needed) */
+	do {
+		/* Disable link */
+		reg = er32(PHY_CTRL);
+		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+		ew32(PHY_CTRL, reg);
+
+		/*
+		 * Call gig speed drop workaround on Gig disable before
+		 * accessing any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000e_gig_downshift_workaround_ich8lan(hw);
+
+		/* Write VR power-down enable */
+		e1e_rphy(hw, IGP3_VR_CTRL, &data);
+		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+		/* Read it back and test */
+		e1e_rphy(hw, IGP3_VR_CTRL, &data);
+		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+			break;
+
+		/* Issue PHY reset and repeat at most one more time */
+		reg = er32(CTRL);
+		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+		retry++;
+	} while (retry);
+}
+
+/**
+ * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ * @hw: pointer to the HW structure
+ *
+ * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
+ * LPLU, Gig disable, MDIC PHY reset):
+ * 1) Set Kumeran Near-end loopback
+ * 2) Clear Kumeran Near-end loopback
+ * Should only be called for ICH8[m] devices with IGP_3 Phy.
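+ *
+ * Within this file it is called from e1000_kmrn_lock_loss_workaround_ich8lan()
+ * and e1000e_igp3_phy_powerdown_workaround_ich8lan(), in both cases right
+ * after gigabit is disabled and before any further PHY register access.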
+ **/ +void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data; + + if ((hw->mac.type != e1000_ich8lan) || + (hw->phy.type != e1000_phy_igp_3)) + return; + + ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + ®_data); + if (ret_val) + return; + reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); + if (ret_val) + return; + reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); +} + +/** + * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx + * @hw: pointer to the HW structure + * + * During S0 to Sx transition, it is possible the link remains at gig + * instead of negotiating to a lower speed. Before going to Sx, set + * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation + * to a lower speed. For PCH and newer parts, the OEM bits PHY register + * (LED, GbE disable and LPLU configurations) also needs to be written. + **/ +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) +{ + u32 phy_ctrl; + s32 ret_val; + + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; + ew32(PHY_CTRL, phy_ctrl); + + if (hw->mac.type >= e1000_pchlan) { + e1000_oem_bits_config_ich8lan(hw, false); + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + e1000_write_smbus_addr(hw); + hw->phy.ops.release(hw); + } +} + +/** + * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 + * @hw: pointer to the HW structure + * + * During Sx to S0 transitions on non-managed devices or managed devices + * on which PHY resets are not blocked, if the PHY registers cannot be + * accessed properly by the s/w toggle the LANPHYPC value to power cycle + * the PHY. + **/ +void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + if (hw->mac.type != e1000_pch2lan) + return; + + fwsm = er32(FWSM); + if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) { + u16 phy_id1, phy_id2; + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to acquire PHY semaphore in resume\n"); + return; + } + + /* Test access to the PHY registers by reading the ID regs */ + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); + if (ret_val) + goto release; + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); + if (ret_val) + goto release; + + if (hw->phy.id == ((u32)(phy_id1 << 16) | + (u32)(phy_id2 & PHY_REVISION_MASK))) + goto release; + + e1000_toggle_lanphypc_value_ich8lan(hw); + + hw->phy.ops.release(hw); + msleep(50); + e1000_phy_hw_reset(hw); + msleep(50); + return; + } + +release: + hw->phy.ops.release(hw); + + return; +} + +/** + * e1000_cleanup_led_ich8lan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000_led_on_ich8lan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. 
+ **/
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+
+	ew32(LEDCTL, hw->mac.ledctl_mode2);
+	return 0;
+}
+
+/**
+ * e1000_led_off_ich8lan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+				(IFE_PSCL_PROBE_MODE |
+				 IFE_PSCL_PROBE_LEDS_OFF));
+
+	ew32(LEDCTL, hw->mac.ledctl_mode1);
+	return 0;
+}
+
+/**
+ * e1000_setup_led_pchlan - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use.
+ **/
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
+{
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
+}
+
+/**
+ * e1000_cleanup_led_pchlan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
+{
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
+}
+
+/**
+ * e1000_led_on_pchlan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
+{
+	u16 data = (u16)hw->mac.ledctl_mode2;
+	u32 i, led;
+
+	/*
+	 * If no link, then turn LED on by setting the invert bit
+	 * for each LED whose mode is "link_up" in ledctl_mode2.
+	 */
+	if (!(er32(STATUS) & E1000_STATUS_LU)) {
+		for (i = 0; i < 3; i++) {
+			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+			if ((led & E1000_PHY_LED0_MODE_MASK) !=
+			    E1000_LEDCTL_MODE_LINK_UP)
+				continue;
+			if (led & E1000_PHY_LED0_IVRT)
+				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+			else
+				data |= (E1000_PHY_LED0_IVRT << (i * 5));
+		}
+	}
+
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_led_off_pchlan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
+{
+	u16 data = (u16)hw->mac.ledctl_mode1;
+	u32 i, led;
+
+	/*
+	 * If no link, then turn LED off by clearing the invert bit
+	 * for each LED whose mode is "link_up" in ledctl_mode1.
+	 */
+	if (!(er32(STATUS) & E1000_STATUS_LU)) {
+		for (i = 0; i < 3; i++) {
+			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+			if ((led & E1000_PHY_LED0_MODE_MASK) !=
+			    E1000_LEDCTL_MODE_LINK_UP)
+				continue;
+			if (led & E1000_PHY_LED0_IVRT)
+				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+			else
+				data |= (E1000_PHY_LED0_IVRT << (i * 5));
+		}
+	}
+
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Read the appropriate register for the config done bit for completion
+ * status and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some silicon which is EEPROM-less will fail trying to read the
+ * config done bit, so in that case only an error is logged and the
+ * function continues. If we were to return with an error, EEPROM-less
+ * silicon would not be able to be reset or change link.
+ **/ +static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 bank = 0; + u32 status; + + e1000e_get_cfg_done(hw); + + /* Wait for indication from h/w that it has completed basic config */ + if (hw->mac.type >= e1000_ich10lan) { + e1000_lan_init_done_ich8lan(hw); + } else { + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + e_dbg("Auto Read Done did not complete\n"); + ret_val = 0; + } + } + + /* Clear PHY Reset Asserted bit */ + status = er32(STATUS); + if (status & E1000_STATUS_PHYRA) + ew32(STATUS, status & ~E1000_STATUS_PHYRA); + else + e_dbg("PHY Reset Asserted not set - needs delay\n"); + + /* If EEPROM is not marked present, init the IGP 3 PHY manually */ + if (hw->mac.type <= e1000_ich9lan) { + if (((er32(EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) { + e1000e_phy_init_script_igp3(hw); + } + } else { + if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { + /* Maybe we should do a basic PHY config */ + e_dbg("EEPROM not present\n"); + ret_val = -E1000_ERR_CONFIG; + } + } + + return ret_val; +} + +/** + * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters + * @hw: pointer to the HW structure + * + * Clears hardware counters specific to the silicon family and calls + * clear_hw_cntrs_generic to clear all general purpose counters. 
+ **/ +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + u16 phy_data; + s32 ret_val; + + e1000e_clear_hw_cntrs_base(hw); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + /* Clear PHY statistics registers */ + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_82577)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); +release: + hw->phy.ops.release(hw); + } +} + +static struct e1000_mac_operations ich8_mac_ops = { + .id_led_init = e1000e_id_led_init, + /* check_mng_mode dependent on mac type */ + .check_for_link = e1000_check_for_copper_link_ich8lan, + /* cleanup_led dependent on mac type */ + .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, + .get_bus_info = e1000_get_bus_info_ich8lan, + .set_lan_id = e1000_set_lan_id_single_port, + .get_link_up_info = e1000_get_link_up_info_ich8lan, + /* led_on dependent on mac type */ + /* led_off dependent on mac type */ + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .reset_hw = e1000_reset_hw_ich8lan, + .init_hw = e1000_init_hw_ich8lan, + .setup_link = e1000_setup_link_ich8lan, + .setup_physical_interface= e1000_setup_copper_link_ich8lan, + /* id_led_init dependent on mac type */ +}; + +static struct e1000_phy_operations ich8_phy_ops = { + .acquire = e1000_acquire_swflag_ich8lan, + .check_reset_block = e1000_check_reset_block_ich8lan, + .commit = NULL, + .get_cfg_done = e1000_get_cfg_done_ich8lan, + .get_cable_length = e1000e_get_cable_length_igp_2, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_release_swflag_ich8lan, + .reset = e1000_phy_hw_reset_ich8lan, + .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, + .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, + .write_reg = e1000e_write_phy_reg_igp, +}; + +static struct e1000_nvm_operations ich8_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .read = e1000_read_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, + .update = e1000_update_nvm_checksum_ich8lan, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + +struct e1000_info e1000_ich8_info = { + .mac = e1000_ich8lan, + .flags = FLAG_HAS_WOL + | FLAG_IS_ICH + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 8, + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_ich8lan, + 
.mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +struct e1000_info e1000_ich9_info = { + .mac = e1000_ich9lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_ERT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 10, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +struct e1000_info e1000_ich10_info = { + .mac = e1000_ich10lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_ERT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 10, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +struct e1000_info e1000_pch_info = { + .mac = e1000_pchlan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS, + .pba = 26, + .max_hw_frame_size = 4096, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +struct e1000_info e1000_pch2_info = { + .mac = e1000_pch2lan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/lib.c b/drivers/net/ethernet/intel/e1000e/lib.c new file mode 100644 index 0000000..7898a67 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/lib.c @@ -0,0 +1,2692 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+enum e1000_mng_mode {
+	e1000_mng_mode_none = 0,
+	e1000_mng_mode_asf,
+	e1000_mng_mode_pt,
+	e1000_mng_mode_ipmi,
+	e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG	0x20000000
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE	0x544D4149
+
+/**
+ * e1000e_get_bus_info_pcie - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and
+ * stored: bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_bus_info *bus = &hw->bus;
+	struct e1000_adapter *adapter = hw->adapter;
+	u16 pcie_link_status, cap_offset;
+
+	cap_offset = adapter->pdev->pcie_cap;
+	if (!cap_offset) {
+		bus->width = e1000_bus_width_unknown;
+	} else {
+		pci_read_config_word(adapter->pdev,
+				     cap_offset + PCIE_LINK_STATUS,
+				     &pcie_link_status);
+		bus->width = (enum e1000_bus_width)((pcie_link_status &
+						     PCIE_LINK_WIDTH_MASK) >>
+						    PCIE_LINK_WIDTH_SHIFT);
+	}
+
+	mac->ops.set_lan_id(hw);
+
+	return 0;
+}
+
+/**
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	u32 reg;
+
+	/*
+	 * The status register reports the correct function number
+	 * for the device regardless of function swap state.
+	 */
+	reg = er32(STATUS);
+	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+
+	bus->func = 0;
+}
+
+/**
+ * e1000_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+		e1e_flush();
+	}
+}
+
+/**
+ * e1000_write_vfta_generic - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+	e1e_flush();
+}
+
+/**
+ * e1000e_init_rx_addrs - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: number of receive address registers
+ *
+ * Sets up the receive address registers by setting the base receive
+ * address register to the device's MAC address and clearing all the
+ * other receive address registers to 0.
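+ *
+ * After this runs, RAR[0] holds the current MAC address with the AV
+ * (address valid) bit set, and RAR[1..rar_count-1] are zeroed entries
+ * with AV clear (see e1000e_rar_set() below, which only sets AV for a
+ * non-zero address).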
+ **/ +void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = {0}; + + /* Setup the receive address */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000e_rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + e_dbg("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + e1000e_rar_set(hw, mac_addr, i); +} + +/** + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. + **/ +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = 0; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + goto out; + + /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ + if (!((nvm_data & NVM_COMPAT_LOM) || + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) + goto out; + + ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + if (nvm_alt_mac_addr_offset == 0xFFFF) { + /* There is no Alternate MAC Address */ + goto out; + } + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + e_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + goto out; + } + + /* + * We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + e1000e_rar_set(hw, alt_mac_addr, 0); + +out: + return ret_val; +} + +/** + * e1000e_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* + * Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. 
+ * The flushes avoid this. + */ + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); +} + +/** + * e1000_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * e1000_mta_set_generic() + **/ +static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* + * The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000e_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
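+ *
+ * Worked example (reusing the case-0 hash above with 128 MTA registers):
+ * hash_value 0x563 selects register 0x563 >> 5 = 43 and bit
+ * 0x563 & 0x1F = 3, i.e. mta_shadow[43] |= (1 << 3).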
+ **/
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+					u8 *mc_addr_list, u32 mc_addr_count)
+{
+	u32 hash_value, hash_bit, hash_reg;
+	int i;
+
+	/* clear mta_shadow */
+	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+	/* update mta_shadow from mc_addr_list */
+	for (i = 0; (u32) i < mc_addr_count; i++) {
+		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
+
+		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+		hash_bit = hash_value & 0x1F;
+
+		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+		mc_addr_list += (ETH_ALEN);
+	}
+
+	/* replace the entire MTA table */
+	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+	e1e_flush();
+}
+
+/**
+ * e1000e_clear_hw_cntrs_base - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
+{
+	er32(CRCERRS);
+	er32(SYMERRS);
+	er32(MPC);
+	er32(SCC);
+	er32(ECOL);
+	er32(MCC);
+	er32(LATECOL);
+	er32(COLC);
+	er32(DC);
+	er32(SEC);
+	er32(RLEC);
+	er32(XONRXC);
+	er32(XONTXC);
+	er32(XOFFRXC);
+	er32(XOFFTXC);
+	er32(FCRUC);
+	er32(GPRC);
+	er32(BPRC);
+	er32(MPRC);
+	er32(GPTC);
+	er32(GORCL);
+	er32(GORCH);
+	er32(GOTCL);
+	er32(GOTCH);
+	er32(RNBC);
+	er32(RUC);
+	er32(RFC);
+	er32(ROC);
+	er32(RJC);
+	er32(TORL);
+	er32(TORH);
+	er32(TOTL);
+	er32(TOTH);
+	er32(TPR);
+	er32(TPT);
+	er32(MPTC);
+	er32(BPTC);
+}
+
+/**
+ * e1000e_check_for_copper_link - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed. The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status)
+		return 0;
+
+	/*
+	 * First we want to see if the MII Status Register reports
+	 * link. If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link)
+		return ret_val; /* No link detected */
+
+	mac->get_link_status = false;
+
+	/*
+	 * Check if there was DownShift; this must be checked
+	 * immediately after link-up
+	 */
+	e1000e_check_downshift(hw);
+
+	/*
+	 * If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		return ret_val;
+	}
+
+	/*
+	 * Auto-Neg is enabled. Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration. So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	e1000e_config_collision_dist(hw);
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+ */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + e_dbg("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000e_check_for_fiber_link - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && + (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return 0; +} + +/** + * e1000e_check_for_serdes_link - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. 
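+		 * Auto-negotiation was just disabled above, so the TFCE and
+		 * RFCE bits in CTRL must be set by software (via
+		 * e1000e_config_fc_after_link_up() calling
+		 * e1000e_force_mac_fc()) instead of being taken from a
+		 * negotiated result.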
*/ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* + * If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + return 0; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + + /* + * Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return 0; +} + +/** + * e1000e_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. 
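+ * Note that the mode configured here reflects only the local request;
+ * once link is up, e1000e_config_fc_after_link_up() reconciles it with
+ * the link partner's advertised abilities.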
+ **/ +s32 e1000e_setup_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + + /* + * In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (e1000_check_reset_block(hw)) + return 0; + + /* + * If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* + * Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = mac->ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* + * Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc.pause_time); + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in e1000_mac_info. + **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + /* + * Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. 
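+		 * Only ASM_DIR is advertised in this case: asymmetric
+		 * PAUSE directed toward the link partner, with the PAUSE
+		 * bit itself left clear.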
+ */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + ew32(TXCW, txcw); + mac->txcw = txcw; + + return 0; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + /* + * If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * seconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + usleep_range(10000, 20000); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = 1; + /* + * AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = 0; + } else { + mac->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + + return 0; +} + +/** + * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + e1000e_config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* + * Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(CTRL, ctrl); + e1e_flush(); + usleep_range(1000, 2000); + + /* + * For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + e_dbg("No signal detected\n"); + } + + return 0; +} + +/** + * e1000e_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. 
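+ * Only the COLD field of TCTL is rewritten; the other transmit control
+ * bits are preserved.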
+ * Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000e_config_collision_dist(struct e1000_hw *hw)
+{
+	u32 tctl;
+
+	tctl = er32(TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+	ew32(TCTL, tctl);
+	e1e_flush();
+}
+
+/**
+ * e1000e_set_fc_watermarks - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers.  If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
+{
+	u32 fcrtl = 0, fcrth = 0;
+
+	/*
+	 * Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (hw->fc.current_mode & e1000_fc_tx_pause) {
+		/*
+		 * We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+		 */
+		fcrtl = hw->fc.low_water;
+		fcrtl |= E1000_FCRTL_XONE;
+		fcrth = hw->fc.high_water;
+	}
+	ew32(FCRTL, fcrtl);
+	ew32(FCRTH, fcrth);
+
+	return 0;
+}
+
+/**
+ * e1000e_force_mac_fc - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings.  TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC.  Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000e_force_mac_fc(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	ctrl = er32(CTRL);
+
+	/*
+	 * Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disables flow control
+	 * according to the "hw->fc.current_mode" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+	e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ew32(CTRL, ctrl);
+
+	return 0;
+}
+
+/**
+ * e1000e_config_fc_after_link_up - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced.
If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* + * Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000e_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000e_force_mac_fc(hw); + } + + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + + /* + * Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* + * Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + e_dbg("Copper PHY and Auto Neg " + "has not completed.\n"); + return ret_val; + } + + /* + * The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = + e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* + * Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. 
+ * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + e_dbg("Flow Control = FULL.\r\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = " + "Rx PAUSE frames only.\r\n"); + } + } + /* + * For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); + } + /* + * For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); + } else { + /* + * Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + e_dbg("Flow Control = NONE.\r\n"); + } + + /* + * Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* + * Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) + *speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + if (status & E1000_STATUS_FD) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + e_dbg("%u Mbps, %s Duplex\n", + *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, + *duplex == FULL_DUPLEX ? 
"Full" : "Half"); + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return 0; +} + +/** + * e1000e_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000e_put_hw_semaphore(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000e_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000e_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (er32(EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + e_dbg("Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000e_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return 0; +} + +/** + * e1000e_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 e1000e_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = er32(LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return 0; +} + +/** + * e1000e_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. + **/ +s32 e1000e_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + + if (hw->mac.ops.setup_led != e1000e_setup_led_generic) + return -E1000_ERR_CONFIG; + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->mac.ledctl_mode1); + } + + return 0; +} + +/** + * e1000e_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) +{ + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000e_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. 
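+ * Each LED has an 8-bit mode field in ledctl_mode2; the loop below ORs
+ * in the per-LED BLINK bit for every LED whose mode is LED_ON.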
+ **/ +s32 e1000e_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* + * set the blink bit for each LED that's "on" (0x0E) + * in ledctl_mode2 + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << + (i * 8)); + } + + ew32(LEDCTL, ledctl_blink); + + return 0; +} + +/** + * e1000e_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000e_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 e1000e_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_set_pcie_no_snoop - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + if (no_snoop) { + gcr = er32(GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + ew32(GCR, gcr); + } +} + +/** + * e1000e_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000e_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + ew32(CTRL, ctrl); + + while (timeout) { + if (!(er32(STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + e_dbg("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return 0; +} + +/** + * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. 
+ **/
+void e1000e_reset_adaptive(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!mac->adaptive_ifs) {
+		e_dbg("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
+	mac->current_ifs_val = 0;
+	mac->ifs_min_val = IFS_MIN;
+	mac->ifs_max_val = IFS_MAX;
+	mac->ifs_step_size = IFS_STEP;
+	mac->ifs_ratio = IFS_RATIO;
+
+	mac->in_ifs_mode = false;
+	ew32(AIT, 0);
+out:
+	return;
+}
+
+/**
+ * e1000e_update_adaptive - Update Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Update the Adaptive Interframe Spacing Throttle value based on the
+ * time between transmitted packets and time between collisions.
+ **/
+void e1000e_update_adaptive(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!mac->adaptive_ifs) {
+		e_dbg("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
+	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+			mac->in_ifs_mode = true;
+			if (mac->current_ifs_val < mac->ifs_max_val) {
+				if (!mac->current_ifs_val)
+					mac->current_ifs_val = mac->ifs_min_val;
+				else
+					mac->current_ifs_val +=
+						mac->ifs_step_size;
+				ew32(AIT, mac->current_ifs_val);
+			}
+		}
+	} else {
+		if (mac->in_ifs_mode &&
+		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+			mac->current_ifs_val = 0;
+			mac->in_ifs_mode = false;
+			ew32(AIT, 0);
+		}
+	}
+out:
+	return;
+}
+
+/**
+ * e1000_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	ew32(EECD, *eecd);
+	e1e_flush();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	ew32(EECD, *eecd);
+	e1e_flush();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+	u32 mask;
+
+	mask = 0x01 << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		ew32(EECD, eecd);
+		e1e_flush();
+
+		udelay(nvm->delay_usec);
+
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	ew32(EECD, eecd);
+}
+
+/**
+ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ * always be clear.
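+ * For example, a count of 8 produces eight SK rising edges; the DO bit
+ * is sampled after each edge and shifted into the result MSB-first,
+ * yielding one status byte.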
+ **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = er32(EERD); + else + reg = er32(EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return 0; + + udelay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000e_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000e_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = er32(EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + ew32(EECD, eecd | E1000_EECD_REQ); + eecd = er32(EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = er32(EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000e_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000e_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = er32(EECD); + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. 
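+ * For SPI parts this means clearing SK and CS and then polling the
+ * part's status register (NVM_RDSR_OPCODE_SPI) until the busy bit
+ * clears, giving up after NVM_MAX_RETRY_SPI attempts.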
+ **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u8 spi_stat_reg; + + if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + e1e_flush(); + udelay(1); + + /* + * Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + e_dbg("SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return 0; +} + +/** + * e1000e_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + ew32(EERD, eerd); + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); + } + + return ret_val; +} + +/** + * e1000e_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 widx = 0; + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. 
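+	 * For instance, a request at offset 60 for 8 words on a 64-word
+	 * part is rejected, since 60 + 8 exceeds the device's word_size.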
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + } + + usleep_range(10000, 20000); + nvm->ops.release(hw); + return 0; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + e_dbg("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + /* + * if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + e_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + e_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + e_dbg("NVM PBA number section 
invalid length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + e_dbg("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = er32(RAH(0)); + rar_low = er32(RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + e_dbg("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + e_dbg("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000e_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
+ **/ +void e1000e_reload_nvm(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + if (!(hw->mac.arc_subsystem_valid)) { + e_dbg("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = er32(HICR); + if ((hicr & E1000_HICR_EN) == 0) { + e_dbg("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = er32(HICR); + if (!(hicr & E1000_HICR_C)) + break; + mdelay(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + e_dbg("Previous command timeout failed .\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return 0; +} + +/** + * e1000e_check_mng_mode_generic - check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = er32(FWSM); + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!e1000e_check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + goto out; + } + + /* + * If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) { + hw->mac.tx_pkt_filtering = false; + goto out; + } + + /* Read in the header. Length and offset are in dwords. 
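+	 * E1000_MNG_DHCP_COOKIE_LENGTH and _OFFSET are byte counts, hence
+	 * the >> 2 conversions below before indexing the HOST_IF array.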
*/ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* + * If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + goto out; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { + hw->mac.tx_pkt_filtering = false; + goto out; + } + +out: + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + e1e_flush(); + } + + return 0; +} + +/** + * e1000_mng_host_if_write - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* + * The device driver writes the relevant command block into the + * ram area. 
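+	 * Unaligned leading bytes were already folded in above through a
+	 * read-modify-write of the first dword; a partial trailing dword
+	 * is zero-padded after this loop.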
+ */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + + return 0; +} + +/** + * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = er32(HICR); + ew32(HICR, hicr | E1000_HICR_C); + + return 0; +} + +/** + * e1000e_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = false; + + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.has_fwsm) { + fwsm = er32(FWSM); + factps = er32(FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else if ((hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82583)) { + u16 data; + + factps = er32(FACTPS); + e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((data & E1000_NVM_INIT_CTRL2_MNGM) == + (e1000_mng_mode_pt << 13))) { + ret_val = true; + goto out; + } + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + +out: + return ret_val; +} diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c new file mode 100644 index 0000000..ab4be80 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -0,0 +1,6312 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000.h" + +#define DRV_EXTRAVERSION "-k" + +#define DRV_VERSION "1.3.16" DRV_EXTRAVERSION +char e1000e_driver_name[] = "e1000e"; +const char e1000e_driver_version[] = DRV_VERSION; + +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); + +static const struct e1000_info *e1000_info_tbl[] = { + [board_82571] = &e1000_82571_info, + [board_82572] = &e1000_82572_info, + [board_82573] = &e1000_82573_info, + [board_82574] = &e1000_82574_info, + [board_82583] = &e1000_82583_info, + [board_80003es2lan] = &e1000_es2_info, + [board_ich8lan] = &e1000_ich8_info, + [board_ich9lan] = &e1000_ich9_info, + [board_ich10lan] = &e1000_ich10_info, + [board_pchlan] = &e1000_pch_info, + [board_pch2lan] = &e1000_pch2_info, +}; + +struct e1000_reg_info { + u32 ofs; + char *name; +}; + +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ + +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ + +static const struct e1000_reg_info e1000_reg_info_tbl[] = { + + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* Rx Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN, "RDLEN"}, + {E1000_RDH, "RDH"}, + {E1000_RDT, "RDT"}, + {E1000_RDTR, "RDTR"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_ERT, "ERT"}, + {E1000_RDBAL, "RDBAL"}, + {E1000_RDBAH, "RDBAH"}, + {E1000_RDFH, "RDFH"}, + {E1000_RDFT, "RDFT"}, + {E1000_RDFHS, "RDFHS"}, + {E1000_RDFTS, "RDFTS"}, + {E1000_RDFPC, "RDFPC"}, + + /* Tx Registers */ + {E1000_TCTL, "TCTL"}, + {E1000_TDBAL, "TDBAL"}, + {E1000_TDBAH, "TDBAH"}, + {E1000_TDLEN, "TDLEN"}, + {E1000_TDH, "TDH"}, + {E1000_TDT, "TDT"}, + {E1000_TIDV, "TIDV"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TADV, "TADV"}, + {E1000_TARC(0), "TARC"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFTS, "TDFTS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {} +}; + +/* + * e1000_regdump - register printout routine + */ +static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, 
E1000_RXDCTL(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TXDCTL(n)); + break; + case E1000_TARC(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TARC(n)); + break; + default: + printk(KERN_INFO "%-15s %08x\n", + reginfo->name, __er32(hw, reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); + printk(KERN_INFO "%-15s ", rname); + for (n = 0; n < 2; n++) + printk(KERN_CONT "%08x ", regs[n]); + printk(KERN_CONT "\n"); +} + +/* + * e1000e_dump - Print registers, Tx-ring and Rx-ring + */ +static void e1000e_dump(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_reg_info *reginfo; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc; + struct my_u0 { + u64 a; + u64 b; + } *u0; + struct e1000_buffer *buffer_info; + struct e1000_ring *rx_ring = adapter->rx_ring; + union e1000_rx_desc_packet_split *rx_desc_ps; + struct e1000_rx_desc *rx_desc; + struct my_u1 { + u64 a; + u64 b; + u64 c; + u64 d; + } *u1; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + printk(KERN_INFO "Device Name state " + "trans_start last_rx\n"); + printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", + netdev->name, netdev->state, netdev->trans_start, + netdev->last_rx); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + printk(KERN_INFO " Register Name Value\n"); + for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; + reginfo->name; reginfo++) { + e1000_regdump(hw, reginfo); + } + + /* Print Tx Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); + printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" + " leng ntw timestamp\n"); + buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; + printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", + 0, tx_ring->next_to_use, tx_ring->next_to_clean, + (unsigned long long)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp); + + /* Print Tx Ring */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * 
+----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Legacy format\n"); + printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Context format\n"); + printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Data format\n"); + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " + "%04X %3X %016llX %p", + (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : + ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->length, buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp, + buffer_info->skb); + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC/U\n"); + else if (i == tx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + + if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, phys_to_virt(buffer_info->dma), + buffer_info->length, true); + } + + /* Print Rx Ring Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); + printk(KERN_INFO "Queue [NTU] [NTC]\n"); + printk(KERN_INFO " %5d %5X %5X\n", 0, + rx_ring->next_to_use, rx_ring->next_to_clean); + + /* Print Rx Ring */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); + switch (adapter->rx_ps_pages) { + case 1: + case 2: + case 3: + /* [Extended] Packet Split Receive Descriptor Format + * + * +-----------------------------------------------------+ + * 0 | Buffer Address 0 [63:0] | + * +-----------------------------------------------------+ + * 8 | Buffer Address 1 [63:0] | + * +-----------------------------------------------------+ + * 16 | Buffer Address 2 [63:0] | + * +-----------------------------------------------------+ + * 24 | Buffer Address 3 [63:0] | + * +-----------------------------------------------------+ + */ + printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " + "[buffer 1 63:0 ] " + "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " + "[bi->skb] <-- Ext Pkt Split format\n"); + /* [Extended] Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 13 12 8 7 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | + * | Checksum | Ident | | Queue | | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " + "[vl l0 ee es] " + "[ l3 l2 l1 hs] [reserved ] ---------------- " + "[bi->skb] <-- Ext Rx Write-Back format\n"); + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + rx_desc_ps = 
E1000_RX_DESC_PS(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc_ps; + staterr = + le32_to_cpu(rx_desc_ps->wb.middle.status_error); + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + printk(KERN_INFO "RWB[0x%03X] %016llX " + "%016llX %016llX %016llX " + "---------------- %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + buffer_info->skb); + } else { + printk(KERN_INFO "R [0x%03X] %016llX " + "%016llX %016llX %016llX %016llX %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, + buffer_info->skb); + + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt(buffer_info->dma), + adapter->rx_ps_bsize0, true); + } + + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + } + break; + default: + case 0: + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + printk(KERN_INFO "Rl[desc] [address 63:0 ] " + "[vl er S cks ln] [bi->dma ] [bi->skb] " + "<-- Legacy format\n"); + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + u0 = (struct my_u0 *)rx_desc; + printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " + "%016llX %p", i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb); + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + phys_to_virt(buffer_info->dma), + adapter->rx_buffer_len, true); + } + } + +exit: + return; +} + +/** + * e1000_desc_unused - calculate if we have unused descriptors + **/ +static int e1000_desc_unused(struct e1000_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * e1000_receive_skb - helper function to handle Rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void e1000_receive_skb(struct e1000_adapter *adapter, + struct net_device *netdev, struct sk_buff *skb, + u8 status, __le16 vlan) +{ + u16 tag = le16_to_cpu(vlan); + skb->protocol = eth_type_trans(skb, netdev); + + if (status & E1000_RXD_STAT_VP) + __vlan_hwaccel_put_tag(skb, tag); + + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_rx_checksum - Receive Checksum Offload + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: 
socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (status & E1000_RXD_STAT_IXSM) + return; + /* TCP/UDP checksum error bit is set */ + if (errors & E1000_RXD_ERR_TCPE) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + + /* TCP/UDP Checksum has not been calculated */ + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (status & E1000_RXD_STAT_TCPCS) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + /* + * IP fragment with UDP payload + * Hardware complements the payload checksum, so we undo it + * and then put the value in host order for further stack use. + */ + __sum16 sum = (__force __sum16)htons(csum); + skb->csum = csum_unfold(~sum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + adapter->hw_csum_good++; +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + int cleaned_count, gfp_t gfp) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); + if (!skb) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_err(&pdev->dev, "Rx DMA map failed\n"); + adapter->rx_dma_failed++; + break; + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
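+			 *
+			 * Concretely: without the barrier the tail write
+			 * below could reach the device before the
+			 * buffer_addr store above it, and the hardware
+			 * could fetch a descriptor that still holds a
+			 * stale buffer address.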
+ */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, + int cleaned_count, gfp_t gfp) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_packet_split *rx_desc; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &buffer_info->ps_pages[j]; + if (j >= adapter->rx_ps_pages) { + /* all unused desc entries get hw null ptr */ + rx_desc->read.buffer_addr[j + 1] = + ~cpu_to_le64(0); + continue; + } + if (!ps_page->page) { + ps_page->page = alloc_page(gfp); + if (!ps_page->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + ps_page->dma = dma_map_page(&pdev->dev, + ps_page->page, + 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + ps_page->dma)) { + dev_err(&adapter->pdev->dev, + "Rx DMA page map failed\n"); + adapter->rx_dma_failed++; + goto no_buffers; + } + } + /* + * Refresh the desc even if buffer_addrs + * didn't change because each write-back + * erases this info. + */ + rx_desc->read.buffer_addr[j + 1] = + cpu_to_le64(ps_page->dma); + } + + skb = __netdev_alloc_skb_ip_align(netdev, + adapter->rx_ps_bsize0, + gfp); + + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_err(&pdev->dev, "Rx DMA map failed\n"); + adapter->rx_dma_failed++; + /* cleanup skb */ + dev_kfree_skb_any(skb); + buffer_info->skb = NULL; + break; + } + + rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); + } + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @cleaned_count: number of buffers to allocate this pass + **/ + +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + int cleaned_count, gfp_t gfp) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - 16 /* for skb_reserve */; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(gfp); + if (unlikely(!buffer_info->page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->page, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
*/ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = 1; + cleaned_count++; + dma_unmap_single(&pdev->dev, + buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* + * !EOP means multiple descriptors were used to store a single + * packet, if that's the case we need to toss it. In fact, we + * need to toss every packet with the EOP bit clear and the + * next frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + /* All receives must fit into a single buffer */ + e_dbg("Receive packet consumed multiple buffers\n"); + /* recycle */ + buffer_info->skb = skb; + if (status & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + /* adjust length to remove Ethernet CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) + length -= 4; + + total_rx_bytes += length; + total_rx_packets++; + + /* + * code added for copybreak, this should improve + * performance for small packets with large amounts + * of reassembly being done in the stack + */ + if (length < copybreak) { + struct sk_buff *new_skb = + netdev_alloc_skb_ip_align(netdev, length); + if (new_skb) { + skb_copy_to_linear_data_offset(new_skb, + -NET_IP_ALIGN, + (skb->data - + NET_IP_ALIGN), + (length + + NET_IP_ALIGN)); + /* save the skb in buffer_info as good */ + buffer_info->skb = skb; + skb = new_skb; + } + /* else just continue with the old one */ + } + /* end copybreak code */ + skb_put(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + 
adapter->alloc_rx_buf(adapter, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +static void e1000_put_txbuf(struct e1000_adapter *adapter, + struct e1000_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; +} + +static void e1000_print_hw_hang(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + print_hang_task); + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int i = tx_ring->next_to_clean; + unsigned int eop = tx_ring->buffer_info[i].next_to_watch; + struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); + struct e1000_hw *hw = &adapter->hw; + u16 phy_status, phy_1000t_status, phy_ext_status; + u16 pci_status; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1e_rphy(hw, PHY_STATUS, &phy_status); + e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); + e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); + + pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); + + /* detected Hardware unit hang */ + e_err("Detected Hardware Unit Hang:\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]:\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n" + "MAC Status <%x>\n" + "PHY Status <%x>\n" + "PHY 1000BASE-T Status <%x>\n" + "PHY Extended Status <%x>\n" + "PCI Status <%x>\n", + readl(adapter->hw.hw_addr + tx_ring->head), + readl(adapter->hw.hw_addr + tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status, + er32(STATUS), + phy_status, + phy_1000t_status, + phy_ext_status, + pci_status); +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ + for (; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + 
total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + } + + e1000_put_txbuf(adapter, buffer_info); + tx_desc->upper.data = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + if (i == tx_ring->next_to_use) + break; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD 32 + if (count && netif_carrier_ok(netdev) && + e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* + * Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = 0; + if (tx_ring->buffer_info[i].time_stamp && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + schedule_work(&adapter->print_hang_task); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct e1000_hw *hw = &adapter->hw; + union e1000_rx_desc_packet_split *rx_desc, *next_rxd; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info, *next_buffer; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + u32 length, staterr; + int cleaned_count = 0; + bool cleaned = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + skb = buffer_info->skb; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + /* in the packet split case this is header only */ + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_PS(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = 1; + cleaned_count++; + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + /* see !EOP comment in other Rx routine */ + if (!(staterr & E1000_RXD_STAT_EOP)) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + e_dbg("Packet Split buffers didn't pick up the full " + "packet\n"); + dev_kfree_skb_irq(skb); + if (staterr & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + length = le16_to_cpu(rx_desc->wb.middle.length0); + + if (!length) { + e_dbg("Last part of the packet spanning 
multiple " + "descriptors\n"); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + /* Good Receive */ + skb_put(skb, length); + + { + /* + * this looks ugly, but it seems compiler issues make it + * more efficient than reusing j + */ + int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); + + /* + * page alloc/put takes too long and effects small packet + * throughput, so unsplit small packets and save the alloc/put + * only valid in softirq (napi) context to call kmap_* + */ + if (l1 && (l1 <= copybreak) && + ((length + l1) <= adapter->rx_ps_bsize0)) { + u8 *vaddr; + + ps_page = &buffer_info->ps_pages[0]; + + /* + * there is no documentation about how to call + * kmap_atomic, so we can't hold the mapping + * very long + */ + dma_sync_single_for_cpu(&pdev->dev, ps_page->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, l1); + kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); + dma_sync_single_for_device(&pdev->dev, ps_page->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + + /* remove the CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) + l1 -= 4; + + skb_put(skb, l1); + goto copydone; + } /* if */ + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + length = le16_to_cpu(rx_desc->wb.upper.length[j]); + if (!length) + break; + + ps_page = &buffer_info->ps_pages[j]; + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + ps_page->dma = 0; + skb_fill_page_desc(skb, j, ps_page->page, 0, length); + ps_page->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } + + /* strip the ethernet crc, problem is we're using pages now so + * this whole operation can get a little cpu intensive + */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) + pskb_trim(skb, skb->len - 4); + +copydone: + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_rx_checksum(adapter, staterr, le16_to_cpu( + rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); + + if (rx_desc->wb.upper.header_status & + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) + adapter->rx_hdr_split++; + + e1000_receive_skb(adapter, netdev, skb, + staterr, rx_desc->wb.middle.vlan); + +next_desc: + rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); + buffer_info->skb = NULL; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(adapter, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_consume_page - helper function + **/ +static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ + +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; 
+ struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + ++i; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window + * too */ + if (rx_ring->rx_skb_top) + dev_kfree_skb_irq(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + +#define rxtop (rx_ring->rx_skb_top) + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the current skb, we only consumed the + * page */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page */ + if (length <= copybreak && + skb_tailroom(skb) >= length) { + u8 *vaddr; + vaddr = kmap_atomic(buffer_info->page, + KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr, + KM_SKB_DATA_SOFTIRQ); + /* re-use the page, so don't erase + * buffer_info->page */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? 
*/ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + e_err("pskb_may_pull failed.\n"); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, netdev, skb, status, + rx_desc->special); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct pci_dev *pdev = adapter->pdev; + unsigned int i, j; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if (adapter->clean_rx == e1000_clean_rx_irq) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) + dma_unmap_page(&pdev->dev, buffer_info->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + else if (adapter->clean_rx == e1000_clean_rx_irq_ps) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &buffer_info->ps_pages[j]; + if (!ps_page->page) + break; + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + ps_page->dma = 0; + put_page(ps_page->page); + ps_page->page = NULL; + } + } + + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +static void e1000e_downshift_workaround(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, downshift_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); +} + +/** + * e1000_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct 
e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + /* + * read ICR disables interrupts using IAM + */ + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* + * ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + schedule_work(&adapter->downshift_task); + + /* + * 80003ES2LAN workaround-- For packet buffer work-around on + * link down event; disable receives here in the ISR and reset + * adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + adapter->flags & FLAG_RX_NEEDS_RESTART) { + /* disable receives */ + u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RX_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + + return IRQ_HANDLED; +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 rctl, icr = er32(ICR); + + if (!icr || test_bit(__E1000_DOWN, &adapter->state)) + return IRQ_NONE; /* Not our interrupt */ + + /* + * IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + /* + * Interrupt Auto-Mask...upon reading ICR, + * interrupts are masked. 
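+	 * When INT_ASSERTED is set, the causes listed in IAM are masked
+	 * automatically by the ICR read above and stay masked until a
+	 * later IMS write (see e1000_irq_enable()) restores them.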
No need for the + * IMC write + */ + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* + * ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + schedule_work(&adapter->downshift_task); + + /* + * 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives here in the ISR and + * reset adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + (adapter->flags & FLAG_RX_NEEDS_RESTART)) { + /* disable receives */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RX_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_msix_other(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (!(icr & E1000_ICR_INT_ASSERTED)) { + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_OTHER); + return IRQ_NONE; + } + + if (icr & adapter->eiac_mask) + ew32(ICS, (icr & adapter->eiac_mask)); + + if (icr & E1000_ICR_OTHER) { + if (!(icr & E1000_ICR_LSC)) + goto no_link_interrupt; + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + +no_link_interrupt: + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); + + return IRQ_HANDLED; +} + + +static irqreturn_t e1000_intr_msix_tx(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + if (!e1000_clean_tx_irq(adapter)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(ICS, tx_ring->ims_val); + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_intr_msix_rx(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Write the ITR value calculated at the end of the + * previous interrupt. + */ + if (adapter->rx_ring->set_itr) { + writel(1000000000 / (adapter->rx_ring->itr_val * 256), + adapter->hw.hw_addr + adapter->rx_ring->itr_register); + adapter->rx_ring->set_itr = 0; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + return IRQ_HANDLED; +} + +/** + * e1000_configure_msix - Configure MSI-X hardware + * + * e1000_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. 
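+ *
+ * As the 1000000000 / (itr_val * 256) conversions below imply, the EITR
+ * registers count in 256 ns units: an itr_val of 20000 interrupts/s
+ * programs the value 195, and 195 * 256 ns is roughly a 50 us minimum
+ * interval between interrupts.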
+ **/ +static void e1000_configure_msix(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_ring *tx_ring = adapter->tx_ring; + int vector = 0; + u32 ctrl_ext, ivar = 0; + + adapter->eiac_mask = 0; + + /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ + if (hw->mac.type == e1000_82574) { + u32 rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_ACK_DIS; + ew32(RFCTL, rfctl); + } + +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + /* Configure Rx vector */ + rx_ring->ims_val = E1000_IMS_RXQ0; + adapter->eiac_mask |= rx_ring->ims_val; + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + rx_ring->itr_register); + else + writel(1, hw->hw_addr + rx_ring->itr_register); + ivar = E1000_IVAR_INT_ALLOC_VALID | vector; + + /* Configure Tx vector */ + tx_ring->ims_val = E1000_IMS_TXQ0; + vector++; + if (tx_ring->itr_val) + writel(1000000000 / (tx_ring->itr_val * 256), + hw->hw_addr + tx_ring->itr_register); + else + writel(1, hw->hw_addr + tx_ring->itr_register); + adapter->eiac_mask |= tx_ring->ims_val; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); + + /* set vector for Other Causes, e.g. link changes */ + vector++; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + E1000_EITR_82574(vector)); + else + writel(1, hw->hw_addr + E1000_EITR_82574(vector)); + + /* Cause Tx interrupts on every write back */ + ivar |= (1 << 31); + + ew32(IVAR, ivar); + + /* enable MSI-X PBA support */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask Other interrupts upon ICR read */ +#define E1000_EIAC_MASK_82574 0x01F00000 + ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); + ctrl_ext |= E1000_CTRL_EXT_EIAME; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~FLAG_MSI_ENABLED; + } +} + +/** + * e1000e_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) +{ + int err; + int i; + + switch (adapter->int_mode) { + case E1000E_INT_MODE_MSIX: + if (adapter->flags & FLAG_HAS_MSIX) { + adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ + adapter->msix_entries = kcalloc(adapter->num_vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + for (i = 0; i < adapter->num_vectors; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(adapter->pdev, + adapter->msix_entries, + adapter->num_vectors); + if (err == 0) + return; + } + /* MSI-X failed, so fall through and try MSI */ + e_err("Failed to initialize MSI-X interrupts. " + "Falling back to MSI interrupts.\n"); + e1000e_reset_interrupt_capability(adapter); + } + adapter->int_mode = E1000E_INT_MODE_MSI; + /* Fall through */ + case E1000E_INT_MODE_MSI: + if (!pci_enable_msi(adapter->pdev)) { + adapter->flags |= FLAG_MSI_ENABLED; + } else { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_err("Failed to initialize MSI interrupts. 
Falling " + "back to legacy interrupts.\n"); + } + /* Fall through */ + case E1000E_INT_MODE_LEGACY: + /* Don't do anything; this is the system default */ + break; + } + + /* store the number of vectors being used */ + adapter->num_vectors = 1; +} + +/** + * e1000_request_msix - Initialize MSI-X interrupts + * + * e1000_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + **/ +static int e1000_request_msix(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->rx_ring->name, + sizeof(adapter->rx_ring->name) - 1, + "%s-rx-0", netdev->name); + else + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + e1000_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); + if (err) + goto out; + adapter->rx_ring->itr_register = E1000_EITR_82574(vector); + adapter->rx_ring->itr_val = adapter->itr; + vector++; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->tx_ring->name, + sizeof(adapter->tx_ring->name) - 1, + "%s-tx-0", netdev->name); + else + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + e1000_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); + if (err) + goto out; + adapter->tx_ring->itr_register = E1000_EITR_82574(vector); + adapter->tx_ring->itr_val = adapter->itr; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + e1000_msix_other, 0, netdev->name, netdev); + if (err) + goto out; + + e1000_configure_msix(adapter); + return 0; +out: + return err; +} + +/** + * e1000_request_irq - initialize interrupts + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
+ **/ +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->msix_entries) { + err = e1000_request_msix(adapter); + if (!err) + return err; + /* fall back to MSI */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_MSI; + e1000e_set_interrupt_capability(adapter); + } + if (adapter->flags & FLAG_MSI_ENABLED) { + err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, + netdev->name, netdev); + if (!err) + return err; + + /* fall back to legacy interrupt */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + } + + err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, + netdev->name, netdev); + if (err) + e_err("Unable to allocate interrupt, Error: %d\n", err); + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->msix_entries) { + int vector = 0; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + /* Other Causes interrupt vector */ + free_irq(adapter->msix_entries[vector].vector, netdev); + return; + } + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + if (adapter->msix_entries) + ew32(EIAC_82574, 0); + e1e_flush(); + + if (adapter->msix_entries) { + int i; + for (i = 0; i < adapter->num_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); + ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + } else { + ew32(IMS, IMS_ENABLE_MASK); + } + e1e_flush(); +} + +/** + * e1000e_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is open. + **/ +void e1000e_get_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware know the driver has taken over */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000e_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT version (only with 82573) i + * of the f/w this means that the network i/f is closed. 
+ *
+ **/
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware take over control of h/w */
+	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+		ctrl_ext = er32(CTRL_EXT);
+		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+	}
+}
+
+/**
+ * e1000_alloc_ring_dma - allocate memory for a ring structure
+ **/
+static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
+				struct e1000_ring *ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+					GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	int err = -ENOMEM, size;
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	tx_ring->buffer_info = vzalloc(size);
+	if (!tx_ring->buffer_info)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	err = e1000_alloc_ring_dma(adapter, tx_ring);
+	if (err)
+		goto err;
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	return 0;
+err:
+	vfree(tx_ring->buffer_info);
+	e_err("Unable to allocate memory for the transmit descriptor ring\n");
+	return err;
+}
+
+/**
+ * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_buffer *buffer_info;
+	int i, size, desc_len, err = -ENOMEM;
+
+	size = sizeof(struct e1000_buffer) * rx_ring->count;
+	rx_ring->buffer_info = vzalloc(size);
+	if (!rx_ring->buffer_info)
+		goto err;
+
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
+						sizeof(struct e1000_ps_page),
+						GFP_KERNEL);
+		if (!buffer_info->ps_pages)
+			goto err_pages;
+	}
+
+	desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * desc_len;
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	err = e1000_alloc_ring_dma(adapter, rx_ring);
+	if (err)
+		goto err_pages;
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	rx_ring->rx_skb_top = NULL;
+
+	return 0;
+
+err_pages:
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		kfree(buffer_info->ps_pages);
+	}
+err:
+	vfree(rx_ring->buffer_info);
+	e_err("Unable to allocate memory for the receive descriptor ring\n");
+	return err;
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ **/
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_buffer *buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_put_txbuf(adapter, buffer_info);
+	}
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	memset(tx_ring->buffer_info, 0, size);
+
+	
memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * e1000e_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000e_free_tx_resources(struct e1000_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *tx_ring = adapter->tx_ring; + + e1000_clean_tx_ring(adapter); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * e1000e_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * + * Free all receive software resources + **/ + +void e1000e_free_rx_resources(struct e1000_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + int i; + + e1000_clean_rx_ring(adapter); + + for (i = 0; i < rx_ring->count; i++) + kfree(rx_ring->buffer_info[i].ps_pages); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. This functionality is controlled + * by the InterruptThrottleRate module parameter. 
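+ *
+ * Worked example with made-up numbers: an interval that saw 3 packets
+ * totalling 27000 bytes has bytes/packets = 9000 > 8000, so a queue
+ * sitting in lowest_latency is demoted to bulk_latency (TSO/jumbo
+ * traffic), while 40 packets totalling 30000 bytes in bulk_latency
+ * passes the "bytes > 25000 && packets > 35" test and moves back to
+ * low_latency.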
+ **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, + int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + if (adapter->flags2 & FLAG2_DISABLE_AIM) { + new_itr = 0; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, + adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, + adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* + * this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + adapter->rx_ring->itr_val = new_itr; + if (adapter->msix_entries) + adapter->rx_ring->set_itr = 1; + else + if (new_itr) + ew32(ITR, 1000000000 / (new_itr * 256)); + else + ew32(ITR, 0); + } +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->tx_ring) + goto err; + + adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->rx_ring) + goto err; + + return 0; +err: + e_err("Unable to allocate memory for queues\n"); + kfree(adapter->rx_ring); + kfree(adapter->tx_ring); + return -ENOMEM; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @napi: struct associated with this polling callback + * @budget: amount of packets driver is allowed to process this poll + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + struct e1000_hw *hw = &adapter->hw; + struct net_device *poll_dev = adapter->netdev; + int tx_cleaned = 1, work_done = 0; + + adapter = netdev_priv(poll_dev); + + if (adapter->msix_entries && + !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) + goto clean_rx; + + tx_cleaned = e1000_clean_tx_irq(adapter); + +clean_rx: + adapter->clean_rx(adapter, &work_done, budget); + + if (!tx_cleaned) + work_done = budget; + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + if (adapter->itr_setting & 3) + e1000_set_itr(adapter); + napi_complete(napi); + if (!test_bit(__E1000_DOWN, &adapter->state)) { + if (adapter->msix_entries) + ew32(IMS, adapter->rx_ring->ims_val); + else + e1000_irq_enable(adapter); + } + } + + return work_done; +} + +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + /* don't update vlan cookie if already programmed */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) + return; + + /* add VID to filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta |= (1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + set_bit(vid, adapter->active_vlans); +} + +static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) { + /* release control to f/w */ + e1000e_release_hw_control(adapter); + return; + } + + /* remove VID from filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + clear_bit(vid, adapter->active_vlans); +} + +/** + * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct 
e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); + ew32(RCTL, rctl); + + if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + } +} + +/** + * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl |= E1000_RCTL_VFE; + rctl &= ~E1000_RCTL_CFIEN; + ew32(RCTL, rctl); + } +} + +/** + * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* disable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +/** + * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* enable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + e1000_vlan_rx_add_vid(netdev, vid); + adapter->mng_vlan_id = vid; + } + + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) + e1000_vlan_rx_kill_vid(netdev, old_vid); +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + e1000_vlan_rx_add_vid(adapter->netdev, 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, vid); +} + +static void e1000_init_manageability_pt(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 manc, manc2h, mdef, i, j; + + if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) + return; + + manc = er32(MANC); + + /* + * enable receiving management packets to the host. this will probably + * generate destination unreachable messages from the host OS, but + * the packets will be handled on SMBUS + */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h = er32(MANC2H); + + switch (hw->mac.type) { + default: + manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); + break; + case e1000_82574: + case e1000_82583: + /* + * Check if IPMI pass-through decision filter already exists; + * if so, enable it. 
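+ * (Ports 623 and 664 are the standard RMCP/IPMI management ports, so
+ * an MDEF filter matching anything else is skipped; a filter i that
+ * does match is enabled by setting bit i in MANC2H below.)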
+ */ + for (i = 0, j = 0; i < 8; i++) { + mdef = er32(MDEF(i)); + + /* Ignore filters with anything other than IPMI ports */ + if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + continue; + + /* Enable this decision filter in MANC2H */ + if (mdef) + manc2h |= (1 << i); + + j |= mdef; + } + + if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + break; + + /* Create new decision filter in an empty filter */ + for (i = 0, j = 0; i < 8; i++) + if (er32(MDEF(i)) == 0) { + ew32(MDEF(i), (E1000_MDEF_PORT_623 | + E1000_MDEF_PORT_664)); + manc2h |= (1 << 1); + j++; + break; + } + + if (!j) + e_warn("Unable to create IPMI pass-through filter\n"); + break; + } + + ew32(MANC2H, manc2h); + ew32(MANC, manc); +} + +/** + * e1000_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 tdlen, tctl, tipg, tarc; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + tdba = tx_ring->dma; + tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); + ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); + ew32(TDBAH, (tdba >> 32)); + ew32(TDLEN, tdlen); + ew32(TDH, 0); + ew32(TDT, 0); + tx_ring->head = E1000_TDH; + tx_ring->tail = E1000_TDT; + + /* Set the default values for the Tx Inter Packet Gap timer */ + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ + ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ + ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ + + if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) + ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ + + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + ew32(TIDV, adapter->tx_int_delay); + /* Tx irq moderation */ + ew32(TADV, adapter->tx_abs_int_delay); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + u32 txdctl = er32(TXDCTL(0)); + txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | + E1000_TXDCTL_WTHRESH); + /* + * set up some performance related parameters to encourage the + * hardware to use the bus more efficiently in bursts, depends + * on the tx_int_delay to be enabled, + * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time + * hthresh = 1 ==> prefetch when one or more available + * pthresh = 0x1f ==> prefetch if internal cache 31 or less + * BEWARE: this seems to work but should be considered first if + * there are Tx hangs or other Tx related bugs + */ + txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; + ew32(TXDCTL(0), txdctl); + /* erratum work around: set txdctl the same for both queues */ + ew32(TXDCTL(1), txdctl); + } + + /* Program the Transmit Control Register */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { + tarc = er32(TARC(0)); + /* + * set the speed mode bit, we'll clear it if we're not at + * gigabit link later + */ +#define SPEED_MODE_BIT (1 << 21) + tarc |= SPEED_MODE_BIT; + ew32(TARC(0), tarc); + } + + /* errata: program both queues to unweighted RR */ + if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { + tarc = er32(TARC(0)); + tarc |= 1; + ew32(TARC(0), tarc); + tarc = er32(TARC(1)); + tarc |= 1; + ew32(TARC(1), tarc); + } + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = 
E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + /* enable Report Status bit */ + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + ew32(TCTL, tctl); + + e1000e_config_collision_dist(hw); +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, rfctl; + u32 pages = 0; + + /* Workaround Si errata on 82579 - configure jumbo frame flow */ + if (hw->mac.type == e1000_pch2lan) { + s32 ret_val; + + if (adapter->netdev->mtu > ETH_DATA_LEN) + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); + else + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + + if (ret_val) + e_dbg("failed to enable jumbo frame workaround mode\n"); + } + + /* Program MC offset vector base */ + rctl = er32(RCTL); + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Do not Store bad packets */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Long Packet receive */ + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Some systems expect that the CRC is included in SMBUS traffic. The + * hardware strips the CRC before sending to both SMBUS (BMC) and to + * host memory when this is enabled + */ + if (adapter->flags2 & FLAG2_CRC_STRIPPING) + rctl |= E1000_RCTL_SECRC; + + /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ + if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { + u16 phy_data; + + e1e_rphy(hw, PHY_REG(770, 26), &phy_data); + phy_data &= 0xfff8; + phy_data |= (1 << 2); + e1e_wphy(hw, PHY_REG(770, 26), phy_data); + + e1e_rphy(hw, 22, &phy_data); + phy_data &= 0x0fff; + phy_data |= (1 << 14); + e1e_wphy(hw, 0x10, 0x2823); + e1e_wphy(hw, 0x11, 0x0003); + e1e_wphy(hw, 22, phy_data); + } + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case 2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case 4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case 8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case 16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* + * 82571 and greater support packet-split where the protocol + * header is placed in skb->data and the packet data is + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. + * In the case of a non-split, skb->data is linearly filled, + * followed by the page buffers. Therefore, skb->data is + * sized to hold the largest protocol header. + * + * allocations using alloc_page take too long for regular MTU + * so only enable packet split for jumbo frames + * + * Using pages when the page size is greater than 16k wastes + * a lot of memory, since we allocate 3 pages at all times + * per packet. 
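+ *
+ * Worked example, assuming 4 KiB pages: a 9000-byte MTU gives
+ * PAGE_USE_COUNT(9000) = 3 (two full pages plus a remainder), so on
+ * parts without early-receive packet split is enabled below with
+ * rx_ps_pages = 3, while at the default 1500-byte MTU the LPE bit is
+ * clear and rx_ps_pages stays 0, leaving the plain 2048-byte buffers
+ * configured above in use.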
+ */ + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) && + (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + adapter->rx_ps_pages = pages; + else + adapter->rx_ps_pages = 0; + + if (adapter->rx_ps_pages) { + u32 psrctl = 0; + + /* Configure extra packet-split registers */ + rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_EXTEN; + /* + * disable packet split support for IPv6 extension headers, + * because some malformed IPv6 headers can hang the Rx + */ + rfctl |= (E1000_RFCTL_IPV6_EX_DIS | + E1000_RFCTL_NEW_IPV6_EXT_DIS); + + ew32(RFCTL, rfctl); + + /* Enable Packet split descriptors */ + rctl |= E1000_RCTL_DTYP_PS; + + psrctl |= adapter->rx_ps_bsize0 >> + E1000_PSRCTL_BSIZE0_SHIFT; + + switch (adapter->rx_ps_pages) { + case 3: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE3_SHIFT; + case 2: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE2_SHIFT; + case 1: + psrctl |= PAGE_SIZE >> + E1000_PSRCTL_BSIZE1_SHIFT; + break; + } + + ew32(PSRCTL, psrctl); + } + + ew32(RCTL, rctl); + /* just started the receive unit, no need to restart */ + adapter->flags &= ~FLAG_RX_RESTART_NOW; +} + +/** + * e1000_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rctl, rxcsum, ctrl_ext; + + if (adapter->rx_ps_pages) { + /* this is a 32 byte descriptor */ + rdlen = rx_ring->count * + sizeof(union e1000_rx_desc_packet_split); + adapter->clean_rx = e1000_clean_rx_irq_ps; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; + } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { + rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + /* + * set the writeback threshold (only takes effect if the RDTR + * is set). 
set GRAN=1 and write back up to 0x4 worth, and + * enable prefetching of 0x20 Rx descriptors + * granularity = 01 + * wthresh = 04, + * hthresh = 04, + * pthresh = 0x20 + */ + ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); + ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); + + /* + * override the delay timers for enabling bursting, only if + * the value was not set by the user via module options + */ + if (adapter->rx_int_delay == DEFAULT_RDTR) + adapter->rx_int_delay = BURST_RDTR; + if (adapter->rx_abs_int_delay == DEFAULT_RADV) + adapter->rx_abs_int_delay = BURST_RADV; + } + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + /* irq moderation */ + ew32(RADV, adapter->rx_abs_int_delay); + if ((adapter->itr_setting != 0) && (adapter->itr != 0)) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + + ctrl_ext = er32(CTRL_EXT); + /* Auto-Mask interrupts upon ICR access */ + ctrl_ext |= E1000_CTRL_EXT_IAME; + ew32(IAM, 0xffffffff); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + rdba = rx_ring->dma; + ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); + ew32(RDBAH, (rdba >> 32)); + ew32(RDLEN, rdlen); + ew32(RDH, 0); + ew32(RDT, 0); + rx_ring->head = E1000_RDH; + rx_ring->tail = E1000_RDT; + + /* Enable Receive Checksum Offload for TCP and UDP */ + rxcsum = er32(RXCSUM); + if (adapter->flags & FLAG_RX_CSUM_ENABLED) { + rxcsum |= E1000_RXCSUM_TUOFL; + + /* + * IPv4 payload checksum for UDP fragments must be + * used in conjunction with packet-split. + */ + if (adapter->rx_ps_pages) + rxcsum |= E1000_RXCSUM_IPPCSE; + } else { + rxcsum &= ~E1000_RXCSUM_TUOFL; + /* no need to clear IPPCSE as it defaults to 0 */ + } + ew32(RXCSUM, rxcsum); + + /* + * Enable early receives on supported devices, only takes effect when + * packet size is equal or larger than the specified value (in 8 byte + * units), e.g. using jumbo frames when setting to E1000_ERT_2048 + */ + if ((adapter->flags & FLAG_HAS_ERT) || + (adapter->hw.mac.type == e1000_pch2lan)) { + if (adapter->netdev->mtu > ETH_DATA_LEN) { + u32 rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl | 0x3); + if (adapter->flags & FLAG_HAS_ERT) + ew32(ERT, E1000_ERT_2048 | (1 << 13)); + /* + * With jumbo frames and early-receive enabled, + * excessive C-state transition latencies result in + * dropped transactions. + */ + pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); + } else { + pm_qos_update_request(&adapter->netdev->pm_qos_req, + PM_QOS_DEFAULT_VALUE); + } + } + + /* Enable Receives */ + ew32(RCTL, rctl); +} + +/** + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) +{ + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); +} + +/** + * e1000_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
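+ *
+ * In summary, the mapping implemented below is:
+ *
+ *	IFF_PROMISC	set RCTL.UPE and RCTL.MPE, disable VLAN filtering
+ *	IFF_ALLMULTI	set RCTL.MPE only, clear RCTL.UPE
+ *	otherwise	clear both and program exact filters via the MTA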
+ **/ +static void e1000_set_multi(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + u32 rctl; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + /* Do not hardware filter VLANs in promisc mode */ + e1000e_vlan_filter_disable(adapter); + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + rctl &= ~E1000_RCTL_UPE; + } else { + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + } + e1000e_vlan_filter_enable(adapter); + } + + ew32(RCTL, rctl); + + if (!netdev_mc_empty(netdev)) { + int i = 0; + + mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); + if (!mta_list) + return; + + /* prepare a packed array of only addresses. */ + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + e1000_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + } else { + /* + * if we're called from probe, we might not have + * anything to do here, so clear out the list + */ + e1000_update_mc_addr_list(hw, NULL, 0); + } + + if (netdev->features & NETIF_F_HW_VLAN_RX) + e1000e_vlan_strip_enable(adapter); + else + e1000e_vlan_strip_disable(adapter); +} + +/** + * e1000_configure - configure the hardware for Rx and Tx + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + e1000_set_multi(adapter->netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability_pt(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring), + GFP_KERNEL); +} + +/** + * e1000e_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000e_reset *** + **/ +void e1000e_power_up_phy(struct e1000_adapter *adapter) +{ + if (adapter->hw.phy.ops.power_up) + adapter->hw.phy.ops.power_up(&adapter->hw); + + adapter->hw.mac.ops.setup_link(&adapter->hw); +} + +/** + * e1000_power_down_phy - Power down the PHY + * + * Power down the PHY so no link is implied when interface is down. + * The PHY cannot be powered down if management or WoL is active. + */ +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + /* WoL is enabled */ + if (adapter->wol) + return; + + if (adapter->hw.phy.ops.power_down) + adapter->hw.phy.ops.power_down(&adapter->hw); +} + +/** + * e1000e_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. After reset the device needs to be + * properly configured for Rx, Tx etc. 
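+ *
+ * Worked example of the FIFO carve-up below (made-up 9000-byte MTU,
+ * 16-byte legacy Tx descriptors assumed): max_frame_size = 9018, so
+ * min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 KB and
+ * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB.  Tx may take its
+ * shortfall out of the Rx allocation, but if Rx would then fall below
+ * its own minimum (and early receive is unavailable), Rx wins and is
+ * restored to that minimum.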
+ */ +void e1000e_reset(struct e1000_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_fc_info *fc = &adapter->hw.fc; + struct e1000_hw *hw = &adapter->hw; + u32 tx_space, min_tx_space, min_rx_space; + u32 pba = adapter->pba; + u16 hwm; + + /* reset Packet Buffer Allocation to default */ + ew32(PBA, pba); + + if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* + * To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* + * the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (adapter->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = adapter->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* + * If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if ((tx_space < min_tx_space) && + ((min_tx_space - tx_space) < pba)) { + pba -= min_tx_space - tx_space; + + /* + * if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if ((pba < min_rx_space) && + (!(adapter->flags & FLAG_HAS_ERT))) + /* ERT enabled in e1000_configure_rx */ + pba = min_rx_space; + } + + ew32(PBA, pba); + } + + /* + * flow control settings + * + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) + fc->pause_time = 0xFFFF; + else + fc->pause_time = E1000_FC_PAUSE_TIME; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + switch (hw->mac.type) { + default: + if ((adapter->flags & FLAG_HAS_ERT) && + (adapter->netdev->mtu > ETH_DATA_LEN)) + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - (E1000_ERT_2048 << 3))); + else + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - adapter->max_frame_size)); + + fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ + fc->low_water = fc->high_water - 8; + break; + case e1000_pchlan: + /* + * Workaround PCH LOM adapter hangs with certain network + * loads. If hangs persist, try disabling Tx flow control. 
+ */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + fc->high_water = 0x3500; + fc->low_water = 0x1500; + } else { + fc->high_water = 0x5000; + fc->low_water = 0x3000; + } + fc->refresh_time = 0x1000; + break; + case e1000_pch2lan: + fc->high_water = 0x05C20; + fc->low_water = 0x05048; + fc->pause_time = 0x0650; + fc->refresh_time = 0x0400; + if (adapter->netdev->mtu > ETH_DATA_LEN) { + pba = 14; + ew32(PBA, pba); + } + break; + } + + /* + * Disable Adaptive Interrupt Moderation if 2 full packets cannot + * fit in receive buffer and early-receive not supported. + */ + if (adapter->itr_setting & 0x3) { + if (((adapter->max_frame_size * 2) > (pba << 10)) && + !(adapter->flags & FLAG_HAS_ERT)) { + if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate turned off\n"); + adapter->flags2 |= FLAG2_DISABLE_AIM; + ew32(ITR, 0); + } + } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate turned on\n"); + adapter->flags2 &= ~FLAG2_DISABLE_AIM; + adapter->itr = 20000; + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + } + + /* Allow time for pending master requests to run */ + mac->ops.reset_hw(hw); + + /* + * For parts with AMT enabled, let the firmware know + * that the network interface is in control + */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + ew32(WUC, 0); + + if (mac->ops.init_hw(hw)) + e_err("Hardware Error\n"); + + e1000_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETH_P_8021Q); + + e1000e_reset_adaptive(hw); + + if (!netif_running(adapter->netdev) && + !test_bit(__E1000_TESTING, &adapter->state)) { + e1000_power_down_phy(adapter); + return; + } + + e1000_get_phy_info(hw); + + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && + !(adapter->flags & FLAG_SMART_POWER_DOWN)) { + u16 phy_data = 0; + /* + * speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed + */ + e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + } +} + +int e1000e_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + if (adapter->msix_entries) + e1000_configure_msix(adapter); + e1000_irq_enable(adapter); + + netif_start_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; +} + +static void e1000e_flush_descriptors(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & FLAG2_DMA_BURST)) + return; + + /* flush pending descriptor writebacks to memory */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); +} + +static void e1000e_update_stats(struct e1000_adapter *adapter); + +void e1000e_down(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + + /* + * signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + 
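+ /*
+ * (Ordering note: receives are shut off before the Tx queue is
+ * stopped and transmits right after it, both disables are flushed
+ * and allowed to drain, and only then are NAPI, the interrupt and
+ * the timers torn down, so nothing can re-arm the hardware in the
+ * middle of the teardown.)
+ */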
set_bit(__E1000_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + e1e_flush(); + usleep_range(10000, 20000); + + napi_disable(&adapter->napi); + e1000_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + netif_carrier_off(netdev); + + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + e1000e_flush_descriptors(adapter); + e1000_clean_tx_ring(adapter); + e1000_clean_rx_ring(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + e1000e_reset(adapter); + + /* + * TODO: for power management, we could drop the link and + * pci_disable_device here. + */ +} + +void e1000e_reinit_locked(struct e1000_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + e1000e_down(adapter); + e1000e_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->state); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int __devinit e1000_sw_init(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; + adapter->rx_ps_bsize0 = 128; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->stats64_lock); + + e1000e_set_interrupt_capability(adapter); + + if (e1000_alloc_queues(adapter)) + return -ENOMEM; + + /* Explicitly disable IRQ since the NIC can be in any state. 
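+ * (For example, the BIOS or a previously loaded driver may have left
+ * interrupt causes armed, so mask everything before marking the
+ * interface down.)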
*/ + e1000_irq_disable(adapter); + + set_bit(__E1000_DOWN, &adapter->state); + return 0; +} + +/** + * e1000_intr_msi_test - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi_test(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + e_dbg("icr is %08X\n", icr); + if (icr & E1000_ICR_RXSEQ) { + adapter->flags &= ~FLAG_MSI_TEST_FAILED; + wmb(); + } + + return IRQ_HANDLED; +} + +/** + * e1000_test_msi_interrupt - Returns 0 for successful test + * @adapter: board private struct + * + * code flow taken from tg3.c + **/ +static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + int err; + + /* poll_enable hasn't been called yet, so don't need disable */ + /* clear any pending events */ + er32(ICR); + + /* free the real vector and request a test handler */ + e1000_free_irq(adapter); + e1000e_reset_interrupt_capability(adapter); + + /* Assume that the test fails, if it succeeds then the test + * MSI irq handler will unset this flag */ + adapter->flags |= FLAG_MSI_TEST_FAILED; + + err = pci_enable_msi(adapter->pdev); + if (err) + goto msi_test_failed; + + err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, + netdev->name, netdev); + if (err) { + pci_disable_msi(adapter->pdev); + goto msi_test_failed; + } + + wmb(); + + e1000_irq_enable(adapter); + + /* fire an unusual interrupt on the test handler */ + ew32(ICS, E1000_ICS_RXSEQ); + e1e_flush(); + msleep(50); + + e1000_irq_disable(adapter); + + rmb(); + + if (adapter->flags & FLAG_MSI_TEST_FAILED) { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_info("MSI interrupt test failed, using legacy interrupt.\n"); + } else + e_dbg("MSI interrupt test succeeded!\n"); + + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + +msi_test_failed: + e1000e_set_interrupt_capability(adapter); + return e1000_request_irq(adapter); +} + +/** + * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored + * @adapter: board private struct + * + * code flow taken from tg3.c, called with e1000 interrupts disabled. + **/ +static int e1000_test_msi(struct e1000_adapter *adapter) +{ + int err; + u16 pci_cmd; + + if (!(adapter->flags & FLAG_MSI_ENABLED)) + return 0; + + /* disable SERR in case the MSI write causes a master abort */ + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_SERR) + pci_write_config_word(adapter->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = e1000_test_msi_interrupt(adapter); + + /* re-enable SERR */ + if (pci_cmd & PCI_COMMAND_SERR) { + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_SERR; + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + } + + return err; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
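+ *
+ * On failure, the error labels at the bottom unwind this setup in
+ * reverse: err_req_irq releases h/w control, powers down the PHY and
+ * frees the Rx resources, err_setup_rx frees the Tx resources, and
+ * err_setup_tx resets the part before the error is returned.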
+ **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->state)) + return -EBUSY; + + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000e_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000e_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + + /* + * If AMT is enabled, let the firmware know that the network + * interface is now open and reset the part to a known state. + */ + if (adapter->flags & FLAG_HAS_AMT) { + e1000e_get_hw_control(adapter); + e1000e_reset(adapter); + } + + e1000e_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + e1000_update_mng_vlan(adapter); + + /* DMA latency requirement to workaround early-receive/jumbo issue */ + if ((adapter->flags & FLAG_HAS_ERT) || + (adapter->hw.mac.type == e1000_pch2lan)) + pm_qos_add_request(&adapter->netdev->pm_qos_req, + PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + /* + * before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* + * Work around PCIe errata with MSI interrupts causing some chipsets to + * ignore e1000e MSI messages, which means we need to test our MSI + * interrupt now + */ + if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { + err = e1000_test_msi(adapter); + if (err) { + e_err("Interrupt allocation failed\n"); + goto err_req_irq; + } + } + + /* From here on the code is the same as e1000e_up() */ + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + + adapter->idle_check = true; + pm_runtime_put(&pdev->dev); + + /* fire a link status change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; + +err_req_irq: + e1000e_release_hw_control(adapter); + e1000_power_down_phy(adapter); + e1000e_free_rx_resources(adapter); +err_setup_rx: + e1000e_free_tx_resources(adapter); +err_setup_tx: + e1000e_reset(adapter); + pm_runtime_put_sync(&pdev->dev); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + pm_runtime_get_sync(&pdev->dev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) { + e1000e_down(adapter); + e1000_free_irq(adapter); + } + e1000_power_down_phy(adapter); + + e1000e_free_tx_resources(adapter); + e1000e_free_rx_resources(adapter); + + /* + * kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + + /* + * If AMT is enabled, let the firmware know that the network + * interface is now closed + */ + if ((adapter->flags & FLAG_HAS_AMT) && + !test_bit(__E1000_TESTING, &adapter->state)) + e1000e_release_hw_control(adapter); + + if ((adapter->flags & FLAG_HAS_ERT) || + (adapter->hw.mac.type == e1000_pch2lan)) + pm_qos_remove_request(&adapter->netdev->pm_qos_req); + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + + e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { + /* activate the work around */ + e1000e_set_laa_state_82571(&adapter->hw, 1); + + /* + * Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed (in e1000_watchdog), the actual LAA is in one + * of the RARs and no incoming packets directed to this port + * are dropped. Eventually the LAA will be in RAR[0] and + * RAR[14] + */ + e1000e_rar_set(&adapter->hw, + adapter->hw.mac.addr, + adapter->hw.mac.rar_entry_count - 1); + } + + return 0; +} + +/** + * e1000e_update_phy_task - work thread to update phy + * @work: pointer to our work struct + * + * this worker thread exists because we must acquire a + * semaphore to read the phy, which we could msleep while + * waiting for it, and we can't msleep in a timer. 
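+ *
+ * The resulting two-stage pattern, as a sketch:
+ *
+ *	e1000_update_phy_info()           timer, atomic context
+ *	    -> schedule_work()
+ *	e1000e_update_phy_task()          process context, may sleep
+ *	    -> e1000_get_phy_info()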
+ **/ +static void e1000e_update_phy_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, update_phy_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000_get_phy_info(&adapter->hw); +} + +/* + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + schedule_work(&adapter->update_phy_task); +} + +/** + * e1000e_update_phy_stats - Update the PHY statistics counters + * @adapter: board private structure + * + * Read/clear the upper 16-bit PHY registers and read/accumulate lower + **/ +static void e1000e_update_phy_stats(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 ret_val; + u16 phy_data; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + + /* + * A page set is expensive so check if already on desired page. + * If not, set to the page with the PHY status registers. + */ + hw->phy.addr = 1; + ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + &phy_data); + if (ret_val) + goto release; + if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + } + + /* Single Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.scc += phy_data; + + /* Excessive Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + if (!ret_val) + adapter->stats.ecol += phy_data; + + /* Multiple Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.mcc += phy_data; + + /* Late Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + if (!ret_val) + adapter->stats.latecol += phy_data; + + /* Collision Count - also used for adaptive IFS */ + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + if (!ret_val) + hw->mac.collision_delta = phy_data; + + /* Defer Count */ + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.dc += phy_data; + + /* Transmit with no CRS */ + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); + if (!ret_val) + adapter->stats.tncrs += phy_data; + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000e_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +static void e1000e_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* + * Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
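+ * (Most MAC statistics registers are clear-on-read; hence the bare
+ * er32(GORCH)/er32(GOTCH) reads below, where the low dword is
+ * accumulated and the high dword is read purely to clear the pair.)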
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorc += er32(GORCL); + er32(GORCH); /* Clear gorc */ + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.mpc += er32(MPC); + + /* Half-duplex statistics */ + if (adapter->link_duplex == HALF_DUPLEX) { + if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { + e1000e_update_phy_stats(adapter); + } else { + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + + hw->mac.collision_delta = er32(COLC); + + if ((hw->mac.type != e1000_82574) && + (hw->mac.type != e1000_82583)) + adapter->stats.tncrs += er32(TNCRS); + } + adapter->stats.colc += hw->mac.collision_delta; + } + + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotc += er32(GOTCL); + er32(GOTCH); /* Clear gotc */ + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->mac.tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->mac.tx_packet_delta; + + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* + * RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + netdev->stats.rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + netdev->stats.tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); +} + +/** + * e1000_phy_read_status - Update the PHY register status snapshot + * @adapter: board private structure + **/ +static void e1000_phy_read_status(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_regs *phy = &adapter->phy_regs; + + if ((er32(STATUS) & E1000_STATUS_LU) && + (adapter->hw.phy.media_type == e1000_media_type_copper)) { + int ret_val; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); + ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); + ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); + ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); + ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, 
&phy->expansion); + ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); + ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); + ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); + if (ret_val) + e_warn("Error reading PHY register\n"); + } else { + /* + * Do not read PHY registers if link is not up + * Set values to typical power-on defaults + */ + phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); + phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | + BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | + BMSR_ERCAP); + phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | + ADVERTISE_ALL | ADVERTISE_CSMA); + phy->lpa = 0; + phy->expansion = EXPANSION_ENABLENPAGE; + phy->ctrl1000 = ADVERTISE_1000FULL; + phy->stat1000 = 0; + phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); + } +} + +static void e1000_print_link_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + + /* Link status message must follow this format for user tools */ + printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + adapter->netdev->name, + adapter->link_speed, + (adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? + "Rx/Tx" : + ((ctrl & E1000_CTRL_RFCE) ? "Rx" : + ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); +} + +static bool e1000e_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = 0; + s32 ret_val = 0; + + /* + * get_link_status is set on LSC (link status) interrupt or + * Rx sequence error interrupt. get_link_status will stay + * false until the check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (hw->mac.get_link_status) { + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + } else { + link_active = 1; + } + break; + case e1000_media_type_fiber: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = adapter->hw.mac.serdes_has_link; + break; + default: + case e1000_media_type_unknown: + break; + } + + if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ + e_info("Gigabit has been disabled, downgrading speed\n"); + } + + return link_active; +} + +static void e1000e_enable_receives(struct e1000_adapter *adapter) +{ + /* make sure the receive unit is started */ + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && + (adapter->flags & FLAG_RX_RESTART_NOW)) { + struct e1000_hw *hw = &adapter->hw; + u32 rctl = er32(RCTL); + ew32(RCTL, rctl | E1000_RCTL_EN); + adapter->flags &= ~FLAG_RX_RESTART_NOW; + } +} + +static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* + * With 82574 controllers, PHY needs to be checked periodically + * for hung state and reset, if two calls return true + */ + if (e1000_check_phy_82574(hw)) + adapter->phy_hang_count++; + else + adapter->phy_hang_count = 0; + + if (adapter->phy_hang_count > 1) { + adapter->phy_hang_count = 0; + schedule_work(&adapter->reset_task); + } +} + +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void 
e1000_watchdog(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + +static void e1000_watchdog_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, watchdog_task); + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_phy_info *phy = &adapter->hw.phy; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link, tctl; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + link = e1000e_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + e1000e_enable_receives(adapter); + goto link_up; + } + + if ((e1000e_enable_tx_pkt_filtering(hw)) && + (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) + e1000_update_mng_vlan(adapter); + + if (link) { + if (!netif_carrier_ok(netdev)) { + bool txb2b = 1; + + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + /* update snapshot of PHY registers on LSC */ + e1000_phy_read_status(adapter); + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + e1000_print_link_info(adapter); + /* + * On supported PHYs, check for duplex mismatch only + * if link has autonegotiated at 10/100 half + */ + if ((hw->phy.type == e1000_phy_igp_3 || + hw->phy.type == e1000_phy_bm) && + (hw->mac.autoneg == true) && + (adapter->link_speed == SPEED_10 || + adapter->link_speed == SPEED_100) && + (adapter->link_duplex == HALF_DUPLEX)) { + u16 autoneg_exp; + + e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); + + if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) + e_info("Autonegotiated half duplex but" + " link partner cannot autoneg. " + " Try forcing full duplex if " + "link gets many collisions.\n"); + } + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = 0; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = 0; + adapter->tx_timeout_factor = 10; + break; + } + + /* + * workaround: re-program speed mode bit after + * link-up event + */ + if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && + !txb2b) { + u32 tarc0; + tarc0 = er32(TARC(0)); + tarc0 &= ~SPEED_MODE_BIT; + ew32(TARC(0), tarc0); + } + + /* + * disable TSO for pcie and 10/100 speeds, to avoid + * some hardware issues + */ + if (!(adapter->flags & FLAG_TSO_FORCE)) { + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + e_info("10/100 speed: disabling TSO\n"); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + break; + case SPEED_1000: + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + break; + default: + /* oops */ + break; + } + } + + /* + * enable transmits in the hardware, need to do this + * after setting TARC(0) + */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + /* + * Perform any post-link-up configuration before + * reporting link up. 
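+ * (That is, phy->ops.cfg_on_link_up, when the PHY provides it, runs
+ * before netif_carrier_on() so the stack cannot start transmitting
+ * on a half-configured link.)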
+ */ + if (phy->ops.cfg_on_link_up) + phy->ops.cfg_on_link_up(hw); + + netif_carrier_on(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + /* Link status message must follow this format */ + printk(KERN_INFO "e1000e: %s NIC Link is Down\n", + adapter->netdev->name); + netif_carrier_off(netdev); + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + if (adapter->flags & FLAG_RX_NEEDS_RESTART) + schedule_work(&adapter->reset_task); + else + pm_schedule_suspend(netdev->dev.parent, + LINK_TIMEOUT); + } + } + +link_up: + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + + mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + mac->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorc = adapter->stats.gorc - adapter->gorc_old; + adapter->gorc_old = adapter->stats.gorc; + adapter->gotc = adapter->stats.gotc - adapter->gotc_old; + adapter->gotc_old = adapter->stats.gotc; + spin_unlock(&adapter->stats64_lock); + + e1000e_update_adaptive(&adapter->hw); + + if (!netif_carrier_ok(netdev) && + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { + /* + * We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (adapter->itr_setting == 4) { + /* + * Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotc + adapter->gorc) / 10000; + u32 dif = (adapter->gotc > adapter->gorc ? + adapter->gotc - adapter->gorc : + adapter->gorc - adapter->gotc) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->msix_entries) + ew32(ICS, adapter->rx_ring->ims_val); + else + ew32(ICS, E1000_ICS_RXDMT0); + + /* flush pending descriptors to memory before detecting Tx hang */ + e1000e_flush_descriptors(adapter); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = 1; + + /* + * With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. 
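+	 * (e1000e_get_laa_state_82571() reports whether a locally
+	 * administered address is in use; if so, it is rewritten into
+	 * RAR[0] on every watchdog run.)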
Set the appropriate LAA in RAR[0] + */ + if (e1000e_get_laa_state_82571(hw)) + e1000e_rar_set(hw, adapter->hw.mac.addr, 0); + + if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) + e1000e_check_82574_phy_workaround(adapter); + + /* Reset the timer */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + + if (err) + return err; + } + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + __be16 protocol; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) + protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; + else + protocol = skb->protocol; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= 
E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn("checksum_partial proto=%x!\n", + be16_to_cpu(protocol)); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; +} + +#define E1000_MAX_PER_TXD 8192 +#define E1000_MAX_TXD_PWR 12 + +static int e1000_tx_map(struct e1000_adapter *adapter, + struct sk_buff *skb, unsigned int first, + unsigned int max_per_txd, unsigned int nr_frags, + unsigned int mss) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int offset = 0, size, count = 0, i; + unsigned int f, bytecount, segs; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + buffer_info->mapped_as_page = false; + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + len -= size; + offset += size; + count++; + + if (len) { + i++; + if (i == tx_ring->count) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + offset = frag->page_offset; + + while (len) { + i++; + if (i == tx_ring->count) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = dma_map_page(&pdev->dev, frag->page, + offset, size, + DMA_TO_DEVICE); + buffer_info->mapped_as_page = true; + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ? 
: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "Tx DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + int tx_flags, int count) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (tx_flags & E1000_TX_FLAGS_TSO) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (tx_flags & E1000_TX_FLAGS_IPV4) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_CSUM) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_VLAN) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + i = tx_ring->next_to_use; + + do { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + + i++; + if (i == tx_ring->count) + i = 0; + } while (--count > 0); + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tail); + /* + * we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); +} + +#define MINIMUM_DHCP_PACKET_SIZE 282 +static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 length, offset; + + if (vlan_tx_tag_present(skb)) { + if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && + (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) + return 0; + } + + if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) + return 0; + + if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) + return 0; + + { + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); + struct udphdr *udp; + + if (ip->protocol != IPPROTO_UDP) + return 0; + + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + if (ntohs(udp->dest) != 67) + return 0; + + offset = (u8 *)udp + 8 - skb->data; + length = skb->len - offset; + return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); + } + + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_stop_queue(netdev); + /* + * Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. 
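+	 * The barrier orders the queue stop against the re-check of free
+	 * descriptors below: without it, a concurrent Tx clean could free
+	 * descriptors between our check and the stop, leaving the queue
+	 * stopped even though room is available.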
+ */ + smp_mb(); + + /* + * We need to check again in a case another CPU has just + * made room available. + */ + if (e1000_desc_unused(adapter->tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000_desc_unused(adapter->tx_ring) >= size) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int first; + unsigned int max_per_txd = E1000_MAX_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + + if (test_bit(__E1000_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + mss = skb_shinfo(skb)->gso_size; + /* + * The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. + */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + /* + * TSO Workaround for 82571/2/3 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data + */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + /* + * we do this workaround for ES2LAN, but it is un-necessary, + * avoiding it could save a lot of cycles + */ + if (skb->data_len && (hdr_len == len)) { + unsigned int pull_size; + + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err("__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, + max_txd_pwr); + + if (adapter->hw.mac.tx_pkt_filtering) + e1000_transfer_dhcp_info(adapter, skb); + + /* + * need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (e1000_maybe_stop_tx(netdev, count + 2)) + return NETDEV_TX_BUSY; + + if (vlan_tx_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, skb); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= E1000_TX_FLAGS_TSO; + else if (e1000_tx_csum(adapter, skb)) + tx_flags |= E1000_TX_FLAGS_CSUM; + + /* + * Old method was to assume IPv4 packet by default if TSO was enabled. + * 82571 hardware supports TSO capabilities for IPv6 as well... + * no longer assume, we must. 
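+	 * Instead, derive the IPv4 flag from the actual protocol, so the
+	 * context descriptor only requests IP header checksum insertion
+	 * for IPv4 frames.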
+ */ + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + /* if count is 0 then mapping error has occurred */ + count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); + if (count) { + e1000_tx_queue(adapter, tx_flags, count); + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); + + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter; + adapter = container_of(work, struct e1000_adapter, reset_task); + + /* don't run the task if already down */ + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && + (adapter->flags & FLAG_RX_RESTART_NOW))) { + e1000e_dump(adapter); + e_err("Reset adapter\n"); + } + e1000e_reinit_locked(adapter); +} + +/** + * e1000_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. + **/ +struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + memset(stats, 0, sizeof(struct rtnl_link_stats64)); + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + /* Fill out the OS statistics structure */ + stats->rx_bytes = adapter->stats.gorc; + stats->rx_packets = adapter->stats.gprc; + stats->tx_bytes = adapter->stats.gotc; + stats->tx_packets = adapter->stats.gptc; + stats->multicast = adapter->stats.mprc; + stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* + * RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + stats->rx_crc_errors = adapter->stats.crcerrs; + stats->rx_frame_errors = adapter->stats.algnerrc; + stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + stats->tx_aborted_errors = adapter->stats.ecol; + stats->tx_window_errors = adapter->stats.latecol; + stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + spin_unlock(&adapter->stats64_lock); + return stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Jumbo frame support */ + if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && + !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + e_err("Jumbo Frames 
not supported.\n"); + return -EINVAL; + } + + /* Supported frame sizes */ + if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || + (max_frame > adapter->max_hw_frame_size)) { + e_err("Unsupported MTU setting\n"); + return -EINVAL; + } + + /* Jumbo frame workaround on 82579 requires CRC be stripped */ + if ((adapter->hw.mac.type == e1000_pch2lan) && + !(adapter->flags2 & FLAG2_CRC_STRIPPING) && + (new_mtu > ETH_DATA_LEN)) { + e_err("Jumbo Frames not supported on 82579 when CRC " + "stripping is disabled.\n"); + return -EINVAL; + } + + /* 82573 Errata 17 */ + if (((adapter->hw.mac.type == e1000_82573) || + (adapter->hw.mac.type == e1000_82574)) && + (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { + adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; + e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ + adapter->max_frame_size = max_frame; + e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + if (netif_running(netdev)) + e1000e_down(adapter); + + /* + * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else + adapter->rx_buffer_len = 4096; + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + + ETH_FCS_LEN; + + if (netif_running(netdev)) + e1000e_up(adapter); + else + e1000e_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->state); + + return 0; +} + +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + e1000_phy_read_status(adapter); + + switch (data->reg_num & 0x1F) { + case MII_BMCR: + data->val_out = adapter->phy_regs.bmcr; + break; + case MII_BMSR: + data->val_out = adapter->phy_regs.bmsr; + break; + case MII_PHYSID1: + data->val_out = (adapter->hw.phy.id >> 16); + break; + case MII_PHYSID2: + data->val_out = (adapter->hw.phy.id & 0xFFFF); + break; + case MII_ADVERTISE: + data->val_out = adapter->phy_regs.advertise; + break; + case MII_LPA: + data->val_out = adapter->phy_regs.lpa; + break; + case MII_EXPANSION: + data->val_out = adapter->phy_regs.expansion; + break; + case MII_CTRL1000: + data->val_out = adapter->phy_regs.ctrl1000; + break; + case MII_STAT1000: + data->val_out = adapter->phy_regs.stat1000; + break; + case MII_ESTATUS: + data->val_out = adapter->phy_regs.estatus; + break; + default: + return -EIO; + } + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +static int e1000_init_phy_wakeup(struct 
e1000_adapter *adapter, u32 wufc) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, mac_reg; + u16 phy_reg, wuc_enable; + int retval = 0; + + /* copy MAC RARs to PHY RARs */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + retval = hw->phy.ops.acquire(hw); + if (retval) { + e_err("Could not acquire PHY\n"); + return retval; + } + + /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ + retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + goto out; + + /* copy MAC MTA to PHY MTA - only needed for pchlan */ + for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { + mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); + hw->phy.ops.write_reg_page(hw, BM_MTA(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, + (u16)((mac_reg >> 16) & 0xFFFF)); + } + + /* configure PHY Rx Control register */ + hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); + mac_reg = er32(RCTL); + if (mac_reg & E1000_RCTL_UPE) + phy_reg |= BM_RCTL_UPE; + if (mac_reg & E1000_RCTL_MPE) + phy_reg |= BM_RCTL_MPE; + phy_reg &= ~(BM_RCTL_MO_MASK); + if (mac_reg & E1000_RCTL_MO_3) + phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) + << BM_RCTL_MO_SHIFT); + if (mac_reg & E1000_RCTL_BAM) + phy_reg |= BM_RCTL_BAM; + if (mac_reg & E1000_RCTL_PMCF) + phy_reg |= BM_RCTL_PMCF; + mac_reg = er32(CTRL); + if (mac_reg & E1000_CTRL_RFCE) + phy_reg |= BM_RCTL_RFCE; + hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); + + /* enable PHY wakeup in MAC register */ + ew32(WUFC, wufc); + ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); + + /* configure and enable PHY wakeup in PHY registers */ + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); + + /* activate PHY wakeup */ + wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; + retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + e_err("Could not set PHY Host Wakeup bit\n"); +out: + hw->phy.ops.release(hw); + + return retval; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + /* Runtime suspend should only enable wakeup for link changes */ + u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; + int retval = 0; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + e1000e_down(adapter); + e1000_free_irq(adapter); + } + e1000e_reset_interrupt_capability(adapter); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_multi(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = er32(RCTL); + rctl |= E1000_RCTL_MPE; + ew32(RCTL, rctl); + } + + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC; + if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) + ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + + if (adapter->hw.phy.media_type == e1000_media_type_fiber || + adapter->hw.phy.media_type == + e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + if (adapter->flags & FLAG_IS_ICH) + e1000_suspend_workarounds_ich8lan(&adapter->hw); + + /* Allow time for pending master requests to run */ + e1000e_disable_pcie_master(&adapter->hw); + + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + /* enable wakeup by the PHY */ + retval = e1000_init_phy_wakeup(adapter, wufc); + if (retval) + return retval; + } else { + /* enable wakeup by the MAC */ + ew32(WUFC, wufc); + ew32(WUC, E1000_WUC_PME_EN); + } + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if ((adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + *enable_wake = true; + + if (adapter->hw.phy.type == e1000_phy_igp_3) + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + + /* + * Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + e1000e_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) +{ + if (sleep && wake) { + pci_prepare_to_sleep(pdev); + return; + } + + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); +} + +static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, + bool wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* + * The pci-e switch on some quad port adapters will report a + * correctable error when the MAC transitions from D0 to D3. To + * prevent this we need to mask off the correctable errors on the + * downstream port of the pci-e switch. 
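+	 * The saved DEVCTL value is written back after the power-off, so
+	 * correctable-error reporting is only masked for the duration of
+	 * the D0->D3 transition.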
+ */ + if (adapter->flags & FLAG_IS_QUAD_PORT) { + struct pci_dev *us_dev = pdev->bus->self; + int pos = pci_pcie_cap(us_dev); + u16 devctl; + + pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); + pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, + (devctl & ~PCI_EXP_DEVCTL_CERE)); + + e1000_power_off(pdev, sleep, wake); + + pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); + } else { + e1000_power_off(pdev, sleep, wake); + } +} + +#ifdef CONFIG_PCIEASPM +static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + pci_disable_link_state_locked(pdev, state); +} +#else +static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + int pos; + u16 reg16; + + /* + * Both device and parent should have the same ASPM setting. + * Disable ASPM in downstream component first and then upstream. + */ + pos = pci_pcie_cap(pdev); + pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); + reg16 &= ~state; + pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); + + if (!pdev->bus->self) + return; + + pos = pci_pcie_cap(pdev->bus->self); + pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); + reg16 &= ~state; + pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); +} +#endif +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + dev_info(&pdev->dev, "Disabling ASPM %s %s\n", + (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", + (state & PCIE_LINK_STATE_L1) ? "L1" : ""); + + __e1000e_disable_aspm(pdev, state); +} + +#ifdef CONFIG_PM +static bool e1000e_pm_ready(struct e1000_adapter *adapter) +{ + return !!adapter->tx_ring->buffer_info; +} + +static int __e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 aspm_disable_flag = 0; + u32 err; + + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + e1000e_set_interrupt_capability(adapter); + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + if (hw->mac.type == e1000_pch2lan) + e1000_resume_workarounds_pchlan(&adapter->hw); + + e1000e_power_up_phy(adapter); + + /* report the system wakeup cause from S3/S4 */ + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + u16 phy_data; + + e1e_rphy(&adapter->hw, BM_WUS, &phy_data); + if (phy_data) { + e_info("PHY Wakeup cause - %s\n", + phy_data & E1000_WUS_EX ? "Unicast Packet" : + phy_data & E1000_WUS_MC ? "Multicast Packet" : + phy_data & E1000_WUS_BC ? "Broadcast Packet" : + phy_data & E1000_WUS_MAG ? "Magic Packet" : + phy_data & E1000_WUS_LNKC ? "Link Status " + " Change" : "other"); + } + e1e_wphy(&adapter->hw, BM_WUS, ~0); + } else { + u32 wus = er32(WUS); + if (wus) { + e_info("MAC Wakeup cause - %s\n", + wus & E1000_WUS_EX ? "Unicast Packet" : + wus & E1000_WUS_MC ? "Multicast Packet" : + wus & E1000_WUS_BC ? "Broadcast Packet" : + wus & E1000_WUS_MAG ? "Magic Packet" : + wus & E1000_WUS_LNKC ? 
"Link Status Change" : + "other"); + } + ew32(WUS, ~0); + } + + e1000e_reset(adapter); + + e1000_init_manageability_pt(adapter); + + if (netif_running(netdev)) + e1000e_up(adapter); + + netif_device_attach(netdev); + + /* + * If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. + */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int e1000_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake, false); + if (!retval) + e1000_complete_shutdown(pdev, true, wake); + + return retval; +} + +static int e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000e_pm_ready(adapter)) + adapter->idle_check = true; + + return __e1000_resume(pdev); +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int e1000_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000e_pm_ready(adapter)) { + bool wake; + + __e1000_shutdown(pdev, &wake, true); + } + + return 0; +} + +static int e1000_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!e1000e_pm_ready(adapter)) + return 0; + + if (adapter->idle_check) { + adapter->idle_check = false; + if (!e1000e_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC); + } + + return -EBUSY; +} + +static int e1000_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!e1000e_pm_ready(adapter)) + return 0; + + adapter->idle_check = !dev->power.runtime_auto; + return __e1000_resume(pdev); +} +#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake = false; + + __e1000_shutdown(pdev, &wake, false); + + if (system_state == SYSTEM_POWER_OFF) + e1000_complete_shutdown(pdev, false, wake); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +static irqreturn_t e1000_intr_msix(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->msix_entries) { + int vector, msix_irq; + + vector = 0; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_intr_msix_rx(msix_irq, netdev); + enable_irq(msix_irq); + + vector++; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_intr_msix_tx(msix_irq, netdev); + enable_irq(msix_irq); + + vector++; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_msix_other(msix_irq, netdev); + enable_irq(msix_irq); + } + + return IRQ_HANDLED; +} + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + switch (adapter->int_mode) { + case E1000E_INT_MODE_MSIX: + e1000_intr_msix(adapter->pdev->irq, netdev); + break; + case E1000E_INT_MODE_MSI: + disable_irq(adapter->pdev->irq); + e1000_intr_msi(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); + break; + default: /* E1000E_INT_MODE_LEGACY */ + disable_irq(adapter->pdev->irq); + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); + break; + } +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000e_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 aspm_disable_flag = 0; + int err; + pci_ers_result_t result; + + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pdev->state_saved = true; + pci_restore_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000e_reset(adapter); + ew32(WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + + return result; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability_pt(adapter); + + if (netif_running(netdev)) { + if (e1000e_up(adapter)) { + dev_err(&pdev->dev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* + * If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. 
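+	 * (On AMT parts, taking DRV_LOAD is deferred to open() so the
+	 * manageability firmware keeps control while the interface is
+	 * down.)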
+ */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + +} + +static void e1000_print_device_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 ret_val; + u8 pba_str[E1000_PBANUM_LENGTH]; + + /* print bus type/speed/width info */ + e_info("(PCI Express:2.5GT/s:%s) %pM\n", + /* bus width */ + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : + "Width x1"), + /* MAC address */ + netdev->dev_addr); + e_info("Intel(R) PRO/%s Network Connection\n", + (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); + ret_val = e1000_read_pba_string_generic(hw, pba_str, + E1000_PBANUM_LENGTH); + if (ret_val) + strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); + e_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, pba_str); +} + +static void e1000_eeprom_checks(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int ret_val; + u16 buf = 0; + + if (hw->mac.type != e1000_82573) + return; + + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); + if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) { + /* Deep Smart Power Down (DSPD) */ + dev_warn(&adapter->pdev->dev, + "Warning: detected DSPD enabled in EEPROM\n"); + } +} + +static const struct net_device_ops e1000e_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_get_stats64 = e1000e_get_stats64, + .ndo_set_multicast_list = e1000_set_multi, + .ndo_set_mac_address = e1000_set_mac, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif +}; + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
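+ * The netdev is registered only after the NVM checksum and the MAC
+ * address read from the NVM have been validated, so a half-initialized
+ * device is never exposed to the network stack.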
+ **/ +static int __devinit e1000_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; + resource_size_t mmio_start, mmio_len; + resource_size_t flash_start, flash_len; + + static int cards_found; + u16 aspm_disable_flag = 0; + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + + if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_selected_regions_exclusive(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), + e1000e_driver_name); + if (err) + goto err_pci_reg; + + /* AER (Advanced Error Reporting) hooks */ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + /* PCI config space info */ + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + netdev->irq = pdev->irq; + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->flags2 = ei->flags2; + adapter->hw.adapter = adapter; + adapter->hw.mac.type = ei->mac; + adapter->max_hw_frame_size = ei->max_hw_frame_size; + adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + err = -EIO; + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if ((adapter->flags & FLAG_HAS_FLASH) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + flash_start = pci_resource_start(pdev, 1); + flash_len = pci_resource_len(pdev, 1); + adapter->hw.flash_address = ioremap(flash_start, flash_len); + if (!adapter->hw.flash_address) + goto err_flashmap; + } + + /* construct the net_device struct */ + netdev->netdev_ops = &e1000e_netdev_ops; + e1000e_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + adapter->bd_number = cards_found++; + + e1000e_check_options(adapter); + + /* setup adapter struct */ + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + err = ei->get_variants(adapter); + if (err) + goto err_hw_init; + + if ((adapter->flags & FLAG_IS_ICH) && + 
(adapter->flags & FLAG_READ_ONLY_NVM)) + e1000e_write_protect_nvm_ich8lan(&adapter->hw); + + hw->mac.ops.get_bus_info(&adapter->hw); + + adapter->hw.phy.autoneg_wait_to_complete = 0; + + /* Copper options */ + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + adapter->hw.phy.mdix = AUTO_ALL_MODES; + adapter->hw.phy.disable_polarity_correction = 0; + adapter->hw.phy.ms_type = e1000_ms_hw_default; + } + + if (e1000_check_reset_block(&adapter->hw)) + e_info("PHY reset is blocked due to SOL/IDER session.\n"); + + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_FILTER; + + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_HW_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (e1000e_enable_mng_pass_thru(&adapter->hw)) + adapter->flags |= FLAG_MNG_PT_ENABLED; + + /* + * before reading the NVM, reset the controller to + * put the device in a known good starting state + */ + adapter->hw.mac.ops.reset_hw(&adapter->hw); + + /* + * systems with ASPM and others may see the checksum fail on the first + * attempt. Let's give it a few tries + */ + for (i = 0;; i++) { + if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) + break; + if (i == 2) { + e_err("The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + e1000_eeprom_checks(adapter); + + /* copy the MAC address */ + if (e1000e_read_mac_addr(&adapter->hw)) + e_err("NVM Read Error while reading MAC address\n"); + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); + err = -EIO; + goto err_eeprom; + } + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = e1000_watchdog; + adapter->watchdog_timer.data = (unsigned long) adapter; + + init_timer(&adapter->phy_info_timer); + adapter->phy_info_timer.function = e1000_update_phy_info; + adapter->phy_info_timer.data = (unsigned long) adapter; + + INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); + INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); + INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); + INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); + + /* Initialize link parameters. 
User can change them with ethtool */ + adapter->hw.mac.autoneg = 1; + adapter->fc_autoneg = 1; + adapter->hw.fc.requested_mode = e1000_fc_default; + adapter->hw.fc.current_mode = e1000_fc_default; + adapter->hw.phy.autoneg_advertised = 0x2f; + + /* ring size defaults */ + adapter->rx_ring->count = 256; + adapter->tx_ring->count = 256; + + /* + * Initial Wake on LAN setting - If APM wake is enabled in + * the EEPROM, enable the ACPI Magic Packet filter + */ + if (adapter->flags & FLAG_APME_IN_WUC) { + /* APME bit in EEPROM is mapped to WUC.APME */ + eeprom_data = er32(WUC); + eeprom_apme_mask = E1000_WUC_APME; + if ((hw->mac.type > e1000_ich10lan) && + (eeprom_data & E1000_WUC_PHY_WAKE)) + adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; + } else if (adapter->flags & FLAG_APME_IN_CTRL3) { + if (adapter->flags & FLAG_APME_CHECK_PORT_B && + (adapter->hw.bus.func == 1)) + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + else + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + } + + /* fetch WoL from EEPROM */ + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* + * now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + if (!(adapter->flags & FLAG_HAS_WOL)) + adapter->eeprom_wol = 0; + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* save off EEPROM version number */ + e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); + + /* reset the hardware with the new settings */ + e1000e_reset(adapter); + + /* + * If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. + */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + + strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + e1000_print_device_info(adapter); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_release_hw_control(adapter); +err_eeprom: + if (!e1000_check_reset_block(&adapter->hw)) + e1000_phy_hw_reset(&adapter->hw); +err_hw_init: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + e1000e_reset_interrupt_capability(adapter); +err_flashmap: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
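+ * Timers and work items are cancelled up front so that no deferred
+ * work can run against the adapter once the netdev has been
+ * unregistered and freed.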
+ **/ +static void __devexit e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + bool down = test_bit(__E1000_DOWN, &adapter->state); + + /* + * The timers may be rescheduled, so explicitly disable them + * from being rescheduled. + */ + if (!down) + set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->downshift_task); + cancel_work_sync(&adapter->update_phy_task); + cancel_work_sync(&adapter->print_hang_task); + + if (!(netdev->flags & IFF_UP)) + e1000_power_down_phy(adapter); + + /* Don't lie to e1000_close() down the road. */ + if (!down) + clear_bit(__E1000_DOWN, &adapter->state); + unregister_netdev(netdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* + * Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + e1000e_release_hw_control(adapter); + + e1000e_reset_interrupt_capability(adapter); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + free_netdev(netdev); + + /* AER disable */ + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), + board_80003es2lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), 
board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, + + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +#ifdef CONFIG_PM +static const struct dev_pm_ops e1000_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) + SET_RUNTIME_PM_OPS(e1000_runtime_suspend, + e1000_runtime_resume, e1000_idle) +}; +#endif + +/* PCI Device API Driver */ +static struct pci_driver e1000_driver = { + .name = e1000e_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = __devexit_p(e1000_remove), +#ifdef CONFIG_PM + .driver.pm = &e1000_pm_ops, +#endif + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("Intel(R) PRO/1000 Network Driver - %s\n", + e1000e_driver_version); + pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); + ret = pci_register_driver(&e1000_driver); + + return ret; +} +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. 
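+ * pci_unregister_driver() detaches every bound device, invoking
+ * e1000_remove() for each one before it returns.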
+ **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} +module_exit(e1000_exit_module); + + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* e1000_main.c */ diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c new file mode 100644 index 0000000..4dd9b63 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -0,0 +1,478 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include + +#include "e1000.h" + +/* + * This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define COPYBREAK_DEFAULT 256 +unsigned int copybreak = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +/* + * All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int __devinitdata X[E1000_MAX_NIC+1] \ + = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* + * Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non-zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* + * Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* + * Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. 
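+ * Prefer InterruptThrottleRate for moderating receive interrupts;
+ * that parameter is what programs the ITR register.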
+ * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* + * Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* + * Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* IntMode (Interrupt Mode) + * + * Valid Range: 0 - 2 + * + * Default Value: 2 (MSI-X) + */ +E1000_PARAM(IntMode, "Interrupt Mode"); +#define MAX_INTMODE 2 +#define MIN_INTMODE 0 + +/* + * Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +/* + * Enable Kumeran Lock Loss workaround + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); + +/* + * Write Protect NVM + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); + +/* + * Enable CRC Stripping + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \ + "the CRC"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int __devinit e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_info("Invalid %s value specified (%i) %s\n", opt->name, *value, + opt->err); + *value = opt->def; + return -1; +} + +/** + * e1000e_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
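+ *
+ * For illustration, a hypothetical range option would be validated
+ * with the same pattern used by every block below:
+ *
+ *	static const struct e1000_option opt = {
+ *		.type = range_option,
+ *		.name = "Example Option",
+ *		.err  = "using default of 1",
+ *		.def  = 1,
+ *		.arg  = { .r = { .min = 0, .max = 10 } }
+ *	};
+ *	unsigned int val = ExampleOption[bd];
+ *	e1000_validate_option(&val, &opt, adapter);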
+ **/ +void __devinit e1000e_check_options(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_notice("Warning: no configuration for board #%i\n", bd); + e_notice("Using defaults for all values\n"); + } + + { /* Transmit Interrupt Delay */ + static const struct e1000_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY } } + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + static const struct e1000_option opt = { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY } } + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + static struct e1000_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY } } + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + static const struct e1000_option opt = { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY } } + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + static const struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " + __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_info("%s turned off\n", opt.name); + break; + case 1: + e_info("%s set to dynamic mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_info("%s set to dynamic conservative mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_info("%s set to simplified (2000-8000 ints) " + "mode\n", opt.name); + adapter->itr_setting = 4; + break; + default: + /* + * Save the setting, because the dynamic bits + * change itr. + */ + if (e1000_validate_option(&adapter->itr, &opt, + adapter) && + (adapter->itr == 3)) { + /* + * In case of invalid user value, + * default to conservative mode. 
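+ *
+ * (Example values: InterruptThrottleRate=3 leaves itr_setting == 3
+ * with a starting rate of 20000 ints/sec, while a plain rate such
+ * as 8000 yields itr_setting == itr == 8000.)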
+ */ + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + } else { + /* + * Clear the lower two bits because + * they are used as control. + */ + adapter->itr_setting = + adapter->itr & ~3; + } + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Interrupt Mode */ + static struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = "defaulting to 2 (MSI-X)", + .def = E1000E_INT_MODE_MSIX, + .arg = { .r = { .min = MIN_INTMODE, + .max = MAX_INTMODE } } + }; + + if (num_IntMode > bd) { + unsigned int int_mode = IntMode[bd]; + e1000_validate_option(&int_mode, &opt, adapter); + adapter->int_mode = int_mode; + } else { + adapter->int_mode = opt.def; + } + } + { /* Smart Power Down */ + static const struct e1000_option opt = { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) + && spd) + adapter->flags |= FLAG_SMART_POWER_DOWN; + } + } + { /* CRC Stripping */ + static const struct e1000_option opt = { + .type = enable_option, + .name = "CRC Stripping", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_CrcStripping > bd) { + unsigned int crc_stripping = CrcStripping[bd]; + e1000_validate_option(&crc_stripping, &opt, adapter); + if (crc_stripping == OPTION_ENABLED) + adapter->flags2 |= FLAG2_CRC_STRIPPING; + } else { + adapter->flags2 |= FLAG2_CRC_STRIPPING; + } + } + { /* Kumeran Lock Loss Workaround */ + static const struct e1000_option opt = { + .type = enable_option, + .name = "Kumeran Lock Loss Workaround", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_KumeranLockLoss > bd) { + unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; + e1000_validate_option(&kmrn_lock_loss, &opt, adapter); + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + kmrn_lock_loss); + } else { + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + opt.def); + } + } + { /* Write-protect NVM */ + static const struct e1000_option opt = { + .type = enable_option, + .name = "Write-protect NVM", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (adapter->flags & FLAG_IS_ICH) { + if (num_WriteProtectNVM > bd) { + unsigned int write_protect_nvm = WriteProtectNVM[bd]; + e1000_validate_option(&write_protect_nvm, &opt, + adapter); + if (write_protect_nvm) + adapter->flags |= FLAG_READ_ONLY_NVM; + } else { + if (opt.def) + adapter->flags |= FLAG_READ_ONLY_NVM; + } + } + } +} diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c new file mode 100644 index 0000000..8666476 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -0,0 +1,3377 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include + +#include "e1000.h" + +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set); +static u32 e1000_get_phy_addr_for_hv_page(u32 page); +static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_m88_cable_length_table) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_igp_2_cable_length_table) + +#define BM_PHY_REG_PAGE(offset) \ + ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) +#define BM_PHY_REG_NUM(offset) \ + ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ + (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ + ~MAX_PHY_REG_ADDRESS))) + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 +#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* BM PHY Copper Specific Control 1 */ +#define BM_CS_CTRL1 16 + +#define HV_MUX_DATA_CTRL PHY_REG(776, 16) +#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 +#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 + +/** + * e1000e_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether 
a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + manc = er32(MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : 0; +} + +/** + * e1000e_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000e_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u16 retry_count = 0; + + if (!(phy->ops.read_reg)) + goto out; + + while (retry_count < 2) { + ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + if (phy->id != 0 && phy->id != PHY_REVISION_MASK) + goto out; + + retry_count++; + } +out: + return ret_val; +} + +/** + * e1000e_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000e_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *data = (u16) mdic; + + /* + * Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + udelay(100); + + return 0; +} + +/** + * e1000e_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
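+ *
+ * The transaction word is packed the same way as in the read path
+ * above: the data in the low 16 bits, the register offset at
+ * E1000_MDIC_REG_SHIFT, the PHY address at E1000_MDIC_PHY_SHIFT,
+ * and the write op-code.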
+ **/ +s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + + /* + * Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + udelay(100); + + return 0; +} + +/** + * e1000e_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired. Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. + **/ +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) +{ + e_dbg("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); +} + +/** + * __e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. 
Release any acquired + * semaphores before exiting. + **/ +static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = 0; + + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) + goto release; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + if (!locked) + hw->phy.ops.release(hw); +out: + return ret_val; +} + +/** + * e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, true); +} + +/** + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = 0; + + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) + goto release; + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + if (!locked) + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. 
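+ *
+ * A minimal (hypothetical) locked sequence:
+ *
+ *	ret_val = hw->phy.ops.acquire(hw);
+ *	if (!ret_val) {
+ *		ret_val = e1000e_write_phy_reg_igp_locked(hw, offset, data);
+ *		hw->phy.ops.release(hw);
+ *	}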
+ **/ +s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + s32 ret_val = 0; + + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + kmrnctrlsta = er32(KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000e_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. + **/ +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + s32 ret_val = 0; + + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + if (!locked) + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000e_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. 
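+ *
+ * Illustrative call (the offset chosen here is only an example):
+ *
+ *	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
+ *					0xFFFF);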
+ **/ +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); + +out: + return ret_val; +} + +/** + * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* For BM PHY this bit is downshift enable */ + if (phy->type != e1000_phy_bm) + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift on BM (disabled by default) */ + if (phy->type == e1000_phy_bm) + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((phy->type == e1000_phy_m88) && + (phy->revision < E1000_REVISION_4) && + (phy->id != BME1000_E_PHY_ID_R2)) { + /* + * Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == 2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. 
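+ * (Downshift allows the PHY to drop from 1000 Mb/s to a lower speed
+ * after repeated failed link attempts; a 5x counter means five
+ * attempts before downshifting.)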
*/ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { + /* Set PHY page 0, register 29 to 0x0003 */ + ret_val = e1e_wphy(hw, 29, 0x0003); + if (ret_val) + return ret_val; + + /* Set PHY page 0, register 30 to 0x0000 */ + ret_val = e1e_wphy(hw, 30, 0x0000); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + if (phy->type == e1000_phy_82578) { + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* 82578 PHY - set the downshift count to 1x. */ + phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; + phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) { + e_dbg("Error resetting the PHY.\n"); + return ret_val; + } + + /* + * Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* disable lplu d0 during driver init */ + ret_val = e1000_set_d0_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D0\n"); + return ret_val; + } + /* Configure mdi-mdix settings */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* + * when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
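+ * (CR_1000T_MS_ENABLE selects manual resolution and CR_1000T_MS_VALUE
+ * then forces master when set or slave when clear; clearing
+ * CR_1000T_MS_ENABLE restores automatic resolution, mirroring the
+ * ms_type switch below.)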
+ */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* + * Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + e_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* + * Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* + * Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000e_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + return ret_val; + } + + ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. 
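+ *
+ * The flow, as implemented below:
+ *
+ *	e1000_phy_setup_autoneg()      - program advertisement registers
+ *	e1e_wphy(hw, PHY_CONTROL, ...) - set AUTO_NEG_EN and RESTART
+ *	e1000_wait_autoneg()           - optional poll for completion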
+ **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* + * Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* + * If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* + * Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + /* + * Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg("Error while waiting for " + "autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = 1; + + return ret_val; +} + +/** + * e1000e_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000e_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* + * Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* + * PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + e_dbg("Forcing Speed and Duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* + * Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000e_phy_has_link_generic(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + e_dbg("Valid link established!!!\n"); + e1000e_config_collision_dist(hw); + ret_val = e1000e_config_fc_after_link_up(hw); + } else { + e_dbg("Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). 
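+ *
+ * A hypothetical caller forcing 100 Mb/s full duplex would first set
+ *
+ *	hw->mac.autoneg = 0;
+ *	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
+ *
+ * and then invoke e1000e_setup_copper_link().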
+ **/ +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* + * Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Reset the phy to commit changes. */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) + return ret_val; + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + if (hw->phy.type != e1000_phy_m88) { + e_dbg("Link taking longer than expected.\n"); + } else { + /* + * We didn't get link. + * Reset the DSP and cross our fingers. 
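+ * (The page-select write prepares the PHY for the DSP reset that
+ * e1000e_phy_reset_dsp() performs via M88E1000_PHY_GEN_CONTROL.)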
+ */ + ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + if (hw->phy.type != e1000_phy_m88) + return 0; + + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* + * Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* + * In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &data); + if (ret_val) + goto out; + + e1000e_phy_force_speed_duplex_setup(hw, &data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, data); + if (ret_val) + goto out; + + /* Disable MDI-X support for 10/100 */ + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + goto out; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + goto out; + + e_dbg("IFE PMC: %X\n", data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. 
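+ *
+ * Typical usage, as in the force-speed/duplex routines above:
+ *
+ *	e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ *	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+ *	e1e_wphy(hw, PHY_CONTROL, phy_data);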
+ **/ +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb\n"); + } + + e1000e_config_collision_dist(hw); + + ew32(CTRL, ctrl); +} + +/** + * e1000e_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
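+ * (LPLU trades speed for power: with the bit set, the PHY links at
+ * the lowest advertised speed while in the low-power state.)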
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return ret_val; +} + +/** + * e1000e_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 e1000e_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_m88: + case e1000_phy_gg82563: + case e1000_phy_bm: + case e1000_phy_82578: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + return 0; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask); + + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* + * Polarity is determined based on the speed of + * our connection. 
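+ * (At 1000 Mb/s the PCS init register reports polarity; at 10 Mb/s
+ * the port status register is used instead, and 100 Mb/s carries no
+ * polarity indication, as handled below.)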
+ */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* + * This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = e1e_rphy(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reversal feature being enabled. + **/ +s32 e1000_check_polarity_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + /* + * Polarity is determined based on the reversal feature being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = (phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* + * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * e1000e_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* + * Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + /* + * If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. 
+ */ + udelay(usec_interval); + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + + *success = (i < iterations); + + return ret_val; +} + +/** + * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has five + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* + * Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) + return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. 
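+ * In other words, the two extreme channel readings are dropped and + * the remaining two are averaged: + * agc_value = (sum - min - max) / (IGP02E1000_PHY_CHANNEL_NUM - 2)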
*/ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return ret_val; +} + +/** + * e1000e_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + e_dbg("Phy info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy->polarity_correction = (phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = e1000_get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000e_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine the cable length, local and remote receiver. 
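+ * + * A minimal caller sketch (hypothetical, error handling elided): + * + * if (!e1000e_get_phy_info_igp(hw)) + * e_dbg("mdix=%d cable=%d\n", hw->phy.is_mdix, + * hw->phy.cable_length);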
+ **/ +s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = e1000_get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000_get_phy_info_ife - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. + **/ +s32 e1000_get_phy_info_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + goto out; + phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) + ? false : true; + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife(hw); + if (ret_val) + goto out; + } else { + /* Polarity is forced */ + phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + } + + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; + + /* The following parameters are undefined for 10/100 operation. */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + +out: + return ret_val; +} + +/** + * e1000e_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting and writing the control register reset bit back to the PHY. + **/ +s32 e1000e_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_ctrl; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= MII_CR_RESET; + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + udelay(1); + + return ret_val; +} + +/** + * e1000e_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). 
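+ * + * In outline (delays taken from the code below, not from a datasheet): + * assert CTRL.PHY_RST, wait phy->reset_delay_us, deassert, wait a + * further 150 usec, then poll for configuration done.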
+ **/ +s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + ret_val = e1000_check_reset_block(hw); + if (ret_val) + return 0; + + ret_val = phy->ops.acquire(hw); + if (ret_val) + return ret_val; + + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + e1e_flush(); + + udelay(phy->reset_delay_us); + + ew32(CTRL, ctrl); + e1e_flush(); + + udelay(150); + + phy->ops.release(hw); + + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000e_get_cfg_done - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milli-seconds for configuration to complete + * and return success. + **/ +s32 e1000e_get_cfg_done(struct e1000_hw *hw) +{ + mdelay(10); + return 0; +} + +/** + * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes an Intel Gigabit PHY3 when an EEPROM is not present. + **/ +s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) +{ + e_dbg("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + e1e_wphy(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + e1e_wphy(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + e1e_wphy(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + e1e_wphy(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Gig mode */ + e1e_wphy(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + e1e_wphy(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + e1e_wphy(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + e1e_wphy(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + e1e_wphy(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + e1e_wphy(hw, 0x2F70, 0x29E4); + /* Force 1000 (required for enabling PHY regs configuration) */ + e1e_wphy(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + e1e_wphy(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + e1e_wphy(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + e1e_wphy(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + e1e_wphy(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + e1e_wphy(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + e1e_wphy(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + e1e_wphy(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + e1e_wphy(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + e1e_wphy(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + e1e_wphy(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + e1e_wphy(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + e1e_wphy(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + e1e_wphy(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + e1e_wphy(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + e1e_wphy(hw, 0x1798, 0xD008); + /* + * Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + e1e_wphy(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + e1e_wphy(hw, 0x187A, 0x0800); + /* + * Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + e1e_wphy(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + e1e_wphy(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + e1e_wphy(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + e1e_wphy(hw, 0x0000, 
0x1340); + + return 0; +} + +/* Internal function pointers */ + +/** + * e1000_get_phy_cfg_done - Generic PHY configuration done + * @hw: pointer to the HW structure + * + * Return success if silicon family did not implement a family specific + * get_cfg_done function. + **/ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_cfg_done) + return hw->phy.ops.get_cfg_done(hw); + + return 0; +} + +/** + * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex + * @hw: pointer to the HW structure + * + * When the silicon family has not implemented a forced speed/duplex + * function for the PHY, simply return 0. + **/ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + if (hw->phy.ops.force_speed_duplex) + return hw->phy.ops.force_speed_duplex(hw); + + return 0; +} + +/** + * e1000e_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. + **/ +enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) +{ + enum e1000_phy_type phy_type = e1000_phy_unknown; + + switch (phy_id) { + case M88E1000_I_PHY_ID: + case M88E1000_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = e1000_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = e1000_phy_gg82563; + break; + case IGP03E1000_E_PHY_ID: + phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = e1000_phy_ife; + break; + case BME1000_E_PHY_ID: + case BME1000_E_PHY_ID_R2: + phy_type = e1000_phy_bm; + break; + case I82578_E_PHY_ID: + phy_type = e1000_phy_82578; + break; + case I82577_E_PHY_ID: + phy_type = e1000_phy_82577; + break; + case I82579_E_PHY_ID: + phy_type = e1000_phy_82579; + break; + default: + phy_type = e1000_phy_unknown; + break; + } + return phy_type; +} + +/** + * e1000e_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. + **/ +s32 e1000e_determine_phy_address(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_PHY_TYPE; + u32 phy_addr = 0; + u32 i; + enum e1000_phy_type phy_type = e1000_phy_unknown; + + hw->phy.id = phy_type; + + for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { + hw->phy.addr = phy_addr; + i = 0; + + do { + e1000e_get_phy_id(hw); + phy_type = e1000e_get_phy_type_from_id(hw->phy.id); + + /* + * If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != e1000_phy_unknown) { + ret_val = 0; + goto out; + } + usleep_range(1000, 2000); + i++; + } while (i < 10); + } + +out: + return ret_val; +} + +/** + * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address + * @page: page to access + * @reg: register being accessed + * + * Returns the phy address for the page requested. + **/ +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) +{ + u32 phy_addr = 2; + + if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000e_write_phy_reg_bm - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
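+ * + * For reference, a BM 'offset' packs page and register together, + * roughly (same macros as used in the body): + * page = offset >> IGP_PAGE_SHIFT; + * reg = offset & MAX_PHY_REG_ADDRESS;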
+ **/ +s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto out; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* + * Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto out; + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +out: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_read_phy_reg_bm - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto out; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* + * Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto out; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +out: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_read_phy_reg_bm2 - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphores before exiting. 
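+ * + * Unlike e1000e_read_phy_reg_bm(), the bm2 variants below always use + * PHY address 1 and the BM_PHY_PAGE_SELECT register, so no per-page + * address lookup is needed.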
+ **/ +s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto out; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto out; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +out: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_write_phy_reg_bm2 - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto out; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto out; + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +out: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers + * @hw: pointer to the HW structure + * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG + * + * Assumes semaphore already acquired and phy_reg points to a valid memory + * address to store contents of the BM_WUC_ENABLE_REG register. + **/ +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + u16 temp; + + /* All page select, port ctrl and wakeup registers use phy address 1 */ + hw->phy.addr = 1; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + e_dbg("Could not set Port Control page\n"); + goto out; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) { + e_dbg("Could not read PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + goto out; + } + + /* + * Enable both PHY wakeup mode and Wakeup register page writes. + * Prevent a power state change by disabling ME and Host PHY wakeup. 
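+ * That is, the write below sets BM_WUC_ENABLE_BIT and clears + * BM_WUC_ME_WU_BIT and BM_WUC_HOST_WU_BIT in the saved value.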
+ */ + temp = *phy_reg; + temp |= BM_WUC_ENABLE_BIT; + temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); + + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); + if (ret_val) { + e_dbg("Could not write PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + goto out; + } + + /* Select Host Wakeup Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); + + /* caller now able to write registers on the Wakeup registers page */ +out: + return ret_val; +} + +/** + * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs + * @hw: pointer to the HW structure + * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG + * + * Restore BM_WUC_ENABLE_REG to its original value. + * + * Assumes semaphore already acquired and *phy_reg is the contents of the + * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by + * caller. + **/ +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val = 0; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + e_dbg("Could not set Port Control page\n"); + goto out; + } + + /* Restore 769.17 to its original value */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); + if (ret_val) + e_dbg("Could not restore PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); +out: + return ret_val; +} + +/** + * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to read or write + * @read: determines if operation is read or write + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Read the PHY register at offset and store the retrieved information in + * data, or write data to PHY register at offset. Note the procedure to + * access the PHY wakeup registers is different from reading the other PHY + * registers. It works as such: + * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 + * 2) Set page to 800 for host (801 for manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769.17.2 to its original value + * + * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and + * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm(). + * + * Assumes semaphore is already acquired. When page_set==true, assumes + * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack + * is responsible for calls to + * e1000_[enable|disable]_phy_wakeup_reg_access_bm()). 
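+ * + * E.g. steps 3 and 4 of a read reduce to (reg being the target + * register number): + * e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); + * e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, data);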
+ **/ +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set) +{ + s32 ret_val; + u16 reg = BM_PHY_REG_NUM(offset); + u16 page = BM_PHY_REG_PAGE(offset); + u16 phy_reg = 0; + + /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */ + if ((hw->mac.type == e1000_pchlan) && + (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) + e_dbg("Attempting to access page %d while gig enabled.\n", + page); + + if (!page_set) { + /* Enable access to PHY wakeup registers */ + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) { + e_dbg("Could not enable PHY wakeup reg access\n"); + goto out; + } + } + + e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg); + + /* Write the Wakeup register page offset value using opcode 0x11 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); + if (ret_val) { + e_dbg("Could not write address opcode to page %d\n", page); + goto out; + } + + if (read) { + /* Read the Wakeup register page value using opcode 0x12 */ + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + data); + } else { + /* Write the Wakeup register page value using opcode 0x12 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + *data); + } + + if (ret_val) { + e_dbg("Could not access PHY reg %d.%d\n", page, reg); + goto out; + } + + if (!page_set) + ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +out: + return ret_val; +} + +/** + * e1000_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or when wake on LAN is not enabled, restore the link to its + * previous settings. + **/ +void e1000_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + e1e_rphy(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1e_wphy(hw, PHY_CONTROL, mii_reg); +} + +/** + * e1000_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or when wake on LAN is not enabled, power down the PHY. + **/ +void e1000_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + e1e_rphy(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1e_wphy(hw, PHY_CONTROL, mii_reg); + usleep_range(1000, 2000); +} + +/** + * e1000e_commit_phy - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000e_commit_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.commit) + return hw->phy.ops.commit(hw); + + return 0; +} + +/** + * e1000_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable SmartSpeed. LPLU and SmartSpeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. 
+ * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d0_lplu_state) + return hw->phy.ops.set_d0_lplu_state(hw, active); + + return 0; +} + +/** + * __e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphore before exiting. + **/ +static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + data, true); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores + * the retrieved information in data. Release the acquired semaphore + * before exiting. + **/ +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_read_phy_reg_hv_locked - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_read_phy_reg_page_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired and page already set. 
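+ * + * The three read wrappers differ only in the (locked, page_set) pair + * passed to __e1000_read_phy_reg_hv(): + * e1000_read_phy_reg_hv() (false, false) + * e1000_read_phy_reg_hv_locked() (true, false) + * e1000_read_phy_reg_page_hv() (true, true)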
+ **/ +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * __e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + &data, false); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + /* + * Workaround MDIO accesses being disabled after entering IEEE + * Power Down (when bit 11 of the PHY Control register is set) + */ + if ((hw->phy.type == e1000_phy_82578) && + (hw->phy.revision >= 1) && + (hw->phy.addr == 2) && + ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { + u16 data2 = 0x7EFF; + ret_val = e1000_access_phy_debug_regs_hv(hw, + (1 << 6) | 0x3, + &data2, false); + if (ret_val) + goto out; + } + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); + +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register at the offset. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_write_phy_reg_hv_locked - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired. + **/ +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_write_phy_reg_page_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired and page already set. 
+ **/ +s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * e1000_get_phy_addr_for_hv_page - Get PHY address based on page + * @page: page to be accessed + **/ +static u32 e1000_get_phy_addr_for_hv_page(u32 page) +{ + u32 phy_addr = 2; + + if (page >= HV_INTC_FC_PAGE_START) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to be read or written + * @read: determines if operation is read or write + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. Note that the procedure + * to access these regs uses the address port and data port to read/write. + * These accesses are done with PHY address 2 and without using pages. + **/ +static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read) +{ + s32 ret_val; + u32 addr_reg = 0; + u32 data_reg = 0; + + /* This takes care of the difference with desktop vs mobile phy */ + addr_reg = (hw->phy.type == e1000_phy_82578) ? + I82578_ADDR_REG : I82577_ADDR_REG; + data_reg = addr_reg + 1; + + /* All operations in this function are phy address 2 */ + hw->phy.addr = 2; + + /* masking with 0x3F to remove the page from offset */ + ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); + if (ret_val) { + e_dbg("Could not write the Address Offset port register\n"); + goto out; + } + + /* Read or write the data value next */ + if (read) + ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data); + else + ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); + + if (ret_val) { + e_dbg("Could not access the Data port register\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_link_stall_workaround_hv - Si workaround + * @hw: pointer to the HW structure + * + * This function works around a Si bug where the link partner can get + * a link up indication before the PHY does. If small packets are sent + * by the link partner they can be placed in the packet buffer without + * being properly accounted for by the PHY and will stall preventing + * further packets from being received. The workaround is to clear the + * packet buffer after the PHY detects link up. + **/ +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 data; + + if (hw->phy.type != e1000_phy_82578) + goto out; + + /* Do not apply the workaround if PHY loopback (bit 14) is set */ + e1e_rphy(hw, PHY_CONTROL, &data); + if (data & PHY_CONTROL_LB) + goto out; + + /* check if link is up and at 1Gbps */ + ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); + if (ret_val) + goto out; + + data &= BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK; + + if (data != (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + goto out; + + mdelay(200); + + /* flush the packets in the fifo buffer */ + ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC | + HV_MUX_DATA_CTRL_FORCE_SPEED); + if (ret_val) + goto out; + + ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); + +out: + return ret_val; +} + +/** + * e1000_check_polarity_82577 - Checks the polarity. 
+ * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. + **/ +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); + + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine the cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_82577(hw); + if (ret_val) + goto out; + + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; + + if ((data & I82577_PHY_STATUS2_SPEED_MASK) == + I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
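+ * + * The extraction itself is a single shift-and-mask (fields as used + * below): + * length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + * I82577_DSTATUS_CABLE_LENGTH_SHIFT;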
+ **/ +s32 e1000_get_cable_length_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + goto out; + + length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT; + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + ret_val = -E1000_ERR_PHY; + + phy->cable_length = length; + +out: + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile new file mode 100644 index 0000000..c6e4621 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -0,0 +1,37 @@ +################################################################################ +# +# Intel 82575 PCI-Express Ethernet Linux driver +# Copyright(c) 1999 - 2011 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 82575 PCI-Express ethernet driver +# + +obj-$(CONFIG_IGB) += igb.o + +igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ + e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o + diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c new file mode 100644 index 0000000..c0857bd --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -0,0 +1,2084 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_82575 + * e1000_82576 + */ + +#include <linux/types.h> +#include <linux/if_ether.h> + +#include "e1000_mac.h" +#include "e1000_82575.h" + +static s32 igb_get_invariants_82575(struct e1000_hw *); +static s32 igb_acquire_phy_82575(struct e1000_hw *); +static void igb_release_phy_82575(struct e1000_hw *); +static s32 igb_acquire_nvm_82575(struct e1000_hw *); +static void igb_release_nvm_82575(struct e1000_hw *); +static s32 igb_check_for_link_82575(struct e1000_hw *); +static s32 igb_get_cfg_done_82575(struct e1000_hw *); +static s32 igb_init_hw_82575(struct e1000_hw *); +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); +static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); +static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); +static s32 igb_reset_hw_82575(struct e1000_hw *); +static s32 igb_reset_hw_82580(struct e1000_hw *); +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); +static s32 igb_setup_copper_link_82575(struct e1000_hw *); +static s32 igb_setup_serdes_link_82575(struct e1000_hw *); +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); +static void igb_clear_hw_cntrs_82575(struct e1000_hw *); +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, + u16 *); +static s32 igb_get_phy_id_82575(struct e1000_hw *); +static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); +static bool igb_sgmii_active_82575(struct e1000_hw *); +static s32 igb_reset_init_script_82575(struct e1000_hw *); +static s32 igb_read_mac_addr_82575(struct e1000_hw *); +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); +static const u16 e1000_82580_rxpbs_table[] = + { 36, 72, 144, 1, 2, 4, 8, 16, + 35, 70, 140 }; +#define E1000_82580_RXPBS_TABLE_SIZE \ + (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) + +/** + * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. 
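+ * + * The deciding bit depends on the MAC type (per the switch below): + * 82575/82576: E1000_MDIC, bit E1000_MDIC_DEST + * 82580/i350: E1000_MDICNFG, bit E1000_MDICNFG_EXT_MDIO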
+ **/ +static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = rd32(E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + reg = rd32(E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +static s32 igb_get_invariants_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; + u32 eecd; + s32 ret_val; + u16 size; + u32 ctrl_ext = 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + mac->type = e1000_i350; + break; + default: + return -E1000_ERR_MAC_INIT; + break; + } + + /* Set media type */ + /* + * The 82575 uses bits 22:23 for link mode. The mode can be changed + * based on the EEPROM. We cannot rely upon device ID. There + * is no distinguishable difference between fiber and internal + * SerDes mode on the 82575. There can be an external PHY attached + * on the SGMII interface. For this, we'll set sgmii_active to true. + */ + phy->media_type = e1000_media_type_copper; + dev_spec->sgmii_active = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + dev_spec->sgmii_active = true; + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + if (mac->type == e1000_82576) + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + if (mac->type == e1000_82580) + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + if (mac->type == e1000_i350) + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = igb_reset_hw_82580; + else + mac->ops.reset_hw = igb_reset_hw_82575; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? 
true : false; + /* enable EEE on i350 parts */ + if (mac->type == e1000_i350) + dev_spec->eee_disable = false; + else + dev_spec->eee_disable = true; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? igb_setup_copper_link_82575 + : igb_setup_serdes_link_82575; + + /* NVM initialization */ + eecd = rd32(E1000_EECD); + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* + * Check for invalid size + */ + if ((hw->mac.type == e1000_82576) && (size > 15)) { + printk("igb: The NVM size is not valid, " + "defaulting to 32K.\n"); + size = 15; + } + nvm->word_size = 1 << size; + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + /* NVM Function Pointers */ + nvm->ops.acquire = igb_acquire_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + + nvm->ops.release = igb_release_nvm_82575; + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = igb_validate_nvm_checksum_82580; + nvm->ops.update = igb_update_nvm_checksum_82580; + break; + case e1000_i350: + nvm->ops.validate = igb_validate_nvm_checksum_i350; + nvm->ops.update = igb_update_nvm_checksum_i350; + break; + default: + nvm->ops.validate = igb_validate_nvm_checksum; + nvm->ops.update = igb_update_nvm_checksum; + } + nvm->ops.write = igb_write_nvm_spi; + + /* if part supports SR-IOV then initialize mailbox parameters */ + switch (mac->type) { + case e1000_82576: + case e1000_i350: + igb_init_mbx_params_pf(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + if (phy->media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + /* PHY function pointers */ + if (igb_sgmii_active_82575(hw)) { + phy->ops.reset = igb_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = igb_phy_hw_reset; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + wr32(E1000_CTRL_EXT, ctrl_ext); + igb_reset_mdicnfg_82580(hw); + + if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; + phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; + } else if (hw->mac.type >= e1000_82580) { + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; + } else { + phy->ops.read_reg = igb_read_phy_reg_igp; + phy->ops.write_reg = igb_write_phy_reg_igp; + } + + /* set lan id */ + hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> + E1000_STATUS_FUNC_SHIFT; + + /* Set phy->phy_addr and phy->id. 
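+ * For SGMII parts without external MDIO this can involve probing + * PHY addresses 1-7 over I2C; see igb_get_phy_id_82575() below.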
*/ + ret_val = igb_get_phy_id_82575(hw); + if (ret_val) + return ret_val; + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + + if (phy->id == I347AT4_E_PHY_ID || + phy->id == M88E1112_E_PHY_ID) + phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = igb_get_cable_length_m88; + + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + break; + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.get_phy_info = igb_get_phy_info_igp; + phy->ops.get_cable_length = igb_get_cable_length_igp_2; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; + phy->ops.get_cable_length = igb_get_cable_length_82580; + phy->ops.get_phy_info = igb_get_phy_info_82580; + break; + default: + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * igb_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return igb_acquire_swfw_sync_82575(hw, mask); +} + +/** + * igb_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static void igb_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + igb_release_swfw_sync_82575(hw, mask); +} + +/** + * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. 
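+ * + * SGMII PHY registers are reached over the I2C interface here + * (igb_write_phy_reg_i2c()), which is why offsets are bounded by + * E1000_MAX_SGMII_PHY_REG_ADDR rather than the usual MDIO range.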
+ **/ +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +static s32 igb_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + /* + * For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. + */ + if (!(igb_sgmii_active_82575(hw))) { + phy->addr = 1; + ret_val = igb_get_phy_id(hw); + goto out; + } + + if (igb_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = rd32(E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + mdic = rd32(E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + ret_val = igb_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + wrfl(); + msleep(300); + + /* + * The address field in the I2CCMD register is 3 bits and 0 is invalid. + * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == 0) { + hw_dbg("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); + /* + * At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + hw_dbg("PHY address %u was unreadable\n", phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + goto out; + } else { + ret_val = igb_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + wr32(E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + /* + * This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + + hw_dbg("Soft resetting SGMII attached PHY...\n"); + + /* + * SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. 
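The SGMII read wrapper (earlier) and write wrapper (above) share one idiom: range-check the offset, take the PHY semaphore, do the I2C transfer, release. Factored out as a sketch for the read direction; the write path differs only in calling igb_write_phy_reg_i2c() with a value instead of a pointer (helper name hypothetical):

/* Hypothetical sketch: the guarded-I2C idiom used by both wrappers. */
static s32 example_sgmii_read(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR)
		return -E1000_ERR_PARAM;

	ret_val = hw->phy.ops.acquire(hw);	/* igb_acquire_phy_82575 */
	if (ret_val)
		return ret_val;

	ret_val = igb_read_phy_reg_i2c(hw, offset, data);
	hw->phy.ops.release(hw);		/* released even on error */
	return ret_val;
}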
+ */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + ret_val = igb_acquire_nvm(hw); + + if (ret_val) + igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * igb_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. 
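igb_acquire_nvm_82575() stacks two locks: the SW/FW semaphore first, then the EEPROM request bit, and it unwinds the outer lock if the inner acquire fails. A sketch of the pairing a caller is expected to honor (helper name hypothetical; whether a given nvm.ops.read backend takes the lock itself varies, so read this as the shape of a guarded access, not a drop-in helper):

/* Hypothetical sketch: paired NVM acquire/release around an access. */
static s32 example_guarded_nvm_read(struct e1000_hw *hw, u16 offset, u16 *word)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.acquire(hw);	/* igb_acquire_nvm_82575 */
	if (ret_val)
		return ret_val;

	ret_val = hw->nvm.ops.read(hw, offset, 1, word);
	hw->nvm.ops.release(hw);		/* igb_release_nvm_82575 */
	return ret_val;
}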
+ **/ +static void igb_release_nvm_82575(struct e1000_hw *hw) +{ + igb_release_nvm(hw); + igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + while (i < timeout) { + if (igb_get_hw_semaphore(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); + +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore(hw) != 0); + /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + s32 ret_val = 0; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + + while (timeout) { + if (rd32(E1000_EEMNGCTL) & mask) + break; + msleep(1); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) + igb_phy_init_script_igp3(hw); + + return ret_val; +} + +/** + * igb_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. 
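The SW_FW_SYNC layout explains the shift in igb_acquire_swfw_sync_82575(): each resource has a software flag in the low 16 bits and a matching firmware flag 16 bits higher, so the resource is free only when both are clear, and the acquirer sets only its software bit. A worked sketch for the PHY0 semaphore (helper name hypothetical):

/*
 * Hypothetical sketch: with mask = E1000_SWFW_PHY0_SM (0x0002),
 *   swmask = 0x00000002 (software owns PHY0)
 *   fwmask = 0x00020000 (firmware owns PHY0)
 * and PHY0 is free only while SW_FW_SYNC & 0x00020002 == 0.
 */
static bool example_swfw_free(u32 swfw_sync, u16 mask)
{
	u32 swmask = mask;
	u32 fwmask = (u32)mask << 16;

	return !(swfw_sync & (fwmask | swmask));
}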
+ **/ +static s32 igb_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* + * Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + } else { + ret_val = igb_check_for_copper_link(hw); + } + + return ret_val; +} + +/** + * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +void igb_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = rd32(E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + msleep(1); +} + +/** + * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs; + + /* Set up defaults for the return values of this function */ + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; + + /* + * Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = rd32(E1000_PCS_LSTAT); + + /* + * The link up bit determines when link is up on autoneg. The sync ok + * gets set once both sides sync up and agree upon link. Stable link + * can be determined by checking for both link up and link sync ok + */ + if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) { + *speed = SPEED_1000; + } else if (pcs & E1000_PCS_LSTS_SPEED_100) { + *speed = SPEED_100; + } else { + *speed = SPEED_10; + } + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { + *duplex = FULL_DUPLEX; + } else { + *duplex = HALF_DUPLEX; + } + } + + return 0; +} + +/** + * igb_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of fiber serdes, shut down optics and PCS on driver unload + * when management pass thru is not enabled. 
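In the serdes path above, the PCS result is cached in mac.serdes_has_link and mirrored into mac.get_link_status, which the core driver polls. An illustrative consumer follows; the watchdog-style helper is hypothetical and anything not named in this patch is an assumption:

/* Hypothetical sketch: consuming check_for_link via the ops table. */
static bool example_link_is_up(struct e1000_hw *hw)
{
	if (hw->mac.get_link_status)		/* stale, ask the hardware */
		hw->mac.ops.check_for_link(hw);	/* igb_check_for_link_82575 */

	if (hw->phy.media_type == e1000_media_type_copper)
		return !hw->mac.get_link_status;
	return hw->mac.serdes_has_link;		/* cached PCS state */
}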
+ **/ +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + if (hw->phy.media_type != e1000_media_type_internal_serdes && + igb_sgmii_active_82575(hw)) + return; + + if (!igb_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = rd32(E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = rd32(E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + msleep(1); + } +} + +/** + * igb_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl, icr; + s32 ret_val; + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = igb_set_pcie_completion_timeout(hw); + if (ret_val) { + hw_dbg("PCI-E Set completion timeout has failed.\n"); + } + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + msleep(10); + + ctrl = rd32(E1000_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) + igb_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + icr = rd32(E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + return ret_val; +} + +/** + * igb_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +static s32 igb_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + /* Initialize identification LED */ + ret_val = igb_id_led_init(hw); + if (ret_val) { + hw_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + hw_dbg("Initializing the IEEE VLAN\n"); + igb_clear_vfta(hw); + + /* Setup the receive address */ + igb_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igb_setup_link(hw); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. 
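Reset and init are meant to run in that order: igb_reset_hw_82575() quiesces DMA, masks interrupts and pulses CTRL.RST; igb_init_hw_82575() then rebuilds the RARs, zeroes the MTA/UTA, sets up link, and only afterwards clears the statistics registers, so the symbol-error counters do not accumulate during the link-down window. A sketch of the expected sequence, assuming reset_hw is wired into mac.ops during get_invariants like the other pointers here (helper name hypothetical):

/* Hypothetical sketch: bring-up order via the mac ops table. */
static s32 example_bring_up_mac(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = hw->mac.ops.reset_hw(hw);	/* igb_reset_hw_82575 */
	if (ret_val)
		return ret_val;

	return hw->mac.ops.init_hw(hw);		/* igb_init_hw_82575 */
}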
+ */ + igb_clear_hw_cntrs_82575(hw); + + return ret_val; +} + +/** + * igb_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + wr32(E1000_CTRL, ctrl); + + ret_val = igb_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ + msleep(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_m88: + if (hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID) + ret_val = igb_copper_link_setup_m88_gen2(hw); + else + ret_val = igb_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_3: + ret_val = igb_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = igb_copper_link_setup_82580(hw); + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = igb_setup_copper_link(hw); +out: + return ret_val; +} + +/** + * igb_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg; + bool pcs_autoneg; + s32 ret_val = E1000_SUCCESS; + u16 data; + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return ret_val; + + + /* + * On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
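The first branch point in the serdes setup below is the two-bit link-mode field in CTRL_EXT, which decides whether the PCS autonegotiates (SGMII), parallel-detects only (1000BASE-KX), or is forced to 1000/Full (serdes). Decoded against the defines added later in this patch (helper name hypothetical):

/*
 * Hypothetical sketch: CTRL_EXT[23:22] link-mode decode.
 *   00 = GMII (copper)   01 = 1000BASE-KX
 *   10 = SGMII           11 = PCIe serdes
 */
static const char *example_link_mode(u32 ctrl_ext)
{
	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_GMII:
		return "GMII";
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		return "1000BASE-KX";
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		return "SGMII";
	default:	/* E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES */
		return "serdes";
	}
}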
+ */ + wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { + /* set both sw defined pins */ + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + /* Set switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg |= E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + } + + reg = rd32(E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + printk(KERN_DEBUG "NVM Read Error\n\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* + * non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + wr32(E1000_CTRL, ctrl_reg); + + /* + * New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. + */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + /* + * We force flow control to prevent the CTRL register values from being + * overwritten by the autonegotiated flow control values + */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + wr32(E1000_PCS_LCTL, reg); + + if (!igb_sgmii_active_82575(hw)) + igb_force_mac_fc(hw); + + return ret_val; +} + +/** + * igb_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. Simply + * return the current state of the sgmii interface. 
+ **/ +static bool igb_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * igb_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. + **/ +static s32 igb_reset_init_script_82575(struct e1000_hw *hw) +{ + if (hw->mac.type == e1000_82575) { + hw_dbg("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); + } + + return 0; +} + +/** + * igb_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = igb_check_alt_mac_addr(hw); + if (ret_val) + goto out; + + ret_val = igb_read_mac_addr(hw); + +out: + return ret_val; +} + +/** + * igb_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +void igb_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) + igb_power_down_phy_copper(hw); +} + +/** + * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
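The "clearing" in the function below is nothing more than discarded reads: these statistics registers are clear-on-read, so reading a counter zeroes it. A one-line sketch of the idiom (helper name hypothetical; rd32() expands against the local hw pointer, per the driver's convention):

/* Hypothetical sketch: clear-on-read statistics counter. */
static inline void example_clear_counter(struct e1000_hw *hw, u32 reg)
{
	(void)rd32(reg);	/* the read itself resets the counter */
}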
+ **/ +static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + igb_clear_hw_cntrs_base(hw); + + rd32(E1000_PRC64); + rd32(E1000_PRC127); + rd32(E1000_PRC255); + rd32(E1000_PRC511); + rd32(E1000_PRC1023); + rd32(E1000_PRC1522); + rd32(E1000_PTC64); + rd32(E1000_PTC127); + rd32(E1000_PTC255); + rd32(E1000_PTC511); + rd32(E1000_PTC1023); + rd32(E1000_PTC1522); + + rd32(E1000_ALGNERRC); + rd32(E1000_RXERRC); + rd32(E1000_TNCRS); + rd32(E1000_CEXTERR); + rd32(E1000_TSCTC); + rd32(E1000_TSCTFC); + + rd32(E1000_MGTPRC); + rd32(E1000_MGTPDC); + rd32(E1000_MGTPTC); + + rd32(E1000_IAC); + rd32(E1000_ICRXOC); + + rd32(E1000_ICRXPTC); + rd32(E1000_ICRXATC); + rd32(E1000_ICTXPTC); + rd32(E1000_ICTXATC); + rd32(E1000_ICTXQEC); + rd32(E1000_ICTXQMTC); + rd32(E1000_ICRXDMTC); + + rd32(E1000_CBTMPC); + rd32(E1000_HTDPMC); + rd32(E1000_CBRMPC); + rd32(E1000_RPTHC); + rd32(E1000_HGPTC); + rd32(E1000_HTCBDPC); + rd32(E1000_HGORCL); + rd32(E1000_HGORCH); + rd32(E1000_HGOTCL); + rd32(E1000_HGOTCH); + rd32(E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + igb_sgmii_active_82575(hw)) + rd32(E1000_SCVPC); +} + +/** + * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable + * @hw: pointer to the HW structure + * + * After rx enable if managability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void igb_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + if (hw->mac.type != e1000_82575 || + !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all RX queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(E1000_RXDCTL(i)); + wr32(E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + msleep(1); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + rfctl = rd32(E1000_RFCTL); + wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = rd32(E1000_RLPML); + wr32(E1000_RLPML, 0); + + rctl = rd32(E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + wr32(E1000_RCTL, temp_rctl); + wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); + wrfl(); + msleep(2); + + /* Enable RX queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(E1000_RXDCTL(i), rxdctl[i]); + wr32(E1000_RCTL, rctl); + wrfl(); + + wr32(E1000_RLPML, rlpml); + wr32(E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(E1000_ROC); + rd32(E1000_RNBC); + rd32(E1000_MPC); +} + +/** + * igb_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. 
To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = rd32(E1000_GCR); + s32 ret_val = 0; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capababilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + wr32(E1000_GCR, gcr); + return ret_val; +} + +/** + * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. + **/ +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + dtxswc = rd32(E1000_DTXSWC); + if (enable) { + dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs */ + dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + wr32(E1000_DTXSWC, dtxswc); + break; + default: + break; + } +} + +/** + * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. + **/ +void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc = rd32(E1000_DTXSWC); + + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + + wr32(E1000_DTXSWC, dtxswc); +} + +/** + * igb_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = rd32(E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + wr32(E1000_VT_CTL, vt_ctl); +} + +/** + * igb_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. 
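The three VMDq helpers above are normally flipped together when the embedded L2 switch is brought up for SR-IOV. A hedged usage sketch; the real enabling path lives in igb_main.c, outside this patch, so the helper name and call order here are illustrative only:

/* Hypothetical sketch: enabling the embedded L2 switch for SR-IOV. */
static void example_enable_vmdq_switch(struct e1000_hw *hw, int pf_pool)
{
	igb_vmdq_set_loopback_pf(hw, true);	/* hairpin VF-to-VF traffic */
	igb_vmdq_set_replication_pf(hw, true);	/* replicate bcast/mcast to pools */
	igb_vmdq_set_anti_spoofing_pf(hw, true, pf_pool);	/* PF pool exempt */
}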
+ **/ +static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 mdicnfg; + u16 nvm_data = 0; + + if (hw->mac.type != e1000_82580) + goto out; + if (!igb_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + mdicnfg = rd32(E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + wr32(E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + +/** + * igb_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. + **/ +static s32 igb_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl, icr; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + + hw->dev_spec._82575.global_device_reset = false; + + /* Get current control state. */ + ctrl = rd32(E1000_CTRL); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + msleep(10); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && + igb_acquire_swfw_sync_82575(hw, swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && + !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + wr32(E1000_CTRL, ctrl); + wrfl(); + + /* Add delay to insure DEV_RST has time to complete */ + if (global_device_reset) + msleep(5); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. 
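igb_reset_mdicnfg_82580() above, like the checksum helpers below, indexes per-port copies of the NVM image through NVM_82580_LAN_FUNC_OFFSET(). The macro itself is defined elsewhere in this series; the layout assumed here (port 0 at word 0, ports 1-3 at 0x80, 0xC0, 0x100) is a sketch, not a quote:

/* Hypothetical sketch of the per-port NVM word offsets. */
static u16 example_lan_func_offset(u16 port)
{
	return port ? 0x40 + 0x40 * port : 0;	/* 0, 0x80, 0xC0, 0x100 */
}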
+ */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) + igb_reset_init_script_82575(hw); + + /* clear global device reset status bit */ + wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + icr = rd32(E1000_ICR); + + ret_val = igb_reset_mdicnfg_82580(hw); + if (ret_val) + hw_dbg("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + /* Release semaphore */ + if (global_device_reset) + igb_release_swfw_sync_82575(hw, swmbsw_mask); + + return ret_val; +} + +/** + * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 igb_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < E1000_82580_RXPBS_TABLE_SIZE) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * igb_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. 
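Both checksum helpers above rely on one invariant: the 16-bit sum of every word in a protected region, including the stored checksum word, must equal NVM_SUM (0xBABA). Validation sums through the checksum word and compares; update stores NVM_SUM minus the running sum. As a worked sketch (helper name hypothetical):

/*
 * Hypothetical sketch: if words[0..n-1] sum to S (mod 2^16), the
 * stored checksum word is 0xBABA - S, so the full-region sum is
 * S + (0xBABA - S) == NVM_SUM.
 */
static u16 example_nvm_checksum(const u16 *words, int n)
{
	u16 sum = 0;
	int i;

	for (i = 0; i < n; i++)		/* all words before NVM_CHECKSUM_REG */
		sum += words[i];
	return (u16)(NVM_SUM - sum);
}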
+ **/ +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if checksums compatibility bit is set validate checksums + * for all 4 ports. */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum" + " compatibility bit.\n"); + goto out; + } + + if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Write Error while updating checksum" + " compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure. 
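The EEE control is split across this patch: get_invariants seeds dev_spec._82575.eee_disable (false only on i350), and igb_set_eee_i350() below pushes that flag into IPCNFG/EEER. A hedged sketch of a runtime toggle; the helper name and error code are assumptions, and the real ethtool plumbing is not part of this patch:

/* Hypothetical sketch: flipping EEE on an i350 port at runtime. */
static s32 example_toggle_eee(struct e1000_hw *hw, bool enable)
{
	if (hw->mac.type != e1000_i350)
		return -E1000_ERR_CONFIG;	/* assumed error code */

	hw->dev_spec._82575.eee_disable = !enable;
	return igb_set_eee_i350(hw);		/* writes IPCNFG/EEER */
}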
+ * + **/ +s32 igb_set_eee_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 ipcnfg, eeer, ctrl_ext; + + ctrl_ext = rd32(E1000_CTRL_EXT); + if ((hw->mac.type != e1000_i350) || + (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK)) + goto out; + ipcnfg = rd32(E1000_IPCNFG); + eeer = rd32(E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer |= (E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + wr32(E1000_IPCNFG, ipcnfg); + wr32(E1000_EEER, eeer); +out: + + return ret_val; +} + +static struct e1000_mac_operations e1000_mac_ops_82575 = { + .init_hw = igb_init_hw_82575, + .check_for_link = igb_check_for_link_82575, + .rar_set = igb_rar_set, + .read_mac_addr = igb_read_mac_addr_82575, + .get_speed_and_duplex = igb_get_speed_and_duplex_copper, +}; + +static struct e1000_phy_operations e1000_phy_ops_82575 = { + .acquire = igb_acquire_phy_82575, + .get_cfg_done = igb_get_cfg_done_82575, + .release = igb_release_phy_82575, +}; + +static struct e1000_nvm_operations e1000_nvm_ops_82575 = { + .acquire = igb_acquire_nvm_82575, + .read = igb_read_nvm_eerd, + .release = igb_release_nvm_82575, + .write = igb_write_nvm_spi, +}; + +const struct e1000_info e1000_82575_info = { + .get_invariants = igb_get_invariants_82575, + .mac_ops = &e1000_mac_ops_82575, + .phy_ops = &e1000_phy_ops_82575, + .nvm_ops = &e1000_nvm_ops_82575, +}; + diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h new file mode 100644 index 0000000..786e110 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -0,0 +1,258 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); +extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw); +extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw); +extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 + +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + __le16 pkt_info; /* RSS type, Packet type */ + __le16 hdr_info; /* Split Header, + * header buffer length */ + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +/* Adv ctxt IPSec ESP len mask */ + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) 
/* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ + +/* Additional DCA related definitions, note change in position of CPUID */ +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE (1 << 26) +#define E1000_ETQF_1588 (1 << 30) + +/* FTQF register bit definitions */ +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 8 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +#define E1000_VT_CTL_VM_REPL_EN (1 << 30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) + +#define ALL_QUEUES 0xFFFF + +/* RX packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); +void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); +void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); +u16 igb_rxpbs_adjust_82580(u32 data); +s32 
igb_set_eee_i350(struct e1000_hw *); + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h new file mode 100644 index 0000000..7b8ddd8 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -0,0 +1,834 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +/* Extended Device Control */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +/* Interrupt delay cancellation */ +/* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 +/* Interrupt acknowledge Auto-mask */ +/* Clear Interrupt timers after IMS clear */ +/* packet buffer parity error detection enabled */ +/* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 
0x80000000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */
+
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+	E1000_RXDEXT_STATERR_CE | \
+	E1000_RXDEXT_STATERR_SE | \
+	E1000_RXDEXT_STATERR_SEQ | \
+	E1000_RXDEXT_STATERR_CXE | \
+	E1000_RXDEXT_STATERR_RXE)
+
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */
+/* Enable Neighbor Discovery Filtering */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
+
+/* Receive Control */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *		  E1000_PSRCTL_BSIZE0_MASK) |
+ *	      ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *		  E1000_PSRCTL_BSIZE1_MASK) |
+ *	      ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *		  E1000_PSRCTL_BSIZE2_MASK) |
+ *	      ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *		  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256], default=256
+ *	 value1 = [1024..64512], default=4096
+ *	 value2 = [0..64512], default=4096
+ *	 value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM 0x1
+#define E1000_SWFW_PHY0_SM 0x2
+#define E1000_SWFW_PHY1_SM 0x4
+#define E1000_SWFW_PHY2_SM 0x20
+#define E1000_SWFW_PHY3_SM 0x40
+
+/* FACTPS Definitions */
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+/* Defined polarity of Dock/Undock indication in SDP[0] */
+/* Reset both PHY ports, through PHYRST_N pin */
+/* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+/* Initiate an interrupt to manageability engine */
+#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+
+#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_PCS_CFG_PCS_EN 8
+#define E1000_PCS_LCTL_FLV_LINK_UP 1
+#define E1000_PCS_LCTL_FSV_100 2
+#define E1000_PCS_LCTL_FSV_1000 4
+#define E1000_PCS_LCTL_FDV_FULL 8
+#define E1000_PCS_LCTL_FSD 0x10
+#define E1000_PCS_LCTL_FORCE_LINK 0x20
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+#define E1000_PCS_LCTL_AN_ENABLE 0x10000
+#define E1000_PCS_LCTL_AN_RESTART 0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
+#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
+
+#define E1000_PCS_LSTS_LINK_OK 1
+#define E1000_PCS_LSTS_SPEED_100 2
+#define E1000_PCS_LSTS_SPEED_1000 4
+#define E1000_PCS_LSTS_DUPLEX_FULL 8
+#define E1000_PCS_LSTS_SYNK_OK 0x10
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+/* Change in Dock/Undock state. Clear on write '0'. */
+/* Status of Master requests. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
+/* BMC external code execution disabled */
+
+/* Constants used to interpret the masked PCI-X bus speed.
*/ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +/* Extended desc bits for Linksec and timesync */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* DMA Coalescing register fields */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing + * Watchdog Timer */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive + * Threshold */ +#define E1000_DMACR_DMACTHR_SHIFT 16 +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe + * transactions */ +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ + +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit + * Threshold */ + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate + * Threshold */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in + * current window */ + +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic + * Current Cnt */ + +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold + * High val */ +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ 
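+
+/*
+ * Usage sketch for the transmit descriptor command bits above,
+ * illustrative only (tx_desc and skb stand in for the driver's real
+ * ring and buffer objects; they are not part of this header):
+ *
+ *	tx_desc->lower.data = cpu_to_le32(E1000_TXD_CMD_EOP |
+ *					  E1000_TXD_CMD_IFCS |
+ *					  E1000_TXD_CMD_RS | skb->len);
+ *
+ * After write-back, a set E1000_TXD_STAT_DD bit in upper.data tells the
+ * clean-up path that hardware is done with the descriptor.
+ */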
+
+/* Header split receive */
+#define E1000_RFCTL_LEF 0x00040000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLD_SHIFT 12
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* PBA constants */
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_64K 0x0040 /* 64KB */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
+#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
+/* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
+/* LAN connected device generates an interrupt */
+#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+	E1000_IMS_RXT0 | \
+	E1000_IMS_TXDW | \
+	E1000_IMS_RXDMT0 | \
+	E1000_IMS_RXSEQ | \
+	E1000_IMS_LSC | \
+	E1000_IMS_DOUTSYNC)
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
+#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
+
+/* Extended Interrupt Cause Set */
+
+/* Transmit Descriptor Control */
+/* Enable the counting of descriptors still to be processed.
*/ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_POOL_MASK 0x03FC0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +/* Number of milliseconds for NVM auto read done after MAC reset. 
 */
+#define AUTO_READ_DONE_TIMEOUT 10
+
+/* Flow Control */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestamping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+
+#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK 0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT 21
+
+/* PCI Express Control */
+#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define E1000_GCR_CAP_VER2 0x00040000
+
+/* mPHY Address Control and Data Registers */
+#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */
+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
+#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */
+
+/* mPHY PCS CLK Register */
+#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */
+/* mPHY Near End Digital Loopback Override Bit */
+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+
+/* PHY Control Register */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex
Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 + +/* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DATA 16 +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? 
(0x40 + (0x40 * a)) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ + +/* length of string needed to store part num */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - Microwire */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIE_DEVICE_CONTROL2 0x28 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
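+ *
+ * A probe routine would typically mask the revision out of the 32-bit
+ * ID assembled from PHY_ID1/PHY_ID2 before comparing, e.g. (sketch
+ * only, not the driver's exact probe code):
+ *
+ *	if ((phy_id & PHY_REVISION_MASK) ==
+ *	    (M88E1111_I_PHY_ID & PHY_REVISION_MASK))
+ *		phy->type = e1000_phy_m88;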
*/ +/* + * I = Integrated + * E = External + */ +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define M88_VENDOR 0x0141 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +/* 1=CLK125 low, 0=CLK125 toggling */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold + * 0=Normal 10BASE-T Rx Threshold + */ +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* + * 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +/* + * 1 = Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel i347-AT4 Registers */ + +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* i347-AT4 Extended PHY Specific Control Register */ + +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* i347-AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* Marvell 1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* Thermal Sensor */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ + +/* Energy Efficient Ethernet */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +/* DMA Coalescing register fields */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based + on DMA coal */ + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h new file mode 100644 index 0000000..4519a13 --- /dev/null +++ 
b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -0,0 +1,529 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include +#include +#include +#include + +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 + +#define E1000_REVISION_2 2 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
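+			 * With the four controllers above (82575, 82576,
+			 * 82580 and i350), that true count works out to
+			 * e1000_num_macs - 1 == 4 (illustrative arithmetic).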
*/ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 
command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_mbx.h" + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); +}; + +struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +extern const struct e1000_info e1000_82575_info; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 
delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u32 snoop; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* Type of flow control */ + enum e1000_fc_mode requested_mode; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw); +#define hw_dbg(format, arg...) \ + netdev_dbg(igb_get_hw_dev(hw), format, ##arg) + +/* These functions must be implemented by drivers */ +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +#endif /* _E1000_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c new file mode 100644 index 0000000..2b5ef76 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -0,0 +1,1421 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include
+#include
+#include
+#include
+#include
+
+#include "e1000_mac.h"
+
+#include "igb.h"
+
+static s32 igb_set_default_fc(struct e1000_hw *hw);
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
+
+/**
+ * igb_get_bus_info_pcie - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+	u32 reg;
+	u16 pcie_link_status;
+
+	bus->type = e1000_bus_type_pci_express;
+
+	ret_val = igb_read_pcie_cap_reg(hw,
+					PCI_EXP_LNKSTA,
+					&pcie_link_status);
+	if (ret_val) {
+		bus->width = e1000_bus_width_unknown;
+		bus->speed = e1000_bus_speed_unknown;
+	} else {
+		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+		case PCI_EXP_LNKSTA_CLS_2_5GB:
+			bus->speed = e1000_bus_speed_2500;
+			break;
+		case PCI_EXP_LNKSTA_CLS_5_0GB:
+			bus->speed = e1000_bus_speed_5000;
+			break;
+		default:
+			bus->speed = e1000_bus_speed_unknown;
+			break;
+		}
+
+		bus->width = (enum e1000_bus_width)((pcie_link_status &
+						     PCI_EXP_LNKSTA_NLW) >>
+						     PCI_EXP_LNKSTA_NLW_SHIFT);
+	}
+
+	reg = rd32(E1000_STATUS);
+	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+
+	return 0;
+}
+
+/**
+ * igb_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void igb_clear_vfta(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		array_wr32(E1000_VFTA, offset, 0);
+		wrfl();
+	}
+}
+
+/**
+ * igb_write_vfta - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	array_wr32(E1000_VFTA, offset, value);
+	wrfl();
+}
+
+/**
+ * igb_init_rx_addrs - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: receive address registers
+ *
+ * Sets up the receive address registers by setting the base receive address
+ * register to the device's MAC address and clearing all the other receive
+ * address registers to 0.
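+ *
+ * Reset paths would call this as, e.g. (illustrative only):
+ *	igb_init_rx_addrs(hw, hw->mac.rar_entry_count);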
+ **/
+void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+{
+	u32 i;
+	u8 mac_addr[ETH_ALEN] = {0};
+
+	/* Setup the receive address */
+	hw_dbg("Programming MAC Address into RAR[0]\n");
+
+	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_entry_count - 1) receive addresses */
+	hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
+	for (i = 1; i < rar_count; i++)
+		hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ * igb_vfta_set - enable or disable vlan in VLAN filter table
+ * @hw: pointer to the HW structure
+ * @vid: VLAN id to add or remove
+ * @add: if true add filter, if false remove
+ *
+ * Sets or clears a bit in the VLAN filter table array based on VLAN id
+ * and if we are adding or removing the filter
+ **/
+s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
+{
+	u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
+	u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+	u32 vfta = array_rd32(E1000_VFTA, index);
+	s32 ret_val = 0;
+
+	/* bit was set/cleared before we started */
+	if ((!!(vfta & mask)) == add) {
+		ret_val = -E1000_ERR_CONFIG;
+	} else {
+		if (add)
+			vfta |= mask;
+		else
+			vfta &= ~mask;
+	}
+
+	igb_write_vfta(hw, index, vfta);
+
+	return ret_val;
+}
+
+/**
+ * igb_check_alt_mac_addr - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be setup by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is saved in the hw struct and
+ * programmed into RAR0 and the function returns success, otherwise the
+ * function returns an error.
+ **/
+s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
+{
+	u32 i;
+	s32 ret_val = 0;
+	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+	u8 alt_mac_addr[ETH_ALEN];
+
+	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+				   &nvm_alt_mac_addr_offset);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (nvm_alt_mac_addr_offset == 0xFFFF) {
+		/* There is no Alternate MAC Address */
+		goto out;
+	}
+
+	if (hw->bus.func == E1000_FUNC_1)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+	for (i = 0; i < ETH_ALEN; i += 2) {
+		offset = nvm_alt_mac_addr_offset + (i >> 1);
+		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+
+		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+	}
+
+	/* if multicast bit is set, the alternate address will not be used */
+	if (is_multicast_ether_addr(alt_mac_addr)) {
+		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+		goto out;
+	}
+
+	/*
+	 * We have a valid alternate MAC address, and we want to treat it the
+	 * same as the normal permanent MAC address stored by the HW into the
+	 * RAR. Do this by mapping this address into RAR0.
+	 */
+	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_rar_set - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
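+ *
+ * For example (illustrative only), programming the permanent address
+ * into the first entry:
+ *	igb_rar_set(hw, hw->mac.addr, 0);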
+ **/ +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* + * Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +/** + * igb_mta_set - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + /* + * The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. + */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = array_rd32(E1000_MTA, hash_reg); + + mta |= (1 << hash_bit); + + array_wr32(E1000_MTA, hash_reg, mta); + wrfl(); +} + +/** + * igb_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igb_mta_set() + **/ +static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* + * The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. 
+ *
+ * For example, given the following Destination MAC Address and an
+ * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB		 MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+	switch (hw->mac.mc_filter_type) {
+	default:
+	case 0:
+		break;
+	case 1:
+		bit_shift += 1;
+		break;
+	case 2:
+		bit_shift += 2;
+		break;
+	case 3:
+		bit_shift += 4;
+		break;
+	}
+
+	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+				  (((u16) mc_addr[5]) << bit_shift)));
+
+	return hash_value;
+}
+
+/**
+ * igb_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+			     u8 *mc_addr_list, u32 mc_addr_count)
+{
+	u32 hash_value, hash_bit, hash_reg;
+	int i;
+
+	/* clear mta_shadow */
+	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+	/* update mta_shadow from mc_addr_list */
+	for (i = 0; (u32) i < mc_addr_count; i++) {
+		hash_value = igb_hash_mc_addr(hw, mc_addr_list);
+
+		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+		hash_bit = hash_value & 0x1F;
+
+		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+		mc_addr_list += (ETH_ALEN);
+	}
+
+	/* replace the entire MTA table */
+	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
+	wrfl();
+}
+
+/**
+ * igb_clear_hw_cntrs_base - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
+{
+	rd32(E1000_CRCERRS);
+	rd32(E1000_SYMERRS);
+	rd32(E1000_MPC);
+	rd32(E1000_SCC);
+	rd32(E1000_ECOL);
+	rd32(E1000_MCC);
+	rd32(E1000_LATECOL);
+	rd32(E1000_COLC);
+	rd32(E1000_DC);
+	rd32(E1000_SEC);
+	rd32(E1000_RLEC);
+	rd32(E1000_XONRXC);
+	rd32(E1000_XONTXC);
+	rd32(E1000_XOFFRXC);
+	rd32(E1000_XOFFTXC);
+	rd32(E1000_FCRUC);
+	rd32(E1000_GPRC);
+	rd32(E1000_BPRC);
+	rd32(E1000_MPRC);
+	rd32(E1000_GPTC);
+	rd32(E1000_GORCL);
+	rd32(E1000_GORCH);
+	rd32(E1000_GOTCL);
+	rd32(E1000_GOTCH);
+	rd32(E1000_RNBC);
+	rd32(E1000_RUC);
+	rd32(E1000_RFC);
+	rd32(E1000_ROC);
+	rd32(E1000_RJC);
+	rd32(E1000_TORL);
+	rd32(E1000_TORH);
+	rd32(E1000_TOTL);
+	rd32(E1000_TOTH);
+	rd32(E1000_TPR);
+	rd32(E1000_TPT);
+	rd32(E1000_MPTC);
+	rd32(E1000_BPTC);
+}
+
+/**
+ * igb_check_for_copper_link - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+s32 igb_check_for_copper_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.
The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + igb_check_downshift(hw); + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igb_config_collision_dist(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * igb_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 igb_setup_link(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* + * In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igb_check_reset_block(hw)) + goto out; + + /* + * If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = igb_set_default_fc(hw); + if (ret_val) + goto out; + } + + /* + * We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* + * Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(E1000_FCT, FLOW_CONTROL_TYPE); + wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(E1000_FCTTV, hw->fc.pause_time); + + ret_val = igb_set_fc_watermarks(hw); + +out: + return ret_val; +} + +/** + * igb_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. 
Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void igb_config_collision_dist(struct e1000_hw *hw)
+{
+	u32 tctl;
+
+	tctl = rd32(E1000_TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+	wr32(E1000_TCTL, tctl);
+	wrfl();
+}
+
+/**
+ * igb_set_fc_watermarks - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers.  If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 fcrtl = 0, fcrth = 0;
+
+	/*
+	 * Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (hw->fc.current_mode & e1000_fc_tx_pause) {
+		/*
+		 * We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+		 */
+		fcrtl = hw->fc.low_water;
+		if (hw->fc.send_xon)
+			fcrtl |= E1000_FCRTL_XONE;
+
+		fcrth = hw->fc.high_water;
+	}
+	wr32(E1000_FCRTL, fcrtl);
+	wr32(E1000_FCRTH, fcrth);
+
+	return ret_val;
+}
+
+/**
+ * igb_set_default_fc - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM for the default values for flow control and store the
+ * values.
+ **/
+static s32 igb_set_default_fc(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 nvm_data;
+
+	/*
+	 * Read and store word 0x0F of the EEPROM.  This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins.  If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+		hw->fc.requested_mode = e1000_fc_none;
+	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+		 NVM_WORD0F_ASM_DIR)
+		hw->fc.requested_mode = e1000_fc_tx_pause;
+	else
+		hw->fc.requested_mode = e1000_fc_full;
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_force_mac_fc - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings.  TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC.  Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 igb_force_mac_fc(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val = 0;
+
+	ctrl = rd32(E1000_CTRL);
+
+	/*
+	 * Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disables flow control
+	 * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ *      0:  Flow control is completely disabled
+ *      1:  Rx flow control is enabled (we can receive pause
+ *          frames but not send pause frames).
+ *      2:  Tx flow control is enabled (we can send pause frames
+ *          but we do not receive pause frames).
+ *      3:  Both Rx and Tx flow control (symmetric) is enabled.
+ *  other:  No other values should be possible at this point.
+ */
+	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		hw_dbg("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	wr32(E1000_CTRL, ctrl);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_config_fc_after_link_up - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced.  If the link needed to be forced, then
+ * flow control needs to be forced also.  If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ **/
+s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = 0;
+	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+	u16 speed, duplex;
+
+	/*
+	 * Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link.  In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (mac->autoneg_failed) {
+		if (hw->phy.media_type == e1000_media_type_internal_serdes)
+			ret_val = igb_force_mac_fc(hw);
+	} else {
+		if (hw->phy.media_type == e1000_media_type_copper)
+			ret_val = igb_force_mac_fc(hw);
+	}
+
+	if (ret_val) {
+		hw_dbg("Error forcing flow control settings\n");
+		goto out;
+	}
+
+	/*
+	 * Check for the case where we have copper media and auto-neg is
+	 * enabled.  In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner have
+	 * flow control configured.
+	 */
+	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+		/*
+		 * Read the MII Status Register and check to see if AutoNeg
+		 * has completed.  We read this twice because this reg has
+		 * some "sticky" (latched) bits.
+		 */
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+					       &mii_status_reg);
+		if (ret_val)
+			goto out;
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+					       &mii_status_reg);
+		if (ret_val)
+			goto out;
+
+		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+			hw_dbg("Copper PHY and Auto Neg "
+			       "has not completed.\n");
+			goto out;
+		}
+
+		/*
+		 * The AutoNeg process has completed, so we now need to
+		 * read both the Auto Negotiation Advertisement
+		 * Register (Address 4) and the Auto-Negotiation Base
+		 * Page Ability Register (Address 5) to determine how
+		 * flow control was negotiated.
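+		 *
+		 * A quick illustration (register values assumed for the
+		 * example, not taken from this patch): with the standard
+		 * MII layout, PAUSE is bit 10 (0x0400) and ASM_DIR is
+		 * bit 11 (0x0800) in both registers.  A local
+		 * advertisement of 0x0DE1 and a partner ability of 0x0C01
+		 * both have PAUSE set, so the resolution table below
+		 * selects e1000_fc_full.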
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* + * Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* + * Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\r\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = " + "RX PAUSE frames only.\r\n"); + } + } + /* + * For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); + } + /* + * For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); + } + /* + * Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. 
If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == e1000_fc_none || + hw->fc.requested_mode == e1000_fc_tx_pause) || + hw->fc.strict_ieee) { + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\r\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); + } + + /* + * Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* + * Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igb_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 igb_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = 0; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore. 
*/ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void igb_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = rd32(E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + wr32(E1000_SWSM, swsm); +} + +/** + * igb_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 igb_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = 0; + + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) + break; + msleep(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch(hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * igb_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 igb_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + ret_val = igb_valid_led_default(hw, &data); + if (ret_val) + goto out; + + mac->ledctl_default = rd32(E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + 
default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_cleanup_led - Set LED config to default operation
+ * @hw: pointer to the HW structure
+ *
+ * Remove the current LED configuration and set the LED configuration
+ * to the default value, saved from the EEPROM.
+ **/
+s32 igb_cleanup_led(struct e1000_hw *hw)
+{
+	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
+	return 0;
+}
+
+/**
+ * igb_blink_led - Blink LED
+ * @hw: pointer to the HW structure
+ *
+ * Blink the LEDs which are set to be on.
+ **/
+s32 igb_blink_led(struct e1000_hw *hw)
+{
+	u32 ledctl_blink = 0;
+	u32 i;
+
+	/*
+	 * set the blink bit for each LED that's "on" (0x0E)
+	 * in ledctl_mode2
+	 */
+	ledctl_blink = hw->mac.ledctl_mode2;
+	for (i = 0; i < 4; i++)
+		if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+		    E1000_LEDCTL_MODE_LED_ON)
+			ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
+					 (i * 8));
+
+	wr32(E1000_LEDCTL, ledctl_blink);
+
+	return 0;
+}
+
+/**
+ * igb_led_off - Turn LED off
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED off.
+ **/
+s32 igb_led_off(struct e1000_hw *hw)
+{
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * igb_disable_pcie_master - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 if successful, else returns -10
+ * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
+ * caused the master requests to be disabled.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ **/
+s32 igb_disable_pcie_master(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 timeout = MASTER_DISABLE_TIMEOUT;
+	s32 ret_val = 0;
+
+	if (hw->bus.type != e1000_bus_type_pci_express)
+		goto out;
+
+	ctrl = rd32(E1000_CTRL);
+	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+	wr32(E1000_CTRL, ctrl);
+
+	while (timeout) {
+		if (!(rd32(E1000_STATUS) &
+		      E1000_STATUS_GIO_MASTER_ENABLE))
+			break;
+		udelay(100);
+		timeout--;
+	}
+
+	if (!timeout) {
+		hw_dbg("Master requests are pending.\n");
+		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_validate_mdi_setting - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that, when auto-negotiation is not in use, MDI/MDIx is
+ * correctly set, which is forced to MDI mode only.
+ **/
+s32 igb_validate_mdi_setting(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+		hw_dbg("Invalid MDI setting detected\n");
+		hw->phy.mdix = 1;
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
+ * @hw: pointer to the HW structure
+ * @reg: 32bit register offset such as E1000_SCTL
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes an address/data control type register.  There are several of these
+ * and they all have the format address << 8 | data and bit 31 is polled for
+ * completion.
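+ *
+ * A worked example with assumed values (not part of the original
+ * comment): writing data 0x05 at offset 0x02 programs the register
+ * with (0x02 << 8) | 0x05 = 0x0205, after which the loop below polls
+ * bit 31 (E1000_GEN_CTL_READY) until the hardware reports completion.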
+ **/ +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + s32 ret_val = 0; + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + wr32(reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + udelay(5); + regvalue = rd32(reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + hw_dbg("Reg %08x did not indicate ready\n", reg); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool igb_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = false; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(E1000_FWSM); + factps = rd32(E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h new file mode 100644 index 0000000..4927f61 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -0,0 +1,90 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +#include "e1000_hw.h" + +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_defines.h" + +/* + * Functions that should not be called directly from drivers but can be used + * by other files in this 'shared code' + */ +s32 igb_blink_led(struct e1000_hw *hw); +s32 igb_check_for_copper_link(struct e1000_hw *hw); +s32 igb_cleanup_led(struct e1000_hw *hw); +s32 igb_config_fc_after_link_up(struct e1000_hw *hw); +s32 igb_disable_pcie_master(struct e1000_hw *hw); +s32 igb_force_mac_fc(struct e1000_hw *hw); +s32 igb_get_auto_rd_done(struct e1000_hw *hw); +s32 igb_get_bus_info_pcie(struct e1000_hw *hw); +s32 igb_get_hw_semaphore(struct e1000_hw *hw); +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 igb_id_led_init(struct e1000_hw *hw); +s32 igb_led_off(struct e1000_hw *hw); +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 igb_setup_link(struct e1000_hw *hw); +s32 igb_validate_mdi_setting(struct e1000_hw *hw); +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +void igb_clear_hw_cntrs_base(struct e1000_hw *hw); +void igb_clear_vfta(struct e1000_hw *hw); +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); +void igb_config_collision_dist(struct e1000_hw *hw); +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void igb_mta_set(struct e1000_hw *hw, u32 hash_value); +void igb_put_hw_semaphore(struct e1000_hw *hw); +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +s32 igb_check_alt_mac_addr(struct e1000_hw *hw); + +bool igb_enable_mng_pass_thru(struct e1000_hw *hw); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c new file mode 100644 index 0000000..74f2f11 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -0,0 +1,446 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_mbx.h" + +/** + * igb_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * igb_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = 0; + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * igb_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 
0 : -E1000_ERR_MBX; +} + +/** + * igb_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = igb_poll_for_msg(hw, mbx_id); + + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * igb_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = igb_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = rd32(E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = 0; + wr32(E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * igb_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * igb_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * igb_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * 
returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = rd32(E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + if (vflre & (1 << vf_number)) { + ret_val = 0; + wr32(E1000_VFLRE, (1 << vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * igb_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + + + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + ret_val = 0; + + return ret_val; +} + +/** + * igb_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + igb_check_for_msg_pf(hw, vf_number); + igb_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * igb_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
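+ *
+ * In outline (a restatement of the code below, with no new API
+ * assumed): take the buffer lock via igb_obtain_mbx_lock_pf(), copy
+ * 'size' words out of E1000_VMBMEM(vf_number), then write
+ * E1000_P2VMAILBOX_ACK so the VF knows the buffer is free again.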
+ **/ +static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * e1000_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 igb_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = igb_read_mbx_pf; + mbx->ops.write = igb_write_mbx_pf; + mbx->ops.read_posted = igb_read_posted_mbx; + mbx->ops.write_posted = igb_write_posted_mbx; + mbx->ops.check_for_msg = igb_check_for_msg_pf; + mbx->ops.check_for_ack = igb_check_for_ack_pf; + mbx->ops.check_for_rst = igb_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return 0; +} + diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h new file mode 100644 index 0000000..eddb0f8 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -0,0 +1,77 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "e1000_hw.h"
+
+#define E1000_P2VMAILBOX_STS   0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */
+
+#define E1000_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
+
+/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is E1000_PF_*.
+ * Message ACKs are the value or'd with 0xF0000000
+ */
+#define E1000_VT_MSGTYPE_ACK   0x80000000 /* Messages below or'd with
+                                           * this are the ACK */
+#define E1000_VT_MSGTYPE_NACK  0x40000000 /* Messages below or'd with
+                                           * this are the NACK */
+#define E1000_VT_MSGTYPE_CTS   0x20000000 /* Indicates that VF is still
+                                             clear to send requests */
+#define E1000_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK  (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET         0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR  0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_VLAN      0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_LPE       0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC   0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_PF_CONTROL_MSG   0x0100 /* PF control message */
+
+s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_check_for_msg(struct e1000_hw *, u16);
+s32 igb_check_for_ack(struct e1000_hw *, u16);
+s32 igb_check_for_rst(struct e1000_hw *, u16);
+s32 igb_init_mbx_params_pf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
new file mode 100644
index 0000000..4040712
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -0,0 +1,713 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007-2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac.h"
+#include "e1000_nvm.h"
+
+/**
+ * igb_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * igb_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * igb_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+	u32 mask;
+
+	mask = 0x01 << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		wr32(E1000_EECD, eecd);
+		wrfl();
+
+		udelay(nvm->delay_usec);
+
+		igb_raise_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	wr32(E1000_EECD, eecd);
+}
+
+/**
+ * igb_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/
+static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+	u32 eecd;
+	u32 i;
+	u16 data;
+
+	eecd = rd32(E1000_EECD);
+
+	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+	data = 0;
+
+	for (i = 0; i < count; i++) {
+		data <<= 1;
+		igb_raise_eec_clk(hw, &eecd);
+
+		eecd = rd32(E1000_EECD);
+
+		eecd &= ~E1000_EECD_DI;
+		if (eecd & E1000_EECD_DO)
+			data |= 1;
+
+		igb_lower_eec_clk(hw, &eecd);
+	}
+
+	return data;
+}
+
+/**
+ * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
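+ *
+ * For instance (illustrative only): a caller passes E1000_NVM_POLL_READ
+ * to watch E1000_EERD, or E1000_NVM_POLL_WRITE to watch E1000_EEWR;
+ * either way the loop exits once the hardware sets
+ * E1000_NVM_RW_REG_DONE in the selected register.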
+ **/ +static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + s32 ret_val = -E1000_ERR_NVM; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = rd32(E1000_EERD); + else + reg = rd32(E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igb_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 igb_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = rd32(E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + s32 ret_val = 0; + + + wr32(E1000_EECD, eecd | E1000_EECD_REQ); + eecd = rd32(E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = rd32(E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -E1000_ERR_NVM; + } + + return ret_val; +} + +/** + * igb_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void igb_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = rd32(E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + igb_lower_eec_clk(hw, &eecd); + } +} + +/** + * igb_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void igb_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = rd32(E1000_EECD); + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); +} + +/** + * igb_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + s32 ret_val = 0; + u16 timeout = 0; + u8 spi_stat_reg; + + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + wr32(E1000_EECD, eecd); + wrfl(); + udelay(1); + timeout = NVM_MAX_RETRY_SPI; + + /* + * Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. 
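+		 *
+		 * Concretely (standard SPI EEPROM behavior, assumed rather
+		 * than stated in this patch): each pass shifts out the RDSR
+		 * opcode, reads back one status byte, and tests bit 0
+		 * (NVM_STATUS_RDY_SPI), the usual write-in-progress flag.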
+		 */
+		while (timeout) {
+			igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+					       hw->nvm.opcode_bits);
+			spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
+			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+				break;
+
+			udelay(5);
+			igb_standby_nvm(hw);
+			timeout--;
+		}
+
+		if (!timeout) {
+			hw_dbg("SPI NVM Status error\n");
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_read_nvm_spi - Read EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM.
+ **/
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i = 0;
+	s32 ret_val;
+	u16 word_in;
+	u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = nvm->ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_ready_nvm_eeprom(hw);
+	if (ret_val)
+		goto release;
+
+	igb_standby_nvm(hw);
+
+	if ((nvm->address_bits == 8) && (offset >= 128))
+		read_opcode |= NVM_A8_OPCODE_SPI;
+
+	/* Send the READ command (opcode + addr) */
+	igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+	igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+	/*
+	 * Read the data.  SPI NVMs increment the address with each byte
+	 * read and will roll over if reading beyond the end.  This allows
+	 * us to read the whole NVM from any offset
+	 */
+	for (i = 0; i < words; i++) {
+		word_in = igb_shift_in_eec_bits(hw, 16);
+		data[i] = (word_in >> 8) | (word_in << 8);
+	}
+
+release:
+	nvm->ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eerd = 0;
+	s32 ret_val = 0;
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+		       E1000_NVM_RW_REG_START;
+
+		wr32(E1000_EERD, eerd);
+		ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (rd32(E1000_EERD) >>
+			   E1000_NVM_RW_REG_DATA);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
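+ *
+ * Typical call sequence (a usage sketch, not code added by this patch):
+ *
+ *     ret_val = igb_write_nvm_spi(hw, offset, 1, &word);
+ *     if (!ret_val)
+ *             ret_val = igb_update_nvm_checksum(hw);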
+ **/ +s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 widx = 0; + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + msleep(10); + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igb_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + igb_standby_nvm(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + igb_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + igb_standby_nvm(hw); + break; + } + } + } + + msleep(10); +release: + hw->nvm.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_read_part_string - Read device part number + * @hw: pointer to the HW structure + * @part_num: pointer to device part number + * @part_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in part_num. 
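+ *
+ * Legacy-format example (illustrative word values): if word 0x8 reads
+ * 0x1234 and word 0x9 reads 0x5678, and no NVM_PBA_PTR_GUARD is
+ * present, the decode below produces the string "123456-078".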
+ **/ +s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pointer; + u16 offset; + u16 length; + + if (part_num == NULL) { + hw_dbg("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + /* + * if nvm_data is not ptr guard the PBA must be in legacy format which + * means pointer is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + hw_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (part_num_size < 11) { + hw_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pointer */ + part_num[0] = (nvm_data >> 12) & 0xF; + part_num[1] = (nvm_data >> 8) & 0xF; + part_num[2] = (nvm_data >> 4) & 0xF; + part_num[3] = nvm_data & 0xF; + part_num[4] = (pointer >> 12) & 0xF; + part_num[5] = (pointer >> 8) & 0xF; + part_num[6] = '-'; + part_num[7] = 0; + part_num[8] = (pointer >> 4) & 0xF; + part_num[9] = pointer & 0xF; + + /* put a null character on the end of our string */ + part_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (part_num[offset] < 0xA) + part_num[offset] += '0'; + else if (part_num[offset] < 0x10) + part_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg("NVM PBA number section invalid length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if part_num buffer is big enough */ + if (part_num_size < (((u32)length * 2) - 1)) { + hw_dbg("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pointer++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + part_num[offset * 2] = (u8)(nvm_data >> 8); + part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + part_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * igb_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. 
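+ *
+ * For example (illustrative register contents): RAL(0) = 0x12345678
+ * with 0x9ABC in the low 16 bits of RAH(0) yields the address
+ * 78:56:34:12:bc:9a; bytes are taken least-significant-first from
+ * each register.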
+ **/ +s32 igb_read_mac_addr(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(E1000_RAH(0)); + rar_low = rd32(E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igb_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 igb_validate_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 igb_update_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h new file mode 100644 index 0000000..a2a7ca9 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -0,0 +1,43 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_NVM_H_ +#define _E1000_NVM_H_ + +s32 igb_acquire_nvm(struct e1000_hw *hw); +void igb_release_nvm(struct e1000_hw *hw); +s32 igb_read_mac_addr(struct e1000_hw *hw); +s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); +s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, + u32 part_num_size); +s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_validate_nvm_checksum(struct e1000_hw *hw); +s32 igb_update_nvm_checksum(struct e1000_hw *hw); + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c new file mode 100644 index 0000000..e662554 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -0,0 +1,2341 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+
+static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+					     u16 *phy_ctrl);
+static s32 igb_wait_autoneg(struct e1000_hw *hw);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] =
+	{ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+	(sizeof(e1000_m88_cable_length_table) / \
+	 sizeof(e1000_m88_cable_length_table[0]))
+
+static const u16 e1000_igp_2_cable_length_table[] =
+	{ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+	  0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+	  6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+	  21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+	  40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+	  60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+	  83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+	  104, 109, 114, 118, 121, 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+	(sizeof(e1000_igp_2_cable_length_table) / \
+	 sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ * igb_check_reset_block - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return 0, otherwise
+ * return E1000_BLK_PHY_RESET (12).
+ **/
+s32 igb_check_reset_block(struct e1000_hw *hw)
+{
+	u32 manc;
+
+	manc = rd32(E1000_MANC);
+
+	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+	       E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ * igb_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+s32 igb_get_phy_id(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_id;
+
+	ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id = (u32)(phy_id << 16);
+	udelay(20);
+	ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+	phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_phy_reset_dsp - Reset PHY DSP
+ * @hw: pointer to the HW structure
+ *
+ * Reset the digital signal processor.
+ **/
+static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (!(hw->phy.ops.write_reg))
+		goto out;
+
+	ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+	if (ret_val)
+		goto out;
+
+	ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
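+ *
+ * As a usage sketch (illustrative only, not taken from this patch), a
+ * caller could fetch the first MII ID word with:
+ *	igb_read_phy_reg_mdic(hw, PHY_ID1, &phy_id);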
+ **/ +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + wr32(E1000_MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + *data = (u16) mdic; + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + wr32(E1000_MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + + /* + * Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
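+	 *
+	 * Note the 16-bit data field comes back most-significant byte
+	 * first, so the value is byte-swapped below before it is handed
+	 * to the caller; e.g. a raw I2CCMD data field of 0x1234 reaches
+	 * the caller as 0x3412.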
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return 0; +} + +/** + * igb_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* + * Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * igb_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
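+ *
+ * Offsets above MAX_PHY_MULTI_PAGE_REG are reached indirectly: the full
+ * offset is first written to IGP01E1000_PHY_PAGE_SELECT, then only the
+ * low bits (MAX_PHY_REG_ADDRESS & offset) go out in the MDIC command,
+ * mirroring the read path above.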
+ **/ +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 igb_copper_link_setup_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + if (phy->type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; + + ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 igb_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + if (phy->revision < E1000_REVISION_4) { + /* + * Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. 
*/ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + } + + /* Commit the changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* Commit the changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 igb_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + + /* + * Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* + * The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
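+	 * IGP1 is the exception, which is why the code below clears the
+	 * D3 LPLU state by hand for e1000_phy_igp before touching D0
+	 * LPLU.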
+ */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + if (phy->ops.set_d3_lplu_state) + ret_val = phy->ops.set_d3_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D3\n"); + goto out; + } + } + + /* disable lplu d0 during driver init */ + ret_val = phy->ops.set_d0_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D0\n"); + goto out; + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + goto out; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* + * when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 igb_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* + * Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* + * If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. 
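+	 * For example (sketch of the common case), when autoneg_mask is
+	 * E1000_ALL_SPEED_DUPLEX the PHY ends up advertising 10/100
+	 * half/full plus 1000 full; 1000 half is separately refused in
+	 * igb_phy_setup_autoneg().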
+ */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igb_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* + * Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* + * Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igb_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for " + "autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igb_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + goto out; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + + /* + * Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* + * Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* + * Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* + * RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* + * TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* + * Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + goto out; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = phy->ops.write_reg(hw, + PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 igb_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + + if (hw->mac.autoneg) { + /* + * Setup autoneg and flow control advertisement and perform + * autonegotiation. 
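+		 * The resulting sequence is roughly:
+		 *	igb_copper_link_autoneg()
+		 *	  -> igb_phy_setup_autoneg()  (program ADV and
+		 *	     1000T_CTRL)
+		 *	  -> restart autoneg via PHY_CONTROL, then
+		 *	     optionally igb_wait_autoneg()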
+ */ + ret_val = igb_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* + * PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* + * Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igb_phy_has_link(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igb_config_collision_dist(hw); + ret_val = igb_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* + * Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + goto out; + + hw_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = igb_phy_has_link(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on TX must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. 
+	 */
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+	ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Reset the phy to commit changes. */
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val)
+		goto out;
+
+	if (phy->autoneg_wait_to_complete) {
+		hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
+
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			if (hw->phy.type != e1000_phy_m88 ||
+			    hw->phy.id == I347AT4_E_PHY_ID ||
+			    hw->phy.id == M88E1112_E_PHY_ID) {
+				hw_dbg("Link taking longer than expected.\n");
+			} else {
+
+				/*
+				 * We didn't get link.
+				 * Reset the DSP and cross our fingers.
+				 */
+				ret_val = phy->ops.write_reg(hw,
+						M88E1000_PHY_PAGE_SELECT,
+						0x001d);
+				if (ret_val)
+					goto out;
+				ret_val = igb_phy_reset_dsp(hw);
+				if (ret_val)
+					goto out;
+			}
+		}
+
+		/* Try once more */
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
+					   100000, &link);
+		if (ret_val)
+			goto out;
+	}
+
+	if (hw->phy.type != e1000_phy_m88 ||
+	    hw->phy.id == I347AT4_E_PHY_ID ||
+	    hw->phy.id == M88E1112_E_PHY_ID)
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Resetting the phy means we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock from
+	 * the reset value of 2.5MHz.
+	 */
+	phy_data |= M88E1000_EPSCR_TX_CLK_25;
+	ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register. The
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take effect.
+ **/
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+					     u16 *phy_ctrl)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+
+	/* Turn off flow control when forcing speed/duplex */
+	hw->fc.current_mode = e1000_fc_none;
+
+	/* Force speed/duplex on the mac */
+	ctrl = rd32(E1000_CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~E1000_CTRL_SPD_SEL;
+
+	/* Disable Auto Speed Detection */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Disable autoneg on the phy */
+	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Forcing Full or Half Duplex?
*/ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + hw_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + hw_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + hw_dbg("Forcing 10mb\n"); + } + + igb_config_collision_dist(hw); + + wr32(E1000_CTRL, ctrl); +} + +/** + * igb_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 data; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * igb_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. 
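+ *
+ * The register/mask pair consulted depends on the PHY family: m88
+ * parts use M88E1000_PHY_SPEC_STATUS with M88E1000_PSSR_DOWNSHIFT,
+ * igp parts use IGP01E1000_PHY_LINK_HEALTH with
+ * IGP01E1000_PLHR_SS_DOWNGRADE; any other type reports no downshift.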
+ **/
+s32 igb_check_downshift(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	switch (phy->type) {
+	case e1000_phy_m88:
+	case e1000_phy_gg82563:
+		offset = M88E1000_PHY_SPEC_STATUS;
+		mask = M88E1000_PSSR_DOWNSHIFT;
+		break;
+	case e1000_phy_igp_2:
+	case e1000_phy_igp:
+	case e1000_phy_igp_3:
+		offset = IGP01E1000_PHY_LINK_HEALTH;
+		mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+		break;
+	default:
+		/* speed downshift not supported */
+		phy->speed_downgraded = false;
+		ret_val = 0;
+		goto out;
+	}
+
+	ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->speed_downgraded = (phy_data & mask) ? true : false;
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_check_polarity_m88 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+static s32 igb_check_polarity_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ * igb_check_polarity_igp - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY port status register, and the
+ * current speed (since there is no polarity at 100Mbps).
+ **/
+static s32 igb_check_polarity_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data, offset, mask;
+
+	/*
+	 * Polarity is determined based on the speed of
+	 * our connection.
+	 */
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset = IGP01E1000_PHY_PCS_INIT_REG;
+		mask = IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/*
+		 * This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset = IGP01E1000_PHY_PORT_STATUS;
+		mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = phy->ops.read_reg(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & mask)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ **/
+static s32 igb_wait_autoneg(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msleep(100);
+	}
+
+	/*
+	 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+	 * has completed.
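+	 * Callers that need certainty must re-read PHY_STATUS and test
+	 * MII_SR_AUTONEG_COMPLETE themselves; only register read failures
+	 * are reported through ret_val here.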
+	 */
+	return ret_val;
+}
+
+/**
+ * igb_phy_has_link - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+		     u32 usec_interval, bool *success)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	for (i = 0; i < iterations; i++) {
+		/*
+		 * Some PHYs require the PHY_STATUS register to be read
+		 * twice due to the link bit being sticky. No harm doing
+		 * it across the board.
+		 */
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val) {
+			/*
+			 * If the first read fails, another entity may have
+			 * ownership of the resources, wait and try again to
+			 * see if they have relinquished the resources yet.
+			 */
+			udelay(usec_interval);
+		}
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_LINK_STATUS)
+			break;
+		if (usec_interval >= 1000)
+			mdelay(usec_interval/1000);
+		else
+			udelay(usec_interval);
+	}
+
+	*success = (i < iterations) ? true : false;
+
+	return ret_val;
+}
+
+/**
+ * igb_get_cable_length_m88 - Determine cable length for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY specific status register to retrieve the cable length
+ * information. The cable length is determined by averaging the minimum and
+ * maximum values to get the "average" cable length. The m88 PHY has four
+ * possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ **/
+s32 igb_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, phy_data2, index, default_page, is_cm;
+
+	switch (hw->phy.id) {
+	case I347AT4_E_PHY_ID:
+		/* Remember the original page select and set it to 7 */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+					    &default_page);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+		if (ret_val)
+			goto out;
+
+		/* Get cable length from PHY Cable Diagnostics Control Reg */
+		ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		/* Check if the unit of cable length is meters or cm */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+		if (ret_val)
+			goto out;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+		/* Populate the phy structure with cable length in meters */
+		phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+		/* Reset the page select to its original value */
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+					     default_page);
+		if (ret_val)
+			goto out;
+		break;
+	case M88E1112_E_PHY_ID:
+		/* Remember the original page select and set it to 5 */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+					    &default_page);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+			M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+		if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		phy->min_cable_length = e1000_m88_cable_length_table[index];
+		phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+		phy->cable_length = (phy->min_cable_length +
+				     phy->max_cable_length) / 2;
+
+		/* Reset the page select to its original value */
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+					     default_page);
+		if (ret_val)
+			goto out;
+
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain value, the value can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_data, i, agc_value = 0;
+	u16 cur_agc_index, max_agc_index = 0;
+	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+		IGP02E1000_PHY_AGC_A,
+		IGP02E1000_PHY_AGC_B,
+		IGP02E1000_PHY_AGC_C,
+		IGP02E1000_PHY_AGC_D
+	};
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
+		if (ret_val)
+			goto out;
+
+		/*
+		 * Getting bits 15:9, which represent the combination of
+		 * coarse and fine gain values. The result is a number
+		 * that can be put into the lookup table to obtain the
+		 * approximate cable length.
+		 */
+		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+				IGP02E1000_AGC_LENGTH_MASK;
+
+		/* Array index bound check. */
+		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		    (cur_agc_index == 0)) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		/* Remove min & max AGC values from calculation. */
+		if (e1000_igp_2_cable_length_table[min_agc_index] >
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			min_agc_index = cur_agc_index;
+		if (e1000_igp_2_cable_length_table[max_agc_index] <
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			max_agc_index = cur_agc_index;
+
+		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+	}
+
+	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+		      e1000_igp_2_cable_length_table[max_agc_index]);
+	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+	/* Calculate cable length with the error range of +/- 10 meters. */
+	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * igb_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + hw_dbg("Phy info is only valid for copper media\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) + ? true : false; + + ret_val = igb_check_polarity_m88(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_igp(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? 
true : false;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = phy->ops.get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_phy_sw_reset - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register,
+ * setting the reset bit and writing the register back to the PHY.
+ **/
+s32 igb_phy_sw_reset(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 phy_ctrl;
+
+	if (!(hw->phy.ops.read_reg))
+		goto out;
+
+	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	udelay(1);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_phy_hw_reset - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+s32 igb_phy_hw_reset(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u32 ctrl;
+
+	ret_val = igb_check_reset_block(hw);
+	if (ret_val) {
+		ret_val = 0;
+		goto out;
+	}
+
+	ret_val = phy->ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ctrl = rd32(E1000_CTRL);
+	wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+	wrfl();
+
+	udelay(phy->reset_delay_us);
+
+	wr32(E1000_CTRL, ctrl);
+	wrfl();
+
+	udelay(150);
+
+	phy->ops.release(hw);
+
+	ret_val = phy->ops.get_cfg_done(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_phy_init_script_igp3 - Inits the IGP3 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
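+ *
+ * Every write below is a raw (register, value) pair from the init
+ * script; the write_reg return codes are not checked and the function
+ * always reports success.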
+ **/ +s32 igb_phy_init_script_igp3(struct e1000_hw *hw) +{ + hw_dbg("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to TX amplitude in Giga mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* + * Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* + * Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return 0; +} + +/** + * igb_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. 
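+ *
+ * The power state is a single PHY_CONTROL bit; as a sketch:
+ *	mii_reg &= ~MII_CR_POWER_DOWN;	clears it to power the PHY up
+ *	mii_reg |= MII_CR_POWER_DOWN;	sets it to power down (see below)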
+ **/
+void igb_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * igb_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down PHY to save power when interface is down and wake on LAN
+ * is not enabled.
+ **/
+void igb_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+	msleep(1);
+}
+
+/**
+ * igb_check_polarity_82580 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+static s32 igb_check_polarity_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
+			? e1000_rev_polarity_reversed
+			: e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex.  Clears the
+ * auto-crossover to force MDI manually.  Waits for link and returns 0
+ * if link comes up successfully, else -E1000_ERR_PHY (-2).
+ **/
+s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  82580 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
+	phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
+
+	ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
+	if (ret_val)
+		goto out;
+
+	hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
+
+		ret_val = igb_phy_has_link(hw,
+					   PHY_FORCE_LIMIT,
+					   100000,
+					   &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			hw_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = igb_phy_has_link(hw,
+					   PHY_FORCE_LIMIT,
+					   100000,
+					   &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_get_phy_info_82580 - Retrieve I82580 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up.  If link is up, then
+ * set/determine 10base-T extended distance and polarity correction.  Read
+ * PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ * determine the cable length and the local and remote receiver status.
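+ *
+ * The MDI-X/speed decode leans on the I82580_PHY_STATUS_2 bit layout
+ * defined in e1000_phy.h; condensed (gig_link is an illustrative
+ * local, not a field of this driver):
+ *
+ *	is_mdix  = !!(data & I82580_PHY_STATUS2_MDIX);		/* 0x0800 */
+ *	gig_link = (data & I82580_PHY_STATUS2_SPEED_MASK) ==
+ *		   I82580_PHY_STATUS2_SPEED_1000MBPS;		/* 0x0300 / 0x0200 */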
+ **/ +s32 igb_get_phy_info_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_82580(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; + + if ((data & I82580_PHY_STATUS2_SPEED_MASK) == + I82580_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_82580 - Determine cable length for 82580 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. + **/ +s32 igb_get_cable_length_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + + ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + goto out; + + length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> + I82580_DSTATUS_CABLE_LENGTH_SHIFT; + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + ret_val = -E1000_ERR_PHY; + + phy->cable_length = length; + +out: + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h new file mode 100644 index 0000000..8510797 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -0,0 +1,136 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +s32 igb_check_downshift(struct e1000_hw *hw); +s32 igb_check_reset_block(struct e1000_hw *hw); +s32 igb_copper_link_setup_igp(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); +s32 igb_get_phy_id(struct e1000_hw *hw); +s32 igb_get_phy_info_igp(struct e1000_hw *hw); +s32 igb_get_phy_info_m88(struct e1000_hw *hw); +s32 igb_phy_sw_reset(struct e1000_hw *hw); +s32 igb_phy_hw_reset(struct e1000_hw *hw); +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 igb_setup_copper_link(struct e1000_hw *hw); +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igb_power_up_phy_copper(struct e1000_hw *hw); +void igb_power_down_phy_copper(struct e1000_hw *hw); +s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +s32 igb_get_phy_info_82580(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +s32 igb_get_cable_length_82580(struct e1000_hw *hw); + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define I82580_ADDR_REG 16 +#define I82580_CFG_REG 22 +#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82580_CTRL_REG 23 +#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) + +/* 82580 specific PHY registers */ +#define I82580_PHY_CTRL_2 18 +#define I82580_PHY_LBK_CTRL 19 +#define I82580_PHY_STATUS_2 26 +#define I82580_PHY_DIAG_STATUS 31 + +/* I82580 PHY Status 2 */ +#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82580_PHY_STATUS2_MDIX 0x0800 +#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82580 PHY Control 2 */ +#define 
I82580_PHY_CTRL2_AUTO_MDIX 0x0400 +#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 + +/* I82580 PHY Diagnostics Status */ +#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 +/* Enable flexible speed on link-up */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h new file mode 100644 index 0000000..0990f6d --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -0,0 +1,354 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ + +/* IEEE 1588 TIMESYNCH */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +#define E1000_SAQF0 E1000_SAQF(0) +#define E1000_DAQF0 E1000_DAQF(0) +#define E1000_SPQF0 E1000_SPQF(0) +#define E1000_FTQF0 E1000_FTQF(0) +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* TX Rate Limit Registers */ +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ + +/* Split and Replication RX Control - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +/* + * Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. 
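+ * Each macro selects between two address banks (as the definitions
+ * below show): queues 0-3 use the legacy 0x02800 (Rx) and 0x03800 (Tx)
+ * bases with a 0x100 stride, while queues 4 and above use the 0x0C000
+ * (Rx) and 0x0E000 (Tx) bases with a 0x40 stride, e.g.
+ * E1000_RDT(5) = 0x0C018 + 5 * 0x40 = 0x0C158.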
+ * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ + : (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ + : (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ + : (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ + : (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ + : (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ + : (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ + : (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ + : (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ + : (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ + : (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ + : (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ + : (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ + : (0x0E028 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) +#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ + : (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ + : (0x0E03C + ((_n) * 0x40))) +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 
0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +/* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXPTC 0x04104 +/* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICRXATC 0x04108 +/* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C +/* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXATC 0x04110 +/* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQEC 0x04118 +/* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICTXQMTC 0x0411C +/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ +#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count 
Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ + +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */ +/* MSI-X Allocation Register (_i) - RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) +/* Redirection Table - RW Array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ + +/* VT Registers */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - 
RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +/* These act per VF so an array friendly macro is used */ +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine + * Filter - RW */ +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) + +#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) +#define rd32(reg) (readl(hw->hw_addr + reg)) +#define wrfl() ((void)rd32(E1000_STATUS)) + +#define array_wr32(reg, offset, value) \ + (writel(value, hw->hw_addr + reg + ((offset) << 2))) +#define array_rd32(reg, offset) \ + (readl(hw->hw_addr + reg + ((offset) << 2))) + +/* DMA Coalescing registers */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* Energy Efficient Ethernet "EEE" register */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ + +/* Thermal Sensor Register */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +#endif diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h new file mode 100644 index 0000000..265e151 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -0,0 +1,415 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGB_H_ +#define _IGB_H_ + +#include "e1000_mac.h" +#include "e1000_82575.h" + +#include +#include +#include +#include +#include + +struct igb_adapter; + +/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */ +#define IGB_START_ITR 648 + +/* TX/RX descriptor defines */ +#define IGB_DEFAULT_TXD 256 +#define IGB_MIN_TXD 80 +#define IGB_MAX_TXD 4096 + +#define IGB_DEFAULT_RXD 256 +#define IGB_MIN_RXD 80 +#define IGB_MAX_RXD 4096 + +#define IGB_DEFAULT_ITR 3 /* dynamic */ +#define IGB_MAX_ITR_USECS 10000 +#define IGB_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_Q_VECTORS 8 + +/* Transmit and receive queues */ +#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \ + (hw->mac.type > e1000_82575 ? 8 : 4)) +#define IGB_ABS_MAX_TX_QUEUES 8 +#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES + +#define IGB_MAX_VF_MC_ENTRIES 30 +#define IGB_MAX_VF_FUNCTIONS 8 +#define IGB_MAX_VFTA_ENTRIES 128 + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 vlans_enabled; + u32 flags; + unsigned long last_nack; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + u16 tx_rate; +}; + +#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ +#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ +#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGB_RX_PTHRESH 8 +#define IGB_RX_HTHRESH 8 +#define IGB_RX_WTHRESH 1 +#define IGB_TX_PTHRESH 8 +#define IGB_TX_HTHRESH 1 +#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ + adapter->msix_entries) ? 1 : 16) + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define IGB_RXBUFFER_64 64 /* Used for packet split */ +#define IGB_RXBUFFER_128 128 /* Used for packet split */ +#define IGB_RXBUFFER_1024 1024 +#define IGB_RXBUFFER_2048 2048 +#define IGB_RXBUFFER_16384 16384 + +#define MAX_STD_JUMBO_FRAME_SIZE 9234 + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define IGB_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? 
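+ * (Presumably the point is to defer the Rx tail-register update until
+ * IGB_RX_BUFFER_WRITE buffers have been refilled, amortising one MMIO
+ * write over many descriptors; a power of 2 keeps the bundling check a
+ * cheap bit mask.)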
*/ +#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define IGB_EEPROM_APME 0x0400 + +#ifndef IGB_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define IGB_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define IGB_MNG_VLAN_NONE -1 + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct igb_buffer { + struct sk_buff *skb; + dma_addr_t dma; + union { + /* TX */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + unsigned int bytecount; + u16 gso_segs; + u8 tx_flags; + u8 mapped_as_page; + }; + /* RX */ + struct { + struct page *page; + dma_addr_t page_dma; + u16 page_offset; + }; + }; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_q_vector { + struct igb_adapter *adapter; /* backlink */ + struct igb_ring *rx_ring; + struct igb_ring *tx_ring; + struct napi_struct napi; + + u32 eims_value; + u16 cpu; + + u16 itr_val; + u8 set_itr; + void __iomem *itr_register; + + char name[IFNAMSIZ + 9]; +}; + +struct igb_ring { + struct igb_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device pointer for dma mapping */ + dma_addr_t dma; /* phys address of the ring */ + void *desc; /* descriptor ring memory */ + unsigned int size; /* length of desc. ring in bytes */ + u16 count; /* number of desc. in the ring */ + u16 next_to_use; + u16 next_to_clean; + u8 queue_index; + u8 reg_idx; + void __iomem *head; + void __iomem *tail; + struct igb_buffer *buffer_info; /* array of buffer info structs */ + + unsigned int total_bytes; + unsigned int total_packets; + + u32 flags; + + union { + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + bool detect_tx_hung; + }; + /* RX */ + struct { + struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; + u32 rx_buffer_len; + }; + }; +}; + +#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */ +#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */ + +#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */ + +#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS) + +#define E1000_RX_DESC_ADV(R, i) \ + (&(((union e1000_adv_rx_desc *)((R).desc))[i])) +#define E1000_TX_DESC_ADV(R, i) \ + (&(((union e1000_adv_tx_desc *)((R).desc))[i])) +#define E1000_TX_CTXTDESC_ADV(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) + +/* igb_desc_unused - calculate if we have unused descriptors */ +static inline int igb_desc_unused(struct igb_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/* board specific private data structure */ +struct igb_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + + struct work_struct reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 
tx_timeout_factor; + struct timer_list blink_timer; + unsigned long led_status; + + /* TX */ + struct igb_ring *tx_ring[16]; + u32 tx_timeout_count; + + /* RX */ + struct igb_ring *rx_ring[16]; + int num_tx_queues; + int num_rx_queues; + + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + struct cyclecounter cycles; + struct timecounter clock; + struct timecompare compare; + struct hwtstamp_config hwtstamp_config; + + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + + int msg_enable; + + unsigned int num_q_vectors; + struct igb_q_vector *q_vector[MAX_Q_VECTORS]; + struct msix_entry *msix_entries; + u32 eims_enable_mask; + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ + unsigned long state; + unsigned int flags; + u32 eeprom_wol; + + struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES]; + u16 tx_ring_count; + u16 rx_ring_count; + unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; +}; + +#define IGB_FLAG_HAS_MSI (1 << 0) +#define IGB_FLAG_DCA_ENABLED (1 << 1) +#define IGB_FLAG_QUAD_PORT_A (1 << 2) +#define IGB_FLAG_QUEUE_PAIRS (1 << 3) +#define IGB_FLAG_DMAC (1 << 4) + +/* DMA Coalescing defines */ +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ + +#define IGB_82576_TSYNC_SHIFT 19 +#define IGB_82580_TSYNC_SHIFT 24 +#define IGB_TS_HDR_LEN 16 +enum e1000_state_t { + __IGB_TESTING, + __IGB_RESETTING, + __IGB_DOWN +}; + +enum igb_boards { + board_82575, +}; + +extern char igb_driver_name[]; +extern char igb_driver_version[]; + +extern int igb_up(struct igb_adapter *); +extern void igb_down(struct igb_adapter *); +extern void igb_reinit_locked(struct igb_adapter *); +extern void igb_reset(struct igb_adapter *); +extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +extern int igb_setup_tx_resources(struct igb_ring *); +extern int igb_setup_rx_resources(struct igb_ring *); +extern void igb_free_tx_resources(struct igb_ring *); +extern void igb_free_rx_resources(struct igb_ring *); +extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +extern void igb_setup_tctl(struct igb_adapter *); +extern void igb_setup_rctl(struct igb_adapter *); +extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *); +extern void igb_unmap_and_free_tx_resource(struct igb_ring *, + struct igb_buffer *); +extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int); +extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); +extern bool igb_has_link(struct igb_adapter *adapter); +extern void igb_set_ethtool_ops(struct net_device *); +extern void igb_power_up_link(struct igb_adapter *); + +static inline s32 igb_reset_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_write_phy_reg(struct e1000_hw *hw, 
u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +#endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c new file mode 100644 index 0000000..414b022 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -0,0 +1,2201 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for igb */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igb.h" + +struct igb_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGB_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ + .stat_offset = offsetof(struct igb_adapter, _stat) \ +} +static const struct igb_stats igb_gstrings_stats[] = { + IGB_STAT("rx_packets", stats.gprc), + IGB_STAT("tx_packets", stats.gptc), + IGB_STAT("rx_bytes", stats.gorc), + IGB_STAT("tx_bytes", stats.gotc), + IGB_STAT("rx_broadcast", stats.bprc), + IGB_STAT("tx_broadcast", stats.bptc), + IGB_STAT("rx_multicast", stats.mprc), + IGB_STAT("tx_multicast", stats.mptc), + IGB_STAT("multicast", stats.mprc), + IGB_STAT("collisions", stats.colc), + IGB_STAT("rx_crc_errors", stats.crcerrs), + IGB_STAT("rx_no_buffer_count", stats.rnbc), + IGB_STAT("rx_missed_errors", stats.mpc), + IGB_STAT("tx_aborted_errors", stats.ecol), + IGB_STAT("tx_carrier_errors", stats.tncrs), + IGB_STAT("tx_window_errors", stats.latecol), + IGB_STAT("tx_abort_late_coll", stats.latecol), + IGB_STAT("tx_deferred_ok", stats.dc), + IGB_STAT("tx_single_coll_ok", stats.scc), + IGB_STAT("tx_multi_coll_ok", stats.mcc), + IGB_STAT("tx_timeout_count", tx_timeout_count), + IGB_STAT("rx_long_length_errors", stats.roc), + IGB_STAT("rx_short_length_errors", stats.ruc), + IGB_STAT("rx_align_errors", stats.algnerrc), + IGB_STAT("tx_tcp_seg_good", stats.tsctc), + IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGB_STAT("rx_flow_control_xon", stats.xonrxc), + IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGB_STAT("tx_flow_control_xon", stats.xontxc), + IGB_STAT("tx_flow_control_xoff", stats.xofftxc), + IGB_STAT("rx_long_byte_count", stats.gorc), + IGB_STAT("tx_dma_out_of_sync", 
stats.doosync), + IGB_STAT("tx_smbus", stats.mgptc), + IGB_STAT("rx_smbus", stats.mgprc), + IGB_STAT("dropped_smbus", stats.mgpdc), + IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), +}; + +#define IGB_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} +static const struct igb_stats igb_gstrings_net_stats[] = { + IGB_NETDEV_STAT(rx_errors), + IGB_NETDEV_STAT(tx_errors), + IGB_NETDEV_STAT(tx_dropped), + IGB_NETDEV_STAT(rx_length_errors), + IGB_NETDEV_STAT(rx_over_errors), + IGB_NETDEV_STAT(rx_frame_errors), + IGB_NETDEV_STAT(rx_fifo_errors), + IGB_NETDEV_STAT(tx_fifo_errors), + IGB_NETDEV_STAT(tx_heartbeat_errors) +}; + +#define IGB_GLOBAL_STATS_LEN \ + (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) +#define IGB_NETDEV_STATS_LEN \ + (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) +#define IGB_RX_QUEUE_STATS_LEN \ + (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) + +#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ + +#define IGB_QUEUE_STATS_LEN \ + ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGB_RX_QUEUE_STATS_LEN) + \ + (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGB_TX_QUEUE_STATS_LEN)) +#define IGB_STATS_LEN \ + (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) + +static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) + +static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status; + + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + } + + ecmd->transceiver = XCVR_INTERNAL; + + status = rd32(E1000_STATUS); + + if (status & E1000_STATUS_LU) { + + if ((status & E1000_STATUS_SPEED_1000) || + hw->phy.media_type != e1000_media_type_copper) + ethtool_cmd_speed_set(ecmd, SPEED_1000); + else if (status & E1000_STATUS_SPEED_100) + ethtool_cmd_speed_set(ecmd, SPEED_100); + else + ethtool_cmd_speed_set(ecmd, SPEED_10); + + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, -1); + ecmd->duplex = -1; + } + + ecmd->autoneg = hw->mac.autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + return 0; +} + +static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed */ + if (igb_check_reset_block(hw)) { + dev_err(&adapter->pdev->dev, "Cannot change link " + "characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + ecmd->advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { + clear_bit(__IGB_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + return 0; +} + +static u32 igb_get_link(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_mac_info *mac = &adapter->hw.mac; + + /* + * If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igb_has_link(adapter); +} + +static void igb_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) + pause->rx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_tx_pause) + pause->tx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igb_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + msleep(1); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else { + igb_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == e1000_media_type_copper) ? 
+ igb_force_mac_fc(hw) : igb_setup_link(hw)); + } + + clear_bit(__IGB_RESETTING, &adapter->state); + return retval; +} + +static u32 igb_get_msglevel(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void igb_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int igb_get_regs_len(struct net_device *netdev) +{ +#define IGB_REGS_LEN 551 + return IGB_REGS_LEN * sizeof(u32); +} + +static void igb_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGB_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(E1000_CTRL); + regs_buff[1] = rd32(E1000_STATUS); + regs_buff[2] = rd32(E1000_CTRL_EXT); + regs_buff[3] = rd32(E1000_MDIC); + regs_buff[4] = rd32(E1000_SCTL); + regs_buff[5] = rd32(E1000_CONNSW); + regs_buff[6] = rd32(E1000_VET); + regs_buff[7] = rd32(E1000_LEDCTL); + regs_buff[8] = rd32(E1000_PBA); + regs_buff[9] = rd32(E1000_PBS); + regs_buff[10] = rd32(E1000_FRTIMER); + regs_buff[11] = rd32(E1000_TCPTIMER); + + /* NVM Register */ + regs_buff[12] = rd32(E1000_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read */ + regs_buff[13] = rd32(E1000_EICS); + regs_buff[14] = rd32(E1000_EICS); + regs_buff[15] = rd32(E1000_EIMS); + regs_buff[16] = rd32(E1000_EIMC); + regs_buff[17] = rd32(E1000_EIAC); + regs_buff[18] = rd32(E1000_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read */ + regs_buff[19] = rd32(E1000_ICS); + regs_buff[20] = rd32(E1000_ICS); + regs_buff[21] = rd32(E1000_IMS); + regs_buff[22] = rd32(E1000_IMC); + regs_buff[23] = rd32(E1000_IAC); + regs_buff[24] = rd32(E1000_IAM); + regs_buff[25] = rd32(E1000_IMIRVP); + + /* Flow Control */ + regs_buff[26] = rd32(E1000_FCAL); + regs_buff[27] = rd32(E1000_FCAH); + regs_buff[28] = rd32(E1000_FCTTV); + regs_buff[29] = rd32(E1000_FCRTL); + regs_buff[30] = rd32(E1000_FCRTH); + regs_buff[31] = rd32(E1000_FCRTV); + + /* Receive */ + regs_buff[32] = rd32(E1000_RCTL); + regs_buff[33] = rd32(E1000_RXCSUM); + regs_buff[34] = rd32(E1000_RLPML); + regs_buff[35] = rd32(E1000_RFCTL); + regs_buff[36] = rd32(E1000_MRQC); + regs_buff[37] = rd32(E1000_VT_CTL); + + /* Transmit */ + regs_buff[38] = rd32(E1000_TCTL); + regs_buff[39] = rd32(E1000_TCTL_EXT); + regs_buff[40] = rd32(E1000_TIPG); + regs_buff[41] = rd32(E1000_DTXCTL); + + /* Wake Up */ + regs_buff[42] = rd32(E1000_WUC); + regs_buff[43] = rd32(E1000_WUFC); + regs_buff[44] = rd32(E1000_WUS); + regs_buff[45] = rd32(E1000_IPAV); + regs_buff[46] = rd32(E1000_WUPL); + + /* MAC */ + regs_buff[47] = rd32(E1000_PCS_CFG0); + regs_buff[48] = rd32(E1000_PCS_LCTL); + regs_buff[49] = rd32(E1000_PCS_LSTAT); + regs_buff[50] = rd32(E1000_PCS_ANADV); + regs_buff[51] = rd32(E1000_PCS_LPAB); + regs_buff[52] = rd32(E1000_PCS_NPTX); + regs_buff[53] = rd32(E1000_PCS_LPABNP); + + /* Statistics */ + regs_buff[54] = adapter->stats.crcerrs; + regs_buff[55] = adapter->stats.algnerrc; + regs_buff[56] = adapter->stats.symerrs; + regs_buff[57] = adapter->stats.rxerrc; + regs_buff[58] = adapter->stats.mpc; + regs_buff[59] = adapter->stats.scc; + regs_buff[60] = adapter->stats.ecol; + regs_buff[61] = 
adapter->stats.mcc; + regs_buff[62] = adapter->stats.latecol; + regs_buff[63] = adapter->stats.colc; + regs_buff[64] = adapter->stats.dc; + regs_buff[65] = adapter->stats.tncrs; + regs_buff[66] = adapter->stats.sec; + regs_buff[67] = adapter->stats.htdpmc; + regs_buff[68] = adapter->stats.rlec; + regs_buff[69] = adapter->stats.xonrxc; + regs_buff[70] = adapter->stats.xontxc; + regs_buff[71] = adapter->stats.xoffrxc; + regs_buff[72] = adapter->stats.xofftxc; + regs_buff[73] = adapter->stats.fcruc; + regs_buff[74] = adapter->stats.prc64; + regs_buff[75] = adapter->stats.prc127; + regs_buff[76] = adapter->stats.prc255; + regs_buff[77] = adapter->stats.prc511; + regs_buff[78] = adapter->stats.prc1023; + regs_buff[79] = adapter->stats.prc1522; + regs_buff[80] = adapter->stats.gprc; + regs_buff[81] = adapter->stats.bprc; + regs_buff[82] = adapter->stats.mprc; + regs_buff[83] = adapter->stats.gptc; + regs_buff[84] = adapter->stats.gorc; + regs_buff[86] = adapter->stats.gotc; + regs_buff[88] = adapter->stats.rnbc; + regs_buff[89] = adapter->stats.ruc; + regs_buff[90] = adapter->stats.rfc; + regs_buff[91] = adapter->stats.roc; + regs_buff[92] = adapter->stats.rjc; + regs_buff[93] = adapter->stats.mgprc; + regs_buff[94] = adapter->stats.mgpdc; + regs_buff[95] = adapter->stats.mgptc; + regs_buff[96] = adapter->stats.tor; + regs_buff[98] = adapter->stats.tot; + regs_buff[100] = adapter->stats.tpr; + regs_buff[101] = adapter->stats.tpt; + regs_buff[102] = adapter->stats.ptc64; + regs_buff[103] = adapter->stats.ptc127; + regs_buff[104] = adapter->stats.ptc255; + regs_buff[105] = adapter->stats.ptc511; + regs_buff[106] = adapter->stats.ptc1023; + regs_buff[107] = adapter->stats.ptc1522; + regs_buff[108] = adapter->stats.mptc; + regs_buff[109] = adapter->stats.bptc; + regs_buff[110] = adapter->stats.tsctc; + regs_buff[111] = adapter->stats.iac; + regs_buff[112] = adapter->stats.rpthc; + regs_buff[113] = adapter->stats.hgptc; + regs_buff[114] = adapter->stats.hgorc; + regs_buff[116] = adapter->stats.hgotc; + regs_buff[118] = adapter->stats.lenerrs; + regs_buff[119] = adapter->stats.scvpc; + regs_buff[120] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[129 + i] = rd32(E1000_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[133 + i] = rd32(E1000_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[137 + i] = rd32(E1000_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[141 + i] = rd32(E1000_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[145 + i] = rd32(E1000_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[153 + i] = rd32(E1000_EITR(i)); + for (i = 0; i < 8; i++) + regs_buff[163 + i] = rd32(E1000_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); + for (i = 0; i < 16; i++) + regs_buff[179 + i] = rd32(E1000_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[195 + i] = rd32(E1000_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[211 + i] = rd32(E1000_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[215 + i] = rd32(E1000_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[219 + i] = rd32(E1000_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[223 + i] = rd32(E1000_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[227 + i] = rd32(E1000_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[235 + i] = 
rd32(E1000_TDWBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); + + for (i = 0; i < 4; i++) + regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); + for (i = 0; i < 32; i++) + regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); + + regs_buff[547] = rd32(E1000_TDFH); + regs_buff[548] = rd32(E1000_TDFT); + regs_buff[549] = rd32(E1000_TDFHS); + regs_buff[550] = rd32(E1000_TDFPC); + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; +} + +static int igb_get_eeprom_len(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int igb_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igb_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; 
i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the EEPROM if needed + * and flush shadow RAM for 82573 controllers */ + if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) + hw->nvm.ops.update(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void igb_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; + u16 eeprom_data; + + strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, igb_driver_version, + sizeof(drvinfo->version) - 1); + + /* EEPROM image version # is reported as firmware version # for + * 82575 controllers */ + adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); + sprintf(firmware_version, "%d.%d-%d", + (eeprom_data & 0xF000) >> 12, + (eeprom_data & 0x0FF0) >> 4, + eeprom_data & 0x000F); + + strncpy(drvinfo->fw_version, firmware_version, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); + drvinfo->n_stats = IGB_STATS_LEN; + drvinfo->testinfo_len = IGB_TEST_LEN; + drvinfo->regdump_len = igb_get_regs_len(netdev); + drvinfo->eedump_len = igb_get_eeprom_len(netdev); +} + +static void igb_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGB_MAX_RXD; + ring->tx_max_pending = IGB_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int igb_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_ring *temp_ring; + int i, err = 0; + u16 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + msleep(1); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); + else + temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igb_down(adapter); + + /* + * We can't just free everything and then setup again, + * because 
the ISRs in MSI-X mode get passed pointers + * to the tx and rx ring structs. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igb_ring)); + + temp_ring[i].count = new_tx_count; + err = igb_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igb_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igb_ring)); + + temp_ring[i].count = new_rx_count; + err = igb_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igb_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igb_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGB_RESETTING, &adapter->state); + return err; +} + +/* ethtool register test data */ +struct igb_reg_test { + u16 reg; + u16 reg_offset; + u16 array_len; + u16 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x100 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* i350 reg test */ +static struct igb_reg_test reg_test_i350[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i350, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82580 reg test */ +static struct igb_reg_test reg_test_82580[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* RDH is read-only for 82580, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82576 reg test */ +static struct igb_reg_test reg_test_82576[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* Enable all RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82576, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82575 register test */ +static struct igb_reg_test reg_test_82575[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82575, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pat, val; + static const u32 _test[] = + {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { + wr32(reg, (_test[pat] & write)); + val = rd32(reg) & mask; + if (val != (_test[pat] & write & mask)) { + dev_err(&adapter->pdev->dev, "pattern test reg %04X " + "failed: got 0x%08X expected 0x%08X\n", + reg, val, (_test[pat] & write & mask)); + *data = reg; + return 1; + } + } + + return 0; +} + +static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val; + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" + " got 0x%08X expected 0x%08X\n", reg, + (val & mask), (write & mask)); + *data = reg; + return 1; + } + + return 0; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int igb_reg_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_test *test; + u32 value, before, after; + u32 i, toggle; + + switch (adapter->hw.mac.type) { + case e1000_i350: + test = reg_test_i350; + toggle = 0x7FEFF3FF; + break; + case e1000_82580: + test = reg_test_82580; + toggle = 0x7FEFF3FF; + break; + case e1000_82576: + test = reg_test_82576; + toggle = 0x7FFFF3FF; + break; + default: + test = reg_test_82575; + toggle = 0x7FFFF3FF; + break; + } + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writable on newer MACs. 
+ */ + before = rd32(E1000_STATUS); + value = (rd32(E1000_STATUS) & toggle); + wr32(E1000_STATUS, toggle); + after = rd32(E1000_STATUS) & toggle; + if (value != after) { + dev_err(&adapter->pdev->dev, "failed STATUS register test " + "got: 0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + wr32(E1000_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + REG_PATTERN_TEST(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case SET_READ_TEST: + REG_SET_AND_CHECK(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + writel(test->write, + (adapter->hw.hw_addr + test->reg) + + (i * test->reg_offset)); + break; + case TABLE32_TEST: + REG_PATTERN_TEST(test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + REG_PATTERN_TEST(test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + REG_PATTERN_TEST((test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + } + test++; + } + + *data = 0; + return 0; +} + +static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) +{ + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16) NVM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t igb_test_intr(int irq, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *) data; + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= rd32(E1000_ICR); + + return IRQ_HANDLED; +} + +static int igb_intr_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 mask, ics_mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + if (request_irq(adapter->msix_entries[0].vector, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { + shared_int = false; + if (request_irq(irq, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, + netdev->name, adapter)) { + shared_int = false; + } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, + netdev->name, adapter)) { + *data = 1; + return -1; + } + dev_info(&adapter->pdev->dev, "testing %s interrupt\n", + (shared_int ? 
"shared" : "unshared")); + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + msleep(10); + + /* Define all writable bits for ICS */ + switch (hw->mac.type) { + case e1000_82575: + ics_mask = 0x37F47EDD; + break; + case e1000_82576: + ics_mask = 0x77D4FBFD; + break; + case e1000_82580: + ics_mask = 0x77DCFED5; + break; + case e1000_i350: + ics_mask = 0x77DCFED5; + break; + default: + ics_mask = 0x7FFFFFFF; + break; + } + + /* Test each interrupt */ + for (; i < 31; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!(mask & ics_mask)) + continue; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, mask); + wr32(E1000_ICS, mask); + wrfl(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMS, mask); + wr32(E1000_ICS, mask); + wrfl(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, ~mask); + wr32(E1000_ICS, ~mask); + wrfl(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + msleep(10); + + /* Unhook test interrupt handler */ + if (adapter->msix_entries) + free_irq(adapter->msix_entries[0].vector, adapter); + else + free_irq(irq, adapter); + + return *data; +} + +static void igb_free_desc_rings(struct igb_adapter *adapter) +{ + igb_free_tx_resources(&adapter->test_tx_ring); + igb_free_rx_resources(&adapter->test_rx_ring); +} + +static int igb_setup_desc_rings(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + struct e1000_hw *hw = &adapter->hw; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IGB_DEFAULT_TXD; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_tx_resources(tx_ring)) { + ret_val = 1; + goto err_nomem; + } + + igb_setup_tctl(adapter); + igb_configure_tx_ring(adapter, tx_ring); + + /* Setup Rx descriptor ring and Rx buffers */ + rx_ring->count = IGB_DEFAULT_RXD; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->netdev = adapter->netdev; + rx_ring->rx_buffer_len = IGB_RXBUFFER_2048; + rx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_rx_resources(rx_ring)) { + ret_val = 3; + goto err_nomem; + } + + /* set the default queue to queue 0 of PF */ + wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); + + /* enable receive ring */ + igb_setup_rctl(adapter); + igb_configure_rx_ring(adapter, rx_ring); + + igb_alloc_rx_buffers_adv(rx_ring, 
igb_desc_unused(rx_ring)); + + return 0; + +err_nomem: + igb_free_desc_rings(adapter); + return ret_val; +} + +static void igb_phy_disable_receiver(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + igb_write_phy_reg(hw, 29, 0x001F); + igb_write_phy_reg(hw, 30, 0x8FFC); + igb_write_phy_reg(hw, 29, 0x001A); + igb_write_phy_reg(hw, 30, 0x8FF0); +} + +static int igb_integrated_phy_loopback(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + + hw->mac.autoneg = false; + + if (hw->phy.type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else if (hw->phy.type == e1000_phy_82580) { + /* enable MII loopback */ + igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); + } + + ctrl_reg = rd32(E1000_CTRL); + + /* force 1000, set loopback */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD | /* Force Duplex to FULL */ + E1000_CTRL_SLU); /* Set link up enable bit */ + + if (hw->phy.type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + + wr32(E1000_CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
+ */ + if (hw->phy.type == e1000_phy_m88) + igb_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int igb_set_phy_loopback(struct igb_adapter *adapter) +{ + return igb_integrated_phy_loopback(adapter); +} + +static int igb_setup_loopback_test(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + reg = rd32(E1000_CTRL_EXT); + + /* use CTRL_EXT to identify link type as SGMII can appear as copper */ + if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { + + /* Enable DH89xxCC MPHY for near end loopback */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + reg = rd32(E1000_RCTL); + reg |= E1000_RCTL_LBM_TCVR; + wr32(E1000_RCTL, reg); + + wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); + + reg = rd32(E1000_CTRL); + reg &= ~(E1000_CTRL_RFCE | + E1000_CTRL_TFCE | + E1000_CTRL_LRST); + reg |= E1000_CTRL_SLU | + E1000_CTRL_FD; + wr32(E1000_CTRL, reg); + + /* Unset switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg &= ~E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + + /* Set PCS register for forced speed */ + reg = rd32(E1000_PCS_LCTL); + reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ + reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ + E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ + E1000_PCS_LCTL_FSD | /* Force Speed */ + E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ + wr32(E1000_PCS_LCTL, reg); + + return 0; + } + + return igb_set_phy_loopback(adapter); +} + +static void igb_loopback_cleanup(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { + u32 reg; + + /* Disable near end loopback on DH89xxCC */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + rctl = rd32(E1000_RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + wr32(E1000_RCTL, rctl); + + hw->mac.autoneg = true; + igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); + igb_phy_sw_reset(hw); + } +} + +static void igb_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size /= 2; + memset(&skb->data[frame_size], 0xAA, frame_size - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); +} + +static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) +{ + frame_size /= 2; + if (*(skb->data + 3) == 0xFF) { + if ((*(skb->data + frame_size + 10) == 0xBE) && + (*(skb->data + frame_size + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int 
igb_clean_test_rings(struct igb_ring *rx_ring, + struct igb_ring *tx_ring, + unsigned int size) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_buffer *buffer_info; + int rx_ntc, tx_ntc, count = 0; + u32 staterr; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + while (staterr & E1000_RXD_STAT_DD) { + /* check rx buffer */ + buffer_info = &rx_ring->buffer_info[rx_ntc]; + + /* unmap rx buffer, will be remapped by alloc_rx_buffers */ + dma_unmap_single(rx_ring->dev, + buffer_info->dma, + rx_ring->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + /* verify contents of skb */ + if (!igb_check_lbtest_frame(buffer_info->skb, size)) + count++; + + /* unmap buffer on tx side */ + buffer_info = &tx_ring->buffer_info[tx_ntc]; + igb_unmap_and_free_tx_resource(tx_ring, buffer_info); + + /* increment rx/tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + /* re-map buffers to ring, store next to clean values */ + igb_alloc_rx_buffers_adv(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int igb_run_loopback_test(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + igb_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { /* loop count loop */ + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from tx to rx */ + msleep(200); + + good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + break; + } + } /* end loop count loop */ + + /* free the original skb */ + kfree_skb(skb); + + return ret_val; +} + +static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) +{ + /* PHY loopback cannot be performed if SoL/IDER + * sessions are active */ + if (igb_check_reset_block(&adapter->hw)) { + dev_err(&adapter->pdev->dev, + "Cannot do PHY loopback test " + "when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + *data = igb_setup_desc_rings(adapter); + if (*data) + goto out; + *data = igb_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = igb_run_loopback_test(adapter); + igb_loopback_cleanup(adapter); + +err_loopback: + igb_free_desc_rings(adapter); +out: + return *data; +} + 
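The loop-count arithmetic in igb_run_loopback_test() above is worth unpacking: each pass puts 64 frames on the wire, and lc is sized so that the larger of the two descriptor rings wraps roughly twice over the whole test. Below is a minimal standalone sketch of the same calculation; the 256-entry ring sizes are example values (matching this driver's IGB_DEFAULT_TXD/IGB_DEFAULT_RXD defaults), not part of the patch itself.

#include <stdio.h>

/* Mirrors the loop-count formula from igb_run_loopback_test():
 * wrap the larger descriptor ring about twice, using 64
 * send/receive pairs per pass. Ring sizes are example values. */
int main(void)
{
	unsigned int tx_count = 256, rx_count = 256;
	unsigned int larger = (rx_count <= tx_count) ? tx_count : rx_count;
	unsigned int lc = ((larger / 64) * 2) + 1;

	/* j runs 0..lc inclusive, so (lc + 1) * 64 frames total */
	printf("lc = %u, frames = %u\n", lc, (lc + 1) * 64);
	return 0;
}

For 256-entry rings this gives lc = 9 and 640 frames, i.e. the ring is traversed about two and a half times, which is what the "wrap the largest ring a number of times" comment is after.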
+static int igb_link_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + hw->mac.serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes */ + do { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.autoneg) + msleep(4000); + + if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static void igb_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex, autoneg; + bool if_running = netif_running(netdev); + + set_bit(__IGB_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + dev_info(&adapter->pdev->dev, "offline testing starting\n"); + + /* power up link for link test */ + igb_power_up_link(adapter); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (igb_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + igb_reset(adapter); + + if (igb_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + /* power up link for loopback test */ + igb_power_up_link(adapter); + if (igb_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = true; + igb_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = false; + + clear_bit(__IGB_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + dev_info(&adapter->pdev->dev, "online testing starting\n"); + + /* PHY is powered down when interface is down */ + if (if_running && igb_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + else + data[4] = 0; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__IGB_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static int igb_wol_exclusion(struct igb_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82575GB_QUAD_COPPER: + /* WoL not supported */ + wol->supported = 0; + break; + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + /* Wake events not supported on port B */ + if (rd32(E1000_STATUS) & 
E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + /* quad port adapters only support WoL on port A */ + if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled */ + if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware */ + if (igb_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (igb_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +/* bit defines for adapter->led_status */ +#define IGB_LED_ON 0 + +static int igb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + igb_blink_led(hw); + return 2; + case ETHTOOL_ID_ON: + igb_blink_led(hw); + break; + case ETHTOOL_ID_OFF: + igb_led_off(hw); + break; + case ETHTOOL_ID_INACTIVE: + igb_led_off(hw); + clear_bit(IGB_LED_ON, &adapter->led_status); + igb_cleanup_led(hw); + break; + } + + return 0; +} + +static int igb_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 3) && + (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->tx_coalesce_usecs == 2)) + return -EINVAL; + + if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGB_FLAG_DMAC) + adapter->flags &= ~IGB_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + if (q_vector->rx_ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +static int igb_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igb_nway_reset(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + igb_reinit_locked(adapter); + return 0; +} + +static int igb_get_sset_count(struct net_device 
*netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGB_STATS_LEN; + case ETH_SS_TEST: + return IGB_TEST_LEN; + default: + return -ENOTSUPP; + } +} + +static void igb_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igb_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, net_stats); + + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igb_gstrings_stats[i].stat_offset; + data[i] = (igb_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; + data[i] = (igb_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_bh(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i+1] = ring->tx_stats.bytes; + data[i+2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_bh(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start)); + data[i+2] += restart2; + + i += IGB_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_bh(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i+1] = ring->rx_stats.bytes; + data[i+2] = ring->rx_stats.drops; + data[i+3] = ring->rx_stats.csum_err; + data[i+4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); + i += IGB_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igb_gstrings_test, + IGB_TEST_LEN*ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { + memcpy(p, igb_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) { + memcpy(p, igb_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_restart", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_drops", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_csum_err", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_alloc_failed", i); + p += ETH_GSTRING_LEN; + } +/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops igb_ethtool_ops = { + .get_settings = igb_get_settings, + .set_settings = igb_set_settings, + .get_drvinfo = igb_get_drvinfo, + .get_regs_len = igb_get_regs_len, + .get_regs = igb_get_regs, + .get_wol = igb_get_wol, + .set_wol = igb_set_wol, + 
.get_msglevel = igb_get_msglevel, + .set_msglevel = igb_set_msglevel, + .nway_reset = igb_nway_reset, + .get_link = igb_get_link, + .get_eeprom_len = igb_get_eeprom_len, + .get_eeprom = igb_get_eeprom, + .set_eeprom = igb_set_eeprom, + .get_ringparam = igb_get_ringparam, + .set_ringparam = igb_set_ringparam, + .get_pauseparam = igb_get_pauseparam, + .set_pauseparam = igb_set_pauseparam, + .self_test = igb_diag_test, + .get_strings = igb_get_strings, + .set_phys_id = igb_set_phys_id, + .get_sset_count = igb_get_sset_count, + .get_ethtool_stats = igb_get_ethtool_stats, + .get_coalesce = igb_get_coalesce, + .set_coalesce = igb_set_coalesce, +}; + +void igb_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops); +} diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c new file mode 100644 index 0000000..40d4c40 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -0,0 +1,6890 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/bitops.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/netdevice.h> +#include <linux/ipv6.h> +#include <linux/slab.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/net_tstamp.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/pci.h> +#include <linux/pci-aspm.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/if_ether.h> +#include <linux/aer.h> +#include <linux/prefetch.h> +#ifdef CONFIG_IGB_DCA +#include <linux/dca.h> +#endif +#include "igb.h" + +#define MAJ 3 +#define MIN 0 +#define BUILD 6 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ +__stringify(BUILD) "-k" +char igb_driver_name[] = "igb"; +char igb_driver_version[] = DRV_VERSION; +static const char igb_driver_string[] = + "Intel(R) Gigabit Ethernet Network Driver"; +static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation."; + +static const struct e1000_info *igb_info_tbl[] = { + [board_82575] = &e1000_82575_info, +}; + +static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, igb_pci_tbl); + +void igb_reset(struct igb_adapter *); +static int igb_setup_all_tx_resources(struct igb_adapter *); +static int igb_setup_all_rx_resources(struct igb_adapter *); +static void igb_free_all_tx_resources(struct igb_adapter *); +static void igb_free_all_rx_resources(struct igb_adapter *); +static void igb_setup_mrqc(struct igb_adapter *); +static int igb_probe(struct pci_dev *, const struct pci_device_id *); +static void __devexit igb_remove(struct pci_dev *pdev); +static void igb_init_hw_timer(struct igb_adapter *adapter); +static int igb_sw_init(struct igb_adapter *); +static int igb_open(struct net_device *); +static int igb_close(struct net_device *); +static void igb_configure_tx(struct igb_adapter *); +static void igb_configure_rx(struct igb_adapter *); +static void igb_clean_all_tx_rings(struct igb_adapter *); +static void igb_clean_all_rx_rings(struct igb_adapter *); +static void igb_clean_tx_ring(struct igb_ring *); +static void igb_clean_rx_ring(struct igb_ring *); +static void igb_set_rx_mode(struct net_device *); +static void igb_update_phy_info(unsigned long); +static void igb_watchdog(unsigned long); +static void igb_watchdog_task(struct work_struct *); +static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *); +static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, + struct 
rtnl_link_stats64 *stats); +static int igb_change_mtu(struct net_device *, int); +static int igb_set_mac(struct net_device *, void *); +static void igb_set_uta(struct igb_adapter *adapter); +static irqreturn_t igb_intr(int irq, void *); +static irqreturn_t igb_intr_msi(int irq, void *); +static irqreturn_t igb_msix_other(int irq, void *); +static irqreturn_t igb_msix_ring(int irq, void *); +#ifdef CONFIG_IGB_DCA +static void igb_update_dca(struct igb_q_vector *); +static void igb_setup_dca(struct igb_adapter *); +#endif /* CONFIG_IGB_DCA */ +static bool igb_clean_tx_irq(struct igb_q_vector *); +static int igb_poll(struct napi_struct *, int); +static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int); +static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); +static void igb_tx_timeout(struct net_device *); +static void igb_reset_task(struct work_struct *); +static void igb_vlan_mode(struct net_device *netdev, u32 features); +static void igb_vlan_rx_add_vid(struct net_device *, u16); +static void igb_vlan_rx_kill_vid(struct net_device *, u16); +static void igb_restore_vlan(struct igb_adapter *); +static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8); +static void igb_ping_all_vfs(struct igb_adapter *); +static void igb_msg_task(struct igb_adapter *); +static void igb_vmm_control(struct igb_adapter *); +static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); +static void igb_restore_vf_multicasts(struct igb_adapter *adapter); +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos); +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); +static void igb_check_vf_rate_limit(struct igb_adapter *); + +#ifdef CONFIG_PM +static int igb_suspend(struct pci_dev *, pm_message_t); +static int igb_resume(struct pci_dev *); +#endif +static void igb_shutdown(struct pci_dev *); +#ifdef CONFIG_IGB_DCA +static int igb_notify_dca(struct notifier_block *, unsigned long, void *); +static struct notifier_block dca_notifier = { + .notifier_call = igb_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void igb_netpoll(struct net_device *); +#endif +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs = 0; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " + "per physical function"); +#endif /* CONFIG_PCI_IOV */ + +static pci_ers_result_t igb_io_error_detected(struct pci_dev *, + pci_channel_state_t); +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); +static void igb_io_resume(struct pci_dev *); + +static struct pci_error_handlers igb_err_handler = { + .error_detected = igb_io_error_detected, + .slot_reset = igb_io_slot_reset, + .resume = igb_io_resume, +}; + + +static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, + .remove = __devexit_p(igb_remove), +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = igb_suspend, + .resume = igb_resume, +#endif + .shutdown = igb_shutdown, + .err_handler = &igb_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); +MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +struct igb_reg_info { + u32 ofs; + char *name; +}; + 
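The DRV_VERSION string above is assembled with the kernel's __stringify() helper, which relies on two levels of macro expansion: the outer macro expands MAJ, MIN and BUILD to their numeric tokens first, and only then does the inner macro apply the # operator. A minimal userspace sketch of the same idiom follows; the kernel's own definition lives in <linux/stringify.h>, and the MAJ/MIN/BUILD values here simply echo the ones defined above.

#include <stdio.h>

/* Two-step stringification, the same idiom as <linux/stringify.h>.
 * A single-step #x would produce the literal string "MAJ", not "3". */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define MAJ	3
#define MIN	0
#define BUILD	6

int main(void)
{
	/* adjacent string literals concatenate to "3.0.6-k" */
	puts(__stringify(MAJ) "." __stringify(MIN) "."
	     __stringify(BUILD) "-k");
	return 0;
}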
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+	/* General Registers */
+	{E1000_CTRL, "CTRL"},
+	{E1000_STATUS, "STATUS"},
+	{E1000_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{E1000_ICR, "ICR"},
+
+	/* RX Registers */
+	{E1000_RCTL, "RCTL"},
+	{E1000_RDLEN(0), "RDLEN"},
+	{E1000_RDH(0), "RDH"},
+	{E1000_RDT(0), "RDT"},
+	{E1000_RXDCTL(0), "RXDCTL"},
+	{E1000_RDBAL(0), "RDBAL"},
+	{E1000_RDBAH(0), "RDBAH"},
+
+	/* TX Registers */
+	{E1000_TCTL, "TCTL"},
+	{E1000_TDBAL(0), "TDBAL"},
+	{E1000_TDBAH(0), "TDBAH"},
+	{E1000_TDLEN(0), "TDLEN"},
+	{E1000_TDH(0), "TDH"},
+	{E1000_TDT(0), "TDT"},
+	{E1000_TXDCTL(0), "TXDCTL"},
+	{E1000_TDFH, "TDFH"},
+	{E1000_TDFT, "TDFT"},
+	{E1000_TDFHS, "TDFHS"},
+	{E1000_TDFPC, "TDFPC"},
+
+	/* List Terminator */
+	{}
+};
+
+/*
+ * igb_regdump - register printout routine
+ */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+	int n = 0;
+	char rname[16];
+	u32 regs[8];
+
+	switch (reginfo->ofs) {
+	case E1000_RDLEN(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDLEN(n));
+		break;
+	case E1000_RDH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDH(n));
+		break;
+	case E1000_RDT(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDT(n));
+		break;
+	case E1000_RXDCTL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RXDCTL(n));
+		break;
+	case E1000_RDBAL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDBAL(n));
+		break;
+	case E1000_RDBAH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDBAH(n));
+		break;
+	case E1000_TDBAL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAL(n));
+		break;
+	case E1000_TDBAH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAH(n));
+		break;
+	case E1000_TDLEN(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDLEN(n));
+		break;
+	case E1000_TDH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDH(n));
+		break;
+	case E1000_TDT(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDT(n));
+		break;
+	case E1000_TXDCTL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TXDCTL(n));
+		break;
+	default:
+		printk(KERN_INFO "%-15s %08x\n",
+		       reginfo->name, rd32(reginfo->ofs));
+		return;
+	}
+
+	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+	printk(KERN_INFO "%-15s ", rname);
+	for (n = 0; n < 4; n++)
+		printk(KERN_CONT "%08x ", regs[n]);
+	printk(KERN_CONT "\n");
+}
+
+/*
+ * igb_dump - Print registers, tx-rings and rx-rings
+ */
+static void igb_dump(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct igb_reg_info *reginfo;
+	int n = 0;
+	struct igb_ring *tx_ring;
+	union e1000_adv_tx_desc *tx_desc;
+	struct my_u0 { u64 a; u64 b; } *u0;
+	struct igb_buffer *buffer_info;
+	struct igb_ring *rx_ring;
+	union e1000_adv_rx_desc *rx_desc;
+	u32 staterr;
+	int i = 0;
+
+	if (!netif_msg_hw(adapter))
+		return;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		printk(KERN_INFO "Device Name     state            "
+			"trans_start      last_rx\n");
+		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+			netdev->name,
+			netdev->state,
+			netdev->trans_start,
+			netdev->last_rx);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	printk(KERN_INFO " Register Name   Value\n");
+	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		igb_regdump(hw, reginfo);
+	}
+
+	/* Print TX Ring Summary */
+	if (!netdev || !netif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev,
"TX Rings Summary\n"); + printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" + " leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; + printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "T [desc] [address 63:0 ] " + "[PlPOCIStDDM Ln] [bi->dma ] " + "leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" + " %04X %3X %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC/U\n"); + else if (i == tx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + + if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, phys_to_virt(buffer_info->dma), + buffer_info->length, true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + printk(KERN_INFO "Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + printk(KERN_INFO " %5d %5X %5X\n", n, + rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * 
+------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "R [desc] [ PktBuf A0] " + "[ HeadBuf DD] [bi->dma ] [bi->skb] " + "<-- Adv Rx Read format\n"); + printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] " + "[vl er S cks ln] ---------------- [bi->skb] " + "<-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + printk(KERN_INFO "RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + buffer_info->skb); + } else { + printk(KERN_INFO "R [0x%03X] %016llX " + "%016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + buffer_info->skb); + + if (netif_msg_pktdata(adapter)) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + phys_to_virt(buffer_info->dma), + rx_ring->rx_buffer_len, true); + if (rx_ring->rx_buffer_len + < IGB_RXBUFFER_1024) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + phys_to_virt( + buffer_info->page_dma + + buffer_info->page_offset), + PAGE_SIZE/2, true); + } + } + + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + + } + } + +exit: + return; +} + + +/** + * igb_read_clock - read raw cycle counter (to be used by time counter) + */ +static cycle_t igb_read_clock(const struct cyclecounter *tc) +{ + struct igb_adapter *adapter = + container_of(tc, struct igb_adapter, cycles); + struct e1000_hw *hw = &adapter->hw; + u64 stamp = 0; + int shift = 0; + + /* + * The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we never + * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it. + */ + if (hw->mac.type == e1000_82580) { + stamp = rd32(E1000_SYSTIMR) >> 8; + shift = IGB_82580_TSYNC_SHIFT; + } + + stamp |= (u64)rd32(E1000_SYSTIML) << shift; + stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32); + return stamp; +} + +/** + * igb_get_hw_dev - return device + * used by hardware layer to print debugging information + **/ +struct net_device *igb_get_hw_dev(struct e1000_hw *hw) +{ + struct igb_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * igb_init_module - Driver Registration Routine + * + * igb_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init igb_init_module(void) +{ + int ret; + printk(KERN_INFO "%s - version %s\n", + igb_driver_string, igb_driver_version); + + printk(KERN_INFO "%s\n", igb_copyright); + +#ifdef CONFIG_IGB_DCA + dca_register_notify(&dca_notifier); +#endif + ret = pci_register_driver(&igb_driver); + return ret; +} + +module_init(igb_init_module); + +/** + * igb_exit_module - Driver Exit Cleanup Routine + * + * igb_exit_module is called just before the driver is removed + * from memory. 
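+ *
+ * (Editorial note: the DCA notifier is unregistered before
+ * pci_unregister_driver(), presumably so that no igb_notify_dca()
+ * callbacks can race against the teardown of the PCI driver.)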
+ **/ +static void __exit igb_exit_module(void) +{ +#ifdef CONFIG_IGB_DCA + dca_unregister_notify(&dca_notifier); +#endif + pci_unregister_driver(&igb_driver); +} + +module_exit(igb_exit_module); + +#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) +/** + * igb_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + **/ +static void igb_cache_ring_register(struct igb_adapter *adapter) +{ + int i = 0, j = 0; + u32 rbase_offset = adapter->vfs_allocated_count; + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* The queues are allocated for virtualization such that VF 0 + * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. + * In order to avoid collision we start at the first free queue + * and continue consuming queues in the same sequence + */ + if (adapter->vfs_allocated_count) { + for (; i < adapter->rss_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + + Q_IDX_82576(i); + } + case e1000_82575: + case e1000_82580: + case e1000_i350: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = rbase_offset + j; + break; + } +} + +static void igb_free_queues(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + kfree(adapter->tx_ring[i]); + adapter->tx_ring[i] = NULL; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + kfree(adapter->rx_ring[i]); + adapter->rx_ring[i] = NULL; + } + adapter->num_rx_queues = 0; + adapter->num_tx_queues = 0; +} + +/** + * igb_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. + **/ +static int igb_alloc_queues(struct igb_adapter *adapter) +{ + struct igb_ring *ring; + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); + if (!ring) + goto err; + ring->count = adapter->tx_ring_count; + ring->queue_index = i; + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + /* For 82575, context index must be unique per ring. 
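+		 * (Editorial gloss: the IGB_RING_FLAG_TX_CTX_IDX flag set
+		 * below appears to make the transmit path use a per-ring
+		 * offload context index rather than a shared one.)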
*/ + if (adapter->hw.mac.type == e1000_82575) + ring->flags = IGB_RING_FLAG_TX_CTX_IDX; + adapter->tx_ring[i] = ring; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); + if (!ring) + goto err; + ring->count = adapter->rx_ring_count; + ring->queue_index = i; + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */ + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM; + adapter->rx_ring[i] = ring; + } + + igb_cache_ring_register(adapter); + + return 0; + +err: + igb_free_queues(adapter); + + return -ENOMEM; +} + +#define IGB_N0_QUEUE -1 +static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) +{ + u32 msixbm = 0; + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + u32 ivar, index; + int rx_queue = IGB_N0_QUEUE; + int tx_queue = IGB_N0_QUEUE; + + if (q_vector->rx_ring) + rx_queue = q_vector->rx_ring->reg_idx; + if (q_vector->tx_ring) + tx_queue = q_vector->tx_ring->reg_idx; + + switch (hw->mac.type) { + case e1000_82575: + /* The 82575 assigns vectors using a bitmask, which matches the + bitmask for the EICR/EIMS/EIMC registers. To assign one + or more queues to a vector, we write the appropriate bits + into the MSIXBM register for that vector. */ + if (rx_queue > IGB_N0_QUEUE) + msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; + if (tx_queue > IGB_N0_QUEUE) + msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; + if (!adapter->msix_entries && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; + array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); + q_vector->eims_value = msixbm; + break; + case e1000_82576: + /* 82576 uses a table-based method for assigning vectors. + Each queue has a single entry in the table to which we write + a vector number along with a "valid" bit. Sadly, the layout + of the table is somewhat counterintuitive. */ + if (rx_queue > IGB_N0_QUEUE) { + index = (rx_queue & 0x7); + ivar = array_rd32(E1000_IVAR0, index); + if (rx_queue < 8) { + /* vector goes into low byte of register */ + ivar = ivar & 0xFFFFFF00; + ivar |= msix_vector | E1000_IVAR_VALID; + } else { + /* vector goes into third byte of register */ + ivar = ivar & 0xFF00FFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 16; + } + array_wr32(E1000_IVAR0, index, ivar); + } + if (tx_queue > IGB_N0_QUEUE) { + index = (tx_queue & 0x7); + ivar = array_rd32(E1000_IVAR0, index); + if (tx_queue < 8) { + /* vector goes into second byte of register */ + ivar = ivar & 0xFFFF00FF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 8; + } else { + /* vector goes into high byte of register */ + ivar = ivar & 0x00FFFFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 24; + } + array_wr32(E1000_IVAR0, index, ivar); + } + q_vector->eims_value = 1 << msix_vector; + break; + case e1000_82580: + case e1000_i350: + /* 82580 uses the same table-based approach as 82576 but has fewer + entries as a result we carry over for queues greater than 4. 
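+		 * (Worked example, added editorially: queues pair up, so
+		 * rx queue 5 maps to IVAR index 5 >> 1 = 2 and, being odd,
+		 * takes the third byte lane of that register, while rx
+		 * queue 4 shares index 2 but takes the low byte.)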
*/ + if (rx_queue > IGB_N0_QUEUE) { + index = (rx_queue >> 1); + ivar = array_rd32(E1000_IVAR0, index); + if (rx_queue & 0x1) { + /* vector goes into third byte of register */ + ivar = ivar & 0xFF00FFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 16; + } else { + /* vector goes into low byte of register */ + ivar = ivar & 0xFFFFFF00; + ivar |= msix_vector | E1000_IVAR_VALID; + } + array_wr32(E1000_IVAR0, index, ivar); + } + if (tx_queue > IGB_N0_QUEUE) { + index = (tx_queue >> 1); + ivar = array_rd32(E1000_IVAR0, index); + if (tx_queue & 0x1) { + /* vector goes into high byte of register */ + ivar = ivar & 0x00FFFFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 24; + } else { + /* vector goes into second byte of register */ + ivar = ivar & 0xFFFF00FF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 8; + } + array_wr32(E1000_IVAR0, index, ivar); + } + q_vector->eims_value = 1 << msix_vector; + break; + default: + BUG(); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igb_configure_msix - Configure MSI-X hardware + * + * igb_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void igb_configure_msix(struct igb_adapter *adapter) +{ + u32 tmp; + int i, vector = 0; + struct e1000_hw *hw = &adapter->hw; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case e1000_82575: + tmp = rd32(E1000_CTRL_EXT); + /* enable MSI-X PBA support*/ + tmp |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask interrupts upon ICR read. */ + tmp |= E1000_CTRL_EXT_EIAME; + tmp |= E1000_CTRL_EXT_IRCA; + + wr32(E1000_CTRL_EXT, tmp); + + /* enable msix_other interrupt */ + array_wr32(E1000_MSIXBM(0), vector++, + E1000_EIMS_OTHER); + adapter->eims_other = E1000_EIMS_OTHER; + + break; + + case e1000_82576: + case e1000_82580: + case e1000_i350: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. */ + wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = 1 << vector; + tmp = (vector++ | E1000_IVAR_VALID) << 8; + + wr32(E1000_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igb_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igb_request_msix - Initialize MSI-X interrupts + * + * igb_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
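+ *
+ * (Editorial summary: vector 0 is bound to igb_msix_other for non-queue
+ * causes such as link changes; every remaining vector is bound to
+ * igb_msix_ring under a name like "eth0-TxRx-0", which is what shows up
+ * in /proc/interrupts.)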
+ **/ +static int igb_request_msix(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + int i, err = 0, vector = 0; + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_other, 0, netdev->name, adapter); + if (err) + goto out; + vector++; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); + + if (q_vector->rx_ring && q_vector->tx_ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx_ring->queue_index); + else if (q_vector->tx_ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx_ring->queue_index); + else if (q_vector->rx_ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx_ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto out; + vector++; + } + + igb_configure_msix(adapter); + return 0; +out: + return err; +} + +static void igb_reset_interrupt_capability(struct igb_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); + } +} + +/** + * igb_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void igb_free_q_vectors(struct igb_adapter *adapter) +{ + int v_idx; + + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + adapter->q_vector[v_idx] = NULL; + if (!q_vector) + continue; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + } + adapter->num_q_vectors = 0; +} + +/** + * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) +{ + igb_free_queues(adapter); + igb_free_q_vectors(adapter); + igb_reset_interrupt_capability(adapter); +} + +/** + * igb_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int igb_set_interrupt_capability(struct igb_adapter *adapter) +{ + int err; + int numvecs, i; + + /* Number of supported queues. 
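+	 * (Worked example, added editorially: with rss_queues = 4, no VFs
+	 * and queue pairing active, this gives 4 rx and 4 tx queues and
+	 * numvecs = 4 paired queue vectors + 1 link vector = 5 MSI-X
+	 * entries; without IGB_FLAG_QUEUE_PAIRS it would be 4 + 4 + 1 = 9.)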
*/ + adapter->num_rx_queues = adapter->rss_queues; + if (adapter->vfs_allocated_count) + adapter->num_tx_queues = 1; + else + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every rx queue */ + numvecs = adapter->num_rx_queues; + + /* if tx handler is separate add 1 for every tx queue */ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + goto msi_only; + + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(adapter->pdev, + adapter->msix_entries, + numvecs); + if (err == 0) + goto out; + + igb_reset_interrupt_capability(adapter); + + /* If we can't do MSI-X, try MSI */ +msi_only: +#ifdef CONFIG_PCI_IOV + /* disable SR-IOV for non MSI-X configurations */ + if (adapter->vf_data) { + struct e1000_hw *hw = &adapter->hw; + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); + msleep(500); + + kfree(adapter->vf_data); + adapter->vf_data = NULL; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&adapter->pdev->dev, "IOV Disabled\n"); + } +#endif + adapter->vfs_allocated_count = 0; + adapter->rss_queues = 1; + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGB_FLAG_HAS_MSI; +out: + /* Notify the stack of the (possibly) reduced queue counts. */ + netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); + return netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); +} + +/** + * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
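+ *
+ * (Editorial note: each q_vector is registered with NAPI through
+ * netif_napi_add() at a weight of 64, and a failed allocation unwinds
+ * everything allocated so far via igb_free_q_vectors().)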
+ **/ +static int igb_alloc_q_vectors(struct igb_adapter *adapter) +{ + struct igb_q_vector *q_vector; + struct e1000_hw *hw = &adapter->hw; + int v_idx; + + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + q_vector->itr_register = hw->hw_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); + adapter->q_vector[v_idx] = q_vector; + } + return 0; + +err_out: + igb_free_q_vectors(adapter); + return -ENOMEM; +} + +static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, + int ring_idx, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + q_vector->rx_ring = adapter->rx_ring[ring_idx]; + q_vector->rx_ring->q_vector = q_vector; + q_vector->itr_val = adapter->rx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; +} + +static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, + int ring_idx, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + q_vector->tx_ring = adapter->tx_ring[ring_idx]; + q_vector->tx_ring->q_vector = q_vector; + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; +} + +/** + * igb_map_ring_to_vector - maps allocated queues to vectors + * + * This function maps the recently allocated queues to vectors. + **/ +static int igb_map_ring_to_vector(struct igb_adapter *adapter) +{ + int i; + int v_idx = 0; + + if ((adapter->num_q_vectors < adapter->num_rx_queues) || + (adapter->num_q_vectors < adapter->num_tx_queues)) + return -ENOMEM; + + if (adapter->num_q_vectors >= + (adapter->num_rx_queues + adapter->num_tx_queues)) { + for (i = 0; i < adapter->num_rx_queues; i++) + igb_map_rx_ring_to_vector(adapter, i, v_idx++); + for (i = 0; i < adapter->num_tx_queues; i++) + igb_map_tx_ring_to_vector(adapter, i, v_idx++); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) { + if (i < adapter->num_tx_queues) + igb_map_tx_ring_to_vector(adapter, i, v_idx); + igb_map_rx_ring_to_vector(adapter, i, v_idx++); + } + for (; i < adapter->num_tx_queues; i++) + igb_map_tx_ring_to_vector(adapter, i, v_idx++); + } + return 0; +} + +/** + * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * + * This function initializes the interrupts and allocates all of the queues. + **/ +static int igb_init_interrupt_scheme(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + + err = igb_set_interrupt_capability(adapter); + if (err) + return err; + + err = igb_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + err = igb_alloc_queues(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + err = igb_map_ring_to_vector(adapter); + if (err) { + dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n"); + goto err_map_queues; + } + + + return 0; +err_map_queues: + igb_free_queues(adapter); +err_alloc_queues: + igb_free_q_vectors(adapter); +err_alloc_q_vectors: + igb_reset_interrupt_capability(adapter); + return err; +} + +/** + * igb_request_irq - initialize interrupts + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
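+ *
+ * (Editorial summary: the fallback order below is MSI-X first, then a
+ * single-vector MSI setup reduced to one rx and one tx queue, and
+ * finally a shared legacy IRQ requested with IRQF_SHARED.)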
+ **/ +static int igb_request_irq(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->msix_entries) { + err = igb_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igb_clear_interrupt_scheme(adapter); + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGB_FLAG_HAS_MSI; + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + adapter->num_q_vectors = 1; + err = igb_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, + "Unable to allocate memory for vectors\n"); + goto request_done; + } + err = igb_alloc_queues(adapter); + if (err) { + dev_err(&pdev->dev, + "Unable to allocate memory for queues\n"); + igb_free_q_vectors(adapter); + goto request_done; + } + igb_setup_all_tx_resources(adapter); + igb_setup_all_rx_resources(adapter); + } else { + igb_assign_vector(adapter->q_vector[0], 0); + } + + if (adapter->flags & IGB_FLAG_HAS_MSI) { + err = request_irq(adapter->pdev->irq, igb_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igb_reset_interrupt_capability(adapter); + adapter->flags &= ~IGB_FLAG_HAS_MSI; + } + + err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", + err); + +request_done: + return err; +} + +static void igb_free_irq(struct igb_adapter *adapter) +{ + if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + free_irq(adapter->msix_entries[vector++].vector, + q_vector); + } + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igb_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void igb_irq_disable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* + * we need to be careful when disabling interrupts. 
The VFs are also + * mapped into these registers and so clearing the bits can cause + * issues on the VF drivers so we only need to clear what we set + */ + if (adapter->msix_entries) { + u32 regval = rd32(E1000_EIAM); + wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); + wr32(E1000_EIMC, adapter->eims_enable_mask); + regval = rd32(E1000_EIAC); + wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(E1000_IAM, 0); + wr32(E1000_IMC, ~0); + wrfl(); + if (adapter->msix_entries) { + int i; + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * igb_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void igb_irq_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC; + u32 regval = rd32(E1000_EIAC); + wr32(E1000_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(E1000_EIAM); + wr32(E1000_EIAM, regval | adapter->eims_enable_mask); + wr32(E1000_EIMS, adapter->eims_enable_mask); + if (adapter->vfs_allocated_count) { + wr32(E1000_MBVFIMR, 0xFF); + ims |= E1000_IMS_VMMB; + } + if (adapter->hw.mac.type == e1000_82580) + ims |= E1000_IMS_DRSTA; + + wr32(E1000_IMS, ims); + } else { + wr32(E1000_IMS, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + wr32(E1000_IAM, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + } +} + +static void igb_update_mng_vlan(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + /* add VID to filter table */ + igb_vfta_set(hw, vid, true); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; + } + + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) { + /* remove VID from filter table */ + igb_vfta_set(hw, old_vid, false); + } +} + +/** + * igb_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + * + **/ +static void igb_release_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +/** + * igb_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
+ * + **/ +static void igb_get_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +/** + * igb_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void igb_configure(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + igb_get_hw_control(adapter); + igb_set_rx_mode(netdev); + + igb_restore_vlan(adapter); + + igb_setup_tctl(adapter); + igb_setup_mrqc(adapter); + igb_setup_rctl(adapter); + + igb_configure_tx(adapter); + igb_configure_rx(adapter); + + igb_rx_fifo_flush_82575(&adapter->hw); + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); + } +} + +/** + * igb_power_up_link - Power up the phy/serdes link + * @adapter: address of board private structure + **/ +void igb_power_up_link(struct igb_adapter *adapter) +{ + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_up_phy_copper(&adapter->hw); + else + igb_power_up_serdes_link_82575(&adapter->hw); +} + +/** + * igb_power_down_link - Power down the phy/serdes link + * @adapter: address of board private structure + */ +static void igb_power_down_link(struct igb_adapter *adapter) +{ + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_down_phy_copper_82575(&adapter->hw); + else + igb_shutdown_serdes_link_82575(&adapter->hw); +} + +/** + * igb_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + **/ +int igb_up(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* hardware has been reset, we need to reload some things */ + igb_configure(adapter); + + clear_bit(__IGB_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + napi_enable(&q_vector->napi); + } + if (adapter->msix_entries) + igb_configure_msix(adapter); + else + igb_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(E1000_ICR); + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. 
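+	 * (Editorial gloss: setting get_link_status appears to force the
+	 * first watchdog run to re-read link state from the hardware.)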
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + return 0; +} + +void igb_down(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__IGB_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = rd32(E1000_RCTL); + wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_tx_stop_all_queues(netdev); + + /* disable transmits in the hardware */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_EN; + wr32(E1000_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + msleep(10); + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + napi_disable(&q_vector->napi); + } + + igb_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + netif_carrier_off(netdev); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, &adapter->stats64); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igb_reset(adapter); + igb_clean_all_tx_rings(adapter); + igb_clean_all_rx_rings(adapter); +#ifdef CONFIG_IGB_DCA + + /* since we reset the hardware DCA settings were cleared */ + igb_setup_dca(adapter); +#endif +} + +void igb_reinit_locked(struct igb_adapter *adapter) +{ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + msleep(1); + igb_down(adapter); + igb_up(adapter); + clear_bit(__IGB_RESETTING, &adapter->state); +} + +void igb_reset(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_fc_info *fc = &hw->fc; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. + */ + switch (mac->type) { + case e1000_i350: + case e1000_82580: + pba = rd32(E1000_RXPBS); + pba = igb_rxpbs_adjust_82580(pba); + break; + case e1000_82576: + pba = rd32(E1000_RXPBS); + pba &= E1000_RXPBS_SIZE_MASK_82576; + break; + case e1000_82575: + default: + pba = E1000_PBA_34K; + break; + } + + if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && + (mac->type < e1000_82576)) { + /* adjust PBA for jumbo frames */ + wr32(E1000_PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. 
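+		 * (Worked example, added editorially: at the default MTU of
+		 * 1500, max_frame_size is 1518 bytes, so min_tx_space is
+		 * (1518 + 16 - 4) * 2 = 3060, aligned up to 3072 and shifted
+		 * down to 3 KB; min_rx_space aligns 1518 up to 2 KB.)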
+		 */
+		pba = rd32(E1000_PBA);
+		/* upper 16 bits has Tx packet buffer allocation size in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits has Rx packet buffer allocation size in KB */
+		pba &= 0xffff;
+		/* the tx fifo also stores 16 bytes of information about the tx
+		 * but don't include ethernet FCS because hardware appends it */
+		min_tx_space = (adapter->max_frame_size +
+				sizeof(union e1000_adv_tx_desc) -
+				ETH_FCS_LEN) * 2;
+		min_tx_space = ALIGN(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		/* software strips receive CRC, so leave room for it */
+		min_rx_space = adapter->max_frame_size;
+		min_rx_space = ALIGN(min_rx_space, 1024);
+		min_rx_space >>= 10;
+
+		/* If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation */
+		if (tx_space < min_tx_space &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba = pba - (min_tx_space - tx_space);
+
+			/* if short on rx space, rx wins and must trump tx
+			 * adjustment */
+			if (pba < min_rx_space)
+				pba = min_rx_space;
+		}
+		wr32(E1000_PBA, pba);
+	}
+
+	/* flow control settings */
+	/* The high water mark must be low enough to fit one full frame
+	 * (or the size used for early receive) above it in the Rx FIFO.
+	 * Set it to the lower of:
+	 * - 90% of the Rx FIFO size, or
+	 * - the full Rx FIFO size minus one full frame */
+	hwm = min(((pba << 10) * 9 / 10),
+		  ((pba << 10) - 2 * adapter->max_frame_size));
+
+	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
+	fc->low_water = fc->high_water - 16;
+	fc->pause_time = 0xFFFF;
+	fc->send_xon = 1;
+	fc->current_mode = fc->requested_mode;
+
+	/* disable receive for all VFs and wait one second */
+	if (adapter->vfs_allocated_count) {
+		int i;
+		for (i = 0; i < adapter->vfs_allocated_count; i++)
+			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
+
+		/* ping all the active vfs to let them know we are going down */
+		igb_ping_all_vfs(adapter);
+
+		/* disable transmits and receives */
+		wr32(E1000_VFRE, 0);
+		wr32(E1000_VFTE, 0);
+	}
+
+	/* Allow time for pending master requests to run */
+	hw->mac.ops.reset_hw(hw);
+	wr32(E1000_WUC, 0);
+
+	if (hw->mac.ops.init_hw(hw))
+		dev_err(&pdev->dev, "Hardware Error\n");
+	if (hw->mac.type > e1000_82580) {
+		if (adapter->flags & IGB_FLAG_DMAC) {
+			u32 reg;
+
+			/*
+			 * The DMA Coalescing high water mark needs to be
+			 * higher than the Rx threshold. The Rx threshold is
+			 * currently pba - 6, so we should use a high water
+			 * mark of pba - 4.
+			 */
+			hwm = (pba - 4) << 10;
+
+			reg = (((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
+			       & E1000_DMACR_DMACTHR_MASK);
+
+			/* transition to L0s or L1 if available */
+			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+			/* watchdog timer = +-1000 usec in 32 usec intervals */
+			reg |= (1000 >> 5);
+			wr32(E1000_DMACR, reg);
+
+			/* no lower threshold to disable coalescing
+			 * (smart fifo) - UTRESH=0 */
+			wr32(E1000_DMCRTRH, 0);
+
+			/* set the DMA Coalescing high water mark */
+			wr32(E1000_FCRTC, hwm);
+
+			/*
+			 * This sets the time to wait before requesting
+			 * transition to low power state to the number of
+			 * usecs needed to receive one 512 byte frame at
+			 * gigabit line rate.
+			 */
+			reg = rd32(E1000_DMCTLX);
+			reg |= IGB_DMCTLX_DCFLUSH_DIS;
+
+			/* Delay 255 usec before entering Lx state.
*/ + reg |= 0xFF; + wr32(E1000_DMCTLX, reg); + + /* free space in Tx packet buffer to wake from DMAC */ + wr32(E1000_DMCTXTH, + (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) + >> 6); + + /* make low power state decision controlled by DMAC */ + reg = rd32(E1000_PCIEMISC); + reg |= E1000_PCIEMISC_LX_DECISION; + wr32(E1000_PCIEMISC, reg); + } /* end if IGB_FLAG_DMAC set */ + } + if (hw->mac.type == e1000_82580) { + u32 reg = rd32(E1000_PCIEMISC); + wr32(E1000_PCIEMISC, + reg & ~E1000_PCIEMISC_LX_DECISION); + } + if (!netif_running(adapter->netdev)) + igb_power_down_link(adapter); + + igb_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + + igb_get_phy_info(hw); +} + +static u32 igb_fix_features(struct net_device *netdev, u32 features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_RX) + features |= NETIF_F_HW_VLAN_TX; + else + features &= ~NETIF_F_HW_VLAN_TX; + + return features; +} + +static int igb_set_features(struct net_device *netdev, u32 features) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + u32 changed = netdev->features ^ features; + + for (i = 0; i < adapter->num_rx_queues; i++) { + if (features & NETIF_F_RXCSUM) + adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM; + else + adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM; + } + + if (changed & NETIF_F_HW_VLAN_RX) + igb_vlan_mode(netdev, features); + + return 0; +} + +static const struct net_device_ops igb_netdev_ops = { + .ndo_open = igb_open, + .ndo_stop = igb_close, + .ndo_start_xmit = igb_xmit_frame_adv, + .ndo_get_stats64 = igb_get_stats64, + .ndo_set_rx_mode = igb_set_rx_mode, + .ndo_set_multicast_list = igb_set_rx_mode, + .ndo_set_mac_address = igb_set_mac, + .ndo_change_mtu = igb_change_mtu, + .ndo_do_ioctl = igb_ioctl, + .ndo_tx_timeout = igb_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, + .ndo_set_vf_mac = igb_ndo_set_vf_mac, + .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, + .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, + .ndo_get_vf_config = igb_ndo_get_vf_config, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = igb_netpoll, +#endif + .ndo_fix_features = igb_fix_features, + .ndo_set_features = igb_set_features, +}; + +/** + * igb_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igb_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igb_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int __devinit igb_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igb_adapter *adapter; + struct e1000_hw *hw; + u16 eeprom_data = 0; + s32 ret_val; + static int global_quad_port_a; /* global quad port a indication */ + const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; + unsigned long mmio_start, mmio_len; + int err, pci_using_dac; + u16 eeprom_apme_mask = IGB_EEPROM_APME; + u8 part_str[E1000_PBANUM_LENGTH]; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. 
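+	 *
+	 * (Editorial note: such a device is really a virtual function,
+	 * handled by a separate driver such as igbvf, so the check below
+	 * refuses the probe with -EINVAL.)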
+ */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), + igb_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), + IGB_ABS_MAX_TX_QUEUES); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE; + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + err = -EIO; + hw->hw_addr = ioremap(mmio_start, mmio_len); + if (!hw->hw_addr) + goto err_ioremap; + + netdev->netdev_ops = &igb_netdev_ops; + igb_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC, PHY and NVM function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* setup the private structure */ + err = igb_sw_init(adapter); + if (err) + goto err_sw_init; + + igb_get_bus_info_pcie(hw); + + hw->phy.autoneg_wait_to_complete = false; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = AUTO_ALL_MODES; + hw->phy.disable_polarity_correction = false; + hw->phy.ms_type = e1000_ms_hw_default; + } + + if (igb_check_reset_block(hw)) + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + netdev->hw_features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_RX; + + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_FILTER; + + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (hw->mac.type >= e1000_82576) { + netdev->hw_features |= NETIF_F_SCTP_CSUM; + netdev->features |= NETIF_F_SCTP_CSUM; + } + + adapter->en_mng_pt = 
igb_enable_mng_pass_thru(hw); + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state */ + hw->mac.ops.reset_hw(hw); + + /* make sure the NVM is good */ + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + setup_timer(&adapter->watchdog_timer, igb_watchdog, + (unsigned long) adapter); + setup_timer(&adapter->phy_info_timer, igb_update_phy_info, + (unsigned long) adapter); + + INIT_WORK(&adapter->reset_task, igb_reset_task); + INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0x2f; + + hw->fc.requested_mode = e1000_fc_default; + hw->fc.current_mode = e1000_fc_default; + + igb_validate_mdi_setting(hw); + + /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + if (hw->bus.func == 0) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + else if (hw->mac.type >= e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); + else if (hw->bus.func == 1) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases where + * the eeprom may be wrong or the board simply won't support wake on + * lan on a particular port */ + switch (pdev->device) { + case E1000_DEV_ID_82575GB_QUAD_COPPER: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting */ + if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->flags |= IGB_FLAG_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* reset the hardware with the new settings */ + igb_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+	 */
+	igb_get_hw_control(adapter);
+
+	strcpy(netdev->name, "eth%d");
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	igb_vlan_mode(netdev, netdev->features);
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+
+#ifdef CONFIG_IGB_DCA
+	if (dca_add_requester(&pdev->dev) == 0) {
+		adapter->flags |= IGB_FLAG_DCA_ENABLED;
+		dev_info(&pdev->dev, "DCA enabled\n");
+		igb_setup_dca(adapter);
+	}
+
+#endif
+	/* do hw tstamp init after resetting */
+	igb_init_hw_timer(adapter);
+
+	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
+	/* print bus type/speed/width info */
+	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
+		 netdev->name,
+		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
+		  "unknown"),
+		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
+		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+		  "unknown"),
+		 netdev->dev_addr);
+
+	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
+	if (ret_val)
+		strcpy(part_str, "Unknown");
+	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
+	dev_info(&pdev->dev,
+		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
+		 adapter->msix_entries ? "MSI-X" :
+		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
+		 adapter->num_rx_queues, adapter->num_tx_queues);
+	switch (hw->mac.type) {
+	case e1000_i350:
+		igb_set_eee_i350(hw);
+		break;
+	default:
+		break;
+	}
+	return 0;
+
+err_register:
+	igb_release_hw_control(adapter);
+err_eeprom:
+	if (!igb_check_reset_block(hw))
+		igb_reset_phy(hw);
+
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+err_sw_init:
+	igb_clear_interrupt_scheme(adapter);
+	iounmap(hw->hw_addr);
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * igb_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * igb_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit igb_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	/*
+	 * The watchdog timer may be rescheduled, so explicitly
+	 * disable watchdog from being rescheduled.
+	 */
+	set_bit(__IGB_DOWN, &adapter->state);
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
+
+#ifdef CONFIG_IGB_DCA
+	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
+		dev_info(&pdev->dev, "DCA disabled\n");
+		dca_remove_requester(&pdev->dev);
+		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
+		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
+	}
+#endif
+
+	/* Release control of h/w to f/w. If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	igb_release_hw_control(adapter);
+
+	unregister_netdev(netdev);
+
+	igb_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PCI_IOV
+	/* reclaim resources allocated to VFs */
+	if (adapter->vf_data) {
+		/* disable iov and allow time for transactions to clear */
+		pci_disable_sriov(pdev);
+		msleep(500);
+
+		kfree(adapter->vf_data);
+		adapter->vf_data = NULL;
+		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+		wrfl();
+		msleep(100);
+		dev_info(&pdev->dev, "IOV Disabled\n");
+	}
+#endif
+
+	iounmap(hw->hw_addr);
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+
+	free_netdev(netdev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
+ *
+ * This function initializes the vf specific data storage and then attempts to
+ * allocate the VFs. The reason for ordering it this way is that it is much
+ * more expensive time-wise to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
+ **/
+static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *pdev = adapter->pdev;
+
+	if (adapter->vfs_allocated_count) {
+		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+					   sizeof(struct vf_data_storage),
+					   GFP_KERNEL);
+		/* if allocation failed then we do not support SR-IOV */
+		if (!adapter->vf_data) {
+			adapter->vfs_allocated_count = 0;
+			dev_err(&pdev->dev, "Unable to allocate memory for VF "
+				"Data Storage\n");
+		}
+	}
+
+	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
+		kfree(adapter->vf_data);
+		adapter->vf_data = NULL;
+#endif /* CONFIG_PCI_IOV */
+		adapter->vfs_allocated_count = 0;
+#ifdef CONFIG_PCI_IOV
+	} else {
+		unsigned char mac_addr[ETH_ALEN];
+		int i;
+		dev_info(&pdev->dev, "%d vfs allocated\n",
+			 adapter->vfs_allocated_count);
+		for (i = 0; i < adapter->vfs_allocated_count; i++) {
+			random_ether_addr(mac_addr);
+			igb_set_vf_mac(adapter, i, mac_addr);
+		}
+		/* DMA Coalescing is not supported in IOV mode. */
+		if (adapter->flags & IGB_FLAG_DMAC)
+			adapter->flags &= ~IGB_FLAG_DMAC;
+	}
+#endif /* CONFIG_PCI_IOV */
+}
+
+
+/**
+ * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
+ * @adapter: board private structure to initialize
+ *
+ * igb_init_hw_timer initializes the function pointer and values for the hw
+ * timer found in hardware.
+ **/
+static void igb_init_hw_timer(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	switch (hw->mac.type) {
+	case e1000_i350:
+	case e1000_82580:
+		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+		adapter->cycles.read = igb_read_clock;
+		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+		adapter->cycles.mult = 1;
+		/*
+		 * The 82580 timesync updates the system timer every 8ns by 8ns
+		 * and the value cannot be shifted. Instead we need to shift
+		 * the registers to generate a 64bit timer value. As a result
+		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
+		 * 24 in order to generate a larger value for synchronization.
+		 */
+		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
+		/* disable system timer temporarily by setting bit 31 */
+		wr32(E1000_TSAUXC, 0x80000000);
+		wrfl();
+
+		/* Set registers so that rollover occurs soon to test this.
*/ + wr32(E1000_SYSTIMR, 0x00000000); + wr32(E1000_SYSTIML, 0x80000000); + wr32(E1000_SYSTIMH, 0x000000FF); + wrfl(); + + /* enable system timer by clearing bit 31 */ + wr32(E1000_TSAUXC, 0x0); + wrfl(); + + timecounter_init(&adapter->clock, + &adapter->cycles, + ktime_to_ns(ktime_get_real())); + /* + * Synchronize our NIC clock against system wall clock. NIC + * time stamp reading requires ~3us per sample, each sample + * was pretty stable even under load => only require 10 + * samples for each offset comparison. + */ + memset(&adapter->compare, 0, sizeof(adapter->compare)); + adapter->compare.source = &adapter->clock; + adapter->compare.target = ktime_get_real; + adapter->compare.num_samples = 10; + timecompare_update(&adapter->compare, 0); + break; + case e1000_82576: + /* + * Initialize hardware timer: we keep it running just in case + * that some program needs it later on. + */ + memset(&adapter->cycles, 0, sizeof(adapter->cycles)); + adapter->cycles.read = igb_read_clock; + adapter->cycles.mask = CLOCKSOURCE_MASK(64); + adapter->cycles.mult = 1; + /** + * Scale the NIC clock cycle by a large factor so that + * relatively small clock corrections can be added or + * subtracted at each clock tick. The drawbacks of a large + * factor are a) that the clock register overflows more quickly + * (not such a big deal) and b) that the increment per tick has + * to fit into 24 bits. As a result we need to use a shift of + * 19 so we can fit a value of 16 into the TIMINCA register. + */ + adapter->cycles.shift = IGB_82576_TSYNC_SHIFT; + wr32(E1000_TIMINCA, + (1 << E1000_TIMINCA_16NS_SHIFT) | + (16 << IGB_82576_TSYNC_SHIFT)); + + /* Set registers so that rollover occurs soon to test this. */ + wr32(E1000_SYSTIML, 0x00000000); + wr32(E1000_SYSTIMH, 0xFF800000); + wrfl(); + + timecounter_init(&adapter->clock, + &adapter->cycles, + ktime_to_ns(ktime_get_real())); + /* + * Synchronize our NIC clock against system wall clock. NIC + * time stamp reading requires ~3us per sample, each sample + * was pretty stable even under load => only require 10 + * samples for each offset comparison. + */ + memset(&adapter->compare, 0, sizeof(adapter->compare)); + adapter->compare.source = &adapter->clock; + adapter->compare.target = ktime_get_real; + adapter->compare.num_samples = 10; + timecompare_update(&adapter->compare, 0); + break; + case e1000_82575: + /* 82575 does not support timesync */ + default: + break; + } + +} + +/** + * igb_sw_init - Initialize general software structures (struct igb_adapter) + * @adapter: board private structure to initialize + * + * igb_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
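To see why the 82576 branch above pairs cycles.shift of 19 with a TIMINCA increment of 16 << 19 per 16 ns tick, here is a standalone model of the timecounter conversion ns = (cycles * mult) >> shift. The constant 19 is IGB_82576_TSYNC_SHIFT from the code above; everything else is illustrative:

#include <stdio.h>
#include <stdint.h>

#define TSYNC_SHIFT 19	/* IGB_82576_TSYNC_SHIFT in the code above */

/* SYSTIM gains (16 << 19) per 16 ns tick, so it holds ns scaled by 2^19 */
static uint64_t systim_after(uint64_t ns)
{
	return (ns / 16) * (16ull << TSYNC_SHIFT);
}

/* timecounter conversion with mult == 1: ns = (cycles * mult) >> shift */
static uint64_t cycles_to_ns(uint64_t cycles, uint64_t mult)
{
	return (cycles * mult) >> TSYNC_SHIFT;
}

int main(void)
{
	uint64_t cyc = systim_after(1000000000ull);	/* one second */

	printf("recovered ns: %llu\n",
	       (unsigned long long)cycles_to_ns(cyc, 1));
	/* a one-step change to the increment retunes the clock by 2^-19 */
	printf("correction step: %.3f ppm\n", 1e6 / (1 << TSYNC_SHIFT));
	return 0;
}

With the counter holding nanoseconds scaled by 2^19, each unit of increment adjustment retunes the clock by roughly 1.9 ppm, which is the point of the large scale factor the comment describes.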
+ **/ +static int __devinit igb_sw_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + adapter->tx_ring_count = IGB_DEFAULT_TXD; + adapter->rx_ring_count = IGB_DEFAULT_RXD; + adapter->rx_itr_setting = IGB_DEFAULT_ITR; + adapter->tx_itr_setting = IGB_DEFAULT_ITR; + + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->stats64_lock); +#ifdef CONFIG_PCI_IOV + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + if (max_vfs > 7) { + dev_warn(&pdev->dev, + "Maximum of 7 VFs per PF, using max\n"); + adapter->vfs_allocated_count = 7; + } else + adapter->vfs_allocated_count = max_vfs; + break; + default: + break; + } +#endif /* CONFIG_PCI_IOV */ + adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); + /* i350 cannot do RSS and SR-IOV at the same time */ + if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count) + adapter->rss_queues = 1; + + /* + * if rss_queues > 4 or vfs are going to be allocated with rss_queues + * then we should combine the queues into a queue pair in order to + * conserve interrupts due to limited supply + */ + if ((adapter->rss_queues > 4) || + ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6))) + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + + /* This call may decrease the number of queues */ + if (igb_init_interrupt_scheme(adapter)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_probe_vfs(adapter); + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igb_irq_disable(adapter); + + if (hw->mac.type == e1000_i350) + adapter->flags &= ~IGB_FLAG_DMAC; + + set_bit(__IGB_DOWN, &adapter->state); + return 0; +} + +/** + * igb_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int igb_open(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + int i; + + /* disallow open during test */ + if (test_bit(__IGB_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = igb_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igb_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igb_power_up_link(adapter); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. 
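The queue-pairing rule in igb_sw_init above trades per-ring interrupt vectors for shared ones when MSI-X vectors would run short. A compact standalone model of just that decision (the helper name is hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* mirrors the IGB_FLAG_QUEUE_PAIRS condition in igb_sw_init */
static bool wants_queue_pairs(unsigned int rss_queues, unsigned int vfs)
{
	return (rss_queues > 4) || (rss_queues > 1 && vfs > 6);
}

int main(void)
{
	printf("%d\n", wants_queue_pairs(8, 0));	/* 1: more than 4 RSS queues */
	printf("%d\n", wants_queue_pairs(2, 7));	/* 1: queues plus many VFs */
	printf("%d\n", wants_queue_pairs(4, 0));	/* 0: vectors suffice */
	return 0;
}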
*/ + igb_configure(adapter); + + err = igb_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as igb_up() */ + clear_bit(__IGB_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + napi_enable(&q_vector->napi); + } + + /* Clear any pending interrupts. */ + rd32(E1000_ICR); + + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + netif_tx_start_all_queues(netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + return 0; + +err_req_irq: + igb_release_hw_control(adapter); + igb_power_down_link(adapter); + igb_free_all_rx_resources(adapter); +err_setup_rx: + igb_free_all_tx_resources(adapter); +err_setup_tx: + igb_reset(adapter); + + return err; +} + +/** + * igb_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int igb_close(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); + igb_down(adapter); + + igb_free_irq(adapter); + + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + return 0; +} + +/** + * igb_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int igb_setup_tx_resources(struct igb_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct igb_buffer) * tx_ring->count; + tx_ring->buffer_info = vzalloc(size); + if (!tx_ring->buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + vfree(tx_ring->buffer_info); + dev_err(dev, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_tx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igb_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Tx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) { + int r_idx = i % adapter->num_tx_queues; + adapter->multi_tx_table[i] = adapter->tx_ring[r_idx]; + } + return err; +} + +/** + * igb_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + **/ 
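Both ring-allocation paths round the descriptor area up to a 4 KiB boundary before calling dma_alloc_coherent(). A sketch of the arithmetic, assuming the usual 16-byte advanced descriptor and a 256-entry ring (both figures are assumptions, not taken from this hunk):

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t count = 256;	/* assumed default descriptor count */
	uint64_t desc = 16;	/* assumed sizeof(union e1000_adv_tx_desc) */

	printf("ring bytes: %llu\n",
	       (unsigned long long)ALIGN_UP(count * desc, 4096));	/* 4096 */
	return 0;
}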
+void igb_setup_tctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which is enabled by default on 82575 and 82576 */ + wr32(E1000_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + igb_config_collision_dist(hw); + + /* Enable transmits */ + tctl |= E1000_TCTL_EN; + + wr32(E1000_TCTL, tctl); +} + +/** + * igb_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + **/ +void igb_configure_tx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txdctl; + u64 tdba = ring->dma; + int reg_idx = ring->reg_idx; + + /* disable the queue */ + txdctl = rd32(E1000_TXDCTL(reg_idx)); + wr32(E1000_TXDCTL(reg_idx), + txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + wrfl(); + mdelay(10); + + wr32(E1000_TDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_tx_desc)); + wr32(E1000_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(E1000_TDBAH(reg_idx), tdba >> 32); + + ring->head = hw->hw_addr + E1000_TDH(reg_idx); + ring->tail = hw->hw_addr + E1000_TDT(reg_idx); + writel(0, ring->head); + writel(0, ring->tail); + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; + txdctl |= IGB_TX_WTHRESH << 16; + + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + wr32(E1000_TXDCTL(reg_idx), txdctl); +} + +/** + * igb_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void igb_configure_tx(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igb_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igb_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int igb_setup_rx_resources(struct igb_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int size, desc_len; + + size = sizeof(struct igb_buffer) * rx_ring->count; + rx_ring->buffer_info = vzalloc(size); + if (!rx_ring->buffer_info) + goto err; + + desc_len = sizeof(union e1000_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the receive descriptor" + " ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_rx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igb_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Rx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * 
igb_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + **/ +static void igb_setup_mrqc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 j, num_rx_queues, shift = 0, shift2 = 0; + union e1000_reta { + u32 dword; + u8 bytes[4]; + } reta; + static const u8 rsshash[40] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, + 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, + 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, + 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; + + /* Fill out hash function seeds */ + for (j = 0; j < 10; j++) { + u32 rsskey = rsshash[(j * 4)]; + rsskey |= rsshash[(j * 4) + 1] << 8; + rsskey |= rsshash[(j * 4) + 2] << 16; + rsskey |= rsshash[(j * 4) + 3] << 24; + array_wr32(E1000_RSSRK(0), j, rsskey); + } + + num_rx_queues = adapter->rss_queues; + + if (adapter->vfs_allocated_count) { + /* 82575 and 82576 supports 2 RSS queues for VMDq */ + switch (hw->mac.type) { + case e1000_i350: + case e1000_82580: + num_rx_queues = 1; + shift = 0; + break; + case e1000_82576: + shift = 3; + num_rx_queues = 2; + break; + case e1000_82575: + shift = 2; + shift2 = 6; + default: + break; + } + } else { + if (hw->mac.type == e1000_82575) + shift = 6; + } + + for (j = 0; j < (32 * 4); j++) { + reta.bytes[j & 3] = (j % num_rx_queues) << shift; + if (shift2) + reta.bytes[j & 3] |= num_rx_queues << shift2; + if ((j & 3) == 3) + wr32(E1000_RETA(j >> 2), reta.dword); + } + + /* + * Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + if (adapter->hw.mac.type >= e1000_82576) + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= E1000_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(E1000_RXCSUM, rxcsum); + + /* If VMDq is enabled then we set the appropriate mode for that, else + * we default to RSS so that an RSS hash is calculated per packet even + * if we are only using one queue */ + if (adapter->vfs_allocated_count) { + if (hw->mac.type > e1000_82575) { + /* Set the default pool for the PF's first queue */ + u32 vtctl = rd32(E1000_VT_CTL); + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | + E1000_VT_CTL_DISABLE_DEF_POOL); + vtctl |= adapter->vfs_allocated_count << + E1000_VT_CTL_DEFAULT_POOL_SHIFT; + wr32(E1000_VT_CTL, vtctl); + } + if (adapter->rss_queues > 1) + mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; + else + mrqc = E1000_MRQC_ENABLE_VMDQ; + } else { + mrqc = E1000_MRQC_ENABLE_RSS_4Q; + } + igb_vmm_control(adapter); + + /* + * Generate RSS hash based on TCP port numbers and/or + * IPv4/v6 src and dst addresses since UDP cannot be + * hashed reliably due to IP fragmentation + */ + mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + + wr32(E1000_MRQC, mrqc); +} + +/** + * igb_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +void igb_setup_rctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(E1000_RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << 
E1000_RCTL_MO_SHIFT); + + /* + * enable stripping of CRC. It's unlikely this will break BMC + * redirection as it did with e1000. Newer features require + * that the HW strips the CRC. + */ + rctl |= E1000_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); + + /* enable LPE to prevent packets larger than max_frame_size */ + rctl |= E1000_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(E1000_RXDCTL(0), 0); + + /* Attention!!! For SR-IOV PF driver operations you must enable + * queue drop for all VF and PF queues to prevent head of line blocking + * if an un-trusted VF does not provide descriptors to hardware. + */ + if (adapter->vfs_allocated_count) { + /* set all queue drop enable bits */ + wr32(E1000_QDE, ALL_QUEUES); + } + + wr32(E1000_RCTL, rctl); +} + +static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, + int vfn) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + /* if it isn't the PF check to see if VFs are enabled and + * increase the size to support vlan tags */ + if (vfn < adapter->vfs_allocated_count && + adapter->vf_data[vfn].vlans_enabled) + size += VLAN_TAG_SIZE; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= size | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + + return 0; +} + +/** + * igb_rlpml_set - set maximum receive packet size + * @adapter: board private structure + * + * Configure maximum receivable packet size. + **/ +static void igb_rlpml_set(struct igb_adapter *adapter) +{ + u32 max_frame_size; + struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; + + max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; + + /* if vfs are enabled we set RLPML to the largest possible request + * size and set the VMOLR RLPML to the size we need */ + if (pf_id) { + igb_set_vf_rlpml(adapter, max_frame_size, pf_id); + max_frame_size = MAX_JUMBO_FRAME_SIZE; + } + + wr32(E1000_RLPML, max_frame_size); +} + +static inline void igb_set_vmolr(struct igb_adapter *adapter, + int vfn, bool aupe) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + /* + * This register exists only on 82576 and newer so if we are older then + * we should exit and do nothing + */ + if (hw->mac.type < e1000_82576) + return; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ + if (aupe) + vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ + else + vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ + + /* clear all bits that might not be set */ + vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); + + if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ + /* + * for VMDq only allow the VFs and pool 0 to accept broadcast and + * multicast packets + */ + if (vfn <= adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + + wr32(E1000_VMOLR(vfn), vmolr); +} + +/** + * igb_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. 
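The RSSRK/RETA programming in igb_setup_mrqc above packs four key bytes little-endian into each 32-bit register write and then spreads the 128 indirection-table entries round-robin across the active queues. A userspace model of both loops (only the first four of the forty key bytes are spelled out here; the rest default to zero in this sketch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* leading bytes of the rsshash[] table from the patch */
	static const uint8_t rsshash[40] = { 0x6d, 0x5a, 0x56, 0xda };
	uint8_t reta[128];
	uint32_t j, rsskey;
	uint32_t num_rx_queues = 4, shift = 0;

	/* four key bytes pack little-endian into each 32-bit RSSRK word */
	rsskey = rsshash[0] | rsshash[1] << 8 |
		 rsshash[2] << 16 | (uint32_t)rsshash[3] << 24;
	printf("RSSRK[0] = 0x%08x\n", rsskey);	/* 0xda565a6d */

	/* the 128-entry RETA spreads flows round-robin across the queues */
	for (j = 0; j < 128; j++)
		reta[j] = (uint8_t)((j % num_rx_queues) << shift);
	printf("RETA[0..5] = %u %u %u %u %u %u\n",
	       reta[0], reta[1], reta[2], reta[3], reta[4], reta[5]);
	return 0;
}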
+ **/ +void igb_configure_rx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + int reg_idx = ring->reg_idx; + u32 srrctl, rxdctl; + + /* disable the queue */ + rxdctl = rd32(E1000_RXDCTL(reg_idx)); + wr32(E1000_RXDCTL(reg_idx), + rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + + /* Set DMA base address registers */ + wr32(E1000_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(E1000_RDBAH(reg_idx), rdba >> 32); + wr32(E1000_RDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_rx_desc)); + + /* initialize head and tail */ + ring->head = hw->hw_addr + E1000_RDH(reg_idx); + ring->tail = hw->hw_addr + E1000_RDT(reg_idx); + writel(0, ring->head); + writel(0, ring->tail); + + /* set descriptor configuration */ + if (ring->rx_buffer_len < IGB_RXBUFFER_1024) { + srrctl = ALIGN(ring->rx_buffer_len, 64) << + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; +#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 + srrctl |= IGB_RXBUFFER_16384 >> + E1000_SRRCTL_BSIZEPKT_SHIFT; +#else + srrctl |= (PAGE_SIZE / 2) >> + E1000_SRRCTL_BSIZEPKT_SHIFT; +#endif + srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else { + srrctl = ALIGN(ring->rx_buffer_len, 1024) >> + E1000_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + } + if (hw->mac.type == e1000_82580) + srrctl |= E1000_SRRCTL_TIMESTAMP; + /* Only set Drop Enable if we are supporting multiple queues */ + if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) + srrctl |= E1000_SRRCTL_DROP_EN; + + wr32(E1000_SRRCTL(reg_idx), srrctl); + + /* set filtering for VMDQ pools */ + igb_set_vmolr(adapter, reg_idx & 0x7, true); + + /* enable receive descriptor fetching */ + rxdctl = rd32(E1000_RXDCTL(reg_idx)); + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= IGB_RX_PTHRESH; + rxdctl |= IGB_RX_HTHRESH << 8; + rxdctl |= IGB_RX_WTHRESH << 16; + wr32(E1000_RXDCTL(reg_idx), rxdctl); +} + +/** + * igb_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
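In the header-split branch of igb_configure_rx_ring above, the packet buffer is half a page and SRRCTL takes that size in 1 KiB units. A sketch of the field arithmetic, assuming 4 KiB pages and a BSIZEPKT shift of 10 (the value E1000_SRRCTL_BSIZEPKT_SHIFT has in the igb headers of this era):

#include <stdio.h>

#define PAGE_SZ		4096	/* assumed 4 KiB pages */
#define BSIZEPKT_SHIFT	10	/* assumed E1000_SRRCTL_BSIZEPKT_SHIFT */

int main(void)
{
	unsigned int buf = PAGE_SZ / 2;	/* half a page per packet buffer */

	printf("BSIZEPKT field: %u (%u bytes)\n",
	       buf >> BSIZEPKT_SHIFT, buf);	/* 2 (2048 bytes) */
	return 0;
}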
+ **/ +static void igb_configure_rx(struct igb_adapter *adapter) +{ + int i; + + /* set UTA to appropriate mode */ + igb_set_uta(adapter); + + /* set the correct pool for the PF default MAC address in entry 0 */ + igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, + adapter->vfs_allocated_count); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + for (i = 0; i < adapter->num_rx_queues; i++) + igb_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igb_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void igb_free_tx_resources(struct igb_ring *tx_ring) +{ + igb_clean_tx_ring(tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igb_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void igb_free_all_tx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igb_free_tx_resources(adapter->tx_ring[i]); +} + +void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring, + struct igb_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(tx_ring->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + else + dma_unmap_single(tx_ring->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + buffer_info->length = 0; + buffer_info->next_to_watch = 0; + buffer_info->mapped_as_page = false; +} + +/** + * igb_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igb_clean_tx_ring(struct igb_ring *tx_ring) +{ + struct igb_buffer *buffer_info; + unsigned long size; + unsigned int i; + + if (!tx_ring->buffer_info) + return; + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + igb_unmap_and_free_tx_resource(tx_ring, buffer_info); + } + + size = sizeof(struct igb_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igb_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_tx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igb_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igb_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void igb_free_rx_resources(struct igb_ring *rx_ring) +{ + igb_clean_rx_ring(rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igb_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all 
receive software resources + **/ +static void igb_free_all_rx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igb_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igb_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void igb_clean_rx_ring(struct igb_ring *rx_ring) +{ + struct igb_buffer *buffer_info; + unsigned long size; + unsigned int i; + + if (!rx_ring->buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + dma_unmap_single(rx_ring->dev, + buffer_info->dma, + rx_ring->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + if (buffer_info->page_dma) { + dma_unmap_page(rx_ring->dev, + buffer_info->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + buffer_info->page_dma = 0; + } + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + buffer_info->page_offset = 0; + } + } + + size = sizeof(struct igb_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * igb_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_rx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igb_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igb_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int igb_set_mac(struct net_device *netdev, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igb_rar_set_qsel(adapter, hw->mac.addr, 0, + adapter->vfs_allocated_count); + + return 0; +} + +/** + * igb_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igb_write_mc_addr_list(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igb_update_mc_addr_list(hw, NULL, 0); + igb_restore_vf_multicasts(adapter); + return 0; + } + + mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
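A tiny standalone model of that packing, using two made-up multicast addresses: the shared code receives one flat buffer of six-byte addresses and nothing else.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

int main(void)
{
	static const uint8_t addrs[2][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
	};
	uint8_t mta_list[2 * ETH_ALEN];
	int i = 0, n;

	/* the shared function wants the addresses back to back */
	for (n = 0; n < 2; n++)
		memcpy(mta_list + (i++ * ETH_ALEN), addrs[n], ETH_ALEN);

	printf("packed %d addresses into %zu bytes\n", i, sizeof(mta_list));
	return 0;
}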
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igb_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +/** + * igb_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int igb_write_uc_addr_list(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = adapter->vfs_allocated_count; + unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev) && rar_entries) { + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, netdev) { + if (!rar_entries) + break; + igb_rar_set_qsel(adapter, ha->addr, + rar_entries--, + vfn); + count++; + } + } + /* write the addresses in reverse order to avoid write combining */ + for (; rar_entries > 0 ; rar_entries--) { + wr32(E1000_RAH(rar_entries), 0); + wr32(E1000_RAL(rar_entries), 0); + } + wrfl(); + + return count; +} + +/** + * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void igb_set_rx_mode(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = adapter->vfs_allocated_count; + u32 rctl, vmolr = 0; + int count; + + /* Check for Promiscuous and All Multicast modes */ + rctl = rd32(E1000_RCTL); + + /* clear the effected bits */ + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igb_write_mc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else if (count) { + vmolr |= E1000_VMOLR_ROMPE; + } + } + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = igb_write_uc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } + rctl |= E1000_RCTL_VFE; + } + wr32(E1000_RCTL, rctl); + + /* + * In order to support SR-IOV and eventually VMDq it is necessary to set + * the VMOLR to enable the appropriate modes. 
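igb_write_uc_addr_list above budgets the receive-address registers: one RAR goes to each VF plus the PF's own MAC, and whatever remains serves netdev unicast filters. A sketch of that bookkeeping, assuming a 24-entry RAR table (the 82576 figure; other MACs in this family differ):

#include <stdio.h>

int main(void)
{
	unsigned int rar_entry_count = 24;	/* assumed for 82576 */
	unsigned int vfn = 7;			/* VFs sharing the table */
	unsigned int rar_entries = rar_entry_count - (vfn + 1);

	printf("RAR slots left for PF unicast filters: %u\n",
	       rar_entries);	/* 16 */
	return 0;
}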
Without this workaround + * we will have issues with VLAN tag stripping not being done for frames + * that are only arriving because we are the default pool + */ + if (hw->mac.type < e1000_82576) + return; + + vmolr |= rd32(E1000_VMOLR(vfn)) & + ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); + wr32(E1000_VMOLR(vfn), vmolr); + igb_restore_vf_multicasts(adapter); +} + +static void igb_check_wvbr(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 wvbr = 0; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + if (!(wvbr = rd32(E1000_WVBR))) + return; + break; + default: + break; + } + + adapter->wvbr |= wvbr; +} + +#define IGB_STAGGERED_QUEUE_OFFSET 8 + +static void igb_spoof_check(struct igb_adapter *adapter) +{ + int j; + + if (!adapter->wvbr) + return; + + for(j = 0; j < adapter->vfs_allocated_count; j++) { + if (adapter->wvbr & (1 << j) || + adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { + dev_warn(&adapter->pdev->dev, + "Spoof event(s) detected on VF %d\n", j); + adapter->wvbr &= + ~((1 << j) | + (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))); + } + } +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy */ +static void igb_update_phy_info(unsigned long data) +{ + struct igb_adapter *adapter = (struct igb_adapter *) data; + igb_get_phy_info(&adapter->hw); +} + +/** + * igb_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + **/ +bool igb_has_link(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + s32 ret_val = 0; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. get_link_status will stay + * false until the e1000_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (hw->mac.get_link_status) { + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_internal_serdes: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = hw->mac.serdes_has_link; + break; + default: + case e1000_media_type_unknown: + break; + } + + return link_active; +} + +static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) +{ + bool ret = false; + u32 ctrl_ext, thstat; + + /* check for thermal sensor event on i350, copper only */ + if (hw->mac.type == e1000_i350) { + thstat = rd32(E1000_THSTAT); + ctrl_ext = rd32(E1000_CTRL_EXT); + + if ((hw->phy.media_type == e1000_media_type_copper) && + !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) { + ret = !!(thstat & event); + } + } + + return ret; +} + +/** + * igb_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void igb_watchdog(unsigned long data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igb_watchdog_task(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, + struct igb_adapter, + watchdog_task); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 link; + int i; + + link = igb_has_link(adapter); + if (link) { + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(E1000_CTRL); + /* Links status 
message must follow this format */ + printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" : + ((ctrl & E1000_CTRL_RFCE) ? "RX" : + ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) { + printk(KERN_INFO "igb: %s The network adapter " + "link speed was downshifted " + "because it overheated.\n", + netdev->name); + } + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + netif_carrier_on(netdev); + + igb_ping_all_vfs(adapter); + igb_check_vf_rate_limit(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) { + printk(KERN_ERR "igb: %s The network adapter " + "was stopped because it " + "overheated.\n", + netdev->name); + } + + /* Links status message must follow this format */ + printk(KERN_INFO "igb: %s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + igb_ping_all_vfs(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, &adapter->stats64); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *tx_ring = adapter->tx_ring[i]; + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). */ + if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + tx_ring->detect_tx_hung = true; + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + if (adapter->msix_entries) { + u32 eics = 0; + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + eics |= q_vector->eims_value; + } + wr32(E1000_EICS, eics); + } else { + wr32(E1000_ICS, E1000_ICS_RXDMT0); + } + + igb_spoof_check(adapter); + + /* Reset the timer */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * igb_update_ring_itr - update the dynamic ITR value based on packet size + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igb_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. 
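The watchdog above scales the Tx hang timeout by link speed, presumably because a 10 Mb/s link legitimately drains the ring two orders of magnitude more slowly than gigabit. The mapping, modelled standalone:

#include <stdio.h>

static int tx_timeout_factor(int speed_mbps)
{
	switch (speed_mbps) {
	case 10:
		return 14;	/* slow links need far more slack */
	case 100:	/* maybe add some timeout factor, per the comment */
	default:
		return 1;
	}
}

int main(void)
{
	printf("10Mb/s factor: %d\n", tx_timeout_factor(10));
	printf("1Gb/s  factor: %d\n", tx_timeout_factor(1000));
	return 0;
}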
The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * This functionality is controlled by the InterruptThrottleRate module + * parameter (see igb_param.c) + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + * @q_vector: pointer to q_vector + **/ +static void igb_update_ring_itr(struct igb_q_vector *q_vector) +{ + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *ring; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. + */ + if (adapter->link_speed != SPEED_1000) { + new_val = 976; + goto set_itr_val; + } + + ring = q_vector->rx_ring; + if (ring) { + packets = ACCESS_ONCE(ring->total_packets); + + if (packets) + avg_wire_size = ring->total_bytes / packets; + } + + ring = q_vector->tx_ring; + if (ring) { + packets = ACCESS_ONCE(ring->total_packets); + + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + ring->total_bytes / packets); + } + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if ((avg_wire_size > 300) && (avg_wire_size < 1200)) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* when in itr mode 3 do not exceed 20K ints/sec */ + if (adapter->rx_itr_setting == 3 && new_val < 196) + new_val = 196; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + if (q_vector->rx_ring) { + q_vector->rx_ring->total_bytes = 0; + q_vector->rx_ring->total_packets = 0; + } + if (q_vector->tx_ring) { + q_vector->tx_ring->total_bytes = 0; + q_vector->tx_ring->total_packets = 0; + } +} + +/** + * igb_update_itr - update the dynamic ITR value based on statistics + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * this functionality is controlled by the InterruptThrottleRate module + * parameter (see igb_param.c) + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
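The heuristic above can be exercised outside the driver. This sketch reproduces the +24 byte framing overhead, the 3000-byte jumbo clamp, and the mid-size boost from igb_update_ring_itr, with a return of -1 standing in for "leave the ITR alone":

#include <stdio.h>
#include <stdint.h>

static int ring_itr(uint64_t bytes, uint64_t packets)
{
	uint64_t avg = packets ? bytes / packets : 0;

	if (!avg)
		return -1;	/* no traffic: keep the old value */
	avg += 24;		/* CRC, preamble, inter-frame gap */
	if (avg > 3000)
		avg = 3000;	/* don't starve jumbo frames */
	/* mid-size frames get a boost (smaller value = more ints/sec) */
	return (avg > 300 && avg < 1200) ? (int)(avg / 3) : (int)(avg / 2);
}

int main(void)
{
	printf("64B avg  : itr %d\n", ring_itr(64 * 100, 100));
	printf("512B avg : itr %d\n", ring_itr(512 * 100, 100));
	printf("1500B avg: itr %d\n", ring_itr(1500 * 100, 100));
	return 0;
}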
+ * @adapter: pointer to adapter + * @itr_setting: current q_vector->itr_val + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + **/ +static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting, + int packets, int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) { + retval = bulk_latency; + } else if ((packets < 10) || ((bytes/packets) > 1200)) { + retval = bulk_latency; + } else if ((packets > 35)) { + retval = lowest_latency; + } + } else if (bytes/packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 1500) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void igb_set_itr(struct igb_adapter *adapter) +{ + struct igb_q_vector *q_vector = adapter->q_vector[0]; + u16 current_itr; + u32 new_itr = q_vector->itr_val; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + adapter->rx_itr = igb_update_itr(adapter, + adapter->rx_itr, + q_vector->rx_ring->total_packets, + q_vector->rx_ring->total_bytes); + + adapter->tx_itr = igb_update_itr(adapter, + adapter->tx_itr, + q_vector->tx_ring->total_packets, + q_vector->tx_ring->total_bytes); + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 56; /* aka 70,000 ints/sec */ + break; + case low_latency: + new_itr = 196; /* aka 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = 980; /* aka 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + q_vector->rx_ring->total_bytes = 0; + q_vector->rx_ring->total_packets = 0; + q_vector->tx_ring->total_bytes = 0; + q_vector->tx_ring->total_packets = 0; + + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : + new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. 
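The "aka N ints/sec" comments in igb_set_itr below are consistent with the interrupt throttle interval being counted in roughly 256 ns units; that granularity is an assumption here, not stated in the patch. A quick check:

#include <stdio.h>

/* assumes an ITR tick of ~256 ns, inferred from the driver's comments */
static unsigned int ints_per_sec(unsigned int itr_val)
{
	return 1000000000u / (itr_val * 256u);
}

int main(void)
{
	printf("itr  56 -> ~%u ints/s\n", ints_per_sec(56));	/* ~70000 */
	printf("itr 196 -> ~%u ints/s\n", ints_per_sec(196));	/* ~20000 */
	printf("itr 980 -> ~%u ints/s\n", ints_per_sec(980));	/* ~4000  */
	return 0;
}

Under that assumption, 56, 196 and 980 come out near 70,000, 20,000 and 4,000 interrupts per second, matching the in-code comments.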
+ */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +#define IGB_TX_FLAGS_CSUM 0x00000001 +#define IGB_TX_FLAGS_VLAN 0x00000002 +#define IGB_TX_FLAGS_TSO 0x00000004 +#define IGB_TX_FLAGS_IPV4 0x00000008 +#define IGB_TX_FLAGS_TSTAMP 0x00000010 +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +static inline int igb_tso_adv(struct igb_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) +{ + struct e1000_adv_tx_context_desc *context_desc; + unsigned int i; + int err; + struct igb_buffer *buffer_info; + u32 info = 0, tu_cmd = 0; + u32 mss_l4len_idx; + u8 l4len; + + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + i = tx_ring->next_to_use; + + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i); + /* VLAN MACLEN IPLEN */ + if (tx_flags & IGB_TX_FLAGS_VLAN) + info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); + *hdr_len += skb_network_offset(skb); + info |= skb_network_header_len(skb); + *hdr_len += skb_network_header_len(skb); + context_desc->vlan_macip_lens = cpu_to_le32(info); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + + if (skb->protocol == htons(ETH_P_IP)) + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + + /* MSS L4LEN IDX */ + mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); + + /* For 82575, context index must be unique per ring. 
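The context descriptor's mss_l4len_idx word built in igb_tso_adv below packs the MSS, the L4 header length and the ring index into one 32-bit field. A standalone model, assuming the era's shift values E1000_ADVTXD_MSS_SHIFT = 16 and E1000_ADVTXD_L4LEN_SHIFT = 8:

#include <stdio.h>
#include <stdint.h>

#define ADVTXD_MSS_SHIFT	16	/* assumed E1000_ADVTXD_MSS_SHIFT */
#define ADVTXD_L4LEN_SHIFT	8	/* assumed E1000_ADVTXD_L4LEN_SHIFT */

int main(void)
{
	uint32_t mss = 1448, l4len = 20 /* TCP header */, ring_idx = 3;
	uint32_t mss_l4len_idx = (mss << ADVTXD_MSS_SHIFT) |
				 (l4len << ADVTXD_L4LEN_SHIFT) |
				 (ring_idx << 4);	/* 82575: unique per ring */

	printf("mss_l4len_idx = 0x%08x\n", mss_l4len_idx);	/* 0x05a81430 */
	return 0;
}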
*/ + if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + context_desc->seqnum_seed = 0; + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = 0; + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; +} + +static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags) +{ + struct e1000_adv_tx_context_desc *context_desc; + struct device *dev = tx_ring->dev; + struct igb_buffer *buffer_info; + u32 info = 0, tu_cmd = 0; + unsigned int i; + + if ((skb->ip_summed == CHECKSUM_PARTIAL) || + (tx_flags & IGB_TX_FLAGS_VLAN)) { + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i); + + if (tx_flags & IGB_TX_FLAGS_VLAN) + info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); + + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); + if (skb->ip_summed == CHECKSUM_PARTIAL) + info |= skb_network_header_len(skb); + + context_desc->vlan_macip_lens = cpu_to_le32(info); + + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + __be16 protocol; + + if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { + const struct vlan_ethhdr *vhdr = + (const struct vlan_ethhdr*)skb->data; + + protocol = vhdr->h_vlan_encapsulated_proto; + } else { + protocol = skb->protocol; + } + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + else if (ip_hdr(skb)->protocol == IPPROTO_SCTP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX what about other V6 headers?? 
*/ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; + break; + default: + if (unlikely(net_ratelimit())) + dev_warn(dev, + "partial checksum but proto=%x!\n", + skb->protocol); + break; + } + } + + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + context_desc->seqnum_seed = 0; + if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) + context_desc->mss_l4len_idx = + cpu_to_le32(tx_ring->reg_idx << 4); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = 0; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +#define IGB_MAX_TXD_PWR 16 +#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) + +static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb, + unsigned int first) +{ + struct igb_buffer *buffer_info; + struct device *dev = tx_ring->dev; + unsigned int hlen = skb_headlen(skb); + unsigned int count = 0, i; + unsigned int f; + u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1; + + i = tx_ring->next_to_use; + + buffer_info = &tx_ring->buffer_info[i]; + BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD); + buffer_info->length = hlen; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = dma_map_single(dev, skb->data, hlen, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, buffer_info->dma)) + goto dma_error; + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f]; + unsigned int len = frag->size; + + count++; + i++; + if (i == tx_ring->count) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + BUG_ON(len >= IGB_MAX_DATA_PER_TXD); + buffer_info->length = len; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->mapped_as_page = true; + buffer_info->dma = dma_map_page(dev, + frag->page, + frag->page_offset, + len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, buffer_info->dma)) + goto dma_error; + + } + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags; + /* multiply data chunks by size of headers */ + tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len; + tx_ring->buffer_info[i].gso_segs = gso_segs; + tx_ring->buffer_info[first].next_to_watch = i; + + return ++count; + +dma_error: + dev_err(dev, "TX DMA map failed\n"); + + /* clear timestamp and dma mappings for failed buffer_info mapping */ + buffer_info->dma = 0; + buffer_info->time_stamp = 0; + buffer_info->length = 0; + buffer_info->next_to_watch = 0; + buffer_info->mapped_as_page = false; + + /* clear timestamp and dma mappings for remaining portion of packet */ + while (count--) { + if (i == 0) + i = tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + igb_unmap_and_free_tx_resource(tx_ring, buffer_info); + } + + return 0; +} + +static inline void igb_tx_queue_adv(struct igb_ring *tx_ring, + u32 tx_flags, int count, u32 paylen, + u8 hdr_len) +{ + union e1000_adv_tx_desc *tx_desc; + struct igb_buffer *buffer_info; + u32 olinfo_status = 0, cmd_type_len; + unsigned int i = tx_ring->next_to_use; + + cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | + E1000_ADVTXD_DCMD_DEXT); + + if (tx_flags & IGB_TX_FLAGS_VLAN) + cmd_type_len |= E1000_ADVTXD_DCMD_VLE; + + if (tx_flags & IGB_TX_FLAGS_TSTAMP) + cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP; + + if (tx_flags & IGB_TX_FLAGS_TSO) { + cmd_type_len |= E1000_ADVTXD_DCMD_TSE; + + /* insert tcp checksum */ + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + + /* insert ip 
checksum */ + if (tx_flags & IGB_TX_FLAGS_IPV4) + olinfo_status |= E1000_TXD_POPTS_IXSM << 8; + + } else if (tx_flags & IGB_TX_FLAGS_CSUM) { + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + } + + if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) && + (tx_flags & (IGB_TX_FLAGS_CSUM | + IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_VLAN))) + olinfo_status |= tx_ring->reg_idx << 4; + + olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); + + do { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type_len | buffer_info->length); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + count--; + i++; + if (i == tx_ring->count) + i = 0; + } while (count > 0); + + tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD); + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, tx_ring->tail); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems */ + mmiowb(); +} + +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. */ + smp_mb(); + + /* We need to check again in case another CPU has just + * made room available. */ + if (igb_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) +{ + if (igb_desc_unused(tx_ring) >= size) + return 0; + return __igb_maybe_stop_tx(tx_ring, size); +} + +netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, + struct igb_ring *tx_ring) +{ + int tso = 0, count; + u32 tx_flags = 0; + u16 first; + u8 hdr_len = 0; + + /* need: 1 descriptor per page, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for skb->data, + * + 1 desc for context descriptor, + * otherwise try next time */ + if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGB_TX_FLAGS_TSTAMP; + } + + if (vlan_tx_tag_present(skb)) { + tx_flags |= IGB_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); + } + + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= IGB_TX_FLAGS_IPV4; + + first = tx_ring->next_to_use; + if (skb_is_gso(skb)) { + tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len); + + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } + + if (tso) + tx_flags |= IGB_TX_FLAGS_TSO; + else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) && + (skb->ip_summed == CHECKSUM_PARTIAL)) + tx_flags |= IGB_TX_FLAGS_CSUM; + + /* + * count reflects descriptors mapped, if 0 or less then mapping error + * has occurred and we need to rewind the descriptor queue + */ + count = igb_tx_map_adv(tx_ring, skb, first); + if (!count) { + 
dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + return NETDEV_TX_OK; + } + + igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len); + + /* Make sure there is space in the ring for the next send. */ + igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4); + + return NETDEV_TX_OK; +} + +static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_ring *tx_ring; + int r_idx = 0; + + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); + tx_ring = adapter->multi_tx_table[r_idx]; + + /* This goes back to the question of how to logically map a tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. */ + return igb_xmit_frame_ring_adv(skb, tx_ring); +} + +/** + * igb_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void igb_tx_timeout(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + + if (hw->mac.type == e1000_82580) + hw->dev_spec._82575.global_device_reset = true; + + schedule_work(&adapter->reset_task); + wr32(E1000_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +static void igb_reset_task(struct work_struct *work) +{ + struct igb_adapter *adapter; + adapter = container_of(work, struct igb_adapter, reset_task); + + igb_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + igb_reinit_locked(adapter); +} + +/** + * igb_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + **/ +static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, &adapter->stats64); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); + + return stats; +} + +/** + * igb_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int igb_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + u32 rx_buffer_len, i; + + if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { + dev_err(&pdev->dev, "Invalid MTU setting\n"); + return -EINVAL; + } + + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + dev_err(&pdev->dev, "MTU > 9216 not supported.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + msleep(1); + + /* igb_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + */ + + if (adapter->hw.mac.type == e1000_82580) + max_frame += IGB_TS_HDR_LEN; + + if (max_frame <= IGB_RXBUFFER_1024) + rx_buffer_len = IGB_RXBUFFER_1024; + else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) + rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else + rx_buffer_len = IGB_RXBUFFER_128; + + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN)) + rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN; + + if ((adapter->hw.mac.type == e1000_82580) && + (rx_buffer_len == IGB_RXBUFFER_128)) + rx_buffer_len += IGB_RXBUFFER_64; + + if (netif_running(netdev)) + igb_down(adapter); + + dev_info(&pdev->dev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len; + + if (netif_running(netdev)) + igb_up(adapter); + else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + + return 0; +} + +/** + * igb_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ + +void igb_update_stats(struct igb_adapter *adapter, + struct rtnl_link_stats64 *net_stats) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 reg, mpc; + u16 phy_tmp; + int i; + u64 bytes, packets; + unsigned int start; + u64 _bytes, _packets; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* + * Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + bytes = 0; + packets = 0; + for (i = 0; i < adapter->num_rx_queues; i++) { + u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; + struct igb_ring *ring = adapter->rx_ring[i]; + + ring->rx_stats.drops += rqdpc_tmp; + net_stats->rx_fifo_errors += rqdpc_tmp; + + do { + start = u64_stats_fetch_begin_bh(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; + do { + start = u64_stats_fetch_begin_bh(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + + /* read stats registers */ + adapter->stats.crcerrs += rd32(E1000_CRCERRS); + adapter->stats.gprc += rd32(E1000_GPRC); + adapter->stats.gorc += rd32(E1000_GORCL); + rd32(E1000_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(E1000_BPRC); + adapter->stats.mprc += rd32(E1000_MPRC); + adapter->stats.roc += rd32(E1000_ROC); + + adapter->stats.prc64 += rd32(E1000_PRC64); + adapter->stats.prc127 += rd32(E1000_PRC127); + adapter->stats.prc255 += rd32(E1000_PRC255); + adapter->stats.prc511 += rd32(E1000_PRC511); + adapter->stats.prc1023 += rd32(E1000_PRC1023); + adapter->stats.prc1522 += rd32(E1000_PRC1522); + adapter->stats.symerrs += rd32(E1000_SYMERRS); + adapter->stats.sec += rd32(E1000_SEC); + + mpc = rd32(E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(E1000_SCC); + adapter->stats.ecol += 
rd32(E1000_ECOL); + adapter->stats.mcc += rd32(E1000_MCC); + adapter->stats.latecol += rd32(E1000_LATECOL); + adapter->stats.dc += rd32(E1000_DC); + adapter->stats.rlec += rd32(E1000_RLEC); + adapter->stats.xonrxc += rd32(E1000_XONRXC); + adapter->stats.xontxc += rd32(E1000_XONTXC); + adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); + adapter->stats.xofftxc += rd32(E1000_XOFFTXC); + adapter->stats.fcruc += rd32(E1000_FCRUC); + adapter->stats.gptc += rd32(E1000_GPTC); + adapter->stats.gotc += rd32(E1000_GOTCL); + rd32(E1000_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(E1000_RNBC); + adapter->stats.ruc += rd32(E1000_RUC); + adapter->stats.rfc += rd32(E1000_RFC); + adapter->stats.rjc += rd32(E1000_RJC); + adapter->stats.tor += rd32(E1000_TORH); + adapter->stats.tot += rd32(E1000_TOTH); + adapter->stats.tpr += rd32(E1000_TPR); + + adapter->stats.ptc64 += rd32(E1000_PTC64); + adapter->stats.ptc127 += rd32(E1000_PTC127); + adapter->stats.ptc255 += rd32(E1000_PTC255); + adapter->stats.ptc511 += rd32(E1000_PTC511); + adapter->stats.ptc1023 += rd32(E1000_PTC1023); + adapter->stats.ptc1522 += rd32(E1000_PTC1522); + + adapter->stats.mptc += rd32(E1000_MPTC); + adapter->stats.bptc += rd32(E1000_BPTC); + + adapter->stats.tpt += rd32(E1000_TPT); + adapter->stats.colc += rd32(E1000_COLC); + + adapter->stats.algnerrc += rd32(E1000_ALGNERRC); + /* read internal phy specific stats */ + reg = rd32(E1000_CTRL_EXT); + if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { + adapter->stats.rxerrc += rd32(E1000_RXERRC); + adapter->stats.tncrs += rd32(E1000_TNCRS); + } + + adapter->stats.tsctc += rd32(E1000_TSCTC); + adapter->stats.tsctfc += rd32(E1000_TSCTFC); + + adapter->stats.iac += rd32(E1000_IAC); + adapter->stats.icrxoc += rd32(E1000_ICRXOC); + adapter->stats.icrxptc += rd32(E1000_ICRXPTC); + adapter->stats.icrxatc += rd32(E1000_ICRXATC); + adapter->stats.ictxptc += rd32(E1000_ICTXPTC); + adapter->stats.ictxatc += rd32(E1000_ICTXATC); + adapter->stats.ictxqec += rd32(E1000_ICTXQEC); + adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->phy.media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + } + + /* Management Stats */ + adapter->stats.mgptc += rd32(E1000_MGTPTC); + adapter->stats.mgprc += rd32(E1000_MGTPRC); + adapter->stats.mgpdc += rd32(E1000_MGTPDC); + + /* OS2BMC Stats */ + reg = 
rd32(E1000_MANC); + if (reg & E1000_MANC_EN_BMC2OS) { + adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); + adapter->stats.o2bspc += rd32(E1000_O2BSPC); + adapter->stats.b2ospc += rd32(E1000_B2OSPC); + adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); + } +} + +static irqreturn_t igb_msix_other(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct e1000_hw *hw = &adapter->hw; + u32 icr = rd32(E1000_ICR); + /* reading ICR causes bit 31 of EICR to be cleared */ + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + /* The DMA Out of Sync is also an indication of a spoof event + * in IOV mode. Check the Wrong VM Behavior register to + * see if it is really a spoof event. */ + igb_check_wvbr(adapter); + } + + /* Check for a mailbox event */ + if (icr & E1000_ICR_VMMB) + igb_msg_task(adapter); + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (adapter->vfs_allocated_count) + wr32(E1000_IMS, E1000_IMS_LSC | + E1000_IMS_VMMB | + E1000_IMS_DOUTSYNC); + else + wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC); + wr32(E1000_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igb_write_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 itr_val = q_vector->itr_val & 0x7FFC; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = 0x4; + + if (adapter->hw.mac.type == e1000_82575) + itr_val |= itr_val << 16; + else + itr_val |= 0x8000000; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igb_msix_ring(int irq, void *data) +{ + struct igb_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt.
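The igb_write_itr() helper above packs the moderation interval differently per MAC generation before writing it to the per-vector EITR register. A standalone sketch of just the packing, derived from the code above; the purpose of the 0x8000000 control bit is not spelled out in this hunk, so the comment on it is an assumption:

    #include <stdint.h>

    /* Sketch of igb_write_itr()'s packing.  82575 mirrors the 16-bit
     * interval into both halves of the register; later MACs instead OR
     * in a control bit (0x8000000 in the code above), presumably so the
     * new interval takes effect without waiting out the current one. */
    static uint32_t igb_pack_itr(uint32_t itr_val, int is_82575)
    {
        itr_val &= 0x7FFC;              /* interval field only */
        if (!itr_val)
            itr_val = 0x4;              /* never write a zero interval */
        return is_82575 ? (itr_val | itr_val << 16)
                        : (itr_val | 0x8000000);
    }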
*/ + igb_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_IGB_DCA +static void igb_update_dca(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + int cpu = get_cpu(); + + if (q_vector->cpu == cpu) + goto out_no_update; + + if (q_vector->tx_ring) { + int q = q_vector->tx_ring->reg_idx; + u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); + if (hw->mac.type == e1000_82575) { + dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; + dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); + } else { + dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; + dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << + E1000_DCA_TXCTRL_CPUID_SHIFT; + } + dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; + wr32(E1000_DCA_TXCTRL(q), dca_txctrl); + } + if (q_vector->rx_ring) { + int q = q_vector->rx_ring->reg_idx; + u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); + if (hw->mac.type == e1000_82575) { + dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; + dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); + } else { + dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; + dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << + E1000_DCA_RXCTRL_CPUID_SHIFT; + } + dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; + dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; + dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; + wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); + } + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +static void igb_setup_dca(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) + return; + + /* Always use CB2 mode, difference is masked in the CB driver. */ + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + igb_update_dca(adapter->q_vector[i]); + } +} + +static int __igb_notify_dca(struct device *dev, void *data) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + unsigned long event = *(unsigned long *)data; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if already enabled, don't do it again */ + if (adapter->flags & IGB_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + break; + } + /* Fall Through since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { + /* without this a class_device is left + * hanging around in the sysfs model */ + dca_remove_requester(dev); + dev_info(&pdev->dev, "DCA disabled\n"); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + } + break; + } + + return 0; +} + +static int igb_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, + __igb_notify_dca); + + return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE; +} +#endif /* CONFIG_IGB_DCA */ + +static void igb_ping_all_vfs(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0 ; i < adapter->vfs_allocated_count; i++) { + ping = E1000_PF_CONTROL_MSG; + if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) + ping |= E1000_VT_MSGTYPE_CTS; + igb_write_mbx(hw, &ping, 1, i); + } +} + +static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr = rd32(E1000_VMOLR(vf)); + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | + IGB_VF_FLAG_MULTI_PROMISC); + vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { + vmolr |= E1000_VMOLR_MPME; + vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; + *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; + } else { + /* + * if we have hashes and we are clearing a multicast promisc + * flag we need to write the hashes to the MTA as this step + * was previously skipped + */ + if (vf_data->num_vf_mc_hashes > 30) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + int j; + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + } + + wr32(E1000_VMOLR(vf), vmolr); + + /* there are flags left unprocessed, likely not supported */ + if (*msgbuf & E1000_VT_MSGINFO_MASK) + return -EINVAL; + + return 0; + +} + +static int igb_set_vf_multicasts(struct igb_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + int i; + + /* salt away the number of multicast addresses assigned + * to this VF for later use to restore when the PF multicast + * list changes + */ + vf_data->num_vf_mc_hashes = n; + + /* only up to 30 hash values supported */ + if (n > 30) + n = 30; + + /* store the hashes for later use */ + for (i = 0; i < n; i++) + vf_data->vf_mc_hashes[i] = hash_list[i]; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); + + return 0; +} + +static void igb_restore_vf_multicasts(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data; + int i, j; + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + u32 vmolr = rd32(E1000_VMOLR(i)); + vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + vf_data = &adapter->vf_data[i]; + + if ((vf_data->num_vf_mc_hashes > 30) || + (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + wr32(E1000_VMOLR(i), vmolr); + } +} + +static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pool_mask, reg, vid; + int i; + + pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { + reg = rd32(E1000_VLVF(i)); + + /* remove the vf from the pool */ + reg &= ~pool_mask; + + /* if pool is empty then remove entry from vfta; note the vid + * must be read out before the entry is zeroed */ + if (!(reg & E1000_VLVF_POOLSEL_MASK) && + (reg & E1000_VLVF_VLANID_ENABLE)) { + vid = reg & E1000_VLVF_VLANID_MASK; + igb_vfta_set(hw, vid, false); + reg = 0; + } + +
wr32(E1000_VLVF(i), reg); + } + + adapter->vf_data[vf].vlans_enabled = 0; +} + +static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg, i; + + /* The vlvf table only exists on 82576 hardware and newer */ + if (hw->mac.type < e1000_82576) + return -1; + + /* we only need to do this if VMDq is enabled */ + if (!adapter->vfs_allocated_count) + return -1; + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { + reg = rd32(E1000_VLVF(i)); + if ((reg & E1000_VLVF_VLANID_ENABLE) && + vid == (reg & E1000_VLVF_VLANID_MASK)) + break; + } + + if (add) { + if (i == E1000_VLVF_ARRAY_SIZE) { + /* Did not find a matching VLAN ID entry that was + * enabled. Search for a free filter entry, i.e. + * one without the enable bit set + */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { + reg = rd32(E1000_VLVF(i)); + if (!(reg & E1000_VLVF_VLANID_ENABLE)) + break; + } + } + if (i < E1000_VLVF_ARRAY_SIZE) { + /* Found an enabled/available entry */ + reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + + /* if !enabled we need to set this up in vfta */ + if (!(reg & E1000_VLVF_VLANID_ENABLE)) { + /* add VID to filter table */ + igb_vfta_set(hw, vid, true); + reg |= E1000_VLVF_VLANID_ENABLE; + } + reg &= ~E1000_VLVF_VLANID_MASK; + reg |= vid; + wr32(E1000_VLVF(i), reg); + + /* do not modify RLPML for PF devices */ + if (vf >= adapter->vfs_allocated_count) + return 0; + + if (!adapter->vf_data[vf].vlans_enabled) { + u32 size; + reg = rd32(E1000_VMOLR(vf)); + size = reg & E1000_VMOLR_RLPML_MASK; + size += 4; + reg &= ~E1000_VMOLR_RLPML_MASK; + reg |= size; + wr32(E1000_VMOLR(vf), reg); + } + + adapter->vf_data[vf].vlans_enabled++; + return 0; + } + } else { + if (i < E1000_VLVF_ARRAY_SIZE) { + /* remove vf from the pool */ + reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf)); + /* if pool is empty then remove entry from vfta */ + if (!(reg & E1000_VLVF_POOLSEL_MASK)) { + reg = 0; + igb_vfta_set(hw, vid, false); + } + wr32(E1000_VLVF(i), reg); + + /* do not modify RLPML for PF devices */ + if (vf >= adapter->vfs_allocated_count) + return 0; + + adapter->vf_data[vf].vlans_enabled--; + if (!adapter->vf_data[vf].vlans_enabled) { + u32 size; + reg = rd32(E1000_VMOLR(vf)); + size = reg & E1000_VMOLR_RLPML_MASK; + size -= 4; + reg &= ~E1000_VMOLR_RLPML_MASK; + reg |= size; + wr32(E1000_VMOLR(vf), reg); + } + } + } + return 0; +} + +static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + + if (vid) + wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); + else + wr32(E1000_VMVIR(vf), 0); +} + +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos) +{ + int err = 0; + struct igb_adapter *adapter = netdev_priv(netdev); + + if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + if (vlan || qos) { + err = igb_vlvf_set(adapter, vlan, !!vlan, vf); + if (err) + goto out; + igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vlan); + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + } else { + 
igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + igb_set_vmvir(adapter, vlan, vf); + igb_set_vmolr(adapter, vf, true); + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; + } +out: + return err; +} + +static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + + return igb_vlvf_set(adapter, vid, add, vf); +} + +static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) +{ + /* clear flags - except flag that indicates PF has set the MAC */ + adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; + adapter->vf_data[vf].last_nack = jiffies; + + /* reset offloads to defaults */ + igb_set_vmolr(adapter, vf, true); + + /* reset vlans for device */ + igb_clear_vf_vfta(adapter, vf); + if (adapter->vf_data[vf].pf_vlan) + igb_ndo_set_vf_vlan(adapter->netdev, vf, + adapter->vf_data[vf].pf_vlan, + adapter->vf_data[vf].pf_qos); + else + igb_clear_vf_vfta(adapter, vf); + + /* reset multicast table array for vf */ + adapter->vf_data[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); +} + +static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) +{ + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + + /* generate a new mac address as we were hotplug removed/added */ + if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) + random_ether_addr(vf_mac); + + /* process remaining reset events */ + igb_vf_reset(adapter, vf); +} + +static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + u32 reg, msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* process all the same items cleared in a function level reset */ + igb_vf_reset(adapter, vf); + + /* set vf mac address */ + igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); + + /* enable transmit and receive for vf */ + reg = rd32(E1000_VFTE); + wr32(E1000_VFTE, reg | (1 << vf)); + reg = rd32(E1000_VFRE); + wr32(E1000_VFRE, reg | (1 << vf)); + + adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, 6); + igb_write_mbx(hw, msgbuf, 3, vf); +} + +static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) +{ + /* + * The VF MAC Address is stored in a packed array of bytes + * starting at the second 32 bit word of the msg array + */ + unsigned char *addr = (char *)&msg[1]; + int err = -1; + + if (is_valid_ether_addr(addr)) + err = igb_set_vf_mac(adapter, vf, addr); + + return err; +} + +static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 msg = E1000_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!(vf_data->flags & IGB_VF_FLAG_CTS) && + time_after(jiffies, vf_data->last_nack + (2 * HZ))) { + igb_write_mbx(hw, &msg, 1, vf); + vf_data->last_nack = jiffies; + } +} + +static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct pci_dev *pdev = adapter->pdev; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + s32 retval; 
+ + retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); + + if (retval) { + /* if receive failed revoke VF CTS stats and restart init */ + dev_err(&pdev->dev, "Error receiving message from VF\n"); + vf_data->flags &= ~IGB_VF_FLAG_CTS; + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + return; + goto out; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) + return; + + /* + * until the vf completes a reset it should not be + * allowed to start any configuration. + */ + + if (msgbuf[0] == E1000_VF_RESET) { + igb_vf_reset_msg(adapter, vf); + return; + } + + if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + return; + retval = -1; + goto out; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case E1000_VF_SET_MAC_ADDR: + retval = -EINVAL; + if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC)) + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); + else + dev_warn(&pdev->dev, + "VF %d attempted to override administratively " + "set MAC address\nReload the VF driver to " + "resume operations\n", vf); + break; + case E1000_VF_SET_PROMISC: + retval = igb_set_vf_promisc(adapter, msgbuf, vf); + break; + case E1000_VF_SET_MULTICAST: + retval = igb_set_vf_multicasts(adapter, msgbuf, vf); + break; + case E1000_VF_SET_LPE: + retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); + break; + case E1000_VF_SET_VLAN: + retval = -1; + if (vf_data->pf_vlan) + dev_warn(&pdev->dev, + "VF %d attempted to override administratively " + "set VLAN tag\nReload the VF driver to " + "resume operations\n", vf); + else + retval = igb_set_vf_vlan(adapter, msgbuf, vf); + break; + default: + dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); + retval = -1; + break; + } + + msgbuf[0] |= E1000_VT_MSGTYPE_CTS; +out: + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; + else + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + + igb_write_mbx(hw, msgbuf, 1, vf); +} + +static void igb_msg_task(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vf; + + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + /* process any reset requests */ + if (!igb_check_for_rst(hw, vf)) + igb_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!igb_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!igb_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(adapter, vf); + } +} + +/** + * igb_set_uta - Set unicast filter table address + * @adapter: board private structure + * + * The unicast table address is a register array of 32-bit registers. 
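The mailbox dispatch in igb_rcv_msg_from_vf() above keys on the low 16 bits of msgbuf[0] for the command while the high bits carry the handshake flags. A hedged sketch of how the reply word is assembled; the flag values are placeholders, since the real E1000_VT_MSGTYPE_* constants live in the mailbox header and are not part of this hunk:

    #include <stdint.h>

    /* Placeholder flag values; see the driver's mailbox header for the
     * real E1000_VT_MSGTYPE_* definitions. */
    #define VT_MSGTYPE_ACK  0x80000000u     /* PF accepted the command */
    #define VT_MSGTYPE_NACK 0x40000000u     /* PF rejected the command */
    #define VT_MSGTYPE_CTS  0x20000000u     /* PF is clear to send */

    /* Mirrors the tail of igb_rcv_msg_from_vf(): CTS is only OR'd in
     * when the command went through the dispatch switch (the early
     * "goto out" error paths skip it); ACK/NACK reflect retval. */
    static uint32_t build_vf_reply(uint32_t cmd, int dispatched, int retval)
    {
        if (dispatched)
            cmd |= VT_MSGTYPE_CTS;
        return cmd | (retval ? VT_MSGTYPE_NACK : VT_MSGTYPE_ACK);
    }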
+ * The table is meant to be used in a way similar to how the MTA is used + * however due to certain limitations in the hardware it is necessary to + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + * enable bit to allow vlan tag stripping when promiscuous mode is enabled + **/ +static void igb_set_uta(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* The UTA table only exists on 82576 hardware and newer */ + if (hw->mac.type < e1000_82576) + return; + + /* we only need to do this if VMDq is enabled */ + if (!adapter->vfs_allocated_count) + return; + + for (i = 0; i < hw->mac.uta_reg_count; i++) + array_wr32(E1000_UTA, i, ~0); +} + +/** + * igb_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr_msi(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(E1000_ICR); + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igb_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No + * need for the IMC write */ + u32 icr = rd32(E1000_ICR); + if (!icr) + return IRQ_NONE; /* Not our interrupt */ + + igb_write_itr(q_vector); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + + if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) { + if (!adapter->msix_entries) + igb_set_itr(adapter); + else + igb_update_ring_itr(q_vector); + } + + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->msix_entries) + wr32(E1000_EIMS, q_vector->eims_value); + else + igb_irq_enable(adapter); + } +} + +/** + * igb_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + **/ +static int igb_poll(struct napi_struct *napi, int budget) +{ + struct igb_q_vector *q_vector = container_of(napi, + struct igb_q_vector, + napi); + int tx_clean_complete = 1, work_done = 0; + +#ifdef CONFIG_IGB_DCA + if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) + igb_update_dca(q_vector); +#endif + if (q_vector->tx_ring) + tx_clean_complete = igb_clean_tx_irq(q_vector); + + if (q_vector->rx_ring) + igb_clean_rx_irq_adv(q_vector, &work_done, budget); + + if (!tx_clean_complete) + work_done = budget; + + /* If not enough Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + igb_ring_irq_enable(q_vector); + } + + return work_done; +} + +/** + * igb_systim_to_hwtstamp - convert system time value to hw timestamp + * @adapter: board private structure + * @shhwtstamps: timestamp structure to update + * @regval: unsigned 64bit system time value. + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions + */ +static void igb_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *shhwtstamps, + u64 regval) +{ + u64 ns; + + /* + * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to + * 24 to match clock shift we setup earlier. 
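The fixup described above deserves a worked example: the 82580 reports whole nanoseconds at bit 0 of RX/TXSTMPL, while the driver's timecounter was initialized with a shift of 24 (the figure named in the comment), so raw readings are pre-shifted to look like cycles. A sketch, with the shift value taken from the comment rather than the headers:

    #include <stdint.h>

    #define IGB_82580_TSYNC_SHIFT 24    /* value per the comment above */

    /* Pre-scale a raw 82580 stamp so that the timecounter's
     * ns = (cycles * mult) >> shift arithmetic divides it back down. */
    static uint64_t igb_82580_prescale(uint64_t regval)
    {
        return regval << IGB_82580_TSYNC_SHIFT;
    }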
+ */ + if (adapter->hw.mac.type == e1000_82580) + regval <<= IGB_82580_TSYNC_SHIFT; + + ns = timecounter_cyc2time(&adapter->clock, regval); + timecompare_update(&adapter->compare, ns); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(ns); + shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns); +} + +/** + * igb_tx_hwtstamp - utility function which checks for TX time stamp + * @q_vector: pointer to q_vector containing needed info + * @buffer_info: pointer to igb_buffer structure + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow one such packet into the queue. + */ +static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; + + /* if skb does not support hw timestamp or TX stamp not valid exit */ + if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) || + !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) + return; + + regval = rd32(E1000_TXSTMPL); + regval |= (u64)rd32(E1000_TXSTMPH) << 32; + + igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(buffer_info->skb, &shhwtstamps); +} + +/** + * igb_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * returns true if ring is completely cleaned + **/ +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *tx_ring = q_vector->tx_ring; + struct net_device *netdev = tx_ring->netdev; + struct e1000_hw *hw = &adapter->hw; + struct igb_buffer *buffer_info; + union e1000_adv_tx_desc *tx_desc, *eop_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i, eop, count = 0; + bool cleaned = false; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop); + + while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + rmb(); /* read buffer_info after eop_desc status */ + for (cleaned = false; !cleaned; count++) { + tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (buffer_info->skb) { + total_bytes += buffer_info->bytecount; + /* gso_segs is currently only valid for tcp */ + total_packets += buffer_info->gso_segs; + igb_tx_hwtstamp(q_vector, buffer_info); + } + + igb_unmap_and_free_tx_resource(tx_ring, buffer_info); + tx_desc->wb.status = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + if (unlikely(count && + netif_carrier_ok(netdev) && + igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean.
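The smp_mb() that follows pairs with the one in __igb_maybe_stop_tx() earlier in this patch: the transmit path stops the queue, fences, then re-checks for free descriptors, while the clean path publishes next_to_clean, fences, then re-checks whether the queue is stopped; whichever side observes the other's update performs the wake. A userspace model of the handshake, with C11 atomics standing in for the kernel primitives (a sketch, not driver code):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool queue_stopped;
    static atomic_int  free_descs;

    /* transmit side, cf. __igb_maybe_stop_tx() */
    static void producer_stop(int need)
    {
        atomic_store(&queue_stopped, true);
        atomic_thread_fence(memory_order_seq_cst);   /* the smp_mb() */
        if (atomic_load(&free_descs) >= need)        /* raced: room appeared */
            atomic_store(&queue_stopped, false);
    }

    /* clean side, cf. the code around this comment */
    static void consumer_reclaim(int freed, int wake_thresh)
    {
        atomic_fetch_add(&free_descs, freed);
        atomic_thread_fence(memory_order_seq_cst);   /* the smp_mb() */
        if (atomic_load(&queue_stopped) &&
            atomic_load(&free_descs) >= wake_thresh)
            atomic_store(&queue_stopped, false);     /* wake the queue */
    }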
+ */ + smp_mb(); + if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + if (tx_ring->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + tx_ring->detect_tx_hung = false; + if (tx_ring->buffer_info[i].time_stamp && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + dev_err(tx_ring->dev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + readl(tx_ring->head), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->wb.status); + netif_stop_subqueue(netdev, tx_ring->queue_index); + } + } + tx_ring->total_bytes += total_bytes; + tx_ring->total_packets += total_packets; + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; + u64_stats_update_end(&tx_ring->tx_syncp); + return count < tx_ring->count; +} + +static inline void igb_rx_checksum_adv(struct igb_ring *ring, + u32 status_err, struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set or checksum is disabled through ethtool */ + if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) || + (status_err & E1000_RXD_STAT_IXSM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (status_err & + (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { + /* + * work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets, (aka let the stack check the crc32c) + */ + if ((skb->len == 60) && + (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err); +} + +static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, + struct sk_buff *skb) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + u64 regval; + + /* + * If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a shared tx_flags that we + * can turn into a skb_shared_hwtstamps. 
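Two delivery paths exist for RX stamps, as the code below shows: either the stamp is prepended to the packet itself (the TSIP status bit) or it is latched in the RXSTMPL/RXSTMPH register pair. A small sketch of the in-packet layout, matching the stamp[2]/stamp[3] indexing below; the le32_to_cpu conversions are dropped, so this assumes a little-endian host:

    #include <stdint.h>

    /* The NIC prepends a small header to the packet data; the 64-bit
     * stamp occupies the 3rd and 4th little-endian 32-bit words, low
     * half first.  The driver then strips IGB_TS_HDR_LEN bytes, as the
     * skb_pull() below does. */
    static uint64_t tsip_stamp(const uint32_t *pkt)   /* = skb->data */
    {
        return (uint64_t)pkt[2] | ((uint64_t)pkt[3] << 32);
    }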
+ */ + if (staterr & E1000_RXDADV_STAT_TSIP) { + u32 *stamp = (u32 *)skb->data; + regval = le32_to_cpu(*(stamp + 2)); + regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32; + skb_pull(skb, IGB_TS_HDR_LEN); + } else { + if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + regval = rd32(E1000_RXSTMPL); + regval |= (u64)rd32(E1000_RXSTMPH) << 32; + } + + igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +} +static inline u16 igb_get_hlen(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + /* HW will not DMA in data larger than the given buffer, even if it + * parses the (NFS, of course) header to be larger. In that case, it + * fills the header buffer and spills the rest into the page. + */ + u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & + E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > rx_ring->rx_buffer_len) + hlen = rx_ring->rx_buffer_len; + return hlen; +} + +static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, + int *work_done, int budget) +{ + struct igb_ring *rx_ring = q_vector->rx_ring; + struct net_device *netdev = rx_ring->netdev; + struct device *dev = rx_ring->dev; + union e1000_adv_rx_desc *rx_desc , *next_rxd; + struct igb_buffer *buffer_info , *next_buffer; + struct sk_buff *skb; + bool cleaned = false; + int cleaned_count = 0; + int current_node = numa_node_id(); + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i; + u32 staterr; + u16 length; + + i = rx_ring->next_to_clean; + buffer_info = &rx_ring->buffer_info[i]; + rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= budget) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + skb = buffer_info->skb; + prefetch(skb->data - NET_IP_ALIGN); + buffer_info->skb = NULL; + + i++; + if (i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); + prefetch(next_rxd); + next_buffer = &rx_ring->buffer_info[i]; + + length = le16_to_cpu(rx_desc->wb.upper.length); + cleaned = true; + cleaned_count++; + + if (buffer_info->dma) { + dma_unmap_single(dev, buffer_info->dma, + rx_ring->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) { + skb_put(skb, length); + goto send_up; + } + skb_put(skb, igb_get_hlen(rx_ring, rx_desc)); + } + + if (length) { + dma_unmap_page(dev, buffer_info->page_dma, + PAGE_SIZE / 2, DMA_FROM_DEVICE); + buffer_info->page_dma = 0; + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + buffer_info->page, + buffer_info->page_offset, + length); + + if ((page_count(buffer_info->page) != 1) || + (page_to_nid(buffer_info->page) != current_node)) + buffer_info->page = NULL; + else + get_page(buffer_info->page); + + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } + + if (!(staterr & E1000_RXD_STAT_EOP)) { + buffer_info->skb = next_buffer->skb; + buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + next_buffer->dma = 0; + goto next_desc; + } +send_up: + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS)) + igb_rx_hwtstamp(q_vector, staterr, skb); + total_bytes += skb->len; + total_packets++; + + igb_rx_checksum_adv(rx_ring, staterr, skb); + + skb->protocol = eth_type_trans(skb, netdev); + skb_record_rx_queue(skb, rx_ring->queue_index); + + if 
(staterr & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, vid); + } + napi_gro_receive(&q_vector->napi, skb); + +next_desc: + rx_desc->wb.upper.status_error = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGB_RX_BUFFER_WRITE) { + igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + rx_ring->next_to_clean = i; + cleaned_count = igb_desc_unused(rx_ring); + + if (cleaned_count) + igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); + + rx_ring->total_packets += total_packets; + rx_ring->total_bytes += total_bytes; + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; + u64_stats_update_end(&rx_ring->rx_syncp); + return cleaned; +} + +/** + * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ +void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) +{ + struct net_device *netdev = rx_ring->netdev; + union e1000_adv_rx_desc *rx_desc; + struct igb_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + int bufsz; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + bufsz = rx_ring->rx_buffer_len; + + while (cleaned_count--) { + rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); + + if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) { + if (!buffer_info->page) { + buffer_info->page = netdev_alloc_page(netdev); + if (unlikely(!buffer_info->page)) { + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.alloc_failed++; + u64_stats_update_end(&rx_ring->rx_syncp); + goto no_buffers; + } + buffer_info->page_offset = 0; + } else { + buffer_info->page_offset ^= PAGE_SIZE / 2; + } + buffer_info->page_dma = + dma_map_page(rx_ring->dev, buffer_info->page, + buffer_info->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + buffer_info->page_dma)) { + buffer_info->page_dma = 0; + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.alloc_failed++; + u64_stats_update_end(&rx_ring->rx_syncp); + goto no_buffers; + } + } + + skb = buffer_info->skb; + if (!skb) { + skb = netdev_alloc_skb_ip_align(netdev, bufsz); + if (unlikely(!skb)) { + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.alloc_failed++; + u64_stats_update_end(&rx_ring->rx_syncp); + goto no_buffers; + } + + buffer_info->skb = skb; + } + if (!buffer_info->dma) { + buffer_info->dma = dma_map_single(rx_ring->dev, + skb->data, + bufsz, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + buffer_info->dma)) { + buffer_info->dma = 0; + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.alloc_failed++; + u64_stats_update_end(&rx_ring->rx_syncp); + goto no_buffers; + } + } + /* Refresh the desc even if buffer_addrs didn't change because + * each write-back erases this info. 
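The refill loop above gets two receive buffers out of each page by flipping the mapped offset between the page's halves; a page is only reused while nobody else holds a reference to it and it is local to the current NUMA node (the page_count()/page_to_nid() checks in the clean routine). A toy program showing the offset ping-pong:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
        unsigned int off = 0;
        int i;

        /* same trick as buffer_info->page_offset ^= PAGE_SIZE / 2 above */
        for (i = 0; i < 4; i++) {
            printf("refill %d maps page offset %u\n", i, off);
            off ^= PAGE_SIZE / 2;    /* 0 -> 2048 -> 0 -> ... */
        }
        return 0;
    }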
*/ + if (bufsz < IGB_RXBUFFER_1024) { + rx_desc->read.pkt_addr = + cpu_to_le64(buffer_info->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); + } else { + rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); + rx_desc->read.hdr_addr = 0; + } + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i == 0) + i = (rx_ring->count - 1); + else + i--; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + writel(i, rx_ring->tail); + } +} + +/** + * igb_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) + return -EIO; + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * igb_hwtstamp_ioctl - control hardware time stamping + * @netdev: + * @ifreq: + * @cmd: + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4".
+ * + **/ +static int igb_hwtstamp_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct hwtstamp_config config; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 tsync_rx_cfg = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_ALL: + /* + * register TSYNCRXCFG must be set, therefore it is not + * possible to time stamp both Sync and Delay_Req messages + * => fall back to time stamping all packets + */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + is_l4 = true; + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + is_l4 = true; + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + break; + default: + return -ERANGE; + } + + if (hw->mac.type == e1000_82575) { + if (tsync_rx_ctl | tsync_tx_ctl) + return -EINVAL; + return 0; + } + + /* + * Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as + * long as one rx filter was configured. 
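For context, the ioctl implemented here is driven from userspace through SIOCSHWTSTAMP, with the target interface named in the ifreq; a minimal caller using the standard linux/net_tstamp.h interface (not part of this patch, error handling trimmed, sock being any open AF_INET datagram socket):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/net_tstamp.h>

    /* Ask the NIC for TX stamps plus V2-event RX stamps; the kernel
     * writes the rx_filter it actually applied back into cfg. */
    static int enable_hwtstamp(int sock, const char *ifname)
    {
        struct hwtstamp_config cfg = {
            .tx_type   = HWTSTAMP_TX_ON,
            .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
        };
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }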
+ */ + if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + } + + /* enable/disable TX */ + regval = rd32(E1000_TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(E1000_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = rd32(E1000_TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(E1000_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); + + /* define ethertype filter for timestamped packets */ + if (is_l2) + wr32(E1000_ETQF(3), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(E1000_ETQF(3), 0); + +#define PTP_PORT 319 + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ + | E1000_FTQF_VF_BP /* VF not compared */ + | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ + | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + + wr32(E1000_IMIR(3), htons(PTP_PORT)); + wr32(E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ + wr32(E1000_SPQF(3), htons(PTP_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } + wr32(E1000_FTQF(3), ftqf); + } else { + wr32(E1000_FTQF(3), E1000_FTQF_MASK); + } + wrfl(); + + adapter->hwtstamp_config = config; + + /* clear TX/RX time stamp registers, just to be sure */ + regval = rd32(E1000_TXSTMPH); + regval = rd32(E1000_RXSTMPH); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + +/** + * igb_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return igb_mii_ioctl(netdev, ifr, cmd); + case SIOCSHWTSTAMP: + return igb_hwtstamp_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + u16 cap_offset; + + cap_offset = adapter->pdev->pcie_cap; + if (!cap_offset) + return -E1000_ERR_CONFIG; + + pci_read_config_word(adapter->pdev, cap_offset + reg, value); + + return 0; +} + +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + u16 cap_offset; + + cap_offset = adapter->pdev->pcie_cap; + if (!cap_offset) + return -E1000_ERR_CONFIG; + + pci_write_config_word(adapter->pdev, cap_offset + reg, *value); + + return 0; +} + +static void igb_vlan_mode(struct net_device *netdev, u32 features) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl; + + igb_irq_disable(adapter); + + if (features & NETIF_F_HW_VLAN_RX) { + /* enable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + + /* Disable CFI check */ + rctl = rd32(E1000_RCTL); + rctl &= ~E1000_RCTL_CFIEN; + wr32(E1000_RCTL, rctl); + } else { + /* disable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl &= ~E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + } + + igb_rlpml_set(adapter); + + if (!test_bit(__IGB_DOWN, &adapter->state)) + igb_irq_enable(adapter); +} + +static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + + /* attempt to add filter to vlvf array */ + igb_vlvf_set(adapter, vid, true, pf_id); + + /* add the filter since PF can receive vlans w/o entry in vlvf */ + igb_vfta_set(hw, vid, true); + + set_bit(vid, adapter->active_vlans); +} + +static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + s32 err; + + igb_irq_disable(adapter); + + if (!test_bit(__IGB_DOWN, &adapter->state)) + igb_irq_enable(adapter); + + /* remove vlan from VLVF table array */ + err = igb_vlvf_set(adapter, vid, false, pf_id); + + /* if vid was not present in VLVF just remove it from table */ + if (err) + igb_vfta_set(hw, vid, false); + + clear_bit(vid, adapter->active_vlans); +} + +static void igb_restore_vlan(struct igb_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + igb_vlan_rx_add_vid(adapter->netdev, vid); +} + +int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 Mbps full duplex */ + if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex =
ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + return 0; + +err_inval: + dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) + igb_close(netdev); + + igb_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + igb_setup_rctl(adapter); + igb_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = rd32(E1000_RCTL); + rctl |= E1000_RCTL_MPE; + wr32(E1000_RCTL, rctl); + } + + ctrl = rd32(E1000_CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC; + wr32(E1000_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igb_disable_pcie_master(hw); + + wr32(E1000_WUC, E1000_WUC_PME_EN); + wr32(E1000_WUFC, wufc); + } else { + wr32(E1000_WUC, 0); + wr32(E1000_WUFC, 0); + } + + *enable_wake = wufc || adapter->en_mng_pt; + if (!*enable_wake) + igb_power_down_link(adapter); + else + igb_power_up_link(adapter); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. */ + igb_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int igb_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __igb_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + +static int igb_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "igb: Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igb_init_interrupt_scheme(adapter)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
*/ + igb_get_hw_control(adapter); + + wr32(E1000_WUS, ~0); + + if (netif_running(netdev)) { + err = igb_open(netdev); + if (err) + return err; + } + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void igb_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igb_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void igb_netpoll(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int i; + + if (!adapter->msix_entries) { + struct igb_q_vector *q_vector = adapter->q_vector[0]; + igb_irq_disable(adapter); + napi_schedule(&q_vector->napi); + return; + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + wr32(E1000_EIMC, q_vector->eims_value); + napi_schedule(&q_vector->napi); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +/** + * igb_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igb_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igb_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igb_resume routine. + */ +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + pci_ers_result_t result; + int err; + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + igb_reset(adapter); + wr32(E1000_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status " + "failed 0x%0x\n", err); + /* non-fatal, continue */ + } + + return result; +} + +/** + * igb_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. Implementation resembles the + * second-half of the igb_resume routine. 
+ */ +static void igb_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (igb_up(adapter)) { + dev_err(&pdev->dev, "igb_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. */ + igb_get_hw_control(adapter); +} + +static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, + u8 qsel) +{ + u32 rar_low, rar_high; + struct e1000_hw *hw = &adapter->hw; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + + if (hw->mac.type == e1000_82575) + rar_high |= E1000_RAH_POOL_1 * qsel; + else + rar_high |= E1000_RAH_POOL_1 << qsel; + + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +static int igb_set_vf_mac(struct igb_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + struct e1000_hw *hw = &adapter->hw; + /* VF MAC addresses start at end of receive addresses and move + * towards the first, as a result a collision should not be possible */ + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + + memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); + + igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); + + return 0; +} + +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) + return -EINVAL; + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" + " change effective."); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + return igb_set_vf_mac(adapter, vf, mac); +} + +static int igb_link_mbps(int internal_link_speed) +{ + switch (internal_link_speed) { + case SPEED_100: + return 100; + case SPEED_1000: + return 1000; + default: + return 0; + } +} + +static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, + int link_speed) +{ + int rf_dec, rf_int; + u32 bcnrc_val; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); + rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; + + bcnrc_val = E1000_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & + E1000_RTTBCNRC_RF_INT_MASK); + bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ + wr32(E1000_RTTBCNRC, bcnrc_val); +} + +static void igb_check_vf_rate_limit(struct igb_adapter *adapter) +{ + int actual_link_speed, i; + bool reset_rate = false; + + /* VF TX rate limit was not set or not supported */ + if ((adapter->vf_rate_link_speed == 0) || + (adapter->hw.mac.type != e1000_82576)) + return; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if (actual_link_speed != adapter->vf_rate_link_speed) { + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. 
VF Transmit " + "rate is disabled\n"); + } + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + if (reset_rate) + adapter->vf_data[i].tx_rate = 0; + + igb_set_vf_rate_limit(&adapter->hw, i, + adapter->vf_data[i].tx_rate, + actual_link_speed); + } +} + +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int actual_link_speed; + + if (hw->mac.type != e1000_82576) + return -EOPNOTSUPP; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if ((vf >= adapter->vfs_allocated_count) || + (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || + (tx_rate < 0) || (tx_rate > actual_link_speed)) + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; + adapter->vf_data[vf].tx_rate = (u16)tx_rate; + igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); + + return 0; +} + +static int igb_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); + ivi->tx_rate = adapter->vf_data[vf].tx_rate; + ivi->vlan = adapter->vf_data[vf].pf_vlan; + ivi->qos = adapter->vf_data[vf].pf_qos; + return 0; +} + +static void igb_vmm_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + switch (hw->mac.type) { + case e1000_82575: + default: + /* replication is not supported for 82575 */ + return; + case e1000_82576: + /* notify HW that the MAC is adding vlan tags */ + reg = rd32(E1000_DTXCTL); + reg |= E1000_DTXCTL_VLAN_ADDED; + wr32(E1000_DTXCTL, reg); + case e1000_82580: + /* enable replication vlan tag stripping */ + reg = rd32(E1000_RPLOLR); + reg |= E1000_RPLOLR_STRVLAN; + wr32(E1000_RPLOLR, reg); + case e1000_i350: + /* none of the above registers are supported by i350 */ + break; + } + + if (adapter->vfs_allocated_count) { + igb_vmdq_set_loopback_pf(hw, true); + igb_vmdq_set_replication_pf(hw, true); + igb_vmdq_set_anti_spoofing_pf(hw, true, + adapter->vfs_allocated_count); + } else { + igb_vmdq_set_loopback_pf(hw, false); + igb_vmdq_set_replication_pf(hw, false); + } +} + +/* igb_main.c */ diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile new file mode 100644 index 0000000..0fa3db3 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/Makefile @@ -0,0 +1,38 @@ +################################################################################ +# +# Intel(R) 82576 Virtual Function Linux driver +# Copyright(c) 2009 - 2010 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". 
+# +# Contact Information: +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 82576 VF ethernet driver +# + +obj-$(CONFIG_IGBVF) += igbvf.o + +igbvf-objs := vf.o \ + mbx.o \ + ethtool.o \ + netdev.o + diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h new file mode 100644 index 0000000..79f2604 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -0,0 +1,125 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 1999 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* IVAR valid bit */ +#define E1000_IVAR_VALID 0x80 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Device Control */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 
/* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_MBX 15 + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +/* Additional Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ + +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c new file mode 100644 index 0000000..b0b14d6 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -0,0 +1,534 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for igbvf */ + +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> + +#include "igbvf.h" +#include <linux/if_vlan.h> + + +struct igbvf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; + int base_stat_offset; +}; + +#define IGBVF_STAT(current, base) \ + sizeof(((struct igbvf_adapter *)0)->current), \ + offsetof(struct igbvf_adapter, current), \ + offsetof(struct igbvf_adapter, base) + +static const struct igbvf_stats igbvf_gstrings_stats[] = { + { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) }, + { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) }, + { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) }, + { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) }, + { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) }, + { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) }, + { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) }, + { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) }, + { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) }, + { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) }, + { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) }, + { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) }, + { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) }, +}; + +#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats) + +static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)" +}; + +#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) + +static int igbvf_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status; + + ecmd->supported = SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_1000baseT_Full; + + ecmd->port = -1; + ecmd->transceiver = XCVR_DUMMY1; + + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + ethtool_cmd_speed_set(ecmd, SPEED_1000); + else if (status & E1000_STATUS_SPEED_100) + ethtool_cmd_speed_set(ecmd, SPEED_100); + else + ethtool_cmd_speed_set(ecmd, SPEED_10); + + if (status & E1000_STATUS_FD) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, -1); + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static int igbvf_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + return -EOPNOTSUPP; +} + +static void igbvf_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + return; +} + +static int igbvf_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + return -EOPNOTSUPP; +} + +static u32 igbvf_get_rx_csum(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + return !(adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED); +} + +static int igbvf_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (data) + adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED; + else + adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED; + + return 0; +} + +static u32 igbvf_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_IP_CSUM) != 0; +} + +static int igbvf_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= 
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + return 0; +} + +static int igbvf_set_tso(struct net_device *netdev, u32 data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } + + dev_info(&adapter->pdev->dev, "TSO is %s\n", + data ? "Enabled" : "Disabled"); + return 0; +} + +static u32 igbvf_get_msglevel(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void igbvf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int igbvf_get_regs_len(struct net_device *netdev) +{ +#define IGBVF_REGS_LEN 8 + return IGBVF_REGS_LEN * sizeof(u32); +} + +static void igbvf_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + + memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RDLEN(0)); + regs_buff[3] = er32(RDH(0)); + regs_buff[4] = er32(RDT(0)); + + regs_buff[5] = er32(TDLEN(0)); + regs_buff[6] = er32(TDH(0)); + regs_buff[7] = er32(TDT(0)); +} + +static int igbvf_get_eeprom_len(struct net_device *netdev) +{ + return 0; +} + +static int igbvf_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return -EOPNOTSUPP; +} + +static int igbvf_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return -EOPNOTSUPP; +} + +static void igbvf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + char firmware_version[32] = "N/A"; + + strncpy(drvinfo->driver, igbvf_driver_name, 32); + strncpy(drvinfo->version, igbvf_driver_version, 32); + strncpy(drvinfo->fw_version, firmware_version, 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->regdump_len = igbvf_get_regs_len(netdev); + drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); +} + +static void igbvf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct igbvf_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = IGBVF_MAX_RXD; + ring->tx_max_pending = IGBVF_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int igbvf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *temp_ring; + int err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); + new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); + new_tx_count = 
min(new_tx_count, (u32)IGBVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring->count) && + (new_rx_count == adapter->rx_ring->count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) + msleep(1); + + if (!netif_running(adapter->netdev)) { + adapter->tx_ring->count = new_tx_count; + adapter->rx_ring->count = new_rx_count; + goto clear_reset; + } + + temp_ring = vmalloc(sizeof(struct igbvf_ring)); + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igbvf_down(adapter); + + /* + * We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the tx and rx ring structs. + */ + if (new_tx_count != adapter->tx_ring->count) { + memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); + + temp_ring->count = new_tx_count; + err = igbvf_setup_tx_resources(adapter, temp_ring); + if (err) + goto err_setup; + + igbvf_free_tx_resources(adapter->tx_ring); + + memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring)); + } + + if (new_rx_count != adapter->rx_ring->count) { + memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring)); + + temp_ring->count = new_rx_count; + err = igbvf_setup_rx_resources(adapter, temp_ring); + if (err) + goto err_setup; + + igbvf_free_rx_resources(adapter->rx_ring); + + memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); + } +err_setup: + igbvf_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGBVF_RESETTING, &adapter->state); + return err; +} + +static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + + hw->mac.ops.check_for_link(hw); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + + return *data; +} + +static void igbvf_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + set_bit(__IGBVF_TESTING, &adapter->state); + + /* + * Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (igbvf_link_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__IGBVF_TESTING, &adapter->state); + msleep_interruptible(4 * 1000); +} + +static void igbvf_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; +} + +static int igbvf_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + return -EOPNOTSUPP; +} + +static int igbvf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (adapter->itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = adapter->itr_setting >> 2; + + return 0; +} + +static int igbvf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { + adapter->itr = IGBVF_START_ITR; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = ec->rx_coalesce_usecs << 2; + adapter->itr_setting = adapter->itr; 
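+ /* assumption: EITR counts ~256 ns units on the 82576 VF, so + * usecs << 2 approximates the requested interval (4 * 256 ns is + * roughly 1 us); cf. IGBVF_START_ITR in igbvf.h */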
+ } + + writel(adapter->itr, + hw->hw_addr + adapter->rx_ring[0].itr_register); + + return 0; +} + +static int igbvf_nway_reset(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + igbvf_reinit_locked(adapter); + return 0; +} + + +static void igbvf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + int i; + + igbvf_update_stats(adapter); + for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { + char *p = (char *)adapter + + igbvf_gstrings_stats[i].stat_offset; + char *b = (char *)adapter + + igbvf_gstrings_stats[i].base_stat_offset; + data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : + (*(u32 *)p - *(u32 *)b)); + } + +} + +static int igbvf_get_sset_count(struct net_device *dev, int stringset) +{ + switch(stringset) { + case ETH_SS_TEST: + return IGBVF_TEST_LEN; + case ETH_SS_STATS: + return IGBVF_GLOBAL_STATS_LEN; + default: + return -EINVAL; + } +} + +static void igbvf_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { + memcpy(p, igbvf_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static const struct ethtool_ops igbvf_ethtool_ops = { + .get_settings = igbvf_get_settings, + .set_settings = igbvf_set_settings, + .get_drvinfo = igbvf_get_drvinfo, + .get_regs_len = igbvf_get_regs_len, + .get_regs = igbvf_get_regs, + .get_wol = igbvf_get_wol, + .set_wol = igbvf_set_wol, + .get_msglevel = igbvf_get_msglevel, + .set_msglevel = igbvf_set_msglevel, + .nway_reset = igbvf_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = igbvf_get_eeprom_len, + .get_eeprom = igbvf_get_eeprom, + .set_eeprom = igbvf_set_eeprom, + .get_ringparam = igbvf_get_ringparam, + .set_ringparam = igbvf_set_ringparam, + .get_pauseparam = igbvf_get_pauseparam, + .set_pauseparam = igbvf_set_pauseparam, + .get_rx_csum = igbvf_get_rx_csum, + .set_rx_csum = igbvf_set_rx_csum, + .get_tx_csum = igbvf_get_tx_csum, + .set_tx_csum = igbvf_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = igbvf_set_tso, + .self_test = igbvf_diag_test, + .get_sset_count = igbvf_get_sset_count, + .get_strings = igbvf_get_strings, + .get_ethtool_stats = igbvf_get_ethtool_stats, + .get_coalesce = igbvf_get_coalesce, + .set_coalesce = igbvf_set_coalesce, +}; + +void igbvf_set_ethtool_ops(struct net_device *netdev) +{ + /* have to "undeclare" const on this struct to remove warnings */ + SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops); +} diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h new file mode 100644 index 0000000..fd4a7b7 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -0,0 +1,326 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGBVF_H_ +#define _IGBVF_H_ + +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> + +#include "vf.h" + +/* Forward declarations */ +struct igbvf_info; +struct igbvf_adapter; + +/* Interrupt defines */ +#define IGBVF_START_ITR 648 /* ~6000 ints/sec */ + +/* Interrupt modes, as used by the IntMode parameter */ +#define IGBVF_INT_MODE_LEGACY 0 +#define IGBVF_INT_MODE_MSI 1 +#define IGBVF_INT_MODE_MSIX 2 + +/* Tx/Rx descriptor defines */ +#define IGBVF_DEFAULT_TXD 256 +#define IGBVF_MAX_TXD 4096 +#define IGBVF_MIN_TXD 80 + +#define IGBVF_DEFAULT_RXD 256 +#define IGBVF_MAX_RXD 4096 +#define IGBVF_MIN_RXD 80 + +#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGBVF_RX_PTHRESH 16 +#define IGBVF_RX_HTHRESH 8 +#define IGBVF_RX_WTHRESH 1 + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define IGBVF_TX_QUEUE_WAKE 32 +/* How many Rx Buffers do we bundle into one write to the hardware ? 
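+ * (tail-register writes are batched to cut down on MMIO cost)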
*/ +#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define IGBVF_EEPROM_APME 0x0400 + +#define IGBVF_MNG_VLAN_NONE (-1) + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +enum igbvf_boards { + board_vf, + board_i350_vf, +}; + +struct igbvf_queue_stats { + u64 packets; + u64 bytes; +}; + +/* + * wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igbvf_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + /* Tx */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + u16 mapped_as_page; + }; + /* Rx */ + struct { + struct page *page; + u64 page_dma; + unsigned int page_offset; + }; + }; +}; + +union igbvf_desc { + union e1000_adv_rx_desc rx_desc; + union e1000_adv_tx_desc tx_desc; + struct e1000_adv_tx_context_desc tx_context_desc; +}; + +struct igbvf_ring { + struct igbvf_adapter *adapter; /* backlink */ + union igbvf_desc *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. in ring */ + + u16 next_to_use; + u16 next_to_clean; + + u16 head; + u16 tail; + + /* array of buffer information structs */ + struct igbvf_buffer *buffer_info; + struct napi_struct napi; + + char name[IFNAMSIZ + 5]; + u32 eims_value; + u32 itr_val; + u16 itr_register; + int set_itr; + + struct sk_buff *rx_skb_top; + + struct igbvf_queue_stats stats; +}; + +/* board specific private data structure */ +struct igbvf_adapter { + struct timer_list watchdog_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct igbvf_info *ei; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 bd_number; + u32 rx_buffer_len; + u32 polling_interval; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + + spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* + * Tx + */ + struct igbvf_ring *tx_ring /* One per active queue */ + ____cacheline_aligned_in_smp; + + unsigned int restart_queue; + u32 txd_cmd; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* Tx stats */ + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + + /* + * Rx + */ + struct igbvf_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* Rx stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + + unsigned int rx_ps_hdr_size; + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + spinlock_t stats_lock; /* prevent concurrent stats updates */ + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + /* The VF counters don't clear on read so we have to get a base + * count on driver start up and always subtract that base + * on the first update, thus the flag. 
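+ * (zero_base below provides an all-zero baseline for the ethtool + * stats that are not hardware counters.)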
+ */ + struct e1000_vf_stats stats; + u64 zero_base; + + struct igbvf_ring test_tx_ring; + struct igbvf_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + struct msix_entry *msix_entries; + int int_mode; + u32 eims_enable_mask; + u32 eims_other; + u32 int_counter0; + u32 int_counter1; + + u32 eeprom_wol; + u32 wol; + u32 pba; + + bool fc_autoneg; + + unsigned long led_status; + + unsigned int flags; + unsigned long last_reset; +}; + +struct igbvf_info { + enum e1000_mac_type mac; + unsigned int flags; + u32 pba; + void (*init_ops)(struct e1000_hw *); + s32 (*get_variants)(struct igbvf_adapter *); +}; + +/* hardware capability, feature, and workaround flags */ +#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) + +#define IGBVF_RX_DESC_ADV(R, i) \ + (&((((R).desc))[i].rx_desc)) +#define IGBVF_TX_DESC_ADV(R, i) \ + (&((((R).desc))[i].tx_desc)) +#define IGBVF_TX_CTXTDESC_ADV(R, i) \ + (&((((R).desc))[i].tx_context_desc)) + +enum igbvf_state_t { + __IGBVF_TESTING, + __IGBVF_RESETTING, + __IGBVF_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char igbvf_driver_name[]; +extern const char igbvf_driver_version[]; + +extern void igbvf_check_options(struct igbvf_adapter *); +extern void igbvf_set_ethtool_ops(struct net_device *); + +extern int igbvf_up(struct igbvf_adapter *); +extern void igbvf_down(struct igbvf_adapter *); +extern void igbvf_reinit_locked(struct igbvf_adapter *); +extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *); +extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *); +extern void igbvf_free_rx_resources(struct igbvf_ring *); +extern void igbvf_free_tx_resources(struct igbvf_ring *); +extern void igbvf_update_stats(struct igbvf_adapter *); + +extern unsigned int copybreak; + +#endif /* _IGBVF_H_ */ diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c new file mode 100644 index 0000000..3d6f4cc --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -0,0 +1,350 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "mbx.h" + +/** + * e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 e1000_poll_for_msg(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw)) { + countdown--; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 e1000_poll_for_ack(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw)) { + countdown--; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = e1000_poll_for_msg(hw); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size); +out: + return ret_val; +} + +/** + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if we either can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg*/ + ret_val = mbx->ops.write(hw, msg, size); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = e1000_poll_for_ack(hw); +out: + return ret_val; +} + +/** + * e1000_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. 
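+ * Bits that would otherwise be lost are cached in + * hw->dev_spec.vf.v2p_mailbox so e1000_check_for_bit_vf() can still + * test for them later.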
+ **/ +static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) +{ + u32 v2p_mailbox = er32(V2PMAILBOX(0)); + + v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; + hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * e1000_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) +{ + u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); + s32 ret_val = -E1000_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = E1000_SUCCESS; + + hw->dev_spec.vf.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * e1000_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_msg_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static s32 e1000_check_for_ack_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * + * returns true if the PF has set the reset done bit or else false + **/ +static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | + E1000_V2PMAILBOX_RSTI))) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + /* Take ownership of the buffer */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + + /* reserve mailbox for vf use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) +{ + s32 err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = e1000_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush any ack or msg as we are going to overwrite mailbox */ + e1000_check_for_ack_vf(hw); + e1000_check_for_msg_vf(hw); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_ew32(VMBMEM(0), i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ); + +out_no_write: + return err; +} + +/** + * e1000_read_mbx_vf - Reads a message from the inbox intended for vf + * 
@hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully read message from buffer + **/ +static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) +{ + s32 err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = e1000_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = array_er32(VMBMEM(0), i); + + /* Acknowledge receipt and release mailbox, then we're done */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * e1000_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_vf; + mbx->ops.write = e1000_write_mbx_vf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_vf; + mbx->ops.check_for_ack = e1000_check_for_ack_vf; + mbx->ops.check_for_rst = e1000_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return E1000_SUCCESS; +} + diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h new file mode 100644 index 0000000..c2883c4 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -0,0 +1,75 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 1999 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "vf.h" + +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + clear to send requests */ + +/* We have a total wait time of 1s for vf mailbox posted messages */ +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ +#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ + +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +void e1000_init_mbx_ops_generic(struct e1000_hw *hw); +s32 e1000_init_mbx_params_vf(struct e1000_hw *); + +#endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c new file mode 100644 index 0000000..40ed066 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -0,0 +1,2859 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> +#include <linux/slab.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/prefetch.h> + +#include "igbvf.h" + +#define DRV_VERSION "2.0.0-k" +char igbvf_driver_name[] = "igbvf"; +const char igbvf_driver_version[] = DRV_VERSION; +static const char igbvf_driver_string[] = + "Intel(R) Virtual Function Network Driver"; +static const char igbvf_copyright[] = + "Copyright (c) 2009 - 2010 Intel Corporation."; + +static int igbvf_poll(struct napi_struct *napi, int budget); +static void igbvf_reset(struct igbvf_adapter *); +static void igbvf_set_interrupt_capability(struct igbvf_adapter *); +static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); + +static struct igbvf_info igbvf_vf_info = { + .mac = e1000_vfadapt, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, +}; + +static struct igbvf_info igbvf_i350_vf_info = { + .mac = e1000_vfadapt_i350, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, +}; + +static const struct igbvf_info *igbvf_info_tbl[] = { + [board_vf] = &igbvf_vf_info, + [board_i350_vf] = &igbvf_i350_vf_info, +}; + +/** + * igbvf_desc_unused - calculate if we have unused descriptors + **/ +static int igbvf_desc_unused(struct igbvf_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * igbvf_receive_skb - helper function to handle Rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void igbvf_receive_skb(struct igbvf_adapter *adapter, + struct net_device *netdev, + struct sk_buff *skb, + u32 status, u16 vlan) +{ + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, vid); + } + netif_receive_skb(skb); +} + +static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, + u32 status_err, struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set or checksum is disabled through ethtool */ + if ((status_err & E1000_RXD_STAT_IXSM) || + (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED)) + return; + + /* TCP/UDP checksum error bit is set */ + if (status_err & + (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + adapter->hw_csum_good++; +} + +/** + * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: address of ring structure to repopulate + * @cleaned_count: number of buffers to repopulate + **/ +static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, + int cleaned_count) +{ + struct igbvf_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_adv_rx_desc *rx_desc; + struct igbvf_buffer *buffer_info; + 
struct sk_buff *skb; + unsigned int i; + int bufsz; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + if (adapter->rx_ps_hdr_size) + bufsz = adapter->rx_ps_hdr_size; + else + bufsz = adapter->rx_buffer_len; + + while (cleaned_count--) { + rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); + + if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (!buffer_info->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + buffer_info->page_offset = 0; + } else { + buffer_info->page_offset ^= PAGE_SIZE / 2; + } + buffer_info->page_dma = + dma_map_page(&pdev->dev, buffer_info->page, + buffer_info->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + } + + if (!buffer_info->skb) { + skb = netdev_alloc_skb_ip_align(netdev, bufsz); + if (!skb) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + + buffer_info->skb = skb; + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + bufsz, + DMA_FROM_DEVICE); + } + /* Refresh the desc even if buffer_addrs didn't change because + * each write-back erases this info. */ + if (adapter->rx_ps_hdr_size) { + rx_desc->read.pkt_addr = + cpu_to_le64(buffer_info->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); + } else { + rx_desc->read.pkt_addr = + cpu_to_le64(buffer_info->dma); + rx_desc->read.hdr_addr = 0; + } + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i == 0) + i = (rx_ring->count - 1); + else + i--; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * igbvf_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, + int *work_done, int work_to_do) +{ + struct igbvf_ring *rx_ring = adapter->rx_ring; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_adv_rx_desc *rx_desc, *next_rxd; + struct igbvf_buffer *buffer_info, *next_buffer; + struct sk_buff *skb; + bool cleaned = false; + int cleaned_count = 0; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i; + u32 length, hlen, staterr; + + i = rx_ring->next_to_clean; + rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + buffer_info = &rx_ring->buffer_info[i]; + + /* HW will not DMA in data larger than the given buffer, even + * if it parses the (NFS, of course) header to be larger. In + * that case, it fills the header buffer and spills the rest + * into the page. 
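+ * hlen is clamped to rx_ps_hdr_size just below for the same reason.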
+ */ + hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & + E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > adapter->rx_ps_hdr_size) + hlen = adapter->rx_ps_hdr_size; + + length = le16_to_cpu(rx_desc->wb.upper.length); + cleaned = true; + cleaned_count++; + + skb = buffer_info->skb; + prefetch(skb->data - NET_IP_ALIGN); + buffer_info->skb = NULL; + if (!adapter->rx_ps_hdr_size) { + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + skb_put(skb, length); + goto send_up; + } + + if (!skb_shinfo(skb)->nr_frags) { + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_hdr_size, + DMA_FROM_DEVICE); + skb_put(skb, hlen); + } + + if (length) { + dma_unmap_page(&pdev->dev, buffer_info->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + buffer_info->page_dma = 0; + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + buffer_info->page, + buffer_info->page_offset, + length); + + if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || + (page_count(buffer_info->page) != 1)) + buffer_info->page = NULL; + else + get_page(buffer_info->page); + + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } +send_up: + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i); + prefetch(next_rxd); + next_buffer = &rx_ring->buffer_info[i]; + + if (!(staterr & E1000_RXD_STAT_EOP)) { + buffer_info->skb = next_buffer->skb; + buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + next_buffer->dma = 0; + goto next_desc; + } + + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + total_bytes += skb->len; + total_packets++; + + igbvf_rx_checksum_adv(adapter, staterr, skb); + + skb->protocol = eth_type_trans(skb, netdev); + + igbvf_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.upper.vlan); + +next_desc: + rx_desc->wb.upper.status_error = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) { + igbvf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + rx_ring->next_to_clean = i; + cleaned_count = igbvf_desc_unused(rx_ring); + + if (cleaned_count) + igbvf_alloc_rx_buffers(rx_ring, cleaned_count); + + adapter->total_rx_packets += total_packets; + adapter->total_rx_bytes += total_bytes; + adapter->net_stats.rx_bytes += total_bytes; + adapter->net_stats.rx_packets += total_packets; + return cleaned; +} + +static void igbvf_put_txbuf(struct igbvf_adapter *adapter, + struct igbvf_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; +} + +/** + * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct igbvf_buffer) * 
tx_ring->count; + tx_ring->buffer_info = vzalloc(size); + if (!tx_ring->buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->adapter = adapter; + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; +err: + vfree(tx_ring->buffer_info); + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; +} + +/** + * igbvf_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, + struct igbvf_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct igbvf_buffer) * rx_ring->count; + rx_ring->buffer_info = vzalloc(size); + if (!rx_ring->buffer_info) + goto err; + + desc_len = sizeof(union e1000_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + rx_ring->adapter = adapter; + + return 0; + +err: + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; +} + +/** + * igbvf_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = tx_ring->adapter; + struct igbvf_buffer *buffer_info; + unsigned long size; + unsigned int i; + + if (!tx_ring->buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + igbvf_put_txbuf(adapter, buffer_info); + } + + size = sizeof(struct igbvf_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * igbvf_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: ring to free resources from + * + * Free all transmit software resources + **/ +void igbvf_free_tx_resources(struct igbvf_ring *tx_ring) +{ + struct pci_dev *pdev = tx_ring->adapter->pdev; + + igbvf_clean_tx_ring(tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igbvf_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + **/ +static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) +{ + struct igbvf_adapter *adapter = rx_ring->adapter; + struct igbvf_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + if (!rx_ring->buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if 
(adapter->rx_ps_hdr_size){ + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_hdr_size, + DMA_FROM_DEVICE); + } else { + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + } + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + if (buffer_info->page) { + if (buffer_info->page_dma) + dma_unmap_page(&pdev->dev, + buffer_info->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + put_page(buffer_info->page); + buffer_info->page = NULL; + buffer_info->page_dma = 0; + buffer_info->page_offset = 0; + } + } + + size = sizeof(struct igbvf_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +/** + * igbvf_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ + +void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) +{ + struct pci_dev *pdev = rx_ring->adapter->pdev; + + igbvf_clean_rx_ring(rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * igbvf_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. This functionality is controlled + * by the InterruptThrottleRate module parameter. 
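+ *
+ * Worked example of the thresholds below (hypothetical interval
+ * counts): at low_latency, 40 packets totalling 16000 bytes give
+ * bytes/packets = 400; bytes > 10000, the ratio is under 1200 and
+ * packets > 35, so the function steps down to lowest_latency. Ten
+ * 9000-byte frames instead give a ratio above 8000 and select
+ * bulk_latency.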
+ **/ +static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter, + u16 itr_setting, int packets, + int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void igbvf_set_itr(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + + if (new_itr != adapter->itr) { + /* + * this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + adapter->rx_ring->itr_val = 1952; + + if (adapter->msix_entries) + adapter->rx_ring->set_itr = 1; + else + ew32(ITR, 1952); + } +} + +/** + * igbvf_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * returns true if ring is completely cleaned + **/ +static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = tx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct igbvf_buffer *buffer_info; + struct sk_buff *skb; + union e1000_adv_tx_desc *tx_desc, *eop_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i, eop, count = 0; + bool cleaned = false; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); + + while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + rmb(); /* read buffer_info after eop_desc status */ + for (cleaned = false; !cleaned; count++) { + tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + skb = buffer_info->skb; + + if (skb) { + unsigned int segs, bytecount; + + /* gso_segs is currently only valid for tcp */ + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + + skb->len; + total_packets += segs; + total_bytes += bytecount; + } + + igbvf_put_txbuf(adapter, buffer_info); + tx_desc->wb.status = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + if (unlikely(count && + netif_carrier_ok(netdev) && + igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
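+ * This barrier is assumed to pair with the smp_mb() in
+ * igbvf_maybe_stop_tx(): both sides order their queue-state and
+ * ring-index accesses, so a wakeup cannot slip between the
+ * transmit path's stop and its re-check of free descriptors.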
+ */ + smp_mb(); + if (netif_queue_stopped(netdev) && + !(test_bit(__IGBVF_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + adapter->net_stats.tx_bytes += total_bytes; + adapter->net_stats.tx_packets += total_packets; + return count < tx_ring->count; +} + +static irqreturn_t igbvf_msix_other(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->int_counter1++; + + netif_carrier_off(netdev); + hw->mac.get_link_status = 1; + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + ew32(EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + /* auto mask will automatically reenable the interrupt when we write + * EICS */ + if (!igbvf_clean_tx_irq(tx_ring)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(EICS, tx_ring->eims_value); + else + ew32(EIMS, tx_ring->eims_value); + + return IRQ_HANDLED; +} + +static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + + adapter->int_counter0++; + + /* Write the ITR value calculated at the end of the + * previous interrupt. + */ + if (adapter->rx_ring->set_itr) { + writel(adapter->rx_ring->itr_val, + adapter->hw.hw_addr + adapter->rx_ring->itr_register); + adapter->rx_ring->set_itr = 0; + } + + if (napi_schedule_prep(&adapter->rx_ring->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->rx_ring->napi); + } + + return IRQ_HANDLED; +} + +#define IGBVF_NO_QUEUE -1 + +static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, + int tx_queue, int msix_vector) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ivar, index; + + /* 82576 uses a table-based method for assigning vectors. + Each queue has a single entry in the table to which we write + a vector number along with a "valid" bit. Sadly, the layout + of the table is somewhat counterintuitive. */ + if (rx_queue > IGBVF_NO_QUEUE) { + index = (rx_queue >> 1); + ivar = array_er32(IVAR0, index); + if (rx_queue & 0x1) { + /* vector goes into third byte of register */ + ivar = ivar & 0xFF00FFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 16; + } else { + /* vector goes into low byte of register */ + ivar = ivar & 0xFFFFFF00; + ivar |= msix_vector | E1000_IVAR_VALID; + } + adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector; + array_ew32(IVAR0, index, ivar); + } + if (tx_queue > IGBVF_NO_QUEUE) { + index = (tx_queue >> 1); + ivar = array_er32(IVAR0, index); + if (tx_queue & 0x1) { + /* vector goes into high byte of register */ + ivar = ivar & 0x00FFFFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 24; + } else { + /* vector goes into second byte of register */ + ivar = ivar & 0xFFFF00FF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 8; + } + adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector; + array_ew32(IVAR0, index, ivar); + } +} + +/** + * igbvf_configure_msix - Configure MSI-X hardware + * + * igbvf_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. 
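+ *
+ * Sketch of the resulting fixed three-vector layout (assuming the
+ * vectors were granted in order):
+ *   vector 0 - Tx queue 0, eims_value = 1 << 0
+ *   vector 1 - Rx queue 0, eims_value = 1 << 1
+ *   vector 2 - link/other, eims_other = 1 << 2
+ * leaving eims_enable_mask = 0x7.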
+ **/ +static void igbvf_configure_msix(struct igbvf_adapter *adapter) +{ + u32 tmp; + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct igbvf_ring *rx_ring = adapter->rx_ring; + int vector = 0; + + adapter->eims_enable_mask = 0; + + igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++); + adapter->eims_enable_mask |= tx_ring->eims_value; + if (tx_ring->itr_val) + writel(tx_ring->itr_val, + hw->hw_addr + tx_ring->itr_register); + else + writel(1952, hw->hw_addr + tx_ring->itr_register); + + igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++); + adapter->eims_enable_mask |= rx_ring->eims_value; + if (rx_ring->itr_val) + writel(rx_ring->itr_val, + hw->hw_addr + rx_ring->itr_register); + else + writel(1952, hw->hw_addr + rx_ring->itr_register); + + /* set vector for other causes, i.e. link changes */ + + tmp = (vector++ | E1000_IVAR_VALID); + + ew32(IVAR_MISC, tmp); + + adapter->eims_enable_mask = (1 << (vector)) - 1; + adapter->eims_other = 1 << (vector - 1); + e1e_flush(); +} + +static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } +} + +/** + * igbvf_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) +{ + int err = -ENOMEM; + int i; + + /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ + adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + for (i = 0; i < 3; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(adapter->pdev, + adapter->msix_entries, 3); + } + + if (err) { + /* MSI-X failed */ + dev_err(&adapter->pdev->dev, + "Failed to initialize MSI-X interrupts.\n"); + igbvf_reset_interrupt_capability(adapter); + } +} + +/** + * igbvf_request_msix - Initialize MSI-X interrupts + * + * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
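+ *
+ * For a netdev named "eth0" (example name) this registers the
+ * handlers as "eth0-tx-0", "eth0-rx-0" and plain "eth0" for the
+ * third, link/mailbox vector.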
+ **/ +static int igbvf_request_msix(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) { + sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); + sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); + } else { + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + } + + err = request_irq(adapter->msix_entries[vector].vector, + igbvf_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); + if (err) + goto out; + + adapter->tx_ring->itr_register = E1000_EITR(vector); + adapter->tx_ring->itr_val = 1952; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + igbvf_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); + if (err) + goto out; + + adapter->rx_ring->itr_register = E1000_EITR(vector); + adapter->rx_ring->itr_val = 1952; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + igbvf_msix_other, 0, netdev->name, netdev); + if (err) + goto out; + + igbvf_configure_msix(adapter); + return 0; +out: + return err; +} + +/** + * igbvf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64); + + return 0; +} + +/** + * igbvf_request_irq - initialize interrupts + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
+ **/ +static int igbvf_request_irq(struct igbvf_adapter *adapter) +{ + int err = -1; + + /* igbvf supports msi-x only */ + if (adapter->msix_entries) + err = igbvf_request_msix(adapter); + + if (!err) + return err; + + dev_err(&adapter->pdev->dev, + "Unable to allocate interrupt, Error: %d\n", err); + + return err; +} + +static void igbvf_free_irq(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector; + + if (adapter->msix_entries) { + for (vector = 0; vector < 3; vector++) + free_irq(adapter->msix_entries[vector].vector, netdev); + } +} + +/** + * igbvf_irq_disable - Mask off interrupt generation on the NIC + **/ +static void igbvf_irq_disable(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(EIMC, ~0); + + if (adapter->msix_entries) + ew32(EIAC, 0); +} + +/** + * igbvf_irq_enable - Enable default interrupt generation settings + **/ +static void igbvf_irq_enable(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(EIAC, adapter->eims_enable_mask); + ew32(EIAM, adapter->eims_enable_mask); + ew32(EIMS, adapter->eims_enable_mask); +} + +/** + * igbvf_poll - NAPI Rx polling callback + * @napi: struct associated with this polling callback + * @budget: amount of packets driver is allowed to process this poll + **/ +static int igbvf_poll(struct napi_struct *napi, int budget) +{ + struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi); + struct igbvf_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + int work_done = 0; + + igbvf_clean_rx_irq(adapter, &work_done, budget); + + /* If not enough Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + + if (adapter->itr_setting & 3) + igbvf_set_itr(adapter); + + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + ew32(EIMS, adapter->rx_ring->eims_value); + } + + return work_done; +} + +/** + * igbvf_set_rlpml - set receive large packet maximum length + * @adapter: board private structure + * + * Configure the maximum size of packets that will be received + */ +static void igbvf_set_rlpml(struct igbvf_adapter *adapter) +{ + int max_frame_size; + struct e1000_hw *hw = &adapter->hw; + + max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; + e1000_rlpml_set_vf(hw, max_frame_size); +} + +static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac.ops.set_vfta(hw, vid, true)) + dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); + else + set_bit(vid, adapter->active_vlans); +} + +static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + igbvf_irq_disable(adapter); + + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + igbvf_irq_enable(adapter); + + if (hw->mac.ops.set_vfta(hw, vid, false)) + dev_err(&adapter->pdev->dev, + "Failed to remove vlan id %d\n", vid); + else + clear_bit(vid, adapter->active_vlans); +} + +static void igbvf_restore_vlan(struct igbvf_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + igbvf_vlan_rx_add_vid(adapter->netdev, vid); +} + +/** + * igbvf_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
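+ *
+ * The queue is disabled through TXDCTL while TDBA/TDLEN/TDH/TDT are
+ * programmed, relaxed ordering on head write-backs is turned off so
+ * completions cannot be observed out of order, and the RS bit is set
+ * in every descriptor so igbvf_clean_tx_irq() can poll DD status.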
+ **/
+static void igbvf_configure_tx(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+ u64 tdba;
+ u32 txdctl, dca_txctrl;
+
+ /* disable transmits */
+ txdctl = er32(TXDCTL(0));
+ ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+ e1e_flush();
+ msleep(10);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
+ tdba = tx_ring->dma;
+ ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
+ ew32(TDBAH(0), (tdba >> 32));
+ ew32(TDH(0), 0);
+ ew32(TDT(0), 0);
+ tx_ring->head = E1000_TDH(0);
+ tx_ring->tail = E1000_TDT(0);
+
+ /* Turn off Relaxed Ordering on head write-backs. The writebacks
+ * MUST be delivered in order or they will completely screw up
+ * our bookkeeping.
+ */
+ dca_txctrl = er32(DCA_TXCTRL(0));
+ dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
+ ew32(DCA_TXCTRL(0), dca_txctrl);
+
+ /* enable transmits */
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+ ew32(TXDCTL(0), txdctl);
+
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
+
+ /* enable Report Status bit */
+ adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
+}
+
+/**
+ * igbvf_setup_srrctl - configure the receive control registers
+ * @adapter: board private structure
+ **/
+static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 srrctl = 0;
+
+ srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
+ E1000_SRRCTL_BSIZEHDR_MASK |
+ E1000_SRRCTL_BSIZEPKT_MASK);
+
+ /* Enable queue drop to avoid head of line blocking */
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ /* Setup buffer sizes */
+ srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
+ E1000_SRRCTL_BSIZEPKT_SHIFT;
+
+ if (adapter->rx_buffer_len < 2048) {
+ adapter->rx_ps_hdr_size = 0;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ } else {
+ adapter->rx_ps_hdr_size = 128;
+ srrctl |= adapter->rx_ps_hdr_size <<
+ E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ }
+
+ ew32(SRRCTL(0), srrctl);
+}
+
+/**
+ * igbvf_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
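+ *
+ * Together with igbvf_setup_srrctl() above: rx_buffer_len < 2048
+ * selects single-buffer (ONEBUF) descriptors, anything larger
+ * enables header split with a 128-byte header buffer and half-page
+ * payload buffers.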
+ **/ +static void igbvf_configure_rx(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rxdctl; + + /* disable receives */ + rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + e1e_flush(); + msleep(10); + + rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + rdba = rx_ring->dma; + ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); + ew32(RDBAH(0), (rdba >> 32)); + ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); + rx_ring->head = E1000_RDH(0); + rx_ring->tail = E1000_RDT(0); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= IGBVF_RX_PTHRESH; + rxdctl |= IGBVF_RX_HTHRESH << 8; + rxdctl |= IGBVF_RX_WTHRESH << 16; + + igbvf_set_rlpml(adapter); + + /* enable receives */ + ew32(RXDCTL(0), rxdctl); +} + +/** + * igbvf_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void igbvf_set_multi(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list = NULL; + int i; + + if (!netdev_mc_empty(netdev)) { + mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); + if (!mta_list) { + dev_err(&adapter->pdev->dev, + "failed to allocate multicast filter list\n"); + return; + } + } + + /* prepare a packed array of only addresses. */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); + kfree(mta_list); +} + +/** + * igbvf_configure - configure the hardware for Rx and Tx + * @adapter: private board structure + **/ +static void igbvf_configure(struct igbvf_adapter *adapter) +{ + igbvf_set_multi(adapter->netdev); + + igbvf_restore_vlan(adapter); + + igbvf_configure_tx(adapter); + igbvf_setup_srrctl(adapter); + igbvf_configure_rx(adapter); + igbvf_alloc_rx_buffers(adapter->rx_ring, + igbvf_desc_unused(adapter->rx_ring)); +} + +/* igbvf_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. After reset the device needs to be + * properly configured for Rx, Tx etc. 
+ */ +static void igbvf_reset(struct igbvf_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + /* Allow time for pending master requests to run */ + if (mac->ops.reset_hw(hw)) + dev_err(&adapter->pdev->dev, "PF still resetting\n"); + + mac->ops.init_hw(hw); + + if (is_valid_ether_addr(adapter->hw.mac.addr)) { + memcpy(netdev->dev_addr, adapter->hw.mac.addr, + netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, + netdev->addr_len); + } + + adapter->last_reset = jiffies; +} + +int igbvf_up(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + igbvf_configure(adapter); + + clear_bit(__IGBVF_DOWN, &adapter->state); + + napi_enable(&adapter->rx_ring->napi); + if (adapter->msix_entries) + igbvf_configure_msix(adapter); + + /* Clear any pending interrupts. */ + er32(EICR); + igbvf_irq_enable(adapter); + + /* start the watchdog */ + hw->mac.get_link_status = 1; + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + + return 0; +} + +void igbvf_down(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 rxdctl, txdctl; + + /* + * signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__IGBVF_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + txdctl = er32(TXDCTL(0)); + ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + + /* flush both disables and wait for them to finish */ + e1e_flush(); + msleep(10); + + napi_disable(&adapter->rx_ring->napi); + + igbvf_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + + netif_carrier_off(netdev); + + /* record the stats before reset*/ + igbvf_update_stats(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + igbvf_reset(adapter); + igbvf_clean_tx_ring(adapter->tx_ring); + igbvf_clean_rx_ring(adapter->rx_ring); +} + +void igbvf_reinit_locked(struct igbvf_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) + msleep(1); + igbvf_down(adapter); + igbvf_up(adapter); + clear_bit(__IGBVF_RESETTING, &adapter->state); +} + +/** + * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) + * @adapter: board private structure to initialize + * + * igbvf_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
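+ *
+ * For the default 1500-byte MTU this evaluates to
+ * max_frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 and
+ * rx_buffer_len = ETH_FRAME_LEN (1514) + VLAN_HLEN (4) +
+ * ETH_FCS_LEN (4) = 1522 bytes.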
+ **/ +static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + s32 rc; + + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; + adapter->rx_ps_hdr_size = 0; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + adapter->tx_int_delay = 8; + adapter->tx_abs_int_delay = 32; + adapter->rx_int_delay = 0; + adapter->rx_abs_int_delay = 8; + adapter->itr_setting = 3; + adapter->itr = 20000; + + /* Set various function pointers */ + adapter->ei->init_ops(&adapter->hw); + + rc = adapter->hw.mac.ops.init_params(&adapter->hw); + if (rc) + return rc; + + rc = adapter->hw.mbx.ops.init_params(&adapter->hw); + if (rc) + return rc; + + igbvf_set_interrupt_capability(adapter); + + if (igbvf_alloc_queues(adapter)) + return -ENOMEM; + + spin_lock_init(&adapter->tx_queue_lock); + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igbvf_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__IGBVF_DOWN, &adapter->state); + return 0; +} + +static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + adapter->stats.last_gprc = er32(VFGPRC); + adapter->stats.last_gorc = er32(VFGORC); + adapter->stats.last_gptc = er32(VFGPTC); + adapter->stats.last_gotc = er32(VFGOTC); + adapter->stats.last_mprc = er32(VFMPRC); + adapter->stats.last_gotlbc = er32(VFGOTLBC); + adapter->stats.last_gptlbc = er32(VFGPTLBC); + adapter->stats.last_gorlbc = er32(VFGORLBC); + adapter->stats.last_gprlbc = er32(VFGPRLBC); + + adapter->stats.base_gprc = er32(VFGPRC); + adapter->stats.base_gorc = er32(VFGORC); + adapter->stats.base_gptc = er32(VFGPTC); + adapter->stats.base_gotc = er32(VFGOTC); + adapter->stats.base_mprc = er32(VFMPRC); + adapter->stats.base_gotlbc = er32(VFGOTLBC); + adapter->stats.base_gptlbc = er32(VFGPTLBC); + adapter->stats.base_gorlbc = er32(VFGORLBC); + adapter->stats.base_gprlbc = er32(VFGPRLBC); +} + +/** + * igbvf_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int igbvf_open(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__IGBVF_TESTING, &adapter->state)) + return -EBUSY; + + /* allocate transmit descriptors */ + err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); + if (err) + goto err_setup_rx; + + /* + * before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. 
+ */ + igbvf_configure(adapter); + + err = igbvf_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as igbvf_up() */ + clear_bit(__IGBVF_DOWN, &adapter->state); + + napi_enable(&adapter->rx_ring->napi); + + /* clear any pending interrupts */ + er32(EICR); + + igbvf_irq_enable(adapter); + + /* start the watchdog */ + hw->mac.get_link_status = 1; + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + return 0; + +err_req_irq: + igbvf_free_rx_resources(adapter->rx_ring); +err_setup_rx: + igbvf_free_tx_resources(adapter->tx_ring); +err_setup_tx: + igbvf_reset(adapter); + + return err; +} + +/** + * igbvf_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int igbvf_close(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); + igbvf_down(adapter); + + igbvf_free_irq(adapter); + + igbvf_free_tx_resources(adapter->tx_ring); + igbvf_free_rx_resources(adapter->rx_ring); + + return 0; +} +/** + * igbvf_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int igbvf_set_mac(struct net_device *netdev, void *p) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + if (memcmp(addr->sa_data, hw->mac.addr, 6)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + + return 0; +} + +#define UPDATE_VF_COUNTER(reg, name) \ + { \ + u32 current_counter = er32(reg); \ + if (current_counter < adapter->stats.last_##name) \ + adapter->stats.name += 0x100000000LL; \ + adapter->stats.last_##name = current_counter; \ + adapter->stats.name &= 0xFFFFFFFF00000000LL; \ + adapter->stats.name |= current_counter; \ + } + +/** + * igbvf_update_stats - Update the board statistics counters + * @adapter: board private structure +**/ +void igbvf_update_stats(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* + * Prevent stats update while adapter is being reset, link is down + * or if the pci connection is down. 
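+ *
+ * Each UPDATE_VF_COUNTER invocation splices a 32-bit hardware
+ * counter into a 64-bit software total; e.g. (hypothetical values)
+ * last = 0xfffffff0 with a new read of 0x10 is treated as a wrap,
+ * adding 0x100000000 before the low 32 bits are replaced.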
+ */ + if (adapter->link_speed == 0) + return; + + if (test_bit(__IGBVF_RESETTING, &adapter->state)) + return; + + if (pci_channel_offline(pdev)) + return; + + UPDATE_VF_COUNTER(VFGPRC, gprc); + UPDATE_VF_COUNTER(VFGORC, gorc); + UPDATE_VF_COUNTER(VFGPTC, gptc); + UPDATE_VF_COUNTER(VFGOTC, gotc); + UPDATE_VF_COUNTER(VFMPRC, mprc); + UPDATE_VF_COUNTER(VFGOTLBC, gotlbc); + UPDATE_VF_COUNTER(VFGPTLBC, gptlbc); + UPDATE_VF_COUNTER(VFGORLBC, gorlbc); + UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); + + /* Fill out the OS statistics structure */ + adapter->net_stats.multicast = adapter->stats.mprc; +} + +static void igbvf_print_link_info(struct igbvf_adapter *adapter) +{ + dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n", + adapter->link_speed, + ((adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex")); +} + +static bool igbvf_has_link(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 ret_val = E1000_SUCCESS; + bool link_active; + + /* If interface is down, stay link down */ + if (test_bit(__IGBVF_DOWN, &adapter->state)) + return false; + + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + + /* if check for link returns error we will need to reset */ + if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ))) + schedule_work(&adapter->reset_task); + + return link_active; +} + +/** + * igbvf_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void igbvf_watchdog(unsigned long data) +{ + struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igbvf_watchdog_task(struct work_struct *work) +{ + struct igbvf_adapter *adapter = container_of(work, + struct igbvf_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link; + int tx_pending = 0; + + link = igbvf_has_link(adapter); + + if (link) { + if (!netif_carrier_ok(netdev)) { + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + igbvf_print_link_info(adapter); + + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + dev_info(&adapter->pdev->dev, "Link is Down\n"); + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } + + if (netif_carrier_ok(netdev)) { + igbvf_update_stats(adapter); + } else { + tx_pending = (igbvf_desc_unused(tx_ring) + 1 < + tx_ring->count); + if (tx_pending) { + /* + * We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
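+ * The test below, igbvf_desc_unused(tx_ring) + 1 < tx_ring->count,
+ * is true whenever at least one descriptor is still outstanding
+ * while carrier is off, i.e. queued work that only a reset can
+ * flush.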
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + } + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + ew32(EICS, adapter->rx_ring->eims_value); + + /* Reset the timer */ + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + (2 * HZ))); +} + +#define IGBVF_TX_FLAGS_CSUM 0x00000001 +#define IGBVF_TX_FLAGS_VLAN 0x00000002 +#define IGBVF_TX_FLAGS_TSO 0x00000004 +#define IGBVF_TX_FLAGS_IPV4 0x00000008 +#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 + +static int igbvf_tso(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) +{ + struct e1000_adv_tx_context_desc *context_desc; + unsigned int i; + int err; + struct igbvf_buffer *buffer_info; + u32 info = 0, tu_cmd = 0; + u32 mss_l4len_idx, l4len; + *hdr_len = 0; + + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) { + dev_err(&adapter->pdev->dev, + "igbvf_tso returning an error\n"); + return err; + } + } + + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + i = tx_ring->next_to_use; + + buffer_info = &tx_ring->buffer_info[i]; + context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + /* VLAN MACLEN IPLEN */ + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); + *hdr_len += skb_network_offset(skb); + info |= (skb_transport_header(skb) - skb_network_header(skb)); + *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); + context_desc->vlan_macip_lens = cpu_to_le32(info); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + + if (skb->protocol == htons(ETH_P_IP)) + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + + /* MSS L4LEN IDX */ + mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); + + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + context_desc->seqnum_seed = 0; + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = 0; + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; +} + +static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags) +{ + struct e1000_adv_tx_context_desc *context_desc; + unsigned int i; + struct igbvf_buffer *buffer_info; + u32 info = 0, tu_cmd = 0; + + if ((skb->ip_summed == CHECKSUM_PARTIAL) || + (tx_flags & IGBVF_TX_FLAGS_VLAN)) { + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); + + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); + if (skb->ip_summed == CHECKSUM_PARTIAL) + info |= (skb_transport_header(skb) - + skb_network_header(skb)); + + + 
context_desc->vlan_macip_lens = cpu_to_le32(info); + + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + break; + case __constant_htons(ETH_P_IPV6): + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + break; + default: + break; + } + } + + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + context_desc->seqnum_seed = 0; + context_desc->mss_l4len_idx = 0; + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = 0; + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + + return false; +} + +static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* there is enough descriptors then we don't need to worry */ + if (igbvf_desc_unused(adapter->tx_ring) >= size) + return 0; + + netif_stop_queue(netdev); + + smp_mb(); + + /* We need to check again just in case room has been made available */ + if (igbvf_desc_unused(adapter->tx_ring) < size) + return -EBUSY; + + netif_wake_queue(netdev); + + ++adapter->restart_queue; + return 0; +} + +#define IGBVF_MAX_TXD_PWR 16 +#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) + +static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, + unsigned int first) +{ + struct igbvf_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned int len = skb_headlen(skb); + unsigned int count = 0, i; + unsigned int f; + + i = tx_ring->next_to_use; + + buffer_info = &tx_ring->buffer_info[i]; + BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); + buffer_info->length = len; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->mapped_as_page = false; + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + struct skb_frag_struct *frag; + + count++; + i++; + if (i == tx_ring->count) + i = 0; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + + buffer_info = &tx_ring->buffer_info[i]; + BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); + buffer_info->length = len; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->mapped_as_page = true; + buffer_info->dma = dma_map_page(&pdev->dev, + frag->page, + frag->page_offset, + len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + } + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[first].next_to_watch = i; + + return ++count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + + /* clear timestamp and dma mappings for failed buffer_info mapping */ + buffer_info->dma = 0; + buffer_info->time_stamp = 0; + buffer_info->length = 0; + buffer_info->next_to_watch = 0; + buffer_info->mapped_as_page = false; + if (count) + count--; + + /* clear timestamp and dma mappings for remaining portion of packet */ + while (count--) { + if (i==0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + igbvf_put_txbuf(adapter, buffer_info); + } + + return 0; +} + +static inline void igbvf_tx_queue_adv(struct 
igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + int tx_flags, int count, u32 paylen, + u8 hdr_len) +{ + union e1000_adv_tx_desc *tx_desc = NULL; + struct igbvf_buffer *buffer_info; + u32 olinfo_status = 0, cmd_type_len; + unsigned int i; + + cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | + E1000_ADVTXD_DCMD_DEXT); + + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + cmd_type_len |= E1000_ADVTXD_DCMD_VLE; + + if (tx_flags & IGBVF_TX_FLAGS_TSO) { + cmd_type_len |= E1000_ADVTXD_DCMD_TSE; + + /* insert tcp checksum */ + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + + /* insert ip checksum */ + if (tx_flags & IGBVF_TX_FLAGS_IPV4) + olinfo_status |= E1000_TXD_POPTS_IXSM << 8; + + } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) { + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + } + + olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); + + i = tx_ring->next_to_use; + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type_len | buffer_info->length); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + i++; + if (i == tx_ring->count) + i = 0; + } + + tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tail); + /* we need this if more than one processor can write to our tail + * at a time, it syncronizes IO on IA64/Altix systems */ + mmiowb(); +} + +static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, + struct net_device *netdev, + struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + unsigned int first, tx_flags = 0; + u8 hdr_len = 0; + int count = 0; + int tso = 0; + + if (test_bit(__IGBVF_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * need: count + 4 desc gap to keep tail from touching + * + 2 desc gap to keep tail from touching head, + * + 1 desc for skb->data, + * + 1 desc for context descriptor, + * head, otherwise try next time + */ + if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (vlan_tx_tag_present(skb)) { + tx_flags |= IGBVF_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); + } + + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= IGBVF_TX_FLAGS_IPV4; + + first = tx_ring->next_to_use; + + tso = skb_is_gso(skb) ? + igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; + if (unlikely(tso < 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= IGBVF_TX_FLAGS_TSO; + else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && + (skb->ip_summed == CHECKSUM_PARTIAL)) + tx_flags |= IGBVF_TX_FLAGS_CSUM; + + /* + * count reflects descriptors mapped, if 0 then mapping error + * has occurred and we need to rewind the descriptor queue + */ + count = igbvf_tx_map_adv(adapter, tx_ring, skb, first); + + if (count) { + igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, + skb->len, hdr_len); + /* Make sure there is space in the ring for the next send. 
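+ * MAX_SKB_FRAGS + 4 covers a worst-case next frame: one descriptor
+ * per fragment plus head data, a context descriptor and the
+ * two-descriptor gap that keeps tail from touching head.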
*/ + igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *tx_ring; + + if (test_bit(__IGBVF_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tx_ring = &adapter->tx_ring[0]; + + return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring); +} + +/** + * igbvf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void igbvf_tx_timeout(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void igbvf_reset_task(struct work_struct *work) +{ + struct igbvf_adapter *adapter; + adapter = container_of(work, struct igbvf_adapter, reset_task); + + igbvf_reinit_locked(adapter); +} + +/** + * igbvf_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats *igbvf_get_stats(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * igbvf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { + dev_err(&adapter->pdev->dev, "Invalid MTU setting\n"); + return -EINVAL; + } + +#define MAX_STD_JUMBO_FRAME_SIZE 9234 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) + msleep(1); + /* igbvf_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + if (netif_running(netdev)) + igbvf_down(adapter); + + /* + * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= 1024) + adapter->rx_buffer_len = 1024; + else if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else +#if (PAGE_SIZE / 2) > 16384 + adapter->rx_buffer_len = 16384; +#else + adapter->rx_buffer_len = PAGE_SIZE / 2; +#endif + + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + + ETH_FCS_LEN; + + dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igbvf_up(adapter); + else + igbvf_reset(adapter); + + clear_bit(__IGBVF_RESETTING, &adapter->state); + + return 0; +} + +static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + default: + return -EOPNOTSUPP; + } +} + +static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); + igbvf_down(adapter); + igbvf_free_irq(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int igbvf_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_restore_state(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + if (netif_running(netdev)) { + err = igbvf_request_irq(adapter); + if (err) + return err; + } + + igbvf_reset(adapter); + + if (netif_running(netdev)) + igbvf_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void igbvf_shutdown(struct pci_dev *pdev) +{ + igbvf_suspend(pdev, PMSG_SUSPEND); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void igbvf_netpoll(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + + igbvf_clean_tx_irq(adapter->tx_ring); + + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * igbvf_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igbvf_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. 
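+ * In the AER flow, this return value asks the PCI core to reset the
+ * slot; igbvf_io_slot_reset() below then re-enables the device and
+ * igbvf_io_resume() brings the interface back up.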
	 */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * igbvf_io_slot_reset - called after the PCI bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first half of the igbvf_resume routine.
+ */
+static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+	if (pci_enable_device_mem(pdev)) {
+		dev_err(&pdev->dev,
+		        "Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_set_master(pdev);
+
+	igbvf_reset(adapter);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * igbvf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the igbvf_resume routine.
+ */
+static void igbvf_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev)) {
+		if (igbvf_up(adapter)) {
+			dev_err(&pdev->dev,
+			        "can't bring device back up after reset\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+}
+
+static void igbvf_print_device_info(struct igbvf_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
+	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
+	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
+}
+
+static const struct net_device_ops igbvf_netdev_ops = {
+	.ndo_open                       = igbvf_open,
+	.ndo_stop                       = igbvf_close,
+	.ndo_start_xmit                 = igbvf_xmit_frame,
+	.ndo_get_stats                  = igbvf_get_stats,
+	.ndo_set_multicast_list         = igbvf_set_multi,
+	.ndo_set_mac_address            = igbvf_set_mac,
+	.ndo_change_mtu                 = igbvf_change_mtu,
+	.ndo_do_ioctl                   = igbvf_ioctl,
+	.ndo_tx_timeout                 = igbvf_tx_timeout,
+	.ndo_vlan_rx_add_vid            = igbvf_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid           = igbvf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller            = igbvf_netpoll,
+#endif
+};
+
+/**
+ * igbvf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in igbvf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * igbvf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
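+ *
+ * (Outline of the body below, added for readability: set up the DMA masks,
+ * map BAR 0, allocate the net_device, negotiate a MAC address with the PF
+ * over the mailbox, then register_netdev().)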
+ **/ +static int __devinit igbvf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igbvf_adapter *adapter; + struct e1000_hw *hw; + const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; + + static int cards_found; + int err, pci_using_dac; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_regions(pdev, igbvf_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct igbvf_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->hw.back = adapter; + adapter->hw.mac.type = ei->mac; + adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + err = -EIO; + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if (ei->get_variants) { + err = ei->get_variants(adapter); + if (err) + goto err_ioremap; + } + + /* setup adapter struct */ + err = igbvf_sw_init(adapter); + if (err) + goto err_sw_init; + + /* construct the net_device struct */ + netdev->netdev_ops = &igbvf_netdev_ops; + + igbvf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found++; + + netdev->features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; + + netdev->features |= NETIF_F_IPV6_CSUM; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + /*reset the controller to put the device in a known good state */ + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_info(&pdev->dev, + "PF still in reset state, assigning new address." 
+ " Is the PF interface up?\n"); + dev_hw_addr_random(adapter->netdev, hw->mac.addr); + } else { + err = hw->mac.ops.read_mac_addr(hw); + if (err) { + dev_err(&pdev->dev, "Error reading MAC address\n"); + goto err_hw_init; + } + } + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", + netdev->dev_addr); + err = -EIO; + goto err_hw_init; + } + + setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, + (unsigned long) adapter); + + INIT_WORK(&adapter->reset_task, igbvf_reset_task); + INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); + + /* ring size defaults */ + adapter->rx_ring->count = 1024; + adapter->tx_ring->count = 1024; + + /* reset the hardware with the new settings */ + igbvf_reset(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_hw_init; + + /* tell the stack to leave us alone until igbvf_open() is called */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + igbvf_print_device_info(adapter); + + igbvf_initialize_last_counter_stats(adapter); + + return 0; + +err_hw_init: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + igbvf_reset_interrupt_capability(adapter); + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igbvf_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igbvf_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void __devexit igbvf_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* + * The watchdog timer may be rescheduled, so explicitly + * disable it from being rescheduled. 
+ */ + set_bit(__IGBVF_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + + unregister_netdev(netdev); + + igbvf_reset_interrupt_capability(adapter); + + /* + * it is important to delete the napi struct prior to freeing the + * rx ring so that you do not end up with null pointer refs + */ + netif_napi_del(&adapter->rx_ring->napi); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static struct pci_error_handlers igbvf_err_handler = { + .error_detected = igbvf_io_error_detected, + .slot_reset = igbvf_io_slot_reset, + .resume = igbvf_io_resume, +}; + +static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); + +/* PCI Device API Driver */ +static struct pci_driver igbvf_driver = { + .name = igbvf_driver_name, + .id_table = igbvf_pci_tbl, + .probe = igbvf_probe, + .remove = __devexit_p(igbvf_remove), +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = igbvf_suspend, + .resume = igbvf_resume, +#endif + .shutdown = igbvf_shutdown, + .err_handler = &igbvf_err_handler +}; + +/** + * igbvf_init_module - Driver Registration Routine + * + * igbvf_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init igbvf_init_module(void) +{ + int ret; + printk(KERN_INFO "%s - version %s\n", + igbvf_driver_string, igbvf_driver_version); + printk(KERN_INFO "%s\n", igbvf_copyright); + + ret = pci_register_driver(&igbvf_driver); + + return ret; +} +module_init(igbvf_init_module); + +/** + * igbvf_exit_module - Driver Exit Cleanup Routine + * + * igbvf_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit igbvf_exit_module(void) +{ + pci_unregister_driver(&igbvf_driver); +} +module_exit(igbvf_exit_module); + + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* netdev.c */ diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h new file mode 100644 index 0000000..77e18d3 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/regs.h @@ -0,0 +1,108 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL      0x00000 /* Device Control - RW */
+#define E1000_STATUS    0x00008 /* Device Status - RO */
+#define E1000_ITR       0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_EICR      0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n)  (0x01680 + (0x4 * (_n)))
+#define E1000_EICS      0x01520 /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS      0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC      0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC      0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM      0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_IVAR0     0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+/*
+ * Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n)   ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+                           (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n)   ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+                           (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n)   ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+                           (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n)  ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+                           (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n)     ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+                           (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n)     ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+                           (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n)  ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+                           (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n)   ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+                           (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n)   ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+                           (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n)   ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+                           (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n)     ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+                           (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n)     ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+                           (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n)  ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+                           (0x0E028 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) (0x03814 + ((_n) << 8))
+#define E1000_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_RAL(_i)     (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+                           (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i)     (((_i) <= 15) ?
(0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) + +/* Statistics registers */ +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 + +/* These act per VF so an array friendly macro is used */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) + +/* Define macros for handling registers */ +#define er32(reg) readl(hw->hw_addr + E1000_##reg) +#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) +#define array_er32(reg, offset) \ + readl(hw->hw_addr + E1000_##reg + (offset << 2)) +#define array_ew32(reg, offset, val) \ + writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) +#define e1e_flush() er32(STATUS) + +#endif diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c new file mode 100644 index 0000000..af3822f --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -0,0 +1,402 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include "vf.h" + +static s32 e1000_check_for_link_vf(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_init_hw_vf(struct e1000_hw *hw); +static s32 e1000_reset_hw_vf(struct e1000_hw *hw); + +static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, + u32, u32, u32); +static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); +static s32 e1000_read_mac_addr_vf(struct e1000_hw *); +static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); + +/** + * e1000_init_mac_params_vf - Inits MAC params + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_vf(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + /* VF's have no MTA Registers - PF feature only */ + mac->mta_reg_count = 128; + /* VF's have no access to RAR entries */ + mac->rar_entry_count = 1; + + /* Function pointers */ + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_vf; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_vf; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_vf; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_vf; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf; + /* set mac address */ + mac->ops.rar_set = e1000_rar_set_vf; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_vf; + /* set vlan filter table array */ + mac->ops.set_vfta = e1000_set_vfta_vf; + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_vf - Inits function pointers + * @hw: pointer to the HW structure + **/ +void e1000_init_function_pointers_vf(struct e1000_hw *hw) +{ + hw->mac.ops.init_params = e1000_init_mac_params_vf; + hw->mbx.ops.init_params = e1000_init_mbx_params_vf; +} + +/** + * e1000_get_link_up_info_vf - Gets link info. + * @hw: pointer to the HW structure + * @speed: pointer to 16 bit value to store link speed. + * @duplex: pointer to 16 bit value to store duplex. + * + * Since we cannot read the PHY and get accurate link info, we must rely upon + * the status register's data which is often stale and inaccurate. + **/ +static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) + *speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + if (status & E1000_STATUS_FD) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_vf - Resets the HW + * @hw: pointer to the HW structure + * + * VF's provide a function level reset. This is done using bit 26 of ctrl_reg. + * This is all the reset we can perform on a VF. 
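+ *
+ * (Summary of the handshake below, added for readability: assert CTRL.RST,
+ * poll mbx->ops.check_for_rst() until the PF acknowledges the reset, send
+ * the E1000_VF_RESET mailbox message, then read back the MAC address the
+ * PF supplies in its reply.)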
+ **/ +static s32 e1000_reset_hw_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 timeout = E1000_VF_INIT_TIMEOUT; + u32 ret_val = -E1000_ERR_MAC_INIT; + u32 msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + u32 ctrl; + + /* assert vf queue/interrupt reset */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + /* we cannot initialize while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw) && timeout) { + timeout--; + udelay(5); + } + + if (timeout) { + /* mailbox timeout can now become active */ + mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; + + /* notify pf of vf reset completion */ + msgbuf[0] = E1000_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1); + + msleep(10); + + /* set our "perm_addr" based on info provided by PF */ + ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + if (!ret_val) { + if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) + memcpy(hw->mac.perm_addr, addr, 6); + else + ret_val = -E1000_ERR_MAC_INIT; + } + } + + return ret_val; +} + +/** + * e1000_init_hw_vf - Inits the HW + * @hw: pointer to the HW structure + * + * Not much to do here except clear the PF Reset indication if there is one. + **/ +static s32 e1000_init_hw_vf(struct e1000_hw *hw) +{ + /* attempt to set and restore our mac address */ + e1000_rar_set_vf(hw, hw->mac.addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr_vf - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * e1000_mta_set_generic() + **/ +static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * The bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. + **/ +static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + u16 *hash_list = (u16 *)&msgbuf[1]; + u32 hash_value; + u32 cnt, i; + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. 
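+	 * (Worked math, assuming E1000_VFMAILBOX_SIZE is 16 dwords: 64 bytes
+	 * of mailbox minus one dword for the message type leaves 60 bytes,
+	 * i.e. the 30 16-bit hash entries referred to above.)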
+	 * It would be unusual for a server to request that many multicast
+	 * addresses except in large enterprise network environments.
+	 */
+
+	cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+	msgbuf[0] = E1000_VF_SET_MULTICAST;
+	msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT;
+
+	for (i = 0; i < cnt; i++) {
+		hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
+		hash_list[i] = hash_value & 0x0FFFF;
+		mc_addr_list += ETH_ADDR_LEN;
+	}
+
+	mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE);
+}
+
+/**
+ * e1000_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vid: determines the vfta register and bit to set/unset
+ * @set: if true then set bit, else clear bit
+ **/
+static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 err;
+
+	msgbuf[0] = E1000_VF_SET_VLAN;
+	msgbuf[1] = vid;
+	/* Setting the 8 bit field MSG INFO to true indicates "add" */
+	if (set)
+		msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
+
+	mbx->ops.write_posted(hw, msgbuf, 2);
+
+	err = mbx->ops.read_posted(hw, msgbuf, 2);
+
+	msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+
+	/* if NACKed, the VLAN was rejected */
+	if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)))
+		err = -E1000_ERR_MAC_INIT;
+
+	return err;
+}
+
+/**
+ * e1000_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+
+	msgbuf[0] = E1000_VF_SET_LPE;
+	msgbuf[1] = max_size;
+
+	mbx->ops.write_posted(hw, msgbuf, 2);
+}
+
+/**
+ * e1000_rar_set_vf - set device MAC address
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ **/
+static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val;
+
+	memset(msgbuf, 0, 12);
+	msgbuf[0] = E1000_VF_SET_MAC_ADDR;
+	memcpy(msg_addr, addr, 6);
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+	msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+
+	/* if NACKed, the address was rejected, use "perm_addr" */
+	if (!ret_val &&
+	    (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
+		e1000_read_mac_addr_vf(hw);
+}
+
+/**
+ * e1000_read_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
+{
+	int i;
+
+	for (i = 0; i < ETH_ADDR_LEN; i++)
+		hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_for_link_vf - Check for link for a virtual interface
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the underlying PF is still talking to the VF and
+ * if it is then it reports the link state to the hardware, otherwise
+ * it reports link down and returns an error.
+ **/
+static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+	u32 in_msg = 0;
+
+	/*
+	 * We only want to run this if there has been a rst asserted.
+	 * In this case that could mean a link change, a device reset,
+	 * or a virtual function reset.
+	 */
+
+	/* If we were hit with a reset or timeout drop the link */
+	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+		mac->get_link_status = true;
+
+	if (!mac->get_link_status)
+		goto out;
+
+	/* if link status is down no point in checking to see if pf is up */
+	if (!(er32(STATUS) & E1000_STATUS_LU))
+		goto out;
+
+	/* if the read failed it could just be a mailbox collision, best wait
+	 * until we are called again and don't report an error */
+	if (mbx->ops.read(hw, &in_msg, 1))
+		goto out;
+
+	/* if incoming message isn't clear to send we are waiting on response */
+	if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
+		/* the message is not CTS; if it is also a NACK we must have
+		 * lost CTS status */
+		if (in_msg & E1000_VT_MSGTYPE_NACK)
+			ret_val = -E1000_ERR_MAC_INIT;
+		goto out;
+	}
+
+	/* the pf is talking, if we timed out in the past we reinit */
+	if (!mbx->timeout) {
+		ret_val = -E1000_ERR_MAC_INIT;
+		goto out;
+	}
+
+	/* if we passed all the tests above then the link is up and we no
+	 * longer need to check for link */
+	mac->get_link_status = false;
+
+out:
+	return ret_val;
+}
+
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
new file mode 100644
index 0000000..d7ed58f
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -0,0 +1,266 @@
+/*******************************************************************************
+
+  Intel(R) 82576 Virtual Function Linux driver
+  Copyright(c) 2009 - 2010 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_VF_H_
+#define _E1000_VF_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "regs.h"
+#include "defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576_VF  0x10CA
+#define E1000_DEV_ID_I350_VF   0x1520
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0 0
+#define E1000_FUNC_1 1
+
+/*
+ * Receive Address Register Count
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * These entries are also used for MAC-based filtering.
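+ * (Since the VF is given a single RAR entry, additional addresses are
+ * handled via PF mailbox messages; see e1000_rar_set_vf() and
+ * e1000_update_mc_addr_list_vf() in vf.c.)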
+ */ +#define E1000_RAR_ENTRIES_VF 1 + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + u32 data; + struct { + u16 pkt_info; /* RSS/Packet type */ + u16 hdr_info; /* Split Header, + * hdr buffer length */ + } hs_rss; + } lo_dword; + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + u64 buffer_addr; /* Address of descriptor's data buf */ + u32 cmd_type_len; + u32 olinfo_status; + } read; + struct { + u64 rsvd; /* Reserved */ + u32 nxtseq_seed; + u32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + u32 vlan_macip_lens; + u32 seqnum_seed; + u32 type_tucmd_mlhl; + u32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_vfadapt, + e1000_vfadapt_i350, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +struct e1000_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +#include "mbx.h" + +struct e1000_mac_operations { + /* Function pointers for the MAC. 
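+	 * Only a subset of these ops is populated for the VF;
+	 * e1000_init_mac_params_vf() in vf.c fills in the ones the 82576 VF
+	 * supports (reset_hw, init_hw, check_for_link, get_link_up_info,
+	 * update_mc_addr_list, rar_set, read_mac_addr and set_vfta).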
*/ + s32 (*init_params)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*mta_set)(struct e1000_hw *, u32); + void (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*set_vfta)(struct e1000_hw *, u16, bool); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u16 mta_reg_count; + u16 rar_entry_count; + + bool get_link_status; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16); + s32 (*check_for_msg)(struct e1000_hw *); + s32 (*check_for_ack)(struct e1000_hw *); + s32 (*check_for_rst)(struct e1000_hw *); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_mbx_info mbx; + + union { + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* These functions must be implemented by drivers */ +void e1000_rlpml_set_vf(struct e1000_hw *, u16); +void e1000_init_function_pointers_vf(struct e1000_hw *hw); + + +#endif /* _E1000_VF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile new file mode 100644 index 0000000..0b20c5e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/Makefile @@ -0,0 +1,35 @@ +################################################################################ +# +# Intel PRO/10GbE Linux driver +# Copyright(c) 1999 - 2008 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) PRO/10GbE ethernet driver +# + +obj-$(CONFIG_IXGB) += ixgb.o + +ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h new file mode 100644 index 0000000..49e8408 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb.h @@ -0,0 +1,217 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGB_H_ +#define _IXGB_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +struct ixgb_adapter; +#include "ixgb_hw.h" +#include "ixgb_ee.h" +#include "ixgb_ids.h" + +#define PFX "ixgb: " + +#ifdef _DEBUG_DRIVER_ +#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args) +#else +#define IXGB_DBG(fmt, args...) \ +do { \ + if (0) \ + printk(KERN_DEBUG PFX fmt, ##args); \ +} while (0) +#endif + +/* TX/RX descriptor defines */ +#define DEFAULT_TXD 256 +#define MAX_TXD 4096 +#define MIN_TXD 64 + +/* hardware cannot reliably support more than 512 descriptors owned by + * hardware descriptor cache otherwise an unreliable ring under heavy + * receive load may result */ +#define DEFAULT_RXD 512 +#define MAX_RXD 512 +#define MIN_RXD 64 + +/* Supported Rx Buffer Sizes */ +#define IXGB_RXBUFFER_2048 2048 +#define IXGB_RXBUFFER_4096 4096 +#define IXGB_RXBUFFER_8192 8192 +#define IXGB_RXBUFFER_16384 16384 + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
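+ * (Presumably a power of two so the ring index arithmetic can wrap with a
+ * mask; batching the tail writes also reduces MMIO traffic.)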
*/ +#define IXGB_RX_BUFFER_WRITE 8 /* Must be power of 2 */ + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ixgb_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + u16 mapped_as_page; +}; + +struct ixgb_desc_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct ixgb_buffer *buffer_info; +}; + +#define IXGB_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define IXGB_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define IXGB_RX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_rx_desc) +#define IXGB_TX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_tx_desc) +#define IXGB_CONTEXT_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_context_desc) + +/* board specific private data structure */ + +struct ixgb_adapter { + struct timer_list watchdog_timer; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 bd_number; + u32 rx_buffer_len; + u32 part_num; + u16 link_speed; + u16 link_duplex; + struct work_struct tx_timeout_task; + + /* TX */ + struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; + unsigned int restart_queue; + unsigned long timeo_start; + u32 tx_cmd_type; + u64 hw_csum_tx_good; + u64 hw_csum_tx_error; + u32 tx_int_delay; + u32 tx_timeout_count; + bool tx_int_delay_enable; + bool detect_tx_hung; + + /* RX */ + struct ixgb_desc_ring rx_ring; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u32 rx_int_delay; + bool rx_csum; + + /* OS defined structs */ + struct napi_struct napi; + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in ixgb_hw.h */ + struct ixgb_hw hw; + u16 msg_enable; + struct ixgb_hw_stats stats; + u32 alloc_rx_buff_failed; + bool have_msi; + unsigned long flags; +}; + +enum ixgb_state_t { + /* TBD + __IXGB_TESTING, + __IXGB_RESETTING, + */ + __IXGB_DOWN +}; + +/* Exported from other modules */ +extern void ixgb_check_options(struct ixgb_adapter *adapter); +extern void ixgb_set_ethtool_ops(struct net_device *netdev); +extern char ixgb_driver_name[]; +extern const char ixgb_driver_version[]; + +extern int ixgb_up(struct ixgb_adapter *adapter); +extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog); +extern void ixgb_reset(struct ixgb_adapter *adapter); +extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter); +extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter); +extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter); +extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter); +extern void ixgb_update_stats(struct ixgb_adapter *adapter); + + +#endif /* _IXGB_H_ */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c new file mode 100644 index 0000000..38b362b --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c @@ -0,0 +1,607 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "ixgb_hw.h" +#include "ixgb_ee.h" +/* Local prototypes */ +static u16 ixgb_shift_in_bits(struct ixgb_hw *hw); + +static void ixgb_shift_out_bits(struct ixgb_hw *hw, + u16 data, + u16 count); +static void ixgb_standby_eeprom(struct ixgb_hw *hw); + +static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw); + +static void ixgb_cleanup_eeprom(struct ixgb_hw *hw); + +/****************************************************************************** + * Raises the EEPROM's clock input. + * + * hw - Struct containing variables accessed by shared code + * eecd_reg - EECD's current value + *****************************************************************************/ +static void +ixgb_raise_clock(struct ixgb_hw *hw, + u32 *eecd_reg) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait 50 microseconds. + */ + *eecd_reg = *eecd_reg | IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, *eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); +} + +/****************************************************************************** + * Lowers the EEPROM's clock input. + * + * hw - Struct containing variables accessed by shared code + * eecd_reg - EECD's current value + *****************************************************************************/ +static void +ixgb_lower_clock(struct ixgb_hw *hw, + u32 *eecd_reg) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and then + * wait 50 microseconds. + */ + *eecd_reg = *eecd_reg & ~IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, *eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); +} + +/****************************************************************************** + * Shift data bits out to the EEPROM. + * + * hw - Struct containing variables accessed by shared code + * data - data to send to the EEPROM + * count - number of bits to shift out + *****************************************************************************/ +static void +ixgb_shift_out_bits(struct ixgb_hw *hw, + u16 data, + u16 count) +{ + u32 eecd_reg; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. 
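+ * (Example, added for illustration: with data = 0x6 and count = 3 the
+ * initial mask is 0x4, so the wire sees 1, 1, 0 - most significant bit
+ * first.)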
+ */ + mask = 0x01 << (count - 1); + eecd_reg = IXGB_READ_REG(hw, EECD); + eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", + * and then raising and then lowering the clock (the SK bit controls + * the clock input to the EEPROM). A "0" is shifted out to the EEPROM + * by setting "DI" to "0" and then raising and then lowering the clock. + */ + eecd_reg &= ~IXGB_EECD_DI; + + if (data & mask) + eecd_reg |= IXGB_EECD_DI; + + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + + udelay(50); + + ixgb_raise_clock(hw, &eecd_reg); + ixgb_lower_clock(hw, &eecd_reg); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd_reg &= ~IXGB_EECD_DI; + IXGB_WRITE_REG(hw, EECD, eecd_reg); +} + +/****************************************************************************** + * Shift data bits in from the EEPROM + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static u16 +ixgb_shift_in_bits(struct ixgb_hw *hw) +{ + u32 eecd_reg; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 16 bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the "DO" + * bit. During this "shifting in" process the "DI" bit should always be + * clear.. + */ + + eecd_reg = IXGB_READ_REG(hw, EECD); + + eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); + data = 0; + + for (i = 0; i < 16; i++) { + data = data << 1; + ixgb_raise_clock(hw, &eecd_reg); + + eecd_reg = IXGB_READ_REG(hw, EECD); + + eecd_reg &= ~(IXGB_EECD_DI); + if (eecd_reg & IXGB_EECD_DO) + data |= 1; + + ixgb_lower_clock(hw, &eecd_reg); + } + + return data; +} + +/****************************************************************************** + * Prepares EEPROM for access + * + * hw - Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
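+ *
+ * (Call pattern, summarized from the routines below: ixgb_setup_eeprom(),
+ * then ixgb_shift_out_bits() for the opcode/address, the data transfer,
+ * ixgb_standby_eeprom() between commands, and ixgb_cleanup_eeprom() when
+ * done.)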
+ *****************************************************************************/ +static void +ixgb_setup_eeprom(struct ixgb_hw *hw) +{ + u32 eecd_reg; + + eecd_reg = IXGB_READ_REG(hw, EECD); + + /* Clear SK and DI */ + eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI); + IXGB_WRITE_REG(hw, EECD, eecd_reg); + + /* Set CS */ + eecd_reg |= IXGB_EECD_CS; + IXGB_WRITE_REG(hw, EECD, eecd_reg); +} + +/****************************************************************************** + * Returns EEPROM to a "standby" state + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_standby_eeprom(struct ixgb_hw *hw) +{ + u32 eecd_reg; + + eecd_reg = IXGB_READ_REG(hw, EECD); + + /* Deselect EEPROM */ + eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); + + /* Clock high */ + eecd_reg |= IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); + + /* Select EEPROM */ + eecd_reg |= IXGB_EECD_CS; + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); + + /* Clock low */ + eecd_reg &= ~IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); +} + +/****************************************************************************** + * Raises then lowers the EEPROM's clock pin + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_clock_eeprom(struct ixgb_hw *hw) +{ + u32 eecd_reg; + + eecd_reg = IXGB_READ_REG(hw, EECD); + + /* Rising edge of clock */ + eecd_reg |= IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); + + /* Falling edge of clock */ + eecd_reg &= ~IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); +} + +/****************************************************************************** + * Terminates a command by lowering the EEPROM's chip select pin + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_cleanup_eeprom(struct ixgb_hw *hw) +{ + u32 eecd_reg; + + eecd_reg = IXGB_READ_REG(hw, EECD); + + eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI); + + IXGB_WRITE_REG(hw, EECD, eecd_reg); + + ixgb_clock_eeprom(hw); +} + +/****************************************************************************** + * Waits for the EEPROM to finish the current command. + * + * hw - Struct containing variables accessed by shared code + * + * The command is done when the EEPROM's data out pin goes high. + * + * Returns: + * true: EEPROM data pin is high before timeout. + * false: Time expired. + *****************************************************************************/ +static bool +ixgb_wait_eeprom_command(struct ixgb_hw *hw) +{ + u32 eecd_reg; + u32 i; + + /* Toggle the CS line. This in effect tells to EEPROM to actually execute + * the command in question. + */ + ixgb_standby_eeprom(hw); + + /* Now read DO repeatedly until is high (equal to '1'). The EEPROM will + * signal that the command has been completed by raising the DO signal. + * If DO does not go high in 10 milliseconds, then error out. 
+ */ + for (i = 0; i < 200; i++) { + eecd_reg = IXGB_READ_REG(hw, EECD); + + if (eecd_reg & IXGB_EECD_DO) + return true; + + udelay(50); + } + ASSERT(0); + return false; +} + +/****************************************************************************** + * Verifies that the EEPROM has a valid checksum + * + * hw - Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + * + * Returns: + * true: Checksum is valid + * false: Checksum is not valid. + *****************************************************************************/ +bool +ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) +{ + u16 checksum = 0; + u16 i; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) + checksum += ixgb_read_eeprom(hw, i); + + if (checksum == (u16) EEPROM_SUM) + return true; + else + return false; +} + +/****************************************************************************** + * Calculates the EEPROM checksum and writes it to the EEPROM + * + * hw - Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + *****************************************************************************/ +void +ixgb_update_eeprom_checksum(struct ixgb_hw *hw) +{ + u16 checksum = 0; + u16 i; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) + checksum += ixgb_read_eeprom(hw, i); + + checksum = (u16) EEPROM_SUM - checksum; + + ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum); +} + +/****************************************************************************** + * Writes a 16 bit word to a given offset in the EEPROM. + * + * hw - Struct containing variables accessed by shared code + * reg - offset within the EEPROM to be written to + * data - 16 bit word to be written to the EEPROM + * + * If ixgb_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + * + *****************************************************************************/ +void +ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data) +{ + struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; + + /* Prepare the EEPROM for writing */ + ixgb_setup_eeprom(hw); + + /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode + * plus 4-bit dummy). This puts the EEPROM into write/erase mode. + */ + ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5); + ixgb_shift_out_bits(hw, 0, 4); + + /* Prepare the EEPROM */ + ixgb_standby_eeprom(hw); + + /* Send the Write command (3-bit opcode + 6-bit addr) */ + ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3); + ixgb_shift_out_bits(hw, offset, 6); + + /* Send the data */ + ixgb_shift_out_bits(hw, data, 16); + + ixgb_wait_eeprom_command(hw); + + /* Recover from write */ + ixgb_standby_eeprom(hw); + + /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit + * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase + * mode. 
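+	 * (For reference: EEPROM_EWDS_OPCODE is 0x10, i.e. the five bits
+	 * 10000 on the wire, followed by four dummy 0 bits - mirroring the
+	 * EWEN sequence above.)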
+	 */
+	ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5);
+	ixgb_shift_out_bits(hw, 0, 4);
+
+	/*  Done with writing  */
+	ixgb_cleanup_eeprom(hw);
+
+	/* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
+	ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
+}
+
+/******************************************************************************
+ * Reads a 16 bit word from the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of 16 bit word in the EEPROM to read
+ *
+ * Returns:
+ *      The 16-bit value read from the eeprom
+ *****************************************************************************/
+u16
+ixgb_read_eeprom(struct ixgb_hw *hw,
+		 u16 offset)
+{
+	u16 data;
+
+	/*  Prepare the EEPROM for reading  */
+	ixgb_setup_eeprom(hw);
+
+	/*  Send the READ command (opcode + addr)  */
+	ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3);
+	/*
+	 * We have a 64 word EEPROM, there are 6 address bits
+	 */
+	ixgb_shift_out_bits(hw, offset, 6);
+
+	/*  Read the data  */
+	data = ixgb_shift_in_bits(hw);
+
+	/*  End this read operation  */
+	ixgb_standby_eeprom(hw);
+
+	return data;
+}
+
+/******************************************************************************
+ * Reads eeprom and stores data in shared structure.
+ * Validates eeprom checksum and eeprom signature.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ *      true: if eeprom read is successful
+ *      false: otherwise.
+ *****************************************************************************/
bool
+ixgb_get_eeprom_data(struct ixgb_hw *hw)
+{
+	u16 i;
+	u16 checksum = 0;
+	struct ixgb_ee_map_type *ee_map;
+
+	ENTER();
+
+	ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+	pr_debug("Reading eeprom data\n");
+	for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
+		u16 ee_data;
+		ee_data = ixgb_read_eeprom(hw, i);
+		checksum += ee_data;
+		hw->eeprom[i] = cpu_to_le16(ee_data);
+	}
+
+	if (checksum != (u16) EEPROM_SUM) {
+		pr_debug("Checksum invalid\n");
+		/* clear the init_ctrl_reg_1 to signify that the cache is
+		 * invalidated */
+		ee_map->init_ctrl_reg_1 =
+		    cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
+		return false;
+	}
+
+	if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
+	     != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
+		pr_debug("Signature invalid\n");
+		return false;
+	}
+
+	return true;
+}
+
+/******************************************************************************
+ * Local function to check if the eeprom signature is good. If the signature
+ * is already valid the cached EEPROM image is used; otherwise it calls
+ * ixgb_get_eeprom_data to (re)read the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ *      true: eeprom signature was good and the eeprom read was successful
+ *      false: otherwise.
+ ******************************************************************************/
+static bool
+ixgb_check_and_get_eeprom_data(struct ixgb_hw *hw)
+{
+	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+	if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
+	    == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
+		return true;
+	} else {
+		return ixgb_get_eeprom_data(hw);
+	}
+}
+
+/******************************************************************************
+ * return a word from the eeprom
+ *
+ * hw - Struct containing variables accessed by shared code
+ * index - Offset of eeprom word
+ *
+ * Returns:
+ *          Word at indexed offset in eeprom, if valid, 0 otherwise.
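+ *
+ * (Illustrative call, hypothetical usage: ixgb_get_eeprom_word(hw,
+ * EEPROM_FLASH_VERSION) would fetch the flash version word, since
+ * 0x32 < IXGB_EEPROM_SIZE.)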
+ ******************************************************************************/ +__le16 +ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index) +{ + + if ((index < IXGB_EEPROM_SIZE) && + (ixgb_check_and_get_eeprom_data(hw) == true)) { + return hw->eeprom[index]; + } + + return 0; +} + +/****************************************************************************** + * return the mac address from EEPROM + * + * hw - Struct containing variables accessed by shared code + * mac_addr - Ethernet Address if EEPROM contents are valid, 0 otherwise + * + * Returns: None. + ******************************************************************************/ +void +ixgb_get_ee_mac_addr(struct ixgb_hw *hw, + u8 *mac_addr) +{ + int i; + struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; + + ENTER(); + + if (ixgb_check_and_get_eeprom_data(hw) == true) { + for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) { + mac_addr[i] = ee_map->mac_addr[i]; + } + pr_debug("eeprom mac address = %pM\n", mac_addr); + } +} + + +/****************************************************************************** + * return the Printed Board Assembly number from EEPROM + * + * hw - Struct containing variables accessed by shared code + * + * Returns: + * PBA number if EEPROM contents are valid, 0 otherwise + ******************************************************************************/ +u32 +ixgb_get_ee_pba_number(struct ixgb_hw *hw) +{ + if (ixgb_check_and_get_eeprom_data(hw) == true) + return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG]) + | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16); + + return 0; +} + + +/****************************************************************************** + * return the Device Id from EEPROM + * + * hw - Struct containing variables accessed by shared code + * + * Returns: + * Device Id if EEPROM contents are valid, 0 otherwise + ******************************************************************************/ +u16 +ixgb_get_ee_device_id(struct ixgb_hw *hw) +{ + struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; + + if (ixgb_check_and_get_eeprom_data(hw) == true) + return le16_to_cpu(ee_map->device_id); + + return 0; +} + diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h new file mode 100644 index 0000000..7ea1265 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h @@ -0,0 +1,106 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGB_EE_H_ +#define _IXGB_EE_H_ + +#define IXGB_EEPROM_SIZE 64 /* Size in words */ + +#define IXGB_ETH_LENGTH_OF_ADDRESS 6 + +/* EEPROM Commands */ +#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */ +#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM MAP (Word Offsets) */ +#define EEPROM_IA_1_2_REG 0x0000 +#define EEPROM_IA_3_4_REG 0x0001 +#define EEPROM_IA_5_6_REG 0x0002 +#define EEPROM_COMPATIBILITY_REG 0x0003 +#define EEPROM_PBA_1_2_REG 0x0008 +#define EEPROM_PBA_3_4_REG 0x0009 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_SUBSYS_ID_REG 0x000B +#define EEPROM_SUBVEND_ID_REG 0x000C +#define EEPROM_DEVICE_ID_REG 0x000D +#define EEPROM_VENDOR_ID_REG 0x000E +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDPINS_REG 0x0020 +#define EEPROM_CIRCUIT_CTRL_REG 0x0021 +#define EEPROM_D0_D3_POWER_REG 0x0022 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +/* Mask bits for fields in Word 0x0a of the EEPROM */ + +#define EEPROM_ICW1_SIGNATURE_MASK 0xC000 +#define EEPROM_ICW1_SIGNATURE_VALID 0x4000 +#define EEPROM_ICW1_SIGNATURE_CLEAR 0x0000 + +/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* EEPROM Map defines (WORD OFFSETS)*/ + +/* EEPROM structure */ +struct ixgb_ee_map_type { + u8 mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; + __le16 compatibility; + __le16 reserved1[4]; + __le32 pba_number; + __le16 init_ctrl_reg_1; + __le16 subsystem_id; + __le16 subvendor_id; + __le16 device_id; + __le16 vendor_id; + __le16 init_ctrl_reg_2; + __le16 oem_reserved[16]; + __le16 swdpins_reg; + __le16 circuit_ctrl_reg; + u8 d3_power; + u8 d0_power; + __le16 reserved2[28]; + __le16 checksum; +}; + +/* EEPROM Functions */ +u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg); + +bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw); + +void ixgb_update_eeprom_checksum(struct ixgb_hw *hw); + +void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data); + +#endif /* IXGB_EE_H */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c new file mode 100644 index 0000000..6da890b --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -0,0 +1,758 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS
+ e1000-devel Mailing List
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgb */
+
+#include "ixgb.h"
+
+#include <asm/uaccess.h>
+
+#define IXGB_ALL_RAR_ENTRIES 16
+
+enum {NETDEV_STATS, IXGB_STATS};
+
+struct ixgb_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int type;
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define IXGB_STAT(m)		IXGB_STATS, \
+				FIELD_SIZEOF(struct ixgb_adapter, m), \
+				offsetof(struct ixgb_adapter, m)
+#define IXGB_NETDEV_STAT(m)	NETDEV_STATS, \
+				FIELD_SIZEOF(struct net_device, m), \
+				offsetof(struct net_device, m)
+
+static struct ixgb_stats ixgb_gstrings_stats[] = {
+	{"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)},
+	{"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)},
+	{"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)},
+	{"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)},
+	{"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)},
+	{"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)},
+	{"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)},
+	{"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)},
+	{"multicast", IXGB_NETDEV_STAT(stats.multicast)},
+	{"collisions", IXGB_NETDEV_STAT(stats.collisions)},
+
+/*	{ "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) },	*/
+	{"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)},
+	{"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)},
+	{"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)},
+	{"rx_no_buffer_count", IXGB_STAT(stats.rnbc)},
+	{"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)},
+	{"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)},
+	{"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)},
+	{"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)},
+	{"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)},
+	{"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)},
+	{"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)},
+	{"tx_deferred_ok", IXGB_STAT(stats.dc)},
+	{"tx_timeout_count", IXGB_STAT(tx_timeout_count)},
+	{"tx_restart_queue", IXGB_STAT(restart_queue)},
+	{"rx_long_length_errors", IXGB_STAT(stats.roc)},
+	{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
+	{"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
+	{"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
+	{"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
+	{"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
+	{"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
+	{"tx_flow_control_xoff", IXGB_STAT(stats.xofftxc)},
+	{"rx_csum_offload_good", IXGB_STAT(hw_csum_rx_good)},
+	{"rx_csum_offload_errors", IXGB_STAT(hw_csum_rx_error)},
+	{"tx_csum_offload_good", IXGB_STAT(hw_csum_tx_good)},
+	{"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)}
+};
+
+#define IXGB_STATS_LEN	ARRAY_SIZE(ixgb_gstrings_stats)
+
+static int
+ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+	ecmd->port = PORT_FIBRE;
+	ecmd->transceiver = XCVR_EXTERNAL;
+
+	if (netif_carrier_ok(adapter->netdev)) {
+		ethtool_cmd_speed_set(ecmd, SPEED_10000);
+		ecmd->duplex = DUPLEX_FULL;
+	} else {
+		ethtool_cmd_speed_set(ecmd, -1);
+		ecmd->duplex = -1;
+
} + + ecmd->autoneg = AUTONEG_DISABLE; + return 0; +} + +static void ixgb_set_speed_duplex(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + /* be optimistic about our link, since we were up before */ + adapter->link_speed = 10000; + adapter->link_duplex = FULL_DUPLEX; + netif_carrier_on(netdev); + netif_wake_queue(netdev); +} + +static int +ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + u32 speed = ethtool_cmd_speed(ecmd); + + if (ecmd->autoneg == AUTONEG_ENABLE || + (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + + if (netif_running(adapter->netdev)) { + ixgb_down(adapter, true); + ixgb_reset(adapter); + ixgb_up(adapter); + ixgb_set_speed_duplex(netdev); + } else + ixgb_reset(adapter); + + return 0; +} + +static void +ixgb_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + + pause->autoneg = AUTONEG_DISABLE; + + if (hw->fc.type == ixgb_fc_rx_pause) + pause->rx_pause = 1; + else if (hw->fc.type == ixgb_fc_tx_pause) + pause->tx_pause = 1; + else if (hw->fc.type == ixgb_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int +ixgb_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + + if (pause->autoneg == AUTONEG_ENABLE) + return -EINVAL; + + if (pause->rx_pause && pause->tx_pause) + hw->fc.type = ixgb_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.type = ixgb_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.type = ixgb_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.type = ixgb_fc_none; + + if (netif_running(adapter->netdev)) { + ixgb_down(adapter, true); + ixgb_up(adapter); + ixgb_set_speed_duplex(netdev); + } else + ixgb_reset(adapter); + + return 0; +} + +static u32 +ixgb_get_rx_csum(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + return adapter->rx_csum; +} + +static int +ixgb_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + adapter->rx_csum = data; + + if (netif_running(netdev)) { + ixgb_down(adapter, true); + ixgb_up(adapter); + ixgb_set_speed_duplex(netdev); + } else + ixgb_reset(adapter); + return 0; +} + +static u32 +ixgb_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_HW_CSUM) != 0; +} + +static int +ixgb_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} + +static int +ixgb_set_tso(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_TSO; + else + netdev->features &= ~NETIF_F_TSO; + return 0; +} + +static u32 +ixgb_get_msglevel(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void +ixgb_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} +#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_ + +static int +ixgb_get_regs_len(struct net_device *netdev) +{ +#define IXGB_REG_DUMP_LEN 136*sizeof(u32) + return IXGB_REG_DUMP_LEN; +} + +static void +ixgb_get_regs(struct net_device *netdev, + 
struct ethtool_regs *regs, void *p) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + u32 *reg = p; + u32 *reg_start = reg; + u8 i; + + /* the 1 (one) below indicates an attempt at versioning, if the + * interface in ethtool or the driver changes, this 1 should be + * incremented */ + regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id; + + /* General Registers */ + *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */ + *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */ + *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */ + *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */ + *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */ + + /* Interrupt */ + *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */ + *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */ + *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */ + *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */ + + /* Receive */ + *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */ + *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */ + *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */ + *reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */ + *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */ + *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */ + *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */ + *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */ + *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */ + *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */ + *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */ + *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ + + /* there are 16 RAR entries in hardware, we only use 3 */ + for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) { + *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ + *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ + } + + /* Transmit */ + *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */ + *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */ + *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */ + *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */ + *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */ + *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */ + *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */ + *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */ + *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */ + *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */ + + /* Physical */ + *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */ + *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */ + *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */ + *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */ + *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */ + *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */ + *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */ + *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */ + *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */ + *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */ + *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */ + *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */ + *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */ + + /* Statistics */ + *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */ + *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */ + *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */ + *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */ + *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */ + *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */ + *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */ + *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */ + *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */ + *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */ + *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */ + *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */ + *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */ + *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */ + *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */ + *reg++ = IXGB_GET_STAT(adapter, 
gorch); /* 91 */ + *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */ + *reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */ + *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */ + *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */ + *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */ + *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */ + *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */ + *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */ + *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */ + *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */ + *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */ + *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */ + *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */ + *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */ + *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */ + *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */ + *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */ + *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */ + *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */ + *reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */ + *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */ + *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */ + *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */ + *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */ + *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */ + *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */ + *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */ + *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */ + *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */ + *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */ + *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */ + *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */ + *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */ + *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */ + *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */ + *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */ + *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */ + *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */ + *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */ + *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */ + *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */ + *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */ + *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */ + *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */ + + regs->len = (reg - reg_start) * sizeof(u32); +} + +static int +ixgb_get_eeprom_len(struct net_device *netdev) +{ + /* return size in bytes */ + return IXGB_EEPROM_SIZE << 1; +} + +static int +ixgb_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + __le16 *eeprom_buff; + int i, max_len, first_word, last_word; + int ret_val = 0; + + if (eeprom->len == 0) { + ret_val = -EINVAL; + goto geeprom_error; + } + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + max_len = ixgb_get_eeprom_len(netdev); + + if (eeprom->offset > eeprom->offset + eeprom->len) { + ret_val = -EINVAL; + goto geeprom_error; + } + + if ((eeprom->offset + eeprom->len) > max_len) + eeprom->len = (max_len - eeprom->offset); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(__le16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + /* note the eeprom was good because the driver loaded */ + for (i = 0; i <= (last_word - first_word); i++) + eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); + + memcpy(bytes, (u8 *)eeprom_buff + 
(eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + +geeprom_error: + return ret_val; +} + +static int +ixgb_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = ixgb_get_eeprom_len(netdev); + + if (eeprom->offset > eeprom->offset + eeprom->len) + return -EINVAL; + + if ((eeprom->offset + eeprom->len) > max_len) + eeprom->len = (max_len - eeprom->offset); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + eeprom_buff[0] = ixgb_read_eeprom(hw, first_word); + ptr++; + } + if ((eeprom->offset + eeprom->len) & 1) { + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + eeprom_buff[last_word - first_word] + = ixgb_read_eeprom(hw, last_word); + } + + memcpy(ptr, bytes, eeprom->len); + for (i = 0; i <= (last_word - first_word); i++) + ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]); + + /* Update the checksum over the first part of the EEPROM if needed */ + if (first_word <= EEPROM_CHECKSUM_REG) + ixgb_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return 0; +} + +static void +ixgb_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, ixgb_driver_name, 32); + strncpy(drvinfo->version, ixgb_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_stats = IXGB_STATS_LEN; + drvinfo->regdump_len = ixgb_get_regs_len(netdev); + drvinfo->eedump_len = ixgb_get_eeprom_len(netdev); +} + +static void +ixgb_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_desc_ring *txdr = &adapter->tx_ring; + struct ixgb_desc_ring *rxdr = &adapter->rx_ring; + + ring->rx_max_pending = MAX_RXD; + ring->tx_max_pending = MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int +ixgb_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_desc_ring *txdr = &adapter->tx_ring; + struct ixgb_desc_ring *rxdr = &adapter->rx_ring; + struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new; + int err; + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(adapter->netdev)) + ixgb_down(adapter, true); + + rxdr->count = max(ring->rx_pending,(u32)MIN_RXD); + rxdr->count = min(rxdr->count,(u32)MAX_RXD); + rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); + + txdr->count = max(ring->tx_pending,(u32)MIN_TXD); + txdr->count = min(txdr->count,(u32)MAX_TXD); + txdr->count = 
ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); + + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + if ((err = ixgb_setup_rx_resources(adapter))) + goto err_setup_rx; + if ((err = ixgb_setup_tx_resources(adapter))) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again */ + + rx_new = adapter->rx_ring; + tx_new = adapter->tx_ring; + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + ixgb_free_rx_resources(adapter); + ixgb_free_tx_resources(adapter); + adapter->rx_ring = rx_new; + adapter->tx_ring = tx_new; + if ((err = ixgb_up(adapter))) + return err; + ixgb_set_speed_duplex(netdev); + } + + return 0; +err_setup_tx: + ixgb_free_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + ixgb_up(adapter); + return err; +} + +static int +ixgb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + ixgb_led_on(&adapter->hw); + break; + + case ETHTOOL_ID_OFF: + case ETHTOOL_ID_INACTIVE: + ixgb_led_off(&adapter->hw); + } + + return 0; +} + +static int +ixgb_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IXGB_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void +ixgb_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + int i; + char *p = NULL; + + ixgb_update_stats(adapter); + for (i = 0; i < IXGB_STATS_LEN; i++) { + switch (ixgb_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) netdev + + ixgb_gstrings_stats[i].stat_offset; + break; + case IXGB_STATS: + p = (char *) adapter + + ixgb_gstrings_stats[i].stat_offset; + break; + } + + data[i] = (ixgb_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +} + +static void +ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + int i; + + switch(stringset) { + case ETH_SS_STATS: + for (i = 0; i < IXGB_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + ixgb_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static int ixgb_set_flags(struct net_device *netdev, u32 data) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + bool need_reset; + int rc; + + /* + * Tx VLAN insertion does not work per HW design when Rx stripping is + * disabled. Disable txvlan when rxvlan is turned off, and enable + * rxvlan when txvlan is turned on. 
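+	 * In other words the two flags stay coupled: clearing RXVLAN also
+	 * clears TXVLAN, and setting TXVLAN also sets RXVLAN.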
+ */ + if (!(data & ETH_FLAG_RXVLAN) && + (netdev->features & NETIF_F_HW_VLAN_TX)) + data &= ~ETH_FLAG_TXVLAN; + else if (data & ETH_FLAG_TXVLAN) + data |= ETH_FLAG_RXVLAN; + + need_reset = (data & ETH_FLAG_RXVLAN) != + (netdev->features & NETIF_F_HW_VLAN_RX); + + rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN | + ETH_FLAG_TXVLAN); + if (rc) + return rc; + + if (need_reset) { + if (netif_running(netdev)) { + ixgb_down(adapter, true); + ixgb_up(adapter); + ixgb_set_speed_duplex(netdev); + } else + ixgb_reset(adapter); + } + + return 0; +} + +static const struct ethtool_ops ixgb_ethtool_ops = { + .get_settings = ixgb_get_settings, + .set_settings = ixgb_set_settings, + .get_drvinfo = ixgb_get_drvinfo, + .get_regs_len = ixgb_get_regs_len, + .get_regs = ixgb_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ixgb_get_eeprom_len, + .get_eeprom = ixgb_get_eeprom, + .set_eeprom = ixgb_set_eeprom, + .get_ringparam = ixgb_get_ringparam, + .set_ringparam = ixgb_set_ringparam, + .get_pauseparam = ixgb_get_pauseparam, + .set_pauseparam = ixgb_set_pauseparam, + .get_rx_csum = ixgb_get_rx_csum, + .set_rx_csum = ixgb_set_rx_csum, + .get_tx_csum = ixgb_get_tx_csum, + .set_tx_csum = ixgb_set_tx_csum, + .set_sg = ethtool_op_set_sg, + .get_msglevel = ixgb_get_msglevel, + .set_msglevel = ixgb_set_msglevel, + .set_tso = ixgb_set_tso, + .get_strings = ixgb_get_strings, + .set_phys_id = ixgb_set_phys_id, + .get_sset_count = ixgb_get_sset_count, + .get_ethtool_stats = ixgb_get_ethtool_stats, + .get_flags = ethtool_op_get_flags, + .set_flags = ixgb_set_flags, +}; + +void ixgb_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops); +} diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c new file mode 100644 index 0000000..3d61a9e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c @@ -0,0 +1,1262 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ixgb_hw.c
+ * Shared functions for accessing and configuring the adapter
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ixgb_hw.h"
+#include "ixgb_ids.h"
+
+#include <linux/etherdevice.h>
+
+/* Local function prototypes */
+
+static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 *mc_addr);
+
+static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
+
+static void ixgb_get_bus_info(struct ixgb_hw *hw);
+
+static bool ixgb_link_reset(struct ixgb_hw *hw);
+
+static void ixgb_optics_reset(struct ixgb_hw *hw);
+
+static void ixgb_optics_reset_bcm(struct ixgb_hw *hw);
+
+static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw);
+
+static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
+
+static void ixgb_clear_vfta(struct ixgb_hw *hw);
+
+static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
+
+static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
+			     u32 reg_address,
+			     u32 phy_address,
+			     u32 device_type);
+
+static bool ixgb_setup_fc(struct ixgb_hw *hw);
+
+static bool mac_addr_valid(u8 *mac_addr);
+
+static u32 ixgb_mac_reset(struct ixgb_hw *hw)
+{
+	u32 ctrl_reg;
+
+	ctrl_reg = IXGB_CTRL0_RST |
+		   IXGB_CTRL0_SDP3_DIR |   /* All pins are Output=1 */
+		   IXGB_CTRL0_SDP2_DIR |
+		   IXGB_CTRL0_SDP1_DIR |
+		   IXGB_CTRL0_SDP0_DIR |
+		   IXGB_CTRL0_SDP3 |	   /* Initial value 1101 */
+		   IXGB_CTRL0_SDP2 |
+		   IXGB_CTRL0_SDP0;
+
+#ifdef HP_ZX1
+	/* Workaround for 82597EX reset errata */
+	IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
+#else
+	IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
+#endif
+
+	/* Delay a few ms just to allow the reset to complete */
+	msleep(IXGB_DELAY_AFTER_RESET);
+	ctrl_reg = IXGB_READ_REG(hw, CTRL0);
+#ifdef DBG
+	/* Make sure the self-clearing global reset bit did self clear */
+	ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
+#endif
+
+	if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) {
+		ctrl_reg =  /* Enable interrupt from XFP and SerDes */
+			   IXGB_CTRL1_GPI0_EN |
+			   IXGB_CTRL1_SDP6_DIR |
+			   IXGB_CTRL1_SDP7_DIR |
+			   IXGB_CTRL1_SDP6 |
+			   IXGB_CTRL1_SDP7;
+		IXGB_WRITE_REG(hw, CTRL1, ctrl_reg);
+		ixgb_optics_reset_bcm(hw);
+	}
+
+	if (hw->phy_type == ixgb_phy_type_txn17401)
+		ixgb_optics_reset(hw);
+
+	return ctrl_reg;
+}
+
+/******************************************************************************
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+bool
+ixgb_adapter_stop(struct ixgb_hw *hw)
+{
+	u32 ctrl_reg;
+	u32 icr_reg;
+
+	ENTER();
+
+	/* If we are stopped or resetting, exit gracefully and wait to be
+	 * started again before accessing the hardware.
+	 */
+	if (hw->adapter_stopped) {
+		pr_debug("Exiting because the adapter is already stopped!!!\n");
+		return false;
+	}
+
+	/* Set the Adapter Stopped flag so other driver functions stop
+	 * touching the Hardware.
+	 */
+	hw->adapter_stopped = true;
+
+	/* Clear interrupt mask to stop board from generating interrupts */
+	pr_debug("Masking off all interrupts\n");
+	IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
+
+	/* Disable the Transmit and Receive units.  Then delay to allow
+	 * any pending transactions to complete before we hit the MAC with
+	 * the global reset.
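+	 * (msleep(IXGB_DELAY_BEFORE_RESET) below provides that settle time.)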
+	 */
+	IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
+	IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
+	IXGB_WRITE_FLUSH(hw);
+	msleep(IXGB_DELAY_BEFORE_RESET);
+
+	/* Issue a global reset to the MAC.  This will reset the chip's
+	 * transmit, receive, DMA, and link units.  It will not affect
+	 * the current PCI configuration.  The global reset bit is self-
+	 * clearing, and should clear within a microsecond.
+	 */
+	pr_debug("Issuing a global reset to MAC\n");
+
+	ctrl_reg = ixgb_mac_reset(hw);
+
+	/* Clear interrupt mask to stop board from generating interrupts */
+	pr_debug("Masking off all interrupts\n");
+	IXGB_WRITE_REG(hw, IMC, 0xffffffff);
+
+	/* Clear any pending interrupt events. */
+	icr_reg = IXGB_READ_REG(hw, ICR);
+
+	return ctrl_reg & IXGB_CTRL0_RST;
+}
+
+
+/******************************************************************************
+ * Identifies the vendor of the optics module on the adapter.  The SR adapters
+ * support two different types of XPAK optics, so it is necessary to determine
+ * which optics are present before applying any optics-specific workarounds.
+ *
+ * hw - Struct containing variables accessed by shared code.
+ *
+ * Returns: the vendor of the XPAK optics module.
+ *****************************************************************************/
+static ixgb_xpak_vendor
+ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
+{
+	u32 i;
+	u16 vendor_name[5];
+	ixgb_xpak_vendor xpak_vendor;
+
+	ENTER();
+
+	/* Read the first few bytes of the vendor string from the XPAK NVR
+	 * registers.  These are standard XENPAK/XPAK registers, so all XPAK
+	 * devices should implement them. */
+	for (i = 0; i < 5; i++) {
+		vendor_name[i] = ixgb_read_phy_reg(hw,
+						   MDIO_PMA_PMD_XPAK_VENDOR_NAME
+						   + i, IXGB_PHY_ADDRESS,
+						   MDIO_MMD_PMAPMD);
+	}
+
+	/* Determine the actual vendor */
+	if (vendor_name[0] == 'I' &&
+	    vendor_name[1] == 'N' &&
+	    vendor_name[2] == 'T' &&
+	    vendor_name[3] == 'E' && vendor_name[4] == 'L') {
+		xpak_vendor = ixgb_xpak_vendor_intel;
+	} else {
+		xpak_vendor = ixgb_xpak_vendor_infineon;
+	}
+
+	return xpak_vendor;
+}
+
+/******************************************************************************
+ * Determine the physical layer module on the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code.  The device_id
+ *      field must be (correctly) populated before calling this routine.
+ *
+ * Returns: the phy type of the adapter.
+ *****************************************************************************/
+static ixgb_phy_type
+ixgb_identify_phy(struct ixgb_hw *hw)
+{
+	ixgb_phy_type phy_type;
+	ixgb_xpak_vendor xpak_vendor;
+
+	ENTER();
+
+	/* Infer the transceiver/phy type from the device id */
+	switch (hw->device_id) {
+	case IXGB_DEVICE_ID_82597EX:
+		pr_debug("Identified TXN17401 optics\n");
+		phy_type = ixgb_phy_type_txn17401;
+		break;
+
+	case IXGB_DEVICE_ID_82597EX_SR:
+		/* The SR adapters carry two different types of XPAK optics
+		 * modules; read the vendor identifier to determine the exact
+		 * type of optics.
+		 */
+		xpak_vendor = ixgb_identify_xpak_vendor(hw);
+		if (xpak_vendor == ixgb_xpak_vendor_intel) {
+			pr_debug("Identified TXN17201 optics\n");
+			phy_type = ixgb_phy_type_txn17201;
+		} else {
+			pr_debug("Identified G6005 optics\n");
+			phy_type = ixgb_phy_type_g6005;
+		}
+		break;
+	case IXGB_DEVICE_ID_82597EX_LR:
+		pr_debug("Identified G6104 optics\n");
+		phy_type = ixgb_phy_type_g6104;
+		break;
+	case IXGB_DEVICE_ID_82597EX_CX4:
+		pr_debug("Identified CX4\n");
+		xpak_vendor = ixgb_identify_xpak_vendor(hw);
+		if (xpak_vendor == ixgb_xpak_vendor_intel) {
+			pr_debug("Identified TXN17201 optics\n");
+			phy_type = ixgb_phy_type_txn17201;
+		} else {
+			pr_debug("Identified G6005 optics\n");
+			phy_type = ixgb_phy_type_g6005;
+		}
+		break;
+	default:
+		pr_debug("Unknown physical layer module\n");
+		phy_type = ixgb_phy_type_unknown;
+		break;
+	}
+
+	/* update phy type for sun specific board */
+	if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID)
+		phy_type = ixgb_phy_type_bcm;
+
+	return phy_type;
+}
+
+/******************************************************************************
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Resets the controller.
+ * Reads and validates the EEPROM.
+ * Initializes the receive address registers.
+ * Initializes the multicast table.
+ * Clears all on-chip counters.
+ * Calls routine to setup flow control settings.
+ * Leaves the transmit and receive units disabled and uninitialized.
+ *
+ * Returns:
+ * true if successful,
+ * false if unrecoverable problems were encountered.
+ *****************************************************************************/
+bool
+ixgb_init_hw(struct ixgb_hw *hw)
+{
+	u32 i;
+	u32 ctrl_reg;
+	bool status;
+
+	ENTER();
+
+	/* Issue a global reset to the MAC.  This will reset the chip's
+	 * transmit, receive, DMA, and link units.  It will not affect
+	 * the current PCI configuration.  The global reset bit is self-
+	 * clearing, and should clear within a microsecond.
+	 */
+	pr_debug("Issuing a global reset to MAC\n");
+
+	ctrl_reg = ixgb_mac_reset(hw);
+
+	pr_debug("Issuing an EE reset to MAC\n");
+#ifdef HP_ZX1
+	/* Workaround for 82597EX reset errata */
+	IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
+#else
+	IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
+#endif
+
+	/* Delay a few ms just to allow the reset to complete */
+	msleep(IXGB_DELAY_AFTER_EE_RESET);
+
+	if (!ixgb_get_eeprom_data(hw))
+		return false;
+
+	/* Use the device id to determine the type of phy/transceiver. */
+	hw->device_id = ixgb_get_ee_device_id(hw);
+	hw->phy_type = ixgb_identify_phy(hw);
+
+	/* Setup the receive addresses.
+	 * Receive Address Registers (RARs 0 - 15).
+	 */
+	ixgb_init_rx_addrs(hw);
+
+	/*
+	 * Check that a valid MAC address has been set.
+	 * If it is not valid, we fail hardware init.
+	 */
+	if (!mac_addr_valid(hw->curr_mac_addr)) {
+		pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
+		return false;
+	}
+
+	/* tell the routines in this file they can access hardware again */
+	hw->adapter_stopped = false;
+
+	/* Fill in the bus_info structure */
+	ixgb_get_bus_info(hw);
+
+	/* Zero out the Multicast HASH table */
+	pr_debug("Zeroing the MTA\n");
+	for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
+		IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
+
+	/* Zero out the VLAN Filter Table Array */
+	ixgb_clear_vfta(hw);
+
+	/* Zero all of the hardware counters */
+	ixgb_clear_hw_cntrs(hw);
+
+	/* Call a subroutine to setup flow control.
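+	 * ixgb_setup_fc() programs CTRL0, PAP and the FCRTL/FCRTH water
+	 * marks from hw->fc; its result becomes this function's return value.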
*/ + status = ixgb_setup_fc(hw); + + /* 82597EX errata: Call check-for-link in case lane deskew is locked */ + ixgb_check_for_link(hw); + + return status; +} + +/****************************************************************************** + * Initializes receive address filters. + * + * hw - Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + *****************************************************************************/ +static void +ixgb_init_rx_addrs(struct ixgb_hw *hw) +{ + u32 i; + + ENTER(); + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (!mac_addr_valid(hw->curr_mac_addr)) { + + /* Get the MAC address from the eeprom for later reference */ + ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr); + + pr_debug("Keeping Permanent MAC Addr = %pM\n", + hw->curr_mac_addr); + } else { + + /* Setup the receive address. */ + pr_debug("Overriding MAC Address in RAR[0]\n"); + pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr); + + ixgb_rar_set(hw, hw->curr_mac_addr, 0); + } + + /* Zero out the other 15 receive addresses. */ + pr_debug("Clearing RAR[1-15]\n"); + for (i = 1; i < IXGB_RAR_ENTRIES; i++) { + /* Write high reg first to disable the AV bit first */ + IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + } +} + +/****************************************************************************** + * Updates the MAC's list of multicast addresses. + * + * hw - Struct containing variables accessed by shared code + * mc_addr_list - the list of new multicast addresses + * mc_addr_count - number of addresses + * pad - number of bytes between addresses in the list + * + * The given list replaces any existing list. Clears the last 15 receive + * address registers and the multicast table. Uses receive address registers + * for the first 15 multicast addresses, and hashes the rest into the + * multicast table. + *****************************************************************************/ +void +ixgb_mc_addr_list_update(struct ixgb_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count, + u32 pad) +{ + u32 hash_value; + u32 i; + u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */ + u8 *mca; + + ENTER(); + + /* Set the new number of MC addresses that we are being requested to use. 
+	 */
+	hw->num_mc_addrs = mc_addr_count;
+
+	/* Clear RAR[1-15] */
+	pr_debug("Clearing RAR[1-15]\n");
+	for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
+		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+		IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+	}
+
+	/* Clear the MTA */
+	pr_debug("Clearing MTA\n");
+	for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
+		IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
+
+	/* Add the new addresses */
+	mca = mc_addr_list;
+	for (i = 0; i < mc_addr_count; i++) {
+		pr_debug("Adding the multicast addresses:\n");
+		pr_debug("MC Addr #%d = %pM\n", i, mca);
+
+		/* Place this multicast address in the RAR if there is room,
+		 * else put it in the MTA
+		 */
+		if (rar_used_count < IXGB_RAR_ENTRIES) {
+			ixgb_rar_set(hw, mca, rar_used_count);
+			pr_debug("Added a multicast address to RAR[%d]\n",
+				 rar_used_count);
+			rar_used_count++;
+		} else {
+			hash_value = ixgb_hash_mc_addr(hw, mca);
+
+			pr_debug("Hash value = 0x%03X\n", hash_value);
+
+			ixgb_mta_set(hw, hash_value);
+		}
+
+		mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad;
+	}
+
+	pr_debug("MC Update Complete\n");
+}
+
+/******************************************************************************
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *
+ * Returns:
+ * The hash value
+ *****************************************************************************/
+static u32
+ixgb_hash_mc_addr(struct ixgb_hw *hw,
+		  u8 *mc_addr)
+{
+	u32 hash_value = 0;
+
+	ENTER();
+
+	/* The portion of the address that is used for the hash table is
+	 * determined by the mc_filter_type setting.
+	 */
+	switch (hw->mc_filter_type) {
+	/* [0] [1] [2] [3] [4] [5]
+	 * 01  AA  00  12  34  56
+	 * LSB                 MSB - According to H/W docs */
+	case 0:
+		/* [47:36] i.e. 0x563 for above example address */
+		hash_value =
+		    ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
+		break;
+	case 1:		/* [46:35] i.e. 0xAC6 for above example address */
+		hash_value =
+		    ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
+		break;
+	case 2:		/* [45:34] i.e. 0x58D for above example address */
+		hash_value =
+		    ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
+		break;
+	case 3:		/* [43:32] i.e. 0x634 for above example address */
+		hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
+		break;
+	default:
+		/* Invalid mc_filter_type, what should we do? */
+		pr_debug("MC filter type param set incorrectly\n");
+		ASSERT(0);
+		break;
+	}
+
+	hash_value &= 0xFFF;
+	return hash_value;
+}
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+static void
+ixgb_mta_set(struct ixgb_hw *hw,
+	     u32 hash_value)
+{
+	u32 hash_bit, hash_reg;
+	u32 mta_reg;
+
+	/* The MTA is a register array of 128 32-bit registers.
+	 * It is treated like an array of 4096 bits.  We want to set
+	 * bit BitArray[hash_value].  So we figure out what register
+	 * the bit is in, read it, OR in the new bit, then write
+	 * back the new value.  The register is determined by the
+	 * upper 7 bits of the hash value and the bit within that
+	 * register is determined by the lower 5 bits of the value.
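+	 * Worked example: hash_value 0x563 selects MTA register
+	 * (0x563 >> 5) & 0x7F = 0x2B and bit 0x563 & 0x1F = 0x3 within it.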
+	 */
+	hash_reg = (hash_value >> 5) & 0x7F;
+	hash_bit = hash_value & 0x1F;
+
+	mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+	mta_reg |= (1 << hash_bit);
+
+	IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
+}
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void
+ixgb_rar_set(struct ixgb_hw *hw,
+	     u8 *addr,
+	     u32 index)
+{
+	u32 rar_low, rar_high;
+
+	ENTER();
+
+	/* HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] |
+		   ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) |
+		   ((u32)addr[3] << 24));
+
+	rar_high = ((u32) addr[4] |
+		    ((u32)addr[5] << 8) |
+		    IXGB_RAH_AV);
+
+	IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+	IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+}
+
+/******************************************************************************
+ * Writes a value to the specified offset in the VLAN filter table.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - Offset in VLAN filter table to write
+ * value - Value to write into VLAN filter table
+ *****************************************************************************/
+void
+ixgb_write_vfta(struct ixgb_hw *hw,
+		u32 offset,
+		u32 value)
+{
+	IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+}
+
+/******************************************************************************
+ * Clears the VLAN filter table
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_clear_vfta(struct ixgb_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
+		IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
+}
+
+/******************************************************************************
+ * Configures the flow control settings based on SW configuration.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+
+static bool
+ixgb_setup_fc(struct ixgb_hw *hw)
+{
+	u32 ctrl_reg;
+	u32 pap_reg = 0;   /* by default, assume no pause time */
+	bool status = true;
+
+	ENTER();
+
+	/* Get the current control reg 0 settings */
+	ctrl_reg = IXGB_READ_REG(hw, CTRL0);
+
+	/* Clear the Receive Pause Enable and Transmit Pause Enable bits */
+	ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
+
+	/* The possible values of the "flow_control" parameter are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames
+	 *    but not send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames
+	 *    but we do not support receiving pause frames).
+	 * 3: Both Rx and TX flow control (symmetric) are enabled.
+	 * other: Invalid.
+	 */
+	switch (hw->fc.type) {
+	case ixgb_fc_none:	/* 0 */
+		/* Set CMDC bit to disable Rx Flow control */
+		ctrl_reg |= (IXGB_CTRL0_CMDC);
+		break;
+	case ixgb_fc_rx_pause:	/* 1 */
+		/* RX Flow control is enabled, and TX Flow control is
+		 * disabled.
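+		 * The device will honour received PAUSE frames but never
+		 * send them, so no pause time is programmed (pap_reg stays 0).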
+		 */
+		ctrl_reg |= (IXGB_CTRL0_RPE);
+		break;
+	case ixgb_fc_tx_pause:	/* 2 */
+		/* TX Flow control is enabled, and RX Flow control is
+		 * disabled, by a software over-ride.
+		 */
+		ctrl_reg |= (IXGB_CTRL0_TPE);
+		pap_reg = hw->fc.pause_time;
+		break;
+	case ixgb_fc_full:	/* 3 */
+		/* Flow control (both RX and TX) is enabled by a software
+		 * over-ride.
+		 */
+		ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
+		pap_reg = hw->fc.pause_time;
+		break;
+	default:
+		/* We should never get here.  The value should be 0-3. */
+		pr_debug("Flow control param set incorrectly\n");
+		ASSERT(0);
+		break;
+	}
+
+	/* Write the new settings */
+	IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
+
+	if (pap_reg != 0)
+		IXGB_WRITE_REG(hw, PAP, pap_reg);
+
+	/* Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (!(hw->fc.type & ixgb_fc_tx_pause)) {
+		IXGB_WRITE_REG(hw, FCRTL, 0);
+		IXGB_WRITE_REG(hw, FCRTH, 0);
+	} else {
+		/* We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames. */
+		if (hw->fc.send_xon) {
+			IXGB_WRITE_REG(hw, FCRTL,
+				       (hw->fc.low_water | IXGB_FCRTL_XONE));
+		} else {
+			IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
+		}
+		IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
+	}
+	return status;
+}
+
+/******************************************************************************
+ * Reads a word from a device over the Management Data Interface (MDI) bus.
+ * This interface is used to manage Physical layer devices.
+ *
+ * hw - Struct containing variables accessed by hw code
+ * reg_address - Offset of device register being read.
+ * phy_address - Address of device on MDI.
+ * device_type - Also known as the Device ID or DID.
+ *
+ * Returns: Data word (16 bits) from MDI device.
+ *
+ * The 82597EX has support for several MDI access methods.  This routine
+ * uses the new protocol MDI Single Command and Address Operation.
+ * This requires that first an address cycle command is sent, followed by a
+ * read command.
+ *****************************************************************************/
+static u16
+ixgb_read_phy_reg(struct ixgb_hw *hw,
+		  u32 reg_address,
+		  u32 phy_address,
+		  u32 device_type)
+{
+	u32 i;
+	u32 data;
+	u32 command = 0;
+
+	ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
+	ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
+	ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
+
+	/* Setup and write the address cycle command */
+	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+		   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+		   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
+
+	IXGB_WRITE_REG(hw, MSCA, command);
+
+	/**************************************************************
+	 ** Check every 10 usec to see if the address cycle completed
+	 ** The COMMAND bit will clear when the operation is complete.
+	 ** This may take as long as 64 usecs (we'll wait 100 usecs max)
+	 ** from the CPU Write to the Ready bit assertion.
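+	 ** (Ten polls at 10 usec intervals give the 100 usec budget.)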
+ **************************************************************/ + + for (i = 0; i < 10; i++) + { + udelay(10); + + command = IXGB_READ_REG(hw, MSCA); + + if ((command & IXGB_MSCA_MDI_COMMAND) == 0) + break; + } + + ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); + + /* Address cycle complete, setup and write the read command */ + command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | + (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | + (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND)); + + IXGB_WRITE_REG(hw, MSCA, command); + + /************************************************************** + ** Check every 10 usec to see if the read command completed + ** The COMMAND bit will clear when the operation is complete. + ** The read may take as long as 64 usecs (we'll wait 100 usecs max) + ** from the CPU Write to the Ready bit assertion. + **************************************************************/ + + for (i = 0; i < 10; i++) + { + udelay(10); + + command = IXGB_READ_REG(hw, MSCA); + + if ((command & IXGB_MSCA_MDI_COMMAND) == 0) + break; + } + + ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); + + /* Operation is complete, get the data from the MDIO Read/Write Data + * register and return. + */ + data = IXGB_READ_REG(hw, MSRWD); + data >>= IXGB_MSRWD_READ_DATA_SHIFT; + return((u16) data); +} + +/****************************************************************************** + * Writes a word to a device over the Management Data Interface (MDI) bus. + * This interface is used to manage Physical layer devices. + * + * hw - Struct containing variables accessed by hw code + * reg_address - Offset of device register being read. + * phy_address - Address of device on MDI. + * device_type - Also known as the Device ID or DID. + * data - 16-bit value to be written + * + * Returns: void. + * + * The 82597EX has support for several MDI access methods. This routine + * uses the new protocol MDI Single Command and Address Operation. + * This requires that first an address cycle command is sent, followed by a + * write command. + *****************************************************************************/ +static void +ixgb_write_phy_reg(struct ixgb_hw *hw, + u32 reg_address, + u32 phy_address, + u32 device_type, + u16 data) +{ + u32 i; + u32 command = 0; + + ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); + ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); + ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); + + /* Put the data in the MDIO Read/Write Data register */ + IXGB_WRITE_REG(hw, MSRWD, (u32)data); + + /* Setup and write the address cycle command */ + command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | + (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | + (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND)); + + IXGB_WRITE_REG(hw, MSCA, command); + + /************************************************************** + ** Check every 10 usec to see if the address cycle completed + ** The COMMAND bit will clear when the operation is complete. + ** This may take as long as 64 usecs (we'll wait 100 usecs max) + ** from the CPU Write to the Ready bit assertion. 
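+	 ** (As above, ten polls at 10 usec intervals bound the wait at
+	 ** 100 usecs.)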
+ **************************************************************/ + + for (i = 0; i < 10; i++) + { + udelay(10); + + command = IXGB_READ_REG(hw, MSCA); + + if ((command & IXGB_MSCA_MDI_COMMAND) == 0) + break; + } + + ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); + + /* Address cycle complete, setup and write the write command */ + command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | + (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | + (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND)); + + IXGB_WRITE_REG(hw, MSCA, command); + + /************************************************************** + ** Check every 10 usec to see if the read command completed + ** The COMMAND bit will clear when the operation is complete. + ** The write may take as long as 64 usecs (we'll wait 100 usecs max) + ** from the CPU Write to the Ready bit assertion. + **************************************************************/ + + for (i = 0; i < 10; i++) + { + udelay(10); + + command = IXGB_READ_REG(hw, MSCA); + + if ((command & IXGB_MSCA_MDI_COMMAND) == 0) + break; + } + + ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); + + /* Operation is complete, return. */ +} + +/****************************************************************************** + * Checks to see if the link status of the hardware has changed. + * + * hw - Struct containing variables accessed by hw code + * + * Called by any function that needs to check the link status of the adapter. + *****************************************************************************/ +void +ixgb_check_for_link(struct ixgb_hw *hw) +{ + u32 status_reg; + u32 xpcss_reg; + + ENTER(); + + xpcss_reg = IXGB_READ_REG(hw, XPCSS); + status_reg = IXGB_READ_REG(hw, STATUS); + + if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && + (status_reg & IXGB_STATUS_LU)) { + hw->link_up = true; + } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && + (status_reg & IXGB_STATUS_LU)) { + pr_debug("XPCSS Not Aligned while Status:LU is set\n"); + hw->link_up = ixgb_link_reset(hw); + } else { + /* + * 82597EX errata. Since the lane deskew problem may prevent + * link, reset the link before reporting link down. + */ + hw->link_up = ixgb_link_reset(hw); + } + /* Anything else for 10 Gig?? */ +} + +/****************************************************************************** + * Check for a bad link condition that may have occurred. + * The indication is that the RFC / LFC registers may be incrementing + * continually. A full adapter reset is required to recover. + * + * hw - Struct containing variables accessed by hw code + * + * Called by any function that needs to check the link status of the adapter. + *****************************************************************************/ +bool ixgb_check_for_bad_link(struct ixgb_hw *hw) +{ + u32 newLFC, newRFC; + bool bad_link_returncode = false; + + if (hw->phy_type == ixgb_phy_type_txn17401) { + newLFC = IXGB_READ_REG(hw, LFC); + newRFC = IXGB_READ_REG(hw, RFC); + if ((hw->lastLFC + 250 < newLFC) + || (hw->lastRFC + 250 < newRFC)) { + pr_debug("BAD LINK! too many LFC/RFC since last check\n"); + bad_link_returncode = true; + } + hw->lastLFC = newLFC; + hw->lastRFC = newRFC; + } + + return bad_link_returncode; +} + +/****************************************************************************** + * Clears all hardware statistics counters. 
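+ * Reading each counter is sufficient because the hardware statistics
+ * registers clear on read; the values read here are discarded.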
+ * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_clear_hw_cntrs(struct ixgb_hw *hw) +{ + volatile u32 temp_reg; + + ENTER(); + + /* if we are stopped or resetting exit gracefully */ + if (hw->adapter_stopped) { + pr_debug("Exiting because the adapter is stopped!!!\n"); + return; + } + + temp_reg = IXGB_READ_REG(hw, TPRL); + temp_reg = IXGB_READ_REG(hw, TPRH); + temp_reg = IXGB_READ_REG(hw, GPRCL); + temp_reg = IXGB_READ_REG(hw, GPRCH); + temp_reg = IXGB_READ_REG(hw, BPRCL); + temp_reg = IXGB_READ_REG(hw, BPRCH); + temp_reg = IXGB_READ_REG(hw, MPRCL); + temp_reg = IXGB_READ_REG(hw, MPRCH); + temp_reg = IXGB_READ_REG(hw, UPRCL); + temp_reg = IXGB_READ_REG(hw, UPRCH); + temp_reg = IXGB_READ_REG(hw, VPRCL); + temp_reg = IXGB_READ_REG(hw, VPRCH); + temp_reg = IXGB_READ_REG(hw, JPRCL); + temp_reg = IXGB_READ_REG(hw, JPRCH); + temp_reg = IXGB_READ_REG(hw, GORCL); + temp_reg = IXGB_READ_REG(hw, GORCH); + temp_reg = IXGB_READ_REG(hw, TORL); + temp_reg = IXGB_READ_REG(hw, TORH); + temp_reg = IXGB_READ_REG(hw, RNBC); + temp_reg = IXGB_READ_REG(hw, RUC); + temp_reg = IXGB_READ_REG(hw, ROC); + temp_reg = IXGB_READ_REG(hw, RLEC); + temp_reg = IXGB_READ_REG(hw, CRCERRS); + temp_reg = IXGB_READ_REG(hw, ICBC); + temp_reg = IXGB_READ_REG(hw, ECBC); + temp_reg = IXGB_READ_REG(hw, MPC); + temp_reg = IXGB_READ_REG(hw, TPTL); + temp_reg = IXGB_READ_REG(hw, TPTH); + temp_reg = IXGB_READ_REG(hw, GPTCL); + temp_reg = IXGB_READ_REG(hw, GPTCH); + temp_reg = IXGB_READ_REG(hw, BPTCL); + temp_reg = IXGB_READ_REG(hw, BPTCH); + temp_reg = IXGB_READ_REG(hw, MPTCL); + temp_reg = IXGB_READ_REG(hw, MPTCH); + temp_reg = IXGB_READ_REG(hw, UPTCL); + temp_reg = IXGB_READ_REG(hw, UPTCH); + temp_reg = IXGB_READ_REG(hw, VPTCL); + temp_reg = IXGB_READ_REG(hw, VPTCH); + temp_reg = IXGB_READ_REG(hw, JPTCL); + temp_reg = IXGB_READ_REG(hw, JPTCH); + temp_reg = IXGB_READ_REG(hw, GOTCL); + temp_reg = IXGB_READ_REG(hw, GOTCH); + temp_reg = IXGB_READ_REG(hw, TOTL); + temp_reg = IXGB_READ_REG(hw, TOTH); + temp_reg = IXGB_READ_REG(hw, DC); + temp_reg = IXGB_READ_REG(hw, PLT64C); + temp_reg = IXGB_READ_REG(hw, TSCTC); + temp_reg = IXGB_READ_REG(hw, TSCTFC); + temp_reg = IXGB_READ_REG(hw, IBIC); + temp_reg = IXGB_READ_REG(hw, RFC); + temp_reg = IXGB_READ_REG(hw, LFC); + temp_reg = IXGB_READ_REG(hw, PFRC); + temp_reg = IXGB_READ_REG(hw, PFTC); + temp_reg = IXGB_READ_REG(hw, MCFRC); + temp_reg = IXGB_READ_REG(hw, MCFTC); + temp_reg = IXGB_READ_REG(hw, XONRXC); + temp_reg = IXGB_READ_REG(hw, XONTXC); + temp_reg = IXGB_READ_REG(hw, XOFFRXC); + temp_reg = IXGB_READ_REG(hw, XOFFTXC); + temp_reg = IXGB_READ_REG(hw, RJC); +} + +/****************************************************************************** + * Turns on the software controllable LED + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +void +ixgb_led_on(struct ixgb_hw *hw) +{ + u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0); + + /* To turn on the LED, clear software-definable pin 0 (SDP0). 
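The long run of discarded reads in ixgb_clear_hw_cntrs() works because these counters are read-to-clear: the MMIO read itself zeroes the register, so the function only needs to read each one and throw the value away (the volatile temporary keeps that intent explicit). A table-driven equivalent, as a compilable sketch — reg_read() stands in for IXGB_READ_REG(), offsets are from ixgb_hw.h later in this patch, and clear_stat_counters() is illustrative rather than the driver's code:

#include <stddef.h>
#include <stdint.h>

extern uint32_t reg_read(uint32_t offset);	/* models IXGB_READ_REG */

static const uint32_t stat_regs[] = {
	0x02000,	/* TPRL */
	0x02004,	/* TPRH */
	0x02068,	/* CRCERRS */
	0x021D8,	/* RJC */
	/* ... the remaining counter offsets elided ... */
};

static void clear_stat_counters(void)
{
	size_t i;

	for (i = 0; i < sizeof(stat_regs) / sizeof(stat_regs[0]); i++)
		(void)reg_read(stat_regs[i]);	/* read-to-clear */
}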
*/ + ctrl0_reg &= ~IXGB_CTRL0_SDP0; + IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg); +} + +/****************************************************************************** + * Turns off the software controllable LED + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +void +ixgb_led_off(struct ixgb_hw *hw) +{ + u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0); + + /* To turn off the LED, set software-definable pin 0 (SDP0). */ + ctrl0_reg |= IXGB_CTRL0_SDP0; + IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg); +} + +/****************************************************************************** + * Gets the current PCI bus type, speed, and width of the hardware + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_get_bus_info(struct ixgb_hw *hw) +{ + u32 status_reg; + + status_reg = IXGB_READ_REG(hw, STATUS); + + hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ? + ixgb_bus_type_pcix : ixgb_bus_type_pci; + + if (hw->bus.type == ixgb_bus_type_pci) { + hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ? + ixgb_bus_speed_66 : ixgb_bus_speed_33; + } else { + switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) { + case IXGB_STATUS_PCIX_SPD_66: + hw->bus.speed = ixgb_bus_speed_66; + break; + case IXGB_STATUS_PCIX_SPD_100: + hw->bus.speed = ixgb_bus_speed_100; + break; + case IXGB_STATUS_PCIX_SPD_133: + hw->bus.speed = ixgb_bus_speed_133; + break; + default: + hw->bus.speed = ixgb_bus_speed_reserved; + break; + } + } + + hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ? + ixgb_bus_width_64 : ixgb_bus_width_32; +} + +/****************************************************************************** + * Tests a MAC address to ensure it is a valid Individual Address + * + * mac_addr - pointer to MAC address. + * + *****************************************************************************/ +static bool +mac_addr_valid(u8 *mac_addr) +{ + bool is_valid = true; + ENTER(); + + /* Make sure it is not a multicast address */ + if (is_multicast_ether_addr(mac_addr)) { + pr_debug("MAC address is multicast\n"); + is_valid = false; + } + /* Not a broadcast address */ + else if (is_broadcast_ether_addr(mac_addr)) { + pr_debug("MAC address is broadcast\n"); + is_valid = false; + } + /* Reject the zero address */ + else if (is_zero_ether_addr(mac_addr)) { + pr_debug("MAC address is all zeros\n"); + is_valid = false; + } + return is_valid; +} + +/****************************************************************************** + * Resets the 10GbE link. Waits the settle time and returns the state of + * the link. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static bool +ixgb_link_reset(struct ixgb_hw *hw) +{ + bool link_status = false; + u8 wait_retries = MAX_RESET_ITERATIONS; + u8 lrst_retries = MAX_RESET_ITERATIONS; + + do { + /* Reset the link */ + IXGB_WRITE_REG(hw, CTRL0, + IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST); + + /* Wait for link-up and lane re-alignment */ + do { + udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET); + link_status = + ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU) + && (IXGB_READ_REG(hw, XPCSS) & + IXGB_XPCSS_ALIGN_STATUS)) ? 
true : false; + } while (!link_status && --wait_retries); + + } while (!link_status && --lrst_retries); + + return link_status; +} + +/****************************************************************************** + * Resets the 10GbE optics module. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_optics_reset(struct ixgb_hw *hw) +{ + if (hw->phy_type == ixgb_phy_type_txn17401) { + u16 mdio_reg; + + ixgb_write_phy_reg(hw, + MDIO_CTRL1, + IXGB_PHY_ADDRESS, + MDIO_MMD_PMAPMD, + MDIO_CTRL1_RESET); + + mdio_reg = ixgb_read_phy_reg(hw, + MDIO_CTRL1, + IXGB_PHY_ADDRESS, + MDIO_MMD_PMAPMD); + } +} + +/****************************************************************************** + * Resets the 10GbE optics module for Sun variant NIC. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ + +#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG 0xC803 +#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL 0x0164 +#define IXGB_BCM8704_USER_CTRL_REG 0xC800 +#define IXGB_BCM8704_USER_CTRL_REG_VAL 0x7FBF +#define IXGB_BCM8704_USER_DEV3_ADDR 0x0003 +#define IXGB_SUN_PHY_ADDRESS 0x0000 +#define IXGB_SUN_PHY_RESET_DELAY 305 + +static void +ixgb_optics_reset_bcm(struct ixgb_hw *hw) +{ + u32 ctrl = IXGB_READ_REG(hw, CTRL0); + ctrl &= ~IXGB_CTRL0_SDP2; + ctrl |= IXGB_CTRL0_SDP3; + IXGB_WRITE_REG(hw, CTRL0, ctrl); + IXGB_WRITE_FLUSH(hw); + + /* SerDes needs extra delay */ + msleep(IXGB_SUN_PHY_RESET_DELAY); + + /* Broadcom 7408L configuration */ + /* Reference clock config */ + ixgb_write_phy_reg(hw, + IXGB_BCM8704_USER_PMD_TX_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR, + IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL); + /* we must read the registers twice */ + ixgb_read_phy_reg(hw, + IXGB_BCM8704_USER_PMD_TX_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR); + ixgb_read_phy_reg(hw, + IXGB_BCM8704_USER_PMD_TX_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR); + + ixgb_write_phy_reg(hw, + IXGB_BCM8704_USER_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR, + IXGB_BCM8704_USER_CTRL_REG_VAL); + ixgb_read_phy_reg(hw, + IXGB_BCM8704_USER_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR); + ixgb_read_phy_reg(hw, + IXGB_BCM8704_USER_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR); + + /* SerDes needs extra delay */ + msleep(IXGB_SUN_PHY_RESET_DELAY); +} diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h new file mode 100644 index 0000000..873d32b --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h @@ -0,0 +1,801 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGB_HW_H_ +#define _IXGB_HW_H_ + +#include + +#include "ixgb_osdep.h" + +/* Enums */ +typedef enum { + ixgb_mac_unknown = 0, + ixgb_82597, + ixgb_num_macs +} ixgb_mac_type; + +/* Types of physical layer modules */ +typedef enum { + ixgb_phy_type_unknown = 0, + ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */ + ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */ + ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */ + ixgb_phy_type_txn17401, /* 1310nm, SM fiber, XENPAK transceiver */ + ixgb_phy_type_bcm /* SUN specific board */ +} ixgb_phy_type; + +/* XPAK transceiver vendors, for the SR adapters */ +typedef enum { + ixgb_xpak_vendor_intel, + ixgb_xpak_vendor_infineon +} ixgb_xpak_vendor; + +/* Media Types */ +typedef enum { + ixgb_media_type_unknown = 0, + ixgb_media_type_fiber = 1, + ixgb_media_type_copper = 2, + ixgb_num_media_types +} ixgb_media_type; + +/* Flow Control Settings */ +typedef enum { + ixgb_fc_none = 0, + ixgb_fc_rx_pause = 1, + ixgb_fc_tx_pause = 2, + ixgb_fc_full = 3, + ixgb_fc_default = 0xFF +} ixgb_fc_type; + +/* PCI bus types */ +typedef enum { + ixgb_bus_type_unknown = 0, + ixgb_bus_type_pci, + ixgb_bus_type_pcix +} ixgb_bus_type; + +/* PCI bus speeds */ +typedef enum { + ixgb_bus_speed_unknown = 0, + ixgb_bus_speed_33, + ixgb_bus_speed_66, + ixgb_bus_speed_100, + ixgb_bus_speed_133, + ixgb_bus_speed_reserved +} ixgb_bus_speed; + +/* PCI bus widths */ +typedef enum { + ixgb_bus_width_unknown = 0, + ixgb_bus_width_32, + ixgb_bus_width_64 +} ixgb_bus_width; + +#define IXGB_ETH_LENGTH_OF_ADDRESS 6 + +#define IXGB_EEPROM_SIZE 64 /* Size in words */ + +#define SPEED_10000 10000 +#define FULL_DUPLEX 2 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */ + +#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */ +#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */ +#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */ + +#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */ + /* NOTE: this is MICROSECONDS */ +#define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */ + +/* General Registers */ +#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */ +#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */ +#define IXGB_STATUS 0x00010 /* Device Status Register - RO */ +#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */ +#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */ + +/* Interrupt */ +#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */ +#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */ +#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */ +#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */ + +/* Receive */ +#define IXGB_RCTL 0x00100 /* RX Control - RW */ +#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */ +#define IXGB_FCRTH 
0x00110 /* Flow Control Receive Threshold High - RW */ +#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */ +#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */ +#define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */ +#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */ +#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */ +#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */ +#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */ +#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */ +#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */ +#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */ +#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */ +#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */ +#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */ +#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */ +#define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Transmit */ +#define IXGB_TCTL 0x00600 /* TX Control - RW */ +#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */ +#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */ +#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */ +#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */ +#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */ +#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */ +#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */ +#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */ +#define IXGB_PAP 0x00640 /* Pause and Pace - RW */ +#define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8 + +/* Physical */ +#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */ +#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */ +#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */ +#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */ +#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */ +#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */ +#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */ +#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */ +#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */ +#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */ +#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */ +#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */ +#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */ + +/* Wake-up */ +#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */ +#define IXGB_WUS 0x00810 /* Wake Up Status - RO */ +#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */ +#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */ +#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */ + +/* Statistics */ +#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */ +#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */ +#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */ +#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */ +#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */ +#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */ +#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */ +#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */ +#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */ +#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */ +#define 
IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */ +#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */ +#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */ +#define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */ +#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */ +#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */ +#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */ +#define IXGB_TORH 0x02044 /* Total Octets Received (High) */ +#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */ +#define IXGB_RUC 0x02050 /* Receive Undersize Count */ +#define IXGB_ROC 0x02058 /* Receive Oversize Count */ +#define IXGB_RLEC 0x02060 /* Receive Length Error Count */ +#define IXGB_CRCERRS 0x02068 /* CRC Error Count */ +#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */ +#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */ +#define IXGB_MPC 0x02080 /* Missed Packets Count */ +#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */ +#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */ +#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */ +#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */ +#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */ +#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */ +#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */ +#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */ +#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */ +#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */ +#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */ +#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */ +#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */ +#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */ +#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */ +#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */ +#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */ +#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */ +#define IXGB_DC 0x02148 /* Defer Count */ +#define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */ +#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */ +#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */ +#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */ +#define IXGB_RFC 0x02188 /* Remote Fault Count */ +#define IXGB_LFC 0x02190 /* Local Fault Count */ +#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */ +#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */ +#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */ +#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */ +#define IXGB_XONRXC 0x021B8 /* XON Received Count */ +#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */ +#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */ +#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */ +#define IXGB_RJC 0x021D8 /* Receive Jabber Count */ + +/* CTRL0 Bit Masks */ +#define IXGB_CTRL0_LRST 0x00000008 +#define IXGB_CTRL0_JFE 0x00000010 +#define IXGB_CTRL0_XLE 0x00000020 +#define IXGB_CTRL0_MDCS 0x00000040 +#define IXGB_CTRL0_CMDC 0x00000080 +#define IXGB_CTRL0_SDP0 0x00040000 
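The SDP bits defined here are what the LED helpers earlier in this patch manipulate: SDP0 drives the LED, and ixgb_led_on()/ixgb_led_off() are plain read-modify-write operations on CTRL0. A compilable model of that pattern, with reg_read()/reg_write() as assumed stand-ins for the driver's register accessors and set_led() as an illustrative name:

#include <stdbool.h>
#include <stdint.h>

#define CTRL0_OFFSET 0x00000		/* IXGB_CTRL0 */
#define CTRL0_SDP0   0x00040000u	/* software-definable pin 0 */

extern uint32_t reg_read(uint32_t offset);
extern void reg_write(uint32_t offset, uint32_t value);

/* Clearing SDP0 turns the LED on; setting it turns the LED off,
 * exactly as in ixgb_led_on()/ixgb_led_off() above. */
static void set_led(bool on)
{
	uint32_t ctrl0 = reg_read(CTRL0_OFFSET);

	if (on)
		ctrl0 &= ~CTRL0_SDP0;
	else
		ctrl0 |= CTRL0_SDP0;
	reg_write(CTRL0_OFFSET, ctrl0);
}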
+#define IXGB_CTRL0_SDP1 0x00080000 +#define IXGB_CTRL0_SDP2 0x00100000 +#define IXGB_CTRL0_SDP3 0x00200000 +#define IXGB_CTRL0_SDP0_DIR 0x00400000 +#define IXGB_CTRL0_SDP1_DIR 0x00800000 +#define IXGB_CTRL0_SDP2_DIR 0x01000000 +#define IXGB_CTRL0_SDP3_DIR 0x02000000 +#define IXGB_CTRL0_RST 0x04000000 +#define IXGB_CTRL0_RPE 0x08000000 +#define IXGB_CTRL0_TPE 0x10000000 +#define IXGB_CTRL0_VME 0x40000000 + +/* CTRL1 Bit Masks */ +#define IXGB_CTRL1_GPI0_EN 0x00000001 +#define IXGB_CTRL1_GPI1_EN 0x00000002 +#define IXGB_CTRL1_GPI2_EN 0x00000004 +#define IXGB_CTRL1_GPI3_EN 0x00000008 +#define IXGB_CTRL1_SDP4 0x00000010 +#define IXGB_CTRL1_SDP5 0x00000020 +#define IXGB_CTRL1_SDP6 0x00000040 +#define IXGB_CTRL1_SDP7 0x00000080 +#define IXGB_CTRL1_SDP4_DIR 0x00000100 +#define IXGB_CTRL1_SDP5_DIR 0x00000200 +#define IXGB_CTRL1_SDP6_DIR 0x00000400 +#define IXGB_CTRL1_SDP7_DIR 0x00000800 +#define IXGB_CTRL1_EE_RST 0x00002000 +#define IXGB_CTRL1_RO_DIS 0x00020000 +#define IXGB_CTRL1_PCIXHM_MASK 0x00C00000 +#define IXGB_CTRL1_PCIXHM_1_2 0x00000000 +#define IXGB_CTRL1_PCIXHM_5_8 0x00400000 +#define IXGB_CTRL1_PCIXHM_3_4 0x00800000 +#define IXGB_CTRL1_PCIXHM_7_8 0x00C00000 + +/* STATUS Bit Masks */ +#define IXGB_STATUS_LU 0x00000002 +#define IXGB_STATUS_AIP 0x00000004 +#define IXGB_STATUS_TXOFF 0x00000010 +#define IXGB_STATUS_XAUIME 0x00000020 +#define IXGB_STATUS_RES 0x00000040 +#define IXGB_STATUS_RIS 0x00000080 +#define IXGB_STATUS_RIE 0x00000100 +#define IXGB_STATUS_RLF 0x00000200 +#define IXGB_STATUS_RRF 0x00000400 +#define IXGB_STATUS_PCI_SPD 0x00000800 +#define IXGB_STATUS_BUS64 0x00001000 +#define IXGB_STATUS_PCIX_MODE 0x00002000 +#define IXGB_STATUS_PCIX_SPD_MASK 0x0000C000 +#define IXGB_STATUS_PCIX_SPD_66 0x00000000 +#define IXGB_STATUS_PCIX_SPD_100 0x00004000 +#define IXGB_STATUS_PCIX_SPD_133 0x00008000 +#define IXGB_STATUS_REV_ID_MASK 0x000F0000 +#define IXGB_STATUS_REV_ID_SHIFT 16 + +/* EECD Bit Masks */ +#define IXGB_EECD_SK 0x00000001 +#define IXGB_EECD_CS 0x00000002 +#define IXGB_EECD_DI 0x00000004 +#define IXGB_EECD_DO 0x00000008 +#define IXGB_EECD_FWE_MASK 0x00000030 +#define IXGB_EECD_FWE_DIS 0x00000010 +#define IXGB_EECD_FWE_EN 0x00000020 + +/* MFS */ +#define IXGB_MFS_SHIFT 16 + +/* Interrupt Register Bit Masks (used for ICR, ICS, IMS, and IMC) */ +#define IXGB_INT_TXDW 0x00000001 +#define IXGB_INT_TXQE 0x00000002 +#define IXGB_INT_LSC 0x00000004 +#define IXGB_INT_RXSEQ 0x00000008 +#define IXGB_INT_RXDMT0 0x00000010 +#define IXGB_INT_RXO 0x00000040 +#define IXGB_INT_RXT0 0x00000080 +#define IXGB_INT_AUTOSCAN 0x00000200 +#define IXGB_INT_GPI0 0x00000800 +#define IXGB_INT_GPI1 0x00001000 +#define IXGB_INT_GPI2 0x00002000 +#define IXGB_INT_GPI3 0x00004000 + +/* RCTL Bit Masks */ +#define IXGB_RCTL_RXEN 0x00000002 +#define IXGB_RCTL_SBP 0x00000004 +#define IXGB_RCTL_UPE 0x00000008 +#define IXGB_RCTL_MPE 0x00000010 +#define IXGB_RCTL_RDMTS_MASK 0x00000300 +#define IXGB_RCTL_RDMTS_1_2 0x00000000 +#define IXGB_RCTL_RDMTS_1_4 0x00000100 +#define IXGB_RCTL_RDMTS_1_8 0x00000200 +#define IXGB_RCTL_MO_MASK 0x00003000 +#define IXGB_RCTL_MO_47_36 0x00000000 +#define IXGB_RCTL_MO_46_35 0x00001000 +#define IXGB_RCTL_MO_45_34 0x00002000 +#define IXGB_RCTL_MO_43_32 0x00003000 +#define IXGB_RCTL_MO_SHIFT 12 +#define IXGB_RCTL_BAM 0x00008000 +#define IXGB_RCTL_BSIZE_MASK 0x00030000 +#define IXGB_RCTL_BSIZE_2048 0x00000000 +#define IXGB_RCTL_BSIZE_4096 0x00010000 +#define IXGB_RCTL_BSIZE_8192 0x00020000 +#define IXGB_RCTL_BSIZE_16384 0x00030000 +#define IXGB_RCTL_VFE 0x00040000 +#define 
IXGB_RCTL_CFIEN 0x00080000 +#define IXGB_RCTL_CFI 0x00100000 +#define IXGB_RCTL_RPDA_MASK 0x00600000 +#define IXGB_RCTL_RPDA_MC_MAC 0x00000000 +#define IXGB_RCTL_MC_ONLY 0x00400000 +#define IXGB_RCTL_CFF 0x00800000 +#define IXGB_RCTL_SECRC 0x04000000 +#define IXGB_RDT_FPDB 0x80000000 + +#define IXGB_RCTL_IDLE_RX_UNIT 0 + +/* FCRTL Bit Masks */ +#define IXGB_FCRTL_XONE 0x80000000 + +/* RXDCTL Bit Masks */ +#define IXGB_RXDCTL_PTHRESH_MASK 0x000001FF +#define IXGB_RXDCTL_PTHRESH_SHIFT 0 +#define IXGB_RXDCTL_HTHRESH_MASK 0x0003FE00 +#define IXGB_RXDCTL_HTHRESH_SHIFT 9 +#define IXGB_RXDCTL_WTHRESH_MASK 0x07FC0000 +#define IXGB_RXDCTL_WTHRESH_SHIFT 18 + +/* RAIDC Bit Masks */ +#define IXGB_RAIDC_HIGHTHRS_MASK 0x0000003F +#define IXGB_RAIDC_DELAY_MASK 0x000FF800 +#define IXGB_RAIDC_DELAY_SHIFT 11 +#define IXGB_RAIDC_POLL_MASK 0x1FF00000 +#define IXGB_RAIDC_POLL_SHIFT 20 +#define IXGB_RAIDC_RXT_GATE 0x40000000 +#define IXGB_RAIDC_EN 0x80000000 + +#define IXGB_RAIDC_POLL_1000_INTERRUPTS_PER_SECOND 1220 +#define IXGB_RAIDC_POLL_5000_INTERRUPTS_PER_SECOND 244 +#define IXGB_RAIDC_POLL_10000_INTERRUPTS_PER_SECOND 122 +#define IXGB_RAIDC_POLL_20000_INTERRUPTS_PER_SECOND 61 + +/* RXCSUM Bit Masks */ +#define IXGB_RXCSUM_IPOFL 0x00000100 +#define IXGB_RXCSUM_TUOFL 0x00000200 + +/* RAH Bit Masks */ +#define IXGB_RAH_ASEL_MASK 0x00030000 +#define IXGB_RAH_ASEL_DEST 0x00000000 +#define IXGB_RAH_ASEL_SRC 0x00010000 +#define IXGB_RAH_AV 0x80000000 + +/* TCTL Bit Masks */ +#define IXGB_TCTL_TCE 0x00000001 +#define IXGB_TCTL_TXEN 0x00000002 +#define IXGB_TCTL_TPDE 0x00000004 + +#define IXGB_TCTL_IDLE_TX_UNIT 0 + +/* TXDCTL Bit Masks */ +#define IXGB_TXDCTL_PTHRESH_MASK 0x0000007F +#define IXGB_TXDCTL_HTHRESH_MASK 0x00007F00 +#define IXGB_TXDCTL_HTHRESH_SHIFT 8 +#define IXGB_TXDCTL_WTHRESH_MASK 0x007F0000 +#define IXGB_TXDCTL_WTHRESH_SHIFT 16 + +/* TSPMT Bit Masks */ +#define IXGB_TSPMT_TSMT_MASK 0x0000FFFF +#define IXGB_TSPMT_TSPBP_MASK 0xFFFF0000 +#define IXGB_TSPMT_TSPBP_SHIFT 16 + +/* PAP Bit Masks */ +#define IXGB_PAP_TXPC_MASK 0x0000FFFF +#define IXGB_PAP_TXPV_MASK 0x000F0000 +#define IXGB_PAP_TXPV_10G 0x00000000 +#define IXGB_PAP_TXPV_1G 0x00010000 +#define IXGB_PAP_TXPV_2G 0x00020000 +#define IXGB_PAP_TXPV_3G 0x00030000 +#define IXGB_PAP_TXPV_4G 0x00040000 +#define IXGB_PAP_TXPV_5G 0x00050000 +#define IXGB_PAP_TXPV_6G 0x00060000 +#define IXGB_PAP_TXPV_7G 0x00070000 +#define IXGB_PAP_TXPV_8G 0x00080000 +#define IXGB_PAP_TXPV_9G 0x00090000 +#define IXGB_PAP_TXPV_WAN 0x000F0000 + +/* PCSC1 Bit Masks */ +#define IXGB_PCSC1_LOOPBACK 0x00004000 + +/* PCSC2 Bit Masks */ +#define IXGB_PCSC2_PCS_TYPE_MASK 0x00000003 +#define IXGB_PCSC2_PCS_TYPE_10GBX 0x00000001 + +/* PCSS1 Bit Masks */ +#define IXGB_PCSS1_LOCAL_FAULT 0x00000080 +#define IXGB_PCSS1_RX_LINK_STATUS 0x00000004 + +/* PCSS2 Bit Masks */ +#define IXGB_PCSS2_DEV_PRES_MASK 0x0000C000 +#define IXGB_PCSS2_DEV_PRES 0x00004000 +#define IXGB_PCSS2_TX_LF 0x00000800 +#define IXGB_PCSS2_RX_LF 0x00000400 +#define IXGB_PCSS2_10GBW 0x00000004 +#define IXGB_PCSS2_10GBX 0x00000002 +#define IXGB_PCSS2_10GBR 0x00000001 + +/* XPCSS Bit Masks */ +#define IXGB_XPCSS_ALIGN_STATUS 0x00001000 +#define IXGB_XPCSS_PATTERN_TEST 0x00000800 +#define IXGB_XPCSS_LANE_3_SYNC 0x00000008 +#define IXGB_XPCSS_LANE_2_SYNC 0x00000004 +#define IXGB_XPCSS_LANE_1_SYNC 0x00000002 +#define IXGB_XPCSS_LANE_0_SYNC 0x00000001 + +/* XPCSTC Bit Masks */ +#define IXGB_XPCSTC_BERT_TRIG 0x00200000 +#define IXGB_XPCSTC_BERT_SST 0x00100000 +#define IXGB_XPCSTC_BERT_PSZ_MASK 0x000C0000 +#define 
IXGB_XPCSTC_BERT_PSZ_SHIFT 17 +#define IXGB_XPCSTC_BERT_PSZ_INF 0x00000003 +#define IXGB_XPCSTC_BERT_PSZ_68 0x00000001 +#define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000 + +/* MSCA bit Masks */ +/* New Protocol Address */ +#define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF +#define IXGB_MSCA_NP_ADDR_SHIFT 0 +/* Either Device Type or Register Address,depending on ST_CODE */ +#define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000 +#define IXGB_MSCA_DEV_TYPE_SHIFT 16 +#define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000 +#define IXGB_MSCA_PHY_ADDR_SHIFT 21 +#define IXGB_MSCA_OP_CODE_MASK 0x0C000000 +/* OP_CODE == 00, Address cycle, New Protocol */ +/* OP_CODE == 01, Write operation */ +/* OP_CODE == 10, Read operation */ +/* OP_CODE == 11, Read, auto increment, New Protocol */ +#define IXGB_MSCA_ADDR_CYCLE 0x00000000 +#define IXGB_MSCA_WRITE 0x04000000 +#define IXGB_MSCA_READ 0x08000000 +#define IXGB_MSCA_READ_AUTOINC 0x0C000000 +#define IXGB_MSCA_OP_CODE_SHIFT 26 +#define IXGB_MSCA_ST_CODE_MASK 0x30000000 +/* ST_CODE == 00, New Protocol */ +/* ST_CODE == 01, Old Protocol */ +#define IXGB_MSCA_NEW_PROTOCOL 0x00000000 +#define IXGB_MSCA_OLD_PROTOCOL 0x10000000 +#define IXGB_MSCA_ST_CODE_SHIFT 28 +/* Initiate command, self-clearing when command completes */ +#define IXGB_MSCA_MDI_COMMAND 0x40000000 +/*MDI In Progress Enable. */ +#define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000 + +/* MSRWD bit masks */ +#define IXGB_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGB_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGB_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGB_MSRWD_READ_DATA_SHIFT 16 + +/* Definitions for the optics devices on the MDIO bus. */ +#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */ + +#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */ + +/* Vendor-specific MDIO registers */ +#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */ +#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */ + +#define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80 +#define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00 +#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */ + +/* Layout of a single receive descriptor. The controller assumes that this + * structure is packed into 16 bytes, which is a safe assumption with most + * compilers. However, some compilers may insert padding between the fields, + * in which case the structure must be packed in some compiler-specific + * manner. */ +struct ixgb_rx_desc { + __le64 buff_addr; + __le16 length; + __le16 reserved; + u8 status; + u8 errors; + __le16 special; +}; + +#define IXGB_RX_DESC_STATUS_DD 0x01 +#define IXGB_RX_DESC_STATUS_EOP 0x02 +#define IXGB_RX_DESC_STATUS_IXSM 0x04 +#define IXGB_RX_DESC_STATUS_VP 0x08 +#define IXGB_RX_DESC_STATUS_TCPCS 0x20 +#define IXGB_RX_DESC_STATUS_IPCS 0x40 +#define IXGB_RX_DESC_STATUS_PIF 0x80 + +#define IXGB_RX_DESC_ERRORS_CE 0x01 +#define IXGB_RX_DESC_ERRORS_SE 0x02 +#define IXGB_RX_DESC_ERRORS_P 0x08 +#define IXGB_RX_DESC_ERRORS_TCPE 0x20 +#define IXGB_RX_DESC_ERRORS_IPE 0x40 +#define IXGB_RX_DESC_ERRORS_RXE 0x80 + +#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */ + +/* Layout of a single transmit descriptor. The controller assumes that this + * structure is packed into 16 bytes, which is a safe assumption with most + * compilers. 
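That packing assumption can be checked at build time. A userspace sketch for the receive descriptor above, using fixed-width stand-ins for the kernel's __le64/__le16 types (rx_desc_model is an illustrative name; the negative-array-size trick is the pre-static_assert idiom, and the same check applies to the transmit descriptor that follows):

#include <stdint.h>

struct rx_desc_model {
	uint64_t buff_addr;
	uint16_t length;
	uint16_t reserved;
	uint8_t  status;
	uint8_t  errors;
	uint16_t special;
};

/* Ill-formed (array of size -1) if padding ever changes the
 * descriptor size away from the 16 bytes the controller assumes. */
typedef char rx_desc_is_16_bytes[(sizeof(struct rx_desc_model) == 16) ? 1 : -1];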
However, some compilers may insert padding between the fields, + * in which case the structure must be packed in some compiler-specific + * manner. */ +struct ixgb_tx_desc { + __le64 buff_addr; + __le32 cmd_type_len; + u8 status; + u8 popts; + __le16 vlan; +}; + +#define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF +#define IXGB_TX_DESC_TYPE_MASK 0x00F00000 +#define IXGB_TX_DESC_TYPE_SHIFT 20 +#define IXGB_TX_DESC_CMD_MASK 0xFF000000 +#define IXGB_TX_DESC_CMD_SHIFT 24 +#define IXGB_TX_DESC_CMD_EOP 0x01000000 +#define IXGB_TX_DESC_CMD_TSE 0x04000000 +#define IXGB_TX_DESC_CMD_RS 0x08000000 +#define IXGB_TX_DESC_CMD_VLE 0x40000000 +#define IXGB_TX_DESC_CMD_IDE 0x80000000 + +#define IXGB_TX_DESC_TYPE 0x00100000 + +#define IXGB_TX_DESC_STATUS_DD 0x01 + +#define IXGB_TX_DESC_POPTS_IXSM 0x01 +#define IXGB_TX_DESC_POPTS_TXSM 0x02 +#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */ + +struct ixgb_context_desc { + u8 ipcss; + u8 ipcso; + __le16 ipcse; + u8 tucss; + u8 tucso; + __le16 tucse; + __le32 cmd_type_len; + u8 status; + u8 hdr_len; + __le16 mss; +}; + +#define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000 +#define IXGB_CONTEXT_DESC_CMD_IP 0x02000000 +#define IXGB_CONTEXT_DESC_CMD_TSE 0x04000000 +#define IXGB_CONTEXT_DESC_CMD_RS 0x08000000 +#define IXGB_CONTEXT_DESC_CMD_IDE 0x80000000 + +#define IXGB_CONTEXT_DESC_TYPE 0x00000000 + +#define IXGB_CONTEXT_DESC_STATUS_DD 0x01 + +/* Filters */ +#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ +#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */ + +#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0 +#define ENET_HEADER_SIZE 14 +#define ENET_FCS_LENGTH 4 +#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128 +#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60 +#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514 +#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Phy Addresses */ +#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */ +#define IXGB_XAUII_PHY_ADDR 0x1 /* Xauii transceiver phy address */ +#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */ + +/* This structure takes a 64k flash and maps it for identification commands */ +struct ixgb_flash_buffer { + u8 manufacturer_id; + u8 device_id; + u8 filler1[0x2AA8]; + u8 cmd2; + u8 filler2[0x2AAA]; + u8 cmd1; + u8 filler3[0xAAAA]; +}; + +/* Flow control parameters */ +struct ixgb_fc { + u32 high_water; /* Flow Control High-water */ + u32 low_water; /* Flow Control Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + ixgb_fc_type type; /* Type of flow control */ +}; + +/* The historical defaults for the flow control values are given below. 
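As a usage sketch of how these defaults map onto struct ixgb_fc above — fc_set_defaults() is illustrative and not part of the patch, and it assumes the FC_DEFAULT_* values defined immediately below and the enum from earlier in this header:

static void fc_set_defaults(struct ixgb_fc *fc)
{
	fc->high_water = FC_DEFAULT_HI_THRESH;	/* 32KB */
	fc->low_water  = FC_DEFAULT_LO_THRESH;	/* 16KB */
	fc->pause_time = FC_DEFAULT_TX_TIMER;
	fc->send_xon   = true;
	fc->type       = ixgb_fc_full;		/* pause in both directions */
}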
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* Phy definitions */ +#define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF +#define IXGB_MAX_PHY_ADDRESS 31 +#define IXGB_MAX_PHY_DEV_TYPE 31 + +/* Bus parameters */ +struct ixgb_bus { + ixgb_bus_speed speed; + ixgb_bus_width width; + ixgb_bus_type type; +}; + +struct ixgb_hw { + u8 __iomem *hw_addr;/* Base Address of the hardware */ + void *back; /* Pointer to OS-dependent struct */ + struct ixgb_fc fc; /* Flow control parameters */ + struct ixgb_bus bus; /* Bus parameters */ + u32 phy_id; /* Phy Identifier */ + u32 phy_addr; /* XGMII address of Phy */ + ixgb_mac_type mac_type; /* Identifier for MAC controller */ + ixgb_phy_type phy_type; /* Transceiver/phy identifier */ + u32 max_frame_size; /* Maximum frame size supported */ + u32 mc_filter_type; /* Multicast filter hash type */ + u32 num_mc_addrs; /* Number of current Multicast addrs */ + u8 curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */ + u32 num_tx_desc; /* Number of Transmit descriptors */ + u32 num_rx_desc; /* Number of Receive descriptors */ + u32 rx_buffer_size; /* Size of Receive buffer */ + bool link_up; /* true if link is valid */ + bool adapter_stopped; /* State of adapter */ + u16 device_id; /* device id from PCI configuration space */ + u16 vendor_id; /* vendor id from PCI configuration space */ + u8 revision_id; /* revision id from PCI configuration space */ + u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */ + u16 subsystem_id; /* subsystem id from PCI configuration space */ + u32 bar0; /* Base Address registers */ + u32 bar1; + u32 bar2; + u32 bar3; + u16 pci_cmd_word; /* PCI command register id from PCI configuration space */ + __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ + unsigned long io_base; /* Our I/O mapped location */ + u32 lastLFC; + u32 lastRFC; +}; + +/* Statistics reported by the hardware */ +struct ixgb_hw_stats { + u64 tprl; + u64 tprh; + u64 gprcl; + u64 gprch; + u64 bprcl; + u64 bprch; + u64 mprcl; + u64 mprch; + u64 uprcl; + u64 uprch; + u64 vprcl; + u64 vprch; + u64 jprcl; + u64 jprch; + u64 gorcl; + u64 gorch; + u64 torl; + u64 torh; + u64 rnbc; + u64 ruc; + u64 roc; + u64 rlec; + u64 crcerrs; + u64 icbc; + u64 ecbc; + u64 mpc; + u64 tptl; + u64 tpth; + u64 gptcl; + u64 gptch; + u64 bptcl; + u64 bptch; + u64 mptcl; + u64 mptch; + u64 uptcl; + u64 uptch; + u64 vptcl; + u64 vptch; + u64 jptcl; + u64 jptch; + u64 gotcl; + u64 gotch; + u64 totl; + u64 toth; + u64 dc; + u64 plt64c; + u64 tsctc; + u64 tsctfc; + u64 ibic; + u64 rfc; + u64 lfc; + u64 pfrc; + u64 pftc; + u64 mcfrc; + u64 mcftc; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 rjc; +}; + +/* Function Prototypes */ +extern bool ixgb_adapter_stop(struct ixgb_hw *hw); +extern bool ixgb_init_hw(struct ixgb_hw *hw); +extern bool ixgb_adapter_start(struct ixgb_hw *hw); +extern void ixgb_check_for_link(struct ixgb_hw *hw); +extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw); + +extern void ixgb_rar_set(struct ixgb_hw *hw, + u8 *addr, + u32 index); + + +/* Filters (multicast, vlan, receive) */ +extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count, + u32 pad); + +/* Vfta functions */ +extern void ixgb_write_vfta(struct ixgb_hw *hw, + u32 offset, + u32 value); + +/* Access functions to eeprom data */ +void ixgb_get_ee_mac_addr(struct ixgb_hw 
*hw, u8 *mac_addr); +u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw); +u16 ixgb_get_ee_device_id(struct ixgb_hw *hw); +bool ixgb_get_eeprom_data(struct ixgb_hw *hw); +__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index); + +/* Everything else */ +void ixgb_led_on(struct ixgb_hw *hw); +void ixgb_led_off(struct ixgb_hw *hw); +void ixgb_write_pci_cfg(struct ixgb_hw *hw, + u32 reg, + u16 * value); + + +#endif /* _IXGB_HW_H_ */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h new file mode 100644 index 0000000..2a58847 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h @@ -0,0 +1,53 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGB_IDS_H_ +#define _IXGB_IDS_H_ + +/********************************************************************** +** The Device and Vendor IDs for 10 Gigabit MACs +**********************************************************************/ + +#define INTEL_VENDOR_ID 0x8086 +#define INTEL_SUBVENDOR_ID 0x8086 +#define SUN_VENDOR_ID 0x108E +#define SUN_SUBVENDOR_ID 0x108E + +#define IXGB_DEVICE_ID_82597EX 0x1048 +#define IXGB_DEVICE_ID_82597EX_SR 0x1A48 +#define IXGB_DEVICE_ID_82597EX_LR 0x1B48 +#define IXGB_SUBDEVICE_ID_A11F 0xA11F +#define IXGB_SUBDEVICE_ID_A01F 0xA01F + +#define IXGB_DEVICE_ID_82597EX_CX4 0x109E +#define IXGB_SUBDEVICE_ID_A00C 0xA00C +#define IXGB_SUBDEVICE_ID_A01C 0xA01C +#define IXGB_SUBDEVICE_ID_7036 0x7036 + +#endif /* #ifndef _IXGB_IDS_H_ */ +/* End of File */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c new file mode 100644 index 0000000..6a130eb --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -0,0 +1,2332 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include "ixgb.h" + +char ixgb_driver_name[] = "ixgb"; +static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; + +#define DRIVERNAPI "-NAPI" +#define DRV_VERSION "1.0.135-k2" DRIVERNAPI +const char ixgb_driver_version[] = DRV_VERSION; +static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation."; + +#define IXGB_CB_LENGTH 256 +static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +/* ixgb_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = { + {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl); + +/* Local Function Prototypes */ +static int ixgb_init_module(void); +static void ixgb_exit_module(void); +static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void __devexit ixgb_remove(struct pci_dev *pdev); +static int ixgb_sw_init(struct ixgb_adapter *adapter); +static int ixgb_open(struct net_device *netdev); +static int ixgb_close(struct net_device *netdev); +static void ixgb_configure_tx(struct ixgb_adapter *adapter); +static void ixgb_configure_rx(struct ixgb_adapter *adapter); +static void ixgb_setup_rctl(struct ixgb_adapter *adapter); +static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter); +static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter); +static void ixgb_set_multi(struct net_device *netdev); +static void ixgb_watchdog(unsigned long data); +static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static struct net_device_stats *ixgb_get_stats(struct net_device *netdev); +static int ixgb_change_mtu(struct net_device *netdev, int new_mtu); +static int ixgb_set_mac(struct net_device *netdev, void *p); +static irqreturn_t ixgb_intr(int irq, void *data); +static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter); + +static int ixgb_clean(struct napi_struct *, int); +static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int); +static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int); + +static void ixgb_tx_timeout(struct net_device *dev); +static void ixgb_tx_timeout_task(struct work_struct *work); + +static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); +static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); +static void 
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); +static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +static void ixgb_restore_vlan(struct ixgb_adapter *adapter); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void ixgb_netpoll(struct net_device *dev); +#endif + +static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, + enum pci_channel_state state); +static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); +static void ixgb_io_resume (struct pci_dev *pdev); + +static struct pci_error_handlers ixgb_err_handler = { + .error_detected = ixgb_io_error_detected, + .slot_reset = ixgb_io_slot_reset, + .resume = ixgb_io_resume, +}; + +static struct pci_driver ixgb_driver = { + .name = ixgb_driver_name, + .id_table = ixgb_pci_tbl, + .probe = ixgb_probe, + .remove = __devexit_p(ixgb_remove), + .err_handler = &ixgb_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 +static int debug = DEFAULT_DEBUG_LEVEL_SHIFT; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * ixgb_init_module - Driver Registration Routine + * + * ixgb_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ + +static int __init +ixgb_init_module(void) +{ + pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version); + pr_info("%s\n", ixgb_copyright); + + return pci_register_driver(&ixgb_driver); +} + +module_init(ixgb_init_module); + +/** + * ixgb_exit_module - Driver Exit Cleanup Routine + * + * ixgb_exit_module is called just before the driver is removed + * from memory. 
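Reduced to its skeleton, the module plumbing above is the standard PCI driver lifecycle: at load time the module only hands its pci_driver to the PCI core, which then calls .probe for each matching device; at unload it unregisters. The example_* names below are placeholders for this common kernel pattern, not new code in the patch:

static struct pci_driver example_driver = {
	.name = "example",
	/* .id_table, .probe, .remove and .err_handler as above */
};

static int __init example_init(void)
{
	return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);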
+ **/ + +static void __exit +ixgb_exit_module(void) +{ + pci_unregister_driver(&ixgb_driver); +} + +module_exit(ixgb_exit_module); + +/** + * ixgb_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ + +static void +ixgb_irq_disable(struct ixgb_adapter *adapter) +{ + IXGB_WRITE_REG(&adapter->hw, IMC, ~0); + IXGB_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * ixgb_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ + +static void +ixgb_irq_enable(struct ixgb_adapter *adapter) +{ + u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | + IXGB_INT_TXDW | IXGB_INT_LSC; + if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID) + val |= IXGB_INT_GPI0; + IXGB_WRITE_REG(&adapter->hw, IMS, val); + IXGB_WRITE_FLUSH(&adapter->hw); +} + +int +ixgb_up(struct ixgb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err, irq_flags = IRQF_SHARED; + int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; + struct ixgb_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + + ixgb_rar_set(hw, netdev->dev_addr, 0); + ixgb_set_multi(netdev); + + ixgb_restore_vlan(adapter); + + ixgb_configure_tx(adapter); + ixgb_setup_rctl(adapter); + ixgb_configure_rx(adapter); + ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring)); + + /* disable interrupts and get the hardware into a known state */ + IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff); + + /* only enable MSI if bus is in PCI-X mode */ + if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) { + err = pci_enable_msi(adapter->pdev); + if (!err) { + adapter->have_msi = 1; + irq_flags = 0; + } + /* proceed to try to request regular interrupt */ + } + + err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags, + netdev->name, netdev); + if (err) { + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); + netif_err(adapter, probe, adapter->netdev, + "Unable to allocate interrupt Error: %d\n", err); + return err; + } + + if ((hw->max_frame_size != max_frame) || + (hw->max_frame_size != + (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) { + + hw->max_frame_size = max_frame; + + IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); + + if (hw->max_frame_size > + IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { + u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); + + if (!(ctrl0 & IXGB_CTRL0_JFE)) { + ctrl0 |= IXGB_CTRL0_JFE; + IXGB_WRITE_REG(hw, CTRL0, ctrl0); + } + } + } + + clear_bit(__IXGB_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + ixgb_irq_enable(adapter); + + netif_wake_queue(netdev); + + mod_timer(&adapter->watchdog_timer, jiffies); + + return 0; +} + +void +ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) +{ + struct net_device *netdev = adapter->netdev; + + /* prevent the interrupt handler from restarting watchdog */ + set_bit(__IXGB_DOWN, &adapter->flags); + + napi_disable(&adapter->napi); + /* waiting for NAPI to complete can re-enable interrupts */ + ixgb_irq_disable(adapter); + free_irq(adapter->pdev->irq, netdev); + + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); + + if (kill_watchdog) + del_timer_sync(&adapter->watchdog_timer); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + ixgb_reset(adapter); + ixgb_clean_tx_ring(adapter); + ixgb_clean_rx_ring(adapter); +} + +void +ixgb_reset(struct ixgb_adapter *adapter) +{ + struct ixgb_hw *hw = 
&adapter->hw; + + ixgb_adapter_stop(hw); + if (!ixgb_init_hw(hw)) + netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n"); + + /* restore frame size information */ + IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); + if (hw->max_frame_size > + IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { + u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); + if (!(ctrl0 & IXGB_CTRL0_JFE)) { + ctrl0 |= IXGB_CTRL0_JFE; + IXGB_WRITE_REG(hw, CTRL0, ctrl0); + } + } +} + +static const struct net_device_ops ixgb_netdev_ops = { + .ndo_open = ixgb_open, + .ndo_stop = ixgb_close, + .ndo_start_xmit = ixgb_xmit_frame, + .ndo_get_stats = ixgb_get_stats, + .ndo_set_multicast_list = ixgb_set_multi, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgb_set_mac, + .ndo_change_mtu = ixgb_change_mtu, + .ndo_tx_timeout = ixgb_tx_timeout, + .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgb_netpoll, +#endif +}; + +/** + * ixgb_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgb_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgb_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ + +static int __devinit +ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev = NULL; + struct ixgb_adapter *adapter; + static int cards_found = 0; + int pci_using_dac; + int i; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA configuration, aborting\n"); + goto err_dma_mask; + } + } + } + + err = pci_request_regions(pdev, ixgb_driver_name); + if (err) + goto err_request_regions; + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct ixgb_adapter)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT); + + adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!adapter->hw.hw_addr) { + err = -EIO; + goto err_ioremap; + } + + for (i = BAR_1; i <= BAR_5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + adapter->hw.io_base = pci_resource_start(pdev, i); + break; + } + } + + netdev->netdev_ops = &ixgb_netdev_ops; + ixgb_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* setup the private structure */ + + err = ixgb_sw_init(adapter); + if (err) + goto err_sw_init; + + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; + netdev->features |= 
NETIF_F_TSO; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + /* make sure the EEPROM is good */ + + if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { + netif_err(adapter, probe, adapter->netdev, + "The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + + ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw); + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = ixgb_watchdog; + adapter->watchdog_timer.data = (unsigned long)adapter; + + INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + netif_info(adapter, probe, adapter->netdev, + "Intel(R) PRO/10GbE Network Connection\n"); + ixgb_check_options(adapter); + /* reset the hardware with the new settings */ + + ixgb_reset(adapter); + + cards_found++; + return 0; + +err_register: +err_sw_init: +err_eeprom: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_request_regions: +err_dma_mask: + pci_disable_device(pdev); + return err; +} + +/** + * ixgb_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgb_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ + +static void __devexit +ixgb_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgb_adapter *adapter = netdev_priv(netdev); + + cancel_work_sync(&adapter->tx_timeout_task); + + unregister_netdev(netdev); + + iounmap(adapter->hw.hw_addr); + pci_release_regions(pdev); + + free_netdev(netdev); + pci_disable_device(pdev); +} + +/** + * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter) + * @adapter: board private structure to initialize + * + * ixgb_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
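The err_register through err_dma_mask ladder above is the kernel's standard goto-unwind idiom: each failure path jumps to a label that releases exactly what was acquired so far, in reverse order of acquisition. A minimal standalone model of the shape (example_probe() and the malloc pairs are illustrative, not the driver's resources):

#include <stdlib.h>

static int example_probe(void)
{
	char *a, *b;

	a = malloc(16);		/* first resource (cf. pci_enable_device) */
	if (!a)
		goto err_a;

	b = malloc(16);		/* second resource (cf. alloc_etherdev) */
	if (!b)
		goto err_b;

	free(b);		/* a real probe keeps both; freed here only */
	free(a);		/* to keep the standalone model leak-free */
	return 0;

err_b:
	free(a);		/* undo the first acquisition */
err_a:
	return -1;
}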
+ **/ + +static int __devinit +ixgb_sw_init(struct ixgb_adapter *adapter) +{ + struct ixgb_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + + hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; + adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */ + + if ((hw->device_id == IXGB_DEVICE_ID_82597EX) || + (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) || + (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) || + (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) + hw->mac_type = ixgb_82597; + else { + /* should never have loaded on this device */ + netif_err(adapter, probe, adapter->netdev, "unsupported device id\n"); + } + + /* enable flow control to be programmed */ + hw->fc.send_xon = 1; + + set_bit(__IXGB_DOWN, &adapter->flags); + return 0; +} + +/** + * ixgb_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ + +static int +ixgb_open(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + int err; + + /* allocate transmit descriptors */ + err = ixgb_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + netif_carrier_off(netdev); + + /* allocate receive descriptors */ + + err = ixgb_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + + err = ixgb_up(adapter); + if (err) + goto err_up; + + netif_start_queue(netdev); + + return 0; + +err_up: + ixgb_free_rx_resources(adapter); +err_setup_rx: + ixgb_free_tx_resources(adapter); +err_setup_tx: + ixgb_reset(adapter); + + return err; +} + +/** + * ixgb_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
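The sizing arithmetic in ixgb_sw_init() above is worth making explicit: the hardware frame limit is the MTU plus fixed Ethernet overhead, and the receive buffer gets 8 extra bytes for the errata noted in the comment. A standalone model (function names are illustrative; the constants match this patch's ixgb_hw.h):

#include <stdint.h>

#define ENET_HEADER_SIZE 14	/* dst addr + src addr + type */
#define ENET_FCS_LENGTH   4	/* trailing CRC */

static uint32_t max_frame_from_mtu(uint32_t mtu)
{
	return mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
}

static uint32_t rx_buffer_len_for(uint32_t mtu)
{
	return max_frame_from_mtu(mtu) + 8;	/* + 8 for errata */
}

/* e.g. a standard 1500-byte MTU gives a 1518-byte frame limit. */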
+ **/ + +static int +ixgb_close(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + ixgb_down(adapter, true); + + ixgb_free_tx_resources(adapter); + ixgb_free_rx_resources(adapter); + + return 0; +} + +/** + * ixgb_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ + +int +ixgb_setup_tx_resources(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *txdr = &adapter->tx_ring; + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct ixgb_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) { + netif_err(adapter, probe, adapter->netdev, + "Unable to allocate transmit descriptor ring memory\n"); + return -ENOMEM; + } + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + vfree(txdr->buffer_info); + netif_err(adapter, probe, adapter->netdev, + "Unable to allocate transmit descriptor memory\n"); + return -ENOMEM; + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset. + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ + +static void +ixgb_configure_tx(struct ixgb_adapter *adapter) +{ + u64 tdba = adapter->tx_ring.dma; + u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc); + u32 tctl; + struct ixgb_hw *hw = &adapter->hw; + + /* Setup the Base and Length of the Tx Descriptor Ring + * tx_ring.dma can be either a 32 or 64 bit value + */ + + IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); + IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32)); + + IXGB_WRITE_REG(hw, TDLEN, tdlen); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + IXGB_WRITE_REG(hw, TDH, 0); + IXGB_WRITE_REG(hw, TDT, 0); + + /* don't set up txdctl, it induces performance problems if configured + * incorrectly */ + /* Set the Tx Interrupt Delay register */ + + IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay); + + /* Program the Transmit Control Register */ + + tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE; + IXGB_WRITE_REG(hw, TCTL, tctl); + + /* Setup Transmit Descriptor Settings for this adapter */ + adapter->tx_cmd_type = + IXGB_TX_DESC_TYPE | + (adapter->tx_int_delay_enable ? 
IXGB_TX_DESC_CMD_IDE : 0); +} + +/** + * ixgb_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ + +int +ixgb_setup_rx_resources(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *rxdr = &adapter->rx_ring; + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct ixgb_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) { + netif_err(adapter, probe, adapter->netdev, + "Unable to allocate receive descriptor ring\n"); + return -ENOMEM; + } + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + + if (!rxdr->desc) { + vfree(rxdr->buffer_info); + netif_err(adapter, probe, adapter->netdev, + "Unable to allocate receive descriptors\n"); + return -ENOMEM; + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + + return 0; +} + +/** + * ixgb_setup_rctl - configure the receive control register + * @adapter: Board private structure + **/ + +static void +ixgb_setup_rctl(struct ixgb_adapter *adapter) +{ + u32 rctl; + + rctl = IXGB_READ_REG(&adapter->hw, RCTL); + + rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); + + rctl |= + IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | + IXGB_RCTL_RXEN | IXGB_RCTL_CFF | + (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); + + rctl |= IXGB_RCTL_SECRC; + + if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048) + rctl |= IXGB_RCTL_BSIZE_2048; + else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096) + rctl |= IXGB_RCTL_BSIZE_4096; + else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192) + rctl |= IXGB_RCTL_BSIZE_8192; + else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384) + rctl |= IXGB_RCTL_BSIZE_16384; + + IXGB_WRITE_REG(&adapter->hw, RCTL, rctl); +} + +/** + * ixgb_configure_rx - Configure 82597 Receive Unit after Reset. + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
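+ *
+ * The RCTL buffer-size field programmed in ixgb_setup_rctl() above
+ * simply selects the smallest hardware buffer class that can hold
+ * rx_buffer_len:
+ *
+ *	rx_buffer_len <= 2048  -> IXGB_RCTL_BSIZE_2048
+ *	rx_buffer_len <= 4096  -> IXGB_RCTL_BSIZE_4096
+ *	rx_buffer_len <= 8192  -> IXGB_RCTL_BSIZE_8192
+ *	rx_buffer_len <= 16384 -> IXGB_RCTL_BSIZE_16384
+ *
+ * so the 1526-byte default (see ixgb_sw_init) lands in the 2048-byte
+ * class.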
+ **/ + +static void +ixgb_configure_rx(struct ixgb_adapter *adapter) +{ + u64 rdba = adapter->rx_ring.dma; + u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc); + struct ixgb_hw *hw = &adapter->hw; + u32 rctl; + u32 rxcsum; + + /* make sure receives are disabled while setting up the descriptors */ + + rctl = IXGB_READ_REG(hw, RCTL); + IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN); + + /* set the Receive Delay Timer Register */ + + IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay); + + /* Setup the Base and Length of the Rx Descriptor Ring */ + + IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); + IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32)); + + IXGB_WRITE_REG(hw, RDLEN, rdlen); + + /* Setup the HW Rx Head and Tail Descriptor Pointers */ + IXGB_WRITE_REG(hw, RDH, 0); + IXGB_WRITE_REG(hw, RDT, 0); + + /* due to the hardware errata with RXDCTL, we are unable to use any of + * the performance enhancing features of it without causing other + * subtle bugs, some of the bugs could include receive length + * corruption at high data rates (WTHRESH > 0) and/or receive + * descriptor ring irregularites (particularly in hardware cache) */ + IXGB_WRITE_REG(hw, RXDCTL, 0); + + /* Enable Receive Checksum Offload for TCP and UDP */ + if (adapter->rx_csum) { + rxcsum = IXGB_READ_REG(hw, RXCSUM); + rxcsum |= IXGB_RXCSUM_TUOFL; + IXGB_WRITE_REG(hw, RXCSUM, rxcsum); + } + + /* Enable Receives */ + + IXGB_WRITE_REG(hw, RCTL, rctl); +} + +/** + * ixgb_free_tx_resources - Free Tx Resources + * @adapter: board private structure + * + * Free all transmit software resources + **/ + +void +ixgb_free_tx_resources(struct ixgb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + ixgb_clean_tx_ring(adapter); + + vfree(adapter->tx_ring.buffer_info); + adapter->tx_ring.buffer_info = NULL; + + dma_free_coherent(&pdev->dev, adapter->tx_ring.size, + adapter->tx_ring.desc, adapter->tx_ring.dma); + + adapter->tx_ring.desc = NULL; +} + +static void +ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter, + struct ixgb_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* these fields must always be initialized in tx + * buffer_info->length = 0; + * buffer_info->next_to_watch = 0; */ +} + +/** + * ixgb_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + **/ + +static void +ixgb_clean_tx_ring(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + struct ixgb_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + ixgb_unmap_and_free_tx_resource(adapter, buffer_info); + } + + size = sizeof(struct ixgb_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + IXGB_WRITE_REG(&adapter->hw, TDH, 0); + IXGB_WRITE_REG(&adapter->hw, TDT, 0); +} + +/** + * ixgb_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * + * Free all receive software resources 
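+ *
+ * (As with the Tx ring, note how ixgb_configure_rx() above programs a
+ * 64-bit DMA base address through two 32-bit registers:
+ *
+ *	IXGB_WRITE_REG(hw, RDBAL, rdba & 0x00000000ffffffffULL);
+ *	IXGB_WRITE_REG(hw, RDBAH, rdba >> 32);
+ *
+ * which lets descriptor rings live anywhere in a 64-bit address space
+ * even though each register holds only 32 bits.)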
+ **/ + +void +ixgb_free_rx_resources(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; + struct pci_dev *pdev = adapter->pdev; + + ixgb_clean_rx_ring(adapter); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ixgb_clean_rx_ring - Free Rx Buffers + * @adapter: board private structure + **/ + +static void +ixgb_clean_rx_ring(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; + struct ixgb_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + dma_unmap_single(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->length = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct ixgb_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + IXGB_WRITE_REG(&adapter->hw, RDH, 0); + IXGB_WRITE_REG(&adapter->hw, RDT, 0); +} + +/** + * ixgb_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ + +static int +ixgb_set_mac(struct net_device *netdev, void *p) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + + ixgb_rar_set(&adapter->hw, addr->sa_data, 0); + + return 0; +} + +/** + * ixgb_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
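+ *
+ * When the list fits in the hardware filter, the implementation below
+ * flattens the kernel's address list into one contiguous byte array
+ * before handing it to ixgb_mc_addr_list_update(); in outline:
+ *
+ *	i = 0;
+ *	netdev_for_each_mc_addr(ha, netdev)
+ *		memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
+ *		       ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS);
+ *
+ * Anything beyond IXGB_MAX_NUM_MULTICAST_ADDRESSES instead falls back
+ * to multicast-promiscuous mode (IXGB_RCTL_MPE).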
+ **/ + +static void +ixgb_set_multi(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u32 rctl; + int i; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = IXGB_READ_REG(hw, RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); + /* disable VLAN filtering */ + rctl &= ~IXGB_RCTL_CFIEN; + rctl &= ~IXGB_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= IXGB_RCTL_MPE; + rctl &= ~IXGB_RCTL_UPE; + } else { + rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); + } + /* enable VLAN filtering */ + rctl |= IXGB_RCTL_VFE; + rctl &= ~IXGB_RCTL_CFIEN; + } + + if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { + rctl |= IXGB_RCTL_MPE; + IXGB_WRITE_REG(hw, RCTL, rctl); + } else { + u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES * + IXGB_ETH_LENGTH_OF_ADDRESS]; + + IXGB_WRITE_REG(hw, RCTL, rctl); + + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS], + ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS); + + ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0); + } + + if (netdev->features & NETIF_F_HW_VLAN_RX) + ixgb_vlan_strip_enable(adapter); + else + ixgb_vlan_strip_disable(adapter); + +} + +/** + * ixgb_watchdog - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + **/ + +static void +ixgb_watchdog(unsigned long data) +{ + struct ixgb_adapter *adapter = (struct ixgb_adapter *)data; + struct net_device *netdev = adapter->netdev; + struct ixgb_desc_ring *txdr = &adapter->tx_ring; + + ixgb_check_for_link(&adapter->hw); + + if (ixgb_check_for_bad_link(&adapter->hw)) { + /* force the reset path */ + netif_stop_queue(netdev); + } + + if (adapter->hw.link_up) { + if (!netif_carrier_ok(netdev)) { + netdev_info(netdev, + "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n", + (adapter->hw.fc.type == ixgb_fc_full) ? + "RX/TX" : + (adapter->hw.fc.type == ixgb_fc_rx_pause) ? + "RX" : + (adapter->hw.fc.type == ixgb_fc_tx_pause) ? + "TX" : "None"); + adapter->link_speed = 10000; + adapter->link_duplex = FULL_DUPLEX; + netif_carrier_on(netdev); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + netdev_info(netdev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + } + } + + ixgb_update_stats(adapter); + + if (!netif_carrier_ok(netdev)) { + if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
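+	 *
+	 * The deferral matters because this timer callback runs in
+	 * atomic (softirq) context while the recovery path may sleep;
+	 * the work item moves the reset to process context:
+	 *
+	 *	schedule_work(&adapter->tx_timeout_task);
+	 *	    -> ixgb_tx_timeout_task()
+	 *	           -> ixgb_down(adapter, true);
+	 *	           -> ixgb_up(adapter);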
*/
+			schedule_work(&adapter->tx_timeout_task);
+			/* return immediately since reset is imminent */
+			return;
+		}
+	}
+
+	/* Force detection of hung controller every watchdog period */
+	adapter->detect_tx_hung = true;
+
+	/* generate an interrupt to force clean up of any stragglers */
+	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
+
+	/* Reset the timer */
+	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
+
+#define IXGB_TX_FLAGS_CSUM		0x00000001
+#define IXGB_TX_FLAGS_VLAN		0x00000002
+#define IXGB_TX_FLAGS_TSO		0x00000004
+
+static int
+ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
+{
+	struct ixgb_context_desc *context_desc;
+	unsigned int i;
+	u8 ipcss, ipcso, tucss, tucso, hdr_len;
+	u16 ipcse, tucse, mss;
+	int err;
+
+	if (likely(skb_is_gso(skb))) {
+		struct ixgb_buffer *buffer_info;
+		struct iphdr *iph;
+
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		mss = skb_shinfo(skb)->gso_size;
+		iph = ip_hdr(skb);
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+							 iph->daddr, 0,
+							 IPPROTO_TCP, 0);
+		ipcss = skb_network_offset(skb);
+		ipcso = (void *)&(iph->check) - (void *)skb->data;
+		ipcse = skb_transport_offset(skb) - 1;
+		tucss = skb_transport_offset(skb);
+		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+		tucse = 0;
+
+		i = adapter->tx_ring.next_to_use;
+		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+		buffer_info = &adapter->tx_ring.buffer_info[i];
+		WARN_ON(buffer_info->dma != 0);
+
+		context_desc->ipcss = ipcss;
+		context_desc->ipcso = ipcso;
+		context_desc->ipcse = cpu_to_le16(ipcse);
+		context_desc->tucss = tucss;
+		context_desc->tucso = tucso;
+		context_desc->tucse = cpu_to_le16(tucse);
+		context_desc->mss = cpu_to_le16(mss);
+		context_desc->hdr_len = hdr_len;
+		context_desc->status = 0;
+		context_desc->cmd_type_len = cpu_to_le32(
+						  IXGB_CONTEXT_DESC_TYPE
+						| IXGB_CONTEXT_DESC_CMD_TSE
+						| IXGB_CONTEXT_DESC_CMD_IP
+						| IXGB_CONTEXT_DESC_CMD_TCP
+						| IXGB_CONTEXT_DESC_CMD_IDE
+						| (skb->len - (hdr_len)));
+
+
+		if (++i == adapter->tx_ring.count) i = 0;
+		adapter->tx_ring.next_to_use = i;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+static bool
+ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
+{
+	struct ixgb_context_desc *context_desc;
+	unsigned int i;
+	u8 css, cso;
+
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		struct ixgb_buffer *buffer_info;
+		css = skb_checksum_start_offset(skb);
+		cso = css + skb->csum_offset;
+
+		i = adapter->tx_ring.next_to_use;
+		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+		buffer_info = &adapter->tx_ring.buffer_info[i];
+		WARN_ON(buffer_info->dma != 0);
+
+		context_desc->tucss = css;
+		context_desc->tucso = cso;
+		context_desc->tucse = 0;
+		/* zero out any previously existing data in one instruction */
+		*(u32 *)&(context_desc->ipcss) = 0;
+		context_desc->status = 0;
+		context_desc->hdr_len = 0;
+		context_desc->mss = 0;
+		context_desc->cmd_type_len =
+			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
+				    | IXGB_TX_DESC_CMD_IDE);
+
+		if (++i == adapter->tx_ring.count) i = 0;
+		adapter->tx_ring.next_to_use = i;
+
+		return true;
+	}
+
+	return false;
+}
+
+#define IXGB_MAX_TXD_PWR	14
+#define IXGB_MAX_DATA_PER_TXD	(1 << IXGB_MAX_TXD_PWR)
+
+static int
+ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
+	    unsigned int first)
+{
+	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	struct ixgb_buffer *buffer_info;
+	int len = skb_headlen(skb);
+	unsigned int offset = 0, size, count = 0, i;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	unsigned int
nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int f; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, IXGB_MAX_DATA_PER_TXD); + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc */ + if (unlikely(mss && !nr_frags && size == len && size > 8)) + size -= 4; + + buffer_info->length = size; + WARN_ON(buffer_info->dma != 0); + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = false; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = 0; + + len -= size; + offset += size; + count++; + if (len) { + i++; + if (i == tx_ring->count) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + offset = frag->page_offset; + + while (len) { + i++; + if (i == tx_ring->count) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, IXGB_MAX_DATA_PER_TXD); + + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc */ + if (unlikely(mss && (f == (nr_frags - 1)) + && size == len && size > 8)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = + dma_map_page(&pdev->dev, frag->page, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = 0; + + len -= size; + offset += size; + count++; + } + } + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i==0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + ixgb_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void +ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) +{ + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + struct ixgb_tx_desc *tx_desc = NULL; + struct ixgb_buffer *buffer_info; + u32 cmd_type_len = adapter->tx_cmd_type; + u8 status = 0; + u8 popts = 0; + unsigned int i; + + if (tx_flags & IXGB_TX_FLAGS_TSO) { + cmd_type_len |= IXGB_TX_DESC_CMD_TSE; + popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM); + } + + if (tx_flags & IXGB_TX_FLAGS_CSUM) + popts |= IXGB_TX_DESC_POPTS_TXSM; + + if (tx_flags & IXGB_TX_FLAGS_VLAN) + cmd_type_len |= IXGB_TX_DESC_CMD_VLE; + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = IXGB_TX_DESC(*tx_ring, i); + tx_desc->buff_addr = cpu_to_le64(buffer_info->dma); + tx_desc->cmd_type_len = + cpu_to_le32(cmd_type_len | buffer_info->length); + tx_desc->status = status; + tx_desc->popts = popts; + tx_desc->vlan = cpu_to_le16(vlan_id); + + if (++i == tx_ring->count) i = 0; + } + + tx_desc->cmd_type_len |= + cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
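+	 *
+	 * In sketch form, the ordering that must hold is:
+	 *
+	 *	write descriptor fields
+	 *	wmb()                        -- descriptors globally visible
+	 *	IXGB_WRITE_REG(hw, TDT, i)   -- only then bump the tail
+	 *
+	 * otherwise the hardware could fetch a descriptor whose fields
+	 * are still sitting in a store buffer.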
*/ + wmb(); + + tx_ring->next_to_use = i; + IXGB_WRITE_REG(&adapter->hw, TDT, i); +} + +static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. */ + if (likely(IXGB_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int ixgb_maybe_stop_tx(struct net_device *netdev, + struct ixgb_desc_ring *tx_ring, int size) +{ + if (likely(IXGB_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __ixgb_maybe_stop_tx(netdev, size); +} + + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \ + (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) +#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->date */ + \ + MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \ + + 1 /* one more needed for sentinel TSO workaround */ + +static netdev_tx_t +ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + unsigned int first; + unsigned int tx_flags = 0; + int vlan_id = 0; + int count = 0; + int tso; + + if (test_bit(__IXGB_DOWN, &adapter->flags)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, + DESC_NEEDED))) + return NETDEV_TX_BUSY; + + if (vlan_tx_tag_present(skb)) { + tx_flags |= IXGB_TX_FLAGS_VLAN; + vlan_id = vlan_tx_tag_get(skb); + } + + first = adapter->tx_ring.next_to_use; + + tso = ixgb_tso(adapter, skb); + if (tso < 0) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (likely(tso)) + tx_flags |= IXGB_TX_FLAGS_TSO; + else if (ixgb_tx_csum(adapter, skb)) + tx_flags |= IXGB_TX_FLAGS_CSUM; + + count = ixgb_tx_map(adapter, skb, first); + + if (count) { + ixgb_tx_queue(adapter, count, vlan_id, tx_flags); + /* Make sure there is space in the ring for the next send. */ + ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED); + + } else { + dev_kfree_skb_any(skb); + adapter->tx_ring.buffer_info[first].time_stamp = 0; + adapter->tx_ring.next_to_use = first; + } + + return NETDEV_TX_OK; +} + +/** + * ixgb_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ + +static void +ixgb_tx_timeout(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->tx_timeout_task); +} + +static void +ixgb_tx_timeout_task(struct work_struct *work) +{ + struct ixgb_adapter *adapter = + container_of(work, struct ixgb_adapter, tx_timeout_task); + + adapter->tx_timeout_count++; + ixgb_down(adapter, true); + ixgb_up(adapter); +} + +/** + * ixgb_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
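+ *
+ * Reads are therefore cheap: the register harvesting happens in
+ * ixgb_update_stats(), driven from the watchdog roughly every two
+ * seconds via mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ),
+ * so this hook can simply do:
+ *
+ *	return &netdev->stats;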
+ **/ + +static struct net_device_stats * +ixgb_get_stats(struct net_device *netdev) +{ + return &netdev->stats; +} + +/** + * ixgb_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ + +static int +ixgb_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; + int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; + + /* MTU < 68 is an error for IPv4 traffic, just don't allow it */ + if ((new_mtu < 68) || + (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { + netif_err(adapter, probe, adapter->netdev, + "Invalid MTU setting %d\n", new_mtu); + return -EINVAL; + } + + if (old_max_frame == max_frame) + return 0; + + if (netif_running(netdev)) + ixgb_down(adapter, true); + + adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */ + + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + ixgb_up(adapter); + + return 0; +} + +/** + * ixgb_update_stats - Update the board statistics counters. + * @adapter: board private structure + **/ + +void +ixgb_update_stats(struct ixgb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + /* Prevent stats update while adapter is being reset */ + if (pci_channel_offline(pdev)) + return; + + if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { + u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); + u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); + u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH); + u64 bcast = ((u64)bcast_h << 32) | bcast_l; + + multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); + /* fix up multicast stats by removing broadcasts */ + if (multi >= bcast) + multi -= bcast; + + adapter->stats.mprcl += (multi & 0xFFFFFFFF); + adapter->stats.mprch += (multi >> 32); + adapter->stats.bprcl += bcast_l; + adapter->stats.bprch += bcast_h; + } else { + adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); + adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH); + adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL); + adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH); + } + adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); + adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); + adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); + adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); + adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); + adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); + adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); + adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH); + adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL); + adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH); + adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL); + adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH); + adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL); + adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH); + adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC); + adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC); + adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC); + adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC); + adapter->stats.crcerrs += 
IXGB_READ_REG(&adapter->hw, CRCERRS); + adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC); + adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC); + adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC); + adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL); + adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH); + adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL); + adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH); + adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL); + adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH); + adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL); + adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH); + adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL); + adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH); + adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL); + adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH); + adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL); + adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH); + adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL); + adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH); + adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL); + adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH); + adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC); + adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C); + adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC); + adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC); + adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC); + adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC); + adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC); + adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC); + adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC); + adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC); + adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC); + adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC); + adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC); + adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC); + adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC); + adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC); + + /* Fill out the OS statistics structure */ + + netdev->stats.rx_packets = adapter->stats.gprcl; + netdev->stats.tx_packets = adapter->stats.gptcl; + netdev->stats.rx_bytes = adapter->stats.gorcl; + netdev->stats.tx_bytes = adapter->stats.gotcl; + netdev->stats.multicast = adapter->stats.mprcl; + netdev->stats.collisions = 0; + + /* ignore RLEC as it reports errors for padded (<64bytes) frames + * with a length in the type/len field */ + netdev->stats.rx_errors = + /* adapter->stats.rnbc + */ adapter->stats.crcerrs + + adapter->stats.ruc + + adapter->stats.roc /*+ adapter->stats.rlec */ + + adapter->stats.icbc + + adapter->stats.ecbc + adapter->stats.mpc; + + /* see above + * netdev->stats.rx_length_errors = adapter->stats.rlec; + */ + + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_fifo_errors = adapter->stats.mpc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + netdev->stats.rx_over_errors = adapter->stats.mpc; + + netdev->stats.tx_errors = 0; + netdev->stats.rx_frame_errors = 0; + netdev->stats.tx_aborted_errors = 0; + netdev->stats.tx_carrier_errors = 0; + netdev->stats.tx_fifo_errors = 0; + netdev->stats.tx_heartbeat_errors = 0; + netdev->stats.tx_window_errors = 0; +} + +#define 
IXGB_MAX_INTR 10 +/** + * ixgb_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ + +static irqreturn_t +ixgb_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + u32 icr = IXGB_READ_REG(hw, ICR); + + if (unlikely(!icr)) + return IRQ_NONE; /* Not our interrupt */ + + if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) + if (!test_bit(__IXGB_DOWN, &adapter->flags)) + mod_timer(&adapter->watchdog_timer, jiffies); + + if (napi_schedule_prep(&adapter->napi)) { + + /* Disable interrupts and register for poll. The flush + of the posted write is intentionally left out. + */ + + IXGB_WRITE_REG(&adapter->hw, IMC, ~0); + __napi_schedule(&adapter->napi); + } + return IRQ_HANDLED; +} + +/** + * ixgb_clean - NAPI Rx polling callback + * @adapter: board private structure + **/ + +static int +ixgb_clean(struct napi_struct *napi, int budget) +{ + struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi); + int work_done = 0; + + ixgb_clean_tx_irq(adapter); + ixgb_clean_rx_irq(adapter, &work_done, budget); + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (!test_bit(__IXGB_DOWN, &adapter->flags)) + ixgb_irq_enable(adapter); + } + + return work_done; +} + +/** + * ixgb_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + **/ + +static bool +ixgb_clean_tx_irq(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + struct net_device *netdev = adapter->netdev; + struct ixgb_tx_desc *tx_desc, *eop_desc; + struct ixgb_buffer *buffer_info; + unsigned int i, eop; + bool cleaned = false; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = IXGB_TX_DESC(*tx_ring, eop); + + while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) { + + rmb(); /* read buffer_info after eop_desc */ + for (cleaned = false; !cleaned; ) { + tx_desc = IXGB_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + if (tx_desc->popts & + (IXGB_TX_DESC_POPTS_TXSM | + IXGB_TX_DESC_POPTS_IXSM)) + adapter->hw_csum_tx_good++; + + ixgb_unmap_and_free_tx_resource(adapter, buffer_info); + + *(u32 *)&(tx_desc->status) = 0; + + cleaned = (i == eop); + if (++i == tx_ring->count) i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = IXGB_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + if (unlikely(cleaned && netif_carrier_ok(netdev) && + IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
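+	 *
+	 * This pairs with the smp_mb() in __ixgb_maybe_stop_tx(); the
+	 * two-sided check guarantees one CPU sees the other's update:
+	 *
+	 *	xmit path                     clean path
+	 *	netif_stop_queue()            next_to_clean = i
+	 *	smp_mb()                      smp_mb()
+	 *	recheck free descriptors      recheck queue stopped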
*/ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__IXGB_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ) + && !(IXGB_READ_REG(&adapter->hw, STATUS) & + IXGB_STATUS_TXOFF)) { + /* detected Tx unit hang */ + netif_err(adapter, drv, adapter->netdev, + "Detected Tx Unit Hang\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + IXGB_READ_REG(&adapter->hw, TDH), + IXGB_READ_REG(&adapter->hw, TDT), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->status); + netif_stop_queue(netdev); + } + } + + return cleaned; +} + +/** + * ixgb_rx_checksum - Receive Checksum Offload for 82597. + * @adapter: board private structure + * @rx_desc: receive descriptor + * @sk_buff: socket buffer with received data + **/ + +static void +ixgb_rx_checksum(struct ixgb_adapter *adapter, + struct ixgb_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* Ignore Checksum bit is set OR + * TCP Checksum has not been calculated + */ + if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || + (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { + skb_checksum_none_assert(skb); + return; + } + + /* At this point we know the hardware did the TCP checksum */ + /* now look at the TCP checksum error bit */ + if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { + /* let the stack verify checksum errors */ + skb_checksum_none_assert(skb); + adapter->hw_csum_rx_error++; + } else { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_rx_good++; + } +} + +/* + * this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static void ixgb_check_copybreak(struct net_device *netdev, + struct ixgb_buffer *buffer_info, + u32 length, struct sk_buff **skb) +{ + struct sk_buff *new_skb; + + if (length > copybreak) + return; + + new_skb = netdev_alloc_skb_ip_align(netdev, length); + if (!new_skb) + return; + + skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, + (*skb)->data - NET_IP_ALIGN, + length + NET_IP_ALIGN); + /* save the skb in buffer_info as good */ + buffer_info->skb = *skb; + *skb = new_skb; +} + +/** + * ixgb_clean_rx_irq - Send received data up the network stack, + * @adapter: board private structure + **/ + +static bool +ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) +{ + struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct ixgb_rx_desc *rx_desc, *next_rxd; + struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; + u32 length; + unsigned int i, j; + int cleaned_count = 0; + bool cleaned = false; + + i = rx_ring->next_to_clean; + rx_desc = IXGB_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD 
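+	      * -- without the barrier the CPU could speculatively load
+	      * rx_desc->length or rx_desc->errors before observing the
+	      * Descriptor Done bit and see stale values; the safe
+	      * pattern is:
+	      *
+	      *	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
+	      *		rmb();
+	      *		length = le16_to_cpu(rx_desc->length);
+	      *		...
+	      *	}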
*/ + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + if (++i == rx_ring->count) + i = 0; + next_rxd = IXGB_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + j = i + 1; + if (j == rx_ring->count) + j = 0; + next2_buffer = &rx_ring->buffer_info[j]; + prefetch(next2_buffer); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + dma_unmap_single(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + rx_desc->length = 0; + + if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { + + /* All receives must fit into a single buffer */ + + IXGB_DBG("Receive packet consumed multiple buffers " + "length<%x>\n", length); + + dev_kfree_skb_irq(skb); + goto rxdesc_done; + } + + if (unlikely(rx_desc->errors & + (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | + IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) { + dev_kfree_skb_irq(skb); + goto rxdesc_done; + } + + ixgb_check_copybreak(netdev, buffer_info, length, &skb); + + /* Good Receive */ + skb_put(skb, length); + + /* Receive Checksum Offload */ + ixgb_rx_checksum(adapter, rx_desc, skb); + + skb->protocol = eth_type_trans(skb, netdev); + if (status & IXGB_RX_DESC_STATUS_VP) + __vlan_hwaccel_put_tag(skb, + le16_to_cpu(rx_desc->special)); + + netif_receive_skb(skb); + +rxdesc_done: + /* clean up descriptor, might be written over by hw */ + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) { + ixgb_alloc_rx_buffers(adapter, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + + rx_ring->next_to_clean = i; + + cleaned_count = IXGB_DESC_UNUSED(rx_ring); + if (cleaned_count) + ixgb_alloc_rx_buffers(adapter, cleaned_count); + + return cleaned; +} + +/** + * ixgb_alloc_rx_buffers - Replace used receive buffers + * @adapter: address of board private structure + **/ + +static void +ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count) +{ + struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct ixgb_rx_desc *rx_desc; + struct ixgb_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + long cleancount; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + cleancount = IXGB_DESC_UNUSED(rx_ring); + + + /* leave three descriptors unused */ + while (--cleancount > 2 && cleaned_count--) { + /* recycle! its good for you */ + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + rx_desc = IXGB_RX_DESC(*rx_ring, i); + rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); + /* guarantee DD bit not set now before h/w gets descriptor + * this is the rest of the workaround for h/w double + * writeback. 
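+	 *
+	 * Hence the refill order below: publish the buffer address
+	 * first, clear status last, so a stale DD bit can never make a
+	 * freshly recycled descriptor look complete:
+	 *
+	 *	rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
+	 *	rx_desc->status = 0;   -- hardware may now own it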
*/ + rx_desc->status = 0; + + + if (++i == rx_ring->count) i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, such + * as IA-64). */ + wmb(); + IXGB_WRITE_REG(&adapter->hw, RDT, i); + } +} + +static void +ixgb_vlan_strip_enable(struct ixgb_adapter *adapter) +{ + u32 ctrl; + + /* enable VLAN tag insert/strip */ + ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); + ctrl |= IXGB_CTRL0_VME; + IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); +} + +static void +ixgb_vlan_strip_disable(struct ixgb_adapter *adapter) +{ + u32 ctrl; + + /* disable VLAN tag insert/strip */ + ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); + ctrl &= ~IXGB_CTRL0_VME; + IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); +} + +static void +ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + u32 vfta, index; + + /* add VID to filter table */ + + index = (vid >> 5) & 0x7F; + vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + ixgb_write_vfta(&adapter->hw, index, vfta); + set_bit(vid, adapter->active_vlans); +} + +static void +ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + u32 vfta, index; + + /* remove VID from filter table */ + + index = (vid >> 5) & 0x7F; + vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + ixgb_write_vfta(&adapter->hw, index, vfta); + clear_bit(vid, adapter->active_vlans); +} + +static void +ixgb_restore_vlan(struct ixgb_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ixgb_vlan_rx_add_vid(adapter->netdev, vid); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ + +static void ixgb_netpoll(struct net_device *dev) +{ + struct ixgb_adapter *adapter = netdev_priv(dev); + + disable_irq(adapter->pdev->irq); + ixgb_intr(adapter->pdev->irq, dev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * ixgb_io_error_detected() - called when PCI error is detected + * @pdev pointer to pci device with error + * @state pci channel state after error + * + * This callback is called by the PCI subsystem whenever + * a PCI bus error is detected. + */ +static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev, + enum pci_channel_state state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgb_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + ixgb_down(adapter, true); + + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ixgb_io_slot_reset - called after the pci bus has been reset. + * @pdev pointer to pci device with error + * + * This callback is called after the PCI bus has been reset. + * Basically, this tries to restart the card from scratch. + * This is a shortened version of the device probe/discovery code, + * it resembles the first-half of the ixgb_probe() routine. 
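+ *
+ * Within the AER recovery flow, the three callbacks this driver
+ * implements run in sequence (when recovery succeeds):
+ *
+ *	ixgb_io_error_detected()  -- detach netdev, ixgb_down(), disable
+ *	ixgb_io_slot_reset()      -- re-enable, reset, revalidate EEPROM/MAC
+ *	ixgb_io_resume()          -- ixgb_up() if running, reattach netdev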
+ */ +static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgb_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + netif_err(adapter, probe, adapter->netdev, + "Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* Perform card reset only on one instance of the card */ + if (0 != PCI_FUNC (pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + + pci_set_master(pdev); + + netif_carrier_off(netdev); + netif_stop_queue(netdev); + ixgb_reset(adapter); + + /* Make sure the EEPROM is good */ + if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { + netif_err(adapter, probe, adapter->netdev, + "After reset, the EEPROM checksum is not valid\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + netif_err(adapter, probe, adapter->netdev, + "After reset, invalid MAC address\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * ixgb_io_resume - called when its OK to resume normal operations + * @pdev pointer to pci device with error + * + * The error recovery driver tells us that its OK to resume + * normal operation. Implementation resembles the second-half + * of the ixgb_probe() routine. + */ +static void ixgb_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgb_adapter *adapter = netdev_priv(netdev); + + pci_set_master(pdev); + + if (netif_running(netdev)) { + if (ixgb_up(adapter)) { + pr_err("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); + mod_timer(&adapter->watchdog_timer, jiffies); +} + +/* ixgb_main.c */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h new file mode 100644 index 0000000..e361185 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h @@ -0,0 +1,63 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* glue for the OS independent part of ixgb
+ * includes register access macros
+ */
+
+#ifndef _IXGB_OSDEP_H_
+#define _IXGB_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+
+#undef ASSERT
+#define ASSERT(x)	BUG_ON(!(x))
+
+#define ENTER() pr_debug("%s\n", __func__);
+
+#define IXGB_WRITE_REG(a, reg, value) ( \
+	writel((value), ((a)->hw_addr + IXGB_##reg)))
+
+#define IXGB_READ_REG(a, reg) ( \
+	readl((a)->hw_addr + IXGB_##reg))
+
+#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+	writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
+
+#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \
+	readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
+
+#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS)
+
+#define IXGB_MEMCPY memcpy
+
+#endif /* _IXGB_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
new file mode 100644
index 0000000..dd7fbeb
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -0,0 +1,469 @@
+/*******************************************************************************
+
+  Intel PRO/10GbE Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ixgb.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define IXGB_MAX_NIC 8
+
+#define OPTION_UNSET	-1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define IXGB_PARAM_INIT { [0 ...
IXGB_MAX_NIC] = OPTION_UNSET } +#define IXGB_PARAM(X, desc) \ + static int __devinitdata X[IXGB_MAX_NIC+1] \ + = IXGB_PARAM_INIT; \ + static unsigned int num_##X = 0; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 64-4096 + * + * Default Value: 256 + */ + +IXGB_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 64-4096 + * + * Default Value: 1024 + */ + +IXGB_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: 2 - Tx only (silicon bug avoidance) + */ + +IXGB_PARAM(FlowControl, "Flow Control setting"); + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82597 based NICs + * + * Default Value: 1 + */ + +IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 0.8192 microseconds + * + * Valid Range: 0-65535 + * + * Default Value: 32 + */ + +IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay"); + +/* Receive Interrupt Delay in units of 0.8192 microseconds + * + * Valid Range: 0-65535 + * + * Default Value: 72 + */ + +IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay"); + +/* Receive Flow control high threshold (when we send a pause frame) + * (FCRTH) + * + * Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity) + * + * Default Value: 196,608 (0x30000) + */ + +IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold"); + +/* Receive Flow control low threshold (when we send a resume frame) + * (FCRTL) + * + * Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity) + * must be less than high threshold by at least 8 bytes + * + * Default Value: 163,840 (0x28000) + */ + +IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold"); + +/* Flow control request timeout (how long to pause the link partner's tx) + * (PAP 15:0) + * + * Valid Range: 1 - 65535 + * + * Default Value: 65535 (0xffff) (we'll send an xon if we recover) + */ + +IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout"); + +/* Interrupt Delay Enable + * + * Valid Range: 0, 1 + * + * - 0 - disables transmit interrupt delay + * - 1 - enables transmmit interrupt delay + * + * Default Value: 1 + */ + +IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable"); + + +#define DEFAULT_TIDV 32 +#define MAX_TIDV 0xFFFF +#define MIN_TIDV 0 + +#define DEFAULT_RDTR 72 +#define MAX_RDTR 0xFFFF +#define MIN_RDTR 0 + +#define XSUMRX_DEFAULT OPTION_ENABLED + +#define DEFAULT_FCRTL 0x28000 +#define DEFAULT_FCRTH 0x30000 +#define MIN_FCRTL 0 +#define MAX_FCRTL 0x3FFE8 +#define MIN_FCRTH 8 +#define MAX_FCRTH 0x3FFF0 + +#define MIN_FCPAUSE 1 +#define MAX_FCPAUSE 0xffff +#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */ + +struct ixgb_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct ixgb_opt_list { + int i; + const char *str; + } *p; + } l; + } arg; +}; + +static int __devinit +ixgb_validate_option(unsigned 
int *value, const struct ixgb_option *opt) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + pr_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + pr_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + pr_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + const struct ixgb_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + pr_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * ixgb_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ + +void __devinit +ixgb_check_options(struct ixgb_adapter *adapter) +{ + int bd = adapter->bd_number; + if (bd >= IXGB_MAX_NIC) { + pr_notice("Warning: no configuration for board #%i\n", bd); + pr_notice("Using defaults for all values\n"); + } + + { /* Transmit Descriptor Count */ + const struct ixgb_option opt = { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " __MODULE_STRING(DEFAULT_TXD), + .def = DEFAULT_TXD, + .arg = { .r = { .min = MIN_TXD, + .max = MAX_TXD}} + }; + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + ixgb_validate_option(&tx_ring->count, &opt); + } else { + tx_ring->count = opt.def; + } + tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); + } + { /* Receive Descriptor Count */ + const struct ixgb_option opt = { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " __MODULE_STRING(DEFAULT_RXD), + .def = DEFAULT_RXD, + .arg = { .r = { .min = MIN_RXD, + .max = MAX_RXD}} + }; + struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + ixgb_validate_option(&rx_ring->count, &opt); + } else { + rx_ring->count = opt.def; + } + rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); + } + { /* Receive Checksum Offload Enable */ + const struct ixgb_option opt = { + .type = enable_option, + .name = "Receive Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + ixgb_validate_option(&rx_csum, &opt); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct ixgb_opt_list fc_list[] = { + { ixgb_fc_none, "Flow Control Disabled" }, + { ixgb_fc_rx_pause, "Flow Control Receive Only" }, + { ixgb_fc_tx_pause, "Flow Control Transmit Only" }, + { ixgb_fc_full, "Flow Control Enabled" }, + { ixgb_fc_default, "Flow Control Hardware Default" } + }; + + static const struct ixgb_option opt = { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = ixgb_fc_tx_pause, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), 
+ .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + ixgb_validate_option(&fc, &opt); + adapter->hw.fc.type = fc; + } else { + adapter->hw.fc.type = opt.def; + } + } + { /* Receive Flow Control High Threshold */ + const struct ixgb_option opt = { + .type = range_option, + .name = "Rx Flow Control High Threshold", + .err = "using default of " __MODULE_STRING(DEFAULT_FCRTH), + .def = DEFAULT_FCRTH, + .arg = { .r = { .min = MIN_FCRTH, + .max = MAX_FCRTH}} + }; + + if (num_RxFCHighThresh > bd) { + adapter->hw.fc.high_water = RxFCHighThresh[bd]; + ixgb_validate_option(&adapter->hw.fc.high_water, &opt); + } else { + adapter->hw.fc.high_water = opt.def; + } + if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) + pr_info("Ignoring RxFCHighThresh when no RxFC\n"); + } + { /* Receive Flow Control Low Threshold */ + const struct ixgb_option opt = { + .type = range_option, + .name = "Rx Flow Control Low Threshold", + .err = "using default of " __MODULE_STRING(DEFAULT_FCRTL), + .def = DEFAULT_FCRTL, + .arg = { .r = { .min = MIN_FCRTL, + .max = MAX_FCRTL}} + }; + + if (num_RxFCLowThresh > bd) { + adapter->hw.fc.low_water = RxFCLowThresh[bd]; + ixgb_validate_option(&adapter->hw.fc.low_water, &opt); + } else { + adapter->hw.fc.low_water = opt.def; + } + if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) + pr_info("Ignoring RxFCLowThresh when no RxFC\n"); + } + { /* Flow Control Pause Time Request*/ + const struct ixgb_option opt = { + .type = range_option, + .name = "Flow Control Pause Time Request", + .err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE), + .def = DEFAULT_FCPAUSE, + .arg = { .r = { .min = MIN_FCPAUSE, + .max = MAX_FCPAUSE}} + }; + + if (num_FCReqTimeout > bd) { + unsigned int pause_time = FCReqTimeout[bd]; + ixgb_validate_option(&pause_time, &opt); + adapter->hw.fc.pause_time = pause_time; + } else { + adapter->hw.fc.pause_time = opt.def; + } + if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) + pr_info("Ignoring FCReqTimeout when no RxFC\n"); + } + /* high low and spacing check for rx flow control thresholds */ + if (adapter->hw.fc.type & ixgb_fc_tx_pause) { + /* high must be greater than low */ + if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { + /* set defaults */ + pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n"); + adapter->hw.fc.high_water = DEFAULT_FCRTH; + adapter->hw.fc.low_water = DEFAULT_FCRTL; + } + } + { /* Receive Interrupt Delay */ + const struct ixgb_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RDTR, + .max = MAX_RDTR}} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + ixgb_validate_option(&adapter->rx_int_delay, &opt); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Transmit Interrupt Delay */ + const struct ixgb_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TIDV, + .max = MAX_TIDV}} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + ixgb_validate_option(&adapter->tx_int_delay, &opt); + } else { + adapter->tx_int_delay = opt.def; + } + } + + { /* Transmit Interrupt Delay Enable */ + const struct ixgb_option opt = { + .type = enable_option, + .name = "Tx Interrupt Delay Enable", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + 
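+	/*
+	 * As with every option above, the module parameter is an array
+	 * indexed by board number, so a hypothetical two-port load
+	 * could be tuned per port:
+	 *
+	 *	modprobe ixgb IntDelayEnable=1,0
+	 *
+	 * Board bd uses IntDelayEnable[bd] when num_IntDelayEnable > bd,
+	 * and falls back to opt.def otherwise.
+	 */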
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
new file mode 100644
index 0000000..7d7387f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -0,0 +1,42 @@
+################################################################################
+#
+# Intel 10 Gigabit PCI Express Linux driver
+# Copyright(c) 1999 - 2010 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_IXGBE) += ixgbe.o
+
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+              ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+              ixgbe_mbx.o ixgbe_x540.o
+
+ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
+                             ixgbe_dcb_82599.o ixgbe_dcb_nl.o
+
+ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
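One line in this Makefile deserves a note: $(CONFIG_FCOE:m=y) is a GNU make substitution reference that rewrites a value of "m" to "y" when the variable expands, and kbuild only links objects listed in ixgbe-y. The net effect, sketched below for illustration (not part of the patch), is that ixgbe_fcoe.o is linked into the ixgbe object whenever FCoE support is enabled at all, built-in or modular:

    # Expansion of the kbuild idiom above:
    #   CONFIG_FCOE=y      ->  ixgbe-y += ixgbe_fcoe.o   (linked)
    #   CONFIG_FCOE=m      ->  ixgbe-y += ixgbe_fcoe.o   (m rewritten to y, linked)
    #   CONFIG_FCOE unset  ->  ixgbe-  += ixgbe_fcoe.o   (ignored by kbuild)
    ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o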
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
new file mode 100644
index 0000000..e04a8e4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -0,0 +1,617 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_H_
+#define _IXGBE_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/cpumask.h>
+#include <linux/aer.h>
+#include <linux/if_vlan.h>
+
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_dcb.h"
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#define IXGBE_FCOE
+#include "ixgbe_fcoe.h"
+#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#ifdef CONFIG_IXGBE_DCA
+#include <linux/dca.h>
+#endif
+
+/* common prefix used by pr_<> macros */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/* TX/RX descriptor defines */
+#define IXGBE_DEFAULT_TXD	512
+#define IXGBE_MAX_TXD		4096
+#define IXGBE_MIN_TXD		64
+
+#define IXGBE_DEFAULT_RXD	512
+#define IXGBE_MAX_RXD		4096
+#define IXGBE_MIN_RXD		64
+
+/* flow control */
+#define IXGBE_MIN_FCRTL		0x40
+#define IXGBE_MAX_FCRTL		0x7FF80
+#define IXGBE_MIN_FCRTH		0x600
+#define IXGBE_MAX_FCRTH		0x7FFF0
+#define IXGBE_DEFAULT_FCPAUSE	0xFFFF
+#define IXGBE_MIN_FCPAUSE	0
+#define IXGBE_MAX_FCPAUSE	0xFFFF
+
+/* Supported Rx Buffer Sizes */
+#define IXGBE_RXBUFFER_512	512    /* Used for packet split */
+#define IXGBE_RXBUFFER_2048	2048
+#define IXGBE_RXBUFFER_4096	4096
+#define IXGBE_RXBUFFER_8192	8192
+#define IXGBE_MAX_RXBUFFER	16384  /* largest size for a single descriptor */
+
+/*
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
+ * this adds up to 512 bytes of extra data meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
+#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
+#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
+#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 5)
+#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
+
+#define IXGBE_MAX_RSC_INT_RATE		162760
+
+#define IXGBE_MAX_VF_MC_ENTRIES		30
+#define IXGBE_MAX_VF_FUNCTIONS		64
+#define IXGBE_MAX_VFTA_ENTRIES		128
+#define MAX_EMULATION_MAC_ADDRS		16
+#define IXGBE_MAX_PF_MACVLANS		15
+#define VMDQ_P(p)	((p) + adapter->num_vfs)
+
+struct vf_data_storage {
+	unsigned char vf_mac_addresses[ETH_ALEN];
+	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+	u16 num_vf_mc_hashes;
+	u16 default_vf_vlan_id;
+	u16 vlans_enabled;
+	bool clear_to_send;
+	bool pf_set_mac;
+	u16 pf_vlan; /* When set, guest VLAN config not allowed.
*/ + u16 pf_qos; + u16 tx_rate; +}; + +struct vf_macvlans { + struct list_head l; + int vf; + int rar_entry; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +#define IXGBE_MAX_TXD_PWR 14 +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ixgbe_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + unsigned int bytecount; + u16 gso_segs; + u8 mapped_as_page; +}; + +struct ixgbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + dma_addr_t page_dma; + unsigned int page_offset; +}; + +struct ixgbe_queue_stats { + u64 packets; + u64 bytes; +}; + +struct ixgbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 completed; + u64 tx_done_old; +}; + +struct ixgbe_rx_queue_stats { + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; +}; + +enum ixbge_ring_state_t { + __IXGBE_TX_FDIR_INIT_DONE, + __IXGBE_TX_DETECT_HANG, + __IXGBE_HANG_CHECK_ARMED, + __IXGBE_RX_PS_ENABLED, + __IXGBE_RX_RSC_ENABLED, +}; + +#define ring_is_ps_enabled(ring) \ + test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) +#define set_ring_ps_enabled(ring) \ + set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) +#define clear_ring_ps_enabled(ring) \ + clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define ring_is_rsc_enabled(ring) \ + test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define set_ring_rsc_enabled(ring) \ + set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define clear_ring_rsc_enabled(ring) \ + clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +struct ixgbe_ring { + void *desc; /* descriptor ring memory */ + struct device *dev; /* device for DMA mapping */ + struct net_device *netdev; /* netdev ring belongs to */ + union { + struct ixgbe_tx_buffer *tx_buffer_info; + struct ixgbe_rx_buffer *rx_buffer_info; + }; + unsigned long state; + u8 __iomem *tail; + + u16 count; /* amount of descriptors */ + u16 rx_buf_len; + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u8 atr_sample_rate; + u8 atr_count; + + u16 next_to_use; + u16 next_to_clean; + + u8 dcb_tc; + struct ixgbe_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct ixgbe_tx_queue_stats tx_stats; + struct ixgbe_rx_queue_stats rx_stats; + }; + int numa_node; + unsigned int size; /* length in bytes */ + dma_addr_t dma; /* phys. 
address of descriptor ring */ + struct rcu_head rcu; + struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */ +} ____cacheline_internodealigned_in_smp; + +enum ixgbe_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_FDIR, +#ifdef IXGBE_FCOE + RING_F_FCOE, +#endif /* IXGBE_FCOE */ + + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define IXGBE_MAX_RSS_INDICES 16 +#define IXGBE_MAX_VMDQ_INDICES 64 +#define IXGBE_MAX_FDIR_INDICES 64 +#ifdef IXGBE_FCOE +#define IXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#else +#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES +#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES +#endif /* IXGBE_FCOE */ +struct ixgbe_ring_feature { + int indices; + int mask; +} ____cacheline_internodealigned_in_smp; + +struct ixgbe_ring_container { +#if MAX_RX_QUEUES > MAX_TX_QUEUES + DECLARE_BITMAP(idx, MAX_RX_QUEUES); +#else + DECLARE_BITMAP(idx, MAX_TX_QUEUES); +#endif + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct ixgbe_q_vector { + struct ixgbe_adapter *adapter; + unsigned int v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ +#ifdef CONFIG_IXGBE_DCA + int cpu; /* CPU for DCA */ +#endif + struct napi_struct napi; + struct ixgbe_ring_container rx, tx; + u32 eitr; + cpumask_var_t affinity_mask; + char name[IFNAMSIZ + 9]; +}; + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways. The lowest value + * supported by all of the ixgbe hardware is 8. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +#define IXGBE_RX_DESC_ADV(R, i) \ + (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) +#define IXGBE_TX_DESC_ADV(R, i) \ + (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) +#define IXGBE_TX_CTXTDESC_ADV(R, i) \ + (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) + +#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 +#ifdef IXGBE_FCOE +/* Use 3K as the baby jumbo frame size for FCoE */ +#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 +#endif /* IXGBE_FCOE */ + +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) + +#define MAX_MSIX_VECTORS_82599 64 +#define MAX_MSIX_Q_VECTORS_82599 64 +#define MAX_MSIX_VECTORS_82598 18 +#define MAX_MSIX_Q_VECTORS_82598 16 + +#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599 +#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 + +#define MIN_MSIX_Q_VECTORS 2 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* board specific private data structure */ +struct ixgbe_adapter { + unsigned long state; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. + */ + u32 flags; +#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) +#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1) +#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2) +#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3) +#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4) +#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6) +#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7) +#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8) +#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9) +#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11) +#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12) +#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13) +#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14) +#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16) +#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17) +#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18) +#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20) +#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22) +#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25) +#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26) +#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27) +#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28) +#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29) + + u32 flags2; +#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) +#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) +#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3) +#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4) +#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5) +#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6) +#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 bd_number; + struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + + /* DCB parameters */ + struct ieee_pfc *ixgbe_ieee_pfc; + struct ieee_ets *ixgbe_ieee_ets; + struct ixgbe_dcb_config dcb_cfg; + struct ixgbe_dcb_config temp_dcb_cfg; + u8 dcb_set_bitmap; + u8 dcbx_cap; + enum ixgbe_fc_mode last_lfc_mode; + + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 eitr_low; + u16 eitr_high; + + /* Work limits */ + u16 tx_work_limit; + + /* TX */ + struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + int num_tx_queues; + u32 tx_timeout_count; + bool detect_tx_hung; + + u64 restart_queue; + u64 lsc_int; + + /* RX 
*/ + struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp; + int num_rx_queues; + int num_rx_pools; /* == num_rx_queues in 82598 */ + int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ + u64 hw_csum_rx_error; + u64 hw_rx_no_dma_resources; + u64 non_eop_descs; + int num_msix_vectors; + int max_msix_q_vectors; /* true count of q_vectors for device */ + struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + +/* default to trying for four seconds */ +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + u32 test_icr; + struct ixgbe_ring test_tx_ring; + struct ixgbe_ring test_rx_ring; + + /* structs defined in ixgbe_hw.h */ + struct ixgbe_hw hw; + u16 msg_enable; + struct ixgbe_hw_stats stats; + + /* Interrupt Throttle Rate */ + u32 rx_eitr_param; + u32 tx_eitr_param; + + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + bool link_up; + unsigned long link_check_timeout; + + struct work_struct service_task; + struct timer_list service_timer; + u32 fdir_pballoc; + u32 atr_sample_rate; + unsigned long fdir_overflow; /* number of times ATR was backed off */ + spinlock_t fdir_perfect_lock; +#ifdef IXGBE_FCOE + struct ixgbe_fcoe fcoe; +#endif /* IXGBE_FCOE */ + u64 rsc_total_count; + u64 rsc_total_flush; + u32 wol; + u16 eeprom_version; + + int node; + u32 led_reg; + u32 interrupt_event; + char lsc_int_name[IFNAMSIZ + 9]; + + /* SR-IOV */ + DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + int vf_rate_link_speed; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + bool antispoofing_enabled; + + struct hlist_head fdir_filter_list; + union ixgbe_atr_input fdir_mask; + int fdir_filter_count; +}; + +struct ixgbe_fdir_filter { + struct hlist_node fdir_node; + union ixgbe_atr_input filter; + u16 sw_idx; + u16 action; +}; + +enum ixbge_state_t { + __IXGBE_TESTING, + __IXGBE_RESETTING, + __IXGBE_DOWN, + __IXGBE_SERVICE_SCHED, + __IXGBE_IN_SFP_INIT, +}; + +struct ixgbe_rsc_cb { + dma_addr_t dma; + u16 skb_cnt; + bool delay_unmap; +}; +#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) + +enum ixgbe_boards { + board_82598, + board_82599, + board_X540, +}; + +extern struct ixgbe_info ixgbe_82598_info; +extern struct ixgbe_info ixgbe_82599_info; +extern struct ixgbe_info ixgbe_X540_info; +#ifdef CONFIG_IXGBE_DCB +extern const struct dcbnl_rtnl_ops dcbnl_ops; +extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, + struct ixgbe_dcb_config *dst_dcb_cfg, + int tc_max); +#endif + +extern char ixgbe_driver_name[]; +extern const char ixgbe_driver_version[]; + +extern int ixgbe_up(struct ixgbe_adapter *adapter); +extern void ixgbe_down(struct ixgbe_adapter *adapter); +extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); +extern void ixgbe_reset(struct ixgbe_adapter *adapter); +extern void ixgbe_set_ethtool_ops(struct net_device *netdev); +extern int ixgbe_setup_rx_resources(struct ixgbe_ring *); +extern int ixgbe_setup_tx_resources(struct ixgbe_ring *); +extern void ixgbe_free_rx_resources(struct ixgbe_ring *); +extern void ixgbe_free_tx_resources(struct ixgbe_ring *); +extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); +extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); +extern void 
ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *);
+extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
+					 struct ixgbe_adapter *,
+					 struct ixgbe_ring *);
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+					     struct ixgbe_tx_buffer *);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
+extern int ethtool_ioctl(struct ifreq *ifr);
+extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+						 union ixgbe_atr_hash_dword input,
+						 union ixgbe_atr_hash_dword common,
+						 u8 queue);
+extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+					   union ixgbe_atr_input *input_mask);
+extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+						 union ixgbe_atr_input *input,
+						 u16 soft_id, u8 queue);
+extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+						 union ixgbe_atr_input *input,
+						 u16 soft_id);
+extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+						 union ixgbe_atr_input *mask);
+extern void ixgbe_set_rx_mode(struct net_device *netdev);
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+extern void ixgbe_do_reset(struct net_device *netdev);
+#ifdef IXGBE_FCOE
+extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+		     u32 tx_flags, u8 *hdr_len);
+extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
+extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+			  union ixgbe_adv_rx_desc *rx_desc,
+			  struct sk_buff *skb,
+			  u32 staterr);
+extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+			      struct scatterlist *sgl, unsigned int sgc);
+extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+				 struct scatterlist *sgl, unsigned int sgc);
+extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+extern int ixgbe_fcoe_enable(struct net_device *netdev);
+extern int ixgbe_fcoe_disable(struct net_device *netdev);
+#ifdef CONFIG_IXGBE_DCB
+extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+#endif /* CONFIG_IXGBE_DCB */
+extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+#endif /* IXGBE_FCOE */
+
+#endif /* _IXGBE_H_ */
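Before moving on to the 82598-specific code, a quick arithmetic check of the ixgbe_desc_unused() helper defined earlier in this header. The ring is circular, and one slot is deliberately held back so that next_to_use == next_to_clean can unambiguously mean "empty". A standalone sketch of the same arithmetic (illustration only, not driver code):

    #include <assert.h>

    /* Mirror of ixgbe_desc_unused()'s arithmetic, for illustration. */
    static unsigned int desc_unused(unsigned int count,
                                    unsigned int ntc, unsigned int ntu)
    {
            return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
    }

    int main(void)
    {
            assert(desc_unused(512, 5, 10) == 506); /* clean pointer behind use */
            assert(desc_unused(512, 10, 5) == 4);   /* use wrapped past clean   */
            assert(desc_unused(512, 7, 7) == 511);  /* empty ring: one slot is
                                                       always sacrificed        */
            return 0;
    }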
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
new file mode 100644
index 0000000..0d4e382
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -0,0 +1,1353 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_82598_MAX_TX_QUEUES 32
+#define IXGBE_82598_MAX_RX_QUEUES 64
+#define IXGBE_82598_RAR_ENTRIES   16
+#define IXGBE_82598_MC_TBL_SIZE  128
+#define IXGBE_82598_VFT_TBL_SIZE 128
+#define IXGBE_82598_RX_PB_SIZE	 512
+
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+					 ixgbe_link_speed speed,
+					 bool autoneg,
+					 bool autoneg_wait_to_complete);
+static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+				       u8 *eeprom_data);
+
+/**
+ *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
+ *  @hw: pointer to the HW structure
+ *
+ *  The defaults for 82598 should be in the range of 50us to 50ms,
+ *  however the hardware default for these parts is 500us to 1ms which is less
+ *  than the 10ms recommended by the pci-e spec.  To address this we need to
+ *  increase the value to either 10ms to 250ms for capability version 1 config,
+ *  or 16ms to 55ms for version 2.
+ **/
+static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
+	u16 pcie_devctl2;
+
+	/* only take action if timeout value is defaulted to 0 */
+	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
+		goto out;
+
+	/*
+	 * if capabilities version is type 1 we can write the
+	 * timeout of 10ms to 250ms through the GCR register
+	 */
+	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
+		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
+		goto out;
+	}
+
+	/*
+	 * for version 2 capabilities we need to write the config space
+	 * directly in order to set the completion timeout value for
+	 * 16ms to 55ms
+	 */
+	pci_read_config_word(adapter->pdev,
+			     IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
+	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+	pci_write_config_word(adapter->pdev,
+			      IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+out:
+	/* disable completion timeout resend */
+	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
+	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
+}
+
+/**
+ *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
+ *  @hw: pointer to hardware structure
+ *
+ *  Read PCIe configuration space, and get the MSI-X vector count from
+ *  the capabilities table.
+ **/ +static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + u16 msix_count; + pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS, + &msix_count); + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW, so increment to give proper value */ + msix_count++; + + return msix_count; +} + +/** + */ +static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* Call PHY identify routine to get the phy type */ + ixgbe_identify_phy_generic(hw); + + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); + + return 0; +} + +/** + * ixgbe_init_phy_ops_82598 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during get_invariants because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 list_offset, data_offset; + + /* Identify the PHY */ + phy->ops.identify(hw); + + /* Overwrite the link function pointers if copper PHY */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82598; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_generic; + } + + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + &ixgbe_get_phy_firmware_version_tnx; + break; + case ixgbe_phy_nl: + phy->ops.reset = &ixgbe_reset_phy_nl; + + /* Call SFP+ identify routine to get the SFP+ module type */ + ret_val = phy->ops.identify_sfp(hw); + if (ret_val != 0) + goto out; + else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + /* Check to see if SFP+ module is supported */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, + &list_offset, + &data_offset); + if (ret_val != 0) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + break; + default: + break; + } + +out: + return ret_val; +} + +/** + * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function. 
+ *  Disables relaxed ordering, then sets the PCIe completion timeout.
+ *
+ **/
+static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+{
+	u32 regval;
+	u32 i;
+	s32 ret_val = 0;
+
+	ret_val = ixgbe_start_hw_generic(hw);
+
+	/* Disable relaxed ordering */
+	for (i = 0; ((i < hw->mac.max_tx_queues) &&
+	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+	}
+
+	for (i = 0; ((i < hw->mac.max_rx_queues) &&
+	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+	}
+
+	hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
+
+	/* set the completion timeout for interface */
+	if (ret_val == 0)
+		ixgbe_set_pcie_completion_timeout(hw);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: boolean auto-negotiation value
+ *
+ *  Determines the link capabilities by reading the AUTOC register.
+ **/
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+					     ixgbe_link_speed *speed,
+					     bool *autoneg)
+{
+	s32 status = 0;
+	u32 autoc = 0;
+
+	/*
+	 * Determine link capabilities based on the stored value of AUTOC,
+	 * which represents EEPROM defaults.  If AUTOC value has not been
+	 * stored, use the current register value.
+	 */
+	if (hw->mac.orig_link_settings_stored)
+		autoc = hw->mac.orig_autoc;
+	else
+		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = false;
+		break;
+
+	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		*autoneg = false;
+		break;
+
+	case IXGBE_AUTOC_LMS_1G_AN:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		break;
+
+	case IXGBE_AUTOC_LMS_KX4_AN:
+	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
+			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
+			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		break;
+
+	default:
+		status = IXGBE_ERR_LINK_SETUP;
+		break;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_media_type_82598 - Determines media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+{
+	enum ixgbe_media_type media_type;
+
+	/* Detect if there is a copper PHY attached.
*/ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + case ixgbe_phy_aq: + media_type = ixgbe_media_type_copper; + goto out; + default: + break; + } + + /* Media type for I82598 is based on device ID */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + /* Default device ID is mezzanine card KX/KX4 */ + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + media_type = ixgbe_media_type_cx4; + break; + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } +out: + return media_type; +} + +/** + * ixgbe_fc_enable_82598 - Enable flow control + * @hw: pointer to hardware structure + * @packetbuf_num: packet buffer number (0-7) + * + * Enable flow control according to the current settings. + **/ +static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) +{ + s32 ret_val = 0; + u32 fctrl_reg; + u32 rmcs_reg; + u32 reg; + u32 rx_pba_size; + u32 link_speed = 0; + bool link_up; + +#ifdef CONFIG_DCB + if (hw->fc.requested_mode == ixgbe_fc_pfc) + goto out; + +#endif /* CONFIG_DCB */ + /* + * On 82598 having Rx FC on causes resets while doing 1G + * so if it's on turn it off once we know link_speed. For + * more details see 82598 Specification update. + */ + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + hw->fc.requested_mode = ixgbe_fc_tx_pause; + break; + case ixgbe_fc_rx_pause: + hw->fc.requested_mode = ixgbe_fc_none; + break; + default: + /* no change */ + break; + } + } + + /* Negotiate the fc mode to use */ + ret_val = ixgbe_fc_autoneg(hw); + if (ret_val == IXGBE_ERR_FLOW_CONTROL) + goto out; + + /* Disable any previous flow control settings */ + fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. +#ifdef CONFIG_DCB + * 4: Priority Flow Control is enabled. +#endif + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. 
+ */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; +#ifdef CONFIG_DCB + case ixgbe_fc_pfc: + goto out; + break; +#endif /* CONFIG_DCB */ + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + fctrl_reg |= IXGBE_FCTRL_DPF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + if (hw->fc.current_mode & ixgbe_fc_tx_pause) { + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); + rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; + + reg = (rx_pba_size - hw->fc.low_water) << 6; + if (hw->fc.send_xon) + reg |= IXGBE_FCRTL_XONE; + + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg); + + reg = (rx_pba_size - hw->fc.high_water) << 6; + reg |= IXGBE_FCRTH_FCEN; + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg); + } + + /* Configure pause time (2 TCs per register) */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); + if ((packetbuf_num & 1) == 0) + reg = (reg & 0xFFFF0000) | hw->fc.pause_time; + else + reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); + + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + +out: + return ret_val; +} + +/** + * ixgbe_start_mac_link_82598 - Configures MAC link settings + * @hw: pointer to hardware structure + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. + **/ +static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = 0; + + /* Restart link */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msleep(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + hw_dbg(hw, "Autonegotiation did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msleep(50); + + return status; +} + +/** + * ixgbe_validate_link_ready - Function looks for phy link + * @hw: pointer to hardware structure + * + * Function indicates success when phy link is available. If phy is not ready + * within 5 seconds of MAC indicating link, the function returns error. 
+ **/ +static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) +{ + u32 timeout; + u16 an_reg; + + if (hw->device_id != IXGBE_DEV_ID_82598AT2) + return 0; + + for (timeout = 0; + timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { + hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg); + + if ((an_reg & MDIO_AN_STAT1_COMPLETE) && + (an_reg & MDIO_STAT1_LSTATUS)) + break; + + msleep(100); + } + + if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { + hw_dbg(hw, "Link was indicated but link is down\n"); + return IXGBE_ERR_LINK_SETUP; + } + + return 0; +} + +/** + * ixgbe_check_mac_link_82598 - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete) +{ + u32 links_reg; + u32 i; + u16 link_reg, adapt_comp_reg; + + /* + * SERDES PHY requires us to read link status from register 0xC79F. + * Bit 0 set indicates link is up/ready; clear indicates link down. + * 0xC00C is read to check that the XAUI lanes are active. Bit 0 + * clear indicates active; set indicates inactive. + */ + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); + hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, + &adapt_comp_reg); + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if ((link_reg & 1) && + ((adapt_comp_reg & 1) == 0)) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + hw->phy.ops.read_reg(hw, 0xC79F, + MDIO_MMD_PMAPMD, + &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, + MDIO_MMD_PMAPMD, + &adapt_comp_reg); + } + } else { + if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) + *link_up = true; + else + *link_up = false; + } + + if (*link_up == false) + goto out; + } + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + } + + if (links_reg & IXGBE_LINKS_SPEED) + *speed = IXGBE_LINK_SPEED_10GB_FULL; + else + *speed = IXGBE_LINK_SPEED_1GB_FULL; + + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && + (ixgbe_validate_link_ready(hw) != 0)) + *link_up = false; + + /* if link is down, zero out the current_mode */ + if (*link_up == false) { + hw->fc.current_mode = ixgbe_fc_none; + hw->fc.fc_was_autonegged = false; + } +out: + return 0; +} + +/** + * ixgbe_setup_mac_link_82598 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if auto-negotiation enabled + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. 
+ **/ +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status = 0; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc = curr_autoc; + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + + /* Check to see if speed passed in is supported. */ + ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg); + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) + status = IXGBE_ERR_LINK_SETUP; + + /* Set KX4/KX support according to speed requested */ + else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + autoc |= IXGBE_AUTOC_KX4_SUPP; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + autoc |= IXGBE_AUTOC_KX_SUPP; + if (autoc != curr_autoc) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + } + + if (status == 0) { + /* + * Setup and restart the link based on the new values in + * ixgbe_hw This will write the AUTOC register based on the new + * stored values + */ + status = ixgbe_start_mac_link_82598(hw, + autoneg_wait_to_complete); + } + + return status; +} + + +/** + * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Sets the link speed in the AUTOC register in the MAC and restarts link. + **/ +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status; + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, + autoneg_wait_to_complete); + /* Set up MAC */ + ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); + + return status; +} + +/** + * ixgbe_reset_hw_82598 - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performing a PHY reset, and performing a link (MAC) + * reset. + **/ +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +{ + s32 status = 0; + s32 phy_status = 0; + u32 ctrl; + u32 gheccr; + u32 i; + u32 autoc; + u8 analog_val; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + hw->mac.ops.stop_adapter(hw); + + /* + * Power up the Atlas Tx lanes if they are currently powered down. + * Atlas Tx lanes are powered down for MAC loopback tests, but + * they are not automatically restored on reset. 
*/
+	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+		/* Enable Tx Atlas so packets can be transmitted again */
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+					     &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+					      analog_val);
+
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+					     &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+					      analog_val);
+
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+					     &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+					      analog_val);
+
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+					     &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+					      analog_val);
+	}
+
+	/* Reset PHY */
+	if (hw->phy.reset_disable == false) {
+		/* PHY ops must be identified and initialized prior to reset */
+
+		/* Init PHY and function pointers, perform SFP setup */
+		phy_status = hw->phy.ops.init(hw);
+		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+			goto reset_hw_out;
+		else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+			goto no_phy_reset;
+
+		hw->phy.ops.reset(hw);
+	}
+
+no_phy_reset:
+	/*
+	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+	 * access and verify no pending requests before reset
+	 */
+	ixgbe_disable_pcie_master(hw);
+
+mac_reset_top:
+	/*
+	 * Issue global reset to the MAC.  This needs to be a SW reset.
+	 * If link reset is used, it might reset the MAC when mng is using it
+	 */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll for reset bit to self-clear indicating reset is complete */
+	for (i = 0; i < 10; i++) {
+		udelay(1);
+		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+		if (!(ctrl & IXGBE_CTRL_RST))
+			break;
+	}
+	if (ctrl & IXGBE_CTRL_RST) {
+		status = IXGBE_ERR_RESET_FAILED;
+		hw_dbg(hw, "Reset polling failed to complete.\n");
+	}
+
+	/*
+	 * Double resets are required for recovery from certain error
+	 * conditions.  Between resets, it is necessary to stall to allow time
+	 * for any pending HW events to complete.  We use 1usec since that is
+	 * what is needed for ixgbe_disable_pcie_master().  The second reset
+	 * then clears out any effects of those events.
+	 */
+	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+		udelay(1);
+		goto mac_reset_top;
+	}
+
+	msleep(50);
+
+	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
+	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
+
+	/*
+	 * Store the original AUTOC value if it has not been
+	 * stored off yet.  Otherwise restore the stored original
+	 * AUTOC value since the reset operation sets back to defaults.
*/
+	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	if (hw->mac.orig_link_settings_stored == false) {
+		hw->mac.orig_autoc = autoc;
+		hw->mac.orig_link_settings_stored = true;
+	} else if (autoc != hw->mac.orig_autoc) {
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+	}
+
+	/* Store the permanent mac address */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+	/*
+	 * Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table
+	 */
+	hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+	if (phy_status)
+		status = phy_status;
+
+	return status;
+}
+
+/**
+ *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq set index
+ **/
+static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+	rar_high &= ~IXGBE_RAH_VIND_MASK;
+	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+	return 0;
+}
+
+/**
+ *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ **/
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+	if (rar_high & IXGBE_RAH_VIND_MASK) {
+		rar_high &= ~IXGBE_RAH_VIND_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+	}
+
+	return 0;
+}
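The VFTA helper that follows slices a single VLAN id three ways: a 32-bit word index into the filter table, a bit position inside that word, and a nibble position in the parallel VFTAVIND byte array that records the VMDq pool. A worked example for VLAN id 100 may make the bit-slicing easier to follow (illustration only, not driver code):

    #include <assert.h>

    /* Worked example of the VFTA index math used in the helper below. */
    int main(void)
    {
            unsigned int vlan = 100;                    /* example VLAN id      */
            unsigned int regindex = (vlan >> 5) & 0x7F; /* 32-bit word: 100/32  */
            unsigned int bitindex = vlan & 0x1F;        /* bit within that word */
            unsigned int vftabyte = (vlan >> 3) & 0x03; /* VIND byte-array pick */
            unsigned int vindbit  = (vlan & 0x7) << 2;  /* nibble offset (bits) */

            assert(regindex == 3);  /* VLAN 100 lives in VFTA[3]...         */
            assert(bitindex == 4);  /* ...at bit 4, since 3 * 32 + 4 == 100 */
            assert(vftabyte == 0 && vindbit == 16); /* VMDq nibble sits 16
                                       bits into VFTAVIND(0, 3)             */
            return 0;
    }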
+/**
+ *  ixgbe_set_vfta_82598 - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFTA
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+				bool vlan_on)
+{
+	u32 regindex;
+	u32 bitindex;
+	u32 bits;
+	u32 vftabyte;
+
+	if (vlan > 4095)
+		return IXGBE_ERR_PARAM;
+
+	/* Determine 32-bit word position in array */
+	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
+
+	/* Determine the location of the (VMD) queue index */
+	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
+
+	/* Set the nibble for VMD queue index */
+	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+	bits &= (~(0x0F << bitindex));
+	bits |= (vind << bitindex);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+	/* Determine the location of the bit for this VLAN id */
+	bitindex = vlan & 0x1F;          /* lower five bits */
+
+	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+	if (vlan_on)
+		/* Turn on this VLAN id */
+		bits |= (1 << bitindex);
+	else
+		/* Turn off this VLAN id */
+		bits &= ~(1 << bitindex);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+	u32 offset;
+	u32 vlanbyte;
+
+	for (offset = 0; offset < hw->mac.vft_size; offset++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+		for (offset = 0; offset < hw->mac.vft_size; offset++)
+			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+					0);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: read value
+ *
+ *  Performs read operation to Atlas analog register specified.
+ **/
+static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+	u32 atlas_ctl;
+
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(10);
+	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+	*val = (u8)atlas_ctl;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: atlas register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Atlas analog register specified.
+ **/
+static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+	u32 atlas_ctl;
+
+	atlas_ctl = (reg << 8) | val;
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(10);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
+ **/
+static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+				       u8 *eeprom_data)
+{
+	s32 status = 0;
+	u16 sfp_addr = 0;
+	u16 sfp_data = 0;
+	u16 sfp_stat = 0;
+	u32 i;
+
+	if (hw->phy.type == ixgbe_phy_nl) {
+		/*
+		 * phy SDA/SCL registers are at addresses 0xC30A to
+		 * 0xC30D.  These registers are used to talk to the SFP+
+		 * module's EEPROM through the SDA/SCL (I2C) interface.
+ */ + sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset; + sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); + hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, + MDIO_MMD_PMAPMD, + sfp_addr); + + /* Poll status */ + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, + MDIO_MMD_PMAPMD, + &sfp_stat); + sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) + break; + usleep_range(10000, 20000); + } + + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { + hw_dbg(hw, "EEPROM read did not pass.\n"); + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* Read data */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, + MDIO_MMD_PMAPMD, &sfp_data); + + *eeprom_data = (u8)(sfp_data >> 8); + } else { + status = IXGBE_ERR_PHY; + goto out; + } + +out: + return status; +} + +/** + * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + hw->phy.ops.identify(hw); + + /* Copper PHY must be checked before AUTOC LMS to determine correct + * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, + MDIO_MMD_PMAPMD, &ext_ability); + if (ext_ability & MDIO_PMA_EXTABLE_10GBT) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_1000BT) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_100BTX) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; + else + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else /* XAUI */ + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + break; + default: + break; + } + + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.identify_sfp(hw); + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_da_cu: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_sfp_type_sr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case ixgbe_sfp_type_lr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + } + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case 
IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case IXGBE_DEV_ID_82598EB_XF_LR: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + +out: + return physical_layer; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple + * port devices. + * @hw: pointer to the HW structure + * + * Calls common function and corrects issue with some single port devices + * that enable LAN1 but not LAN0. + **/ +static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u16 pci_gen = 0; + u16 pci_ctrl2 = 0; + + ixgbe_set_lan_id_multi_port_pcie(hw); + + /* check if LAN0 is disabled */ + hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); + if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { + + hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); + + /* if LAN0 is completely disabled force function to 0 */ + if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { + + bus->func = 0; + } + } +} + +/** + * ixgbe_set_rxpba_82598 - Configure packet buffers + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure packet buffers. + */ +static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 rxpktsize = IXGBE_RXPBSIZE_64KB; + u8 i = 0; + + if (!num_pb) + return; + + /* Setup Rx packet buffer sizes */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* Setup the first four at 80KB */ + rxpktsize = IXGBE_RXPBSIZE_80KB; + for (; i < 4; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* Setup the last four at 48KB...don't re-init i */ + rxpktsize = IXGBE_RXPBSIZE_48KB; + /* Fall Through */ + case PBA_STRATEGY_EQUAL: + default: + /* Divide the remaining Rx packet buffer evenly among the TCs */ + for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + } + + /* Setup Tx packet buffer sizes */ + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); + + return; +} + +static struct ixgbe_mac_operations mac_ops_82598 = { + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_82598, + .start_hw = &ixgbe_start_hw_82598, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, + .get_media_type = &ixgbe_get_media_type_82598, + .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598, + .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, + .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, + .setup_link = &ixgbe_setup_mac_link_82598, + .set_rxpba = &ixgbe_set_rxpba_82598, + .check_link = &ixgbe_check_mac_link_82598, + .get_link_capabilities = &ixgbe_get_link_capabilities_82598, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_82598, + .clear_vmdq = 
&ixgbe_clear_vmdq_82598, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_82598, + .set_vfta = &ixgbe_set_vfta_82598, + .fc_enable = &ixgbe_fc_enable_82598, + .set_fw_drv_ver = NULL, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, + .release_swfw_sync = &ixgbe_release_swfw_sync, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_82598 = { + .init_params = &ixgbe_init_eeprom_params_generic, + .read = &ixgbe_read_eerd_generic, + .read_buffer = &ixgbe_read_eerd_buffer_generic, + .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, + .update_checksum = &ixgbe_update_eeprom_checksum_generic, +}; + +static struct ixgbe_phy_operations phy_ops_82598 = { + .identify = &ixgbe_identify_phy_generic, + .identify_sfp = &ixgbe_identify_sfp_module_generic, + .init = &ixgbe_init_phy_ops_82598, + .reset = &ixgbe_reset_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, + .check_overtemp = &ixgbe_tn_check_overtemp, +}; + +struct ixgbe_info ixgbe_82598_info = { + .mac = ixgbe_mac_82598EB, + .get_invariants = &ixgbe_get_invariants_82598, + .mac_ops = &mac_ops_82598, + .eeprom_ops = &eeprom_ops_82598, + .phy_ops = &phy_ops_82598, +}; + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c new file mode 100644 index 0000000..34f30ec --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -0,0 +1,2263 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
+
+#define IXGBE_82599_MAX_TX_QUEUES 128
+#define IXGBE_82599_MAX_RX_QUEUES 128
+#define IXGBE_82599_RAR_ENTRIES 128
+#define IXGBE_82599_MC_TBL_SIZE 128
+#define IXGBE_82599_VFT_TBL_SIZE 128
+#define IXGBE_82599_RX_PB_SIZE 512
+
+static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+						 ixgbe_link_speed speed,
+						 bool autoneg,
+						 bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+					   ixgbe_link_speed speed,
+					   bool autoneg,
+					   bool autoneg_wait_to_complete);
+static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+				      bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+				      ixgbe_link_speed speed,
+				      bool autoneg,
+				      bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+					 ixgbe_link_speed speed,
+					 bool autoneg,
+					 bool autoneg_wait_to_complete);
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+
+static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+
+	/* enable the laser control functions for SFP+ fiber */
+	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+		mac->ops.disable_tx_laser =
+		    &ixgbe_disable_tx_laser_multispeed_fiber;
+		mac->ops.enable_tx_laser =
+		    &ixgbe_enable_tx_laser_multispeed_fiber;
+		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
+	} else {
+		mac->ops.disable_tx_laser = NULL;
+		mac->ops.enable_tx_laser = NULL;
+		mac->ops.flap_tx_laser = NULL;
+	}
+
+	if (hw->phy.multispeed_fiber) {
+		/* Set up dual speed SFP+ support */
+		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+	} else {
+		if ((mac->ops.get_media_type(hw) ==
+		     ixgbe_media_type_backplane) &&
+		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+		    !ixgbe_verify_lesm_fw_enabled_82599(hw))
+			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+		else
+			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+	}
+}
+
+static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 reg_anlp1 = 0;
+	u32 i = 0;
+	u16 list_offset, data_offset, data_value;
+
+	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
+		ixgbe_init_mac_link_ops_82599(hw);
+
+		hw->phy.ops.reset = NULL;
+
+		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+							      &data_offset);
+		if (ret_val != 0)
+			goto setup_sfp_out;
+
+		/* PHY config will finish before releasing the semaphore */
+		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+		if (ret_val != 0) {
+			ret_val = IXGBE_ERR_SWFW_SYNC;
+			goto setup_sfp_out;
+		}
+
+		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+		while (data_value != 0xffff) {
+			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
+			IXGBE_WRITE_FLUSH(hw);
+			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+		}
+
+		/* Release the semaphore */
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+		/*
+		 * Delay obtaining semaphore again to allow FW access,
+		 * semaphore_delay 
is in ms usleep_range needs us. + */ + usleep_range(hw->eeprom.semaphore_delay * 1000, + hw->eeprom.semaphore_delay * 2000); + + /* Now restart DSP by setting Restart_AN and clearing LMS */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, + IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | + IXGBE_AUTOC_AN_RESTART)); + + /* Wait for AN to leave state 0 */ + for (i = 0; i < 10; i++) { + usleep_range(4000, 8000); + reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) + break; + } + if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { + hw_dbg(hw, "sfp module setup not complete\n"); + ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + goto setup_sfp_out; + } + + /* Restart DSP by setting Restart_AN and return to SFI mode */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, + IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | + IXGBE_AUTOC_AN_RESTART)); + } + +setup_sfp_out: + return ret_val; +} + +static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + ixgbe_init_mac_link_ops_82599(hw); + + mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; + mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; + mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * ixgbe_init_phy_ops_82599 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during get_invariants because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_82599(hw); + + /* If copper media, overwrite with copper function pointers */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82599; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_generic; + } + + /* Set necessary function pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; + phy->ops.get_firmware_version = + &ixgbe_get_phy_firmware_version_tnx; + break; + case ixgbe_phy_aq: + phy->ops.get_firmware_version = + &ixgbe_get_phy_firmware_version_generic; + break; + default: + break; + } + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_82599 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @negotiation: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *negotiation) +{ + s32 status = 0; + u32 autoc = 0; + + /* Determine 1G link capabilities off of SFP+ type */ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = true; + goto out; + } + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. 
If AUTOC value has not been + * stored, use the current register value. + */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *negotiation = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = true; + break; + + case IXGBE_AUTOC_LMS_10G_SERIAL: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *negotiation = false; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = true; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = true; + break; + + case IXGBE_AUTOC_LMS_SGMII_1G_100M: + *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; + *negotiation = false; + break; + + default: + status = IXGBE_ERR_LINK_SETUP; + goto out; + break; + } + + if (hw->phy.multispeed_fiber) { + *speed |= IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = true; + } + +out: + return status; +} + +/** + * ixgbe_get_media_type_82599 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + case ixgbe_phy_aq: + media_type = ixgbe_media_type_copper; + goto out; + default: + break; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_XAUI_LOM: + /* Default device ID is mezzanine card KX/KX4 */ + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599_SFP_SF2: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_82599_CX4: + media_type = ixgbe_media_type_cx4; + break; + case IXGBE_DEV_ID_82599_T3_LOM: + media_type = ixgbe_media_type_copper; + break; + case IXGBE_DEV_ID_82599_LS: + media_type = ixgbe_media_type_fiber_lco; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } +out: + return media_type; +} + +/** + * ixgbe_start_mac_link_82599 - Setup MAC link settings + * @hw: pointer to hardware structure + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. 
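+ *
+ * As an illustrative digest of the code below (an editor's sketch, not
+ * additional driver logic), the restart amounts to:
+ *
+ *	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ *	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg | IXGBE_AUTOC_AN_RESTART);
+ *
+ * followed, in the KX4/KX/KR link modes, by polling IXGBE_LINKS for
+ * IXGBE_LINKS_KX_AN_COMP once per 100 ms, up to IXGBE_AUTO_NEG_TIME polls.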
+ **/ +static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = 0; + + /* Restart link */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msleep(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + hw_dbg(hw, "Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msleep(50); + + return status; +} + +/** + * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively shutting down the Tx + * laser on the PHY, effectively halting physical link. + **/ +static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* Disable tx laser; allow 100us to go dark per spec */ + esdp_reg |= IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + udelay(100); +} + +/** + * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively turning on the Tx + * laser on the PHY, effectively starting physical link. + **/ +static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* Enable tx laser; allow 100ms to light up */ + esdp_reg &= ~IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + msleep(100); +} + +/** + * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support, + * it sets autotry_restart to true to indicate that we need to + * initiate a new autotry session with the link partner. To do + * so, we set the speed then disable and re-enable the tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with true clause 37 autoneg, which also + * involves a loss of signal. + **/ +static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + if (hw->mac.autotry_restart) { + ixgbe_disable_tx_laser_multispeed_fiber(hw); + ixgbe_enable_tx_laser_multispeed_fiber(hw); + hw->mac.autotry_restart = false; + } +} + +/** + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. 
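+ *
+ * A hypothetical caller-side sketch (illustrative only, not part of this
+ * patch): requesting both speeds lets the function try 10G first and then
+ * fall back to 1G:
+ *
+ *	status = hw->mac.ops.setup_link(hw,
+ *					IXGBE_LINK_SPEED_10GB_FULL |
+ *					IXGBE_LINK_SPEED_1GB_FULL,
+ *					true, true);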
+ **/ +static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status = 0; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + u32 speedcnt = 0; + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + u32 i = 0; + bool link_up = false; + bool negotiation; + + /* Mask off requested but non-supported speeds */ + status = hw->mac.ops.get_link_capabilities(hw, &link_speed, + &negotiation); + if (status != 0) + return status; + + speed &= link_speed; + + /* + * Try each speed one by one, highest priority first. We do this in + * software because 10gb fiber doesn't support speed autonegotiation. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status != 0) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + + /* Allow module to change analog characteristics (1G->10G) */ + msleep(40); + + status = ixgbe_setup_mac_link_82599(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the tx laser if it has not already been done */ + hw->mac.ops.flap_tx_laser(hw); + + /* + * Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status != 0) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + esdp_reg &= ~IXGBE_ESDP_SDP5; + esdp_reg |= IXGBE_ESDP_SDP5_DIR; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + + /* Allow module to change analog characteristics (10G->1G) */ + msleep(40); + + status = ixgbe_setup_mac_link_82599(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the tx laser if it has not already been done */ + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + + /* + * We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. 
+ */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +/** + * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Implements the Intel SmartSpeed algorithm. + **/ +static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status = 0; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 i, j; + bool link_up = false; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + /* + * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the + * autoneg advertisement if link is unable to be established at the + * highest negotiated rate. This can sometimes happen due to integrity + * issues with the physical media connection. + */ + + /* First, try to get link with full advertisement */ + hw->phy.smart_speed_active = false; + for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + autoneg_wait_to_complete); + if (status != 0) + goto out; + + /* + * Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per + * Table 9 in the AN MAS. + */ + for (i = 0; i < 5; i++) { + mdelay(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status != 0) + goto out; + + if (link_up) + goto out; + } + } + + /* + * We didn't get link. If we advertised KR plus one of KX4/KX + * (or BX4/BX), then disable KR and try again. + */ + if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || + ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) + goto out; + + /* Turn SmartSpeed on to disable KR support */ + hw->phy.smart_speed_active = true; + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + autoneg_wait_to_complete); + if (status != 0) + goto out; + + /* + * Wait for the controller to acquire link. 600ms will allow for + * the AN link_fail_inhibit_timer as well for multiple cycles of + * parallel detect, both 10g and 1g. This allows for the maximum + * connect attempts as defined in the AN MAS table 73-7. + */ + for (i = 0; i < 6; i++) { + mdelay(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status != 0) + goto out; + + if (link_up) + goto out; + } + + /* We didn't get link. Turn SmartSpeed back off. 
*/
+	hw->phy.smart_speed_active = false;
+	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+					    autoneg_wait_to_complete);
+
+out:
+	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+		hw_dbg(hw, "Smartspeed has downgraded the link speed from "
+		       "the maximum advertised\n");
+	return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_82599 - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+				      ixgbe_link_speed speed, bool autoneg,
+				      bool autoneg_wait_to_complete)
+{
+	s32 status = 0;
+	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	u32 start_autoc = autoc;
+	u32 orig_autoc = 0;
+	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+	u32 links_reg;
+	u32 i;
+	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+
+	/* Check to see if speed passed in is supported. */
+	status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
+						   &autoneg);
+	if (status != 0)
+		goto out;
+
+	speed &= link_capabilities;
+
+	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
+		status = IXGBE_ERR_LINK_SETUP;
+		goto out;
+	}
+
+	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
+	if (hw->mac.orig_link_settings_stored)
+		orig_autoc = hw->mac.orig_autoc;
+	else
+		orig_autoc = autoc;
+
+	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+		/* Set KX4/KX/KR support according to speed requested */
+		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
+		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+				autoc |= IXGBE_AUTOC_KX4_SUPP;
+			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+			    (hw->phy.smart_speed_active == false))
+				autoc |= IXGBE_AUTOC_KR_SUPP;
+		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+			autoc |= IXGBE_AUTOC_KX_SUPP;
+	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
+		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+		/* Switch from 1G SFI to 10G SFI if requested */
+		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
+		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
+			autoc &= ~IXGBE_AUTOC_LMS_MASK;
+			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
+		}
+	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
+		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+		/* Switch from 10G SFI to 1G SFI if requested */
+		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
+			autoc &= ~IXGBE_AUTOC_LMS_MASK;
+			if (autoneg)
+				autoc |= IXGBE_AUTOC_LMS_1G_AN;
+			else
+				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+		}
+	}
+
+	if (autoc != start_autoc) {
+		/* Restart link */
+		autoc |= IXGBE_AUTOC_AN_RESTART;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+
+		/* Only poll for autoneg to complete if specified to do so */
+		if (autoneg_wait_to_complete) {
+			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+				links_reg = 0; /* Just in case Autoneg time = 0 */
+				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+					links_reg =
+					    IXGBE_READ_REG(hw, IXGBE_LINKS);
+					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+						break;
+					msleep(100);
+				}
+				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+					status =
+					    IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+					hw_dbg(hw, "Autoneg did not "
+					       "complete.\n");
+				}
+			}
+		}
+
+		/* Add delay to filter out noises during initial link setup */
+		msleep(50);
+	}
+
+out:
+	return status;
+}
+
+/**
+ * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ * Restarts link on PHY and MAC based on settings passed in.
+ **/
+static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+					 ixgbe_link_speed speed,
+					 bool autoneg,
+					 bool autoneg_wait_to_complete)
+{
+	s32 status;
+
+	/* Setup the PHY according to input speed */
+	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+					      autoneg_wait_to_complete);
+	/* Set up MAC */
+	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
+
+	return status;
+}
+
+/**
+ * ixgbe_reset_hw_82599 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
+ * reset.
+ **/
+static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+{
+	s32 status = 0;
+	u32 ctrl;
+	u32 i;
+	u32 autoc;
+	u32 autoc2;
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	hw->mac.ops.stop_adapter(hw);
+
+	/* PHY ops must be identified and initialized prior to reset */
+
+	/* Identify PHY and related function pointers */
+	status = hw->phy.ops.init(hw);
+
+	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		goto reset_hw_out;
+
+	/* Setup SFP module if there is one present. */
+	if (hw->phy.sfp_setup_needed) {
+		status = hw->mac.ops.setup_sfp(hw);
+		hw->phy.sfp_setup_needed = false;
+	}
+
+	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		goto reset_hw_out;
+
+	/* Reset PHY */
+	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
+		hw->phy.ops.reset(hw);
+
+	/*
+	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+	 * access and verify no pending requests before reset
+	 */
+	ixgbe_disable_pcie_master(hw);
+
+mac_reset_top:
+	/*
+	 * Issue global reset to the MAC. This needs to be a SW reset.
+	 * If link reset is used, it might reset the MAC when mng is using it
+	 */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll for reset bit to self-clear indicating reset is complete */
+	for (i = 0; i < 10; i++) {
+		udelay(1);
+		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+		if (!(ctrl & IXGBE_CTRL_RST))
+			break;
+	}
+	if (ctrl & IXGBE_CTRL_RST) {
+		status = IXGBE_ERR_RESET_FAILED;
+		hw_dbg(hw, "Reset polling failed to complete.\n");
+	}
+
+	/*
+	 * Double resets are required for recovery from certain error
+	 * conditions. Between resets, it is necessary to stall to allow time
+	 * for any pending HW events to complete. We use 1usec since that is
+	 * what is needed for ixgbe_disable_pcie_master(). The second reset
+	 * then clears out any effects of those events.
+	 */
+	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+		udelay(1);
+		goto mac_reset_top;
+	}
+
+	msleep(50);
+
+	/*
+	 * Store the original AUTOC/AUTOC2 values if they have not been
+	 * stored off yet. 
Otherwise restore the stored original + * values since the reset operation sets back to defaults. + */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_autoc2 = autoc2; + hw->mac.orig_link_settings_stored = true; + } else { + if (autoc != hw->mac.orig_autoc) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | + IXGBE_AUTOC_AN_RESTART)); + + if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != + (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { + autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; + autoc2 |= (hw->mac.orig_autoc2 & + IXGBE_AUTOC2_UPPER_MASK); + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); + } + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + +reset_hw_out: + return status; +} + +/** + * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) +{ + int i; + u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; + + /* + * Before starting reinitialization process, + * FDIRCMD.CMD must be zero. + */ + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + IXGBE_FDIRCMD_CMD_MASK)) + break; + udelay(10); + } + if (i >= IXGBE_FDIRCMD_CMD_POLL) { + hw_dbg(hw, "Flow Director previous command isn't complete, " + "aborting table re-initialization.\n"); + return IXGBE_ERR_FDIR_REINIT_FAILED; + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); + IXGBE_WRITE_FLUSH(hw); + /* + * 82599 adapters flow director init flow cannot be restarted, + * Workaround 82599 silicon errata by performing the following steps + * before re-writing the FDIRCTRL control register with the same value. + * - write 1 to bit 8 of FDIRCMD register & + * - write 0 to bit 8 of FDIRCMD register + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | + IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + ~IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + /* + * Clear FDIR Hash register to clear any leftover hashes + * waiting to be programmed. 
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
+	IXGBE_WRITE_FLUSH(hw);
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll init-done after we write FDIRCTRL register */
+	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+		    IXGBE_FDIRCTRL_INIT_DONE)
+			break;
+		udelay(10);
+	}
+	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+		hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
+		return IXGBE_ERR_FDIR_REINIT_FAILED;
+	}
+
+	/* Clear FDIR statistics registers (read to clear) */
+	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
+	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+
+	return 0;
+}
+
+/**
+ * ixgbe_set_fdir_rxpba_82599 - Initialize Flow Director Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @pballoc: which mode to allocate filters with
+ **/
+static s32 ixgbe_set_fdir_rxpba_82599(struct ixgbe_hw *hw, const u32 pballoc)
+{
+	u32 fdir_pbsize = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
+	u32 current_rxpbsize = 0;
+	int i;
+
+	/* reserve space for Flow Director filters */
+	switch (pballoc) {
+	case IXGBE_FDIR_PBALLOC_256K:
+		fdir_pbsize -= 256 << IXGBE_RXPBSIZE_SHIFT;
+		break;
+	case IXGBE_FDIR_PBALLOC_128K:
+		fdir_pbsize -= 128 << IXGBE_RXPBSIZE_SHIFT;
+		break;
+	case IXGBE_FDIR_PBALLOC_64K:
+		fdir_pbsize -= 64 << IXGBE_RXPBSIZE_SHIFT;
+		break;
+	case IXGBE_FDIR_PBALLOC_NONE:
+	default:
+		return IXGBE_ERR_PARAM;
+	}
+
+	/* determine current RX packet buffer size */
+	for (i = 0; i < 8; i++)
+		current_rxpbsize += IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+
+	/* if there is already room for the filters do nothing */
+	if (current_rxpbsize <= fdir_pbsize)
+		return 0;
+
+	if (current_rxpbsize > hw->mac.rx_pb_size) {
+		/*
+		 * If current_rxpbsize is greater than the HW maximum, the Rx
+		 * buffer sizes are unconfigured or misconfigured, since the
+		 * HW default is to give the full buffer to each traffic
+		 * class, resulting in a total size of 8x the actual buffer
+		 * size.
+		 *
+		 * This assumes no DCB since the RXPBSIZE registers appear to
+		 * be unconfigured.
+		 */
+		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), fdir_pbsize);
+		for (i = 1; i < 8; i++)
+			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+	} else {
+		/*
+		 * Since the Rx packet buffer appears to have already been
+		 * configured we need to shrink each packet buffer by enough
+		 * to make room for the filters. As such we take each rxpbsize
+		 * value and multiply it by a fraction representing the size
+		 * needed over the size we currently have.
+		 *
+		 * We need to reduce fdir_pbsize and current_rxpbsize to
+		 * 1/1024 of their original values in order to avoid
+		 * overflowing the u32 being used to store rxpbsize. 
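+		 *
+		 * A worked example with illustrative numbers: with eight
+		 * 64KB buffers, current_rxpbsize is 512 (KB) after the
+		 * shift; reserving 128KB for filters leaves fdir_pbsize at
+		 * 384 (KB), so each buffer is scaled by 384/512 and a 64KB
+		 * buffer becomes 48KB.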
+		 */
+		fdir_pbsize >>= IXGBE_RXPBSIZE_SHIFT;
+		current_rxpbsize >>= IXGBE_RXPBSIZE_SHIFT;
+		for (i = 0; i < 8; i++) {
+			u32 rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+			rxpbsize *= fdir_pbsize;
+			rxpbsize /= current_rxpbsize;
+			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+	int i;
+
+	/* Prime the keys for hashing */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+	/*
+	 * Poll init-done after we write the register. Estimated times:
+	 *      10G: PBALLOC = 11b, timing is 60us
+	 *       1G: PBALLOC = 11b, timing is 600us
+	 *     100M: PBALLOC = 11b, timing is 6ms
+	 *
+	 *     Multiply these timings by 4 if under full Rx load
+	 *
+	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+	 * 1 msec per poll time. If we're at line rate and drop to 100M, then
+	 * this might not finish in our poll time, but we can live with that
+	 * for now.
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+	IXGBE_WRITE_FLUSH(hw);
+	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+		    IXGBE_FDIRCTRL_INIT_DONE)
+			break;
+		usleep_range(1000, 2000);
+	}
+
+	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+		hw_dbg(hw, "Flow Director poll time exceeded!\n");
+}
+
+/**
+ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ *	      contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+	s32 err;
+
+	/* Before enabling Flow Director, verify the Rx Packet Buffer size */
+	err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
+	if (err)
+		return err;
+
+	/*
+	 * Continue setup of fdirctrl register bits:
+	 *  Move the flexible bytes to use the ethertype - shift 6 words
+	 *  Set the maximum length per hash bucket to 0xA filters
+	 *  Send interrupt when 64 filters are left
+	 */
+	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+	/* write hashes and fdirctrl register, poll for completion */
+	ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+	return 0;
+}
+
+/**
+ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ *	      contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+	s32 err;
+
+	/* Before enabling Flow Director, verify the Rx Packet Buffer size */
+	err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
+	if (err)
+		return err;
+
+	/*
+	 * Continue setup of fdirctrl register bits:
+	 *  Turn perfect match filtering on
+	 *  Report hash in RSS field of Rx wb descriptor
+	 *  Initialize the drop queue
+	 *  Move the flexible bytes to use the ethertype - shift 6 words
+	 *  Set the maximum length per hash bucket to 0xA filters
+	 *  Send interrupt when 64 (0x4 * 16) filters are left
+	 */
+	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+		    IXGBE_FDIRCTRL_REPORT_STATUS |
+		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+	/* write hashes and fdirctrl register, poll for completion */
+	ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+	return 0;
+}
+
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+		common_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+		sig_hash ^= lo_hash_dword << (16 - n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+		common_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+		sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
+
+/**
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @input: unique input dword
+ * @common: compressed common input dword
+ *
+ * This function is almost identical to the function above but contains
+ * several optimizations such as unwinding all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
+ **/
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+					    union ixgbe_atr_hash_dword common)
+{
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = ntohl(input.dword);
+
+	/* generate common hash dword */
+	hi_hash_dword = ntohl(common.dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+	/*
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the vlan until after bit 0 was processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+	/* Process remaining 30 bits of the key */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+	/* combine common_hash result with signature and bucket hashes */
+	bucket_hash ^= common_hash;
+	bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+	sig_hash ^= common_hash << 16;
+	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+	/* return completed signature hash */
+	return sig_hash ^ bucket_hash;
+}
+
+/**
+ * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @common: compressed common input dword
+ * @queue: queue index to direct traffic to
+ **/
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_hash_dword input,
+					  union ixgbe_atr_hash_dword common,
+					  u8 queue)
+{
+	u64 fdirhashcmd;
+	u32 fdircmd;
+
+	/*
+	 * Get the flow_type in order to program FDIRCMD properly
+	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+	 */
+	switch (input.formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+	case IXGBE_ATR_FLOW_TYPE_TCPV6:
+	case IXGBE_ATR_FLOW_TYPE_UDPV6:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+		break;
+	default:
+		hw_dbg(hw, " Error on flow type input\n");
+		return IXGBE_ERR_CONFIG;
+	}
+
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+	/*
+	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+	 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+	 */
+	fdirhashcmd = (u64)fdircmd << 32;
+	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
+	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
+	return 0;
+}
+
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ * @input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First it applies the input_mask
+ * to the atr_input resulting in a cleaned up atr_input data stream.
+ * Second it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash. 
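+ *
+ * A minimal illustrative call sequence (editor's sketch; error handling
+ * omitted, and 'mask', 'filter', 'soft_id' and 'queue' assumed to be set
+ * up by the caller):
+ *
+ *	ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ *	ixgbe_atr_compute_perfect_hash_82599(&filter, &mask);
+ *	ixgbe_fdir_write_perfect_filter_82599(hw, &filter, soft_id, queue);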
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+					  union ixgbe_atr_input *input_mask)
+{
+
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 bucket_hash = 0;
+
+	/* Apply masks to input data */
+	input->dword_stream[0] &= input_mask->dword_stream[0];
+	input->dword_stream[1] &= input_mask->dword_stream[1];
+	input->dword_stream[2] &= input_mask->dword_stream[2];
+	input->dword_stream[3] &= input_mask->dword_stream[3];
+	input->dword_stream[4] &= input_mask->dword_stream[4];
+	input->dword_stream[5] &= input_mask->dword_stream[5];
+	input->dword_stream[6] &= input_mask->dword_stream[6];
+	input->dword_stream[7] &= input_mask->dword_stream[7];
+	input->dword_stream[8] &= input_mask->dword_stream[8];
+	input->dword_stream[9] &= input_mask->dword_stream[9];
+	input->dword_stream[10] &= input_mask->dword_stream[10];
+
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = ntohl(input->dword_stream[0]);
+
+	/* generate common hash dword */
+	hi_hash_dword = ntohl(input->dword_stream[1] ^
+			      input->dword_stream[2] ^
+			      input->dword_stream[3] ^
+			      input->dword_stream[4] ^
+			      input->dword_stream[5] ^
+			      input->dword_stream[6] ^
+			      input->dword_stream[7] ^
+			      input->dword_stream[8] ^
+			      input->dword_stream[9] ^
+			      input->dword_stream[10]);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+	/*
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the vlan until after bit 0 was processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+	/* Process remaining 30 bits of the key */
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+
+	/*
+	 * Limit hash to 13 bits since max bucket count is 8K.
+	 * Store result at the end of the input stream.
+	 */
+	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
+/**
+ * ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc. In order
+ * to generate a correctly swapped value we need to bit swap the mask and
+ * that is what is accomplished by this function. 
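+ *
+ * For example (values illustrative): a src_port mask of 0x00ff packs into
+ * the low half as 0x000000ff and bit-reverses within its 16-bit half to
+ * 0x0000ff00, while a lone bit 0 (0x0001) maps to bit 15 (0x8000).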
+ **/ +static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) +{ + u32 mask = ntohs(input_mask->formatted.dst_port); + mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; + mask |= ntohs(input_mask->formatted.src_port); + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian. As a result on big-endian + * systems we will end up byte swapping the value to little-endian before + * it is byte swapped again and written to the hardware in the original + * big-endian format. + */ +#define IXGBE_STORE_AS_BE32(_value) \ + (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ + (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) + +#define IXGBE_WRITE_REG_BE32(a, reg, value) \ + IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) + +#define IXGBE_STORE_AS_BE16(_value) \ + ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask) +{ + /* mask IPv6 since it is currently not supported */ + u32 fdirm = IXGBE_FDIRM_DIPv6; + u32 fdirtcpm; + + /* + * Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + * + * This also assumes IPv4 only. IPv6 masking isn't supported at this + * point in time. + */ + + /* verify bucket hash is cleared on hash generation */ + if (input_mask->formatted.bkt_hash) + hw_dbg(hw, " bucket hash should always be 0 in mask\n"); + + /* Program FDIRM and verify partial masks */ + switch (input_mask->formatted.vm_pool & 0x7F) { + case 0x0: + fdirm |= IXGBE_FDIRM_POOL; + case 0x7F: + break; + default: + hw_dbg(hw, " Error on vm pool mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { + case 0x0: + fdirm |= IXGBE_FDIRM_L4P; + if (input_mask->formatted.dst_port || + input_mask->formatted.src_port) { + hw_dbg(hw, " Error on src/dst port mask\n"); + return IXGBE_ERR_CONFIG; + } + case IXGBE_ATR_L4TYPE_MASK: + break; + default: + hw_dbg(hw, " Error on flow type mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { + case 0x0000: + /* mask VLAN ID, fall through to mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANID; + case 0x0FFF: + /* mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANP; + break; + case 0xE000: + /* mask VLAN ID only, fall through */ + fdirm |= IXGBE_FDIRM_VLANID; + case 0xEFFF: + /* no VLAN fields masked */ + break; + default: + hw_dbg(hw, " Error on VLAN mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: + /* Mask Flex Bytes, fall through */ + fdirm |= IXGBE_FDIRM_FLEX; + case 0xFFFF: + break; + default: + hw_dbg(hw, " Error on flexible byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + + /* store the TCP/UDP port masks, bit reversed from port layout */ + fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); + + /* write both the same so that UDP and TCP use the 
same mask */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+	/* store source and destination IP masks (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+			     ~input_mask->formatted.src_ip[0]);
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+			     ~input_mask->formatted.dst_ip[0]);
+
+	return 0;
+}
+
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_input *input,
+					  u16 soft_id, u8 queue)
+{
+	u32 fdirport, fdirvlan, fdirhash, fdircmd;
+
+	/* currently IPv6 is not supported, must be programmed with 0 */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+			     input->formatted.src_ip[0]);
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+			     input->formatted.src_ip[1]);
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+			     input->formatted.src_ip[2]);
+
+	/* record the source address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+	/* record the first 32 bits of the destination address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+	/* record source and destination port (little-endian)*/
+	fdirport = ntohs(input->formatted.dst_port);
+	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+	fdirport |= ntohs(input->formatted.src_port);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+	/* record vlan (little-endian) and flex_bytes (big-endian) */
+	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+	fdirvlan |= ntohs(input->formatted.vlan_id);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+	/* configure FDIRHASH register */
+	fdirhash = input->formatted.bkt_hash;
+	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+	/*
+	 * flush all previous writes to make certain registers are
+	 * programmed prior to issuing the command
+	 */
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	if (queue == IXGBE_FDIR_DROP_QUEUE)
+		fdircmd |= IXGBE_FDIRCMD_DROP;
+	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+	return 0;
+}
+
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_input *input,
+					  u16 soft_id)
+{
+	u32 fdirhash;
+	u32 fdircmd = 0;
+	u32 retry_count;
+	s32 err = 0;
+
+	/* configure FDIRHASH register */
+	fdirhash = input->formatted.bkt_hash;
+	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+	/* flush hash to HW */
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Query if filter is present */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+	for (retry_count = 10; retry_count; retry_count--) {
+		/* allow 10us for query to process */
+		udelay(10);
+		/* verify query completed successfully */
+		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+			break;
+	}
+
+	if (!retry_count)
+		err = IXGBE_ERR_FDIR_REINIT_FAILED;
+
+	/* if filter exists in hardware then remove it */
+	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+		IXGBE_WRITE_FLUSH(hw);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+	}
+
+	return err;
+}
+
+/**
+ * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Omer analog register specified.
+ **/
+static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+	u32 core_ctl;
+
+	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
+			(reg << 8));
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(10);
+	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
+	*val = (u8)core_ctl;
+
+	return 0;
+}
+
+/**
+ * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to write
+ * @val: value to write
+ *
+ * Performs write operation to Omer analog register specified.
+ **/
+static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+	u32 core_ctl;
+
+	core_ctl = (reg << 8) | val;
+	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(10);
+
+	return 0;
+}
+
+/**
+ * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function
+ * and the generation 2 (gen2) start_hw function.
+ * Then performs revision-specific operations, if any.
+ **/
+static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+{
+	s32 ret_val = 0;
+
+	ret_val = ixgbe_start_hw_generic(hw);
+	if (ret_val != 0)
+		goto out;
+
+	ret_val = ixgbe_start_hw_gen2(hw);
+	if (ret_val != 0)
+		goto out;
+
+	/* We need to run link autotry after the driver loads */
+	hw->mac.autotry_restart = true;
+	hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
+
+	if (ret_val == 0)
+		ret_val = ixgbe_verify_fw_version_82599(hw);
+out:
+	return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_82599 - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ * If PHY already detected, maintains current PHY type in hw struct,
+ * otherwise executes the PHY detection routine.
+ **/
+static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+
+	/* Detect PHY if not unknown - returns success if already detected. */
+	status = ixgbe_identify_phy_generic(hw);
+	if (status != 0) {
+		/* 82599 10GBASE-T requires an external PHY */
+		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+			goto out;
+		else
+			status = ixgbe_identify_sfp_module_generic(hw);
+	}
+
+	/* Set PHY type none if no PHY detected */
+	if (hw->phy.type == ixgbe_phy_unknown) {
+		hw->phy.type = ixgbe_phy_none;
+		status = 0;
+	}
+
+	/* Return error if SFP module has been detected but is not supported */
+	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+out:
+	return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration. 
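+ *
+ * The return value is a bitmask; for example, a 10GBASE-T PHY that also
+ * advertises 1000BASE-T and 100BASE-TX yields
+ * (IXGBE_PHYSICAL_LAYER_10GBASE_T | IXGBE_PHYSICAL_LAYER_1000BASE_T |
+ * IXGBE_PHYSICAL_LAYER_100BASE_TX).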
+ **/ +static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + u8 comp_codes_10g = 0; + u8 comp_codes_1g = 0; + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, + &ext_ability); + if (ext_ability & MDIO_PMA_EXTABLE_10GBT) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_1000BT) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_100BTX) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | + IXGBE_PHYSICAL_LAYER_1000BASE_BX; + goto out; + } else + /* SFI mode so read SFP module */ + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; + goto out; + break; + case IXGBE_AUTOC_LMS_10G_SERIAL: + if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + if (autoc & IXGBE_AUTOC_KR_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + break; + default: + goto out; + break; + } + +sfp_check: + /* SFP check must be done last since DA modules are sometimes used to + * test KR mode - we need to id KR mode correctly before SFP module. 
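The switch above is essentially a decode of the AUTOC link-mode-select field followed by the PMA/PMD sub-fields. A reduced sketch of that mask-and-compare pattern; the field positions and values below are illustrative assumptions, not the ixgbe_type.h definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: a 3-bit link mode select field at bits 15:13. */
#define LMS_SHIFT      13
#define LMS_MASK       (0x7u << LMS_SHIFT)
#define LMS_10G_SERIAL (0x3u << LMS_SHIFT)
#define LMS_KX4_KX_KR  (0x4u << LMS_SHIFT)

static const char *decode_lms(uint32_t autoc)
{
	switch (autoc & LMS_MASK) {
	case LMS_10G_SERIAL:
		return "10G serial (KR, or SFI -> go check the SFP module)";
	case LMS_KX4_KX_KR:
		return "backplane auto-negotiation";
	default:
		return "other";
	}
}

int main(void)
{
	printf("%s\n", decode_lms(LMS_10G_SERIAL | 0x1));
	return 0;
}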
+ * Call identify_sfp because the pluggable module may have changed */ + hw->phy.ops.identify_sfp(hw); + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + goto out; + + switch (hw->phy.type) { + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_phy_sfp_ftl_active: + case ixgbe_phy_sfp_active_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; + break; + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; + break; + default: + break; + } + +out: + return physical_layer; +} + +/** + * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for 82599 + **/ +static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +{ +#define IXGBE_MAX_SECRX_POLL 30 + int i; + int secrxreg; + + /* + * Workaround for 82599 silicon errata when enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. + */ + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg |= IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); + if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + udelay(10); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECRX_POLL) + hw_dbg(hw, "Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_verify_fw_version_82599 - verify fw version for 82599 + * @hw: pointer to hardware structure + * + * Verifies that the installed firmware version is 0.6 or higher + * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. + * + * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or + * if the FW version is not supported.
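Both this firmware-version check and the LESM probe that follows it walk the same EEPROM pointer chain: read a pointer word, reject the 0x0000/0xFFFF sentinels that mean "absent or erased", then read within the pointed-to block. A sketch of that pattern, with a caller-supplied callback and a fake EEPROM standing in for hw->eeprom.ops.read (the offsets in main are made up for the demo):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 0x0000 and 0xFFFF mark an absent or erased EEPROM section. */
static bool ptr_valid(uint16_t ptr)
{
	return ptr != 0x0000 && ptr != 0xFFFF;
}

/* Read the word at (block pointed to by ptr_offset) + sub_offset,
 * validating the pointer hop on the way. */
static bool read_chained(bool (*read_word)(uint16_t off, uint16_t *val),
			 uint16_t ptr_offset, uint16_t sub_offset,
			 uint16_t *out)
{
	uint16_t block;

	if (!read_word(ptr_offset, &block) || !ptr_valid(block))
		return false;
	return read_word((uint16_t)(block + sub_offset), out);
}

static uint16_t fake_eeprom[32] = { [0x0F] = 0x0011, [0x0013] = 0x0603 };

static bool read_word(uint16_t off, uint16_t *val)
{
	if (off >= 32)
		return false;
	*val = fake_eeprom[off];
	return true;
}

int main(void)
{
	uint16_t v;

	if (read_chained(read_word, 0x0F, 2, &v))
		printf("version word = 0x%04x\n", v);
	return 0;
}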
+ **/ +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM_VERSION; + u16 fw_offset, fw_ptp_cfg_offset; + u16 fw_version = 0; + + /* firmware check is only necessary for SFI devices */ + if (hw->phy.media_type != ixgbe_media_type_fiber) { + status = 0; + goto fw_version_out; + } + + /* get the offset to the Firmware Module block */ + hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + + if ((fw_offset == 0) || (fw_offset == 0xFFFF)) + goto fw_version_out; + + /* get the offset to the Pass Through Patch Configuration block */ + hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), + &fw_ptp_cfg_offset); + + if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) + goto fw_version_out; + + /* get the firmware version */ + hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + + IXGBE_FW_PATCH_VERSION_4), + &fw_version); + + if (fw_version > 0x5) + status = 0; + +fw_version_out: + return status; +} + +/** + * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. + * @hw: pointer to hardware structure + * + * Returns true if the LESM FW module is present and enabled. Otherwise + * returns false. Smart Speed must be disabled if LESM FW module is enabled. + **/ +static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +{ + bool lesm_enabled = false; + u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; + s32 status; + + /* get the offset to the Firmware Module block */ + status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + + if ((status != 0) || + (fw_offset == 0) || (fw_offset == 0xFFFF)) + goto out; + + /* get the offset to the LESM Parameters block */ + status = hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_LESM_PARAMETERS_PTR), + &fw_lesm_param_offset); + + if ((status != 0) || + (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) + goto out; + + /* get the lesm state word */ + status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + + IXGBE_FW_LESM_STATE_1), + &fw_lesm_state); + + if ((status == 0) && + (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) + lesm_enabled = true; + +out: + return lesm_enabled; +} + +/** + * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Retrieves 16 bit word(s) read from EEPROM + **/ +static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, + data); + else + ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, + words, + data); + + return ret_val; +} + +/** + * ixgbe_read_eeprom_82599 - Read EEPROM word using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM + **/ +static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * 
use EERD otherwise use bit bang + */ + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_generic(hw, offset, data); + else + ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); + + return ret_val; +} + +static struct ixgbe_mac_operations mac_ops_82599 = { + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_82599, + .start_hw = &ixgbe_start_hw_82599, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, + .get_media_type = &ixgbe_get_media_type_82599, + .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599, + .enable_rx_dma = &ixgbe_enable_rx_dma_82599, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, + .get_device_caps = &ixgbe_get_device_caps_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, + .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, + .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, + .setup_link = &ixgbe_setup_mac_link_82599, + .set_rxpba = &ixgbe_set_rxpba_generic, + .check_link = &ixgbe_check_mac_link_generic, + .get_link_capabilities = &ixgbe_get_link_capabilities_82599, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_generic, + .clear_vmdq = &ixgbe_clear_vmdq_generic, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_generic, + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = &ixgbe_setup_sfp_modules_82599, + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, + .release_swfw_sync = &ixgbe_release_swfw_sync, + +}; + +static struct ixgbe_eeprom_operations eeprom_ops_82599 = { + .init_params = &ixgbe_init_eeprom_params_generic, + .read = &ixgbe_read_eeprom_82599, + .read_buffer = &ixgbe_read_eeprom_buffer_82599, + .write = &ixgbe_write_eeprom_generic, + .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, + .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, + .update_checksum = &ixgbe_update_eeprom_checksum_generic, +}; + +static struct ixgbe_phy_operations phy_ops_82599 = { + .identify = &ixgbe_identify_phy_82599, + .identify_sfp = &ixgbe_identify_sfp_module_generic, + .init = &ixgbe_init_phy_ops_82599, + .reset = &ixgbe_reset_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, +}; + +struct ixgbe_info ixgbe_82599_info = { + .mac = 
ixgbe_mac_82599EB, + .get_invariants = &ixgbe_get_invariants_82599, + .mac_ops = &mac_ops_82599, + .eeprom_ops = &eeprom_ops_82599, + .phy_ops = &phy_ops_82599, + .mbx_ops = &mbx_ops_generic, +}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c new file mode 100644 index 0000000..fc1375f --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -0,0 +1,3510 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count); +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_release_eeprom(struct ixgbe_hw *hw); + +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw); +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw); +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw); +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); +static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); +static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset); + +/** + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * 
settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) +{ + u32 ctrl_ext; + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + + /* Identify the PHY */ + hw->phy.ops.identify(hw); + + /* Clear the VLAN filter table */ + hw->mac.ops.clear_vfta(hw); + + /* Clear statistics registers */ + hw->mac.ops.clear_hw_cntrs(hw); + + /* Set No Snoop Disable */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + + /* Setup flow control */ + ixgbe_setup_fc(hw, 0); + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + return 0; +} + +/** + * ixgbe_start_hw_gen2 - Init sequence for common device family + * @hw: pointer to hw structure + * + * Performs the init sequence common to the second generation + * of 10 GbE devices. + * Devices in the second generation: + * 82599 + * X540 + **/ +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) +{ + u32 i; + u32 regval; + + /* Clear the rate limiters */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); + } + IXGBE_WRITE_FLUSH(hw); + + /* Disable relaxed ordering */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); + } + + for (i = 0; i < hw->mac.max_rx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | + IXGBE_DCA_RXCTRL_DESC_HSRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + + return 0; +} + +/** + * ixgbe_init_hw_generic - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) +{ + s32 status; + + /* Reset the hardware */ + status = hw->mac.ops.reset_hw(hw); + + if (status == 0) { + /* Start the HW */ + status = hw->mac.ops.start_hw(hw); + } + + return status; +} + +/** + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
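Because the counters are clear-on-read, the function below never stores anything: each IXGBE_READ_REG both fetches and zeroes the register. Stripped to its essence, the idiom is just a read whose result is thrown away, as in this sketch:

#include <stdint.h>

/* Statistics registers on this family are clear-on-read: reading
 * returns the count and resets it, so clearing is a discarded read. */
void clear_cor_counters(volatile uint32_t *regs, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++)
		(void)regs[i]; /* volatile read performed, value discarded */
}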
+ **/ +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) +{ + u16 i = 0; + + IXGBE_READ_REG(hw, IXGBE_CRCERRS); + IXGBE_READ_REG(hw, IXGBE_ILLERRC); + IXGBE_READ_REG(hw, IXGBE_ERRBC); + IXGBE_READ_REG(hw, IXGBE_MSPDC); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_MPC(i)); + + IXGBE_READ_REG(hw, IXGBE_MLFC); + IXGBE_READ_REG(hw, IXGBE_MRFC); + IXGBE_READ_REG(hw, IXGBE_RLEC); + IXGBE_READ_REG(hw, IXGBE_LXONTXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + IXGBE_READ_REG(hw, IXGBE_LXONRXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + } + + for (i = 0; i < 8; i++) { + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } + } + if (hw->mac.type >= ixgbe_mac_82599EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PRC64); + IXGBE_READ_REG(hw, IXGBE_PRC127); + IXGBE_READ_REG(hw, IXGBE_PRC255); + IXGBE_READ_REG(hw, IXGBE_PRC511); + IXGBE_READ_REG(hw, IXGBE_PRC1023); + IXGBE_READ_REG(hw, IXGBE_PRC1522); + IXGBE_READ_REG(hw, IXGBE_GPRC); + IXGBE_READ_REG(hw, IXGBE_BPRC); + IXGBE_READ_REG(hw, IXGBE_MPRC); + IXGBE_READ_REG(hw, IXGBE_GPTC); + IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); + IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + IXGBE_READ_REG(hw, IXGBE_RUC); + IXGBE_READ_REG(hw, IXGBE_RFC); + IXGBE_READ_REG(hw, IXGBE_ROC); + IXGBE_READ_REG(hw, IXGBE_RJC); + IXGBE_READ_REG(hw, IXGBE_MNGPRC); + IXGBE_READ_REG(hw, IXGBE_MNGPDC); + IXGBE_READ_REG(hw, IXGBE_MNGPTC); + IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); + IXGBE_READ_REG(hw, IXGBE_TPR); + IXGBE_READ_REG(hw, IXGBE_TPT); + IXGBE_READ_REG(hw, IXGBE_PTC64); + IXGBE_READ_REG(hw, IXGBE_PTC127); + IXGBE_READ_REG(hw, IXGBE_PTC255); + IXGBE_READ_REG(hw, IXGBE_PTC511); + IXGBE_READ_REG(hw, IXGBE_PTC1023); + IXGBE_READ_REG(hw, IXGBE_PTC1522); + IXGBE_READ_REG(hw, IXGBE_MPTC); + IXGBE_READ_REG(hw, IXGBE_BPTC); + for (i = 0; i < 16; i++) { + IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + } + } + + if (hw->mac.type == ixgbe_mac_X540) { + if (hw->phy.id == 0) + hw->phy.ops.identify(hw); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i); + } + + return 0; +} + +/** + * ixgbe_read_pba_string_generic - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. 
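In the legacy (non-string) layout handled below, the PBA is two EEPROM words of packed hex nibbles that get expanded into ASCII. A standalone sketch of that nibble-to-character expansion, mirroring the "+= '0'" / "+= 'A' - 0xA" conversion in the function:

#include <stdint.h>
#include <stdio.h>

/* Expand one 4-bit value into its ASCII hex digit. */
static char nibble_to_ascii(uint8_t n)
{
	return (char)(n < 0xA ? n + '0' : n - 0xA + 'A');
}

int main(void)
{
	uint16_t data = 0xE3C0; /* example first PBA word */
	char s[5];

	for (int i = 0; i < 4; i++)
		s[i] = nibble_to_ascii((data >> (12 - 4 * i)) & 0xF);
	s[4] = '\0';
	printf("%s\n", s); /* prints E3C0 */
	return 0;
}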
+ **/ +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + hw_dbg(hw, "PBA string buffer was null\n"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + /* + * if data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (data != IXGBE_PBANUM_PTR_GUARD) { + hw_dbg(hw, "NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + hw_dbg(hw, "PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (data >> 12) & 0xF; + pba_num[1] = (data >> 8) & 0xF; + pba_num[2] = (data >> 4) & 0xF; + pba_num[3] = data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg(hw, "NVM PBA number section invalid length\n"); + return IXGBE_ERR_PBA_SECTION; + } + + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + hw_dbg(hw, "PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(data >> 8); + pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return 0; +} + +/** + * ixgbe_get_mac_addr_generic - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); + + for (i = 0; i < 4; i++) + mac_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < 2; i++) + mac_addr[i+4] = (u8)(rar_high >> (i*8)); + + return 0; +} + +/** + * ixgbe_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter 
*adapter = hw->back; + struct ixgbe_mac_info *mac = &hw->mac; + u16 link_status; + + hw->bus.type = ixgbe_bus_type_pci_express; + + /* Get the negotiated link width and speed from PCI config space */ + pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS, + &link_status); + + switch (link_status & IXGBE_PCI_LINK_WIDTH) { + case IXGBE_PCI_LINK_WIDTH_1: + hw->bus.width = ixgbe_bus_width_pcie_x1; + break; + case IXGBE_PCI_LINK_WIDTH_2: + hw->bus.width = ixgbe_bus_width_pcie_x2; + break; + case IXGBE_PCI_LINK_WIDTH_4: + hw->bus.width = ixgbe_bus_width_pcie_x4; + break; + case IXGBE_PCI_LINK_WIDTH_8: + hw->bus.width = ixgbe_bus_width_pcie_x8; + break; + default: + hw->bus.width = ixgbe_bus_width_unknown; + break; + } + + switch (link_status & IXGBE_PCI_LINK_SPEED) { + case IXGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = ixgbe_bus_speed_2500; + break; + case IXGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = ixgbe_bus_speed_5000; + break; + default: + hw->bus.speed = ixgbe_bus_speed_unknown; + break; + } + + mac->ops.set_lan_id(hw); + + return 0; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u32 reg; + + reg = IXGBE_READ_REG(hw, IXGBE_STATUS); + bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; + bus->lan_id = bus->func; + + /* check for a port swap */ + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS); + if (reg & IXGBE_FACTPS_LFS) + bus->func ^= 0x1; +} + +/** + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) +{ + u32 number_of_queues; + u32 reg_val; + u16 i; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + reg_val &= ~(IXGBE_RXCTRL_RXEN); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); + IXGBE_WRITE_FLUSH(hw); + usleep_range(2000, 4000); + + /* Clear interrupt mask to stop interrupts from being generated */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts */ + IXGBE_READ_REG(hw, IXGBE_EICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + number_of_queues = hw->mac.max_tx_queues; + for (i = 0; i < number_of_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + if (reg_val & IXGBE_TXDCTL_ENABLE) { + reg_val &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val); + } + } + + /* + * Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verifying there are no pending requests + */ + ixgbe_disable_pcie_master(hw); + + return 0; +} + +/** + * ixgbe_led_on_generic - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* To turn on the LED, set mode to ON.
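The LED helpers are textbook read-modify-write of a per-index mode field in one shared register. A standalone sketch; the nibble-per-LED layout and the mode values below are assumptions for illustration, not the driver's IXGBE_LED_* definitions:

#include <stdint.h>
#include <stdio.h>

#define LED_MODE_SHIFT(i) (8u * (i))                 /* assumed field spacing */
#define LED_MODE_MASK(i)  (0x0000000Fu << LED_MODE_SHIFT(i))
#define LED_ON_MODE       0xEu                       /* assumed mode values */
#define LED_OFF_MODE      0xFu

/* Clear the old mode field for LED 'idx', then program the new mode. */
static uint32_t led_set_mode(uint32_t ledctl, unsigned int idx, uint32_t mode)
{
	ledctl &= ~LED_MODE_MASK(idx);
	ledctl |= mode << LED_MODE_SHIFT(idx);
	return ledctl;
}

int main(void)
{
	uint32_t ledctl = led_set_mode(0, 1, LED_ON_MODE);

	printf("LEDCTL = 0x%08x\n", ledctl); /* 0x00000e00 with these values */
	return 0;
}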
*/ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_led_off_generic - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* To turn off the LED, set mode to OFF. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_init_eeprom_params_generic - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->type = ixgbe_eeprom_none; + /* Set default semaphore delay to 10ms which is a well + * tested value */ + eeprom->semaphore_delay = 10; + /* Clear EEPROM page size, it will be initialized as needed */ + eeprom->word_page_size = 0; + + /* + * Check for EEPROM present first. + * If not present leave as none + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + if (eec & IXGBE_EEC_PRES) { + eeprom->type = ixgbe_eeprom_spi; + + /* + * SPI EEPROM is assumed here. This code would need to + * change if a future EEPROM is not SPI. + */ + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + } + + if (eec & IXGBE_EEC_ADDR_SIZE) + eeprom->address_bits = 16; + else + eeprom->address_bits = 8; + hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: " + "%d\n", eeprom->type, eeprom->word_size, + eeprom->address_bits); + } + + return 0; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to write + * @words: number of words + * @data: 16 bit word(s) to write to EEPROM + * + * Writes 16 bit word(s) to the EEPROM through the bit-bang method + **/ +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status = 0; + u16 i, count; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + /* + * The EEPROM page size cannot be queried from the chip. We do lazy + * initialization. It is worth doing that when we write a large buffer. + */ + if ((hw->eeprom.word_page_size == 0) && + (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) + ixgbe_detect_eeprom_page_size_generic(hw, offset); + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status != 0) + break; + } + +out: + return status; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @words: number of word(s) + * @data: 16 bit word(s) to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word; + u16 page_size; + u16 i; + u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + + /* Prepare the EEPROM for writing */ + status = ixgbe_acquire_eeprom(hw); + + if (status == 0) { + if (ixgbe_ready_eeprom(hw) != 0) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == 0) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + ixgbe_shift_out_eeprom_bits(hw, + IXGBE_EEPROM_WREN_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + + ixgbe_standby_eeprom(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, write_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + page_size = hw->eeprom.word_page_size; + + /* Send the data in burst via SPI*/ + do { + word = data[i]; + word = (word >> 8) | (word << 8); + ixgbe_shift_out_eeprom_bits(hw, word, 16); + + if (page_size == 0) + break; + + /* do not wrap around page */ + if (((offset + i) & (page_size - 1)) == + (page_size - 1)) + break; + } while (++i < words); + + ixgbe_standby_eeprom(hw); + usleep_range(10000, 20000); + } + /* Done with writing - release the EEPROM */ + ixgbe_release_eeprom(hw); + } + + return status; +} + +/** + * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. 
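Two details of the burst loop above are worth isolating: words go out over bit-bang SPI high byte first, so each 16-bit word is byte-swapped before shifting, and a burst must stop at an EEPROM page boundary because the part would otherwise wrap within the page and silently overwrite earlier bytes. Both checks in plain C:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Swap the two bytes of a 16-bit word, as the write loop does before
 * shifting it out over SPI. */
static uint16_t swap16(uint16_t w)
{
	return (uint16_t)((w >> 8) | (w << 8));
}

/* True when word_index is the last word of its page; page size must be
 * a power of two (zero means "unknown, no page limit applied"). */
static bool at_page_end(uint16_t word_index, uint16_t page_size_words)
{
	return page_size_words &&
	       (word_index & (page_size_words - 1)) == (page_size_words - 1);
}

int main(void)
{
	printf("0x%04x wrap=%d\n", swap16(0x1234), at_page_end(15, 16));
	return 0;
}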
+ **/ +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status; + + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); + +out: + return status; +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit words(s) from EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status = 0; + u16 i, count; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status != 0) + break; + } + +out: + return status; +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit word(s) from EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word_in; + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; + u16 i; + + /* Prepare the EEPROM for reading */ + status = ixgbe_acquire_eeprom(hw); + + if (status == 0) { + if (ixgbe_ready_eeprom(hw) != 0) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == 0) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, read_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + /* Read the data. 
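Small SPI parts with 8-bit addressing encode the ninth address bit inside the opcode itself, which is why the loop above ORs an A8 flag into the opcode for word offsets of 128 and up. A sketch of that opcode construction; the opcode and flag values are the conventional SPI ones but should be treated as illustrative here:

#include <stdint.h>
#include <stdio.h>

#define SPI_READ_OPCODE 0x03u /* conventional SPI read opcode */
#define SPI_A8_BIT      0x08u /* 9th address bit folded into the opcode */

/* Build the read opcode for an 8-bit-address SPI part: word offsets
 * >= 128 (byte addresses >= 256) need the embedded A8 bit. */
static uint8_t spi_read_opcode(uint16_t word_offset, unsigned int addr_bits)
{
	uint8_t op = SPI_READ_OPCODE;

	if (addr_bits == 8 && word_offset >= 128)
		op |= SPI_A8_BIT;
	return op;
}

int main(void)
{
	printf("0x%02x 0x%02x\n",
	       spi_read_opcode(10, 8), spi_read_opcode(200, 8));
	return 0;
}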
*/ + word_in = ixgbe_shift_in_eeprom_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + /* End this read operation */ + ixgbe_release_eeprom(hw); + } + + return status; +} + +/** + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + +out: + return status; +} + +/** + * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of word(s) + * @data: 16 bit word(s) from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eerd; + s32 status = 0; + u32 i; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) + + IXGBE_EEPROM_RW_REG_START; + + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); + + if (status == 0) { + data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> + IXGBE_EEPROM_RW_REG_DATA); + } else { + hw_dbg(hw, "Eeprom read timed out\n"); + goto out; + } + } +out: + return status; +} + +/** + * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be used as a scratch pad + * + * Discover EEPROM page size by writing marching data at given offset. + * This function is called only when we are writing a new large buffer + * at given offset so the data would be overwritten anyway. + **/ +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset) +{ + u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; + s32 status = 0; + u16 i; + + for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) + data[i] = i; + + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, + IXGBE_EEPROM_PAGE_SIZE_MAX, data); + hw->eeprom.word_page_size = 0; + if (status != 0) + goto out; + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + if (status != 0) + goto out; + + /* + * When writing in burst more than the actual page size + * EEPROM address wraps around current page. + */ + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; + + hw_dbg(hw, "Detected EEPROM page size = %d words.", + hw->eeprom.word_page_size); +out: + return status; +} + +/** + * ixgbe_read_eerd_generic - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
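The page-size detection above relies on wrap-around arithmetic: after writing IXGBE_EEPROM_PAGE_SIZE_MAX sequential words (0, 1, 2, ...), a part with a smaller page wraps within the page, and the value read back at the start offset tells you where the wrap happened: page_size = MAX - data[0]. A sketch of that deduction, with 128 as an assumed stand-in for the driver's maximum:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_MAX 128 /* assumed upper bound in words, illustrative */

/* If words 0..MAX-1 were written and the word read back at the start
 * offset is N, then word N was the first to wrap: page size = MAX - N. */
static uint16_t deduce_page_size(uint16_t first_word_read_back)
{
	return (uint16_t)(PAGE_SIZE_MAX - first_word_read_back);
}

int main(void)
{
	/* e.g. reading back 112 implies a 16-word page */
	printf("page size = %u words\n", deduce_page_size(112));
	return 0;
}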
+ **/ +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); +} + +/** + * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eewr; + s32 status = 0; + u16 i; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | + (data[i] << IXGBE_EEPROM_RW_REG_DATA) | + IXGBE_EEPROM_RW_REG_START; + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status != 0) { + hw_dbg(hw, "Eeprom write EEWR timed out\n"); + goto out; + } + + IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status != 0) { + hw_dbg(hw, "Eeprom write EEWR timed out\n"); + goto out; + } + } + +out: + return status; +} + +/** + * ixgbe_write_eewr_generic - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); +} + +/** + * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status + * @hw: pointer to hardware structure + * @ee_reg: EEPROM flag for polling + * + * Polls the status bit (bit 1) of the EERD or EEWR to determine when the + * read or write is done respectively. + **/ +static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { + if (ee_reg == IXGBE_NVM_POLL_READ) + reg = IXGBE_READ_REG(hw, IXGBE_EERD); + else + reg = IXGBE_READ_REG(hw, IXGBE_EEWR); + + if (reg & IXGBE_EEPROM_RW_REG_DONE) { + status = 0; + break; + } + udelay(5); + } + return status; +} + +/** + * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang + * @hw: pointer to hardware structure + * + * Prepares EEPROM for access using bit-bang method. This function should + * be called before issuing a command to the EEPROM. 
+ **/ +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) +{ + s32 status = 0; + u32 eec; + u32 i; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == 0) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* Request EEPROM Access */ + eec |= IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + if (eec & IXGBE_EEC_GNT) + break; + udelay(5); + } + + /* Release if grant not acquired */ + if (!(eec & IXGBE_EEC_GNT)) { + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + hw_dbg(hw, "Could not acquire EEPROM grant\n"); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = IXGBE_ERR_EEPROM; + } + + /* Setup EEPROM for Read/Write */ + if (status == 0) { + /* Clear CS and SK */ + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); + } + } + return status; +} + +/** + * ixgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = 0; + break; + } + udelay(50); + } + + if (i == timeout) { + hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + * this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + ixgbe_release_eeprom_semaphore(hw); + + udelay(50); + /* + * one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) + status = 0; + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + if (status == 0) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Set the SW EEPROM semaphore bit to request access */ + swsm |= IXGBE_SWSM_SWESMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + + /* + * If we set the bit successfully then we got the + * semaphore. + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (swsm & IXGBE_SWSM_SWESMBI) + break; + + udelay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + hw_dbg(hw, "SWESMBI Software EEPROM semaphore " + "not granted.\n"); + ixgbe_release_eeprom_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + hw_dbg(hw, "Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * ixgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. 
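The SWSM handshake above is a two-stage protocol: first win the inter-driver SMBI bit (the hardware sets it as a side effect of reading it clear), then claim the software/firmware SWESMBI bit by writing it and checking that it stuck. A control-flow sketch with callbacks standing in for the MMIO accessors and a crude simulation of the read-to-set behavior; the bit positions are illustrative, not the ixgbe_type.h values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SWSM_SMBI    0x1u /* assumed bit positions, illustrative */
#define SWSM_SWESMBI 0x2u

static bool acquire_swsm(uint32_t (*read_swsm)(void),
			 void (*write_swsm)(uint32_t),
			 void (*release)(void), int attempts)
{
	int i;

	/* Stage 1: inter-driver semaphore; reading SMBI as 0 claims it. */
	for (i = 0; i < attempts; i++)
		if (!(read_swsm() & SWSM_SMBI))
			break;
	if (i == attempts)
		return false;

	/* Stage 2: SW/FW semaphore; write SWESMBI, then read it back
	 * (firmware can refuse ownership by keeping it clear). */
	for (i = 0; i < attempts; i++) {
		write_swsm(read_swsm() | SWSM_SWESMBI);
		if (read_swsm() & SWSM_SWESMBI)
			return true;
	}
	release(); /* drop SMBI so other agents can make progress */
	return false;
}

static uint32_t sim;
/* Mimic hardware: a read that sees SMBI clear also sets it. */
static uint32_t rd(void) { uint32_t v = sim; sim |= SWSM_SMBI; return v; }
static void wr(uint32_t v) { sim = v; }
static void rel(void) { sim &= ~(SWSM_SMBI | SWSM_SWESMBI); }

int main(void)
{
	printf("acquired=%d\n", acquire_swsm(rd, wr, rel, 10));
	return 0;
}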
+ **/ +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ + swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ready_eeprom - Polls for EEPROM ready + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) +{ + s32 status = 0; + u16 i; + u8 spi_stat_reg; + + /* + * Read "Status Register" repeatedly until the LSB is cleared. The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { + ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); + if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + ixgbe_standby_eeprom(hw); + } + + /* + * On some parts, SPI write time could vary from 0-20mSec on 3.3V + * devices (and only 0-5mSec on 5V devices) + */ + if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { + hw_dbg(hw, "SPI EEPROM Status error\n"); + status = IXGBE_ERR_EEPROM; + } + + return status; +} + +/** + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: pointer to hardware structure + **/ +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* Toggle CS to flush commands */ + eec |= IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); + eec &= ~IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); +} + +/** + * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. + * @hw: pointer to hardware structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + **/ +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count) +{ + u32 eec; + u32 mask; + u32 i; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* + * Mask is used to shift "count" bits of "data" out to the EEPROM + * one bit at a time. Determine the starting bit based on count + */ + mask = 0x01 << (count - 1); + + for (i = 0; i < count; i++) { + /* + * A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK + * bit controls the clock input to the EEPROM). A "0" is + * shifted out to the EEPROM by setting "DI" to "0" and then + * raising and then lowering the clock. + */ + if (data & mask) + eec |= IXGBE_EEC_DI; + else + eec &= ~IXGBE_EEC_DI; + + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + + udelay(1); + + ixgbe_raise_eeprom_clk(hw, &eec); + ixgbe_lower_eeprom_clk(hw, &eec); + + /* + * Shift mask to signify next bit of data to shift in to the + * EEPROM + */ + mask = mask >> 1; + } + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eec &= ~IXGBE_EEC_DI; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM + * @hw: pointer to hardware structure + **/ +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) +{ + u32 eec; + u32 i; + u16 data = 0; + + /* + * In order to read a register from the EEPROM, we need to shift + * 'count' bits in from the EEPROM. 
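Stripped of the register traffic, the shift-out routine above is a classic MSB-first software-SPI loop: present one data bit on DI, pulse the clock, move to the next bit. The same skeleton in plain C, with a callback standing in for the EEC writes and the SK raise/lower pair:

#include <stdint.h>
#include <stdio.h>

/* Shift 'count' bits of 'data' out MSB-first; emit_bit() stands in for
 * setting DI and pulsing the SK clock high then low. */
static void shift_out_bits(uint16_t data, uint16_t count,
			   void (*emit_bit)(int bit))
{
	for (uint32_t mask = 1u << (count - 1); mask; mask >>= 1)
		emit_bit((data & mask) != 0);
}

static void print_bit(int bit) { putchar(bit ? '1' : '0'); }

int main(void)
{
	shift_out_bits(0x03, 8, print_bit); /* prints 00000011 */
	putchar('\n');
	return 0;
}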
Bits are "shifted in" by raising + * the clock input to the EEPROM (setting the SK bit), and then reading + * the value of the "DO" bit. During this "shifting in" process the + * "DI" bit should always be clear. + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); + + for (i = 0; i < count; i++) { + data = data << 1; + ixgbe_raise_eeprom_clk(hw, &eec); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec &= ~(IXGBE_EEC_DI); + if (eec & IXGBE_EEC_DO) + data |= 1; + + ixgbe_lower_eeprom_clk(hw, &eec); + } + + return data; +} + +/** + * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. + * @hw: pointer to hardware structure + * @eec: EEC register's current value + **/ +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + /* + * Raise the clock input to the EEPROM + * (setting the SK bit), then delay + */ + *eec = *eec | IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); +} + +/** + * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. + * @hw: pointer to hardware structure + * @eecd: EECD's current value + **/ +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + /* + * Lower the clock input to the EEPROM (clearing the SK bit), then + * delay + */ + *eec = *eec & ~IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); +} + +/** + * ixgbe_release_eeprom - Release EEPROM, release semaphores + * @hw: pointer to hardware structure + **/ +static void ixgbe_release_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec |= IXGBE_EEC_CS; /* Pull CS high */ + eec &= ~IXGBE_EEC_SK; /* Lower SCK */ + + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + + udelay(1); + + /* Stop requesting EEPROM access */ + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + /* + * Delay before attempt to obtain semaphore again to allow FW + * access. semaphore_delay is in ms we need us for usleep_range + */ + usleep_range(hw->eeprom.semaphore_delay * 1000, + hw->eeprom.semaphore_delay * 2000); +} + +/** + * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum + * @hw: pointer to hardware structure + **/ +u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (hw->eeprom.ops.read(hw, i, &word) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + checksum += word; + } + + /* Include all data from pointers except for the fw pointer */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + hw->eeprom.ops.read(hw, i, &pointer); + + /* Make sure the pointer seems valid */ + if (pointer != 0xFFFF && pointer != 0) { + hw->eeprom.ops.read(hw, pointer, &length); + + if (length != 0xFFFF && length != 0) { + for (j = pointer+1; j <= pointer+length; j++) { + hw->eeprom.ops.read(hw, j, &word); + checksum += word; + } + } + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. 
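The checksum scheme above is additive: the stored word is chosen so that the 16-bit sum of all covered words plus the checksum equals IXGBE_EEPROM_SUM (0xBABA is the value this family uses; treat it as an assumption in this sketch, which also ignores the pointer-section walk for brevity):

#include <stdint.h>
#include <stdio.h>

#define EEPROM_SUM 0xBABAu /* assumed target sum, illustrative */

/* The stored checksum is the value that makes the covered words plus
 * the checksum itself sum (mod 2^16) to EEPROM_SUM. */
static uint16_t calc_checksum(const uint16_t *words, unsigned int n)
{
	uint16_t sum = 0;

	for (unsigned int i = 0; i < n; i++)
		sum = (uint16_t)(sum + words[i]);
	return (uint16_t)(EEPROM_SUM - sum);
}

static int checksum_ok(const uint16_t *words, unsigned int n, uint16_t stored)
{
	return calc_checksum(words, n) == stored;
}

int main(void)
{
	uint16_t words[3] = { 0x1234, 0x0042, 0xBEEF };
	uint16_t stored = calc_checksum(words, 3);

	printf("stored=0x%04x ok=%d\n", stored, checksum_ok(words, 3, stored));
	return 0;
}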
+ **/ +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + + if (status == 0) { + checksum = hw->eeprom.ops.calc_checksum(hw); + + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + + /* + * Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + } else { + hw_dbg(hw, "EEPROM read failed\n"); + } + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + + if (status == 0) { + checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + } else { + hw_dbg(hw, "EEPROM read failed\n"); + } + + return status; +} + +/** + * ixgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. + * + * Tests a MAC address to ensure it is a valid Individual Address + **/ +s32 ixgbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = 0; + + /* Make sure it is not a multicast address */ + if (IXGBE_IS_MULTICAST(mac_addr)) + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + else if (IXGBE_IS_BROADCAST(mac_addr)) + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) + status = IXGBE_ERR_INVALID_MAC_ADDR; + + return status; +} + +/** + * ixgbe_set_rar_generic - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + /* setup VMDq pool selection before this RAR gets enabled */ + hw->mac.ops.set_vmdq(hw, index, vmdq); + + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. 
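The byte-order dance in ixgbe_set_rar_generic() can be shown on its own: the first four MAC bytes, which arrive in network order, are packed LSB-first into RAL, and the last two land in the low half of RAH while the upper RAH bits (VMDq selection, address-valid flag) are preserved. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Pack a network-order MAC address into little-endian RAL/RAH halves;
 * rah_keep carries the preserved upper RAH bits. */
static void pack_rar(const uint8_t mac[6], uint32_t rah_keep,
		     uint32_t *ral, uint32_t *rah)
{
	*ral = (uint32_t)mac[0] | ((uint32_t)mac[1] << 8) |
	       ((uint32_t)mac[2] << 16) | ((uint32_t)mac[3] << 24);
	*rah = (rah_keep & ~0xFFFFu) |
	       (uint32_t)mac[4] | ((uint32_t)mac[5] << 8);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
	uint32_t ral, rah;

	pack_rar(mac, 0, &ral, &rah);
	printf("RAL=0x%08x RAH=0x%08x\n", ral, rah);
	return 0;
}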
+ */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); + + if (enable_addr != 0) + rar_high |= IXGBE_RAH_AV; + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + return 0; +} + +/** + * ixgbe_clear_rar_generic - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); + + return 0; +} + +/** + * ixgbe_init_rx_addrs_generic - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (ixgbe_validate_mac_addr(hw->mac.addr) == + IXGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); + } else { + /* Setup the receive address. */ + hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); + hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); + + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + } + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + hw_dbg(hw, " Clearing MTA\n"); + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + + if (hw->mac.ops.init_uta_tables) + hw->mac.ops.init_uta_tables(hw); + + return 0; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. 
The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + hw_dbg(hw, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ixgbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + * + * Sets the bit-vector in the multicast table. + **/ +static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + + hw->addr_ctrl.mta_in_use++; + + vector = ixgbe_mta_vector(hw, mc_addr); + hw_dbg(hw, " bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @netdev: pointer to net device structure + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. + **/ +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, + struct net_device *netdev) +{ + struct netdev_hw_addr *ha; + u32 i; + + /* + * Set the new number of MC addresses that we are being requested to + * use. 
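+ * Each entry is then hashed into the shadow MTA via ixgbe_set_mta()
+ * above; e.g. a 12-bit vector of 0x92F selects MTA register
+ * 0x92F >> 5 = 73, bit 0x92F & 0x1F = 15.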
+ */ + hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + hw_dbg(hw, " Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* Update mta shadow */ + netdev_for_each_mc_addr(ha, netdev) { + hw_dbg(hw, " Adding the multicast addresses:\n"); + ixgbe_set_mta(hw, ha->addr); + } + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + + hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); + return 0; +} + +/** + * ixgbe_enable_mc_generic - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | + hw->mac.mc_filter_type); + + return 0; +} + +/** + * ixgbe_disable_mc_generic - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + return 0; +} + +/** + * ixgbe_fc_enable_generic - Enable flow control + * @hw: pointer to hardware structure + * @packetbuf_num: packet buffer number (0-7) + * + * Enable flow control according to the current settings. + **/ +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) +{ + s32 ret_val = 0; + u32 mflcn_reg, fccfg_reg; + u32 reg; + u32 rx_pba_size; + u32 fcrtl, fcrth; + +#ifdef CONFIG_DCB + if (hw->fc.requested_mode == ixgbe_fc_pfc) + goto out; + +#endif /* CONFIG_DCB */ + /* Negotiate the fc mode to use */ + ret_val = ixgbe_fc_autoneg(hw); + if (ret_val == IXGBE_ERR_FLOW_CONTROL) + goto out; + + /* Disable any previous flow control settings */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); + + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. +#ifdef CONFIG_DCB + * 4: Priority Flow Control is enabled. +#endif + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. 
+ */ + mflcn_reg |= IXGBE_MFLCN_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= IXGBE_MFLCN_RFCE; + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; +#ifdef CONFIG_DCB + case ixgbe_fc_pfc: + goto out; + break; +#endif /* CONFIG_DCB */ + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + mflcn_reg |= IXGBE_MFLCN_DPF; + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); + rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; + + fcrth = (rx_pba_size - hw->fc.high_water) << 10; + fcrtl = (rx_pba_size - hw->fc.low_water) << 10; + + if (hw->fc.current_mode & ixgbe_fc_tx_pause) { + fcrth |= IXGBE_FCRTH_FCEN; + if (hw->fc.send_xon) + fcrtl |= IXGBE_FCRTL_XONE; + } + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); + + /* Configure pause time (2 TCs per register) */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); + if ((packetbuf_num & 1) == 0) + reg = (reg & 0xFFFF0000) | hw->fc.pause_time; + else + reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); + + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; + + if (hw->fc.disable_fc_autoneg) + goto out; + + /* + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + * + * Since we're being called from an LSC, link is already known to be up. + * So use link_up_wait_to_complete=false. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ret_val = IXGBE_ERR_FLOW_CONTROL; + goto out; + } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case ixgbe_media_type_fiber: + if (speed == IXGBE_LINK_SPEED_1GB_FULL) + ret_val = ixgbe_fc_autoneg_fiber(hw); + break; + + /* Autoneg flow control on backplane adapters */ + case ixgbe_media_type_backplane: + ret_val = ixgbe_fc_autoneg_backplane(hw); + break; + + /* Autoneg flow control on copper adapters */ + case ixgbe_media_type_copper: + if (ixgbe_device_supports_autoneg_fc(hw) == 0) + ret_val = ixgbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == 0) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } + return ret_val; +} + +/** + * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber + * @hw: pointer to hardware structure + * + * Enable flow control according on 1 gig fiber. 
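+ * The local PCS1GANA and link partner PCS1GANLP pause bits are
+ * resolved against each other by ixgbe_negotiate_fc(); e.g. both
+ * sides advertising symmetric pause negotiates to ixgbe_fc_full.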
+ **/ +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) +{ + u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; + s32 ret_val; + + /* + * On multispeed fiber at 1g, bail out if + * - link is up but AN did not complete, or if + * - link is up and AN completed but timed out + */ + + linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || + ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + + ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, + pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE, + IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) +{ + u32 links2, anlp1_reg, autoc_reg, links; + s32 ret_val; + + /* + * On backplane, bail out if + * - backplane autoneg was not completed, or if + * - we are 82599 and link partner is not AN enabled + */ + links = IXGBE_READ_REG(hw, IXGBE_LINKS); + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + if (hw->mac.type == ixgbe_mac_82599EB) { + links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + } + /* + * Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + + ret_val = ixgbe_negotiate_fc(hw, autoc_reg, + anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, + IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. 
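+ * The resolution in ixgbe_negotiate_fc() below follows the standard
+ * pause table:
+ * local SYM set, partner SYM set -> full (or rx_pause if only
+ * Rx pause was requested)
+ * local ASM only, partner SYM+ASM -> tx_pause
+ * local SYM+ASM, partner ASM only -> rx_pause
+ * anything else -> none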
+ **/ +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) +{ + u16 technology_ability_reg = 0; + u16 lp_technology_ability_reg = 0; + + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &technology_ability_reg); + hw->phy.ops.read_reg(hw, MDIO_AN_LPA, + MDIO_MMD_AN, + &lp_technology_ability_reg); + + return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); +} + +/** + * ixgbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement + * + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ + if ((!(adv_reg)) || (!(lp_reg))) + return IXGBE_ERR_FC_NOT_NEGOTIATED; + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == ixgbe_fc_full) { + hw->fc.current_mode = ixgbe_fc_full; + hw_dbg(hw, "Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_tx_pause; + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_none; + hw_dbg(hw, "Flow Control = NONE.\n"); + } + return 0; +} + +/** + * ixgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) +{ + s32 ret_val = 0; + u32 reg = 0, reg_bp = 0; + u16 reg_cu = 0; + +#ifdef CONFIG_DCB + if (hw->fc.requested_mode == ixgbe_fc_pfc) { + hw->fc.current_mode = hw->fc.requested_mode; + goto out; + } + +#endif /* CONFIG_DCB */ + /* Validate the packetbuf configuration */ + if (packetbuf_num < 0 || packetbuf_num > 7) { + hw_dbg(hw, "Invalid packet buffer number [%d], expected range " + "is 0-7\n", packetbuf_num); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * Validate the water mark configuration. Zero water marks are invalid + * because it causes the controller to just blast out fc packets. + */ + if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { + hw_dbg(hw, "Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * Validate the requested mode. Strict IEEE mode does not allow + * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 
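+ * (UNH refers to conformance testing at the University of
+ * New Hampshire InterOperability Laboratory.)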
+ */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict " + "IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* + * Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_backplane: + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); + break; + + case ixgbe_media_type_copper: + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, ®_cu); + break; + + default: + ; + } + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. +#ifdef CONFIG_DCB + * 4: Priority Flow Control is enabled. +#endif + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. */ + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + reg |= (IXGBE_PCS1GANA_ASM_PAUSE); + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= (IXGBE_AUTOC_ASM_PAUSE); + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE); + } else if (hw->phy.media_type == ixgbe_media_type_copper) { + reg_cu |= (IXGBE_TAF_ASM_PAUSE); + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE); + } + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. 
*/ + reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; +#ifdef CONFIG_DCB + case ixgbe_fc_pfc: + goto out; + break; +#endif /* CONFIG_DCB */ + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + if (hw->mac.type != ixgbe_mac_X540) { + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. + */ + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + + /* Disable AN timeout */ + if (hw->fc.strict_ieee) + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); + hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); + } + + /* + * AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper. There is no need to set the PCS1GCTL register. + * + */ + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && + (ixgbe_device_supports_autoneg_fc(hw) == 0)) { + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, reg_cu); + } + + hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); +out: + return ret_val; +} + +/** + * ixgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else 0 + * is returned signifying master requests disabled. + **/ +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + u32 i; + u32 reg_val; + u32 number_of_queues; + s32 status = 0; + u16 dev_status = 0; + + /* Just jump out if bus mastering is already disabled */ + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + goto out; + + /* Disable the receive unit by stopping each queue */ + number_of_queues = hw->mac.max_rx_queues; + for (i = 0; i < number_of_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + if (reg_val & IXGBE_RXDCTL_ENABLE) { + reg_val &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); + } + } + + reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL); + reg_val |= IXGBE_CTRL_GIO_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); + + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + goto check_device_status; + udelay(100); + } + + hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + + /* + * Before proceeding, make sure that the PCIe block does not have + * transactions pending. 
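+ * This is verified below by polling the Transactions Pending bit
+ * of the PCIe Device Status register until it clears.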
+ */ +check_device_status: + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS, + &dev_status); + if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + break; + udelay(100); + } + + if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT) + hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); + else + goto out; + + /* + * Two consecutive resets are required via CTRL.RST per datasheet + * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine + * of this need. The first reset prevents new master requests from + * being issued by our device. We then must wait 1usec for any + * remaining completions from the PCIe bus to trickle in, and then reset + * again to clear out any effects they may have had on our device. + */ + hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + +out: + return status; +} + + +/** + * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) +{ + u32 gssr; + u32 swmask = mask; + u32 fwmask = mask << 5; + s32 timeout = 200; + + while (timeout) { + /* + * SW EEPROM semaphore bit is used for access to all + * SW_FW_SYNC/GSSR bits (not just EEPROM) + */ + if (ixgbe_get_eeprom_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + if (!(gssr & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) or other software + * thread currently using resource (swmask) + */ + ixgbe_release_eeprom_semaphore(hw); + usleep_range(5000, 10000); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n"); + return IXGBE_ERR_SWFW_SYNC; + } + + gssr |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); + return 0; +} + +/** + * ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) +{ + u32 gssr; + u32 swmask = mask; + + ixgbe_get_eeprom_semaphore(hw); + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + gssr &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); +} + +/** + * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) +{ + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + + return 0; +} + +/** + * ixgbe_blink_led_start_generic - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + **/ +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) +{ + ixgbe_link_speed speed = 0; + bool link_up = 0; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* + * Link must be up to auto-blink the LEDs; + * Force it if link is down. 
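+ * (IXGBE_AUTOC_FLU forces link up for this purpose; it is cleared
+ * again in ixgbe_blink_led_stop_generic().)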
+ */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (!link_up) { + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + autoc_reg |= IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + IXGBE_WRITE_FLUSH(hw); + usleep_range(10000, 20000); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + **/ +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + autoc_reg &= ~IXGBE_AUTOC_FLU; + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg &= ~IXGBE_LED_BLINK(index); + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_offset: SAN MAC address offset + * + * This function will read the EEPROM location for the SAN MAC address + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. + **/ +static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset) +{ + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. + */ + hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); + + return 0; +} + +/** + * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + * set_lan_id() is called by identify_sfp(), but this cannot be relied + * upon for non-SFP connections, so we must call it here. + **/ +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + u16 san_mac_data, san_mac_offset; + u8 i; + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. If they're not, no point in calling set_lan_id() here. + */ + ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + + if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { + /* + * No addresses available in this EEPROM. It's not an + * error though, so just wipe the local address and return. + */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; + + goto san_mac_addr_out; + } + + /* make sure we know which port we need to program */ + hw->mac.ops.set_lan_id(hw); + /* apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + for (i = 0; i < 3; i++) { + hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); + san_mac_addr[i * 2] = (u8)(san_mac_data); + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); + san_mac_offset++; + } + +san_mac_addr_out: + return 0; +} + +/** + * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. 
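+ * The table size field is zero-based; e.g. a raw value of 0x003F
+ * decodes to 63 and the function returns 64 vectors.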
+ **/ +u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + u16 msix_count; + pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS, + &msix_count); + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW, so increment to give proper value */ + msix_count++; + + return msix_count; +} + +/** + * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar_lo, mpsar_hi; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + + if (!mpsar_lo && !mpsar_hi) + goto done; + + if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { + if (mpsar_lo) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + mpsar_lo = 0; + } + if (mpsar_hi) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + mpsar_hi = 0; + } + } else if (vmdq < 32) { + mpsar_lo &= ~(1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); + } else { + mpsar_hi &= ~(1 << (vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); + } + + /* was that the last pool using this rar? */ + if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) + hw->mac.ops.clear_rar(hw, rar); +done: + return 0; +} + +/** + * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + if (vmdq < 32) { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar |= 1 << vmdq; + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); + } else { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + mpsar |= 1 << (vmdq - 32); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); + } + return 0; +} + +/** + * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array + * @hw: pointer to hardware structure + **/ +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) +{ + int i; + + for (i = 0; i < 128; i++) + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); + + return 0; +} + +/** + * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) +{ + u32 bits = 0; + u32 first_empty_slot = 0; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* + * Search for the vlan id in the VLVF entries. 
Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + /* + * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan + * in the VLVF. Else use the first empty VLVF register for this + * vlan id. + */ + if (regindex >= IXGBE_VLVF_ENTRIES) { + if (first_empty_slot) + regindex = first_empty_slot; + else { + hw_dbg(hw, "No space in VLVF.\n"); + regindex = IXGBE_ERR_NO_SPACE; + } + } + + return regindex; +} + +/** + * ixgbe_set_vfta_generic - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 bits; + u32 vt; + u32 targetbit; + bool vfta_changed = false; + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* + * this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + if (vt & IXGBE_VT_CTL_VT_ENABLE) { + s32 vlvf_index; + + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index*2)); + bits |= (1 << vind); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB(vlvf_index*2), + bits); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1)); + bits |= (1 << (vind-32)); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1), + bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index*2)); + bits &= ~(1 << vind); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB(vlvf_index*2), + bits); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1)); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1)); + bits &= ~(1 << (vind-32)); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1), + bits); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index*2)); + } + } + + /* + * If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. 
We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ if (!vlan_on) {
+ /* someone wants to clear the vfta entry
+ * but some pools/VFs are still using it.
+ * Ignore it. */
+ vfta_changed = false;
+ }
+ } else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
+
+ if (vfta_changed)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+ return 0;
+}
+
+/**
+ * ixgbe_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+ u32 offset;
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_check_mac_link_generic - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 links_reg, links_orig;
+ u32 i;
+
+ /* clear the old state */
+ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ if (links_orig != links_reg) {
+ hw_dbg(hw, "LINKS changed from %08X to %08X\n",
+ links_orig, links_reg);
+ }
+
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msleep(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_1G_82599)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_100_82599)
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+ /* if link is down, zero out the current_mode */
+ if (!*link_up) {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw->fc.fc_was_autonegged = false;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the EEPROM from the alternative SAN MAC address
+ * block to check support for the alternative WWNN/WWPN prefixes.
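+ * The prefixes, combined with the per-port SAN MAC address, are used
+ * by the FCoE code to derive the World Wide Node/Port Names.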
+ **/ +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + u16 offset, caps; + u16 alt_san_mac_blk_offset; + + /* clear output first */ + *wwnn_prefix = 0xFFFF; + *wwpn_prefix = 0xFFFF; + + /* check if alternative SAN MAC is supported */ + hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, + &alt_san_mac_blk_offset); + + if ((alt_san_mac_blk_offset == 0) || + (alt_san_mac_blk_offset == 0xFFFF)) + goto wwn_prefix_out; + + /* check capability in alternative san mac address block */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; + hw->eeprom.ops.read(hw, offset, &caps); + if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) + goto wwn_prefix_out; + + /* get the corresponding prefix for WWNN/WWPN */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; + hw->eeprom.ops.read(hw, offset, wwnn_prefix); + + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; + hw->eeprom.ops.read(hw, offset, wwpn_prefix); + +wwn_prefix_out: + return 0; +} + +/** + * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow + * control + * @hw: pointer to hardware structure + * + * There are several phys that do not support autoneg flow control. This + * function check the device id to see if the associated phy supports + * autoneg flow control. + **/ +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +{ + + switch (hw->device_id) { + case IXGBE_DEV_ID_X540T: + return 0; + case IXGBE_DEV_ID_82599_T3_LOM: + return 0; + default: + return IXGBE_ERR_FC_NOT_SUPPORTED; + } +} + +/** + * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for anti-spoofing + * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) +{ + int j; + int pf_target_reg = pf >> 3; + int pf_target_shift = pf % 8; + u32 pfvfspoof = 0; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (enable) + pfvfspoof = IXGBE_SPOOF_MACAS_MASK; + + /* + * PFVFSPOOF register array is size 8 with 8 bits assigned to + * MAC anti-spoof enables in each register array element. + */ + for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + + /* If not enabling anti-spoofing then done */ + if (!enable) + return; + + /* + * The PF should be allowed to spoof so that it can support + * emulation mode NICs. 
Reset the bit assigned to the PF + */ + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg)); + pfvfspoof ^= (1 << pf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof); +} + +/** + * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; + u32 pfvfspoof; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * ixgbe_get_device_caps_generic - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. + **/ +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) +{ + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); + + return 0; +} + +/** + * ixgbe_set_rxpba_generic - Initialize RX packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, + int num_pb, + u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number + * of packet buffers requested using supplied strategy. + */ + switch (strategy) { + case (PBA_STRATEGY_WEIGHTED): + /* pba_80_48 strategy weight first half of packet buffer with + * 5/8 of the packet buffer space. + */ + rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; + for (; i < (num_pb / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* Fall through to configure remaining packet buffers */ + case (PBA_STRATEGY_EQUAL): + /* Divide the remaining Rx packet buffer evenly among the TCs */ + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; + for (; i < num_pb; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + default: + break; + } + + /* + * Setup Tx packet buffer and threshold equally for all TCs + * TXPBTHRESH register is set in K so divide by 1024 and subtract + * 10 since the largest packet we support is just over 9K. 
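+ * For example, taking IXGBE_TXPBSIZE_MAX as 0x28000 (160 KB) with
+ * num_pb = 8, each TC gets a 20 KB Tx packet buffer and a threshold
+ * of 20 - 10 = 10.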
+ */
+ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size */
+ for (; i < IXGBE_MAX_PB; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+}
+
+/**
+ * ixgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ *
+ * Calculates the checksum of the buffer over the specified length and
+ * returns it.
+ **/
+static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
+
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be a multiple of 4 bytes
+ *
+ * Communicates with the manageability block. On success returns 0,
+ * otherwise returns IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ **/
+static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
+ u32 length)
+{
+ u32 hicr, i;
+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u8 buf_len, dword_len;
+
+ s32 ret_val = 0;
+
+ if (length == 0 || length & 0x3 ||
+ length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if ((hicr & IXGBE_HICR_EN) == 0) {
+ hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = length >> 2;
+
+ /*
+ * The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < dword_len; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ i, *((u32 *)buffer + i));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
+
+ for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if (!(hicr & IXGBE_HICR_C))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Check command successful completion.
*/ + if (i == IXGBE_HI_COMMAND_TIMEOUT || + (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { + hw_dbg(hw, "Command has failed with no status valid.\n"); + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (i = 0; i < dword_len; i++) + *((u32 *)buffer + i) = + IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i); + + /* If there is any thing in data position pull it in */ + buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto out; + + if (length < (buf_len + hdr_size)) { + hw_dbg(hw, "Buffer not large enough for reply message.\n"); + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + + /* Calculate length in DWORDs, add one for odd lengths */ + dword_len = (buf_len + 1) >> 2; + + /* Pull in the rest of the buffer (i is where we left off)*/ + for (; i < buf_len; i++) + *((u32 *)buffer + i) = + IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i); + +out: + return ret_val; +} + +/** + * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability + * block. On success return 0 + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct ixgbe_hic_drv_info fw_cmd; + int i; + s32 ret_val = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto out; + } + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd, + sizeof(fw_cmd)); + if (ret_val != 0) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = 0; + else + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); +out: + return ret_val; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h new file mode 100644 index 0000000..f24fd64 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -0,0 +1,145 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_COMMON_H_
+#define _IXGBE_COMMON_H_
+
+#include "ixgbe_type.h"
+#include "ixgbe.h"
+
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data);
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev);
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+
+s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct
ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver); + +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy); + +#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) + +#ifndef writeq +#define writeq(val, addr) writel((u32) (val), addr); \ + writel((u32) (val >> 32), (addr + 4)); +#endif + +#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) + +#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg)) + +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\ + writel((value), ((a)->hw_addr + (reg) + ((offset) << 2)))) + +#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\ + readl((a)->hw_addr + (reg) + ((offset) << 2))) + +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) + +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(&adapter->pdev->dev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) +#endif /* IXGBE_COMMON */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c new file mode 100644 index 0000000..9d88c31 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -0,0 +1,320 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_ieee_credits - calculate IEEE traffic class credits + * + * This calculates the IEEE traffic class credits from the configured + * bandwidth percentages. Credits are the smallest unit programmable + * into the underlying hardware. The IEEE 802.1Qaz specification does + * not use bandwidth groups, so this is much simplified from the CEE + * case. + */ +s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame) +{ + int min_percent = 100; + int min_credit, multiplier; + int i; + + min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / + DCB_CREDIT_QUANTUM; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if (bw[i] < min_percent && bw[i]) + min_percent = bw[i]; + } + + multiplier = (min_credit / min_percent) + 1; + + /* Find out the hw credits for each TC */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL); + + if (val < min_credit) + val = min_credit; + refill[i] = val; + + max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; + } + return 0; +} + +/** + * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits + * @hw: pointer to hardware structure + * @dcb_config: struct containing DCB settings + * @max_frame: maximum frame size in bytes + * @direction: configuring either Tx or Rx + * + * This function calculates the credits allocated to each traffic class. + * It should be called only after the rules are checked by + * ixgbe_dcb_check_config(). + */ +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config, + int max_frame, u8 direction) +{ + struct tc_bw_alloc *p; + int min_credit; + int min_multiplier; + int min_percent = 100; + s32 ret_val = 0; + /* Initialization values default for Tx settings */ + u32 credit_refill = 0; + u32 credit_max = 0; + u16 link_percentage = 0; + u8 bw_percent = 0; + u8 i; + + if (dcb_config == NULL) { + ret_val = DCB_ERR_CONFIG; + goto out; + } + + min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / + DCB_CREDIT_QUANTUM; + + /* Find smallest link percentage */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + link_percentage = p->bwg_percent; + + link_percentage = (link_percentage * bw_percent) / 100; + + if (link_percentage && link_percentage < min_percent) + min_percent = link_percentage; + } + + /* + * The ratio between traffic classes will control the bandwidth + * percentages seen on the wire. To calculate this ratio we use + * a multiplier. Each class's refill credits must stay above the + * frame-derived minimum (min_credit), so here we find the + * smallest multiplier that lifts every non-zero bandwidth + * percentage above that minimum. 
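+ * + * Worked example (illustrative numbers): with max_frame = 1518 the + * quantum rounding gives min_credit = (759 + 63) / 64 = 12. If the + * smallest non-zero link percentage is 10, then min_multiplier = + * (12 / 10) + 1 = 2, so that class is refilled min(10 * 2, 511) = 20 + * credits per cycle, safely above min_credit = 12.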
+ */ + min_multiplier = (min_credit / min_percent) + 1; + + /* Find out the link percentage for each TC first */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + + link_percentage = p->bwg_percent; + /* Must be careful of integer division for very small nums */ + link_percentage = (link_percentage * bw_percent) / 100; + if (p->bwg_percent > 0 && link_percentage == 0) + link_percentage = 1; + + /* Save link_percentage for reference */ + p->link_percent = (u8)link_percentage; + + /* Calculate credit refill ratio using multiplier */ + credit_refill = min(link_percentage * min_multiplier, + MAX_CREDIT_REFILL); + p->data_credits_refill = (u16)credit_refill; + + /* Calculate maximum credit for the TC */ + credit_max = (link_percentage * MAX_CREDIT) / 100; + + /* + * Adjustment based on rule checking, if the percentage + * of a TC is too small, the maximum credit may not be + * enough to send out a jumbo frame in data plane arbitration. + */ + if (credit_max && (credit_max < min_credit)) + credit_max = min_credit; + + if (direction == DCB_TX_CONFIG) { + /* + * Adjustment based on rule checking, if the + * percentage of a TC is too small, the maximum + * credit may not be enough to send out a TSO + * packet in descriptor plane arbitration. + */ + if ((hw->mac.type == ixgbe_mac_82598EB) && + credit_max && + (credit_max < MINIMUM_CREDIT_FOR_TSO)) + credit_max = MINIMUM_CREDIT_FOR_TSO; + + dcb_config->tc_config[i].desc_credits_max = + (u16)credit_max; + } + + p->data_credits_max = (u16)credit_max; + } + +out: + return ret_val; +} + +void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) +{ + int i; + + *pfc_en = 0; + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i; +} + +void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, + u16 *refill) +{ + struct tc_bw_alloc *p; + int i; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &cfg->tc_config[i].path[direction]; + refill[i] = p->data_credits_refill; + } +} + +void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) +{ + int i; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + max[i] = cfg->tc_config[i].desc_credits_max; +} + +void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, + u8 *bwgid) +{ + struct tc_bw_alloc *p; + int i; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &cfg->tc_config[i].path[direction]; + bwgid[i] = p->bwg_id; + } +} + +void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, + u8 *ptype) +{ + struct tc_bw_alloc *p; + int i; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &cfg->tc_config[i].path[direction]; + ptype[i] = p->prio_type; + } +} + +/** + * ixgbe_dcb_hw_config - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. 
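+ * + * The CEE configuration is first unpacked into flat per-TC arrays + * (PFC bitmask, refill and max credits, BWG ids, priority types) and + * then handed to the MAC-specific setup routine; unsupported MAC + * types are left unconfigured and 0 is returned.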
+ */ +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = 0; + u8 pfc_en; + u8 ptype[MAX_TRAFFIC_CLASS]; + u8 bwgid[MAX_TRAFFIC_CLASS]; + u16 refill[MAX_TRAFFIC_CLASS]; + u16 max[MAX_TRAFFIC_CLASS]; + /* CEE does not define a priority to tc mapping so map 1:1 */ + u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; + + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); + ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max(dcb_config, max); + ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, + bwgid, ptype); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, + bwgid, ptype, prio_tc); + break; + default: + break; + } + return ret; +} + +/* Helper routines to abstract HW specifics from DCB netlink ops */ +s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en) +{ + int ret = -EINVAL; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en); + break; + default: + break; + } + return ret; +} + +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *prio_type, u8 *prio_tc) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, + prio_type); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, + bwg_id, prio_type, prio_tc); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + prio_type, prio_tc); + break; + default: + break; + } + return 0; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h new file mode 100644 index 0000000..e85826a --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -0,0 +1,167 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_CONFIG_H_ +#define _DCB_CONFIG_H_ + +#include "ixgbe_type.h" + +/* DCB data structures */ + +#define IXGBE_MAX_PACKET_BUFFERS 8 +#define MAX_USER_PRIORITY 8 +#define MAX_TRAFFIC_CLASS 8 +#define MAX_BW_GROUP 8 +#define BW_PERCENT 100 + +#define DCB_TX_CONFIG 0 +#define DCB_RX_CONFIG 1 + +/* DCB error Codes */ +#define DCB_SUCCESS 0 +#define DCB_ERR_CONFIG -1 +#define DCB_ERR_PARAM -2 + +/* Transmit and receive Errors */ +/* Error in bandwidth group allocation */ +#define DCB_ERR_BW_GROUP -3 +/* Error in traffic class bandwidth allocation */ +#define DCB_ERR_TC_BW -4 +/* Traffic class has both link strict and group strict enabled */ +#define DCB_ERR_LS_GS -5 +/* Link strict traffic class has non zero bandwidth */ +#define DCB_ERR_LS_BW_NONZERO -6 +/* Link strict bandwidth group has non zero bandwidth */ +#define DCB_ERR_LS_BWG_NONZERO -7 +/* Traffic class has zero bandwidth */ +#define DCB_ERR_TC_BW_ZERO -8 + +#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF + +struct dcb_pfc_tc_debug { + u8 tc; + u8 pause_status; + u64 pause_quanta; +}; + +enum strict_prio_type { + prio_none = 0, + prio_group, + prio_link +}; + +/* DCB capability definitions */ +#define IXGBE_DCB_PG_SUPPORT 0x00000001 +#define IXGBE_DCB_PFC_SUPPORT 0x00000002 +#define IXGBE_DCB_BCN_SUPPORT 0x00000004 +#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 +#define IXGBE_DCB_GSP_SUPPORT 0x00000010 + +#define IXGBE_DCB_8_TC_SUPPORT 0x80 + +struct dcb_support { + /* DCB capabilities */ + u32 capabilities; + + /* Each bit represents a number of TCs configurable in the hw. + * If 8 traffic classes can be configured, the value is 0x80. + */ + u8 traffic_classes; + u8 pfc_traffic_classes; +}; + +/* Traffic class bandwidth allocation per direction */ +struct tc_bw_alloc { + u8 bwg_id; /* Bandwidth Group (BWG) ID */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 link_percent; /* % of link bandwidth */ + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ + u16 data_credits_refill; /* Credit refill amount in 64B granularity */ + u16 data_credits_max; /* Max credits for a configured packet buffer + * in 64B granularity.*/ + enum strict_prio_type prio_type; /* Link or Group Strict Priority */ +}; + +enum dcb_pfc_type { + pfc_disabled = 0, + pfc_enabled_full, + pfc_enabled_tx, + pfc_enabled_rx +}; + +/* Traffic class configuration */ +struct tc_configuration { + struct tc_bw_alloc path[2]; /* One each for Tx/Rx */ + enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */ + + u16 desc_credits_max; /* For Tx Descriptor arbitration */ + u8 tc; /* Traffic class (TC) */ +}; + +struct dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct ixgbe_dcb_config { + struct dcb_support support; + struct dcb_num_tcs num_tcs; + struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; + u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ + bool pfc_mode_enable; + + u32 dcb_cfg_version; /* Not used...OS-specific? 
*/ + u32 link_speed; /* For bandwidth allocation validation purpose */ +}; + +/* DCB driver APIs */ +void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en); +void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *); +void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); +void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); + +/* DCB credits calculation */ +s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame); +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, + struct ixgbe_dcb_config *, int, u8); + +/* DCB hw initialization */ +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *prio_type, u8 *tc_prio); +s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en); +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); + +/* DCB definitions for credit calculation */ +#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ +#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ +#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ +#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ +#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */ + +#endif /* _DCB_CONFIG_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c new file mode 100644 index 0000000..2288c3c --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -0,0 +1,297 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" + +/** + * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Data Arbiter and credits for each traffic class. 
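+ * + * Each RT2CR register packs the refill credits into the low bits, the + * max credit limit above IXGBE_RT2CR_MCL_SHIFT and, for link-strict + * classes, the IXGBE_RT2CR_LSP flag.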
+ */ +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *prio_type) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; + IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + /* Enable Arbiter */ + reg &= ~IXGBE_RMCS_ARBDIS; + /* Enable Receive Recycle within the BWG */ + reg |= IXGBE_RMCS_RRM; + /* Enable Deficit Fixed Priority arbitration*/ + reg |= IXGBE_RMCS_DFP; + + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + + reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); + + if (prio_type[i] == prio_link) + reg |= IXGBE_RT2CR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); + } + + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg |= IXGBE_RDRXCTL_RDMTS_1_2; + reg |= IXGBE_RDRXCTL_MPBEN; + reg |= IXGBE_RDRXCTL_MCEN; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + /* Make sure there is enough descriptors before arbitration */ + reg &= ~IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) +{ + u32 reg, max_credits; + u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); + + /* Enable arbiter */ + reg &= ~IXGBE_DPMCS_ARBDIS; + /* Enable DFP and Recycle mode */ + reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM); + reg |= IXGBE_DPMCS_TSOEF; + /* Configure Max TSO packet size 34KB including payload and headers */ + reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); + + IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_TDTQ2TCCR_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_TDTQ2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); + } + + return 0; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Data Arbiter and credits for each traffic class. 
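+ * + * As in the descriptor arbiter, each TDPT2TCCR register combines the + * refill credits, max credit limit and bandwidth-group index; the GSP + * and LSP bits mark group-strict and link-strict classes respectively.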
+ */ +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) +{ + u32 reg; + u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); + /* Enable Data Plane Arbiter */ + reg &= ~IXGBE_PDPMCS_ARBDIS; + /* Enable DFP and Transmit Recycle Mode */ + reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); + + IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_TDPT2TCCR_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_TDPT2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); + } + + /* Enable Tx packet buffer division */ + reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + reg |= IXGBE_DTXCTL_ENDBUBD; + IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_pfc_82598 - Config priority flow control + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Priority Flow Control for each traffic class. + */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) +{ + u32 reg, rx_pba_size; + u8 i; + + if (pfc_en) { + /* Enable Transmit Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + reg &= ~IXGBE_RMCS_TFCE_802_3X; + /* correct the reporting of our flow control status */ + reg |= IXGBE_RMCS_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + reg &= ~IXGBE_FCTRL_RFCE; + reg |= IXGBE_FCTRL_RPFCE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + + /* Configure pause time */ + for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); + } + + /* + * Configure flow control thresholds and enable priority flow control + * for each traffic class. + */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + int enabled = pfc_en & (1 << i); + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; + reg = (rx_pba_size - hw->fc.low_water) << 10; + + if (enabled == pfc_enabled_tx || + enabled == pfc_enabled_full) + reg |= IXGBE_FCRTL_XONE; + + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); + + reg = (rx_pba_size - hw->fc.high_water) << 10; + if (enabled == pfc_enabled_tx || + enabled == pfc_enabled_full) + reg |= IXGBE_FCRTH_FCEN; + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); + } + + return 0; +} + +/** + * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. 
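+ * + * The 0x01010101 multiplier below replicates the counter-set index + * into all four byte lanes of a RQSMR/TQSMR register; e.g. j = 2 + * yields 0x02020202, mapping that register's queues to counter set 2.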
+ */ +static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) +{ + u32 reg = 0; + u8 i = 0; + u8 j = 0; + + /* Receive Queues stats setting - 8 queues per statistics reg */ + for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); + } + /* Transmit Queues stats setting - 4 queues per statistics reg */ + for (i = 0; i < 8; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); + reg |= ((0x1010101) * i); + IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); + } + + return 0; +} + +/** + * ixgbe_dcb_hw_config_82598 - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. + */ +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type) +{ + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_pfc_82598(hw, pfc_en); + ixgbe_dcb_config_tc_stats_82598(hw); + + return 0; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h new file mode 100644 index 0000000..2f31893 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -0,0 +1,97 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_82598_CONFIG_H_ +#define _DCB_82598_CONFIG_H_ + +/* DCB register definitions */ + +#define IXGBE_DPMCS_MTSOS_SHIFT 16 +#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */ +#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ + +#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ + +#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */ + +#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 +#define IXGBE_TDTQ2TCCR_GSP 0x40000000 +#define IXGBE_TDTQ2TCCR_LSP 0x80000000 + +#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 +#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 +#define IXGBE_TDPT2TCCR_GSP 0x40000000 +#define IXGBE_TDPT2TCCR_LSP 0x80000000 + +#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */ +#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ + +#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ + +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ + +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 + +/* DCB hardware-specific driver APIs */ + +/* DCB PFC functions */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); + +/* DCB hw initialization */ +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type); + +#endif /* _DCB_82598_CONFIG_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c new file mode 100644 index 0000000..ade9820 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -0,0 +1,346 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Rx Packet Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; WSP) + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + /* Map all traffic classes to their UP, 1 to 1 */ + reg = 0; + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); + + reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; + + if (prio_type[i] == prio_link) + reg |= IXGBE_RTRPT4C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); + } + + /* + * Configure Rx packet plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. 
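+ * + * All 128 per-queue credit registers (RTTDT1C, selected through + * RTTDQSEL) are zeroed first because arbitration is done per traffic + * class here; each RTTDT2C register is then loaded with the TC's + * refill, max and BWG fields.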
+ */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) +{ + u32 reg, max_credits; + u8 i; + + /* Clear the per-Tx queue credits; we use per-TC instead */ + for (i = 0; i < 128; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); + IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); + } + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_RTTDT2C_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_RTTDT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); + } + + /* + * Configure Tx descriptor plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Tx Packet Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc) +{ + u32 reg; + u8 i; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; SP; arb delay) + */ + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | + IXGBE_RTTPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + + /* Map all traffic classes to their UP, 1 to 1 */ + reg = 0; + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_RTTPT2C_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_RTTPT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); + } + + /* + * Configure Tx packet plane (recycle mode; SP; arb delay) and + * enable arbiter + */ + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_pfc_82599 - Configure priority flow control + * @hw: pointer to hardware structure + * @pfc_en: enabled pfc bitmask + * + * Configure Priority Flow Control (PFC) for each traffic class. 
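+ * + * XON/XOFF thresholds are derived from the per-TC packet buffer size + * minus the configured low/high water marks; when @pfc_en is zero the + * routine falls back to legacy link flow control via fc_enable.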
+ */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en) +{ + u32 i, reg, rx_pba_size; + + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + int enabled = pfc_en & (1 << i); + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; + + reg = (rx_pba_size - hw->fc.low_water) << 10; + + if (enabled) + reg |= IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); + + reg = (rx_pba_size - hw->fc.high_water) << 10; + if (enabled) + reg |= IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); + } + + if (pfc_en) { + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + + reg = IXGBE_FCCFG_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); + /* + * Enable Receive PFC + * 82599 will always honor XOFF frames we receive when + * we are in PFC mode however X540 only honors enabled + * traffic classes. + */ + reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + reg &= ~IXGBE_MFLCN_RFCE; + reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; + + if (hw->mac.type == ixgbe_mac_X540) + reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); + + } else { + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + hw->mac.ops.fc_enable(hw, i); + } + + return 0; +} + +/** + * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) +{ + u32 reg = 0; + u8 i = 0; + + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + reg = 0x01010101 * (i / 4); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + * Tx queues are allocated non-uniformly to TCs: + * 32, 32, 16, 16, 8, 8, 8, 8. + */ + for (i = 0; i < 32; i++) { + if (i < 8) + reg = 0x00000000; + else if (i < 16) + reg = 0x01010101; + else if (i < 20) + reg = 0x02020202; + else if (i < 24) + reg = 0x03030303; + else if (i < 26) + reg = 0x04040404; + else if (i < 28) + reg = 0x05050505; + else if (i < 30) + reg = 0x06060606; + else + reg = 0x07070707; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + + return 0; +} + +/** + * ixgbe_dcb_hw_config_82599 - Configure and enable DCB + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * @pfc_en: enabled pfc bitmask + * + * Configure dcb settings and enable dcb mode. 
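+ * + * Runs the Rx packet, Tx descriptor and Tx data arbiter setup in + * turn, then programs PFC and the per-TC statistics mapping.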
+ */ +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) +{ + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + prio_type, prio_tc); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, + bwg_id, prio_type, prio_tc); + ixgbe_dcb_config_pfc_82599(hw, pfc_en); + ixgbe_dcb_config_tc_stats_82599(hw); + + return 0; +} + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h new file mode 100644 index 0000000..08d1749 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -0,0 +1,123 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_82599_CONFIG_H_ +#define _DCB_82599_CONFIG_H_ + +/* DCB register definitions */ +#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, + * 1 WSP - Weighted Strict Priority + */ +#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, + * 1 WRR - Weighted Round Robin + */ +#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ +#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must + * clear! 
+ */ +#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ + +/* Receive UP2TC mapping */ +#define IXGBE_RTRUP2TC_UP_SHIFT 3 +/* Transmit UP2TC mapping */ +#define IXGBE_RTTUP2TC_UP_SHIFT 3 + +#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ +#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ +#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet + * buffers enable + */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores + * (RSS) enable + */ + +/* RTRPCS Bit Masks */ +#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RTRPCS_RAC 0x00000004 +#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* RTTDT2C Bit Masks */ +#define IXGBE_RTTDT2C_MCL_SHIFT 12 +#define IXGBE_RTTDT2C_BWG_SHIFT 9 +#define IXGBE_RTTDT2C_GSP 0x40000000 +#define IXGBE_RTTDT2C_LSP 0x80000000 + +#define IXGBE_RTTPT2C_MCL_SHIFT 12 +#define IXGBE_RTTPT2C_BWG_SHIFT 9 +#define IXGBE_RTTPT2C_GSP 0x40000000 +#define IXGBE_RTTPT2C_LSP 0x80000000 + +/* RTTPCS Bit Masks */ +#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, + * 1 SP - Strict Priority + */ +#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ +#define IXGBE_RTTPCS_ARBD_SHIFT 22 +#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ + +/* SECTXMINIFG DCB */ +#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ + + +/* DCB hardware-specific driver APIs */ + +/* DCB PFC functions */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en); + +/* DCB hw initialization */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc); + +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc); + +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type, + u8 *prio_tc); + +#endif /* _DCB_82599_CONFIG_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c new file mode 100644 index 0000000..0ace6ce --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -0,0 +1,816 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include <linux/dcbnl.h> +#include "ixgbe_dcb_82598.h" +#include "ixgbe_dcb_82599.h" + +/* Callbacks for DCB netlink in the kernel */ +#define BIT_DCB_MODE 0x01 +#define BIT_PFC 0x02 +#define BIT_PG_RX 0x04 +#define BIT_PG_TX 0x08 +#define BIT_APP_UPCHG 0x10 +#define BIT_LINKSPEED 0x80 + +/* Responses for the DCB_C_SET_ALL command */ +#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ +#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ +#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ + +int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, + struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) +{ + struct tc_configuration *src_tc_cfg = NULL; + struct tc_configuration *dst_tc_cfg = NULL; + int i; + + if (!src_dcb_cfg || !dst_dcb_cfg) + return -EINVAL; + + for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { + src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; + dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; + + dst_tc_cfg->path[DCB_TX_CONFIG].prio_type = + src_tc_cfg->path[DCB_TX_CONFIG].prio_type; + + dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id = + src_tc_cfg->path[DCB_TX_CONFIG].bwg_id; + + dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent = + src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent; + + dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap = + src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap; + + dst_tc_cfg->path[DCB_RX_CONFIG].prio_type = + src_tc_cfg->path[DCB_RX_CONFIG].prio_type; + + dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id = + src_tc_cfg->path[DCB_RX_CONFIG].bwg_id; + + dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent = + src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent; + + dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap = + src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap; + } + + for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { + dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG] + [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage + [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; + dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG] + [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage + [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; + } + + for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { + dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc = + src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc; + } + + dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable; + + return 0; +} + +static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); +} + +static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) +{ + u8 err = 0; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* verify there is something to do, if not then exit */ + if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return err; + + if (state > 0) { + /* Turn on DCB */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + e_err(drv, "Enable failed, needs MSI-X\n"); + err = 1; + goto out; + } + + adapter->flags |= IXGBE_FLAG_DCB_ENABLED; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + adapter->last_lfc_mode = adapter->hw.fc.current_mode; + adapter->hw.fc.requested_mode = 
ixgbe_fc_none; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + break; + default: + break; + } + + ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS); + } else { + /* Turn off DCB */ + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + break; + default: + break; + } + ixgbe_setup_tc(netdev, 0); + } + +out: + return err; +} + +static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, + u8 *perm_addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i, j; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + for (i = 0; i < netdev->addr_len; i++) + perm_addr[i] = adapter->hw.mac.perm_addr[i]; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + for (j = 0; j < netdev->addr_len; j++, i++) + perm_addr[i] = adapter->hw.mac.san_addr[j]; + break; + default: + break; + } +} + +static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 prio, u8 bwg_id, u8 bw_pct, + u8 up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = + bw_pct; + if (up_map != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = + up_map; + + if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type != + adapter->dcb_cfg.tc_config[tc].path[0].prio_type) || + (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id != + adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) || + (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != + adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || + (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != + adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) + adapter->dcb_set_bitmap |= BIT_PG_TX; +} + +static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; + + if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != + adapter->dcb_cfg.bw_percentage[0][bwg_id]) + adapter->dcb_set_bitmap |= BIT_PG_TX; +} + +static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 prio, u8 bwg_id, u8 bw_pct, + u8 up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = + bw_pct; + if (up_map != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = + up_map; + + if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type != + adapter->dcb_cfg.tc_config[tc].path[1].prio_type) || + (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id != + 
adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) || + (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != + adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || + (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != + adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) + adapter->dcb_set_bitmap |= BIT_PG_RX; +} + +static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; + + if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != + adapter->dcb_cfg.bw_percentage[1][bwg_id]) + adapter->dcb_set_bitmap |= BIT_PG_RX; +} + +static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bw_pct, + u8 *up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; +} + +static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; +} + +static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bw_pct, + u8 *up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; +} + +static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; +} + +static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, + u8 setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; + if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != + adapter->dcb_cfg.tc_config[priority].dcb_pfc) { + adapter->dcb_set_bitmap |= BIT_PFC; + adapter->temp_dcb_cfg.pfc_mode_enable = true; + } +} + +static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, + u8 *setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; +} + +static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int ret; +#ifdef IXGBE_FCOE + struct dcb_app app = { + .selector = DCB_APP_IDTYPE_ETHTYPE, + .protocol = ETH_P_FCOE, + }; + u8 up = dcb_getapp(netdev, &app); +#endif + + ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, + MAX_TRAFFIC_CLASS); + if (ret) + return DCB_NO_HW_CHG; + +#ifdef IXGBE_FCOE + if (up && (up != (1 << adapter->fcoe.up))) + adapter->dcb_set_bitmap |= BIT_APP_UPCHG; + + /* + * Only take down the adapter if an app change occurred. FCoE + * may shuffle tx rings in this case and this can not be done + * without a reset currently. 
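+ * The device is therefore quiesced under __IXGBE_RESETTING and the + * interrupt scheme is torn down before the new FCoE user priority + * takes effect.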
+ */ + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + adapter->fcoe.up = ffs(up) - 1; + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + ixgbe_clear_interrupt_scheme(adapter); + } +#endif + + if (adapter->dcb_cfg.pfc_mode_enable) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (adapter->hw.fc.current_mode != ixgbe_fc_pfc) + adapter->last_lfc_mode = + adapter->hw.fc.current_mode; + break; + default: + break; + } + adapter->hw.fc.requested_mode = ixgbe_fc_pfc; + } else { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + adapter->hw.fc.requested_mode = ixgbe_fc_none; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + break; + default: + break; + } + } + +#ifdef IXGBE_FCOE + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { + ixgbe_init_interrupt_scheme(adapter); + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + ret = DCB_HW_CHG_RST; + } +#endif + + if (adapter->dcb_set_bitmap & BIT_PFC) { + u8 pfc_en; + ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en); + ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en); + ret = DCB_HW_CHG; + } + + if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { + u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; + u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; + /* Priority to TC mapping in CEE case default to 1:1 */ + u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + +#ifdef CONFIG_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + + ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, + max_frame, DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, + max_frame, DCB_RX_CONFIG); + + ixgbe_dcb_unpack_refill(&adapter->dcb_cfg, + DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max); + ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg, + DCB_TX_CONFIG, bwg_id); + ixgbe_dcb_unpack_prio(&adapter->dcb_cfg, + DCB_TX_CONFIG, prio_type); + + ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, + bwg_id, prio_type, prio_tc); + } + + if (adapter->dcb_cfg.pfc_mode_enable) + adapter->hw.fc.current_mode = ixgbe_fc_pfc; + + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) + clear_bit(__IXGBE_RESETTING, &adapter->state); + adapter->dcb_set_bitmap = 0x00; + return ret; +} + +static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcbx_cap; + break; + default: + *cap = false; + break; + } + + return 0; +} + +static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = MAX_TRAFFIC_CLASS; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = MAX_TRAFFIC_CLASS; + 
break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; +} + +static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +{ + return -EINVAL; +} + +static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return adapter->dcb_cfg.pfc_mode_enable; +} + +static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.pfc_mode_enable = state; + if (adapter->temp_dcb_cfg.pfc_mode_enable != + adapter->dcb_cfg.pfc_mode_enable) + adapter->dcb_set_bitmap |= BIT_PFC; +} + +/** + * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority + * @netdev: the corresponding netdev + * @idtype: identifies the id as ether type or TCP/UDP port number + * @id: id is either ether type or TCP/UDP port number + * + * Returns: on success, returns a non-zero 802.1p user priority bitmap; + * otherwise returns 0 as the invalid user priority bitmap to indicate an + * error. + */ +static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 0; + + return dcb_getapp(netdev, &app); +} + +static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; + + /* No IEEE ETS settings available */ + if (!my_ets) + return -EINVAL; + + ets->ets_cap = MAX_TRAFFIC_CLASS; + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + return 0; +} + +static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; + __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; + int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i, err; + __u64 *p = (__u64 *) ets->prio_tc; + /* naively give each TC a bwg to map onto CEE hardware */ + __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->ixgbe_ieee_ets) { + adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), + GFP_KERNEL); + if (!adapter->ixgbe_ieee_ets) + return -ENOMEM; + } + + memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); + + /* Map TSA onto CEE prio type */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + prio_type[i] = 2; + break; + case IEEE_8021QAZ_TSA_ETS: + prio_type[i] = 0; + break; + default: + /* Hardware only supports strict priority or + * ETS transmission selection algorithms; if + * we receive any other value from dcbnl, + * throw an error. + */ + return -EINVAL; + } + } + + if (*p) + ixgbe_dcbnl_set_state(dev, 1); + else + ixgbe_dcbnl_set_state(dev, 0); + + ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); + err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, + bwg_id, prio_type, ets->prio_tc); + return err; +} + +static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, + struct
ieee_pfc *pfc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; + int i; + + /* No IEEE PFC settings available */ + if (!my_pfc) + return -EINVAL; + + pfc->pfc_cap = MAX_TRAFFIC_CLASS; + pfc->pfc_en = my_pfc->pfc_en; + pfc->mbc = my_pfc->mbc; + pfc->delay = my_pfc->delay; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + pfc->requests[i] = adapter->stats.pxoffrxc[i]; + pfc->indications[i] = adapter->stats.pxofftxc[i]; + } + + return 0; +} + +static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->ixgbe_ieee_pfc) { + adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), + GFP_KERNEL); + if (!adapter->ixgbe_ieee_pfc) + return -ENOMEM; + } + + memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); + err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en); + return err; +} + +#ifdef IXGBE_FCOE +static void ixgbe_dcbnl_devreset(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (netif_running(dev)) + dev->netdev_ops->ndo_stop(dev); + + ixgbe_clear_interrupt_scheme(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + dev->netdev_ops->ndo_open(dev); +} +#endif + +static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int err = -EINVAL; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return err; + + err = dcb_ieee_setapp(dev, app); + +#ifdef IXGBE_FCOE + if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + + if (app_mask & (1 << adapter->fcoe.up)) + return err; + + adapter->fcoe.up = app->priority; + ixgbe_dcbnl_devreset(dev); + } +#endif + return 0; +} + +static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + err = dcb_ieee_delapp(dev, app); + +#ifdef IXGBE_FCOE + if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + + if (app_mask & (1 << adapter->fcoe.up)) + return err; + + adapter->fcoe.up = app_mask ? 
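The app-table handlers above derive the FCoE user priority from an 802.1p bitmap with ffs(), falling back to a default traffic class when no bits remain set. A standalone sketch of that bit trick (DEFAULT_TC is an invented stand-in for IXGBE_FCOE_DEFTC):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define DEFAULT_TC 3	/* illustrative stand-in for IXGBE_FCOE_DEFTC */

/* Pick the lowest set user priority out of an 802.1p bitmap, the
 * same ffs(app_mask) - 1 idiom used by the delapp handler.
 */
static int pick_up(unsigned int app_mask)
{
	return app_mask ? ffs(app_mask) - 1 : DEFAULT_TC;
}

int main(void)
{
	printf("%d\n", pick_up(0x28));	/* bits 3 and 5 set -> 3 */
	printf("%d\n", pick_up(0));	/* empty bitmap -> default TC */
	return 0;
}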
+ ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; + ixgbe_dcbnl_devreset(dev); + } +#endif + return err; +} + +static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + return adapter->dcbx_cap; +} + +static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + if (mode == adapter->dcbx_cap) + return 0; + + adapter->dcbx_cap = mode; + + /* ETS and PFC defaults */ + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + adapter->dcb_set_bitmap |= (BIT_PFC | BIT_PG_TX | BIT_PG_RX); + ixgbe_dcbnl_set_all(dev); + } else { + /* Drop into single TC mode with strict priority, as this + * indicates both CEE and IEEE versions are disabled + */ + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + ixgbe_dcbnl_set_state(dev, 0); + } + + return 0; +} + +const struct dcbnl_rtnl_ops dcbnl_ops = { + .ieee_getets = ixgbe_dcbnl_ieee_getets, + .ieee_setets = ixgbe_dcbnl_ieee_setets, + .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, + .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, + .ieee_setapp = ixgbe_dcbnl_ieee_setapp, + .ieee_delapp = ixgbe_dcbnl_ieee_delapp, + .getstate = ixgbe_dcbnl_get_state, + .setstate = ixgbe_dcbnl_set_state, + .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, + .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, + .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx, + .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, + .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, + .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx, + .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, + .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, + .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, + .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, + .setall = ixgbe_dcbnl_set_all, + .getcap = ixgbe_dcbnl_getcap, + .getnumtcs = ixgbe_dcbnl_getnumtcs, + .setnumtcs = ixgbe_dcbnl_setnumtcs, + .getpfcstate = ixgbe_dcbnl_getpfcstate, + .setpfcstate = ixgbe_dcbnl_setpfcstate, + .getapp = ixgbe_dcbnl_getapp, + .getdcbx = ixgbe_dcbnl_getdcbx, + .setdcbx = ixgbe_dcbnl_setdcbx, +}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c new file mode 100644 index 0000000..82d4244 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -0,0 +1,2592 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details.
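A dcbnl_rtnl_ops table such as the one above only takes effect once it is attached to the net_device; the real assignment lives in ixgbe_main.c, outside this hunk. A minimal sketch of that hook-up, with an invented function name:

#ifdef CONFIG_DCB
#include <linux/netdevice.h>
#include <net/dcbnl.h>

extern const struct dcbnl_rtnl_ops dcbnl_ops;

/* Hypothetical probe-time hook-up: DCB requests arriving over
 * rtnetlink are dispatched through netdev->dcbnl_ops.
 */
static void example_attach_dcbnl(struct net_device *netdev)
{
	netdev->dcbnl_ops = &dcbnl_ops;
}
#endif /* CONFIG_DCB */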
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for ixgbe */ + +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/vmalloc.h> +#include <linux/uaccess.h> + +#include "ixgbe.h" + + +#define IXGBE_ALL_RAR_ENTRIES 16 + +enum {NETDEV_STATS, IXGBE_STATS}; + +struct ixgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define IXGBE_STAT(m) IXGBE_STATS, \ + sizeof(((struct ixgbe_adapter *)0)->m), \ + offsetof(struct ixgbe_adapter, m) +#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct rtnl_link_stats64 *)0)->m), \ + offsetof(struct rtnl_link_stats64, m) + +static struct ixgbe_stats ixgbe_gstrings_stats[] = { + {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, + {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, + {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, + {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, + {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, + {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, + {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, + {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, + {"lsc_int", IXGBE_STAT(lsc_int)}, + {"tx_busy", IXGBE_STAT(tx_busy)}, + {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, + {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, + {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, + {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, + {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, + {"multicast", IXGBE_NETDEV_STAT(multicast)}, + {"broadcast", IXGBE_STAT(stats.bprc)}, + {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, + {"collisions", IXGBE_NETDEV_STAT(collisions)}, + {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, + {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, + {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, + {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, + {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, + {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, + {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, + {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, + {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, + {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, + {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, + {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, + {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, + {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, + {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, + {"tx_restart_queue", IXGBE_STAT(restart_queue)}, + {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, + {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, + {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, + {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, + {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, + {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, + {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, + {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, + {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, + {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, + {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, +
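The IXGBE_STAT/IXGBE_NETDEV_STAT macros above store each field's size and byte offset so that a single loop can later pull every counter out of either structure without per-stat code. The same offsetof() technique in a self-contained userspace sketch (struct and field names invented):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct example_stats {
	uint64_t rx_packets;
	uint32_t tx_busy;
};

struct stat_desc {
	const char *name;
	size_t size;
	size_t offset;
};

#define STAT(m) { #m, sizeof(((struct example_stats *)0)->m), \
		  offsetof(struct example_stats, m) }

static const struct stat_desc descs[] = { STAT(rx_packets), STAT(tx_busy) };

int main(void)
{
	struct example_stats s = { .rx_packets = 42, .tx_busy = 7 };
	size_t i;

	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
		/* generic extraction: base address + recorded offset,
		 * width chosen from the recorded size
		 */
		const char *p = (const char *)&s + descs[i].offset;
		uint64_t v = (descs[i].size == sizeof(uint64_t)) ?
			      *(const uint64_t *)p : *(const uint32_t *)p;
		printf("%s: %llu\n", descs[i].name, (unsigned long long)v);
	}
	return 0;
}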
{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, + {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, + {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, +#ifdef IXGBE_FCOE + {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, + {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, + {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, + {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, + {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, + {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, +#endif /* IXGBE_FCOE */ +}; + +#define IXGBE_QUEUE_STATS_LEN \ + ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \ + ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \ + (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) +#define IXGBE_PB_STATS_LEN ( \ + (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \ + IXGBE_FLAG_DCB_ENABLED) ? \ + (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64) : 0) +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ + IXGBE_PB_STATS_LEN + \ + IXGBE_QUEUE_STATS_LEN) + +static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN + +static int ixgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = 0; + bool link_up; + + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->transceiver = XCVR_EXTERNAL; + if ((hw->phy.media_type == ixgbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + ecmd->supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg); + + switch (hw->mac.type) { + case ixgbe_mac_X540: + ecmd->supported |= SUPPORTED_100baseT_Full; + break; + default: + break; + } + + ecmd->advertising = ADVERTISED_Autoneg; + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_1GB_FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } else { + /* + * Default advertised modes in case + * phy.autoneg_advertised isn't set. 
+ */ + ecmd->advertising |= (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full); + if (hw->mac.type == ixgbe_mac_X540) + ecmd->advertising |= ADVERTISED_100baseT_Full; + } + + if (hw->phy.media_type == ixgbe_media_type_copper) { + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + } else { + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + } + } else if (hw->phy.media_type == ixgbe_media_type_backplane) { + /* Set as FIBRE until SERDES defined in kernel */ + if (hw->device_id == IXGBE_DEV_ID_82598_BX) { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || + (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { + ecmd->supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + } else { + ecmd->supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + } + } else { + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + } + + /* Get PHY type */ + switch (adapter->hw.phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_cu_unknown: + /* Copper 10G-BASET */ + ecmd->port = PORT_TP; + break; + case ixgbe_phy_qt: + ecmd->port = PORT_FIBRE; + break; + case ixgbe_phy_nl: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ + case ixgbe_sfp_type_da_cu: + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + ecmd->port = PORT_DA; + break; + case ixgbe_sfp_type_sr: + case ixgbe_sfp_type_lr: + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + ecmd->port = PORT_FIBRE; + break; + case ixgbe_sfp_type_not_present: + ecmd->port = PORT_NONE; + break; + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + ecmd->port = PORT_TP; + ecmd->supported = SUPPORTED_TP; + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); + break; + case ixgbe_sfp_type_unknown: + default: + ecmd->port = PORT_OTHER; + break; + } + break; + case ixgbe_phy_xaui: + ecmd->port = PORT_NONE; + break; + case ixgbe_phy_unknown: + case ixgbe_phy_generic: + case ixgbe_phy_sfp_unsupported: + default: + ecmd->port = PORT_OTHER; + break; + } + + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) { + switch (link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case IXGBE_LINK_SPEED_1GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case IXGBE_LINK_SPEED_100_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, -1); + ecmd->duplex = -1; + } + + return 0; +} + +static int ixgbe_set_settings(struct net_device *netdev, + struct 
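ixgbe_get_settings() reports a speed only while link is up and otherwise falls back to -1, which ethtool displays as "Unknown!". A reduced sketch of that final speed/duplex mapping, with stand-in enum values:

/* Illustrative reduction of the link-speed reporting at the end of
 * ixgbe_get_settings(); the LNK_* values stand in for the
 * IXGBE_LINK_SPEED_* defines.
 */
enum { LNK_100 = 1, LNK_1G = 2, LNK_10G = 4 };

static int speed_from_link(int link_up, int link_speed)
{
	if (!link_up)
		return -1;		/* no link: speed unknown */
	switch (link_speed) {
	case LNK_10G:
		return 10000;
	case LNK_1G:
		return 1000;
	case LNK_100:
		return 100;
	default:
		return -1;
	}
}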
ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 advertised, old; + s32 err = 0; + + if ((hw->phy.media_type == ixgbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + /* 10000/copper and 1000/copper must autoneg + * this function does not support any duplex forcing, but can + * limit the advertising of the adapter to only 10000 or 1000 */ + if (ecmd->autoneg == AUTONEG_DISABLE) + return -EINVAL; + + old = hw->phy.autoneg_advertised; + advertised = 0; + if (ecmd->advertising & ADVERTISED_10000baseT_Full) + advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (ecmd->advertising & ADVERTISED_1000baseT_Full) + advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (ecmd->advertising & ADVERTISED_100baseT_Full) + advertised |= IXGBE_LINK_SPEED_100_FULL; + + if (old == advertised) + return err; + /* this sets the link speed and restarts auto-neg */ + hw->mac.autotry_restart = true; + err = hw->mac.ops.setup_link(hw, advertised, true, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + hw->mac.ops.setup_link(hw, old, true, true); + } + } else { + /* in this case we currently only support 10Gb/FULL */ + u32 speed = ethtool_cmd_speed(ecmd); + if ((ecmd->autoneg == AUTONEG_ENABLE) || + (ecmd->advertising != ADVERTISED_10000baseT_Full) || + (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} + +static void ixgbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + /* + * Flow Control Autoneg isn't on if + * - we didn't ask for it OR + * - it failed, we know this by tx & rx being off + */ + if (hw->fc.disable_fc_autoneg || + (hw->fc.current_mode == ixgbe_fc_none)) + pause->autoneg = 0; + else + pause->autoneg = 1; + + if (hw->fc.current_mode == ixgbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == ixgbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; +#ifdef CONFIG_DCB + } else if (hw->fc.current_mode == ixgbe_fc_pfc) { + pause->rx_pause = 0; + pause->tx_pause = 0; +#endif + } +} + +static int ixgbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_fc_info fc; + +#ifdef CONFIG_DCB + if (adapter->dcb_cfg.pfc_mode_enable || + ((hw->mac.type == ixgbe_mac_82598EB) && + (adapter->flags & IXGBE_FLAG_DCB_ENABLED))) + return -EINVAL; + +#endif + fc = hw->fc; + + if (pause->autoneg != AUTONEG_ENABLE) + fc.disable_fc_autoneg = true; + else + fc.disable_fc_autoneg = false; + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = ixgbe_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + fc.requested_mode = ixgbe_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + fc.requested_mode = ixgbe_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + fc.requested_mode = ixgbe_fc_none; + else + return -EINVAL; + +#ifdef CONFIG_DCB + adapter->last_lfc_mode = fc.requested_mode; +#endif + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } + + return 0; +} + +static u32 
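ixgbe_set_pauseparam() works on a scratch copy of the flow-control state and reinitializes the device only when something actually changed. Its rx/tx/autoneg truth table, condensed into plain C:

/* Same truth table as ixgbe_set_pauseparam(): requesting autoneg or
 * both directions yields symmetric flow control.
 */
enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode fc_from_pauseparam(int autoneg, int rx, int tx)
{
	if ((rx && tx) || autoneg)
		return FC_FULL;
	if (rx)
		return FC_RX_PAUSE;
	if (tx)
		return FC_TX_PAUSE;
	return FC_NONE;
}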
ixgbe_get_msglevel(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int ixgbe_get_regs_len(struct net_device *netdev) +{ +#define IXGBE_REGS_LEN 1128 + return IXGBE_REGS_LEN * sizeof(u32); +} + +#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ + +static void ixgbe_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; + + /* General Registers */ + regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); + regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); + regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); + regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); + regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); + + /* NVM Register */ + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); + regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); + regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); + regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); + regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); + regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); + regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); + regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); + regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); + + /* Interrupt */ + /* don't read EICR because it can clear interrupt causes, instead + * read EICS which is a shadow but doesn't clear EICR */ + regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); + regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); + regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); + regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); + regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); + regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); + regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); + regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); + regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); + regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); + regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); + + /* Flow Control */ + regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); + regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); + regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); + regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); + regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); + for (i = 0; i < 8; i++) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); + break; + case ixgbe_mac_82599EB: + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); + break; + default: + break; + } + } + regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); + regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); + + /* Receive DMA */ + for (i = 0; i < 64; i++) + regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + for (i = 0; i < 64; i++) + regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + for (i = 
0; i < 64; i++) + regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + for (i = 0; i < 64; i++) + regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + for (i = 0; i < 64; i++) + regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + for (i = 0; i < 64; i++) + regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + for (i = 0; i < 16; i++) + regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + for (i = 0; i < 16; i++) + regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + for (i = 0; i < 8; i++) + regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); + + /* Receive */ + regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); + for (i = 0; i < 16; i++) + regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); + regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); + regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); + regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); + regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + for (i = 0; i < 8; i++) + regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); + regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); + + /* Transmit */ + for (i = 0; i < 32; i++) + regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + for (i = 0; i < 32; i++) + regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + for (i = 0; i < 32; i++) + regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + for (i = 0; i < 32; i++) + regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + for (i = 0; i < 32; i++) + regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + for (i = 0; i < 32; i++) + regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + for (i = 0; i < 32; i++) + regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); + for (i = 0; i < 32; i++) + regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); + regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + for (i = 0; i < 16; i++) + regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); + for (i = 0; i < 8; i++) + regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); + regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); + + /* Wake Up */ + regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); + regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); + regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); + regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); + regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); + regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); + regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); + regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); + regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); + + /* DCB */ + regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); + regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); + regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); + regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); + for (i = 0; i < 8; i++) + regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); + for (i = 0; i < 8; i++) + regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); + for (i = 0; i < 8; i++) + regs_buff[849 
+ i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); + for (i = 0; i < 8; i++) + regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); + for (i = 0; i < 8; i++) + regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); + for (i = 0; i < 8; i++) + regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); + + /* Statistics */ + regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); + regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); + regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); + regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); + for (i = 0; i < 8; i++) + regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); + regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); + regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); + regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); + regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); + regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); + regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); + regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); + for (i = 0; i < 8; i++) + regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); + for (i = 0; i < 8; i++) + regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); + for (i = 0; i < 8; i++) + regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); + for (i = 0; i < 8; i++) + regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); + regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); + regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); + regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); + regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); + regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); + regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); + regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); + regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); + regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); + regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); + regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); + regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); + for (i = 0; i < 8; i++) + regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); + regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); + regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); + regs_buff[956] = IXGBE_GET_STAT(adapter, roc); + regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); + regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); + regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); + regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); + regs_buff[961] = IXGBE_GET_STAT(adapter, tor); + regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); + regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); + regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); + regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); + regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); + regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); + regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); + regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); + regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); + regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); + regs_buff[973] = IXGBE_GET_STAT(adapter, xec); + for (i = 0; i < 16; i++) + regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); + for (i = 0; i < 16; i++) + regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); + for (i = 0; i < 16; i++) + regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); + for (i = 0; i < 16; i++) + regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); + + /* MAC */ + regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); + regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + regs_buff[1041] = IXGBE_READ_REG(hw, 
IXGBE_PCS1GDBG0); + regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); + regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); + regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); + regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); + regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); + regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); + regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); + regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); + regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); + regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); + regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); + regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); + regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); + regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); + regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); + regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); + regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); + regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); + regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); + regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); + regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); + regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); + regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); + regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); + regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); + regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + + /* Diagnostic */ + regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); + for (i = 0; i < 8; i++) + regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); + regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); + for (i = 0; i < 4; i++) + regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); + regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); + regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); + for (i = 0; i < 8; i++) + regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); + regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); + for (i = 0; i < 4; i++) + regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); + regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); + regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); + regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); + regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); + regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); + regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); + regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); + regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); + regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); + regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); + regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); + for (i = 0; i < 8; i++) + regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); + regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); + regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); + regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); + regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); + regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); + regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); + regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); + regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); + regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); +} + +static int ixgbe_get_eeprom_len(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = 
netdev_priv(netdev); + return adapter->hw.eeprom.word_size * 2; +} + +static int ixgbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, + eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static void ixgbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; + + strncpy(drvinfo->driver, ixgbe_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, ixgbe_driver_version, + sizeof(drvinfo->version) - 1); + + snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", + (adapter->eeprom_version & 0xF000) >> 12, + (adapter->eeprom_version & 0x0FF0) >> 4, + adapter->eeprom_version & 0x000F); + + strncpy(drvinfo->fw_version, firmware_version, + sizeof(drvinfo->fw_version)); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = IXGBE_STATS_LEN; + drvinfo->testinfo_len = IXGBE_TEST_LEN; + drvinfo->regdump_len = ixgbe_get_regs_len(netdev); +} + +static void ixgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; + struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; + + ring->rx_max_pending = IXGBE_MAX_RXD; + ring->tx_max_pending = IXGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int ixgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + bool need_update = false; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD); + new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD); + new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring[0]->count) && + (new_rx_count == adapter->rx_ring[0]->count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < 
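ixgbe_get_eeprom() turns an arbitrary byte range into an inclusive range of 16-bit words, reads whole words, and then copies back starting at the odd-byte offset. The index arithmetic, worked through for a 5-byte read at byte offset 3:

#include <stdio.h>

int main(void)
{
	/* Request 5 bytes at byte offset 3 of a word-addressable
	 * EEPROM, as ixgbe_get_eeprom() would see it.
	 */
	unsigned int offset = 3, len = 5;
	unsigned int first_word = offset >> 1;		/* word 1 */
	unsigned int last_word = (offset + len - 1) >> 1;	/* word 3 */
	unsigned int words = last_word - first_word + 1;	/* 3 words */

	printf("read words %u..%u (%u words), copy out from byte %u\n",
	       first_word, last_word, words, offset & 1);
	return 0;
}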
adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); + if (!temp_tx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_tx_ring[i], adapter->tx_ring[i], + sizeof(struct ixgbe_ring)); + temp_tx_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); + if (err) { + while (i) { + i--; + ixgbe_free_tx_resources(&temp_tx_ring[i]); + } + goto clear_reset; + } + } + need_update = true; + } + + temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); + if (!temp_rx_ring) { + err = -ENOMEM; + goto err_setup; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_rx_ring[i], adapter->rx_ring[i], + sizeof(struct ixgbe_ring)); + temp_rx_ring[i].count = new_rx_count; + err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); + if (err) { + while (i) { + i--; + ixgbe_free_rx_resources(&temp_rx_ring[i]); + } + goto err_setup; + } + } + need_update = true; + } + + /* if rings need to be updated, here's the place to do it in one shot */ + if (need_update) { + ixgbe_down(adapter); + + /* tx */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbe_free_tx_resources(adapter->tx_ring[i]); + memcpy(adapter->tx_ring[i], &temp_tx_ring[i], + sizeof(struct ixgbe_ring)); + } + adapter->tx_ring_count = new_tx_count; + } + + /* rx */ + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbe_free_rx_resources(adapter->rx_ring[i]); + memcpy(adapter->rx_ring[i], &temp_rx_ring[i], + sizeof(struct ixgbe_ring)); + } + adapter->rx_ring_count = new_rx_count; + } + ixgbe_up(adapter); + } + + vfree(temp_rx_ring); +err_setup: + vfree(temp_tx_ring); +clear_reset: + clear_bit(__IXGBE_RESETTING, &adapter->state); + return err; +} + +static int ixgbe_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return IXGBE_TEST_LEN; + case ETH_SS_STATS: + return IXGBE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void ixgbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; + unsigned int start; + struct ixgbe_ring *ring; + int i, j; + char *p = NULL; + + ixgbe_update_stats(adapter); + net_stats = dev_get_stats(netdev, &temp); + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + switch (ixgbe_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) net_stats + + ixgbe_gstrings_stats[i].stat_offset; + break; + case IXGBE_STATS: + p = (char *) adapter + + ixgbe_gstrings_stats[i].stat_offset; + break; + } + + data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
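ixgbe_set_ringparam() builds fully initialized replacement rings before touching the live ones, so an allocation failure leaves traffic running on the old rings; only then is the swap bracketed by ixgbe_down()/ixgbe_up(). The same order of operations on toy types:

#include <stdlib.h>

/* Schematic of ixgbe_set_ringparam()'s swap strategy: allocate the
 * replacement first, disturb the live ring only after success.
 */
struct ring { int *desc; int count; };

static int resize_ring(struct ring *live, int new_count)
{
	struct ring shadow;

	shadow.desc = calloc(new_count, sizeof(*shadow.desc));
	if (!shadow.desc)
		return -1;	/* live ring untouched on failure */
	shadow.count = new_count;

	/* the driver wraps this swap in ixgbe_down()/ixgbe_up() */
	free(live->desc);
	*live = shadow;
	return 0;
}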
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_bh(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + i += 2; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_bh(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + i += 2; + } + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } + } +} + +static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *ixgbe_gstrings_test, + IXGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + } + /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + *data = 0; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (link_up) + return *data; + else + *data = 1; + return *data; +} + +/* ethtool register test data */ +struct ixgbe_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
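The per-queue packet/byte counters above are sampled under a u64_stats seqcount: the reader retries whenever a writer raced with the snapshot. The generic shape of that loop, where fetch_begin()/fetch_retry() are stand-ins for u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh():

/* Reader side of the seqcount sampling pattern used for the
 * per-ring counters (sketch; the ring type is invented).
 */
struct ring_stats { unsigned long long packets, bytes; };

extern unsigned int fetch_begin(const void *sync);	/* stand-in */
extern int fetch_retry(const void *sync, unsigned int start); /* stand-in */

static void sample_ring(const void *sync, const struct ring_stats *rs,
			unsigned long long out[2])
{
	unsigned int start;

	do {
		start = fetch_begin(sync);	/* snapshot seq count */
		out[0] = rs->packets;
		out[1] = rs->bytes;
	} while (fetch_retry(sync, start));	/* redo if a writer raced */
}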
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default 82599 register test */ +static const struct ixgbe_reg_test reg_test_82599[] = { + { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* default 82598 register test */ +static const struct ixgbe_reg_test reg_test_82598[] = { + { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + /* RDH is read-only for 82598, only test RDT. 
*/ + { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, + { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, + { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = readl(adapter->hw.hw_addr + reg); + writel((test_pattern[pat] & write), + (adapter->hw.hw_addr + reg)); + val = readl(adapter->hw.hw_addr + reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, "pattern test reg %04X failed: got " + "0x%08X expected 0x%08X\n", + reg, val, (test_pattern[pat] & write & mask)); + *data = reg; + writel(before, adapter->hw.hw_addr + reg); + return 1; + } + writel(before, adapter->hw.hw_addr + reg); + } + return 0; +} + +static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + before = readl(adapter->hw.hw_addr + reg); + writel((write & mask), (adapter->hw.hw_addr + reg)); + val = readl(adapter->hw.hw_addr + reg); + if ((write & mask) != (val & mask)) { + e_err(drv, "set/check reg %04X test failed: got 0x%08X " + "expected 0x%08X\n", reg, (val & mask), (write & mask)); + *data = reg; + writel(before, (adapter->hw.hw_addr + reg)); + return 1; + } + writel(before, (adapter->hw.hw_addr + reg)); + return 0; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) \ + + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) \ + +static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) +{ + const struct ixgbe_reg_test *test; + u32 value, before, after; + u32 i, toggle; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + toggle = 0x7FFFF3FF; + test = reg_test_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + toggle = 0x7FFFF30F; + test = reg_test_82599; + break; + default: + *data = 1; + return 1; + break; + } + + /* + * Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable on newer MACs. 
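reg_pattern_test() writes each pattern through the register's writable bits, expects to read back pattern & write & mask, and restores the previous value in all cases. The core check, modeled on a fake register in plain C:

#include <stdio.h>

/* Reduced model of reg_pattern_test(): the "register" keeps only the
 * bits in 'write'; a readback must match pattern & write & mask.
 */
static unsigned int fake_reg;

static int pattern_test(unsigned int mask, unsigned int write)
{
	static const unsigned int pat[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF };
	unsigned int i, val, before = fake_reg;

	for (i = 0; i < sizeof(pat) / sizeof(pat[0]); i++) {
		fake_reg = pat[i] & write;	/* models writel() */
		val = fake_reg;			/* models readl() */
		if (val != (pat[i] & write & mask)) {
			fake_reg = before;	/* restore, as the driver does */
			return 1;		/* failed */
		}
		fake_reg = before;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", pattern_test(0x0000FFFF, 0x0000FFFF));	/* 0 = pass */
	return 0;
}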
+ */ + before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); + value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); + after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: 0x%08X " + "expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + REG_PATTERN_TEST(test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + REG_SET_AND_CHECK(test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + writel(test->write, + (adapter->hw.hw_addr + test->reg) + + (i * 0x40)); + break; + case TABLE32_TEST: + REG_PATTERN_TEST(test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + REG_PATTERN_TEST(test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + REG_PATTERN_TEST((test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + } + test++; + } + + *data = 0; + return 0; +} + +static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + if (hw->eeprom.ops.validate_checksum(hw, NULL)) + *data = 1; + else + *data = 0; + return *data; +} + +static irqreturn_t ixgbe_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); + + return IRQ_HANDLED; +} + +static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", shared_int ? + "shared" : "unshared"); + + /* Disable all the interrupts */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + ~mask & 0x00007FFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, + ~mask & 0x00007FFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. + */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr &mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* + * Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + ~mask & 0x00007FFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, + ~mask & 0x00007FFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_ctl; + + /* shut down the DMA engines now so they can be reinitialized later */ + + /* first Rx */ + reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + reg_ctl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); + ixgbe_disable_rx_queue(adapter, rx_ring); + + /* now Tx */ + reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); + reg_ctl &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg_ctl &= ~IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); + break; + default: + break; + } + + ixgbe_reset(adapter); + + ixgbe_free_tx_resources(&adapter->test_tx_ring); + ixgbe_free_rx_resources(&adapter->test_rx_ring); +} + +static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + u32 rctl, reg_data; + int ret_val; + int err; + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IXGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + tx_ring->numa_node = adapter->node; + + err = ixgbe_setup_tx_resources(tx_ring); + if (err) + return 1; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); + reg_data |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); + break; + default: + break; + } + + ixgbe_configure_tx_ring(adapter, tx_ring); + + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = 
IXGBE_DEFAULT_RXD; + rx_ring->queue_index = 0; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; + rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; + rx_ring->numa_node = adapter->node; + + err = ixgbe_setup_rx_resources(rx_ring); + if (err) { + ret_val = 4; + goto err_nomem; + } + + rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); + + ixgbe_configure_rx_ring(adapter, rx_ring); + + rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); + + return 0; + +err_nomem: + ixgbe_free_desc_rings(adapter); + return ret_val; +} + +static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_data; + + /* X540 needs to set the MACC.FLU bit to force link up */ + if (adapter->hw.mac.type == ixgbe_mac_X540) { + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC); + reg_data |= IXGBE_MACC_FLU; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data); + } + + /* right now we only support MAC loopback in the driver */ + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); + /* Setup MAC loopback */ + reg_data |= IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); + reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data); + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC); + reg_data &= ~IXGBE_AUTOC_LMS_MASK; + reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Disable Atlas Tx lanes; re-enabled in reset path */ + if (hw->mac.type == ixgbe_mac_82598EB) { + u8 atlas; + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); + } + + return 0; +} + +static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) +{ + u32 reg_data; + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); + reg_data &= ~IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); +} + +static void ixgbe_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int ixgbe_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) { + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, + struct ixgbe_ring 
*tx_ring, + unsigned int size) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer_info; + struct ixgbe_tx_buffer *tx_buffer_info; + const int bufsz = rx_ring->rx_buf_len; + u32 staterr; + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + while (staterr & IXGBE_RXD_STAT_DD) { + /* check Rx buffer */ + rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; + + /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ + dma_unmap_single(rx_ring->dev, + rx_buffer_info->dma, + bufsz, + DMA_FROM_DEVICE); + rx_buffer_info->dma = 0; + + /* verify contents of skb */ + if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size)) + count++; + + /* unmap buffer on Tx side */ + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + /* re-map buffers to ring, store next to clean values */ + ixgbe_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + ixgbe_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = ixgbe_xmit_frame_ring(skb, + adapter, + tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + break; + } + } + + /* free the original skb */ + kfree_skb(skb); + + return ret_val; +} + +static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) +{ + *data = ixgbe_setup_desc_rings(adapter); + if (*data) + goto out; + *data = ixgbe_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = ixgbe_run_loopback_test(adapter); + ixgbe_loopback_cleanup(adapter); + +err_loopback: + ixgbe_free_desc_rings(adapter); +out: + return *data; +} + +static void ixgbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + 
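+	/*
+	 * ethtool exposes five 64-bit results for this device; the
+	 * offline path below fills them in as:
+	 *	data[0] - register test		data[1] - eeprom test
+	 *	data[2] - interrupt test	data[3] - loopback test
+	 *	data[4] - link test (the only test also run online)
+	 */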
set_bit(__IXGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + netdev_warn(netdev, "%s", + "offline diagnostic is not " + "supported when VFs are " + "present\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__IXGBE_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + ixgbe_reset(adapter); + + e_info(hw, "register testing starting\n"); + if (ixgbe_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); + e_info(hw, "eeprom testing starting\n"); + if (ixgbe_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); + e_info(hw, "interrupt testing starting\n"); + if (ixgbe_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. */ + if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | + IXGBE_FLAG_VMDQ_ENABLED)) { + e_info(hw, "Skip MAC loopback diagnostic in VT " + "mode\n"); + data[3] = 0; + goto skip_loopback; + } + + ixgbe_reset(adapter); + e_info(hw, "loopback testing starting\n"); + if (ixgbe_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + +skip_loopback: + ixgbe_reset(adapter); + + clear_bit(__IXGBE_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__IXGBE_TESTING, &adapter->state); + } +skip_ol_tests: + msleep_interruptible(4 * 1000); +} + +static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct ixgbe_hw *hw = &adapter->hw; + int retval = 1; + + /* WOL not supported except for the following */ + switch(hw->device_id) { + case IXGBE_DEV_ID_82599_SFP: + /* Only this subdevice supports WOL */ + if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) { + wol->supported = 0; + break; + } + retval = 0; + break; + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + /* All except this subdevice support WOL */ + if (hw->subsystem_device_id == + IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { + wol->supported = 0; + break; + } + retval = 0; + break; + case IXGBE_DEV_ID_82599_KX4: + retval = 0; + break; + default: + wol->supported = 0; + } + + return retval; +} + +static void ixgbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + if (ixgbe_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + if (adapter->wol & IXGBE_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IXGBE_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IXGBE_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if 
(adapter->wol & IXGBE_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (ixgbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? -EOPNOTSUPP : 0; + + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IXGBE_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IXGBE_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IXGBE_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IXGBE_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int ixgbe_nway_reset(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + + return 0; +} + +static int ixgbe_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + return 2; + + case ETHTOOL_ID_ON: + hw->mac.ops.led_on(hw, IXGBE_LED_ON); + break; + + case ETHTOOL_ID_OFF: + hw->mac.ops.led_off(hw, IXGBE_LED_ON); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); + break; + } + + return 0; +} + +static int ixgbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + + /* only valid if in constant ITR mode */ + switch (adapter->rx_itr_setting) { + case 0: + /* throttling disabled */ + ec->rx_coalesce_usecs = 0; + break; + case 1: + /* dynamic ITR mode */ + ec->rx_coalesce_usecs = 1; + break; + default: + /* fixed interrupt rate mode */ + ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param; + break; + } + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + switch (adapter->tx_itr_setting) { + case 0: + /* throttling disabled */ + ec->tx_coalesce_usecs = 0; + break; + case 1: + /* dynamic ITR mode */ + ec->tx_coalesce_usecs = 1; + break; + default: + ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param; + break; + } + + return 0; +} + +/* + * this function must be called before setting the new value of + * rx_itr_setting + */ +static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter, + struct ethtool_coalesce *ec) +{ + struct net_device *netdev = adapter->netdev; + + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) + return false; + + /* if interrupt rate is too high then disable RSC */ + if (ec->rx_coalesce_usecs != 1 && + ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) { + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + return true; + } + } else { + /* check the feature flag value and enable RSC if necessary */ + if ((netdev->features & NETIF_F_LRO) && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + e_info(probe, "rx-usecs set to %d, " + "re-enabling RSC\n", + ec->rx_coalesce_usecs); + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + return 
true; + } + } + return false; +} + +static int ixgbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_q_vector *q_vector; + int i; + bool need_reset = false; + + /* don't accept tx specific changes if we've got mixed RxTx vectors */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count + && ec->tx_coalesce_usecs) + return -EINVAL; + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if (ec->rx_coalesce_usecs > 1) { + /* check the limits */ + if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) || + (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) + return -EINVAL; + + /* check the old value and enable RSC if necessary */ + need_reset = ixgbe_update_rsc(adapter, ec); + + /* store the value in ints/second */ + adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; + + /* static value of interrupt rate */ + adapter->rx_itr_setting = adapter->rx_eitr_param; + /* clear the lower bit as its used for dynamic state */ + adapter->rx_itr_setting &= ~1; + } else if (ec->rx_coalesce_usecs == 1) { + /* check the old value and enable RSC if necessary */ + need_reset = ixgbe_update_rsc(adapter, ec); + + /* 1 means dynamic mode */ + adapter->rx_eitr_param = 20000; + adapter->rx_itr_setting = 1; + } else { + /* check the old value and enable RSC if necessary */ + need_reset = ixgbe_update_rsc(adapter, ec); + /* + * any other value means disable eitr, which is best + * served by setting the interrupt rate very high + */ + adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; + adapter->rx_itr_setting = 0; + } + + if (ec->tx_coalesce_usecs > 1) { + /* + * don't have to worry about max_int as above because + * tx vectors don't do hardware RSC (an rx function) + */ + /* check the limits */ + if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) || + (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE)) + return -EINVAL; + + /* store the value in ints/second */ + adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs; + + /* static value of interrupt rate */ + adapter->tx_itr_setting = adapter->tx_eitr_param; + + /* clear the lower bit as its used for dynamic state */ + adapter->tx_itr_setting &= ~1; + } else if (ec->tx_coalesce_usecs == 1) { + /* 1 means dynamic mode */ + adapter->tx_eitr_param = 10000; + adapter->tx_itr_setting = 1; + } else { + adapter->tx_eitr_param = IXGBE_MAX_INT_RATE; + adapter->tx_itr_setting = 0; + } + + /* MSI/MSIx Interrupt Mode */ + if (adapter->flags & + (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) { + int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + for (i = 0; i < num_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->eitr = adapter->tx_eitr_param; + else + /* rx only or mixed */ + q_vector->eitr = adapter->rx_eitr_param; + q_vector->tx.work_limit = adapter->tx_work_limit; + ixgbe_write_eitr(q_vector); + } + /* Legacy Interrupt Mode */ + } else { + q_vector = adapter->q_vector[0]; + q_vector->eitr = adapter->rx_eitr_param; + q_vector->tx.work_limit = adapter->tx_work_limit; + ixgbe_write_eitr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + ixgbe_do_reset(netdev); + + return 0; +} + +static int ixgbe_get_ethtool_fdir_entry(struct 
ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + union ixgbe_atr_input *mask = &adapter->fdir_mask; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct hlist_node *node, *node2; + struct ixgbe_fdir_filter *rule = NULL; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node, node2, + &adapter->fdir_filter_list, fdir_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + /* fill out the flow spec entry */ + + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto = 0; + break; + default: + return -EINVAL; + } + + fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; + fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; + fsp->m_ext.vlan_tci = mask->formatted.vlan_id; + fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; + fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; + fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); + fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); + fsp->flow_type |= FLOW_EXT; + + /* record action */ + if (rule->action == IXGBE_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->action; + + return 0; +} + +static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hlist_node *node, *node2; + struct ixgbe_fdir_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node, node2, + &adapter->fdir_filter_list, fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + return 0; +} + +static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + void *rule_locs) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, + (u32 *)rule_locs); + break; + default: + break; + } + + return ret; +} + +static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct hlist_node *node, *node2, *parent; + struct ixgbe_fdir_filter *rule; + int err = -EINVAL; + + parent = 
NULL; + rule = NULL; + + hlist_for_each_entry_safe(rule, node, node2, + &adapter->fdir_filter_list, fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = node; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input || (rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash)) { + err = ixgbe_fdir_erase_perfect_filter_82599(hw, + &rule->filter, + sw_idx); + } + + hlist_del(&rule->fdir_node); + kfree(rule); + adapter->fdir_filter_count--; + } + + /* + * If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_after(parent, &input->fdir_node); + else + hlist_add_head(&input->fdir_node, + &adapter->fdir_filter_list); + + /* update counts */ + adapter->fdir_filter_count++; + + return 0; +} + +static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, + u8 *flow_type) +{ + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + if (!fsp->m_u.usr_ip4_spec.proto) { + *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; + break; + } + default: + return 0; + } + break; + default: + return 0; + } + + return 1; +} + +static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input mask; + int err; + + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + return -EOPNOTSUPP; + + /* + * Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
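+	 * For example, with num_rx_queues == 8 a ring_cookie of 0..7
+	 * programs a redirect to that queue, RX_CLS_FLOW_DISC programs
+	 * a drop filter, and any other value is rejected with -EINVAL.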
+ */ + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (fsp->ring_cookie >= adapter->num_rx_queues)) + return -EINVAL; + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + memset(&mask, 0, sizeof(union ixgbe_atr_input)); + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (!ixgbe_flowspec_to_flow_type(fsp, + &input->filter.formatted.flow_type)) { + e_err(drv, "Unrecognized flow type\n"); + goto err_out; + } + + mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + + if (fsp->flow_type & FLOW_EXT) { + input->filter.formatted.vm_pool = + (unsigned char)ntohl(fsp->h_ext.data[1]); + mask.formatted.vm_pool = + (unsigned char)ntohl(fsp->m_ext.data[1]); + input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; + mask.formatted.vlan_id = fsp->m_ext.vlan_tci; + input->filter.formatted.flex_bytes = + fsp->h_ext.vlan_etype; + mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; + } + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = IXGBE_FDIR_DROP_QUEUE; + else + input->action = fsp->ring_cookie; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + /* save mask and program input mask into HW */ + memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + if (err) { + e_err(drv, "Error writing mask\n"); + goto err_out_w_lock; + } + } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + e_err(drv, "Only one mask supported per port\n"); + goto err_out_w_lock; + } + + /* apply mask and compute/store hash */ + ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + + /* program filters to filter memory */ + err = ixgbe_fdir_write_perfect_filter_82599(hw, + &input->filter, input->sw_idx, + (input->action == IXGBE_FDIR_DROP_QUEUE) ? 
+ IXGBE_FDIR_DROP_QUEUE : + adapter->rx_ring[input->action]->reg_idx); + if (err) + goto err_out_w_lock; + + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(input); + return -EINVAL; +} + +static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +} + +static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static const struct ethtool_ops ixgbe_ethtool_ops = { + .get_settings = ixgbe_get_settings, + .set_settings = ixgbe_set_settings, + .get_drvinfo = ixgbe_get_drvinfo, + .get_regs_len = ixgbe_get_regs_len, + .get_regs = ixgbe_get_regs, + .get_wol = ixgbe_get_wol, + .set_wol = ixgbe_set_wol, + .nway_reset = ixgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ixgbe_get_eeprom_len, + .get_eeprom = ixgbe_get_eeprom, + .get_ringparam = ixgbe_get_ringparam, + .set_ringparam = ixgbe_set_ringparam, + .get_pauseparam = ixgbe_get_pauseparam, + .set_pauseparam = ixgbe_set_pauseparam, + .get_msglevel = ixgbe_get_msglevel, + .set_msglevel = ixgbe_set_msglevel, + .self_test = ixgbe_diag_test, + .get_strings = ixgbe_get_strings, + .set_phys_id = ixgbe_set_phys_id, + .get_sset_count = ixgbe_get_sset_count, + .get_ethtool_stats = ixgbe_get_ethtool_stats, + .get_coalesce = ixgbe_get_coalesce, + .set_coalesce = ixgbe_set_coalesce, + .get_rxnfc = ixgbe_get_rxnfc, + .set_rxnfc = ixgbe_set_rxnfc, +}; + +void ixgbe_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c new file mode 100644 index 0000000..824edae --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -0,0 +1,836 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include <linux/if_ether.h>
+#include <linux/gfp.h>
+#include <linux/if_vlan.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+
+/**
+ * ixgbe_fcoe_clear_ddp - clear the given ddp context
+ * @ddp: ptr to the ixgbe_fcoe_ddp
+ *
+ * Returns : none
+ *
+ */
+static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
+{
+	ddp->len = 0;
+	ddp->err = 1;
+	ddp->udl = NULL;
+	ddp->udp = 0UL;
+	ddp->sgl = NULL;
+	ddp->sgc = 0;
+}
+
+/**
+ * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
+ * @netdev: the corresponding net_device
+ * @xid: the xid whose corresponding ddp context will be freed
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
+ * and it is expected to be called by ULD, i.e., FCP layer of libfc
+ * to release the corresponding ddp context when the I/O is done.
+ *
+ * Returns : data length already ddp-ed in bytes
+ */
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
+{
+	int len = 0;
+	struct ixgbe_fcoe *fcoe;
+	struct ixgbe_adapter *adapter;
+	struct ixgbe_fcoe_ddp *ddp;
+	u32 fcbuff;
+
+	if (!netdev)
+		goto out_ddp_put;
+
+	if (xid >= IXGBE_FCOE_DDP_MAX)
+		goto out_ddp_put;
+
+	adapter = netdev_priv(netdev);
+	fcoe = &adapter->fcoe;
+	ddp = &fcoe->ddp[xid];
+	if (!ddp->udl)
+		goto out_ddp_put;
+
+	len = ddp->len;
+	/* if there is an error, force to invalidate ddp context */
+	if (ddp->err) {
+		spin_lock_bh(&fcoe->lock);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
+				(xid | IXGBE_FCFLTRW_WE));
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+				(xid | IXGBE_FCDMARW_WE));
+
+		/* guaranteed to be invalidated after 100us */
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+				(xid | IXGBE_FCDMARW_RE));
+		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
+		spin_unlock_bh(&fcoe->lock);
+		if (fcbuff & IXGBE_FCBUFF_VALID)
+			udelay(100);
+	}
+	if (ddp->sgl)
+		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
+			     DMA_FROM_DEVICE);
+	if (ddp->pool) {
+		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+		ddp->pool = NULL;
+	}
+
+	ixgbe_fcoe_clear_ddp(ddp);
+
+out_ddp_put:
+	return len;
+}
+
+/**
+ * ixgbe_fcoe_ddp_setup - called to set up ddp context
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ * @target_mode: non-zero to set up the context for target-mode DDP
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+				struct scatterlist *sgl, unsigned int sgc,
+				int target_mode)
+{
+	struct ixgbe_adapter *adapter;
+	struct ixgbe_hw *hw;
+	struct ixgbe_fcoe *fcoe;
+	struct ixgbe_fcoe_ddp *ddp;
+	struct scatterlist *sg;
+	unsigned int i, j, dmacount;
+	unsigned int len;
+	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
+	unsigned int firstoff = 0;
+	unsigned int lastsize;
+	unsigned int thisoff = 0;
+	unsigned int thislen = 0;
+	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
+	dma_addr_t addr = 0;
+	struct pci_pool *pool;
+
+	if (!netdev || !sgl)
+		return 0;
+
+	adapter = netdev_priv(netdev);
+	if (xid >= IXGBE_FCOE_DDP_MAX) {
+		e_warn(drv, "xid=0x%x out-of-range\n", xid);
+		return 0;
+	}
+
+	/* no DDP if we are already down or resetting */
+	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+	    test_bit(__IXGBE_RESETTING, &adapter->state))
+		return 0;
+
+	fcoe = &adapter->fcoe;
+	if (!fcoe->pool) {
+		e_warn(drv, "xid=0x%x no ddp 
pool for fcoe\n", xid); + return 0; + } + + ddp = &fcoe->ddp[xid]; + if (ddp->sgl) { + e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", + xid, ddp->sgl, ddp->sgc); + return 0; + } + ixgbe_fcoe_clear_ddp(ddp); + + /* setup dma from scsi command sgl */ + dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); + if (dmacount == 0) { + e_err(drv, "xid 0x%x DMA map error\n", xid); + return 0; + } + + /* alloc the udl from per cpu ddp pool */ + pool = *per_cpu_ptr(fcoe->pool, get_cpu()); + ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp); + if (!ddp->udl) { + e_err(drv, "failed allocated ddp context\n"); + goto out_noddp_unmap; + } + ddp->pool = pool; + ddp->sgl = sgl; + ddp->sgc = sgc; + + j = 0; + for_each_sg(sgl, sg, dmacount, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + while (len) { + /* max number of buffers allowed in one DDP context */ + if (j >= IXGBE_BUFFCNT_MAX) { + e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " + "not enough descriptors\n", + xid, i, j, dmacount, (u64)addr); + goto out_noddp_free; + } + + /* get the offset of length of current buffer */ + thisoff = addr & ((dma_addr_t)bufflen - 1); + thislen = min((bufflen - thisoff), len); + /* + * all but the 1st buffer (j == 0) + * must be aligned on bufflen + */ + if ((j != 0) && (thisoff)) + goto out_noddp_free; + /* + * all but the last buffer + * ((i == (dmacount - 1)) && (thislen == len)) + * must end at bufflen + */ + if (((i != (dmacount - 1)) || (thislen != len)) + && ((thislen + thisoff) != bufflen)) + goto out_noddp_free; + + ddp->udl[j] = (u64)(addr - thisoff); + /* only the first buffer may have none-zero offset */ + if (j == 0) + firstoff = thisoff; + len -= thislen; + addr += thislen; + j++; + } + } + /* only the last buffer may have non-full bufflen */ + lastsize = thisoff + thislen; + + /* + * lastsize can not be buffer len. + * If it is then adding another buffer with lastsize = 1. + */ + if (lastsize == bufflen) { + if (j >= IXGBE_BUFFCNT_MAX) { + e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " + "not enough user buffers. We need an extra " + "buffer because lastsize is bufflen.\n", + xid, i, j, dmacount, (u64)addr); + goto out_noddp_free; + } + + ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); + j++; + lastsize = 1; + } + put_cpu(); + + fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); + fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); + fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); + /* Set WRCONTX bit to allow DDP for target */ + if (target_mode) + fcbuff |= (IXGBE_FCBUFF_WRCONTX); + fcbuff |= (IXGBE_FCBUFF_VALID); + + fcdmarw = xid; + fcdmarw |= IXGBE_FCDMARW_WE; + fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT); + + fcfltrw = xid; + fcfltrw |= IXGBE_FCFLTRW_WE; + + /* program DMA context */ + hw = &adapter->hw; + spin_lock_bh(&fcoe->lock); + + /* turn on last frame indication for target mode as FCP_RSPtarget is + * supposed to send FCP_RSP when it is done. 
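+	 * The read-modify-write of FCRXCTRL below therefore happens at
+	 * most once, the first time a target-mode context is set up;
+	 * the mode bit keeps it off the per-I/O fast path afterwards.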
+	 */
+	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
+		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
+		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
+		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
+	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
+	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
+	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
+	/* program filter context */
+	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
+	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
+	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+
+	spin_unlock_bh(&fcoe->lock);
+
+	return 1;
+
+out_noddp_free:
+	pci_pool_free(pool, ddp->udl, ddp->udp);
+	ixgbe_fcoe_clear_ddp(ddp);
+
+out_noddp_unmap:
+	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+	put_cpu();
+	return 0;
+}
+
+/**
+ * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+		       struct scatterlist *sgl, unsigned int sgc)
+{
+	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
+}
+
+/**
+ * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O. The DDP in target mode is a write I/O request
+ * from the initiator.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+			  struct scatterlist *sgl, unsigned int sgc)
+{
+	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
+}
+
+/**
+ * ixgbe_fcoe_ddp - check ddp status and mark it done
+ * @adapter: ixgbe adapter
+ * @rx_desc: advanced rx descriptor
+ * @skb: the skb holding the received data
+ * @staterr: the status/error bits from the rx descriptor
+ *
+ * This checks ddp status.
+ *
+ * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
+ * not passing the skb to ULD, > 0 indicates the length of data
+ * being ddped.
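+ *
+ * In practice: a frame whose FCSTAT is DDP returns 0 (the skb is
+ * consumed without going to the ULD), the final FCP_RSP frame
+ * returns ddp->len so the caller can report how much data was
+ * placed, and a frame for an unknown or un-ddped xid returns
+ * -EINVAL and is left to the regular receive path.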
+ */
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+		   union ixgbe_adv_rx_desc *rx_desc,
+		   struct sk_buff *skb,
+		   u32 staterr)
+{
+	u16 xid;
+	u32 fctl;
+	u32 fceofe, fcerr, fcstat;
+	int rc = -EINVAL;
+	struct ixgbe_fcoe *fcoe;
+	struct ixgbe_fcoe_ddp *ddp;
+	struct fc_frame_header *fh;
+	struct fcoe_crc_eof *crc;
+
+	fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
+	fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
+	if (fcerr == IXGBE_FCERR_BADCRC)
+		skb_checksum_none_assert(skb);
+	else
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
+		fh = (struct fc_frame_header *)(skb->data +
+			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
+	else
+		fh = (struct fc_frame_header *)(skb->data +
+			sizeof(struct fcoe_hdr));
+	fctl = ntoh24(fh->fh_f_ctl);
+	if (fctl & FC_FC_EX_CTX)
+		xid = be16_to_cpu(fh->fh_ox_id);
+	else
+		xid = be16_to_cpu(fh->fh_rx_id);
+
+	if (xid >= IXGBE_FCOE_DDP_MAX)
+		goto ddp_out;
+
+	fcoe = &adapter->fcoe;
+	ddp = &fcoe->ddp[xid];
+	if (!ddp->udl)
+		goto ddp_out;
+
+	if (fcerr | fceofe)
+		goto ddp_out;
+
+	fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
+	if (fcstat) {
+		/* update length of DDPed data */
+		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+		/* unmap the sg list when FCP_RSP is received */
+		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
+			pci_unmap_sg(adapter->pdev, ddp->sgl,
+				     ddp->sgc, DMA_FROM_DEVICE);
+			ddp->err = (fcerr | fceofe);
+			ddp->sgl = NULL;
+			ddp->sgc = 0;
+		}
+		/* return 0 to bypass going to ULD for DDPed data */
+		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
+			rc = 0;
+		else if (ddp->len)
+			rc = ddp->len;
+	}
+	/* In target mode, check the last data frame of the sequence.
+	 * For DDP in target mode, data is already DDPed but the header
+	 * indication of the last data frame could allow us to tell if we
+	 * got all the data and the ULP can send FCP_RSP back. As this is
+	 * not a full fcoe frame, we fill the trailer here so it won't be
+	 * dropped by the ULP stack.
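+	 * Concretely, the code below grows the skb by
+	 * sizeof(struct fcoe_crc_eof) via skb_put() and only sets the
+	 * fcoe_eof byte to FC_EOF_T; that is enough for libfc to treat
+	 * the frame as complete instead of dropping it.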
+ */ + if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && + (fctl & FC_FC_END_SEQ)) { + crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); + crc->fcoe_eof = FC_EOF_T; + } +ddp_out: + return rc; +} + +/** + * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) + * @adapter: ixgbe adapter + * @tx_ring: tx desc ring + * @skb: associated skb + * @tx_flags: tx flags + * @hdr_len: hdr_len to be returned + * + * This sets up large send offload for FCoE + * + * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error + */ +int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, u8 *hdr_len) +{ + struct fc_frame_header *fh; + u32 vlan_macip_lens; + u32 fcoe_sof_eof = 0; + u32 mss_l4len_idx; + u8 sof, eof; + + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { + dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", + skb_shinfo(skb)->gso_type); + return -EINVAL; + } + + /* resets the header to point fcoe/fc */ + skb_set_network_header(skb, skb->mac_len); + skb_set_transport_header(skb, skb->mac_len + + sizeof(struct fcoe_hdr)); + + /* sets up SOF and ORIS */ + sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; + switch (sof) { + case FC_SOF_I2: + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; + break; + case FC_SOF_I3: + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | + IXGBE_ADVTXD_FCOEF_ORIS; + break; + case FC_SOF_N2: + break; + case FC_SOF_N3: + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; + break; + default: + dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); + return -EINVAL; + } + + /* the first byte of the last dword is EOF */ + skb_copy_bits(skb, skb->len - 4, &eof, 1); + /* sets up EOF and ORIE */ + switch (eof) { + case FC_EOF_N: + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; + break; + case FC_EOF_T: + /* lso needs ORIE */ + if (skb_is_gso(skb)) + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | + IXGBE_ADVTXD_FCOEF_ORIE; + else + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; + break; + case FC_EOF_NI: + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; + break; + case FC_EOF_A: + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; + break; + default: + dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); + return -EINVAL; + } + + /* sets up PARINC indicating data offset */ + fh = (struct fc_frame_header *)skb_transport_header(skb); + if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; + + /* include trailer in headlen as it is replicated per frame */ + *hdr_len = sizeof(struct fcoe_crc_eof); + + /* hdr_len includes fc_hdr if FCoE LSO is enabled */ + if (skb_is_gso(skb)) + *hdr_len += (skb_transport_offset(skb) + + sizeof(struct fc_frame_header)); + + /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ + mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_transport_offset(skb) + + sizeof(struct fc_frame_header); + vlan_macip_lens |= (skb_transport_offset(skb) - 4) + << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + /* write context desc */ + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, + IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); + + return skb_is_gso(skb); +} + +static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) +{ + unsigned int cpu; + struct pci_pool **pool; + + for_each_possible_cpu(cpu) { + pool = per_cpu_ptr(fcoe->pool, cpu); + if (*pool) + pci_pool_destroy(*pool); + } + free_percpu(fcoe->pool); + fcoe->pool = NULL; +} + +static 
void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + unsigned int cpu; + struct pci_pool **pool; + char pool_name[32]; + + fcoe->pool = alloc_percpu(struct pci_pool *); + if (!fcoe->pool) + return; + + /* allocate pci pool for each cpu */ + for_each_possible_cpu(cpu) { + snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); + pool = per_cpu_ptr(fcoe->pool, cpu); + *pool = pci_pool_create(pool_name, + adapter->pdev, IXGBE_FCPTR_MAX, + IXGBE_FCPTR_ALIGN, PAGE_SIZE); + if (!*pool) { + e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); + ixgbe_fcoe_ddp_pools_free(fcoe); + return; + } + } +} + +/** + * ixgbe_configure_fcoe - configures registers for fcoe at start + * @adapter: ptr to ixgbe adapter + * + * This sets up FCoE related registers + * + * Returns : none + */ +void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) +{ + int i, fcoe_q, fcoe_i; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + + if (!fcoe->pool) { + spin_lock_init(&fcoe->lock); + + ixgbe_fcoe_ddp_pools_alloc(adapter); + if (!fcoe->pool) { + e_err(drv, "failed to alloc percpu fcoe DDP pools\n"); + return; + } + + /* Extra buffer to be shared by all DDPs for HW work around */ + fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); + if (fcoe->extra_ddp_buffer == NULL) { + e_err(drv, "failed to allocated extra DDP buffer\n"); + goto out_ddp_pools; + } + + fcoe->extra_ddp_buffer_dma = + dma_map_single(&adapter->pdev->dev, + fcoe->extra_ddp_buffer, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + fcoe->extra_ddp_buffer_dma)) { + e_err(drv, "failed to map extra DDP buffer\n"); + goto out_extra_ddp_buffer; + } + } + + /* Enable L2 eth type filter for FCoE */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), + (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); + /* Enable L2 eth type filter for FIP */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), + (ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); + if (adapter->ring_feature[RING_F_FCOE].indices) { + /* Use multiple rx queues for FCoE by redirection table */ + for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { + fcoe_i = f->mask + i % f->indices; + fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); + } + IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); + } else { + /* Use single rx queue for FCoE */ + fcoe_i = f->mask; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), + IXGBE_ETQS_QUEUE_EN | + (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); + } + /* send FIP frames to the first FCoE queue */ + fcoe_i = f->mask; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), + IXGBE_ETQS_QUEUE_EN | + (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); + + IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, + IXGBE_FCRXCTRL_FCOELLI | + IXGBE_FCRXCTRL_FCCRCBO | + (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); + return; + +out_extra_ddp_buffer: + kfree(fcoe->extra_ddp_buffer); +out_ddp_pools: + ixgbe_fcoe_ddp_pools_free(fcoe); +} + +/** + * ixgbe_cleanup_fcoe - release all fcoe ddp context resources + * @adapter : ixgbe adapter + * + * Cleans up outstanding ddp context resources + * + * Returns : none + */ +void 
ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) +{ + int i; + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + if (!fcoe->pool) + return; + + for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) + ixgbe_fcoe_ddp_put(adapter->netdev, i); + dma_unmap_single(&adapter->pdev->dev, + fcoe->extra_ddp_buffer_dma, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + kfree(fcoe->extra_ddp_buffer); + ixgbe_fcoe_ddp_pools_free(fcoe); +} + +/** + * ixgbe_fcoe_enable - turn on FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns on FCoE offload feature in 82599. + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_fcoe_enable(struct net_device *netdev) +{ + int rc = -EINVAL; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + + if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) + goto out_enable; + + atomic_inc(&fcoe->refcnt); + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + goto out_enable; + + e_info(drv, "Enabling FCoE offload features.\n"); + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + ixgbe_clear_interrupt_scheme(adapter); + + adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; + netdev->features |= NETIF_F_FCOE_CRC; + netdev->features |= NETIF_F_FSO; + netdev->features |= NETIF_F_FCOE_MTU; + netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + + ixgbe_init_interrupt_scheme(adapter); + netdev_features_change(netdev); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + rc = 0; + +out_enable: + return rc; +} + +/** + * ixgbe_fcoe_disable - turn off FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns off FCoE offload feature in 82599. + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_fcoe_disable(struct net_device *netdev) +{ + int rc = -EINVAL; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) + goto out_disable; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + goto out_disable; + + if (!atomic_dec_and_test(&fcoe->refcnt)) + goto out_disable; + + e_info(drv, "Disabling FCoE offload features.\n"); + netdev->features &= ~NETIF_F_FCOE_CRC; + netdev->features &= ~NETIF_F_FSO; + netdev->features &= ~NETIF_F_FCOE_MTU; + netdev->fcoe_ddp_xid = 0; + netdev_features_change(netdev); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + ixgbe_clear_interrupt_scheme(adapter); + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = 0; + ixgbe_cleanup_fcoe(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + rc = 0; + +out_disable: + return rc; +} + +/** + * ixgbe_fcoe_get_wwn - get world wide name for the node or the port + * @netdev : ixgbe adapter + * @wwn : the world wide name + * @type: the type of world wide name + * + * Returns the node or port world wide name if both the prefix and the san + * mac address are valid, then the wwn is formed based on the NAA-2 for + * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 
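+ *
+ * As a made-up example, a wwnn_prefix of 0x2000 combined with a SAN
+ * MAC address of 00:1b:21:aa:bb:cc would yield
+ * *wwn == 0x2000001b21aabbcc.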
+ * + * Returns : 0 on success + */ +int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) +{ + int rc = -EINVAL; + u16 prefix = 0xffff; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + + switch (type) { + case NETDEV_FCOE_WWNN: + prefix = mac->wwnn_prefix; + break; + case NETDEV_FCOE_WWPN: + prefix = mac->wwpn_prefix; + break; + default: + break; + } + + if ((prefix != 0xffff) && + is_valid_ether_addr(mac->san_addr)) { + *wwn = ((u64) prefix << 48) | + ((u64) mac->san_addr[0] << 40) | + ((u64) mac->san_addr[1] << 32) | + ((u64) mac->san_addr[2] << 24) | + ((u64) mac->san_addr[3] << 16) | + ((u64) mac->san_addr[4] << 8) | + ((u64) mac->san_addr[5]); + rc = 0; + } + return rc; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h new file mode 100644 index 0000000..99de145 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -0,0 +1,81 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_FCOE_H
+#define _IXGBE_FCOE_H
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fcoe.h>
+
+/* shift bits within STAT for FCSTAT */
+#define IXGBE_RXDADV_FCSTAT_SHIFT	4
+
+/* ddp user buffer */
+#define IXGBE_BUFFCNT_MAX	256	/* 8 bits bufcnt */
+#define IXGBE_FCPTR_ALIGN	16
+#define IXGBE_FCPTR_MAX	(IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
+#define IXGBE_FCBUFF_4KB	0x0
+#define IXGBE_FCBUFF_8KB	0x1
+#define IXGBE_FCBUFF_16KB	0x2
+#define IXGBE_FCBUFF_64KB	0x3
+#define IXGBE_FCBUFF_MAX	65536	/* 64KB max */
+#define IXGBE_FCBUFF_MIN	4096	/* 4KB min */
+#define IXGBE_FCOE_DDP_MAX	512	/* 9 bits xid */
+
+/* Default traffic class to use for FCoE */
+#define IXGBE_FCOE_DEFTC	3
+
+/* fcerr */
+#define IXGBE_FCERR_BADCRC	0x00100000
+
+/* FCoE DDP for target mode */
+#define __IXGBE_FCOE_TARGET	1
+
+struct ixgbe_fcoe_ddp {
+	int len;
+	u32 err;
+	unsigned int sgc;
+	struct scatterlist *sgl;
+	dma_addr_t udp;
+	u64 *udl;
+	struct pci_pool *pool;
+};
+
+struct ixgbe_fcoe {
+	struct pci_pool **pool;
+	atomic_t refcnt;
+	spinlock_t lock;
+	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+	unsigned char *extra_ddp_buffer;
+	dma_addr_t extra_ddp_buffer_dma;
+	unsigned long mode;
+#ifdef CONFIG_IXGBE_DCB
+	u8 up;
+#endif
+};
+
+#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
new file mode 100644
index 0000000..e86297b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -0,0 +1,7934 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <scsi/fc/fc_fcoe.h>
+
+#include "ixgbe.h"
+#include "ixgbe_common.h"
+#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
+
+char ixgbe_driver_name[] = "ixgbe";
+static const char ixgbe_driver_string[] =
+	"Intel(R) 10 Gigabit PCI Express Network Driver";
+#define MAJ 3
+#define MIN 4
+#define BUILD 8
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ + __stringify(BUILD) "-k" +const char ixgbe_driver_version[] = DRV_VERSION; +static const char ixgbe_copyright[] = + "Copyright (c) 1999-2011 Intel Corporation."; + +static const struct ixgbe_info *ixgbe_info_tbl[] = { + [board_82598] = &ixgbe_82598_info, + [board_82599] = &ixgbe_82599_info, + [board_X540] = &ixgbe_X540_info, +}; + +/* ixgbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), + board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), + board_X540 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), + board_82599 }, + + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); + +#ifdef CONFIG_IXGBE_DCA +static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, + void *p); +static struct notifier_block dca_notifier = { + .notifier_call = ixgbe_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif + +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, + "Maximum number of virtual functions to allocate per physical function"); +#endif /* CONFIG_PCI_IOV */ + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 gcr; + u32 gpie; + u32 vmdctl; + +#ifdef CONFIG_PCI_IOV + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + + /* turn off device IOV mode */ + gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + gcr &= ~(IXGBE_GCR_EXT_SRIOV); + IXGBE_WRITE_REG(hw, 
IXGBE_GCR_EXT, gcr); + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* set default pool back to 0 */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + IXGBE_WRITE_FLUSH(hw); + + /* take a breather then clean up driver data */ + msleep(100); + + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + adapter->num_vfs = 0; + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; +} + +static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) +{ + if (!test_bit(__IXGBE_DOWN, &adapter->state) && + !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) + schedule_work(&adapter->service_task); +} + +static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) +{ + BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_clear_bit(); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); +} + +struct ixgbe_reg_info { + u32 ofs; + char *name; +}; + +static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { + + /* General Registers */ + {IXGBE_CTRL, "CTRL"}, + {IXGBE_STATUS, "STATUS"}, + {IXGBE_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {IXGBE_EICR, "EICR"}, + + /* RX Registers */ + {IXGBE_SRRCTL(0), "SRRCTL"}, + {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, + {IXGBE_RDLEN(0), "RDLEN"}, + {IXGBE_RDH(0), "RDH"}, + {IXGBE_RDT(0), "RDT"}, + {IXGBE_RXDCTL(0), "RXDCTL"}, + {IXGBE_RDBAL(0), "RDBAL"}, + {IXGBE_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IXGBE_TDBAL(0), "TDBAL"}, + {IXGBE_TDBAH(0), "TDBAH"}, + {IXGBE_TDLEN(0), "TDLEN"}, + {IXGBE_TDH(0), "TDH"}, + {IXGBE_TDT(0), "TDT"}, + {IXGBE_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + + +/* + * ixgbe_regdump - register printout routine + */ +static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) +{ + int i = 0, j = 0; + char rname[16]; + u32 regs[64]; + + switch (reginfo->ofs) { + case IXGBE_SRRCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + break; + case IXGBE_DCA_RXCTRL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + break; + case IXGBE_RDLEN(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + break; + case IXGBE_RDH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + break; + case IXGBE_RDT(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + break; + case IXGBE_RXDCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + break; + case IXGBE_RDBAL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + break; + case IXGBE_RDBAH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + break; + case IXGBE_TDBAL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + break; + case IXGBE_TDBAH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + break; + case IXGBE_TDLEN(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + break; + case IXGBE_TDH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + break; + case IXGBE_TDT(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + break; + case IXGBE_TXDCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + break; + 
default: + pr_info("%-15s %08x\n", reginfo->name, + IXGBE_READ_REG(hw, reginfo->ofs)); + return; + } + + for (i = 0; i < 8; i++) { + snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); + pr_err("%-15s", rname); + for (j = 0; j < 8; j++) + pr_cont(" %08x", regs[i*8+j]); + pr_cont("\n"); + } + +} + +/* + * ixgbe_dump - Print registers, tx-rings and rx-rings + */ +static void ixgbe_dump(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_reg_info *reginfo; + int n = 0; + struct ixgbe_ring *tx_ring; + struct ixgbe_tx_buffer *tx_buffer_info; + union ixgbe_adv_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct ixgbe_ring *rx_ring; + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer_info; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state " + "trans_start last_rx\n"); + pr_info("%-15s %016lX %016lX %016lX\n", + netdev->name, + netdev->state, + netdev->trans_start, + netdev->last_rx); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; + reginfo->name; reginfo++) { + ixgbe_regdump(hw, reginfo); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + tx_buffer_info = + &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)tx_buffer_info->dma, + tx_buffer_info->length, + tx_buffer_info->next_to_watch, + (u64)tx_buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] " + "[PlPOIdStDDt Ln] [bi->dma ] " + "leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + pr_info("T [0x%03X] %016llX %016llX %016llX" + " %04X %3X %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)tx_buffer_info->dma, + tx_buffer_info->length, + tx_buffer_info->next_to_watch, + (u64)tx_buffer_info->time_stamp, + tx_buffer_info->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_cont(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == 
tx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer_info->dma != 0) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt(tx_buffer_info->dma), + tx_buffer_info->length, true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("%5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 16 15 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] " + "[ HeadBuf DD] [bi->dma ] [bi->skb] " + "<-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] " + "[vl er S cks ln] ---------------- [bi->skb] " + "<-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & IXGBE_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + rx_buffer_info->skb); + } else { + pr_info("R [0x%03X] %016llX " + "%016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)rx_buffer_info->dma, + rx_buffer_info->skb); + + if (netif_msg_pktdata(adapter)) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt(rx_buffer_info->dma), + rx_ring->rx_buf_len, true); + + if (rx_ring->rx_buf_len + < IXGBE_RXBUFFER_2048) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt( + rx_buffer_info->page_dma + + rx_buffer_info->page_offset + ), + PAGE_SIZE/2, true); + } + } + + if (i == rx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == rx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + } + } + +exit: + return; +} + +static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) +{ + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); +} + +static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) +{ + u32 ctrl_ext; + + /* Let firmware know the 
driver has taken over */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); +} + +/* + * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + */ +static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, + u8 queue, u8 msix_vector) +{ + u32 ivar, index; + struct ixgbe_hw *hw = &adapter->hw; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + if (direction == -1) + direction = 0; + index = (((direction * 64) + queue) >> 2) & 0x1F; + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); + ivar &= ~(0xFF << (8 * (queue & 0x3))); + ivar |= (msix_vector << (8 * (queue & 0x3))); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((queue & 1) * 8); + ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); + break; + } else { + /* tx or rx causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); + break; + } + default: + break; + } +} + +static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + mask = (qmask & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); + mask = (qmask >> 32); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); + break; + default: + break; + } +} + +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *tx_buffer_info) +{ + if (tx_buffer_info->dma) { + if (tx_buffer_info->mapped_as_page) + dma_unmap_page(tx_ring->dev, + tx_buffer_info->dma, + tx_buffer_info->length, + DMA_TO_DEVICE); + else + dma_unmap_single(tx_ring->dev, + tx_buffer_info->dma, + tx_buffer_info->length, + DMA_TO_DEVICE); + tx_buffer_info->dma = 0; + } + if (tx_buffer_info->skb) { + dev_kfree_skb_any(tx_buffer_info->skb); + tx_buffer_info->skb = NULL; + } + tx_buffer_info->time_stamp = 0; + /* tx_buffer_info must be completely set up in the transmit path */ +} + +static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u32 data = 0; + u32 xoff[8] = {0}; + int i; + + if ((hw->fc.current_mode == ixgbe_fc_full) || + (hw->fc.current_mode == ixgbe_fc_rx_pause)) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + break; + default: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__IXGBE_HANG_CHECK_ARMED, + 
&adapter->tx_ring[i]->state); + return; + } else if (!(adapter->dcb_cfg.pfc_mode_enable)) + return; + + /* update stats for each tc, only valid with PFC enabled */ + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + break; + default: + xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } + hwstats->pxoffrxc[i] += xoff[i]; + } + + /* disarm tx queues that have received xoff frames */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + u8 tc = tx_ring->dcb_tc; + + if (xoff[tc]) + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } +} + +static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) +{ + return ring->tx_stats.completed; +} + +static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(ring->netdev); + struct ixgbe_hw *hw = &adapter->hw; + + u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); + u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) +{ + u32 tx_done = ixgbe_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = ixgbe_get_tx_pending(tx_ring); + bool ret = false; + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. 
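+ *
+ * As a minimal illustration (hypothetical helper, not part of the
+ * driver), the two-strike rule reduces to:
+ *
+ *	static bool hang_suspected(u32 done, u32 done_old, u32 pending,
+ *				   unsigned long *armed)
+ *	{
+ *		if (done == done_old && pending)
+ *			return test_and_set_bit(0, armed);
+ *		clear_bit(0, armed);
+ *		return false;
+ *	}
+ *
+ * The first call that sees no progress only arms the bit and returns
+ * false; a hang is only reported by the second consecutive call.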
+ */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, + &tx_ring->state); + } else { + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } + + return ret; +} + +/** + * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct + **/ +static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) +{ + + /* Do the reset outside of interrupt context */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + ixgbe_service_event_schedule(adapter); + } +} + +/** + * ixgbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *tx_ring) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + union ixgbe_adv_tx_desc *tx_desc, *eop_desc; + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned int total_bytes = 0, total_packets = 0; + u16 i, eop, count = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->tx_buffer_info[i].next_to_watch; + eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); + + while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && + (count < q_vector->tx.work_limit)) { + bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + + tx_desc->wb.status = 0; + cleaned = (i == eop); + + i++; + if (i == tx_ring->count) + i = 0; + + if (cleaned && tx_buffer_info->skb) { + total_bytes += tx_buffer_info->bytecount; + total_packets += tx_buffer_info->gso_segs; + } + + ixgbe_unmap_and_free_tx_resource(tx_ring, + tx_buffer_info); + } + + tx_ring->tx_stats.completed++; + eop = tx_ring->tx_buffer_info[i].next_to_watch; + eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); + } + + tx_ring->next_to_clean = i; + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_begin(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + + if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + struct ixgbe_hw *hw = &adapter->hw; + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->queue_index, + IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), + IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), + tx_ring->next_to_use, eop, + tx_ring->tx_buffer_info[eop].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + + /* schedule immediate reset if we believe we hung */ + ixgbe_tx_timeout_reset(adapter); + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && + 
(ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && + !test_bit(__IXGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return count < q_vector->tx.work_limit; +} + +#ifdef CONFIG_IXGBE_DCA +static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + int cpu) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rxctrl; + u8 reg_idx = rx_ring->reg_idx; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx)); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; + rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; + rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << + IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); + break; + default: + break; + } + rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; + rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; + rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); +} + +static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + int cpu) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 txctrl; + u8 reg_idx = tx_ring->reg_idx; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; + txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); + txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; + txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << + IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); + txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); + break; + default: + break; + } +} + +static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + int cpu = get_cpu(); + long r_idx; + int i; + + if (q_vector->cpu == cpu) + goto out_no_update; + + r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->tx.count; i++) { + ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu); + r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, + r_idx + 1); + } + + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rx.count; i++) { + ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu); + r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, + r_idx + 1); + } + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) +{ + int num_q_vectors; + int i; + + if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) + return; + + /* always use CB2 mode, difference is masked in the CB driver */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + else + num_q_vectors = 1; + + for (i = 0; i < num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + ixgbe_update_dca(adapter->q_vector[i]); + } +} + +static int 
__ixgbe_notify_dca(struct device *dev, void *data) +{ + struct ixgbe_adapter *adapter = dev_get_drvdata(dev); + unsigned long event = *(unsigned long *)data; + + if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) + return 0; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if we're already enabled, don't do it again */ + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + ixgbe_setup_dca(adapter); + break; + } + /* Fall Through since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { + dca_remove_requester(dev); + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); + } + break; + } + + return 0; +} +#endif /* CONFIG_IXGBE_DCA */ + +static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); +} + +/** + * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type + * @adapter: address of board private structure + * @rx_desc: advanced rx descriptor + * + * Returns: true if it is FCoE pkt + */ +static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc) +{ + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == + (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << + IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); +} + +/** + * ixgbe_receive_skb - Send a completed packet up the stack + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + * @status: hardware indication of status of receive + * @ring: rx descriptor ring (for a specific queue) to setup + * @rx_desc: rx descriptor + **/ +static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb, u8 status, + struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + struct napi_struct *napi = &q_vector->napi; + bool is_vlan = (status & IXGBE_RXD_STAT_VP); + u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); + + if (is_vlan && (tag & VLAN_VID_MASK)) + __vlan_hwaccel_put_tag(skb, tag); + + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) + napi_gro_receive(napi, skb); + else + netif_rx(skb); +} + +/** + * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @adapter: address of board private structure + * @rx_desc: advanced rx descriptor + * @skb: skb currently being received and modified + * @status_err: status error value of last descriptor in packet + **/ +static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb, + u32 status_err) +{ + skb->ip_summed = CHECKSUM_NONE; + + /* Rx csum disabled */ + if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) + return; + + /* if IP and error */ + if ((status_err & IXGBE_RXD_STAT_IPCS) && + (status_err & IXGBE_RXDADV_ERR_IPE)) { + adapter->hw_csum_rx_error++; + return; + } + + if (!(status_err & IXGBE_RXD_STAT_L4CS)) + return; + + if (status_err & IXGBE_RXDADV_ERR_TCPE) { + u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + /* + * 82599 errata, UDP frames with a 0 checksum can be marked as + * checksum errors. 
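+ *
+ * Example (illustrative traffic, not from this driver): RFC 768
+ * allows a sender to transmit a UDP datagram with its checksum
+ * field set to 0, meaning "checksum not computed". The affected
+ * hardware can raise TCPE for such a frame even though it is
+ * valid, so it is not counted in hw_csum_rx_error here; the skb
+ * stays CHECKSUM_NONE and the stack does its own verification.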
+ */ + if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && + (adapter->hw.mac.type == ixgbe_mac_82599EB)) + return; + + adapter->hw_csum_rx_error++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) +{ + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + struct sk_buff *skb; + u16 i = rx_ring->next_to_use; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev) + return; + + while (cleaned_count--) { + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + skb = bi->skb; + + if (!skb) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + goto no_buffers; + } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + bi->skb = skb; + } + + if (!bi->dma) { + bi->dma = dma_map_single(rx_ring->dev, + skb->data, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, bi->dma)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + bi->dma = 0; + goto no_buffers; + } + } + + if (ring_is_ps_enabled(rx_ring)) { + if (!bi->page) { + bi->page = netdev_alloc_page(rx_ring->netdev); + if (!bi->page) { + rx_ring->rx_stats.alloc_rx_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } else { + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; + } + + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + ixgbe_release_rx_desc(rx_ring, i); + } +} + +static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) +{ + /* HW will not DMA in data larger than the given buffer, even if it + * parses the (NFS, of course) header to be larger. In that case, it + * fills the header buffer and spills the rest into the page. + */ + u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info); + u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> + IXGBE_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > IXGBE_RX_HDR_SIZE) + hlen = IXGBE_RX_HDR_SIZE; + return hlen; +} + +/** + * ixgbe_transform_rsc_queue - change rsc queue into a full packet + * @skb: pointer to the last skb in the rsc queue + * + * This function changes a queue full of hw rsc buffers into a completed + * packet. 
It uses the ->prev pointers to find the first packet and then + * turns it into the frag list owner. + **/ +static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) +{ + unsigned int frag_list_size = 0; + unsigned int skb_cnt = 1; + + while (skb->prev) { + struct sk_buff *prev = skb->prev; + frag_list_size += skb->len; + skb->prev = NULL; + skb = prev; + skb_cnt++; + } + + skb_shinfo(skb)->frag_list = skb->next; + skb->next = NULL; + skb->len += frag_list_size; + skb->data_len += frag_list_size; + skb->truesize += frag_list_size; + IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt; + + return skb; +} + +static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc) +{ + return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & + IXGBE_RXDADV_RSCCNT_MASK); +} + +static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + union ixgbe_adv_rx_desc *rx_desc, *next_rxd; + struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; + struct sk_buff *skb; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + const int current_node = numa_node_id(); +#ifdef IXGBE_FCOE + int ddp_bytes = 0; +#endif /* IXGBE_FCOE */ + u32 staterr; + u16 i; + u16 cleaned_count = 0; + bool pkt_is_rsc = false; + + i = rx_ring->next_to_clean; + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + while (staterr & IXGBE_RXD_STAT_DD) { + u32 upper_len = 0; + + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + + skb = rx_buffer_info->skb; + rx_buffer_info->skb = NULL; + prefetch(skb->data); + + if (ring_is_rsc_enabled(rx_ring)) + pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); + + /* if this is a skb from previous receive DMA will be 0 */ + if (rx_buffer_info->dma) { + u16 hlen; + if (pkt_is_rsc && + !(staterr & IXGBE_RXD_STAT_EOP) && + !skb->prev) { + /* + * When HWRSC is enabled, delay unmapping + * of the first packet. It carries the + * header information, HW may still + * access the header after the writeback. 
+ * Only unmap it when EOP is reached + */ + IXGBE_RSC_CB(skb)->delay_unmap = true; + IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; + } else { + dma_unmap_single(rx_ring->dev, + rx_buffer_info->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + } + rx_buffer_info->dma = 0; + + if (ring_is_ps_enabled(rx_ring)) { + hlen = ixgbe_get_hlen(rx_desc); + upper_len = le16_to_cpu(rx_desc->wb.upper.length); + } else { + hlen = le16_to_cpu(rx_desc->wb.upper.length); + } + + skb_put(skb, hlen); + } else { + /* assume packet split since header is unmapped */ + upper_len = le16_to_cpu(rx_desc->wb.upper.length); + } + + if (upper_len) { + dma_unmap_page(rx_ring->dev, + rx_buffer_info->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + rx_buffer_info->page_dma = 0; + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buffer_info->page, + rx_buffer_info->page_offset, + upper_len); + + if ((page_count(rx_buffer_info->page) == 1) && + (page_to_nid(rx_buffer_info->page) == current_node)) + get_page(rx_buffer_info->page); + else + rx_buffer_info->page = NULL; + + skb->len += upper_len; + skb->data_len += upper_len; + skb->truesize += upper_len; + } + + i++; + if (i == rx_ring->count) + i = 0; + + next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i); + prefetch(next_rxd); + cleaned_count++; + + if (pkt_is_rsc) { + u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> + IXGBE_RXDADV_NEXTP_SHIFT; + next_buffer = &rx_ring->rx_buffer_info[nextp]; + } else { + next_buffer = &rx_ring->rx_buffer_info[i]; + } + + if (!(staterr & IXGBE_RXD_STAT_EOP)) { + if (ring_is_ps_enabled(rx_ring)) { + rx_buffer_info->skb = next_buffer->skb; + rx_buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + next_buffer->dma = 0; + } else { + skb->next = next_buffer->skb; + skb->next->prev = skb; + } + rx_ring->rx_stats.non_eop_descs++; + goto next_desc; + } + + if (skb->prev) { + skb = ixgbe_transform_rsc_queue(skb); + /* if we got here without RSC the packet is invalid */ + if (!pkt_is_rsc) { + __pskb_trim(skb, 0); + rx_buffer_info->skb = skb; + goto next_desc; + } + } + + if (ring_is_rsc_enabled(rx_ring)) { + if (IXGBE_RSC_CB(skb)->delay_unmap) { + dma_unmap_single(rx_ring->dev, + IXGBE_RSC_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + IXGBE_RSC_CB(skb)->dma = 0; + IXGBE_RSC_CB(skb)->delay_unmap = false; + } + } + if (pkt_is_rsc) { + if (ring_is_ps_enabled(rx_ring)) + rx_ring->rx_stats.rsc_count += + skb_shinfo(skb)->nr_frags; + else + rx_ring->rx_stats.rsc_count += + IXGBE_RSC_CB(skb)->skb_cnt; + rx_ring->rx_stats.rsc_flush++; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { + dev_kfree_skb_any(skb); + goto next_desc; + } + + ixgbe_rx_checksum(adapter, rx_desc, skb, staterr); + if (adapter->netdev->features & NETIF_F_RXHASH) + ixgbe_rx_hash(rx_desc, skb); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +#ifdef IXGBE_FCOE + /* if ddp, not passing to ULD unless for FCP_RSP or error */ + if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { + ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, + staterr); + if (!ddp_bytes) + goto next_desc; + } +#endif /* IXGBE_FCOE */ + ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); + +next_desc: + rx_desc->wb.upper.status_error = 0; + + (*work_done)++; + if (*work_done >= work_to_do) + break; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + 
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + rx_ring->next_to_clean = i; + cleaned_count = ixgbe_desc_unused(rx_ring); + + if (cleaned_count) + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + +#ifdef IXGBE_FCOE + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + unsigned int mss; + + mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + total_rx_bytes += ddp_bytes; + total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); + } +#endif /* IXGBE_FCOE */ + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; +} + +static int ixgbe_clean_rxonly(struct napi_struct *, int); +/** + * ixgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ixgbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) +{ + struct ixgbe_q_vector *q_vector; + int i, q_vectors, v_idx, r_idx; + u32 mask; + + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* + * Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (v_idx = 0; v_idx < q_vectors; v_idx++) { + q_vector = adapter->q_vector[v_idx]; + /* XXX for_each_set_bit(...) */ + r_idx = find_first_bit(q_vector->rx.idx, + adapter->num_rx_queues); + + for (i = 0; i < q_vector->rx.count; i++) { + u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; + ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); + r_idx = find_next_bit(q_vector->rx.idx, + adapter->num_rx_queues, + r_idx + 1); + } + r_idx = find_first_bit(q_vector->tx.idx, + adapter->num_tx_queues); + + for (i = 0; i < q_vector->tx.count; i++) { + u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; + ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); + r_idx = find_next_bit(q_vector->tx.idx, + adapter->num_tx_queues, + r_idx + 1); + } + + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->eitr = adapter->tx_eitr_param; + else if (q_vector->rx.count) + /* rx or mixed */ + q_vector->eitr = adapter->rx_eitr_param; + + ixgbe_write_eitr(q_vector); + /* If ATR is enabled, set interrupt affinity */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + /* + * Allocate the affinity_hint cpumask, assign the mask + * for this vector, and set our affinity_hint for + * this irq. 
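+ *
+ * The hint is advisory only: it is exported through
+ * /proc/irq/<nr>/affinity_hint for a userspace balancer such as
+ * irqbalance to apply, and the teardown path is expected to mirror
+ * it (sketch, assuming the names used in this function):
+ *
+ *	irq_set_affinity_hint(adapter->msix_entries[v_idx].vector, NULL);
+ *	free_cpumask_var(q_vector->affinity_mask);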
+ */ + if (!alloc_cpumask_var(&q_vector->affinity_mask, + GFP_KERNEL)) + return; + cpumask_set_cpu(v_idx, q_vector->affinity_mask); + irq_set_affinity_hint(adapter->msix_entries[v_idx].vector, + q_vector->affinity_mask); + } + } + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, + v_idx); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ixgbe_set_ivar(adapter, -1, 1, v_idx); + break; + + default: + break; + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); + + /* set up to autoclear timer, and the vectors */ + mask = IXGBE_EIMS_ENABLE_MASK; + if (adapter->num_vfs) + mask &= ~(IXGBE_EIMS_OTHER | + IXGBE_EIMS_MAILBOX | + IXGBE_EIMS_LSC); + else + mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * ixgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * this functionality is controlled by the InterruptThrottleRate module + * parameter (see ixgbe_param.c) + **/ +static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring_container *ring_container) +{ + u64 bytes_perint; + struct ixgbe_adapter *adapter = q_vector->adapter; + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttlerate management + * 0-20MB/s lowest (100000 ints/s) + * 20-100MB/s low (20000 ints/s) + * 100-1249MB/s bulk (8000 ints/s) + */ + /* what was last interrupt timeslice? */ + timepassed_us = 1000000/q_vector->eitr; + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > adapter->eitr_low) + itr_setting = low_latency; + break; + case low_latency: + if (bytes_perint > adapter->eitr_high) + itr_setting = bulk_latency; + else if (bytes_perint <= adapter->eitr_low) + itr_setting = lowest_latency; + break; + case bulk_latency: + if (bytes_perint <= adapter->eitr_high) + itr_setting = low_latency; + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} + +/** + * ixgbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. 
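+ *
+ * The expected calling pattern (illustrative) is to update the soft
+ * interrupt rate first and then push it to the hardware register:
+ *
+ *	q_vector->eitr = new_itr;
+ *	ixgbe_write_eitr(q_vector);
+ *
+ * which is what ixgbe_set_itr() below does after its exponential
+ * smoothing step.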
+ */ +void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* must write high and low 16 bits to reset counter */ + itr_reg |= (itr_reg << 16); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* + * 82599 and X540 can support a value of zero, so allow it for + * max interrupt rate, but there is an errata where it can + * not be zero with RSC + */ + if (itr_reg == 8 && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) + itr_reg = 0; + + /* + * set the WDIS bit to not clear the timer bits and cause an + * immediate assertion of the interrupt + */ + itr_reg |= IXGBE_EITR_CNT_WDIS; + break; + default: + break; + } + IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); +} + +static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) +{ + u32 new_itr = q_vector->eitr; + u8 current_itr; + + ixgbe_update_itr(q_vector, &q_vector->tx); + ixgbe_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 100000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 8000; + break; + default: + break; + } + + if (new_itr != q_vector->eitr) { + /* do an exponential smoothing */ + new_itr = ((q_vector->eitr * 9) + new_itr)/10; + + /* save the algorithm value here */ + q_vector->eitr = new_itr; + + ixgbe_write_eitr(q_vector); + } +} + +/** + * ixgbe_check_overtemp_subtask - check for over temperature + * @adapter: pointer to adapter + **/ +static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: + /* + * Since the warning interrupt is for both ports + * we don't have to check if: + * - This interrupt wasn't for our port. + * - We may have missed the interrupt so always have to + * check if we got an LSC + */ + if (!(eicr & IXGBE_EICR_GPI_SDP0) && + !(eicr & IXGBE_EICR_LSC)) + return; + + if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { + u32 autoneg; + bool link_up = false; + + hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + + if (link_up) + return; + } + + /* Check if this is not due to overtemp */ + if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) + return; + + break; + default: + if (!(eicr & IXGBE_EICR_GPI_SDP0)) + return; + break; + } + e_crit(drv, + "Network adapter has been stopped because it has overheated. 
If the problem persists, " + "power off the system and replace the adapter\n"); + + adapter->interrupt_event = 0; +} + +static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && + (eicr & IXGBE_EICR_GPI_SDP1)) { + e_crit(probe, "Fan has stopped, replace the adapter\n"); + /* write to clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + } +} + +static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (eicr & IXGBE_EICR_GPI_SDP2) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + ixgbe_service_event_schedule(adapter); + } + } + + if (eicr & IXGBE_EICR_GPI_SDP1) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + ixgbe_service_event_schedule(adapter); + } + } +} + +static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->lsc_int++; + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); + IXGBE_WRITE_FLUSH(hw); + ixgbe_service_event_schedule(adapter); + } +} + +static irqreturn_t ixgbe_msix_lsc(int irq, void *data) +{ + struct ixgbe_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr; + + /* + * Workaround for Silicon errata. Use clear-by-write instead + * of clear-by-read. Reading with EICS will return the + * interrupt causes without clearing, which will later be done + * with the write to EICR. 
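+ *
+ * The two access patterns, side by side (illustration only):
+ *
+ *	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);	read-to-clear, can race
+ *	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);	non-destructive snapshot
+ *	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);	clear exactly what we saw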
+ */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICS); + IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); + + if (eicr & IXGBE_EICR_LSC) + ixgbe_check_lsc(adapter); + + if (eicr & IXGBE_EICR_MAILBOX) + ixgbe_msg_task(adapter); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* Handle Flow Director Full threshold interrupt */ + if (eicr & IXGBE_EICR_FLOW_DIR) { + int reinit_count = 0; + int i; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = adapter->tx_ring[i]; + if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, + &ring->state)) + reinit_count++; + } + if (reinit_count) { + /* no more flow director interrupts until after init */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); + eicr &= ~IXGBE_EICR_FLOW_DIR; + adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + ixgbe_service_event_schedule(adapter); + } + } + ixgbe_check_sfp_event(adapter, eicr); + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->interrupt_event = eicr; + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; + ixgbe_service_event_schedule(adapter); + } + } + break; + default: + break; + } + + ixgbe_check_fan_failure(adapter, eicr); + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr & + ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE)); + + return IRQ_HANDLED; +} + +static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + mask = (qmask >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + break; + default: + break; + } + /* skip the flush */ +} + +static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); + mask = (qmask >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); + break; + default: + break; + } + /* skip the flush */ +} + +static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) +{ + struct ixgbe_q_vector *q_vector = data; + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *tx_ring; + int i, r_idx; + + if (!q_vector->tx.count) + return IRQ_HANDLED; + + r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->tx.count; i++) { + tx_ring = adapter->tx_ring[r_idx]; + r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, + r_idx + 1); + } + + /* EIAM disabled interrupts (on this vector) for us */ + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) + * @irq: unused + * @data: pointer to our q_vector struct for this interrupt vector + **/ +static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) +{ + struct ixgbe_q_vector *q_vector = data; + 
struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *rx_ring; + int r_idx; + int i; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rx.count; i++) { + rx_ring = adapter->rx_ring[r_idx]; + r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, + r_idx + 1); + } + + if (!q_vector->rx.count) + return IRQ_HANDLED; + + /* EIAM disabled interrupts (on this vector) for us */ + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) +{ + struct ixgbe_q_vector *q_vector = data; + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int r_idx; + int i; + + if (!q_vector->tx.count && !q_vector->rx.count) + return IRQ_HANDLED; + + r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->tx.count; i++) { + ring = adapter->tx_ring[r_idx]; + r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, + r_idx + 1); + } + + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rx.count; i++) { + ring = adapter->rx_ring[r_idx]; + r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, + r_idx + 1); + } + + /* EIAM disabled interrupts (on this vector) for us */ + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function is optimized for cleaning one queue only on a single + * q_vector!!! + **/ +static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *rx_ring = NULL; + int work_done = 0; + long r_idx; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + rx_ring = adapter->rx_ring[r_idx]; + + ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); + + /* If all Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, + ((u64)1 << q_vector->v_idx)); + } + + return work_done; +} + +/** + * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean more than one rx queue associated with a + * q_vector. 
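+ *
+ * The NAPI budget is split across the vector's rx rings before the
+ * per-ring clean below. For example (counts assumed): with the usual
+ * NAPI budget of 64 and rx.count == 3, each ring may clean up to
+ * 64 / 3 = 21 packets per poll, and the result is clamped so no ring
+ * ever polls with a budget below 1.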
+ **/ +static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring = NULL; + int work_done = 0, i; + long r_idx; + bool tx_clean_complete = true; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->tx.count; i++) { + ring = adapter->tx_ring[r_idx]; + tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); + r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, + r_idx + 1); + } + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + budget /= (q_vector->rx.count ?: 1); + budget = max(budget, 1); + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rx.count; i++) { + ring = adapter->rx_ring[r_idx]; + ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); + r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, + r_idx + 1); + } + + r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); + ring = adapter->rx_ring[r_idx]; + /* If all Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, + ((u64)1 << q_vector->v_idx)); + return 0; + } + + return work_done; +} + +/** + * ixgbe_clean_txonly - msix (aka one shot) tx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function is optimized for cleaning one queue only on a single + * q_vector!!! 
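+ *
+ * Note the Tx-only quirk in the body below: when ixgbe_clean_tx_irq()
+ * reports incomplete work, work_done is forced up to the full budget
+ * so NAPI keeps polling instead of re-enabling the interrupt.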
+ **/ +static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *tx_ring = NULL; + int work_done = 0; + long r_idx; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); + tx_ring = adapter->tx_ring[r_idx]; + + if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) + work_done = budget; + + /* If all Tx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (adapter->tx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, + ((u64)1 << q_vector->v_idx)); + } + + return work_done; +} + +static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, + int r_idx) +{ + struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; + struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; + + set_bit(r_idx, q_vector->rx.idx); + q_vector->rx.count++; + rx_ring->q_vector = q_vector; +} + +static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, + int t_idx) +{ + struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; + struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; + + set_bit(t_idx, q_vector->tx.idx); + q_vector->tx.count++; + tx_ring->q_vector = q_vector; + q_vector->tx.work_limit = a->tx_work_limit; +} + +/** + * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. + **/ +static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter) +{ + int q_vectors; + int v_start = 0; + int rxr_idx = 0, txr_idx = 0; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int i, j; + int rqpv, tqpv; + int err = 0; + + /* No mapping required if MSI-X is disabled. */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + goto out; + + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* + * The ideal configuration... + * We have enough vectors to map one per queue. + */ + if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { + for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) + map_vector_to_rxq(adapter, v_start, rxr_idx); + + for (; txr_idx < txr_remaining; v_start++, txr_idx++) + map_vector_to_txq(adapter, v_start, txr_idx); + + goto out; + } + + /* + * If we don't have enough vectors for a 1-to-1 + * mapping, we'll have to group them so there are + * multiple queues per vector. + */ + /* Re-adjusting *qpv takes care of the remainder. 
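+ * For example (counts assumed): 10 rx rings spread over 4 vectors
+ * yield DIV_ROUND_UP() shares of 3, 3, 2 and 2 as rxr_remaining
+ * shrinks; tx rings are distributed the same way.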
*/ + for (i = v_start; i < q_vectors; i++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); + for (j = 0; j < rqpv; j++) { + map_vector_to_rxq(adapter, i, rxr_idx); + rxr_idx++; + rxr_remaining--; + } + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); + for (j = 0; j < tqpv; j++) { + map_vector_to_txq(adapter, i, txr_idx); + txr_idx++; + txr_remaining--; + } + } +out: + return err; +} + +/** + * ixgbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ixgbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irqreturn_t (*handler)(int, void *); + int i, vector, q_vectors, err; + int ri = 0, ti = 0; + + /* Decrement for Other and TCP Timer vectors */ + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + err = ixgbe_map_rings_to_vectors(adapter); + if (err) + return err; + +#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count) \ + ? &ixgbe_msix_clean_many : \ + (_v)->rx.count ? &ixgbe_msix_clean_rx : \ + (_v)->tx.count ? &ixgbe_msix_clean_tx : \ + NULL) + for (vector = 0; vector < q_vectors; vector++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + handler = SET_HANDLER(q_vector); + + if (handler == &ixgbe_msix_clean_rx) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "rx", ri++); + } else if (handler == &ixgbe_msix_clean_tx) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "tx", ti++); + } else if (handler == &ixgbe_msix_clean_many) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", ri++); + ti++; + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(adapter->msix_entries[vector].vector, + handler, 0, q_vector->name, + q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt " + "Error: %d\n", err); + goto free_queue_irqs; + } + } + + sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name); + err = request_irq(adapter->msix_entries[vector].vector, + ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter); + if (err) { + e_err(probe, "request_irq for msix_lsc failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + for (i = vector - 1; i >= 0; i--) + free_irq(adapter->msix_entries[--vector].vector, + adapter->q_vector[i]); + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * ixgbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, + bool flush) +{ + u32 mask; + + mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + mask |= IXGBE_EIMS_GPI_SDP0; + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) + mask |= IXGBE_EIMS_GPI_SDP1; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + mask |= IXGBE_EIMS_ECC; + mask |= IXGBE_EIMS_GPI_SDP1; + mask |= IXGBE_EIMS_GPI_SDP2; + if (adapter->num_vfs) + mask |= IXGBE_EIMS_MAILBOX; + break; + default: + break; + } + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + mask |= IXGBE_EIMS_FLOW_DIR; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); + if (queues) + ixgbe_irq_enable_queues(adapter, 
~0); + if (flush) + IXGBE_WRITE_FLUSH(&adapter->hw); + + if (adapter->num_vfs > 32) { + u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); + } +} + +/** + * ixgbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t ixgbe_intr(int irq, void *data) +{ + struct ixgbe_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; + u32 eicr; + + /* + * Workaround for silicon errata on 82598. Mask the interrupts + * before the read of EICR. + */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read + * therefore no explicit interrupt disable is necessary */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + if (!eicr) { + /* + * shared interrupt alert! + * make sure interrupts are enabled because the read will + * have disabled interrupts due to EIAM + * finish the workaround of silicon errata on 82598. Unmask + * the interrupt that we masked before the EICR read. + */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, true, true); + return IRQ_NONE; /* Not our interrupt */ + } + + if (eicr & IXGBE_EICR_LSC) + ixgbe_check_lsc(adapter); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + ixgbe_check_sfp_event(adapter, eicr); + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->interrupt_event = eicr; + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; + ixgbe_service_event_schedule(adapter); + } + } + break; + default: + break; + } + + ixgbe_check_fan_failure(adapter, eicr); + + if (napi_schedule_prep(&(q_vector->napi))) { + /* would disable interrupts here but EIAM disabled it */ + __napi_schedule(&(q_vector->napi)); + } + + /* + * re-enable link (maybe) and non-queue interrupts, no flush. + * ixgbe_poll will re-enable the queue interrupts + */ + + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) +{ + int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (i = 0; i < q_vectors; i++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; + bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES); + bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES); + q_vector->rx.count = 0; + q_vector->tx.count = 0; + } +} + +/** + * ixgbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel.
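+ *
+ * The preference order is MSI-X (one vector per q_vector plus one for
+ * link and other causes), then MSI, then a shared legacy interrupt;
+ * both non-MSI-X paths land in ixgbe_intr().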
+ **/ +static int ixgbe_request_irq(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + err = ixgbe_request_msix_irqs(adapter); + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, + netdev->name, adapter); + } else { + err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, + netdev->name, adapter); + } + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ixgbe_free_irq(struct ixgbe_adapter *adapter) +{ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int i, q_vectors; + + q_vectors = adapter->num_msix_vectors; + + i = q_vectors - 1; + free_irq(adapter->msix_entries[i].vector, adapter); + + i--; + for (; i >= 0; i--) { + /* free only the irqs that were actually requested */ + if (!adapter->q_vector[i]->rx.count && + !adapter->q_vector[i]->tx.count) + continue; + + free_irq(adapter->msix_entries[i].vector, + adapter->q_vector[i]); + } + + ixgbe_reset_q_vectors(adapter); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * ixgbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); + if (adapter->num_vfs > 32) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); + break; + default: + break; + } + IXGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int i; + for (i = 0; i < adapter->num_msix_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * + **/ +static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_WRITE_REG(hw, IXGBE_EITR(0), + EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); + + ixgbe_set_ivar(adapter, 0, 0, 0); + ixgbe_set_ivar(adapter, 1, 0, 0); + + map_vector_to_rxq(adapter, 0, 0); + map_vector_to_txq(adapter, 0, 0); + + e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/** + * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
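+ *
+ * The queue is disabled while TDBAL/TDBAH/TDLEN and the head/tail
+ * pointers are rewritten, then re-enabled and polled up to ten times
+ * (1-2 ms apart) until TXDCTL.ENABLE reads back as set.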
+ **/ +void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), + txdctl & ~IXGBE_TXDCTL_ENABLE); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), + (tdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_tx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); + ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx); + + /* configure fetching thresholds */ + if (adapter->rx_itr_setting == 0) { + /* cannot set wthresh when itr==0 */ + txdctl &= ~0x007F0000; + } else { + /* enable WTHRESH=8 descriptors, to encourage burst writeback */ + txdctl |= (8 << 16); + } + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + /* PThresh workaround for Tx hang with DFP enabled. */ + txdctl |= 32; + } + + /* reinitialize flowdirector state */ + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && + adapter->atr_sample_rate) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } + + clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); + + /* enable queue */ + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); + + /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* poll to verify queue is enabled */ + do { + usleep_range(1000, 2000); + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + +static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rttdcs; + u32 reg; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + /* disable the arbiter while setting MTQC */ + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + rttdcs |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + /* set transmit pool layout */ + switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + case (IXGBE_FLAG_SRIOV_ENABLED): + IXGBE_WRITE_REG(hw, IXGBE_MTQC, + (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); + break; + default: + if (!tcs) + reg = IXGBE_MTQC_64Q_1PB; + else if (tcs <= 4) + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + + /* Enable Security TX Buffer IFG for multiple pb */ + if (tcs) { + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + } + break; + } + + /* re-enable the arbiter */ + rttdcs &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); +} + +/** + * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
+ **/ +static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 dmatxctl; + u32 i; + + ixgbe_setup_mtqc(adapter); + + if (hw->mac.type != ixgbe_mac_82598EB) { + /* DMATXCTL.EN must be before Tx queues are enabled */ + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + dmatxctl |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); + } + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + u32 srrctl; + u8 reg_idx = rx_ring->reg_idx; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: { + struct ixgbe_ring_feature *feature = adapter->ring_feature; + const int mask = feature[RING_F_RSS].mask; + reg_idx = reg_idx & mask; + } + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + default: + break; + } + + srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); + + srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; + srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; + if (adapter->num_vfs) + srrctl |= IXGBE_SRRCTL_DROP_EN; + + srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK; + + if (ring_is_ps_enabled(rx_ring)) { +#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER + srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#else + srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#endif + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else { + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + } + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D}; + u32 mrqc = 0, reta = 0; + u32 rxcsum; + int i, j; + u8 tcs = netdev_get_num_tc(adapter->netdev); + int maxq = adapter->ring_feature[RING_F_RSS].indices; + + if (tcs) + maxq = min(maxq, adapter->num_tx_queues / tcs); + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); + + /* Fill out redirection table */ + for (i = 0, j = 0; i < 128; i++, j++) { + if (j == maxq) + j = 0; + /* reta = 4-byte sliding window of + * 0x00..(indices-1)(indices-1)00..etc. 
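+ * e.g. maxq = 4 gives the byte pattern 00 11 22 33 repeating, with
+ * each group of four entries flushed to one RETA register.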
*/ + reta = (reta << 8) | (j * 0x11); + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + } + + /* Disable indicating checksum in descriptor, enables RSS hash */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + rxcsum |= IXGBE_RXCSUM_PCSD; + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB && + (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { + mrqc = IXGBE_MRQC_RSSEN; + } else { + int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED + | IXGBE_FLAG_SRIOV_ENABLED); + + switch (mask) { + case (IXGBE_FLAG_RSS_ENABLED): + if (!tcs) + mrqc = IXGBE_MRQC_RSSEN; + else if (tcs <= 4) + mrqc = IXGBE_MRQC_RTRSS4TCEN; + else + mrqc = IXGBE_MRQC_RTRSS8TCEN; + break; + case (IXGBE_FLAG_SRIOV_ENABLED): + mrqc = IXGBE_MRQC_VMDQEN; + break; + default: + break; + } + } + + /* Perform hash on these packet types */ + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); +} + +/** + * ixgbe_configure_rscctl - enable RSC for the indicated ring + * @adapter: address of board private structure + * @index: index of ring to set + **/ +static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rscctrl; + int rx_buf_len; + u8 reg_idx = ring->reg_idx; + + if (!ring_is_rsc_enabled(ring)) + return; + + rx_buf_len = ring->rx_buf_len; + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); + rscctrl |= IXGBE_RSCCTL_RSCEN; + /* + * we must limit the number of descriptors so that the + * total size of max desc * buf_len is not greater + * than 65535 + */ + if (ring_is_ps_enabled(ring)) { +#if (MAX_SKB_FRAGS > 16) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; +#elif (MAX_SKB_FRAGS > 8) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; +#elif (MAX_SKB_FRAGS > 4) + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; +#else + rscctrl |= IXGBE_RSCCTL_MAXDESC_1; +#endif + } else { + if (rx_buf_len < IXGBE_RXBUFFER_4096) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; + else if (rx_buf_len < IXGBE_RXBUFFER_8192) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; + else + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; + } + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); +} + +/** + * ixgbe_set_uta - Set unicast filter table address + * @adapter: board private structure + * + * The unicast table address is a register array of 32-bit registers. 
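+ * There are 128 of them, and the code below simply writes ~0 to each,
+ * setting every hash bit.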
+ * The table is meant to be used in a way similar to how the MTA is used + * however due to certain limitations in the hardware it is necessary to + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + * enable bit to allow vlan tag stripping when promiscuous mode is enabled + **/ +static void ixgbe_set_uta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + /* The UTA table only exists on 82599 hardware and newer */ + if (hw->mac.type < ixgbe_mac_82599EB) + return; + + /* we only need to do this if VMDq is enabled */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; + + for (i = 0; i < 128; i++) + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); +} + +#define IXGBE_MAX_RX_DESC_POLL 10 +static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + do { + usleep_range(1000, 2000); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " + "the polling period\n", reg_idx); + } +} + +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* the hardware may take up to 100us to really disable the rx queue */ + do { + udelay(10); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} + +void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + ixgbe_disable_rx_queue(adapter, ring); + + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_rx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); + ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx); + + ixgbe_configure_srrctl(adapter, ring); + ixgbe_configure_rscctl(adapter, ring); + + /* If operating in IOV mode set RLPML for X540 */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && + hw->mac.type == ixgbe_mac_X540) { + rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; + rxdctl |= ((ring->netdev->mtu + ETH_HLEN + + ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN); + } + + if (hw->mac.type == ixgbe_mac_82598EB) { + /* + * enable cache line friendly hardware writes: + * PTHRESH=32 descriptors (half the internal cache), + * this also 
removes ugly rx_no_buffer_count increment + * HTHRESH=4 descriptors (to minimize latency on fetch) + * WTHRESH=8 burst writeback up to two cache lines + */ + rxdctl &= ~0x3FFFFF; + rxdctl |= 0x080420; + } + + /* enable receive descriptor ring */ + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + ixgbe_rx_desc_queue_enable(adapter, ring); + ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); +} + +static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int p; + + /* PSRTYPE must be initialized in non 82598 adapters */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_L2HDR | + IXGBE_PSRTYPE_IPV6HDR; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) + psrtype |= (adapter->num_rx_queues_per_pool << 29); + + for (p = 0; p < adapter->num_rx_pools; p++) + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), + psrtype); +} + +static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 gcr_ext; + u32 vt_reg_bits; + u32 reg_offset, vf_shift; + u32 vmdctl; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; + + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN; + vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); + + vf_shift = adapter->num_vfs % 32; + reg_offset = (adapter->num_vfs > 32) ? 1 : 0; + + /* Enable only the PF's pool for Tx/Rx */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0); + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + + /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ + hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); + + /* + * Set up VF register offsets for selected VT Mode, + * i.e. 
32 or 64 VFs for SR-IOV + */ + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; + gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + + /* enable Tx loopback for VF/PF communication */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + /* Enable MAC Anti-Spoofing */ + hw->mac.ops.set_mac_anti_spoofing(hw, + (adapter->antispoofing_enabled = + (adapter->num_vfs != 0)), + adapter->num_vfs); +} + +static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + int rx_buf_len; + struct ixgbe_ring *rx_ring; + int i; + u32 mhadd, hlreg0; + + /* Decide whether to use packet split mode or not */ + /* On by default */ + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + + /* Do not use packet split if we're in SR-IOV Mode */ + if (adapter->num_vfs) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + + /* Disable packet split due to 82599 erratum #45 */ + if (hw->mac.type == ixgbe_mac_82599EB) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + + /* Set the RX buffer length according to the mode */ + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + rx_buf_len = IXGBE_RX_HDR_SIZE; + } else { + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && + (netdev->mtu <= ETH_DATA_LEN)) + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else + rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024); + } + +#ifdef IXGBE_FCOE + /* adjust max frame to be able to do baby jumbo for FCoE */ + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) + max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; + +#endif /* IXGBE_FCOE */ + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); + if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { + mhadd &= ~IXGBE_MHADD_MFS_MASK; + mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); + } + + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + rx_ring->rx_buf_len = rx_buf_len; + + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) + set_ring_ps_enabled(rx_ring); + else + clear_ring_ps_enabled(rx_ring); + + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + set_ring_rsc_enabled(rx_ring); + else + clear_ring_rsc_enabled(rx_ring); + +#ifdef IXGBE_FCOE + if (netdev->features & NETIF_F_FCOE_MTU) { + struct ixgbe_ring_feature *f; + f = &adapter->ring_feature[RING_F_FCOE]; + if ((i >= f->mask) && (i < f->mask + f->indices)) { + clear_ring_ps_enabled(rx_ring); + if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) + rx_ring->rx_buf_len = + IXGBE_FCOE_JUMBO_FRAME_SIZE; + } else if (!ring_is_rsc_enabled(rx_ring) && + !ring_is_ps_enabled(rx_ring)) { + rx_ring->rx_buf_len = + IXGBE_FCOE_JUMBO_FRAME_SIZE; + } + } +#endif /* IXGBE_FCOE */ + } +} + +static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + /* + * For VMDq support of different descriptor types or + * buffer sizes through the use of multiple SRRCTL + * registers, RDRXCTL.MVMEN must be set to 
1 + * + * also, the manual doesn't mention it clearly but DCA hints + * will only use queue 0's tags unless this bit is set. Side + * effects of setting this bit are only that SRRCTL must be + * fully programmed [0..15] + */ + rdrxctl |= IXGBE_RDRXCTL_MVMEN; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* Disable RSC for ACK packets */ + IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, + (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; + /* hardware requires some bits to be set by default */ + rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; + break; + default: + /* We should do nothing since we don't know this hardware */ + return; + } + + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); +} + +/** + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl; + + /* disable receives while setting up the descriptors */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + + ixgbe_setup_psrtype(adapter); + ixgbe_setup_rdrxctl(adapter); + + /* Program registers for the distribution of queues */ + ixgbe_setup_mrqc(adapter); + + ixgbe_set_uta(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + ixgbe_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + /* disable drop enable for 82598 parts */ + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; + + /* enable all receives */ + rxctrl |= IXGBE_RXCTRL_RXEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); +} + +static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int pool_ndx = adapter->num_vfs; + + /* add VID to filter table */ + hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); + set_bit(vid, adapter->active_vlans); +} + +static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int pool_ndx = adapter->num_vfs; + + /* remove VID from filter table */ + hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); + clear_bit(vid, adapter->active_vlans); +} + +/** + * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +/** + * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl |= IXGBE_VLNCTRL_VFE; + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +/** + * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping 
+ * @adapter: driver data + */ +static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + for (i = 0; i < adapter->num_rx_queues; i++) { + j = adapter->rx_ring[i]->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + +/** + * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping + * @adapter: driver data + */ +static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl |= IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + for (i = 0; i < adapter->num_rx_queues; i++) { + j = adapter->rx_ring[i]->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + +static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) +{ + u16 vid; + + ixgbe_vlan_rx_add_vid(adapter->netdev, 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ixgbe_vlan_rx_add_vid(adapter->netdev, vid); +} + +/** + * ixgbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int ixgbe_write_uc_addr_list(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + unsigned int vfn = adapter->num_vfs; + unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS; + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev) && rar_entries) { + struct netdev_hw_addr *ha; + /* return error if we do not support writing to RAR table */ + if (!hw->mac.ops.set_rar) + return -ENOMEM; + + netdev_for_each_uc_addr(ha, netdev) { + if (!rar_entries) + break; + hw->mac.ops.set_rar(hw, rar_entries--, ha->addr, + vfn, IXGBE_RAH_AV); + count++; + } + } + /* write the addresses in reverse order to avoid write combining */ + for (; rar_entries > 0 ; rar_entries--) + hw->mac.ops.clear_rar(hw, rar_entries); + + return count; +} + +/** + * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. 
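+ *
+ * Precedence below is IFF_PROMISC (UPE|MPE set, VLAN filtering off),
+ * then IFF_ALLMULTI (MPE only), then filtered operation, where an
+ * overflow of the RAR unicast table falls back to unicast promiscuous.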
+ **/ +void ixgbe_set_rx_mode(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + + /* set all bits that we expect to always be set */ + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ + fctrl |= IXGBE_FCTRL_PMCF; + + /* clear the bits we are changing the status of */ + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); + /* don't hardware filter vlans in promisc mode */ + ixgbe_vlan_filter_disable(adapter); + } else { + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= IXGBE_FCTRL_MPE; + vmolr |= IXGBE_VMOLR_MPE; + } else { + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + hw->mac.ops.update_mc_addr_list(hw, netdev); + vmolr |= IXGBE_VMOLR_ROMPE; + } + ixgbe_vlan_filter_enable(adapter); + hw->addr_ctrl.user_set_promisc = false; + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = ixgbe_write_uc_addr_list(netdev); + if (count < 0) { + fctrl |= IXGBE_FCTRL_UPE; + vmolr |= IXGBE_VMOLR_ROPE; + } + } + + if (adapter->num_vfs) { + ixgbe_restore_vf_multicasts(adapter); + vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) & + ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_ROPE); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + if (netdev->features & NETIF_F_HW_VLAN_RX) + ixgbe_vlan_strip_enable(adapter); + else + ixgbe_vlan_strip_disable(adapter); +} + +static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) +{ + int q_idx; + struct ixgbe_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* legacy and MSI only use one vector */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + q_vectors = 1; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct napi_struct *napi; + q_vector = adapter->q_vector[q_idx]; + napi = &q_vector->napi; + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + if (!q_vector->rx.count || !q_vector->tx.count) { + if (q_vector->tx.count == 1) + napi->poll = &ixgbe_clean_txonly; + else if (q_vector->rx.count == 1) + napi->poll = &ixgbe_clean_rxonly; + } + } + + napi_enable(napi); + } +} + +static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) +{ + int q_idx; + struct ixgbe_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* legacy and MSI only use one vector */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + q_vectors = 1; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} + +#ifdef CONFIG_IXGBE_DCB +/* + * ixgbe_configure_dcb - Configure DCB hardware + * @adapter: ixgbe adapter struct + * + * This is called by the driver on open to configure the DCB hardware. + * This is also called by the genetlink interface when reconfiguring + * the DCB state.
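+ *
+ * (The 82598 cannot segment frames larger than 32K when DCB is enabled,
+ * hence the reduced gso_max_size below; this is an editor's note, not
+ * part of the original patch.)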
+ */ +static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { + if (hw->mac.type == ixgbe_mac_82598EB) + netif_set_gso_max_size(adapter->netdev, 65536); + return; + } + + if (hw->mac.type == ixgbe_mac_82598EB) + netif_set_gso_max_size(adapter->netdev, 32768); + + + /* Enable VLAN tag insert/strip */ + adapter->netdev->features |= NETIF_F_HW_VLAN_RX; + + hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); + + /* reconfigure the hardware */ + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { +#ifdef CONFIG_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_RX_CONFIG); + ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); + } else { + struct net_device *dev = adapter->netdev; + + if (adapter->ixgbe_ieee_ets) + dev->dcbnl_ops->ieee_setets(dev, + adapter->ixgbe_ieee_ets); + if (adapter->ixgbe_ieee_pfc) + dev->dcbnl_ops->ieee_setpfc(dev, + adapter->ixgbe_ieee_pfc); + } + + /* Enable RSS Hash per TC */ + if (hw->mac.type != ixgbe_mac_82598EB) { + int i; + u32 reg = 0; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + u8 msb = 0; + u8 cnt = adapter->netdev->tc_to_txq[i].count; + + while (cnt >>= 1) + msb++; + + reg |= msb << IXGBE_RQTC_SHIFT_TC(i); + } + IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); + } +} + +#endif + +static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) +{ + int hdrm = 0; + int num_tc = netdev_get_num_tc(adapter->netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || + adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + hdrm = 64 << adapter->fdir_pballoc; + + hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL); +} + +static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct hlist_node *node, *node2; + struct ixgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + if (!hlist_empty(&adapter->fdir_filter_list)) + ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); + + hlist_for_each_entry_safe(filter, node, node2, + &adapter->fdir_filter_list, fdir_node) { + ixgbe_fdir_write_perfect_filter_82599(hw, + &filter->filter, + filter->sw_idx, + (filter->action == IXGBE_FDIR_DROP_QUEUE) ? 
+ IXGBE_FDIR_DROP_QUEUE : + adapter->rx_ring[filter->action]->reg_idx); + } + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static void ixgbe_configure(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + ixgbe_configure_pb(adapter); +#ifdef CONFIG_IXGBE_DCB + ixgbe_configure_dcb(adapter); +#endif + + ixgbe_set_rx_mode(netdev); + ixgbe_restore_vlan(adapter); + +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + ixgbe_configure_fcoe(adapter); + +#endif /* IXGBE_FCOE */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->atr_sample_rate = + adapter->atr_sample_rate; + ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); + } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { + ixgbe_init_fdir_perfect_82599(&adapter->hw, + adapter->fdir_pballoc); + ixgbe_fdir_filter_restore(adapter); + } + ixgbe_configure_virtualization(adapter); + + ixgbe_configure_tx(adapter); + ixgbe_configure_rx(adapter); +} + +static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + switch (hw->phy.type) { + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_sfp_active_unknown: + case ixgbe_phy_sfp_ftl_active: + return true; + default: + return false; + } +} + +/** + * ixgbe_sfp_link_config - set up SFP+ link + * @adapter: pointer to private adapter struct + **/ +static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) +{ + /* + * We are assuming the worst case scenario here, and that + * is that an SFP was inserted/removed after the reset + * but before SFP detection was enabled.
As such the best + * solution is to just start searching as soon as we start up. + */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; +} + +/** + * ixgbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) +{ + u32 autoneg; + bool negotiation, link_up = false; + u32 ret = IXGBE_ERR_LINK_SETUP; + + if (hw->mac.ops.check_link) + ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + + if (ret) + goto link_cfg_out; + + autoneg = hw->phy.autoneg_advertised; + if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) + ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, + &negotiation); + if (ret) + goto link_cfg_out; + + if (hw->mac.ops.setup_link) + ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up); +link_cfg_out: + return ret; +} + +static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 gpie = 0; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | + IXGBE_GPIE_OCD; + gpie |= IXGBE_GPIE_EIAME; + /* + * use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + default: + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); + break; + } + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts */ + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + } + + /* XXX: to interrupt immediately for EICS writes, enable this */ + /* gpie |= IXGBE_GPIE_EIMEN; */ + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + gpie |= IXGBE_GPIE_VTMODE_64; + } + + /* Enable fan failure interrupt */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) + gpie |= IXGBE_SDP1_GPIEN; + + if (hw->mac.type == ixgbe_mac_82599EB) { + gpie |= IXGBE_SDP1_GPIEN; + gpie |= IXGBE_SDP2_GPIEN; + } + + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); +} + +static int ixgbe_up_complete(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + u32 ctrl_ext; + + ixgbe_get_hw_control(adapter); + ixgbe_setup_gpie(adapter); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + ixgbe_configure_msix(adapter); + else + ixgbe_configure_msi_and_legacy(adapter); + + /* enable the optics for both multispeed fiber and 82599 SFP+ fiber */ + if (hw->mac.ops.enable_tx_laser && + ((hw->phy.multispeed_fiber) || + ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && + (hw->mac.type == ixgbe_mac_82599EB)))) + hw->mac.ops.enable_tx_laser(hw); + + clear_bit(__IXGBE_DOWN, &adapter->state); + ixgbe_napi_enable_all(adapter); + + if (ixgbe_is_sfp(hw)) { + ixgbe_sfp_link_config(adapter); + } else { + err = ixgbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); + } + + /* clear any pending interrupts, may auto mask */ + IXGBE_READ_REG(hw, IXGBE_EICR); + ixgbe_irq_enable(adapter, true, true); + + /* + * If this adapter has a fan, check to see if we had a failure + * before we enabled the interrupt.
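+ * (ESDP bit SDP1 reading high at this point means the fan already
+ * failed before the interrupt was armed.)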
+ */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + e_crit(drv, "Fan has stopped, replace the adapter\n"); + } + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + /* bring the link up in the watchdog; this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + mod_timer(&adapter->service_timer, jiffies); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + + return 0; +} + +void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + adapter->netdev->trans_start = jiffies; + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + ixgbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. + */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + ixgbe_up(adapter); + clear_bit(__IXGBE_RESETTING, &adapter->state); +} + +int ixgbe_up(struct ixgbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + ixgbe_configure(adapter); + + return ixgbe_up_complete(adapter); +} + +void ixgbe_reset(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + + /* lock SFP init bit to prevent race conditions with the watchdog */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + /* clear all SFP and link config related flags while holding SFP_INIT */ + adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | + IXGBE_FLAG2_SFP_NEEDS_RESET); + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; + + err = hw->mac.ops.init_hw(hw); + switch (err) { + case 0: + case IXGBE_ERR_SFP_NOT_PRESENT: + case IXGBE_ERR_SFP_NOT_SUPPORTED: + break; + case IXGBE_ERR_MASTER_REQUESTS_PENDING: + e_dev_err("master disable timed out\n"); + break; + case IXGBE_ERR_EEPROM_VERSION: + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated with " + "your hardware. If you are experiencing problems " + "please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); + break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + + /* reprogram the RAR[0] in case user changed it.
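Pool index num_vfs keeps RAR[0] bound to the PF, past the VF pools.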
*/ + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, + IXGBE_RAH_AV); +} + +/** + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ixgbe_rx_buffer *rx_buffer_info; + + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + if (rx_buffer_info->dma) { + dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer_info->dma = 0; + } + if (rx_buffer_info->skb) { + struct sk_buff *skb = rx_buffer_info->skb; + rx_buffer_info->skb = NULL; + do { + struct sk_buff *this = skb; + if (IXGBE_RSC_CB(this)->delay_unmap) { + dma_unmap_single(dev, + IXGBE_RSC_CB(this)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + IXGBE_RSC_CB(this)->dma = 0; + IXGBE_RSC_CB(skb)->delay_unmap = false; + } + skb = skb->prev; + dev_kfree_skb(this); + } while (skb); + } + if (!rx_buffer_info->page) + continue; + if (rx_buffer_info->page_dma) { + dma_unmap_page(dev, rx_buffer_info->page_dma, + PAGE_SIZE / 2, DMA_FROM_DEVICE); + rx_buffer_info->page_dma = 0; + } + put_page(rx_buffer_info->page); + rx_buffer_info->page = NULL; + rx_buffer_info->page_offset = 0; + } + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * ixgbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) +{ + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_clean_tx_ring(adapter->tx_ring[i]); +} + +static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) +{ + struct hlist_node *node, *node2; + struct ixgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node, node2, + &adapter->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + adapter->fdir_filter_count = 0; + + spin_unlock(&adapter->fdir_perfect_lock); +} + +void ixgbe_down(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = 
adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 rxctrl; + int i; + int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* signal that we are down to the interrupt handler */ + set_bit(__IXGBE_DOWN, &adapter->state); + + /* disable receives */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + usleep_range(10000, 20000); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + ixgbe_irq_disable(adapter); + + ixgbe_napi_disable_all(adapter); + + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | + IXGBE_FLAG2_RESET_REQUESTED); + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); + + /* disable receive for all VFs and wait one second */ + if (adapter->num_vfs) { + /* ping all the active vfs to let them know we are going down */ + ixgbe_ping_all_vfs(adapter); + + /* Disable all VFTE/VFRE TX/RX */ + ixgbe_disable_tx_rx(adapter); + + /* Mark all the VFs as inactive */ + for (i = 0; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; + } + + /* Cleanup the affinity_hint CPU mask memory and callback */ + for (i = 0; i < num_q_vectors; i++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL); + /* release the CPU mask memory */ + free_cpumask_var(q_vector->affinity_mask); + } + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + } + + /* Disable the Tx DMA engine on 82599 and X540 */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, + (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & + ~IXGBE_DMATXCTL_TE)); + break; + default: + break; + } + + if (!pci_channel_offline(adapter->pdev)) + ixgbe_reset(adapter); + + /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser && + ((hw->phy.multispeed_fiber) || + ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && + (hw->mac.type == ixgbe_mac_82599EB)))) + hw->mac.ops.disable_tx_laser(hw); + + ixgbe_clean_all_tx_rings(adapter); + ixgbe_clean_all_rx_rings(adapter); + +#ifdef CONFIG_IXGBE_DCA + /* since we reset the hardware, DCA settings were cleared */ + ixgbe_setup_dca(adapter); +#endif +} + +/** + * ixgbe_poll - NAPI Rx polling callback + * @napi: structure for representing this polling device + * @budget: how many packets driver is allowed to clean + * + * This function is used for legacy and MSI, NAPI mode + **/ +static int ixgbe_poll(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + int tx_clean_complete, work_done = 0; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); + ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0],
&work_done, budget); + + if (!tx_clean_complete) + work_done = budget; + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); + } + return work_done; +} + +/** + * ixgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ixgbe_tx_timeout(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + ixgbe_tx_timeout_reset(adapter); +} + +/** + * ixgbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) +{ + bool ret = false; + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + f->mask = 0xF; + adapter->num_rx_queues = f->indices; + adapter->num_tx_queues = f->indices; + ret = true; + } else { + ret = false; + } + + return ret; +} + +/** + * ixgbe_set_fdir_queues: Allocate queues for Flow Director + * @adapter: board private structure to initialize + * + * Flow Director is an advanced Rx filter, attempting to get Rx flows back + * to the original CPU that initiated the Tx session. This runs in addition + * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the + * Rx load across CPUs using RSS. + * + **/ +static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) +{ + bool ret = false; + struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; + + f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); + f_fdir->mask = 0; + + /* Flow Director must have RSS enabled */ + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { + adapter->num_tx_queues = f_fdir->indices; + adapter->num_rx_queues = f_fdir->indices; + ret = true; + } else { + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + } + return ret; +} + +#ifdef IXGBE_FCOE +/** + * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE) + * @adapter: board private structure to initialize + * + * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. + * The ring feature mask is not used as a mask for FCoE, as it can take any 8 + * rx queues out of the max number of rx queues; instead, it is used as the + * index of the first rx queue used by FCoE.
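+ * For example, if RSS settles on N rx queues first, f->mask becomes N
+ * and the FCoE rings occupy queues N..N+f->indices-1, appended at the end.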
+ * + **/ +static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return false; + + f->indices = min((int)num_online_cpus(), f->indices); + + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + e_info(probe, "FCoE enabled with RSS\n"); + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + ixgbe_set_fdir_queues(adapter); + else + ixgbe_set_rss_queues(adapter); + } + + /* adding FCoE rx rings to the end */ + f->mask = adapter->num_rx_queues; + adapter->num_rx_queues += f->indices; + adapter->num_tx_queues += f->indices; + + return true; +} +#endif /* IXGBE_FCOE */ + +/* Artificial max queue cap per traffic class in DCB mode */ +#define DCB_QUEUE_CAP 8 + +#ifdef CONFIG_IXGBE_DCB +static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +{ + int per_tc_q, q, i, offset = 0; + struct net_device *dev = adapter->netdev; + int tcs = netdev_get_num_tc(dev); + + if (!tcs) + return false; + + /* Map queue offset and counts onto allocated tx queues */ + per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP); + q = min((int)num_online_cpus(), per_tc_q); + + for (i = 0; i < tcs; i++) { + netdev_set_prio_tc_map(dev, i, i); + netdev_set_tc_queue(dev, i, q, offset); + offset += q; + } + + adapter->num_tx_queues = q * tcs; + adapter->num_rx_queues = q * tcs; + +#ifdef IXGBE_FCOE + /* FCoE enabled queues require special configuration indexed + * by feature specific indices and mask. Here we map FCoE + * indices onto the DCB queue pairs allowing FCoE to own + * configuration later. + */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + int tc; + struct ixgbe_ring_feature *f = + &adapter->ring_feature[RING_F_FCOE]; + + tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up); + f->indices = dev->tc_to_txq[tc].count; + f->mask = dev->tc_to_txq[tc].offset; + } +#endif + + return true; +} +#endif + +/** + * ixgbe_set_sriov_queues: Allocate queues for IOV use + * @adapter: board private structure to initialize + * + * IOV doesn't actually use anything, so just NAK the + * request for now and let the other queue routines + * figure out what to do. + */ +static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) +{ + return false; +} + +/* + * ixgbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. 
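 + * The resulting precedence is SR-IOV first, then DCB, then FCoE, then
 + * Flow Director, with plain RSS as the final fallback before the
 + * single-queue base case.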
+ * + **/ +static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_queues_per_pool = 1; + + if (ixgbe_set_sriov_queues(adapter)) + goto done; + +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_set_dcb_queues(adapter)) + goto done; + +#endif +#ifdef IXGBE_FCOE + if (ixgbe_set_fcoe_queues(adapter)) + goto done; + +#endif /* IXGBE_FCOE */ + if (ixgbe_set_fdir_queues(adapter)) + goto done; + + if (ixgbe_set_rss_queues(adapter)) + goto done; + + /* fallback to base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + +done: + /* Notify the stack of the (possibly) reduced queue counts. */ + netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); + return netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); +} + +static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, + int vectors) +{ + int err, vector_threshold; + + /* We'll want at least 3 (vector_threshold): + * 1) TxQ[0] Cleanup + * 2) RxQ[0] Cleanup + * 3) Other (Link Status Change, etc.) + * 4) TCP Timer (optional) + */ + vector_threshold = MIN_MSIX_COUNT; + + /* The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + while (vectors >= vector_threshold) { + err = pci_enable_msix(adapter->pdev, adapter->msix_entries, + vectors); + if (!err) /* Success in acquiring all requested vectors. */ + break; + else if (err < 0) + vectors = 0; /* Nasty failure, quit now */ + else /* err == number of vectors we should try again with */ + vectors = err; + } + + if (vectors < vector_threshold) { + /* Can't allocate enough MSI-X interrupts? Oh well. + * This just means we'll go with either a single MSI + * vector or fall back to legacy interrupts. + */ + netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, + "Unable to allocate MSI-X interrupts\n"); + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else { + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ + /* + * Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. + */ + adapter->num_msix_vectors = min(vectors, + adapter->max_msix_q_vectors + NON_Q_VECTORS); + } +} + +/** + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS to the assigned rings. 
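 + * For RSS this is simply the identity mapping: ring i uses register
 + * index i for both the Rx and Tx queue.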
+ * + **/ +static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) +{ + int i; + + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) + return false; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} + +#ifdef CONFIG_IXGBE_DCB + +/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ +static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + unsigned int *tx, unsigned int *rx) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + *tx = tc << 2; + *rx = tc << 3; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (num_tcs == 8) { + if (tc < 3) { + *tx = tc << 5; + *rx = tc << 4; + } else if (tc < 5) { + *tx = ((tc + 2) << 4); + *rx = tc << 4; + } else if (tc < num_tcs) { + *tx = ((tc + 8) << 3); + *rx = tc << 4; + } + } else if (num_tcs == 4) { + *rx = tc << 5; + switch (tc) { + case 0: + *tx = 0; + break; + case 1: + *tx = 64; + break; + case 2: + *tx = 96; + break; + case 3: + *tx = 112; + break; + default: + break; + } + } + break; + default: + break; + } +} + +/** + * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. + * + **/ +static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, j, k; + u8 num_tcs = netdev_get_num_tc(dev); + + if (!num_tcs) + return false; + + for (i = 0, k = 0; i < num_tcs; i++) { + unsigned int tx_s, rx_s; + u16 count = dev->tc_to_txq[i].count; + + ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); + for (j = 0; j < count; j++, k++) { + adapter->tx_ring[k]->reg_idx = tx_s + j; + adapter->rx_ring[k]->reg_idx = rx_s + j; + adapter->tx_ring[k]->dcb_tc = i; + adapter->rx_ring[k]->dcb_tc = i; + } + } + + return true; +} +#endif + +/** + * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for Flow Director to the assigned rings. + * + **/ +static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) +{ + int i; + bool ret = false; + + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + ret = true; + } + + return ret; +} + +#ifdef IXGBE_FCOE +/** + * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for FCoE mode to the assigned rings. 
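 + * The FDIR or RSS rings are cached first; the FCoE rings then continue
 + * from f->mask, so with f->mask of 4 (an illustrative value) the first
 + * FCoE ring gets register index 4.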
+ * + */ +static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + int i; + u8 fcoe_rx_i = 0, fcoe_tx_i = 0; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return false; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + ixgbe_cache_ring_fdir(adapter); + else + ixgbe_cache_ring_rss(adapter); + + fcoe_rx_i = f->mask; + fcoe_tx_i = f->mask; + } + for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { + adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; + adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; + } + return true; +} + +#endif /* IXGBE_FCOE */ +/** + * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov + * @adapter: board private structure to initialize + * + * SR-IOV doesn't use any descriptor rings but changes the default if + * no other mapping is used. + * + */ +static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) +{ + adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; + adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; + if (adapter->num_vfs) + return true; + else + return false; +} + +/** + * ixgbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) +{ + /* start with default case */ + adapter->rx_ring[0]->reg_idx = 0; + adapter->tx_ring[0]->reg_idx = 0; + + if (ixgbe_cache_ring_sriov(adapter)) + return; + +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_cache_ring_dcb(adapter)) + return; +#endif + +#ifdef IXGBE_FCOE + if (ixgbe_cache_ring_fcoe(adapter)) + return; +#endif /* IXGBE_FCOE */ + + if (ixgbe_cache_ring_fdir(adapter)) + return; + + if (ixgbe_cache_ring_rss(adapter)) + return; +} + +/** + * ixgbe_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. 
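 + *
 + * Each ring is allocated with kzalloc_node() on the adapter's NUMA node,
 + * falling back to a plain kzalloc() if that node has no free memory.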
+ **/ +static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) +{ + int rx = 0, tx = 0, nid = adapter->node; + + if (nid < 0 || !node_online(nid)) + nid = first_online_node; + + for (; tx < adapter->num_tx_queues; tx++) { + struct ixgbe_ring *ring; + + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); + if (!ring) + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_allocation; + ring->count = adapter->tx_ring_count; + ring->queue_index = tx; + ring->numa_node = nid; + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + adapter->tx_ring[tx] = ring; + } + + for (; rx < adapter->num_rx_queues; rx++) { + struct ixgbe_ring *ring; + + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); + if (!ring) + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_allocation; + ring->count = adapter->rx_ring_count; + ring->queue_index = rx; + ring->numa_node = nid; + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + adapter->rx_ring[rx] = ring; + } + + ixgbe_cache_ring_register(adapter); + + return 0; + +err_allocation: + while (tx) + kfree(adapter->tx_ring[--tx]); + + while (rx) + kfree(adapter->rx_ring[--rx]); + return -ENOMEM; +} + +/** + * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err = 0; + int vector, v_budget; + + /* + * It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) the same number of vectors as there are CPU's. + */ + v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, + (int)num_online_cpus()) + NON_Q_VECTORS; + + /* + * At the same time, hardware can only support a maximum of + * hw.mac->max_msix_vectors vectors. With features + * such as RSS and VMDq, we can easily surpass the number of Rx and Tx + * descriptor queues supported by our device. Thus, we cap it off in + * those rare cases where the cpu count also exceeds our vector limit. + */ + v_budget = min(v_budget, (int)hw->mac.max_msix_vectors); + + /* A failure in MSI-X entry allocation isn't fatal, but it does + * mean we disable MSI-X capabilities of the adapter. */ + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (adapter->msix_entries) { + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + ixgbe_acquire_msix_vectors(adapter, v_budget); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + goto out; + } + + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + e_err(probe, + "ATR is not supported while multiple " + "queues are disabled. Disabling Flow Director\n"); + } + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->atr_sample_rate = 0; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + ixgbe_disable_sriov(adapter); + + err = ixgbe_set_num_queues(adapter); + if (err) + return err; + + err = pci_enable_msi(adapter->pdev); + if (!err) { + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; + } else { + netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, + "Unable to allocate MSI interrupt, " + "falling back to legacy. 
Error: %d\n", err); + /* reset err */ + err = 0; + } + +out: + return err; +} + +/** + * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) +{ + int q_idx, num_q_vectors; + struct ixgbe_q_vector *q_vector; + int (*poll)(struct napi_struct *, int); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + poll = &ixgbe_clean_rxtx_many; + } else { + num_q_vectors = 1; + poll = &ixgbe_poll; + } + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), + GFP_KERNEL, adapter->node); + if (!q_vector) + q_vector = kzalloc(sizeof(struct ixgbe_q_vector), + GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + if (q_vector->tx.count && !q_vector->rx.count) + q_vector->eitr = adapter->tx_eitr_param; + else + q_vector->eitr = adapter->rx_eitr_param; + q_vector->v_idx = q_idx; + netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); + adapter->q_vector[q_idx] = q_vector; + } + + return 0; + +err_out: + while (q_idx) { + q_idx--; + q_vector = adapter->q_vector[q_idx]; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + adapter->q_vector[q_idx] = NULL; + } + return -ENOMEM; +} + +/** + * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) +{ + int q_idx, num_q_vectors; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + else + num_q_vectors = 1; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; + adapter->q_vector[q_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + } +} + +static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) +{ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) 
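 + *
 + * On failure each step unwinds the one before it, so a partial init never
 + * leaks q_vectors or MSI-X state.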
+ **/ +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + err = ixgbe_set_num_queues(adapter); + if (err) + return err; + + err = ixgbe_set_interrupt_capability(adapter); + if (err) { + e_dev_err("Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = ixgbe_alloc_q_vectors(adapter); + if (err) { + e_dev_err("Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + err = ixgbe_alloc_queues(adapter); + if (err) { + e_dev_err("Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); + + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; + +err_alloc_queues: + ixgbe_free_q_vectors(adapter); +err_alloc_q_vectors: + ixgbe_reset_interrupt_capability(adapter); +err_set_interrupt: + return err; +} + +/** + * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + kfree(adapter->tx_ring[i]); + adapter->tx_ring[i] = NULL; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + /* ixgbe_get_stats64() might access this ring, we must wait + * a grace period before freeing it. + */ + kfree_rcu(ring, rcu); + adapter->rx_ring[i] = NULL; + } + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + ixgbe_free_q_vectors(adapter); + ixgbe_reset_interrupt_capability(adapter); +} + +/** + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) + * @adapter: board private structure to initialize + * + * ixgbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
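 + *
 + * The flow control watermarks, for example, are derived from the current
 + * MTU via FC_HIGH_WATER(max_frame) and FC_LOW_WATER(max_frame).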
+ **/ +static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct net_device *dev = adapter->netdev; + unsigned int rss; +#ifdef CONFIG_IXGBE_DCB + int j; + struct tc_configuration *tc; +#endif + int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Set capability flags */ + rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); + adapter->ring_feature[RING_F_RSS].indices = rss; + adapter->flags |= IXGBE_FLAG_RSS_ENABLED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + if (hw->device_id == IXGBE_DEV_ID_82598AT) + adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; + adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; + adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + /* Flow Director hash filters enabled */ + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->atr_sample_rate = 20; + adapter->ring_feature[RING_F_FDIR].indices = + IXGBE_MAX_FDIR_INDICES; + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; +#ifdef IXGBE_FCOE + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + adapter->ring_feature[RING_F_FCOE].indices = 0; +#ifdef CONFIG_IXGBE_DCB + /* Default traffic class to use for FCoE */ + adapter->fcoe.up = IXGBE_FCOE_DEFTC; +#endif +#endif /* IXGBE_FCOE */ + break; + default: + break; + } + + /* n-tuple support exists, always init our spinlock */ + spin_lock_init(&adapter->fdir_perfect_lock); + +#ifdef CONFIG_IXGBE_DCB + /* Configure DCB traffic classes */ + for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { + tc = &adapter->dcb_cfg.tc_config[j]; + tc->path[DCB_TX_CONFIG].bwg_id = 0; + tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); + tc->path[DCB_RX_CONFIG].bwg_id = 0; + tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); + tc->dcb_pfc = pfc_disabled; + } + adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_set_bitmap = 0x00; + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, + MAX_TRAFFIC_CLASS); + +#endif + + /* default flow control settings */ + hw->fc.requested_mode = ixgbe_fc_full; + hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ +#ifdef CONFIG_DCB + adapter->last_lfc_mode = hw->fc.current_mode; +#endif + hw->fc.high_water = FC_HIGH_WATER(max_frame); + hw->fc.low_water = FC_LOW_WATER(max_frame); + hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = false; + + /* enable itr by default in dynamic mode */ + adapter->rx_itr_setting = 1; + adapter->rx_eitr_param = 20000; + adapter->tx_itr_setting = 1; + adapter->tx_eitr_param = 10000; + + /* set defaults for eitr in MegaBytes */ + adapter->eitr_low = 10; + adapter->eitr_high = 20; + + /* set default ring sizes */ + adapter->tx_ring_count = IXGBE_DEFAULT_TXD; + adapter->rx_ring_count = IXGBE_DEFAULT_RXD; + + /* set default work limits 
*/ + adapter->tx_work_limit = adapter->tx_ring_count; + + /* initialize eeprom parameters */ + if (ixgbe_init_eeprom_params_generic(hw)) { + e_dev_err("EEPROM initialization failed\n"); + return -EIO; + } + + /* enable rx csum by default */ + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; + + /* get assigned NUMA node */ + adapter->node = dev_to_node(&pdev->dev); + + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbe_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * caller's duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int size; + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * caller's duty to clean those orphaned rings.
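 + * (ixgbe_close() and the error path of ixgbe_open() both do this via
 + * ixgbe_free_all_rx_resources().)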
+ * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); + if (!err) + continue; + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ixgbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) +{ + ixgbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]->desc) + ixgbe_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * ixgbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) +{ + ixgbe_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]->desc) + ixgbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * ixgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* MTU < 68 is an error and causes problems on some kernels */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED && + hw->mac.type != ixgbe_mac_X540) { + if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) + return -EINVAL; + } else { + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + } + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + hw->fc.high_water = FC_HIGH_WATER(max_frame); + hw->fc.low_water = FC_LOW_WATER(max_frame); + + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + + return 0; +} + +/** + * ixgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int ixgbe_open(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__IXGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = ixgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ixgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + ixgbe_configure(adapter); + + err = ixgbe_request_irq(adapter); + if (err) + goto err_req_irq; + + err = ixgbe_up_complete(adapter); + if (err) + goto err_up; + + netif_tx_start_all_queues(netdev); + + return 0; + +err_up: + ixgbe_release_hw_control(adapter); + ixgbe_free_irq(adapter); +err_req_irq: +err_setup_rx: + ixgbe_free_all_rx_resources(adapter); +err_setup_tx: + ixgbe_free_all_tx_resources(adapter); + ixgbe_reset(adapter); + + return err; +} + +/** + * ixgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int ixgbe_close(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + + ixgbe_fdir_filter_exit(adapter); + + ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); + + ixgbe_release_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_resume(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. 
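 + * Saving again keeps dev->state_saved valid for any later
 + * pci_restore_state() call.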
+ */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + err = ixgbe_init_interrupt_scheme(adapter); + if (err) { + e_dev_err("Cannot initialize interrupts for device\n"); + return err; + } + + ixgbe_reset(adapter); + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + + if (netif_running(netdev)) { + err = ixgbe_open(netdev); + if (err) + return err; + } + + netif_device_attach(netdev); + + return 0; +} +#endif /* CONFIG_PM */ + +static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 ctrl, fctrl; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); + } + + ixgbe_clear_interrupt_scheme(adapter); +#ifdef CONFIG_DCB + kfree(adapter->ixgbe_ieee_pfc); + kfree(adapter->ixgbe_ieee_ets); +#endif + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + if (wufc) { + ixgbe_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IXGBE_WUFC_MC) { + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + ctrl |= IXGBE_CTRL_GIO_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + + IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); + } else { + IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); + IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); + } + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pci_wake_from_d3(pdev, false); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + pci_wake_from_d3(pdev, !!wufc); + break; + default: + break; + } + + *enable_wake = !!wufc; + + ixgbe_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __ixgbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +static void ixgbe_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __ixgbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * ixgbe_update_stats - Update the board statistics counters. 
+ * @adapter: board private structure + **/ +void ixgbe_update_stats(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; + u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0; + + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + u64 rsc_count = 0; + u64 rsc_flush = 0; + for (i = 0; i < 16; i++) + adapter->hw_rx_no_dma_resources += + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + for (i = 0; i < adapter->num_rx_queues; i++) { + rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; + rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; + } + adapter->rsc_total_count = rsc_count; + adapter->rsc_total_flush = rsc_flush; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + netdev->stats.rx_bytes = bytes; + netdev->stats.rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + netdev->stats.tx_bytes = bytes; + netdev->stats.tx_packets = packets; + + hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + for (i = 0; i < 8; i++) { + /* for packet buffers not used, the register should read 0 */ + mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + missed_rx += mpc; + hwstats->mpc[i] += mpc; + total_mpc += hwstats->mpc[i]; + if (hw->mac.type == ixgbe_mac_82598EB) + hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + break; + default: + break; + } + hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + } + hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); + /* work around hardware counting issue */ + hwstats->gprc -= missed_rx; + + ixgbe_update_xoff_received(adapter); + + /* 82598 hardware only has a 32 bit counter in the high register */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + hwstats->gotc += 
IXGBE_READ_REG(hw, IXGBE_GOTCH); + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); + break; + case ixgbe_mac_X540: + /* OS2BMC stats are X540 only*/ + hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); + hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); + hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); + hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); + case ixgbe_mac_82599EB: + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ + hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); +#ifdef IXGBE_FCOE + hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); +#endif /* IXGBE_FCOE */ + break; + default: + break; + } + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + hwstats->bprc += bprc; + hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + if (hw->mac.type == ixgbe_mac_82598EB) + hwstats->mprc -= bprc; + hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); + hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); + hwstats->lxontxc += lxon; + lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + hwstats->lxofftxc += lxoff; + hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); + hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + /* + * 82598 errata - tx of flow control packets is included in tx counters + */ + xon_off_tot = lxon + lxoff; + hwstats->gptc -= xon_off_tot; + hwstats->mptc -= xon_off_tot; + hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); + hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + hwstats->ptc64 -= xon_off_tot; + hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = hwstats->mprc; + + /* Rx Errors */ + netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; + netdev->stats.rx_dropped = 0; + netdev->stats.rx_length_errors = hwstats->rlec; + netdev->stats.rx_crc_errors = hwstats->crcerrs; + netdev->stats.rx_missed_errors = total_mpc; +} + +/** + * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table + * @adapter - pointer to the device adapter structure 
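 + *
 + * Rebuilds the FDIR filter table after an overflow; once every Tx ring is
 + * marked __IXGBE_TX_FDIR_INIT_DONE again, the flow director interrupt is
 + * re-enabled.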
+ **/ +static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + + /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + /* do nothing if we are not using signature filters */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) + return; + + adapter->fdir_overflow++; + + if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_bit(__IXGBE_TX_FDIR_INIT_DONE, + &(adapter->tx_ring[i]->state)); + /* re-enable flow director interrupts */ + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); + } else { + e_err(probe, "failed to finish FDIR re-initialization, " + "ignored adding FDIR ATR filters\n"); + } +} + +/** + * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter - pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occuring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occured. + */ +static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 eics = 0; + int i; + + /* If we're down or resetting, just bail */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + /* + * for legacy and MSI interrupts don't set any bits + * that are enabled for EIAM, because this operation + * would set *both* EIMS and EICS for any bit in EIAM + */ + IXGBE_WRITE_REG(hw, IXGBE_EICS, + (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); + } else { + /* get one bit for every active tx/rx interrupt vector */ + for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { + struct ixgbe_q_vector *qv = adapter->q_vector[i]; + if (qv->rx.count || qv->tx.count) + eics |= ((u64)1 << i); + } + } + + /* Cause software interrupt to ensure rings are cleaned */ + ixgbe_irq_rearm_queues(adapter, eics); + +} + +/** + * ixgbe_watchdog_update_link - update the link status + * @adapter - pointer to the device adapter structure + * @link_speed - pointer to a u32 to store the link_speed + **/ +static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + int i; + + if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) + return; + + if (hw->mac.ops.check_link) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + } else { + /* always assume link is up, if no check link function */ + link_speed = IXGBE_LINK_SPEED_10GB_FULL; + link_up = true; + } + if (link_up) { + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + hw->mac.ops.fc_enable(hw, i); + } else { + hw->mac.ops.fc_enable(hw, 0); + } + } + + if (link_up || + time_after(jiffies, (adapter->link_check_timeout + + IXGBE_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); + IXGBE_WRITE_FLUSH(hw); + } 
+ + adapter->link_up = link_up; + adapter->link_speed = link_speed; +} + +/** + * ixgbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter - pointer to the device adapter structure + **/ +static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool flow_rx, flow_tx; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: { + u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); + flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); + flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); + } + break; + case ixgbe_mac_X540: + case ixgbe_mac_82599EB: { + u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); + u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); + flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); + } + break; + default: + flow_tx = false; + flow_rx = false; + break; + } + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", + (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == IXGBE_LINK_SPEED_100_FULL ? + "100 Mbps" : + "unknown speed"))), + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? "TX" : "None")))); + + netif_carrier_on(netdev); + ixgbe_check_vf_rate_limit(adapter); +} + +/** + * ixgbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter - pointer to the adapter structure + **/ +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter* adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + /* poll for SFP+ cable when link is down */ + if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) + adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); +} + +/** + * ixgbe_watchdog_flush_tx - flush queues on link down + * @adapter - pointer to the device adapter structure + **/ +static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) +{ + int i; + int some_tx_pending = 0; + + if (!netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + if (tx_ring->next_to_use != tx_ring->next_to_clean) { + some_tx_pending = 1; + break; + } + } + + if (some_tx_pending) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + } + } +} + +static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check for 82598 */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + return; + + ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); + + /* + * ssvpc register is cleared on read, if zero then no + * spoofed packets in the last interval. 
+ */ + if (!ssvpc) + return; + + e_warn(drv, "%d Spoofed packets detected\n", ssvpc); +} + +/** + * ixgbe_watchdog_subtask - check and bring link up + * @adapter - pointer to the device adapter structure + **/ +static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) +{ + /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + ixgbe_watchdog_update_link(adapter); + + if (adapter->link_up) + ixgbe_watchdog_link_is_up(adapter); + else + ixgbe_watchdog_link_is_down(adapter); + + ixgbe_spoof_check(adapter); + ixgbe_update_stats(adapter); + + ixgbe_watchdog_flush_tx(adapter); +} + +/** + * ixgbe_sfp_detection_subtask - poll for SFP+ cable + * @adapter - the ixgbe adapter structure + **/ +static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + s32 err; + + /* not searching for SFP so there is nothing to do here */ + if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && + !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) + return; + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return; + + err = hw->phy.ops.identify_sfp(hw); + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + if (err == IXGBE_ERR_SFP_NOT_PRESENT) { + /* If no cable is present, then we need to reset + * the next time we find a good cable. */ + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + } + + /* exit on error */ + if (err) + goto sfp_out; + + /* exit if reset not needed */ + if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) + goto sfp_out; + + adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; + + /* + * A module may be identified correctly, but the EEPROM may not have + * support for that module. setup_sfp() will fail in that case, so + * we should not allow that module to load. 
+ */ + if (hw->mac.type == ixgbe_mac_82598EB) + err = hw->phy.ops.reset(hw); + else + err = hw->mac.ops.setup_sfp(hw); + + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); + +sfp_out: + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + + if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && + (adapter->netdev->reg_state == NETREG_REGISTERED)) { + e_dev_err("failed to initialize because an unsupported " + "SFP+ module type was detected.\n"); + e_dev_err("Reload the driver after installing a " + "supported module.\n"); + unregister_netdev(adapter->netdev); + } +} + +/** + * ixgbe_sfp_link_config_subtask - set up link SFP after module install + * @adapter - the ixgbe adapter structure + **/ +static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 autoneg; + bool negotiation; + + if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) + return; + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; + + autoneg = hw->phy.autoneg_advertised; + if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) + hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); + hw->mac.autotry_restart = false; + if (hw->mac.ops.setup_link) + hw->mac.ops.setup_link(hw, autoneg, negotiation, true); + + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); +} + +/** + * ixgbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ixgbe_service_timer(unsigned long data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + unsigned long next_event_offset; + + /* poll faster when waiting for link */ + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ * 2; + + /* Reset the timer */ + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + ixgbe_service_event_schedule(adapter); +} + +static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) +{ + if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; + + /* If we're already down or resetting, just bail */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + ixgbe_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + + ixgbe_reinit_locked(adapter); +} + +/** + * ixgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void ixgbe_service_task(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, + struct ixgbe_adapter, + service_task); + + ixgbe_reset_subtask(adapter); + ixgbe_sfp_detection_subtask(adapter); + ixgbe_sfp_link_config_subtask(adapter); + ixgbe_check_overtemp_subtask(adapter); + ixgbe_watchdog_subtask(adapter); + ixgbe_fdir_reinit_subtask(adapter); + ixgbe_check_hang_subtask(adapter); + + ixgbe_service_event_complete(adapter); +} + +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = 
IXGBE_TX_CTXTDESC_ADV(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, __be16 protocol, u8 *hdr_len) +{ + int err; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + + if (protocol == __constant_htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + l4len = tcp_hdrlen(skb); + *hdr_len = skb_transport_offset(skb) + l4len; + + /* mss_l4len_id: use 1 as index for TSO */ + mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_network_header_len(skb); + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, + mss_l4len_idx); + + return 1; +} + +static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, + __be16 protocol) +{ + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(tx_flags & IXGBE_TX_FLAGS_VLAN)) + return false; + } else { + u8 l4_hdr = 0; + switch (protocol) { + case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + l4_hdr = ip_hdr(skb)->protocol; + break; + case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but proto=%x!\n", + skb->protocol); + } + break; + } + + switch (l4_hdr) { + case IPPROTO_TCP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + mss_l4len_idx = tcp_hdrlen(skb) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but l4 proto=%x!\n", + skb->protocol); + } + break; + } + } + + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, + type_tucmd, mss_l4len_idx); + + return 
(skb->ip_summed == CHECKSUM_PARTIAL); +} + +static int ixgbe_tx_map(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, + unsigned int first, const u8 hdr_len) +{ + struct device *dev = tx_ring->dev; + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned int len; + unsigned int total = skb->len; + unsigned int offset = 0, size, count = 0; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int f; + unsigned int bytecount = skb->len; + u16 gso_segs = 1; + u16 i; + + i = tx_ring->next_to_use; + + if (tx_flags & IXGBE_TX_FLAGS_FCOE) + /* excluding fcoe_crc_eof for FCoE */ + total -= sizeof(struct fcoe_crc_eof); + + len = min(skb_headlen(skb), total); + while (len) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); + + tx_buffer_info->length = size; + tx_buffer_info->mapped_as_page = false; + tx_buffer_info->dma = dma_map_single(dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, tx_buffer_info->dma)) + goto dma_error; + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + len -= size; + total -= size; + offset += size; + count++; + + if (len) { + i++; + if (i == tx_ring->count) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = min((unsigned int)frag->size, total); + offset = frag->page_offset; + + while (len) { + i++; + if (i == tx_ring->count) + i = 0; + + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); + + tx_buffer_info->length = size; + tx_buffer_info->dma = dma_map_page(dev, + frag->page, + offset, size, + DMA_TO_DEVICE); + tx_buffer_info->mapped_as_page = true; + if (dma_mapping_error(dev, tx_buffer_info->dma)) + goto dma_error; + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + len -= size; + total -= size; + offset += size; + count++; + } + if (total == 0) + break; + } + + if (tx_flags & IXGBE_TX_FLAGS_TSO) + gso_segs = skb_shinfo(skb)->gso_segs; +#ifdef IXGBE_FCOE + /* adjust for FCoE Sequence Offload */ + else if (tx_flags & IXGBE_TX_FLAGS_FSO) + gso_segs = DIV_ROUND_UP(skb->len - hdr_len, + skb_shinfo(skb)->gso_size); +#endif /* IXGBE_FCOE */ + bytecount += (gso_segs - 1) * hdr_len; + + /* multiply data chunks by size of headers */ + tx_ring->tx_buffer_info[i].bytecount = bytecount; + tx_ring->tx_buffer_info[i].gso_segs = gso_segs; + tx_ring->tx_buffer_info[i].skb = skb; + tx_ring->tx_buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + e_dev_err("TX DMA map failed\n"); + + /* clear timestamp and dma mappings for failed tx_buffer_info map */ + tx_buffer_info->dma = 0; + tx_buffer_info->time_stamp = 0; + tx_buffer_info->next_to_watch = 0; + if (count) + count--; + + /* clear timestamp and dma mappings for remaining portion of packet */ + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + return 0; +} + +static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, + int tx_flags, int count, u32 paylen, u8 hdr_len) +{ + union ixgbe_adv_tx_desc *tx_desc = NULL; + struct ixgbe_tx_buffer *tx_buffer_info; + u32 olinfo_status = 0, cmd_type_len = 0; + unsigned int i; + u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; + + cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; + + cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | 
IXGBE_ADVTXD_DCMD_DEXT;
+
+	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+				 IXGBE_ADVTXD_POPTS_SHIFT;
+
+		/* use index 1 context for tso */
+		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+					 IXGBE_ADVTXD_POPTS_SHIFT;
+
+	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+				 IXGBE_ADVTXD_POPTS_SHIFT;
+
+	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+		olinfo_status |= IXGBE_ADVTXD_CC;
+		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+		if (tx_flags & IXGBE_TX_FLAGS_FSO)
+			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+	}
+
+	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+	i = tx_ring->next_to_use;
+	while (count--) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+		tx_desc->read.cmd_type_len =
+			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+	}
+
+	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, tx_ring->tail);
+}
+
+static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
+		      u32 tx_flags, __be16 protocol)
+{
+	struct ixgbe_q_vector *q_vector = ring->q_vector;
+	union ixgbe_atr_hash_dword input = { .dword = 0 };
+	union ixgbe_atr_hash_dword common = { .dword = 0 };
+	union {
+		unsigned char *network;
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
+	struct tcphdr *th;
+	__be16 vlan_id;
+
+	/* if ring doesn't have an interrupt vector, cannot perform ATR */
+	if (!q_vector)
+		return;
+
+	/* do nothing if sampling is disabled */
+	if (!ring->atr_sample_rate)
+		return;
+
+	ring->atr_count++;
+
+	/* snag network header to get L4 type and address */
+	hdr.network = skb_network_header(skb);
+
+	/* Currently only IPv4/IPv6 with TCP is supported */
+	if ((protocol != __constant_htons(ETH_P_IPV6) ||
+	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+	    (protocol != __constant_htons(ETH_P_IP) ||
+	     hdr.ipv4->protocol != IPPROTO_TCP))
+		return;
+
+	th = tcp_hdr(skb);
+
+	/* skip this packet since the socket is closing */
+	if (th->fin)
+		return;
+
+	/* sample on all syn packets or once every atr sample count */
+	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
+		return;
+
+	/* reset sample count */
+	ring->atr_count = 0;
+
+	vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+
+	/*
+	 * src and dst are inverted, think how the receiver sees them
+	 *
+	 * The input is broken into two sections, a non-compressed section
+	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
+	 * is XORed together and stored in the compressed dword.
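+	 *
+	 * For example, for a VLAN-tagged IPv4/TCP packet the dwords that
+	 * reach the hardware below reduce to roughly:
+	 *   common.port.src = th->dest ^ htons(ETH_P_8021Q)
+	 *   common.port.dst = th->source
+	 *   common.ip       = saddr ^ daddr
+	 * while vlan_id and flow_type travel uncompressed in 'input'.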
+	 */
+	input.formatted.vlan_id = vlan_id;
+
+	/*
+	 * since src port and flex bytes occupy the same word XOR them together
+	 * and write the value to source port portion of compressed dword
+	 */
+	if (vlan_id)
+		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+	else
+		common.port.src ^= th->dest ^ protocol;
+	common.port.dst ^= th->source;
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
+		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+	} else {
+		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+			     hdr.ipv6->saddr.s6_addr32[1] ^
+			     hdr.ipv6->saddr.s6_addr32[2] ^
+			     hdr.ipv6->saddr.s6_addr32[3] ^
+			     hdr.ipv6->daddr.s6_addr32[0] ^
+			     hdr.ipv6->daddr.s6_addr32[1] ^
+			     hdr.ipv6->daddr.s6_addr32[2] ^
+			     hdr.ipv6->daddr.s6_addr32[3];
+	}
+
+	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
+	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
+					      input, common, ring->queue_index);
+}
+
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in case another CPU has just
+	 * made room available. */
+	if (likely(ixgbe_desc_unused(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
+{
+	if (likely(ixgbe_desc_unused(tx_ring) >= size))
+		return 0;
+	return __ixgbe_maybe_stop_tx(tx_ring, size);
+}
+
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int txq = skb_rx_queue_recorded(skb) ?
skb_get_rx_queue(skb) : + smp_processor_id(); +#ifdef IXGBE_FCOE + __be16 protocol = vlan_get_protocol(skb); + + if (((protocol == htons(ETH_P_FCOE)) || + (protocol == htons(ETH_P_FIP))) && + (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { + txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); + txq += adapter->ring_feature[RING_F_FCOE].mask; + return txq; + } +#endif + + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + while (unlikely(txq >= dev->real_num_tx_queues)) + txq -= dev->real_num_tx_queues; + return txq; + } + + return skb_tx_hash(dev, skb); +} + +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + int tso; + u32 tx_flags = 0; +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD + unsigned short f; +#endif + u16 first; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol; + u8 hdr_len = 0; + + /* + * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +#else + count += skb_shinfo(skb)->nr_frags; +#endif + if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + protocol = vlan_get_protocol(skb); + + if (vlan_tx_tag_present(skb)) { + tx_flags |= vlan_tx_tag_get(skb); + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; + tx_flags |= tx_ring->dcb_tc << 13; + } + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_VLAN; + } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && + skb->priority != TC_PRIO_CONTROL) { + tx_flags |= tx_ring->dcb_tc << 13; + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_VLAN; + } + +#ifdef IXGBE_FCOE + /* for FCoE with DCB, we force the priority to what + * was specified by the switch */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && + (protocol == htons(ETH_P_FCOE))) + tx_flags |= IXGBE_TX_FLAGS_FCOE; + +#endif + /* record the location of the first descriptor for this packet */ + first = tx_ring->next_to_use; + + if (tx_flags & IXGBE_TX_FLAGS_FCOE) { +#ifdef IXGBE_FCOE + /* setup tx offload for FCoE */ + tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len); + if (tso < 0) + goto out_drop; + else if (tso) + tx_flags |= IXGBE_TX_FLAGS_FSO; +#endif /* IXGBE_FCOE */ + } else { + if (protocol == htons(ETH_P_IP)) + tx_flags |= IXGBE_TX_FLAGS_IPV4; + tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len); + if (tso < 0) + goto out_drop; + else if (tso) + tx_flags |= IXGBE_TX_FLAGS_TSO; + else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol)) + tx_flags |= IXGBE_TX_FLAGS_CSUM; + } + + count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); + if (count) { + /* add the ATR filter if ATR is on */ + if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) + ixgbe_atr(tx_ring, skb, tx_flags, protocol); + ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); + ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + } else { + tx_ring->tx_buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + goto out_drop; + } + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct 
ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_ring *tx_ring;
+
+	tx_ring = adapter->tx_ring[skb->queue_mapping];
+	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
+}
+
+/**
+ * ixgbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_set_mac(struct net_device *netdev, void *p)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+			    IXGBE_RAH_AV);
+
+	return 0;
+}
+
+static int
+ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u16 value;
+	int rc;
+
+	if (prtad != hw->phy.mdio.prtad)
+		return -EINVAL;
+	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
+	if (!rc)
+		rc = value;
+	return rc;
+}
+
+static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
+			    u16 addr, u16 value)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (prtad != hw->phy.mdio.prtad)
+		return -EINVAL;
+	return hw->phy.ops.write_reg(hw, addr, devad, value);
+}
+
+static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
+}
+
+/**
+ * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
+ * netdev->dev_addrs
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_add_sanmac_netdev(struct net_device *dev)
+{
+	int err = 0;
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+	if (is_valid_ether_addr(mac->san_addr)) {
+		rtnl_lock();
+		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+		rtnl_unlock();
+	}
+	return err;
+}
+
+/**
+ * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
+ * netdev->dev_addrs
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_del_sanmac_netdev(struct net_device *dev)
+{
+	int err = 0;
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+	if (is_valid_ether_addr(mac->san_addr)) {
+		rtnl_lock();
+		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+		rtnl_unlock();
+	}
+	return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts.  It's not called while
+ * the interrupt routine is executing.
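+ * (With MSI-X enabled this simply invokes each queue vector's handler
+ * directly; in the legacy case it calls ixgbe_intr() by hand.)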
+ */
+static void ixgbe_netpoll(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	/* if interface is down do nothing */
+	if (test_bit(__IXGBE_DOWN, &adapter->state))
+		return;
+
+	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+		for (i = 0; i < num_q_vectors; i++) {
+			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+			ixgbe_msix_clean_many(0, q_vector);
+		}
+	} else {
+		ixgbe_intr(adapter->pdev->irq, netdev);
+	}
+	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+}
+#endif
+
+static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
+						   struct rtnl_link_stats64 *stats)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			stats->rx_packets += packets;
+			stats->rx_bytes += bytes;
+		}
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			stats->tx_packets += packets;
+			stats->tx_bytes += bytes;
+		}
+	}
+	rcu_read_unlock();
+	/* following stats updated by ixgbe_watchdog_task() */
+	stats->multicast = netdev->stats.multicast;
+	stats->rx_errors = netdev->stats.rx_errors;
+	stats->rx_length_errors = netdev->stats.rx_length_errors;
+	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+	return stats;
+}
+
+/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
+ * @tc: number of traffic classes currently enabled
+ *
+ * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
+ * each 802.1Q priority maps to a packet buffer that exists.
+ */
+static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 reg, rsave;
+	int i;
+
+	/* The 82598 has a static priority to TC mapping that cannot
+	 * be changed, so no validation is needed.
+	 */
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+	rsave = reg;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
+
+		/* If up2tc is out of bounds default to zero */
+		if (up2tc > tc)
+			reg &= ~(0x7 << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+	}
+
+	if (reg != rsave)
+		IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+	return;
+}
+
+
+/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
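+ * (reached via ndo_setup_tc, e.g. when an mqprio qdisc is installed on
+ * the device).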
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	/* If DCB is enabled do not remove traffic classes, multiple
+	 * traffic classes are required to implement DCB
+	 */
+	if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+		return 0;
+
+	/* Hardware supports up to 8 traffic classes */
+	if (tc > MAX_TRAFFIC_CLASS ||
+	    (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
+		return -EINVAL;
+
+	/* Hardware has to reinitialize queues and interrupts to
+	 * match packet buffer alignment. Unfortunately, the
+	 * hardware is not flexible enough to do this dynamically.
+	 */
+	if (netif_running(dev))
+		ixgbe_close(dev);
+	ixgbe_clear_interrupt_scheme(adapter);
+
+	if (tc)
+		netdev_set_num_tc(dev, tc);
+	else
+		netdev_reset_tc(dev);
+
+	ixgbe_init_interrupt_scheme(adapter);
+	ixgbe_validate_rtr(adapter, tc);
+	if (netif_running(dev))
+		ixgbe_open(dev);
+
+	return 0;
+}
+
+void ixgbe_do_reset(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		ixgbe_reinit_locked(adapter);
+	else
+		ixgbe_reset(adapter);
+}
+
+static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+#ifdef CONFIG_DCB
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+		data &= ~NETIF_F_HW_VLAN_RX;
+#endif
+
+	/* return error if RXHASH is being enabled when RSS is not supported */
+	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+		data &= ~NETIF_F_RXHASH;
+
+	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+	if (!(data & NETIF_F_RXCSUM))
+		data &= ~NETIF_F_LRO;
+
+	/* Turn off LRO if not RSC capable or invalid ITR settings */
+	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+		data &= ~NETIF_F_LRO;
+	} else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+		   (adapter->rx_itr_setting != 1 &&
+		    adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
+		data &= ~NETIF_F_LRO;
+		e_info(probe, "rx-usecs set too low, not enabling RSC\n");
+	}
+
+	return data;
+}
+
+static int ixgbe_set_features(struct net_device *netdev, u32 data)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	bool need_reset = false;
+
+	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+	if (!(data & NETIF_F_RXCSUM))
+		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+	else
+		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+	/* Make sure RSC matches LRO, reset if change */
+	if (!!(data & NETIF_F_LRO) !=
+	     !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+		adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+		switch (adapter->hw.mac.type) {
+		case ixgbe_mac_X540:
+		case ixgbe_mac_82599EB:
+			need_reset = true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/*
+	 * Check if Flow Director n-tuple support was enabled or disabled.  If
+	 * the state changed, we need to reset.
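+	 * (e.g. toggling ntuple via 'ethtool -K <iface> ntuple on|off' lands
+	 * here: perfect filters and the ATR hash filters are mutually
+	 * exclusive, hence the flag swap and reset below.)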
+ */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + /* turn off ATR, enable perfect filters and reset */ + if (data & NETIF_F_NTUPLE) { + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + need_reset = true; + } + } else if (!(data & NETIF_F_NTUPLE)) { + /* turn off Flow Director, set ATR and reset */ + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + need_reset = true; + } + + if (need_reset) + ixgbe_do_reset(netdev); + + return 0; + +} + +static const struct net_device_ops ixgbe_netdev_ops = { + .ndo_open = ixgbe_open, + .ndo_stop = ixgbe_close, + .ndo_start_xmit = ixgbe_xmit_frame, + .ndo_select_queue = ixgbe_select_queue, + .ndo_set_rx_mode = ixgbe_set_rx_mode, + .ndo_set_multicast_list = ixgbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgbe_set_mac, + .ndo_change_mtu = ixgbe_change_mtu, + .ndo_tx_timeout = ixgbe_tx_timeout, + .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, + .ndo_do_ioctl = ixgbe_ioctl, + .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, + .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, + .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, + .ndo_get_vf_config = ixgbe_ndo_get_vf_config, + .ndo_get_stats64 = ixgbe_get_stats64, + .ndo_setup_tc = ixgbe_setup_tc, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbe_netpoll, +#endif +#ifdef IXGBE_FCOE + .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, + .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, + .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, + .ndo_fcoe_enable = ixgbe_fcoe_enable, + .ndo_fcoe_disable = ixgbe_fcoe_disable, + .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, +#endif /* IXGBE_FCOE */ + .ndo_set_features = ixgbe_set_features, + .ndo_fix_features = ixgbe_fix_features, +}; + +static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, + const struct ixgbe_info *ii) +{ +#ifdef CONFIG_PCI_IOV + struct ixgbe_hw *hw = &adapter->hw; + int err; + int num_vf_macvlans, i; + struct vf_macvlans *mv_list; + + if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs) + return; + + /* The 82599 supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function + */ + adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs; + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + goto err_novfs; + } + + num_vf_macvlans = hw->mac.num_rar_entries - + (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + + adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, + sizeof(struct vf_macvlans), + GFP_KERNEL); + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + mv_list->rar_entry = hw->mac.num_rar_entries - + (i + adapter->num_vfs + 1); + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + /* If call to enable VFs succeeded then allocate memory + * for per VF control structures. 
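+	 * (one struct vf_data_storage per VF; on allocation failure SR-IOV
+	 * is simply disabled again rather than failing the probe.)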
+ */ + adapter->vfinfo = + kcalloc(adapter->num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (adapter->vfinfo) { + /* Now that we're sure SR-IOV is enabled + * and memory allocated set up the mailbox parameters + */ + ixgbe_init_mbx_params_pf(hw); + memcpy(&hw->mbx.ops, ii->mbx_ops, + sizeof(hw->mbx.ops)); + + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | + IXGBE_FLAG2_RSC_ENABLED); + return; + } + + /* Oh oh */ + e_err(probe, "Unable to allocate memory for VF Data Storage - " + "SRIOV disabled\n"); + pci_disable_sriov(adapter->pdev); + +err_novfs: + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; + adapter->num_vfs = 0; +#endif /* CONFIG_PCI_IOV */ +} + +/** + * ixgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int __devinit ixgbe_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct ixgbe_adapter *adapter = NULL; + struct ixgbe_hw *hw; + const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; + static int cards_found; + int i, err, pci_using_dac; + u8 part_str[IXGBE_PBANUM_LENGTH]; + unsigned int indices = num_possible_cpus(); +#ifdef IXGBE_FCOE + u16 device_caps; +#endif + u32 eec; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. + */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; + } + + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), ixgbe_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + +#ifdef CONFIG_IXGBE_DCB + indices *= MAX_TRAFFIC_CLASS; +#endif + + if (ii->mac == ixgbe_mac_82598EB) + indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); + else + indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); + +#ifdef IXGBE_FCOE + indices += min_t(unsigned int, num_possible_cpus(), + IXGBE_MAX_FCOE_INDICES); +#endif + netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + pci_set_drvdata(pdev, adapter); + + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + for (i = 1; i <= 5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + } + + 
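+	/* note: the loop above merely scans BARs 1-5 and maps nothing;
+	 * only the BAR 0 mapping is used by the driver */
+	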
netdev->netdev_ops = &ixgbe_netdev_ops;
+	ixgbe_set_ethtool_ops(netdev);
+	netdev->watchdog_timeo = 5 * HZ;
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	adapter->bd_number = cards_found;
+
+	/* Setup hw api */
+	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+	hw->mac.type = ii->mac;
+
+	/* EEPROM */
+	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+	/* If EEPROM is valid (bit 8 = 1), use default; otherwise use bit bang */
+	if (!(eec & (1 << 8)))
+		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+
+	/* PHY */
+	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
+	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
+	hw->phy.mdio.mmds = 0;
+	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	hw->phy.mdio.dev = netdev;
+	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
+	hw->phy.mdio.mdio_write = ixgbe_mdio_write;
+
+	ii->get_invariants(hw);
+
+	/* setup the private structure */
+	err = ixgbe_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	/* Make it possible for the adapter to be woken up via WOL */
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * If there is a fan on this device and it has failed, log the
+	 * failure.
+	 */
+	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		if (esdp & IXGBE_ESDP_SDP1)
+			e_crit(probe, "Fan has stopped, replace the adapter\n");
+	}
+
+	/* reset_hw fills in the perm_addr as well */
+	hw->phy.reset_if_overtemp = true;
+	err = hw->mac.ops.reset_hw(hw);
+	hw->phy.reset_if_overtemp = false;
+	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
+	    hw->mac.type == ixgbe_mac_82598EB) {
+		err = 0;
+	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+		e_dev_err("failed to load because an unsupported SFP+ "
+			  "module type was detected.\n");
+		e_dev_err("Reload the driver after installing a supported "
+			  "module.\n");
+		goto err_sw_init;
+	} else if (err) {
+		e_dev_err("HW Init failed: %d\n", err);
+		goto err_sw_init;
+	}
+
+	ixgbe_probe_vf(adapter, ii);
+
+	netdev->features = NETIF_F_SG |
+			   NETIF_F_IP_CSUM |
+			   NETIF_F_IPV6_CSUM |
+			   NETIF_F_HW_VLAN_TX |
+			   NETIF_F_HW_VLAN_RX |
+			   NETIF_F_HW_VLAN_FILTER |
+			   NETIF_F_TSO |
+			   NETIF_F_TSO6 |
+			   NETIF_F_GRO |
+			   NETIF_F_RXHASH |
+			   NETIF_F_RXCSUM;
+
+	netdev->hw_features = netdev->features;
+
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		netdev->features |= NETIF_F_SCTP_CSUM;
+		netdev->hw_features |= NETIF_F_SCTP_CSUM |
+				       NETIF_F_NTUPLE;
+		break;
+	default:
+		break;
+	}
+
+	netdev->vlan_features |= NETIF_F_TSO;
+	netdev->vlan_features |= NETIF_F_TSO6;
+	netdev->vlan_features |= NETIF_F_IP_CSUM;
+	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+	netdev->vlan_features |= NETIF_F_SG;
+
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+				    IXGBE_FLAG_DCB_ENABLED);
+
+#ifdef CONFIG_IXGBE_DCB
+	netdev->dcbnl_ops = &dcbnl_ops;
+#endif
+
+#ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+		if (hw->mac.ops.get_device_caps) {
+			hw->mac.ops.get_device_caps(hw, &device_caps);
+			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
+				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+		}
+	}
+	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+		netdev->vlan_features |= NETIF_F_FCOE_CRC;
+		
netdev->vlan_features |= NETIF_F_FSO; + netdev->vlan_features |= NETIF_F_FCOE_MTU; + } +#endif /* IXGBE_FCOE */ + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + netdev->hw_features |= NETIF_F_LRO; + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + netdev->features |= NETIF_F_LRO; + + /* make sure the EEPROM is good */ + if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { + e_dev_err("The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); + memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); + + if (ixgbe_validate_mac_addr(netdev->perm_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_eeprom; + } + + /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser && + ((hw->phy.multispeed_fiber) || + ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && + (hw->mac.type == ixgbe_mac_82599EB)))) + hw->mac.ops.disable_tx_laser(hw); + + setup_timer(&adapter->service_timer, &ixgbe_service_timer, + (unsigned long) adapter); + + INIT_WORK(&adapter->service_task, ixgbe_service_task); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); + + err = ixgbe_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { + netdev->hw_features &= ~NETIF_F_RXHASH; + netdev->features &= ~NETIF_F_RXHASH; + } + + switch (pdev->device) { + case IXGBE_DEV_ID_82599_SFP: + /* Only this subdevice supports WOL */ + if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP) + adapter->wol = IXGBE_WUFC_MAG; + break; + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + /* All except this subdevice support WOL */ + if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) + adapter->wol = IXGBE_WUFC_MAG; + break; + case IXGBE_DEV_ID_82599_KX4: + adapter->wol = IXGBE_WUFC_MAG; + break; + default: + adapter->wol = 0; + break; + } + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* pick up the PCI bus settings for reporting later */ + hw->mac.ops.get_bus_info(hw); + + /* print bus type/speed/width info */ + e_dev_info("(PCI Express:%s:%s) %pM\n", + (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" : + hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" : + "Unknown"), + (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : + hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : + hw->bus.width == ixgbe_bus_width_pcie_x1 ? 
"Width x1" : + "Unknown"), + netdev->dev_addr); + + err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); + if (err) + strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, hw->phy.sfp_type, + part_str); + else + e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, part_str); + + if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { + e_dev_warn("PCI-Express bandwidth available for this card is " + "not sufficient for optimal performance.\n"); + e_dev_warn("For optimal performance a x8 PCI-Express slot " + "is required.\n"); + } + + /* save off EEPROM version number */ + hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version); + + /* reset the hardware with the new settings */ + err = hw->mac.ops.start_hw(hw); + + if (err == IXGBE_ERR_EEPROM_VERSION) { + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. If you are experiencing " + "problems please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); + } + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + +#ifdef CONFIG_IXGBE_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + ixgbe_setup_dca(adapter); + } +#endif + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); + for (i = 0; i < adapter->num_vfs; i++) + ixgbe_vf_configuration(pdev, (i | 0x10000000)); + } + + /* Inform firmware of driver version */ + if (hw->mac.ops.set_fw_drv_ver) + hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD, + FW_CEM_UNUSED_VER); + + /* add san mac addr to netdev */ + ixgbe_add_sanmac_netdev(netdev); + + e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); + cards_found++; + return 0; + +err_register: + ixgbe_release_hw_control(adapter); + ixgbe_clear_interrupt_scheme(adapter); +err_sw_init: +err_eeprom: + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + ixgbe_disable_sriov(adapter); + adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; + iounmap(hw->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * ixgbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
+ **/
+static void __devexit ixgbe_remove(struct pci_dev *pdev)
+{
+	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+	set_bit(__IXGBE_DOWN, &adapter->state);
+	cancel_work_sync(&adapter->service_task);
+
+#ifdef CONFIG_IXGBE_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+		dca_remove_requester(&pdev->dev);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+	}
+
+#endif
+#ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+		ixgbe_cleanup_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+
+	/* remove the added san mac */
+	ixgbe_del_sanmac_netdev(netdev);
+
+	if (netdev->reg_state == NETREG_REGISTERED)
+		unregister_netdev(netdev);
+
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		ixgbe_disable_sriov(adapter);
+
+	ixgbe_clear_interrupt_scheme(adapter);
+
+	ixgbe_release_hw_control(adapter);
+
+	iounmap(adapter->hw.hw_addr);
+	pci_release_selected_regions(pdev, pci_select_bars(pdev,
+				     IORESOURCE_MEM));
+
+	e_dev_info("complete\n");
+
+	free_netdev(netdev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * ixgbe_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (netif_running(netdev))
+		ixgbe_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgbe_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
+{
+	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+	pci_ers_result_t result;
+	int err;
+
+	if (pci_enable_device_mem(pdev)) {
+		e_err(probe, "Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+
+		pci_wake_from_d3(pdev, false);
+
+		ixgbe_reset(adapter);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	err = pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (err) {
+		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
+			  "failed 0x%0x\n", err);
+		/* non-fatal, continue */
+	}
+
+	return result;
+}
+
+/**
+ * ixgbe_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
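+ * (the inverse of ixgbe_io_error_detected: bring the interface back up
+ * if it was running, then re-attach the netdev.)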
+ */ +static void ixgbe_io_resume(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + if (netif_running(netdev)) { + if (ixgbe_up(adapter)) { + e_info(probe, "ixgbe_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static struct pci_error_handlers ixgbe_err_handler = { + .error_detected = ixgbe_io_error_detected, + .slot_reset = ixgbe_io_slot_reset, + .resume = ixgbe_io_resume, +}; + +static struct pci_driver ixgbe_driver = { + .name = ixgbe_driver_name, + .id_table = ixgbe_pci_tbl, + .probe = ixgbe_probe, + .remove = __devexit_p(ixgbe_remove), +#ifdef CONFIG_PM + .suspend = ixgbe_suspend, + .resume = ixgbe_resume, +#endif + .shutdown = ixgbe_shutdown, + .err_handler = &ixgbe_err_handler +}; + +/** + * ixgbe_init_module - Driver Registration Routine + * + * ixgbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ixgbe_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); + pr_info("%s\n", ixgbe_copyright); + +#ifdef CONFIG_IXGBE_DCA + dca_register_notify(&dca_notifier); +#endif + + ret = pci_register_driver(&ixgbe_driver); + return ret; +} + +module_init(ixgbe_init_module); + +/** + * ixgbe_exit_module - Driver Exit Cleanup Routine + * + * ixgbe_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ixgbe_exit_module(void) +{ +#ifdef CONFIG_IXGBE_DCA + dca_unregister_notify(&dca_notifier); +#endif + pci_unregister_driver(&ixgbe_driver); + rcu_barrier(); /* Wait for completion of call_rcu()'s */ +} + +#ifdef CONFIG_IXGBE_DCA +static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, + __ixgbe_notify_dca); + + return ret_val ? NOTIFY_BAD : NOTIFY_DONE; +} + +#endif /* CONFIG_IXGBE_DCA */ + +module_exit(ixgbe_exit_module); + +/* ixgbe_main.c */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c new file mode 100644 index 0000000..1ff0eef --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -0,0 +1,471 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	/* limit read to size of mailbox */
+	if (size > mbx->size)
+		size = mbx->size;
+
+	if (mbx->ops.read)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = 0;
+
+	if (size > mbx->size)
+		ret_val = IXGBE_ERR_MBX;
+
+	else if (mbx->ops.write)
+		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (mbx->ops.check_for_msg)
+		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (mbx->ops.check_for_ack)
+		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (mbx->ops.check_for_rst)
+		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_msg)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 
0 : IXGBE_ERR_MBX; +} + +/** + * ixgbe_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + +out: + return countdown ? 0 : IXGBE_ERR_MBX; +} + +/** + * ixgbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = ixgbe_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = ixgbe_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) +{ + u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); + s32 ret_val = IXGBE_ERR_MBX; + + if (mbvficr & mask) { + ret_val = 0; + IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); + } + + return ret_val; +} + +/** + * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { + 
ret_val = 0; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + u32 reg_offset = (vf_number < 32) ? 0 : 1; + u32 vf_shift = vf_number % 32; + u32 vflre = 0; + s32 ret_val = IXGBE_ERR_MBX; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); + break; + case ixgbe_mac_X540: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); + break; + default: + break; + } + + if (vflre & (1 << vf_shift)) { + ret_val = 0; + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + u32 p2v_mailbox; + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); + if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) + ret_val = 0; + + return ret_val; +} + +/** + * ixgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_pf(hw, vf_number); + ixgbe_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * ixgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
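+ * (contrast with ixgbe_read_posted_mbx above, which polls for the
+ * message first; here the caller already knows one is pending.)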
+ **/ +static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +#ifdef CONFIG_PCI_IOV +/** + * ixgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540) + return; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + mbx->size = IXGBE_VFMAILBOX_SIZE; +} +#endif /* CONFIG_PCI_IOV */ + +struct ixgbe_mbx_operations mbx_ops_generic = { + .read = ixgbe_read_mbx_pf, + .write = ixgbe_write_mbx_pf, + .read_posted = ixgbe_read_posted_mbx, + .write_posted = ixgbe_write_posted_mbx, + .check_for_msg = ixgbe_check_for_msg_pf, + .check_for_ack = ixgbe_check_for_ack_pf, + .check_for_rst = ixgbe_check_for_rst_pf, +}; + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h new file mode 100644 index 0000000..b239bda --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -0,0 +1,93 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE	16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX		-100
+
+#define IXGBE_VFMAILBOX		0x002FC
+#define IXGBE_VFMBMEM		0x00200
+
+#define IXGBE_PFMAILBOX_STS	0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK	0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU	0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU	0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU	0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK	0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1		0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK	0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1		0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is IXGBE_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK	0x80000000 /* Messages below or'd with
+					    * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK	0x40000000 /* Messages below or'd with
+					    * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS	0x20000000 /* Indicates that VF is still
+					      clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT	16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK	(0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET		0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR	0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST	0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN	0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN	4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD		3
+
+#define IXGBE_PF_CONTROL_MSG		0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT	2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY		500  /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+#endif /* CONFIG_PCI_IOV */
+
+extern struct ixgbe_mbx_operations mbx_ops_generic;
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
new file mode 100644
index 0000000..f7ca351
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -0,0 +1,1725 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static void ixgbe_i2c_start(struct ixgbe_hw *hw); +static void ixgbe_i2c_stop(struct ixgbe_hw *hw); +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +static bool ixgbe_get_i2c_data(u32 *i2cctl); +static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); + +/** + * ixgbe_identify_phy_generic - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter.
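+ *
+ * Probing walks MDIO addresses 0 through IXGBE_MAX_PHY_ADDR - 1 until
+ * mdio45_probe() answers. When the ID that is then read back is not a
+ * recognized value, the PMA/PMD extended-ability bits pick between a
+ * generic copper PHY (ixgbe_phy_cu_unknown) and ixgbe_phy_generic.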
+ **/ +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 phy_addr; + u16 ext_ability = 0; + + if (hw->phy.type == ixgbe_phy_unknown) { + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { + hw->phy.mdio.prtad = phy_addr; + if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { + ixgbe_get_phy_id(hw); + hw->phy.type = + ixgbe_get_phy_type_from_id(hw->phy.id); + + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.ops.read_reg(hw, + MDIO_PMA_EXTABLE, + MDIO_MMD_PMAPMD, + &ext_ability); + if (ext_ability & + (MDIO_PMA_EXTABLE_10GBT | + MDIO_PMA_EXTABLE_1000BT)) + hw->phy.type = + ixgbe_phy_cu_unknown; + else + hw->phy.type = + ixgbe_phy_generic; + } + + status = 0; + break; + } + } + /* clear value if nothing found */ + if (status != 0) + hw->phy.mdio.prtad = 0; + } else { + status = 0; + } + + return status; +} + +/** + * ixgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +{ + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + + status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, + &phy_id_high); + + if (status == 0) { + hw->phy.id = (u32)(phy_id_high << 16); + status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, + &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); + hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); + } + return status; +} + +/** + * ixgbe_get_phy_type_from_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) +{ + enum ixgbe_phy_type phy_type; + + switch (phy_id) { + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; + case QT2022_PHY_ID: + phy_type = ixgbe_phy_qt; + break; + case ATH_PHY_ID: + phy_type = ixgbe_phy_nl; + break; + default: + phy_type = ixgbe_phy_unknown; + break; + } + + return phy_type; +} + +/** + * ixgbe_reset_phy_generic - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) +{ + u32 i; + u16 ctrl = 0; + s32 status = 0; + + if (hw->phy.type == ixgbe_phy_unknown) + status = ixgbe_identify_phy_generic(hw); + + if (status != 0 || hw->phy.type == ixgbe_phy_none) + goto out; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) + goto out; + + /* + * Perform soft PHY reset to the PHY_XS. + * This will cause a soft reset to the PHY + */ + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_PHYXS, + MDIO_CTRL1_RESET); + + /* + * Poll for reset bit to self-clear indicating reset is complete. + * Some PHYs could take up to 3 seconds to complete and need about + * 1.7 usec delay after the reset is complete. 
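+	 * Thirty iterations of msleep(100) in the loop below bound that
+	 * wait at roughly 3 seconds before the reset is declared failed.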
+ */ + for (i = 0; i < 30; i++) { + msleep(100); + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_PHYXS, &ctrl); + if (!(ctrl & MDIO_CTRL1_RESET)) { + udelay(2); + break; + } + } + + if (ctrl & MDIO_CTRL1_RESET) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "PHY reset polling failed to complete.\n"); + } + +out: + return status; +} + +/** + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + u32 command; + u32 i; + u32 data; + s32 status = 0; + u16 gssr; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == 0) { + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address command did not complete.\n"); + status = IXGBE_ERR_PHY; + } + + if (status == 0) { + /* + * Address cycle complete, setup and write the read + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << + IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY read command didn't complete\n"); + status = IXGBE_ERR_PHY; + } else { + /* + * Read operation is complete. 
Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + } + } + + hw->mac.ops.release_swfw_sync(hw, gssr); + } + + return status; +} + +/** + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 command; + u32 i; + s32 status = 0; + u16 gssr; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == 0) { + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address cmd didn't complete\n"); + status = IXGBE_ERR_PHY; + } + + if (status == 0) { + /* + * Address cycle complete, setup and write the write + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << + IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address cmd didn't complete\n"); + status = IXGBE_ERR_PHY; + } + } + + hw->mac.ops.release_swfw_sync(hw, gssr); + } + + return status; +} + +/** + * ixgbe_setup_phy_link_generic - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. 
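+ *
+ * Each advertised speed uses the same read-modify-write pattern on the
+ * AN MMD, sketched here with placeholder REG/ADV_BIT/SPEED names:
+ *
+ *	hw->phy.ops.read_reg(hw, REG, MDIO_MMD_AN, &val);
+ *	val &= ~ADV_BIT;
+ *	if (hw->phy.autoneg_advertised & SPEED)
+ *		val |= ADV_BIT;
+ *	hw->phy.ops.write_reg(hw, REG, MDIO_MMD_AN, val);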
+ **/ +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) +{ + s32 status = 0; + u32 time_out; + u32 max_time_out = 10; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + + hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~(ADVERTISE_100FULL | + ADVERTISE_100HALF); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= ADVERTISE_100FULL; + + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + autoneg_reg); + } + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); + + autoneg_reg |= MDIO_AN_CTRL1_RESTART; + + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); + + /* Wait for autonegotiation to finish */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + /* Restart PHY autonegotiation and wait for completion */ + status = hw->phy.ops.read_reg(hw, MDIO_STAT1, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= MDIO_AN_STAT1_COMPLETE; + if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { + break; + } + } + + if (time_out == max_time_out) { + status = IXGBE_ERR_LINK_SETUP; + hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); + } + + return status; +} + +/** + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + **/ +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + + /* + * Clear autoneg_advertised and set new values based on input link + * speed. + */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + /* Setup link based on the new speed settings */ + hw->phy.ops.setup_link(hw); + + return 0; +} + +/** + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the link capabilities by reading the AUTOC register. 
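+ * (Strictly, the copper variant reads the PMA/PMD speed-ability
+ * register MDIO_SPEED rather than AUTOC, and maps its bits onto the
+ * IXGBE_LINK_SPEED_* flags, e.g. MDIO_SPEED_10G onto
+ * IXGBE_LINK_SPEED_10GB_FULL.)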
+ */ +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = IXGBE_ERR_LINK_SETUP; + u16 speed_ability; + + *speed = 0; + *autoneg = true; + + status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, + &speed_ability); + + if (status == 0) { + if (speed_ability & MDIO_SPEED_10G) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_1000) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_100) + *speed |= IXGBE_LINK_SPEED_100_FULL; + } + + return status; +} + +/** + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. + **/ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + s32 status = 0; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + /* Initialize speed and link to default case */ + *link_up = false; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + status = hw->phy.ops.read_reg(hw, + MDIO_STAT1, + MDIO_MMD_VEND1, + &phy_data); + phy_link = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = true; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + + return status; +} + +/** + * ixgbe_setup_phy_link_tnx - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. 
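+ *
+ * This mirrors ixgbe_setup_phy_link_generic() above; the TNX variant
+ * appears to differ only in carrying the 1G advertisement bit in
+ * IXGBE_MII_AUTONEG_XNP_TX_REG instead of the vendor provisioning
+ * register.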
+ **/ +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) +{ + s32 status = 0; + u32 time_out; + u32 max_time_out = 10; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + + hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~(ADVERTISE_100FULL | + ADVERTISE_100HALF); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= ADVERTISE_100FULL; + + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + autoneg_reg); + } + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); + + autoneg_reg |= MDIO_AN_CTRL1_RESTART; + + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); + + /* Wait for autonegotiation to finish */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + /* Restart PHY autonegotiation and wait for completion */ + status = hw->phy.ops.read_reg(hw, MDIO_STAT1, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= MDIO_AN_STAT1_COMPLETE; + if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) + break; + } + + if (time_out == max_time_out) { + status = IXGBE_ERR_LINK_SETUP; + hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); + } + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status = 0; + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, + MDIO_MMD_VEND1, + firmware_version); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status = 0; + + status = hw->phy.ops.read_reg(hw, AQ_FW_REV, + MDIO_MMD_VEND1, + firmware_version); + + return status; +} + +/** + * ixgbe_reset_phy_nl - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) +{ + u16 phy_offset, control, eword, edata, block_crc; + bool end_data = false; + u16 list_offset, data_offset; + u16 phy_data = 0; + s32 ret_val = 0; + u32 i; + + hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); + + /* reset the PHY and poll for 
completion */ + hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, + (phy_data | MDIO_CTRL1_RESET)); + + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, + &phy_data); + if ((phy_data & MDIO_CTRL1_RESET) == 0) + break; + usleep_range(10000, 20000); + } + + if ((phy_data & MDIO_CTRL1_RESET) != 0) { + hw_dbg(hw, "PHY reset did not complete.\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + + /* Get init offsets */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val != 0) + goto out; + + ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); + data_offset++; + while (!end_data) { + /* + * Read control word from PHY init contents offset + */ + ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); + control = (eword & IXGBE_CONTROL_MASK_NL) >> + IXGBE_CONTROL_SHIFT_NL; + edata = eword & IXGBE_DATA_MASK_NL; + switch (control) { + case IXGBE_DELAY_NL: + data_offset++; + hw_dbg(hw, "DELAY: %d MS\n", edata); + usleep_range(edata * 1000, edata * 2000); + break; + case IXGBE_DATA_NL: + hw_dbg(hw, "DATA:\n"); + data_offset++; + hw->eeprom.ops.read(hw, data_offset++, + &phy_offset); + for (i = 0; i < edata; i++) { + hw->eeprom.ops.read(hw, data_offset, &eword); + hw->phy.ops.write_reg(hw, phy_offset, + MDIO_MMD_PMAPMD, eword); + hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, + phy_offset); + data_offset++; + phy_offset++; + } + break; + case IXGBE_CONTROL_NL: + data_offset++; + hw_dbg(hw, "CONTROL:\n"); + if (edata == IXGBE_CONTROL_EOL_NL) { + hw_dbg(hw, "EOL\n"); + end_data = true; + } else if (edata == IXGBE_CONTROL_SOL_NL) { + hw_dbg(hw, "SOL\n"); + } else { + hw_dbg(hw, "Bad control value\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + break; + default: + hw_dbg(hw, "Bad control type\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + } + +out: + return ret_val; +} + +/** + * ixgbe_identify_sfp_module_generic - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. 
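+ *
+ * Vendor matching folds SFF EEPROM bytes 0x25-0x27 into the layout of
+ * the IXGBE_SFF_VENDOR_OUI_* constants; with invented example bytes:
+ *
+ *	oui = (0x00 << 24) | (0x1B << 16) | (0x21 << 8)
+ *	    = 0x001B2100 = IXGBE_SFF_VENDOR_OUI_INTEL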
+ **/ +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u8 cable_tech = 0; + u8 cable_spec = 0; + u16 enforce_sfp = 0; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + + if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 - 82599-specific + * 4 SFP_DA_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + * 7 SFP_act_lmt_DA_CORE0 - 82599-specific + * 8 SFP_act_lmt_DA_CORE1 - 82599-specific + * 9 SFP_1g_cu_CORE0 - 82599-specific + * 10 SFP_1g_cu_CORE1 - 82599-specific + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu; + else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_sr; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_lr; + else + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } else if (hw->mac.type == ixgbe_mac_82599EB) { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core1; + } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { + hw->phy.ops.read_i2c_eeprom( + hw, IXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + hw->phy.sfp_type = + ixgbe_sfp_type_unknown; + } + } else if (comp_codes_10g & + (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core1; + } else { + hw->phy.sfp_type = 
ixgbe_sfp_type_unknown; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. */ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor */ + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = identifier; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case IXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_tyco; + break; + case IXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = ixgbe_phy_sfp_ftl_active; + else + hw->phy.type = ixgbe_phy_sfp_ftl; + break; + case IXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = ixgbe_phy_sfp_avago; + break; + case IXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = ixgbe_phy_sfp_intel; + break; + default: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_unknown; + else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_active_unknown; + else + hw->phy.type = ixgbe_phy_sfp_unknown; + break; + } + } + + /* Allow any DA cable vendor */ + if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | + IXGBE_SFF_DA_ACTIVE_CABLE)) { + status = 0; + goto out; + } + + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + /* Anything else 82598-based is supported */ + if (hw->mac.type == ixgbe_mac_82598EB) { + status = 0; + goto out; + } + + hw->mac.ops.get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && + !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || + (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_sfp_intel) { + status = 0; + } else { + hw_dbg(hw, "SFP+ module not supported\n"); + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } else { + status = 0; + } + } + +out: + return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + } + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init 
sequence + * @hw: pointer to hardware structure + * @list_offset: offset to the SFP ID list + * @data_offset: offset to the SFP data block + * + * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if + * so it returns the offsets to the phy init sequence block. + **/ +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset) +{ + u16 sfp_id; + u16 sfp_type = hw->phy.sfp_type; + + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return IXGBE_ERR_SFP_NOT_PRESENT; + + if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && + (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + /* + * Limiting active cables and 1G Phys must be initialized as + * SR modules + */ + if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || + sfp_type == ixgbe_sfp_type_1g_cu_core0) + sfp_type = ixgbe_sfp_type_srlr_core0; + else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || + sfp_type == ixgbe_sfp_type_1g_cu_core1) + sfp_type = ixgbe_sfp_type_srlr_core1; + + /* Read offset to PHY init contents */ + hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); + + if ((!*list_offset) || (*list_offset == 0xFFFF)) + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + + /* Shift offset to first ID word */ + (*list_offset)++; + + /* + * Find the matching SFP ID in the EEPROM + * and program the init sequence + */ + hw->eeprom.ops.read(hw, *list_offset, &sfp_id); + + while (sfp_id != IXGBE_PHY_INIT_END_NL) { + if (sfp_id == sfp_type) { + (*list_offset)++; + hw->eeprom.ops.read(hw, *list_offset, data_offset); + if ((!*data_offset) || (*data_offset == 0xFFFF)) { + hw_dbg(hw, "SFP+ module not supported\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + break; + } + } else { + (*list_offset) += 2; + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + return IXGBE_ERR_PHY; + } + } + + if (sfp_id == IXGBE_PHY_INIT_END_NL) { + hw_dbg(hw, "No matching SFP+ module found\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return 0; +} + +/** + * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data) +{ + return hw->phy.ops.write_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified deivce address. 
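+ *
+ * Bit 0 of the I2C address byte on the wire selects the direction,
+ * which is why the read phase below sends (dev_addr | 0x1): 0xA0
+ * addresses the SFP EEPROM for writing, 0xA1 for reading.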
+ **/ +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + s32 status = 0; + u32 max_retry = 10; + u32 retry = 0; + u16 swfw_mask = 0; + bool nack = 1; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { + status = IXGBE_ERR_SWFW_SYNC; + goto read_byte_out; + } + + ixgbe_i2c_start(hw); + + /* Device Address and write indication */ + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + ixgbe_i2c_start(hw); + + /* Device Address and read indication */ + status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_in_i2c_byte(hw, data); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_bit(hw, nack); + if (status != 0) + goto fail; + + ixgbe_i2c_stop(hw); + break; + +fail: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(100); + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte read error - Retrying.\n"); + else + hw_dbg(hw, "I2C byte read error.\n"); + + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +read_byte_out: + return status; +} + +/** + * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
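+ *
+ * Unlike the read path, which retries up to ten times and clears the
+ * bus between attempts, the write path makes a single attempt
+ * (max_retry is 1) and holds the SWFW semaphore for its duration.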
+ **/ +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status = 0; + u32 max_retry = 1; + u32 retry = 0; + u16 swfw_mask = 0; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { + status = IXGBE_ERR_SWFW_SYNC; + goto write_byte_out; + } + + do { + ixgbe_i2c_start(hw); + + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, data); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + ixgbe_i2c_stop(hw); + break; + +fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte write error - Retrying.\n"); + else + hw_dbg(hw, "I2C byte write error.\n"); + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +write_byte_out: + return status; +} + +/** + * ixgbe_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +static void ixgbe_i2c_start(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + + /* Start condition must begin with data and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 1); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + udelay(IXGBE_I2C_T_SU_STA); + + ixgbe_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + udelay(IXGBE_I2C_T_HD_STA); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + udelay(IXGBE_I2C_T_LOW); + +} + +/** + * ixgbe_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +static void ixgbe_i2c_stop(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + + /* Stop condition must begin with data low and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 0); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + udelay(IXGBE_I2C_T_SU_STO); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + udelay(IXGBE_I2C_T_BUF); +} + +/** + * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) +{ + s32 status = 0; + s32 i; + bool bit = 0; + + for (i = 7; i >= 0; i--) { + status = ixgbe_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + + if (status != 0) + break; + } + + return status; +} + +/** + * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) +{ + s32 status = 0; + s32 i; + u32 i2cctl; + bool bit = 0; + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = ixgbe_clock_out_i2c_bit(hw, bit); + + if (status != 0) + break; + } + + /* Release SDA line (set high) */ + i2cctl = 
IXGBE_READ_REG(hw, IXGBE_I2CCTL); + i2cctl |= IXGBE_I2C_DATA_OUT; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); + + return status; +} + +/** + * ixgbe_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) +{ + s32 status; + u32 i = 0; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 timeout = 10; + bool ack = 1; + + status = ixgbe_raise_i2c_clk(hw, &i2cctl); + + if (status != 0) + goto out; + + /* Minimum high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + /* Poll for ACK. Note that ACK in I2C spec is + * transition from 1 to 0 */ + for (i = 0; i < timeout; i++) { + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + ack = ixgbe_get_i2c_data(&i2cctl); + + udelay(1); + if (ack == 0) + break; + } + + if (ack == 1) { + hw_dbg(hw, "I2C ack was not received.\n"); + status = IXGBE_ERR_I2C; + } + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + udelay(IXGBE_I2C_T_LOW); + +out: + return status; +} + +/** + * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) +{ + s32 status; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + + status = ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + *data = ixgbe_get_i2c_data(&i2cctl); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + udelay(IXGBE_I2C_T_LOW); + + return status; +} + +/** + * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + + status = ixgbe_set_i2c_data(hw, &i2cctl, data); + if (status == 0) { + status = ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. 
+ */ + udelay(IXGBE_I2C_T_LOW); + } else { + status = IXGBE_ERR_I2C; + hw_dbg(hw, "I2C data was not set to %X\n", data); + } + + return status; +} +/** + * ixgbe_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + **/ +static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + s32 status = 0; + + *i2cctl |= IXGBE_I2C_CLK_OUT; + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* SCL rise time (1000ns) */ + udelay(IXGBE_I2C_T_RISE); + + return status; +} + +/** + * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + **/ +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + + *i2cctl &= ~IXGBE_I2C_CLK_OUT; + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + udelay(IXGBE_I2C_T_FALL); +} + +/** + * ixgbe_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) +{ + s32 status = 0; + + if (data) + *i2cctl |= IXGBE_I2C_DATA_OUT; + else + *i2cctl &= ~IXGBE_I2C_DATA_OUT; + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + + /* Verify data was set correctly */ + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + if (data != ixgbe_get_i2c_data(i2cctl)) { + status = IXGBE_ERR_I2C; + hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); + } + + return status; +} + +/** + * ixgbe_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + **/ +static bool ixgbe_get_i2c_data(u32 *i2cctl) +{ + bool data; + + if (*i2cctl & IXGBE_I2C_DATA_IN) + data = 1; + else + data = 0; + + return data; +} + +/** + * ixgbe_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. + **/ +static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i; + + ixgbe_i2c_start(hw); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + udelay(IXGBE_I2C_T_LOW); + } + + ixgbe_i2c_start(hw); + + /* Put the i2c bus back to default state */ + ixgbe_i2c_stop(hw); +} + +/** + * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 
+ * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) +{ + s32 status = 0; + u16 phy_data = 0; + + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) + goto out; + + /* Check that the LASI temp alarm status was triggered */ + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, + MDIO_MMD_PMAPMD, &phy_data); + + if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) + goto out; + + status = IXGBE_ERR_OVERTEMP; +out: + return status; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h new file mode 100644 index 0000000..197bdd1 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -0,0 +1,131 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_PHY_H_ +#define _IXGBE_PHY_H_ + +#include "ixgbe_type.h" +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 + +/* EEPROM byte offsets */ +#define IXGBE_SFF_IDENTIFIER 0x0 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3 +#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C + +/* Bitmasks */ +#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +/* Flow control defines */ +#define IXGBE_TAF_SYM_PAUSE 0x400 +#define IXGBE_TAF_ASM_PAUSE 0x800 + +/* Bit-shift macros */ +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define IXGBE_I2C_T_HD_STA 4 +#define IXGBE_I2C_T_LOW 5 +#define IXGBE_I2C_T_HIGH 4 +#define IXGBE_I2C_T_SU_STA 5 +#define IXGBE_I2C_T_HD_DATA 5 +#define IXGBE_I2C_T_SU_DATA 1 +#define IXGBE_I2C_T_RISE 1 +#define IXGBE_I2C_T_FALL 1 +#define IXGBE_I2C_T_SU_STO 4 +#define IXGBE_I2C_T_BUF 5 + +#define IXGBE_TN_LASI_STATUS_REG 0x9005 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); + +/* PHY specific */ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version); + +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset); +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 
dev_addr, u8 *data); +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +#endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c new file mode 100644 index 0000000..d99d01e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -0,0 +1,687 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef NETIF_F_HW_VLAN_TX +#include +#endif + +#include "ixgbe.h" + +#include "ixgbe_sriov.h" + +static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, + int entries, u16 *hash_list, u32 vf) +{ + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct ixgbe_hw *hw = &adapter->hw; + int i; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + + /* only so many hash values supported */ + entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); + + /* + * salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* + * VFs are limited to using the MTA hash table for their multicast + * addresses + */ + for (i = 0; i < entries; i++) { + vfinfo->vf_mc_hashes[i] = hash_list[i]; + } + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); + mta_reg |= (1 << vector_bit); + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); + } + + return 0; +} + +static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct list_head *pos; + struct vf_macvlans *entry; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free == false) + hw->mac.ops.set_rar(hw, entry->rar_entry, + entry->vf_macvlan, + entry->vf, IXGBE_RAH_AV); + } +} + +void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo; + int i, j; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + + for (i = 
0; i < adapter->num_vfs; i++) { + vfinfo = &adapter->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + hw->addr_ctrl.mta_in_use++; + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); + mta_reg |= (1 << vector_bit); + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); + } + } + + /* Restore any VF macvlans */ + ixgbe_restore_vf_macvlans(adapter); +} + +static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, + u32 vf) +{ + return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); +} + +static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int new_mtu = msgbuf[1]; + u32 max_frs; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Only X540 supports jumbo frames in IOV mode */ + if (adapter->hw.mac.type != ixgbe_mac_X540) + return; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) { + e_err(drv, "VF mtu %d out of range\n", new_mtu); + return; + } + + max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) & + IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; + if (max_frs < new_mtu) { + max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); + } + + e_info(hw, "VF requests change max MTU to %d\n", new_mtu); +} + +static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) +{ + u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + vmolr |= (IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_BAM); + if (aupe) + vmolr |= IXGBE_VMOLR_AUPE; + else + vmolr &= ~IXGBE_VMOLR_AUPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); +} + +static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (vid) + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), + (vid | IXGBE_VMVIR_VLANA_DEFAULT)); + else + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); +} + +static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + + /* reset offloads to defaults */ + if (adapter->vfinfo[vf].pf_vlan) { + ixgbe_set_vf_vlan(adapter, true, + adapter->vfinfo[vf].pf_vlan, vf); + ixgbe_set_vmvir(adapter, + (adapter->vfinfo[vf].pf_vlan | + (adapter->vfinfo[vf].pf_qos << + VLAN_PRIO_SHIFT)), vf); + ixgbe_set_vmolr(hw, vf, false); + } else { + ixgbe_set_vmvir(adapter, 0, vf); + ixgbe_set_vmolr(hw, vf, true); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + ixgbe_set_rx_mode(adapter->netdev); + + hw->mac.ops.clear_rar(hw, rar_entry); +} + +static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + struct ixgbe_hw *hw = &adapter->hw; + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); + hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV); + + return 0; +} + +static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, + int vf, int index, unsigned char *mac_addr) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct list_head *pos; + struct vf_macvlans *entry; + + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = 
false; + hw->mac.ops.clear_rar(hw, entry->rar_entry); + } + } + } + + /* + * If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* + * If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list management required. + */ + if (!entry || !entry->free) + return -ENOSPC; + + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + + hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV); + + return 0; +} + +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + unsigned char vf_mac_addr[6]; + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x3f); + + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) { + random_ether_addr(vf_mac_addr); + e_info(probe, "IOV: VF %d is enabled MAC %pM\n", + vfn, vf_mac_addr); + /* + * Store away the VF "permananet" MAC address, it will ask + * for it later. + */ + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } + + return 0; +} + +static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg; + u32 reg_offset, vf_shift; + + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* enable transmit and receive for vf */ + reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); + reg |= (reg | (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); + + reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + reg |= (reg | (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + + /* Enable counting of spoofed packets in the SSVPC register */ + reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); + reg |= (1 << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); + + ixgbe_vf_reset_event(adapter, vf); +} + +static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) +{ + u32 mbx_size = IXGBE_VFMAILBOX_SIZE; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + struct ixgbe_hw *hw = &adapter->hw; + s32 retval; + int entries; + u16 *hash_list; + int add, vid, index; + u8 *new_mac; + + retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); + + if (retval) + pr_err("Error receiving message from VF\n"); + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) + return retval; + + /* + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. 
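+	 * The handshake: a completed IXGBE_VF_RESET sets clear_to_send,
+	 * anything arriving before that is NACKed below, and every reply
+	 * afterwards carries IXGBE_VT_MSGTYPE_CTS so the VF knows it may
+	 * continue configuring.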
+ */ + + if (msgbuf[0] == IXGBE_VF_RESET) { + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + new_mac = (u8 *)(&msgbuf[1]); + e_info(probe, "VF Reset msg received from vf %d\n", vf); + adapter->vfinfo[vf].clear_to_send = false; + ixgbe_vf_reset_msg(adapter, vf); + adapter->vfinfo[vf].clear_to_send = true; + + if (is_valid_ether_addr(new_mac) && + !adapter->vfinfo[vf].pf_set_mac) + ixgbe_set_vf_mac(adapter, vf, vf_mac); + else + ixgbe_set_vf_mac(adapter, + vf, adapter->vfinfo[vf].vf_mac_addresses); + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; + memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); + + return retval; + } + + if (!adapter->vfinfo[vf].clear_to_send) { + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + ixgbe_write_mbx(hw, msgbuf, 1, vf); + return retval; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case IXGBE_VF_SET_MAC_ADDR: + new_mac = ((u8 *)(&msgbuf[1])); + if (is_valid_ether_addr(new_mac) && + !adapter->vfinfo[vf].pf_set_mac) { + ixgbe_set_vf_mac(adapter, vf, new_mac); + } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses, + new_mac, ETH_ALEN)) { + e_warn(drv, "VF %d attempted to override " + "administratively set MAC address\nReload " + "the VF driver to resume operations\n", vf); + retval = -1; + } + break; + case IXGBE_VF_SET_MULTICAST: + entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) + >> IXGBE_VT_MSGINFO_SHIFT; + hash_list = (u16 *)&msgbuf[1]; + retval = ixgbe_set_vf_multicasts(adapter, entries, + hash_list, vf); + break; + case IXGBE_VF_SET_LPE: + ixgbe_set_vf_lpe(adapter, msgbuf); + break; + case IXGBE_VF_SET_VLAN: + add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) + >> IXGBE_VT_MSGINFO_SHIFT; + vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + if (adapter->vfinfo[vf].pf_vlan) { + e_warn(drv, "VF %d attempted to override " + "administratively set VLAN configuration\n" + "Reload the VF driver to resume operations\n", + vf); + retval = -1; + } else { + retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); + } + break; + case IXGBE_VF_SET_MACVLAN: + index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> + IXGBE_VT_MSGINFO_SHIFT; + /* + * If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. An index + * greater than 0 will indicate the VF is setting a + * macvlan MAC filter. 
+ */ + if (index > 0 && adapter->antispoofing_enabled) { + hw->mac.ops.set_mac_anti_spoofing(hw, false, + adapter->num_vfs); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); + adapter->antispoofing_enabled = false; + } + retval = ixgbe_set_vf_macvlan(adapter, vf, index, + (unsigned char *)(&msgbuf[1])); + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = IXGBE_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + else + msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; + + ixgbe_write_mbx(hw, msgbuf, 1, vf); + + return retval; +} + +static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 msg = IXGBE_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + ixgbe_write_mbx(hw, &msg, 1, vf); +} + +void ixgbe_msg_task(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vf; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + if (!ixgbe_check_for_rst(hw, vf)) + ixgbe_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!ixgbe_check_for_msg(hw, vf)) + ixgbe_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!ixgbe_check_for_ack(hw, vf)) + ixgbe_rcv_ack_from_vf(adapter, vf); + } +} + +void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + /* disable transmit and receive for all vfs */ + IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); + + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); +} + +void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0 ; i < adapter->num_vfs; i++) { + ping = IXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[i].clear_to_send) + ping |= IXGBE_VT_MSGTYPE_CTS; + ixgbe_write_mbx(hw, &ping, 1, i); + } +} + +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + return -EINVAL; + adapter->vfinfo[vf].pf_set_mac = true; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" + " change effective."); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + return ixgbe_set_vf_mac(adapter, vf, mac); +} + +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + if (vlan || qos) { + err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + ixgbe_set_vmolr(hw, vf, false); + if (adapter->antispoofing_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if 
(test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter, vlan, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
+ adapter->vfinfo[vf].pf_vlan = 0;
+ adapter->vfinfo[vf].pf_qos = 0;
+ }
+out:
+ return err;
+}
+
+static int ixgbe_link_mbps(int internal_link_speed)
+{
+ switch (internal_link_speed) {
+ case IXGBE_LINK_SPEED_100_FULL:
+ return 100;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ return 1000;
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ return 10000;
+ default:
+ return 0;
+ }
+}
+
+static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
+ int link_speed)
+{
+ int rf_dec, rf_int;
+ u32 bcnrc_val;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
+ rf_int = link_speed / tx_rate;
+ rf_dec = (link_speed - (rf_int * tx_rate));
+ rf_dec = (rf_dec * (1 << IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+ bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+ IXGBE_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, vf); /* vf X uses queue X */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
+ * and 0x004 otherwise.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
+ break;
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
+ break;
+ default:
+ break;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+}
+
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
+{
+ int actual_link_speed, i;
+ bool reset_rate = false;
+
+ /* VF Tx rate limit was not set */
+ if (adapter->vf_rate_link_speed == 0)
+ return;
+
+ actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
+ if (actual_link_speed != adapter->vf_rate_link_speed) {
+ reset_rate = true;
+ adapter->vf_rate_link_speed = 0;
+ dev_info(&adapter->pdev->dev,
+ "Link speed has been changed. VF Transmit rate "
+ "is disabled\n");
+ }
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (reset_rate)
+ adapter->vfinfo[i].tx_rate = 0;
+
+ ixgbe_set_vf_rate_limit(&adapter->hw, i,
+ adapter->vfinfo[i].tx_rate,
+ actual_link_speed);
+ }
+}
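The limiter programmed in ixgbe_set_vf_rate_limit() above divides the link speed by a fixed-point rate factor: rf_int is the integer part and rf_dec the 14-bit fraction, packed together into RTTBCNRC. A user-space sketch of the same arithmetic, reusing the shift and mask values from the RTTBCNRC defines in ixgbe_type.h below: requesting 300 Mb/s on a 10 Gb/s link yields rf_int = 33 and rf_dec = 5461, and 10000 / (33 + 5461/16384) is roughly 300.

    #include <stdio.h>

    #define RS_ENA       0x80000000u  /* IXGBE_RTTBCNRC_RS_ENA */
    #define RF_INT_SHIFT 14           /* IXGBE_RTTBCNRC_RF_INT_SHIFT */
    #define RF_DEC_MASK  0x00003FFFu  /* IXGBE_RTTBCNRC_RF_DEC_MASK */

    int main(void)
    {
            int link_speed = 10000;   /* Mb/s */
            int tx_rate = 300;        /* requested VF limit, Mb/s */
            int rf_int = link_speed / tx_rate;
            int rf_dec = link_speed - rf_int * tx_rate;
            unsigned int bcnrc;

            rf_dec = (rf_dec * (1 << RF_INT_SHIFT)) / tx_rate;
            bcnrc = RS_ENA | (rf_int << RF_INT_SHIFT) | (rf_dec & RF_DEC_MASK);

            printf("RTTBCNRC = 0x%08X\n", bcnrc);   /* prints 0x80085555 */
            return 0;
    }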
+
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int actual_link_speed;
+
+ actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
+ if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
+ (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
+ ((tx_rate != 0) && (tx_rate <= 10)))
+ /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
+ return -EINVAL;
+
+ adapter->vf_rate_link_speed = actual_link_speed;
+ adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
+ ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+ return 0;
+}
+
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->vlan = adapter->vfinfo[vf].pf_vlan;
+ ivi->qos = adapter->vfinfo[vf].pf_qos;
+ return 0;
+}
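The four ixgbe_ndo_* handlers above implement the PF side of the standard per-VF configuration callbacks of this kernel generation. A sketch of how they would be wired into the adapter's net_device_ops; the real table lives in ixgbe_main.c, which is not part of this hunk, so the wiring shown here is assumed rather than quoted:

    /* Assumed wiring; only the VF-related callbacks are shown. */
    static const struct net_device_ops ixgbe_netdev_ops = {
            /* ... regular netdev callbacks elided ... */
            .ndo_set_vf_mac     = ixgbe_ndo_set_vf_mac,
            .ndo_set_vf_vlan    = ixgbe_ndo_set_vf_vlan,
            .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
            .ndo_get_vf_config  = ixgbe_ndo_get_vf_config,
    };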
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
new file mode 100644
index 0000000..3417556
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,46 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_SRIOV_H_
+#define _IXGBE_SRIOV_H_
+
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+void ixgbe_msg_task(struct ixgbe_adapter *adapter);
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
+ u8 qos);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_SRIOV_H_ */
+
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
new file mode 100644
index 0000000..e0d970e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -0,0 +1,2877 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_TYPE_H_
+#define _IXGBE_TYPE_H_
+
+#include <linux/types.h>
+#include <linux/mdio.h>
+#include <linux/netdevice.h>
+
+/* Vendor ID */
+#define IXGBE_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82598 0x10B6
+#define IXGBE_DEV_ID_82598_BX 0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
+#define IXGBE_DEV_ID_82598AT 0x10C8
+#define IXGBE_DEV_ID_82598AT2 0x150B
+#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
+#define IXGBE_DEV_ID_82599_KX4 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_KR 0x1517
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
+#define IXGBE_DEV_ID_82599_CX4 0x10F9
+#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_82599_LS 0x154F
+#define IXGBE_DEV_ID_X540T 0x1528
+
+/* General Registers */
+#define IXGBE_CTRL 0x00000
+#define IXGBE_STATUS 0x00008
+#define IXGBE_CTRL_EXT 0x00018
+#define IXGBE_ESDP 0x00020
+#define IXGBE_EODSDP 0x00028
+#define IXGBE_I2CCTL 0x00028
+#define IXGBE_LEDCTL 0x00200
+#define IXGBE_FRTIMER 0x00048
+#define IXGBE_TCPTIMER 0x0004C
+#define IXGBE_CORESPARE 0x00600
+#define IXGBE_EXVET 0x05078
+
+/* NVM Registers */
+#define IXGBE_EEC 0x10010
+#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
+#define IXGBE_FLA 0x1001C
+#define IXGBE_EEMNGCTL 0x10110
+#define IXGBE_EEMNGDATA 0x10114
+#define IXGBE_FLMNGCTL 0x10118
+#define IXGBE_FLMNGDATA 0x1011C
+#define IXGBE_FLMNGCNT 0x10120
+#define IXGBE_FLOP 0x1013C
+#define IXGBE_GRC 0x10200
+
+/* General Receive Control */
+#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
+
+#define IXGBE_VPDDIAG0 0x10204
+#define IXGBE_VPDDIAG1 0x10208
+
+/* I2CCTL Bit Masks */
+#define IXGBE_I2C_CLK_IN 0x00000001
+#define IXGBE_I2C_CLK_OUT 0x00000002
+#define IXGBE_I2C_DATA_IN 0x00000004
+#define IXGBE_I2C_DATA_OUT 0x00000008
+
+/* Interrupt Registers */
+#define IXGBE_EICR 0x00800
+#define IXGBE_EICS 0x00808
+#define IXGBE_EIMS 0x00880
+#define IXGBE_EIMC 0x00888
+#define IXGBE_EIAC 0x00810
+#define IXGBE_EIAM 0x00890
+#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+/*
+ * 82598 EITR is 16 bits but set the limits based on the max
+ * supported by all ixgbe hardware. 82599 EITR is only 12 bits,
+ * with the lower 3 always zero.
+ */
+#define IXGBE_MAX_INT_RATE 488281
+#define IXGBE_MIN_INT_RATE 956
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
+#define IXGBE_EITR(_i) (((_i) <= 23) ? 
(0x00820 + ((_i) * 4)) : \ + (0x012300 + (((_i) - 24) * 4))) +#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 +#define IXGBE_EITR_LLI_MOD 0x00008000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ +#define IXGBE_EITRSEL 0x00894 +#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) +#define IXGBE_GPIE 0x00898 + +/* Flow Control Registers */ +#define IXGBE_FCADBUL 0x03210 +#define IXGBE_FCADBUH 0x03214 +#define IXGBE_FCAMACL 0x04328 +#define IXGBE_FCAMACH 0x0432C +#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_PFCTOP 0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV 0x032A0 +#define IXGBE_FCCFG 0x03D00 +#define IXGBE_TFCS 0x0CE00 + +/* Receive DMA Registers */ +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ + (0x0D000 + ((_i - 64) * 0x40))) +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ + (0x0D004 + ((_i - 64) * 0x40))) +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ + (0x0D008 + ((_i - 64) * 0x40))) +#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ + (0x0D010 + ((_i - 64) * 0x40))) +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ + (0x0D018 + ((_i - 64) * 0x40))) +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ + (0x0D028 + ((_i - 64) * 0x40))) +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + ((_i - 64) * 0x40))) +#define IXGBE_RSCDBU 0x03028 +#define IXGBE_RDDCC 0x02F20 +#define IXGBE_RXMEMWRAP 0x03190 +#define IXGBE_STARCTRL 0x03024 +/* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + ((_i - 64) * 0x40)))) +/* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + ((_i - 64) * 0x40)))) +#define IXGBE_RDRXCTL 0x02F00 +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) + /* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXCTRL 0x03000 +#define IXGBE_DROPEN 0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 + +/* Receive Registers */ +#define IXGBE_RXCSUM 0x05000 +#define IXGBE_RFCTL 0x05008 +#define IXGBE_DRECCCTL 0x02F08 +#define IXGBE_DRECCCTL_DISABLE 0 +/* Multicast Table Array - 128 entries */ +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x0A204 + ((_i) * 8))) +#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) +#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) +/* Packet split receive type */ +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? 
(0x05480 + ((_i) * 4)) : \ + (0x0EA00 + ((_i) * 4))) +/* array of 4096 1-bit vlan filters */ +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) +/*array of 4096 4-bit vlan vmdq indices */ +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) +#define IXGBE_FCTRL 0x05080 +#define IXGBE_VLNCTRL 0x05088 +#define IXGBE_MCSTCTRL 0x05090 +#define IXGBE_MRQC 0x05818 +#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ +#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ +#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ +#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ +#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ +#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ +#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ +#define IXGBE_RQTC 0x0EC70 +#define IXGBE_MTQC 0x08120 +#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_VT_CTL 0x051B0 +#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ +#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ +#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ +#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ +#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) +#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) +#define IXGBE_QDE 0x2F04 +#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ +#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ +#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) +#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ +#define IXGBE_RXFECCERR0 0x051B8 +#define IXGBE_LLITHRESH 0x0EC90 +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_VMD_CTL 0x0581C +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + +/* Flow Director registers */ +#define IXGBE_FDIRCTRL 0x0EE00 +#define IXGBE_FDIRHKEY 0x0EE68 +#define IXGBE_FDIRSKEY 0x0EE6C +#define IXGBE_FDIRDIP4M 0x0EE3C +#define IXGBE_FDIRSIP4M 0x0EE40 +#define IXGBE_FDIRTCPM 0x0EE44 +#define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRIP6M 0x0EE74 +#define IXGBE_FDIRM 0x0EE70 + +/* Flow Director Stats registers */ +#define IXGBE_FDIRFREE 0x0EE38 +#define IXGBE_FDIRLEN 0x0EE4C +#define IXGBE_FDIRUSTAT 0x0EE50 +#define IXGBE_FDIRFSTAT 0x0EE54 +#define IXGBE_FDIRMATCH 0x0EE58 +#define IXGBE_FDIRMISS 0x0EE5C + +/* Flow Director Programming registers */ +#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ +#define IXGBE_FDIRIPSA 0x0EE18 +#define IXGBE_FDIRIPDA 0x0EE1C +#define IXGBE_FDIRPORT 0x0EE20 +#define IXGBE_FDIRVLAN 0x0EE24 +#define IXGBE_FDIRHASH 0x0EE28 +#define IXGBE_FDIRCMD 0x0EE2C + +/* Transmit DMA registers */ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define 
IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL 0x07E00 + +#define IXGBE_DMATXCTL 0x04A80 +#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define IXGBE_PFDTXGSWC 0x08220 +#define IXGBE_DTXMXSZRQ 0x08100 +#define IXGBE_DTXTCPFLGL 0x04A88 +#define IXGBE_DTXTCPFLGH 0x04A8C +#define IXGBE_LBDRPEN 0x0CA00 +#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ + +#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ +#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ +#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ + +#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ + +/* Anti-spoofing defines */ +#define IXGBE_SPOOF_MACAS_MASK 0xFF +#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 +#define IXGBE_SPOOF_VLANAS_SHIFT 8 +#define IXGBE_PFVFSPOOF_REG_COUNT 8 + +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ +/* Tx DCA Control register : 128 of these (0-127) */ +#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) +#define IXGBE_TIPG 0x0CB00 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_MNGTXMAP 0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT 10 + +/* Wake up registers */ +#define IXGBE_WUC 0x05800 +#define IXGBE_WUFC 0x05808 +#define IXGBE_WUS 0x05810 +#define IXGBE_IPAV 0x05838 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ + +#define IXGBE_WUPL 0x05900 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host + * Filter Table */ + +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 +#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 +#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ +#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ + +/* Wake Up Filter Control */ +#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ + +#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define IXGBE_WUFC_FLX2 
0x00040000 /* Flexible Filter 2 Enable */ +#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ +#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ +#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ +#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ +#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */ +#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ + +/* Wake Up Status */ +#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC +#define IXGBE_WUS_MAG IXGBE_WUFC_MAG +#define IXGBE_WUS_EX IXGBE_WUFC_EX +#define IXGBE_WUS_MC IXGBE_WUFC_MC +#define IXGBE_WUS_BC IXGBE_WUFC_BC +#define IXGBE_WUS_ARP IXGBE_WUFC_ARP +#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 +#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 +#define IXGBE_WUS_MNG IXGBE_WUFC_MNG +#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 +#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 +#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 +#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 +#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 +#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 +#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS + +/* Wake Up Packet Length */ +#define IXGBE_WUPL_LENGTH_MASK 0xFFFF + +/* DCB registers */ +#define IXGBE_RMCS 0x03D00 +#define IXGBE_DPMCS 0x07F40 +#define IXGBE_PDPMCS 0x0CD00 +#define IXGBE_RUPPBMR 0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + + +/* Security Control Registers */ +#define IXGBE_SECTXCTRL 0x08800 +#define IXGBE_SECTXSTAT 0x08804 +#define IXGBE_SECTXBUFFAF 0x08808 +#define IXGBE_SECTXMINIFG 0x08810 +#define IXGBE_SECRXCTRL 0x08D00 +#define IXGBE_SECRXSTAT 0x08D04 + +/* Security Bit Fields and Masks */ +#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 +#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 +#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 + +#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 +#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 + +#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 +#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 + +#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 +#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 + +/* LinkSec (MacSec) Registers */ +#define IXGBE_LSECTXCAP 0x08A00 +#define IXGBE_LSECRXCAP 0x08F00 +#define IXGBE_LSECTXCTRL 0x08A04 +#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ +#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ +#define IXGBE_LSECTXSA 0x08A10 +#define IXGBE_LSECTXPN0 0x08A14 +#define IXGBE_LSECTXPN1 0x08A18 +#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECRXCTRL 0x08F04 +#define IXGBE_LSECRXSCL 0x08F08 +#define IXGBE_LSECRXSCH 0x08F0C +#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) +#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ +#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ +#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ +#define 
IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ +#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ +#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ +#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ +#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ +#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ +#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ +#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ +#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ +#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ +#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ +#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ +#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ +#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ +#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ +#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ + +/* LinkSec (MacSec) Bit Fields and Masks */ +#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECTXCAP_SUM_SHIFT 16 +#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECRXCAP_SUM_SHIFT 16 + +#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 +#define IXGBE_LSECTXCTRL_DISABLE 0x0 +#define IXGBE_LSECTXCTRL_AUTH 0x1 +#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define IXGBE_LSECTXCTRL_AISCI 0x00000020 +#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C +#define IXGBE_LSECRXCTRL_EN_SHIFT 2 +#define IXGBE_LSECRXCTRL_DISABLE 0x0 +#define IXGBE_LSECRXCTRL_CHECK 0x1 +#define IXGBE_LSECRXCTRL_STRICT 0x2 +#define IXGBE_LSECRXCTRL_DROP 0x3 +#define IXGBE_LSECRXCTRL_PLSH 0x00000040 +#define IXGBE_LSECRXCTRL_RP 0x00000080 +#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* IpSec Registers */ +#define IXGBE_IPSTXIDX 0x08900 +#define IXGBE_IPSTXSALT 0x08904 +#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXIDX 0x08E00 +#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSPI 0x08E14 +#define IXGBE_IPSRXIPIDX 0x08E18 +#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSALT 0x08E2C +#define IXGBE_IPSRXMOD 0x08E30 + +#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 + +/* DCB registers */ +#define IXGBE_RTRPCS 0x02430 +#define IXGBE_RTTDCS 0x04900 +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTPCS 0x0CD00 +#define IXGBE_RTRUP2TC 0x03020 +#define IXGBE_RTTUP2TC 0x0C800 +#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDQSEL 0x04904 +#define IXGBE_RTTDT1C 0x04908 +#define IXGBE_RTTDT1S 0x0490C +#define IXGBE_RTTDTECC 0x04990 +#define IXGBE_RTTDTECC_NO_BCN 0x00000100 +#define IXGBE_RTTBCNRC 0x04984 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 +#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RF_INT_MASK \ + (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) +#define IXGBE_RTTBCNRM 
0x04980
+
+/* FCoE DMA Context Registers */
+#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */
+#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
+#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
+#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
+#define IXGBE_FCBUFF_OFFSET_SHIFT 16
+#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
+#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
+
+/* FCoE SOF/EOF */
+#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
+/* FCoE Filter Context Registers */
+#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+/* FCoE Receive Control */
+#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. 
Header */ +#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 +/* FCoE Redirection */ +#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ +#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ +#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ +#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ + +/* Stats registers */ +#define IXGBE_CRCERRS 0x04000 +#define IXGBE_ILLERRC 0x04004 +#define IXGBE_ERRBC 0x04008 +#define IXGBE_MSPDC 0x04010 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC 0x04034 +#define IXGBE_MRFC 0x04038 +#define IXGBE_RLEC 0x04040 +#define IXGBE_LXONTXC 0x03F60 +#define IXGBE_LXONRXC 0x0CF60 +#define IXGBE_LXOFFTXC 0x03F68 +#define IXGBE_LXOFFRXC 0x0CF68 +#define IXGBE_LXONRXCNT 0x041A4 +#define IXGBE_LXOFFRXCNT 0x041A8 +#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64 0x0405C +#define IXGBE_PRC127 0x04060 +#define IXGBE_PRC255 0x04064 +#define IXGBE_PRC511 0x04068 +#define IXGBE_PRC1023 0x0406C +#define IXGBE_PRC1522 0x04070 +#define IXGBE_GPRC 0x04074 +#define IXGBE_BPRC 0x04078 +#define IXGBE_MPRC 0x0407C +#define IXGBE_GPTC 0x04080 +#define IXGBE_GORCL 0x04088 +#define IXGBE_GORCH 0x0408C +#define IXGBE_GOTCL 0x04090 +#define IXGBE_GOTCH 0x04094 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC 0x040A4 +#define IXGBE_RFC 0x040A8 +#define IXGBE_ROC 0x040AC +#define IXGBE_RJC 0x040B0 +#define IXGBE_MNGPRC 0x040B4 +#define IXGBE_MNGPDC 0x040B8 +#define IXGBE_MNGPTC 0x0CF90 +#define IXGBE_TORL 0x040C0 +#define IXGBE_TORH 0x040C4 +#define IXGBE_TPR 0x040D0 +#define IXGBE_TPT 0x040D4 +#define IXGBE_PTC64 0x040D8 +#define IXGBE_PTC127 0x040DC +#define IXGBE_PTC255 0x040E0 +#define IXGBE_PTC511 0x040E4 +#define IXGBE_PTC1023 0x040E8 +#define IXGBE_PTC1522 0x040EC +#define IXGBE_MPTC 0x040F0 +#define IXGBE_BPTC 0x040F4 +#define IXGBE_XEC 0x04120 +#define IXGBE_SSVPC 0x08780 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ + (0x08600 + ((_i) * 4))) +#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_O2BGPTC 0x041C4 +#define IXGBE_O2BSPC 0x087B0 +#define IXGBE_B2OSPC 0x041C0 +#define IXGBE_B2OGPRC 0x02F90 +#define IXGBE_PCRC8ECL 0x0E810 +#define IXGBE_PCRC8ECH 0x0E811 +#define IXGBE_PCRC8ECH_MASK 0x1F +#define IXGBE_LDPCECL 0x0E820 +#define IXGBE_LDPCECH 0x0E821 + +/* Management */ +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ +#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_LSWFW 0x15014 + +/* ARC Subsystem registers */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ + +#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define IXGBE_HICR_C 0x02 +#define IXGBE_HICR_SV 0x04 /* Status Validity */ +#define IXGBE_HICR_FW_RESET_ENABLE 0x40 +#define IXGBE_HICR_FW_RESET 0x80 + +/* PCI-E registers */ +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_FACTPS 0x10150 +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM 0x10140 +#define IXGBE_FWSM 0x10148 +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 +#define IXGBE_SWFW_SYNC IXGBE_GSSR + +/* PCIe registers 82599-specific */ +#define IXGBE_GCR_EXT 0x11050 +#define IXGBE_GSCL_5_82599 0x11030 +#define IXGBE_GSCL_6_82599 0x11034 +#define IXGBE_GSCL_7_82599 0x11038 
+#define IXGBE_GSCL_8_82599 0x1103C +#define IXGBE_PHYADR_82599 0x11040 +#define IXGBE_PHYDAT_82599 0x11044 +#define IXGBE_PHYCTL_82599 0x11048 +#define IXGBE_PBACLR_82599 0x11068 +#define IXGBE_CIAA_82599 0x11088 +#define IXGBE_CIAD_82599 0x1108C +#define IXGBE_PICAUSE 0x110B0 +#define IXGBE_PIENA 0x110B8 +#define IXGBE_CDQ_MBR_82599 0x110B4 +#define IXGBE_PCIESPARE 0x110BC +#define IXGBE_MISC_REG_82599 0x110F0 +#define IXGBE_ECC_CTRL_0_82599 0x11100 +#define IXGBE_ECC_CTRL_1_82599 0x11104 +#define IXGBE_ECC_STATUS_82599 0x110E0 +#define IXGBE_BAR_CTRL_82599 0x110F4 + +/* PCI Express Control */ +#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IXGBE_GCR_CAP_VER2 0x00040000 + +#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 +#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 +#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 +#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 +#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ + IXGBE_GCR_EXT_VT_MODE_64) + +/* Time Sync Registers */ +#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ +#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ +#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ +#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ +#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ +#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ +#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ +#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ +#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ +#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ +#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ +#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ +#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ +#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ +#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ +#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ +#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ +#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ +#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ +#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ +#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ +#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ +#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ + +/* Diagnostic Registers */ +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_RDMAM 0x02F30 +#define IXGBE_RDMAD 0x02F34 +#define IXGBE_TDSTATCTL 0x07C20 +#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ +#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_TDHMPN2 0x082FC +#define IXGBE_TXDESCIC 0x082CC +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) +#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 
+#define IXGBE_TXBUFDATA0 0x0C610 +#define IXGBE_TXBUFDATA1 0x0C614 +#define IXGBE_TXBUFDATA2 0x0C618 +#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_RXBUFCTRL 0x03600 +#define IXGBE_RXBUFDATA0 0x03610 +#define IXGBE_RXBUFDATA1 0x03614 +#define IXGBE_RXBUFDATA2 0x03618 +#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL 0x050A4 +#define IXGBE_MDFTC1 0x042B8 +#define IXGBE_MDFTC2 0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS 0x042CC +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ +#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ +#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ +#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ +#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ +#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ +#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ +#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ +#define IXGBE_PCIEECCCTL0 0x11100 +#define IXGBE_PCIEECCCTL1 0x11104 +#define IXGBE_RXDBUECC 0x03F70 +#define IXGBE_TXDBUECC 0x0CF70 +#define IXGBE_RXDBUEST 0x03F74 +#define IXGBE_TXDBUEST 0x0CF74 +#define IXGBE_PBTXECC 0x0C300 +#define IXGBE_PBRXECC 0x03300 +#define IXGBE_GHECCR 0x110B0 + +/* MAC Registers */ +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA 0x04218 +#define IXGBE_PCS1GANLP 0x0421C +#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0 0x04240 +#define IXGBE_HLREG1 0x04244 +#define IXGBE_PAP 0x04248 +#define IXGBE_MACA 0x0424C +#define IXGBE_APAE 0x04250 +#define IXGBE_ARD 0x04254 +#define IXGBE_AIS 0x04258 +#define IXGBE_MSCA 0x0425C +#define IXGBE_MSRWD 0x04260 +#define IXGBE_MLADD 0x04264 +#define IXGBE_MHADD 0x04268 +#define IXGBE_MAXFRS 0x04268 +#define IXGBE_TREG 0x0426C +#define IXGBE_PCSS1 0x04288 +#define IXGBE_PCSS2 0x0428C +#define IXGBE_XPCSS 0x04290 +#define IXGBE_MFLCN 0x04294 +#define IXGBE_SERDESC 0x04298 +#define IXGBE_MACS 0x0429C +#define IXGBE_AUTOC 0x042A0 +#define IXGBE_LINKS 0x042A4 +#define IXGBE_LINKS2 0x04324 +#define IXGBE_AUTOC2 0x042A8 +#define IXGBE_AUTOC3 0x042AC +#define IXGBE_ANLP1 0x042B0 +#define IXGBE_ANLP2 0x042B4 +#define IXGBE_MACC 0x04330 +#define IXGBE_ATLASCTL 0x04800 +#define IXGBE_MMNGC 0x042D0 +#define IXGBE_ANLPNP1 0x042D4 +#define IXGBE_ANLPNP2 0x042D8 +#define IXGBE_KRPCSFC 0x042E0 +#define IXGBE_KRPCSS 0x042E4 +#define IXGBE_FECS1 0x042E8 +#define IXGBE_FECS2 0x042EC +#define IXGBE_SMADARCTL 0x14F10 +#define IXGBE_MPVC 0x04318 +#define 
IXGBE_SGMIIC 0x04314 + +/* Statistics Registers */ +#define IXGBE_RXNFGPC 0x041B0 +#define IXGBE_RXNFGBCL 0x041B4 +#define IXGBE_RXNFGBCH 0x041B8 +#define IXGBE_RXDGPC 0x02F50 +#define IXGBE_RXDGBCL 0x02F54 +#define IXGBE_RXDGBCH 0x02F58 +#define IXGBE_RXDDGPC 0x02F5C +#define IXGBE_RXDDGBCL 0x02F60 +#define IXGBE_RXDDGBCH 0x02F64 +#define IXGBE_RXLPBKGPC 0x02F68 +#define IXGBE_RXLPBKGBCL 0x02F6C +#define IXGBE_RXLPBKGBCH 0x02F70 +#define IXGBE_RXDLPBKGPC 0x02F74 +#define IXGBE_RXDLPBKGBCL 0x02F78 +#define IXGBE_RXDLPBKGBCH 0x02F7C +#define IXGBE_TXDGPC 0x087A0 +#define IXGBE_TXDGBCL 0x087A4 +#define IXGBE_TXDGBCH 0x087A8 + +#define IXGBE_RXDSTATCTRL 0x02F40 + +/* Copper Pond 2 link timeout */ +#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 + +/* Omer CORECTL */ +#define IXGBE_CORECTL 0x014F00 +/* BARCTRL */ +#define IXGBE_BARCTRL 0x110F4 +#define IXGBE_BARCTRL_FLSIZE 0x0700 +#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 +#define IXGBE_BARCTRL_CSRSIZE 0x2000 + +/* RSCCTL Bit Masks */ +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C + +/* RSCDBU Bit Masks */ +#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F +#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 + +/* RDRXCTL Bit Masks */ +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ +#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_MVMEN 0x00000020 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ +#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ +#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ +#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ +#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ +#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ + +/* RQTC Bit Masks and Shifts */ +#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) +#define IXGBE_RQTC_TC0_MASK (0x7 << 0) +#define IXGBE_RQTC_TC1_MASK (0x7 << 4) +#define IXGBE_RQTC_TC2_MASK (0x7 << 8) +#define IXGBE_RQTC_TC3_MASK (0x7 << 12) +#define IXGBE_RQTC_TC4_MASK (0x7 << 16) +#define IXGBE_RQTC_TC5_MASK (0x7 << 20) +#define IXGBE_RQTC_TC6_MASK (0x7 << 24) +#define IXGBE_RQTC_TC7_MASK (0x7 << 28) + +/* PSRTYPE.RQPL Bit masks and shift */ +#define IXGBE_PSRTYPE_RQPL_MASK 0x7 +#define IXGBE_PSRTYPE_RQPL_SHIFT 29 + +/* CTRL Bit Masks */ +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. 
*/
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+
+/* FACTPS */
+#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
+
+/* MHADD Bit Masks */
+#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+/* Extended Device Control */
+#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
+#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+
+/* Direct Cache Access (DCA) definitions */
+#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
+
+/* MSCA Bit Masks */
+#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT 0
+#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol) */
+#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift */
+#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */
+#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */
+#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/
+#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */
+#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */
+#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
+
+/* MSRWD bit masks */
+#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT 16
+
+/* Atlas registers */
+#define IXGBE_ATLAS_PDN_LPBK 0x24
+#define IXGBE_ATLAS_PDN_10G 0xB
+#define IXGBE_ATLAS_PDN_1G 0xC
+#define IXGBE_ATLAS_PDN_AN 
0xD + +/* Atlas bit masks */ +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + +/* Omer bit masks */ +#define IXGBE_CORECTL_WRITE_CMD 0x00010000 + +/* MDIO definitions */ + +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ + +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 + +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ + +/* MII clause 22/28 definitions */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_AUTONEG_REG 0x0 + +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410 +#define TNX_FW_REV 0xB +#define X540_PHY_ID 0x01540200 +#define QT2022_PHY_ID 0x0043A400 +#define ATH_PHY_ID 0x03429050 +#define AQ_FW_REV 0x20 + +/* PHY Types */ +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 + +/* Special PHY Init Routine */ +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +#define IXGBE_PHY_INIT_END_NL 0xFFFF +#define IXGBE_CONTROL_MASK_NL 0xF000 +#define IXGBE_DATA_MASK_NL 0x0FFF +#define IXGBE_CONTROL_SHIFT_NL 12 +#define IXGBE_DELAY_NL 0 +#define IXGBE_DATA_NL 1 +#define IXGBE_CONTROL_NL 0x000F +#define IXGBE_CONTROL_EOL_NL 0x0FFF +#define IXGBE_CONTROL_SOL_NL 0x0000 + +/* General purpose Interrupt Enable */ +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ +#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 +#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ +#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ +#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ +#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ + +/* Packet Buffer Initialization */ +#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ +#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/ +#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/ + +#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define IXGBE_MAX_PB 8 + +/* 
Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + +/* Transmit Flow Control status */ +#define IXGBE_TFCS_TXOFF 0x00000001 +#define IXGBE_TFCS_TXOFF0 0x00000100 +#define IXGBE_TFCS_TXOFF1 0x00000200 +#define IXGBE_TFCS_TXOFF2 0x00000400 +#define IXGBE_TFCS_TXOFF3 0x00000800 +#define IXGBE_TFCS_TXOFF4 0x00001000 +#define IXGBE_TFCS_TXOFF5 0x00002000 +#define IXGBE_TFCS_TXOFF6 0x00004000 +#define IXGBE_TFCS_TXOFF7 0x00008000 + +/* TCP Timer */ +#define IXGBE_TCPTIMER_KS 0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +#define IXGBE_TCPTIMER_LOOP 0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + +/* HLREG0 Bit Masks */ +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ + +/* VMD_CTL bitmasks */ +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + +/* VT_CTL bitmasks */ +#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ +#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ +#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ +#define IXGBE_VT_CTL_POOL_SHIFT 7 +#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + +/* VMOLR bitmasks */ +#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ +#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ +#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ +#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ + +/* VFRE bitmask */ +#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +/* RDHMPN and TDHMPN bitmasks */ +#define IXGBE_RDHMPN_RDICADDR 0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 +#define IXGBE_RDMAM_DWORD_SHIFT 9 +#define IXGBE_RDMAM_DESC_COMP_FIFO 1 +#define IXGBE_RDMAM_DFC_CMD_FIFO 2 +#define IXGBE_RDMAM_TCN_STATUS_RAM 4 +#define IXGBE_RDMAM_WB_COLL_FIFO 5 +#define IXGBE_RDMAM_QSC_CNT_RAM 6 +#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 +#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA +#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 +#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 +#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 +#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 +#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 +#define 
IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 +#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 +#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 +#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 +#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 + +#define IXGBE_TXDESCIC_READY 0x80000000 + +/* Receive Checksum Control */ +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* FCRTL Bit Masks */ +#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ +#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ + +/* PAP bit masks*/ +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ + +/* RMCS Bit Masks */ +#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RMCS_RAC 0x00000004 +#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* FCCFG Bit Masks */ +#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ +#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ +#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ +#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ +#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ +#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ +#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define 
IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK ( \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ +#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ +#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ + +#define IXGBE_MAX_FTQF_FILTERS 128 +#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 +#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 +#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 +#define IXGBE_FTQF_PROTOCOL_SCTP 2 +#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 +#define IXGBE_FTQF_PRIORITY_SHIFT 2 +#define IXGBE_FTQF_POOL_MASK 0x0000003F +#define IXGBE_FTQF_POOL_SHIFT 8 +#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 +#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E +#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D +#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B +#define IXGBE_FTQF_DEST_PORT_MASK 0x17 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 +#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 + +/* Interrupt clear mask */ +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_REG_NUM_82599 64 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 + +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ + +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) + +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +/* ETYPE Queue Filter/Select Bit Masks */ +#define IXGBE_MAX_ETQF_FILTERS 8 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ +#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ +#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ + +#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ +#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 +#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ +#define 
IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ + +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! + * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + */ +#define IXGBE_ETQF_FILTER_EAPOL 0 +#define IXGBE_ETQF_FILTER_FCOE 2 +#define IXGBE_ETQF_FILTER_1588 3 +#define IXGBE_ETQF_FILTER_FIP 4 +/* VLAN Control Bit Masks */ +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + +/* VLAN pool filtering masks */ +#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ +#define IXGBE_VLVF_ENTRIES 64 +#define IXGBE_VLVF_VLANID_MASK 0x00000FFF + +/* Per VF Port VLAN insertion rules */ +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* STATUS Bit Masks */ +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ + +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ + +/* ESDP Bit Masks */ +#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ +#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ +#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ +#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ +#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ +#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ + +/* LEDCTL Bit Masks */ +#define IXGBE_LED_IVRT_BASE 0x00000040 +#define IXGBE_LED_BLINK_BASE 0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) + +/* LED modes */ +#define IXGBE_LED_LINK_UP 0x0 +#define IXGBE_LED_LINK_10G 0x1 +#define IXGBE_LED_MAC 0x2 +#define IXGBE_LED_FILTER 0x3 +#define IXGBE_LED_LINK_ACTIVE 0x4 +#define IXGBE_LED_LINK_1G 0x5 +#define IXGBE_LED_ON 0xE +#define IXGBE_LED_OFF 0xF + +/* AUTOC Bit Masks */ +#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 +#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +#define IXGBE_AUTOC_KX_SUPP 0x40000000 +#define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 +#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 +#define IXGBE_AUTOC_RF 0x08000000 +#define IXGBE_AUTOC_PD_TMR 0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_FECA 0x00040000 +#define IXGBE_AUTOC_FECR 0x00020000 +#define IXGBE_AUTOC_KR_SUPP 0x00010000 +#define IXGBE_AUTOC_AN_RESTART 0x00001000 +#define IXGBE_AUTOC_FLU 0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT 13 +#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << 
IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 +#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) + +#define IXGBE_MACC_FLU 0x00000001 +#define IXGBE_MACC_FSV_10G 0x00030000 +#define IXGBE_MACC_FS 0x00040000 +#define IXGBE_MAC_RX2TX_LPBK 0x00000002 + +/* LINKS Bit Masks */ +#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED 0x20000000 +#define IXGBE_LINKS_MODE 0x18000000 +#define IXGBE_LINKS_RX_MODE 0x06000000 +#define IXGBE_LINKS_TX_MODE 0x01800000 +#define IXGBE_LINKS_XGXS_EN 0x00400000 +#define IXGBE_LINKS_SGMII_EN 0x02000000 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +#define IXGBE_LINKS_1G_AN_EN 0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 + +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 + +/* PCS1GLSTA Bit Masks */ +#define IXGBE_PCS1GLSTA_LINK_OK 1 +#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 +#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 + +#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 +#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 + +/* PCS1GLCTL Bit Masks */ +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ +#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 +#define 
IXGBE_PCS1GLCTL_FORCE_LINK 0x20
+#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
+#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
+#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
+
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE 0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE 0x0400
+#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
+
+/* SW Semaphore Register bitmasks */
+#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
+
+/* SW_FW_SYNC/GSSR definitions */
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
+
+/* FW Status register bitmask */
+#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
+
+/* EEC Register */
+#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT 4
+#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
+#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
+/* EEPROM Addressing bits based on type (0-small, 1-large) */
+#define IXGBE_EEC_ADDR_SIZE 0x00000400
+#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
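The SMBI/SWESMBI bits above implement the two-stage software/firmware semaphore that serializes NVM and shared-resource access; the GSSR_* masks above select which shared resource (EEPROM, PHY, flash) the holder intends to touch. A minimal sketch of the handshake, assuming hypothetical rd32()/wr32() accessors in place of the driver's IXGBE_READ_REG()/IXGBE_WRITE_REG() (IXGBE_SWSM itself is the register offset defined earlier in this header):

	/* Sketch only: try to take the SW/FW semaphore once.  Wait for the
	 * software bit (SMBI) to read clear, then set the firmware-visible
	 * bit (SWESMBI) and read it back to confirm we won the race. */
	static int swsm_try_acquire(struct ixgbe_hw *hw)
	{
		u32 swsm = rd32(hw, IXGBE_SWSM);

		if (swsm & IXGBE_SWSM_SMBI)
			return -1;		/* another agent holds it */

		wr32(hw, IXGBE_SWSM, swsm | IXGBE_SWSM_SWESMBI);
		if (!(rd32(hw, IXGBE_SWSM) & IXGBE_SWSM_SWESMBI))
			return -1;		/* firmware claimed it first */
		return 0;			/* semaphore held */
	}

The real acquire path retries this with a delay and clears both bits on release.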
+
+#define IXGBE_EEC_SIZE_SHIFT 11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
+#define IXGBE_EEPROM_OPCODE_BITS 8
+
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
+/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_FREE_SPACE_PTR 0x3E
+#define IXGBE_SAN_MAC_ADDR_PTR 0x28
+#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
+#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
+
+/* MSI-X capability fields masks */
+#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
+
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS 0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
+
+/* EEPROM Commands - SPI */
+#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
+/* EEPROM reset Write Enable latch */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+
+/* EEPROM Read Register */
+#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
+
+#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
+#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+
+#ifndef IXGBE_EERD_EEWR_ATTEMPTS
+/* Number of 5-microsecond intervals we wait for an EERD read or
+ * EEWR write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+#endif
+
+#ifndef IXGBE_FLUDONE_ATTEMPTS
+/* # attempts we wait for flush update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
+#endif
+
+#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI 
Disable */ +#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define IXGBE_FW_LESM_STATE_1 0x1 +#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_FW_PATCH_VERSION_4 0x7 +#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ + +/* PCI Bus Info */ +#define IXGBE_PCI_DEVICE_STATUS 0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 +#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define IXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. */ +#define IXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* RAH */ +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF + +/* Header split receive */ +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
write-back flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +/* Enable short packet padding to 64 bytes */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +/* This allows for 16K packets + 4k for vlan */ +#define IXGBE_MAX_FRAME_SZ 0x40040000 + +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +/* Receive Priority Flow Control Enable */ +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ +#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ +#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ +#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ + +#define IXGBE_MFLCN_RPFCE_SHIFT 4 + +/* Multiple Receive Queue Control */ +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ +#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ +#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ +#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ +#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ +#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ +#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ +#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 + +/* Queue Drop Enable */ +#define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_IDX_MASK 0x00007F00 +#define IXGBE_QDE_IDX_SHIFT 8 + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor 
extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +/* Multiple Transmit Queue Command Register */ +#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ +#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ +#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ +#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ +#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ +#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */ + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ +#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ +#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ +#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ +#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 
bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ + +/* PSRTYPE bit definitions */ +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor. 
*/
+#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
+
+/* Security Processing bit Indication */
+#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+
+/* Masks to determine if packets should be dropped due to frame errors */
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
+
+/* Multicast bit mask */
+#define IXGBE_MCSTCTRL_MFE 0x4
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Vlan-specific macros */
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
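The frame-error masks and the VLAN "special" field macros above are what a receive path uses to vet and then decode each completed descriptor. An illustrative sketch (the helper and its arguments are hypothetical; only the masks and shift come from this header, and the real driver pulls both values out of the writeback descriptor with le32/le16 conversions):

	/* Sketch only: drop frames with MAC-level errors, then split the
	 * 16-bit VLAN field into VLAN ID and 802.1p priority. */
	static int rx_check_and_decode_vlan(u32 staterr, u16 vlan_field,
					    u16 *vid, u8 *prio)
	{
		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
			return -1;	/* CRC/length/over/undersize: drop */

		*vid = vlan_field & IXGBE_RX_DESC_SPECIAL_VLAN_MASK;
		*prio = (vlan_field & IXGBE_RX_DESC_SPECIAL_PRI_MASK) >>
			IXGBE_RX_DESC_SPECIAL_PRI_SHIFT;
		return 0;
	}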
+
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
+
+enum ixgbe_fdir_pballoc_type {
+ IXGBE_FDIR_PBALLOC_NONE = 0,
+ IXGBE_FDIR_PBALLOC_64K = 1,
+ IXGBE_FDIR_PBALLOC_128K = 2,
+ IXGBE_FDIR_PBALLOC_256K = 3,
+};
+#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16
+
+/* Flow Director register values */
+#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
+#define IXGBE_FDIRM_VLANID 0x00000001
+#define IXGBE_FDIRM_VLANP 0x00000002
+#define IXGBE_FDIRM_POOL 0x00000004
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
+
+#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT 0
+#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT 16
+#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
+#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
+#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
+#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
+
+#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
+#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
+#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
+#define IXGBE_FDIRCMD_IPV6 0x00000080
+#define IXGBE_FDIRCMD_CLEARHT 0x00000100
+#define IXGBE_FDIRCMD_DROP 0x00000200
+#define IXGBE_FDIRCMD_INT 0x00000400
+#define IXGBE_FDIRCMD_LAST 0x00000800
+#define IXGBE_FDIRCMD_COLLISION 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
+#define IXGBE_FDIR_INIT_DONE_POLL 10
+#define IXGBE_FDIRCMD_CMD_POLL 10
+
+#define IXGBE_FDIR_DROP_QUEUE 127
+
+/* Manageability Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define 
FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 + +/* Host Interface Command Structures */ +struct ixgbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct ixgbe_hic_drv_info { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. of dword2 */ +}; + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ +#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_RSV 
0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */ +#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Autonegotiation advertised speeds */ +typedef u32 ixgbe_autoneg_advertised; +/* Link speed */ +typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + + +/* Physical layer type */ +typedef u32 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 + +/* Flow Control Macros */ +#define PAUSE_RTT 8 +#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024) + +#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\ + PAUSE_MTU(MTU)) +#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) + +/* Software ATR hash keys */ +#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define IXGBE_ATR_HASH_MASK 0x7fff +#define IXGBE_ATR_L4TYPE_MASK 0x3 +#define 
IXGBE_ATR_L4TYPE_UDP 0x1 +#define IXGBE_ATR_L4TYPE_TCP 0x2 +#define IXGBE_ATR_L4TYPE_SCTP 0x3 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +enum ixgbe_atr_flow_type { + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, +}; + +/* Flow Director ATR input struct. */ +union ixgbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ixgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + +enum ixgbe_eeprom_type { + ixgbe_eeprom_uninitialized = 0, + ixgbe_eeprom_spi, + ixgbe_flash, + ixgbe_eeprom_none /* No NVM support */ +}; + +enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, + ixgbe_mac_82598EB, + ixgbe_mac_82599EB, + ixgbe_mac_X540, + ixgbe_num_macs +}; + +enum ixgbe_phy_type { + ixgbe_phy_unknown = 0, + ixgbe_phy_none, + ixgbe_phy_tn, + ixgbe_phy_aq, + ixgbe_phy_cu_unknown, + ixgbe_phy_qt, + ixgbe_phy_xaui, + ixgbe_phy_nl, + ixgbe_phy_sfp_passive_tyco, + ixgbe_phy_sfp_passive_unknown, + ixgbe_phy_sfp_active_unknown, + ixgbe_phy_sfp_avago, + ixgbe_phy_sfp_ftl, + ixgbe_phy_sfp_ftl_active, + ixgbe_phy_sfp_unknown, + ixgbe_phy_sfp_intel, + ixgbe_phy_sfp_unsupported, + ixgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 - 82599-specific + * 4 SFP_DA_CU_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + */ +enum ixgbe_sfp_type { + ixgbe_sfp_type_da_cu = 0, + ixgbe_sfp_type_sr = 1, + ixgbe_sfp_type_lr = 2, + ixgbe_sfp_type_da_cu_core0 = 3, + ixgbe_sfp_type_da_cu_core1 = 4, + ixgbe_sfp_type_srlr_core0 = 5, + ixgbe_sfp_type_srlr_core1 = 6, + ixgbe_sfp_type_da_act_lmt_core0 = 7, + ixgbe_sfp_type_da_act_lmt_core1 = 8, + ixgbe_sfp_type_1g_cu_core0 = 9, + ixgbe_sfp_type_1g_cu_core1 = 10, + ixgbe_sfp_type_not_present = 0xFFFE, + ixgbe_sfp_type_unknown = 0xFFFF +}; + +enum ixgbe_media_type { + ixgbe_media_type_unknown = 0, + ixgbe_media_type_fiber, + ixgbe_media_type_fiber_lco, + ixgbe_media_type_copper, + ixgbe_media_type_backplane, + ixgbe_media_type_cx4, + ixgbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ixgbe_fc_mode { + ixgbe_fc_none = 0, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full, +#ifdef CONFIG_DCB + ixgbe_fc_pfc, +#endif + ixgbe_fc_default +}; + +/* Smart Speed Settings */ +#define IXGBE_SMARTSPEED_MAX_RETRIES 3 +enum ixgbe_smart_speed { + ixgbe_smart_speed_auto = 0, + ixgbe_smart_speed_on, + ixgbe_smart_speed_off +}; + +/* PCI bus types */ +enum ixgbe_bus_type { + ixgbe_bus_type_unknown = 0, + ixgbe_bus_type_pci, + ixgbe_bus_type_pcix, + ixgbe_bus_type_pci_express, + ixgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ixgbe_bus_speed { + 
ixgbe_bus_speed_unknown = 0, + ixgbe_bus_speed_33 = 33, + ixgbe_bus_speed_66 = 66, + ixgbe_bus_speed_100 = 100, + ixgbe_bus_speed_120 = 120, + ixgbe_bus_speed_133 = 133, + ixgbe_bus_speed_2500 = 2500, + ixgbe_bus_speed_5000 = 5000, + ixgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ixgbe_bus_width { + ixgbe_bus_width_unknown = 0, + ixgbe_bus_width_pcie_x1 = 1, + ixgbe_bus_width_pcie_x2 = 2, + ixgbe_bus_width_pcie_x4 = 4, + ixgbe_bus_width_pcie_x8 = 8, + ixgbe_bus_width_32 = 32, + ixgbe_bus_width_64 = 64, + ixgbe_bus_width_reserved +}; + +struct ixgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool uc_set_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct ixgbe_bus_info { + enum ixgbe_bus_speed speed; + enum ixgbe_bus_width width; + enum ixgbe_bus_type type; + + u16 func; + u16 lan_id; +}; + +/* Flow control parameters */ +struct ixgbe_fc_info { + u32 high_water; /* Flow Control High-water */ + u32 low_water; /* Flow Control Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ + enum ixgbe_fc_mode current_mode; /* FC mode in effect */ + enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ixgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 rqsmr[16]; + u64 tqsmr[8]; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fdirustat_add; + u64 fdirustat_remove; + u64 fdirfstat_fadd; + u64 fdirfstat_fremove; + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct ixgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct ixgbe_eeprom_operations { + s32 (*init_params)(struct ixgbe_hw *); + s32 (*read)(struct ixgbe_hw *, u16, u16 *); + s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*write)(struct ixgbe_hw *, u16, u16); + s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); + u16 (*calc_checksum)(struct ixgbe_hw *); +}; + +struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); + s32 (*reset_hw)(struct ixgbe_hw *); + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + u32 (*get_supported_physical_layer)(struct 
ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); + s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + void (*set_lan_id)(struct ixgbe_hw *); + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); + s32 (*setup_sfp)(struct ixgbe_hw *); + s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); + void (*release_swfw_sync)(struct ixgbe_hw *, u16); + + /* Link */ + void (*disable_tx_laser)(struct ixgbe_hw *); + void (*enable_tx_laser)(struct ixgbe_hw *); + void (*flap_tx_laser)(struct ixgbe_hw *); + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + bool *); + + /* Packet Buffer Manipulation */ + void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); + + /* LED */ + s32 (*led_on)(struct ixgbe_hw *, u32); + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + s32 (*clear_rar)(struct ixgbe_hw *, u32); + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct ixgbe_hw *); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + s32 (*enable_mc)(struct ixgbe_hw *); + s32 (*disable_mc)(struct ixgbe_hw *); + s32 (*clear_vfta)(struct ixgbe_hw *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + s32 (*init_uta_tables)(struct ixgbe_hw *); + void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); + + /* Flow Control */ + s32 (*fc_enable)(struct ixgbe_hw *, s32); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); +}; + +struct ixgbe_phy_operations { + s32 (*identify)(struct ixgbe_hw *); + s32 (*identify_sfp)(struct ixgbe_hw *); + s32 (*init)(struct ixgbe_hw *); + s32 (*reset)(struct ixgbe_hw *); + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); + s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, + bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); + s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + s32 (*check_overtemp)(struct ixgbe_hw *); +}; + +struct ixgbe_eeprom_info { + struct ixgbe_eeprom_operations ops; + enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; +}; + +#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct ixgbe_mac_info { + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 
wwpn_prefix; +#define IXGBE_MAX_MTA 128 + u32 mta_shadow[IXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 max_msix_vectors; + u32 orig_autoc; + u32 orig_autoc2; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; +}; + +struct ixgbe_phy_info { + struct ixgbe_phy_operations ops; + struct mdio_if_info mdio; + enum ixgbe_phy_type type; + u32 id; + enum ixgbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ixgbe_media_type media_type; + bool reset_disable; + ixgbe_autoneg_advertised autoneg_advertised; + enum ixgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; +}; + +#include "ixgbe_mbx.h" + +struct ixgbe_mbx_operations { + s32 (*init_params)(struct ixgbe_hw *hw); + s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct ixgbe_hw *, u16); + s32 (*check_for_ack)(struct ixgbe_hw *, u16); + s32 (*check_for_rst)(struct ixgbe_hw *, u16); +}; + +struct ixgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ixgbe_mbx_info { + struct ixgbe_mbx_operations ops; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u32 v2p_mailbox; + u16 size; +}; + +struct ixgbe_hw { + u8 __iomem *hw_addr; + void *back; + struct ixgbe_mac_info mac; + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; + struct ixgbe_eeprom_info eeprom; + struct ixgbe_bus_info bus; + struct ixgbe_mbx_info mbx; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + bool force_full_reset; +}; + +struct ixgbe_info { + enum ixgbe_mac_type mac; + s32 (*get_invariants)(struct ixgbe_hw *); + struct ixgbe_mac_operations *mac_ops; + struct ixgbe_eeprom_operations *eeprom_ops; + struct ixgbe_phy_operations *phy_ops; + struct ixgbe_mbx_operations *mbx_ops; +}; + + +/* Error Codes */ +#define IXGBE_ERR_EEPROM -1 +#define IXGBE_ERR_EEPROM_CHECKSUM -2 +#define IXGBE_ERR_PHY -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_PARAM -5 +#define IXGBE_ERR_MAC_TYPE -6 +#define IXGBE_ERR_UNKNOWN_PHY -7 +#define IXGBE_ERR_LINK_SETUP -8 +#define IXGBE_ERR_ADAPTER_STOPPED -9 +#define IXGBE_ERR_INVALID_MAC_ADDR -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +#define IXGBE_ERR_RESET_FAILED -15 +#define IXGBE_ERR_SWFW_SYNC -16 +#define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_ERR_I2C -18 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 +#define IXGBE_ERR_SFP_NOT_PRESENT -20 +#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 +#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 +#define IXGBE_ERR_FDIR_REINIT_FAILED -23 +#define IXGBE_ERR_EEPROM_VERSION -24 +#define IXGBE_ERR_NO_SPACE -25 +#define IXGBE_ERR_OVERTEMP -26 +#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 +#define IXGBE_ERR_FC_NOT_SUPPORTED -28 +#define IXGBE_ERR_FLOW_CONTROL -29 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 +#define IXGBE_ERR_PBA_SECTION -31 +#define IXGBE_ERR_INVALID_ARGUMENT -32 +#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + +#endif /* 
_IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c new file mode 100644 index 0000000..2696c78 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -0,0 +1,941 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include "ixgbe.h" +#include "ixgbe_phy.h" + +#define IXGBE_X540_MAX_TX_QUEUES 128 +#define IXGBE_X540_MAX_RX_QUEUES 128 +#define IXGBE_X540_RAR_ENTRIES 128 +#define IXGBE_X540_MC_TBL_SIZE 128 +#define IXGBE_X540_VFT_TBL_SIZE 128 +#define IXGBE_X540_RX_PB_SIZE 384 + +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); +static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); + +static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +{ + return ixgbe_media_type_copper; +} + +static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* Call PHY identify routine to get the phy type */ + ixgbe_identify_phy_generic(hw); + + mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; + mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; + mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + * @autoneg_wait_to_complete: true when waiting for completion is needed + **/ +static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + return hw->phy.ops.setup_link_speed(hw, speed, autoneg, + autoneg_wait_to_complete); +} + +/** + * ixgbe_reset_hw_X540 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, performs a PHY reset, and performs a link (MAC) + * reset.
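+ * In outline, the flow below is: stop the adapter, disable PCI-E + * master access, assert CTRL.LNK_RST when link is down (or a full + * reset is forced) and CTRL.RST otherwise, then poll the bit until + * it self-clears.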
+ **/ +static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status = 0; + u32 ctrl; + u32 ctrl_ext; + u32 reset_bit; + u32 i; + u32 autoc; + u32 autoc2; + bool link_up = false; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + hw->mac.ops.stop_adapter(hw); + + /* + * Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests before reset + */ + ixgbe_disable_pcie_master(hw); + +mac_reset_top: + /* + * Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + if (hw->force_full_reset) { + reset_bit = IXGBE_CTRL_LNK_RST; + } else { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (!link_up) + reset_bit = IXGBE_CTRL_LNK_RST; + else + reset_bit = IXGBE_CTRL_RST; + } + + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit)); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & reset_bit)) + break; + } + if (ctrl & reset_bit) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. We use 1usec since that is + * what is needed for ixgbe_disable_pcie_master(). The second reset + * then clears out any effects of those events. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + udelay(1); + goto mac_reset_top; + } + + /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + + msleep(50); + + /* Set the Rx packet buffer size. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store the original AUTOC/AUTOC2 values if they have not been + * stored off yet. Otherwise restore the stored original + * values since the reset operation sets back to defaults. + */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_autoc2 = autoc2; + hw->mac.orig_link_settings_stored = true; + } else { + if (autoc != hw->mac.orig_autoc) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | + IXGBE_AUTOC_AN_RESTART)); + + if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != + (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { + autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; + autoc2 |= (hw->mac.orig_autoc2 & + IXGBE_AUTOC2_UPPER_MASK); + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); + } + } + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address.
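+ * (When a valid SAN MAC address is found below, it is programmed + * into the last RAR and num_rar_entries is decremented to reserve + * that entry.)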
+ */ + hw->mac.num_rar_entries = IXGBE_X540_RAR_ENTRIES; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + + return status; +} + +/** + * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function and then the + * generation-specific start_hw function. + * Then performs revision-specific operations, if any. + **/ +static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +{ + s32 ret_val = 0; + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val != 0) + goto out; + + ret_val = ixgbe_start_hw_gen2(hw); + hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE; +out: + return ret_val; +} + +/** + * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + hw->phy.ops.identify(hw); + + hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, + &ext_ability); + if (ext_ability & MDIO_PMA_EXTABLE_10GBT) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_1000BT) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_100BTX) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + + return physical_layer; +} + +/** + * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return 0; +} + +/** + * ixgbe_read_eerd_X540 - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register.
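+ * The access is bracketed by acquire/release of the + * IXGBE_GSSR_EEP_SM software/firmware semaphore so that software + * and firmware never touch the NVM at the same time.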
+ **/ +static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + 0) + status = ixgbe_read_eerd_generic(hw, offset, data); + else + status = IXGBE_ERR_SWFW_SYNC; + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads 16 bit word(s) from the EEPROM using the EERD register. + **/ +static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + 0) + status = ixgbe_read_eerd_buffer_generic(hw, offset, + words, data); + else + status = IXGBE_ERR_SWFW_SYNC; + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word to write to the EEPROM + * + * Writes a 16 bit word to the EEPROM using the EEWR register. + **/ +static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) + status = ixgbe_write_eewr_generic(hw, offset, data); + else + status = IXGBE_ERR_SWFW_SYNC; + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) to write to the EEPROM + * + * Writes 16 bit word(s) to the EEPROM using the EEWR register. + **/ +static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + 0) + status = ixgbe_write_eewr_buffer_generic(hw, offset, + words, data); + else + status = IXGBE_ERR_SWFW_SYNC; + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * This function does not use synchronization for EERD and EEWR. It can + * be used internally by functions that utilize ixgbe_acquire_swfw_sync_X540. + **/ +static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + + /* + * Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores here. Instead use + * ixgbe_read_eerd_generic + */ + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (ixgbe_read_eerd_generic(hw, i, &word) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + checksum += word; + } + + /* + * Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers.
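+ * Each pointer word holds the offset of a section whose first word + * is that section's length; the words at pointer+1 through + * pointer+length are added to the running sum, and sections with an + * out-of-range pointer or length are skipped. The stored checksum + * is then IXGBE_EEPROM_SUM minus this sum.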
+ */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + + /* Skip pointer section if length is invalid. */ + if (length == 0xFFFF || length == 0 || + (pointer + length) >= hw->eeprom.word_size) + continue; + + for (j = pointer+1; j <= pointer+length; j++) { + if (ixgbe_read_eerd_generic(hw, j, &word) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + + if (status != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + goto out; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { + checksum = hw->eeprom.ops.calc_checksum(hw); + + /* + * Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores twice here. + */ + ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + + /* + * Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +out: + return status; +} + +/** + * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + + if (status != 0) + hw_dbg(hw, "EEPROM read failed\n"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { + checksum = hw->eeprom.ops.calc_checksum(hw); + + /* + * Do not use hw->eeprom.ops.write because we do not want to + * take the synchronization semaphores twice here. 
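+ * The EEP_SM semaphore is already held at this point, and + * ixgbe_write_eewr_generic performs the raw EEWR access without + * re-acquiring it.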
+ */ + status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + + if (status == 0) + status = ixgbe_update_flash_X540(hw); + else + status = IXGBE_ERR_SWFW_SYNC; + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** + * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy + * EEPROM from shadow RAM to the flash device. + **/ +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +{ + u32 flup; + s32 status = IXGBE_ERR_EEPROM; + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_ERR_EEPROM) { + hw_dbg(hw, "Flash update time out\n"); + goto out; + } + + flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == 0) + hw_dbg(hw, "Flash update complete\n"); + else + hw_dbg(hw, "Flash update time out\n"); + + if (hw->revision_id == 0) { + flup = IXGBE_READ_REG(hw, IXGBE_EEC); + + if (flup & IXGBE_EEC_SEC1VAL) { + flup |= IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + } + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == 0) + hw_dbg(hw, "Flash update complete\n"); + else + hw_dbg(hw, "Flash update time out\n"); + } +out: + return status; +} + +/** + * ixgbe_poll_flash_update_done_X540 - Poll flash update status + * @hw: pointer to hardware structure + * + * Polls the FLUDONE (bit 26) of the EEC Register to determine when the + * flash update is done. + **/ +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_EEC); + if (reg & IXGBE_EEC_FLUDONE) { + status = 0; + break; + } + udelay(5); + } + return status; +} + +/** + * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the SW_FW_SYNC register for + * the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 5; + u32 hwmask = 0; + u32 timeout = 200; + u32 i; + + if (swmask == IXGBE_GSSR_EEP_SM) + hwmask = IXGBE_GSSR_FLASH_SM; + + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (!(swfw_sync & (fwmask | swmask | hwmask))) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + break; + } else { + /* + * Firmware currently using resource (fwmask), + * hardware currently using resource (hwmask), + * or other software thread currently using + * resource (swmask) + */ + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 10000); + } + } + + /* + * If the resource is not released by the FW/HW the SW can assume that + * the FW/HW has malfunctioned. In that case the SW should set the + * SW bit(s) of the requested resource(s) while ignoring the + * corresponding FW/HW bits in the SW_FW_SYNC register.
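+ * This is a last-resort takeover: rather than deadlock behind a bit + * that will never clear, the driver claims the resource anyway once + * the timeout expires.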
+ */ + if (i >= timeout) { + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (swfw_sync & (fwmask | hwmask)) { + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + } + } + + usleep_range(5000, 10000); + return 0; +} + +/** + * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the SW_FW_SYNC register + * for the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + + ixgbe_get_swfw_sync_semaphore(hw); + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swfw_sync &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 10000); +} + +/** + * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Acquires the hardware semaphores so SW/FW can gain control of shared resources + **/ +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = 0; + break; + } + udelay(50); + } + + /* Now get the semaphore between SW/FW through the REGSMP bit */ + if (!status) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (!(swsm & IXGBE_SWFW_REGSMP)) + break; + + udelay(50); + } + } else { + hw_dbg(hw, "Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm &= ~IXGBE_SWSM_SMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swsm &= ~IXGBE_SWFW_REGSMP; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_blink_led_start_X540 - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Devices that implement the version 2 interface: + * X540 + **/ +static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + + /* + * In order for the blink bit in the LED control register + * to work, link and speed must be forced in the MAC. We + * will reverse this when we stop the blinking. + */ + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + + /* Set the LED to LINK_UP + BLINK.
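+ * The blink bit takes effect only while the MAC reports link up, + * which is why link and speed were forced above; the stop routine + * below reverses both.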
*/ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + * + * Devices that implement the version 2 interface: + * X540 + **/ +static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + + /* Restore the LED to its default value. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + ledctl_reg &= ~IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + + /* Unforce link and speed in the MAC. */ + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} +static struct ixgbe_mac_operations mac_ops_X540 = { + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_X540, + .start_hw = &ixgbe_start_hw_X540, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, + .get_media_type = &ixgbe_get_media_type_X540, + .get_supported_physical_layer = + &ixgbe_get_supported_physical_layer_X540, + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, + .get_device_caps = &ixgbe_get_device_caps_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, + .read_analog_reg8 = NULL, + .write_analog_reg8 = NULL, + .setup_link = &ixgbe_setup_mac_link_X540, + .set_rxpba = &ixgbe_set_rxpba_generic, + .check_link = &ixgbe_check_mac_link_generic, + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + .blink_led_start = &ixgbe_blink_led_start_X540, + .blink_led_stop = &ixgbe_blink_led_stop_X540, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_generic, + .clear_vmdq = &ixgbe_clear_vmdq_generic, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_generic, + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = NULL, + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_X540 = { + .init_params = &ixgbe_init_eeprom_params_X540, + .read = &ixgbe_read_eerd_X540, + .read_buffer = &ixgbe_read_eerd_buffer_X540, + .write = &ixgbe_write_eewr_X540, + .write_buffer = &ixgbe_write_eewr_buffer_X540, + .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, + .validate_checksum = &ixgbe_validate_eeprom_checksum_X540, + .update_checksum = &ixgbe_update_eeprom_checksum_X540, 
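+ /* calc_checksum is invoked with the EEP_SM semaphore already held */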
+}; + +static struct ixgbe_phy_operations phy_ops_X540 = { + .identify = &ixgbe_identify_phy_generic, + .identify_sfp = &ixgbe_identify_sfp_module_generic, + .init = NULL, + .reset = NULL, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, +}; + +struct ixgbe_info ixgbe_X540_info = { + .mac = ixgbe_mac_X540, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X540, + .eeprom_ops = &eeprom_ops_X540, + .phy_ops = &phy_ops_X540, + .mbx_ops = &mbx_ops_generic, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile new file mode 100644 index 0000000..1f35d22 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -0,0 +1,38 @@ +################################################################################ +# +# Intel 82599 Virtual Function driver +# Copyright(c) 1999 - 2010 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 82599 VF ethernet driver +# + +obj-$(CONFIG_IXGBEVF) += ixgbevf.o + +ixgbevf-objs := vf.o \ + mbx.o \ + ethtool.o \ + ixgbevf_main.o + diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h new file mode 100644 index 0000000..78abb6f --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -0,0 +1,297 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBEVF_DEFINES_H_ +#define _IXGBEVF_DEFINES_H_ + +/* Device IDs */ +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 + +#define IXGBE_VF_IRQ_CLEAR_MASK 7 +#define IXGBE_VF_MAX_TX_QUEUES 1 +#define IXGBE_VF_MAX_RX_QUEUES 1 +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 + +/* Link speed */ +typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 + +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 + +/* DCA Control */ +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ + +/* PSRTYPE bit definitions */ +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 
/* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define 
IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK ( \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_MAILBOX | \ + IXGBE_EIMS_OTHER) + +#define IXGBE_EITR_CNT_WDIS 0x80000000 + +/* Error Codes */ +#define IXGBE_ERR_INVALID_MAC_ADDR -1 +#define IXGBE_ERR_RESET_FAILED -2 + +#endif /* _IXGBEVF_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c new file mode 100644 index 0000000..deee375 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -0,0 +1,742 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for ixgbevf */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/vmalloc.h> +#include <linux/if_vlan.h> +#include <linux/uaccess.h> + +#include "ixgbevf.h" + +#define IXGBE_ALL_RAR_ENTRIES 16 + +#ifdef ETHTOOL_GSTATS +struct ixgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; + int base_stat_offset; + int saved_reset_offset; +}; + +#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \ + offsetof(struct ixgbevf_adapter, m), \ + offsetof(struct ixgbevf_adapter, b), \ + offsetof(struct ixgbevf_adapter, r) +static struct ixgbe_stats ixgbe_gstrings_stats[] = { + {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, + stats.saved_reset_vfgprc)}, + {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, + stats.saved_reset_vfgptc)}, + {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc, + stats.saved_reset_vfgorc)}, + {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, + stats.saved_reset_vfgotc)}, + {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)}, + {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, + stats.saved_reset_vfmprc)}, + {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base, + zero_base)}, + {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base, + zero_base)}, + {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, + zero_base)}, + {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)}, +}; + +#define IXGBE_QUEUE_STATS_LEN 0 +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) + +#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST +static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Link test (on/offline)" +}; +#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) +#endif /* ETHTOOL_TEST */ + +static int ixgbevf_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = 0; + bool link_up; + + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_DUMMY1; + ecmd->port = -1; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + + if (link_up) { + ethtool_cmd_speed_set( + ecmd, + (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000); + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, -1); + ecmd->duplex = -1; + } + + return 0; +} + +static u32 ixgbevf_get_rx_csum(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED; +} + +static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + if (data) + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; + else + adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; + + if (netif_running(netdev)) { + if (!adapter->dev_closed) + ixgbevf_reinit_locked(adapter); + } else { + ixgbevf_reset(adapter); + } + + return 0; +} + +static int ixgbevf_set_tso(struct net_device *netdev, u32 data) +{ + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netif_tx_stop_all_queues(netdev); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + netif_tx_start_all_queues(netdev); + } + return 0; +} + +static u32 ixgbevf_get_msglevel(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) + +static char *ixgbevf_reg_names[] = { + "IXGBE_VFCTRL", + "IXGBE_VFSTATUS", + "IXGBE_VFLINKS", + "IXGBE_VFRXMEMWRAP", + "IXGBE_VFFRTIMER", + "IXGBE_VTEICR", + "IXGBE_VTEICS", + "IXGBE_VTEIMS", + "IXGBE_VTEIMC", + "IXGBE_VTEIAC", + "IXGBE_VTEIAM", + "IXGBE_VTEITR", + "IXGBE_VTIVAR", + "IXGBE_VTIVAR_MISC", + "IXGBE_VFRDBAL0", + "IXGBE_VFRDBAL1", + "IXGBE_VFRDBAH0", + "IXGBE_VFRDBAH1", + "IXGBE_VFRDLEN0", + "IXGBE_VFRDLEN1", + "IXGBE_VFRDH0", + "IXGBE_VFRDH1", + "IXGBE_VFRDT0", + "IXGBE_VFRDT1", + "IXGBE_VFRXDCTL0", + "IXGBE_VFRXDCTL1", + "IXGBE_VFSRRCTL0", + "IXGBE_VFSRRCTL1", + "IXGBE_VFPSRTYPE", + "IXGBE_VFTDBAL0", + "IXGBE_VFTDBAL1", + "IXGBE_VFTDBAH0", + "IXGBE_VFTDBAH1", + "IXGBE_VFTDLEN0", + "IXGBE_VFTDLEN1", + "IXGBE_VFTDH0", + "IXGBE_VFTDH1", + "IXGBE_VFTDT0", + "IXGBE_VFTDT1", + "IXGBE_VFTXDCTL0", + "IXGBE_VFTXDCTL1", + "IXGBE_VFTDWBAL0", + "IXGBE_VFTDWBAL1", + "IXGBE_VFTDWBAH0", + "IXGBE_VFTDWBAH1" +}; + + +static int ixgbevf_get_regs_len(struct net_device *netdev) +{ + return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32); +} + +static void ixgbevf_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, + void *p) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u32 regs_len = ixgbevf_get_regs_len(netdev); + u8 i; + + memset(p, 0, regs_len); + + regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; + + /* General Registers */ + regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL); + regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); + regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER); + + /* Interrupt */ + /* don't read EICR because it can clear interrupt causes, instead + * read EICS which is a shadow but doesn't clear EICR */ + regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); + regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); + regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC); + regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC); + 
regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM); + regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0)); + regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0)); + regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + + /* Receive DMA */ + for (i = 0; i < 2; i++) + regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i)); + for (i = 0; i < 2; i++) + regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i)); + for (i = 0; i < 2; i++) + regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i)); + for (i = 0; i < 2; i++) + regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i)); + for (i = 0; i < 2; i++) + regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + for (i = 0; i < 2; i++) + regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); + + /* Receive */ + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE); + + /* Transmit */ + for (i = 0; i < 2; i++) + regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i)); + for (i = 0; i < 2; i++) + regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i)); + for (i = 0; i < 2; i++) + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i)); + for (i = 0; i < 2; i++) + regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i)); + for (i = 0; i < 2; i++) + regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + for (i = 0; i < 2; i++) + regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i)); + + for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++) + hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]); +} + +static void ixgbevf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, ixgbevf_driver_name, 32); + strlcpy(drvinfo->version, ixgbevf_driver_version, 32); + + strlcpy(drvinfo->fw_version, "N/A", 4); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); +} + +static void ixgbevf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_ring *tx_ring = adapter->tx_ring; + struct ixgbevf_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = IXGBEVF_MAX_RXD; + ring->tx_max_pending = IXGBEVF_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int ixgbevf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD); + new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD); + new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring->count) && + (new_rx_count == adapter->rx_ring->count)) { + /* nothing to do */ + return 0; + } + + while 
(test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) + msleep(1); + + /* + * If the adapter isn't up and running then just set the + * new parameters and scurry for the exits. + */ + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i].count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i].count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct ixgbevf_ring), GFP_KERNEL); + if (!tx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct ixgbevf_ring), GFP_KERNEL); + if (!rx_ring) { + err = -ENOMEM; + goto err_rx_setup; + } + + ixgbevf_down(adapter); + + memcpy(tx_ring, adapter->tx_ring, + adapter->num_tx_queues * sizeof(struct ixgbevf_ring)); + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_ring[i].count = new_tx_count; + err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]); + if (err) { + while (i) { + i--; + ixgbevf_free_tx_resources(adapter, + &tx_ring[i]); + } + goto err_tx_ring_setup; + } + tx_ring[i].v_idx = adapter->tx_ring[i].v_idx; + } + + memcpy(rx_ring, adapter->rx_ring, + adapter->num_rx_queues * sizeof(struct ixgbevf_ring)); + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring[i].count = new_rx_count; + err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); + if (err) { + while (i) { + i--; + ixgbevf_free_rx_resources(adapter, + &rx_ring[i]); + } + goto err_rx_ring_setup; + } + rx_ring[i].v_idx = adapter->rx_ring[i].v_idx; + } + + /* + * Only switch to new rings if all the prior allocations + * and ring setups have succeeded. + */ + kfree(adapter->tx_ring); + adapter->tx_ring = tx_ring; + adapter->tx_ring_count = new_tx_count; + + kfree(adapter->rx_ring); + adapter->rx_ring = rx_ring; + adapter->rx_ring_count = new_rx_count; + + /* success! */ + ixgbevf_up(adapter); + + goto clear_reset; + +err_rx_ring_setup: + for(i = 0; i < adapter->num_tx_queues; i++) + ixgbevf_free_tx_resources(adapter, &tx_ring[i]); + +err_tx_ring_setup: + kfree(rx_ring); + +err_rx_setup: + kfree(tx_ring); + +clear_reset: + clear_bit(__IXGBEVF_RESETTING, &adapter->state); + return err; +} + +static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) +{ + switch (stringset) { + case ETH_SS_TEST: + return IXGBE_TEST_LEN; + case ETH_SS_STATS: + return IXGBE_GLOBAL_STATS_LEN; + default: + return -EINVAL; + } +} + +static void ixgbevf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int i; + + ixgbevf_update_stats(adapter); + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + char *p = (char *)adapter + + ixgbe_gstrings_stats[i].stat_offset; + char *b = (char *)adapter + + ixgbe_gstrings_stats[i].base_stat_offset; + char *r = (char *)adapter + + ixgbe_gstrings_stats[i].saved_reset_offset; + data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p) - + ((ixgbe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)b : *(u32 *)b) + + ((ixgbe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)r : *(u32 *)r); + } +} + +static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *ixgbe_gstrings_test, + IXGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + *data = 0; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (!link_up) + *data = 1; + + return *data; +} + +/* ethtool register test data */ +struct ixgbevf_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default VF register test */ +static const struct ixgbevf_reg_test reg_test_vf[] = { + { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 }, + { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + { 0, 0, 0, 0 } +}; + +static const u32 register_test_patterns[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF +}; + +#define REG_PATTERN_TEST(R, M, W) \ +{ \ + u32 pat, val, before; \ + for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \ + before = readl(adapter->hw.hw_addr + R); \ + writel((register_test_patterns[pat] & W), \ + (adapter->hw.hw_addr + R)); \ + val = readl(adapter->hw.hw_addr + R); \ + if (val != (register_test_patterns[pat] & W & M)) { \ + hw_dbg(&adapter->hw, \ + "pattern test reg %04X failed: got " \ + "0x%08X expected 0x%08X\n", \ + R, val, (register_test_patterns[pat] & W & M)); \ + *data = R; \ + writel(before, adapter->hw.hw_addr + R); \ + return 1; \ + } \ + writel(before, adapter->hw.hw_addr + R); \ + } \ +} + +#define REG_SET_AND_CHECK(R, M, W) \ +{ \ + u32 val, before; \ + before = readl(adapter->hw.hw_addr + R); \ + writel((W & M), (adapter->hw.hw_addr + R)); \ + val = readl(adapter->hw.hw_addr + R); \ + if ((W & M) != (val & M)) { \ + printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \ + "expected 0x%08X\n", R, (val & M), (W & M)); \ + *data = R; \ + writel(before, (adapter->hw.hw_addr + R)); \ + return 1; \ + } \ + writel(before, (adapter->hw.hw_addr + R)); \ +} + +static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) +{ + const struct ixgbevf_reg_test *test; + u32 i; + + test = reg_test_vf; + + /* + * Perform 
the register test, looping through the test table + * until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + REG_PATTERN_TEST(test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + REG_SET_AND_CHECK(test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + writel(test->write, + (adapter->hw.hw_addr + test->reg) + + (i * 0x40)); + break; + case TABLE32_TEST: + REG_PATTERN_TEST(test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + REG_PATTERN_TEST(test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + REG_PATTERN_TEST((test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + } + test++; + } + + *data = 0; + return *data; +} + +static void ixgbevf_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + set_bit(__IXGBEVF_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + hw_dbg(&adapter->hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (ixgbevf_link_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + ixgbevf_reset(adapter); + + hw_dbg(&adapter->hw, "register testing starting\n"); + if (ixgbevf_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbevf_reset(adapter); + + clear_bit(__IXGBEVF_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + hw_dbg(&adapter->hw, "online testing starting\n"); + /* Online tests */ + if (ixgbevf_link_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + + clear_bit(__IXGBEVF_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static int ixgbevf_nway_reset(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (!adapter->dev_closed) + ixgbevf_reinit_locked(adapter); + } + + return 0; +} + +static struct ethtool_ops ixgbevf_ethtool_ops = { + .get_settings = ixgbevf_get_settings, + .get_drvinfo = ixgbevf_get_drvinfo, + .get_regs_len = ixgbevf_get_regs_len, + .get_regs = ixgbevf_get_regs, + .nway_reset = ixgbevf_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = ixgbevf_get_ringparam, + .set_ringparam = ixgbevf_set_ringparam, + .get_rx_csum = ixgbevf_get_rx_csum, + .set_rx_csum = ixgbevf_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_ipv6_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_msglevel = ixgbevf_get_msglevel, + .set_msglevel = ixgbevf_set_msglevel, + .get_tso = ethtool_op_get_tso, + .set_tso = ixgbevf_set_tso, + .self_test = ixgbevf_diag_test, + .get_sset_count = ixgbevf_get_sset_count, + .get_strings = ixgbevf_get_strings, + .get_ethtool_stats = ixgbevf_get_ethtool_stats, +}; + +void ixgbevf_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops); +} diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h new file mode 100644 index 0000000..8857df4 --- /dev/null 
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -0,0 +1,318 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBEVF_H_ +#define _IXGBEVF_H_ + +#include +#include +#include +#include +#include +#include + +#include "vf.h" + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ixgbevf_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + u16 mapped_as_page; +}; + +struct ixgbevf_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + dma_addr_t page_dma; + unsigned int page_offset; +}; + +struct ixgbevf_ring { + struct ixgbevf_adapter *adapter; /* backlink */ + void *desc; /* descriptor ring memory */ + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + unsigned int count; /* amount of descriptors */ + unsigned int next_to_use; + unsigned int next_to_clean; + + int queue_index; /* needed for multiqueue queue management */ + union { + struct ixgbevf_tx_buffer *tx_buffer_info; + struct ixgbevf_rx_buffer *rx_buffer_info; + }; + + u16 head; + u16 tail; + + unsigned int total_bytes; + unsigned int total_packets; + + u16 reg_idx; /* holds the special value that gets the hardware register + * offset associated with this ring, which is different + * for DCB and RSS modes */ + +#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) + /* cpu for tx queue */ + int cpu; +#endif + + u64 v_idx; /* maps directly to the index for this ring in the hardware + * vector array, can also be used for finding the bit in EICR + * and friends that represents the vector for this ring */ + + u16 work_limit; /* max work per interrupt */ + u16 rx_buf_len; +}; + +enum ixgbevf_ring_f_enum { + RING_F_NONE = 0, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +struct ixgbevf_ring_feature { + int indices; + int mask; +}; + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
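+ * (Batching refills lets the driver bump the RDT tail register once + * per bundle instead of once per descriptor; see the + * IXGBEVF_RX_BUFFER_WRITE check in ixgbevf_clean_rx_irq().)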
*/ +#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define MAX_RX_QUEUES 1 +#define MAX_TX_QUEUES 1 + +#define IXGBEVF_DEFAULT_TXD 1024 +#define IXGBEVF_DEFAULT_RXD 512 +#define IXGBEVF_MAX_TXD 4096 +#define IXGBEVF_MIN_TXD 64 +#define IXGBEVF_MAX_RXD 4096 +#define IXGBEVF_MIN_RXD 64 + +/* Supported Rx Buffer Sizes */ +#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */ +#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */ +#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ +#define IXGBEVF_RXBUFFER_2048 2048 +#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ + +#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +#define IXGBE_TX_FLAGS_CSUM (u32)(1) +#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1) +#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) +#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) +#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4) +#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5) +#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 +#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct ixgbevf_q_vector { + struct ixgbevf_adapter *adapter; + struct napi_struct napi; + DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ + DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ + u8 rxr_count; /* Rx ring count assigned to this vector */ + u8 txr_count; /* Tx ring count assigned to this vector */ + u8 tx_itr; + u8 rx_itr; + u32 eitr; + int v_idx; /* vector index in list */ +}; + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways. The lowest value + * supported by all of the ixgbe hardware is 8. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +#define IXGBE_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define IXGBE_RX_DESC_ADV(R, i) \ + (&(((union ixgbe_adv_rx_desc *)((R).desc))[i])) +#define IXGBE_TX_DESC_ADV(R, i) \ + (&(((union ixgbe_adv_tx_desc *)((R).desc))[i])) +#define IXGBE_TX_CTXTDESC_ADV(R, i) \ + (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) + +#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 + +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) + +#define MAX_MSIX_Q_VECTORS 2 +#define MAX_MSIX_COUNT 2 + +#define MIN_MSIX_Q_VECTORS 2 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* board specific private data structure */ +struct ixgbevf_adapter { + struct timer_list watchdog_timer; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 bd_number; + struct work_struct reset_task; + struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; + + /* Interrupt Throttle Rate */ + u32 itr_setting; + u16 eitr_low; + u16 eitr_high; + + /* TX */ + struct ixgbevf_ring *tx_ring; /* One per active queue */ + int num_tx_queues; + u64 restart_queue; + u64 hw_csum_tx_good; + u64 lsc_int; + u64 hw_tso_ctxt; + u64 hw_tso6_ctxt; + u32 tx_timeout_count; + + /* RX */ + struct ixgbevf_ring *rx_ring; /* One per active queue */ + int num_rx_queues; + int num_rx_pools; /* == num_rx_queues in 82598 */ + int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ + u64 hw_csum_rx_error; + u64 hw_rx_no_dma_resources; + u64 hw_csum_rx_good; + u64 non_eop_descs; + int num_msix_vectors; + int max_msix_q_vectors; /* true count of q_vectors for device */ + struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + + u64 rx_hdr_split; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. 
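+ * (A feature may be unsupported, supported-but-disabled, or + * supported-and-enabled; compare IXGBE_FLAG_RX_PS_CAPABLE with + * IXGBE_FLAG_RX_PS_ENABLED below.)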
+ */ + u32 flags; +#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) +#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) +#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2) +#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3) +#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4) +#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5) +#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6) +#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) +#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8) + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in ixgbe_vf.h */ + struct ixgbe_hw hw; + u16 msg_enable; + struct ixgbevf_hw_stats stats; + u64 zero_base; + /* Interrupt Throttle Rate */ + u32 eitr_param; + + unsigned long state; + u32 *config_space; + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + bool link_up; + unsigned long link_check_timeout; + + struct work_struct watchdog_task; + bool netdev_registered; + bool dev_closed; +}; + +enum ixbgevf_state_t { + __IXGBEVF_TESTING, + __IXGBEVF_RESETTING, + __IXGBEVF_DOWN +}; + +enum ixgbevf_boards { + board_82599_vf, + board_X540_vf, +}; + +extern struct ixgbevf_info ixgbevf_82599_vf_info; +extern struct ixgbevf_info ixgbevf_X540_vf_info; +extern struct ixgbe_mbx_operations ixgbevf_mbx_ops; + +/* needed by ethtool.c */ +extern char ixgbevf_driver_name[]; +extern const char ixgbevf_driver_version[]; + +extern int ixgbevf_up(struct ixgbevf_adapter *adapter); +extern void ixgbevf_down(struct ixgbevf_adapter *adapter); +extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); +extern void ixgbevf_reset(struct ixgbevf_adapter *adapter); +extern void ixgbevf_set_ethtool_ops(struct net_device *netdev); +extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, + struct ixgbevf_ring *); +extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, + struct ixgbevf_ring *); +extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, + struct ixgbevf_ring *); +extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, + struct ixgbevf_ring *); +extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); + +#ifdef ETHTOOL_OPS_COMPAT +extern int ethtool_ioctl(struct ifreq *ifr); + +#endif +extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); +extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); + +#ifdef DEBUG +extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw); +#define hw_dbg(hw, format, arg...) \ + printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg) +#else +#define hw_dbg(hw, format, arg...) do {} while (0) +#endif + +#endif /* _IXGBEVF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c new file mode 100644 index 0000000..3b880a2 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -0,0 +1,3523 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/****************************************************************************** + Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbevf.h" + +char ixgbevf_driver_name[] = "ixgbevf"; +static const char ixgbevf_driver_string[] = + "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; + +#define DRV_VERSION "2.1.0-k" +const char ixgbevf_driver_version[] = DRV_VERSION; +static char ixgbevf_copyright[] = + "Copyright (c) 2009 - 2010 Intel Corporation."; + +static const struct ixgbevf_info *ixgbevf_info_tbl[] = { + [board_82599_vf] = &ixgbevf_82599_vf_info, + [board_X540_vf] = &ixgbevf_X540_vf_info, +}; + +/* ixgbevf_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static struct pci_device_id ixgbevf_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), + board_82599_vf}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), + board_X540_vf}, + + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +/* forward decls */ +static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector); +static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, + u32 itr_reg); + +static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, + struct ixgbevf_ring *rx_ring, + u32 val) +{ + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
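+ * Without the barrier the tail write below could reach the device + * before the descriptor contents it advertises.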
+ */ + wmb(); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); +} + +/* + * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + */ +static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, + u8 queue, u8 msix_vector) +{ + u32 ivar, index; + struct ixgbe_hw *hw = &adapter->hw; + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + ivar &= ~0xFF; + ivar |= msix_vector; + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); + } else { + /* tx or rx causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); + } +} + +static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter, + struct ixgbevf_tx_buffer + *tx_buffer_info) +{ + if (tx_buffer_info->dma) { + if (tx_buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, + tx_buffer_info->dma, + tx_buffer_info->length, + DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, + tx_buffer_info->dma, + tx_buffer_info->length, + DMA_TO_DEVICE); + tx_buffer_info->dma = 0; + } + if (tx_buffer_info->skb) { + dev_kfree_skb_any(tx_buffer_info->skb); + tx_buffer_info->skb = NULL; + } + tx_buffer_info->time_stamp = 0; + /* tx_buffer_info must be completely set up in the transmit path */ +} + +#define IXGBE_MAX_TXD_PWR 14 +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ + (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 
1 : 0)) +#ifdef MAX_SKB_FRAGS +#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ + MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ +#else +#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) +#endif + +static void ixgbevf_tx_timeout(struct net_device *netdev); + +/** + * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * @tx_ring: tx ring to clean + **/ +static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *tx_ring) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + union ixgbe_adv_tx_desc *tx_desc, *eop_desc; + struct ixgbevf_tx_buffer *tx_buffer_info; + unsigned int i, eop, count = 0; + unsigned int total_bytes = 0, total_packets = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->tx_buffer_info[i].next_to_watch; + eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + + while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && + (count < tx_ring->work_limit)) { + bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + struct sk_buff *skb; + tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + cleaned = (i == eop); + skb = tx_buffer_info->skb; + + if (cleaned && skb) { + unsigned int segs, bytecount; + + /* gso_segs is currently only valid for tcp */ + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + + skb->len; + total_packets += segs; + total_bytes += bytecount; + } + + ixgbevf_unmap_and_free_tx_resource(adapter, + tx_buffer_info); + + tx_desc->wb.status = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + eop = tx_ring->tx_buffer_info[i].next_to_watch; + eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(count && netif_carrier_ok(netdev) && + (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
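+ * (This should pair with a matching barrier in the transmit path's + * queue-stop check elsewhere in this file, closing the stop/wake race.)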
+ */ + smp_mb(); +#ifdef HAVE_TX_MQ + if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && + !test_bit(__IXGBEVF_DOWN, &adapter->state)) { + netif_wake_subqueue(netdev, tx_ring->queue_index); + ++adapter->restart_queue; + } +#else + if (netif_queue_stopped(netdev) && + !test_bit(__IXGBEVF_DOWN, &adapter->state)) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } +#endif + } + + /* re-arm the interrupt */ + if ((count >= tx_ring->work_limit) && + (!test_bit(__IXGBEVF_DOWN, &adapter->state))) { + IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx); + } + + tx_ring->total_bytes += total_bytes; + tx_ring->total_packets += total_packets; + + netdev->stats.tx_bytes += total_bytes; + netdev->stats.tx_packets += total_packets; + + return count < tx_ring->work_limit; +} + +/** + * ixgbevf_receive_skb - Send a completed packet up the stack + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + * @status: hardware indication of status of receive + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * @rx_desc: rx descriptor + **/ +static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, + struct sk_buff *skb, u8 status, + struct ixgbevf_ring *ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + struct ixgbevf_adapter *adapter = q_vector->adapter; + bool is_vlan = (status & IXGBE_RXD_STAT_VP); + + if (is_vlan) { + u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, tag); + } + + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) + napi_gro_receive(&q_vector->napi, skb); + else + netif_rx(skb); +} + +/** + * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum + * @adapter: address of board private structure + * @status_err: hardware indication of status of receive + * @skb: skb currently being received and modified + **/ +static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, + u32 status_err, struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Rx csum disabled */ + if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) + return; + + /* if IP and error */ + if ((status_err & IXGBE_RXD_STAT_IPCS) && + (status_err & IXGBE_RXDADV_ERR_IPE)) { + adapter->hw_csum_rx_error++; + return; + } + + if (!(status_err & IXGBE_RXD_STAT_L4CS)) + return; + + if (status_err & IXGBE_RXDADV_ERR_TCPE) { + adapter->hw_csum_rx_error++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_rx_good++; +} + +/** + * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ +static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *rx_ring, + int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbevf_rx_buffer *bi; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + bi = &rx_ring->rx_buffer_info[i]; + + while (cleaned_count--) { + rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + + if (!bi->page_dma && + (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { + if (!bi->page) { + bi->page = netdev_alloc_page(adapter->netdev); + if (!bi->page) { + adapter->alloc_rx_page_failed++; + goto no_buffers; + } + bi->page_offset = 0; + } else { + /* use a half page if we're re-using */ + bi->page_offset ^= (PAGE_SIZE / 2); + } + + bi->page_dma = dma_map_page(&pdev->dev, bi->page, + bi->page_offset, + 
(PAGE_SIZE / 2), + DMA_FROM_DEVICE); + } + + skb = bi->skb; + if (!skb) { + skb = netdev_alloc_skb(adapter->netdev, + bufsz); + + if (!skb) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + bi->skb = skb; + } + if (!bi->dma) { + bi->dma = dma_map_single(&pdev->dev, skb->data, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + } + /* Refresh the desc even if buffer_addrs didn't change because + * each write-back erases this info. */ + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } else { + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + } + + i++; + if (i == rx_ring->count) + i = 0; + bi = &rx_ring->rx_buffer_info[i]; + } + +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i-- == 0) + i = (rx_ring->count - 1); + + ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); + } +} + +static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, + u64 qmask) +{ + u32 mask; + struct ixgbe_hw *hw = &adapter->hw; + + mask = (qmask & 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); +} + +static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) +{ + return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; +} + +static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) +{ + return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; +} + +static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, + struct ixgbevf_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct pci_dev *pdev = adapter->pdev; + union ixgbe_adv_rx_desc *rx_desc, *next_rxd; + struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; + struct sk_buff *skb; + unsigned int i; + u32 len, staterr; + u16 hdr_info; + bool cleaned = false; + int cleaned_count = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + + while (staterr & IXGBE_RXD_STAT_DD) { + u32 upper_len = 0; + if (*work_done >= work_to_do) + break; + (*work_done)++; + + rmb(); /* read descriptor and rx_buffer_info after status DD */ + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc)); + len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> + IXGBE_RXDADV_HDRBUFLEN_SHIFT; + if (hdr_info & IXGBE_RXDADV_SPH) + adapter->rx_hdr_split++; + if (len > IXGBEVF_RX_HDR_SIZE) + len = IXGBEVF_RX_HDR_SIZE; + upper_len = le16_to_cpu(rx_desc->wb.upper.length); + } else { + len = le16_to_cpu(rx_desc->wb.upper.length); + } + cleaned = true; + skb = rx_buffer_info->skb; + prefetch(skb->data - NET_IP_ALIGN); + rx_buffer_info->skb = NULL; + + if (rx_buffer_info->dma) { + dma_unmap_single(&pdev->dev, rx_buffer_info->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer_info->dma = 0; + skb_put(skb, len); + } + + if (upper_len) { + dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, + PAGE_SIZE / 2, DMA_FROM_DEVICE); + rx_buffer_info->page_dma = 0; + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buffer_info->page, + rx_buffer_info->page_offset, + upper_len); + + if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) 
|| + (page_count(rx_buffer_info->page) != 1)) + rx_buffer_info->page = NULL; + else + get_page(rx_buffer_info->page); + + skb->len += upper_len; + skb->data_len += upper_len; + skb->truesize += upper_len; + } + + i++; + if (i == rx_ring->count) + i = 0; + + next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); + prefetch(next_rxd); + cleaned_count++; + + next_buffer = &rx_ring->rx_buffer_info[i]; + + if (!(staterr & IXGBE_RXD_STAT_EOP)) { + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + rx_buffer_info->skb = next_buffer->skb; + rx_buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + next_buffer->dma = 0; + } else { + skb->next = next_buffer->skb; + skb->next->prev = skb; + } + adapter->non_eop_descs++; + goto next_desc; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + ixgbevf_rx_checksum(adapter, staterr, skb); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* + * Work around issue of some types of VM to VM loop back + * packets not getting split correctly + */ + if (staterr & IXGBE_RXD_STAT_LB) { + u32 header_fixup_len = skb_headlen(skb); + if (header_fixup_len < 14) + skb_push(skb, header_fixup_len); + } + skb->protocol = eth_type_trans(skb, adapter->netdev); + + ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); + +next_desc: + rx_desc->wb.upper.status_error = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { + ixgbevf_alloc_rx_buffers(adapter, rx_ring, + cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + rx_ring->next_to_clean = i; + cleaned_count = IXGBE_DESC_UNUSED(rx_ring); + + if (cleaned_count) + ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); + + rx_ring->total_packets += total_rx_packets; + rx_ring->total_bytes += total_rx_bytes; + adapter->netdev->stats.rx_bytes += total_rx_bytes; + adapter->netdev->stats.rx_packets += total_rx_packets; + + return cleaned; +} + +/** + * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function is optimized for cleaning one queue only on a single + * q_vector!!! 
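+ * Vectors owning more than one Rx ring have their poll routine + * switched to ixgbevf_clean_rxonly_many() by ixgbevf_napi_enable_all().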
+ **/ +static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget) +{ + struct ixgbevf_q_vector *q_vector = + container_of(napi, struct ixgbevf_q_vector, napi); + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_ring *rx_ring = NULL; + int work_done = 0; + long r_idx; + + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + rx_ring = &(adapter->rx_ring[r_idx]); + + ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); + + /* If all Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (adapter->itr_setting & 1) + ixgbevf_set_itr_msix(q_vector); + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx); + } + + return work_done; +} + +/** + * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean more than one rx queue associated with a + * q_vector. + **/ +static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget) +{ + struct ixgbevf_q_vector *q_vector = + container_of(napi, struct ixgbevf_q_vector, napi); + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_ring *rx_ring = NULL; + int work_done = 0, i; + long r_idx; + u64 enable_mask = 0; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + budget /= (q_vector->rxr_count ?: 1); + budget = max(budget, 1); + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rxr_count; i++) { + rx_ring = &(adapter->rx_ring[r_idx]); + ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); + enable_mask |= rx_ring->v_idx; + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, + r_idx + 1); + } + +#ifndef HAVE_NETDEV_NAPI_LIST + if (!netif_running(adapter->netdev)) + work_done = 0; + +#endif + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + rx_ring = &(adapter->rx_ring[r_idx]); + + /* If all Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (adapter->itr_setting & 1) + ixgbevf_set_itr_msix(q_vector); + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + ixgbevf_irq_enable_queues(adapter, enable_mask); + } + + return work_done; +} + + +/** + * ixgbevf_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_q_vector *q_vector; + struct ixgbe_hw *hw = &adapter->hw; + int i, j, q_vectors, v_idx, r_idx; + u32 mask; + + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* + * Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (v_idx = 0; v_idx < q_vectors; v_idx++) { + q_vector = adapter->q_vector[v_idx]; + /* XXX for_each_set_bit(...) 
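+ * could replace the find_first_bit()/find_next_bit() walks below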
*/ + r_idx = find_first_bit(q_vector->rxr_idx, + adapter->num_rx_queues); + + for (i = 0; i < q_vector->rxr_count; i++) { + j = adapter->rx_ring[r_idx].reg_idx; + ixgbevf_set_ivar(adapter, 0, j, v_idx); + r_idx = find_next_bit(q_vector->rxr_idx, + adapter->num_rx_queues, + r_idx + 1); + } + r_idx = find_first_bit(q_vector->txr_idx, + adapter->num_tx_queues); + + for (i = 0; i < q_vector->txr_count; i++) { + j = adapter->tx_ring[r_idx].reg_idx; + ixgbevf_set_ivar(adapter, 1, j, v_idx); + r_idx = find_next_bit(q_vector->txr_idx, + adapter->num_tx_queues, + r_idx + 1); + } + + /* if this is a tx only vector halve the interrupt rate */ + if (q_vector->txr_count && !q_vector->rxr_count) + q_vector->eitr = (adapter->eitr_param >> 1); + else if (q_vector->rxr_count) + /* rx only */ + q_vector->eitr = adapter->eitr_param; + + ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr); + } + + ixgbevf_set_ivar(adapter, -1, 1, v_idx); + + /* set up to autoclear timer, and the vectors */ + mask = IXGBE_EIMS_ENABLE_MASK; + mask &= ~IXGBE_EIMS_OTHER; + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * ixgbevf_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @eitr: eitr setting (ints per sec) to give last timeslice + * @itr_setting: current throttle rate in ints/second + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter, + u32 eitr, u8 itr_setting, + int packets, int bytes) +{ + unsigned int retval = itr_setting; + u32 timepassed_us; + u64 bytes_perint; + + if (packets == 0) + goto update_itr_done; + + + /* simple throttlerate management + * 0-20MB/s lowest (100000 ints/s) + * 20-100MB/s low (20000 ints/s) + * 100-1249MB/s bulk (8000 ints/s) + */ + /* what was last interrupt timeslice? */ + timepassed_us = 1000000/eitr; + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > adapter->eitr_low) + retval = low_latency; + break; + case low_latency: + if (bytes_perint > adapter->eitr_high) + retval = bulk_latency; + else if (bytes_perint <= adapter->eitr_low) + retval = lowest_latency; + break; + case bulk_latency: + if (bytes_perint <= adapter->eitr_high) + retval = low_latency; + break; + } + +update_itr_done: + return retval; +} + +/** + * ixgbevf_write_eitr - write VTEITR register in hardware specific way + * @adapter: pointer to adapter struct + * @v_idx: vector index into q_vector array + * @itr_reg: new value to be written in *register* format, not ints/s + * + * This function is made to be called by ethtool and by the driver + * when it needs to update VTEITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. 
+ */ +static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, + u32 itr_reg) +{ + struct ixgbe_hw *hw = &adapter->hw; + + itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg); + + /* + * set the WDIS bit to not clear the timer bits and cause an + * immediate assertion of the interrupt + */ + itr_reg |= IXGBE_EITR_CNT_WDIS; + + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); +} + +static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector) +{ + struct ixgbevf_adapter *adapter = q_vector->adapter; + u32 new_itr; + u8 current_itr, ret_itr; + int i, r_idx, v_idx = q_vector->v_idx; + struct ixgbevf_ring *rx_ring, *tx_ring; + + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->txr_count; i++) { + tx_ring = &(adapter->tx_ring[r_idx]); + ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr, + q_vector->tx_itr, + tx_ring->total_packets, + tx_ring->total_bytes); + /* if the result for this queue would decrease interrupt + * rate for this vector then use that result */ + q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? + q_vector->tx_itr - 1 : ret_itr); + r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, + r_idx + 1); + } + + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rxr_count; i++) { + rx_ring = &(adapter->rx_ring[r_idx]); + ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr, + q_vector->rx_itr, + rx_ring->total_packets, + rx_ring->total_bytes); + /* if the result for this queue would decrease interrupt + * rate for this vector then use that result */ + q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? + q_vector->rx_itr - 1 : ret_itr); + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, + r_idx + 1); + } + + current_itr = max(q_vector->rx_itr, q_vector->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 100000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + default: + new_itr = 8000; + break; + } + + if (new_itr != q_vector->eitr) { + u32 itr_reg; + + /* save the algorithm value here, not the smoothed one */ + q_vector->eitr = new_itr; + /* do an exponential smoothing */ + new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); + itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); + ixgbevf_write_eitr(adapter, v_idx, itr_reg); + } +} + +static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) +{ + struct net_device *netdev = data; + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr; + u32 msg; + + eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS); + IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr); + + if (!hw->mbx.ops.check_for_ack(hw)) { + /* + * checking for the ack clears the PFACK bit. Place + * it back in the v2p_mailbox cache so that anyone + * polling for an ack will not miss it. Also + * avoid the read below because the code to read + * the mailbox will also clear the ack bit. This was + * causing lost acks. Just cache the bit and exit + * the IRQ handler. 
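+ * (Here that means setting the WDIS bit before writing VTEITR, as + * done in the body below.)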
+ */ + hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; + goto out; + } + + /* Not an ack interrupt, go ahead and read the message */ + hw->mbx.ops.read(hw, &msg, 1); + + if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 1)); + +out: + return IRQ_HANDLED; +} + +static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data) +{ + struct ixgbevf_q_vector *q_vector = data; + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_ring *tx_ring; + int i, r_idx; + + if (!q_vector->txr_count) + return IRQ_HANDLED; + + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->txr_count; i++) { + tx_ring = &(adapter->tx_ring[r_idx]); + tx_ring->total_bytes = 0; + tx_ring->total_packets = 0; + ixgbevf_clean_tx_irq(adapter, tx_ring); + r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, + r_idx + 1); + } + + if (adapter->itr_setting & 1) + ixgbevf_set_itr_msix(q_vector); + + return IRQ_HANDLED; +} + +/** + * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues) + * @irq: unused + * @data: pointer to our q_vector struct for this interrupt vector + **/ +static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data) +{ + struct ixgbevf_q_vector *q_vector = data; + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbevf_ring *rx_ring; + int r_idx; + int i; + + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rxr_count; i++) { + rx_ring = &(adapter->rx_ring[r_idx]); + rx_ring->total_bytes = 0; + rx_ring->total_packets = 0; + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, + r_idx + 1); + } + + if (!q_vector->rxr_count) + return IRQ_HANDLED; + + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + rx_ring = &(adapter->rx_ring[r_idx]); + /* disable interrupts on this vector only */ + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx); + napi_schedule(&q_vector->napi); + + + return IRQ_HANDLED; +} + +static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data) +{ + ixgbevf_msix_clean_rx(irq, data); + ixgbevf_msix_clean_tx(irq, data); + + return IRQ_HANDLED; +} + +static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, + int r_idx) +{ + struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; + + set_bit(r_idx, q_vector->rxr_idx); + q_vector->rxr_count++; + a->rx_ring[r_idx].v_idx = 1 << v_idx; +} + +static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, + int t_idx) +{ + struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; + + set_bit(t_idx, q_vector->txr_idx); + q_vector->txr_count++; + a->tx_ring[t_idx].v_idx = 1 << v_idx; +} + +/** + * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. 
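+ * (The mailbox read path is expected to OR these cached bits back + * in, so a later poll for the ack still observes it.)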
+ **/ +static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) +{ + int q_vectors; + int v_start = 0; + int rxr_idx = 0, txr_idx = 0; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int i, j; + int rqpv, tqpv; + int err = 0; + + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* + * The ideal configuration... + * We have enough vectors to map one per queue. + */ + if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { + for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) + map_vector_to_rxq(adapter, v_start, rxr_idx); + + for (; txr_idx < txr_remaining; v_start++, txr_idx++) + map_vector_to_txq(adapter, v_start, txr_idx); + goto out; + } + + /* + * If we don't have enough vectors for a 1-to-1 + * mapping, we'll have to group them so there are + * multiple queues per vector. + */ + /* Re-adjusting *qpv takes care of the remainder. */ + for (i = v_start; i < q_vectors; i++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); + for (j = 0; j < rqpv; j++) { + map_vector_to_rxq(adapter, i, rxr_idx); + rxr_idx++; + rxr_remaining--; + } + } + for (i = v_start; i < q_vectors; i++) { + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); + for (j = 0; j < tqpv; j++) { + map_vector_to_txq(adapter, i, txr_idx); + txr_idx++; + txr_remaining--; + } + } + +out: + return err; +} + +/** + * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irqreturn_t (*handler)(int, void *); + int i, vector, q_vectors, err; + int ri = 0, ti = 0; + + /* Decrement for Other and TCP Timer vectors */ + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + +#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \ + ? &ixgbevf_msix_clean_many : \ + (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \ + (_v)->txr_count ? 
&ixgbevf_msix_clean_tx : \ + NULL) + for (vector = 0; vector < q_vectors; vector++) { + handler = SET_HANDLER(adapter->q_vector[vector]); + + if (handler == &ixgbevf_msix_clean_rx) { + sprintf(adapter->name[vector], "%s-%s-%d", + netdev->name, "rx", ri++); + } else if (handler == &ixgbevf_msix_clean_tx) { + sprintf(adapter->name[vector], "%s-%s-%d", + netdev->name, "tx", ti++); + } else if (handler == &ixgbevf_msix_clean_many) { + sprintf(adapter->name[vector], "%s-%s-%d", + netdev->name, "TxRx", vector); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(adapter->msix_entries[vector].vector, + handler, 0, adapter->name[vector], + adapter->q_vector[vector]); + if (err) { + hw_dbg(&adapter->hw, + "request_irq failed for MSIX interrupt " + "Error: %d\n", err); + goto free_queue_irqs; + } + } + + sprintf(adapter->name[vector], "%s:mbx", netdev->name); + err = request_irq(adapter->msix_entries[vector].vector, + &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev); + if (err) { + hw_dbg(&adapter->hw, + "request_irq for msix_mbx failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + for (i = vector - 1; i >= 0; i--) + free_irq(adapter->msix_entries[--vector].vector, + &(adapter->q_vector[i])); + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) +{ + int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (i = 0; i < q_vectors; i++) { + struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; + bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); + bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); + q_vector->rxr_count = 0; + q_vector->txr_count = 0; + q_vector->eitr = adapter->eitr_param; + } +} + +/** + * ixgbevf_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
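+ * With the VF's single Rx and single Tx queue and two q_vectors, the + * one-to-one branch below is taken; the DIV_ROUND_UP() loops only run + * on a constrained vector budget.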
+ **/ +static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) +{ + int err = 0; + + err = ixgbevf_request_msix_irqs(adapter); + + if (err) + hw_dbg(&adapter->hw, + "request_irq failed, Error %d\n", err); + + return err; +} + +static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i, q_vectors; + + q_vectors = adapter->num_msix_vectors; + + i = q_vectors - 1; + + free_irq(adapter->msix_entries[i].vector, netdev); + i--; + + for (; i >= 0; i--) { + free_irq(adapter->msix_entries[i].vector, + adapter->q_vector[i]); + } + + ixgbevf_reset_q_vectors(adapter); +} + +/** + * ixgbevf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) +{ + int i; + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); + + IXGBE_WRITE_FLUSH(hw); + + for (i = 0; i < adapter->num_msix_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); +} + +/** + * ixgbevf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter, + bool queues, bool flush) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 mask; + u64 qmask; + + mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); + qmask = ~0; + + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + + if (queues) + ixgbevf_irq_enable_queues(adapter, qmask); + + if (flush) + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) +{ + u64 tdba; + struct ixgbe_hw *hw = &adapter->hw; + u32 i, j, tdlen, txctrl; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbevf_ring *ring = &adapter->tx_ring[i]; + j = ring->reg_idx; + tdba = ring->dma; + tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), + (tdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen); + IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0); + adapter->tx_ring[i].head = IXGBE_VFTDH(j); + adapter->tx_ring[i].tail = IXGBE_VFTDT(j); + /* Disable Tx Head Writeback RO bit, since this hoses + * bookkeeping if things aren't delivered in order. 
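+ * (The VF is MSI-X only, so this simply delegates to + * ixgbevf_request_msix_irqs().)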
+ */ + txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); + } +} + +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) +{ + struct ixgbevf_ring *rx_ring; + struct ixgbe_hw *hw = &adapter->hw; + u32 srrctl; + + rx_ring = &adapter->rx_ring[index]; + + srrctl = IXGBE_SRRCTL_DROP_EN; + + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + u16 bufsz = IXGBEVF_RXBUFFER_2048; + /* grow the amount we can receive on large page machines */ + if (bufsz < (PAGE_SIZE / 2)) + bufsz = (PAGE_SIZE / 2); + /* cap the bufsz at our largest descriptor size */ + bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz); + + srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + srrctl |= ((IXGBEVF_RX_HDR_SIZE << + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK); + } else { + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) + srrctl |= IXGBEVF_RXBUFFER_2048 >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= rx_ring->rx_buf_len >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); +} + +/** + * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) +{ + u64 rdba; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i, j; + u32 rdlen; + int rx_buf_len; + + /* Decide whether to use packet split mode or not */ + if (netdev->mtu > ETH_DATA_LEN) { + if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE) + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + else + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + } else { + if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + else + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + } + + /* Set the RX buffer length according to the mode */ + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + /* PSRTYPE must be initialized in 82599 */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_IPV6HDR | + IXGBE_PSRTYPE_L2HDR; + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); + rx_buf_len = IXGBEVF_RX_HDR_SIZE; + } else { + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); + if (netdev->mtu <= ETH_DATA_LEN) + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else + rx_buf_len = ALIGN(max_frame, 1024); + } + + rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rdba = adapter->rx_ring[i].dma; + j = adapter->rx_ring[i].reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), + (rdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen); + IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); + adapter->rx_ring[i].head = IXGBE_VFRDH(j); + adapter->rx_ring[i].tail = IXGBE_VFRDT(j); + adapter->rx_ring[i].rx_buf_len = rx_buf_len; + + ixgbevf_configure_srrctl(adapter, j); + } +} + +static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct ixgbevf_adapter 
*adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + /* add VID to filter table */ + if (hw->mac.ops.set_vfta) + hw->mac.ops.set_vfta(hw, vid, 0, true); + set_bit(vid, adapter->active_vlans); +} + +static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + /* remove VID from filter table */ + if (hw->mac.ops.set_vfta) + hw->mac.ops.set_vfta(hw, vid, 0, false); + clear_bit(vid, adapter->active_vlans); +} + +static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); +} + +static int ixgbevf_write_uc_addr_list(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int count = 0; + + if ((netdev_uc_count(netdev)) > 10) { + printk(KERN_ERR "Too many unicast filters - No Space\n"); + return -ENOSPC; + } + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + netdev_for_each_uc_addr(ha, netdev) { + hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); + udelay(200); + } + } else { + /* + * If the list is empty then send message to PF driver to + * clear all macvlans on this VF. + */ + hw->mac.ops.set_uc_addr(hw, 0, NULL); + } + + return count; +} + +/** + * ixgbevf_set_rx_mode - Multicast set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast mode. + **/ +static void ixgbevf_set_rx_mode(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + /* reprogram multicast list */ + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, netdev); + + ixgbevf_write_uc_addr_list(netdev); +} + +static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) +{ + int q_idx; + struct ixgbevf_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct napi_struct *napi; + q_vector = adapter->q_vector[q_idx]; + if (!q_vector->rxr_count) + continue; + napi = &q_vector->napi; + if (q_vector->rxr_count > 1) + napi->poll = &ixgbevf_clean_rxonly_many; + + napi_enable(napi); + } +} + +static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) +{ + int q_idx; + struct ixgbevf_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + if (!q_vector->rxr_count) + continue; + napi_disable(&q_vector->napi); + } +} + +static void ixgbevf_configure(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + ixgbevf_set_rx_mode(netdev); + + ixgbevf_restore_vlan(adapter); + + ixgbevf_configure_tx(adapter); + ixgbevf_configure_rx(adapter); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbevf_ring *ring = &adapter->rx_ring[i]; + ixgbevf_alloc_rx_buffers(adapter, ring, ring->count); + ring->next_to_use = ring->count - 1; + writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail); + } +} + +#define IXGBE_MAX_RX_DESC_POLL 10 +static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, + int rxr) +{ + struct ixgbe_hw *hw 
= &adapter->hw; + int j = adapter->rx_ring[rxr].reg_idx; + int k; + + for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { + if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) + break; + else + msleep(1); + } + if (k >= IXGBE_MAX_RX_DESC_POLL) { + hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d " + "not set within the polling period\n", rxr); + } + + ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], + (adapter->rx_ring[rxr].count - 1)); +} + +static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) +{ + /* Only save pre-reset stats if there are some */ + if (adapter->stats.vfgprc || adapter->stats.vfgptc) { + adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - + adapter->stats.base_vfgprc; + adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - + adapter->stats.base_vfgptc; + adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - + adapter->stats.base_vfgorc; + adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - + adapter->stats.base_vfgotc; + adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - + adapter->stats.base_vfmprc; + } +} + +static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); + adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); + adapter->stats.last_vfgorc |= + (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); + adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); + adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); + adapter->stats.last_vfgotc |= + (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); + adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); + + adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; + adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; + adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; + adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; + adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; +} + +static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + int i, j = 0; + int num_rx_rings = adapter->num_rx_queues; + u32 txdctl, rxdctl; + + for (i = 0; i < adapter->num_tx_queues; i++) { + j = adapter->tx_ring[i].reg_idx; + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); + /* enable WTHRESH=8 descriptors, to encourage burst writeback */ + txdctl |= (8 << 16); + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + j = adapter->tx_ring[i].reg_idx; + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); + } + + for (i = 0; i < num_rx_rings; i++) { + j = adapter->rx_ring[i].reg_idx; + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); + rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; + if (hw->mac.type == ixgbe_mac_X540_vf) { + rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; + rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | + IXGBE_RXDCTL_RLPML_EN); + } + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); + ixgbevf_rx_desc_queue_enable(adapter, i); + } + + ixgbevf_configure_msix(adapter); + + if (hw->mac.ops.set_rar) { + if (is_valid_ether_addr(hw->mac.addr)) + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); + else + hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); + } + + clear_bit(__IXGBEVF_DOWN, &adapter->state); + ixgbevf_napi_enable_all(adapter); + + /* enable transmits 
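+	 * (MSI-X mapping, the RAR address and NAPI are already set up
+	 * above, so this is the last step before the watchdog timer
+	 * takes over link management)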
*/ + netif_tx_start_all_queues(netdev); + + ixgbevf_save_reset_stats(adapter); + ixgbevf_init_last_counter_stats(adapter); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + mod_timer(&adapter->watchdog_timer, jiffies); + return 0; +} + +int ixgbevf_up(struct ixgbevf_adapter *adapter) +{ + int err; + struct ixgbe_hw *hw = &adapter->hw; + + ixgbevf_configure(adapter); + + err = ixgbevf_up_complete(adapter); + + /* clear any pending interrupts, may auto mask */ + IXGBE_READ_REG(hw, IXGBE_VTEICR); + + ixgbevf_irq_enable(adapter, true, true); + + return err; +} + +/** + * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ixgbevf_rx_buffer *rx_buffer_info; + + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + if (rx_buffer_info->dma) { + dma_unmap_single(&pdev->dev, rx_buffer_info->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_buffer_info->dma = 0; + } + if (rx_buffer_info->skb) { + struct sk_buff *skb = rx_buffer_info->skb; + rx_buffer_info->skb = NULL; + do { + struct sk_buff *this = skb; + skb = skb->prev; + dev_kfree_skb(this); + } while (skb); + } + if (!rx_buffer_info->page) + continue; + dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, + PAGE_SIZE / 2, DMA_FROM_DEVICE); + rx_buffer_info->page_dma = 0; + put_page(rx_buffer_info->page); + rx_buffer_info->page = NULL; + rx_buffer_info->page_offset = 0; + } + + size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + if (rx_ring->head) + writel(0, adapter->hw.hw_addr + rx_ring->head); + if (rx_ring->tail) + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +/** + * ixgbevf_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *tx_ring) +{ + struct ixgbevf_tx_buffer *tx_buffer_info; + unsigned long size; + unsigned int i; + + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); + } + + size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + if (tx_ring->head) + writel(0, adapter->hw.hw_addr + tx_ring->head); + if (tx_ring->tail) + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/** + * 
ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +void ixgbevf_down(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 txdctl; + int i, j; + + /* signal that we are down to the interrupt handler */ + set_bit(__IXGBEVF_DOWN, &adapter->state); + /* disable receives */ + + netif_tx_disable(netdev); + + msleep(10); + + netif_tx_stop_all_queues(netdev); + + ixgbevf_irq_disable(adapter); + + ixgbevf_napi_disable_all(adapter); + + del_timer_sync(&adapter->watchdog_timer); + /* can't call flush scheduled work here because it can deadlock + * if linkwatch_event tries to acquire the rtnl_lock which we are + * holding */ + while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) + msleep(1); + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + j = adapter->tx_ring[i].reg_idx; + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), + (txdctl & ~IXGBE_TXDCTL_ENABLE)); + } + + netif_carrier_off(netdev); + + if (!pci_channel_offline(adapter->pdev)) + ixgbevf_reset(adapter); + + ixgbevf_clean_all_tx_rings(adapter); + ixgbevf_clean_all_rx_rings(adapter); +} + +void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + WARN_ON(in_interrupt()); + + while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) + msleep(1); + + /* + * Check if PF is up before re-init. If not then skip until + * later when the PF is up and ready to service requests from + * the VF via mailbox. If the VF is up and running then the + * watchdog task will continue to schedule reset tasks until + * the PF is up and running. + */ + if (!hw->mac.ops.reset_hw(hw)) { + ixgbevf_down(adapter); + ixgbevf_up(adapter); + } + + clear_bit(__IXGBEVF_RESETTING, &adapter->state); +} + +void ixgbevf_reset(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + if (hw->mac.ops.reset_hw(hw)) + hw_dbg(hw, "PF still resetting\n"); + else + hw->mac.ops.init_hw(hw); + + if (is_valid_ether_addr(adapter->hw.mac.addr)) { + memcpy(netdev->dev_addr, adapter->hw.mac.addr, + netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, + netdev->addr_len); + } +} + +static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, + int vectors) +{ + int err, vector_threshold; + + /* We'll want at least 3 (vector_threshold): + * 1) TxQ[0] Cleanup + * 2) RxQ[0] Cleanup + * 3) Other (Link Status Change, etc.) + */ + vector_threshold = MIN_MSIX_COUNT; + + /* The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + while (vectors >= vector_threshold) { + err = pci_enable_msix(adapter->pdev, adapter->msix_entries, + vectors); + if (!err) /* Success in acquiring all requested vectors. */ + break; + else if (err < 0) + vectors = 0; /* Nasty failure, quit now */ + else /* err == number of vectors we should try again with */ + vectors = err; + } + + if (vectors < vector_threshold) { + /* Can't allocate enough MSI-X interrupts? Oh well. 
+ * This just means we'll go with either a single MSI + * vector or fall back to legacy interrupts. + */ + hw_dbg(&adapter->hw, + "Unable to allocate MSI-X interrupts\n"); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else { + /* + * Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. + */ + adapter->num_msix_vectors = vectors; + } +} + +/* + * ixgbevf_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_queues_per_pool = 1; +} + +/** + * ixgbevf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. + **/ +static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) +{ + int i; + + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct ixgbevf_ring), GFP_KERNEL); + if (!adapter->tx_ring) + goto err_tx_ring_allocation; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct ixgbevf_ring), GFP_KERNEL); + if (!adapter->rx_ring) + goto err_rx_ring_allocation; + + for (i = 0; i < adapter->num_tx_queues; i++) { + adapter->tx_ring[i].count = adapter->tx_ring_count; + adapter->tx_ring[i].queue_index = i; + adapter->tx_ring[i].reg_idx = i; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->rx_ring[i].count = adapter->rx_ring_count; + adapter->rx_ring[i].queue_index = i; + adapter->rx_ring[i].reg_idx = i; + } + + return 0; + +err_rx_ring_allocation: + kfree(adapter->tx_ring); +err_tx_ring_allocation: + return -ENOMEM; +} + +/** + * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) +{ + int err = 0; + int vector, v_budget; + + /* + * It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) twice the number of vectors as there are CPU's. + */ + v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, + (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; + + /* A failure in MSI-X entry allocation isn't fatal, but it does + * mean we disable MSI-X capabilities of the adapter. 
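+	 * For scale: with the driver's single Rx/Tx queue pair, the
+	 * v_budget computed above is min(2, 2 * num_online_cpus()) +
+	 * NON_Q_VECTORS, i.e. 2 + NON_Q_VECTORS entries (illustrative
+	 * arithmetic for the default queue layout).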
*/ + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) { + err = -ENOMEM; + goto out; + } + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + ixgbevf_acquire_msix_vectors(adapter, v_budget); + +out: + return err; +} + +/** + * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) +{ + int q_idx, num_q_vectors; + struct ixgbevf_q_vector *q_vector; + int napi_vectors; + int (*poll)(struct napi_struct *, int); + + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + napi_vectors = adapter->num_rx_queues; + poll = &ixgbevf_clean_rxonly; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + q_vector->v_idx = q_idx; + q_vector->eitr = adapter->eitr_param; + if (q_idx < napi_vectors) + netif_napi_add(adapter->netdev, &q_vector->napi, + (*poll), 64); + adapter->q_vector[q_idx] = q_vector; + } + + return 0; + +err_out: + while (q_idx) { + q_idx--; + q_vector = adapter->q_vector[q_idx]; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + adapter->q_vector[q_idx] = NULL; + } + return -ENOMEM; +} + +/** + * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) +{ + int q_idx, num_q_vectors; + int napi_vectors; + + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + napi_vectors = adapter->num_rx_queues; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; + + adapter->q_vector[q_idx] = NULL; + if (q_idx < napi_vectors) + netif_napi_del(&q_vector->napi); + kfree(q_vector); + } +} + +/** + * ixgbevf_reset_interrupt_capability - Reset MSIX setup + * @adapter: board private structure + * + **/ +static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) +{ + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +/** + * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + ixgbevf_set_num_queues(adapter); + + err = ixgbevf_set_interrupt_capability(adapter); + if (err) { + hw_dbg(&adapter->hw, + "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = ixgbevf_alloc_q_vectors(adapter); + if (err) { + hw_dbg(&adapter->hw, "Unable to allocate memory for queue " + "vectors\n"); + goto err_alloc_q_vectors; + } + + err = ixgbevf_alloc_queues(adapter); + if (err) { + printk(KERN_ERR "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " + "Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : + "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); + + set_bit(__IXGBEVF_DOWN, &adapter->state); + + return 0; +err_alloc_queues: + ixgbevf_free_q_vectors(adapter); +err_alloc_q_vectors: + ixgbevf_reset_interrupt_capability(adapter); +err_set_interrupt: + return err; +} + +/** + * ixgbevf_sw_init - Initialize general software structures + * (struct ixgbevf_adapter) + * @adapter: board private structure to initialize + * + * ixgbevf_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + hw->mbx.ops.init_params(hw); + hw->mac.max_tx_queues = MAX_TX_QUEUES; + hw->mac.max_rx_queues = MAX_RX_QUEUES; + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_info(&pdev->dev, + "PF still in reset state, assigning new address\n"); + dev_hw_addr_random(adapter->netdev, hw->mac.addr); + } else { + err = hw->mac.ops.init_hw(hw); + if (err) { + printk(KERN_ERR "init_shared_code failed: %d\n", err); + goto out; + } + } + + /* Enable dynamic interrupt throttling rates */ + adapter->eitr_param = 20000; + adapter->itr_setting = 1; + + /* set defaults for eitr in MegaBytes */ + adapter->eitr_low = 10; + adapter->eitr_high = 20; + + /* set default ring sizes */ + adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; + adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; + + /* enable rx csum by default */ + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; + + set_bit(__IXGBEVF_DOWN, &adapter->state); + +out: + return err; +} + +#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ + { \ + u32 current_counter = IXGBE_READ_REG(hw, reg); \ + if (current_counter < last_counter) \ + counter += 0x100000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFFF00000000LL; \ + counter |= current_counter; \ + } + +#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ + { \ + u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ + u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + counter += 0x1000000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFF000000000LL; \ + counter |= current_counter; \ + } +/** + * ixgbevf_update_stats - Update the board statistics counters. 
+ * @adapter: board private structure + **/ +void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, + adapter->stats.vfgprc); + UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, + adapter->stats.vfgptc); + UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, + adapter->stats.last_vfgorc, + adapter->stats.vfgorc); + UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, + adapter->stats.last_vfgotc, + adapter->stats.vfgotc); + UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, + adapter->stats.vfmprc); + + /* Fill out the OS statistics structure */ + adapter->netdev->stats.multicast = adapter->stats.vfmprc - + adapter->stats.base_vfmprc; +} + +/** + * ixgbevf_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ixgbevf_watchdog(unsigned long data) +{ + struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; + struct ixgbe_hw *hw = &adapter->hw; + u64 eics = 0; + int i; + + /* + * Do the watchdog outside of interrupt context due to the lovely + * delays that some of the newer hardware requires + */ + + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + goto watchdog_short_circuit; + + /* get one bit for every active tx/rx interrupt vector */ + for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { + struct ixgbevf_q_vector *qv = adapter->q_vector[i]; + if (qv->rxr_count || qv->txr_count) + eics |= (1 << i); + } + + IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics); + +watchdog_short_circuit: + schedule_work(&adapter->watchdog_task); +} + +/** + * ixgbevf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ixgbevf_tx_timeout(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); +} + +static void ixgbevf_reset_task(struct work_struct *work) +{ + struct ixgbevf_adapter *adapter; + adapter = container_of(work, struct ixgbevf_adapter, reset_task); + + /* If we're already down or resetting, just bail */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + adapter->tx_timeout_count++; + + ixgbevf_reinit_locked(adapter); +} + +/** + * ixgbevf_watchdog_task - worker thread to bring link up + * @work: pointer to work_struct containing our data + **/ +static void ixgbevf_watchdog_task(struct work_struct *work) +{ + struct ixgbevf_adapter *adapter = container_of(work, + struct ixgbevf_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + + adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; + + /* + * Always check the link on the watchdog because we have + * no LSC interrupt + */ + if (hw->mac.ops.check_link) { + if ((hw->mac.ops.check_link(hw, &link_speed, + &link_up, false)) != 0) { + adapter->link_up = link_up; + adapter->link_speed = link_speed; + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + schedule_work(&adapter->reset_task); + goto pf_has_reset; + } + } else { + /* always assume link is up, if no check link + * function */ + link_speed = IXGBE_LINK_SPEED_10GB_FULL; + link_up = true; + } + adapter->link_up = link_up; + adapter->link_speed = link_speed; + + if (link_up) { + if 
(!netif_carrier_ok(netdev)) { + hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", + (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? + 10 : 1); + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + } + } else { + adapter->link_up = false; + adapter->link_speed = 0; + if (netif_carrier_ok(netdev)) { + hw_dbg(&adapter->hw, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + } + + ixgbevf_update_stats(adapter); + +pf_has_reset: + /* Reset the timer */ + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + (2 * HZ))); + + adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; +} + +/** + * ixgbevf_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ixgbevf_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i].desc) + ixgbevf_free_tx_resources(adapter, + &adapter->tx_ring[i]); + +} + +/** + * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->work_limit = tx_ring->count; + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " + "descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
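+ * In this driver ixgbevf_open() does exactly that: its error path
+ * unwinds through ixgbevf_free_all_tx_resources(), which skips any
+ * ring whose ->desc was never allocated.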
+ * + * Return 0 on success, negative on failure + **/ +static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (!err) + continue; + hw_dbg(&adapter->hw, + "Allocation for Tx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) { + hw_dbg(&adapter->hw, + "Unable to vmalloc buffer memory for " + "the receive descriptor ring\n"); + goto alloc_failed; + } + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) { + hw_dbg(&adapter->hw, + "Unable to allocate memory for " + "the receive descriptor ring\n"); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + goto alloc_failed; + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +alloc_failed: + return -ENOMEM; +} + +/** + * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (!err) + continue; + hw_dbg(&adapter->hw, + "Allocation for Rx Queue %u failed\n", i); + break; + } + return err; +} + +/** + * ixgbevf_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ixgbevf_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i].desc) + ixgbevf_free_rx_resources(adapter, + &adapter->rx_ring[i]); +} + +/** + * ixgbevf_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int ixgbevf_open(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__IXGBEVF_TESTING, &adapter->state)) + return -EBUSY; + + if (hw->adapter_stopped) { + ixgbevf_reset(adapter); + /* if adapter is still stopped then PF isn't up and + * the vf can't start. */ + if (hw->adapter_stopped) { + err = IXGBE_ERR_MBX; + printk(KERN_ERR "Unable to start - perhaps the PF" + " Driver isn't up yet\n"); + goto err_setup_reset; + } + } + + /* allocate transmit descriptors */ + err = ixgbevf_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ixgbevf_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + ixgbevf_configure(adapter); + + /* + * Map the Tx/Rx rings to the vectors we were allotted. + * if request_irq will be called in this function map_rings + * must be called *before* up_complete + */ + ixgbevf_map_rings_to_vectors(adapter); + + err = ixgbevf_up_complete(adapter); + if (err) + goto err_up; + + /* clear any pending interrupts, may auto mask */ + IXGBE_READ_REG(hw, IXGBE_VTEICR); + err = ixgbevf_request_irq(adapter); + if (err) + goto err_req_irq; + + ixgbevf_irq_enable(adapter, true, true); + + return 0; + +err_req_irq: + ixgbevf_down(adapter); +err_up: + ixgbevf_free_irq(adapter); +err_setup_rx: + ixgbevf_free_all_rx_resources(adapter); +err_setup_tx: + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_reset(adapter); + +err_setup_reset: + + return err; +} + +/** + * ixgbevf_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
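+ *
+ * Teardown mirrors ixgbevf_open(): ixgbevf_down() quiesces the
+ * device, ixgbevf_free_irq() releases the MSI-X vectors, and only
+ * then are the Tx/Rx descriptor rings freed.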
+ **/ +static int ixgbevf_close(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + ixgbevf_down(adapter); + ixgbevf_free_irq(adapter); + + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_free_all_rx_resources(adapter); + + return 0; +} + +static int ixgbevf_tso(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + unsigned int i; + int err; + struct ixgbevf_tx_buffer *tx_buffer_info; + u32 vlan_macip_lens = 0, type_tucmd_mlhl; + u32 mss_l4len_idx, l4len; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + adapter->hw_tso_ctxt++; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + adapter->hw_tso6_ctxt++; + } + + i = tx_ring->next_to_use; + + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); + + /* VLAN MACLEN IPLEN */ + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + vlan_macip_lens |= + (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); + vlan_macip_lens |= ((skb_network_offset(skb)) << + IXGBE_ADVTXD_MACLEN_SHIFT); + *hdr_len += skb_network_offset(skb); + vlan_macip_lens |= + (skb_transport_header(skb) - skb_network_header(skb)); + *hdr_len += + (skb_transport_header(skb) - skb_network_header(skb)); + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | + IXGBE_ADVTXD_DTYP_CTXT); + + if (skb->protocol == htons(ETH_P_IP)) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); + + /* MSS L4LEN IDX */ + mss_l4len_idx = + (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); + /* use index 1 for TSO */ + mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + + return false; +} + +static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + unsigned int i; + struct ixgbevf_tx_buffer *tx_buffer_info; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + + if (skb->ip_summed == CHECKSUM_PARTIAL || + (tx_flags & IXGBE_TX_FLAGS_VLAN)) { + i = tx_ring->next_to_use; + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); + + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + vlan_macip_lens |= (tx_flags & + IXGBE_TX_FLAGS_VLAN_MASK); + vlan_macip_lens |= (skb_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT); + if (skb->ip_summed == CHECKSUM_PARTIAL) + vlan_macip_lens |= (skb_transport_header(skb) - + skb_network_header(skb)); + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 
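+		/* vlan_macip_lens packs the VLAN tag, the MAC header length
+		 * (network offset << IXGBE_ADVTXD_MACLEN_SHIFT) and the IP
+		 * header length into one little-endian descriptor word */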
+ context_desc->seqnum_seed = 0; + + type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | + IXGBE_ADVTXD_DTYP_CTXT); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + type_tucmd_mlhl |= + IXGBE_ADVTXD_TUCMD_L4T_TCP; + break; + case __constant_htons(ETH_P_IPV6): + /* XXX what about other V6 headers?? */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + type_tucmd_mlhl |= + IXGBE_ADVTXD_TUCMD_L4T_TCP; + break; + default: + if (unlikely(net_ratelimit())) { + printk(KERN_WARNING + "partial checksum but " + "proto=%x!\n", + skb->protocol); + } + break; + } + } + + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); + /* use index zero for tx checksum offload */ + context_desc->mss_l4len_idx = 0; + + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + adapter->hw_csum_tx_good++; + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + + return false; +} + +static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, + unsigned int first) +{ + struct pci_dev *pdev = adapter->pdev; + struct ixgbevf_tx_buffer *tx_buffer_info; + unsigned int len; + unsigned int total = skb->len; + unsigned int offset = 0, size; + int count = 0; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int f; + int i; + + i = tx_ring->next_to_use; + + len = min(skb_headlen(skb), total); + while (len) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); + + tx_buffer_info->length = size; + tx_buffer_info->mapped_as_page = false; + tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) + goto dma_error; + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + len -= size; + total -= size; + offset += size; + count++; + i++; + if (i == tx_ring->count) + i = 0; + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = min((unsigned int)frag->size, total); + offset = frag->page_offset; + + while (len) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); + + tx_buffer_info->length = size; + tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, + frag->page, + offset, + size, + DMA_TO_DEVICE); + tx_buffer_info->mapped_as_page = true; + if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) + goto dma_error; + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + len -= size; + total -= size; + offset += size; + count++; + i++; + if (i == tx_ring->count) + i = 0; + } + if (total == 0) + break; + } + + if (i == 0) + i = tx_ring->count - 1; + else + i = i - 1; + tx_ring->tx_buffer_info[i].skb = skb; + tx_ring->tx_buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + + /* clear timestamp and dma mappings for failed tx_buffer_info map */ + tx_buffer_info->dma = 0; + tx_buffer_info->time_stamp = 0; + tx_buffer_info->next_to_watch = 0; + count--; + + /* clear timestamp and dma mappings for remaining portion of packet */ + while (count >= 0) { + count--; + i--; + if (i < 0) + i += tx_ring->count; + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + 
ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); + } + + return count; +} + +static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *tx_ring, int tx_flags, + int count, u32 paylen, u8 hdr_len) +{ + union ixgbe_adv_tx_desc *tx_desc = NULL; + struct ixgbevf_tx_buffer *tx_buffer_info; + u32 olinfo_status = 0, cmd_type_len = 0; + unsigned int i; + + u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; + + cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; + + cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; + + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; + + if (tx_flags & IXGBE_TX_FLAGS_TSO) { + cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; + + olinfo_status |= IXGBE_TXD_POPTS_TXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + /* use index 1 context for tso */ + olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); + if (tx_flags & IXGBE_TX_FLAGS_IPV4) + olinfo_status |= IXGBE_TXD_POPTS_IXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) + olinfo_status |= IXGBE_TXD_POPTS_TXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); + + i = tx_ring->next_to_use; + while (count--) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type_len | tx_buffer_info->length); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + i++; + if (i == tx_ring->count) + i = 0; + } + + tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); + + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tail); +} + +static int __ixgbevf_maybe_stop_tx(struct net_device *netdev, + struct ixgbevf_ring *tx_ring, int size) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + netif_stop_subqueue(netdev, tx_ring->queue_index); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. */ + if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(netdev, tx_ring->queue_index); + ++adapter->restart_queue; + return 0; +} + +static int ixgbevf_maybe_stop_tx(struct net_device *netdev, + struct ixgbevf_ring *tx_ring, int size) +{ + if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size); +} + +static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_ring *tx_ring; + unsigned int first; + unsigned int tx_flags = 0; + u8 hdr_len = 0; + int r_idx = 0, tso; + int count = 0; + + unsigned int f; + + tx_ring = &adapter->tx_ring[r_idx]; + + if (vlan_tx_tag_present(skb)) { + tx_flags |= vlan_tx_tag_get(skb); + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_VLAN; + } + + /* four things can cause us to need a context descriptor */ + if (skb_is_gso(skb) || + (skb->ip_summed == CHECKSUM_PARTIAL) || + (tx_flags & IXGBE_TX_FLAGS_VLAN)) + count++; + + count += TXD_USE_COUNT(skb_headlen(skb)); + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + + if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) { + adapter->tx_busy++; + return NETDEV_TX_BUSY; + } + + first = tx_ring->next_to_use; + + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= IXGBE_TX_FLAGS_IPV4; + tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= IXGBE_TX_FLAGS_TSO; + else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) && + (skb->ip_summed == CHECKSUM_PARTIAL)) + tx_flags |= IXGBE_TX_FLAGS_CSUM; + + ixgbevf_tx_queue(adapter, tx_ring, tx_flags, + ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first), + skb->len, hdr_len); + + ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); + + return NETDEV_TX_OK; +} + +/** + * ixgbevf_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ixgbevf_set_mac(struct net_device *netdev, void *p) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + if (hw->mac.ops.set_rar) + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); + + return 0; +} + +/** + * ixgbevf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; + u32 msg[2]; + + if (adapter->hw.mac.type == ixgbe_mac_X540_vf) + max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > max_possible_frame)) + return -EINVAL; + + hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; 
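+
+	/* tell the PF the new maximum frame size: msg[0] carries the
+	 * IXGBE_VF_SET_LPE opcode, msg[1] the frame length in bytes
+	 * (a 1500-byte MTU posts max_frame = 1518) */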
+ + msg[0] = IXGBE_VF_SET_LPE; + msg[1] = max_frame; + hw->mbx.ops.write_posted(hw, msg, 2); + + if (netif_running(netdev)) + ixgbevf_reinit_locked(adapter); + + return 0; +} + +static void ixgbevf_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + ixgbevf_down(adapter); + ixgbevf_free_irq(adapter); + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_free_all_rx_resources(adapter); + } + +#ifdef CONFIG_PM + pci_save_state(pdev); +#endif + + pci_disable_device(pdev); +} + +static const struct net_device_ops ixgbe_netdev_ops = { + .ndo_open = ixgbevf_open, + .ndo_stop = ixgbevf_close, + .ndo_start_xmit = ixgbevf_xmit_frame, + .ndo_set_rx_mode = ixgbevf_set_rx_mode, + .ndo_set_multicast_list = ixgbevf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgbevf_set_mac, + .ndo_change_mtu = ixgbevf_change_mtu, + .ndo_tx_timeout = ixgbevf_tx_timeout, + .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, +}; + +static void ixgbevf_assign_netdev_ops(struct net_device *dev) +{ + dev->netdev_ops = &ixgbe_netdev_ops; + ixgbevf_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * ixgbevf_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgbevf_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgbevf_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int __devinit ixgbevf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct ixgbevf_adapter *adapter = NULL; + struct ixgbe_hw *hw = NULL; + const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; + static int cards_found; + int err, pci_using_dac; + + err = pci_enable_device(pdev); + if (err) + return err; + + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; + } + + err = pci_request_regions(pdev, ixgbevf_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_set_master(pdev); + +#ifdef HAVE_TX_MQ + netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), + MAX_TX_QUEUES); +#else + netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter)); +#endif + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + /* + * call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + ixgbevf_assign_netdev_ops(netdev); + + adapter->bd_number = 
cards_found; + + /* Setup hw api */ + memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); + hw->mac.type = ii->mac; + + memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, + sizeof(struct ixgbe_mbx_operations)); + + adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE; + + /* setup the private structure */ + err = ixgbevf_sw_init(adapter); + + netdev->features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; + + netdev->features |= NETIF_F_IPV6_CSUM; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_GRO; + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + /* The HW MAC address was set and/or determined in sw_init */ + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + printk(KERN_ERR "invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = ixgbevf_watchdog; + adapter->watchdog_timer.data = (unsigned long)adapter; + + INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); + INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); + + err = ixgbevf_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + /* pick up the PCI bus settings for reporting later */ + if (hw->mac.ops.get_bus_info) + hw->mac.ops.get_bus_info(hw); + + strcpy(netdev->name, "eth%d"); + + err = register_netdev(netdev); + if (err) + goto err_register; + + adapter->netdev_registered = true; + + netif_carrier_off(netdev); + + ixgbevf_init_last_counter_stats(adapter); + + /* print the MAC address */ + hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", + netdev->dev_addr[0], + netdev->dev_addr[1], + netdev->dev_addr[2], + netdev->dev_addr[3], + netdev->dev_addr[4], + netdev->dev_addr[5]); + + hw_dbg(hw, "MAC: %d\n", hw->mac.type); + + hw_dbg(hw, "LRO is disabled\n"); + + hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); + cards_found++; + return 0; + +err_register: +err_sw_init: + ixgbevf_reset_interrupt_capability(adapter); + iounmap(hw->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * ixgbevf_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgbevf_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
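+ *
+ * Release order is the reverse of ixgbevf_probe(): stop the watchdog
+ * timer and work items, unregister the netdev, drop the MSI-X
+ * resources, then unmap BAR 0 and release the PCI regions.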
+ **/ +static void __devexit ixgbevf_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + set_bit(__IXGBEVF_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + + if (adapter->netdev_registered) { + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + + ixgbevf_reset_interrupt_capability(adapter); + + iounmap(adapter->hw.hw_addr); + pci_release_regions(pdev); + + hw_dbg(&adapter->hw, "Remove complete\n"); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +static struct pci_driver ixgbevf_driver = { + .name = ixgbevf_driver_name, + .id_table = ixgbevf_pci_tbl, + .probe = ixgbevf_probe, + .remove = __devexit_p(ixgbevf_remove), + .shutdown = ixgbevf_shutdown, +}; + +/** + * ixgbevf_init_module - Driver Registration Routine + * + * ixgbevf_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ixgbevf_init_module(void) +{ + int ret; + printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string, + ixgbevf_driver_version); + + printk(KERN_INFO "%s\n", ixgbevf_copyright); + + ret = pci_register_driver(&ixgbevf_driver); + return ret; +} + +module_init(ixgbevf_init_module); + +/** + * ixgbevf_exit_module - Driver Exit Cleanup Routine + * + * ixgbevf_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ixgbevf_exit_module(void) +{ + pci_unregister_driver(&ixgbevf_driver); +} + +#ifdef DEBUG +/** + * ixgbevf_get_hw_dev_name - return device name string + * used by hardware layer to print debugging information + **/ +char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) +{ + struct ixgbevf_adapter *adapter = hw->back; + return adapter->netdev->name; +} + +#endif +module_exit(ixgbevf_exit_module); + +/* ixgbevf_main.c */ diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c new file mode 100644 index 0000000..7a88331 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -0,0 +1,341 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "mbx.h" + +/** + * ixgbevf_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * + * returns 0 if it successfully received a message notification + **/ +static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + while (countdown && mbx->ops.check_for_msg(hw)) { + countdown--; + udelay(mbx->udelay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; + + return countdown ? 0 : IXGBE_ERR_MBX; +} + +/** + * ixgbevf_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * + * returns 0 if it successfully received a message acknowledgement + **/ +static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + while (countdown && mbx->ops.check_for_ack(hw)) { + countdown--; + udelay(mbx->udelay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; + + return countdown ? 0 : IXGBE_ERR_MBX; +} + +/** + * ixgbevf_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + ret_val = ixgbevf_poll_for_msg(hw); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size); + + return ret_val; +} + +/** + * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = ixgbevf_poll_for_ack(hw); + + return ret_val; +} + +/** + * ixgbevf_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw) +{ + u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * ixgbevf_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. 
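+ *
+ * Reading IXGBE_VFMAILBOX clears the read-to-clear status bits in
+ * hardware, so ixgbevf_read_v2p_mailbox() caches them in
+ * hw->mbx.v2p_mailbox; this helper tests that cached copy and then
+ * clears only the bits in @mask.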
+ **/
+static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = 0;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set a reset indication bit or else IXGBE_ERR_MBX
+ **/
+static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = 0;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if we obtained the mailbox lock
+ **/
+static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val;
+ u16 i;
+
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_check_for_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read message from buffer
+ **/
+static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val = 0;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] =
IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return 0; +} + +struct ixgbe_mbx_operations ixgbevf_mbx_ops = { + .init_params = ixgbevf_init_mbx_params_vf, + .read = ixgbevf_read_mbx_vf, + .write = ixgbevf_write_mbx_vf, + .read_posted = ixgbevf_read_posted_mbx, + .write_posted = ixgbevf_write_posted_mbx, + .check_for_msg = ixgbevf_check_for_msg_vf, + .check_for_ack = ixgbevf_check_for_ack_vf, + .check_for_rst = ixgbevf_check_for_rst_vf, +}; + diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h new file mode 100644 index 0000000..ea393eb --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -0,0 +1,99 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_MBX_H_ +#define _IXGBE_MBX_H_ + +#include "vf.h" + +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +/* Define mailbox register bits */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x)) +#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn)) + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + + +/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is IXGBE_PF_*. 
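+ * Each message is a single 32-bit word: the low bits hold the message
+ * type (IXGBE_VF_* below), bits 23:16 hold optional extra info
+ * (IXGBE_VT_MSGINFO), and the top bits carry the ACK/NACK/CTS flags
+ * the PF sets on its reply.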
+ * Message ACKs are the original message value OR'd with
+ * IXGBE_VT_MSGTYPE_ACK; e.g. a granted IXGBE_VF_SET_MAC_ADDR (0x02)
+ * request is acknowledged as 0x80000002.
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+/* forward declaration of the HW struct */
+struct ixgbe_hw;
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
new file mode 100644
index 0000000..189200e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List
+ Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBEVF_REGS_H_ +#define _IXGBEVF_REGS_H_ + +#define IXGBE_VFCTRL 0x00000 +#define IXGBE_VFSTATUS 0x00008 +#define IXGBE_VFLINKS 0x00010 +#define IXGBE_VFFRTIMER 0x00048 +#define IXGBE_VFRXMEMWRAP 0x03190 +#define IXGBE_VTEICR 0x00100 +#define IXGBE_VTEICS 0x00104 +#define IXGBE_VTEIMS 0x00108 +#define IXGBE_VTEIMC 0x0010C +#define IXGBE_VTEIAC 0x00110 +#define IXGBE_VTEIAM 0x00114 +#define IXGBE_VTEITR(x) (0x00820 + (4 * x)) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * x)) +#define IXGBE_VTIVAR_MISC 0x00140 +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x)) +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x)) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x)) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x)) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x)) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x)) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x)) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x)) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x)) +#define IXGBE_VFPSRTYPE 0x00300 +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x)) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x)) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x)) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x)) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x)) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x)) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x)) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x)) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x)) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x)) +#define IXGBE_VFGPRC 0x0101C +#define IXGBE_VFGPTC 0x0201C +#define IXGBE_VFGORC_LSB 0x01020 +#define IXGBE_VFGORC_MSB 0x01024 +#define IXGBE_VFGOTC_LSB 0x02020 +#define IXGBE_VFGOTC_MSB 0x02024 +#define IXGBE_VFMPRC 0x01034 + +#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) + +#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg)) + +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + (reg) + ((offset) << 2)))) + +#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + (reg) + ((offset) << 2))) + +#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) + +#endif /* _IXGBEVF_REGS_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c new file mode 100644 index 0000000..aa3682e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -0,0 +1,426 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "vf.h"
+
+/**
+ * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type, clears
+ * all on chip counters, initializes receive address registers, multicast
+ * table, VLAN filter table, calls routine to set up link and flow control
+ * settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+/**
+ * ixgbevf_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
+{
+ s32 status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbevf_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ **/
+static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ if (!timeout)
+ return IXGBE_ERR_RESET_FAILED;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1);
+
+ msleep(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+ /* also set up the mc_filter_type which is piggybacked
+ * on the mac address in word 3 */
+ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+
+ memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+ return 0;
+}
+
+/**
+ * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
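+ *
+ * The receive queues are disabled first, then the interrupt mask is
+ * cleared and any pending causes are read back, and only then are the
+ * transmit queues disabled.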
+ **/
+static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
+{
+ u32 number_of_queues;
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit by stopping each queue */
+ number_of_queues = hw->mac.max_rx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ if (reg_val & IXGBE_RXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ number_of_queues = hw->mac.max_tx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ if (reg_val & IXGBE_TXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts 12 bits from a multicast address to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits of
+ * the incoming rx multicast address to determine the bit-vector to check
+ * in the MTA. Which of the 4 combinations of 12 bits the hardware uses is
+ * set by the MO field of the MCSTCTRL. The MO field is set during
+ * initialization to mc_filter_type.
+ **/
+static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * ixgbevf_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ return 0;
+}
+
+static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ /*
+ * If index is one then this is the start of a new list and needs
+ * indication to the PF so it can do its own list management.
+ * If it is zero then that tells the PF to just clear all of
+ * this VF's macvlans and there is no new list.
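+ *
+ * For example, index 2 with a non-NULL @addr yields
+ * msgbuf[0] = (2 << IXGBE_VT_MSGINFO_SHIFT) | IXGBE_VF_SET_MACVLAN,
+ * i.e. another filter for the list the PF is already tracking.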
+ */
+ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+ if (addr)
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ if (!ret_val)
+ if (msgbuf[0] ==
+ (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
+ ret_val = -ENOMEM;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ **/
+static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @netdev: pointer to net device structure
+ *
+ * Updates the Multicast Table Array.
+ **/
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+ struct net_device *netdev)
+{
+ struct netdev_hw_addr *ha;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 cnt, i;
+
+ /* Each entry in the list uses 1 16 bit word. We have 30
+ * 16 bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack 'em right. If
+ * there are more than 30 MC addresses to add then punt the
+ * extras for now and then add code to handle more than 30 later.
+ * It would be unusual for a server to request that many multicast
+ * addresses except in large enterprise network environments.
+ */
+
+ cnt = netdev_mc_count(netdev);
+ if (cnt > 30)
+ cnt = 30;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == cnt)
+ break;
+ vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
+ }
+
+ mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+ return mbx->ops.write_posted(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: Unused in this implementation
+ * @autoneg: Unused in this implementation
+ * @autoneg_wait_to_complete: Unused in this implementation
+ *
+ * Do nothing and return success. VF drivers are not allowed to change
+ * global settings.
Maintained for driver compatibility.
+ **/
+static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return 0;
+}
+
+/**
+ * ixgbevf_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ u32 links_reg;
+
+ if (!(hw->mbx.ops.check_for_rst(hw))) {
+ *link_up = false;
+ *speed = 0;
+ return -1;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ return 0;
+}
+
+static struct ixgbe_mac_operations ixgbevf_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_check_mac_link_vf,
+ .set_rar = ixgbevf_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .set_uc_addr = ixgbevf_set_uc_addr_vf,
+ .set_vfta = ixgbevf_set_vfta_vf,
+};
+
+struct ixgbevf_info ixgbevf_82599_vf_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+struct ixgbevf_info ixgbevf_X540_vf_info = {
+ .mac = ixgbe_mac_X540_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
new file mode 100644
index 0000000..10306b4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -0,0 +1,174 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List
+ Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "defines.h"
+#include "regs.h"
+#include "mbx.h"
+
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+
+ /* Link */
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
+ s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82599_vf,
+ ixgbe_mac_X540_vf,
+ ixgbe_num_macs
+};
+
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum ixgbe_mac_type type;
+
+ s32 mc_filter_type;
+
+ bool get_link_status;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 max_msix_vectors;
+};
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *);
+ s32 (*check_for_ack)(struct ixgbe_hw *);
+ s32 (*check_for_rst)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 udelay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+
+ struct ixgbe_mac_info mac;
+ struct ixgbe_mbx_info mbx;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+ bool adapter_stopped;
+};
+
+struct ixgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+
+ u64 saved_reset_vfgprc;
+ u64 saved_reset_vfgptc;
+ u64 saved_reset_vfgorc;
+ u64 saved_reset_vfgotc;
+ u64 saved_reset_vfmprc;
+};
+
+struct ixgbevf_info {
+ enum ixgbe_mac_type mac;
+ struct ixgbe_mac_operations *mac_ops;
+};
+
+#endif /* __IXGBE_VF_H__ */
+
diff --git a/drivers/net/igb/Makefile b/drivers/net/igb/Makefile
deleted file mode 100644
index c6e4621..0000000
--- a/drivers/net/igb/Makefile
+++ 
/dev/null @@ -1,37 +0,0 @@ -################################################################################ -# -# Intel 82575 PCI-Express Ethernet Linux driver -# Copyright(c) 1999 - 2011 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - -# -# Makefile for the Intel(R) 82575 PCI-Express ethernet driver -# - -obj-$(CONFIG_IGB) += igb.o - -igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ - e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o - diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c deleted file mode 100644 index c0857bd..0000000 --- a/drivers/net/igb/e1000_82575.c +++ /dev/null @@ -1,2084 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/* e1000_82575
- * e1000_82576
- */
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-
-#include "e1000_mac.h"
-#include "e1000_82575.h"
-
-static s32 igb_get_invariants_82575(struct e1000_hw *);
-static s32 igb_acquire_phy_82575(struct e1000_hw *);
-static void igb_release_phy_82575(struct e1000_hw *);
-static s32 igb_acquire_nvm_82575(struct e1000_hw *);
-static void igb_release_nvm_82575(struct e1000_hw *);
-static s32 igb_check_for_link_82575(struct e1000_hw *);
-static s32 igb_get_cfg_done_82575(struct e1000_hw *);
-static s32 igb_init_hw_82575(struct e1000_hw *);
-static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
-static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
-static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
-static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
-static s32 igb_reset_hw_82575(struct e1000_hw *);
-static s32 igb_reset_hw_82580(struct e1000_hw *);
-static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
-static s32 igb_setup_copper_link_82575(struct e1000_hw *);
-static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
-static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
-static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
-static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
-static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
- u16 *);
-static s32 igb_get_phy_id_82575(struct e1000_hw *);
-static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
-static bool igb_sgmii_active_82575(struct e1000_hw *);
-static s32 igb_reset_init_script_82575(struct e1000_hw *);
-static s32 igb_read_mac_addr_82575(struct e1000_hw *);
-static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
-static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
-static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
- u16 offset);
-static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
- u16 offset);
-static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
-static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
- { 36, 72, 144, 1, 2, 4, 8, 16,
- 35, 70, 140 };
-#define E1000_82580_RXPBS_TABLE_SIZE \
- (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
-
-/**
- * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
- * @hw: pointer to the HW structure
- *
- * Called to determine if the I2C pins are being used for I2C or as an
- * external MDIO interface since the two options are mutually exclusive.
- **/ -static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) -{ - u32 reg = 0; - bool ext_mdio = false; - - switch (hw->mac.type) { - case e1000_82575: - case e1000_82576: - reg = rd32(E1000_MDIC); - ext_mdio = !!(reg & E1000_MDIC_DEST); - break; - case e1000_82580: - case e1000_i350: - reg = rd32(E1000_MDICNFG); - ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); - break; - default: - break; - } - return ext_mdio; -} - -static s32 igb_get_invariants_82575(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - struct e1000_nvm_info *nvm = &hw->nvm; - struct e1000_mac_info *mac = &hw->mac; - struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; - u32 eecd; - s32 ret_val; - u16 size; - u32 ctrl_ext = 0; - - switch (hw->device_id) { - case E1000_DEV_ID_82575EB_COPPER: - case E1000_DEV_ID_82575EB_FIBER_SERDES: - case E1000_DEV_ID_82575GB_QUAD_COPPER: - mac->type = e1000_82575; - break; - case E1000_DEV_ID_82576: - case E1000_DEV_ID_82576_NS: - case E1000_DEV_ID_82576_NS_SERDES: - case E1000_DEV_ID_82576_FIBER: - case E1000_DEV_ID_82576_SERDES: - case E1000_DEV_ID_82576_QUAD_COPPER: - case E1000_DEV_ID_82576_QUAD_COPPER_ET2: - case E1000_DEV_ID_82576_SERDES_QUAD: - mac->type = e1000_82576; - break; - case E1000_DEV_ID_82580_COPPER: - case E1000_DEV_ID_82580_FIBER: - case E1000_DEV_ID_82580_QUAD_FIBER: - case E1000_DEV_ID_82580_SERDES: - case E1000_DEV_ID_82580_SGMII: - case E1000_DEV_ID_82580_COPPER_DUAL: - case E1000_DEV_ID_DH89XXCC_SGMII: - case E1000_DEV_ID_DH89XXCC_SERDES: - case E1000_DEV_ID_DH89XXCC_BACKPLANE: - case E1000_DEV_ID_DH89XXCC_SFP: - mac->type = e1000_82580; - break; - case E1000_DEV_ID_I350_COPPER: - case E1000_DEV_ID_I350_FIBER: - case E1000_DEV_ID_I350_SERDES: - case E1000_DEV_ID_I350_SGMII: - mac->type = e1000_i350; - break; - default: - return -E1000_ERR_MAC_INIT; - break; - } - - /* Set media type */ - /* - * The 82575 uses bits 22:23 for link mode. The mode can be changed - * based on the EEPROM. We cannot rely upon device ID. There - * is no distinguishable difference between fiber and internal - * SerDes mode on the 82575. There can be an external PHY attached - * on the SGMII interface. For this, we'll set sgmii_active to true. - */ - phy->media_type = e1000_media_type_copper; - dev_spec->sgmii_active = false; - - ctrl_ext = rd32(E1000_CTRL_EXT); - switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { - case E1000_CTRL_EXT_LINK_MODE_SGMII: - dev_spec->sgmii_active = true; - break; - case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: - case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: - hw->phy.media_type = e1000_media_type_internal_serdes; - break; - default: - break; - } - - /* Set mta register count */ - mac->mta_reg_count = 128; - /* Set rar entry count */ - mac->rar_entry_count = E1000_RAR_ENTRIES_82575; - if (mac->type == e1000_82576) - mac->rar_entry_count = E1000_RAR_ENTRIES_82576; - if (mac->type == e1000_82580) - mac->rar_entry_count = E1000_RAR_ENTRIES_82580; - if (mac->type == e1000_i350) - mac->rar_entry_count = E1000_RAR_ENTRIES_I350; - /* reset */ - if (mac->type >= e1000_82580) - mac->ops.reset_hw = igb_reset_hw_82580; - else - mac->ops.reset_hw = igb_reset_hw_82575; - /* Set if part includes ASF firmware */ - mac->asf_firmware_present = true; - /* Set if manageability features are enabled. */ - mac->arc_subsystem_valid = - (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) - ? 
true : false; - /* enable EEE on i350 parts */ - if (mac->type == e1000_i350) - dev_spec->eee_disable = false; - else - dev_spec->eee_disable = true; - /* physical interface link setup */ - mac->ops.setup_physical_interface = - (hw->phy.media_type == e1000_media_type_copper) - ? igb_setup_copper_link_82575 - : igb_setup_serdes_link_82575; - - /* NVM initialization */ - eecd = rd32(E1000_EECD); - - nvm->opcode_bits = 8; - nvm->delay_usec = 1; - switch (nvm->override) { - case e1000_nvm_override_spi_large: - nvm->page_size = 32; - nvm->address_bits = 16; - break; - case e1000_nvm_override_spi_small: - nvm->page_size = 8; - nvm->address_bits = 8; - break; - default: - nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; - nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; - break; - } - - nvm->type = e1000_nvm_eeprom_spi; - - size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); - - /* - * Added to a constant, "size" becomes the left-shift value - * for setting word_size. - */ - size += NVM_WORD_SIZE_BASE_SHIFT; - - /* - * Check for invalid size - */ - if ((hw->mac.type == e1000_82576) && (size > 15)) { - printk("igb: The NVM size is not valid, " - "defaulting to 32K.\n"); - size = 15; - } - nvm->word_size = 1 << size; - if (nvm->word_size == (1 << 15)) - nvm->page_size = 128; - - /* NVM Function Pointers */ - nvm->ops.acquire = igb_acquire_nvm_82575; - if (nvm->word_size < (1 << 15)) - nvm->ops.read = igb_read_nvm_eerd; - else - nvm->ops.read = igb_read_nvm_spi; - - nvm->ops.release = igb_release_nvm_82575; - switch (hw->mac.type) { - case e1000_82580: - nvm->ops.validate = igb_validate_nvm_checksum_82580; - nvm->ops.update = igb_update_nvm_checksum_82580; - break; - case e1000_i350: - nvm->ops.validate = igb_validate_nvm_checksum_i350; - nvm->ops.update = igb_update_nvm_checksum_i350; - break; - default: - nvm->ops.validate = igb_validate_nvm_checksum; - nvm->ops.update = igb_update_nvm_checksum; - } - nvm->ops.write = igb_write_nvm_spi; - - /* if part supports SR-IOV then initialize mailbox parameters */ - switch (mac->type) { - case e1000_82576: - case e1000_i350: - igb_init_mbx_params_pf(hw); - break; - default: - break; - } - - /* setup PHY parameters */ - if (phy->media_type != e1000_media_type_copper) { - phy->type = e1000_phy_none; - return 0; - } - - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; - - ctrl_ext = rd32(E1000_CTRL_EXT); - - /* PHY function pointers */ - if (igb_sgmii_active_82575(hw)) { - phy->ops.reset = igb_phy_hw_reset_sgmii_82575; - ctrl_ext |= E1000_CTRL_I2C_ENA; - } else { - phy->ops.reset = igb_phy_hw_reset; - ctrl_ext &= ~E1000_CTRL_I2C_ENA; - } - - wr32(E1000_CTRL_EXT, ctrl_ext); - igb_reset_mdicnfg_82580(hw); - - if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { - phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; - phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; - } else if (hw->mac.type >= e1000_82580) { - phy->ops.read_reg = igb_read_phy_reg_82580; - phy->ops.write_reg = igb_write_phy_reg_82580; - } else { - phy->ops.read_reg = igb_read_phy_reg_igp; - phy->ops.write_reg = igb_write_phy_reg_igp; - } - - /* set lan id */ - hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> - E1000_STATUS_FUNC_SHIFT; - - /* Set phy->phy_addr and phy->id. 
*/ - ret_val = igb_get_phy_id_82575(hw); - if (ret_val) - return ret_val; - - /* Verify phy id and set remaining function pointers */ - switch (phy->id) { - case I347AT4_E_PHY_ID: - case M88E1112_E_PHY_ID: - case M88E1111_I_PHY_ID: - phy->type = e1000_phy_m88; - phy->ops.get_phy_info = igb_get_phy_info_m88; - - if (phy->id == I347AT4_E_PHY_ID || - phy->id == M88E1112_E_PHY_ID) - phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; - else - phy->ops.get_cable_length = igb_get_cable_length_m88; - - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; - break; - case IGP03E1000_E_PHY_ID: - phy->type = e1000_phy_igp_3; - phy->ops.get_phy_info = igb_get_phy_info_igp; - phy->ops.get_cable_length = igb_get_cable_length_igp_2; - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; - phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; - break; - case I82580_I_PHY_ID: - case I350_I_PHY_ID: - phy->type = e1000_phy_82580; - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; - phy->ops.get_cable_length = igb_get_cable_length_82580; - phy->ops.get_phy_info = igb_get_phy_info_82580; - break; - default: - return -E1000_ERR_PHY; - } - - return 0; -} - -/** - * igb_acquire_phy_82575 - Acquire rights to access PHY - * @hw: pointer to the HW structure - * - * Acquire access rights to the correct PHY. This is a - * function pointer entry point called by the api module. - **/ -static s32 igb_acquire_phy_82575(struct e1000_hw *hw) -{ - u16 mask = E1000_SWFW_PHY0_SM; - - if (hw->bus.func == E1000_FUNC_1) - mask = E1000_SWFW_PHY1_SM; - else if (hw->bus.func == E1000_FUNC_2) - mask = E1000_SWFW_PHY2_SM; - else if (hw->bus.func == E1000_FUNC_3) - mask = E1000_SWFW_PHY3_SM; - - return igb_acquire_swfw_sync_82575(hw, mask); -} - -/** - * igb_release_phy_82575 - Release rights to access PHY - * @hw: pointer to the HW structure - * - * A wrapper to release access rights to the correct PHY. This is a - * function pointer entry point called by the api module. - **/ -static void igb_release_phy_82575(struct e1000_hw *hw) -{ - u16 mask = E1000_SWFW_PHY0_SM; - - if (hw->bus.func == E1000_FUNC_1) - mask = E1000_SWFW_PHY1_SM; - else if (hw->bus.func == E1000_FUNC_2) - mask = E1000_SWFW_PHY2_SM; - else if (hw->bus.func == E1000_FUNC_3) - mask = E1000_SWFW_PHY3_SM; - - igb_release_swfw_sync_82575(hw, mask); -} - -/** - * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Reads the PHY register at offset using the serial gigabit media independent - * interface and stores the retrieved information in data. - **/ -static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, - u16 *data) -{ - s32 ret_val = -E1000_ERR_PARAM; - - if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { - hw_dbg("PHY Address %u is out of range\n", offset); - goto out; - } - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - - ret_val = igb_read_phy_reg_i2c(hw, offset, data); - - hw->phy.ops.release(hw); - -out: - return ret_val; -} - -/** - * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Writes the data to PHY register at the offset using the serial gigabit - * media independent interface. 
- **/ -static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, - u16 data) -{ - s32 ret_val = -E1000_ERR_PARAM; - - - if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { - hw_dbg("PHY Address %d is out of range\n", offset); - goto out; - } - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - - ret_val = igb_write_phy_reg_i2c(hw, offset, data); - - hw->phy.ops.release(hw); - -out: - return ret_val; -} - -/** - * igb_get_phy_id_82575 - Retrieve PHY addr and id - * @hw: pointer to the HW structure - * - * Retrieves the PHY address and ID for both PHY's which do and do not use - * sgmi interface. - **/ -static s32 igb_get_phy_id_82575(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; - u16 phy_id; - u32 ctrl_ext; - u32 mdic; - - /* - * For SGMII PHYs, we try the list of possible addresses until - * we find one that works. For non-SGMII PHYs - * (e.g. integrated copper PHYs), an address of 1 should - * work. The result of this function should mean phy->phy_addr - * and phy->id are set correctly. - */ - if (!(igb_sgmii_active_82575(hw))) { - phy->addr = 1; - ret_val = igb_get_phy_id(hw); - goto out; - } - - if (igb_sgmii_uses_mdio_82575(hw)) { - switch (hw->mac.type) { - case e1000_82575: - case e1000_82576: - mdic = rd32(E1000_MDIC); - mdic &= E1000_MDIC_PHY_MASK; - phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; - break; - case e1000_82580: - case e1000_i350: - mdic = rd32(E1000_MDICNFG); - mdic &= E1000_MDICNFG_PHY_MASK; - phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; - break; - default: - ret_val = -E1000_ERR_PHY; - goto out; - break; - } - ret_val = igb_get_phy_id(hw); - goto out; - } - - /* Power on sgmii phy if it is disabled */ - ctrl_ext = rd32(E1000_CTRL_EXT); - wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); - wrfl(); - msleep(300); - - /* - * The address field in the I2CCMD register is 3 bits and 0 is invalid. - * Therefore, we need to test 1-7 - */ - for (phy->addr = 1; phy->addr < 8; phy->addr++) { - ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); - if (ret_val == 0) { - hw_dbg("Vendor ID 0x%08X read at address %u\n", - phy_id, phy->addr); - /* - * At the time of this writing, The M88 part is - * the only supported SGMII PHY product. - */ - if (phy_id == M88_VENDOR) - break; - } else { - hw_dbg("PHY address %u was unreadable\n", phy->addr); - } - } - - /* A valid PHY type couldn't be found. */ - if (phy->addr == 8) { - phy->addr = 0; - ret_val = -E1000_ERR_PHY; - goto out; - } else { - ret_val = igb_get_phy_id(hw); - } - - /* restore previous sfp cage power state */ - wr32(E1000_CTRL_EXT, ctrl_ext); - -out: - return ret_val; -} - -/** - * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset - * @hw: pointer to the HW structure - * - * Resets the PHY using the serial gigabit media independent interface. - **/ -static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) -{ - s32 ret_val; - - /* - * This isn't a true "hard" reset, but is the only reset - * available to us at this time. - */ - - hw_dbg("Soft resetting SGMII attached PHY...\n"); - - /* - * SFP documentation requires the following to configure the SPF module - * to work on SGMII. No further documentation is given. 
- */ - ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); - if (ret_val) - goto out; - - ret_val = igb_phy_sw_reset(hw); - -out: - return ret_val; -} - -/** - * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state - * @hw: pointer to the HW structure - * @active: true to enable LPLU, false to disable - * - * Sets the LPLU D0 state according to the active flag. When - * activating LPLU this function also disables smart speed - * and vice versa. LPLU will not be activated unless the - * device autonegotiation advertisement meets standards of - * either 10 or 10/100 or 10/100/1000 at all duplexes. - * This is a function pointer entry point only called by - * PHY setup routines. - **/ -static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - - ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); - if (ret_val) - goto out; - - if (active) { - data |= IGP02E1000_PM_D0_LPLU; - ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); - if (ret_val) - goto out; - - /* When LPLU is enabled, we should disable SmartSpeed */ - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - goto out; - } else { - data &= ~IGP02E1000_PM_D0_LPLU; - ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); - /* - * LPLU and SmartSpeed are mutually exclusive. LPLU is used - * during Dx states where the power conservation is most - * important. During driver activity we should enable - * SmartSpeed, so performance is maintained. - */ - if (phy->smart_speed == e1000_smart_speed_on) { - ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, &data); - if (ret_val) - goto out; - - data |= IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, data); - if (ret_val) - goto out; - } else if (phy->smart_speed == e1000_smart_speed_off) { - ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, &data); - if (ret_val) - goto out; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, data); - if (ret_val) - goto out; - } - } - -out: - return ret_val; -} - -/** - * igb_acquire_nvm_82575 - Request for access to EEPROM - * @hw: pointer to the HW structure - * - * Acquire the necessary semaphores for exclusive access to the EEPROM. - * Set the EEPROM access request bit and wait for EEPROM access grant bit. - * Return successful if access grant bit set, else clear the request for - * EEPROM access and return -E1000_ERR_NVM (-1). - **/ -static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) -{ - s32 ret_val; - - ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); - if (ret_val) - goto out; - - ret_val = igb_acquire_nvm(hw); - - if (ret_val) - igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); - -out: - return ret_val; -} - -/** - * igb_release_nvm_82575 - Release exclusive access to EEPROM - * @hw: pointer to the HW structure - * - * Stop any current commands to the EEPROM and clear the EEPROM request bit, - * then release the semaphores acquired. 
- **/ -static void igb_release_nvm_82575(struct e1000_hw *hw) -{ - igb_release_nvm(hw); - igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); -} - -/** - * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore - * @hw: pointer to the HW structure - * @mask: specifies which semaphore to acquire - * - * Acquire the SW/FW semaphore to access the PHY or NVM. The mask - * will also specify which port we're acquiring the lock for. - **/ -static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) -{ - u32 swfw_sync; - u32 swmask = mask; - u32 fwmask = mask << 16; - s32 ret_val = 0; - s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ - - while (i < timeout) { - if (igb_get_hw_semaphore(hw)) { - ret_val = -E1000_ERR_SWFW_SYNC; - goto out; - } - - swfw_sync = rd32(E1000_SW_FW_SYNC); - if (!(swfw_sync & (fwmask | swmask))) - break; - - /* - * Firmware currently using resource (fwmask) - * or other software thread using resource (swmask) - */ - igb_put_hw_semaphore(hw); - mdelay(5); - i++; - } - - if (i == timeout) { - hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); - ret_val = -E1000_ERR_SWFW_SYNC; - goto out; - } - - swfw_sync |= swmask; - wr32(E1000_SW_FW_SYNC, swfw_sync); - - igb_put_hw_semaphore(hw); - -out: - return ret_val; -} - -/** - * igb_release_swfw_sync_82575 - Release SW/FW semaphore - * @hw: pointer to the HW structure - * @mask: specifies which semaphore to acquire - * - * Release the SW/FW semaphore used to access the PHY or NVM. The mask - * will also specify which port we're releasing the lock for. - **/ -static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) -{ - u32 swfw_sync; - - while (igb_get_hw_semaphore(hw) != 0); - /* Empty */ - - swfw_sync = rd32(E1000_SW_FW_SYNC); - swfw_sync &= ~mask; - wr32(E1000_SW_FW_SYNC, swfw_sync); - - igb_put_hw_semaphore(hw); -} - -/** - * igb_get_cfg_done_82575 - Read config done bit - * @hw: pointer to the HW structure - * - * Read the management control register for the config done bit for - * completion status. NOTE: silicon which is EEPROM-less will fail trying - * to read the config done bit, so an error is *ONLY* logged and returns - * 0. If we were to return with error, EEPROM-less silicon - * would not be able to be reset or change link. - **/ -static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) -{ - s32 timeout = PHY_CFG_TIMEOUT; - s32 ret_val = 0; - u32 mask = E1000_NVM_CFG_DONE_PORT_0; - - if (hw->bus.func == 1) - mask = E1000_NVM_CFG_DONE_PORT_1; - else if (hw->bus.func == E1000_FUNC_2) - mask = E1000_NVM_CFG_DONE_PORT_2; - else if (hw->bus.func == E1000_FUNC_3) - mask = E1000_NVM_CFG_DONE_PORT_3; - - while (timeout) { - if (rd32(E1000_EEMNGCTL) & mask) - break; - msleep(1); - timeout--; - } - if (!timeout) - hw_dbg("MNG configuration cycle has not completed.\n"); - - /* If EEPROM is not marked present, init the PHY manually */ - if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && - (hw->phy.type == e1000_phy_igp_3)) - igb_phy_init_script_igp3(hw); - - return ret_val; -} - -/** - * igb_check_for_link_82575 - Check for link - * @hw: pointer to the HW structure - * - * If sgmii is enabled, then use the pcs register to determine link, otherwise - * use the generic interface for determining link. 
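- *
- * On the serdes path mac.get_link_status is cleared as soon as
- * serdes_has_link is set, so callers stop polling once link is up.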
- **/ -static s32 igb_check_for_link_82575(struct e1000_hw *hw) -{ - s32 ret_val; - u16 speed, duplex; - - if (hw->phy.media_type != e1000_media_type_copper) { - ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, - &duplex); - /* - * Use this flag to determine if link needs to be checked or - * not. If we have link clear the flag so that we do not - * continue to check for link. - */ - hw->mac.get_link_status = !hw->mac.serdes_has_link; - } else { - ret_val = igb_check_for_copper_link(hw); - } - - return ret_val; -} - -/** - * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown - * @hw: pointer to the HW structure - **/ -void igb_power_up_serdes_link_82575(struct e1000_hw *hw) -{ - u32 reg; - - - if ((hw->phy.media_type != e1000_media_type_internal_serdes) && - !igb_sgmii_active_82575(hw)) - return; - - /* Enable PCS to turn on link */ - reg = rd32(E1000_PCS_CFG0); - reg |= E1000_PCS_CFG_PCS_EN; - wr32(E1000_PCS_CFG0, reg); - - /* Power up the laser */ - reg = rd32(E1000_CTRL_EXT); - reg &= ~E1000_CTRL_EXT_SDP3_DATA; - wr32(E1000_CTRL_EXT, reg); - - /* flush the write to verify completion */ - wrfl(); - msleep(1); -} - -/** - * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex - * @hw: pointer to the HW structure - * @speed: stores the current speed - * @duplex: stores the current duplex - * - * Using the physical coding sub-layer (PCS), retrieve the current speed and - * duplex, then store the values in the pointers provided. - **/ -static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, - u16 *duplex) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 pcs; - - /* Set up defaults for the return values of this function */ - mac->serdes_has_link = false; - *speed = 0; - *duplex = 0; - - /* - * Read the PCS Status register for link state. For non-copper mode, - * the status register is not accurate. The PCS status register is - * used instead. - */ - pcs = rd32(E1000_PCS_LSTAT); - - /* - * The link up bit determines when link is up on autoneg. The sync ok - * gets set once both sides sync up and agree upon link. Stable link - * can be determined by checking for both link up and link sync ok - */ - if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { - mac->serdes_has_link = true; - - /* Detect and store PCS speed */ - if (pcs & E1000_PCS_LSTS_SPEED_1000) { - *speed = SPEED_1000; - } else if (pcs & E1000_PCS_LSTS_SPEED_100) { - *speed = SPEED_100; - } else { - *speed = SPEED_10; - } - - /* Detect and store PCS duplex */ - if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { - *duplex = FULL_DUPLEX; - } else { - *duplex = HALF_DUPLEX; - } - } - - return 0; -} - -/** - * igb_shutdown_serdes_link_82575 - Remove link during power down - * @hw: pointer to the HW structure - * - * In the case of fiber serdes, shut down optics and PCS on driver unload - * when management pass thru is not enabled. 
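- *
- * This is the inverse of igb_power_up_serdes_link_82575(): the PCS is
- * disabled via PCS_CFG0 and SDP3 is driven high to power the laser down.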
- **/ -void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) -{ - u32 reg; - - if (hw->phy.media_type != e1000_media_type_internal_serdes && - igb_sgmii_active_82575(hw)) - return; - - if (!igb_enable_mng_pass_thru(hw)) { - /* Disable PCS to turn off link */ - reg = rd32(E1000_PCS_CFG0); - reg &= ~E1000_PCS_CFG_PCS_EN; - wr32(E1000_PCS_CFG0, reg); - - /* shutdown the laser */ - reg = rd32(E1000_CTRL_EXT); - reg |= E1000_CTRL_EXT_SDP3_DATA; - wr32(E1000_CTRL_EXT, reg); - - /* flush the write to verify completion */ - wrfl(); - msleep(1); - } -} - -/** - * igb_reset_hw_82575 - Reset hardware - * @hw: pointer to the HW structure - * - * This resets the hardware into a known state. This is a - * function pointer entry point called by the api module. - **/ -static s32 igb_reset_hw_82575(struct e1000_hw *hw) -{ - u32 ctrl, icr; - s32 ret_val; - - /* - * Prevent the PCI-E bus from sticking if there is no TLP connection - * on the last TLP read/write transaction when MAC is reset. - */ - ret_val = igb_disable_pcie_master(hw); - if (ret_val) - hw_dbg("PCI-E Master disable polling has failed.\n"); - - /* set the completion timeout for interface */ - ret_val = igb_set_pcie_completion_timeout(hw); - if (ret_val) { - hw_dbg("PCI-E Set completion timeout has failed.\n"); - } - - hw_dbg("Masking off all interrupts\n"); - wr32(E1000_IMC, 0xffffffff); - - wr32(E1000_RCTL, 0); - wr32(E1000_TCTL, E1000_TCTL_PSP); - wrfl(); - - msleep(10); - - ctrl = rd32(E1000_CTRL); - - hw_dbg("Issuing a global reset to MAC\n"); - wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); - - ret_val = igb_get_auto_rd_done(hw); - if (ret_val) { - /* - * When auto config read does not complete, do not - * return with an error. This can happen in situations - * where there is no eeprom and prevents getting link. - */ - hw_dbg("Auto Read Done did not complete\n"); - } - - /* If EEPROM is not present, run manual init scripts */ - if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) - igb_reset_init_script_82575(hw); - - /* Clear any pending interrupt events. */ - wr32(E1000_IMC, 0xffffffff); - icr = rd32(E1000_ICR); - - /* Install any alternate MAC address into RAR0 */ - ret_val = igb_check_alt_mac_addr(hw); - - return ret_val; -} - -/** - * igb_init_hw_82575 - Initialize hardware - * @hw: pointer to the HW structure - * - * This inits the hardware readying it for operation. - **/ -static s32 igb_init_hw_82575(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; - u16 i, rar_count = mac->rar_entry_count; - - /* Initialize identification LED */ - ret_val = igb_id_led_init(hw); - if (ret_val) { - hw_dbg("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ - } - - /* Disabling VLAN filtering */ - hw_dbg("Initializing the IEEE VLAN\n"); - igb_clear_vfta(hw); - - /* Setup the receive address */ - igb_init_rx_addrs(hw, rar_count); - - /* Zero out the Multicast HASH table */ - hw_dbg("Zeroing the MTA\n"); - for (i = 0; i < mac->mta_reg_count; i++) - array_wr32(E1000_MTA, i, 0); - - /* Zero out the Unicast HASH table */ - hw_dbg("Zeroing the UTA\n"); - for (i = 0; i < mac->uta_reg_count; i++) - array_wr32(E1000_UTA, i, 0); - - /* Setup link and flow control */ - ret_val = igb_setup_link(hw); - - /* - * Clear all of the statistics registers (clear on read). It is - * important that we do this after we have tried to establish link - * because the symbol error count will increment wildly if there - * is no link. 
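- * A bare read of each register is sufficient; the values are
- * discarded (see igb_clear_hw_cntrs_82575() below).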
- */ - igb_clear_hw_cntrs_82575(hw); - - return ret_val; -} - -/** - * igb_setup_copper_link_82575 - Configure copper link settings - * @hw: pointer to the HW structure - * - * Configures the link for auto-neg or forced speed and duplex. Then we check - * for link, once link is established calls to configure collision distance - * and flow control are called. - **/ -static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) -{ - u32 ctrl; - s32 ret_val; - - ctrl = rd32(E1000_CTRL); - ctrl |= E1000_CTRL_SLU; - ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - wr32(E1000_CTRL, ctrl); - - ret_val = igb_setup_serdes_link_82575(hw); - if (ret_val) - goto out; - - if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { - /* allow time for SFP cage time to power up phy */ - msleep(300); - - ret_val = hw->phy.ops.reset(hw); - if (ret_val) { - hw_dbg("Error resetting the PHY.\n"); - goto out; - } - } - switch (hw->phy.type) { - case e1000_phy_m88: - if (hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) - ret_val = igb_copper_link_setup_m88_gen2(hw); - else - ret_val = igb_copper_link_setup_m88(hw); - break; - case e1000_phy_igp_3: - ret_val = igb_copper_link_setup_igp(hw); - break; - case e1000_phy_82580: - ret_val = igb_copper_link_setup_82580(hw); - break; - default: - ret_val = -E1000_ERR_PHY; - break; - } - - if (ret_val) - goto out; - - ret_val = igb_setup_copper_link(hw); -out: - return ret_val; -} - -/** - * igb_setup_serdes_link_82575 - Setup link for serdes - * @hw: pointer to the HW structure - * - * Configure the physical coding sub-layer (PCS) link. The PCS link is - * used on copper connections where the serialized gigabit media independent - * interface (sgmii), or serdes fiber is being used. Configures the link - * for auto-negotiation or forces speed/duplex. - **/ -static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) -{ - u32 ctrl_ext, ctrl_reg, reg; - bool pcs_autoneg; - s32 ret_val = E1000_SUCCESS; - u16 data; - - if ((hw->phy.media_type != e1000_media_type_internal_serdes) && - !igb_sgmii_active_82575(hw)) - return ret_val; - - - /* - * On the 82575, SerDes loopback mode persists until it is - * explicitly turned off or a power cycle is performed. A read to - * the register does not indicate its status. Therefore, we ensure - * loopback mode is disabled during initialization. 
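- * Hence the unconditional write of E1000_SCTL_DISABLE_SERDES_LOOPBACK
- * below, rather than a read-modify-write of E1000_SCTL.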
- */ - wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); - - /* power on the sfp cage if present */ - ctrl_ext = rd32(E1000_CTRL_EXT); - ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; - wr32(E1000_CTRL_EXT, ctrl_ext); - - ctrl_reg = rd32(E1000_CTRL); - ctrl_reg |= E1000_CTRL_SLU; - - if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { - /* set both sw defined pins */ - ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; - - /* Set switch control to serdes energy detect */ - reg = rd32(E1000_CONNSW); - reg |= E1000_CONNSW_ENRGSRC; - wr32(E1000_CONNSW, reg); - } - - reg = rd32(E1000_PCS_LCTL); - - /* default pcs_autoneg to the same setting as mac autoneg */ - pcs_autoneg = hw->mac.autoneg; - - switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { - case E1000_CTRL_EXT_LINK_MODE_SGMII: - /* sgmii mode lets the phy handle forcing speed/duplex */ - pcs_autoneg = true; - /* autoneg time out should be disabled for SGMII mode */ - reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); - break; - case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: - /* disable PCS autoneg and support parallel detect only */ - pcs_autoneg = false; - default: - if (hw->mac.type == e1000_82575 || - hw->mac.type == e1000_82576) { - ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); - if (ret_val) { - printk(KERN_DEBUG "NVM Read Error\n\n"); - return ret_val; - } - - if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) - pcs_autoneg = false; - } - - /* - * non-SGMII modes only supports a speed of 1000/Full for the - * link so it is best to just force the MAC and let the pcs - * link either autoneg or be forced to 1000/Full - */ - ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | - E1000_CTRL_FD | E1000_CTRL_FRCDPX; - - /* set speed of 1000/Full if speed/duplex is forced */ - reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; - break; - } - - wr32(E1000_CTRL, ctrl_reg); - - /* - * New SerDes mode allows for forcing speed or autonegotiating speed - * at 1gb. Autoneg should be default set by most drivers. This is the - * mode that will be compatible with older link partners and switches. - * However, both are supported by the hardware and some drivers/tools. - */ - reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | - E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); - - /* - * We force flow control to prevent the CTRL register values from being - * overwritten by the autonegotiated flow control values - */ - reg |= E1000_PCS_LCTL_FORCE_FCTRL; - - if (pcs_autoneg) { - /* Set PCS register for autoneg */ - reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ - E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ - hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); - } else { - /* Set PCS register for forced link */ - reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ - - hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); - } - - wr32(E1000_PCS_LCTL, reg); - - if (!igb_sgmii_active_82575(hw)) - igb_force_mac_fc(hw); - - return ret_val; -} - -/** - * igb_sgmii_active_82575 - Return sgmii state - * @hw: pointer to the HW structure - * - * 82575 silicon has a serialized gigabit media independent interface (sgmii) - * which can be enabled for use in the embedded applications. Simply - * return the current state of the sgmii interface. 
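- *
- * Most 82575-family code uses this as a media guard, e.g.
- * (illustrative only):
- *
- *	if (igb_sgmii_active_82575(hw))
- *		... take the SGMII path ...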
- **/ -static bool igb_sgmii_active_82575(struct e1000_hw *hw) -{ - struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; - return dev_spec->sgmii_active; -} - -/** - * igb_reset_init_script_82575 - Inits HW defaults after reset - * @hw: pointer to the HW structure - * - * Inits recommended HW defaults after a reset when there is no EEPROM - * detected. This is only for the 82575. - **/ -static s32 igb_reset_init_script_82575(struct e1000_hw *hw) -{ - if (hw->mac.type == e1000_82575) { - hw_dbg("Running reset init script for 82575\n"); - /* SerDes configuration via SERDESCTRL */ - igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); - igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); - igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); - igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); - - /* CCM configuration via CCMCTL register */ - igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); - igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); - - /* PCIe lanes configuration */ - igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); - igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); - igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); - igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); - - /* PCIe PLL Configuration */ - igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); - igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); - igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); - } - - return 0; -} - -/** - * igb_read_mac_addr_82575 - Read device MAC address - * @hw: pointer to the HW structure - **/ -static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) -{ - s32 ret_val = 0; - - /* - * If there's an alternate MAC address place it in RAR0 - * so that it will override the Si installed default perm - * address. - */ - ret_val = igb_check_alt_mac_addr(hw); - if (ret_val) - goto out; - - ret_val = igb_read_mac_addr(hw); - -out: - return ret_val; -} - -/** - * igb_power_down_phy_copper_82575 - Remove link during PHY power down - * @hw: pointer to the HW structure - * - * In the case of a PHY power down to save power, or to turn off link during a - * driver unload, or wake on lan is not enabled, remove the link. - **/ -void igb_power_down_phy_copper_82575(struct e1000_hw *hw) -{ - /* If the management interface is not enabled, then power down */ - if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) - igb_power_down_phy_copper(hw); -} - -/** - * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters - * @hw: pointer to the HW structure - * - * Clears the hardware counters by reading the counter registers. 
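- *
- * Note that the serdes-only E1000_SCVPC counter is read only when the
- * interface is serdes or SGMII; it is not valid in copper configurations.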
- **/ -static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) -{ - igb_clear_hw_cntrs_base(hw); - - rd32(E1000_PRC64); - rd32(E1000_PRC127); - rd32(E1000_PRC255); - rd32(E1000_PRC511); - rd32(E1000_PRC1023); - rd32(E1000_PRC1522); - rd32(E1000_PTC64); - rd32(E1000_PTC127); - rd32(E1000_PTC255); - rd32(E1000_PTC511); - rd32(E1000_PTC1023); - rd32(E1000_PTC1522); - - rd32(E1000_ALGNERRC); - rd32(E1000_RXERRC); - rd32(E1000_TNCRS); - rd32(E1000_CEXTERR); - rd32(E1000_TSCTC); - rd32(E1000_TSCTFC); - - rd32(E1000_MGTPRC); - rd32(E1000_MGTPDC); - rd32(E1000_MGTPTC); - - rd32(E1000_IAC); - rd32(E1000_ICRXOC); - - rd32(E1000_ICRXPTC); - rd32(E1000_ICRXATC); - rd32(E1000_ICTXPTC); - rd32(E1000_ICTXATC); - rd32(E1000_ICTXQEC); - rd32(E1000_ICTXQMTC); - rd32(E1000_ICRXDMTC); - - rd32(E1000_CBTMPC); - rd32(E1000_HTDPMC); - rd32(E1000_CBRMPC); - rd32(E1000_RPTHC); - rd32(E1000_HGPTC); - rd32(E1000_HTCBDPC); - rd32(E1000_HGORCL); - rd32(E1000_HGORCH); - rd32(E1000_HGOTCL); - rd32(E1000_HGOTCH); - rd32(E1000_LENERRS); - - /* This register should not be read in copper configurations */ - if (hw->phy.media_type == e1000_media_type_internal_serdes || - igb_sgmii_active_82575(hw)) - rd32(E1000_SCVPC); -} - -/** - * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable - * @hw: pointer to the HW structure - * - * After rx enable if managability is enabled then there is likely some - * bad data at the start of the fifo and possibly in the DMA fifo. This - * function clears the fifos and flushes any packets that came in as rx was - * being enabled. - **/ -void igb_rx_fifo_flush_82575(struct e1000_hw *hw) -{ - u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; - int i, ms_wait; - - if (hw->mac.type != e1000_82575 || - !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) - return; - - /* Disable all RX queues */ - for (i = 0; i < 4; i++) { - rxdctl[i] = rd32(E1000_RXDCTL(i)); - wr32(E1000_RXDCTL(i), - rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); - } - /* Poll all queues to verify they have shut down */ - for (ms_wait = 0; ms_wait < 10; ms_wait++) { - msleep(1); - rx_enabled = 0; - for (i = 0; i < 4; i++) - rx_enabled |= rd32(E1000_RXDCTL(i)); - if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) - break; - } - - if (ms_wait == 10) - hw_dbg("Queue disable timed out after 10ms\n"); - - /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all - * incoming packets are rejected. Set enable and wait 2ms so that - * any packet that was coming in as RCTL.EN was set is flushed - */ - rfctl = rd32(E1000_RFCTL); - wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); - - rlpml = rd32(E1000_RLPML); - wr32(E1000_RLPML, 0); - - rctl = rd32(E1000_RCTL); - temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); - temp_rctl |= E1000_RCTL_LPE; - - wr32(E1000_RCTL, temp_rctl); - wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); - wrfl(); - msleep(2); - - /* Enable RX queues that were previously enabled and restore our - * previous state - */ - for (i = 0; i < 4; i++) - wr32(E1000_RXDCTL(i), rxdctl[i]); - wr32(E1000_RCTL, rctl); - wrfl(); - - wr32(E1000_RLPML, rlpml); - wr32(E1000_RFCTL, rfctl); - - /* Flush receive errors generated by workaround */ - rd32(E1000_ROC); - rd32(E1000_RNBC); - rd32(E1000_MPC); -} - -/** - * igb_set_pcie_completion_timeout - set pci-e completion timeout - * @hw: pointer to the HW structure - * - * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, - * however the hardware default for these parts is 500us to 1ms which is less - * than the 10ms recommended by the pci-e spec. 
To address this we need to - * increase the value to either 10ms to 200ms for capability version 1 config, - * or 16ms to 55ms for version 2. - **/ -static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) -{ - u32 gcr = rd32(E1000_GCR); - s32 ret_val = 0; - u16 pcie_devctl2; - - /* only take action if timeout value is defaulted to 0 */ - if (gcr & E1000_GCR_CMPL_TMOUT_MASK) - goto out; - - /* - * if capababilities version is type 1 we can write the - * timeout of 10ms to 200ms through the GCR register - */ - if (!(gcr & E1000_GCR_CAP_VER2)) { - gcr |= E1000_GCR_CMPL_TMOUT_10ms; - goto out; - } - - /* - * for version 2 capabilities we need to write the config space - * directly in order to set the completion timeout value for - * 16ms to 55ms - */ - ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, - &pcie_devctl2); - if (ret_val) - goto out; - - pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; - - ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, - &pcie_devctl2); -out: - /* disable completion timeout resend */ - gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; - - wr32(E1000_GCR, gcr); - return ret_val; -} - -/** - * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing - * @hw: pointer to the hardware struct - * @enable: state to enter, either enabled or disabled - * @pf: Physical Function pool - do not set anti-spoofing for the PF - * - * enables/disables L2 switch anti-spoofing functionality. - **/ -void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) -{ - u32 dtxswc; - - switch (hw->mac.type) { - case e1000_82576: - case e1000_i350: - dtxswc = rd32(E1000_DTXSWC); - if (enable) { - dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | - E1000_DTXSWC_VLAN_SPOOF_MASK); - /* The PF can spoof - it has to in order to - * support emulation mode NICs */ - dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); - } else { - dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | - E1000_DTXSWC_VLAN_SPOOF_MASK); - } - wr32(E1000_DTXSWC, dtxswc); - break; - default: - break; - } -} - -/** - * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback - * @hw: pointer to the hardware struct - * @enable: state to enter, either enabled or disabled - * - * enables/disables L2 switch loopback functionality. - **/ -void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) -{ - u32 dtxswc = rd32(E1000_DTXSWC); - - if (enable) - dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; - else - dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; - - wr32(E1000_DTXSWC, dtxswc); -} - -/** - * igb_vmdq_set_replication_pf - enable or disable vmdq replication - * @hw: pointer to the hardware struct - * @enable: state to enter, either enabled or disabled - * - * enables/disables replication of packets across multiple pools. - **/ -void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) -{ - u32 vt_ctl = rd32(E1000_VT_CTL); - - if (enable) - vt_ctl |= E1000_VT_CTL_VM_REPL_EN; - else - vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; - - wr32(E1000_VT_CTL, vt_ctl); -} - -/** - * igb_read_phy_reg_82580 - Read 82580 MDI control register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Reads the MDI control register in the PHY at offset and stores the - * information read to data. 
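- *
- * The PHY semaphore is acquired around the MDIC access and released
- * afterwards, so callers need no additional locking.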
- **/ -static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) -{ - s32 ret_val; - - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - - ret_val = igb_read_phy_reg_mdic(hw, offset, data); - - hw->phy.ops.release(hw); - -out: - return ret_val; -} - -/** - * igb_write_phy_reg_82580 - Write 82580 MDI control register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write to register at offset - * - * Writes data to MDI control register in the PHY at offset. - **/ -static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) -{ - s32 ret_val; - - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - - ret_val = igb_write_phy_reg_mdic(hw, offset, data); - - hw->phy.ops.release(hw); - -out: - return ret_val; -} - -/** - * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits - * @hw: pointer to the HW structure - * - * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on - * the values found in the EEPROM. This addresses an issue in which these - * bits are not restored from EEPROM after reset. - **/ -static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u32 mdicnfg; - u16 nvm_data = 0; - - if (hw->mac.type != e1000_82580) - goto out; - if (!igb_sgmii_active_82575(hw)) - goto out; - - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + - NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, - &nvm_data); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - - mdicnfg = rd32(E1000_MDICNFG); - if (nvm_data & NVM_WORD24_EXT_MDIO) - mdicnfg |= E1000_MDICNFG_EXT_MDIO; - if (nvm_data & NVM_WORD24_COM_MDIO) - mdicnfg |= E1000_MDICNFG_COM_MDIO; - wr32(E1000_MDICNFG, mdicnfg); -out: - return ret_val; -} - -/** - * igb_reset_hw_82580 - Reset hardware - * @hw: pointer to the HW structure - * - * This resets function or entire device (all ports, etc.) - * to a known state. - **/ -static s32 igb_reset_hw_82580(struct e1000_hw *hw) -{ - s32 ret_val = 0; - /* BH SW mailbox bit in SW_FW_SYNC */ - u16 swmbsw_mask = E1000_SW_SYNCH_MB; - u32 ctrl, icr; - bool global_device_reset = hw->dev_spec._82575.global_device_reset; - - - hw->dev_spec._82575.global_device_reset = false; - - /* Get current control state. */ - ctrl = rd32(E1000_CTRL); - - /* - * Prevent the PCI-E bus from sticking if there is no TLP connection - * on the last TLP read/write transaction when MAC is reset. - */ - ret_val = igb_disable_pcie_master(hw); - if (ret_val) - hw_dbg("PCI-E Master disable polling has failed.\n"); - - hw_dbg("Masking off all interrupts\n"); - wr32(E1000_IMC, 0xffffffff); - wr32(E1000_RCTL, 0); - wr32(E1000_TCTL, E1000_TCTL_PSP); - wrfl(); - - msleep(10); - - /* Determine whether or not a global dev reset is requested */ - if (global_device_reset && - igb_acquire_swfw_sync_82575(hw, swmbsw_mask)) - global_device_reset = false; - - if (global_device_reset && - !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) - ctrl |= E1000_CTRL_DEV_RST; - else - ctrl |= E1000_CTRL_RST; - - wr32(E1000_CTRL, ctrl); - wrfl(); - - /* Add delay to insure DEV_RST has time to complete */ - if (global_device_reset) - msleep(5); - - ret_val = igb_get_auto_rd_done(hw); - if (ret_val) { - /* - * When auto config read does not complete, do not - * return with an error. This can happen in situations - * where there is no eeprom and prevents getting link. 
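- * (Same policy as in igb_reset_hw_82575() above.)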
- */ - hw_dbg("Auto Read Done did not complete\n"); - } - - /* If EEPROM is not present, run manual init scripts */ - if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) - igb_reset_init_script_82575(hw); - - /* clear global device reset status bit */ - wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); - - /* Clear any pending interrupt events. */ - wr32(E1000_IMC, 0xffffffff); - icr = rd32(E1000_ICR); - - ret_val = igb_reset_mdicnfg_82580(hw); - if (ret_val) - hw_dbg("Could not reset MDICNFG based on EEPROM\n"); - - /* Install any alternate MAC address into RAR0 */ - ret_val = igb_check_alt_mac_addr(hw); - - /* Release semaphore */ - if (global_device_reset) - igb_release_swfw_sync_82575(hw, swmbsw_mask); - - return ret_val; -} - -/** - * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size - * @data: data received by reading RXPBS register - * - * The 82580 uses a table based approach for packet buffer allocation sizes. - * This function converts the retrieved value into the correct table value - * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 - * 0x0 36 72 144 1 2 4 8 16 - * 0x8 35 70 140 rsv rsv rsv rsv rsv - */ -u16 igb_rxpbs_adjust_82580(u32 data) -{ - u16 ret_val = 0; - - if (data < E1000_82580_RXPBS_TABLE_SIZE) - ret_val = e1000_82580_rxpbs_table[data]; - - return ret_val; -} - -/** - * igb_validate_nvm_checksum_with_offset - Validate EEPROM - * checksum - * @hw: pointer to the HW structure - * @offset: offset in words of the checksum protected region - * - * Calculates the EEPROM checksum by reading/adding each word of the EEPROM - * and then verifies that the sum of the EEPROM is equal to 0xBABA. - **/ -s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) -{ - s32 ret_val = 0; - u16 checksum = 0; - u16 i, nvm_data; - - for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { - ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - checksum += nvm_data; - } - - if (checksum != (u16) NVM_SUM) { - hw_dbg("NVM Checksum Invalid\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } - -out: - return ret_val; -} - -/** - * igb_update_nvm_checksum_with_offset - Update EEPROM - * checksum - * @hw: pointer to the HW structure - * @offset: offset in words of the checksum protected region - * - * Updates the EEPROM checksum by reading/adding each word of the EEPROM - * up to the checksum. Then calculates the EEPROM checksum and writes the - * value to the EEPROM. - **/ -s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) -{ - s32 ret_val; - u16 checksum = 0; - u16 i, nvm_data; - - for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { - ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); - if (ret_val) { - hw_dbg("NVM Read Error while updating checksum.\n"); - goto out; - } - checksum += nvm_data; - } - checksum = (u16) NVM_SUM - checksum; - ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, - &checksum); - if (ret_val) - hw_dbg("NVM Write Error while updating checksum.\n"); - -out: - return ret_val; -} - -/** - * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum - * @hw: pointer to the HW structure - * - * Calculates the EEPROM section checksum by reading/adding each word of - * the EEPROM and then verifies that the sum of the EEPROM is - * equal to 0xBABA. 
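- *
- * The stored checksum word is chosen so that the 16-bit sum of the
- * whole protected region, checksum word included, is exactly NVM_SUM
- * (0xBABA); see igb_update_nvm_checksum_with_offset() above.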
- **/ -static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 eeprom_regions_count = 1; - u16 j, nvm_data; - u16 nvm_offset; - - ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - - if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { - /* if checksums compatibility bit is set validate checksums - * for all 4 ports. */ - eeprom_regions_count = 4; - } - - for (j = 0; j < eeprom_regions_count; j++) { - nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); - ret_val = igb_validate_nvm_checksum_with_offset(hw, - nvm_offset); - if (ret_val != 0) - goto out; - } - -out: - return ret_val; -} - -/** - * igb_update_nvm_checksum_82580 - Update EEPROM checksum - * @hw: pointer to the HW structure - * - * Updates the EEPROM section checksums for all 4 ports by reading/adding - * each word of the EEPROM up to the checksum. Then calculates the EEPROM - * checksum and writes the value to the EEPROM. - **/ -static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) -{ - s32 ret_val; - u16 j, nvm_data; - u16 nvm_offset; - - ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); - if (ret_val) { - hw_dbg("NVM Read Error while updating checksum" - " compatibility bit.\n"); - goto out; - } - - if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { - /* set compatibility bit to validate checksums appropriately */ - nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; - ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, - &nvm_data); - if (ret_val) { - hw_dbg("NVM Write Error while updating checksum" - " compatibility bit.\n"); - goto out; - } - } - - for (j = 0; j < 4; j++) { - nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); - ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); - if (ret_val) - goto out; - } - -out: - return ret_val; -} - -/** - * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum - * @hw: pointer to the HW structure - * - * Calculates the EEPROM section checksum by reading/adding each word of - * the EEPROM and then verifies that the sum of the EEPROM is - * equal to 0xBABA. - **/ -static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 j; - u16 nvm_offset; - - for (j = 0; j < 4; j++) { - nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); - ret_val = igb_validate_nvm_checksum_with_offset(hw, - nvm_offset); - if (ret_val != 0) - goto out; - } - -out: - return ret_val; -} - -/** - * igb_update_nvm_checksum_i350 - Update EEPROM checksum - * @hw: pointer to the HW structure - * - * Updates the EEPROM section checksums for all 4 ports by reading/adding - * each word of the EEPROM up to the checksum. Then calculates the EEPROM - * checksum and writes the value to the EEPROM. - **/ -static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 j; - u16 nvm_offset; - - for (j = 0; j < 4; j++) { - nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); - ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); - if (ret_val != 0) - goto out; - } - -out: - return ret_val; -} - -/** - * igb_set_eee_i350 - Enable/disable EEE support - * @hw: pointer to the HW structure - * - * Enable/disable EEE based on setting in dev_spec structure. 
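- * EEE is touched only on i350 ports whose CTRL_EXT link-mode field is
- * clear (internal copper); any other MAC type or link mode leaves
- * IPCNFG and EEER unchanged.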
- * - **/ -s32 igb_set_eee_i350(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u32 ipcnfg, eeer, ctrl_ext; - - ctrl_ext = rd32(E1000_CTRL_EXT); - if ((hw->mac.type != e1000_i350) || - (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK)) - goto out; - ipcnfg = rd32(E1000_IPCNFG); - eeer = rd32(E1000_EEER); - - /* enable or disable per user setting */ - if (!(hw->dev_spec._82575.eee_disable)) { - ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | - E1000_IPCNFG_EEE_100M_AN); - eeer |= (E1000_EEER_TX_LPI_EN | - E1000_EEER_RX_LPI_EN | - E1000_EEER_LPI_FC); - - } else { - ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | - E1000_IPCNFG_EEE_100M_AN); - eeer &= ~(E1000_EEER_TX_LPI_EN | - E1000_EEER_RX_LPI_EN | - E1000_EEER_LPI_FC); - } - wr32(E1000_IPCNFG, ipcnfg); - wr32(E1000_EEER, eeer); -out: - - return ret_val; -} - -static struct e1000_mac_operations e1000_mac_ops_82575 = { - .init_hw = igb_init_hw_82575, - .check_for_link = igb_check_for_link_82575, - .rar_set = igb_rar_set, - .read_mac_addr = igb_read_mac_addr_82575, - .get_speed_and_duplex = igb_get_speed_and_duplex_copper, -}; - -static struct e1000_phy_operations e1000_phy_ops_82575 = { - .acquire = igb_acquire_phy_82575, - .get_cfg_done = igb_get_cfg_done_82575, - .release = igb_release_phy_82575, -}; - -static struct e1000_nvm_operations e1000_nvm_ops_82575 = { - .acquire = igb_acquire_nvm_82575, - .read = igb_read_nvm_eerd, - .release = igb_release_nvm_82575, - .write = igb_write_nvm_spi, -}; - -const struct e1000_info e1000_82575_info = { - .get_invariants = igb_get_invariants_82575, - .mac_ops = &e1000_mac_ops_82575, - .phy_ops = &e1000_phy_ops_82575, - .nvm_ops = &e1000_nvm_ops_82575, -}; - diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h deleted file mode 100644 index 786e110..0000000 --- a/drivers/net/igb/e1000_82575.h +++ /dev/null @@ -1,258 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_82575_H_ -#define _E1000_82575_H_ - -extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); -extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw); -extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw); -extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); - -#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ - (ID_LED_DEF1_DEF2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_OFF1_ON2)) - -#define E1000_RAR_ENTRIES_82575 16 -#define E1000_RAR_ENTRIES_82576 24 -#define E1000_RAR_ENTRIES_82580 24 -#define E1000_RAR_ENTRIES_I350 32 - -#define E1000_SW_SYNCH_MB 0x00000100 -#define E1000_STAT_DEV_RST_SET 0x00100000 -#define E1000_CTRL_DEV_RST 0x20000000 - -/* SRRCTL bit definitions */ -#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define E1000_SRRCTL_DROP_EN 0x80000000 -#define E1000_SRRCTL_TIMESTAMP 0x40000000 - -#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 -#define E1000_MRQC_ENABLE_VMDQ 0x00000003 -#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 -#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 -#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 -#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 - -#define E1000_EICR_TX_QUEUE ( \ - E1000_EICR_TX_QUEUE0 | \ - E1000_EICR_TX_QUEUE1 | \ - E1000_EICR_TX_QUEUE2 | \ - E1000_EICR_TX_QUEUE3) - -#define E1000_EICR_RX_QUEUE ( \ - E1000_EICR_RX_QUEUE0 | \ - E1000_EICR_RX_QUEUE1 | \ - E1000_EICR_RX_QUEUE2 | \ - E1000_EICR_RX_QUEUE3) - -/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ -#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ -#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ - -/* Receive Descriptor - Advanced */ -union e1000_adv_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - struct { - __le16 pkt_info; /* RSS type, Packet type */ - __le16 hdr_info; /* Split Header, - * header buffer length */ - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - __le32 status_error; /* ext status/error */ - __le16 length; /* Packet length */ - __le16 vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ -}; - -#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 -#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 -#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ -#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ - -/* Transmit Descriptor - Advanced */ -union e1000_adv_tx_desc { - struct { - __le64 buffer_addr; /* Address of descriptor's data buf */ - __le32 cmd_type_len; - __le32 olinfo_status; - } read; - struct { - __le64 rsvd; /* Reserved */ - __le32 nxtseq_seed; - __le32 status; - } wb; -}; - -/* Adv Transmit Descriptor Config Masks */ -#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ -#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ -#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ -#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ - -/* Context descriptors */ -struct e1000_adv_tx_context_desc { - __le32 vlan_macip_lens; - __le32 seqnum_seed; - __le32 type_tucmd_mlhl; - __le32 mss_l4len_idx; -}; - -#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ -/* IPSec Encrypt Enable for ESP */ -#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ -/* Adv ctxt IPSec SA IDX mask */ -/* Adv ctxt IPSec ESP len mask */ - -/* Additional Transmit Descriptor Control definitions */ -#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ -/* Tx Queue Arbitration Priority 0=low, 1=high */ - -/* Additional Receive Descriptor Control definitions */ -#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ - -/* Direct Cache Access (DCA) definitions */ -#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ -#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ - -#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ -#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ -#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ -#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ - -#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ -#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) 
/* DCA Tx Desc enable */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ - -/* Additional DCA related definitions, note change in position of CPUID */ -#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ -#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ -#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ -#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ - -/* ETQF register bit definitions */ -#define E1000_ETQF_FILTER_ENABLE (1 << 26) -#define E1000_ETQF_1588 (1 << 30) - -/* FTQF register bit definitions */ -#define E1000_FTQF_VF_BP 0x00008000 -#define E1000_FTQF_1588_TIME_STAMP 0x08000000 -#define E1000_FTQF_MASK 0xF0000000 -#define E1000_FTQF_MASK_PROTO_BP 0x10000000 -#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 - -#define E1000_NVM_APME_82575 0x0400 -#define MAX_NUM_VFS 8 - -#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ -#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ -#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ -#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 -#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ - -/* Easy defines for setting default pool, would normally be left a zero */ -#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 -#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) - -/* Other useful VMD_CTL register defines */ -#define E1000_VT_CTL_IGNORE_MAC (1 << 28) -#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) -#define E1000_VT_CTL_VM_REPL_EN (1 << 30) - -/* Per VM Offload register setup */ -#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ -#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ -#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ -#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ -#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ -#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ -#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ -#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ -#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ -#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ - -#define E1000_VLVF_ARRAY_SIZE 32 -#define E1000_VLVF_VLANID_MASK 0x00000FFF -#define E1000_VLVF_POOLSEL_SHIFT 12 -#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) -#define E1000_VLVF_LVLAN 0x00100000 -#define E1000_VLVF_VLANID_ENABLE 0x80000000 - -#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ -#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ - -#define E1000_IOVCTL 0x05BBC -#define E1000_IOVCTL_REUSE_VFQ 0x00000001 - -#define E1000_RPLOLR_STRVLAN 0x40000000 -#define E1000_RPLOLR_STRCRC 0x80000000 - -#define E1000_DTXCTL_8023LL 0x0004 -#define E1000_DTXCTL_VLAN_ADDED 0x0008 -#define E1000_DTXCTL_OOS_ENABLE 0x0010 -#define E1000_DTXCTL_MDP_EN 0x0020 -#define E1000_DTXCTL_SPOOF_INT 0x0040 - -#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) - -#define ALL_QUEUES 0xFFFF - -/* RX packet buffer size defines */ -#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F -void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); -void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); -void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); -u16 igb_rxpbs_adjust_82580(u32 data); -s32 
igb_set_eee_i350(struct e1000_hw *); - -#endif diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h deleted file mode 100644 index 7b8ddd8..0000000 --- a/drivers/net/igb/e1000_defines.h +++ /dev/null @@ -1,834 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_DEFINES_H_ -#define _E1000_DEFINES_H_ - -/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define REQ_RX_DESCRIPTOR_MULTIPLE 8 - -/* Definitions for power management and wakeup registers */ -/* Wake Up Control */ -#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ - -/* Wake Up Filter Control */ -#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ -#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ - -/* Extended Device Control */ -#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ -/* Physical Func Reset Done Indication */ -#define E1000_CTRL_EXT_PFRSTD 0x00004000 -#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 -#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 -#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 -#define E1000_CTRL_EXT_EIAME 0x01000000 -#define E1000_CTRL_EXT_IRCA 0x00000001 -/* Interrupt delay cancellation */ -/* Driver loaded bit for FW */ -#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 -/* Interrupt acknowledge Auto-mask */ -/* Clear Interrupt timers after IMS clear */ -/* packet buffer parity error detection enabled */ -/* descriptor FIFO parity error detection enable */ -#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ -#define E1000_I2CCMD_REG_ADDR_SHIFT 16 -#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 -#define E1000_I2CCMD_OPCODE_READ 0x08000000 -#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 -#define E1000_I2CCMD_READY 0x20000000 -#define E1000_I2CCMD_ERROR 0x80000000 -#define E1000_MAX_SGMII_PHY_REG_ADDR 255 -#define E1000_I2CCMD_PHY_TIMEOUT 200 -#define E1000_IVAR_VALID 0x80 -#define E1000_GPIE_NSICR 0x00000001 -#define E1000_GPIE_MSIX_MODE 0x00000010 -#define E1000_GPIE_EIAME 0x40000000 -#define E1000_GPIE_PBA 0x80000000 - -/* Receive Descriptor bit 
definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ - -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define E1000_RXDEXT_STATERR_TCPE 0x20000000 -#define E1000_RXDEXT_STATERR_IPE 0x40000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 - -/* Same mask, but for extended and packet split descriptors */ -#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) - -#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 -#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 -#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 -#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 - - -/* Management Control */ -#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ -#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ -#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ -/* Enable Neighbor Discovery Filtering */ -#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ -#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ -/* Enable MAC address filtering */ -#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 - -/* Receive Control */ -#define E1000_RCTL_EN 0x00000002 /* enable */ -#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ -#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ -#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ -#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ -#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ -#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ -#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ -#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ -#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ -#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ -#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ -#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ -#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ -#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ - -/* - * Use byte values for the following shift parameters - * Usage: - * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & - * E1000_PSRCTL_BSIZE0_MASK) | - * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & - * E1000_PSRCTL_BSIZE1_MASK) | - * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & - * E1000_PSRCTL_BSIZE2_MASK) | - * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; - * E1000_PSRCTL_BSIZE3_MASK)) - * where value0 = [128..16256], default=256 - * value1 = [1024..64512], default=4096 - * value2 = [0..64512], default=4096 - * value3 = [0..64512], default=0 - */ - -#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F -#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 -#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 -#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 - -#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift 
_right_ 7 */ -#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ -#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ -#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ - -/* SWFW_SYNC Definitions */ -#define E1000_SWFW_EEP_SM 0x1 -#define E1000_SWFW_PHY0_SM 0x2 -#define E1000_SWFW_PHY1_SM 0x4 -#define E1000_SWFW_PHY2_SM 0x20 -#define E1000_SWFW_PHY3_SM 0x40 - -/* FACTPS Definitions */ -/* Device Control */ -#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ -#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ -#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ -#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ -#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ -#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ -#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ -#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ -#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ -#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ -#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ -/* Defined polarity of Dock/Undock indication in SDP[0] */ -/* Reset both PHY ports, through PHYRST_N pin */ -/* enable link status from external LINK_0 and LINK_1 pins */ -#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ -#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ -#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ -#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ -#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ -#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ -#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ -/* Initiate an interrupt to manageability engine */ -#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ - -/* Bit definitions for the Management Data IO (MDIO) and Management Data - * Clock (MDC) pins in the Device Control Register. - */ - -#define E1000_CONNSW_ENRGSRC 0x4 -#define E1000_PCS_CFG_PCS_EN 8 -#define E1000_PCS_LCTL_FLV_LINK_UP 1 -#define E1000_PCS_LCTL_FSV_100 2 -#define E1000_PCS_LCTL_FSV_1000 4 -#define E1000_PCS_LCTL_FDV_FULL 8 -#define E1000_PCS_LCTL_FSD 0x10 -#define E1000_PCS_LCTL_FORCE_LINK 0x20 -#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 -#define E1000_PCS_LCTL_AN_ENABLE 0x10000 -#define E1000_PCS_LCTL_AN_RESTART 0x20000 -#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 -#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 - -#define E1000_PCS_LSTS_LINK_OK 1 -#define E1000_PCS_LSTS_SPEED_100 2 -#define E1000_PCS_LSTS_SPEED_1000 4 -#define E1000_PCS_LSTS_DUPLEX_FULL 8 -#define E1000_PCS_LSTS_SYNK_OK 0x10 - -/* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ -#define E1000_STATUS_FUNC_SHIFT 2 -#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ -#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ -/* Change in Dock/Undock state. Clear on write '0'. */ -/* Status of Master requests. */ -#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 -/* BMC external code execution disabled */ - -/* Constants used to intrepret the masked PCI-X bus speed. 
*/ - -#define SPEED_10 10 -#define SPEED_100 100 -#define SPEED_1000 1000 -#define HALF_DUPLEX 1 -#define FULL_DUPLEX 2 - - -#define ADVERTISE_10_HALF 0x0001 -#define ADVERTISE_10_FULL 0x0002 -#define ADVERTISE_100_HALF 0x0004 -#define ADVERTISE_100_FULL 0x0008 -#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ -#define ADVERTISE_1000_FULL 0x0020 - -/* 1000/H is not supported, nor spec-compliant. */ -#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ - ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ - ADVERTISE_1000_FULL) -#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ - ADVERTISE_100_HALF | ADVERTISE_100_FULL) -#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) -#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) -#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ - ADVERTISE_1000_FULL) -#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) - -#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX - -/* LED Control */ -#define E1000_LEDCTL_LED0_MODE_SHIFT 0 -#define E1000_LEDCTL_LED0_BLINK 0x00000080 - -#define E1000_LEDCTL_MODE_LED_ON 0xE -#define E1000_LEDCTL_MODE_LED_OFF 0xF - -/* Transmit Descriptor bit definitions */ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -/* Extended desc bits for Linksec and timesync */ - -/* Transmit Control */ -#define E1000_TCTL_EN 0x00000002 /* enable tx */ -#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ -#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ -#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ -#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ - -/* DMA Coalescing register fields */ -#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing - * Watchdog Timer */ -#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive - * Threshold */ -#define E1000_DMACR_DMACTHR_SHIFT 16 -#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe - * transactions */ -#define E1000_DMACR_DMAC_LX_SHIFT 28 -#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ - -#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit - * Threshold */ - -#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ - -#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate - * Threshold */ -#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in - * current window */ - -#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic - * Current Cnt */ - -#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold - * High val */ -#define E1000_FCRTC_RTH_COAL_SHIFT 4 -#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ - -/* SerDes Control */ -#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 - -/* Receive Checksum Control */ -#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ -#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ -#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ -#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ 
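As a usage aside (not part of the patch): the PSRCTL packing described in the usage comment above can be written out as a small helper. This is a minimal sketch, assuming the E1000_PSRCTL_* masks and shifts from this header, kernel u32 from linux/types.h, and a local ROUNDUP() stand-in that is not a kernel macro.

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

static u32 e1000_pack_psrctl(u32 value0, u32 value1, u32 value2, u32 value3)
{
	/* Each packet-split buffer size is rounded up to its granularity
	 * (128 bytes for buffer 0, 1 KB for the rest), shifted into its
	 * field, and masked to the field width. */
	return ((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
		E1000_PSRCTL_BSIZE0_MASK) |
	       ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
		E1000_PSRCTL_BSIZE1_MASK) |
	       ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
		E1000_PSRCTL_BSIZE2_MASK) |
	       ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
		E1000_PSRCTL_BSIZE3_MASK);
}

With the documented defaults (256, 4096, 4096, 0) this works out to 0x00040402.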
- -/* Header split receive */ -#define E1000_RFCTL_LEF 0x00040000 - -/* Collision related configuration parameters */ -#define E1000_COLLISION_THRESHOLD 15 -#define E1000_CT_SHIFT 4 -#define E1000_COLLISION_DISTANCE 63 -#define E1000_COLD_SHIFT 12 - -/* Ethertype field values */ -#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ - -#define MAX_JUMBO_FRAME_SIZE 0x3F00 - -/* PBA constants */ -#define E1000_PBA_34K 0x0022 -#define E1000_PBA_64K 0x0040 /* 64KB */ - -/* SW Semaphore Register */ -#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ -#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ - -/* Interrupt Cause Read */ -#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ -#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ -#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ -#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ -#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ -#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ -#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ -/* If this bit is asserted, the driver should claim the interrupt */ -#define E1000_ICR_INT_ASSERTED 0x80000000 -/* LAN connected device generates an interrupt */ -#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ - -/* Extended Interrupt Cause Read */ -#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ -#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ -#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ -#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ -#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ -#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ -#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ -#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ -#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ -/* TCP Timer */ - -/* - * This defines the bits that are set in the Interrupt Mask - * Set/Read Register. Each bit is documented below: - * o RXT0 = Receiver Timer Interrupt (ring 0) - * o TXDW = Transmit Descriptor Written Back - * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) - * o RXSEQ = Receive Sequence Error - * o LSC = Link Status Change - */ -#define IMS_ENABLE_MASK ( \ - E1000_IMS_RXT0 | \ - E1000_IMS_TXDW | \ - E1000_IMS_RXDMT0 | \ - E1000_IMS_RXSEQ | \ - E1000_IMS_LSC | \ - E1000_IMS_DOUTSYNC) - -/* Interrupt Mask Set */ -#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ -#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ -#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ -#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ -#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ -#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ -#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ - -/* Extended Interrupt Mask Set */ -#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ - -/* Interrupt Cause Set */ -#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ -#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ -#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ - -/* Extended Interrupt Cause Set */ - -/* Transmit Descriptor Control */ -/* Enable the counting of descriptors still to be processed.
*/ - -/* Flow Control Constants */ -#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 -#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 -#define FLOW_CONTROL_TYPE 0x8808 - -/* 802.1q VLAN Packet Size */ -#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ -#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ - -/* Receive Address */ -/* - * Number of high/low register pairs in the RAR. The RAR (Receive Address - * Registers) holds the directed and multicast addresses that we monitor. - * Technically, we have 16 spots. However, we reserve one of these spots - * (RAR[15]) for our directed address used by controllers with - * manageability enabled, allowing us room for 15 multicast addresses. - */ -#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ -#define E1000_RAL_MAC_ADDR_LEN 4 -#define E1000_RAH_MAC_ADDR_LEN 2 -#define E1000_RAH_POOL_MASK 0x03FC0000 -#define E1000_RAH_POOL_1 0x00040000 - -/* Error Codes */ -#define E1000_SUCCESS 0 -#define E1000_ERR_NVM 1 -#define E1000_ERR_PHY 2 -#define E1000_ERR_CONFIG 3 -#define E1000_ERR_PARAM 4 -#define E1000_ERR_MAC_INIT 5 -#define E1000_ERR_RESET 9 -#define E1000_ERR_MASTER_REQUESTS_PENDING 10 -#define E1000_BLK_PHY_RESET 12 -#define E1000_ERR_SWFW_SYNC 13 -#define E1000_NOT_IMPLEMENTED 14 -#define E1000_ERR_MBX 15 -#define E1000_ERR_INVALID_ARGUMENT 16 -#define E1000_ERR_NO_SPACE 17 -#define E1000_ERR_NVM_PBA_SECTION 18 - -/* Loop limit on how long we wait for auto-negotiation to complete */ -#define COPPER_LINK_UP_LIMIT 10 -#define PHY_AUTO_NEG_LIMIT 45 -#define PHY_FORCE_LIMIT 20 -/* Number of 100 microseconds we wait for PCI Express master disable */ -#define MASTER_DISABLE_TIMEOUT 800 -/* Number of milliseconds we wait for PHY configuration done after MAC reset */ -#define PHY_CFG_TIMEOUT 100 -/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ -/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ -#define AUTO_READ_DONE_TIMEOUT 10 - -/* Flow Control */ -#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ - -#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ -#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */ - -#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ -#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ -#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 -#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 -#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 -#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 -#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A -#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestamping */ - -#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF -#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 -#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 -#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 -#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 -#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 - -#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 -#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 -#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 -#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 -#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 -#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 -#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 -#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 -#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 -#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 -#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 - -#define E1000_TIMINCA_16NS_SHIFT 24 - -#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ -#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ -#define E1000_MDICNFG_PHY_MASK 0x03E00000 -#define E1000_MDICNFG_PHY_SHIFT 21 - -/* PCI Express Control */ -#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 -#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 -#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 -#define E1000_GCR_CAP_VER2 0x00040000 - -/* mPHY Address Control and Data Registers */ -#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ -#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 -#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ - -/* mPHY PCS CLK Register */ -#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ -/* mPHY Near End Digital Loopback Override Bit */ -#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 - -/* PHY Control Register */ -#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -#define MII_CR_POWER_DOWN 0x0800 /* Power down */ -#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -#define MII_CR_SPEED_1000 0x0040 -#define MII_CR_SPEED_100 0x2000 -#define MII_CR_SPEED_10 0x0000 - -/* PHY Status Register */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ - -/* Autoneg Advertisement Register */ -#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ -#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ -#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ -#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex
Capable */ -#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ -#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ - -/* Link Partner Ability Register (Base Page) */ -#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ -#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ - -/* Autoneg Expansion Register */ - -/* 1000BASE-T Control Register */ -#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ -#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ -#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ - /* 0=Configure PHY as Slave */ -#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ - /* 0=Automatic Master/Slave config */ - -/* 1000BASE-T Status Register */ -#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ -#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ - - -/* PHY 1000 MII Register/Bit Definitions */ -/* PHY Registers defined by IEEE */ -#define PHY_CONTROL 0x00 /* Control Register */ -#define PHY_STATUS 0x01 /* Status Register */ -#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ -#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ -#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ -#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ -#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ -#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ - -/* NVM Control */ -#define E1000_EECD_SK 0x00000001 /* NVM Clock */ -#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ -#define E1000_EECD_DI 0x00000004 /* NVM Data In */ -#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ -#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ -#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ -#define E1000_EECD_PRES 0x00000100 /* NVM Present */ -/* NVM Addressing bits based on type 0=small, 1=large */ -#define E1000_EECD_ADDR_BITS 0x00000400 -#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ -#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ -#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ -#define E1000_EECD_SIZE_EX_SHIFT 11 - -/* Offset to data in NVM read/write registers */ -#define E1000_NVM_RW_REG_DATA 16 -#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ -#define E1000_NVM_RW_REG_START 1 /* Start operation */ -#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ - -/* NVM Word Offsets */ -#define NVM_COMPAT 0x0003 -#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ -#define NVM_INIT_CONTROL2_REG 0x000F -#define NVM_INIT_CONTROL3_PORT_B 0x0014 -#define NVM_INIT_CONTROL3_PORT_A 0x0024 -#define NVM_ALT_MAC_ADDR_PTR 0x0037 -#define NVM_CHECKSUM_REG 0x003F -#define NVM_COMPATIBILITY_REG_3 0x0003 -#define NVM_COMPATIBILITY_BIT_MASK 0x8000 - -#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ -#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ -#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ -#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ - -#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? 
(0x40 + (0x40 * a)) : 0) - -/* Mask bits for fields in Word 0x24 of the NVM */ -#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ -#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ - -/* Mask bits for fields in Word 0x0f of the NVM */ -#define NVM_WORD0F_PAUSE_MASK 0x3000 -#define NVM_WORD0F_ASM_DIR 0x2000 - -/* Mask bits for fields in Word 0x1a of the NVM */ - -/* length of string needed to store part num */ -#define E1000_PBANUM_LENGTH 11 - -/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ -#define NVM_SUM 0xBABA - -#define NVM_PBA_OFFSET_0 8 -#define NVM_PBA_OFFSET_1 9 -#define NVM_PBA_PTR_GUARD 0xFAFA -#define NVM_WORD_SIZE_BASE_SHIFT 6 - -/* NVM Commands - Microwire */ - -/* NVM Commands - SPI */ -#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ -#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ -#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ -#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ -#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ -#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ - -/* SPI NVM Status Register */ -#define NVM_STATUS_RDY_SPI 0x01 - -/* Word definitions for ID LED Settings */ -#define ID_LED_RESERVED_0000 0x0000 -#define ID_LED_RESERVED_FFFF 0xFFFF -#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ - (ID_LED_OFF1_OFF2 << 8) | \ - (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) -#define ID_LED_DEF1_DEF2 0x1 -#define ID_LED_DEF1_ON2 0x2 -#define ID_LED_DEF1_OFF2 0x3 -#define ID_LED_ON1_DEF2 0x4 -#define ID_LED_ON1_ON2 0x5 -#define ID_LED_ON1_OFF2 0x6 -#define ID_LED_OFF1_DEF2 0x7 -#define ID_LED_OFF1_ON2 0x8 -#define ID_LED_OFF1_OFF2 0x9 - -#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF -#define IGP_ACTIVITY_LED_ENABLE 0x0300 -#define IGP_LED3_MODE 0x07000000 - -/* PCI/PCI-X/PCI-EX Config space */ -#define PCIE_DEVICE_CONTROL2 0x28 -#define PCIE_DEVICE_CONTROL2_16ms 0x0005 - -#define PHY_REVISION_MASK 0xFFFFFFF0 -#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ -#define MAX_PHY_MULTI_PAGE_REG 0xF - -/* Bit definitions for valid PHY IDs. 
*/ -/* - * I = Integrated - * E = External - */ -#define M88E1111_I_PHY_ID 0x01410CC0 -#define M88E1112_E_PHY_ID 0x01410C90 -#define I347AT4_E_PHY_ID 0x01410DC0 -#define IGP03E1000_E_PHY_ID 0x02A80390 -#define I82580_I_PHY_ID 0x015403A0 -#define I350_I_PHY_ID 0x015403B0 -#define M88_VENDOR 0x0141 - -/* M88E1000 Specific Registers */ -#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ -#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ -#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ - -#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ -#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ - -/* M88E1000 PHY Specific Control Register */ -#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ -/* 1=CLK125 low, 0=CLK125 toggling */ -#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ - /* Manual MDI configuration */ -#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ -/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ -#define M88E1000_PSCR_AUTO_X_1000T 0x0040 -/* Auto crossover enabled all speeds */ -#define M88E1000_PSCR_AUTO_X_MODE 0x0060 -/* - * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold - * 0=Normal 10BASE-T Rx Threshold - */ -/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ -#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ - -/* M88E1000 PHY Specific Status Register */ -#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ -#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ -#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ -/* - * 0 = <50M - * 1 = 50-80M - * 2 = 80-110M - * 3 = 110-140M - * 4 = >140M - */ -#define M88E1000_PSSR_CABLE_LENGTH 0x0380 -#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ -#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ - -#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 - -/* M88E1000 Extended PHY Specific Control Register */ -/* - * 1 = Lost lock detect enabled. 
- * Will assert lost lock and bring - * link down if idle not seen - * within 1ms in 1000BASE-T - */ -/* - * Number of times we will attempt to autonegotiate before downshifting if we - * are the master - */ -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 -#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 -/* - * Number of times we will attempt to autonegotiate before downshifting if we - * are the slave - */ -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 -#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 -#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ - -/* Intel i347-AT4 Registers */ - -#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ -#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ -#define I347AT4_PAGE_SELECT 0x16 - -/* i347-AT4 Extended PHY Specific Control Register */ - -/* - * Number of times we will attempt to autonegotiate before downshifting if we - * are the master - */ -#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 -#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 -#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 -#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 -#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 -#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 -#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 -#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 -#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 -#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 - -/* i347-AT4 PHY Cable Diagnostics Control */ -#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ - -/* Marvell 1112 only registers */ -#define M88E1112_VCT_DSP_DISTANCE 0x001A - -/* M88EC018 Rev 2 specific DownShift settings */ -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 -#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 - -/* MDI Control */ -#define E1000_MDIC_DATA_MASK 0x0000FFFF -#define E1000_MDIC_REG_MASK 0x001F0000 -#define E1000_MDIC_REG_SHIFT 16 -#define E1000_MDIC_PHY_MASK 0x03E00000 -#define E1000_MDIC_PHY_SHIFT 21 -#define E1000_MDIC_OP_WRITE 0x04000000 -#define E1000_MDIC_OP_READ 0x08000000 -#define E1000_MDIC_READY 0x10000000 -#define E1000_MDIC_INT_EN 0x20000000 -#define E1000_MDIC_ERROR 0x40000000 -#define E1000_MDIC_DEST 0x80000000 - -/* Thermal Sensor */ -#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ -#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ - -/* Energy Efficient Ethernet */ -#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ -#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ -#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ -#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ -#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ - -/* SerDes Control */ -#define E1000_GEN_CTL_READY 0x80000000 -#define E1000_GEN_CTL_ADDRESS_SHIFT 8 -#define E1000_GEN_POLL_TIMEOUT 640 - -#define E1000_VFTA_ENTRY_SHIFT 5 -#define E1000_VFTA_ENTRY_MASK 0x7F -#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F - -/* DMA Coalescing register fields */ -#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based - on DMA coal */ - -/* Tx Rate-Scheduler Config fields */ -#define E1000_RTTBCNRC_RS_ENA 0x80000000 -#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF -#define E1000_RTTBCNRC_RF_INT_SHIFT 14 -#define E1000_RTTBCNRC_RF_INT_MASK \ - (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) - -#endif diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h deleted file mode 100644 index 4519a13..0000000 --- a/drivers/net/igb/e1000_hw.h +++ /dev/null @@ -1,529 +0,0 @@ 
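Before moving on to e1000_hw.h, a usage sketch for the MDI Control fields defined above (not part of the patch): reading a PHY register over MDIO follows the usual igb/e1000 pattern of packing the register offset, PHY address and read opcode into MDIC, then polling READY. This sketch assumes the driver's rd32()/wr32() accessors, the E1000_MDIC register offset from e1000_regs.h, udelay(), hw->phy.addr from e1000_hw.h, and borrows E1000_GEN_POLL_TIMEOUT purely as an illustrative poll bound.

static s32 example_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
{
	u32 i, mdic;

	/* Pack the register offset, PHY address and read opcode */
	mdic = (offset << E1000_MDIC_REG_SHIFT) |
	       (hw->phy.addr << E1000_MDIC_PHY_SHIFT) |
	       E1000_MDIC_OP_READ;
	wr32(E1000_MDIC, mdic);

	/* Poll until the hardware signals completion */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(50);
		mdic = rd32(E1000_MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY) || (mdic & E1000_MDIC_ERROR))
		return -E1000_ERR_PHY;

	/* The low 16 bits of MDIC hold the register contents */
	*data = (u16)(mdic & E1000_MDIC_DATA_MASK);
	return 0;
}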
-/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_HW_H_ -#define _E1000_HW_H_ - -#include -#include -#include -#include - -#include "e1000_regs.h" -#include "e1000_defines.h" - -struct e1000_hw; - -#define E1000_DEV_ID_82576 0x10C9 -#define E1000_DEV_ID_82576_FIBER 0x10E6 -#define E1000_DEV_ID_82576_SERDES 0x10E7 -#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 -#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 -#define E1000_DEV_ID_82576_NS 0x150A -#define E1000_DEV_ID_82576_NS_SERDES 0x1518 -#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D -#define E1000_DEV_ID_82575EB_COPPER 0x10A7 -#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 -#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 -#define E1000_DEV_ID_82580_COPPER 0x150E -#define E1000_DEV_ID_82580_FIBER 0x150F -#define E1000_DEV_ID_82580_SERDES 0x1510 -#define E1000_DEV_ID_82580_SGMII 0x1511 -#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 -#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 -#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 -#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A -#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C -#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 -#define E1000_DEV_ID_I350_COPPER 0x1521 -#define E1000_DEV_ID_I350_FIBER 0x1522 -#define E1000_DEV_ID_I350_SERDES 0x1523 -#define E1000_DEV_ID_I350_SGMII 0x1524 - -#define E1000_REVISION_2 2 -#define E1000_REVISION_4 4 - -#define E1000_FUNC_0 0 -#define E1000_FUNC_1 1 -#define E1000_FUNC_2 2 -#define E1000_FUNC_3 3 - -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 -#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 - -enum e1000_mac_type { - e1000_undefined = 0, - e1000_82575, - e1000_82576, - e1000_82580, - e1000_i350, - e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ -}; - -enum e1000_media_type { - e1000_media_type_unknown = 0, - e1000_media_type_copper = 1, - e1000_media_type_internal_serdes = 2, - e1000_num_media_types -}; - -enum e1000_nvm_type { - e1000_nvm_unknown = 0, - e1000_nvm_none, - e1000_nvm_eeprom_spi, - e1000_nvm_flash_hw, - e1000_nvm_flash_sw -}; - -enum e1000_nvm_override { - e1000_nvm_override_none = 0, - e1000_nvm_override_spi_small, - e1000_nvm_override_spi_large, -}; - -enum e1000_phy_type { - e1000_phy_unknown = 0, - e1000_phy_none, - e1000_phy_m88, - e1000_phy_igp, - e1000_phy_igp_2, - e1000_phy_gg82563, - e1000_phy_igp_3, - e1000_phy_ife, - e1000_phy_82580, -}; - -enum e1000_bus_type { - e1000_bus_type_unknown = 0, - e1000_bus_type_pci, - e1000_bus_type_pcix, - e1000_bus_type_pci_express, - e1000_bus_type_reserved -}; - -enum e1000_bus_speed { - e1000_bus_speed_unknown = 0, - e1000_bus_speed_33, - e1000_bus_speed_66, - e1000_bus_speed_100, - e1000_bus_speed_120, - e1000_bus_speed_133, - e1000_bus_speed_2500, - e1000_bus_speed_5000, - e1000_bus_speed_reserved -}; - -enum e1000_bus_width { - e1000_bus_width_unknown = 0, - e1000_bus_width_pcie_x1, - e1000_bus_width_pcie_x2, - e1000_bus_width_pcie_x4 = 4, - e1000_bus_width_pcie_x8 = 8, - e1000_bus_width_32, - e1000_bus_width_64, - e1000_bus_width_reserved -}; - -enum e1000_1000t_rx_status { - e1000_1000t_rx_status_not_ok = 0, - e1000_1000t_rx_status_ok, - e1000_1000t_rx_status_undefined = 0xFF -}; - -enum e1000_rev_polarity { - e1000_rev_polarity_normal = 0, - e1000_rev_polarity_reversed, - e1000_rev_polarity_undefined = 0xFF -}; - -enum e1000_fc_mode { - e1000_fc_none = 0, - e1000_fc_rx_pause, - e1000_fc_tx_pause, - e1000_fc_full, - e1000_fc_default = 0xFF -}; - -/* Statistics counters collected by the MAC */ -struct e1000_hw_stats { - u64 crcerrs; - u64 algnerrc; - u64 symerrs; - u64 rxerrc; - u64 mpc; - u64 scc; - u64 ecol; - u64 mcc; - u64 latecol; - u64 colc; - u64 dc; - u64 tncrs; - u64 sec; - u64 cexterr; - u64 rlec; - u64 xonrxc; - u64 xontxc; - u64 xoffrxc; - u64 xofftxc; - u64 fcruc; - u64 prc64; - u64 prc127; - u64 prc255; - u64 prc511; - u64 prc1023; - u64 prc1522; - u64 gprc; - u64 bprc; - u64 mprc; - u64 gptc; - u64 gorc; - u64 gotc; - u64 rnbc; - u64 ruc; - u64 rfc; - u64 roc; - u64 rjc; - u64 mgprc; - u64 mgpdc; - u64 mgptc; - u64 tor; - u64 tot; - u64 tpr; - u64 tpt; - u64 ptc64; - u64 ptc127; - u64 ptc255; - u64 ptc511; - u64 ptc1023; - u64 ptc1522; - u64 mptc; - u64 bptc; - u64 tsctc; - u64 tsctfc; - u64 iac; - u64 icrxptc; - u64 icrxatc; - u64 ictxptc; - u64 ictxatc; - u64 ictxqec; - u64 ictxqmtc; - u64 icrxdmtc; - u64 icrxoc; - u64 cbtmpc; - u64 htdpmc; - u64 cbrdpc; - u64 cbrmpc; - u64 rpthc; - u64 hgptc; - u64 htcbdpc; - u64 hgorc; - u64 hgotc; - u64 lenerrs; - u64 scvpc; - u64 hrmpc; - u64 doosync; - u64 o2bgptc; - u64 o2bspc; - u64 b2ospc; - u64 b2ogprc; -}; - -struct e1000_phy_stats { - u32 idle_errors; - u32 receive_errors; -}; - -struct e1000_host_mng_dhcp_cookie { - u32 signature; - u8 status; - u8 reserved0; - u16 vlan_id; - u32 reserved1; - u16 reserved2; - u8 reserved3; - u8 checksum; -}; - -/* Host Interface "Rev 1" */ -struct e1000_host_command_header { - u8 command_id; - u8 command_length; - u8 command_options; - u8 checksum; -}; - -#define E1000_HI_MAX_DATA_LENGTH 252 -struct e1000_host_command_info { - struct e1000_host_command_header command_header; - u8 command_data[E1000_HI_MAX_DATA_LENGTH]; -}; - -/* Host Interface "Rev 2" */ -struct e1000_host_mng_command_header { - u8 command_id; - u8 checksum; - u16 reserved1; - u16 reserved2; - u16 
command_length; -}; - -#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 -struct e1000_host_mng_command_info { - struct e1000_host_mng_command_header command_header; - u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; -}; - -#include "e1000_mac.h" -#include "e1000_phy.h" -#include "e1000_nvm.h" -#include "e1000_mbx.h" - -struct e1000_mac_operations { - s32 (*check_for_link)(struct e1000_hw *); - s32 (*reset_hw)(struct e1000_hw *); - s32 (*init_hw)(struct e1000_hw *); - bool (*check_mng_mode)(struct e1000_hw *); - s32 (*setup_physical_interface)(struct e1000_hw *); - void (*rar_set)(struct e1000_hw *, u8 *, u32); - s32 (*read_mac_addr)(struct e1000_hw *); - s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); -}; - -struct e1000_phy_operations { - s32 (*acquire)(struct e1000_hw *); - s32 (*check_polarity)(struct e1000_hw *); - s32 (*check_reset_block)(struct e1000_hw *); - s32 (*force_speed_duplex)(struct e1000_hw *); - s32 (*get_cfg_done)(struct e1000_hw *hw); - s32 (*get_cable_length)(struct e1000_hw *); - s32 (*get_phy_info)(struct e1000_hw *); - s32 (*read_reg)(struct e1000_hw *, u32, u16 *); - void (*release)(struct e1000_hw *); - s32 (*reset)(struct e1000_hw *); - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); - s32 (*write_reg)(struct e1000_hw *, u32, u16); -}; - -struct e1000_nvm_operations { - s32 (*acquire)(struct e1000_hw *); - s32 (*read)(struct e1000_hw *, u16, u16, u16 *); - void (*release)(struct e1000_hw *); - s32 (*write)(struct e1000_hw *, u16, u16, u16 *); - s32 (*update)(struct e1000_hw *); - s32 (*validate)(struct e1000_hw *); -}; - -struct e1000_info { - s32 (*get_invariants)(struct e1000_hw *); - struct e1000_mac_operations *mac_ops; - struct e1000_phy_operations *phy_ops; - struct e1000_nvm_operations *nvm_ops; -}; - -extern const struct e1000_info e1000_82575_info; - -struct e1000_mac_info { - struct e1000_mac_operations ops; - - u8 addr[6]; - u8 perm_addr[6]; - - enum e1000_mac_type type; - - u32 ledctl_default; - u32 ledctl_mode1; - u32 ledctl_mode2; - u32 mc_filter_type; - u32 txcw; - - u16 mta_reg_count; - u16 uta_reg_count; - - /* Maximum size of the MTA register table in all supported adapters */ - #define MAX_MTA_REG 128 - u32 mta_shadow[MAX_MTA_REG]; - u16 rar_entry_count; - - u8 forced_speed_duplex; - - bool adaptive_ifs; - bool arc_subsystem_valid; - bool asf_firmware_present; - bool autoneg; - bool autoneg_failed; - bool disable_hw_init_bits; - bool get_link_status; - bool ifs_params_forced; - bool in_ifs_mode; - bool report_tx_early; - bool serdes_has_link; - bool tx_pkt_filtering; -}; - -struct e1000_phy_info { - struct e1000_phy_operations ops; - - enum e1000_phy_type type; - - enum e1000_1000t_rx_status local_rx; - enum e1000_1000t_rx_status remote_rx; - enum e1000_ms_type ms_type; - enum e1000_ms_type original_ms_type; - enum e1000_rev_polarity cable_polarity; - enum e1000_smart_speed smart_speed; - - u32 addr; - u32 id; - u32 reset_delay_us; /* in usec */ - u32 revision; - - enum e1000_media_type media_type; - - u16 autoneg_advertised; - u16 autoneg_mask; - u16 cable_length; - u16 max_cable_length; - u16 min_cable_length; - - u8 mdix; - - bool disable_polarity_correction; - bool is_mdix; - bool polarity_correction; - bool reset_disable; - bool speed_downgraded; - bool autoneg_wait_to_complete; -}; - -struct e1000_nvm_info { - struct e1000_nvm_operations ops; - enum e1000_nvm_type type; - enum e1000_nvm_override override; - - u32 flash_bank_size; - u32 flash_base_addr; - - u16 word_size; - u16 
delay_usec; - u16 address_bits; - u16 opcode_bits; - u16 page_size; -}; - -struct e1000_bus_info { - enum e1000_bus_type type; - enum e1000_bus_speed speed; - enum e1000_bus_width width; - - u32 snoop; - - u16 func; - u16 pci_cmd_word; -}; - -struct e1000_fc_info { - u32 high_water; /* Flow control high-water mark */ - u32 low_water; /* Flow control low-water mark */ - u16 pause_time; /* Flow control pause timer */ - bool send_xon; /* Flow control send XON */ - bool strict_ieee; /* Strict IEEE mode */ - enum e1000_fc_mode current_mode; /* Type of flow control */ - enum e1000_fc_mode requested_mode; -}; - -struct e1000_mbx_operations { - s32 (*init_params)(struct e1000_hw *hw); - s32 (*read)(struct e1000_hw *, u32 *, u16, u16); - s32 (*write)(struct e1000_hw *, u32 *, u16, u16); - s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); - s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); - s32 (*check_for_msg)(struct e1000_hw *, u16); - s32 (*check_for_ack)(struct e1000_hw *, u16); - s32 (*check_for_rst)(struct e1000_hw *, u16); -}; - -struct e1000_mbx_stats { - u32 msgs_tx; - u32 msgs_rx; - - u32 acks; - u32 reqs; - u32 rsts; -}; - -struct e1000_mbx_info { - struct e1000_mbx_operations ops; - struct e1000_mbx_stats stats; - u32 timeout; - u32 usec_delay; - u16 size; -}; - -struct e1000_dev_spec_82575 { - bool sgmii_active; - bool global_device_reset; - bool eee_disable; -}; - -struct e1000_hw { - void *back; - - u8 __iomem *hw_addr; - u8 __iomem *flash_address; - unsigned long io_base; - - struct e1000_mac_info mac; - struct e1000_fc_info fc; - struct e1000_phy_info phy; - struct e1000_nvm_info nvm; - struct e1000_bus_info bus; - struct e1000_mbx_info mbx; - struct e1000_host_mng_dhcp_cookie mng_cookie; - - union { - struct e1000_dev_spec_82575 _82575; - } dev_spec; - - u16 device_id; - u16 subsystem_vendor_id; - u16 subsystem_device_id; - u16 vendor_id; - - u8 revision_id; -}; - -extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw); -#define hw_dbg(format, arg...) \ - netdev_dbg(igb_get_hw_dev(hw), format, ##arg) - -/* These functions must be implemented by drivers */ -s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); -s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); -#endif /* _E1000_HW_H_ */ diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c deleted file mode 100644 index 2b5ef76..0000000 --- a/drivers/net/igb/e1000_mac.c +++ /dev/null @@ -1,1421 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include -#include -#include -#include - -#include "e1000_mac.h" - -#include "igb.h" - -static s32 igb_set_default_fc(struct e1000_hw *hw); -static s32 igb_set_fc_watermarks(struct e1000_hw *hw); - -/** - * igb_get_bus_info_pcie - Get PCIe bus information - * @hw: pointer to the HW structure - * - * Determines and stores the system bus information for a particular - * network interface. The following bus information is determined and stored: - * bus speed, bus width, type (PCIe), and PCIe function. - **/ -s32 igb_get_bus_info_pcie(struct e1000_hw *hw) -{ - struct e1000_bus_info *bus = &hw->bus; - s32 ret_val; - u32 reg; - u16 pcie_link_status; - - bus->type = e1000_bus_type_pci_express; - - ret_val = igb_read_pcie_cap_reg(hw, - PCI_EXP_LNKSTA, - &pcie_link_status); - if (ret_val) { - bus->width = e1000_bus_width_unknown; - bus->speed = e1000_bus_speed_unknown; - } else { - switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) { - case PCI_EXP_LNKSTA_CLS_2_5GB: - bus->speed = e1000_bus_speed_2500; - break; - case PCI_EXP_LNKSTA_CLS_5_0GB: - bus->speed = e1000_bus_speed_5000; - break; - default: - bus->speed = e1000_bus_speed_unknown; - break; - } - - bus->width = (enum e1000_bus_width)((pcie_link_status & - PCI_EXP_LNKSTA_NLW) >> - PCI_EXP_LNKSTA_NLW_SHIFT); - } - - reg = rd32(E1000_STATUS); - bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; - - return 0; -} - -/** - * igb_clear_vfta - Clear VLAN filter table - * @hw: pointer to the HW structure - * - * Clears the register array which contains the VLAN filter table by - * setting all the values to 0. - **/ -void igb_clear_vfta(struct e1000_hw *hw) -{ - u32 offset; - - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - array_wr32(E1000_VFTA, offset, 0); - wrfl(); - } -} - -/** - * igb_write_vfta - Write value to VLAN filter table - * @hw: pointer to the HW structure - * @offset: register offset in VLAN filter table - * @value: register value written to VLAN filter table - * - * Writes value at the given offset in the register array which stores - * the VLAN filter table. - **/ -static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) -{ - array_wr32(E1000_VFTA, offset, value); - wrfl(); -} - -/** - * igb_init_rx_addrs - Initialize receive address's - * @hw: pointer to the HW structure - * @rar_count: receive address registers - * - * Setups the receive address registers by setting the base receive address - * register to the devices MAC address and clearing all the other receive - * address registers to 0. 
- **/ -void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) -{ - u32 i; - u8 mac_addr[ETH_ALEN] = {0}; - - /* Setup the receive address */ - hw_dbg("Programming MAC Address into RAR[0]\n"); - - hw->mac.ops.rar_set(hw, hw->mac.addr, 0); - - /* Zero out the other (rar_entry_count - 1) receive addresses */ - hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); - for (i = 1; i < rar_count; i++) - hw->mac.ops.rar_set(hw, mac_addr, i); -} - -/** - * igb_vfta_set - enable or disable vlan in VLAN filter table - * @hw: pointer to the HW structure - * @vid: VLAN id to add or remove - * @add: if true add filter, if false remove - * - * Sets or clears a bit in the VLAN filter table array based on VLAN id - * and if we are adding or removing the filter - **/ -s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) -{ - u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; - u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); - u32 vfta = array_rd32(E1000_VFTA, index); - s32 ret_val = 0; - - /* bit was set/cleared before we started */ - if ((!!(vfta & mask)) == add) { - ret_val = -E1000_ERR_CONFIG; - } else { - if (add) - vfta |= mask; - else - vfta &= ~mask; - } - - igb_write_vfta(hw, index, vfta); - - return ret_val; -} - -/** - * igb_check_alt_mac_addr - Check for alternate MAC addr - * @hw: pointer to the HW structure - * - * Checks the nvm for an alternate MAC address. An alternate MAC address - * can be set up by pre-boot software and must be treated like a permanent - * address and must override the actual permanent MAC address. If an - * alternate MAC address is found it is saved in the hw struct and - * programmed into RAR0 and the function returns success, otherwise the - * function returns an error. - **/ -s32 igb_check_alt_mac_addr(struct e1000_hw *hw) -{ - u32 i; - s32 ret_val = 0; - u16 offset, nvm_alt_mac_addr_offset, nvm_data; - u8 alt_mac_addr[ETH_ALEN]; - - ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, - &nvm_alt_mac_addr_offset); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - - if (nvm_alt_mac_addr_offset == 0xFFFF) { - /* There is no Alternate MAC Address */ - goto out; - } - - if (hw->bus.func == E1000_FUNC_1) - nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; - for (i = 0; i < ETH_ALEN; i += 2) { - offset = nvm_alt_mac_addr_offset + (i >> 1); - ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - - alt_mac_addr[i] = (u8)(nvm_data & 0xFF); - alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); - } - - /* if multicast bit is set, the alternate address will not be used */ - if (is_multicast_ether_addr(alt_mac_addr)) { - hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); - goto out; - } - - /* - * We have a valid alternate MAC address, and we want to treat it the - * same as the normal permanent MAC address stored by the HW into the - * RAR. Do this by mapping this address into RAR0. - */ - hw->mac.ops.rar_set(hw, alt_mac_addr, 0); - -out: - return ret_val; -} - -/** - * igb_rar_set - Set receive address register - * @hw: pointer to the HW structure - * @addr: pointer to the receive address - * @index: receive address array register - * - * Sets the receive address array register at index to the address passed - * in by addr.
- **/ -void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) -{ - u32 rar_low, rar_high; - - /* - * HW expects these in little endian so we reverse the byte order - * from network order (big endian) to little endian - */ - rar_low = ((u32) addr[0] | - ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); - - /* If MAC address zero, no need to set the AV bit */ - if (rar_low || rar_high) - rar_high |= E1000_RAH_AV; - - /* - * Some bridges will combine consecutive 32-bit writes into - * a single burst write, which will malfunction on some parts. - * The flushes avoid this. - */ - wr32(E1000_RAL(index), rar_low); - wrfl(); - wr32(E1000_RAH(index), rar_high); - wrfl(); -} - -/** - * igb_mta_set - Set multicast filter table address - * @hw: pointer to the HW structure - * @hash_value: determines the MTA register and bit to set - * - * The multicast table address is a register array of 32-bit registers. - * The hash_value is used to determine what register the bit is in, the - * current value is read, the new bit is OR'd in and the new value is - * written back into the register. - **/ -void igb_mta_set(struct e1000_hw *hw, u32 hash_value) -{ - u32 hash_bit, hash_reg, mta; - - /* - * The MTA is a register array of 32-bit registers. It is - * treated like an array of (32*mta_reg_count) bits. We want to - * set bit BitArray[hash_value]. So we figure out what register - * the bit is in, read it, OR in the new bit, then write - * back the new value. The (hw->mac.mta_reg_count - 1) serves as a - * mask to bits 31:5 of the hash value which gives us the - * register we're modifying. The hash bit within that register - * is determined by the lower 5 bits of the hash value. - */ - hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); - hash_bit = hash_value & 0x1F; - - mta = array_rd32(E1000_MTA, hash_reg); - - mta |= (1 << hash_bit); - - array_wr32(E1000_MTA, hash_reg, mta); - wrfl(); -} - -/** - * igb_hash_mc_addr - Generate a multicast hash value - * @hw: pointer to the HW structure - * @mc_addr: pointer to a multicast address - * - * Generates a multicast address hash value which is used to determine - * the multicast filter table array address and new table value. See - * igb_mta_set() - **/ -static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) -{ - u32 hash_value, hash_mask; - u8 bit_shift = 0; - - /* Register count multiplied by bits per register */ - hash_mask = (hw->mac.mta_reg_count * 32) - 1; - - /* - * For a mc_filter_type of 0, bit_shift is the number of left-shifts - * where 0xFF would still fall within the hash mask. - */ - while (hash_mask >> bit_shift != 0xFF) - bit_shift++; - - /* - * The portion of the address that is used for the hash table - * is determined by the mc_filter_type setting. - * The algorithm is such that there is a total of 8 bits of shifting. - * The bit_shift for a mc_filter_type of 0 represents the number of - * left-shifts where the MSB of mc_addr[5] would still fall within - * the hash_mask. Case 0 does this exactly. Since there are a total - * of 8 bits of shifting, then mc_addr[4] will shift right the - * remaining number of bits. Thus 8 - bit_shift. The rest of the - * cases are a variation of this algorithm...essentially raising the - * number of bits to shift mc_addr[5] left, while still keeping the - * 8-bit shifting total. 
- * - * For example, given the following Destination MAC Address and an - * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), - * we can see that the bit_shift for case 0 is 4. These are the hash - * values resulting from each mc_filter_type... - * [0] [1] [2] [3] [4] [5] - * 01 AA 00 12 34 56 - * LSB MSB - * - * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 - * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 - * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 - * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 - */ - switch (hw->mac.mc_filter_type) { - default: - case 0: - break; - case 1: - bit_shift += 1; - break; - case 2: - bit_shift += 2; - break; - case 3: - bit_shift += 4; - break; - } - - hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | - (((u16) mc_addr[5]) << bit_shift))); - - return hash_value; -} - -/** - * igb_update_mc_addr_list - Update Multicast addresses - * @hw: pointer to the HW structure - * @mc_addr_list: array of multicast addresses to program - * @mc_addr_count: number of multicast addresses to program - * - * Updates entire Multicast Table Array. - * The caller must have a packed mc_addr_list of multicast addresses. - **/ -void igb_update_mc_addr_list(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count) -{ - u32 hash_value, hash_bit, hash_reg; - int i; - - /* clear mta_shadow */ - memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); - - /* update mta_shadow from mc_addr_list */ - for (i = 0; (u32) i < mc_addr_count; i++) { - hash_value = igb_hash_mc_addr(hw, mc_addr_list); - - hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); - hash_bit = hash_value & 0x1F; - - hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); - mc_addr_list += (ETH_ALEN); - } - - /* replace the entire MTA table */ - for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) - array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); - wrfl(); -} - -/** - * igb_clear_hw_cntrs_base - Clear base hardware counters - * @hw: pointer to the HW structure - * - * Clears the base hardware counters by reading the counter registers. - **/ -void igb_clear_hw_cntrs_base(struct e1000_hw *hw) -{ - rd32(E1000_CRCERRS); - rd32(E1000_SYMERRS); - rd32(E1000_MPC); - rd32(E1000_SCC); - rd32(E1000_ECOL); - rd32(E1000_MCC); - rd32(E1000_LATECOL); - rd32(E1000_COLC); - rd32(E1000_DC); - rd32(E1000_SEC); - rd32(E1000_RLEC); - rd32(E1000_XONRXC); - rd32(E1000_XONTXC); - rd32(E1000_XOFFRXC); - rd32(E1000_XOFFTXC); - rd32(E1000_FCRUC); - rd32(E1000_GPRC); - rd32(E1000_BPRC); - rd32(E1000_MPRC); - rd32(E1000_GPTC); - rd32(E1000_GORCL); - rd32(E1000_GORCH); - rd32(E1000_GOTCL); - rd32(E1000_GOTCH); - rd32(E1000_RNBC); - rd32(E1000_RUC); - rd32(E1000_RFC); - rd32(E1000_ROC); - rd32(E1000_RJC); - rd32(E1000_TORL); - rd32(E1000_TORH); - rd32(E1000_TOTL); - rd32(E1000_TOTH); - rd32(E1000_TPR); - rd32(E1000_TPT); - rd32(E1000_MPTC); - rd32(E1000_BPTC); -} - -/** - * igb_check_for_copper_link - Check for link (Copper) - * @hw: pointer to the HW structure - * - * Checks to see of the link status of the hardware has changed. If a - * change in link status has been detected, then we read the PHY registers - * to get the current speed/duplex if link exists. - **/ -s32 igb_check_for_copper_link(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; - bool link; - - /* - * We only want to go out to the PHY registers to see if Auto-Neg - * has completed and/or if our link status has changed. 
The - * get_link_status flag is set upon receiving a Link Status - * Change or Rx Sequence Error interrupt. - */ - if (!mac->get_link_status) { - ret_val = 0; - goto out; - } - - /* - * First we want to see if the MII Status Register reports - * link. If so, then we want to get the current speed/duplex - * of the PHY. - */ - ret_val = igb_phy_has_link(hw, 1, 0, &link); - if (ret_val) - goto out; - - if (!link) - goto out; /* No link detected */ - - mac->get_link_status = false; - - /* - * Check if there was DownShift, must be checked - * immediately after link-up - */ - igb_check_downshift(hw); - - /* - * If we are forcing speed/duplex, then we simply return since - * we have already determined whether we have link or not. - */ - if (!mac->autoneg) { - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - /* - * Auto-Neg is enabled. Auto Speed Detection takes care - * of MAC speed/duplex configuration. So we only need to - * configure Collision Distance in the MAC. - */ - igb_config_collision_dist(hw); - - /* - * Configure Flow Control now that Auto-Neg has completed. - * First, we need to restore the desired flow control - * settings because we may have had to re-autoneg with a - * different link partner. - */ - ret_val = igb_config_fc_after_link_up(hw); - if (ret_val) - hw_dbg("Error configuring flow control\n"); - -out: - return ret_val; -} - -/** - * igb_setup_link - Setup flow control and link settings - * @hw: pointer to the HW structure - * - * Determines which flow control settings to use, then configures flow - * control. Calls the appropriate media-specific link configuration - * function. Assuming the adapter has a valid link partner, a valid link - * should be established. Assumes the hardware has previously been reset - * and the transmitter and receiver are not enabled. - **/ -s32 igb_setup_link(struct e1000_hw *hw) -{ - s32 ret_val = 0; - - /* - * In the case of the phy reset being blocked, we already have a link. - * We do not need to set it up again. - */ - if (igb_check_reset_block(hw)) - goto out; - - /* - * If requested flow control is set to default, set flow control - * based on the EEPROM flow control settings. - */ - if (hw->fc.requested_mode == e1000_fc_default) { - ret_val = igb_set_default_fc(hw); - if (ret_val) - goto out; - } - - /* - * We want to save off the original Flow Control configuration just - * in case we get disconnected and then reconnected into a different - * hub or switch with different Flow Control capabilities. - */ - hw->fc.current_mode = hw->fc.requested_mode; - - hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); - - /* Call the necessary media_type subroutine to configure the link. */ - ret_val = hw->mac.ops.setup_physical_interface(hw); - if (ret_val) - goto out; - - /* - * Initialize the flow control address, type, and PAUSE timer - * registers to their default values. This is done even if flow - * control is disabled, because it does not hurt anything to - * initialize these registers. - */ - hw_dbg("Initializing the Flow Control address, type and timer regs\n"); - wr32(E1000_FCT, FLOW_CONTROL_TYPE); - wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); - wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); - - wr32(E1000_FCTTV, hw->fc.pause_time); - - ret_val = igb_set_fc_watermarks(hw); - -out: - return ret_val; -} - -/** - * igb_config_collision_dist - Configure collision distance - * @hw: pointer to the HW structure - * - * Configures the collision distance to the default value and is used - * during link setup. 
Currently no func pointer exists and all - * implementations are handled in the generic version of this function. - **/ -void igb_config_collision_dist(struct e1000_hw *hw) -{ - u32 tctl; - - tctl = rd32(E1000_TCTL); - - tctl &= ~E1000_TCTL_COLD; - tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; - - wr32(E1000_TCTL, tctl); - wrfl(); -} - -/** - * igb_set_fc_watermarks - Set flow control high/low watermarks - * @hw: pointer to the HW structure - * - * Sets the flow control high/low threshold (watermark) registers. If - * flow control XON frame transmission is enabled, then set XON frame - * transmission as well. - **/ -static s32 igb_set_fc_watermarks(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u32 fcrtl = 0, fcrth = 0; - - /* - * Set the flow control receive threshold registers. Normally, - * these registers will be set to a default threshold that may be - * adjusted later by the driver's runtime code. However, if the - * ability to transmit pause frames is not enabled, then these - * registers will be set to 0. - */ - if (hw->fc.current_mode & e1000_fc_tx_pause) { - /* - * We need to set up the Receive Threshold high and low water - * marks as well as (optionally) enabling the transmission of - * XON frames. - */ - fcrtl = hw->fc.low_water; - if (hw->fc.send_xon) - fcrtl |= E1000_FCRTL_XONE; - - fcrth = hw->fc.high_water; - } - wr32(E1000_FCRTL, fcrtl); - wr32(E1000_FCRTH, fcrth); - - return ret_val; -} - -/** - * igb_set_default_fc - Set flow control default values - * @hw: pointer to the HW structure - * - * Read the EEPROM for the default values for flow control and store the - * values. - **/ -static s32 igb_set_default_fc(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 nvm_data; - - /* - * Read and store word 0x0F of the EEPROM. This word contains bits - * that determine the hardware's default PAUSE (flow control) mode, - * a bit that determines whether the HW defaults to enabling or - * disabling auto-negotiation, and the direction of the - * SW defined pins. If there is no SW over-ride of the flow - * control setting, then the variable hw->fc will - * be initialized based on a value in the EEPROM. - */ - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); - - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - - if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) - hw->fc.requested_mode = e1000_fc_none; - else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == - NVM_WORD0F_ASM_DIR) - hw->fc.requested_mode = e1000_fc_tx_pause; - else - hw->fc.requested_mode = e1000_fc_full; - -out: - return ret_val; -} - -/** - * igb_force_mac_fc - Force the MAC's flow control settings - * @hw: pointer to the HW structure - * - * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the - * device control register to reflect the adapter settings. TFCE and RFCE - * need to be explicitly set by software when a copper PHY is used because - * autonegotiation is managed by the PHY rather than the MAC. Software must - * also configure these bits when link is forced on a fiber connection. - **/ -s32 igb_force_mac_fc(struct e1000_hw *hw) -{ - u32 ctrl; - s32 ret_val = 0; - - ctrl = rd32(E1000_CTRL); - - /* - * Because we didn't get link via the internal auto-negotiation - * mechanism (we either forced link or we got link via PHY - * auto-neg), we have to manually enable/disable transmit and - * receive flow control. - * - * The "Case" statement below enables/disables flow control - * according to the "hw->fc.current_mode" parameter.
- * - * The possible values of the "fc" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause - * frames but not send pause frames). - * 2: Tx flow control is enabled (we can send pause - * frames but we do not receive pause frames). - * 3: Both Rx and Tx flow control (symmetric) is enabled. - * other: No other values should be possible at this point. - */ - hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); - - switch (hw->fc.current_mode) { - case e1000_fc_none: - ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); - break; - case e1000_fc_rx_pause: - ctrl &= (~E1000_CTRL_TFCE); - ctrl |= E1000_CTRL_RFCE; - break; - case e1000_fc_tx_pause: - ctrl &= (~E1000_CTRL_RFCE); - ctrl |= E1000_CTRL_TFCE; - break; - case e1000_fc_full: - ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); - break; - default: - hw_dbg("Flow control param set incorrectly\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - wr32(E1000_CTRL, ctrl); - -out: - return ret_val; -} - -/** - * igb_config_fc_after_link_up - Configures flow control after link - * @hw: pointer to the HW structure - * - * Checks the status of auto-negotiation after link up to ensure that the - * speed and duplex were not forced. If the link needed to be forced, then - * flow control needs to be forced also. If auto-negotiation is enabled - * and did not fail, then we configure flow control based on our link - * partner. - **/ -s32 igb_config_fc_after_link_up(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val = 0; - u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; - u16 speed, duplex; - - /* - * Check for the case where we have fiber media and auto-neg failed - * so we had to force link. In this case, we need to force the - * configuration of the MAC to match the "fc" parameter. - */ - if (mac->autoneg_failed) { - if (hw->phy.media_type == e1000_media_type_internal_serdes) - ret_val = igb_force_mac_fc(hw); - } else { - if (hw->phy.media_type == e1000_media_type_copper) - ret_val = igb_force_mac_fc(hw); - } - - if (ret_val) { - hw_dbg("Error forcing flow control settings\n"); - goto out; - } - - /* - * Check for the case where we have copper media and auto-neg is - * enabled. In this case, we need to check and see if Auto-Neg - * has completed, and if so, how the PHY and link partner have - * flow control configured. - */ - if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { - /* - * Read the MII Status Register and check to see if AutoNeg - * has completed. We read this twice because this reg has - * some "sticky" (latched) bits. - */ - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, - &mii_status_reg); - if (ret_val) - goto out; - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, - &mii_status_reg); - if (ret_val) - goto out; - - if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { - hw_dbg("Copper PHY and Auto Neg " - "has not completed.\n"); - goto out; - } - - /* - * The AutoNeg process has completed, so we now need to - * read both the Auto Negotiation Advertisement - * Register (Address 4) and the Auto Negotiation Base - * Page Ability Register (Address 5) to determine how - * flow control was negotiated.
- */ - ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, - &mii_nway_adv_reg); - if (ret_val) - goto out; - ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, - &mii_nway_lp_ability_reg); - if (ret_val) - goto out; - - /* - * Two bits in the Auto Negotiation Advertisement Register - * (Address 4) and two bits in the Auto Negotiation Base - * Page Ability Register (Address 5) determine flow control - * for both the PHY and the link partner. The following - * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, - * 1999, describes these PAUSE resolution bits and how flow - * control is determined based upon these settings. - * NOTE: DC = Don't Care - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution - *-------|---------|-------|---------|-------------------- - * 0 | 0 | DC | DC | e1000_fc_none - * 0 | 1 | 0 | DC | e1000_fc_none - * 0 | 1 | 1 | 0 | e1000_fc_none - * 0 | 1 | 1 | 1 | e1000_fc_tx_pause - * 1 | 0 | 0 | DC | e1000_fc_none - * 1 | DC | 1 | DC | e1000_fc_full - * 1 | 1 | 0 | 0 | e1000_fc_none - * 1 | 1 | 0 | 1 | e1000_fc_rx_pause - * - * Are both PAUSE bits set to 1? If so, this implies - * Symmetric Flow Control is enabled at both ends. The - * ASM_DIR bits are irrelevant per the spec. - * - * For Symmetric Flow Control: - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 1 | DC | 1 | DC | e1000_fc_full - * - */ - if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { - /* - * Now we need to check if the user selected Rx ONLY - * pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise Rx - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. - */ - if (hw->fc.requested_mode == e1000_fc_full) { - hw->fc.current_mode = e1000_fc_full; - hw_dbg("Flow Control = FULL.\r\n"); - } else { - hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = " - "RX PAUSE frames only.\r\n"); - } - } - /* - * For receiving PAUSE frames ONLY. - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 0 | 1 | 1 | 1 | e1000_fc_tx_pause - */ - else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { - hw->fc.current_mode = e1000_fc_tx_pause; - hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); - } - /* - * For transmitting PAUSE frames ONLY. - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 1 | 1 | 0 | 1 | e1000_fc_rx_pause - */ - else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { - hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); - } - /* - * Per the IEEE spec, at this point flow control should be - * disabled. However, we want to consider that we could - * be connected to a legacy switch that doesn't advertise - * desired flow control, but can be forced on the link - * partner. So if we advertised no flow control, that is - * what we will resolve to.
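For reference, the strict table above condenses to a small helper. This is an illustrative sketch only, not driver code; it assumes the NWAY_* masks and the enum e1000_fc_mode values used by this shared code, and it deliberately omits the legacy-switch leniency and half-duplex override that the driver applies afterwards:

static enum e1000_fc_mode resolve_fc(u16 adv, u16 lpa,
				     enum e1000_fc_mode requested)
{
	/* Row 6: both PAUSE bits set means symmetric flow control,
	 * unless we only wanted Rx pause in the first place. */
	if ((adv & NWAY_AR_PAUSE) && (lpa & NWAY_LPAR_PAUSE))
		return (requested == e1000_fc_full) ? e1000_fc_full
						    : e1000_fc_rx_pause;
	/* Row 4: we advertised asymmetric Tx, partner receives. */
	if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
	    (lpa & NWAY_LPAR_PAUSE) && (lpa & NWAY_LPAR_ASM_DIR))
		return e1000_fc_tx_pause;
	/* Row 8: partner transmits PAUSE only, we receive. */
	if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
	    !(lpa & NWAY_LPAR_PAUSE) && (lpa & NWAY_LPAR_ASM_DIR))
		return e1000_fc_rx_pause;
	/* Every other combination resolves to no flow control. */
	return e1000_fc_none;
}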
If we advertised some kind of - * receive capability (Rx Pause Only or Full Flow Control) - * and the link partner advertised none, we will configure - * ourselves to enable Rx Flow Control only. We can do - * this safely for two reasons: If the link partner really - * didn't want flow control enabled, and we enable Rx, no - * harm done since we won't be receiving any PAUSE frames - * anyway. If the intent on the link partner was to have - * flow control enabled, then by us enabling Rx only, we - * can at least receive pause frames and process them. - * This is a good idea because in most cases, since we are - * predominantly a server NIC, more often than not we will - * be asked to delay transmission of packets rather than to ask - * our link partner to pause transmission of frames. - */ - else if ((hw->fc.requested_mode == e1000_fc_none || - hw->fc.requested_mode == e1000_fc_tx_pause) || - hw->fc.strict_ieee) { - hw->fc.current_mode = e1000_fc_none; - hw_dbg("Flow Control = NONE.\r\n"); - } else { - hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); - } - - /* - * Now we need to do one last check... If we auto- - * negotiated to HALF DUPLEX, flow control should not be - * enabled per IEEE 802.3 spec. - */ - ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); - if (ret_val) { - hw_dbg("Error getting link speed and duplex\n"); - goto out; - } - - if (duplex == HALF_DUPLEX) - hw->fc.current_mode = e1000_fc_none; - - /* - * Now we call a subroutine to actually force the MAC - * controller to use the correct flow control settings. - */ - ret_val = igb_force_mac_fc(hw); - if (ret_val) { - hw_dbg("Error forcing flow control settings\n"); - goto out; - } - } - -out: - return ret_val; -} - -/** - * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex - * @hw: pointer to the HW structure - * @speed: stores the current speed - * @duplex: stores the current duplex - * - * Read the status register for the current speed/duplex and store the current - * speed and duplex for copper connections. - **/ -s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, - u16 *duplex) -{ - u32 status; - - status = rd32(E1000_STATUS); - if (status & E1000_STATUS_SPEED_1000) { - *speed = SPEED_1000; - hw_dbg("1000 Mbs, "); - } else if (status & E1000_STATUS_SPEED_100) { - *speed = SPEED_100; - hw_dbg("100 Mbs, "); - } else { - *speed = SPEED_10; - hw_dbg("10 Mbs, "); - } - - if (status & E1000_STATUS_FD) { - *duplex = FULL_DUPLEX; - hw_dbg("Full Duplex\n"); - } else { - *duplex = HALF_DUPLEX; - hw_dbg("Half Duplex\n"); - } - - return 0; -} - -/** - * igb_get_hw_semaphore - Acquire hardware semaphore - * @hw: pointer to the HW structure - * - * Acquire the HW semaphore to access the PHY or NVM - **/ -s32 igb_get_hw_semaphore(struct e1000_hw *hw) -{ - u32 swsm; - s32 ret_val = 0; - s32 timeout = hw->nvm.word_size + 1; - s32 i = 0; - - /* Get the SW semaphore */ - while (i < timeout) { - swsm = rd32(E1000_SWSM); - if (!(swsm & E1000_SWSM_SMBI)) - break; - - udelay(50); - i++; - } - - if (i == timeout) { - hw_dbg("Driver can't access device - SMBI bit is set.\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } - - /* Get the FW semaphore.
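Taken together, igb_get_hw_semaphore() and igb_put_hw_semaphore() bracket every multi-step PHY or NVM access. A minimal usage sketch, assuming igb_read_nvm_eerd() from the NVM code moved later in this patch (illustrative only, not driver code):

static s32 example_locked_nvm_read(struct e1000_hw *hw, u16 offset, u16 *word)
{
	s32 ret_val;

	ret_val = igb_get_hw_semaphore(hw);	/* SMBI, then SWESMBI */
	if (ret_val)
		return ret_val;
	ret_val = igb_read_nvm_eerd(hw, offset, 1, word);
	igb_put_hw_semaphore(hw);		/* clears both bits */
	return ret_val;
}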
*/ - for (i = 0; i < timeout; i++) { - swsm = rd32(E1000_SWSM); - wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); - - /* Semaphore acquired if bit latched */ - if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) - break; - - udelay(50); - } - - if (i == timeout) { - /* Release semaphores */ - igb_put_hw_semaphore(hw); - hw_dbg("Driver can't access the NVM\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } - -out: - return ret_val; -} - -/** - * igb_put_hw_semaphore - Release hardware semaphore - * @hw: pointer to the HW structure - * - * Release hardware semaphore used to access the PHY or NVM - **/ -void igb_put_hw_semaphore(struct e1000_hw *hw) -{ - u32 swsm; - - swsm = rd32(E1000_SWSM); - - swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); - - wr32(E1000_SWSM, swsm); -} - -/** - * igb_get_auto_rd_done - Check for auto read completion - * @hw: pointer to the HW structure - * - * Check EEPROM for Auto Read done bit. - **/ -s32 igb_get_auto_rd_done(struct e1000_hw *hw) -{ - s32 i = 0; - s32 ret_val = 0; - - - while (i < AUTO_READ_DONE_TIMEOUT) { - if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) - break; - msleep(1); - i++; - } - - if (i == AUTO_READ_DONE_TIMEOUT) { - hw_dbg("Auto read by HW from NVM has not completed.\n"); - ret_val = -E1000_ERR_RESET; - goto out; - } - -out: - return ret_val; -} - -/** - * igb_valid_led_default - Verify a valid default LED config - * @hw: pointer to the HW structure - * @data: pointer to the NVM (EEPROM) - * - * Read the EEPROM for the current default LED configuration. If the - * LED configuration is not valid, set to a valid LED configuration. - **/ -static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) -{ - s32 ret_val; - - ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - - if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { - switch(hw->phy.media_type) { - case e1000_media_type_internal_serdes: - *data = ID_LED_DEFAULT_82575_SERDES; - break; - case e1000_media_type_copper: - default: - *data = ID_LED_DEFAULT; - break; - } - } -out: - return ret_val; -} - -/** - * igb_id_led_init - - * @hw: pointer to the HW structure - * - **/ -s32 igb_id_led_init(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; - const u32 ledctl_mask = 0x000000FF; - const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; - const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; - u16 data, i, temp; - const u16 led_mask = 0x0F; - - ret_val = igb_valid_led_default(hw, &data); - if (ret_val) - goto out; - - mac->ledctl_default = rd32(E1000_LEDCTL); - mac->ledctl_mode1 = mac->ledctl_default; - mac->ledctl_mode2 = mac->ledctl_default; - - for (i = 0; i < 4; i++) { - temp = (data >> (i << 2)) & led_mask; - switch (temp) { - case ID_LED_ON1_DEF2: - case ID_LED_ON1_ON2: - case ID_LED_ON1_OFF2: - mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); - mac->ledctl_mode1 |= ledctl_on << (i << 3); - break; - case ID_LED_OFF1_DEF2: - case ID_LED_OFF1_ON2: - case ID_LED_OFF1_OFF2: - mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); - mac->ledctl_mode1 |= ledctl_off << (i << 3); - break; - default: - /* Do nothing */ - break; - } - switch (temp) { - case ID_LED_DEF1_ON2: - case ID_LED_ON1_ON2: - case ID_LED_OFF1_ON2: - mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); - mac->ledctl_mode2 |= ledctl_on << (i << 3); - break; - case ID_LED_DEF1_OFF2: - case ID_LED_ON1_OFF2: - case ID_LED_OFF1_OFF2: - mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); - mac->ledctl_mode2 |= ledctl_off << (i << 3); - break; - 
default: - /* Do nothing */ - break; - } - } - -out: - return ret_val; -} - -/** - * igb_cleanup_led - Set LED config to default operation - * @hw: pointer to the HW structure - * - * Remove the current LED configuration and set the LED configuration - * to the default value, saved from the EEPROM. - **/ -s32 igb_cleanup_led(struct e1000_hw *hw) -{ - wr32(E1000_LEDCTL, hw->mac.ledctl_default); - return 0; -} - -/** - * igb_blink_led - Blink LED - * @hw: pointer to the HW structure - * - * Blink the LEDs which are set to be on. - **/ -s32 igb_blink_led(struct e1000_hw *hw) -{ - u32 ledctl_blink = 0; - u32 i; - - /* - * set the blink bit for each LED that's "on" (0x0E) - * in ledctl_mode2 - */ - ledctl_blink = hw->mac.ledctl_mode2; - for (i = 0; i < 4; i++) - if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == - E1000_LEDCTL_MODE_LED_ON) - ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << - (i * 8)); - - wr32(E1000_LEDCTL, ledctl_blink); - - return 0; -} - -/** - * igb_led_off - Turn LED off - * @hw: pointer to the HW structure - * - * Turn LED off. - **/ -s32 igb_led_off(struct e1000_hw *hw) -{ - switch (hw->phy.media_type) { - case e1000_media_type_copper: - wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); - break; - default: - break; - } - - return 0; -} - -/** - * igb_disable_pcie_master - Disables PCI-express master access - * @hw: pointer to the HW structure - * - * Returns 0 if successful, else returns -10 - * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not caused - * the master requests to be disabled. - * - * Disables PCI-Express master access and verifies there are no pending - * requests. - **/ -s32 igb_disable_pcie_master(struct e1000_hw *hw) -{ - u32 ctrl; - s32 timeout = MASTER_DISABLE_TIMEOUT; - s32 ret_val = 0; - - if (hw->bus.type != e1000_bus_type_pci_express) - goto out; - - ctrl = rd32(E1000_CTRL); - ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; - wr32(E1000_CTRL, ctrl); - - while (timeout) { - if (!(rd32(E1000_STATUS) & - E1000_STATUS_GIO_MASTER_ENABLE)) - break; - udelay(100); - timeout--; - } - - if (!timeout) { - hw_dbg("Master requests are pending.\n"); - ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; - goto out; - } - -out: - return ret_val; -} - -/** - * igb_validate_mdi_setting - Verify MDI/MDIx settings - * @hw: pointer to the HW structure - * - * Verify that when not using auto-negotiation, MDI/MDIx is correctly - * set, which is forced to MDI mode only. - **/ -s32 igb_validate_mdi_setting(struct e1000_hw *hw) -{ - s32 ret_val = 0; - - if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { - hw_dbg("Invalid MDI setting detected\n"); - hw->phy.mdix = 1; - ret_val = -E1000_ERR_CONFIG; - goto out; - } - -out: - return ret_val; -} - -/** - * igb_write_8bit_ctrl_reg - Write an 8bit CTRL register - * @hw: pointer to the HW structure - * @reg: 32bit register offset such as E1000_SCTL - * @offset: register offset to write to - * @data: data to write at register offset - * - * Writes an address/data control type register. There are several of these - * and they all have the format address << 8 | data and bit 31 is polled for - * completion.
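For instance (illustrative values only), writing data 0x05 to offset 0x10 of a register such as E1000_SCTL composes and polls like this; the shift and ready-bit names match the format described above:

u32 regvalue = ((u32)0x05) | (0x10 << E1000_GEN_CTL_ADDRESS_SHIFT); /* 0x00001005 */

wr32(E1000_SCTL, regvalue);
/* the real helper bounds this loop with E1000_GEN_POLL_TIMEOUT */
while (!(rd32(E1000_SCTL) & E1000_GEN_CTL_READY))	/* bit 31 */
	udelay(5);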
- **/ -s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, - u32 offset, u8 data) -{ - u32 i, regvalue = 0; - s32 ret_val = 0; - - /* Set up the address and data */ - regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); - wr32(reg, regvalue); - - /* Poll the ready bit to see if the write completed */ - for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { - udelay(5); - regvalue = rd32(reg); - if (regvalue & E1000_GEN_CTL_READY) - break; - } - if (!(regvalue & E1000_GEN_CTL_READY)) { - hw_dbg("Reg %08x did not indicate ready\n", reg); - ret_val = -E1000_ERR_PHY; - goto out; - } - -out: - return ret_val; -} - -/** - * igb_enable_mng_pass_thru - Enable processing of ARPs - * @hw: pointer to the HW structure - * - * Verifies that the hardware needs to leave the interface enabled so that frames can - * be directed to and from the management interface. - **/ -bool igb_enable_mng_pass_thru(struct e1000_hw *hw) -{ - u32 manc; - u32 fwsm, factps; - bool ret_val = false; - - if (!hw->mac.asf_firmware_present) - goto out; - - manc = rd32(E1000_MANC); - - if (!(manc & E1000_MANC_RCV_TCO_EN)) - goto out; - - if (hw->mac.arc_subsystem_valid) { - fwsm = rd32(E1000_FWSM); - factps = rd32(E1000_FACTPS); - - if (!(factps & E1000_FACTPS_MNGCG) && - ((fwsm & E1000_FWSM_MODE_MASK) == - (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { - ret_val = true; - goto out; - } - } else { - if ((manc & E1000_MANC_SMBUS_EN) && - !(manc & E1000_MANC_ASF_EN)) { - ret_val = true; - goto out; - } - } - -out: - return ret_val; -} diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h deleted file mode 100644 index 4927f61..0000000 --- a/drivers/net/igb/e1000_mac.h +++ /dev/null @@ -1,90 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_MAC_H_ -#define _E1000_MAC_H_ - -#include "e1000_hw.h" - -#include "e1000_phy.h" -#include "e1000_nvm.h" -#include "e1000_defines.h" - -/* - * Functions that should not be called directly from drivers but can be used - * by other files in this 'shared code' - */ -s32 igb_blink_led(struct e1000_hw *hw); -s32 igb_check_for_copper_link(struct e1000_hw *hw); -s32 igb_cleanup_led(struct e1000_hw *hw); -s32 igb_config_fc_after_link_up(struct e1000_hw *hw); -s32 igb_disable_pcie_master(struct e1000_hw *hw); -s32 igb_force_mac_fc(struct e1000_hw *hw); -s32 igb_get_auto_rd_done(struct e1000_hw *hw); -s32 igb_get_bus_info_pcie(struct e1000_hw *hw); -s32 igb_get_hw_semaphore(struct e1000_hw *hw); -s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, - u16 *duplex); -s32 igb_id_led_init(struct e1000_hw *hw); -s32 igb_led_off(struct e1000_hw *hw); -void igb_update_mc_addr_list(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count); -s32 igb_setup_link(struct e1000_hw *hw); -s32 igb_validate_mdi_setting(struct e1000_hw *hw); -s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, - u32 offset, u8 data); - -void igb_clear_hw_cntrs_base(struct e1000_hw *hw); -void igb_clear_vfta(struct e1000_hw *hw); -s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); -void igb_config_collision_dist(struct e1000_hw *hw); -void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); -void igb_mta_set(struct e1000_hw *hw, u32 hash_value); -void igb_put_hw_semaphore(struct e1000_hw *hw); -void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); -s32 igb_check_alt_mac_addr(struct e1000_hw *hw); - -bool igb_enable_mng_pass_thru(struct e1000_hw *hw); - -enum e1000_mng_mode { - e1000_mng_mode_none = 0, - e1000_mng_mode_asf, - e1000_mng_mode_pt, - e1000_mng_mode_ipmi, - e1000_mng_mode_host_if_only -}; - -#define E1000_FACTPS_MNGCG 0x20000000 - -#define E1000_FWSM_MODE_MASK 0xE -#define E1000_FWSM_MODE_SHIFT 1 - -#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 - -extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); - -#endif diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c deleted file mode 100644 index 74f2f11..0000000 --- a/drivers/net/igb/e1000_mbx.c +++ /dev/null @@ -1,446 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "e1000_mbx.h" - -/** - * igb_read_mbx - Reads a message from the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to read - * - * returns SUCCESS if it successfully read message from buffer - **/ -s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - - /* limit read to size of mailbox */ - if (size > mbx->size) - size = mbx->size; - - if (mbx->ops.read) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); - - return ret_val; -} - -/** - * igb_write_mbx - Write a message to the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully copied message into the buffer - **/ -s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = 0; - - if (size > mbx->size) - ret_val = -E1000_ERR_MBX; - - else if (mbx->ops.write) - ret_val = mbx->ops.write(hw, msg, size, mbx_id); - - return ret_val; -} - -/** - * igb_check_for_msg - checks to see if someone sent us mail - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ -s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - - if (mbx->ops.check_for_msg) - ret_val = mbx->ops.check_for_msg(hw, mbx_id); - - return ret_val; -} - -/** - * igb_check_for_ack - checks to see if someone sent us an ACK - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ -s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - - if (mbx->ops.check_for_ack) - ret_val = mbx->ops.check_for_ack(hw, mbx_id); - - return ret_val; -} - -/** - * igb_check_for_rst - checks to see if other side has reset - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ -s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - - if (mbx->ops.check_for_rst) - ret_val = mbx->ops.check_for_rst(hw, mbx_id); - - return ret_val; -} - -/** - * igb_poll_for_msg - Wait for message notification - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message notification - **/ -static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - if (!countdown || !mbx->ops.check_for_msg) - goto out; - - while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { - countdown--; - if (!countdown) - break; - udelay(mbx->usec_delay); - } - - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; -out: - return countdown ?
0 : -E1000_ERR_MBX; -} - -/** - * igb_poll_for_ack - Wait for message acknowledgement - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message acknowledgement - **/ -static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - if (!countdown || !mbx->ops.check_for_ack) - goto out; - - while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { - countdown--; - if (!countdown) - break; - udelay(mbx->usec_delay); - } - - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; -out: - return countdown ? 0 : -E1000_ERR_MBX; -} - -/** - * igb_read_posted_mbx - Wait for message notification and receive message - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message notification and - * copied it into the receive buffer. - **/ -static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - - if (!mbx->ops.read) - goto out; - - ret_val = igb_poll_for_msg(hw, mbx_id); - - if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); -out: - return ret_val; -} - -/** - * igb_write_posted_mbx - Write a message to the mailbox, wait for ack - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully copied message into the buffer and - * received an ack to that message within delay * timeout period - **/ -static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - - /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops.write || !mbx->timeout) - goto out; - - /* send msg */ - ret_val = mbx->ops.write(hw, msg, size, mbx_id); - - /* if msg sent wait until we receive an ack */ - if (!ret_val) - ret_val = igb_poll_for_ack(hw, mbx_id); -out: - return ret_val; -} - -static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) -{ - u32 mbvficr = rd32(E1000_MBVFICR); - s32 ret_val = -E1000_ERR_MBX; - - if (mbvficr & mask) { - ret_val = 0; - wr32(E1000_MBVFICR, mask); - } - - return ret_val; -} - -/** - * igb_check_for_msg_pf - checks to see if the VF has sent mail - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) -{ - s32 ret_val = -E1000_ERR_MBX; - - if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { - ret_val = 0; - hw->mbx.stats.reqs++; - } - - return ret_val; -} - -/** - * igb_check_for_ack_pf - checks to see if the VF has ACKed - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) -{ - s32 ret_val = -E1000_ERR_MBX; - - if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { - ret_val = 0; - hw->mbx.stats.acks++; - } - - return ret_val; -} - -/** - * igb_check_for_rst_pf - checks to see if the VF has reset - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * 
returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) -{ - u32 vflre = rd32(E1000_VFLRE); - s32 ret_val = -E1000_ERR_MBX; - - if (vflre & (1 << vf_number)) { - ret_val = 0; - wr32(E1000_VFLRE, (1 << vf_number)); - hw->mbx.stats.rsts++; - } - - return ret_val; -} - -/** - * igb_obtain_mbx_lock_pf - obtain mailbox lock - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * return SUCCESS if we obtained the mailbox lock - **/ -static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) -{ - s32 ret_val = -E1000_ERR_MBX; - u32 p2v_mailbox; - - - /* Take ownership of the buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); - - /* reserve mailbox for vf use */ - p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); - if (p2v_mailbox & E1000_P2VMAILBOX_PFU) - ret_val = 0; - - return ret_val; -} - -/** - * igb_write_mbx_pf - Places a message in the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @vf_number: the VF index - * - * returns SUCCESS if it successfully copied message into the buffer - **/ -static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) -{ - s32 ret_val; - u16 i; - - /* lock the mailbox to prevent pf/vf race condition */ - ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); - if (ret_val) - goto out_no_write; - - /* flush msg and acks as we are overwriting the message buffer */ - igb_check_for_msg_pf(hw, vf_number); - igb_check_for_ack_pf(hw, vf_number); - - /* copy the caller specified message to the mailbox memory buffer */ - for (i = 0; i < size; i++) - array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); - - /* Interrupt VF to tell it a message has been sent and release buffer*/ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); - - /* update stats */ - hw->mbx.stats.msgs_tx++; - -out_no_write: - return ret_val; - -} - -/** - * igb_read_mbx_pf - Read a message from the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @vf_number: the VF index - * - * This function copies a message from the mailbox buffer to the caller's - * memory buffer. The presumption is that the caller knows that there was - * a message due to a VF request so no polling for message is needed. 
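A PF-side caller might therefore pair the check with the read, along these lines (an illustrative sketch, not driver code; E1000_VFMAILBOX_SIZE and the message IDs come from e1000_mbx.h below):

static void example_service_vf(struct e1000_hw *hw, u16 vf)
{
	u32 msgbuf[E1000_VFMAILBOX_SIZE];

	if (igb_check_for_msg(hw, vf))
		return;		/* no VFREQ bit latched, nothing to do */
	if (!igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf)) {
		/* dispatch on msgbuf[0]: E1000_VF_RESET,
		 * E1000_VF_SET_MAC_ADDR, and so on */
	}
}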
- **/ -static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) -{ - s32 ret_val; - u16 i; - - /* lock the mailbox to prevent pf/vf race condition */ - ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); - if (ret_val) - goto out_no_read; - - /* copy the message to the mailbox memory buffer */ - for (i = 0; i < size; i++) - msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); - - /* Acknowledge the message and release buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); - - /* update stats */ - hw->mbx.stats.msgs_rx++; - -out_no_read: - return ret_val; -} - -/** - * e1000_init_mbx_params_pf - set initial values for pf mailbox - * @hw: pointer to the HW structure - * - * Initializes the hw->mbx struct to correct values for pf mailbox - */ -s32 igb_init_mbx_params_pf(struct e1000_hw *hw) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - - mbx->timeout = 0; - mbx->usec_delay = 0; - - mbx->size = E1000_VFMAILBOX_SIZE; - - mbx->ops.read = igb_read_mbx_pf; - mbx->ops.write = igb_write_mbx_pf; - mbx->ops.read_posted = igb_read_posted_mbx; - mbx->ops.write_posted = igb_write_posted_mbx; - mbx->ops.check_for_msg = igb_check_for_msg_pf; - mbx->ops.check_for_ack = igb_check_for_ack_pf; - mbx->ops.check_for_rst = igb_check_for_rst_pf; - - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; - - return 0; -} - diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h deleted file mode 100644 index eddb0f8..0000000 --- a/drivers/net/igb/e1000_mbx.h +++ /dev/null @@ -1,77 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_MBX_H_ -#define _E1000_MBX_H_ - -#include "e1000_hw.h" - -#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ - -#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ -#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ -#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ -#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ - -#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ - -/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the - * PF. The reverse is true if it is E1000_PF_*. - * Message ACKs are the value or'd with 0xF0000000 - */ -#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ -#define E1000_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for extra info for certain messages */ -#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) - -#define E1000_VF_RESET 0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ -#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ -#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ -#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ -#define E1000_VF_SET_PROMISC 0x06 /* VF requests to clear VMOLR.ROPE/MPME */ -#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) - -#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ - -s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_check_for_msg(struct e1000_hw *, u16); -s32 igb_check_for_ack(struct e1000_hw *, u16); -s32 igb_check_for_rst(struct e1000_hw *, u16); -s32 igb_init_mbx_params_pf(struct e1000_hw *); - -#endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c deleted file mode 100644 index 4040712..0000000 --- a/drivers/net/igb/e1000_nvm.c +++ /dev/null @@ -1,713 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING".
- - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include <linux/if_ether.h> -#include <linux/delay.h> - -#include "e1000_mac.h" -#include "e1000_nvm.h" - -/** - * igb_raise_eec_clk - Raise EEPROM clock - * @hw: pointer to the HW structure - * @eecd: pointer to the EEPROM - * - * Enable/Raise the EEPROM clock bit. - **/ -static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) -{ - *eecd = *eecd | E1000_EECD_SK; - wr32(E1000_EECD, *eecd); - wrfl(); - udelay(hw->nvm.delay_usec); -} - -/** - * igb_lower_eec_clk - Lower EEPROM clock - * @hw: pointer to the HW structure - * @eecd: pointer to the EEPROM - * - * Clear/Lower the EEPROM clock bit. - **/ -static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) -{ - *eecd = *eecd & ~E1000_EECD_SK; - wr32(E1000_EECD, *eecd); - wrfl(); - udelay(hw->nvm.delay_usec); -} - -/** - * igb_shift_out_eec_bits - Shift data bits out to the EEPROM - * @hw: pointer to the HW structure - * @data: data to send to the EEPROM - * @count: number of bits to shift out - * - * We need to shift 'count' bits out to the EEPROM. So, the value in the - * "data" parameter will be shifted out to the EEPROM one bit at a time. - * In order to do this, "data" must be broken down into bits. - **/ -static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = rd32(E1000_EECD); - u32 mask; - - mask = 0x01 << (count - 1); - if (nvm->type == e1000_nvm_eeprom_spi) - eecd |= E1000_EECD_DO; - - do { - eecd &= ~E1000_EECD_DI; - - if (data & mask) - eecd |= E1000_EECD_DI; - - wr32(E1000_EECD, eecd); - wrfl(); - - udelay(nvm->delay_usec); - - igb_raise_eec_clk(hw, &eecd); - igb_lower_eec_clk(hw, &eecd); - - mask >>= 1; - } while (mask); - - eecd &= ~E1000_EECD_DI; - wr32(E1000_EECD, eecd); -} - -/** - * igb_shift_in_eec_bits - Shift data bits in from the EEPROM - * @hw: pointer to the HW structure - * @count: number of bits to shift in - * - * In order to read a register from the EEPROM, we need to shift 'count' bits - * in from the EEPROM. Bits are "shifted in" by raising the clock input to - * the EEPROM (setting the SK bit), and then reading the value of the data out - * "DO" bit. During this "shifting in" process the data in "DI" bit should - * always be clear. - **/ -static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) -{ - u32 eecd; - u32 i; - u16 data; - - eecd = rd32(E1000_EECD); - - eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); - data = 0; - - for (i = 0; i < count; i++) { - data <<= 1; - igb_raise_eec_clk(hw, &eecd); - - eecd = rd32(E1000_EECD); - - eecd &= ~E1000_EECD_DI; - if (eecd & E1000_EECD_DO) - data |= 1; - - igb_lower_eec_clk(hw, &eecd); - } - - return data; -} - -/** - * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion - * @hw: pointer to the HW structure - * @ee_reg: EEPROM flag for polling - * - * Polls the EEPROM status bit for either read or write completion based - * upon the value of 'ee_reg'.
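Callers use the helper by kicking off a transaction and then waiting on the DONE bit; a single-word EERD read looks roughly like this (an illustrative sketch, not driver code; igb_read_nvm_eerd below does the same with bounds checking):

static s32 example_eerd_read_word(struct e1000_hw *hw, u16 offset, u16 *data)
{
	s32 ret_val;

	/* start the read: address plus the START bit */
	wr32(E1000_EERD, ((u32)offset << E1000_NVM_RW_ADDR_SHIFT) |
			 E1000_NVM_RW_REG_START);
	ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
	if (!ret_val)
		*data = (u16)(rd32(E1000_EERD) >> E1000_NVM_RW_REG_DATA);
	return ret_val;
}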
- **/ -static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) -{ - u32 attempts = 100000; - u32 i, reg = 0; - s32 ret_val = -E1000_ERR_NVM; - - for (i = 0; i < attempts; i++) { - if (ee_reg == E1000_NVM_POLL_READ) - reg = rd32(E1000_EERD); - else - reg = rd32(E1000_EEWR); - - if (reg & E1000_NVM_RW_REG_DONE) { - ret_val = 0; - break; - } - - udelay(5); - } - - return ret_val; -} - -/** - * igb_acquire_nvm - Generic request for access to EEPROM - * @hw: pointer to the HW structure - * - * Set the EEPROM access request bit and wait for EEPROM access grant bit. - * Return successful if access grant bit set, else clear the request for - * EEPROM access and return -E1000_ERR_NVM (-1). - **/ -s32 igb_acquire_nvm(struct e1000_hw *hw) -{ - u32 eecd = rd32(E1000_EECD); - s32 timeout = E1000_NVM_GRANT_ATTEMPTS; - s32 ret_val = 0; - - - wr32(E1000_EECD, eecd | E1000_EECD_REQ); - eecd = rd32(E1000_EECD); - - while (timeout) { - if (eecd & E1000_EECD_GNT) - break; - udelay(5); - eecd = rd32(E1000_EECD); - timeout--; - } - - if (!timeout) { - eecd &= ~E1000_EECD_REQ; - wr32(E1000_EECD, eecd); - hw_dbg("Could not acquire NVM grant\n"); - ret_val = -E1000_ERR_NVM; - } - - return ret_val; -} - -/** - * igb_standby_nvm - Return EEPROM to standby state - * @hw: pointer to the HW structure - * - * Return the EEPROM to a standby state. - **/ -static void igb_standby_nvm(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = rd32(E1000_EECD); - - if (nvm->type == e1000_nvm_eeprom_spi) { - /* Toggle CS to flush commands */ - eecd |= E1000_EECD_CS; - wr32(E1000_EECD, eecd); - wrfl(); - udelay(nvm->delay_usec); - eecd &= ~E1000_EECD_CS; - wr32(E1000_EECD, eecd); - wrfl(); - udelay(nvm->delay_usec); - } -} - -/** - * e1000_stop_nvm - Terminate EEPROM command - * @hw: pointer to the HW structure - * - * Terminates the current command by inverting the EEPROM's chip select pin. - **/ -static void e1000_stop_nvm(struct e1000_hw *hw) -{ - u32 eecd; - - eecd = rd32(E1000_EECD); - if (hw->nvm.type == e1000_nvm_eeprom_spi) { - /* Pull CS high */ - eecd |= E1000_EECD_CS; - igb_lower_eec_clk(hw, &eecd); - } -} - -/** - * igb_release_nvm - Release exclusive access to EEPROM - * @hw: pointer to the HW structure - * - * Stop any current commands to the EEPROM and clear the EEPROM request bit. - **/ -void igb_release_nvm(struct e1000_hw *hw) -{ - u32 eecd; - - e1000_stop_nvm(hw); - - eecd = rd32(E1000_EECD); - eecd &= ~E1000_EECD_REQ; - wr32(E1000_EECD, eecd); -} - -/** - * igb_ready_nvm_eeprom - Prepares EEPROM for read/write - * @hw: pointer to the HW structure - * - * Sets up the EEPROM for reading and writing. - **/ -static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = rd32(E1000_EECD); - s32 ret_val = 0; - u16 timeout = 0; - u8 spi_stat_reg; - - - if (nvm->type == e1000_nvm_eeprom_spi) { - /* Clear SK and CS */ - eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); - wr32(E1000_EECD, eecd); - wrfl(); - udelay(1); - timeout = NVM_MAX_RETRY_SPI; - - /* - * Read "Status Register" repeatedly until the LSB is cleared. - * The EEPROM will signal that the command has been completed - * by clearing bit 0 of the internal status register. If it's - * not cleared within 'timeout', then error out.
- */ - while (timeout) { - igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, - hw->nvm.opcode_bits); - spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); - if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) - break; - - udelay(5); - igb_standby_nvm(hw); - timeout--; - } - - if (!timeout) { - hw_dbg("SPI NVM Status error\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } - } - -out: - return ret_val; -} - -/** - * igb_read_nvm_spi - Read EEPROM using SPI - * @hw: pointer to the HW structure - * @offset: offset of word in the EEPROM to read - * @words: number of words to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM. - **/ -s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 i = 0; - s32 ret_val; - u16 word_in; - u8 read_opcode = NVM_READ_OPCODE_SPI; - - /* - * A check for invalid values: offset too large, too many words, - * and not enough words. - */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { - hw_dbg("nvm parameter(s) out of bounds\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } - - ret_val = nvm->ops.acquire(hw); - if (ret_val) - goto out; - - ret_val = igb_ready_nvm_eeprom(hw); - if (ret_val) - goto release; - - igb_standby_nvm(hw); - - if ((nvm->address_bits == 8) && (offset >= 128)) - read_opcode |= NVM_A8_OPCODE_SPI; - - /* Send the READ command (opcode + addr) */ - igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); - igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); - - /* - * Read the data. SPI NVMs increment the address with each byte - * read and will roll over if reading beyond the end. This allows - * us to read the whole NVM from any offset - */ - for (i = 0; i < words; i++) { - word_in = igb_shift_in_eec_bits(hw, 16); - data[i] = (word_in >> 8) | (word_in << 8); - } - -release: - nvm->ops.release(hw); - -out: - return ret_val; -} - -/** - * igb_read_nvm_eerd - Reads EEPROM using EERD register - * @hw: pointer to the HW structure - * @offset: offset of word in the EEPROM to read - * @words: number of words to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the EERD register. - **/ -s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 i, eerd = 0; - s32 ret_val = 0; - - /* - * A check for invalid values: offset too large, too many words, - * and not enough words. - */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { - hw_dbg("nvm parameter(s) out of bounds\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } - - for (i = 0; i < words; i++) { - eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + - E1000_NVM_RW_REG_START; - - wr32(E1000_EERD, eerd); - ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); - if (ret_val) - break; - - data[i] = (rd32(E1000_EERD) >> - E1000_NVM_RW_REG_DATA); - } - -out: - return ret_val; -} - -/** - * igb_write_nvm_spi - Write to EEPROM using SPI - * @hw: pointer to the HW structure - * @offset: offset within the EEPROM to be written to - * @words: number of words to write - * @data: 16 bit word(s) to be written to the EEPROM - * - * Writes data to EEPROM at offset using SPI interface. - * - * If igb_update_nvm_checksum is not called after this function, the - * EEPROM will most likely contain an invalid checksum.
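In other words, a well-behaved caller pairs the two operations, roughly like this (illustrative only, not driver code; igb_update_nvm_checksum appears later in this file):

static s32 example_nvm_write_word(struct e1000_hw *hw, u16 offset, u16 word)
{
	s32 ret_val;

	ret_val = igb_write_nvm_spi(hw, offset, 1, &word);
	if (ret_val)
		return ret_val;
	/* recompute the checksum word so validation still sums to NVM_SUM */
	return igb_update_nvm_checksum(hw);
}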
- **/ -s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - s32 ret_val; - u16 widx = 0; - - /* - * A check for invalid values: offset too large, too many words, - * and not enough words. - */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { - hw_dbg("nvm parameter(s) out of bounds\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } - - ret_val = hw->nvm.ops.acquire(hw); - if (ret_val) - goto out; - - msleep(10); - - while (widx < words) { - u8 write_opcode = NVM_WRITE_OPCODE_SPI; - - ret_val = igb_ready_nvm_eeprom(hw); - if (ret_val) - goto release; - - igb_standby_nvm(hw); - - /* Send the WRITE ENABLE command (8 bit opcode) */ - igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, - nvm->opcode_bits); - - igb_standby_nvm(hw); - - /* - * Some SPI eeproms use the 8th address bit embedded in the - * opcode - */ - if ((nvm->address_bits == 8) && (offset >= 128)) - write_opcode |= NVM_A8_OPCODE_SPI; - - /* Send the Write command (8-bit opcode + addr) */ - igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); - igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), - nvm->address_bits); - - /* Loop to allow for up to whole page write of eeprom */ - while (widx < words) { - u16 word_out = data[widx]; - word_out = (word_out >> 8) | (word_out << 8); - igb_shift_out_eec_bits(hw, word_out, 16); - widx++; - - if ((((offset + widx) * 2) % nvm->page_size) == 0) { - igb_standby_nvm(hw); - break; - } - } - } - - msleep(10); -release: - hw->nvm.ops.release(hw); - -out: - return ret_val; -} - -/** - * igb_read_part_string - Read device part number - * @hw: pointer to the HW structure - * @part_num: pointer to device part number - * @part_num_size: size of part number buffer - * - * Reads the product board assembly (PBA) number from the EEPROM and stores - * the value in part_num. 
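A typical call uses an 11-byte buffer, which covers the legacy hex format decoded in the function body; string-format parts may need more room, in which case the function reports E1000_ERR_NO_SPACE. An illustrative usage sketch, not driver code:

u8 pba[11];	/* 10 characters plus the terminating NUL */

if (!igb_read_part_string(hw, pba, sizeof(pba)))
	hw_dbg("PBA number: %s\n", pba);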
- **/ -s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) -{ - s32 ret_val; - u16 nvm_data; - u16 pointer; - u16 offset; - u16 length; - - if (part_num == NULL) { - hw_dbg("PBA string buffer was null\n"); - ret_val = E1000_ERR_INVALID_ARGUMENT; - goto out; - } - - ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - - ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - - /* - * if nvm_data is not ptr guard the PBA must be in legacy format which - * means pointer is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (nvm_data != NVM_PBA_PTR_GUARD) { - hw_dbg("NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (part_num_size < 11) { - hw_dbg("PBA string buffer too small\n"); - return E1000_ERR_NO_SPACE; - } - - /* extract hex string from data and pointer */ - part_num[0] = (nvm_data >> 12) & 0xF; - part_num[1] = (nvm_data >> 8) & 0xF; - part_num[2] = (nvm_data >> 4) & 0xF; - part_num[3] = nvm_data & 0xF; - part_num[4] = (pointer >> 12) & 0xF; - part_num[5] = (pointer >> 8) & 0xF; - part_num[6] = '-'; - part_num[7] = 0; - part_num[8] = (pointer >> 4) & 0xF; - part_num[9] = pointer & 0xF; - - /* put a null character on the end of our string */ - part_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (part_num[offset] < 0xA) - part_num[offset] += '0'; - else if (part_num[offset] < 0x10) - part_num[offset] += 'A' - 0xA; - } - - goto out; - } - - ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - - if (length == 0xFFFF || length == 0) { - hw_dbg("NVM PBA number section invalid length\n"); - ret_val = E1000_ERR_NVM_PBA_SECTION; - goto out; - } - /* check if part_num buffer is big enough */ - if (part_num_size < (((u32)length * 2) - 1)) { - hw_dbg("PBA string buffer too small\n"); - ret_val = E1000_ERR_NO_SPACE; - goto out; - } - - /* trim pba length from start of string */ - pointer++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - part_num[offset * 2] = (u8)(nvm_data >> 8); - part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); - } - part_num[offset * 2] = '\0'; - -out: - return ret_val; -} - -/** - * igb_read_mac_addr - Read device MAC address - * @hw: pointer to the HW structure - * - * Reads the device MAC address from the EEPROM and stores the value. - * Since devices with two ports use the same EEPROM, we increment the - * last bit in the MAC address for the second port. 
- **/ -s32 igb_read_mac_addr(struct e1000_hw *hw) -{ - u32 rar_high; - u32 rar_low; - u16 i; - - rar_high = rd32(E1000_RAH(0)); - rar_low = rd32(E1000_RAL(0)); - - for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) - hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); - - for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) - hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); - - for (i = 0; i < ETH_ALEN; i++) - hw->mac.addr[i] = hw->mac.perm_addr[i]; - - return 0; -} - -/** - * igb_validate_nvm_checksum - Validate EEPROM checksum - * @hw: pointer to the HW structure - * - * Calculates the EEPROM checksum by reading/adding each word of the EEPROM - * and then verifies that the sum of the EEPROM is equal to 0xBABA. - **/ -s32 igb_validate_nvm_checksum(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 checksum = 0; - u16 i, nvm_data; - - for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { - ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); - if (ret_val) { - hw_dbg("NVM Read Error\n"); - goto out; - } - checksum += nvm_data; - } - - if (checksum != (u16) NVM_SUM) { - hw_dbg("NVM Checksum Invalid\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } - -out: - return ret_val; -} - -/** - * igb_update_nvm_checksum - Update EEPROM checksum - * @hw: pointer to the HW structure - * - * Updates the EEPROM checksum by reading/adding each word of the EEPROM - * up to the checksum. Then calculates the EEPROM checksum and writes the - * value to the EEPROM. - **/ -s32 igb_update_nvm_checksum(struct e1000_hw *hw) -{ - s32 ret_val; - u16 checksum = 0; - u16 i, nvm_data; - - for (i = 0; i < NVM_CHECKSUM_REG; i++) { - ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); - if (ret_val) { - hw_dbg("NVM Read Error while updating checksum.\n"); - goto out; - } - checksum += nvm_data; - } - checksum = (u16) NVM_SUM - checksum; - ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); - if (ret_val) - hw_dbg("NVM Write Error while updating checksum.\n"); - -out: - return ret_val; -} - diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h deleted file mode 100644 index a2a7ca9..0000000 --- a/drivers/net/igb/e1000_nvm.h +++ /dev/null @@ -1,43 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_NVM_H_ -#define _E1000_NVM_H_ - -s32 igb_acquire_nvm(struct e1000_hw *hw); -void igb_release_nvm(struct e1000_hw *hw); -s32 igb_read_mac_addr(struct e1000_hw *hw); -s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); -s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, - u32 part_num_size); -s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); -s32 igb_validate_nvm_checksum(struct e1000_hw *hw); -s32 igb_update_nvm_checksum(struct e1000_hw *hw); - -#endif diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c deleted file mode 100644 index e662554..0000000 --- a/drivers/net/igb/e1000_phy.c +++ /dev/null @@ -1,2341 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include <linux/if_ether.h> -#include <linux/delay.h> - -#include "e1000_mac.h" -#include "e1000_phy.h" - -static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); -static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, - u16 *phy_ctrl); -static s32 igb_wait_autoneg(struct e1000_hw *hw); - -/* Cable length tables */ -static const u16 e1000_m88_cable_length_table[] = - { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; -#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_m88_cable_length_table) / \ - sizeof(e1000_m88_cable_length_table[0])) - -static const u16 e1000_igp_2_cable_length_table[] = - { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, - 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, - 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, - 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, - 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, - 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, - 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, - 104, 109, 114, 118, 121, 124}; -#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_igp_2_cable_length_table) / \ - sizeof(e1000_igp_2_cable_length_table[0])) - -/** - * igb_check_reset_block - Check if PHY reset is blocked - * @hw: pointer to the HW structure - * - * Read the PHY management control register and check whether a PHY reset - * is blocked.
If a reset is not blocked return 0, otherwise - * return E1000_BLK_PHY_RESET (12). - **/ -s32 igb_check_reset_block(struct e1000_hw *hw) -{ - u32 manc; - - manc = rd32(E1000_MANC); - - return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? - E1000_BLK_PHY_RESET : 0; -} - -/** - * igb_get_phy_id - Retrieve the PHY ID and revision - * @hw: pointer to the HW structure - * - * Reads the PHY registers and stores the PHY ID and possibly the PHY - * revision in the hardware structure. - **/ -s32 igb_get_phy_id(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; - u16 phy_id; - - ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); - if (ret_val) - goto out; - - phy->id = (u32)(phy_id << 16); - udelay(20); - ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); - if (ret_val) - goto out; - - phy->id |= (u32)(phy_id & PHY_REVISION_MASK); - phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); - -out: - return ret_val; -} - -/** - * igb_phy_reset_dsp - Reset PHY DSP - * @hw: pointer to the HW structure - * - * Reset the digital signal processor. - **/ -static s32 igb_phy_reset_dsp(struct e1000_hw *hw) -{ - s32 ret_val = 0; - - if (!(hw->phy.ops.write_reg)) - goto out; - - ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); - if (ret_val) - goto out; - - ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); - -out: - return ret_val; -} - -/** - * igb_read_phy_reg_mdic - Read MDI control register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Reads the MDI control regsiter in the PHY at offset and stores the - * information read to data. - **/ -s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) -{ - struct e1000_phy_info *phy = &hw->phy; - u32 i, mdic = 0; - s32 ret_val = 0; - - if (offset > MAX_PHY_REG_ADDRESS) { - hw_dbg("PHY Address %d is out of range\n", offset); - ret_val = -E1000_ERR_PARAM; - goto out; - } - - /* - * Set up Op-code, Phy Address, and register offset in the MDI - * Control register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. - */ - mdic = ((offset << E1000_MDIC_REG_SHIFT) | - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_READ)); - - wr32(E1000_MDIC, mdic); - - /* - * Poll the ready bit to see if the MDI read completed - * Increasing the time out as testing showed failures with - * the lower time out - */ - for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - udelay(50); - mdic = rd32(E1000_MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { - hw_dbg("MDI Read did not complete\n"); - ret_val = -E1000_ERR_PHY; - goto out; - } - if (mdic & E1000_MDIC_ERROR) { - hw_dbg("MDI Error\n"); - ret_val = -E1000_ERR_PHY; - goto out; - } - *data = (u16) mdic; - -out: - return ret_val; -} - -/** - * igb_write_phy_reg_mdic - Write MDI control register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write to register at offset - * - * Writes data to MDI control register in the PHY at offset. - **/ -s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) -{ - struct e1000_phy_info *phy = &hw->phy; - u32 i, mdic = 0; - s32 ret_val = 0; - - if (offset > MAX_PHY_REG_ADDRESS) { - hw_dbg("PHY Address %d is out of range\n", offset); - ret_val = -E1000_ERR_PARAM; - goto out; - } - - /* - * Set up Op-code, Phy Address, and register offset in the MDI - * Control register. 
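The MDIC handshake used by igb_read_phy_reg_mdic() and igb_write_phy_reg_mdic() reduces to one skeleton: pack the opcode, PHY address and register offset into E1000_MDIC, then poll the READY bit. A condensed sketch of that shared pattern, reusing the driver's constants and accessors (rd32/wr32/udelay are the kernel's, so this shows the shape rather than being a drop-in):

/* Shared MDIC pattern; 'op' is E1000_MDIC_OP_READ or E1000_MDIC_OP_WRITE. */
static s32 mdic_xfer(struct e1000_hw *hw, u32 op, u32 offset, u16 *data)
{
	u32 mdic = (offset << E1000_MDIC_REG_SHIFT) |
		   (hw->phy.addr << E1000_MDIC_PHY_SHIFT) | op;
	u32 i;

	if (op == E1000_MDIC_OP_WRITE)
		mdic |= *data;

	wr32(E1000_MDIC, mdic);

	/* Tripled poll budget: the generic timeout proved too short in testing. */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT * 3; i++) {
		udelay(50);
		mdic = rd32(E1000_MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY) || (mdic & E1000_MDIC_ERROR))
		return -E1000_ERR_PHY;

	if (op == E1000_MDIC_OP_READ)
		*data = (u16)mdic;
	return 0;
}
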
The MAC will take care of interfacing with the - * PHY to retrieve the desired data. - */ - mdic = (((u32)data) | - (offset << E1000_MDIC_REG_SHIFT) | - (phy->addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_WRITE)); - - wr32(E1000_MDIC, mdic); - - /* - * Poll the ready bit to see if the MDI read completed - * Increasing the time out as testing showed failures with - * the lower time out - */ - for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - udelay(50); - mdic = rd32(E1000_MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { - hw_dbg("MDI Write did not complete\n"); - ret_val = -E1000_ERR_PHY; - goto out; - } - if (mdic & E1000_MDIC_ERROR) { - hw_dbg("MDI Error\n"); - ret_val = -E1000_ERR_PHY; - goto out; - } - -out: - return ret_val; -} - -/** - * igb_read_phy_reg_i2c - Read PHY register using i2c - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Reads the PHY register at offset using the i2c interface and stores the - * retrieved information in data. - **/ -s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) -{ - struct e1000_phy_info *phy = &hw->phy; - u32 i, i2ccmd = 0; - - - /* - * Set up Op-code, Phy Address, and register address in the I2CCMD - * register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. - */ - i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | - (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | - (E1000_I2CCMD_OPCODE_READ)); - - wr32(E1000_I2CCMD, i2ccmd); - - /* Poll the ready bit to see if the I2C read completed */ - for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { - udelay(50); - i2ccmd = rd32(E1000_I2CCMD); - if (i2ccmd & E1000_I2CCMD_READY) - break; - } - if (!(i2ccmd & E1000_I2CCMD_READY)) { - hw_dbg("I2CCMD Read did not complete\n"); - return -E1000_ERR_PHY; - } - if (i2ccmd & E1000_I2CCMD_ERROR) { - hw_dbg("I2CCMD Error bit set\n"); - return -E1000_ERR_PHY; - } - - /* Need to byte-swap the 16-bit value. */ - *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); - - return 0; -} - -/** - * igb_write_phy_reg_i2c - Write PHY register using i2c - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Writes the data to PHY register at the offset using the i2c interface. - **/ -s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) -{ - struct e1000_phy_info *phy = &hw->phy; - u32 i, i2ccmd = 0; - u16 phy_data_swapped; - - - /* Swap the data bytes for the I2C interface */ - phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); - - /* - * Set up Op-code, Phy Address, and register address in the I2CCMD - * register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. 
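One non-obvious detail in the two I2CCMD helpers here is byte order: the 16-bit data field is exchanged with the PHY most-significant byte first, so both the read and write paths swap bytes. The swap is equivalent to the kernel's swab16():

/* Byte swap applied to the I2CCMD data field in igb_read/write_phy_reg_i2c(). */
static inline u16 i2ccmd_swab16(u16 v)
{
	return (u16)((v >> 8) | (v << 8));
}
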
- */ - i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | - (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | - E1000_I2CCMD_OPCODE_WRITE | - phy_data_swapped); - - wr32(E1000_I2CCMD, i2ccmd); - - /* Poll the ready bit to see if the I2C read completed */ - for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { - udelay(50); - i2ccmd = rd32(E1000_I2CCMD); - if (i2ccmd & E1000_I2CCMD_READY) - break; - } - if (!(i2ccmd & E1000_I2CCMD_READY)) { - hw_dbg("I2CCMD Write did not complete\n"); - return -E1000_ERR_PHY; - } - if (i2ccmd & E1000_I2CCMD_ERROR) { - hw_dbg("I2CCMD Error bit set\n"); - return -E1000_ERR_PHY; - } - - return 0; -} - -/** - * igb_read_phy_reg_igp - Read igp PHY register - * @hw: pointer to the HW structure - * @offset: register offset to be read - * @data: pointer to the read data - * - * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retrieved information in data. Release any acquired - * semaphores before exiting. - **/ -s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) -{ - s32 ret_val = 0; - - if (!(hw->phy.ops.acquire)) - goto out; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - ret_val = igb_write_phy_reg_mdic(hw, - IGP01E1000_PHY_PAGE_SELECT, - (u16)offset); - if (ret_val) { - hw->phy.ops.release(hw); - goto out; - } - } - - ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - - hw->phy.ops.release(hw); - -out: - return ret_val; -} - -/** - * igb_write_phy_reg_igp - Write igp PHY register - * @hw: pointer to the HW structure - * @offset: register offset to write to - * @data: data to write at register offset - * - * Acquires semaphore, if necessary, then writes the data to PHY register - * at the offset. Release any acquired semaphores before exiting. - **/ -s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) -{ - s32 ret_val = 0; - - if (!(hw->phy.ops.acquire)) - goto out; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - goto out; - - if (offset > MAX_PHY_MULTI_PAGE_REG) { - ret_val = igb_write_phy_reg_mdic(hw, - IGP01E1000_PHY_PAGE_SELECT, - (u16)offset); - if (ret_val) { - hw->phy.ops.release(hw); - goto out; - } - } - - ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - - hw->phy.ops.release(hw); - -out: - return ret_val; -} - -/** - * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link - * @hw: pointer to the HW structure - * - * Sets up Carrier-sense on Transmit and downshift values. - **/ -s32 igb_copper_link_setup_82580(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - - - if (phy->reset_disable) { - ret_val = 0; - goto out; - } - - if (phy->type == e1000_phy_82580) { - ret_val = hw->phy.ops.reset(hw); - if (ret_val) { - hw_dbg("Error resetting the PHY.\n"); - goto out; - } - } - - /* Enable CRS on TX. This must be set for half-duplex operation. */ - ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); - if (ret_val) - goto out; - - phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; - - /* Enable downshift */ - phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; - - ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); - -out: - return ret_val; -} - -/** - * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link - * @hw: pointer to the HW structure - * - * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock - * and downshift values are set also. 
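The igp read/write helpers just above share one access pattern: take the PHY semaphore, select the page for offsets beyond MAX_PHY_MULTI_PAGE_REG, address within the page via the low bits, and release. Reduced to a single hypothetical helper (error paths shortened; all names are the driver's own):

static s32 igp_phy_access(struct e1000_hw *hw, u32 offset, u16 *data, bool wr)
{
	s32 ret_val = hw->phy.ops.acquire(hw);

	if (ret_val)
		return ret_val;

	/* Registers past the first page need an explicit page select. */
	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		ret_val = igb_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
						 (u16)offset);
		if (ret_val)
			goto release;
	}

	if (wr)
		ret_val = igb_write_phy_reg_mdic(hw,
						 MAX_PHY_REG_ADDRESS & offset,
						 *data);
	else
		ret_val = igb_read_phy_reg_mdic(hw,
						MAX_PHY_REG_ADDRESS & offset,
						data);
release:
	hw->phy.ops.release(hw);
	return ret_val;
}
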
- **/ -s32 igb_copper_link_setup_m88(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - - if (phy->reset_disable) { - ret_val = 0; - goto out; - } - - /* Enable CRS on TX. This must be set for half-duplex operation. */ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - goto out; - - phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - - /* - * Options: - * MDI/MDI-X = 0 (default) - * 0 - Auto for all speeds - * 1 - MDI mode - * 2 - MDI-X mode - * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) - */ - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; - - switch (phy->mdix) { - case 1: - phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; - break; - case 2: - phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; - break; - case 3: - phy_data |= M88E1000_PSCR_AUTO_X_1000T; - break; - case 0: - default: - phy_data |= M88E1000_PSCR_AUTO_X_MODE; - break; - } - - /* - * Options: - * disable_polarity_correction = 0 (default) - * Automatic Correction for Reversed Cable Polarity - * 0 - Disabled - * 1 - Enabled - */ - phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; - if (phy->disable_polarity_correction == 1) - phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; - - ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if (ret_val) - goto out; - - if (phy->revision < E1000_REVISION_4) { - /* - * Force TX_CLK in the Extended PHY Specific Control Register - * to 25MHz clock. - */ - ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - &phy_data); - if (ret_val) - goto out; - - phy_data |= M88E1000_EPSCR_TX_CLK_25; - - if ((phy->revision == E1000_REVISION_2) && - (phy->id == M88E1111_I_PHY_ID)) { - /* 82573L PHY - set the downshift counter to 5x. */ - phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; - phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; - } else { - /* Configure Master and Slave downshift values */ - phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); - phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); - } - ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - phy_data); - if (ret_val) - goto out; - } - - /* Commit the changes. */ - ret_val = igb_phy_sw_reset(hw); - if (ret_val) { - hw_dbg("Error committing the PHY changes\n"); - goto out; - } - -out: - return ret_val; -} - -/** - * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link - * @hw: pointer to the HW structure - * - * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. - * Also enables and sets the downshift parameters. - **/ -s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - - if (phy->reset_disable) { - ret_val = 0; - goto out; - } - - /* Enable CRS on Tx. This must be set for half-duplex operation. 
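The MDI/MDI-X switch that follows (like the one in igb_copper_link_setup_m88() above) is a direct mapping from phy->mdix to the PSCR auto-crossover field; written as a lookup table for reference:

/* phy->mdix -> M88 PHY-specific control bits; 0 and any unknown value fall
 * back to automatic crossover for all speeds. */
static const u16 m88_mdix_to_pscr[] = {
	[0] = M88E1000_PSCR_AUTO_X_MODE,	/* auto for all speeds (default) */
	[1] = M88E1000_PSCR_MDI_MANUAL_MODE,	/* forced MDI */
	[2] = M88E1000_PSCR_MDIX_MANUAL_MODE,	/* forced MDI-X */
	[3] = M88E1000_PSCR_AUTO_X_1000T,	/* auto for 1000BASE-T only */
};
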
*/ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - goto out; - - /* - * Options: - * MDI/MDI-X = 0 (default) - * 0 - Auto for all speeds - * 1 - MDI mode - * 2 - MDI-X mode - * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) - */ - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; - - switch (phy->mdix) { - case 1: - phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; - break; - case 2: - phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; - break; - case 3: - /* M88E1112 does not support this mode) */ - if (phy->id != M88E1112_E_PHY_ID) { - phy_data |= M88E1000_PSCR_AUTO_X_1000T; - break; - } - case 0: - default: - phy_data |= M88E1000_PSCR_AUTO_X_MODE; - break; - } - - /* - * Options: - * disable_polarity_correction = 0 (default) - * Automatic Correction for Reversed Cable Polarity - * 0 - Disabled - * 1 - Enabled - */ - phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; - if (phy->disable_polarity_correction == 1) - phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; - - /* Enable downshift and setting it to X6 */ - phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; - phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; - phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; - - ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if (ret_val) - goto out; - - /* Commit the changes. */ - ret_val = igb_phy_sw_reset(hw); - if (ret_val) { - hw_dbg("Error committing the PHY changes\n"); - goto out; - } - -out: - return ret_val; -} - -/** - * igb_copper_link_setup_igp - Setup igp PHY's for copper link - * @hw: pointer to the HW structure - * - * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for - * igp PHY's. - **/ -s32 igb_copper_link_setup_igp(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - - if (phy->reset_disable) { - ret_val = 0; - goto out; - } - - ret_val = phy->ops.reset(hw); - if (ret_val) { - hw_dbg("Error resetting the PHY.\n"); - goto out; - } - - /* - * Wait 100ms for MAC to configure PHY from NVM settings, to avoid - * timeout issues when LFS is enabled. - */ - msleep(100); - - /* - * The NVM settings will configure LPLU in D3 for - * non-IGP1 PHYs. - */ - if (phy->type == e1000_phy_igp) { - /* disable lplu d3 during driver init */ - if (phy->ops.set_d3_lplu_state) - ret_val = phy->ops.set_d3_lplu_state(hw, false); - if (ret_val) { - hw_dbg("Error Disabling LPLU D3\n"); - goto out; - } - } - - /* disable lplu d0 during driver init */ - ret_val = phy->ops.set_d0_lplu_state(hw, false); - if (ret_val) { - hw_dbg("Error Disabling LPLU D0\n"); - goto out; - } - /* Configure mdi-mdix settings */ - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); - if (ret_val) - goto out; - - data &= ~IGP01E1000_PSCR_AUTO_MDIX; - - switch (phy->mdix) { - case 1: - data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; - break; - case 2: - data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; - break; - case 0: - default: - data |= IGP01E1000_PSCR_AUTO_MDIX; - break; - } - ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); - if (ret_val) - goto out; - - /* set auto-master slave resolution settings */ - if (hw->mac.autoneg) { - /* - * when autonegotiation advertisement is only 1000Mbps then we - * should disable SmartSpeed and enable Auto MasterSlave - * resolution as hardware default. 
- */ - if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { - /* Disable SmartSpeed */ - ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - goto out; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - goto out; - - /* Set auto Master/Slave resolution process */ - ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); - if (ret_val) - goto out; - - data &= ~CR_1000T_MS_ENABLE; - ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); - if (ret_val) - goto out; - } - - ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); - if (ret_val) - goto out; - - /* load defaults for future use */ - phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? - ((data & CR_1000T_MS_VALUE) ? - e1000_ms_force_master : - e1000_ms_force_slave) : - e1000_ms_auto; - - switch (phy->ms_type) { - case e1000_ms_force_master: - data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); - break; - case e1000_ms_force_slave: - data |= CR_1000T_MS_ENABLE; - data &= ~(CR_1000T_MS_VALUE); - break; - case e1000_ms_auto: - data &= ~CR_1000T_MS_ENABLE; - default: - break; - } - ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); - if (ret_val) - goto out; - } - -out: - return ret_val; -} - -/** - * igb_copper_link_autoneg - Setup/Enable autoneg for copper link - * @hw: pointer to the HW structure - * - * Performs initial bounds checking on autoneg advertisement parameter, then - * configure to advertise the full capability. Setup the PHY to autoneg - * and restart the negotiation process between the link partner. If - * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. - **/ -static s32 igb_copper_link_autoneg(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_ctrl; - - /* - * Perform some bounds checking on the autoneg advertisement - * parameter. - */ - phy->autoneg_advertised &= phy->autoneg_mask; - - /* - * If autoneg_advertised is zero, we assume it was not defaulted - * by the calling code so we set to advertise full capability. - */ - if (phy->autoneg_advertised == 0) - phy->autoneg_advertised = phy->autoneg_mask; - - hw_dbg("Reconfiguring auto-neg advertisement params\n"); - ret_val = igb_phy_setup_autoneg(hw); - if (ret_val) { - hw_dbg("Error Setting up Auto-Negotiation\n"); - goto out; - } - hw_dbg("Restarting Auto-Neg\n"); - - /* - * Restart auto-negotiation by setting the Auto Neg Enable bit and - * the Auto Neg Restart bit in the PHY control register. - */ - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); - if (ret_val) - goto out; - - phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); - if (ret_val) - goto out; - - /* - * Does the user want to wait for Auto-Neg to complete here, or - * check at a later time (for example, callback routine). - */ - if (phy->autoneg_wait_to_complete) { - ret_val = igb_wait_autoneg(hw); - if (ret_val) { - hw_dbg("Error while waiting for " - "autoneg to complete\n"); - goto out; - } - } - - hw->mac.get_link_status = true; - -out: - return ret_val; -} - -/** - * igb_phy_setup_autoneg - Configure PHY for auto-negotiation - * @hw: pointer to the HW structure - * - * Reads the MII auto-neg advertisement register and/or the 1000T control - * register and if the PHY is already setup for auto-negotiation, then - * return successful. 
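The restart performed by igb_copper_link_autoneg() above is the standard MII sequence: set the auto-neg enable and restart bits in the PHY control register and write it back. In generic MII terms (mii_read/mii_write are hypothetical accessors; the BMCR bit names are from <linux/mii.h> and correspond to the driver's MII_CR_AUTO_NEG_EN and MII_CR_RESTART_AUTO_NEG):

/* Restart negotiation; hardware clears BMCR_ANRESTART once it begins. */
bmcr = mii_read(phy, MII_BMCR);
mii_write(phy, MII_BMCR, bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
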
Otherwise, setup advertisement and flow control to - * the appropriate values for the wanted auto-negotiation. - **/ -static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 mii_autoneg_adv_reg; - u16 mii_1000t_ctrl_reg = 0; - - phy->autoneg_advertised &= phy->autoneg_mask; - - /* Read the MII Auto-Neg Advertisement Register (Address 4). */ - ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); - if (ret_val) - goto out; - - if (phy->autoneg_mask & ADVERTISE_1000_FULL) { - /* Read the MII 1000Base-T Control Register (Address 9). */ - ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, - &mii_1000t_ctrl_reg); - if (ret_val) - goto out; - } - - /* - * Need to parse both autoneg_advertised and fc and set up - * the appropriate PHY registers. First we will parse for - * autoneg_advertised software override. Since we can advertise - * a plethora of combinations, we need to check each bit - * individually. - */ - - /* - * First we clear all the 10/100 mb speed bits in the Auto-Neg - * Advertisement Register (Address 4) and the 1000 mb speed bits in - * the 1000Base-T Control Register (Address 9). - */ - mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | - NWAY_AR_100TX_HD_CAPS | - NWAY_AR_10T_FD_CAPS | - NWAY_AR_10T_HD_CAPS); - mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); - - hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); - - /* Do we want to advertise 10 Mb Half Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_10_HALF) { - hw_dbg("Advertise 10mb Half duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; - } - - /* Do we want to advertise 10 Mb Full Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_10_FULL) { - hw_dbg("Advertise 10mb Full duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; - } - - /* Do we want to advertise 100 Mb Half Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_100_HALF) { - hw_dbg("Advertise 100mb Half duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; - } - - /* Do we want to advertise 100 Mb Full Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_100_FULL) { - hw_dbg("Advertise 100mb Full duplex\n"); - mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; - } - - /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ - if (phy->autoneg_advertised & ADVERTISE_1000_HALF) - hw_dbg("Advertise 1000mb Half duplex request denied!\n"); - - /* Do we want to advertise 1000 Mb Full Duplex? */ - if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { - hw_dbg("Advertise 1000mb Full duplex\n"); - mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; - } - - /* - * Check for a software override of the flow control settings, and - * setup the PHY advertisement registers accordingly. If - * auto-negotiation is enabled, then software will have to set the - * "PAUSE" bits to the correct value in the Auto-Negotiation - * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- - * negotiation. - * - * The possible values of the "fc" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames - * but we do not support receiving pause frames). - * 3: Both Rx and TX flow control (symmetric) are enabled. - * other: No software override. The flow control configuration - * in the EEPROM is used. 
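The if-chain above that builds the advertisement register is a 1:1 bit mapping; condensed into a table for reference:

/* autoneg_advertised bit -> PHY_AUTONEG_ADV (ANAR) capability bit. */
static const struct { u16 adv; u16 anar; } adv_map[] = {
	{ ADVERTISE_10_HALF,	NWAY_AR_10T_HD_CAPS },
	{ ADVERTISE_10_FULL,	NWAY_AR_10T_FD_CAPS },
	{ ADVERTISE_100_HALF,	NWAY_AR_100TX_HD_CAPS },
	{ ADVERTISE_100_FULL,	NWAY_AR_100TX_FD_CAPS },
};
/* 1000 Mb full duplex lives in PHY_1000T_CTRL (CR_1000T_FD_CAPS) instead,
 * and 1000 Mb half duplex is never advertised. */
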
- */ - switch (hw->fc.current_mode) { - case e1000_fc_none: - /* - * Flow control (RX & TX) is completely disabled by a - * software over-ride. - */ - mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - case e1000_fc_rx_pause: - /* - * RX Flow control is enabled, and TX Flow control is - * disabled, by a software over-ride. - * - * Since there really isn't a way to advertise that we are - * capable of RX Pause ONLY, we will advertise that we - * support both symmetric and asymmetric RX PAUSE. Later - * (in e1000_config_fc_after_link_up) we will disable the - * hw's ability to send PAUSE frames. - */ - mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - case e1000_fc_tx_pause: - /* - * TX Flow control is enabled, and RX Flow control is - * disabled, by a software over-ride. - */ - mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; - mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; - break; - case e1000_fc_full: - /* - * Flow control (both RX and TX) is enabled by a software - * over-ride. - */ - mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); - break; - default: - hw_dbg("Flow control param set incorrectly\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); - if (ret_val) - goto out; - - hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); - - if (phy->autoneg_mask & ADVERTISE_1000_FULL) { - ret_val = phy->ops.write_reg(hw, - PHY_1000T_CTRL, - mii_1000t_ctrl_reg); - if (ret_val) - goto out; - } - -out: - return ret_val; -} - -/** - * igb_setup_copper_link - Configure copper link settings - * @hw: pointer to the HW structure - * - * Calls the appropriate function to configure the link for auto-neg or forced - * speed and duplex. Then we check for link, once link is established calls - * to configure collision distance and flow control are called. If link is - * not established, we return -E1000_ERR_PHY (-2). - **/ -s32 igb_setup_copper_link(struct e1000_hw *hw) -{ - s32 ret_val; - bool link; - - - if (hw->mac.autoneg) { - /* - * Setup autoneg and flow control advertisement and perform - * autonegotiation. - */ - ret_val = igb_copper_link_autoneg(hw); - if (ret_val) - goto out; - } else { - /* - * PHY will be set to 10H, 10F, 100H or 100F - * depending on user settings. - */ - hw_dbg("Forcing Speed and Duplex\n"); - ret_val = hw->phy.ops.force_speed_duplex(hw); - if (ret_val) { - hw_dbg("Error Forcing Speed and Duplex\n"); - goto out; - } - } - - /* - * Check link status. Wait up to 100 microseconds for link to become - * valid. - */ - ret_val = igb_phy_has_link(hw, - COPPER_LINK_UP_LIMIT, - 10, - &link); - if (ret_val) - goto out; - - if (link) { - hw_dbg("Valid link established!!!\n"); - igb_config_collision_dist(hw); - ret_val = igb_config_fc_after_link_up(hw); - } else { - hw_dbg("Unable to establish link!!!\n"); - } - -out: - return ret_val; -} - -/** - * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY - * @hw: pointer to the HW structure - * - * Calls the PHY setup function to force speed and duplex. Clears the - * auto-crossover to force MDI manually. Waits for link and returns - * successful if link up is successful, else -E1000_ERR_PHY (-2). 
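For reference, the flow-control switch above reduces the four e1000_fc_* modes to the two IEEE 802.3 pause bits in the advertisement register:

/*
 *   hw->fc.current_mode   NWAY_AR_PAUSE   NWAY_AR_ASM_DIR
 *   e1000_fc_none               0               0
 *   e1000_fc_rx_pause           1               1   (sym+asym advertised;
 *                                                    TX pause is disabled
 *                                                    again after link-up)
 *   e1000_fc_tx_pause           0               1
 *   e1000_fc_full               1               1
 */
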
- **/ -s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); - if (ret_val) - goto out; - - igb_phy_force_speed_duplex_setup(hw, &phy_data); - - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); - if (ret_val) - goto out; - - /* - * Clear Auto-Crossover to force MDI manually. IGP requires MDI - * forced whenever speed and duplex are forced. - */ - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); - if (ret_val) - goto out; - - phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; - phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; - - ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); - if (ret_val) - goto out; - - hw_dbg("IGP PSCR: %X\n", phy_data); - - udelay(1); - - if (phy->autoneg_wait_to_complete) { - hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); - - ret_val = igb_phy_has_link(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; - - if (!link) - hw_dbg("Link taking longer than expected.\n"); - - /* Try once more */ - ret_val = igb_phy_has_link(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; - } - -out: - return ret_val; -} - -/** - * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY - * @hw: pointer to the HW structure - * - * Calls the PHY setup function to force speed and duplex. Clears the - * auto-crossover to force MDI manually. Resets the PHY to commit the - * changes. If time expires while waiting for link up, we reset the DSP. - * After reset, TX_CLK and CRS on TX must be set. Return successful upon - * successful completion, else return corresponding error code. - **/ -s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - - /* - * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI - * forced whenever speed and duplex are forced. - */ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - goto out; - - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; - ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - if (ret_val) - goto out; - - hw_dbg("M88E1000 PSCR: %X\n", phy_data); - - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); - if (ret_val) - goto out; - - igb_phy_force_speed_duplex_setup(hw, &phy_data); - - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); - if (ret_val) - goto out; - - /* Reset the phy to commit changes. */ - ret_val = igb_phy_sw_reset(hw); - if (ret_val) - goto out; - - if (phy->autoneg_wait_to_complete) { - hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); - - ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); - if (ret_val) - goto out; - - if (!link) { - if (hw->phy.type != e1000_phy_m88 || - hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) { - hw_dbg("Link taking longer than expected.\n"); - } else { - - /* - * We didn't get link. - * Reset the DSP and cross our fingers. 
- */ - ret_val = phy->ops.write_reg(hw, - M88E1000_PHY_PAGE_SELECT, - 0x001d); - if (ret_val) - goto out; - ret_val = igb_phy_reset_dsp(hw); - if (ret_val) - goto out; - } - } - - /* Try once more */ - ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, - 100000, &link); - if (ret_val) - goto out; - } - - if (hw->phy.type != e1000_phy_m88 || - hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) - goto out; - - ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - goto out; - - /* - * Resetting the phy means we need to re-force TX_CLK in the - * Extended PHY Specific Control Register to 25MHz clock from - * the reset value of 2.5MHz. - */ - phy_data |= M88E1000_EPSCR_TX_CLK_25; - ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); - if (ret_val) - goto out; - - /* - * In addition, we must re-enable CRS on Tx for both half and full - * duplex. - */ - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - goto out; - - phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); - -out: - return ret_val; -} - -/** - * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex - * @hw: pointer to the HW structure - * @phy_ctrl: pointer to current value of PHY_CONTROL - * - * Forces speed and duplex on the PHY by doing the following: disable flow - * control, force speed/duplex on the MAC, disable auto speed detection, - * disable auto-negotiation, configure duplex, configure speed, configure - * the collision distance, write configuration to CTRL register. The - * caller must write to the PHY_CONTROL register for these settings to - * take affect. - **/ -static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, - u16 *phy_ctrl) -{ - struct e1000_mac_info *mac = &hw->mac; - u32 ctrl; - - /* Turn off flow control when forcing speed/duplex */ - hw->fc.current_mode = e1000_fc_none; - - /* Force speed/duplex on the mac */ - ctrl = rd32(E1000_CTRL); - ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); - ctrl &= ~E1000_CTRL_SPD_SEL; - - /* Disable Auto Speed Detection */ - ctrl &= ~E1000_CTRL_ASDE; - - /* Disable autoneg on the phy */ - *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; - - /* Forcing Full or Half Duplex? */ - if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { - ctrl &= ~E1000_CTRL_FD; - *phy_ctrl &= ~MII_CR_FULL_DUPLEX; - hw_dbg("Half Duplex\n"); - } else { - ctrl |= E1000_CTRL_FD; - *phy_ctrl |= MII_CR_FULL_DUPLEX; - hw_dbg("Full Duplex\n"); - } - - /* Forcing 10mb or 100mb? */ - if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { - ctrl |= E1000_CTRL_SPD_100; - *phy_ctrl |= MII_CR_SPEED_100; - *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); - hw_dbg("Forcing 100mb\n"); - } else { - ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); - *phy_ctrl |= MII_CR_SPEED_10; - *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); - hw_dbg("Forcing 10mb\n"); - } - - igb_config_collision_dist(hw); - - wr32(E1000_CTRL, ctrl); -} - -/** - * igb_set_d3_lplu_state - Sets low power link up state for D3 - * @hw: pointer to the HW structure - * @active: boolean used to enable/disable lplu - * - * Success returns 0, Failure returns 1 - * - * The low power link up (lplu) state is set to the power management level D3 - * and SmartSpeed is disabled when active is true, else clear lplu for D3 - * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU - * is used during Dx states where the power conservation is most important. 
- * During driver activity, SmartSpeed should be enabled so performance is - * maintained. - **/ -s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; - u16 data; - - if (!(hw->phy.ops.read_reg)) - goto out; - - ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); - if (ret_val) - goto out; - - if (!active) { - data &= ~IGP02E1000_PM_D3_LPLU; - ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); - if (ret_val) - goto out; - /* - * LPLU and SmartSpeed are mutually exclusive. LPLU is used - * during Dx states where the power conservation is most - * important. During driver activity we should enable - * SmartSpeed, so performance is maintained. - */ - if (phy->smart_speed == e1000_smart_speed_on) { - ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - goto out; - - data |= IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - goto out; - } else if (phy->smart_speed == e1000_smart_speed_off) { - ret_val = phy->ops.read_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - goto out; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - data); - if (ret_val) - goto out; - } - } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || - (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || - (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { - data |= IGP02E1000_PM_D3_LPLU; - ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); - if (ret_val) - goto out; - - /* When LPLU is enabled, we should disable SmartSpeed */ - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); - if (ret_val) - goto out; - - data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - data); - } - -out: - return ret_val; -} - -/** - * igb_check_downshift - Checks whether a downshift in speed occurred - * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns 1 - * - * A downshift is detected by querying the PHY link health. - **/ -s32 igb_check_downshift(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data, offset, mask; - - switch (phy->type) { - case e1000_phy_m88: - case e1000_phy_gg82563: - offset = M88E1000_PHY_SPEC_STATUS; - mask = M88E1000_PSSR_DOWNSHIFT; - break; - case e1000_phy_igp_2: - case e1000_phy_igp: - case e1000_phy_igp_3: - offset = IGP01E1000_PHY_LINK_HEALTH; - mask = IGP01E1000_PLHR_SS_DOWNGRADE; - break; - default: - /* speed downshift not supported */ - phy->speed_downgraded = false; - ret_val = 0; - goto out; - } - - ret_val = phy->ops.read_reg(hw, offset, &phy_data); - - if (!ret_val) - phy->speed_downgraded = (phy_data & mask) ? true : false; - -out: - return ret_val; -} - -/** - * igb_check_polarity_m88 - Checks the polarity. - * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns -E1000_ERR_PHY (-2) - * - * Polarity is determined based on the PHY specific status register. - **/ -static s32 igb_check_polarity_m88(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); - - if (!ret_val) - phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) - ? 
e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; - - return ret_val; -} - -/** - * igb_check_polarity_igp - Checks the polarity. - * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns -E1000_ERR_PHY (-2) - * - * Polarity is determined based on the PHY port status register, and the - * current speed (since there is no polarity at 100Mbps). - **/ -static s32 igb_check_polarity_igp(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data, offset, mask; - - /* - * Polarity is determined based on the speed of - * our connection. - */ - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); - if (ret_val) - goto out; - - if ((data & IGP01E1000_PSSR_SPEED_MASK) == - IGP01E1000_PSSR_SPEED_1000MBPS) { - offset = IGP01E1000_PHY_PCS_INIT_REG; - mask = IGP01E1000_PHY_POLARITY_MASK; - } else { - /* - * This really only applies to 10Mbps since - * there is no polarity for 100Mbps (always 0). - */ - offset = IGP01E1000_PHY_PORT_STATUS; - mask = IGP01E1000_PSSR_POLARITY_REVERSED; - } - - ret_val = phy->ops.read_reg(hw, offset, &data); - - if (!ret_val) - phy->cable_polarity = (data & mask) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; - -out: - return ret_val; -} - -/** - * igb_wait_autoneg - Wait for auto-neg compeletion - * @hw: pointer to the HW structure - * - * Waits for auto-negotiation to complete or for the auto-negotiation time - * limit to expire, which ever happens first. - **/ -static s32 igb_wait_autoneg(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 i, phy_status; - - /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ - for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) - break; - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) - break; - if (phy_status & MII_SR_AUTONEG_COMPLETE) - break; - msleep(100); - } - - /* - * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation - * has completed. - */ - return ret_val; -} - -/** - * igb_phy_has_link - Polls PHY for link - * @hw: pointer to the HW structure - * @iterations: number of times to poll for link - * @usec_interval: delay between polling attempts - * @success: pointer to whether polling was successful or not - * - * Polls the PHY status register for link, 'iterations' number of times. - **/ -s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, - u32 usec_interval, bool *success) -{ - s32 ret_val = 0; - u16 i, phy_status; - - for (i = 0; i < iterations; i++) { - /* - * Some PHYs require the PHY_STATUS register to be read - * twice due to the link bit being sticky. No harm doing - * it across the board. - */ - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) { - /* - * If the first read fails, another entity may have - * ownership of the resources, wait and try again to - * see if they have relinquished the resources yet. - */ - udelay(usec_interval); - } - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) - break; - if (phy_status & MII_SR_LINK_STATUS) - break; - if (usec_interval >= 1000) - mdelay(usec_interval/1000); - else - udelay(usec_interval); - } - - *success = (i < iterations) ? true : false; - - return ret_val; -} - -/** - * igb_get_cable_length_m88 - Determine cable length for m88 PHY - * @hw: pointer to the HW structure - * - * Reads the PHY specific status register to retrieve the cable length - * information. 
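igb_phy_has_link() above reads PHY_STATUS twice per iteration because the link bit is latched-low: the first read reports the worst state seen since the last read, the second reports the current state. The same idiom in generic MII form (mii_read is a hypothetical accessor; BMSR_LSTATUS is the <linux/mii.h> name for the link bit):

(void)mii_read(phy, MII_BMSR);			/* flush the latched value */
up = mii_read(phy, MII_BMSR) & BMSR_LSTATUS;	/* current link state */
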
The cable length is determined by averaging the minimum and - * maximum values to get the "average" cable length. The m88 PHY has four - * possible cable length values, which are: - * Register Value Cable Length - * 0 < 50 meters - * 1 50 - 80 meters - * 2 80 - 110 meters - * 3 110 - 140 meters - * 4 > 140 meters - **/ -s32 igb_get_cable_length_m88(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data, index; - - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if (ret_val) - goto out; - - index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { - ret_val = -E1000_ERR_PHY; - goto out; - } - - phy->min_cable_length = e1000_m88_cable_length_table[index]; - phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; - - phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; - -out: - return ret_val; -} - -s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data, phy_data2, index, default_page, is_cm; - - switch (hw->phy.id) { - case I347AT4_E_PHY_ID: - /* Remember the original page select and set it to 7 */ - ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, - &default_page); - if (ret_val) - goto out; - - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); - if (ret_val) - goto out; - - /* Get cable length from PHY Cable Diagnostics Control Reg */ - ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), - &phy_data); - if (ret_val) - goto out; - - /* Check if the unit of cable length is meters or cm */ - ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); - if (ret_val) - goto out; - - is_cm = !(phy_data & I347AT4_PCDC_CABLE_LENGTH_UNIT); - - /* Populate the phy structure with cable length in meters */ - phy->min_cable_length = phy_data / (is_cm ? 100 : 1); - phy->max_cable_length = phy_data / (is_cm ? 100 : 1); - phy->cable_length = phy_data / (is_cm ? 100 : 1); - - /* Reset the page selec to its original value */ - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, - default_page); - if (ret_val) - goto out; - break; - case M88E1112_E_PHY_ID: - /* Remember the original page select and set it to 5 */ - ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, - &default_page); - if (ret_val) - goto out; - - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); - if (ret_val) - goto out; - - ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, - &phy_data); - if (ret_val) - goto out; - - index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { - ret_val = -E1000_ERR_PHY; - goto out; - } - - phy->min_cable_length = e1000_m88_cable_length_table[index]; - phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; - - phy->cable_length = (phy->min_cable_length + - phy->max_cable_length) / 2; - - /* Reset the page select to its original value */ - ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, - default_page); - if (ret_val) - goto out; - - break; - default: - ret_val = -E1000_ERR_PHY; - goto out; - } - -out: - return ret_val; -} - -/** - * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY - * @hw: pointer to the HW structure - * - * The automatic gain control (agc) normalizes the amplitude of the - * received signal, adjusting for the attenuation produced by the - * cable. 
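The estimate computed below is a trimmed mean: each of the four channels' AGC readings is translated through the lookup table, the smallest and largest values are dropped, the remaining two are averaged, and the result is widened into a +/- IGP02E1000_AGC_RANGE (15 m) window. In outline, given the four per-channel table values len[0..3]:

/* Outline of igb_get_cable_length_igp_2()'s arithmetic. */
sum = len[0] + len[1] + len[2] + len[3];
avg = (sum - len_min - len_max) / (IGP02E1000_PHY_CHANNEL_NUM - 2);
min_len = (avg > IGP02E1000_AGC_RANGE) ? avg - IGP02E1000_AGC_RANGE : 0;
max_len = avg + IGP02E1000_AGC_RANGE;
cable_len = (min_len + max_len) / 2;
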
By reading the AGC registers, which represent the - * combination of coarse and fine gain value, the value can be put - * into a lookup table to obtain the approximate cable length - * for each channel. - **/ -s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = 0; - u16 phy_data, i, agc_value = 0; - u16 cur_agc_index, max_agc_index = 0; - u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; - static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { - IGP02E1000_PHY_AGC_A, - IGP02E1000_PHY_AGC_B, - IGP02E1000_PHY_AGC_C, - IGP02E1000_PHY_AGC_D - }; - - /* Read the AGC registers for all channels */ - for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { - ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); - if (ret_val) - goto out; - - /* - * Getting bits 15:9, which represent the combination of - * coarse and fine gain values. The result is a number - * that can be put into the lookup table to obtain the - * approximate cable length. - */ - cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & - IGP02E1000_AGC_LENGTH_MASK; - - /* Array index bound check. */ - if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || - (cur_agc_index == 0)) { - ret_val = -E1000_ERR_PHY; - goto out; - } - - /* Remove min & max AGC values from calculation. */ - if (e1000_igp_2_cable_length_table[min_agc_index] > - e1000_igp_2_cable_length_table[cur_agc_index]) - min_agc_index = cur_agc_index; - if (e1000_igp_2_cable_length_table[max_agc_index] < - e1000_igp_2_cable_length_table[cur_agc_index]) - max_agc_index = cur_agc_index; - - agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; - } - - agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + - e1000_igp_2_cable_length_table[max_agc_index]); - agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); - - /* Calculate cable length with the error range of +/- 10 meters. */ - phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? - (agc_value - IGP02E1000_AGC_RANGE) : 0; - phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; - - phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; - -out: - return ret_val; -} - -/** - * igb_get_phy_info_m88 - Retrieve PHY information - * @hw: pointer to the HW structure - * - * Valid for only copper links. Read the PHY status register (sticky read) - * to verify that link is up. Read the PHY special control register to - * determine the polarity and 10base-T extended distance. Read the PHY - * special status register to determine MDI/MDIx and current speed. If - * speed is 1000, then determine cable length, local and remote receiver. - **/ -s32 igb_get_phy_info_m88(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - - if (phy->media_type != e1000_media_type_copper) { - hw_dbg("Phy info is only valid for copper media\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - ret_val = igb_phy_has_link(hw, 1, 0, &link); - if (ret_val) - goto out; - - if (!link) { - hw_dbg("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); - if (ret_val) - goto out; - - phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) - ? 
true : false; - - ret_val = igb_check_polarity_m88(hw); - if (ret_val) - goto out; - - ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if (ret_val) - goto out; - - phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; - - if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { - ret_val = phy->ops.get_cable_length(hw); - if (ret_val) - goto out; - - ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); - if (ret_val) - goto out; - - phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - - phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - } else { - /* Set values to "undefined" */ - phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; - phy->local_rx = e1000_1000t_rx_status_undefined; - phy->remote_rx = e1000_1000t_rx_status_undefined; - } - -out: - return ret_val; -} - -/** - * igb_get_phy_info_igp - Retrieve igp PHY information - * @hw: pointer to the HW structure - * - * Read PHY status to determine if link is up. If link is up, then - * set/determine 10base-T extended distance and polarity correction. Read - * PHY port status to determine MDI/MDIx and speed. Based on the speed, - * determine on the cable length, local and remote receiver. - **/ -s32 igb_get_phy_info_igp(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - bool link; - - ret_val = igb_phy_has_link(hw, 1, 0, &link); - if (ret_val) - goto out; - - if (!link) { - hw_dbg("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - phy->polarity_correction = true; - - ret_val = igb_check_polarity_igp(hw); - if (ret_val) - goto out; - - ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); - if (ret_val) - goto out; - - phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; - - if ((data & IGP01E1000_PSSR_SPEED_MASK) == - IGP01E1000_PSSR_SPEED_1000MBPS) { - ret_val = phy->ops.get_cable_length(hw); - if (ret_val) - goto out; - - ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); - if (ret_val) - goto out; - - phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - - phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - } else { - phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; - phy->local_rx = e1000_1000t_rx_status_undefined; - phy->remote_rx = e1000_1000t_rx_status_undefined; - } - -out: - return ret_val; -} - -/** - * igb_phy_sw_reset - PHY software reset - * @hw: pointer to the HW structure - * - * Does a software reset of the PHY by reading the PHY control register and - * setting/write the control register reset bit to the PHY. - **/ -s32 igb_phy_sw_reset(struct e1000_hw *hw) -{ - s32 ret_val = 0; - u16 phy_ctrl; - - if (!(hw->phy.ops.read_reg)) - goto out; - - ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); - if (ret_val) - goto out; - - phy_ctrl |= MII_CR_RESET; - ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); - if (ret_val) - goto out; - - udelay(1); - -out: - return ret_val; -} - -/** - * igb_phy_hw_reset - PHY hardware reset - * @hw: pointer to the HW structure - * - * Verify the reset block is not blocking us from resetting. Acquire - * semaphore (if necessary) and read/set/write the device control reset - * bit in the PHY. 
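The hardware reset that follows is a simple pulse on the PHY_RST bit of the device control register: assert, hold for the PHY-specific delay, deassert, then give the PHY 150 microseconds to come back before touching it. The skeleton, minus the reset-block and semaphore handling:

ctrl = rd32(E1000_CTRL);
wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);	/* assert reset */
wrfl();
udelay(phy->reset_delay_us);			/* PHY-specific hold time */
wr32(E1000_CTRL, ctrl);				/* deassert */
wrfl();
udelay(150);					/* post-reset settle time */
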
Wait the appropriate delay time for the device to - * reset and relase the semaphore (if necessary). - **/ -s32 igb_phy_hw_reset(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u32 ctrl; - - ret_val = igb_check_reset_block(hw); - if (ret_val) { - ret_val = 0; - goto out; - } - - ret_val = phy->ops.acquire(hw); - if (ret_val) - goto out; - - ctrl = rd32(E1000_CTRL); - wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); - wrfl(); - - udelay(phy->reset_delay_us); - - wr32(E1000_CTRL, ctrl); - wrfl(); - - udelay(150); - - phy->ops.release(hw); - - ret_val = phy->ops.get_cfg_done(hw); - -out: - return ret_val; -} - -/** - * igb_phy_init_script_igp3 - Inits the IGP3 PHY - * @hw: pointer to the HW structure - * - * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. - **/ -s32 igb_phy_init_script_igp3(struct e1000_hw *hw) -{ - hw_dbg("Running IGP 3 PHY init script\n"); - - /* PHY init IGP 3 */ - /* Enable rise/fall, 10-mode work in class-A */ - hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); - /* Remove all caps from Replica path filter */ - hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); - /* Bias trimming for ADC, AFE and Driver (Default) */ - hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); - /* Increase Hybrid poly bias */ - hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); - /* Add 4% to TX amplitude in Giga mode */ - hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); - /* Disable trimming (TTT) */ - hw->phy.ops.write_reg(hw, 0x2011, 0x0000); - /* Poly DC correction to 94.6% + 2% for all channels */ - hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); - /* ABS DC correction to 95.9% */ - hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); - /* BG temp curve trim */ - hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); - /* Increasing ADC OPAMP stage 1 currents to max */ - hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); - /* Force 1000 ( required for enabling PHY regs configuration) */ - hw->phy.ops.write_reg(hw, 0x0000, 0x0140); - /* Set upd_freq to 6 */ - hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); - /* Disable NPDFE */ - hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); - /* Disable adaptive fixed FFE (Default) */ - hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); - /* Enable FFE hysteresis */ - hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); - /* Fixed FFE for short cable lengths */ - hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); - /* Fixed FFE for medium cable lengths */ - hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); - /* Fixed FFE for long cable lengths */ - hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); - /* Enable Adaptive Clip Threshold */ - hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); - /* AHT reset limit to 1 */ - hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); - /* Set AHT master delay to 127 msec */ - hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); - /* Set scan bits for AHT */ - hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); - /* Set AHT Preset bits */ - hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); - /* Change integ_factor of channel A to 3 */ - hw->phy.ops.write_reg(hw, 0x1895, 0x0003); - /* Change prop_factor of channels BCD to 8 */ - hw->phy.ops.write_reg(hw, 0x1796, 0x0008); - /* Change cg_icount + enable integbp for channels BCD */ - hw->phy.ops.write_reg(hw, 0x1798, 0xD008); - /* - * Change cg_icount + enable integbp + change prop_factor_master - * to 8 for channel A - */ - hw->phy.ops.write_reg(hw, 0x1898, 0xD918); - /* Disable AHT in Slave mode on channel A */ - hw->phy.ops.write_reg(hw, 0x187A, 0x0800); - /* - * Enable LPLU and disable AN to 1000 in non-D0a states, - * Enable SPD+B2B - */ - hw->phy.ops.write_reg(hw, 0x0019, 0x008D); - /* 
Enable restart AN on an1000_dis change */ - hw->phy.ops.write_reg(hw, 0x001B, 0x2080); - /* Enable wh_fifo read clock in 10/100 modes */ - hw->phy.ops.write_reg(hw, 0x0014, 0x0045); - /* Restart AN, Speed selection is 1000 */ - hw->phy.ops.write_reg(hw, 0x0000, 0x1340); - - return 0; -} - -/** - * igb_power_up_phy_copper - Restore copper link in case of PHY power down - * @hw: pointer to the HW structure - * - * In the case of a PHY power down to save power, or to turn off link during a - * driver unload, restore the link to previous settings. - **/ -void igb_power_up_phy_copper(struct e1000_hw *hw) -{ - u16 mii_reg = 0; - - /* The PHY will retain its settings across a power down/up cycle */ - hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); - mii_reg &= ~MII_CR_POWER_DOWN; - hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); -} - -/** - * igb_power_down_phy_copper - Power down copper PHY - * @hw: pointer to the HW structure - * - * Power down PHY to save power when interface is down and wake on lan - * is not enabled. - **/ -void igb_power_down_phy_copper(struct e1000_hw *hw) -{ - u16 mii_reg = 0; - - /* The PHY will retain its settings across a power down/up cycle */ - hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); - mii_reg |= MII_CR_POWER_DOWN; - hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); - msleep(1); -} - -/** - * igb_check_polarity_82580 - Checks the polarity. - * @hw: pointer to the HW structure - * - * Success returns 0, Failure returns -E1000_ERR_PHY (-2) - * - * Polarity is determined based on the PHY specific status register. - **/ -static s32 igb_check_polarity_82580(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - - - ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); - - if (!ret_val) - phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; - - return ret_val; -} - -/** - * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY - * @hw: pointer to the HW structure - * - * Calls the PHY setup function to force speed and duplex. Clears the - * auto-crossover to force MDI manually. Waits for link and returns - * successful if link up is successful, else -E1000_ERR_PHY (-2). - **/ -s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data; - bool link; - - - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); - if (ret_val) - goto out; - - igb_phy_force_speed_duplex_setup(hw, &phy_data); - - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); - if (ret_val) - goto out; - - /* - * Clear Auto-Crossover to force MDI manually. 82580 requires MDI - * forced whenever speed and duplex are forced. 
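The copper power up/down helpers above come down to a single bit: MII_CR_POWER_DOWN in PHY_CONTROL (BMCR_PDOWN in <linux/mii.h> terms), which the PHY honors without losing its configuration across the transition. A generic-MII sketch (mii_read/mii_write are hypothetical accessors):

bmcr = mii_read(phy, MII_BMCR);
mii_write(phy, MII_BMCR,
	  down ? (bmcr | BMCR_PDOWN) : (bmcr & ~BMCR_PDOWN));
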
- */ - ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); - if (ret_val) - goto out; - - phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX; - phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX; - - ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); - if (ret_val) - goto out; - - hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); - - udelay(1); - - if (phy->autoneg_wait_to_complete) { - hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); - - ret_val = igb_phy_has_link(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; - - if (!link) - hw_dbg("Link taking longer than expected.\n"); - - /* Try once more */ - ret_val = igb_phy_has_link(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; - } - -out: - return ret_val; -} - -/** - * igb_get_phy_info_82580 - Retrieve I82580 PHY information - * @hw: pointer to the HW structure - * - * Read PHY status to determine if link is up. If link is up, then - * set/determine 10base-T extended distance and polarity correction. Read - * PHY port status to determine MDI/MDIx and speed. Based on the speed, - * determine on the cable length, local and remote receiver. - **/ -s32 igb_get_phy_info_82580(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 data; - bool link; - - - ret_val = igb_phy_has_link(hw, 1, 0, &link); - if (ret_val) - goto out; - - if (!link) { - hw_dbg("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; - } - - phy->polarity_correction = true; - - ret_val = igb_check_polarity_82580(hw); - if (ret_val) - goto out; - - ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); - if (ret_val) - goto out; - - phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; - - if ((data & I82580_PHY_STATUS2_SPEED_MASK) == - I82580_PHY_STATUS2_SPEED_1000MBPS) { - ret_val = hw->phy.ops.get_cable_length(hw); - if (ret_val) - goto out; - - ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); - if (ret_val) - goto out; - - phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - - phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) - ? e1000_1000t_rx_status_ok - : e1000_1000t_rx_status_not_ok; - } else { - phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; - phy->local_rx = e1000_1000t_rx_status_undefined; - phy->remote_rx = e1000_1000t_rx_status_undefined; - } - -out: - return ret_val; -} - -/** - * igb_get_cable_length_82580 - Determine cable length for 82580 PHY - * @hw: pointer to the HW structure - * - * Reads the diagnostic status register and verifies result is valid before - * placing it in the phy_cable_length field. - **/ -s32 igb_get_cable_length_82580(struct e1000_hw *hw) -{ - struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; - u16 phy_data, length; - - - ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); - if (ret_val) - goto out; - - length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> - I82580_DSTATUS_CABLE_LENGTH_SHIFT; - - if (length == E1000_CABLE_LENGTH_UNDEFINED) - ret_val = -E1000_ERR_PHY; - - phy->cable_length = length; - -out: - return ret_val; -} diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h deleted file mode 100644 index 8510797..0000000 --- a/drivers/net/igb/e1000_phy.h +++ /dev/null @@ -1,136 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_PHY_H_ -#define _E1000_PHY_H_ - -enum e1000_ms_type { - e1000_ms_hw_default = 0, - e1000_ms_force_master, - e1000_ms_force_slave, - e1000_ms_auto -}; - -enum e1000_smart_speed { - e1000_smart_speed_default = 0, - e1000_smart_speed_on, - e1000_smart_speed_off -}; - -s32 igb_check_downshift(struct e1000_hw *hw); -s32 igb_check_reset_block(struct e1000_hw *hw); -s32 igb_copper_link_setup_igp(struct e1000_hw *hw); -s32 igb_copper_link_setup_m88(struct e1000_hw *hw); -s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); -s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); -s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); -s32 igb_get_cable_length_m88(struct e1000_hw *hw); -s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); -s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); -s32 igb_get_phy_id(struct e1000_hw *hw); -s32 igb_get_phy_info_igp(struct e1000_hw *hw); -s32 igb_get_phy_info_m88(struct e1000_hw *hw); -s32 igb_phy_sw_reset(struct e1000_hw *hw); -s32 igb_phy_hw_reset(struct e1000_hw *hw); -s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); -s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); -s32 igb_setup_copper_link(struct e1000_hw *hw); -s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); -s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, - u32 usec_interval, bool *success); -void igb_power_up_phy_copper(struct e1000_hw *hw); -void igb_power_down_phy_copper(struct e1000_hw *hw); -s32 igb_phy_init_script_igp3(struct e1000_hw *hw); -s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); -s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); -s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); -s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); -s32 igb_copper_link_setup_82580(struct e1000_hw *hw); -s32 igb_get_phy_info_82580(struct e1000_hw *hw); -s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); -s32 igb_get_cable_length_82580(struct e1000_hw *hw); - -/* IGP01E1000 Specific Registers */ -#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ -#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ -#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ -#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ -#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ -#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ -#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 -#define IGP01E1000_PHY_POLARITY_MASK 0x0078 -#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 
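/*
 * Minimal sketch (hypothetical helper, not part of this patch): the
 * PSCR masks above are applied with the same read-modify-write pattern
 * that igb_phy_force_speed_duplex_82580() uses on I82580_PHY_CTRL_2,
 * clearing auto-MDIX before a forced-speed link attempt.
 */
static inline s32 igp_clear_auto_mdix(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 data;

	ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
	if (ret_val)
		return ret_val;

	/* 0 in the MDI/MDIX select bit means plain MDI */
	data &= ~IGP01E1000_PSCR_AUTO_MDIX;
	return hw->phy.ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data);
}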
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ -#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 - -#define I82580_ADDR_REG 16 -#define I82580_CFG_REG 22 -#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) -#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ -#define I82580_CTRL_REG 23 -#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) - -/* 82580 specific PHY registers */ -#define I82580_PHY_CTRL_2 18 -#define I82580_PHY_LBK_CTRL 19 -#define I82580_PHY_STATUS_2 26 -#define I82580_PHY_DIAG_STATUS 31 - -/* I82580 PHY Status 2 */ -#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 -#define I82580_PHY_STATUS2_MDIX 0x0800 -#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 -#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 -#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 - -/* I82580 PHY Control 2 */ -#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400 -#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 - -/* I82580 PHY Diagnostics Status */ -#define I82580_DSTATUS_CABLE_LENGTH 0x03FC -#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 -/* Enable flexible speed on link-up */ -#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ -#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ -#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 -#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 -#define IGP01E1000_PSSR_MDIX 0x0800 -#define IGP01E1000_PSSR_SPEED_MASK 0xC000 -#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 -#define IGP02E1000_PHY_CHANNEL_NUM 4 -#define IGP02E1000_PHY_AGC_A 0x11B1 -#define IGP02E1000_PHY_AGC_B 0x12B1 -#define IGP02E1000_PHY_AGC_C 0x14B1 -#define IGP02E1000_PHY_AGC_D 0x18B1 -#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ -#define IGP02E1000_AGC_LENGTH_MASK 0x7F -#define IGP02E1000_AGC_RANGE 15 - -#define E1000_CABLE_LENGTH_UNDEFINED 0xFF - -#endif diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h deleted file mode 100644 index 0990f6d..0000000 --- a/drivers/net/igb/e1000_regs.h +++ /dev/null @@ -1,354 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_REGS_H_ -#define _E1000_REGS_H_ - -#define E1000_CTRL 0x00000 /* Device Control - RW */ -#define E1000_STATUS 0x00008 /* Device Status - RO */ -#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ -#define E1000_EERD 0x00014 /* EEPROM Read - RW */ -#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ -#define E1000_MDIC 0x00020 /* MDI Control - RW */ -#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ -#define E1000_SCTL 0x00024 /* SerDes Control - RW */ -#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ -#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ -#define E1000_FCT 0x00030 /* Flow Control Type - RW */ -#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ -#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ -#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ -#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ -#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ -#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ -#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ -#define E1000_RCTL 0x00100 /* RX Control - RW */ -#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ -#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ -#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ -#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) -#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ -#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ -#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ -#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ -#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ -#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ -#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ -#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ -#define E1000_TCTL 0x00400 /* TX Control - RW */ -#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ -#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ -#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ -#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ -#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ -#define E1000_PBS 0x01008 /* Packet Buffer Size */ -#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ -#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ -#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ -#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ -#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ -#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ -#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ -#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ - -/* IEEE 1588 TIMESYNCH */ -#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ -#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ -#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ -#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ -#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ -#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ -#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ -#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ -#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ -#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ -#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ -#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ -#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ -#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ - -/* Filtering Registers */ -#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) -#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) -#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) -#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) -#define E1000_SAQF0 E1000_SAQF(0) -#define E1000_DAQF0 E1000_DAQF(0) -#define E1000_SPQF0 E1000_SPQF(0) -#define E1000_FTQF0 E1000_FTQF(0) -#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ -#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ - -#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) - -/* DMA Coalescing registers */ -#define E1000_DMACR 0x02508 /* Control Register */ -#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ -#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ -#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ -#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ -#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ -#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ - -/* TX Rate Limit Registers */ -#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ -#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ - -/* Split and Replication RX Control - RW */ -#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ -/* - * Convenience macros - * - * Note: "_n" is the queue number of the register to be written to. 
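 * Queues 0-3 sit in the legacy register block (spaced 0x100 bytes
 * apart); queue 4 and above use the alternate block spaced 0x40 bytes
 * apart, which is why every macro below branches on (_n) < 4.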
- * - * Example usage: - * E1000_RDBAL_REG(current_rx_queue) - */ -#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ - : (0x0C000 + ((_n) * 0x40))) -#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ - : (0x0C004 + ((_n) * 0x40))) -#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ - : (0x0C008 + ((_n) * 0x40))) -#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ - : (0x0C00C + ((_n) * 0x40))) -#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ - : (0x0C010 + ((_n) * 0x40))) -#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ - : (0x0C018 + ((_n) * 0x40))) -#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ - : (0x0C028 + ((_n) * 0x40))) -#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ - : (0x0E000 + ((_n) * 0x40))) -#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ - : (0x0E004 + ((_n) * 0x40))) -#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ - : (0x0E008 + ((_n) * 0x40))) -#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ - : (0x0E010 + ((_n) * 0x40))) -#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ - : (0x0E018 + ((_n) * 0x40))) -#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ - : (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) -#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ - : (0x0E038 + ((_n) * 0x40))) -#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ - : (0x0E03C + ((_n) * 0x40))) -#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ -#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ -#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ -#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ -#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ -#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ -#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ -#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ -#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ -#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ -#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ -#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ -#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ -#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ -#define E1000_COLC 0x04028 /* Collision Count - R/clr */ -#define E1000_DC 0x04030 /* Defer Count - R/clr */ -#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ -#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ -#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ -#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ -#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ -#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ -#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ -#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ -#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ -#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ -#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ -#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ -#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ -#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ -#define E1000_PRC1522 
0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ -#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ -#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ -#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ -#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ -#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ -#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ -#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ -#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ -#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ -#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ -#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ -#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ -#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ -#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ -#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ -#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ -#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ -#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ -#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ -#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ -#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ -#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ -#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ -#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ -#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ -#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ -#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ -#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ -#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ -#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ -#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ -#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ -#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ -/* Interrupt Cause Rx Packet Timer Expire Count */ -#define E1000_ICRXPTC 0x04104 -/* Interrupt Cause Rx Absolute Timer Expire Count */ -#define E1000_ICRXATC 0x04108 -/* Interrupt Cause Tx Packet Timer Expire Count */ -#define E1000_ICTXPTC 0x0410C -/* Interrupt Cause Tx Absolute Timer Expire Count */ -#define E1000_ICTXATC 0x04110 -/* Interrupt Cause Tx Queue Empty Count */ -#define E1000_ICTXQEC 0x04118 -/* Interrupt Cause Tx Queue Minimum Threshold Count */ -#define E1000_ICTXQMTC 0x0411C -/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ -#define E1000_ICRXDMTC 0x04120 -#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ -#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ -#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ -#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ -#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ -#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ -#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ -#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ -#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ -#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ -#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count 
Low */ -#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ -#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ -#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ -#define E1000_LENERRS 0x04138 /* Length Errors Count */ -#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ -#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ -#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ -#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ -#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ -#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ -#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ -#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ -#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ -#define E1000_RA 0x05400 /* Receive Address - RW Array */ -#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ -#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) -#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x054E0 + ((_i - 16) * 8))) -#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ - (0x054E4 + ((_i - 16) * 8))) -#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) -#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) -#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) -#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) -#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) -#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) -#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ -#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ -#define E1000_WUC 0x05800 /* Wakeup Control - RW */ -#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ -#define E1000_WUS 0x05810 /* Wakeup Status - RO */ -#define E1000_MANC 0x05820 /* Management Control - RW */ -#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ -#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ - -#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ -#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ -#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ -#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ -#define E1000_GCR 0x05B00 /* PCI-Ex Control */ -#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ -#define E1000_SWSM 0x05B50 /* SW Semaphore */ -#define E1000_FWSM 0x05B54 /* FW Semaphore */ -#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ - -/* RSS registers */ -#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ -#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ -#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ -#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */ -/* MSI-X Allocation Register (_i) - RW */ -#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) -/* Redirection Table - RW Array */ -#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) -#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ - -/* VT Registers */ -#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ -#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ -#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ -#define E1000_VFRE 0x00C8C /* VF Receive Enables */ -#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ -#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ -#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - 
RW */ -#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ -#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ -#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ -#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ -/* These act per VF so an array friendly macro is used */ -#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) -#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) -#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) -#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine - * Filter - RW */ -#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) - -#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) -#define rd32(reg) (readl(hw->hw_addr + reg)) -#define wrfl() ((void)rd32(E1000_STATUS)) - -#define array_wr32(reg, offset, value) \ - (writel(value, hw->hw_addr + reg + ((offset) << 2))) -#define array_rd32(reg, offset) \ - (readl(hw->hw_addr + reg + ((offset) << 2))) - -/* DMA Coalescing registers */ -#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ - -/* Energy Efficient Ethernet "EEE" register */ -#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ -#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ - -/* Thermal Sensor Register */ -#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ - -/* OS2BMC Registers */ -#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ -#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ -#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ -#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ - -#endif diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h deleted file mode 100644 index 265e151..0000000 --- a/drivers/net/igb/igb.h +++ /dev/null @@ -1,415 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -/* Linux PRO/1000 Ethernet Driver main header file */ - -#ifndef _IGB_H_ -#define _IGB_H_ - -#include "e1000_mac.h" -#include "e1000_82575.h" - -#include -#include -#include -#include -#include - -struct igb_adapter; - -/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */ -#define IGB_START_ITR 648 - -/* TX/RX descriptor defines */ -#define IGB_DEFAULT_TXD 256 -#define IGB_MIN_TXD 80 -#define IGB_MAX_TXD 4096 - -#define IGB_DEFAULT_RXD 256 -#define IGB_MIN_RXD 80 -#define IGB_MAX_RXD 4096 - -#define IGB_DEFAULT_ITR 3 /* dynamic */ -#define IGB_MAX_ITR_USECS 10000 -#define IGB_MIN_ITR_USECS 10 -#define NON_Q_VECTORS 1 -#define MAX_Q_VECTORS 8 - -/* Transmit and receive queues */ -#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \ - (hw->mac.type > e1000_82575 ? 8 : 4)) -#define IGB_ABS_MAX_TX_QUEUES 8 -#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES - -#define IGB_MAX_VF_MC_ENTRIES 30 -#define IGB_MAX_VF_FUNCTIONS 8 -#define IGB_MAX_VFTA_ENTRIES 128 - -struct vf_data_storage { - unsigned char vf_mac_addresses[ETH_ALEN]; - u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; - u16 num_vf_mc_hashes; - u16 vlans_enabled; - u32 flags; - unsigned long last_nack; - u16 pf_vlan; /* When set, guest VLAN config not allowed. */ - u16 pf_qos; - u16 tx_rate; -}; - -#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ -#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ -#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ -#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */ - -/* RX descriptor control thresholds. - * PTHRESH - MAC will consider prefetch if it has fewer than this number of - * descriptors available in its onboard memory. - * Setting this to 0 disables RX descriptor prefetch. - * HTHRESH - MAC will only prefetch if there are at least this many descriptors - * available in host memory. - * If PTHRESH is 0, this should also be 0. - * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back - * descriptors until either it has this many to write back, or the - * ITR timer expires. - */ -#define IGB_RX_PTHRESH 8 -#define IGB_RX_HTHRESH 8 -#define IGB_RX_WTHRESH 1 -#define IGB_TX_PTHRESH 8 -#define IGB_TX_HTHRESH 1 -#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ - adapter->msix_entries) ? 1 : 16) - -/* this is the size past which hardware will drop packets when setting LPE=0 */ -#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 - -/* Supported Rx Buffer Sizes */ -#define IGB_RXBUFFER_64 64 /* Used for packet split */ -#define IGB_RXBUFFER_128 128 /* Used for packet split */ -#define IGB_RXBUFFER_1024 1024 -#define IGB_RXBUFFER_2048 2048 -#define IGB_RXBUFFER_16384 16384 - -#define MAX_STD_JUMBO_FRAME_SIZE 9234 - -/* How many Tx Descriptors do we need to call netif_wake_queue ? */ -#define IGB_TX_QUEUE_WAKE 16 -/* How many Rx Buffers do we bundle into one write to the hardware ? 
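 * (Rx buffers are replenished in batches of IGB_RX_BUFFER_WRITE during
 * cleanup, so the tail register is written once per batch rather than
 * once per buffer, trading a little latency for far fewer MMIO writes.)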
*/ -#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ - -#define AUTO_ALL_MODES 0 -#define IGB_EEPROM_APME 0x0400 - -#ifndef IGB_MASTER_SLAVE -/* Switch to override PHY master/slave setting */ -#define IGB_MASTER_SLAVE e1000_ms_hw_default -#endif - -#define IGB_MNG_VLAN_NONE -1 - -/* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ -struct igb_buffer { - struct sk_buff *skb; - dma_addr_t dma; - union { - /* TX */ - struct { - unsigned long time_stamp; - u16 length; - u16 next_to_watch; - unsigned int bytecount; - u16 gso_segs; - u8 tx_flags; - u8 mapped_as_page; - }; - /* RX */ - struct { - struct page *page; - dma_addr_t page_dma; - u16 page_offset; - }; - }; -}; - -struct igb_tx_queue_stats { - u64 packets; - u64 bytes; - u64 restart_queue; - u64 restart_queue2; -}; - -struct igb_rx_queue_stats { - u64 packets; - u64 bytes; - u64 drops; - u64 csum_err; - u64 alloc_failed; -}; - -struct igb_q_vector { - struct igb_adapter *adapter; /* backlink */ - struct igb_ring *rx_ring; - struct igb_ring *tx_ring; - struct napi_struct napi; - - u32 eims_value; - u16 cpu; - - u16 itr_val; - u8 set_itr; - void __iomem *itr_register; - - char name[IFNAMSIZ + 9]; -}; - -struct igb_ring { - struct igb_q_vector *q_vector; /* backlink to q_vector */ - struct net_device *netdev; /* back pointer to net_device */ - struct device *dev; /* device pointer for dma mapping */ - dma_addr_t dma; /* phys address of the ring */ - void *desc; /* descriptor ring memory */ - unsigned int size; /* length of desc. ring in bytes */ - u16 count; /* number of desc. in the ring */ - u16 next_to_use; - u16 next_to_clean; - u8 queue_index; - u8 reg_idx; - void __iomem *head; - void __iomem *tail; - struct igb_buffer *buffer_info; /* array of buffer info structs */ - - unsigned int total_bytes; - unsigned int total_packets; - - u32 flags; - - union { - /* TX */ - struct { - struct igb_tx_queue_stats tx_stats; - struct u64_stats_sync tx_syncp; - struct u64_stats_sync tx_syncp2; - bool detect_tx_hung; - }; - /* RX */ - struct { - struct igb_rx_queue_stats rx_stats; - struct u64_stats_sync rx_syncp; - u32 rx_buffer_len; - }; - }; -}; - -#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */ -#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */ - -#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */ - -#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS) - -#define E1000_RX_DESC_ADV(R, i) \ - (&(((union e1000_adv_rx_desc *)((R).desc))[i])) -#define E1000_TX_DESC_ADV(R, i) \ - (&(((union e1000_adv_tx_desc *)((R).desc))[i])) -#define E1000_TX_CTXTDESC_ADV(R, i) \ - (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) - -/* igb_desc_unused - calculate if we have unused descriptors */ -static inline int igb_desc_unused(struct igb_ring *ring) -{ - if (ring->next_to_clean > ring->next_to_use) - return ring->next_to_clean - ring->next_to_use - 1; - - return ring->count + ring->next_to_clean - ring->next_to_use - 1; -} - -/* board specific private data structure */ -struct igb_adapter { - struct timer_list watchdog_timer; - struct timer_list phy_info_timer; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u16 mng_vlan_id; - u32 bd_number; - u32 wol; - u32 en_mng_pt; - u16 link_speed; - u16 link_duplex; - - /* Interrupt Throttle Rate */ - u32 rx_itr_setting; - u32 tx_itr_setting; - u16 tx_itr; - u16 rx_itr; - - struct work_struct reset_task; - struct work_struct watchdog_task; - bool fc_autoneg; - u8 
tx_timeout_factor; - struct timer_list blink_timer; - unsigned long led_status; - - /* TX */ - struct igb_ring *tx_ring[16]; - u32 tx_timeout_count; - - /* RX */ - struct igb_ring *rx_ring[16]; - int num_tx_queues; - int num_rx_queues; - - u32 max_frame_size; - u32 min_frame_size; - - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - struct cyclecounter cycles; - struct timecounter clock; - struct timecompare compare; - struct hwtstamp_config hwtstamp_config; - - spinlock_t stats64_lock; - struct rtnl_link_stats64 stats64; - - /* structs defined in e1000_hw.h */ - struct e1000_hw hw; - struct e1000_hw_stats stats; - struct e1000_phy_info phy_info; - struct e1000_phy_stats phy_stats; - - u32 test_icr; - struct igb_ring test_tx_ring; - struct igb_ring test_rx_ring; - - int msg_enable; - - unsigned int num_q_vectors; - struct igb_q_vector *q_vector[MAX_Q_VECTORS]; - struct msix_entry *msix_entries; - u32 eims_enable_mask; - u32 eims_other; - - /* to not mess up cache alignment, always add to the bottom */ - unsigned long state; - unsigned int flags; - u32 eeprom_wol; - - struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES]; - u16 tx_ring_count; - u16 rx_ring_count; - unsigned int vfs_allocated_count; - struct vf_data_storage *vf_data; - int vf_rate_link_speed; - u32 rss_queues; - u32 wvbr; -}; - -#define IGB_FLAG_HAS_MSI (1 << 0) -#define IGB_FLAG_DCA_ENABLED (1 << 1) -#define IGB_FLAG_QUAD_PORT_A (1 << 2) -#define IGB_FLAG_QUEUE_PAIRS (1 << 3) -#define IGB_FLAG_DMAC (1 << 4) - -/* DMA Coalescing defines */ -#define IGB_MIN_TXPBSIZE 20408 -#define IGB_TX_BUF_4096 4096 -#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ - -#define IGB_82576_TSYNC_SHIFT 19 -#define IGB_82580_TSYNC_SHIFT 24 -#define IGB_TS_HDR_LEN 16 -enum e1000_state_t { - __IGB_TESTING, - __IGB_RESETTING, - __IGB_DOWN -}; - -enum igb_boards { - board_82575, -}; - -extern char igb_driver_name[]; -extern char igb_driver_version[]; - -extern int igb_up(struct igb_adapter *); -extern void igb_down(struct igb_adapter *); -extern void igb_reinit_locked(struct igb_adapter *); -extern void igb_reset(struct igb_adapter *); -extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8); -extern int igb_setup_tx_resources(struct igb_ring *); -extern int igb_setup_rx_resources(struct igb_ring *); -extern void igb_free_tx_resources(struct igb_ring *); -extern void igb_free_rx_resources(struct igb_ring *); -extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); -extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); -extern void igb_setup_tctl(struct igb_adapter *); -extern void igb_setup_rctl(struct igb_adapter *); -extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *); -extern void igb_unmap_and_free_tx_resource(struct igb_ring *, - struct igb_buffer *); -extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int); -extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); -extern bool igb_has_link(struct igb_adapter *adapter); -extern void igb_set_ethtool_ops(struct net_device *); -extern void igb_power_up_link(struct igb_adapter *); - -static inline s32 igb_reset_phy(struct e1000_hw *hw) -{ - if (hw->phy.ops.reset) - return hw->phy.ops.reset(hw); - - return 0; -} - -static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) -{ - if (hw->phy.ops.read_reg) - return hw->phy.ops.read_reg(hw, offset, data); - - return 0; -} - -static inline s32 igb_write_phy_reg(struct e1000_hw *hw, 
u32 offset, u16 data) -{ - if (hw->phy.ops.write_reg) - return hw->phy.ops.write_reg(hw, offset, data); - - return 0; -} - -static inline s32 igb_get_phy_info(struct e1000_hw *hw) -{ - if (hw->phy.ops.get_phy_info) - return hw->phy.ops.get_phy_info(hw); - - return 0; -} - -#endif /* _IGB_H_ */ diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c deleted file mode 100644 index 414b022..0000000 --- a/drivers/net/igb/igb_ethtool.c +++ /dev/null @@ -1,2201 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* ethtool support for igb */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "igb.h" - -struct igb_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; - -#define IGB_STAT(_name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ - .stat_offset = offsetof(struct igb_adapter, _stat) \ -} -static const struct igb_stats igb_gstrings_stats[] = { - IGB_STAT("rx_packets", stats.gprc), - IGB_STAT("tx_packets", stats.gptc), - IGB_STAT("rx_bytes", stats.gorc), - IGB_STAT("tx_bytes", stats.gotc), - IGB_STAT("rx_broadcast", stats.bprc), - IGB_STAT("tx_broadcast", stats.bptc), - IGB_STAT("rx_multicast", stats.mprc), - IGB_STAT("tx_multicast", stats.mptc), - IGB_STAT("multicast", stats.mprc), - IGB_STAT("collisions", stats.colc), - IGB_STAT("rx_crc_errors", stats.crcerrs), - IGB_STAT("rx_no_buffer_count", stats.rnbc), - IGB_STAT("rx_missed_errors", stats.mpc), - IGB_STAT("tx_aborted_errors", stats.ecol), - IGB_STAT("tx_carrier_errors", stats.tncrs), - IGB_STAT("tx_window_errors", stats.latecol), - IGB_STAT("tx_abort_late_coll", stats.latecol), - IGB_STAT("tx_deferred_ok", stats.dc), - IGB_STAT("tx_single_coll_ok", stats.scc), - IGB_STAT("tx_multi_coll_ok", stats.mcc), - IGB_STAT("tx_timeout_count", tx_timeout_count), - IGB_STAT("rx_long_length_errors", stats.roc), - IGB_STAT("rx_short_length_errors", stats.ruc), - IGB_STAT("rx_align_errors", stats.algnerrc), - IGB_STAT("tx_tcp_seg_good", stats.tsctc), - IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), - IGB_STAT("rx_flow_control_xon", stats.xonrxc), - IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), - IGB_STAT("tx_flow_control_xon", stats.xontxc), - IGB_STAT("tx_flow_control_xoff", stats.xofftxc), - IGB_STAT("rx_long_byte_count", stats.gorc), - IGB_STAT("tx_dma_out_of_sync", stats.doosync), - IGB_STAT("tx_smbus", stats.mgptc), 
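	/* Each IGB_STAT() entry above records the field's size and byte
	 * offset inside struct igb_adapter, so the ethtool stats handler
	 * can copy values out generically instead of naming every field. */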
- IGB_STAT("rx_smbus", stats.mgprc), - IGB_STAT("dropped_smbus", stats.mgpdc), - IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), - IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), - IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), - IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), -}; - -#define IGB_NETDEV_STAT(_net_stat) { \ - .stat_string = __stringify(_net_stat), \ - .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ - .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ -} -static const struct igb_stats igb_gstrings_net_stats[] = { - IGB_NETDEV_STAT(rx_errors), - IGB_NETDEV_STAT(tx_errors), - IGB_NETDEV_STAT(tx_dropped), - IGB_NETDEV_STAT(rx_length_errors), - IGB_NETDEV_STAT(rx_over_errors), - IGB_NETDEV_STAT(rx_frame_errors), - IGB_NETDEV_STAT(rx_fifo_errors), - IGB_NETDEV_STAT(tx_fifo_errors), - IGB_NETDEV_STAT(tx_heartbeat_errors) -}; - -#define IGB_GLOBAL_STATS_LEN \ - (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) -#define IGB_NETDEV_STATS_LEN \ - (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) -#define IGB_RX_QUEUE_STATS_LEN \ - (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) - -#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ - -#define IGB_QUEUE_STATS_LEN \ - ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ - IGB_RX_QUEUE_STATS_LEN) + \ - (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ - IGB_TX_QUEUE_STATS_LEN)) -#define IGB_STATS_LEN \ - (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) - -static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", "Eeprom test (offline)", - "Interrupt test (offline)", "Loopback test (offline)", - "Link test (on/offline)" -}; -#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) - -static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 status; - - if (hw->phy.media_type == e1000_media_type_copper) { - - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full| - SUPPORTED_Autoneg | - SUPPORTED_TP); - ecmd->advertising = ADVERTISED_TP; - - if (hw->mac.autoneg == 1) { - ecmd->advertising |= ADVERTISED_Autoneg; - /* the e1000 autoneg seems to match ethtool nicely */ - ecmd->advertising |= hw->phy.autoneg_advertised; - } - - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy.addr; - } else { - ecmd->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg); - - ecmd->advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE | - ADVERTISED_Autoneg); - - ecmd->port = PORT_FIBRE; - } - - ecmd->transceiver = XCVR_INTERNAL; - - status = rd32(E1000_STATUS); - - if (status & E1000_STATUS_LU) { - - if ((status & E1000_STATUS_SPEED_1000) || - hw->phy.media_type != e1000_media_type_copper) - ethtool_cmd_speed_set(ecmd, SPEED_1000); - else if (status & E1000_STATUS_SPEED_100) - ethtool_cmd_speed_set(ecmd, SPEED_100); - else - ethtool_cmd_speed_set(ecmd, SPEED_10); - - if ((status & E1000_STATUS_FD) || - hw->phy.media_type != e1000_media_type_copper) - ecmd->duplex = DUPLEX_FULL; - else - ecmd->duplex = DUPLEX_HALF; - } else { - ethtool_cmd_speed_set(ecmd, -1); - ecmd->duplex = -1; - } - - ecmd->autoneg = hw->mac.autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; - return 0; -} - -static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - /* When SoL/IDER sessions are active, autoneg/speed/duplex - * cannot be changed */ - if (igb_check_reset_block(hw)) { - dev_err(&adapter->pdev->dev, "Cannot change link " - "characteristics when SoL/IDER is active.\n"); - return -EINVAL; - } - - while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); - - if (ecmd->autoneg == AUTONEG_ENABLE) { - hw->mac.autoneg = 1; - hw->phy.autoneg_advertised = ecmd->advertising | - ADVERTISED_TP | - ADVERTISED_Autoneg; - ecmd->advertising = hw->phy.autoneg_advertised; - if (adapter->fc_autoneg) - hw->fc.requested_mode = e1000_fc_default; - } else { - u32 speed = ethtool_cmd_speed(ecmd); - if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { - clear_bit(__IGB_RESETTING, &adapter->state); - return -EINVAL; - } - } - - /* reset the link */ - if (netif_running(adapter->netdev)) { - igb_down(adapter); - igb_up(adapter); - } else - igb_reset(adapter); - - clear_bit(__IGB_RESETTING, &adapter->state); - return 0; -} - -static u32 igb_get_link(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_mac_info *mac = &adapter->hw.mac; - - /* - * If the link is not reported up to netdev, interrupts are disabled, - * and so the physical link state may have changed since we last - * looked. Set get_link_status to make sure that the true link - * state is interrogated, rather than pulling a cached and possibly - * stale link state from the driver. - */ - if (!netif_carrier_ok(netdev)) - mac->get_link_status = 1; - - return igb_has_link(adapter); -} - -static void igb_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - pause->autoneg = - (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); - - if (hw->fc.current_mode == e1000_fc_rx_pause) - pause->rx_pause = 1; - else if (hw->fc.current_mode == e1000_fc_tx_pause) - pause->tx_pause = 1; - else if (hw->fc.current_mode == e1000_fc_full) { - pause->rx_pause = 1; - pause->tx_pause = 1; - } -} - -static int igb_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int retval = 0; - - adapter->fc_autoneg = pause->autoneg; - - while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); - - if (adapter->fc_autoneg == AUTONEG_ENABLE) { - hw->fc.requested_mode = e1000_fc_default; - if (netif_running(adapter->netdev)) { - igb_down(adapter); - igb_up(adapter); - } else { - igb_reset(adapter); - } - } else { - if (pause->rx_pause && pause->tx_pause) - hw->fc.requested_mode = e1000_fc_full; - else if (pause->rx_pause && !pause->tx_pause) - hw->fc.requested_mode = e1000_fc_rx_pause; - else if (!pause->rx_pause && pause->tx_pause) - hw->fc.requested_mode = e1000_fc_tx_pause; - else if (!pause->rx_pause && !pause->tx_pause) - hw->fc.requested_mode = e1000_fc_none; - - hw->fc.current_mode = hw->fc.requested_mode; - - retval = ((hw->phy.media_type == e1000_media_type_copper) ? 
- igb_force_mac_fc(hw) : igb_setup_link(hw)); - } - - clear_bit(__IGB_RESETTING, &adapter->state); - return retval; -} - -static u32 igb_get_msglevel(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - return adapter->msg_enable; -} - -static void igb_set_msglevel(struct net_device *netdev, u32 data) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = data; -} - -static int igb_get_regs_len(struct net_device *netdev) -{ -#define IGB_REGS_LEN 551 - return IGB_REGS_LEN * sizeof(u32); -} - -static void igb_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 *regs_buff = p; - u8 i; - - memset(p, 0, IGB_REGS_LEN * sizeof(u32)); - - regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; - - /* General Registers */ - regs_buff[0] = rd32(E1000_CTRL); - regs_buff[1] = rd32(E1000_STATUS); - regs_buff[2] = rd32(E1000_CTRL_EXT); - regs_buff[3] = rd32(E1000_MDIC); - regs_buff[4] = rd32(E1000_SCTL); - regs_buff[5] = rd32(E1000_CONNSW); - regs_buff[6] = rd32(E1000_VET); - regs_buff[7] = rd32(E1000_LEDCTL); - regs_buff[8] = rd32(E1000_PBA); - regs_buff[9] = rd32(E1000_PBS); - regs_buff[10] = rd32(E1000_FRTIMER); - regs_buff[11] = rd32(E1000_TCPTIMER); - - /* NVM Register */ - regs_buff[12] = rd32(E1000_EECD); - - /* Interrupt */ - /* Reading EICS for EICR because they read the - * same but EICS does not clear on read */ - regs_buff[13] = rd32(E1000_EICS); - regs_buff[14] = rd32(E1000_EICS); - regs_buff[15] = rd32(E1000_EIMS); - regs_buff[16] = rd32(E1000_EIMC); - regs_buff[17] = rd32(E1000_EIAC); - regs_buff[18] = rd32(E1000_EIAM); - /* Reading ICS for ICR because they read the - * same but ICS does not clear on read */ - regs_buff[19] = rd32(E1000_ICS); - regs_buff[20] = rd32(E1000_ICS); - regs_buff[21] = rd32(E1000_IMS); - regs_buff[22] = rd32(E1000_IMC); - regs_buff[23] = rd32(E1000_IAC); - regs_buff[24] = rd32(E1000_IAM); - regs_buff[25] = rd32(E1000_IMIRVP); - - /* Flow Control */ - regs_buff[26] = rd32(E1000_FCAL); - regs_buff[27] = rd32(E1000_FCAH); - regs_buff[28] = rd32(E1000_FCTTV); - regs_buff[29] = rd32(E1000_FCRTL); - regs_buff[30] = rd32(E1000_FCRTH); - regs_buff[31] = rd32(E1000_FCRTV); - - /* Receive */ - regs_buff[32] = rd32(E1000_RCTL); - regs_buff[33] = rd32(E1000_RXCSUM); - regs_buff[34] = rd32(E1000_RLPML); - regs_buff[35] = rd32(E1000_RFCTL); - regs_buff[36] = rd32(E1000_MRQC); - regs_buff[37] = rd32(E1000_VT_CTL); - - /* Transmit */ - regs_buff[38] = rd32(E1000_TCTL); - regs_buff[39] = rd32(E1000_TCTL_EXT); - regs_buff[40] = rd32(E1000_TIPG); - regs_buff[41] = rd32(E1000_DTXCTL); - - /* Wake Up */ - regs_buff[42] = rd32(E1000_WUC); - regs_buff[43] = rd32(E1000_WUFC); - regs_buff[44] = rd32(E1000_WUS); - regs_buff[45] = rd32(E1000_IPAV); - regs_buff[46] = rd32(E1000_WUPL); - - /* MAC */ - regs_buff[47] = rd32(E1000_PCS_CFG0); - regs_buff[48] = rd32(E1000_PCS_LCTL); - regs_buff[49] = rd32(E1000_PCS_LSTAT); - regs_buff[50] = rd32(E1000_PCS_ANADV); - regs_buff[51] = rd32(E1000_PCS_LPAB); - regs_buff[52] = rd32(E1000_PCS_NPTX); - regs_buff[53] = rd32(E1000_PCS_LPABNP); - - /* Statistics */ - regs_buff[54] = adapter->stats.crcerrs; - regs_buff[55] = adapter->stats.algnerrc; - regs_buff[56] = adapter->stats.symerrs; - regs_buff[57] = adapter->stats.rxerrc; - regs_buff[58] = adapter->stats.mpc; - regs_buff[59] = adapter->stats.scc; - regs_buff[60] = adapter->stats.ecol; - regs_buff[61] = 
adapter->stats.mcc; - regs_buff[62] = adapter->stats.latecol; - regs_buff[63] = adapter->stats.colc; - regs_buff[64] = adapter->stats.dc; - regs_buff[65] = adapter->stats.tncrs; - regs_buff[66] = adapter->stats.sec; - regs_buff[67] = adapter->stats.htdpmc; - regs_buff[68] = adapter->stats.rlec; - regs_buff[69] = adapter->stats.xonrxc; - regs_buff[70] = adapter->stats.xontxc; - regs_buff[71] = adapter->stats.xoffrxc; - regs_buff[72] = adapter->stats.xofftxc; - regs_buff[73] = adapter->stats.fcruc; - regs_buff[74] = adapter->stats.prc64; - regs_buff[75] = adapter->stats.prc127; - regs_buff[76] = adapter->stats.prc255; - regs_buff[77] = adapter->stats.prc511; - regs_buff[78] = adapter->stats.prc1023; - regs_buff[79] = adapter->stats.prc1522; - regs_buff[80] = adapter->stats.gprc; - regs_buff[81] = adapter->stats.bprc; - regs_buff[82] = adapter->stats.mprc; - regs_buff[83] = adapter->stats.gptc; - regs_buff[84] = adapter->stats.gorc; - regs_buff[86] = adapter->stats.gotc; - regs_buff[88] = adapter->stats.rnbc; - regs_buff[89] = adapter->stats.ruc; - regs_buff[90] = adapter->stats.rfc; - regs_buff[91] = adapter->stats.roc; - regs_buff[92] = adapter->stats.rjc; - regs_buff[93] = adapter->stats.mgprc; - regs_buff[94] = adapter->stats.mgpdc; - regs_buff[95] = adapter->stats.mgptc; - regs_buff[96] = adapter->stats.tor; - regs_buff[98] = adapter->stats.tot; - regs_buff[100] = adapter->stats.tpr; - regs_buff[101] = adapter->stats.tpt; - regs_buff[102] = adapter->stats.ptc64; - regs_buff[103] = adapter->stats.ptc127; - regs_buff[104] = adapter->stats.ptc255; - regs_buff[105] = adapter->stats.ptc511; - regs_buff[106] = adapter->stats.ptc1023; - regs_buff[107] = adapter->stats.ptc1522; - regs_buff[108] = adapter->stats.mptc; - regs_buff[109] = adapter->stats.bptc; - regs_buff[110] = adapter->stats.tsctc; - regs_buff[111] = adapter->stats.iac; - regs_buff[112] = adapter->stats.rpthc; - regs_buff[113] = adapter->stats.hgptc; - regs_buff[114] = adapter->stats.hgorc; - regs_buff[116] = adapter->stats.hgotc; - regs_buff[118] = adapter->stats.lenerrs; - regs_buff[119] = adapter->stats.scvpc; - regs_buff[120] = adapter->stats.hrmpc; - - for (i = 0; i < 4; i++) - regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); - for (i = 0; i < 4; i++) - regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); - for (i = 0; i < 4; i++) - regs_buff[129 + i] = rd32(E1000_RDBAL(i)); - for (i = 0; i < 4; i++) - regs_buff[133 + i] = rd32(E1000_RDBAH(i)); - for (i = 0; i < 4; i++) - regs_buff[137 + i] = rd32(E1000_RDLEN(i)); - for (i = 0; i < 4; i++) - regs_buff[141 + i] = rd32(E1000_RDH(i)); - for (i = 0; i < 4; i++) - regs_buff[145 + i] = rd32(E1000_RDT(i)); - for (i = 0; i < 4; i++) - regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); - - for (i = 0; i < 10; i++) - regs_buff[153 + i] = rd32(E1000_EITR(i)); - for (i = 0; i < 8; i++) - regs_buff[163 + i] = rd32(E1000_IMIR(i)); - for (i = 0; i < 8; i++) - regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); - for (i = 0; i < 16; i++) - regs_buff[179 + i] = rd32(E1000_RAL(i)); - for (i = 0; i < 16; i++) - regs_buff[195 + i] = rd32(E1000_RAH(i)); - - for (i = 0; i < 4; i++) - regs_buff[211 + i] = rd32(E1000_TDBAL(i)); - for (i = 0; i < 4; i++) - regs_buff[215 + i] = rd32(E1000_TDBAH(i)); - for (i = 0; i < 4; i++) - regs_buff[219 + i] = rd32(E1000_TDLEN(i)); - for (i = 0; i < 4; i++) - regs_buff[223 + i] = rd32(E1000_TDH(i)); - for (i = 0; i < 4; i++) - regs_buff[227 + i] = rd32(E1000_TDT(i)); - for (i = 0; i < 4; i++) - regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); - for (i = 0; i < 4; i++) - regs_buff[235 + i] = 
rd32(E1000_TDWBAL(i)); - for (i = 0; i < 4; i++) - regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); - for (i = 0; i < 4; i++) - regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); - - for (i = 0; i < 4; i++) - regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); - for (i = 0; i < 4; i++) - regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); - for (i = 0; i < 32; i++) - regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); - for (i = 0; i < 128; i++) - regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); - for (i = 0; i < 128; i++) - regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); - for (i = 0; i < 4; i++) - regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); - - regs_buff[547] = rd32(E1000_TDFH); - regs_buff[548] = rd32(E1000_TDFT); - regs_buff[549] = rd32(E1000_TDFHS); - regs_buff[550] = rd32(E1000_TDFPC); - regs_buff[551] = adapter->stats.o2bgptc; - regs_buff[552] = adapter->stats.b2ospc; - regs_buff[553] = adapter->stats.o2bspc; - regs_buff[554] = adapter->stats.b2ogprc; -} - -static int igb_get_eeprom_len(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - return adapter->hw.nvm.word_size * 2; -} - -static int igb_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u16 *eeprom_buff; - int first_word, last_word; - int ret_val = 0; - u16 i; - - if (eeprom->len == 0) - return -EINVAL; - - eeprom->magic = hw->vendor_id | (hw->device_id << 16); - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - - eeprom_buff = kmalloc(sizeof(u16) * - (last_word - first_word + 1), GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - if (hw->nvm.type == e1000_nvm_eeprom_spi) - ret_val = hw->nvm.ops.read(hw, first_word, - last_word - first_word + 1, - eeprom_buff); - else { - for (i = 0; i < last_word - first_word + 1; i++) { - ret_val = hw->nvm.ops.read(hw, first_word + i, 1, - &eeprom_buff[i]); - if (ret_val) - break; - } - } - - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < last_word - first_word + 1; i++) - le16_to_cpus(&eeprom_buff[i]); - - memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), - eeprom->len); - kfree(eeprom_buff); - - return ret_val; -} - -static int igb_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u16 *eeprom_buff; - void *ptr; - int max_len, first_word, last_word, ret_val = 0; - u16 i; - - if (eeprom->len == 0) - return -EOPNOTSUPP; - - if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) - return -EFAULT; - - max_len = hw->nvm.word_size * 2; - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_buff = kmalloc(max_len, GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - ptr = (void *)eeprom_buff; - - if (eeprom->offset & 1) { - /* need read/modify/write of first changed EEPROM word */ - /* only the second byte of the word is being modified */ - ret_val = hw->nvm.ops.read(hw, first_word, 1, - &eeprom_buff[0]); - ptr++; - } - if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { - /* need read/modify/write of last changed EEPROM word */ - /* only the first byte of the word is being modified */ - ret_val = hw->nvm.ops.read(hw, last_word, 1, - &eeprom_buff[last_word - first_word]); - } - - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < last_word - first_word + 1; 
i++) - le16_to_cpus(&eeprom_buff[i]); - - memcpy(ptr, bytes, eeprom->len); - - for (i = 0; i < last_word - first_word + 1; i++) - eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); - - ret_val = hw->nvm.ops.write(hw, first_word, - last_word - first_word + 1, eeprom_buff); - - /* Update the checksum over the first part of the EEPROM if needed - * and flush shadow RAM for 82573 controllers */ - if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) - hw->nvm.ops.update(hw); - - kfree(eeprom_buff); - return ret_val; -} - -static void igb_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - char firmware_version[32]; - u16 eeprom_data; - - strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1); - strncpy(drvinfo->version, igb_driver_version, - sizeof(drvinfo->version) - 1); - - /* EEPROM image version # is reported as firmware version # for - * 82575 controllers */ - adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); - sprintf(firmware_version, "%d.%d-%d", - (eeprom_data & 0xF000) >> 12, - (eeprom_data & 0x0FF0) >> 4, - eeprom_data & 0x000F); - - strncpy(drvinfo->fw_version, firmware_version, - sizeof(drvinfo->fw_version) - 1); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info) - 1); - drvinfo->n_stats = IGB_STATS_LEN; - drvinfo->testinfo_len = IGB_TEST_LEN; - drvinfo->regdump_len = igb_get_regs_len(netdev); - drvinfo->eedump_len = igb_get_eeprom_len(netdev); -} - -static void igb_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - - ring->rx_max_pending = IGB_MAX_RXD; - ring->tx_max_pending = IGB_MAX_TXD; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = adapter->rx_ring_count; - ring->tx_pending = adapter->tx_ring_count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} - -static int igb_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct igb_ring *temp_ring; - int i, err = 0; - u16 new_rx_count, new_tx_count; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); - new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); - new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - - new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); - new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); - new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); - - if ((new_tx_count == adapter->tx_ring_count) && - (new_rx_count == adapter->rx_ring_count)) { - /* nothing to do */ - return 0; - } - - while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); - - if (!netif_running(adapter->netdev)) { - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->count = new_tx_count; - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->count = new_rx_count; - adapter->tx_ring_count = new_tx_count; - adapter->rx_ring_count = new_rx_count; - goto clear_reset; - } - - if (adapter->num_tx_queues > adapter->num_rx_queues) - temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); - else - temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); - - if (!temp_ring) { - err = -ENOMEM; - goto clear_reset; - } - - igb_down(adapter); - - /* - * We can't just free everything and then setup again, - * because 
the ISRs in MSI-X mode get passed pointers - * to the tx and rx ring structs. - */ - if (new_tx_count != adapter->tx_ring_count) { - for (i = 0; i < adapter->num_tx_queues; i++) { - memcpy(&temp_ring[i], adapter->tx_ring[i], - sizeof(struct igb_ring)); - - temp_ring[i].count = new_tx_count; - err = igb_setup_tx_resources(&temp_ring[i]); - if (err) { - while (i) { - i--; - igb_free_tx_resources(&temp_ring[i]); - } - goto err_setup; - } - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - igb_free_tx_resources(adapter->tx_ring[i]); - - memcpy(adapter->tx_ring[i], &temp_ring[i], - sizeof(struct igb_ring)); - } - - adapter->tx_ring_count = new_tx_count; - } - - if (new_rx_count != adapter->rx_ring_count) { - for (i = 0; i < adapter->num_rx_queues; i++) { - memcpy(&temp_ring[i], adapter->rx_ring[i], - sizeof(struct igb_ring)); - - temp_ring[i].count = new_rx_count; - err = igb_setup_rx_resources(&temp_ring[i]); - if (err) { - while (i) { - i--; - igb_free_rx_resources(&temp_ring[i]); - } - goto err_setup; - } - - } - - for (i = 0; i < adapter->num_rx_queues; i++) { - igb_free_rx_resources(adapter->rx_ring[i]); - - memcpy(adapter->rx_ring[i], &temp_ring[i], - sizeof(struct igb_ring)); - } - - adapter->rx_ring_count = new_rx_count; - } -err_setup: - igb_up(adapter); - vfree(temp_ring); -clear_reset: - clear_bit(__IGB_RESETTING, &adapter->state); - return err; -} - -/* ethtool register test data */ -struct igb_reg_test { - u16 reg; - u16 reg_offset; - u16 array_len; - u16 test_type; - u32 mask; - u32 write; -}; - -/* In the hardware, registers are laid out either singly, in arrays - * spaced 0x100 bytes apart, or in contiguous tables. We assume - * most tests take place on arrays or single registers (handled - * as a single-element array) and special-case the tables. - * Table tests are always pattern tests. - * - * We also make provision for some required setup steps by specifying - * registers to be written without any read-back testing. - */ - -#define PATTERN_TEST 1 -#define SET_READ_TEST 2 -#define WRITE_NO_TEST 3 -#define TABLE32_TEST 4 -#define TABLE64_TEST_LO 5 -#define TABLE64_TEST_HI 6 - -/* i350 reg test */ -static struct igb_reg_test reg_test_i350[] = { - { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, - { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - /* RDH is read-only for i350, only test RDT. 
*/ - { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, - { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, - { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, - { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RA, 0, 16, TABLE64_TEST_LO, - 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA, 0, 16, TABLE64_TEST_HI, - 0xC3FFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 16, TABLE64_TEST_LO, - 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 16, TABLE64_TEST_HI, - 0xC3FFFFFF, 0xFFFFFFFF }, - { E1000_MTA, 0, 128, TABLE32_TEST, - 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } -}; - -/* 82580 reg test */ -static struct igb_reg_test reg_test_82580[] = { - { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - /* RDH is read-only for 82580, only test RDT. 
*/ - { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, - { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, - { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, - { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RA, 0, 16, TABLE64_TEST_LO, - 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA, 0, 16, TABLE64_TEST_HI, - 0x83FFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 8, TABLE64_TEST_LO, - 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 8, TABLE64_TEST_HI, - 0x83FFFFFF, 0xFFFFFFFF }, - { E1000_MTA, 0, 128, TABLE32_TEST, - 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } -}; - -/* 82576 reg test */ -static struct igb_reg_test reg_test_82576[] = { - { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - /* Enable all RX queues before testing. */ - { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, - { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, - /* RDH is read-only for 82576, only test RDT. 
*/ - { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, - { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, - { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, - { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, - { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, - { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, - { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } -}; - -/* 82575 register test */ -static struct igb_reg_test reg_test_82575[] = { - { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, - { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - /* Enable all four RX queues before testing. */ - { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, - /* RDH is read-only for 82575, only test RDT. 
*/ - { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, - { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, - { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, - { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, - { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, - { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, - { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, - { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, - { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } -}; - -static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, - int reg, u32 mask, u32 write) -{ - struct e1000_hw *hw = &adapter->hw; - u32 pat, val; - static const u32 _test[] = - {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; - for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { - wr32(reg, (_test[pat] & write)); - val = rd32(reg) & mask; - if (val != (_test[pat] & write & mask)) { - dev_err(&adapter->pdev->dev, "pattern test reg %04X " - "failed: got 0x%08X expected 0x%08X\n", - reg, val, (_test[pat] & write & mask)); - *data = reg; - return 1; - } - } - - return 0; -} - -static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, - int reg, u32 mask, u32 write) -{ - struct e1000_hw *hw = &adapter->hw; - u32 val; - wr32(reg, write & mask); - val = rd32(reg); - if ((write & mask) != (val & mask)) { - dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" - " got 0x%08X expected 0x%08X\n", reg, - (val & mask), (write & mask)); - *data = reg; - return 1; - } - - return 0; -} - -#define REG_PATTERN_TEST(reg, mask, write) \ - do { \ - if (reg_pattern_test(adapter, data, reg, mask, write)) \ - return 1; \ - } while (0) - -#define REG_SET_AND_CHECK(reg, mask, write) \ - do { \ - if (reg_set_and_check(adapter, data, reg, mask, write)) \ - return 1; \ - } while (0) - -static int igb_reg_test(struct igb_adapter *adapter, u64 *data) -{ - struct e1000_hw *hw = &adapter->hw; - struct igb_reg_test *test; - u32 value, before, after; - u32 i, toggle; - - switch (adapter->hw.mac.type) { - case e1000_i350: - test = reg_test_i350; - toggle = 0x7FEFF3FF; - break; - case e1000_82580: - test = reg_test_82580; - toggle = 0x7FEFF3FF; - break; - case e1000_82576: - test = reg_test_82576; - toggle = 0x7FFFF3FF; - break; - default: - test = reg_test_82575; - toggle = 0x7FFFF3FF; - break; - } - - /* Because the status register is such a special case, - * we handle it separately from the rest of the register - * tests. Some bits are read-only, some toggle, and some - * are writable on newer MACs. 
- */ - before = rd32(E1000_STATUS); - value = (rd32(E1000_STATUS) & toggle); - wr32(E1000_STATUS, toggle); - after = rd32(E1000_STATUS) & toggle; - if (value != after) { - dev_err(&adapter->pdev->dev, "failed STATUS register test " - "got: 0x%08X expected: 0x%08X\n", after, value); - *data = 1; - return 1; - } - /* restore previous status */ - wr32(E1000_STATUS, before); - - /* Perform the remainder of the register test, looping through - * the test table until we either fail or reach the null entry. - */ - while (test->reg) { - for (i = 0; i < test->array_len; i++) { - switch (test->test_type) { - case PATTERN_TEST: - REG_PATTERN_TEST(test->reg + - (i * test->reg_offset), - test->mask, - test->write); - break; - case SET_READ_TEST: - REG_SET_AND_CHECK(test->reg + - (i * test->reg_offset), - test->mask, - test->write); - break; - case WRITE_NO_TEST: - writel(test->write, - (adapter->hw.hw_addr + test->reg) - + (i * test->reg_offset)); - break; - case TABLE32_TEST: - REG_PATTERN_TEST(test->reg + (i * 4), - test->mask, - test->write); - break; - case TABLE64_TEST_LO: - REG_PATTERN_TEST(test->reg + (i * 8), - test->mask, - test->write); - break; - case TABLE64_TEST_HI: - REG_PATTERN_TEST((test->reg + 4) + (i * 8), - test->mask, - test->write); - break; - } - } - test++; - } - - *data = 0; - return 0; -} - -static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) -{ - u16 temp; - u16 checksum = 0; - u16 i; - - *data = 0; - /* Read and add up the contents of the EEPROM */ - for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { - if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) { - *data = 1; - break; - } - checksum += temp; - } - - /* If Checksum is not Correct return error else test passed */ - if ((checksum != (u16) NVM_SUM) && !(*data)) - *data = 2; - - return *data; -} - -static irqreturn_t igb_test_intr(int irq, void *data) -{ - struct igb_adapter *adapter = (struct igb_adapter *) data; - struct e1000_hw *hw = &adapter->hw; - - adapter->test_icr |= rd32(E1000_ICR); - - return IRQ_HANDLED; -} - -static int igb_intr_test(struct igb_adapter *adapter, u64 *data) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 mask, ics_mask, i = 0, shared_int = true; - u32 irq = adapter->pdev->irq; - - *data = 0; - - /* Hook up test interrupt handler just for this test */ - if (adapter->msix_entries) { - if (request_irq(adapter->msix_entries[0].vector, - igb_test_intr, 0, netdev->name, adapter)) { - *data = 1; - return -1; - } - } else if (adapter->flags & IGB_FLAG_HAS_MSI) { - shared_int = false; - if (request_irq(irq, - igb_test_intr, 0, netdev->name, adapter)) { - *data = 1; - return -1; - } - } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, - netdev->name, adapter)) { - shared_int = false; - } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, - netdev->name, adapter)) { - *data = 1; - return -1; - } - dev_info(&adapter->pdev->dev, "testing %s interrupt\n", - (shared_int ? 
"shared" : "unshared")); - - /* Disable all the interrupts */ - wr32(E1000_IMC, ~0); - wrfl(); - msleep(10); - - /* Define all writable bits for ICS */ - switch (hw->mac.type) { - case e1000_82575: - ics_mask = 0x37F47EDD; - break; - case e1000_82576: - ics_mask = 0x77D4FBFD; - break; - case e1000_82580: - ics_mask = 0x77DCFED5; - break; - case e1000_i350: - ics_mask = 0x77DCFED5; - break; - default: - ics_mask = 0x7FFFFFFF; - break; - } - - /* Test each interrupt */ - for (; i < 31; i++) { - /* Interrupt to test */ - mask = 1 << i; - - if (!(mask & ics_mask)) - continue; - - if (!shared_int) { - /* Disable the interrupt to be reported in - * the cause register and then force the same - * interrupt and see if one gets posted. If - * an interrupt was posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - - /* Flush any pending interrupts */ - wr32(E1000_ICR, ~0); - - wr32(E1000_IMC, mask); - wr32(E1000_ICS, mask); - wrfl(); - msleep(10); - - if (adapter->test_icr & mask) { - *data = 3; - break; - } - } - - /* Enable the interrupt to be reported in - * the cause register and then force the same - * interrupt and see if one gets posted. If - * an interrupt was not posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - - /* Flush any pending interrupts */ - wr32(E1000_ICR, ~0); - - wr32(E1000_IMS, mask); - wr32(E1000_ICS, mask); - wrfl(); - msleep(10); - - if (!(adapter->test_icr & mask)) { - *data = 4; - break; - } - - if (!shared_int) { - /* Disable the other interrupts to be reported in - * the cause register and then force the other - * interrupts and see if any get posted. If - * an interrupt was posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - - /* Flush any pending interrupts */ - wr32(E1000_ICR, ~0); - - wr32(E1000_IMC, ~mask); - wr32(E1000_ICS, ~mask); - wrfl(); - msleep(10); - - if (adapter->test_icr & mask) { - *data = 5; - break; - } - } - } - - /* Disable all the interrupts */ - wr32(E1000_IMC, ~0); - wrfl(); - msleep(10); - - /* Unhook test interrupt handler */ - if (adapter->msix_entries) - free_irq(adapter->msix_entries[0].vector, adapter); - else - free_irq(irq, adapter); - - return *data; -} - -static void igb_free_desc_rings(struct igb_adapter *adapter) -{ - igb_free_tx_resources(&adapter->test_tx_ring); - igb_free_rx_resources(&adapter->test_rx_ring); -} - -static int igb_setup_desc_rings(struct igb_adapter *adapter) -{ - struct igb_ring *tx_ring = &adapter->test_tx_ring; - struct igb_ring *rx_ring = &adapter->test_rx_ring; - struct e1000_hw *hw = &adapter->hw; - int ret_val; - - /* Setup Tx descriptor ring and Tx buffers */ - tx_ring->count = IGB_DEFAULT_TXD; - tx_ring->dev = &adapter->pdev->dev; - tx_ring->netdev = adapter->netdev; - tx_ring->reg_idx = adapter->vfs_allocated_count; - - if (igb_setup_tx_resources(tx_ring)) { - ret_val = 1; - goto err_nomem; - } - - igb_setup_tctl(adapter); - igb_configure_tx_ring(adapter, tx_ring); - - /* Setup Rx descriptor ring and Rx buffers */ - rx_ring->count = IGB_DEFAULT_RXD; - rx_ring->dev = &adapter->pdev->dev; - rx_ring->netdev = adapter->netdev; - rx_ring->rx_buffer_len = IGB_RXBUFFER_2048; - rx_ring->reg_idx = adapter->vfs_allocated_count; - - if (igb_setup_rx_resources(rx_ring)) { - ret_val = 3; - goto err_nomem; - } - - /* set the default queue to queue 0 of PF */ - wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); - - /* enable receive ring */ - igb_setup_rctl(adapter); - igb_configure_rx_ring(adapter, rx_ring); - - igb_alloc_rx_buffers_adv(rx_ring, 
igb_desc_unused(rx_ring)); - - return 0; - -err_nomem: - igb_free_desc_rings(adapter); - return ret_val; -} - -static void igb_phy_disable_receiver(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - /* Write out to PHY registers 29 and 30 to disable the Receiver. */ - igb_write_phy_reg(hw, 29, 0x001F); - igb_write_phy_reg(hw, 30, 0x8FFC); - igb_write_phy_reg(hw, 29, 0x001A); - igb_write_phy_reg(hw, 30, 0x8FF0); -} - -static int igb_integrated_phy_loopback(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl_reg = 0; - - hw->mac.autoneg = false; - - if (hw->phy.type == e1000_phy_m88) { - /* Auto-MDI/MDIX Off */ - igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); - /* reset to update Auto-MDI/MDIX */ - igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); - /* autoneg off */ - igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); - } else if (hw->phy.type == e1000_phy_82580) { - /* enable MII loopback */ - igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); - } - - ctrl_reg = rd32(E1000_CTRL); - - /* force 1000, set loopback */ - igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); - - /* Now set up the MAC to the same speed/duplex as the PHY. */ - ctrl_reg = rd32(E1000_CTRL); - ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ - ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ - E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ - E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ - E1000_CTRL_FD | /* Force Duplex to FULL */ - E1000_CTRL_SLU); /* Set link up enable bit */ - - if (hw->phy.type == e1000_phy_m88) - ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ - - wr32(E1000_CTRL, ctrl_reg); - - /* Disable the receiver on the PHY so when a cable is plugged in, the - * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
- */ - if (hw->phy.type == e1000_phy_m88) - igb_phy_disable_receiver(adapter); - - udelay(500); - - return 0; -} - -static int igb_set_phy_loopback(struct igb_adapter *adapter) -{ - return igb_integrated_phy_loopback(adapter); -} - -static int igb_setup_loopback_test(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 reg; - - reg = rd32(E1000_CTRL_EXT); - - /* use CTRL_EXT to identify link type as SGMII can appear as copper */ - if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { - if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { - - /* Enable DH89xxCC MPHY for near end loopback */ - reg = rd32(E1000_MPHY_ADDR_CTL); - reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | - E1000_MPHY_PCS_CLK_REG_OFFSET; - wr32(E1000_MPHY_ADDR_CTL, reg); - - reg = rd32(E1000_MPHY_DATA); - reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; - wr32(E1000_MPHY_DATA, reg); - } - - reg = rd32(E1000_RCTL); - reg |= E1000_RCTL_LBM_TCVR; - wr32(E1000_RCTL, reg); - - wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); - - reg = rd32(E1000_CTRL); - reg &= ~(E1000_CTRL_RFCE | - E1000_CTRL_TFCE | - E1000_CTRL_LRST); - reg |= E1000_CTRL_SLU | - E1000_CTRL_FD; - wr32(E1000_CTRL, reg); - - /* Unset switch control to serdes energy detect */ - reg = rd32(E1000_CONNSW); - reg &= ~E1000_CONNSW_ENRGSRC; - wr32(E1000_CONNSW, reg); - - /* Set PCS register for forced speed */ - reg = rd32(E1000_PCS_LCTL); - reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ - reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ - E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ - E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ - E1000_PCS_LCTL_FSD | /* Force Speed */ - E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ - wr32(E1000_PCS_LCTL, reg); - - return 0; - } - - return igb_set_phy_loopback(adapter); -} - -static void igb_loopback_cleanup(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - u16 phy_reg; - - if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { - u32 reg; - - /* Disable near end loopback on DH89xxCC */ - reg = rd32(E1000_MPHY_ADDR_CTL); - reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | - E1000_MPHY_PCS_CLK_REG_OFFSET; - wr32(E1000_MPHY_ADDR_CTL, reg); - - reg = rd32(E1000_MPHY_DATA); - reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; - wr32(E1000_MPHY_DATA, reg); - } - - rctl = rd32(E1000_RCTL); - rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); - wr32(E1000_RCTL, rctl); - - hw->mac.autoneg = true; - igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); - if (phy_reg & MII_CR_LOOPBACK) { - phy_reg &= ~MII_CR_LOOPBACK; - igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); - igb_phy_sw_reset(hw); - } -} - -static void igb_create_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - memset(skb->data, 0xFF, frame_size); - frame_size /= 2; - memset(&skb->data[frame_size], 0xAA, frame_size - 1); - memset(&skb->data[frame_size + 10], 0xBE, 1); - memset(&skb->data[frame_size + 12], 0xAF, 1); -} - -static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) -{ - frame_size /= 2; - if (*(skb->data + 3) == 0xFF) { - if ((*(skb->data + frame_size + 10) == 0xBE) && - (*(skb->data + frame_size + 12) == 0xAF)) { - return 0; - } - } - return 13; -} - -static int 
igb_clean_test_rings(struct igb_ring *rx_ring, - struct igb_ring *tx_ring, - unsigned int size) -{ - union e1000_adv_rx_desc *rx_desc; - struct igb_buffer *buffer_info; - int rx_ntc, tx_ntc, count = 0; - u32 staterr; - - /* initialize next to clean and descriptor values */ - rx_ntc = rx_ring->next_to_clean; - tx_ntc = tx_ring->next_to_clean; - rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - - while (staterr & E1000_RXD_STAT_DD) { - /* check rx buffer */ - buffer_info = &rx_ring->buffer_info[rx_ntc]; - - /* unmap rx buffer, will be remapped by alloc_rx_buffers */ - dma_unmap_single(rx_ring->dev, - buffer_info->dma, - rx_ring->rx_buffer_len, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - - /* verify contents of skb */ - if (!igb_check_lbtest_frame(buffer_info->skb, size)) - count++; - - /* unmap buffer on tx side */ - buffer_info = &tx_ring->buffer_info[tx_ntc]; - igb_unmap_and_free_tx_resource(tx_ring, buffer_info); - - /* increment rx/tx next to clean counters */ - rx_ntc++; - if (rx_ntc == rx_ring->count) - rx_ntc = 0; - tx_ntc++; - if (tx_ntc == tx_ring->count) - tx_ntc = 0; - - /* fetch next descriptor */ - rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - } - - /* re-map buffers to ring, store next to clean values */ - igb_alloc_rx_buffers_adv(rx_ring, count); - rx_ring->next_to_clean = rx_ntc; - tx_ring->next_to_clean = tx_ntc; - - return count; -} - -static int igb_run_loopback_test(struct igb_adapter *adapter) -{ - struct igb_ring *tx_ring = &adapter->test_tx_ring; - struct igb_ring *rx_ring = &adapter->test_rx_ring; - int i, j, lc, good_cnt, ret_val = 0; - unsigned int size = 1024; - netdev_tx_t tx_ret_val; - struct sk_buff *skb; - - /* allocate test skb */ - skb = alloc_skb(size, GFP_KERNEL); - if (!skb) - return 11; - - /* place data into test skb */ - igb_create_lbtest_frame(skb, size); - skb_put(skb, size); - - /* - * Calculate the loop count based on the largest descriptor ring - * The idea is to wrap the largest ring a number of times using 64 - * send/receive pairs during each loop - */ - - if (rx_ring->count <= tx_ring->count) - lc = ((tx_ring->count / 64) * 2) + 1; - else - lc = ((rx_ring->count / 64) * 2) + 1; - - for (j = 0; j <= lc; j++) { /* loop count loop */ - /* reset count of good packets */ - good_cnt = 0; - - /* place 64 packets on the transmit queue*/ - for (i = 0; i < 64; i++) { - skb_get(skb); - tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring); - if (tx_ret_val == NETDEV_TX_OK) - good_cnt++; - } - - if (good_cnt != 64) { - ret_val = 12; - break; - } - - /* allow 200 milliseconds for packets to go from tx to rx */ - msleep(200); - - good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); - if (good_cnt != 64) { - ret_val = 13; - break; - } - } /* end loop count loop */ - - /* free the original skb */ - kfree_skb(skb); - - return ret_val; -} - -static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) -{ - /* PHY loopback cannot be performed if SoL/IDER - * sessions are active */ - if (igb_check_reset_block(&adapter->hw)) { - dev_err(&adapter->pdev->dev, - "Cannot do PHY loopback test " - "when SoL/IDER is active.\n"); - *data = 0; - goto out; - } - *data = igb_setup_desc_rings(adapter); - if (*data) - goto out; - *data = igb_setup_loopback_test(adapter); - if (*data) - goto err_loopback; - *data = igb_run_loopback_test(adapter); - igb_loopback_cleanup(adapter); - -err_loopback: - igb_free_desc_rings(adapter); -out: - return *data; -} - 
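The loopback path above is easier to follow with the frame pattern in mind: igb_create_lbtest_frame() fills the first half of each 1024-byte test frame with 0xFF and the second half with 0xAA, then drops single 0xBE/0xAF marker bytes just past the midpoint, and igb_check_lbtest_frame() accepts a received frame only if byte 3 and both markers survived the round trip. A minimal userspace sketch of that pattern logic, with illustrative function names (build_lbtest_frame/check_lbtest_frame are not driver symbols):

#include <stdio.h>
#include <string.h>

/* Illustrative userspace rewrite of igb_create_lbtest_frame(): first half
 * of the frame is 0xFF, second half 0xAA, with single 0xBE/0xAF marker
 * bytes at fixed offsets past the midpoint. */
static void build_lbtest_frame(unsigned char *buf, unsigned int frame_size)
{
	memset(buf, 0xFF, frame_size);
	frame_size /= 2;
	memset(&buf[frame_size], 0xAA, frame_size - 1);
	buf[frame_size + 10] = 0xBE;
	buf[frame_size + 12] = 0xAF;
}

/* Mirror of igb_check_lbtest_frame(): byte 3 must still be 0xFF and both
 * markers must be intact; 13 matches the error code the driver reports. */
static int check_lbtest_frame(const unsigned char *buf, unsigned int frame_size)
{
	frame_size /= 2;
	if (buf[3] == 0xFF &&
	    buf[frame_size + 10] == 0xBE &&
	    buf[frame_size + 12] == 0xAF)
		return 0;
	return 13;
}

int main(void)
{
	unsigned char frame[1024];	/* same size the loopback test uses */

	build_lbtest_frame(frame, sizeof(frame));
	printf("frame check: %d\n", check_lbtest_frame(frame, sizeof(frame)));
	return 0;
}

Checking three bytes rather than the whole frame keeps verification cheap across the 64-packets-per-iteration loop while still distinguishing looped-back test frames from stray traffic; 13 is the same failure code igb_run_loopback_test() propagates when receive verification falls short.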
-static int igb_link_test(struct igb_adapter *adapter, u64 *data) -{ - struct e1000_hw *hw = &adapter->hw; - *data = 0; - if (hw->phy.media_type == e1000_media_type_internal_serdes) { - int i = 0; - hw->mac.serdes_has_link = false; - - /* On some blade server designs, link establishment - * could take as long as 2-3 minutes */ - do { - hw->mac.ops.check_for_link(&adapter->hw); - if (hw->mac.serdes_has_link) - return *data; - msleep(20); - } while (i++ < 3750); - - *data = 1; - } else { - hw->mac.ops.check_for_link(&adapter->hw); - if (hw->mac.autoneg) - msleep(4000); - - if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) - *data = 1; - } - return *data; -} - -static void igb_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - u16 autoneg_advertised; - u8 forced_speed_duplex, autoneg; - bool if_running = netif_running(netdev); - - set_bit(__IGB_TESTING, &adapter->state); - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - - /* save speed, duplex, autoneg settings */ - autoneg_advertised = adapter->hw.phy.autoneg_advertised; - forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; - autoneg = adapter->hw.mac.autoneg; - - dev_info(&adapter->pdev->dev, "offline testing starting\n"); - - /* power up link for link test */ - igb_power_up_link(adapter); - - /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ - if (igb_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - if (if_running) - /* indicate we're in test mode */ - dev_close(netdev); - else - igb_reset(adapter); - - if (igb_reg_test(adapter, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - igb_reset(adapter); - if (igb_eeprom_test(adapter, &data[1])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - igb_reset(adapter); - if (igb_intr_test(adapter, &data[2])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - igb_reset(adapter); - /* power up link for loopback test */ - igb_power_up_link(adapter); - if (igb_loopback_test(adapter, &data[3])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* restore speed, duplex, autoneg settings */ - adapter->hw.phy.autoneg_advertised = autoneg_advertised; - adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; - adapter->hw.mac.autoneg = autoneg; - - /* force this routine to wait until autoneg complete/timeout */ - adapter->hw.phy.autoneg_wait_to_complete = true; - igb_reset(adapter); - adapter->hw.phy.autoneg_wait_to_complete = false; - - clear_bit(__IGB_TESTING, &adapter->state); - if (if_running) - dev_open(netdev); - } else { - dev_info(&adapter->pdev->dev, "online testing starting\n"); - - /* PHY is powered down when interface is down */ - if (if_running && igb_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - else - data[4] = 0; - - /* Online tests aren't run; pass by default */ - data[0] = 0; - data[1] = 0; - data[2] = 0; - data[3] = 0; - - clear_bit(__IGB_TESTING, &adapter->state); - } - msleep_interruptible(4 * 1000); -} - -static int igb_wol_exclusion(struct igb_adapter *adapter, - struct ethtool_wolinfo *wol) -{ - struct e1000_hw *hw = &adapter->hw; - int retval = 1; /* fail by default */ - - switch (hw->device_id) { - case E1000_DEV_ID_82575GB_QUAD_COPPER: - /* WoL not supported */ - wol->supported = 0; - break; - case E1000_DEV_ID_82575EB_FIBER_SERDES: - case E1000_DEV_ID_82576_FIBER: - case E1000_DEV_ID_82576_SERDES: - /* Wake events not supported on port B */ - if (rd32(E1000_STATUS) & 
E1000_STATUS_FUNC_1) { - wol->supported = 0; - break; - } - /* return success for non excluded adapter ports */ - retval = 0; - break; - case E1000_DEV_ID_82576_QUAD_COPPER: - case E1000_DEV_ID_82576_QUAD_COPPER_ET2: - /* quad port adapters only support WoL on port A */ - if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { - wol->supported = 0; - break; - } - /* return success for non excluded adapter ports */ - retval = 0; - break; - default: - /* dual port cards only support WoL on port A from now on - * unless it was enabled in the eeprom for port B - * so exclude FUNC_1 ports from having WoL enabled */ - if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) && - !adapter->eeprom_wol) { - wol->supported = 0; - break; - } - - retval = 0; - } - - return retval; -} - -static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - - wol->supported = WAKE_UCAST | WAKE_MCAST | - WAKE_BCAST | WAKE_MAGIC | - WAKE_PHY; - wol->wolopts = 0; - - /* this function will set ->supported = 0 and return 1 if wol is not - * supported by this hardware */ - if (igb_wol_exclusion(adapter, wol) || - !device_can_wakeup(&adapter->pdev->dev)) - return; - - /* apply any specific unsupported masks here */ - switch (adapter->hw.device_id) { - default: - break; - } - - if (adapter->wol & E1000_WUFC_EX) - wol->wolopts |= WAKE_UCAST; - if (adapter->wol & E1000_WUFC_MC) - wol->wolopts |= WAKE_MCAST; - if (adapter->wol & E1000_WUFC_BC) - wol->wolopts |= WAKE_BCAST; - if (adapter->wol & E1000_WUFC_MAG) - wol->wolopts |= WAKE_MAGIC; - if (adapter->wol & E1000_WUFC_LNKC) - wol->wolopts |= WAKE_PHY; -} - -static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - - if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) - return -EOPNOTSUPP; - - if (igb_wol_exclusion(adapter, wol) || - !device_can_wakeup(&adapter->pdev->dev)) - return wol->wolopts ? 
-EOPNOTSUPP : 0; - - /* these settings will always override what we currently have */ - adapter->wol = 0; - - if (wol->wolopts & WAKE_UCAST) - adapter->wol |= E1000_WUFC_EX; - if (wol->wolopts & WAKE_MCAST) - adapter->wol |= E1000_WUFC_MC; - if (wol->wolopts & WAKE_BCAST) - adapter->wol |= E1000_WUFC_BC; - if (wol->wolopts & WAKE_MAGIC) - adapter->wol |= E1000_WUFC_MAG; - if (wol->wolopts & WAKE_PHY) - adapter->wol |= E1000_WUFC_LNKC; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); - - return 0; -} - -/* bit defines for adapter->led_status */ -#define IGB_LED_ON 0 - -static int igb_set_phys_id(struct net_device *netdev, - enum ethtool_phys_id_state state) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - igb_blink_led(hw); - return 2; - case ETHTOOL_ID_ON: - igb_blink_led(hw); - break; - case ETHTOOL_ID_OFF: - igb_led_off(hw); - break; - case ETHTOOL_ID_INACTIVE: - igb_led_off(hw); - clear_bit(IGB_LED_ON, &adapter->led_status); - igb_cleanup_led(hw); - break; - } - - return 0; -} - -static int igb_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - int i; - - if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || - ((ec->rx_coalesce_usecs > 3) && - (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || - (ec->rx_coalesce_usecs == 2)) - return -EINVAL; - - if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || - ((ec->tx_coalesce_usecs > 3) && - (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || - (ec->tx_coalesce_usecs == 2)) - return -EINVAL; - - if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) - return -EINVAL; - - /* If ITR is disabled, disable DMAC */ - if (ec->rx_coalesce_usecs == 0) { - if (adapter->flags & IGB_FLAG_DMAC) - adapter->flags &= ~IGB_FLAG_DMAC; - } - - /* convert to rate of irq's per second */ - if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) - adapter->rx_itr_setting = ec->rx_coalesce_usecs; - else - adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; - - /* convert to rate of irq's per second */ - if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) - adapter->tx_itr_setting = adapter->rx_itr_setting; - else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) - adapter->tx_itr_setting = ec->tx_coalesce_usecs; - else - adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; - - for (i = 0; i < adapter->num_q_vectors; i++) { - struct igb_q_vector *q_vector = adapter->q_vector[i]; - if (q_vector->rx_ring) - q_vector->itr_val = adapter->rx_itr_setting; - else - q_vector->itr_val = adapter->tx_itr_setting; - if (q_vector->itr_val && q_vector->itr_val <= 3) - q_vector->itr_val = IGB_START_ITR; - q_vector->set_itr = 1; - } - - return 0; -} - -static int igb_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - - if (adapter->rx_itr_setting <= 3) - ec->rx_coalesce_usecs = adapter->rx_itr_setting; - else - ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; - - if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { - if (adapter->tx_itr_setting <= 3) - ec->tx_coalesce_usecs = adapter->tx_itr_setting; - else - ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; - } - - return 0; -} - -static int igb_nway_reset(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - if (netif_running(netdev)) - igb_reinit_locked(adapter); - return 0; -} - -static int igb_get_sset_count(struct net_device 
*netdev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return IGB_STATS_LEN; - case ETH_SS_TEST: - return IGB_TEST_LEN; - default: - return -ENOTSUPP; - } -} - -static void igb_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct rtnl_link_stats64 *net_stats = &adapter->stats64; - unsigned int start; - struct igb_ring *ring; - int i, j; - char *p; - - spin_lock(&adapter->stats64_lock); - igb_update_stats(adapter, net_stats); - - for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { - p = (char *)adapter + igb_gstrings_stats[i].stat_offset; - data[i] = (igb_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { - p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; - data[i] = (igb_gstrings_net_stats[j].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - for (j = 0; j < adapter->num_tx_queues; j++) { - u64 restart2; - - ring = adapter->tx_ring[j]; - do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); - data[i] = ring->tx_stats.packets; - data[i+1] = ring->tx_stats.bytes; - data[i+2] = ring->tx_stats.restart_queue; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); - do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp2); - restart2 = ring->tx_stats.restart_queue2; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start)); - data[i+2] += restart2; - - i += IGB_TX_QUEUE_STATS_LEN; - } - for (j = 0; j < adapter->num_rx_queues; j++) { - ring = adapter->rx_ring[j]; - do { - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); - data[i] = ring->rx_stats.packets; - data[i+1] = ring->rx_stats.bytes; - data[i+2] = ring->rx_stats.drops; - data[i+3] = ring->rx_stats.csum_err; - data[i+4] = ring->rx_stats.alloc_failed; - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); - i += IGB_RX_QUEUE_STATS_LEN; - } - spin_unlock(&adapter->stats64_lock); -} - -static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *igb_gstrings_test, - IGB_TEST_LEN*ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { - memcpy(p, igb_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) { - memcpy(p, igb_gstrings_net_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_tx_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_restart", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_rx_queues; i++) { - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_drops", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_csum_err", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_alloc_failed", i); - p += ETH_GSTRING_LEN; - } -/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ - break; - } -} - -static const struct ethtool_ops igb_ethtool_ops = { - .get_settings = igb_get_settings, - .set_settings = igb_set_settings, - .get_drvinfo = igb_get_drvinfo, - .get_regs_len = igb_get_regs_len, - .get_regs = igb_get_regs, - .get_wol = igb_get_wol, - .set_wol = igb_set_wol, - 
.get_msglevel = igb_get_msglevel, - .set_msglevel = igb_set_msglevel, - .nway_reset = igb_nway_reset, - .get_link = igb_get_link, - .get_eeprom_len = igb_get_eeprom_len, - .get_eeprom = igb_get_eeprom, - .set_eeprom = igb_set_eeprom, - .get_ringparam = igb_get_ringparam, - .set_ringparam = igb_set_ringparam, - .get_pauseparam = igb_get_pauseparam, - .set_pauseparam = igb_set_pauseparam, - .self_test = igb_diag_test, - .get_strings = igb_get_strings, - .set_phys_id = igb_set_phys_id, - .get_sset_count = igb_get_sset_count, - .get_ethtool_stats = igb_get_ethtool_stats, - .get_coalesce = igb_get_coalesce, - .set_coalesce = igb_set_coalesce, -}; - -void igb_set_ethtool_ops(struct net_device *netdev) -{ - SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops); -} diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c deleted file mode 100644 index 40d4c40..0000000 --- a/drivers/net/igb/igb_main.c +++ /dev/null @@ -1,6890 +0,0 @@ -/******************************************************************************* - - Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef CONFIG_IGB_DCA -#include -#endif -#include "igb.h" - -#define MAJ 3 -#define MIN 0 -#define BUILD 6 -#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ -__stringify(BUILD) "-k" -char igb_driver_name[] = "igb"; -char igb_driver_version[] = DRV_VERSION; -static const char igb_driver_string[] = - "Intel(R) Gigabit Ethernet Network Driver"; -static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation."; - -static const struct e1000_info *igb_info_tbl[] = { - [board_82575] = &e1000_82575_info, -}; - -static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, - /* required last entry */ - {0, } -}; - -MODULE_DEVICE_TABLE(pci, igb_pci_tbl); - -void igb_reset(struct igb_adapter *); -static int igb_setup_all_tx_resources(struct igb_adapter *); -static int igb_setup_all_rx_resources(struct igb_adapter *); -static void igb_free_all_tx_resources(struct igb_adapter *); -static void igb_free_all_rx_resources(struct igb_adapter *); -static void igb_setup_mrqc(struct igb_adapter *); -static int igb_probe(struct pci_dev *, const struct pci_device_id *); -static void __devexit igb_remove(struct pci_dev *pdev); -static void igb_init_hw_timer(struct igb_adapter *adapter); -static int igb_sw_init(struct igb_adapter *); -static int igb_open(struct net_device *); -static int igb_close(struct net_device *); -static void igb_configure_tx(struct igb_adapter *); -static void igb_configure_rx(struct igb_adapter *); -static void igb_clean_all_tx_rings(struct igb_adapter *); -static void igb_clean_all_rx_rings(struct igb_adapter *); -static void igb_clean_tx_ring(struct igb_ring *); -static void igb_clean_rx_ring(struct igb_ring *); -static void igb_set_rx_mode(struct net_device *); -static void igb_update_phy_info(unsigned long); -static void igb_watchdog(unsigned long); -static void igb_watchdog_task(struct work_struct *); -static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *); -static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, - struct 
rtnl_link_stats64 *stats); -static int igb_change_mtu(struct net_device *, int); -static int igb_set_mac(struct net_device *, void *); -static void igb_set_uta(struct igb_adapter *adapter); -static irqreturn_t igb_intr(int irq, void *); -static irqreturn_t igb_intr_msi(int irq, void *); -static irqreturn_t igb_msix_other(int irq, void *); -static irqreturn_t igb_msix_ring(int irq, void *); -#ifdef CONFIG_IGB_DCA -static void igb_update_dca(struct igb_q_vector *); -static void igb_setup_dca(struct igb_adapter *); -#endif /* CONFIG_IGB_DCA */ -static bool igb_clean_tx_irq(struct igb_q_vector *); -static int igb_poll(struct napi_struct *, int); -static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int); -static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); -static void igb_tx_timeout(struct net_device *); -static void igb_reset_task(struct work_struct *); -static void igb_vlan_mode(struct net_device *netdev, u32 features); -static void igb_vlan_rx_add_vid(struct net_device *, u16); -static void igb_vlan_rx_kill_vid(struct net_device *, u16); -static void igb_restore_vlan(struct igb_adapter *); -static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); -static void igb_ping_all_vfs(struct igb_adapter *); -static void igb_msg_task(struct igb_adapter *); -static void igb_vmm_control(struct igb_adapter *); -static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); -static void igb_restore_vf_multicasts(struct igb_adapter *adapter); -static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); -static int igb_ndo_set_vf_vlan(struct net_device *netdev, - int vf, u16 vlan, u8 qos); -static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); -static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, - struct ifla_vf_info *ivi); -static void igb_check_vf_rate_limit(struct igb_adapter *); - -#ifdef CONFIG_PM -static int igb_suspend(struct pci_dev *, pm_message_t); -static int igb_resume(struct pci_dev *); -#endif -static void igb_shutdown(struct pci_dev *); -#ifdef CONFIG_IGB_DCA -static int igb_notify_dca(struct notifier_block *, unsigned long, void *); -static struct notifier_block dca_notifier = { - .notifier_call = igb_notify_dca, - .next = NULL, - .priority = 0 -}; -#endif -#ifdef CONFIG_NET_POLL_CONTROLLER -/* for netdump / net console */ -static void igb_netpoll(struct net_device *); -#endif -#ifdef CONFIG_PCI_IOV -static unsigned int max_vfs = 0; -module_param(max_vfs, uint, 0); -MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " - "per physical function"); -#endif /* CONFIG_PCI_IOV */ - -static pci_ers_result_t igb_io_error_detected(struct pci_dev *, - pci_channel_state_t); -static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); -static void igb_io_resume(struct pci_dev *); - -static struct pci_error_handlers igb_err_handler = { - .error_detected = igb_io_error_detected, - .slot_reset = igb_io_slot_reset, - .resume = igb_io_resume, -}; - - -static struct pci_driver igb_driver = { - .name = igb_driver_name, - .id_table = igb_pci_tbl, - .probe = igb_probe, - .remove = __devexit_p(igb_remove), -#ifdef CONFIG_PM - /* Power Management Hooks */ - .suspend = igb_suspend, - .resume = igb_resume, -#endif - .shutdown = igb_shutdown, - .err_handler = &igb_err_handler -}; - -MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -struct igb_reg_info { - u32 ofs; - char *name; -}; - 
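struct igb_reg_info above and the igb_reg_info_tbl that follows implement a common dump idiom: an {offset, name} table terminated by an empty entry, walked until the name pointer goes NULL, with igb_regdump() printing each register as "%-15s %08x". A self-contained sketch of the same idiom, under stated assumptions (fake_read32() stands in for the driver's rd32() MMIO accessor; the offsets shown are for illustration only):

#include <stdio.h>

/* Same shape as struct igb_reg_info. */
struct reg_info {
	unsigned int ofs;
	const char *name;
};

/* A few illustrative entries; the zeroed initializer terminates the
 * table, like the bare {} list terminator in igb_reg_info_tbl. */
static const struct reg_info reg_tbl[] = {
	{0x00000, "CTRL"},
	{0x00008, "STATUS"},
	{0x00100, "RCTL"},
	{0, NULL}
};

/* Stand-in for the driver's rd32() MMIO accessor: no real hardware here,
 * so just derive a dummy value from the offset. */
static unsigned int fake_read32(unsigned int ofs)
{
	return 0xDEADBEEF ^ ofs;
}

int main(void)
{
	const struct reg_info *r;

	/* Walk until the NULL name, as igb_dump() does over its table. */
	for (r = reg_tbl; r->name; r++)
		printf("%-15s %08x\n", r->name, fake_read32(r->ofs));
	return 0;
}

One wrinkle worth noticing in igb_regdump() below: the E1000_TDBAL(0) case reads E1000_RDBAL(n) rather than E1000_TDBAL(n), so the TDBAL row of a register dump actually repeats the receive base-address registers.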
-static const struct igb_reg_info igb_reg_info_tbl[] = { - - /* General Registers */ - {E1000_CTRL, "CTRL"}, - {E1000_STATUS, "STATUS"}, - {E1000_CTRL_EXT, "CTRL_EXT"}, - - /* Interrupt Registers */ - {E1000_ICR, "ICR"}, - - /* RX Registers */ - {E1000_RCTL, "RCTL"}, - {E1000_RDLEN(0), "RDLEN"}, - {E1000_RDH(0), "RDH"}, - {E1000_RDT(0), "RDT"}, - {E1000_RXDCTL(0), "RXDCTL"}, - {E1000_RDBAL(0), "RDBAL"}, - {E1000_RDBAH(0), "RDBAH"}, - - /* TX Registers */ - {E1000_TCTL, "TCTL"}, - {E1000_TDBAL(0), "TDBAL"}, - {E1000_TDBAH(0), "TDBAH"}, - {E1000_TDLEN(0), "TDLEN"}, - {E1000_TDH(0), "TDH"}, - {E1000_TDT(0), "TDT"}, - {E1000_TXDCTL(0), "TXDCTL"}, - {E1000_TDFH, "TDFH"}, - {E1000_TDFT, "TDFT"}, - {E1000_TDFHS, "TDFHS"}, - {E1000_TDFPC, "TDFPC"}, - - /* List Terminator */ - {} -}; - -/* - * igb_regdump - register printout routine - */ -static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) -{ - int n = 0; - char rname[16]; - u32 regs[8]; - - switch (reginfo->ofs) { - case E1000_RDLEN(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDLEN(n)); - break; - case E1000_RDH(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDH(n)); - break; - case E1000_RDT(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDT(n)); - break; - case E1000_RXDCTL(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RXDCTL(n)); - break; - case E1000_RDBAL(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDBAL(n)); - break; - case E1000_RDBAH(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDBAH(n)); - break; - case E1000_TDBAL(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_RDBAL(n)); - break; - case E1000_TDBAH(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_TDBAH(n)); - break; - case E1000_TDLEN(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_TDLEN(n)); - break; - case E1000_TDH(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_TDH(n)); - break; - case E1000_TDT(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_TDT(n)); - break; - case E1000_TXDCTL(0): - for (n = 0; n < 4; n++) - regs[n] = rd32(E1000_TXDCTL(n)); - break; - default: - printk(KERN_INFO "%-15s %08x\n", - reginfo->name, rd32(reginfo->ofs)); - return; - } - - snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); - printk(KERN_INFO "%-15s ", rname); - for (n = 0; n < 4; n++) - printk(KERN_CONT "%08x ", regs[n]); - printk(KERN_CONT "\n"); -} - -/* - * igb_dump - Print registers, tx-rings and rx-rings - */ -static void igb_dump(struct igb_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - struct igb_reg_info *reginfo; - int n = 0; - struct igb_ring *tx_ring; - union e1000_adv_tx_desc *tx_desc; - struct my_u0 { u64 a; u64 b; } *u0; - struct igb_buffer *buffer_info; - struct igb_ring *rx_ring; - union e1000_adv_rx_desc *rx_desc; - u32 staterr; - int i = 0; - - if (!netif_msg_hw(adapter)) - return; - - /* Print netdevice Info */ - if (netdev) { - dev_info(&adapter->pdev->dev, "Net device Info\n"); - printk(KERN_INFO "Device Name state " - "trans_start last_rx\n"); - printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", - netdev->name, - netdev->state, - netdev->trans_start, - netdev->last_rx); - } - - /* Print Registers */ - dev_info(&adapter->pdev->dev, "Register Dump\n"); - printk(KERN_INFO " Register Name Value\n"); - for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; - reginfo->name; reginfo++) { - igb_regdump(hw, reginfo); - } - - /* Print TX Ring Summary */ - if (!netdev || !netif_running(netdev)) - goto exit; - - dev_info(&adapter->pdev->dev, 
"TX Rings Summary\n"); - printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" - " leng ntw timestamp\n"); - for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; - buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; - printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", - n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - (u64)buffer_info->time_stamp); - } - - /* Print TX Rings */ - if (!netif_msg_tx_done(adapter)) - goto rx_ring_summary; - - dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); - - /* Transmit Descriptor Formats - * - * Advanced Transmit Descriptor - * +--------------------------------------------------------------+ - * 0 | Buffer Address [63:0] | - * +--------------------------------------------------------------+ - * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | - * +--------------------------------------------------------------+ - * 63 46 45 40 39 38 36 35 32 31 24 15 0 - */ - - for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "T [desc] [address 63:0 ] " - "[PlPOCIStDDM Ln] [bi->dma ] " - "leng ntw timestamp bi->skb\n"); - - for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - u0 = (struct my_u0 *)tx_desc; - printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" - " %04X %3X %016llX %p", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)buffer_info->dma, - buffer_info->length, - buffer_info->next_to_watch, - (u64)buffer_info->time_stamp, - buffer_info->skb); - if (i == tx_ring->next_to_use && - i == tx_ring->next_to_clean) - printk(KERN_CONT " NTC/U\n"); - else if (i == tx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); - else if (i == tx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); - else - printk(KERN_CONT "\n"); - - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); - } - } - - /* Print RX Rings Summary */ -rx_ring_summary: - dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); - printk(KERN_INFO "Queue [NTU] [NTC]\n"); - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - printk(KERN_INFO " %5d %5X %5X\n", n, - rx_ring->next_to_use, rx_ring->next_to_clean); - } - - /* Print RX Rings */ - if (!netif_msg_rx_status(adapter)) - goto exit; - - dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); - - /* Advanced Receive Descriptor (Read) Format - * 63 1 0 - * +-----------------------------------------------------+ - * 0 | Packet Buffer Address [63:1] |A0/NSE| - * +----------------------------------------------+------+ - * 8 | Header Buffer Address [63:1] | DD | - * +-----------------------------------------------------+ - * - * - * Advanced Receive Descriptor (Write-Back) Format - * - * 63 48 47 32 31 30 21 20 17 16 4 3 0 - * +------------------------------------------------------+ - * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | - * | Checksum Ident | | | | Type | Type | - * +------------------------------------------------------+ - * 8 | VLAN Tag | Length | Extended Error | Extended Status | - * 
+------------------------------------------------------+ - * 63 48 47 32 31 20 19 0 - */ - - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "R [desc] [ PktBuf A0] " - "[ HeadBuf DD] [bi->dma ] [bi->skb] " - "<-- Adv Rx Read format\n"); - printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] " - "[vl er S cks ln] ---------------- [bi->skb] " - "<-- Adv Rx Write-Back format\n"); - - for (i = 0; i < rx_ring->count; i++) { - buffer_info = &rx_ring->buffer_info[i]; - rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); - u0 = (struct my_u0 *)rx_desc; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - if (staterr & E1000_RXD_STAT_DD) { - /* Descriptor Done */ - printk(KERN_INFO "RWB[0x%03X] %016llX " - "%016llX ---------------- %p", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - buffer_info->skb); - } else { - printk(KERN_INFO "R [0x%03X] %016llX " - "%016llX %016llX %p", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)buffer_info->dma, - buffer_info->skb); - - if (netif_msg_pktdata(adapter)) { - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt(buffer_info->dma), - rx_ring->rx_buffer_len, true); - if (rx_ring->rx_buffer_len - < IGB_RXBUFFER_1024) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt( - buffer_info->page_dma + - buffer_info->page_offset), - PAGE_SIZE/2, true); - } - } - - if (i == rx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); - else if (i == rx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); - else - printk(KERN_CONT "\n"); - - } - } - -exit: - return; -} - - -/** - * igb_read_clock - read raw cycle counter (to be used by time counter) - */ -static cycle_t igb_read_clock(const struct cyclecounter *tc) -{ - struct igb_adapter *adapter = - container_of(tc, struct igb_adapter, cycles); - struct e1000_hw *hw = &adapter->hw; - u64 stamp = 0; - int shift = 0; - - /* - * The timestamp latches on lowest register read. For the 82580 - * the lowest register is SYSTIMR instead of SYSTIML. However we never - * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it. - */ - if (hw->mac.type == e1000_82580) { - stamp = rd32(E1000_SYSTIMR) >> 8; - shift = IGB_82580_TSYNC_SHIFT; - } - - stamp |= (u64)rd32(E1000_SYSTIML) << shift; - stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32); - return stamp; -} - -/** - * igb_get_hw_dev - return device - * used by hardware layer to print debugging information - **/ -struct net_device *igb_get_hw_dev(struct e1000_hw *hw) -{ - struct igb_adapter *adapter = hw->back; - return adapter->netdev; -} - -/** - * igb_init_module - Driver Registration Routine - * - * igb_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - **/ -static int __init igb_init_module(void) -{ - int ret; - printk(KERN_INFO "%s - version %s\n", - igb_driver_string, igb_driver_version); - - printk(KERN_INFO "%s\n", igb_copyright); - -#ifdef CONFIG_IGB_DCA - dca_register_notify(&dca_notifier); -#endif - ret = pci_register_driver(&igb_driver); - return ret; -} - -module_init(igb_init_module); - -/** - * igb_exit_module - Driver Exit Cleanup Routine - * - * igb_exit_module is called just before the driver is removed - * from memory. 
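- * Per-device teardown does not happen here: the PCI core invokes
- * igb_remove() for each bound device as part of
- * pci_unregister_driver().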
- **/ -static void __exit igb_exit_module(void) -{ -#ifdef CONFIG_IGB_DCA - dca_unregister_notify(&dca_notifier); -#endif - pci_unregister_driver(&igb_driver); -} - -module_exit(igb_exit_module); - -#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) -/** - * igb_cache_ring_register - Descriptor ring to register mapping - * @adapter: board private structure to initialize - * - * Once we know the feature-set enabled for the device, we'll cache - * the register offset the descriptor ring is assigned to. - **/ -static void igb_cache_ring_register(struct igb_adapter *adapter) -{ - int i = 0, j = 0; - u32 rbase_offset = adapter->vfs_allocated_count; - - switch (adapter->hw.mac.type) { - case e1000_82576: - /* The queues are allocated for virtualization such that VF 0 - * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. - * In order to avoid collision we start at the first free queue - * and continue consuming queues in the same sequence - */ - if (adapter->vfs_allocated_count) { - for (; i < adapter->rss_queues; i++) - adapter->rx_ring[i]->reg_idx = rbase_offset + - Q_IDX_82576(i); - } - case e1000_82575: - case e1000_82580: - case e1000_i350: - default: - for (; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = rbase_offset + i; - for (; j < adapter->num_tx_queues; j++) - adapter->tx_ring[j]->reg_idx = rbase_offset + j; - break; - } -} - -static void igb_free_queues(struct igb_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - kfree(adapter->tx_ring[i]); - adapter->tx_ring[i] = NULL; - } - for (i = 0; i < adapter->num_rx_queues; i++) { - kfree(adapter->rx_ring[i]); - adapter->rx_ring[i] = NULL; - } - adapter->num_rx_queues = 0; - adapter->num_tx_queues = 0; -} - -/** - * igb_alloc_queues - Allocate memory for all rings - * @adapter: board private structure to initialize - * - * We allocate one ring per queue at run-time since we don't know the - * number of queues at compile-time. - **/ -static int igb_alloc_queues(struct igb_adapter *adapter) -{ - struct igb_ring *ring; - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); - if (!ring) - goto err; - ring->count = adapter->tx_ring_count; - ring->queue_index = i; - ring->dev = &adapter->pdev->dev; - ring->netdev = adapter->netdev; - /* For 82575, context index must be unique per ring. 
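-		   (with IGB_RING_FLAG_TX_CTX_IDX set, the transmit path
-		   tags offload context descriptors with the ring's own
-		   index, so contexts from different rings cannot clobber
-		   each other)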
 */
-		if (adapter->hw.mac.type == e1000_82575)
-			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
-		adapter->tx_ring[i] = ring;
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
-		if (!ring)
-			goto err;
-		ring->count = adapter->rx_ring_count;
-		ring->queue_index = i;
-		ring->dev = &adapter->pdev->dev;
-		ring->netdev = adapter->netdev;
-		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
-		/* set flag indicating ring supports SCTP checksum offload */
-		if (adapter->hw.mac.type >= e1000_82576)
-			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
-		adapter->rx_ring[i] = ring;
-	}
-
-	igb_cache_ring_register(adapter);
-
-	return 0;
-
-err:
-	igb_free_queues(adapter);
-
-	return -ENOMEM;
-}
-
-#define IGB_N0_QUEUE -1
-static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
-{
-	u32 msixbm = 0;
-	struct igb_adapter *adapter = q_vector->adapter;
-	struct e1000_hw *hw = &adapter->hw;
-	u32 ivar, index;
-	int rx_queue = IGB_N0_QUEUE;
-	int tx_queue = IGB_N0_QUEUE;
-
-	if (q_vector->rx_ring)
-		rx_queue = q_vector->rx_ring->reg_idx;
-	if (q_vector->tx_ring)
-		tx_queue = q_vector->tx_ring->reg_idx;
-
-	switch (hw->mac.type) {
-	case e1000_82575:
-		/* The 82575 assigns vectors using a bitmask, which matches the
-		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
-		   or more queues to a vector, we write the appropriate bits
-		   into the MSIXBM register for that vector. */
-		if (rx_queue > IGB_N0_QUEUE)
-			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
-		if (tx_queue > IGB_N0_QUEUE)
-			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
-		if (!adapter->msix_entries && msix_vector == 0)
-			msixbm |= E1000_EIMS_OTHER;
-		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
-		q_vector->eims_value = msixbm;
-		break;
-	case e1000_82576:
-		/* 82576 uses a table-based method for assigning vectors.
-		   Each queue has a single entry in the table to which we write
-		   a vector number along with a "valid" bit.  Sadly, the layout
-		   of the table is somewhat counterintuitive. */
-		if (rx_queue > IGB_N0_QUEUE) {
-			index = (rx_queue & 0x7);
-			ivar = array_rd32(E1000_IVAR0, index);
-			if (rx_queue < 8) {
-				/* vector goes into low byte of register */
-				ivar = ivar & 0xFFFFFF00;
-				ivar |= msix_vector | E1000_IVAR_VALID;
-			} else {
-				/* vector goes into third byte of register */
-				ivar = ivar & 0xFF00FFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
-			}
-			array_wr32(E1000_IVAR0, index, ivar);
-		}
-		if (tx_queue > IGB_N0_QUEUE) {
-			index = (tx_queue & 0x7);
-			ivar = array_rd32(E1000_IVAR0, index);
-			if (tx_queue < 8) {
-				/* vector goes into second byte of register */
-				ivar = ivar & 0xFFFF00FF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
-			} else {
-				/* vector goes into high byte of register */
-				ivar = ivar & 0x00FFFFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
-			}
-			array_wr32(E1000_IVAR0, index, ivar);
-		}
-		q_vector->eims_value = 1 << msix_vector;
-		break;
-	case e1000_82580:
-	case e1000_i350:
-		/* The 82580 uses the same table-based approach as the 82576,
-		   but the table has half as many entries: each register entry
-		   serves a pair of queues, with the even-numbered queue in the
-		   lower half and the odd-numbered queue in the upper half.
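-		   For example, RX queue 5 uses IVAR index 5 >> 1 = 2 and,
-		   being odd-numbered, its vector is written to the third
-		   byte of that entry.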
*/ - if (rx_queue > IGB_N0_QUEUE) { - index = (rx_queue >> 1); - ivar = array_rd32(E1000_IVAR0, index); - if (rx_queue & 0x1) { - /* vector goes into third byte of register */ - ivar = ivar & 0xFF00FFFF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 16; - } else { - /* vector goes into low byte of register */ - ivar = ivar & 0xFFFFFF00; - ivar |= msix_vector | E1000_IVAR_VALID; - } - array_wr32(E1000_IVAR0, index, ivar); - } - if (tx_queue > IGB_N0_QUEUE) { - index = (tx_queue >> 1); - ivar = array_rd32(E1000_IVAR0, index); - if (tx_queue & 0x1) { - /* vector goes into high byte of register */ - ivar = ivar & 0x00FFFFFF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 24; - } else { - /* vector goes into second byte of register */ - ivar = ivar & 0xFFFF00FF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 8; - } - array_wr32(E1000_IVAR0, index, ivar); - } - q_vector->eims_value = 1 << msix_vector; - break; - default: - BUG(); - break; - } - - /* add q_vector eims value to global eims_enable_mask */ - adapter->eims_enable_mask |= q_vector->eims_value; - - /* configure q_vector to set itr on first interrupt */ - q_vector->set_itr = 1; -} - -/** - * igb_configure_msix - Configure MSI-X hardware - * - * igb_configure_msix sets up the hardware to properly - * generate MSI-X interrupts. - **/ -static void igb_configure_msix(struct igb_adapter *adapter) -{ - u32 tmp; - int i, vector = 0; - struct e1000_hw *hw = &adapter->hw; - - adapter->eims_enable_mask = 0; - - /* set vector for other causes, i.e. link changes */ - switch (hw->mac.type) { - case e1000_82575: - tmp = rd32(E1000_CTRL_EXT); - /* enable MSI-X PBA support*/ - tmp |= E1000_CTRL_EXT_PBA_CLR; - - /* Auto-Mask interrupts upon ICR read. */ - tmp |= E1000_CTRL_EXT_EIAME; - tmp |= E1000_CTRL_EXT_IRCA; - - wr32(E1000_CTRL_EXT, tmp); - - /* enable msix_other interrupt */ - array_wr32(E1000_MSIXBM(0), vector++, - E1000_EIMS_OTHER); - adapter->eims_other = E1000_EIMS_OTHER; - - break; - - case e1000_82576: - case e1000_82580: - case e1000_i350: - /* Turn on MSI-X capability first, or our settings - * won't stick. And it will take days to debug. */ - wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | - E1000_GPIE_PBA | E1000_GPIE_EIAME | - E1000_GPIE_NSICR); - - /* enable msix_other interrupt */ - adapter->eims_other = 1 << vector; - tmp = (vector++ | E1000_IVAR_VALID) << 8; - - wr32(E1000_IVAR_MISC, tmp); - break; - default: - /* do nothing, since nothing else supports MSI-X */ - break; - } /* switch (hw->mac.type) */ - - adapter->eims_enable_mask |= adapter->eims_other; - - for (i = 0; i < adapter->num_q_vectors; i++) - igb_assign_vector(adapter->q_vector[i], vector++); - - wrfl(); -} - -/** - * igb_request_msix - Initialize MSI-X interrupts - * - * igb_request_msix allocates MSI-X vectors and requests interrupts from the - * kernel. 
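- * Vector 0 carries the "other" interrupt (link changes and, when
- * SR-IOV is active, VF mailbox events); every remaining vector is
- * bound to one q_vector, mirroring the layout that
- * igb_configure_msix() programs into the hardware.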
- **/ -static int igb_request_msix(struct igb_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - int i, err = 0, vector = 0; - - err = request_irq(adapter->msix_entries[vector].vector, - igb_msix_other, 0, netdev->name, adapter); - if (err) - goto out; - vector++; - - for (i = 0; i < adapter->num_q_vectors; i++) { - struct igb_q_vector *q_vector = adapter->q_vector[i]; - - q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); - - if (q_vector->rx_ring && q_vector->tx_ring) - sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, - q_vector->rx_ring->queue_index); - else if (q_vector->tx_ring) - sprintf(q_vector->name, "%s-tx-%u", netdev->name, - q_vector->tx_ring->queue_index); - else if (q_vector->rx_ring) - sprintf(q_vector->name, "%s-rx-%u", netdev->name, - q_vector->rx_ring->queue_index); - else - sprintf(q_vector->name, "%s-unused", netdev->name); - - err = request_irq(adapter->msix_entries[vector].vector, - igb_msix_ring, 0, q_vector->name, - q_vector); - if (err) - goto out; - vector++; - } - - igb_configure_msix(adapter); - return 0; -out: - return err; -} - -static void igb_reset_interrupt_capability(struct igb_adapter *adapter) -{ - if (adapter->msix_entries) { - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } else if (adapter->flags & IGB_FLAG_HAS_MSI) { - pci_disable_msi(adapter->pdev); - } -} - -/** - * igb_free_q_vectors - Free memory allocated for interrupt vectors - * @adapter: board private structure to initialize - * - * This function frees the memory allocated to the q_vectors. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. - **/ -static void igb_free_q_vectors(struct igb_adapter *adapter) -{ - int v_idx; - - for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; - adapter->q_vector[v_idx] = NULL; - if (!q_vector) - continue; - netif_napi_del(&q_vector->napi); - kfree(q_vector); - } - adapter->num_q_vectors = 0; -} - -/** - * igb_clear_interrupt_scheme - reset the device to a state of no interrupts - * - * This function resets the device so that it has 0 rx queues, tx queues, and - * MSI-X interrupts allocated. - */ -static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) -{ - igb_free_queues(adapter); - igb_free_q_vectors(adapter); - igb_reset_interrupt_capability(adapter); -} - -/** - * igb_set_interrupt_capability - set MSI or MSI-X if supported - * - * Attempt to configure interrupts using the best available - * capabilities of the hardware and kernel. - **/ -static int igb_set_interrupt_capability(struct igb_adapter *adapter) -{ - int err; - int numvecs, i; - - /* Number of supported queues. 
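-	   For example, rss_queues = 4 with no VFs gives 4 RX and 4 TX
-	   queues; with queue pairing off that is 4 + 4 + 1 (link) = 9
-	   MSI-X vectors requested below.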
*/ - adapter->num_rx_queues = adapter->rss_queues; - if (adapter->vfs_allocated_count) - adapter->num_tx_queues = 1; - else - adapter->num_tx_queues = adapter->rss_queues; - - /* start with one vector for every rx queue */ - numvecs = adapter->num_rx_queues; - - /* if tx handler is separate add 1 for every tx queue */ - if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) - numvecs += adapter->num_tx_queues; - - /* store the number of vectors reserved for queues */ - adapter->num_q_vectors = numvecs; - - /* add 1 vector for link status interrupts */ - numvecs++; - adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), - GFP_KERNEL); - if (!adapter->msix_entries) - goto msi_only; - - for (i = 0; i < numvecs; i++) - adapter->msix_entries[i].entry = i; - - err = pci_enable_msix(adapter->pdev, - adapter->msix_entries, - numvecs); - if (err == 0) - goto out; - - igb_reset_interrupt_capability(adapter); - - /* If we can't do MSI-X, try MSI */ -msi_only: -#ifdef CONFIG_PCI_IOV - /* disable SR-IOV for non MSI-X configurations */ - if (adapter->vf_data) { - struct e1000_hw *hw = &adapter->hw; - /* disable iov and allow time for transactions to clear */ - pci_disable_sriov(adapter->pdev); - msleep(500); - - kfree(adapter->vf_data); - adapter->vf_data = NULL; - wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); - wrfl(); - msleep(100); - dev_info(&adapter->pdev->dev, "IOV Disabled\n"); - } -#endif - adapter->vfs_allocated_count = 0; - adapter->rss_queues = 1; - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - adapter->num_q_vectors = 1; - if (!pci_enable_msi(adapter->pdev)) - adapter->flags |= IGB_FLAG_HAS_MSI; -out: - /* Notify the stack of the (possibly) reduced queue counts. */ - netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); - return netif_set_real_num_rx_queues(adapter->netdev, - adapter->num_rx_queues); -} - -/** - * igb_alloc_q_vectors - Allocate memory for interrupt vectors - * @adapter: board private structure to initialize - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. 
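- * A q_vector is not tied to one ring type: with queue pairing in
- * effect a single q_vector (and therefore a single interrupt)
- * services an RX ring and a TX ring together, as set up by
- * igb_map_ring_to_vector().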
- **/ -static int igb_alloc_q_vectors(struct igb_adapter *adapter) -{ - struct igb_q_vector *q_vector; - struct e1000_hw *hw = &adapter->hw; - int v_idx; - - for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { - q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL); - if (!q_vector) - goto err_out; - q_vector->adapter = adapter; - q_vector->itr_register = hw->hw_addr + E1000_EITR(0); - q_vector->itr_val = IGB_START_ITR; - netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); - adapter->q_vector[v_idx] = q_vector; - } - return 0; - -err_out: - igb_free_q_vectors(adapter); - return -ENOMEM; -} - -static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, - int ring_idx, int v_idx) -{ - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; - - q_vector->rx_ring = adapter->rx_ring[ring_idx]; - q_vector->rx_ring->q_vector = q_vector; - q_vector->itr_val = adapter->rx_itr_setting; - if (q_vector->itr_val && q_vector->itr_val <= 3) - q_vector->itr_val = IGB_START_ITR; -} - -static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, - int ring_idx, int v_idx) -{ - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; - - q_vector->tx_ring = adapter->tx_ring[ring_idx]; - q_vector->tx_ring->q_vector = q_vector; - q_vector->itr_val = adapter->tx_itr_setting; - if (q_vector->itr_val && q_vector->itr_val <= 3) - q_vector->itr_val = IGB_START_ITR; -} - -/** - * igb_map_ring_to_vector - maps allocated queues to vectors - * - * This function maps the recently allocated queues to vectors. - **/ -static int igb_map_ring_to_vector(struct igb_adapter *adapter) -{ - int i; - int v_idx = 0; - - if ((adapter->num_q_vectors < adapter->num_rx_queues) || - (adapter->num_q_vectors < adapter->num_tx_queues)) - return -ENOMEM; - - if (adapter->num_q_vectors >= - (adapter->num_rx_queues + adapter->num_tx_queues)) { - for (i = 0; i < adapter->num_rx_queues; i++) - igb_map_rx_ring_to_vector(adapter, i, v_idx++); - for (i = 0; i < adapter->num_tx_queues; i++) - igb_map_tx_ring_to_vector(adapter, i, v_idx++); - } else { - for (i = 0; i < adapter->num_rx_queues; i++) { - if (i < adapter->num_tx_queues) - igb_map_tx_ring_to_vector(adapter, i, v_idx); - igb_map_rx_ring_to_vector(adapter, i, v_idx++); - } - for (; i < adapter->num_tx_queues; i++) - igb_map_tx_ring_to_vector(adapter, i, v_idx++); - } - return 0; -} - -/** - * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors - * - * This function initializes the interrupts and allocates all of the queues. - **/ -static int igb_init_interrupt_scheme(struct igb_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - int err; - - err = igb_set_interrupt_capability(adapter); - if (err) - return err; - - err = igb_alloc_q_vectors(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); - goto err_alloc_q_vectors; - } - - err = igb_alloc_queues(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - - err = igb_map_ring_to_vector(adapter); - if (err) { - dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n"); - goto err_map_queues; - } - - - return 0; -err_map_queues: - igb_free_queues(adapter); -err_alloc_queues: - igb_free_q_vectors(adapter); -err_alloc_q_vectors: - igb_reset_interrupt_capability(adapter); - return err; -} - -/** - * igb_request_irq - initialize interrupts - * - * Attempts to configure interrupts using the best available - * capabilities of the hardware and kernel. 
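- * The fallback order is MSI-X, then MSI, then a legacy shared
- * interrupt; each step down also collapses the driver to a single
- * RX queue and a single TX queue.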
- **/ -static int igb_request_irq(struct igb_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - int err = 0; - - if (adapter->msix_entries) { - err = igb_request_msix(adapter); - if (!err) - goto request_done; - /* fall back to MSI */ - igb_clear_interrupt_scheme(adapter); - if (!pci_enable_msi(adapter->pdev)) - adapter->flags |= IGB_FLAG_HAS_MSI; - igb_free_all_tx_resources(adapter); - igb_free_all_rx_resources(adapter); - adapter->num_tx_queues = 1; - adapter->num_rx_queues = 1; - adapter->num_q_vectors = 1; - err = igb_alloc_q_vectors(adapter); - if (err) { - dev_err(&pdev->dev, - "Unable to allocate memory for vectors\n"); - goto request_done; - } - err = igb_alloc_queues(adapter); - if (err) { - dev_err(&pdev->dev, - "Unable to allocate memory for queues\n"); - igb_free_q_vectors(adapter); - goto request_done; - } - igb_setup_all_tx_resources(adapter); - igb_setup_all_rx_resources(adapter); - } else { - igb_assign_vector(adapter->q_vector[0], 0); - } - - if (adapter->flags & IGB_FLAG_HAS_MSI) { - err = request_irq(adapter->pdev->irq, igb_intr_msi, 0, - netdev->name, adapter); - if (!err) - goto request_done; - - /* fall back to legacy interrupts */ - igb_reset_interrupt_capability(adapter); - adapter->flags &= ~IGB_FLAG_HAS_MSI; - } - - err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED, - netdev->name, adapter); - - if (err) - dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", - err); - -request_done: - return err; -} - -static void igb_free_irq(struct igb_adapter *adapter) -{ - if (adapter->msix_entries) { - int vector = 0, i; - - free_irq(adapter->msix_entries[vector++].vector, adapter); - - for (i = 0; i < adapter->num_q_vectors; i++) { - struct igb_q_vector *q_vector = adapter->q_vector[i]; - free_irq(adapter->msix_entries[vector++].vector, - q_vector); - } - } else { - free_irq(adapter->pdev->irq, adapter); - } -} - -/** - * igb_irq_disable - Mask off interrupt generation on the NIC - * @adapter: board private structure - **/ -static void igb_irq_disable(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - /* - * we need to be careful when disabling interrupts. 
The VFs are also - * mapped into these registers and so clearing the bits can cause - * issues on the VF drivers so we only need to clear what we set - */ - if (adapter->msix_entries) { - u32 regval = rd32(E1000_EIAM); - wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); - wr32(E1000_EIMC, adapter->eims_enable_mask); - regval = rd32(E1000_EIAC); - wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); - } - - wr32(E1000_IAM, 0); - wr32(E1000_IMC, ~0); - wrfl(); - if (adapter->msix_entries) { - int i; - for (i = 0; i < adapter->num_q_vectors; i++) - synchronize_irq(adapter->msix_entries[i].vector); - } else { - synchronize_irq(adapter->pdev->irq); - } -} - -/** - * igb_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - **/ -static void igb_irq_enable(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - if (adapter->msix_entries) { - u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC; - u32 regval = rd32(E1000_EIAC); - wr32(E1000_EIAC, regval | adapter->eims_enable_mask); - regval = rd32(E1000_EIAM); - wr32(E1000_EIAM, regval | adapter->eims_enable_mask); - wr32(E1000_EIMS, adapter->eims_enable_mask); - if (adapter->vfs_allocated_count) { - wr32(E1000_MBVFIMR, 0xFF); - ims |= E1000_IMS_VMMB; - } - if (adapter->hw.mac.type == e1000_82580) - ims |= E1000_IMS_DRSTA; - - wr32(E1000_IMS, ims); - } else { - wr32(E1000_IMS, IMS_ENABLE_MASK | - E1000_IMS_DRSTA); - wr32(E1000_IAM, IMS_ENABLE_MASK | - E1000_IMS_DRSTA); - } -} - -static void igb_update_mng_vlan(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u16 vid = adapter->hw.mng_cookie.vlan_id; - u16 old_vid = adapter->mng_vlan_id; - - if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { - /* add VID to filter table */ - igb_vfta_set(hw, vid, true); - adapter->mng_vlan_id = vid; - } else { - adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; - } - - if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && - (vid != old_vid) && - !test_bit(old_vid, adapter->active_vlans)) { - /* remove VID from filter table */ - igb_vfta_set(hw, old_vid, false); - } -} - -/** - * igb_release_hw_control - release control of the h/w to f/w - * @adapter: address of board private structure - * - * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. - * For ASF and Pass Through versions of f/w this means that the - * driver is no longer loaded. - * - **/ -static void igb_release_hw_control(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl_ext; - - /* Let firmware take over control of h/w */ - ctrl_ext = rd32(E1000_CTRL_EXT); - wr32(E1000_CTRL_EXT, - ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); -} - -/** - * igb_get_hw_control - get control of the h/w from f/w - * @adapter: address of board private structure - * - * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. - * For ASF and Pass Through versions of f/w this means that - * the driver is loaded. 
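- * The inverse operation is igb_release_hw_control() above, which
- * clears DRV_LOAD again so the firmware knows the driver has let
- * go of the hardware.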
- * - **/ -static void igb_get_hw_control(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ctrl_ext; - - /* Let firmware know the driver has taken over */ - ctrl_ext = rd32(E1000_CTRL_EXT); - wr32(E1000_CTRL_EXT, - ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); -} - -/** - * igb_configure - configure the hardware for RX and TX - * @adapter: private board structure - **/ -static void igb_configure(struct igb_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int i; - - igb_get_hw_control(adapter); - igb_set_rx_mode(netdev); - - igb_restore_vlan(adapter); - - igb_setup_tctl(adapter); - igb_setup_mrqc(adapter); - igb_setup_rctl(adapter); - - igb_configure_tx(adapter); - igb_configure_rx(adapter); - - igb_rx_fifo_flush_82575(&adapter->hw); - - /* call igb_desc_unused which always leaves - * at least 1 descriptor unused to make sure - * next_to_use != next_to_clean */ - for (i = 0; i < adapter->num_rx_queues; i++) { - struct igb_ring *ring = adapter->rx_ring[i]; - igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); - } -} - -/** - * igb_power_up_link - Power up the phy/serdes link - * @adapter: address of board private structure - **/ -void igb_power_up_link(struct igb_adapter *adapter) -{ - if (adapter->hw.phy.media_type == e1000_media_type_copper) - igb_power_up_phy_copper(&adapter->hw); - else - igb_power_up_serdes_link_82575(&adapter->hw); -} - -/** - * igb_power_down_link - Power down the phy/serdes link - * @adapter: address of board private structure - */ -static void igb_power_down_link(struct igb_adapter *adapter) -{ - if (adapter->hw.phy.media_type == e1000_media_type_copper) - igb_power_down_phy_copper_82575(&adapter->hw); - else - igb_shutdown_serdes_link_82575(&adapter->hw); -} - -/** - * igb_up - Open the interface and prepare it to handle traffic - * @adapter: board private structure - **/ -int igb_up(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - int i; - - /* hardware has been reset, we need to reload some things */ - igb_configure(adapter); - - clear_bit(__IGB_DOWN, &adapter->state); - - for (i = 0; i < adapter->num_q_vectors; i++) { - struct igb_q_vector *q_vector = adapter->q_vector[i]; - napi_enable(&q_vector->napi); - } - if (adapter->msix_entries) - igb_configure_msix(adapter); - else - igb_assign_vector(adapter->q_vector[0], 0); - - /* Clear any pending interrupts. */ - rd32(E1000_ICR); - igb_irq_enable(adapter); - - /* notify VFs that reset has been completed */ - if (adapter->vfs_allocated_count) { - u32 reg_data = rd32(E1000_CTRL_EXT); - reg_data |= E1000_CTRL_EXT_PFRSTD; - wr32(E1000_CTRL_EXT, reg_data); - } - - netif_tx_start_all_queues(adapter->netdev); - - /* start the watchdog. 
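-	   Setting get_link_status here makes the first watchdog pass
-	   query the hardware for fresh link state rather than trusting
-	   a cached value.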
*/ - hw->mac.get_link_status = 1; - schedule_work(&adapter->watchdog_task); - - return 0; -} - -void igb_down(struct igb_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - u32 tctl, rctl; - int i; - - /* signal that we're down so the interrupt handler does not - * reschedule our watchdog timer */ - set_bit(__IGB_DOWN, &adapter->state); - - /* disable receives in the hardware */ - rctl = rd32(E1000_RCTL); - wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); - /* flush and sleep below */ - - netif_tx_stop_all_queues(netdev); - - /* disable transmits in the hardware */ - tctl = rd32(E1000_TCTL); - tctl &= ~E1000_TCTL_EN; - wr32(E1000_TCTL, tctl); - /* flush both disables and wait for them to finish */ - wrfl(); - msleep(10); - - for (i = 0; i < adapter->num_q_vectors; i++) { - struct igb_q_vector *q_vector = adapter->q_vector[i]; - napi_disable(&q_vector->napi); - } - - igb_irq_disable(adapter); - - del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->phy_info_timer); - - netif_carrier_off(netdev); - - /* record the stats before reset*/ - spin_lock(&adapter->stats64_lock); - igb_update_stats(adapter, &adapter->stats64); - spin_unlock(&adapter->stats64_lock); - - adapter->link_speed = 0; - adapter->link_duplex = 0; - - if (!pci_channel_offline(adapter->pdev)) - igb_reset(adapter); - igb_clean_all_tx_rings(adapter); - igb_clean_all_rx_rings(adapter); -#ifdef CONFIG_IGB_DCA - - /* since we reset the hardware DCA settings were cleared */ - igb_setup_dca(adapter); -#endif -} - -void igb_reinit_locked(struct igb_adapter *adapter) -{ - WARN_ON(in_interrupt()); - while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); - igb_down(adapter); - igb_up(adapter); - clear_bit(__IGB_RESETTING, &adapter->state); -} - -void igb_reset(struct igb_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct e1000_hw *hw = &adapter->hw; - struct e1000_mac_info *mac = &hw->mac; - struct e1000_fc_info *fc = &hw->fc; - u32 pba = 0, tx_space, min_tx_space, min_rx_space; - u16 hwm; - - /* Repartition Pba for greater than 9k mtu - * To take effect CTRL.RST is required. - */ - switch (mac->type) { - case e1000_i350: - case e1000_82580: - pba = rd32(E1000_RXPBS); - pba = igb_rxpbs_adjust_82580(pba); - break; - case e1000_82576: - pba = rd32(E1000_RXPBS); - pba &= E1000_RXPBS_SIZE_MASK_82576; - break; - case e1000_82575: - default: - pba = E1000_PBA_34K; - break; - } - - if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && - (mac->type < e1000_82576)) { - /* adjust PBA for jumbo frames */ - wr32(E1000_PBA, pba); - - /* To maintain wire speed transmits, the Tx FIFO should be - * large enough to accommodate two full transmit packets, - * rounded up to the next 1KB and expressed in KB. Likewise, - * the Rx FIFO should be large enough to accommodate at least - * one full receive packet and is similarly rounded up and - * expressed in KB. 
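-	 *
-	 * As a worked example, a 9000-byte MTU gives max_frame_size =
-	 * 9018, so with the 16-byte advanced Tx descriptor:
-	 *   min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 KB
-	 *   min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB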
 */
-		pba = rd32(E1000_PBA);
-		/* upper 16 bits have the Tx packet buffer allocation in KB */
-		tx_space = pba >> 16;
-		/* lower 16 bits have the Rx packet buffer allocation in KB */
-		pba &= 0xffff;
-		/* the Tx FIFO also stores 16 bytes of information about the
-		 * Tx frame, but we don't include the Ethernet FCS because
-		 * hardware appends it */
-		min_tx_space = (adapter->max_frame_size +
-				sizeof(union e1000_adv_tx_desc) -
-				ETH_FCS_LEN) * 2;
-		min_tx_space = ALIGN(min_tx_space, 1024);
-		min_tx_space >>= 10;
-		/* software strips receive CRC, so leave room for it */
-		min_rx_space = adapter->max_frame_size;
-		min_rx_space = ALIGN(min_rx_space, 1024);
-		min_rx_space >>= 10;
-
-		/* If current Tx allocation is less than the min Tx FIFO size,
-		 * and the min Tx FIFO size is less than the current Rx FIFO
-		 * allocation, take space away from current Rx allocation */
-		if (tx_space < min_tx_space &&
-		    ((min_tx_space - tx_space) < pba)) {
-			pba = pba - (min_tx_space - tx_space);
-
-			/* if short on rx space, rx wins and must trump tx
-			 * adjustment */
-			if (pba < min_rx_space)
-				pba = min_rx_space;
-		}
-		wr32(E1000_PBA, pba);
-	}
-
-	/* flow control settings */
-	/* The high water mark must be low enough to fit one full frame
-	 * (or the size used for early receive) above it in the Rx FIFO.
-	 * Set it to the lower of:
-	 * - 90% of the Rx FIFO size, or
-	 * - the full Rx FIFO size minus room for two full frames */
-	hwm = min(((pba << 10) * 9 / 10),
-		  ((pba << 10) - 2 * adapter->max_frame_size));
-
-	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
-	fc->low_water = fc->high_water - 16;
-	fc->pause_time = 0xFFFF;
-	fc->send_xon = 1;
-	fc->current_mode = fc->requested_mode;
-
-	/* disable receive for all VFs and wait one second */
-	if (adapter->vfs_allocated_count) {
-		int i;
-		for (i = 0; i < adapter->vfs_allocated_count; i++)
-			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
-
-		/* ping all the active vfs to let them know we are going down */
-		igb_ping_all_vfs(adapter);
-
-		/* disable transmits and receives */
-		wr32(E1000_VFRE, 0);
-		wr32(E1000_VFTE, 0);
-	}
-
-	/* Allow time for pending master requests to run */
-	hw->mac.ops.reset_hw(hw);
-	wr32(E1000_WUC, 0);
-
-	if (hw->mac.ops.init_hw(hw))
-		dev_err(&pdev->dev, "Hardware Error\n");
-	if (hw->mac.type > e1000_82580) {
-		if (adapter->flags & IGB_FLAG_DMAC) {
-			u32 reg;
-
-			/* The DMA Coalescing high water mark needs to be
-			 * higher than the Rx threshold.  The Rx threshold
-			 * is currently pba - 6, so use a high water mark
-			 * of pba - 4. */
-			hwm = (pba - 4) << 10;
-
-			reg = (((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
-			       & E1000_DMACR_DMACTHR_MASK);
-
-			/* transition to L0s or L1 if available */
-			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
-
-			/* watchdog timer = ~1000 usec in 32 usec intervals */
-			reg |= (1000 >> 5);
-			wr32(E1000_DMACR, reg);
-
-			/* no lower threshold to disable coalescing
-			 * (smart FIFO): DMCRTRH = 0 */
-			wr32(E1000_DMCRTRH, 0);
-
-			/* write the high water mark computed above to FCRTC */
-			wr32(E1000_FCRTC, hwm);
-
-			/* This sets the time to wait before requesting a
-			 * transition to low power state to the number of
-			 * usecs needed to receive one 512-byte frame at
-			 * gigabit line rate */
-			reg = rd32(E1000_DMCTLX);
-			reg |= IGB_DMCTLX_DCFLUSH_DIS;
-
-			/* Delay 255 usec before entering Lx state.
*/ - reg |= 0xFF; - wr32(E1000_DMCTLX, reg); - - /* free space in Tx packet buffer to wake from DMAC */ - wr32(E1000_DMCTXTH, - (IGB_MIN_TXPBSIZE - - (IGB_TX_BUF_4096 + adapter->max_frame_size)) - >> 6); - - /* make low power state decision controlled by DMAC */ - reg = rd32(E1000_PCIEMISC); - reg |= E1000_PCIEMISC_LX_DECISION; - wr32(E1000_PCIEMISC, reg); - } /* end if IGB_FLAG_DMAC set */ - } - if (hw->mac.type == e1000_82580) { - u32 reg = rd32(E1000_PCIEMISC); - wr32(E1000_PCIEMISC, - reg & ~E1000_PCIEMISC_LX_DECISION); - } - if (!netif_running(adapter->netdev)) - igb_power_down_link(adapter); - - igb_update_mng_vlan(adapter); - - /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ - wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); - - igb_get_phy_info(hw); -} - -static u32 igb_fix_features(struct net_device *netdev, u32 features) -{ - /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. - */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; - else - features &= ~NETIF_F_HW_VLAN_TX; - - return features; -} - -static int igb_set_features(struct net_device *netdev, u32 features) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - int i; - u32 changed = netdev->features ^ features; - - for (i = 0; i < adapter->num_rx_queues; i++) { - if (features & NETIF_F_RXCSUM) - adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM; - else - adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM; - } - - if (changed & NETIF_F_HW_VLAN_RX) - igb_vlan_mode(netdev, features); - - return 0; -} - -static const struct net_device_ops igb_netdev_ops = { - .ndo_open = igb_open, - .ndo_stop = igb_close, - .ndo_start_xmit = igb_xmit_frame_adv, - .ndo_get_stats64 = igb_get_stats64, - .ndo_set_rx_mode = igb_set_rx_mode, - .ndo_set_multicast_list = igb_set_rx_mode, - .ndo_set_mac_address = igb_set_mac, - .ndo_change_mtu = igb_change_mtu, - .ndo_do_ioctl = igb_ioctl, - .ndo_tx_timeout = igb_tx_timeout, - .ndo_validate_addr = eth_validate_addr, - .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, - .ndo_set_vf_mac = igb_ndo_set_vf_mac, - .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, - .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, - .ndo_get_vf_config = igb_ndo_get_vf_config, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = igb_netpoll, -#endif - .ndo_fix_features = igb_fix_features, - .ndo_set_features = igb_set_features, -}; - -/** - * igb_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in igb_pci_tbl - * - * Returns 0 on success, negative on failure - * - * igb_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. - **/ -static int __devinit igb_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct igb_adapter *adapter; - struct e1000_hw *hw; - u16 eeprom_data = 0; - s32 ret_val; - static int global_quad_port_a; /* global quad port a indication */ - const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; - unsigned long mmio_start, mmio_len; - int err, pci_using_dac; - u16 eeprom_apme_mask = IGB_EEPROM_APME; - u8 part_str[E1000_PBANUM_LENGTH]; - - /* Catch broken hardware that put the wrong VF device ID in - * the PCIe SR-IOV capability. 
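- * A true VF is meant to be driven by the separate igbvf driver; if
- * this PF driver bound to one it would program the wrong register
- * set, so such devices are refused outright.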
- */ - if (pdev->is_virtfn) { - WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", - pci_name(pdev), pdev->vendor, pdev->device); - return -EINVAL; - } - - err = pci_enable_device_mem(pdev); - if (err) - return err; - - pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); - goto err_dma; - } - } - } - - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), - igb_driver_name); - if (err) - goto err_pci_reg; - - pci_enable_pcie_error_reporting(pdev); - - pci_set_master(pdev); - pci_save_state(pdev); - - err = -ENOMEM; - netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), - IGB_ABS_MAX_TX_QUEUES); - if (!netdev) - goto err_alloc_etherdev; - - SET_NETDEV_DEV(netdev, &pdev->dev); - - pci_set_drvdata(pdev, netdev); - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - hw = &adapter->hw; - hw->back = adapter; - adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE; - - mmio_start = pci_resource_start(pdev, 0); - mmio_len = pci_resource_len(pdev, 0); - - err = -EIO; - hw->hw_addr = ioremap(mmio_start, mmio_len); - if (!hw->hw_addr) - goto err_ioremap; - - netdev->netdev_ops = &igb_netdev_ops; - igb_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - - netdev->mem_start = mmio_start; - netdev->mem_end = mmio_start + mmio_len; - - /* PCI config space info */ - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->revision_id = pdev->revision; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - - /* Copy the default MAC, PHY and NVM function pointers */ - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); - /* Initialize skew-specific constants */ - err = ei->get_invariants(hw); - if (err) - goto err_sw_init; - - /* setup the private structure */ - err = igb_sw_init(adapter); - if (err) - goto err_sw_init; - - igb_get_bus_info_pcie(hw); - - hw->phy.autoneg_wait_to_complete = false; - - /* Copper options */ - if (hw->phy.media_type == e1000_media_type_copper) { - hw->phy.mdix = AUTO_ALL_MODES; - hw->phy.disable_polarity_correction = false; - hw->phy.ms_type = e1000_ms_hw_default; - } - - if (igb_check_reset_block(hw)) - dev_info(&pdev->dev, - "PHY reset is blocked due to SOL/IDER session.\n"); - - netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_RX; - - netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_FILTER; - - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_IP_CSUM; - netdev->vlan_features |= NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_SG; - - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } - - if (hw->mac.type >= e1000_82576) { - netdev->hw_features |= NETIF_F_SCTP_CSUM; - netdev->features |= NETIF_F_SCTP_CSUM; - } - - adapter->en_mng_pt = 
igb_enable_mng_pass_thru(hw);
-
-	/* before reading the NVM, reset the controller to put the device in a
-	 * known good starting state */
-	hw->mac.ops.reset_hw(hw);
-
-	/* make sure the NVM is good */
-	if (hw->nvm.ops.validate(hw) < 0) {
-		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
-		err = -EIO;
-		goto err_eeprom;
-	}
-
-	/* copy the MAC address out of the NVM */
-	if (hw->mac.ops.read_mac_addr(hw))
-		dev_err(&pdev->dev, "NVM Read Error\n");
-
-	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
-	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
-
-	if (!is_valid_ether_addr(netdev->perm_addr)) {
-		dev_err(&pdev->dev, "Invalid MAC Address\n");
-		err = -EIO;
-		goto err_eeprom;
-	}
-
-	setup_timer(&adapter->watchdog_timer, igb_watchdog,
-		    (unsigned long) adapter);
-	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
-		    (unsigned long) adapter);
-
-	INIT_WORK(&adapter->reset_task, igb_reset_task);
-	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
-
-	/* Initialize link properties that are user-changeable */
-	adapter->fc_autoneg = true;
-	hw->mac.autoneg = true;
-	hw->phy.autoneg_advertised = 0x2f;
-
-	hw->fc.requested_mode = e1000_fc_default;
-	hw->fc.current_mode = e1000_fc_default;
-
-	igb_validate_mdi_setting(hw);
-
-	/* Initial Wake on LAN setting.  If APM wake is enabled in the
-	 * EEPROM, enable the ACPI Magic Packet filter. */
-
-	if (hw->bus.func == 0)
-		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
-	else if (hw->mac.type >= e1000_82580)
-		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
-				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
-				 &eeprom_data);
-	else if (hw->bus.func == 1)
-		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
-
-	if (eeprom_data & eeprom_apme_mask)
-		adapter->eeprom_wol |= E1000_WUFC_MAG;
-
-	/* now that we have the eeprom settings, apply the special cases where
-	 * the eeprom may be wrong or the board simply won't support wake on
-	 * lan on a particular port */
-	switch (pdev->device) {
-	case E1000_DEV_ID_82575GB_QUAD_COPPER:
-		adapter->eeprom_wol = 0;
-		break;
-	case E1000_DEV_ID_82575EB_FIBER_SERDES:
-	case E1000_DEV_ID_82576_FIBER:
-	case E1000_DEV_ID_82576_SERDES:
-		/* Wake events only supported on port A for dual fiber
-		 * regardless of eeprom setting */
-		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
-			adapter->eeprom_wol = 0;
-		break;
-	case E1000_DEV_ID_82576_QUAD_COPPER:
-	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
-		/* if quad port adapter, disable WoL on all but port A */
-		if (global_quad_port_a != 0)
-			adapter->eeprom_wol = 0;
-		else
-			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
-		/* Reset for multiple quad port adapters */
-		if (++global_quad_port_a == 4)
-			global_quad_port_a = 0;
-		break;
-	}
-
-	/* initialize the wol settings based on the eeprom settings */
-	adapter->wol = adapter->eeprom_wol;
-	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-
-	/* reset the hardware with the new settings */
-	igb_reset(adapter);
-
-	/* let the f/w know that the h/w is now under the control of the
-	 * driver.
 */
-	igb_get_hw_control(adapter);
-
-	strcpy(netdev->name, "eth%d");
-	err = register_netdev(netdev);
-	if (err)
-		goto err_register;
-
-	igb_vlan_mode(netdev, netdev->features);
-
-	/* carrier off reporting is important to ethtool even BEFORE open */
-	netif_carrier_off(netdev);
-
-#ifdef CONFIG_IGB_DCA
-	if (dca_add_requester(&pdev->dev) == 0) {
-		adapter->flags |= IGB_FLAG_DCA_ENABLED;
-		dev_info(&pdev->dev, "DCA enabled\n");
-		igb_setup_dca(adapter);
-	}
-
-#endif
-	/* do hw tstamp init after resetting */
-	igb_init_hw_timer(adapter);
-
-	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
-	/* print bus type/speed/width info */
-	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
-		 netdev->name,
-		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
-		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
-		  "unknown"),
-		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
-		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
-		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
-		  "unknown"),
-		 netdev->dev_addr);
-
-	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
-	if (ret_val)
-		strcpy(part_str, "Unknown");
-	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
-	dev_info(&pdev->dev,
-		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
-		 adapter->msix_entries ? "MSI-X" :
-		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
-		 adapter->num_rx_queues, adapter->num_tx_queues);
-	switch (hw->mac.type) {
-	case e1000_i350:
-		igb_set_eee_i350(hw);
-		break;
-	default:
-		break;
-	}
-	return 0;
-
-err_register:
-	igb_release_hw_control(adapter);
-err_eeprom:
-	if (!igb_check_reset_block(hw))
-		igb_reset_phy(hw);
-
-	if (hw->flash_address)
-		iounmap(hw->flash_address);
-err_sw_init:
-	igb_clear_interrupt_scheme(adapter);
-	iounmap(hw->hw_addr);
-err_ioremap:
-	free_netdev(netdev);
-err_alloc_etherdev:
-	pci_release_selected_regions(pdev,
-				     pci_select_bars(pdev, IORESOURCE_MEM));
-err_pci_reg:
-err_dma:
-	pci_disable_device(pdev);
-	return err;
-}
-
-/**
- * igb_remove - Device Removal Routine
- * @pdev: PCI device information struct
- *
- * igb_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.  This could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
- **/
-static void __devexit igb_remove(struct pci_dev *pdev)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
-
-	/*
-	 * The watchdog timer may be rescheduled, so explicitly
-	 * disable watchdog from being rescheduled.
-	 */
-	set_bit(__IGB_DOWN, &adapter->state);
-	del_timer_sync(&adapter->watchdog_timer);
-	del_timer_sync(&adapter->phy_info_timer);
-
-	cancel_work_sync(&adapter->reset_task);
-	cancel_work_sync(&adapter->watchdog_task);
-
-#ifdef CONFIG_IGB_DCA
-	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
-		dev_info(&pdev->dev, "DCA disabled\n");
-		dca_remove_requester(&pdev->dev);
-		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
-		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
-	}
-#endif
-
-	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
-	 * would have already happened in close and is redundant.
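-	 * Releasing twice is harmless: igb_release_hw_control() merely
-	 * clears the DRV_LOAD bit again.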
 */
-	igb_release_hw_control(adapter);
-
-	unregister_netdev(netdev);
-
-	igb_clear_interrupt_scheme(adapter);
-
-#ifdef CONFIG_PCI_IOV
-	/* reclaim resources allocated to VFs */
-	if (adapter->vf_data) {
-		/* disable iov and allow time for transactions to clear */
-		pci_disable_sriov(pdev);
-		msleep(500);
-
-		kfree(adapter->vf_data);
-		adapter->vf_data = NULL;
-		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
-		wrfl();
-		msleep(100);
-		dev_info(&pdev->dev, "IOV Disabled\n");
-	}
-#endif
-
-	iounmap(hw->hw_addr);
-	if (hw->flash_address)
-		iounmap(hw->flash_address);
-	pci_release_selected_regions(pdev,
-				     pci_select_bars(pdev, IORESOURCE_MEM));
-
-	free_netdev(netdev);
-
-	pci_disable_pcie_error_reporting(pdev);
-
-	pci_disable_device(pdev);
-}
-
-/**
- * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
- * @adapter: board private structure to initialize
- *
- * This function initializes the vf specific data storage and then attempts to
- * allocate the VFs.  The reason for this ordering is that it is much more
- * expensive time wise to disable SR-IOV than it is to allocate and free
- * the memory for the VFs.
- **/
-static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
-{
-#ifdef CONFIG_PCI_IOV
-	struct pci_dev *pdev = adapter->pdev;
-
-	if (adapter->vfs_allocated_count) {
-		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
-					   sizeof(struct vf_data_storage),
-					   GFP_KERNEL);
-		/* if allocation failed then we do not support SR-IOV */
-		if (!adapter->vf_data) {
-			adapter->vfs_allocated_count = 0;
-			dev_err(&pdev->dev, "Unable to allocate memory for VF "
-				"Data Storage\n");
-		}
-	}
-
-	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
-		kfree(adapter->vf_data);
-		adapter->vf_data = NULL;
-#endif /* CONFIG_PCI_IOV */
-		adapter->vfs_allocated_count = 0;
-#ifdef CONFIG_PCI_IOV
-	} else {
-		unsigned char mac_addr[ETH_ALEN];
-		int i;
-		dev_info(&pdev->dev, "%d vfs allocated\n",
-			 adapter->vfs_allocated_count);
-		for (i = 0; i < adapter->vfs_allocated_count; i++) {
-			random_ether_addr(mac_addr);
-			igb_set_vf_mac(adapter, i, mac_addr);
-		}
-		/* DMA Coalescing is not supported in IOV mode. */
-		if (adapter->flags & IGB_FLAG_DMAC)
-			adapter->flags &= ~IGB_FLAG_DMAC;
-	}
-#endif /* CONFIG_PCI_IOV */
-}
-
-
-/**
- * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
- * @adapter: board private structure to initialize
- *
- * igb_init_hw_timer initializes the function pointer and values for the hw
- * timer found in hardware.
- **/
-static void igb_init_hw_timer(struct igb_adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-
-	switch (hw->mac.type) {
-	case e1000_i350:
-	case e1000_82580:
-		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-		adapter->cycles.read = igb_read_clock;
-		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-		adapter->cycles.mult = 1;
-		/*
-		 * The 82580 timesync updates the system timer every 8ns by 8ns
-		 * and the value cannot be shifted.  Instead we need to shift
-		 * the registers to generate a 64bit timer value.  As a result
-		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
-		 * 24 in order to generate a larger value for synchronization.
-		 */
-		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
-		/* disable system timer temporarily by setting bit 31 */
-		wr32(E1000_TSAUXC, 0x80000000);
-		wrfl();
-
-		/* Set registers so that rollover occurs soon to test this.
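-		   With IGB_82580_TSYNC_SHIFT (24), igb_read_clock() places
-		   SYSTIMH at bit 56 of the 64-bit cycle value, so writing
-		   0xFF below starts the counter at 0xFF80000000000000,
-		   just short of the 64-bit wrap.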
*/ - wr32(E1000_SYSTIMR, 0x00000000); - wr32(E1000_SYSTIML, 0x80000000); - wr32(E1000_SYSTIMH, 0x000000FF); - wrfl(); - - /* enable system timer by clearing bit 31 */ - wr32(E1000_TSAUXC, 0x0); - wrfl(); - - timecounter_init(&adapter->clock, - &adapter->cycles, - ktime_to_ns(ktime_get_real())); - /* - * Synchronize our NIC clock against system wall clock. NIC - * time stamp reading requires ~3us per sample, each sample - * was pretty stable even under load => only require 10 - * samples for each offset comparison. - */ - memset(&adapter->compare, 0, sizeof(adapter->compare)); - adapter->compare.source = &adapter->clock; - adapter->compare.target = ktime_get_real; - adapter->compare.num_samples = 10; - timecompare_update(&adapter->compare, 0); - break; - case e1000_82576: - /* - * Initialize hardware timer: we keep it running just in case - * that some program needs it later on. - */ - memset(&adapter->cycles, 0, sizeof(adapter->cycles)); - adapter->cycles.read = igb_read_clock; - adapter->cycles.mask = CLOCKSOURCE_MASK(64); - adapter->cycles.mult = 1; - /** - * Scale the NIC clock cycle by a large factor so that - * relatively small clock corrections can be added or - * subtracted at each clock tick. The drawbacks of a large - * factor are a) that the clock register overflows more quickly - * (not such a big deal) and b) that the increment per tick has - * to fit into 24 bits. As a result we need to use a shift of - * 19 so we can fit a value of 16 into the TIMINCA register. - */ - adapter->cycles.shift = IGB_82576_TSYNC_SHIFT; - wr32(E1000_TIMINCA, - (1 << E1000_TIMINCA_16NS_SHIFT) | - (16 << IGB_82576_TSYNC_SHIFT)); - - /* Set registers so that rollover occurs soon to test this. */ - wr32(E1000_SYSTIML, 0x00000000); - wr32(E1000_SYSTIMH, 0xFF800000); - wrfl(); - - timecounter_init(&adapter->clock, - &adapter->cycles, - ktime_to_ns(ktime_get_real())); - /* - * Synchronize our NIC clock against system wall clock. NIC - * time stamp reading requires ~3us per sample, each sample - * was pretty stable even under load => only require 10 - * samples for each offset comparison. - */ - memset(&adapter->compare, 0, sizeof(adapter->compare)); - adapter->compare.source = &adapter->clock; - adapter->compare.target = ktime_get_real; - adapter->compare.num_samples = 10; - timecompare_update(&adapter->compare, 0); - break; - case e1000_82575: - /* 82575 does not support timesync */ - default: - break; - } - -} - -/** - * igb_sw_init - Initialize general software structures (struct igb_adapter) - * @adapter: board private structure to initialize - * - * igb_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). 
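- * For the default 1500-byte MTU, for instance, max_frame_size works
- * out to 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes.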
- **/ -static int __devinit igb_sw_init(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); - - adapter->tx_ring_count = IGB_DEFAULT_TXD; - adapter->rx_ring_count = IGB_DEFAULT_RXD; - adapter->rx_itr_setting = IGB_DEFAULT_ITR; - adapter->tx_itr_setting = IGB_DEFAULT_ITR; - - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - - spin_lock_init(&adapter->stats64_lock); -#ifdef CONFIG_PCI_IOV - switch (hw->mac.type) { - case e1000_82576: - case e1000_i350: - if (max_vfs > 7) { - dev_warn(&pdev->dev, - "Maximum of 7 VFs per PF, using max\n"); - adapter->vfs_allocated_count = 7; - } else - adapter->vfs_allocated_count = max_vfs; - break; - default: - break; - } -#endif /* CONFIG_PCI_IOV */ - adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); - /* i350 cannot do RSS and SR-IOV at the same time */ - if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count) - adapter->rss_queues = 1; - - /* - * if rss_queues > 4 or vfs are going to be allocated with rss_queues - * then we should combine the queues into a queue pair in order to - * conserve interrupts due to limited supply - */ - if ((adapter->rss_queues > 4) || - ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6))) - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; - - /* This call may decrease the number of queues */ - if (igb_init_interrupt_scheme(adapter)) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); - return -ENOMEM; - } - - igb_probe_vfs(adapter); - - /* Explicitly disable IRQ since the NIC can be in any state. */ - igb_irq_disable(adapter); - - if (hw->mac.type == e1000_i350) - adapter->flags &= ~IGB_FLAG_DMAC; - - set_bit(__IGB_DOWN, &adapter->state); - return 0; -} - -/** - * igb_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. - **/ -static int igb_open(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int err; - int i; - - /* disallow open during test */ - if (test_bit(__IGB_TESTING, &adapter->state)) - return -EBUSY; - - netif_carrier_off(netdev); - - /* allocate transmit descriptors */ - err = igb_setup_all_tx_resources(adapter); - if (err) - goto err_setup_tx; - - /* allocate receive descriptors */ - err = igb_setup_all_rx_resources(adapter); - if (err) - goto err_setup_rx; - - igb_power_up_link(adapter); - - /* before we allocate an interrupt, we must be ready to handle it. - * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt - * as soon as we call pci_request_irq, so we have to setup our - * clean_rx handler before we do so. 
*/ - igb_configure(adapter); - - err = igb_request_irq(adapter); - if (err) - goto err_req_irq; - - /* From here on the code is the same as igb_up() */ - clear_bit(__IGB_DOWN, &adapter->state); - - for (i = 0; i < adapter->num_q_vectors; i++) { - struct igb_q_vector *q_vector = adapter->q_vector[i]; - napi_enable(&q_vector->napi); - } - - /* Clear any pending interrupts. */ - rd32(E1000_ICR); - - igb_irq_enable(adapter); - - /* notify VFs that reset has been completed */ - if (adapter->vfs_allocated_count) { - u32 reg_data = rd32(E1000_CTRL_EXT); - reg_data |= E1000_CTRL_EXT_PFRSTD; - wr32(E1000_CTRL_EXT, reg_data); - } - - netif_tx_start_all_queues(netdev); - - /* start the watchdog. */ - hw->mac.get_link_status = 1; - schedule_work(&adapter->watchdog_task); - - return 0; - -err_req_irq: - igb_release_hw_control(adapter); - igb_power_down_link(adapter); - igb_free_all_rx_resources(adapter); -err_setup_rx: - igb_free_all_tx_resources(adapter); -err_setup_tx: - igb_reset(adapter); - - return err; -} - -/** - * igb_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the driver's control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. - **/ -static int igb_close(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - - WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); - igb_down(adapter); - - igb_free_irq(adapter); - - igb_free_all_tx_resources(adapter); - igb_free_all_rx_resources(adapter); - - return 0; -} - -/** - * igb_setup_tx_resources - allocate Tx resources (Descriptors) - * @tx_ring: tx descriptor ring (for a specific queue) to setup - * - * Return 0 on success, negative on failure - **/ -int igb_setup_tx_resources(struct igb_ring *tx_ring) -{ - struct device *dev = tx_ring->dev; - int size; - - size = sizeof(struct igb_buffer) * tx_ring->count; - tx_ring->buffer_info = vzalloc(size); - if (!tx_ring->buffer_info) - goto err; - - /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); - - tx_ring->desc = dma_alloc_coherent(dev, - tx_ring->size, - &tx_ring->dma, - GFP_KERNEL); - - if (!tx_ring->desc) - goto err; - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - return 0; - -err: - vfree(tx_ring->buffer_info); - dev_err(dev, - "Unable to allocate memory for the transmit descriptor ring\n"); - return -ENOMEM; -} - -/** - * igb_setup_all_tx_resources - wrapper to allocate Tx resources - * (Descriptors) for all queues - * @adapter: board private structure - * - * Return 0 on success, negative on failure - **/ -static int igb_setup_all_tx_resources(struct igb_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - int i, err = 0; - - for (i = 0; i < adapter->num_tx_queues; i++) { - err = igb_setup_tx_resources(adapter->tx_ring[i]); - if (err) { - dev_err(&pdev->dev, - "Allocation for Tx Queue %u failed\n", i); - for (i--; i >= 0; i--) - igb_free_tx_resources(adapter->tx_ring[i]); - break; - } - } - - for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) { - int r_idx = i % adapter->num_tx_queues; - adapter->multi_tx_table[i] = adapter->tx_ring[r_idx]; - } - return err; -} - -/** - * igb_setup_tctl - configure the transmit control registers - * @adapter: Board private structure - **/ 
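[Editor's note] igb_setup_tx_resources above rounds the descriptor ring up to a 4 KiB multiple before the coherent DMA allocation. The sketch below reproduces that arithmetic in userspace, assuming a 16-byte advanced descriptor; ALIGN is restated here only for illustration.

#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct adv_tx_desc_sketch { unsigned long long lo, hi; }; /* 16-byte stand-in */

int main(void)
{
        size_t counts[] = { 200, 256, 1024 };

        for (size_t i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
                size_t size = counts[i] * sizeof(struct adv_tx_desc_sketch);
                printf("count %4zu: %6zu bytes -> %6zu after 4K align\n",
                       counts[i], size, ALIGN(size, 4096));
        }
        return 0;
}

Rounding to page-sized multiples keeps the ring from sharing a page with unrelated allocations and matches what the DMA API will hand back anyway.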
-void igb_setup_tctl(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 tctl; - - /* disable queue 0 which is enabled by default on 82575 and 82576 */ - wr32(E1000_TXDCTL(0), 0); - - /* Program the Transmit Control Register */ - tctl = rd32(E1000_TCTL); - tctl &= ~E1000_TCTL_CT; - tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | - (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); - - igb_config_collision_dist(hw); - - /* Enable transmits */ - tctl |= E1000_TCTL_EN; - - wr32(E1000_TCTL, tctl); -} - -/** - * igb_configure_tx_ring - Configure transmit ring after Reset - * @adapter: board private structure - * @ring: tx ring to configure - * - * Configure a transmit ring after a reset. - **/ -void igb_configure_tx_ring(struct igb_adapter *adapter, - struct igb_ring *ring) -{ - struct e1000_hw *hw = &adapter->hw; - u32 txdctl; - u64 tdba = ring->dma; - int reg_idx = ring->reg_idx; - - /* disable the queue */ - txdctl = rd32(E1000_TXDCTL(reg_idx)); - wr32(E1000_TXDCTL(reg_idx), - txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); - wrfl(); - mdelay(10); - - wr32(E1000_TDLEN(reg_idx), - ring->count * sizeof(union e1000_adv_tx_desc)); - wr32(E1000_TDBAL(reg_idx), - tdba & 0x00000000ffffffffULL); - wr32(E1000_TDBAH(reg_idx), tdba >> 32); - - ring->head = hw->hw_addr + E1000_TDH(reg_idx); - ring->tail = hw->hw_addr + E1000_TDT(reg_idx); - writel(0, ring->head); - writel(0, ring->tail); - - txdctl |= IGB_TX_PTHRESH; - txdctl |= IGB_TX_HTHRESH << 8; - txdctl |= IGB_TX_WTHRESH << 16; - - txdctl |= E1000_TXDCTL_QUEUE_ENABLE; - wr32(E1000_TXDCTL(reg_idx), txdctl); -} - -/** - * igb_configure_tx - Configure transmit Unit after Reset - * @adapter: board private structure - * - * Configure the Tx unit of the MAC after a reset. - **/ -static void igb_configure_tx(struct igb_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - igb_configure_tx_ring(adapter, adapter->tx_ring[i]); -} - -/** - * igb_setup_rx_resources - allocate Rx resources (Descriptors) - * @rx_ring: rx descriptor ring (for a specific queue) to setup - * - * Returns 0 on success, negative on failure - **/ -int igb_setup_rx_resources(struct igb_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - int size, desc_len; - - size = sizeof(struct igb_buffer) * rx_ring->count; - rx_ring->buffer_info = vzalloc(size); - if (!rx_ring->buffer_info) - goto err; - - desc_len = sizeof(union e1000_adv_rx_desc); - - /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * desc_len; - rx_ring->size = ALIGN(rx_ring->size, 4096); - - rx_ring->desc = dma_alloc_coherent(dev, - rx_ring->size, - &rx_ring->dma, - GFP_KERNEL); - - if (!rx_ring->desc) - goto err; - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - - return 0; - -err: - vfree(rx_ring->buffer_info); - rx_ring->buffer_info = NULL; - dev_err(dev, "Unable to allocate memory for the receive descriptor" - " ring\n"); - return -ENOMEM; -} - -/** - * igb_setup_all_rx_resources - wrapper to allocate Rx resources - * (Descriptors) for all queues - * @adapter: board private structure - * - * Return 0 on success, negative on failure - **/ -static int igb_setup_all_rx_resources(struct igb_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - int i, err = 0; - - for (i = 0; i < adapter->num_rx_queues; i++) { - err = igb_setup_rx_resources(adapter->rx_ring[i]); - if (err) { - dev_err(&pdev->dev, - "Allocation for Rx Queue %u failed\n", i); - for (i--; i >= 0; i--) - igb_free_rx_resources(adapter->rx_ring[i]); - break; - } - } - - return err; -} - -/** - * 
igb_setup_mrqc - configure the multiple receive queue control registers - * @adapter: Board private structure - **/ -static void igb_setup_mrqc(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 mrqc, rxcsum; - u32 j, num_rx_queues, shift = 0, shift2 = 0; - union e1000_reta { - u32 dword; - u8 bytes[4]; - } reta; - static const u8 rsshash[40] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, - 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, - 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, - 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; - - /* Fill out hash function seeds */ - for (j = 0; j < 10; j++) { - u32 rsskey = rsshash[(j * 4)]; - rsskey |= rsshash[(j * 4) + 1] << 8; - rsskey |= rsshash[(j * 4) + 2] << 16; - rsskey |= rsshash[(j * 4) + 3] << 24; - array_wr32(E1000_RSSRK(0), j, rsskey); - } - - num_rx_queues = adapter->rss_queues; - - if (adapter->vfs_allocated_count) { - /* 82575 and 82576 supports 2 RSS queues for VMDq */ - switch (hw->mac.type) { - case e1000_i350: - case e1000_82580: - num_rx_queues = 1; - shift = 0; - break; - case e1000_82576: - shift = 3; - num_rx_queues = 2; - break; - case e1000_82575: - shift = 2; - shift2 = 6; - default: - break; - } - } else { - if (hw->mac.type == e1000_82575) - shift = 6; - } - - for (j = 0; j < (32 * 4); j++) { - reta.bytes[j & 3] = (j % num_rx_queues) << shift; - if (shift2) - reta.bytes[j & 3] |= num_rx_queues << shift2; - if ((j & 3) == 3) - wr32(E1000_RETA(j >> 2), reta.dword); - } - - /* - * Disable raw packet checksumming so that RSS hash is placed in - * descriptor on writeback. No need to enable TCP/UDP/IP checksum - * offloads as they are enabled by default - */ - rxcsum = rd32(E1000_RXCSUM); - rxcsum |= E1000_RXCSUM_PCSD; - - if (adapter->hw.mac.type >= e1000_82576) - /* Enable Receive Checksum Offload for SCTP */ - rxcsum |= E1000_RXCSUM_CRCOFL; - - /* Don't need to set TUOFL or IPOFL, they default to 1 */ - wr32(E1000_RXCSUM, rxcsum); - - /* If VMDq is enabled then we set the appropriate mode for that, else - * we default to RSS so that an RSS hash is calculated per packet even - * if we are only using one queue */ - if (adapter->vfs_allocated_count) { - if (hw->mac.type > e1000_82575) { - /* Set the default pool for the PF's first queue */ - u32 vtctl = rd32(E1000_VT_CTL); - vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | - E1000_VT_CTL_DISABLE_DEF_POOL); - vtctl |= adapter->vfs_allocated_count << - E1000_VT_CTL_DEFAULT_POOL_SHIFT; - wr32(E1000_VT_CTL, vtctl); - } - if (adapter->rss_queues > 1) - mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; - else - mrqc = E1000_MRQC_ENABLE_VMDQ; - } else { - mrqc = E1000_MRQC_ENABLE_RSS_4Q; - } - igb_vmm_control(adapter); - - /* - * Generate RSS hash based on TCP port numbers and/or - * IPv4/v6 src and dst addresses since UDP cannot be - * hashed reliably due to IP fragmentation - */ - mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | - E1000_MRQC_RSS_FIELD_IPV4_TCP | - E1000_MRQC_RSS_FIELD_IPV6 | - E1000_MRQC_RSS_FIELD_IPV6_TCP | - E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; - - wr32(E1000_MRQC, mrqc); -} - -/** - * igb_setup_rctl - configure the receive control registers - * @adapter: Board private structure - **/ -void igb_setup_rctl(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 rctl; - - rctl = rd32(E1000_RCTL); - - rctl &= ~(3 << E1000_RCTL_MO_SHIFT); - rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); - - rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | - (hw->mac.mc_filter_type << 
E1000_RCTL_MO_SHIFT); - - /* - * enable stripping of CRC. It's unlikely this will break BMC - * redirection as it did with e1000. Newer features require - * that the HW strips the CRC. - */ - rctl |= E1000_RCTL_SECRC; - - /* disable store bad packets and clear size bits. */ - rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); - - /* enable LPE to prevent packets larger than max_frame_size */ - rctl |= E1000_RCTL_LPE; - - /* disable queue 0 to prevent tail write w/o re-config */ - wr32(E1000_RXDCTL(0), 0); - - /* Attention!!! For SR-IOV PF driver operations you must enable - * queue drop for all VF and PF queues to prevent head of line blocking - * if an un-trusted VF does not provide descriptors to hardware. - */ - if (adapter->vfs_allocated_count) { - /* set all queue drop enable bits */ - wr32(E1000_QDE, ALL_QUEUES); - } - - wr32(E1000_RCTL, rctl); -} - -static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, - int vfn) -{ - struct e1000_hw *hw = &adapter->hw; - u32 vmolr; - - /* if it isn't the PF check to see if VFs are enabled and - * increase the size to support vlan tags */ - if (vfn < adapter->vfs_allocated_count && - adapter->vf_data[vfn].vlans_enabled) - size += VLAN_TAG_SIZE; - - vmolr = rd32(E1000_VMOLR(vfn)); - vmolr &= ~E1000_VMOLR_RLPML_MASK; - vmolr |= size | E1000_VMOLR_LPE; - wr32(E1000_VMOLR(vfn), vmolr); - - return 0; -} - -/** - * igb_rlpml_set - set maximum receive packet size - * @adapter: board private structure - * - * Configure maximum receivable packet size. - **/ -static void igb_rlpml_set(struct igb_adapter *adapter) -{ - u32 max_frame_size; - struct e1000_hw *hw = &adapter->hw; - u16 pf_id = adapter->vfs_allocated_count; - - max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; - - /* if vfs are enabled we set RLPML to the largest possible request - * size and set the VMOLR RLPML to the size we need */ - if (pf_id) { - igb_set_vf_rlpml(adapter, max_frame_size, pf_id); - max_frame_size = MAX_JUMBO_FRAME_SIZE; - } - - wr32(E1000_RLPML, max_frame_size); -} - -static inline void igb_set_vmolr(struct igb_adapter *adapter, - int vfn, bool aupe) -{ - struct e1000_hw *hw = &adapter->hw; - u32 vmolr; - - /* - * This register exists only on 82576 and newer so if we are older then - * we should exit and do nothing - */ - if (hw->mac.type < e1000_82576) - return; - - vmolr = rd32(E1000_VMOLR(vfn)); - vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ - if (aupe) - vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ - else - vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ - - /* clear all bits that might not be set */ - vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); - - if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) - vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ - /* - * for VMDq only allow the VFs and pool 0 to accept broadcast and - * multicast packets - */ - if (vfn <= adapter->vfs_allocated_count) - vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ - - wr32(E1000_VMOLR(vfn), vmolr); -} - -/** - * igb_configure_rx_ring - Configure a receive ring after Reset - * @adapter: board private structure - * @ring: receive ring to be configured - * - * Configure the Rx unit of the MAC after a reset. 
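[Editor's note] The per-VF sizing in igb_set_vf_rlpml below is simple enough to show in isolation: grow the request by one VLAN tag when the VF uses VLANs, then merge it into VMOLR's RLPML field with LPE set. In this userspace sketch build_vmolr() is hypothetical, and the mask and bit values are assumptions rather than datasheet quotes.

#include <stdio.h>
#include <stdint.h>

#define VLAN_TAG_SIZE      4
#define VMOLR_RLPML_MASK   0x00003FFF      /* assumed field width */
#define VMOLR_LPE          (1u << 16)      /* assumed bit position */

static uint32_t build_vmolr(uint32_t old_vmolr, int size, int vlans_enabled)
{
        if (vlans_enabled)
                size += VLAN_TAG_SIZE;     /* leave room for the tag */

        old_vmolr &= ~VMOLR_RLPML_MASK;    /* clear the old limit */
        return old_vmolr | size | VMOLR_LPE;
}

int main(void)
{
        printf("vmolr = 0x%05x\n", build_vmolr(0, 1522, 1));
        return 0;
}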
- **/ -void igb_configure_rx_ring(struct igb_adapter *adapter, - struct igb_ring *ring) -{ - struct e1000_hw *hw = &adapter->hw; - u64 rdba = ring->dma; - int reg_idx = ring->reg_idx; - u32 srrctl, rxdctl; - - /* disable the queue */ - rxdctl = rd32(E1000_RXDCTL(reg_idx)); - wr32(E1000_RXDCTL(reg_idx), - rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); - - /* Set DMA base address registers */ - wr32(E1000_RDBAL(reg_idx), - rdba & 0x00000000ffffffffULL); - wr32(E1000_RDBAH(reg_idx), rdba >> 32); - wr32(E1000_RDLEN(reg_idx), - ring->count * sizeof(union e1000_adv_rx_desc)); - - /* initialize head and tail */ - ring->head = hw->hw_addr + E1000_RDH(reg_idx); - ring->tail = hw->hw_addr + E1000_RDT(reg_idx); - writel(0, ring->head); - writel(0, ring->tail); - - /* set descriptor configuration */ - if (ring->rx_buffer_len < IGB_RXBUFFER_1024) { - srrctl = ALIGN(ring->rx_buffer_len, 64) << - E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; -#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 - srrctl |= IGB_RXBUFFER_16384 >> - E1000_SRRCTL_BSIZEPKT_SHIFT; -#else - srrctl |= (PAGE_SIZE / 2) >> - E1000_SRRCTL_BSIZEPKT_SHIFT; -#endif - srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else { - srrctl = ALIGN(ring->rx_buffer_len, 1024) >> - E1000_SRRCTL_BSIZEPKT_SHIFT; - srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; - } - if (hw->mac.type == e1000_82580) - srrctl |= E1000_SRRCTL_TIMESTAMP; - /* Only set Drop Enable if we are supporting multiple queues */ - if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) - srrctl |= E1000_SRRCTL_DROP_EN; - - wr32(E1000_SRRCTL(reg_idx), srrctl); - - /* set filtering for VMDQ pools */ - igb_set_vmolr(adapter, reg_idx & 0x7, true); - - /* enable receive descriptor fetching */ - rxdctl = rd32(E1000_RXDCTL(reg_idx)); - rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; - rxdctl &= 0xFFF00000; - rxdctl |= IGB_RX_PTHRESH; - rxdctl |= IGB_RX_HTHRESH << 8; - rxdctl |= IGB_RX_WTHRESH << 16; - wr32(E1000_RXDCTL(reg_idx), rxdctl); -} - -/** - * igb_configure_rx - Configure receive Unit after Reset - * @adapter: board private structure - * - * Configure the Rx unit of the MAC after a reset. 
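[Editor's note] In one-buffer mode igb_configure_rx_ring encodes the Rx buffer length into SRRCTL in 1 KiB units. A minimal sketch of that encoding, assuming a BSIZEPKT shift of 10, which is what the 1024-byte ALIGN in the code implies:

#include <stdio.h>
#include <stdint.h>

#define SRRCTL_BSIZEPKT_SHIFT 10   /* assumed: size expressed in 1 KiB units */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
        uint32_t rx_buffer_len = 2048;
        uint32_t srrctl = ALIGN(rx_buffer_len, 1024) >> SRRCTL_BSIZEPKT_SHIFT;

        printf("SRRCTL packet size field = %u (x1KB)\n", srrctl); /* 2 */
        return 0;
}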
- **/ -static void igb_configure_rx(struct igb_adapter *adapter) -{ - int i; - - /* set UTA to appropriate mode */ - igb_set_uta(adapter); - - /* set the correct pool for the PF default MAC address in entry 0 */ - igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, - adapter->vfs_allocated_count); - - /* Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring */ - for (i = 0; i < adapter->num_rx_queues; i++) - igb_configure_rx_ring(adapter, adapter->rx_ring[i]); -} - -/** - * igb_free_tx_resources - Free Tx Resources per Queue - * @tx_ring: Tx descriptor ring for a specific queue - * - * Free all transmit software resources - **/ -void igb_free_tx_resources(struct igb_ring *tx_ring) -{ - igb_clean_tx_ring(tx_ring); - - vfree(tx_ring->buffer_info); - tx_ring->buffer_info = NULL; - - /* if not set, then don't free */ - if (!tx_ring->desc) - return; - - dma_free_coherent(tx_ring->dev, tx_ring->size, - tx_ring->desc, tx_ring->dma); - - tx_ring->desc = NULL; -} - -/** - * igb_free_all_tx_resources - Free Tx Resources for All Queues - * @adapter: board private structure - * - * Free all transmit software resources - **/ -static void igb_free_all_tx_resources(struct igb_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - igb_free_tx_resources(adapter->tx_ring[i]); -} - -void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring, - struct igb_buffer *buffer_info) -{ - if (buffer_info->dma) { - if (buffer_info->mapped_as_page) - dma_unmap_page(tx_ring->dev, - buffer_info->dma, - buffer_info->length, - DMA_TO_DEVICE); - else - dma_unmap_single(tx_ring->dev, - buffer_info->dma, - buffer_info->length, - DMA_TO_DEVICE); - buffer_info->dma = 0; - } - if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); - buffer_info->skb = NULL; - } - buffer_info->time_stamp = 0; - buffer_info->length = 0; - buffer_info->next_to_watch = 0; - buffer_info->mapped_as_page = false; -} - -/** - * igb_clean_tx_ring - Free Tx Buffers - * @tx_ring: ring to be cleaned - **/ -static void igb_clean_tx_ring(struct igb_ring *tx_ring) -{ - struct igb_buffer *buffer_info; - unsigned long size; - unsigned int i; - - if (!tx_ring->buffer_info) - return; - /* Free all the Tx ring sk_buffs */ - - for (i = 0; i < tx_ring->count; i++) { - buffer_info = &tx_ring->buffer_info[i]; - igb_unmap_and_free_tx_resource(tx_ring, buffer_info); - } - - size = sizeof(struct igb_buffer) * tx_ring->count; - memset(tx_ring->buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; -} - -/** - * igb_clean_all_tx_rings - Free Tx Buffers for all queues - * @adapter: board private structure - **/ -static void igb_clean_all_tx_rings(struct igb_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - igb_clean_tx_ring(adapter->tx_ring[i]); -} - -/** - * igb_free_rx_resources - Free Rx Resources - * @rx_ring: ring to clean the resources from - * - * Free all receive software resources - **/ -void igb_free_rx_resources(struct igb_ring *rx_ring) -{ - igb_clean_rx_ring(rx_ring); - - vfree(rx_ring->buffer_info); - rx_ring->buffer_info = NULL; - - /* if not set, then don't free */ - if (!rx_ring->desc) - return; - - dma_free_coherent(rx_ring->dev, rx_ring->size, - rx_ring->desc, rx_ring->dma); - - rx_ring->desc = NULL; -} - -/** - * igb_free_all_rx_resources - Free Rx Resources for All Queues - * @adapter: board private structure - * - * Free all 
receive software resources - **/ -static void igb_free_all_rx_resources(struct igb_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) - igb_free_rx_resources(adapter->rx_ring[i]); -} - -/** - * igb_clean_rx_ring - Free Rx Buffers per Queue - * @rx_ring: ring to free buffers from - **/ -static void igb_clean_rx_ring(struct igb_ring *rx_ring) -{ - struct igb_buffer *buffer_info; - unsigned long size; - unsigned int i; - - if (!rx_ring->buffer_info) - return; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - buffer_info = &rx_ring->buffer_info[i]; - if (buffer_info->dma) { - dma_unmap_single(rx_ring->dev, - buffer_info->dma, - rx_ring->rx_buffer_len, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - } - - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; - } - if (buffer_info->page_dma) { - dma_unmap_page(rx_ring->dev, - buffer_info->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - buffer_info->page_dma = 0; - } - if (buffer_info->page) { - put_page(buffer_info->page); - buffer_info->page = NULL; - buffer_info->page_offset = 0; - } - } - - size = sizeof(struct igb_buffer) * rx_ring->count; - memset(rx_ring->buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; -} - -/** - * igb_clean_all_rx_rings - Free Rx Buffers for all queues - * @adapter: board private structure - **/ -static void igb_clean_all_rx_rings(struct igb_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) - igb_clean_rx_ring(adapter->rx_ring[i]); -} - -/** - * igb_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int igb_set_mac(struct net_device *netdev, void *p) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - - /* set the correct pool for the new PF MAC address in entry 0 */ - igb_rar_set_qsel(adapter, hw->mac.addr, 0, - adapter->vfs_allocated_count); - - return 0; -} - -/** - * igb_write_mc_addr_list - write multicast addresses to MTA - * @netdev: network interface device structure - * - * Writes multicast address list to the MTA hash table. - * Returns: -ENOMEM on failure - * 0 on no addresses written - * X on writing X addresses to MTA - **/ -static int igb_write_mc_addr_list(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct netdev_hw_addr *ha; - u8 *mta_list; - int i; - - if (netdev_mc_empty(netdev)) { - /* nothing to program, so clear mc list */ - igb_update_mc_addr_list(hw, NULL, 0); - igb_restore_vf_multicasts(adapter); - return 0; - } - - mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); - if (!mta_list) - return -ENOMEM; - - /* The shared function expects a packed array of only addresses. 
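[Editor's note] igb_update_mc_addr_list(), referenced below, is handed a flat packed array: 6-byte addresses back to back with no per-entry framing. The userspace sketch below shows the same packing; the addresses and scaffolding are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

int main(void)
{
        uint8_t addrs[2][ETH_ALEN] = {
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
        };
        uint8_t *mta_list = calloc(2, ETH_ALEN);
        int i;

        if (!mta_list)
                return 1;
        /* pack each address at an ETH_ALEN stride, as the driver does */
        for (i = 0; i < 2; i++)
                memcpy(mta_list + i * ETH_ALEN, addrs[i], ETH_ALEN);

        printf("first byte of entry 1: 0x%02x\n", mta_list[ETH_ALEN]);
        free(mta_list);
        return 0;
}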
*/ - i = 0; - netdev_for_each_mc_addr(ha, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); - - igb_update_mc_addr_list(hw, mta_list, i); - kfree(mta_list); - - return netdev_mc_count(netdev); -} - -/** - * igb_write_uc_addr_list - write unicast addresses to RAR table - * @netdev: network interface device structure - * - * Writes unicast address list to the RAR table. - * Returns: -ENOMEM on failure/insufficient address space - * 0 on no addresses written - * X on writing X addresses to the RAR table - **/ -static int igb_write_uc_addr_list(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - unsigned int vfn = adapter->vfs_allocated_count; - unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); - int count = 0; - - /* return ENOMEM indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > rar_entries) - return -ENOMEM; - - if (!netdev_uc_empty(netdev) && rar_entries) { - struct netdev_hw_addr *ha; - - netdev_for_each_uc_addr(ha, netdev) { - if (!rar_entries) - break; - igb_rar_set_qsel(adapter, ha->addr, - rar_entries--, - vfn); - count++; - } - } - /* write the addresses in reverse order to avoid write combining */ - for (; rar_entries > 0 ; rar_entries--) { - wr32(E1000_RAH(rar_entries), 0); - wr32(E1000_RAL(rar_entries), 0); - } - wrfl(); - - return count; -} - -/** - * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set - * @netdev: network interface device structure - * - * The set_rx_mode entry point is called whenever the unicast or multicast - * address lists or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper unicast, multicast, - * promiscuous mode, and all-multi behavior. - **/ -static void igb_set_rx_mode(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - unsigned int vfn = adapter->vfs_allocated_count; - u32 rctl, vmolr = 0; - int count; - - /* Check for Promiscuous and All Multicast modes */ - rctl = rd32(E1000_RCTL); - - /* clear the effected bits */ - rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); - - if (netdev->flags & IFF_PROMISC) { - rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); - vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); - } else { - if (netdev->flags & IFF_ALLMULTI) { - rctl |= E1000_RCTL_MPE; - vmolr |= E1000_VMOLR_MPME; - } else { - /* - * Write addresses to the MTA, if the attempt fails - * then we should just turn on promiscuous mode so - * that we can at least receive multicast traffic - */ - count = igb_write_mc_addr_list(netdev); - if (count < 0) { - rctl |= E1000_RCTL_MPE; - vmolr |= E1000_VMOLR_MPME; - } else if (count) { - vmolr |= E1000_VMOLR_ROMPE; - } - } - /* - * Write addresses to available RAR registers, if there is not - * sufficient space to store all the addresses then enable - * unicast promiscuous mode - */ - count = igb_write_uc_addr_list(netdev); - if (count < 0) { - rctl |= E1000_RCTL_UPE; - vmolr |= E1000_VMOLR_ROPE; - } - rctl |= E1000_RCTL_VFE; - } - wr32(E1000_RCTL, rctl); - - /* - * In order to support SR-IOV and eventually VMDq it is necessary to set - * the VMOLR to enable the appropriate modes. 
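[Editor's note] igb_set_rx_mode above falls back to promiscuous bits only when filter programming fails. The policy, sketched as a tiny pure function with hypothetical names and illustrative bit values:

#include <stdio.h>

#define RCTL_UPE (1u << 3)   /* illustrative bit positions */
#define RCTL_MPE (1u << 4)

/* mc_written/uc_written are the return values of the address-list
 * writers above: negative means the hardware table could not hold
 * the list, so the corresponding promiscuous bit must be set. */
static unsigned rx_mode_bits(int mc_written, int uc_written)
{
        unsigned rctl = 0;

        if (mc_written < 0)
                rctl |= RCTL_MPE;  /* MTA full: accept all multicast */
        if (uc_written < 0)
                rctl |= RCTL_UPE;  /* RAR full: accept all unicast */
        return rctl;
}

int main(void)
{
        printf("0x%x\n", rx_mode_bits(-1, 3)); /* multicast promisc only */
        printf("0x%x\n", rx_mode_bits(2, -1)); /* unicast promisc only */
        return 0;
}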
Without this workaround - * we will have issues with VLAN tag stripping not being done for frames - * that are only arriving because we are the default pool - */ - if (hw->mac.type < e1000_82576) - return; - - vmolr |= rd32(E1000_VMOLR(vfn)) & - ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); - wr32(E1000_VMOLR(vfn), vmolr); - igb_restore_vf_multicasts(adapter); -} - -static void igb_check_wvbr(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 wvbr = 0; - - switch (hw->mac.type) { - case e1000_82576: - case e1000_i350: - if (!(wvbr = rd32(E1000_WVBR))) - return; - break; - default: - break; - } - - adapter->wvbr |= wvbr; -} - -#define IGB_STAGGERED_QUEUE_OFFSET 8 - -static void igb_spoof_check(struct igb_adapter *adapter) -{ - int j; - - if (!adapter->wvbr) - return; - - for(j = 0; j < adapter->vfs_allocated_count; j++) { - if (adapter->wvbr & (1 << j) || - adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { - dev_warn(&adapter->pdev->dev, - "Spoof event(s) detected on VF %d\n", j); - adapter->wvbr &= - ~((1 << j) | - (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))); - } - } -} - -/* Need to wait a few seconds after link up to get diagnostic information from - * the phy */ -static void igb_update_phy_info(unsigned long data) -{ - struct igb_adapter *adapter = (struct igb_adapter *) data; - igb_get_phy_info(&adapter->hw); -} - -/** - * igb_has_link - check shared code for link and determine up/down - * @adapter: pointer to driver private info - **/ -bool igb_has_link(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - bool link_active = false; - s32 ret_val = 0; - - /* get_link_status is set on LSC (link status) interrupt or - * rx sequence error interrupt. get_link_status will stay - * false until the e1000_check_for_link establishes link - * for copper adapters ONLY - */ - switch (hw->phy.media_type) { - case e1000_media_type_copper: - if (hw->mac.get_link_status) { - ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - } else { - link_active = true; - } - break; - case e1000_media_type_internal_serdes: - ret_val = hw->mac.ops.check_for_link(hw); - link_active = hw->mac.serdes_has_link; - break; - default: - case e1000_media_type_unknown: - break; - } - - return link_active; -} - -static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) -{ - bool ret = false; - u32 ctrl_ext, thstat; - - /* check for thermal sensor event on i350, copper only */ - if (hw->mac.type == e1000_i350) { - thstat = rd32(E1000_THSTAT); - ctrl_ext = rd32(E1000_CTRL_EXT); - - if ((hw->phy.media_type == e1000_media_type_copper) && - !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) { - ret = !!(thstat & event); - } - } - - return ret; -} - -/** - * igb_watchdog - Timer Call-back - * @data: pointer to adapter cast into an unsigned long - **/ -static void igb_watchdog(unsigned long data) -{ - struct igb_adapter *adapter = (struct igb_adapter *)data; - /* Do the rest outside of interrupt context */ - schedule_work(&adapter->watchdog_task); -} - -static void igb_watchdog_task(struct work_struct *work) -{ - struct igb_adapter *adapter = container_of(work, - struct igb_adapter, - watchdog_task); - struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 link; - int i; - - link = igb_has_link(adapter); - if (link) { - if (!netif_carrier_ok(netdev)) { - u32 ctrl; - hw->mac.ops.get_speed_and_duplex(hw, - &adapter->link_speed, - &adapter->link_duplex); - - ctrl = rd32(E1000_CTRL); - /* Links status 
message must follow this format */ - printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " - "Flow Control: %s\n", - netdev->name, - adapter->link_speed, - adapter->link_duplex == FULL_DUPLEX ? - "Full Duplex" : "Half Duplex", - ((ctrl & E1000_CTRL_TFCE) && - (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" : - ((ctrl & E1000_CTRL_RFCE) ? "RX" : - ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); - - /* check for thermal sensor event */ - if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) { - printk(KERN_INFO "igb: %s The network adapter " - "link speed was downshifted " - "because it overheated.\n", - netdev->name); - } - - /* adjust timeout factor according to speed/duplex */ - adapter->tx_timeout_factor = 1; - switch (adapter->link_speed) { - case SPEED_10: - adapter->tx_timeout_factor = 14; - break; - case SPEED_100: - /* maybe add some timeout factor ? */ - break; - } - - netif_carrier_on(netdev); - - igb_ping_all_vfs(adapter); - igb_check_vf_rate_limit(adapter); - - /* link state has changed, schedule phy info update */ - if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->phy_info_timer, - round_jiffies(jiffies + 2 * HZ)); - } - } else { - if (netif_carrier_ok(netdev)) { - adapter->link_speed = 0; - adapter->link_duplex = 0; - - /* check for thermal sensor event */ - if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) { - printk(KERN_ERR "igb: %s The network adapter " - "was stopped because it " - "overheated.\n", - netdev->name); - } - - /* Links status message must follow this format */ - printk(KERN_INFO "igb: %s NIC Link is Down\n", - netdev->name); - netif_carrier_off(netdev); - - igb_ping_all_vfs(adapter); - - /* link state has changed, schedule phy info update */ - if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->phy_info_timer, - round_jiffies(jiffies + 2 * HZ)); - } - } - - spin_lock(&adapter->stats64_lock); - igb_update_stats(adapter, &adapter->stats64); - spin_unlock(&adapter->stats64_lock); - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct igb_ring *tx_ring = adapter->tx_ring[i]; - if (!netif_carrier_ok(netdev)) { - /* We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). */ - if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { - adapter->tx_timeout_count++; - schedule_work(&adapter->reset_task); - /* return immediately since reset is imminent */ - return; - } - } - - /* Force detection of hung controller every watchdog period */ - tx_ring->detect_tx_hung = true; - } - - /* Cause software interrupt to ensure rx ring is cleaned */ - if (adapter->msix_entries) { - u32 eics = 0; - for (i = 0; i < adapter->num_q_vectors; i++) { - struct igb_q_vector *q_vector = adapter->q_vector[i]; - eics |= q_vector->eims_value; - } - wr32(E1000_EICS, eics); - } else { - wr32(E1000_ICS, E1000_ICS_RXDMT0); - } - - igb_spoof_check(adapter); - - /* Reset the timer */ - if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 2 * HZ)); -} - -enum latency_range { - lowest_latency = 0, - low_latency = 1, - bulk_latency = 2, - latency_invalid = 255 -}; - -/** - * igb_update_ring_itr - update the dynamic ITR value based on packet size - * - * Stores a new ITR value based on strictly on packet size. This - * algorithm is less sophisticated than that used in igb_update_itr, - * due to the difficulty of synchronizing statistics across multiple - * receive rings. 
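[Editor's note] The nested ternary in the watchdog's link-up message above decodes the TFCE/RFCE control bits into a flow-control string. The same logic, unrolled in a userspace sketch with placeholder bit positions:

#include <stdio.h>

#define CTRL_TFCE (1u << 28)   /* illustrative bit positions */
#define CTRL_RFCE (1u << 27)

static const char *fc_string(unsigned ctrl)
{
        if ((ctrl & CTRL_TFCE) && (ctrl & CTRL_RFCE))
                return "RX/TX";
        if (ctrl & CTRL_RFCE)
                return "RX";
        if (ctrl & CTRL_TFCE)
                return "TX";
        return "None";
}

int main(void)
{
        printf("%s\n", fc_string(CTRL_TFCE | CTRL_RFCE)); /* RX/TX */
        printf("%s\n", fc_string(0));                     /* None */
        return 0;
}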
The divisors and thresholds used by this function - * were determined based on theoretical maximum wire speed and testing - * data, in order to minimize response time while increasing bulk - * throughput. - * This functionality is controlled by the InterruptThrottleRate module - * parameter (see igb_param.c) - * NOTE: This function is called only when operating in a multiqueue - * receive environment. - * @q_vector: pointer to q_vector - **/ -static void igb_update_ring_itr(struct igb_q_vector *q_vector) -{ - int new_val = q_vector->itr_val; - int avg_wire_size = 0; - struct igb_adapter *adapter = q_vector->adapter; - struct igb_ring *ring; - unsigned int packets; - - /* For non-gigabit speeds, just fix the interrupt rate at 4000 - * ints/sec - ITR timer value of 120 ticks. - */ - if (adapter->link_speed != SPEED_1000) { - new_val = 976; - goto set_itr_val; - } - - ring = q_vector->rx_ring; - if (ring) { - packets = ACCESS_ONCE(ring->total_packets); - - if (packets) - avg_wire_size = ring->total_bytes / packets; - } - - ring = q_vector->tx_ring; - if (ring) { - packets = ACCESS_ONCE(ring->total_packets); - - if (packets) - avg_wire_size = max_t(u32, avg_wire_size, - ring->total_bytes / packets); - } - - /* if avg_wire_size isn't set no work was done */ - if (!avg_wire_size) - goto clear_counts; - - /* Add 24 bytes to size to account for CRC, preamble, and gap */ - avg_wire_size += 24; - - /* Don't starve jumbo frames */ - avg_wire_size = min(avg_wire_size, 3000); - - /* Give a little boost to mid-size frames */ - if ((avg_wire_size > 300) && (avg_wire_size < 1200)) - new_val = avg_wire_size / 3; - else - new_val = avg_wire_size / 2; - - /* when in itr mode 3 do not exceed 20K ints/sec */ - if (adapter->rx_itr_setting == 3 && new_val < 196) - new_val = 196; - -set_itr_val: - if (new_val != q_vector->itr_val) { - q_vector->itr_val = new_val; - q_vector->set_itr = 1; - } -clear_counts: - if (q_vector->rx_ring) { - q_vector->rx_ring->total_bytes = 0; - q_vector->rx_ring->total_packets = 0; - } - if (q_vector->tx_ring) { - q_vector->tx_ring->total_bytes = 0; - q_vector->tx_ring->total_packets = 0; - } -} - -/** - * igb_update_itr - update the dynamic ITR value based on statistics - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. - * this functionality is controlled by the InterruptThrottleRate module - * parameter (see igb_param.c) - * NOTE: These calculations are only valid when operating in a single- - * queue environment. 
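[Editor's note] The constants in igb_update_ring_itr are easiest to follow as a pure function of average wire size. The sketch below lifts them out; itr_from_avg_wire_size() is a hypothetical helper, the numbers come from the function above.

#include <stdio.h>

static int itr_from_avg_wire_size(int avg_wire_size, int conservative)
{
        int new_val;

        avg_wire_size += 24;                 /* CRC, preamble, inter-frame gap */
        if (avg_wire_size > 3000)
                avg_wire_size = 3000;        /* don't starve jumbo frames */

        if (avg_wire_size > 300 && avg_wire_size < 1200)
                new_val = avg_wire_size / 3; /* boost mid-size frames */
        else
                new_val = avg_wire_size / 2;

        if (conservative && new_val < 196)   /* itr mode 3: ~20K ints/s cap */
                new_val = 196;
        return new_val;
}

int main(void)
{
        printf("64B frames   -> itr %d\n", itr_from_avg_wire_size(64, 0));
        printf("1500B frames -> itr %d\n", itr_from_avg_wire_size(1500, 0));
        return 0;
}

Smaller ITR values mean more interrupts per second, so small frames (latency-sensitive) land on low values and bulk traffic on high ones.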
- * @adapter: pointer to adapter - * @itr_setting: current q_vector->itr_val - * @packets: the number of packets during this measurement interval - * @bytes: the number of bytes during this measurement interval - **/ -static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting, - int packets, int bytes) -{ - unsigned int retval = itr_setting; - - if (packets == 0) - goto update_itr_done; - - switch (itr_setting) { - case lowest_latency: - /* handle TSO and jumbo frames */ - if (bytes/packets > 8000) - retval = bulk_latency; - else if ((packets < 5) && (bytes > 512)) - retval = low_latency; - break; - case low_latency: /* 50 usec aka 20000 ints/s */ - if (bytes > 10000) { - /* this if handles the TSO accounting */ - if (bytes/packets > 8000) { - retval = bulk_latency; - } else if ((packets < 10) || ((bytes/packets) > 1200)) { - retval = bulk_latency; - } else if ((packets > 35)) { - retval = lowest_latency; - } - } else if (bytes/packets > 2000) { - retval = bulk_latency; - } else if (packets <= 2 && bytes < 512) { - retval = lowest_latency; - } - break; - case bulk_latency: /* 250 usec aka 4000 ints/s */ - if (bytes > 25000) { - if (packets > 35) - retval = low_latency; - } else if (bytes < 1500) { - retval = low_latency; - } - break; - } - -update_itr_done: - return retval; -} - -static void igb_set_itr(struct igb_adapter *adapter) -{ - struct igb_q_vector *q_vector = adapter->q_vector[0]; - u16 current_itr; - u32 new_itr = q_vector->itr_val; - - /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ - if (adapter->link_speed != SPEED_1000) { - current_itr = 0; - new_itr = 4000; - goto set_itr_now; - } - - adapter->rx_itr = igb_update_itr(adapter, - adapter->rx_itr, - q_vector->rx_ring->total_packets, - q_vector->rx_ring->total_bytes); - - adapter->tx_itr = igb_update_itr(adapter, - adapter->tx_itr, - q_vector->tx_ring->total_packets, - q_vector->tx_ring->total_bytes); - current_itr = max(adapter->rx_itr, adapter->tx_itr); - - /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency) - current_itr = low_latency; - - switch (current_itr) { - /* counts and packets in update_itr are dependent on these numbers */ - case lowest_latency: - new_itr = 56; /* aka 70,000 ints/sec */ - break; - case low_latency: - new_itr = 196; /* aka 20,000 ints/sec */ - break; - case bulk_latency: - new_itr = 980; /* aka 4,000 ints/sec */ - break; - default: - break; - } - -set_itr_now: - q_vector->rx_ring->total_bytes = 0; - q_vector->rx_ring->total_packets = 0; - q_vector->tx_ring->total_bytes = 0; - q_vector->tx_ring->total_packets = 0; - - if (new_itr != q_vector->itr_val) { - /* this attempts to bias the interrupt rate towards Bulk - * by adding intermediate steps when interrupt rate is - * increasing */ - new_itr = new_itr > q_vector->itr_val ? - max((new_itr * q_vector->itr_val) / - (new_itr + (q_vector->itr_val >> 2)), - new_itr) : - new_itr; - /* Don't write the value here; it resets the adapter's - * internal timer, and causes us to delay far longer than - * we should between interrupts. Instead, we write the ITR - * value at the beginning of the next interrupt so the timing - * ends up being correct. 
- */ - q_vector->itr_val = new_itr; - q_vector->set_itr = 1; - } -} - -#define IGB_TX_FLAGS_CSUM 0x00000001 -#define IGB_TX_FLAGS_VLAN 0x00000002 -#define IGB_TX_FLAGS_TSO 0x00000004 -#define IGB_TX_FLAGS_IPV4 0x00000008 -#define IGB_TX_FLAGS_TSTAMP 0x00000010 -#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IGB_TX_FLAGS_VLAN_SHIFT 16 - -static inline int igb_tso_adv(struct igb_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) -{ - struct e1000_adv_tx_context_desc *context_desc; - unsigned int i; - int err; - struct igb_buffer *buffer_info; - u32 info = 0, tu_cmd = 0; - u32 mss_l4len_idx; - u8 l4len; - - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } - - l4len = tcp_hdrlen(skb); - *hdr_len += l4len; - - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - } - - i = tx_ring->next_to_use; - - buffer_info = &tx_ring->buffer_info[i]; - context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i); - /* VLAN MACLEN IPLEN */ - if (tx_flags & IGB_TX_FLAGS_VLAN) - info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); - *hdr_len += skb_network_offset(skb); - info |= skb_network_header_len(skb); - *hdr_len += skb_network_header_len(skb); - context_desc->vlan_macip_lens = cpu_to_le32(info); - - /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); - - if (skb->protocol == htons(ETH_P_IP)) - tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - - context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); - - /* MSS L4LEN IDX */ - mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); - mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); - - /* For 82575, context index must be unique per ring. 
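[Editor's note] igb_tso_adv below packs the MSS and TCP header length into a single context-descriptor dword. A sketch of that packing, assuming shift values of 16 for E1000_ADVTXD_MSS_SHIFT and 8 for E1000_ADVTXD_L4LEN_SHIFT; treat those as assumptions rather than datasheet quotes.

#include <stdio.h>
#include <stdint.h>

#define ADVTXD_MSS_SHIFT   16   /* assumed field offsets */
#define ADVTXD_L4LEN_SHIFT  8

int main(void)
{
        uint32_t gso_size = 1448; /* typical MSS for a 1500-byte MTU */
        uint32_t l4len = 20;      /* TCP header with no options */
        uint32_t mss_l4len_idx = (gso_size << ADVTXD_MSS_SHIFT) |
                                 (l4len << ADVTXD_L4LEN_SHIFT);

        printf("mss_l4len_idx = 0x%08x\n", mss_l4len_idx);
        return 0;
}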
*/ - if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) - mss_l4len_idx |= tx_ring->reg_idx << 4; - - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - context_desc->seqnum_seed = 0; - - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->dma = 0; - i++; - if (i == tx_ring->count) - i = 0; - - tx_ring->next_to_use = i; - - return true; -} - -static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags) -{ - struct e1000_adv_tx_context_desc *context_desc; - struct device *dev = tx_ring->dev; - struct igb_buffer *buffer_info; - u32 info = 0, tu_cmd = 0; - unsigned int i; - - if ((skb->ip_summed == CHECKSUM_PARTIAL) || - (tx_flags & IGB_TX_FLAGS_VLAN)) { - i = tx_ring->next_to_use; - buffer_info = &tx_ring->buffer_info[i]; - context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i); - - if (tx_flags & IGB_TX_FLAGS_VLAN) - info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); - - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); - if (skb->ip_summed == CHECKSUM_PARTIAL) - info |= skb_network_header_len(skb); - - context_desc->vlan_macip_lens = cpu_to_le32(info); - - tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - __be16 protocol; - - if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { - const struct vlan_ethhdr *vhdr = - (const struct vlan_ethhdr*)skb->data; - - protocol = vhdr->h_vlan_encapsulated_proto; - } else { - protocol = skb->protocol; - } - - switch (protocol) { - case cpu_to_be16(ETH_P_IP): - tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - else if (ip_hdr(skb)->protocol == IPPROTO_SCTP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; - break; - case cpu_to_be16(ETH_P_IPV6): - /* XXX what about other V6 headers?? 
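[Editor's note] When the frame already carries an 802.1Q tag, igb_tx_csum_adv reads the real EtherType from h_vlan_encapsulated_proto rather than skb->protocol. A self-contained sketch with a simplified header layout:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* simplified stand-in for struct vlan_ethhdr */
struct vlan_ethhdr_sketch {
        uint8_t  h_dest[6];
        uint8_t  h_source[6];
        uint16_t h_vlan_proto;              /* 0x8100 when tagged */
        uint16_t h_vlan_TCI;
        uint16_t h_vlan_encapsulated_proto; /* inner EtherType */
};

int main(void)
{
        struct vlan_ethhdr_sketch vhdr = {
                .h_vlan_proto = htons(0x8100),
                .h_vlan_encapsulated_proto = htons(0x0800), /* IPv4 */
        };
        uint16_t protocol;

        if (vhdr.h_vlan_proto == htons(0x8100))
                protocol = vhdr.h_vlan_encapsulated_proto;
        else
                protocol = vhdr.h_vlan_proto;

        printf("inner protocol = 0x%04x\n", ntohs(protocol));
        return 0;
}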
*/ - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; - break; - default: - if (unlikely(net_ratelimit())) - dev_warn(dev, - "partial checksum but proto=%x!\n", - skb->protocol); - break; - } - } - - context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); - context_desc->seqnum_seed = 0; - if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) - context_desc->mss_l4len_idx = - cpu_to_le32(tx_ring->reg_idx << 4); - - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->dma = 0; - - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; - - return true; - } - return false; -} - -#define IGB_MAX_TXD_PWR 16 -#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) - -static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb, - unsigned int first) -{ - struct igb_buffer *buffer_info; - struct device *dev = tx_ring->dev; - unsigned int hlen = skb_headlen(skb); - unsigned int count = 0, i; - unsigned int f; - u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1; - - i = tx_ring->next_to_use; - - buffer_info = &tx_ring->buffer_info[i]; - BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD); - buffer_info->length = hlen; - /* set time_stamp *before* dma to help avoid a possible race */ - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->dma = dma_map_single(dev, skb->data, hlen, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, buffer_info->dma)) - goto dma_error; - - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f]; - unsigned int len = frag->size; - - count++; - i++; - if (i == tx_ring->count) - i = 0; - - buffer_info = &tx_ring->buffer_info[i]; - BUG_ON(len >= IGB_MAX_DATA_PER_TXD); - buffer_info->length = len; - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->mapped_as_page = true; - buffer_info->dma = dma_map_page(dev, - frag->page, - frag->page_offset, - len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, buffer_info->dma)) - goto dma_error; - - } - - tx_ring->buffer_info[i].skb = skb; - tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags; - /* multiply data chunks by size of headers */ - tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len; - tx_ring->buffer_info[i].gso_segs = gso_segs; - tx_ring->buffer_info[first].next_to_watch = i; - - return ++count; - -dma_error: - dev_err(dev, "TX DMA map failed\n"); - - /* clear timestamp and dma mappings for failed buffer_info mapping */ - buffer_info->dma = 0; - buffer_info->time_stamp = 0; - buffer_info->length = 0; - buffer_info->next_to_watch = 0; - buffer_info->mapped_as_page = false; - - /* clear timestamp and dma mappings for remaining portion of packet */ - while (count--) { - if (i == 0) - i = tx_ring->count; - i--; - buffer_info = &tx_ring->buffer_info[i]; - igb_unmap_and_free_tx_resource(tx_ring, buffer_info); - } - - return 0; -} - -static inline void igb_tx_queue_adv(struct igb_ring *tx_ring, - u32 tx_flags, int count, u32 paylen, - u8 hdr_len) -{ - union e1000_adv_tx_desc *tx_desc; - struct igb_buffer *buffer_info; - u32 olinfo_status = 0, cmd_type_len; - unsigned int i = tx_ring->next_to_use; - - cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | - E1000_ADVTXD_DCMD_DEXT); - - if (tx_flags & IGB_TX_FLAGS_VLAN) - cmd_type_len |= E1000_ADVTXD_DCMD_VLE; - - if (tx_flags & IGB_TX_FLAGS_TSTAMP) - cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP; - - if (tx_flags & IGB_TX_FLAGS_TSO) { - cmd_type_len |= E1000_ADVTXD_DCMD_TSE; - - /* insert tcp checksum */ - olinfo_status |= E1000_TXD_POPTS_TXSM << 8; - - /* insert ip
checksum */ - if (tx_flags & IGB_TX_FLAGS_IPV4) - olinfo_status |= E1000_TXD_POPTS_IXSM << 8; - - } else if (tx_flags & IGB_TX_FLAGS_CSUM) { - olinfo_status |= E1000_TXD_POPTS_TXSM << 8; - } - - if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) && - (tx_flags & (IGB_TX_FLAGS_CSUM | - IGB_TX_FLAGS_TSO | - IGB_TX_FLAGS_VLAN))) - olinfo_status |= tx_ring->reg_idx << 4; - - olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); - - do { - buffer_info = &tx_ring->buffer_info[i]; - tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); - tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); - tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type_len | buffer_info->length); - tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); - count--; - i++; - if (i == tx_ring->count) - i = 0; - } while (count > 0); - - tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD); - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). */ - wmb(); - - tx_ring->next_to_use = i; - writel(i, tx_ring->tail); - /* we need this if more than one processor can write to our tail - * at a time, it syncronizes IO on IA64/Altix systems */ - mmiowb(); -} - -static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) -{ - struct net_device *netdev = tx_ring->netdev; - - netif_stop_subqueue(netdev, tx_ring->queue_index); - - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. */ - if (igb_desc_unused(tx_ring) < size) - return -EBUSY; - - /* A reprieve! */ - netif_wake_subqueue(netdev, tx_ring->queue_index); - - u64_stats_update_begin(&tx_ring->tx_syncp2); - tx_ring->tx_stats.restart_queue2++; - u64_stats_update_end(&tx_ring->tx_syncp2); - - return 0; -} - -static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) -{ - if (igb_desc_unused(tx_ring) >= size) - return 0; - return __igb_maybe_stop_tx(tx_ring, size); -} - -netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, - struct igb_ring *tx_ring) -{ - int tso = 0, count; - u32 tx_flags = 0; - u16 first; - u8 hdr_len = 0; - - /* need: 1 descriptor per page, - * + 2 desc gap to keep tail from touching head, - * + 1 desc for skb->data, - * + 1 desc for context descriptor, - * otherwise try next time */ - if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) { - /* this is a hard error */ - return NETDEV_TX_BUSY; - } - - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - tx_flags |= IGB_TX_FLAGS_TSTAMP; - } - - if (vlan_tx_tag_present(skb)) { - tx_flags |= IGB_TX_FLAGS_VLAN; - tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); - } - - if (skb->protocol == htons(ETH_P_IP)) - tx_flags |= IGB_TX_FLAGS_IPV4; - - first = tx_ring->next_to_use; - if (skb_is_gso(skb)) { - tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len); - - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - } - - if (tso) - tx_flags |= IGB_TX_FLAGS_TSO; - else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) && - (skb->ip_summed == CHECKSUM_PARTIAL)) - tx_flags |= IGB_TX_FLAGS_CSUM; - - /* - * count reflects descriptors mapped, if 0 or less then mapping error - * has occurred and we need to rewind the descriptor queue - */ - count = igb_tx_map_adv(tx_ring, skb, first); - if (!count) { - 
dev_kfree_skb_any(skb); - tx_ring->buffer_info[first].time_stamp = 0; - tx_ring->next_to_use = first; - return NETDEV_TX_OK; - } - - igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len); - - /* Make sure there is space in the ring for the next send. */ - igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4); - - return NETDEV_TX_OK; -} - -static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, - struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct igb_ring *tx_ring; - int r_idx = 0; - - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (skb->len <= 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); - tx_ring = adapter->multi_tx_table[r_idx]; - - /* This goes back to the question of how to logically map a tx queue - * to a flow. Right now, performance is impacted slightly negatively - * if using multiple tx queues. If the stack breaks away from a - * single qdisc implementation, we can look at this again. */ - return igb_xmit_frame_ring_adv(skb, tx_ring); -} - -/** - * igb_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - **/ -static void igb_tx_timeout(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - /* Do the reset outside of interrupt context */ - adapter->tx_timeout_count++; - - if (hw->mac.type == e1000_82580) - hw->dev_spec._82575.global_device_reset = true; - - schedule_work(&adapter->reset_task); - wr32(E1000_EICS, - (adapter->eims_enable_mask & ~adapter->eims_other)); -} - -static void igb_reset_task(struct work_struct *work) -{ - struct igb_adapter *adapter; - adapter = container_of(work, struct igb_adapter, reset_task); - - igb_dump(adapter); - netdev_err(adapter->netdev, "Reset adapter\n"); - igb_reinit_locked(adapter); -} - -/** - * igb_get_stats64 - Get System Network Statistics - * @netdev: network interface device structure - * @stats: rtnl_link_stats64 pointer - * - **/ -static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - - spin_lock(&adapter->stats64_lock); - igb_update_stats(adapter, &adapter->stats64); - memcpy(stats, &adapter->stats64, sizeof(*stats)); - spin_unlock(&adapter->stats64_lock); - - return stats; -} - -/** - * igb_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - **/ -static int igb_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct pci_dev *pdev = adapter->pdev; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - u32 rx_buffer_len, i; - - if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { - dev_err(&pdev->dev, "Invalid MTU setting\n"); - return -EINVAL; - } - - if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { - dev_err(&pdev->dev, "MTU > 9216 not supported.\n"); - return -EINVAL; - } - - while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); - - /* igb_down has a dependency on max_frame_size */ - adapter->max_frame_size = max_frame; - - /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN - * means we reserve 2 more, this pushes us to allocate from the next - * larger slab size. - * i.e. 
RXBUFFER_2048 --> size-4096 slab - */ - - if (adapter->hw.mac.type == e1000_82580) - max_frame += IGB_TS_HDR_LEN; - - if (max_frame <= IGB_RXBUFFER_1024) - rx_buffer_len = IGB_RXBUFFER_1024; - else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) - rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; - else - rx_buffer_len = IGB_RXBUFFER_128; - - if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) || - (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN)) - rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN; - - if ((adapter->hw.mac.type == e1000_82580) && - (rx_buffer_len == IGB_RXBUFFER_128)) - rx_buffer_len += IGB_RXBUFFER_64; - - if (netif_running(netdev)) - igb_down(adapter); - - dev_info(&pdev->dev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); - netdev->mtu = new_mtu; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len; - - if (netif_running(netdev)) - igb_up(adapter); - else - igb_reset(adapter); - - clear_bit(__IGB_RESETTING, &adapter->state); - - return 0; -} - -/** - * igb_update_stats - Update the board statistics counters - * @adapter: board private structure - **/ - -void igb_update_stats(struct igb_adapter *adapter, - struct rtnl_link_stats64 *net_stats) -{ - struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - u32 reg, mpc; - u16 phy_tmp; - int i; - u64 bytes, packets; - unsigned int start; - u64 _bytes, _packets; - -#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF - - /* - * Prevent stats update while adapter is being reset, or if the pci - * connection is down. - */ - if (adapter->link_speed == 0) - return; - if (pci_channel_offline(pdev)) - return; - - bytes = 0; - packets = 0; - for (i = 0; i < adapter->num_rx_queues; i++) { - u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; - struct igb_ring *ring = adapter->rx_ring[i]; - - ring->rx_stats.drops += rqdpc_tmp; - net_stats->rx_fifo_errors += rqdpc_tmp; - - do { - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); - _bytes = ring->rx_stats.bytes; - _packets = ring->rx_stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); - bytes += _bytes; - packets += _packets; - } - - net_stats->rx_bytes = bytes; - net_stats->rx_packets = packets; - - bytes = 0; - packets = 0; - for (i = 0; i < adapter->num_tx_queues; i++) { - struct igb_ring *ring = adapter->tx_ring[i]; - do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); - _bytes = ring->tx_stats.bytes; - _packets = ring->tx_stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); - bytes += _bytes; - packets += _packets; - } - net_stats->tx_bytes = bytes; - net_stats->tx_packets = packets; - - /* read stats registers */ - adapter->stats.crcerrs += rd32(E1000_CRCERRS); - adapter->stats.gprc += rd32(E1000_GPRC); - adapter->stats.gorc += rd32(E1000_GORCL); - rd32(E1000_GORCH); /* clear GORCL */ - adapter->stats.bprc += rd32(E1000_BPRC); - adapter->stats.mprc += rd32(E1000_MPRC); - adapter->stats.roc += rd32(E1000_ROC); - - adapter->stats.prc64 += rd32(E1000_PRC64); - adapter->stats.prc127 += rd32(E1000_PRC127); - adapter->stats.prc255 += rd32(E1000_PRC255); - adapter->stats.prc511 += rd32(E1000_PRC511); - adapter->stats.prc1023 += rd32(E1000_PRC1023); - adapter->stats.prc1522 += rd32(E1000_PRC1522); - adapter->stats.symerrs += rd32(E1000_SYMERRS); - adapter->stats.sec += rd32(E1000_SEC); - - mpc = rd32(E1000_MPC); - adapter->stats.mpc += mpc; - net_stats->rx_fifo_errors += mpc; - adapter->stats.scc += rd32(E1000_SCC); - adapter->stats.ecol += 
rd32(E1000_ECOL); - adapter->stats.mcc += rd32(E1000_MCC); - adapter->stats.latecol += rd32(E1000_LATECOL); - adapter->stats.dc += rd32(E1000_DC); - adapter->stats.rlec += rd32(E1000_RLEC); - adapter->stats.xonrxc += rd32(E1000_XONRXC); - adapter->stats.xontxc += rd32(E1000_XONTXC); - adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); - adapter->stats.xofftxc += rd32(E1000_XOFFTXC); - adapter->stats.fcruc += rd32(E1000_FCRUC); - adapter->stats.gptc += rd32(E1000_GPTC); - adapter->stats.gotc += rd32(E1000_GOTCL); - rd32(E1000_GOTCH); /* clear GOTCL */ - adapter->stats.rnbc += rd32(E1000_RNBC); - adapter->stats.ruc += rd32(E1000_RUC); - adapter->stats.rfc += rd32(E1000_RFC); - adapter->stats.rjc += rd32(E1000_RJC); - adapter->stats.tor += rd32(E1000_TORH); - adapter->stats.tot += rd32(E1000_TOTH); - adapter->stats.tpr += rd32(E1000_TPR); - - adapter->stats.ptc64 += rd32(E1000_PTC64); - adapter->stats.ptc127 += rd32(E1000_PTC127); - adapter->stats.ptc255 += rd32(E1000_PTC255); - adapter->stats.ptc511 += rd32(E1000_PTC511); - adapter->stats.ptc1023 += rd32(E1000_PTC1023); - adapter->stats.ptc1522 += rd32(E1000_PTC1522); - - adapter->stats.mptc += rd32(E1000_MPTC); - adapter->stats.bptc += rd32(E1000_BPTC); - - adapter->stats.tpt += rd32(E1000_TPT); - adapter->stats.colc += rd32(E1000_COLC); - - adapter->stats.algnerrc += rd32(E1000_ALGNERRC); - /* read internal phy specific stats */ - reg = rd32(E1000_CTRL_EXT); - if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { - adapter->stats.rxerrc += rd32(E1000_RXERRC); - adapter->stats.tncrs += rd32(E1000_TNCRS); - } - - adapter->stats.tsctc += rd32(E1000_TSCTC); - adapter->stats.tsctfc += rd32(E1000_TSCTFC); - - adapter->stats.iac += rd32(E1000_IAC); - adapter->stats.icrxoc += rd32(E1000_ICRXOC); - adapter->stats.icrxptc += rd32(E1000_ICRXPTC); - adapter->stats.icrxatc += rd32(E1000_ICRXATC); - adapter->stats.ictxptc += rd32(E1000_ICTXPTC); - adapter->stats.ictxatc += rd32(E1000_ICTXATC); - adapter->stats.ictxqec += rd32(E1000_ICTXQEC); - adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); - adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); - - /* Fill out the OS statistics structure */ - net_stats->multicast = adapter->stats.mprc; - net_stats->collisions = adapter->stats.colc; - - /* Rx Errors */ - - /* RLEC on some newer hardware can be incorrect so build - * our own version based on RUC and ROC */ - net_stats->rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; - net_stats->rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; - net_stats->rx_crc_errors = adapter->stats.crcerrs; - net_stats->rx_frame_errors = adapter->stats.algnerrc; - net_stats->rx_missed_errors = adapter->stats.mpc; - - /* Tx Errors */ - net_stats->tx_errors = adapter->stats.ecol + - adapter->stats.latecol; - net_stats->tx_aborted_errors = adapter->stats.ecol; - net_stats->tx_window_errors = adapter->stats.latecol; - net_stats->tx_carrier_errors = adapter->stats.tncrs; - - /* Tx Dropped needs to be maintained elsewhere */ - - /* Phy Stats */ - if (hw->phy.media_type == e1000_media_type_copper) { - if ((adapter->link_speed == SPEED_1000) && - (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { - phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; - adapter->phy_stats.idle_errors += phy_tmp; - } - } - - /* Management Stats */ - adapter->stats.mgptc += rd32(E1000_MGTPTC); - adapter->stats.mgprc += rd32(E1000_MGTPRC); - adapter->stats.mgpdc += rd32(E1000_MGTPDC); - - /* OS2BMC Stats */ - reg = 
rd32(E1000_MANC); - if (reg & E1000_MANC_EN_BMC2OS) { - adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); - adapter->stats.o2bspc += rd32(E1000_O2BSPC); - adapter->stats.b2ospc += rd32(E1000_B2OSPC); - adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); - } -} - -static irqreturn_t igb_msix_other(int irq, void *data) -{ - struct igb_adapter *adapter = data; - struct e1000_hw *hw = &adapter->hw; - u32 icr = rd32(E1000_ICR); - /* reading ICR causes bit 31 of EICR to be cleared */ - - if (icr & E1000_ICR_DRSTA) - schedule_work(&adapter->reset_task); - - if (icr & E1000_ICR_DOUTSYNC) { - /* HW is reporting DMA is out of sync */ - adapter->stats.doosync++; - /* The DMA Out of Sync is also indication of a spoof event - * in IOV mode. Check the Wrong VM Behavior register to - * see if it is really a spoof event. */ - igb_check_wvbr(adapter); - } - - /* Check for a mailbox event */ - if (icr & E1000_ICR_VMMB) - igb_msg_task(adapter); - - if (icr & E1000_ICR_LSC) { - hw->mac.get_link_status = 1; - /* guard against interrupt when we're going down */ - if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); - } - - if (adapter->vfs_allocated_count) - wr32(E1000_IMS, E1000_IMS_LSC | - E1000_IMS_VMMB | - E1000_IMS_DOUTSYNC); - else - wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC); - wr32(E1000_EIMS, adapter->eims_other); - - return IRQ_HANDLED; -} - -static void igb_write_itr(struct igb_q_vector *q_vector) -{ - struct igb_adapter *adapter = q_vector->adapter; - u32 itr_val = q_vector->itr_val & 0x7FFC; - - if (!q_vector->set_itr) - return; - - if (!itr_val) - itr_val = 0x4; - - if (adapter->hw.mac.type == e1000_82575) - itr_val |= itr_val << 16; - else - itr_val |= 0x8000000; - - writel(itr_val, q_vector->itr_register); - q_vector->set_itr = 0; -} - -static irqreturn_t igb_msix_ring(int irq, void *data) -{ - struct igb_q_vector *q_vector = data; - - /* Write the ITR value calculated from the previous interrupt. 
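//
// Illustrative sketch (assumptions inferred from igb_write_itr() above;
// the helper name is hypothetical): how the throttle value is packed
// before the register write. The interval field occupies bits 2..14;
// the 82575 mirrors it into the high half-word, while later MACs set a
// "counter ignore" bit so the new interval takes effect immediately.
//
//    static u32 pack_eitr(u32 itr_val, bool is_82575)
//    {
//        u32 v = itr_val & 0x7FFC;   // interval field, bits 2..14
//        if (!v)
//            v = 0x4;                // never write a zero interval
//        if (is_82575)
//            v |= v << 16;           // 82575: value duplicated high
//        else
//            v |= 0x8000000;         // 82576+: counter-ignore bit
//        return v;
//    }
//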
*/ - igb_write_itr(q_vector); - - napi_schedule(&q_vector->napi); - - return IRQ_HANDLED; -} - -#ifdef CONFIG_IGB_DCA -static void igb_update_dca(struct igb_q_vector *q_vector) -{ - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; - int cpu = get_cpu(); - - if (q_vector->cpu == cpu) - goto out_no_update; - - if (q_vector->tx_ring) { - int q = q_vector->tx_ring->reg_idx; - u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); - if (hw->mac.type == e1000_82575) { - dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; - dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - } else { - dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; - dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << - E1000_DCA_TXCTRL_CPUID_SHIFT; - } - dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; - wr32(E1000_DCA_TXCTRL(q), dca_txctrl); - } - if (q_vector->rx_ring) { - int q = q_vector->rx_ring->reg_idx; - u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); - if (hw->mac.type == e1000_82575) { - dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; - dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - } else { - dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; - dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << - E1000_DCA_RXCTRL_CPUID_SHIFT; - } - dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; - dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; - dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; - wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); - } - q_vector->cpu = cpu; -out_no_update: - put_cpu(); -} - -static void igb_setup_dca(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - int i; - - if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) - return; - - /* Always use CB2 mode, difference is masked in the CB driver. */ - wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); - - for (i = 0; i < adapter->num_q_vectors; i++) { - adapter->q_vector[i]->cpu = -1; - igb_update_dca(adapter->q_vector[i]); - } -} - -static int __igb_notify_dca(struct device *dev, void *data) -{ - struct net_device *netdev = dev_get_drvdata(dev); - struct igb_adapter *adapter = netdev_priv(netdev); - struct pci_dev *pdev = adapter->pdev; - struct e1000_hw *hw = &adapter->hw; - unsigned long event = *(unsigned long *)data; - - switch (event) { - case DCA_PROVIDER_ADD: - /* if already enabled, don't do it again */ - if (adapter->flags & IGB_FLAG_DCA_ENABLED) - break; - if (dca_add_requester(dev) == 0) { - adapter->flags |= IGB_FLAG_DCA_ENABLED; - dev_info(&pdev->dev, "DCA enabled\n"); - igb_setup_dca(adapter); - break; - } - /* Fall Through since DCA is disabled. */ - case DCA_PROVIDER_REMOVE: - if (adapter->flags & IGB_FLAG_DCA_ENABLED) { - /* without this a class_device is left - * hanging around in the sysfs model */ - dca_remove_requester(dev); - dev_info(&pdev->dev, "DCA disabled\n"); - adapter->flags &= ~IGB_FLAG_DCA_ENABLED; - wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); - } - break; - } - - return 0; -} - -static int igb_notify_dca(struct notifier_block *nb, unsigned long event, - void *p) -{ - int ret_val; - - ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, - __igb_notify_dca); - - return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE; -} -#endif /* CONFIG_IGB_DCA */ - -static void igb_ping_all_vfs(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ping; - int i; - - for (i = 0 ; i < adapter->vfs_allocated_count; i++) { - ping = E1000_PF_CONTROL_MSG; - if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) - ping |= E1000_VT_MSGTYPE_CTS; - igb_write_mbx(hw, &ping, 1, i); - } -} - -static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) -{ - struct e1000_hw *hw = &adapter->hw; - u32 vmolr = rd32(E1000_VMOLR(vf)); - struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - - vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | - IGB_VF_FLAG_MULTI_PROMISC); - vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); - - if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { - vmolr |= E1000_VMOLR_MPME; - vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; - *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; - } else { - /* - * if we have hashes and we are clearing a multicast promisc - * flag we need to write the hashes to the MTA as this step - * was previously skipped - */ - if (vf_data->num_vf_mc_hashes > 30) { - vmolr |= E1000_VMOLR_MPME; - } else if (vf_data->num_vf_mc_hashes) { - int j; - vmolr |= E1000_VMOLR_ROMPE; - for (j = 0; j < vf_data->num_vf_mc_hashes; j++) - igb_mta_set(hw, vf_data->vf_mc_hashes[j]); - } - } - - wr32(E1000_VMOLR(vf), vmolr); - - /* there are flags left unprocessed, likely not supported */ - if (*msgbuf & E1000_VT_MSGINFO_MASK) - return -EINVAL; - - return 0; - -} - -static int igb_set_vf_multicasts(struct igb_adapter *adapter, - u32 *msgbuf, u32 vf) -{ - int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; - u16 *hash_list = (u16 *)&msgbuf[1]; - struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - int i; - - /* salt away the number of multicast addresses assigned - * to this VF for later use to restore when the PF multi cast - * list changes - */ - vf_data->num_vf_mc_hashes = n; - - /* only up to 30 hash values supported */ - if (n > 30) - n = 30; - - /* store the hashes for later use */ - for (i = 0; i < n; i++) - vf_data->vf_mc_hashes[i] = hash_list[i]; - - /* Flush and reset the mta with the new values */ - igb_set_rx_mode(adapter->netdev); - - return 0; -} - -static void igb_restore_vf_multicasts(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct vf_data_storage *vf_data; - int i, j; - - for (i = 0; i < adapter->vfs_allocated_count; i++) { - u32 vmolr = rd32(E1000_VMOLR(i)); - vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); - - vf_data = &adapter->vf_data[i]; - - if ((vf_data->num_vf_mc_hashes > 30) || - (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { - vmolr |= E1000_VMOLR_MPME; - } else if (vf_data->num_vf_mc_hashes) { - vmolr |= E1000_VMOLR_ROMPE; - for (j = 0; j < vf_data->num_vf_mc_hashes; j++) - igb_mta_set(hw, vf_data->vf_mc_hashes[j]); - } - wr32(E1000_VMOLR(i), vmolr); - } -} - -static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) -{ - struct e1000_hw *hw = &adapter->hw; - u32 pool_mask, reg, vid; - int i; - - pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); - - /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - - /* remove the vf from the pool */ - reg &= ~pool_mask; - - /* if pool is empty then remove entry from vfta */ - if (!(reg & E1000_VLVF_POOLSEL_MASK) && - (reg & E1000_VLVF_VLANID_ENABLE)) { - reg = 0; - vid = reg & E1000_VLVF_VLANID_MASK; - igb_vfta_set(hw, vid, false); - } - - 
wr32(E1000_VLVF(i), reg); - } - - adapter->vf_data[vf].vlans_enabled = 0; -} - -static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) -{ - struct e1000_hw *hw = &adapter->hw; - u32 reg, i; - - /* The vlvf table only exists on 82576 hardware and newer */ - if (hw->mac.type < e1000_82576) - return -1; - - /* we only need to do this if VMDq is enabled */ - if (!adapter->vfs_allocated_count) - return -1; - - /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if ((reg & E1000_VLVF_VLANID_ENABLE) && - vid == (reg & E1000_VLVF_VLANID_MASK)) - break; - } - - if (add) { - if (i == E1000_VLVF_ARRAY_SIZE) { - /* Did not find a matching VLAN ID entry that was - * enabled. Search for a free filter entry, i.e. - * one without the enable bit set - */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if (!(reg & E1000_VLVF_VLANID_ENABLE)) - break; - } - } - if (i < E1000_VLVF_ARRAY_SIZE) { - /* Found an enabled/available entry */ - reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); - - /* if !enabled we need to set this up in vfta */ - if (!(reg & E1000_VLVF_VLANID_ENABLE)) { - /* add VID to filter table */ - igb_vfta_set(hw, vid, true); - reg |= E1000_VLVF_VLANID_ENABLE; - } - reg &= ~E1000_VLVF_VLANID_MASK; - reg |= vid; - wr32(E1000_VLVF(i), reg); - - /* do not modify RLPML for PF devices */ - if (vf >= adapter->vfs_allocated_count) - return 0; - - if (!adapter->vf_data[vf].vlans_enabled) { - u32 size; - reg = rd32(E1000_VMOLR(vf)); - size = reg & E1000_VMOLR_RLPML_MASK; - size += 4; - reg &= ~E1000_VMOLR_RLPML_MASK; - reg |= size; - wr32(E1000_VMOLR(vf), reg); - } - - adapter->vf_data[vf].vlans_enabled++; - return 0; - } - } else { - if (i < E1000_VLVF_ARRAY_SIZE) { - /* remove vf from the pool */ - reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf)); - /* if pool is empty then remove entry from vfta */ - if (!(reg & E1000_VLVF_POOLSEL_MASK)) { - reg = 0; - igb_vfta_set(hw, vid, false); - } - wr32(E1000_VLVF(i), reg); - - /* do not modify RLPML for PF devices */ - if (vf >= adapter->vfs_allocated_count) - return 0; - - adapter->vf_data[vf].vlans_enabled--; - if (!adapter->vf_data[vf].vlans_enabled) { - u32 size; - reg = rd32(E1000_VMOLR(vf)); - size = reg & E1000_VMOLR_RLPML_MASK; - size -= 4; - reg &= ~E1000_VMOLR_RLPML_MASK; - reg |= size; - wr32(E1000_VMOLR(vf), reg); - } - } - } - return 0; -} - -static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) -{ - struct e1000_hw *hw = &adapter->hw; - - if (vid) - wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); - else - wr32(E1000_VMVIR(vf), 0); -} - -static int igb_ndo_set_vf_vlan(struct net_device *netdev, - int vf, u16 vlan, u8 qos) -{ - int err = 0; - struct igb_adapter *adapter = netdev_priv(netdev); - - if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) - return -EINVAL; - if (vlan || qos) { - err = igb_vlvf_set(adapter, vlan, !!vlan, vf); - if (err) - goto out; - igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); - igb_set_vmolr(adapter, vf, !vlan); - adapter->vf_data[vf].pf_vlan = vlan; - adapter->vf_data[vf].pf_qos = qos; - dev_info(&adapter->pdev->dev, - "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF VLAN has been set," - " but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before" - " attempting to use the VF device.\n"); - } - } else { - 
igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, - false, vf); - igb_set_vmvir(adapter, vlan, vf); - igb_set_vmolr(adapter, vf, true); - adapter->vf_data[vf].pf_vlan = 0; - adapter->vf_data[vf].pf_qos = 0; - } -out: - return err; -} - -static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) -{ - int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; - int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); - - return igb_vlvf_set(adapter, vid, add, vf); -} - -static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) -{ - /* clear flags - except flag that indicates PF has set the MAC */ - adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; - adapter->vf_data[vf].last_nack = jiffies; - - /* reset offloads to defaults */ - igb_set_vmolr(adapter, vf, true); - - /* reset vlans for device */ - igb_clear_vf_vfta(adapter, vf); - if (adapter->vf_data[vf].pf_vlan) - igb_ndo_set_vf_vlan(adapter->netdev, vf, - adapter->vf_data[vf].pf_vlan, - adapter->vf_data[vf].pf_qos); - else - igb_clear_vf_vfta(adapter, vf); - - /* reset multicast table array for vf */ - adapter->vf_data[vf].num_vf_mc_hashes = 0; - - /* Flush and reset the mta with the new values */ - igb_set_rx_mode(adapter->netdev); -} - -static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) -{ - unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - - /* generate a new mac address as we were hotplug removed/added */ - if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) - random_ether_addr(vf_mac); - - /* process remaining reset events */ - igb_vf_reset(adapter, vf); -} - -static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) -{ - struct e1000_hw *hw = &adapter->hw; - unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - int rar_entry = hw->mac.rar_entry_count - (vf + 1); - u32 reg, msgbuf[3]; - u8 *addr = (u8 *)(&msgbuf[1]); - - /* process all the same items cleared in a function level reset */ - igb_vf_reset(adapter, vf); - - /* set vf mac address */ - igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); - - /* enable transmit and receive for vf */ - reg = rd32(E1000_VFTE); - wr32(E1000_VFTE, reg | (1 << vf)); - reg = rd32(E1000_VFRE); - wr32(E1000_VFRE, reg | (1 << vf)); - - adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; - - /* reply to reset with ack and vf mac address */ - msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; - memcpy(addr, vf_mac, 6); - igb_write_mbx(hw, msgbuf, 3, vf); -} - -static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) -{ - /* - * The VF MAC Address is stored in a packed array of bytes - * starting at the second 32 bit word of the msg array - */ - unsigned char *addr = (char *)&msg[1]; - int err = -1; - - if (is_valid_ether_addr(addr)) - err = igb_set_vf_mac(adapter, vf, addr); - - return err; -} - -static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) -{ - struct e1000_hw *hw = &adapter->hw; - struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - u32 msg = E1000_VT_MSGTYPE_NACK; - - /* if device isn't clear to send it shouldn't be reading either */ - if (!(vf_data->flags & IGB_VF_FLAG_CTS) && - time_after(jiffies, vf_data->last_nack + (2 * HZ))) { - igb_write_mbx(hw, &msg, 1, vf); - vf_data->last_nack = jiffies; - } -} - -static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) -{ - struct pci_dev *pdev = adapter->pdev; - u32 msgbuf[E1000_VFMAILBOX_SIZE]; - struct e1000_hw *hw = &adapter->hw; - struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - s32 retval; 
- - retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); - - if (retval) { - /* if receive failed revoke VF CTS stats and restart init */ - dev_err(&pdev->dev, "Error receiving message from VF\n"); - vf_data->flags &= ~IGB_VF_FLAG_CTS; - if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) - return; - goto out; - } - - /* this is a message we already processed, do nothing */ - if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) - return; - - /* - * until the vf completes a reset it should not be - * allowed to start any configuration. - */ - - if (msgbuf[0] == E1000_VF_RESET) { - igb_vf_reset_msg(adapter, vf); - return; - } - - if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { - if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) - return; - retval = -1; - goto out; - } - - switch ((msgbuf[0] & 0xFFFF)) { - case E1000_VF_SET_MAC_ADDR: - retval = -EINVAL; - if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC)) - retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); - else - dev_warn(&pdev->dev, - "VF %d attempted to override administratively " - "set MAC address\nReload the VF driver to " - "resume operations\n", vf); - break; - case E1000_VF_SET_PROMISC: - retval = igb_set_vf_promisc(adapter, msgbuf, vf); - break; - case E1000_VF_SET_MULTICAST: - retval = igb_set_vf_multicasts(adapter, msgbuf, vf); - break; - case E1000_VF_SET_LPE: - retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); - break; - case E1000_VF_SET_VLAN: - retval = -1; - if (vf_data->pf_vlan) - dev_warn(&pdev->dev, - "VF %d attempted to override administratively " - "set VLAN tag\nReload the VF driver to " - "resume operations\n", vf); - else - retval = igb_set_vf_vlan(adapter, msgbuf, vf); - break; - default: - dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); - retval = -1; - break; - } - - msgbuf[0] |= E1000_VT_MSGTYPE_CTS; -out: - /* notify the VF of the results of what it sent us */ - if (retval) - msgbuf[0] |= E1000_VT_MSGTYPE_NACK; - else - msgbuf[0] |= E1000_VT_MSGTYPE_ACK; - - igb_write_mbx(hw, msgbuf, 1, vf); -} - -static void igb_msg_task(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 vf; - - for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { - /* process any reset requests */ - if (!igb_check_for_rst(hw, vf)) - igb_vf_reset_event(adapter, vf); - - /* process any messages pending */ - if (!igb_check_for_msg(hw, vf)) - igb_rcv_msg_from_vf(adapter, vf); - - /* process any acks */ - if (!igb_check_for_ack(hw, vf)) - igb_rcv_ack_from_vf(adapter, vf); - } -} - -/** - * igb_set_uta - Set unicast filter table address - * @adapter: board private structure - * - * The unicast table address is a register array of 32-bit registers. 
- * The table is meant to be used in a way similar to how the MTA is used - * however due to certain limitations in the hardware it is necessary to - * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous - * enable bit to allow vlan tag stripping when promiscuous mode is enabled - **/ -static void igb_set_uta(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - int i; - - /* The UTA table only exists on 82576 hardware and newer */ - if (hw->mac.type < e1000_82576) - return; - - /* we only need to do this if VMDq is enabled */ - if (!adapter->vfs_allocated_count) - return; - - for (i = 0; i < hw->mac.uta_reg_count; i++) - array_wr32(E1000_UTA, i, ~0); -} - -/** - * igb_intr_msi - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t igb_intr_msi(int irq, void *data) -{ - struct igb_adapter *adapter = data; - struct igb_q_vector *q_vector = adapter->q_vector[0]; - struct e1000_hw *hw = &adapter->hw; - /* read ICR disables interrupts using IAM */ - u32 icr = rd32(E1000_ICR); - - igb_write_itr(q_vector); - - if (icr & E1000_ICR_DRSTA) - schedule_work(&adapter->reset_task); - - if (icr & E1000_ICR_DOUTSYNC) { - /* HW is reporting DMA is out of sync */ - adapter->stats.doosync++; - } - - if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { - hw->mac.get_link_status = 1; - if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); - } - - napi_schedule(&q_vector->napi); - - return IRQ_HANDLED; -} - -/** - * igb_intr - Legacy Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t igb_intr(int irq, void *data) -{ - struct igb_adapter *adapter = data; - struct igb_q_vector *q_vector = adapter->q_vector[0]; - struct e1000_hw *hw = &adapter->hw; - /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No - * need for the IMC write */ - u32 icr = rd32(E1000_ICR); - if (!icr) - return IRQ_NONE; /* Not our interrupt */ - - igb_write_itr(q_vector); - - /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is - * not set, then the adapter didn't send an interrupt */ - if (!(icr & E1000_ICR_INT_ASSERTED)) - return IRQ_NONE; - - if (icr & E1000_ICR_DRSTA) - schedule_work(&adapter->reset_task); - - if (icr & E1000_ICR_DOUTSYNC) { - /* HW is reporting DMA is out of sync */ - adapter->stats.doosync++; - } - - if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { - hw->mac.get_link_status = 1; - /* guard against interrupt when we're going down */ - if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); - } - - napi_schedule(&q_vector->napi); - - return IRQ_HANDLED; -} - -static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector) -{ - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; - - if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) || - (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) { - if (!adapter->msix_entries) - igb_set_itr(adapter); - else - igb_update_ring_itr(q_vector); - } - - if (!test_bit(__IGB_DOWN, &adapter->state)) { - if (adapter->msix_entries) - wr32(E1000_EIMS, q_vector->eims_value); - else - igb_irq_enable(adapter); - } -} - -/** - * igb_poll - NAPI Rx polling callback - * @napi: napi polling structure - * @budget: count of how many packets we should handle - **/ -static int igb_poll(struct napi_struct *napi, int budget) -{ - struct igb_q_vector *q_vector = container_of(napi, - struct igb_q_vector, - napi); - int tx_clean_complete = 1, work_done = 0; - -#ifdef CONFIG_IGB_DCA - if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) - igb_update_dca(q_vector); -#endif - if (q_vector->tx_ring) - tx_clean_complete = igb_clean_tx_irq(q_vector); - - if (q_vector->rx_ring) - igb_clean_rx_irq_adv(q_vector, &work_done, budget); - - if (!tx_clean_complete) - work_done = budget; - - /* If not enough Rx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - igb_ring_irq_enable(q_vector); - } - - return work_done; -} - -/** - * igb_systim_to_hwtstamp - convert system time value to hw timestamp - * @adapter: board private structure - * @shhwtstamps: timestamp structure to update - * @regval: unsigned 64bit system time value. - * - * We need to convert the system time value stored in the RX/TXSTMP registers - * into a hwtstamp which can be used by the upper level timestamping functions - */ -static void igb_systim_to_hwtstamp(struct igb_adapter *adapter, - struct skb_shared_hwtstamps *shhwtstamps, - u64 regval) -{ - u64 ns; - - /* - * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to - * 24 to match clock shift we setup earlier. 
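//
// Worked example of the shift (assuming IGB_82580_TSYNC_SHIFT == 24 and
// a cyclecounter registered with mult == 1, shift == 24, which is what
// the conversion below implies): a raw latch of 5 ns reads back as
// regval == 5, then
//
//    regval <<= 24;                              // 5 * 2^24 "cycles"
//    ns = timecounter_cyc2time(&clock, regval);  // (5 << 24) >> 24 == 5
//
// so the scaling cancels and the timecounter hands back nanoseconds.
//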
- */ - if (adapter->hw.mac.type == e1000_82580) - regval <<= IGB_82580_TSYNC_SHIFT; - - ns = timecounter_cyc2time(&adapter->clock, regval); - timecompare_update(&adapter->compare, ns); - memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); - shhwtstamps->hwtstamp = ns_to_ktime(ns); - shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns); -} - -/** - * igb_tx_hwtstamp - utility function which checks for TX time stamp - * @q_vector: pointer to q_vector containing needed info - * @buffer: pointer to igb_buffer structure - * - * If we were asked to do hardware stamping and such a time stamp is - * available, then it must have been for this skb here because we only - * allow only one such packet into the queue. - */ -static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info) -{ - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; - struct skb_shared_hwtstamps shhwtstamps; - u64 regval; - - /* if skb does not support hw timestamp or TX stamp not valid exit */ - if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) || - !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) - return; - - regval = rd32(E1000_TXSTMPL); - regval |= (u64)rd32(E1000_TXSTMPH) << 32; - - igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval); - skb_tstamp_tx(buffer_info->skb, &shhwtstamps); -} - -/** - * igb_clean_tx_irq - Reclaim resources after transmit completes - * @q_vector: pointer to q_vector containing needed info - * returns true if ring is completely cleaned - **/ -static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) -{ - struct igb_adapter *adapter = q_vector->adapter; - struct igb_ring *tx_ring = q_vector->tx_ring; - struct net_device *netdev = tx_ring->netdev; - struct e1000_hw *hw = &adapter->hw; - struct igb_buffer *buffer_info; - union e1000_adv_tx_desc *tx_desc, *eop_desc; - unsigned int total_bytes = 0, total_packets = 0; - unsigned int i, eop, count = 0; - bool cleaned = false; - - i = tx_ring->next_to_clean; - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop); - - while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && - (count < tx_ring->count)) { - rmb(); /* read buffer_info after eop_desc status */ - for (cleaned = false; !cleaned; count++) { - tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - cleaned = (i == eop); - - if (buffer_info->skb) { - total_bytes += buffer_info->bytecount; - /* gso_segs is currently only valid for tcp */ - total_packets += buffer_info->gso_segs; - igb_tx_hwtstamp(q_vector, buffer_info); - } - - igb_unmap_and_free_tx_resource(tx_ring, buffer_info); - tx_desc->wb.status = 0; - - i++; - if (i == tx_ring->count) - i = 0; - } - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop); - } - - tx_ring->next_to_clean = i; - - if (unlikely(count && - netif_carrier_ok(netdev) && - igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. 
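//
// Schematic of the stop/wake race this barrier closes (illustrative):
//
//    xmit path (producer)          clean path (consumer)
//    --------------------          ---------------------
//    sees ring full                tx_ring->next_to_clean = i;
//    netif_stop_subqueue()         smp_mb();
//    smp_mb();                     reads stopped bit + free count
//    re-checks free descriptors    netif_wake_subqueue()
//
// Each side writes one value and reads the other's; the paired barriers
// guarantee at least one side observes the final state, so the queue
// cannot be left stopped while free descriptors are available.
//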
- */ - smp_mb(); - if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && - !(test_bit(__IGB_DOWN, &adapter->state))) { - netif_wake_subqueue(netdev, tx_ring->queue_index); - - u64_stats_update_begin(&tx_ring->tx_syncp); - tx_ring->tx_stats.restart_queue++; - u64_stats_update_end(&tx_ring->tx_syncp); - } - } - - if (tx_ring->detect_tx_hung) { - /* Detect a transmit hang in hardware, this serializes the - * check with the clearing of time_stamp and movement of i */ - tx_ring->detect_tx_hung = false; - if (tx_ring->buffer_info[i].time_stamp && - time_after(jiffies, tx_ring->buffer_info[i].time_stamp + - (adapter->tx_timeout_factor * HZ)) && - !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { - - /* detected Tx unit hang */ - dev_err(tx_ring->dev, - "Detected Tx Unit Hang\n" - " Tx Queue <%d>\n" - " TDH <%x>\n" - " TDT <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " next_to_watch <%x>\n" - " jiffies <%lx>\n" - " desc.status <%x>\n", - tx_ring->queue_index, - readl(tx_ring->head), - readl(tx_ring->tail), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, - eop, - jiffies, - eop_desc->wb.status); - netif_stop_subqueue(netdev, tx_ring->queue_index); - } - } - tx_ring->total_bytes += total_bytes; - tx_ring->total_packets += total_packets; - u64_stats_update_begin(&tx_ring->tx_syncp); - tx_ring->tx_stats.bytes += total_bytes; - tx_ring->tx_stats.packets += total_packets; - u64_stats_update_end(&tx_ring->tx_syncp); - return count < tx_ring->count; -} - -static inline void igb_rx_checksum_adv(struct igb_ring *ring, - u32 status_err, struct sk_buff *skb) -{ - skb_checksum_none_assert(skb); - - /* Ignore Checksum bit is set or checksum is disabled through ethtool */ - if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) || - (status_err & E1000_RXD_STAT_IXSM)) - return; - - /* TCP/UDP checksum error bit is set */ - if (status_err & - (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { - /* - * work around errata with sctp packets where the TCPE aka - * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) - * packets, (aka let the stack check the crc32c) - */ - if ((skb->len == 60) && - (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) { - u64_stats_update_begin(&ring->rx_syncp); - ring->rx_stats.csum_err++; - u64_stats_update_end(&ring->rx_syncp); - } - /* let the stack verify checksum errors */ - return; - } - /* It must be a TCP or UDP packet with a valid checksum */ - if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - - dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err); -} - -static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, - struct sk_buff *skb) -{ - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; - u64 regval; - - /* - * If this bit is set, then the RX registers contain the time stamp. No - * other packet will be time stamped until we read these registers, so - * read the registers to make them available again. Because only one - * packet can be time stamped at a time, we know that the register - * values must belong to this one here and therefore we don't need to - * compare any of the additional attributes stored for it. - * - * If nothing went wrong, then it should have a shared tx_flags that we - * can turn into a skb_shared_hwtstamps. 
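//
// Register-pair detail, assumed from the usage below: the 64-bit stamp
// spans two 32-bit registers and is read low word first; reading
// RXSTMPH is what releases the latch for the next packet:
//
//    regval  = rd32(E1000_RXSTMPL);             // low 32 bits
//    regval |= (u64)rd32(E1000_RXSTMPH) << 32;  // high 32 bits, re-arms latch
//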
- */ - if (staterr & E1000_RXDADV_STAT_TSIP) { - u32 *stamp = (u32 *)skb->data; - regval = le32_to_cpu(*(stamp + 2)); - regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32; - skb_pull(skb, IGB_TS_HDR_LEN); - } else { - if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) - return; - - regval = rd32(E1000_RXSTMPL); - regval |= (u64)rd32(E1000_RXSTMPH) << 32; - } - - igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); -} -static inline u16 igb_get_hlen(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc) -{ - /* HW will not DMA in data larger than the given buffer, even if it - * parses the (NFS, of course) header to be larger. In that case, it - * fills the header buffer and spills the rest into the page. - */ - u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & - E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; - if (hlen > rx_ring->rx_buffer_len) - hlen = rx_ring->rx_buffer_len; - return hlen; -} - -static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, - int *work_done, int budget) -{ - struct igb_ring *rx_ring = q_vector->rx_ring; - struct net_device *netdev = rx_ring->netdev; - struct device *dev = rx_ring->dev; - union e1000_adv_rx_desc *rx_desc , *next_rxd; - struct igb_buffer *buffer_info , *next_buffer; - struct sk_buff *skb; - bool cleaned = false; - int cleaned_count = 0; - int current_node = numa_node_id(); - unsigned int total_bytes = 0, total_packets = 0; - unsigned int i; - u32 staterr; - u16 length; - - i = rx_ring->next_to_clean; - buffer_info = &rx_ring->buffer_info[i]; - rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - - while (staterr & E1000_RXD_STAT_DD) { - if (*work_done >= budget) - break; - (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ - - skb = buffer_info->skb; - prefetch(skb->data - NET_IP_ALIGN); - buffer_info->skb = NULL; - - i++; - if (i == rx_ring->count) - i = 0; - - next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); - prefetch(next_rxd); - next_buffer = &rx_ring->buffer_info[i]; - - length = le16_to_cpu(rx_desc->wb.upper.length); - cleaned = true; - cleaned_count++; - - if (buffer_info->dma) { - dma_unmap_single(dev, buffer_info->dma, - rx_ring->rx_buffer_len, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) { - skb_put(skb, length); - goto send_up; - } - skb_put(skb, igb_get_hlen(rx_ring, rx_desc)); - } - - if (length) { - dma_unmap_page(dev, buffer_info->page_dma, - PAGE_SIZE / 2, DMA_FROM_DEVICE); - buffer_info->page_dma = 0; - - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - buffer_info->page, - buffer_info->page_offset, - length); - - if ((page_count(buffer_info->page) != 1) || - (page_to_nid(buffer_info->page) != current_node)) - buffer_info->page = NULL; - else - get_page(buffer_info->page); - - skb->len += length; - skb->data_len += length; - skb->truesize += length; - } - - if (!(staterr & E1000_RXD_STAT_EOP)) { - buffer_info->skb = next_buffer->skb; - buffer_info->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - goto next_desc; - } -send_up: - if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { - dev_kfree_skb_irq(skb); - goto next_desc; - } - - if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS)) - igb_rx_hwtstamp(q_vector, staterr, skb); - total_bytes += skb->len; - total_packets++; - - igb_rx_checksum_adv(rx_ring, staterr, skb); - - skb->protocol = eth_type_trans(skb, netdev); - skb_record_rx_queue(skb, rx_ring->queue_index); - - if 
(staterr & E1000_RXD_STAT_VP) { - u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); - - __vlan_hwaccel_put_tag(skb, vid); - } - napi_gro_receive(&q_vector->napi, skb); - -next_desc: - rx_desc->wb.upper.status_error = 0; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IGB_RX_BUFFER_WRITE) { - igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - } - - rx_ring->next_to_clean = i; - cleaned_count = igb_desc_unused(rx_ring); - - if (cleaned_count) - igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); - - rx_ring->total_packets += total_packets; - rx_ring->total_bytes += total_bytes; - u64_stats_update_begin(&rx_ring->rx_syncp); - rx_ring->rx_stats.packets += total_packets; - rx_ring->rx_stats.bytes += total_bytes; - u64_stats_update_end(&rx_ring->rx_syncp); - return cleaned; -} - -/** - * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split - * @adapter: address of board private structure - **/ -void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) -{ - struct net_device *netdev = rx_ring->netdev; - union e1000_adv_rx_desc *rx_desc; - struct igb_buffer *buffer_info; - struct sk_buff *skb; - unsigned int i; - int bufsz; - - i = rx_ring->next_to_use; - buffer_info = &rx_ring->buffer_info[i]; - - bufsz = rx_ring->rx_buffer_len; - - while (cleaned_count--) { - rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); - - if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) { - if (!buffer_info->page) { - buffer_info->page = netdev_alloc_page(netdev); - if (unlikely(!buffer_info->page)) { - u64_stats_update_begin(&rx_ring->rx_syncp); - rx_ring->rx_stats.alloc_failed++; - u64_stats_update_end(&rx_ring->rx_syncp); - goto no_buffers; - } - buffer_info->page_offset = 0; - } else { - buffer_info->page_offset ^= PAGE_SIZE / 2; - } - buffer_info->page_dma = - dma_map_page(rx_ring->dev, buffer_info->page, - buffer_info->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - buffer_info->page_dma)) { - buffer_info->page_dma = 0; - u64_stats_update_begin(&rx_ring->rx_syncp); - rx_ring->rx_stats.alloc_failed++; - u64_stats_update_end(&rx_ring->rx_syncp); - goto no_buffers; - } - } - - skb = buffer_info->skb; - if (!skb) { - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - if (unlikely(!skb)) { - u64_stats_update_begin(&rx_ring->rx_syncp); - rx_ring->rx_stats.alloc_failed++; - u64_stats_update_end(&rx_ring->rx_syncp); - goto no_buffers; - } - - buffer_info->skb = skb; - } - if (!buffer_info->dma) { - buffer_info->dma = dma_map_single(rx_ring->dev, - skb->data, - bufsz, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - buffer_info->dma)) { - buffer_info->dma = 0; - u64_stats_update_begin(&rx_ring->rx_syncp); - rx_ring->rx_stats.alloc_failed++; - u64_stats_update_end(&rx_ring->rx_syncp); - goto no_buffers; - } - } - /* Refresh the desc even if buffer_addrs didn't change because - * each write-back erases this info. 
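//
// The two read-format layouts being refreshed below (sketch; field
// meanings taken from how union e1000_adv_rx_desc is filled in):
//
//    packet-split (bufsz < IGB_RXBUFFER_1024):
//        rx_desc->read.pkt_addr = page_dma;   // payload into half a page
//        rx_desc->read.hdr_addr = dma;        // headers into skb->data
//
//    single buffer:
//        rx_desc->read.pkt_addr = dma;        // whole frame into skb->data
//        rx_desc->read.hdr_addr = 0;          // header split disabled
//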
*/ - if (bufsz < IGB_RXBUFFER_1024) { - rx_desc->read.pkt_addr = - cpu_to_le64(buffer_info->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); - rx_desc->read.hdr_addr = 0; - } - - i++; - if (i == rx_ring->count) - i = 0; - buffer_info = &rx_ring->buffer_info[i]; - } - -no_buffers: - if (rx_ring->next_to_use != i) { - rx_ring->next_to_use = i; - if (i == 0) - i = (rx_ring->count - 1); - else - i--; - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). */ - wmb(); - writel(i, rx_ring->tail); - } -} - -/** - * igb_mii_ioctl - - * @netdev: - * @ifreq: - * @cmd: - **/ -static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct mii_ioctl_data *data = if_mii(ifr); - - if (adapter->hw.phy.media_type != e1000_media_type_copper) - return -EOPNOTSUPP; - - switch (cmd) { - case SIOCGMIIPHY: - data->phy_id = adapter->hw.phy.addr; - break; - case SIOCGMIIREG: - if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, - &data->val_out)) - return -EIO; - break; - case SIOCSMIIREG: - default: - return -EOPNOTSUPP; - } - return 0; -} - -/** - * igb_hwtstamp_ioctl - control hardware time stamping - * @netdev: - * @ifreq: - * @cmd: - * - * Outgoing time stamping can be enabled and disabled. Play nice and - * disable it when requested, although it shouldn't case any overhead - * when no packet needs it. At most one packet in the queue may be - * marked for time stamping, otherwise it would be impossible to tell - * for sure to which packet the hardware time stamp belongs. - * - * Incoming time stamping has to be configured via the hardware - * filters. Not all combinations are supported, in particular event - * type has to be specified. Matching the kind of event packet is - * not supported, with the exception of "all V2 events regardless of - * level 2 or 4". 
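//
// For reference, the userspace side of this ioctl (a minimal sketch of
// the standard SIOCSHWTSTAMP convention; the interface name and socket
// fd are hypothetical, and <linux/net_tstamp.h>, <net/if.h> and
// <sys/ioctl.h> are assumed):
//
//    struct hwtstamp_config cfg = {
//        .tx_type   = HWTSTAMP_TX_ON,
//        .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
//    };
//    struct ifreq ifr;
//    memset(&ifr, 0, sizeof(ifr));
//    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
//    ifr.ifr_data = (void *)&cfg;
//    ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
//    // the driver may widen the filter (see below, where some modes
//    // become HWTSTAMP_FILTER_ALL), so re-read cfg.rx_filter on return
//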
- * - **/ -static int igb_hwtstamp_ioctl(struct net_device *netdev, - struct ifreq *ifr, int cmd) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct hwtstamp_config config; - u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; - u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; - u32 tsync_rx_cfg = 0; - bool is_l4 = false; - bool is_l2 = false; - u32 regval; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - tsync_tx_ctl = 0; - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - tsync_rx_ctl = 0; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_ALL: - /* - * register TSYNCRXCFG must be set, therefore it is not - * possible to time stamp both Sync and Delay_Req messages - * => fall back to time stamping all packets - */ - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; - is_l4 = true; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; - is_l4 = true; - break; - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; - is_l2 = true; - is_l4 = true; - config.rx_filter = HWTSTAMP_FILTER_SOME; - break; - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; - is_l2 = true; - is_l4 = true; - config.rx_filter = HWTSTAMP_FILTER_SOME; - break; - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - is_l2 = true; - break; - default: - return -ERANGE; - } - - if (hw->mac.type == e1000_82575) { - if (tsync_rx_ctl | tsync_tx_ctl) - return -EINVAL; - return 0; - } - - /* - * Per-packet timestamping only works if all packets are - * timestamped, so enable timestamping in all packets as - * long as one rx filter was configured. 
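//
// Concretely (an inference drawn from the code below): the 82580
// delivers the stamp in the packet buffer itself rather than in one
// latched register pair, so every filtered mode is widened to:
//
//    tsync_rx_ctl  = E1000_TSYNCRXCTL_ENABLED;
//    tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;  // stamp every packet
//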
- */ - if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) { - tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; - } - - /* enable/disable TX */ - regval = rd32(E1000_TSYNCTXCTL); - regval &= ~E1000_TSYNCTXCTL_ENABLED; - regval |= tsync_tx_ctl; - wr32(E1000_TSYNCTXCTL, regval); - - /* enable/disable RX */ - regval = rd32(E1000_TSYNCRXCTL); - regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); - regval |= tsync_rx_ctl; - wr32(E1000_TSYNCRXCTL, regval); - - /* define which PTP packets are time stamped */ - wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); - - /* define ethertype filter for timestamped packets */ - if (is_l2) - wr32(E1000_ETQF(3), - (E1000_ETQF_FILTER_ENABLE | /* enable filter */ - E1000_ETQF_1588 | /* enable timestamping */ - ETH_P_1588)); /* 1588 eth protocol type */ - else - wr32(E1000_ETQF(3), 0); - -#define PTP_PORT 319 - /* L4 Queue Filter[3]: filter by destination port and protocol */ - if (is_l4) { - u32 ftqf = (IPPROTO_UDP /* UDP */ - | E1000_FTQF_VF_BP /* VF not compared */ - | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ - | E1000_FTQF_MASK); /* mask all inputs */ - ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ - - wr32(E1000_IMIR(3), htons(PTP_PORT)); - wr32(E1000_IMIREXT(3), - (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); - if (hw->mac.type == e1000_82576) { - /* enable source port check */ - wr32(E1000_SPQF(3), htons(PTP_PORT)); - ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; - } - wr32(E1000_FTQF(3), ftqf); - } else { - wr32(E1000_FTQF(3), E1000_FTQF_MASK); - } - wrfl(); - - adapter->hwtstamp_config = config; - - /* clear TX/RX time stamp registers, just to be sure */ - regval = rd32(E1000_TXSTMPH); - regval = rd32(E1000_RXSTMPH); - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
- -EFAULT : 0; -} - -/** - * igb_ioctl - - * @netdev: - * @ifreq: - * @cmd: - **/ -static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return igb_mii_ioctl(netdev, ifr, cmd); - case SIOCSHWTSTAMP: - return igb_hwtstamp_ioctl(netdev, ifr, cmd); - default: - return -EOPNOTSUPP; - } -} - -s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) -{ - struct igb_adapter *adapter = hw->back; - u16 cap_offset; - - cap_offset = adapter->pdev->pcie_cap; - if (!cap_offset) - return -E1000_ERR_CONFIG; - - pci_read_config_word(adapter->pdev, cap_offset + reg, value); - - return 0; -} - -s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) -{ - struct igb_adapter *adapter = hw->back; - u16 cap_offset; - - cap_offset = adapter->pdev->pcie_cap; - if (!cap_offset) - return -E1000_ERR_CONFIG; - - pci_write_config_word(adapter->pdev, cap_offset + reg, *value); - - return 0; -} - -static void igb_vlan_mode(struct net_device *netdev, u32 features) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 ctrl, rctl; - - igb_irq_disable(adapter); - - if (features & NETIF_F_HW_VLAN_RX) { - /* enable VLAN tag insert/strip */ - ctrl = rd32(E1000_CTRL); - ctrl |= E1000_CTRL_VME; - wr32(E1000_CTRL, ctrl); - - /* Disable CFI check */ - rctl = rd32(E1000_RCTL); - rctl &= ~E1000_RCTL_CFIEN; - wr32(E1000_RCTL, rctl); - } else { - /* disable VLAN tag insert/strip */ - ctrl = rd32(E1000_CTRL); - ctrl &= ~E1000_CTRL_VME; - wr32(E1000_CTRL, ctrl); - } - - igb_rlpml_set(adapter); - - if (!test_bit(__IGB_DOWN, &adapter->state)) - igb_irq_enable(adapter); -} - -static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int pf_id = adapter->vfs_allocated_count; - - /* attempt to add filter to vlvf array */ - igb_vlvf_set(adapter, vid, true, pf_id); - - /* add the filter since PF can receive vlans w/o entry in vlvf */ - igb_vfta_set(hw, vid, true); - - set_bit(vid, adapter->active_vlans); -} - -static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int pf_id = adapter->vfs_allocated_count; - s32 err; - - igb_irq_disable(adapter); - - if (!test_bit(__IGB_DOWN, &adapter->state)) - igb_irq_enable(adapter); - - /* remove vlan from VLVF table array */ - err = igb_vlvf_set(adapter, vid, false, pf_id); - - /* if vid was not present in VLVF just remove it from table */ - if (err) - igb_vfta_set(hw, vid, false); - - clear_bit(vid, adapter->active_vlans); -} - -static void igb_restore_vlan(struct igb_adapter *adapter) -{ - u16 vid; - - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - igb_vlan_rx_add_vid(adapter->netdev, vid); -} - -int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) -{ - struct pci_dev *pdev = adapter->pdev; - struct e1000_mac_info *mac = &adapter->hw.mac; - - mac->autoneg = 0; - - /* Make sure dplx is at most 1 bit and lsb of speed is not set - * for the switch() below to work */ - if ((spd & 1) || (dplx & ~1)) - goto err_inval; - - /* Fiber NIC's only allow 1000 Gbps Full duplex */ - if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) && - spd != SPEED_1000 && - dplx != DUPLEX_FULL) - goto err_inval; - - switch (spd + dplx) { - case SPEED_10 + DUPLEX_HALF: - mac->forced_speed_duplex = 
ADVERTISE_10_HALF; - break; - case SPEED_10 + DUPLEX_FULL: - mac->forced_speed_duplex = ADVERTISE_10_FULL; - break; - case SPEED_100 + DUPLEX_HALF: - mac->forced_speed_duplex = ADVERTISE_100_HALF; - break; - case SPEED_100 + DUPLEX_FULL: - mac->forced_speed_duplex = ADVERTISE_100_FULL; - break; - case SPEED_1000 + DUPLEX_FULL: - mac->autoneg = 1; - adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; - break; - case SPEED_1000 + DUPLEX_HALF: /* not supported */ - default: - goto err_inval; - } - return 0; - -err_inval: - dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); - return -EINVAL; -} - -static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 ctrl, rctl, status; - u32 wufc = adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif - - netif_device_detach(netdev); - - if (netif_running(netdev)) - igb_close(netdev); - - igb_clear_interrupt_scheme(adapter); - -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; -#endif - - status = rd32(E1000_STATUS); - if (status & E1000_STATUS_LU) - wufc &= ~E1000_WUFC_LNKC; - - if (wufc) { - igb_setup_rctl(adapter); - igb_set_rx_mode(netdev); - - /* turn on all-multi mode if wake on multicast is enabled */ - if (wufc & E1000_WUFC_MC) { - rctl = rd32(E1000_RCTL); - rctl |= E1000_RCTL_MPE; - wr32(E1000_RCTL, rctl); - } - - ctrl = rd32(E1000_CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 - ctrl |= E1000_CTRL_ADVD3WUC; - wr32(E1000_CTRL, ctrl); - - /* Allow time for pending master requests to run */ - igb_disable_pcie_master(hw); - - wr32(E1000_WUC, E1000_WUC_PME_EN); - wr32(E1000_WUFC, wufc); - } else { - wr32(E1000_WUC, 0); - wr32(E1000_WUFC, 0); - } - - *enable_wake = wufc || adapter->en_mng_pt; - if (!*enable_wake) - igb_power_down_link(adapter); - else - igb_power_up_link(adapter); - - /* Release control of h/w to f/w. If f/w is AMT enabled, this - * would have already happened in close and is redundant. */ - igb_release_hw_control(adapter); - - pci_disable_device(pdev); - - return 0; -} - -#ifdef CONFIG_PM -static int igb_suspend(struct pci_dev *pdev, pm_message_t state) -{ - int retval; - bool wake; - - retval = __igb_shutdown(pdev, &wake); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; -} - -static int igb_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); - - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, - "igb: Cannot enable PCI device from suspend\n"); - return err; - } - pci_set_master(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - if (igb_init_interrupt_scheme(adapter)) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); - return -ENOMEM; - } - - igb_reset(adapter); - - /* let the f/w know that the h/w is now under the control of the - * driver. 
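//
// What igb_get_hw_control() amounts to, sketched (the usual
// e1000-family handoff; the bit name comes from the shared defines):
//
//    u32 ctrl_ext = rd32(E1000_CTRL_EXT);
//    wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
//
// DRV_LOAD tells manageability firmware (e.g. AMT) to stop driving the
// NIC while the OS owns it; igb_release_hw_control() clears the bit.
//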
*/ - igb_get_hw_control(adapter); - - wr32(E1000_WUS, ~0); - - if (netif_running(netdev)) { - err = igb_open(netdev); - if (err) - return err; - } - - netif_device_attach(netdev); - - return 0; -} -#endif - -static void igb_shutdown(struct pci_dev *pdev) -{ - bool wake; - - __igb_shutdown(pdev, &wake); - - if (system_state == SYSTEM_POWER_OFF) { - pci_wake_from_d3(pdev, wake); - pci_set_power_state(pdev, PCI_D3hot); - } -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ -static void igb_netpoll(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int i; - - if (!adapter->msix_entries) { - struct igb_q_vector *q_vector = adapter->q_vector[0]; - igb_irq_disable(adapter); - napi_schedule(&q_vector->napi); - return; - } - - for (i = 0; i < adapter->num_q_vectors; i++) { - struct igb_q_vector *q_vector = adapter->q_vector[i]; - wr32(E1000_EIMC, q_vector->eims_value); - napi_schedule(&q_vector->napi); - } -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - -/** - * igb_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state - * - * This function is called after a PCI bus error affecting - * this device has been detected. - */ -static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct igb_adapter *adapter = netdev_priv(netdev); - - netif_device_detach(netdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (netif_running(netdev)) - igb_down(adapter); - pci_disable_device(pdev); - - /* Request a slot slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * igb_io_slot_reset - called after the pci bus has been reset. - * @pdev: Pointer to PCI device - * - * Restart the card from scratch, as if from a cold-boot. Implementation - * resembles the first-half of the igb_resume routine. - */ -static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - pci_ers_result_t result; - int err; - - if (pci_enable_device_mem(pdev)) { - dev_err(&pdev->dev, - "Cannot re-enable PCI device after reset.\n"); - result = PCI_ERS_RESULT_DISCONNECT; - } else { - pci_set_master(pdev); - pci_restore_state(pdev); - pci_save_state(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - igb_reset(adapter); - wr32(E1000_WUS, ~0); - result = PCI_ERS_RESULT_RECOVERED; - } - - err = pci_cleanup_aer_uncorrect_error_status(pdev); - if (err) { - dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status " - "failed 0x%0x\n", err); - /* non-fatal, continue */ - } - - return result; -} - -/** - * igb_io_resume - called when traffic can start flowing again. - * @pdev: Pointer to PCI device - * - * This callback is called when the error recovery driver tells us that - * its OK to resume normal operation. Implementation resembles the - * second-half of the igb_resume routine. 
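//
// For orientation, the pci_error_handlers contract these callbacks
// implement (standard AER recovery sequence, summarized):
//
//    error_detected()  ->  netif_device_detach(), igb_down(),
//                          return PCI_ERS_RESULT_NEED_RESET
//    slot_reset()      ->  re-enable and restore PCI state, igb_reset(),
//                          return PCI_ERS_RESULT_RECOVERED
//    resume()          ->  igb_up(), netif_device_attach()
//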
- */
-static void igb_io_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- if (netif_running(netdev)) {
- if (igb_up(adapter)) {
- dev_err(&pdev->dev, "igb_up failed after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
-
- /* let the f/w know that the h/w is now under the control of the
- * driver. */
- igb_get_hw_control(adapter);
-}
-
-static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
- u8 qsel)
-{
- u32 rar_low, rar_high;
- struct e1000_hw *hw = &adapter->hw;
-
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to little endian
- */
- rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
-
- /* Indicate to hardware the Address is Valid. */
- rar_high |= E1000_RAH_AV;
-
- if (hw->mac.type == e1000_82575)
- rar_high |= E1000_RAH_POOL_1 * qsel;
- else
- rar_high |= E1000_RAH_POOL_1 << qsel;
-
- wr32(E1000_RAL(index), rar_low);
- wrfl();
- wr32(E1000_RAH(index), rar_high);
- wrfl();
-}
-
-static int igb_set_vf_mac(struct igb_adapter *adapter,
- int vf, unsigned char *mac_addr)
-{
- struct e1000_hw *hw = &adapter->hw;
- /* VF MAC addresses start at the end of the receive addresses and
- * move towards the first, so a collision should not be possible */
- int rar_entry = hw->mac.rar_entry_count - (vf + 1);
-
- memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
-
- igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
-
- return 0;
-}
-
-static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
- return -EINVAL;
- adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
- dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
- dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
- " change effective.");
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
- " but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
- " attempting to use the VF device.\n");
- }
- return igb_set_vf_mac(adapter, vf, mac);
-}
-
-static int igb_link_mbps(int internal_link_speed)
-{
- switch (internal_link_speed) {
- case SPEED_100:
- return 100;
- case SPEED_1000:
- return 1000;
- default:
- return 0;
- }
-}
-
-static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
- int link_speed)
-{
- int rf_dec, rf_int;
- u32 bcnrc_val;
-
- if (tx_rate != 0) {
- /* Calculate the rate factor values to set */
- rf_int = link_speed / tx_rate;
- rf_dec = (link_speed - (rf_int * tx_rate));
- rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
-
- bcnrc_val = E1000_RTTBCNRC_RS_ENA;
- bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
- E1000_RTTBCNRC_RF_INT_MASK);
- bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
- } else {
- bcnrc_val = 0;
- }
-
- wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
- wr32(E1000_RTTBCNRC, bcnrc_val);
-}
-
-static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
-{
- int actual_link_speed, i;
- bool reset_rate = false;
-
- /* VF TX rate limit was not set or not supported */
- if ((adapter->vf_rate_link_speed == 0) ||
- (adapter->hw.mac.type != e1000_82576))
- return;
-
- actual_link_speed = igb_link_mbps(adapter->link_speed);
- if (actual_link_speed != adapter->vf_rate_link_speed) {
- reset_rate = true;
- adapter->vf_rate_link_speed = 0;
- dev_info(&adapter->pdev->dev,
- "Link speed has been changed. 
VF Transmit " - "rate is disabled\n"); - } - - for (i = 0; i < adapter->vfs_allocated_count; i++) { - if (reset_rate) - adapter->vf_data[i].tx_rate = 0; - - igb_set_vf_rate_limit(&adapter->hw, i, - adapter->vf_data[i].tx_rate, - actual_link_speed); - } -} - -static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int actual_link_speed; - - if (hw->mac.type != e1000_82576) - return -EOPNOTSUPP; - - actual_link_speed = igb_link_mbps(adapter->link_speed); - if ((vf >= adapter->vfs_allocated_count) || - (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || - (tx_rate < 0) || (tx_rate > actual_link_speed)) - return -EINVAL; - - adapter->vf_rate_link_speed = actual_link_speed; - adapter->vf_data[vf].tx_rate = (u16)tx_rate; - igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); - - return 0; -} - -static int igb_ndo_get_vf_config(struct net_device *netdev, - int vf, struct ifla_vf_info *ivi) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - if (vf >= adapter->vfs_allocated_count) - return -EINVAL; - ivi->vf = vf; - memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); - ivi->tx_rate = adapter->vf_data[vf].tx_rate; - ivi->vlan = adapter->vf_data[vf].pf_vlan; - ivi->qos = adapter->vf_data[vf].pf_qos; - return 0; -} - -static void igb_vmm_control(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 reg; - - switch (hw->mac.type) { - case e1000_82575: - default: - /* replication is not supported for 82575 */ - return; - case e1000_82576: - /* notify HW that the MAC is adding vlan tags */ - reg = rd32(E1000_DTXCTL); - reg |= E1000_DTXCTL_VLAN_ADDED; - wr32(E1000_DTXCTL, reg); - case e1000_82580: - /* enable replication vlan tag stripping */ - reg = rd32(E1000_RPLOLR); - reg |= E1000_RPLOLR_STRVLAN; - wr32(E1000_RPLOLR, reg); - case e1000_i350: - /* none of the above registers are supported by i350 */ - break; - } - - if (adapter->vfs_allocated_count) { - igb_vmdq_set_loopback_pf(hw, true); - igb_vmdq_set_replication_pf(hw, true); - igb_vmdq_set_anti_spoofing_pf(hw, true, - adapter->vfs_allocated_count); - } else { - igb_vmdq_set_loopback_pf(hw, false); - igb_vmdq_set_replication_pf(hw, false); - } -} - -/* igb_main.c */ diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile deleted file mode 100644 index 0fa3db3..0000000 --- a/drivers/net/igbvf/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -################################################################################ -# -# Intel(R) 82576 Virtual Function Linux driver -# Copyright(c) 2009 - 2010 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. 
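The rate-factor arithmetic in igb_set_vf_rate_limit() above is easiest to see with numbers; a worked example assuming a 1000 Mb/s link:

	/* tx_rate = 300 on a 1000 Mb/s link:
	 *   rf_int = 1000 / 300 = 3
	 *   rf_dec = (1000 - 3 * 300) = 100, then scaled to a binary
	 *            fraction: (100 << E1000_RTTBCNRC_RF_INT_SHIFT) / 300
	 * BCNRC therefore holds ~3.33 in fixed point, i.e. link_speed / tx_rate.
	 */

igb_ndo_set_vf_bw() rejects anything outside 0..actual_link_speed before this runs, and igb_check_vf_rate_limit() clears all VF limits whenever the link speed changes.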
Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - -# -# Makefile for the Intel(R) 82576 VF ethernet driver -# - -obj-$(CONFIG_IGBVF) += igbvf.o - -igbvf-objs := vf.o \ - mbx.o \ - ethtool.o \ - netdev.o - diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h deleted file mode 100644 index 79f2604..0000000 --- a/drivers/net/igbvf/defines.h +++ /dev/null @@ -1,125 +0,0 @@ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_DEFINES_H_ -#define _E1000_DEFINES_H_ - -/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define REQ_RX_DESCRIPTOR_MULTIPLE 8 - -/* IVAR valid bit */ -#define E1000_IVAR_VALID 0x80 - -/* Receive Descriptor bit definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ -#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ - -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define E1000_RXDEXT_STATERR_TCPE 0x20000000 -#define E1000_RXDEXT_STATERR_IPE 0x40000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 - - -/* Same mask, but for extended and packet split descriptors */ -#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) - -/* Device Control */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ - -/* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ - -#define SPEED_10 10 -#define SPEED_100 
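The E1000_RXD_STAT_* and E1000_RXDEXT_STATERR_* bits above are tested against the descriptor write-back word. A minimal sketch of the usual consumption order (rx_desc is the advanced descriptor union used by the drivers in this series; complete_frame() is a hypothetical placeholder):

	u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	if (!(staterr & E1000_RXD_STAT_DD))
		return;		/* hardware has not written this descriptor back */
	if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
		goto drop;	/* CRC/symbol/sequence/carrier error */
	if (staterr & E1000_RXD_STAT_EOP)
		complete_frame();	/* last buffer of the frame */

igbvf_clean_rx_irq() later in this patch follows exactly this shape.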
100 -#define SPEED_1000 1000 -#define HALF_DUPLEX 1 -#define FULL_DUPLEX 2 - -/* Transmit Descriptor bit definitions */ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ - -#define MAX_JUMBO_FRAME_SIZE 0x3F00 - -/* 802.1q VLAN Packet Size */ -#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ - -/* Error Codes */ -#define E1000_SUCCESS 0 -#define E1000_ERR_CONFIG 3 -#define E1000_ERR_MAC_INIT 5 -#define E1000_ERR_MBX 15 - -#ifndef ETH_ADDR_LEN -#define ETH_ADDR_LEN 6 -#endif - -/* SRRCTL bit definitions */ -#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 -#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 -#define E1000_SRRCTL_DROP_EN 0x80000000 - -#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 - -/* Additional Descriptor Control definitions */ -#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ - -/* Direct Cache Access (DCA) definitions */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ - -#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ - -#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c deleted file mode 100644 index b0b14d6..0000000 --- a/drivers/net/igbvf/ethtool.c +++ /dev/null @@ -1,534 +0,0 @@ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
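A note on the SRRCTL fields above, since BSIZEPKT is unusual: the packet buffer size is programmed in 1 KB units by shifting right, not left. A hedged sketch of configuring queue 0 for one-buffer advanced descriptors (ew32() as used throughout these files):

	u32 srrctl = 0;

	/* e.g. a 2048-byte buffer: 2048 >> 10 = 2 (1 KB granularity) */
	srrctl |= ALIGN(rx_buffer_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	ew32(SRRCTL(0), srrctl);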
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* ethtool support for igbvf */ - -#include -#include -#include -#include -#include - -#include "igbvf.h" -#include - - -struct igbvf_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; - int base_stat_offset; -}; - -#define IGBVF_STAT(current, base) \ - sizeof(((struct igbvf_adapter *)0)->current), \ - offsetof(struct igbvf_adapter, current), \ - offsetof(struct igbvf_adapter, base) - -static const struct igbvf_stats igbvf_gstrings_stats[] = { - { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) }, - { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) }, - { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) }, - { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) }, - { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) }, - { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) }, - { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) }, - { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) }, - { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) }, - { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) }, - { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) }, - { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) }, - { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) }, -}; - -#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats) - -static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { - "Link test (on/offline)" -}; - -#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) - -static int igbvf_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 status; - - ecmd->supported = SUPPORTED_1000baseT_Full; - - ecmd->advertising = ADVERTISED_1000baseT_Full; - - ecmd->port = -1; - ecmd->transceiver = XCVR_DUMMY1; - - status = er32(STATUS); - if (status & E1000_STATUS_LU) { - if (status & E1000_STATUS_SPEED_1000) - ethtool_cmd_speed_set(ecmd, SPEED_1000); - else if (status & E1000_STATUS_SPEED_100) - ethtool_cmd_speed_set(ecmd, SPEED_100); - else - ethtool_cmd_speed_set(ecmd, SPEED_10); - - if (status & E1000_STATUS_FD) - ecmd->duplex = DUPLEX_FULL; - else - ecmd->duplex = DUPLEX_HALF; - } else { - ethtool_cmd_speed_set(ecmd, -1); - ecmd->duplex = -1; - } - - ecmd->autoneg = AUTONEG_DISABLE; - - return 0; -} - -static int igbvf_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - return -EOPNOTSUPP; -} - -static void igbvf_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - return; -} - -static int igbvf_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - return -EOPNOTSUPP; -} - -static u32 igbvf_get_rx_csum(struct net_device *netdev) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - return !(adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED); -} - -static int igbvf_set_rx_csum(struct net_device *netdev, u32 data) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - - if (data) - adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED; - else - adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED; - - return 0; -} - -static u32 igbvf_get_tx_csum(struct net_device *netdev) -{ - return (netdev->features & NETIF_F_IP_CSUM) != 0; -} - -static int igbvf_set_tx_csum(struct net_device *netdev, u32 data) -{ - if (data) - netdev->features |= 
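The IGBVF_STAT() macro above records a width plus two offsets per statistic so the dump code can subtract a baseline snapshot (the VF hardware counters never reset; see the zero_base/base_* fields in igbvf.h later in this patch). Expanded by hand, the first table entry is equivalent to:

	{ "rx_packets",
	  sizeof(((struct igbvf_adapter *)0)->stats.gprc),  /* width: u64 */
	  offsetof(struct igbvf_adapter, stats.gprc),       /* live counter */
	  offsetof(struct igbvf_adapter, stats.base_gprc) } /* load-time base */

Purely software counters such as restart_queue pair with the always-zero zero_base field so the same subtraction works for them.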
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - else - netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - return 0; -} - -static int igbvf_set_tso(struct net_device *netdev, u32 data) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - - if (data) { - netdev->features |= NETIF_F_TSO; - netdev->features |= NETIF_F_TSO6; - } else { - netdev->features &= ~NETIF_F_TSO; - netdev->features &= ~NETIF_F_TSO6; - } - - dev_info(&adapter->pdev->dev, "TSO is %s\n", - data ? "Enabled" : "Disabled"); - return 0; -} - -static u32 igbvf_get_msglevel(struct net_device *netdev) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - return adapter->msg_enable; -} - -static void igbvf_set_msglevel(struct net_device *netdev, u32 data) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = data; -} - -static int igbvf_get_regs_len(struct net_device *netdev) -{ -#define IGBVF_REGS_LEN 8 - return IGBVF_REGS_LEN * sizeof(u32); -} - -static void igbvf_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - u32 *regs_buff = p; - - memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); - - regs->version = (1 << 24) | (adapter->pdev->revision << 16) | - adapter->pdev->device; - - regs_buff[0] = er32(CTRL); - regs_buff[1] = er32(STATUS); - - regs_buff[2] = er32(RDLEN(0)); - regs_buff[3] = er32(RDH(0)); - regs_buff[4] = er32(RDT(0)); - - regs_buff[5] = er32(TDLEN(0)); - regs_buff[6] = er32(TDH(0)); - regs_buff[7] = er32(TDT(0)); -} - -static int igbvf_get_eeprom_len(struct net_device *netdev) -{ - return 0; -} - -static int igbvf_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - return -EOPNOTSUPP; -} - -static int igbvf_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - return -EOPNOTSUPP; -} - -static void igbvf_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - char firmware_version[32] = "N/A"; - - strncpy(drvinfo->driver, igbvf_driver_name, 32); - strncpy(drvinfo->version, igbvf_driver_version, 32); - strncpy(drvinfo->fw_version, firmware_version, 32); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->regdump_len = igbvf_get_regs_len(netdev); - drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); -} - -static void igbvf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct igbvf_ring *tx_ring = adapter->tx_ring; - struct igbvf_ring *rx_ring = adapter->rx_ring; - - ring->rx_max_pending = IGBVF_MAX_RXD; - ring->tx_max_pending = IGBVF_MAX_TXD; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = rx_ring->count; - ring->tx_pending = tx_ring->count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} - -static int igbvf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct igbvf_ring *temp_ring; - int err = 0; - u32 new_rx_count, new_tx_count; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); - new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - - new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); - new_tx_count = 
min(new_tx_count, (u32)IGBVF_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); - - if ((new_tx_count == adapter->tx_ring->count) && - (new_rx_count == adapter->rx_ring->count)) { - /* nothing to do */ - return 0; - } - - while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); - - if (!netif_running(adapter->netdev)) { - adapter->tx_ring->count = new_tx_count; - adapter->rx_ring->count = new_rx_count; - goto clear_reset; - } - - temp_ring = vmalloc(sizeof(struct igbvf_ring)); - if (!temp_ring) { - err = -ENOMEM; - goto clear_reset; - } - - igbvf_down(adapter); - - /* - * We can't just free everything and then setup again, - * because the ISRs in MSI-X mode get passed pointers - * to the tx and rx ring structs. - */ - if (new_tx_count != adapter->tx_ring->count) { - memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); - - temp_ring->count = new_tx_count; - err = igbvf_setup_tx_resources(adapter, temp_ring); - if (err) - goto err_setup; - - igbvf_free_tx_resources(adapter->tx_ring); - - memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring)); - } - - if (new_rx_count != adapter->rx_ring->count) { - memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring)); - - temp_ring->count = new_rx_count; - err = igbvf_setup_rx_resources(adapter, temp_ring); - if (err) - goto err_setup; - - igbvf_free_rx_resources(adapter->rx_ring); - - memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); - } -err_setup: - igbvf_up(adapter); - vfree(temp_ring); -clear_reset: - clear_bit(__IGBVF_RESETTING, &adapter->state); - return err; -} - -static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) -{ - struct e1000_hw *hw = &adapter->hw; - *data = 0; - - hw->mac.ops.check_for_link(hw); - - if (!(er32(STATUS) & E1000_STATUS_LU)) - *data = 1; - - return *data; -} - -static void igbvf_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - - set_bit(__IGBVF_TESTING, &adapter->state); - - /* - * Link test performed before hardware reset so autoneg doesn't - * interfere with test result - */ - if (igbvf_link_test(adapter, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - clear_bit(__IGBVF_TESTING, &adapter->state); - msleep_interruptible(4 * 1000); -} - -static void igbvf_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) -{ - wol->supported = 0; - wol->wolopts = 0; -} - -static int igbvf_set_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) -{ - return -EOPNOTSUPP; -} - -static int igbvf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - - if (adapter->itr_setting <= 3) - ec->rx_coalesce_usecs = adapter->itr_setting; - else - ec->rx_coalesce_usecs = adapter->itr_setting >> 2; - - return 0; -} - -static int igbvf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) || - ((ec->rx_coalesce_usecs > 3) && - (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) || - (ec->rx_coalesce_usecs == 2)) - return -EINVAL; - - /* convert to rate of irq's per second */ - if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { - adapter->itr = IGBVF_START_ITR; - adapter->itr_setting = ec->rx_coalesce_usecs; - } else { - adapter->itr = ec->rx_coalesce_usecs << 2; - adapter->itr_setting = adapter->itr; 
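The shifts in igbvf_get_coalesce()/igbvf_set_coalesce() above convert between microseconds and EITR register units; the << 2 implies a quarter-microsecond register granularity, an inference from this conversion rather than anything stated in the source. A worked example:

	/* ethtool -C ethX rx-usecs 50:
	 *   adapter->itr = 50 << 2 = 200 register units
	 *   readback: itr_setting >> 2 = 50 usecs
	 * rx-usecs 1 or 3 select a predefined dynamic mode rather than a
	 * fixed delay, and rx-usecs 2 is explicitly rejected with -EINVAL.
	 */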
- } - - writel(adapter->itr, - hw->hw_addr + adapter->rx_ring[0].itr_register); - - return 0; -} - -static int igbvf_nway_reset(struct net_device *netdev) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - if (netif_running(netdev)) - igbvf_reinit_locked(adapter); - return 0; -} - - -static void igbvf_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, - u64 *data) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - int i; - - igbvf_update_stats(adapter); - for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { - char *p = (char *)adapter + - igbvf_gstrings_stats[i].stat_offset; - char *b = (char *)adapter + - igbvf_gstrings_stats[i].base_stat_offset; - data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : - (*(u32 *)p - *(u32 *)b)); - } - -} - -static int igbvf_get_sset_count(struct net_device *dev, int stringset) -{ - switch(stringset) { - case ETH_SS_TEST: - return IGBVF_TEST_LEN; - case ETH_SS_STATS: - return IGBVF_GLOBAL_STATS_LEN; - default: - return -EINVAL; - } -} - -static void igbvf_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test)); - break; - case ETH_SS_STATS: - for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { - memcpy(p, igbvf_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - break; - } -} - -static const struct ethtool_ops igbvf_ethtool_ops = { - .get_settings = igbvf_get_settings, - .set_settings = igbvf_set_settings, - .get_drvinfo = igbvf_get_drvinfo, - .get_regs_len = igbvf_get_regs_len, - .get_regs = igbvf_get_regs, - .get_wol = igbvf_get_wol, - .set_wol = igbvf_set_wol, - .get_msglevel = igbvf_get_msglevel, - .set_msglevel = igbvf_set_msglevel, - .nway_reset = igbvf_nway_reset, - .get_link = ethtool_op_get_link, - .get_eeprom_len = igbvf_get_eeprom_len, - .get_eeprom = igbvf_get_eeprom, - .set_eeprom = igbvf_set_eeprom, - .get_ringparam = igbvf_get_ringparam, - .set_ringparam = igbvf_set_ringparam, - .get_pauseparam = igbvf_get_pauseparam, - .set_pauseparam = igbvf_set_pauseparam, - .get_rx_csum = igbvf_get_rx_csum, - .set_rx_csum = igbvf_set_rx_csum, - .get_tx_csum = igbvf_get_tx_csum, - .set_tx_csum = igbvf_set_tx_csum, - .get_sg = ethtool_op_get_sg, - .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, - .set_tso = igbvf_set_tso, - .self_test = igbvf_diag_test, - .get_sset_count = igbvf_get_sset_count, - .get_strings = igbvf_get_strings, - .get_ethtool_stats = igbvf_get_ethtool_stats, - .get_coalesce = igbvf_get_coalesce, - .set_coalesce = igbvf_set_coalesce, -}; - -void igbvf_set_ethtool_ops(struct net_device *netdev) -{ - /* have to "undeclare" const on this struct to remove warnings */ - SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops); -} diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h deleted file mode 100644 index fd4a7b7..0000000 --- a/drivers/net/igbvf/igbvf.h +++ /dev/null @@ -1,326 +0,0 @@ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* Linux PRO/1000 Ethernet Driver main header file */ - -#ifndef _IGBVF_H_ -#define _IGBVF_H_ - -#include -#include -#include -#include -#include - -#include "vf.h" - -/* Forward declarations */ -struct igbvf_info; -struct igbvf_adapter; - -/* Interrupt defines */ -#define IGBVF_START_ITR 648 /* ~6000 ints/sec */ - -/* Interrupt modes, as used by the IntMode parameter */ -#define IGBVF_INT_MODE_LEGACY 0 -#define IGBVF_INT_MODE_MSI 1 -#define IGBVF_INT_MODE_MSIX 2 - -/* Tx/Rx descriptor defines */ -#define IGBVF_DEFAULT_TXD 256 -#define IGBVF_MAX_TXD 4096 -#define IGBVF_MIN_TXD 80 - -#define IGBVF_DEFAULT_RXD 256 -#define IGBVF_MAX_RXD 4096 -#define IGBVF_MIN_RXD 80 - -#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ -#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ - -/* RX descriptor control thresholds. - * PTHRESH - MAC will consider prefetch if it has fewer than this number of - * descriptors available in its onboard memory. - * Setting this to 0 disables RX descriptor prefetch. - * HTHRESH - MAC will only prefetch if there are at least this many descriptors - * available in host memory. - * If PTHRESH is 0, this should also be 0. - * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back - * descriptors until either it has this many to write back, or the - * ITR timer expires. - */ -#define IGBVF_RX_PTHRESH 16 -#define IGBVF_RX_HTHRESH 8 -#define IGBVF_RX_WTHRESH 1 - -/* this is the size past which hardware will drop packets when setting LPE=0 */ -#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 - -#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ - -/* How many Tx Descriptors do we need to call netif_wake_queue ? */ -#define IGBVF_TX_QUEUE_WAKE 32 -/* How many Rx Buffers do we bundle into one write to the hardware ? 
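The PTHRESH/HTHRESH/WTHRESH values documented above are packed into one RXDCTL register per queue. A hedged sketch of that packing; the field positions are assumed from the usual 82575-family register layout, not restated in this header:

	u32 rxdctl = er32(RXDCTL(0));

	rxdctl |= IGBVF_RX_PTHRESH;		/* prefetch threshold, assumed bits 5:0 */
	rxdctl |= IGBVF_RX_HTHRESH << 8;	/* host threshold, assumed bits 13:8 */
	rxdctl |= IGBVF_RX_WTHRESH << 16;	/* writeback threshold, assumed bits 21:16 */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	ew32(RXDCTL(0), rxdctl);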
*/ -#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ - -#define AUTO_ALL_MODES 0 -#define IGBVF_EEPROM_APME 0x0400 - -#define IGBVF_MNG_VLAN_NONE (-1) - -/* Number of packet split data buffers (not including the header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) - -enum igbvf_boards { - board_vf, - board_i350_vf, -}; - -struct igbvf_queue_stats { - u64 packets; - u64 bytes; -}; - -/* - * wrappers around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer - */ -struct igbvf_buffer { - dma_addr_t dma; - struct sk_buff *skb; - union { - /* Tx */ - struct { - unsigned long time_stamp; - u16 length; - u16 next_to_watch; - u16 mapped_as_page; - }; - /* Rx */ - struct { - struct page *page; - u64 page_dma; - unsigned int page_offset; - }; - }; -}; - -union igbvf_desc { - union e1000_adv_rx_desc rx_desc; - union e1000_adv_tx_desc tx_desc; - struct e1000_adv_tx_context_desc tx_context_desc; -}; - -struct igbvf_ring { - struct igbvf_adapter *adapter; /* backlink */ - union igbvf_desc *desc; /* pointer to ring memory */ - dma_addr_t dma; /* phys address of ring */ - unsigned int size; /* length of ring in bytes */ - unsigned int count; /* number of desc. in ring */ - - u16 next_to_use; - u16 next_to_clean; - - u16 head; - u16 tail; - - /* array of buffer information structs */ - struct igbvf_buffer *buffer_info; - struct napi_struct napi; - - char name[IFNAMSIZ + 5]; - u32 eims_value; - u32 itr_val; - u16 itr_register; - int set_itr; - - struct sk_buff *rx_skb_top; - - struct igbvf_queue_stats stats; -}; - -/* board specific private data structure */ -struct igbvf_adapter { - struct timer_list watchdog_timer; - struct timer_list blink_timer; - - struct work_struct reset_task; - struct work_struct watchdog_task; - - const struct igbvf_info *ei; - - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u32 bd_number; - u32 rx_buffer_len; - u32 polling_interval; - u16 mng_vlan_id; - u16 link_speed; - u16 link_duplex; - - spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ - - /* track device up/down/testing state */ - unsigned long state; - - /* Interrupt Throttle Rate */ - u32 itr; - u32 itr_setting; - u16 tx_itr; - u16 rx_itr; - - /* - * Tx - */ - struct igbvf_ring *tx_ring /* One per active queue */ - ____cacheline_aligned_in_smp; - - unsigned int restart_queue; - u32 txd_cmd; - - u32 tx_int_delay; - u32 tx_abs_int_delay; - - unsigned int total_tx_bytes; - unsigned int total_tx_packets; - unsigned int total_rx_bytes; - unsigned int total_rx_packets; - - /* Tx stats */ - u32 tx_timeout_count; - u32 tx_fifo_head; - u32 tx_head_addr; - u32 tx_fifo_size; - u32 tx_dma_failed; - - /* - * Rx - */ - struct igbvf_ring *rx_ring; - - u32 rx_int_delay; - u32 rx_abs_int_delay; - - /* Rx stats */ - u64 hw_csum_err; - u64 hw_csum_good; - u64 rx_hdr_split; - u32 alloc_rx_buff_failed; - u32 rx_dma_failed; - - unsigned int rx_ps_hdr_size; - u32 max_frame_size; - u32 min_frame_size; - - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - struct net_device_stats net_stats; - spinlock_t stats_lock; /* prevent concurrent stats updates */ - - /* structs defined in e1000_hw.h */ - struct e1000_hw hw; - - /* The VF counters don't clear on read so we have to get a base - * count on driver start up and always subtract that base on - * on the first update, thus the flag.. 
- */ - struct e1000_vf_stats stats; - u64 zero_base; - - struct igbvf_ring test_tx_ring; - struct igbvf_ring test_rx_ring; - u32 test_icr; - - u32 msg_enable; - struct msix_entry *msix_entries; - int int_mode; - u32 eims_enable_mask; - u32 eims_other; - u32 int_counter0; - u32 int_counter1; - - u32 eeprom_wol; - u32 wol; - u32 pba; - - bool fc_autoneg; - - unsigned long led_status; - - unsigned int flags; - unsigned long last_reset; -}; - -struct igbvf_info { - enum e1000_mac_type mac; - unsigned int flags; - u32 pba; - void (*init_ops)(struct e1000_hw *); - s32 (*get_variants)(struct igbvf_adapter *); -}; - -/* hardware capability, feature, and workaround flags */ -#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) - -#define IGBVF_RX_DESC_ADV(R, i) \ - (&((((R).desc))[i].rx_desc)) -#define IGBVF_TX_DESC_ADV(R, i) \ - (&((((R).desc))[i].tx_desc)) -#define IGBVF_TX_CTXTDESC_ADV(R, i) \ - (&((((R).desc))[i].tx_context_desc)) - -enum igbvf_state_t { - __IGBVF_TESTING, - __IGBVF_RESETTING, - __IGBVF_DOWN -}; - -enum latency_range { - lowest_latency = 0, - low_latency = 1, - bulk_latency = 2, - latency_invalid = 255 -}; - -extern char igbvf_driver_name[]; -extern const char igbvf_driver_version[]; - -extern void igbvf_check_options(struct igbvf_adapter *); -extern void igbvf_set_ethtool_ops(struct net_device *); - -extern int igbvf_up(struct igbvf_adapter *); -extern void igbvf_down(struct igbvf_adapter *); -extern void igbvf_reinit_locked(struct igbvf_adapter *); -extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *); -extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *); -extern void igbvf_free_rx_resources(struct igbvf_ring *); -extern void igbvf_free_tx_resources(struct igbvf_ring *); -extern void igbvf_update_stats(struct igbvf_adapter *); - -extern unsigned int copybreak; - -#endif /* _IGBVF_H_ */ diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c deleted file mode 100644 index 3d6f4cc..0000000 --- a/drivers/net/igbvf/mbx.c +++ /dev/null @@ -1,350 +0,0 @@ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
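The IGBVF_*_DESC_ADV() accessors above exist because the ring memory is an array of unions; they index the array and pick the view matching the descriptor type in use:

	/* element i of the RX ring, viewed as an advanced RX descriptor;
	 * expands to &rx_ring->desc[i].rx_desc */
	union e1000_adv_rx_desc *rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

All three descriptor layouts therefore share a single coherent allocation per ring.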
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "mbx.h" - -/** - * e1000_poll_for_msg - Wait for message notification - * @hw: pointer to the HW structure - * - * returns SUCCESS if it successfully received a message notification - **/ -static s32 e1000_poll_for_msg(struct e1000_hw *hw) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - if (!mbx->ops.check_for_msg) - goto out; - - while (countdown && mbx->ops.check_for_msg(hw)) { - countdown--; - udelay(mbx->usec_delay); - } - - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; -out: - return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; -} - -/** - * e1000_poll_for_ack - Wait for message acknowledgement - * @hw: pointer to the HW structure - * - * returns SUCCESS if it successfully received a message acknowledgement - **/ -static s32 e1000_poll_for_ack(struct e1000_hw *hw) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - if (!mbx->ops.check_for_ack) - goto out; - - while (countdown && mbx->ops.check_for_ack(hw)) { - countdown--; - udelay(mbx->usec_delay); - } - - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; -out: - return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; -} - -/** - * e1000_read_posted_mbx - Wait for message notification and receive message - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns SUCCESS if it successfully received a message notification and - * copied it into the receive buffer. - **/ -static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - - if (!mbx->ops.read) - goto out; - - ret_val = e1000_poll_for_msg(hw); - - /* if ack received read message, otherwise we timed out */ - if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size); -out: - return ret_val; -} - -/** - * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns SUCCESS if it successfully copied message into the buffer and - * received an ack to that message within delay * timeout period - **/ -static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = -E1000_ERR_MBX; - - /* exit if we either can't write or there isn't a defined timeout */ - if (!mbx->ops.write || !mbx->timeout) - goto out; - - /* send msg*/ - ret_val = mbx->ops.write(hw, msg, size); - - /* if msg sent wait until we receive an ack */ - if (!ret_val) - ret_val = e1000_poll_for_ack(hw); -out: - return ret_val; -} - -/** - * e1000_read_v2p_mailbox - read v2p mailbox - * @hw: pointer to the HW structure - * - * This function is used to read the v2p mailbox without losing the read to - * clear status bits. 
- **/ -static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) -{ - u32 v2p_mailbox = er32(V2PMAILBOX(0)); - - v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; - hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; - - return v2p_mailbox; -} - -/** - * e1000_check_for_bit_vf - Determine if a status bit was set - * @hw: pointer to the HW structure - * @mask: bitmask for bits to be tested and cleared - * - * This function is used to check for the read to clear bits within - * the V2P mailbox. - **/ -static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) -{ - u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); - s32 ret_val = -E1000_ERR_MBX; - - if (v2p_mailbox & mask) - ret_val = E1000_SUCCESS; - - hw->dev_spec.vf.v2p_mailbox &= ~mask; - - return ret_val; -} - -/** - * e1000_check_for_msg_vf - checks to see if the PF has sent mail - * @hw: pointer to the HW structure - * - * returns SUCCESS if the PF has set the Status bit or else ERR_MBX - **/ -static s32 e1000_check_for_msg_vf(struct e1000_hw *hw) -{ - s32 ret_val = -E1000_ERR_MBX; - - if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { - ret_val = E1000_SUCCESS; - hw->mbx.stats.reqs++; - } - - return ret_val; -} - -/** - * e1000_check_for_ack_vf - checks to see if the PF has ACK'd - * @hw: pointer to the HW structure - * - * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX - **/ -static s32 e1000_check_for_ack_vf(struct e1000_hw *hw) -{ - s32 ret_val = -E1000_ERR_MBX; - - if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { - ret_val = E1000_SUCCESS; - hw->mbx.stats.acks++; - } - - return ret_val; -} - -/** - * e1000_check_for_rst_vf - checks to see if the PF has reset - * @hw: pointer to the HW structure - * - * returns true if the PF has set the reset done bit or else false - **/ -static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) -{ - s32 ret_val = -E1000_ERR_MBX; - - if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | - E1000_V2PMAILBOX_RSTI))) { - ret_val = E1000_SUCCESS; - hw->mbx.stats.rsts++; - } - - return ret_val; -} - -/** - * e1000_obtain_mbx_lock_vf - obtain mailbox lock - * @hw: pointer to the HW structure - * - * return SUCCESS if we obtained the mailbox lock - **/ -static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) -{ - s32 ret_val = -E1000_ERR_MBX; - - /* Take ownership of the buffer */ - ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); - - /* reserve mailbox for vf use */ - if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) - ret_val = E1000_SUCCESS; - - return ret_val; -} - -/** - * e1000_write_mbx_vf - Write a message to the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns SUCCESS if it successfully copied message into the buffer - **/ -static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) -{ - s32 err; - u16 i; - - /* lock the mailbox to prevent pf/vf race condition */ - err = e1000_obtain_mbx_lock_vf(hw); - if (err) - goto out_no_write; - - /* flush any ack or msg as we are going to overwrite mailbox */ - e1000_check_for_ack_vf(hw); - e1000_check_for_msg_vf(hw); - - /* copy the caller specified message to the mailbox memory buffer */ - for (i = 0; i < size; i++) - array_ew32(VMBMEM(0), i, msg[i]); - - /* update stats */ - hw->mbx.stats.msgs_tx++; - - /* Drop VFU and interrupt the PF to tell it a message has been sent */ - ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ); - -out_no_write: - return err; -} - -/** - * e1000_read_mbx_vf - Reads a message from the inbox intended for vf - * 
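Taken together, the helpers above give the VF side a lock/write/poll protocol: take VFU ownership, flush any stale ACK/MSG status, copy the words into VMBMEM, raise REQ, then poll for PFACK. Through the ops table filled in by e1000_init_mbx_params_vf() below, a posted exchange reduces to a sketch like:

	u32 msg[2] = { E1000_VF_RESET };	/* request code from mbx.h */
	s32 err;

	err = hw->mbx.ops.write_posted(hw, msg, 1);	/* send, wait for ack */
	if (!err)
		err = hw->mbx.ops.read_posted(hw, msg, 2);	/* wait for PF reply */

Note the failure mode in the pollers: once a poll times out, mbx->timeout is zeroed and every later posted message fails until the next reset.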
@hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns SUCCESS if it successfully read message from buffer - **/ -static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) -{ - s32 err; - u16 i; - - /* lock the mailbox to prevent pf/vf race condition */ - err = e1000_obtain_mbx_lock_vf(hw); - if (err) - goto out_no_read; - - /* copy the message from the mailbox memory buffer */ - for (i = 0; i < size; i++) - msg[i] = array_er32(VMBMEM(0), i); - - /* Acknowledge receipt and release mailbox, then we're done */ - ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK); - - /* update stats */ - hw->mbx.stats.msgs_rx++; - -out_no_read: - return err; -} - -/** - * e1000_init_mbx_params_vf - set initial values for vf mailbox - * @hw: pointer to the HW structure - * - * Initializes the hw->mbx struct to correct values for vf mailbox - */ -s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - - /* start mailbox as timed out and let the reset_hw call set the timeout - * value to begin communications */ - mbx->timeout = 0; - mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; - - mbx->size = E1000_VFMAILBOX_SIZE; - - mbx->ops.read = e1000_read_mbx_vf; - mbx->ops.write = e1000_write_mbx_vf; - mbx->ops.read_posted = e1000_read_posted_mbx; - mbx->ops.write_posted = e1000_write_posted_mbx; - mbx->ops.check_for_msg = e1000_check_for_msg_vf; - mbx->ops.check_for_ack = e1000_check_for_ack_vf; - mbx->ops.check_for_rst = e1000_check_for_rst_vf; - - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; - - return E1000_SUCCESS; -} - diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h deleted file mode 100644 index c2883c4..0000000 --- a/drivers/net/igbvf/mbx.h +++ /dev/null @@ -1,75 +0,0 @@ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_MBX_H_ -#define _E1000_MBX_H_ - -#include "vf.h" - -#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ -#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ - -#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ - -/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the - * PF. The reverse is true if it is E1000_PF_*. - * Message ACK's are the value or'd with 0xF0000000 - */ -#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ - -/* We have a total wait time of 1s for vf mailbox posted messages */ -#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ -#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ - -#define E1000_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for extra info for certain messages */ -#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) - -#define E1000_VF_RESET 0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ -#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ - -#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ - -void e1000_init_mbx_ops_generic(struct e1000_hw *hw); -s32 e1000_init_mbx_params_vf(struct e1000_hw *); - -#endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c deleted file mode 100644 index 40ed066..0000000 --- a/drivers/net/igbvf/netdev.c +++ /dev/null @@ -1,2859 +0,0 @@ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
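The message format above packs the command into the low bits, optional extra data into bits 23:16, and the PF echoes the command OR'd with ACK or NACK. A sketch of a VF-side multicast-list update (the buffer size here is illustrative):

	u32 msgbuf[8];

	msgbuf[0] = E1000_VF_SET_MULTICAST;
	msgbuf[0] |= count << E1000_VT_MSGINFO_SHIFT;	/* entry count, bits 23:16 */
	/* hash values follow in msgbuf[1..], then write_posted/read_posted */

	if (msgbuf[0] == (E1000_VF_SET_MULTICAST | E1000_VT_MSGTYPE_ACK))
		; /* PF accepted the list */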
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "igbvf.h" - -#define DRV_VERSION "2.0.0-k" -char igbvf_driver_name[] = "igbvf"; -const char igbvf_driver_version[] = DRV_VERSION; -static const char igbvf_driver_string[] = - "Intel(R) Virtual Function Network Driver"; -static const char igbvf_copyright[] = - "Copyright (c) 2009 - 2010 Intel Corporation."; - -static int igbvf_poll(struct napi_struct *napi, int budget); -static void igbvf_reset(struct igbvf_adapter *); -static void igbvf_set_interrupt_capability(struct igbvf_adapter *); -static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); - -static struct igbvf_info igbvf_vf_info = { - .mac = e1000_vfadapt, - .flags = 0, - .pba = 10, - .init_ops = e1000_init_function_pointers_vf, -}; - -static struct igbvf_info igbvf_i350_vf_info = { - .mac = e1000_vfadapt_i350, - .flags = 0, - .pba = 10, - .init_ops = e1000_init_function_pointers_vf, -}; - -static const struct igbvf_info *igbvf_info_tbl[] = { - [board_vf] = &igbvf_vf_info, - [board_i350_vf] = &igbvf_i350_vf_info, -}; - -/** - * igbvf_desc_unused - calculate if we have unused descriptors - **/ -static int igbvf_desc_unused(struct igbvf_ring *ring) -{ - if (ring->next_to_clean > ring->next_to_use) - return ring->next_to_clean - ring->next_to_use - 1; - - return ring->count + ring->next_to_clean - ring->next_to_use - 1; -} - -/** - * igbvf_receive_skb - helper function to handle Rx indications - * @adapter: board private structure - * @status: descriptor status field as written by hardware - * @vlan: descriptor vlan field as written by hardware (no le/be conversion) - * @skb: pointer to sk_buff to be indicated to stack - **/ -static void igbvf_receive_skb(struct igbvf_adapter *adapter, - struct net_device *netdev, - struct sk_buff *skb, - u32 status, u16 vlan) -{ - if (status & E1000_RXD_STAT_VP) { - u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; - - __vlan_hwaccel_put_tag(skb, vid); - } - netif_receive_skb(skb); -} - -static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, - u32 status_err, struct sk_buff *skb) -{ - skb_checksum_none_assert(skb); - - /* Ignore Checksum bit is set or checksum is disabled through ethtool */ - if ((status_err & E1000_RXD_STAT_IXSM) || - (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED)) - return; - - /* TCP/UDP checksum error bit is set */ - if (status_err & - (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { - /* let the stack verify checksum errors */ - adapter->hw_csum_err++; - return; - } - - /* It must be a TCP or UDP packet with a valid checksum */ - if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - - adapter->hw_csum_good++; -} - -/** - * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split - * @rx_ring: address of ring structure to repopulate - * @cleaned_count: number of buffers to repopulate - **/ -static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, - int cleaned_count) -{ - struct igbvf_adapter *adapter = rx_ring->adapter; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - union e1000_adv_rx_desc *rx_desc; - struct igbvf_buffer *buffer_info; - struct sk_buff *skb; - unsigned int i; - int bufsz; - - i = rx_ring->next_to_use; - 
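igbvf_desc_unused() above deliberately keeps one slot of slack so that next_to_use can never catch up with next_to_clean and make a full ring look empty. Checking both branches with count = 256:

	/* next_to_clean = 200, next_to_use = 100: 200 - 100 - 1      = 99 free
	 * next_to_clean =  10, next_to_use = 250: 256 + 10 - 250 - 1 = 15 free
	 * ring full: next_to_use == next_to_clean - 1                ->  0 free
	 */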
buffer_info = &rx_ring->buffer_info[i]; - - if (adapter->rx_ps_hdr_size) - bufsz = adapter->rx_ps_hdr_size; - else - bufsz = adapter->rx_buffer_len; - - while (cleaned_count--) { - rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); - - if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { - if (!buffer_info->page) { - buffer_info->page = alloc_page(GFP_ATOMIC); - if (!buffer_info->page) { - adapter->alloc_rx_buff_failed++; - goto no_buffers; - } - buffer_info->page_offset = 0; - } else { - buffer_info->page_offset ^= PAGE_SIZE / 2; - } - buffer_info->page_dma = - dma_map_page(&pdev->dev, buffer_info->page, - buffer_info->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - } - - if (!buffer_info->skb) { - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - if (!skb) { - adapter->alloc_rx_buff_failed++; - goto no_buffers; - } - - buffer_info->skb = skb; - buffer_info->dma = dma_map_single(&pdev->dev, skb->data, - bufsz, - DMA_FROM_DEVICE); - } - /* Refresh the desc even if buffer_addrs didn't change because - * each write-back erases this info. */ - if (adapter->rx_ps_hdr_size) { - rx_desc->read.pkt_addr = - cpu_to_le64(buffer_info->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); - } else { - rx_desc->read.pkt_addr = - cpu_to_le64(buffer_info->dma); - rx_desc->read.hdr_addr = 0; - } - - i++; - if (i == rx_ring->count) - i = 0; - buffer_info = &rx_ring->buffer_info[i]; - } - -no_buffers: - if (rx_ring->next_to_use != i) { - rx_ring->next_to_use = i; - if (i == 0) - i = (rx_ring->count - 1); - else - i--; - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). */ - wmb(); - writel(i, adapter->hw.hw_addr + rx_ring->tail); - } -} - -/** - * igbvf_clean_rx_irq - Send received data up the network stack; legacy - * @adapter: board private structure - * - * the return value indicates whether actual cleaning was done, there - * is no guarantee that everything was cleaned - **/ -static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, - int *work_done, int work_to_do) -{ - struct igbvf_ring *rx_ring = adapter->rx_ring; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - union e1000_adv_rx_desc *rx_desc, *next_rxd; - struct igbvf_buffer *buffer_info, *next_buffer; - struct sk_buff *skb; - bool cleaned = false; - int cleaned_count = 0; - unsigned int total_bytes = 0, total_packets = 0; - unsigned int i; - u32 length, hlen, staterr; - - i = rx_ring->next_to_clean; - rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - - while (staterr & E1000_RXD_STAT_DD) { - if (*work_done >= work_to_do) - break; - (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ - - buffer_info = &rx_ring->buffer_info[i]; - - /* HW will not DMA in data larger than the given buffer, even - * if it parses the (NFS, of course) header to be larger. In - * that case, it fills the header buffer and spills the rest - * into the page. 
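In the allocation loop above, header-split mode shares each page between two descriptors by XOR-flipping page_offset, mapping one half at a time (assuming the common 4 KB PAGE_SIZE):

	/* 1st use of the page: page_offset = 0               -> map [0, 2048)
	 * 2nd use (same page):  page_offset ^= PAGE_SIZE / 2 -> map [2048, 4096)
	 * The cleanup path recycles a page only while page_count() == 1;
	 * otherwise the reference is dropped and a fresh page is allocated here.
	 */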
- */ - hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & - E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; - if (hlen > adapter->rx_ps_hdr_size) - hlen = adapter->rx_ps_hdr_size; - - length = le16_to_cpu(rx_desc->wb.upper.length); - cleaned = true; - cleaned_count++; - - skb = buffer_info->skb; - prefetch(skb->data - NET_IP_ALIGN); - buffer_info->skb = NULL; - if (!adapter->rx_ps_hdr_size) { - dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - skb_put(skb, length); - goto send_up; - } - - if (!skb_shinfo(skb)->nr_frags) { - dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_hdr_size, - DMA_FROM_DEVICE); - skb_put(skb, hlen); - } - - if (length) { - dma_unmap_page(&pdev->dev, buffer_info->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - buffer_info->page_dma = 0; - - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - buffer_info->page, - buffer_info->page_offset, - length); - - if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || - (page_count(buffer_info->page) != 1)) - buffer_info->page = NULL; - else - get_page(buffer_info->page); - - skb->len += length; - skb->data_len += length; - skb->truesize += length; - } -send_up: - i++; - if (i == rx_ring->count) - i = 0; - next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i); - prefetch(next_rxd); - next_buffer = &rx_ring->buffer_info[i]; - - if (!(staterr & E1000_RXD_STAT_EOP)) { - buffer_info->skb = next_buffer->skb; - buffer_info->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - goto next_desc; - } - - if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { - dev_kfree_skb_irq(skb); - goto next_desc; - } - - total_bytes += skb->len; - total_packets++; - - igbvf_rx_checksum_adv(adapter, staterr, skb); - - skb->protocol = eth_type_trans(skb, netdev); - - igbvf_receive_skb(adapter, netdev, skb, staterr, - rx_desc->wb.upper.vlan); - -next_desc: - rx_desc->wb.upper.status_error = 0; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) { - igbvf_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - } - - rx_ring->next_to_clean = i; - cleaned_count = igbvf_desc_unused(rx_ring); - - if (cleaned_count) - igbvf_alloc_rx_buffers(rx_ring, cleaned_count); - - adapter->total_rx_packets += total_packets; - adapter->total_rx_bytes += total_bytes; - adapter->net_stats.rx_bytes += total_bytes; - adapter->net_stats.rx_packets += total_packets; - return cleaned; -} - -static void igbvf_put_txbuf(struct igbvf_adapter *adapter, - struct igbvf_buffer *buffer_info) -{ - if (buffer_info->dma) { - if (buffer_info->mapped_as_page) - dma_unmap_page(&adapter->pdev->dev, - buffer_info->dma, - buffer_info->length, - DMA_TO_DEVICE); - else - dma_unmap_single(&adapter->pdev->dev, - buffer_info->dma, - buffer_info->length, - DMA_TO_DEVICE); - buffer_info->dma = 0; - } - if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); - buffer_info->skb = NULL; - } - buffer_info->time_stamp = 0; -} - -/** - * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) - * @adapter: board private structure - * - * Return 0 on success, negative on failure - **/ -int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring) -{ - struct pci_dev *pdev = adapter->pdev; - int size; - - size = sizeof(struct igbvf_buffer) * 
tx_ring->count; - tx_ring->buffer_info = vzalloc(size); - if (!tx_ring->buffer_info) - goto err; - - /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); - - tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - - if (!tx_ring->desc) - goto err; - - tx_ring->adapter = adapter; - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - - return 0; -err: - vfree(tx_ring->buffer_info); - dev_err(&adapter->pdev->dev, - "Unable to allocate memory for the transmit descriptor ring\n"); - return -ENOMEM; -} - -/** - * igbvf_setup_rx_resources - allocate Rx resources (Descriptors) - * @adapter: board private structure - * - * Returns 0 on success, negative on failure - **/ -int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, - struct igbvf_ring *rx_ring) -{ - struct pci_dev *pdev = adapter->pdev; - int size, desc_len; - - size = sizeof(struct igbvf_buffer) * rx_ring->count; - rx_ring->buffer_info = vzalloc(size); - if (!rx_ring->buffer_info) - goto err; - - desc_len = sizeof(union e1000_adv_rx_desc); - - /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * desc_len; - rx_ring->size = ALIGN(rx_ring->size, 4096); - - rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - - if (!rx_ring->desc) - goto err; - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - - rx_ring->adapter = adapter; - - return 0; - -err: - vfree(rx_ring->buffer_info); - rx_ring->buffer_info = NULL; - dev_err(&adapter->pdev->dev, - "Unable to allocate memory for the receive descriptor ring\n"); - return -ENOMEM; -} - -/** - * igbvf_clean_tx_ring - Free Tx Buffers - * @tx_ring: ring to be cleaned - **/ -static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring) -{ - struct igbvf_adapter *adapter = tx_ring->adapter; - struct igbvf_buffer *buffer_info; - unsigned long size; - unsigned int i; - - if (!tx_ring->buffer_info) - return; - - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) { - buffer_info = &tx_ring->buffer_info[i]; - igbvf_put_txbuf(adapter, buffer_info); - } - - size = sizeof(struct igbvf_buffer) * tx_ring->count; - memset(tx_ring->buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - - writel(0, adapter->hw.hw_addr + tx_ring->head); - writel(0, adapter->hw.hw_addr + tx_ring->tail); -} - -/** - * igbvf_free_tx_resources - Free Tx Resources per Queue - * @tx_ring: ring to free resources from - * - * Free all transmit software resources - **/ -void igbvf_free_tx_resources(struct igbvf_ring *tx_ring) -{ - struct pci_dev *pdev = tx_ring->adapter->pdev; - - igbvf_clean_tx_ring(tx_ring); - - vfree(tx_ring->buffer_info); - tx_ring->buffer_info = NULL; - - dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, - tx_ring->dma); - - tx_ring->desc = NULL; -} - -/** - * igbvf_clean_rx_ring - Free Rx Buffers per Queue - * @adapter: board private structure - **/ -static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) -{ - struct igbvf_adapter *adapter = rx_ring->adapter; - struct igbvf_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; - unsigned long size; - unsigned int i; - - if (!rx_ring->buffer_info) - return; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - buffer_info = &rx_ring->buffer_info[i]; - if (buffer_info->dma) { - if 
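Both setup routines above round the ring allocation up to a 4 KB multiple before dma_alloc_coherent(). With the 16-byte advanced descriptors used here, the bounds from igbvf.h work out to:

	/* IGBVF_DEFAULT_TXD = 256: 256 * 16 = 4096 bytes -> already aligned
	 * IGBVF_MIN_TXD     =  80:  80 * 16 = 1280 bytes -> ALIGN(1280, 4096) = 4096
	 */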
(adapter->rx_ps_hdr_size){ - dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_hdr_size, - DMA_FROM_DEVICE); - } else { - dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, - DMA_FROM_DEVICE); - } - buffer_info->dma = 0; - } - - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; - } - - if (buffer_info->page) { - if (buffer_info->page_dma) - dma_unmap_page(&pdev->dev, - buffer_info->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - put_page(buffer_info->page); - buffer_info->page = NULL; - buffer_info->page_dma = 0; - buffer_info->page_offset = 0; - } - } - - size = sizeof(struct igbvf_buffer) * rx_ring->count; - memset(rx_ring->buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - - writel(0, adapter->hw.hw_addr + rx_ring->head); - writel(0, adapter->hw.hw_addr + rx_ring->tail); -} - -/** - * igbvf_free_rx_resources - Free Rx Resources - * @rx_ring: ring to clean the resources from - * - * Free all receive software resources - **/ - -void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) -{ - struct pci_dev *pdev = rx_ring->adapter->pdev; - - igbvf_clean_rx_ring(rx_ring); - - vfree(rx_ring->buffer_info); - rx_ring->buffer_info = NULL; - - dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); - rx_ring->desc = NULL; -} - -/** - * igbvf_update_itr - update the dynamic ITR value based on statistics - * @adapter: pointer to adapter - * @itr_setting: current adapter->itr - * @packets: the number of packets during this measurement interval - * @bytes: the number of bytes during this measurement interval - * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. This functionality is controlled - * by the InterruptThrottleRate module parameter. 
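
The three latency classes above form a small state machine driven by the (packets, bytes) counts seen in one interrupt interval. As a quick way to see the thresholds in action, here is a standalone userspace C sketch (not driver code; the traffic samples are invented) that mirrors the classification logic of igbvf_update_itr() below:

#include <stdio.h>

enum latency_range { lowest_latency, low_latency, bulk_latency };

static const char *range_name[] = {
	"lowest_latency", "low_latency", "bulk_latency"
};

/* same thresholds as igbvf_update_itr() in the hunk below */
static enum latency_range classify(enum latency_range cur, int packets,
				   int bytes)
{
	if (packets == 0)
		return cur;

	switch (cur) {
	case lowest_latency:
		if (bytes / packets > 8000)	/* TSO/jumbo-sized frames */
			return bulk_latency;
		if (packets < 5 && bytes > 512)
			return low_latency;
		break;
	case low_latency:			/* ~20000 ints/s */
		if (bytes > 10000) {
			if (bytes / packets > 8000)
				return bulk_latency;
			if (packets < 10 || bytes / packets > 1200)
				return bulk_latency;
			if (packets > 35)
				return lowest_latency;
		} else if (bytes / packets > 2000) {
			return bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			return lowest_latency;
		}
		break;
	case bulk_latency:			/* ~4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				return low_latency;
		} else if (bytes < 6000) {
			return low_latency;
		}
		break;
	}
	return cur;
}

int main(void)
{
	/* invented (packets, bytes) pairs, one interrupt interval each */
	int samples[3][2] = { { 2, 128 }, { 3, 45000 }, { 40, 26000 } };
	enum latency_range cur = low_latency;
	int i;

	for (i = 0; i < 3; i++) {
		cur = classify(cur, samples[i][0], samples[i][1]);
		printf("packets=%2d bytes=%5d -> %s\n",
		       samples[i][0], samples[i][1], range_name[cur]);
	}
	return 0;
}

Small single-packet intervals drop toward lowest_latency, large streams push toward bulk_latency, which is exactly the bias toward bulk throughput the comment describes.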
- **/ -static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter, - u16 itr_setting, int packets, - int bytes) -{ - unsigned int retval = itr_setting; - - if (packets == 0) - goto update_itr_done; - - switch (itr_setting) { - case lowest_latency: - /* handle TSO and jumbo frames */ - if (bytes/packets > 8000) - retval = bulk_latency; - else if ((packets < 5) && (bytes > 512)) - retval = low_latency; - break; - case low_latency: /* 50 usec aka 20000 ints/s */ - if (bytes > 10000) { - /* this if handles the TSO accounting */ - if (bytes/packets > 8000) - retval = bulk_latency; - else if ((packets < 10) || ((bytes/packets) > 1200)) - retval = bulk_latency; - else if ((packets > 35)) - retval = lowest_latency; - } else if (bytes/packets > 2000) { - retval = bulk_latency; - } else if (packets <= 2 && bytes < 512) { - retval = lowest_latency; - } - break; - case bulk_latency: /* 250 usec aka 4000 ints/s */ - if (bytes > 25000) { - if (packets > 35) - retval = low_latency; - } else if (bytes < 6000) { - retval = low_latency; - } - break; - } - -update_itr_done: - return retval; -} - -static void igbvf_set_itr(struct igbvf_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u16 current_itr; - u32 new_itr = adapter->itr; - - adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr, - adapter->total_tx_packets, - adapter->total_tx_bytes); - /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) - adapter->tx_itr = low_latency; - - adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr, - adapter->total_rx_packets, - adapter->total_rx_bytes); - /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) - adapter->rx_itr = low_latency; - - current_itr = max(adapter->rx_itr, adapter->tx_itr); - - switch (current_itr) { - /* counts and packets in update_itr are dependent on these numbers */ - case lowest_latency: - new_itr = 70000; - break; - case low_latency: - new_itr = 20000; /* aka hwitr = ~200 */ - break; - case bulk_latency: - new_itr = 4000; - break; - default: - break; - } - - if (new_itr != adapter->itr) { - /* - * this attempts to bias the interrupt rate towards Bulk - * by adding intermediate steps when interrupt rate is - * increasing - */ - new_itr = new_itr > adapter->itr ? 
- min(adapter->itr + (new_itr >> 2), new_itr) : - new_itr; - adapter->itr = new_itr; - adapter->rx_ring->itr_val = 1952; - - if (adapter->msix_entries) - adapter->rx_ring->set_itr = 1; - else - ew32(ITR, 1952); - } -} - -/** - * igbvf_clean_tx_irq - Reclaim resources after transmit completes - * @adapter: board private structure - * returns true if ring is completely cleaned - **/ -static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) -{ - struct igbvf_adapter *adapter = tx_ring->adapter; - struct net_device *netdev = adapter->netdev; - struct igbvf_buffer *buffer_info; - struct sk_buff *skb; - union e1000_adv_tx_desc *tx_desc, *eop_desc; - unsigned int total_bytes = 0, total_packets = 0; - unsigned int i, eop, count = 0; - bool cleaned = false; - - i = tx_ring->next_to_clean; - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); - - while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && - (count < tx_ring->count)) { - rmb(); /* read buffer_info after eop_desc status */ - for (cleaned = false; !cleaned; count++) { - tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - cleaned = (i == eop); - skb = buffer_info->skb; - - if (skb) { - unsigned int segs, bytecount; - - /* gso_segs is currently only valid for tcp */ - segs = skb_shinfo(skb)->gso_segs ?: 1; - /* multiply data chunks by size of headers */ - bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; - total_packets += segs; - total_bytes += bytecount; - } - - igbvf_put_txbuf(adapter, buffer_info); - tx_desc->wb.status = 0; - - i++; - if (i == tx_ring->count) - i = 0; - } - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); - } - - tx_ring->next_to_clean = i; - - if (unlikely(count && - netif_carrier_ok(netdev) && - igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. 
- */ - smp_mb(); - if (netif_queue_stopped(netdev) && - !(test_bit(__IGBVF_DOWN, &adapter->state))) { - netif_wake_queue(netdev); - ++adapter->restart_queue; - } - } - - adapter->net_stats.tx_bytes += total_bytes; - adapter->net_stats.tx_packets += total_packets; - return count < tx_ring->count; -} - -static irqreturn_t igbvf_msix_other(int irq, void *data) -{ - struct net_device *netdev = data; - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - adapter->int_counter1++; - - netif_carrier_off(netdev); - hw->mac.get_link_status = 1; - if (!test_bit(__IGBVF_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); - - ew32(EIMS, adapter->eims_other); - - return IRQ_HANDLED; -} - -static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) -{ - struct net_device *netdev = data; - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct igbvf_ring *tx_ring = adapter->tx_ring; - - - adapter->total_tx_bytes = 0; - adapter->total_tx_packets = 0; - - /* auto mask will automatically reenable the interrupt when we write - * EICS */ - if (!igbvf_clean_tx_irq(tx_ring)) - /* Ring was not completely cleaned, so fire another interrupt */ - ew32(EICS, tx_ring->eims_value); - else - ew32(EIMS, tx_ring->eims_value); - - return IRQ_HANDLED; -} - -static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) -{ - struct net_device *netdev = data; - struct igbvf_adapter *adapter = netdev_priv(netdev); - - adapter->int_counter0++; - - /* Write the ITR value calculated at the end of the - * previous interrupt. - */ - if (adapter->rx_ring->set_itr) { - writel(adapter->rx_ring->itr_val, - adapter->hw.hw_addr + adapter->rx_ring->itr_register); - adapter->rx_ring->set_itr = 0; - } - - if (napi_schedule_prep(&adapter->rx_ring->napi)) { - adapter->total_rx_bytes = 0; - adapter->total_rx_packets = 0; - __napi_schedule(&adapter->rx_ring->napi); - } - - return IRQ_HANDLED; -} - -#define IGBVF_NO_QUEUE -1 - -static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, - int tx_queue, int msix_vector) -{ - struct e1000_hw *hw = &adapter->hw; - u32 ivar, index; - - /* 82576 uses a table-based method for assigning vectors. - Each queue has a single entry in the table to which we write - a vector number along with a "valid" bit. Sadly, the layout - of the table is somewhat counterintuitive. */ - if (rx_queue > IGBVF_NO_QUEUE) { - index = (rx_queue >> 1); - ivar = array_er32(IVAR0, index); - if (rx_queue & 0x1) { - /* vector goes into third byte of register */ - ivar = ivar & 0xFF00FFFF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 16; - } else { - /* vector goes into low byte of register */ - ivar = ivar & 0xFFFFFF00; - ivar |= msix_vector | E1000_IVAR_VALID; - } - adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector; - array_ew32(IVAR0, index, ivar); - } - if (tx_queue > IGBVF_NO_QUEUE) { - index = (tx_queue >> 1); - ivar = array_er32(IVAR0, index); - if (tx_queue & 0x1) { - /* vector goes into high byte of register */ - ivar = ivar & 0x00FFFFFF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 24; - } else { - /* vector goes into second byte of register */ - ivar = ivar & 0xFFFF00FF; - ivar |= (msix_vector | E1000_IVAR_VALID) << 8; - } - adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector; - array_ew32(IVAR0, index, ivar); - } -} - -/** - * igbvf_configure_msix - Configure MSI-X hardware - * - * igbvf_configure_msix sets up the hardware to properly - * generate MSI-X interrupts. 
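
The byte-lane packing that igbvf_assign_vector() above performs on the shared IVAR0 registers is easier to read in isolation. A minimal userspace sketch follows; the 0x80 valid bit is written here as an assumption matching the family's E1000_IVAR_VALID definition, and the queue/vector numbers are invented:

#include <stdio.h>
#include <stdint.h>

#define IVAR_VALID 0x80u	/* assumed value of E1000_IVAR_VALID */

static uint32_t ivar_set_rx(uint32_t ivar, int rx_queue, int vector)
{
	if (rx_queue & 1) {		/* odd Rx queue -> third byte */
		ivar &= 0xFF00FFFF;
		ivar |= ((uint32_t)vector | IVAR_VALID) << 16;
	} else {			/* even Rx queue -> low byte */
		ivar &= 0xFFFFFF00;
		ivar |= (uint32_t)vector | IVAR_VALID;
	}
	return ivar;
}

static uint32_t ivar_set_tx(uint32_t ivar, int tx_queue, int vector)
{
	if (tx_queue & 1) {		/* odd Tx queue -> high byte */
		ivar &= 0x00FFFFFF;
		ivar |= ((uint32_t)vector | IVAR_VALID) << 24;
	} else {			/* even Tx queue -> second byte */
		ivar &= 0xFFFF00FF;
		ivar |= ((uint32_t)vector | IVAR_VALID) << 8;
	}
	return ivar;
}

int main(void)
{
	uint32_t ivar = 0;

	ivar = ivar_set_tx(ivar, 0, 0);	/* Tx queue 0 -> vector 0 */
	ivar = ivar_set_rx(ivar, 0, 1);	/* Rx queue 0 -> vector 1 */
	printf("IVAR0[0] = 0x%08x\n", ivar);	/* prints 0x00008081 */
	return 0;
}

Each 32-bit IVAR0 entry covers two queue pairs: Rx queues land in byte lanes 0 and 2, Tx queues in lanes 1 and 3, which is the "counterintuitive" layout the comment warns about.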
- **/ -static void igbvf_configure_msix(struct igbvf_adapter *adapter) -{ - u32 tmp; - struct e1000_hw *hw = &adapter->hw; - struct igbvf_ring *tx_ring = adapter->tx_ring; - struct igbvf_ring *rx_ring = adapter->rx_ring; - int vector = 0; - - adapter->eims_enable_mask = 0; - - igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++); - adapter->eims_enable_mask |= tx_ring->eims_value; - if (tx_ring->itr_val) - writel(tx_ring->itr_val, - hw->hw_addr + tx_ring->itr_register); - else - writel(1952, hw->hw_addr + tx_ring->itr_register); - - igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++); - adapter->eims_enable_mask |= rx_ring->eims_value; - if (rx_ring->itr_val) - writel(rx_ring->itr_val, - hw->hw_addr + rx_ring->itr_register); - else - writel(1952, hw->hw_addr + rx_ring->itr_register); - - /* set vector for other causes, i.e. link changes */ - - tmp = (vector++ | E1000_IVAR_VALID); - - ew32(IVAR_MISC, tmp); - - adapter->eims_enable_mask = (1 << (vector)) - 1; - adapter->eims_other = 1 << (vector - 1); - e1e_flush(); -} - -static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) -{ - if (adapter->msix_entries) { - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } -} - -/** - * igbvf_set_interrupt_capability - set MSI or MSI-X if supported - * - * Attempt to configure interrupts using the best available - * capabilities of the hardware and kernel. - **/ -static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) -{ - int err = -ENOMEM; - int i; - - /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ - adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), - GFP_KERNEL); - if (adapter->msix_entries) { - for (i = 0; i < 3; i++) - adapter->msix_entries[i].entry = i; - - err = pci_enable_msix(adapter->pdev, - adapter->msix_entries, 3); - } - - if (err) { - /* MSI-X failed */ - dev_err(&adapter->pdev->dev, - "Failed to initialize MSI-X interrupts.\n"); - igbvf_reset_interrupt_capability(adapter); - } -} - -/** - * igbvf_request_msix - Initialize MSI-X interrupts - * - * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the - * kernel. 
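
For the fixed three-vector layout this driver uses (vector 0 for Tx, vector 1 for Rx, vector 2 for mailbox/link), the mask arithmetic at the end of igbvf_configure_msix() above works out as in this small sketch (plain C, values printed purely for illustration):

#include <stdio.h>

int main(void)
{
	int vector = 0;
	unsigned int tx_eims, rx_eims, enable_mask, other;

	tx_eims = 1u << vector++;		/* 0x1: Tx cause */
	rx_eims = 1u << vector++;		/* 0x2: Rx cause */
	vector++;				/* vector 2: link/mailbox */

	enable_mask = (1u << vector) - 1;	/* 0x7: all three causes */
	other = 1u << (vector - 1);		/* 0x4: "other" cause only */

	printf("tx=0x%x rx=0x%x enable=0x%x other=0x%x\n",
	       tx_eims, rx_eims, enable_mask, other);
	return 0;
}

So eims_enable_mask unmasks all three causes at once, while eims_other is what igbvf_msix_other() re-arms after handling a link-change interrupt.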
- **/ -static int igbvf_request_msix(struct igbvf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err = 0, vector = 0; - - if (strlen(netdev->name) < (IFNAMSIZ - 5)) { - sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); - sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); - } else { - memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); - memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); - } - - err = request_irq(adapter->msix_entries[vector].vector, - igbvf_intr_msix_tx, 0, adapter->tx_ring->name, - netdev); - if (err) - goto out; - - adapter->tx_ring->itr_register = E1000_EITR(vector); - adapter->tx_ring->itr_val = 1952; - vector++; - - err = request_irq(adapter->msix_entries[vector].vector, - igbvf_intr_msix_rx, 0, adapter->rx_ring->name, - netdev); - if (err) - goto out; - - adapter->rx_ring->itr_register = E1000_EITR(vector); - adapter->rx_ring->itr_val = 1952; - vector++; - - err = request_irq(adapter->msix_entries[vector].vector, - igbvf_msix_other, 0, netdev->name, netdev); - if (err) - goto out; - - igbvf_configure_msix(adapter); - return 0; -out: - return err; -} - -/** - * igbvf_alloc_queues - Allocate memory for all rings - * @adapter: board private structure to initialize - **/ -static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); - if (!adapter->tx_ring) - return -ENOMEM; - - adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); - if (!adapter->rx_ring) { - kfree(adapter->tx_ring); - return -ENOMEM; - } - - netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64); - - return 0; -} - -/** - * igbvf_request_irq - initialize interrupts - * - * Attempts to configure interrupts using the best available - * capabilities of the hardware and kernel. 
- **/ -static int igbvf_request_irq(struct igbvf_adapter *adapter) -{ - int err = -1; - - /* igbvf supports msi-x only */ - if (adapter->msix_entries) - err = igbvf_request_msix(adapter); - - if (!err) - return err; - - dev_err(&adapter->pdev->dev, - "Unable to allocate interrupt, Error: %d\n", err); - - return err; -} - -static void igbvf_free_irq(struct igbvf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int vector; - - if (adapter->msix_entries) { - for (vector = 0; vector < 3; vector++) - free_irq(adapter->msix_entries[vector].vector, netdev); - } -} - -/** - * igbvf_irq_disable - Mask off interrupt generation on the NIC - **/ -static void igbvf_irq_disable(struct igbvf_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - ew32(EIMC, ~0); - - if (adapter->msix_entries) - ew32(EIAC, 0); -} - -/** - * igbvf_irq_enable - Enable default interrupt generation settings - **/ -static void igbvf_irq_enable(struct igbvf_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - ew32(EIAC, adapter->eims_enable_mask); - ew32(EIAM, adapter->eims_enable_mask); - ew32(EIMS, adapter->eims_enable_mask); -} - -/** - * igbvf_poll - NAPI Rx polling callback - * @napi: struct associated with this polling callback - * @budget: amount of packets driver is allowed to process this poll - **/ -static int igbvf_poll(struct napi_struct *napi, int budget) -{ - struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi); - struct igbvf_adapter *adapter = rx_ring->adapter; - struct e1000_hw *hw = &adapter->hw; - int work_done = 0; - - igbvf_clean_rx_irq(adapter, &work_done, budget); - - /* If not enough Rx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - - if (adapter->itr_setting & 3) - igbvf_set_itr(adapter); - - if (!test_bit(__IGBVF_DOWN, &adapter->state)) - ew32(EIMS, adapter->rx_ring->eims_value); - } - - return work_done; -} - -/** - * igbvf_set_rlpml - set receive large packet maximum length - * @adapter: board private structure - * - * Configure the maximum size of packets that will be received - */ -static void igbvf_set_rlpml(struct igbvf_adapter *adapter) -{ - int max_frame_size; - struct e1000_hw *hw = &adapter->hw; - - max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; - e1000_rlpml_set_vf(hw, max_frame_size); -} - -static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - if (hw->mac.ops.set_vfta(hw, vid, true)) - dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); - else - set_bit(vid, adapter->active_vlans); -} - -static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - igbvf_irq_disable(adapter); - - if (!test_bit(__IGBVF_DOWN, &adapter->state)) - igbvf_irq_enable(adapter); - - if (hw->mac.ops.set_vfta(hw, vid, false)) - dev_err(&adapter->pdev->dev, - "Failed to remove vlan id %d\n", vid); - else - clear_bit(vid, adapter->active_vlans); -} - -static void igbvf_restore_vlan(struct igbvf_adapter *adapter) -{ - u16 vid; - - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - igbvf_vlan_rx_add_vid(adapter->netdev, vid); -} - -/** - * igbvf_configure_tx - Configure Transmit Unit after Reset - * @adapter: board private structure - * - * Configure the Tx unit of the MAC after a reset. 
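
igbvf_configure_tx() below advertises the ring allocated by igbvf_setup_tx_resources() to the MAC as a 32/32-bit split of its DMA address plus a byte length. A standalone sketch of that arithmetic, with an invented DMA address and the 16-byte advanced-descriptor size assumed for sizeof(union e1000_adv_tx_desc):

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t dma = 0x00000001fee10000ull;	/* example ring DMA address */
	unsigned int count = 1024;		/* descriptors in the ring */
	unsigned int desc_sz = 16;		/* advanced Tx descriptor size */
	uint64_t size = ALIGN_UP((uint64_t)count * desc_sz, 4096);

	printf("TDLEN = %llu bytes\n", (unsigned long long)size);
	printf("TDBAL = 0x%08x\n", (uint32_t)(dma & 0xffffffff));
	printf("TDBAH = 0x%08x\n", (uint32_t)(dma >> 32));
	return 0;
}

The 4 KB rounding in igbvf_setup_tx_resources() guarantees the length written to TDLEN is always a whole number of pages, as the hardware requires.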
- **/
-static void igbvf_configure_tx(struct igbvf_adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	struct igbvf_ring *tx_ring = adapter->tx_ring;
-	u64 tdba;
-	u32 txdctl, dca_txctrl;
-
-	/* disable transmits */
-	txdctl = er32(TXDCTL(0));
-	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
-	e1e_flush();
-	msleep(10);
-
-	/* Setup the HW Tx Head and Tail descriptor pointers */
-	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
-	tdba = tx_ring->dma;
-	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
-	ew32(TDBAH(0), (tdba >> 32));
-	ew32(TDH(0), 0);
-	ew32(TDT(0), 0);
-	tx_ring->head = E1000_TDH(0);
-	tx_ring->tail = E1000_TDT(0);
-
-	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
-	 * MUST be delivered in order or it will completely screw up
-	 * our bookkeeping.
-	 */
-	dca_txctrl = er32(DCA_TXCTRL(0));
-	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
-	ew32(DCA_TXCTRL(0), dca_txctrl);
-
-	/* enable transmits */
-	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-	ew32(TXDCTL(0), txdctl);
-
-	/* Setup Transmit Descriptor Settings for eop descriptor */
-	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
-
-	/* enable Report Status bit */
-	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
-}
-
-/**
- * igbvf_setup_srrctl - configure the receive control registers
- * @adapter: Board private structure
- **/
-static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	u32 srrctl = 0;
-
-	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
-		    E1000_SRRCTL_BSIZEHDR_MASK |
-		    E1000_SRRCTL_BSIZEPKT_MASK);
-
-	/* Enable queue drop to avoid head of line blocking */
-	srrctl |= E1000_SRRCTL_DROP_EN;
-
-	/* Setup buffer sizes */
-	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
-		  E1000_SRRCTL_BSIZEPKT_SHIFT;
-
-	if (adapter->rx_buffer_len < 2048) {
-		adapter->rx_ps_hdr_size = 0;
-		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-	} else {
-		adapter->rx_ps_hdr_size = 128;
-		srrctl |= adapter->rx_ps_hdr_size <<
-			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-	}
-
-	ew32(SRRCTL(0), srrctl);
-}
-
-/**
- * igbvf_configure_rx - Configure Receive Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Rx unit of the MAC after a reset.
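
igbvf_setup_srrctl() above and igbvf_change_mtu() later in this file together implement a simple Rx buffer policy: sub-2 KB buffers use the one-buffer descriptor format, anything larger switches to header split with a 128-byte header buffer. A compact userspace sketch of that policy (PAGE_SIZE assumed 4096; the usual Ethernet constants spelled out):

#include <stdio.h>

#define PAGE_SIZE	4096
#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4
#define ETH_FRAME_LEN	1514

int main(void)
{
	int mtus[3] = { 68, 1500, 9000 };
	int i;

	for (i = 0; i < 3; i++) {
		int max_frame = mtus[i] + ETH_HLEN + ETH_FCS_LEN;
		int buf_len, hdr_size;

		if (max_frame <= 1024)
			buf_len = 1024;
		else if (max_frame <= 2048)
			buf_len = 2048;
		else
			buf_len = PAGE_SIZE / 2;

		/* standard frames keep the default 1522-byte buffer */
		if (max_frame == ETH_FRAME_LEN + ETH_FCS_LEN ||
		    max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
			buf_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;

		hdr_size = (buf_len < 2048) ? 0 : 128;	/* 0 => one-buffer */
		printf("mtu %4d -> rx_buffer_len %4d, %s\n", mtus[i], buf_len,
		       hdr_size ? "header split (128B hdr)" : "single buffer");
	}
	return 0;
}

A standard 1500-byte MTU therefore stays in single-buffer mode, while jumbo frames are received as half-page fragments chained together by the EOP handling in igbvf_clean_rx_irq().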
- **/ -static void igbvf_configure_rx(struct igbvf_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct igbvf_ring *rx_ring = adapter->rx_ring; - u64 rdba; - u32 rdlen, rxdctl; - - /* disable receives */ - rxdctl = er32(RXDCTL(0)); - ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); - e1e_flush(); - msleep(10); - - rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); - - /* - * Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring - */ - rdba = rx_ring->dma; - ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); - ew32(RDBAH(0), (rdba >> 32)); - ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); - rx_ring->head = E1000_RDH(0); - rx_ring->tail = E1000_RDT(0); - ew32(RDH(0), 0); - ew32(RDT(0), 0); - - rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; - rxdctl &= 0xFFF00000; - rxdctl |= IGBVF_RX_PTHRESH; - rxdctl |= IGBVF_RX_HTHRESH << 8; - rxdctl |= IGBVF_RX_WTHRESH << 16; - - igbvf_set_rlpml(adapter); - - /* enable receives */ - ew32(RXDCTL(0), rxdctl); -} - -/** - * igbvf_set_multi - Multicast and Promiscuous mode set - * @netdev: network interface device structure - * - * The set_multi entry point is called whenever the multicast address - * list or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper multicast, - * promiscuous mode, and all-multi behavior. - **/ -static void igbvf_set_multi(struct net_device *netdev) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct netdev_hw_addr *ha; - u8 *mta_list = NULL; - int i; - - if (!netdev_mc_empty(netdev)) { - mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); - if (!mta_list) { - dev_err(&adapter->pdev->dev, - "failed to allocate multicast filter list\n"); - return; - } - } - - /* prepare a packed array of only addresses. */ - i = 0; - netdev_for_each_mc_addr(ha, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); - - hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); - kfree(mta_list); -} - -/** - * igbvf_configure - configure the hardware for Rx and Tx - * @adapter: private board structure - **/ -static void igbvf_configure(struct igbvf_adapter *adapter) -{ - igbvf_set_multi(adapter->netdev); - - igbvf_restore_vlan(adapter); - - igbvf_configure_tx(adapter); - igbvf_setup_srrctl(adapter); - igbvf_configure_rx(adapter); - igbvf_alloc_rx_buffers(adapter->rx_ring, - igbvf_desc_unused(adapter->rx_ring)); -} - -/* igbvf_reset - bring the hardware into a known good state - * - * This function boots the hardware and enables some settings that - * require a configuration cycle of the hardware - those cannot be - * set/changed during runtime. After reset the device needs to be - * properly configured for Rx, Tx etc. 
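
The packed array that igbvf_set_multi() above hands to the PF mailbox is just n MAC addresses laid end to end (the literal 6 in the kmalloc() is ETH_ALEN). A minimal sketch with two invented multicast addresses:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

int main(void)
{
	uint8_t macs[2][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },	/* 224.0.0.1 */
		{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 },	/* ff02::1 */
	};
	uint8_t mta_list[2 * ETH_ALEN];
	int i, j;

	/* the netdev_for_each_mc_addr() walk, flattened */
	for (i = 0; i < 2; i++)
		memcpy(mta_list + i * ETH_ALEN, macs[i], ETH_ALEN);

	for (i = 0; i < 2; i++)
		for (j = 0; j < ETH_ALEN; j++)
			printf("%02x%c", mta_list[i * ETH_ALEN + j],
			       j == ETH_ALEN - 1 ? '\n' : ':');
	return 0;
}

The VF has no hardware multicast filter of its own, so this flat list is everything the PF needs to program filtering on the VF's behalf.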
- */ -static void igbvf_reset(struct igbvf_adapter *adapter) -{ - struct e1000_mac_info *mac = &adapter->hw.mac; - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - - /* Allow time for pending master requests to run */ - if (mac->ops.reset_hw(hw)) - dev_err(&adapter->pdev->dev, "PF still resetting\n"); - - mac->ops.init_hw(hw); - - if (is_valid_ether_addr(adapter->hw.mac.addr)) { - memcpy(netdev->dev_addr, adapter->hw.mac.addr, - netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, - netdev->addr_len); - } - - adapter->last_reset = jiffies; -} - -int igbvf_up(struct igbvf_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - /* hardware has been reset, we need to reload some things */ - igbvf_configure(adapter); - - clear_bit(__IGBVF_DOWN, &adapter->state); - - napi_enable(&adapter->rx_ring->napi); - if (adapter->msix_entries) - igbvf_configure_msix(adapter); - - /* Clear any pending interrupts. */ - er32(EICR); - igbvf_irq_enable(adapter); - - /* start the watchdog */ - hw->mac.get_link_status = 1; - mod_timer(&adapter->watchdog_timer, jiffies + 1); - - - return 0; -} - -void igbvf_down(struct igbvf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - u32 rxdctl, txdctl; - - /* - * signal that we're down so the interrupt handler does not - * reschedule our watchdog timer - */ - set_bit(__IGBVF_DOWN, &adapter->state); - - /* disable receives in the hardware */ - rxdctl = er32(RXDCTL(0)); - ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); - - netif_stop_queue(netdev); - - /* disable transmits in the hardware */ - txdctl = er32(TXDCTL(0)); - ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); - - /* flush both disables and wait for them to finish */ - e1e_flush(); - msleep(10); - - napi_disable(&adapter->rx_ring->napi); - - igbvf_irq_disable(adapter); - - del_timer_sync(&adapter->watchdog_timer); - - netif_carrier_off(netdev); - - /* record the stats before reset*/ - igbvf_update_stats(adapter); - - adapter->link_speed = 0; - adapter->link_duplex = 0; - - igbvf_reset(adapter); - igbvf_clean_tx_ring(adapter->tx_ring); - igbvf_clean_rx_ring(adapter->rx_ring); -} - -void igbvf_reinit_locked(struct igbvf_adapter *adapter) -{ - might_sleep(); - while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); - igbvf_down(adapter); - igbvf_up(adapter); - clear_bit(__IGBVF_RESETTING, &adapter->state); -} - -/** - * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) - * @adapter: board private structure to initialize - * - * igbvf_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). 
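
The frame-size fields that igbvf_sw_init() below derives from the MTU follow the usual Ethernet arithmetic. For the default MTU of 1500 the numbers work out as in this sketch:

#include <stdio.h>

#define ETH_HLEN	14	/* dst MAC + src MAC + ethertype */
#define ETH_FCS_LEN	4
#define ETH_ZLEN	60	/* minimum frame length without FCS */
#define VLAN_HLEN	4
#define ETH_FRAME_LEN	1514

int main(void)
{
	int mtu = 1500;

	printf("rx_buffer_len  = %d\n",
	       ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN);	/* 1522 */
	printf("max_frame_size = %d\n", mtu + ETH_HLEN + ETH_FCS_LEN);	/* 1518 */
	printf("min_frame_size = %d\n", ETH_ZLEN + ETH_FCS_LEN);	/* 64 */
	return 0;
}

The extra VLAN_HLEN in the default buffer length leaves room for a tagged standard frame without bumping the allocation into the next size class.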
- **/ -static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - s32 rc; - - adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; - adapter->rx_ps_hdr_size = 0; - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - - adapter->tx_int_delay = 8; - adapter->tx_abs_int_delay = 32; - adapter->rx_int_delay = 0; - adapter->rx_abs_int_delay = 8; - adapter->itr_setting = 3; - adapter->itr = 20000; - - /* Set various function pointers */ - adapter->ei->init_ops(&adapter->hw); - - rc = adapter->hw.mac.ops.init_params(&adapter->hw); - if (rc) - return rc; - - rc = adapter->hw.mbx.ops.init_params(&adapter->hw); - if (rc) - return rc; - - igbvf_set_interrupt_capability(adapter); - - if (igbvf_alloc_queues(adapter)) - return -ENOMEM; - - spin_lock_init(&adapter->tx_queue_lock); - - /* Explicitly disable IRQ since the NIC can be in any state. */ - igbvf_irq_disable(adapter); - - spin_lock_init(&adapter->stats_lock); - - set_bit(__IGBVF_DOWN, &adapter->state); - return 0; -} - -static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - adapter->stats.last_gprc = er32(VFGPRC); - adapter->stats.last_gorc = er32(VFGORC); - adapter->stats.last_gptc = er32(VFGPTC); - adapter->stats.last_gotc = er32(VFGOTC); - adapter->stats.last_mprc = er32(VFMPRC); - adapter->stats.last_gotlbc = er32(VFGOTLBC); - adapter->stats.last_gptlbc = er32(VFGPTLBC); - adapter->stats.last_gorlbc = er32(VFGORLBC); - adapter->stats.last_gprlbc = er32(VFGPRLBC); - - adapter->stats.base_gprc = er32(VFGPRC); - adapter->stats.base_gorc = er32(VFGORC); - adapter->stats.base_gptc = er32(VFGPTC); - adapter->stats.base_gotc = er32(VFGOTC); - adapter->stats.base_mprc = er32(VFMPRC); - adapter->stats.base_gotlbc = er32(VFGOTLBC); - adapter->stats.base_gptlbc = er32(VFGPTLBC); - adapter->stats.base_gorlbc = er32(VFGORLBC); - adapter->stats.base_gprlbc = er32(VFGPRLBC); -} - -/** - * igbvf_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. - **/ -static int igbvf_open(struct net_device *netdev) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int err; - - /* disallow open during test */ - if (test_bit(__IGBVF_TESTING, &adapter->state)) - return -EBUSY; - - /* allocate transmit descriptors */ - err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); - if (err) - goto err_setup_tx; - - /* allocate receive descriptors */ - err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); - if (err) - goto err_setup_rx; - - /* - * before we allocate an interrupt, we must be ready to handle it. - * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt - * as soon as we call pci_request_irq, so we have to setup our - * clean_rx handler before we do so. 
- */ - igbvf_configure(adapter); - - err = igbvf_request_irq(adapter); - if (err) - goto err_req_irq; - - /* From here on the code is the same as igbvf_up() */ - clear_bit(__IGBVF_DOWN, &adapter->state); - - napi_enable(&adapter->rx_ring->napi); - - /* clear any pending interrupts */ - er32(EICR); - - igbvf_irq_enable(adapter); - - /* start the watchdog */ - hw->mac.get_link_status = 1; - mod_timer(&adapter->watchdog_timer, jiffies + 1); - - return 0; - -err_req_irq: - igbvf_free_rx_resources(adapter->rx_ring); -err_setup_rx: - igbvf_free_tx_resources(adapter->tx_ring); -err_setup_tx: - igbvf_reset(adapter); - - return err; -} - -/** - * igbvf_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. - **/ -static int igbvf_close(struct net_device *netdev) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - - WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); - igbvf_down(adapter); - - igbvf_free_irq(adapter); - - igbvf_free_tx_resources(adapter->tx_ring); - igbvf_free_rx_resources(adapter->rx_ring); - - return 0; -} -/** - * igbvf_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int igbvf_set_mac(struct net_device *netdev, void *p) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - - hw->mac.ops.rar_set(hw, hw->mac.addr, 0); - - if (memcmp(addr->sa_data, hw->mac.addr, 6)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - - return 0; -} - -#define UPDATE_VF_COUNTER(reg, name) \ - { \ - u32 current_counter = er32(reg); \ - if (current_counter < adapter->stats.last_##name) \ - adapter->stats.name += 0x100000000LL; \ - adapter->stats.last_##name = current_counter; \ - adapter->stats.name &= 0xFFFFFFFF00000000LL; \ - adapter->stats.name |= current_counter; \ - } - -/** - * igbvf_update_stats - Update the board statistics counters - * @adapter: board private structure -**/ -void igbvf_update_stats(struct igbvf_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - - /* - * Prevent stats update while adapter is being reset, link is down - * or if the pci connection is down. 
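
The UPDATE_VF_COUNTER macro above widens the VF's 32-bit hardware counters into 64-bit software counters by counting wraparounds. The same logic as a standalone function, fed invented register readings that wrap once:

#include <stdio.h>
#include <stdint.h>

static void update_counter(uint64_t *stat, uint32_t *last, uint32_t current)
{
	if (current < *last)			/* the 32-bit register wrapped */
		*stat += 0x100000000ull;
	*last = current;
	*stat &= 0xFFFFFFFF00000000ull;		/* keep the wrap count... */
	*stat |= current;			/* ...splice in the new reading */
}

int main(void)
{
	uint64_t gprc = 0;
	uint32_t last = 0;
	uint32_t readings[3] = { 100, 0xFFFFFFF0u, 0x10u };	/* wraps once */
	int i;

	for (i = 0; i < 3; i++) {
		update_counter(&gprc, &last, readings[i]);
		printf("reg=0x%08x total=%llu\n", readings[i],
		       (unsigned long long)gprc);
	}
	return 0;
}

After the wrap, the running total is 0x100000010 (4294967312) rather than snapping back to 16, which is why the watchdog calls igbvf_update_stats() often enough that the register cannot wrap twice between samples.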
- */ - if (adapter->link_speed == 0) - return; - - if (test_bit(__IGBVF_RESETTING, &adapter->state)) - return; - - if (pci_channel_offline(pdev)) - return; - - UPDATE_VF_COUNTER(VFGPRC, gprc); - UPDATE_VF_COUNTER(VFGORC, gorc); - UPDATE_VF_COUNTER(VFGPTC, gptc); - UPDATE_VF_COUNTER(VFGOTC, gotc); - UPDATE_VF_COUNTER(VFMPRC, mprc); - UPDATE_VF_COUNTER(VFGOTLBC, gotlbc); - UPDATE_VF_COUNTER(VFGPTLBC, gptlbc); - UPDATE_VF_COUNTER(VFGORLBC, gorlbc); - UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); - - /* Fill out the OS statistics structure */ - adapter->net_stats.multicast = adapter->stats.mprc; -} - -static void igbvf_print_link_info(struct igbvf_adapter *adapter) -{ - dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n", - adapter->link_speed, - ((adapter->link_duplex == FULL_DUPLEX) ? - "Full Duplex" : "Half Duplex")); -} - -static bool igbvf_has_link(struct igbvf_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - s32 ret_val = E1000_SUCCESS; - bool link_active; - - /* If interface is down, stay link down */ - if (test_bit(__IGBVF_DOWN, &adapter->state)) - return false; - - ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - - /* if check for link returns error we will need to reset */ - if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ))) - schedule_work(&adapter->reset_task); - - return link_active; -} - -/** - * igbvf_watchdog - Timer Call-back - * @data: pointer to adapter cast into an unsigned long - **/ -static void igbvf_watchdog(unsigned long data) -{ - struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; - - /* Do the rest outside of interrupt context */ - schedule_work(&adapter->watchdog_task); -} - -static void igbvf_watchdog_task(struct work_struct *work) -{ - struct igbvf_adapter *adapter = container_of(work, - struct igbvf_adapter, - watchdog_task); - struct net_device *netdev = adapter->netdev; - struct e1000_mac_info *mac = &adapter->hw.mac; - struct igbvf_ring *tx_ring = adapter->tx_ring; - struct e1000_hw *hw = &adapter->hw; - u32 link; - int tx_pending = 0; - - link = igbvf_has_link(adapter); - - if (link) { - if (!netif_carrier_ok(netdev)) { - mac->ops.get_link_up_info(&adapter->hw, - &adapter->link_speed, - &adapter->link_duplex); - igbvf_print_link_info(adapter); - - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } - } else { - if (netif_carrier_ok(netdev)) { - adapter->link_speed = 0; - adapter->link_duplex = 0; - dev_info(&adapter->pdev->dev, "Link is Down\n"); - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } - } - - if (netif_carrier_ok(netdev)) { - igbvf_update_stats(adapter); - } else { - tx_pending = (igbvf_desc_unused(tx_ring) + 1 < - tx_ring->count); - if (tx_pending) { - /* - * We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). 
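
The tx_pending test above leans on igbvf_desc_unused(), which is defined earlier in this file and not visible in this hunk; the sketch below reproduces the conventional form of that helper as an assumption (free slots between next_to_clean and next_to_use, minus one reserved slot) to show when a stuck queue triggers the reset path:

#include <stdio.h>

/* assumed shape of igbvf_desc_unused(); not copied from this hunk */
static int desc_unused(int count, int next_to_clean, int next_to_use)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;
	return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	int count = 8;

	/* empty ring: everything cleaned, one slot always reserved */
	printf("unused=%d tx_pending=%d\n", desc_unused(count, 0, 0),
	       desc_unused(count, 0, 0) + 1 < count);

	/* three descriptors still owned by hardware */
	printf("unused=%d tx_pending=%d\n", desc_unused(count, 2, 5),
	       desc_unused(count, 2, 5) + 1 < count);
	return 0;
}

With link down and any descriptor still outstanding, tx_pending evaluates true and the controller is reset to flush the queued work.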
- */ - adapter->tx_timeout_count++; - schedule_work(&adapter->reset_task); - } - } - - /* Cause software interrupt to ensure Rx ring is cleaned */ - ew32(EICS, adapter->rx_ring->eims_value); - - /* Reset the timer */ - if (!test_bit(__IGBVF_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + (2 * HZ))); -} - -#define IGBVF_TX_FLAGS_CSUM 0x00000001 -#define IGBVF_TX_FLAGS_VLAN 0x00000002 -#define IGBVF_TX_FLAGS_TSO 0x00000004 -#define IGBVF_TX_FLAGS_IPV4 0x00000008 -#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 - -static int igbvf_tso(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) -{ - struct e1000_adv_tx_context_desc *context_desc; - unsigned int i; - int err; - struct igbvf_buffer *buffer_info; - u32 info = 0, tu_cmd = 0; - u32 mss_l4len_idx, l4len; - *hdr_len = 0; - - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) { - dev_err(&adapter->pdev->dev, - "igbvf_tso returning an error\n"); - return err; - } - } - - l4len = tcp_hdrlen(skb); - *hdr_len += l4len; - - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - } - - i = tx_ring->next_to_use; - - buffer_info = &tx_ring->buffer_info[i]; - context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); - /* VLAN MACLEN IPLEN */ - if (tx_flags & IGBVF_TX_FLAGS_VLAN) - info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); - *hdr_len += skb_network_offset(skb); - info |= (skb_transport_header(skb) - skb_network_header(skb)); - *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); - context_desc->vlan_macip_lens = cpu_to_le32(info); - - /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); - - if (skb->protocol == htons(ETH_P_IP)) - tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - - context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); - - /* MSS L4LEN IDX */ - mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); - mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); - - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - context_desc->seqnum_seed = 0; - - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; - buffer_info->dma = 0; - i++; - if (i == tx_ring->count) - i = 0; - - tx_ring->next_to_use = i; - - return true; -} - -static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags) -{ - struct e1000_adv_tx_context_desc *context_desc; - unsigned int i; - struct igbvf_buffer *buffer_info; - u32 info = 0, tu_cmd = 0; - - if ((skb->ip_summed == CHECKSUM_PARTIAL) || - (tx_flags & IGBVF_TX_FLAGS_VLAN)) { - i = tx_ring->next_to_use; - buffer_info = &tx_ring->buffer_info[i]; - context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); - - if (tx_flags & IGBVF_TX_FLAGS_VLAN) - info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); - - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); - if (skb->ip_summed == CHECKSUM_PARTIAL) - info |= (skb_transport_header(skb) - - skb_network_header(skb)); - - - 
context_desc->vlan_macip_lens = cpu_to_le32(info);
-
-		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
-
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			switch (skb->protocol) {
-			case __constant_htons(ETH_P_IP):
-				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
-				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
-				break;
-			case __constant_htons(ETH_P_IPV6):
-				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
-					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
-				break;
-			default:
-				break;
-			}
-		}
-
-		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
-		context_desc->seqnum_seed = 0;
-		context_desc->mss_l4len_idx = 0;
-
-		buffer_info->time_stamp = jiffies;
-		buffer_info->next_to_watch = i;
-		buffer_info->dma = 0;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
-
-		return true;
-	}
-
-	return false;
-}
-
-static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
-{
-	struct igbvf_adapter *adapter = netdev_priv(netdev);
-
-	/* if there are enough descriptors then we don't need to worry */
-	if (igbvf_desc_unused(adapter->tx_ring) >= size)
-		return 0;
-
-	netif_stop_queue(netdev);
-
-	smp_mb();
-
-	/* We need to check again just in case room has been made available */
-	if (igbvf_desc_unused(adapter->tx_ring) < size)
-		return -EBUSY;
-
-	netif_wake_queue(netdev);
-
-	++adapter->restart_queue;
-	return 0;
-}
-
-#define IGBVF_MAX_TXD_PWR	16
-#define IGBVF_MAX_DATA_PER_TXD	(1 << IGBVF_MAX_TXD_PWR)
-
-static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
-				   struct igbvf_ring *tx_ring,
-				   struct sk_buff *skb,
-				   unsigned int first)
-{
-	struct igbvf_buffer *buffer_info;
-	struct pci_dev *pdev = adapter->pdev;
-	unsigned int len = skb_headlen(skb);
-	unsigned int count = 0, i;
-	unsigned int f;
-
-	i = tx_ring->next_to_use;
-
-	buffer_info = &tx_ring->buffer_info[i];
-	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
-	buffer_info->length = len;
-	/* set time_stamp *before* dma to help avoid a possible race */
-	buffer_info->time_stamp = jiffies;
-	buffer_info->next_to_watch = i;
-	buffer_info->mapped_as_page = false;
-	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
-					  DMA_TO_DEVICE);
-	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
-		goto dma_error;
-
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
-		struct skb_frag_struct *frag;
-
-		count++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-
-		frag = &skb_shinfo(skb)->frags[f];
-		len = frag->size;
-
-		buffer_info = &tx_ring->buffer_info[i];
-		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
-		buffer_info->length = len;
-		buffer_info->time_stamp = jiffies;
-		buffer_info->next_to_watch = i;
-		buffer_info->mapped_as_page = true;
-		buffer_info->dma = dma_map_page(&pdev->dev,
-						frag->page,
-						frag->page_offset,
-						len,
-						DMA_TO_DEVICE);
-		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
-			goto dma_error;
-	}
-
-	tx_ring->buffer_info[i].skb = skb;
-	tx_ring->buffer_info[first].next_to_watch = i;
-
-	return ++count;
-
-dma_error:
-	dev_err(&pdev->dev, "TX DMA map failed\n");
-
-	/* clear timestamp and dma mappings for failed buffer_info mapping */
-	buffer_info->dma = 0;
-	buffer_info->time_stamp = 0;
-	buffer_info->length = 0;
-	buffer_info->next_to_watch = 0;
-	buffer_info->mapped_as_page = false;
-	if (count)
-		count--;
-
-	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count--) {
-		if (i == 0)
-			i += tx_ring->count;
-		i--;
-		buffer_info = &tx_ring->buffer_info[i];
-		igbvf_put_txbuf(adapter, buffer_info);
-	}
-
-	return 0;
-}
-
-static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
-				      struct igbvf_ring *tx_ring,
-				      int tx_flags, int count, u32 paylen,
-				      u8 hdr_len)
-{
-	union e1000_adv_tx_desc *tx_desc = NULL;
-	struct igbvf_buffer *buffer_info;
-	u32 olinfo_status = 0, cmd_type_len;
-	unsigned int i;
-
-	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
-			E1000_ADVTXD_DCMD_DEXT);
-
-	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
-		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
-
-	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
-		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
-
-		/* insert tcp checksum */
-		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-
-		/* insert ip checksum */
-		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
-			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
-
-	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
-		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-	}
-
-	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
-
-	i = tx_ring->next_to_use;
-	while (count--) {
-		buffer_info = &tx_ring->buffer_info[i];
-		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
-		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
-		tx_desc->read.cmd_type_len =
-			cpu_to_le32(cmd_type_len | buffer_info->length);
-		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	}
-
-	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64). */
-	wmb();
-
-	tx_ring->next_to_use = i;
-	writel(i, adapter->hw.hw_addr + tx_ring->tail);
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems */
-	mmiowb();
-}
-
-static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
-					     struct net_device *netdev,
-					     struct igbvf_ring *tx_ring)
-{
-	struct igbvf_adapter *adapter = netdev_priv(netdev);
-	unsigned int first, tx_flags = 0;
-	u8 hdr_len = 0;
-	int count = 0;
-	int tso = 0;
-
-	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
-	if (skb->len <= 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
-	/*
-	 * need count + 4 descriptors:
-	 *       + 2 desc gap to keep tail from touching head,
-	 *       + 1 desc for skb->data,
-	 *       + 1 desc for context descriptor;
-	 * otherwise try again next time
-	 */
-	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
-		/* this is a hard error */
-		return NETDEV_TX_BUSY;
-	}
-
-	if (vlan_tx_tag_present(skb)) {
-		tx_flags |= IGBVF_TX_FLAGS_VLAN;
-		tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
-	}
-
-	if (skb->protocol == htons(ETH_P_IP))
-		tx_flags |= IGBVF_TX_FLAGS_IPV4;
-
-	first = tx_ring->next_to_use;
-
-	tso = skb_is_gso(skb) ?
-		igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
-	if (unlikely(tso < 0)) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
-	if (tso)
-		tx_flags |= IGBVF_TX_FLAGS_TSO;
-	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
-		 (skb->ip_summed == CHECKSUM_PARTIAL))
-		tx_flags |= IGBVF_TX_FLAGS_CSUM;
-
-	/*
-	 * count reflects descriptors mapped, if 0 then mapping error
-	 * has occurred and we need to rewind the descriptor queue
-	 */
-	count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
-
-	if (count) {
-		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
-				   skb->len, hdr_len);
-		/* Make sure there is space in the ring for the next send.
*/ - igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); - } else { - dev_kfree_skb_any(skb); - tx_ring->buffer_info[first].time_stamp = 0; - tx_ring->next_to_use = first; - } - - return NETDEV_TX_OK; -} - -static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct igbvf_ring *tx_ring; - - if (test_bit(__IGBVF_DOWN, &adapter->state)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - tx_ring = &adapter->tx_ring[0]; - - return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring); -} - -/** - * igbvf_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - **/ -static void igbvf_tx_timeout(struct net_device *netdev) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - - /* Do the reset outside of interrupt context */ - adapter->tx_timeout_count++; - schedule_work(&adapter->reset_task); -} - -static void igbvf_reset_task(struct work_struct *work) -{ - struct igbvf_adapter *adapter; - adapter = container_of(work, struct igbvf_adapter, reset_task); - - igbvf_reinit_locked(adapter); -} - -/** - * igbvf_get_stats - Get System Network Statistics - * @netdev: network interface device structure - * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. - **/ -static struct net_device_stats *igbvf_get_stats(struct net_device *netdev) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - - /* only return the current stats */ - return &adapter->net_stats; -} - -/** - * igbvf_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - **/ -static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - - if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { - dev_err(&adapter->pdev->dev, "Invalid MTU setting\n"); - return -EINVAL; - } - -#define MAX_STD_JUMBO_FRAME_SIZE 9234 - if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { - dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); - return -EINVAL; - } - - while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); - /* igbvf_down has a dependency on max_frame_size */ - adapter->max_frame_size = max_frame; - if (netif_running(netdev)) - igbvf_down(adapter); - - /* - * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN - * means we reserve 2 more, this pushes us to allocate from the next - * larger slab size. - * i.e. 
RXBUFFER_2048 --> size-4096 slab
-	 * However with the new *_jumbo_rx* routines, jumbo receives will use
-	 * fragmented skbs
-	 */
-
-	if (max_frame <= 1024)
-		adapter->rx_buffer_len = 1024;
-	else if (max_frame <= 2048)
-		adapter->rx_buffer_len = 2048;
-	else
-#if (PAGE_SIZE / 2) > 16384
-		adapter->rx_buffer_len = 16384;
-#else
-		adapter->rx_buffer_len = PAGE_SIZE / 2;
-#endif
-
-	/* adjust allocation if LPE protects us, and we aren't using SBP */
-	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
-		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
-					 ETH_FCS_LEN;
-
-	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
-		 netdev->mtu, new_mtu);
-	netdev->mtu = new_mtu;
-
-	if (netif_running(netdev))
-		igbvf_up(adapter);
-	else
-		igbvf_reset(adapter);
-
-	clear_bit(__IGBVF_RESETTING, &adapter->state);
-
-	return 0;
-}
-
-static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
-	switch (cmd) {
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct igbvf_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
-	int retval = 0;
-#endif
-
-	netif_device_detach(netdev);
-
-	if (netif_running(netdev)) {
-		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
-		igbvf_down(adapter);
-		igbvf_free_irq(adapter);
-	}
-
-#ifdef CONFIG_PM
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-#endif
-
-	pci_disable_device(pdev);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int igbvf_resume(struct pci_dev *pdev)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct igbvf_adapter *adapter = netdev_priv(netdev);
-	u32 err;
-
-	pci_restore_state(pdev);
-	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
-		return err;
-	}
-
-	pci_set_master(pdev);
-
-	if (netif_running(netdev)) {
-		err = igbvf_request_irq(adapter);
-		if (err)
-			return err;
-	}
-
-	igbvf_reset(adapter);
-
-	if (netif_running(netdev))
-		igbvf_up(adapter);
-
-	netif_device_attach(netdev);
-
-	return 0;
-}
-#endif
-
-static void igbvf_shutdown(struct pci_dev *pdev)
-{
-	igbvf_suspend(pdev, PMSG_SUSPEND);
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void igbvf_netpoll(struct net_device *netdev)
-{
-	struct igbvf_adapter *adapter = netdev_priv(netdev);
-
-	disable_irq(adapter->pdev->irq);
-
-	igbvf_clean_tx_irq(adapter->tx_ring);
-
-	enable_irq(adapter->pdev->irq);
-}
-#endif
-
-/**
- * igbvf_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
- *
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
-static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
-						pci_channel_state_t state)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct igbvf_adapter *adapter = netdev_priv(netdev);
-
-	netif_device_detach(netdev);
-
-	if (state == pci_channel_io_perm_failure)
-		return PCI_ERS_RESULT_DISCONNECT;
-
-	if (netif_running(netdev))
-		igbvf_down(adapter);
-	pci_disable_device(pdev);
-
-	/* Request a slot reset. */
-	return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * igbvf_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
- *
- * Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first half of the igbvf_resume routine.
- */
-static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct igbvf_adapter *adapter = netdev_priv(netdev);
-
-	if (pci_enable_device_mem(pdev)) {
-		dev_err(&pdev->dev,
-			"Cannot re-enable PCI device after reset.\n");
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-	pci_set_master(pdev);
-
-	igbvf_reset(adapter);
-
-	return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * igbvf_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the error recovery driver tells us that
- * it's OK to resume normal operation. Implementation resembles the
- * second half of the igbvf_resume routine.
- */
-static void igbvf_io_resume(struct pci_dev *pdev)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct igbvf_adapter *adapter = netdev_priv(netdev);
-
-	if (netif_running(netdev)) {
-		if (igbvf_up(adapter)) {
-			dev_err(&pdev->dev,
-				"can't bring device back up after reset\n");
-			return;
-		}
-	}
-
-	netif_device_attach(netdev);
-}
-
-static void igbvf_print_device_info(struct igbvf_adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-
-	dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
-	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
-	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
-}
-
-static const struct net_device_ops igbvf_netdev_ops = {
-	.ndo_open			= igbvf_open,
-	.ndo_stop			= igbvf_close,
-	.ndo_start_xmit			= igbvf_xmit_frame,
-	.ndo_get_stats			= igbvf_get_stats,
-	.ndo_set_multicast_list		= igbvf_set_multi,
-	.ndo_set_mac_address		= igbvf_set_mac,
-	.ndo_change_mtu			= igbvf_change_mtu,
-	.ndo_do_ioctl			= igbvf_ioctl,
-	.ndo_tx_timeout			= igbvf_tx_timeout,
-	.ndo_vlan_rx_add_vid		= igbvf_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid		= igbvf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller		= igbvf_netpoll,
-#endif
-};
-
-/**
- * igbvf_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @ent: entry in igbvf_pci_tbl
- *
- * Returns 0 on success, negative on failure
- *
- * igbvf_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
- **/ -static int __devinit igbvf_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct igbvf_adapter *adapter; - struct e1000_hw *hw; - const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; - - static int cards_found; - int err, pci_using_dac; - - err = pci_enable_device_mem(pdev); - if (err) - return err; - - pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); - goto err_dma; - } - } - } - - err = pci_request_regions(pdev, igbvf_driver_name); - if (err) - goto err_pci_reg; - - pci_set_master(pdev); - - err = -ENOMEM; - netdev = alloc_etherdev(sizeof(struct igbvf_adapter)); - if (!netdev) - goto err_alloc_etherdev; - - SET_NETDEV_DEV(netdev, &pdev->dev); - - pci_set_drvdata(pdev, netdev); - adapter = netdev_priv(netdev); - hw = &adapter->hw; - adapter->netdev = netdev; - adapter->pdev = pdev; - adapter->ei = ei; - adapter->pba = ei->pba; - adapter->flags = ei->flags; - adapter->hw.back = adapter; - adapter->hw.mac.type = ei->mac; - adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; - - /* PCI config space info */ - - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - hw->revision_id = pdev->revision; - - err = -EIO; - adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - - if (!adapter->hw.hw_addr) - goto err_ioremap; - - if (ei->get_variants) { - err = ei->get_variants(adapter); - if (err) - goto err_ioremap; - } - - /* setup adapter struct */ - err = igbvf_sw_init(adapter); - if (err) - goto err_sw_init; - - /* construct the net_device struct */ - netdev->netdev_ops = &igbvf_netdev_ops; - - igbvf_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - - adapter->bd_number = cards_found++; - - netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; - - netdev->features |= NETIF_F_IPV6_CSUM; - netdev->features |= NETIF_F_TSO; - netdev->features |= NETIF_F_TSO6; - - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; - - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_IP_CSUM; - netdev->vlan_features |= NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_SG; - - /*reset the controller to put the device in a known good state */ - err = hw->mac.ops.reset_hw(hw); - if (err) { - dev_info(&pdev->dev, - "PF still in reset state, assigning new address." 
- " Is the PF interface up?\n"); - dev_hw_addr_random(adapter->netdev, hw->mac.addr); - } else { - err = hw->mac.ops.read_mac_addr(hw); - if (err) { - dev_err(&pdev->dev, "Error reading MAC address\n"); - goto err_hw_init; - } - } - - memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); - - if (!is_valid_ether_addr(netdev->perm_addr)) { - dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", - netdev->dev_addr); - err = -EIO; - goto err_hw_init; - } - - setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, - (unsigned long) adapter); - - INIT_WORK(&adapter->reset_task, igbvf_reset_task); - INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); - - /* ring size defaults */ - adapter->rx_ring->count = 1024; - adapter->tx_ring->count = 1024; - - /* reset the hardware with the new settings */ - igbvf_reset(adapter); - - strcpy(netdev->name, "eth%d"); - err = register_netdev(netdev); - if (err) - goto err_hw_init; - - /* tell the stack to leave us alone until igbvf_open() is called */ - netif_carrier_off(netdev); - netif_stop_queue(netdev); - - igbvf_print_device_info(adapter); - - igbvf_initialize_last_counter_stats(adapter); - - return 0; - -err_hw_init: - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); -err_sw_init: - igbvf_reset_interrupt_capability(adapter); - iounmap(adapter->hw.hw_addr); -err_ioremap: - free_netdev(netdev); -err_alloc_etherdev: - pci_release_regions(pdev); -err_pci_reg: -err_dma: - pci_disable_device(pdev); - return err; -} - -/** - * igbvf_remove - Device Removal Routine - * @pdev: PCI device information struct - * - * igbvf_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. - **/ -static void __devexit igbvf_remove(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct igbvf_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - - /* - * The watchdog timer may be rescheduled, so explicitly - * disable it from being rescheduled. 
- */ - set_bit(__IGBVF_DOWN, &adapter->state); - del_timer_sync(&adapter->watchdog_timer); - - cancel_work_sync(&adapter->reset_task); - cancel_work_sync(&adapter->watchdog_task); - - unregister_netdev(netdev); - - igbvf_reset_interrupt_capability(adapter); - - /* - * it is important to delete the napi struct prior to freeing the - * rx ring so that you do not end up with null pointer refs - */ - netif_napi_del(&adapter->rx_ring->napi); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); - - iounmap(hw->hw_addr); - if (hw->flash_address) - iounmap(hw->flash_address); - pci_release_regions(pdev); - - free_netdev(netdev); - - pci_disable_device(pdev); -} - -/* PCI Error Recovery (ERS) */ -static struct pci_error_handlers igbvf_err_handler = { - .error_detected = igbvf_io_error_detected, - .slot_reset = igbvf_io_slot_reset, - .resume = igbvf_io_resume, -}; - -static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = { - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf }, - { } /* terminate list */ -}; -MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); - -/* PCI Device API Driver */ -static struct pci_driver igbvf_driver = { - .name = igbvf_driver_name, - .id_table = igbvf_pci_tbl, - .probe = igbvf_probe, - .remove = __devexit_p(igbvf_remove), -#ifdef CONFIG_PM - /* Power Management Hooks */ - .suspend = igbvf_suspend, - .resume = igbvf_resume, -#endif - .shutdown = igbvf_shutdown, - .err_handler = &igbvf_err_handler -}; - -/** - * igbvf_init_module - Driver Registration Routine - * - * igbvf_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - **/ -static int __init igbvf_init_module(void) -{ - int ret; - printk(KERN_INFO "%s - version %s\n", - igbvf_driver_string, igbvf_driver_version); - printk(KERN_INFO "%s\n", igbvf_copyright); - - ret = pci_register_driver(&igbvf_driver); - - return ret; -} -module_init(igbvf_init_module); - -/** - * igbvf_exit_module - Driver Exit Cleanup Routine - * - * igbvf_exit_module is called just before the driver is removed - * from memory. - **/ -static void __exit igbvf_exit_module(void) -{ - pci_unregister_driver(&igbvf_driver); -} -module_exit(igbvf_exit_module); - - -MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -/* netdev.c */ diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h deleted file mode 100644 index 77e18d3..0000000 --- a/drivers/net/igbvf/regs.h +++ /dev/null @@ -1,108 +0,0 @@ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". 
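The igbvf_err_handler wiring above is the three-stage PCI error recovery contract: error_detected() quiesces the device and asks for a reset, slot_reset() re-enables it after the bus reset, and resume() lets traffic flow again. A minimal sketch of the same shape for a hypothetical "foo" net driver (not part of this patch):

#include <linux/pci.h>
#include <linux/netdevice.h>

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);	/* stop the stack touching us */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;	/* ask the core for a reset */
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device_mem(pdev))	/* bring the device back up */
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	/* ... re-initialize hardware state here ... */
	return PCI_ERS_RESULT_RECOVERED;
}

static void foo_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_attach(netdev);	/* let traffic flow again */
}

static struct pci_error_handlers foo_err_handler = {
	.error_detected = foo_error_detected,
	.slot_reset     = foo_slot_reset,
	.resume         = foo_resume,
};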
- - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_REGS_H_ -#define _E1000_REGS_H_ - -#define E1000_CTRL 0x00000 /* Device Control - RW */ -#define E1000_STATUS 0x00008 /* Device Status - RO */ -#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ -#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) -#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */ -#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ -#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ -#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ -#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ -#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ -#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ -/* - * Convenience macros - * - * Note: "_n" is the queue number of the register to be written to. - * - * Example usage: - * E1000_RDBAL(current_rx_queue) - */ -#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ - (0x0C000 + ((_n) * 0x40))) -#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ - (0x0C004 + ((_n) * 0x40))) -#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ - (0x0C008 + ((_n) * 0x40))) -#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ - (0x0C00C + ((_n) * 0x40))) -#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ - (0x0C010 + ((_n) * 0x40))) -#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ - (0x0C018 + ((_n) * 0x40))) -#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ - (0x0C028 + ((_n) * 0x40))) -#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ - (0x0E000 + ((_n) * 0x40))) -#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ - (0x0E004 + ((_n) * 0x40))) -#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ - (0x0E008 + ((_n) * 0x40))) -#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ - (0x0E010 + ((_n) * 0x40))) -#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ - (0x0E018 + ((_n) * 0x40))) -#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ - (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) -#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x054E0 + ((_i - 16) * 8))) -#define E1000_RAH(_i) (((_i) <= 15) ?
(0x05404 + ((_i) * 8)) : \ - (0x054E4 + ((_i - 16) * 8))) - -/* Statistics registers */ -#define E1000_VFGPRC 0x00F10 -#define E1000_VFGORC 0x00F18 -#define E1000_VFMPRC 0x00F3C -#define E1000_VFGPTC 0x00F14 -#define E1000_VFGOTC 0x00F34 -#define E1000_VFGOTLBC 0x00F50 -#define E1000_VFGPTLBC 0x00F44 -#define E1000_VFGORLBC 0x00F48 -#define E1000_VFGPRLBC 0x00F40 - -/* These act per VF so an array friendly macro is used */ -#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) -#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) - -/* Define macros for handling registers */ -#define er32(reg) readl(hw->hw_addr + E1000_##reg) -#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) -#define array_er32(reg, offset) \ - readl(hw->hw_addr + E1000_##reg + (offset << 2)) -#define array_ew32(reg, offset, val) \ - writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) -#define e1e_flush() er32(STATUS) - -#endif diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c deleted file mode 100644 index af3822f..0000000 --- a/drivers/net/igbvf/vf.c +++ /dev/null @@ -1,402 +0,0 @@ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -#include "vf.h" - -static s32 e1000_check_for_link_vf(struct e1000_hw *hw); -static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, - u16 *duplex); -static s32 e1000_init_hw_vf(struct e1000_hw *hw); -static s32 e1000_reset_hw_vf(struct e1000_hw *hw); - -static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, - u32, u32, u32); -static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); -static s32 e1000_read_mac_addr_vf(struct e1000_hw *); -static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); - -/** - * e1000_init_mac_params_vf - Inits MAC params - * @hw: pointer to the HW structure - **/ -static s32 e1000_init_mac_params_vf(struct e1000_hw *hw) -{ - struct e1000_mac_info *mac = &hw->mac; - - /* VF's have no MTA Registers - PF feature only */ - mac->mta_reg_count = 128; - /* VF's have no access to RAR entries */ - mac->rar_entry_count = 1; - - /* Function pointers */ - /* reset */ - mac->ops.reset_hw = e1000_reset_hw_vf; - /* hw initialization */ - mac->ops.init_hw = e1000_init_hw_vf; - /* check for link */ - mac->ops.check_for_link = e1000_check_for_link_vf; - /* link info */ - mac->ops.get_link_up_info = e1000_get_link_up_info_vf; - /* multicast address update */ - mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf; - /* set mac address */ - mac->ops.rar_set = e1000_rar_set_vf; - /* read mac address */ - mac->ops.read_mac_addr = e1000_read_mac_addr_vf; - /* set vlan filter table array */ - mac->ops.set_vfta = e1000_set_vfta_vf; - - return E1000_SUCCESS; -} - -/** - * e1000_init_function_pointers_vf - Inits function pointers - * @hw: pointer to the HW structure - **/ -void e1000_init_function_pointers_vf(struct e1000_hw *hw) -{ - hw->mac.ops.init_params = e1000_init_mac_params_vf; - hw->mbx.ops.init_params = e1000_init_mbx_params_vf; -} - -/** - * e1000_get_link_up_info_vf - Gets link info. - * @hw: pointer to the HW structure - * @speed: pointer to 16 bit value to store link speed. - * @duplex: pointer to 16 bit value to store duplex. - * - * Since we cannot read the PHY and get accurate link info, we must rely upon - * the status register's data which is often stale and inaccurate. - **/ -static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, - u16 *duplex) -{ - s32 status; - - status = er32(STATUS); - if (status & E1000_STATUS_SPEED_1000) - *speed = SPEED_1000; - else if (status & E1000_STATUS_SPEED_100) - *speed = SPEED_100; - else - *speed = SPEED_10; - - if (status & E1000_STATUS_FD) - *duplex = FULL_DUPLEX; - else - *duplex = HALF_DUPLEX; - - return E1000_SUCCESS; -} - -/** - * e1000_reset_hw_vf - Resets the HW - * @hw: pointer to the HW structure - * - * VF's provide a function level reset. This is done using bit 26 of ctrl_reg. - * This is all the reset we can perform on a VF. 
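For reference, e1000_get_link_up_info_vf() above is a pure bit-decode of the STATUS register. The self-contained sketch below reproduces that decode in userspace; the bit values are the usual e1000 ones from defines.h, which is not part of this hunk, so treat them as assumptions:

#include <stdio.h>
#include <stdint.h>

#define E1000_STATUS_FD         0x00000001	/* full duplex */
#define E1000_STATUS_LU         0x00000002	/* link up */
#define E1000_STATUS_SPEED_100  0x00000040
#define E1000_STATUS_SPEED_1000 0x00000080

/* Same decode as e1000_get_link_up_info_vf(), minus the hardware read. */
static void decode_status(uint32_t status)
{
	unsigned speed = (status & E1000_STATUS_SPEED_1000) ? 1000 :
			 (status & E1000_STATUS_SPEED_100)  ? 100 : 10;
	const char *duplex = (status & E1000_STATUS_FD) ? "full" : "half";

	printf("status=0x%08x: link %s, %u Mb/s, %s duplex\n",
	       (unsigned)status,
	       (status & E1000_STATUS_LU) ? "up" : "down", speed, duplex);
}

int main(void)
{
	decode_status(0x80 | 0x02 | 0x01);	/* 1000 Mb/s, link up, full */
	decode_status(0x40 | 0x02);		/* 100 Mb/s, link up, half */
	return 0;
}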
- **/ -static s32 e1000_reset_hw_vf(struct e1000_hw *hw) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - u32 timeout = E1000_VF_INIT_TIMEOUT; - u32 ret_val = -E1000_ERR_MAC_INIT; - u32 msgbuf[3]; - u8 *addr = (u8 *)(&msgbuf[1]); - u32 ctrl; - - /* assert vf queue/interrupt reset */ - ctrl = er32(CTRL); - ew32(CTRL, ctrl | E1000_CTRL_RST); - - /* we cannot initialize while the RSTI / RSTD bits are asserted */ - while (!mbx->ops.check_for_rst(hw) && timeout) { - timeout--; - udelay(5); - } - - if (timeout) { - /* mailbox timeout can now become active */ - mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; - - /* notify pf of vf reset completion */ - msgbuf[0] = E1000_VF_RESET; - mbx->ops.write_posted(hw, msgbuf, 1); - - msleep(10); - - /* set our "perm_addr" based on info provided by PF */ - ret_val = mbx->ops.read_posted(hw, msgbuf, 3); - if (!ret_val) { - if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) - memcpy(hw->mac.perm_addr, addr, 6); - else - ret_val = -E1000_ERR_MAC_INIT; - } - } - - return ret_val; -} - -/** - * e1000_init_hw_vf - Inits the HW - * @hw: pointer to the HW structure - * - * Not much to do here except clear the PF Reset indication if there is one. - **/ -static s32 e1000_init_hw_vf(struct e1000_hw *hw) -{ - /* attempt to set and restore our mac address */ - e1000_rar_set_vf(hw, hw->mac.addr, 0); - - return E1000_SUCCESS; -} - -/** - * e1000_hash_mc_addr_vf - Generate a multicast hash value - * @hw: pointer to the HW structure - * @mc_addr: pointer to a multicast address - * - * Generates a multicast address hash value which is used to determine - * the multicast filter table array address and new table value. See - * e1000_mta_set_generic() - **/ -static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) -{ - u32 hash_value, hash_mask; - u8 bit_shift = 0; - - /* Register count multiplied by bits per register */ - hash_mask = (hw->mac.mta_reg_count * 32) - 1; - - /* - * The bit_shift is the number of left-shifts - * where 0xFF would still fall within the hash mask. - */ - while (hash_mask >> bit_shift != 0xFF) - bit_shift++; - - hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | - (((u16) mc_addr[5]) << bit_shift))); - - return hash_value; -} - -/** - * e1000_update_mc_addr_list_vf - Update Multicast addresses - * @hw: pointer to the HW structure - * @mc_addr_list: array of multicast addresses to program - * @mc_addr_count: number of multicast addresses to program - * @rar_used_count: the first RAR register free to program - * @rar_count: total number of supported Receive Address Registers - * - * Updates the Receive Address Registers and Multicast Table Array. - * The caller must have a packed mc_addr_list of multicast addresses. - * The parameter rar_count will usually be hw->mac.rar_entry_count - * unless there are workarounds that change this. - **/ -static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 rar_used_count, u32 rar_count) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - u32 msgbuf[E1000_VFMAILBOX_SIZE]; - u16 *hash_list = (u16 *)&msgbuf[1]; - u32 hash_value; - u32 cnt, i; - - /* Each entry in the list uses 1 16 bit word. We have 30 - * 16 bit words available in our HW msg buffer (minus 1 for the - * msg type). That's 30 hash values if we pack 'em right. If - * there are more than 30 MC addresses to add then punt the - * extras for now and then add code to handle more than 30 later. 
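A worked example of the hash computation in e1000_hash_mc_addr_vf() above, assuming mta_reg_count = 128 as set in e1000_init_mac_params_vf(): the mask is 128 * 32 - 1 = 0x0FFF, the shift loop settles at 4 (0x0FFF >> 4 == 0xFF), and the hash mixes the top 4 bits of byte 4 with byte 5 of the address. Standalone and runnable:

#include <stdio.h>
#include <stdint.h>

/* Mirrors e1000_hash_mc_addr_vf() with mta_reg_count fixed at 128. */
static unsigned hash_mc_addr(const uint8_t *mc_addr)
{
	unsigned hash_mask = (128 * 32) - 1;	/* 0x0FFF */
	unsigned bit_shift = 0;

	while (hash_mask >> bit_shift != 0xFF)	/* ends with bit_shift == 4 */
		bit_shift++;

	return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc_addr[5] << bit_shift));
}

int main(void)
{
	/* 01:00:5e:00:00:fb -- an arbitrary IPv4 multicast MAC */
	uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

	/* addr[4] >> 4 = 0x0, addr[5] << 4 = 0xfb0 -> hash = 0xfb0 */
	printf("hash = 0x%03x\n", hash_mc_addr(addr));
	return 0;
}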
- * It would be unusual for a server to request that many multicast - * addresses except in large enterprise network environments. - */ - - cnt = (mc_addr_count > 30) ? 30 : mc_addr_count; - msgbuf[0] = E1000_VF_SET_MULTICAST; - msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT; - - for (i = 0; i < cnt; i++) { - hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); - hash_list[i] = hash_value & 0x0FFFF; - mc_addr_list += ETH_ADDR_LEN; - } - - mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); -} - -/** - * e1000_set_vfta_vf - Set/Unset vlan filter table address - * @hw: pointer to the HW structure - * @vid: determines the vfta register and bit to set/unset - * @set: if true then set bit, else clear bit - **/ -static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - u32 msgbuf[2]; - s32 err; - - msgbuf[0] = E1000_VF_SET_VLAN; - msgbuf[1] = vid; - /* Setting the 8 bit field MSG INFO to true indicates "add" */ - if (set) - msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT; - - mbx->ops.write_posted(hw, msgbuf, 2); - - err = mbx->ops.read_posted(hw, msgbuf, 2); - - msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; - - /* if nacked the vlan was rejected */ - if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) - err = -E1000_ERR_MAC_INIT; - - return err; -} - -/** - * e1000_rlpml_set_vf - Set the maximum receive packet length - * @hw: pointer to the HW structure - * @max_size: value to assign to max frame size - **/ -void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - u32 msgbuf[2]; - - msgbuf[0] = E1000_VF_SET_LPE; - msgbuf[1] = max_size; - - mbx->ops.write_posted(hw, msgbuf, 2); -} - -/** - * e1000_rar_set_vf - set device MAC address - * @hw: pointer to the HW structure - * @addr: pointer to the receive address - * @index: receive address array register - **/ -static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - u32 msgbuf[3]; - u8 *msg_addr = (u8 *)(&msgbuf[1]); - s32 ret_val; - - memset(msgbuf, 0, 12); - msgbuf[0] = E1000_VF_SET_MAC_ADDR; - memcpy(msg_addr, addr, 6); - ret_val = mbx->ops.write_posted(hw, msgbuf, 3); - - if (!ret_val) - ret_val = mbx->ops.read_posted(hw, msgbuf, 3); - - msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; - - /* if nacked the address was rejected, use "perm_addr" */ - if (!ret_val && - (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) - e1000_read_mac_addr_vf(hw); -} - -/** - * e1000_read_mac_addr_vf - Read device MAC address - * @hw: pointer to the HW structure - **/ -static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) -{ - int i; - - for (i = 0; i < ETH_ADDR_LEN; i++) - hw->mac.addr[i] = hw->mac.perm_addr[i]; - - return E1000_SUCCESS; -} - -/** - * e1000_check_for_link_vf - Check for link for a virtual interface - * @hw: pointer to the HW structure - * - * Checks to see if the underlying PF is still talking to the VF and - * if it is then it reports the link state to the hardware, otherwise - * it reports link down and returns an error. - **/ -static s32 e1000_check_for_link_vf(struct e1000_hw *hw) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - struct e1000_mac_info *mac = &hw->mac; - s32 ret_val = E1000_SUCCESS; - u32 in_msg = 0; - - /* - * We only want to run this if there has been a rst asserted.
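All of the VF-to-PF requests above share one framing: msgbuf[0] carries the opcode in its low bits and a small "message info" field above E1000_VT_MSGINFO_SHIFT. The constants below are the usual values from mbx.h, which is not part of this hunk, so treat them as assumptions; the packing logic itself matches the code above:

#include <stdio.h>
#include <stdint.h>

#define E1000_VF_SET_MULTICAST	0x03	/* opcode, assumed from mbx.h */
#define E1000_VT_MSGINFO_SHIFT	16	/* assumed from mbx.h */

int main(void)
{
	uint32_t cnt = 5;	/* five multicast hash entries follow */
	uint32_t msg = E1000_VF_SET_MULTICAST |
		       (cnt << E1000_VT_MSGINFO_SHIFT);

	/* opcode in the low half, info field above the shift */
	printf("msgbuf[0] = 0x%08x (opcode %u, info %u)\n",
	       (unsigned)msg, (unsigned)(msg & 0xFFFF),
	       (unsigned)((msg >> E1000_VT_MSGINFO_SHIFT) & 0xFF));
	return 0;
}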
- * In this case that could mean a link change, device reset, - * or a virtual function reset - */ - - /* If we were hit with a reset or timeout drop the link */ - if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) - mac->get_link_status = true; - - if (!mac->get_link_status) - goto out; - - /* if link status is down no point in checking to see if pf is up */ - if (!(er32(STATUS) & E1000_STATUS_LU)) - goto out; - - /* if the read failed it could just be a mailbox collision, best wait - * until we are called again and don't report an error */ - if (mbx->ops.read(hw, &in_msg, 1)) - goto out; - - /* if incoming message isn't clear to send we are waiting on response */ - if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { - /* if the message is not CTS but is a NACK we must have lost CTS status */ - if (in_msg & E1000_VT_MSGTYPE_NACK) - ret_val = -E1000_ERR_MAC_INIT; - goto out; - } - - /* the pf is talking, if we timed out in the past we reinit */ - if (!mbx->timeout) { - ret_val = -E1000_ERR_MAC_INIT; - goto out; - } - - /* if we passed all the tests above then the link is up and we no - * longer need to check for link */ - mac->get_link_status = false; - -out: - return ret_val; -} - diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h deleted file mode 100644 index d7ed58f..0000000 --- a/drivers/net/igbvf/vf.h +++ /dev/null @@ -1,266 +0,0 @@ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _E1000_VF_H_ -#define _E1000_VF_H_ - -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/if_ether.h> - -#include "regs.h" -#include "defines.h" - -struct e1000_hw; - -#define E1000_DEV_ID_82576_VF 0x10CA -#define E1000_DEV_ID_I350_VF 0x1520 -#define E1000_REVISION_0 0 -#define E1000_REVISION_1 1 -#define E1000_REVISION_2 2 -#define E1000_REVISION_3 3 -#define E1000_REVISION_4 4 - -#define E1000_FUNC_0 0 -#define E1000_FUNC_1 1 - -/* - * Receive Address Register Count - * Number of high/low register pairs in the RAR. The RAR (Receive Address - * Registers) holds the directed and multicast addresses that we monitor. - * These entries are also used for MAC-based filtering.
- */ -#define E1000_RAR_ENTRIES_VF 1 - -/* Receive Descriptor - Advanced */ -union e1000_adv_rx_desc { - struct { - u64 pkt_addr; /* Packet buffer address */ - u64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - union { - u32 data; - struct { - u16 pkt_info; /* RSS/Packet type */ - u16 hdr_info; /* Split Header, - * hdr buffer length */ - } hs_rss; - } lo_dword; - union { - u32 rss; /* RSS Hash */ - struct { - u16 ip_id; /* IP id */ - u16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - u32 status_error; /* ext status/error */ - u16 length; /* Packet length */ - u16 vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ -}; - -#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 -#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 - -/* Transmit Descriptor - Advanced */ -union e1000_adv_tx_desc { - struct { - u64 buffer_addr; /* Address of descriptor's data buf */ - u32 cmd_type_len; - u32 olinfo_status; - } read; - struct { - u64 rsvd; /* Reserved */ - u32 nxtseq_seed; - u32 status; - } wb; -}; - -/* Adv Transmit Descriptor Config Masks */ -#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ -#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ -#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ -#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ -#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ - -/* Context descriptors */ -struct e1000_adv_tx_context_desc { - u32 vlan_macip_lens; - u32 seqnum_seed; - u32 type_tucmd_mlhl; - u32 mss_l4len_idx; -}; - -#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ - -enum e1000_mac_type { - e1000_undefined = 0, - e1000_vfadapt, - e1000_vfadapt_i350, - e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ -}; - -struct e1000_vf_stats { - u64 base_gprc; - u64 base_gptc; - u64 base_gorc; - u64 base_gotc; - u64 base_mprc; - u64 base_gotlbc; - u64 base_gptlbc; - u64 base_gorlbc; - u64 base_gprlbc; - - u32 last_gprc; - u32 last_gptc; - u32 last_gorc; - u32 last_gotc; - u32 last_mprc; - u32 last_gotlbc; - u32 last_gptlbc; - u32 last_gorlbc; - u32 last_gprlbc; - - u64 gprc; - u64 gptc; - u64 gorc; - u64 gotc; - u64 mprc; - u64 gotlbc; - u64 gptlbc; - u64 gorlbc; - u64 gprlbc; -}; - -#include "mbx.h" - -struct e1000_mac_operations { - /* Function pointers for the MAC. 
*/ - s32 (*init_params)(struct e1000_hw *); - s32 (*check_for_link)(struct e1000_hw *); - void (*clear_vfta)(struct e1000_hw *); - s32 (*get_bus_info)(struct e1000_hw *); - s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); - void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); - s32 (*reset_hw)(struct e1000_hw *); - s32 (*init_hw)(struct e1000_hw *); - s32 (*setup_link)(struct e1000_hw *); - void (*write_vfta)(struct e1000_hw *, u32, u32); - void (*mta_set)(struct e1000_hw *, u32); - void (*rar_set)(struct e1000_hw *, u8*, u32); - s32 (*read_mac_addr)(struct e1000_hw *); - s32 (*set_vfta)(struct e1000_hw *, u16, bool); -}; - -struct e1000_mac_info { - struct e1000_mac_operations ops; - u8 addr[6]; - u8 perm_addr[6]; - - enum e1000_mac_type type; - - u16 mta_reg_count; - u16 rar_entry_count; - - bool get_link_status; -}; - -struct e1000_mbx_operations { - s32 (*init_params)(struct e1000_hw *hw); - s32 (*read)(struct e1000_hw *, u32 *, u16); - s32 (*write)(struct e1000_hw *, u32 *, u16); - s32 (*read_posted)(struct e1000_hw *, u32 *, u16); - s32 (*write_posted)(struct e1000_hw *, u32 *, u16); - s32 (*check_for_msg)(struct e1000_hw *); - s32 (*check_for_ack)(struct e1000_hw *); - s32 (*check_for_rst)(struct e1000_hw *); -}; - -struct e1000_mbx_stats { - u32 msgs_tx; - u32 msgs_rx; - - u32 acks; - u32 reqs; - u32 rsts; -}; - -struct e1000_mbx_info { - struct e1000_mbx_operations ops; - struct e1000_mbx_stats stats; - u32 timeout; - u32 usec_delay; - u16 size; -}; - -struct e1000_dev_spec_vf { - u32 vf_number; - u32 v2p_mailbox; -}; - -struct e1000_hw { - void *back; - - u8 __iomem *hw_addr; - u8 __iomem *flash_address; - unsigned long io_base; - - struct e1000_mac_info mac; - struct e1000_mbx_info mbx; - - union { - struct e1000_dev_spec_vf vf; - } dev_spec; - - u16 device_id; - u16 subsystem_vendor_id; - u16 subsystem_device_id; - u16 vendor_id; - - u8 revision_id; -}; - -/* These functions must be implemented by drivers */ -void e1000_rlpml_set_vf(struct e1000_hw *, u16); -void e1000_init_function_pointers_vf(struct e1000_hw *hw); - - -#endif /* _E1000_VF_H_ */ diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile deleted file mode 100644 index 0b20c5e..0000000 --- a/drivers/net/ixgb/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -################################################################################ -# -# Intel PRO/10GbE Linux driver -# Copyright(c) 1999 - 2008 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - -# -# Makefile for the Intel(R) PRO/10GbE ethernet driver -# - -obj-$(CONFIG_IXGB) += ixgb.o - -ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h deleted file mode 100644 index 49e8408..0000000 --- a/drivers/net/ixgb/ixgb.h +++ /dev/null @@ -1,217 +0,0 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGB_H_ -#define _IXGB_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define BAR_0 0 -#define BAR_1 1 -#define BAR_5 5 - -struct ixgb_adapter; -#include "ixgb_hw.h" -#include "ixgb_ee.h" -#include "ixgb_ids.h" - -#define PFX "ixgb: " - -#ifdef _DEBUG_DRIVER_ -#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args) -#else -#define IXGB_DBG(fmt, args...) \ -do { \ - if (0) \ - printk(KERN_DEBUG PFX fmt, ##args); \ -} while (0) -#endif - -/* TX/RX descriptor defines */ -#define DEFAULT_TXD 256 -#define MAX_TXD 4096 -#define MIN_TXD 64 - -/* hardware cannot reliably support more than 512 descriptors owned by - * hardware descriptor cache otherwise an unreliable ring under heavy - * receive load may result */ -#define DEFAULT_RXD 512 -#define MAX_RXD 512 -#define MIN_RXD 64 - -/* Supported Rx Buffer Sizes */ -#define IXGB_RXBUFFER_2048 2048 -#define IXGB_RXBUFFER_4096 4096 -#define IXGB_RXBUFFER_8192 8192 -#define IXGB_RXBUFFER_16384 16384 - -/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ -#define IXGB_RX_BUFFER_WRITE 8 /* Must be power of 2 */ - -/* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ -struct ixgb_buffer { - struct sk_buff *skb; - dma_addr_t dma; - unsigned long time_stamp; - u16 length; - u16 next_to_watch; - u16 mapped_as_page; -}; - -struct ixgb_desc_ring { - /* pointer to the descriptor ring memory */ - void *desc; - /* physical address of the descriptor ring */ - dma_addr_t dma; - /* length of descriptor ring in bytes */ - unsigned int size; - /* number of descriptors in the ring */ - unsigned int count; - /* next descriptor to associate a buffer with */ - unsigned int next_to_use; - /* next descriptor to check for DD status bit */ - unsigned int next_to_clean; - /* array of buffer information structs */ - struct ixgb_buffer *buffer_info; -}; - -#define IXGB_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) - -#define IXGB_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) -#define IXGB_RX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_rx_desc) -#define IXGB_TX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_tx_desc) -#define IXGB_CONTEXT_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_context_desc) - -/* board specific private data structure */ - -struct ixgb_adapter { - struct timer_list watchdog_timer; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u32 bd_number; - u32 rx_buffer_len; - u32 part_num; - u16 link_speed; - u16 link_duplex; - struct work_struct tx_timeout_task; - - /* TX */ - struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; - unsigned int restart_queue; - unsigned long timeo_start; - u32 tx_cmd_type; - u64 hw_csum_tx_good; - u64 hw_csum_tx_error; - u32 tx_int_delay; - u32 tx_timeout_count; - bool tx_int_delay_enable; - bool detect_tx_hung; - - /* RX */ - struct ixgb_desc_ring rx_ring; - u64 hw_csum_rx_error; - u64 hw_csum_rx_good; - u32 rx_int_delay; - bool rx_csum; - - /* OS defined structs */ - struct napi_struct napi; - struct net_device *netdev; - struct pci_dev *pdev; - - /* structs defined in ixgb_hw.h */ - struct ixgb_hw hw; - u16 msg_enable; - struct ixgb_hw_stats stats; - u32 alloc_rx_buff_failed; - bool have_msi; - unsigned long flags; -}; - -enum ixgb_state_t { - /* TBD - __IXGB_TESTING, - __IXGB_RESETTING, - */ - __IXGB_DOWN -}; - -/* Exported from other modules */ -extern void ixgb_check_options(struct ixgb_adapter *adapter); -extern void ixgb_set_ethtool_ops(struct net_device *netdev); -extern char ixgb_driver_name[]; -extern const char ixgb_driver_version[]; - -extern int ixgb_up(struct ixgb_adapter *adapter); -extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog); -extern void ixgb_reset(struct ixgb_adapter *adapter); -extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter); -extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter); -extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter); -extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter); -extern void ixgb_update_stats(struct ixgb_adapter *adapter); - - -#endif /* _IXGB_H_ */ diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c deleted file mode 100644 index 38b362b..0000000 --- a/drivers/net/ixgb/ixgb_ee.c +++ /dev/null @@ -1,607 +0,0 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. 
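The IXGB_DESC_UNUSED() macro above computes the free-slot count of the descriptor ring, keeping one slot permanently empty so that head == tail unambiguously means "ring empty". A self-contained worked example of the same arithmetic:

#include <stdio.h>

/* Same arithmetic as IXGB_DESC_UNUSED() above. */
static unsigned desc_unused(unsigned count, unsigned next_to_clean,
			    unsigned next_to_use)
{
	return ((next_to_clean > next_to_use) ? 0 : count) +
	       next_to_clean - next_to_use - 1;
}

int main(void)
{
	/* 256-entry ring, 10 descriptors given to hardware, 5 cleaned:
	 * 256 + 5 - 10 - 1 = 250 slots still free */
	printf("%u\n", desc_unused(256, 5, 10));

	/* cleaner ahead of producer after a wrap: 0 + 20 - 3 - 1 = 16 */
	printf("%u\n", desc_unused(256, 20, 3));
	return 0;
}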
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include "ixgb_hw.h" -#include "ixgb_ee.h" -/* Local prototypes */ -static u16 ixgb_shift_in_bits(struct ixgb_hw *hw); - -static void ixgb_shift_out_bits(struct ixgb_hw *hw, - u16 data, - u16 count); -static void ixgb_standby_eeprom(struct ixgb_hw *hw); - -static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw); - -static void ixgb_cleanup_eeprom(struct ixgb_hw *hw); - -/****************************************************************************** - * Raises the EEPROM's clock input. - * - * hw - Struct containing variables accessed by shared code - * eecd_reg - EECD's current value - *****************************************************************************/ -static void -ixgb_raise_clock(struct ixgb_hw *hw, - u32 *eecd_reg) -{ - /* Raise the clock input to the EEPROM (by setting the SK bit), and then - * wait 50 microseconds. - */ - *eecd_reg = *eecd_reg | IXGB_EECD_SK; - IXGB_WRITE_REG(hw, EECD, *eecd_reg); - IXGB_WRITE_FLUSH(hw); - udelay(50); -} - -/****************************************************************************** - * Lowers the EEPROM's clock input. - * - * hw - Struct containing variables accessed by shared code - * eecd_reg - EECD's current value - *****************************************************************************/ -static void -ixgb_lower_clock(struct ixgb_hw *hw, - u32 *eecd_reg) -{ - /* Lower the clock input to the EEPROM (by clearing the SK bit), and then - * wait 50 microseconds. - */ - *eecd_reg = *eecd_reg & ~IXGB_EECD_SK; - IXGB_WRITE_REG(hw, EECD, *eecd_reg); - IXGB_WRITE_FLUSH(hw); - udelay(50); -} - -/****************************************************************************** - * Shift data bits out to the EEPROM. - * - * hw - Struct containing variables accessed by shared code - * data - data to send to the EEPROM - * count - number of bits to shift out - *****************************************************************************/ -static void -ixgb_shift_out_bits(struct ixgb_hw *hw, - u16 data, - u16 count) -{ - u32 eecd_reg; - u32 mask; - - /* We need to shift "count" bits out to the EEPROM. So, value in the - * "data" parameter will be shifted out to the EEPROM one bit at a time. - * In order to do this, "data" must be broken down into bits. 
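The shift-out routine described above is a classic MSB-first bit-bang: a one-bit mask walks down from the top bit and each bit is presented on DI while the clock is pulsed. A host-side model of just that ordering, with the register pokes reduced to printed bits:

#include <stdio.h>
#include <stdint.h>

/* Models the bit ordering of ixgb_shift_out_bits(); in the driver each
 * iteration sets/clears DI in EECD and pulses the SK clock instead. */
static void shift_out_bits(uint16_t data, uint16_t count)
{
	uint32_t mask = 0x01 << (count - 1);

	do {
		putchar((data & mask) ? '1' : '0');
		mask >>= 1;
	} while (mask);
	putchar('\n');
}

int main(void)
{
	shift_out_bits(0x6, 3);		/* EEPROM_READ_OPCODE -> "110" */
	shift_out_bits(0x25, 6);	/* a 6-bit word address -> "100101" */
	return 0;
}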
- */ - mask = 0x01 << (count - 1); - eecd_reg = IXGB_READ_REG(hw, EECD); - eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); - do { - /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", - * and then raising and then lowering the clock (the SK bit controls - * the clock input to the EEPROM). A "0" is shifted out to the EEPROM - * by setting "DI" to "0" and then raising and then lowering the clock. - */ - eecd_reg &= ~IXGB_EECD_DI; - - if (data & mask) - eecd_reg |= IXGB_EECD_DI; - - IXGB_WRITE_REG(hw, EECD, eecd_reg); - IXGB_WRITE_FLUSH(hw); - - udelay(50); - - ixgb_raise_clock(hw, &eecd_reg); - ixgb_lower_clock(hw, &eecd_reg); - - mask = mask >> 1; - - } while (mask); - - /* We leave the "DI" bit set to "0" when we leave this routine. */ - eecd_reg &= ~IXGB_EECD_DI; - IXGB_WRITE_REG(hw, EECD, eecd_reg); -} - -/****************************************************************************** - * Shift data bits in from the EEPROM - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -static u16 -ixgb_shift_in_bits(struct ixgb_hw *hw) -{ - u32 eecd_reg; - u32 i; - u16 data; - - /* In order to read a register from the EEPROM, we need to shift 16 bits - * in from the EEPROM. Bits are "shifted in" by raising the clock input to - * the EEPROM (setting the SK bit), and then reading the value of the "DO" - * bit. During this "shifting in" process the "DI" bit should always be - * clear.. - */ - - eecd_reg = IXGB_READ_REG(hw, EECD); - - eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); - data = 0; - - for (i = 0; i < 16; i++) { - data = data << 1; - ixgb_raise_clock(hw, &eecd_reg); - - eecd_reg = IXGB_READ_REG(hw, EECD); - - eecd_reg &= ~(IXGB_EECD_DI); - if (eecd_reg & IXGB_EECD_DO) - data |= 1; - - ixgb_lower_clock(hw, &eecd_reg); - } - - return data; -} - -/****************************************************************************** - * Prepares EEPROM for access - * - * hw - Struct containing variables accessed by shared code - * - * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This - * function should be called before issuing a command to the EEPROM. 
- *****************************************************************************/ -static void -ixgb_setup_eeprom(struct ixgb_hw *hw) -{ - u32 eecd_reg; - - eecd_reg = IXGB_READ_REG(hw, EECD); - - /* Clear SK and DI */ - eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI); - IXGB_WRITE_REG(hw, EECD, eecd_reg); - - /* Set CS */ - eecd_reg |= IXGB_EECD_CS; - IXGB_WRITE_REG(hw, EECD, eecd_reg); -} - -/****************************************************************************** - * Returns EEPROM to a "standby" state - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -static void -ixgb_standby_eeprom(struct ixgb_hw *hw) -{ - u32 eecd_reg; - - eecd_reg = IXGB_READ_REG(hw, EECD); - - /* Deselect EEPROM */ - eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); - IXGB_WRITE_REG(hw, EECD, eecd_reg); - IXGB_WRITE_FLUSH(hw); - udelay(50); - - /* Clock high */ - eecd_reg |= IXGB_EECD_SK; - IXGB_WRITE_REG(hw, EECD, eecd_reg); - IXGB_WRITE_FLUSH(hw); - udelay(50); - - /* Select EEPROM */ - eecd_reg |= IXGB_EECD_CS; - IXGB_WRITE_REG(hw, EECD, eecd_reg); - IXGB_WRITE_FLUSH(hw); - udelay(50); - - /* Clock low */ - eecd_reg &= ~IXGB_EECD_SK; - IXGB_WRITE_REG(hw, EECD, eecd_reg); - IXGB_WRITE_FLUSH(hw); - udelay(50); -} - -/****************************************************************************** - * Raises then lowers the EEPROM's clock pin - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -static void -ixgb_clock_eeprom(struct ixgb_hw *hw) -{ - u32 eecd_reg; - - eecd_reg = IXGB_READ_REG(hw, EECD); - - /* Rising edge of clock */ - eecd_reg |= IXGB_EECD_SK; - IXGB_WRITE_REG(hw, EECD, eecd_reg); - IXGB_WRITE_FLUSH(hw); - udelay(50); - - /* Falling edge of clock */ - eecd_reg &= ~IXGB_EECD_SK; - IXGB_WRITE_REG(hw, EECD, eecd_reg); - IXGB_WRITE_FLUSH(hw); - udelay(50); -} - -/****************************************************************************** - * Terminates a command by lowering the EEPROM's chip select pin - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -static void -ixgb_cleanup_eeprom(struct ixgb_hw *hw) -{ - u32 eecd_reg; - - eecd_reg = IXGB_READ_REG(hw, EECD); - - eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI); - - IXGB_WRITE_REG(hw, EECD, eecd_reg); - - ixgb_clock_eeprom(hw); -} - -/****************************************************************************** - * Waits for the EEPROM to finish the current command. - * - * hw - Struct containing variables accessed by shared code - * - * The command is done when the EEPROM's data out pin goes high. - * - * Returns: - * true: EEPROM data pin is high before timeout. - * false: Time expired. - *****************************************************************************/ -static bool -ixgb_wait_eeprom_command(struct ixgb_hw *hw) -{ - u32 eecd_reg; - u32 i; - - /* Toggle the CS line. This in effect tells the EEPROM to actually execute - * the command in question. - */ - ixgb_standby_eeprom(hw); - - /* Now read DO repeatedly until it is high (equal to '1'). The EEPROM will - * signal that the command has been completed by raising the DO signal. - * If DO does not go high in 10 milliseconds, then error out.
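The 10 ms budget mentioned above comes from the loop bounds in the body below: up to 200 polls spaced 50 us apart. A minimal sketch of that poll-with-timeout shape, with the device read replaced by a stub predicate:

#include <stdbool.h>
#include <stdio.h>

/* Shape of the poll loop in ixgb_wait_eeprom_command():
 * 200 samples x 50 us = 10,000 us = 10 ms budget. */
static bool wait_for_done(bool (*done)(void))
{
	for (int i = 0; i < 200; i++) {
		if (done())
			return true;
		/* udelay(50) between samples in the driver */
	}
	return false;
}

static int calls;
static bool done_after_three(void) { return ++calls >= 3; }

int main(void)
{
	printf("%s\n", wait_for_done(done_after_three) ? "done" : "timeout");
	return 0;
}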
- */ - for (i = 0; i < 200; i++) { - eecd_reg = IXGB_READ_REG(hw, EECD); - - if (eecd_reg & IXGB_EECD_DO) - return true; - - udelay(50); - } - ASSERT(0); - return false; -} - -/****************************************************************************** - * Verifies that the EEPROM has a valid checksum - * - * hw - Struct containing variables accessed by shared code - * - * Reads the first 64 16 bit words of the EEPROM and sums the values read. - * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is - * valid. - * - * Returns: - * true: Checksum is valid - * false: Checksum is not valid. - *****************************************************************************/ -bool -ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) -{ - u16 checksum = 0; - u16 i; - - for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) - checksum += ixgb_read_eeprom(hw, i); - - if (checksum == (u16) EEPROM_SUM) - return true; - else - return false; -} - -/****************************************************************************** - * Calculates the EEPROM checksum and writes it to the EEPROM - * - * hw - Struct containing variables accessed by shared code - * - * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. - * Writes the difference to word offset 63 of the EEPROM. - *****************************************************************************/ -void -ixgb_update_eeprom_checksum(struct ixgb_hw *hw) -{ - u16 checksum = 0; - u16 i; - - for (i = 0; i < EEPROM_CHECKSUM_REG; i++) - checksum += ixgb_read_eeprom(hw, i); - - checksum = (u16) EEPROM_SUM - checksum; - - ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum); -} - -/****************************************************************************** - * Writes a 16 bit word to a given offset in the EEPROM. - * - * hw - Struct containing variables accessed by shared code - * reg - offset within the EEPROM to be written to - * data - 16 bit word to be written to the EEPROM - * - * If ixgb_update_eeprom_checksum is not called after this function, the - * EEPROM will most likely contain an invalid checksum. - * - *****************************************************************************/ -void -ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data) -{ - struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; - - /* Prepare the EEPROM for writing */ - ixgb_setup_eeprom(hw); - - /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode - * plus 4-bit dummy). This puts the EEPROM into write/erase mode. - */ - ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5); - ixgb_shift_out_bits(hw, 0, 4); - - /* Prepare the EEPROM */ - ixgb_standby_eeprom(hw); - - /* Send the Write command (3-bit opcode + 6-bit addr) */ - ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3); - ixgb_shift_out_bits(hw, offset, 6); - - /* Send the data */ - ixgb_shift_out_bits(hw, data, 16); - - ixgb_wait_eeprom_command(hw); - - /* Recover from write */ - ixgb_standby_eeprom(hw); - - /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit - * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase - * mode. 
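The checksum scheme used by ixgb_update_eeprom_checksum() and ixgb_validate_eeprom_checksum() above is simple modular arithmetic: the last word (offset 63) is chosen so that all 64 words sum to 0xBABA mod 2^16. A self-contained demonstration with an arbitrary sample image:

#include <stdio.h>
#include <stdint.h>

#define EEPROM_SIZE		64
#define EEPROM_CHECKSUM_REG	63	/* last word */
#define EEPROM_SUM		0xBABA

int main(void)
{
	uint16_t ee[EEPROM_SIZE] = { 0x1234, 0xA0B1, 0x0042 }; /* rest 0 */
	uint16_t sum = 0;
	int i;

	/* update: sum words 0..62, store the difference at word 63 */
	for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
		sum += ee[i];
	ee[EEPROM_CHECKSUM_REG] = (uint16_t)(EEPROM_SUM - sum);

	/* validate: all 64 words must now sum to 0xBABA */
	sum = 0;
	for (i = 0; i < EEPROM_SIZE; i++)
		sum += ee[i];
	printf("total = 0x%04X (valid: %s)\n", sum,
	       sum == EEPROM_SUM ? "yes" : "no");
	return 0;
}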
- */ - ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5); - ixgb_shift_out_bits(hw, 0, 4); - - /* Done with writing */ - ixgb_cleanup_eeprom(hw); - - /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */ - ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); -} - -/****************************************************************************** - * Reads a 16 bit word from the EEPROM. - * - * hw - Struct containing variables accessed by shared code - * offset - offset of 16 bit word in the EEPROM to read - * - * Returns: - * The 16-bit value read from the eeprom - *****************************************************************************/ -u16 -ixgb_read_eeprom(struct ixgb_hw *hw, - u16 offset) -{ - u16 data; - - /* Prepare the EEPROM for reading */ - ixgb_setup_eeprom(hw); - - /* Send the READ command (opcode + addr) */ - ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3); - /* - * We have a 64 word EEPROM, there are 6 address bits - */ - ixgb_shift_out_bits(hw, offset, 6); - - /* Read the data */ - data = ixgb_shift_in_bits(hw); - - /* End this read operation */ - ixgb_standby_eeprom(hw); - - return data; -} - -/****************************************************************************** - * Reads eeprom and stores data in shared structure. - * Validates eeprom checksum and eeprom signature. - * - * hw - Struct containing variables accessed by shared code - * - * Returns: - * true: if eeprom read is successful - * false: otherwise. - *****************************************************************************/ -bool -ixgb_get_eeprom_data(struct ixgb_hw *hw) -{ - u16 i; - u16 checksum = 0; - struct ixgb_ee_map_type *ee_map; - - ENTER(); - - ee_map = (struct ixgb_ee_map_type *)hw->eeprom; - - pr_debug("Reading eeprom data\n"); - for (i = 0; i < IXGB_EEPROM_SIZE; i++) { - u16 ee_data; - ee_data = ixgb_read_eeprom(hw, i); - checksum += ee_data; - hw->eeprom[i] = cpu_to_le16(ee_data); - } - - if (checksum != (u16) EEPROM_SUM) { - pr_debug("Checksum invalid\n"); - /* clear the init_ctrl_reg_1 to signify that the cache is - * invalidated */ - ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); - return false; - } - - if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) - != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { - pr_debug("Signature invalid\n"); - return false; - } - - return true; -} - -/****************************************************************************** - * Local function to check if the eeprom signature is good - * If the eeprom signature is good, calls ixgb_get_eeprom_data. - * - * hw - Struct containing variables accessed by shared code - * - * Returns: - * true: eeprom signature was good and the eeprom read was successful - * false: otherwise. - ******************************************************************************/ -static bool -ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw) -{ - struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; - - if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) - == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { - return true; - } else { - return ixgb_get_eeprom_data(hw); - } -} - -/****************************************************************************** - * return a word from the eeprom - * - * hw - Struct containing variables accessed by shared code - * index - Offset of eeprom word - * - * Returns: - * Word at indexed offset in eeprom, if valid, 0 otherwise.
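ixgb_check_and_get_eeprom_data() above uses the signature bits of the cached init_ctrl_reg_1 word as the "cache is populated" flag: valid signature means serve from the cache, anything else forces a re-read of the part. A small sketch of that pattern, with the device read replaced by a stub:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define SIG_MASK	0xC000	/* mirrors EEPROM_ICW1_SIGNATURE_MASK */
#define SIG_VALID	0x4000	/* mirrors EEPROM_ICW1_SIGNATURE_VALID */

static uint16_t cache_sig;	/* stands in for init_ctrl_reg_1 */

static bool refill_cache(void)	/* stands in for ixgb_get_eeprom_data() */
{
	cache_sig = SIG_VALID;	/* pretend the device read succeeded */
	return true;
}

static bool cache_ok(void)
{
	if ((cache_sig & SIG_MASK) == SIG_VALID)
		return true;		/* fast path: serve from cache */
	return refill_cache();		/* slow path: re-read the part */
}

int main(void)
{
	printf("first access: %s\n", cache_ok() ? "ok" : "bad");
	printf("second access (cached): %s\n", cache_ok() ? "ok" : "bad");
	return 0;
}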
- ******************************************************************************/ -__le16 -ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index) -{ - - if ((index < IXGB_EEPROM_SIZE) && - (ixgb_check_and_get_eeprom_data(hw) == true)) { - return hw->eeprom[index]; - } - - return 0; -} - -/****************************************************************************** - * return the mac address from EEPROM - * - * hw - Struct containing variables accessed by shared code - * mac_addr - Ethernet Address if EEPROM contents are valid, 0 otherwise - * - * Returns: None. - ******************************************************************************/ -void -ixgb_get_ee_mac_addr(struct ixgb_hw *hw, - u8 *mac_addr) -{ - int i; - struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; - - ENTER(); - - if (ixgb_check_and_get_eeprom_data(hw) == true) { - for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) { - mac_addr[i] = ee_map->mac_addr[i]; - } - pr_debug("eeprom mac address = %pM\n", mac_addr); - } -} - - -/****************************************************************************** - * return the Printed Board Assembly number from EEPROM - * - * hw - Struct containing variables accessed by shared code - * - * Returns: - * PBA number if EEPROM contents are valid, 0 otherwise - ******************************************************************************/ -u32 -ixgb_get_ee_pba_number(struct ixgb_hw *hw) -{ - if (ixgb_check_and_get_eeprom_data(hw) == true) - return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG]) - | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16); - - return 0; -} - - -/****************************************************************************** - * return the Device Id from EEPROM - * - * hw - Struct containing variables accessed by shared code - * - * Returns: - * Device Id if EEPROM contents are valid, 0 otherwise - ******************************************************************************/ -u16 -ixgb_get_ee_device_id(struct ixgb_hw *hw) -{ - struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; - - if (ixgb_check_and_get_eeprom_data(hw) == true) - return le16_to_cpu(ee_map->device_id); - - return 0; -} - diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h deleted file mode 100644 index 7ea1265..0000000 --- a/drivers/net/ixgb/ixgb_ee.h +++ /dev/null @@ -1,106 +0,0 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGB_EE_H_ -#define _IXGB_EE_H_ - -#define IXGB_EEPROM_SIZE 64 /* Size in words */ - -#define IXGB_ETH_LENGTH_OF_ADDRESS 6 - -/* EEPROM Commands */ -#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */ -#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */ -#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */ -#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */ -#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */ - -/* EEPROM MAP (Word Offsets) */ -#define EEPROM_IA_1_2_REG 0x0000 -#define EEPROM_IA_3_4_REG 0x0001 -#define EEPROM_IA_5_6_REG 0x0002 -#define EEPROM_COMPATIBILITY_REG 0x0003 -#define EEPROM_PBA_1_2_REG 0x0008 -#define EEPROM_PBA_3_4_REG 0x0009 -#define EEPROM_INIT_CONTROL1_REG 0x000A -#define EEPROM_SUBSYS_ID_REG 0x000B -#define EEPROM_SUBVEND_ID_REG 0x000C -#define EEPROM_DEVICE_ID_REG 0x000D -#define EEPROM_VENDOR_ID_REG 0x000E -#define EEPROM_INIT_CONTROL2_REG 0x000F -#define EEPROM_SWDPINS_REG 0x0020 -#define EEPROM_CIRCUIT_CTRL_REG 0x0021 -#define EEPROM_D0_D3_POWER_REG 0x0022 -#define EEPROM_FLASH_VERSION 0x0032 -#define EEPROM_CHECKSUM_REG 0x003F - -/* Mask bits for fields in Word 0x0a of the EEPROM */ - -#define EEPROM_ICW1_SIGNATURE_MASK 0xC000 -#define EEPROM_ICW1_SIGNATURE_VALID 0x4000 -#define EEPROM_ICW1_SIGNATURE_CLEAR 0x0000 - -/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ -#define EEPROM_SUM 0xBABA - -/* EEPROM Map Sizes (Byte Counts) */ -#define PBA_SIZE 4 - -/* EEPROM Map defines (WORD OFFSETS)*/ - -/* EEPROM structure */ -struct ixgb_ee_map_type { - u8 mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; - __le16 compatibility; - __le16 reserved1[4]; - __le32 pba_number; - __le16 init_ctrl_reg_1; - __le16 subsystem_id; - __le16 subvendor_id; - __le16 device_id; - __le16 vendor_id; - __le16 init_ctrl_reg_2; - __le16 oem_reserved[16]; - __le16 swdpins_reg; - __le16 circuit_ctrl_reg; - u8 d3_power; - u8 d0_power; - __le16 reserved2[28]; - __le16 checksum; -}; - -/* EEPROM Functions */ -u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg); - -bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw); - -void ixgb_update_eeprom_checksum(struct ixgb_hw *hw); - -void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data); - -#endif /* IXGB_EE_H */ diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c deleted file mode 100644 index 6da890b..0000000 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ /dev/null @@ -1,758 +0,0 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". 
- - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* ethtool support for ixgb */ - -#include "ixgb.h" - -#include - -#define IXGB_ALL_RAR_ENTRIES 16 - -enum {NETDEV_STATS, IXGB_STATS}; - -struct ixgb_stats { - char stat_string[ETH_GSTRING_LEN]; - int type; - int sizeof_stat; - int stat_offset; -}; - -#define IXGB_STAT(m) IXGB_STATS, \ - FIELD_SIZEOF(struct ixgb_adapter, m), \ - offsetof(struct ixgb_adapter, m) -#define IXGB_NETDEV_STAT(m) NETDEV_STATS, \ - FIELD_SIZEOF(struct net_device, m), \ - offsetof(struct net_device, m) - -static struct ixgb_stats ixgb_gstrings_stats[] = { - {"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)}, - {"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)}, - {"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)}, - {"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)}, - {"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)}, - {"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)}, - {"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)}, - {"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)}, - {"multicast", IXGB_NETDEV_STAT(stats.multicast)}, - {"collisions", IXGB_NETDEV_STAT(stats.collisions)}, - -/* { "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) }, */ - {"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)}, - {"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)}, - {"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)}, - {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)}, - {"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)}, - {"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)}, - {"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)}, - {"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)}, - {"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)}, - {"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)}, - {"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)}, - {"tx_deferred_ok", IXGB_STAT(stats.dc)}, - {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, - {"tx_restart_queue", IXGB_STAT(restart_queue) }, - {"rx_long_length_errors", IXGB_STAT(stats.roc)}, - {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, - {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)}, - {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)}, - {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)}, - {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)}, - {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)}, - {"tx_flow_control_xoff", IXGB_STAT(stats.xofftxc)}, - {"rx_csum_offload_good", IXGB_STAT(hw_csum_rx_good)}, - {"rx_csum_offload_errors", IXGB_STAT(hw_csum_rx_error)}, - {"tx_csum_offload_good", IXGB_STAT(hw_csum_tx_good)}, - {"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)} -}; - -#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats) - -static int -ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; - - if (netif_carrier_ok(adapter->netdev)) { - ethtool_cmd_speed_set(ecmd, SPEED_10000); - ecmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(ecmd, -1); - ecmd->duplex = -1; - } - - ecmd->autoneg = AUTONEG_DISABLE; - return 0; -} - -static void ixgb_set_speed_duplex(struct 
net_device *netdev) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - /* be optimistic about our link, since we were up before */ - adapter->link_speed = 10000; - adapter->link_duplex = FULL_DUPLEX; - netif_carrier_on(netdev); - netif_wake_queue(netdev); -} - -static int -ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - u32 speed = ethtool_cmd_speed(ecmd); - - if (ecmd->autoneg == AUTONEG_ENABLE || - (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) - return -EINVAL; - - if (netif_running(adapter->netdev)) { - ixgb_down(adapter, true); - ixgb_reset(adapter); - ixgb_up(adapter); - ixgb_set_speed_duplex(netdev); - } else - ixgb_reset(adapter); - - return 0; -} - -static void -ixgb_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - struct ixgb_hw *hw = &adapter->hw; - - pause->autoneg = AUTONEG_DISABLE; - - if (hw->fc.type == ixgb_fc_rx_pause) - pause->rx_pause = 1; - else if (hw->fc.type == ixgb_fc_tx_pause) - pause->tx_pause = 1; - else if (hw->fc.type == ixgb_fc_full) { - pause->rx_pause = 1; - pause->tx_pause = 1; - } -} - -static int -ixgb_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - struct ixgb_hw *hw = &adapter->hw; - - if (pause->autoneg == AUTONEG_ENABLE) - return -EINVAL; - - if (pause->rx_pause && pause->tx_pause) - hw->fc.type = ixgb_fc_full; - else if (pause->rx_pause && !pause->tx_pause) - hw->fc.type = ixgb_fc_rx_pause; - else if (!pause->rx_pause && pause->tx_pause) - hw->fc.type = ixgb_fc_tx_pause; - else if (!pause->rx_pause && !pause->tx_pause) - hw->fc.type = ixgb_fc_none; - - if (netif_running(adapter->netdev)) { - ixgb_down(adapter, true); - ixgb_up(adapter); - ixgb_set_speed_duplex(netdev); - } else - ixgb_reset(adapter); - - return 0; -} - -static u32 -ixgb_get_rx_csum(struct net_device *netdev) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - - return adapter->rx_csum; -} - -static int -ixgb_set_rx_csum(struct net_device *netdev, u32 data) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - - adapter->rx_csum = data; - - if (netif_running(netdev)) { - ixgb_down(adapter, true); - ixgb_up(adapter); - ixgb_set_speed_duplex(netdev); - } else - ixgb_reset(adapter); - return 0; -} - -static u32 -ixgb_get_tx_csum(struct net_device *netdev) -{ - return (netdev->features & NETIF_F_HW_CSUM) != 0; -} - -static int -ixgb_set_tx_csum(struct net_device *netdev, u32 data) -{ - if (data) - netdev->features |= NETIF_F_HW_CSUM; - else - netdev->features &= ~NETIF_F_HW_CSUM; - - return 0; -} - -static int -ixgb_set_tso(struct net_device *netdev, u32 data) -{ - if (data) - netdev->features |= NETIF_F_TSO; - else - netdev->features &= ~NETIF_F_TSO; - return 0; -} - -static u32 -ixgb_get_msglevel(struct net_device *netdev) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - return adapter->msg_enable; -} - -static void -ixgb_set_msglevel(struct net_device *netdev, u32 data) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = data; -} -#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_ - -static int -ixgb_get_regs_len(struct net_device *netdev) -{ -#define IXGB_REG_DUMP_LEN 136*sizeof(u32) - return IXGB_REG_DUMP_LEN; -} - -static void -ixgb_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - 
struct ixgb_hw *hw = &adapter->hw; - u32 *reg = p; - u32 *reg_start = reg; - u8 i; - - /* the 1 (one) below indicates an attempt at versioning, if the - * interface in ethtool or the driver changes, this 1 should be - * incremented */ - regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id; - - /* General Registers */ - *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */ - *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */ - *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */ - *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */ - *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */ - - /* Interrupt */ - *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */ - *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */ - *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */ - *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */ - - /* Receive */ - *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */ - *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */ - *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */ - *reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */ - *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */ - *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */ - *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */ - *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */ - *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */ - *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */ - *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */ - *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ - - /* there are 16 RAR entries in hardware, we only use 3 */ - for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) { - *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ - *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ - } - - /* Transmit */ - *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */ - *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */ - *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */ - *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */ - *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */ - *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */ - *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */ - *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */ - *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */ - *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */ - - /* Physical */ - *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */ - *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */ - *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */ - *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */ - *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */ - *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */ - *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */ - *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */ - *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */ - *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */ - *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */ - *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */ - *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */ - - /* Statistics */ - *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */ - *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */ - *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */ - *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */ - *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */ - *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */ - *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */ - *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */ - *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */ - *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */ - *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */ - *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */ - *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */ - *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */ - *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */ - *reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */ - *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */ - *reg++ = 
IXGB_GET_STAT(adapter, torh); /* 93 */ - *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */ - *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */ - *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */ - *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */ - *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */ - *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */ - *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */ - *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */ - *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */ - *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */ - *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */ - *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */ - *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */ - *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */ - *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */ - *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */ - *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */ - *reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */ - *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */ - *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */ - *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */ - *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */ - *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */ - *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */ - *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */ - *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */ - *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */ - *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */ - *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */ - *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */ - *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */ - *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */ - *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */ - *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */ - *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */ - *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */ - *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */ - *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */ - *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */ - *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */ - *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */ - *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */ - - regs->len = (reg - reg_start) * sizeof(u32); -} - -static int -ixgb_get_eeprom_len(struct net_device *netdev) -{ - /* return size in bytes */ - return IXGB_EEPROM_SIZE << 1; -} - -static int -ixgb_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - struct ixgb_hw *hw = &adapter->hw; - __le16 *eeprom_buff; - int i, max_len, first_word, last_word; - int ret_val = 0; - - if (eeprom->len == 0) { - ret_val = -EINVAL; - goto geeprom_error; - } - - eeprom->magic = hw->vendor_id | (hw->device_id << 16); - - max_len = ixgb_get_eeprom_len(netdev); - - if (eeprom->offset > eeprom->offset + eeprom->len) { - ret_val = -EINVAL; - goto geeprom_error; - } - - if ((eeprom->offset + eeprom->len) > max_len) - eeprom->len = (max_len - eeprom->offset); - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - - eeprom_buff = kmalloc(sizeof(__le16) * - (last_word - first_word + 1), GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - /* note the eeprom was good because the driver loaded */ - for (i = 0; i <= (last_word - first_word); i++) - eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); - - memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); - kfree(eeprom_buff); - -geeprom_error: - return 
ret_val; -} - -static int -ixgb_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - struct ixgb_hw *hw = &adapter->hw; - u16 *eeprom_buff; - void *ptr; - int max_len, first_word, last_word; - u16 i; - - if (eeprom->len == 0) - return -EINVAL; - - if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) - return -EFAULT; - - max_len = ixgb_get_eeprom_len(netdev); - - if (eeprom->offset > eeprom->offset + eeprom->len) - return -EINVAL; - - if ((eeprom->offset + eeprom->len) > max_len) - eeprom->len = (max_len - eeprom->offset); - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_buff = kmalloc(max_len, GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - ptr = (void *)eeprom_buff; - - if (eeprom->offset & 1) { - /* need read/modify/write of first changed EEPROM word */ - /* only the second byte of the word is being modified */ - eeprom_buff[0] = ixgb_read_eeprom(hw, first_word); - ptr++; - } - if ((eeprom->offset + eeprom->len) & 1) { - /* need read/modify/write of last changed EEPROM word */ - /* only the first byte of the word is being modified */ - eeprom_buff[last_word - first_word] - = ixgb_read_eeprom(hw, last_word); - } - - memcpy(ptr, bytes, eeprom->len); - for (i = 0; i <= (last_word - first_word); i++) - ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]); - - /* Update the checksum over the first part of the EEPROM if needed */ - if (first_word <= EEPROM_CHECKSUM_REG) - ixgb_update_eeprom_checksum(hw); - - kfree(eeprom_buff); - return 0; -} - -static void -ixgb_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - - strncpy(drvinfo->driver, ixgb_driver_name, 32); - strncpy(drvinfo->version, ixgb_driver_version, 32); - strncpy(drvinfo->fw_version, "N/A", 32); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_stats = IXGB_STATS_LEN; - drvinfo->regdump_len = ixgb_get_regs_len(netdev); - drvinfo->eedump_len = ixgb_get_eeprom_len(netdev); -} - -static void -ixgb_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - struct ixgb_desc_ring *txdr = &adapter->tx_ring; - struct ixgb_desc_ring *rxdr = &adapter->rx_ring; - - ring->rx_max_pending = MAX_RXD; - ring->tx_max_pending = MAX_TXD; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = rxdr->count; - ring->tx_pending = txdr->count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} - -static int -ixgb_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - struct ixgb_desc_ring *txdr = &adapter->tx_ring; - struct ixgb_desc_ring *rxdr = &adapter->rx_ring; - struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new; - int err; - - tx_old = adapter->tx_ring; - rx_old = adapter->rx_ring; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - if (netif_running(adapter->netdev)) - ixgb_down(adapter, true); - - rxdr->count = max(ring->rx_pending,(u32)MIN_RXD); - rxdr->count = min(rxdr->count,(u32)MAX_RXD); - rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); - - txdr->count = max(ring->tx_pending,(u32)MIN_TXD); - txdr->count = min(txdr->count,(u32)MAX_TXD); - txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); - - if 
(netif_running(adapter->netdev)) { - /* Try to get new resources before deleting old */ - if ((err = ixgb_setup_rx_resources(adapter))) - goto err_setup_rx; - if ((err = ixgb_setup_tx_resources(adapter))) - goto err_setup_tx; - - /* save the new, restore the old in order to free it, - * then restore the new back again */ - - rx_new = adapter->rx_ring; - tx_new = adapter->tx_ring; - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - ixgb_free_rx_resources(adapter); - ixgb_free_tx_resources(adapter); - adapter->rx_ring = rx_new; - adapter->tx_ring = tx_new; - if ((err = ixgb_up(adapter))) - return err; - ixgb_set_speed_duplex(netdev); - } - - return 0; -err_setup_tx: - ixgb_free_rx_resources(adapter); -err_setup_rx: - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - ixgb_up(adapter); - return err; -} - -static int -ixgb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - - switch (state) { - case ETHTOOL_ID_ACTIVE: - return 2; - - case ETHTOOL_ID_ON: - ixgb_led_on(&adapter->hw); - break; - - case ETHTOOL_ID_OFF: - case ETHTOOL_ID_INACTIVE: - ixgb_led_off(&adapter->hw); - } - - return 0; -} - -static int -ixgb_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return IXGB_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static void -ixgb_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - int i; - char *p = NULL; - - ixgb_update_stats(adapter); - for (i = 0; i < IXGB_STATS_LEN; i++) { - switch (ixgb_gstrings_stats[i].type) { - case NETDEV_STATS: - p = (char *) netdev + - ixgb_gstrings_stats[i].stat_offset; - break; - case IXGB_STATS: - p = (char *) adapter + - ixgb_gstrings_stats[i].stat_offset; - break; - } - - data[i] = (ixgb_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } -} - -static void -ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -{ - int i; - - switch(stringset) { - case ETH_SS_STATS: - for (i = 0; i < IXGB_STATS_LEN; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - ixgb_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - } - break; - } -} - -static int ixgb_set_flags(struct net_device *netdev, u32 data) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - bool need_reset; - int rc; - - /* - * Tx VLAN insertion does not work per HW design when Rx stripping is - * disabled. Disable txvlan when rxvlan is turned off, and enable - * rxvlan when txvlan is turned on. 
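The rule in the comment above (Rx VLAN stripping must stay enabled whenever Tx VLAN insertion is enabled) reduces to a small normalization of the requested flag word. A minimal sketch, assuming the ETH_FLAG_TXVLAN/ETH_FLAG_RXVLAN bit values from the ethtool UAPI of this era, with txvlan_active standing in for the driver's NETIF_F_HW_VLAN_TX feature test:

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_FLAG_TXVLAN (1 << 7)        /* assumed, per linux/ethtool.h */
    #define ETH_FLAG_RXVLAN (1 << 8)

    /* txvlan requires rxvlan: drop txvlan if rxvlan is being turned off,
     * force rxvlan on if txvlan is requested. */
    static uint32_t normalize_vlan_flags(uint32_t data, int txvlan_active)
    {
            if (!(data & ETH_FLAG_RXVLAN) && txvlan_active)
                    data &= ~ETH_FLAG_TXVLAN;
            else if (data & ETH_FLAG_TXVLAN)
                    data |= ETH_FLAG_RXVLAN;
            return data;
    }

    int main(void)
    {
            /* Requesting txvlan alone gets rxvlan pulled in as well. */
            printf("0x%03x\n", normalize_vlan_flags(ETH_FLAG_TXVLAN, 0));
            return 0;
    }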
- */ - if (!(data & ETH_FLAG_RXVLAN) && - (netdev->features & NETIF_F_HW_VLAN_TX)) - data &= ~ETH_FLAG_TXVLAN; - else if (data & ETH_FLAG_TXVLAN) - data |= ETH_FLAG_RXVLAN; - - need_reset = (data & ETH_FLAG_RXVLAN) != - (netdev->features & NETIF_F_HW_VLAN_RX); - - rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN | - ETH_FLAG_TXVLAN); - if (rc) - return rc; - - if (need_reset) { - if (netif_running(netdev)) { - ixgb_down(adapter, true); - ixgb_up(adapter); - ixgb_set_speed_duplex(netdev); - } else - ixgb_reset(adapter); - } - - return 0; -} - -static const struct ethtool_ops ixgb_ethtool_ops = { - .get_settings = ixgb_get_settings, - .set_settings = ixgb_set_settings, - .get_drvinfo = ixgb_get_drvinfo, - .get_regs_len = ixgb_get_regs_len, - .get_regs = ixgb_get_regs, - .get_link = ethtool_op_get_link, - .get_eeprom_len = ixgb_get_eeprom_len, - .get_eeprom = ixgb_get_eeprom, - .set_eeprom = ixgb_set_eeprom, - .get_ringparam = ixgb_get_ringparam, - .set_ringparam = ixgb_set_ringparam, - .get_pauseparam = ixgb_get_pauseparam, - .set_pauseparam = ixgb_set_pauseparam, - .get_rx_csum = ixgb_get_rx_csum, - .set_rx_csum = ixgb_set_rx_csum, - .get_tx_csum = ixgb_get_tx_csum, - .set_tx_csum = ixgb_set_tx_csum, - .set_sg = ethtool_op_set_sg, - .get_msglevel = ixgb_get_msglevel, - .set_msglevel = ixgb_set_msglevel, - .set_tso = ixgb_set_tso, - .get_strings = ixgb_get_strings, - .set_phys_id = ixgb_set_phys_id, - .get_sset_count = ixgb_get_sset_count, - .get_ethtool_stats = ixgb_get_ethtool_stats, - .get_flags = ethtool_op_get_flags, - .set_flags = ixgb_set_flags, -}; - -void ixgb_set_ethtool_ops(struct net_device *netdev) -{ - SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops); -} diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c deleted file mode 100644 index 3d61a9e..0000000 --- a/drivers/net/ixgb/ixgb_hw.c +++ /dev/null @@ -1,1262 +0,0 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* ixgb_hw.c - * Shared functions for accessing and configuring the adapter - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include "ixgb_hw.h" -#include "ixgb_ids.h" - -#include - -/* Local function prototypes */ - -static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr); - -static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value); - -static void ixgb_get_bus_info(struct ixgb_hw *hw); - -static bool ixgb_link_reset(struct ixgb_hw *hw); - -static void ixgb_optics_reset(struct ixgb_hw *hw); - -static void ixgb_optics_reset_bcm(struct ixgb_hw *hw); - -static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw); - -static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw); - -static void ixgb_clear_vfta(struct ixgb_hw *hw); - -static void ixgb_init_rx_addrs(struct ixgb_hw *hw); - -static u16 ixgb_read_phy_reg(struct ixgb_hw *hw, - u32 reg_address, - u32 phy_address, - u32 device_type); - -static bool ixgb_setup_fc(struct ixgb_hw *hw); - -static bool mac_addr_valid(u8 *mac_addr); - -static u32 ixgb_mac_reset(struct ixgb_hw *hw) -{ - u32 ctrl_reg; - - ctrl_reg = IXGB_CTRL0_RST | - IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */ - IXGB_CTRL0_SDP2_DIR | - IXGB_CTRL0_SDP1_DIR | - IXGB_CTRL0_SDP0_DIR | - IXGB_CTRL0_SDP3 | /* Initial value 1101 */ - IXGB_CTRL0_SDP2 | - IXGB_CTRL0_SDP0; - -#ifdef HP_ZX1 - /* Workaround for 82597EX reset errata */ - IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg); -#else - IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); -#endif - - /* Delay a few ms just to allow the reset to complete */ - msleep(IXGB_DELAY_AFTER_RESET); - ctrl_reg = IXGB_READ_REG(hw, CTRL0); -#ifdef DBG - /* Make sure the self-clearing global reset bit did self clear */ - ASSERT(!(ctrl_reg & IXGB_CTRL0_RST)); -#endif - - if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) { - ctrl_reg = /* Enable interrupt from XFP and SerDes */ - IXGB_CTRL1_GPI0_EN | - IXGB_CTRL1_SDP6_DIR | - IXGB_CTRL1_SDP7_DIR | - IXGB_CTRL1_SDP6 | - IXGB_CTRL1_SDP7; - IXGB_WRITE_REG(hw, CTRL1, ctrl_reg); - ixgb_optics_reset_bcm(hw); - } - - if (hw->phy_type == ixgb_phy_type_txn17401) - ixgb_optics_reset(hw); - - return ctrl_reg; -} - -/****************************************************************************** - * Reset the transmit and receive units; mask and clear all interrupts. - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -bool -ixgb_adapter_stop(struct ixgb_hw *hw) -{ - u32 ctrl_reg; - u32 icr_reg; - - ENTER(); - - /* If we are stopped or resetting exit gracefully and wait to be - * started again before accessing the hardware. - */ - if (hw->adapter_stopped) { - pr_debug("Exiting because the adapter is already stopped!!!\n"); - return false; - } - - /* Set the Adapter Stopped flag so other driver functions stop - * touching the Hardware. - */ - hw->adapter_stopped = true; - - /* Clear interrupt mask to stop board from generating interrupts */ - pr_debug("Masking off all interrupts\n"); - IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF); - - /* Disable the Transmit and Receive units. Then delay to allow - * any pending transactions to complete before we hit the MAC with - * the global reset. 
- */ - IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN); - IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN); - IXGB_WRITE_FLUSH(hw); - msleep(IXGB_DELAY_BEFORE_RESET); - - /* Issue a global reset to the MAC. This will reset the chip's - * transmit, receive, DMA, and link units. It will not affect - * the current PCI configuration. The global reset bit is self- - * clearing, and should clear within a microsecond. - */ - pr_debug("Issuing a global reset to MAC\n"); - - ctrl_reg = ixgb_mac_reset(hw); - - /* Clear interrupt mask to stop board from generating interrupts */ - pr_debug("Masking off all interrupts\n"); - IXGB_WRITE_REG(hw, IMC, 0xffffffff); - - /* Clear any pending interrupt events. */ - icr_reg = IXGB_READ_REG(hw, ICR); - - return ctrl_reg & IXGB_CTRL0_RST; -} - - -/****************************************************************************** - * Identifies the vendor of the optics module on the adapter. The SR adapters - * support two different types of XPAK optics, so it is necessary to determine - * which optics are present before applying any optics-specific workarounds. - * - * hw - Struct containing variables accessed by shared code. - * - * Returns: the vendor of the XPAK optics module. - *****************************************************************************/ -static ixgb_xpak_vendor -ixgb_identify_xpak_vendor(struct ixgb_hw *hw) -{ - u32 i; - u16 vendor_name[5]; - ixgb_xpak_vendor xpak_vendor; - - ENTER(); - - /* Read the first few bytes of the vendor string from the XPAK NVR - * registers. These are standard XENPAK/XPAK registers, so all XPAK - * devices should implement them. */ - for (i = 0; i < 5; i++) { - vendor_name[i] = ixgb_read_phy_reg(hw, - MDIO_PMA_PMD_XPAK_VENDOR_NAME - + i, IXGB_PHY_ADDRESS, - MDIO_MMD_PMAPMD); - } - - /* Determine the actual vendor */ - if (vendor_name[0] == 'I' && - vendor_name[1] == 'N' && - vendor_name[2] == 'T' && - vendor_name[3] == 'E' && vendor_name[4] == 'L') { - xpak_vendor = ixgb_xpak_vendor_intel; - } else { - xpak_vendor = ixgb_xpak_vendor_infineon; - } - - return xpak_vendor; -} - -/****************************************************************************** - * Determine the physical layer module on the adapter. - * - * hw - Struct containing variables accessed by shared code. The device_id - * field must be (correctly) populated before calling this routine. - * - * Returns: the phy type of the adapter. - *****************************************************************************/ -static ixgb_phy_type -ixgb_identify_phy(struct ixgb_hw *hw) -{ - ixgb_phy_type phy_type; - ixgb_xpak_vendor xpak_vendor; - - ENTER(); - - /* Infer the transceiver/phy type from the device id */ - switch (hw->device_id) { - case IXGB_DEVICE_ID_82597EX: - pr_debug("Identified TXN17401 optics\n"); - phy_type = ixgb_phy_type_txn17401; - break; - - case IXGB_DEVICE_ID_82597EX_SR: - /* The SR adapters carry two different types of XPAK optics - * modules; read the vendor identifier to determine the exact - * type of optics. 
*/ - xpak_vendor = ixgb_identify_xpak_vendor(hw); - if (xpak_vendor == ixgb_xpak_vendor_intel) { - pr_debug("Identified TXN17201 optics\n"); - phy_type = ixgb_phy_type_txn17201; - } else { - pr_debug("Identified G6005 optics\n"); - phy_type = ixgb_phy_type_g6005; - } - break; - case IXGB_DEVICE_ID_82597EX_LR: - pr_debug("Identified G6104 optics\n"); - phy_type = ixgb_phy_type_g6104; - break; - case IXGB_DEVICE_ID_82597EX_CX4: - pr_debug("Identified CX4\n"); - xpak_vendor = ixgb_identify_xpak_vendor(hw); - if (xpak_vendor == ixgb_xpak_vendor_intel) { - pr_debug("Identified TXN17201 optics\n"); - phy_type = ixgb_phy_type_txn17201; - } else { - pr_debug("Identified G6005 optics\n"); - phy_type = ixgb_phy_type_g6005; - } - break; - default: - pr_debug("Unknown physical layer module\n"); - phy_type = ixgb_phy_type_unknown; - break; - } - - /* update phy type for sun specific board */ - if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) - phy_type = ixgb_phy_type_bcm; - - return phy_type; -} - -/****************************************************************************** - * Performs basic configuration of the adapter. - * - * hw - Struct containing variables accessed by shared code - * - * Resets the controller. - * Reads and validates the EEPROM. - * Initializes the receive address registers. - * Initializes the multicast table. - * Clears all on-chip counters. - * Calls routine to set up flow control settings. - * Leaves the transmit and receive units disabled and uninitialized. - * - * Returns: - * true if successful, - * false if unrecoverable problems were encountered. - *****************************************************************************/ -bool -ixgb_init_hw(struct ixgb_hw *hw) -{ - u32 i; - u32 ctrl_reg; - bool status; - - ENTER(); - - /* Issue a global reset to the MAC. This will reset the chip's - * transmit, receive, DMA, and link units. It will not affect - * the current PCI configuration. The global reset bit is self- - * clearing, and should clear within a microsecond. - */ - pr_debug("Issuing a global reset to MAC\n"); - - ctrl_reg = ixgb_mac_reset(hw); - - pr_debug("Issuing an EE reset to MAC\n"); -#ifdef HP_ZX1 - /* Workaround for 82597EX reset errata */ - IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST); -#else - IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST); -#endif - - /* Delay a few ms just to allow the reset to complete */ - msleep(IXGB_DELAY_AFTER_EE_RESET); - - if (!ixgb_get_eeprom_data(hw)) - return false; - - /* Use the device id to determine the type of phy/transceiver. */ - hw->device_id = ixgb_get_ee_device_id(hw); - hw->phy_type = ixgb_identify_phy(hw); - - /* Setup the receive addresses. - * Receive Address Registers (RARs 0 - 15). - */ - ixgb_init_rx_addrs(hw); - - /* - * Check that a valid MAC address has been set. - * If it is not valid, we fail hardware init. - */ - if (!mac_addr_valid(hw->curr_mac_addr)) { - pr_debug("MAC address invalid after ixgb_init_rx_addrs\n"); - return(false); - } - - /* tell the routines in this file they can access hardware again */ - hw->adapter_stopped = false; - - /* Fill in the bus_info structure */ - ixgb_get_bus_info(hw); - - /* Zero out the Multicast HASH table */ - pr_debug("Zeroing the MTA\n"); - for (i = 0; i < IXGB_MC_TBL_SIZE; i++) - IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); - - /* Zero out the VLAN Filter Table Array */ - ixgb_clear_vfta(hw); - - /* Zero all of the hardware counters */ - ixgb_clear_hw_cntrs(hw); - - /* Call a subroutine to set up flow control. 
*/ - status = ixgb_setup_fc(hw); - - /* 82597EX errata: Call check-for-link in case lane deskew is locked */ - ixgb_check_for_link(hw); - - return status; -} - -/****************************************************************************** - * Initializes receive address filters. - * - * hw - Struct containing variables accessed by shared code - * - * Places the MAC address in receive address register 0 and clears the rest - * of the receive address registers. Clears the multicast table. Assumes - * the receiver is in reset when the routine is called. - *****************************************************************************/ -static void -ixgb_init_rx_addrs(struct ixgb_hw *hw) -{ - u32 i; - - ENTER(); - - /* - * If the current mac address is valid, assume it is a software override - * to the permanent address. - * Otherwise, use the permanent address from the eeprom. - */ - if (!mac_addr_valid(hw->curr_mac_addr)) { - - /* Get the MAC address from the eeprom for later reference */ - ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr); - - pr_debug("Keeping Permanent MAC Addr = %pM\n", - hw->curr_mac_addr); - } else { - - /* Setup the receive address. */ - pr_debug("Overriding MAC Address in RAR[0]\n"); - pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr); - - ixgb_rar_set(hw, hw->curr_mac_addr, 0); - } - - /* Zero out the other 15 receive addresses. */ - pr_debug("Clearing RAR[1-15]\n"); - for (i = 1; i < IXGB_RAR_ENTRIES; i++) { - /* Write high reg first to disable the AV bit first */ - IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); - IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); - } -} - -/****************************************************************************** - * Updates the MAC's list of multicast addresses. - * - * hw - Struct containing variables accessed by shared code - * mc_addr_list - the list of new multicast addresses - * mc_addr_count - number of addresses - * pad - number of bytes between addresses in the list - * - * The given list replaces any existing list. Clears the last 15 receive - * address registers and the multicast table. Uses receive address registers - * for the first 15 multicast addresses, and hashes the rest into the - * multicast table. - *****************************************************************************/ -void -ixgb_mc_addr_list_update(struct ixgb_hw *hw, - u8 *mc_addr_list, - u32 mc_addr_count, - u32 pad) -{ - u32 hash_value; - u32 i; - u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */ - u8 *mca; - - ENTER(); - - /* Set the new number of MC addresses that we are being requested to use. 
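*/

The placement policy just described (exact-match receive address registers first, hash table for the overflow) is easy to see in isolation. A stand-alone sketch of the distribution loop, illustrative only (place_mc_addrs is not a driver function):

    #include <stdio.h>

    #define RAR_ENTRIES 16          /* RAR[0] holds the station address */

    static void place_mc_addrs(int mc_addr_count)
    {
            int rar_used = 1, i;    /* start past RAR[0] */

            for (i = 0; i < mc_addr_count; i++) {
                    if (rar_used < RAR_ENTRIES)
                            printf("addr %2d -> RAR[%d]\n", i, rar_used++);
                    else
                            printf("addr %2d -> MTA (hashed)\n", i);
            }
    }

    int main(void)
    {
            place_mc_addrs(18);     /* 15 exact matches, 3 hashed */
            return 0;
    }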
- hw->num_mc_addrs = mc_addr_count; - - /* Clear RAR[1-15] */ - pr_debug("Clearing RAR[1-15]\n"); - for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) { - IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); - IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); - } - - /* Clear the MTA */ - pr_debug("Clearing MTA\n"); - for (i = 0; i < IXGB_MC_TBL_SIZE; i++) - IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); - - /* Add the new addresses */ - mca = mc_addr_list; - for (i = 0; i < mc_addr_count; i++) { - pr_debug("Adding the multicast addresses:\n"); - pr_debug("MC Addr #%d = %pM\n", i, mca); - - /* Place this multicast address in the RAR if there is room, - * else put it in the MTA - */ - if (rar_used_count < IXGB_RAR_ENTRIES) { - ixgb_rar_set(hw, mca, rar_used_count); - pr_debug("Added a multicast address to RAR[%d]\n", i); - rar_used_count++; - } else { - hash_value = ixgb_hash_mc_addr(hw, mca); - - pr_debug("Hash value = 0x%03X\n", hash_value); - - ixgb_mta_set(hw, hash_value); - } - - mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad; - } - - pr_debug("MC Update Complete\n"); -} - -/****************************************************************************** - * Hashes an address to determine its location in the multicast table - * - * hw - Struct containing variables accessed by shared code - * mc_addr - the multicast address to hash - * - * Returns: - * The hash value - *****************************************************************************/ -static u32 -ixgb_hash_mc_addr(struct ixgb_hw *hw, - u8 *mc_addr) -{ - u32 hash_value = 0; - - ENTER(); - - /* The portion of the address that is used for the hash table is - * determined by the mc_filter_type setting. - */ - switch (hw->mc_filter_type) { - /* [0] [1] [2] [3] [4] [5] - * 01 AA 00 12 34 56 - * LSB MSB - According to H/W docs */ - case 0: - /* [47:36] i.e. 0x563 for above example address */ - hash_value = - ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); - break; - case 1: /* [46:35] i.e. 0xAC6 for above example address */ - hash_value = - ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); - break; - case 2: /* [45:34] i.e. 0x58D for above example address */ - hash_value = - ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); - break; - case 3: /* [43:32] i.e. 0x634 for above example address */ - hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); - break; - default: - /* Invalid mc_filter_type, what should we do? */ - pr_debug("MC filter type param set incorrectly\n"); - ASSERT(0); - break; - } - - hash_value &= 0xFFF; - return hash_value; -} - -/****************************************************************************** - * Sets the bit in the multicast table corresponding to the hash value. - * - * hw - Struct containing variables accessed by shared code - * hash_value - Multicast address hash value - *****************************************************************************/ -static void -ixgb_mta_set(struct ixgb_hw *hw, - u32 hash_value) -{ - u32 hash_bit, hash_reg; - u32 mta_reg; - - /* The MTA is a register array of 128 32-bit registers. - * It is treated like an array of 4096 bits. We want to set - * bit BitArray[hash_value]. So we figure out what register - * the bit is in, read it, OR in the new bit, then write - * back the new value. The register is determined by the - * upper 7 bits of the hash value and the bit within that - * register is determined by the lower 5 bits of the value. - */
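The filter-type-0 hash and the register/bit split described above can be exercised stand-alone. hash_type0 below is a hypothetical helper reproducing only the mc_filter_type == 0 case, using the example address from the table (01:AA:00:12:34:56):

    #include <stdint.h>
    #include <stdio.h>

    /* Bits [47:36] of the address: high nibble of byte 4, all of byte 5. */
    static uint16_t hash_type0(const uint8_t *mc_addr)
    {
            return (uint16_t)(((mc_addr[4] >> 4) |
                               ((uint16_t)mc_addr[5] << 4)) & 0xFFF);
    }

    int main(void)
    {
            const uint8_t mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
            uint16_t hash = hash_type0(mc);         /* 0x563 here */

            /* Upper 7 bits pick one of 128 MTA registers, lower 5 the bit. */
            printf("hash = 0x%03X, reg = %u, bit = %u\n",
                   hash, (hash >> 5) & 0x7F, hash & 0x1F);
            return 0;
    }

For this address the output is hash = 0x563, reg = 43, bit = 3, matching the per-case comments in ixgb_hash_mc_addr() above.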
- hash_reg = (hash_value >> 5) & 0x7F; - hash_bit = hash_value & 0x1F; - - mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg); - - mta_reg |= (1 << hash_bit); - - IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg); -} - -/****************************************************************************** - * Puts an Ethernet address into a receive address register. - * - * hw - Struct containing variables accessed by shared code - * addr - Address to put into receive address register - * index - Receive address register to write - *****************************************************************************/ -void -ixgb_rar_set(struct ixgb_hw *hw, - u8 *addr, - u32 index) -{ - u32 rar_low, rar_high; - - ENTER(); - - /* HW expects these in little endian so we reverse the byte order - * from network order (big endian) to little endian - */ - rar_low = ((u32) addr[0] | - ((u32)addr[1] << 8) | - ((u32)addr[2] << 16) | - ((u32)addr[3] << 24)); - - rar_high = ((u32) addr[4] | - ((u32)addr[5] << 8) | - IXGB_RAH_AV); - - IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); - IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); -} - -/****************************************************************************** - * Writes a value to the specified offset in the VLAN filter table. - * - * hw - Struct containing variables accessed by shared code - * offset - Offset in VLAN filter table to write - * value - Value to write into VLAN filter table - *****************************************************************************/ -void -ixgb_write_vfta(struct ixgb_hw *hw, - u32 offset, - u32 value) -{ - IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value); -} - -/****************************************************************************** - * Clears the VLAN filter table - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -static void -ixgb_clear_vfta(struct ixgb_hw *hw) -{ - u32 offset; - - for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) - IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); -} - -/****************************************************************************** - * Configures the flow control settings based on SW configuration. - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ - -static bool -ixgb_setup_fc(struct ixgb_hw *hw) -{ - u32 ctrl_reg; - u32 pap_reg = 0; /* by default, assume no pause time */ - bool status = true; - - ENTER(); - - /* Get the current control reg 0 settings */ - ctrl_reg = IXGB_READ_REG(hw, CTRL0); - - /* Clear the Receive Pause Enable and Transmit Pause Enable bits */ - ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE); - - /* The possible values of the "flow_control" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames - * but we do not support receiving pause frames). - * 3: Both Rx and TX flow control (symmetric) are enabled. - * other: Invalid. - */ - switch (hw->fc.type) { - case ixgb_fc_none: /* 0 */ - /* Set CMDC bit to disable Rx Flow control */ - ctrl_reg |= (IXGB_CTRL0_CMDC); - break; - case ixgb_fc_rx_pause: /* 1 */ - /* RX Flow control is enabled, and TX Flow control is - * disabled. - */
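For reference, the little-endian RAL/RAH packing performed by ixgb_rar_set() above, in isolation. The Address Valid position used here (bit 31) is an assumption for illustration; the driver itself uses the IXGB_RAH_AV constant:

    #include <stdint.h>
    #include <stdio.h>

    #define RAH_AV (1u << 31)       /* assumed bit position of Address Valid */

    int main(void)
    {
            const uint8_t a[6] = { 0x00, 0x0C, 0xF1, 0x12, 0x34, 0x56 };

            /* Bytes 0-3 little-endian into the low register ... */
            uint32_t rar_low = a[0] | (uint32_t)a[1] << 8 |
                               (uint32_t)a[2] << 16 | (uint32_t)a[3] << 24;
            /* ... bytes 4-5 plus the valid flag into the high register. */
            uint32_t rar_high = a[4] | (uint32_t)a[5] << 8 | RAH_AV;

            printf("RAL = 0x%08X, RAH = 0x%08X\n", rar_low, rar_high);
            return 0;
    }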
- ctrl_reg |= (IXGB_CTRL0_RPE); - break; - case ixgb_fc_tx_pause: /* 2 */ - /* TX Flow control is enabled, and RX Flow control is - * disabled, by a software over-ride. - */ - ctrl_reg |= (IXGB_CTRL0_TPE); - pap_reg = hw->fc.pause_time; - break; - case ixgb_fc_full: /* 3 */ - /* Flow control (both RX and TX) is enabled by a software - * over-ride. - */ - ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE); - pap_reg = hw->fc.pause_time; - break; - default: - /* We should never get here. The value should be 0-3. */ - pr_debug("Flow control param set incorrectly\n"); - ASSERT(0); - break; - } - - /* Write the new settings */ - IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); - - if (pap_reg != 0) - IXGB_WRITE_REG(hw, PAP, pap_reg); - - /* Set the flow control receive threshold registers. Normally, - * these registers will be set to a default threshold that may be - * adjusted later by the driver's runtime code. However, if the - * ability to transmit pause frames is not enabled, then these - * registers will be set to 0. - */ - if (!(hw->fc.type & ixgb_fc_tx_pause)) { - IXGB_WRITE_REG(hw, FCRTL, 0); - IXGB_WRITE_REG(hw, FCRTH, 0); - } else { - /* We need to set up the Receive Threshold high and low water - * marks as well as (optionally) enabling the transmission of XON - * frames. */ - if (hw->fc.send_xon) { - IXGB_WRITE_REG(hw, FCRTL, - (hw->fc.low_water | IXGB_FCRTL_XONE)); - } else { - IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water); - } - IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water); - } - return status; -} - -/****************************************************************************** - * Reads a word from a device over the Management Data Interface (MDI) bus. - * This interface is used to manage Physical layer devices. - * - * hw - Struct containing variables accessed by hw code - * reg_address - Offset of device register being read. - * phy_address - Address of device on MDI. - * - * Returns: Data word (16 bits) from MDI device. - * - * The 82597EX has support for several MDI access methods. This routine - * uses the new protocol MDI Single Command and Address Operation. - * This requires that first an address cycle command is sent, followed by a - * read command. - *****************************************************************************/ -static u16 -ixgb_read_phy_reg(struct ixgb_hw *hw, - u32 reg_address, - u32 phy_address, - u32 device_type) -{ - u32 i; - u32 data; - u32 command = 0; - - ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); - ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); - ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); - - /* Setup and write the address cycle command */ - command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | - (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | - (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND)); - - IXGB_WRITE_REG(hw, MSCA, command);
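The bounded polling pattern that the comment block below spells out recurs throughout these MDI helpers: the COMMAND bit self-clears within about 64 usecs, so ten polls at 10 usec intervals give a 100 usec upper bound. Factored out as a sketch, under assumed names (MDI_COMMAND_BIT, read_reg and delay_us are placeholders, not driver symbols):

    #include <stdint.h>
    #include <stdbool.h>

    #define MDI_COMMAND_BIT (1u << 30)      /* illustrative bit position */

    /* Poll for a self-clearing bit: up to 10 polls, 10 us apart. */
    static bool poll_command_clear(uint32_t (*read_reg)(void),
                                   void (*delay_us)(unsigned int))
    {
            int i;

            for (i = 0; i < 10; i++) {
                    delay_us(10);
                    if (!(read_reg() & MDI_COMMAND_BIT))
                            return true;    /* operation completed */
            }
            return false;                   /* timed out after ~100 us */
    }

    /* Trivial stubs so the sketch links and runs on its own. */
    static uint32_t fake_msca(void) { return 0; }
    static void fake_delay(unsigned int us) { (void)us; }

    int main(void)
    {
            return poll_command_clear(fake_msca, fake_delay) ? 0 : 1;
    }

- - /************************************************************** - ** Check every 10 usec to see if the address cycle completed - ** The COMMAND bit will clear when the operation is complete. - ** This may take as long as 64 usecs (we'll wait 100 usecs max) - ** from the CPU Write to the Ready bit assertion.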
- **************************************************************/ - - for (i = 0; i < 10; i++) - { - udelay(10); - - command = IXGB_READ_REG(hw, MSCA); - - if ((command & IXGB_MSCA_MDI_COMMAND) == 0) - break; - } - - ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); - - /* Address cycle complete, setup and write the read command */ - command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | - (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | - (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND)); - - IXGB_WRITE_REG(hw, MSCA, command); - - /************************************************************** - ** Check every 10 usec to see if the read command completed - ** The COMMAND bit will clear when the operation is complete. - ** The read may take as long as 64 usecs (we'll wait 100 usecs max) - ** from the CPU Write to the Ready bit assertion. - **************************************************************/ - - for (i = 0; i < 10; i++) - { - udelay(10); - - command = IXGB_READ_REG(hw, MSCA); - - if ((command & IXGB_MSCA_MDI_COMMAND) == 0) - break; - } - - ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); - - /* Operation is complete, get the data from the MDIO Read/Write Data - * register and return. - */ - data = IXGB_READ_REG(hw, MSRWD); - data >>= IXGB_MSRWD_READ_DATA_SHIFT; - return((u16) data); -} - -/****************************************************************************** - * Writes a word to a device over the Management Data Interface (MDI) bus. - * This interface is used to manage Physical layer devices. - * - * hw - Struct containing variables accessed by hw code - * reg_address - Offset of device register being read. - * phy_address - Address of device on MDI. - * device_type - Also known as the Device ID or DID. - * data - 16-bit value to be written - * - * Returns: void. - * - * The 82597EX has support for several MDI access methods. This routine - * uses the new protocol MDI Single Command and Address Operation. - * This requires that first an address cycle command is sent, followed by a - * write command. - *****************************************************************************/ -static void -ixgb_write_phy_reg(struct ixgb_hw *hw, - u32 reg_address, - u32 phy_address, - u32 device_type, - u16 data) -{ - u32 i; - u32 command = 0; - - ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); - ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); - ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); - - /* Put the data in the MDIO Read/Write Data register */ - IXGB_WRITE_REG(hw, MSRWD, (u32)data); - - /* Setup and write the address cycle command */ - command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | - (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | - (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND)); - - IXGB_WRITE_REG(hw, MSCA, command); - - /************************************************************** - ** Check every 10 usec to see if the address cycle completed - ** The COMMAND bit will clear when the operation is complete. - ** This may take as long as 64 usecs (we'll wait 100 usecs max) - ** from the CPU Write to the Ready bit assertion. 
- **************************************************************/ - - for (i = 0; i < 10; i++) - { - udelay(10); - - command = IXGB_READ_REG(hw, MSCA); - - if ((command & IXGB_MSCA_MDI_COMMAND) == 0) - break; - } - - ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); - - /* Address cycle complete, setup and write the write command */ - command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | - (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | - (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND)); - - IXGB_WRITE_REG(hw, MSCA, command); - - /************************************************************** - ** Check every 10 usec to see if the write command completed - ** The COMMAND bit will clear when the operation is complete. - ** The write may take as long as 64 usecs (we'll wait 100 usecs max) - ** from the CPU Write to the Ready bit assertion. - **************************************************************/ - - for (i = 0; i < 10; i++) - { - udelay(10); - - command = IXGB_READ_REG(hw, MSCA); - - if ((command & IXGB_MSCA_MDI_COMMAND) == 0) - break; - } - - ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); - - /* Operation is complete, return. */ -} - -/****************************************************************************** - * Checks to see if the link status of the hardware has changed. - * - * hw - Struct containing variables accessed by hw code - * - * Called by any function that needs to check the link status of the adapter. - *****************************************************************************/ -void -ixgb_check_for_link(struct ixgb_hw *hw) -{ - u32 status_reg; - u32 xpcss_reg; - - ENTER(); - - xpcss_reg = IXGB_READ_REG(hw, XPCSS); - status_reg = IXGB_READ_REG(hw, STATUS); - - if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && - (status_reg & IXGB_STATUS_LU)) { - hw->link_up = true; - } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && - (status_reg & IXGB_STATUS_LU)) { - pr_debug("XPCSS Not Aligned while Status:LU is set\n"); - hw->link_up = ixgb_link_reset(hw); - } else { - /* - * 82597EX errata. Since the lane deskew problem may prevent - * link, reset the link before reporting link down. - */ - hw->link_up = ixgb_link_reset(hw); - } - /* Anything else for 10 Gig?? */ -} - -/****************************************************************************** - * Check for a bad link condition that may have occurred. - * The indication is that the RFC / LFC registers may be incrementing - * continually. A full adapter reset is required to recover. - * - * hw - Struct containing variables accessed by hw code - * - * Called by any function that needs to check the link status of the adapter. - *****************************************************************************/ -bool ixgb_check_for_bad_link(struct ixgb_hw *hw) -{ - u32 newLFC, newRFC; - bool bad_link_returncode = false; - - if (hw->phy_type == ixgb_phy_type_txn17401) { - newLFC = IXGB_READ_REG(hw, LFC); - newRFC = IXGB_READ_REG(hw, RFC); - if ((hw->lastLFC + 250 < newLFC) - || (hw->lastRFC + 250 < newRFC)) { - pr_debug("BAD LINK! too many LFC/RFC since last check\n"); - bad_link_returncode = true; - } - hw->lastLFC = newLFC; - hw->lastRFC = newRFC; - } - - return bad_link_returncode; -}
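The heuristic in ixgb_check_for_bad_link() above, declaring the link bad when the link/remote fault counters advance by more than 250 between polls, is self-contained enough to sketch on its own (link_is_bad and struct fault_state are illustrative stand-ins, with the 250 threshold taken from the driver):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fault_state { uint32_t last_lfc, last_rfc; };

    static bool link_is_bad(struct fault_state *s, uint32_t lfc, uint32_t rfc)
    {
            bool bad = (s->last_lfc + 250 < lfc) || (s->last_rfc + 250 < rfc);

            s->last_lfc = lfc;      /* remember for the next poll */
            s->last_rfc = rfc;
            return bad;
    }

    int main(void)
    {
            struct fault_state s = { 0, 0 };

            printf("%d\n", link_is_bad(&s, 500, 0));        /* 1: runaway LFC */
            return 0;
    }

- -/****************************************************************************** - * Clears all hardware statistics counters.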
- * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -static void -ixgb_clear_hw_cntrs(struct ixgb_hw *hw) -{ - volatile u32 temp_reg; - - ENTER(); - - /* if we are stopped or resetting exit gracefully */ - if (hw->adapter_stopped) { - pr_debug("Exiting because the adapter is stopped!!!\n"); - return; - } - - temp_reg = IXGB_READ_REG(hw, TPRL); - temp_reg = IXGB_READ_REG(hw, TPRH); - temp_reg = IXGB_READ_REG(hw, GPRCL); - temp_reg = IXGB_READ_REG(hw, GPRCH); - temp_reg = IXGB_READ_REG(hw, BPRCL); - temp_reg = IXGB_READ_REG(hw, BPRCH); - temp_reg = IXGB_READ_REG(hw, MPRCL); - temp_reg = IXGB_READ_REG(hw, MPRCH); - temp_reg = IXGB_READ_REG(hw, UPRCL); - temp_reg = IXGB_READ_REG(hw, UPRCH); - temp_reg = IXGB_READ_REG(hw, VPRCL); - temp_reg = IXGB_READ_REG(hw, VPRCH); - temp_reg = IXGB_READ_REG(hw, JPRCL); - temp_reg = IXGB_READ_REG(hw, JPRCH); - temp_reg = IXGB_READ_REG(hw, GORCL); - temp_reg = IXGB_READ_REG(hw, GORCH); - temp_reg = IXGB_READ_REG(hw, TORL); - temp_reg = IXGB_READ_REG(hw, TORH); - temp_reg = IXGB_READ_REG(hw, RNBC); - temp_reg = IXGB_READ_REG(hw, RUC); - temp_reg = IXGB_READ_REG(hw, ROC); - temp_reg = IXGB_READ_REG(hw, RLEC); - temp_reg = IXGB_READ_REG(hw, CRCERRS); - temp_reg = IXGB_READ_REG(hw, ICBC); - temp_reg = IXGB_READ_REG(hw, ECBC); - temp_reg = IXGB_READ_REG(hw, MPC); - temp_reg = IXGB_READ_REG(hw, TPTL); - temp_reg = IXGB_READ_REG(hw, TPTH); - temp_reg = IXGB_READ_REG(hw, GPTCL); - temp_reg = IXGB_READ_REG(hw, GPTCH); - temp_reg = IXGB_READ_REG(hw, BPTCL); - temp_reg = IXGB_READ_REG(hw, BPTCH); - temp_reg = IXGB_READ_REG(hw, MPTCL); - temp_reg = IXGB_READ_REG(hw, MPTCH); - temp_reg = IXGB_READ_REG(hw, UPTCL); - temp_reg = IXGB_READ_REG(hw, UPTCH); - temp_reg = IXGB_READ_REG(hw, VPTCL); - temp_reg = IXGB_READ_REG(hw, VPTCH); - temp_reg = IXGB_READ_REG(hw, JPTCL); - temp_reg = IXGB_READ_REG(hw, JPTCH); - temp_reg = IXGB_READ_REG(hw, GOTCL); - temp_reg = IXGB_READ_REG(hw, GOTCH); - temp_reg = IXGB_READ_REG(hw, TOTL); - temp_reg = IXGB_READ_REG(hw, TOTH); - temp_reg = IXGB_READ_REG(hw, DC); - temp_reg = IXGB_READ_REG(hw, PLT64C); - temp_reg = IXGB_READ_REG(hw, TSCTC); - temp_reg = IXGB_READ_REG(hw, TSCTFC); - temp_reg = IXGB_READ_REG(hw, IBIC); - temp_reg = IXGB_READ_REG(hw, RFC); - temp_reg = IXGB_READ_REG(hw, LFC); - temp_reg = IXGB_READ_REG(hw, PFRC); - temp_reg = IXGB_READ_REG(hw, PFTC); - temp_reg = IXGB_READ_REG(hw, MCFRC); - temp_reg = IXGB_READ_REG(hw, MCFTC); - temp_reg = IXGB_READ_REG(hw, XONRXC); - temp_reg = IXGB_READ_REG(hw, XONTXC); - temp_reg = IXGB_READ_REG(hw, XOFFRXC); - temp_reg = IXGB_READ_REG(hw, XOFFTXC); - temp_reg = IXGB_READ_REG(hw, RJC); -} - -/****************************************************************************** - * Turns on the software controllable LED - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -void -ixgb_led_on(struct ixgb_hw *hw) -{ - u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0); - - /* To turn on the LED, clear software-definable pin 0 (SDP0). 
*/ - ctrl0_reg &= ~IXGB_CTRL0_SDP0; - IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg); -} - -/****************************************************************************** - * Turns off the software controllable LED - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -void -ixgb_led_off(struct ixgb_hw *hw) -{ - u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0); - - /* To turn off the LED, set software-definable pin 0 (SDP0). */ - ctrl0_reg |= IXGB_CTRL0_SDP0; - IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg); -} - -/****************************************************************************** - * Gets the current PCI bus type, speed, and width of the hardware - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -static void -ixgb_get_bus_info(struct ixgb_hw *hw) -{ - u32 status_reg; - - status_reg = IXGB_READ_REG(hw, STATUS); - - hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ? - ixgb_bus_type_pcix : ixgb_bus_type_pci; - - if (hw->bus.type == ixgb_bus_type_pci) { - hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ? - ixgb_bus_speed_66 : ixgb_bus_speed_33; - } else { - switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) { - case IXGB_STATUS_PCIX_SPD_66: - hw->bus.speed = ixgb_bus_speed_66; - break; - case IXGB_STATUS_PCIX_SPD_100: - hw->bus.speed = ixgb_bus_speed_100; - break; - case IXGB_STATUS_PCIX_SPD_133: - hw->bus.speed = ixgb_bus_speed_133; - break; - default: - hw->bus.speed = ixgb_bus_speed_reserved; - break; - } - } - - hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ? - ixgb_bus_width_64 : ixgb_bus_width_32; -} - -/****************************************************************************** - * Tests a MAC address to ensure it is a valid Individual Address - * - * mac_addr - pointer to MAC address. - * - *****************************************************************************/ -static bool -mac_addr_valid(u8 *mac_addr) -{ - bool is_valid = true; - ENTER(); - - /* Make sure it is not a multicast address */ - if (is_multicast_ether_addr(mac_addr)) { - pr_debug("MAC address is multicast\n"); - is_valid = false; - } - /* Not a broadcast address */ - else if (is_broadcast_ether_addr(mac_addr)) { - pr_debug("MAC address is broadcast\n"); - is_valid = false; - } - /* Reject the zero address */ - else if (is_zero_ether_addr(mac_addr)) { - pr_debug("MAC address is all zeros\n"); - is_valid = false; - } - return is_valid; -} - -/****************************************************************************** - * Resets the 10GbE link. Waits the settle time and returns the state of - * the link. - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -static bool -ixgb_link_reset(struct ixgb_hw *hw) -{ - bool link_status = false; - u8 wait_retries = MAX_RESET_ITERATIONS; - u8 lrst_retries = MAX_RESET_ITERATIONS; - - do { - /* Reset the link */ - IXGB_WRITE_REG(hw, CTRL0, - IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST); - - /* Wait for link-up and lane re-alignment */ - do { - udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET); - link_status = - ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU) - && (IXGB_READ_REG(hw, XPCSS) & - IXGB_XPCSS_ALIGN_STATUS)) ? 
true : false; - } while (!link_status && --wait_retries); - - } while (!link_status && --lrst_retries); - - return link_status; -} - -/****************************************************************************** - * Resets the 10GbE optics module. - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ -static void -ixgb_optics_reset(struct ixgb_hw *hw) -{ - if (hw->phy_type == ixgb_phy_type_txn17401) { - u16 mdio_reg; - - ixgb_write_phy_reg(hw, - MDIO_CTRL1, - IXGB_PHY_ADDRESS, - MDIO_MMD_PMAPMD, - MDIO_CTRL1_RESET); - - mdio_reg = ixgb_read_phy_reg(hw, - MDIO_CTRL1, - IXGB_PHY_ADDRESS, - MDIO_MMD_PMAPMD); - } -} - -/****************************************************************************** - * Resets the 10GbE optics module for Sun variant NIC. - * - * hw - Struct containing variables accessed by shared code - *****************************************************************************/ - -#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG 0xC803 -#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL 0x0164 -#define IXGB_BCM8704_USER_CTRL_REG 0xC800 -#define IXGB_BCM8704_USER_CTRL_REG_VAL 0x7FBF -#define IXGB_BCM8704_USER_DEV3_ADDR 0x0003 -#define IXGB_SUN_PHY_ADDRESS 0x0000 -#define IXGB_SUN_PHY_RESET_DELAY 305 - -static void -ixgb_optics_reset_bcm(struct ixgb_hw *hw) -{ - u32 ctrl = IXGB_READ_REG(hw, CTRL0); - ctrl &= ~IXGB_CTRL0_SDP2; - ctrl |= IXGB_CTRL0_SDP3; - IXGB_WRITE_REG(hw, CTRL0, ctrl); - IXGB_WRITE_FLUSH(hw); - - /* SerDes needs extra delay */ - msleep(IXGB_SUN_PHY_RESET_DELAY); - - /* Broadcom 7408L configuration */ - /* Reference clock config */ - ixgb_write_phy_reg(hw, - IXGB_BCM8704_USER_PMD_TX_CTRL_REG, - IXGB_SUN_PHY_ADDRESS, - IXGB_BCM8704_USER_DEV3_ADDR, - IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL); - /* we must read the registers twice */ - ixgb_read_phy_reg(hw, - IXGB_BCM8704_USER_PMD_TX_CTRL_REG, - IXGB_SUN_PHY_ADDRESS, - IXGB_BCM8704_USER_DEV3_ADDR); - ixgb_read_phy_reg(hw, - IXGB_BCM8704_USER_PMD_TX_CTRL_REG, - IXGB_SUN_PHY_ADDRESS, - IXGB_BCM8704_USER_DEV3_ADDR); - - ixgb_write_phy_reg(hw, - IXGB_BCM8704_USER_CTRL_REG, - IXGB_SUN_PHY_ADDRESS, - IXGB_BCM8704_USER_DEV3_ADDR, - IXGB_BCM8704_USER_CTRL_REG_VAL); - ixgb_read_phy_reg(hw, - IXGB_BCM8704_USER_CTRL_REG, - IXGB_SUN_PHY_ADDRESS, - IXGB_BCM8704_USER_DEV3_ADDR); - ixgb_read_phy_reg(hw, - IXGB_BCM8704_USER_CTRL_REG, - IXGB_SUN_PHY_ADDRESS, - IXGB_BCM8704_USER_DEV3_ADDR); - - /* SerDes needs extra delay */ - msleep(IXGB_SUN_PHY_RESET_DELAY); -} diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h deleted file mode 100644 index 873d32b..0000000 --- a/drivers/net/ixgb/ixgb_hw.h +++ /dev/null @@ -1,801 +0,0 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
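For scale, the bounded retries in ixgb_link_reset() above put a hard cap on the busy-wait: with MAX_RESET_ITERATIONS = 8 and IXGB_DELAY_USECS_AFTER_LINK_RESET = 13 (both defined in ixgb_hw.h below), the worst case is 8 link-reset attempts x 8 polls x 13 us, roughly 832 us of udelay() spinning before the function gives up and reports the link as down.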
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGB_HW_H_ -#define _IXGB_HW_H_ - -#include - -#include "ixgb_osdep.h" - -/* Enums */ -typedef enum { - ixgb_mac_unknown = 0, - ixgb_82597, - ixgb_num_macs -} ixgb_mac_type; - -/* Types of physical layer modules */ -typedef enum { - ixgb_phy_type_unknown = 0, - ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */ - ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */ - ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */ - ixgb_phy_type_txn17401, /* 1310nm, SM fiber, XENPAK transceiver */ - ixgb_phy_type_bcm /* SUN specific board */ -} ixgb_phy_type; - -/* XPAK transceiver vendors, for the SR adapters */ -typedef enum { - ixgb_xpak_vendor_intel, - ixgb_xpak_vendor_infineon -} ixgb_xpak_vendor; - -/* Media Types */ -typedef enum { - ixgb_media_type_unknown = 0, - ixgb_media_type_fiber = 1, - ixgb_media_type_copper = 2, - ixgb_num_media_types -} ixgb_media_type; - -/* Flow Control Settings */ -typedef enum { - ixgb_fc_none = 0, - ixgb_fc_rx_pause = 1, - ixgb_fc_tx_pause = 2, - ixgb_fc_full = 3, - ixgb_fc_default = 0xFF -} ixgb_fc_type; - -/* PCI bus types */ -typedef enum { - ixgb_bus_type_unknown = 0, - ixgb_bus_type_pci, - ixgb_bus_type_pcix -} ixgb_bus_type; - -/* PCI bus speeds */ -typedef enum { - ixgb_bus_speed_unknown = 0, - ixgb_bus_speed_33, - ixgb_bus_speed_66, - ixgb_bus_speed_100, - ixgb_bus_speed_133, - ixgb_bus_speed_reserved -} ixgb_bus_speed; - -/* PCI bus widths */ -typedef enum { - ixgb_bus_width_unknown = 0, - ixgb_bus_width_32, - ixgb_bus_width_64 -} ixgb_bus_width; - -#define IXGB_ETH_LENGTH_OF_ADDRESS 6 - -#define IXGB_EEPROM_SIZE 64 /* Size in words */ - -#define SPEED_10000 10000 -#define FULL_DUPLEX 2 - -#define MIN_NUMBER_OF_DESCRIPTORS 8 -#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */ - -#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */ -#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */ -#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */ - -#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */ - /* NOTE: this is MICROSECONDS */ -#define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */ - -/* General Registers */ -#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */ -#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */ -#define IXGB_STATUS 0x00010 /* Device Status Register - RO */ -#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */ -#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */ - -/* Interrupt */ -#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */ -#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */ -#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */ -#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */ - -/* Receive */ -#define IXGB_RCTL 0x00100 /* RX Control - RW */ -#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */ -#define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold High - RW */ -#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */ -#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */ -#define IXGB_RDLEN 0x00120 /* RX 
Descriptor Length - RW */ -#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */ -#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */ -#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */ -#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */ -#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */ -#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */ -#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */ -#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */ -#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */ -#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */ -#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */ -#define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8 - -/* Transmit */ -#define IXGB_TCTL 0x00600 /* TX Control - RW */ -#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */ -#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */ -#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */ -#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */ -#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */ -#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */ -#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */ -#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */ -#define IXGB_PAP 0x00640 /* Pause and Pace - RW */ -#define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8 - -/* Physical */ -#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */ -#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */ -#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */ -#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */ -#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */ -#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */ -#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */ -#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */ -#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */ -#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */ -#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */ -#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */ -#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */ - -/* Wake-up */ -#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */ -#define IXGB_WUS 0x00810 /* Wake Up Status - RO */ -#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */ -#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */ -#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */ - -/* Statistics */ -#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */ -#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */ -#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */ -#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */ -#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */ -#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */ -#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */ -#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */ -#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */ -#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */ -#define IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */ -#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */ -#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */ -#define 
IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */ -#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */ -#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */ -#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */ -#define IXGB_TORH 0x02044 /* Total Octets Received (High) */ -#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */ -#define IXGB_RUC 0x02050 /* Receive Undersize Count */ -#define IXGB_ROC 0x02058 /* Receive Oversize Count */ -#define IXGB_RLEC 0x02060 /* Receive Length Error Count */ -#define IXGB_CRCERRS 0x02068 /* CRC Error Count */ -#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */ -#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */ -#define IXGB_MPC 0x02080 /* Missed Packets Count */ -#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */ -#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */ -#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */ -#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */ -#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */ -#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */ -#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */ -#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */ -#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */ -#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */ -#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */ -#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */ -#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */ -#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */ -#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */ -#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */ -#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */ -#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */ -#define IXGB_DC 0x02148 /* Defer Count */ -#define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */ -#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */ -#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */ -#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */ -#define IXGB_RFC 0x02188 /* Remote Fault Count */ -#define IXGB_LFC 0x02190 /* Local Fault Count */ -#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */ -#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */ -#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */ -#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */ -#define IXGB_XONRXC 0x021B8 /* XON Received Count */ -#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */ -#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */ -#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */ -#define IXGB_RJC 0x021D8 /* Receive Jabber Count */ - -/* CTRL0 Bit Masks */ -#define IXGB_CTRL0_LRST 0x00000008 -#define IXGB_CTRL0_JFE 0x00000010 -#define IXGB_CTRL0_XLE 0x00000020 -#define IXGB_CTRL0_MDCS 0x00000040 -#define IXGB_CTRL0_CMDC 0x00000080 -#define IXGB_CTRL0_SDP0 0x00040000 -#define IXGB_CTRL0_SDP1 0x00080000 -#define IXGB_CTRL0_SDP2 0x00100000 -#define IXGB_CTRL0_SDP3 0x00200000 -#define IXGB_CTRL0_SDP0_DIR 0x00400000 -#define IXGB_CTRL0_SDP1_DIR 0x00800000 -#define 
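Each statistics counter above is split across a Low/High register pair. A hedged, illustrative fragment (not from the driver) showing how a reader could view one pair as a single wide value; IXGB_READ_REG is the driver's own accessor, the driver consistently reads the Low half first, and the dead reads in ixgb_clear_hw_cntrs() earlier suggest these counters clear on read. Note the driver itself keeps the halves separate in struct ixgb_hw_stats (later in this header):

	u64 torl = IXGB_READ_REG(hw, TORL);	/* Low half first, per driver convention */
	u64 torh = IXGB_READ_REG(hw, TORH);
	u64 total_octets_rx = (torh << 32) | torl;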
IXGB_CTRL0_SDP2_DIR 0x01000000 -#define IXGB_CTRL0_SDP3_DIR 0x02000000 -#define IXGB_CTRL0_RST 0x04000000 -#define IXGB_CTRL0_RPE 0x08000000 -#define IXGB_CTRL0_TPE 0x10000000 -#define IXGB_CTRL0_VME 0x40000000 - -/* CTRL1 Bit Masks */ -#define IXGB_CTRL1_GPI0_EN 0x00000001 -#define IXGB_CTRL1_GPI1_EN 0x00000002 -#define IXGB_CTRL1_GPI2_EN 0x00000004 -#define IXGB_CTRL1_GPI3_EN 0x00000008 -#define IXGB_CTRL1_SDP4 0x00000010 -#define IXGB_CTRL1_SDP5 0x00000020 -#define IXGB_CTRL1_SDP6 0x00000040 -#define IXGB_CTRL1_SDP7 0x00000080 -#define IXGB_CTRL1_SDP4_DIR 0x00000100 -#define IXGB_CTRL1_SDP5_DIR 0x00000200 -#define IXGB_CTRL1_SDP6_DIR 0x00000400 -#define IXGB_CTRL1_SDP7_DIR 0x00000800 -#define IXGB_CTRL1_EE_RST 0x00002000 -#define IXGB_CTRL1_RO_DIS 0x00020000 -#define IXGB_CTRL1_PCIXHM_MASK 0x00C00000 -#define IXGB_CTRL1_PCIXHM_1_2 0x00000000 -#define IXGB_CTRL1_PCIXHM_5_8 0x00400000 -#define IXGB_CTRL1_PCIXHM_3_4 0x00800000 -#define IXGB_CTRL1_PCIXHM_7_8 0x00C00000 - -/* STATUS Bit Masks */ -#define IXGB_STATUS_LU 0x00000002 -#define IXGB_STATUS_AIP 0x00000004 -#define IXGB_STATUS_TXOFF 0x00000010 -#define IXGB_STATUS_XAUIME 0x00000020 -#define IXGB_STATUS_RES 0x00000040 -#define IXGB_STATUS_RIS 0x00000080 -#define IXGB_STATUS_RIE 0x00000100 -#define IXGB_STATUS_RLF 0x00000200 -#define IXGB_STATUS_RRF 0x00000400 -#define IXGB_STATUS_PCI_SPD 0x00000800 -#define IXGB_STATUS_BUS64 0x00001000 -#define IXGB_STATUS_PCIX_MODE 0x00002000 -#define IXGB_STATUS_PCIX_SPD_MASK 0x0000C000 -#define IXGB_STATUS_PCIX_SPD_66 0x00000000 -#define IXGB_STATUS_PCIX_SPD_100 0x00004000 -#define IXGB_STATUS_PCIX_SPD_133 0x00008000 -#define IXGB_STATUS_REV_ID_MASK 0x000F0000 -#define IXGB_STATUS_REV_ID_SHIFT 16 - -/* EECD Bit Masks */ -#define IXGB_EECD_SK 0x00000001 -#define IXGB_EECD_CS 0x00000002 -#define IXGB_EECD_DI 0x00000004 -#define IXGB_EECD_DO 0x00000008 -#define IXGB_EECD_FWE_MASK 0x00000030 -#define IXGB_EECD_FWE_DIS 0x00000010 -#define IXGB_EECD_FWE_EN 0x00000020 - -/* MFS */ -#define IXGB_MFS_SHIFT 16 - -/* Interrupt Register Bit Masks (used for ICR, ICS, IMS, and IMC) */ -#define IXGB_INT_TXDW 0x00000001 -#define IXGB_INT_TXQE 0x00000002 -#define IXGB_INT_LSC 0x00000004 -#define IXGB_INT_RXSEQ 0x00000008 -#define IXGB_INT_RXDMT0 0x00000010 -#define IXGB_INT_RXO 0x00000040 -#define IXGB_INT_RXT0 0x00000080 -#define IXGB_INT_AUTOSCAN 0x00000200 -#define IXGB_INT_GPI0 0x00000800 -#define IXGB_INT_GPI1 0x00001000 -#define IXGB_INT_GPI2 0x00002000 -#define IXGB_INT_GPI3 0x00004000 - -/* RCTL Bit Masks */ -#define IXGB_RCTL_RXEN 0x00000002 -#define IXGB_RCTL_SBP 0x00000004 -#define IXGB_RCTL_UPE 0x00000008 -#define IXGB_RCTL_MPE 0x00000010 -#define IXGB_RCTL_RDMTS_MASK 0x00000300 -#define IXGB_RCTL_RDMTS_1_2 0x00000000 -#define IXGB_RCTL_RDMTS_1_4 0x00000100 -#define IXGB_RCTL_RDMTS_1_8 0x00000200 -#define IXGB_RCTL_MO_MASK 0x00003000 -#define IXGB_RCTL_MO_47_36 0x00000000 -#define IXGB_RCTL_MO_46_35 0x00001000 -#define IXGB_RCTL_MO_45_34 0x00002000 -#define IXGB_RCTL_MO_43_32 0x00003000 -#define IXGB_RCTL_MO_SHIFT 12 -#define IXGB_RCTL_BAM 0x00008000 -#define IXGB_RCTL_BSIZE_MASK 0x00030000 -#define IXGB_RCTL_BSIZE_2048 0x00000000 -#define IXGB_RCTL_BSIZE_4096 0x00010000 -#define IXGB_RCTL_BSIZE_8192 0x00020000 -#define IXGB_RCTL_BSIZE_16384 0x00030000 -#define IXGB_RCTL_VFE 0x00040000 -#define IXGB_RCTL_CFIEN 0x00080000 -#define IXGB_RCTL_CFI 0x00100000 -#define IXGB_RCTL_RPDA_MASK 0x00600000 -#define IXGB_RCTL_RPDA_MC_MAC 0x00000000 -#define IXGB_RCTL_MC_ONLY 0x00400000 -#define IXGB_RCTL_CFF 
0x00800000 -#define IXGB_RCTL_SECRC 0x04000000 -#define IXGB_RDT_FPDB 0x80000000 - -#define IXGB_RCTL_IDLE_RX_UNIT 0 - -/* FCRTL Bit Masks */ -#define IXGB_FCRTL_XONE 0x80000000 - -/* RXDCTL Bit Masks */ -#define IXGB_RXDCTL_PTHRESH_MASK 0x000001FF -#define IXGB_RXDCTL_PTHRESH_SHIFT 0 -#define IXGB_RXDCTL_HTHRESH_MASK 0x0003FE00 -#define IXGB_RXDCTL_HTHRESH_SHIFT 9 -#define IXGB_RXDCTL_WTHRESH_MASK 0x07FC0000 -#define IXGB_RXDCTL_WTHRESH_SHIFT 18 - -/* RAIDC Bit Masks */ -#define IXGB_RAIDC_HIGHTHRS_MASK 0x0000003F -#define IXGB_RAIDC_DELAY_MASK 0x000FF800 -#define IXGB_RAIDC_DELAY_SHIFT 11 -#define IXGB_RAIDC_POLL_MASK 0x1FF00000 -#define IXGB_RAIDC_POLL_SHIFT 20 -#define IXGB_RAIDC_RXT_GATE 0x40000000 -#define IXGB_RAIDC_EN 0x80000000 - -#define IXGB_RAIDC_POLL_1000_INTERRUPTS_PER_SECOND 1220 -#define IXGB_RAIDC_POLL_5000_INTERRUPTS_PER_SECOND 244 -#define IXGB_RAIDC_POLL_10000_INTERRUPTS_PER_SECOND 122 -#define IXGB_RAIDC_POLL_20000_INTERRUPTS_PER_SECOND 61 - -/* RXCSUM Bit Masks */ -#define IXGB_RXCSUM_IPOFL 0x00000100 -#define IXGB_RXCSUM_TUOFL 0x00000200 - -/* RAH Bit Masks */ -#define IXGB_RAH_ASEL_MASK 0x00030000 -#define IXGB_RAH_ASEL_DEST 0x00000000 -#define IXGB_RAH_ASEL_SRC 0x00010000 -#define IXGB_RAH_AV 0x80000000 - -/* TCTL Bit Masks */ -#define IXGB_TCTL_TCE 0x00000001 -#define IXGB_TCTL_TXEN 0x00000002 -#define IXGB_TCTL_TPDE 0x00000004 - -#define IXGB_TCTL_IDLE_TX_UNIT 0 - -/* TXDCTL Bit Masks */ -#define IXGB_TXDCTL_PTHRESH_MASK 0x0000007F -#define IXGB_TXDCTL_HTHRESH_MASK 0x00007F00 -#define IXGB_TXDCTL_HTHRESH_SHIFT 8 -#define IXGB_TXDCTL_WTHRESH_MASK 0x007F0000 -#define IXGB_TXDCTL_WTHRESH_SHIFT 16 - -/* TSPMT Bit Masks */ -#define IXGB_TSPMT_TSMT_MASK 0x0000FFFF -#define IXGB_TSPMT_TSPBP_MASK 0xFFFF0000 -#define IXGB_TSPMT_TSPBP_SHIFT 16 - -/* PAP Bit Masks */ -#define IXGB_PAP_TXPC_MASK 0x0000FFFF -#define IXGB_PAP_TXPV_MASK 0x000F0000 -#define IXGB_PAP_TXPV_10G 0x00000000 -#define IXGB_PAP_TXPV_1G 0x00010000 -#define IXGB_PAP_TXPV_2G 0x00020000 -#define IXGB_PAP_TXPV_3G 0x00030000 -#define IXGB_PAP_TXPV_4G 0x00040000 -#define IXGB_PAP_TXPV_5G 0x00050000 -#define IXGB_PAP_TXPV_6G 0x00060000 -#define IXGB_PAP_TXPV_7G 0x00070000 -#define IXGB_PAP_TXPV_8G 0x00080000 -#define IXGB_PAP_TXPV_9G 0x00090000 -#define IXGB_PAP_TXPV_WAN 0x000F0000 - -/* PCSC1 Bit Masks */ -#define IXGB_PCSC1_LOOPBACK 0x00004000 - -/* PCSC2 Bit Masks */ -#define IXGB_PCSC2_PCS_TYPE_MASK 0x00000003 -#define IXGB_PCSC2_PCS_TYPE_10GBX 0x00000001 - -/* PCSS1 Bit Masks */ -#define IXGB_PCSS1_LOCAL_FAULT 0x00000080 -#define IXGB_PCSS1_RX_LINK_STATUS 0x00000004 - -/* PCSS2 Bit Masks */ -#define IXGB_PCSS2_DEV_PRES_MASK 0x0000C000 -#define IXGB_PCSS2_DEV_PRES 0x00004000 -#define IXGB_PCSS2_TX_LF 0x00000800 -#define IXGB_PCSS2_RX_LF 0x00000400 -#define IXGB_PCSS2_10GBW 0x00000004 -#define IXGB_PCSS2_10GBX 0x00000002 -#define IXGB_PCSS2_10GBR 0x00000001 - -/* XPCSS Bit Masks */ -#define IXGB_XPCSS_ALIGN_STATUS 0x00001000 -#define IXGB_XPCSS_PATTERN_TEST 0x00000800 -#define IXGB_XPCSS_LANE_3_SYNC 0x00000008 -#define IXGB_XPCSS_LANE_2_SYNC 0x00000004 -#define IXGB_XPCSS_LANE_1_SYNC 0x00000002 -#define IXGB_XPCSS_LANE_0_SYNC 0x00000001 - -/* XPCSTC Bit Masks */ -#define IXGB_XPCSTC_BERT_TRIG 0x00200000 -#define IXGB_XPCSTC_BERT_SST 0x00100000 -#define IXGB_XPCSTC_BERT_PSZ_MASK 0x000C0000 -#define IXGB_XPCSTC_BERT_PSZ_SHIFT 17 -#define IXGB_XPCSTC_BERT_PSZ_INF 0x00000003 -#define IXGB_XPCSTC_BERT_PSZ_68 0x00000001 -#define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000 - -/* MSCA bit Masks */ -/* New Protocol Address 
*/ -#define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF -#define IXGB_MSCA_NP_ADDR_SHIFT 0 -/* Either Device Type or Register Address,depending on ST_CODE */ -#define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000 -#define IXGB_MSCA_DEV_TYPE_SHIFT 16 -#define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000 -#define IXGB_MSCA_PHY_ADDR_SHIFT 21 -#define IXGB_MSCA_OP_CODE_MASK 0x0C000000 -/* OP_CODE == 00, Address cycle, New Protocol */ -/* OP_CODE == 01, Write operation */ -/* OP_CODE == 10, Read operation */ -/* OP_CODE == 11, Read, auto increment, New Protocol */ -#define IXGB_MSCA_ADDR_CYCLE 0x00000000 -#define IXGB_MSCA_WRITE 0x04000000 -#define IXGB_MSCA_READ 0x08000000 -#define IXGB_MSCA_READ_AUTOINC 0x0C000000 -#define IXGB_MSCA_OP_CODE_SHIFT 26 -#define IXGB_MSCA_ST_CODE_MASK 0x30000000 -/* ST_CODE == 00, New Protocol */ -/* ST_CODE == 01, Old Protocol */ -#define IXGB_MSCA_NEW_PROTOCOL 0x00000000 -#define IXGB_MSCA_OLD_PROTOCOL 0x10000000 -#define IXGB_MSCA_ST_CODE_SHIFT 28 -/* Initiate command, self-clearing when command completes */ -#define IXGB_MSCA_MDI_COMMAND 0x40000000 -/*MDI In Progress Enable. */ -#define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000 - -/* MSRWD bit masks */ -#define IXGB_MSRWD_WRITE_DATA_MASK 0x0000FFFF -#define IXGB_MSRWD_WRITE_DATA_SHIFT 0 -#define IXGB_MSRWD_READ_DATA_MASK 0xFFFF0000 -#define IXGB_MSRWD_READ_DATA_SHIFT 16 - -/* Definitions for the optics devices on the MDIO bus. */ -#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */ - -#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */ - -/* Vendor-specific MDIO registers */ -#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */ -#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */ - -#define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80 -#define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00 -#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */ - -/* Layout of a single receive descriptor. The controller assumes that this - * structure is packed into 16 bytes, which is a safe assumption with most - * compilers. However, some compilers may insert padding between the fields, - * in which case the structure must be packed in some compiler-specific - * manner. */ -struct ixgb_rx_desc { - __le64 buff_addr; - __le16 length; - __le16 reserved; - u8 status; - u8 errors; - __le16 special; -}; - -#define IXGB_RX_DESC_STATUS_DD 0x01 -#define IXGB_RX_DESC_STATUS_EOP 0x02 -#define IXGB_RX_DESC_STATUS_IXSM 0x04 -#define IXGB_RX_DESC_STATUS_VP 0x08 -#define IXGB_RX_DESC_STATUS_TCPCS 0x20 -#define IXGB_RX_DESC_STATUS_IPCS 0x40 -#define IXGB_RX_DESC_STATUS_PIF 0x80 - -#define IXGB_RX_DESC_ERRORS_CE 0x01 -#define IXGB_RX_DESC_ERRORS_SE 0x02 -#define IXGB_RX_DESC_ERRORS_P 0x08 -#define IXGB_RX_DESC_ERRORS_TCPE 0x20 -#define IXGB_RX_DESC_ERRORS_IPE 0x40 -#define IXGB_RX_DESC_ERRORS_RXE 0x80 - -#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */ - -/* Layout of a single transmit descriptor. The controller assumes that this - * structure is packed into 16 bytes, which is a safe assumption with most - * compilers. However, some compilers may insert padding between the fields, - * in which case the structure must be packed in some compiler-specific - * manner. 
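The MSCA/MSRWD masks above describe one complete MDIO transaction. A hedged sketch of how a read could be composed from them; the driver's ixgb_read_phy_reg(), earlier in ixgb_hw.c, is the authoritative sequence. The preliminary address cycle that the new protocol requires is omitted here, the 10 us polling interval is an assumption, and reg_address, device_type and phy_address are illustrative locals:

	u32 cmd, data;

	cmd = ((u32)reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
	      ((u32)device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
	      ((u32)phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
	      IXGB_MSCA_READ | IXGB_MSCA_NEW_PROTOCOL |
	      IXGB_MSCA_MDI_COMMAND;
	IXGB_WRITE_REG(hw, MSCA, cmd);

	do {
		udelay(10);			/* assumed polling interval */
		cmd = IXGB_READ_REG(hw, MSCA);
	} while (cmd & IXGB_MSCA_MDI_COMMAND);	/* bit self-clears on completion */

	data = (IXGB_READ_REG(hw, MSRWD) & IXGB_MSRWD_READ_DATA_MASK) >>
	       IXGB_MSRWD_READ_DATA_SHIFT;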
*/ -struct ixgb_tx_desc { - __le64 buff_addr; - __le32 cmd_type_len; - u8 status; - u8 popts; - __le16 vlan; -}; - -#define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF -#define IXGB_TX_DESC_TYPE_MASK 0x00F00000 -#define IXGB_TX_DESC_TYPE_SHIFT 20 -#define IXGB_TX_DESC_CMD_MASK 0xFF000000 -#define IXGB_TX_DESC_CMD_SHIFT 24 -#define IXGB_TX_DESC_CMD_EOP 0x01000000 -#define IXGB_TX_DESC_CMD_TSE 0x04000000 -#define IXGB_TX_DESC_CMD_RS 0x08000000 -#define IXGB_TX_DESC_CMD_VLE 0x40000000 -#define IXGB_TX_DESC_CMD_IDE 0x80000000 - -#define IXGB_TX_DESC_TYPE 0x00100000 - -#define IXGB_TX_DESC_STATUS_DD 0x01 - -#define IXGB_TX_DESC_POPTS_IXSM 0x01 -#define IXGB_TX_DESC_POPTS_TXSM 0x02 -#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */ - -struct ixgb_context_desc { - u8 ipcss; - u8 ipcso; - __le16 ipcse; - u8 tucss; - u8 tucso; - __le16 tucse; - __le32 cmd_type_len; - u8 status; - u8 hdr_len; - __le16 mss; -}; - -#define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000 -#define IXGB_CONTEXT_DESC_CMD_IP 0x02000000 -#define IXGB_CONTEXT_DESC_CMD_TSE 0x04000000 -#define IXGB_CONTEXT_DESC_CMD_RS 0x08000000 -#define IXGB_CONTEXT_DESC_CMD_IDE 0x80000000 - -#define IXGB_CONTEXT_DESC_TYPE 0x00000000 - -#define IXGB_CONTEXT_DESC_STATUS_DD 0x01 - -/* Filters */ -#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ -#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ -#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */ - -#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0 -#define ENET_HEADER_SIZE 14 -#define ENET_FCS_LENGTH 4 -#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128 -#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60 -#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514 -#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00 - -/* Phy Addresses */ -#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */ -#define IXGB_XAUII_PHY_ADDR 0x1 /* Xauii transceiver phy address */ -#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */ - -/* This structure takes a 64k flash and maps it for identification commands */ -struct ixgb_flash_buffer { - u8 manufacturer_id; - u8 device_id; - u8 filler1[0x2AA8]; - u8 cmd2; - u8 filler2[0x2AAA]; - u8 cmd1; - u8 filler3[0xAAAA]; -}; - -/* Flow control parameters */ -struct ixgb_fc { - u32 high_water; /* Flow Control High-water */ - u32 low_water; /* Flow Control Low-water */ - u16 pause_time; /* Flow Control Pause timer */ - bool send_xon; /* Flow control send XON */ - ixgb_fc_type type; /* Type of flow control */ -}; - -/* The historical defaults for the flow control values are given below. 
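Both packing comments above rely on the compiler emitting exactly 16 bytes per descriptor; the controller walks the rings as raw 16-byte records. A compile-time guard along these lines (not present in the original driver; BUILD_BUG_ON is the usual kernel idiom) would turn a padding surprise into a build failure instead of silent ring corruption, assuming it is invoked once from an init path:

	#include <linux/bug.h>

	static inline void ixgb_check_desc_layout(void)
	{
		BUILD_BUG_ON(sizeof(struct ixgb_rx_desc) != 16);
		BUILD_BUG_ON(sizeof(struct ixgb_tx_desc) != 16);
		BUILD_BUG_ON(sizeof(struct ixgb_context_desc) != 16);
	}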
*/ -#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ -#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ -#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ - -/* Phy definitions */ -#define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF -#define IXGB_MAX_PHY_ADDRESS 31 -#define IXGB_MAX_PHY_DEV_TYPE 31 - -/* Bus parameters */ -struct ixgb_bus { - ixgb_bus_speed speed; - ixgb_bus_width width; - ixgb_bus_type type; -}; - -struct ixgb_hw { - u8 __iomem *hw_addr;/* Base Address of the hardware */ - void *back; /* Pointer to OS-dependent struct */ - struct ixgb_fc fc; /* Flow control parameters */ - struct ixgb_bus bus; /* Bus parameters */ - u32 phy_id; /* Phy Identifier */ - u32 phy_addr; /* XGMII address of Phy */ - ixgb_mac_type mac_type; /* Identifier for MAC controller */ - ixgb_phy_type phy_type; /* Transceiver/phy identifier */ - u32 max_frame_size; /* Maximum frame size supported */ - u32 mc_filter_type; /* Multicast filter hash type */ - u32 num_mc_addrs; /* Number of current Multicast addrs */ - u8 curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */ - u32 num_tx_desc; /* Number of Transmit descriptors */ - u32 num_rx_desc; /* Number of Receive descriptors */ - u32 rx_buffer_size; /* Size of Receive buffer */ - bool link_up; /* true if link is valid */ - bool adapter_stopped; /* State of adapter */ - u16 device_id; /* device id from PCI configuration space */ - u16 vendor_id; /* vendor id from PCI configuration space */ - u8 revision_id; /* revision id from PCI configuration space */ - u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */ - u16 subsystem_id; /* subsystem id from PCI configuration space */ - u32 bar0; /* Base Address registers */ - u32 bar1; - u32 bar2; - u32 bar3; - u16 pci_cmd_word; /* PCI command register id from PCI configuration space */ - __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ - unsigned long io_base; /* Our I/O mapped location */ - u32 lastLFC; - u32 lastRFC; -}; - -/* Statistics reported by the hardware */ -struct ixgb_hw_stats { - u64 tprl; - u64 tprh; - u64 gprcl; - u64 gprch; - u64 bprcl; - u64 bprch; - u64 mprcl; - u64 mprch; - u64 uprcl; - u64 uprch; - u64 vprcl; - u64 vprch; - u64 jprcl; - u64 jprch; - u64 gorcl; - u64 gorch; - u64 torl; - u64 torh; - u64 rnbc; - u64 ruc; - u64 roc; - u64 rlec; - u64 crcerrs; - u64 icbc; - u64 ecbc; - u64 mpc; - u64 tptl; - u64 tpth; - u64 gptcl; - u64 gptch; - u64 bptcl; - u64 bptch; - u64 mptcl; - u64 mptch; - u64 uptcl; - u64 uptch; - u64 vptcl; - u64 vptch; - u64 jptcl; - u64 jptch; - u64 gotcl; - u64 gotch; - u64 totl; - u64 toth; - u64 dc; - u64 plt64c; - u64 tsctc; - u64 tsctfc; - u64 ibic; - u64 rfc; - u64 lfc; - u64 pfrc; - u64 pftc; - u64 mcfrc; - u64 mcftc; - u64 xonrxc; - u64 xontxc; - u64 xoffrxc; - u64 xofftxc; - u64 rjc; -}; - -/* Function Prototypes */ -extern bool ixgb_adapter_stop(struct ixgb_hw *hw); -extern bool ixgb_init_hw(struct ixgb_hw *hw); -extern bool ixgb_adapter_start(struct ixgb_hw *hw); -extern void ixgb_check_for_link(struct ixgb_hw *hw); -extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw); - -extern void ixgb_rar_set(struct ixgb_hw *hw, - u8 *addr, - u32 index); - - -/* Filters (multicast, vlan, receive) */ -extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw, - u8 *mc_addr_list, - u32 mc_addr_count, - u32 pad); - -/* Vfta functions */ -extern void ixgb_write_vfta(struct ixgb_hw *hw, - u32 offset, - u32 value); - -/* Access functions to eeprom data */ -void ixgb_get_ee_mac_addr(struct ixgb_hw 
*hw, u8 *mac_addr); -u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw); -u16 ixgb_get_ee_device_id(struct ixgb_hw *hw); -bool ixgb_get_eeprom_data(struct ixgb_hw *hw); -__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index); - -/* Everything else */ -void ixgb_led_on(struct ixgb_hw *hw); -void ixgb_led_off(struct ixgb_hw *hw); -void ixgb_write_pci_cfg(struct ixgb_hw *hw, - u32 reg, - u16 * value); - - -#endif /* _IXGB_HW_H_ */ diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h deleted file mode 100644 index 2a58847..0000000 --- a/drivers/net/ixgb/ixgb_ids.h +++ /dev/null @@ -1,53 +0,0 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGB_IDS_H_ -#define _IXGB_IDS_H_ - -/********************************************************************** -** The Device and Vendor IDs for 10 Gigabit MACs -**********************************************************************/ - -#define INTEL_VENDOR_ID 0x8086 -#define INTEL_SUBVENDOR_ID 0x8086 -#define SUN_VENDOR_ID 0x108E -#define SUN_SUBVENDOR_ID 0x108E - -#define IXGB_DEVICE_ID_82597EX 0x1048 -#define IXGB_DEVICE_ID_82597EX_SR 0x1A48 -#define IXGB_DEVICE_ID_82597EX_LR 0x1B48 -#define IXGB_SUBDEVICE_ID_A11F 0xA11F -#define IXGB_SUBDEVICE_ID_A01F 0xA01F - -#define IXGB_DEVICE_ID_82597EX_CX4 0x109E -#define IXGB_SUBDEVICE_ID_A00C 0xA00C -#define IXGB_SUBDEVICE_ID_A01C 0xA01C -#define IXGB_SUBDEVICE_ID_7036 0x7036 - -#endif /* #ifndef _IXGB_IDS_H_ */ -/* End of File */ diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c deleted file mode 100644 index 6a130eb..0000000 --- a/drivers/net/ixgb/ixgb_main.c +++ /dev/null @@ -1,2332 +0,0 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include "ixgb.h" - -char ixgb_driver_name[] = "ixgb"; -static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; - -#define DRIVERNAPI "-NAPI" -#define DRV_VERSION "1.0.135-k2" DRIVERNAPI -const char ixgb_driver_version[] = DRV_VERSION; -static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation."; - -#define IXGB_CB_LENGTH 256 -static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH; -module_param(copybreak, uint, 0644); -MODULE_PARM_DESC(copybreak, - "Maximum size of packet that is copied to a new buffer on receive"); - -/* ixgb_pci_tbl - PCI Device ID Table - * - * Wildcard entries (PCI_ANY_ID) should come last - * Last entry must be all 0s - * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, - * Class, Class Mask, private data (not used) } - */ -static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = { - {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - - /* required last entry */ - {0,} -}; - -MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl); - -/* Local Function Prototypes */ -static int ixgb_init_module(void); -static void ixgb_exit_module(void); -static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); -static void __devexit ixgb_remove(struct pci_dev *pdev); -static int ixgb_sw_init(struct ixgb_adapter *adapter); -static int ixgb_open(struct net_device *netdev); -static int ixgb_close(struct net_device *netdev); -static void ixgb_configure_tx(struct ixgb_adapter *adapter); -static void ixgb_configure_rx(struct ixgb_adapter *adapter); -static void ixgb_setup_rctl(struct ixgb_adapter *adapter); -static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter); -static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter); -static void ixgb_set_multi(struct net_device *netdev); -static void ixgb_watchdog(unsigned long data); -static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb, - struct net_device *netdev); -static struct net_device_stats *ixgb_get_stats(struct net_device *netdev); -static int ixgb_change_mtu(struct net_device *netdev, int new_mtu); -static int ixgb_set_mac(struct net_device *netdev, void *p); -static irqreturn_t ixgb_intr(int irq, void *data); -static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter); - -static int ixgb_clean(struct napi_struct *, int); -static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int); -static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int); - -static void ixgb_tx_timeout(struct net_device *dev); -static void ixgb_tx_timeout_task(struct work_struct *work); - -static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); -static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); -static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); -static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); -static void ixgb_restore_vlan(struct ixgb_adapter *adapter); - -#ifdef 
CONFIG_NET_POLL_CONTROLLER -/* for netdump / net console */ -static void ixgb_netpoll(struct net_device *dev); -#endif - -static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, - enum pci_channel_state state); -static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); -static void ixgb_io_resume (struct pci_dev *pdev); - -static struct pci_error_handlers ixgb_err_handler = { - .error_detected = ixgb_io_error_detected, - .slot_reset = ixgb_io_slot_reset, - .resume = ixgb_io_resume, -}; - -static struct pci_driver ixgb_driver = { - .name = ixgb_driver_name, - .id_table = ixgb_pci_tbl, - .probe = ixgb_probe, - .remove = __devexit_p(ixgb_remove), - .err_handler = &ixgb_err_handler -}; - -MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -#define DEFAULT_DEBUG_LEVEL_SHIFT 3 -static int debug = DEFAULT_DEBUG_LEVEL_SHIFT; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -/** - * ixgb_init_module - Driver Registration Routine - * - * ixgb_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - **/ - -static int __init -ixgb_init_module(void) -{ - pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version); - pr_info("%s\n", ixgb_copyright); - - return pci_register_driver(&ixgb_driver); -} - -module_init(ixgb_init_module); - -/** - * ixgb_exit_module - Driver Exit Cleanup Routine - * - * ixgb_exit_module is called just before the driver is removed - * from memory. - **/ - -static void __exit -ixgb_exit_module(void) -{ - pci_unregister_driver(&ixgb_driver); -} - -module_exit(ixgb_exit_module); - -/** - * ixgb_irq_disable - Mask off interrupt generation on the NIC - * @adapter: board private structure - **/ - -static void -ixgb_irq_disable(struct ixgb_adapter *adapter) -{ - IXGB_WRITE_REG(&adapter->hw, IMC, ~0); - IXGB_WRITE_FLUSH(&adapter->hw); - synchronize_irq(adapter->pdev->irq); -} - -/** - * ixgb_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - **/ - -static void -ixgb_irq_enable(struct ixgb_adapter *adapter) -{ - u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | - IXGB_INT_TXDW | IXGB_INT_LSC; - if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID) - val |= IXGB_INT_GPI0; - IXGB_WRITE_REG(&adapter->hw, IMS, val); - IXGB_WRITE_FLUSH(&adapter->hw); -} - -int -ixgb_up(struct ixgb_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err, irq_flags = IRQF_SHARED; - int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; - struct ixgb_hw *hw = &adapter->hw; - - /* hardware has been reset, we need to reload some things */ - - ixgb_rar_set(hw, netdev->dev_addr, 0); - ixgb_set_multi(netdev); - - ixgb_restore_vlan(adapter); - - ixgb_configure_tx(adapter); - ixgb_setup_rctl(adapter); - ixgb_configure_rx(adapter); - ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring)); - - /* disable interrupts and get the hardware into a known state */ - IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff); - - /* only enable MSI if bus is in PCI-X mode */ - if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) { - err = pci_enable_msi(adapter->pdev); - if (!err) { - adapter->have_msi = 1; - irq_flags = 0; - } - /* proceed to try to request regular interrupt */ - } - - err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags, - netdev->name, netdev); - if (err) { - if 
(adapter->have_msi) - pci_disable_msi(adapter->pdev); - netif_err(adapter, probe, adapter->netdev, - "Unable to allocate interrupt Error: %d\n", err); - return err; - } - - if ((hw->max_frame_size != max_frame) || - (hw->max_frame_size != - (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) { - - hw->max_frame_size = max_frame; - - IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); - - if (hw->max_frame_size > - IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { - u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); - - if (!(ctrl0 & IXGB_CTRL0_JFE)) { - ctrl0 |= IXGB_CTRL0_JFE; - IXGB_WRITE_REG(hw, CTRL0, ctrl0); - } - } - } - - clear_bit(__IXGB_DOWN, &adapter->flags); - - napi_enable(&adapter->napi); - ixgb_irq_enable(adapter); - - netif_wake_queue(netdev); - - mod_timer(&adapter->watchdog_timer, jiffies); - - return 0; -} - -void -ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) -{ - struct net_device *netdev = adapter->netdev; - - /* prevent the interrupt handler from restarting watchdog */ - set_bit(__IXGB_DOWN, &adapter->flags); - - napi_disable(&adapter->napi); - /* waiting for NAPI to complete can re-enable interrupts */ - ixgb_irq_disable(adapter); - free_irq(adapter->pdev->irq, netdev); - - if (adapter->have_msi) - pci_disable_msi(adapter->pdev); - - if (kill_watchdog) - del_timer_sync(&adapter->watchdog_timer); - - adapter->link_speed = 0; - adapter->link_duplex = 0; - netif_carrier_off(netdev); - netif_stop_queue(netdev); - - ixgb_reset(adapter); - ixgb_clean_tx_ring(adapter); - ixgb_clean_rx_ring(adapter); -} - -void -ixgb_reset(struct ixgb_adapter *adapter) -{ - struct ixgb_hw *hw = &adapter->hw; - - ixgb_adapter_stop(hw); - if (!ixgb_init_hw(hw)) - netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n"); - - /* restore frame size information */ - IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); - if (hw->max_frame_size > - IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { - u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); - if (!(ctrl0 & IXGB_CTRL0_JFE)) { - ctrl0 |= IXGB_CTRL0_JFE; - IXGB_WRITE_REG(hw, CTRL0, ctrl0); - } - } -} - -static const struct net_device_ops ixgb_netdev_ops = { - .ndo_open = ixgb_open, - .ndo_stop = ixgb_close, - .ndo_start_xmit = ixgb_xmit_frame, - .ndo_get_stats = ixgb_get_stats, - .ndo_set_multicast_list = ixgb_set_multi, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = ixgb_set_mac, - .ndo_change_mtu = ixgb_change_mtu, - .ndo_tx_timeout = ixgb_tx_timeout, - .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgb_netpoll, -#endif -}; - -/** - * ixgb_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in ixgb_pci_tbl - * - * Returns 0 on success, negative on failure - * - * ixgb_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. 
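A condensed restatement of the interrupt setup in ixgb_up() above (identifiers from the driver): MSI is attempted only when the bus runs in PCI-X mode, and a failed pci_enable_msi() quietly falls back to a shared legacy interrupt. IRQF_SHARED is dropped in the MSI case because an MSI vector is per-device and never shared:

	int irq_flags = IRQF_SHARED;

	if ((IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) &&
	    !pci_enable_msi(adapter->pdev)) {
		adapter->have_msi = 1;
		irq_flags = 0;		/* MSI vectors are never shared */
	}
	err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
			  netdev->name, netdev);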
- **/ - -static int __devinit -ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *netdev = NULL; - struct ixgb_adapter *adapter; - static int cards_found = 0; - int pci_using_dac; - int i; - int err; - - err = pci_enable_device(pdev); - if (err) - return err; - - pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - pr_err("No usable DMA configuration, aborting\n"); - goto err_dma_mask; - } - } - } - - err = pci_request_regions(pdev, ixgb_driver_name); - if (err) - goto err_request_regions; - - pci_set_master(pdev); - - netdev = alloc_etherdev(sizeof(struct ixgb_adapter)); - if (!netdev) { - err = -ENOMEM; - goto err_alloc_etherdev; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - pci_set_drvdata(pdev, netdev); - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - adapter->hw.back = adapter; - adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT); - - adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0); - if (!adapter->hw.hw_addr) { - err = -EIO; - goto err_ioremap; - } - - for (i = BAR_1; i <= BAR_5; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { - adapter->hw.io_base = pci_resource_start(pdev, i); - break; - } - } - - netdev->netdev_ops = &ixgb_netdev_ops; - ixgb_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64); - - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - - adapter->bd_number = cards_found; - adapter->link_speed = 0; - adapter->link_duplex = 0; - - /* setup the private structure */ - - err = ixgb_sw_init(adapter); - if (err) - goto err_sw_init; - - netdev->features = NETIF_F_SG | - NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; - netdev->features |= NETIF_F_TSO; - - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } - - /* make sure the EEPROM is good */ - - if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { - netif_err(adapter, probe, adapter->netdev, - "The EEPROM Checksum Is Not Valid\n"); - err = -EIO; - goto err_eeprom; - } - - ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); - memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - - if (!is_valid_ether_addr(netdev->perm_addr)) { - netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n"); - err = -EIO; - goto err_eeprom; - } - - adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw); - - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = ixgb_watchdog; - adapter->watchdog_timer.data = (unsigned long)adapter; - - INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); - - strcpy(netdev->name, "eth%d"); - err = register_netdev(netdev); - if (err) - goto err_register; - - /* carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - - netif_info(adapter, probe, adapter->netdev, - "Intel(R) PRO/10GbE Network Connection\n"); - ixgb_check_options(adapter); - /* reset the hardware with the new settings */ - - ixgb_reset(adapter); - - cards_found++; - return 0; - -err_register: -err_sw_init: -err_eeprom: - iounmap(adapter->hw.hw_addr); 
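A condensed, slightly simplified restatement of the DMA-mask negotiation near the top of ixgb_probe() above: prefer 64-bit addressing (pci_using_dac later gates NETIF_F_HIGHDMA), fall back to a 32-bit mask, and abort the probe only when neither is usable:

	pci_using_dac = 0;
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;	/* 64-bit DMA: high-memory buffers allowed */
	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) &&
		   dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		pr_err("No usable DMA configuration, aborting\n");
		goto err_dma_mask;
	}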
-err_ioremap: - free_netdev(netdev); -err_alloc_etherdev: - pci_release_regions(pdev); -err_request_regions: -err_dma_mask: - pci_disable_device(pdev); - return err; -} - -/** - * ixgb_remove - Device Removal Routine - * @pdev: PCI device information struct - * - * ixgb_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. This could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. - **/ - -static void __devexit -ixgb_remove(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgb_adapter *adapter = netdev_priv(netdev); - - cancel_work_sync(&adapter->tx_timeout_task); - - unregister_netdev(netdev); - - iounmap(adapter->hw.hw_addr); - pci_release_regions(pdev); - - free_netdev(netdev); - pci_disable_device(pdev); -} - -/** - * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter) - * @adapter: board private structure to initialize - * - * ixgb_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). - **/ - -static int __devinit -ixgb_sw_init(struct ixgb_adapter *adapter) -{ - struct ixgb_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - /* PCI config space info */ - - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_id = pdev->subsystem_device; - - hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; - adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */ - - if ((hw->device_id == IXGB_DEVICE_ID_82597EX) || - (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) || - (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) || - (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) - hw->mac_type = ixgb_82597; - else { - /* should never have loaded on this device */ - netif_err(adapter, probe, adapter->netdev, "unsupported device id\n"); - } - - /* enable flow control to be programmed */ - hw->fc.send_xon = 1; - - set_bit(__IXGB_DOWN, &adapter->flags); - return 0; -} - -/** - * ixgb_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready.
- **/ - -static int -ixgb_open(struct net_device *netdev) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - int err; - - /* allocate transmit descriptors */ - err = ixgb_setup_tx_resources(adapter); - if (err) - goto err_setup_tx; - - netif_carrier_off(netdev); - - /* allocate receive descriptors */ - - err = ixgb_setup_rx_resources(adapter); - if (err) - goto err_setup_rx; - - err = ixgb_up(adapter); - if (err) - goto err_up; - - netif_start_queue(netdev); - - return 0; - -err_up: - ixgb_free_rx_resources(adapter); -err_setup_rx: - ixgb_free_tx_resources(adapter); -err_setup_tx: - ixgb_reset(adapter); - - return err; -} - -/** - * ixgb_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. - **/ - -static int -ixgb_close(struct net_device *netdev) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - - ixgb_down(adapter, true); - - ixgb_free_tx_resources(adapter); - ixgb_free_rx_resources(adapter); - - return 0; -} - -/** - * ixgb_setup_tx_resources - allocate Tx resources (Descriptors) - * @adapter: board private structure - * - * Return 0 on success, negative on failure - **/ - -int -ixgb_setup_tx_resources(struct ixgb_adapter *adapter) -{ - struct ixgb_desc_ring *txdr = &adapter->tx_ring; - struct pci_dev *pdev = adapter->pdev; - int size; - - size = sizeof(struct ixgb_buffer) * txdr->count; - txdr->buffer_info = vzalloc(size); - if (!txdr->buffer_info) { - netif_err(adapter, probe, adapter->netdev, - "Unable to allocate transmit descriptor ring memory\n"); - return -ENOMEM; - } - - /* round up to nearest 4K */ - - txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); - txdr->size = ALIGN(txdr->size, 4096); - - txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, - GFP_KERNEL); - if (!txdr->desc) { - vfree(txdr->buffer_info); - netif_err(adapter, probe, adapter->netdev, - "Unable to allocate transmit descriptor memory\n"); - return -ENOMEM; - } - memset(txdr->desc, 0, txdr->size); - - txdr->next_to_use = 0; - txdr->next_to_clean = 0; - - return 0; -} - -/** - * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset. - * @adapter: board private structure - * - * Configure the Tx unit of the MAC after a reset. 
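The shape of the two allocations in ixgb_setup_tx_resources() above (identifiers from the driver, error handling trimmed): the per-buffer bookkeeping array lives in vmalloc space because only the CPU ever touches it, while the descriptor ring must be visible to the NIC and so comes from dma_alloc_coherent(), with its size rounded up to a 4 KB multiple, comfortably beyond the 128-byte alignment ixgb_hw.h notes for RDLEN/TDLEN:

	txdr->buffer_info = vzalloc(sizeof(struct ixgb_buffer) * txdr->count);

	txdr->size = ALIGN(txdr->count * sizeof(struct ixgb_tx_desc), 4096);
	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);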
- **/ - -static void -ixgb_configure_tx(struct ixgb_adapter *adapter) -{ - u64 tdba = adapter->tx_ring.dma; - u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc); - u32 tctl; - struct ixgb_hw *hw = &adapter->hw; - - /* Setup the Base and Length of the Tx Descriptor Ring - * tx_ring.dma can be either a 32 or 64 bit value - */ - - IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); - IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32)); - - IXGB_WRITE_REG(hw, TDLEN, tdlen); - - /* Setup the HW Tx Head and Tail descriptor pointers */ - - IXGB_WRITE_REG(hw, TDH, 0); - IXGB_WRITE_REG(hw, TDT, 0); - - /* don't set up txdctl, it induces performance problems if configured - * incorrectly */ - /* Set the Tx Interrupt Delay register */ - - IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay); - - /* Program the Transmit Control Register */ - - tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE; - IXGB_WRITE_REG(hw, TCTL, tctl); - - /* Setup Transmit Descriptor Settings for this adapter */ - adapter->tx_cmd_type = - IXGB_TX_DESC_TYPE | - (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0); -} - -/** - * ixgb_setup_rx_resources - allocate Rx resources (Descriptors) - * @adapter: board private structure - * - * Returns 0 on success, negative on failure - **/ - -int -ixgb_setup_rx_resources(struct ixgb_adapter *adapter) -{ - struct ixgb_desc_ring *rxdr = &adapter->rx_ring; - struct pci_dev *pdev = adapter->pdev; - int size; - - size = sizeof(struct ixgb_buffer) * rxdr->count; - rxdr->buffer_info = vzalloc(size); - if (!rxdr->buffer_info) { - netif_err(adapter, probe, adapter->netdev, - "Unable to allocate receive descriptor ring\n"); - return -ENOMEM; - } - - /* Round up to nearest 4K */ - - rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); - rxdr->size = ALIGN(rxdr->size, 4096); - - rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, - GFP_KERNEL); - - if (!rxdr->desc) { - vfree(rxdr->buffer_info); - netif_err(adapter, probe, adapter->netdev, - "Unable to allocate receive descriptors\n"); - return -ENOMEM; - } - memset(rxdr->desc, 0, rxdr->size); - - rxdr->next_to_clean = 0; - rxdr->next_to_use = 0; - - return 0; -} - -/** - * ixgb_setup_rctl - configure the receive control register - * @adapter: Board private structure - **/ - -static void -ixgb_setup_rctl(struct ixgb_adapter *adapter) -{ - u32 rctl; - - rctl = IXGB_READ_REG(&adapter->hw, RCTL); - - rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); - - rctl |= - IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | - IXGB_RCTL_RXEN | IXGB_RCTL_CFF | - (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); - - rctl |= IXGB_RCTL_SECRC; - - if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048) - rctl |= IXGB_RCTL_BSIZE_2048; - else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096) - rctl |= IXGB_RCTL_BSIZE_4096; - else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192) - rctl |= IXGB_RCTL_BSIZE_8192; - else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384) - rctl |= IXGB_RCTL_BSIZE_16384; - - IXGB_WRITE_REG(&adapter->hw, RCTL, rctl); -} - -/** - * ixgb_configure_rx - Configure 82597 Receive Unit after Reset. - * @adapter: board private structure - * - * Configure the Rx unit of the MAC after a reset. 
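A worked example for the BSIZE ladder in ixgb_setup_rctl() above, assuming the standard 1500-byte MTU: ixgb_sw_init() (earlier) computes max_frame_size = 1500 + ENET_HEADER_SIZE (14) + ENET_FCS_LENGTH (4) = 1518, and rx_buffer_len = 1518 + 8 errata pad bytes = 1526. That fits under IXGB_RXBUFFER_2048 (2048, from ixgb.h), so RCTL is programmed with IXGB_RCTL_BSIZE_2048; only jumbo MTUs push the ladder up to the 4096/8192/16384 buffer sizes.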
- **/ - -static void -ixgb_configure_rx(struct ixgb_adapter *adapter) -{ - u64 rdba = adapter->rx_ring.dma; - u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc); - struct ixgb_hw *hw = &adapter->hw; - u32 rctl; - u32 rxcsum; - - /* make sure receives are disabled while setting up the descriptors */ - - rctl = IXGB_READ_REG(hw, RCTL); - IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN); - - /* set the Receive Delay Timer Register */ - - IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay); - - /* Setup the Base and Length of the Rx Descriptor Ring */ - - IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); - IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32)); - - IXGB_WRITE_REG(hw, RDLEN, rdlen); - - /* Setup the HW Rx Head and Tail Descriptor Pointers */ - IXGB_WRITE_REG(hw, RDH, 0); - IXGB_WRITE_REG(hw, RDT, 0); - - /* due to the hardware errata with RXDCTL, we are unable to use any of - * the performance enhancing features of it without causing other - * subtle bugs, some of the bugs could include receive length - * corruption at high data rates (WTHRESH > 0) and/or receive - * descriptor ring irregularities (particularly in hardware cache) */ - IXGB_WRITE_REG(hw, RXDCTL, 0); - - /* Enable Receive Checksum Offload for TCP and UDP */ - if (adapter->rx_csum) { - rxcsum = IXGB_READ_REG(hw, RXCSUM); - rxcsum |= IXGB_RXCSUM_TUOFL; - IXGB_WRITE_REG(hw, RXCSUM, rxcsum); - } - - /* Enable Receives */ - - IXGB_WRITE_REG(hw, RCTL, rctl); -} - -/** - * ixgb_free_tx_resources - Free Tx Resources - * @adapter: board private structure - * - * Free all transmit software resources - **/ - -void -ixgb_free_tx_resources(struct ixgb_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - - ixgb_clean_tx_ring(adapter); - - vfree(adapter->tx_ring.buffer_info); - adapter->tx_ring.buffer_info = NULL; - - dma_free_coherent(&pdev->dev, adapter->tx_ring.size, - adapter->tx_ring.desc, adapter->tx_ring.dma); - - adapter->tx_ring.desc = NULL; -} - -static void -ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter, - struct ixgb_buffer *buffer_info) -{ - if (buffer_info->dma) { - if (buffer_info->mapped_as_page) - dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, - buffer_info->length, DMA_TO_DEVICE); - else - dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, - buffer_info->length, DMA_TO_DEVICE); - buffer_info->dma = 0; - } - - if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); - buffer_info->skb = NULL; - } - buffer_info->time_stamp = 0; - /* these fields must always be initialized in tx - * buffer_info->length = 0; - * buffer_info->next_to_watch = 0; */ -} - -/** - * ixgb_clean_tx_ring - Free Tx Buffers - * @adapter: board private structure - **/ - -static void -ixgb_clean_tx_ring(struct ixgb_adapter *adapter) -{ - struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; - struct ixgb_buffer *buffer_info; - unsigned long size; - unsigned int i; - - /* Free all the Tx ring sk_buffs */ - - for (i = 0; i < tx_ring->count; i++) { - buffer_info = &tx_ring->buffer_info[i]; - ixgb_unmap_and_free_tx_resource(adapter, buffer_info); - } - - size = sizeof(struct ixgb_buffer) * tx_ring->count; - memset(tx_ring->buffer_info, 0, size); - - /* Zero out the descriptor ring */ - - memset(tx_ring->desc, 0, tx_ring->size); - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - - IXGB_WRITE_REG(&adapter->hw, TDH, 0); - IXGB_WRITE_REG(&adapter->hw, TDT, 0); -} - -/** - * ixgb_free_rx_resources - Free Rx Resources - * @adapter: board private structure - * - * Free all receive software resources
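The pairing rule that ixgb_unmap_and_free_tx_resource() above is honoring: the DMA API requires the unmap variant to match the map variant, so the driver records mapped_as_page when mapping each fragment and dispatches on it at release time. Minimal sketch, identifiers from the driver:

	if (buffer_info->mapped_as_page)
		dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_TO_DEVICE);
	else
		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_TO_DEVICE);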
- **/ - -void -ixgb_free_rx_resources(struct ixgb_adapter *adapter) -{ - struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; - struct pci_dev *pdev = adapter->pdev; - - ixgb_clean_rx_ring(adapter); - - vfree(rx_ring->buffer_info); - rx_ring->buffer_info = NULL; - - dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); - - rx_ring->desc = NULL; -} - -/** - * ixgb_clean_rx_ring - Free Rx Buffers - * @adapter: board private structure - **/ - -static void -ixgb_clean_rx_ring(struct ixgb_adapter *adapter) -{ - struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; - struct ixgb_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; - unsigned long size; - unsigned int i; - - /* Free all the Rx ring sk_buffs */ - - for (i = 0; i < rx_ring->count; i++) { - buffer_info = &rx_ring->buffer_info[i]; - if (buffer_info->dma) { - dma_unmap_single(&pdev->dev, - buffer_info->dma, - buffer_info->length, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - buffer_info->length = 0; - } - - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; - } - } - - size = sizeof(struct ixgb_buffer) * rx_ring->count; - memset(rx_ring->buffer_info, 0, size); - - /* Zero out the descriptor ring */ - - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - - IXGB_WRITE_REG(&adapter->hw, RDH, 0); - IXGB_WRITE_REG(&adapter->hw, RDT, 0); -} - -/** - * ixgb_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ - -static int -ixgb_set_mac(struct net_device *netdev, void *p) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - - ixgb_rar_set(&adapter->hw, addr->sa_data, 0); - - return 0; -} - -/** - * ixgb_set_multi - Multicast and Promiscuous mode set - * @netdev: network interface device structure - * - * The set_multi entry point is called whenever the multicast address - * list or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper multicast, - * promiscuous mode, and all-multi behavior. 
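
ixgb_set_mac() above accepts an address only if is_valid_ether_addr() does: the first octet's group (multicast) bit must be clear and the address must not be all zeros. A stand-alone restatement of that check (the helper name and sample addresses are invented):

	#include <stdio.h>
	#include <string.h>

	static int valid_ether_addr(const unsigned char *a)
	{
		static const unsigned char zero[6];

		/* group bit clear and non-zero, as is_valid_ether_addr() requires */
		return !(a[0] & 1) && memcmp(a, zero, 6) != 0;
	}

	int main(void)
	{
		unsigned char good[6]  = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };
		unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

		printf("good=%d mcast=%d\n", valid_ether_addr(good),
		       valid_ether_addr(mcast));	/* good=1 mcast=0 */
		return 0;
	}
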
- **/ - -static void -ixgb_set_multi(struct net_device *netdev) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - struct ixgb_hw *hw = &adapter->hw; - struct netdev_hw_addr *ha; - u32 rctl; - int i; - - /* Check for Promiscuous and All Multicast modes */ - - rctl = IXGB_READ_REG(hw, RCTL); - - if (netdev->flags & IFF_PROMISC) { - rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); - /* disable VLAN filtering */ - rctl &= ~IXGB_RCTL_CFIEN; - rctl &= ~IXGB_RCTL_VFE; - } else { - if (netdev->flags & IFF_ALLMULTI) { - rctl |= IXGB_RCTL_MPE; - rctl &= ~IXGB_RCTL_UPE; - } else { - rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); - } - /* enable VLAN filtering */ - rctl |= IXGB_RCTL_VFE; - rctl &= ~IXGB_RCTL_CFIEN; - } - - if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { - rctl |= IXGB_RCTL_MPE; - IXGB_WRITE_REG(hw, RCTL, rctl); - } else { - u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES * - IXGB_ETH_LENGTH_OF_ADDRESS]; - - IXGB_WRITE_REG(hw, RCTL, rctl); - - i = 0; - netdev_for_each_mc_addr(ha, netdev) - memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS], - ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS); - - ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0); - } - - if (netdev->features & NETIF_F_HW_VLAN_RX) - ixgb_vlan_strip_enable(adapter); - else - ixgb_vlan_strip_disable(adapter); - -} - -/** - * ixgb_watchdog - Timer Call-back - * @data: pointer to netdev cast into an unsigned long - **/ - -static void -ixgb_watchdog(unsigned long data) -{ - struct ixgb_adapter *adapter = (struct ixgb_adapter *)data; - struct net_device *netdev = adapter->netdev; - struct ixgb_desc_ring *txdr = &adapter->tx_ring; - - ixgb_check_for_link(&adapter->hw); - - if (ixgb_check_for_bad_link(&adapter->hw)) { - /* force the reset path */ - netif_stop_queue(netdev); - } - - if (adapter->hw.link_up) { - if (!netif_carrier_ok(netdev)) { - netdev_info(netdev, - "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n", - (adapter->hw.fc.type == ixgb_fc_full) ? - "RX/TX" : - (adapter->hw.fc.type == ixgb_fc_rx_pause) ? - "RX" : - (adapter->hw.fc.type == ixgb_fc_tx_pause) ? - "TX" : "None"); - adapter->link_speed = 10000; - adapter->link_duplex = FULL_DUPLEX; - netif_carrier_on(netdev); - } - } else { - if (netif_carrier_ok(netdev)) { - adapter->link_speed = 0; - adapter->link_duplex = 0; - netdev_info(netdev, "NIC Link is Down\n"); - netif_carrier_off(netdev); - } - } - - ixgb_update_stats(adapter); - - if (!netif_carrier_ok(netdev)) { - if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) { - /* We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). 
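
When the list fits in the hardware filter, ixgb_set_multi() above flattens the kernel's multicast list into one contiguous byte array before handing it to ixgb_mc_addr_list_update(). A user-space sketch of just that packing step (sizes and addresses invented; the real bound is IXGB_MAX_NUM_MULTICAST_ADDRESSES):

	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN 6
	#define MAX_MC   4	/* stand-in for the hardware limit */

	int main(void)
	{
		unsigned char list[2][ETH_ALEN] = {
			{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
			{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
		};
		unsigned char mta[MAX_MC * ETH_ALEN];
		int i;

		/* same layout the netdev_for_each_mc_addr() loop produces */
		for (i = 0; i < 2; i++)
			memcpy(&mta[i * ETH_ALEN], list[i], ETH_ALEN);

		printf("packed 2 addresses, first byte 0x%02x\n", mta[0]);
		return 0;
	}
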
*/
-			schedule_work(&adapter->tx_timeout_task);
-			/* return immediately since reset is imminent */
-			return;
-		}
-	}
-
-	/* Force detection of hung controller every watchdog period */
-	adapter->detect_tx_hung = true;
-
-	/* generate an interrupt to force clean up of any stragglers */
-	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
-
-	/* Reset the timer */
-	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
-}
-
-#define IXGB_TX_FLAGS_CSUM		0x00000001
-#define IXGB_TX_FLAGS_VLAN		0x00000002
-#define IXGB_TX_FLAGS_TSO		0x00000004
-
-static int
-ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
-{
-	struct ixgb_context_desc *context_desc;
-	unsigned int i;
-	u8 ipcss, ipcso, tucss, tucso, hdr_len;
-	u16 ipcse, tucse, mss;
-	int err;
-
-	if (likely(skb_is_gso(skb))) {
-		struct ixgb_buffer *buffer_info;
-		struct iphdr *iph;
-
-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
-
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-		mss = skb_shinfo(skb)->gso_size;
-		iph = ip_hdr(skb);
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-							 iph->daddr, 0,
-							 IPPROTO_TCP, 0);
-		ipcss = skb_network_offset(skb);
-		ipcso = (void *)&(iph->check) - (void *)skb->data;
-		ipcse = skb_transport_offset(skb) - 1;
-		tucss = skb_transport_offset(skb);
-		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
-		tucse = 0;
-
-		i = adapter->tx_ring.next_to_use;
-		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
-		buffer_info = &adapter->tx_ring.buffer_info[i];
-		WARN_ON(buffer_info->dma != 0);
-
-		context_desc->ipcss = ipcss;
-		context_desc->ipcso = ipcso;
-		context_desc->ipcse = cpu_to_le16(ipcse);
-		context_desc->tucss = tucss;
-		context_desc->tucso = tucso;
-		context_desc->tucse = cpu_to_le16(tucse);
-		context_desc->mss = cpu_to_le16(mss);
-		context_desc->hdr_len = hdr_len;
-		context_desc->status = 0;
-		context_desc->cmd_type_len = cpu_to_le32(
-						IXGB_CONTEXT_DESC_TYPE
-						| IXGB_CONTEXT_DESC_CMD_TSE
-						| IXGB_CONTEXT_DESC_CMD_IP
-						| IXGB_CONTEXT_DESC_CMD_TCP
-						| IXGB_CONTEXT_DESC_CMD_IDE
-						| (skb->len - (hdr_len)));
-
-
-		if (++i == adapter->tx_ring.count) i = 0;
-		adapter->tx_ring.next_to_use = i;
-
-		return 1;
-	}
-
-	return 0;
-}
-
-static bool
-ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
-{
-	struct ixgb_context_desc *context_desc;
-	unsigned int i;
-	u8 css, cso;
-
-	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-		struct ixgb_buffer *buffer_info;
-		css = skb_checksum_start_offset(skb);
-		cso = css + skb->csum_offset;
-
-		i = adapter->tx_ring.next_to_use;
-		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
-		buffer_info = &adapter->tx_ring.buffer_info[i];
-		WARN_ON(buffer_info->dma != 0);
-
-		context_desc->tucss = css;
-		context_desc->tucso = cso;
-		context_desc->tucse = 0;
-		/* zero out any previously existing data in one instruction */
-		*(u32 *)&(context_desc->ipcss) = 0;
-		context_desc->status = 0;
-		context_desc->hdr_len = 0;
-		context_desc->mss = 0;
-		context_desc->cmd_type_len =
-			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
-				    | IXGB_TX_DESC_CMD_IDE);
-
-		if (++i == adapter->tx_ring.count) i = 0;
-		adapter->tx_ring.next_to_use = i;
-
-		return true;
-	}
-
-	return false;
-}
-
-#define IXGB_MAX_TXD_PWR	14
-#define IXGB_MAX_DATA_PER_TXD	(1 << IXGB_MAX_TXD_PWR)
-
-static int
-ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
-	    unsigned int first)
-{
-	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	struct ixgb_buffer *buffer_info;
-	int len = skb_headlen(skb);
-	unsigned int offset = 0, size, count = 0, i;
-	unsigned int mss = skb_shinfo(skb)->gso_size;
-	unsigned int
nr_frags = skb_shinfo(skb)->nr_frags; - unsigned int f; - - i = tx_ring->next_to_use; - - while (len) { - buffer_info = &tx_ring->buffer_info[i]; - size = min(len, IXGB_MAX_DATA_PER_TXD); - /* Workaround for premature desc write-backs - * in TSO mode. Append 4-byte sentinel desc */ - if (unlikely(mss && !nr_frags && size == len && size > 8)) - size -= 4; - - buffer_info->length = size; - WARN_ON(buffer_info->dma != 0); - buffer_info->time_stamp = jiffies; - buffer_info->mapped_as_page = false; - buffer_info->dma = dma_map_single(&pdev->dev, - skb->data + offset, - size, DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, buffer_info->dma)) - goto dma_error; - buffer_info->next_to_watch = 0; - - len -= size; - offset += size; - count++; - if (len) { - i++; - if (i == tx_ring->count) - i = 0; - } - } - - for (f = 0; f < nr_frags; f++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; - len = frag->size; - offset = frag->page_offset; - - while (len) { - i++; - if (i == tx_ring->count) - i = 0; - - buffer_info = &tx_ring->buffer_info[i]; - size = min(len, IXGB_MAX_DATA_PER_TXD); - - /* Workaround for premature desc write-backs - * in TSO mode. Append 4-byte sentinel desc */ - if (unlikely(mss && (f == (nr_frags - 1)) - && size == len && size > 8)) - size -= 4; - - buffer_info->length = size; - buffer_info->time_stamp = jiffies; - buffer_info->mapped_as_page = true; - buffer_info->dma = - dma_map_page(&pdev->dev, frag->page, - offset, size, DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, buffer_info->dma)) - goto dma_error; - buffer_info->next_to_watch = 0; - - len -= size; - offset += size; - count++; - } - } - tx_ring->buffer_info[i].skb = skb; - tx_ring->buffer_info[first].next_to_watch = i; - - return count; - -dma_error: - dev_err(&pdev->dev, "TX DMA map failed\n"); - buffer_info->dma = 0; - if (count) - count--; - - while (count--) { - if (i==0) - i += tx_ring->count; - i--; - buffer_info = &tx_ring->buffer_info[i]; - ixgb_unmap_and_free_tx_resource(adapter, buffer_info); - } - - return 0; -} - -static void -ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) -{ - struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; - struct ixgb_tx_desc *tx_desc = NULL; - struct ixgb_buffer *buffer_info; - u32 cmd_type_len = adapter->tx_cmd_type; - u8 status = 0; - u8 popts = 0; - unsigned int i; - - if (tx_flags & IXGB_TX_FLAGS_TSO) { - cmd_type_len |= IXGB_TX_DESC_CMD_TSE; - popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM); - } - - if (tx_flags & IXGB_TX_FLAGS_CSUM) - popts |= IXGB_TX_DESC_POPTS_TXSM; - - if (tx_flags & IXGB_TX_FLAGS_VLAN) - cmd_type_len |= IXGB_TX_DESC_CMD_VLE; - - i = tx_ring->next_to_use; - - while (count--) { - buffer_info = &tx_ring->buffer_info[i]; - tx_desc = IXGB_TX_DESC(*tx_ring, i); - tx_desc->buff_addr = cpu_to_le64(buffer_info->dma); - tx_desc->cmd_type_len = - cpu_to_le32(cmd_type_len | buffer_info->length); - tx_desc->status = status; - tx_desc->popts = popts; - tx_desc->vlan = cpu_to_le16(vlan_id); - - if (++i == tx_ring->count) i = 0; - } - - tx_desc->cmd_type_len |= - cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
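
In ixgb_tso() above the TCP checksum field is pre-seeded with the complement of the IPv4 pseudo-header sum, computed with a zero length, so the hardware only has to fold in each segment's payload. Roughly, in user space (csum_fold() and pseudo_hdr_csum() here are simplified stand-ins for the kernel's checksum helpers):

	#include <stdint.h>
	#include <stdio.h>

	/* fold a 32-bit accumulator into a 16-bit ones-complement checksum */
	static uint16_t csum_fold(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	/* rough equivalent of csum_tcpudp_magic() for IPv4 */
	static uint16_t pseudo_hdr_csum(uint32_t saddr, uint32_t daddr,
					uint16_t len, uint8_t proto)
	{
		uint32_t sum = 0;

		sum += (saddr >> 16) + (saddr & 0xffff);
		sum += (daddr >> 16) + (daddr & 0xffff);
		sum += proto;
		sum += len;
		return csum_fold(sum);
	}

	int main(void)
	{
		/* 192.0.2.1 -> 192.0.2.2, TCP, len 0, as in the TSO seed above */
		uint16_t seed = ~pseudo_hdr_csum(0xc0000201, 0xc0000202, 0, 6) & 0xffff;

		printf("checksum seed: 0x%04x\n", seed);
		return 0;
	}
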
*/ - wmb(); - - tx_ring->next_to_use = i; - IXGB_WRITE_REG(&adapter->hw, TDT, i); -} - -static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; - - netif_stop_queue(netdev); - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. */ - if (likely(IXGB_DESC_UNUSED(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! */ - netif_start_queue(netdev); - ++adapter->restart_queue; - return 0; -} - -static int ixgb_maybe_stop_tx(struct net_device *netdev, - struct ixgb_desc_ring *tx_ring, int size) -{ - if (likely(IXGB_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __ixgb_maybe_stop_tx(netdev, size); -} - - -/* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \ - (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) -#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->date */ + \ - MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \ - + 1 /* one more needed for sentinel TSO workaround */ - -static netdev_tx_t -ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - unsigned int first; - unsigned int tx_flags = 0; - int vlan_id = 0; - int count = 0; - int tso; - - if (test_bit(__IXGB_DOWN, &adapter->flags)) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - if (skb->len <= 0) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, - DESC_NEEDED))) - return NETDEV_TX_BUSY; - - if (vlan_tx_tag_present(skb)) { - tx_flags |= IXGB_TX_FLAGS_VLAN; - vlan_id = vlan_tx_tag_get(skb); - } - - first = adapter->tx_ring.next_to_use; - - tso = ixgb_tso(adapter, skb); - if (tso < 0) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - if (likely(tso)) - tx_flags |= IXGB_TX_FLAGS_TSO; - else if (ixgb_tx_csum(adapter, skb)) - tx_flags |= IXGB_TX_FLAGS_CSUM; - - count = ixgb_tx_map(adapter, skb, first); - - if (count) { - ixgb_tx_queue(adapter, count, vlan_id, tx_flags); - /* Make sure there is space in the ring for the next send. */ - ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED); - - } else { - dev_kfree_skb_any(skb); - adapter->tx_ring.buffer_info[first].time_stamp = 0; - adapter->tx_ring.next_to_use = first; - } - - return NETDEV_TX_OK; -} - -/** - * ixgb_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - **/ - -static void -ixgb_tx_timeout(struct net_device *netdev) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - - /* Do the reset outside of interrupt context */ - schedule_work(&adapter->tx_timeout_task); -} - -static void -ixgb_tx_timeout_task(struct work_struct *work) -{ - struct ixgb_adapter *adapter = - container_of(work, struct ixgb_adapter, tx_timeout_task); - - adapter->tx_timeout_count++; - ixgb_down(adapter, true); - ixgb_up(adapter); -} - -/** - * ixgb_get_stats - Get System Network Statistics - * @netdev: network interface device structure - * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. 
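
DESC_NEEDED above is a worst-case bound per packet: one descriptor for each 16 KiB (IXGB_MAX_DATA_PER_TXD) chunk of the linear data, the same for every page-sized fragment, plus one context descriptor and one for the sentinel TSO workaround. Restated as a compilable example (the MAX_SKB_FRAGS and PAGE_SIZE values are illustrative; both are really configuration dependent):

	#include <stdio.h>

	#define IXGB_MAX_TXD_PWR	14
	#define IXGB_MAX_DATA_PER_TXD	(1 << IXGB_MAX_TXD_PWR)		/* 16384 */
	#define TXD_USE_COUNT(S)	(((S) >> IXGB_MAX_TXD_PWR) + \
					 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
	#define MAX_SKB_FRAGS	18	/* illustrative */
	#define PAGE_SIZE	4096	/* illustrative */

	int main(void)
	{
		int needed = TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD)	/* head: 1   */
			   + MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)	/* frags: 18 */
			   + 1						/* context   */
			   + 1;						/* sentinel  */

		printf("worst-case descriptors per packet: %d\n", needed); /* 21 */
		return 0;
	}
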
- **/ - -static struct net_device_stats * -ixgb_get_stats(struct net_device *netdev) -{ - return &netdev->stats; -} - -/** - * ixgb_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - **/ - -static int -ixgb_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; - int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; - - /* MTU < 68 is an error for IPv4 traffic, just don't allow it */ - if ((new_mtu < 68) || - (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { - netif_err(adapter, probe, adapter->netdev, - "Invalid MTU setting %d\n", new_mtu); - return -EINVAL; - } - - if (old_max_frame == max_frame) - return 0; - - if (netif_running(netdev)) - ixgb_down(adapter, true); - - adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */ - - netdev->mtu = new_mtu; - - if (netif_running(netdev)) - ixgb_up(adapter); - - return 0; -} - -/** - * ixgb_update_stats - Update the board statistics counters. - * @adapter: board private structure - **/ - -void -ixgb_update_stats(struct ixgb_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - /* Prevent stats update while adapter is being reset */ - if (pci_channel_offline(pdev)) - return; - - if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || - (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { - u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); - u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); - u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH); - u64 bcast = ((u64)bcast_h << 32) | bcast_l; - - multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); - /* fix up multicast stats by removing broadcasts */ - if (multi >= bcast) - multi -= bcast; - - adapter->stats.mprcl += (multi & 0xFFFFFFFF); - adapter->stats.mprch += (multi >> 32); - adapter->stats.bprcl += bcast_l; - adapter->stats.bprch += bcast_h; - } else { - adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); - adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH); - adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL); - adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH); - } - adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); - adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); - adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); - adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); - adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); - adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); - adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); - adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH); - adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL); - adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH); - adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL); - adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH); - adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL); - adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH); - adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC); - adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC); - adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC); - adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC); - adapter->stats.crcerrs += 
IXGB_READ_REG(&adapter->hw, CRCERRS); - adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC); - adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC); - adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC); - adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL); - adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH); - adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL); - adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH); - adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL); - adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH); - adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL); - adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH); - adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL); - adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH); - adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL); - adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH); - adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL); - adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH); - adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL); - adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH); - adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL); - adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH); - adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC); - adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C); - adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC); - adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC); - adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC); - adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC); - adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC); - adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC); - adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC); - adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC); - adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC); - adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC); - adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC); - adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC); - adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC); - adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC); - - /* Fill out the OS statistics structure */ - - netdev->stats.rx_packets = adapter->stats.gprcl; - netdev->stats.tx_packets = adapter->stats.gptcl; - netdev->stats.rx_bytes = adapter->stats.gorcl; - netdev->stats.tx_bytes = adapter->stats.gotcl; - netdev->stats.multicast = adapter->stats.mprcl; - netdev->stats.collisions = 0; - - /* ignore RLEC as it reports errors for padded (<64bytes) frames - * with a length in the type/len field */ - netdev->stats.rx_errors = - /* adapter->stats.rnbc + */ adapter->stats.crcerrs + - adapter->stats.ruc + - adapter->stats.roc /*+ adapter->stats.rlec */ + - adapter->stats.icbc + - adapter->stats.ecbc + adapter->stats.mpc; - - /* see above - * netdev->stats.rx_length_errors = adapter->stats.rlec; - */ - - netdev->stats.rx_crc_errors = adapter->stats.crcerrs; - netdev->stats.rx_fifo_errors = adapter->stats.mpc; - netdev->stats.rx_missed_errors = adapter->stats.mpc; - netdev->stats.rx_over_errors = adapter->stats.mpc; - - netdev->stats.tx_errors = 0; - netdev->stats.rx_frame_errors = 0; - netdev->stats.tx_aborted_errors = 0; - netdev->stats.tx_carrier_errors = 0; - netdev->stats.tx_fifo_errors = 0; - netdev->stats.tx_heartbeat_errors = 0; - netdev->stats.tx_window_errors = 0; -} - -#define 
IXGB_MAX_INTR 10 -/** - * ixgb_intr - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ - -static irqreturn_t -ixgb_intr(int irq, void *data) -{ - struct net_device *netdev = data; - struct ixgb_adapter *adapter = netdev_priv(netdev); - struct ixgb_hw *hw = &adapter->hw; - u32 icr = IXGB_READ_REG(hw, ICR); - - if (unlikely(!icr)) - return IRQ_NONE; /* Not our interrupt */ - - if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) - if (!test_bit(__IXGB_DOWN, &adapter->flags)) - mod_timer(&adapter->watchdog_timer, jiffies); - - if (napi_schedule_prep(&adapter->napi)) { - - /* Disable interrupts and register for poll. The flush - of the posted write is intentionally left out. - */ - - IXGB_WRITE_REG(&adapter->hw, IMC, ~0); - __napi_schedule(&adapter->napi); - } - return IRQ_HANDLED; -} - -/** - * ixgb_clean - NAPI Rx polling callback - * @adapter: board private structure - **/ - -static int -ixgb_clean(struct napi_struct *napi, int budget) -{ - struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi); - int work_done = 0; - - ixgb_clean_tx_irq(adapter); - ixgb_clean_rx_irq(adapter, &work_done, budget); - - /* If budget not fully consumed, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - if (!test_bit(__IXGB_DOWN, &adapter->flags)) - ixgb_irq_enable(adapter); - } - - return work_done; -} - -/** - * ixgb_clean_tx_irq - Reclaim resources after transmit completes - * @adapter: board private structure - **/ - -static bool -ixgb_clean_tx_irq(struct ixgb_adapter *adapter) -{ - struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; - struct net_device *netdev = adapter->netdev; - struct ixgb_tx_desc *tx_desc, *eop_desc; - struct ixgb_buffer *buffer_info; - unsigned int i, eop; - bool cleaned = false; - - i = tx_ring->next_to_clean; - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = IXGB_TX_DESC(*tx_ring, eop); - - while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) { - - rmb(); /* read buffer_info after eop_desc */ - for (cleaned = false; !cleaned; ) { - tx_desc = IXGB_TX_DESC(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; - - if (tx_desc->popts & - (IXGB_TX_DESC_POPTS_TXSM | - IXGB_TX_DESC_POPTS_IXSM)) - adapter->hw_csum_tx_good++; - - ixgb_unmap_and_free_tx_resource(adapter, buffer_info); - - *(u32 *)&(tx_desc->status) = 0; - - cleaned = (i == eop); - if (++i == tx_ring->count) i = 0; - } - - eop = tx_ring->buffer_info[i].next_to_watch; - eop_desc = IXGB_TX_DESC(*tx_ring, eop); - } - - tx_ring->next_to_clean = i; - - if (unlikely(cleaned && netif_carrier_ok(netdev) && - IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. 
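
ixgb_clean() above follows the standard NAPI contract: do at most `budget` units of receive work, and only when the ring drains early call napi_complete() and re-enable interrupts; returning the full budget keeps the poll scheduled. A toy model of that control flow (all names and the ring contents are invented scaffolding, not driver code):

	#include <stdio.h>

	#define RING_SIZE 8
	static int ring[RING_SIZE] = { 1, 1, 1, 0, 0, 0, 0, 0 };  /* 3 pending */
	static int irq_enabled;

	static int poll(int budget)
	{
		int work_done = 0;

		while (work_done < budget && work_done < RING_SIZE &&
		       ring[work_done])
			work_done++;		/* "process" one packet */

		if (work_done < budget)
			irq_enabled = 1;	/* napi_complete() + irq enable */
		return work_done;		/* == budget would keep us polled */
	}

	int main(void)
	{
		int done = poll(64);

		printf("work_done=%d irq_enabled=%d\n", done, irq_enabled);
		return 0;
	}
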
*/ - smp_mb(); - - if (netif_queue_stopped(netdev) && - !(test_bit(__IXGB_DOWN, &adapter->flags))) { - netif_wake_queue(netdev); - ++adapter->restart_queue; - } - } - - if (adapter->detect_tx_hung) { - /* detect a transmit hang in hardware, this serializes the - * check with the clearing of time_stamp and movement of i */ - adapter->detect_tx_hung = false; - if (tx_ring->buffer_info[eop].time_stamp && - time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ) - && !(IXGB_READ_REG(&adapter->hw, STATUS) & - IXGB_STATUS_TXOFF)) { - /* detected Tx unit hang */ - netif_err(adapter, drv, adapter->netdev, - "Detected Tx Unit Hang\n" - " TDH <%x>\n" - " TDT <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " next_to_watch <%x>\n" - " jiffies <%lx>\n" - " next_to_watch.status <%x>\n", - IXGB_READ_REG(&adapter->hw, TDH), - IXGB_READ_REG(&adapter->hw, TDT), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, - eop, - jiffies, - eop_desc->status); - netif_stop_queue(netdev); - } - } - - return cleaned; -} - -/** - * ixgb_rx_checksum - Receive Checksum Offload for 82597. - * @adapter: board private structure - * @rx_desc: receive descriptor - * @sk_buff: socket buffer with received data - **/ - -static void -ixgb_rx_checksum(struct ixgb_adapter *adapter, - struct ixgb_rx_desc *rx_desc, - struct sk_buff *skb) -{ - /* Ignore Checksum bit is set OR - * TCP Checksum has not been calculated - */ - if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || - (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { - skb_checksum_none_assert(skb); - return; - } - - /* At this point we know the hardware did the TCP checksum */ - /* now look at the TCP checksum error bit */ - if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { - /* let the stack verify checksum errors */ - skb_checksum_none_assert(skb); - adapter->hw_csum_rx_error++; - } else { - /* TCP checksum is good */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - adapter->hw_csum_rx_good++; - } -} - -/* - * this should improve performance for small packets with large amounts - * of reassembly being done in the stack - */ -static void ixgb_check_copybreak(struct net_device *netdev, - struct ixgb_buffer *buffer_info, - u32 length, struct sk_buff **skb) -{ - struct sk_buff *new_skb; - - if (length > copybreak) - return; - - new_skb = netdev_alloc_skb_ip_align(netdev, length); - if (!new_skb) - return; - - skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, - (*skb)->data - NET_IP_ALIGN, - length + NET_IP_ALIGN); - /* save the skb in buffer_info as good */ - buffer_info->skb = *skb; - *skb = new_skb; -} - -/** - * ixgb_clean_rx_irq - Send received data up the network stack, - * @adapter: board private structure - **/ - -static bool -ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) -{ - struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct ixgb_rx_desc *rx_desc, *next_rxd; - struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; - u32 length; - unsigned int i, j; - int cleaned_count = 0; - bool cleaned = false; - - i = rx_ring->next_to_clean; - rx_desc = IXGB_RX_DESC(*rx_ring, i); - buffer_info = &rx_ring->buffer_info[i]; - - while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) { - struct sk_buff *skb; - u8 status; - - if (*work_done >= work_to_do) - break; - - (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD 
*/ - status = rx_desc->status; - skb = buffer_info->skb; - buffer_info->skb = NULL; - - prefetch(skb->data - NET_IP_ALIGN); - - if (++i == rx_ring->count) - i = 0; - next_rxd = IXGB_RX_DESC(*rx_ring, i); - prefetch(next_rxd); - - j = i + 1; - if (j == rx_ring->count) - j = 0; - next2_buffer = &rx_ring->buffer_info[j]; - prefetch(next2_buffer); - - next_buffer = &rx_ring->buffer_info[i]; - - cleaned = true; - cleaned_count++; - - dma_unmap_single(&pdev->dev, - buffer_info->dma, - buffer_info->length, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - - length = le16_to_cpu(rx_desc->length); - rx_desc->length = 0; - - if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { - - /* All receives must fit into a single buffer */ - - IXGB_DBG("Receive packet consumed multiple buffers " - "length<%x>\n", length); - - dev_kfree_skb_irq(skb); - goto rxdesc_done; - } - - if (unlikely(rx_desc->errors & - (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | - IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) { - dev_kfree_skb_irq(skb); - goto rxdesc_done; - } - - ixgb_check_copybreak(netdev, buffer_info, length, &skb); - - /* Good Receive */ - skb_put(skb, length); - - /* Receive Checksum Offload */ - ixgb_rx_checksum(adapter, rx_desc, skb); - - skb->protocol = eth_type_trans(skb, netdev); - if (status & IXGB_RX_DESC_STATUS_VP) - __vlan_hwaccel_put_tag(skb, - le16_to_cpu(rx_desc->special)); - - netif_receive_skb(skb); - -rxdesc_done: - /* clean up descriptor, might be written over by hw */ - rx_desc->status = 0; - - /* return some buffers to hardware, one at a time is too slow */ - if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) { - ixgb_alloc_rx_buffers(adapter, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - buffer_info = next_buffer; - } - - rx_ring->next_to_clean = i; - - cleaned_count = IXGB_DESC_UNUSED(rx_ring); - if (cleaned_count) - ixgb_alloc_rx_buffers(adapter, cleaned_count); - - return cleaned; -} - -/** - * ixgb_alloc_rx_buffers - Replace used receive buffers - * @adapter: address of board private structure - **/ - -static void -ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count) -{ - struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - struct ixgb_rx_desc *rx_desc; - struct ixgb_buffer *buffer_info; - struct sk_buff *skb; - unsigned int i; - long cleancount; - - i = rx_ring->next_to_use; - buffer_info = &rx_ring->buffer_info[i]; - cleancount = IXGB_DESC_UNUSED(rx_ring); - - - /* leave three descriptors unused */ - while (--cleancount > 2 && cleaned_count--) { - /* recycle! its good for you */ - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - goto map_skb; - } - - skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len); - if (unlikely(!skb)) { - /* Better luck next round */ - adapter->alloc_rx_buff_failed++; - break; - } - - buffer_info->skb = skb; - buffer_info->length = adapter->rx_buffer_len; -map_skb: - buffer_info->dma = dma_map_single(&pdev->dev, - skb->data, - adapter->rx_buffer_len, - DMA_FROM_DEVICE); - - rx_desc = IXGB_RX_DESC(*rx_ring, i); - rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); - /* guarantee DD bit not set now before h/w gets descriptor - * this is the rest of the workaround for h/w double - * writeback. 
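
The refill in ixgb_alloc_rx_buffers() above is sized by IXGB_DESC_UNUSED(), head/tail ring arithmetic that deliberately keeps one slot empty so a full ring and an empty ring remain distinguishable. The macro itself lives in ixgb.h, not in this file; the sketch below restates the presumed arithmetic:

	#include <stdio.h>

	struct ring {
		unsigned int count, next_to_use, next_to_clean;
	};

	/* same shape as the IXGB_DESC_UNUSED() macro in ixgb.h */
	static unsigned int desc_unused(const struct ring *r)
	{
		return ((r->next_to_clean > r->next_to_use) ? 0 : r->count)
			+ r->next_to_clean - r->next_to_use - 1;
	}

	int main(void)
	{
		struct ring r = { .count = 8, .next_to_use = 5, .next_to_clean = 2 };

		/* slots 2..4 are in flight; of the 5 free slots one is reserved */
		printf("unused = %u\n", desc_unused(&r));	/* prints 4 */
		return 0;
	}
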
*/ - rx_desc->status = 0; - - - if (++i == rx_ring->count) i = 0; - buffer_info = &rx_ring->buffer_info[i]; - } - - if (likely(rx_ring->next_to_use != i)) { - rx_ring->next_to_use = i; - if (unlikely(i-- == 0)) - i = (rx_ring->count - 1); - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, such - * as IA-64). */ - wmb(); - IXGB_WRITE_REG(&adapter->hw, RDT, i); - } -} - -static void -ixgb_vlan_strip_enable(struct ixgb_adapter *adapter) -{ - u32 ctrl; - - /* enable VLAN tag insert/strip */ - ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); - ctrl |= IXGB_CTRL0_VME; - IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); -} - -static void -ixgb_vlan_strip_disable(struct ixgb_adapter *adapter) -{ - u32 ctrl; - - /* disable VLAN tag insert/strip */ - ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); - ctrl &= ~IXGB_CTRL0_VME; - IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); -} - -static void -ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - u32 vfta, index; - - /* add VID to filter table */ - - index = (vid >> 5) & 0x7F; - vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); - vfta |= (1 << (vid & 0x1F)); - ixgb_write_vfta(&adapter->hw, index, vfta); - set_bit(vid, adapter->active_vlans); -} - -static void -ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - u32 vfta, index; - - /* remove VID from filter table */ - - index = (vid >> 5) & 0x7F; - vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); - vfta &= ~(1 << (vid & 0x1F)); - ixgb_write_vfta(&adapter->hw, index, vfta); - clear_bit(vid, adapter->active_vlans); -} - -static void -ixgb_restore_vlan(struct ixgb_adapter *adapter) -{ - u16 vid; - - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - ixgb_vlan_rx_add_vid(adapter->netdev, vid); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ - -static void ixgb_netpoll(struct net_device *dev) -{ - struct ixgb_adapter *adapter = netdev_priv(dev); - - disable_irq(adapter->pdev->irq); - ixgb_intr(adapter->pdev->irq, dev); - enable_irq(adapter->pdev->irq); -} -#endif - -/** - * ixgb_io_error_detected() - called when PCI error is detected - * @pdev pointer to pci device with error - * @state pci channel state after error - * - * This callback is called by the PCI subsystem whenever - * a PCI bus error is detected. - */ -static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgb_adapter *adapter = netdev_priv(netdev); - - netif_device_detach(netdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (netif_running(netdev)) - ixgb_down(adapter, true); - - pci_disable_device(pdev); - - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * ixgb_io_slot_reset - called after the pci bus has been reset. - * @pdev pointer to pci device with error - * - * This callback is called after the PCI bus has been reset. - * Basically, this tries to restart the card from scratch. - * This is a shortened version of the device probe/discovery code, - * it resembles the first-half of the ixgb_probe() routine. 
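
ixgb_vlan_rx_add_vid() and ixgb_vlan_rx_kill_vid() above spread the 4096 possible VLAN IDs across 128 32-bit VFTA registers: VID bits 11:5 select the register and bits 4:0 select the bit within it. A worked example:

	#include <stdio.h>

	int main(void)
	{
		unsigned int vid = 257;			/* example VLAN ID */
		unsigned int index = (vid >> 5) & 0x7F;	/* VFTA register 8 */
		unsigned int bit = vid & 0x1F;		/* bit 1 */

		printf("vid %u -> VFTA[%u] |= 1 << %u (mask 0x%08x)\n",
		       vid, index, bit, 1u << bit);
		return 0;
	}
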
- */ -static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgb_adapter *adapter = netdev_priv(netdev); - - if (pci_enable_device(pdev)) { - netif_err(adapter, probe, adapter->netdev, - "Cannot re-enable PCI device after reset\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - - /* Perform card reset only on one instance of the card */ - if (0 != PCI_FUNC (pdev->devfn)) - return PCI_ERS_RESULT_RECOVERED; - - pci_set_master(pdev); - - netif_carrier_off(netdev); - netif_stop_queue(netdev); - ixgb_reset(adapter); - - /* Make sure the EEPROM is good */ - if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { - netif_err(adapter, probe, adapter->netdev, - "After reset, the EEPROM checksum is not valid\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); - memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - - if (!is_valid_ether_addr(netdev->perm_addr)) { - netif_err(adapter, probe, adapter->netdev, - "After reset, invalid MAC address\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -/** - * ixgb_io_resume - called when its OK to resume normal operations - * @pdev pointer to pci device with error - * - * The error recovery driver tells us that its OK to resume - * normal operation. Implementation resembles the second-half - * of the ixgb_probe() routine. - */ -static void ixgb_io_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgb_adapter *adapter = netdev_priv(netdev); - - pci_set_master(pdev); - - if (netif_running(netdev)) { - if (ixgb_up(adapter)) { - pr_err("can't bring device back up after reset\n"); - return; - } - } - - netif_device_attach(netdev); - mod_timer(&adapter->watchdog_timer, jiffies); -} - -/* ixgb_main.c */ diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h deleted file mode 100644 index e361185..0000000 --- a/drivers/net/ixgb/ixgb_osdep.h +++ /dev/null @@ -1,63 +0,0 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-/* glue for the OS independent part of ixgb
- * includes register access macros
- */
-
-#ifndef _IXGB_OSDEP_H_
-#define _IXGB_OSDEP_H_
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-
-#undef ASSERT
-#define ASSERT(x)	BUG_ON(!(x))
-
-#define ENTER()	pr_debug("%s\n", __func__);
-
-#define IXGB_WRITE_REG(a, reg, value) ( \
-	writel((value), ((a)->hw_addr + IXGB_##reg)))
-
-#define IXGB_READ_REG(a, reg) ( \
-	readl((a)->hw_addr + IXGB_##reg))
-
-#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \
-	writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
-
-#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \
-	readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
-
-#define IXGB_WRITE_FLUSH(a)	IXGB_READ_REG(a, STATUS)
-
-#define IXGB_MEMCPY memcpy
-
-#endif /* _IXGB_OSDEP_H_ */
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
deleted file mode 100644
index dd7fbeb..0000000
--- a/drivers/net/ixgb/ixgb_param.c
+++ /dev/null
@@ -1,469 +0,0 @@
-/*******************************************************************************
-
-  Intel PRO/10GbE Linux driver
-  Copyright(c) 1999 - 2008 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "ixgb.h"
-
-/* This is the only thing that needs to be changed to adjust the
- * maximum number of ports that the driver can manage.
- */
-
-#define IXGB_MAX_NIC 8
-
-#define OPTION_UNSET	-1
-#define OPTION_DISABLED	0
-#define OPTION_ENABLED	1
-
-/* All parameters are treated the same, as an integer array of values.
- * This macro just reduces the need to repeat the same declaration code
- * over and over (plus this helps to avoid typo bugs).
- */
-
-#define IXGB_PARAM_INIT { [0 ...
IXGB_MAX_NIC] = OPTION_UNSET } -#define IXGB_PARAM(X, desc) \ - static int __devinitdata X[IXGB_MAX_NIC+1] \ - = IXGB_PARAM_INIT; \ - static unsigned int num_##X = 0; \ - module_param_array_named(X, X, int, &num_##X, 0); \ - MODULE_PARM_DESC(X, desc); - -/* Transmit Descriptor Count - * - * Valid Range: 64-4096 - * - * Default Value: 256 - */ - -IXGB_PARAM(TxDescriptors, "Number of transmit descriptors"); - -/* Receive Descriptor Count - * - * Valid Range: 64-4096 - * - * Default Value: 1024 - */ - -IXGB_PARAM(RxDescriptors, "Number of receive descriptors"); - -/* User Specified Flow Control Override - * - * Valid Range: 0-3 - * - 0 - No Flow Control - * - 1 - Rx only, respond to PAUSE frames but do not generate them - * - 2 - Tx only, generate PAUSE frames but ignore them on receive - * - 3 - Full Flow Control Support - * - * Default Value: 2 - Tx only (silicon bug avoidance) - */ - -IXGB_PARAM(FlowControl, "Flow Control setting"); - -/* XsumRX - Receive Checksum Offload Enable/Disable - * - * Valid Range: 0, 1 - * - 0 - disables all checksum offload - * - 1 - enables receive IP/TCP/UDP checksum offload - * on 82597 based NICs - * - * Default Value: 1 - */ - -IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); - -/* Transmit Interrupt Delay in units of 0.8192 microseconds - * - * Valid Range: 0-65535 - * - * Default Value: 32 - */ - -IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay"); - -/* Receive Interrupt Delay in units of 0.8192 microseconds - * - * Valid Range: 0-65535 - * - * Default Value: 72 - */ - -IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay"); - -/* Receive Flow control high threshold (when we send a pause frame) - * (FCRTH) - * - * Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity) - * - * Default Value: 196,608 (0x30000) - */ - -IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold"); - -/* Receive Flow control low threshold (when we send a resume frame) - * (FCRTL) - * - * Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity) - * must be less than high threshold by at least 8 bytes - * - * Default Value: 163,840 (0x28000) - */ - -IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold"); - -/* Flow control request timeout (how long to pause the link partner's tx) - * (PAP 15:0) - * - * Valid Range: 1 - 65535 - * - * Default Value: 65535 (0xffff) (we'll send an xon if we recover) - */ - -IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout"); - -/* Interrupt Delay Enable - * - * Valid Range: 0, 1 - * - * - 0 - disables transmit interrupt delay - * - 1 - enables transmmit interrupt delay - * - * Default Value: 1 - */ - -IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable"); - - -#define DEFAULT_TIDV 32 -#define MAX_TIDV 0xFFFF -#define MIN_TIDV 0 - -#define DEFAULT_RDTR 72 -#define MAX_RDTR 0xFFFF -#define MIN_RDTR 0 - -#define XSUMRX_DEFAULT OPTION_ENABLED - -#define DEFAULT_FCRTL 0x28000 -#define DEFAULT_FCRTH 0x30000 -#define MIN_FCRTL 0 -#define MAX_FCRTL 0x3FFE8 -#define MIN_FCRTH 8 -#define MAX_FCRTH 0x3FFF0 - -#define MIN_FCPAUSE 1 -#define MAX_FCPAUSE 0xffff -#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */ - -struct ixgb_option { - enum { enable_option, range_option, list_option } type; - const char *name; - const char *err; - int def; - union { - struct { /* range_option info */ - int min; - int max; - } r; - struct { /* list_option info */ - int nr; - const struct ixgb_opt_list { - int i; - const char *str; - } *p; - } l; - } arg; -}; - -static int __devinit -ixgb_validate_option(unsigned 
int *value, const struct ixgb_option *opt) -{ - if (*value == OPTION_UNSET) { - *value = opt->def; - return 0; - } - - switch (opt->type) { - case enable_option: - switch (*value) { - case OPTION_ENABLED: - pr_info("%s Enabled\n", opt->name); - return 0; - case OPTION_DISABLED: - pr_info("%s Disabled\n", opt->name); - return 0; - } - break; - case range_option: - if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { - pr_info("%s set to %i\n", opt->name, *value); - return 0; - } - break; - case list_option: { - int i; - const struct ixgb_opt_list *ent; - - for (i = 0; i < opt->arg.l.nr; i++) { - ent = &opt->arg.l.p[i]; - if (*value == ent->i) { - if (ent->str[0] != '\0') - pr_info("%s\n", ent->str); - return 0; - } - } - } - break; - default: - BUG(); - } - - pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err); - *value = opt->def; - return -1; -} - -/** - * ixgb_check_options - Range Checking for Command Line Parameters - * @adapter: board private structure - * - * This routine checks all command line parameters for valid user - * input. If an invalid value is given, or if no user specified - * value exists, a default value is used. The final value is stored - * in a variable in the adapter structure. - **/ - -void __devinit -ixgb_check_options(struct ixgb_adapter *adapter) -{ - int bd = adapter->bd_number; - if (bd >= IXGB_MAX_NIC) { - pr_notice("Warning: no configuration for board #%i\n", bd); - pr_notice("Using defaults for all values\n"); - } - - { /* Transmit Descriptor Count */ - const struct ixgb_option opt = { - .type = range_option, - .name = "Transmit Descriptors", - .err = "using default of " __MODULE_STRING(DEFAULT_TXD), - .def = DEFAULT_TXD, - .arg = { .r = { .min = MIN_TXD, - .max = MAX_TXD}} - }; - struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; - - if (num_TxDescriptors > bd) { - tx_ring->count = TxDescriptors[bd]; - ixgb_validate_option(&tx_ring->count, &opt); - } else { - tx_ring->count = opt.def; - } - tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); - } - { /* Receive Descriptor Count */ - const struct ixgb_option opt = { - .type = range_option, - .name = "Receive Descriptors", - .err = "using default of " __MODULE_STRING(DEFAULT_RXD), - .def = DEFAULT_RXD, - .arg = { .r = { .min = MIN_RXD, - .max = MAX_RXD}} - }; - struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; - - if (num_RxDescriptors > bd) { - rx_ring->count = RxDescriptors[bd]; - ixgb_validate_option(&rx_ring->count, &opt); - } else { - rx_ring->count = opt.def; - } - rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); - } - { /* Receive Checksum Offload Enable */ - const struct ixgb_option opt = { - .type = enable_option, - .name = "Receive Checksum Offload", - .err = "defaulting to Enabled", - .def = OPTION_ENABLED - }; - - if (num_XsumRX > bd) { - unsigned int rx_csum = XsumRX[bd]; - ixgb_validate_option(&rx_csum, &opt); - adapter->rx_csum = rx_csum; - } else { - adapter->rx_csum = opt.def; - } - } - { /* Flow Control */ - - static const struct ixgb_opt_list fc_list[] = { - { ixgb_fc_none, "Flow Control Disabled" }, - { ixgb_fc_rx_pause, "Flow Control Receive Only" }, - { ixgb_fc_tx_pause, "Flow Control Transmit Only" }, - { ixgb_fc_full, "Flow Control Enabled" }, - { ixgb_fc_default, "Flow Control Hardware Default" } - }; - - static const struct ixgb_option opt = { - .type = list_option, - .name = "Flow Control", - .err = "reading default settings from EEPROM", - .def = ixgb_fc_tx_pause, - .arg = { .l = { .nr = ARRAY_SIZE(fc_list), 
- .p = fc_list }} - }; - - if (num_FlowControl > bd) { - unsigned int fc = FlowControl[bd]; - ixgb_validate_option(&fc, &opt); - adapter->hw.fc.type = fc; - } else { - adapter->hw.fc.type = opt.def; - } - } - { /* Receive Flow Control High Threshold */ - const struct ixgb_option opt = { - .type = range_option, - .name = "Rx Flow Control High Threshold", - .err = "using default of " __MODULE_STRING(DEFAULT_FCRTH), - .def = DEFAULT_FCRTH, - .arg = { .r = { .min = MIN_FCRTH, - .max = MAX_FCRTH}} - }; - - if (num_RxFCHighThresh > bd) { - adapter->hw.fc.high_water = RxFCHighThresh[bd]; - ixgb_validate_option(&adapter->hw.fc.high_water, &opt); - } else { - adapter->hw.fc.high_water = opt.def; - } - if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) - pr_info("Ignoring RxFCHighThresh when no RxFC\n"); - } - { /* Receive Flow Control Low Threshold */ - const struct ixgb_option opt = { - .type = range_option, - .name = "Rx Flow Control Low Threshold", - .err = "using default of " __MODULE_STRING(DEFAULT_FCRTL), - .def = DEFAULT_FCRTL, - .arg = { .r = { .min = MIN_FCRTL, - .max = MAX_FCRTL}} - }; - - if (num_RxFCLowThresh > bd) { - adapter->hw.fc.low_water = RxFCLowThresh[bd]; - ixgb_validate_option(&adapter->hw.fc.low_water, &opt); - } else { - adapter->hw.fc.low_water = opt.def; - } - if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) - pr_info("Ignoring RxFCLowThresh when no RxFC\n"); - } - { /* Flow Control Pause Time Request*/ - const struct ixgb_option opt = { - .type = range_option, - .name = "Flow Control Pause Time Request", - .err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE), - .def = DEFAULT_FCPAUSE, - .arg = { .r = { .min = MIN_FCPAUSE, - .max = MAX_FCPAUSE}} - }; - - if (num_FCReqTimeout > bd) { - unsigned int pause_time = FCReqTimeout[bd]; - ixgb_validate_option(&pause_time, &opt); - adapter->hw.fc.pause_time = pause_time; - } else { - adapter->hw.fc.pause_time = opt.def; - } - if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) - pr_info("Ignoring FCReqTimeout when no RxFC\n"); - } - /* high low and spacing check for rx flow control thresholds */ - if (adapter->hw.fc.type & ixgb_fc_tx_pause) { - /* high must be greater than low */ - if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { - /* set defaults */ - pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n"); - adapter->hw.fc.high_water = DEFAULT_FCRTH; - adapter->hw.fc.low_water = DEFAULT_FCRTL; - } - } - { /* Receive Interrupt Delay */ - const struct ixgb_option opt = { - .type = range_option, - .name = "Receive Interrupt Delay", - .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), - .def = DEFAULT_RDTR, - .arg = { .r = { .min = MIN_RDTR, - .max = MAX_RDTR}} - }; - - if (num_RxIntDelay > bd) { - adapter->rx_int_delay = RxIntDelay[bd]; - ixgb_validate_option(&adapter->rx_int_delay, &opt); - } else { - adapter->rx_int_delay = opt.def; - } - } - { /* Transmit Interrupt Delay */ - const struct ixgb_option opt = { - .type = range_option, - .name = "Transmit Interrupt Delay", - .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), - .def = DEFAULT_TIDV, - .arg = { .r = { .min = MIN_TIDV, - .max = MAX_TIDV}} - }; - - if (num_TxIntDelay > bd) { - adapter->tx_int_delay = TxIntDelay[bd]; - ixgb_validate_option(&adapter->tx_int_delay, &opt); - } else { - adapter->tx_int_delay = opt.def; - } - } - - { /* Transmit Interrupt Delay Enable */ - const struct ixgb_option opt = { - .type = enable_option, - .name = "Tx Interrupt Delay Enable", - .err = "defaulting to Enabled", - .def = OPTION_ENABLED - }; - 
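
Every option block in ixgb_check_options() has the same shape: if module load supplied a value for this board index (num_<param> > bd), validate and use it, otherwise fall back to the compiled-in default. A toy restatement of just the selection step (validation elided; names and values invented):

	#include <stdio.h>

	#define OPTION_UNSET -1

	/* stands in for a module_param_array_named() array and its counter */
	static int TxDescriptors[8] = { 512, OPTION_UNSET };
	static unsigned int num_TxDescriptors = 1;	/* user set entry 0 only */

	static int pick(unsigned int bd, int def)
	{
		/* per-board selection, as in each block of ixgb_check_options() */
		if (num_TxDescriptors > bd && TxDescriptors[bd] != OPTION_UNSET)
			return TxDescriptors[bd];
		return def;
	}

	int main(void)
	{
		printf("board 0: %d, board 1: %d\n", pick(0, 256), pick(1, 256));
		return 0;
	}
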
- if (num_IntDelayEnable > bd) { - unsigned int ide = IntDelayEnable[bd]; - ixgb_validate_option(&ide, &opt); - adapter->tx_int_delay_enable = ide; - } else { - adapter->tx_int_delay_enable = opt.def; - } - } -} diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile deleted file mode 100644 index 7d7387f..0000000 --- a/drivers/net/ixgbe/Makefile +++ /dev/null @@ -1,42 +0,0 @@ -################################################################################ -# -# Intel 10 Gigabit PCI Express Linux driver -# Copyright(c) 1999 - 2010 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - -# -# Makefile for the Intel(R) 10GbE PCI Express ethernet driver -# - -obj-$(CONFIG_IXGBE) += ixgbe.o - -ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ - ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o - -ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ - ixgbe_dcb_82599.o ixgbe_dcb_nl.o - -ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h deleted file mode 100644 index e04a8e4..0000000 --- a/drivers/net/ixgbe/ixgbe.h +++ /dev/null @@ -1,617 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_H_
-#define _IXGBE_H_
-
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/cpumask.h>
-#include <linux/aio.h>
-#include <linux/if_vlan.h>
-
-#include "ixgbe_type.h"
-#include "ixgbe_common.h"
-#include "ixgbe_dcb.h"
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-#define IXGBE_FCOE
-#include "ixgbe_fcoe.h"
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
-#ifdef CONFIG_IXGBE_DCA
-#include <linux/dca.h>
-#endif
-
-/* common prefix used by pr_<> macros */
-#undef pr_fmt
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-/* TX/RX descriptor defines */
-#define IXGBE_DEFAULT_TXD	512
-#define IXGBE_MAX_TXD		4096
-#define IXGBE_MIN_TXD		64
-
-#define IXGBE_DEFAULT_RXD	512
-#define IXGBE_MAX_RXD		4096
-#define IXGBE_MIN_RXD		64
-
-/* flow control */
-#define IXGBE_MIN_FCRTL		0x40
-#define IXGBE_MAX_FCRTL		0x7FF80
-#define IXGBE_MIN_FCRTH		0x600
-#define IXGBE_MAX_FCRTH		0x7FFF0
-#define IXGBE_DEFAULT_FCPAUSE	0xFFFF
-#define IXGBE_MIN_FCPAUSE	0
-#define IXGBE_MAX_FCPAUSE	0xFFFF
-
-/* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_512	512	/* Used for packet split */
-#define IXGBE_RXBUFFER_2048	2048
-#define IXGBE_RXBUFFER_4096	4096
-#define IXGBE_RXBUFFER_8192	8192
-#define IXGBE_MAX_RXBUFFER	16384	/* largest size for a single descriptor */
-
-/*
- * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
- */
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
-
-#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-
-/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
-
-#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
-#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 5)
-#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
-#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
-#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
-
-#define IXGBE_MAX_RSC_INT_RATE		162760
-
-#define IXGBE_MAX_VF_MC_ENTRIES		30
-#define IXGBE_MAX_VF_FUNCTIONS		64
-#define IXGBE_MAX_VFTA_ENTRIES		128
-#define MAX_EMULATION_MAC_ADDRS		16
-#define IXGBE_MAX_PF_MACVLANS		15
-#define VMDQ_P(p)	((p) + adapter->num_vfs)
-
-struct vf_data_storage {
-	unsigned char vf_mac_addresses[ETH_ALEN];
-	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
-	u16 num_vf_mc_hashes;
-	u16 default_vf_vlan_id;
-	u16 vlans_enabled;
-	bool clear_to_send;
-	bool pf_set_mac;
-	u16 pf_vlan; /* When set, guest VLAN config not allowed.
*/ - u16 pf_qos; - u16 tx_rate; -}; - -struct vf_macvlans { - struct list_head l; - int vf; - int rar_entry; - bool free; - bool is_macvlan; - u8 vf_macvlan[ETH_ALEN]; -}; - -#define IXGBE_MAX_TXD_PWR 14 -#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) - -/* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) -#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) - -/* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ -struct ixgbe_tx_buffer { - struct sk_buff *skb; - dma_addr_t dma; - unsigned long time_stamp; - u16 length; - u16 next_to_watch; - unsigned int bytecount; - u16 gso_segs; - u8 mapped_as_page; -}; - -struct ixgbe_rx_buffer { - struct sk_buff *skb; - dma_addr_t dma; - struct page *page; - dma_addr_t page_dma; - unsigned int page_offset; -}; - -struct ixgbe_queue_stats { - u64 packets; - u64 bytes; -}; - -struct ixgbe_tx_queue_stats { - u64 restart_queue; - u64 tx_busy; - u64 completed; - u64 tx_done_old; -}; - -struct ixgbe_rx_queue_stats { - u64 rsc_count; - u64 rsc_flush; - u64 non_eop_descs; - u64 alloc_rx_page_failed; - u64 alloc_rx_buff_failed; -}; - -enum ixbge_ring_state_t { - __IXGBE_TX_FDIR_INIT_DONE, - __IXGBE_TX_DETECT_HANG, - __IXGBE_HANG_CHECK_ARMED, - __IXGBE_RX_PS_ENABLED, - __IXGBE_RX_RSC_ENABLED, -}; - -#define ring_is_ps_enabled(ring) \ - test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) -#define set_ring_ps_enabled(ring) \ - set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) -#define clear_ring_ps_enabled(ring) \ - clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) -#define check_for_tx_hang(ring) \ - test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) -#define set_check_for_tx_hang(ring) \ - set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) -#define clear_check_for_tx_hang(ring) \ - clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) -#define ring_is_rsc_enabled(ring) \ - test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) -#define set_ring_rsc_enabled(ring) \ - set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) -#define clear_ring_rsc_enabled(ring) \ - clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) -struct ixgbe_ring { - void *desc; /* descriptor ring memory */ - struct device *dev; /* device for DMA mapping */ - struct net_device *netdev; /* netdev ring belongs to */ - union { - struct ixgbe_tx_buffer *tx_buffer_info; - struct ixgbe_rx_buffer *rx_buffer_info; - }; - unsigned long state; - u8 __iomem *tail; - - u16 count; /* amount of descriptors */ - u16 rx_buf_len; - - u8 queue_index; /* needed for multiqueue queue management */ - u8 reg_idx; /* holds the special value that gets - * the hardware register offset - * associated with this ring, which is - * different for DCB and RSS modes - */ - u8 atr_sample_rate; - u8 atr_count; - - u16 next_to_use; - u16 next_to_clean; - - u8 dcb_tc; - struct ixgbe_queue_stats stats; - struct u64_stats_sync syncp; - union { - struct ixgbe_tx_queue_stats tx_stats; - struct ixgbe_rx_queue_stats rx_stats; - }; - int numa_node; - unsigned int size; /* length in bytes */ - dma_addr_t dma; /* phys. 
address of descriptor ring */ - struct rcu_head rcu; - struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */ -} ____cacheline_internodealigned_in_smp; - -enum ixgbe_ring_f_enum { - RING_F_NONE = 0, - RING_F_VMDQ, /* SR-IOV uses the same ring feature */ - RING_F_RSS, - RING_F_FDIR, -#ifdef IXGBE_FCOE - RING_F_FCOE, -#endif /* IXGBE_FCOE */ - - RING_F_ARRAY_SIZE /* must be last in enum set */ -}; - -#define IXGBE_MAX_RSS_INDICES 16 -#define IXGBE_MAX_VMDQ_INDICES 64 -#define IXGBE_MAX_FDIR_INDICES 64 -#ifdef IXGBE_FCOE -#define IXGBE_MAX_FCOE_INDICES 8 -#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) -#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) -#else -#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES -#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES -#endif /* IXGBE_FCOE */ -struct ixgbe_ring_feature { - int indices; - int mask; -} ____cacheline_internodealigned_in_smp; - -struct ixgbe_ring_container { -#if MAX_RX_QUEUES > MAX_TX_QUEUES - DECLARE_BITMAP(idx, MAX_RX_QUEUES); -#else - DECLARE_BITMAP(idx, MAX_TX_QUEUES); -#endif - unsigned int total_bytes; /* total bytes processed this int */ - unsigned int total_packets; /* total packets processed this int */ - u16 work_limit; /* total work allowed per interrupt */ - u8 count; /* total number of rings in vector */ - u8 itr; /* current ITR setting for ring */ -}; - -#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ - ? 8 : 1) -#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS - -/* MAX_MSIX_Q_VECTORS of these are allocated, - * but we only use one per queue-specific vector. - */ -struct ixgbe_q_vector { - struct ixgbe_adapter *adapter; - unsigned int v_idx; /* index of q_vector within array, also used for - * finding the bit in EICR and friends that - * represents the vector for this ring */ -#ifdef CONFIG_IXGBE_DCA - int cpu; /* CPU for DCA */ -#endif - struct napi_struct napi; - struct ixgbe_ring_container rx, tx; - u32 eitr; - cpumask_var_t affinity_mask; - char name[IFNAMSIZ + 9]; -}; - -/* Helper macros to switch between ints/sec and what the register uses. - * And yes, it's the same math going both ways. The lowest value - * supported by all of the ixgbe hardware is 8. - */ -#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ - ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) -#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG - -static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) -{ - u16 ntc = ring->next_to_clean; - u16 ntu = ring->next_to_use; - - return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; -} - -#define IXGBE_RX_DESC_ADV(R, i) \ - (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) -#define IXGBE_TX_DESC_ADV(R, i) \ - (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) -#define IXGBE_TX_CTXTDESC_ADV(R, i) \ - (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) - -#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 -#ifdef IXGBE_FCOE -/* Use 3K as the baby jumbo frame size for FCoE */ -#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 -#endif /* IXGBE_FCOE */ - -#define OTHER_VECTOR 1 -#define NON_Q_VECTORS (OTHER_VECTOR) - -#define MAX_MSIX_VECTORS_82599 64 -#define MAX_MSIX_Q_VECTORS_82599 64 -#define MAX_MSIX_VECTORS_82598 18 -#define MAX_MSIX_Q_VECTORS_82598 16 - -#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599 -#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 - -#define MIN_MSIX_Q_VECTORS 2 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) - -/* board specific private data structure */ -struct ixgbe_adapter { - unsigned long state; - - /* Some features need tri-state capability, - * thus the additional *_CAPABLE flags. - */ - u32 flags; -#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) -#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1) -#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2) -#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3) -#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4) -#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6) -#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7) -#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9) -#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11) -#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12) -#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13) -#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14) -#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16) -#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17) -#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18) -#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19) -#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20) -#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22) -#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23) -#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24) -#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25) -#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26) -#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27) -#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28) -#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29) - - u32 flags2; -#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) -#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) -#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3) -#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4) -#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5) -#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6) -#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) - - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u16 bd_number; - struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; - - /* DCB parameters */ - struct ieee_pfc *ixgbe_ieee_pfc; - struct ieee_ets *ixgbe_ieee_ets; - struct ixgbe_dcb_config dcb_cfg; - struct ixgbe_dcb_config temp_dcb_cfg; - u8 dcb_set_bitmap; - u8 dcbx_cap; - enum ixgbe_fc_mode last_lfc_mode; - - /* Interrupt Throttle Rate */ - u32 rx_itr_setting; - u32 tx_itr_setting; - u16 eitr_low; - u16 eitr_high; - - /* Work limits */ - u16 tx_work_limit; - - /* TX */ - struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; - int num_tx_queues; - u32 tx_timeout_count; - bool detect_tx_hung; - - u64 restart_queue; - u64 lsc_int; - - /* RX 
*/ - struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp; - int num_rx_queues; - int num_rx_pools; /* == num_rx_queues in 82598 */ - int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ - u64 hw_csum_rx_error; - u64 hw_rx_no_dma_resources; - u64 non_eop_descs; - int num_msix_vectors; - int max_msix_q_vectors; /* true count of q_vectors for device */ - struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; - struct msix_entry *msix_entries; - - u32 alloc_rx_page_failed; - u32 alloc_rx_buff_failed; - -/* default to trying for four seconds */ -#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) - - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - u32 test_icr; - struct ixgbe_ring test_tx_ring; - struct ixgbe_ring test_rx_ring; - - /* structs defined in ixgbe_hw.h */ - struct ixgbe_hw hw; - u16 msg_enable; - struct ixgbe_hw_stats stats; - - /* Interrupt Throttle Rate */ - u32 rx_eitr_param; - u32 tx_eitr_param; - - u64 tx_busy; - unsigned int tx_ring_count; - unsigned int rx_ring_count; - - u32 link_speed; - bool link_up; - unsigned long link_check_timeout; - - struct work_struct service_task; - struct timer_list service_timer; - u32 fdir_pballoc; - u32 atr_sample_rate; - unsigned long fdir_overflow; /* number of times ATR was backed off */ - spinlock_t fdir_perfect_lock; -#ifdef IXGBE_FCOE - struct ixgbe_fcoe fcoe; -#endif /* IXGBE_FCOE */ - u64 rsc_total_count; - u64 rsc_total_flush; - u32 wol; - u16 eeprom_version; - - int node; - u32 led_reg; - u32 interrupt_event; - char lsc_int_name[IFNAMSIZ + 9]; - - /* SR-IOV */ - DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); - unsigned int num_vfs; - struct vf_data_storage *vfinfo; - int vf_rate_link_speed; - struct vf_macvlans vf_mvs; - struct vf_macvlans *mv_list; - bool antispoofing_enabled; - - struct hlist_head fdir_filter_list; - union ixgbe_atr_input fdir_mask; - int fdir_filter_count; -}; - -struct ixgbe_fdir_filter { - struct hlist_node fdir_node; - union ixgbe_atr_input filter; - u16 sw_idx; - u16 action; -}; - -enum ixbge_state_t { - __IXGBE_TESTING, - __IXGBE_RESETTING, - __IXGBE_DOWN, - __IXGBE_SERVICE_SCHED, - __IXGBE_IN_SFP_INIT, -}; - -struct ixgbe_rsc_cb { - dma_addr_t dma; - u16 skb_cnt; - bool delay_unmap; -}; -#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) - -enum ixgbe_boards { - board_82598, - board_82599, - board_X540, -}; - -extern struct ixgbe_info ixgbe_82598_info; -extern struct ixgbe_info ixgbe_82599_info; -extern struct ixgbe_info ixgbe_X540_info; -#ifdef CONFIG_IXGBE_DCB -extern const struct dcbnl_rtnl_ops dcbnl_ops; -extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, - struct ixgbe_dcb_config *dst_dcb_cfg, - int tc_max); -#endif - -extern char ixgbe_driver_name[]; -extern const char ixgbe_driver_version[]; - -extern int ixgbe_up(struct ixgbe_adapter *adapter); -extern void ixgbe_down(struct ixgbe_adapter *adapter); -extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); -extern void ixgbe_reset(struct ixgbe_adapter *adapter); -extern void ixgbe_set_ethtool_ops(struct net_device *netdev); -extern int ixgbe_setup_rx_resources(struct ixgbe_ring *); -extern int ixgbe_setup_tx_resources(struct ixgbe_ring *); -extern void ixgbe_free_rx_resources(struct ixgbe_ring *); -extern void ixgbe_free_tx_resources(struct ixgbe_ring *); -extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); -extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); -extern void 
ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, - struct ixgbe_ring *); -extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); -extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); -extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); -extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, - struct ixgbe_adapter *, - struct ixgbe_ring *); -extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, - struct ixgbe_tx_buffer *); -extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); -extern void ixgbe_write_eitr(struct ixgbe_q_vector *); -extern int ethtool_ioctl(struct ifreq *ifr); -extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); -extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); -extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); -extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common, - u8 queue); -extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input_mask); -extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id, u8 queue); -extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id); -extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, - union ixgbe_atr_input *mask); -extern void ixgbe_set_rx_mode(struct net_device *netdev); -extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); -extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); -extern void ixgbe_do_reset(struct net_device *netdev); -#ifdef IXGBE_FCOE -extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); -extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, u8 *hdr_len); -extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); -extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb, - u32 staterr); -extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc); -extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc); -extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); -extern int ixgbe_fcoe_enable(struct net_device *netdev); -extern int ixgbe_fcoe_disable(struct net_device *netdev); -#ifdef CONFIG_IXGBE_DCB -extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); -extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); -#endif /* CONFIG_IXGBE_DCB */ -extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); -#endif /* IXGBE_FCOE */ - -#endif /* _IXGBE_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c deleted file mode 100644 index 0d4e382..0000000 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ /dev/null @@ -1,1353 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
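Two pieces of ring arithmetic defined in the ixgbe.h removed above are easy to misread, so here is a standalone sketch (not driver code) of how the first behaves:

	/* Standalone sketch of ixgbe_desc_unused from ixgbe.h above.
	 * With count = 512, next_to_clean = 10, next_to_use = 500:
	 *   (10 > 500 ? 0 : 512) + 10 - 500 - 1 = 21 free descriptors.
	 * The trailing "- 1" keeps one slot permanently unused so a
	 * completely full ring never looks identical to an empty one. */
	static unsigned short desc_unused(unsigned short count,
					  unsigned short ntc,
					  unsigned short ntu)
	{
		return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
	}

Likewise DESC_NEEDED is worst-case bookkeeping: with 4 KiB pages, TXD_USE_COUNT(PAGE_SIZE) = DIV_ROUND_UP(4096, 16384) = 1, so every page-sized fragment costs exactly one descriptor and DESC_NEEDED collapses to MAX_SKB_FRAGS + 4, the extra four descriptors serving as slack for the linear data segment and any offload context descriptors.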
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include -#include - -#include "ixgbe.h" -#include "ixgbe_phy.h" - -#define IXGBE_82598_MAX_TX_QUEUES 32 -#define IXGBE_82598_MAX_RX_QUEUES 64 -#define IXGBE_82598_RAR_ENTRIES 16 -#define IXGBE_82598_MC_TBL_SIZE 128 -#define IXGBE_82598_VFT_TBL_SIZE 128 -#define IXGBE_82598_RX_PB_SIZE 512 - -static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete); -static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data); - -/** - * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout - * @hw: pointer to the HW structure - * - * The defaults for 82598 should be in the range of 50us to 50ms, - * however the hardware default for these parts is 500us to 1ms which is less - * than the 10ms recommended by the pci-e spec. To address this we need to - * increase the value to either 10ms to 250ms for capability version 1 config, - * or 16ms to 55ms for version 2. - **/ -static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = hw->back; - u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); - u16 pcie_devctl2; - - /* only take action if timeout value is defaulted to 0 */ - if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) - goto out; - - /* - * if capababilities version is type 1 we can write the - * timeout of 10ms to 250ms through the GCR register - */ - if (!(gcr & IXGBE_GCR_CAP_VER2)) { - gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; - goto out; - } - - /* - * for version 2 capabilities we need to write the config space - * directly in order to set the completion timeout value for - * 16ms to 55ms - */ - pci_read_config_word(adapter->pdev, - IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2); - pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; - pci_write_config_word(adapter->pdev, - IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); -out: - /* disable completion timeout resend */ - gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; - IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); -} - -/** - * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count - * @hw: pointer to hardware structure - * - * Read PCIe configuration space, and get the MSI-X vector count from - * the capabilities table. 
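The decode that follows is worth spelling out: the PCIe MSI-X capability stores the table size as N - 1 in an 11-bit field, which is why the function masks and then increments. A minimal sketch, with the 0x7FF literal standing in for IXGBE_PCIE_MSIX_TBL_SZ_MASK:

	/* A raw capability word of 0x003F decodes to 64 usable vectors. */
	static unsigned short msix_vectors_from_caps(unsigned short caps_word)
	{
		return (caps_word & 0x7FF) + 1;
	}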
- **/ -static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = hw->back; - u16 msix_count; - pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS, - &msix_count); - msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; - - /* MSI-X count is zero-based in HW, so increment to give proper value */ - msix_count++; - - return msix_count; -} - -/** - */ -static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - /* Call PHY identify routine to get the phy type */ - ixgbe_identify_phy_generic(hw); - - mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; - mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; - mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); - - return 0; -} - -/** - * ixgbe_init_phy_ops_82598 - PHY/SFP specific init - * @hw: pointer to hardware structure - * - * Initialize any function pointers that were not able to be - * set during get_invariants because the PHY/SFP type was - * not known. Perform the SFP init if necessary. - * - **/ -static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val = 0; - u16 list_offset, data_offset; - - /* Identify the PHY */ - phy->ops.identify(hw); - - /* Overwrite the link function pointers if copper PHY */ - if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = &ixgbe_setup_copper_link_82598; - mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_generic; - } - - switch (hw->phy.type) { - case ixgbe_phy_tn: - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.check_link = &ixgbe_check_phy_link_tnx; - phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; - break; - case ixgbe_phy_nl: - phy->ops.reset = &ixgbe_reset_phy_nl; - - /* Call SFP+ identify routine to get the SFP+ module type */ - ret_val = phy->ops.identify_sfp(hw); - if (ret_val != 0) - goto out; - else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { - ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; - goto out; - } - - /* Check to see if SFP+ module is supported */ - ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, - &list_offset, - &data_offset); - if (ret_val != 0) { - ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; - goto out; - } - break; - default: - break; - } - -out: - return ret_val; -} - -/** - * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure - * - * Starts the hardware using the generic start_hw function. 
- * Disables relaxed ordering Then set pcie completion timeout - * - **/ -static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) -{ - u32 regval; - u32 i; - s32 ret_val = 0; - - ret_val = ixgbe_start_hw_generic(hw); - - /* Disable relaxed ordering */ - for (i = 0; ((i < hw->mac.max_tx_queues) && - (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { - regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); - regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); - } - - for (i = 0; ((i < hw->mac.max_rx_queues) && - (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { - regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | - IXGBE_DCA_RXCTRL_DESC_HSRO_EN); - IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); - } - - hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE; - - /* set the completion timeout for interface */ - if (ret_val == 0) - ixgbe_set_pcie_completion_timeout(hw); - - return ret_val; -} - -/** - * ixgbe_get_link_capabilities_82598 - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: boolean auto-negotiation value - * - * Determines the link capabilities by reading the AUTOC register. - **/ -static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - s32 status = 0; - u32 autoc = 0; - - /* - * Determine link capabilities based on the stored value of AUTOC, - * which represents EEPROM defaults. If AUTOC value has not been - * stored, use the current register value. - */ - if (hw->mac.orig_link_settings_stored) - autoc = hw->mac.orig_autoc; - else - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - - switch (autoc & IXGBE_AUTOC_LMS_MASK) { - case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = false; - break; - - case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - *autoneg = false; - break; - - case IXGBE_AUTOC_LMS_1G_AN: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - break; - - case IXGBE_AUTOC_LMS_KX4_AN: - case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - if (autoc & IXGBE_AUTOC_KX4_SUPP) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (autoc & IXGBE_AUTOC_KX_SUPP) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - break; - - default: - status = IXGBE_ERR_LINK_SETUP; - break; - } - - return status; -} - -/** - * ixgbe_get_media_type_82598 - Determines media type - * @hw: pointer to hardware structure - * - * Returns the media type (fiber, copper, backplane) - **/ -static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) -{ - enum ixgbe_media_type media_type; - - /* Detect if there is a copper PHY attached. 
*/ - switch (hw->phy.type) { - case ixgbe_phy_cu_unknown: - case ixgbe_phy_tn: - case ixgbe_phy_aq: - media_type = ixgbe_media_type_copper; - goto out; - default: - break; - } - - /* Media type for I82598 is based on device ID */ - switch (hw->device_id) { - case IXGBE_DEV_ID_82598: - case IXGBE_DEV_ID_82598_BX: - /* Default device ID is mezzanine card KX/KX4 */ - media_type = ixgbe_media_type_backplane; - break; - case IXGBE_DEV_ID_82598AF_DUAL_PORT: - case IXGBE_DEV_ID_82598AF_SINGLE_PORT: - case IXGBE_DEV_ID_82598_DA_DUAL_PORT: - case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: - case IXGBE_DEV_ID_82598EB_XF_LR: - case IXGBE_DEV_ID_82598EB_SFP_LOM: - media_type = ixgbe_media_type_fiber; - break; - case IXGBE_DEV_ID_82598EB_CX4: - case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: - media_type = ixgbe_media_type_cx4; - break; - case IXGBE_DEV_ID_82598AT: - case IXGBE_DEV_ID_82598AT2: - media_type = ixgbe_media_type_copper; - break; - default: - media_type = ixgbe_media_type_unknown; - break; - } -out: - return media_type; -} - -/** - * ixgbe_fc_enable_82598 - Enable flow control - * @hw: pointer to hardware structure - * @packetbuf_num: packet buffer number (0-7) - * - * Enable flow control according to the current settings. - **/ -static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) -{ - s32 ret_val = 0; - u32 fctrl_reg; - u32 rmcs_reg; - u32 reg; - u32 rx_pba_size; - u32 link_speed = 0; - bool link_up; - -#ifdef CONFIG_DCB - if (hw->fc.requested_mode == ixgbe_fc_pfc) - goto out; - -#endif /* CONFIG_DCB */ - /* - * On 82598 having Rx FC on causes resets while doing 1G - * so if it's on turn it off once we know link_speed. For - * more details see 82598 Specification update. - */ - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { - switch (hw->fc.requested_mode) { - case ixgbe_fc_full: - hw->fc.requested_mode = ixgbe_fc_tx_pause; - break; - case ixgbe_fc_rx_pause: - hw->fc.requested_mode = ixgbe_fc_none; - break; - default: - /* no change */ - break; - } - } - - /* Negotiate the fc mode to use */ - ret_val = ixgbe_fc_autoneg(hw); - if (ret_val == IXGBE_ERR_FLOW_CONTROL) - goto out; - - /* Disable any previous flow control settings */ - fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); - - rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); - - /* - * The possible values of fc.current_mode are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. -#ifdef CONFIG_DCB - * 4: Priority Flow Control is enabled. -#endif - * other: Invalid. - */ - switch (hw->fc.current_mode) { - case ixgbe_fc_none: - /* - * Flow control is disabled by software override or autoneg. - * The code below will actually disable it in the HW. - */ - break; - case ixgbe_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is - * disabled by software override. Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE. Later, we will - * disable the adapter's ability to send PAUSE frames. 
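The four reachable modes map onto exactly two hardware knobs, which the switch below sets independently:

	requested mode      FCTRL.RFCE (honour Rx pause)  RMCS.TFCE_802_3X (send Tx pause)
	ixgbe_fc_none       0                             0
	ixgbe_fc_rx_pause   1                             0
	ixgbe_fc_tx_pause   0                             1
	ixgbe_fc_full       1                             1

Note also the 1G erratum handling earlier in this function: once link comes up at 1GbE the requested mode is first downgraded (full becomes tx_pause, rx_pause becomes none) so that Rx flow control is never left enabled where it can cause resets.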
- */ - fctrl_reg |= IXGBE_FCTRL_RFCE; - break; - case ixgbe_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is - * disabled by software override. - */ - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; - break; - case ixgbe_fc_full: - /* Flow control (both Rx and Tx) is enabled by SW override. */ - fctrl_reg |= IXGBE_FCTRL_RFCE; - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; - break; -#ifdef CONFIG_DCB - case ixgbe_fc_pfc: - goto out; - break; -#endif /* CONFIG_DCB */ - default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - ret_val = IXGBE_ERR_CONFIG; - goto out; - break; - } - - /* Set 802.3x based flow control settings. */ - fctrl_reg |= IXGBE_FCTRL_DPF; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); - IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); - - /* Set up and enable Rx high/low water mark thresholds, enable XON. */ - if (hw->fc.current_mode & ixgbe_fc_tx_pause) { - rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); - rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; - - reg = (rx_pba_size - hw->fc.low_water) << 6; - if (hw->fc.send_xon) - reg |= IXGBE_FCRTL_XONE; - - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg); - - reg = (rx_pba_size - hw->fc.high_water) << 6; - reg |= IXGBE_FCRTH_FCEN; - - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg); - } - - /* Configure pause time (2 TCs per register) */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); - if ((packetbuf_num & 1) == 0) - reg = (reg & 0xFFFF0000) | hw->fc.pause_time; - else - reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); - - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); - -out: - return ret_val; -} - -/** - * ixgbe_start_mac_link_82598 - Configures MAC link settings - * @hw: pointer to hardware structure - * - * Configures link settings based on values in the ixgbe_hw struct. - * Restarts the link. Performs autonegotiation if needed. - **/ -static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, - bool autoneg_wait_to_complete) -{ - u32 autoc_reg; - u32 links_reg; - u32 i; - s32 status = 0; - - /* Restart link */ - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - - /* Only poll for autoneg to complete if specified to do so */ - if (autoneg_wait_to_complete) { - if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == - IXGBE_AUTOC_LMS_KX4_AN || - (autoc_reg & IXGBE_AUTOC_LMS_MASK) == - IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { - links_reg = 0; /* Just in case Autoneg time = 0 */ - for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - if (links_reg & IXGBE_LINKS_KX_AN_COMP) - break; - msleep(100); - } - if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { - status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; - hw_dbg(hw, "Autonegotiation did not complete.\n"); - } - } - } - - /* Add delay to filter out noises during initial link setup */ - msleep(50); - - return status; -} - -/** - * ixgbe_validate_link_ready - Function looks for phy link - * @hw: pointer to hardware structure - * - * Function indicates success when phy link is available. If phy is not ready - * within 5 seconds of MAC indicating link, the function returns error. 
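One detail from ixgbe_fc_enable_82598 above deserves a sketch: each FCTTV register holds two 16-bit pause timers, so packet buffer n addresses register n / 2 and odd-numbered buffers land in the upper half-word. A standalone model of that packing:

	/* Buffer 5 targets FCTTV(2) and, being odd, the top 16 bits. */
	static unsigned int pack_pause_time(unsigned int fcttv_reg,
					    int packetbuf_num,
					    unsigned short pause_time)
	{
		if ((packetbuf_num & 1) == 0)
			return (fcttv_reg & 0xFFFF0000) | pause_time;
		return (fcttv_reg & 0x0000FFFF) |
		       ((unsigned int)pause_time << 16);
	}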
- **/ -static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) -{ - u32 timeout; - u16 an_reg; - - if (hw->device_id != IXGBE_DEV_ID_82598AT2) - return 0; - - for (timeout = 0; - timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { - hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg); - - if ((an_reg & MDIO_AN_STAT1_COMPLETE) && - (an_reg & MDIO_STAT1_LSTATUS)) - break; - - msleep(100); - } - - if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { - hw_dbg(hw, "Link was indicated but link is down\n"); - return IXGBE_ERR_LINK_SETUP; - } - - return 0; -} - -/** - * ixgbe_check_mac_link_82598 - Get link/speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true is link is up, false otherwise - * @link_up_wait_to_complete: bool used to wait for link up or not - * - * Reads the links register to determine if link is up and the current speed - **/ -static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, bool *link_up, - bool link_up_wait_to_complete) -{ - u32 links_reg; - u32 i; - u16 link_reg, adapt_comp_reg; - - /* - * SERDES PHY requires us to read link status from register 0xC79F. - * Bit 0 set indicates link is up/ready; clear indicates link down. - * 0xC00C is read to check that the XAUI lanes are active. Bit 0 - * clear indicates active; set indicates inactive. - */ - if (hw->phy.type == ixgbe_phy_nl) { - hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); - hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); - hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, - &adapt_comp_reg); - if (link_up_wait_to_complete) { - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { - if ((link_reg & 1) && - ((adapt_comp_reg & 1) == 0)) { - *link_up = true; - break; - } else { - *link_up = false; - } - msleep(100); - hw->phy.ops.read_reg(hw, 0xC79F, - MDIO_MMD_PMAPMD, - &link_reg); - hw->phy.ops.read_reg(hw, 0xC00C, - MDIO_MMD_PMAPMD, - &adapt_comp_reg); - } - } else { - if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) - *link_up = true; - else - *link_up = false; - } - - if (*link_up == false) - goto out; - } - - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - if (link_up_wait_to_complete) { - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { - if (links_reg & IXGBE_LINKS_UP) { - *link_up = true; - break; - } else { - *link_up = false; - } - msleep(100); - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - } - } else { - if (links_reg & IXGBE_LINKS_UP) - *link_up = true; - else - *link_up = false; - } - - if (links_reg & IXGBE_LINKS_SPEED) - *speed = IXGBE_LINK_SPEED_10GB_FULL; - else - *speed = IXGBE_LINK_SPEED_1GB_FULL; - - if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && - (ixgbe_validate_link_ready(hw) != 0)) - *link_up = false; - - /* if link is down, zero out the current_mode */ - if (*link_up == false) { - hw->fc.current_mode = ixgbe_fc_none; - hw->fc.fc_was_autonegged = false; - } -out: - return 0; -} - -/** - * ixgbe_setup_mac_link_82598 - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg: true if auto-negotiation enabled - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Set the link speed in the AUTOC register and restarts link. 
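A small C idiom in ixgbe_validate_link_ready above is worth calling out: the loop counter doubles as the expiry flag, because break leaves timeout strictly below IXGBE_VALIDATE_LINK_READY_TIMEOUT, so equality after the loop can only mean every poll failed. The same pattern in isolation:

	/* Returns 0 once ready() succeeds, -1 if all "limit" polls fail. */
	static int poll_with_timeout(int (*ready)(void), int limit)
	{
		int i;

		for (i = 0; i < limit; i++)
			if (ready())
				break;
		return (i == limit) ? -1 : 0;
	}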
- **/ -static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) -{ - s32 status = 0; - ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; - u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 autoc = curr_autoc; - u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; - - /* Check to see if speed passed in is supported. */ - ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg); - speed &= link_capabilities; - - if (speed == IXGBE_LINK_SPEED_UNKNOWN) - status = IXGBE_ERR_LINK_SETUP; - - /* Set KX4/KX support according to speed requested */ - else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || - link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { - autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - autoc |= IXGBE_AUTOC_KX4_SUPP; - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - autoc |= IXGBE_AUTOC_KX_SUPP; - if (autoc != curr_autoc) - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); - } - - if (status == 0) { - /* - * Setup and restart the link based on the new values in - * ixgbe_hw This will write the AUTOC register based on the new - * stored values - */ - status = ixgbe_start_mac_link_82598(hw, - autoneg_wait_to_complete); - } - - return status; -} - - -/** - * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg: true if autonegotiation enabled - * @autoneg_wait_to_complete: true if waiting is needed to complete - * - * Sets the link speed in the AUTOC register in the MAC and restarts link. - **/ -static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete) -{ - s32 status; - - /* Setup the PHY according to input speed */ - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, - autoneg_wait_to_complete); - /* Set up MAC */ - ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); - - return status; -} - -/** - * ixgbe_reset_hw_82598 - Performs hardware reset - * @hw: pointer to hardware structure - * - * Resets the hardware by resetting the transmit and receive units, masks and - * clears all interrupts, performing a PHY reset, and performing a link (MAC) - * reset. - **/ -static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) -{ - s32 status = 0; - s32 phy_status = 0; - u32 ctrl; - u32 gheccr; - u32 i; - u32 autoc; - u8 analog_val; - - /* Call adapter stop to disable tx/rx and clear interrupts */ - hw->mac.ops.stop_adapter(hw); - - /* - * Power up the Atlas Tx lanes if they are currently powered down. - * Atlas Tx lanes are powered down for MAC loopback tests, but - * they are not automatically restored on reset. 
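The speed negotiation in ixgbe_setup_mac_link_82598 above is a simple intersection: the caller's request is ANDed with what ixgbe_get_link_capabilities_82598 reports. Asking for 10GB | 1GB against a 1GB-only configuration masks down to 1GB and proceeds; asking for 10GB alone against the same configuration masks to IXGBE_LINK_SPEED_UNKNOWN (zero) and fails with IXGBE_ERR_LINK_SETUP. Only when the surviving KX4/KX bits differ from the current AUTOC contents is the register rewritten, avoiding a needless link flap.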
- */ - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); - if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { - /* Enable Tx Atlas so packets can be transmitted again */ - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, - &analog_val); - analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, - analog_val); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, - &analog_val); - analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, - analog_val); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, - &analog_val); - analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, - analog_val); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, - &analog_val); - analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, - analog_val); - } - - /* Reset PHY */ - if (hw->phy.reset_disable == false) { - /* PHY ops must be identified and initialized prior to reset */ - - /* Init PHY and function pointers, perform SFP setup */ - phy_status = hw->phy.ops.init(hw); - if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto reset_hw_out; - else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) - goto no_phy_reset; - - hw->phy.ops.reset(hw); - } - -no_phy_reset: - /* - * Prevent the PCI-E bus from from hanging by disabling PCI-E master - * access and verify no pending requests before reset - */ - ixgbe_disable_pcie_master(hw); - -mac_reset_top: - /* - * Issue global reset to the MAC. This needs to be a SW reset. - * If link reset is used, it might reset the MAC when mng is using it - */ - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); - IXGBE_WRITE_FLUSH(hw); - - /* Poll for reset bit to self-clear indicating reset is complete */ - for (i = 0; i < 10; i++) { - udelay(1); - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - if (!(ctrl & IXGBE_CTRL_RST)) - break; - } - if (ctrl & IXGBE_CTRL_RST) { - status = IXGBE_ERR_RESET_FAILED; - hw_dbg(hw, "Reset polling failed to complete.\n"); - } - - /* - * Double resets are required for recovery from certain error - * conditions. Between resets, it is necessary to stall to allow time - * for any pending HW events to complete. We use 1usec since that is - * what is needed for ixgbe_disable_pcie_master(). The second reset - * then clears out any effects of those events. - */ - if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { - hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; - udelay(1); - goto mac_reset_top; - } - - msleep(50); - - gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); - gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); - IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); - - /* - * Store the original AUTOC value if it has not been - * stored off yet. Otherwise restore the stored original - * AUTOC value since the reset operation sets back to deaults. 
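The goto-based retry in ixgbe_reset_hw_82598 above is bounded: IXGBE_FLAGS_DOUBLE_RESET_REQUIRED is cleared before jumping back to mac_reset_top, so the reset body runs at most twice. Unrolled into a loop, the shape is roughly this (issue_reset and poll_reset_done are stand-ins for the CTRL.RST write and the ten-iteration polling loop):

	static int reset_with_retry(int (*issue_reset)(void),
				    int (*poll_reset_done)(void),
				    int double_reset_required)
	{
		int passes = double_reset_required ? 2 : 1;
		int status = 0;

		while (passes--) {
			issue_reset();
			if (!poll_reset_done())
				status = -1;	/* RST failed to self-clear */
		}
		return status;
	}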
- */ - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - if (hw->mac.orig_link_settings_stored == false) { - hw->mac.orig_autoc = autoc; - hw->mac.orig_link_settings_stored = true; - } else if (autoc != hw->mac.orig_autoc) { - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); - } - - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - - /* - * Store MAC address from RAR0, clear receive address registers, and - * clear the multicast table - */ - hw->mac.ops.init_rx_addrs(hw); - -reset_hw_out: - if (phy_status) - status = phy_status; - - return status; -} - -/** - * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to associate with a VMDq index - * @vmdq: VMDq set index - **/ -static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) -{ - u32 rar_high; - u32 rar_entries = hw->mac.num_rar_entries; - - /* Make sure we are using a valid rar index range */ - if (rar >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); - rar_high &= ~IXGBE_RAH_VIND_MASK; - rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); - IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); - return 0; -} - -/** - * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to associate with a VMDq index - * @vmdq: VMDq clear index (not used in 82598, but elsewhere) - **/ -static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) -{ - u32 rar_high; - u32 rar_entries = hw->mac.num_rar_entries; - - - /* Make sure we are using a valid rar index range */ - if (rar >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); - if (rar_high & IXGBE_RAH_VIND_MASK) { - rar_high &= ~IXGBE_RAH_VIND_MASK; - IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); - } - - return 0; -} - -/** - * ixgbe_set_vfta_82598 - Set VLAN filter table - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFTA - * @vlan_on: boolean flag to turn on/off VLAN in VFTA - * - * Turn on/off specified VLAN in the VLAN filter table. 
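Worked through for VLAN id 1234, the indexing used below is:

	regindex = (1234 >> 5) & 0x7F = 38	/* which 32-bit VFTA word */
	bitindex =  1234 & 0x1F       = 18	/* bit within that word */
	vftabyte = (1234 >> 3) & 0x03 =  2	/* which VFTAVIND byte array */
	nibble   = (1234 & 0x7) << 2  =  8	/* bit offset of the 4-bit VMDq index */

so each VLAN id owns one presence bit in VFTA plus a 4-bit VMDq queue index spread across the VFTAVIND registers.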
- **/ -static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) -{ - u32 regindex; - u32 bitindex; - u32 bits; - u32 vftabyte; - - if (vlan > 4095) - return IXGBE_ERR_PARAM; - - /* Determine 32-bit word position in array */ - regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ - - /* Determine the location of the (VMD) queue index */ - vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ - bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ - - /* Set the nibble for VMD queue index */ - bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); - bits &= (~(0x0F << bitindex)); - bits |= (vind << bitindex); - IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); - - /* Determine the location of the bit for this VLAN id */ - bitindex = vlan & 0x1F; /* lower five bits */ - - bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); - if (vlan_on) - /* Turn on this VLAN id */ - bits |= (1 << bitindex); - else - /* Turn off this VLAN id */ - bits &= ~(1 << bitindex); - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); - - return 0; -} - -/** - * ixgbe_clear_vfta_82598 - Clear VLAN filter table - * @hw: pointer to hardware structure - * - * Clears the VLAN filer table, and the VMDq index associated with the filter - **/ -static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) -{ - u32 offset; - u32 vlanbyte; - - for (offset = 0; offset < hw->mac.vft_size; offset++) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); - - for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) - for (offset = 0; offset < hw->mac.vft_size; offset++) - IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), - 0); - - return 0; -} - -/** - * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register - * @hw: pointer to hardware structure - * @reg: analog register to read - * @val: read value - * - * Performs read operation to Atlas analog register specified. - **/ -static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) -{ - u32 atlas_ctl; - - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, - IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); - IXGBE_WRITE_FLUSH(hw); - udelay(10); - atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); - *val = (u8)atlas_ctl; - - return 0; -} - -/** - * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register - * @hw: pointer to hardware structure - * @reg: atlas register to write - * @val: value to write - * - * Performs write operation to Atlas analog register specified. - **/ -static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) -{ - u32 atlas_ctl; - - atlas_ctl = (reg << 8) | val; - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); - IXGBE_WRITE_FLUSH(hw); - udelay(10); - - return 0; -} - -/** - * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to read - * @eeprom_data: value read - * - * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. - **/ -static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data) -{ - s32 status = 0; - u16 sfp_addr = 0; - u16 sfp_data = 0; - u16 sfp_stat = 0; - u32 i; - - if (hw->phy.type == ixgbe_phy_nl) { - /* - * phy SDA/SCL registers are at addresses 0xC30A to - * 0xC30D. These registers are used to talk to the SFP+ - * module's EEPROM through the SDA/SCL (I2C) interface. 
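Both Atlas accessors above funnel through the single ATLASCTL window: the target analog register number sits in bits 15:8 and the data byte in bits 7:0, with the posted write flushed and a 10 us delay giving the analog block time to complete before any readback. The only asymmetry is that the read path issues the command with IXGBE_ATLASCTL_WRITE_CMD set and then pulls the result out of the low byte of the same register.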
- */ - sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset; - sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); - hw->phy.ops.write_reg(hw, - IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, - MDIO_MMD_PMAPMD, - sfp_addr); - - /* Poll status */ - for (i = 0; i < 100; i++) { - hw->phy.ops.read_reg(hw, - IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, - MDIO_MMD_PMAPMD, - &sfp_stat); - sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; - if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) - break; - usleep_range(10000, 20000); - } - - if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { - hw_dbg(hw, "EEPROM read did not pass.\n"); - status = IXGBE_ERR_SFP_NOT_PRESENT; - goto out; - } - - /* Read data */ - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, - MDIO_MMD_PMAPMD, &sfp_data); - - *eeprom_data = (u8)(sfp_data >> 8); - } else { - status = IXGBE_ERR_PHY; - goto out; - } - -out: - return status; -} - -/** - * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type - * @hw: pointer to hardware structure - * - * Determines physical layer capabilities of the current configuration. - **/ -static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) -{ - u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; - u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; - u16 ext_ability = 0; - - hw->phy.ops.identify(hw); - - /* Copper PHY must be checked before AUTOC LMS to determine correct - * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ - switch (hw->phy.type) { - case ixgbe_phy_tn: - case ixgbe_phy_aq: - case ixgbe_phy_cu_unknown: - hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, - MDIO_MMD_PMAPMD, &ext_ability); - if (ext_ability & MDIO_PMA_EXTABLE_10GBT) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; - if (ext_ability & MDIO_PMA_EXTABLE_1000BT) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; - if (ext_ability & MDIO_PMA_EXTABLE_100BTX) - physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; - goto out; - default: - break; - } - - switch (autoc & IXGBE_AUTOC_LMS_MASK) { - case IXGBE_AUTOC_LMS_1G_AN: - case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: - if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; - else - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; - break; - case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: - if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; - else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; - else /* XAUI */ - physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - break; - case IXGBE_AUTOC_LMS_KX4_AN: - case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: - if (autoc & IXGBE_AUTOC_KX_SUPP) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; - if (autoc & IXGBE_AUTOC_KX4_SUPP) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; - break; - default: - break; - } - - if (hw->phy.type == ixgbe_phy_nl) { - hw->phy.ops.identify_sfp(hw); - - switch (hw->phy.sfp_type) { - case ixgbe_sfp_type_da_cu: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; - break; - case ixgbe_sfp_type_sr: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; - break; - case ixgbe_sfp_type_lr: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; - break; - default: - physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - break; - } - } - - switch (hw->device_id) { - case IXGBE_DEV_ID_82598_DA_DUAL_PORT: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; - break; - case 
IXGBE_DEV_ID_82598AF_DUAL_PORT: - case IXGBE_DEV_ID_82598AF_SINGLE_PORT: - case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; - break; - case IXGBE_DEV_ID_82598EB_XF_LR: - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; - break; - default: - break; - } - -out: - return physical_layer; -} - -/** - * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple - * port devices. - * @hw: pointer to the HW structure - * - * Calls common function and corrects issue with some single port devices - * that enable LAN1 but not LAN0. - **/ -static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) -{ - struct ixgbe_bus_info *bus = &hw->bus; - u16 pci_gen = 0; - u16 pci_ctrl2 = 0; - - ixgbe_set_lan_id_multi_port_pcie(hw); - - /* check if LAN0 is disabled */ - hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); - if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { - - hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); - - /* if LAN0 is completely disabled force function to 0 */ - if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && - !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && - !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { - - bus->func = 0; - } - } -} - -/** - * ixgbe_set_rxpba_82598 - Configure packet buffers - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure packet buffers. - */ -static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, - int strategy) -{ - u32 rxpktsize = IXGBE_RXPBSIZE_64KB; - u8 i = 0; - - if (!num_pb) - return; - - /* Setup Rx packet buffer sizes */ - switch (strategy) { - case PBA_STRATEGY_WEIGHTED: - /* Setup the first four at 80KB */ - rxpktsize = IXGBE_RXPBSIZE_80KB; - for (; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - /* Setup the last four at 48KB...don't re-init i */ - rxpktsize = IXGBE_RXPBSIZE_48KB; - /* Fall Through */ - case PBA_STRATEGY_EQUAL: - default: - /* Divide the remaining Rx packet buffer evenly among the TCs */ - for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - break; - } - - /* Setup Tx packet buffer sizes */ - for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) - IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); - - return; -} - -static struct ixgbe_mac_operations mac_ops_82598 = { - .init_hw = &ixgbe_init_hw_generic, - .reset_hw = &ixgbe_reset_hw_82598, - .start_hw = &ixgbe_start_hw_82598, - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, - .get_media_type = &ixgbe_get_media_type_82598, - .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, - .enable_rx_dma = &ixgbe_enable_rx_dma_generic, - .get_mac_addr = &ixgbe_get_mac_addr_generic, - .stop_adapter = &ixgbe_stop_adapter_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598, - .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, - .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, - .setup_link = &ixgbe_setup_mac_link_82598, - .set_rxpba = &ixgbe_set_rxpba_82598, - .check_link = &ixgbe_check_mac_link_82598, - .get_link_capabilities = &ixgbe_get_link_capabilities_82598, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, - .blink_led_start = &ixgbe_blink_led_start_generic, - .blink_led_stop = &ixgbe_blink_led_stop_generic, - .set_rar = &ixgbe_set_rar_generic, - .clear_rar = &ixgbe_clear_rar_generic, - .set_vmdq = &ixgbe_set_vmdq_82598, - .clear_vmdq = 
&ixgbe_clear_vmdq_82598, - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, - .enable_mc = &ixgbe_enable_mc_generic, - .disable_mc = &ixgbe_disable_mc_generic, - .clear_vfta = &ixgbe_clear_vfta_82598, - .set_vfta = &ixgbe_set_vfta_82598, - .fc_enable = &ixgbe_fc_enable_82598, - .set_fw_drv_ver = NULL, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, - .release_swfw_sync = &ixgbe_release_swfw_sync, -}; - -static struct ixgbe_eeprom_operations eeprom_ops_82598 = { - .init_params = &ixgbe_init_eeprom_params_generic, - .read = &ixgbe_read_eerd_generic, - .read_buffer = &ixgbe_read_eerd_buffer_generic, - .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, - .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, - .update_checksum = &ixgbe_update_eeprom_checksum_generic, -}; - -static struct ixgbe_phy_operations phy_ops_82598 = { - .identify = &ixgbe_identify_phy_generic, - .identify_sfp = &ixgbe_identify_sfp_module_generic, - .init = &ixgbe_init_phy_ops_82598, - .reset = &ixgbe_reset_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .setup_link = &ixgbe_setup_phy_link_generic, - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, - .check_overtemp = &ixgbe_tn_check_overtemp, -}; - -struct ixgbe_info ixgbe_82598_info = { - .mac = ixgbe_mac_82598EB, - .get_invariants = &ixgbe_get_invariants_82598, - .mac_ops = &mac_ops_82598, - .eeprom_ops = &eeprom_ops_82598, - .phy_ops = &phy_ops_82598, -}; - diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c deleted file mode 100644 index 34f30ec..0000000 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ /dev/null @@ -1,2263 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include -#include - -#include "ixgbe.h" -#include "ixgbe_phy.h" -#include "ixgbe_mbx.h" - -#define IXGBE_82599_MAX_TX_QUEUES 128 -#define IXGBE_82599_MAX_RX_QUEUES 128 -#define IXGBE_82599_RAR_ENTRIES 128 -#define IXGBE_82599_MC_TBL_SIZE 128 -#define IXGBE_82599_VFT_TBL_SIZE 128 -#define IXGBE_82599_RX_PB_SIZE 512 - -static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete); -static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete); -static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, - bool autoneg_wait_to_complete); -static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete); -static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete); -static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); -static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); - -static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - /* enable the laser control functions for SFP+ fiber */ - if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) { - mac->ops.disable_tx_laser = - &ixgbe_disable_tx_laser_multispeed_fiber; - mac->ops.enable_tx_laser = - &ixgbe_enable_tx_laser_multispeed_fiber; - mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; - } else { - mac->ops.disable_tx_laser = NULL; - mac->ops.enable_tx_laser = NULL; - mac->ops.flap_tx_laser = NULL; - } - - if (hw->phy.multispeed_fiber) { - /* Set up dual speed SFP+ support */ - mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; - } else { - if ((mac->ops.get_media_type(hw) == - ixgbe_media_type_backplane) && - (hw->phy.smart_speed == ixgbe_smart_speed_auto || - hw->phy.smart_speed == ixgbe_smart_speed_on) && - !ixgbe_verify_lesm_fw_enabled_82599(hw)) - mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; - else - mac->ops.setup_link = &ixgbe_setup_mac_link_82599; - } -} - -static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) -{ - s32 ret_val = 0; - u32 reg_anlp1 = 0; - u32 i = 0; - u16 list_offset, data_offset, data_value; - - if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { - ixgbe_init_mac_link_ops_82599(hw); - - hw->phy.ops.reset = NULL; - - ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, - &data_offset); - if (ret_val != 0) - goto setup_sfp_out; - - /* PHY config will finish before releasing the semaphore */ - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (ret_val != 0) { - ret_val = IXGBE_ERR_SWFW_SYNC; - goto setup_sfp_out; - } - - hw->eeprom.ops.read(hw, ++data_offset, &data_value); - while (data_value != 0xffff) { - IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); - IXGBE_WRITE_FLUSH(hw); - hw->eeprom.ops.read(hw, ++data_offset, &data_value); - } - - /* Release the semaphore */ - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - /* - * Delay obtaining semaphore again to allow FW access, - * semaphore_delay 
is in ms usleep_range needs us. - */ - usleep_range(hw->eeprom.semaphore_delay * 1000, - hw->eeprom.semaphore_delay * 2000); - - /* Now restart DSP by setting Restart_AN and clearing LMS */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, - IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | - IXGBE_AUTOC_AN_RESTART)); - - /* Wait for AN to leave state 0 */ - for (i = 0; i < 10; i++) { - usleep_range(4000, 8000); - reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); - if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) - break; - } - if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { - hw_dbg(hw, "sfp module setup not complete\n"); - ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; - goto setup_sfp_out; - } - - /* Restart DSP by setting Restart_AN and return to SFI mode */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, - IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | - IXGBE_AUTOC_AN_RESTART)); - } - -setup_sfp_out: - return ret_val; -} - -static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - ixgbe_init_mac_link_ops_82599(hw); - - mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; - mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; - mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - - return 0; -} - -/** - * ixgbe_init_phy_ops_82599 - PHY/SFP specific init - * @hw: pointer to hardware structure - * - * Initialize any function pointers that were not able to be - * set during get_invariants because the PHY/SFP type was - * not known. Perform the SFP init if necessary. - * - **/ -static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val = 0; - - /* Identify the PHY or SFP module */ - ret_val = phy->ops.identify(hw); - - /* Setup function pointers based on detected SFP module and speeds */ - ixgbe_init_mac_link_ops_82599(hw); - - /* If copper media, overwrite with copper function pointers */ - if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = &ixgbe_setup_copper_link_82599; - mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_generic; - } - - /* Set necessary function pointers based on phy type */ - switch (hw->phy.type) { - case ixgbe_phy_tn: - phy->ops.check_link = &ixgbe_check_phy_link_tnx; - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; - break; - case ixgbe_phy_aq: - phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_generic; - break; - default: - break; - } - - return ret_val; -} - -/** - * ixgbe_get_link_capabilities_82599 - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @negotiation: true when autoneg or autotry is enabled - * - * Determines the link capabilities by reading the AUTOC register. - **/ -static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *negotiation) -{ - s32 status = 0; - u32 autoc = 0; - - /* Determine 1G link capabilities off of SFP+ type */ - if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { - *speed = IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; - goto out; - } - - /* - * Determine link capabilities based on the stored value of AUTOC, - * which represents EEPROM defaults. 
If AUTOC value has not been - * stored, use the current register value. - */ - if (hw->mac.orig_link_settings_stored) - autoc = hw->mac.orig_autoc; - else - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - - switch (autoc & IXGBE_AUTOC_LMS_MASK) { - case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = false; - break; - - case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - *negotiation = false; - break; - - case IXGBE_AUTOC_LMS_1G_AN: - *speed = IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; - break; - - case IXGBE_AUTOC_LMS_10G_SERIAL: - *speed = IXGBE_LINK_SPEED_10GB_FULL; - *negotiation = false; - break; - - case IXGBE_AUTOC_LMS_KX4_KX_KR: - case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - if (autoc & IXGBE_AUTOC_KR_SUPP) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (autoc & IXGBE_AUTOC_KX4_SUPP) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (autoc & IXGBE_AUTOC_KX_SUPP) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; - break; - - case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: - *speed = IXGBE_LINK_SPEED_100_FULL; - if (autoc & IXGBE_AUTOC_KR_SUPP) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (autoc & IXGBE_AUTOC_KX4_SUPP) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (autoc & IXGBE_AUTOC_KX_SUPP) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; - break; - - case IXGBE_AUTOC_LMS_SGMII_1G_100M: - *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; - *negotiation = false; - break; - - default: - status = IXGBE_ERR_LINK_SETUP; - goto out; - break; - } - - if (hw->phy.multispeed_fiber) { - *speed |= IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - *negotiation = true; - } - -out: - return status; -} - -/** - * ixgbe_get_media_type_82599 - Get media type - * @hw: pointer to hardware structure - * - * Returns the media type (fiber, copper, backplane) - **/ -static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) -{ - enum ixgbe_media_type media_type; - - /* Detect if there is a copper PHY attached. */ - switch (hw->phy.type) { - case ixgbe_phy_cu_unknown: - case ixgbe_phy_tn: - case ixgbe_phy_aq: - media_type = ixgbe_media_type_copper; - goto out; - default: - break; - } - - switch (hw->device_id) { - case IXGBE_DEV_ID_82599_KX4: - case IXGBE_DEV_ID_82599_KX4_MEZZ: - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: - case IXGBE_DEV_ID_82599_KR: - case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: - case IXGBE_DEV_ID_82599_XAUI_LOM: - /* Default device ID is mezzanine card KX/KX4 */ - media_type = ixgbe_media_type_backplane; - break; - case IXGBE_DEV_ID_82599_SFP: - case IXGBE_DEV_ID_82599_SFP_FCOE: - case IXGBE_DEV_ID_82599_SFP_EM: - case IXGBE_DEV_ID_82599_SFP_SF2: - media_type = ixgbe_media_type_fiber; - break; - case IXGBE_DEV_ID_82599_CX4: - media_type = ixgbe_media_type_cx4; - break; - case IXGBE_DEV_ID_82599_T3_LOM: - media_type = ixgbe_media_type_copper; - break; - case IXGBE_DEV_ID_82599_LS: - media_type = ixgbe_media_type_fiber_lco; - break; - default: - media_type = ixgbe_media_type_unknown; - break; - } -out: - return media_type; -} - -/** - * ixgbe_start_mac_link_82599 - Setup MAC link settings - * @hw: pointer to hardware structure - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Configures link settings based on values in the ixgbe_hw struct. - * Restarts the link. Performs autonegotiation if needed. 
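The body of this function (below) is the file's standard bounded-poll idiom: kick the operation off, then re-read a status register a fixed number of times before declaring failure. As a reference point only, the same shape in a few lines of standalone C; read_reg(), AN_COMPLETE and the bound of 45 polls are invented stand-ins, not ixgbe symbols:

#include <stdint.h>
#include <stdio.h>

#define AN_COMPLETE 0x80000000u         /* invented completion bit */

static uint32_t read_reg(int attempt)
{
        return attempt >= 3 ? AN_COMPLETE : 0;  /* fake MMIO: done on 4th poll */
}

int main(void)
{
        uint32_t links = 0;             /* "just in case autoneg time = 0" */
        int i;

        for (i = 0; i < 45; i++) {      /* the driver loops IXGBE_AUTO_NEG_TIME times */
                links = read_reg(i);
                if (links & AN_COMPLETE)
                        break;
                /* the driver sleeps msleep(100) between polls */
        }
        if (!(links & AN_COMPLETE))
                printf("autoneg did not complete\n");
        else
                printf("autoneg complete after %d polls\n", i + 1);
        return 0;
}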
- **/ -static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, - bool autoneg_wait_to_complete) -{ - u32 autoc_reg; - u32 links_reg; - u32 i; - s32 status = 0; - - /* Restart link */ - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - - /* Only poll for autoneg to complete if specified to do so */ - if (autoneg_wait_to_complete) { - if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == - IXGBE_AUTOC_LMS_KX4_KX_KR || - (autoc_reg & IXGBE_AUTOC_LMS_MASK) == - IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || - (autoc_reg & IXGBE_AUTOC_LMS_MASK) == - IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { - links_reg = 0; /* Just in case Autoneg time = 0 */ - for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - if (links_reg & IXGBE_LINKS_KX_AN_COMP) - break; - msleep(100); - } - if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { - status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; - hw_dbg(hw, "Autoneg did not complete.\n"); - } - } - } - - /* Add delay to filter out noises during initial link setup */ - msleep(50); - - return status; -} - -/** - * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser - * @hw: pointer to hardware structure - * - * The base drivers may require better control over SFP+ module - * PHY states. This includes selectively shutting down the Tx - * laser on the PHY, effectively halting physical link. - **/ -static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) -{ - u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - - /* Disable tx laser; allow 100us to go dark per spec */ - esdp_reg |= IXGBE_ESDP_SDP3; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - udelay(100); -} - -/** - * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser - * @hw: pointer to hardware structure - * - * The base drivers may require better control over SFP+ module - * PHY states. This includes selectively turning on the Tx - * laser on the PHY, effectively starting physical link. - **/ -static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) -{ - u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - - /* Enable tx laser; allow 100ms to light up */ - esdp_reg &= ~IXGBE_ESDP_SDP3; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - msleep(100); -} - -/** - * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser - * @hw: pointer to hardware structure - * - * When the driver changes the link speeds that it can support, - * it sets autotry_restart to true to indicate that we need to - * initiate a new autotry session with the link partner. To do - * so, we set the speed then disable and re-enable the tx laser, to - * alert the link partner that it also needs to restart autotry on its - * end. This is consistent with true clause 37 autoneg, which also - * involves a loss of signal. - **/ -static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) -{ - if (hw->mac.autotry_restart) { - ixgbe_disable_tx_laser_multispeed_fiber(hw); - ixgbe_enable_tx_laser_multispeed_fiber(hw); - hw->mac.autotry_restart = false; - } -} - -/** - * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg: true if autonegotiation enabled - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Set the link speed in the AUTOC register and restarts link. 
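Because 10G fiber offers no speed autonegotiation, the function below walks the requested speeds from highest to lowest in software. A minimal sketch of that walk with the hardware calls stubbed out; try_speed() and the SPEED_* values are made up for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPEED_10G 0x80u                 /* made-up speed bits */
#define SPEED_1G  0x20u

/* stand-in for "set module speed, flap the laser, poll for link" */
static bool try_speed(uint32_t s)
{
        return s == SPEED_1G;           /* pretend only 1G comes up */
}

int main(void)
{
        const uint32_t prio[] = { SPEED_10G, SPEED_1G };    /* highest first */
        uint32_t requested = SPEED_10G | SPEED_1G;
        size_t i;

        for (i = 0; i < sizeof(prio) / sizeof(prio[0]); i++) {
                if (!(requested & prio[i]))
                        continue;
                if (try_speed(prio[i])) {
                        printf("link up at 0x%02x\n", (unsigned)prio[i]);
                        return 0;
                }
        }
        printf("no link at any requested speed\n");
        return 1;
}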
- **/ -static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete) -{ - s32 status = 0; - ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; - ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; - u32 speedcnt = 0; - u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - u32 i = 0; - bool link_up = false; - bool negotiation; - - /* Mask off requested but non-supported speeds */ - status = hw->mac.ops.get_link_capabilities(hw, &link_speed, - &negotiation); - if (status != 0) - return status; - - speed &= link_speed; - - /* - * Try each speed one by one, highest priority first. We do this in - * software because 10gb fiber doesn't support speed autonegotiation. - */ - if (speed & IXGBE_LINK_SPEED_10GB_FULL) { - speedcnt++; - highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; - - /* If we already have link at this speed, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status != 0) - return status; - - if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) - goto out; - - /* Set the module link speed */ - esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - - /* Allow module to change analog characteristics (1G->10G) */ - msleep(40); - - status = ixgbe_setup_mac_link_82599(hw, - IXGBE_LINK_SPEED_10GB_FULL, - autoneg, - autoneg_wait_to_complete); - if (status != 0) - return status; - - /* Flap the tx laser if it has not already been done */ - hw->mac.ops.flap_tx_laser(hw); - - /* - * Wait for the controller to acquire link. Per IEEE 802.3ap, - * Section 73.10.2, we may have to wait up to 500ms if KR is - * attempted. 82599 uses the same timing for 10g SFI. - */ - for (i = 0; i < 5; i++) { - /* Wait for the link partner to also set speed */ - msleep(100); - - /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status != 0) - return status; - - if (link_up) - goto out; - } - } - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) { - speedcnt++; - if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) - highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; - - /* If we already have link at this speed, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status != 0) - return status; - - if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) - goto out; - - /* Set the module link speed */ - esdp_reg &= ~IXGBE_ESDP_SDP5; - esdp_reg |= IXGBE_ESDP_SDP5_DIR; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - - /* Allow module to change analog characteristics (10G->1G) */ - msleep(40); - - status = ixgbe_setup_mac_link_82599(hw, - IXGBE_LINK_SPEED_1GB_FULL, - autoneg, - autoneg_wait_to_complete); - if (status != 0) - return status; - - /* Flap the tx laser if it has not already been done */ - hw->mac.ops.flap_tx_laser(hw); - - /* Wait for the link partner to also set speed */ - msleep(100); - - /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, &link_up, - false); - if (status != 0) - return status; - - if (link_up) - goto out; - } - - /* - * We didn't get link. Configure back to the highest speed we tried, - * (if there was more than one). We call ourselves back with just the - * single highest speed that the user requested. 
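At the tail of this routine (just below) the requested speeds are folded into hw->phy.autoneg_advertised, one bit per speed. The composition is plain bitmask arithmetic; illustrative constants stand in for the IXGBE_LINK_SPEED_* defines:

#include <stdint.h>
#include <stdio.h>

#define LINK_10G_FULL 0x0080u           /* stand-ins, not the real defines */
#define LINK_1G_FULL  0x0020u

int main(void)
{
        uint32_t speed = LINK_10G_FULL | LINK_1G_FULL;  /* caller's request */
        uint32_t advertised = 0;

        if (speed & LINK_10G_FULL)
                advertised |= LINK_10G_FULL;
        if (speed & LINK_1G_FULL)
                advertised |= LINK_1G_FULL;

        printf("autoneg_advertised = 0x%04x\n", advertised);
        return 0;
}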
- */ - if (speedcnt > 1) - status = ixgbe_setup_mac_link_multispeed_fiber(hw, - highest_link_speed, - autoneg, - autoneg_wait_to_complete); - -out: - /* Set autoneg_advertised value based on input link speed */ - hw->phy.autoneg_advertised = 0; - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - return status; -} - -/** - * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg: true if autonegotiation enabled - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Implements the Intel SmartSpeed algorithm. - **/ -static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) -{ - s32 status = 0; - ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; - s32 i, j; - bool link_up = false; - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - - /* Set autoneg_advertised value based on input link speed */ - hw->phy.autoneg_advertised = 0; - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - if (speed & IXGBE_LINK_SPEED_100_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; - - /* - * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the - * autoneg advertisement if link is unable to be established at the - * highest negotiated rate. This can sometimes happen due to integrity - * issues with the physical media connection. - */ - - /* First, try to get link with full advertisement */ - hw->phy.smart_speed_active = false; - for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, - autoneg_wait_to_complete); - if (status != 0) - goto out; - - /* - * Wait for the controller to acquire link. Per IEEE 802.3ap, - * Section 73.10.2, we may have to wait up to 500ms if KR is - * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per - * Table 9 in the AN MAS. - */ - for (i = 0; i < 5; i++) { - mdelay(100); - - /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status != 0) - goto out; - - if (link_up) - goto out; - } - } - - /* - * We didn't get link. If we advertised KR plus one of KX4/KX - * (or BX4/BX), then disable KR and try again. - */ - if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || - ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) - goto out; - - /* Turn SmartSpeed on to disable KR support */ - hw->phy.smart_speed_active = true; - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, - autoneg_wait_to_complete); - if (status != 0) - goto out; - - /* - * Wait for the controller to acquire link. 600ms will allow for - * the AN link_fail_inhibit_timer as well for multiple cycles of - * parallel detect, both 10g and 1g. This allows for the maximum - * connect attempts as defined in the AN MAS table 73-7. - */ - for (i = 0; i < 6; i++) { - mdelay(100); - - /* If we have link, just jump out */ - status = hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); - if (status != 0) - goto out; - - if (link_up) - goto out; - } - - /* We didn't get link. Turn SmartSpeed back off. 
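The SmartSpeed ladder above is easiest to see with the hardware stripped away: several attempts with the full advertisement, one reduced attempt with KR masked off, then a final attempt that restores the full advertisement. attempt_link() and the retry count of 3 are invented for this sketch:

#include <stdbool.h>
#include <stdio.h>

static int tries;

/* stand-in for ixgbe_setup_mac_link_82599() plus the link poll */
static bool attempt_link(bool smartspeed_active)
{
        tries++;
        return smartspeed_active;       /* pretend link only works without KR */
}

int main(void)
{
        int j;

        for (j = 0; j < 3; j++)         /* full advertisement first */
                if (attempt_link(false)) {
                        printf("link with full advertisement (%d tries)\n", tries);
                        return 0;
                }

        if (attempt_link(true)) {       /* SmartSpeed on: KR support dropped */
                printf("link via SmartSpeed fallback (%d tries)\n", tries);
                return 0;
        }

        attempt_link(false);            /* give up, restore full advertisement */
        printf("no link; advertisement restored\n");
        return 1;
}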
*/ - hw->phy.smart_speed_active = false; - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, - autoneg_wait_to_complete); - -out: - if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) - hw_dbg(hw, "Smartspeed has downgraded the link speed from " - "the maximum advertised\n"); - return status; -} - -/** - * ixgbe_setup_mac_link_82599 - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg: true if autonegotiation enabled - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Set the link speed in the AUTOC register and restarts link. - **/ -static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) -{ - s32 status = 0; - u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - u32 start_autoc = autoc; - u32 orig_autoc = 0; - u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; - u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; - u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; - u32 links_reg; - u32 i; - ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; - - /* Check to see if speed passed in is supported. */ - hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); - if (status != 0) - goto out; - - speed &= link_capabilities; - - if (speed == IXGBE_LINK_SPEED_UNKNOWN) { - status = IXGBE_ERR_LINK_SETUP; - goto out; - } - - /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ - if (hw->mac.orig_link_settings_stored) - orig_autoc = hw->mac.orig_autoc; - else - orig_autoc = autoc; - - if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || - link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || - link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { - /* Set KX4/KX/KR support according to speed requested */ - autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) - autoc |= IXGBE_AUTOC_KX4_SUPP; - if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && - (hw->phy.smart_speed_active == false)) - autoc |= IXGBE_AUTOC_KR_SUPP; - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - autoc |= IXGBE_AUTOC_KX_SUPP; - } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && - (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || - link_mode == IXGBE_AUTOC_LMS_1G_AN)) { - /* Switch from 1G SFI to 10G SFI if requested */ - if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && - (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { - autoc &= ~IXGBE_AUTOC_LMS_MASK; - autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; - } - } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && - (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { - /* Switch from 10G SFI to 1G SFI if requested */ - if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && - (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { - autoc &= ~IXGBE_AUTOC_LMS_MASK; - if (autoneg) - autoc |= IXGBE_AUTOC_LMS_1G_AN; - else - autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; - } - } - - if (autoc != start_autoc) { - /* Restart link */ - autoc |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); - - /* Only poll for autoneg to complete if specified to do so */ - if (autoneg_wait_to_complete) { - if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || - link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || - link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { - links_reg = 0; /*Just in case Autoneg time=0*/ - for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { - links_reg = - IXGBE_READ_REG(hw, IXGBE_LINKS); - if (links_reg & 
IXGBE_LINKS_KX_AN_COMP) - break; - msleep(100); - } - if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { - status = - IXGBE_ERR_AUTONEG_NOT_COMPLETE; - hw_dbg(hw, "Autoneg did not " - "complete.\n"); - } - } - } - - /* Add delay to filter out noise during initial link setup */ - msleep(50); - } - -out: - return status; -} - -/** - * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg: true if autonegotiation enabled - * @autoneg_wait_to_complete: true if waiting is needed to complete - * - * Restarts link on PHY and MAC based on settings passed in. - **/ -static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete) -{ - s32 status; - - /* Setup the PHY according to input speed */ - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, - autoneg_wait_to_complete); - /* Set up MAC */ - ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); - - return status; -} - -/** - * ixgbe_reset_hw_82599 - Perform hardware reset - * @hw: pointer to hardware structure - * - * Resets the hardware by resetting the transmit and receive units, masks - * and clears all interrupts, performs a PHY reset, and performs a link (MAC) - * reset. - **/ -static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) -{ - s32 status = 0; - u32 ctrl; - u32 i; - u32 autoc; - u32 autoc2; - - /* Call adapter stop to disable tx/rx and clear interrupts */ - hw->mac.ops.stop_adapter(hw); - - /* PHY ops must be identified and initialized prior to reset */ - - /* Identify PHY and related function pointers */ - status = hw->phy.ops.init(hw); - - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto reset_hw_out; - - /* Setup SFP module if there is one present. */ - if (hw->phy.sfp_setup_needed) { - status = hw->mac.ops.setup_sfp(hw); - hw->phy.sfp_setup_needed = false; - } - - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto reset_hw_out; - - /* Reset PHY */ - if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) - hw->phy.ops.reset(hw); - - /* - * Prevent the PCI-E bus from hanging by disabling PCI-E master - * access and verify no pending requests before reset - */ - ixgbe_disable_pcie_master(hw); - -mac_reset_top: - /* - * Issue global reset to the MAC. This needs to be a SW reset. - * If link reset is used, it might reset the MAC when mng is using it - */ - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); - IXGBE_WRITE_FLUSH(hw); - - /* Poll for reset bit to self-clear indicating reset is complete */ - for (i = 0; i < 10; i++) { - udelay(1); - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - if (!(ctrl & IXGBE_CTRL_RST)) - break; - } - if (ctrl & IXGBE_CTRL_RST) { - status = IXGBE_ERR_RESET_FAILED; - hw_dbg(hw, "Reset polling failed to complete.\n"); - } - - /* - * Double resets are required for recovery from certain error - * conditions. Between resets, it is necessary to stall to allow time - * for any pending HW events to complete. We use 1usec since that is - * what is needed for ixgbe_disable_pcie_master(). The second reset - * then clears out any effects of those events. - */ - if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { - hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; - udelay(1); - goto mac_reset_top; - } - - msleep(50); - - /* - * Store the original AUTOC/AUTOC2 values if they have not been - * stored off yet.
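The AUTOC/AUTOC2 handling that follows is a save-once/restore-later pattern: the first reset snapshots the EEPROM-programmed values, and every later reset writes them back because the reset reverts the registers to defaults. Reduced to its shape, with reg_autoc and the values as stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t reg_autoc = 0x1234u;    /* stand-in for the AUTOC register */
static uint32_t orig_autoc;
static bool orig_stored;

static void after_reset(void)
{
        if (!orig_stored) {
                orig_autoc = reg_autoc; /* first reset: remember the default */
                orig_stored = true;
        } else if (reg_autoc != orig_autoc) {
                reg_autoc = orig_autoc; /* later resets: write the original back */
        }
}

int main(void)
{
        after_reset();                  /* snapshot taken */
        reg_autoc = 0xbeefu;            /* something reprogrammed the register */
        after_reset();                  /* original value restored */
        printf("AUTOC = 0x%04x\n", reg_autoc);
        return 0;
}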
Otherwise restore the stored original - * values since the reset operation sets back to defaults. - */ - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - if (hw->mac.orig_link_settings_stored == false) { - hw->mac.orig_autoc = autoc; - hw->mac.orig_autoc2 = autoc2; - hw->mac.orig_link_settings_stored = true; - } else { - if (autoc != hw->mac.orig_autoc) - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | - IXGBE_AUTOC_AN_RESTART)); - - if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != - (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { - autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; - autoc2 |= (hw->mac.orig_autoc2 & - IXGBE_AUTOC2_UPPER_MASK); - IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); - } - } - - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - - /* - * Store MAC address from RAR0, clear receive address registers, and - * clear the multicast table. Also reset num_rar_entries to 128, - * since we modify this value when programming the SAN MAC address. - */ - hw->mac.num_rar_entries = 128; - hw->mac.ops.init_rx_addrs(hw); - - /* Store the permanent SAN mac address */ - hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); - - /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { - hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - - /* Reserve the last RAR for the SAN MAC address */ - hw->mac.num_rar_entries--; - } - - /* Store the alternative WWNN/WWPN prefix */ - hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, - &hw->mac.wwpn_prefix); - -reset_hw_out: - return status; -} - -/** - * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. - * @hw: pointer to hardware structure - **/ -s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) -{ - int i; - u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); - fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; - - /* - * Before starting reinitialization process, - * FDIRCMD.CMD must be zero. - */ - for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { - if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & - IXGBE_FDIRCMD_CMD_MASK)) - break; - udelay(10); - } - if (i >= IXGBE_FDIRCMD_CMD_POLL) { - hw_dbg(hw, "Flow Director previous command isn't complete, " - "aborting table re-initialization.\n"); - return IXGBE_ERR_FDIR_REINIT_FAILED; - } - - IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); - IXGBE_WRITE_FLUSH(hw); - /* - * 82599 adapters flow director init flow cannot be restarted, - * Workaround 82599 silicon errata by performing the following steps - * before re-writing the FDIRCTRL control register with the same value. - * - write 1 to bit 8 of FDIRCMD register & - * - write 0 to bit 8 of FDIRCMD register - */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, - (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | - IXGBE_FDIRCMD_CLEARHT)); - IXGBE_WRITE_FLUSH(hw); - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, - (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & - ~IXGBE_FDIRCMD_CLEARHT)); - IXGBE_WRITE_FLUSH(hw); - /* - * Clear FDIR Hash register to clear any leftover hashes - * waiting to be programmed. 
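The errata workaround above is a set-then-clear of a single bit, with a posted-write flush after each step so the two writes really reach the device in order. The same three-step shape with a plain variable in place of FDIRCMD (the CLEARHT value is invented):

#include <stdint.h>
#include <stdio.h>

#define CLEARHT 0x0100u                 /* stand-in for IXGBE_FDIRCMD_CLEARHT */

static uint32_t fdircmd;

static void write_flush(void)
{
        /* the driver reads a register back here to flush posted writes */
}

int main(void)
{
        fdircmd |= CLEARHT;             /* write 1 to the bit */
        write_flush();
        fdircmd &= ~CLEARHT;            /* write 0 to the bit */
        write_flush();
        printf("FDIRCMD = 0x%04x\n", fdircmd);
        return 0;
}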
- */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); - IXGBE_WRITE_FLUSH(hw); - - IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); - IXGBE_WRITE_FLUSH(hw); - - /* Poll init-done after we write FDIRCTRL register */ - for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { - if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & - IXGBE_FDIRCTRL_INIT_DONE) - break; - udelay(10); - } - if (i >= IXGBE_FDIR_INIT_DONE_POLL) { - hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); - return IXGBE_ERR_FDIR_REINIT_FAILED; - } - - /* Clear FDIR statistics registers (read to clear) */ - IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); - IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); - IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); - IXGBE_READ_REG(hw, IXGBE_FDIRMISS); - IXGBE_READ_REG(hw, IXGBE_FDIRLEN); - - return 0; -} - -/** - * ixgbe_set_fdir_rxpba_82599 - Initialize Flow Director Rx packet buffer - * @hw: pointer to hardware structure - * @pballoc: which mode to allocate filters with - **/ -static s32 ixgbe_set_fdir_rxpba_82599(struct ixgbe_hw *hw, const u32 pballoc) -{ - u32 fdir_pbsize = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT; - u32 current_rxpbsize = 0; - int i; - - /* reserve space for Flow Director filters */ - switch (pballoc) { - case IXGBE_FDIR_PBALLOC_256K: - fdir_pbsize -= 256 << IXGBE_RXPBSIZE_SHIFT; - break; - case IXGBE_FDIR_PBALLOC_128K: - fdir_pbsize -= 128 << IXGBE_RXPBSIZE_SHIFT; - break; - case IXGBE_FDIR_PBALLOC_64K: - fdir_pbsize -= 64 << IXGBE_RXPBSIZE_SHIFT; - break; - case IXGBE_FDIR_PBALLOC_NONE: - default: - return IXGBE_ERR_PARAM; - } - - /* determine current RX packet buffer size */ - for (i = 0; i < 8; i++) - current_rxpbsize += IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); - - /* if there is already room for the filters do nothing */ - if (current_rxpbsize <= fdir_pbsize) - return 0; - - if (current_rxpbsize > hw->mac.rx_pb_size) { - /* - * if rxpbsize is greater than max then HW max the Rx buffer - * sizes are unconfigured or misconfigured since HW default is - * to give the full buffer to each traffic class resulting in - * the total size being buffer size 8x actual size - * - * This assumes no DCB since the RXPBSIZE registers appear to - * be unconfigured. - */ - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), fdir_pbsize); - for (i = 1; i < 8; i++) - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); - } else { - /* - * Since the Rx packet buffer appears to have already been - * configured we need to shrink each packet buffer by enough - * to make room for the filters. As such we take each rxpbsize - * value and multiply it by a fraction representing the size - * needed over the size we currently have. - * - * We need to reduce fdir_pbsize and current_rxpbsize to - * 1/1024 of their original values in order to avoid - * overflowing the u32 being used to store rxpbsize. 
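The 1/1024 reduction mentioned above exists purely to keep the scaling multiply inside 32 bits: with byte-granular sizes the product buf * new_total can overflow a u32, while pre-shifting both totals by the same amount preserves the ratio. Sample numbers are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t buf = 0x00080000u;       /* one Rx packet-buffer size */
        uint32_t new_total = 0x00300000u; /* space left after FDIR reservation */
        uint32_t old_total = 0x00400000u; /* currently configured total */

        /* A naive "buf * new_total / old_total" overflows: 0x80000 * 0x300000
         * needs about 40 bits. Shifting both totals right by 10 first keeps
         * the intermediate product in range without changing the ratio. */
        uint32_t scaled = buf * (new_total >> 10) / (old_total >> 10);

        printf("rescaled buffer = 0x%08x\n", scaled);   /* 3/4 of the original */
        return 0;
}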
- */ - fdir_pbsize >>= IXGBE_RXPBSIZE_SHIFT; - current_rxpbsize >>= IXGBE_RXPBSIZE_SHIFT; - for (i = 0; i < 8; i++) { - u32 rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); - rxpbsize *= fdir_pbsize; - rxpbsize /= current_rxpbsize; - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); - } - } - - return 0; -} - -/** - * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers - * @hw: pointer to hardware structure - * @fdirctrl: value to write to flow director control register - **/ -static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) -{ - int i; - - /* Prime the keys for hashing */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); - IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); - - /* - * Poll init-done after we write the register. Estimated times: - * 10G: PBALLOC = 11b, timing is 60us - * 1G: PBALLOC = 11b, timing is 600us - * 100M: PBALLOC = 11b, timing is 6ms - * - * Multiple these timings by 4 if under full Rx load - * - * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for - * 1 msec per poll time. If we're at line rate and drop to 100M, then - * this might not finish in our poll time, but we can live with that - * for now. - */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); - IXGBE_WRITE_FLUSH(hw); - for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { - if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & - IXGBE_FDIRCTRL_INIT_DONE) - break; - usleep_range(1000, 2000); - } - - if (i >= IXGBE_FDIR_INIT_DONE_POLL) - hw_dbg(hw, "Flow Director poll time exceeded!\n"); -} - -/** - * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters - * @hw: pointer to hardware structure - * @fdirctrl: value to write to flow director control register, initially - * contains just the value of the Rx packet buffer allocation - **/ -s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) -{ - s32 err; - - /* Before enabling Flow Director, verify the Rx Packet Buffer size */ - err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl); - if (err) - return err; - - /* - * Continue setup of fdirctrl register bits: - * Move the flexible bytes to use the ethertype - shift 6 words - * Set the maximum length per hash bucket to 0xA filters - * Send interrupt when 64 filters are left - */ - fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | - (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | - (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); - - /* write hashes and fdirctrl register, poll for completion */ - ixgbe_fdir_enable_82599(hw, fdirctrl); - - return 0; -} - -/** - * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters - * @hw: pointer to hardware structure - * @fdirctrl: value to write to flow director control register, initially - * contains just the value of the Rx packet buffer allocation - **/ -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) -{ - s32 err; - - /* Before enabling Flow Director, verify the Rx Packet Buffer size */ - err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl); - if (err) - return err; - - /* - * Continue setup of fdirctrl register bits: - * Turn perfect match filtering on - * Report hash in RSS field of Rx wb descriptor - * Initialize the drop queue - * Move the flexible bytes to use the ethertype - shift 6 words - * Set the maximum length per hash bucket to 0xA filters - * Send interrupt when 64 (0x4 * 16) filters are left - */ - fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | - IXGBE_FDIRCTRL_REPORT_STATUS | - (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | - 
(0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | - (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | - (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); - - /* write hashes and fdirctrl register, poll for completion */ - ixgbe_fdir_enable_82599(hw, fdirctrl); - - return 0; -} - -/* - * These defines allow us to quickly generate all of the necessary instructions - * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION - * for values 0 through 15 - */ -#define IXGBE_ATR_COMMON_HASH_KEY \ - (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) -#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ -do { \ - u32 n = (_n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ - common_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ - bucket_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ - sig_hash ^= lo_hash_dword << (16 - n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ - common_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ - bucket_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ - sig_hash ^= hi_hash_dword << (16 - n); \ -} while (0); - -/** - * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash - * @stream: input bitstream to compute the hash on - * - * This function is almost identical to the function above but contains - * several optimizations such as unwinding all of the loops, letting the - * compiler work out all of the conditional ifs since the keys are static - * defines, and computing two keys at once since the hashed dword stream - * will be the same for both keys. - **/ -static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common) -{ - u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; - u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; - - /* record the flow_vm_vlan bits as they are a key part to the hash */ - flow_vm_vlan = ntohl(input.dword); - - /* generate common hash dword */ - hi_hash_dword = ntohl(common.dword); - - /* low dword is word swapped version of common */ - lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); - - /* apply flow ID/VM pool/VLAN ID bits to hash words */ - hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); - - /* Process bits 0 and 16 */ - IXGBE_COMPUTE_SIG_HASH_ITERATION(0); - - /* - * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to - * delay this because bit 0 of the stream should not be processed - * so we do not add the vlan until after bit 0 was processed - */ - lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); - - /* Process remaining 30 bits of the key */ - IXGBE_COMPUTE_SIG_HASH_ITERATION(1); - IXGBE_COMPUTE_SIG_HASH_ITERATION(2); - IXGBE_COMPUTE_SIG_HASH_ITERATION(3); - IXGBE_COMPUTE_SIG_HASH_ITERATION(4); - IXGBE_COMPUTE_SIG_HASH_ITERATION(5); - IXGBE_COMPUTE_SIG_HASH_ITERATION(6); - IXGBE_COMPUTE_SIG_HASH_ITERATION(7); - IXGBE_COMPUTE_SIG_HASH_ITERATION(8); - IXGBE_COMPUTE_SIG_HASH_ITERATION(9); - IXGBE_COMPUTE_SIG_HASH_ITERATION(10); - IXGBE_COMPUTE_SIG_HASH_ITERATION(11); - IXGBE_COMPUTE_SIG_HASH_ITERATION(12); - IXGBE_COMPUTE_SIG_HASH_ITERATION(13); - IXGBE_COMPUTE_SIG_HASH_ITERATION(14); - IXGBE_COMPUTE_SIG_HASH_ITERATION(15); - - /* combine common_hash result with signature and bucket hashes */ - bucket_hash ^= common_hash; - bucket_hash &= IXGBE_ATR_HASH_MASK; - - sig_hash ^= common_hash << 16; - sig_hash &= IXGBE_ATR_HASH_MASK << 16; - - /* return completed signature hash
*/ - return sig_hash ^ bucket_hash; -} - -/** - * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter - * @hw: pointer to hardware structure - * @input: unique input dword - * @common: compressed common input dword - * @queue: queue index to direct traffic to - **/ -s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common, - u8 queue) -{ - u64 fdirhashcmd; - u32 fdircmd; - - /* - * Get the flow_type in order to program FDIRCMD properly - * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 - */ - switch (input.formatted.flow_type) { - case IXGBE_ATR_FLOW_TYPE_TCPV4: - case IXGBE_ATR_FLOW_TYPE_UDPV4: - case IXGBE_ATR_FLOW_TYPE_SCTPV4: - case IXGBE_ATR_FLOW_TYPE_TCPV6: - case IXGBE_ATR_FLOW_TYPE_UDPV6: - case IXGBE_ATR_FLOW_TYPE_SCTPV6: - break; - default: - hw_dbg(hw, " Error on flow type input\n"); - return IXGBE_ERR_CONFIG; - } - - /* configure FDIRCMD register */ - fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | - IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; - fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; - fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; - - /* - * The lower 32-bits of fdirhashcmd are for FDIRHASH, the upper 32-bits - * are for FDIRCMD. Then do a 64-bit register write from FDIRHASH. - */ - fdirhashcmd = (u64)fdircmd << 32; - fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); - IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); - - hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); - - return 0; -} - -#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ -do { \ - u32 n = (_n); \ - if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ - bucket_hash ^= lo_hash_dword >> n; \ - if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ - bucket_hash ^= hi_hash_dword >> n; \ -} while (0); - -/** - * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash - * @atr_input: input bitstream to compute the hash on - * @input_mask: mask for the input bitstream - * - * This function serves two main purposes. First it applies the input_mask - * to the atr_input resulting in a cleaned up atr_input data stream. - * Secondly it computes the hash and stores it in the bkt_hash field at - * the end of the input byte stream. This way it will be available for - * future use without needing to recompute the hash.
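The unrolled IXGBE_COMPUTE_BKT_HASH_ITERATION calls in the function below are equivalent to a 16-iteration loop over the key bits; the driver unrolls it so the constant key lets the compiler discard the untaken branches. A loop form of the same hash, with made-up key and input values:

#include <stdint.h>
#include <stdio.h>

static uint32_t bkt_hash(uint32_t key, uint32_t lo, uint32_t hi)
{
        uint32_t hash = 0;
        int n;

        for (n = 0; n < 16; n++) {
                if (key & (1u << n))            /* low key half drives lo dword */
                        hash ^= lo >> n;
                if (key & (1u << (n + 16)))     /* high key half drives hi dword */
                        hash ^= hi >> n;
        }
        return hash & 0x1fff;                   /* 13 bits: 8K buckets max */
}

int main(void)
{
        printf("bucket hash = 0x%04x\n",
               bkt_hash(0x3dad7e0au, 0x12345678u, 0x9abcdef0u));
        return 0;
}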
- **/ -void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, - union ixgbe_atr_input *input_mask) -{ - - u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; - u32 bucket_hash = 0; - - /* Apply masks to input data */ - input->dword_stream[0] &= input_mask->dword_stream[0]; - input->dword_stream[1] &= input_mask->dword_stream[1]; - input->dword_stream[2] &= input_mask->dword_stream[2]; - input->dword_stream[3] &= input_mask->dword_stream[3]; - input->dword_stream[4] &= input_mask->dword_stream[4]; - input->dword_stream[5] &= input_mask->dword_stream[5]; - input->dword_stream[6] &= input_mask->dword_stream[6]; - input->dword_stream[7] &= input_mask->dword_stream[7]; - input->dword_stream[8] &= input_mask->dword_stream[8]; - input->dword_stream[9] &= input_mask->dword_stream[9]; - input->dword_stream[10] &= input_mask->dword_stream[10]; - - /* record the flow_vm_vlan bits as they are a key part to the hash */ - flow_vm_vlan = ntohl(input->dword_stream[0]); - - /* generate common hash dword */ - hi_hash_dword = ntohl(input->dword_stream[1] ^ - input->dword_stream[2] ^ - input->dword_stream[3] ^ - input->dword_stream[4] ^ - input->dword_stream[5] ^ - input->dword_stream[6] ^ - input->dword_stream[7] ^ - input->dword_stream[8] ^ - input->dword_stream[9] ^ - input->dword_stream[10]); - - /* low dword is word swapped version of common */ - lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); - - /* apply flow ID/VM pool/VLAN ID bits to hash words */ - hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); - - /* Process bits 0 and 16 */ - IXGBE_COMPUTE_BKT_HASH_ITERATION(0); - - /* - * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to - * delay this because bit 0 of the stream should not be processed - * so we do not add the vlan until after bit 0 was processed - */ - lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); - - /* Process remaining 30 bit of the key */ - IXGBE_COMPUTE_BKT_HASH_ITERATION(1); - IXGBE_COMPUTE_BKT_HASH_ITERATION(2); - IXGBE_COMPUTE_BKT_HASH_ITERATION(3); - IXGBE_COMPUTE_BKT_HASH_ITERATION(4); - IXGBE_COMPUTE_BKT_HASH_ITERATION(5); - IXGBE_COMPUTE_BKT_HASH_ITERATION(6); - IXGBE_COMPUTE_BKT_HASH_ITERATION(7); - IXGBE_COMPUTE_BKT_HASH_ITERATION(8); - IXGBE_COMPUTE_BKT_HASH_ITERATION(9); - IXGBE_COMPUTE_BKT_HASH_ITERATION(10); - IXGBE_COMPUTE_BKT_HASH_ITERATION(11); - IXGBE_COMPUTE_BKT_HASH_ITERATION(12); - IXGBE_COMPUTE_BKT_HASH_ITERATION(13); - IXGBE_COMPUTE_BKT_HASH_ITERATION(14); - IXGBE_COMPUTE_BKT_HASH_ITERATION(15); - - /* - * Limit hash to 13 bits since max bucket count is 8K. - * Store result at the end of the input stream. - */ - input->formatted.bkt_hash = bucket_hash & 0x1FFF; -} - -/** - * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks - * @input_mask: mask to be bit swapped - * - * The source and destination port masks for flow director are bit swapped - * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to - * generate a correctly swapped value we need to bit swap the mask and that - * is what is accomplished by this function. 
- **/ -static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) -{ - u32 mask = ntohs(input_mask->formatted.dst_port); - mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; - mask |= ntohs(input_mask->formatted.src_port); - mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); - mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); - mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); - return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); -} - -/* - * These two macros are meant to address the fact that we have registers - * that are either all or in part big-endian. As a result on big-endian - * systems we will end up byte swapping the value to little-endian before - * it is byte swapped again and written to the hardware in the original - * big-endian format. - */ -#define IXGBE_STORE_AS_BE32(_value) \ - (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ - (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) - -#define IXGBE_WRITE_REG_BE32(a, reg, value) \ - IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) - -#define IXGBE_STORE_AS_BE16(_value) \ - ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8)) - -s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input_mask) -{ - /* mask IPv6 since it is currently not supported */ - u32 fdirm = IXGBE_FDIRM_DIPv6; - u32 fdirtcpm; - - /* - * Program the relevant mask registers. If src/dst_port or src/dst_addr - * are zero, then assume a full mask for that field. Also assume that - * a VLAN of 0 is unspecified, so mask that out as well. L4type - * cannot be masked out in this implementation. - * - * This also assumes IPv4 only. IPv6 masking isn't supported at this - * point in time. - */ - - /* verify bucket hash is cleared on hash generation */ - if (input_mask->formatted.bkt_hash) - hw_dbg(hw, " bucket hash should always be 0 in mask\n"); - - /* Program FDIRM and verify partial masks */ - switch (input_mask->formatted.vm_pool & 0x7F) { - case 0x0: - fdirm |= IXGBE_FDIRM_POOL; - case 0x7F: - break; - default: - hw_dbg(hw, " Error on vm pool mask\n"); - return IXGBE_ERR_CONFIG; - } - - switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { - case 0x0: - fdirm |= IXGBE_FDIRM_L4P; - if (input_mask->formatted.dst_port || - input_mask->formatted.src_port) { - hw_dbg(hw, " Error on src/dst port mask\n"); - return IXGBE_ERR_CONFIG; - } - case IXGBE_ATR_L4TYPE_MASK: - break; - default: - hw_dbg(hw, " Error on flow type mask\n"); - return IXGBE_ERR_CONFIG; - } - - switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { - case 0x0000: - /* mask VLAN ID, fall through to mask VLAN priority */ - fdirm |= IXGBE_FDIRM_VLANID; - case 0x0FFF: - /* mask VLAN priority */ - fdirm |= IXGBE_FDIRM_VLANP; - break; - case 0xE000: - /* mask VLAN ID only, fall through */ - fdirm |= IXGBE_FDIRM_VLANID; - case 0xEFFF: - /* no VLAN fields masked */ - break; - default: - hw_dbg(hw, " Error on VLAN mask\n"); - return IXGBE_ERR_CONFIG; - } - - switch (input_mask->formatted.flex_bytes & 0xFFFF) { - case 0x0000: - /* Mask Flex Bytes, fall through */ - fdirm |= IXGBE_FDIRM_FLEX; - case 0xFFFF: - break; - default: - hw_dbg(hw, " Error on flexible byte mask\n"); - return IXGBE_ERR_CONFIG; - } - - /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); - - /* store the TCP/UDP port masks, bit reversed from port layout */ - fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); - - /* write both the same so that UDP and TCP use the 
same mask */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); - IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); - - /* store source and destination IP masks (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, - ~input_mask->formatted.src_ip[0]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, - ~input_mask->formatted.dst_ip[0]); - - return 0; -} - -s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id, u8 queue) -{ - u32 fdirport, fdirvlan, fdirhash, fdircmd; - - /* currently IPv6 is not supported, must be programmed with 0 */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), - input->formatted.src_ip[0]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), - input->formatted.src_ip[1]); - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), - input->formatted.src_ip[2]); - - /* record the source address (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); - - /* record the first 32 bits of the destination address (big-endian) */ - IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); - - /* record source and destination port (little-endian) */ - fdirport = ntohs(input->formatted.dst_port); - fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; - fdirport |= ntohs(input->formatted.src_port); - IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); - - /* record vlan (little-endian) and flex_bytes (big-endian) */ - fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); - fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; - fdirvlan |= ntohs(input->formatted.vlan_id); - IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); - - /* configure FDIRHASH register */ - fdirhash = input->formatted.bkt_hash; - fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); - - /* - * flush all previous writes to make certain registers are - * programmed prior to issuing the command - */ - IXGBE_WRITE_FLUSH(hw); - - /* configure FDIRCMD register */ - fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | - IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; - if (queue == IXGBE_FDIR_DROP_QUEUE) - fdircmd |= IXGBE_FDIRCMD_DROP; - fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; - fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; - fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; - - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); - - return 0; -} - -s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id) -{ - u32 fdirhash; - u32 fdircmd = 0; - u32 retry_count; - s32 err = 0; - - /* configure FDIRHASH register */ - fdirhash = input->formatted.bkt_hash; - fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); - - /* flush hash to HW */ - IXGBE_WRITE_FLUSH(hw); - - /* Query if filter is present */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); - - for (retry_count = 10; retry_count; retry_count--) { - /* allow 10us for query to process */ - udelay(10); - /* verify query completed successfully */ - fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); - if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK)) - break; - } - - if (!retry_count) - err = IXGBE_ERR_FDIR_REINIT_FAILED; - - /* if filter exists in hardware then remove it */ - if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { - IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); - IXGBE_WRITE_FLUSH(hw); - IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, - IXGBE_FDIRCMD_CMD_REMOVE_FLOW); - } - - return err;
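Both the write and erase paths above identify a perfect filter by a single FDIRHASH word that packs the driver-chosen soft_id above the 13-bit bucket hash. The packing is a shift-and-or; the shift of 16 here stands in for IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t soft_id = 42;          /* driver-chosen filter handle */
        uint32_t bkt_hash = 0x0abc;     /* 13-bit hash of the masked tuple */
        uint32_t fdirhash = bkt_hash | ((uint32_t)soft_id << 16);

        printf("FDIRHASH = 0x%08x\n", fdirhash);
        return 0;
}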
-} - -/** - * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register - * @hw: pointer to hardware structure - * @reg: analog register to read - * @val: read value - * - * Performs read operation to Omer analog register specified. - **/ -static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) -{ - u32 core_ctl; - - IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | - (reg << 8)); - IXGBE_WRITE_FLUSH(hw); - udelay(10); - core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); - *val = (u8)core_ctl; - - return 0; -} - -/** - * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register - * @hw: pointer to hardware structure - * @reg: atlas register to write - * @val: value to write - * - * Performs write operation to Omer analog register specified. - **/ -static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) -{ - u32 core_ctl; - - core_ctl = (reg << 8) | val; - IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); - IXGBE_WRITE_FLUSH(hw); - udelay(10); - - return 0; -} - -/** - * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure - * - * Starts the hardware using the generic start_hw function - * and the generation start_hw function. - * Then performs revision-specific operations, if any. - **/ -static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) -{ - s32 ret_val = 0; - - ret_val = ixgbe_start_hw_generic(hw); - if (ret_val != 0) - goto out; - - ret_val = ixgbe_start_hw_gen2(hw); - if (ret_val != 0) - goto out; - - /* We need to run link autotry after the driver loads */ - hw->mac.autotry_restart = true; - hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE; - - if (ret_val == 0) - ret_val = ixgbe_verify_fw_version_82599(hw); -out: - return ret_val; -} - -/** - * ixgbe_identify_phy_82599 - Get physical layer module - * @hw: pointer to hardware structure - * - * Determines the physical layer module found on the current adapter. - * If PHY already detected, maintains current PHY type in hw struct, - * otherwise executes the PHY detection routine. - **/ -static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_PHY_ADDR_INVALID; - - /* Detect PHY if not unknown - returns success if already detected. */ - status = ixgbe_identify_phy_generic(hw); - if (status != 0) { - /* 82599 10GBASE-T requires an external PHY */ - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) - goto out; - else - status = ixgbe_identify_sfp_module_generic(hw); - } - - /* Set PHY type none if no PHY detected */ - if (hw->phy.type == ixgbe_phy_unknown) { - hw->phy.type = ixgbe_phy_none; - status = 0; - } - - /* Return error if SFP module has been detected but is not supported */ - if (hw->phy.type == ixgbe_phy_sfp_unsupported) - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - -out: - return status; -} - -/** - * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type - * @hw: pointer to hardware structure - * - * Determines physical layer capabilities of the current configuration. 
- **/ -static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) -{ - u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; - u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; - u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; - u16 ext_ability = 0; - u8 comp_codes_10g = 0; - u8 comp_codes_1g = 0; - - hw->phy.ops.identify(hw); - - switch (hw->phy.type) { - case ixgbe_phy_tn: - case ixgbe_phy_aq: - case ixgbe_phy_cu_unknown: - hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, - &ext_ability); - if (ext_ability & MDIO_PMA_EXTABLE_10GBT) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; - if (ext_ability & MDIO_PMA_EXTABLE_1000BT) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; - if (ext_ability & MDIO_PMA_EXTABLE_100BTX) - physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; - goto out; - default: - break; - } - - switch (autoc & IXGBE_AUTOC_LMS_MASK) { - case IXGBE_AUTOC_LMS_1G_AN: - case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: - if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | - IXGBE_PHYSICAL_LAYER_1000BASE_BX; - goto out; - } else - /* SFI mode so read SFP module */ - goto sfp_check; - break; - case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: - if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; - else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; - else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; - goto out; - break; - case IXGBE_AUTOC_LMS_10G_SERIAL: - if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; - goto out; - } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) - goto sfp_check; - break; - case IXGBE_AUTOC_LMS_KX4_KX_KR: - case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: - if (autoc & IXGBE_AUTOC_KX_SUPP) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; - if (autoc & IXGBE_AUTOC_KX4_SUPP) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; - if (autoc & IXGBE_AUTOC_KR_SUPP) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; - goto out; - break; - default: - goto out; - break; - } - -sfp_check: - /* SFP check must be done last since DA modules are sometimes used to - * test KR mode - we need to id KR mode correctly before SFP module. 
- * Call identify_sfp because the pluggable module may have changed */ - hw->phy.ops.identify_sfp(hw); - if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) - goto out; - - switch (hw->phy.type) { - case ixgbe_phy_sfp_passive_tyco: - case ixgbe_phy_sfp_passive_unknown: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; - break; - case ixgbe_phy_sfp_ftl_active: - case ixgbe_phy_sfp_active_unknown: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; - break; - case ixgbe_phy_sfp_avago: - case ixgbe_phy_sfp_ftl: - case ixgbe_phy_sfp_intel: - case ixgbe_phy_sfp_unknown: - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); - if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; - else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; - else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; - break; - default: - break; - } - -out: - return physical_layer; -} - -/** - * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 - * @hw: pointer to hardware structure - * @regval: register value to write to RXCTRL - * - * Enables the Rx DMA unit for 82599 - **/ -static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) -{ -#define IXGBE_MAX_SECRX_POLL 30 - int i; - int secrxreg; - - /* - * Workaround for 82599 silicon errata when enabling the Rx datapath. - * If traffic is incoming before we enable the Rx unit, it could hang - * the Rx DMA unit. Therefore, make sure the security engine is - * completely disabled prior to enabling the Rx unit. - */ - secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); - secrxreg |= IXGBE_SECRXCTRL_RX_DIS; - IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); - for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { - secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); - if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) - break; - else - /* Use interrupt-safe sleep just in case */ - udelay(10); - } - - /* For informational purposes only */ - if (i >= IXGBE_MAX_SECRX_POLL) - hw_dbg(hw, "Rx unit being enabled before security " - "path fully disabled. Continuing with init.\n"); - - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); - secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); - secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; - IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** - * ixgbe_verify_fw_version_82599 - verify fw version for 82599 - * @hw: pointer to hardware structure - * - * Verifies that the installed firmware version is 0.6 or higher - * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. - * - * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or - * if the FW version is not supported.
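The version check that follows is a two-hop pointer walk through the EEPROM word map, where 0x0000 and 0xFFFF both mean "pointer not programmed" and abort the walk. A sketch over an invented word array; the offsets and layout are made up, only the validation pattern matches the driver:

#include <stdint.h>
#include <stdio.h>

static const uint16_t eeprom[16] = {
        [0x0] = 0x0004,                 /* FW pointer -> offset 4 */
        [0x4] = 0x0008,                 /* config pointer -> offset 8 */
        [0x8 + 0x7] = 0x0006,           /* version word inside the block */
};

static int valid(uint16_t p)
{
        return p != 0x0000 && p != 0xffff;      /* unprogrammed sentinels */
}

int main(void)
{
        uint16_t fw = eeprom[0x0];      /* hop 1: FW module block */
        if (!valid(fw))
                return 1;
        uint16_t cfg = eeprom[fw];      /* hop 2: config block */
        if (!valid(cfg))
                return 1;
        uint16_t ver = eeprom[cfg + 0x7];
        printf("fw version 0x%x -> %s\n", ver, ver > 0x5 ? "ok" : "too old");
        return 0;
}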
- **/ -static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_EEPROM_VERSION; - u16 fw_offset, fw_ptp_cfg_offset; - u16 fw_version = 0; - - /* firmware check is only necessary for SFI devices */ - if (hw->phy.media_type != ixgbe_media_type_fiber) { - status = 0; - goto fw_version_out; - } - - /* get the offset to the Firmware Module block */ - hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); - - if ((fw_offset == 0) || (fw_offset == 0xFFFF)) - goto fw_version_out; - - /* get the offset to the Pass Through Patch Configuration block */ - hw->eeprom.ops.read(hw, (fw_offset + - IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), - &fw_ptp_cfg_offset); - - if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) - goto fw_version_out; - - /* get the firmware version */ - hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + - IXGBE_FW_PATCH_VERSION_4), - &fw_version); - - if (fw_version > 0x5) - status = 0; - -fw_version_out: - return status; -} - -/** - * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. - * @hw: pointer to hardware structure - * - * Returns true if the LESM FW module is present and enabled. Otherwise - * returns false. Smart Speed must be disabled if LESM FW module is enabled. - **/ -static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) -{ - bool lesm_enabled = false; - u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; - s32 status; - - /* get the offset to the Firmware Module block */ - status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); - - if ((status != 0) || - (fw_offset == 0) || (fw_offset == 0xFFFF)) - goto out; - - /* get the offset to the LESM Parameters block */ - status = hw->eeprom.ops.read(hw, (fw_offset + - IXGBE_FW_LESM_PARAMETERS_PTR), - &fw_lesm_param_offset); - - if ((status != 0) || - (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) - goto out; - - /* get the lesm state word */ - status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + - IXGBE_FW_LESM_STATE_1), - &fw_lesm_state); - - if ((status == 0) && - (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) - lesm_enabled = true; - -out: - return lesm_enabled; -} - -/** - * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using - * fastest available method - * - * @hw: pointer to hardware structure - * @offset: offset of word in EEPROM to read - * @words: number of words - * @data: word(s) read from the EEPROM - * - * Retrieves 16 bit word(s) read from EEPROM - **/ -static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - s32 ret_val = IXGBE_ERR_CONFIG; - - /* - * If EEPROM is detected and can be addressed using 14 bits, - * use EERD otherwise use bit bang - */ - if ((eeprom->type == ixgbe_eeprom_spi) && - (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) - ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, - data); - else - ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, - words, - data); - - return ret_val; -} - -/** - * ixgbe_read_eeprom_82599 - Read EEPROM word using - * fastest available method - * - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM - **/ -static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, - u16 offset, u16 *data) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - s32 ret_val = IXGBE_ERR_CONFIG; - - /* - * If EEPROM is detected and can be addressed using 14 bits, - * 
use EERD otherwise use bit bang - */ - if ((eeprom->type == ixgbe_eeprom_spi) && - (offset <= IXGBE_EERD_MAX_ADDR)) - ret_val = ixgbe_read_eerd_generic(hw, offset, data); - else - ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); - - return ret_val; -} - -static struct ixgbe_mac_operations mac_ops_82599 = { - .init_hw = &ixgbe_init_hw_generic, - .reset_hw = &ixgbe_reset_hw_82599, - .start_hw = &ixgbe_start_hw_82599, - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, - .get_media_type = &ixgbe_get_media_type_82599, - .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599, - .enable_rx_dma = &ixgbe_enable_rx_dma_82599, - .get_mac_addr = &ixgbe_get_mac_addr_generic, - .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, - .get_device_caps = &ixgbe_get_device_caps_generic, - .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, - .stop_adapter = &ixgbe_stop_adapter_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, - .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, - .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, - .setup_link = &ixgbe_setup_mac_link_82599, - .set_rxpba = &ixgbe_set_rxpba_generic, - .check_link = &ixgbe_check_mac_link_generic, - .get_link_capabilities = &ixgbe_get_link_capabilities_82599, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, - .blink_led_start = &ixgbe_blink_led_start_generic, - .blink_led_stop = &ixgbe_blink_led_stop_generic, - .set_rar = &ixgbe_set_rar_generic, - .clear_rar = &ixgbe_clear_rar_generic, - .set_vmdq = &ixgbe_set_vmdq_generic, - .clear_vmdq = &ixgbe_clear_vmdq_generic, - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, - .enable_mc = &ixgbe_enable_mc_generic, - .disable_mc = &ixgbe_disable_mc_generic, - .clear_vfta = &ixgbe_clear_vfta_generic, - .set_vfta = &ixgbe_set_vfta_generic, - .fc_enable = &ixgbe_fc_enable_generic, - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, - .init_uta_tables = &ixgbe_init_uta_tables_generic, - .setup_sfp = &ixgbe_setup_sfp_modules_82599, - .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, - .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, - .release_swfw_sync = &ixgbe_release_swfw_sync, - -}; - -static struct ixgbe_eeprom_operations eeprom_ops_82599 = { - .init_params = &ixgbe_init_eeprom_params_generic, - .read = &ixgbe_read_eeprom_82599, - .read_buffer = &ixgbe_read_eeprom_buffer_82599, - .write = &ixgbe_write_eeprom_generic, - .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, - .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, - .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, - .update_checksum = &ixgbe_update_eeprom_checksum_generic, -}; - -static struct ixgbe_phy_operations phy_ops_82599 = { - .identify = &ixgbe_identify_phy_82599, - .identify_sfp = &ixgbe_identify_sfp_module_generic, - .init = &ixgbe_init_phy_ops_82599, - .reset = &ixgbe_reset_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .setup_link = &ixgbe_setup_phy_link_generic, - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, - .read_i2c_byte = &ixgbe_read_i2c_byte_generic, - .write_i2c_byte = &ixgbe_write_i2c_byte_generic, - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, - .check_overtemp = &ixgbe_tn_check_overtemp, -}; - -struct ixgbe_info ixgbe_82599_info = { - .mac = 
ixgbe_mac_82599EB, - .get_invariants = &ixgbe_get_invariants_82599, - .mac_ops = &mac_ops_82599, - .eeprom_ops = &eeprom_ops_82599, - .phy_ops = &phy_ops_82599, - .mbx_ops = &mbx_ops_generic, -}; diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c deleted file mode 100644 index fc1375f..0000000 --- a/drivers/net/ixgbe/ixgbe_common.c +++ /dev/null @@ -1,3510 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include -#include -#include - -#include "ixgbe.h" -#include "ixgbe_common.h" -#include "ixgbe_phy.h" - -static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); -static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); -static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); -static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); -static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); -static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, - u16 count); -static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); -static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); -static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); -static void ixgbe_release_eeprom(struct ixgbe_hw *hw); - -static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); -static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw); -static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw); -static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw); -static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); -static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, - u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); -static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); -static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); -static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, - u16 offset); - -/** - * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure - * - * Starts the hardware by filling the bus info structure and media type, clears - * all on chip counters, initializes receive address registers, multicast - * table, VLAN filter table, calls routine to set up link and flow control - * settings, and leaves transmit and receive 
units disabled and uninitialized - **/ -s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) -{ - u32 ctrl_ext; - - /* Set the media type */ - hw->phy.media_type = hw->mac.ops.get_media_type(hw); - - /* Identify the PHY */ - hw->phy.ops.identify(hw); - - /* Clear the VLAN filter table */ - hw->mac.ops.clear_vfta(hw); - - /* Clear statistics registers */ - hw->mac.ops.clear_hw_cntrs(hw); - - /* Set No Snoop Disable */ - ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); - ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; - IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); - IXGBE_WRITE_FLUSH(hw); - - /* Setup flow control */ - ixgbe_setup_fc(hw, 0); - - /* Clear adapter stopped flag */ - hw->adapter_stopped = false; - - return 0; -} - -/** - * ixgbe_start_hw_gen2 - Init sequence for common device family - * @hw: pointer to hw structure - * - * Performs the init sequence common to the second generation - * of 10 GbE devices. - * Devices in the second generation: - * 82599 - * X540 - **/ -s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) -{ - u32 i; - u32 regval; - - /* Clear the rate limiters */ - for (i = 0; i < hw->mac.max_tx_queues; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); - } - IXGBE_WRITE_FLUSH(hw); - - /* Disable relaxed ordering */ - for (i = 0; i < hw->mac.max_tx_queues; i++) { - regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); - regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); - } - - for (i = 0; i < hw->mac.max_rx_queues; i++) { - regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | - IXGBE_DCA_RXCTRL_DESC_HSRO_EN); - IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); - } - - return 0; -} - -/** - * ixgbe_init_hw_generic - Generic hardware initialization - * @hw: pointer to hardware structure - * - * Initialize the hardware by resetting the hardware, filling the bus info - * structure and media type, clears all on chip counters, initializes receive - * address registers, multicast table, VLAN filter table, calls routine to set - * up link and flow control settings, and leaves transmit and receive units - * disabled and uninitialized - **/ -s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) -{ - s32 status; - - /* Reset the hardware */ - status = hw->mac.ops.reset_hw(hw); - - if (status == 0) { - /* Start the HW */ - status = hw->mac.ops.start_hw(hw); - } - - return status; -} - -/** - * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters - * @hw: pointer to hardware structure - * - * Clears all hardware statistics counters by reading them from the hardware - * Statistics counters are clear on read. 
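- *
- * Because the counters are clear-on-read, a bare read with the result
- * discarded is enough to zero one; e.g. the IXGBE_READ_REG(hw,
- * IXGBE_CRCERRS) below resets the CRC error count without storing it.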
- **/ -s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) -{ - u16 i = 0; - - IXGBE_READ_REG(hw, IXGBE_CRCERRS); - IXGBE_READ_REG(hw, IXGBE_ILLERRC); - IXGBE_READ_REG(hw, IXGBE_ERRBC); - IXGBE_READ_REG(hw, IXGBE_MSPDC); - for (i = 0; i < 8; i++) - IXGBE_READ_REG(hw, IXGBE_MPC(i)); - - IXGBE_READ_REG(hw, IXGBE_MLFC); - IXGBE_READ_REG(hw, IXGBE_MRFC); - IXGBE_READ_REG(hw, IXGBE_RLEC); - IXGBE_READ_REG(hw, IXGBE_LXONTXC); - IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); - if (hw->mac.type >= ixgbe_mac_82599EB) { - IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); - IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); - } else { - IXGBE_READ_REG(hw, IXGBE_LXONRXC); - IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); - } - - for (i = 0; i < 8; i++) { - IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); - IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); - if (hw->mac.type >= ixgbe_mac_82599EB) { - IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); - IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); - } else { - IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); - IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); - } - } - if (hw->mac.type >= ixgbe_mac_82599EB) - for (i = 0; i < 8; i++) - IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); - IXGBE_READ_REG(hw, IXGBE_PRC64); - IXGBE_READ_REG(hw, IXGBE_PRC127); - IXGBE_READ_REG(hw, IXGBE_PRC255); - IXGBE_READ_REG(hw, IXGBE_PRC511); - IXGBE_READ_REG(hw, IXGBE_PRC1023); - IXGBE_READ_REG(hw, IXGBE_PRC1522); - IXGBE_READ_REG(hw, IXGBE_GPRC); - IXGBE_READ_REG(hw, IXGBE_BPRC); - IXGBE_READ_REG(hw, IXGBE_MPRC); - IXGBE_READ_REG(hw, IXGBE_GPTC); - IXGBE_READ_REG(hw, IXGBE_GORCL); - IXGBE_READ_REG(hw, IXGBE_GORCH); - IXGBE_READ_REG(hw, IXGBE_GOTCL); - IXGBE_READ_REG(hw, IXGBE_GOTCH); - for (i = 0; i < 8; i++) - IXGBE_READ_REG(hw, IXGBE_RNBC(i)); - IXGBE_READ_REG(hw, IXGBE_RUC); - IXGBE_READ_REG(hw, IXGBE_RFC); - IXGBE_READ_REG(hw, IXGBE_ROC); - IXGBE_READ_REG(hw, IXGBE_RJC); - IXGBE_READ_REG(hw, IXGBE_MNGPRC); - IXGBE_READ_REG(hw, IXGBE_MNGPDC); - IXGBE_READ_REG(hw, IXGBE_MNGPTC); - IXGBE_READ_REG(hw, IXGBE_TORL); - IXGBE_READ_REG(hw, IXGBE_TORH); - IXGBE_READ_REG(hw, IXGBE_TPR); - IXGBE_READ_REG(hw, IXGBE_TPT); - IXGBE_READ_REG(hw, IXGBE_PTC64); - IXGBE_READ_REG(hw, IXGBE_PTC127); - IXGBE_READ_REG(hw, IXGBE_PTC255); - IXGBE_READ_REG(hw, IXGBE_PTC511); - IXGBE_READ_REG(hw, IXGBE_PTC1023); - IXGBE_READ_REG(hw, IXGBE_PTC1522); - IXGBE_READ_REG(hw, IXGBE_MPTC); - IXGBE_READ_REG(hw, IXGBE_BPTC); - for (i = 0; i < 16; i++) { - IXGBE_READ_REG(hw, IXGBE_QPRC(i)); - IXGBE_READ_REG(hw, IXGBE_QPTC(i)); - if (hw->mac.type >= ixgbe_mac_82599EB) { - IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); - IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); - IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); - IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); - IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); - } else { - IXGBE_READ_REG(hw, IXGBE_QBRC(i)); - IXGBE_READ_REG(hw, IXGBE_QBTC(i)); - } - } - - if (hw->mac.type == ixgbe_mac_X540) { - if (hw->phy.id == 0) - hw->phy.ops.identify(hw); - hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i); - hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i); - hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i); - hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i); - } - - return 0; -} - -/** - * ixgbe_read_pba_string_generic - Reads part number string from EEPROM - * @hw: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length - * - * Reads the part number string from the EEPROM. 
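- *
- * Worked example of the legacy (non-pointer) decode below, with made-up
- * words: if the word at IXGBE_PBANUM0_PTR reads 0x1234 and the word at
- * IXGBE_PBANUM1_PTR reads 0x5678, the nibbles are hex-encoded into the
- * 11-byte string "123456-078" (the character after the '-' is always
- * '0' in this format).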
- **/ -s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, - u32 pba_num_size) -{ - s32 ret_val; - u16 data; - u16 pba_ptr; - u16 offset; - u16 length; - - if (pba_num == NULL) { - hw_dbg(hw, "PBA string buffer was null\n"); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); - if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); - return ret_val; - } - - ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); - if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); - return ret_val; - } - - /* - * if data is not ptr guard the PBA must be in legacy format which - * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (data != IXGBE_PBANUM_PTR_GUARD) { - hw_dbg(hw, "NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { - hw_dbg(hw, "PBA string buffer too small\n"); - return IXGBE_ERR_NO_SPACE; - } - - /* extract hex string from data and pba_ptr */ - pba_num[0] = (data >> 12) & 0xF; - pba_num[1] = (data >> 8) & 0xF; - pba_num[2] = (data >> 4) & 0xF; - pba_num[3] = data & 0xF; - pba_num[4] = (pba_ptr >> 12) & 0xF; - pba_num[5] = (pba_ptr >> 8) & 0xF; - pba_num[6] = '-'; - pba_num[7] = 0; - pba_num[8] = (pba_ptr >> 4) & 0xF; - pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ - pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (pba_num[offset] < 0xA) - pba_num[offset] += '0'; - else if (pba_num[offset] < 0x10) - pba_num[offset] += 'A' - 0xA; - } - - return 0; - } - - ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); - if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); - return ret_val; - } - - if (length == 0xFFFF || length == 0) { - hw_dbg(hw, "NVM PBA number section invalid length\n"); - return IXGBE_ERR_PBA_SECTION; - } - - /* check if pba_num buffer is big enough */ - if (pba_num_size < (((u32)length * 2) - 1)) { - hw_dbg(hw, "PBA string buffer too small\n"); - return IXGBE_ERR_NO_SPACE; - } - - /* trim pba length from start of string */ - pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); - if (ret_val) { - hw_dbg(hw, "NVM Read Error\n"); - return ret_val; - } - pba_num[offset * 2] = (u8)(data >> 8); - pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); - } - pba_num[offset * 2] = '\0'; - - return 0; -} - -/** - * ixgbe_get_mac_addr_generic - Generic get MAC address - * @hw: pointer to hardware structure - * @mac_addr: Adapter MAC address - * - * Reads the adapter's MAC address from first Receive Address Register (RAR0) - * A reset of the adapter must be performed prior to calling this function - * in order for the MAC address to have been loaded from the EEPROM into RAR0 - **/ -s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) -{ - u32 rar_high; - u32 rar_low; - u16 i; - - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); - rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); - - for (i = 0; i < 4; i++) - mac_addr[i] = (u8)(rar_low >> (i*8)); - - for (i = 0; i < 2; i++) - mac_addr[i+4] = (u8)(rar_high >> (i*8)); - - return 0; -} - -/** - * ixgbe_get_bus_info_generic - Generic set PCI bus info - * @hw: pointer to hardware structure - * - * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure - **/ -s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter 
*adapter = hw->back;
- struct ixgbe_mac_info *mac = &hw->mac;
- u16 link_status;
-
- hw->bus.type = ixgbe_bus_type_pci_express;
-
- /* Get the negotiated link width and speed from PCI config space */
- pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
- &link_status);
-
- switch (link_status & IXGBE_PCI_LINK_WIDTH) {
- case IXGBE_PCI_LINK_WIDTH_1:
- hw->bus.width = ixgbe_bus_width_pcie_x1;
- break;
- case IXGBE_PCI_LINK_WIDTH_2:
- hw->bus.width = ixgbe_bus_width_pcie_x2;
- break;
- case IXGBE_PCI_LINK_WIDTH_4:
- hw->bus.width = ixgbe_bus_width_pcie_x4;
- break;
- case IXGBE_PCI_LINK_WIDTH_8:
- hw->bus.width = ixgbe_bus_width_pcie_x8;
- break;
- default:
- hw->bus.width = ixgbe_bus_width_unknown;
- break;
- }
-
- switch (link_status & IXGBE_PCI_LINK_SPEED) {
- case IXGBE_PCI_LINK_SPEED_2500:
- hw->bus.speed = ixgbe_bus_speed_2500;
- break;
- case IXGBE_PCI_LINK_SPEED_5000:
- hw->bus.speed = ixgbe_bus_speed_5000;
- break;
- default:
- hw->bus.speed = ixgbe_bus_speed_unknown;
- break;
- }
-
- mac->ops.set_lan_id(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
- * @hw: pointer to the HW structure
- *
- * Determines the LAN function id by reading memory-mapped registers
- * and swaps the port value if requested.
- **/
-void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
-{
- struct ixgbe_bus_info *bus = &hw->bus;
- u32 reg;
-
- reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
- bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
- bus->lan_id = bus->func;
-
- /* check for a port swap */
- reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
- if (reg & IXGBE_FACTPS_LFS)
- bus->func ^= 0x1;
-}
-
-/**
- * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
- * @hw: pointer to hardware structure
- *
- * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
- * disables transmit and receive units. The adapter_stopped flag is used by
- * the shared code and drivers to determine if the adapter is in a stopped
- * state and should not touch the hardware.
- **/
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
-{
- u32 number_of_queues;
- u32 reg_val;
- u16 i;
-
- /*
- * Set the adapter_stopped flag so other driver functions stop touching
- * the hardware
- */
- hw->adapter_stopped = true;
-
- /* Disable the receive unit */
- reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- reg_val &= ~(IXGBE_RXCTRL_RXEN);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
- IXGBE_WRITE_FLUSH(hw);
- usleep_range(2000, 4000);
-
- /* Clear interrupt mask to stop interrupts from being generated */
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
-
- /* Clear any pending interrupts */
- IXGBE_READ_REG(hw, IXGBE_EICR);
-
- /* Disable the transmit unit. Each queue must be disabled. */
- number_of_queues = hw->mac.max_tx_queues;
- for (i = 0; i < number_of_queues; i++) {
- reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
- if (reg_val & IXGBE_TXDCTL_ENABLE) {
- reg_val &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
- }
- }
-
- /*
- * Prevent the PCI-E bus from hanging by disabling PCI-E master
- * access and verify no pending requests
- */
- ixgbe_disable_pcie_master(hw);
-
- return 0;
-}
-
-/**
- * ixgbe_led_on_generic - Turns on the software controllable LEDs.
- * @hw: pointer to hardware structure
- * @index: led number to turn on
- **/
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
-{
- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
- /* To turn on the LED, set mode to ON.
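- * LEDCTL packs an independent mode field for each LED index, so the
- * read-modify-write below only touches the field selected by 'index'.
- * Illustrative, assuming the IXGBE_LED_MODE_SHIFT/MASK macros select a
- * 4-bit field spaced 8 bits apart: index 1 would be bits 11:8, and
- * writing IXGBE_LED_ON there forces that LED on regardless of the
- * default link/activity behaviour.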
*/ - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** - * ixgbe_led_off_generic - Turns off the software controllable LEDs. - * @hw: pointer to hardware structure - * @index: led number to turn off - **/ -s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) -{ - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - /* To turn off the LED, set mode to OFF. */ - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** - * ixgbe_init_eeprom_params_generic - Initialize EEPROM params - * @hw: pointer to hardware structure - * - * Initializes the EEPROM parameters ixgbe_eeprom_info within the - * ixgbe_hw struct in order to set up EEPROM access. - **/ -s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - u32 eec; - u16 eeprom_size; - - if (eeprom->type == ixgbe_eeprom_uninitialized) { - eeprom->type = ixgbe_eeprom_none; - /* Set default semaphore delay to 10ms which is a well - * tested value */ - eeprom->semaphore_delay = 10; - /* Clear EEPROM page size, it will be initialized as needed */ - eeprom->word_page_size = 0; - - /* - * Check for EEPROM present first. - * If not present leave as none - */ - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - if (eec & IXGBE_EEC_PRES) { - eeprom->type = ixgbe_eeprom_spi; - - /* - * SPI EEPROM is assumed here. This code would need to - * change if a future EEPROM is not SPI. - */ - eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> - IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); - } - - if (eec & IXGBE_EEC_ADDR_SIZE) - eeprom->address_bits = 16; - else - eeprom->address_bits = 8; - hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: " - "%d\n", eeprom->type, eeprom->word_size, - eeprom->address_bits); - } - - return 0; -} - -/** - * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to write - * @words: number of words - * @data: 16 bit word(s) to write to EEPROM - * - * Reads 16 bit word(s) from EEPROM through bit-bang method - **/ -s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - s32 status = 0; - u16 i, count; - - hw->eeprom.ops.init_params(hw); - - if (words == 0) { - status = IXGBE_ERR_INVALID_ARGUMENT; - goto out; - } - - if (offset + words > hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - goto out; - } - - /* - * The EEPROM page size cannot be queried from the chip. We do lazy - * initialization. It is worth to do that when we write large buffer. - */ - if ((hw->eeprom.word_page_size == 0) && - (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) - ixgbe_detect_eeprom_page_size_generic(hw, offset); - - /* - * We cannot hold synchronization semaphores for too long - * to avoid other entity starvation. However it is more efficient - * to read in bursts than synchronizing access for each word. - */ - for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { - count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
- IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); - status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, - count, &data[i]); - - if (status != 0) - break; - } - -out: - return status; -} - -/** - * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be written to - * @words: number of word(s) - * @data: 16 bit word(s) to be written to the EEPROM - * - * If ixgbe_eeprom_update_checksum is not called after this function, the - * EEPROM will most likely contain an invalid checksum. - **/ -static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - s32 status; - u16 word; - u16 page_size; - u16 i; - u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; - - /* Prepare the EEPROM for writing */ - status = ixgbe_acquire_eeprom(hw); - - if (status == 0) { - if (ixgbe_ready_eeprom(hw) != 0) { - ixgbe_release_eeprom(hw); - status = IXGBE_ERR_EEPROM; - } - } - - if (status == 0) { - for (i = 0; i < words; i++) { - ixgbe_standby_eeprom(hw); - - /* Send the WRITE ENABLE command (8 bit opcode ) */ - ixgbe_shift_out_eeprom_bits(hw, - IXGBE_EEPROM_WREN_OPCODE_SPI, - IXGBE_EEPROM_OPCODE_BITS); - - ixgbe_standby_eeprom(hw); - - /* - * Some SPI eeproms use the 8th address bit embedded - * in the opcode - */ - if ((hw->eeprom.address_bits == 8) && - ((offset + i) >= 128)) - write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; - - /* Send the Write command (8-bit opcode + addr) */ - ixgbe_shift_out_eeprom_bits(hw, write_opcode, - IXGBE_EEPROM_OPCODE_BITS); - ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), - hw->eeprom.address_bits); - - page_size = hw->eeprom.word_page_size; - - /* Send the data in burst via SPI*/ - do { - word = data[i]; - word = (word >> 8) | (word << 8); - ixgbe_shift_out_eeprom_bits(hw, word, 16); - - if (page_size == 0) - break; - - /* do not wrap around page */ - if (((offset + i) & (page_size - 1)) == - (page_size - 1)) - break; - } while (++i < words); - - ixgbe_standby_eeprom(hw); - usleep_range(10000, 20000); - } - /* Done with writing - release the EEPROM */ - ixgbe_release_eeprom(hw); - } - - return status; -} - -/** - * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be written to - * @data: 16 bit word to be written to the EEPROM - * - * If ixgbe_eeprom_update_checksum is not called after this function, the - * EEPROM will most likely contain an invalid checksum. 
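- *
- * A typical caller therefore pairs the write with a checksum update,
- * roughly (sketch, error handling omitted):
- *
- *	hw->eeprom.ops.write(hw, offset, data);
- *	hw->eeprom.ops.update_checksum(hw);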
- **/ -s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) -{ - s32 status; - - hw->eeprom.ops.init_params(hw); - - if (offset >= hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - goto out; - } - - status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); - -out: - return status; -} - -/** - * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @words: number of word(s) - * @data: read 16 bit words(s) from EEPROM - * - * Reads 16 bit word(s) from EEPROM through bit-bang method - **/ -s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - s32 status = 0; - u16 i, count; - - hw->eeprom.ops.init_params(hw); - - if (words == 0) { - status = IXGBE_ERR_INVALID_ARGUMENT; - goto out; - } - - if (offset + words > hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - goto out; - } - - /* - * We cannot hold synchronization semaphores for too long - * to avoid other entity starvation. However it is more efficient - * to read in bursts than synchronizing access for each word. - */ - for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { - count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? - IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); - - status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, - count, &data[i]); - - if (status != 0) - break; - } - -out: - return status; -} - -/** - * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @words: number of word(s) - * @data: read 16 bit word(s) from EEPROM - * - * Reads 16 bit word(s) from EEPROM through bit-bang method - **/ -static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - s32 status; - u16 word_in; - u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; - u16 i; - - /* Prepare the EEPROM for reading */ - status = ixgbe_acquire_eeprom(hw); - - if (status == 0) { - if (ixgbe_ready_eeprom(hw) != 0) { - ixgbe_release_eeprom(hw); - status = IXGBE_ERR_EEPROM; - } - } - - if (status == 0) { - for (i = 0; i < words; i++) { - ixgbe_standby_eeprom(hw); - /* - * Some SPI eeproms use the 8th address bit embedded - * in the opcode - */ - if ((hw->eeprom.address_bits == 8) && - ((offset + i) >= 128)) - read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; - - /* Send the READ command (opcode + addr) */ - ixgbe_shift_out_eeprom_bits(hw, read_opcode, - IXGBE_EEPROM_OPCODE_BITS); - ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), - hw->eeprom.address_bits); - - /* Read the data. 
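- * The SPI part shifts each word out MSB-first, so the two bytes
- * arrive swapped relative to the u16 layout the caller expects;
- * the (word_in >> 8) | (word_in << 8) below swaps them back,
- * mirroring the swap done on the write path.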
*/ - word_in = ixgbe_shift_in_eeprom_bits(hw, 16); - data[i] = (word_in >> 8) | (word_in << 8); - } - - /* End this read operation */ - ixgbe_release_eeprom(hw); - } - - return status; -} - -/** - * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be read - * @data: read 16 bit value from EEPROM - * - * Reads 16 bit value from EEPROM through bit-bang method - **/ -s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 *data) -{ - s32 status; - - hw->eeprom.ops.init_params(hw); - - if (offset >= hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - goto out; - } - - status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); - -out: - return status; -} - -/** - * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @words: number of word(s) - * @data: 16 bit word(s) from the EEPROM - * - * Reads a 16 bit word(s) from the EEPROM using the EERD register. - **/ -s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - u32 eerd; - s32 status = 0; - u32 i; - - hw->eeprom.ops.init_params(hw); - - if (words == 0) { - status = IXGBE_ERR_INVALID_ARGUMENT; - goto out; - } - - if (offset >= hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - goto out; - } - - for (i = 0; i < words; i++) { - eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) + - IXGBE_EEPROM_RW_REG_START; - - IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); - status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); - - if (status == 0) { - data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> - IXGBE_EEPROM_RW_REG_DATA); - } else { - hw_dbg(hw, "Eeprom read timed out\n"); - goto out; - } - } -out: - return status; -} - -/** - * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size - * @hw: pointer to hardware structure - * @offset: offset within the EEPROM to be used as a scratch pad - * - * Discover EEPROM page size by writing marching data at given offset. - * This function is called only when we are writing a new large buffer - * at given offset so the data would be overwritten anyway. - **/ -static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, - u16 offset) -{ - u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; - s32 status = 0; - u16 i; - - for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) - data[i] = i; - - hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; - status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, - IXGBE_EEPROM_PAGE_SIZE_MAX, data); - hw->eeprom.word_page_size = 0; - if (status != 0) - goto out; - - status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); - if (status != 0) - goto out; - - /* - * When writing in burst more than the actual page size - * EEPROM address wraps around current page. - */ - hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; - - hw_dbg(hw, "Detected EEPROM page size = %d words.", - hw->eeprom.word_page_size); -out: - return status; -} - -/** - * ixgbe_read_eerd_generic - Read EEPROM word using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the EERD register. 
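- *
- * Sketch of the EERD handshake this wraps (as implemented in
- * ixgbe_read_eerd_buffer_generic above): the word address is shifted
- * up by IXGBE_EEPROM_RW_ADDR_SHIFT, IXGBE_EEPROM_RW_REG_START kicks
- * off the read, the done bit is polled, and the result is read back
- * shifted down by IXGBE_EEPROM_RW_REG_DATA.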
- **/ -s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) -{ - return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); -} - -/** - * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @words: number of words - * @data: word(s) write to the EEPROM - * - * Write a 16 bit word(s) to the EEPROM using the EEWR register. - **/ -s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data) -{ - u32 eewr; - s32 status = 0; - u16 i; - - hw->eeprom.ops.init_params(hw); - - if (words == 0) { - status = IXGBE_ERR_INVALID_ARGUMENT; - goto out; - } - - if (offset >= hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - goto out; - } - - for (i = 0; i < words; i++) { - eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | - (data[i] << IXGBE_EEPROM_RW_REG_DATA) | - IXGBE_EEPROM_RW_REG_START; - - status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); - if (status != 0) { - hw_dbg(hw, "Eeprom write EEWR timed out\n"); - goto out; - } - - IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); - - status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); - if (status != 0) { - hw_dbg(hw, "Eeprom write EEWR timed out\n"); - goto out; - } - } - -out: - return status; -} - -/** - * ixgbe_write_eewr_generic - Write EEPROM word using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @data: word write to the EEPROM - * - * Write a 16 bit word to the EEPROM using the EEWR register. - **/ -s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) -{ - return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); -} - -/** - * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status - * @hw: pointer to hardware structure - * @ee_reg: EEPROM flag for polling - * - * Polls the status bit (bit 1) of the EERD or EEWR to determine when the - * read or write is done respectively. - **/ -static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) -{ - u32 i; - u32 reg; - s32 status = IXGBE_ERR_EEPROM; - - for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { - if (ee_reg == IXGBE_NVM_POLL_READ) - reg = IXGBE_READ_REG(hw, IXGBE_EERD); - else - reg = IXGBE_READ_REG(hw, IXGBE_EEWR); - - if (reg & IXGBE_EEPROM_RW_REG_DONE) { - status = 0; - break; - } - udelay(5); - } - return status; -} - -/** - * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang - * @hw: pointer to hardware structure - * - * Prepares EEPROM for access using bit-bang method. This function should - * be called before issuing a command to the EEPROM. 
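- *
- * The acquisition is two-level: the software/firmware semaphore
- * (IXGBE_GSSR_EEP_SM) arbitrates between driver and firmware, then
- * EEC.REQ is set and EEC.GNT polled so the hardware hands the SPI
- * pins to software; on failure the request bit is backed out.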
- **/ -static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) -{ - s32 status = 0; - u32 eec; - u32 i; - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) - status = IXGBE_ERR_SWFW_SYNC; - - if (status == 0) { - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - - /* Request EEPROM Access */ - eec |= IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - - for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - if (eec & IXGBE_EEC_GNT) - break; - udelay(5); - } - - /* Release if grant not acquired */ - if (!(eec & IXGBE_EEC_GNT)) { - eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - hw_dbg(hw, "Could not acquire EEPROM grant\n"); - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - status = IXGBE_ERR_EEPROM; - } - - /* Setup EEPROM for Read/Write */ - if (status == 0) { - /* Clear CS and SK */ - eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - IXGBE_WRITE_FLUSH(hw); - udelay(1); - } - } - return status; -} - -/** - * ixgbe_get_eeprom_semaphore - Get hardware semaphore - * @hw: pointer to hardware structure - * - * Sets the hardware semaphores so EEPROM access can occur for bit-bang method - **/ -static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_EEPROM; - u32 timeout = 2000; - u32 i; - u32 swsm; - - /* Get SMBI software semaphore between device drivers first */ - for (i = 0; i < timeout; i++) { - /* - * If the SMBI bit is 0 when we read it, then the bit will be - * set and we have the semaphore - */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); - if (!(swsm & IXGBE_SWSM_SMBI)) { - status = 0; - break; - } - udelay(50); - } - - if (i == timeout) { - hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore " - "not granted.\n"); - /* - * this release is particularly important because our attempts - * above to get the semaphore may have succeeded, and if there - * was a timeout, we should unconditionally clear the semaphore - * bits to free the driver to make progress - */ - ixgbe_release_eeprom_semaphore(hw); - - udelay(50); - /* - * one last try - * If the SMBI bit is 0 when we read it, then the bit will be - * set and we have the semaphore - */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); - if (!(swsm & IXGBE_SWSM_SMBI)) - status = 0; - } - - /* Now get the semaphore between SW/FW through the SWESMBI bit */ - if (status == 0) { - for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); - - /* Set the SW EEPROM semaphore bit to request access */ - swsm |= IXGBE_SWSM_SWESMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); - - /* - * If we set the bit successfully then we got the - * semaphore. - */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); - if (swsm & IXGBE_SWSM_SWESMBI) - break; - - udelay(50); - } - - /* - * Release semaphores and return error if SW EEPROM semaphore - * was not granted because we don't have access to the EEPROM - */ - if (i >= timeout) { - hw_dbg(hw, "SWESMBI Software EEPROM semaphore " - "not granted.\n"); - ixgbe_release_eeprom_semaphore(hw); - status = IXGBE_ERR_EEPROM; - } - } else { - hw_dbg(hw, "Software semaphore SMBI between device drivers " - "not granted.\n"); - } - - return status; -} - -/** - * ixgbe_release_eeprom_semaphore - Release hardware semaphore - * @hw: pointer to hardware structure - * - * This function clears hardware semaphore bits. 
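- *
- * This is the inverse of ixgbe_get_eeprom_semaphore() above, which
- * takes SMBI (arbitration between PCI functions) and then SWESMBI
- * (arbitration between software and firmware); both bits are dropped
- * here with a single SWSM write.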
- **/ -static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) -{ - u32 swsm; - - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); - - /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ - swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_ready_eeprom - Polls for EEPROM ready - * @hw: pointer to hardware structure - **/ -static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) -{ - s32 status = 0; - u16 i; - u8 spi_stat_reg; - - /* - * Read "Status Register" repeatedly until the LSB is cleared. The - * EEPROM will signal that the command has been completed by clearing - * bit 0 of the internal status register. If it's not cleared within - * 5 milliseconds, then error out. - */ - for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { - ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, - IXGBE_EEPROM_OPCODE_BITS); - spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); - if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) - break; - - udelay(5); - ixgbe_standby_eeprom(hw); - } - - /* - * On some parts, SPI write time could vary from 0-20mSec on 3.3V - * devices (and only 0-5mSec on 5V devices) - */ - if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { - hw_dbg(hw, "SPI EEPROM Status error\n"); - status = IXGBE_ERR_EEPROM; - } - - return status; -} - -/** - * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state - * @hw: pointer to hardware structure - **/ -static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) -{ - u32 eec; - - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - - /* Toggle CS to flush commands */ - eec |= IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - IXGBE_WRITE_FLUSH(hw); - udelay(1); - eec &= ~IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - IXGBE_WRITE_FLUSH(hw); - udelay(1); -} - -/** - * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. - * @hw: pointer to hardware structure - * @data: data to send to the EEPROM - * @count: number of bits to shift out - **/ -static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, - u16 count) -{ - u32 eec; - u32 mask; - u32 i; - - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - - /* - * Mask is used to shift "count" bits of "data" out to the EEPROM - * one bit at a time. Determine the starting bit based on count - */ - mask = 0x01 << (count - 1); - - for (i = 0; i < count; i++) { - /* - * A "1" is shifted out to the EEPROM by setting bit "DI" to a - * "1", and then raising and then lowering the clock (the SK - * bit controls the clock input to the EEPROM). A "0" is - * shifted out to the EEPROM by setting "DI" to "0" and then - * raising and then lowering the clock. - */ - if (data & mask) - eec |= IXGBE_EEC_DI; - else - eec &= ~IXGBE_EEC_DI; - - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - IXGBE_WRITE_FLUSH(hw); - - udelay(1); - - ixgbe_raise_eeprom_clk(hw, &eec); - ixgbe_lower_eeprom_clk(hw, &eec); - - /* - * Shift mask to signify next bit of data to shift in to the - * EEPROM - */ - mask = mask >> 1; - } - - /* We leave the "DI" bit set to "0" when we leave this routine. */ - eec &= ~IXGBE_EEC_DI; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM - * @hw: pointer to hardware structure - **/ -static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) -{ - u32 eec; - u32 i; - u16 data = 0; - - /* - * In order to read a register from the EEPROM, we need to shift - * 'count' bits in from the EEPROM. 
Bits are "shifted in" by raising - * the clock input to the EEPROM (setting the SK bit), and then reading - * the value of the "DO" bit. During this "shifting in" process the - * "DI" bit should always be clear. - */ - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - - eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); - - for (i = 0; i < count; i++) { - data = data << 1; - ixgbe_raise_eeprom_clk(hw, &eec); - - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - - eec &= ~(IXGBE_EEC_DI); - if (eec & IXGBE_EEC_DO) - data |= 1; - - ixgbe_lower_eeprom_clk(hw, &eec); - } - - return data; -} - -/** - * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. - * @hw: pointer to hardware structure - * @eec: EEC register's current value - **/ -static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) -{ - /* - * Raise the clock input to the EEPROM - * (setting the SK bit), then delay - */ - *eec = *eec | IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); - IXGBE_WRITE_FLUSH(hw); - udelay(1); -} - -/** - * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. - * @hw: pointer to hardware structure - * @eecd: EECD's current value - **/ -static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) -{ - /* - * Lower the clock input to the EEPROM (clearing the SK bit), then - * delay - */ - *eec = *eec & ~IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); - IXGBE_WRITE_FLUSH(hw); - udelay(1); -} - -/** - * ixgbe_release_eeprom - Release EEPROM, release semaphores - * @hw: pointer to hardware structure - **/ -static void ixgbe_release_eeprom(struct ixgbe_hw *hw) -{ - u32 eec; - - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - - eec |= IXGBE_EEC_CS; /* Pull CS high */ - eec &= ~IXGBE_EEC_SK; /* Lower SCK */ - - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - IXGBE_WRITE_FLUSH(hw); - - udelay(1); - - /* Stop requesting EEPROM access */ - eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - - /* - * Delay before attempt to obtain semaphore again to allow FW - * access. semaphore_delay is in ms we need us for usleep_range - */ - usleep_range(hw->eeprom.semaphore_delay * 1000, - hw->eeprom.semaphore_delay * 2000); -} - -/** - * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum - * @hw: pointer to hardware structure - **/ -u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) -{ - u16 i; - u16 j; - u16 checksum = 0; - u16 length = 0; - u16 pointer = 0; - u16 word = 0; - - /* Include 0x0-0x3F in the checksum */ - for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (hw->eeprom.ops.read(hw, i, &word) != 0) { - hw_dbg(hw, "EEPROM read failed\n"); - break; - } - checksum += word; - } - - /* Include all data from pointers except for the fw pointer */ - for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { - hw->eeprom.ops.read(hw, i, &pointer); - - /* Make sure the pointer seems valid */ - if (pointer != 0xFFFF && pointer != 0) { - hw->eeprom.ops.read(hw, pointer, &length); - - if (length != 0xFFFF && length != 0) { - for (j = pointer+1; j <= pointer+length; j++) { - hw->eeprom.ops.read(hw, j, &word); - checksum += word; - } - } - } - } - - checksum = (u16)IXGBE_EEPROM_SUM - checksum; - - return checksum; -} - -/** - * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum - * @hw: pointer to hardware structure - * @checksum_val: calculated checksum - * - * Performs checksum calculation and validates the EEPROM checksum. If the - * caller does not need checksum_val, the value can be NULL. 
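- *
- * Illustrative arithmetic: the stored checksum at word
- * IXGBE_EEPROM_CHECKSUM is chosen so that it plus the sum of the
- * covered words equals IXGBE_EEPROM_SUM, i.e. checksum =
- * IXGBE_EEPROM_SUM - sum; validation recomputes that value and
- * compares it with the stored word.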
- **/ -s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, - u16 *checksum_val) -{ - s32 status; - u16 checksum; - u16 read_checksum = 0; - - /* - * Read the first word from the EEPROM. If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = hw->eeprom.ops.read(hw, 0, &checksum); - - if (status == 0) { - checksum = hw->eeprom.ops.calc_checksum(hw); - - hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); - - /* - * Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) - status = IXGBE_ERR_EEPROM_CHECKSUM; - - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; - } else { - hw_dbg(hw, "EEPROM read failed\n"); - } - - return status; -} - -/** - * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum - * @hw: pointer to hardware structure - **/ -s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) -{ - s32 status; - u16 checksum; - - /* - * Read the first word from the EEPROM. If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = hw->eeprom.ops.read(hw, 0, &checksum); - - if (status == 0) { - checksum = hw->eeprom.ops.calc_checksum(hw); - status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, - checksum); - } else { - hw_dbg(hw, "EEPROM read failed\n"); - } - - return status; -} - -/** - * ixgbe_validate_mac_addr - Validate MAC address - * @mac_addr: pointer to MAC address. - * - * Tests a MAC address to ensure it is a valid Individual Address - **/ -s32 ixgbe_validate_mac_addr(u8 *mac_addr) -{ - s32 status = 0; - - /* Make sure it is not a multicast address */ - if (IXGBE_IS_MULTICAST(mac_addr)) - status = IXGBE_ERR_INVALID_MAC_ADDR; - /* Not a broadcast address */ - else if (IXGBE_IS_BROADCAST(mac_addr)) - status = IXGBE_ERR_INVALID_MAC_ADDR; - /* Reject the zero address */ - else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && - mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) - status = IXGBE_ERR_INVALID_MAC_ADDR; - - return status; -} - -/** - * ixgbe_set_rar_generic - Set Rx address register - * @hw: pointer to hardware structure - * @index: Receive address register to write - * @addr: Address to put into receive address register - * @vmdq: VMDq "set" or "pool" index - * @enable_addr: set flag that address is active - * - * Puts an ethernet address into a receive address register. - **/ -s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, - u32 enable_addr) -{ - u32 rar_low, rar_high; - u32 rar_entries = hw->mac.num_rar_entries; - - /* Make sure we are using a valid rar index range */ - if (index >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", index); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - /* setup VMDq pool selection before this RAR gets enabled */ - hw->mac.ops.set_vmdq(hw, index, vmdq); - - /* - * HW expects these in little endian so we reverse the byte - * order from network order (big endian) to little endian - */ - rar_low = ((u32)addr[0] | - ((u32)addr[1] << 8) | - ((u32)addr[2] << 16) | - ((u32)addr[3] << 24)); - /* - * Some parts put the VMDq setting in the extra RAH bits, - * so save everything except the lower 16 bits that hold part - * of the address and the address valid bit. 
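- *
- * Worked example of the little-endian packing (MAC address made up):
- * for 00:11:22:33:44:55, rar_low becomes 0x33221100 and the low 16
- * bits of rar_high become 0x5544, with IXGBE_RAH_AV then set when the
- * entry is enabled.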
- */ - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); - rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); - rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); - - if (enable_addr != 0) - rar_high |= IXGBE_RAH_AV; - - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); - - return 0; -} - -/** - * ixgbe_clear_rar_generic - Remove Rx address register - * @hw: pointer to hardware structure - * @index: Receive address register to write - * - * Clears an ethernet address from a receive address register. - **/ -s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) -{ - u32 rar_high; - u32 rar_entries = hw->mac.num_rar_entries; - - /* Make sure we are using a valid rar index range */ - if (index >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", index); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - /* - * Some parts put the VMDq setting in the extra RAH bits, - * so save everything except the lower 16 bits that hold part - * of the address and the address valid bit. - */ - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); - rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); - - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); - - /* clear VMDq pool/queue selection for this RAR */ - hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); - - return 0; -} - -/** - * ixgbe_init_rx_addrs_generic - Initializes receive address filters. - * @hw: pointer to hardware structure - * - * Places the MAC address in receive address register 0 and clears the rest - * of the receive address registers. Clears the multicast table. Assumes - * the receiver is in reset when the routine is called. - **/ -s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) -{ - u32 i; - u32 rar_entries = hw->mac.num_rar_entries; - - /* - * If the current mac address is valid, assume it is a software override - * to the permanent address. - * Otherwise, use the permanent address from the eeprom. - */ - if (ixgbe_validate_mac_addr(hw->mac.addr) == - IXGBE_ERR_INVALID_MAC_ADDR) { - /* Get the MAC address from the RAR0 for later reference */ - hw->mac.ops.get_mac_addr(hw, hw->mac.addr); - - hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); - } else { - /* Setup the receive address. */ - hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); - hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); - - hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); - - /* clear VMDq pool/queue selection for RAR 0 */ - hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); - } - hw->addr_ctrl.overflow_promisc = 0; - - hw->addr_ctrl.rar_used_count = 1; - - /* Zero out the other receive addresses. */ - hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); - for (i = 1; i < rar_entries; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); - } - - /* Clear the MTA */ - hw->addr_ctrl.mta_in_use = 0; - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); - - hw_dbg(hw, " Clearing MTA\n"); - for (i = 0; i < hw->mac.mcft_size; i++) - IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); - - if (hw->mac.ops.init_uta_tables) - hw->mac.ops.init_uta_tables(hw); - - return 0; -} - -/** - * ixgbe_mta_vector - Determines bit-vector in multicast table to set - * @hw: pointer to hardware structure - * @mc_addr: the multicast address - * - * Extracts the 12 bits, from a multicast address, to determine which - * bit-vector to set in the multicast table. 
The hardware uses 12 bits, from - * incoming rx multicast addresses, to determine the bit-vector to check in - * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set - * by the MO field of the MCSTCTRL. The MO field is set during initialization - * to mc_filter_type. - **/ -static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) -{ - u32 vector = 0; - - switch (hw->mac.mc_filter_type) { - case 0: /* use bits [47:36] of the address */ - vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); - break; - case 1: /* use bits [46:35] of the address */ - vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); - break; - case 2: /* use bits [45:34] of the address */ - vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); - break; - case 3: /* use bits [43:32] of the address */ - vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); - break; - default: /* Invalid mc_filter_type */ - hw_dbg(hw, "MC filter type param set incorrectly\n"); - break; - } - - /* vector can only be 12-bits or boundary will be exceeded */ - vector &= 0xFFF; - return vector; -} - -/** - * ixgbe_set_mta - Set bit-vector in multicast table - * @hw: pointer to hardware structure - * @hash_value: Multicast address hash value - * - * Sets the bit-vector in the multicast table. - **/ -static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) -{ - u32 vector; - u32 vector_bit; - u32 vector_reg; - - hw->addr_ctrl.mta_in_use++; - - vector = ixgbe_mta_vector(hw, mc_addr); - hw_dbg(hw, " bit-vector = 0x%03X\n", vector); - - /* - * The MTA is a register array of 128 32-bit registers. It is treated - * like an array of 4096 bits. We want to set bit - * BitArray[vector_value]. So we figure out what register the bit is - * in, read it, OR in the new bit, then write back the new value. The - * register is determined by the upper 7 bits of the vector value and - * the bit within that register are determined by the lower 5 bits of - * the value. - */ - vector_reg = (vector >> 5) & 0x7F; - vector_bit = vector & 0x1F; - hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); -} - -/** - * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses - * @hw: pointer to hardware structure - * @netdev: pointer to net device structure - * - * The given list replaces any existing list. Clears the MC addrs from receive - * address registers and the multicast table. Uses unused receive address - * registers for the first multicast addresses, and hashes the rest into the - * multicast table. - **/ -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, - struct net_device *netdev) -{ - struct netdev_hw_addr *ha; - u32 i; - - /* - * Set the new number of MC addresses that we are being requested to - * use. 
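- *
- * Each address is folded into the 4096-bit MTA by ixgbe_set_mta()
- * above; as a worked example with mc_filter_type 0 (bits [47:36] of
- * the address), 01:00:5e:00:00:01 hashes to vector 0x010, i.e.
- * register 0, bit 16 of the shadow table copied to hardware below.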
- */ - hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); - hw->addr_ctrl.mta_in_use = 0; - - /* Clear mta_shadow */ - hw_dbg(hw, " Clearing MTA\n"); - memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); - - /* Update mta shadow */ - netdev_for_each_mc_addr(ha, netdev) { - hw_dbg(hw, " Adding the multicast addresses:\n"); - ixgbe_set_mta(hw, ha->addr); - } - - /* Enable mta */ - for (i = 0; i < hw->mac.mcft_size; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, - hw->mac.mta_shadow[i]); - - if (hw->addr_ctrl.mta_in_use > 0) - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, - IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); - - hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); - return 0; -} - -/** - * ixgbe_enable_mc_generic - Enable multicast address in RAR - * @hw: pointer to hardware structure - * - * Enables multicast address in RAR and the use of the multicast hash table. - **/ -s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; - - if (a->mta_in_use > 0) - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | - hw->mac.mc_filter_type); - - return 0; -} - -/** - * ixgbe_disable_mc_generic - Disable multicast address in RAR - * @hw: pointer to hardware structure - * - * Disables multicast address in RAR and the use of the multicast hash table. - **/ -s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; - - if (a->mta_in_use > 0) - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); - - return 0; -} - -/** - * ixgbe_fc_enable_generic - Enable flow control - * @hw: pointer to hardware structure - * @packetbuf_num: packet buffer number (0-7) - * - * Enable flow control according to the current settings. - **/ -s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) -{ - s32 ret_val = 0; - u32 mflcn_reg, fccfg_reg; - u32 reg; - u32 rx_pba_size; - u32 fcrtl, fcrth; - -#ifdef CONFIG_DCB - if (hw->fc.requested_mode == ixgbe_fc_pfc) - goto out; - -#endif /* CONFIG_DCB */ - /* Negotiate the fc mode to use */ - ret_val = ixgbe_fc_autoneg(hw); - if (ret_val == IXGBE_ERR_FLOW_CONTROL) - goto out; - - /* Disable any previous flow control settings */ - mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); - mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); - - fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); - fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); - - /* - * The possible values of fc.current_mode are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. -#ifdef CONFIG_DCB - * 4: Priority Flow Control is enabled. -#endif - * other: Invalid. - */ - switch (hw->fc.current_mode) { - case ixgbe_fc_none: - /* - * Flow control is disabled by software override or autoneg. - * The code below will actually disable it in the HW. - */ - break; - case ixgbe_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is - * disabled by software override. Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE. Later, we will - * disable the adapter's ability to send PAUSE frames. 
- */ - mflcn_reg |= IXGBE_MFLCN_RFCE; - break; - case ixgbe_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is - * disabled by software override. - */ - fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; - break; - case ixgbe_fc_full: - /* Flow control (both Rx and Tx) is enabled by SW override. */ - mflcn_reg |= IXGBE_MFLCN_RFCE; - fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; - break; -#ifdef CONFIG_DCB - case ixgbe_fc_pfc: - goto out; - break; -#endif /* CONFIG_DCB */ - default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - ret_val = IXGBE_ERR_CONFIG; - goto out; - break; - } - - /* Set 802.3x based flow control settings. */ - mflcn_reg |= IXGBE_MFLCN_DPF; - IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); - IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); - - rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); - rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; - - fcrth = (rx_pba_size - hw->fc.high_water) << 10; - fcrtl = (rx_pba_size - hw->fc.low_water) << 10; - - if (hw->fc.current_mode & ixgbe_fc_tx_pause) { - fcrth |= IXGBE_FCRTH_FCEN; - if (hw->fc.send_xon) - fcrtl |= IXGBE_FCRTL_XONE; - } - - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); - - /* Configure pause time (2 TCs per register) */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); - if ((packetbuf_num & 1) == 0) - reg = (reg & 0xFFFF0000) | hw->fc.pause_time; - else - reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); - - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); - -out: - return ret_val; -} - -/** - * ixgbe_fc_autoneg - Configure flow control - * @hw: pointer to hardware structure - * - * Compares our advertised flow control capabilities to those advertised by - * our link partner, and determines the proper flow control mode to use. - **/ -s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; - ixgbe_link_speed speed; - bool link_up; - - if (hw->fc.disable_fc_autoneg) - goto out; - - /* - * AN should have completed when the cable was plugged in. - * Look for reasons to bail out. Bail out if: - * - FC autoneg is disabled, or if - * - link is not up. - * - * Since we're being called from an LSC, link is already known to be up. - * So use link_up_wait_to_complete=false. - */ - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!link_up) { - ret_val = IXGBE_ERR_FLOW_CONTROL; - goto out; - } - - switch (hw->phy.media_type) { - /* Autoneg flow control on fiber adapters */ - case ixgbe_media_type_fiber: - if (speed == IXGBE_LINK_SPEED_1GB_FULL) - ret_val = ixgbe_fc_autoneg_fiber(hw); - break; - - /* Autoneg flow control on backplane adapters */ - case ixgbe_media_type_backplane: - ret_val = ixgbe_fc_autoneg_backplane(hw); - break; - - /* Autoneg flow control on copper adapters */ - case ixgbe_media_type_copper: - if (ixgbe_device_supports_autoneg_fc(hw) == 0) - ret_val = ixgbe_fc_autoneg_copper(hw); - break; - - default: - break; - } - -out: - if (ret_val == 0) { - hw->fc.fc_was_autonegged = true; - } else { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - } - return ret_val; -} - -/** - * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber - * @hw: pointer to hardware structure - * - * Enable flow control according on 1 gig fiber. 
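As a rough companion to the flow-control switch above, the following sketch
(assumed names and semantics, not driver code) shows both the mode-to-bit
mapping (RFCE lives in MFLCN, TFCE in FCCFG) and the FCTTV packing of two
16-bit pause times per 32-bit register:

        #include <stdint.h>

        enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

        /* Which directions each mode enables. */
        static void fc_bits(enum fc_mode m, int *rfce, int *tfce)
        {
                *rfce = (m == FC_RX_PAUSE || m == FC_FULL); /* honor PAUSE */
                *tfce = (m == FC_TX_PAUSE || m == FC_FULL); /* send PAUSE */
        }

        /* Even-numbered TCs use the low half of an FCTTV register,
         * odd-numbered TCs the high half. */
        static uint32_t fcttv_pack(uint32_t reg, int tc, uint16_t pause)
        {
                if (tc & 1)
                        return (reg & 0x0000FFFF) | ((uint32_t)pause << 16);
                return (reg & 0xFFFF0000) | pause;
        }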
- **/ -static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) -{ - u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; - s32 ret_val; - - /* - * On multispeed fiber at 1g, bail out if - * - link is up but AN did not complete, or if - * - link is up and AN completed but timed out - */ - - linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); - if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || - ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { - ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; - goto out; - } - - pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); - - ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, - pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, - IXGBE_PCS1GANA_ASM_PAUSE, - IXGBE_PCS1GANA_SYM_PAUSE, - IXGBE_PCS1GANA_ASM_PAUSE); - -out: - return ret_val; -} - -/** - * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 - * @hw: pointer to hardware structure - * - * Enable flow control according to IEEE clause 37. - **/ -static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) -{ - u32 links2, anlp1_reg, autoc_reg, links; - s32 ret_val; - - /* - * On backplane, bail out if - * - backplane autoneg was not completed, or if - * - we are 82599 and link partner is not AN enabled - */ - links = IXGBE_READ_REG(hw, IXGBE_LINKS); - if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; - goto out; - } - - if (hw->mac.type == ixgbe_mac_82599EB) { - links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); - if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; - goto out; - } - } - /* - * Read the 10g AN autoc and LP ability registers and resolve - * local flow control settings accordingly - */ - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); - - ret_val = ixgbe_negotiate_fc(hw, autoc_reg, - anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, - IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); - -out: - return ret_val; -} - -/** - * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 - * @hw: pointer to hardware structure - * - * Enable flow control according to IEEE clause 37. 
- **/ -static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) -{ - u16 technology_ability_reg = 0; - u16 lp_technology_ability_reg = 0; - - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, - &technology_ability_reg); - hw->phy.ops.read_reg(hw, MDIO_AN_LPA, - MDIO_MMD_AN, - &lp_technology_ability_reg); - - return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, - (u32)lp_technology_ability_reg, - IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, - IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); -} - -/** - * ixgbe_negotiate_fc - Negotiate flow control - * @hw: pointer to hardware structure - * @adv_reg: flow control advertised settings - * @lp_reg: link partner's flow control settings - * @adv_sym: symmetric pause bit in advertisement - * @adv_asm: asymmetric pause bit in advertisement - * @lp_sym: symmetric pause bit in link partner advertisement - * @lp_asm: asymmetric pause bit in link partner advertisement - * - * Find the intersection between advertised settings and link partner's - * advertised settings - **/ -static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, - u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) -{ - if ((!(adv_reg)) || (!(lp_reg))) - return IXGBE_ERR_FC_NOT_NEGOTIATED; - - if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { - /* - * Now we need to check if the user selected Rx ONLY - * of pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise RX - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. - */ - if (hw->fc.requested_mode == ixgbe_fc_full) { - hw->fc.current_mode = ixgbe_fc_full; - hw_dbg(hw, "Flow Control = FULL.\n"); - } else { - hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); - } - } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && - (lp_reg & lp_sym) && (lp_reg & lp_asm)) { - hw->fc.current_mode = ixgbe_fc_tx_pause; - hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); - } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && - !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { - hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); - } else { - hw->fc.current_mode = ixgbe_fc_none; - hw_dbg(hw, "Flow Control = NONE.\n"); - } - return 0; -} - -/** - * ixgbe_setup_fc - Set up flow control - * @hw: pointer to hardware structure - * - * Called at init time to set up flow control. - **/ -static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) -{ - s32 ret_val = 0; - u32 reg = 0, reg_bp = 0; - u16 reg_cu = 0; - -#ifdef CONFIG_DCB - if (hw->fc.requested_mode == ixgbe_fc_pfc) { - hw->fc.current_mode = hw->fc.requested_mode; - goto out; - } - -#endif /* CONFIG_DCB */ - /* Validate the packetbuf configuration */ - if (packetbuf_num < 0 || packetbuf_num > 7) { - hw_dbg(hw, "Invalid packet buffer number [%d], expected range " - "is 0-7\n", packetbuf_num); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - - /* - * Validate the water mark configuration. Zero water marks are invalid - * because it causes the controller to just blast out fc packets. - */ - if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { - hw_dbg(hw, "Invalid water mark configuration\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - - /* - * Validate the requested mode. Strict IEEE mode does not allow - * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 
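The intersection logic in ixgbe_negotiate_fc above follows the IEEE 802.3
Annex 28B priority rules. A condensed sketch of the same decision table
(standalone C, not driver code):

        enum fc_res { RES_NONE, RES_RX_PAUSE, RES_TX_PAUSE, RES_FULL };

        static enum fc_res resolve_fc(int l_sym, int l_asm,
                                      int lp_sym, int lp_asm,
                                      int requested_full)
        {
                if (l_sym && lp_sym)           /* both advertise symmetric */
                        return requested_full ? RES_FULL : RES_RX_PAUSE;
                if (!l_sym && l_asm && lp_sym && lp_asm)
                        return RES_TX_PAUSE;   /* we send, partner honors */
                if (l_sym && l_asm && !lp_sym && lp_asm)
                        return RES_RX_PAUSE;   /* partner sends, we honor */
                return RES_NONE;
        }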
- */ - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict " - "IEEE mode\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - - /* - * 10gig parts do not have a word in the EEPROM to determine the - * default flow control setting, so we explicitly set it to full. - */ - if (hw->fc.requested_mode == ixgbe_fc_default) - hw->fc.requested_mode = ixgbe_fc_full; - - /* - * Set up the 1G and 10G flow control advertisement registers so the - * HW will be able to do fc autoneg once the cable is plugged in. If - * we link at 10G, the 1G advertisement is harmless and vice versa. - */ - - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: - case ixgbe_media_type_backplane: - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); - break; - - case ixgbe_media_type_copper: - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, ®_cu); - break; - - default: - ; - } - - /* - * The possible values of fc.requested_mode are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. -#ifdef CONFIG_DCB - * 4: Priority Flow Control is enabled. -#endif - * other: Invalid. - */ - switch (hw->fc.requested_mode) { - case ixgbe_fc_none: - /* Flow control completely disabled by software override. */ - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); - if (hw->phy.media_type == ixgbe_media_type_backplane) - reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | - IXGBE_AUTOC_ASM_PAUSE); - else if (hw->phy.media_type == ixgbe_media_type_copper) - reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); - break; - case ixgbe_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is - * disabled by software override. Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); - if (hw->phy.media_type == ixgbe_media_type_backplane) - reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | - IXGBE_AUTOC_ASM_PAUSE); - else if (hw->phy.media_type == ixgbe_media_type_copper) - reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); - break; - case ixgbe_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is - * disabled by software override. - */ - reg |= (IXGBE_PCS1GANA_ASM_PAUSE); - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); - if (hw->phy.media_type == ixgbe_media_type_backplane) { - reg_bp |= (IXGBE_AUTOC_ASM_PAUSE); - reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE); - } else if (hw->phy.media_type == ixgbe_media_type_copper) { - reg_cu |= (IXGBE_TAF_ASM_PAUSE); - reg_cu &= ~(IXGBE_TAF_SYM_PAUSE); - } - break; - case ixgbe_fc_full: - /* Flow control (both Rx and Tx) is enabled by SW override. 
*/ - reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); - if (hw->phy.media_type == ixgbe_media_type_backplane) - reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | - IXGBE_AUTOC_ASM_PAUSE); - else if (hw->phy.media_type == ixgbe_media_type_copper) - reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); - break; -#ifdef CONFIG_DCB - case ixgbe_fc_pfc: - goto out; - break; -#endif /* CONFIG_DCB */ - default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - ret_val = IXGBE_ERR_CONFIG; - goto out; - break; - } - - if (hw->mac.type != ixgbe_mac_X540) { - /* - * Enable auto-negotiation between the MAC & PHY; - * the MAC will advertise clause 37 flow control. - */ - IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - - /* Disable AN timeout */ - if (hw->fc.strict_ieee) - reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; - - IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); - hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); - } - - /* - * AUTOC restart handles negotiation of 1G and 10G on backplane - * and copper. There is no need to set the PCS1GCTL register. - * - */ - if (hw->phy.media_type == ixgbe_media_type_backplane) { - reg_bp |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); - } else if ((hw->phy.media_type == ixgbe_media_type_copper) && - (ixgbe_device_supports_autoneg_fc(hw) == 0)) { - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, reg_cu); - } - - hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); -out: - return ret_val; -} - -/** - * ixgbe_disable_pcie_master - Disable PCI-express master access - * @hw: pointer to hardware structure - * - * Disables PCI-Express master access and verifies there are no pending - * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable - * bit hasn't caused the master requests to be disabled, else 0 - * is returned signifying master requests disabled. - **/ -s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = hw->back; - u32 i; - u32 reg_val; - u32 number_of_queues; - s32 status = 0; - u16 dev_status = 0; - - /* Just jump out if bus mastering is already disabled */ - if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) - goto out; - - /* Disable the receive unit by stopping each queue */ - number_of_queues = hw->mac.max_rx_queues; - for (i = 0; i < number_of_queues; i++) { - reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); - if (reg_val & IXGBE_RXDCTL_ENABLE) { - reg_val &= ~IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); - } - } - - reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL); - reg_val |= IXGBE_CTRL_GIO_DIS; - IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); - - for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { - if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) - goto check_device_status; - udelay(100); - } - - hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); - status = IXGBE_ERR_MASTER_REQUESTS_PENDING; - - /* - * Before proceeding, make sure that the PCIe block does not have - * transactions pending. 
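Both waits in ixgbe_disable_pcie_master follow the same bounded-poll shape,
first on the GIO status bit and then on the PCIe pending-transaction bit. A
generic sketch of the pattern (read_fn and ctx are hypothetical):

        #include <stdint.h>

        static int poll_until_clear(uint32_t (*read_fn)(void *ctx), void *ctx,
                                    uint32_t mask, int max_tries)
        {
                int i;

                for (i = 0; i < max_tries; i++) {
                        if (!(read_fn(ctx) & mask))
                                return 0;  /* bit cleared within the budget */
                        /* the driver sleeps udelay(100) between reads */
                }
                return -1;                 /* timed out; caller escalates */
        }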
- */ -check_device_status: - for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { - pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS, - &dev_status); - if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) - break; - udelay(100); - } - - if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT) - hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); - else - goto out; - - /* - * Two consecutive resets are required via CTRL.RST per datasheet - * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine - * of this need. The first reset prevents new master requests from - * being issued by our device. We then must wait 1usec for any - * remaining completions from the PCIe bus to trickle in, and then reset - * again to clear out any effects they may have had on our device. - */ - hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; - -out: - return status; -} - - -/** - * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire - * - * Acquires the SWFW semaphore through the GSSR register for the specified - * function (CSR, PHY0, PHY1, EEPROM, Flash) - **/ -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) -{ - u32 gssr; - u32 swmask = mask; - u32 fwmask = mask << 5; - s32 timeout = 200; - - while (timeout) { - /* - * SW EEPROM semaphore bit is used for access to all - * SW_FW_SYNC/GSSR bits (not just EEPROM) - */ - if (ixgbe_get_eeprom_semaphore(hw)) - return IXGBE_ERR_SWFW_SYNC; - - gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); - if (!(gssr & (fwmask | swmask))) - break; - - /* - * Firmware currently using resource (fwmask) or other software - * thread currently using resource (swmask) - */ - ixgbe_release_eeprom_semaphore(hw); - usleep_range(5000, 10000); - timeout--; - } - - if (!timeout) { - hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n"); - return IXGBE_ERR_SWFW_SYNC; - } - - gssr |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); - - ixgbe_release_eeprom_semaphore(hw); - return 0; -} - -/** - * ixgbe_release_swfw_sync - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release - * - * Releases the SWFW semaphore through the GSSR register for the specified - * function (CSR, PHY0, PHY1, EEPROM, Flash) - **/ -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) -{ - u32 gssr; - u32 swmask = mask; - - ixgbe_get_eeprom_semaphore(hw); - - gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); - gssr &= ~swmask; - IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); - - ixgbe_release_eeprom_semaphore(hw); -} - -/** - * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit - * @hw: pointer to hardware structure - * @regval: register value to write to RXCTRL - * - * Enables the Rx DMA unit - **/ -s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) -{ - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); - - return 0; -} - -/** - * ixgbe_blink_led_start_generic - Blink LED based on index. - * @hw: pointer to hardware structure - * @index: led number to blink - **/ -s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) -{ - ixgbe_link_speed speed = 0; - bool link_up = 0; - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - /* - * Link must be up to auto-blink the LEDs; - * Force it if link is down. 
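Callers are expected to bracket shared-resource access between the acquire
and release helpers above. A hypothetical caller, assuming the
IXGBE_GSSR_PHY0_SM mask defined elsewhere in the driver:

        /* Sketch only; not part of the driver. */
        s32 example_phy_access(struct ixgbe_hw *hw)
        {
                s32 ret = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);

                if (ret)
                        return ret;  /* firmware or the other port holds it */

                /* ... touch PHY0 registers here ... */

                ixgbe_release_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);
                return 0;
        }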
- */ - hw->mac.ops.check_link(hw, &speed, &link_up, false); - - if (!link_up) { - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - autoc_reg |= IXGBE_AUTOC_FLU; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - IXGBE_WRITE_FLUSH(hw); - usleep_range(10000, 20000); - } - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** - * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. - * @hw: pointer to hardware structure - * @index: led number to stop blinking - **/ -s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) -{ - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - autoc_reg &= ~IXGBE_AUTOC_FLU; - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg &= ~IXGBE_LED_BLINK(index); - led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** - * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM - * @hw: pointer to hardware structure - * @san_mac_offset: SAN MAC address offset - * - * This function will read the EEPROM location for the SAN MAC address - * pointer, and returns the value at that location. This is used in both - * get and set mac_addr routines. - **/ -static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, - u16 *san_mac_offset) -{ - /* - * First read the EEPROM pointer to see if the MAC addresses are - * available. - */ - hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); - - return 0; -} - -/** - * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM - * @hw: pointer to hardware structure - * @san_mac_addr: SAN MAC address - * - * Reads the SAN MAC address from the EEPROM, if it's available. This is - * per-port, so set_lan_id() must be called before reading the addresses. - * set_lan_id() is called by identify_sfp(), but this cannot be relied - * upon for non-SFP connections, so we must call it here. - **/ -s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) -{ - u16 san_mac_data, san_mac_offset; - u8 i; - - /* - * First read the EEPROM pointer to see if the MAC addresses are - * available. If they're not, no point in calling set_lan_id() here. - */ - ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); - - if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { - /* - * No addresses available in this EEPROM. It's not an - * error though, so just wipe the local address and return. - */ - for (i = 0; i < 6; i++) - san_mac_addr[i] = 0xFF; - - goto san_mac_addr_out; - } - - /* make sure we know which port we need to program */ - hw->mac.ops.set_lan_id(hw); - /* apply the port offset to the address offset */ - (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : - (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); - for (i = 0; i < 3; i++) { - hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); - san_mac_addr[i * 2] = (u8)(san_mac_data); - san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); - san_mac_offset++; - } - -san_mac_addr_out: - return 0; -} - -/** - * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count - * @hw: pointer to hardware structure - * - * Read PCIe configuration space, and get the MSI-X vector count from - * the capabilities table. 
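The SAN MAC read loop above reassembles a 6-byte address from three
little-endian 16-bit EEPROM words. The same unpacking as a standalone
sketch:

        #include <stdint.h>

        static void san_mac_from_words(const uint16_t w[3], uint8_t mac[6])
        {
                int i;

                for (i = 0; i < 3; i++) {
                        mac[i * 2]     = (uint8_t)w[i];        /* low byte */
                        mac[i * 2 + 1] = (uint8_t)(w[i] >> 8); /* high byte */
                }
        }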
- **/ -u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = hw->back; - u16 msix_count; - pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS, - &msix_count); - msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; - - /* MSI-X count is zero-based in HW, so increment to give proper value */ - msix_count++; - - return msix_count; -} - -/** - * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to disassociate - * @vmdq: VMDq pool index to remove from the rar - **/ -s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) -{ - u32 mpsar_lo, mpsar_hi; - u32 rar_entries = hw->mac.num_rar_entries; - - /* Make sure we are using a valid rar index range */ - if (rar >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - - if (!mpsar_lo && !mpsar_hi) - goto done; - - if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { - if (mpsar_lo) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); - mpsar_lo = 0; - } - if (mpsar_hi) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); - mpsar_hi = 0; - } - } else if (vmdq < 32) { - mpsar_lo &= ~(1 << vmdq); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); - } else { - mpsar_hi &= ~(1 << (vmdq - 32)); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); - } - - /* was that the last pool using this rar? */ - if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) - hw->mac.ops.clear_rar(hw, rar); -done: - return 0; -} - -/** - * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address - * @hw: pointer to hardware struct - * @rar: receive address register index to associate with a VMDq index - * @vmdq: VMDq pool index - **/ -s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) -{ - u32 mpsar; - u32 rar_entries = hw->mac.num_rar_entries; - - /* Make sure we are using a valid rar index range */ - if (rar >= rar_entries) { - hw_dbg(hw, "RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; - } - - if (vmdq < 32) { - mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar |= 1 << vmdq; - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); - } else { - mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - mpsar |= 1 << (vmdq - 32); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); - } - return 0; -} - -/** - * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array - * @hw: pointer to hardware structure - **/ -s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) -{ - int i; - - for (i = 0; i < 128; i++) - IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); - - return 0; -} - -/** - * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * - * return the VLVF index where this VLAN id should be placed - * - **/ -static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) -{ - u32 bits = 0; - u32 first_empty_slot = 0; - s32 regindex; - - /* short cut the special case */ - if (vlan == 0) - return 0; - - /* - * Search for the vlan id in the VLVF entries. 
Save off the first empty - * slot found along the way - */ - for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { - bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); - if (!bits && !(first_empty_slot)) - first_empty_slot = regindex; - else if ((bits & 0x0FFF) == vlan) - break; - } - - /* - * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan - * in the VLVF. Else use the first empty VLVF register for this - * vlan id. - */ - if (regindex >= IXGBE_VLVF_ENTRIES) { - if (first_empty_slot) - regindex = first_empty_slot; - else { - hw_dbg(hw, "No space in VLVF.\n"); - regindex = IXGBE_ERR_NO_SPACE; - } - } - - return regindex; -} - -/** - * ixgbe_set_vfta_generic - Set VLAN filter table - * @hw: pointer to hardware structure - * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFVFB - * @vlan_on: boolean flag to turn on/off VLAN in VFVF - * - * Turn on/off specified VLAN in the VLAN filter table. - **/ -s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) -{ - s32 regindex; - u32 bitindex; - u32 vfta; - u32 bits; - u32 vt; - u32 targetbit; - bool vfta_changed = false; - - if (vlan > 4095) - return IXGBE_ERR_PARAM; - - /* - * this is a 2 part operation - first the VFTA, then the - * VLVF and VLVFB if VT Mode is set - * We don't write the VFTA until we know the VLVF part succeeded. - */ - - /* Part 1 - * The VFTA is a bitstring made up of 128 32-bit registers - * that enable the particular VLAN id, much like the MTA: - * bits[11-5]: which register - * bits[4-0]: which bit in the register - */ - regindex = (vlan >> 5) & 0x7F; - bitindex = vlan & 0x1F; - targetbit = (1 << bitindex); - vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); - - if (vlan_on) { - if (!(vfta & targetbit)) { - vfta |= targetbit; - vfta_changed = true; - } - } else { - if ((vfta & targetbit)) { - vfta &= ~targetbit; - vfta_changed = true; - } - } - - /* Part 2 - * If VT Mode is set - * Either vlan_on - * make sure the vlan is in VLVF - * set the vind bit in the matching VLVFB - * Or !vlan_on - * clear the pool bit and possibly the vind - */ - vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - if (vt & IXGBE_VT_CTL_VT_ENABLE) { - s32 vlvf_index; - - vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); - if (vlvf_index < 0) - return vlvf_index; - - if (vlan_on) { - /* set the pool bit */ - if (vind < 32) { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - bits |= (1 << vind); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB(vlvf_index*2), - bits); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - bits |= (1 << (vind-32)); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1), - bits); - } - } else { - /* clear the pool bit */ - if (vind < 32) { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - bits &= ~(1 << vind); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB(vlvf_index*2), - bits); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1)); - bits &= ~(1 << (vind-32)); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB((vlvf_index*2)+1), - bits); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index*2)); - } - } - - /* - * If there are still bits set in the VLVFB registers - * for the VLAN ID indicated we need to see if the - * caller is requesting that we clear the VFTA entry bit. - * If the caller has requested that we clear the VFTA - * entry bit but there are still pools/VFs using this VLAN - * ID entry then ignore the request. 
We're not worried - * about the case where we're turning the VFTA VLAN ID - * entry bit on, only when requested to turn it off as - * there may be multiple pools and/or VFs using the - * VLAN ID entry. In that case we cannot clear the - * VFTA bit until all pools/VFs using that VLAN ID have also - * been cleared. This will be indicated by "bits" being - * zero. - */ - if (bits) { - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), - (IXGBE_VLVF_VIEN | vlan)); - if (!vlan_on) { - /* someone wants to clear the vfta entry - * but some pools/VFs are still using it. - * Ignore it. */ - vfta_changed = false; - } - } - else - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); - } - - if (vfta_changed) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); - - return 0; -} - -/** - * ixgbe_clear_vfta_generic - Clear VLAN filter table - * @hw: pointer to hardware structure - * - * Clears the VLAN filer table, and the VMDq index associated with the filter - **/ -s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) -{ - u32 offset; - - for (offset = 0; offset < hw->mac.vft_size; offset++) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); - - for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { - IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); - } - - return 0; -} - -/** - * ixgbe_check_mac_link_generic - Determine link and speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true when link is up - * @link_up_wait_to_complete: bool used to wait for link up or not - * - * Reads the links register to determine if link is up and the current speed - **/ -s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete) -{ - u32 links_reg, links_orig; - u32 i; - - /* clear the old state */ - links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); - - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - - if (links_orig != links_reg) { - hw_dbg(hw, "LINKS changed from %08X to %08X\n", - links_orig, links_reg); - } - - if (link_up_wait_to_complete) { - for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { - if (links_reg & IXGBE_LINKS_UP) { - *link_up = true; - break; - } else { - *link_up = false; - } - msleep(100); - links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - } - } else { - if (links_reg & IXGBE_LINKS_UP) - *link_up = true; - else - *link_up = false; - } - - if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_10G_82599) - *speed = IXGBE_LINK_SPEED_10GB_FULL; - else if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_1G_82599) - *speed = IXGBE_LINK_SPEED_1GB_FULL; - else if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_100_82599) - *speed = IXGBE_LINK_SPEED_100_FULL; - else - *speed = IXGBE_LINK_SPEED_UNKNOWN; - - /* if link is down, zero out the current_mode */ - if (*link_up == false) { - hw->fc.current_mode = ixgbe_fc_none; - hw->fc.fc_was_autonegged = false; - } - - return 0; -} - -/** - * ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from - * the EEPROM - * @hw: pointer to hardware structure - * @wwnn_prefix: the alternative WWNN prefix - * @wwpn_prefix: the alternative WWPN prefix - * - * This function will read the EEPROM from the alternative SAN MAC address - * block to check the support for the alternative WWNN/WWPN prefix support. 
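ixgbe_check_mac_link_generic above decodes a two-bit speed field from the
LINKS register. A parameterized sketch of that decode (the mask and
per-speed values stand in for the IXGBE_LINKS_* constants; not driver code):

        static unsigned int decode_link_speed(unsigned int links_reg,
                                              unsigned int mask,
                                              unsigned int v10g,
                                              unsigned int v1g,
                                              unsigned int v100)
        {
                unsigned int field = links_reg & mask;

                if (field == v10g)
                        return 10000;  /* Mb/s */
                if (field == v1g)
                        return 1000;
                if (field == v100)
                        return 100;
                return 0;              /* unknown */
        }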
- **/ -s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix) -{ - u16 offset, caps; - u16 alt_san_mac_blk_offset; - - /* clear output first */ - *wwnn_prefix = 0xFFFF; - *wwpn_prefix = 0xFFFF; - - /* check if alternative SAN MAC is supported */ - hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, - &alt_san_mac_blk_offset); - - if ((alt_san_mac_blk_offset == 0) || - (alt_san_mac_blk_offset == 0xFFFF)) - goto wwn_prefix_out; - - /* check capability in alternative san mac address block */ - offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; - hw->eeprom.ops.read(hw, offset, &caps); - if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) - goto wwn_prefix_out; - - /* get the corresponding prefix for WWNN/WWPN */ - offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; - hw->eeprom.ops.read(hw, offset, wwnn_prefix); - - offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; - hw->eeprom.ops.read(hw, offset, wwpn_prefix); - -wwn_prefix_out: - return 0; -} - -/** - * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow - * control - * @hw: pointer to hardware structure - * - * There are several phys that do not support autoneg flow control. This - * function check the device id to see if the associated phy supports - * autoneg flow control. - **/ -static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) -{ - - switch (hw->device_id) { - case IXGBE_DEV_ID_X540T: - return 0; - case IXGBE_DEV_ID_82599_T3_LOM: - return 0; - default: - return IXGBE_ERR_FC_NOT_SUPPORTED; - } -} - -/** - * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing - * @hw: pointer to hardware structure - * @enable: enable or disable switch for anti-spoofing - * @pf: Physical Function pool - do not enable anti-spoofing for the PF - * - **/ -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) -{ - int j; - int pf_target_reg = pf >> 3; - int pf_target_shift = pf % 8; - u32 pfvfspoof = 0; - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - if (enable) - pfvfspoof = IXGBE_SPOOF_MACAS_MASK; - - /* - * PFVFSPOOF register array is size 8 with 8 bits assigned to - * MAC anti-spoof enables in each register array element. - */ - for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - - /* If not enabling anti-spoofing then done */ - if (!enable) - return; - - /* - * The PF should be allowed to spoof so that it can support - * emulation mode NICs. 
Reset the bit assigned to the PF
- */
- pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
- pfvfspoof ^= (1 << pf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
-}
-
-/**
- * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
- * @hw: pointer to hardware structure
- * @enable: enable or disable switch for VLAN anti-spoofing
- * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
- *
- **/
-void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
-{
- int vf_target_reg = vf >> 3;
- int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
- u32 pfvfspoof;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- if (enable)
- pfvfspoof |= (1 << vf_target_shift);
- else
- pfvfspoof &= ~(1 << vf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
-}
-
-/**
- * ixgbe_get_device_caps_generic - Get additional device capabilities
- * @hw: pointer to hardware structure
- * @device_caps: the EEPROM word with the extra device capabilities
- *
- * This function will read the EEPROM location for the device capabilities,
- * and return the word through device_caps.
- **/
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
-{
- hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
-
- return 0;
-}
-
-/**
- * ixgbe_set_rxpba_generic - Initialize RX packet buffer
- * @hw: pointer to hardware structure
- * @num_pb: number of packet buffers to allocate
- * @headroom: reserve n KB of headroom
- * @strategy: packet buffer allocation strategy
- **/
-void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
- int num_pb,
- u32 headroom,
- int strategy)
-{
- u32 pbsize = hw->mac.rx_pb_size;
- int i = 0;
- u32 rxpktsize, txpktsize, txpbthresh;
-
- /* Reserve headroom */
- pbsize -= headroom;
-
- if (!num_pb)
- num_pb = 1;
-
- /* Divide remaining packet buffer space amongst the number
- * of packet buffers requested using supplied strategy.
- */
- switch (strategy) {
- case (PBA_STRATEGY_WEIGHTED):
- /* pba_80_48 strategy weights the first half of the packet
- * buffer with 5/8 of the packet buffer space.
- */
- rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
- pbsize -= rxpktsize * (num_pb / 2);
- rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
- for (; i < (num_pb / 2); i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- /* Fall through to configure remaining packet buffers */
- case (PBA_STRATEGY_EQUAL):
- /* Divide the remaining Rx packet buffer evenly among the TCs */
- rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
- for (; i < num_pb; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- break;
- default:
- break;
- }
-
- /*
- * Setup Tx packet buffer and threshold equally for all TCs
- * TXPBTHRESH register is set in K so divide by 1024 and subtract
- * 10 since the largest packet we support is just over 9K.
- */
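The Tx code below sizes each buffer as an equal share and converts it to a
threshold in KB, while the weighted Rx strategy above gives the first half
of the buffers 5/8 of the space. A compact sketch of both computations
(parameters stand in for the IXGBE_* defines; not driver code):

        /* Equal Tx split: per-TC size, and threshold = size in KB minus
         * the max supported packet in KB (10, just over 9K). */
        static void tx_pb_split(unsigned int total, int num_pb,
                                unsigned int pkt_max_kb,
                                unsigned int *size, unsigned int *thresh)
        {
                *size = total / num_pb;
                *thresh = (*size / 1024) - pkt_max_kb;
        }

        /* Weighted Rx split: the first num_pb/2 buffers share 5/8 of
         * the space, so each gets (total * 5 * 2) / (num_pb * 8). */
        static unsigned int rx_weighted_share(unsigned int total, int num_pb)
        {
                return (total * 5 * 2) / (num_pb * 8);
        }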
- txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
- txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
- for (i = 0; i < num_pb; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
- }
-
- /* Clear unused TCs, if any, to zero buffer size */
- for (; i < IXGBE_MAX_PB; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
- }
-}
-
-/**
- * ixgbe_calculate_checksum - Calculate checksum for buffer
- * @buffer: pointer to EEPROM
- * @length: size of EEPROM to calculate a checksum for
- *
- * Calculates the checksum for a buffer of a specified length. The
- * checksum calculated is returned.
- **/
-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
-{
- u32 i;
- u8 sum = 0;
-
- if (!buffer)
- return 0;
-
- for (i = 0; i < length; i++)
- sum += buffer[i];
-
- return (u8) (0 - sum);
-}
-
-/**
- * ixgbe_host_interface_command - Issue command to manageability block
- * @hw: pointer to the HW structure
- * @buffer: contains the command to write and where the return status will
- * be placed
- * @length: length of buffer, must be a multiple of 4 bytes
- *
- * Communicates with the manageability block. On success return 0
- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
- **/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
- u32 length)
-{
- u32 hicr, i;
- u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
- u8 buf_len, dword_len;
-
- s32 ret_val = 0;
-
- if (length == 0 || length & 0x3 ||
- length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
- hw_dbg(hw, "Buffer length failure.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
- }
-
- /* Check that the host interface is enabled. */
- hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
- if ((hicr & IXGBE_HICR_EN) == 0) {
- hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
- }
-
- /* Calculate length in DWORDs */
- dword_len = length >> 2;
-
- /*
- * The device driver writes the relevant command block
- * into the ram area.
- */
- for (i = 0; i < dword_len; i++)
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, *((u32 *)buffer + i));
-
- /* Setting this bit tells the ARC that a new command is pending. */
- IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
-
- for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
- hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
- if (!(hicr & IXGBE_HICR_C))
- break;
- usleep_range(1000, 2000);
- }
-
- /* Check command successful completion. */
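Because ixgbe_calculate_checksum above stores (0 - sum) of the other bytes,
a well-formed buffer sums to zero modulo 256, checksum byte included. A
companion verification sketch (not driver code):

        #include <stdint.h>

        static int checksum_ok(const uint8_t *buf, uint32_t len)
        {
                uint8_t sum = 0;
                uint32_t i;

                for (i = 0; i < len; i++)
                        sum += buf[i];
                return sum == 0;  /* nonzero sum means corruption */
        }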
- if (i == IXGBE_HI_COMMAND_TIMEOUT ||
- (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
- hw_dbg(hw, "Command has failed with no status valid.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
- }
-
- /* Calculate length in DWORDs */
- dword_len = hdr_size >> 2;
-
- /* first pull in the header so we know the buffer length */
- for (i = 0; i < dword_len; i++)
- *((u32 *)buffer + i) =
- IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
-
- /* If there is anything in the data position, pull it in */
- buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
- if (buf_len == 0)
- goto out;
-
- if (length < (buf_len + hdr_size)) {
- hw_dbg(hw, "Buffer not large enough for reply message.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
- }
-
- /* Calculate payload length in DWORDs, add one for odd lengths */
- dword_len = (buf_len + 1) >> 2;
-
- /* Pull in the rest of the buffer (i is where we left off); the
- * bound is in DWORDs, not bytes, since the FLEX_MNG array is
- * read one DWORD at a time. */
- for (; i < (dword_len + (hdr_size >> 2)); i++)
- *((u32 *)buffer + i) =
- IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
-
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
- * @hw: pointer to the HW structure
- * @maj: driver version major number
- * @min: driver version minor number
- * @build: driver version build number
- * @sub: driver version sub build number
- *
- * Sends driver version number to firmware through the manageability
- * block. On success return 0
- * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
- **/
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub)
-{
- struct ixgbe_hic_drv_info fw_cmd;
- int i;
- s32 ret_val = 0;
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
-
- fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
- fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
- fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
- fw_cmd.port_num = (u8)hw->bus.func;
- fw_cmd.ver_maj = maj;
- fw_cmd.ver_min = min;
- fw_cmd.ver_build = build;
- fw_cmd.ver_sub = sub;
- fw_cmd.hdr.checksum = 0;
- fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
- (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
- fw_cmd.pad = 0;
- fw_cmd.pad2 = 0;
-
- for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd,
- sizeof(fw_cmd));
- if (ret_val != 0)
- continue;
-
- if (fw_cmd.hdr.cmd_or_resp.ret_status ==
- FW_CEM_RESP_STATUS_SUCCESS)
- ret_val = 0;
- else
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-
- break;
- }
-
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
-out:
- return ret_val;
-}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
deleted file mode 100644
index f24fd64..0000000
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_COMMON_H_
-#define _IXGBE_COMMON_H_
-
-#include "ixgbe_type.h"
-#include "ixgbe.h"
-
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
-void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
-
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
-
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 *data);
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val);
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
-
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
- struct net_device *netdev);
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-
-s32 ixgbe_validate_mac_addr(u8 *mac_addr);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
-s32
ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); -s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete); -s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix); -s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); -void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); -s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); -s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, - u8 build, u8 ver); - -void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, - u32 headroom, int strategy); - -#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) - -#ifndef writeq -#define writeq(val, addr) writel((u32) (val), addr); \ - writel((u32) (val >> 32), (addr + 4)); -#endif - -#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) - -#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg)) - -#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\ - writel((value), ((a)->hw_addr + (reg) + ((offset) << 2)))) - -#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\ - readl((a)->hw_addr + (reg) + ((offset) << 2))) - -#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) - -#define hw_dbg(hw, format, arg...) \ - netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg) -#define e_dev_info(format, arg...) \ - dev_info(&adapter->pdev->dev, format, ## arg) -#define e_dev_warn(format, arg...) \ - dev_warn(&adapter->pdev->dev, format, ## arg) -#define e_dev_err(format, arg...) \ - dev_err(&adapter->pdev->dev, format, ## arg) -#define e_dev_notice(format, arg...) \ - dev_notice(&adapter->pdev->dev, format, ## arg) -#define e_info(msglvl, format, arg...) \ - netif_info(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_err(msglvl, format, arg...) \ - netif_err(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_warn(msglvl, format, arg...) \ - netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) -#define e_crit(msglvl, format, arg...) \ - netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) -#endif /* IXGBE_COMMON */ diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c deleted file mode 100644 index 9d88c31..0000000 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ /dev/null @@ -1,320 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". 
- - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -#include "ixgbe.h" -#include "ixgbe_type.h" -#include "ixgbe_dcb.h" -#include "ixgbe_dcb_82598.h" -#include "ixgbe_dcb_82599.h" - -/** - * ixgbe_ieee_credits - This calculates the ieee traffic class - * credits from the configured bandwidth percentages. Credits - * are the smallest unit programmable into the underlying - * hardware. The IEEE 802.1Qaz specification do not use bandwidth - * groups so this is much simplified from the CEE case. - */ -s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame) -{ - int min_percent = 100; - int min_credit, multiplier; - int i; - - min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / - DCB_CREDIT_QUANTUM; - - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - if (bw[i] < min_percent && bw[i]) - min_percent = bw[i]; - } - - multiplier = (min_credit / min_percent) + 1; - - /* Find out the hw credits for each TC */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL); - - if (val < min_credit) - val = min_credit; - refill[i] = val; - - max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; - } - return 0; -} - -/** - * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits - * @ixgbe_dcb_config: Struct containing DCB settings. - * @direction: Configuring either Tx or Rx. - * - * This function calculates the credits allocated to each traffic class. - * It should be called only after the rules are checked by - * ixgbe_dcb_check_config(). - */ -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config, - int max_frame, u8 direction) -{ - struct tc_bw_alloc *p; - int min_credit; - int min_multiplier; - int min_percent = 100; - s32 ret_val = 0; - /* Initialization values default for Tx settings */ - u32 credit_refill = 0; - u32 credit_max = 0; - u16 link_percentage = 0; - u8 bw_percent = 0; - u8 i; - - if (dcb_config == NULL) { - ret_val = DCB_ERR_CONFIG; - goto out; - } - - min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / - DCB_CREDIT_QUANTUM; - - /* Find smallest link percentage */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &dcb_config->tc_config[i].path[direction]; - bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; - link_percentage = p->bwg_percent; - - link_percentage = (link_percentage * bw_percent) / 100; - - if (link_percentage && link_percentage < min_percent) - min_percent = link_percentage; - } - - /* - * The ratio between traffic classes will control the bandwidth - * percentages seen on the wire. To calculate this ratio we use - * a multiplier. It is required that the refill credits must be - * larger than the max frame size so here we find the smallest - * multiplier that will allow all bandwidth percentages to be - * greater than the max frame size. 
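To see the credit arithmetic above with numbers, assume DCB_CREDIT_QUANTUM
is 64, as it is in this era of the driver:

        /* Round max_frame/2 up to whole credit quanta. */
        static int min_credit_for(int max_frame, int quantum)
        {
                return (max_frame / 2 + quantum - 1) / quantum;
        }

        /* min_credit_for(9216, 64) == 72. With a smallest non-zero
         * bandwidth of 5%, multiplier = 72 / 5 + 1 = 15, so a 10% class
         * refills at min(10 * 15, MAX_CREDIT_REFILL) = 150 credits,
         * comfortably above the 72-credit floor. */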
- */ - min_multiplier = (min_credit / min_percent) + 1; - - /* Find out the link percentage for each TC first */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &dcb_config->tc_config[i].path[direction]; - bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; - - link_percentage = p->bwg_percent; - /* Must be careful of integer division for very small nums */ - link_percentage = (link_percentage * bw_percent) / 100; - if (p->bwg_percent > 0 && link_percentage == 0) - link_percentage = 1; - - /* Save link_percentage for reference */ - p->link_percent = (u8)link_percentage; - - /* Calculate credit refill ratio using multiplier */ - credit_refill = min(link_percentage * min_multiplier, - MAX_CREDIT_REFILL); - p->data_credits_refill = (u16)credit_refill; - - /* Calculate maximum credit for the TC */ - credit_max = (link_percentage * MAX_CREDIT) / 100; - - /* - * Adjustment based on rule checking, if the percentage - * of a TC is too small, the maximum credit may not be - * enough to send out a jumbo frame in data plane arbitration. - */ - if (credit_max && (credit_max < min_credit)) - credit_max = min_credit; - - if (direction == DCB_TX_CONFIG) { - /* - * Adjustment based on rule checking, if the - * percentage of a TC is too small, the maximum - * credit may not be enough to send out a TSO - * packet in descriptor plane arbitration. - */ - if ((hw->mac.type == ixgbe_mac_82598EB) && - credit_max && - (credit_max < MINIMUM_CREDIT_FOR_TSO)) - credit_max = MINIMUM_CREDIT_FOR_TSO; - - dcb_config->tc_config[i].desc_credits_max = - (u16)credit_max; - } - - p->data_credits_max = (u16)credit_max; - } - -out: - return ret_val; -} - -void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) -{ - int i; - - *pfc_en = 0; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i; -} - -void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, - u16 *refill) -{ - struct tc_bw_alloc *p; - int i; - - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &cfg->tc_config[i].path[direction]; - refill[i] = p->data_credits_refill; - } -} - -void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) -{ - int i; - - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - max[i] = cfg->tc_config[i].desc_credits_max; -} - -void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, - u8 *bwgid) -{ - struct tc_bw_alloc *p; - int i; - - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &cfg->tc_config[i].path[direction]; - bwgid[i] = p->bwg_id; - } -} - -void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, - u8 *ptype) -{ - struct tc_bw_alloc *p; - int i; - - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &cfg->tc_config[i].path[direction]; - ptype[i] = p->prio_type; - } -} - -/** - * ixgbe_dcb_hw_config - Config and enable DCB - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure dcb settings and enable dcb mode. 
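The unpack helpers above flatten the CEE config structs into the flat arrays the register-programming code consumes. For the PFC bitmap in particular the intent is one bit per traffic class; a standalone sketch of that packing (note the !! boolean reduction, which keeps multi-valued dcb_pfc enums such as pfc_enabled_tx (2) from smearing into a neighbouring TC's bit):

#include <stdio.h>

enum dcb_pfc_type { pfc_disabled = 0, pfc_enabled_full,
		    pfc_enabled_tx, pfc_enabled_rx };

int main(void)
{
	enum dcb_pfc_type tc[8] = { pfc_enabled_full, pfc_disabled,
				    pfc_enabled_tx, pfc_enabled_rx,
				    0, 0, 0, 0 };
	unsigned char pfc_en = 0;
	int i;

	for (i = 0; i < 8; i++)
		pfc_en |= !!(tc[i] & 0xF) << i;
	printf("pfc_en = 0x%02x\n", pfc_en);	/* 0x0d: TCs 0, 2, 3 */
	return 0;
}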
- */ -s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) -{ - s32 ret = 0; - u8 pfc_en; - u8 ptype[MAX_TRAFFIC_CLASS]; - u8 bwgid[MAX_TRAFFIC_CLASS]; - u16 refill[MAX_TRAFFIC_CLASS]; - u16 max[MAX_TRAFFIC_CLASS]; - /* CEE does not define a priority to tc mapping so map 1:1 */ - u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; - - /* Unpack CEE standard containers */ - ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); - ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); - ixgbe_dcb_unpack_max(dcb_config, max); - ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); - ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, - bwgid, ptype); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, - bwgid, ptype, prio_tc); - break; - default: - break; - } - return ret; -} - -/* Helper routines to abstract HW specifics from DCB netlink ops */ -s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en) -{ - int ret = -EINVAL; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en); - break; - default: - break; - } - return ret; -} - -s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, - u16 *refill, u16 *max, u8 *bwg_id, - u8 *prio_type, u8 *prio_tc) -{ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, - prio_type); - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, - bwg_id, prio_type); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, - bwg_id, prio_type, prio_tc); - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, - prio_type, prio_tc); - break; - default: - break; - } - return 0; -} diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h deleted file mode 100644 index e85826a..0000000 --- a/drivers/net/ixgbe/ixgbe_dcb.h +++ /dev/null @@ -1,167 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _DCB_CONFIG_H_ -#define _DCB_CONFIG_H_ - -#include "ixgbe_type.h" - -/* DCB data structures */ - -#define IXGBE_MAX_PACKET_BUFFERS 8 -#define MAX_USER_PRIORITY 8 -#define MAX_TRAFFIC_CLASS 8 -#define MAX_BW_GROUP 8 -#define BW_PERCENT 100 - -#define DCB_TX_CONFIG 0 -#define DCB_RX_CONFIG 1 - -/* DCB error Codes */ -#define DCB_SUCCESS 0 -#define DCB_ERR_CONFIG -1 -#define DCB_ERR_PARAM -2 - -/* Transmit and receive Errors */ -/* Error in bandwidth group allocation */ -#define DCB_ERR_BW_GROUP -3 -/* Error in traffic class bandwidth allocation */ -#define DCB_ERR_TC_BW -4 -/* Traffic class has both link strict and group strict enabled */ -#define DCB_ERR_LS_GS -5 -/* Link strict traffic class has non zero bandwidth */ -#define DCB_ERR_LS_BW_NONZERO -6 -/* Link strict bandwidth group has non zero bandwidth */ -#define DCB_ERR_LS_BWG_NONZERO -7 -/* Traffic class has zero bandwidth */ -#define DCB_ERR_TC_BW_ZERO -8 - -#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF - -struct dcb_pfc_tc_debug { - u8 tc; - u8 pause_status; - u64 pause_quanta; -}; - -enum strict_prio_type { - prio_none = 0, - prio_group, - prio_link -}; - -/* DCB capability definitions */ -#define IXGBE_DCB_PG_SUPPORT 0x00000001 -#define IXGBE_DCB_PFC_SUPPORT 0x00000002 -#define IXGBE_DCB_BCN_SUPPORT 0x00000004 -#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 -#define IXGBE_DCB_GSP_SUPPORT 0x00000010 - -#define IXGBE_DCB_8_TC_SUPPORT 0x80 - -struct dcb_support { - /* DCB capabilities */ - u32 capabilities; - - /* Each bit represents a number of TCs configurable in the hw. - * If 8 traffic classes can be configured, the value is 0x80. - */ - u8 traffic_classes; - u8 pfc_traffic_classes; -}; - -/* Traffic class bandwidth allocation per direction */ -struct tc_bw_alloc { - u8 bwg_id; /* Bandwidth Group (BWG) ID */ - u8 bwg_percent; /* % of BWG's bandwidth */ - u8 link_percent; /* % of link bandwidth */ - u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ - u16 data_credits_refill; /* Credit refill amount in 64B granularity */ - u16 data_credits_max; /* Max credits for a configured packet buffer - * in 64B granularity.*/ - enum strict_prio_type prio_type; /* Link or Group Strict Priority */ -}; - -enum dcb_pfc_type { - pfc_disabled = 0, - pfc_enabled_full, - pfc_enabled_tx, - pfc_enabled_rx -}; - -/* Traffic class configuration */ -struct tc_configuration { - struct tc_bw_alloc path[2]; /* One each for Tx/Rx */ - enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */ - - u16 desc_credits_max; /* For Tx Descriptor arbitration */ - u8 tc; /* Traffic class (TC) */ -}; - -struct dcb_num_tcs { - u8 pg_tcs; - u8 pfc_tcs; -}; - -struct ixgbe_dcb_config { - struct dcb_support support; - struct dcb_num_tcs num_tcs; - struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; - u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ - bool pfc_mode_enable; - - u32 dcb_cfg_version; /* Not used...OS-specific? 
*/ - u32 link_speed; /* For bandwidth allocation validation purpose */ -}; - -/* DCB driver APIs */ -void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en); -void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *); -void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); -void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); -void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); - -/* DCB credits calculation */ -s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame); -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, - struct ixgbe_dcb_config *, int, u8); - -/* DCB hw initialization */ -s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, - u8 *bwg_id, u8 *prio_type, u8 *tc_prio); -s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en); -s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); - -/* DCB definitions for credit calculation */ -#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ -#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ -#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ -#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ -#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */ - -#endif /* _DCB_CONFIG_H */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c deleted file mode 100644 index 2288c3c..0000000 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.c +++ /dev/null @@ -1,297 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" -#include "ixgbe_type.h" -#include "ixgbe_dcb.h" -#include "ixgbe_dcb_82598.h" - -/** - * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Rx Data Arbiter and credits for each traffic class. 
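The credit #defines closing ixgbe_dcb.h above are easy to sanity-check: one credit is a 64-byte quantum, the 9-bit refill field tops out just under 32 KB, the TSO floor must cover a 32 KB packet plus one extra quantum, and the 12-bit max-credit field spans roughly 256 KB (the "1204" in its in-line comment reads like a slip for "1024"). A throwaway userspace check:

#include <assert.h>
#include <stdio.h>

#define DCB_CREDIT_QUANTUM	64
#define MAX_CREDIT_REFILL	511			/* 0x1FF */
#define DCB_MAX_TSO_SIZE	(32 * 1024)
#define MINIMUM_CREDIT_FOR_TSO	(DCB_MAX_TSO_SIZE / 64 + 1)
#define MAX_CREDIT		4095			/* 0xFFF */

int main(void)
{
	assert(MAX_CREDIT_REFILL * 64 == 32704);	/* just under 32 KB */
	assert(MINIMUM_CREDIT_FOR_TSO == 513);		/* 32768/64 + 1 */
	assert(MAX_CREDIT * 64 == 262080);		/* ~256 KB */
	printf("credit quanta check out\n");
	return 0;
}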
- */ -s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *prio_type) -{ - u32 reg = 0; - u32 credit_refill = 0; - u32 credit_max = 0; - u8 i = 0; - - reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; - IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); - - reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - /* Enable Arbiter */ - reg &= ~IXGBE_RMCS_ARBDIS; - /* Enable Receive Recycle within the BWG */ - reg |= IXGBE_RMCS_RRM; - /* Enable Deficit Fixed Priority arbitration*/ - reg |= IXGBE_RMCS_DFP; - - IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); - - /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - credit_refill = refill[i]; - credit_max = max[i]; - - reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); - - if (prio_type[i] == prio_link) - reg |= IXGBE_RT2CR_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); - } - - reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - reg |= IXGBE_RDRXCTL_RDMTS_1_2; - reg |= IXGBE_RDRXCTL_MPBEN; - reg |= IXGBE_RDRXCTL_MCEN; - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); - - reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - /* Make sure there is enough descriptors before arbitration */ - reg &= ~IXGBE_RXCTRL_DMBYPS; - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); - - return 0; -} - -/** - * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Tx Descriptor Arbiter and credits for each traffic class. - */ -s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) -{ - u32 reg, max_credits; - u8 i; - - reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); - - /* Enable arbiter */ - reg &= ~IXGBE_DPMCS_ARBDIS; - /* Enable DFP and Recycle mode */ - reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM); - reg |= IXGBE_DPMCS_TSOEF; - /* Configure Max TSO packet size 34KB including payload and headers */ - reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); - - IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); - - /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - max_credits = max[i]; - reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; - reg |= refill[i]; - reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; - - if (prio_type[i] == prio_group) - reg |= IXGBE_TDTQ2TCCR_GSP; - - if (prio_type[i] == prio_link) - reg |= IXGBE_TDTQ2TCCR_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); - } - - return 0; -} - -/** - * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Tx Data Arbiter and credits for each traffic class. 
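Both 82598 Tx arbiters above compose their per-TC registers the same way: refill credits in the low bits, the max-credit limit at bit 12, the bandwidth-group id at bit 9, and group/link strict priority as the top two flag bits. A standalone sketch of that composition (shift and flag values copied from ixgbe_dcb_82598.h; the 50/2047 operands are illustrative):

#include <stdio.h>
#include <stdint.h>

#define MCL_SHIFT	12		/* max credit limit */
#define BWG_SHIFT	9		/* bandwidth group id */
#define GSP		0x40000000u	/* group strict priority */
#define LSP		0x80000000u	/* link strict priority */

static uint32_t tc_credit_reg(uint16_t refill, uint16_t max,
			      uint8_t bwg_id, int group, int link)
{
	uint32_t reg = refill;			/* 64B credit units */

	reg |= (uint32_t)max << MCL_SHIFT;
	reg |= (uint32_t)bwg_id << BWG_SHIFT;
	if (group)
		reg |= GSP;
	if (link)
		reg |= LSP;
	return reg;
}

int main(void)
{
	printf("0x%08x\n", tc_credit_reg(50, 2047, 0, 0, 0));	/* 0x007ff032 */
	return 0;
}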
- */ -s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) -{ - u32 reg; - u8 i; - - reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); - /* Enable Data Plane Arbiter */ - reg &= ~IXGBE_PDPMCS_ARBDIS; - /* Enable DFP and Transmit Recycle Mode */ - reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); - - IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); - - /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - reg = refill[i]; - reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; - reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; - - if (prio_type[i] == prio_group) - reg |= IXGBE_TDPT2TCCR_GSP; - - if (prio_type[i] == prio_link) - reg |= IXGBE_TDPT2TCCR_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); - } - - /* Enable Tx packet buffer division */ - reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); - reg |= IXGBE_DTXCTL_ENDBUBD; - IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); - - return 0; -} - -/** - * ixgbe_dcb_config_pfc_82598 - Config priority flow control - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure Priority Flow Control for each traffic class. - */ -s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) -{ - u32 reg, rx_pba_size; - u8 i; - - if (pfc_en) { - /* Enable Transmit Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - reg &= ~IXGBE_RMCS_TFCE_802_3X; - /* correct the reporting of our flow control status */ - reg |= IXGBE_RMCS_TFCE_PRIORITY; - IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); - - /* Enable Receive Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); - reg &= ~IXGBE_FCTRL_RFCE; - reg |= IXGBE_FCTRL_RPFCE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); - - /* Configure pause time */ - for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); - } - - /* - * Configure flow control thresholds and enable priority flow control - * for each traffic class. - */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - int enabled = pfc_en & (1 << i); - rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); - rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; - reg = (rx_pba_size - hw->fc.low_water) << 10; - - if (enabled == pfc_enabled_tx || - enabled == pfc_enabled_full) - reg |= IXGBE_FCRTL_XONE; - - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); - - reg = (rx_pba_size - hw->fc.high_water) << 10; - if (enabled == pfc_enabled_tx || - enabled == pfc_enabled_full) - reg |= IXGBE_FCRTH_FCEN; - - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); - } - - return 0; -} - -/** - * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics - * @hw: pointer to hardware structure - * - * Configure queue statistics registers, all queues belonging to same traffic - * class uses a single set of queue statistics counters. 
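The PFC threshold code above works in "headroom below the top of the packet buffer": RXPBSIZE is read back in KB (after the >> IXGBE_RXPBSIZE_SHIFT), the low/high water marks are subtracted in KB, and << 10 scales the result to bytes for FCRTL (XON) and FCRTH (XOFF). A worked example, assuming a 48 KB buffer and illustrative watermark values chosen so the XOFF threshold sits above XON:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rx_pba_kb = 48;	/* e.g. IXGBE_RXPBSIZE_48KB >> shift */
	uint32_t low_water = 6;		/* hw->fc.low_water, KB (assumed) */
	uint32_t high_water = 3;	/* hw->fc.high_water, KB (assumed) */
	uint32_t xon  = (rx_pba_kb - low_water)  << 10;	/* XON  at 42 KB */
	uint32_t xoff = (rx_pba_kb - high_water) << 10;	/* XOFF at 45 KB */

	printf("XON=%u B, XOFF=%u B\n", xon, xoff);
	return 0;
}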
- */ -static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) -{ - u32 reg = 0; - u8 i = 0; - u8 j = 0; - - /* Receive Queues stats setting - 8 queues per statistics reg */ - for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { - reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); - reg |= ((0x1010101) * j); - IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); - reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); - reg |= ((0x1010101) * j); - IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); - } - /* Transmit Queues stats setting - 4 queues per statistics reg */ - for (i = 0; i < 8; i++) { - reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); - reg |= ((0x1010101) * i); - IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); - } - - return 0; -} - -/** - * ixgbe_dcb_hw_config_82598 - Config and enable DCB - * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure - * - * Configure dcb settings and enable dcb mode. - */ -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type) -{ - ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_pfc_82598(hw, pfc_en); - ixgbe_dcb_config_tc_stats_82598(hw); - - return 0; -} diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h deleted file mode 100644 index 2f31893..0000000 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.h +++ /dev/null @@ -1,97 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _DCB_82598_CONFIG_H_ -#define _DCB_82598_CONFIG_H_ - -/* DCB register definitions */ - -#define IXGBE_DPMCS_MTSOS_SHIFT 16 -#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */ -#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ -#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ -#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ - -#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ - -#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ -#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ - -#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */ -#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */ - -#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 -#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 -#define IXGBE_TDTQ2TCCR_GSP 0x40000000 -#define IXGBE_TDTQ2TCCR_LSP 0x80000000 - -#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 -#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 -#define IXGBE_TDPT2TCCR_GSP 0x40000000 -#define IXGBE_TDPT2TCCR_LSP 0x80000000 - -#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */ -#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ -#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ - -#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ - -#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ -#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ -#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ -#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ - -#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 - -/* DCB hardware-specific driver APIs */ - -/* DCB PFC functions */ -s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); - -/* DCB hw initialization */ -s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *prio_type); - -s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type); - -#endif /* _DCB_82598_CONFIG_H */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c deleted file mode 100644 index ade9820..0000000 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ /dev/null @@ -1,346 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" -#include "ixgbe_type.h" -#include "ixgbe_dcb.h" -#include "ixgbe_dcb_82599.h" - -/** - * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter - * @hw: pointer to hardware structure - * @refill: refill credits index by traffic class - * @max: max credits index by traffic class - * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class - * - * Configure Rx Packet Arbiter and credits for each traffic class. - */ -s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc) -{ - u32 reg = 0; - u32 credit_refill = 0; - u32 credit_max = 0; - u8 i = 0; - - /* - * Disable the arbiter before changing parameters - * (always enable recycle mode; WSP) - */ - reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - - /* Map all traffic classes to their UP, 1 to 1 */ - reg = 0; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); - IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); - - /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - credit_refill = refill[i]; - credit_max = max[i]; - reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); - - reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; - - if (prio_type[i] == prio_link) - reg |= IXGBE_RTRPT4C_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); - } - - /* - * Configure Rx packet plane (recycle mode; WSP) and - * enable arbiter - */ - reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; - IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - - return 0; -} - -/** - * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter - * @hw: pointer to hardware structure - * @refill: refill credits index by traffic class - * @max: max credits index by traffic class - * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class - * - * Configure Tx Descriptor Arbiter and credits for each traffic class. 
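Two things worth noting in the 82599 Rx arbiter setup above: ARBDIS is held asserted while parameters change and only cleared by the final RTRPCS write, and the RTRUP2TC map programmed in between packs eight 3-bit fields, one per user priority, each naming the traffic class that priority lands in. A standalone sketch of the packing, using the 1:1 table from the CEE path:

#include <stdio.h>
#include <stdint.h>

#define UP_SHIFT 3	/* IXGBE_RTRUP2TC_UP_SHIFT */

int main(void)
{
	uint8_t prio_tc[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	uint32_t reg = 0;
	int i;

	for (i = 0; i < 8; i++)
		reg |= (uint32_t)prio_tc[i] << (i * UP_SHIFT);
	printf("RTRUP2TC = 0x%08x\n", reg);	/* 0x00fac688 */
	return 0;
}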
- */ -s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) -{ - u32 reg, max_credits; - u8 i; - - /* Clear the per-Tx queue credits; we use per-TC instead */ - for (i = 0; i < 128; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); - IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); - } - - /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - max_credits = max[i]; - reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; - reg |= refill[i]; - reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; - - if (prio_type[i] == prio_group) - reg |= IXGBE_RTTDT2C_GSP; - - if (prio_type[i] == prio_link) - reg |= IXGBE_RTTDT2C_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); - } - - /* - * Configure Tx descriptor plane (recycle mode; WSP) and - * enable arbiter - */ - reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); - - return 0; -} - -/** - * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter - * @hw: pointer to hardware structure - * @refill: refill credits index by traffic class - * @max: max credits index by traffic class - * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class - * - * Configure Tx Packet Arbiter and credits for each traffic class. - */ -s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc) -{ - u32 reg; - u8 i; - - /* - * Disable the arbiter before changing parameters - * (always enable recycle mode; SP; arb delay) - */ - reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | - (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | - IXGBE_RTTPCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); - - /* Map all traffic classes to their UP, 1 to 1 */ - reg = 0; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); - IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); - - /* Configure traffic class credits and priority */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - reg = refill[i]; - reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; - reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; - - if (prio_type[i] == prio_group) - reg |= IXGBE_RTTPT2C_GSP; - - if (prio_type[i] == prio_link) - reg |= IXGBE_RTTPT2C_LSP; - - IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); - } - - /* - * Configure Tx packet plane (recycle mode; SP; arb delay) and - * enable arbiter - */ - reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | - (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); - IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); - - return 0; -} - -/** - * ixgbe_dcb_config_pfc_82599 - Configure priority flow control - * @hw: pointer to hardware structure - * @pfc_en: enabled pfc bitmask - * - * Configure Priority Flow Control (PFC) for each traffic class. 
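The 128-iteration loop opening the Tx descriptor arbiter setup above is a select-then-write indirection: RTTDQSEL picks one of the 128 Tx queues, and the following RTTDT1C write lands on that queue's credit register; zeroing them all defers rate control to the per-TC RTTDT2C arbitration. A standalone model of the access pattern (the register file is simulated; only the names come from the driver):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint32_t rttdqsel;		/* queue select register */
static uint32_t rttdt1c[128];		/* per-queue credit config */

static void write_reg(const char *name, uint32_t val)
{
	if (!strcmp(name, "RTTDQSEL"))
		rttdqsel = val;
	else if (!strcmp(name, "RTTDT1C"))
		rttdt1c[rttdqsel & 127] = val;	/* routed by the select */
}

int main(void)
{
	int i;

	for (i = 0; i < 128; i++) {
		write_reg("RTTDQSEL", i);
		write_reg("RTTDT1C", 0);
	}
	printf("queue 0 credit = %u\n", rttdt1c[0]);
	return 0;
}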
- */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en) -{ - u32 i, reg, rx_pba_size; - - /* Configure PFC Tx thresholds per TC */ - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - int enabled = pfc_en & (1 << i); - rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); - rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; - - reg = (rx_pba_size - hw->fc.low_water) << 10; - - if (enabled) - reg |= IXGBE_FCRTL_XONE; - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); - - reg = (rx_pba_size - hw->fc.high_water) << 10; - if (enabled) - reg |= IXGBE_FCRTH_FCEN; - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); - } - - if (pfc_en) { - /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time | (hw->fc.pause_time << 16); - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - - - reg = IXGBE_FCCFG_TFCE_PRIORITY; - IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); - /* - * Enable Receive PFC - * 82599 will always honor XOFF frames we receive when - * we are in PFC mode however X540 only honors enabled - * traffic classes. - */ - reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); - reg &= ~IXGBE_MFLCN_RFCE; - reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; - - if (hw->mac.type == ixgbe_mac_X540) - reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; - - IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); - - } else { - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - hw->mac.ops.fc_enable(hw, i); - } - - return 0; -} - -/** - * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics - * @hw: pointer to hardware structure - * - * Configure queue statistics registers, all queues belonging to same traffic - * class uses a single set of queue statistics counters. - */ -static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) -{ - u32 reg = 0; - u8 i = 0; - - /* - * Receive Queues stats setting - * 32 RQSMR registers, each configuring 4 queues. - * Set all 16 queues of each TC to the same stat - * with TC 'n' going to stat 'n'. - */ - for (i = 0; i < 32; i++) { - reg = 0x01010101 * (i / 4); - IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); - } - /* - * Transmit Queues stats setting - * 32 TQSM registers, each controlling 4 queues. - * Set all queues of each TC to the same stat - * with TC 'n' going to stat 'n'. - * Tx queues are allocated non-uniformly to TCs: - * 32, 32, 16, 16, 8, 8, 8, 8. - */ - for (i = 0; i < 32; i++) { - if (i < 8) - reg = 0x00000000; - else if (i < 16) - reg = 0x01010101; - else if (i < 20) - reg = 0x02020202; - else if (i < 24) - reg = 0x03030303; - else if (i < 26) - reg = 0x04040404; - else if (i < 28) - reg = 0x05050505; - else if (i < 30) - reg = 0x06060606; - else - reg = 0x07070707; - IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); - } - - return 0; -} - -/** - * ixgbe_dcb_hw_config_82599 - Configure and enable DCB - * @hw: pointer to hardware structure - * @refill: refill credits index by traffic class - * @max: max credits index by traffic class - * @bwg_id: bandwidth grouping indexed by traffic class - * @prio_type: priority type indexed by traffic class - * @pfc_en: enabled pfc bitmask - * - * Configure dcb settings and enable dcb mode. 
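The unrolled if/else chain in ixgbe_dcb_config_tc_stats_82599() above encodes the non-uniform Tx queue allocation (32, 32, 16, 16, 8, 8, 8, 8 queues for TCs 0..7) across 32 TQSM registers of four queues each, with 0x01010101 * tc replicating the TC id into all four byte lanes. The same table can be derived from the allocation; a standalone sketch that reproduces the chain's values exactly:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	static const int txq_per_tc[8] = { 32, 32, 16, 16, 8, 8, 8, 8 };
	int tc = 0, left = txq_per_tc[0], i;

	for (i = 0; i < 32; i++) {	/* 32 TQSM regs x 4 queues */
		printf("TQSM[%02d] = 0x%08x\n", i, 0x01010101u * tc);
		left -= 4;
		if (left == 0 && tc < 7)
			left = txq_per_tc[++tc];
	}
	return 0;
}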
- */ -s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) -{ - ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, - prio_type, prio_tc); - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, - bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, - bwg_id, prio_type, prio_tc); - ixgbe_dcb_config_pfc_82599(hw, pfc_en); - ixgbe_dcb_config_tc_stats_82599(hw); - - return 0; -} - diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h deleted file mode 100644 index 08d1749..0000000 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.h +++ /dev/null @@ -1,123 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _DCB_82599_CONFIG_H_ -#define _DCB_82599_CONFIG_H_ - -/* DCB register definitions */ -#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, - * 1 WSP - Weighted Strict Priority - */ -#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, - * 1 WRR - Weighted Round Robin - */ -#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ -#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ -#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ -#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must - * clear! 
- */ -#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ - -/* Receive UP2TC mapping */ -#define IXGBE_RTRUP2TC_UP_SHIFT 3 -/* Transmit UP2TC mapping */ -#define IXGBE_RTTUP2TC_UP_SHIFT 3 - -#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ -#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ -#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ -#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ - -#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet - * buffers enable - */ -#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores - * (RSS) enable - */ - -/* RTRPCS Bit Masks */ -#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ -/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ -#define IXGBE_RTRPCS_RAC 0x00000004 -#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ - -/* RTTDT2C Bit Masks */ -#define IXGBE_RTTDT2C_MCL_SHIFT 12 -#define IXGBE_RTTDT2C_BWG_SHIFT 9 -#define IXGBE_RTTDT2C_GSP 0x40000000 -#define IXGBE_RTTDT2C_LSP 0x80000000 - -#define IXGBE_RTTPT2C_MCL_SHIFT 12 -#define IXGBE_RTTPT2C_BWG_SHIFT 9 -#define IXGBE_RTTPT2C_GSP 0x40000000 -#define IXGBE_RTTPT2C_LSP 0x80000000 - -/* RTTPCS Bit Masks */ -#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, - * 1 SP - Strict Priority - */ -#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ -#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ -#define IXGBE_RTTPCS_ARBD_SHIFT 22 -#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ - -/* SECTXMINIFG DCB */ -#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ - - -/* DCB hardware-specific driver APIs */ - -/* DCB PFC functions */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en); - -/* DCB hw initialization */ -s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc); - -s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc); - -s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type, - u8 *prio_tc); - -#endif /* _DCB_82599_CONFIG_H */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c deleted file mode 100644 index 0ace6ce..0000000 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ /dev/null @@ -1,816 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". 
- - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "ixgbe.h" -#include -#include "ixgbe_dcb_82598.h" -#include "ixgbe_dcb_82599.h" - -/* Callbacks for DCB netlink in the kernel */ -#define BIT_DCB_MODE 0x01 -#define BIT_PFC 0x02 -#define BIT_PG_RX 0x04 -#define BIT_PG_TX 0x08 -#define BIT_APP_UPCHG 0x10 -#define BIT_LINKSPEED 0x80 - -/* Responses for the DCB_C_SET_ALL command */ -#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ -#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ -#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ - -int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, - struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) -{ - struct tc_configuration *src_tc_cfg = NULL; - struct tc_configuration *dst_tc_cfg = NULL; - int i; - - if (!src_dcb_cfg || !dst_dcb_cfg) - return -EINVAL; - - for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { - src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; - dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; - - dst_tc_cfg->path[DCB_TX_CONFIG].prio_type = - src_tc_cfg->path[DCB_TX_CONFIG].prio_type; - - dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id = - src_tc_cfg->path[DCB_TX_CONFIG].bwg_id; - - dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent = - src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent; - - dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap = - src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap; - - dst_tc_cfg->path[DCB_RX_CONFIG].prio_type = - src_tc_cfg->path[DCB_RX_CONFIG].prio_type; - - dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id = - src_tc_cfg->path[DCB_RX_CONFIG].bwg_id; - - dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent = - src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent; - - dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap = - src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap; - } - - for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { - dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG] - [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage - [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; - dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG] - [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage - [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; - } - - for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { - dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc = - src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc; - } - - dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable; - - return 0; -} - -static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); -} - -static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) -{ - u8 err = 0; - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - /* verify there is something to do, if not then exit */ - if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - return err; - - if (state > 0) { - /* Turn on DCB */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { - e_err(drv, "Enable failed, needs MSI-X\n"); - err = 1; - goto out; - } - - adapter->flags |= IXGBE_FLAG_DCB_ENABLED; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - adapter->last_lfc_mode = adapter->hw.fc.current_mode; - adapter->hw.fc.requested_mode = ixgbe_fc_none; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - adapter->flags &= 
~IXGBE_FLAG_FDIR_HASH_CAPABLE; - break; - default: - break; - } - - ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS); - } else { - /* Turn off DCB */ - adapter->hw.fc.requested_mode = adapter->last_lfc_mode; - adapter->temp_dcb_cfg.pfc_mode_enable = false; - adapter->dcb_cfg.pfc_mode_enable = false; - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - break; - default: - break; - } - ixgbe_setup_tc(netdev, 0); - } - -out: - return err; -} - -static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, - u8 *perm_addr) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i, j; - - memset(perm_addr, 0xff, MAX_ADDR_LEN); - - for (i = 0; i < netdev->addr_len; i++) - perm_addr[i] = adapter->hw.mac.perm_addr[i]; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - for (j = 0; j < netdev->addr_len; j++, i++) - perm_addr[i] = adapter->hw.mac.san_addr[j]; - break; - default: - break; - } -} - -static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, - u8 prio, u8 bwg_id, u8 bw_pct, - u8 up_map) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (prio != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; - if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; - if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = - bw_pct; - if (up_map != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = - up_map; - - if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type != - adapter->dcb_cfg.tc_config[tc].path[0].prio_type) || - (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id != - adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) || - (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != - adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || - (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != - adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) - adapter->dcb_set_bitmap |= BIT_PG_TX; -} - -static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, - u8 bw_pct) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; - - if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != - adapter->dcb_cfg.bw_percentage[0][bwg_id]) - adapter->dcb_set_bitmap |= BIT_PG_TX; -} - -static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, - u8 prio, u8 bwg_id, u8 bw_pct, - u8 up_map) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (prio != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; - if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; - if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = - bw_pct; - if (up_map != DCB_ATTR_VALUE_UNDEFINED) - adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = - up_map; - - if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type != - adapter->dcb_cfg.tc_config[tc].path[1].prio_type) || - (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id != - adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) || - (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != - 
adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || - (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != - adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) - adapter->dcb_set_bitmap |= BIT_PG_RX; -} - -static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, - u8 bw_pct) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; - - if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != - adapter->dcb_cfg.bw_percentage[1][bwg_id]) - adapter->dcb_set_bitmap |= BIT_PG_RX; -} - -static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, - u8 *prio, u8 *bwg_id, u8 *bw_pct, - u8 *up_map) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; - *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; - *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; - *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; -} - -static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, - u8 *bw_pct) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; -} - -static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, - u8 *prio, u8 *bwg_id, u8 *bw_pct, - u8 *up_map) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; - *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; - *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; - *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; -} - -static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, - u8 *bw_pct) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; -} - -static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, - u8 setting) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; - if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != - adapter->dcb_cfg.tc_config[priority].dcb_pfc) { - adapter->dcb_set_bitmap |= BIT_PFC; - adapter->temp_dcb_cfg.pfc_mode_enable = true; - } -} - -static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, - u8 *setting) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; -} - -static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int ret; -#ifdef IXGBE_FCOE - struct dcb_app app = { - .selector = DCB_APP_IDTYPE_ETHTYPE, - .protocol = ETH_P_FCOE, - }; - u8 up = dcb_getapp(netdev, &app); -#endif - - ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, - MAX_TRAFFIC_CLASS); - if (ret) - return DCB_NO_HW_CHG; - -#ifdef IXGBE_FCOE - if (up && (up != (1 << adapter->fcoe.up))) - adapter->dcb_set_bitmap |= BIT_APP_UPCHG; - - /* - * Only take down the adapter if an app change occurred. FCoE - * may shuffle tx rings in this case and this can not be done - * without a reset currently. 
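A pattern that runs through all the CEE callbacks above: setters never touch hardware. They stage into adapter->temp_dcb_cfg and record the class of change in dcb_set_bitmap; only the DCB_C_SET_ALL handler (ixgbe_dcbnl_set_all(), continued below) copies the staged config across and reprograms the device. A pared-down standalone sketch of that staged-commit shape (struct and names simplified from the driver):

#include <stdio.h>

#define BIT_PFC 0x02

struct cfg { int pfc_mode_enable; };

static struct cfg dcb_cfg, temp_dcb_cfg;
static unsigned int dcb_set_bitmap;

static void set_pfc_state(int state)		/* netlink setter */
{
	temp_dcb_cfg.pfc_mode_enable = state;
	if (temp_dcb_cfg.pfc_mode_enable != dcb_cfg.pfc_mode_enable)
		dcb_set_bitmap |= BIT_PFC;	/* mark dirty only */
}

static void commit_all(void)			/* DCB_C_SET_ALL */
{
	if (dcb_set_bitmap & BIT_PFC)
		dcb_cfg.pfc_mode_enable = temp_dcb_cfg.pfc_mode_enable;
	/* ...program the hardware from dcb_cfg here... */
	dcb_set_bitmap = 0;
}

int main(void)
{
	set_pfc_state(1);
	commit_all();
	printf("pfc enabled: %d\n", dcb_cfg.pfc_mode_enable);
	return 0;
}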
- */ - if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - - adapter->fcoe.up = ffs(up) - 1; - - if (netif_running(netdev)) - netdev->netdev_ops->ndo_stop(netdev); - ixgbe_clear_interrupt_scheme(adapter); - } -#endif - - if (adapter->dcb_cfg.pfc_mode_enable) { - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - if (adapter->hw.fc.current_mode != ixgbe_fc_pfc) - adapter->last_lfc_mode = - adapter->hw.fc.current_mode; - break; - default: - break; - } - adapter->hw.fc.requested_mode = ixgbe_fc_pfc; - } else { - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - adapter->hw.fc.requested_mode = ixgbe_fc_none; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - adapter->hw.fc.requested_mode = adapter->last_lfc_mode; - break; - default: - break; - } - } - -#ifdef IXGBE_FCOE - if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { - ixgbe_init_interrupt_scheme(adapter); - if (netif_running(netdev)) - netdev->netdev_ops->ndo_open(netdev); - ret = DCB_HW_CHG_RST; - } -#endif - - if (adapter->dcb_set_bitmap & BIT_PFC) { - u8 pfc_en; - ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en); - ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en); - ret = DCB_HW_CHG; - } - - if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { - u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; - u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; - /* Priority to TC mapping in CEE case default to 1:1 */ - u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; - int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - -#ifdef CONFIG_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) - max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); -#endif - - ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, - max_frame, DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, - max_frame, DCB_RX_CONFIG); - - ixgbe_dcb_unpack_refill(&adapter->dcb_cfg, - DCB_TX_CONFIG, refill); - ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max); - ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg, - DCB_TX_CONFIG, bwg_id); - ixgbe_dcb_unpack_prio(&adapter->dcb_cfg, - DCB_TX_CONFIG, prio_type); - - ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, - bwg_id, prio_type, prio_tc); - } - - if (adapter->dcb_cfg.pfc_mode_enable) - adapter->hw.fc.current_mode = ixgbe_fc_pfc; - - if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) - clear_bit(__IXGBE_RESETTING, &adapter->state); - adapter->dcb_set_bitmap = 0x00; - return ret; -} - -static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - switch (capid) { - case DCB_CAP_ATTR_PG: - *cap = true; - break; - case DCB_CAP_ATTR_PFC: - *cap = true; - break; - case DCB_CAP_ATTR_UP2TC: - *cap = false; - break; - case DCB_CAP_ATTR_PG_TCS: - *cap = 0x80; - break; - case DCB_CAP_ATTR_PFC_TCS: - *cap = 0x80; - break; - case DCB_CAP_ATTR_GSP: - *cap = true; - break; - case DCB_CAP_ATTR_BCN: - *cap = false; - break; - case DCB_CAP_ATTR_DCBX: - *cap = adapter->dcbx_cap; - break; - default: - *cap = false; - break; - } - - return 0; -} - -static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 rval = 0; - - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - switch (tcid) { - case DCB_NUMTCS_ATTR_PG: - *num = MAX_TRAFFIC_CLASS; - break; - case DCB_NUMTCS_ATTR_PFC: - *num = MAX_TRAFFIC_CLASS; - 
break; - default: - rval = -EINVAL; - break; - } - } else { - rval = -EINVAL; - } - - return rval; -} - -static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) -{ - return -EINVAL; -} - -static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - return adapter->dcb_cfg.pfc_mode_enable; -} - -static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - adapter->temp_dcb_cfg.pfc_mode_enable = state; - if (adapter->temp_dcb_cfg.pfc_mode_enable != - adapter->dcb_cfg.pfc_mode_enable) - adapter->dcb_set_bitmap |= BIT_PFC; -} - -/** - * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority - * @netdev : the corresponding netdev - * @idtype : identifies the id as ether type or TCP/UDP port number - * @id: id is either ether type or TCP/UDP port number - * - * Returns : on success, returns a non-zero 802.1p user priority bitmap - * otherwise returns 0 as the invalid user priority bitmap to indicate an - * error. - */ -static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct dcb_app app = { - .selector = idtype, - .protocol = id, - }; - - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) - return 0; - - return dcb_getapp(netdev, &app); -} - -static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, - struct ieee_ets *ets) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; - - /* No IEEE PFC settings available */ - if (!my_ets) - return -EINVAL; - - ets->ets_cap = MAX_TRAFFIC_CLASS; - ets->cbs = my_ets->cbs; - memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); - memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); - memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); - memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); - return 0; -} - -static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, - struct ieee_ets *ets) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; - __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; - int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i, err; - __u64 *p = (__u64 *) ets->prio_tc; - /* naively give each TC a bwg to map onto CEE hardware */ - __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; - - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) - return -EINVAL; - - if (!adapter->ixgbe_ieee_ets) { - adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), - GFP_KERNEL); - if (!adapter->ixgbe_ieee_ets) - return -ENOMEM; - } - - memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); - - /* Map TSA onto CEE prio type */ - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - switch (ets->tc_tsa[i]) { - case IEEE_8021QAZ_TSA_STRICT: - prio_type[i] = 2; - break; - case IEEE_8021QAZ_TSA_ETS: - prio_type[i] = 0; - break; - default: - /* Hardware only supports priority strict or - * ETS transmission selection algorithms if - * we receive some other value from dcbnl - * throw an error - */ - return -EINVAL; - } - } - - if (*p) - ixgbe_dcbnl_set_state(dev, 1); - else - ixgbe_dcbnl_set_state(dev, 0); - - ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); - err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, - bwg_id, prio_type, ets->prio_tc); - return err; -} - -static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, - struct 
ieee_pfc *pfc) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; - int i; - - /* No IEEE PFC settings available */ - if (!my_pfc) - return -EINVAL; - - pfc->pfc_cap = MAX_TRAFFIC_CLASS; - pfc->pfc_en = my_pfc->pfc_en; - pfc->mbc = my_pfc->mbc; - pfc->delay = my_pfc->delay; - - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - pfc->requests[i] = adapter->stats.pxoffrxc[i]; - pfc->indications[i] = adapter->stats.pxofftxc[i]; - } - - return 0; -} - -static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, - struct ieee_pfc *pfc) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int err; - - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) - return -EINVAL; - - if (!adapter->ixgbe_ieee_pfc) { - adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), - GFP_KERNEL); - if (!adapter->ixgbe_ieee_pfc) - return -ENOMEM; - } - - memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); - err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en); - return err; -} - -#ifdef IXGBE_FCOE -static void ixgbe_dcbnl_devreset(struct net_device *dev) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - if (netif_running(dev)) - dev->netdev_ops->ndo_stop(dev); - - ixgbe_clear_interrupt_scheme(adapter); - ixgbe_init_interrupt_scheme(adapter); - - if (netif_running(dev)) - dev->netdev_ops->ndo_open(dev); -} -#endif - -static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, - struct dcb_app *app) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int err = -EINVAL; - - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) - return err; - - err = dcb_ieee_setapp(dev, app); - -#ifdef IXGBE_FCOE - if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app->protocol == ETH_P_FCOE) { - u8 app_mask = dcb_ieee_getapp_mask(dev, app); - - if (app_mask & (1 << adapter->fcoe.up)) - return err; - - adapter->fcoe.up = app->priority; - ixgbe_dcbnl_devreset(dev); - } -#endif - return 0; -} - -static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, - struct dcb_app *app) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int err; - - if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) - return -EINVAL; - - err = dcb_ieee_delapp(dev, app); - -#ifdef IXGBE_FCOE - if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && - app->protocol == ETH_P_FCOE) { - u8 app_mask = dcb_ieee_getapp_mask(dev, app); - - if (app_mask & (1 << adapter->fcoe.up)) - return err; - - adapter->fcoe.up = app_mask ? 
- ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; - ixgbe_dcbnl_devreset(dev); - } -#endif - return err; -} - -static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - return adapter->dcbx_cap; -} - -static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ieee_ets ets = {0}; - struct ieee_pfc pfc = {0}; - - /* no support for LLD_MANAGED modes or CEE+IEEE */ - if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || - ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || - !(mode & DCB_CAP_DCBX_HOST)) - return 1; - - if (mode == adapter->dcbx_cap) - return 0; - - adapter->dcbx_cap = mode; - - /* ETS and PFC defaults */ - ets.ets_cap = 8; - pfc.pfc_cap = 8; - - if (mode & DCB_CAP_DCBX_VER_IEEE) { - ixgbe_dcbnl_ieee_setets(dev, &ets); - ixgbe_dcbnl_ieee_setpfc(dev, &pfc); - } else if (mode & DCB_CAP_DCBX_VER_CEE) { - adapter->dcb_set_bitmap |= (BIT_PFC | BIT_PG_TX | BIT_PG_RX); - ixgbe_dcbnl_set_all(dev); - } else { - /* Drop into single TC mode strict priority as this - * indicates CEE and IEEE versions are disabled - */ - ixgbe_dcbnl_ieee_setets(dev, &ets); - ixgbe_dcbnl_ieee_setpfc(dev, &pfc); - ixgbe_dcbnl_set_state(dev, 0); - } - - return 0; -} - -const struct dcbnl_rtnl_ops dcbnl_ops = { - .ieee_getets = ixgbe_dcbnl_ieee_getets, - .ieee_setets = ixgbe_dcbnl_ieee_setets, - .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, - .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, - .ieee_setapp = ixgbe_dcbnl_ieee_setapp, - .ieee_delapp = ixgbe_dcbnl_ieee_delapp, - .getstate = ixgbe_dcbnl_get_state, - .setstate = ixgbe_dcbnl_set_state, - .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, - .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, - .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, - .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx, - .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, - .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, - .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx, - .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, - .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, - .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, - .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, - .setall = ixgbe_dcbnl_set_all, - .getcap = ixgbe_dcbnl_getcap, - .getnumtcs = ixgbe_dcbnl_getnumtcs, - .setnumtcs = ixgbe_dcbnl_setnumtcs, - .getpfcstate = ixgbe_dcbnl_getpfcstate, - .setpfcstate = ixgbe_dcbnl_setpfcstate, - .getapp = ixgbe_dcbnl_getapp, - .getdcbx = ixgbe_dcbnl_getdcbx, - .setdcbx = ixgbe_dcbnl_setdcbx, -}; diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c deleted file mode 100644 index 82d4244..0000000 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ /dev/null @@ -1,2592 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
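The capability screening at the top of ixgbe_dcbnl_setdcbx() is easy to misread in this flattened diff, so here is a standalone restatement of the same three rejections under the uapi DCB_CAP_DCBX_* flags; a non-zero return mirrors the driver refusing the mode:

#include <stdio.h>
#include <linux/dcbnl.h>

static unsigned char dcbx_mode_rejected(unsigned char mode)
{
	if (mode & DCB_CAP_DCBX_LLD_MANAGED)	/* no firmware-managed DCBX */
		return 1;
	if ((mode & DCB_CAP_DCBX_VER_IEEE) &&
	    (mode & DCB_CAP_DCBX_VER_CEE))	/* CEE and IEEE are exclusive */
		return 1;
	if (!(mode & DCB_CAP_DCBX_HOST))	/* host must stay in charge */
		return 1;
	return 0;
}

int main(void)
{
	printf("ieee+host:     %u\n",
	       dcbx_mode_rejected(DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE));
	printf("ieee+cee+host: %u\n",
	       dcbx_mode_rejected(DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE |
				  DCB_CAP_DCBX_VER_CEE));
	return 0;
}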
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* ethtool support for ixgbe */ - -#include <linux/interrupt.h> -#include <linux/types.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/pci.h> -#include <linux/netdevice.h> -#include <linux/ethtool.h> -#include <linux/vmalloc.h> -#include <linux/uaccess.h> - -#include "ixgbe.h" - - -#define IXGBE_ALL_RAR_ENTRIES 16 - -enum {NETDEV_STATS, IXGBE_STATS}; - -struct ixgbe_stats { - char stat_string[ETH_GSTRING_LEN]; - int type; - int sizeof_stat; - int stat_offset; -}; - -#define IXGBE_STAT(m) IXGBE_STATS, \ - sizeof(((struct ixgbe_adapter *)0)->m), \ - offsetof(struct ixgbe_adapter, m) -#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ - sizeof(((struct rtnl_link_stats64 *)0)->m), \ - offsetof(struct rtnl_link_stats64, m) - -static struct ixgbe_stats ixgbe_gstrings_stats[] = { - {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, - {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, - {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, - {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, - {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, - {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, - {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, - {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, - {"lsc_int", IXGBE_STAT(lsc_int)}, - {"tx_busy", IXGBE_STAT(tx_busy)}, - {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, - {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, - {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, - {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, - {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, - {"multicast", IXGBE_NETDEV_STAT(multicast)}, - {"broadcast", IXGBE_STAT(stats.bprc)}, - {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, - {"collisions", IXGBE_NETDEV_STAT(collisions)}, - {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, - {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, - {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, - {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, - {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, - {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, - {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, - {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, - {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, - {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, - {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, - {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, - {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, - {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, - {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, - {"tx_restart_queue", IXGBE_STAT(restart_queue)}, - {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, - {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, - {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, - {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, - {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, - {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, - {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, - {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, - {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, - {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, - {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, - {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, - {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, - {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, -#ifdef IXGBE_FCOE - {"fcoe_bad_fccrc", 
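The IXGBE_STAT()/IXGBE_NETDEV_STAT() macros above store only a size and a byte offset per counter; ixgbe_get_ethtool_stats() (further down) then fetches every statistic with one generic pointer computation instead of one line of code per counter. A self-contained sketch of that table technique, using a made-up two-field struct in place of ixgbe_adapter:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_adapter {
	uint64_t rx_packets;
	uint32_t restart_queue;
};

struct stat_desc {
	const char *name;
	int sizeof_stat;
	int stat_offset;
};

#define FAKE_STAT(m) { #m, sizeof(((struct fake_adapter *)0)->m), \
		       offsetof(struct fake_adapter, m) }

static const struct stat_desc stats_table[] = {
	FAKE_STAT(rx_packets),
	FAKE_STAT(restart_queue),
};

int main(void)
{
	struct fake_adapter a = { .rx_packets = 42, .restart_queue = 7 };
	size_t i;

	for (i = 0; i < sizeof(stats_table) / sizeof(stats_table[0]); i++) {
		/* same generic fetch ixgbe_get_ethtool_stats() performs */
		char *p = (char *)&a + stats_table[i].stat_offset;
		uint64_t v = (stats_table[i].sizeof_stat == sizeof(uint64_t)) ?
			     *(uint64_t *)p : *(uint32_t *)p;

		printf("%s = %llu\n", stats_table[i].name,
		       (unsigned long long)v);
	}
	return 0;
}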
IXGBE_STAT(stats.fccrc)}, - {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, - {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, - {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, - {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, - {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, -#endif /* IXGBE_FCOE */ -}; - -#define IXGBE_QUEUE_STATS_LEN \ - ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \ - ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \ - (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) -#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) -#define IXGBE_PB_STATS_LEN ( \ - (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \ - IXGBE_FLAG_DCB_ENABLED) ? \ - (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ - / sizeof(u64) : 0) -#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ - IXGBE_PB_STATS_LEN + \ - IXGBE_QUEUE_STATS_LEN) - -static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", "Eeprom test (offline)", - "Interrupt test (offline)", "Loopback test (offline)", - "Link test (on/offline)" -}; -#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN - -static int ixgbe_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 link_speed = 0; - bool link_up; - - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->transceiver = XCVR_EXTERNAL; - if ((hw->phy.media_type == ixgbe_media_type_copper) || - (hw->phy.multispeed_fiber)) { - ecmd->supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg); - - switch (hw->mac.type) { - case ixgbe_mac_X540: - ecmd->supported |= SUPPORTED_100baseT_Full; - break; - default: - break; - } - - ecmd->advertising = ADVERTISED_Autoneg; - if (hw->phy.autoneg_advertised) { - if (hw->phy.autoneg_advertised & - IXGBE_LINK_SPEED_100_FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.autoneg_advertised & - IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw->phy.autoneg_advertised & - IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - } else { - /* - * Default advertised modes in case - * phy.autoneg_advertised isn't set. 
- */ - ecmd->advertising |= (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full); - if (hw->mac.type == ixgbe_mac_X540) - ecmd->advertising |= ADVERTISED_100baseT_Full; - } - - if (hw->phy.media_type == ixgbe_media_type_copper) { - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - } else { - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - } - } else if (hw->phy.media_type == ixgbe_media_type_backplane) { - /* Set as FIBRE until SERDES defined in kernel */ - if (hw->device_id == IXGBE_DEV_ID_82598_BX) { - ecmd->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; - } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || - (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { - ecmd->supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | - ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - } else { - ecmd->supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - } - } else { - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; - } - - /* Get PHY type */ - switch (adapter->hw.phy.type) { - case ixgbe_phy_tn: - case ixgbe_phy_aq: - case ixgbe_phy_cu_unknown: - /* Copper 10G-BASET */ - ecmd->port = PORT_TP; - break; - case ixgbe_phy_qt: - ecmd->port = PORT_FIBRE; - break; - case ixgbe_phy_nl: - case ixgbe_phy_sfp_passive_tyco: - case ixgbe_phy_sfp_passive_unknown: - case ixgbe_phy_sfp_ftl: - case ixgbe_phy_sfp_avago: - case ixgbe_phy_sfp_intel: - case ixgbe_phy_sfp_unknown: - switch (adapter->hw.phy.sfp_type) { - /* SFP+ devices, further checking needed */ - case ixgbe_sfp_type_da_cu: - case ixgbe_sfp_type_da_cu_core0: - case ixgbe_sfp_type_da_cu_core1: - ecmd->port = PORT_DA; - break; - case ixgbe_sfp_type_sr: - case ixgbe_sfp_type_lr: - case ixgbe_sfp_type_srlr_core0: - case ixgbe_sfp_type_srlr_core1: - ecmd->port = PORT_FIBRE; - break; - case ixgbe_sfp_type_not_present: - ecmd->port = PORT_NONE; - break; - case ixgbe_sfp_type_1g_cu_core0: - case ixgbe_sfp_type_1g_cu_core1: - ecmd->port = PORT_TP; - ecmd->supported = SUPPORTED_TP; - ecmd->advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_TP); - break; - case ixgbe_sfp_type_unknown: - default: - ecmd->port = PORT_OTHER; - break; - } - break; - case ixgbe_phy_xaui: - ecmd->port = PORT_NONE; - break; - case ixgbe_phy_unknown: - case ixgbe_phy_generic: - case ixgbe_phy_sfp_unsupported: - default: - ecmd->port = PORT_OTHER; - break; - } - - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (link_up) { - switch (link_speed) { - case IXGBE_LINK_SPEED_10GB_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_10000); - break; - case IXGBE_LINK_SPEED_1GB_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_1000); - break; - case IXGBE_LINK_SPEED_100_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_100); - break; - default: - break; - } - ecmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(ecmd, -1); - ecmd->duplex = -1; - } - - return 0; -} - -static int ixgbe_set_settings(struct net_device *netdev, - struct 
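Everything ixgbe_get_settings() reports (speed, duplex, port type) reaches userspace through the legacy ETHTOOL_GSET ioctl, which is what the ethtool utility prints. A minimal sketch of that consumer side; the interface name "eth0" is an assumption:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&ecmd;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GSET");
		return 1;
	}
	printf("speed %u Mb/s, %s duplex, port %u\n",
	       ethtool_cmd_speed(&ecmd),
	       ecmd.duplex == DUPLEX_FULL ? "full" : "half", ecmd.port);
	close(fd);
	return 0;
}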
ethtool_cmd *ecmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 advertised, old; - s32 err = 0; - - if ((hw->phy.media_type == ixgbe_media_type_copper) || - (hw->phy.multispeed_fiber)) { - /* 10000/copper and 1000/copper must autoneg - * this function does not support any duplex forcing, but can - * limit the advertising of the adapter to only 10000 or 1000 */ - if (ecmd->autoneg == AUTONEG_DISABLE) - return -EINVAL; - - old = hw->phy.autoneg_advertised; - advertised = 0; - if (ecmd->advertising & ADVERTISED_10000baseT_Full) - advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (ecmd->advertising & ADVERTISED_1000baseT_Full) - advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - if (ecmd->advertising & ADVERTISED_100baseT_Full) - advertised |= IXGBE_LINK_SPEED_100_FULL; - - if (old == advertised) - return err; - /* this sets the link speed and restarts auto-neg */ - hw->mac.autotry_restart = true; - err = hw->mac.ops.setup_link(hw, advertised, true, true); - if (err) { - e_info(probe, "setup link failed with code %d\n", err); - hw->mac.ops.setup_link(hw, old, true, true); - } - } else { - /* in this case we currently only support 10Gb/FULL */ - u32 speed = ethtool_cmd_speed(ecmd); - if ((ecmd->autoneg == AUTONEG_ENABLE) || - (ecmd->advertising != ADVERTISED_10000baseT_Full) || - (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) - return -EINVAL; - } - - return err; -} - -static void ixgbe_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - /* - * Flow Control Autoneg isn't on if - * - we didn't ask for it OR - * - it failed, we know this by tx & rx being off - */ - if (hw->fc.disable_fc_autoneg || - (hw->fc.current_mode == ixgbe_fc_none)) - pause->autoneg = 0; - else - pause->autoneg = 1; - - if (hw->fc.current_mode == ixgbe_fc_rx_pause) { - pause->rx_pause = 1; - } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { - pause->tx_pause = 1; - } else if (hw->fc.current_mode == ixgbe_fc_full) { - pause->rx_pause = 1; - pause->tx_pause = 1; -#ifdef CONFIG_DCB - } else if (hw->fc.current_mode == ixgbe_fc_pfc) { - pause->rx_pause = 0; - pause->tx_pause = 0; -#endif - } -} - -static int ixgbe_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_fc_info fc; - -#ifdef CONFIG_DCB - if (adapter->dcb_cfg.pfc_mode_enable || - ((hw->mac.type == ixgbe_mac_82598EB) && - (adapter->flags & IXGBE_FLAG_DCB_ENABLED))) - return -EINVAL; - -#endif - fc = hw->fc; - - if (pause->autoneg != AUTONEG_ENABLE) - fc.disable_fc_autoneg = true; - else - fc.disable_fc_autoneg = false; - - if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) - fc.requested_mode = ixgbe_fc_full; - else if (pause->rx_pause && !pause->tx_pause) - fc.requested_mode = ixgbe_fc_rx_pause; - else if (!pause->rx_pause && pause->tx_pause) - fc.requested_mode = ixgbe_fc_tx_pause; - else if (!pause->rx_pause && !pause->tx_pause) - fc.requested_mode = ixgbe_fc_none; - else - return -EINVAL; - -#ifdef CONFIG_DCB - adapter->last_lfc_mode = fc.requested_mode; -#endif - - /* if the thing changed then we'll update and use new autoneg */ - if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { - hw->fc = fc; - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - else - ixgbe_reset(adapter); - } - - return 0; -} - -static u32 
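The requested-mode selection in ixgbe_set_pauseparam() reduces to a small truth table over (rx_pause, tx_pause, autoneg). Restated on its own, with enum names that only mirror the driver's ixgbe_fc_* values:

#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode fc_requested(int rx_pause, int tx_pause, int autoneg)
{
	if ((rx_pause && tx_pause) || autoneg)
		return FC_FULL;		/* autoneg negotiates both directions */
	if (rx_pause)
		return FC_RX_PAUSE;
	if (tx_pause)
		return FC_TX_PAUSE;
	return FC_NONE;
}

int main(void)
{
	printf("rx only -> %d\n", fc_requested(1, 0, 0));	/* FC_RX_PAUSE */
	printf("autoneg -> %d\n", fc_requested(0, 0, 1));	/* FC_FULL */
	return 0;
}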
ixgbe_get_msglevel(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - return adapter->msg_enable; -} - -static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = data; -} - -static int ixgbe_get_regs_len(struct net_device *netdev) -{ -#define IXGBE_REGS_LEN 1128 - return IXGBE_REGS_LEN * sizeof(u32); -} - -#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ - -static void ixgbe_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 *regs_buff = p; - u8 i; - - memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); - - regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; - - /* General Registers */ - regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); - regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); - regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); - regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); - regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); - regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); - regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); - - /* NVM Register */ - regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); - regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); - regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); - regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); - regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); - regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); - regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); - regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); - regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); - regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); - - /* Interrupt */ - /* don't read EICR because it can clear interrupt causes, instead - * read EICS which is a shadow but doesn't clear EICR */ - regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); - regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); - regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); - regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); - regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); - regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); - regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); - regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); - regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); - regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); - regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); - regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); - - /* Flow Control */ - regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); - regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); - regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); - regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); - regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); - for (i = 0; i < 8; i++) { - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); - regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); - break; - case ixgbe_mac_82599EB: - regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); - regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); - break; - default: - break; - } - } - regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); - regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); - - /* Receive DMA */ - for (i = 0; i < 64; i++) - regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); - for (i = 0; i < 64; i++) - regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); - for (i = 
0; i < 64; i++) - regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); - for (i = 0; i < 64; i++) - regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); - for (i = 0; i < 64; i++) - regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); - for (i = 0; i < 64; i++) - regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); - for (i = 0; i < 16; i++) - regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); - for (i = 0; i < 16; i++) - regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - for (i = 0; i < 8; i++) - regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); - regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); - - /* Receive */ - regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); - regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); - for (i = 0; i < 16; i++) - regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); - for (i = 0; i < 16; i++) - regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); - regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); - regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); - regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); - regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); - regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); - for (i = 0; i < 8; i++) - regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); - for (i = 0; i < 8; i++) - regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); - regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); - - /* Transmit */ - for (i = 0; i < 32; i++) - regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); - for (i = 0; i < 32; i++) - regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); - for (i = 0; i < 32; i++) - regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); - for (i = 0; i < 32; i++) - regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); - for (i = 0; i < 32; i++) - regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); - for (i = 0; i < 32; i++) - regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); - for (i = 0; i < 32; i++) - regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); - for (i = 0; i < 32; i++) - regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); - regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); - for (i = 0; i < 16; i++) - regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); - regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); - for (i = 0; i < 8; i++) - regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); - regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); - - /* Wake Up */ - regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); - regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); - regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); - regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); - regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); - regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); - regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); - regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); - regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); - - /* DCB */ - regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); - regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); - regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); - regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); - for (i = 0; i < 8; i++) - regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); - for (i = 0; i < 8; i++) - regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); - for (i = 0; i < 8; i++) - regs_buff[849 
+ i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); - for (i = 0; i < 8; i++) - regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); - for (i = 0; i < 8; i++) - regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); - for (i = 0; i < 8; i++) - regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); - - /* Statistics */ - regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); - regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); - regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); - regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); - for (i = 0; i < 8; i++) - regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); - regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); - regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); - regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); - regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); - regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); - regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); - regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); - for (i = 0; i < 8; i++) - regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); - for (i = 0; i < 8; i++) - regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); - for (i = 0; i < 8; i++) - regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); - for (i = 0; i < 8; i++) - regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); - regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); - regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); - regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); - regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); - regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); - regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); - regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); - regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); - regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); - regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); - regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); - regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); - for (i = 0; i < 8; i++) - regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); - regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); - regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); - regs_buff[956] = IXGBE_GET_STAT(adapter, roc); - regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); - regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); - regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); - regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); - regs_buff[961] = IXGBE_GET_STAT(adapter, tor); - regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); - regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); - regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); - regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); - regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); - regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); - regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); - regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); - regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); - regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); - regs_buff[973] = IXGBE_GET_STAT(adapter, xec); - for (i = 0; i < 16; i++) - regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); - for (i = 0; i < 16; i++) - regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); - for (i = 0; i < 16; i++) - regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); - for (i = 0; i < 16; i++) - regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); - - /* MAC */ - regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); - regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); - regs_buff[1041] = IXGBE_READ_REG(hw, 
IXGBE_PCS1GDBG0); - regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); - regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); - regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); - regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); - regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); - regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); - regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); - regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); - regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); - regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); - regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); - regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); - regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); - regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); - regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); - regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); - regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); - regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); - regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); - regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); - regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); - regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); - regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); - regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); - regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); - regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); - regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); - - /* Diagnostic */ - regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); - for (i = 0; i < 8; i++) - regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); - regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); - for (i = 0; i < 4; i++) - regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); - regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); - regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); - for (i = 0; i < 8; i++) - regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); - regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); - for (i = 0; i < 4; i++) - regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); - regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); - regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); - regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); - regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); - regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); - regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); - regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); - regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); - regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); - regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); - regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); - for (i = 0; i < 8; i++) - regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); - regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); - regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); - regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); - regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); - regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); - regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); - regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); - regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); - regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); -} - -static int ixgbe_get_eeprom_len(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = 
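The 1128-dword buffer laid out above is returned verbatim to userspace by the ETHTOOL_GREGS ioctl (this is what ethtool -d decodes). A minimal fetch of that dump; the interface name and the assumption that regdump_len matches the IXGBE_REGS_LEN above are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	size_t len = 1128 * sizeof(__u32);	/* IXGBE_REGS_LEN dwords */
	struct ethtool_regs *regs = calloc(1, sizeof(*regs) + len);
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (!regs || fd < 0)
		return 1;
	regs->cmd = ETHTOOL_GREGS;
	regs->len = len;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)regs;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GREGS");
		return 1;
	}
	/* index 0 is CTRL, index 1 is STATUS, per the layout above */
	printf("version 0x%08x CTRL 0x%08x STATUS 0x%08x\n", regs->version,
	       ((__u32 *)regs->data)[0], ((__u32 *)regs->data)[1]);
	return 0;
}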
netdev_priv(netdev); - return adapter->hw.eeprom.word_size * 2; -} - -static int ixgbe_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u16 *eeprom_buff; - int first_word, last_word, eeprom_len; - int ret_val = 0; - u16 i; - - if (eeprom->len == 0) - return -EINVAL; - - eeprom->magic = hw->vendor_id | (hw->device_id << 16); - - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_len = last_word - first_word + 1; - - eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); - if (!eeprom_buff) - return -ENOMEM; - - ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, - eeprom_buff); - - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < eeprom_len; i++) - le16_to_cpus(&eeprom_buff[i]); - - memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); - kfree(eeprom_buff); - - return ret_val; -} - -static void ixgbe_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - char firmware_version[32]; - - strncpy(drvinfo->driver, ixgbe_driver_name, - sizeof(drvinfo->driver) - 1); - strncpy(drvinfo->version, ixgbe_driver_version, - sizeof(drvinfo->version) - 1); - - snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", - (adapter->eeprom_version & 0xF000) >> 12, - (adapter->eeprom_version & 0x0FF0) >> 4, - adapter->eeprom_version & 0x000F); - - strncpy(drvinfo->fw_version, firmware_version, - sizeof(drvinfo->fw_version)); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), - sizeof(drvinfo->bus_info)); - drvinfo->n_stats = IXGBE_STATS_LEN; - drvinfo->testinfo_len = IXGBE_TEST_LEN; - drvinfo->regdump_len = ixgbe_get_regs_len(netdev); -} - -static void ixgbe_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; - struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; - - ring->rx_max_pending = IXGBE_MAX_RXD; - ring->tx_max_pending = IXGBE_MAX_TXD; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = rx_ring->count; - ring->tx_pending = tx_ring->count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} - -static int ixgbe_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; - int i, err = 0; - u32 new_rx_count, new_tx_count; - bool need_update = false; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD); - new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); - - new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD); - new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); - - if ((new_tx_count == adapter->tx_ring[0]->count) && - (new_rx_count == adapter->rx_ring[0]->count)) { - /* nothing to do */ - return 0; - } - - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - - if (!netif_running(adapter->netdev)) { - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->count = new_tx_count; - for (i = 0; i < 
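The offset arithmetic in ixgbe_get_eeprom() deserves a worked example: the EEPROM is addressed in 16-bit words, so a byte range is widened to an inclusive word range and any odd leading byte is skipped when copying out. For a read of bytes 3..8:

#include <stdio.h>

int main(void)
{
	unsigned int offset = 3, len = 6;		  /* read bytes 3..8 */
	unsigned int first_word = offset >> 1;		  /* word 1 */
	unsigned int last_word = (offset + len - 1) >> 1; /* word 4 */
	unsigned int words = last_word - first_word + 1;  /* 4 words read */

	printf("read %u words from word %u, skip %u leading byte(s)\n",
	       words, first_word, offset & 1);
	return 0;
}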
adapter->num_rx_queues; i++) - adapter->rx_ring[i]->count = new_rx_count; - adapter->tx_ring_count = new_tx_count; - adapter->rx_ring_count = new_rx_count; - goto clear_reset; - } - - temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); - if (!temp_tx_ring) { - err = -ENOMEM; - goto clear_reset; - } - - if (new_tx_count != adapter->tx_ring_count) { - for (i = 0; i < adapter->num_tx_queues; i++) { - memcpy(&temp_tx_ring[i], adapter->tx_ring[i], - sizeof(struct ixgbe_ring)); - temp_tx_ring[i].count = new_tx_count; - err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); - if (err) { - while (i) { - i--; - ixgbe_free_tx_resources(&temp_tx_ring[i]); - } - goto clear_reset; - } - } - need_update = true; - } - - temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); - if (!temp_rx_ring) { - err = -ENOMEM; - goto err_setup; - } - - if (new_rx_count != adapter->rx_ring_count) { - for (i = 0; i < adapter->num_rx_queues; i++) { - memcpy(&temp_rx_ring[i], adapter->rx_ring[i], - sizeof(struct ixgbe_ring)); - temp_rx_ring[i].count = new_rx_count; - err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); - if (err) { - while (i) { - i--; - ixgbe_free_rx_resources(&temp_rx_ring[i]); - } - goto err_setup; - } - } - need_update = true; - } - - /* if rings need to be updated, here's the place to do it in one shot */ - if (need_update) { - ixgbe_down(adapter); - - /* tx */ - if (new_tx_count != adapter->tx_ring_count) { - for (i = 0; i < adapter->num_tx_queues; i++) { - ixgbe_free_tx_resources(adapter->tx_ring[i]); - memcpy(adapter->tx_ring[i], &temp_tx_ring[i], - sizeof(struct ixgbe_ring)); - } - adapter->tx_ring_count = new_tx_count; - } - - /* rx */ - if (new_rx_count != adapter->rx_ring_count) { - for (i = 0; i < adapter->num_rx_queues; i++) { - ixgbe_free_rx_resources(adapter->rx_ring[i]); - memcpy(adapter->rx_ring[i], &temp_rx_ring[i], - sizeof(struct ixgbe_ring)); - } - adapter->rx_ring_count = new_rx_count; - } - ixgbe_up(adapter); - } - - vfree(temp_rx_ring); -err_setup: - vfree(temp_tx_ring); -clear_reset: - clear_bit(__IXGBE_RESETTING, &adapter->state); - return err; -} - -static int ixgbe_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return IXGBE_TEST_LEN; - case ETH_SS_STATS: - return IXGBE_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static void ixgbe_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct rtnl_link_stats64 temp; - const struct rtnl_link_stats64 *net_stats; - unsigned int start; - struct ixgbe_ring *ring; - int i, j; - char *p = NULL; - - ixgbe_update_stats(adapter); - net_stats = dev_get_stats(netdev, &temp); - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - switch (ixgbe_gstrings_stats[i].type) { - case NETDEV_STATS: - p = (char *) net_stats + - ixgbe_gstrings_stats[i].stat_offset; - break; - case IXGBE_STATS: - p = (char *) adapter + - ixgbe_gstrings_stats[i].stat_offset; - break; - } - - data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? 
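ixgbe_set_ringparam() follows a build-new-first discipline: the resized rings are fully allocated into temporary storage while traffic still runs, and only once every allocation has succeeded is the interface taken down for the swap, so an allocation failure mid-resize leaves the old rings untouched. A generic sketch of that pattern (plain calloc stands in for the driver's vmalloc and DMA descriptor setup):

#include <stdio.h>
#include <stdlib.h>

struct ring { void *desc; int count; };

static int setup_ring(struct ring *r)
{
	r->desc = calloc(r->count, 16);	/* say, 16-byte descriptors */
	return r->desc ? 0 : -1;
}

static void free_ring(struct ring *r)
{
	free(r->desc);
	r->desc = NULL;
}

static int resize(struct ring *live, int n, int new_count)
{
	struct ring *tmp = calloc(n, sizeof(*tmp));
	int i;

	if (!tmp)
		return -1;
	for (i = 0; i < n; i++) {
		tmp[i] = live[i];		/* clone config, then... */
		tmp[i].count = new_count;
		if (setup_ring(&tmp[i])) {	/* ...allocate new resources */
			while (i--)
				free_ring(&tmp[i]); /* old rings untouched */
			free(tmp);
			return -1;
		}
	}
	/* only now stop traffic, swap, restart (ixgbe_down/up elided) */
	for (i = 0; i < n; i++) {
		free_ring(&live[i]);
		live[i] = tmp[i];
	}
	free(tmp);
	return 0;
}

int main(void)
{
	struct ring rings[2] = { { NULL, 256 }, { NULL, 256 } };
	int i;

	for (i = 0; i < 2; i++)
		setup_ring(&rings[i]);
	printf("rc=%d count=%d\n", resize(rings, 2, 512), rings[0].count);
	return 0;
}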
*(u64 *)p : *(u32 *)p; - } - for (j = 0; j < adapter->num_tx_queues; j++) { - ring = adapter->tx_ring[j]; - do { - start = u64_stats_fetch_begin_bh(&ring->syncp); - data[i] = ring->stats.packets; - data[i+1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); - i += 2; - } - for (j = 0; j < adapter->num_rx_queues; j++) { - ring = adapter->rx_ring[j]; - do { - start = u64_stats_fetch_begin_bh(&ring->syncp); - data[i] = ring->stats.packets; - data[i+1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); - i += 2; - } - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { - data[i++] = adapter->stats.pxontxc[j]; - data[i++] = adapter->stats.pxofftxc[j]; - } - for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) { - data[i++] = adapter->stats.pxonrxc[j]; - data[i++] = adapter->stats.pxoffrxc[j]; - } - } -} - -static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - char *p = (char *)data; - int i; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *ixgbe_gstrings_test, - IXGBE_TEST_LEN * ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - memcpy(p, ixgbe_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_tx_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_rx_queues; i++) { - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - } - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { - sprintf(p, "tx_pb_%u_pxon", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_pb_%u_pxoff", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) { - sprintf(p, "rx_pb_%u_pxon", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_pb_%u_pxoff", i); - p += ETH_GSTRING_LEN; - } - } - /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ - break; - } -} - -static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) -{ - struct ixgbe_hw *hw = &adapter->hw; - bool link_up; - u32 link_speed = 0; - *data = 0; - - hw->mac.ops.check_link(hw, &link_speed, &link_up, true); - if (link_up) - return *data; - else - *data = 1; - return *data; -} - -/* ethtool register test data */ -struct ixgbe_reg_test { - u16 reg; - u8 array_len; - u8 test_type; - u32 mask; - u32 write; -}; - -/* In the hardware, registers are laid out either singly, in arrays - * spaced 0x40 bytes apart, or in contiguous tables. We assume - * most tests take place on arrays or single registers (handled - * as a single-element array) and special-case the tables. - * Table tests are always pattern tests. - * - * We also make provision for some required setup steps by specifying - * registers to be written without any read-back testing. 
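The do/while loops around the per-queue counters above use the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() sequence-count protocol, which lets the reader take a consistent 64-bit packet/byte snapshot without putting a lock on the Tx/Rx hot path. A simplified single-threaded model of the idea (the real kernel primitives add the required memory barriers):

#include <stdio.h>

static volatile unsigned int seq;
static volatile unsigned long long packets, bytes;

static void tx_path_update(void)
{
	seq++;			/* begin write: count becomes odd */
	packets += 1;
	bytes += 1514;
	seq++;			/* end write: count even again */
}

int main(void)
{
	unsigned long long p, b;
	unsigned int start;

	tx_path_update();
	do {			/* reader: retry if a write overlapped */
		start = seq;
		p = packets;
		b = bytes;
	} while ((start & 1) || seq != start);

	printf("packets=%llu bytes=%llu\n", p, b);
	return 0;
}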
- */ - -#define PATTERN_TEST 1 -#define SET_READ_TEST 2 -#define WRITE_NO_TEST 3 -#define TABLE32_TEST 4 -#define TABLE64_TEST_LO 5 -#define TABLE64_TEST_HI 6 - -/* default 82599 register test */ -static const struct ixgbe_reg_test reg_test_82599[] = { - { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, - { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, - { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, - { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, - { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, - { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, - { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, - { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } -}; - -/* default 82598 register test */ -static const struct ixgbe_reg_test reg_test_82598[] = { - { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, - { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - /* Enable all four RX queues before testing. */ - { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, - /* RDH is read-only for 82598, only test RDT. 
*/ - { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, - { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, - { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, - { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, - { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, - { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, - { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } -}; - -static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, - u32 mask, u32 write) -{ - u32 pat, val, before; - static const u32 test_pattern[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; - - for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { - before = readl(adapter->hw.hw_addr + reg); - writel((test_pattern[pat] & write), - (adapter->hw.hw_addr + reg)); - val = readl(adapter->hw.hw_addr + reg); - if (val != (test_pattern[pat] & write & mask)) { - e_err(drv, "pattern test reg %04X failed: got " - "0x%08X expected 0x%08X\n", - reg, val, (test_pattern[pat] & write & mask)); - *data = reg; - writel(before, adapter->hw.hw_addr + reg); - return 1; - } - writel(before, adapter->hw.hw_addr + reg); - } - return 0; -} - -static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, - u32 mask, u32 write) -{ - u32 val, before; - before = readl(adapter->hw.hw_addr + reg); - writel((write & mask), (adapter->hw.hw_addr + reg)); - val = readl(adapter->hw.hw_addr + reg); - if ((write & mask) != (val & mask)) { - e_err(drv, "set/check reg %04X test failed: got 0x%08X " - "expected 0x%08X\n", reg, (val & mask), (write & mask)); - *data = reg; - writel(before, (adapter->hw.hw_addr + reg)); - return 1; - } - writel(before, (adapter->hw.hw_addr + reg)); - return 0; -} - -#define REG_PATTERN_TEST(reg, mask, write) \ - do { \ - if (reg_pattern_test(adapter, data, reg, mask, write)) \ - return 1; \ - } while (0) \ - - -#define REG_SET_AND_CHECK(reg, mask, write) \ - do { \ - if (reg_set_and_check(adapter, data, reg, mask, write)) \ - return 1; \ - } while (0) \ - -static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) -{ - const struct ixgbe_reg_test *test; - u32 value, before, after; - u32 i, toggle; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - toggle = 0x7FFFF3FF; - test = reg_test_82598; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - toggle = 0x7FFFF30F; - test = reg_test_82599; - break; - default: - *data = 1; - return 1; - break; - } - - /* - * Because the status register is such a special case, - * we handle it separately from the rest of the register - * tests. Some bits are read-only, some toggle, and some - * are writeable on newer MACs. 
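reg_pattern_test() above only judges the bits it was allowed to touch: it writes pattern & write and requires the read-back to equal pattern & write & mask. A worked example with the FCRTL entry from the 82599 table (mask and write both 0x8007FFF0):

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x8007FFF0, write = 0x8007FFF0;
	unsigned int pattern = 0xA5A5A5A5;
	unsigned int written = pattern & write;		/* 0x8005A5A0 */
	unsigned int expect = pattern & write & mask;	/* 0x8005A5A0 */

	printf("write 0x%08X, pass if read-back == 0x%08X\n", written, expect);
	return 0;
}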
- */ - before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); - value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); - after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; - if (value != after) { - e_err(drv, "failed STATUS register test got: 0x%08X " - "expected: 0x%08X\n", after, value); - *data = 1; - return 1; - } - /* restore previous status */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); - - /* - * Perform the remainder of the register test, looping through - * the test table until we either fail or reach the null entry. - */ - while (test->reg) { - for (i = 0; i < test->array_len; i++) { - switch (test->test_type) { - case PATTERN_TEST: - REG_PATTERN_TEST(test->reg + (i * 0x40), - test->mask, - test->write); - break; - case SET_READ_TEST: - REG_SET_AND_CHECK(test->reg + (i * 0x40), - test->mask, - test->write); - break; - case WRITE_NO_TEST: - writel(test->write, - (adapter->hw.hw_addr + test->reg) - + (i * 0x40)); - break; - case TABLE32_TEST: - REG_PATTERN_TEST(test->reg + (i * 4), - test->mask, - test->write); - break; - case TABLE64_TEST_LO: - REG_PATTERN_TEST(test->reg + (i * 8), - test->mask, - test->write); - break; - case TABLE64_TEST_HI: - REG_PATTERN_TEST((test->reg + 4) + (i * 8), - test->mask, - test->write); - break; - } - } - test++; - } - - *data = 0; - return 0; -} - -static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) -{ - struct ixgbe_hw *hw = &adapter->hw; - if (hw->eeprom.ops.validate_checksum(hw, NULL)) - *data = 1; - else - *data = 0; - return *data; -} - -static irqreturn_t ixgbe_test_intr(int irq, void *data) -{ - struct net_device *netdev = (struct net_device *) data; - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); - - return IRQ_HANDLED; -} - -static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) -{ - struct net_device *netdev = adapter->netdev; - u32 mask, i = 0, shared_int = true; - u32 irq = adapter->pdev->irq; - - *data = 0; - - /* Hook up test interrupt handler just for this test */ - if (adapter->msix_entries) { - /* NOTE: we don't test MSI-X interrupts here, yet */ - return 0; - } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { - shared_int = false; - if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, - netdev)) { - *data = 1; - return -1; - } - } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, - netdev->name, netdev)) { - shared_int = false; - } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, - netdev->name, netdev)) { - *data = 1; - return -1; - } - e_info(hw, "testing %s interrupt\n", shared_int ? - "shared" : "unshared"); - - /* Disable all the interrupts */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - /* Test each interrupt */ - for (; i < 10; i++) { - /* Interrupt to test */ - mask = 1 << i; - - if (!shared_int) { - /* - * Disable the interrupts to be reported in - * the cause register and then force the same - * interrupt and see if one gets posted. If - * an interrupt was posted to the bus, the - * test failed. 
- */ - adapter->test_icr = 0; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, - ~mask & 0x00007FFF); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, - ~mask & 0x00007FFF); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - if (adapter->test_icr & mask) { - *data = 3; - break; - } - } - - /* - * Enable the interrupt to be reported in the cause - * register and then force the same interrupt and see - * if one gets posted. If an interrupt was not posted - * to the bus, the test failed. - */ - adapter->test_icr = 0; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - if (!(adapter->test_icr &mask)) { - *data = 4; - break; - } - - if (!shared_int) { - /* - * Disable the other interrupts to be reported in - * the cause register and then force the other - * interrupts and see if any get posted. If - * an interrupt was posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, - ~mask & 0x00007FFF); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, - ~mask & 0x00007FFF); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - if (adapter->test_icr) { - *data = 5; - break; - } - } - } - - /* Disable all the interrupts */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - /* Unhook test interrupt handler */ - free_irq(irq, netdev); - - return *data; -} - -static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; - struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - struct ixgbe_hw *hw = &adapter->hw; - u32 reg_ctl; - - /* shut down the DMA engines now so they can be reinitialized later */ - - /* first Rx */ - reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - reg_ctl &= ~IXGBE_RXCTRL_RXEN; - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); - ixgbe_disable_rx_queue(adapter, rx_ring); - - /* now Tx */ - reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); - reg_ctl &= ~IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); - reg_ctl &= ~IXGBE_DMATXCTL_TE; - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); - break; - default: - break; - } - - ixgbe_reset(adapter); - - ixgbe_free_tx_resources(&adapter->test_tx_ring); - ixgbe_free_rx_resources(&adapter->test_rx_ring); -} - -static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; - struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - u32 rctl, reg_data; - int ret_val; - int err; - - /* Setup Tx descriptor ring and Tx buffers */ - tx_ring->count = IXGBE_DEFAULT_TXD; - tx_ring->queue_index = 0; - tx_ring->dev = &adapter->pdev->dev; - tx_ring->netdev = adapter->netdev; - tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; - tx_ring->numa_node = adapter->node; - - err = ixgbe_setup_tx_resources(tx_ring); - if (err) - return 1; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); - reg_data |= IXGBE_DMATXCTL_TE; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); - break; - default: - break; - } - - ixgbe_configure_tx_ring(adapter, tx_ring); - - /* Setup Rx Descriptor ring and Rx buffers */ - rx_ring->count = 
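The interrupt test above performs up to three checks per cause bit: force the bit while it is masked (nothing may post), force it while enabled (it must post), then force all the other bits while they are masked (nothing may leak through). A toy model of that logic, with plain variables standing in for the latched EICR cause register and the EIMS enable mask:

#include <stdio.h>

static unsigned int icr;	/* stands in for latched EICR */
static unsigned int eims;	/* stands in for the enable mask */

static void force(unsigned int eics)
{
	icr |= eics & eims;	/* only enabled causes post to the bus */
}

int main(void)
{
	unsigned int mask = 1u << 3;	/* the cause bit under test */

	icr = 0; eims = ~mask & 0x7FFF; force(mask);
	printf("masked force posted:  %s\n", (icr & mask) ? "FAIL" : "ok");

	icr = 0; eims = mask; force(mask);
	printf("enabled force posted: %s\n", (icr & mask) ? "ok" : "FAIL");

	icr = 0; eims = mask; force(~mask & 0x7FFF);
	printf("other bits leaked:    %s\n", icr ? "FAIL" : "ok");
	return 0;
}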
IXGBE_DEFAULT_RXD; - rx_ring->queue_index = 0; - rx_ring->dev = &adapter->pdev->dev; - rx_ring->netdev = adapter->netdev; - rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; - rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; - rx_ring->numa_node = adapter->node; - - err = ixgbe_setup_rx_resources(rx_ring); - if (err) { - ret_val = 4; - goto err_nomem; - } - - rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); - - ixgbe_configure_rx_ring(adapter, rx_ring); - - rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); - - return 0; - -err_nomem: - ixgbe_free_desc_rings(adapter); - return ret_val; -} - -static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 reg_data; - - /* X540 needs to set the MACC.FLU bit to force link up */ - if (adapter->hw.mac.type == ixgbe_mac_X540) { - reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC); - reg_data |= IXGBE_MACC_FLU; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data); - } - - /* right now we only support MAC loopback in the driver */ - reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); - /* Setup MAC loopback */ - reg_data |= IXGBE_HLREG0_LPBK; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); - - reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); - reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data); - - reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC); - reg_data &= ~IXGBE_AUTOC_LMS_MASK; - reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); - IXGBE_WRITE_FLUSH(&adapter->hw); - usleep_range(10000, 20000); - - /* Disable Atlas Tx lanes; re-enabled in reset path */ - if (hw->mac.type == ixgbe_mac_82598EB) { - u8 atlas; - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); - atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); - atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); - atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); - - hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); - atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; - hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); - } - - return 0; -} - -static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) -{ - u32 reg_data; - - reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); - reg_data &= ~IXGBE_HLREG0_LPBK; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); -} - -static void ixgbe_create_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - memset(skb->data, 0xFF, frame_size); - frame_size &= ~1; - memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); -} - -static int ixgbe_check_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - frame_size &= ~1; - if (*(skb->data + 3) == 0xFF) { - if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) { - return 0; - } - } - return 13; -} - -static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, - struct ixgbe_ring 
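The loopback frame built and verified by ixgbe_create_lbtest_frame()/ixgbe_check_lbtest_frame() above is plain data: 0xFF fill, 0xAA across the back half, and single 0xBE/0xAF marker bytes the checker looks for after the frame has traversed the MAC. The same pattern logic, runnable standalone:

#include <stdio.h>
#include <string.h>

static void create_frame(unsigned char *data, unsigned int size)
{
	memset(data, 0xFF, size);
	size &= ~1u;
	memset(&data[size / 2], 0xAA, size / 2 - 1);
	data[size / 2 + 10] = 0xBE;
	data[size / 2 + 12] = 0xAF;
}

static int check_frame(const unsigned char *data, unsigned int size)
{
	size &= ~1u;
	if (data[3] == 0xFF &&			/* front half still 0xFF */
	    data[size / 2 + 10] == 0xBE &&
	    data[size / 2 + 12] == 0xAF)
		return 0;
	return 13;	/* same failure code the driver reports */
}

int main(void)
{
	unsigned char frame[1024];

	create_frame(frame, sizeof(frame));
	printf("loopback check: %d\n", check_frame(frame, sizeof(frame)));
	return 0;
}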
*tx_ring, - unsigned int size) -{ - union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *rx_buffer_info; - struct ixgbe_tx_buffer *tx_buffer_info; - const int bufsz = rx_ring->rx_buf_len; - u32 staterr; - u16 rx_ntc, tx_ntc, count = 0; - - /* initialize next to clean and descriptor values */ - rx_ntc = rx_ring->next_to_clean; - tx_ntc = tx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - - while (staterr & IXGBE_RXD_STAT_DD) { - /* check Rx buffer */ - rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; - - /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ - dma_unmap_single(rx_ring->dev, - rx_buffer_info->dma, - bufsz, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - - /* verify contents of skb */ - if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size)) - count++; - - /* unmap buffer on Tx side */ - tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - - /* increment Rx/Tx next to clean counters */ - rx_ntc++; - if (rx_ntc == rx_ring->count) - rx_ntc = 0; - tx_ntc++; - if (tx_ntc == tx_ring->count) - tx_ntc = 0; - - /* fetch next descriptor */ - rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - } - - /* re-map buffers to ring, store next to clean values */ - ixgbe_alloc_rx_buffers(rx_ring, count); - rx_ring->next_to_clean = rx_ntc; - tx_ring->next_to_clean = tx_ntc; - - return count; -} - -static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; - struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - int i, j, lc, good_cnt, ret_val = 0; - unsigned int size = 1024; - netdev_tx_t tx_ret_val; - struct sk_buff *skb; - - /* allocate test skb */ - skb = alloc_skb(size, GFP_KERNEL); - if (!skb) - return 11; - - /* place data into test skb */ - ixgbe_create_lbtest_frame(skb, size); - skb_put(skb, size); - - /* - * Calculate the loop count based on the largest descriptor ring - * The idea is to wrap the largest ring a number of times using 64 - * send/receive pairs during each loop - */ - - if (rx_ring->count <= tx_ring->count) - lc = ((tx_ring->count / 64) * 2) + 1; - else - lc = ((rx_ring->count / 64) * 2) + 1; - - for (j = 0; j <= lc; j++) { - /* reset count of good packets */ - good_cnt = 0; - - /* place 64 packets on the transmit queue*/ - for (i = 0; i < 64; i++) { - skb_get(skb); - tx_ret_val = ixgbe_xmit_frame_ring(skb, - adapter, - tx_ring); - if (tx_ret_val == NETDEV_TX_OK) - good_cnt++; - } - - if (good_cnt != 64) { - ret_val = 12; - break; - } - - /* allow 200 milliseconds for packets to go from Tx to Rx */ - msleep(200); - - good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); - if (good_cnt != 64) { - ret_val = 13; - break; - } - } - - /* free the original skb */ - kfree_skb(skb); - - return ret_val; -} - -static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) -{ - *data = ixgbe_setup_desc_rings(adapter); - if (*data) - goto out; - *data = ixgbe_setup_loopback_test(adapter); - if (*data) - goto err_loopback; - *data = ixgbe_run_loopback_test(adapter); - ixgbe_loopback_cleanup(adapter); - -err_loopback: - ixgbe_free_desc_rings(adapter); -out: - return *data; -} - -static void ixgbe_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - bool if_running = netif_running(netdev); - - 
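The loop-count formula in ixgbe_run_loopback_test() sizes the test to wrap the larger of the two rings about twice, in bursts of 64 frames. With 512-descriptor test rings (IXGBE_DEFAULT_TXD/RXD; 512 is an assumption here, not taken from this diff), the arithmetic works out as follows:

#include <stdio.h>

int main(void)
{
	int tx_count = 512, rx_count = 512;	/* assumed defaults */
	int larger = (rx_count <= tx_count) ? tx_count : rx_count;
	int lc = ((larger / 64) * 2) + 1;	/* 17 */

	/* the j loop runs lc + 1 times, 64 frames per burst */
	printf("lc = %d, total frames = %d\n", lc, (lc + 1) * 64);
	return 0;
}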
set_bit(__IXGBE_TESTING, &adapter->state); - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - - e_info(hw, "offline testing starting\n"); - - /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ - if (ixgbe_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - int i; - for (i = 0; i < adapter->num_vfs; i++) { - if (adapter->vfinfo[i].clear_to_send) { - netdev_warn(netdev, "%s", - "offline diagnostic is not " - "supported when VFs are " - "present\n"); - data[0] = 1; - data[1] = 1; - data[2] = 1; - data[3] = 1; - eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__IXGBE_TESTING, - &adapter->state); - goto skip_ol_tests; - } - } - } - - if (if_running) - /* indicate we're in test mode */ - dev_close(netdev); - else - ixgbe_reset(adapter); - - e_info(hw, "register testing starting\n"); - if (ixgbe_reg_test(adapter, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - ixgbe_reset(adapter); - e_info(hw, "eeprom testing starting\n"); - if (ixgbe_eeprom_test(adapter, &data[1])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - ixgbe_reset(adapter); - e_info(hw, "interrupt testing starting\n"); - if (ixgbe_intr_test(adapter, &data[2])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* If SRIOV or VMDq is enabled then skip MAC - * loopback diagnostic. */ - if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | - IXGBE_FLAG_VMDQ_ENABLED)) { - e_info(hw, "Skip MAC loopback diagnostic in VT " - "mode\n"); - data[3] = 0; - goto skip_loopback; - } - - ixgbe_reset(adapter); - e_info(hw, "loopback testing starting\n"); - if (ixgbe_loopback_test(adapter, &data[3])) - eth_test->flags |= ETH_TEST_FL_FAILED; - -skip_loopback: - ixgbe_reset(adapter); - - clear_bit(__IXGBE_TESTING, &adapter->state); - if (if_running) - dev_open(netdev); - } else { - e_info(hw, "online testing starting\n"); - /* Online tests */ - if (ixgbe_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* Online tests aren't run; pass by default */ - data[0] = 0; - data[1] = 0; - data[2] = 0; - data[3] = 0; - - clear_bit(__IXGBE_TESTING, &adapter->state); - } -skip_ol_tests: - msleep_interruptible(4 * 1000); -} - -static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, - struct ethtool_wolinfo *wol) -{ - struct ixgbe_hw *hw = &adapter->hw; - int retval = 1; - - /* WOL not supported except for the following */ - switch(hw->device_id) { - case IXGBE_DEV_ID_82599_SFP: - /* Only this subdevice supports WOL */ - if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) { - wol->supported = 0; - break; - } - retval = 0; - break; - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: - /* All except this subdevice support WOL */ - if (hw->subsystem_device_id == - IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { - wol->supported = 0; - break; - } - retval = 0; - break; - case IXGBE_DEV_ID_82599_KX4: - retval = 0; - break; - default: - wol->supported = 0; - } - - return retval; -} - -static void ixgbe_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - wol->supported = WAKE_UCAST | WAKE_MCAST | - WAKE_BCAST | WAKE_MAGIC; - wol->wolopts = 0; - - if (ixgbe_wol_exclusion(adapter, wol) || - !device_can_wakeup(&adapter->pdev->dev)) - return; - - if (adapter->wol & IXGBE_WUFC_EX) - wol->wolopts |= WAKE_UCAST; - if (adapter->wol & IXGBE_WUFC_MC) - wol->wolopts |= WAKE_MCAST; - if (adapter->wol & IXGBE_WUFC_BC) - wol->wolopts |= WAKE_BCAST; - if 
(adapter->wol & IXGBE_WUFC_MAG) - wol->wolopts |= WAKE_MAGIC; -} - -static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) - return -EOPNOTSUPP; - - if (ixgbe_wol_exclusion(adapter, wol)) - return wol->wolopts ? -EOPNOTSUPP : 0; - - adapter->wol = 0; - - if (wol->wolopts & WAKE_UCAST) - adapter->wol |= IXGBE_WUFC_EX; - if (wol->wolopts & WAKE_MCAST) - adapter->wol |= IXGBE_WUFC_MC; - if (wol->wolopts & WAKE_BCAST) - adapter->wol |= IXGBE_WUFC_BC; - if (wol->wolopts & WAKE_MAGIC) - adapter->wol |= IXGBE_WUFC_MAG; - - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); - - return 0; -} - -static int ixgbe_nway_reset(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - - return 0; -} - -static int ixgbe_set_phys_id(struct net_device *netdev, - enum ethtool_phys_id_state state) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - return 2; - - case ETHTOOL_ID_ON: - hw->mac.ops.led_on(hw, IXGBE_LED_ON); - break; - - case ETHTOOL_ID_OFF: - hw->mac.ops.led_off(hw, IXGBE_LED_ON); - break; - - case ETHTOOL_ID_INACTIVE: - /* Restore LED settings */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); - break; - } - - return 0; -} - -static int ixgbe_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; - - /* only valid if in constant ITR mode */ - switch (adapter->rx_itr_setting) { - case 0: - /* throttling disabled */ - ec->rx_coalesce_usecs = 0; - break; - case 1: - /* dynamic ITR mode */ - ec->rx_coalesce_usecs = 1; - break; - default: - /* fixed interrupt rate mode */ - ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param; - break; - } - - /* if in mixed tx/rx queues per vector mode, report only rx settings */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) - return 0; - - /* only valid if in constant ITR mode */ - switch (adapter->tx_itr_setting) { - case 0: - /* throttling disabled */ - ec->tx_coalesce_usecs = 0; - break; - case 1: - /* dynamic ITR mode */ - ec->tx_coalesce_usecs = 1; - break; - default: - ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param; - break; - } - - return 0; -} - -/* - * this function must be called before setting the new value of - * rx_itr_setting - */ -static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter, - struct ethtool_coalesce *ec) -{ - struct net_device *netdev = adapter->netdev; - - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) - return false; - - /* if interrupt rate is too high then disable RSC */ - if (ec->rx_coalesce_usecs != 1 && - ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) { - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { - e_info(probe, "rx-usecs set too low, " - "disabling RSC\n"); - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; - return true; - } - } else { - /* check the feature flag value and enable RSC if necessary */ - if ((netdev->features & NETIF_F_LRO) && - !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { - e_info(probe, "rx-usecs set to %d, " - "re-enabling RSC\n", - ec->rx_coalesce_usecs); - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - return 
true; - } - } - return false; -} - -static int ixgbe_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_q_vector *q_vector; - int i; - bool need_reset = false; - - /* don't accept tx specific changes if we've got mixed RxTx vectors */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count - && ec->tx_coalesce_usecs) - return -EINVAL; - - if (ec->tx_max_coalesced_frames_irq) - adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; - - if (ec->rx_coalesce_usecs > 1) { - /* check the limits */ - if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) || - (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) - return -EINVAL; - - /* check the old value and enable RSC if necessary */ - need_reset = ixgbe_update_rsc(adapter, ec); - - /* store the value in ints/second */ - adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; - - /* static value of interrupt rate */ - adapter->rx_itr_setting = adapter->rx_eitr_param; - /* clear the lower bit as its used for dynamic state */ - adapter->rx_itr_setting &= ~1; - } else if (ec->rx_coalesce_usecs == 1) { - /* check the old value and enable RSC if necessary */ - need_reset = ixgbe_update_rsc(adapter, ec); - - /* 1 means dynamic mode */ - adapter->rx_eitr_param = 20000; - adapter->rx_itr_setting = 1; - } else { - /* check the old value and enable RSC if necessary */ - need_reset = ixgbe_update_rsc(adapter, ec); - /* - * any other value means disable eitr, which is best - * served by setting the interrupt rate very high - */ - adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; - adapter->rx_itr_setting = 0; - } - - if (ec->tx_coalesce_usecs > 1) { - /* - * don't have to worry about max_int as above because - * tx vectors don't do hardware RSC (an rx function) - */ - /* check the limits */ - if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) || - (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE)) - return -EINVAL; - - /* store the value in ints/second */ - adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs; - - /* static value of interrupt rate */ - adapter->tx_itr_setting = adapter->tx_eitr_param; - - /* clear the lower bit as its used for dynamic state */ - adapter->tx_itr_setting &= ~1; - } else if (ec->tx_coalesce_usecs == 1) { - /* 1 means dynamic mode */ - adapter->tx_eitr_param = 10000; - adapter->tx_itr_setting = 1; - } else { - adapter->tx_eitr_param = IXGBE_MAX_INT_RATE; - adapter->tx_itr_setting = 0; - } - - /* MSI/MSIx Interrupt Mode */ - if (adapter->flags & - (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) { - int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - for (i = 0; i < num_vectors; i++) { - q_vector = adapter->q_vector[i]; - if (q_vector->tx.count && !q_vector->rx.count) - /* tx only */ - q_vector->eitr = adapter->tx_eitr_param; - else - /* rx only or mixed */ - q_vector->eitr = adapter->rx_eitr_param; - q_vector->tx.work_limit = adapter->tx_work_limit; - ixgbe_write_eitr(q_vector); - } - /* Legacy Interrupt Mode */ - } else { - q_vector = adapter->q_vector[0]; - q_vector->eitr = adapter->rx_eitr_param; - q_vector->tx.work_limit = adapter->tx_work_limit; - ixgbe_write_eitr(q_vector); - } - - /* - * do reset here at the end to make sure EITR==0 case is handled - * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings - * also locks in RSC enable/disable which requires reset - */ - if (need_reset) - ixgbe_do_reset(netdev); - - return 0; -} - -static int ixgbe_get_ethtool_fdir_entry(struct 
ixgbe_adapter *adapter, - struct ethtool_rxnfc *cmd) -{ - union ixgbe_atr_input *mask = &adapter->fdir_mask; - struct ethtool_rx_flow_spec *fsp = - (struct ethtool_rx_flow_spec *)&cmd->fs; - struct hlist_node *node, *node2; - struct ixgbe_fdir_filter *rule = NULL; - - /* report total rule count */ - cmd->data = (1024 << adapter->fdir_pballoc) - 2; - - hlist_for_each_entry_safe(rule, node, node2, - &adapter->fdir_filter_list, fdir_node) { - if (fsp->location <= rule->sw_idx) - break; - } - - if (!rule || fsp->location != rule->sw_idx) - return -EINVAL; - - /* fill out the flow spec entry */ - - /* set flow type field */ - switch (rule->filter.formatted.flow_type) { - case IXGBE_ATR_FLOW_TYPE_TCPV4: - fsp->flow_type = TCP_V4_FLOW; - break; - case IXGBE_ATR_FLOW_TYPE_UDPV4: - fsp->flow_type = UDP_V4_FLOW; - break; - case IXGBE_ATR_FLOW_TYPE_SCTPV4: - fsp->flow_type = SCTP_V4_FLOW; - break; - case IXGBE_ATR_FLOW_TYPE_IPV4: - fsp->flow_type = IP_USER_FLOW; - fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; - fsp->h_u.usr_ip4_spec.proto = 0; - fsp->m_u.usr_ip4_spec.proto = 0; - break; - default: - return -EINVAL; - } - - fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; - fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; - fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; - fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; - fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; - fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; - fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; - fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; - fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; - fsp->m_ext.vlan_tci = mask->formatted.vlan_id; - fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; - fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; - fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); - fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); - fsp->flow_type |= FLOW_EXT; - - /* record action */ - if (rule->action == IXGBE_FDIR_DROP_QUEUE) - fsp->ring_cookie = RX_CLS_FLOW_DISC; - else - fsp->ring_cookie = rule->action; - - return 0; -} - -static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, - struct ethtool_rxnfc *cmd, - u32 *rule_locs) -{ - struct hlist_node *node, *node2; - struct ixgbe_fdir_filter *rule; - int cnt = 0; - - /* report total rule count */ - cmd->data = (1024 << adapter->fdir_pballoc) - 2; - - hlist_for_each_entry_safe(rule, node, node2, - &adapter->fdir_filter_list, fdir_node) { - if (cnt == cmd->rule_cnt) - return -EMSGSIZE; - rule_locs[cnt] = rule->sw_idx; - cnt++; - } - - return 0; -} - -static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, - void *rule_locs) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_rx_queues; - ret = 0; - break; - case ETHTOOL_GRXCLSRLCNT: - cmd->rule_cnt = adapter->fdir_filter_count; - ret = 0; - break; - case ETHTOOL_GRXCLSRULE: - ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); - break; - case ETHTOOL_GRXCLSRLALL: - ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, - (u32 *)rule_locs); - break; - default: - break; - } - - return ret; -} - -static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ixgbe_fdir_filter *input, - u16 sw_idx) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct hlist_node *node, *node2, *parent; - struct ixgbe_fdir_filter *rule; - int err = -EINVAL; - - parent = 
NULL; - rule = NULL; - - hlist_for_each_entry_safe(rule, node, node2, - &adapter->fdir_filter_list, fdir_node) { - /* hash found, or no matching entry */ - if (rule->sw_idx >= sw_idx) - break; - parent = node; - } - - /* if there is an old rule occupying our place remove it */ - if (rule && (rule->sw_idx == sw_idx)) { - if (!input || (rule->filter.formatted.bkt_hash != - input->filter.formatted.bkt_hash)) { - err = ixgbe_fdir_erase_perfect_filter_82599(hw, - &rule->filter, - sw_idx); - } - - hlist_del(&rule->fdir_node); - kfree(rule); - adapter->fdir_filter_count--; - } - - /* - * If no input this was a delete, err should be 0 if a rule was - * successfully found and removed from the list else -EINVAL - */ - if (!input) - return err; - - /* initialize node and set software index */ - INIT_HLIST_NODE(&input->fdir_node); - - /* add filter to the list */ - if (parent) - hlist_add_after(parent, &input->fdir_node); - else - hlist_add_head(&input->fdir_node, - &adapter->fdir_filter_list); - - /* update counts */ - adapter->fdir_filter_count++; - - return 0; -} - -static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, - u8 *flow_type) -{ - switch (fsp->flow_type & ~FLOW_EXT) { - case TCP_V4_FLOW: - *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; - break; - case UDP_V4_FLOW: - *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; - break; - case SCTP_V4_FLOW: - *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; - break; - case IP_USER_FLOW: - switch (fsp->h_u.usr_ip4_spec.proto) { - case IPPROTO_TCP: - *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; - break; - case IPPROTO_UDP: - *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; - break; - case IPPROTO_SCTP: - *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; - break; - case 0: - if (!fsp->m_u.usr_ip4_spec.proto) { - *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; - break; - } - default: - return 0; - } - break; - default: - return 0; - } - - return 1; -} - -static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ethtool_rxnfc *cmd) -{ - struct ethtool_rx_flow_spec *fsp = - (struct ethtool_rx_flow_spec *)&cmd->fs; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_fdir_filter *input; - union ixgbe_atr_input mask; - int err; - - if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) - return -EOPNOTSUPP; - - /* - * Don't allow programming if the action is a queue greater than - * the number of online Rx queues. 
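The list bookkeeping in ixgbe_update_ethtool_fdir_entry() above follows one compact rule: walk to the first entry with sw_idx >= the requested index, remove any occupant at that exact index, and treat a NULL input as a pure delete. A hedged sketch of the same logic on an ordinary singly linked list is below; struct rule and update_rule() are invented stand-ins for the driver's hlist-based types, kept only to show the delete-then-insert flow.

#include <stdio.h>
#include <stdlib.h>

struct rule {
	unsigned int sw_idx;	/* sort key, like the driver's rule->sw_idx */
	struct rule *next;
};

/*
 * Keep the list sorted by sw_idx; an existing entry at the same index is
 * removed first, and a NULL input turns the call into a pure delete.
 */
static void update_rule(struct rule **head, struct rule *input,
			unsigned int sw_idx)
{
	struct rule **pp = head;

	while (*pp && (*pp)->sw_idx < sw_idx)
		pp = &(*pp)->next;

	if (*pp && (*pp)->sw_idx == sw_idx) {
		struct rule *old = *pp;

		*pp = old->next;
		free(old);
	}

	if (!input)
		return;

	input->sw_idx = sw_idx;
	input->next = *pp;
	*pp = input;
}

int main(void)
{
	static const unsigned int idx[] = { 5, 1, 3, 3 };
	struct rule *head = NULL, *r;
	unsigned int i;

	for (i = 0; i < 4; i++) {	/* the second 3 replaces the first */
		r = malloc(sizeof(*r));
		if (!r)
			return 1;
		update_rule(&head, r, idx[i]);
	}
	update_rule(&head, NULL, 5);	/* plain delete */

	for (r = head; r; r = r->next)
		printf("rule %u\n", r->sw_idx);
	return 0;
}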
- */ - if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && - (fsp->ring_cookie >= adapter->num_rx_queues)) - return -EINVAL; - - /* Don't allow indexes to exist outside of available space */ - if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { - e_err(drv, "Location out of range\n"); - return -EINVAL; - } - - input = kzalloc(sizeof(*input), GFP_ATOMIC); - if (!input) - return -ENOMEM; - - memset(&mask, 0, sizeof(union ixgbe_atr_input)); - - /* set SW index */ - input->sw_idx = fsp->location; - - /* record flow type */ - if (!ixgbe_flowspec_to_flow_type(fsp, - &input->filter.formatted.flow_type)) { - e_err(drv, "Unrecognized flow type\n"); - goto err_out; - } - - mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | - IXGBE_ATR_L4TYPE_MASK; - - if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) - mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; - - /* Copy input into formatted structures */ - input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; - mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; - input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; - mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; - input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; - mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; - input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; - mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; - - if (fsp->flow_type & FLOW_EXT) { - input->filter.formatted.vm_pool = - (unsigned char)ntohl(fsp->h_ext.data[1]); - mask.formatted.vm_pool = - (unsigned char)ntohl(fsp->m_ext.data[1]); - input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; - mask.formatted.vlan_id = fsp->m_ext.vlan_tci; - input->filter.formatted.flex_bytes = - fsp->h_ext.vlan_etype; - mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; - } - - /* determine if we need to drop or route the packet */ - if (fsp->ring_cookie == RX_CLS_FLOW_DISC) - input->action = IXGBE_FDIR_DROP_QUEUE; - else - input->action = fsp->ring_cookie; - - spin_lock(&adapter->fdir_perfect_lock); - - if (hlist_empty(&adapter->fdir_filter_list)) { - /* save mask and program input mask into HW */ - memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); - err = ixgbe_fdir_set_input_mask_82599(hw, &mask); - if (err) { - e_err(drv, "Error writing mask\n"); - goto err_out_w_lock; - } - } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { - e_err(drv, "Only one mask supported per port\n"); - goto err_out_w_lock; - } - - /* apply mask and compute/store hash */ - ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); - - /* program filters to filter memory */ - err = ixgbe_fdir_write_perfect_filter_82599(hw, - &input->filter, input->sw_idx, - (input->action == IXGBE_FDIR_DROP_QUEUE) ? 
- IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[input->action]->reg_idx); - if (err) - goto err_out_w_lock; - - ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); - - spin_unlock(&adapter->fdir_perfect_lock); - - return err; -err_out_w_lock: - spin_unlock(&adapter->fdir_perfect_lock); -err_out: - kfree(input); - return -EINVAL; -} - -static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ethtool_rxnfc *cmd) -{ - struct ethtool_rx_flow_spec *fsp = - (struct ethtool_rx_flow_spec *)&cmd->fs; - int err; - - spin_lock(&adapter->fdir_perfect_lock); - err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); - spin_unlock(&adapter->fdir_perfect_lock); - - return err; -} - -static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_SRXCLSRLINS: - ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); - break; - case ETHTOOL_SRXCLSRLDEL: - ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); - break; - default: - break; - } - - return ret; -} - -static const struct ethtool_ops ixgbe_ethtool_ops = { - .get_settings = ixgbe_get_settings, - .set_settings = ixgbe_set_settings, - .get_drvinfo = ixgbe_get_drvinfo, - .get_regs_len = ixgbe_get_regs_len, - .get_regs = ixgbe_get_regs, - .get_wol = ixgbe_get_wol, - .set_wol = ixgbe_set_wol, - .nway_reset = ixgbe_nway_reset, - .get_link = ethtool_op_get_link, - .get_eeprom_len = ixgbe_get_eeprom_len, - .get_eeprom = ixgbe_get_eeprom, - .get_ringparam = ixgbe_get_ringparam, - .set_ringparam = ixgbe_set_ringparam, - .get_pauseparam = ixgbe_get_pauseparam, - .set_pauseparam = ixgbe_set_pauseparam, - .get_msglevel = ixgbe_get_msglevel, - .set_msglevel = ixgbe_set_msglevel, - .self_test = ixgbe_diag_test, - .get_strings = ixgbe_get_strings, - .set_phys_id = ixgbe_set_phys_id, - .get_sset_count = ixgbe_get_sset_count, - .get_ethtool_stats = ixgbe_get_ethtool_stats, - .get_coalesce = ixgbe_get_coalesce, - .set_coalesce = ixgbe_set_coalesce, - .get_rxnfc = ixgbe_get_rxnfc, - .set_rxnfc = ixgbe_set_rxnfc, -}; - -void ixgbe_set_ethtool_ops(struct net_device *netdev) -{ - SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); -} diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c deleted file mode 100644 index 824edae..0000000 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ /dev/null @@ -1,836 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#include "ixgbe.h"
-#include <linux/if_ether.h>
-#include <linux/gfp.h>
-#include <linux/if_vlan.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/fc/fc_fs.h>
-#include <scsi/fc/fc_fcoe.h>
-#include <scsi/libfc.h>
-#include <scsi/libfcoe.h>
-
-/**
- * ixgbe_fcoe_clear_ddp - clear the given ddp context
- * @ddp: ptr to the ixgbe_fcoe_ddp
- *
- * Returns : none
- *
- */
-static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
-{
-	ddp->len = 0;
-	ddp->err = 1;
-	ddp->udl = NULL;
-	ddp->udp = 0UL;
-	ddp->sgl = NULL;
-	ddp->sgc = 0;
-}
-
-/**
- * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
- * @netdev: the corresponding net_device
- * @xid: the xid whose corresponding ddp context will be freed
- *
- * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
- * and it is expected to be called by ULD, i.e., FCP layer of libfc
- * to release the corresponding ddp context when the I/O is done.
- *
- * Returns : data length already ddp-ed in bytes
- */
-int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
-{
-	int len = 0;
-	struct ixgbe_fcoe *fcoe;
-	struct ixgbe_adapter *adapter;
-	struct ixgbe_fcoe_ddp *ddp;
-	u32 fcbuff;
-
-	if (!netdev)
-		goto out_ddp_put;
-
-	if (xid >= IXGBE_FCOE_DDP_MAX)
-		goto out_ddp_put;
-
-	adapter = netdev_priv(netdev);
-	fcoe = &adapter->fcoe;
-	ddp = &fcoe->ddp[xid];
-	if (!ddp->udl)
-		goto out_ddp_put;
-
-	len = ddp->len;
-	/* if there is an error, force to invalidate ddp context */
-	if (ddp->err) {
-		spin_lock_bh(&fcoe->lock);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
-				(xid | IXGBE_FCFLTRW_WE));
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
-				(xid | IXGBE_FCDMARW_WE));
-
-		/* guaranteed to be invalidated after 100us */
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
-				(xid | IXGBE_FCDMARW_RE));
-		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
-		spin_unlock_bh(&fcoe->lock);
-		if (fcbuff & IXGBE_FCBUFF_VALID)
-			udelay(100);
-	}
-	if (ddp->sgl)
-		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
-			     DMA_FROM_DEVICE);
-	if (ddp->pool) {
-		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
-		ddp->pool = NULL;
-	}
-
-	ixgbe_fcoe_clear_ddp(ddp);
-
-out_ddp_put:
-	return len;
-}
-
-/**
- * ixgbe_fcoe_ddp_setup - called to set up ddp context
- * @netdev: the corresponding net_device
- * @xid: the exchange id requesting ddp
- * @sgl: the scatter-gather list for this request
- * @sgc: the number of scatter-gather items
- * @target_mode: non-zero to set up the context for target mode
- *
- * Returns : 1 for success and 0 for no ddp
- */
-static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
-				struct scatterlist *sgl, unsigned int sgc,
-				int target_mode)
-{
-	struct ixgbe_adapter *adapter;
-	struct ixgbe_hw *hw;
-	struct ixgbe_fcoe *fcoe;
-	struct ixgbe_fcoe_ddp *ddp;
-	struct scatterlist *sg;
-	unsigned int i, j, dmacount;
-	unsigned int len;
-	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
-	unsigned int firstoff = 0;
-	unsigned int lastsize;
-	unsigned int thisoff = 0;
-	unsigned int thislen = 0;
-	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
-	dma_addr_t addr = 0;
-	struct pci_pool *pool;
-
-	if (!netdev || !sgl)
-		return 0;
-
-	adapter = netdev_priv(netdev);
-	if (xid >= IXGBE_FCOE_DDP_MAX) {
-		e_warn(drv, "xid=0x%x out-of-range\n", xid);
-		return 0;
-	}
-
-	/* no DDP if we are already down or resetting */
-	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
-	    test_bit(__IXGBE_RESETTING, &adapter->state))
-		return 0;
-
-	fcoe = &adapter->fcoe;
-	if (!fcoe->pool) {
-		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
-		return 0;
-	}
-
-	ddp = &fcoe->ddp[xid];
-	if (ddp->sgl) {
-		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
-		      xid, ddp->sgl, ddp->sgc);
-		return 0;
-	}
-	ixgbe_fcoe_clear_ddp(ddp);
-
-	/* setup dma from scsi command sgl */
-	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
-	if (dmacount == 0) {
-		e_err(drv, "xid 0x%x DMA map error\n", xid);
-		return 0;
-	}
-
-	/* alloc the udl from per cpu ddp pool */
-	pool = *per_cpu_ptr(fcoe->pool, get_cpu());
-	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
-	if (!ddp->udl) {
-		e_err(drv, "failed to allocate ddp context\n");
-		goto out_noddp_unmap;
-	}
-	ddp->pool = pool;
-	ddp->sgl = sgl;
-	ddp->sgc = sgc;
-
-	j = 0;
-	for_each_sg(sgl, sg, dmacount, i) {
-		addr = sg_dma_address(sg);
-		len = sg_dma_len(sg);
-		while (len) {
-			/* max number of buffers allowed in one DDP context */
-			if (j >= IXGBE_BUFFCNT_MAX) {
-				e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
-				      "not enough descriptors\n",
-				      xid, i, j, dmacount, (u64)addr);
-				goto out_noddp_free;
-			}
-
-			/* get the offset and length of the current buffer */
-			thisoff = addr & ((dma_addr_t)bufflen - 1);
-			thislen = min((bufflen - thisoff), len);
-			/*
-			 * all but the 1st buffer (j == 0)
-			 * must be aligned on bufflen
-			 */
-			if ((j != 0) && (thisoff))
-				goto out_noddp_free;
-			/*
-			 * all but the last buffer
-			 * ((i == (dmacount - 1)) && (thislen == len))
-			 * must end at bufflen
-			 */
-			if (((i != (dmacount - 1)) || (thislen != len))
-			    && ((thislen + thisoff) != bufflen))
-				goto out_noddp_free;
-
-			ddp->udl[j] = (u64)(addr - thisoff);
-			/* only the first buffer may have a non-zero offset */
-			if (j == 0)
-				firstoff = thisoff;
-			len -= thislen;
-			addr += thislen;
-			j++;
-		}
-	}
-	/* only the last buffer may have non-full bufflen */
-	lastsize = thisoff + thislen;
-
-	/*
-	 * lastsize cannot be bufflen.
-	 * If it is, then add another buffer with lastsize = 1.
-	 */
-	if (lastsize == bufflen) {
-		if (j >= IXGBE_BUFFCNT_MAX) {
-			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
-			      "not enough user buffers. We need an extra "
-			      "buffer because lastsize is bufflen.\n",
-			      xid, i, j, dmacount, (u64)addr);
-			goto out_noddp_free;
-		}
-
-		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
-		j++;
-		lastsize = 1;
-	}
-	put_cpu();
-
-	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
-	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
-	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
-	/* Set WRCONTX bit to allow DDP for target */
-	if (target_mode)
-		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
-	fcbuff |= (IXGBE_FCBUFF_VALID);
-
-	fcdmarw = xid;
-	fcdmarw |= IXGBE_FCDMARW_WE;
-	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);
-
-	fcfltrw = xid;
-	fcfltrw |= IXGBE_FCFLTRW_WE;
-
-	/* program DMA context */
-	hw = &adapter->hw;
-	spin_lock_bh(&fcoe->lock);
-
-	/* turn on last frame indication for target mode, as the target is
-	 * supposed to send FCP_RSP when it is done. */
-	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
-		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
-		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
-		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
-		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
-	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
-	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
-	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
-	/* program filter context */
-	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
-	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
-	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
-
-	spin_unlock_bh(&fcoe->lock);
-
-	return 1;
-
-out_noddp_free:
-	pci_pool_free(pool, ddp->udl, ddp->udp);
-	ixgbe_fcoe_clear_ddp(ddp);
-
-out_noddp_unmap:
-	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
-	put_cpu();
-	return 0;
-}
-
-/**
- * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
- * @netdev: the corresponding net_device
- * @xid: the exchange id requesting ddp
- * @sgl: the scatter-gather list for this request
- * @sgc: the number of scatter-gather items
- *
- * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
- * and is expected to be called from ULD, e.g., FCP layer of libfc
- * to set up ddp for the corresponding xid of the given sglist for
- * the corresponding I/O.
- *
- * Returns : 1 for success and 0 for no ddp
- */
-int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
-		       struct scatterlist *sgl, unsigned int sgc)
-{
-	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
-}
-
-/**
- * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
- * @netdev: the corresponding net_device
- * @xid: the exchange id requesting ddp
- * @sgl: the scatter-gather list for this request
- * @sgc: the number of scatter-gather items
- *
- * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
- * and is expected to be called from ULD, e.g., FCP layer of libfc
- * to set up ddp for the corresponding xid of the given sglist for
- * the corresponding I/O. The DDP in target mode is a write I/O request
- * from the initiator.
- *
- * Returns : 1 for success and 0 for no ddp
- */
-int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
-			  struct scatterlist *sgl, unsigned int sgc)
-{
-	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
-}
-
-/**
- * ixgbe_fcoe_ddp - check ddp status and mark it done
- * @adapter: ixgbe adapter
- * @rx_desc: advanced rx descriptor
- * @skb: the skb holding the received data
- *
- * This checks ddp status.
- *
- * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
- * not passing the skb to ULD, > 0 indicates the length of data
- * being ddped.
- */
-int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
-		   union ixgbe_adv_rx_desc *rx_desc,
-		   struct sk_buff *skb,
-		   u32 staterr)
-{
-	u16 xid;
-	u32 fctl;
-	u32 fceofe, fcerr, fcstat;
-	int rc = -EINVAL;
-	struct ixgbe_fcoe *fcoe;
-	struct ixgbe_fcoe_ddp *ddp;
-	struct fc_frame_header *fh;
-	struct fcoe_crc_eof *crc;
-
-	fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
-	fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
-	if (fcerr == IXGBE_FCERR_BADCRC)
-		skb_checksum_none_assert(skb);
-	else
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
-		fh = (struct fc_frame_header *)(skb->data +
-			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
-	else
-		fh = (struct fc_frame_header *)(skb->data +
-			sizeof(struct fcoe_hdr));
-	fctl = ntoh24(fh->fh_f_ctl);
-	if (fctl & FC_FC_EX_CTX)
-		xid = be16_to_cpu(fh->fh_ox_id);
-	else
-		xid = be16_to_cpu(fh->fh_rx_id);
-
-	if (xid >= IXGBE_FCOE_DDP_MAX)
-		goto ddp_out;
-
-	fcoe = &adapter->fcoe;
-	ddp = &fcoe->ddp[xid];
-	if (!ddp->udl)
-		goto ddp_out;
-
-	if (fcerr | fceofe)
-		goto ddp_out;
-
-	fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
-	if (fcstat) {
-		/* update length of DDPed data */
-		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
-		/* unmap the sg list when FCP_RSP is received */
-		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
-			pci_unmap_sg(adapter->pdev, ddp->sgl,
-				     ddp->sgc, DMA_FROM_DEVICE);
-			ddp->err = (fcerr | fceofe);
-			ddp->sgl = NULL;
-			ddp->sgc = 0;
-		}
-		/* return 0 to bypass going to ULD for DDPed data */
-		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
-			rc = 0;
-		else if (ddp->len)
-			rc = ddp->len;
-	}
-	/* In target mode, check the last data frame of the sequence.
-	 * For DDP in target mode, data is already DDPed but the header
-	 * indication of the last data frame would allow us to tell if we
-	 * got all the data and the ULP can send FCP_RSP back. As this is
-	 * not a full fcoe frame, we fill the trailer here so it won't be
-	 * dropped by the ULP stack.
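The exchange-id selection at the top of ixgbe_fcoe_ddp() is worth isolating: the F_CTL field says whether this end is the exchange responder, which decides whether OX_ID or RX_ID names the exchange. Below is a hedged standalone sketch with hand-rolled types; the real driver uses struct fc_frame_header from <scsi/fc/fc_fs.h>, the shared ntoh24() helper, and be16_to_cpu() on the id fields, all of which are simplified away here (ids are kept in host order).

#include <stdio.h>
#include <stdint.h>

#define FC_FC_EX_CTX (1 << 23)	/* "responder of exchange" bit in F_CTL */

struct fc_hdr {
	uint8_t  f_ctl[3];	/* 24-bit frame control field */
	uint16_t ox_id;		/* originator exchange id */
	uint16_t rx_id;		/* responder exchange id */
};

/* 24-bit big-endian load, like the kernel's ntoh24() */
static uint32_t load24(const uint8_t *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

static uint16_t pick_xid(const struct fc_hdr *fh)
{
	uint32_t fctl = load24(fh->f_ctl);

	/* as the exchange responder, the OX_ID names the exchange */
	return (fctl & FC_FC_EX_CTX) ? fh->ox_id : fh->rx_id;
}

int main(void)
{
	struct fc_hdr fh = { .f_ctl = { 0x80, 0, 0 }, .ox_id = 7, .rx_id = 9 };

	printf("xid = %u\n", pick_xid(&fh));	/* EX_CTX set, prints 7 */
	return 0;
}

The resulting xid then indexes the fixed ddp[] array, which is why both the setup and completion paths bound-check it against IXGBE_FCOE_DDP_MAX.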
- */ - if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && - (fctl & FC_FC_END_SEQ)) { - crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); - crc->fcoe_eof = FC_EOF_T; - } -ddp_out: - return rc; -} - -/** - * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) - * @adapter: ixgbe adapter - * @tx_ring: tx desc ring - * @skb: associated skb - * @tx_flags: tx flags - * @hdr_len: hdr_len to be returned - * - * This sets up large send offload for FCoE - * - * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error - */ -int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, u8 *hdr_len) -{ - struct fc_frame_header *fh; - u32 vlan_macip_lens; - u32 fcoe_sof_eof = 0; - u32 mss_l4len_idx; - u8 sof, eof; - - if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { - dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", - skb_shinfo(skb)->gso_type); - return -EINVAL; - } - - /* resets the header to point fcoe/fc */ - skb_set_network_header(skb, skb->mac_len); - skb_set_transport_header(skb, skb->mac_len + - sizeof(struct fcoe_hdr)); - - /* sets up SOF and ORIS */ - sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; - switch (sof) { - case FC_SOF_I2: - fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; - break; - case FC_SOF_I3: - fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | - IXGBE_ADVTXD_FCOEF_ORIS; - break; - case FC_SOF_N2: - break; - case FC_SOF_N3: - fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; - break; - default: - dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); - return -EINVAL; - } - - /* the first byte of the last dword is EOF */ - skb_copy_bits(skb, skb->len - 4, &eof, 1); - /* sets up EOF and ORIE */ - switch (eof) { - case FC_EOF_N: - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; - break; - case FC_EOF_T: - /* lso needs ORIE */ - if (skb_is_gso(skb)) - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | - IXGBE_ADVTXD_FCOEF_ORIE; - else - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; - break; - case FC_EOF_NI: - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; - break; - case FC_EOF_A: - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; - break; - default: - dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); - return -EINVAL; - } - - /* sets up PARINC indicating data offset */ - fh = (struct fc_frame_header *)skb_transport_header(skb); - if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) - fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; - - /* include trailer in headlen as it is replicated per frame */ - *hdr_len = sizeof(struct fcoe_crc_eof); - - /* hdr_len includes fc_hdr if FCoE LSO is enabled */ - if (skb_is_gso(skb)) - *hdr_len += (skb_transport_offset(skb) + - sizeof(struct fc_frame_header)); - - /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ - mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; - - /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - vlan_macip_lens = skb_transport_offset(skb) + - sizeof(struct fc_frame_header); - vlan_macip_lens |= (skb_transport_offset(skb) - 4) - << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - - /* write context desc */ - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, - IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); - - return skb_is_gso(skb); -} - -static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) -{ - unsigned int cpu; - struct pci_pool **pool; - - for_each_possible_cpu(cpu) { - pool = per_cpu_ptr(fcoe->pool, cpu); - if (*pool) - pci_pool_destroy(*pool); - } - free_percpu(fcoe->pool); - fcoe->pool = NULL; -} - -static 
void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) -{ - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - unsigned int cpu; - struct pci_pool **pool; - char pool_name[32]; - - fcoe->pool = alloc_percpu(struct pci_pool *); - if (!fcoe->pool) - return; - - /* allocate pci pool for each cpu */ - for_each_possible_cpu(cpu) { - snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); - pool = per_cpu_ptr(fcoe->pool, cpu); - *pool = pci_pool_create(pool_name, - adapter->pdev, IXGBE_FCPTR_MAX, - IXGBE_FCPTR_ALIGN, PAGE_SIZE); - if (!*pool) { - e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); - ixgbe_fcoe_ddp_pools_free(fcoe); - return; - } - } -} - -/** - * ixgbe_configure_fcoe - configures registers for fcoe at start - * @adapter: ptr to ixgbe adapter - * - * This sets up FCoE related registers - * - * Returns : none - */ -void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) -{ - int i, fcoe_q, fcoe_i; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - - if (!fcoe->pool) { - spin_lock_init(&fcoe->lock); - - ixgbe_fcoe_ddp_pools_alloc(adapter); - if (!fcoe->pool) { - e_err(drv, "failed to alloc percpu fcoe DDP pools\n"); - return; - } - - /* Extra buffer to be shared by all DDPs for HW work around */ - fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); - if (fcoe->extra_ddp_buffer == NULL) { - e_err(drv, "failed to allocated extra DDP buffer\n"); - goto out_ddp_pools; - } - - fcoe->extra_ddp_buffer_dma = - dma_map_single(&adapter->pdev->dev, - fcoe->extra_ddp_buffer, - IXGBE_FCBUFF_MIN, - DMA_FROM_DEVICE); - if (dma_mapping_error(&adapter->pdev->dev, - fcoe->extra_ddp_buffer_dma)) { - e_err(drv, "failed to map extra DDP buffer\n"); - goto out_extra_ddp_buffer; - } - } - - /* Enable L2 eth type filter for FCoE */ - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), - (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); - /* Enable L2 eth type filter for FIP */ - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), - (ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); - if (adapter->ring_feature[RING_F_FCOE].indices) { - /* Use multiple rx queues for FCoE by redirection table */ - for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { - fcoe_i = f->mask + i % f->indices; - fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; - fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); - } - IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); - IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); - } else { - /* Use single rx queue for FCoE */ - fcoe_i = f->mask; - fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0); - IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), - IXGBE_ETQS_QUEUE_EN | - (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); - } - /* send FIP frames to the first FCoE queue */ - fcoe_i = f->mask; - fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), - IXGBE_ETQS_QUEUE_EN | - (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); - - IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, - IXGBE_FCRXCTRL_FCOELLI | - IXGBE_FCRXCTRL_FCCRCBO | - (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); - return; - -out_extra_ddp_buffer: - kfree(fcoe->extra_ddp_buffer); -out_ddp_pools: - ixgbe_fcoe_ddp_pools_free(fcoe); -} - -/** - * ixgbe_cleanup_fcoe - release all fcoe ddp context resources - * @adapter : ixgbe adapter - * - * Cleans up outstanding ddp context resources - * - * Returns : none - */ -void 
ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) -{ - int i; - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - - if (!fcoe->pool) - return; - - for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) - ixgbe_fcoe_ddp_put(adapter->netdev, i); - dma_unmap_single(&adapter->pdev->dev, - fcoe->extra_ddp_buffer_dma, - IXGBE_FCBUFF_MIN, - DMA_FROM_DEVICE); - kfree(fcoe->extra_ddp_buffer); - ixgbe_fcoe_ddp_pools_free(fcoe); -} - -/** - * ixgbe_fcoe_enable - turn on FCoE offload feature - * @netdev: the corresponding netdev - * - * Turns on FCoE offload feature in 82599. - * - * Returns : 0 indicates success or -EINVAL on failure - */ -int ixgbe_fcoe_enable(struct net_device *netdev) -{ - int rc = -EINVAL; - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - - - if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) - goto out_enable; - - atomic_inc(&fcoe->refcnt); - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - goto out_enable; - - e_info(drv, "Enabling FCoE offload features.\n"); - if (netif_running(netdev)) - netdev->netdev_ops->ndo_stop(netdev); - - ixgbe_clear_interrupt_scheme(adapter); - - adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; - netdev->features |= NETIF_F_FCOE_CRC; - netdev->features |= NETIF_F_FSO; - netdev->features |= NETIF_F_FCOE_MTU; - netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; - - ixgbe_init_interrupt_scheme(adapter); - netdev_features_change(netdev); - - if (netif_running(netdev)) - netdev->netdev_ops->ndo_open(netdev); - rc = 0; - -out_enable: - return rc; -} - -/** - * ixgbe_fcoe_disable - turn off FCoE offload feature - * @netdev: the corresponding netdev - * - * Turns off FCoE offload feature in 82599. - * - * Returns : 0 indicates success or -EINVAL on failure - */ -int ixgbe_fcoe_disable(struct net_device *netdev) -{ - int rc = -EINVAL; - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) - goto out_disable; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - goto out_disable; - - if (!atomic_dec_and_test(&fcoe->refcnt)) - goto out_disable; - - e_info(drv, "Disabling FCoE offload features.\n"); - netdev->features &= ~NETIF_F_FCOE_CRC; - netdev->features &= ~NETIF_F_FSO; - netdev->features &= ~NETIF_F_FCOE_MTU; - netdev->fcoe_ddp_xid = 0; - netdev_features_change(netdev); - - if (netif_running(netdev)) - netdev->netdev_ops->ndo_stop(netdev); - - ixgbe_clear_interrupt_scheme(adapter); - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = 0; - ixgbe_cleanup_fcoe(adapter); - ixgbe_init_interrupt_scheme(adapter); - - if (netif_running(netdev)) - netdev->netdev_ops->ndo_open(netdev); - rc = 0; - -out_disable: - return rc; -} - -/** - * ixgbe_fcoe_get_wwn - get world wide name for the node or the port - * @netdev : ixgbe adapter - * @wwn : the world wide name - * @type: the type of world wide name - * - * Returns the node or port world wide name if both the prefix and the san - * mac address are valid, then the wwn is formed based on the NAA-2 for - * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 
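The NAA-2 construction described in this kernel-doc amounts to packing a 16-bit prefix and the 6-byte SAN MAC into one 64-bit value, exactly the shift chain in ixgbe_fcoe_get_wwn() below. A standalone sketch follows; make_wwn() and the sample prefix/MAC values are made up for illustration.

#include <stdio.h>
#include <stdint.h>

static uint64_t make_wwn(uint16_t prefix, const uint8_t mac[6])
{
	uint64_t wwn = (uint64_t)prefix << 48;
	int i;

	/* MAC bytes fill bits 47..0, most significant byte first */
	for (i = 0; i < 6; i++)
		wwn |= (uint64_t)mac[i] << (8 * (5 - i));
	return wwn;
}

int main(void)
{
	const uint8_t san_mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

	/* 0x2000 is a sample 16-bit prefix, standing in for wwnn_prefix */
	printf("wwnn = 0x%016llx\n",
	       (unsigned long long)make_wwn(0x2000, san_mac));
	return 0;
}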
- * - * Returns : 0 on success - */ -int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) -{ - int rc = -EINVAL; - u16 prefix = 0xffff; - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_mac_info *mac = &adapter->hw.mac; - - switch (type) { - case NETDEV_FCOE_WWNN: - prefix = mac->wwnn_prefix; - break; - case NETDEV_FCOE_WWPN: - prefix = mac->wwpn_prefix; - break; - default: - break; - } - - if ((prefix != 0xffff) && - is_valid_ether_addr(mac->san_addr)) { - *wwn = ((u64) prefix << 48) | - ((u64) mac->san_addr[0] << 40) | - ((u64) mac->san_addr[1] << 32) | - ((u64) mac->san_addr[2] << 24) | - ((u64) mac->san_addr[3] << 16) | - ((u64) mac->san_addr[4] << 8) | - ((u64) mac->san_addr[5]); - rc = 0; - } - return rc; -} diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h deleted file mode 100644 index 99de145..0000000 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ /dev/null @@ -1,81 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_FCOE_H -#define _IXGBE_FCOE_H - -#include -#include - -/* shift bits within STAT fo FCSTAT */ -#define IXGBE_RXDADV_FCSTAT_SHIFT 4 - -/* ddp user buffer */ -#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ -#define IXGBE_FCPTR_ALIGN 16 -#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) -#define IXGBE_FCBUFF_4KB 0x0 -#define IXGBE_FCBUFF_8KB 0x1 -#define IXGBE_FCBUFF_16KB 0x2 -#define IXGBE_FCBUFF_64KB 0x3 -#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ -#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ -#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ - -/* Default traffic class to use for FCoE */ -#define IXGBE_FCOE_DEFTC 3 - -/* fcerr */ -#define IXGBE_FCERR_BADCRC 0x00100000 - -/* FCoE DDP for target mode */ -#define __IXGBE_FCOE_TARGET 1 - -struct ixgbe_fcoe_ddp { - int len; - u32 err; - unsigned int sgc; - struct scatterlist *sgl; - dma_addr_t udp; - u64 *udl; - struct pci_pool *pool; -}; - -struct ixgbe_fcoe { - struct pci_pool **pool; - atomic_t refcnt; - spinlock_t lock; - struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; - unsigned char *extra_ddp_buffer; - dma_addr_t extra_ddp_buffer_dma; - unsigned long mode; -#ifdef CONFIG_IXGBE_DCB - u8 up; -#endif -}; - -#endif /* _IXGBE_FCOE_H */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c deleted file mode 100644 index e86297b..0000000 --- a/drivers/net/ixgbe/ixgbe_main.c +++ /dev/null @@ -1,7934 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ixgbe.h" -#include "ixgbe_common.h" -#include "ixgbe_dcb_82599.h" -#include "ixgbe_sriov.h" - -char ixgbe_driver_name[] = "ixgbe"; -static const char ixgbe_driver_string[] = - "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define MAJ 3 -#define MIN 4 -#define BUILD 8 -#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ - __stringify(BUILD) "-k" -const char ixgbe_driver_version[] = DRV_VERSION; -static const char ixgbe_copyright[] = - "Copyright (c) 1999-2011 Intel Corporation."; - -static const struct ixgbe_info *ixgbe_info_tbl[] = { - [board_82598] = &ixgbe_82598_info, - [board_82599] = &ixgbe_82599_info, - [board_X540] = &ixgbe_X540_info, -}; - -/* ixgbe_pci_tbl - PCI Device ID Table - * - * Wildcard entries (PCI_ANY_ID) should come last - * Last entry must be all 0s - * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, - * Class, Class Mask, private data (not used) } - */ -static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), - board_82598 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), - board_X540 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), - board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), - board_82599 }, - - /* required last entry */ - {0, } -}; -MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); - -#ifdef CONFIG_IXGBE_DCA -static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, - void *p); -static struct notifier_block dca_notifier = { - .notifier_call = ixgbe_notify_dca, - .next = NULL, - .priority = 0 -}; -#endif - -#ifdef CONFIG_PCI_IOV -static unsigned int max_vfs; -module_param(max_vfs, uint, 0); -MODULE_PARM_DESC(max_vfs, - "Maximum number of virtual functions to allocate per physical function"); -#endif /* CONFIG_PCI_IOV */ - -MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -#define DEFAULT_DEBUG_LEVEL_SHIFT 3 - -static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 gcr; - u32 gpie; - u32 vmdctl; - -#ifdef CONFIG_PCI_IOV - /* disable iov and allow time for transactions to clear */ - pci_disable_sriov(adapter->pdev); -#endif - - /* turn off device IOV mode */ - gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); - gcr &= ~(IXGBE_GCR_EXT_SRIOV); - IXGBE_WRITE_REG(hw, 
IXGBE_GCR_EXT, gcr); - gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); - gpie &= ~IXGBE_GPIE_VTMODE_MASK; - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); - - /* set default pool back to 0 */ - vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); - IXGBE_WRITE_FLUSH(hw); - - /* take a breather then clean up driver data */ - msleep(100); - - kfree(adapter->vfinfo); - adapter->vfinfo = NULL; - - adapter->num_vfs = 0; - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; -} - -static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) -{ - if (!test_bit(__IXGBE_DOWN, &adapter->state) && - !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) - schedule_work(&adapter->service_task); -} - -static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) -{ - BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); - - /* flush memory to make sure state is correct before next watchog */ - smp_mb__before_clear_bit(); - clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); -} - -struct ixgbe_reg_info { - u32 ofs; - char *name; -}; - -static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { - - /* General Registers */ - {IXGBE_CTRL, "CTRL"}, - {IXGBE_STATUS, "STATUS"}, - {IXGBE_CTRL_EXT, "CTRL_EXT"}, - - /* Interrupt Registers */ - {IXGBE_EICR, "EICR"}, - - /* RX Registers */ - {IXGBE_SRRCTL(0), "SRRCTL"}, - {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, - {IXGBE_RDLEN(0), "RDLEN"}, - {IXGBE_RDH(0), "RDH"}, - {IXGBE_RDT(0), "RDT"}, - {IXGBE_RXDCTL(0), "RXDCTL"}, - {IXGBE_RDBAL(0), "RDBAL"}, - {IXGBE_RDBAH(0), "RDBAH"}, - - /* TX Registers */ - {IXGBE_TDBAL(0), "TDBAL"}, - {IXGBE_TDBAH(0), "TDBAH"}, - {IXGBE_TDLEN(0), "TDLEN"}, - {IXGBE_TDH(0), "TDH"}, - {IXGBE_TDT(0), "TDT"}, - {IXGBE_TXDCTL(0), "TXDCTL"}, - - /* List Terminator */ - {} -}; - - -/* - * ixgbe_regdump - register printout routine - */ -static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) -{ - int i = 0, j = 0; - char rname[16]; - u32 regs[64]; - - switch (reginfo->ofs) { - case IXGBE_SRRCTL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); - break; - case IXGBE_DCA_RXCTRL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - break; - case IXGBE_RDLEN(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); - break; - case IXGBE_RDH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); - break; - case IXGBE_RDT(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); - break; - case IXGBE_RXDCTL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); - break; - case IXGBE_RDBAL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); - break; - case IXGBE_RDBAH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); - break; - case IXGBE_TDBAL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); - break; - case IXGBE_TDBAH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); - break; - case IXGBE_TDLEN(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); - break; - case IXGBE_TDH(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); - break; - case IXGBE_TDT(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); - break; - case IXGBE_TXDCTL(0): - for (i = 0; i < 64; i++) - regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); - break; - 
default: - pr_info("%-15s %08x\n", reginfo->name, - IXGBE_READ_REG(hw, reginfo->ofs)); - return; - } - - for (i = 0; i < 8; i++) { - snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); - pr_err("%-15s", rname); - for (j = 0; j < 8; j++) - pr_cont(" %08x", regs[i*8+j]); - pr_cont("\n"); - } - -} - -/* - * ixgbe_dump - Print registers, tx-rings and rx-rings - */ -static void ixgbe_dump(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_reg_info *reginfo; - int n = 0; - struct ixgbe_ring *tx_ring; - struct ixgbe_tx_buffer *tx_buffer_info; - union ixgbe_adv_tx_desc *tx_desc; - struct my_u0 { u64 a; u64 b; } *u0; - struct ixgbe_ring *rx_ring; - union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *rx_buffer_info; - u32 staterr; - int i = 0; - - if (!netif_msg_hw(adapter)) - return; - - /* Print netdevice Info */ - if (netdev) { - dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state " - "trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", - netdev->name, - netdev->state, - netdev->trans_start, - netdev->last_rx); - } - - /* Print Registers */ - dev_info(&adapter->pdev->dev, "Register Dump\n"); - pr_info(" Register Name Value\n"); - for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; - reginfo->name; reginfo++) { - ixgbe_regdump(hw, reginfo); - } - - /* Print TX Ring Summary */ - if (!netdev || !netif_running(netdev)) - goto exit; - - dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); - pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); - for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; - tx_buffer_info = - &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", - n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)tx_buffer_info->dma, - tx_buffer_info->length, - tx_buffer_info->next_to_watch, - (u64)tx_buffer_info->time_stamp); - } - - /* Print TX Rings */ - if (!netif_msg_tx_done(adapter)) - goto rx_ring_summary; - - dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); - - /* Transmit Descriptor Formats - * - * Advanced Transmit Descriptor - * +--------------------------------------------------------------+ - * 0 | Buffer Address [63:0] | - * +--------------------------------------------------------------+ - * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | - * +--------------------------------------------------------------+ - * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 - */ - - for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("T [desc] [address 63:0 ] " - "[PlPOIdStDDt Ln] [bi->dma ] " - "leng ntw timestamp bi->skb\n"); - - for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - u0 = (struct my_u0 *)tx_desc; - pr_info("T [0x%03X] %016llX %016llX %016llX" - " %04X %3X %016llX %p", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)tx_buffer_info->dma, - tx_buffer_info->length, - tx_buffer_info->next_to_watch, - (u64)tx_buffer_info->time_stamp, - tx_buffer_info->skb); - if (i == tx_ring->next_to_use && - i == tx_ring->next_to_clean) - pr_cont(" NTC/U\n"); - else if (i == tx_ring->next_to_use) - pr_cont(" NTU\n"); - else if (i == 
tx_ring->next_to_clean) - pr_cont(" NTC\n"); - else - pr_cont("\n"); - - if (netif_msg_pktdata(adapter) && - tx_buffer_info->dma != 0) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt(tx_buffer_info->dma), - tx_buffer_info->length, true); - } - } - - /* Print RX Rings Summary */ -rx_ring_summary: - dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); - pr_info("Queue [NTU] [NTC]\n"); - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - pr_info("%5d %5X %5X\n", - n, rx_ring->next_to_use, rx_ring->next_to_clean); - } - - /* Print RX Rings */ - if (!netif_msg_rx_status(adapter)) - goto exit; - - dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); - - /* Advanced Receive Descriptor (Read) Format - * 63 1 0 - * +-----------------------------------------------------+ - * 0 | Packet Buffer Address [63:1] |A0/NSE| - * +----------------------------------------------+------+ - * 8 | Header Buffer Address [63:1] | DD | - * +-----------------------------------------------------+ - * - * - * Advanced Receive Descriptor (Write-Back) Format - * - * 63 48 47 32 31 30 21 20 16 15 4 3 0 - * +------------------------------------------------------+ - * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | - * | Checksum Ident | | | | Type | Type | - * +------------------------------------------------------+ - * 8 | VLAN Tag | Length | Extended Error | Extended Status | - * +------------------------------------------------------+ - * 63 48 47 32 31 20 19 0 - */ - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - pr_info("------------------------------------\n"); - pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); - pr_info("------------------------------------\n"); - pr_info("R [desc] [ PktBuf A0] " - "[ HeadBuf DD] [bi->dma ] [bi->skb] " - "<-- Adv Rx Read format\n"); - pr_info("RWB[desc] [PcsmIpSHl PtRs] " - "[vl er S cks ln] ---------------- [bi->skb] " - "<-- Adv Rx Write-Back format\n"); - - for (i = 0; i < rx_ring->count; i++) { - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); - u0 = (struct my_u0 *)rx_desc; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - if (staterr & IXGBE_RXD_STAT_DD) { - /* Descriptor Done */ - pr_info("RWB[0x%03X] %016llX " - "%016llX ---------------- %p", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - rx_buffer_info->skb); - } else { - pr_info("R [0x%03X] %016llX " - "%016llX %016llX %p", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)rx_buffer_info->dma, - rx_buffer_info->skb); - - if (netif_msg_pktdata(adapter)) { - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt(rx_buffer_info->dma), - rx_ring->rx_buf_len, true); - - if (rx_ring->rx_buf_len - < IXGBE_RXBUFFER_2048) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt( - rx_buffer_info->page_dma + - rx_buffer_info->page_offset - ), - PAGE_SIZE/2, true); - } - } - - if (i == rx_ring->next_to_use) - pr_cont(" NTU\n"); - else if (i == rx_ring->next_to_clean) - pr_cont(" NTC\n"); - else - pr_cont("\n"); - - } - } - -exit: - return; -} - -static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) -{ - u32 ctrl_ext; - - /* Let firmware take over control of h/w */ - ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, - ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); -} - -static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) -{ - u32 ctrl_ext; - - /* Let firmware know the 
driver has taken over */ - ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, - ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); -} - -/* - * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors - * @adapter: pointer to adapter struct - * @direction: 0 for Rx, 1 for Tx, -1 for other causes - * @queue: queue to map the corresponding interrupt to - * @msix_vector: the vector to map to the corresponding queue - * - */ -static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, - u8 queue, u8 msix_vector) -{ - u32 ivar, index; - struct ixgbe_hw *hw = &adapter->hw; - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - if (direction == -1) - direction = 0; - index = (((direction * 64) + queue) >> 2) & 0x1F; - ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); - ivar &= ~(0xFF << (8 * (queue & 0x3))); - ivar |= (msix_vector << (8 * (queue & 0x3))); - IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - if (direction == -1) { - /* other causes */ - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - index = ((queue & 1) * 8); - ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); - ivar &= ~(0xFF << index); - ivar |= (msix_vector << index); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); - break; - } else { - /* tx or rx causes */ - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - index = ((16 * (queue & 1)) + (8 * direction)); - ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); - ivar &= ~(0xFF << index); - ivar |= (msix_vector << index); - IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); - break; - } - default: - break; - } -} - -static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, - u64 qmask) -{ - u32 mask; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - mask = (qmask & 0xFFFFFFFF); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); - mask = (qmask >> 32); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); - break; - default: - break; - } -} - -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *tx_buffer_info) -{ - if (tx_buffer_info->dma) { - if (tx_buffer_info->mapped_as_page) - dma_unmap_page(tx_ring->dev, - tx_buffer_info->dma, - tx_buffer_info->length, - DMA_TO_DEVICE); - else - dma_unmap_single(tx_ring->dev, - tx_buffer_info->dma, - tx_buffer_info->length, - DMA_TO_DEVICE); - tx_buffer_info->dma = 0; - } - if (tx_buffer_info->skb) { - dev_kfree_skb_any(tx_buffer_info->skb); - tx_buffer_info->skb = NULL; - } - tx_buffer_info->time_stamp = 0; - /* tx_buffer_info must be completely set up in the transmit path */ -} - -static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_hw_stats *hwstats = &adapter->stats; - u32 data = 0; - u32 xoff[8] = {0}; - int i; - - if ((hw->fc.current_mode == ixgbe_fc_full) || - (hw->fc.current_mode == ixgbe_fc_rx_pause)) { - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); - break; - default: - data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); - } - hwstats->lxoffrxc += data; - - /* refill credits (no tx hang) if we received xoff */ - if (!data) - return; - - for (i = 0; i < adapter->num_tx_queues; i++) - clear_bit(__IXGBE_HANG_CHECK_ARMED, - 
&adapter->tx_ring[i]->state); - return; - } else if (!(adapter->dcb_cfg.pfc_mode_enable)) - return; - - /* update stats for each tc, only valid with PFC enabled */ - for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); - break; - default: - xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); - } - hwstats->pxoffrxc[i] += xoff[i]; - } - - /* disarm tx queues that have received xoff frames */ - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - u8 tc = tx_ring->dcb_tc; - - if (xoff[tc]) - clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); - } -} - -static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) -{ - return ring->tx_stats.completed; -} - -static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) -{ - struct ixgbe_adapter *adapter = netdev_priv(ring->netdev); - struct ixgbe_hw *hw = &adapter->hw; - - u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); - u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); - - if (head != tail) - return (head < tail) ? - tail - head : (tail + ring->count - head); - - return 0; -} - -static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) -{ - u32 tx_done = ixgbe_get_tx_completed(tx_ring); - u32 tx_done_old = tx_ring->tx_stats.tx_done_old; - u32 tx_pending = ixgbe_get_tx_pending(tx_ring); - bool ret = false; - - clear_check_for_tx_hang(tx_ring); - - /* - * Check for a hung queue, but be thorough. This verifies - * that a transmit has been completed since the previous - * check AND there is at least one packet pending. The - * ARMED bit is set to indicate a potential hang. The - * bit is cleared if a pause frame is received to remove - * false hang detection due to PFC or 802.3x frames. By - * requiring this to fail twice we avoid races with - * pfc clearing the ARMED bit and conditions where we - * run the check_tx_hang logic with a transmit completion - * pending but without time to complete it yet. 
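- *
- * Illustrative restatement (a sketch, not the driver code itself):
- *
- *	if (tx_done_old == tx_done && tx_pending)
- *		hung = test_and_set_bit(ARMED, &state);	second hit => hang
- *	else
- *		clear_bit(ARMED, &state);		progress made, disarm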
- */ - if ((tx_done_old == tx_done) && tx_pending) { - /* make sure it is true for two checks in a row */ - ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, - &tx_ring->state); - } else { - /* update completed stats and continue */ - tx_ring->tx_stats.tx_done_old = tx_done; - /* reset the countdown */ - clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); - } - - return ret; -} - -/** - * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout - * @adapter: driver private struct - **/ -static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) -{ - - /* Do the reset outside of interrupt context */ - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; - ixgbe_service_event_schedule(adapter); - } -} - -/** - * ixgbe_clean_tx_irq - Reclaim resources after transmit completes - * @q_vector: structure containing interrupt and ring information - * @tx_ring: tx ring to clean - **/ -static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *tx_ring) -{ - struct ixgbe_adapter *adapter = q_vector->adapter; - union ixgbe_adv_tx_desc *tx_desc, *eop_desc; - struct ixgbe_tx_buffer *tx_buffer_info; - unsigned int total_bytes = 0, total_packets = 0; - u16 i, eop, count = 0; - - i = tx_ring->next_to_clean; - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); - - while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && - (count < q_vector->tx.work_limit)) { - bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ - for ( ; !cleaned; count++) { - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - - tx_desc->wb.status = 0; - cleaned = (i == eop); - - i++; - if (i == tx_ring->count) - i = 0; - - if (cleaned && tx_buffer_info->skb) { - total_bytes += tx_buffer_info->bytecount; - total_packets += tx_buffer_info->gso_segs; - } - - ixgbe_unmap_and_free_tx_resource(tx_ring, - tx_buffer_info); - } - - tx_ring->tx_stats.completed++; - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); - } - - tx_ring->next_to_clean = i; - tx_ring->stats.bytes += total_bytes; - tx_ring->stats.packets += total_packets; - u64_stats_update_begin(&tx_ring->syncp); - q_vector->tx.total_bytes += total_bytes; - q_vector->tx.total_packets += total_packets; - u64_stats_update_end(&tx_ring->syncp); - - if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { - /* schedule immediate reset if we believe we hung */ - struct ixgbe_hw *hw = &adapter->hw; - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); - e_err(drv, "Detected Tx Unit Hang\n" - " Tx Queue <%d>\n" - " TDH, TDT <%x>, <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "tx_buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->queue_index, - IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), - IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), - tx_ring->next_to_use, eop, - tx_ring->tx_buffer_info[eop].time_stamp, jiffies); - - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - - e_info(probe, - "tx hang %d detected on queue %d, resetting adapter\n", - adapter->tx_timeout_count + 1, tx_ring->queue_index); - - /* schedule immediate reset if we believe we hung */ - ixgbe_tx_timeout_reset(adapter); - - /* the adapter is about to reset, no point in enabling stuff */ - return true; - } - -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && - 
(ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. - */ - smp_mb(); - if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__IXGBE_DOWN, &adapter->state)) { - netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - } - } - - return count < q_vector->tx.work_limit; -} - -#ifdef CONFIG_IXGBE_DCA -static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - int cpu) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 rxctrl; - u8 reg_idx = rx_ring->reg_idx; - - rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx)); - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; - rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; - rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << - IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); - break; - default: - break; - } - rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; - rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; - rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); - IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); -} - -static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - int cpu) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 txctrl; - u8 reg_idx = tx_ring->reg_idx; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx)); - txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; - txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); - txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; - txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << - IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); - txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); - break; - default: - break; - } -} - -static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) -{ - struct ixgbe_adapter *adapter = q_vector->adapter; - int cpu = get_cpu(); - long r_idx; - int i; - - if (q_vector->cpu == cpu) - goto out_no_update; - - r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); - for (i = 0; i < q_vector->tx.count; i++) { - ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu); - r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, - r_idx + 1); - } - - r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rx.count; i++) { - ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu); - r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, - r_idx + 1); - } - - q_vector->cpu = cpu; -out_no_update: - put_cpu(); -} - -static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) -{ - int num_q_vectors; - int i; - - if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) - return; - - /* always use CB2 mode, difference is masked in the CB driver */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - else - num_q_vectors = 1; - - for (i = 0; i < num_q_vectors; i++) { - adapter->q_vector[i]->cpu = -1; - ixgbe_update_dca(adapter->q_vector[i]); - } -} - -static int 
__ixgbe_notify_dca(struct device *dev, void *data) -{ - struct ixgbe_adapter *adapter = dev_get_drvdata(dev); - unsigned long event = *(unsigned long *)data; - - if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) - return 0; - - switch (event) { - case DCA_PROVIDER_ADD: - /* if we're already enabled, don't do it again */ - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - break; - if (dca_add_requester(dev) == 0) { - adapter->flags |= IXGBE_FLAG_DCA_ENABLED; - ixgbe_setup_dca(adapter); - break; - } - /* Fall Through since DCA is disabled. */ - case DCA_PROVIDER_REMOVE: - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { - dca_remove_requester(dev); - adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); - } - break; - } - - return 0; -} -#endif /* CONFIG_IXGBE_DCA */ - -static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); -} - -/** - * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type - * @adapter: address of board private structure - * @rx_desc: advanced rx descriptor - * - * Returns: true if it is FCoE pkt - */ -static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter, - union ixgbe_adv_rx_desc *rx_desc) -{ - __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; - - return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && - ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == - (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << - IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); -} - -/** - * ixgbe_receive_skb - Send a completed packet up the stack - * @q_vector: structure containing interrupt and ring information - * @skb: packet to send up - * @status: hardware indication of status of receive - * @ring: rx descriptor ring (for a specific queue) - * @rx_desc: rx descriptor - **/ -static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb, u8 status, - struct ixgbe_ring *ring, - union ixgbe_adv_rx_desc *rx_desc) -{ - struct ixgbe_adapter *adapter = q_vector->adapter; - struct napi_struct *napi = &q_vector->napi; - bool is_vlan = (status & IXGBE_RXD_STAT_VP); - u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - - if (is_vlan && (tag & VLAN_VID_MASK)) - __vlan_hwaccel_put_tag(skb, tag); - - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - napi_gro_receive(napi, skb); - else - netif_rx(skb); -} - -/** - * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum - * @adapter: address of board private structure - * @rx_desc: advanced rx descriptor - * @skb: skb currently being received and modified - * @status_err: status error value of last descriptor in packet - **/ -static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb, - u32 status_err) -{ - skb->ip_summed = CHECKSUM_NONE; - - /* Rx csum disabled */ - if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) - return; - - /* if IP and error */ - if ((status_err & IXGBE_RXD_STAT_IPCS) && - (status_err & IXGBE_RXDADV_ERR_IPE)) { - adapter->hw_csum_rx_error++; - return; - } - - if (!(status_err & IXGBE_RXD_STAT_L4CS)) - return; - - if (status_err & IXGBE_RXDADV_ERR_TCPE) { - u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; - - /* - * 82599 errata, UDP frames with a 0 checksum can be marked as - * checksum errors. 
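- * A zero UDP checksum is legal on the wire (RFC 768 defines an
- * all-zero checksum as "no checksum computed"), so such frames are
- * passed through below without bumping hw_csum_rx_error.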
- */ - if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && - (adapter->hw.mac.type == ixgbe_mac_82599EB)) - return; - - adapter->hw_csum_rx_error++; - return; - } - - /* It must be a TCP or UDP packet with a valid checksum */ - skb->ip_summed = CHECKSUM_UNNECESSARY; -} - -static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) -{ - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - writel(val, rx_ring->tail); -} - -/** - * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace - **/ -void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) -{ - union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *bi; - struct sk_buff *skb; - u16 i = rx_ring->next_to_use; - - /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev) - return; - - while (cleaned_count--) { - rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); - bi = &rx_ring->rx_buffer_info[i]; - skb = bi->skb; - - if (!skb) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); - if (!skb) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - goto no_buffers; - } - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - bi->skb = skb; - } - - if (!bi->dma) { - bi->dma = dma_map_single(rx_ring->dev, - skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->dma)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - bi->dma = 0; - goto no_buffers; - } - } - - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = netdev_alloc_page(rx_ring->netdev); - if (!bi->page) { - rx_ring->rx_stats.alloc_rx_page_failed++; - goto no_buffers; - } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_rx_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } - - i++; - if (i == rx_ring->count) - i = 0; - } - -no_buffers: - if (rx_ring->next_to_use != i) { - rx_ring->next_to_use = i; - ixgbe_release_rx_desc(rx_ring, i); - } -} - -static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) -{ - /* HW will not DMA in data larger than the given buffer, even if it - * parses the (NFS, of course) header to be larger. In that case, it - * fills the header buffer and spills the rest into the page. - */ - u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info); - u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> - IXGBE_RXDADV_HDRBUFLEN_SHIFT; - if (hlen > IXGBE_RX_HDR_SIZE) - hlen = IXGBE_RX_HDR_SIZE; - return hlen; -} - -/** - * ixgbe_transform_rsc_queue - change rsc queue into a full packet - * @skb: pointer to the last skb in the rsc queue - * - * This function changes a queue full of hw rsc buffers into a completed - * packet. 
It uses the ->prev pointers to find the first packet and then - * turns it into the frag list owner. - **/ -static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) -{ - unsigned int frag_list_size = 0; - unsigned int skb_cnt = 1; - - while (skb->prev) { - struct sk_buff *prev = skb->prev; - frag_list_size += skb->len; - skb->prev = NULL; - skb = prev; - skb_cnt++; - } - - skb_shinfo(skb)->frag_list = skb->next; - skb->next = NULL; - skb->len += frag_list_size; - skb->data_len += frag_list_size; - skb->truesize += frag_list_size; - IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt; - - return skb; -} - -static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc) -{ - return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & - IXGBE_RXDADV_RSCCNT_MASK); -} - -static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *rx_ring, - int *work_done, int work_to_do) -{ - struct ixgbe_adapter *adapter = q_vector->adapter; - union ixgbe_adv_rx_desc *rx_desc, *next_rxd; - struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; - struct sk_buff *skb; - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - const int current_node = numa_node_id(); -#ifdef IXGBE_FCOE - int ddp_bytes = 0; -#endif /* IXGBE_FCOE */ - u32 staterr; - u16 i; - u16 cleaned_count = 0; - bool pkt_is_rsc = false; - - i = rx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - - while (staterr & IXGBE_RXD_STAT_DD) { - u32 upper_len = 0; - - rmb(); /* read descriptor and rx_buffer_info after status DD */ - - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - - skb = rx_buffer_info->skb; - rx_buffer_info->skb = NULL; - prefetch(skb->data); - - if (ring_is_rsc_enabled(rx_ring)) - pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); - - /* if this is a skb from previous receive DMA will be 0 */ - if (rx_buffer_info->dma) { - u16 hlen; - if (pkt_is_rsc && - !(staterr & IXGBE_RXD_STAT_EOP) && - !skb->prev) { - /* - * When HWRSC is enabled, delay unmapping - * of the first packet. It carries the - * header information, HW may still - * access the header after the writeback. 
- * Only unmap it when EOP is reached - */ - IXGBE_RSC_CB(skb)->delay_unmap = true; - IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; - } else { - dma_unmap_single(rx_ring->dev, - rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - } - rx_buffer_info->dma = 0; - - if (ring_is_ps_enabled(rx_ring)) { - hlen = ixgbe_get_hlen(rx_desc); - upper_len = le16_to_cpu(rx_desc->wb.upper.length); - } else { - hlen = le16_to_cpu(rx_desc->wb.upper.length); - } - - skb_put(skb, hlen); - } else { - /* assume packet split since header is unmapped */ - upper_len = le16_to_cpu(rx_desc->wb.upper.length); - } - - if (upper_len) { - dma_unmap_page(rx_ring->dev, - rx_buffer_info->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - rx_buffer_info->page_dma = 0; - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buffer_info->page, - rx_buffer_info->page_offset, - upper_len); - - if ((page_count(rx_buffer_info->page) == 1) && - (page_to_nid(rx_buffer_info->page) == current_node)) - get_page(rx_buffer_info->page); - else - rx_buffer_info->page = NULL; - - skb->len += upper_len; - skb->data_len += upper_len; - skb->truesize += upper_len; - } - - i++; - if (i == rx_ring->count) - i = 0; - - next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i); - prefetch(next_rxd); - cleaned_count++; - - if (pkt_is_rsc) { - u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> - IXGBE_RXDADV_NEXTP_SHIFT; - next_buffer = &rx_ring->rx_buffer_info[nextp]; - } else { - next_buffer = &rx_ring->rx_buffer_info[i]; - } - - if (!(staterr & IXGBE_RXD_STAT_EOP)) { - if (ring_is_ps_enabled(rx_ring)) { - rx_buffer_info->skb = next_buffer->skb; - rx_buffer_info->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } else { - skb->next = next_buffer->skb; - skb->next->prev = skb; - } - rx_ring->rx_stats.non_eop_descs++; - goto next_desc; - } - - if (skb->prev) { - skb = ixgbe_transform_rsc_queue(skb); - /* if we got here without RSC the packet is invalid */ - if (!pkt_is_rsc) { - __pskb_trim(skb, 0); - rx_buffer_info->skb = skb; - goto next_desc; - } - } - - if (ring_is_rsc_enabled(rx_ring)) { - if (IXGBE_RSC_CB(skb)->delay_unmap) { - dma_unmap_single(rx_ring->dev, - IXGBE_RSC_CB(skb)->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - IXGBE_RSC_CB(skb)->dma = 0; - IXGBE_RSC_CB(skb)->delay_unmap = false; - } - } - if (pkt_is_rsc) { - if (ring_is_ps_enabled(rx_ring)) - rx_ring->rx_stats.rsc_count += - skb_shinfo(skb)->nr_frags; - else - rx_ring->rx_stats.rsc_count += - IXGBE_RSC_CB(skb)->skb_cnt; - rx_ring->rx_stats.rsc_flush++; - } - - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { - dev_kfree_skb_any(skb); - goto next_desc; - } - - ixgbe_rx_checksum(adapter, rx_desc, skb, staterr); - if (adapter->netdev->features & NETIF_F_RXHASH) - ixgbe_rx_hash(rx_desc, skb); - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - total_rx_packets++; - - skb->protocol = eth_type_trans(skb, rx_ring->netdev); -#ifdef IXGBE_FCOE - /* if ddp, not passing to ULD unless for FCP_RSP or error */ - if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { - ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, - staterr); - if (!ddp_bytes) - goto next_desc; - } -#endif /* IXGBE_FCOE */ - ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); - -next_desc: - rx_desc->wb.upper.status_error = 0; - - (*work_done)++; - if (*work_done >= work_to_do) - break; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { - 
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - } - - rx_ring->next_to_clean = i; - cleaned_count = ixgbe_desc_unused(rx_ring); - - if (cleaned_count) - ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - -#ifdef IXGBE_FCOE - /* include DDPed FCoE data */ - if (ddp_bytes > 0) { - unsigned int mss; - - mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - - sizeof(struct fc_frame_header) - - sizeof(struct fcoe_crc_eof); - if (mss > 512) - mss &= ~511; - total_rx_bytes += ddp_bytes; - total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); - } -#endif /* IXGBE_FCOE */ - - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - q_vector->rx.total_packets += total_rx_packets; - q_vector->rx.total_bytes += total_rx_bytes; -} - -static int ixgbe_clean_rxonly(struct napi_struct *, int); -/** - * ixgbe_configure_msix - Configure MSI-X hardware - * @adapter: board private structure - * - * ixgbe_configure_msix sets up the hardware to properly generate MSI-X - * interrupts. - **/ -static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) -{ - struct ixgbe_q_vector *q_vector; - int i, q_vectors, v_idx, r_idx; - u32 mask; - - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* - * Populate the IVAR table and set the ITR values to the - * corresponding register. - */ - for (v_idx = 0; v_idx < q_vectors; v_idx++) { - q_vector = adapter->q_vector[v_idx]; - /* XXX for_each_set_bit(...) */ - r_idx = find_first_bit(q_vector->rx.idx, - adapter->num_rx_queues); - - for (i = 0; i < q_vector->rx.count; i++) { - u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; - ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); - r_idx = find_next_bit(q_vector->rx.idx, - adapter->num_rx_queues, - r_idx + 1); - } - r_idx = find_first_bit(q_vector->tx.idx, - adapter->num_tx_queues); - - for (i = 0; i < q_vector->tx.count; i++) { - u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; - ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); - r_idx = find_next_bit(q_vector->tx.idx, - adapter->num_tx_queues, - r_idx + 1); - } - - if (q_vector->tx.count && !q_vector->rx.count) - /* tx only */ - q_vector->eitr = adapter->tx_eitr_param; - else if (q_vector->rx.count) - /* rx or mixed */ - q_vector->eitr = adapter->rx_eitr_param; - - ixgbe_write_eitr(q_vector); - /* If ATR is enabled, set interrupt affinity */ - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - /* - * Allocate the affinity_hint cpumask, assign the mask - * for this vector, and set our affinity_hint for - * this irq. 
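- *
- * The effect is a sketch like the following, hinting vector v_idx
- * onto CPU v_idx so ATR-steered flows complete on the CPU that
- * submitted them:
- *
- *	cpumask_set_cpu(v_idx, q_vector->affinity_mask);
- *	irq_set_affinity_hint(msix_entries[v_idx].vector, mask);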
- */ - if (!alloc_cpumask_var(&q_vector->affinity_mask, - GFP_KERNEL)) - return; - cpumask_set_cpu(v_idx, q_vector->affinity_mask); - irq_set_affinity_hint(adapter->msix_entries[v_idx].vector, - q_vector->affinity_mask); - } - } - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, - v_idx); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - ixgbe_set_ivar(adapter, -1, 1, v_idx); - break; - - default: - break; - } - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); - - /* set up to autoclear timer, and the vectors */ - mask = IXGBE_EIMS_ENABLE_MASK; - if (adapter->num_vfs) - mask &= ~(IXGBE_EIMS_OTHER | - IXGBE_EIMS_MAILBOX | - IXGBE_EIMS_LSC); - else - mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); -} - -enum latency_range { - lowest_latency = 0, - low_latency = 1, - bulk_latency = 2, - latency_invalid = 255 -}; - -/** - * ixgbe_update_itr - update the dynamic ITR value based on statistics - * @q_vector: structure containing interrupt and ring information - * @ring_container: structure containing ring performance data - * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. - * this functionality is controlled by the InterruptThrottleRate module - * parameter (see ixgbe_param.c) - **/ -static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring_container *ring_container) -{ - u64 bytes_perint; - struct ixgbe_adapter *adapter = q_vector->adapter; - int bytes = ring_container->total_bytes; - int packets = ring_container->total_packets; - u32 timepassed_us; - u8 itr_setting = ring_container->itr; - - if (packets == 0) - return; - - /* simple throttlerate management - * 0-20MB/s lowest (100000 ints/s) - * 20-100MB/s low (20000 ints/s) - * 100-1249MB/s bulk (8000 ints/s) - */ - /* what was last interrupt timeslice? */ - timepassed_us = 1000000/q_vector->eitr; - bytes_perint = bytes / timepassed_us; /* bytes/usec */ - - switch (itr_setting) { - case lowest_latency: - if (bytes_perint > adapter->eitr_low) - itr_setting = low_latency; - break; - case low_latency: - if (bytes_perint > adapter->eitr_high) - itr_setting = bulk_latency; - else if (bytes_perint <= adapter->eitr_low) - itr_setting = lowest_latency; - break; - case bulk_latency: - if (bytes_perint <= adapter->eitr_high) - itr_setting = low_latency; - break; - } - - /* clear work counters since we have the values we need */ - ring_container->total_bytes = 0; - ring_container->total_packets = 0; - - /* write updated itr to ring container */ - ring_container->itr = itr_setting; -} - -/** - * ixgbe_write_eitr - write EITR register in hardware specific way - * @q_vector: structure containing interrupt and ring information - * - * This function is made to be called by ethtool and by the driver - * when it needs to update EITR registers at runtime. Hardware - * specific quirks/differences are taken care of here. 
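- *
- * A sketch of the per-MAC quirks handled below:
- *
- *	82598:       itr_reg |= itr_reg << 16;	       must set both halves
- *	82599/X540:  itr_reg |= IXGBE_EITR_CNT_WDIS;  don't reset the counter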
- */ -void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) -{ - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_hw *hw = &adapter->hw; - int v_idx = q_vector->v_idx; - u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - /* must write high and low 16 bits to reset counter */ - itr_reg |= (itr_reg << 16); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - /* - * 82599 and X540 can support a value of zero, so allow it for - * max interrupt rate, but there is an errata where it can - * not be zero with RSC - */ - if (itr_reg == 8 && - !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) - itr_reg = 0; - - /* - * set the WDIS bit to not clear the timer bits and cause an - * immediate assertion of the interrupt - */ - itr_reg |= IXGBE_EITR_CNT_WDIS; - break; - default: - break; - } - IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); -} - -static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) -{ - u32 new_itr = q_vector->eitr; - u8 current_itr; - - ixgbe_update_itr(q_vector, &q_vector->tx); - ixgbe_update_itr(q_vector, &q_vector->rx); - - current_itr = max(q_vector->rx.itr, q_vector->tx.itr); - - switch (current_itr) { - /* counts and packets in update_itr are dependent on these numbers */ - case lowest_latency: - new_itr = 100000; - break; - case low_latency: - new_itr = 20000; /* aka hwitr = ~200 */ - break; - case bulk_latency: - new_itr = 8000; - break; - default: - break; - } - - if (new_itr != q_vector->eitr) { - /* do an exponential smoothing */ - new_itr = ((q_vector->eitr * 9) + new_itr)/10; - - /* save the algorithm value here */ - q_vector->eitr = new_itr; - - ixgbe_write_eitr(q_vector); - } -} - -/** - * ixgbe_check_overtemp_subtask - check for over temperature - * @adapter: pointer to adapter - **/ -static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 eicr = adapter->interrupt_event; - - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; - - if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && - !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) - return; - - adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; - - switch (hw->device_id) { - case IXGBE_DEV_ID_82599_T3_LOM: - /* - * Since the warning interrupt is for both ports - * we don't have to check which port it was for. - * We may also have missed the interrupt, so we - * always check whether we got an LSC. - */ - if (!(eicr & IXGBE_EICR_GPI_SDP0) && - !(eicr & IXGBE_EICR_LSC)) - return; - - if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { - u32 autoneg; - bool link_up = false; - - hw->mac.ops.check_link(hw, &autoneg, &link_up, false); - - if (link_up) - return; - } - - /* Check if this is not due to overtemp */ - if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) - return; - - break; - default: - if (!(eicr & IXGBE_EICR_GPI_SDP0)) - return; - break; - } - e_crit(drv, - "Network adapter has been stopped because it has overheated. " - "Restart the computer. 
If the problem persists, " - "power off the system and replace the adapter\n"); - - adapter->interrupt_event = 0; -} - -static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) -{ - struct ixgbe_hw *hw = &adapter->hw; - - if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && - (eicr & IXGBE_EICR_GPI_SDP1)) { - e_crit(probe, "Fan has stopped, replace the adapter\n"); - /* write to clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); - } -} - -static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) -{ - struct ixgbe_hw *hw = &adapter->hw; - - if (eicr & IXGBE_EICR_GPI_SDP2) { - /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; - ixgbe_service_event_schedule(adapter); - } - } - - if (eicr & IXGBE_EICR_GPI_SDP1) { - /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; - ixgbe_service_event_schedule(adapter); - } - } -} - -static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - adapter->lsc_int++; - adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; - adapter->link_check_timeout = jiffies; - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); - IXGBE_WRITE_FLUSH(hw); - ixgbe_service_event_schedule(adapter); - } -} - -static irqreturn_t ixgbe_msix_lsc(int irq, void *data) -{ - struct ixgbe_adapter *adapter = data; - struct ixgbe_hw *hw = &adapter->hw; - u32 eicr; - - /* - * Workaround for silicon errata. Use clear-by-write instead - * of clear-by-read. Reading with EICS will return the - * interrupt causes without clearing, which will later be done - * with the write to EICR. 
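- *
- * That is, instead of the usual read-to-clear of EICR:
- *
- *	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);	sample, clears nothing
- *	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);	clear only what we saw
- *
- * so a cause raised between the two accesses cannot be lost.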
- */ - eicr = IXGBE_READ_REG(hw, IXGBE_EICS); - IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); - - if (eicr & IXGBE_EICR_LSC) - ixgbe_check_lsc(adapter); - - if (eicr & IXGBE_EICR_MAILBOX) - ixgbe_msg_task(adapter); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - /* Handle Flow Director Full threshold interrupt */ - if (eicr & IXGBE_EICR_FLOW_DIR) { - int reinit_count = 0; - int i; - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = adapter->tx_ring[i]; - if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, - &ring->state)) - reinit_count++; - } - if (reinit_count) { - /* no more flow director interrupts until after init */ - IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); - eicr &= ~IXGBE_EICR_FLOW_DIR; - adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; - ixgbe_service_event_schedule(adapter); - } - } - ixgbe_check_sfp_event(adapter, eicr); - if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && - ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - adapter->interrupt_event = eicr; - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; - ixgbe_service_event_schedule(adapter); - } - } - break; - default: - break; - } - - ixgbe_check_fan_failure(adapter, eicr); - - /* re-enable the original interrupt state, no lsc, no queues */ - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr & - ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE)); - - return IRQ_HANDLED; -} - -static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, - u64 qmask) -{ - u32 mask; - struct ixgbe_hw *hw = &adapter->hw; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - mask = (qmask & 0xFFFFFFFF); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); - mask = (qmask >> 32); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); - break; - default: - break; - } - /* skip the flush */ -} - -static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, - u64 qmask) -{ - u32 mask; - struct ixgbe_hw *hw = &adapter->hw; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - mask = (qmask & 0xFFFFFFFF); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); - mask = (qmask >> 32); - if (mask) - IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); - break; - default: - break; - } - /* skip the flush */ -} - -static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) -{ - struct ixgbe_q_vector *q_vector = data; - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *tx_ring; - int i, r_idx; - - if (!q_vector->tx.count) - return IRQ_HANDLED; - - r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); - for (i = 0; i < q_vector->tx.count; i++) { - tx_ring = adapter->tx_ring[r_idx]; - r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, - r_idx + 1); - } - - /* EIAM disabled interrupts (on this vector) for us */ - napi_schedule(&q_vector->napi); - - return IRQ_HANDLED; -} - -/** - * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) - * @irq: unused - * @data: pointer to our q_vector struct for this interrupt vector - **/ -static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) -{ - struct ixgbe_q_vector *q_vector = data; - 
struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *rx_ring; - int r_idx; - int i; - -#ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_dca(q_vector); -#endif - - r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rx.count; i++) { - rx_ring = adapter->rx_ring[r_idx]; - r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, - r_idx + 1); - } - - if (!q_vector->rx.count) - return IRQ_HANDLED; - - /* EIAM disabled interrupts (on this vector) for us */ - napi_schedule(&q_vector->napi); - - return IRQ_HANDLED; -} - -static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) -{ - struct ixgbe_q_vector *q_vector = data; - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *ring; - int r_idx; - int i; - - if (!q_vector->tx.count && !q_vector->rx.count) - return IRQ_HANDLED; - - r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); - for (i = 0; i < q_vector->tx.count; i++) { - ring = adapter->tx_ring[r_idx]; - r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, - r_idx + 1); - } - - r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rx.count; i++) { - ring = adapter->rx_ring[r_idx]; - r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, - r_idx + 1); - } - - /* EIAM disabled interrupts (on this vector) for us */ - napi_schedule(&q_vector->napi); - - return IRQ_HANDLED; -} - -/** - * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine - * @napi: napi struct with our device's info in it - * @budget: amount of work driver is allowed to do this pass, in packets - * - * This function is optimized for cleaning one queue only on a single - * q_vector!!! - **/ -static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) -{ - struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *rx_ring = NULL; - int work_done = 0; - long r_idx; - -#ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_dca(q_vector); -#endif - - r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); - rx_ring = adapter->rx_ring[r_idx]; - - ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); - - /* If all Rx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - if (adapter->rx_itr_setting & 1) - ixgbe_set_itr(q_vector); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, - ((u64)1 << q_vector->v_idx)); - } - - return work_done; -} - -/** - * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine - * @napi: napi struct with our device's info in it - * @budget: amount of work driver is allowed to do this pass, in packets - * - * This function will clean more than one rx queue associated with a - * q_vector. 
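- *
- * The budget is divided across this vector's rx rings; e.g. with
- * budget 64 and rx.count == 3 each ring may clean
- * max(64 / 3, 1) == 21 descriptors before the poll returns.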
- **/ -static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) -{ - struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *ring = NULL; - int work_done = 0, i; - long r_idx; - bool tx_clean_complete = true; - -#ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_dca(q_vector); -#endif - - r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); - for (i = 0; i < q_vector->tx.count; i++) { - ring = adapter->tx_ring[r_idx]; - tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); - r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues, - r_idx + 1); - } - - /* attempt to distribute budget to each queue fairly, but don't allow - * the budget to go below 1 because we'll exit polling */ - budget /= (q_vector->rx.count ?: 1); - budget = max(budget, 1); - r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rx.count; i++) { - ring = adapter->rx_ring[r_idx]; - ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); - r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues, - r_idx + 1); - } - - r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues); - ring = adapter->rx_ring[r_idx]; - /* If all Rx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - if (adapter->rx_itr_setting & 1) - ixgbe_set_itr(q_vector); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, - ((u64)1 << q_vector->v_idx)); - return 0; - } - - return work_done; -} - -/** - * ixgbe_clean_txonly - msix (aka one shot) tx clean routine - * @napi: napi struct with our device's info in it - * @budget: amount of work driver is allowed to do this pass, in packets - * - * This function is optimized for cleaning one queue only on a single - * q_vector!!! 
- **/ -static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) -{ - struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *tx_ring = NULL; - int work_done = 0; - long r_idx; - -#ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_dca(q_vector); -#endif - - r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues); - tx_ring = adapter->tx_ring[r_idx]; - - if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) - work_done = budget; - - /* If all Tx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - if (adapter->tx_itr_setting & 1) - ixgbe_set_itr(q_vector); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, - ((u64)1 << q_vector->v_idx)); - } - - return work_done; -} - -static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, - int r_idx) -{ - struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; - struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; - - set_bit(r_idx, q_vector->rx.idx); - q_vector->rx.count++; - rx_ring->q_vector = q_vector; -} - -static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, - int t_idx) -{ - struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; - struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; - - set_bit(t_idx, q_vector->tx.idx); - q_vector->tx.count++; - tx_ring->q_vector = q_vector; - q_vector->tx.work_limit = a->tx_work_limit; -} - -/** - * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors - * @adapter: board private structure to initialize - * - * This function maps descriptor rings to the queue-specific vectors - * we were allotted through the MSI-X enabling code. Ideally, we'd have - * one vector per ring/queue, but on a constrained vector budget, we - * group the rings as "efficiently" as possible. You would add new - * mapping configurations in here. - **/ -static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter) -{ - int q_vectors; - int v_start = 0; - int rxr_idx = 0, txr_idx = 0; - int rxr_remaining = adapter->num_rx_queues; - int txr_remaining = adapter->num_tx_queues; - int i, j; - int rqpv, tqpv; - int err = 0; - - /* No mapping required if MSI-X is disabled. */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - goto out; - - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* - * The ideal configuration... - * We have enough vectors to map one per queue. - */ - if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { - for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) - map_vector_to_rxq(adapter, v_start, rxr_idx); - - for (; txr_idx < txr_remaining; v_start++, txr_idx++) - map_vector_to_txq(adapter, v_start, txr_idx); - - goto out; - } - - /* - * If we don't have enough vectors for a 1-to-1 - * mapping, we'll have to group them so there are - * multiple queues per vector. - */ - /* Re-adjusting *qpv takes care of the remainder. 
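- * e.g. 16 rx queues over 5 remaining vectors are assigned
- * DIV_ROUND_UP(16, 5) = 4, then 3, 3, 3 and 3 as both counts
- * shrink.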
*/ - for (i = v_start; i < q_vectors; i++) { - rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); - for (j = 0; j < rqpv; j++) { - map_vector_to_rxq(adapter, i, rxr_idx); - rxr_idx++; - rxr_remaining--; - } - tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); - for (j = 0; j < tqpv; j++) { - map_vector_to_txq(adapter, i, txr_idx); - txr_idx++; - txr_remaining--; - } - } -out: - return err; -} - -/** - * ixgbe_request_msix_irqs - Initialize MSI-X interrupts - * @adapter: board private structure - * - * ixgbe_request_msix_irqs allocates MSI-X vectors and requests - * interrupts from the kernel. - **/ -static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - irqreturn_t (*handler)(int, void *); - int i, vector, q_vectors, err; - int ri = 0, ti = 0; - - /* Decrement for Other and TCP Timer vectors */ - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - err = ixgbe_map_rings_to_vectors(adapter); - if (err) - return err; - -#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count) \ - ? &ixgbe_msix_clean_many : \ - (_v)->rx.count ? &ixgbe_msix_clean_rx : \ - (_v)->tx.count ? &ixgbe_msix_clean_tx : \ - NULL) - for (vector = 0; vector < q_vectors; vector++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; - handler = SET_HANDLER(q_vector); - - if (handler == &ixgbe_msix_clean_rx) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", netdev->name, "rx", ri++); - } else if (handler == &ixgbe_msix_clean_tx) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", netdev->name, "tx", ti++); - } else if (handler == &ixgbe_msix_clean_many) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-%s-%d", netdev->name, "TxRx", ri++); - ti++; - } else { - /* skip this unused q_vector */ - continue; - } - err = request_irq(adapter->msix_entries[vector].vector, - handler, 0, q_vector->name, - q_vector); - if (err) { - e_err(probe, "request_irq failed for MSIX interrupt " - "Error: %d\n", err); - goto free_queue_irqs; - } - } - - sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name); - err = request_irq(adapter->msix_entries[vector].vector, - ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter); - if (err) { - e_err(probe, "request_irq for msix_lsc failed: %d\n", err); - goto free_queue_irqs; - } - - return 0; - -free_queue_irqs: - for (i = vector - 1; i >= 0; i--) - free_irq(adapter->msix_entries[--vector].vector, - adapter->q_vector[i]); - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - return err; -} - -/** - * ixgbe_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - **/ -static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, - bool flush) -{ - u32 mask; - - mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); - if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) - mask |= IXGBE_EIMS_GPI_SDP0; - if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - mask |= IXGBE_EIMS_GPI_SDP1; - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - mask |= IXGBE_EIMS_ECC; - mask |= IXGBE_EIMS_GPI_SDP1; - mask |= IXGBE_EIMS_GPI_SDP2; - if (adapter->num_vfs) - mask |= IXGBE_EIMS_MAILBOX; - break; - default: - break; - } - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) - mask |= IXGBE_EIMS_FLOW_DIR; - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); - if (queues) - ixgbe_irq_enable_queues(adapter, 
~0); - if (flush) - IXGBE_WRITE_FLUSH(&adapter->hw); - - if (adapter->num_vfs > 32) { - u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); - } -} - -/** - * ixgbe_intr - legacy mode Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - **/ -static irqreturn_t ixgbe_intr(int irq, void *data) -{ - struct ixgbe_adapter *adapter = data; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; - u32 eicr; - - /* - * Workaround for silicon errata on 82598. Mask the interrupts - * before the read of EICR. - */ - IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); - - /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read, - * therefore no explicit interrupt disable is necessary */ - eicr = IXGBE_READ_REG(hw, IXGBE_EICR); - if (!eicr) { - /* - * shared interrupt alert! - * make sure interrupts are enabled because the read will - * have disabled interrupts due to EIAM; - * finish the workaround of silicon errata on 82598. Unmask - * the interrupt that we masked before the EICR read. - */ - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable(adapter, true, true); - return IRQ_NONE; /* Not our interrupt */ - } - - if (eicr & IXGBE_EICR_LSC) - ixgbe_check_lsc(adapter); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - ixgbe_check_sfp_event(adapter, eicr); - if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && - ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - adapter->interrupt_event = eicr; - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; - ixgbe_service_event_schedule(adapter); - } - } - break; - default: - break; - } - - ixgbe_check_fan_failure(adapter, eicr); - - if (napi_schedule_prep(&(q_vector->napi))) { - /* would disable interrupts here but EIAM disabled it */ - __napi_schedule(&(q_vector->napi)); - } - - /* - * re-enable link (maybe) and non-queue interrupts, no flush. - * ixgbe_poll will re-enable the queue interrupts - */ - - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable(adapter, false, false); - - return IRQ_HANDLED; -} - -static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) -{ - int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - for (i = 0; i < q_vectors; i++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; - bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES); - bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES); - q_vector->rx.count = 0; - q_vector->tx.count = 0; - } -} - -/** - * ixgbe_request_irq - initialize interrupts - * @adapter: board private structure - * - * Attempts to configure interrupts using the best available - * capabilities of the hardware and kernel. 
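- *
- * Preference order: MSI-X (one handler per vector), then MSI, then
- * legacy INTx; only the INTx case is requested IRQF_SHARED because
- * the line may be shared with other devices.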
- **/ -static int ixgbe_request_irq(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - err = ixgbe_request_msix_irqs(adapter); - } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { - err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, - netdev->name, adapter); - } else { - err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, - netdev->name, adapter); - } - - if (err) - e_err(probe, "request_irq failed, Error %d\n", err); - - return err; -} - -static void ixgbe_free_irq(struct ixgbe_adapter *adapter) -{ - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int i, q_vectors; - - q_vectors = adapter->num_msix_vectors; - - i = q_vectors - 1; - free_irq(adapter->msix_entries[i].vector, adapter); - - i--; - for (; i >= 0; i--) { - /* free only the irqs that were actually requested */ - if (!adapter->q_vector[i]->rx.count && - !adapter->q_vector[i]->tx.count) - continue; - - free_irq(adapter->msix_entries[i].vector, - adapter->q_vector[i]); - } - - ixgbe_reset_q_vectors(adapter); - } else { - free_irq(adapter->pdev->irq, adapter); - } -} - -/** - * ixgbe_irq_disable - Mask off interrupt generation on the NIC - * @adapter: board private structure - **/ -static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) -{ - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); - if (adapter->num_vfs > 32) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); - break; - default: - break; - } - IXGBE_WRITE_FLUSH(&adapter->hw); - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int i; - for (i = 0; i < adapter->num_msix_vectors; i++) - synchronize_irq(adapter->msix_entries[i].vector); - } else { - synchronize_irq(adapter->pdev->irq); - } -} - -/** - * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts - * - **/ -static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - IXGBE_WRITE_REG(hw, IXGBE_EITR(0), - EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); - - ixgbe_set_ivar(adapter, 0, 0, 0); - ixgbe_set_ivar(adapter, 1, 0, 0); - - map_vector_to_rxq(adapter, 0, 0); - map_vector_to_txq(adapter, 0, 0); - - e_info(hw, "Legacy interrupt IVAR setup done\n"); -} - -/** - * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset - * @adapter: board private structure - * @ring: structure containing ring specific data - * - * Configure the Tx descriptor ring after a reset. 
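ixgbe_irq_disable above has to write two EIMC_EX registers on 82599/X540 because those MACs support more than 32 MSI-X vectors, so the interrupt mask is effectively a 64-bit bitmap split across 32-bit registers. A minimal sketch of addressing one vector in such a split bitmap (hypothetical helper; register naming follows the driver, per-register coverage assumed):

/* Sketch only: mask MSI-X vector n (0..63) on a MAC with two extended
 * mask-clear registers, EIMC_EX(0) for vectors 0-31, EIMC_EX(1) for 32-63. */
static void sketch_mask_one_vector(struct ixgbe_hw *hw, unsigned int n)
{
	u32 reg = n / 32;          /* which EIMC_EX register */
	u32 bit = 1u << (n % 32);  /* bit within that register */

	IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(reg), bit);
}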
- **/ -void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - u64 tdba = ring->dma; - int wait_loop = 10; - u32 txdctl; - u8 reg_idx = ring->reg_idx; - - /* disable queue to avoid issues while updating state */ - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), - txdctl & ~IXGBE_TXDCTL_ENABLE); - IXGBE_WRITE_FLUSH(hw); - - IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), - (tdba & DMA_BIT_MASK(32))); - IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), - ring->count * sizeof(union ixgbe_adv_tx_desc)); - IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); - IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); - ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx); - - /* configure fetching thresholds */ - if (adapter->rx_itr_setting == 0) { - /* cannot set wthresh when itr==0 */ - txdctl &= ~0x007F0000; - } else { - /* enable WTHRESH=8 descriptors, to encourage burst writeback */ - txdctl |= (8 << 16); - } - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - /* PThresh workaround for Tx hang with DFP enabled. */ - txdctl |= 32; - } - - /* reinitialize flowdirector state */ - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && - adapter->atr_sample_rate) { - ring->atr_sample_rate = adapter->atr_sample_rate; - ring->atr_count = 0; - set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); - } else { - ring->atr_sample_rate = 0; - } - - clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); - - /* enable queue */ - txdctl |= IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); - - /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ - if (hw->mac.type == ixgbe_mac_82598EB && - !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) - return; - - /* poll to verify queue is enabled */ - do { - usleep_range(1000, 2000); - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); - } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); - if (!wait_loop) - e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); -} - -static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 rttdcs; - u32 reg; - u8 tcs = netdev_get_num_tc(adapter->netdev); - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - /* disable the arbiter while setting MTQC */ - rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); - rttdcs |= IXGBE_RTTDCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); - - /* set transmit pool layout */ - switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - case (IXGBE_FLAG_SRIOV_ENABLED): - IXGBE_WRITE_REG(hw, IXGBE_MTQC, - (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); - break; - default: - if (!tcs) - reg = IXGBE_MTQC_64Q_1PB; - else if (tcs <= 4) - reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; - else - reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; - - IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); - - /* Enable Security TX Buffer IFG for multiple pb */ - if (tcs) { - reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); - reg |= IXGBE_SECTX_DCB; - IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); - } - break; - } - - /* re-enable the arbiter */ - rttdcs &= ~IXGBE_RTTDCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); -} - -/** - * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset - * @adapter: board private structure - * - * Configure the Tx unit of the MAC after a reset. 
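ixgbe_configure_tx_ring above treats TXDCTL as packed threshold fields: PTHRESH sits in the low bits and WTHRESH at bit 16 (the 0x007F0000 mask it clears is exactly bits 22:16). The packing in isolation (user-space C, values taken from the code above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t txdctl = 0;

	txdctl &= ~0x007F0000u;  /* clear WTHRESH, bits 22:16 */
	txdctl |= 8u << 16;      /* WTHRESH = 8, batches descriptor write-back */
	txdctl |= 32u;           /* PTHRESH = 32, prefetch threshold in low bits */

	printf("TXDCTL = 0x%08X\n", txdctl);  /* prints 0x00080020 */
	return 0;
}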
- **/ -static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 dmatxctl; - u32 i; - - ixgbe_setup_mtqc(adapter); - - if (hw->mac.type != ixgbe_mac_82598EB) { - /* DMATXCTL.EN must be before Tx queues are enabled */ - dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); - dmatxctl |= IXGBE_DMATXCTL_TE; - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); - } - - /* Setup the HW Tx Head and Tail descriptor pointers */ - for (i = 0; i < adapter->num_tx_queues; i++) - ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); -} - -#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 - -static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) -{ - u32 srrctl; - u8 reg_idx = rx_ring->reg_idx; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: { - struct ixgbe_ring_feature *feature = adapter->ring_feature; - const int mask = feature[RING_F_RSS].mask; - reg_idx = reg_idx & mask; - } - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - default: - break; - } - - srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); - - srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; - srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; - if (adapter->num_vfs) - srrctl |= IXGBE_SRRCTL_DROP_EN; - - srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK; - - if (ring_is_ps_enabled(rx_ring)) { -#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER - srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; -#else - srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; -#endif - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else { - srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - } - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); -} - -static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, - 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, - 0x6A3E67EA, 0x14364D17, 0x3BED200D}; - u32 mrqc = 0, reta = 0; - u32 rxcsum; - int i, j; - u8 tcs = netdev_get_num_tc(adapter->netdev); - int maxq = adapter->ring_feature[RING_F_RSS].indices; - - if (tcs) - maxq = min(maxq, adapter->num_tx_queues / tcs); - - /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); - - /* Fill out redirection table */ - for (i = 0, j = 0; i < 128; i++, j++) { - if (j == maxq) - j = 0; - /* reta = 4-byte sliding window of - * 0x00..(indices-1)(indices-1)00..etc. 
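The redirection-table write just below builds each 32-bit RETA register out of four one-byte entries: (j * 0x11) mirrors a queue index below 16 into both nibbles of a byte (0x00, 0x11, 0x22, ...), and one register is flushed for every four table entries. Traced for maxq = 4 (user-space C):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reta = 0;
	int maxq = 4;

	for (int i = 0, j = 0; i < 8; i++, j++) {
		if (j == maxq)
			j = 0;
		reta = (reta << 8) | (uint32_t)(j * 0x11);
		if ((i & 3) == 3)  /* one register write per four entries */
			printf("RETA[%d] = 0x%08X\n", i >> 2, reta);
	}
	return 0;  /* both registers print 0x00112233 */
}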
*/ - reta = (reta << 8) | (j * 0x11); - if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); - } - - /* Disable indicating checksum in descriptor, enables RSS hash */ - rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); - rxcsum |= IXGBE_RXCSUM_PCSD; - IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); - - if (adapter->hw.mac.type == ixgbe_mac_82598EB && - (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { - mrqc = IXGBE_MRQC_RSSEN; - } else { - int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED - | IXGBE_FLAG_SRIOV_ENABLED); - - switch (mask) { - case (IXGBE_FLAG_RSS_ENABLED): - if (!tcs) - mrqc = IXGBE_MRQC_RSSEN; - else if (tcs <= 4) - mrqc = IXGBE_MRQC_RTRSS4TCEN; - else - mrqc = IXGBE_MRQC_RTRSS8TCEN; - break; - case (IXGBE_FLAG_SRIOV_ENABLED): - mrqc = IXGBE_MRQC_VMDQEN; - break; - default: - break; - } - } - - /* Perform hash on these packet types */ - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 - | IXGBE_MRQC_RSS_FIELD_IPV4_TCP - | IXGBE_MRQC_RSS_FIELD_IPV6 - | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; - - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); -} - -/** - * ixgbe_configure_rscctl - enable RSC for the indicated ring - * @adapter: address of board private structure - * @index: index of ring to set - **/ -static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 rscctrl; - int rx_buf_len; - u8 reg_idx = ring->reg_idx; - - if (!ring_is_rsc_enabled(ring)) - return; - - rx_buf_len = ring->rx_buf_len; - rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); - rscctrl |= IXGBE_RSCCTL_RSCEN; - /* - * we must limit the number of descriptors so that the - * total size of max desc * buf_len is not greater - * than 65535 - */ - if (ring_is_ps_enabled(ring)) { -#if (MAX_SKB_FRAGS > 16) - rscctrl |= IXGBE_RSCCTL_MAXDESC_16; -#elif (MAX_SKB_FRAGS > 8) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; -#elif (MAX_SKB_FRAGS > 4) - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; -#else - rscctrl |= IXGBE_RSCCTL_MAXDESC_1; -#endif - } else { - if (rx_buf_len < IXGBE_RXBUFFER_4096) - rscctrl |= IXGBE_RSCCTL_MAXDESC_16; - else if (rx_buf_len < IXGBE_RXBUFFER_8192) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; - else - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; - } - IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); -} - -/** - * ixgbe_set_uta - Set unicast filter table address - * @adapter: board private structure - * - * The unicast table address is a register array of 32-bit registers. 
- * The table is meant to be used in a way similar to how the MTA is used - * however due to certain limitations in the hardware it is necessary to - * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous - * enable bit to allow vlan tag stripping when promiscuous mode is enabled - **/ -static void ixgbe_set_uta(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int i; - - /* The UTA table only exists on 82599 hardware and newer */ - if (hw->mac.type < ixgbe_mac_82599EB) - return; - - /* we only need to do this if VMDq is enabled */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return; - - for (i = 0; i < 128; i++) - IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); -} - -#define IXGBE_MAX_RX_DESC_POLL 10 -static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - int wait_loop = IXGBE_MAX_RX_DESC_POLL; - u32 rxdctl; - u8 reg_idx = ring->reg_idx; - - /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ - if (hw->mac.type == ixgbe_mac_82598EB && - !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) - return; - - do { - usleep_range(1000, 2000); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); - - if (!wait_loop) { - e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " - "the polling period\n", reg_idx); - } -} - -void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - int wait_loop = IXGBE_MAX_RX_DESC_POLL; - u32 rxdctl; - u8 reg_idx = ring->reg_idx; - - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - rxdctl &= ~IXGBE_RXDCTL_ENABLE; - - /* write value back with RXDCTL.ENABLE bit cleared */ - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); - - if (hw->mac.type == ixgbe_mac_82598EB && - !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) - return; - - /* the hardware may take up to 100us to really disable the rx queue */ - do { - udelay(10); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); - - if (!wait_loop) { - e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " - "the polling period\n", reg_idx); - } -} - -void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - u64 rdba = ring->dma; - u32 rxdctl; - u8 reg_idx = ring->reg_idx; - - /* disable queue to avoid issues while updating state */ - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - ixgbe_disable_rx_queue(adapter, ring); - - IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); - IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), - ring->count * sizeof(union ixgbe_adv_rx_desc)); - IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); - IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); - ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx); - - ixgbe_configure_srrctl(adapter, ring); - ixgbe_configure_rscctl(adapter, ring); - - /* If operating in IOV mode set RLPML for X540 */ - if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && - hw->mac.type == ixgbe_mac_X540) { - rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; - rxdctl |= ((ring->netdev->mtu + ETH_HLEN + - ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN); - } - - if (hw->mac.type == ixgbe_mac_82598EB) { - /* - * enable cache line friendly hardware writes: - * PTHRESH=32 descriptors (half the internal cache), - * this also 
removes ugly rx_no_buffer_count increment - * HTHRESH=4 descriptors (to minimize latency on fetch) - * WTHRESH=8 burst writeback up to two cache lines - */ - rxdctl &= ~0x3FFFFF; - rxdctl |= 0x080420; - } - - /* enable receive descriptor ring */ - rxdctl |= IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); - - ixgbe_rx_desc_queue_enable(adapter, ring); - ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); -} - -static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int p; - - /* PSRTYPE must be initialized in non 82598 adapters */ - u32 psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_L2HDR | - IXGBE_PSRTYPE_IPV6HDR; - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) - psrtype |= (adapter->num_rx_queues_per_pool << 29); - - for (p = 0; p < adapter->num_rx_pools; p++) - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), - psrtype); -} - -static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 gcr_ext; - u32 vt_reg_bits; - u32 reg_offset, vf_shift; - u32 vmdctl; - - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return; - - vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN; - vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); - - vf_shift = adapter->num_vfs % 32; - reg_offset = (adapter->num_vfs > 32) ? 1 : 0; - - /* Enable only the PF's pool for Tx/Rx */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0); - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - - /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ - hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); - - /* - * Set up VF register offsets for selected VT Mode, - * i.e. 
32 or 64 VFs for SR-IOV - */ - gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); - gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; - gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); - - /* enable Tx loopback for VF/PF communication */ - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - /* Enable MAC Anti-Spoofing */ - hw->mac.ops.set_mac_anti_spoofing(hw, - (adapter->antispoofing_enabled = - (adapter->num_vfs != 0)), - adapter->num_vfs); -} - -static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - int rx_buf_len; - struct ixgbe_ring *rx_ring; - int i; - u32 mhadd, hlreg0; - - /* Decide whether to use packet split mode or not */ - /* On by default */ - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; - - /* Do not use packet split if we're in SR-IOV Mode */ - if (adapter->num_vfs) - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; - - /* Disable packet split due to 82599 erratum #45 */ - if (hw->mac.type == ixgbe_mac_82599EB) - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; - - /* Set the RX buffer length according to the mode */ - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - rx_buf_len = IXGBE_RX_HDR_SIZE; - } else { - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && - (netdev->mtu <= ETH_DATA_LEN)) - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - else - rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024); - } - -#ifdef IXGBE_FCOE - /* adjust max frame to be able to do baby jumbo for FCoE */ - if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && - (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) - max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; - -#endif /* IXGBE_FCOE */ - mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); - if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { - mhadd &= ~IXGBE_MHADD_MFS_MASK; - mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; - - IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); - } - - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ - hlreg0 |= IXGBE_HLREG0_JUMBOEN; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); - - /* - * Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring - */ - for (i = 0; i < adapter->num_rx_queues; i++) { - rx_ring = adapter->rx_ring[i]; - rx_ring->rx_buf_len = rx_buf_len; - - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) - set_ring_ps_enabled(rx_ring); - else - clear_ring_ps_enabled(rx_ring); - - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - set_ring_rsc_enabled(rx_ring); - else - clear_ring_rsc_enabled(rx_ring); - -#ifdef IXGBE_FCOE - if (netdev->features & NETIF_F_FCOE_MTU) { - struct ixgbe_ring_feature *f; - f = &adapter->ring_feature[RING_F_FCOE]; - if ((i >= f->mask) && (i < f->mask + f->indices)) { - clear_ring_ps_enabled(rx_ring); - if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) - rx_ring->rx_buf_len = - IXGBE_FCOE_JUMBO_FRAME_SIZE; - } else if (!ring_is_rsc_enabled(rx_ring) && - !ring_is_ps_enabled(rx_ring)) { - rx_ring->rx_buf_len = - IXGBE_FCOE_JUMBO_FRAME_SIZE; - } - } -#endif /* IXGBE_FCOE */ - } -} - -static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - /* - * For VMDq support of different descriptor types or - * buffer sizes through the use of multiple SRRCTL - * registers, RDRXCTL.MVMEN must be set to 
1 - * - * also, the manual doesn't mention it clearly but DCA hints - * will only use queue 0's tags unless this bit is set. Side - * effects of setting this bit are only that SRRCTL must be - * fully programmed [0..15] - */ - rdrxctl |= IXGBE_RDRXCTL_MVMEN; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - /* Disable RSC for ACK packets */ - IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, - (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); - rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; - /* hardware requires some bits to be set by default */ - rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); - rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; - break; - default: - /* We should do nothing since we don't know this hardware */ - return; - } - - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); -} - -/** - * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset - * @adapter: board private structure - * - * Configure the Rx unit of the MAC after a reset. - **/ -static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int i; - u32 rxctrl; - - /* disable receives while setting up the descriptors */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); - - ixgbe_setup_psrtype(adapter); - ixgbe_setup_rdrxctl(adapter); - - /* Program registers for the distribution of queues */ - ixgbe_setup_mrqc(adapter); - - ixgbe_set_uta(adapter); - - /* set_rx_buffer_len must be called before ring initialization */ - ixgbe_set_rx_buffer_len(adapter); - - /* - * Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring - */ - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); - - /* disable drop enable for 82598 parts */ - if (hw->mac.type == ixgbe_mac_82598EB) - rxctrl |= IXGBE_RXCTRL_DMBYPS; - - /* enable all receives */ - rxctrl |= IXGBE_RXCTRL_RXEN; - hw->mac.ops.enable_rx_dma(hw, rxctrl); -} - -static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int pool_ndx = adapter->num_vfs; - - /* add VID to filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); - set_bit(vid, adapter->active_vlans); -} - -static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int pool_ndx = adapter->num_vfs; - - /* remove VID from filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); - clear_bit(vid, adapter->active_vlans); -} - -/** - * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering - * @adapter: driver data - */ -static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl; - - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); -} - -/** - * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering - * @adapter: driver data - */ -static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl; - - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl |= IXGBE_VLNCTRL_VFE; - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); -} - -/** - * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping 
- * @adapter: driver data - */ -static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl; - int i, j; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl &= ~IXGBE_VLNCTRL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - for (i = 0; i < adapter->num_rx_queues; i++) { - j = adapter->rx_ring[i]->reg_idx; - vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); - vlnctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); - } - break; - default: - break; - } -} - -/** - * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping - * @adapter: driver data - */ -static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl; - int i, j; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl |= IXGBE_VLNCTRL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - for (i = 0; i < adapter->num_rx_queues; i++) { - j = adapter->rx_ring[i]->reg_idx; - vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); - vlnctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); - } - break; - default: - break; - } -} - -static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) -{ - u16 vid; - - ixgbe_vlan_rx_add_vid(adapter->netdev, 0); - - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - ixgbe_vlan_rx_add_vid(adapter->netdev, vid); -} - -/** - * ixgbe_write_uc_addr_list - write unicast addresses to RAR table - * @netdev: network interface device structure - * - * Writes unicast address list to the RAR table. - * Returns: -ENOMEM on failure/insufficient address space - * 0 on no addresses written - * X on writing X addresses to the RAR table - **/ -static int ixgbe_write_uc_addr_list(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - unsigned int vfn = adapter->num_vfs; - unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS; - int count = 0; - - /* return ENOMEM indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > rar_entries) - return -ENOMEM; - - if (!netdev_uc_empty(netdev) && rar_entries) { - struct netdev_hw_addr *ha; - /* return error if we do not support writing to RAR table */ - if (!hw->mac.ops.set_rar) - return -ENOMEM; - - netdev_for_each_uc_addr(ha, netdev) { - if (!rar_entries) - break; - hw->mac.ops.set_rar(hw, rar_entries--, ha->addr, - vfn, IXGBE_RAH_AV); - count++; - } - } - /* write the addresses in reverse order to avoid write combining */ - for (; rar_entries > 0 ; rar_entries--) - hw->mac.ops.clear_rar(hw, rar_entries); - - return count; -} - -/** - * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set - * @netdev: network interface device structure - * - * The set_rx_method entry point is called whenever the unicast/multicast - * address list or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper unicast, multicast and - * promiscuous mode. 
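The strip enable/disable helpers above also show a MAC-generation split: 82598 keeps one global VME bit in VLNCTRL, while 82599/X540 moved VLAN stripping into each ring's RXDCTL register so it can differ per queue. Condensed into one hedged sketch (hypothetical helper, not the driver's exact code):

/* Sketch: where the VLAN-strip control bit lives per MAC generation. */
static void sketch_vlan_strip(struct ixgbe_adapter *adapter, bool enable)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;
	int i;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);  /* one device-wide bit */
		reg = enable ? (reg | IXGBE_VLNCTRL_VME) : (reg & ~IXGBE_VLNCTRL_VME);
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
		return;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {    /* one bit per ring */
		u8 idx = adapter->rx_ring[i]->reg_idx;

		reg = IXGBE_READ_REG(hw, IXGBE_RXDCTL(idx));
		reg = enable ? (reg | IXGBE_RXDCTL_VME) : (reg & ~IXGBE_RXDCTL_VME);
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(idx), reg);
	}
}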
- **/ -void ixgbe_set_rx_mode(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; - int count; - - /* Check for Promiscuous and All Multicast modes */ - - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - - /* set all bits that we expect to always be set */ - fctrl |= IXGBE_FCTRL_BAM; - fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ - fctrl |= IXGBE_FCTRL_PMCF; - - /* clear the bits we are changing the status of */ - fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); - - if (netdev->flags & IFF_PROMISC) { - hw->addr_ctrl.user_set_promisc = true; - fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); - vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); - /* don't hardware filter vlans in promisc mode */ - ixgbe_vlan_filter_disable(adapter); - } else { - if (netdev->flags & IFF_ALLMULTI) { - fctrl |= IXGBE_FCTRL_MPE; - vmolr |= IXGBE_VMOLR_MPE; - } else { - /* - * Write addresses to the MTA, if the attempt fails - * then we should just turn on promiscuous mode so - * that we can at least receive multicast traffic - */ - hw->mac.ops.update_mc_addr_list(hw, netdev); - vmolr |= IXGBE_VMOLR_ROMPE; - } - ixgbe_vlan_filter_enable(adapter); - hw->addr_ctrl.user_set_promisc = false; - /* - * Write addresses to available RAR registers, if there is not - * sufficient space to store all the addresses then enable - * unicast promiscuous mode - */ - count = ixgbe_write_uc_addr_list(netdev); - if (count < 0) { - fctrl |= IXGBE_FCTRL_UPE; - vmolr |= IXGBE_VMOLR_ROPE; - } - } - - if (adapter->num_vfs) { - ixgbe_restore_vf_multicasts(adapter); - vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) & - ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_ROPE); - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); - } - - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - - if (netdev->features & NETIF_F_HW_VLAN_RX) - ixgbe_vlan_strip_enable(adapter); - else - ixgbe_vlan_strip_disable(adapter); -} - -static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) -{ - int q_idx; - struct ixgbe_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* legacy and MSI only use one vector */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; - - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - struct napi_struct *napi; - q_vector = adapter->q_vector[q_idx]; - napi = &q_vector->napi; - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - if (!q_vector->rx.count || !q_vector->tx.count) { - if (q_vector->tx.count == 1) - napi->poll = &ixgbe_clean_txonly; - else if (q_vector->rx.count == 1) - napi->poll = &ixgbe_clean_rxonly; - } - } - - napi_enable(napi); - } -} - -static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) -{ - int q_idx; - struct ixgbe_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* legacy and MSI only use one vector */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; - - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = adapter->q_vector[q_idx]; - napi_disable(&q_vector->napi); - } -} - -#ifdef CONFIG_IXGBE_DCB -/* - * ixgbe_configure_dcb - Configure DCB hardware - * @adapter: ixgbe adapter struct - * - * This is called by the driver on open to configure the DCB hardware. - * This is also called by the gennetlink interface when reconfiguring - * the DCB state. 
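Inside ixgbe_configure_dcb below, the per-TC RQTC field is derived from each traffic class's queue count with a shift loop; the loop is floor(log2(n)) in disguise, so a class with 8 queues encodes as 3. Verified in isolation (user-space C):

#include <stdio.h>

int main(void)
{
	unsigned int counts[] = { 1, 2, 4, 8 };

	for (int i = 0; i < 4; i++) {
		unsigned int cnt = counts[i];
		unsigned int msb = 0;

		while (cnt >>= 1)  /* shift until empty: floor(log2) */
			msb++;
		printf("count %u -> msb %u\n", counts[i], msb);
	}
	return 0;  /* 1->0, 2->1, 4->2, 8->3 */
}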
- */ -static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - - if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { - if (hw->mac.type == ixgbe_mac_82598EB) - netif_set_gso_max_size(adapter->netdev, 65536); - return; - } - - if (hw->mac.type == ixgbe_mac_82598EB) - netif_set_gso_max_size(adapter->netdev, 32768); - - - /* Enable VLAN tag insert/strip */ - adapter->netdev->features |= NETIF_F_HW_VLAN_RX; - - hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); - - /* reconfigure the hardware */ - if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { -#ifdef CONFIG_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) - max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); -#endif - ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, - DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, - DCB_RX_CONFIG); - ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); - } else { - struct net_device *dev = adapter->netdev; - - if (adapter->ixgbe_ieee_ets) - dev->dcbnl_ops->ieee_setets(dev, - adapter->ixgbe_ieee_ets); - if (adapter->ixgbe_ieee_pfc) - dev->dcbnl_ops->ieee_setpfc(dev, - adapter->ixgbe_ieee_pfc); - } - - /* Enable RSS Hash per TC */ - if (hw->mac.type != ixgbe_mac_82598EB) { - int i; - u32 reg = 0; - - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - u8 msb = 0; - u8 cnt = adapter->netdev->tc_to_txq[i].count; - - while (cnt >>= 1) - msb++; - - reg |= msb << IXGBE_RQTC_SHIFT_TC(i); - } - IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); - } -} - -#endif - -static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) -{ - int hdrm = 0; - int num_tc = netdev_get_num_tc(adapter->netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || - adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) - hdrm = 64 << adapter->fdir_pballoc; - - hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL); -} - -static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct hlist_node *node, *node2; - struct ixgbe_fdir_filter *filter; - - spin_lock(&adapter->fdir_perfect_lock); - - if (!hlist_empty(&adapter->fdir_filter_list)) - ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); - - hlist_for_each_entry_safe(filter, node, node2, - &adapter->fdir_filter_list, fdir_node) { - ixgbe_fdir_write_perfect_filter_82599(hw, - &filter->filter, - filter->sw_idx, - (filter->action == IXGBE_FDIR_DROP_QUEUE) ? 
- IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[filter->action]->reg_idx); - } - - spin_unlock(&adapter->fdir_perfect_lock); -} - -static void ixgbe_configure(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - int i; - - ixgbe_configure_pb(adapter); -#ifdef CONFIG_IXGBE_DCB - ixgbe_configure_dcb(adapter); -#endif - - ixgbe_set_rx_mode(netdev); - ixgbe_restore_vlan(adapter); - -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - ixgbe_configure_fcoe(adapter); - -#endif /* IXGBE_FCOE */ - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->atr_sample_rate = - adapter->atr_sample_rate; - ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); - } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { - ixgbe_init_fdir_perfect_82599(&adapter->hw, - adapter->fdir_pballoc); - ixgbe_fdir_filter_restore(adapter); - } - ixgbe_configure_virtualization(adapter); - - ixgbe_configure_tx(adapter); - ixgbe_configure_rx(adapter); -} - -static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) -{ - switch (hw->phy.type) { - case ixgbe_phy_sfp_avago: - case ixgbe_phy_sfp_ftl: - case ixgbe_phy_sfp_intel: - case ixgbe_phy_sfp_unknown: - case ixgbe_phy_sfp_passive_tyco: - case ixgbe_phy_sfp_passive_unknown: - case ixgbe_phy_sfp_active_unknown: - case ixgbe_phy_sfp_ftl_active: - return true; - default: - return false; - } -} - -/** - * ixgbe_sfp_link_config - set up SFP+ link - * @adapter: pointer to private adapter struct - **/ -static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) -{ - /* - * We are assuming the worst case scenario here, and that - * is that an SFP was inserted/removed after the reset - * but before SFP detection was enabled.
As such the best - * solution is to just start searching as soon as we start - */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; - - adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; -} - -/** - * ixgbe_non_sfp_link_config - set up non-SFP+ link - * @hw: pointer to private hardware struct - * - * Returns 0 on success, negative on failure - **/ -static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) -{ - u32 autoneg; - bool negotiation, link_up = false; - u32 ret = IXGBE_ERR_LINK_SETUP; - - if (hw->mac.ops.check_link) - ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); - - if (ret) - goto link_cfg_out; - - autoneg = hw->phy.autoneg_advertised; - if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) - ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, - &negotiation); - if (ret) - goto link_cfg_out; - - if (hw->mac.ops.setup_link) - ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up); -link_cfg_out: - return ret; -} - -static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 gpie = 0; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | - IXGBE_GPIE_OCD; - gpie |= IXGBE_GPIE_EIAME; - /* - * use EIAM to auto-mask when MSI-X interrupt is asserted - * this saves a register write for every interrupt - */ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - default: - IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); - IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); - break; - } - } else { - /* legacy interrupts, use EIAM to auto-mask when reading EICR, - * specifically only auto mask tx and rx interrupts */ - IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); - } - - /* XXX: to interrupt immediately for EICS writes, enable this */ - /* gpie |= IXGBE_GPIE_EIMEN; */ - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - gpie &= ~IXGBE_GPIE_VTMODE_MASK; - gpie |= IXGBE_GPIE_VTMODE_64; - } - - /* Enable fan failure interrupt */ - if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - gpie |= IXGBE_SDP1_GPIEN; - - if (hw->mac.type == ixgbe_mac_82599EB) { - gpie |= IXGBE_SDP1_GPIEN; - gpie |= IXGBE_SDP2_GPIEN; - } - - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); -} - -static int ixgbe_up_complete(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int err; - u32 ctrl_ext; - - ixgbe_get_hw_control(adapter); - ixgbe_setup_gpie(adapter); - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - ixgbe_configure_msix(adapter); - else - ixgbe_configure_msi_and_legacy(adapter); - - /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */ - if (hw->mac.ops.enable_tx_laser && - ((hw->phy.multispeed_fiber) || - ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && - (hw->mac.type == ixgbe_mac_82599EB)))) - hw->mac.ops.enable_tx_laser(hw); - - clear_bit(__IXGBE_DOWN, &adapter->state); - ixgbe_napi_enable_all(adapter); - - if (ixgbe_is_sfp(hw)) { - ixgbe_sfp_link_config(adapter); - } else { - err = ixgbe_non_sfp_link_config(hw); - if (err) - e_err(probe, "link_config FAILED %d\n", err); - } - - /* clear any pending interrupts, may auto mask */ - IXGBE_READ_REG(hw, IXGBE_EICR); - ixgbe_irq_enable(adapter, true, true); - - /* - * If this adapter has a fan, check to see if we had a failure - * before we enabled the interrupt. 
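ixgbe_reinit_locked further below serializes full resets with nothing more than an atomic state bit: whoever loses the test_and_set_bit race sleeps and retries, so down/up sequences never interleave. The guard pattern on its own (sketch; assumes the kernel bitop helpers already used in this file):

/* Sketch: sleepable mutual exclusion built on one atomic state bit. */
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
	usleep_range(1000, 2000);      /* another reset in flight, wait */

/* ... exclusive section: ixgbe_down(adapter); ixgbe_up(adapter); ... */

clear_bit(__IXGBE_RESETTING, &adapter->state);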
- */ - if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { - u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - if (esdp & IXGBE_ESDP_SDP1) - e_crit(drv, "Fan has stopped, replace the adapter\n"); - } - - /* enable transmits */ - netif_tx_start_all_queues(adapter->netdev); - - /* bring the link up in the watchdog, this could race with our first - * link up interrupt but shouldn't be a problem */ - adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; - adapter->link_check_timeout = jiffies; - mod_timer(&adapter->service_timer, jiffies); - - /* Set PF Reset Done bit so PF/VF Mail Ops can work */ - ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); - ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; - IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); - - return 0; -} - -void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) -{ - WARN_ON(in_interrupt()); - /* put off any impending NetWatchDogTimeout */ - adapter->netdev->trans_start = jiffies; - - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) - usleep_range(1000, 2000); - ixgbe_down(adapter); - /* - * If SR-IOV enabled then wait a bit before bringing the adapter - * back up to give the VFs time to respond to the reset. The - * two second wait is based upon the watchdog timer cycle in - * the VF driver. - */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - msleep(2000); - ixgbe_up(adapter); - clear_bit(__IXGBE_RESETTING, &adapter->state); -} - -int ixgbe_up(struct ixgbe_adapter *adapter) -{ - /* hardware has been reset, we need to reload some things */ - ixgbe_configure(adapter); - - return ixgbe_up_complete(adapter); -} - -void ixgbe_reset(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int err; - - /* lock SFP init bit to prevent race conditions with the watchdog */ - while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - usleep_range(1000, 2000); - - /* clear all SFP and link config related flags while holding SFP_INIT */ - adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | - IXGBE_FLAG2_SFP_NEEDS_RESET); - adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; - - err = hw->mac.ops.init_hw(hw); - switch (err) { - case 0: - case IXGBE_ERR_SFP_NOT_PRESENT: - case IXGBE_ERR_SFP_NOT_SUPPORTED: - break; - case IXGBE_ERR_MASTER_REQUESTS_PENDING: - e_dev_err("master disable timed out\n"); - break; - case IXGBE_ERR_EEPROM_VERSION: - /* We are running on a pre-production device, log a warning */ - e_dev_warn("This device is a pre-production adapter/LOM. " - "Please be aware there may be issues associated with " - "your hardware. If you are experiencing problems " - "please contact your Intel or hardware " - "representative who provided you with this " - "hardware.\n"); - break; - default: - e_dev_err("Hardware Error: %d\n", err); - } - - clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - - /* reprogram the RAR[0] in case user changed it.
*/ - hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, - IXGBE_RAH_AV); -} - -/** - * ixgbe_clean_rx_ring - Free Rx Buffers per Queue - * @rx_ring: ring to free buffers from - **/ -static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - unsigned long size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buffer_info) - return; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer_info; - - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - if (rx_buffer_info->dma) { - dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - } - if (rx_buffer_info->skb) { - struct sk_buff *skb = rx_buffer_info->skb; - rx_buffer_info->skb = NULL; - do { - struct sk_buff *this = skb; - if (IXGBE_RSC_CB(this)->delay_unmap) { - dma_unmap_single(dev, - IXGBE_RSC_CB(this)->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - IXGBE_RSC_CB(this)->dma = 0; - IXGBE_RSC_CB(skb)->delay_unmap = false; - } - skb = skb->prev; - dev_kfree_skb(this); - } while (skb); - } - if (!rx_buffer_info->page) - continue; - if (rx_buffer_info->page_dma) { - dma_unmap_page(dev, rx_buffer_info->page_dma, - PAGE_SIZE / 2, DMA_FROM_DEVICE); - rx_buffer_info->page_dma = 0; - } - put_page(rx_buffer_info->page); - rx_buffer_info->page = NULL; - rx_buffer_info->page_offset = 0; - } - - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; -} - -/** - * ixgbe_clean_tx_ring - Free Tx Buffers - * @tx_ring: ring to be cleaned - **/ -static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) -{ - struct ixgbe_tx_buffer *tx_buffer_info; - unsigned long size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!tx_ring->tx_buffer_info) - return; - - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - } - - size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; - memset(tx_ring->tx_buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; -} - -/** - * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues - * @adapter: board private structure - **/ -static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbe_clean_rx_ring(adapter->rx_ring[i]); -} - -/** - * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues - * @adapter: board private structure - **/ -static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - ixgbe_clean_tx_ring(adapter->tx_ring[i]); -} - -static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) -{ - struct hlist_node *node, *node2; - struct ixgbe_fdir_filter *filter; - - spin_lock(&adapter->fdir_perfect_lock); - - hlist_for_each_entry_safe(filter, node, node2, - &adapter->fdir_filter_list, fdir_node) { - hlist_del(&filter->fdir_node); - kfree(filter); - } - adapter->fdir_filter_count = 0; - - spin_unlock(&adapter->fdir_perfect_lock); -} - -void ixgbe_down(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = 
adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - u32 rxctrl; - int i; - int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* signal that we are down to the interrupt handler */ - set_bit(__IXGBE_DOWN, &adapter->state); - - /* disable receives */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); - - /* disable all enabled rx queues */ - for (i = 0; i < adapter->num_rx_queues; i++) - /* this call also flushes the previous write */ - ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); - - usleep_range(10000, 20000); - - netif_tx_stop_all_queues(netdev); - - /* call carrier off first to avoid false dev_watchdog timeouts */ - netif_carrier_off(netdev); - netif_tx_disable(netdev); - - ixgbe_irq_disable(adapter); - - ixgbe_napi_disable_all(adapter); - - adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | - IXGBE_FLAG2_RESET_REQUESTED); - adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; - - del_timer_sync(&adapter->service_timer); - - /* disable receive for all VFs and wait one second */ - if (adapter->num_vfs) { - /* ping all the active vfs to let them know we are going down */ - ixgbe_ping_all_vfs(adapter); - - /* Disable all VFTE/VFRE TX/RX */ - ixgbe_disable_tx_rx(adapter); - - /* Mark all the VFs as inactive */ - for (i = 0 ; i < adapter->num_vfs; i++) - adapter->vfinfo[i].clear_to_send = 0; - } - - /* Cleanup the affinity_hint CPU mask memory and callback */ - for (i = 0; i < num_q_vectors; i++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(adapter->msix_entries[i]. vector, NULL); - /* release the CPU mask memory */ - free_cpumask_var(q_vector->affinity_mask); - } - - /* disable transmits in the hardware now that interrupts are off */ - for (i = 0; i < adapter->num_tx_queues; i++) { - u8 reg_idx = adapter->tx_ring[i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); - } - - /* Disable the Tx DMA engine on 82599 and X540 */ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, - (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & - ~IXGBE_DMATXCTL_TE)); - break; - default: - break; - } - - if (!pci_channel_offline(adapter->pdev)) - ixgbe_reset(adapter); - - /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ - if (hw->mac.ops.disable_tx_laser && - ((hw->phy.multispeed_fiber) || - ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && - (hw->mac.type == ixgbe_mac_82599EB)))) - hw->mac.ops.disable_tx_laser(hw); - - ixgbe_clean_all_tx_rings(adapter); - ixgbe_clean_all_rx_rings(adapter); - -#ifdef CONFIG_IXGBE_DCA - /* since we reset the hardware DCA settings were cleared */ - ixgbe_setup_dca(adapter); -#endif -} - -/** - * ixgbe_poll - NAPI Rx polling callback - * @napi: structure for representing this polling device - * @budget: how many packets driver is allowed to clean - * - * This function is used for legacy and MSI, NAPI mode - **/ -static int ixgbe_poll(struct napi_struct *napi, int budget) -{ - struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); - struct ixgbe_adapter *adapter = q_vector->adapter; - int tx_clean_complete, work_done = 0; - -#ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_dca(q_vector); -#endif - - tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); - ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], 
&work_done, budget); - - if (!tx_clean_complete) - work_done = budget; - - /* If budget not fully consumed, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - if (adapter->rx_itr_setting & 1) - ixgbe_set_itr(q_vector); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); - } - return work_done; -} - -/** - * ixgbe_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - **/ -static void ixgbe_tx_timeout(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - /* Do the reset outside of interrupt context */ - ixgbe_tx_timeout_reset(adapter); -} - -/** - * ixgbe_set_rss_queues: Allocate queues for RSS - * @adapter: board private structure to initialize - * - * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try - * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. - * - **/ -static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) -{ - bool ret = false; - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - f->mask = 0xF; - adapter->num_rx_queues = f->indices; - adapter->num_tx_queues = f->indices; - ret = true; - } else { - ret = false; - } - - return ret; -} - -/** - * ixgbe_set_fdir_queues: Allocate queues for Flow Director - * @adapter: board private structure to initialize - * - * Flow Director is an advanced Rx filter, attempting to get Rx flows back - * to the original CPU that initiated the Tx session. This runs in addition - * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the - * Rx load across CPUs using RSS. - * - **/ -static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) -{ - bool ret = false; - struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; - - f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); - f_fdir->mask = 0; - - /* Flow Director must have RSS enabled */ - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && - (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { - adapter->num_tx_queues = f_fdir->indices; - adapter->num_rx_queues = f_fdir->indices; - ret = true; - } else { - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - } - return ret; -} - -#ifdef IXGBE_FCOE -/** - * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) - * @adapter: board private structure to initialize - * - * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. - * The ring feature mask is not used as a mask for FCoE, as it can take any 8 - * rx queues out of the max number of rx queues, instead, it is used as the - * index of the first rx queue used by FCoE. 
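Because f->mask is a starting index here rather than a bitmask, the FCoE rings simply land after whatever RSS or Flow Director already claimed. For instance, 16 RSS queues plus 8 FCoE exchanges (hypothetical sizes) lay out like this (user-space C):

#include <stdio.h>

int main(void)
{
	int num_rx = 16;         /* queues already claimed by RSS */
	int fcoe_indices = 8;    /* FCoE rings to append */
	int fcoe_mask = num_rx;  /* index of the first FCoE rx queue */

	num_rx += fcoe_indices;
	printf("RSS queues 0..%d, FCoE queues %d..%d, %d total\n",
	       fcoe_mask - 1, fcoe_mask, num_rx - 1, num_rx);
	return 0;  /* RSS 0..15, FCoE 16..23, 24 total */
}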
- * - **/ -static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return false; - - f->indices = min((int)num_online_cpus(), f->indices); - - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - e_info(probe, "FCoE enabled with RSS\n"); - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) - ixgbe_set_fdir_queues(adapter); - else - ixgbe_set_rss_queues(adapter); - } - - /* adding FCoE rx rings to the end */ - f->mask = adapter->num_rx_queues; - adapter->num_rx_queues += f->indices; - adapter->num_tx_queues += f->indices; - - return true; -} -#endif /* IXGBE_FCOE */ - -/* Artificial max queue cap per traffic class in DCB mode */ -#define DCB_QUEUE_CAP 8 - -#ifdef CONFIG_IXGBE_DCB -static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) -{ - int per_tc_q, q, i, offset = 0; - struct net_device *dev = adapter->netdev; - int tcs = netdev_get_num_tc(dev); - - if (!tcs) - return false; - - /* Map queue offset and counts onto allocated tx queues */ - per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP); - q = min((int)num_online_cpus(), per_tc_q); - - for (i = 0; i < tcs; i++) { - netdev_set_prio_tc_map(dev, i, i); - netdev_set_tc_queue(dev, i, q, offset); - offset += q; - } - - adapter->num_tx_queues = q * tcs; - adapter->num_rx_queues = q * tcs; - -#ifdef IXGBE_FCOE - /* FCoE enabled queues require special configuration indexed - * by feature specific indices and mask. Here we map FCoE - * indices onto the DCB queue pairs allowing FCoE to own - * configuration later. - */ - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - int tc; - struct ixgbe_ring_feature *f = - &adapter->ring_feature[RING_F_FCOE]; - - tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up); - f->indices = dev->tc_to_txq[tc].count; - f->mask = dev->tc_to_txq[tc].offset; - } -#endif - - return true; -} -#endif - -/** - * ixgbe_set_sriov_queues: Allocate queues for IOV use - * @adapter: board private structure to initialize - * - * IOV doesn't actually use anything, so just NAK the - * request for now and let the other queue routines - * figure out what to do. - */ -static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) -{ - return false; -} - -/* - * ixgbe_set_num_queues: Allocate queues for device, feature dependent - * @adapter: board private structure to initialize - * - * This is the top level queue allocation routine. The order here is very - * important, starting with the "most" number of features turned on at once, - * and ending with the smallest set of features. This way large combinations - * can be allocated if they're turned on, and smaller combinations are the - * fallthrough conditions. 
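The DCB path above hands every traffic class an identical contiguous slice: the per-TC queue count is capped by both DCB_QUEUE_CAP (8) and the online CPU count, and TC i then owns queues [i*q, i*q + q). Worked through for 64 hardware tx queues, 4 TCs and 8 CPUs (user-space C, hypothetical sizes):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define DCB_QUEUE_CAP 8

int main(void)
{
	int num_tx_queues = 64, tcs = 4, cpus = 8, offset = 0;
	int per_tc_q = MIN(num_tx_queues / tcs, DCB_QUEUE_CAP);  /* min(16, 8) = 8 */
	int q = MIN(cpus, per_tc_q);                             /* min(8, 8) = 8 */

	for (int tc = 0; tc < tcs; tc++) {
		printf("TC %d -> queues %d..%d\n", tc, offset, offset + q - 1);
		offset += q;
	}
	return 0;  /* 8 queue pairs per TC, 32 of the 64 queues in use */
}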
- * - **/ -static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) -{ - /* Start with base case */ - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - adapter->num_rx_pools = adapter->num_rx_queues; - adapter->num_rx_queues_per_pool = 1; - - if (ixgbe_set_sriov_queues(adapter)) - goto done; - -#ifdef CONFIG_IXGBE_DCB - if (ixgbe_set_dcb_queues(adapter)) - goto done; - -#endif -#ifdef IXGBE_FCOE - if (ixgbe_set_fcoe_queues(adapter)) - goto done; - -#endif /* IXGBE_FCOE */ - if (ixgbe_set_fdir_queues(adapter)) - goto done; - - if (ixgbe_set_rss_queues(adapter)) - goto done; - - /* fallback to base case */ - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - -done: - /* Notify the stack of the (possibly) reduced queue counts. */ - netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); - return netif_set_real_num_rx_queues(adapter->netdev, - adapter->num_rx_queues); -} - -static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, - int vectors) -{ - int err, vector_threshold; - - /* We'll want at least 3 (vector_threshold): - * 1) TxQ[0] Cleanup - * 2) RxQ[0] Cleanup - * 3) Other (Link Status Change, etc.) - * 4) TCP Timer (optional) - */ - vector_threshold = MIN_MSIX_COUNT; - - /* The more we get, the more we will assign to Tx/Rx Cleanup - * for the separate queues...where Rx Cleanup >= Tx Cleanup. - * Right now, we simply care about how many we'll get; we'll - * set them up later while requesting irq's. - */ - while (vectors >= vector_threshold) { - err = pci_enable_msix(adapter->pdev, adapter->msix_entries, - vectors); - if (!err) /* Success in acquiring all requested vectors. */ - break; - else if (err < 0) - vectors = 0; /* Nasty failure, quit now */ - else /* err == number of vectors we should try again with */ - vectors = err; - } - - if (vectors < vector_threshold) { - /* Can't allocate enough MSI-X interrupts? Oh well. - * This just means we'll go with either a single MSI - * vector or fall back to legacy interrupts. - */ - netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, - "Unable to allocate MSI-X interrupts\n"); - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } else { - adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ - /* - * Adjust for only the vectors we'll use, which is minimum - * of max_msix_q_vectors + NON_Q_VECTORS, or the number of - * vectors we were allocated. - */ - adapter->num_msix_vectors = min(vectors, - adapter->max_msix_q_vectors + NON_Q_VECTORS); - } -} - -/** - * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for RSS to the assigned rings. 
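ixgbe_acquire_msix_vectors above relies on the old pci_enable_msix() contract: zero means every requested vector was granted, a negative value is a hard failure, and a positive value is how many vectors the platform can actually offer, which the loop feeds straight back in as the next request. The bare negotiation, separated out (sketch; try_enable is a hypothetical stand-in for pci_enable_msix):

/* Sketch: haggle the vector count down until it fits or hits the floor. */
static int sketch_acquire_vectors(int (*try_enable)(int want), int want, int min)
{
	while (want >= min) {
		int err = try_enable(want);

		if (err == 0)
			return want;  /* granted all 'want' vectors */
		if (err < 0)
			return err;   /* hard failure, caller falls back to MSI/legacy */
		want = err;           /* only 'err' available, retry with that */
	}
	return -1;                    /* can't satisfy the minimum */
}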
- * - **/ -static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) -{ - int i; - - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - return false; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - - return true; -} - -#ifdef CONFIG_IXGBE_DCB - -/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ -static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, - unsigned int *tx, unsigned int *rx) -{ - struct net_device *dev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - u8 num_tcs = netdev_get_num_tc(dev); - - *tx = 0; - *rx = 0; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - *tx = tc << 2; - *rx = tc << 3; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - if (num_tcs == 8) { - if (tc < 3) { - *tx = tc << 5; - *rx = tc << 4; - } else if (tc < 5) { - *tx = ((tc + 2) << 4); - *rx = tc << 4; - } else if (tc < num_tcs) { - *tx = ((tc + 8) << 3); - *rx = tc << 4; - } - } else if (num_tcs == 4) { - *rx = tc << 5; - switch (tc) { - case 0: - *tx = 0; - break; - case 1: - *tx = 64; - break; - case 2: - *tx = 96; - break; - case 3: - *tx = 112; - break; - default: - break; - } - } - break; - default: - break; - } -} - -/** - * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for DCB to the assigned rings. - * - **/ -static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) -{ - struct net_device *dev = adapter->netdev; - int i, j, k; - u8 num_tcs = netdev_get_num_tc(dev); - - if (!num_tcs) - return false; - - for (i = 0, k = 0; i < num_tcs; i++) { - unsigned int tx_s, rx_s; - u16 count = dev->tc_to_txq[i].count; - - ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); - for (j = 0; j < count; j++, k++) { - adapter->tx_ring[k]->reg_idx = tx_s + j; - adapter->rx_ring[k]->reg_idx = rx_s + j; - adapter->tx_ring[k]->dcb_tc = i; - adapter->rx_ring[k]->dcb_tc = i; - } - } - - return true; -} -#endif - -/** - * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for Flow Director to the assigned rings. - * - **/ -static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) -{ - int i; - bool ret = false; - - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && - (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - ret = true; - } - - return ret; -} - -#ifdef IXGBE_FCOE -/** - * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for FCoE mode to the assigned rings. 
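[For the eight-TC case on 82599/X540, ixgbe_get_first_reg_idx() above hands out transmit base indices from three differently sized regions of the 128-queue tx space, while receive bases advance 16 queues per class. A small program tabulating those bases:

#include <stdio.h>

/* mirrors the 82599/X540 eight-TC case of ixgbe_get_first_reg_idx() */
static void first_reg_idx_8tc(unsigned int tc, unsigned int *tx, unsigned int *rx)
{
        *rx = tc << 4;                  /* rx bases every 16 queues */
        if (tc < 3)
                *tx = tc << 5;          /* tx bases 0, 32, 64 */
        else if (tc < 5)
                *tx = (tc + 2) << 4;    /* tx bases 80, 96 */
        else
                *tx = (tc + 8) << 3;    /* tx bases 104, 112, 120 */
}

int main(void)
{
        unsigned int tc, tx, rx;

        for (tc = 0; tc < 8; tc++) {
                first_reg_idx_8tc(tc, &tx, &rx);
                printf("tc %u: tx base %3u, rx base %3u\n", tc, tx, rx);
        }
        return 0;
}

The printed tx bases (0, 32, 64, 80, 96, 104, 112, 120) tile the 128-queue tx space without overlap.]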
- * - */ -static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - int i; - u8 fcoe_rx_i = 0, fcoe_tx_i = 0; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return false; - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) - ixgbe_cache_ring_fdir(adapter); - else - ixgbe_cache_ring_rss(adapter); - - fcoe_rx_i = f->mask; - fcoe_tx_i = f->mask; - } - for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { - adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; - adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; - } - return true; -} - -#endif /* IXGBE_FCOE */ -/** - * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov - * @adapter: board private structure to initialize - * - * SR-IOV doesn't use any descriptor rings but changes the default if - * no other mapping is used. - * - */ -static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) -{ - adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; - adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; - if (adapter->num_vfs) - return true; - else - return false; -} - -/** - * ixgbe_cache_ring_register - Descriptor ring to register mapping - * @adapter: board private structure to initialize - * - * Once we know the feature-set enabled for the device, we'll cache - * the register offset the descriptor ring is assigned to. - * - * Note, the order the various feature calls is important. It must start with - * the "most" features enabled at the same time, then trickle down to the - * least amount of features turned on at once. - **/ -static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) -{ - /* start with default case */ - adapter->rx_ring[0]->reg_idx = 0; - adapter->tx_ring[0]->reg_idx = 0; - - if (ixgbe_cache_ring_sriov(adapter)) - return; - -#ifdef CONFIG_IXGBE_DCB - if (ixgbe_cache_ring_dcb(adapter)) - return; -#endif - -#ifdef IXGBE_FCOE - if (ixgbe_cache_ring_fcoe(adapter)) - return; -#endif /* IXGBE_FCOE */ - - if (ixgbe_cache_ring_fdir(adapter)) - return; - - if (ixgbe_cache_ring_rss(adapter)) - return; -} - -/** - * ixgbe_alloc_queues - Allocate memory for all rings - * @adapter: board private structure to initialize - * - * We allocate one ring per queue at run-time since we don't know the - * number of queues at compile-time. The polling_netdev array is - * intended for Multiqueue, but should work fine with a single queue. 
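[Both ixgbe_set_num_queues() and ixgbe_cache_ring_register() walk the same precedence list (SR-IOV, then DCB, then FCoE, then Flow Director, then plain RSS), and the first routine that claims the rings wins. A sketch of that first-match-wins dispatch; the enabled flags are an invented example, not a configuration from the patch:

#include <stdbool.h>
#include <stdio.h>

struct feature {
        const char *name;
        bool enabled;
};

int main(void)
{
        struct feature order[] = {
                { "sriov", false },
                { "dcb",   false },
                { "fcoe",  false },
                { "fdir",  true  },
                { "rss",   true  },
        };
        size_t i;

        for (i = 0; i < sizeof(order) / sizeof(order[0]); i++) {
                if (order[i].enabled) {
                        printf("rings configured for %s\n", order[i].name);
                        return 0;       /* first match wins, rest fall through */
                }
        }
        printf("base case: single tx/rx queue\n");
        return 0;
}

Ordering from the largest feature combination down to the smallest is what makes the single-queue base case a safe final fallthrough.]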
- **/ -static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) -{ - int rx = 0, tx = 0, nid = adapter->node; - - if (nid < 0 || !node_online(nid)) - nid = first_online_node; - - for (; tx < adapter->num_tx_queues; tx++) { - struct ixgbe_ring *ring; - - ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); - if (!ring) - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - goto err_allocation; - ring->count = adapter->tx_ring_count; - ring->queue_index = tx; - ring->numa_node = nid; - ring->dev = &adapter->pdev->dev; - ring->netdev = adapter->netdev; - - adapter->tx_ring[tx] = ring; - } - - for (; rx < adapter->num_rx_queues; rx++) { - struct ixgbe_ring *ring; - - ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); - if (!ring) - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - goto err_allocation; - ring->count = adapter->rx_ring_count; - ring->queue_index = rx; - ring->numa_node = nid; - ring->dev = &adapter->pdev->dev; - ring->netdev = adapter->netdev; - - adapter->rx_ring[rx] = ring; - } - - ixgbe_cache_ring_register(adapter); - - return 0; - -err_allocation: - while (tx) - kfree(adapter->tx_ring[--tx]); - - while (rx) - kfree(adapter->rx_ring[--rx]); - return -ENOMEM; -} - -/** - * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported - * @adapter: board private structure to initialize - * - * Attempt to configure the interrupts using the best available - * capabilities of the hardware and the kernel. - **/ -static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int err = 0; - int vector, v_budget; - - /* - * It's easy to be greedy for MSI-X vectors, but it really - * doesn't do us much good if we have a lot more vectors - * than CPU's. So let's be conservative and only ask for - * (roughly) the same number of vectors as there are CPU's. - */ - v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, - (int)num_online_cpus()) + NON_Q_VECTORS; - - /* - * At the same time, hardware can only support a maximum of - * hw.mac->max_msix_vectors vectors. With features - * such as RSS and VMDq, we can easily surpass the number of Rx and Tx - * descriptor queues supported by our device. Thus, we cap it off in - * those rare cases where the cpu count also exceeds our vector limit. - */ - v_budget = min(v_budget, (int)hw->mac.max_msix_vectors); - - /* A failure in MSI-X entry allocation isn't fatal, but it does - * mean we disable MSI-X capabilities of the adapter. */ - adapter->msix_entries = kcalloc(v_budget, - sizeof(struct msix_entry), GFP_KERNEL); - if (adapter->msix_entries) { - for (vector = 0; vector < v_budget; vector++) - adapter->msix_entries[vector].entry = vector; - - ixgbe_acquire_msix_vectors(adapter, v_budget); - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - goto out; - } - - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - e_err(probe, - "ATR is not supported while multiple " - "queues are disabled. Disabling Flow Director\n"); - } - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->atr_sample_rate = 0; - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - ixgbe_disable_sriov(adapter); - - err = ixgbe_set_num_queues(adapter); - if (err) - return err; - - err = pci_enable_msi(adapter->pdev); - if (!err) { - adapter->flags |= IXGBE_FLAG_MSI_ENABLED; - } else { - netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, - "Unable to allocate MSI interrupt, " - "falling back to legacy. 
Error: %d\n", err); - /* reset err */ - err = 0; - } - -out: - return err; -} - -/** - * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors - * @adapter: board private structure to initialize - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. - **/ -static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) -{ - int q_idx, num_q_vectors; - struct ixgbe_q_vector *q_vector; - int (*poll)(struct napi_struct *, int); - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - poll = &ixgbe_clean_rxtx_many; - } else { - num_q_vectors = 1; - poll = &ixgbe_poll; - } - - for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), - GFP_KERNEL, adapter->node); - if (!q_vector) - q_vector = kzalloc(sizeof(struct ixgbe_q_vector), - GFP_KERNEL); - if (!q_vector) - goto err_out; - q_vector->adapter = adapter; - if (q_vector->tx.count && !q_vector->rx.count) - q_vector->eitr = adapter->tx_eitr_param; - else - q_vector->eitr = adapter->rx_eitr_param; - q_vector->v_idx = q_idx; - netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); - adapter->q_vector[q_idx] = q_vector; - } - - return 0; - -err_out: - while (q_idx) { - q_idx--; - q_vector = adapter->q_vector[q_idx]; - netif_napi_del(&q_vector->napi); - kfree(q_vector); - adapter->q_vector[q_idx] = NULL; - } - return -ENOMEM; -} - -/** - * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors - * @adapter: board private structure to initialize - * - * This function frees the memory allocated to the q_vectors. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. - **/ -static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) -{ - int q_idx, num_q_vectors; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - else - num_q_vectors = 1; - - for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; - adapter->q_vector[q_idx] = NULL; - netif_napi_del(&q_vector->napi); - kfree(q_vector); - } -} - -static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) -{ - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; - pci_disable_msi(adapter->pdev); - } -} - -/** - * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme - * @adapter: board private structure to initialize - * - * We determine which interrupt scheme to use based on... - * - Kernel support (MSI, MSI-X) - * - which can be user-defined (via MODULE_PARAM) - * - Hardware queue count (num_*_queues) - * - defined by miscellaneous hardware support/features (RSS, etc.) 
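[The err_out path in ixgbe_alloc_q_vectors() above tears down only what was actually allocated, walking backwards from the failure point. A self-contained model of that reverse unwind, with an artificial failure injected at FAIL_AT:

#include <stdio.h>
#include <stdlib.h>

#define NUM_VECTORS 8
#define FAIL_AT 5       /* injected failure point for the demo */

int main(void)
{
        void *vec[NUM_VECTORS] = { NULL };
        int i;

        for (i = 0; i < NUM_VECTORS; i++) {
                vec[i] = (i == FAIL_AT) ? NULL : malloc(64);
                if (!vec[i])
                        goto err_out;
        }
        printf("all %d vectors allocated\n", NUM_VECTORS);
        return 0;

err_out:
        while (i) {
                i--;
                /* netif_napi_del() would precede the kfree() in the driver */
                free(vec[i]);
                vec[i] = NULL;
        }
        printf("allocation failed, unwound cleanly\n");
        return EXIT_FAILURE;
}]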
- **/ -int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) -{ - int err; - - /* Number of supported queues */ - err = ixgbe_set_num_queues(adapter); - if (err) - return err; - - err = ixgbe_set_interrupt_capability(adapter); - if (err) { - e_dev_err("Unable to setup interrupt capabilities\n"); - goto err_set_interrupt; - } - - err = ixgbe_alloc_q_vectors(adapter); - if (err) { - e_dev_err("Unable to allocate memory for queue vectors\n"); - goto err_alloc_q_vectors; - } - - err = ixgbe_alloc_queues(adapter); - if (err) { - e_dev_err("Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - - e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", - (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", - adapter->num_rx_queues, adapter->num_tx_queues); - - set_bit(__IXGBE_DOWN, &adapter->state); - - return 0; - -err_alloc_queues: - ixgbe_free_q_vectors(adapter); -err_alloc_q_vectors: - ixgbe_reset_interrupt_capability(adapter); -err_set_interrupt: - return err; -} - -/** - * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings - * @adapter: board private structure to clear interrupt scheme on - * - * We go through and clear interrupt specific resources and reset the structure - * to pre-load conditions - **/ -void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - kfree(adapter->tx_ring[i]); - adapter->tx_ring[i] = NULL; - } - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = adapter->rx_ring[i]; - - /* ixgbe_get_stats64() might access this ring, we must wait - * a grace period before freeing it. - */ - kfree_rcu(ring, rcu); - adapter->rx_ring[i] = NULL; - } - - adapter->num_tx_queues = 0; - adapter->num_rx_queues = 0; - - ixgbe_free_q_vectors(adapter); - ixgbe_reset_interrupt_capability(adapter); -} - -/** - * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) - * @adapter: board private structure to initialize - * - * ixgbe_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). 
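[ixgbe_init_interrupt_scheme() above uses the classic goto ladder: every stage that succeeds gains a matching label on the failure path, so teardown always runs in exact reverse order of setup. The same shape with the four stages stubbed out and one failure injected:

#include <stdio.h>

static int set_num_queues(void)           { return 0; }
static int set_interrupt_capability(void) { return 0; }
static int alloc_q_vectors(void)          { return 0; }
static int alloc_queues(void)             { return -1; }  /* injected failure */

static void free_q_vectors(void)             { puts("free q_vectors"); }
static void reset_interrupt_capability(void) { puts("reset irq capability"); }

int main(void)
{
        int err;

        err = set_num_queues();
        if (err)
                return err;
        err = set_interrupt_capability();
        if (err)
                goto err_set_interrupt;
        err = alloc_q_vectors();
        if (err)
                goto err_alloc_q_vectors;
        err = alloc_queues();
        if (err)
                goto err_alloc_queues;
        return 0;

err_alloc_queues:
        free_q_vectors();
err_alloc_q_vectors:
        reset_interrupt_capability();
err_set_interrupt:
        return err;
}]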
- **/ -static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - struct net_device *dev = adapter->netdev; - unsigned int rss; -#ifdef CONFIG_IXGBE_DCB - int j; - struct tc_configuration *tc; -#endif - int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; - - /* PCI config space info */ - - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->revision_id = pdev->revision; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - - /* Set capability flags */ - rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); - adapter->ring_feature[RING_F_RSS].indices = rss; - adapter->flags |= IXGBE_FLAG_RSS_ENABLED; - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - if (hw->device_id == IXGBE_DEV_ID_82598AT) - adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; - adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; - adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; - adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; - if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - /* Flow Director hash filters enabled */ - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->atr_sample_rate = 20; - adapter->ring_feature[RING_F_FDIR].indices = - IXGBE_MAX_FDIR_INDICES; - adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; -#ifdef IXGBE_FCOE - adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = 0; -#ifdef CONFIG_IXGBE_DCB - /* Default traffic class to use for FCoE */ - adapter->fcoe.up = IXGBE_FCOE_DEFTC; -#endif -#endif /* IXGBE_FCOE */ - break; - default: - break; - } - - /* n-tuple support exists, always init our spinlock */ - spin_lock_init(&adapter->fdir_perfect_lock); - -#ifdef CONFIG_IXGBE_DCB - /* Configure DCB traffic classes */ - for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { - tc = &adapter->dcb_cfg.tc_config[j]; - tc->path[DCB_TX_CONFIG].bwg_id = 0; - tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); - tc->path[DCB_RX_CONFIG].bwg_id = 0; - tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); - tc->dcb_pfc = pfc_disabled; - } - adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; - adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; - adapter->dcb_cfg.pfc_mode_enable = false; - adapter->dcb_set_bitmap = 0x00; - adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; - ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, - MAX_TRAFFIC_CLASS); - -#endif - - /* default flow control settings */ - hw->fc.requested_mode = ixgbe_fc_full; - hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ -#ifdef CONFIG_DCB - adapter->last_lfc_mode = hw->fc.current_mode; -#endif - hw->fc.high_water = FC_HIGH_WATER(max_frame); - hw->fc.low_water = FC_LOW_WATER(max_frame); - hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; - hw->fc.send_xon = true; - hw->fc.disable_fc_autoneg = false; - - /* enable itr by default in dynamic mode */ - adapter->rx_itr_setting = 1; - adapter->rx_eitr_param = 20000; - adapter->tx_itr_setting = 1; - adapter->tx_eitr_param = 10000; - - /* set defaults for eitr in MegaBytes */ - adapter->eitr_low = 10; - adapter->eitr_high = 20; - - /* set default ring sizes */ - adapter->tx_ring_count = IXGBE_DEFAULT_TXD; - adapter->rx_ring_count = IXGBE_DEFAULT_RXD; - - /* set default work limits 
*/ - adapter->tx_work_limit = adapter->tx_ring_count; - - /* initialize eeprom parameters */ - if (ixgbe_init_eeprom_params_generic(hw)) { - e_dev_err("EEPROM initialization failed\n"); - return -EIO; - } - - /* enable rx csum by default */ - adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; - - /* get assigned NUMA node */ - adapter->node = dev_to_node(&pdev->dev); - - set_bit(__IXGBE_DOWN, &adapter->state); - - return 0; -} - -/** - * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) - * @tx_ring: tx descriptor ring (for a specific queue) to setup - * - * Return 0 on success, negative on failure - **/ -int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) -{ - struct device *dev = tx_ring->dev; - int size; - - size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; - tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); - if (!tx_ring->tx_buffer_info) - tx_ring->tx_buffer_info = vzalloc(size); - if (!tx_ring->tx_buffer_info) - goto err; - - /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); - - tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->desc) - goto err; - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - return 0; - -err: - vfree(tx_ring->tx_buffer_info); - tx_ring->tx_buffer_info = NULL; - dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); - return -ENOMEM; -} - -/** - * ixgbe_setup_all_tx_resources - allocate all queues Tx resources - * @adapter: board private structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. - * - * Return 0 on success, negative on failure - **/ -static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_tx_queues; i++) { - err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); - if (!err) - continue; - e_err(probe, "Allocation for Tx Queue %u failed\n", i); - break; - } - - return err; -} - -/** - * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) - * @rx_ring: rx descriptor ring (for a specific queue) to setup - * - * Returns 0 on success, negative on failure - **/ -int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - int size; - - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); - if (!rx_ring->rx_buffer_info) - rx_ring->rx_buffer_info = vzalloc(size); - if (!rx_ring->rx_buffer_info) - goto err; - - /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); - rx_ring->size = ALIGN(rx_ring->size, 4096); - - rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - - if (!rx_ring->desc) - goto err; - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - - return 0; -err: - vfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; - dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); - return -ENOMEM; -} - -/** - * ixgbe_setup_all_rx_resources - allocate all queues Rx resources - * @adapter: board private structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. 
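[The ring setup above sizes the descriptor array as count * sizeof(descriptor) and rounds up to 4 KiB to match the dma_alloc_coherent() granularity. A quick check of that arithmetic; the 16-byte descriptor size corresponds to union ixgbe_adv_tx_desc, and the ring counts are arbitrary:

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long desc_size = 16;   /* sizeof(union ixgbe_adv_tx_desc) */
        unsigned long counts[] = { 512, 1000, 4096 };
        size_t i;

        for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
                unsigned long bytes = counts[i] * desc_size;

                printf("%4lu descriptors: %6lu bytes -> %6lu after 4K align\n",
                       counts[i], bytes, ALIGN_UP(bytes, 4096UL));
        }
        return 0;
}

512 descriptors already land on a page boundary (8192 bytes); an odd count such as 1000 (16000 bytes) gets padded up to 16384.]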
- * - * Return 0 on success, negative on failure - **/ -static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_rx_queues; i++) { - err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); - if (!err) - continue; - e_err(probe, "Allocation for Rx Queue %u failed\n", i); - break; - } - - return err; -} - -/** - * ixgbe_free_tx_resources - Free Tx Resources per Queue - * @tx_ring: Tx descriptor ring for a specific queue - * - * Free all transmit software resources - **/ -void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) -{ - ixgbe_clean_tx_ring(tx_ring); - - vfree(tx_ring->tx_buffer_info); - tx_ring->tx_buffer_info = NULL; - - /* if not set, then don't free */ - if (!tx_ring->desc) - return; - - dma_free_coherent(tx_ring->dev, tx_ring->size, - tx_ring->desc, tx_ring->dma); - - tx_ring->desc = NULL; -} - -/** - * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues - * @adapter: board private structure - * - * Free all transmit software resources - **/ -static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - if (adapter->tx_ring[i]->desc) - ixgbe_free_tx_resources(adapter->tx_ring[i]); -} - -/** - * ixgbe_free_rx_resources - Free Rx Resources - * @rx_ring: ring to clean the resources from - * - * Free all receive software resources - **/ -void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) -{ - ixgbe_clean_rx_ring(rx_ring); - - vfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; - - /* if not set, then don't free */ - if (!rx_ring->desc) - return; - - dma_free_coherent(rx_ring->dev, rx_ring->size, - rx_ring->desc, rx_ring->dma); - - rx_ring->desc = NULL; -} - -/** - * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues - * @adapter: board private structure - * - * Free all receive software resources - **/ -static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) - if (adapter->rx_ring[i]->desc) - ixgbe_free_rx_resources(adapter->rx_ring[i]); -} - -/** - * ixgbe_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - **/ -static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - - /* MTU < 68 is an error and causes problems on some kernels */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED && - hw->mac.type != ixgbe_mac_X540) { - if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) - return -EINVAL; - } else { - if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) - return -EINVAL; - } - - e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - /* must set new MTU before calling down or up */ - netdev->mtu = new_mtu; - - hw->fc.high_water = FC_HIGH_WATER(max_frame); - hw->fc.low_water = FC_LOW_WATER(max_frame); - - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - - return 0; -} - -/** - * ixgbe_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). 
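[ixgbe_change_mtu() above validates the full wire frame (MTU plus Ethernet header and FCS) against either the VLAN-sized limit (SR-IOV on pre-X540 parts) or the jumbo limit. A sketch of that check, with the constants restated for illustration rather than taken from the headers:

#include <stdio.h>

#define ETH_HLEN                        14
#define ETH_FCS_LEN                     4
#define MAXIMUM_ETHERNET_VLAN_SIZE      (1500 + ETH_HLEN + 4 + ETH_FCS_LEN)
#define IXGBE_MAX_JUMBO_FRAME_SIZE      9728

static int mtu_valid(int new_mtu, int jumbo_ok)
{
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        int limit = jumbo_ok ? IXGBE_MAX_JUMBO_FRAME_SIZE
                             : MAXIMUM_ETHERNET_VLAN_SIZE;

        return new_mtu >= 68 && max_frame <= limit;     /* 68 = IPv4 minimum */
}

int main(void)
{
        printf("mtu 1500, jumbo off: %s\n", mtu_valid(1500, 0) ? "ok" : "rejected");
        printf("mtu 9000, jumbo off: %s\n", mtu_valid(9000, 0) ? "ok" : "rejected");
        printf("mtu 9000, jumbo on : %s\n", mtu_valid(9000, 1) ? "ok" : "rejected");
        return 0;
}]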
At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. - **/ -static int ixgbe_open(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int err; - - /* disallow open during test */ - if (test_bit(__IXGBE_TESTING, &adapter->state)) - return -EBUSY; - - netif_carrier_off(netdev); - - /* allocate transmit descriptors */ - err = ixgbe_setup_all_tx_resources(adapter); - if (err) - goto err_setup_tx; - - /* allocate receive descriptors */ - err = ixgbe_setup_all_rx_resources(adapter); - if (err) - goto err_setup_rx; - - ixgbe_configure(adapter); - - err = ixgbe_request_irq(adapter); - if (err) - goto err_req_irq; - - err = ixgbe_up_complete(adapter); - if (err) - goto err_up; - - netif_tx_start_all_queues(netdev); - - return 0; - -err_up: - ixgbe_release_hw_control(adapter); - ixgbe_free_irq(adapter); -err_req_irq: -err_setup_rx: - ixgbe_free_all_rx_resources(adapter); -err_setup_tx: - ixgbe_free_all_tx_resources(adapter); - ixgbe_reset(adapter); - - return err; -} - -/** - * ixgbe_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. - **/ -static int ixgbe_close(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - ixgbe_down(adapter); - ixgbe_free_irq(adapter); - - ixgbe_fdir_filter_exit(adapter); - - ixgbe_free_all_tx_resources(adapter); - ixgbe_free_all_rx_resources(adapter); - - ixgbe_release_hw_control(adapter); - - return 0; -} - -#ifdef CONFIG_PM -static int ixgbe_resume(struct pci_dev *pdev) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - u32 err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* - * pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. 
- */ - pci_save_state(pdev); - - err = pci_enable_device_mem(pdev); - if (err) { - e_dev_err("Cannot enable PCI device from suspend\n"); - return err; - } - pci_set_master(pdev); - - pci_wake_from_d3(pdev, false); - - err = ixgbe_init_interrupt_scheme(adapter); - if (err) { - e_dev_err("Cannot initialize interrupts for device\n"); - return err; - } - - ixgbe_reset(adapter); - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); - - if (netif_running(netdev)) { - err = ixgbe_open(netdev); - if (err) - return err; - } - - netif_device_attach(netdev); - - return 0; -} -#endif /* CONFIG_PM */ - -static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - u32 ctrl, fctrl; - u32 wufc = adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif - - netif_device_detach(netdev); - - if (netif_running(netdev)) { - ixgbe_down(adapter); - ixgbe_free_irq(adapter); - ixgbe_free_all_tx_resources(adapter); - ixgbe_free_all_rx_resources(adapter); - } - - ixgbe_clear_interrupt_scheme(adapter); -#ifdef CONFIG_DCB - kfree(adapter->ixgbe_ieee_pfc); - kfree(adapter->ixgbe_ieee_ets); -#endif - -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; - -#endif - if (wufc) { - ixgbe_set_rx_mode(netdev); - - /* turn on all-multi mode if wake on multicast is enabled */ - if (wufc & IXGBE_WUFC_MC) { - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl |= IXGBE_FCTRL_MPE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - } - - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - ctrl |= IXGBE_CTRL_GIO_DIS; - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); - - IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); - } else { - IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); - IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); - } - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - pci_wake_from_d3(pdev, false); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - pci_wake_from_d3(pdev, !!wufc); - break; - default: - break; - } - - *enable_wake = !!wufc; - - ixgbe_release_hw_control(adapter); - - pci_disable_device(pdev); - - return 0; -} - -#ifdef CONFIG_PM -static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) -{ - int retval; - bool wake; - - retval = __ixgbe_shutdown(pdev, &wake); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; -} -#endif /* CONFIG_PM */ - -static void ixgbe_shutdown(struct pci_dev *pdev) -{ - bool wake; - - __ixgbe_shutdown(pdev, &wake); - - if (system_state == SYSTEM_POWER_OFF) { - pci_wake_from_d3(pdev, wake); - pci_set_power_state(pdev, PCI_D3hot); - } -} - -/** - * ixgbe_update_stats - Update the board statistics counters. 
- * @adapter: board private structure - **/ -void ixgbe_update_stats(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_hw_stats *hwstats = &adapter->stats; - u64 total_mpc = 0; - u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; - u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; - u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; - u64 bytes = 0, packets = 0; - - if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) - return; - - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { - u64 rsc_count = 0; - u64 rsc_flush = 0; - for (i = 0; i < 16; i++) - adapter->hw_rx_no_dma_resources += - IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); - for (i = 0; i < adapter->num_rx_queues; i++) { - rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; - rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; - } - adapter->rsc_total_count = rsc_count; - adapter->rsc_total_flush = rsc_flush; - } - - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; - non_eop_descs += rx_ring->rx_stats.non_eop_descs; - alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; - alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; - bytes += rx_ring->stats.bytes; - packets += rx_ring->stats.packets; - } - adapter->non_eop_descs = non_eop_descs; - adapter->alloc_rx_page_failed = alloc_rx_page_failed; - adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; - netdev->stats.rx_bytes = bytes; - netdev->stats.rx_packets = packets; - - bytes = 0; - packets = 0; - /* gather some stats to the adapter struct that are per queue */ - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - restart_queue += tx_ring->tx_stats.restart_queue; - tx_busy += tx_ring->tx_stats.tx_busy; - bytes += tx_ring->stats.bytes; - packets += tx_ring->stats.packets; - } - adapter->restart_queue = restart_queue; - adapter->tx_busy = tx_busy; - netdev->stats.tx_bytes = bytes; - netdev->stats.tx_packets = packets; - - hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); - for (i = 0; i < 8; i++) { - /* for packet buffers not used, the register should read 0 */ - mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); - missed_rx += mpc; - hwstats->mpc[i] += mpc; - total_mpc += hwstats->mpc[i]; - if (hw->mac.type == ixgbe_mac_82598EB) - hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); - hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); - hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); - hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); - hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - hwstats->pxonrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - hwstats->pxonrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); - break; - default: - break; - } - hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); - hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); - } - hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); - /* work around hardware counting issue */ - hwstats->gprc -= missed_rx; - - ixgbe_update_xoff_received(adapter); - - /* 82598 hardware only has a 32 bit counter in the high register */ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); - hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); - hwstats->gotc += 
IXGBE_READ_REG(hw, IXGBE_GOTCH); - hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); - break; - case ixgbe_mac_X540: - /* OS2BMC stats are X540 only*/ - hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); - hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); - hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); - hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); - case ixgbe_mac_82599EB: - hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); - IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ - hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); - IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ - hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); - IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ - hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); - hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); - hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); -#ifdef IXGBE_FCOE - hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); - hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); - hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); - hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); - hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); - hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); -#endif /* IXGBE_FCOE */ - break; - default: - break; - } - bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); - hwstats->bprc += bprc; - hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); - if (hw->mac.type == ixgbe_mac_82598EB) - hwstats->mprc -= bprc; - hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); - hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); - hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); - hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); - hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); - hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); - hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); - hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); - lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); - hwstats->lxontxc += lxon; - lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); - hwstats->lxofftxc += lxoff; - hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); - hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); - hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); - /* - * 82598 errata - tx of flow control packets is included in tx counters - */ - xon_off_tot = lxon + lxoff; - hwstats->gptc -= xon_off_tot; - hwstats->mptc -= xon_off_tot; - hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); - hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); - hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); - hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); - hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); - hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); - hwstats->ptc64 -= xon_off_tot; - hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); - hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); - hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); - hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); - hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); - hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); - - /* Fill out the OS statistics structure */ - netdev->stats.multicast = hwstats->mprc; - - /* Rx Errors */ - netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; - netdev->stats.rx_dropped = 0; - netdev->stats.rx_length_errors = hwstats->rlec; - netdev->stats.rx_crc_errors = hwstats->crcerrs; - netdev->stats.rx_missed_errors = total_mpc; -} - -/** - * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table - * @adapter - pointer to the device adapter structure 
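[The 82598 errata handling near the end of ixgbe_update_stats() above compensates for pause frames being counted as good traffic: one minimum-sized frame (ETH_ZLEN + ETH_FCS_LEN = 64 bytes) is deducted per transmitted XON/XOFF. Worked through with invented counter values:

#include <stdio.h>

#define ETH_ZLEN        60
#define ETH_FCS_LEN     4

int main(void)
{
        unsigned long long gptc = 100000;       /* good packets transmitted */
        unsigned long long gotc = 64000000;     /* good octets transmitted */
        unsigned long long lxon = 120, lxoff = 80;
        unsigned long long xon_off_tot = lxon + lxoff;

        gptc -= xon_off_tot;
        gotc -= xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN);

        printf("corrected gptc=%llu gotc=%llu (removed %llu pause frames)\n",
               gptc, gotc, xon_off_tot);
        return 0;
}

With 200 pause frames the byte counter drops by 12800, keeping the ethtool statistics in line with actual payload traffic.]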
- **/ -static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int i; - - if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) - return; - - adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; - - /* if interface is down do nothing */ - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; - - /* do nothing if we are not using signature filters */ - if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) - return; - - adapter->fdir_overflow++; - - if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { - for (i = 0; i < adapter->num_tx_queues; i++) - set_bit(__IXGBE_TX_FDIR_INIT_DONE, - &(adapter->tx_ring[i]->state)); - /* re-enable flow director interrupts */ - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); - } else { - e_err(probe, "failed to finish FDIR re-initialization, " - "ignored adding FDIR ATR filters\n"); - } -} - -/** - * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts - * @adapter - pointer to the device adapter structure - * - * This function serves two purposes. First it strobes the interrupt lines - * in order to make certain interrupts are occuring. Secondly it sets the - * bits needed to check for TX hangs. As a result we should immediately - * determine if a hang has occured. - */ -static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u64 eics = 0; - int i; - - /* If we're down or resetting, just bail */ - if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) - return; - - /* Force detection of hung controller */ - if (netif_carrier_ok(adapter->netdev)) { - for (i = 0; i < adapter->num_tx_queues; i++) - set_check_for_tx_hang(adapter->tx_ring[i]); - } - - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { - /* - * for legacy and MSI interrupts don't set any bits - * that are enabled for EIAM, because this operation - * would set *both* EIMS and EICS for any bit in EIAM - */ - IXGBE_WRITE_REG(hw, IXGBE_EICS, - (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); - } else { - /* get one bit for every active tx/rx interrupt vector */ - for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { - struct ixgbe_q_vector *qv = adapter->q_vector[i]; - if (qv->rx.count || qv->tx.count) - eics |= ((u64)1 << i); - } - } - - /* Cause software interrupt to ensure rings are cleaned */ - ixgbe_irq_rearm_queues(adapter, eics); - -} - -/** - * ixgbe_watchdog_update_link - update the link status - * @adapter - pointer to the device adapter structure - * @link_speed - pointer to a u32 to store the link_speed - **/ -static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 link_speed = adapter->link_speed; - bool link_up = adapter->link_up; - int i; - - if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) - return; - - if (hw->mac.ops.check_link) { - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - } else { - /* always assume link is up, if no check link function */ - link_speed = IXGBE_LINK_SPEED_10GB_FULL; - link_up = true; - } - if (link_up) { - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - hw->mac.ops.fc_enable(hw, i); - } else { - hw->mac.ops.fc_enable(hw, 0); - } - } - - if (link_up || - time_after(jiffies, (adapter->link_check_timeout + - IXGBE_TRY_LINK_TIMEOUT))) { - adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); - IXGBE_WRITE_FLUSH(hw); - } 
- - adapter->link_up = link_up; - adapter->link_speed = link_speed; -} - -/** - * ixgbe_watchdog_link_is_up - update netif_carrier status and - * print link up message - * @adapter - pointer to the device adapter structure - **/ -static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - u32 link_speed = adapter->link_speed; - bool flow_rx, flow_tx; - - /* only continue if link was previously down */ - if (netif_carrier_ok(netdev)) - return; - - adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: { - u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); - flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); - flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); - } - break; - case ixgbe_mac_X540: - case ixgbe_mac_82599EB: { - u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); - u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); - flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); - flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); - } - break; - default: - flow_tx = false; - flow_rx = false; - break; - } - e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", - (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? - "10 Gbps" : - (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? - "1 Gbps" : - (link_speed == IXGBE_LINK_SPEED_100_FULL ? - "100 Mbps" : - "unknown speed"))), - ((flow_rx && flow_tx) ? "RX/TX" : - (flow_rx ? "RX" : - (flow_tx ? "TX" : "None")))); - - netif_carrier_on(netdev); - ixgbe_check_vf_rate_limit(adapter); -} - -/** - * ixgbe_watchdog_link_is_down - update netif_carrier status and - * print link down message - * @adapter - pointer to the adapter structure - **/ -static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter* adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - - adapter->link_up = false; - adapter->link_speed = 0; - - /* only continue if link was up previously */ - if (!netif_carrier_ok(netdev)) - return; - - /* poll for SFP+ cable when link is down */ - if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) - adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; - - e_info(drv, "NIC Link is Down\n"); - netif_carrier_off(netdev); -} - -/** - * ixgbe_watchdog_flush_tx - flush queues on link down - * @adapter - pointer to the device adapter structure - **/ -static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) -{ - int i; - int some_tx_pending = 0; - - if (!netif_carrier_ok(adapter->netdev)) { - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - if (tx_ring->next_to_use != tx_ring->next_to_clean) { - some_tx_pending = 1; - break; - } - } - - if (some_tx_pending) { - /* We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). - */ - adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; - } - } -} - -static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) -{ - u32 ssvpc; - - /* Do not perform spoof check for 82598 */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - return; - - ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); - - /* - * ssvpc register is cleared on read, if zero then no - * spoofed packets in the last interval. 
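[ixgbe_watchdog_flush_tx() above decides a queue still has work by comparing the producer and consumer indices; if they differ after carrier loss, that work can never complete, so the driver requests a reset to flush it. A minimal model of the test:

#include <stdbool.h>
#include <stdio.h>

struct ring {
        unsigned short next_to_use;     /* where the next tx descriptor goes */
        unsigned short next_to_clean;   /* first descriptor not yet completed */
};

static bool tx_pending(const struct ring *r, int nrings)
{
        int i;

        for (i = 0; i < nrings; i++)
                if (r[i].next_to_use != r[i].next_to_clean)
                        return true;
        return false;
}

int main(void)
{
        struct ring rings[] = { { 10, 10 }, { 37, 32 } };       /* sample state */

        if (tx_pending(rings, 2))
                printf("link down with queued tx: schedule a reset\n");
        return 0;
}]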
- */ - if (!ssvpc) - return; - - e_warn(drv, "%d Spoofed packets detected\n", ssvpc); -} - -/** - * ixgbe_watchdog_subtask - check and bring link up - * @adapter - pointer to the device adapter structure - **/ -static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) -{ - /* if interface is down do nothing */ - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; - - ixgbe_watchdog_update_link(adapter); - - if (adapter->link_up) - ixgbe_watchdog_link_is_up(adapter); - else - ixgbe_watchdog_link_is_down(adapter); - - ixgbe_spoof_check(adapter); - ixgbe_update_stats(adapter); - - ixgbe_watchdog_flush_tx(adapter); -} - -/** - * ixgbe_sfp_detection_subtask - poll for SFP+ cable - * @adapter - the ixgbe adapter structure - **/ -static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - s32 err; - - /* not searching for SFP so there is nothing to do here */ - if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && - !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) - return; - - /* someone else is in init, wait until next service event */ - if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - return; - - err = hw->phy.ops.identify_sfp(hw); - if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto sfp_out; - - if (err == IXGBE_ERR_SFP_NOT_PRESENT) { - /* If no cable is present, then we need to reset - * the next time we find a good cable. */ - adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; - } - - /* exit on error */ - if (err) - goto sfp_out; - - /* exit if reset not needed */ - if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) - goto sfp_out; - - adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; - - /* - * A module may be identified correctly, but the EEPROM may not have - * support for that module. setup_sfp() will fail in that case, so - * we should not allow that module to load. 
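[The two SFP subtasks serialize on __IXGBE_IN_SFP_INIT via test_and_set_bit(): whoever sets the bit first owns the init section, and everyone else defers to the next service event. A rough userspace analogue using a C11 atomic_flag in place of the kernel bitops; the primitives differ in detail, this only shows the mutual-exclusion shape:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag in_sfp_init = ATOMIC_FLAG_INIT;

static bool sfp_subtask(const char *who)
{
        if (atomic_flag_test_and_set(&in_sfp_init)) {
                printf("%s: busy, retry on next service event\n", who);
                return false;
        }
        printf("%s: running SFP init\n", who);
        atomic_flag_clear(&in_sfp_init);        /* clear_bit() in the driver */
        return true;
}

int main(void)
{
        /* simulate another context already inside the init section */
        atomic_flag_test_and_set(&in_sfp_init);
        sfp_subtask("detection");               /* backs off */
        atomic_flag_clear(&in_sfp_init);
        sfp_subtask("detection");               /* now enters and completes */
        return 0;
}]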
- */ - if (hw->mac.type == ixgbe_mac_82598EB) - err = hw->phy.ops.reset(hw); - else - err = hw->mac.ops.setup_sfp(hw); - - if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) - goto sfp_out; - - adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; - e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); - -sfp_out: - clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - - if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && - (adapter->netdev->reg_state == NETREG_REGISTERED)) { - e_dev_err("failed to initialize because an unsupported " - "SFP+ module type was detected.\n"); - e_dev_err("Reload the driver after installing a " - "supported module.\n"); - unregister_netdev(adapter->netdev); - } -} - -/** - * ixgbe_sfp_link_config_subtask - set up link SFP after module install - * @adapter - the ixgbe adapter structure - **/ -static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 autoneg; - bool negotiation; - - if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) - return; - - /* someone else is in init, wait until next service event */ - if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - return; - - adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; - - autoneg = hw->phy.autoneg_advertised; - if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) - hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); - hw->mac.autotry_restart = false; - if (hw->mac.ops.setup_link) - hw->mac.ops.setup_link(hw, autoneg, negotiation, true); - - adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; - adapter->link_check_timeout = jiffies; - clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); -} - -/** - * ixgbe_service_timer - Timer Call-back - * @data: pointer to adapter cast into an unsigned long - **/ -static void ixgbe_service_timer(unsigned long data) -{ - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; - unsigned long next_event_offset; - - /* poll faster when waiting for link */ - if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) - next_event_offset = HZ / 10; - else - next_event_offset = HZ * 2; - - /* Reset the timer */ - mod_timer(&adapter->service_timer, next_event_offset + jiffies); - - ixgbe_service_event_schedule(adapter); -} - -static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) -{ - if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) - return; - - adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; - - /* If we're already down or resetting, just bail */ - if (test_bit(__IXGBE_DOWN, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) - return; - - ixgbe_dump(adapter); - netdev_err(adapter->netdev, "Reset adapter\n"); - adapter->tx_timeout_count++; - - ixgbe_reinit_locked(adapter); -} - -/** - * ixgbe_service_task - manages and runs subtasks - * @work: pointer to work_struct containing our data - **/ -static void ixgbe_service_task(struct work_struct *work) -{ - struct ixgbe_adapter *adapter = container_of(work, - struct ixgbe_adapter, - service_task); - - ixgbe_reset_subtask(adapter); - ixgbe_sfp_detection_subtask(adapter); - ixgbe_sfp_link_config_subtask(adapter); - ixgbe_check_overtemp_subtask(adapter); - ixgbe_watchdog_subtask(adapter); - ixgbe_fdir_reinit_subtask(adapter); - ixgbe_check_hang_subtask(adapter); - - ixgbe_service_event_complete(adapter); -} - -void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, - u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) -{ - struct ixgbe_adv_tx_context_desc *context_desc; - u16 i = tx_ring->next_to_use; - - context_desc = 
IXGBE_TX_CTXTDESC_ADV(tx_ring, i); - - i++; - tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; - - /* set bits to identify this as an advanced context descriptor */ - type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; - - context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); - context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); -} - -static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol, u8 *hdr_len) -{ - int err; - u32 vlan_macip_lens, type_tucmd; - u32 mss_l4len_idx, l4len; - - if (!skb_is_gso(skb)) - return 0; - - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } - - /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - - if (protocol == __constant_htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - } - - l4len = tcp_hdrlen(skb); - *hdr_len = skb_transport_offset(skb) + l4len; - - /* mss_l4len_id: use 1 as index for TSO */ - mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; - mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; - - /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - vlan_macip_lens = skb_network_header_len(skb); - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, - mss_l4len_idx); - - return 1; -} - -static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, - __be16 protocol) -{ - u32 vlan_macip_lens = 0; - u32 mss_l4len_idx = 0; - u32 type_tucmd = 0; - - if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(tx_flags & IXGBE_TX_FLAGS_VLAN)) - return false; - } else { - u8 l4_hdr = 0; - switch (protocol) { - case __constant_htons(ETH_P_IP): - vlan_macip_lens |= skb_network_header_len(skb); - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - l4_hdr = ip_hdr(skb)->protocol; - break; - case __constant_htons(ETH_P_IPV6): - vlan_macip_lens |= skb_network_header_len(skb); - l4_hdr = ipv6_hdr(skb)->nexthdr; - break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - skb->protocol); - } - break; - } - - switch (l4_hdr) { - case IPPROTO_TCP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = tcp_hdrlen(skb) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_SCTP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; - mss_l4len_idx = sizeof(struct sctphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - skb->protocol); - } - break; - } - } - - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, - type_tucmd, mss_l4len_idx); - - return 
(skb->ip_summed == CHECKSUM_PARTIAL); -} - -static int ixgbe_tx_map(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, - unsigned int first, const u8 hdr_len) -{ - struct device *dev = tx_ring->dev; - struct ixgbe_tx_buffer *tx_buffer_info; - unsigned int len; - unsigned int total = skb->len; - unsigned int offset = 0, size, count = 0; - unsigned int nr_frags = skb_shinfo(skb)->nr_frags; - unsigned int f; - unsigned int bytecount = skb->len; - u16 gso_segs = 1; - u16 i; - - i = tx_ring->next_to_use; - - if (tx_flags & IXGBE_TX_FLAGS_FCOE) - /* excluding fcoe_crc_eof for FCoE */ - total -= sizeof(struct fcoe_crc_eof); - - len = min(skb_headlen(skb), total); - while (len) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); - - tx_buffer_info->length = size; - tx_buffer_info->mapped_as_page = false; - tx_buffer_info->dma = dma_map_single(dev, - skb->data + offset, - size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, tx_buffer_info->dma)) - goto dma_error; - tx_buffer_info->time_stamp = jiffies; - tx_buffer_info->next_to_watch = i; - - len -= size; - total -= size; - offset += size; - count++; - - if (len) { - i++; - if (i == tx_ring->count) - i = 0; - } - } - - for (f = 0; f < nr_frags; f++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; - len = min((unsigned int)frag->size, total); - offset = frag->page_offset; - - while (len) { - i++; - if (i == tx_ring->count) - i = 0; - - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); - - tx_buffer_info->length = size; - tx_buffer_info->dma = dma_map_page(dev, - frag->page, - offset, size, - DMA_TO_DEVICE); - tx_buffer_info->mapped_as_page = true; - if (dma_mapping_error(dev, tx_buffer_info->dma)) - goto dma_error; - tx_buffer_info->time_stamp = jiffies; - tx_buffer_info->next_to_watch = i; - - len -= size; - total -= size; - offset += size; - count++; - } - if (total == 0) - break; - } - - if (tx_flags & IXGBE_TX_FLAGS_TSO) - gso_segs = skb_shinfo(skb)->gso_segs; -#ifdef IXGBE_FCOE - /* adjust for FCoE Sequence Offload */ - else if (tx_flags & IXGBE_TX_FLAGS_FSO) - gso_segs = DIV_ROUND_UP(skb->len - hdr_len, - skb_shinfo(skb)->gso_size); -#endif /* IXGBE_FCOE */ - bytecount += (gso_segs - 1) * hdr_len; - - /* multiply data chunks by size of headers */ - tx_ring->tx_buffer_info[i].bytecount = bytecount; - tx_ring->tx_buffer_info[i].gso_segs = gso_segs; - tx_ring->tx_buffer_info[i].skb = skb; - tx_ring->tx_buffer_info[first].next_to_watch = i; - - return count; - -dma_error: - e_dev_err("TX DMA map failed\n"); - - /* clear timestamp and dma mappings for failed tx_buffer_info map */ - tx_buffer_info->dma = 0; - tx_buffer_info->time_stamp = 0; - tx_buffer_info->next_to_watch = 0; - if (count) - count--; - - /* clear timestamp and dma mappings for remaining portion of packet */ - while (count--) { - if (i == 0) - i += tx_ring->count; - i--; - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - } - - return 0; -} - -static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, - int tx_flags, int count, u32 paylen, u8 hdr_len) -{ - union ixgbe_adv_tx_desc *tx_desc = NULL; - struct ixgbe_tx_buffer *tx_buffer_info; - u32 olinfo_status = 0, cmd_type_len = 0; - unsigned int i; - u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; - - cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; - - cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | 
IXGBE_ADVTXD_DCMD_DEXT; - - if (tx_flags & IXGBE_TX_FLAGS_VLAN) - cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; - - if (tx_flags & IXGBE_TX_FLAGS_TSO) { - cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; - - olinfo_status |= IXGBE_TXD_POPTS_TXSM << - IXGBE_ADVTXD_POPTS_SHIFT; - - /* use index 1 context for tso */ - olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); - if (tx_flags & IXGBE_TX_FLAGS_IPV4) - olinfo_status |= IXGBE_TXD_POPTS_IXSM << - IXGBE_ADVTXD_POPTS_SHIFT; - - } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) - olinfo_status |= IXGBE_TXD_POPTS_TXSM << - IXGBE_ADVTXD_POPTS_SHIFT; - - if (tx_flags & IXGBE_TX_FLAGS_FCOE) { - olinfo_status |= IXGBE_ADVTXD_CC; - olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); - if (tx_flags & IXGBE_TX_FLAGS_FSO) - cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; - } - - olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); - - i = tx_ring->next_to_use; - while (count--) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); - tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); - tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type_len | tx_buffer_info->length); - tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); - i++; - if (i == tx_ring->count) - i = 0; - } - - tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); - - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - - tx_ring->next_to_use = i; - writel(i, tx_ring->tail); -} - -static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol) -{ - struct ixgbe_q_vector *q_vector = ring->q_vector; - union ixgbe_atr_hash_dword input = { .dword = 0 }; - union ixgbe_atr_hash_dword common = { .dword = 0 }; - union { - unsigned char *network; - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - } hdr; - struct tcphdr *th; - __be16 vlan_id; - - /* if ring doesn't have a interrupt vector, cannot perform ATR */ - if (!q_vector) - return; - - /* do nothing if sampling is disabled */ - if (!ring->atr_sample_rate) - return; - - ring->atr_count++; - - /* snag network header to get L4 type and address */ - hdr.network = skb_network_header(skb); - - /* Currently only IPv4/IPv6 with TCP is supported */ - if ((protocol != __constant_htons(ETH_P_IPV6) || - hdr.ipv6->nexthdr != IPPROTO_TCP) && - (protocol != __constant_htons(ETH_P_IP) || - hdr.ipv4->protocol != IPPROTO_TCP)) - return; - - th = tcp_hdr(skb); - - /* skip this packet since the socket is closing */ - if (th->fin) - return; - - /* sample on all syn packets or once every atr sample count */ - if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) - return; - - /* reset sample count */ - ring->atr_count = 0; - - vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); - - /* - * src and dst are inverted, think how the receiver sees them - * - * The input is broken into two sections, a non-compressed section - * containing vm_pool, vlan_id, and flow_type. The rest of the data - * is XORed together and stored in the compressed dword. 
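[The comment above describes the compression scheme: everything beyond vm_pool, vlan_id, and flow_type is XOR-folded into a single dword plus a port word, with source and destination swapped so the filter matches the receive-side view of the flow. A sketch of the IPv4 fold with arbitrary sample values; the real code operates on big-endian wire fields, which this host-order demo glosses over:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t saddr = 0xc0a80101;    /* 192.168.1.1, tx-side source */
        uint32_t daddr = 0xc0a80102;    /* 192.168.1.2, tx-side destination */
        uint16_t sport = 12345, dport = 80;
        uint16_t vlan_id = 0;
        uint16_t ethertype = 0x0800;    /* ETH_P_IP */

        /* src/dst inverted: the receiver sees our source as its destination */
        uint16_t port_src = dport ^ (vlan_id ? 0x8100 : ethertype);
        uint16_t port_dst = sport;
        uint32_t common_ip = saddr ^ daddr;

        printf("compressed: ports %04x/%04x ip %08x\n",
               port_src, port_dst, (unsigned)common_ip);
        return 0;
}]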
- */ - input.formatted.vlan_id = vlan_id; - - /* - * since src port and flex bytes occupy the same word XOR them together - * and write the value to source port portion of compressed dword - */ - if (vlan_id) - common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); - else - common.port.src ^= th->dest ^ protocol; - common.port.dst ^= th->source; - - if (protocol == __constant_htons(ETH_P_IP)) { - input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; - common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; - } else { - input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; - common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ - hdr.ipv6->saddr.s6_addr32[1] ^ - hdr.ipv6->saddr.s6_addr32[2] ^ - hdr.ipv6->saddr.s6_addr32[3] ^ - hdr.ipv6->daddr.s6_addr32[0] ^ - hdr.ipv6->daddr.s6_addr32[1] ^ - hdr.ipv6->daddr.s6_addr32[2] ^ - hdr.ipv6->daddr.s6_addr32[3]; - } - - /* This assumes the Rx queue and Tx queue are bound to the same CPU */ - ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, - input, common, ring->queue_index); -} - -static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. */ - if (likely(ixgbe_desc_unused(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! - use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - -static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) -{ - if (likely(ixgbe_desc_unused(tx_ring) >= size)) - return 0; - return __ixgbe_maybe_stop_tx(tx_ring, size); -} - -static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : - smp_processor_id(); -#ifdef IXGBE_FCOE - __be16 protocol = vlan_get_protocol(skb); - - if (((protocol == htons(ETH_P_FCOE)) || - (protocol == htons(ETH_P_FIP))) && - (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { - txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); - txq += adapter->ring_feature[RING_F_FCOE].mask; - return txq; - } -#endif - - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - while (unlikely(txq >= dev->real_num_tx_queues)) - txq -= dev->real_num_tx_queues; - return txq; - } - - return skb_tx_hash(dev, skb); -} - -netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, - struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) -{ - int tso; - u32 tx_flags = 0; -#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD - unsigned short f; -#endif - u16 first; - u16 count = TXD_USE_COUNT(skb_headlen(skb)); - __be16 protocol; - u8 hdr_len = 0; - - /* - * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, - * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD, - * + 2 desc gap to keep tail from touching head, - * + 1 desc for context descriptor, - * otherwise try next time - */ -#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); -#else - count += skb_shinfo(skb)->nr_frags; -#endif - if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { - tx_ring->tx_stats.tx_busy++; - return NETDEV_TX_BUSY; - } - - protocol = vlan_get_protocol(skb); - - if (vlan_tx_tag_present(skb)) { - tx_flags |= vlan_tx_tag_get(skb); - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; - tx_flags |= tx_ring->dcb_tc << 13; - } - tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IXGBE_TX_FLAGS_VLAN; - } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && - skb->priority != TC_PRIO_CONTROL) { - tx_flags |= tx_ring->dcb_tc << 13; - tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IXGBE_TX_FLAGS_VLAN; - } - -#ifdef IXGBE_FCOE - /* for FCoE with DCB, we force the priority to what - * was specified by the switch */ - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && - (protocol == htons(ETH_P_FCOE))) - tx_flags |= IXGBE_TX_FLAGS_FCOE; - -#endif - /* record the location of the first descriptor for this packet */ - first = tx_ring->next_to_use; - - if (tx_flags & IXGBE_TX_FLAGS_FCOE) { -#ifdef IXGBE_FCOE - /* setup tx offload for FCoE */ - tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len); - if (tso < 0) - goto out_drop; - else if (tso) - tx_flags |= IXGBE_TX_FLAGS_FSO; -#endif /* IXGBE_FCOE */ - } else { - if (protocol == htons(ETH_P_IP)) - tx_flags |= IXGBE_TX_FLAGS_IPV4; - tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len); - if (tso < 0) - goto out_drop; - else if (tso) - tx_flags |= IXGBE_TX_FLAGS_TSO; - else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol)) - tx_flags |= IXGBE_TX_FLAGS_CSUM; - } - - count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); - if (count) { - /* add the ATR filter if ATR is on */ - if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) - ixgbe_atr(tx_ring, skb, tx_flags, protocol); - ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); - ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); - - } else { - tx_ring->tx_buffer_info[first].time_stamp = 0; - tx_ring->next_to_use = first; - goto out_drop; - } - - return NETDEV_TX_OK; - -out_drop: - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - -static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct 
ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *tx_ring; - - tx_ring = adapter->tx_ring[skb->queue_mapping]; - return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); -} - -/** - * ixgbe_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int ixgbe_set_mac(struct net_device *netdev, void *p) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - - hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, - IXGBE_RAH_AV); - - return 0; -} - -static int -ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u16 value; - int rc; - - if (prtad != hw->phy.mdio.prtad) - return -EINVAL; - rc = hw->phy.ops.read_reg(hw, addr, devad, &value); - if (!rc) - rc = value; - return rc; -} - -static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, - u16 addr, u16 value) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if (prtad != hw->phy.mdio.prtad) - return -EINVAL; - return hw->phy.ops.write_reg(hw, addr, devad, value); -} - -static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); -} - -/** - * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding - * netdev->dev_addrs - * @netdev: network interface device structure - * - * Returns non-zero on failure - **/ -static int ixgbe_add_sanmac_netdev(struct net_device *dev) -{ - int err = 0; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_mac_info *mac = &adapter->hw.mac; - - if (is_valid_ether_addr(mac->san_addr)) { - rtnl_lock(); - err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); - rtnl_unlock(); - } - return err; -} - -/** - * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding - * netdev->dev_addrs - * @netdev: network interface device structure - * - * Returns non-zero on failure - **/ -static int ixgbe_del_sanmac_netdev(struct net_device *dev) -{ - int err = 0; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_mac_info *mac = &adapter->hw.mac; - - if (is_valid_ether_addr(mac->san_addr)) { - rtnl_lock(); - err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); - rtnl_unlock(); - } - return err; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. 
- */ -static void ixgbe_netpoll(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; - - /* if interface is down do nothing */ - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; - - adapter->flags |= IXGBE_FLAG_IN_NETPOLL; - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - for (i = 0; i < num_q_vectors; i++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; - ixgbe_msix_clean_many(0, q_vector); - } - } else { - ixgbe_intr(adapter->pdev->irq, netdev); - } - adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; -} -#endif - -static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; - - rcu_read_lock(); - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); - u64 bytes, packets; - unsigned int start; - - if (ring) { - do { - start = u64_stats_fetch_begin_bh(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); - stats->rx_packets += packets; - stats->rx_bytes += bytes; - } - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); - u64 bytes, packets; - unsigned int start; - - if (ring) { - do { - start = u64_stats_fetch_begin_bh(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); - stats->tx_packets += packets; - stats->tx_bytes += bytes; - } - } - rcu_read_unlock(); - /* following stats updated by ixgbe_watchdog_task() */ - stats->multicast = netdev->stats.multicast; - stats->rx_errors = netdev->stats.rx_errors; - stats->rx_length_errors = netdev->stats.rx_length_errors; - stats->rx_crc_errors = netdev->stats.rx_crc_errors; - stats->rx_missed_errors = netdev->stats.rx_missed_errors; - return stats; -} - -/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. - * #adapter: pointer to ixgbe_adapter - * @tc: number of traffic classes currently enabled - * - * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm - * 802.1Q priority maps to a packet buffer that exists. - */ -static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 reg, rsave; - int i; - - /* 82598 have a static priority to TC mapping that can not - * be changed so no validation is needed. - */ - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); - rsave = reg; - - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); - - /* If up2tc is out of bounds default to zero */ - if (up2tc > tc) - reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); - } - - if (reg != rsave) - IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); - - return; -} - - -/* ixgbe_setup_tc - routine to configure net_device for multiple traffic - * classes. 
- * - * @netdev: net device to configure - * @tc: number of traffic classes to enable - */ -int ixgbe_setup_tc(struct net_device *dev, u8 tc) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - - /* If DCB is anabled do not remove traffic classes, multiple - * traffic classes are required to implement DCB - */ - if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - return 0; - - /* Hardware supports up to 8 traffic classes */ - if (tc > MAX_TRAFFIC_CLASS || - (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS)) - return -EINVAL; - - /* Hardware has to reinitialize queues and interrupts to - * match packet buffer alignment. Unfortunantly, the - * hardware is not flexible enough to do this dynamically. - */ - if (netif_running(dev)) - ixgbe_close(dev); - ixgbe_clear_interrupt_scheme(adapter); - - if (tc) - netdev_set_num_tc(dev, tc); - else - netdev_reset_tc(dev); - - ixgbe_init_interrupt_scheme(adapter); - ixgbe_validate_rtr(adapter, tc); - if (netif_running(dev)) - ixgbe_open(dev); - - return 0; -} - -void ixgbe_do_reset(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - else - ixgbe_reset(adapter); -} - -static u32 ixgbe_fix_features(struct net_device *netdev, u32 data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - -#ifdef CONFIG_DCB - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) - data &= ~NETIF_F_HW_VLAN_RX; -#endif - - /* return error if RXHASH is being enabled when RSS is not supported */ - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - data &= ~NETIF_F_RXHASH; - - /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ - if (!(data & NETIF_F_RXCSUM)) - data &= ~NETIF_F_LRO; - - /* Turn off LRO if not RSC capable or invalid ITR settings */ - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { - data &= ~NETIF_F_LRO; - } else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && - (adapter->rx_itr_setting != 1 && - adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) { - data &= ~NETIF_F_LRO; - e_info(probe, "rx-usecs set too low, not enabling RSC\n"); - } - - return data; -} - -static int ixgbe_set_features(struct net_device *netdev, u32 data) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - bool need_reset = false; - - /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ - if (!(data & NETIF_F_RXCSUM)) - adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; - else - adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; - - /* Make sure RSC matches LRO, reset if change */ - if (!!(data & NETIF_F_LRO) != - !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { - adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; - switch (adapter->hw.mac.type) { - case ixgbe_mac_X540: - case ixgbe_mac_82599EB: - need_reset = true; - break; - default: - break; - } - } - - /* - * Check if Flow Director n-tuple support was enabled or disabled. If - * the state changed, we need to reset. 
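 *
 * [Editorial note] This callback is reached through the netdev features
 * framework, e.g. when user space runs (ethX is a placeholder):
 *
 *	ethtool -K ethX ntuple on
 *
 * which requests NETIF_F_NTUPLE in @data and, per the logic below,
 * trades ATR (hashed) filtering for perfect Flow Director filters.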
- */ - if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { - /* turn off ATR, enable perfect filters and reset */ - if (data & NETIF_F_NTUPLE) { - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - need_reset = true; - } - } else if (!(data & NETIF_F_NTUPLE)) { - /* turn off Flow Director, set ATR and reset */ - adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && - !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - need_reset = true; - } - - if (need_reset) - ixgbe_do_reset(netdev); - - return 0; - -} - -static const struct net_device_ops ixgbe_netdev_ops = { - .ndo_open = ixgbe_open, - .ndo_stop = ixgbe_close, - .ndo_start_xmit = ixgbe_xmit_frame, - .ndo_select_queue = ixgbe_select_queue, - .ndo_set_rx_mode = ixgbe_set_rx_mode, - .ndo_set_multicast_list = ixgbe_set_rx_mode, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = ixgbe_set_mac, - .ndo_change_mtu = ixgbe_change_mtu, - .ndo_tx_timeout = ixgbe_tx_timeout, - .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, - .ndo_do_ioctl = ixgbe_ioctl, - .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, - .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, - .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, - .ndo_get_vf_config = ixgbe_ndo_get_vf_config, - .ndo_get_stats64 = ixgbe_get_stats64, - .ndo_setup_tc = ixgbe_setup_tc, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgbe_netpoll, -#endif -#ifdef IXGBE_FCOE - .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, - .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, - .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, - .ndo_fcoe_enable = ixgbe_fcoe_enable, - .ndo_fcoe_disable = ixgbe_fcoe_disable, - .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, -#endif /* IXGBE_FCOE */ - .ndo_set_features = ixgbe_set_features, - .ndo_fix_features = ixgbe_fix_features, -}; - -static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, - const struct ixgbe_info *ii) -{ -#ifdef CONFIG_PCI_IOV - struct ixgbe_hw *hw = &adapter->hw; - int err; - int num_vf_macvlans, i; - struct vf_macvlans *mv_list; - - if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs) - return; - - /* The 82599 supports up to 64 VFs per physical function - * but this implementation limits allocation to 63 so that - * basic networking resources are still available to the - * physical function - */ - adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs; - adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; - err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); - if (err) { - e_err(probe, "Failed to enable PCI sriov: %d\n", err); - goto err_novfs; - } - - num_vf_macvlans = hw->mac.num_rar_entries - - (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); - - adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, - sizeof(struct vf_macvlans), - GFP_KERNEL); - if (mv_list) { - /* Initialize list of VF macvlans */ - INIT_LIST_HEAD(&adapter->vf_mvs.l); - for (i = 0; i < num_vf_macvlans; i++) { - mv_list->vf = -1; - mv_list->free = true; - mv_list->rar_entry = hw->mac.num_rar_entries - - (i + adapter->num_vfs + 1); - list_add(&mv_list->l, &adapter->vf_mvs.l); - mv_list++; - } - } - - /* If call to enable VFs succeeded then allocate memory - * for per VF control structures. 
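 *
 * [Editorial worked example, assuming the 82599's 128 RAR entries and
 * IXGBE_MAX_PF_MACVLANS == 15] With the maximum of 63 VFs the macvlan
 * budget computed above is:
 *
 *	num_vf_macvlans = 128 - (15 + 1 + 63) = 49
 *
 * leaving 49 RAR slots available for VF macvlan filters.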
- */ - adapter->vfinfo = - kcalloc(adapter->num_vfs, - sizeof(struct vf_data_storage), GFP_KERNEL); - if (adapter->vfinfo) { - /* Now that we're sure SR-IOV is enabled - * and memory allocated set up the mailbox parameters - */ - ixgbe_init_mbx_params_pf(hw); - memcpy(&hw->mbx.ops, ii->mbx_ops, - sizeof(hw->mbx.ops)); - - /* Disable RSC when in SR-IOV mode */ - adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | - IXGBE_FLAG2_RSC_ENABLED); - return; - } - - /* Oh oh */ - e_err(probe, "Unable to allocate memory for VF Data Storage - " - "SRIOV disabled\n"); - pci_disable_sriov(adapter->pdev); - -err_novfs: - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; - adapter->num_vfs = 0; -#endif /* CONFIG_PCI_IOV */ -} - -/** - * ixgbe_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in ixgbe_pci_tbl - * - * Returns 0 on success, negative on failure - * - * ixgbe_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. - **/ -static int __devinit ixgbe_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct ixgbe_adapter *adapter = NULL; - struct ixgbe_hw *hw; - const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; - static int cards_found; - int i, err, pci_using_dac; - u8 part_str[IXGBE_PBANUM_LENGTH]; - unsigned int indices = num_possible_cpus(); -#ifdef IXGBE_FCOE - u16 device_caps; -#endif - u32 eec; - - /* Catch broken hardware that put the wrong VF device ID in - * the PCIe SR-IOV capability. - */ - if (pdev->is_virtfn) { - WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", - pci_name(pdev), pdev->vendor, pdev->device); - return -EINVAL; - } - - err = pci_enable_device_mem(pdev); - if (err) - return err; - - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && - !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { - pci_using_dac = 1; - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_dma; - } - } - pci_using_dac = 0; - } - - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), ixgbe_driver_name); - if (err) { - dev_err(&pdev->dev, - "pci_request_selected_regions failed 0x%x\n", err); - goto err_pci_reg; - } - - pci_enable_pcie_error_reporting(pdev); - - pci_set_master(pdev); - pci_save_state(pdev); - -#ifdef CONFIG_IXGBE_DCB - indices *= MAX_TRAFFIC_CLASS; -#endif - - if (ii->mac == ixgbe_mac_82598EB) - indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); - else - indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); - -#ifdef IXGBE_FCOE - indices += min_t(unsigned int, num_possible_cpus(), - IXGBE_MAX_FCOE_INDICES); -#endif - netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); - if (!netdev) { - err = -ENOMEM; - goto err_alloc_etherdev; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - adapter = netdev_priv(netdev); - pci_set_drvdata(pdev, adapter); - - adapter->netdev = netdev; - adapter->pdev = pdev; - hw = &adapter->hw; - hw->back = adapter; - adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - - hw->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!hw->hw_addr) { - err = -EIO; - goto err_ioremap; - } - - for (i = 1; i <= 5; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - } - - 
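/*
 * [Editorial sketch, not driver code] The DMA configuration earlier in
 * this probe routine is the standard try-64-bit-then-fall-back-to-32
 * pattern; distilled into a hypothetical helper it would read:
 *
 *	static int my_set_dma_mask(struct pci_dev *pdev, bool *dac)
 *	{
 *		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *		    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 *			*dac = true;	// 64-bit streaming + coherent DMA
 *			return 0;
 *		}
 *		*dac = false;		// fall back to 32-bit addressing
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
 *		    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;	// no usable DMA configuration
 *		return 0;
 *	}
 */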
netdev->netdev_ops = &ixgbe_netdev_ops; - ixgbe_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - - adapter->bd_number = cards_found; - - /* Setup hw api */ - memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); - hw->mac.type = ii->mac; - - /* EEPROM */ - memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ - if (!(eec & (1 << 8))) - hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; - - /* PHY */ - memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); - hw->phy.sfp_type = ixgbe_sfp_type_unknown; - /* ixgbe_identify_phy_generic will set prtad and mmds properly */ - hw->phy.mdio.prtad = MDIO_PRTAD_NONE; - hw->phy.mdio.mmds = 0; - hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; - hw->phy.mdio.dev = netdev; - hw->phy.mdio.mdio_read = ixgbe_mdio_read; - hw->phy.mdio.mdio_write = ixgbe_mdio_write; - - ii->get_invariants(hw); - - /* setup the private structure */ - err = ixgbe_sw_init(adapter); - if (err) - goto err_sw_init; - - /* Make it possible the adapter to be woken up via WOL */ - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); - break; - default: - break; - } - - /* - * If there is a fan on this device and it has failed log the - * failure. - */ - if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { - u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - if (esdp & IXGBE_ESDP_SDP1) - e_crit(probe, "Fan has stopped, replace the adapter\n"); - } - - /* reset_hw fills in the perm_addr as well */ - hw->phy.reset_if_overtemp = true; - err = hw->mac.ops.reset_hw(hw); - hw->phy.reset_if_overtemp = false; - if (err == IXGBE_ERR_SFP_NOT_PRESENT && - hw->mac.type == ixgbe_mac_82598EB) { - err = 0; - } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - e_dev_err("failed to load because an unsupported SFP+ " - "module type was detected.\n"); - e_dev_err("Reload the driver after installing a supported " - "module.\n"); - goto err_sw_init; - } else if (err) { - e_dev_err("HW Init failed: %d\n", err); - goto err_sw_init; - } - - ixgbe_probe_vf(adapter, ii); - - netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_GRO | - NETIF_F_RXHASH | - NETIF_F_RXCSUM; - - netdev->hw_features = netdev->features; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - netdev->features |= NETIF_F_SCTP_CSUM; - netdev->hw_features |= NETIF_F_SCTP_CSUM | - NETIF_F_NTUPLE; - break; - default: - break; - } - - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_IP_CSUM; - netdev->vlan_features |= NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_SG; - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | - IXGBE_FLAG_DCB_ENABLED); - -#ifdef CONFIG_IXGBE_DCB - netdev->dcbnl_ops = &dcbnl_ops; -#endif - -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { - if (hw->mac.ops.get_device_caps) { - hw->mac.ops.get_device_caps(hw, &device_caps); - if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) - adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; - } - } - if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { - netdev->vlan_features |= NETIF_F_FCOE_CRC; - 
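/*
 * [Editorial note] In the features framework used here, hw_features is
 * the set ethtool may toggle at runtime, while features is the set
 * currently enabled; bits present only in features are fixed-on. That
 * is why NETIF_F_NTUPLE above goes into hw_features only (off by
 * default, user-enabled), whereas most offloads are placed in both.
 */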
netdev->vlan_features |= NETIF_F_FSO; - netdev->vlan_features |= NETIF_F_FCOE_MTU; - } -#endif /* IXGBE_FCOE */ - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } - - if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) - netdev->hw_features |= NETIF_F_LRO; - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - netdev->features |= NETIF_F_LRO; - - /* make sure the EEPROM is good */ - if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { - e_dev_err("The EEPROM Checksum Is Not Valid\n"); - err = -EIO; - goto err_eeprom; - } - - memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); - memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); - - if (ixgbe_validate_mac_addr(netdev->perm_addr)) { - e_dev_err("invalid MAC address\n"); - err = -EIO; - goto err_eeprom; - } - - /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ - if (hw->mac.ops.disable_tx_laser && - ((hw->phy.multispeed_fiber) || - ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && - (hw->mac.type == ixgbe_mac_82599EB)))) - hw->mac.ops.disable_tx_laser(hw); - - setup_timer(&adapter->service_timer, &ixgbe_service_timer, - (unsigned long) adapter); - - INIT_WORK(&adapter->service_task, ixgbe_service_task); - clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); - - err = ixgbe_init_interrupt_scheme(adapter); - if (err) - goto err_sw_init; - - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { - netdev->hw_features &= ~NETIF_F_RXHASH; - netdev->features &= ~NETIF_F_RXHASH; - } - - switch (pdev->device) { - case IXGBE_DEV_ID_82599_SFP: - /* Only this subdevice supports WOL */ - if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP) - adapter->wol = IXGBE_WUFC_MAG; - break; - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: - /* All except this subdevice support WOL */ - if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) - adapter->wol = IXGBE_WUFC_MAG; - break; - case IXGBE_DEV_ID_82599_KX4: - adapter->wol = IXGBE_WUFC_MAG; - break; - default: - adapter->wol = 0; - break; - } - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); - - /* pick up the PCI bus settings for reporting later */ - hw->mac.ops.get_bus_info(hw); - - /* print bus type/speed/width info */ - e_dev_info("(PCI Express:%s:%s) %pM\n", - (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" : - hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" : - "Unknown"), - (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : - hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : - hw->bus.width == ixgbe_bus_width_pcie_x1 ? 
"Width x1" : - "Unknown"), - netdev->dev_addr); - - err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); - if (err) - strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); - if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) - e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, hw->phy.sfp_type, - part_str); - else - e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, part_str); - - if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { - e_dev_warn("PCI-Express bandwidth available for this card is " - "not sufficient for optimal performance.\n"); - e_dev_warn("For optimal performance a x8 PCI-Express slot " - "is required.\n"); - } - - /* save off EEPROM version number */ - hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version); - - /* reset the hardware with the new settings */ - err = hw->mac.ops.start_hw(hw); - - if (err == IXGBE_ERR_EEPROM_VERSION) { - /* We are running on a pre-production device, log a warning */ - e_dev_warn("This device is a pre-production adapter/LOM. " - "Please be aware there may be issues associated " - "with your hardware. If you are experiencing " - "problems please contact your Intel or hardware " - "representative who provided you with this " - "hardware.\n"); - } - strcpy(netdev->name, "eth%d"); - err = register_netdev(netdev); - if (err) - goto err_register; - - /* carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - -#ifdef CONFIG_IXGBE_DCA - if (dca_add_requester(&pdev->dev) == 0) { - adapter->flags |= IXGBE_FLAG_DCA_ENABLED; - ixgbe_setup_dca(adapter); - } -#endif - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); - for (i = 0; i < adapter->num_vfs; i++) - ixgbe_vf_configuration(pdev, (i | 0x10000000)); - } - - /* Inform firmware of driver version */ - if (hw->mac.ops.set_fw_drv_ver) - hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD, - FW_CEM_UNUSED_VER); - - /* add san mac addr to netdev */ - ixgbe_add_sanmac_netdev(netdev); - - e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); - cards_found++; - return 0; - -err_register: - ixgbe_release_hw_control(adapter); - ixgbe_clear_interrupt_scheme(adapter); -err_sw_init: -err_eeprom: - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - ixgbe_disable_sriov(adapter); - adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; - iounmap(hw->hw_addr); -err_ioremap: - free_netdev(netdev); -err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); -err_pci_reg: -err_dma: - pci_disable_device(pdev); - return err; -} - -/** - * ixgbe_remove - Device Removal Routine - * @pdev: PCI device information struct - * - * ixgbe_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. 
- **/ -static void __devexit ixgbe_remove(struct pci_dev *pdev) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - - set_bit(__IXGBE_DOWN, &adapter->state); - cancel_work_sync(&adapter->service_task); - -#ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; - dca_remove_requester(&pdev->dev); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); - } - -#endif -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - ixgbe_cleanup_fcoe(adapter); - -#endif /* IXGBE_FCOE */ - - /* remove the added san mac */ - ixgbe_del_sanmac_netdev(netdev); - - if (netdev->reg_state == NETREG_REGISTERED) - unregister_netdev(netdev); - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - ixgbe_disable_sriov(adapter); - - ixgbe_clear_interrupt_scheme(adapter); - - ixgbe_release_hw_control(adapter); - - iounmap(adapter->hw.hw_addr); - pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); - - e_dev_info("complete\n"); - - free_netdev(netdev); - - pci_disable_pcie_error_reporting(pdev); - - pci_disable_device(pdev); -} - -/** - * ixgbe_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state - * - * This function is called after a PCI bus error affecting - * this device has been detected. - */ -static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - - netif_device_detach(netdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (netif_running(netdev)) - ixgbe_down(adapter); - pci_disable_device(pdev); - - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * ixgbe_io_slot_reset - called after the pci bus has been reset. - * @pdev: Pointer to PCI device - * - * Restart the card from scratch, as if from a cold-boot. - */ -static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - pci_ers_result_t result; - int err; - - if (pci_enable_device_mem(pdev)) { - e_err(probe, "Cannot re-enable PCI device after reset.\n"); - result = PCI_ERS_RESULT_DISCONNECT; - } else { - pci_set_master(pdev); - pci_restore_state(pdev); - pci_save_state(pdev); - - pci_wake_from_d3(pdev, false); - - ixgbe_reset(adapter); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); - result = PCI_ERS_RESULT_RECOVERED; - } - - err = pci_cleanup_aer_uncorrect_error_status(pdev); - if (err) { - e_dev_err("pci_cleanup_aer_uncorrect_error_status " - "failed 0x%0x\n", err); - /* non-fatal, continue */ - } - - return result; -} - -/** - * ixgbe_io_resume - called when traffic can start flowing again. - * @pdev: Pointer to PCI device - * - * This callback is called when the error recovery driver tells us that - * its OK to resume normal operation. 
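 *
 * [Editorial note] This completes the PCI AER recovery sequence driven
 * by the core: ->error_detected() detaches the netdev and requests a
 * reset, ->slot_reset() re-enables and resets the device, and this
 * ->resume() callback brings the interface back up and re-attaches it.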
- */ -static void ixgbe_io_resume(struct pci_dev *pdev) -{ - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - - if (netif_running(netdev)) { - if (ixgbe_up(adapter)) { - e_info(probe, "ixgbe_up failed after reset\n"); - return; - } - } - - netif_device_attach(netdev); -} - -static struct pci_error_handlers ixgbe_err_handler = { - .error_detected = ixgbe_io_error_detected, - .slot_reset = ixgbe_io_slot_reset, - .resume = ixgbe_io_resume, -}; - -static struct pci_driver ixgbe_driver = { - .name = ixgbe_driver_name, - .id_table = ixgbe_pci_tbl, - .probe = ixgbe_probe, - .remove = __devexit_p(ixgbe_remove), -#ifdef CONFIG_PM - .suspend = ixgbe_suspend, - .resume = ixgbe_resume, -#endif - .shutdown = ixgbe_shutdown, - .err_handler = &ixgbe_err_handler -}; - -/** - * ixgbe_init_module - Driver Registration Routine - * - * ixgbe_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - **/ -static int __init ixgbe_init_module(void) -{ - int ret; - pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); - pr_info("%s\n", ixgbe_copyright); - -#ifdef CONFIG_IXGBE_DCA - dca_register_notify(&dca_notifier); -#endif - - ret = pci_register_driver(&ixgbe_driver); - return ret; -} - -module_init(ixgbe_init_module); - -/** - * ixgbe_exit_module - Driver Exit Cleanup Routine - * - * ixgbe_exit_module is called just before the driver is removed - * from memory. - **/ -static void __exit ixgbe_exit_module(void) -{ -#ifdef CONFIG_IXGBE_DCA - dca_unregister_notify(&dca_notifier); -#endif - pci_unregister_driver(&ixgbe_driver); - rcu_barrier(); /* Wait for completion of call_rcu()'s */ -} - -#ifdef CONFIG_IXGBE_DCA -static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, - void *p) -{ - int ret_val; - - ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, - __ixgbe_notify_dca); - - return ret_val ? NOTIFY_BAD : NOTIFY_DONE; -} - -#endif /* CONFIG_IXGBE_DCA */ - -module_exit(ixgbe_exit_module); - -/* ixgbe_main.c */ diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c deleted file mode 100644 index 1ff0eef..0000000 --- a/drivers/net/ixgbe/ixgbe_mbx.c +++ /dev/null @@ -1,471 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include -#include "ixgbe_type.h" -#include "ixgbe_common.h" -#include "ixgbe_mbx.h" - -/** - * ixgbe_read_mbx - Reads a message from the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to read - * - * returns SUCCESS if it successfuly read message from buffer - **/ -s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - /* limit read to size of mailbox */ - if (size > mbx->size) - size = mbx->size; - - if (mbx->ops.read) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); - - return ret_val; -} - -/** - * ixgbe_write_mbx - Write a message to the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully copied message into the buffer - **/ -s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = 0; - - if (size > mbx->size) - ret_val = IXGBE_ERR_MBX; - - else if (mbx->ops.write) - ret_val = mbx->ops.write(hw, msg, size, mbx_id); - - return ret_val; -} - -/** - * ixgbe_check_for_msg - checks to see if someone sent us mail - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ -s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - if (mbx->ops.check_for_msg) - ret_val = mbx->ops.check_for_msg(hw, mbx_id); - - return ret_val; -} - -/** - * ixgbe_check_for_ack - checks to see if someone sent us ACK - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ -s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - if (mbx->ops.check_for_ack) - ret_val = mbx->ops.check_for_ack(hw, mbx_id); - - return ret_val; -} - -/** - * ixgbe_check_for_rst - checks to see if other side has reset - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to check - * - * returns SUCCESS if the Status bit was found or else ERR_MBX - **/ -s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - if (mbx->ops.check_for_rst) - ret_val = mbx->ops.check_for_rst(hw, mbx_id); - - return ret_val; -} - -/** - * ixgbe_poll_for_msg - Wait for message notification - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message notification - **/ -static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - if (!countdown || !mbx->ops.check_for_msg) - goto out; - - while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { - countdown--; - if (!countdown) - break; - udelay(mbx->usec_delay); - } - -out: - return countdown ? 
0 : IXGBE_ERR_MBX; -} - -/** - * ixgbe_poll_for_ack - Wait for message acknowledgement - * @hw: pointer to the HW structure - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message acknowledgement - **/ -static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - if (!countdown || !mbx->ops.check_for_ack) - goto out; - - while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { - countdown--; - if (!countdown) - break; - udelay(mbx->usec_delay); - } - -out: - return countdown ? 0 : IXGBE_ERR_MBX; -} - -/** - * ixgbe_read_posted_mbx - Wait for message notification and receive message - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully received a message notification and - * copied it into the receive buffer. - **/ -static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - if (!mbx->ops.read) - goto out; - - ret_val = ixgbe_poll_for_msg(hw, mbx_id); - - /* if ack received read message, otherwise we timed out */ - if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); -out: - return ret_val; -} - -/** - * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @mbx_id: id of mailbox to write - * - * returns SUCCESS if it successfully copied message into the buffer and - * received an ack to that message within delay * timeout period - **/ -static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 mbx_id) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops.write || !mbx->timeout) - goto out; - - /* send msg */ - ret_val = mbx->ops.write(hw, msg, size, mbx_id); - - /* if msg sent wait until we receive an ack */ - if (!ret_val) - ret_val = ixgbe_poll_for_ack(hw, mbx_id); -out: - return ret_val; -} - -static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) -{ - u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); - s32 ret_val = IXGBE_ERR_MBX; - - if (mbvficr & mask) { - ret_val = 0; - IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); - } - - return ret_val; -} - -/** - * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) -{ - s32 ret_val = IXGBE_ERR_MBX; - s32 index = IXGBE_MBVFICR_INDEX(vf_number); - u32 vf_bit = vf_number % 16; - - if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, - index)) { - ret_val = 0; - hw->mbx.stats.reqs++; - } - - return ret_val; -} - -/** - * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) -{ - s32 ret_val = IXGBE_ERR_MBX; - s32 index = IXGBE_MBVFICR_INDEX(vf_number); - u32 vf_bit = vf_number % 16; - - if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, - index)) { - 
ret_val = 0; - hw->mbx.stats.acks++; - } - - return ret_val; -} - -/** - * ixgbe_check_for_rst_pf - checks to see if the VF has reset - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * returns SUCCESS if the VF has set the Status bit or else ERR_MBX - **/ -static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) -{ - u32 reg_offset = (vf_number < 32) ? 0 : 1; - u32 vf_shift = vf_number % 32; - u32 vflre = 0; - s32 ret_val = IXGBE_ERR_MBX; - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); - break; - case ixgbe_mac_X540: - vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); - break; - default: - break; - } - - if (vflre & (1 << vf_shift)) { - ret_val = 0; - IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); - hw->mbx.stats.rsts++; - } - - return ret_val; -} - -/** - * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock - * @hw: pointer to the HW structure - * @vf_number: the VF index - * - * return SUCCESS if we obtained the mailbox lock - **/ -static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) -{ - s32 ret_val = IXGBE_ERR_MBX; - u32 p2v_mailbox; - - /* Take ownership of the buffer */ - IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); - - /* reserve mailbox for vf use */ - p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); - if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) - ret_val = 0; - - return ret_val; -} - -/** - * ixgbe_write_mbx_pf - Places a message in the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @vf_number: the VF index - * - * returns SUCCESS if it successfully copied message into the buffer - **/ -static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 vf_number) -{ - s32 ret_val; - u16 i; - - /* lock the mailbox to prevent pf/vf race condition */ - ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); - if (ret_val) - goto out_no_write; - - /* flush msg and acks as we are overwriting the message buffer */ - ixgbe_check_for_msg_pf(hw, vf_number); - ixgbe_check_for_ack_pf(hw, vf_number); - - /* copy the caller specified message to the mailbox memory buffer */ - for (i = 0; i < size; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); - - /* Interrupt VF to tell it a message has been sent and release buffer*/ - IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); - - /* update stats */ - hw->mbx.stats.msgs_tx++; - -out_no_write: - return ret_val; - -} - -/** - * ixgbe_read_mbx_pf - Read a message from the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * @vf_number: the VF index - * - * This function copies a message from the mailbox buffer to the caller's - * memory buffer. The presumption is that the caller knows that there was - * a message due to a VF request so no polling for message is needed. 
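 *
 * [Editorial summary of the sequence implemented below] take the
 * mailbox lock (PFU), copy @size dwords out of PFMBMEM, then write
 * IXGBE_PFMAILBOX_ACK, which releases the buffer and notifies the VF.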
- **/ -static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 vf_number) -{ - s32 ret_val; - u16 i; - - /* lock the mailbox to prevent pf/vf race condition */ - ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); - if (ret_val) - goto out_no_read; - - /* copy the message to the mailbox memory buffer */ - for (i = 0; i < size; i++) - msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); - - /* Acknowledge the message and release buffer */ - IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); - - /* update stats */ - hw->mbx.stats.msgs_rx++; - -out_no_read: - return ret_val; -} - -#ifdef CONFIG_PCI_IOV -/** - * ixgbe_init_mbx_params_pf - set initial values for pf mailbox - * @hw: pointer to the HW structure - * - * Initializes the hw->mbx struct to correct values for pf mailbox - */ -void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - - if (hw->mac.type != ixgbe_mac_82599EB && - hw->mac.type != ixgbe_mac_X540) - return; - - mbx->timeout = 0; - mbx->usec_delay = 0; - - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; - - mbx->size = IXGBE_VFMAILBOX_SIZE; -} -#endif /* CONFIG_PCI_IOV */ - -struct ixgbe_mbx_operations mbx_ops_generic = { - .read = ixgbe_read_mbx_pf, - .write = ixgbe_write_mbx_pf, - .read_posted = ixgbe_read_posted_mbx, - .write_posted = ixgbe_write_posted_mbx, - .check_for_msg = ixgbe_check_for_msg_pf, - .check_for_ack = ixgbe_check_for_ack_pf, - .check_for_rst = ixgbe_check_for_rst_pf, -}; - diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h deleted file mode 100644 index b239bda..0000000 --- a/drivers/net/ixgbe/ixgbe_mbx.h +++ /dev/null @@ -1,93 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_MBX_H_ -#define _IXGBE_MBX_H_ - -#include "ixgbe_type.h" - -#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 - -#define IXGBE_VFMAILBOX 0x002FC -#define IXGBE_VFMBMEM 0x00200 - -#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ - -#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ -#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ -#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ -#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ - - -/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the - * PF. The reverse is true if it is IXGBE_PF_*. - * Message ACK's are the value or'd with 0xF0000000 - */ -#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ -#define IXGBE_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for exra info for certain messages */ -#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) - -#define IXGBE_VF_RESET 0x01 /* VF requests reset */ -#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ -#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ -#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ - -/* length of permanent address message returned from PF */ -#define IXGBE_VF_PERMADDR_MSG_LEN 4 -/* word in permanent address message with the current multicast type */ -#define IXGBE_VF_MC_TYPE_WORD 3 - -#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ - -#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ - -s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); -s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); -s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); -s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); -s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); -#ifdef CONFIG_PCI_IOV -void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); -#endif /* CONFIG_PCI_IOV */ - -extern struct ixgbe_mbx_operations mbx_ops_generic; - -#endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c deleted file mode 100644 index f7ca351..0000000 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ /dev/null @@ -1,1725 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include -#include - -#include "ixgbe_common.h" -#include "ixgbe_phy.h" - -static void ixgbe_i2c_start(struct ixgbe_hw *hw); -static void ixgbe_i2c_stop(struct ixgbe_hw *hw); -static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); -static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); -static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); -static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); -static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); -static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); -static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); -static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); -static bool ixgbe_get_i2c_data(u32 *i2cctl); -static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); -static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); -static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); - -/** - * ixgbe_identify_phy_generic - Get physical layer module - * @hw: pointer to hardware structure - * - * Determines the physical layer module found on the current adapter. 
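 *
 * [Editorial summary of the scan below] Every MDIO address up to
 * IXGBE_MAX_PHY_ADDR is probed with mdio45_probe(); on a response the
 * PHY ID is read and mapped to a known type, and unrecognized IDs are
 * classified as copper vs. generic from the PMA/PMD extended-ability
 * register (MDIO_PMA_EXTABLE).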
- **/ -s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_PHY_ADDR_INVALID; - u32 phy_addr; - u16 ext_ability = 0; - - if (hw->phy.type == ixgbe_phy_unknown) { - for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { - hw->phy.mdio.prtad = phy_addr; - if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { - ixgbe_get_phy_id(hw); - hw->phy.type = - ixgbe_get_phy_type_from_id(hw->phy.id); - - if (hw->phy.type == ixgbe_phy_unknown) { - hw->phy.ops.read_reg(hw, - MDIO_PMA_EXTABLE, - MDIO_MMD_PMAPMD, - &ext_ability); - if (ext_ability & - (MDIO_PMA_EXTABLE_10GBT | - MDIO_PMA_EXTABLE_1000BT)) - hw->phy.type = - ixgbe_phy_cu_unknown; - else - hw->phy.type = - ixgbe_phy_generic; - } - - status = 0; - break; - } - } - /* clear value if nothing found */ - if (status != 0) - hw->phy.mdio.prtad = 0; - } else { - status = 0; - } - - return status; -} - -/** - * ixgbe_get_phy_id - Get the phy type - * @hw: pointer to hardware structure - * - **/ -static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) -{ - u32 status; - u16 phy_id_high = 0; - u16 phy_id_low = 0; - - status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, - &phy_id_high); - - if (status == 0) { - hw->phy.id = (u32)(phy_id_high << 16); - status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, - &phy_id_low); - hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); - hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); - } - return status; -} - -/** - * ixgbe_get_phy_type_from_id - Get the phy type - * @hw: pointer to hardware structure - * - **/ -static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) -{ - enum ixgbe_phy_type phy_type; - - switch (phy_id) { - case TN1010_PHY_ID: - phy_type = ixgbe_phy_tn; - break; - case X540_PHY_ID: - phy_type = ixgbe_phy_aq; - break; - case QT2022_PHY_ID: - phy_type = ixgbe_phy_qt; - break; - case ATH_PHY_ID: - phy_type = ixgbe_phy_nl; - break; - default: - phy_type = ixgbe_phy_unknown; - break; - } - - return phy_type; -} - -/** - * ixgbe_reset_phy_generic - Performs a PHY reset - * @hw: pointer to hardware structure - **/ -s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) -{ - u32 i; - u16 ctrl = 0; - s32 status = 0; - - if (hw->phy.type == ixgbe_phy_unknown) - status = ixgbe_identify_phy_generic(hw); - - if (status != 0 || hw->phy.type == ixgbe_phy_none) - goto out; - - /* Don't reset PHY if it's shut down due to overtemp. */ - if (!hw->phy.reset_if_overtemp && - (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) - goto out; - - /* - * Perform soft PHY reset to the PHY_XS. - * This will cause a soft reset to the PHY - */ - hw->phy.ops.write_reg(hw, MDIO_CTRL1, - MDIO_MMD_PHYXS, - MDIO_CTRL1_RESET); - - /* - * Poll for reset bit to self-clear indicating reset is complete. - * Some PHYs could take up to 3 seconds to complete and need about - * 1.7 usec delay after the reset is complete. 
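 *
 * [Editorial note] Those two figures explain the loop bounds below:
 * 30 polls x 100 ms = 3 seconds worst case, and the udelay(2) issued
 * once the RESET bit clears comfortably covers the ~1.7 usec
 * post-reset delay requirement.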
- */ - for (i = 0; i < 30; i++) { - msleep(100); - hw->phy.ops.read_reg(hw, MDIO_CTRL1, - MDIO_MMD_PHYXS, &ctrl); - if (!(ctrl & MDIO_CTRL1_RESET)) { - udelay(2); - break; - } - } - - if (ctrl & MDIO_CTRL1_RESET) { - status = IXGBE_ERR_RESET_FAILED; - hw_dbg(hw, "PHY reset polling failed to complete.\n"); - } - -out: - return status; -} - -/** - * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register - * @hw: pointer to hardware structure - * @reg_addr: 32 bit address of PHY register to read - * @phy_data: Pointer to read data from PHY register - **/ -s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data) -{ - u32 command; - u32 i; - u32 data; - s32 status = 0; - u16 gssr; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - gssr = IXGBE_GSSR_PHY1_SM; - else - gssr = IXGBE_GSSR_PHY0_SM; - - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) - status = IXGBE_ERR_SWFW_SYNC; - - if (status == 0) { - /* Setup and write the address cycle command */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle completed. - * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address command did not complete.\n"); - status = IXGBE_ERR_PHY; - } - - if (status == 0) { - /* - * Address cycle complete, setup and write the read - * command - */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << - IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle - * completed. The MDI Command bit will clear when the - * operation is complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY read command didn't complete\n"); - status = IXGBE_ERR_PHY; - } else { - /* - * Read operation is complete. 
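Both the address cycle and the read cycle here rely on the same poll-until-clear idiom: write the MSCA command, then check every 10 usec whether the hardware has cleared the MDI command bit. A runnable sketch of the idiom with a simulated register (names and the bit position are stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSCA_MDI_COMMAND (1u << 30)	/* stand-in bit position */
#define MDIO_CMD_TIMEOUT 100		/* stand-in for IXGBE_MDIO_COMMAND_TIMEOUT */

static uint32_t fake_msca = MSCA_MDI_COMMAND;	/* simulated MSCA register */

static uint32_t read_msca(void)
{
	static int polls;

	/* Real hardware clears the bit when the cycle finishes; fake it. */
	if (++polls == 3)
		fake_msca &= ~MSCA_MDI_COMMAND;
	return fake_msca;
}

/* Poll until the MDI command bit self-clears, as both cycles above do. */
static bool mdio_cycle_done(void)
{
	for (int i = 0; i < MDIO_CMD_TIMEOUT; i++) {
		/* the driver waits 10 us here with udelay(10) */
		if (!(read_msca() & MSCA_MDI_COMMAND))
			return true;
	}
	return false;	/* timed out; the driver reports IXGBE_ERR_PHY */
}

int main(void)
{
	printf("done=%d\n", mdio_cycle_done());
	return 0;
}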
Get the data - * from MSRWD - */ - data = IXGBE_READ_REG(hw, IXGBE_MSRWD); - data >>= IXGBE_MSRWD_READ_DATA_SHIFT; - *phy_data = (u16)(data); - } - } - - hw->mac.ops.release_swfw_sync(hw, gssr); - } - - return status; -} - -/** - * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register - * @hw: pointer to hardware structure - * @reg_addr: 32 bit PHY register to write - * @device_type: 5 bit device type - * @phy_data: Data to write to the PHY register - **/ -s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) -{ - u32 command; - u32 i; - s32 status = 0; - u16 gssr; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - gssr = IXGBE_GSSR_PHY1_SM; - else - gssr = IXGBE_GSSR_PHY0_SM; - - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) - status = IXGBE_ERR_SWFW_SYNC; - - if (status == 0) { - /* Put the data in the MDI single read and write data register*/ - IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); - - /* Setup and write the address cycle command */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle completed. - * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address cmd didn't complete\n"); - status = IXGBE_ERR_PHY; - } - - if (status == 0) { - /* - * Address cycle complete, setup and write the write - * command - */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << - IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle - * completed. The MDI Command bit will clear when the - * operation is complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } - - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address cmd didn't complete\n"); - status = IXGBE_ERR_PHY; - } - } - - hw->mac.ops.release_swfw_sync(hw, gssr); - } - - return status; -} - -/** - * ixgbe_setup_phy_link_generic - Set and restart autoneg - * @hw: pointer to hardware structure - * - * Restart autonegotiation and PHY and waits for completion. 
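Each MDIO cycle in the read and write paths above is started by OR-ing the register address, MMD device type, PHY address and an opcode into a single MSCA command word. Sketched standalone, with assumed shift and opcode values rather than the hardware's real ones:

#include <stdint.h>
#include <stdio.h>

/* Assumed shift/opcode values; the real ones live in ixgbe_type.h. */
#define NP_ADDR_SHIFT	0
#define DEV_TYPE_SHIFT	16
#define PHY_ADDR_SHIFT	21
#define OP_ADDR_CYCLE	(1u << 26)
#define OP_WRITE	(2u << 26)
#define MDI_COMMAND	(1u << 30)

static uint32_t msca_command(uint16_t reg, uint8_t mmd, uint8_t prtad,
			     uint32_t opcode)
{
	return ((uint32_t)reg << NP_ADDR_SHIFT) |
	       ((uint32_t)mmd << DEV_TYPE_SHIFT) |
	       ((uint32_t)prtad << PHY_ADDR_SHIFT) |
	       opcode | MDI_COMMAND;	/* setting MDI_COMMAND starts the cycle */
}

int main(void)
{
	printf("addr cycle: 0x%08x\n", msca_command(0x0001, 1, 0, OP_ADDR_CYCLE));
	printf("write:      0x%08x\n", msca_command(0x0001, 1, 0, OP_WRITE));
	return 0;
}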
- **/ -s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) -{ - s32 status = 0; - u32 time_out; - u32 max_time_out = 10; - u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; - bool autoneg = false; - ixgbe_link_speed speed; - - ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) { - /* Set or unset auto-negotiation 10G advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, - MDIO_MMD_AN, - &autoneg_reg); - - autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; - - hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, - MDIO_MMD_AN, - autoneg_reg); - } - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) { - /* Set or unset auto-negotiation 1G advertisement */ - hw->phy.ops.read_reg(hw, - IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - MDIO_MMD_AN, - &autoneg_reg); - - autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; - - hw->phy.ops.write_reg(hw, - IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, - MDIO_MMD_AN, - autoneg_reg); - } - - if (speed & IXGBE_LINK_SPEED_100_FULL) { - /* Set or unset auto-negotiation 100M advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, - &autoneg_reg); - - autoneg_reg &= ~(ADVERTISE_100FULL | - ADVERTISE_100HALF); - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - autoneg_reg |= ADVERTISE_100FULL; - - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, - autoneg_reg); - } - - /* Restart PHY autonegotiation and wait for completion */ - hw->phy.ops.read_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, &autoneg_reg); - - autoneg_reg |= MDIO_AN_CTRL1_RESTART; - - hw->phy.ops.write_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, autoneg_reg); - - /* Wait for autonegotiation to finish */ - for (time_out = 0; time_out < max_time_out; time_out++) { - udelay(10); - /* Restart PHY autonegotiation and wait for completion */ - status = hw->phy.ops.read_reg(hw, MDIO_STAT1, - MDIO_MMD_AN, - &autoneg_reg); - - autoneg_reg &= MDIO_AN_STAT1_COMPLETE; - if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { - break; - } - } - - if (time_out == max_time_out) { - status = IXGBE_ERR_LINK_SETUP; - hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); - } - - return status; -} - -/** - * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg: true if autonegotiation enabled - **/ -s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete) -{ - - /* - * Clear autoneg_advertised and set new values based on input link - * speed. - */ - hw->phy.autoneg_advertised = 0; - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - if (speed & IXGBE_LINK_SPEED_100_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; - - /* Setup link based on the new speed settings */ - hw->phy.ops.setup_link(hw); - - return 0; -} - -/** - * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: boolean auto-negotiation value - * - * Determines the link capabilities by reading the AUTOC register. 
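ixgbe_setup_phy_link_speed_generic() above is essentially a mask intersection: the requested speeds are filtered down to the advertisement flags the PHY layer handles. The same filtering as a standalone sketch (flag values are stand-ins for IXGBE_LINK_SPEED_*):

#include <stdint.h>
#include <stdio.h>

#define SPEED_100_FULL	(1u << 3)	/* stand-ins for IXGBE_LINK_SPEED_* */
#define SPEED_1GB_FULL	(1u << 5)
#define SPEED_10GB_FULL	(1u << 7)

/* Keep only the requested speeds that can be advertised. */
static uint32_t advertise_mask(uint32_t requested)
{
	uint32_t adv = 0;

	if (requested & SPEED_10GB_FULL)
		adv |= SPEED_10GB_FULL;
	if (requested & SPEED_1GB_FULL)
		adv |= SPEED_1GB_FULL;
	if (requested & SPEED_100_FULL)
		adv |= SPEED_100_FULL;
	return adv;
}

int main(void)
{
	printf("0x%x\n", advertise_mask(SPEED_10GB_FULL | SPEED_100_FULL));
	return 0;
}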
- */ -s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - s32 status = IXGBE_ERR_LINK_SETUP; - u16 speed_ability; - - *speed = 0; - *autoneg = true; - - status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, - &speed_ability); - - if (status == 0) { - if (speed_ability & MDIO_SPEED_10G) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_1000) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_100) - *speed |= IXGBE_LINK_SPEED_100_FULL; - } - - return status; -} - -/** - * ixgbe_check_phy_link_tnx - Determine link and speed status - * @hw: pointer to hardware structure - * - * Reads the VS1 register to determine if link is up and the current speed for - * the PHY. - **/ -s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up) -{ - s32 status = 0; - u32 time_out; - u32 max_time_out = 10; - u16 phy_link = 0; - u16 phy_speed = 0; - u16 phy_data = 0; - - /* Initialize speed and link to default case */ - *link_up = false; - *speed = IXGBE_LINK_SPEED_10GB_FULL; - - /* - * Check current speed and link status of the PHY register. - * This is a vendor specific register and may have to - * be changed for other copper PHYs. - */ - for (time_out = 0; time_out < max_time_out; time_out++) { - udelay(10); - status = hw->phy.ops.read_reg(hw, - MDIO_STAT1, - MDIO_MMD_VEND1, - &phy_data); - phy_link = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; - phy_speed = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; - if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { - *link_up = true; - if (phy_speed == - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - } - } - - return status; -} - -/** - * ixgbe_setup_phy_link_tnx - Set and restart autoneg - * @hw: pointer to hardware structure - * - * Restart autonegotiation and PHY and waits for completion. 
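The tnx link check above reads one vendor-specific status word and tests two bits: link up, and a speed bit that selects 1G instead of the 10G default. A small decode sketch with illustrative bit positions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VS1_LINK_STATUS	 (1u << 11)	/* illustrative bit positions */
#define VS1_SPEED_STATUS (1u << 10)

struct link_state {
	bool up;
	unsigned int mbps;
};

/* Decode the vendor-specific status word the way the tnx check does. */
static struct link_state decode_vs1(uint16_t reg)
{
	struct link_state st = { .up = false, .mbps = 10000 };

	if (reg & VS1_LINK_STATUS) {
		st.up = true;
		if (reg & VS1_SPEED_STATUS)
			st.mbps = 1000;
	}
	return st;
}

int main(void)
{
	struct link_state st = decode_vs1(VS1_LINK_STATUS | VS1_SPEED_STATUS);

	printf("up=%d speed=%u\n", st.up, st.mbps);
	return 0;
}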
- **/ -s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) -{ - s32 status = 0; - u32 time_out; - u32 max_time_out = 10; - u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; - bool autoneg = false; - ixgbe_link_speed speed; - - ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) { - /* Set or unset auto-negotiation 10G advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, - MDIO_MMD_AN, - &autoneg_reg); - - autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; - - hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, - MDIO_MMD_AN, - autoneg_reg); - } - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) { - /* Set or unset auto-negotiation 1G advertisement */ - hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, - MDIO_MMD_AN, - &autoneg_reg); - - autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; - - hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, - MDIO_MMD_AN, - autoneg_reg); - } - - if (speed & IXGBE_LINK_SPEED_100_FULL) { - /* Set or unset auto-negotiation 100M advertisement */ - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, - &autoneg_reg); - - autoneg_reg &= ~(ADVERTISE_100FULL | - ADVERTISE_100HALF); - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - autoneg_reg |= ADVERTISE_100FULL; - - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, - MDIO_MMD_AN, - autoneg_reg); - } - - /* Restart PHY autonegotiation and wait for completion */ - hw->phy.ops.read_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, &autoneg_reg); - - autoneg_reg |= MDIO_AN_CTRL1_RESTART; - - hw->phy.ops.write_reg(hw, MDIO_CTRL1, - MDIO_MMD_AN, autoneg_reg); - - /* Wait for autonegotiation to finish */ - for (time_out = 0; time_out < max_time_out; time_out++) { - udelay(10); - /* Restart PHY autonegotiation and wait for completion */ - status = hw->phy.ops.read_reg(hw, MDIO_STAT1, - MDIO_MMD_AN, - &autoneg_reg); - - autoneg_reg &= MDIO_AN_STAT1_COMPLETE; - if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) - break; - } - - if (time_out == max_time_out) { - status = IXGBE_ERR_LINK_SETUP; - hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); - } - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status = 0; - - status = hw->phy.ops.read_reg(hw, TNX_FW_REV, - MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status = 0; - - status = hw->phy.ops.read_reg(hw, AQ_FW_REV, - MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** - * ixgbe_reset_phy_nl - Performs a PHY reset - * @hw: pointer to hardware structure - **/ -s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) -{ - u16 phy_offset, control, eword, edata, block_crc; - bool end_data = false; - u16 list_offset, data_offset; - u16 phy_data = 0; - s32 ret_val = 0; - u32 i; - - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); - - /* reset the PHY and poll for 
completion */ - hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, - (phy_data | MDIO_CTRL1_RESET)); - - for (i = 0; i < 100; i++) { - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, - &phy_data); - if ((phy_data & MDIO_CTRL1_RESET) == 0) - break; - usleep_range(10000, 20000); - } - - if ((phy_data & MDIO_CTRL1_RESET) != 0) { - hw_dbg(hw, "PHY reset did not complete.\n"); - ret_val = IXGBE_ERR_PHY; - goto out; - } - - /* Get init offsets */ - ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, - &data_offset); - if (ret_val != 0) - goto out; - - ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); - data_offset++; - while (!end_data) { - /* - * Read control word from PHY init contents offset - */ - ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); - control = (eword & IXGBE_CONTROL_MASK_NL) >> - IXGBE_CONTROL_SHIFT_NL; - edata = eword & IXGBE_DATA_MASK_NL; - switch (control) { - case IXGBE_DELAY_NL: - data_offset++; - hw_dbg(hw, "DELAY: %d MS\n", edata); - usleep_range(edata * 1000, edata * 2000); - break; - case IXGBE_DATA_NL: - hw_dbg(hw, "DATA:\n"); - data_offset++; - hw->eeprom.ops.read(hw, data_offset++, - &phy_offset); - for (i = 0; i < edata; i++) { - hw->eeprom.ops.read(hw, data_offset, &eword); - hw->phy.ops.write_reg(hw, phy_offset, - MDIO_MMD_PMAPMD, eword); - hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, - phy_offset); - data_offset++; - phy_offset++; - } - break; - case IXGBE_CONTROL_NL: - data_offset++; - hw_dbg(hw, "CONTROL:\n"); - if (edata == IXGBE_CONTROL_EOL_NL) { - hw_dbg(hw, "EOL\n"); - end_data = true; - } else if (edata == IXGBE_CONTROL_SOL_NL) { - hw_dbg(hw, "SOL\n"); - } else { - hw_dbg(hw, "Bad control value\n"); - ret_val = IXGBE_ERR_PHY; - goto out; - } - break; - default: - hw_dbg(hw, "Bad control type\n"); - ret_val = IXGBE_ERR_PHY; - goto out; - } - } - -out: - return ret_val; -} - -/** - * ixgbe_identify_sfp_module_generic - Identifies SFP modules - * @hw: pointer to hardware structure - * - * Searches for and identifies the SFP module and assigns appropriate PHY type. 
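The EEPROM-driven init loop in ixgbe_reset_phy_nl() above splits each 16-bit word into a control opcode and a data payload before dispatching on DELAY/DATA/CONTROL. The decode step on its own, with an assumed top-nibble layout:

#include <stdint.h>
#include <stdio.h>

#define CONTROL_MASK_NL	 0xF000u	/* assumed layout: opcode in the */
#define CONTROL_SHIFT_NL 12		/* top nibble, payload below it  */
#define DATA_MASK_NL	 0x0FFFu

static void decode_nl_word(uint16_t eword, uint16_t *control, uint16_t *edata)
{
	*control = (eword & CONTROL_MASK_NL) >> CONTROL_SHIFT_NL;
	*edata = eword & DATA_MASK_NL;
}

int main(void)
{
	uint16_t c, d;

	decode_nl_word(0x1064, &c, &d);	/* sample: opcode 1 with payload 100 */
	printf("control=%u data=%u\n", c, d);
	return 0;
}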
- **/ -s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_PHY_ADDR_INVALID; - u32 vendor_oui = 0; - enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; - u8 identifier = 0; - u8 comp_codes_1g = 0; - u8 comp_codes_10g = 0; - u8 oui_bytes[3] = {0, 0, 0}; - u8 cable_tech = 0; - u8 cable_spec = 0; - u16 enforce_sfp = 0; - - if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { - hw->phy.sfp_type = ixgbe_sfp_type_not_present; - status = IXGBE_ERR_SFP_NOT_PRESENT; - goto out; - } - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_IDENTIFIER, - &identifier); - - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) - goto err_read_i2c_eeprom; - - /* LAN ID is needed for sfp_type determination */ - hw->mac.ops.set_lan_id(hw); - - if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { - hw->phy.type = ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - } else { - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_1GBE_COMP_CODES, - &comp_codes_1g); - - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_10GBE_COMP_CODES, - &comp_codes_10g); - - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) - goto err_read_i2c_eeprom; - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_CABLE_TECHNOLOGY, - &cable_tech); - - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) - goto err_read_i2c_eeprom; - - /* ID Module - * ========= - * 0 SFP_DA_CU - * 1 SFP_SR - * 2 SFP_LR - * 3 SFP_DA_CORE0 - 82599-specific - * 4 SFP_DA_CORE1 - 82599-specific - * 5 SFP_SR/LR_CORE0 - 82599-specific - * 6 SFP_SR/LR_CORE1 - 82599-specific - * 7 SFP_act_lmt_DA_CORE0 - 82599-specific - * 8 SFP_act_lmt_DA_CORE1 - 82599-specific - * 9 SFP_1g_cu_CORE0 - 82599-specific - * 10 SFP_1g_cu_CORE1 - 82599-specific - */ - if (hw->mac.type == ixgbe_mac_82598EB) { - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.sfp_type = ixgbe_sfp_type_da_cu; - else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) - hw->phy.sfp_type = ixgbe_sfp_type_sr; - else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) - hw->phy.sfp_type = ixgbe_sfp_type_lr; - else - hw->phy.sfp_type = ixgbe_sfp_type_unknown; - } else if (hw->mac.type == ixgbe_mac_82599EB) { - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_da_cu_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_da_cu_core1; - } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { - hw->phy.ops.read_i2c_eeprom( - hw, IXGBE_SFF_CABLE_SPEC_COMP, - &cable_spec); - if (cable_spec & - IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_da_act_lmt_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_da_act_lmt_core1; - } else { - hw->phy.sfp_type = - ixgbe_sfp_type_unknown; - } - } else if (comp_codes_10g & - (IXGBE_SFF_10GBASESR_CAPABLE | - IXGBE_SFF_10GBASELR_CAPABLE)) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_srlr_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_srlr_core1; - } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_cu_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_cu_core1; - } else { - hw->phy.sfp_type = 
ixgbe_sfp_type_unknown; - } - } - - if (hw->phy.sfp_type != stored_sfp_type) - hw->phy.sfp_setup_needed = true; - - /* Determine if the SFP+ PHY is dual speed or not. */ - hw->phy.multispeed_fiber = false; - if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || - ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && - (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) - hw->phy.multispeed_fiber = true; - - /* Determine PHY vendor */ - if (hw->phy.type != ixgbe_phy_nl) { - hw->phy.id = identifier; - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE0, - &oui_bytes[0]); - - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE1, - &oui_bytes[1]); - - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) - goto err_read_i2c_eeprom; - - status = hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_VENDOR_OUI_BYTE2, - &oui_bytes[2]); - - if (status == IXGBE_ERR_SWFW_SYNC || - status == IXGBE_ERR_I2C || - status == IXGBE_ERR_SFP_NOT_PRESENT) - goto err_read_i2c_eeprom; - - vendor_oui = - ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | - (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | - (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); - - switch (vendor_oui) { - case IXGBE_SFF_VENDOR_OUI_TYCO: - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_passive_tyco; - break; - case IXGBE_SFF_VENDOR_OUI_FTL: - if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) - hw->phy.type = ixgbe_phy_sfp_ftl_active; - else - hw->phy.type = ixgbe_phy_sfp_ftl; - break; - case IXGBE_SFF_VENDOR_OUI_AVAGO: - hw->phy.type = ixgbe_phy_sfp_avago; - break; - case IXGBE_SFF_VENDOR_OUI_INTEL: - hw->phy.type = ixgbe_phy_sfp_intel; - break; - default: - if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_passive_unknown; - else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) - hw->phy.type = - ixgbe_phy_sfp_active_unknown; - else - hw->phy.type = ixgbe_phy_sfp_unknown; - break; - } - } - - /* Allow any DA cable vendor */ - if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | - IXGBE_SFF_DA_ACTIVE_CABLE)) { - status = 0; - goto out; - } - - /* Verify supported 1G SFP modules */ - if (comp_codes_10g == 0 && - !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { - hw->phy.type = ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - goto out; - } - - /* Anything else 82598-based is supported */ - if (hw->mac.type == ixgbe_mac_82598EB) { - status = 0; - goto out; - } - - hw->mac.ops.get_device_caps(hw, &enforce_sfp); - if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && - !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || - (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { - /* Make sure we're a supported PHY type */ - if (hw->phy.type == ixgbe_phy_sfp_intel) { - status = 0; - } else { - hw_dbg(hw, "SFP+ module not supported\n"); - hw->phy.type = ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; - } - } else { - status = 0; - } - } - -out: - return status; - -err_read_i2c_eeprom: - hw->phy.sfp_type = ixgbe_sfp_type_not_present; - if (hw->phy.type != ixgbe_phy_nl) { - hw->phy.id = 0; - hw->phy.type = ixgbe_phy_unknown; - } - return IXGBE_ERR_SFP_NOT_PRESENT; -} - -/** - * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init 
sequence - * @hw: pointer to hardware structure - * @list_offset: offset to the SFP ID list - * @data_offset: offset to the SFP data block - * - * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if - * so it returns the offsets to the phy init sequence block. - **/ -s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, - u16 *list_offset, - u16 *data_offset) -{ - u16 sfp_id; - u16 sfp_type = hw->phy.sfp_type; - - if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) - return IXGBE_ERR_SFP_NOT_SUPPORTED; - - if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) - return IXGBE_ERR_SFP_NOT_PRESENT; - - if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && - (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) - return IXGBE_ERR_SFP_NOT_SUPPORTED; - - /* - * Limiting active cables and 1G Phys must be initialized as - * SR modules - */ - if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || - sfp_type == ixgbe_sfp_type_1g_cu_core0) - sfp_type = ixgbe_sfp_type_srlr_core0; - else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || - sfp_type == ixgbe_sfp_type_1g_cu_core1) - sfp_type = ixgbe_sfp_type_srlr_core1; - - /* Read offset to PHY init contents */ - hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); - - if ((!*list_offset) || (*list_offset == 0xFFFF)) - return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; - - /* Shift offset to first ID word */ - (*list_offset)++; - - /* - * Find the matching SFP ID in the EEPROM - * and program the init sequence - */ - hw->eeprom.ops.read(hw, *list_offset, &sfp_id); - - while (sfp_id != IXGBE_PHY_INIT_END_NL) { - if (sfp_id == sfp_type) { - (*list_offset)++; - hw->eeprom.ops.read(hw, *list_offset, data_offset); - if ((!*data_offset) || (*data_offset == 0xFFFF)) { - hw_dbg(hw, "SFP+ module not supported\n"); - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } else { - break; - } - } else { - (*list_offset) += 2; - if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) - return IXGBE_ERR_PHY; - } - } - - if (sfp_id == IXGBE_PHY_INIT_END_NL) { - hw_dbg(hw, "No matching SFP+ module found\n"); - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } - - return 0; -} - -/** - * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to read - * @eeprom_data: value read - * - * Performs byte read operation to SFP module's EEPROM over I2C interface. - **/ -s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data) -{ - return hw->phy.ops.read_i2c_byte(hw, byte_offset, - IXGBE_I2C_EEPROM_DEV_ADDR, - eeprom_data); -} - -/** - * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface - * @hw: pointer to hardware structure - * @byte_offset: EEPROM byte offset to write - * @eeprom_data: value to write - * - * Performs byte write operation to SFP module's EEPROM over I2C interface. - **/ -s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 eeprom_data) -{ - return hw->phy.ops.write_i2c_byte(hw, byte_offset, - IXGBE_I2C_EEPROM_DEV_ADDR, - eeprom_data); -} - -/** - * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to read - * @data: value read - * - * Performs byte read operation to SFP module's EEPROM over I2C interface at - * a specified deivce address. 
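The offset lookup above walks a list of (module id, data offset) pairs in EEPROM until it finds a matching id or hits the end marker. The same walk over an in-memory array, as an illustrative sketch:

#include <stdint.h>
#include <stdio.h>

#define INIT_END 0xFFFF	/* stand-in for IXGBE_PHY_INIT_END_NL */

/* EEPROM layout sketch: id, offset, id, offset, ..., INIT_END */
static const uint16_t eeprom[] = { 3, 0x120, 5, 0x148, 7, 0x170, INIT_END };

/* Return the data offset for sfp_type, or 0 when no entry matches. */
static uint16_t find_init_offset(uint16_t sfp_type)
{
	for (unsigned int i = 0; eeprom[i] != INIT_END; i += 2)
		if (eeprom[i] == sfp_type)
			return eeprom[i + 1];
	return 0;
}

int main(void)
{
	printf("offset=0x%x\n", find_init_offset(5));
	return 0;
}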
- **/ -s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) -{ - s32 status = 0; - u32 max_retry = 10; - u32 retry = 0; - u16 swfw_mask = 0; - bool nack = 1; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - swfw_mask = IXGBE_GSSR_PHY1_SM; - else - swfw_mask = IXGBE_GSSR_PHY0_SM; - - do { - if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { - status = IXGBE_ERR_SWFW_SYNC; - goto read_byte_out; - } - - ixgbe_i2c_start(hw); - - /* Device Address and write indication */ - status = ixgbe_clock_out_i2c_byte(hw, dev_addr); - if (status != 0) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != 0) - goto fail; - - status = ixgbe_clock_out_i2c_byte(hw, byte_offset); - if (status != 0) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != 0) - goto fail; - - ixgbe_i2c_start(hw); - - /* Device Address and read indication */ - status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); - if (status != 0) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != 0) - goto fail; - - status = ixgbe_clock_in_i2c_byte(hw, data); - if (status != 0) - goto fail; - - status = ixgbe_clock_out_i2c_bit(hw, nack); - if (status != 0) - goto fail; - - ixgbe_i2c_stop(hw); - break; - -fail: - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msleep(100); - ixgbe_i2c_bus_clear(hw); - retry++; - if (retry < max_retry) - hw_dbg(hw, "I2C byte read error - Retrying.\n"); - else - hw_dbg(hw, "I2C byte read error.\n"); - - } while (retry < max_retry); - - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - -read_byte_out: - return status; -} - -/** - * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C - * @hw: pointer to hardware structure - * @byte_offset: byte offset to write - * @data: value to write - * - * Performs byte write operation to SFP module's EEPROM over I2C interface at - * a specified device address. 
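The byte read above is a classic combined-format I2C random read: write the device address and byte offset, issue a repeated start, re-address with the read bit set, clock the byte in and NACK it. A runnable outline with stubbed bus primitives (the 0xA0 device address matches IXGBE_I2C_EEPROM_DEV_ADDR; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Stubbed bus primitives so the transaction shape runs standalone. */
static void i2c_start(void)         { puts("START"); }
static void i2c_stop(void)          { puts("STOP"); }
static void i2c_out_byte(uint8_t b) { printf("out 0x%02x, get ack\n", b); }
static uint8_t i2c_in_byte(void)    { puts("in one byte, send nack"); return 0x42; }

/* Combined-format random read: set the address pointer with a write,
 * then repeated START and re-address with the read bit before clocking
 * the byte in. The master NACKs the single byte to end the transfer. */
static uint8_t eeprom_read_byte(uint8_t dev_addr, uint8_t offset)
{
	uint8_t data;

	i2c_start();
	i2c_out_byte(dev_addr);		/* write indication, R/W bit clear */
	i2c_out_byte(offset);
	i2c_start();			/* repeated start, no STOP between */
	i2c_out_byte(dev_addr | 0x1);	/* read indication */
	data = i2c_in_byte();
	i2c_stop();
	return data;
}

int main(void)
{
	printf("data=0x%02x\n", eeprom_read_byte(0xA0, 0x60));
	return 0;
}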
- **/ -s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) -{ - s32 status = 0; - u32 max_retry = 1; - u32 retry = 0; - u16 swfw_mask = 0; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - swfw_mask = IXGBE_GSSR_PHY1_SM; - else - swfw_mask = IXGBE_GSSR_PHY0_SM; - - if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { - status = IXGBE_ERR_SWFW_SYNC; - goto write_byte_out; - } - - do { - ixgbe_i2c_start(hw); - - status = ixgbe_clock_out_i2c_byte(hw, dev_addr); - if (status != 0) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != 0) - goto fail; - - status = ixgbe_clock_out_i2c_byte(hw, byte_offset); - if (status != 0) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != 0) - goto fail; - - status = ixgbe_clock_out_i2c_byte(hw, data); - if (status != 0) - goto fail; - - status = ixgbe_get_i2c_ack(hw); - if (status != 0) - goto fail; - - ixgbe_i2c_stop(hw); - break; - -fail: - ixgbe_i2c_bus_clear(hw); - retry++; - if (retry < max_retry) - hw_dbg(hw, "I2C byte write error - Retrying.\n"); - else - hw_dbg(hw, "I2C byte write error.\n"); - } while (retry < max_retry); - - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - -write_byte_out: - return status; -} - -/** - * ixgbe_i2c_start - Sets I2C start condition - * @hw: pointer to hardware structure - * - * Sets I2C start condition (High -> Low on SDA while SCL is High) - **/ -static void ixgbe_i2c_start(struct ixgbe_hw *hw) -{ - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - - /* Start condition must begin with data and clock high */ - ixgbe_set_i2c_data(hw, &i2cctl, 1); - ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Setup time for start condition (4.7us) */ - udelay(IXGBE_I2C_T_SU_STA); - - ixgbe_set_i2c_data(hw, &i2cctl, 0); - - /* Hold time for start condition (4us) */ - udelay(IXGBE_I2C_T_HD_STA); - - ixgbe_lower_i2c_clk(hw, &i2cctl); - - /* Minimum low period of clock is 4.7 us */ - udelay(IXGBE_I2C_T_LOW); - -} - -/** - * ixgbe_i2c_stop - Sets I2C stop condition - * @hw: pointer to hardware structure - * - * Sets I2C stop condition (Low -> High on SDA while SCL is High) - **/ -static void ixgbe_i2c_stop(struct ixgbe_hw *hw) -{ - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - - /* Stop condition must begin with data low and clock high */ - ixgbe_set_i2c_data(hw, &i2cctl, 0); - ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Setup time for stop condition (4us) */ - udelay(IXGBE_I2C_T_SU_STO); - - ixgbe_set_i2c_data(hw, &i2cctl, 1); - - /* bus free time between stop and start (4.7us)*/ - udelay(IXGBE_I2C_T_BUF); -} - -/** - * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C - * @hw: pointer to hardware structure - * @data: data byte to clock in - * - * Clocks in one byte data via I2C data/clock - **/ -static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) -{ - s32 status = 0; - s32 i; - bool bit = 0; - - for (i = 7; i >= 0; i--) { - status = ixgbe_clock_in_i2c_bit(hw, &bit); - *data |= bit << i; - - if (status != 0) - break; - } - - return status; -} - -/** - * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C - * @hw: pointer to hardware structure - * @data: data byte clocked out - * - * Clocks out one byte data via I2C data/clock - **/ -static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) -{ - s32 status = 0; - s32 i; - u32 i2cctl; - bool bit = 0; - - for (i = 7; i >= 0; i--) { - bit = (data >> i) & 0x1; - status = ixgbe_clock_out_i2c_bit(hw, bit); - - if (status != 0) - break; - } - - /* Release SDA line (set high) */ - i2cctl = 
IXGBE_READ_REG(hw, IXGBE_I2CCTL); - i2cctl |= IXGBE_I2C_DATA_OUT; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); - - return status; -} - -/** - * ixgbe_get_i2c_ack - Polls for I2C ACK - * @hw: pointer to hardware structure - * - * Clocks in/out one bit via I2C data/clock - **/ -static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) -{ - s32 status; - u32 i = 0; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - u32 timeout = 10; - bool ack = 1; - - status = ixgbe_raise_i2c_clk(hw, &i2cctl); - - if (status != 0) - goto out; - - /* Minimum high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); - - /* Poll for ACK. Note that ACK in I2C spec is - * transition from 1 to 0 */ - for (i = 0; i < timeout; i++) { - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - ack = ixgbe_get_i2c_data(&i2cctl); - - udelay(1); - if (ack == 0) - break; - } - - if (ack == 1) { - hw_dbg(hw, "I2C ack was not received.\n"); - status = IXGBE_ERR_I2C; - } - - ixgbe_lower_i2c_clk(hw, &i2cctl); - - /* Minimum low period of clock is 4.7 us */ - udelay(IXGBE_I2C_T_LOW); - -out: - return status; -} - -/** - * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock - * @hw: pointer to hardware structure - * @data: read data value - * - * Clocks in one bit via I2C data/clock - **/ -static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) -{ - s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - - status = ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Minimum high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); - - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - *data = ixgbe_get_i2c_data(&i2cctl); - - ixgbe_lower_i2c_clk(hw, &i2cctl); - - /* Minimum low period of clock is 4.7 us */ - udelay(IXGBE_I2C_T_LOW); - - return status; -} - -/** - * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock - * @hw: pointer to hardware structure - * @data: data value to write - * - * Clocks out one bit via I2C data/clock - **/ -static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) -{ - s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - - status = ixgbe_set_i2c_data(hw, &i2cctl, data); - if (status == 0) { - status = ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Minimum high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); - - ixgbe_lower_i2c_clk(hw, &i2cctl); - - /* Minimum low period of clock is 4.7 us. - * This also takes care of the data hold time. 
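The byte clocking above is MSB first: clock-out shifts bit 7 down to bit 0 onto SDA, and clock-in rebuilds the byte by OR-ing each sampled bit back into the same position. The shift arithmetic in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t out = 0xA5, in = 0;

	for (int i = 7; i >= 0; i--) {
		int bit = (out >> i) & 0x1;	/* what clock_out_i2c_bit sends */
		in |= (uint8_t)(bit << i);	/* how clock_in_i2c_byte rebuilds */
	}
	printf("0x%02x -> 0x%02x\n", out, in);
	return 0;
}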
- */ - udelay(IXGBE_I2C_T_LOW); - } else { - status = IXGBE_ERR_I2C; - hw_dbg(hw, "I2C data was not set to %X\n", data); - } - - return status; -} -/** - * ixgbe_raise_i2c_clk - Raises the I2C SCL clock - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register - * - * Raises the I2C clock line '0'->'1' - **/ -static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) -{ - s32 status = 0; - - *i2cctl |= IXGBE_I2C_CLK_OUT; - - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); - IXGBE_WRITE_FLUSH(hw); - - /* SCL rise time (1000ns) */ - udelay(IXGBE_I2C_T_RISE); - - return status; -} - -/** - * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register - * - * Lowers the I2C clock line '1'->'0' - **/ -static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) -{ - - *i2cctl &= ~IXGBE_I2C_CLK_OUT; - - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); - IXGBE_WRITE_FLUSH(hw); - - /* SCL fall time (300ns) */ - udelay(IXGBE_I2C_T_FALL); -} - -/** - * ixgbe_set_i2c_data - Sets the I2C data bit - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register - * @data: I2C data value (0 or 1) to set - * - * Sets the I2C data bit - **/ -static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) -{ - s32 status = 0; - - if (data) - *i2cctl |= IXGBE_I2C_DATA_OUT; - else - *i2cctl &= ~IXGBE_I2C_DATA_OUT; - - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); - IXGBE_WRITE_FLUSH(hw); - - /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ - udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); - - /* Verify data was set correctly */ - *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - if (data != ixgbe_get_i2c_data(i2cctl)) { - status = IXGBE_ERR_I2C; - hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); - } - - return status; -} - -/** - * ixgbe_get_i2c_data - Reads the I2C SDA data bit - * @hw: pointer to hardware structure - * @i2cctl: Current value of I2CCTL register - * - * Returns the I2C data bit value - **/ -static bool ixgbe_get_i2c_data(u32 *i2cctl) -{ - bool data; - - if (*i2cctl & IXGBE_I2C_DATA_IN) - data = 1; - else - data = 0; - - return data; -} - -/** - * ixgbe_i2c_bus_clear - Clears the I2C bus - * @hw: pointer to hardware structure - * - * Clears the I2C bus by sending nine clock pulses. - * Used when data line is stuck low. - **/ -static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) -{ - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - u32 i; - - ixgbe_i2c_start(hw); - - ixgbe_set_i2c_data(hw, &i2cctl, 1); - - for (i = 0; i < 9; i++) { - ixgbe_raise_i2c_clk(hw, &i2cctl); - - /* Min high period of clock is 4us */ - udelay(IXGBE_I2C_T_HIGH); - - ixgbe_lower_i2c_clk(hw, &i2cctl); - - /* Min low period of clock is 4.7us*/ - udelay(IXGBE_I2C_T_LOW); - } - - ixgbe_i2c_start(hw); - - /* Put the i2c bus back to default state */ - ixgbe_i2c_stop(hw); -} - -/** - * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 
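The bus clear above follows the usual I2C recovery recipe: release SDA, then pulse SCL nine times so a slave stuck mid-transfer can finish shifting out and let go of the data line, and finally reissue start and stop to leave the bus idle. A stubbed sketch of the pulse train, with the timing figures taken from the comments above:

#include <stdio.h>

static void scl_high(void) { puts("SCL high, hold >= 4 us"); }
static void scl_low(void)  { puts("SCL low, hold >= 4.7 us"); }

/* With SDA released, nine pulses let a stuck slave shift out its
 * remaining bits (eight data bits plus the ack slot). */
int main(void)
{
	for (int i = 0; i < 9; i++) {
		scl_high();
		scl_low();
	}
	puts("then START followed by STOP returns the bus to idle");
	return 0;
}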
- * @hw: pointer to hardware structure - * - * Checks if the LASI temp alarm status was triggered due to overtemp - **/ -s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) -{ - s32 status = 0; - u16 phy_data = 0; - - if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) - goto out; - - /* Check that the LASI temp alarm status was triggered */ - hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, - MDIO_MMD_PMAPMD, &phy_data); - - if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) - goto out; - - status = IXGBE_ERR_OVERTEMP; -out: - return status; -} diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h deleted file mode 100644 index 197bdd1..0000000 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ /dev/null @@ -1,131 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_PHY_H_ -#define _IXGBE_PHY_H_ - -#include "ixgbe_type.h" -#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 - -/* EEPROM byte offsets */ -#define IXGBE_SFF_IDENTIFIER 0x0 -#define IXGBE_SFF_IDENTIFIER_SFP 0x3 -#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 -#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 -#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 -#define IXGBE_SFF_1GBE_COMP_CODES 0x6 -#define IXGBE_SFF_10GBE_COMP_CODES 0x3 -#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 -#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C - -/* Bitmasks */ -#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 -#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 -#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 -#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 -#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 -#define IXGBE_SFF_1GBASET_CAPABLE 0x8 -#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 -#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 -#define IXGBE_I2C_EEPROM_READ_MASK 0x100 -#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 -#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 -#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 -#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 -#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 - -/* Flow control defines */ -#define IXGBE_TAF_SYM_PAUSE 0x400 -#define IXGBE_TAF_ASM_PAUSE 0x800 - -/* Bit-shift macros */ -#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 -#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 -#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 - -/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ -#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 -#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 -#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 -#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 - -/* I2C SDA and SCL timing parameters for 
standard mode */ -#define IXGBE_I2C_T_HD_STA 4 -#define IXGBE_I2C_T_LOW 5 -#define IXGBE_I2C_T_HIGH 4 -#define IXGBE_I2C_T_SU_STA 5 -#define IXGBE_I2C_T_HD_DATA 5 -#define IXGBE_I2C_T_SU_DATA 1 -#define IXGBE_I2C_T_RISE 1 -#define IXGBE_I2C_T_FALL 1 -#define IXGBE_I2C_T_SU_STO 4 -#define IXGBE_I2C_T_BUF 5 - -#define IXGBE_TN_LASI_STATUS_REG 0x9005 -#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 - -s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); -s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); -s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); -s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); -s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data); -s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); -s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete); -s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg); - -/* PHY specific */ -s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up); -s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version); -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version); - -s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); -s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); -s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, - u16 *list_offset, - u16 *data_offset); -s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); -s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data); -s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data); -s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 *eeprom_data); -s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 eeprom_data); -#endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c deleted file mode 100644 index d99d01e..0000000 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ /dev/null @@ -1,687 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef NETIF_F_HW_VLAN_TX -#include -#endif - -#include "ixgbe.h" - -#include "ixgbe_sriov.h" - -static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, - int entries, u16 *hash_list, u32 vf) -{ - struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - struct ixgbe_hw *hw = &adapter->hw; - int i; - u32 vector_bit; - u32 vector_reg; - u32 mta_reg; - - /* only so many hash values supported */ - entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); - - /* - * salt away the number of multi cast addresses assigned - * to this VF for later use to restore when the PF multi cast - * list changes - */ - vfinfo->num_vf_mc_hashes = entries; - - /* - * VFs are limited to using the MTA hash table for their multicast - * addresses - */ - for (i = 0; i < entries; i++) { - vfinfo->vf_mc_hashes[i] = hash_list[i]; - } - - for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { - vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; - vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; - mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << vector_bit); - IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); - } - - return 0; -} - -static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct list_head *pos; - struct vf_macvlans *entry; - - list_for_each(pos, &adapter->vf_mvs.l) { - entry = list_entry(pos, struct vf_macvlans, l); - if (entry->free == false) - hw->mac.ops.set_rar(hw, entry->rar_entry, - entry->vf_macvlan, - entry->vf, IXGBE_RAH_AV); - } -} - -void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct vf_data_storage *vfinfo; - int i, j; - u32 vector_bit; - u32 vector_reg; - u32 mta_reg; - - for (i = 0; i < adapter->num_vfs; i++) { - vfinfo = &adapter->vfinfo[i]; - for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { - hw->addr_ctrl.mta_in_use++; - vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; - vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; - mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << vector_bit); - IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); - } - } - - /* Restore any VF macvlans */ - ixgbe_restore_vf_macvlans(adapter); -} - -static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, - u32 vf) -{ - return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); -} - -static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) -{ - struct ixgbe_hw *hw = &adapter->hw; - int new_mtu = msgbuf[1]; - u32 max_frs; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - - /* Only X540 supports jumbo frames in IOV mode */ - if (adapter->hw.mac.type != ixgbe_mac_X540) - return; - - /* MTU < 68 is an error and causes problems on some kernels */ - if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) { - e_err(drv, "VF mtu %d out of range\n", new_mtu); - return; - } - - max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) & - IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; - if (max_frs < new_mtu) { - max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); - } - - e_info(hw, "VF requests change max MTU to %d\n", new_mtu); -} - -static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) -{ - u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); - 
vmolr |= (IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_BAM); - if (aupe) - vmolr |= IXGBE_VMOLR_AUPE; - else - vmolr &= ~IXGBE_VMOLR_AUPE; - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); -} - -static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - - if (vid) - IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), - (vid | IXGBE_VMVIR_VLANA_DEFAULT)); - else - IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); -} - -static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - int rar_entry = hw->mac.num_rar_entries - (vf + 1); - - /* reset offloads to defaults */ - if (adapter->vfinfo[vf].pf_vlan) { - ixgbe_set_vf_vlan(adapter, true, - adapter->vfinfo[vf].pf_vlan, vf); - ixgbe_set_vmvir(adapter, - (adapter->vfinfo[vf].pf_vlan | - (adapter->vfinfo[vf].pf_qos << - VLAN_PRIO_SHIFT)), vf); - ixgbe_set_vmolr(hw, vf, false); - } else { - ixgbe_set_vmvir(adapter, 0, vf); - ixgbe_set_vmolr(hw, vf, true); - } - - /* reset multicast table array for vf */ - adapter->vfinfo[vf].num_vf_mc_hashes = 0; - - /* Flush and reset the mta with the new values */ - ixgbe_set_rx_mode(adapter->netdev); - - hw->mac.ops.clear_rar(hw, rar_entry); -} - -static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, - int vf, unsigned char *mac_addr) -{ - struct ixgbe_hw *hw = &adapter->hw; - int rar_entry = hw->mac.num_rar_entries - (vf + 1); - - memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); - hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV); - - return 0; -} - -static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, - int vf, int index, unsigned char *mac_addr) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct list_head *pos; - struct vf_macvlans *entry; - - if (index <= 1) { - list_for_each(pos, &adapter->vf_mvs.l) { - entry = list_entry(pos, struct vf_macvlans, l); - if (entry->vf == vf) { - entry->vf = -1; - entry->free = true; - entry->is_macvlan = false; - hw->mac.ops.clear_rar(hw, entry->rar_entry); - } - } - } - - /* - * If index was zero then we were asked to clear the uc list - * for the VF. We're done. - */ - if (!index) - return 0; - - entry = NULL; - - list_for_each(pos, &adapter->vf_mvs.l) { - entry = list_entry(pos, struct vf_macvlans, l); - if (entry->free) - break; - } - - /* - * If we traversed the entire list and didn't find a free entry - * then we're out of space on the RAR table. Also entry may - * be NULL because the original memory allocation for the list - * failed, which is not fatal but does mean we can't support - * VF requests for MACVLAN because we couldn't allocate - * memory for the list management required. - */ - if (!entry || !entry->free) - return -ENOSPC; - - entry->free = false; - entry->is_macvlan = true; - entry->vf = vf; - memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); - - hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV); - - return 0; -} - -int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) -{ - unsigned char vf_mac_addr[6]; - struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - unsigned int vfn = (event_mask & 0x3f); - - bool enable = ((event_mask & 0x10000000U) != 0); - - if (enable) { - random_ether_addr(vf_mac_addr); - e_info(probe, "IOV: VF %d is enabled MAC %pM\n", - vfn, vf_mac_addr); - /* - * Store away the VF "permananet" MAC address, it will ask - * for it later. 
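The VF multicast code earlier in this file maps each hash value to a bit in the 128-entry MTA table by using bits 11:5 as the register index and bits 4:0 as the bit position. That split, standalone:

#include <stdint.h>
#include <stdio.h>

/* Map one multicast hash to its MTA register and bit, as the VF
 * multicast set/restore code does. */
static void mta_slot(uint16_t hash, uint32_t *reg, uint32_t *bit)
{
	*reg = (hash >> 5) & 0x7F;	/* which 32-bit MTA register */
	*bit = hash & 0x1F;		/* which bit inside it */
}

int main(void)
{
	uint32_t reg, bit;

	mta_slot(0x0ABC, &reg, &bit);
	printf("MTA[%u] |= 1 << %u\n", reg, bit);
	return 0;
}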
- */ - memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); - } - - return 0; -} - -static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 reg; - u32 reg_offset, vf_shift; - - vf_shift = vf % 32; - reg_offset = vf / 32; - - /* enable transmit and receive for vf */ - reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= (reg | (1 << vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); - - reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= (reg | (1 << vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); - - /* Enable counting of spoofed packets in the SSVPC register */ - reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); - reg |= (1 << vf_shift); - IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); - - ixgbe_vf_reset_event(adapter, vf); -} - -static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) -{ - u32 mbx_size = IXGBE_VFMAILBOX_SIZE; - u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; - struct ixgbe_hw *hw = &adapter->hw; - s32 retval; - int entries; - u16 *hash_list; - int add, vid, index; - u8 *new_mac; - - retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); - - if (retval) - pr_err("Error receiving message from VF\n"); - - /* this is a message we already processed, do nothing */ - if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) - return retval; - - /* - * until the vf completes a virtual function reset it should not be - * allowed to start any configuration. - */ - - if (msgbuf[0] == IXGBE_VF_RESET) { - unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; - new_mac = (u8 *)(&msgbuf[1]); - e_info(probe, "VF Reset msg received from vf %d\n", vf); - adapter->vfinfo[vf].clear_to_send = false; - ixgbe_vf_reset_msg(adapter, vf); - adapter->vfinfo[vf].clear_to_send = true; - - if (is_valid_ether_addr(new_mac) && - !adapter->vfinfo[vf].pf_set_mac) - ixgbe_set_vf_mac(adapter, vf, vf_mac); - else - ixgbe_set_vf_mac(adapter, - vf, adapter->vfinfo[vf].vf_mac_addresses); - - /* reply to reset with ack and vf mac address */ - msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; - memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); - /* - * Piggyback the multicast filter type so VF can compute the - * correct vectors - */ - msgbuf[3] = hw->mac.mc_filter_type; - ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); - - return retval; - } - - if (!adapter->vfinfo[vf].clear_to_send) { - msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; - ixgbe_write_mbx(hw, msgbuf, 1, vf); - return retval; - } - - switch ((msgbuf[0] & 0xFFFF)) { - case IXGBE_VF_SET_MAC_ADDR: - new_mac = ((u8 *)(&msgbuf[1])); - if (is_valid_ether_addr(new_mac) && - !adapter->vfinfo[vf].pf_set_mac) { - ixgbe_set_vf_mac(adapter, vf, new_mac); - } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses, - new_mac, ETH_ALEN)) { - e_warn(drv, "VF %d attempted to override " - "administratively set MAC address\nReload " - "the VF driver to resume operations\n", vf); - retval = -1; - } - break; - case IXGBE_VF_SET_MULTICAST: - entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) - >> IXGBE_VT_MSGINFO_SHIFT; - hash_list = (u16 *)&msgbuf[1]; - retval = ixgbe_set_vf_multicasts(adapter, entries, - hash_list, vf); - break; - case IXGBE_VF_SET_LPE: - ixgbe_set_vf_lpe(adapter, msgbuf); - break; - case IXGBE_VF_SET_VLAN: - add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) - >> IXGBE_VT_MSGINFO_SHIFT; - vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); - if (adapter->vfinfo[vf].pf_vlan) { - e_warn(drv, "VF %d attempted to override " - 
"administratively set VLAN configuration\n" - "Reload the VF driver to resume operations\n", - vf); - retval = -1; - } else { - retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); - } - break; - case IXGBE_VF_SET_MACVLAN: - index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> - IXGBE_VT_MSGINFO_SHIFT; - /* - * If the VF is allowed to set MAC filters then turn off - * anti-spoofing to avoid false positives. An index - * greater than 0 will indicate the VF is setting a - * macvlan MAC filter. - */ - if (index > 0 && adapter->antispoofing_enabled) { - hw->mac.ops.set_mac_anti_spoofing(hw, false, - adapter->num_vfs); - hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); - adapter->antispoofing_enabled = false; - } - retval = ixgbe_set_vf_macvlan(adapter, vf, index, - (unsigned char *)(&msgbuf[1])); - break; - default: - e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); - retval = IXGBE_ERR_MBX; - break; - } - - /* notify the VF of the results of what it sent us */ - if (retval) - msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; - else - msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; - - msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; - - ixgbe_write_mbx(hw, msgbuf, 1, vf); - - return retval; -} - -static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 msg = IXGBE_VT_MSGTYPE_NACK; - - /* if device isn't clear to send it shouldn't be reading either */ - if (!adapter->vfinfo[vf].clear_to_send) - ixgbe_write_mbx(hw, &msg, 1, vf); -} - -void ixgbe_msg_task(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vf; - - for (vf = 0; vf < adapter->num_vfs; vf++) { - /* process any reset requests */ - if (!ixgbe_check_for_rst(hw, vf)) - ixgbe_vf_reset_event(adapter, vf); - - /* process any messages pending */ - if (!ixgbe_check_for_msg(hw, vf)) - ixgbe_rcv_msg_from_vf(adapter, vf); - - /* process any acks */ - if (!ixgbe_check_for_ack(hw, vf)) - ixgbe_rcv_ack_from_vf(adapter, vf); - } -} - -void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - /* disable transmit and receive for all vfs */ - IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); - - IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); -} - -void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 ping; - int i; - - for (i = 0 ; i < adapter->num_vfs; i++) { - ping = IXGBE_PF_CONTROL_MSG; - if (adapter->vfinfo[i].clear_to_send) - ping |= IXGBE_VT_MSGTYPE_CTS; - ixgbe_write_mbx(hw, &ping, 1, i); - } -} - -int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) - return -EINVAL; - adapter->vfinfo[vf].pf_set_mac = true; - dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); - dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" - " change effective."); - if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," - " but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, "Bring the PF device up before" - " attempting to use the VF device.\n"); - } - return ixgbe_set_vf_mac(adapter, vf, mac); -} - -int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) -{ - int err = 0; - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - if ((vf >= adapter->num_vfs) || (vlan > 4095) || 
(qos > 7))
-		return -EINVAL;
-	if (vlan || qos) {
-		err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
-		if (err)
-			goto out;
-		ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
-		ixgbe_set_vmolr(hw, vf, false);
-		if (adapter->antispoofing_enabled)
-			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-		adapter->vfinfo[vf].pf_vlan = vlan;
-		adapter->vfinfo[vf].pf_qos = qos;
-		dev_info(&adapter->pdev->dev,
-			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
-		if (test_bit(__IXGBE_DOWN, &adapter->state)) {
-			dev_warn(&adapter->pdev->dev,
-				 "The VF VLAN has been set,"
-				 " but the PF device is not up.\n");
-			dev_warn(&adapter->pdev->dev,
-				 "Bring the PF device up before"
-				 " attempting to use the VF device.\n");
-		}
-	} else {
-		err = ixgbe_set_vf_vlan(adapter, false,
-					adapter->vfinfo[vf].pf_vlan, vf);
-		ixgbe_set_vmvir(adapter, vlan, vf);
-		ixgbe_set_vmolr(hw, vf, true);
-		hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
-		adapter->vfinfo[vf].pf_vlan = 0;
-		adapter->vfinfo[vf].pf_qos = 0;
-	}
-out:
-	return err;
-}
-
-static int ixgbe_link_mbps(int internal_link_speed)
-{
-	switch (internal_link_speed) {
-	case IXGBE_LINK_SPEED_100_FULL:
-		return 100;
-	case IXGBE_LINK_SPEED_1GB_FULL:
-		return 1000;
-	case IXGBE_LINK_SPEED_10GB_FULL:
-		return 10000;
-	default:
-		return 0;
-	}
-}
-
-static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
-				    int link_speed)
-{
-	int rf_dec, rf_int;
-	u32 bcnrc_val;
-
-	if (tx_rate != 0) {
-		/* Calculate the rate factor values to set */
-		rf_int = link_speed / tx_rate;
-		rf_dec = (link_speed - (rf_int * tx_rate));
-		rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
-
-		bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
-		bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
-			       IXGBE_RTTBCNRC_RF_INT_MASK);
-		bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
-	} else {
-		bcnrc_val = 0;
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
-	/*
-	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
-	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
-	 * and 0x004 otherwise.
-	 */
-	switch (hw->mac.type) {
-	case ixgbe_mac_82599EB:
-		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
-		break;
-	case ixgbe_mac_X540:
-		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
-		break;
-	default:
-		break;
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
-}
-
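The rate factor that ixgbe_set_vf_rate_limit() writes to RTTBCNRC above is a fixed-point number with a 14-bit fractional part; the IXGBE_RTTBCNRC_* masks it uses are defined in ixgbe_type.h later in this patch. A minimal standalone sketch of the same arithmetic, assuming an illustrative 300 Mb/s cap on a 10 Gb/s link (the speeds are example values, not taken from the patch):

	#include <stdio.h>

	#define RS_ENA       0x80000000u /* IXGBE_RTTBCNRC_RS_ENA */
	#define RF_INT_SHIFT 14          /* IXGBE_RTTBCNRC_RF_INT_SHIFT */
	#define RF_DEC_MASK  0x00003FFFu /* IXGBE_RTTBCNRC_RF_DEC_MASK */
	#define RF_INT_MASK  (RF_DEC_MASK << RF_INT_SHIFT)

	int main(void)
	{
		unsigned link_speed = 10000; /* link speed in Mb/s (example) */
		unsigned tx_rate = 300;      /* requested VF cap in Mb/s (example) */
		unsigned rf_int, rf_dec, bcnrc_val;

		rf_int = link_speed / tx_rate;                      /* 33 */
		rf_dec = link_speed - rf_int * tx_rate;             /* 100 Mb/s remainder */
		rf_dec = (rf_dec * (1u << RF_INT_SHIFT)) / tx_rate; /* 100*16384/300 = 5461 */

		bcnrc_val = RS_ENA;
		bcnrc_val |= (rf_int << RF_INT_SHIFT) & RF_INT_MASK;
		bcnrc_val |= rf_dec & RF_DEC_MASK;

		/* factor = 33 + 5461/16384 ~ 33.3333, so 10000/33.3333 ~ 300 Mb/s */
		printf("RTTBCNRC = 0x%08X\n", bcnrc_val); /* prints 0x80085555 */
		return 0;
	}

ixgbe_check_vf_rate_limit(), next, reapplies or clears these per-VF values whenever the link speed changes, since the stored factors are only meaningful relative to the speed they were computed against.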
-void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
-{
-	int actual_link_speed, i;
-	bool reset_rate = false;
-
-	/* VF Tx rate limit was not set */
-	if (adapter->vf_rate_link_speed == 0)
-		return;
-
-	actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
-	if (actual_link_speed != adapter->vf_rate_link_speed) {
-		reset_rate = true;
-		adapter->vf_rate_link_speed = 0;
-		dev_info(&adapter->pdev->dev,
-			 "Link speed has been changed. VF Transmit rate "
-			 "is disabled\n");
-	}
-
-	for (i = 0; i < adapter->num_vfs; i++) {
-		if (reset_rate)
-			adapter->vfinfo[i].tx_rate = 0;
-
-		ixgbe_set_vf_rate_limit(&adapter->hw, i,
-					adapter->vfinfo[i].tx_rate,
-					actual_link_speed);
-	}
-}
-
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_hw *hw = &adapter->hw;
-	int actual_link_speed;
-
-	actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
-	if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
-	    (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
-	    ((tx_rate != 0) && (tx_rate <= 10)))
-		/* rate limit cannot be set to 10Mb or less in 10Gb adapters */
-		return -EINVAL;
-
-	adapter->vf_rate_link_speed = actual_link_speed;
-	adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
-	ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
-
-	return 0;
-}
-
-int ixgbe_ndo_get_vf_config(struct net_device *netdev,
-			    int vf, struct ifla_vf_info *ivi)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	if (vf >= adapter->num_vfs)
-		return -EINVAL;
-	ivi->vf = vf;
-	memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
-	ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
-	ivi->vlan = adapter->vfinfo[vf].pf_vlan;
-	ivi->qos = adapter->vfinfo[vf].pf_qos;
-	return 0;
-}
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
deleted file mode 100644
index 3417556..0000000
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*******************************************************************************
-
-  Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2011 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_SRIOV_H_
-#define _IXGBE_SRIOV_H_
-
-void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
-void ixgbe_msg_task(struct ixgbe_adapter *adapter);
-int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
-void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
-void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
-int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
-int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
-			   u8 qos);
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
-int ixgbe_ndo_get_vf_config(struct net_device *netdev,
-			    int vf, struct ifla_vf_info *ivi);
-void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
-
-#endif /* _IXGBE_SRIOV_H_ */
-
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
deleted file mode 100644
index e0d970e..0000000
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ /dev/null
@@ -1,2877 +0,0 @@
-/*******************************************************************************
-
-  Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2011 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
-#ifndef _IXGBE_TYPE_H_
-#define _IXGBE_TYPE_H_
-
-#include <linux/types.h>
-#include <linux/mdio.h>
-#include <linux/netdevice.h>
-
-/* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID 0x8086
-
-/* Device IDs */
-#define IXGBE_DEV_ID_82598 0x10B6
-#define IXGBE_DEV_ID_82598_BX 0x1508
-#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
-#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
-#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
-#define IXGBE_DEV_ID_82598AT 0x10C8
-#define IXGBE_DEV_ID_82598AT2 0x150B
-#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
-#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
-#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
-#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
-#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
-#define IXGBE_DEV_ID_82599_KX4 0x10F7
-#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
-#define IXGBE_DEV_ID_82599_KR 0x1517
-#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
-#define IXGBE_DEV_ID_82599_CX4 0x10F9
-#define IXGBE_DEV_ID_82599_SFP 0x10FB
-#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
-#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
-#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
-#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
-#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
-#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
-#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
-#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
-#define IXGBE_DEV_ID_82599_LS 0x154F
-#define IXGBE_DEV_ID_X540T 0x1528
-
-/* General Registers */
-#define IXGBE_CTRL 0x00000
-#define IXGBE_STATUS 0x00008
-#define IXGBE_CTRL_EXT 0x00018
-#define IXGBE_ESDP 0x00020
-#define IXGBE_EODSDP 0x00028
-#define IXGBE_I2CCTL 0x00028
-#define IXGBE_LEDCTL 0x00200
-#define IXGBE_FRTIMER 0x00048
-#define IXGBE_TCPTIMER 0x0004C
-#define IXGBE_CORESPARE 0x00600
-#define IXGBE_EXVET 0x05078
-
-/* NVM Registers */
-#define IXGBE_EEC 0x10010
-#define IXGBE_EERD 0x10014
-#define IXGBE_EEWR 0x10018
-#define IXGBE_FLA 0x1001C
-#define IXGBE_EEMNGCTL 0x10110
-#define IXGBE_EEMNGDATA 0x10114
-#define IXGBE_FLMNGCTL 0x10118
-#define IXGBE_FLMNGDATA 0x1011C
-#define IXGBE_FLMNGCNT 0x10120
-#define IXGBE_FLOP 0x1013C
-#define IXGBE_GRC 0x10200
-
-/* General Receive Control */
-#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
-
-#define IXGBE_VPDDIAG0 0x10204
-#define IXGBE_VPDDIAG1 0x10208
-
-/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN 0x00000001
-#define IXGBE_I2C_CLK_OUT 0x00000002
-#define IXGBE_I2C_DATA_IN 0x00000004
-#define IXGBE_I2C_DATA_OUT 0x00000008
-
-/* Interrupt Registers */
-#define IXGBE_EICR 0x00800
-#define IXGBE_EICS 0x00808
-#define IXGBE_EIMS 0x00880
-#define IXGBE_EIMC 0x00888
-#define IXGBE_EIAC 0x00810
-#define IXGBE_EIAM 0x00890
-#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
-#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
-#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
-#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
-/*
- * 82598 EITR is 16 bits but set the limits based on the max
- * supported by all ixgbe hardware.  82599 EITR is only 12 bits,
- * with the lower 3 always zero.
- */
-#define IXGBE_MAX_INT_RATE 488281
-#define IXGBE_MIN_INT_RATE 956
-#define IXGBE_MAX_EITR 0x00000FF8
-#define IXGBE_MIN_EITR 8
-#define IXGBE_EITR(_i) (((_i) <= 23) ?
(0x00820 + ((_i) * 4)) : \ - (0x012300 + (((_i) - 24) * 4))) -#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 -#define IXGBE_EITR_LLI_MOD 0x00008000 -#define IXGBE_EITR_CNT_WDIS 0x80000000 -#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ -#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ -#define IXGBE_EITRSEL 0x00894 -#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ -#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ -#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) -#define IXGBE_GPIE 0x00898 - -/* Flow Control Registers */ -#define IXGBE_FCADBUL 0x03210 -#define IXGBE_FCADBUH 0x03214 -#define IXGBE_FCAMACL 0x04328 -#define IXGBE_FCAMACH 0x0432C -#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_PFCTOP 0x03008 -#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ -#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ -#define IXGBE_FCRTV 0x032A0 -#define IXGBE_FCCFG 0x03D00 -#define IXGBE_TFCS 0x0CE00 - -/* Receive DMA Registers */ -#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ - (0x0D000 + ((_i - 64) * 0x40))) -#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ - (0x0D004 + ((_i - 64) * 0x40))) -#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ - (0x0D008 + ((_i - 64) * 0x40))) -#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ - (0x0D010 + ((_i - 64) * 0x40))) -#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ - (0x0D018 + ((_i - 64) * 0x40))) -#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ - (0x0D028 + ((_i - 64) * 0x40))) -#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ - (0x0D02C + ((_i - 64) * 0x40))) -#define IXGBE_RSCDBU 0x03028 -#define IXGBE_RDDCC 0x02F20 -#define IXGBE_RXMEMWRAP 0x03190 -#define IXGBE_STARCTRL 0x03024 -/* - * Split and Replication Receive Control Registers - * 00-15 : 0x02100 + n*4 - * 16-64 : 0x01014 + n*0x40 - * 64-127: 0x0D014 + (n-64)*0x40 - */ -#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ - (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ - (0x0D014 + ((_i - 64) * 0x40)))) -/* - * Rx DCA Control Register: - * 00-15 : 0x02200 + n*4 - * 16-64 : 0x0100C + n*0x40 - * 64-127: 0x0D00C + (n-64)*0x40 - */ -#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ - (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ - (0x0D00C + ((_i - 64) * 0x40)))) -#define IXGBE_RDRXCTL 0x02F00 -#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) - /* 8 of these 0x03C00 - 0x03C1C */ -#define IXGBE_RXCTRL 0x03000 -#define IXGBE_DROPEN 0x03D04 -#define IXGBE_RXPBSIZE_SHIFT 10 - -/* Receive Registers */ -#define IXGBE_RXCSUM 0x05000 -#define IXGBE_RFCTL 0x05008 -#define IXGBE_DRECCCTL 0x02F08 -#define IXGBE_DRECCCTL_DISABLE 0 -/* Multicast Table Array - 128 entries */ -#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) -#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x0A200 + ((_i) * 8))) -#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ - (0x0A204 + ((_i) * 8))) -#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) -#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) -/* Packet split receive type */ -#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? 
(0x05480 + ((_i) * 4)) : \ - (0x0EA00 + ((_i) * 4))) -/* array of 4096 1-bit vlan filters */ -#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) -/*array of 4096 4-bit vlan vmdq indices */ -#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) -#define IXGBE_FCTRL 0x05080 -#define IXGBE_VLNCTRL 0x05088 -#define IXGBE_MCSTCTRL 0x05090 -#define IXGBE_MRQC 0x05818 -#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ -#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ -#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ -#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ -#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ -#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ -#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ -#define IXGBE_RQTC 0x0EC70 -#define IXGBE_MTQC 0x08120 -#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ -#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ -#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ -#define IXGBE_VT_CTL 0x051B0 -#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ -#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ -#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ -#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ -#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) -#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) -#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) -#define IXGBE_QDE 0x2F04 -#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ -#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ -#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) -#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) -#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) -#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) -#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ -#define IXGBE_RXFECCERR0 0x051B8 -#define IXGBE_LLITHRESH 0x0EC90 -#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_IMIRVP 0x05AC0 -#define IXGBE_VMD_CTL 0x0581C -#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ -#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ - -/* Flow Director registers */ -#define IXGBE_FDIRCTRL 0x0EE00 -#define IXGBE_FDIRHKEY 0x0EE68 -#define IXGBE_FDIRSKEY 0x0EE6C -#define IXGBE_FDIRDIP4M 0x0EE3C -#define IXGBE_FDIRSIP4M 0x0EE40 -#define IXGBE_FDIRTCPM 0x0EE44 -#define IXGBE_FDIRUDPM 0x0EE48 -#define IXGBE_FDIRIP6M 0x0EE74 -#define IXGBE_FDIRM 0x0EE70 - -/* Flow Director Stats registers */ -#define IXGBE_FDIRFREE 0x0EE38 -#define IXGBE_FDIRLEN 0x0EE4C -#define IXGBE_FDIRUSTAT 0x0EE50 -#define IXGBE_FDIRFSTAT 0x0EE54 -#define IXGBE_FDIRMATCH 0x0EE58 -#define IXGBE_FDIRMISS 0x0EE5C - -/* Flow Director Programming registers */ -#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ -#define IXGBE_FDIRIPSA 0x0EE18 -#define IXGBE_FDIRIPDA 0x0EE1C -#define IXGBE_FDIRPORT 0x0EE20 -#define IXGBE_FDIRVLAN 0x0EE24 -#define IXGBE_FDIRHASH 0x0EE28 -#define IXGBE_FDIRCMD 0x0EE2C - -/* Transmit DMA registers */ -#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ -#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) -#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) -#define 
IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) -#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) -#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) -#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) -#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) -#define IXGBE_DTXCTL 0x07E00 - -#define IXGBE_DMATXCTL 0x04A80 -#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ -#define IXGBE_PFDTXGSWC 0x08220 -#define IXGBE_DTXMXSZRQ 0x08100 -#define IXGBE_DTXTCPFLGL 0x04A88 -#define IXGBE_DTXTCPFLGH 0x04A8C -#define IXGBE_LBDRPEN 0x0CA00 -#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ - -#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ -#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ -#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ -#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ - -#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ - -/* Anti-spoofing defines */ -#define IXGBE_SPOOF_MACAS_MASK 0xFF -#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 -#define IXGBE_SPOOF_VLANAS_SHIFT 8 -#define IXGBE_PFVFSPOOF_REG_COUNT 8 - -#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ -/* Tx DCA Control register : 128 of these (0-127) */ -#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) -#define IXGBE_TIPG 0x0CB00 -#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_MNGTXMAP 0x0CD10 -#define IXGBE_TIPG_FIBER_DEFAULT 3 -#define IXGBE_TXPBSIZE_SHIFT 10 - -/* Wake up registers */ -#define IXGBE_WUC 0x05800 -#define IXGBE_WUFC 0x05808 -#define IXGBE_WUS 0x05810 -#define IXGBE_IPAV 0x05838 -#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ -#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ - -#define IXGBE_WUPL 0x05900 -#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ -#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */ -#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host - * Filter Table */ - -#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 -#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 - -/* Each Flexible Filter is at most 128 (0x80) bytes in length */ -#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 -#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ -#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ - -/* Definitions for power management and wakeup registers */ -/* Wake Up Control */ -#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ -#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ -#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ - -/* Wake Up Filter Control */ -#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ -#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ -#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ -#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ -#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ -#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ - -#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ -#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ -#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ -#define IXGBE_WUFC_FLX2 
0x00040000 /* Flexible Filter 2 Enable */ -#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ -#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ -#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ -#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ -#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ -#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */ -#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ - -/* Wake Up Status */ -#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC -#define IXGBE_WUS_MAG IXGBE_WUFC_MAG -#define IXGBE_WUS_EX IXGBE_WUFC_EX -#define IXGBE_WUS_MC IXGBE_WUFC_MC -#define IXGBE_WUS_BC IXGBE_WUFC_BC -#define IXGBE_WUS_ARP IXGBE_WUFC_ARP -#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 -#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 -#define IXGBE_WUS_MNG IXGBE_WUFC_MNG -#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 -#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 -#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 -#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 -#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 -#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 -#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS - -/* Wake Up Packet Length */ -#define IXGBE_WUPL_LENGTH_MASK 0xFFFF - -/* DCB registers */ -#define IXGBE_RMCS 0x03D00 -#define IXGBE_DPMCS 0x07F40 -#define IXGBE_PDPMCS 0x0CD00 -#define IXGBE_RUPPBMR 0x050A0 -#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ -#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ -#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ - - -/* Security Control Registers */ -#define IXGBE_SECTXCTRL 0x08800 -#define IXGBE_SECTXSTAT 0x08804 -#define IXGBE_SECTXBUFFAF 0x08808 -#define IXGBE_SECTXMINIFG 0x08810 -#define IXGBE_SECRXCTRL 0x08D00 -#define IXGBE_SECRXSTAT 0x08D04 - -/* Security Bit Fields and Masks */ -#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 -#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 -#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 - -#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 -#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 - -#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 -#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 - -#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 -#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 - -/* LinkSec (MacSec) Registers */ -#define IXGBE_LSECTXCAP 0x08A00 -#define IXGBE_LSECRXCAP 0x08F00 -#define IXGBE_LSECTXCTRL 0x08A04 -#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ -#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ -#define IXGBE_LSECTXSA 0x08A10 -#define IXGBE_LSECTXPN0 0x08A14 -#define IXGBE_LSECTXPN1 0x08A18 -#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ -#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ -#define IXGBE_LSECRXCTRL 0x08F04 -#define IXGBE_LSECRXSCL 0x08F08 -#define IXGBE_LSECRXSCH 0x08F0C -#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ -#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ -#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) -#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ -#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ -#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ -#define 
IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ -#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ -#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ -#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ -#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ -#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ -#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ -#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ -#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ -#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ -#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ -#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ -#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ -#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ -#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ -#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ - -/* LinkSec (MacSec) Bit Fields and Masks */ -#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 -#define IXGBE_LSECTXCAP_SUM_SHIFT 16 -#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 -#define IXGBE_LSECRXCAP_SUM_SHIFT 16 - -#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 -#define IXGBE_LSECTXCTRL_DISABLE 0x0 -#define IXGBE_LSECTXCTRL_AUTH 0x1 -#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 -#define IXGBE_LSECTXCTRL_AISCI 0x00000020 -#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 -#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 - -#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C -#define IXGBE_LSECRXCTRL_EN_SHIFT 2 -#define IXGBE_LSECRXCTRL_DISABLE 0x0 -#define IXGBE_LSECRXCTRL_CHECK 0x1 -#define IXGBE_LSECRXCTRL_STRICT 0x2 -#define IXGBE_LSECRXCTRL_DROP 0x3 -#define IXGBE_LSECRXCTRL_PLSH 0x00000040 -#define IXGBE_LSECRXCTRL_RP 0x00000080 -#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 - -/* IpSec Registers */ -#define IXGBE_IPSTXIDX 0x08900 -#define IXGBE_IPSTXSALT 0x08904 -#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ -#define IXGBE_IPSRXIDX 0x08E00 -#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ -#define IXGBE_IPSRXSPI 0x08E14 -#define IXGBE_IPSRXIPIDX 0x08E18 -#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ -#define IXGBE_IPSRXSALT 0x08E2C -#define IXGBE_IPSRXMOD 0x08E30 - -#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 - -/* DCB registers */ -#define IXGBE_RTRPCS 0x02430 -#define IXGBE_RTTDCS 0x04900 -#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ -#define IXGBE_RTTPCS 0x0CD00 -#define IXGBE_RTRUP2TC 0x03020 -#define IXGBE_RTTUP2TC 0x0C800 -#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_RTTDQSEL 0x04904 -#define IXGBE_RTTDT1C 0x04908 -#define IXGBE_RTTDT1S 0x0490C -#define IXGBE_RTTDTECC 0x04990 -#define IXGBE_RTTDTECC_NO_BCN 0x00000100 -#define IXGBE_RTTBCNRC 0x04984 -#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 -#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF -#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 -#define IXGBE_RTTBCNRC_RF_INT_MASK \ - (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) -#define IXGBE_RTTBCNRM 
0x04980 - -/* FCoE DMA Context Registers */ -#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ -#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */ -#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ -#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ -#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ -#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) -#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ -#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ -#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ -#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ -#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ -#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 -#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 -#define IXGBE_FCBUFF_OFFSET_SHIFT 16 -#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ -#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ -#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ -#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ -#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 - -/* FCoE SOF/EOF */ -#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ -#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ -#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ -#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ -/* FCoE Filter Context Registers */ -#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ -#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ -#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ -#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ -#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ -#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ -#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ -#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ -#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ -#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ -/* FCoE Receive Control */ -#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ -#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ -#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ -#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ -#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ -#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ -#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. 
Header */ -#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ -#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ -#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ -#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 -/* FCoE Redirection */ -#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ -#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ -#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ -#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ -#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ -#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ - -/* Stats registers */ -#define IXGBE_CRCERRS 0x04000 -#define IXGBE_ILLERRC 0x04004 -#define IXGBE_ERRBC 0x04008 -#define IXGBE_MSPDC 0x04010 -#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ -#define IXGBE_MLFC 0x04034 -#define IXGBE_MRFC 0x04038 -#define IXGBE_RLEC 0x04040 -#define IXGBE_LXONTXC 0x03F60 -#define IXGBE_LXONRXC 0x0CF60 -#define IXGBE_LXOFFTXC 0x03F68 -#define IXGBE_LXOFFRXC 0x0CF68 -#define IXGBE_LXONRXCNT 0x041A4 -#define IXGBE_LXOFFRXCNT 0x041A8 -#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ -#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ -#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ -#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ -#define IXGBE_PRC64 0x0405C -#define IXGBE_PRC127 0x04060 -#define IXGBE_PRC255 0x04064 -#define IXGBE_PRC511 0x04068 -#define IXGBE_PRC1023 0x0406C -#define IXGBE_PRC1522 0x04070 -#define IXGBE_GPRC 0x04074 -#define IXGBE_BPRC 0x04078 -#define IXGBE_MPRC 0x0407C -#define IXGBE_GPTC 0x04080 -#define IXGBE_GORCL 0x04088 -#define IXGBE_GORCH 0x0408C -#define IXGBE_GOTCL 0x04090 -#define IXGBE_GOTCH 0x04094 -#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ -#define IXGBE_RUC 0x040A4 -#define IXGBE_RFC 0x040A8 -#define IXGBE_ROC 0x040AC -#define IXGBE_RJC 0x040B0 -#define IXGBE_MNGPRC 0x040B4 -#define IXGBE_MNGPDC 0x040B8 -#define IXGBE_MNGPTC 0x0CF90 -#define IXGBE_TORL 0x040C0 -#define IXGBE_TORH 0x040C4 -#define IXGBE_TPR 0x040D0 -#define IXGBE_TPT 0x040D4 -#define IXGBE_PTC64 0x040D8 -#define IXGBE_PTC127 0x040DC -#define IXGBE_PTC255 0x040E0 -#define IXGBE_PTC511 0x040E4 -#define IXGBE_PTC1023 0x040E8 -#define IXGBE_PTC1522 0x040EC -#define IXGBE_MPTC 0x040F0 -#define IXGBE_BPTC 0x040F4 -#define IXGBE_XEC 0x04120 -#define IXGBE_SSVPC 0x08780 - -#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) -#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ - (0x08600 + ((_i) * 4))) -#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) - -#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ -#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ -#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ -#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */ -#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ -#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ -#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ -#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ -#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ -#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ -#define IXGBE_O2BGPTC 0x041C4 -#define IXGBE_O2BSPC 0x087B0 -#define IXGBE_B2OSPC 0x041C0 -#define IXGBE_B2OGPRC 0x02F90 -#define IXGBE_PCRC8ECL 0x0E810 -#define IXGBE_PCRC8ECH 0x0E811 -#define IXGBE_PCRC8ECH_MASK 0x1F -#define IXGBE_LDPCECL 0x0E820 -#define IXGBE_LDPCECH 0x0E821 - -/* Management */ -#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MANC 0x05820 -#define IXGBE_MFVAL 0x05824 -#define IXGBE_MANC2H 0x05860 -#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_MIPAF 0x058B0 -#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ -#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ -#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ -#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ -#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ -#define IXGBE_LSWFW 0x15014 - -/* ARC Subsystem registers */ -#define IXGBE_HICR 0x15F00 -#define IXGBE_FWSTS 0x15F0C -#define IXGBE_HSMC0R 0x15F04 -#define IXGBE_HSMC1R 0x15F08 -#define IXGBE_SWSR 0x15F10 -#define IXGBE_HFDR 0x15FE8 -#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ - -#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ -/* Driver sets this bit when done to put command in RAM */ -#define IXGBE_HICR_C 0x02 -#define IXGBE_HICR_SV 0x04 /* Status Validity */ -#define IXGBE_HICR_FW_RESET_ENABLE 0x40 -#define IXGBE_HICR_FW_RESET 0x80 - -/* PCI-E registers */ -#define IXGBE_GCR 0x11000 -#define IXGBE_GTV 0x11004 -#define IXGBE_FUNCTAG 0x11008 -#define IXGBE_GLT 0x1100C -#define IXGBE_GSCL_1 0x11010 -#define IXGBE_GSCL_2 0x11014 -#define IXGBE_GSCL_3 0x11018 -#define IXGBE_GSCL_4 0x1101C -#define IXGBE_GSCN_0 0x11020 -#define IXGBE_GSCN_1 0x11024 -#define IXGBE_GSCN_2 0x11028 -#define IXGBE_GSCN_3 0x1102C -#define IXGBE_FACTPS 0x10150 -#define IXGBE_PCIEANACTL 0x11040 -#define IXGBE_SWSM 0x10140 -#define IXGBE_FWSM 0x10148 -#define IXGBE_GSSR 0x10160 -#define IXGBE_MREVID 0x11064 -#define IXGBE_DCA_ID 0x11070 -#define IXGBE_DCA_CTRL 0x11074 -#define IXGBE_SWFW_SYNC IXGBE_GSSR - -/* PCIe registers 82599-specific */ -#define IXGBE_GCR_EXT 0x11050 -#define IXGBE_GSCL_5_82599 0x11030 -#define IXGBE_GSCL_6_82599 0x11034 -#define IXGBE_GSCL_7_82599 0x11038 
-#define IXGBE_GSCL_8_82599 0x1103C -#define IXGBE_PHYADR_82599 0x11040 -#define IXGBE_PHYDAT_82599 0x11044 -#define IXGBE_PHYCTL_82599 0x11048 -#define IXGBE_PBACLR_82599 0x11068 -#define IXGBE_CIAA_82599 0x11088 -#define IXGBE_CIAD_82599 0x1108C -#define IXGBE_PICAUSE 0x110B0 -#define IXGBE_PIENA 0x110B8 -#define IXGBE_CDQ_MBR_82599 0x110B4 -#define IXGBE_PCIESPARE 0x110BC -#define IXGBE_MISC_REG_82599 0x110F0 -#define IXGBE_ECC_CTRL_0_82599 0x11100 -#define IXGBE_ECC_CTRL_1_82599 0x11104 -#define IXGBE_ECC_STATUS_82599 0x110E0 -#define IXGBE_BAR_CTRL_82599 0x110F4 - -/* PCI Express Control */ -#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 -#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 -#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 -#define IXGBE_GCR_CAP_VER2 0x00040000 - -#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 -#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 -#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 -#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 -#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ - IXGBE_GCR_EXT_VT_MODE_64) - -/* Time Sync Registers */ -#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ -#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ -#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ -#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ -#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ -#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ -#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ -#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ -#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ -#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ -#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ -#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ -#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ -#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ -#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ -#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ -#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ -#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ -#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ -#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ -#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ -#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ -#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ -#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ -#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ - -/* Diagnostic Registers */ -#define IXGBE_RDSTATCTL 0x02C20 -#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ -#define IXGBE_RDHMPN 0x02F08 -#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) -#define IXGBE_RDPROBE 0x02F20 -#define IXGBE_RDMAM 0x02F30 -#define IXGBE_RDMAD 0x02F34 -#define IXGBE_TDSTATCTL 0x07C20 -#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ -#define IXGBE_TDHMPN 0x07F08 -#define IXGBE_TDHMPN2 0x082FC -#define IXGBE_TXDESCIC 0x082CC -#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) -#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) -#define IXGBE_TDPROBE 0x07F20 -#define IXGBE_TXBUFCTRL 0x0C600 
-#define IXGBE_TXBUFDATA0 0x0C610 -#define IXGBE_TXBUFDATA1 0x0C614 -#define IXGBE_TXBUFDATA2 0x0C618 -#define IXGBE_TXBUFDATA3 0x0C61C -#define IXGBE_RXBUFCTRL 0x03600 -#define IXGBE_RXBUFDATA0 0x03610 -#define IXGBE_RXBUFDATA1 0x03614 -#define IXGBE_RXBUFDATA2 0x03618 -#define IXGBE_RXBUFDATA3 0x0361C -#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ -#define IXGBE_RFVAL 0x050A4 -#define IXGBE_MDFTC1 0x042B8 -#define IXGBE_MDFTC2 0x042C0 -#define IXGBE_MDFTFIFO1 0x042C4 -#define IXGBE_MDFTFIFO2 0x042C8 -#define IXGBE_MDFTS 0x042CC -#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ -#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ -#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ -#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ -#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ -#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ -#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ -#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ -#define IXGBE_PCIEECCCTL 0x1106C -#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ -#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ -#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ -#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ -#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ -#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ -#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ -#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ -#define IXGBE_PCIEECCCTL0 0x11100 -#define IXGBE_PCIEECCCTL1 0x11104 -#define IXGBE_RXDBUECC 0x03F70 -#define IXGBE_TXDBUECC 0x0CF70 -#define IXGBE_RXDBUEST 0x03F74 -#define IXGBE_TXDBUEST 0x0CF74 -#define IXGBE_PBTXECC 0x0C300 -#define IXGBE_PBRXECC 0x03300 -#define IXGBE_GHECCR 0x110B0 - -/* MAC Registers */ -#define IXGBE_PCS1GCFIG 0x04200 -#define IXGBE_PCS1GLCTL 0x04208 -#define IXGBE_PCS1GLSTA 0x0420C -#define IXGBE_PCS1GDBG0 0x04210 -#define IXGBE_PCS1GDBG1 0x04214 -#define IXGBE_PCS1GANA 0x04218 -#define IXGBE_PCS1GANLP 0x0421C -#define IXGBE_PCS1GANNP 0x04220 -#define IXGBE_PCS1GANLPNP 0x04224 -#define IXGBE_HLREG0 0x04240 -#define IXGBE_HLREG1 0x04244 -#define IXGBE_PAP 0x04248 -#define IXGBE_MACA 0x0424C -#define IXGBE_APAE 0x04250 -#define IXGBE_ARD 0x04254 -#define IXGBE_AIS 0x04258 -#define IXGBE_MSCA 0x0425C -#define IXGBE_MSRWD 0x04260 -#define IXGBE_MLADD 0x04264 -#define IXGBE_MHADD 0x04268 -#define IXGBE_MAXFRS 0x04268 -#define IXGBE_TREG 0x0426C -#define IXGBE_PCSS1 0x04288 -#define IXGBE_PCSS2 0x0428C -#define IXGBE_XPCSS 0x04290 -#define IXGBE_MFLCN 0x04294 -#define IXGBE_SERDESC 0x04298 -#define IXGBE_MACS 0x0429C -#define IXGBE_AUTOC 0x042A0 -#define IXGBE_LINKS 0x042A4 -#define IXGBE_LINKS2 0x04324 -#define IXGBE_AUTOC2 0x042A8 -#define IXGBE_AUTOC3 0x042AC -#define IXGBE_ANLP1 0x042B0 -#define IXGBE_ANLP2 0x042B4 -#define IXGBE_MACC 0x04330 -#define IXGBE_ATLASCTL 0x04800 -#define IXGBE_MMNGC 0x042D0 -#define IXGBE_ANLPNP1 0x042D4 -#define IXGBE_ANLPNP2 0x042D8 -#define IXGBE_KRPCSFC 0x042E0 -#define IXGBE_KRPCSS 0x042E4 -#define IXGBE_FECS1 0x042E8 -#define IXGBE_FECS2 0x042EC -#define IXGBE_SMADARCTL 0x14F10 -#define IXGBE_MPVC 0x04318 -#define 
IXGBE_SGMIIC 0x04314 - -/* Statistics Registers */ -#define IXGBE_RXNFGPC 0x041B0 -#define IXGBE_RXNFGBCL 0x041B4 -#define IXGBE_RXNFGBCH 0x041B8 -#define IXGBE_RXDGPC 0x02F50 -#define IXGBE_RXDGBCL 0x02F54 -#define IXGBE_RXDGBCH 0x02F58 -#define IXGBE_RXDDGPC 0x02F5C -#define IXGBE_RXDDGBCL 0x02F60 -#define IXGBE_RXDDGBCH 0x02F64 -#define IXGBE_RXLPBKGPC 0x02F68 -#define IXGBE_RXLPBKGBCL 0x02F6C -#define IXGBE_RXLPBKGBCH 0x02F70 -#define IXGBE_RXDLPBKGPC 0x02F74 -#define IXGBE_RXDLPBKGBCL 0x02F78 -#define IXGBE_RXDLPBKGBCH 0x02F7C -#define IXGBE_TXDGPC 0x087A0 -#define IXGBE_TXDGBCL 0x087A4 -#define IXGBE_TXDGBCH 0x087A8 - -#define IXGBE_RXDSTATCTRL 0x02F40 - -/* Copper Pond 2 link timeout */ -#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 - -/* Omer CORECTL */ -#define IXGBE_CORECTL 0x014F00 -/* BARCTRL */ -#define IXGBE_BARCTRL 0x110F4 -#define IXGBE_BARCTRL_FLSIZE 0x0700 -#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 -#define IXGBE_BARCTRL_CSRSIZE 0x2000 - -/* RSCCTL Bit Masks */ -#define IXGBE_RSCCTL_RSCEN 0x01 -#define IXGBE_RSCCTL_MAXDESC_1 0x00 -#define IXGBE_RSCCTL_MAXDESC_4 0x04 -#define IXGBE_RSCCTL_MAXDESC_8 0x08 -#define IXGBE_RSCCTL_MAXDESC_16 0x0C - -/* RSCDBU Bit Masks */ -#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F -#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 - -/* RDRXCTL Bit Masks */ -#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ -#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ -#define IXGBE_RDRXCTL_MVMEN 0x00000020 -#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ -#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ -#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ -#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ -#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ -#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ - -/* RQTC Bit Masks and Shifts */ -#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) -#define IXGBE_RQTC_TC0_MASK (0x7 << 0) -#define IXGBE_RQTC_TC1_MASK (0x7 << 4) -#define IXGBE_RQTC_TC2_MASK (0x7 << 8) -#define IXGBE_RQTC_TC3_MASK (0x7 << 12) -#define IXGBE_RQTC_TC4_MASK (0x7 << 16) -#define IXGBE_RQTC_TC5_MASK (0x7 << 20) -#define IXGBE_RQTC_TC6_MASK (0x7 << 24) -#define IXGBE_RQTC_TC7_MASK (0x7 << 28) - -/* PSRTYPE.RQPL Bit masks and shift */ -#define IXGBE_PSRTYPE_RQPL_MASK 0x7 -#define IXGBE_PSRTYPE_RQPL_SHIFT 29 - -/* CTRL Bit Masks */ -#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ -#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. 
*/ -#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ - -/* FACTPS */ -#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ - -/* MHADD Bit Masks */ -#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 -#define IXGBE_MHADD_MFS_SHIFT 16 - -/* Extended Device Control */ -#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ -#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ -#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ -#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ - -/* Direct Cache Access (DCA) definitions */ -#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ -#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ - -#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ -#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ - -#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ -#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ -#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ -#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ -#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ -#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ -#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */ - -#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ -#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ -#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ -#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ - -/* MSCA Bit Masks */ -#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */ -#define IXGBE_MSCA_NP_ADDR_SHIFT 0 -#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */ -#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */ -#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ -#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ -#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ -#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ -#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ -#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ -#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */ -#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/ -#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ -#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ -#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ -#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */ -#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ -#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ - -/* MSRWD bit masks */ -#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF -#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 -#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 -#define IXGBE_MSRWD_READ_DATA_SHIFT 16 - -/* Atlas registers */ -#define IXGBE_ATLAS_PDN_LPBK 0x24 -#define IXGBE_ATLAS_PDN_10G 0xB -#define IXGBE_ATLAS_PDN_1G 0xC -#define IXGBE_ATLAS_PDN_AN 
0xD - -/* Atlas bit masks */ -#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 -#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 -#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 -#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 -#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 - -/* Omer bit masks */ -#define IXGBE_CORECTL_WRITE_CMD 0x00010000 - -/* MDIO definitions */ - -#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ - -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 -#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 - -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ - -/* MII clause 22/28 definitions */ -#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ -#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ -#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ -#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ -#define IXGBE_MII_AUTONEG_REG 0x0 - -#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 -#define IXGBE_MAX_PHY_ADDR 32 - -/* PHY IDs*/ -#define TN1010_PHY_ID 0x00A19410 -#define TNX_FW_REV 0xB -#define X540_PHY_ID 0x01540200 -#define QT2022_PHY_ID 0x0043A400 -#define ATH_PHY_ID 0x03429050 -#define AQ_FW_REV 0x20 - -/* PHY Types */ -#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 - -/* Special PHY Init Routine */ -#define IXGBE_PHY_INIT_OFFSET_NL 0x002B -#define IXGBE_PHY_INIT_END_NL 0xFFFF -#define IXGBE_CONTROL_MASK_NL 0xF000 -#define IXGBE_DATA_MASK_NL 0x0FFF -#define IXGBE_CONTROL_SHIFT_NL 12 -#define IXGBE_DELAY_NL 0 -#define IXGBE_DATA_NL 1 -#define IXGBE_CONTROL_NL 0x000F -#define IXGBE_CONTROL_EOL_NL 0x0FFF -#define IXGBE_CONTROL_SOL_NL 0x0000 - -/* General purpose Interrupt Enable */ -#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ -#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ -#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ -#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ -#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ -#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ -#define IXGBE_GPIE_EIAME 0x40000000 -#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 -#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 -#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ -#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ -#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ -#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ - -/* Packet Buffer Initialization */ -#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ -#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ -#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ -#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ -#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ -#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ -#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/ -#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/ - -#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ -#define IXGBE_MAX_PB 8 - -/* 
Packet buffer allocation strategies */ -enum { - PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ -#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL - PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ -#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED -}; - -/* Transmit Flow Control status */ -#define IXGBE_TFCS_TXOFF 0x00000001 -#define IXGBE_TFCS_TXOFF0 0x00000100 -#define IXGBE_TFCS_TXOFF1 0x00000200 -#define IXGBE_TFCS_TXOFF2 0x00000400 -#define IXGBE_TFCS_TXOFF3 0x00000800 -#define IXGBE_TFCS_TXOFF4 0x00001000 -#define IXGBE_TFCS_TXOFF5 0x00002000 -#define IXGBE_TFCS_TXOFF6 0x00004000 -#define IXGBE_TFCS_TXOFF7 0x00008000 - -/* TCP Timer */ -#define IXGBE_TCPTIMER_KS 0x00000100 -#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 -#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 -#define IXGBE_TCPTIMER_LOOP 0x00000800 -#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF - -/* HLREG0 Bit Masks */ -#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ -#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ -#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ -#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ -#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ -#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ -#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ -#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ -#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ -#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ -#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ -#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ -#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ -#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ -#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ - -/* VMD_CTL bitmasks */ -#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 -#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 - -/* VT_CTL bitmasks */ -#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ -#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ -#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ -#define IXGBE_VT_CTL_POOL_SHIFT 7 -#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) - -/* VMOLR bitmasks */ -#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ -#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ -#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ -#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ -#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ - -/* VFRE bitmask */ -#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF - -#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ - -/* RDHMPN and TDHMPN bitmasks */ -#define IXGBE_RDHMPN_RDICADDR 0x007FF800 -#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 -#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 -#define IXGBE_TDHMPN_TDICADDR 0x003FF800 -#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 -#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 - -#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 -#define IXGBE_RDMAM_DWORD_SHIFT 9 -#define IXGBE_RDMAM_DESC_COMP_FIFO 1 -#define IXGBE_RDMAM_DFC_CMD_FIFO 2 -#define IXGBE_RDMAM_TCN_STATUS_RAM 4 -#define IXGBE_RDMAM_WB_COLL_FIFO 5 -#define IXGBE_RDMAM_QSC_CNT_RAM 6 -#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 -#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA -#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 -#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 -#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 -#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 -#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 -#define 
IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 -#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 -#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 -#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 -#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 -#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 -#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 -#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 -#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 - -#define IXGBE_TXDESCIC_READY 0x80000000 - -/* Receive Checksum Control */ -#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ -#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ - -/* FCRTL Bit Masks */ -#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ -#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ - -/* PAP bit masks*/ -#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ - -/* RMCS Bit Masks */ -#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ -/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ -#define IXGBE_RMCS_RAC 0x00000004 -#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ -#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ -#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ -#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ - -/* FCCFG Bit Masks */ -#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ -#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ - -/* Interrupt register bitmasks */ - -/* Extended Interrupt Cause Read */ -#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ -#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ -#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ -#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ -#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ -#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ -#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ -#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ -#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ -#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ -#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ -#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ -#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ -#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ -#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ -#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ - -/* Extended Interrupt Cause Set */ -#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ -#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ -#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ -#define 
IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -/* Extended Interrupt Mask Set */ -#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ -#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ -#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ -#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -/* Extended Interrupt Mask Clear */ -#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ -#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ -#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ -#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ -#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ -#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ -#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ -#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ -#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ -#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -#define IXGBE_EIMS_ENABLE_MASK ( \ - IXGBE_EIMS_RTX_QUEUE | \ - IXGBE_EIMS_LSC | \ - IXGBE_EIMS_TCP_TIMER | \ - IXGBE_EIMS_OTHER) - -/* Immediate Interrupt Rx (A.K.A. 
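/*
 * Illustrative sketch (hypothetical helper, not from this patch): the
 * EICS/EIMS/EIMC registers mirror the EICR bit layout, so one set of
 * cause bits serves all four. Assumes the IXGBE_EIMS/IXGBE_EICR register
 * offsets and the IXGBE_WRITE_REG()/IXGBE_READ_REG() accessors defined
 * elsewhere in this header:
 */
static inline void ixgbe_example_irq_enable(struct ixgbe_hw *hw)
{
	u32 eicr;

	/* unmask the standard causes: queues, link, TCP timer, other */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_ENABLE_MASK);

	/* EICR is read-to-clear; reading acknowledges pending causes */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (eicr & IXGBE_EICR_LSC)
		;	/* link status changed - schedule a link check */
}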
Low Latency Interrupt) */ -#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ -#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ -#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ -#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ -#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ -#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ -#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ -#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ -#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ -#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ -#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ -#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ -#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ -#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ -#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ -#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ -#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ -#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */ -#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ -#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ -#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ -#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ -#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ - -#define IXGBE_MAX_FTQF_FILTERS 128 -#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 -#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 -#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 -#define IXGBE_FTQF_PROTOCOL_SCTP 2 -#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 -#define IXGBE_FTQF_PRIORITY_SHIFT 2 -#define IXGBE_FTQF_POOL_MASK 0x0000003F -#define IXGBE_FTQF_POOL_SHIFT 8 -#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F -#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 -#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E -#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D -#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B -#define IXGBE_FTQF_DEST_PORT_MASK 0x17 -#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F -#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 -#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 - -/* Interrupt clear mask */ -#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF - -/* Interrupt Vector Allocation Registers */ -#define IXGBE_IVAR_REG_NUM 25 -#define IXGBE_IVAR_REG_NUM_82599 64 -#define IXGBE_IVAR_TXRX_ENTRY 96 -#define IXGBE_IVAR_RX_ENTRY 64 -#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) -#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) -#define IXGBE_IVAR_TX_ENTRY 32 - -#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ -#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ - -#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) - -#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ - -/* ETYPE Queue Filter/Select Bit Masks */ -#define IXGBE_MAX_ETQF_FILTERS 8 -#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ -#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ -#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ -#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ -#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ - -#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ -#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 -#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ -#define 
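/*
 * Illustrative sketch (hypothetical helper, not from this patch): a
 * 5-tuple (FTQF) filter word is built by OR-ing the shifted fields
 * above. A TCP filter with the pool field masked off might be composed
 * like this:
 */
static inline u32 ixgbe_example_ftqf(u8 priority)
{
	u32 ftqf = IXGBE_FTQF_PROTOCOL_TCP & IXGBE_FTQF_PROTOCOL_MASK;

	ftqf |= (u32)(priority & IXGBE_FTQF_PRIORITY_MASK)
		<< IXGBE_FTQF_PRIORITY_SHIFT;
	ftqf |= IXGBE_FTQF_POOL_MASK_EN;	/* don't match on pool */
	return ftqf | IXGBE_FTQF_QUEUE_ENABLE;	/* arm the filter */
}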
IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ - -/* - * ETQF filter list: one static filter per filter consumer. This is - * to avoid filter collisions later. Add new filters - * here!! - * - * Current filters: - * EAPOL 802.1x (0x888e): Filter 0 - * FCoE (0x8906): Filter 2 - * 1588 (0x88f7): Filter 3 - * FIP (0x8914): Filter 4 - */ -#define IXGBE_ETQF_FILTER_EAPOL 0 -#define IXGBE_ETQF_FILTER_FCOE 2 -#define IXGBE_ETQF_FILTER_1588 3 -#define IXGBE_ETQF_FILTER_FIP 4 -/* VLAN Control Bit Masks */ -#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ -#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ -#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ -#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ -#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ - -/* VLAN pool filtering masks */ -#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ -#define IXGBE_VLVF_ENTRIES 64 -#define IXGBE_VLVF_VLANID_MASK 0x00000FFF - -/* Per VF Port VLAN insertion rules */ -#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ -#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ - -#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ - -/* STATUS Bit Masks */ -#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ -#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ -#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ - -#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ -#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ - -/* ESDP Bit Masks */ -#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ -#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ -#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ -#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ -#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ -#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ -#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ -#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ -#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ - -/* LEDCTL Bit Masks */ -#define IXGBE_LED_IVRT_BASE 0x00000040 -#define IXGBE_LED_BLINK_BASE 0x00000080 -#define IXGBE_LED_MODE_MASK_BASE 0x0000000F -#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) -#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) -#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) -#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) -#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) - -/* LED modes */ -#define IXGBE_LED_LINK_UP 0x0 -#define IXGBE_LED_LINK_10G 0x1 -#define IXGBE_LED_MAC 0x2 -#define IXGBE_LED_FILTER 0x3 -#define IXGBE_LED_LINK_ACTIVE 0x4 -#define IXGBE_LED_LINK_1G 0x5 -#define IXGBE_LED_ON 0xE -#define IXGBE_LED_OFF 0xF - -/* AUTOC Bit Masks */ -#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 -#define IXGBE_AUTOC_KX4_SUPP 0x80000000 -#define IXGBE_AUTOC_KX_SUPP 0x40000000 -#define IXGBE_AUTOC_PAUSE 0x30000000 -#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 -#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 -#define IXGBE_AUTOC_RF 0x08000000 -#define IXGBE_AUTOC_PD_TMR 0x06000000 -#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 -#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 -#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 -#define IXGBE_AUTOC_FECA 0x00040000 -#define IXGBE_AUTOC_FECR 0x00020000 -#define IXGBE_AUTOC_KR_SUPP 0x00010000 -#define IXGBE_AUTOC_AN_RESTART 0x00001000 -#define IXGBE_AUTOC_FLU 0x00000001 -#define IXGBE_AUTOC_LMS_SHIFT 13 -#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << 
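/*
 * Illustrative sketch (hypothetical helper, not from this patch):
 * claiming one of the static ETQF slots listed above means programming
 * the matching ETQF/ETQS pair; IXGBE_ETQF(_i)/IXGBE_ETQS(_i) register
 * index macros are assumed from earlier in this header. Steering 1588
 * (PTP, ethertype 0x88f7) frames to a given queue might look like:
 */
static inline void ixgbe_example_etqf_1588(struct ixgbe_hw *hw, u32 queue)
{
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
			0x88F7 | IXGBE_ETQF_1588 | IXGBE_ETQF_FILTER_EN);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_1588),
			IXGBE_ETQS_QUEUE_EN |
			(queue << IXGBE_ETQS_RX_QUEUE_SHIFT));
}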
IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) -#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) - -#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 -#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 -#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 -#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 -#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) - -#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 -#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 -#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 -#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) - -#define IXGBE_MACC_FLU 0x00000001 -#define IXGBE_MACC_FSV_10G 0x00030000 -#define IXGBE_MACC_FS 0x00040000 -#define IXGBE_MAC_RX2TX_LPBK 0x00000002 - -/* LINKS Bit Masks */ -#define IXGBE_LINKS_KX_AN_COMP 0x80000000 -#define IXGBE_LINKS_UP 0x40000000 -#define IXGBE_LINKS_SPEED 0x20000000 -#define IXGBE_LINKS_MODE 0x18000000 -#define IXGBE_LINKS_RX_MODE 0x06000000 -#define IXGBE_LINKS_TX_MODE 0x01800000 -#define IXGBE_LINKS_XGXS_EN 0x00400000 -#define IXGBE_LINKS_SGMII_EN 0x02000000 -#define IXGBE_LINKS_PCS_1G_EN 0x00200000 -#define IXGBE_LINKS_1G_AN_EN 0x00100000 -#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 -#define IXGBE_LINKS_1G_SYNC 0x00040000 -#define IXGBE_LINKS_10G_ALIGN 0x00020000 -#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 -#define IXGBE_LINKS_TL_FAULT 0x00001000 -#define IXGBE_LINKS_SIGNAL 0x00000F00 - -#define IXGBE_LINKS_SPEED_82599 0x30000000 -#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 -#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 -#define IXGBE_LINKS_SPEED_100_82599 0x10000000 -#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ -#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ - -#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 - -/* PCS1GLSTA Bit Masks */ -#define IXGBE_PCS1GLSTA_LINK_OK 1 -#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 -#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 -#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 -#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 -#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 -#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 - -#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 -#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 - -/* PCS1GLCTL Bit Masks */ -#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ -#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 -#define 
IXGBE_PCS1GLCTL_FORCE_LINK 0x20 -#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 -#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 -#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 - -/* ANLP1 Bit Masks */ -#define IXGBE_ANLP1_PAUSE 0x0C00 -#define IXGBE_ANLP1_SYM_PAUSE 0x0400 -#define IXGBE_ANLP1_ASM_PAUSE 0x0800 -#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 - -/* SW Semaphore Register bitmasks */ -#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ -#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ -#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ -#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ - -/* SW_FW_SYNC/GSSR definitions */ -#define IXGBE_GSSR_EEP_SM 0x0001 -#define IXGBE_GSSR_PHY0_SM 0x0002 -#define IXGBE_GSSR_PHY1_SM 0x0004 -#define IXGBE_GSSR_MAC_CSR_SM 0x0008 -#define IXGBE_GSSR_FLASH_SM 0x0010 -#define IXGBE_GSSR_SW_MNG_SM 0x0400 - -/* FW Status register bitmask */ -#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ - -/* EEC Register */ -#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ -#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ -#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ -#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ -#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ -#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ -#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ -#define IXGBE_EEC_FWE_SHIFT 4 -#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ -#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ -#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ -#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ -#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ -#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ -#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ -/* EEPROM Addressing bits based on type (0-small, 1-large) */ -#define IXGBE_EEC_ADDR_SIZE 0x00000400 -#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ -#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. 
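/*
 * Illustrative sketch (hypothetical helper, not from this patch): a
 * generic EERD word read follows the start/poll/extract pattern implied
 * by the IXGBE_EEPROM_RW_* definitions just below. Assumes the IXGBE_EERD
 * register offset and IXGBE_READ_REG()/IXGBE_WRITE_REG() accessors
 * defined elsewhere in this header, plus the kernel's udelay():
 */
static inline s32 ixgbe_example_read_eerd(struct ixgbe_hw *hw,
					  u16 offset, u16 *data)
{
	u32 eerd, i;

	/* load the word address and kick off the read */
	eerd = ((u32)offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
	       IXGBE_EEPROM_RW_REG_START;
	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);

	/* poll the done bit, then pull the data out of the upper half */
	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		eerd = IXGBE_READ_REG(hw, IXGBE_EERD);
		if (eerd & IXGBE_EEPROM_RW_REG_DONE) {
			*data = (u16)(eerd >> IXGBE_EEPROM_RW_REG_DATA);
			return 0;
		}
		udelay(5);
	}
	return IXGBE_ERR_EEPROM;
}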
*/ - -#define IXGBE_EEC_SIZE_SHIFT 11 -#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 -#define IXGBE_EEPROM_OPCODE_BITS 8 - -/* Part Number String Length */ -#define IXGBE_PBANUM_LENGTH 11 - -/* Checksum and EEPROM pointers */ -#define IXGBE_PBANUM_PTR_GUARD 0xFAFA -#define IXGBE_EEPROM_CHECKSUM 0x3F -#define IXGBE_EEPROM_SUM 0xBABA -#define IXGBE_PCIE_ANALOG_PTR 0x03 -#define IXGBE_ATLAS0_CONFIG_PTR 0x04 -#define IXGBE_PHY_PTR 0x04 -#define IXGBE_ATLAS1_CONFIG_PTR 0x05 -#define IXGBE_OPTION_ROM_PTR 0x05 -#define IXGBE_PCIE_GENERAL_PTR 0x06 -#define IXGBE_PCIE_CONFIG0_PTR 0x07 -#define IXGBE_PCIE_CONFIG1_PTR 0x08 -#define IXGBE_CORE0_PTR 0x09 -#define IXGBE_CORE1_PTR 0x0A -#define IXGBE_MAC0_PTR 0x0B -#define IXGBE_MAC1_PTR 0x0C -#define IXGBE_CSR0_CONFIG_PTR 0x0D -#define IXGBE_CSR1_CONFIG_PTR 0x0E -#define IXGBE_FW_PTR 0x0F -#define IXGBE_PBANUM0_PTR 0x15 -#define IXGBE_PBANUM1_PTR 0x16 -#define IXGBE_FREE_SPACE_PTR 0X3E -#define IXGBE_SAN_MAC_ADDR_PTR 0x28 -#define IXGBE_DEVICE_CAPS 0x2C -#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 -#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 -#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 - -/* MSI-X capability fields masks */ -#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF - -/* Legacy EEPROM word offsets */ -#define IXGBE_ISCSI_BOOT_CAPS 0x0033 -#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 -#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 - -/* EEPROM Commands - SPI */ -#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ -#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 -#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ -#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ -#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ -#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ -/* EEPROM reset Write Enable latch */ -#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 -#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ -#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ -#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ -#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ -#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ - -/* EEPROM Read Register */ -#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ -#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ -#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ -#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ -#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ - -#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 - -#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 -#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ -#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ - -#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS -#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ -#endif - -#ifndef IXGBE_EERD_EEWR_ATTEMPTS -/* Number of 5 microseconds we wait for EERD read and - * EERW write to complete */ -#define IXGBE_EERD_EEWR_ATTEMPTS 100000 -#endif - -#ifndef IXGBE_FLUDONE_ATTEMPTS -/* # attempts we wait for flush update to complete */ -#define IXGBE_FLUDONE_ATTEMPTS 20000 -#endif - -#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ -#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ -#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI 
Disable */ -#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ - -#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 -#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 -#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 -#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 -#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 -#define IXGBE_FW_LESM_STATE_1 0x1 -#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ -#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 -#define IXGBE_FW_PATCH_VERSION_4 0x7 -#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ -#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ -#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ -#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ -#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ -#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ -#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */ -#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ - -/* PCI Bus Info */ -#define IXGBE_PCI_DEVICE_STATUS 0xAA -#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 -#define IXGBE_PCI_LINK_STATUS 0xB2 -#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 -#define IXGBE_PCI_LINK_WIDTH 0x3F0 -#define IXGBE_PCI_LINK_WIDTH_1 0x10 -#define IXGBE_PCI_LINK_WIDTH_2 0x20 -#define IXGBE_PCI_LINK_WIDTH_4 0x40 -#define IXGBE_PCI_LINK_WIDTH_8 0x80 -#define IXGBE_PCI_LINK_SPEED 0xF -#define IXGBE_PCI_LINK_SPEED_2500 0x1 -#define IXGBE_PCI_LINK_SPEED_5000 0x2 -#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E -#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 -#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 - -/* Number of 100 microseconds we wait for PCI Express master disable */ -#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 - -/* Check whether address is multicast. This is little-endian specific check.*/ -#define IXGBE_IS_MULTICAST(Address) \ - (bool)(((u8 *)(Address))[0] & ((u8)0x01)) - -/* Check whether an address is broadcast. */ -#define IXGBE_IS_BROADCAST(Address) \ - ((((u8 *)(Address))[0] == ((u8)0xff)) && \ - (((u8 *)(Address))[1] == ((u8)0xff))) - -/* RAH */ -#define IXGBE_RAH_VIND_MASK 0x003C0000 -#define IXGBE_RAH_VIND_SHIFT 18 -#define IXGBE_RAH_AV 0x80000000 -#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF - -/* Header split receive */ -#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 -#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E -#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 -#define IXGBE_RFCTL_NFSW_DIS 0x00000040 -#define IXGBE_RFCTL_NFSR_DIS 0x00000080 -#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 -#define IXGBE_RFCTL_NFS_VER_SHIFT 8 -#define IXGBE_RFCTL_NFS_VER_2 0 -#define IXGBE_RFCTL_NFS_VER_3 1 -#define IXGBE_RFCTL_NFS_VER_4 2 -#define IXGBE_RFCTL_IPV6_DIS 0x00000400 -#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 -#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 -#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 -#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 - -/* Transmit Config masks */ -#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
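/*
 * Illustrative sketch (hypothetical helper, not from this patch): the
 * IXGBE_IS_MULTICAST/IXGBE_IS_BROADCAST tests above look only at the
 * leading octet(s), which keeps them cheap enough for the receive path:
 */
static inline bool ixgbe_example_addr_class(const u8 *mac)
{
	if (IXGBE_IS_BROADCAST(mac))	/* 0xff in octets 0 and 1 */
		return true;
	/* group (I/G) bit of octet 0, e.g. 01:00:5e:... IPv4 multicast */
	return IXGBE_IS_MULTICAST(mac);
}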
write-back flushing */ -#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ -/* Enable short packet padding to 64 bytes */ -#define IXGBE_TX_PAD_ENABLE 0x00000400 -#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ -/* This allows for 16K packets + 4k for vlan */ -#define IXGBE_MAX_FRAME_SZ 0x40040000 - -#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ -#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ - -/* Receive Config masks */ -#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ -#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ -#define IXGBE_RXDCTL_RLPML_EN 0x00008000 -#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ - -#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ -#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ -#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ -#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ -#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ -#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ -/* Receive Priority Flow Control Enable */ -#define IXGBE_FCTRL_RPFCE 0x00004000 -#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ -#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ -#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ -#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ -#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ - -#define IXGBE_MFLCN_RPFCE_SHIFT 4 - -/* Multiple Receive Queue Control */ -#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ -#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ -#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ -#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ -#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ -#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ -#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ -#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ -#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ -#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ -#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ -#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 -#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 -#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 -#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 -#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 -#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 - -/* Queue Drop Enable */ -#define IXGBE_QDE_ENABLE 0x00000001 -#define IXGBE_QDE_IDX_MASK 0x00007F00 -#define IXGBE_QDE_IDX_SHIFT 8 - -#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ -#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor 
extension (0 = legacy) */ -#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ - -#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 -#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 -#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 -#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 -#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 -/* Multiple Transmit Queue Command Register */ -#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ -#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ -#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ -#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ -#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ -#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ -#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */ - -/* Receive Descriptor bit definitions */ -#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ -#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ -#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ -#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ -#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ -#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ -#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ -#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ -#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ -#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ -#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ -#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ -#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ -#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ -#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ -#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ -#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ -#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ -#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ -#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ -#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ -#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ -#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ -#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ -#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ -#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ -#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ -#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ -#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ -#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ -#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ -#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 
bits */ -#define IXGBE_RXD_PRI_SHIFT 13 -#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define IXGBE_RXD_CFI_SHIFT 12 - -#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ -#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ -#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ -#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ -#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ -#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ -#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ -#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ -#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ - -/* PSRTYPE bit definitions */ -#define IXGBE_PSRTYPE_TCPHDR 0x00000010 -#define IXGBE_PSRTYPE_UDPHDR 0x00000020 -#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 -#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 -#define IXGBE_PSRTYPE_L2HDR 0x00001000 - -/* SRRCTL bit definitions */ -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ -#define IXGBE_SRRCTL_RDMTS_SHIFT 22 -#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 -#define IXGBE_SRRCTL_DROP_EN 0x10000000 -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 -#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 - -#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 -#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF - -#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F -#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 -#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 -#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 -#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 -#define IXGBE_RXDADV_RSCCNT_SHIFT 17 -#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 -#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 -#define IXGBE_RXDADV_SPH 0x8000 - -/* RSS Hash results */ -#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 -#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 -#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 -#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 -#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 -#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 -#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 -#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 -#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 -#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 - -/* RSS Packet Types as indicated in the receive descriptor. 
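/*
 * Illustrative sketch (hypothetical helper, not from this patch): the
 * RSS type shares the lower dword of the advanced writeback descriptor
 * (union defined further down); decoding it is a mask of the raw
 * little-endian word:
 */
static inline u32 ixgbe_example_rss_type(__le32 lo_dword)
{
	u32 hlen_type_rss = le32_to_cpu(lo_dword);

	return hlen_type_rss & IXGBE_RXDADV_RSSTYPE_MASK; /* 0x0 - 0x9 */
}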
*/ -#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 -#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ -#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ -#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ -#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ -#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ -#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ -#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ -#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ -#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ -#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ - -/* Security Processing bit Indication */ -#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 -#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 -#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 - -/* Masks to determine if packets should be dropped due to frame errors */ -#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXD_ERR_CE | \ - IXGBE_RXD_ERR_LE | \ - IXGBE_RXD_ERR_PE | \ - IXGBE_RXD_ERR_OSE | \ - IXGBE_RXD_ERR_USE) - -#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXDADV_ERR_CE | \ - IXGBE_RXDADV_ERR_LE | \ - IXGBE_RXDADV_ERR_PE | \ - IXGBE_RXDADV_ERR_OSE | \ - IXGBE_RXDADV_ERR_USE) - -/* Multicast bit mask */ -#define IXGBE_MCSTCTRL_MFE 0x4 - -/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 - -/* Vlan-specific macros */ -#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ -#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ -#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ -#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT - -/* SR-IOV specific macros */ -#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) -#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4)) -#define IXGBE_VFLRE(_i) (((_i & 1) ? 
0x001C0 : 0x00600)) -#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) - -enum ixgbe_fdir_pballoc_type { - IXGBE_FDIR_PBALLOC_NONE = 0, - IXGBE_FDIR_PBALLOC_64K = 1, - IXGBE_FDIR_PBALLOC_128K = 2, - IXGBE_FDIR_PBALLOC_256K = 3, -}; -#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16 - -/* Flow Director register values */ -#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 -#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 -#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 -#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 -#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 -#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 -#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 -#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 -#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 -#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 -#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 -#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 -#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 - -#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 -#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 -#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 -#define IXGBE_FDIRM_VLANID 0x00000001 -#define IXGBE_FDIRM_VLANP 0x00000002 -#define IXGBE_FDIRM_POOL 0x00000004 -#define IXGBE_FDIRM_L4P 0x00000008 -#define IXGBE_FDIRM_FLEX 0x00000010 -#define IXGBE_FDIRM_DIPv6 0x00000020 - -#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF -#define IXGBE_FDIRFREE_FREE_SHIFT 0 -#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 -#define IXGBE_FDIRFREE_COLL_SHIFT 16 -#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F -#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 -#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 -#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 -#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF -#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 -#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 -#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 -#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF -#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 -#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 -#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 -#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 -#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 -#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 -#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 - -#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 -#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 -#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 -#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 -#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 -#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 -#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 -#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 -#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 -#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 -#define IXGBE_FDIRCMD_IPV6 0x00000080 -#define IXGBE_FDIRCMD_CLEARHT 0x00000100 -#define IXGBE_FDIRCMD_DROP 0x00000200 -#define IXGBE_FDIRCMD_INT 0x00000400 -#define IXGBE_FDIRCMD_LAST 0x00000800 -#define IXGBE_FDIRCMD_COLLISION 0x00001000 -#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 -#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 -#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 -#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 -#define IXGBE_FDIR_INIT_DONE_POLL 10 -#define IXGBE_FDIRCMD_CMD_POLL 10 - -#define IXGBE_FDIR_DROP_QUEUE 127 - -/* Manageablility Host Interface defines */ -#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ -#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ -#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ - -/* CEM Support */ -#define FW_CEM_HDR_LEN 0x4 -#define FW_CEM_CMD_DRIVER_INFO 0xDD -#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 -#define FW_CEM_CMD_RESERVED 0x0 -#define FW_CEM_UNUSED_VER 0x0 -#define 
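/*
 * Illustrative sketch (hypothetical helper, not from this patch): adding
 * a Flow Director signature filter amounts to OR-ing the FDIRCMD fields
 * above into one word, which the driver then writes to the FDIRCMD
 * register (offset assumed from earlier in this header):
 */
static inline u32 ixgbe_example_fdircmd_add(u8 queue, bool is_udp)
{
	u32 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW |
		      IXGBE_FDIRCMD_FILTER_UPDATE |
		      IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;

	fdircmd |= is_udp ? IXGBE_FDIRCMD_L4TYPE_UDP :
			    IXGBE_FDIRCMD_L4TYPE_TCP;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	return fdircmd;
}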
FW_CEM_MAX_RETRIES 3 -#define FW_CEM_RESP_STATUS_SUCCESS 0x1 - -/* Host Interface Command Structures */ -struct ixgbe_hic_hdr { - u8 cmd; - u8 buf_len; - union { - u8 cmd_resv; - u8 ret_status; - } cmd_or_resp; - u8 checksum; -}; - -struct ixgbe_hic_drv_info { - struct ixgbe_hic_hdr hdr; - u8 port_num; - u8 ver_sub; - u8 ver_build; - u8 ver_min; - u8 ver_maj; - u8 pad; /* end spacing to ensure length is mult. of dword */ - u16 pad2; /* end spacing to ensure length is mult. of dword2 */ -}; - -/* Transmit Descriptor - Advanced */ -union ixgbe_adv_tx_desc { - struct { - __le64 buffer_addr; /* Address of descriptor's data buf */ - __le32 cmd_type_len; - __le32 olinfo_status; - } read; - struct { - __le64 rsvd; /* Reserved */ - __le32 nxtseq_seed; - __le32 status; - } wb; -}; - -/* Receive Descriptor - Advanced */ -union ixgbe_adv_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - union { - __le32 data; - struct { - __le16 pkt_info; /* RSS, Pkt type */ - __le16 hdr_info; /* Splithdr, hdrlen */ - } hs_rss; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - __le32 status_error; /* ext status/error */ - __le16 length; /* Packet length */ - __le16 vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ -}; - -/* Context descriptors */ -struct ixgbe_adv_tx_context_desc { - __le32 vlan_macip_lens; - __le32 seqnum_seed; - __le32 type_tucmd_mlhl; - __le32 mss_l4len_idx; -}; - -/* Adv Transmit Descriptor Config Masks */ -#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ -#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ -#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ -#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ -#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ -#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ -#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ -#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ -#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ -#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ -#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ -#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ -#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ -#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ -#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ -#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ -#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ -#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_RSV 
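/*
 * Illustrative sketch (hypothetical helper, not from this patch): on
 * writeback the hardware reuses the same 16-byte descriptor slot, so
 * software must check the DD bit before trusting any other field, and
 * typically drops frames flagged by the aggregate error mask:
 */
static inline bool ixgbe_example_desc_done(union ixgbe_adv_rx_desc *rx_desc)
{
	u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	if (!(staterr & IXGBE_RXD_STAT_DD))
		return false;		/* still owned by hardware */
	/* only accept frames free of MAC-level receive errors */
	return !(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK);
}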
0x00002000 /* POPTS Reserved */ -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ -#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ -#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ -#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ -#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ -#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ -#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ -#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ -#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ -#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */ -#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */ -#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ -#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ -#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ -#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ - -/* Autonegotiation advertised speeds */ -typedef u32 ixgbe_autoneg_advertised; -/* Link speed */ -typedef u32 ixgbe_link_speed; -#define IXGBE_LINK_SPEED_UNKNOWN 0 -#define IXGBE_LINK_SPEED_100_FULL 0x0008 -#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 -#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 -#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ - IXGBE_LINK_SPEED_10GB_FULL) -#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ - IXGBE_LINK_SPEED_1GB_FULL | \ - IXGBE_LINK_SPEED_10GB_FULL) - - -/* Physical layer type */ -typedef u32 ixgbe_physical_layer; -#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 -#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 -#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 -#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 -#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 -#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 -#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 -#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 -#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 -#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 -#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 -#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 -#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 -#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 -#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 - -/* Flow Control Macros */ -#define PAUSE_RTT 8 -#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024) - -#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\ - PAUSE_MTU(MTU)) -#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) - -/* Software ATR hash keys */ -#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 -#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 - -/* Software ATR input stream values and masks */ -#define IXGBE_ATR_HASH_MASK 0x7fff -#define IXGBE_ATR_L4TYPE_MASK 0x3 -#define 
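/*
 * Worked example (editorial, not from this patch) for the flow control
 * macros above with a 1500-byte MTU:
 *   PAUSE_MTU(1500)     = (1500 + 1023) / 1024          = 2
 *   FC_HIGH_WATER(1500) = (((8 + 2) * 144) + 99) / 100 + 2 = 17
 *   FC_LOW_WATER(1500)  = 2 * (2 * 2 + 8)               = 24
 * These are KB-granularity values which the driver presumably scales
 * into byte units when programming the FCRTL/FCRTH thresholds.
 */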
IXGBE_ATR_L4TYPE_UDP 0x1 -#define IXGBE_ATR_L4TYPE_TCP 0x2 -#define IXGBE_ATR_L4TYPE_SCTP 0x3 -#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 -enum ixgbe_atr_flow_type { - IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, - IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, - IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, - IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, - IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, - IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, - IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, - IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, -}; - -/* Flow Director ATR input struct. */ -union ixgbe_atr_input { - /* - * Byte layout in order, all values with MSB first: - * - * vm_pool - 1 byte - * flow_type - 1 byte - * vlan_id - 2 bytes - * src_ip - 16 bytes - * dst_ip - 16 bytes - * src_port - 2 bytes - * dst_port - 2 bytes - * flex_bytes - 2 bytes - * bkt_hash - 2 bytes - */ - struct { - u8 vm_pool; - u8 flow_type; - __be16 vlan_id; - __be32 dst_ip[4]; - __be32 src_ip[4]; - __be16 src_port; - __be16 dst_port; - __be16 flex_bytes; - __be16 bkt_hash; - } formatted; - __be32 dword_stream[11]; -}; - -/* Flow Director compressed ATR hash input struct */ -union ixgbe_atr_hash_dword { - struct { - u8 vm_pool; - u8 flow_type; - __be16 vlan_id; - } formatted; - __be32 ip; - struct { - __be16 src; - __be16 dst; - } port; - __be16 flex_bytes; - __be32 dword; -}; - -enum ixgbe_eeprom_type { - ixgbe_eeprom_uninitialized = 0, - ixgbe_eeprom_spi, - ixgbe_flash, - ixgbe_eeprom_none /* No NVM support */ -}; - -enum ixgbe_mac_type { - ixgbe_mac_unknown = 0, - ixgbe_mac_82598EB, - ixgbe_mac_82599EB, - ixgbe_mac_X540, - ixgbe_num_macs -}; - -enum ixgbe_phy_type { - ixgbe_phy_unknown = 0, - ixgbe_phy_none, - ixgbe_phy_tn, - ixgbe_phy_aq, - ixgbe_phy_cu_unknown, - ixgbe_phy_qt, - ixgbe_phy_xaui, - ixgbe_phy_nl, - ixgbe_phy_sfp_passive_tyco, - ixgbe_phy_sfp_passive_unknown, - ixgbe_phy_sfp_active_unknown, - ixgbe_phy_sfp_avago, - ixgbe_phy_sfp_ftl, - ixgbe_phy_sfp_ftl_active, - ixgbe_phy_sfp_unknown, - ixgbe_phy_sfp_intel, - ixgbe_phy_sfp_unsupported, - ixgbe_phy_generic -}; - -/* - * SFP+ module type IDs: - * - * ID Module Type - * ============= - * 0 SFP_DA_CU - * 1 SFP_SR - * 2 SFP_LR - * 3 SFP_DA_CU_CORE0 - 82599-specific - * 4 SFP_DA_CU_CORE1 - 82599-specific - * 5 SFP_SR/LR_CORE0 - 82599-specific - * 6 SFP_SR/LR_CORE1 - 82599-specific - */ -enum ixgbe_sfp_type { - ixgbe_sfp_type_da_cu = 0, - ixgbe_sfp_type_sr = 1, - ixgbe_sfp_type_lr = 2, - ixgbe_sfp_type_da_cu_core0 = 3, - ixgbe_sfp_type_da_cu_core1 = 4, - ixgbe_sfp_type_srlr_core0 = 5, - ixgbe_sfp_type_srlr_core1 = 6, - ixgbe_sfp_type_da_act_lmt_core0 = 7, - ixgbe_sfp_type_da_act_lmt_core1 = 8, - ixgbe_sfp_type_1g_cu_core0 = 9, - ixgbe_sfp_type_1g_cu_core1 = 10, - ixgbe_sfp_type_not_present = 0xFFFE, - ixgbe_sfp_type_unknown = 0xFFFF -}; - -enum ixgbe_media_type { - ixgbe_media_type_unknown = 0, - ixgbe_media_type_fiber, - ixgbe_media_type_fiber_lco, - ixgbe_media_type_copper, - ixgbe_media_type_backplane, - ixgbe_media_type_cx4, - ixgbe_media_type_virtual -}; - -/* Flow Control Settings */ -enum ixgbe_fc_mode { - ixgbe_fc_none = 0, - ixgbe_fc_rx_pause, - ixgbe_fc_tx_pause, - ixgbe_fc_full, -#ifdef CONFIG_DCB - ixgbe_fc_pfc, -#endif - ixgbe_fc_default -}; - -/* Smart Speed Settings */ -#define IXGBE_SMARTSPEED_MAX_RETRIES 3 -enum ixgbe_smart_speed { - ixgbe_smart_speed_auto = 0, - ixgbe_smart_speed_on, - ixgbe_smart_speed_off -}; - -/* PCI bus types */ -enum ixgbe_bus_type { - ixgbe_bus_type_unknown = 0, - ixgbe_bus_type_pci, - ixgbe_bus_type_pcix, - ixgbe_bus_type_pci_express, - ixgbe_bus_type_reserved -}; - -/* PCI bus speeds */ -enum ixgbe_bus_speed { - 
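/*
 * Illustrative sketch (hypothetical helper, not from this patch): the
 * compressed ATR hash input above packs VM pool, flow type and VLAN into
 * a single dword so the signature hash can run over 32-bit words;
 * filling it might look like:
 */
static inline union ixgbe_atr_hash_dword
ixgbe_example_atr_common(u8 flow_type, __be16 vlan_id)
{
	union ixgbe_atr_hash_dword common = { .dword = 0 };

	common.formatted.flow_type = flow_type;	/* IXGBE_ATR_FLOW_TYPE_* */
	common.formatted.vlan_id = vlan_id;
	return common;
}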
ixgbe_bus_speed_unknown = 0, - ixgbe_bus_speed_33 = 33, - ixgbe_bus_speed_66 = 66, - ixgbe_bus_speed_100 = 100, - ixgbe_bus_speed_120 = 120, - ixgbe_bus_speed_133 = 133, - ixgbe_bus_speed_2500 = 2500, - ixgbe_bus_speed_5000 = 5000, - ixgbe_bus_speed_reserved -}; - -/* PCI bus widths */ -enum ixgbe_bus_width { - ixgbe_bus_width_unknown = 0, - ixgbe_bus_width_pcie_x1 = 1, - ixgbe_bus_width_pcie_x2 = 2, - ixgbe_bus_width_pcie_x4 = 4, - ixgbe_bus_width_pcie_x8 = 8, - ixgbe_bus_width_32 = 32, - ixgbe_bus_width_64 = 64, - ixgbe_bus_width_reserved -}; - -struct ixgbe_addr_filter_info { - u32 num_mc_addrs; - u32 rar_used_count; - u32 mta_in_use; - u32 overflow_promisc; - bool uc_set_promisc; - bool user_set_promisc; -}; - -/* Bus parameters */ -struct ixgbe_bus_info { - enum ixgbe_bus_speed speed; - enum ixgbe_bus_width width; - enum ixgbe_bus_type type; - - u16 func; - u16 lan_id; -}; - -/* Flow control parameters */ -struct ixgbe_fc_info { - u32 high_water; /* Flow Control High-water */ - u32 low_water; /* Flow Control Low-water */ - u16 pause_time; /* Flow Control Pause timer */ - bool send_xon; /* Flow control send XON */ - bool strict_ieee; /* Strict IEEE mode */ - bool disable_fc_autoneg; /* Do not autonegotiate FC */ - bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ - enum ixgbe_fc_mode current_mode; /* FC mode in effect */ - enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ -}; - -/* Statistics counters collected by the MAC */ -struct ixgbe_hw_stats { - u64 crcerrs; - u64 illerrc; - u64 errbc; - u64 mspdc; - u64 mpctotal; - u64 mpc[8]; - u64 mlfc; - u64 mrfc; - u64 rlec; - u64 lxontxc; - u64 lxonrxc; - u64 lxofftxc; - u64 lxoffrxc; - u64 pxontxc[8]; - u64 pxonrxc[8]; - u64 pxofftxc[8]; - u64 pxoffrxc[8]; - u64 prc64; - u64 prc127; - u64 prc255; - u64 prc511; - u64 prc1023; - u64 prc1522; - u64 gprc; - u64 bprc; - u64 mprc; - u64 gptc; - u64 gorc; - u64 gotc; - u64 rnbc[8]; - u64 ruc; - u64 rfc; - u64 roc; - u64 rjc; - u64 mngprc; - u64 mngpdc; - u64 mngptc; - u64 tor; - u64 tpr; - u64 tpt; - u64 ptc64; - u64 ptc127; - u64 ptc255; - u64 ptc511; - u64 ptc1023; - u64 ptc1522; - u64 mptc; - u64 bptc; - u64 xec; - u64 rqsmr[16]; - u64 tqsmr[8]; - u64 qprc[16]; - u64 qptc[16]; - u64 qbrc[16]; - u64 qbtc[16]; - u64 qprdc[16]; - u64 pxon2offc[8]; - u64 fdirustat_add; - u64 fdirustat_remove; - u64 fdirfstat_fadd; - u64 fdirfstat_fremove; - u64 fdirmatch; - u64 fdirmiss; - u64 fccrc; - u64 fcoerpdc; - u64 fcoeprc; - u64 fcoeptc; - u64 fcoedwrc; - u64 fcoedwtc; - u64 b2ospc; - u64 b2ogprc; - u64 o2bgptc; - u64 o2bspc; -}; - -/* forward declaration */ -struct ixgbe_hw; - -/* iterator type for walking multicast address lists */ -typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, - u32 *vmdq); - -/* Function pointer table */ -struct ixgbe_eeprom_operations { - s32 (*init_params)(struct ixgbe_hw *); - s32 (*read)(struct ixgbe_hw *, u16, u16 *); - s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); - s32 (*write)(struct ixgbe_hw *, u16, u16); - s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); - s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); - s32 (*update_checksum)(struct ixgbe_hw *); - u16 (*calc_checksum)(struct ixgbe_hw *); -}; - -struct ixgbe_mac_operations { - s32 (*init_hw)(struct ixgbe_hw *); - s32 (*reset_hw)(struct ixgbe_hw *); - s32 (*start_hw)(struct ixgbe_hw *); - s32 (*clear_hw_cntrs)(struct ixgbe_hw *); - enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); - u32 (*get_supported_physical_layer)(struct 
ixgbe_hw *); - s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); - s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); - s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); - s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); - s32 (*stop_adapter)(struct ixgbe_hw *); - s32 (*get_bus_info)(struct ixgbe_hw *); - void (*set_lan_id)(struct ixgbe_hw *); - s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); - s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); - s32 (*setup_sfp)(struct ixgbe_hw *); - s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); - s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); - void (*release_swfw_sync)(struct ixgbe_hw *, u16); - - /* Link */ - void (*disable_tx_laser)(struct ixgbe_hw *); - void (*enable_tx_laser)(struct ixgbe_hw *); - void (*flap_tx_laser)(struct ixgbe_hw *); - s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); - s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); - s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, - bool *); - - /* Packet Buffer Manipulation */ - void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); - - /* LED */ - s32 (*led_on)(struct ixgbe_hw *, u32); - s32 (*led_off)(struct ixgbe_hw *, u32); - s32 (*blink_led_start)(struct ixgbe_hw *, u32); - s32 (*blink_led_stop)(struct ixgbe_hw *, u32); - - /* RAR, Multicast, VLAN */ - s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); - s32 (*clear_rar)(struct ixgbe_hw *, u32); - s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); - s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); - s32 (*init_rx_addrs)(struct ixgbe_hw *); - s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); - s32 (*enable_mc)(struct ixgbe_hw *); - s32 (*disable_mc)(struct ixgbe_hw *); - s32 (*clear_vfta)(struct ixgbe_hw *); - s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); - s32 (*init_uta_tables)(struct ixgbe_hw *); - void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); - void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); - - /* Flow Control */ - s32 (*fc_enable)(struct ixgbe_hw *, s32); - - /* Manageability interface */ - s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); -}; - -struct ixgbe_phy_operations { - s32 (*identify)(struct ixgbe_hw *); - s32 (*identify_sfp)(struct ixgbe_hw *); - s32 (*init)(struct ixgbe_hw *); - s32 (*reset)(struct ixgbe_hw *); - s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); - s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); - s32 (*setup_link)(struct ixgbe_hw *); - s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, - bool); - s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); - s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); - s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); - s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); - s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); - s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); - s32 (*check_overtemp)(struct ixgbe_hw *); -}; - -struct ixgbe_eeprom_info { - struct ixgbe_eeprom_operations ops; - enum ixgbe_eeprom_type type; - u32 semaphore_delay; - u16 word_size; - u16 address_bits; - u16 word_page_size; -}; - -#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 -struct ixgbe_mac_info { - struct ixgbe_mac_operations ops; - enum ixgbe_mac_type type; - u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - /* prefix for World Wide Node Name (WWNN) */ - u16 wwnn_prefix; - /* prefix for World Wide Port Name (WWPN) */ - u16 
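/*
 * Illustrative sketch (hypothetical helper, not from this patch):
 * everything in this header is consumed through the per-MAC operation
 * tables above, which is how one driver core serves 82598/82599/X540
 * without branching on the MAC type. Assumes the full struct ixgbe_hw
 * defined at the end of this header:
 */
static inline void ixgbe_example_check_link(struct ixgbe_hw *hw)
{
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	bool link_up = false;

	if (hw->mac.ops.check_link)
		hw->mac.ops.check_link(hw, &speed, &link_up, false);
}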
wwpn_prefix; -#define IXGBE_MAX_MTA 128 - u32 mta_shadow[IXGBE_MAX_MTA]; - s32 mc_filter_type; - u32 mcft_size; - u32 vft_size; - u32 num_rar_entries; - u32 rar_highwater; - u32 rx_pb_size; - u32 max_tx_queues; - u32 max_rx_queues; - u32 max_msix_vectors; - u32 orig_autoc; - u32 orig_autoc2; - bool orig_link_settings_stored; - bool autotry_restart; - u8 flags; -}; - -struct ixgbe_phy_info { - struct ixgbe_phy_operations ops; - struct mdio_if_info mdio; - enum ixgbe_phy_type type; - u32 id; - enum ixgbe_sfp_type sfp_type; - bool sfp_setup_needed; - u32 revision; - enum ixgbe_media_type media_type; - bool reset_disable; - ixgbe_autoneg_advertised autoneg_advertised; - enum ixgbe_smart_speed smart_speed; - bool smart_speed_active; - bool multispeed_fiber; - bool reset_if_overtemp; -}; - -#include "ixgbe_mbx.h" - -struct ixgbe_mbx_operations { - s32 (*init_params)(struct ixgbe_hw *hw); - s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*check_for_msg)(struct ixgbe_hw *, u16); - s32 (*check_for_ack)(struct ixgbe_hw *, u16); - s32 (*check_for_rst)(struct ixgbe_hw *, u16); -}; - -struct ixgbe_mbx_stats { - u32 msgs_tx; - u32 msgs_rx; - - u32 acks; - u32 reqs; - u32 rsts; -}; - -struct ixgbe_mbx_info { - struct ixgbe_mbx_operations ops; - struct ixgbe_mbx_stats stats; - u32 timeout; - u32 usec_delay; - u32 v2p_mailbox; - u16 size; -}; - -struct ixgbe_hw { - u8 __iomem *hw_addr; - void *back; - struct ixgbe_mac_info mac; - struct ixgbe_addr_filter_info addr_ctrl; - struct ixgbe_fc_info fc; - struct ixgbe_phy_info phy; - struct ixgbe_eeprom_info eeprom; - struct ixgbe_bus_info bus; - struct ixgbe_mbx_info mbx; - u16 device_id; - u16 vendor_id; - u16 subsystem_device_id; - u16 subsystem_vendor_id; - u8 revision_id; - bool adapter_stopped; - bool force_full_reset; -}; - -struct ixgbe_info { - enum ixgbe_mac_type mac; - s32 (*get_invariants)(struct ixgbe_hw *); - struct ixgbe_mac_operations *mac_ops; - struct ixgbe_eeprom_operations *eeprom_ops; - struct ixgbe_phy_operations *phy_ops; - struct ixgbe_mbx_operations *mbx_ops; -}; - - -/* Error Codes */ -#define IXGBE_ERR_EEPROM -1 -#define IXGBE_ERR_EEPROM_CHECKSUM -2 -#define IXGBE_ERR_PHY -3 -#define IXGBE_ERR_CONFIG -4 -#define IXGBE_ERR_PARAM -5 -#define IXGBE_ERR_MAC_TYPE -6 -#define IXGBE_ERR_UNKNOWN_PHY -7 -#define IXGBE_ERR_LINK_SETUP -8 -#define IXGBE_ERR_ADAPTER_STOPPED -9 -#define IXGBE_ERR_INVALID_MAC_ADDR -10 -#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 -#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 -#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 -#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 -#define IXGBE_ERR_RESET_FAILED -15 -#define IXGBE_ERR_SWFW_SYNC -16 -#define IXGBE_ERR_PHY_ADDR_INVALID -17 -#define IXGBE_ERR_I2C -18 -#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 -#define IXGBE_ERR_SFP_NOT_PRESENT -20 -#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 -#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 -#define IXGBE_ERR_FDIR_REINIT_FAILED -23 -#define IXGBE_ERR_EEPROM_VERSION -24 -#define IXGBE_ERR_NO_SPACE -25 -#define IXGBE_ERR_OVERTEMP -26 -#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 -#define IXGBE_ERR_FC_NOT_SUPPORTED -28 -#define IXGBE_ERR_FLOW_CONTROL -29 -#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 -#define IXGBE_ERR_PBA_SECTION -31 -#define IXGBE_ERR_INVALID_ARGUMENT -32 -#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 -#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF - -#endif /* 
_IXGBE_TYPE_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c deleted file mode 100644 index 2696c78..0000000 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ /dev/null @@ -1,941 +0,0 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/sched.h> - -#include "ixgbe.h" -#include "ixgbe_phy.h" - -#define IXGBE_X540_MAX_TX_QUEUES 128 -#define IXGBE_X540_MAX_RX_QUEUES 128 -#define IXGBE_X540_RAR_ENTRIES 128 -#define IXGBE_X540_MC_TBL_SIZE 128 -#define IXGBE_X540_VFT_TBL_SIZE 128 -#define IXGBE_X540_RX_PB_SIZE 384 - -static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); -static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); -static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); -static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); -static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); -static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); - -static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) -{ - return ixgbe_media_type_copper; -} - -static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - /* Call PHY identify routine to get the phy type */ - ixgbe_identify_phy_generic(hw); - - mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; - mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; - mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; - mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; - mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; - mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); - - return 0; -} - -/** - * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg: true if autonegotiation enabled - * @autoneg_wait_to_complete: true when waiting for completion is needed - **/ -static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) -{ - return hw->phy.ops.setup_link_speed(hw, speed, autoneg, - autoneg_wait_to_complete); -} - -/** - * ixgbe_reset_hw_X540 - Perform hardware reset - * @hw: pointer to hardware structure - * - * Resets the hardware by resetting the transmit and receive units, masks - * and clears all interrupts, performs a PHY reset, and performs a link (MAC) - * reset.
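The function-pointer tables declared in ixgbe_type.h above are the whole dispatch mechanism: common code only ever calls hw->mac.ops.reset_hw() and friends, and the per-generation file (82598, 82599, X540) decides what those pointers mean. A minimal stand-alone C model of that pattern, with invented names rather than the driver's own:

#include <stdio.h>

struct hw;					/* forward declaration */

struct mac_ops {				/* per-generation vtable */
	int (*reset_hw)(struct hw *);
	int (*start_hw)(struct hw *);
};

struct hw {
	const struct mac_ops *ops;		/* wired up at probe time */
	const char *name;
};

static int x540_reset(struct hw *hw) { printf("%s: reset\n", hw->name); return 0; }
static int x540_start(struct hw *hw) { printf("%s: start\n", hw->name); return 0; }

static const struct mac_ops x540_ops = { x540_reset, x540_start };

int main(void)
{
	struct hw hw = { &x540_ops, "x540" };

	/* generic code path: it never knows which MAC it is driving */
	hw.ops->reset_hw(&hw);
	hw.ops->start_hw(&hw);
	return 0;
}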
- **/ -static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) -{ - ixgbe_link_speed link_speed; - s32 status = 0; - u32 ctrl; - u32 ctrl_ext; - u32 reset_bit; - u32 i; - u32 autoc; - u32 autoc2; - bool link_up = false; - - /* Call adapter stop to disable tx/rx and clear interrupts */ - hw->mac.ops.stop_adapter(hw); - - /* - * Prevent the PCI-E bus from hanging by disabling PCI-E master - * access and verify no pending requests before reset - */ - ixgbe_disable_pcie_master(hw); - -mac_reset_top: - /* - * Issue global reset to the MAC. Needs to be SW reset if link is up. - * If link reset is used when link is up, it might reset the PHY when - * mng is using it. If link is down or the flag to force full link - * reset is set, then perform link reset. - */ - if (hw->force_full_reset) { - reset_bit = IXGBE_CTRL_LNK_RST; - } else { - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - if (!link_up) - reset_bit = IXGBE_CTRL_LNK_RST; - else - reset_bit = IXGBE_CTRL_RST; - } - - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit)); - IXGBE_WRITE_FLUSH(hw); - - /* Poll for reset bit to self-clear indicating reset is complete */ - for (i = 0; i < 10; i++) { - udelay(1); - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - if (!(ctrl & reset_bit)) - break; - } - if (ctrl & reset_bit) { - status = IXGBE_ERR_RESET_FAILED; - hw_dbg(hw, "Reset polling failed to complete.\n"); - } - - /* - * Double resets are required for recovery from certain error - * conditions. Between resets, it is necessary to stall to allow time - * for any pending HW events to complete. We use 1usec since that is - * what is needed for ixgbe_disable_pcie_master(). The second reset - * then clears out any effects of those events. - */ - if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { - hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; - udelay(1); - goto mac_reset_top; - } - - /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ - ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); - ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; - IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); - IXGBE_WRITE_FLUSH(hw); - - msleep(50); - - /* Set the Rx packet buffer size. */ - IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); - - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - - /* - * Store the original AUTOC/AUTOC2 values if they have not been - * stored off yet. Otherwise restore the stored original - * values since the reset operation sets back to defaults. - */ - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - if (hw->mac.orig_link_settings_stored == false) { - hw->mac.orig_autoc = autoc; - hw->mac.orig_autoc2 = autoc2; - hw->mac.orig_link_settings_stored = true; - } else { - if (autoc != hw->mac.orig_autoc) - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | - IXGBE_AUTOC_AN_RESTART)); - - if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != - (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { - autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; - autoc2 |= (hw->mac.orig_autoc2 & - IXGBE_AUTOC2_UPPER_MASK); - IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); - } - } - - /* - * Store MAC address from RAR0, clear receive address registers, and - * clear the multicast table. Also reset num_rar_entries to 128, - * since we modify this value when programming the SAN MAC address.
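The reset path above uses one idiom twice: set a self-clearing control bit, then poll it with a bounded delay loop, treating a bit that never drops as IXGBE_ERR_RESET_FAILED. A stand-alone sketch of that idiom with a simulated register (values here are made up, not the hardware's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_RST (1u << 26)		/* illustrative self-clearing bit */

static uint32_t fake_ctrl;		/* stands in for IXGBE_CTRL */

static uint32_t read_ctrl(void)
{
	static int reads;
	/* model the hardware finishing the reset after a few reads */
	if (++reads >= 3)
		fake_ctrl &= ~CTRL_RST;
	return fake_ctrl;
}

static bool reset_and_poll(int attempts)
{
	fake_ctrl |= CTRL_RST;			/* issue the reset */
	for (int i = 0; i < attempts; i++) {
		/* the driver udelay(1)s here between reads */
		if (!(read_ctrl() & CTRL_RST))
			return true;		/* bit self-cleared: done */
	}
	return false;				/* reset never completed */
}

int main(void)
{
	printf("reset %s\n", reset_and_poll(10) ? "complete" : "timed out");
	return 0;
}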
- */ - hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; - hw->mac.ops.init_rx_addrs(hw); - - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - - /* Store the permanent SAN mac address */ - hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); - - /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { - hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - - /* Reserve the last RAR for the SAN MAC address */ - hw->mac.num_rar_entries--; - } - - /* Store the alternative WWNN/WWPN prefix */ - hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, - &hw->mac.wwpn_prefix); - - return status; -} - -/** - * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure - * - * Starts the hardware using the generic start_hw function - * and the generation-specific (gen2) start_hw function. - * Then performs revision-specific operations, if any. - **/ -static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) -{ - s32 ret_val = 0; - - ret_val = ixgbe_start_hw_generic(hw); - if (ret_val != 0) - goto out; - - ret_val = ixgbe_start_hw_gen2(hw); - hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE; -out: - return ret_val; -} - -/** - * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type - * @hw: pointer to hardware structure - * - * Determines physical layer capabilities of the current configuration. - **/ -static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) -{ - u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; - u16 ext_ability = 0; - - hw->phy.ops.identify(hw); - - hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, - &ext_ability); - if (ext_ability & MDIO_PMA_EXTABLE_10GBT) - physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; - if (ext_ability & MDIO_PMA_EXTABLE_1000BT) - physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; - if (ext_ability & MDIO_PMA_EXTABLE_100BTX) - physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; - - return physical_layer; -} - -/** - * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params - * @hw: pointer to hardware structure - * - * Initializes the EEPROM parameters ixgbe_eeprom_info within the - * ixgbe_hw struct in order to set up EEPROM access. - **/ -static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) -{ - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - u32 eec; - u16 eeprom_size; - - if (eeprom->type == ixgbe_eeprom_uninitialized) { - eeprom->semaphore_delay = 10; - eeprom->type = ixgbe_flash; - - eec = IXGBE_READ_REG(hw, IXGBE_EEC); - eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> - IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); - - hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", - eeprom->type, eeprom->word_size); - } - - return 0; -} - -/** - * ixgbe_read_eerd_X540 - Read EEPROM word using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the EERD register.
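Every EEPROM accessor below has the same shape: acquire the EEP_SM software/firmware semaphore, call the generic EERD/EEWR helper, release, and map an acquisition failure to IXGBE_ERR_SWFW_SYNC. The wrapper pattern reduced to a sketch (the lock and raw-read functions are stand-ins, not driver APIs):

#include <stdint.h>
#include <stdio.h>

#define ERR_SWFW_SYNC (-16)		/* mirrors IXGBE_ERR_SWFW_SYNC */

static int acquire_sem(void) { return 0; }	/* 0 means acquired */
static void release_sem(void) { }

static int read_word_raw(uint16_t off, uint16_t *data)
{
	*data = (uint16_t)(0xBA00 | off);	/* fake EEPROM content */
	return 0;
}

/* Semaphore-wrapped read: its only job is serializing raw access. */
static int read_word(uint16_t off, uint16_t *data)
{
	int status;

	if (acquire_sem() != 0)
		return ERR_SWFW_SYNC;
	status = read_word_raw(off, data);
	release_sem();
	return status;
}

int main(void)
{
	uint16_t w;

	if (read_word(0x3F, &w) == 0)
		printf("word 0x3F = 0x%04X\n", w);
	return 0;
}

(The X540 wrappers release unconditionally, even when acquisition failed; the sketch shows only the intended acquire/release pairing.)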
- **/ -static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) -{ - s32 status = 0; - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == - 0) - status = ixgbe_read_eerd_generic(hw, offset, data); - else - status = IXGBE_ERR_SWFW_SYNC; - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - return status; -} - -/** - * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to read - * @words: number of words - * @data: word(s) read from the EEPROM - * - * Reads 16 bit word(s) from the EEPROM using the EERD register. - **/ -static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) -{ - s32 status = 0; - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == - 0) - status = ixgbe_read_eerd_buffer_generic(hw, offset, - words, data); - else - status = IXGBE_ERR_SWFW_SYNC; - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - return status; -} - -/** - * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @data: word to write to the EEPROM - * - * Writes a 16 bit word to the EEPROM using the EEWR register. - **/ -static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) -{ - s32 status = 0; - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) - status = ixgbe_write_eewr_generic(hw, offset, data); - else - status = IXGBE_ERR_SWFW_SYNC; - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - return status; -} - -/** - * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR - * @hw: pointer to hardware structure - * @offset: offset of word in the EEPROM to write - * @words: number of words - * @data: word(s) to write to the EEPROM - * - * Writes 16 bit word(s) to the EEPROM using the EEWR register. - **/ -static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, - u16 offset, u16 words, u16 *data) -{ - s32 status = 0; - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == - 0) - status = ixgbe_write_eewr_buffer_generic(hw, offset, - words, data); - else - status = IXGBE_ERR_SWFW_SYNC; - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - return status; -} - -/** - * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum - * - * This function does not use synchronization for EERD and EEWR. It can - * be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540. - * - * @hw: pointer to hardware structure - **/ -static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) -{ - u16 i; - u16 j; - u16 checksum = 0; - u16 length = 0; - u16 pointer = 0; - u16 word = 0; - - /* - * Do not use hw->eeprom.ops.read because we do not want to take - * the synchronization semaphores here. Instead use - * ixgbe_read_eerd_generic - */ - - /* Include 0x0-0x3E in the checksum (0x3F is the checksum word) */ - for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (ixgbe_read_eerd_generic(hw, i, &word) != 0) { - hw_dbg(hw, "EEPROM read failed\n"); - break; - } - checksum += word; - } - - /* - * Include all data from pointers 0x3, 0x6-0xE. This excludes the - * FW, PHY module, and PCIe Expansion/Option ROM pointers.
- */ - for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { - if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) - continue; - - if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) { - hw_dbg(hw, "EEPROM read failed\n"); - break; - } - - /* Skip pointer section if the pointer is invalid. */ - if (pointer == 0xFFFF || pointer == 0 || - pointer >= hw->eeprom.word_size) - continue; - - if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) { - hw_dbg(hw, "EEPROM read failed\n"); - break; - } - - /* Skip pointer section if length is invalid. */ - if (length == 0xFFFF || length == 0 || - (pointer + length) >= hw->eeprom.word_size) - continue; - - for (j = pointer+1; j <= pointer+length; j++) { - if (ixgbe_read_eerd_generic(hw, j, &word) != 0) { - hw_dbg(hw, "EEPROM read failed\n"); - break; - } - checksum += word; - } - } - - checksum = (u16)IXGBE_EEPROM_SUM - checksum; - - return checksum; -} - -/** - * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum - * @hw: pointer to hardware structure - * @checksum_val: calculated checksum - * - * Performs checksum calculation and validates the EEPROM checksum. If the - * caller does not need checksum_val, the value can be NULL. - **/ -static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, - u16 *checksum_val) -{ - s32 status; - u16 checksum; - u16 read_checksum = 0; - - /* - * Read the first word from the EEPROM. If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = hw->eeprom.ops.read(hw, 0, &checksum); - - if (status != 0) { - hw_dbg(hw, "EEPROM read failed\n"); - goto out; - } - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { - checksum = hw->eeprom.ops.calc_checksum(hw); - - /* - * Do not use hw->eeprom.ops.read because we do not want to take - * the synchronization semaphores twice here. - */ - ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, - &read_checksum); - - /* - * Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) - status = IXGBE_ERR_EEPROM_CHECKSUM; - - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; - } else { - status = IXGBE_ERR_SWFW_SYNC; - } - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); -out: - return status; -} - -/** - * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash - * @hw: pointer to hardware structure - * - * After writing EEPROM to shadow RAM using EEWR register, software calculates - * checksum and updates the EEPROM and instructs the hardware to update - * the flash. - **/ -static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) -{ - s32 status; - u16 checksum; - - /* - * Read the first word from the EEPROM. If this times out or fails, do - * not continue or we could be in for a very long wait while every - * EEPROM read fails - */ - status = hw->eeprom.ops.read(hw, 0, &checksum); - - if (status != 0) - hw_dbg(hw, "EEPROM read failed\n"); - - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { - checksum = hw->eeprom.ops.calc_checksum(hw); - - /* - * Do not use hw->eeprom.ops.write because we do not want to - * take the synchronization semaphores twice here. 
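Concretely: the calculation sums words 0x00-0x3E plus every valid pointed-to section, and the value stored at word 0x3F is IXGBE_EEPROM_SUM (0xBABA) minus that sum, so re-summing the entire image, checksum included, should land exactly on 0xBABA. A user-space model over a flat in-memory image (the pointer-section walk is collapsed into one region for brevity):

#include <stdint.h>
#include <stdio.h>

#define EEPROM_WORDS	64
#define CHECKSUM_WORD	0x3F		/* mirrors IXGBE_EEPROM_CHECKSUM */
#define EEPROM_SUM	0xBABA		/* mirrors IXGBE_EEPROM_SUM */

/* Sum every word except the checksum slot itself. */
static uint16_t calc_checksum(const uint16_t *img)
{
	uint16_t sum = 0;

	for (int i = 0; i < EEPROM_WORDS; i++)
		if (i != CHECKSUM_WORD)
			sum += img[i];
	return (uint16_t)(EEPROM_SUM - sum);
}

int main(void)
{
	uint16_t img[EEPROM_WORDS] = { 0x1234, 0x5678 };	/* rest zero */
	uint16_t total = 0;

	img[CHECKSUM_WORD] = calc_checksum(img);

	/* validation: the full image must total exactly 0xBABA */
	for (int i = 0; i < EEPROM_WORDS; i++)
		total += img[i];
	printf("total 0x%04X (%s)\n", total,
	       total == EEPROM_SUM ? "valid" : "corrupt");
	return 0;
}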
- */ - status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, - checksum); - - if (status == 0) - status = ixgbe_update_flash_X540(hw); - else - status = IXGBE_ERR_SWFW_SYNC; - } - - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - - return status; -} - -/** - * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device - * @hw: pointer to hardware structure - * - * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy - * EEPROM from shadow RAM to the flash device. - **/ -static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) -{ - u32 flup; - s32 status = IXGBE_ERR_EEPROM; - - status = ixgbe_poll_flash_update_done_X540(hw); - if (status == IXGBE_ERR_EEPROM) { - hw_dbg(hw, "Flash update time out\n"); - goto out; - } - - flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); - - status = ixgbe_poll_flash_update_done_X540(hw); - if (status == 0) - hw_dbg(hw, "Flash update complete\n"); - else - hw_dbg(hw, "Flash update time out\n"); - - if (hw->revision_id == 0) { - flup = IXGBE_READ_REG(hw, IXGBE_EEC); - - if (flup & IXGBE_EEC_SEC1VAL) { - flup |= IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); - } - - status = ixgbe_poll_flash_update_done_X540(hw); - if (status == 0) - hw_dbg(hw, "Flash update complete\n"); - else - hw_dbg(hw, "Flash update time out\n"); - } -out: - return status; -} - -/** - * ixgbe_poll_flash_update_done_X540 - Poll flash update status - * @hw: pointer to hardware structure - * - * Polls the FLUDONE (bit 26) of the EEC Register to determine when the - * flash update is done. - **/ -static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) -{ - u32 i; - u32 reg; - s32 status = IXGBE_ERR_EEPROM; - - for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { - reg = IXGBE_READ_REG(hw, IXGBE_EEC); - if (reg & IXGBE_EEC_FLUDONE) { - status = 0; - break; - } - udelay(5); - } - return status; -} - -/** - * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to acquire - * - * Acquires the SWFW semaphore through the SW_FW_SYNC register for - * the specified function (CSR, PHY0, PHY1, NVM, Flash) - **/ -static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) -{ - u32 swfw_sync; - u32 swmask = mask; - u32 fwmask = mask << 5; - u32 hwmask = 0; - u32 timeout = 200; - u32 i; - - if (swmask == IXGBE_GSSR_EEP_SM) - hwmask = IXGBE_GSSR_FLASH_SM; - - for (i = 0; i < timeout; i++) { - /* - * SW NVM semaphore bit is used for access to all - * SW_FW_SYNC bits (not just NVM) - */ - if (ixgbe_get_swfw_sync_semaphore(hw)) - return IXGBE_ERR_SWFW_SYNC; - - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); - if (!(swfw_sync & (fwmask | swmask | hwmask))) { - swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); - ixgbe_release_swfw_sync_semaphore(hw); - break; - } else { - /* - * Firmware currently using resource (fwmask), - * hardware currently using resource (hwmask), - * or other software thread currently using - * resource (swmask) - */ - ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 10000); - } - } - - /* - * If the resource is not released by the FW/HW the SW can assume that - * the FW/HW malfunctions. In that case the SW should set the - * SW bit(s) of the requested resource(s) while ignoring the - * corresponding FW/HW bits in the SW_FW_SYNC register.
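Acquisition is therefore two-level: first the SMBI/REGSMP gate that serializes access to SW_FW_SYNC itself, then the per-resource bit inside SW_FW_SYNC, retried up to a timeout, after which the driver claims the bit anyway on the assumption that the firmware or hardware wedged. A compressed single-threaded model (the gate and register are simulated, not the real MMIO):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t swfw_sync;		/* stands in for SW_FW_SYNC */

static bool take_gate(void) { return true; }	/* SMBI/REGSMP stand-in */
static void drop_gate(void) { }

/* Set our SW bit only while the FW and SW bits are all clear. */
static bool acquire(uint32_t swmask, uint32_t fwmask, int timeout)
{
	for (int i = 0; i < timeout; i++) {
		if (!take_gate())
			return false;
		if (!(swfw_sync & (swmask | fwmask))) {
			swfw_sync |= swmask;	/* resource is ours */
			drop_gate();
			return true;
		}
		drop_gate();			/* busy: back off and retry */
	}

	/* timed out: assume the other side wedged, claim the bit anyway */
	if (take_gate()) {
		swfw_sync |= swmask;
		drop_gate();
	}
	return true;
}

int main(void)
{
	printf("acquired: %d\n", acquire(0x1, 0x1 << 5, 200));
	return 0;
}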
- */ - if (i >= timeout) { - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); - if (swfw_sync & (fwmask | hwmask)) { - if (ixgbe_get_swfw_sync_semaphore(hw)) - return IXGBE_ERR_SWFW_SYNC; - - swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); - ixgbe_release_swfw_sync_semaphore(hw); - } - } - - usleep_range(5000, 10000); - return 0; -} - -/** - * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore - * @hw: pointer to hardware structure - * @mask: Mask to specify which semaphore to release - * - * Releases the SWFW semaphore through the SW_FW_SYNC register - * for the specified function (CSR, PHY0, PHY1, NVM, Flash) - **/ -static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) -{ - u32 swfw_sync; - u32 swmask = mask; - - ixgbe_get_swfw_sync_semaphore(hw); - - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); - swfw_sync &= ~swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); - - ixgbe_release_swfw_sync_semaphore(hw); - usleep_range(5000, 10000); -} - -/** - * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore - * @hw: pointer to hardware structure - * - * Sets the hardware semaphores so SW/FW can gain control of shared resources - **/ -static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) -{ - s32 status = IXGBE_ERR_EEPROM; - u32 timeout = 2000; - u32 i; - u32 swsm; - - /* Get SMBI software semaphore between device drivers first */ - for (i = 0; i < timeout; i++) { - /* - * If the SMBI bit is 0 when we read it, then the bit will be - * set and we have the semaphore - */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); - if (!(swsm & IXGBE_SWSM_SMBI)) { - status = 0; - break; - } - udelay(50); - } - - /* Now get the semaphore between SW/FW through the REGSMP bit */ - if (!status) { - for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); - if (!(swsm & IXGBE_SWFW_REGSMP)) - break; - - udelay(50); - } - } else { - hw_dbg(hw, "Software semaphore SMBI between device drivers " - "not granted.\n"); - } - - return status; -} - -/** - * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore - * @hw: pointer to hardware structure - * - * This function clears hardware semaphore bits. - **/ -static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) -{ - u32 swsm; - - /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ - - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); - swsm &= ~IXGBE_SWSM_SMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); - - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); - swsm &= ~IXGBE_SWFW_REGSMP; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); - - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbe_blink_led_start_X540 - Blink LED based on index. - * @hw: pointer to hardware structure - * @index: led number to blink - * - * Devices that implement the version 2 interface: - * X540 - **/ -static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) -{ - u32 macc_reg; - u32 ledctl_reg; - - /* - * In order for the blink bit in the LED control register - * to work, link and speed must be forced in the MAC. We - * will reverse this when we stop the blinking. - */ - macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); - macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; - IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); - - /* Set the LED to LINK_UP + BLINK.
*/ - ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); - ledctl_reg |= IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** - * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. - * @hw: pointer to hardware structure - * @index: led number to stop blinking - * - * Devices that implement the version 2 interface: - * X540 - **/ -static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) -{ - u32 macc_reg; - u32 ledctl_reg; - - /* Restore the LED to its default value. */ - ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); - ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); - ledctl_reg &= ~IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); - - /* Unforce link and speed in the MAC. */ - macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); - macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); - IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} -static struct ixgbe_mac_operations mac_ops_X540 = { - .init_hw = &ixgbe_init_hw_generic, - .reset_hw = &ixgbe_reset_hw_X540, - .start_hw = &ixgbe_start_hw_X540, - .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, - .get_media_type = &ixgbe_get_media_type_X540, - .get_supported_physical_layer = - &ixgbe_get_supported_physical_layer_X540, - .enable_rx_dma = &ixgbe_enable_rx_dma_generic, - .get_mac_addr = &ixgbe_get_mac_addr_generic, - .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, - .get_device_caps = &ixgbe_get_device_caps_generic, - .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, - .stop_adapter = &ixgbe_stop_adapter_generic, - .get_bus_info = &ixgbe_get_bus_info_generic, - .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, - .read_analog_reg8 = NULL, - .write_analog_reg8 = NULL, - .setup_link = &ixgbe_setup_mac_link_X540, - .set_rxpba = &ixgbe_set_rxpba_generic, - .check_link = &ixgbe_check_mac_link_generic, - .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, - .led_on = &ixgbe_led_on_generic, - .led_off = &ixgbe_led_off_generic, - .blink_led_start = &ixgbe_blink_led_start_X540, - .blink_led_stop = &ixgbe_blink_led_stop_X540, - .set_rar = &ixgbe_set_rar_generic, - .clear_rar = &ixgbe_clear_rar_generic, - .set_vmdq = &ixgbe_set_vmdq_generic, - .clear_vmdq = &ixgbe_clear_vmdq_generic, - .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, - .enable_mc = &ixgbe_enable_mc_generic, - .disable_mc = &ixgbe_disable_mc_generic, - .clear_vfta = &ixgbe_clear_vfta_generic, - .set_vfta = &ixgbe_set_vfta_generic, - .fc_enable = &ixgbe_fc_enable_generic, - .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, - .init_uta_tables = &ixgbe_init_uta_tables_generic, - .setup_sfp = NULL, - .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, - .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, - .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, - .release_swfw_sync = &ixgbe_release_swfw_sync_X540, -}; - -static struct ixgbe_eeprom_operations eeprom_ops_X540 = { - .init_params = &ixgbe_init_eeprom_params_X540, - .read = &ixgbe_read_eerd_X540, - .read_buffer = &ixgbe_read_eerd_buffer_X540, - .write = &ixgbe_write_eewr_X540, - .write_buffer = &ixgbe_write_eewr_buffer_X540, - .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, - .validate_checksum = &ixgbe_validate_eeprom_checksum_X540, - .update_checksum = &ixgbe_update_eeprom_checksum_X540, 
-}; - -static struct ixgbe_phy_operations phy_ops_X540 = { - .identify = &ixgbe_identify_phy_generic, - .identify_sfp = &ixgbe_identify_sfp_module_generic, - .init = NULL, - .reset = NULL, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .setup_link = &ixgbe_setup_phy_link_generic, - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, - .read_i2c_byte = &ixgbe_read_i2c_byte_generic, - .write_i2c_byte = &ixgbe_write_i2c_byte_generic, - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, - .check_overtemp = &ixgbe_tn_check_overtemp, -}; - -struct ixgbe_info ixgbe_X540_info = { - .mac = ixgbe_mac_X540, - .get_invariants = &ixgbe_get_invariants_X540, - .mac_ops = &mac_ops_X540, - .eeprom_ops = &eeprom_ops_X540, - .phy_ops = &phy_ops_X540, - .mbx_ops = &mbx_ops_generic, -}; diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile deleted file mode 100644 index 1f35d22..0000000 --- a/drivers/net/ixgbevf/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -################################################################################ -# -# Intel 82599 Virtual Function driver -# Copyright(c) 1999 - 2010 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - -# -# Makefile for the Intel(R) 82599 VF ethernet driver -# - -obj-$(CONFIG_IXGBEVF) += ixgbevf.o - -ixgbevf-objs := vf.o \ - mbx.o \ - ethtool.o \ - ixgbevf_main.o - diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h deleted file mode 100644 index 78abb6f..0000000 --- a/drivers/net/ixgbevf/defines.h +++ /dev/null @@ -1,297 +0,0 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". 
- - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBEVF_DEFINES_H_ -#define _IXGBEVF_DEFINES_H_ - -/* Device IDs */ -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define IXGBE_DEV_ID_X540_VF 0x1515 - -#define IXGBE_VF_IRQ_CLEAR_MASK 7 -#define IXGBE_VF_MAX_TX_QUEUES 1 -#define IXGBE_VF_MAX_RX_QUEUES 1 -#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 - -/* Link speed */ -typedef u32 ixgbe_link_speed; -#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 -#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 - -#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define IXGBE_LINKS_UP 0x40000000 -#define IXGBE_LINKS_SPEED_82599 0x30000000 -#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 -#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 - -/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 - -/* Interrupt Vector Allocation Registers */ -#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ - -#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ - -/* Receive Config masks */ -#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ -#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ -#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ -#define IXGBE_RXDCTL_RLPML_EN 0x00008000 - -/* DCA Control */ -#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ - -/* PSRTYPE bit definitions */ -#define IXGBE_PSRTYPE_TCPHDR 0x00000010 -#define IXGBE_PSRTYPE_UDPHDR 0x00000020 -#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 -#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 -#define IXGBE_PSRTYPE_L2HDR 0x00001000 - -/* SRRCTL bit definitions */ -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ -#define IXGBE_SRRCTL_RDMTS_SHIFT 22 -#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 -#define IXGBE_SRRCTL_DROP_EN 0x10000000 -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 -#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 - -/* Receive Descriptor bit definitions */ -#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ -#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ -#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ -#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ -#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ -#define 
IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ -#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ -#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ -#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ -#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ -#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ -#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ -#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ -#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ -#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ -#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ -#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ -#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ -#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ -#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ -#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ -#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ -#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ -#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ -#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ -#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define IXGBE_RXD_PRI_SHIFT 13 -#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define IXGBE_RXD_CFI_SHIFT 12 - -#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ -#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ -#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ -#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ -#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ -#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ -#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ -#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ -#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ - -#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F -#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 -#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 -#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 -#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 -#define IXGBE_RXDADV_RSCCNT_SHIFT 17 -#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 -#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 -#define IXGBE_RXDADV_SPH 0x8000 - -#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXD_ERR_CE | \ - IXGBE_RXD_ERR_LE | \ - IXGBE_RXD_ERR_PE | \ - IXGBE_RXD_ERR_OSE | \ - IXGBE_RXD_ERR_USE) - -#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXDADV_ERR_CE | \ - IXGBE_RXDADV_ERR_LE | \ - IXGBE_RXDADV_ERR_PE | \ - IXGBE_RXDADV_ERR_OSE | \ - IXGBE_RXDADV_ERR_USE) - -#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ -#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ - -/* Transmit Descriptor - Advanced */ -union ixgbe_adv_tx_desc { - struct { - __le64 buffer_addr; /* Address of descriptor's data buf */ - __le32 cmd_type_len; - __le32 olinfo_status; - } read; - struct { - __le64 rsvd; /* Reserved */ - __le32 nxtseq_seed; - __le32 status; - } wb; -}; - -/* Receive Descriptor - Advanced */ -union ixgbe_adv_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - union { - __le32 data; - struct { - __le16 pkt_info; /* RSS, Pkt type */ - __le16 hdr_info; /* Splithdr, hdrlen */ - } hs_rss; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - struct { - __le16 ip_id; /* IP id */ - __le16 csum; /* Packet Checksum */ - } csum_ip; - } hi_dword; - } lower; - struct { - __le32 status_error; /* ext status/error */ - __le16 length; /* Packet length */ - __le16 vlan; /* VLAN tag */ - } upper; - } wb; /* writeback */ -}; - -/* Context descriptors */ -struct ixgbe_adv_tx_context_desc { - __le32 vlan_macip_lens; - __le32 seqnum_seed; - __le32 type_tucmd_mlhl; - __le32 mss_l4len_idx; -}; - -/* Adv Transmit Descriptor Config Masks */ -#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ -#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ -#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ -#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ -#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ -#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ -#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define 
IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ -#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ -#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ -#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ - -/* Interrupt register bitmasks */ - -/* Extended Interrupt Cause Read */ -#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ -#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ -#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ - -/* Extended Interrupt Cause Set */ -#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -/* Extended Interrupt Mask Set */ -#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -/* Extended Interrupt Mask Clear */ -#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -#define IXGBE_EIMS_ENABLE_MASK ( \ - IXGBE_EIMS_RTX_QUEUE | \ - IXGBE_EIMS_MAILBOX | \ - IXGBE_EIMS_OTHER) - -#define IXGBE_EITR_CNT_WDIS 0x80000000 - -/* Error Codes */ -#define IXGBE_ERR_INVALID_MAC_ADDR -1 -#define IXGBE_ERR_RESET_FAILED -2 - -#endif /* _IXGBEVF_DEFINES_H_ */ diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c deleted file mode 100644 index deee375..0000000 --- a/drivers/net/ixgbevf/ethtool.c +++ /dev/null @@ -1,742 +0,0 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2009 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -/* ethtool support for ixgbevf */ - -#include <linux/types.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/pci.h> -#include <linux/netdevice.h> -#include <linux/ethtool.h> -#include <linux/vmalloc.h> -#include <linux/if_vlan.h> -#include <linux/uaccess.h> - -#include "ixgbevf.h" - -#define IXGBE_ALL_RAR_ENTRIES 16 - -#ifdef ETHTOOL_GSTATS -struct ixgbe_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; - int base_stat_offset; - int saved_reset_offset; -}; - -#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \ - offsetof(struct ixgbevf_adapter, m), \ - offsetof(struct ixgbevf_adapter, b), \ - offsetof(struct ixgbevf_adapter, r) -static struct ixgbe_stats ixgbe_gstrings_stats[] = { - {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, - stats.saved_reset_vfgprc)}, - {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, - stats.saved_reset_vfgptc)}, - {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc, - stats.saved_reset_vfgorc)}, - {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, - stats.saved_reset_vfgotc)}, - {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)}, - {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, - stats.saved_reset_vfmprc)}, - {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base, - zero_base)}, - {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base, - zero_base)}, - {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, - zero_base)}, - {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)}, -}; - -#define IXGBE_QUEUE_STATS_LEN 0 -#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) - -#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) -#endif /* ETHTOOL_GSTATS */ -#ifdef ETHTOOL_TEST -static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { - "Register test (offline)", - "Link test (on/offline)" -}; -#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) -#endif /* ETHTOOL_TEST */ - -static int ixgbevf_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 link_speed = 0; - bool link_up; - - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->transceiver = XCVR_DUMMY1; - ecmd->port = -1; - - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - - if (link_up) { - ethtool_cmd_speed_set( - ecmd, - (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- SPEED_10000 : SPEED_1000); - ecmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(ecmd, -1); - ecmd->duplex = -1; - } - - return 0; -} - -static u32 ixgbevf_get_rx_csum(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED; -} - -static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - if (data) - adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; - else - adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; - - if (netif_running(netdev)) { - if (!adapter->dev_closed) - ixgbevf_reinit_locked(adapter); - } else { - ixgbevf_reset(adapter); - } - - return 0; -} - -static int ixgbevf_set_tso(struct net_device *netdev, u32 data) -{ - if (data) { - netdev->features |= NETIF_F_TSO; - netdev->features |= NETIF_F_TSO6; - } else { - netif_tx_stop_all_queues(netdev); - netdev->features &= ~NETIF_F_TSO; - netdev->features &= ~NETIF_F_TSO6; - netif_tx_start_all_queues(netdev); - } - return 0; -} - -static u32 ixgbevf_get_msglevel(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - return adapter->msg_enable; -} - -static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - adapter->msg_enable = data; -} - -#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) - -static char *ixgbevf_reg_names[] = { - "IXGBE_VFCTRL", - "IXGBE_VFSTATUS", - "IXGBE_VFLINKS", - "IXGBE_VFRXMEMWRAP", - "IXGBE_VFFRTIMER", - "IXGBE_VTEICR", - "IXGBE_VTEICS", - "IXGBE_VTEIMS", - "IXGBE_VTEIMC", - "IXGBE_VTEIAC", - "IXGBE_VTEIAM", - "IXGBE_VTEITR", - "IXGBE_VTIVAR", - "IXGBE_VTIVAR_MISC", - "IXGBE_VFRDBAL0", - "IXGBE_VFRDBAL1", - "IXGBE_VFRDBAH0", - "IXGBE_VFRDBAH1", - "IXGBE_VFRDLEN0", - "IXGBE_VFRDLEN1", - "IXGBE_VFRDH0", - "IXGBE_VFRDH1", - "IXGBE_VFRDT0", - "IXGBE_VFRDT1", - "IXGBE_VFRXDCTL0", - "IXGBE_VFRXDCTL1", - "IXGBE_VFSRRCTL0", - "IXGBE_VFSRRCTL1", - "IXGBE_VFPSRTYPE", - "IXGBE_VFTDBAL0", - "IXGBE_VFTDBAL1", - "IXGBE_VFTDBAH0", - "IXGBE_VFTDBAH1", - "IXGBE_VFTDLEN0", - "IXGBE_VFTDLEN1", - "IXGBE_VFTDH0", - "IXGBE_VFTDH1", - "IXGBE_VFTDT0", - "IXGBE_VFTDT1", - "IXGBE_VFTXDCTL0", - "IXGBE_VFTXDCTL1", - "IXGBE_VFTDWBAL0", - "IXGBE_VFTDWBAL1", - "IXGBE_VFTDWBAH0", - "IXGBE_VFTDWBAH1" -}; - - -static int ixgbevf_get_regs_len(struct net_device *netdev) -{ - return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32); -} - -static void ixgbevf_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, - void *p) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 *regs_buff = p; - u32 regs_len = ixgbevf_get_regs_len(netdev); - u8 i; - - memset(p, 0, regs_len); - - regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; - - /* General Registers */ - regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL); - regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); - regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); - regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER); - - /* Interrupt */ - /* don't read EICR because it can clear interrupt causes, instead - * read EICS which is a shadow but doesn't clear EICR */ - regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); - regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); - regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); - regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC); - regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC); - 
regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM); - regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0)); - regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0)); - regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); - - /* Receive DMA */ - for (i = 0; i < 2; i++) - regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i)); - for (i = 0; i < 2; i++) - regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i)); - for (i = 0; i < 2; i++) - regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i)); - for (i = 0; i < 2; i++) - regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i)); - for (i = 0; i < 2; i++) - regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i)); - for (i = 0; i < 2; i++) - regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); - for (i = 0; i < 2; i++) - regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); - - /* Receive */ - regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE); - - /* Transmit */ - for (i = 0; i < 2; i++) - regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i)); - for (i = 0; i < 2; i++) - regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i)); - for (i = 0; i < 2; i++) - regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i)); - for (i = 0; i < 2; i++) - regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i)); - for (i = 0; i < 2; i++) - regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i)); - for (i = 0; i < 2; i++) - regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); - for (i = 0; i < 2; i++) - regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i)); - for (i = 0; i < 2; i++) - regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i)); - - for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++) - hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]); -} - -static void ixgbevf_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - strlcpy(drvinfo->driver, ixgbevf_driver_name, 32); - strlcpy(drvinfo->version, ixgbevf_driver_version, 32); - - strlcpy(drvinfo->fw_version, "N/A", 4); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); -} - -static void ixgbevf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbevf_ring *tx_ring = adapter->tx_ring; - struct ixgbevf_ring *rx_ring = adapter->rx_ring; - - ring->rx_max_pending = IXGBEVF_MAX_RXD; - ring->tx_max_pending = IXGBEVF_MAX_TXD; - ring->rx_mini_max_pending = 0; - ring->rx_jumbo_max_pending = 0; - ring->rx_pending = rx_ring->count; - ring->tx_pending = tx_ring->count; - ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; -} - -static int ixgbevf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; - int i, err = 0; - u32 new_rx_count, new_tx_count; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD); - new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); - - new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD); - new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); - - if ((new_tx_count == adapter->tx_ring->count) && - (new_rx_count == adapter->rx_ring->count)) { - /* nothing to do */ - return 0; - } - - while 
(test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) - msleep(1); - - /* - * If the adapter isn't up and running then just set the - * new parameters and scurry for the exits. - */ - if (!netif_running(adapter->netdev)) { - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i].count = new_tx_count; - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i].count = new_rx_count; - adapter->tx_ring_count = new_tx_count; - adapter->rx_ring_count = new_rx_count; - goto clear_reset; - } - - tx_ring = kcalloc(adapter->num_tx_queues, - sizeof(struct ixgbevf_ring), GFP_KERNEL); - if (!tx_ring) { - err = -ENOMEM; - goto clear_reset; - } - - rx_ring = kcalloc(adapter->num_rx_queues, - sizeof(struct ixgbevf_ring), GFP_KERNEL); - if (!rx_ring) { - err = -ENOMEM; - goto err_rx_setup; - } - - ixgbevf_down(adapter); - - memcpy(tx_ring, adapter->tx_ring, - adapter->num_tx_queues * sizeof(struct ixgbevf_ring)); - for (i = 0; i < adapter->num_tx_queues; i++) { - tx_ring[i].count = new_tx_count; - err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]); - if (err) { - while (i) { - i--; - ixgbevf_free_tx_resources(adapter, - &tx_ring[i]); - } - goto err_tx_ring_setup; - } - tx_ring[i].v_idx = adapter->tx_ring[i].v_idx; - } - - memcpy(rx_ring, adapter->rx_ring, - adapter->num_rx_queues * sizeof(struct ixgbevf_ring)); - for (i = 0; i < adapter->num_rx_queues; i++) { - rx_ring[i].count = new_rx_count; - err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); - if (err) { - while (i) { - i--; - ixgbevf_free_rx_resources(adapter, - &rx_ring[i]); - } - goto err_rx_ring_setup; - } - rx_ring[i].v_idx = adapter->rx_ring[i].v_idx; - } - - /* - * Only switch to new rings if all the prior allocations - * and ring setups have succeeded. - */ - kfree(adapter->tx_ring); - adapter->tx_ring = tx_ring; - adapter->tx_ring_count = new_tx_count; - - kfree(adapter->rx_ring); - adapter->rx_ring = rx_ring; - adapter->rx_ring_count = new_rx_count; - - /* success! */ - ixgbevf_up(adapter); - - goto clear_reset; - -err_rx_ring_setup: - for(i = 0; i < adapter->num_tx_queues; i++) - ixgbevf_free_tx_resources(adapter, &tx_ring[i]); - -err_tx_ring_setup: - kfree(rx_ring); - -err_rx_setup: - kfree(tx_ring); - -clear_reset: - clear_bit(__IXGBEVF_RESETTING, &adapter->state); - return err; -} - -static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) -{ - switch (stringset) { - case ETH_SS_TEST: - return IXGBE_TEST_LEN; - case ETH_SS_STATS: - return IXGBE_GLOBAL_STATS_LEN; - default: - return -EINVAL; - } -} - -static void ixgbevf_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - int i; - - ixgbevf_update_stats(adapter); - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - char *p = (char *)adapter + - ixgbe_gstrings_stats[i].stat_offset; - char *b = (char *)adapter + - ixgbe_gstrings_stats[i].base_stat_offset; - char *r = (char *)adapter + - ixgbe_gstrings_stats[i].saved_reset_offset; - data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p) - - ((ixgbe_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)b : *(u32 *)b) + - ((ixgbe_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? 
*(u64 *)r : *(u32 *)r); - } -} - -static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - char *p = (char *)data; - int i; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *ixgbe_gstrings_test, - IXGBE_TEST_LEN * ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - memcpy(p, ixgbe_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - break; - } -} - -static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data) -{ - struct ixgbe_hw *hw = &adapter->hw; - bool link_up; - u32 link_speed = 0; - *data = 0; - - hw->mac.ops.check_link(hw, &link_speed, &link_up, true); - if (!link_up) - *data = 1; - - return *data; -} - -/* ethtool register test data */ -struct ixgbevf_reg_test { - u16 reg; - u8 array_len; - u8 test_type; - u32 mask; - u32 write; -}; - -/* In the hardware, registers are laid out either singly, in arrays - * spaced 0x40 bytes apart, or in contiguous tables. We assume - * most tests take place on arrays or single registers (handled - * as a single-element array) and special-case the tables. - * Table tests are always pattern tests. - * - * We also make provision for some required setup steps by specifying - * registers to be written without any read-back testing. - */ - -#define PATTERN_TEST 1 -#define SET_READ_TEST 2 -#define WRITE_NO_TEST 3 -#define TABLE32_TEST 4 -#define TABLE64_TEST_LO 5 -#define TABLE64_TEST_HI 6 - -/* default VF register test */ -static const struct ixgbevf_reg_test reg_test_vf[] = { - { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, - { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, - { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, - { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, - { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 }, - { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, - { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, - { 0, 0, 0, 0 } -}; - -static const u32 register_test_patterns[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF -}; - -#define REG_PATTERN_TEST(R, M, W) \ -{ \ - u32 pat, val, before; \ - for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \ - before = readl(adapter->hw.hw_addr + R); \ - writel((register_test_patterns[pat] & W), \ - (adapter->hw.hw_addr + R)); \ - val = readl(adapter->hw.hw_addr + R); \ - if (val != (register_test_patterns[pat] & W & M)) { \ - hw_dbg(&adapter->hw, \ - "pattern test reg %04X failed: got " \ - "0x%08X expected 0x%08X\n", \ - R, val, (register_test_patterns[pat] & W & M)); \ - *data = R; \ - writel(before, adapter->hw.hw_addr + R); \ - return 1; \ - } \ - writel(before, adapter->hw.hw_addr + R); \ - } \ -} - -#define REG_SET_AND_CHECK(R, M, W) \ -{ \ - u32 val, before; \ - before = readl(adapter->hw.hw_addr + R); \ - writel((W & M), (adapter->hw.hw_addr + R)); \ - val = readl(adapter->hw.hw_addr + R); \ - if ((W & M) != (val & M)) { \ - printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \ - "expected 0x%08X\n", R, (val & M), (W & M)); \ - *data = R; \ - writel(before, (adapter->hw.hw_addr + R)); \ - return 1; \ - } \ - writel(before, (adapter->hw.hw_addr + R)); \ -} - -static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) -{ - const struct ixgbevf_reg_test *test; - u32 i; - - test = reg_test_vf; - - /* - * Perform 
the register test, looping through the test table - * until we either fail or reach the null entry. - */ - while (test->reg) { - for (i = 0; i < test->array_len; i++) { - switch (test->test_type) { - case PATTERN_TEST: - REG_PATTERN_TEST(test->reg + (i * 0x40), - test->mask, - test->write); - break; - case SET_READ_TEST: - REG_SET_AND_CHECK(test->reg + (i * 0x40), - test->mask, - test->write); - break; - case WRITE_NO_TEST: - writel(test->write, - (adapter->hw.hw_addr + test->reg) - + (i * 0x40)); - break; - case TABLE32_TEST: - REG_PATTERN_TEST(test->reg + (i * 4), - test->mask, - test->write); - break; - case TABLE64_TEST_LO: - REG_PATTERN_TEST(test->reg + (i * 8), - test->mask, - test->write); - break; - case TABLE64_TEST_HI: - REG_PATTERN_TEST((test->reg + 4) + (i * 8), - test->mask, - test->write); - break; - } - } - test++; - } - - *data = 0; - return *data; -} - -static void ixgbevf_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - bool if_running = netif_running(netdev); - - set_bit(__IXGBEVF_TESTING, &adapter->state); - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - - hw_dbg(&adapter->hw, "offline testing starting\n"); - - /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ - if (ixgbevf_link_test(adapter, &data[1])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - if (if_running) - /* indicate we're in test mode */ - dev_close(netdev); - else - ixgbevf_reset(adapter); - - hw_dbg(&adapter->hw, "register testing starting\n"); - if (ixgbevf_reg_test(adapter, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - ixgbevf_reset(adapter); - - clear_bit(__IXGBEVF_TESTING, &adapter->state); - if (if_running) - dev_open(netdev); - } else { - hw_dbg(&adapter->hw, "online testing starting\n"); - /* Online tests */ - if (ixgbevf_link_test(adapter, &data[1])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - /* Online tests aren't run; pass by default */ - data[0] = 0; - - clear_bit(__IXGBEVF_TESTING, &adapter->state); - } - msleep_interruptible(4 * 1000); -} - -static int ixgbevf_nway_reset(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - if (netif_running(netdev)) { - if (!adapter->dev_closed) - ixgbevf_reinit_locked(adapter); - } - - return 0; -} - -static struct ethtool_ops ixgbevf_ethtool_ops = { - .get_settings = ixgbevf_get_settings, - .get_drvinfo = ixgbevf_get_drvinfo, - .get_regs_len = ixgbevf_get_regs_len, - .get_regs = ixgbevf_get_regs, - .nway_reset = ixgbevf_nway_reset, - .get_link = ethtool_op_get_link, - .get_ringparam = ixgbevf_get_ringparam, - .set_ringparam = ixgbevf_set_ringparam, - .get_rx_csum = ixgbevf_get_rx_csum, - .set_rx_csum = ixgbevf_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = ethtool_op_set_tx_ipv6_csum, - .get_sg = ethtool_op_get_sg, - .set_sg = ethtool_op_set_sg, - .get_msglevel = ixgbevf_get_msglevel, - .set_msglevel = ixgbevf_set_msglevel, - .get_tso = ethtool_op_get_tso, - .set_tso = ixgbevf_set_tso, - .self_test = ixgbevf_diag_test, - .get_sset_count = ixgbevf_get_sset_count, - .get_strings = ixgbevf_get_strings, - .get_ethtool_stats = ixgbevf_get_ethtool_stats, -}; - -void ixgbevf_set_ethtool_ops(struct net_device *netdev) -{ - SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops); -} diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h deleted file mode 100644 index 8857df4..0000000 --- a/drivers/net/ixgbevf/ixgbevf.h +++ 
/dev/null @@ -1,318 +0,0 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBEVF_H_ -#define _IXGBEVF_H_ - -#include -#include -#include -#include -#include -#include - -#include "vf.h" - -/* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ -struct ixgbevf_tx_buffer { - struct sk_buff *skb; - dma_addr_t dma; - unsigned long time_stamp; - u16 length; - u16 next_to_watch; - u16 mapped_as_page; -}; - -struct ixgbevf_rx_buffer { - struct sk_buff *skb; - dma_addr_t dma; - struct page *page; - dma_addr_t page_dma; - unsigned int page_offset; -}; - -struct ixgbevf_ring { - struct ixgbevf_adapter *adapter; /* backlink */ - void *desc; /* descriptor ring memory */ - dma_addr_t dma; /* phys. address of descriptor ring */ - unsigned int size; /* length in bytes */ - unsigned int count; /* amount of descriptors */ - unsigned int next_to_use; - unsigned int next_to_clean; - - int queue_index; /* needed for multiqueue queue management */ - union { - struct ixgbevf_tx_buffer *tx_buffer_info; - struct ixgbevf_rx_buffer *rx_buffer_info; - }; - - u16 head; - u16 tail; - - unsigned int total_bytes; - unsigned int total_packets; - - u16 reg_idx; /* holds the special value that gets the hardware register - * offset associated with this ring, which is different - * for DCB and RSS modes */ - -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - /* cpu for tx queue */ - int cpu; -#endif - - u64 v_idx; /* maps directly to the index for this ring in the hardware - * vector array, can also be used for finding the bit in EICR - * and friends that represents the vector for this ring */ - - u16 work_limit; /* max work per interrupt */ - u16 rx_buf_len; -}; - -enum ixgbevf_ring_f_enum { - RING_F_NONE = 0, - RING_F_ARRAY_SIZE /* must be last in enum set */ -}; - -struct ixgbevf_ring_feature { - int indices; - int mask; -}; - -/* How many Rx Buffers do we bundle into one write to the hardware ? 
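- * One tail-register write per IXGBEVF_RX_BUFFER_WRITE (16) refilled
- * buffers amortizes the MMIO cost: refilling 64 descriptors takes
- * 4 tail writes instead of 64.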
*/ -#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ - -#define MAX_RX_QUEUES 1 -#define MAX_TX_QUEUES 1 - -#define IXGBEVF_DEFAULT_TXD 1024 -#define IXGBEVF_DEFAULT_RXD 512 -#define IXGBEVF_MAX_TXD 4096 -#define IXGBEVF_MIN_TXD 64 -#define IXGBEVF_MAX_RXD 4096 -#define IXGBEVF_MIN_RXD 64 - -/* Supported Rx Buffer Sizes */ -#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_2048 2048 -#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ - -#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 - -#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) - -#define IXGBE_TX_FLAGS_CSUM (u32)(1) -#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1) -#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) -#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) -#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4) -#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5) -#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 -#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 - -/* MAX_MSIX_Q_VECTORS of these are allocated, - * but we only use one per queue-specific vector. - */ -struct ixgbevf_q_vector { - struct ixgbevf_adapter *adapter; - struct napi_struct napi; - DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ - DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ - u8 rxr_count; /* Rx ring count assigned to this vector */ - u8 txr_count; /* Tx ring count assigned to this vector */ - u8 tx_itr; - u8 rx_itr; - u32 eitr; - int v_idx; /* vector index in list */ -}; - -/* Helper macros to switch between ints/sec and what the register uses. - * And yes, it's the same math going both ways. The lowest value - * supported by all of the ixgbe hardware is 8. - */ -#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ - ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) -#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG - -#define IXGBE_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) - -#define IXGBE_RX_DESC_ADV(R, i) \ - (&(((union ixgbe_adv_rx_desc *)((R).desc))[i])) -#define IXGBE_TX_DESC_ADV(R, i) \ - (&(((union ixgbe_adv_tx_desc *)((R).desc))[i])) -#define IXGBE_TX_CTXTDESC_ADV(R, i) \ - (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) - -#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 - -#define OTHER_VECTOR 1 -#define NON_Q_VECTORS (OTHER_VECTOR) - -#define MAX_MSIX_Q_VECTORS 2 -#define MAX_MSIX_COUNT 2 - -#define MIN_MSIX_Q_VECTORS 2 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) - -/* board specific private data structure */ -struct ixgbevf_adapter { - struct timer_list watchdog_timer; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u16 bd_number; - struct work_struct reset_task; - struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; - char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; - - /* Interrupt Throttle Rate */ - u32 itr_setting; - u16 eitr_low; - u16 eitr_high; - - /* TX */ - struct ixgbevf_ring *tx_ring; /* One per active queue */ - int num_tx_queues; - u64 restart_queue; - u64 hw_csum_tx_good; - u64 lsc_int; - u64 hw_tso_ctxt; - u64 hw_tso6_ctxt; - u32 tx_timeout_count; - - /* RX */ - struct ixgbevf_ring *rx_ring; /* One per active queue */ - int num_rx_queues; - int num_rx_pools; /* == num_rx_queues in 82598 */ - int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ - u64 hw_csum_rx_error; - u64 hw_rx_no_dma_resources; - u64 hw_csum_rx_good; - u64 non_eop_descs; - int num_msix_vectors; - int max_msix_q_vectors; /* true count of q_vectors for device */ - struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE]; - struct msix_entry *msix_entries; - - u64 rx_hdr_split; - u32 alloc_rx_page_failed; - u32 alloc_rx_buff_failed; - - /* Some features need tri-state capability, - * thus the additional *_CAPABLE flags. 
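- * For example, IXGBE_FLAG_RX_PS_CAPABLE is fixed at probe time while
- * IXGBE_FLAG_RX_PS_ENABLED is toggled at runtime from the MTU (see
- * ixgbevf_configure_rx()), so "capable but currently disabled" is a
- * representable state.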
- */ - u32 flags; -#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) -#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) -#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2) -#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4) -#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5) -#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6) -#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) -#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8) - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - /* structs defined in ixgbe_vf.h */ - struct ixgbe_hw hw; - u16 msg_enable; - struct ixgbevf_hw_stats stats; - u64 zero_base; - /* Interrupt Throttle Rate */ - u32 eitr_param; - - unsigned long state; - u32 *config_space; - u64 tx_busy; - unsigned int tx_ring_count; - unsigned int rx_ring_count; - - u32 link_speed; - bool link_up; - unsigned long link_check_timeout; - - struct work_struct watchdog_task; - bool netdev_registered; - bool dev_closed; -}; - -enum ixbgevf_state_t { - __IXGBEVF_TESTING, - __IXGBEVF_RESETTING, - __IXGBEVF_DOWN -}; - -enum ixgbevf_boards { - board_82599_vf, - board_X540_vf, -}; - -extern struct ixgbevf_info ixgbevf_82599_vf_info; -extern struct ixgbevf_info ixgbevf_X540_vf_info; -extern struct ixgbe_mbx_operations ixgbevf_mbx_ops; - -/* needed by ethtool.c */ -extern char ixgbevf_driver_name[]; -extern const char ixgbevf_driver_version[]; - -extern int ixgbevf_up(struct ixgbevf_adapter *adapter); -extern void ixgbevf_down(struct ixgbevf_adapter *adapter); -extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); -extern void ixgbevf_reset(struct ixgbevf_adapter *adapter); -extern void ixgbevf_set_ethtool_ops(struct net_device *netdev); -extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, - struct ixgbevf_ring *); -extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, - struct ixgbevf_ring *); -extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, - struct ixgbevf_ring *); -extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, - struct ixgbevf_ring *); -extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); - -#ifdef ETHTOOL_OPS_COMPAT -extern int ethtool_ioctl(struct ifreq *ifr); - -#endif -extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); -extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); - -#ifdef DEBUG -extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw); -#define hw_dbg(hw, format, arg...) \ - printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg) -#else -#define hw_dbg(hw, format, arg...) do {} while (0) -#endif - -#endif /* _IXGBEVF_H_ */ diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c deleted file mode 100644 index 3b880a2..0000000 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ /dev/null @@ -1,3523 +0,0 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - - -/****************************************************************************** - Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code -******************************************************************************/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ixgbevf.h" - -char ixgbevf_driver_name[] = "ixgbevf"; -static const char ixgbevf_driver_string[] = - "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; - -#define DRV_VERSION "2.1.0-k" -const char ixgbevf_driver_version[] = DRV_VERSION; -static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2010 Intel Corporation."; - -static const struct ixgbevf_info *ixgbevf_info_tbl[] = { - [board_82599_vf] = &ixgbevf_82599_vf_info, - [board_X540_vf] = &ixgbevf_X540_vf_info, -}; - -/* ixgbevf_pci_tbl - PCI Device ID Table - * - * Wildcard entries (PCI_ANY_ID) should come last - * Last entry must be all 0s - * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, - * Class, Class Mask, private data (not used) } - */ -static struct pci_device_id ixgbevf_pci_tbl[] = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), - board_82599_vf}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), - board_X540_vf}, - - /* required last entry */ - {0, } -}; -MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); - -MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -#define DEFAULT_DEBUG_LEVEL_SHIFT 3 - -/* forward decls */ -static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector); -static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, - u32 itr_reg); - -static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, - struct ixgbevf_ring *rx_ring, - u32 val) -{ - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
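- * Sketch of the ordering this enforces:
- *   rx_desc->read.pkt_addr = ...;        (1) descriptor stores
- *   wmb();                               (2) make (1) visible
- *   IXGBE_WRITE_REG(hw, VFRDT, val);     (3) only then bump the tail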
- */ - wmb(); - IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); -} - -/* - * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors - * @adapter: pointer to adapter struct - * @direction: 0 for Rx, 1 for Tx, -1 for other causes - * @queue: queue to map the corresponding interrupt to - * @msix_vector: the vector to map to the corresponding queue - * - */ -static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, - u8 queue, u8 msix_vector) -{ - u32 ivar, index; - struct ixgbe_hw *hw = &adapter->hw; - if (direction == -1) { - /* other causes */ - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); - ivar &= ~0xFF; - ivar |= msix_vector; - IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); - } else { - /* tx or rx causes */ - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - index = ((16 * (queue & 1)) + (8 * direction)); - ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); - ivar &= ~(0xFF << index); - ivar |= (msix_vector << index); - IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); - } -} - -static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter, - struct ixgbevf_tx_buffer - *tx_buffer_info) -{ - if (tx_buffer_info->dma) { - if (tx_buffer_info->mapped_as_page) - dma_unmap_page(&adapter->pdev->dev, - tx_buffer_info->dma, - tx_buffer_info->length, - DMA_TO_DEVICE); - else - dma_unmap_single(&adapter->pdev->dev, - tx_buffer_info->dma, - tx_buffer_info->length, - DMA_TO_DEVICE); - tx_buffer_info->dma = 0; - } - if (tx_buffer_info->skb) { - dev_kfree_skb_any(tx_buffer_info->skb); - tx_buffer_info->skb = NULL; - } - tx_buffer_info->time_stamp = 0; - /* tx_buffer_info must be completely set up in the transmit path */ -} - -#define IXGBE_MAX_TXD_PWR 14 -#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) - -/* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ - (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 
1 : 0)) -#ifdef MAX_SKB_FRAGS -#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ - MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ -#else -#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) -#endif - -static void ixgbevf_tx_timeout(struct net_device *netdev); - -/** - * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes - * @adapter: board private structure - * @tx_ring: tx ring to clean - **/ -static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - union ixgbe_adv_tx_desc *tx_desc, *eop_desc; - struct ixgbevf_tx_buffer *tx_buffer_info; - unsigned int i, eop, count = 0; - unsigned int total_bytes = 0, total_packets = 0; - - i = tx_ring->next_to_clean; - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); - - while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && - (count < tx_ring->work_limit)) { - bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ - for ( ; !cleaned; count++) { - struct sk_buff *skb; - tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - cleaned = (i == eop); - skb = tx_buffer_info->skb; - - if (cleaned && skb) { - unsigned int segs, bytecount; - - /* gso_segs is currently only valid for tcp */ - segs = skb_shinfo(skb)->gso_segs ?: 1; - /* multiply data chunks by size of headers */ - bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; - total_packets += segs; - total_bytes += bytecount; - } - - ixgbevf_unmap_and_free_tx_resource(adapter, - tx_buffer_info); - - tx_desc->wb.status = 0; - - i++; - if (i == tx_ring->count) - i = 0; - } - - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); - } - - tx_ring->next_to_clean = i; - -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(count && netif_carrier_ok(netdev) && - (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. 
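	 * This smp_mb() is intended to pair with the barrier in the
	 * transmit path between stopping the queue and re-checking ring
	 * occupancy. Worked numbers for the threshold test: with
	 * count = 512, next_to_clean = 10 and next_to_use = 500,
	 * IXGBE_DESC_UNUSED() = 512 + 10 - 500 - 1 = 21.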
- */ - smp_mb(); -#ifdef HAVE_TX_MQ - if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && - !test_bit(__IXGBEVF_DOWN, &adapter->state)) { - netif_wake_subqueue(netdev, tx_ring->queue_index); - ++adapter->restart_queue; - } -#else - if (netif_queue_stopped(netdev) && - !test_bit(__IXGBEVF_DOWN, &adapter->state)) { - netif_wake_queue(netdev); - ++adapter->restart_queue; - } -#endif - } - - /* re-arm the interrupt */ - if ((count >= tx_ring->work_limit) && - (!test_bit(__IXGBEVF_DOWN, &adapter->state))) { - IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx); - } - - tx_ring->total_bytes += total_bytes; - tx_ring->total_packets += total_packets; - - netdev->stats.tx_bytes += total_bytes; - netdev->stats.tx_packets += total_packets; - - return count < tx_ring->work_limit; -} - -/** - * ixgbevf_receive_skb - Send a completed packet up the stack - * @q_vector: structure containing interrupt and ring information - * @skb: packet to send up - * @status: hardware indication of status of receive - * @rx_ring: rx descriptor ring (for a specific queue) to setup - * @rx_desc: rx descriptor - **/ -static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, - struct sk_buff *skb, u8 status, - struct ixgbevf_ring *ring, - union ixgbe_adv_rx_desc *rx_desc) -{ - struct ixgbevf_adapter *adapter = q_vector->adapter; - bool is_vlan = (status & IXGBE_RXD_STAT_VP); - - if (is_vlan) { - u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - - __vlan_hwaccel_put_tag(skb, tag); - } - - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - napi_gro_receive(&q_vector->napi, skb); - else - netif_rx(skb); -} - -/** - * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum - * @adapter: address of board private structure - * @status_err: hardware indication of status of receive - * @skb: skb currently being received and modified - **/ -static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, - u32 status_err, struct sk_buff *skb) -{ - skb_checksum_none_assert(skb); - - /* Rx csum disabled */ - if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) - return; - - /* if IP and error */ - if ((status_err & IXGBE_RXD_STAT_IPCS) && - (status_err & IXGBE_RXDADV_ERR_IPE)) { - adapter->hw_csum_rx_error++; - return; - } - - if (!(status_err & IXGBE_RXD_STAT_L4CS)) - return; - - if (status_err & IXGBE_RXDADV_ERR_TCPE) { - adapter->hw_csum_rx_error++; - return; - } - - /* It must be a TCP or UDP packet with a valid checksum */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - adapter->hw_csum_rx_good++; -} - -/** - * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split - * @adapter: address of board private structure - **/ -static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *rx_ring, - int cleaned_count) -{ - struct pci_dev *pdev = adapter->pdev; - union ixgbe_adv_rx_desc *rx_desc; - struct ixgbevf_rx_buffer *bi; - struct sk_buff *skb; - unsigned int i; - unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; - - i = rx_ring->next_to_use; - bi = &rx_ring->rx_buffer_info[i]; - - while (cleaned_count--) { - rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); - - if (!bi->page_dma && - (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { - if (!bi->page) { - bi->page = netdev_alloc_page(adapter->netdev); - if (!bi->page) { - adapter->alloc_rx_page_failed++; - goto no_buffers; - } - bi->page_offset = 0; - } else { - /* use a half page if we're re-using */ - bi->page_offset ^= (PAGE_SIZE / 2); - } - - bi->page_dma = dma_map_page(&pdev->dev, bi->page, - bi->page_offset, - 
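-						   /* map only half the page:
-						    * the page_offset XOR
-						    * above alternates halves
-						    * so one page backs two
-						    * Rx buffers */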
(PAGE_SIZE / 2), - DMA_FROM_DEVICE); - } - - skb = bi->skb; - if (!skb) { - skb = netdev_alloc_skb(adapter->netdev, - bufsz); - - if (!skb) { - adapter->alloc_rx_buff_failed++; - goto no_buffers; - } - - /* - * Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - - bi->skb = skb; - } - if (!bi->dma) { - bi->dma = dma_map_single(&pdev->dev, skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - } - /* Refresh the desc even if buffer_addrs didn't change because - * each write-back erases this info. */ - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - } - - i++; - if (i == rx_ring->count) - i = 0; - bi = &rx_ring->rx_buffer_info[i]; - } - -no_buffers: - if (rx_ring->next_to_use != i) { - rx_ring->next_to_use = i; - if (i-- == 0) - i = (rx_ring->count - 1); - - ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); - } -} - -static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, - u64 qmask) -{ - u32 mask; - struct ixgbe_hw *hw = &adapter->hw; - - mask = (qmask & 0xFFFFFFFF); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); -} - -static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) -{ - return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; -} - -static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) -{ - return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; -} - -static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, - struct ixgbevf_ring *rx_ring, - int *work_done, int work_to_do) -{ - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct pci_dev *pdev = adapter->pdev; - union ixgbe_adv_rx_desc *rx_desc, *next_rxd; - struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; - struct sk_buff *skb; - unsigned int i; - u32 len, staterr; - u16 hdr_info; - bool cleaned = false; - int cleaned_count = 0; - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - - i = rx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - - while (staterr & IXGBE_RXD_STAT_DD) { - u32 upper_len = 0; - if (*work_done >= work_to_do) - break; - (*work_done)++; - - rmb(); /* read descriptor and rx_buffer_info after status DD */ - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc)); - len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> - IXGBE_RXDADV_HDRBUFLEN_SHIFT; - if (hdr_info & IXGBE_RXDADV_SPH) - adapter->rx_hdr_split++; - if (len > IXGBEVF_RX_HDR_SIZE) - len = IXGBEVF_RX_HDR_SIZE; - upper_len = le16_to_cpu(rx_desc->wb.upper.length); - } else { - len = le16_to_cpu(rx_desc->wb.upper.length); - } - cleaned = true; - skb = rx_buffer_info->skb; - prefetch(skb->data - NET_IP_ALIGN); - rx_buffer_info->skb = NULL; - - if (rx_buffer_info->dma) { - dma_unmap_single(&pdev->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - skb_put(skb, len); - } - - if (upper_len) { - dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, - PAGE_SIZE / 2, DMA_FROM_DEVICE); - rx_buffer_info->page_dma = 0; - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buffer_info->page, - rx_buffer_info->page_offset, - upper_len); - - if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) 
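-			    /* release the page if the second half is
-			     * unusable or someone else still holds a
-			     * reference */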
|| - (page_count(rx_buffer_info->page) != 1)) - rx_buffer_info->page = NULL; - else - get_page(rx_buffer_info->page); - - skb->len += upper_len; - skb->data_len += upper_len; - skb->truesize += upper_len; - } - - i++; - if (i == rx_ring->count) - i = 0; - - next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); - prefetch(next_rxd); - cleaned_count++; - - next_buffer = &rx_ring->rx_buffer_info[i]; - - if (!(staterr & IXGBE_RXD_STAT_EOP)) { - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - rx_buffer_info->skb = next_buffer->skb; - rx_buffer_info->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } else { - skb->next = next_buffer->skb; - skb->next->prev = skb; - } - adapter->non_eop_descs++; - goto next_desc; - } - - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { - dev_kfree_skb_irq(skb); - goto next_desc; - } - - ixgbevf_rx_checksum(adapter, staterr, skb); - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - total_rx_packets++; - - /* - * Work around issue of some types of VM to VM loop back - * packets not getting split correctly - */ - if (staterr & IXGBE_RXD_STAT_LB) { - u32 header_fixup_len = skb_headlen(skb); - if (header_fixup_len < 14) - skb_push(skb, header_fixup_len); - } - skb->protocol = eth_type_trans(skb, adapter->netdev); - - ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); - -next_desc: - rx_desc->wb.upper.status_error = 0; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { - ixgbevf_alloc_rx_buffers(adapter, rx_ring, - cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - } - - rx_ring->next_to_clean = i; - cleaned_count = IXGBE_DESC_UNUSED(rx_ring); - - if (cleaned_count) - ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); - - rx_ring->total_packets += total_rx_packets; - rx_ring->total_bytes += total_rx_bytes; - adapter->netdev->stats.rx_bytes += total_rx_bytes; - adapter->netdev->stats.rx_packets += total_rx_packets; - - return cleaned; -} - -/** - * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine - * @napi: napi struct with our devices info in it - * @budget: amount of work driver is allowed to do this pass, in packets - * - * This function is optimized for cleaning one queue only on a single - * q_vector!!! 
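- *
- * The NAPI contract being implemented: consume at most @budget
- * packets; if less than @budget work was done, call napi_complete()
- * and re-enable the ring's interrupt, otherwise stay in polling mode
- * with the interrupt masked.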
- **/ -static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget) -{ - struct ixgbevf_q_vector *q_vector = - container_of(napi, struct ixgbevf_q_vector, napi); - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbevf_ring *rx_ring = NULL; - int work_done = 0; - long r_idx; - - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - rx_ring = &(adapter->rx_ring[r_idx]); - - ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); - - /* If all Rx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - if (adapter->itr_setting & 1) - ixgbevf_set_itr_msix(q_vector); - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) - ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx); - } - - return work_done; -} - -/** - * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine - * @napi: napi struct with our devices info in it - * @budget: amount of work driver is allowed to do this pass, in packets - * - * This function will clean more than one rx queue associated with a - * q_vector. - **/ -static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget) -{ - struct ixgbevf_q_vector *q_vector = - container_of(napi, struct ixgbevf_q_vector, napi); - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbevf_ring *rx_ring = NULL; - int work_done = 0, i; - long r_idx; - u64 enable_mask = 0; - - /* attempt to distribute budget to each queue fairly, but don't allow - * the budget to go below 1 because we'll exit polling */ - budget /= (q_vector->rxr_count ?: 1); - budget = max(budget, 1); - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rxr_count; i++) { - rx_ring = &(adapter->rx_ring[r_idx]); - ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); - enable_mask |= rx_ring->v_idx; - r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); - } - -#ifndef HAVE_NETDEV_NAPI_LIST - if (!netif_running(adapter->netdev)) - work_done = 0; - -#endif - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - rx_ring = &(adapter->rx_ring[r_idx]); - - /* If all Rx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - if (adapter->itr_setting & 1) - ixgbevf_set_itr_msix(q_vector); - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) - ixgbevf_irq_enable_queues(adapter, enable_mask); - } - - return work_done; -} - - -/** - * ixgbevf_configure_msix - Configure MSI-X hardware - * @adapter: board private structure - * - * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X - * interrupts. - **/ -static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) -{ - struct ixgbevf_q_vector *q_vector; - struct ixgbe_hw *hw = &adapter->hw; - int i, j, q_vectors, v_idx, r_idx; - u32 mask; - - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* - * Populate the IVAR table and set the ITR values to the - * corresponding register. - */ - for (v_idx = 0; v_idx < q_vectors; v_idx++) { - q_vector = adapter->q_vector[v_idx]; - /* XXX for_each_set_bit(...) 
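-	 * The open-coded find_first_bit()/find_next_bit() walks below
-	 * could use that helper, e.g.:
-	 *   for_each_set_bit(r_idx, q_vector->rxr_idx,
-	 *			adapter->num_rx_queues)
-	 *	ixgbevf_set_ivar(adapter, 0,
-	 *			 adapter->rx_ring[r_idx].reg_idx, v_idx);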
*/ - r_idx = find_first_bit(q_vector->rxr_idx, - adapter->num_rx_queues); - - for (i = 0; i < q_vector->rxr_count; i++) { - j = adapter->rx_ring[r_idx].reg_idx; - ixgbevf_set_ivar(adapter, 0, j, v_idx); - r_idx = find_next_bit(q_vector->rxr_idx, - adapter->num_rx_queues, - r_idx + 1); - } - r_idx = find_first_bit(q_vector->txr_idx, - adapter->num_tx_queues); - - for (i = 0; i < q_vector->txr_count; i++) { - j = adapter->tx_ring[r_idx].reg_idx; - ixgbevf_set_ivar(adapter, 1, j, v_idx); - r_idx = find_next_bit(q_vector->txr_idx, - adapter->num_tx_queues, - r_idx + 1); - } - - /* if this is a tx only vector halve the interrupt rate */ - if (q_vector->txr_count && !q_vector->rxr_count) - q_vector->eitr = (adapter->eitr_param >> 1); - else if (q_vector->rxr_count) - /* rx only */ - q_vector->eitr = adapter->eitr_param; - - ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr); - } - - ixgbevf_set_ivar(adapter, -1, 1, v_idx); - - /* set up to autoclear timer, and the vectors */ - mask = IXGBE_EIMS_ENABLE_MASK; - mask &= ~IXGBE_EIMS_OTHER; - IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); -} - -enum latency_range { - lowest_latency = 0, - low_latency = 1, - bulk_latency = 2, - latency_invalid = 255 -}; - -/** - * ixgbevf_update_itr - update the dynamic ITR value based on statistics - * @adapter: pointer to adapter - * @eitr: eitr setting (ints per sec) to give last timeslice - * @itr_setting: current throttle rate in ints/second - * @packets: the number of packets during this measurement interval - * @bytes: the number of bytes during this measurement interval - * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. - **/ -static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter, - u32 eitr, u8 itr_setting, - int packets, int bytes) -{ - unsigned int retval = itr_setting; - u32 timepassed_us; - u64 bytes_perint; - - if (packets == 0) - goto update_itr_done; - - - /* simple throttlerate management - * 0-20MB/s lowest (100000 ints/s) - * 20-100MB/s low (20000 ints/s) - * 100-1249MB/s bulk (8000 ints/s) - */ - /* what was last interrupt timeslice? */ - timepassed_us = 1000000/eitr; - bytes_perint = bytes / timepassed_us; /* bytes/usec */ - - switch (itr_setting) { - case lowest_latency: - if (bytes_perint > adapter->eitr_low) - retval = low_latency; - break; - case low_latency: - if (bytes_perint > adapter->eitr_high) - retval = bulk_latency; - else if (bytes_perint <= adapter->eitr_low) - retval = lowest_latency; - break; - case bulk_latency: - if (bytes_perint <= adapter->eitr_high) - retval = low_latency; - break; - } - -update_itr_done: - return retval; -} - -/** - * ixgbevf_write_eitr - write VTEITR register in hardware specific way - * @adapter: pointer to adapter struct - * @v_idx: vector index into q_vector array - * @itr_reg: new value to be written in *register* format, not ints/s - * - * This function is made to be called by ethtool and by the driver - * when it needs to update VTEITR registers at runtime. Hardware - * specific quirks/differences are taken care of here. 
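- *
- * Worked conversion: a request of 20000 ints/s becomes
- * 1000000000 / (20000 * 256) = 195 in register units, and feeding
- * 195 back through the same formula returns ~20032 ints/s, which is
- * why EITR_REG_TO_INTS_PER_SEC is simply an alias of the same macro.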
- */ -static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, - u32 itr_reg) -{ - struct ixgbe_hw *hw = &adapter->hw; - - itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg); - - /* - * set the WDIS bit to not clear the timer bits and cause an - * immediate assertion of the interrupt - */ - itr_reg |= IXGBE_EITR_CNT_WDIS; - - IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); -} - -static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector) -{ - struct ixgbevf_adapter *adapter = q_vector->adapter; - u32 new_itr; - u8 current_itr, ret_itr; - int i, r_idx, v_idx = q_vector->v_idx; - struct ixgbevf_ring *rx_ring, *tx_ring; - - r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); - for (i = 0; i < q_vector->txr_count; i++) { - tx_ring = &(adapter->tx_ring[r_idx]); - ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr, - q_vector->tx_itr, - tx_ring->total_packets, - tx_ring->total_bytes); - /* if the result for this queue would decrease interrupt - * rate for this vector then use that result */ - q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? - q_vector->tx_itr - 1 : ret_itr); - r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, - r_idx + 1); - } - - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rxr_count; i++) { - rx_ring = &(adapter->rx_ring[r_idx]); - ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr, - q_vector->rx_itr, - rx_ring->total_packets, - rx_ring->total_bytes); - /* if the result for this queue would decrease interrupt - * rate for this vector then use that result */ - q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? - q_vector->rx_itr - 1 : ret_itr); - r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); - } - - current_itr = max(q_vector->rx_itr, q_vector->tx_itr); - - switch (current_itr) { - /* counts and packets in update_itr are dependent on these numbers */ - case lowest_latency: - new_itr = 100000; - break; - case low_latency: - new_itr = 20000; /* aka hwitr = ~200 */ - break; - case bulk_latency: - default: - new_itr = 8000; - break; - } - - if (new_itr != q_vector->eitr) { - u32 itr_reg; - - /* save the algorithm value here, not the smoothed one */ - q_vector->eitr = new_itr; - /* do an exponential smoothing */ - new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); - itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); - ixgbevf_write_eitr(adapter, v_idx, itr_reg); - } -} - -static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) -{ - struct net_device *netdev = data; - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 eicr; - u32 msg; - - eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS); - IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr); - - if (!hw->mbx.ops.check_for_ack(hw)) { - /* - * checking for the ack clears the PFACK bit. Place - * it back in the v2p_mailbox cache so that anyone - * polling for an ack will not miss it. Also - * avoid the read below because the code to read - * the mailbox will also clear the ack bit. This was - * causing lost acks. Just cache the bit and exit - * the IRQ handler. 
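-		 *
-		 * In short: the PFACK state is preserved in the software
-		 * cache (hw->mbx.v2p_mailbox) rather than re-read from
-		 * the register, since reading the mailbox register also
-		 * clears the bit.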
- */ - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; - goto out; - } - - /* Not an ack interrupt, go ahead and read the message */ - hw->mbx.ops.read(hw, &msg, 1); - - if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 1)); - -out: - return IRQ_HANDLED; -} - -static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data) -{ - struct ixgbevf_q_vector *q_vector = data; - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbevf_ring *tx_ring; - int i, r_idx; - - if (!q_vector->txr_count) - return IRQ_HANDLED; - - r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); - for (i = 0; i < q_vector->txr_count; i++) { - tx_ring = &(adapter->tx_ring[r_idx]); - tx_ring->total_bytes = 0; - tx_ring->total_packets = 0; - ixgbevf_clean_tx_irq(adapter, tx_ring); - r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, - r_idx + 1); - } - - if (adapter->itr_setting & 1) - ixgbevf_set_itr_msix(q_vector); - - return IRQ_HANDLED; -} - -/** - * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues) - * @irq: unused - * @data: pointer to our q_vector struct for this interrupt vector - **/ -static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data) -{ - struct ixgbevf_q_vector *q_vector = data; - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbe_hw *hw = &adapter->hw; - struct ixgbevf_ring *rx_ring; - int r_idx; - int i; - - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rxr_count; i++) { - rx_ring = &(adapter->rx_ring[r_idx]); - rx_ring->total_bytes = 0; - rx_ring->total_packets = 0; - r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); - } - - if (!q_vector->rxr_count) - return IRQ_HANDLED; - - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - rx_ring = &(adapter->rx_ring[r_idx]); - /* disable interrupts on this vector only */ - IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx); - napi_schedule(&q_vector->napi); - - - return IRQ_HANDLED; -} - -static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data) -{ - ixgbevf_msix_clean_rx(irq, data); - ixgbevf_msix_clean_tx(irq, data); - - return IRQ_HANDLED; -} - -static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, - int r_idx) -{ - struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; - - set_bit(r_idx, q_vector->rxr_idx); - q_vector->rxr_count++; - a->rx_ring[r_idx].v_idx = 1 << v_idx; -} - -static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, - int t_idx) -{ - struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; - - set_bit(t_idx, q_vector->txr_idx); - q_vector->txr_count++; - a->tx_ring[t_idx].v_idx = 1 << v_idx; -} - -/** - * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors - * @adapter: board private structure to initialize - * - * This function maps descriptor rings to the queue-specific vectors - * we were allotted through the MSI-X enabling code. Ideally, we'd have - * one vector per ring/queue, but on a constrained vector budget, we - * group the rings as "efficiently" as possible. You would add new - * mapping configurations in here. 
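- *
- * Illustrative grouping (numbers not from this driver, which is
- * limited to one queue of each type): 4 Rx queues over 2 vectors
- * gives rqpv = DIV_ROUND_UP(4, 2) = 2, so vector 0 takes Rx 0-1 and
- * vector 1 takes Rx 2-3 via DIV_ROUND_UP(2, 1) = 2.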
- **/ -static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) -{ - int q_vectors; - int v_start = 0; - int rxr_idx = 0, txr_idx = 0; - int rxr_remaining = adapter->num_rx_queues; - int txr_remaining = adapter->num_tx_queues; - int i, j; - int rqpv, tqpv; - int err = 0; - - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* - * The ideal configuration... - * We have enough vectors to map one per queue. - */ - if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { - for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) - map_vector_to_rxq(adapter, v_start, rxr_idx); - - for (; txr_idx < txr_remaining; v_start++, txr_idx++) - map_vector_to_txq(adapter, v_start, txr_idx); - goto out; - } - - /* - * If we don't have enough vectors for a 1-to-1 - * mapping, we'll have to group them so there are - * multiple queues per vector. - */ - /* Re-adjusting *qpv takes care of the remainder. */ - for (i = v_start; i < q_vectors; i++) { - rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); - for (j = 0; j < rqpv; j++) { - map_vector_to_rxq(adapter, i, rxr_idx); - rxr_idx++; - rxr_remaining--; - } - } - for (i = v_start; i < q_vectors; i++) { - tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); - for (j = 0; j < tqpv; j++) { - map_vector_to_txq(adapter, i, txr_idx); - txr_idx++; - txr_remaining--; - } - } - -out: - return err; -} - -/** - * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts - * @adapter: board private structure - * - * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests - * interrupts from the kernel. - **/ -static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - irqreturn_t (*handler)(int, void *); - int i, vector, q_vectors, err; - int ri = 0, ti = 0; - - /* Decrement for Other and TCP Timer vectors */ - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - -#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \ - ? &ixgbevf_msix_clean_many : \ - (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \ - (_v)->txr_count ? 
&ixgbevf_msix_clean_tx : \ - NULL) - for (vector = 0; vector < q_vectors; vector++) { - handler = SET_HANDLER(adapter->q_vector[vector]); - - if (handler == &ixgbevf_msix_clean_rx) { - sprintf(adapter->name[vector], "%s-%s-%d", - netdev->name, "rx", ri++); - } else if (handler == &ixgbevf_msix_clean_tx) { - sprintf(adapter->name[vector], "%s-%s-%d", - netdev->name, "tx", ti++); - } else if (handler == &ixgbevf_msix_clean_many) { - sprintf(adapter->name[vector], "%s-%s-%d", - netdev->name, "TxRx", vector); - } else { - /* skip this unused q_vector */ - continue; - } - err = request_irq(adapter->msix_entries[vector].vector, - handler, 0, adapter->name[vector], - adapter->q_vector[vector]); - if (err) { - hw_dbg(&adapter->hw, - "request_irq failed for MSIX interrupt " - "Error: %d\n", err); - goto free_queue_irqs; - } - } - - sprintf(adapter->name[vector], "%s:mbx", netdev->name); - err = request_irq(adapter->msix_entries[vector].vector, - &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev); - if (err) { - hw_dbg(&adapter->hw, - "request_irq for msix_mbx failed: %d\n", err); - goto free_queue_irqs; - } - - return 0; - -free_queue_irqs: - for (i = vector - 1; i >= 0; i--) - free_irq(adapter->msix_entries[--vector].vector, - &(adapter->q_vector[i])); - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - return err; -} - -static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) -{ - int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - for (i = 0; i < q_vectors; i++) { - struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; - bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); - bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); - q_vector->rxr_count = 0; - q_vector->txr_count = 0; - q_vector->eitr = adapter->eitr_param; - } -} - -/** - * ixgbevf_request_irq - initialize interrupts - * @adapter: board private structure - * - * Attempts to configure interrupts using the best available - * capabilities of the hardware and kernel. 
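- *
- * For this VF driver that means MSI-X only:
- * ixgbevf_request_msix_irqs() is the sole strategy attempted, with
- * no MSI or legacy INTx fallback.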
- **/ -static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) -{ - int err = 0; - - err = ixgbevf_request_msix_irqs(adapter); - - if (err) - hw_dbg(&adapter->hw, - "request_irq failed, Error %d\n", err); - - return err; -} - -static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int i, q_vectors; - - q_vectors = adapter->num_msix_vectors; - - i = q_vectors - 1; - - free_irq(adapter->msix_entries[i].vector, netdev); - i--; - - for (; i >= 0; i--) { - free_irq(adapter->msix_entries[i].vector, - adapter->q_vector[i]); - } - - ixgbevf_reset_q_vectors(adapter); -} - -/** - * ixgbevf_irq_disable - Mask off interrupt generation on the NIC - * @adapter: board private structure - **/ -static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) -{ - int i; - struct ixgbe_hw *hw = &adapter->hw; - - IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); - - IXGBE_WRITE_FLUSH(hw); - - for (i = 0; i < adapter->num_msix_vectors; i++) - synchronize_irq(adapter->msix_entries[i].vector); -} - -/** - * ixgbevf_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - **/ -static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter, - bool queues, bool flush) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 mask; - u64 qmask; - - mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); - qmask = ~0; - - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); - - if (queues) - ixgbevf_irq_enable_queues(adapter, qmask); - - if (flush) - IXGBE_WRITE_FLUSH(hw); -} - -/** - * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset - * @adapter: board private structure - * - * Configure the Tx unit of the MAC after a reset. - **/ -static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) -{ - u64 tdba; - struct ixgbe_hw *hw = &adapter->hw; - u32 i, j, tdlen, txctrl; - - /* Setup the HW Tx Head and Tail descriptor pointers */ - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbevf_ring *ring = &adapter->tx_ring[i]; - j = ring->reg_idx; - tdba = ring->dma; - tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); - IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), - (tdba & DMA_BIT_MASK(32))); - IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen); - IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0); - adapter->tx_ring[i].head = IXGBE_VFTDH(j); - adapter->tx_ring[i].tail = IXGBE_VFTDT(j); - /* Disable Tx Head Writeback RO bit, since this hoses - * bookkeeping if things aren't delivered in order. 
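-		 * (i.e. head write-back must not use PCIe relaxed
-		 * ordering, so IXGBE_DCA_TXCTRL_TX_WB_RO_EN is cleared
-		 * below.)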
- */ - txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); - txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; - IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); - } -} - -#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 - -static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) -{ - struct ixgbevf_ring *rx_ring; - struct ixgbe_hw *hw = &adapter->hw; - u32 srrctl; - - rx_ring = &adapter->rx_ring[index]; - - srrctl = IXGBE_SRRCTL_DROP_EN; - - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - u16 bufsz = IXGBEVF_RXBUFFER_2048; - /* grow the amount we can receive on large page machines */ - if (bufsz < (PAGE_SIZE / 2)) - bufsz = (PAGE_SIZE / 2); - /* cap the bufsz at our largest descriptor size */ - bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz); - - srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - srrctl |= ((IXGBEVF_RX_HDR_SIZE << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); - } else { - srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - - if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) - srrctl |= IXGBEVF_RXBUFFER_2048 >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else - srrctl |= rx_ring->rx_buf_len >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - } - IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); -} - -/** - * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset - * @adapter: board private structure - * - * Configure the Rx unit of the MAC after a reset. - **/ -static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) -{ - u64 rdba; - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i, j; - u32 rdlen; - int rx_buf_len; - - /* Decide whether to use packet split mode or not */ - if (netdev->mtu > ETH_DATA_LEN) { - if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE) - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; - else - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; - } else { - if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE) - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; - else - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; - } - - /* Set the RX buffer length according to the mode */ - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - /* PSRTYPE must be initialized in 82599 */ - u32 psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_IPV6HDR | - IXGBE_PSRTYPE_L2HDR; - IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); - rx_buf_len = IXGBEVF_RX_HDR_SIZE; - } else { - IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); - if (netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - else - rx_buf_len = ALIGN(max_frame, 1024); - } - - rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); - /* Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring */ - for (i = 0; i < adapter->num_rx_queues; i++) { - rdba = adapter->rx_ring[i].dma; - j = adapter->rx_ring[i].reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), - (rdba & DMA_BIT_MASK(32))); - IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen); - IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); - adapter->rx_ring[i].head = IXGBE_VFRDH(j); - adapter->rx_ring[i].tail = IXGBE_VFRDT(j); - adapter->rx_ring[i].rx_buf_len = rx_buf_len; - - ixgbevf_configure_srrctl(adapter, j); - } -} - -static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -{ - struct ixgbevf_adapter 
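-	/* Note: the VF has no direct access to the VLAN filter table;
-	 * set_vfta below is a mailbox request serviced by the PF, while
-	 * the active_vlans bitmap keeps a local copy that
-	 * ixgbevf_restore_vlan() replays after a reset.
-	 */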
*adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - /* add VID to filter table */ - if (hw->mac.ops.set_vfta) - hw->mac.ops.set_vfta(hw, vid, 0, true); - set_bit(vid, adapter->active_vlans); -} - -static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - /* remove VID from filter table */ - if (hw->mac.ops.set_vfta) - hw->mac.ops.set_vfta(hw, vid, 0, false); - clear_bit(vid, adapter->active_vlans); -} - -static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) -{ - u16 vid; - - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); -} - -static int ixgbevf_write_uc_addr_list(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int count = 0; - - if ((netdev_uc_count(netdev)) > 10) { - printk(KERN_ERR "Too many unicast filters - No Space\n"); - return -ENOSPC; - } - - if (!netdev_uc_empty(netdev)) { - struct netdev_hw_addr *ha; - netdev_for_each_uc_addr(ha, netdev) { - hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); - udelay(200); - } - } else { - /* - * If the list is empty then send message to PF driver to - * clear all macvlans on this VF. - */ - hw->mac.ops.set_uc_addr(hw, 0, NULL); - } - - return count; -} - -/** - * ixgbevf_set_rx_mode - Multicast set - * @netdev: network interface device structure - * - * The set_rx_method entry point is called whenever the multicast address - * list or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper multicast mode. - **/ -static void ixgbevf_set_rx_mode(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - - /* reprogram multicast list */ - if (hw->mac.ops.update_mc_addr_list) - hw->mac.ops.update_mc_addr_list(hw, netdev); - - ixgbevf_write_uc_addr_list(netdev); -} - -static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) -{ - int q_idx; - struct ixgbevf_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - struct napi_struct *napi; - q_vector = adapter->q_vector[q_idx]; - if (!q_vector->rxr_count) - continue; - napi = &q_vector->napi; - if (q_vector->rxr_count > 1) - napi->poll = &ixgbevf_clean_rxonly_many; - - napi_enable(napi); - } -} - -static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) -{ - int q_idx; - struct ixgbevf_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = adapter->q_vector[q_idx]; - if (!q_vector->rxr_count) - continue; - napi_disable(&q_vector->napi); - } -} - -static void ixgbevf_configure(struct ixgbevf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int i; - - ixgbevf_set_rx_mode(netdev); - - ixgbevf_restore_vlan(adapter); - - ixgbevf_configure_tx(adapter); - ixgbevf_configure_rx(adapter); - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbevf_ring *ring = &adapter->rx_ring[i]; - ixgbevf_alloc_rx_buffers(adapter, ring, ring->count); - ring->next_to_use = ring->count - 1; - writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail); - } -} - -#define IXGBE_MAX_RX_DESC_POLL 10 -static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, - int rxr) -{ - struct ixgbe_hw *hw 
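VLAN state above is kept in an active_vlans bitmap: add and kill set or clear one bit per VID, and restore_vlan replays every set bit into the hardware filter after a reset. A standalone sketch of the same bookkeeping over a toy 64-bit bitmap (the driver uses a 4096-bit kernel bitmap and for_each_set_bit()):

#include <stdint.h>
#include <stdio.h>

static uint64_t active_vlans;            /* toy bitmap, VIDs 0..63 only */

static void vlan_add(unsigned vid)  { active_vlans |=  (1ULL << vid); }
static void vlan_kill(unsigned vid) { active_vlans &= ~(1ULL << vid); }

static void vlan_restore(void)
{
        for (unsigned vid = 0; vid < 64; vid++)
                if (active_vlans & (1ULL << vid))
                        printf("re-adding VID %u to the HW filter\n", vid);
}

int main(void)
{
        vlan_add(5);
        vlan_add(42);
        vlan_kill(5);
        vlan_restore();                  /* prints only VID 42 */
        return 0;
}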
= &adapter->hw; - int j = adapter->rx_ring[rxr].reg_idx; - int k; - - for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { - if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) - break; - else - msleep(1); - } - if (k >= IXGBE_MAX_RX_DESC_POLL) { - hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d " - "not set within the polling period\n", rxr); - } - - ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], - (adapter->rx_ring[rxr].count - 1)); -} - -static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) -{ - /* Only save pre-reset stats if there are some */ - if (adapter->stats.vfgprc || adapter->stats.vfgptc) { - adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - - adapter->stats.base_vfgprc; - adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - - adapter->stats.base_vfgptc; - adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - - adapter->stats.base_vfgorc; - adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - - adapter->stats.base_vfgotc; - adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - - adapter->stats.base_vfmprc; - } -} - -static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); - adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); - adapter->stats.last_vfgorc |= - (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); - adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); - adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); - adapter->stats.last_vfgotc |= - (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); - adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); - - adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; - adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; - adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; - adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; - adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; -} - -static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - int i, j = 0; - int num_rx_rings = adapter->num_rx_queues; - u32 txdctl, rxdctl; - - for (i = 0; i < adapter->num_tx_queues; i++) { - j = adapter->tx_ring[i].reg_idx; - txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); - /* enable WTHRESH=8 descriptors, to encourage burst writeback */ - txdctl |= (8 << 16); - IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - j = adapter->tx_ring[i].reg_idx; - txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); - txdctl |= IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); - } - - for (i = 0; i < num_rx_rings; i++) { - j = adapter->rx_ring[i].reg_idx; - rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); - rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; - if (hw->mac.type == ixgbe_mac_X540_vf) { - rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; - rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | - IXGBE_RXDCTL_RLPML_EN); - } - IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); - ixgbevf_rx_desc_queue_enable(adapter, i); - } - - ixgbevf_configure_msix(adapter); - - if (hw->mac.ops.set_rar) { - if (is_valid_ether_addr(hw->mac.addr)) - hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); - else - hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); - } - - clear_bit(__IXGBEVF_DOWN, &adapter->state); - ixgbevf_napi_enable_all(adapter); - - /* enable transmits 
*/ - netif_tx_start_all_queues(netdev); - - ixgbevf_save_reset_stats(adapter); - ixgbevf_init_last_counter_stats(adapter); - - /* bring the link up in the watchdog, this could race with our first - * link up interrupt but shouldn't be a problem */ - adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; - adapter->link_check_timeout = jiffies; - mod_timer(&adapter->watchdog_timer, jiffies); - return 0; -} - -int ixgbevf_up(struct ixgbevf_adapter *adapter) -{ - int err; - struct ixgbe_hw *hw = &adapter->hw; - - ixgbevf_configure(adapter); - - err = ixgbevf_up_complete(adapter); - - /* clear any pending interrupts, may auto mask */ - IXGBE_READ_REG(hw, IXGBE_VTEICR); - - ixgbevf_irq_enable(adapter, true, true); - - return err; -} - -/** - * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue - * @adapter: board private structure - * @rx_ring: ring to free buffers from - **/ -static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *rx_ring) -{ - struct pci_dev *pdev = adapter->pdev; - unsigned long size; - unsigned int i; - - if (!rx_ring->rx_buffer_info) - return; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ixgbevf_rx_buffer *rx_buffer_info; - - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - if (rx_buffer_info->dma) { - dma_unmap_single(&pdev->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - } - if (rx_buffer_info->skb) { - struct sk_buff *skb = rx_buffer_info->skb; - rx_buffer_info->skb = NULL; - do { - struct sk_buff *this = skb; - skb = skb->prev; - dev_kfree_skb(this); - } while (skb); - } - if (!rx_buffer_info->page) - continue; - dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, - PAGE_SIZE / 2, DMA_FROM_DEVICE); - rx_buffer_info->page_dma = 0; - put_page(rx_buffer_info->page); - rx_buffer_info->page = NULL; - rx_buffer_info->page_offset = 0; - } - - size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - - if (rx_ring->head) - writel(0, adapter->hw.hw_addr + rx_ring->head); - if (rx_ring->tail) - writel(0, adapter->hw.hw_addr + rx_ring->tail); -} - -/** - * ixgbevf_clean_tx_ring - Free Tx Buffers - * @adapter: board private structure - * @tx_ring: ring to be cleaned - **/ -static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring) -{ - struct ixgbevf_tx_buffer *tx_buffer_info; - unsigned long size; - unsigned int i; - - if (!tx_ring->tx_buffer_info) - return; - - /* Free all the Tx ring sk_buffs */ - - for (i = 0; i < tx_ring->count; i++) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); - } - - size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; - memset(tx_ring->tx_buffer_info, 0, size); - - memset(tx_ring->desc, 0, tx_ring->size); - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - - if (tx_ring->head) - writel(0, adapter->hw.hw_addr + tx_ring->head); - if (tx_ring->tail) - writel(0, adapter->hw.hw_addr + tx_ring->tail); -} - -/** - * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues - * @adapter: board private structure - **/ -static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]); -} - -/** - * 
ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues - * @adapter: board private structure - **/ -static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]); -} - -void ixgbevf_down(struct ixgbevf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - u32 txdctl; - int i, j; - - /* signal that we are down to the interrupt handler */ - set_bit(__IXGBEVF_DOWN, &adapter->state); - /* disable receives */ - - netif_tx_disable(netdev); - - msleep(10); - - netif_tx_stop_all_queues(netdev); - - ixgbevf_irq_disable(adapter); - - ixgbevf_napi_disable_all(adapter); - - del_timer_sync(&adapter->watchdog_timer); - /* can't call flush scheduled work here because it can deadlock - * if linkwatch_event tries to acquire the rtnl_lock which we are - * holding */ - while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) - msleep(1); - - /* disable transmits in the hardware now that interrupts are off */ - for (i = 0; i < adapter->num_tx_queues; i++) { - j = adapter->tx_ring[i].reg_idx; - txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); - IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), - (txdctl & ~IXGBE_TXDCTL_ENABLE)); - } - - netif_carrier_off(netdev); - - if (!pci_channel_offline(adapter->pdev)) - ixgbevf_reset(adapter); - - ixgbevf_clean_all_tx_rings(adapter); - ixgbevf_clean_all_rx_rings(adapter); -} - -void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - WARN_ON(in_interrupt()); - - while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) - msleep(1); - - /* - * Check if PF is up before re-init. If not then skip until - * later when the PF is up and ready to service requests from - * the VF via mailbox. If the VF is up and running then the - * watchdog task will continue to schedule reset tasks until - * the PF is up and running. - */ - if (!hw->mac.ops.reset_hw(hw)) { - ixgbevf_down(adapter); - ixgbevf_up(adapter); - } - - clear_bit(__IXGBEVF_RESETTING, &adapter->state); -} - -void ixgbevf_reset(struct ixgbevf_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - - if (hw->mac.ops.reset_hw(hw)) - hw_dbg(hw, "PF still resetting\n"); - else - hw->mac.ops.init_hw(hw); - - if (is_valid_ether_addr(adapter->hw.mac.addr)) { - memcpy(netdev->dev_addr, adapter->hw.mac.addr, - netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, - netdev->addr_len); - } -} - -static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, - int vectors) -{ - int err, vector_threshold; - - /* We'll want at least 3 (vector_threshold): - * 1) TxQ[0] Cleanup - * 2) RxQ[0] Cleanup - * 3) Other (Link Status Change, etc.) - */ - vector_threshold = MIN_MSIX_COUNT; - - /* The more we get, the more we will assign to Tx/Rx Cleanup - * for the separate queues...where Rx Cleanup >= Tx Cleanup. - * Right now, we simply care about how many we'll get; we'll - * set them up later while requesting irq's. - */ - while (vectors >= vector_threshold) { - err = pci_enable_msix(adapter->pdev, adapter->msix_entries, - vectors); - if (!err) /* Success in acquiring all requested vectors. */ - break; - else if (err < 0) - vectors = 0; /* Nasty failure, quit now */ - else /* err == number of vectors we should try again with */ - vectors = err; - } - - if (vectors < vector_threshold) { - /* Can't allocate enough MSI-X interrupts? Oh well. 
- * This just means we'll go with either a single MSI - * vector or fall back to legacy interrupts. - */ - hw_dbg(&adapter->hw, - "Unable to allocate MSI-X interrupts\n"); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } else { - /* - * Adjust for only the vectors we'll use, which is minimum - * of max_msix_q_vectors + NON_Q_VECTORS, or the number of - * vectors we were allocated. - */ - adapter->num_msix_vectors = vectors; - } -} - -/* - * ixgbevf_set_num_queues: Allocate queues for device, feature dependent - * @adapter: board private structure to initialize - * - * This is the top level queue allocation routine. The order here is very - * important, starting with the "most" number of features turned on at once, - * and ending with the smallest set of features. This way large combinations - * can be allocated if they're turned on, and smaller combinations are the - * fallthrough conditions. - * - **/ -static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) -{ - /* Start with base case */ - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - adapter->num_rx_pools = adapter->num_rx_queues; - adapter->num_rx_queues_per_pool = 1; -} - -/** - * ixgbevf_alloc_queues - Allocate memory for all rings - * @adapter: board private structure to initialize - * - * We allocate one ring per queue at run-time since we don't know the - * number of queues at compile-time. The polling_netdev array is - * intended for Multiqueue, but should work fine with a single queue. - **/ -static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) -{ - int i; - - adapter->tx_ring = kcalloc(adapter->num_tx_queues, - sizeof(struct ixgbevf_ring), GFP_KERNEL); - if (!adapter->tx_ring) - goto err_tx_ring_allocation; - - adapter->rx_ring = kcalloc(adapter->num_rx_queues, - sizeof(struct ixgbevf_ring), GFP_KERNEL); - if (!adapter->rx_ring) - goto err_rx_ring_allocation; - - for (i = 0; i < adapter->num_tx_queues; i++) { - adapter->tx_ring[i].count = adapter->tx_ring_count; - adapter->tx_ring[i].queue_index = i; - adapter->tx_ring[i].reg_idx = i; - } - - for (i = 0; i < adapter->num_rx_queues; i++) { - adapter->rx_ring[i].count = adapter->rx_ring_count; - adapter->rx_ring[i].queue_index = i; - adapter->rx_ring[i].reg_idx = i; - } - - return 0; - -err_rx_ring_allocation: - kfree(adapter->tx_ring); -err_tx_ring_allocation: - return -ENOMEM; -} - -/** - * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported - * @adapter: board private structure to initialize - * - * Attempt to configure the interrupts using the best available - * capabilities of the hardware and the kernel. - **/ -static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) -{ - int err = 0; - int vector, v_budget; - - /* - * It's easy to be greedy for MSI-X vectors, but it really - * doesn't do us much good if we have a lot more vectors - * than CPU's. So let's be conservative and only ask for - * (roughly) twice the number of vectors as there are CPU's. - */ - v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, - (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; - - /* A failure in MSI-X entry allocation isn't fatal, but it does - * mean we disable MSI-X capabilities of the adapter. 
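The acquisition loop above relies on the old pci_enable_msix() contract: zero means success, a negative value is a hard failure, and a positive return is how many vectors the platform can actually provide, which the caller retries with. A standalone simulation of that negotiation, with fake_enable_msix() as a hypothetical stand-in:

#include <stdio.h>

#define AVAILABLE 5   /* pretend the platform can hand out 5 vectors */

/* Mimics the old pci_enable_msix() return convention. */
static int fake_enable_msix(int requested)
{
        if (requested <= AVAILABLE)
                return 0;          /* success */
        return AVAILABLE;          /* retry hint: this many are available */
}

int main(void)
{
        int vectors = 9, threshold = 3, err;

        while (vectors >= threshold) {
                err = fake_enable_msix(vectors);
                if (!err)
                        break;             /* got everything we asked for */
                if (err < 0)
                        vectors = 0;       /* hard failure, give up */
                else
                        vectors = err;     /* retry with what's available */
        }
        printf("settled on %d vectors\n", vectors);  /* prints 5 */
        return 0;
}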
*/ - adapter->msix_entries = kcalloc(v_budget, - sizeof(struct msix_entry), GFP_KERNEL); - if (!adapter->msix_entries) { - err = -ENOMEM; - goto out; - } - - for (vector = 0; vector < v_budget; vector++) - adapter->msix_entries[vector].entry = vector; - - ixgbevf_acquire_msix_vectors(adapter, v_budget); - -out: - return err; -} - -/** - * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors - * @adapter: board private structure to initialize - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. - **/ -static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) -{ - int q_idx, num_q_vectors; - struct ixgbevf_q_vector *q_vector; - int napi_vectors; - int (*poll)(struct napi_struct *, int); - - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - napi_vectors = adapter->num_rx_queues; - poll = &ixgbevf_clean_rxonly; - - for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); - if (!q_vector) - goto err_out; - q_vector->adapter = adapter; - q_vector->v_idx = q_idx; - q_vector->eitr = adapter->eitr_param; - if (q_idx < napi_vectors) - netif_napi_add(adapter->netdev, &q_vector->napi, - (*poll), 64); - adapter->q_vector[q_idx] = q_vector; - } - - return 0; - -err_out: - while (q_idx) { - q_idx--; - q_vector = adapter->q_vector[q_idx]; - netif_napi_del(&q_vector->napi); - kfree(q_vector); - adapter->q_vector[q_idx] = NULL; - } - return -ENOMEM; -} - -/** - * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors - * @adapter: board private structure to initialize - * - * This function frees the memory allocated to the q_vectors. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. - **/ -static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) -{ - int q_idx, num_q_vectors; - int napi_vectors; - - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - napi_vectors = adapter->num_rx_queues; - - for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; - - adapter->q_vector[q_idx] = NULL; - if (q_idx < napi_vectors) - netif_napi_del(&q_vector->napi); - kfree(q_vector); - } -} - -/** - * ixgbevf_reset_interrupt_capability - Reset MSIX setup - * @adapter: board private structure - * - **/ -static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) -{ - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; -} - -/** - * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init - * @adapter: board private structure to initialize - * - **/ -static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) -{ - int err; - - /* Number of supported queues */ - ixgbevf_set_num_queues(adapter); - - err = ixgbevf_set_interrupt_capability(adapter); - if (err) { - hw_dbg(&adapter->hw, - "Unable to setup interrupt capabilities\n"); - goto err_set_interrupt; - } - - err = ixgbevf_alloc_q_vectors(adapter); - if (err) { - hw_dbg(&adapter->hw, "Unable to allocate memory for queue " - "vectors\n"); - goto err_alloc_q_vectors; - } - - err = ixgbevf_alloc_queues(adapter); - if (err) { - printk(KERN_ERR "Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - - hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " - "Tx Queue count = %u\n", - (adapter->num_rx_queues > 1) ? 
"Enabled" : - "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); - - set_bit(__IXGBEVF_DOWN, &adapter->state); - - return 0; -err_alloc_queues: - ixgbevf_free_q_vectors(adapter); -err_alloc_q_vectors: - ixgbevf_reset_interrupt_capability(adapter); -err_set_interrupt: - return err; -} - -/** - * ixgbevf_sw_init - Initialize general software structures - * (struct ixgbevf_adapter) - * @adapter: board private structure to initialize - * - * ixgbevf_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). - **/ -static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - int err; - - /* PCI config space info */ - - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->revision_id = pdev->revision; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - - hw->mbx.ops.init_params(hw); - hw->mac.max_tx_queues = MAX_TX_QUEUES; - hw->mac.max_rx_queues = MAX_RX_QUEUES; - err = hw->mac.ops.reset_hw(hw); - if (err) { - dev_info(&pdev->dev, - "PF still in reset state, assigning new address\n"); - dev_hw_addr_random(adapter->netdev, hw->mac.addr); - } else { - err = hw->mac.ops.init_hw(hw); - if (err) { - printk(KERN_ERR "init_shared_code failed: %d\n", err); - goto out; - } - } - - /* Enable dynamic interrupt throttling rates */ - adapter->eitr_param = 20000; - adapter->itr_setting = 1; - - /* set defaults for eitr in MegaBytes */ - adapter->eitr_low = 10; - adapter->eitr_high = 20; - - /* set default ring sizes */ - adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; - adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; - - /* enable rx csum by default */ - adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; - - set_bit(__IXGBEVF_DOWN, &adapter->state); - -out: - return err; -} - -#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ - { \ - u32 current_counter = IXGBE_READ_REG(hw, reg); \ - if (current_counter < last_counter) \ - counter += 0x100000000LL; \ - last_counter = current_counter; \ - counter &= 0xFFFFFFFF00000000LL; \ - counter |= current_counter; \ - } - -#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ - { \ - u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ - u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ - u64 current_counter = (current_counter_msb << 32) | \ - current_counter_lsb; \ - if (current_counter < last_counter) \ - counter += 0x1000000000LL; \ - last_counter = current_counter; \ - counter &= 0xFFFFFFF000000000LL; \ - counter |= current_counter; \ - } -/** - * ixgbevf_update_stats - Update the board statistics counters. 
- * @adapter: board private structure - **/ -void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, - adapter->stats.vfgprc); - UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, - adapter->stats.vfgptc); - UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, - adapter->stats.last_vfgorc, - adapter->stats.vfgorc); - UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, - adapter->stats.last_vfgotc, - adapter->stats.vfgotc); - UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, - adapter->stats.vfmprc); - - /* Fill out the OS statistics structure */ - adapter->netdev->stats.multicast = adapter->stats.vfmprc - - adapter->stats.base_vfmprc; -} - -/** - * ixgbevf_watchdog - Timer Call-back - * @data: pointer to adapter cast into an unsigned long - **/ -static void ixgbevf_watchdog(unsigned long data) -{ - struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; - struct ixgbe_hw *hw = &adapter->hw; - u64 eics = 0; - int i; - - /* - * Do the watchdog outside of interrupt context due to the lovely - * delays that some of the newer hardware requires - */ - - if (test_bit(__IXGBEVF_DOWN, &adapter->state)) - goto watchdog_short_circuit; - - /* get one bit for every active tx/rx interrupt vector */ - for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { - struct ixgbevf_q_vector *qv = adapter->q_vector[i]; - if (qv->rxr_count || qv->txr_count) - eics |= (1 << i); - } - - IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics); - -watchdog_short_circuit: - schedule_work(&adapter->watchdog_task); -} - -/** - * ixgbevf_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - **/ -static void ixgbevf_tx_timeout(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - /* Do the reset outside of interrupt context */ - schedule_work(&adapter->reset_task); -} - -static void ixgbevf_reset_task(struct work_struct *work) -{ - struct ixgbevf_adapter *adapter; - adapter = container_of(work, struct ixgbevf_adapter, reset_task); - - /* If we're already down or resetting, just bail */ - if (test_bit(__IXGBEVF_DOWN, &adapter->state) || - test_bit(__IXGBEVF_RESETTING, &adapter->state)) - return; - - adapter->tx_timeout_count++; - - ixgbevf_reinit_locked(adapter); -} - -/** - * ixgbevf_watchdog_task - worker thread to bring link up - * @work: pointer to work_struct containing our data - **/ -static void ixgbevf_watchdog_task(struct work_struct *work) -{ - struct ixgbevf_adapter *adapter = container_of(work, - struct ixgbevf_adapter, - watchdog_task); - struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - u32 link_speed = adapter->link_speed; - bool link_up = adapter->link_up; - - adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; - - /* - * Always check the link on the watchdog because we have - * no LSC interrupt - */ - if (hw->mac.ops.check_link) { - if ((hw->mac.ops.check_link(hw, &link_speed, - &link_up, false)) != 0) { - adapter->link_up = link_up; - adapter->link_speed = link_speed; - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - schedule_work(&adapter->reset_task); - goto pf_has_reset; - } - } else { - /* always assume link is up, if no check link - * function */ - link_speed = IXGBE_LINK_SPEED_10GB_FULL; - link_up = true; - } - adapter->link_up = link_up; - adapter->link_speed = link_speed; - - if (link_up) { - if 
(!netif_carrier_ok(netdev)) { - hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", - (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? - 10 : 1); - netif_carrier_on(netdev); - netif_tx_wake_all_queues(netdev); - } - } else { - adapter->link_up = false; - adapter->link_speed = 0; - if (netif_carrier_ok(netdev)) { - hw_dbg(&adapter->hw, "NIC Link is Down\n"); - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - } - } - - ixgbevf_update_stats(adapter); - -pf_has_reset: - /* Reset the timer */ - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + (2 * HZ))); - - adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; -} - -/** - * ixgbevf_free_tx_resources - Free Tx Resources per Queue - * @adapter: board private structure - * @tx_ring: Tx descriptor ring for a specific queue - * - * Free all transmit software resources - **/ -void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring) -{ - struct pci_dev *pdev = adapter->pdev; - - ixgbevf_clean_tx_ring(adapter, tx_ring); - - vfree(tx_ring->tx_buffer_info); - tx_ring->tx_buffer_info = NULL; - - dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, - tx_ring->dma); - - tx_ring->desc = NULL; -} - -/** - * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues - * @adapter: board private structure - * - * Free all transmit software resources - **/ -static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) - if (adapter->tx_ring[i].desc) - ixgbevf_free_tx_resources(adapter, - &adapter->tx_ring[i]); - -} - -/** - * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) - * @adapter: board private structure - * @tx_ring: tx descriptor ring (for a specific queue) to setup - * - * Return 0 on success, negative on failure - **/ -int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring) -{ - struct pci_dev *pdev = adapter->pdev; - int size; - - size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; - tx_ring->tx_buffer_info = vzalloc(size); - if (!tx_ring->tx_buffer_info) - goto err; - - /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); - - tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->desc) - goto err; - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - tx_ring->work_limit = tx_ring->count; - return 0; - -err: - vfree(tx_ring->tx_buffer_info); - tx_ring->tx_buffer_info = NULL; - hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " - "descriptor ring\n"); - return -ENOMEM; -} - -/** - * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources - * @adapter: board private structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. 
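setup_tx_resources above sizes the descriptor ring as count times the descriptor size and rounds the result up to a 4 KB boundary before handing it to dma_alloc_coherent(). A standalone sketch of that sizing, assuming the 16-byte advanced descriptor:

#include <assert.h>
#include <stddef.h>

#define DESC_SIZE 16u  /* sizeof(union ixgbe_adv_tx_desc) */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
        assert(ALIGN_UP(512u * DESC_SIZE, 4096) == 8192);  /* already aligned */
        assert(ALIGN_UP( 80u * DESC_SIZE, 4096) == 4096);  /* rounded up */
        return 0;
}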
- * - * Return 0 on success, negative on failure - **/ -static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_tx_queues; i++) { - err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]); - if (!err) - continue; - hw_dbg(&adapter->hw, - "Allocation for Tx Queue %u failed\n", i); - break; - } - - return err; -} - -/** - * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) - * @adapter: board private structure - * @rx_ring: rx descriptor ring (for a specific queue) to setup - * - * Returns 0 on success, negative on failure - **/ -int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *rx_ring) -{ - struct pci_dev *pdev = adapter->pdev; - int size; - - size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; - rx_ring->rx_buffer_info = vzalloc(size); - if (!rx_ring->rx_buffer_info) { - hw_dbg(&adapter->hw, - "Unable to vmalloc buffer memory for " - "the receive descriptor ring\n"); - goto alloc_failed; - } - - /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); - rx_ring->size = ALIGN(rx_ring->size, 4096); - - rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - - if (!rx_ring->desc) { - hw_dbg(&adapter->hw, - "Unable to allocate memory for " - "the receive descriptor ring\n"); - vfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; - goto alloc_failed; - } - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - - return 0; -alloc_failed: - return -ENOMEM; -} - -/** - * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources - * @adapter: board private structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. - * - * Return 0 on success, negative on failure - **/ -static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_rx_queues; i++) { - err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); - if (!err) - continue; - hw_dbg(&adapter->hw, - "Allocation for Rx Queue %u failed\n", i); - break; - } - return err; -} - -/** - * ixgbevf_free_rx_resources - Free Rx Resources - * @adapter: board private structure - * @rx_ring: ring to clean the resources from - * - * Free all receive software resources - **/ -void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *rx_ring) -{ - struct pci_dev *pdev = adapter->pdev; - - ixgbevf_clean_rx_ring(adapter, rx_ring); - - vfree(rx_ring->rx_buffer_info); - rx_ring->rx_buffer_info = NULL; - - dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); - - rx_ring->desc = NULL; -} - -/** - * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues - * @adapter: board private structure - * - * Free all receive software resources - **/ -static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) - if (adapter->rx_ring[i].desc) - ixgbevf_free_rx_resources(adapter, - &adapter->rx_ring[i]); -} - -/** - * ixgbevf_open - Called when a network interface is made active - * @netdev: network interface device structure - * - * Returns 0 on success, negative value on failure - * - * The open entry point is called when a network interface is made - * active by the system (IFF_UP). 
At this point all resources needed - * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. - **/ -static int ixgbevf_open(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int err; - - /* disallow open during test */ - if (test_bit(__IXGBEVF_TESTING, &adapter->state)) - return -EBUSY; - - if (hw->adapter_stopped) { - ixgbevf_reset(adapter); - /* if adapter is still stopped then PF isn't up and - * the vf can't start. */ - if (hw->adapter_stopped) { - err = IXGBE_ERR_MBX; - printk(KERN_ERR "Unable to start - perhaps the PF" - " Driver isn't up yet\n"); - goto err_setup_reset; - } - } - - /* allocate transmit descriptors */ - err = ixgbevf_setup_all_tx_resources(adapter); - if (err) - goto err_setup_tx; - - /* allocate receive descriptors */ - err = ixgbevf_setup_all_rx_resources(adapter); - if (err) - goto err_setup_rx; - - ixgbevf_configure(adapter); - - /* - * Map the Tx/Rx rings to the vectors we were allotted. - * if request_irq will be called in this function map_rings - * must be called *before* up_complete - */ - ixgbevf_map_rings_to_vectors(adapter); - - err = ixgbevf_up_complete(adapter); - if (err) - goto err_up; - - /* clear any pending interrupts, may auto mask */ - IXGBE_READ_REG(hw, IXGBE_VTEICR); - err = ixgbevf_request_irq(adapter); - if (err) - goto err_req_irq; - - ixgbevf_irq_enable(adapter, true, true); - - return 0; - -err_req_irq: - ixgbevf_down(adapter); -err_up: - ixgbevf_free_irq(adapter); -err_setup_rx: - ixgbevf_free_all_rx_resources(adapter); -err_setup_tx: - ixgbevf_free_all_tx_resources(adapter); - ixgbevf_reset(adapter); - -err_setup_reset: - - return err; -} - -/** - * ixgbevf_close - Disables a network interface - * @netdev: network interface device structure - * - * Returns 0, this is not allowed to fail - * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. 
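ixgbevf_open above unwinds failures with the usual kernel goto ladder: each acquisition step jumps to a label that releases everything obtained so far, in reverse order. A standalone skeleton of the pattern; the step names are illustrative, not the driver's:

#include <stdio.h>

static int get_tx(void)  { puts("alloc tx");    return 0; }
static int get_rx(void)  { puts("alloc rx");    return -1; } /* simulate failure */
static int get_irq(void) { puts("request irq"); return 0; }
static void put_tx(void) { puts("free tx"); }
static void put_rx(void) { puts("free rx"); }

static int open_example(void)
{
        int err;

        err = get_tx();
        if (err)
                goto err_tx;
        err = get_rx();
        if (err)
                goto err_rx;
        err = get_irq();
        if (err)
                goto err_irq;
        return 0;

err_irq:
        put_rx();      /* labels fall through, releasing in reverse order */
err_rx:
        put_tx();
err_tx:
        return err;
}

int main(void) { return open_example() ? 1 : 0; }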
- **/ -static int ixgbevf_close(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); - - ixgbevf_free_all_tx_resources(adapter); - ixgbevf_free_all_rx_resources(adapter); - - return 0; -} - -static int ixgbevf_tso(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) -{ - struct ixgbe_adv_tx_context_desc *context_desc; - unsigned int i; - int err; - struct ixgbevf_tx_buffer *tx_buffer_info; - u32 vlan_macip_lens = 0, type_tucmd_mlhl; - u32 mss_l4len_idx, l4len; - - if (skb_is_gso(skb)) { - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } - l4len = tcp_hdrlen(skb); - *hdr_len += l4len; - - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - adapter->hw_tso_ctxt++; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - adapter->hw_tso6_ctxt++; - } - - i = tx_ring->next_to_use; - - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); - - /* VLAN MACLEN IPLEN */ - if (tx_flags & IXGBE_TX_FLAGS_VLAN) - vlan_macip_lens |= - (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); - vlan_macip_lens |= ((skb_network_offset(skb)) << - IXGBE_ADVTXD_MACLEN_SHIFT); - *hdr_len += skb_network_offset(skb); - vlan_macip_lens |= - (skb_transport_header(skb) - skb_network_header(skb)); - *hdr_len += - (skb_transport_header(skb) - skb_network_header(skb)); - context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; - - /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | - IXGBE_ADVTXD_DTYP_CTXT); - - if (skb->protocol == htons(ETH_P_IP)) - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); - - /* MSS L4LEN IDX */ - mss_l4len_idx = - (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); - mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); - /* use index 1 for TSO */ - mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT); - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - - tx_buffer_info->time_stamp = jiffies; - tx_buffer_info->next_to_watch = i; - - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; - - return true; - } - - return false; -} - -static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags) -{ - struct ixgbe_adv_tx_context_desc *context_desc; - unsigned int i; - struct ixgbevf_tx_buffer *tx_buffer_info; - u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; - - if (skb->ip_summed == CHECKSUM_PARTIAL || - (tx_flags & IXGBE_TX_FLAGS_VLAN)) { - i = tx_ring->next_to_use; - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); - - if (tx_flags & IXGBE_TX_FLAGS_VLAN) - vlan_macip_lens |= (tx_flags & - IXGBE_TX_FLAGS_VLAN_MASK); - vlan_macip_lens |= (skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT); - if (skb->ip_summed == CHECKSUM_PARTIAL) - vlan_macip_lens |= (skb_transport_header(skb) - - skb_network_header(skb)); - - context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 
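The vlan_macip_lens word assembled above packs three fields into 32 bits: the VLAN tag in the top half, the MAC header length shifted up by 9, and the L3 header length in the low 9 bits. A standalone sketch of the packing, assuming the MACLEN shift of 9 that the ixgbe family uses:

#include <stdint.h>
#include <assert.h>

#define ADVTXD_MACLEN_SHIFT 9  /* assumed IXGBE_ADVTXD_MACLEN_SHIFT value */

static uint32_t pack_vlan_macip_lens(uint16_t vlan, uint8_t maclen, uint16_t iplen)
{
        return ((uint32_t)vlan << 16) |
               ((uint32_t)maclen << ADVTXD_MACLEN_SHIFT) |
               (iplen & 0x1FFu);
}

int main(void)
{
        /* VLAN 100, 14-byte Ethernet header, 20-byte IPv4 header */
        uint32_t v = pack_vlan_macip_lens(100, 14, 20);

        assert((v >> 16) == 100);                           /* VLAN tag   */
        assert(((v >> ADVTXD_MACLEN_SHIFT) & 0x7Fu) == 14); /* MAC length */
        assert((v & 0x1FFu) == 20);                         /* IP length  */
        return 0;
}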
- context_desc->seqnum_seed = 0; - - type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | - IXGBE_ADVTXD_DTYP_CTXT); - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - switch (skb->protocol) { - case __constant_htons(ETH_P_IP): - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - type_tucmd_mlhl |= - IXGBE_ADVTXD_TUCMD_L4T_TCP; - break; - case __constant_htons(ETH_P_IPV6): - /* XXX what about other V6 headers?? */ - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - type_tucmd_mlhl |= - IXGBE_ADVTXD_TUCMD_L4T_TCP; - break; - default: - if (unlikely(net_ratelimit())) { - printk(KERN_WARNING - "partial checksum but " - "proto=%x!\n", - skb->protocol); - } - break; - } - } - - context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); - /* use index zero for tx checksum offload */ - context_desc->mss_l4len_idx = 0; - - tx_buffer_info->time_stamp = jiffies; - tx_buffer_info->next_to_watch = i; - - adapter->hw_csum_tx_good++; - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; - - return true; - } - - return false; -} - -static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, - unsigned int first) -{ - struct pci_dev *pdev = adapter->pdev; - struct ixgbevf_tx_buffer *tx_buffer_info; - unsigned int len; - unsigned int total = skb->len; - unsigned int offset = 0, size; - int count = 0; - unsigned int nr_frags = skb_shinfo(skb)->nr_frags; - unsigned int f; - int i; - - i = tx_ring->next_to_use; - - len = min(skb_headlen(skb), total); - while (len) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); - - tx_buffer_info->length = size; - tx_buffer_info->mapped_as_page = false; - tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev, - skb->data + offset, - size, DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) - goto dma_error; - tx_buffer_info->time_stamp = jiffies; - tx_buffer_info->next_to_watch = i; - - len -= size; - total -= size; - offset += size; - count++; - i++; - if (i == tx_ring->count) - i = 0; - } - - for (f = 0; f < nr_frags; f++) { - struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; - len = min((unsigned int)frag->size, total); - offset = frag->page_offset; - - while (len) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); - - tx_buffer_info->length = size; - tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, - frag->page, - offset, - size, - DMA_TO_DEVICE); - tx_buffer_info->mapped_as_page = true; - if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) - goto dma_error; - tx_buffer_info->time_stamp = jiffies; - tx_buffer_info->next_to_watch = i; - - len -= size; - total -= size; - offset += size; - count++; - i++; - if (i == tx_ring->count) - i = 0; - } - if (total == 0) - break; - } - - if (i == 0) - i = tx_ring->count - 1; - else - i = i - 1; - tx_ring->tx_buffer_info[i].skb = skb; - tx_ring->tx_buffer_info[first].next_to_watch = i; - - return count; - -dma_error: - dev_err(&pdev->dev, "TX DMA map failed\n"); - - /* clear timestamp and dma mappings for failed tx_buffer_info map */ - tx_buffer_info->dma = 0; - tx_buffer_info->time_stamp = 0; - tx_buffer_info->next_to_watch = 0; - count--; - - /* clear timestamp and dma mappings for remaining portion of packet */ - while (count >= 0) { - count--; - i--; - if (i < 0) - i += tx_ring->count; - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - 
ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); - } - - return count; -} - -static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring, int tx_flags, - int count, u32 paylen, u8 hdr_len) -{ - union ixgbe_adv_tx_desc *tx_desc = NULL; - struct ixgbevf_tx_buffer *tx_buffer_info; - u32 olinfo_status = 0, cmd_type_len = 0; - unsigned int i; - - u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; - - cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; - - cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; - - if (tx_flags & IXGBE_TX_FLAGS_VLAN) - cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; - - if (tx_flags & IXGBE_TX_FLAGS_TSO) { - cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; - - olinfo_status |= IXGBE_TXD_POPTS_TXSM << - IXGBE_ADVTXD_POPTS_SHIFT; - - /* use index 1 context for tso */ - olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); - if (tx_flags & IXGBE_TX_FLAGS_IPV4) - olinfo_status |= IXGBE_TXD_POPTS_IXSM << - IXGBE_ADVTXD_POPTS_SHIFT; - - } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) - olinfo_status |= IXGBE_TXD_POPTS_TXSM << - IXGBE_ADVTXD_POPTS_SHIFT; - - olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); - - i = tx_ring->next_to_use; - while (count--) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); - tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); - tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type_len | tx_buffer_info->length); - tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); - i++; - if (i == tx_ring->count) - i = 0; - } - - tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); - - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - - tx_ring->next_to_use = i; - writel(i, adapter->hw.hw_addr + tx_ring->tail); -} - -static int __ixgbevf_maybe_stop_tx(struct net_device *netdev, - struct ixgbevf_ring *tx_ring, int size) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - netif_stop_subqueue(netdev, tx_ring->queue_index); - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. */ - if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! 
- use start_queue because it doesn't call schedule */ - netif_start_subqueue(netdev, tx_ring->queue_index); - ++adapter->restart_queue; - return 0; -} - -static int ixgbevf_maybe_stop_tx(struct net_device *netdev, - struct ixgbevf_ring *tx_ring, int size) -{ - if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size); -} - -static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbevf_ring *tx_ring; - unsigned int first; - unsigned int tx_flags = 0; - u8 hdr_len = 0; - int r_idx = 0, tso; - int count = 0; - - unsigned int f; - - tx_ring = &adapter->tx_ring[r_idx]; - - if (vlan_tx_tag_present(skb)) { - tx_flags |= vlan_tx_tag_get(skb); - tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IXGBE_TX_FLAGS_VLAN; - } - - /* four things can cause us to need a context descriptor */ - if (skb_is_gso(skb) || - (skb->ip_summed == CHECKSUM_PARTIAL) || - (tx_flags & IXGBE_TX_FLAGS_VLAN)) - count++; - - count += TXD_USE_COUNT(skb_headlen(skb)); - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); - - if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) { - adapter->tx_busy++; - return NETDEV_TX_BUSY; - } - - first = tx_ring->next_to_use; - - if (skb->protocol == htons(ETH_P_IP)) - tx_flags |= IXGBE_TX_FLAGS_IPV4; - tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (tso) - tx_flags |= IXGBE_TX_FLAGS_TSO; - else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) && - (skb->ip_summed == CHECKSUM_PARTIAL)) - tx_flags |= IXGBE_TX_FLAGS_CSUM; - - ixgbevf_tx_queue(adapter, tx_ring, tx_flags, - ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first), - skb->len, hdr_len); - - ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); - - return NETDEV_TX_OK; -} - -/** - * ixgbevf_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int ixgbevf_set_mac(struct net_device *netdev, void *p) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - - if (hw->mac.ops.set_rar) - hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); - - return 0; -} - -/** - * ixgbevf_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - **/ -static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; - u32 msg[2]; - - if (adapter->hw.mac.type == ixgbe_mac_X540_vf) - max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; - - /* MTU < 68 is an error and causes problems on some kernels */ - if ((new_mtu < 68) || (max_frame > max_possible_frame)) - return -EINVAL; - - hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); - /* must set new MTU before calling down or up */ - netdev->mtu = new_mtu; 
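__ixgbevf_maybe_stop_tx above is the standard netdev flow-control idiom: stop the queue first, let a memory barrier order the stop against the cleanup path, then re-count free descriptors and restart if room appeared in the window; checking before stopping could park the queue forever. The decision logic condensed into a standalone sketch, with smp_mb() elided and the concurrent cleanup simulated:

#include <stdio.h>

static int desc_unused = 2;

static int maybe_stop_tx(int needed)
{
        if (desc_unused >= needed)
                return 0;          /* fast path: enough room, queue stays up */

        puts("stop queue");
        /* smp_mb() here in the real code, then re-check: another CPU's
         * Tx cleanup may have freed descriptors after our first read. */
        desc_unused = 5;           /* pretend cleanup ran meanwhile */
        if (desc_unused < needed)
                return -1;         /* genuinely full: stay stopped */

        puts("restart queue");     /* a reprieve */
        return 0;
}

int main(void)
{
        return maybe_stop_tx(4) ? 1 : 0;  /* stops, then restarts */
}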
- - msg[0] = IXGBE_VF_SET_LPE; - msg[1] = max_frame; - hw->mbx.ops.write_posted(hw, msg, 2); - - if (netif_running(netdev)) - ixgbevf_reinit_locked(adapter); - - return 0; -} - -static void ixgbevf_shutdown(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - netif_device_detach(netdev); - - if (netif_running(netdev)) { - ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); - ixgbevf_free_all_tx_resources(adapter); - ixgbevf_free_all_rx_resources(adapter); - } - -#ifdef CONFIG_PM - pci_save_state(pdev); -#endif - - pci_disable_device(pdev); -} - -static const struct net_device_ops ixgbe_netdev_ops = { - .ndo_open = ixgbevf_open, - .ndo_stop = ixgbevf_close, - .ndo_start_xmit = ixgbevf_xmit_frame, - .ndo_set_rx_mode = ixgbevf_set_rx_mode, - .ndo_set_multicast_list = ixgbevf_set_rx_mode, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = ixgbevf_set_mac, - .ndo_change_mtu = ixgbevf_change_mtu, - .ndo_tx_timeout = ixgbevf_tx_timeout, - .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, -}; - -static void ixgbevf_assign_netdev_ops(struct net_device *dev) -{ - dev->netdev_ops = &ixgbe_netdev_ops; - ixgbevf_set_ethtool_ops(dev); - dev->watchdog_timeo = 5 * HZ; -} - -/** - * ixgbevf_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in ixgbevf_pci_tbl - * - * Returns 0 on success, negative on failure - * - * ixgbevf_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. - **/ -static int __devinit ixgbevf_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct ixgbevf_adapter *adapter = NULL; - struct ixgbe_hw *hw = NULL; - const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; - static int cards_found; - int err, pci_using_dac; - - err = pci_enable_device(pdev); - if (err) - return err; - - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && - !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { - pci_using_dac = 1; - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); - goto err_dma; - } - } - pci_using_dac = 0; - } - - err = pci_request_regions(pdev, ixgbevf_driver_name); - if (err) { - dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); - goto err_pci_reg; - } - - pci_set_master(pdev); - -#ifdef HAVE_TX_MQ - netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), - MAX_TX_QUEUES); -#else - netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter)); -#endif - if (!netdev) { - err = -ENOMEM; - goto err_alloc_etherdev; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - pci_set_drvdata(pdev, netdev); - adapter = netdev_priv(netdev); - - adapter->netdev = netdev; - adapter->pdev = pdev; - hw = &adapter->hw; - hw->back = adapter; - adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - - /* - * call save state here in standalone driver because it relies on - * adapter struct to exist, and needs to call netdev_priv - */ - pci_save_state(pdev); - - hw->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!hw->hw_addr) { - err = -EIO; - goto err_ioremap; - } - - ixgbevf_assign_netdev_ops(netdev); - - adapter->bd_number = 
cards_found; - - /* Setup hw api */ - memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); - hw->mac.type = ii->mac; - - memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, - sizeof(struct ixgbe_mbx_operations)); - - adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; - adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE; - - /* setup the private structure */ - err = ixgbevf_sw_init(adapter); - - netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; - - netdev->features |= NETIF_F_IPV6_CSUM; - netdev->features |= NETIF_F_TSO; - netdev->features |= NETIF_F_TSO6; - netdev->features |= NETIF_F_GRO; - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_IP_CSUM; - netdev->vlan_features |= NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_SG; - - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; - - /* The HW MAC address was set and/or determined in sw_init */ - memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); - - if (!is_valid_ether_addr(netdev->dev_addr)) { - printk(KERN_ERR "invalid MAC address\n"); - err = -EIO; - goto err_sw_init; - } - - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = ixgbevf_watchdog; - adapter->watchdog_timer.data = (unsigned long)adapter; - - INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); - INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); - - err = ixgbevf_init_interrupt_scheme(adapter); - if (err) - goto err_sw_init; - - /* pick up the PCI bus settings for reporting later */ - if (hw->mac.ops.get_bus_info) - hw->mac.ops.get_bus_info(hw); - - strcpy(netdev->name, "eth%d"); - - err = register_netdev(netdev); - if (err) - goto err_register; - - adapter->netdev_registered = true; - - netif_carrier_off(netdev); - - ixgbevf_init_last_counter_stats(adapter); - - /* print the MAC address */ - hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", - netdev->dev_addr[0], - netdev->dev_addr[1], - netdev->dev_addr[2], - netdev->dev_addr[3], - netdev->dev_addr[4], - netdev->dev_addr[5]); - - hw_dbg(hw, "MAC: %d\n", hw->mac.type); - - hw_dbg(hw, "LRO is disabled\n"); - - hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); - cards_found++; - return 0; - -err_register: -err_sw_init: - ixgbevf_reset_interrupt_capability(adapter); - iounmap(hw->hw_addr); -err_ioremap: - free_netdev(netdev); -err_alloc_etherdev: - pci_release_regions(pdev); -err_pci_reg: -err_dma: - pci_disable_device(pdev); - return err; -} - -/** - * ixgbevf_remove - Device Removal Routine - * @pdev: PCI device information struct - * - * ixgbevf_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a - * Hot-Plug event, or because the driver is going to be removed from - * memory. 
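ixgbevf_probe above settles DMA addressing by attempting a 64-bit mask and falling back to 32-bit, recording the outcome so NETIF_F_HIGHDMA can be advertised later. A standalone sketch of the fallback, with try_set_mask() as a hypothetical stand-in for dma_set_mask():

#include <stdio.h>

/* Hypothetical stand-in: pretend only 32-bit DMA is usable. */
static int try_set_mask(int bits)
{
        return bits <= 32 ? 0 : -5;  /* fail the 64-bit attempt */
}

int main(void)
{
        int using_dac;

        if (!try_set_mask(64)) {
                using_dac = 1;       /* device can DMA above 4 GB */
        } else if (!try_set_mask(32)) {
                using_dac = 0;       /* 32-bit addressing only */
        } else {
                fprintf(stderr, "no usable DMA configuration\n");
                return 1;
        }
        printf("pci_using_dac = %d\n", using_dac);
        return 0;
}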
- **/ -static void __devexit ixgbevf_remove(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - set_bit(__IXGBEVF_DOWN, &adapter->state); - - del_timer_sync(&adapter->watchdog_timer); - - cancel_work_sync(&adapter->reset_task); - cancel_work_sync(&adapter->watchdog_task); - - if (adapter->netdev_registered) { - unregister_netdev(netdev); - adapter->netdev_registered = false; - } - - ixgbevf_reset_interrupt_capability(adapter); - - iounmap(adapter->hw.hw_addr); - pci_release_regions(pdev); - - hw_dbg(&adapter->hw, "Remove complete\n"); - - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); - - free_netdev(netdev); - - pci_disable_device(pdev); -} - -static struct pci_driver ixgbevf_driver = { - .name = ixgbevf_driver_name, - .id_table = ixgbevf_pci_tbl, - .probe = ixgbevf_probe, - .remove = __devexit_p(ixgbevf_remove), - .shutdown = ixgbevf_shutdown, -}; - -/** - * ixgbevf_init_module - Driver Registration Routine - * - * ixgbevf_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - **/ -static int __init ixgbevf_init_module(void) -{ - int ret; - printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string, - ixgbevf_driver_version); - - printk(KERN_INFO "%s\n", ixgbevf_copyright); - - ret = pci_register_driver(&ixgbevf_driver); - return ret; -} - -module_init(ixgbevf_init_module); - -/** - * ixgbevf_exit_module - Driver Exit Cleanup Routine - * - * ixgbevf_exit_module is called just before the driver is removed - * from memory. - **/ -static void __exit ixgbevf_exit_module(void) -{ - pci_unregister_driver(&ixgbevf_driver); -} - -#ifdef DEBUG -/** - * ixgbevf_get_hw_dev_name - return device name string - * used by hardware layer to print debugging information - **/ -char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) -{ - struct ixgbevf_adapter *adapter = hw->back; - return adapter->netdev->name; -} - -#endif -module_exit(ixgbevf_exit_module); - -/* ixgbevf_main.c */ diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c deleted file mode 100644 index 7a88331..0000000 --- a/drivers/net/ixgbevf/mbx.c +++ /dev/null @@ -1,341 +0,0 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "mbx.h" - -/** - * ixgbevf_poll_for_msg - Wait for message notification - * @hw: pointer to the HW structure - * - * returns 0 if it successfully received a message notification - **/ -static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - while (countdown && mbx->ops.check_for_msg(hw)) { - countdown--; - udelay(mbx->udelay); - } - - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; - - return countdown ? 0 : IXGBE_ERR_MBX; -} - -/** - * ixgbevf_poll_for_ack - Wait for message acknowledgement - * @hw: pointer to the HW structure - * - * returns 0 if it successfully received a message acknowledgement - **/ -static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - int countdown = mbx->timeout; - - while (countdown && mbx->ops.check_for_ack(hw)) { - countdown--; - udelay(mbx->udelay); - } - - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; - - return countdown ? 0 : IXGBE_ERR_MBX; -} - -/** - * ixgbevf_read_posted_mbx - Wait for message notification and receive message - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns 0 if it successfully received a message notification and - * copied it into the receive buffer. - **/ -static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; - - ret_val = ixgbevf_poll_for_msg(hw); - - /* if ack received read message, otherwise we timed out */ - if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size); - - return ret_val; -} - -/** - * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns 0 if it successfully copied message into the buffer and - * received an ack to that message within delay * timeout period - **/ -static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val; - - /* send msg */ - ret_val = mbx->ops.write(hw, msg, size); - - /* if msg sent wait until we receive an ack */ - if (!ret_val) - ret_val = ixgbevf_poll_for_ack(hw); - - return ret_val; -} - -/** - * ixgbevf_read_v2p_mailbox - read v2p mailbox - * @hw: pointer to the HW structure - * - * This function is used to read the v2p mailbox without losing the read to - * clear status bits. - **/ -static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw) -{ - u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); - - v2p_mailbox |= hw->mbx.v2p_mailbox; - hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; - - return v2p_mailbox; -} - -/** - * ixgbevf_check_for_bit_vf - Determine if a status bit was set - * @hw: pointer to the HW structure - * @mask: bitmask for bits to be tested and cleared - * - * This function is used to check for the read to clear bits within - * the V2P mailbox. 
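Together, the write_posted and read_posted operations above give the VF a synchronous request/reply primitive over the mailbox: write, spin until the PF acks, then spin until the reply arrives. A condensed sketch of a caller, following the same pattern ixgbevf_reset_hw_vf() uses later in this patch:

	u32 msgbuf[2];
	s32 err;

	msgbuf[0] = IXGBE_VF_RESET;
	/* write the request, then poll for the PF's ack (or time out) */
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
	if (!err)
		/* poll for the PF's reply and copy it out of the mailbox */
		err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
	if (!err && !(msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
		err = IXGBE_ERR_MBX;	/* PF did not ack the opcode */

Note that a timed-out poll zeroes mbx->timeout, so every subsequent posted operation fails immediately until the mailbox is re-armed by a reset.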
- **/ -static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) -{ - u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw); - s32 ret_val = IXGBE_ERR_MBX; - - if (v2p_mailbox & mask) - ret_val = 0; - - hw->mbx.v2p_mailbox &= ~mask; - - return ret_val; -} - -/** - * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail - * @hw: pointer to the HW structure - * - * returns 0 if the PF has set the Status bit or else ERR_MBX - **/ -static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_MBX; - - if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { - ret_val = 0; - hw->mbx.stats.reqs++; - } - - return ret_val; -} - -/** - * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd - * @hw: pointer to the HW structure - * - * returns 0 if the PF has set the ACK bit or else ERR_MBX - **/ -static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_MBX; - - if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { - ret_val = 0; - hw->mbx.stats.acks++; - } - - return ret_val; -} - -/** - * ixgbevf_check_for_rst_vf - checks to see if the PF has reset - * @hw: pointer to the HW structure - * - * returns true if the PF has set the reset done bit or else false - **/ -static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_MBX; - - if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | - IXGBE_VFMAILBOX_RSTI))) { - ret_val = 0; - hw->mbx.stats.rsts++; - } - - return ret_val; -} - -/** - * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock - * @hw: pointer to the HW structure - * - * return 0 if we obtained the mailbox lock - **/ -static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) -{ - s32 ret_val = IXGBE_ERR_MBX; - - /* Take ownership of the buffer */ - IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); - - /* reserve mailbox for vf use */ - if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) - ret_val = 0; - - return ret_val; -} - -/** - * ixgbevf_write_mbx_vf - Write a message to the mailbox - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns 0 if it successfully copied message into the buffer - **/ -static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) -{ - s32 ret_val; - u16 i; - - - /* lock the mailbox to prevent pf/vf race condition */ - ret_val = ixgbevf_obtain_mbx_lock_vf(hw); - if (ret_val) - goto out_no_write; - - /* flush msg and acks as we are overwriting the message buffer */ - ixgbevf_check_for_msg_vf(hw); - ixgbevf_check_for_ack_vf(hw); - - /* copy the caller specified message to the mailbox memory buffer */ - for (i = 0; i < size; i++) - IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); - - /* update stats */ - hw->mbx.stats.msgs_tx++; - - /* Drop VFU and interrupt the PF to tell it a message has been sent */ - IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); - -out_no_write: - return ret_val; -} - -/** - * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer - * - * returns 0 if it successfuly read message from buffer - **/ -static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) -{ - s32 ret_val = 0; - u16 i; - - /* lock the mailbox to prevent pf/vf race condition */ - ret_val = ixgbevf_obtain_mbx_lock_vf(hw); - if (ret_val) - goto out_no_read; - - /* copy the message from the mailbox memory buffer */ - for (i = 0; i < size; i++) - msg[i] = 
IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); - - /* Acknowledge receipt and release mailbox, then we're done */ - IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); - - /* update stats */ - hw->mbx.stats.msgs_rx++; - -out_no_read: - return ret_val; -} - -/** - * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox - * @hw: pointer to the HW structure - * - * Initializes the hw->mbx struct to correct values for vf mailbox - */ -static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - - /* start mailbox as timed out and let the reset_hw call set the timeout - * value to begin communications */ - mbx->timeout = 0; - mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; - - mbx->size = IXGBE_VFMAILBOX_SIZE; - - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; - - return 0; -} - -struct ixgbe_mbx_operations ixgbevf_mbx_ops = { - .init_params = ixgbevf_init_mbx_params_vf, - .read = ixgbevf_read_mbx_vf, - .write = ixgbevf_write_mbx_vf, - .read_posted = ixgbevf_read_posted_mbx, - .write_posted = ixgbevf_write_posted_mbx, - .check_for_msg = ixgbevf_check_for_msg_vf, - .check_for_ack = ixgbevf_check_for_ack_vf, - .check_for_rst = ixgbevf_check_for_rst_vf, -}; - diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h deleted file mode 100644 index ea393eb..0000000 --- a/drivers/net/ixgbevf/mbx.h +++ /dev/null @@ -1,99 +0,0 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBE_MBX_H_ -#define _IXGBE_MBX_H_ - -#include "vf.h" - -#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 - -#define IXGBE_VFMAILBOX 0x002FC -#define IXGBE_VFMBMEM 0x00200 - -/* Define mailbox register bits */ -#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ -#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ - -#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x)) -#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn)) - -#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ - -#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ -#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ -#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ -#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ - - -/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the - * PF. The reverse is true if it is IXGBE_PF_*. 
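The defines above fix the message framing used on this mailbox: the command opcode occupies the low byte, per-command extra info rides in bits 23:16 (IXGBE_VT_MSGINFO_SHIFT/MASK), and the PF's reply echoes the opcode with the ACK or NACK bit or'd into the high bits. An illustrative composition and reply check, using the same framing ixgbevf_set_uc_addr_vf() in vf.c relies on:

	u32 msgbuf[3] = { 0 };

	msgbuf[0]  = IXGBE_VF_SET_MACVLAN;		/* opcode, bits 7:0 */
	msgbuf[0] |= 1 << IXGBE_VT_MSGINFO_SHIFT;	/* extra info, bits 23:16 */

	/* ...sent with write_posted(), reply read back with read_posted()... */

	if (msgbuf[0] == (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
		/* the PF rejected the request */;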
- * Message ACK's are the value or'd with 0xF0000000 - */ -#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - * clear to send requests */ -#define IXGBE_VT_MSGINFO_SHIFT 16 -/* bits 23:16 are used for exra info for certain messages */ -#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) - -#define IXGBE_VF_RESET 0x01 /* VF requests reset */ -#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ -#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ -#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ - -/* length of permanent address message returned from PF */ -#define IXGBE_VF_PERMADDR_MSG_LEN 4 -/* word in permanent address message with the current multicast type */ -#define IXGBE_VF_MC_TYPE_WORD 3 - -#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ - -#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ - -/* forward declaration of the HW struct */ -struct ixgbe_hw; - -#endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h deleted file mode 100644 index 189200e..0000000 --- a/drivers/net/ixgbevf/regs.h +++ /dev/null @@ -1,85 +0,0 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef _IXGBEVF_REGS_H_ -#define _IXGBEVF_REGS_H_ - -#define IXGBE_VFCTRL 0x00000 -#define IXGBE_VFSTATUS 0x00008 -#define IXGBE_VFLINKS 0x00010 -#define IXGBE_VFFRTIMER 0x00048 -#define IXGBE_VFRXMEMWRAP 0x03190 -#define IXGBE_VTEICR 0x00100 -#define IXGBE_VTEICS 0x00104 -#define IXGBE_VTEIMS 0x00108 -#define IXGBE_VTEIMC 0x0010C -#define IXGBE_VTEIAC 0x00110 -#define IXGBE_VTEIAM 0x00114 -#define IXGBE_VTEITR(x) (0x00820 + (4 * x)) -#define IXGBE_VTIVAR(x) (0x00120 + (4 * x)) -#define IXGBE_VTIVAR_MISC 0x00140 -#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x)) -#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x)) -#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x)) -#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x)) -#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x)) -#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x)) -#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x)) -#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x)) -#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x)) -#define IXGBE_VFPSRTYPE 0x00300 -#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x)) -#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x)) -#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x)) -#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x)) -#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x)) -#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x)) -#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x)) -#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x)) -#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x)) -#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x)) -#define IXGBE_VFGPRC 0x0101C -#define IXGBE_VFGPTC 0x0201C -#define IXGBE_VFGORC_LSB 0x01020 -#define IXGBE_VFGORC_MSB 0x01024 -#define IXGBE_VFGOTC_LSB 0x02020 -#define IXGBE_VFGOTC_MSB 0x02024 -#define IXGBE_VFMPRC 0x01034 - -#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) - -#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg)) - -#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \ - writel((value), ((a)->hw_addr + (reg) + ((offset) << 2)))) - -#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \ - readl((a)->hw_addr + (reg) + ((offset) << 2))) - -#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) - -#endif /* _IXGBEVF_REGS_H_ */ diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c deleted file mode 100644 index aa3682e..0000000 --- a/drivers/net/ixgbevf/vf.c +++ /dev/null @@ -1,426 +0,0 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#include "vf.h" - -/** - * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx - * @hw: pointer to hardware structure - * - * Starts the hardware by filling the bus info structure and media type, clears - * all on chip counters, initializes receive address registers, multicast - * table, VLAN filter table, calls routine to set up link and flow control - * settings, and leaves transmit and receive units disabled and uninitialized - **/ -static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw) -{ - /* Clear adapter stopped flag */ - hw->adapter_stopped = false; - - return 0; -} - -/** - * ixgbevf_init_hw_vf - virtual function hardware initialization - * @hw: pointer to hardware structure - * - * Initialize the hardware by resetting the hardware and then starting - * the hardware - **/ -static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw) -{ - s32 status = hw->mac.ops.start_hw(hw); - - hw->mac.ops.get_mac_addr(hw, hw->mac.addr); - - return status; -} - -/** - * ixgbevf_reset_hw_vf - Performs hardware reset - * @hw: pointer to hardware structure - * - * Resets the hardware by reseting the transmit and receive units, masks and - * clears all interrupts. - **/ -static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 timeout = IXGBE_VF_INIT_TIMEOUT; - s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; - u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; - u8 *addr = (u8 *)(&msgbuf[1]); - - /* Call adapter stop to disable tx/rx and clear interrupts */ - hw->mac.ops.stop_adapter(hw); - - IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); - IXGBE_WRITE_FLUSH(hw); - - /* we cannot reset while the RSTI / RSTD bits are asserted */ - while (!mbx->ops.check_for_rst(hw) && timeout) { - timeout--; - udelay(5); - } - - if (!timeout) - return IXGBE_ERR_RESET_FAILED; - - /* mailbox timeout can now become active */ - mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; - - msgbuf[0] = IXGBE_VF_RESET; - mbx->ops.write_posted(hw, msgbuf, 1); - - msleep(10); - - /* set our "perm_addr" based on info provided by PF */ - /* also set up the mc_filter_type which is piggy backed - * on the mac address in word 3 */ - ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); - if (ret_val) - return ret_val; - - if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) - return IXGBE_ERR_INVALID_MAC_ADDR; - - memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS); - hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; - - return 0; -} - -/** - * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units - * @hw: pointer to hardware structure - * - * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, - * disables transmit and receive units. The adapter_stopped flag is used by - * the shared code and drivers to determine if the adapter is in a stopped - * state and should not touch the hardware. 
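One ordering subtlety worth spelling out: ixgbevf_init_mbx_params_vf() in mbx.c deliberately starts mbx->timeout at zero, so every posted mailbox operation fails fast until the reset handshake with the PF has completed. Only then does ixgbevf_reset_hw_vf() arm the timeout and open communications; a two-line recap of the code above:

	/* in ixgbevf_init_mbx_params_vf(): mailbox starts timed out */
	mbx->timeout = 0;

	/* in ixgbevf_reset_hw_vf(), once the reset handshake is done: */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;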
- **/ -static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) -{ - u32 number_of_queues; - u32 reg_val; - u16 i; - - /* - * Set the adapter_stopped flag so other driver functions stop touching - * the hardware - */ - hw->adapter_stopped = true; - - /* Disable the receive unit by stopped each queue */ - number_of_queues = hw->mac.max_rx_queues; - for (i = 0; i < number_of_queues; i++) { - reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); - if (reg_val & IXGBE_RXDCTL_ENABLE) { - reg_val &= ~IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); - } - } - - IXGBE_WRITE_FLUSH(hw); - - /* Clear interrupt mask to stop from interrupts being generated */ - IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); - - /* Clear any pending interrupts */ - IXGBE_READ_REG(hw, IXGBE_VTEICR); - - /* Disable the transmit unit. Each queue must be disabled. */ - number_of_queues = hw->mac.max_tx_queues; - for (i = 0; i < number_of_queues; i++) { - reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); - if (reg_val & IXGBE_TXDCTL_ENABLE) { - reg_val &= ~IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val); - } - } - - return 0; -} - -/** - * ixgbevf_mta_vector - Determines bit-vector in multicast table to set - * @hw: pointer to hardware structure - * @mc_addr: the multicast address - * - * Extracts the 12 bits, from a multicast address, to determine which - * bit-vector to set in the multicast table. The hardware uses 12 bits, from - * incoming rx multicast addresses, to determine the bit-vector to check in - * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set - * by the MO field of the MCSTCTRL. The MO field is set during initialization - * to mc_filter_type. - **/ -static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) -{ - u32 vector = 0; - - switch (hw->mac.mc_filter_type) { - case 0: /* use bits [47:36] of the address */ - vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); - break; - case 1: /* use bits [46:35] of the address */ - vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); - break; - case 2: /* use bits [45:34] of the address */ - vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); - break; - case 3: /* use bits [43:32] of the address */ - vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); - break; - default: /* Invalid mc_filter_type */ - break; - } - - /* vector can only be 12-bits or boundary will be exceeded */ - vector &= 0xFFF; - return vector; -} - -/** - * ixgbevf_get_mac_addr_vf - Read device MAC address - * @hw: pointer to the HW structure - * @mac_addr: pointer to storage for retrieved MAC address - **/ -static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) -{ - memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS); - - return 0; -} - -static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 msgbuf[3]; - u8 *msg_addr = (u8 *)(&msgbuf[1]); - s32 ret_val; - - memset(msgbuf, 0, sizeof(msgbuf)); - /* - * If index is one then this is the start of a new list and needs - * indication to the PF so it can do it's own list management. - * If it is zero then that tells the PF to just clear all of - * this VF's macvlans and there is no new list. 
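A worked example of ixgbevf_mta_vector() above, assuming mc_filter_type 0 (bits 47:36 of the address): for the mDNS group 01:00:5e:00:00:fb, mc_addr[4] is 0x00 and mc_addr[5] is 0xfb, so:

	vector = (0x00 >> 4) | ((u16)0xfb << 4);	/* = 0xfb0 */
	vector &= 0xFFF;				/* still 0xfb0 */

That is, bit 0xfb0 of the 4096-entry multicast table is the one the VF asks the PF to set, and the one the hardware checks on receive for that group.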
- */ - msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; - msgbuf[0] |= IXGBE_VF_SET_MACVLAN; - if (addr) - memcpy(msg_addr, addr, 6); - ret_val = mbx->ops.write_posted(hw, msgbuf, 3); - - if (!ret_val) - ret_val = mbx->ops.read_posted(hw, msgbuf, 3); - - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - - if (!ret_val) - if (msgbuf[0] == - (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK)) - ret_val = -ENOMEM; - - return ret_val; -} - -/** - * ixgbevf_set_rar_vf - set device MAC address - * @hw: pointer to hardware structure - * @index: Receive address register to write - * @addr: Address to put into receive address register - * @vmdq: Unused in this implementation - **/ -static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, - u32 vmdq) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 msgbuf[3]; - u8 *msg_addr = (u8 *)(&msgbuf[1]); - s32 ret_val; - - memset(msgbuf, 0, sizeof(msgbuf)); - msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; - memcpy(msg_addr, addr, 6); - ret_val = mbx->ops.write_posted(hw, msgbuf, 3); - - if (!ret_val) - ret_val = mbx->ops.read_posted(hw, msgbuf, 3); - - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - - /* if nacked the address was rejected, use "perm_addr" */ - if (!ret_val && - (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) - ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); - - return ret_val; -} - -/** - * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses - * @hw: pointer to the HW structure - * @netdev: pointer to net device structure - * - * Updates the Multicast Table Array. - **/ -static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, - struct net_device *netdev) -{ - struct netdev_hw_addr *ha; - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; - u16 *vector_list = (u16 *)&msgbuf[1]; - u32 cnt, i; - - /* Each entry in the list uses 1 16 bit word. We have 30 - * 16 bit words available in our HW msg buffer (minus 1 for the - * msg type). That's 30 hash values if we pack 'em right. If - * there are more than 30 MC addresses to add then punt the - * extras for now and then add code to handle more than 30 later. - * It would be unusual for a server to request that many multi-cast - * addresses except for in large enterprise network environments. - */ - - cnt = netdev_mc_count(netdev); - if (cnt > 30) - cnt = 30; - msgbuf[0] = IXGBE_VF_SET_MULTICAST; - msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; - - i = 0; - netdev_for_each_mc_addr(ha, netdev) { - if (i == cnt) - break; - vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); - } - - mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); - - return 0; -} - -/** - * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address - * @hw: pointer to the HW structure - * @vlan: 12 bit VLAN ID - * @vind: unused by VF drivers - * @vlan_on: if true then set bit, else clear bit - **/ -static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 msgbuf[2]; - - msgbuf[0] = IXGBE_VF_SET_VLAN; - msgbuf[1] = vlan; - /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ - msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; - - return mbx->ops.write_posted(hw, msgbuf, 2); -} - -/** - * ixgbevf_setup_mac_link_vf - Setup MAC link settings - * @hw: pointer to hardware structure - * @speed: Unused in this implementation - * @autoneg: Unused in this implementation - * @autoneg_wait_to_complete: Unused in this implementation - * - * Do nothing and return success. VF drivers are not allowed to change - * global settings. 
Maintained for driver compatibility. - **/ -static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) -{ - return 0; -} - -/** - * ixgbevf_check_mac_link_vf - Get link/speed status - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: true is link is up, false otherwise - * @autoneg_wait_to_complete: true when waiting for completion is needed - * - * Reads the links register to determine if link is up and the current speed - **/ -static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up, - bool autoneg_wait_to_complete) -{ - u32 links_reg; - - if (!(hw->mbx.ops.check_for_rst(hw))) { - *link_up = false; - *speed = 0; - return -1; - } - - links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); - - if (links_reg & IXGBE_LINKS_UP) - *link_up = true; - else - *link_up = false; - - if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_10G_82599) - *speed = IXGBE_LINK_SPEED_10GB_FULL; - else - *speed = IXGBE_LINK_SPEED_1GB_FULL; - - return 0; -} - -static struct ixgbe_mac_operations ixgbevf_mac_ops = { - .init_hw = ixgbevf_init_hw_vf, - .reset_hw = ixgbevf_reset_hw_vf, - .start_hw = ixgbevf_start_hw_vf, - .get_mac_addr = ixgbevf_get_mac_addr_vf, - .stop_adapter = ixgbevf_stop_hw_vf, - .setup_link = ixgbevf_setup_mac_link_vf, - .check_link = ixgbevf_check_mac_link_vf, - .set_rar = ixgbevf_set_rar_vf, - .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, - .set_uc_addr = ixgbevf_set_uc_addr_vf, - .set_vfta = ixgbevf_set_vfta_vf, -}; - -struct ixgbevf_info ixgbevf_82599_vf_info = { - .mac = ixgbe_mac_82599_vf, - .mac_ops = &ixgbevf_mac_ops, -}; - -struct ixgbevf_info ixgbevf_X540_vf_info = { - .mac = ixgbe_mac_X540_vf, - .mac_ops = &ixgbevf_mac_ops, -}; diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h deleted file mode 100644 index 10306b4..0000000 --- a/drivers/net/ixgbevf/vf.h +++ /dev/null @@ -1,174 +0,0 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - -#ifndef __IXGBE_VF_H__ -#define __IXGBE_VF_H__ - -#include -#include -#include -#include -#include - -#include "defines.h" -#include "regs.h" -#include "mbx.h" - -struct ixgbe_hw; - -/* iterator type for walking multicast address lists */ -typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, - u32 *vmdq); -struct ixgbe_mac_operations { - s32 (*init_hw)(struct ixgbe_hw *); - s32 (*reset_hw)(struct ixgbe_hw *); - s32 (*start_hw)(struct ixgbe_hw *); - s32 (*clear_hw_cntrs)(struct ixgbe_hw *); - enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); - u32 (*get_supported_physical_layer)(struct ixgbe_hw *); - s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); - s32 (*stop_adapter)(struct ixgbe_hw *); - s32 (*get_bus_info)(struct ixgbe_hw *); - - /* Link */ - s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); - s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); - s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, - bool *); - - /* RAR, Multicast, VLAN */ - s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32); - s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); - s32 (*init_rx_addrs)(struct ixgbe_hw *); - s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); - s32 (*enable_mc)(struct ixgbe_hw *); - s32 (*disable_mc)(struct ixgbe_hw *); - s32 (*clear_vfta)(struct ixgbe_hw *); - s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); -}; - -enum ixgbe_mac_type { - ixgbe_mac_unknown = 0, - ixgbe_mac_82599_vf, - ixgbe_mac_X540_vf, - ixgbe_num_macs -}; - -struct ixgbe_mac_info { - struct ixgbe_mac_operations ops; - u8 addr[6]; - u8 perm_addr[6]; - - enum ixgbe_mac_type type; - - s32 mc_filter_type; - - bool get_link_status; - u32 max_tx_queues; - u32 max_rx_queues; - u32 max_msix_vectors; -}; - -struct ixgbe_mbx_operations { - s32 (*init_params)(struct ixgbe_hw *hw); - s32 (*read)(struct ixgbe_hw *, u32 *, u16); - s32 (*write)(struct ixgbe_hw *, u32 *, u16); - s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16); - s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16); - s32 (*check_for_msg)(struct ixgbe_hw *); - s32 (*check_for_ack)(struct ixgbe_hw *); - s32 (*check_for_rst)(struct ixgbe_hw *); -}; - -struct ixgbe_mbx_stats { - u32 msgs_tx; - u32 msgs_rx; - - u32 acks; - u32 reqs; - u32 rsts; -}; - -struct ixgbe_mbx_info { - struct ixgbe_mbx_operations ops; - struct ixgbe_mbx_stats stats; - u32 timeout; - u32 udelay; - u32 v2p_mailbox; - u16 size; -}; - -struct ixgbe_hw { - void *back; - - u8 __iomem *hw_addr; - - struct ixgbe_mac_info mac; - struct ixgbe_mbx_info mbx; - - u16 device_id; - u16 subsystem_vendor_id; - u16 subsystem_device_id; - u16 vendor_id; - - u8 revision_id; - bool adapter_stopped; -}; - -struct ixgbevf_hw_stats { - u64 base_vfgprc; - u64 base_vfgptc; - u64 base_vfgorc; - u64 base_vfgotc; - u64 base_vfmprc; - - u64 last_vfgprc; - u64 last_vfgptc; - u64 last_vfgorc; - u64 last_vfgotc; - u64 last_vfmprc; - - u64 vfgprc; - u64 vfgptc; - u64 vfgorc; - u64 vfgotc; - u64 vfmprc; - - u64 saved_reset_vfgprc; - u64 saved_reset_vfgptc; - u64 saved_reset_vfgorc; - u64 saved_reset_vfgotc; - u64 saved_reset_vfmprc; -}; - -struct ixgbevf_info { - enum ixgbe_mac_type mac; - struct ixgbe_mac_operations *mac_ops; -}; - -#endif /* __IXGBE_VF_H__ */ - -- cgit v0.10.2 From aa43c2158d5ae1dc76cccb08cd57a3ffd32c3825 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Fri, 8 Apr 2011 19:06:30 -0700 
Subject: qlogic: Move the QLogic drivers Moves the QLogic drivers into drivers/net/ethernet/qlogic/ and the necessary Kconfig and Makefile changes. CC: Ron Mercer CC: Amit Kumar Salecha CC: Anirban Chakraborty Signed-off-by: Jeff Kirsher Acked-by: Anirban Chakraborty diff --git a/MAINTAINERS b/MAINTAINERS index f4838da..8f2821c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4499,7 +4499,7 @@ M: Amit Kumar Salecha L: netdev@vger.kernel.org W: http://www.qlogic.com S: Supported -F: drivers/net/netxen/ +F: drivers/net/ethernet/qlogic/netxen/ NFS, SUNRPC, AND LOCKD CLIENTS M: Trond Myklebust @@ -5245,7 +5245,7 @@ M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported F: Documentation/networking/LICENSE.qla3xxx -F: drivers/net/qla3xxx.* +F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER M: Amit Kumar Salecha @@ -5253,7 +5253,7 @@ M: Anirban Chakraborty M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported -F: drivers/net/qlcnic/ +F: drivers/net/ethernet/qlogic/qlcnic/ QLOGIC QLGE 10Gb ETHERNET DRIVER M: Jitendra Kalsaria @@ -5261,7 +5261,7 @@ M: Ron Mercer M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported -F: drivers/net/qlge/ +F: drivers/net/ethernet/qlogic/qlge/ QNX4 FILESYSTEM M: Anders Larsen diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index e649116..de2293d 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1707,15 +1707,6 @@ config XILINX_LL_TEMAC This driver supports the Xilinx 10/100/1000 LocalLink TEMAC core used in Xilinx Spartan and Virtex FPGAs -config QLA3XXX - tristate "QLogic QLA3XXX Network Driver Support" - depends on PCI - help - This driver supports QLogic ISP3XXX gigabit Ethernet cards. - - To compile this driver as a module, choose M here: the module - will be called qla3xxx. - config ATL1 tristate "Atheros/Attansic L1 Gigabit Ethernet support" depends on PCI @@ -1954,23 +1945,6 @@ config TEHUTI help Tehuti Networks 10G Ethernet NIC -config QLCNIC - tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support" - depends on PCI - select FW_LOADER - help - This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet - devices. - -config QLGE - tristate "QLogic QLGE 10Gb Ethernet Driver Support" - depends on PCI - help - This driver supports QLogic ISP8XXX 10Gb Ethernet cards. - - To compile this driver as a module, choose M here: the module - will be called qlge. 
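The net effect of the hunk above is that these options disappear from the top level of drivers/net/Kconfig and reappear behind the NET_VENDOR_QLOGIC gate added below, so a configuration that used to enable them directly would now look something like this (illustrative .config fragment for a tree with this patch applied):

	CONFIG_ETHERNET=y
	CONFIG_NET_VENDOR_QLOGIC=y
	CONFIG_QLA3XXX=m
	CONFIG_QLCNIC=m
	CONFIG_QLGE=m
	CONFIG_NETXEN_NIC=m

With NET_VENDOR_QLOGIC disabled, none of the per-driver prompts are shown, which is the point of the vendor grouping.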
- config BNA tristate "Brocade 1010/1020 10Gb Ethernet Driver support" depends on PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 84b9860..a58a9f0 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -110,9 +110,6 @@ obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o ll_temac-objs := ll_temac_main.o ll_temac_mdio.o obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o -obj-$(CONFIG_QLA3XXX) += qla3xxx.o -obj-$(CONFIG_QLCNIC) += qlcnic/ -obj-$(CONFIG_QLGE) += qlge/ obj-$(CONFIG_PPP) += ppp_generic.o obj-$(CONFIG_PPP_ASYNC) += ppp_async.o diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index a2fd385..ab591bb 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -17,5 +17,6 @@ source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/intel/Kconfig" +source "drivers/net/ethernet/qlogic/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 5265271..d8cf120 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ +obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig new file mode 100644 index 0000000..a7c44240 --- /dev/null +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -0,0 +1,53 @@ +# +# QLogic network device configuration +# + +config NET_VENDOR_QLOGIC + bool "QLogic devices" + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about QLogic cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_QLOGIC + +config QLA3XXX + tristate "QLogic QLA3XXX Network Driver Support" + depends on PCI + ---help--- + This driver supports QLogic ISP3XXX gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called qla3xxx. + +config QLCNIC + tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support" + depends on PCI + select FW_LOADER + ---help--- + This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet + devices. + +config QLGE + tristate "QLogic QLGE 10Gb Ethernet Driver Support" + depends on PCI + ---help--- + This driver supports QLogic ISP8XXX 10Gb Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called qlge. + +config NETXEN_NIC + tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC" + depends on PCI + select FW_LOADER + ---help--- + This enables the support for NetXen's Gigabit Ethernet card. + +endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile new file mode 100644 index 0000000..b2a283d --- /dev/null +++ b/drivers/net/ethernet/qlogic/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the QLogic network device drivers. 
+# + +obj-$(CONFIG_QLA3XXX) += qla3xxx.o +obj-$(CONFIG_QLCNIC) += qlcnic/ +obj-$(CONFIG_QLGE) += qlge/ +obj-$(CONFIG_NETXEN_NIC) += netxen/ diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile new file mode 100644 index 0000000..861a059 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/Makefile @@ -0,0 +1,29 @@ +# Copyright (C) 2003 - 2009 NetXen, Inc. +# Copyright (C) 2009 - QLogic Corporation. +# All rights reserved. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, +# MA 02111-1307, USA. +# +# The full GNU General Public License is included in this distribution +# in the file called "COPYING". +# +# + + +obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o + +netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \ + netxen_nic_ethtool.o netxen_nic_ctx.o diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h new file mode 100644 index 0000000..196b660 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -0,0 +1,1441 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#ifndef _NETXEN_NIC_H_ +#define _NETXEN_NIC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +#include "netxen_nic_hdr.h" +#include "netxen_nic_hw.h" + +#define _NETXEN_NIC_LINUX_MAJOR 4 +#define _NETXEN_NIC_LINUX_MINOR 0 +#define _NETXEN_NIC_LINUX_SUBVERSION 76 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.76" + +#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) +#define _major(v) (((v) >> 24) & 0xff) +#define _minor(v) (((v) >> 16) & 0xff) +#define _build(v) ((v) & 0xffff) + +/* version in image has weird encoding: + * 7:0 - major + * 15:8 - minor + * 31:16 - build (little endian) + */ +#define NETXEN_DECODE_VERSION(v) \ + NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) + +#define NETXEN_NUM_FLASH_SECTORS (64) +#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024) +#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \ + * NETXEN_FLASH_SECTOR_SIZE) + +#define RCV_DESC_RINGSIZE(rds_ring) \ + (sizeof(struct rcv_desc) * (rds_ring)->num_desc) +#define RCV_BUFF_RINGSIZE(rds_ring) \ + (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc) +#define STATUS_DESC_RINGSIZE(sds_ring) \ + (sizeof(struct status_desc) * (sds_ring)->num_desc) +#define TX_BUFF_RINGSIZE(tx_ring) \ + (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc) +#define TX_DESC_RINGSIZE(tx_ring) \ + (sizeof(struct cmd_desc_type0) * tx_ring->num_desc) + +#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) + +#define NETXEN_RCV_PRODUCER_OFFSET 0 +#define NETXEN_RCV_PEG_DB_ID 2 +#define NETXEN_HOST_DUMMY_DMA_SIZE 1024 +#define FLASH_SUCCESS 0 + +#define ADDR_IN_WINDOW1(off) \ + ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 
1 : 0 + +#define ADDR_IN_RANGE(addr, low, high) \ + (((addr) < (high)) && ((addr) >= (low))) + +/* + * normalize a 64MB crb address to 32MB PCI window + * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1 + */ +#define NETXEN_CRB_NORMAL(reg) \ + ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST) + +#define NETXEN_CRB_NORMALIZE(adapter, reg) \ + pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg)) + +#define DB_NORMALIZE(adapter, off) \ + (adapter->ahw.db_base + (off)) + +#define NX_P2_C0 0x24 +#define NX_P2_C1 0x25 +#define NX_P3_A0 0x30 +#define NX_P3_A2 0x30 +#define NX_P3_B0 0x40 +#define NX_P3_B1 0x41 +#define NX_P3_B2 0x42 +#define NX_P3P_A0 0x50 + +#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1) +#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0) +#define NX_IS_REVISION_P3P(REVISION) (REVISION >= NX_P3P_A0) + +#define FIRST_PAGE_GROUP_START 0 +#define FIRST_PAGE_GROUP_END 0x100000 + +#define SECOND_PAGE_GROUP_START 0x6000000 +#define SECOND_PAGE_GROUP_END 0x68BC000 + +#define THIRD_PAGE_GROUP_START 0x70E4000 +#define THIRD_PAGE_GROUP_END 0x8000000 + +#define FIRST_PAGE_GROUP_SIZE FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START +#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START +#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START + +#define P2_MAX_MTU (8000) +#define P3_MAX_MTU (9600) +#define NX_ETHERMTU 1500 +#define NX_MAX_ETHERHDR 32 /* This contains some padding */ + +#define NX_P2_RX_BUF_MAX_LEN 1760 +#define NX_P3_RX_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU) +#define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU) +#define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU) +#define NX_CT_DEFAULT_RX_BUF_LEN 2048 +#define NX_LRO_BUFFER_EXTRA 2048 + +#define NX_RX_LRO_BUFFER_LENGTH (8060) + +/* + * Maximum number of ring contexts + */ +#define MAX_RING_CTX 1 + +/* Opcodes to be used with the commands */ +#define TX_ETHER_PKT 0x01 +#define TX_TCP_PKT 0x02 +#define TX_UDP_PKT 0x03 +#define TX_IP_PKT 0x04 +#define TX_TCP_LSO 0x05 +#define TX_TCP_LSO6 0x06 +#define TX_IPSEC 0x07 +#define TX_IPSEC_CMD 0x0a +#define TX_TCPV6_PKT 0x0b +#define TX_UDPV6_PKT 0x0c + +/* The following opcodes are for internal consumption. */ +#define NETXEN_CONTROL_OP 0x10 +#define PEGNET_REQUEST 0x11 + +#define MAX_NUM_CARDS 4 + +#define NETXEN_MAX_FRAGS_PER_TX 14 +#define MAX_TSO_HEADER_DESC 2 +#define MGMT_CMD_DESC_RESV 4 +#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + + MGMT_CMD_DESC_RESV) +#define NX_MAX_TX_TIMEOUTS 2 + +/* + * Following are the states of the Phantom. Phantom will set them and + * Host will read to check if the fields are correct. 
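To make the "weird encoding" comment above concrete: a flash image carrying version 4.0.76 (the driver's own NETXEN_NIC_LINUX_VERSIONID) stores the word 0x004c0004, with build 76 (0x4c) in bits 31:16, minor 0 in bits 15:8, and major 4 in bits 7:0; NETXEN_DECODE_VERSION() converts it back to the driver's native layout:

	u32 flashed = 0x004c0004;		/* as read from the image */
	u32 v = NETXEN_DECODE_VERSION(flashed);

	/* v == NETXEN_VERSION_CODE(4, 0, 76) == 0x0400004c,
	 * so _major(v) == 4, _minor(v) == 0, _build(v) == 76 */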
+ */ +#define PHAN_INITIALIZE_START 0xff00 +#define PHAN_INITIALIZE_FAILED 0xffff +#define PHAN_INITIALIZE_COMPLETE 0xff01 + +/* Host writes the following to notify that it has done the init-handshake */ +#define PHAN_INITIALIZE_ACK 0xf00f + +#define NUM_RCV_DESC_RINGS 3 +#define NUM_STS_DESC_RINGS 4 + +#define RCV_RING_NORMAL 0 +#define RCV_RING_JUMBO 1 +#define RCV_RING_LRO 2 + +#define MIN_CMD_DESCRIPTORS 64 +#define MIN_RCV_DESCRIPTORS 64 +#define MIN_JUMBO_DESCRIPTORS 32 + +#define MAX_CMD_DESCRIPTORS 1024 +#define MAX_RCV_DESCRIPTORS_1G 4096 +#define MAX_RCV_DESCRIPTORS_10G 8192 +#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512 +#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024 +#define MAX_LRO_RCV_DESCRIPTORS 8 + +#define DEFAULT_RCV_DESCRIPTORS_1G 2048 +#define DEFAULT_RCV_DESCRIPTORS_10G 4096 + +#define NETXEN_CTX_SIGNATURE 0xdee0 +#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0 +#define NETXEN_CTX_RESET 0xbad0 +#define NETXEN_CTX_D3_RESET 0xacc0 +#define NETXEN_RCV_PRODUCER(ringid) (ringid) + +#define PHAN_PEG_RCV_INITIALIZED 0xff01 +#define PHAN_PEG_RCV_START_INITIALIZE 0xff00 + +#define get_next_index(index, length) \ + (((index) + 1) & ((length) - 1)) + +#define get_index_range(index,length,count) \ + (((index) + (count)) & ((length) - 1)) + +#define MPORT_SINGLE_FUNCTION_MODE 0x1111 +#define MPORT_MULTI_FUNCTION_MODE 0x2222 + +#define NX_MAX_PCI_FUNC 8 + +/* + * NetXen host-peg signal message structure + * + * Bit 0-1 : peg_id => 0x2 for tx and 01 for rx + * Bit 2 : priv_id => must be 1 + * Bit 3-17 : count => for doorbell + * Bit 18-27 : ctx_id => Context id + * Bit 28-31 : opcode + */ + +typedef u32 netxen_ctx_msg; + +#define netxen_set_msg_peg_id(config_word, val) \ + ((config_word) &= ~3, (config_word) |= val & 3) +#define netxen_set_msg_privid(config_word) \ + ((config_word) |= 1 << 2) +#define netxen_set_msg_count(config_word, val) \ + ((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3) +#define netxen_set_msg_ctxid(config_word, val) \ + ((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18) +#define netxen_set_msg_opcode(config_word, val) \ + ((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28) + +struct netxen_rcv_ring { + __le64 addr; + __le32 size; + __le32 rsrvd; +}; + +struct netxen_sts_ring { + __le64 addr; + __le32 size; + __le16 msi_index; + __le16 rsvd; +} ; + +struct netxen_ring_ctx { + + /* one command ring */ + __le64 cmd_consumer_offset; + __le64 cmd_ring_addr; + __le32 cmd_ring_size; + __le32 rsrvd; + + /* three receive rings */ + struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS]; + + __le64 sts_ring_addr; + __le32 sts_ring_size; + + __le32 ctx_id; + + __le64 rsrvd_2[3]; + __le32 sts_ring_count; + __le32 rsrvd_3; + struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS]; + +} __attribute__ ((aligned(64))); + +/* + * Following data structures describe the descriptors that will be used. + * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when + * we are doing LSO (above the 1500 size packet) only. 
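Putting the netxen_ctx_msg layout documented above to work: when the driver posts receive buffers it rings the doorbell with a word assembled by the setter macros, roughly like this (a sketch modelled on the driver's rx-buffer posting path; the count and context id values are illustrative):

	netxen_ctx_msg msg = 0;

	netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);	/* bits 1:0: rx peg */
	netxen_set_msg_privid(msg);				/* bit 2 must be 1 */
	netxen_set_msg_count(msg, 32);				/* bits 17:3: doorbell count */
	netxen_set_msg_ctxid(msg, 0);				/* bits 27:18: context id */
	netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(0));	/* bits 31:28 */

	/* the finished word is then written to the doorbell page,
	 * i.e. writel(msg, DB_NORMALIZE(adapter, <doorbell offset>)) */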
+ */ + +/* + * The size of reference handle been changed to 16 bits to pass the MSS fields + * for the LSO packet + */ + +#define FLAGS_CHECKSUM_ENABLED 0x01 +#define FLAGS_LSO_ENABLED 0x02 +#define FLAGS_IPSEC_SA_ADD 0x04 +#define FLAGS_IPSEC_SA_DELETE 0x08 +#define FLAGS_VLAN_TAGGED 0x10 +#define FLAGS_VLAN_OOB 0x40 + +#define netxen_set_tx_vlan_tci(cmd_desc, v) \ + (cmd_desc)->vlan_TCI = cpu_to_le16(v); + +#define netxen_set_cmd_desc_port(cmd_desc, var) \ + ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) +#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \ + ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0)) + +#define netxen_set_tx_port(_desc, _port) \ + (_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0) + +#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \ + (_desc)->flags_opcode = \ + cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)) + +#define netxen_set_tx_frags_len(_desc, _frags, _len) \ + (_desc)->nfrags__length = \ + cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)) + +struct cmd_desc_type0 { + u8 tcp_hdr_offset; /* For LSO only */ + u8 ip_hdr_offset; /* For LSO only */ + __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */ + __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */ + + __le64 addr_buffer2; + + __le16 reference_handle; + __le16 mss; + u8 port_ctxid; /* 7:4 ctxid 3:0 port */ + u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ + __le16 conn_id; /* IPSec offoad only */ + + __le64 addr_buffer3; + __le64 addr_buffer1; + + __le16 buffer_length[4]; + + __le64 addr_buffer4; + + __le32 reserved2; + __le16 reserved; + __le16 vlan_TCI; + +} __attribute__ ((aligned(64))); + +/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ +struct rcv_desc { + __le16 reference_handle; + __le16 reserved; + __le32 buffer_length; /* allocated buffer length (usually 2K) */ + __le64 addr_buffer; +}; + +/* opcode field in status_desc */ +#define NETXEN_NIC_SYN_OFFLOAD 0x03 +#define NETXEN_NIC_RXPKT_DESC 0x04 +#define NETXEN_OLD_RXPKT_DESC 0x3f +#define NETXEN_NIC_RESPONSE_DESC 0x05 +#define NETXEN_NIC_LRO_DESC 0x12 + +/* for status field in status_desc */ +#define STATUS_NEED_CKSUM (1) +#define STATUS_CKSUM_OK (2) + +/* owner bits of status_desc */ +#define STATUS_OWNER_HOST (0x1ULL << 56) +#define STATUS_OWNER_PHANTOM (0x2ULL << 56) + +/* Status descriptor: + 0-3 port, 4-7 status, 8-11 type, 12-27 total_length + 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset + 53-55 desc_cnt, 56-57 owner, 58-63 opcode + */ +#define netxen_get_sts_port(sts_data) \ + ((sts_data) & 0x0F) +#define netxen_get_sts_status(sts_data) \ + (((sts_data) >> 4) & 0x0F) +#define netxen_get_sts_type(sts_data) \ + (((sts_data) >> 8) & 0x0F) +#define netxen_get_sts_totallength(sts_data) \ + (((sts_data) >> 12) & 0xFFFF) +#define netxen_get_sts_refhandle(sts_data) \ + (((sts_data) >> 28) & 0xFFFF) +#define netxen_get_sts_prot(sts_data) \ + (((sts_data) >> 44) & 0x0F) +#define netxen_get_sts_pkt_offset(sts_data) \ + (((sts_data) >> 48) & 0x1F) +#define netxen_get_sts_desc_cnt(sts_data) \ + (((sts_data) >> 53) & 0x7) +#define netxen_get_sts_opcode(sts_data) \ + (((sts_data) >> 58) & 0x03F) + +#define netxen_get_lro_sts_refhandle(sts_data) \ + ((sts_data) & 0x0FFFF) +#define netxen_get_lro_sts_length(sts_data) \ + (((sts_data) >> 16) & 0x0FFFF) +#define netxen_get_lro_sts_l2_hdr_offset(sts_data) \ + (((sts_data) >> 32) & 0x0FF) +#define netxen_get_lro_sts_l4_hdr_offset(sts_data) \ + (((sts_data) >> 40) & 0x0FF) +#define netxen_get_lro_sts_timestamp(sts_data) 
\ + (((sts_data) >> 48) & 0x1) +#define netxen_get_lro_sts_type(sts_data) \ + (((sts_data) >> 49) & 0x7) +#define netxen_get_lro_sts_push_flag(sts_data) \ + (((sts_data) >> 52) & 0x1) +#define netxen_get_lro_sts_seq_number(sts_data) \ + ((sts_data) & 0x0FFFFFFFF) + + +struct status_desc { + __le64 status_desc_data[2]; +} __attribute__ ((aligned(16))); + +/* UNIFIED ROMIMAGE *************************/ +#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 +#define NX_UNI_DIR_SECT_BOOTLD 0x6 +#define NX_UNI_DIR_SECT_FW 0x7 + +/*Offsets */ +#define NX_UNI_CHIP_REV_OFF 10 +#define NX_UNI_FLAGS_OFF 11 +#define NX_UNI_BIOS_VERSION_OFF 12 +#define NX_UNI_BOOTLD_IDX_OFF 27 +#define NX_UNI_FIRMWARE_IDX_OFF 29 + +struct uni_table_desc{ + uint32_t findex; + uint32_t num_entries; + uint32_t entry_size; + uint32_t reserved[5]; +}; + +struct uni_data_desc{ + uint32_t findex; + uint32_t size; + uint32_t reserved[5]; +}; + +/* UNIFIED ROMIMAGE *************************/ + +/* The version of the main data structure */ +#define NETXEN_BDINFO_VERSION 1 + +/* Magic number to let user know flash is programmed */ +#define NETXEN_BDINFO_MAGIC 0x12345678 + +/* Max number of Gig ports on a Phantom board */ +#define NETXEN_MAX_PORTS 4 + +#define NETXEN_BRDTYPE_P1_BD 0x0000 +#define NETXEN_BRDTYPE_P1_SB 0x0001 +#define NETXEN_BRDTYPE_P1_SMAX 0x0002 +#define NETXEN_BRDTYPE_P1_SOCK 0x0003 + +#define NETXEN_BRDTYPE_P2_SOCK_31 0x0008 +#define NETXEN_BRDTYPE_P2_SOCK_35 0x0009 +#define NETXEN_BRDTYPE_P2_SB35_4G 0x000a +#define NETXEN_BRDTYPE_P2_SB31_10G 0x000b +#define NETXEN_BRDTYPE_P2_SB31_2G 0x000c + +#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ 0x000d +#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ 0x000e +#define NETXEN_BRDTYPE_P2_SB31_10G_CX4 0x000f + +#define NETXEN_BRDTYPE_P3_REF_QG 0x0021 +#define NETXEN_BRDTYPE_P3_HMEZ 0x0022 +#define NETXEN_BRDTYPE_P3_10G_CX4_LP 0x0023 +#define NETXEN_BRDTYPE_P3_4_GB 0x0024 +#define NETXEN_BRDTYPE_P3_IMEZ 0x0025 +#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026 +#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027 +#define NETXEN_BRDTYPE_P3_XG_LOM 0x0028 +#define NETXEN_BRDTYPE_P3_4_GB_MM 0x0029 +#define NETXEN_BRDTYPE_P3_10G_SFP_CT 0x002a +#define NETXEN_BRDTYPE_P3_10G_SFP_QT 0x002b +#define NETXEN_BRDTYPE_P3_10G_CX4 0x0031 +#define NETXEN_BRDTYPE_P3_10G_XFP 0x0032 +#define NETXEN_BRDTYPE_P3_10G_TP 0x0080 + +/* Flash memory map */ +#define NETXEN_CRBINIT_START 0 /* crbinit section */ +#define NETXEN_BRDCFG_START 0x4000 /* board config */ +#define NETXEN_INITCODE_START 0x6000 /* pegtune code */ +#define NETXEN_BOOTLD_START 0x10000 /* bootld */ +#define NETXEN_IMAGE_START 0x43000 /* compressed image */ +#define NETXEN_SECONDARY_START 0x200000 /* backup images */ +#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ +#define NETXEN_USER_START 0x3E8000 /* Firmare info */ +#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ +#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ + +#define NX_OLD_MAC_ADDR_OFFSET (NETXEN_USER_START) +#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408) +#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c) +#define NX_FW_MAC_ADDR_OFFSET (NETXEN_USER_START+0x418) +#define NX_FW_SERIAL_NUM_OFFSET (NETXEN_USER_START+0x81c) +#define NX_BIOS_VERSION_OFFSET (NETXEN_USER_START+0x83c) + +#define NX_HDR_VERSION_OFFSET (NETXEN_BRDCFG_START) +#define NX_BRDTYPE_OFFSET (NETXEN_BRDCFG_START+0x8) +#define NX_FW_MAGIC_OFFSET (NETXEN_BRDCFG_START+0x128) + +#define NX_FW_MIN_SIZE (0x3fffff) +#define NX_P2_MN_ROMIMAGE 0 +#define NX_P3_CT_ROMIMAGE 1 +#define NX_P3_MN_ROMIMAGE 2 
+#define NX_UNIFIED_ROMIMAGE 3 +#define NX_FLASH_ROMIMAGE 4 +#define NX_UNKNOWN_ROMIMAGE 0xff + +#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin" +#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin" +#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin" +#define NX_UNIFIED_ROMIMAGE_NAME "phanfw.bin" +#define NX_FLASH_ROMIMAGE_NAME "flash" + +extern char netxen_nic_driver_name[]; + +/* Number of status descriptors to handle per interrupt */ +#define MAX_STATUS_HANDLE (64) + +/* + * netxen_skb_frag{} is to contain mapping info for each SG list. This + * has to be freed when DMA is complete. This is part of netxen_tx_buffer{}. + */ +struct netxen_skb_frag { + u64 dma; + u64 length; +}; + +struct netxen_recv_crb { + u32 crb_rcv_producer[NUM_RCV_DESC_RINGS]; + u32 crb_sts_consumer[NUM_STS_DESC_RINGS]; + u32 sw_int_mask[NUM_STS_DESC_RINGS]; +}; + +/* Following defines are for the state of the buffers */ +#define NETXEN_BUFFER_FREE 0 +#define NETXEN_BUFFER_BUSY 1 + +/* + * There will be one netxen_buffer per skb packet. These will be + * used to save the dma info for pci_unmap_page() + */ +struct netxen_cmd_buffer { + struct sk_buff *skb; + struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1]; + u32 frag_count; +}; + +/* In rx_buffer, we do not need multiple fragments as is a single buffer */ +struct netxen_rx_buffer { + struct list_head list; + struct sk_buff *skb; + u64 dma; + u16 ref_handle; + u16 state; +}; + +/* Board types */ +#define NETXEN_NIC_GBE 0x01 +#define NETXEN_NIC_XGBE 0x02 + +/* + * One hardware_context{} per adapter + * contains interrupt info as well shared hardware info. + */ +struct netxen_hardware_context { + void __iomem *pci_base0; + void __iomem *pci_base1; + void __iomem *pci_base2; + void __iomem *db_base; + void __iomem *ocm_win_crb; + + unsigned long db_len; + unsigned long pci_len0; + + u32 ocm_win; + u32 crb_win; + + rwlock_t crb_lock; + spinlock_t mem_lock; + + u8 cut_through; + u8 revision_id; + u8 pci_func; + u8 linkup; + u16 port_type; + u16 board_type; +}; + +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 + +struct netxen_adapter_stats { + u64 xmitcalled; + u64 xmitfinished; + u64 rxdropped; + u64 txdropped; + u64 csummed; + u64 rx_pkts; + u64 lro_pkts; + u64 rxbytes; + u64 txbytes; +}; + +/* + * Rcv Descriptor Context. One such per Rcv Descriptor. There may + * be one Rcv Descriptor for normal packets, one for jumbo and may be others. + */ +struct nx_host_rds_ring { + u32 producer; + u32 num_desc; + u32 dma_size; + u32 skb_size; + u32 flags; + void __iomem *crb_rcv_producer; + struct rcv_desc *desc_head; + struct netxen_rx_buffer *rx_buf_arr; + struct list_head free_list; + spinlock_t lock; + dma_addr_t phys_addr; +}; + +struct nx_host_sds_ring { + u32 consumer; + u32 num_desc; + void __iomem *crb_sts_consumer; + void __iomem *crb_intr_mask; + + struct status_desc *desc_head; + struct netxen_adapter *adapter; + struct napi_struct napi; + struct list_head free_list[NUM_RCV_DESC_RINGS]; + + int irq; + + dma_addr_t phys_addr; + char name[IFNAMSIZ+4]; +}; + +struct nx_host_tx_ring { + u32 producer; + __le32 *hw_consumer; + u32 sw_consumer; + void __iomem *crb_cmd_producer; + void __iomem *crb_cmd_consumer; + u32 num_desc; + + struct netdev_queue *txq; + + struct netxen_cmd_buffer *cmd_buf_arr; + struct cmd_desc_type0 *desc_head; + dma_addr_t phys_addr; +}; + +/* + * Receive context. There is one such structure per instance of the + * receive processing. Any state information that is relevant to + * the receive, and is must be in this structure. 
The global data may be + * present elsewhere. + */ +struct netxen_recv_context { + u32 state; + u16 context_id; + u16 virt_port; + + struct nx_host_rds_ring *rds_rings; + struct nx_host_sds_ring *sds_rings; + + struct netxen_ring_ctx *hwctx; + dma_addr_t phys_addr; +}; + +/* New HW context creation */ + +#define NX_OS_CRB_RETRY_COUNT 4000 +#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \ + (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) + +#define NX_CDRP_CLEAR 0x00000000 +#define NX_CDRP_CMD_BIT 0x80000000 + +/* + * All responses must have the NX_CDRP_CMD_BIT cleared + * in the crb NX_CDRP_CRB_OFFSET. + */ +#define NX_CDRP_FORM_RSP(rsp) (rsp) +#define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0) + +#define NX_CDRP_RSP_OK 0x00000001 +#define NX_CDRP_RSP_FAIL 0x00000002 +#define NX_CDRP_RSP_TIMEOUT 0x00000003 + +/* + * All commands must have the NX_CDRP_CMD_BIT set in + * the crb NX_CDRP_CRB_OFFSET. + */ +#define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd)) +#define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0) + +#define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 +#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 +#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 +#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 +#define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 +#define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 +#define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007 +#define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008 +#define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009 +#define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a +#define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e +#define NX_CDRP_CMD_GET_STATISTICS 0x0000000f +#define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010 +#define NX_CDRP_CMD_SET_MTU 0x00000012 +#define NX_CDRP_CMD_READ_PHY 0x00000013 +#define NX_CDRP_CMD_WRITE_PHY 0x00000014 +#define NX_CDRP_CMD_READ_HW_REG 0x00000015 +#define NX_CDRP_CMD_GET_FLOW_CTL 0x00000016 +#define NX_CDRP_CMD_SET_FLOW_CTL 0x00000017 +#define NX_CDRP_CMD_READ_MAX_MTU 0x00000018 +#define NX_CDRP_CMD_READ_MAX_LRO 0x00000019 +#define NX_CDRP_CMD_CONFIGURE_TOE 0x0000001a +#define NX_CDRP_CMD_FUNC_ATTRIB 0x0000001b +#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c +#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d +#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e +#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f +#define NX_CDRP_CMD_MAX 0x00000020 + +#define NX_RCODE_SUCCESS 0 +#define NX_RCODE_NO_HOST_MEM 1 +#define NX_RCODE_NO_HOST_RESOURCE 2 +#define NX_RCODE_NO_CARD_CRB 3 +#define NX_RCODE_NO_CARD_MEM 4 +#define NX_RCODE_NO_CARD_RESOURCE 5 +#define NX_RCODE_INVALID_ARGS 6 +#define NX_RCODE_INVALID_ACTION 7 +#define NX_RCODE_INVALID_STATE 8 +#define NX_RCODE_NOT_SUPPORTED 9 +#define NX_RCODE_NOT_PERMITTED 10 +#define NX_RCODE_NOT_READY 11 +#define NX_RCODE_DOES_NOT_EXIST 12 +#define NX_RCODE_ALREADY_EXISTS 13 +#define NX_RCODE_BAD_SIGNATURE 14 +#define NX_RCODE_CMD_NOT_IMPL 15 +#define NX_RCODE_CMD_INVALID 16 +#define NX_RCODE_TIMEOUT 17 +#define NX_RCODE_CMD_FAILED 18 +#define NX_RCODE_MAX_EXCEEDED 19 +#define NX_RCODE_MAX 20 + +#define NX_DESTROY_CTX_RESET 0 +#define NX_DESTROY_CTX_D3_RESET 1 +#define NX_DESTROY_CTX_MAX 2 + +/* + * Capabilities + */ +#define NX_CAP_BIT(class, bit) (1 << bit) +#define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0) +#define NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1) +#define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2) +#define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3) +#define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4) +#define NX_CAP0_LRO NX_CAP_BIT(0, 5) +#define NX_CAP0_LSO NX_CAP_BIT(0, 
6) +#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7) +#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8) +#define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10) + +/* + * Context state + */ +#define NX_HOST_CTX_STATE_FREED 0 +#define NX_HOST_CTX_STATE_ALLOCATED 1 +#define NX_HOST_CTX_STATE_ACTIVE 2 +#define NX_HOST_CTX_STATE_DISABLED 3 +#define NX_HOST_CTX_STATE_QUIESCED 4 +#define NX_HOST_CTX_STATE_MAX 5 + +/* + * Rx context + */ + +typedef struct { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le16 msi_index; + __le16 rsvd; /* Padding */ +} nx_hostrq_sds_ring_t; + +typedef struct { + __le64 host_phys_addr; /* Ring base addr */ + __le64 buff_size; /* Packet buffer size */ + __le32 ring_size; /* Ring entries */ + __le32 ring_kind; /* Class of ring */ +} nx_hostrq_rds_ring_t; + +typedef struct { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 host_rds_crb_mode; /* RDS crb usage */ + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 rsvd1; /* Padding */ + __le16 rsvd2; /* Padding */ + u8 reserved[128]; /* reserve space for future expansion*/ + /* MUST BE 64-bit aligned. + The following is packed: + - N hostrq_rds_rings + - N hostrq_sds_rings */ + char data[0]; +} nx_hostrq_rx_ctx_t; + +typedef struct { + __le32 host_producer_crb; /* Crb to use */ + __le32 rsvd1; /* Padding */ +} nx_cardrsp_rds_ring_t; + +typedef struct { + __le32 host_consumer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} nx_cardrsp_sds_ring_t; + +typedef struct { + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le32 host_ctx_state; /* Starting State */ + __le32 num_fn_per_port; /* How many PCI fn share the port */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + u8 reserved[128]; /* save space for future expansion */ + /* MUST BE 64-bit aligned. 
+	The following is packed:
+	- N cardrsp_rds_rings
+	- N cardrsp_sds_rings */
+	char data[0];
+} nx_cardrsp_rx_ctx_t;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings)	\
+	(sizeof(HOSTRQ_RX) +					\
+	(rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) +		\
+	(sds_rings)*(sizeof(nx_hostrq_sds_ring_t)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings)	\
+	(sizeof(CARDRSP_RX) +					\
+	(rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) +		\
+	(sds_rings)*(sizeof(nx_cardrsp_sds_ring_t)))
+
+/*
+ * Tx context
+ */
+
+typedef struct {
+	__le64 host_phys_addr;	/* Ring base addr */
+	__le32 ring_size;	/* Ring entries */
+	__le32 rsvd;		/* Padding */
+} nx_hostrq_cds_ring_t;
+
+typedef struct {
+	__le64 host_rsp_dma_addr;	/* Response dma'd here */
+	__le64 cmd_cons_dma_addr;	/* */
+	__le64 dummy_dma_addr;		/* */
+	__le32 capabilities[4];		/* Flag bit vector */
+	__le32 host_int_crb_mode;	/* Interrupt crb usage */
+	__le32 rsvd1;			/* Padding */
+	__le16 rsvd2;			/* Padding */
+	__le16 interrupt_ctl;
+	__le16 msi_index;
+	__le16 rsvd3;			/* Padding */
+	nx_hostrq_cds_ring_t cds_ring;	/* Desc of cds ring */
+	u8 reserved[128];		/* future expansion */
+} nx_hostrq_tx_ctx_t;
+
+typedef struct {
+	__le32 host_producer_crb;	/* Crb to use */
+	__le32 interrupt_crb;		/* Crb to use */
+} nx_cardrsp_cds_ring_t;
+
+typedef struct {
+	__le32 host_ctx_state;		/* Starting state */
+	__le16 context_id;		/* Handle for context */
+	u8 phys_port;			/* Physical id of port */
+	u8 virt_port;			/* Virtual/Logical id of port */
+	nx_cardrsp_cds_ring_t cds_ring;	/* Card cds settings */
+	u8 reserved[128];		/* future expansion */
+} nx_cardrsp_tx_ctx_t;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX)	(sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX)	(sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define NX_HOST_RDS_CRB_MODE_UNIQUE	0
+#define NX_HOST_RDS_CRB_MODE_SHARED	1
+#define NX_HOST_RDS_CRB_MODE_CUSTOM	2
+#define NX_HOST_RDS_CRB_MODE_MAX	3
+
+#define NX_HOST_INT_CRB_MODE_UNIQUE	0
+#define NX_HOST_INT_CRB_MODE_SHARED	1
+#define NX_HOST_INT_CRB_MODE_NORX	2
+#define NX_HOST_INT_CRB_MODE_NOTX	3
+#define NX_HOST_INT_CRB_MODE_NORXTX	4
+
+
+/* MAC */
+
+#define MC_COUNT_P2	16
+#define MC_COUNT_P3	38
+
+#define NETXEN_MAC_NOOP	0
+#define NETXEN_MAC_ADD	1
+#define NETXEN_MAC_DEL	2
+
+typedef struct nx_mac_list_s {
+	struct list_head list;
+	uint8_t mac_addr[ETH_ALEN+2];
+} nx_mac_list_t;
+
+struct nx_vlan_ip_list {
+	struct list_head list;
+	u32 ip_addr;
+};
+
+/*
+ * Interrupt coalescing defaults. The defaults are for 1500 MTU. They are
+ * adjusted based on the configured MTU.
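+ *
+ * As an illustrative reading (not stated explicitly here): with the defaults
+ * below, the firmware would raise an rx interrupt after 3 us or 256 received
+ * packets, whichever limit is reached first, and a tx interrupt after 4 us
+ * or 64 packets.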
+ */ +#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3 +#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256 +#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64 +#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4 + +#define NETXEN_NIC_INTR_DEFAULT 0x04 + +typedef union { + struct { + uint16_t rx_packets; + uint16_t rx_time_us; + uint16_t tx_packets; + uint16_t tx_time_us; + } data; + uint64_t word; +} nx_nic_intr_coalesce_data_t; + +typedef struct { + uint16_t stats_time_us; + uint16_t rate_sample_time; + uint16_t flags; + uint16_t rsvd_1; + uint32_t low_threshold; + uint32_t high_threshold; + nx_nic_intr_coalesce_data_t normal; + nx_nic_intr_coalesce_data_t low; + nx_nic_intr_coalesce_data_t high; + nx_nic_intr_coalesce_data_t irq; +} nx_nic_intr_coalesce_t; + +#define NX_HOST_REQUEST 0x13 +#define NX_NIC_REQUEST 0x14 + +#define NX_MAC_EVENT 0x1 + +#define NX_IP_UP 2 +#define NX_IP_DOWN 3 + +/* + * Driver --> Firmware + */ +#define NX_NIC_H2C_OPCODE_START 0 +#define NX_NIC_H2C_OPCODE_CONFIG_RSS 1 +#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL 2 +#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3 +#define NX_NIC_H2C_OPCODE_CONFIG_LED 4 +#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5 +#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC 6 +#define NX_NIC_H2C_OPCODE_LRO_REQUEST 7 +#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS 8 +#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST 9 +#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST 10 +#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU 11 +#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12 +#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13 +#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14 +#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15 +#define NX_NIC_H2C_OPCODE_GET_NET_STATS 16 +#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V 17 +#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR 18 +#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK 19 +#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE 20 +#define NX_NIC_H2C_OPCODE_GET_LINKEVENT 21 +#define NX_NIC_C2C_OPCODE 22 +#define NX_NIC_H2C_OPCODE_CONFIG_BRIDGING 23 +#define NX_NIC_H2C_OPCODE_CONFIG_HW_LRO 24 +#define NX_NIC_H2C_OPCODE_LAST 25 + +/* + * Firmware --> Driver + */ + +#define NX_NIC_C2H_OPCODE_START 128 +#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129 +#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130 +#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131 +#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132 +#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133 +#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134 +#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135 +#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS 136 +#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137 +#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138 +#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139 +#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140 +#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 +#define NX_NIC_C2H_OPCODE_LAST 142 + +#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ +#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ +#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ + +#define NX_NIC_LRO_REQUEST_FIRST 0 +#define NX_NIC_LRO_REQUEST_ADD_FLOW 1 +#define NX_NIC_LRO_REQUEST_DELETE_FLOW 2 +#define NX_NIC_LRO_REQUEST_TIMER 3 +#define NX_NIC_LRO_REQUEST_CLEANUP 4 +#define NX_NIC_LRO_REQUEST_ADD_FLOW_SCHEDULED 5 +#define NX_TOE_LRO_REQUEST_ADD_FLOW 6 +#define NX_TOE_LRO_REQUEST_ADD_FLOW_RESPONSE 7 +#define NX_TOE_LRO_REQUEST_DELETE_FLOW 8 +#define NX_TOE_LRO_REQUEST_DELETE_FLOW_RESPONSE 9 +#define 
NX_TOE_LRO_REQUEST_TIMER 10 +#define NX_NIC_LRO_REQUEST_LAST 11 + +#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5) +#define NX_FW_CAPABILITY_SWITCHING (1 << 6) +#define NX_FW_CAPABILITY_PEXQ (1 << 7) +#define NX_FW_CAPABILITY_BDG (1 << 8) +#define NX_FW_CAPABILITY_FVLANTX (1 << 9) +#define NX_FW_CAPABILITY_HW_LRO (1 << 10) +#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11) + +/* module types */ +#define LINKEVENT_MODULE_NOT_PRESENT 1 +#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 +#define LINKEVENT_MODULE_OPTICAL_SRLR 3 +#define LINKEVENT_MODULE_OPTICAL_LRM 4 +#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7 +#define LINKEVENT_MODULE_TWINAX 8 + +#define LINKSPEED_10GBPS 10000 +#define LINKSPEED_1GBPS 1000 +#define LINKSPEED_100MBPS 100 +#define LINKSPEED_10MBPS 10 + +#define LINKSPEED_ENCODED_10MBPS 0 +#define LINKSPEED_ENCODED_100MBPS 1 +#define LINKSPEED_ENCODED_1GBPS 2 + +#define LINKEVENT_AUTONEG_DISABLED 0 +#define LINKEVENT_AUTONEG_ENABLED 1 + +#define LINKEVENT_HALF_DUPLEX 0 +#define LINKEVENT_FULL_DUPLEX 1 + +#define LINKEVENT_LINKSPEED_MBPS 0 +#define LINKEVENT_LINKSPEED_ENCODED 1 + +#define AUTO_FW_RESET_ENABLED 0xEF10AF12 +#define AUTO_FW_RESET_DISABLED 0xDCBAAF12 + +/* firmware response header: + * 63:58 - message type + * 57:56 - owner + * 55:53 - desc count + * 52:48 - reserved + * 47:40 - completion id + * 39:32 - opcode + * 31:16 - error code + * 15:00 - reserved + */ +#define netxen_get_nic_msgtype(msg_hdr) \ + ((msg_hdr >> 58) & 0x3F) +#define netxen_get_nic_msg_compid(msg_hdr) \ + ((msg_hdr >> 40) & 0xFF) +#define netxen_get_nic_msg_opcode(msg_hdr) \ + ((msg_hdr >> 32) & 0xFF) +#define netxen_get_nic_msg_errcode(msg_hdr) \ + ((msg_hdr >> 16) & 0xFFFF) + +typedef struct { + union { + struct { + u64 hdr; + u64 body[7]; + }; + u64 words[8]; + }; +} nx_fw_msg_t; + +typedef struct { + __le64 qhdr; + __le64 req_hdr; + __le64 words[6]; +} nx_nic_req_t; + +typedef struct { + u8 op; + u8 tag; + u8 mac_addr[6]; +} nx_mac_req_t; + +#define MAX_PENDING_DESC_BLOCK_SIZE 64 + +#define NETXEN_NIC_MSI_ENABLED 0x02 +#define NETXEN_NIC_MSIX_ENABLED 0x04 +#define NETXEN_NIC_LRO_ENABLED 0x08 +#define NETXEN_NIC_LRO_DISABLED 0x00 +#define NETXEN_NIC_BRIDGE_ENABLED 0X10 +#define NETXEN_NIC_DIAG_ENABLED 0x20 +#define NETXEN_IS_MSI_FAMILY(adapter) \ + ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) + +#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS +#define NETXEN_MSIX_TBL_SPACE 8192 +#define NETXEN_PCI_REG_MSIX_TBL 0x44 + +#define NETXEN_DB_MAPSIZE_BYTES 0x1000 + +#define NETXEN_NETDEV_WEIGHT 128 +#define NETXEN_ADAPTER_UP_MAGIC 777 +#define NETXEN_NIC_PEG_TUNE 0 + +#define __NX_FW_ATTACHED 0 +#define __NX_DEV_UP 1 +#define __NX_RESETTING 2 + +struct netxen_dummy_dma { + void *addr; + dma_addr_t phys_addr; +}; + +struct netxen_adapter { + struct netxen_hardware_context ahw; + + struct net_device *netdev; + struct pci_dev *pdev; + struct list_head mac_list; + struct list_head vlan_ip_list; + + spinlock_t tx_clean_lock; + + u16 num_txd; + u16 num_rxd; + u16 num_jumbo_rxd; + u16 num_lro_rxd; + + u8 max_rds_rings; + u8 max_sds_rings; + u8 driver_mismatch; + u8 msix_supported; + u8 __pad; + u8 pci_using_dac; + u8 portnum; + u8 physical_port; + + u8 mc_enabled; + u8 max_mc_count; + u8 rss_supported; + u8 link_changed; + u8 fw_wait_cnt; + u8 fw_fail_cnt; + u8 tx_timeo_cnt; + u8 need_fw_reset; + + u8 has_link_events; + u8 fw_type; + u16 tx_context_id; + u16 mtu; + u16 is_up; 
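+	/* is_up holds NETXEN_ADAPTER_UP_MAGIC (777) once hw resources are
+	 * attached; paths such as the ethtool register dump test it before
+	 * touching hardware. The link_* fields below cache the most
+	 * recently seen link state. */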
+ + u16 link_speed; + u16 link_duplex; + u16 link_autoneg; + u16 module_type; + + u32 capabilities; + u32 flags; + u32 irq; + u32 temp; + + u32 int_vec_bit; + u32 heartbit; + + u8 mac_addr[ETH_ALEN]; + + struct netxen_adapter_stats stats; + + struct netxen_recv_context recv_ctx; + struct nx_host_tx_ring *tx_ring; + + int (*macaddr_set) (struct netxen_adapter *, u8 *); + int (*set_mtu) (struct netxen_adapter *, int); + int (*set_promisc) (struct netxen_adapter *, u32); + void (*set_multi) (struct net_device *); + int (*phy_read) (struct netxen_adapter *, u32 reg, u32 *); + int (*phy_write) (struct netxen_adapter *, u32 reg, u32 val); + int (*init_port) (struct netxen_adapter *, int); + int (*stop_port) (struct netxen_adapter *); + + u32 (*crb_read)(struct netxen_adapter *, ulong); + int (*crb_write)(struct netxen_adapter *, ulong, u32); + + int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *); + int (*pci_mem_write)(struct netxen_adapter *, u64, u64); + + int (*pci_set_window)(struct netxen_adapter *, u64, u32 *); + + u32 (*io_read)(struct netxen_adapter *, void __iomem *); + void (*io_write)(struct netxen_adapter *, void __iomem *, u32); + + void __iomem *tgt_mask_reg; + void __iomem *pci_int_reg; + void __iomem *tgt_status_reg; + void __iomem *crb_int_state_reg; + void __iomem *isr_int_vec; + + struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER]; + + struct netxen_dummy_dma dummy_dma; + + struct delayed_work fw_work; + + struct work_struct tx_timeout_task; + + nx_nic_intr_coalesce_t coal; + + unsigned long state; + __le32 file_prd_off; /*File fw product offset*/ + u32 fw_version; + const struct firmware *fw; +}; + +int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val); +int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val); + +#define NXRD32(adapter, off) \ + (adapter->crb_read(adapter, off)) +#define NXWR32(adapter, off, val) \ + (adapter->crb_write(adapter, off, val)) +#define NXRDIO(adapter, addr) \ + (adapter->io_read(adapter, addr)) +#define NXWRIO(adapter, addr, val) \ + (adapter->io_write(adapter, addr, val)) + +int netxen_pcie_sem_lock(struct netxen_adapter *, int, u32); +void netxen_pcie_sem_unlock(struct netxen_adapter *, int); + +#define netxen_rom_lock(a) \ + netxen_pcie_sem_lock((a), 2, NETXEN_ROM_LOCK_ID) +#define netxen_rom_unlock(a) \ + netxen_pcie_sem_unlock((a), 2) +#define netxen_phy_lock(a) \ + netxen_pcie_sem_lock((a), 3, NETXEN_PHY_LOCK_ID) +#define netxen_phy_unlock(a) \ + netxen_pcie_sem_unlock((a), 3) +#define netxen_api_lock(a) \ + netxen_pcie_sem_lock((a), 5, 0) +#define netxen_api_unlock(a) \ + netxen_pcie_sem_unlock((a), 5) +#define netxen_sw_lock(a) \ + netxen_pcie_sem_lock((a), 6, 0) +#define netxen_sw_unlock(a) \ + netxen_pcie_sem_unlock((a), 6) +#define crb_win_lock(a) \ + netxen_pcie_sem_lock((a), 7, NETXEN_CRB_WIN_LOCK_ID) +#define crb_win_unlock(a) \ + netxen_pcie_sem_unlock((a), 7) + +int netxen_nic_get_board_info(struct netxen_adapter *adapter); +int netxen_nic_wol_supported(struct netxen_adapter *adapter); + +/* Functions from netxen_nic_init.c */ +int netxen_init_dummy_dma(struct netxen_adapter *adapter); +void netxen_free_dummy_dma(struct netxen_adapter *adapter); + +int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter); +int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); +int netxen_load_firmware(struct netxen_adapter *adapter); +int netxen_need_fw_reset(struct netxen_adapter *adapter); +void netxen_request_firmware(struct netxen_adapter *adapter); +void 
netxen_release_firmware(struct netxen_adapter *adapter); +int netxen_pinit_from_rom(struct netxen_adapter *adapter); + +int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); +int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, + u8 *bytes, size_t size); +int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr, + u8 *bytes, size_t size); +int netxen_flash_unlock(struct netxen_adapter *adapter); +int netxen_backup_crbinit(struct netxen_adapter *adapter); +int netxen_flash_erase_secondary(struct netxen_adapter *adapter); +int netxen_flash_erase_primary(struct netxen_adapter *adapter); +void netxen_halt_pegs(struct netxen_adapter *adapter); + +int netxen_rom_se(struct netxen_adapter *adapter, int addr); + +int netxen_alloc_sw_resources(struct netxen_adapter *adapter); +void netxen_free_sw_resources(struct netxen_adapter *adapter); + +void netxen_setup_hwops(struct netxen_adapter *adapter); +void __iomem *netxen_get_ioaddr(struct netxen_adapter *, u32); + +int netxen_alloc_hw_resources(struct netxen_adapter *adapter); +void netxen_free_hw_resources(struct netxen_adapter *adapter); + +void netxen_release_rx_buffers(struct netxen_adapter *adapter); +void netxen_release_tx_buffers(struct netxen_adapter *adapter); + +int netxen_init_firmware(struct netxen_adapter *adapter); +void netxen_nic_clear_stats(struct netxen_adapter *adapter); +void netxen_watchdog_task(struct work_struct *work); +void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, + struct nx_host_rds_ring *rds_ring); +int netxen_process_cmd_ring(struct netxen_adapter *adapter); +int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max); + +void netxen_p3_free_mac_list(struct netxen_adapter *adapter); +int netxen_config_intr_coalesce(struct netxen_adapter *adapter); +int netxen_config_rss(struct netxen_adapter *adapter, int enable); +int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd); +int netxen_linkevent_request(struct netxen_adapter *adapter, int enable); +void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup); +void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *); +void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64); + +int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, + u32 speed, u32 duplex, u32 autoneg); +int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); +int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); +int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable); +int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable); +int netxen_send_lro_cleanup(struct netxen_adapter *adapter); + +void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, + struct nx_host_tx_ring *tx_ring); + +/* Functions from netxen_nic_main.c */ +int netxen_nic_reset_context(struct netxen_adapter *); + +/* + * NetXen Board information + */ + +#define NETXEN_MAX_SHORT_NAME 32 +struct netxen_brdinfo { + int brdtype; /* type of board */ + long ports; /* max no of physical ports */ + char short_name[NETXEN_MAX_SHORT_NAME]; +}; + +static const struct netxen_brdinfo netxen_boards[] = { + {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"}, + {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"}, + {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"}, + {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"}, + {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"}, + {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"}, + {NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig 
"}, + {NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"}, + {NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"}, + {NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"}, + {NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"}, + {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, + {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, + {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, + {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"}, + {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"}, + {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"}, + {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, + {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} +}; + +#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) + +static inline void get_brd_name_by_type(u32 type, char *name) +{ + int i, found = 0; + for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { + if (netxen_boards[i].brdtype == type) { + strcpy(name, netxen_boards[i].short_name); + found = 1; + break; + } + + } + if (!found) + name = "Unknown"; +} + +static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) +{ + smp_mb(); + return find_diff_among(tx_ring->producer, + tx_ring->sw_consumer, tx_ring->num_desc); + +} + +int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac); +int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac); +extern void netxen_change_ringparam(struct netxen_adapter *adapter); +extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, + int *valp); + +extern const struct ethtool_ops netxen_nic_ethtool_ops; + +#endif /* __NETXEN_NIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c new file mode 100644 index 0000000..a925392 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -0,0 +1,793 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ *
+ */
+
+#include "netxen_nic_hw.h"
+#include "netxen_nic.h"
+
+#define NXHAL_VERSION	1
+
+static u32
+netxen_poll_rsp(struct netxen_adapter *adapter)
+{
+	u32 rsp = NX_CDRP_RSP_OK;
+	int timeout = 0;
+
+	do {
+		/* give the firmware at least 1 ms to respond */
+		msleep(1);
+
+		if (++timeout > NX_OS_CRB_RETRY_COUNT)
+			return NX_CDRP_RSP_TIMEOUT;
+
+		rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
+	} while (!NX_CDRP_IS_RSP(rsp));
+
+	return rsp;
+}
+
+static u32
+netxen_issue_cmd(struct netxen_adapter *adapter,
+	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+{
+	u32 rsp;
+	u32 signature = 0;
+	u32 rcode = NX_RCODE_SUCCESS;
+
+	signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);
+
+	/* Acquire semaphore before accessing CRB */
+	if (netxen_api_lock(adapter))
+		return NX_RCODE_TIMEOUT;
+
+	NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
+
+	NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);
+
+	NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);
+
+	NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);
+
+	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));
+
+	rsp = netxen_poll_rsp(adapter);
+
+	if (rsp == NX_CDRP_RSP_TIMEOUT) {
+		printk(KERN_ERR "%s: card response timeout.\n",
+				netxen_nic_driver_name);
+
+		rcode = NX_RCODE_TIMEOUT;
+	} else if (rsp == NX_CDRP_RSP_FAIL) {
+		rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
+
+		printk(KERN_ERR "%s: failed card response code: 0x%x\n",
+				netxen_nic_driver_name, rcode);
+	}
+
+	/* Release semaphore */
+	netxen_api_unlock(adapter);
+
+	return rcode;
+}
+
+int
+nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
+{
+	u32 rcode = NX_RCODE_SUCCESS;
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
+		rcode = netxen_issue_cmd(adapter,
+				adapter->ahw.pci_func,
+				NXHAL_VERSION,
+				recv_ctx->context_id,
+				mtu,
+				0,
+				NX_CDRP_CMD_SET_MTU);
+
+	if (rcode != NX_RCODE_SUCCESS)
+		return -EIO;
+
+	return 0;
+}
+
+int
+nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+			u32 speed, u32 duplex, u32 autoneg)
+{
+
+	return netxen_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			NXHAL_VERSION,
+			speed,
+			duplex,
+			autoneg,
+			NX_CDRP_CMD_CONFIG_GBE_PORT);
+
+}
+
+static int
+nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
+{
+	void *addr;
+	nx_hostrq_rx_ctx_t *prq;
+	nx_cardrsp_rx_ctx_t *prsp;
+	nx_hostrq_rds_ring_t *prq_rds;
+	nx_hostrq_sds_ring_t *prq_sds;
+	nx_cardrsp_rds_ring_t *prsp_rds;
+	nx_cardrsp_sds_ring_t *prsp_sds;
+	struct nx_host_rds_ring *rds_ring;
+	struct nx_host_sds_ring *sds_ring;
+
+	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+	u64 phys_addr;
+
+	int i, nrds_rings, nsds_rings;
+	size_t rq_size, rsp_size;
+	u32 cap, reg, val;
+
+	int err;
+
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+	nrds_rings = adapter->max_rds_rings;
+	nsds_rings = adapter->max_sds_rings;
+
+	rq_size =
+		SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
+	rsp_size =
+		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
+
+	addr = pci_alloc_consistent(adapter->pdev,
+				rq_size, &hostrq_phys_addr);
+	if (addr == NULL)
+		return -ENOMEM;
+	prq = addr;
+
+	addr = pci_alloc_consistent(adapter->pdev,
+			rsp_size, &cardrsp_phys_addr);
+	if (addr == NULL) {
+		err = -ENOMEM;
+		goto out_free_rq;
+	}
+	prsp = addr;
+
+	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
+	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
+
+	prq->capabilities[0] = cpu_to_le32(cap);
+	prq->host_int_crb_mode =
cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); + prq->host_rds_crb_mode = + cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE); + + prq->num_rds_rings = cpu_to_le16(nrds_rings); + prq->num_sds_rings = cpu_to_le16(nsds_rings); + prq->rds_ring_offset = cpu_to_le32(0); + + val = le32_to_cpu(prq->rds_ring_offset) + + (sizeof(nx_hostrq_rds_ring_t) * nrds_rings); + prq->sds_ring_offset = cpu_to_le32(val); + + prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + + le32_to_cpu(prq->rds_ring_offset)); + + for (i = 0; i < nrds_rings; i++) { + + rds_ring = &recv_ctx->rds_rings[i]; + + prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); + prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); + prq_rds[i].ring_kind = cpu_to_le32(i); + prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); + } + + prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + + le32_to_cpu(prq->sds_ring_offset)); + + for (i = 0; i < nsds_rings; i++) { + + sds_ring = &recv_ctx->sds_rings[i]; + + prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); + prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); + prq_sds[i].msi_index = cpu_to_le16(i); + } + + phys_addr = hostrq_phys_addr; + err = netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + (u32)(phys_addr >> 32), + (u32)(phys_addr & 0xffffffff), + rq_size, + NX_CDRP_CMD_CREATE_RX_CTX); + if (err) { + printk(KERN_WARNING + "Failed to create rx ctx in firmware%d\n", err); + goto out_free_rsp; + } + + + prsp_rds = ((nx_cardrsp_rds_ring_t *) + &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { + rds_ring = &recv_ctx->rds_rings[i]; + + reg = le32_to_cpu(prsp_rds[i].host_producer_crb); + rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(reg - 0x200)); + } + + prsp_sds = ((nx_cardrsp_sds_ring_t *) + &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { + sds_ring = &recv_ctx->sds_rings[i]; + + reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); + sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(reg - 0x200)); + + reg = le32_to_cpu(prsp_sds[i].interrupt_crb); + sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(reg - 0x200)); + } + + recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); + recv_ctx->context_id = le16_to_cpu(prsp->context_id); + recv_ctx->virt_port = prsp->virt_port; + +out_free_rsp: + pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr); +out_free_rq: + pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr); + return err; +} + +static void +nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + if (netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + recv_ctx->context_id, + NX_DESTROY_CTX_RESET, + 0, + NX_CDRP_CMD_DESTROY_RX_CTX)) { + + printk(KERN_WARNING + "%s: Failed to destroy rx ctx in firmware\n", + netxen_nic_driver_name); + } +} + +static int +nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter) +{ + nx_hostrq_tx_ctx_t *prq; + nx_hostrq_cds_ring_t *prq_cds; + nx_cardrsp_tx_ctx_t *prsp; + void *rq_addr, *rsp_addr; + size_t rq_size, rsp_size; + u32 temp; + int err = 0; + u64 offset, phys_addr; + dma_addr_t rq_phys_addr, rsp_phys_addr; + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t); + rq_addr = pci_alloc_consistent(adapter->pdev, + rq_size, 
&rq_phys_addr); + if (!rq_addr) + return -ENOMEM; + + rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t); + rsp_addr = pci_alloc_consistent(adapter->pdev, + rsp_size, &rsp_phys_addr); + if (!rsp_addr) { + err = -ENOMEM; + goto out_free_rq; + } + + memset(rq_addr, 0, rq_size); + prq = rq_addr; + + memset(rsp_addr, 0, rsp_size); + prsp = rsp_addr; + + prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); + + temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO); + prq->capabilities[0] = cpu_to_le32(temp); + + prq->host_int_crb_mode = + cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); + + prq->interrupt_ctl = 0; + prq->msi_index = 0; + + prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr); + + offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx); + prq->cmd_cons_dma_addr = cpu_to_le64(offset); + + prq_cds = &prq->cds_ring; + + prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); + prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); + + phys_addr = rq_phys_addr; + err = netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + (u32)(phys_addr >> 32), + ((u32)phys_addr & 0xffffffff), + rq_size, + NX_CDRP_CMD_CREATE_TX_CTX); + + if (err == NX_RCODE_SUCCESS) { + temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); + tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(temp - 0x200)); +#if 0 + adapter->tx_state = + le32_to_cpu(prsp->host_ctx_state); +#endif + adapter->tx_context_id = + le16_to_cpu(prsp->context_id); + } else { + printk(KERN_WARNING + "Failed to create tx ctx in firmware%d\n", err); + err = -EIO; + } + + pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr); + +out_free_rq: + pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr); + + return err; +} + +static void +nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter) +{ + if (netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + adapter->tx_context_id, + NX_DESTROY_CTX_RESET, + 0, + NX_CDRP_CMD_DESTROY_TX_CTX)) { + + printk(KERN_WARNING + "%s: Failed to destroy tx ctx in firmware\n", + netxen_nic_driver_name); + } +} + +int +nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val) +{ + u32 rcode; + + rcode = netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + reg, + 0, + 0, + NX_CDRP_CMD_READ_PHY); + + if (rcode != NX_RCODE_SUCCESS) + return -EIO; + + return NXRD32(adapter, NX_ARG1_CRB_OFFSET); +} + +int +nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val) +{ + u32 rcode; + + rcode = netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + reg, + val, + 0, + NX_CDRP_CMD_WRITE_PHY); + + if (rcode != NX_RCODE_SUCCESS) + return -EIO; + + return 0; +} + +static u64 ctx_addr_sig_regs[][3] = { + {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)}, + {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)}, + {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)}, + {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)} +}; + +#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0]) +#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2]) +#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1]) + +#define lower32(x) ((u32)((x) & 0xffffffff)) +#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff)) + +static struct netxen_recv_crb recv_crb_registers[] = { + /* Instance 0 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x100), + /* Jumbo frames */ + 
NETXEN_NIC_REG(0x110), + /* LRO */ + NETXEN_NIC_REG(0x120) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x138), + NETXEN_NIC_REG_2(0x000), + NETXEN_NIC_REG_2(0x004), + NETXEN_NIC_REG_2(0x008), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_0, + NETXEN_NIC_REG_2(0x044), + NETXEN_NIC_REG_2(0x048), + NETXEN_NIC_REG_2(0x04c), + }, + }, + /* Instance 1 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x144), + /* Jumbo frames */ + NETXEN_NIC_REG(0x154), + /* LRO */ + NETXEN_NIC_REG(0x164) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x17c), + NETXEN_NIC_REG_2(0x020), + NETXEN_NIC_REG_2(0x024), + NETXEN_NIC_REG_2(0x028), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_1, + NETXEN_NIC_REG_2(0x064), + NETXEN_NIC_REG_2(0x068), + NETXEN_NIC_REG_2(0x06c), + }, + }, + /* Instance 2 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x1d8), + /* Jumbo frames */ + NETXEN_NIC_REG(0x1f8), + /* LRO */ + NETXEN_NIC_REG(0x208) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x220), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_2, + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + }, + /* Instance 3 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x22c), + /* Jumbo frames */ + NETXEN_NIC_REG(0x23c), + /* LRO */ + NETXEN_NIC_REG(0x24c) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x264), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_3, + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + }, +}; + +static int +netxen_init_old_ctx(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + int ring; + int port = adapter->portnum; + struct netxen_ring_ctx *hwctx; + u32 signature; + + tx_ring = adapter->tx_ring; + recv_ctx = &adapter->recv_ctx; + hwctx = recv_ctx->hwctx; + + hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr); + hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc); + + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + hwctx->rcv_rings[ring].addr = + cpu_to_le64(rds_ring->phys_addr); + hwctx->rcv_rings[ring].size = + cpu_to_le32(rds_ring->num_desc); + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (ring == 0) { + hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr); + hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc); + } + hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr); + hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc); + hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring); + } + hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings); + + signature = (adapter->max_sds_rings > 1) ? 
+ NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE; + + NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port), + lower32(recv_ctx->phys_addr)); + NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port), + upper32(recv_ctx->phys_addr)); + NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), + signature | port); + return 0; +} + +int netxen_alloc_hw_resources(struct netxen_adapter *adapter) +{ + void *addr; + int err = 0; + int ring; + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int port = adapter->portnum; + + recv_ctx = &adapter->recv_ctx; + tx_ring = adapter->tx_ring; + + addr = pci_alloc_consistent(pdev, + sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), + &recv_ctx->phys_addr); + if (addr == NULL) { + dev_err(&pdev->dev, "failed to allocate hw context\n"); + return -ENOMEM; + } + + memset(addr, 0, sizeof(struct netxen_ring_ctx)); + recv_ctx->hwctx = addr; + recv_ctx->hwctx->ctx_id = cpu_to_le32(port); + recv_ctx->hwctx->cmd_consumer_offset = + cpu_to_le64(recv_ctx->phys_addr + + sizeof(struct netxen_ring_ctx)); + tx_ring->hw_consumer = + (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx)); + + /* cmd desc ring */ + addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring), + &tx_ring->phys_addr); + + if (addr == NULL) { + dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n", + netdev->name); + err = -ENOMEM; + goto err_out_free; + } + + tx_ring->desc_head = addr; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + addr = pci_alloc_consistent(adapter->pdev, + RCV_DESC_RINGSIZE(rds_ring), + &rds_ring->phys_addr); + if (addr == NULL) { + dev_err(&pdev->dev, + "%s: failed to allocate rds ring [%d]\n", + netdev->name, ring); + err = -ENOMEM; + goto err_out_free; + } + rds_ring->desc_head = addr; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + rds_ring->crb_rcv_producer = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].crb_rcv_producer[ring]); + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + addr = pci_alloc_consistent(adapter->pdev, + STATUS_DESC_RINGSIZE(sds_ring), + &sds_ring->phys_addr); + if (addr == NULL) { + dev_err(&pdev->dev, + "%s: failed to allocate sds ring [%d]\n", + netdev->name, ring); + err = -ENOMEM; + goto err_out_free; + } + sds_ring->desc_head = addr; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + sds_ring->crb_sts_consumer = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].crb_sts_consumer[ring]); + + sds_ring->crb_intr_mask = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].sw_int_mask[ring]); + } + } + + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state)) + goto done; + err = nx_fw_cmd_create_rx_ctx(adapter); + if (err) + goto err_out_free; + err = nx_fw_cmd_create_tx_ctx(adapter); + if (err) + goto err_out_free; + } else { + err = netxen_init_old_ctx(adapter); + if (err) + goto err_out_free; + } + +done: + return 0; + +err_out_free: + netxen_free_hw_resources(adapter); + return err; +} + +void netxen_free_hw_resources(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + int ring; + + int port = adapter->portnum; + + if 
(!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state)) + goto done; + + nx_fw_cmd_destroy_rx_ctx(adapter); + nx_fw_cmd_destroy_tx_ctx(adapter); + } else { + netxen_api_lock(adapter); + NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), + NETXEN_CTX_D3_RESET | port); + netxen_api_unlock(adapter); + } + + /* Allow dma queues to drain after context reset */ + msleep(20); + +done: + recv_ctx = &adapter->recv_ctx; + + if (recv_ctx->hwctx != NULL) { + pci_free_consistent(adapter->pdev, + sizeof(struct netxen_ring_ctx) + + sizeof(uint32_t), + recv_ctx->hwctx, + recv_ctx->phys_addr); + recv_ctx->hwctx = NULL; + } + + tx_ring = adapter->tx_ring; + if (tx_ring->desc_head != NULL) { + pci_free_consistent(adapter->pdev, + TX_DESC_RINGSIZE(tx_ring), + tx_ring->desc_head, tx_ring->phys_addr); + tx_ring->desc_head = NULL; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + if (rds_ring->desc_head != NULL) { + pci_free_consistent(adapter->pdev, + RCV_DESC_RINGSIZE(rds_ring), + rds_ring->desc_head, + rds_ring->phys_addr); + rds_ring->desc_head = NULL; + } + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (sds_ring->desc_head != NULL) { + pci_free_consistent(adapter->pdev, + STATUS_DESC_RINGSIZE(sds_ring), + sds_ring->desc_head, + sds_ring->phys_addr); + sds_ring->desc_head = NULL; + } + } +} + diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c new file mode 100644 index 0000000..b34fb74 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -0,0 +1,835 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+struct netxen_nic_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_adapter *)0)->m), \
+			offsetof(struct netxen_adapter, m)
+
+#define NETXEN_NIC_PORT_WINDOW 0x10000
+#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF
+
+static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
+	{"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)},
+	{"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)},
+	{"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)},
+	{"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)},
+	{"csummed", NETXEN_NIC_STAT(stats.csummed)},
+	{"rx_pkts", NETXEN_NIC_STAT(stats.rx_pkts)},
+	{"lro_pkts", NETXEN_NIC_STAT(stats.lro_pkts)},
+	{"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)},
+	{"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)},
+};
+
+#define NETXEN_NIC_STATS_LEN	ARRAY_SIZE(netxen_nic_gstrings_stats)
+
+static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register_Test_on_offline",
+	"Link_Test_on_offline"
+};
+
+#define NETXEN_NIC_TEST_LEN	ARRAY_SIZE(netxen_nic_gstrings_test)
+
+#define NETXEN_NIC_REGS_COUNT 30
+#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
+#define NETXEN_MAX_EEPROM_LEN 1024
+
+static int netxen_nic_get_eeprom_len(struct net_device *dev)
+{
+	return NETXEN_FLASH_TOTAL_SIZE;
+}
+
+static void
+netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+	struct netxen_adapter *adapter = netdev_priv(dev);
+	u32 fw_major = 0;
+	u32 fw_minor = 0;
+	u32 fw_build = 0;
+
+	strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
+	strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+	fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+	fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+	fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+	sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
+	drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
+}
+
+static int
+netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct netxen_adapter *adapter = netdev_priv(dev);
+	int check_sfp_module = 0;
+
+	/* read which mode */
+	if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+				   SUPPORTED_10baseT_Full |
+				   SUPPORTED_100baseT_Half |
+				   SUPPORTED_100baseT_Full |
+				   SUPPORTED_1000baseT_Half |
+				   SUPPORTED_1000baseT_Full);
+
+		ecmd->advertising = (ADVERTISED_100baseT_Half |
+				     ADVERTISED_100baseT_Full |
+				     ADVERTISED_1000baseT_Half |
+				     ADVERTISED_1000baseT_Full);
+
+		ecmd->port = PORT_TP;
+
+		ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+		ecmd->duplex = adapter->link_duplex;
+		ecmd->autoneg = adapter->link_autoneg;
+
+	} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+		u32 val;
+
+		val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
+		if (val == NETXEN_PORT_MODE_802_3_AP) {
+			ecmd->supported = SUPPORTED_1000baseT_Full;
+			ecmd->advertising = ADVERTISED_1000baseT_Full;
+		} else {
+			ecmd->supported = SUPPORTED_10000baseT_Full;
+			ecmd->advertising = ADVERTISED_10000baseT_Full;
+		}
+
+		if (netif_running(dev) && adapter->has_link_events) {
+			ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+			ecmd->autoneg = adapter->link_autoneg;
+			ecmd->duplex = adapter->link_duplex;
+			goto skip;
+		}
+
+		ecmd->port = PORT_TP;
+
+		if
(NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + u16 pcifn = adapter->ahw.pci_func; + + val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn)); + ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ * + P3_LINK_SPEED_VAL(pcifn, val)); + } else + ethtool_cmd_speed_set(ecmd, SPEED_10000); + + ecmd->duplex = DUPLEX_FULL; + ecmd->autoneg = AUTONEG_DISABLE; + } else + return -EIO; + +skip: + ecmd->phy_address = adapter->physical_port; + ecmd->transceiver = XCVR_EXTERNAL; + + switch (adapter->ahw.board_type) { + case NETXEN_BRDTYPE_P2_SB35_4G: + case NETXEN_BRDTYPE_P2_SB31_2G: + case NETXEN_BRDTYPE_P3_REF_QG: + case NETXEN_BRDTYPE_P3_4_GB: + case NETXEN_BRDTYPE_P3_4_GB_MM: + + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + case NETXEN_BRDTYPE_P2_SB31_10G_CX4: + case NETXEN_BRDTYPE_P3_10G_CX4: + case NETXEN_BRDTYPE_P3_10G_CX4_LP: + case NETXEN_BRDTYPE_P3_10000_BASE_T: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->autoneg = (adapter->ahw.board_type == + NETXEN_BRDTYPE_P2_SB31_10G_CX4) ? + (AUTONEG_DISABLE) : (adapter->link_autoneg); + break; + case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: + case NETXEN_BRDTYPE_P3_IMEZ: + case NETXEN_BRDTYPE_P3_XG_LOM: + case NETXEN_BRDTYPE_P3_HMEZ: + ecmd->supported |= SUPPORTED_MII; + ecmd->advertising |= ADVERTISED_MII; + ecmd->port = PORT_MII; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: + case NETXEN_BRDTYPE_P3_10G_SFP_CT: + case NETXEN_BRDTYPE_P3_10G_SFP_QT: + ecmd->advertising |= ADVERTISED_TP; + ecmd->supported |= SUPPORTED_TP; + check_sfp_module = netif_running(dev) && + adapter->has_link_events; + case NETXEN_BRDTYPE_P2_SB31_10G: + case NETXEN_BRDTYPE_P3_10G_XFP: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case NETXEN_BRDTYPE_P3_10G_TP: + if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + ecmd->advertising |= + (ADVERTISED_FIBRE | ADVERTISED_TP); + ecmd->port = PORT_FIBRE; + check_sfp_module = netif_running(dev) && + adapter->has_link_events; + } else { + ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg); + ecmd->advertising |= + (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->port = PORT_TP; + } + break; + default: + printk(KERN_ERR "netxen-nic: Unsupported board model %d\n", + adapter->ahw.board_type); + return -EIO; + } + + if (check_sfp_module) { + switch (adapter->module_type) { + case LINKEVENT_MODULE_OPTICAL_UNKNOWN: + case LINKEVENT_MODULE_OPTICAL_SRLR: + case LINKEVENT_MODULE_OPTICAL_LRM: + case LINKEVENT_MODULE_OPTICAL_SFP_1G: + ecmd->port = PORT_FIBRE; + break; + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: + case LINKEVENT_MODULE_TWINAX: + ecmd->port = PORT_TP; + break; + default: + ecmd->port = -1; + } + } + + return 0; +} + +static int +netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(ecmd); + int ret; + + if (adapter->ahw.port_type != NETXEN_NIC_GBE) + return -EOPNOTSUPP; + + if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG)) + return -EOPNOTSUPP; + + ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex, + ecmd->autoneg); + if (ret == NX_RCODE_NOT_SUPPORTED) + return -EOPNOTSUPP; + else if (ret) + return -EIO; + + 
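+	/* command accepted: cache the new settings, then bounce the
+	 * interface (ndo_stop/ndo_open below) so they take effect */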
adapter->link_speed = speed; + adapter->link_duplex = ecmd->duplex; + adapter->link_autoneg = ecmd->autoneg; + + if (!netif_running(dev)) + return 0; + + dev->netdev_ops->ndo_stop(dev); + return dev->netdev_ops->ndo_open(dev); +} + +static int netxen_nic_get_regs_len(struct net_device *dev) +{ + return NETXEN_NIC_REGS_LEN; +} + +static void +netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + struct nx_host_sds_ring *sds_ring; + u32 *regs_buff = p; + int ring, i = 0; + int port = adapter->physical_port; + + memset(p, 0, NETXEN_NIC_REGS_LEN); + + regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | + (adapter->pdev)->device; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return; + + regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE); + regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE); + regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1); + regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg); + regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE); + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2); + + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c); + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c); + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c); + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c); + i += 2; + + regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3); + regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); + + } else { + i++; + + regs_buff[i++] = NXRD32(adapter, + NETXEN_NIU_XGE_CONFIG_0+(0x10000*port)); + regs_buff[i++] = NXRD32(adapter, + NETXEN_NIU_XGE_CONFIG_1+(0x10000*port)); + + regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE); + regs_buff[i++] = NXRDIO(adapter, + adapter->tx_ring->crb_cmd_consumer); + } + + regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer); + + regs_buff[i++] = NXRDIO(adapter, + recv_ctx->rds_rings[0].crb_rcv_producer); + regs_buff[i++] = NXRDIO(adapter, + recv_ctx->rds_rings[1].crb_rcv_producer); + + regs_buff[i++] = adapter->max_sds_rings; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &(recv_ctx->sds_rings[ring]); + regs_buff[i++] = NXRDIO(adapter, + sds_ring->crb_sts_consumer); + } +} + +static u32 netxen_nic_test_link(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 val, port; + + port = adapter->physical_port; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + val = NXRD32(adapter, CRB_XG_STATE_P3); + val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); + return (val == XG_LINK_UP_P3) ? 0 : 1; + } else { + val = NXRD32(adapter, CRB_XG_STATE); + val = (val >> port*8) & 0xff; + return (val == XG_LINK_UP) ? 
0 : 1; + } +} + +static int +netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 * bytes) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int offset; + int ret; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = (adapter->pdev)->vendor | + ((adapter->pdev)->device << 16); + offset = eeprom->offset; + + ret = netxen_rom_fast_read_words(adapter, offset, bytes, + eeprom->len); + if (ret < 0) + return ret; + + return 0; +} + +static void +netxen_nic_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + + ring->rx_pending = adapter->num_rxd; + ring->rx_jumbo_pending = adapter->num_jumbo_rxd; + ring->rx_jumbo_pending += adapter->num_lro_rxd; + ring->tx_pending = adapter->num_txd; + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G; + ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G; + } else { + ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G; + ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G; + } + + ring->tx_max_pending = MAX_CMD_DESCRIPTORS; + + ring->rx_mini_max_pending = 0; + ring->rx_mini_pending = 0; +} + +static u32 +netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) +{ + u32 num_desc; + num_desc = max(val, min); + num_desc = min(num_desc, max); + num_desc = roundup_pow_of_two(num_desc); + + if (val != num_desc) { + printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", + netxen_nic_driver_name, r_name, num_desc, val); + } + + return num_desc; +} + +static int +netxen_nic_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G; + u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; + u16 num_rxd, num_jumbo_rxd, num_txd; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return -EOPNOTSUPP; + + if (ring->rx_mini_pending) + return -EOPNOTSUPP; + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + max_rcv_desc = MAX_RCV_DESCRIPTORS_1G; + max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; + } + + num_rxd = netxen_validate_ringparam(ring->rx_pending, + MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx"); + + num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending, + MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo"); + + num_txd = netxen_validate_ringparam(ring->tx_pending, + MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); + + if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && + num_jumbo_rxd == adapter->num_jumbo_rxd) + return 0; + + adapter->num_rxd = num_rxd; + adapter->num_jumbo_rxd = num_jumbo_rxd; + adapter->num_txd = num_txd; + + return netxen_nic_reset_context(adapter); +} + +static void +netxen_nic_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + __u32 val; + int port = adapter->physical_port; + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) + return; + /* get flow control settings */ + val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); + pause->rx_pause = netxen_gb_get_rx_flowctl(val); + val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); + switch (port) { + case 0: + pause->tx_pause = !(netxen_gb_get_gb0_mask(val)); + break; + case 1: + pause->tx_pause = !(netxen_gb_get_gb1_mask(val)); + break; + case 2: + pause->tx_pause = !(netxen_gb_get_gb2_mask(val)); + break; + case 3: + default: + 
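+			/* port 3, or any unexpected port index, reads the
+			 * GB3 pause mask */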
pause->tx_pause = !(netxen_gb_get_gb3_mask(val)); + break; + } + } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) + return; + pause->rx_pause = 1; + val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); + if (port == 0) + pause->tx_pause = !(netxen_xg_get_xg0_mask(val)); + else + pause->tx_pause = !(netxen_xg_get_xg1_mask(val)); + } else { + printk(KERN_ERR"%s: Unknown board type: %x\n", + netxen_nic_driver_name, adapter->ahw.port_type); + } +} + +static int +netxen_nic_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + __u32 val; + int port = adapter->physical_port; + /* read mode */ + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) + return -EIO; + /* set flow control */ + val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); + + if (pause->rx_pause) + netxen_gb_rx_flowctl(val); + else + netxen_gb_unset_rx_flowctl(val); + + NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), + val); + /* set autoneg */ + val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); + switch (port) { + case 0: + if (pause->tx_pause) + netxen_gb_unset_gb0_mask(val); + else + netxen_gb_set_gb0_mask(val); + break; + case 1: + if (pause->tx_pause) + netxen_gb_unset_gb1_mask(val); + else + netxen_gb_set_gb1_mask(val); + break; + case 2: + if (pause->tx_pause) + netxen_gb_unset_gb2_mask(val); + else + netxen_gb_set_gb2_mask(val); + break; + case 3: + default: + if (pause->tx_pause) + netxen_gb_unset_gb3_mask(val); + else + netxen_gb_set_gb3_mask(val); + break; + } + NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); + } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) + return -EIO; + val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); + if (port == 0) { + if (pause->tx_pause) + netxen_xg_unset_xg0_mask(val); + else + netxen_xg_set_xg0_mask(val); + } else { + if (pause->tx_pause) + netxen_xg_unset_xg1_mask(val); + else + netxen_xg_set_xg1_mask(val); + } + NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val); + } else { + printk(KERN_ERR "%s: Unknown board type: %x\n", + netxen_nic_driver_name, + adapter->ahw.port_type); + } + return 0; +} + +static int netxen_nic_reg_test(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 data_read, data_written; + + data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0)); + if ((data_read & 0xffff) != adapter->pdev->vendor) + return 1; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return 0; + + data_written = (u32)0xa5a5a5a5; + + NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written); + data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST); + if (data_written != data_read) + return 1; + + return 0; +} + +static int netxen_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return NETXEN_NIC_TEST_LEN; + case ETH_SS_STATS: + return NETXEN_NIC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void +netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, + u64 * data) +{ + memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN); + if ((data[0] = netxen_nic_reg_test(dev))) + eth_test->flags |= ETH_TEST_FL_FAILED; + /* link test */ + if ((data[1] = (u64) netxen_nic_test_link(dev))) + eth_test->flags |= ETH_TEST_FL_FAILED; +} + +static void +netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + int index; + + switch (stringset) { + case 
ETH_SS_TEST: + memcpy(data, *netxen_nic_gstrings_test, + NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { + memcpy(data + index * ETH_GSTRING_LEN, + netxen_nic_gstrings_stats[index].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static void +netxen_nic_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int index; + + for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { + char *p = + (char *)adapter + + netxen_nic_gstrings_stats[index].stat_offset; + data[index] = + (netxen_nic_gstrings_stats[index].sizeof_stat == + sizeof(u64)) ? *(u64 *) p : *(u32 *) p; + } +} + +static void +netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 wol_cfg = 0; + + wol->supported = 0; + wol->wolopts = 0; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); + if (wol_cfg & (1UL << adapter->portnum)) + wol->supported |= WAKE_MAGIC; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); + if (wol_cfg & (1UL << adapter->portnum)) + wol->wolopts |= WAKE_MAGIC; +} + +static int +netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 wol_cfg = 0; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return -EOPNOTSUPP; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); + if (!(wol_cfg & (1 << adapter->portnum))) + return -EOPNOTSUPP; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); + if (wol->wolopts & WAKE_MAGIC) + wol_cfg |= 1UL << adapter->portnum; + else + wol_cfg &= ~(1UL << adapter->portnum); + NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg); + + return 0; +} + +/* + * Set the coalescing parameters. Currently only normal is supported. + * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the + * firmware coalescing to default. + */ +static int netxen_set_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return -EINVAL; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return -EINVAL; + + /* + * Return Error if unsupported values or + * unsupported parameters are set. 
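+ *
+ * Usage sketch (assuming the standard ethtool CLI; "ethX" is a
+ * placeholder):
+ *
+ *	ethtool -C ethX rx-usecs 3 rx-frames 64
+ *
+ * maps to rx_coalesce_usecs == 3 and rx_max_coalesced_frames == 64;
+ * passing 0 for either knob restores the firmware defaults as
+ * described above.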
+ */ + if (ethcoal->rx_coalesce_usecs > 0xffff || + ethcoal->rx_max_coalesced_frames > 0xffff || + ethcoal->tx_coalesce_usecs > 0xffff || + ethcoal->tx_max_coalesced_frames > 0xffff || + ethcoal->rx_coalesce_usecs_irq || + ethcoal->rx_max_coalesced_frames_irq || + ethcoal->tx_coalesce_usecs_irq || + ethcoal->tx_max_coalesced_frames_irq || + ethcoal->stats_block_coalesce_usecs || + ethcoal->use_adaptive_rx_coalesce || + ethcoal->use_adaptive_tx_coalesce || + ethcoal->pkt_rate_low || + ethcoal->rx_coalesce_usecs_low || + ethcoal->rx_max_coalesced_frames_low || + ethcoal->tx_coalesce_usecs_low || + ethcoal->tx_max_coalesced_frames_low || + ethcoal->pkt_rate_high || + ethcoal->rx_coalesce_usecs_high || + ethcoal->rx_max_coalesced_frames_high || + ethcoal->tx_coalesce_usecs_high || + ethcoal->tx_max_coalesced_frames_high) + return -EINVAL; + + if (!ethcoal->rx_coalesce_usecs || + !ethcoal->rx_max_coalesced_frames) { + adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; + adapter->coal.normal.data.rx_time_us = + NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; + adapter->coal.normal.data.rx_packets = + NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; + } else { + adapter->coal.flags = 0; + adapter->coal.normal.data.rx_time_us = + ethcoal->rx_coalesce_usecs; + adapter->coal.normal.data.rx_packets = + ethcoal->rx_max_coalesced_frames; + } + adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs; + adapter->coal.normal.data.tx_packets = + ethcoal->tx_max_coalesced_frames; + + netxen_config_intr_coalesce(adapter); + + return 0; +} + +static int netxen_get_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return -EINVAL; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return -EINVAL; + + ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us; + ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us; + ethcoal->rx_max_coalesced_frames = + adapter->coal.normal.data.rx_packets; + ethcoal->tx_max_coalesced_frames = + adapter->coal.normal.data.tx_packets; + + return 0; +} + +const struct ethtool_ops netxen_nic_ethtool_ops = { + .get_settings = netxen_nic_get_settings, + .set_settings = netxen_nic_set_settings, + .get_drvinfo = netxen_nic_get_drvinfo, + .get_regs_len = netxen_nic_get_regs_len, + .get_regs = netxen_nic_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = netxen_nic_get_eeprom_len, + .get_eeprom = netxen_nic_get_eeprom, + .get_ringparam = netxen_nic_get_ringparam, + .set_ringparam = netxen_nic_set_ringparam, + .get_pauseparam = netxen_nic_get_pauseparam, + .set_pauseparam = netxen_nic_set_pauseparam, + .get_wol = netxen_nic_get_wol, + .set_wol = netxen_nic_set_wol, + .self_test = netxen_nic_diag_test, + .get_strings = netxen_nic_get_strings, + .get_ethtool_stats = netxen_nic_get_ethtool_stats, + .get_sset_count = netxen_get_sset_count, + .get_coalesce = netxen_get_intr_coalesce, + .set_coalesce = netxen_set_intr_coalesce, +}; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h new file mode 100644 index 0000000..dc1967c --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h @@ -0,0 +1,1050 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". + * + */ + +#ifndef __NETXEN_NIC_HDR_H_ +#define __NETXEN_NIC_HDR_H_ + +#include <linux/module.h> +#include <linux/kernel.h> + +/* + * The basic unit of access when reading/writing control registers. + */ + +typedef __le32 netxen_crbword_t; /* single word in CRB space */ + +enum { + NETXEN_HW_H0_CH_HUB_ADR = 0x05, + NETXEN_HW_H1_CH_HUB_ADR = 0x0E, + NETXEN_HW_H2_CH_HUB_ADR = 0x03, + NETXEN_HW_H3_CH_HUB_ADR = 0x01, + NETXEN_HW_H4_CH_HUB_ADR = 0x06, + NETXEN_HW_H5_CH_HUB_ADR = 0x07, + NETXEN_HW_H6_CH_HUB_ADR = 0x08 +}; + +/* Hub 0 */ +enum { + NETXEN_HW_MN_CRB_AGT_ADR = 0x15, + NETXEN_HW_MS_CRB_AGT_ADR = 0x25 +}; + +/* Hub 1 */ +enum { + NETXEN_HW_PS_CRB_AGT_ADR = 0x73, + NETXEN_HW_SS_CRB_AGT_ADR = 0x20, + NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b, + NETXEN_HW_QMS_CRB_AGT_ADR = 0x00, + NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01, + NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02, + NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03, + NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04, + NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58, + NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59, + NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a, + NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a, + NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c, + NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f, + NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12, + NETXEN_HW_SMB_CRB_AGT_ADR = 0x18 +}; + +/* Hub 2 */ +enum { + NETXEN_HW_NIU_CRB_AGT_ADR = 0x31, + NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19, + NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29, + + NETXEN_HW_SN_CRB_AGT_ADR = 0x10, + NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20, + NETXEN_HW_LPC_CRB_AGT_ADR = 0x22, + NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21, + NETXEN_HW_QM_CRB_AGT_ADR = 0x66, + NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60, + NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61, + NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62, + NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63, + NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09, + NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d, + NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e, + NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11 +}; + +/* Hub 3 */ +enum { + NETXEN_HW_PH_CRB_AGT_ADR = 0x1A, + NETXEN_HW_SRE_CRB_AGT_ADR = 0x50, + NETXEN_HW_EG_CRB_AGT_ADR = 0x51, + NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08 +}; + +/* Hub 4 */ +enum { + NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40, + NETXEN_HW_PEGN1_CRB_AGT_ADR, + NETXEN_HW_PEGN2_CRB_AGT_ADR, + NETXEN_HW_PEGN3_CRB_AGT_ADR, + NETXEN_HW_PEGNI_CRB_AGT_ADR, + NETXEN_HW_PEGND_CRB_AGT_ADR, + NETXEN_HW_PEGNC_CRB_AGT_ADR, + NETXEN_HW_PEGR0_CRB_AGT_ADR, + NETXEN_HW_PEGR1_CRB_AGT_ADR, + NETXEN_HW_PEGR2_CRB_AGT_ADR, + NETXEN_HW_PEGR3_CRB_AGT_ADR, + NETXEN_HW_PEGN4_CRB_AGT_ADR +}; + +/* Hub 5 */ +enum { + NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40, + NETXEN_HW_PEGS1_CRB_AGT_ADR, + NETXEN_HW_PEGS2_CRB_AGT_ADR, + NETXEN_HW_PEGS3_CRB_AGT_ADR, + NETXEN_HW_PEGSI_CRB_AGT_ADR, + NETXEN_HW_PEGSD_CRB_AGT_ADR, + NETXEN_HW_PEGSC_CRB_AGT_ADR +}; + +/* Hub 6 */ +enum { + NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46,
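+	/*
+	 * Worked example of the hub/agent packing used by the
+	 * NETXEN_HW_CRB_HUB_AGT_ADR_* macros below: a pair packs as
+	 * (hub << 7) | agent, so the NIU agent (0x31) on hub 2 (0x03)
+	 * gives (0x03 << 7) | 0x31 = 0x1b1, which ends up in bits
+	 * [31:20] of the CRB address.
+	 */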
+ NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47, + NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48, + NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49, + NETXEN_HW_NCM_CRB_AGT_ADR = 0x16, + NETXEN_HW_TMR_CRB_AGT_ADR = 0x17, + NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05, + NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06, + NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07 +}; + +/* Floaters - non existent modules */ +#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 + +/* This field defines PCI/X adr [25:20] of agents on the CRB */ +enum { + NETXEN_HW_PX_MAP_CRB_PH = 0, + NETXEN_HW_PX_MAP_CRB_PS, + NETXEN_HW_PX_MAP_CRB_MN, + NETXEN_HW_PX_MAP_CRB_MS, + NETXEN_HW_PX_MAP_CRB_PGR1, + NETXEN_HW_PX_MAP_CRB_SRE, + NETXEN_HW_PX_MAP_CRB_NIU, + NETXEN_HW_PX_MAP_CRB_QMN, + NETXEN_HW_PX_MAP_CRB_SQN0, + NETXEN_HW_PX_MAP_CRB_SQN1, + NETXEN_HW_PX_MAP_CRB_SQN2, + NETXEN_HW_PX_MAP_CRB_SQN3, + NETXEN_HW_PX_MAP_CRB_QMS, + NETXEN_HW_PX_MAP_CRB_SQS0, + NETXEN_HW_PX_MAP_CRB_SQS1, + NETXEN_HW_PX_MAP_CRB_SQS2, + NETXEN_HW_PX_MAP_CRB_SQS3, + NETXEN_HW_PX_MAP_CRB_PGN0, + NETXEN_HW_PX_MAP_CRB_PGN1, + NETXEN_HW_PX_MAP_CRB_PGN2, + NETXEN_HW_PX_MAP_CRB_PGN3, + NETXEN_HW_PX_MAP_CRB_PGND, + NETXEN_HW_PX_MAP_CRB_PGNI, + NETXEN_HW_PX_MAP_CRB_PGS0, + NETXEN_HW_PX_MAP_CRB_PGS1, + NETXEN_HW_PX_MAP_CRB_PGS2, + NETXEN_HW_PX_MAP_CRB_PGS3, + NETXEN_HW_PX_MAP_CRB_PGSD, + NETXEN_HW_PX_MAP_CRB_PGSI, + NETXEN_HW_PX_MAP_CRB_SN, + NETXEN_HW_PX_MAP_CRB_PGR2, + NETXEN_HW_PX_MAP_CRB_EG, + NETXEN_HW_PX_MAP_CRB_PH2, + NETXEN_HW_PX_MAP_CRB_PS2, + NETXEN_HW_PX_MAP_CRB_CAM, + NETXEN_HW_PX_MAP_CRB_CAS0, + NETXEN_HW_PX_MAP_CRB_CAS1, + NETXEN_HW_PX_MAP_CRB_CAS2, + NETXEN_HW_PX_MAP_CRB_C2C0, + NETXEN_HW_PX_MAP_CRB_C2C1, + NETXEN_HW_PX_MAP_CRB_TIMR, + NETXEN_HW_PX_MAP_CRB_PGR3, + NETXEN_HW_PX_MAP_CRB_RPMX1, + NETXEN_HW_PX_MAP_CRB_RPMX2, + NETXEN_HW_PX_MAP_CRB_RPMX3, + NETXEN_HW_PX_MAP_CRB_RPMX4, + NETXEN_HW_PX_MAP_CRB_RPMX5, + NETXEN_HW_PX_MAP_CRB_RPMX6, + NETXEN_HW_PX_MAP_CRB_RPMX7, + NETXEN_HW_PX_MAP_CRB_XDMA, + NETXEN_HW_PX_MAP_CRB_I2Q, + NETXEN_HW_PX_MAP_CRB_ROMUSB, + NETXEN_HW_PX_MAP_CRB_CAS3, + NETXEN_HW_PX_MAP_CRB_RPMX0, + NETXEN_HW_PX_MAP_CRB_RPMX8, + NETXEN_HW_PX_MAP_CRB_RPMX9, + NETXEN_HW_PX_MAP_CRB_OCM0, + NETXEN_HW_PX_MAP_CRB_OCM1, + NETXEN_HW_PX_MAP_CRB_SMB, + NETXEN_HW_PX_MAP_CRB_I2C0, + NETXEN_HW_PX_MAP_CRB_I2C1, + NETXEN_HW_PX_MAP_CRB_LPC, + NETXEN_HW_PX_MAP_CRB_PGNC, + NETXEN_HW_PX_MAP_CRB_PGR0 +}; + +/* This field defines CRB adr [31:20] of the agents */ + +#define NETXEN_HW_CRB_HUB_AGT_ADR_MN \ + ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PH \ + ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_MS \ + ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_PS \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SS \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR) +#define 
NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU \ + ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \ + ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \ + ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_EG \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \ + 
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SN \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR) + +#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c) +#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034) +#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014) +#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000) +#define NETXEN_SRE_L2RE_CTL (NETXEN_CRB_SRE + 0x05000) +#define NETXEN_SRE_BUF_CTL (NETXEN_CRB_SRE + 0x01000) + +#define NETXEN_DMA_BASE(U) (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16)) +#define NETXEN_DMA_COMMAND(U) (NETXEN_DMA_BASE(U) + 0x00008) + +#define NETXEN_I2Q_CLR_PCI_HI (NETXEN_CRB_I2Q + 0x00034) + +#define PEG_NETWORK_BASE(N) (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20)) +#define CRB_REG_EX_PC 0x3c + +#define ROMUSB_GLB (NETXEN_CRB_ROMUSB + 0x00000) +#define ROMUSB_ROM (NETXEN_CRB_ROMUSB + 0x10000) + +#define NETXEN_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) +#define NETXEN_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) +#define NETXEN_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) +#define NETXEN_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) +#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) +#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) +#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) + +#define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) + +#define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) +#define NETXEN_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) +#define 
NETXEN_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) +#define NETXEN_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) +#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) +#define NETXEN_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) + +/* Lock IDs for ROM lock */ +#define ROM_LOCK_DRIVER 0x0d417340 + +/****************************************************************************** +* +* Definitions specific to M25P flash +* +******************************************************************************* +* Instructions +*/ +#define M25P_INSTR_WREN 0x06 +#define M25P_INSTR_WRDI 0x04 +#define M25P_INSTR_RDID 0x9f +#define M25P_INSTR_RDSR 0x05 +#define M25P_INSTR_WRSR 0x01 +#define M25P_INSTR_READ 0x03 +#define M25P_INSTR_FAST_READ 0x0b +#define M25P_INSTR_PP 0x02 +#define M25P_INSTR_SE 0xd8 +#define M25P_INSTR_BE 0xc7 +#define M25P_INSTR_DP 0xb9 +#define M25P_INSTR_RES 0xab + +/* all are 1MB windows */ + +#define NETXEN_PCI_CRB_WINDOWSIZE 0x00100000 +#define NETXEN_PCI_CRB_WINDOW(A) \ + (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE) + +#define NETXEN_CRB_NIU NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU) +#define NETXEN_CRB_SRE NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE) +#define NETXEN_CRB_ROMUSB \ + NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) +#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) +#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0) +#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) +#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) + +#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH) +#define NETXEN_CRB_PCIX_HOST2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2) +#define NETXEN_CRB_PEG_NET_0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0) +#define NETXEN_CRB_PEG_NET_1 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1) +#define NETXEN_CRB_PEG_NET_2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2) +#define NETXEN_CRB_PEG_NET_3 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3) +#define NETXEN_CRB_PEG_NET_4 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SQS2) +#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND) +#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI) +#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN) +#define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN) + +#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS) +#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD + +#define ISR_INT_VECTOR (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR)) +#define ISR_INT_MASK (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_MASK_SLOW (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_TARGET_STATUS (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS)) +#define ISR_INT_TARGET_MASK (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK)) +#define ISR_INT_TARGET_STATUS_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) +#define ISR_INT_TARGET_MASK_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) +#define ISR_INT_TARGET_STATUS_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) +#define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) +#define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) +#define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) +#define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) +#define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) +#define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) +#define 
ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) +#define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) +#define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) +#define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) +#define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) + +#define NETXEN_PCI_MAPSIZE 128 +#define NETXEN_PCI_DDR_NET (0x00000000UL) +#define NETXEN_PCI_QDR_NET (0x04000000UL) +#define NETXEN_PCI_DIRECT_CRB (0x04400000UL) +#define NETXEN_PCI_CAMQM (0x04800000UL) +#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL) +#define NETXEN_PCI_OCM0 (0x05000000UL) +#define NETXEN_PCI_OCM0_MAX (0x050fffffUL) +#define NETXEN_PCI_OCM1 (0x05100000UL) +#define NETXEN_PCI_OCM1_MAX (0x051fffffUL) +#define NETXEN_PCI_CRBSPACE (0x06000000UL) +#define NETXEN_PCI_128MB_SIZE (0x08000000UL) +#define NETXEN_PCI_32MB_SIZE (0x02000000UL) +#define NETXEN_PCI_2MB_SIZE (0x00200000UL) + +#define NETXEN_PCI_MN_2M (0) +#define NETXEN_PCI_MS_2M (0x80000) +#define NETXEN_PCI_OCM0_2M (0x000c0000UL) +#define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL) +#define NETXEN_PCI_CAMQM_2M_END (0x04800800UL) + +#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM) + +#define NETXEN_ADDR_DDR_NET (0x0000000000000000ULL) +#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL) +#define NETXEN_ADDR_OCM0 (0x0000000200000000ULL) +#define NETXEN_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL) +#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL) +#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL) +#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL) +#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL) + +/* + * Register offsets for MN + */ +#define NETXEN_MIU_CONTROL (0x000) +#define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL) + + /* 200ms delay in each loop */ +#define NETXEN_NIU_PHY_WAITLEN 200000 + /* 10 seconds before we give up */ +#define NETXEN_NIU_PHY_WAITMAX 50 +#define NETXEN_NIU_MAX_GBE_PORTS 4 +#define NETXEN_NIU_MAX_XG_PORTS 2 + +#define NETXEN_NIU_MODE (NETXEN_CRB_NIU + 0x00000) + +#define NETXEN_NIU_XG_SINGLE_TERM (NETXEN_CRB_NIU + 0x00004) +#define NETXEN_NIU_XG_DRIVE_HI (NETXEN_CRB_NIU + 0x00008) +#define NETXEN_NIU_XG_DRIVE_LO (NETXEN_CRB_NIU + 0x0000c) +#define NETXEN_NIU_XG_DTX (NETXEN_CRB_NIU + 0x00010) +#define NETXEN_NIU_XG_DEQ (NETXEN_CRB_NIU + 0x00014) +#define NETXEN_NIU_XG_WORD_ALIGN (NETXEN_CRB_NIU + 0x00018) +#define NETXEN_NIU_XG_RESET (NETXEN_CRB_NIU + 0x0001c) +#define NETXEN_NIU_XG_POWER_DOWN (NETXEN_CRB_NIU + 0x00020) +#define NETXEN_NIU_XG_RESET_PLL (NETXEN_CRB_NIU + 0x00024) +#define NETXEN_NIU_XG_SERDES_LOOPBACK (NETXEN_CRB_NIU + 0x00028) +#define NETXEN_NIU_XG_DO_BYTE_ALIGN (NETXEN_CRB_NIU + 0x0002c) +#define NETXEN_NIU_XG_TX_ENABLE (NETXEN_CRB_NIU + 0x00030) +#define NETXEN_NIU_XG_RX_ENABLE (NETXEN_CRB_NIU + 0x00034) +#define NETXEN_NIU_XG_STATUS (NETXEN_CRB_NIU + 0x00038) +#define NETXEN_NIU_XG_PAUSE_THRESHOLD (NETXEN_CRB_NIU + 0x0003c) +#define NETXEN_NIU_INT_MASK (NETXEN_CRB_NIU + 0x00040) +#define NETXEN_NIU_ACTIVE_INT (NETXEN_CRB_NIU + 0x00044) +#define NETXEN_NIU_MASKABLE_INT (NETXEN_CRB_NIU + 0x00048) + +#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER (NETXEN_CRB_NIU + 0x0004c) + +#define NETXEN_NIU_GB_SERDES_RESET (NETXEN_CRB_NIU + 0x00050) +#define NETXEN_NIU_GB0_GMII_MODE (NETXEN_CRB_NIU + 0x00054) +#define NETXEN_NIU_GB0_MII_MODE (NETXEN_CRB_NIU + 0x00058) +#define NETXEN_NIU_GB1_GMII_MODE (NETXEN_CRB_NIU + 0x0005c) 
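+/* Each GbE port pairs a GMII and an MII mode select at consecutive
+ * 4-byte offsets: GB0 at 0x54/0x58, GB1 at 0x5c/0x60, and so on. */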
+#define NETXEN_NIU_GB1_MII_MODE (NETXEN_CRB_NIU + 0x00060) +#define NETXEN_NIU_GB2_GMII_MODE (NETXEN_CRB_NIU + 0x00064) +#define NETXEN_NIU_GB2_MII_MODE (NETXEN_CRB_NIU + 0x00068) +#define NETXEN_NIU_GB3_GMII_MODE (NETXEN_CRB_NIU + 0x0006c) +#define NETXEN_NIU_GB3_MII_MODE (NETXEN_CRB_NIU + 0x00070) +#define NETXEN_NIU_REMOTE_LOOPBACK (NETXEN_CRB_NIU + 0x00074) +#define NETXEN_NIU_GB0_HALF_DUPLEX (NETXEN_CRB_NIU + 0x00078) +#define NETXEN_NIU_GB1_HALF_DUPLEX (NETXEN_CRB_NIU + 0x0007c) +#define NETXEN_NIU_RESET_SYS_FIFOS (NETXEN_CRB_NIU + 0x00088) +#define NETXEN_NIU_GB_CRC_DROP (NETXEN_CRB_NIU + 0x0008c) +#define NETXEN_NIU_GB_DROP_WRONGADDR (NETXEN_CRB_NIU + 0x00090) +#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094) +#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098) +#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc) +#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac) +#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0) +#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128) +#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c) + +#define NETXEN_NIU_FULL_LEVEL_XG (NETXEN_CRB_NIU + 0x00450) + +#define NETXEN_NIU_XG1_RESET (NETXEN_CRB_NIU + 0x0011c) +#define NETXEN_NIU_XG1_POWER_DOWN (NETXEN_CRB_NIU + 0x00120) +#define NETXEN_NIU_XG1_RESET_PLL (NETXEN_CRB_NIU + 0x00124) + +#define NETXEN_MAC_ADDR_CNTL_REG (NETXEN_CRB_NIU + 0x1000) + +#define NETXEN_MULTICAST_ADDR_HI_0 (NETXEN_CRB_NIU + 0x1010) +#define NETXEN_MULTICAST_ADDR_HI_1 (NETXEN_CRB_NIU + 0x1014) +#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018) +#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c) + +#define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080) +#define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100) + +#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \ + (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000) +#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \ + (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000) +#define NETXEN_NIU_GB_MAC_IPG_IFG(I) \ + (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000) +#define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \ + (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000) +#define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \ + (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000) +#define NETXEN_NIU_GB_TEST_REG(I) \ + (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_CONFIG(I) \ + (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_COMMAND(I) \ + (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_ADDR(I) \ + (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_CTRL(I) \ + (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_STATUS(I) \ + (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_INDICATE(I) \ + (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000) +#define NETXEN_NIU_GB_INTERFACE_CTRL(I) \ + (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000) +#define NETXEN_NIU_GB_INTERFACE_STATUS(I) \ + (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000) +#define NETXEN_NIU_GB_STATION_ADDR_0(I) \ + (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000) +#define NETXEN_NIU_GB_STATION_ADDR_1(I) \ + (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000) + +#define NETXEN_NIU_XGE_CONFIG_0 (NETXEN_CRB_NIU + 0x70000) +#define NETXEN_NIU_XGE_CONFIG_1 (NETXEN_CRB_NIU + 0x70004) +#define NETXEN_NIU_XGE_IPG (NETXEN_CRB_NIU + 0x70008) +#define NETXEN_NIU_XGE_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x7000c) +#define NETXEN_NIU_XGE_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x70010) +#define NETXEN_NIU_XGE_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x70014) +#define 
NETXEN_NIU_XGE_STATUS (NETXEN_CRB_NIU + 0x70018) +#define NETXEN_NIU_XGE_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x7001c) +#define NETXEN_NIU_XGE_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x70020) +#define NETXEN_NIU_XGE_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x70024) +#define NETXEN_NIU_XGE_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x70028) +#define NETXEN_NIU_XGE_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x7002c) +#define NETXEN_NIU_XGE_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x70030) +#define NETXEN_NIU_XGE_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x70034) +#define NETXEN_NIU_XGE_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x70038) +#define NETXEN_NIU_XGE_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x7003c) +#define NETXEN_NIU_XGE_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x70040) +#define NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70044) +#define NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70048) +#define NETXEN_NIU_XGE_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x7004c) +#define NETXEN_NIU_XGE_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x70050) +#define NETXEN_NIU_XGE_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x70054) +#define NETXEN_NIU_XGE_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x70058) +#define NETXEN_NIU_XG1_CONFIG_0 (NETXEN_CRB_NIU + 0x80000) +#define NETXEN_NIU_XG1_CONFIG_1 (NETXEN_CRB_NIU + 0x80004) +#define NETXEN_NIU_XG1_IPG (NETXEN_CRB_NIU + 0x80008) +#define NETXEN_NIU_XG1_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x8000c) +#define NETXEN_NIU_XG1_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x80010) +#define NETXEN_NIU_XG1_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x80014) +#define NETXEN_NIU_XG1_STATUS (NETXEN_CRB_NIU + 0x80018) +#define NETXEN_NIU_XG1_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x8001c) +#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x80020) +#define NETXEN_NIU_XG1_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x80024) +#define NETXEN_NIU_XG1_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x80028) +#define NETXEN_NIU_XG1_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x8002c) +#define NETXEN_NIU_XG1_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x80030) +#define NETXEN_NIU_XG1_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x80034) +#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x80038) +#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x8003c) +#define NETXEN_NIU_XG1_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x80040) +#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80044) +#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80048) +#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x8004c) +#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x80050) +#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054) +#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058) + +/* P3 802.3ap */ +#define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000) +#define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000) +#define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000) +#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000) +#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000) +#define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) 
(NETXEN_CRB_NIU+0xa0034+(I)*0x10000) +#define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000) +#define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000) +#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000) +#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000) + + +#define TEST_AGT_CTRL (0x00) + +#define TA_CTL_START 1 +#define TA_CTL_ENABLE 2 +#define TA_CTL_WRITE 4 +#define TA_CTL_BUSY 8 + +/* + * Register offsets for MN + */ +#define MIU_TEST_AGT_BASE (0x90) + +#define MIU_TEST_AGT_ADDR_LO (0x04) +#define MIU_TEST_AGT_ADDR_HI (0x08) +#define MIU_TEST_AGT_WRDATA_LO (0x10) +#define MIU_TEST_AGT_WRDATA_HI (0x14) +#define MIU_TEST_AGT_RDDATA_LO (0x18) +#define MIU_TEST_AGT_RDDATA_HI (0x1c) + +#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 +#define MIU_TEST_AGT_UPPER_ADDR(off) (0) + +/* + * Register offsets for MS + */ +#define SIU_TEST_AGT_BASE (0x60) + +#define SIU_TEST_AGT_ADDR_LO (0x04) +#define SIU_TEST_AGT_ADDR_HI (0x18) +#define SIU_TEST_AGT_WRDATA_LO (0x08) +#define SIU_TEST_AGT_WRDATA_HI (0x0c) +#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i))) +#define SIU_TEST_AGT_RDDATA_LO (0x10) +#define SIU_TEST_AGT_RDDATA_HI (0x14) +#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i))) + +#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 +#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) + +/* XG Link status */ +#define XG_LINK_UP 0x10 +#define XG_LINK_DOWN 0x20 + +#define XG_LINK_UP_P3 0x01 +#define XG_LINK_DOWN_P3 0x02 +#define XG_LINK_STATE_P3_MASK 0xf +#define XG_LINK_STATE_P3(pcifn,val) \ + (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) + +#define P3_LINK_SPEED_MHZ 100 +#define P3_LINK_SPEED_MASK 0xff +#define P3_LINK_SPEED_REG(pcifn) \ + (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) +#define P3_LINK_SPEED_VAL(pcifn, reg) \ + (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK) + +#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) +#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) +#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) +#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154)) +#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158)) +#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100)) +#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120)) +#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124)) + +#define NIC_CRB_BASE (NETXEN_CAM_RAM(0x200)) +#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700)) +#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X)) +#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X)) + +#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18)) +#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c)) +#define NX_ARG2_CRB_OFFSET (NETXEN_NIC_REG(0x20)) +#define NX_ARG3_CRB_OFFSET (NETXEN_NIC_REG(0x24)) +#define NX_SIGN_CRB_OFFSET (NETXEN_NIC_REG(0x28)) + +#define CRB_HOST_DUMMY_BUF_ADDR_HI (NETXEN_NIC_REG(0x3c)) +#define CRB_HOST_DUMMY_BUF_ADDR_LO (NETXEN_NIC_REG(0x40)) + +#define CRB_CMDPEG_STATE (NETXEN_NIC_REG(0x50)) +#define CRB_RCVPEG_STATE (NETXEN_NIC_REG(0x13c)) + +#define CRB_XG_STATE (NETXEN_NIC_REG(0x94)) +#define CRB_XG_STATE_P3 (NETXEN_NIC_REG(0x98)) +#define CRB_PF_LINK_SPEED_1 (NETXEN_NIC_REG(0xe8)) +#define CRB_PF_LINK_SPEED_2 (NETXEN_NIC_REG(0xec)) + +#define CRB_MPORT_MODE (NETXEN_NIC_REG(0xc4)) +#define CRB_DMA_SHIFT (NETXEN_NIC_REG(0xcc)) +#define CRB_INT_VECTOR (NETXEN_NIC_REG(0xd4)) + +#define CRB_CMD_PRODUCER_OFFSET (NETXEN_NIC_REG(0x08)) +#define CRB_CMD_CONSUMER_OFFSET (NETXEN_NIC_REG(0x0c)) +#define CRB_CMD_PRODUCER_OFFSET_1 (NETXEN_NIC_REG(0x1ac)) +#define CRB_CMD_CONSUMER_OFFSET_1 
(NETXEN_NIC_REG(0x1b0)) +#define CRB_CMD_PRODUCER_OFFSET_2 (NETXEN_NIC_REG(0x1b8)) +#define CRB_CMD_CONSUMER_OFFSET_2 (NETXEN_NIC_REG(0x1bc)) +#define CRB_CMD_PRODUCER_OFFSET_3 (NETXEN_NIC_REG(0x1d0)) +#define CRB_CMD_CONSUMER_OFFSET_3 (NETXEN_NIC_REG(0x1d4)) +#define CRB_TEMP_STATE (NETXEN_NIC_REG(0x1b4)) + +#define CRB_V2P_0 (NETXEN_NIC_REG(0x290)) +#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) +#define CRB_DRIVER_VERSION (NETXEN_NIC_REG(0x2a0)) + +#define CRB_SW_INT_MASK_0 (NETXEN_NIC_REG(0x1d8)) +#define CRB_SW_INT_MASK_1 (NETXEN_NIC_REG(0x1e0)) +#define CRB_SW_INT_MASK_2 (NETXEN_NIC_REG(0x1e4)) +#define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8)) + +#define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128)) +#define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0)) + +/* + * capabilities register, can be used to selectively enable/disable features + * for backward compatibility + */ +#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8) +#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270) + +#define INTR_SCHEME_PERPORT 0x1 +#define MSI_MODE_MULTIFUNC 0x1 + +/* used for ethtool tests */ +#define CRB_SCRATCHPAD_TEST NETXEN_NIC_REG(0x280) + +/* + * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address + * which can be read by the Phantom host to get producer/consumer indexes from + * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following + * registers will be used for the addresses of the ring's shared memory + * on the Phantom. + */ + +#define nx_get_temp_val(x) ((x) >> 16) +#define nx_get_temp_state(x) ((x) & 0xffff) +#define nx_encode_temp(val, state) (((val) << 16) | (state)) + +/* + * Temperature control. + */ +enum { + NX_TEMP_NORMAL = 0x1, /* Normal operating range */ + NX_TEMP_WARN, /* Sound alert, temperature getting high */ + NX_TEMP_PANIC /* Fatal error, hardware has shut down. */ +}; + +/* Lock IDs for PHY lock */ +#define PHY_LOCK_DRIVER 0x44524956 + +/* Used for PS PCI Memory access */ +#define PCIX_PS_OP_ADDR_LO (0x10000) +/* via CRB (PS side only) */ +#define PCIX_PS_OP_ADDR_HI (0x10004) + +#define PCIX_INT_VECTOR (0x10100) +#define PCIX_INT_MASK (0x10104) + +#define PCIX_CRB_WINDOW (0x10210) +#define PCIX_CRB_WINDOW_F0 (0x10210) +#define PCIX_CRB_WINDOW_F1 (0x10230) +#define PCIX_CRB_WINDOW_F2 (0x10250) +#define PCIX_CRB_WINDOW_F3 (0x10270) +#define PCIX_CRB_WINDOW_F4 (0x102ac) +#define PCIX_CRB_WINDOW_F5 (0x102bc) +#define PCIX_CRB_WINDOW_F6 (0x102cc) +#define PCIX_CRB_WINDOW_F7 (0x102dc) +#define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \ + (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\ + (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4)))) + +#define PCIX_MN_WINDOW (0x10200) +#define PCIX_MN_WINDOW_F0 (0x10200) +#define PCIX_MN_WINDOW_F1 (0x10220) +#define PCIX_MN_WINDOW_F2 (0x10240) +#define PCIX_MN_WINDOW_F3 (0x10260) +#define PCIX_MN_WINDOW_F4 (0x102a0) +#define PCIX_MN_WINDOW_F5 (0x102b0) +#define PCIX_MN_WINDOW_F6 (0x102c0) +#define PCIX_MN_WINDOW_F7 (0x102d0) +#define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \ + (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\ + (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4)))) + +#define PCIX_SN_WINDOW (0x10208) +#define PCIX_SN_WINDOW_F0 (0x10208) +#define PCIX_SN_WINDOW_F1 (0x10228) +#define PCIX_SN_WINDOW_F2 (0x10248) +#define PCIX_SN_WINDOW_F3 (0x10268) +#define PCIX_SN_WINDOW_F4 (0x102a8) +#define PCIX_SN_WINDOW_F5 (0x102b8) +#define PCIX_SN_WINDOW_F6 (0x102c8) +#define PCIX_SN_WINDOW_F7 (0x102d8) +#define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? 
\ + (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\ + (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4)))) + +#define PCIX_OCM_WINDOW (0x10800) +#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func)) + +#define PCIX_TARGET_STATUS (0x10118) +#define PCIX_TARGET_STATUS_F1 (0x10160) +#define PCIX_TARGET_STATUS_F2 (0x10164) +#define PCIX_TARGET_STATUS_F3 (0x10168) +#define PCIX_TARGET_STATUS_F4 (0x10360) +#define PCIX_TARGET_STATUS_F5 (0x10364) +#define PCIX_TARGET_STATUS_F6 (0x10368) +#define PCIX_TARGET_STATUS_F7 (0x1036c) + +#define PCIX_TARGET_MASK (0x10128) +#define PCIX_TARGET_MASK_F1 (0x10170) +#define PCIX_TARGET_MASK_F2 (0x10174) +#define PCIX_TARGET_MASK_F3 (0x10178) +#define PCIX_TARGET_MASK_F4 (0x10370) +#define PCIX_TARGET_MASK_F5 (0x10374) +#define PCIX_TARGET_MASK_F6 (0x10378) +#define PCIX_TARGET_MASK_F7 (0x1037c) + +#define PCIX_MSI_F0 (0x13000) +#define PCIX_MSI_F1 (0x13004) +#define PCIX_MSI_F2 (0x13008) +#define PCIX_MSI_F3 (0x1300c) +#define PCIX_MSI_F4 (0x13010) +#define PCIX_MSI_F5 (0x13014) +#define PCIX_MSI_F6 (0x13018) +#define PCIX_MSI_F7 (0x1301c) +#define PCIX_MSI_F(i) (0x13000+((i)*4)) + +#define PCIX_PS_MEM_SPACE (0x90000) + +#define NETXEN_PCIX_PH_REG(reg) (NETXEN_CRB_PCIE + (reg)) +#define NETXEN_PCIX_PS_REG(reg) (NETXEN_CRB_PCIX_MD + (reg)) + +#define NETXEN_PCIE_REG(reg) (NETXEN_CRB_PCIE + (reg)) + +#define PCIE_MAX_DMA_XFER_SIZE (0x1404c) + +#define PCIE_DCR 0x00d8 + +#define PCIE_SEM0_LOCK (0x1c000) +#define PCIE_SEM0_UNLOCK (0x1c004) +#define PCIE_SEM1_LOCK (0x1c008) +#define PCIE_SEM1_UNLOCK (0x1c00c) +#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */ +#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */ +#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */ +#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */ +#define PCIE_SEM4_LOCK (0x1c020) +#define PCIE_SEM4_UNLOCK (0x1c024) +#define PCIE_SEM5_LOCK (0x1c028) /* API lock */ +#define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */ +#define PCIE_SEM6_LOCK (0x1c030) /* sw lock */ +#define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */ +#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */ +#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/ +#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N)) +#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N)) + +#define PCIE_SETUP_FUNCTION (0x12040) +#define PCIE_SETUP_FUNCTION2 (0x12048) +#define PCIE_MISCCFG_RC (0x1206c) +#define PCIE_TGT_SPLIT_CHICKEN (0x12080) +#define PCIE_CHICKEN3 (0x120c8) + +#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC)) +#define PCIE_MAX_MASTER_SPLIT (0x14048) + +#define NETXEN_PORT_MODE_NONE 0 +#define NETXEN_PORT_MODE_XG 1 +#define NETXEN_PORT_MODE_GB 2 +#define NETXEN_PORT_MODE_802_3_AP 3 +#define NETXEN_PORT_MODE_AUTO_NEG 4 +#define NETXEN_PORT_MODE_AUTO_NEG_1G 5 +#define NETXEN_PORT_MODE_AUTO_NEG_XG 6 +#define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24)) +#define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198)) + +#define NETXEN_WOL_CONFIG_NV (NETXEN_CAM_RAM(0x184)) +#define NETXEN_WOL_CONFIG (NETXEN_CAM_RAM(0x188)) + +#define NX_PEG_TUNE_MN_PRESENT 0x1 +#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c)) + +#define NETXEN_DMA_WATCHDOG_CTRL (NETXEN_CAM_RAM(0x14)) +#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0)) +#define NETXEN_PEG_HALT_STATUS1 (NETXEN_CAM_RAM(0xa8)) +#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac)) +#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138)) +#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140)) + +/* Device State */ +#define NX_DEV_COLD 1 +#define NX_DEV_INITALIZING 2 +#define NX_DEV_READY 3 +#define 
NX_DEV_NEED_RESET 4 +#define NX_DEV_NEED_QUISCENT 5 +#define NX_DEV_NEED_AER 6 +#define NX_DEV_FAILED 7 + +#define NX_RCODE_DRIVER_INFO 0x20000000 +#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000 +#define NX_RCODE_FATAL_ERROR 0x80000000 +#define NX_FWERROR_PEGNUM(code) ((code) & 0xff) +#define NX_FWERROR_CODE(code) ((code >> 8) & 0xfffff) + +#define FW_POLL_DELAY (2 * HZ) +#define FW_FAIL_THRESH 3 +#define FW_POLL_THRESH 10 + +#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) +#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) + +/* + * PCI Interrupt Vector Values. + */ +#define PCIX_INT_VECTOR_BIT_F0 0x0080 +#define PCIX_INT_VECTOR_BIT_F1 0x0100 +#define PCIX_INT_VECTOR_BIT_F2 0x0200 +#define PCIX_INT_VECTOR_BIT_F3 0x0400 +#define PCIX_INT_VECTOR_BIT_F4 0x0800 +#define PCIX_INT_VECTOR_BIT_F5 0x1000 +#define PCIX_INT_VECTOR_BIT_F6 0x2000 +#define PCIX_INT_VECTOR_BIT_F7 0x4000 + +struct netxen_legacy_intr_set { + uint32_t int_vec_bit; + uint32_t tgt_status_reg; + uint32_t tgt_mask_reg; + uint32_t pci_int_reg; +}; + +#define NX_LEGACY_INTR_CONFIG \ +{ \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ +} + +#endif /* __NETXEN_NIC_HDR_H_ */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c new file mode 100644 index 0000000..3f89e57 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -0,0 +1,1976 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". + * + */ + +#include <linux/slab.h> +#include "netxen_nic.h" +#include "netxen_nic_hw.h" + +#include <net/ip.h> + +#define MASK(n) ((1ULL<<(n))-1) +#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) +#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) +#define MS_WIN(addr) (addr & 0x0ffc0000) + +#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) + +#define CRB_BLK(off) ((off >> 20) & 0x3f) +#define CRB_SUBBLK(off) ((off >> 16) & 0xf) +#define CRB_WINDOW_2M (0x130060) +#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) +#define CRB_INDIRECT_2M (0x1e0000UL) + +static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, + void __iomem *addr, u32 data); +static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, + void __iomem *addr); + +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + return readl(addr) | (((u64) readl(addr + 4)) << 32LL); +} +#endif + +#ifndef writeq +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(((u32) (val)), (addr)); + writel(((u32) (val >> 32)), (addr + 4)); +} +#endif + +#define PCI_OFFSET_FIRST_RANGE(adapter, off) \ + ((adapter)->ahw.pci_base0 + (off)) +#define PCI_OFFSET_SECOND_RANGE(adapter, off) \ + ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START) +#define PCI_OFFSET_THIRD_RANGE(adapter, off) \ + ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START) + +static void __iomem *pci_base_offset(struct netxen_adapter *adapter, + unsigned long off) +{ + if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END)) + return PCI_OFFSET_FIRST_RANGE(adapter, off); + + if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END)) + return PCI_OFFSET_SECOND_RANGE(adapter, off); + + if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END)) + return PCI_OFFSET_THIRD_RANGE(adapter, off); + + return NULL; +} + +static crb_128M_2M_block_map_t +crb_128M_2M_map[64] __cacheline_aligned_in_smp = { + {{{0, 0, 0, 0} } }, /* 0: PCI */ + {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ + {1, 0x0110000, 0x0120000, 0x130000}, + {1, 0x0120000, 0x0122000, 0x124000}, + {1, 0x0130000, 0x0132000, 0x126000}, + {1, 0x0140000, 0x0142000, 0x128000}, + {1, 0x0150000, 0x0152000, 0x12a000}, + {1, 0x0160000, 0x0170000, 0x110000}, + {1, 0x0170000, 0x0172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x01e0000, 0x01e0800, 0x122000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ + {{{0, 0, 0, 0} } }, /* 3: */ + {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ + {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ + {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ + {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ + {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, +
{0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x08f0000, 0x08f2000, 0x172000} } }, + {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x09f0000, 0x09f2000, 0x176000} } }, + {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0af0000, 0x0af2000, 0x17a000} } }, + {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, + {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ + {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ + {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ + {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ + {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ + {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ + {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ + {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ + {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ + {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ + {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ + {{{0, 0, 0, 0} } }, /* 23: */ + {{{0, 0, 0, 0} } }, /* 24: */ + {{{0, 0, 0, 0} } }, /* 25: */ + {{{0, 0, 0, 0} } }, /* 26: */ + {{{0, 0, 0, 0} } }, /* 27: */ + {{{0, 0, 0, 0} } }, /* 28: */ + {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ + {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ + {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ + {{{0} } }, /* 32: PCI */ + {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ + {1, 0x2110000, 0x2120000, 0x130000}, + {1, 0x2120000, 0x2122000, 0x124000}, + {1, 0x2130000, 0x2132000, 0x126000}, + {1, 
0x2140000, 0x2142000, 0x128000}, + {1, 0x2150000, 0x2152000, 0x12a000}, + {1, 0x2160000, 0x2170000, 0x110000}, + {1, 0x2170000, 0x2172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ + {{{0} } }, /* 35: */ + {{{0} } }, /* 36: */ + {{{0} } }, /* 37: */ + {{{0} } }, /* 38: */ + {{{0} } }, /* 39: */ + {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ + {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ + {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ + {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ + {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ + {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ + {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ + {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ + {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ + {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ + {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ + {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ + {{{0} } }, /* 52: */ + {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ + {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ + {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ + {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ + {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ + {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ + {{{0} } }, /* 59: I2C0 */ + {{{0} } }, /* 60: I2C1 */ + {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ + {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ + {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ +}; + +/* + * top 12 bits of crb internal address (hub, agent) + */ +static unsigned crb_hub_agt[64] = +{ + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PS, + NETXEN_HW_CRB_HUB_AGT_ADR_MN, + NETXEN_HW_CRB_HUB_AGT_ADR_MS, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_SRE, + NETXEN_HW_CRB_HUB_AGT_ADR_NIU, + NETXEN_HW_CRB_HUB_AGT_ADR_QMN, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN0, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN1, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN2, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN3, + NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, + NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, + NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN4, + NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN1, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN2, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN3, + NETXEN_HW_CRB_HUB_AGT_ADR_PGND, + NETXEN_HW_CRB_HUB_AGT_ADR_PGNI, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS1, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS2, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS3, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGSI, + NETXEN_HW_CRB_HUB_AGT_ADR_SN, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_EG, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PS, + NETXEN_HW_CRB_HUB_AGT_ADR_CAM, + 0, + 0, + 0, + 0, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7, + NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, + NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, + NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, + 0, + 
NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9, + NETXEN_HW_CRB_HUB_AGT_ADR_OCM0, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_SMB, + NETXEN_HW_CRB_HUB_AGT_ADR_I2C0, + NETXEN_HW_CRB_HUB_AGT_ADR_I2C1, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGNC, + 0, +}; + +/* PCI Windowing for DDR regions. */ + +#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ + +#define NETXEN_PCIE_SEM_TIMEOUT 10000 + +static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); + +int +netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg) +{ + int done = 0, timeout = 0; + + while (!done) { + done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem))); + if (done == 1) + break; + if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT) + return -EIO; + msleep(1); + } + + if (id_reg) + NXWR32(adapter, id_reg, adapter->portnum); + + return 0; +} + +void +netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) +{ + NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); +} + +static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) +{ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5); + } + + return 0; +} + +/* Disable an XG interface */ +static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) +{ + __u32 mac_cfg; + u32 port = adapter->physical_port; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return 0; + + if (port > NETXEN_NIU_MAX_XG_PORTS) + return -EINVAL; + + mac_cfg = 0; + if (NXWR32(adapter, + NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg)) + return -EIO; + return 0; +} + +#define NETXEN_UNICAST_ADDR(port, index) \ + (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) +#define NETXEN_MCAST_ADDR(port, index) \ + (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8)) +#define MAC_HI(addr) \ + ((addr[2] << 16) | (addr[1] << 8) | (addr[0])) +#define MAC_LO(addr) \ + ((addr[5] << 16) | (addr[4] << 8) | (addr[3])) + +static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) +{ + u32 mac_cfg; + u32 cnt = 0; + __u32 reg = 0x0200; + u32 port = adapter->physical_port; + u16 board_type = adapter->ahw.board_type; + + if (port > NETXEN_NIU_MAX_XG_PORTS) + return -EINVAL; + + mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port)); + mac_cfg &= ~0x4; + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); + + if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) || + (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ)) + reg = (0x20 << port); + + NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg); + + mdelay(10); + + while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20) + mdelay(10); + + if (cnt < 20) { + + reg = NXRD32(adapter, + NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port)); + + if (mode == NETXEN_NIU_PROMISC_MODE) + reg = (reg | 0x2000UL); + else + reg = (reg & ~0x2000UL); + + if (mode == NETXEN_NIU_ALLMULTI_MODE) + reg = (reg | 0x1000UL); + else + reg = (reg & ~0x1000UL); + + NXWR32(adapter, + NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); + } + + mac_cfg |= 0x4; + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); + + return 0; +} + +static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) +{ + u32 mac_hi, mac_lo; + u32 reg_hi, reg_lo; + + u8 phy = adapter->physical_port; + + if (phy >= NETXEN_NIU_MAX_XG_PORTS) + return -EINVAL; + + mac_lo = ((u32)addr[0] << 16) | 
((u32)addr[1] << 24); + mac_hi = addr[2] | ((u32)addr[3] << 8) | + ((u32)addr[4] << 16) | ((u32)addr[5] << 24); + + reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy); + reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy); + + /* write twice to flush */ + if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) + return -EIO; + if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) + return -EIO; + + return 0; +} + +static int +netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter) +{ + u32 val = 0; + u16 port = adapter->physical_port; + u8 *addr = adapter->mac_addr; + + if (adapter->mc_enabled) + return 0; + + val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); + val |= (1UL << (28+port)); + NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); + + /* add broadcast addr to filter */ + val = 0xffffff; + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); + + /* add station addr to filter */ + val = MAC_HI(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val); + val = MAC_LO(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val); + + adapter->mc_enabled = 1; + return 0; +} + +static int +netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter) +{ + u32 val = 0; + u16 port = adapter->physical_port; + u8 *addr = adapter->mac_addr; + + if (!adapter->mc_enabled) + return 0; + + val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); + val &= ~(1UL << (28+port)); + NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); + + val = MAC_HI(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); + val = MAC_LO(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); + + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0); + + adapter->mc_enabled = 0; + return 0; +} + +static int +netxen_nic_set_mcast_addr(struct netxen_adapter *adapter, + int index, u8 *addr) +{ + u32 hi = 0, lo = 0; + u16 port = adapter->physical_port; + + lo = MAC_LO(addr); + hi = MAC_HI(addr); + + NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi); + NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo); + + return 0; +} + +static void netxen_p2_nic_set_multi(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + u8 null_addr[6]; + int i; + + memset(null_addr, 0, 6); + + if (netdev->flags & IFF_PROMISC) { + + adapter->set_promisc(adapter, + NETXEN_NIU_PROMISC_MODE); + + /* Full promiscuous mode */ + netxen_nic_disable_mcast_filter(adapter); + + return; + } + + if (netdev_mc_empty(netdev)) { + adapter->set_promisc(adapter, + NETXEN_NIU_NON_PROMISC_MODE); + netxen_nic_disable_mcast_filter(adapter); + return; + } + + adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE); + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > adapter->max_mc_count) { + netxen_nic_disable_mcast_filter(adapter); + return; + } + + netxen_nic_enable_mcast_filter(adapter); + + i = 0; + netdev_for_each_mc_addr(ha, netdev) + netxen_nic_set_mcast_addr(adapter, i++, ha->addr); + + /* Clear out remaining addresses */ + while (i < adapter->max_mc_count) + netxen_nic_set_mcast_addr(adapter, i++, null_addr); +} + +static int +netxen_send_cmd_descs(struct netxen_adapter *adapter, + struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) +{ + u32 i, producer, consumer; + struct netxen_cmd_buffer *pbuf; + struct cmd_desc_type0 *cmd_desc; + struct nx_host_tx_ring *tx_ring; + + i = 0; + + if (adapter->is_up != 
NETXEN_ADAPTER_UP_MAGIC) + return -EIO; + + tx_ring = adapter->tx_ring; + __netif_tx_lock_bh(tx_ring->txq); + + producer = tx_ring->producer; + consumer = tx_ring->sw_consumer; + + if (nr_desc >= netxen_tx_avail(tx_ring)) { + netif_tx_stop_queue(tx_ring->txq); + smp_mb(); + if (netxen_tx_avail(tx_ring) > nr_desc) { + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_tx_wake_queue(tx_ring->txq); + } else { + __netif_tx_unlock_bh(tx_ring->txq); + return -EBUSY; + } + } + + do { + cmd_desc = &cmd_desc_arr[i]; + + pbuf = &tx_ring->cmd_buf_arr[producer]; + pbuf->skb = NULL; + pbuf->frag_count = 0; + + memcpy(&tx_ring->desc_head[producer], + &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); + + producer = get_next_index(producer, tx_ring->num_desc); + i++; + + } while (i != nr_desc); + + tx_ring->producer = producer; + + netxen_nic_update_cmd_producer(adapter, tx_ring); + + __netif_tx_unlock_bh(tx_ring->txq); + + return 0; +} + +static int +nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op) +{ + nx_nic_req_t req; + nx_mac_req_t *mac_req; + u64 word; + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23); + + word = NX_MAC_EVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + mac_req = (nx_mac_req_t *)&req.words[0]; + mac_req->op = op; + memcpy(mac_req->mac_addr, addr, 6); + + return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); +} + +static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, + const u8 *addr, struct list_head *del_list) +{ + struct list_head *head; + nx_mac_list_t *cur; + + /* look up if already exists */ + list_for_each(head, del_list) { + cur = list_entry(head, nx_mac_list_t, list); + + if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) { + list_move_tail(head, &adapter->mac_list); + return 0; + } + } + + cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC); + if (cur == NULL) { + printk(KERN_ERR "%s: failed to add mac address filter\n", + adapter->netdev->name); + return -ENOMEM; + } + memcpy(cur->mac_addr, addr, ETH_ALEN); + list_add_tail(&cur->list, &adapter->mac_list); + return nx_p3_sre_macaddr_change(adapter, + cur->mac_addr, NETXEN_MAC_ADD); +} + +static void netxen_p3_nic_set_multi(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + static const u8 bcast_addr[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + u32 mode = VPORT_MISS_MODE_DROP; + LIST_HEAD(del_list); + struct list_head *head; + nx_mac_list_t *cur; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return; + + list_splice_tail_init(&adapter->mac_list, &del_list); + + nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list); + nx_p3_nic_add_mac(adapter, bcast_addr, &del_list); + + if (netdev->flags & IFF_PROMISC) { + mode = VPORT_MISS_MODE_ACCEPT_ALL; + goto send_fw_cmd; + } + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > adapter->max_mc_count)) { + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + goto send_fw_cmd; + } + + if (!netdev_mc_empty(netdev)) { + netdev_for_each_mc_addr(ha, netdev) + nx_p3_nic_add_mac(adapter, ha->addr, &del_list); + } + +send_fw_cmd: + adapter->set_promisc(adapter, mode); + head = &del_list; + while (!list_empty(head)) { + cur = list_entry(head->next, nx_mac_list_t, list); + + nx_p3_sre_macaddr_change(adapter, + cur->mac_addr, NETXEN_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + +static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) +{ + nx_nic_req_t 
req; + u64 word; + + memset(&req, 0, sizeof(nx_nic_req_t)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(mode); + + return netxen_send_cmd_descs(adapter, + (struct cmd_desc_type0 *)&req, 1); +} + +void netxen_p3_free_mac_list(struct netxen_adapter *adapter) +{ + nx_mac_list_t *cur; + struct list_head *head = &adapter->mac_list; + + while (!list_empty(head)) { + cur = list_entry(head->next, nx_mac_list_t, list); + nx_p3_sre_macaddr_change(adapter, + cur->mac_addr, NETXEN_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + +static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) +{ + /* assuming caller has already copied new addr to netdev */ + netxen_p3_nic_set_multi(adapter->netdev); + return 0; +} + +#define NETXEN_CONFIG_INTR_COALESCE 3 + +/* + * Send the interrupt coalescing parameter set by ethtool to the card. + */ +int netxen_config_intr_coalesce(struct netxen_adapter *adapter) +{ + nx_nic_req_t req; + u64 word[6]; + int rv, i; + + memset(&req, 0, sizeof(nx_nic_req_t)); + memset(word, 0, sizeof(word)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word[0]); + + memcpy(&word[0], &adapter->coal, sizeof(adapter->coal)); + for (i = 0; i < 6; i++) + req.words[i] = cpu_to_le64(word[i]); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "ERROR. Could not send " + "interrupt coalescing parameters\n"); + } + + return rv; +} + +int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable) +{ + nx_nic_req_t req; + u64 word; + int rv = 0; + + if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(nx_nic_req_t)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "ERROR. Could not send " + "configure hw lro request\n"); + } + + return rv; +} + +int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable) +{ + nx_nic_req_t req; + u64 word; + int rv = 0; + + if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable) + return rv; + + memset(&req, 0, sizeof(nx_nic_req_t)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "ERROR. 
Could not send "
+ "configure bridge mode request\n");
+ }
+
+ adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+int netxen_config_rss(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int i, rv;
+
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 47-10: reserved
+ * 63-48: indirection table mask
+ */
+ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((0x7ULL) << 48);
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < ARRAY_SIZE(key); i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not configure RSS\n",
+ adapter->netdev->name);
+ }
+
+ return rv;
+}
+
+int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ req.words[1] = cpu_to_le64(ip);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not notify %s IP 0x%x request\n",
+ adapter->netdev->name,
+ (cmd == NX_IP_UP) ?
"Add" : "Remove", ip); + } + return rv; +} + +int netxen_linkevent_request(struct netxen_adapter *adapter, int enable) +{ + nx_nic_req_t req; + u64 word; + int rv; + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + req.words[0] = cpu_to_le64(enable | (enable << 8)); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "%s: could not configure link notification\n", + adapter->netdev->name); + } + + return rv; +} + +int netxen_send_lro_cleanup(struct netxen_adapter *adapter) +{ + nx_nic_req_t req; + u64 word; + int rv; + + if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_LRO_REQUEST | + ((u64)adapter->portnum << 16) | + ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56) ; + + req.req_hdr = cpu_to_le64(word); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "%s: could not cleanup lro flows\n", + adapter->netdev->name); + } + return rv; +} + +/* + * netxen_nic_change_mtu - Change the Maximum Transfer Unit + * @returns 0 on success, negative on failure + */ + +#define MTU_FUDGE_FACTOR 100 + +int netxen_nic_change_mtu(struct net_device *netdev, int mtu) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + int max_mtu; + int rc = 0; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + max_mtu = P3_MAX_MTU; + else + max_mtu = P2_MAX_MTU; + + if (mtu > max_mtu) { + printk(KERN_ERR "%s: mtu > %d bytes unsupported\n", + netdev->name, max_mtu); + return -EINVAL; + } + + if (adapter->set_mtu) + rc = adapter->set_mtu(adapter, mtu); + + if (!rc) + netdev->mtu = mtu; + + return rc; +} + +static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, + int size, __le32 * buf) +{ + int i, v, addr; + __le32 *ptr32; + + addr = base; + ptr32 = buf; + for (i = 0; i < size / sizeof(u32); i++) { + if (netxen_rom_fast_read(adapter, addr, &v) == -1) + return -1; + *ptr32 = cpu_to_le32(v); + ptr32++; + addr += sizeof(u32); + } + if ((char *)buf + size > (char *)ptr32) { + __le32 local; + if (netxen_rom_fast_read(adapter, addr, &v) == -1) + return -1; + local = cpu_to_le32(v); + memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); + } + + return 0; +} + +int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac) +{ + __le32 *pmac = (__le32 *) mac; + u32 offset; + + offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); + + if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) + return -1; + + if (*mac == cpu_to_le64(~0ULL)) { + + offset = NX_OLD_MAC_ADDR_OFFSET + + (adapter->portnum * sizeof(u64)); + + if (netxen_get_flash_block(adapter, + offset, sizeof(u64), pmac) == -1) + return -1; + + if (*mac == cpu_to_le64(~0ULL)) + return -1; + } + return 0; +} + +int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac) +{ + uint32_t crbaddr, mac_hi, mac_lo; + int pci_func = adapter->ahw.pci_func; + + crbaddr = CRB_MAC_BLOCK_START + + (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); + + mac_lo = NXRD32(adapter, crbaddr); + mac_hi = NXRD32(adapter, crbaddr+4); + + if (pci_func & 1) + *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); + else + *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32)); + + return 0; +} + +/* + * Changes the CRB window to the 
specified window. + */ +static void +netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter, + u32 window) +{ + void __iomem *offset; + int count = 10; + u8 func = adapter->ahw.pci_func; + + if (adapter->ahw.crb_win == window) + return; + + offset = PCI_OFFSET_SECOND_RANGE(adapter, + NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func))); + + writel(window, offset); + do { + if (window == readl(offset)) + break; + + if (printk_ratelimit()) + dev_warn(&adapter->pdev->dev, + "failed to set CRB window to %d\n", + (window == NETXEN_WINDOW_ONE)); + udelay(1); + + } while (--count > 0); + + if (count > 0) + adapter->ahw.crb_win = window; +} + +/* + * Returns < 0 if off is not valid, + * 1 if window access is needed. 'off' is set to offset from + * CRB space in 128M pci map + * 0 if no window access is needed. 'off' is set to 2M addr + * In: 'off' is offset from base in 128M pci map + */ +static int +netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, + ulong off, void __iomem **addr) +{ + crb_128M_2M_sub_block_map_t *m; + + + if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE)) + return -EINVAL; + + off -= NETXEN_PCI_CRBSPACE; + + /* + * Try direct map + */ + m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; + + if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { + *addr = adapter->ahw.pci_base0 + m->start_2M + + (off - m->start_128M); + return 0; + } + + /* + * Not in direct map, use crb window + */ + *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + + (off & MASK(16)); + return 1; +} + +/* + * In: 'off' is offset from CRB space in 128M pci map + * Out: 'off' is 2M pci map addr + * side effect: lock crb window + */ +static void +netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off) +{ + u32 window; + void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M; + + off -= NETXEN_PCI_CRBSPACE; + + window = CRB_HI(off); + + writel(window, addr); + if (readl(addr) != window) { + if (printk_ratelimit()) + dev_warn(&adapter->pdev->dev, + "failed to set CRB window to %d off 0x%lx\n", + window, off); + } +} + +static void __iomem * +netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter, + ulong win_off, void __iomem **mem_ptr) +{ + ulong off = win_off; + void __iomem *addr; + resource_size_t mem_base; + + if (ADDR_IN_WINDOW1(win_off)) + off = NETXEN_CRB_NORMAL(win_off); + + addr = pci_base_offset(adapter, off); + if (addr) + return addr; + + if (adapter->ahw.pci_len0 == 0) + off -= NETXEN_PCI_CRBSPACE; + + mem_base = pci_resource_start(adapter->pdev, 0); + *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE); + if (*mem_ptr) + addr = *mem_ptr + (off & (PAGE_SIZE - 1)); + + return addr; +} + +static int +netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data) +{ + unsigned long flags; + void __iomem *addr, *mem_ptr = NULL; + + addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); + if (!addr) + return -EIO; + + if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ + netxen_nic_io_write_128M(adapter, addr, data); + } else { /* Window 0 */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + writel(data, addr); + netxen_nic_pci_set_crbwindow_128M(adapter, + NETXEN_WINDOW_ONE); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + } + + if (mem_ptr) + iounmap(mem_ptr); + + return 0; +} + +static u32 +netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off) +{ + unsigned long flags; + void __iomem *addr, *mem_ptr = NULL; + 
u32 data; + + addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); + if (!addr) + return -EIO; + + if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ + data = netxen_nic_io_read_128M(adapter, addr); + } else { /* Window 0 */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + data = readl(addr); + netxen_nic_pci_set_crbwindow_128M(adapter, + NETXEN_WINDOW_ONE); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + } + + if (mem_ptr) + iounmap(mem_ptr); + + return data; +} + +static int +netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data) +{ + unsigned long flags; + int rv; + void __iomem *addr = NULL; + + rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); + + if (rv == 0) { + writel(data, addr); + return 0; + } + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + crb_win_lock(adapter); + netxen_nic_pci_set_crbwindow_2M(adapter, off); + writel(data, addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + return 0; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -EIO; +} + +static u32 +netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off) +{ + unsigned long flags; + int rv; + u32 data; + void __iomem *addr = NULL; + + rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); + + if (rv == 0) + return readl(addr); + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + crb_win_lock(adapter); + netxen_nic_pci_set_crbwindow_2M(adapter, off); + data = readl(addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + return data; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -1; +} + +/* window 1 registers only */ +static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, + void __iomem *addr, u32 data) +{ + read_lock(&adapter->ahw.crb_lock); + writel(data, addr); + read_unlock(&adapter->ahw.crb_lock); +} + +static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, + void __iomem *addr) +{ + u32 val; + + read_lock(&adapter->ahw.crb_lock); + val = readl(addr); + read_unlock(&adapter->ahw.crb_lock); + + return val; +} + +static void netxen_nic_io_write_2M(struct netxen_adapter *adapter, + void __iomem *addr, u32 data) +{ + writel(data, addr); +} + +static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter, + void __iomem *addr) +{ + return readl(addr); +} + +void __iomem * +netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset) +{ + void __iomem *addr = NULL; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if ((offset < NETXEN_CRB_PCIX_HOST2) && + (offset > NETXEN_CRB_PCIX_HOST)) + addr = PCI_OFFSET_SECOND_RANGE(adapter, offset); + else + addr = NETXEN_CRB_NORMALIZE(adapter, offset); + } else { + WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter, + offset, &addr)); + } + + return addr; +} + +static int +netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter, + u64 addr, u32 *start) +{ + if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { + *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0); + return 0; + } else if (ADDR_IN_RANGE(addr, + NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { + *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1); + return 0; + } + + return -EIO; +} + +static int +netxen_nic_pci_set_window_2M(struct netxen_adapter 
*adapter, + u64 addr, u32 *start) +{ + u32 window; + + window = OCM_WIN(addr); + + writel(window, adapter->ahw.ocm_win_crb); + /* read back to flush */ + readl(adapter->ahw.ocm_win_crb); + + adapter->ahw.ocm_win = window; + *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); + return 0; +} + +static int +netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off, + u64 *data, int op) +{ + void __iomem *addr, *mem_ptr = NULL; + resource_size_t mem_base; + int ret; + u32 start; + + spin_lock(&adapter->ahw.mem_lock); + + ret = adapter->pci_set_window(adapter, off, &start); + if (ret != 0) + goto unlock; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + addr = adapter->ahw.pci_base0 + start; + } else { + addr = pci_base_offset(adapter, start); + if (addr) + goto noremap; + + mem_base = pci_resource_start(adapter->pdev, 0) + + (start & PAGE_MASK); + mem_ptr = ioremap(mem_base, PAGE_SIZE); + if (mem_ptr == NULL) { + ret = -EIO; + goto unlock; + } + + addr = mem_ptr + (start & (PAGE_SIZE-1)); + } +noremap: + if (op == 0) /* read */ + *data = readq(addr); + else /* write */ + writeq(*data, addr); + +unlock: + spin_unlock(&adapter->ahw.mem_lock); + + if (mem_ptr) + iounmap(mem_ptr); + return ret; +} + +void +netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data) +{ + void __iomem *addr = adapter->ahw.pci_base0 + + NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); + + spin_lock(&adapter->ahw.mem_lock); + *data = readq(addr); + spin_unlock(&adapter->ahw.mem_lock); +} + +void +netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data) +{ + void __iomem *addr = adapter->ahw.pci_base0 + + NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); + + spin_lock(&adapter->ahw.mem_lock); + writeq(data, addr); + spin_unlock(&adapter->ahw.mem_lock); +} + +#define MAX_CTL_CHECK 1000 + +static int +netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, + u64 off, u64 data) +{ + int j, ret; + u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P2 has different SIU and MIU test agent base addr */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P2)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); + addr_hi = SIU_TEST_AGT_ADDR_HI; + data_lo = SIU_TEST_AGT_WRDATA_LO; + data_hi = SIU_TEST_AGT_WRDATA_HI; + off_lo = off & SIU_TEST_AGT_ADDR_MASK; + off_hi = SIU_TEST_AGT_UPPER_ADDR(off); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + addr_hi = MIU_TEST_AGT_ADDR_HI; + data_lo = MIU_TEST_AGT_WRDATA_LO; + data_hi = MIU_TEST_AGT_WRDATA_HI; + off_lo = off & MIU_TEST_AGT_ADDR_MASK; + off_hi = 0; + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || + ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { + if (adapter->ahw.pci_len0 != 0) { + return netxen_nic_pci_mem_access_direct(adapter, + off, &data, 1); + } + } + + return -EIO; + +correct: + spin_lock(&adapter->ahw.mem_lock); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + + writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(off_hi, (mem_crb + addr_hi)); + writel(data & 0xffffffff, (mem_crb + data_lo)); + writel((data >> 32) & 0xffffffff, (mem_crb + data_hi)); + writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE | 
TA_CTL_WRITE), + (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl((mem_crb + TEST_AGT_CTRL)); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to write through agent\n"); + ret = -EIO; + } else + ret = 0; + + netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); + spin_unlock(&adapter->ahw.mem_lock); + return ret; +} + +static int +netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, + u64 off, u64 *data) +{ + int j, ret; + u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; + u64 val; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P2 has different SIU and MIU test agent base addr */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P2)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); + addr_hi = SIU_TEST_AGT_ADDR_HI; + data_lo = SIU_TEST_AGT_RDDATA_LO; + data_hi = SIU_TEST_AGT_RDDATA_HI; + off_lo = off & SIU_TEST_AGT_ADDR_MASK; + off_hi = SIU_TEST_AGT_UPPER_ADDR(off); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + addr_hi = MIU_TEST_AGT_ADDR_HI; + data_lo = MIU_TEST_AGT_RDDATA_LO; + data_hi = MIU_TEST_AGT_RDDATA_HI; + off_lo = off & MIU_TEST_AGT_ADDR_MASK; + off_hi = 0; + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || + ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { + if (adapter->ahw.pci_len0 != 0) { + return netxen_nic_pci_mem_access_direct(adapter, + off, data, 0); + } + } + + return -EIO; + +correct: + spin_lock(&adapter->ahw.mem_lock); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + + writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(off_hi, (mem_crb + addr_hi)); + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EIO; + } else { + + temp = readl(mem_crb + data_hi); + val = ((u64)temp << 32); + val |= readl(mem_crb + data_lo); + *data = val; + ret = 0; + } + + netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); + spin_unlock(&adapter->ahw.mem_lock); + + return ret; +} + +static int +netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, + u64 off, u64 data) +{ + int j, ret; + u32 temp, off8; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P3 onward, test agent base for MIU and SIU is same */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P3)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) + return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1); + + return -EIO; + +correct: + off8 = off & 0xfffffff8; + + spin_lock(&adapter->ahw.mem_lock); + + writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(0, (mem_crb + 
MIU_TEST_AGT_ADDR_HI)); + + writel(data & 0xffffffff, + mem_crb + MIU_TEST_AGT_WRDATA_LO); + writel((data >> 32) & 0xffffffff, + mem_crb + MIU_TEST_AGT_WRDATA_HI); + + writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), + (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to write through agent\n"); + ret = -EIO; + } else + ret = 0; + + spin_unlock(&adapter->ahw.mem_lock); + + return ret; +} + +static int +netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, + u64 off, u64 *data) +{ + int j, ret; + u32 temp, off8; + u64 val; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P3 onward, test agent base for MIU and SIU is same */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P3)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { + return netxen_nic_pci_mem_access_direct(adapter, + off, data, 0); + } + + return -EIO; + +correct: + off8 = off & 0xfffffff8; + + spin_lock(&adapter->ahw.mem_lock); + + writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EIO; + } else { + val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32; + val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO); + *data = val; + ret = 0; + } + + spin_unlock(&adapter->ahw.mem_lock); + + return ret; +} + +void +netxen_setup_hwops(struct netxen_adapter *adapter) +{ + adapter->init_port = netxen_niu_xg_init_port; + adapter->stop_port = netxen_niu_disable_xg_port; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + adapter->crb_read = netxen_nic_hw_read_wx_128M, + adapter->crb_write = netxen_nic_hw_write_wx_128M, + adapter->pci_set_window = netxen_nic_pci_set_window_128M, + adapter->pci_mem_read = netxen_nic_pci_mem_read_128M, + adapter->pci_mem_write = netxen_nic_pci_mem_write_128M, + adapter->io_read = netxen_nic_io_read_128M, + adapter->io_write = netxen_nic_io_write_128M, + + adapter->macaddr_set = netxen_p2_nic_set_mac_addr; + adapter->set_multi = netxen_p2_nic_set_multi; + adapter->set_mtu = netxen_nic_set_mtu_xgb; + adapter->set_promisc = netxen_p2_nic_set_promisc; + + } else { + adapter->crb_read = netxen_nic_hw_read_wx_2M, + adapter->crb_write = netxen_nic_hw_write_wx_2M, + adapter->pci_set_window = netxen_nic_pci_set_window_2M, + adapter->pci_mem_read = netxen_nic_pci_mem_read_2M, + adapter->pci_mem_write = netxen_nic_pci_mem_write_2M, + adapter->io_read = netxen_nic_io_read_2M, + adapter->io_write = netxen_nic_io_write_2M, + + adapter->set_mtu = nx_fw_cmd_set_mtu; + adapter->set_promisc = netxen_p3_nic_set_promisc; + adapter->macaddr_set = 
netxen_p3_nic_set_mac_addr; + adapter->set_multi = netxen_p3_nic_set_multi; + + adapter->phy_read = nx_fw_cmd_query_phy; + adapter->phy_write = nx_fw_cmd_set_phy; + } +} + +int netxen_nic_get_board_info(struct netxen_adapter *adapter) +{ + int offset, board_type, magic; + struct pci_dev *pdev = adapter->pdev; + + offset = NX_FW_MAGIC_OFFSET; + if (netxen_rom_fast_read(adapter, offset, &magic)) + return -EIO; + + if (magic != NETXEN_BDINFO_MAGIC) { + dev_err(&pdev->dev, "invalid board config, magic=%08x\n", + magic); + return -EIO; + } + + offset = NX_BRDTYPE_OFFSET; + if (netxen_rom_fast_read(adapter, offset, &board_type)) + return -EIO; + + if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { + u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I); + if ((gpio & 0x8000) == 0) + board_type = NETXEN_BRDTYPE_P3_10G_TP; + } + + adapter->ahw.board_type = board_type; + + switch (board_type) { + case NETXEN_BRDTYPE_P2_SB35_4G: + adapter->ahw.port_type = NETXEN_NIC_GBE; + break; + case NETXEN_BRDTYPE_P2_SB31_10G: + case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_CX4: + case NETXEN_BRDTYPE_P3_HMEZ: + case NETXEN_BRDTYPE_P3_XG_LOM: + case NETXEN_BRDTYPE_P3_10G_CX4: + case NETXEN_BRDTYPE_P3_10G_CX4_LP: + case NETXEN_BRDTYPE_P3_IMEZ: + case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: + case NETXEN_BRDTYPE_P3_10G_SFP_CT: + case NETXEN_BRDTYPE_P3_10G_SFP_QT: + case NETXEN_BRDTYPE_P3_10G_XFP: + case NETXEN_BRDTYPE_P3_10000_BASE_T: + adapter->ahw.port_type = NETXEN_NIC_XGBE; + break; + case NETXEN_BRDTYPE_P1_BD: + case NETXEN_BRDTYPE_P1_SB: + case NETXEN_BRDTYPE_P1_SMAX: + case NETXEN_BRDTYPE_P1_SOCK: + case NETXEN_BRDTYPE_P3_REF_QG: + case NETXEN_BRDTYPE_P3_4_GB: + case NETXEN_BRDTYPE_P3_4_GB_MM: + adapter->ahw.port_type = NETXEN_NIC_GBE; + break; + case NETXEN_BRDTYPE_P3_10G_TP: + adapter->ahw.port_type = (adapter->portnum < 2) ? 
+ NETXEN_NIC_XGBE : NETXEN_NIC_GBE; + break; + default: + dev_err(&pdev->dev, "unknown board type %x\n", board_type); + adapter->ahw.port_type = NETXEN_NIC_XGBE; + break; + } + + return 0; +} + +/* NIU access sections */ +static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) +{ + new_mtu += MTU_FUDGE_FACTOR; + if (adapter->physical_port == 0) + NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); + else + NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu); + return 0; +} + +void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) +{ + __u32 status; + __u32 autoneg; + __u32 port_mode; + + if (!netif_carrier_ok(adapter->netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = -1; + adapter->link_autoneg = AUTONEG_ENABLE; + return; + } + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); + if (port_mode == NETXEN_PORT_MODE_802_3_AP) { + adapter->link_speed = SPEED_1000; + adapter->link_duplex = DUPLEX_FULL; + adapter->link_autoneg = AUTONEG_DISABLE; + return; + } + + if (adapter->phy_read && + adapter->phy_read(adapter, + NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, + &status) == 0) { + if (netxen_get_phy_link(status)) { + switch (netxen_get_phy_speed(status)) { + case 0: + adapter->link_speed = SPEED_10; + break; + case 1: + adapter->link_speed = SPEED_100; + break; + case 2: + adapter->link_speed = SPEED_1000; + break; + default: + adapter->link_speed = 0; + break; + } + switch (netxen_get_phy_duplex(status)) { + case 0: + adapter->link_duplex = DUPLEX_HALF; + break; + case 1: + adapter->link_duplex = DUPLEX_FULL; + break; + default: + adapter->link_duplex = -1; + break; + } + if (adapter->phy_read && + adapter->phy_read(adapter, + NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, + &autoneg) != 0) + adapter->link_autoneg = autoneg; + } else + goto link_down; + } else { + link_down: + adapter->link_speed = 0; + adapter->link_duplex = -1; + } + } +} + +int +netxen_nic_wol_supported(struct netxen_adapter *adapter) +{ + u32 wol_cfg; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); + if (wol_cfg & (1UL << adapter->portnum)) { + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); + if (wol_cfg & (1 << adapter->portnum)) + return 1; + } + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h new file mode 100644 index 0000000..e2c5b6f --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h @@ -0,0 +1,287 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#ifndef __NETXEN_NIC_HW_H_ +#define __NETXEN_NIC_HW_H_ + +/* Hardware memory size of 128 meg */ +#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024) + +struct netxen_adapter; + +#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20) + +void netxen_nic_set_link_parameters(struct netxen_adapter *adapter); + +/* Nibble or Byte mode for phy interface (GbE mode only) */ + +#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1) + +/* + * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) + * + * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable + * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream + * Bit 2 : enable_rx => 1:enable frame recv, 0:disable + * Bit 3 : rx_synced => R/O: recv enable synched to recv stream + * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable + * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore + * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal + * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op + * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op + * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op + * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op + * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op + */ + +#define netxen_gb_tx_flowctl(config_word) \ + ((config_word) |= 1 << 4) +#define netxen_gb_rx_flowctl(config_word) \ + ((config_word) |= 1 << 5) +#define netxen_gb_tx_reset_pb(config_word) \ + ((config_word) |= 1 << 16) +#define netxen_gb_rx_reset_pb(config_word) \ + ((config_word) |= 1 << 17) +#define netxen_gb_tx_reset_mac(config_word) \ + ((config_word) |= 1 << 18) +#define netxen_gb_rx_reset_mac(config_word) \ + ((config_word) |= 1 << 19) + +#define netxen_gb_unset_tx_flowctl(config_word) \ + ((config_word) &= ~(1 << 4)) +#define netxen_gb_unset_rx_flowctl(config_word) \ + ((config_word) &= ~(1 << 5)) + +#define netxen_gb_get_tx_synced(config_word) \ + _netxen_crb_get_bit((config_word), 1) +#define netxen_gb_get_rx_synced(config_word) \ + _netxen_crb_get_bit((config_word), 3) +#define netxen_gb_get_tx_flowctl(config_word) \ + _netxen_crb_get_bit((config_word), 4) +#define netxen_gb_get_rx_flowctl(config_word) \ + _netxen_crb_get_bit((config_word), 5) +#define netxen_gb_get_soft_reset(config_word) \ + _netxen_crb_get_bit((config_word), 31) + +#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16) + +#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \ + ((config_word) |= ((val) & 0x07)) +#define netxen_gb_mii_mgmt_reset(config_word) \ + ((config_word) |= 1 << 31) +#define netxen_gb_mii_mgmt_unset(config_word) \ + ((config_word) &= ~(1 << 31)) + +/* + * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3) + * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op + * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op + */ + +#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \ + ((config_word) |= 1 << 0) +#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \ + ((config_word) |= ((val) & 0x1F)) +#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \ + ((config_word) |= (((val) & 0x1F) << 8)) + +/* + * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3) + * Read-only register. 
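+ * Software polls the busy bit (bit 0, below) between MII management
+ * cycles before starting the next one.  A minimal polling sketch (the
+ * register handle and retry policy are illustrative, not part of this
+ * driver):
+ *
+ *	int retries = 100;
+ *	while (netxen_get_gb_mii_mgmt_busy(
+ *			NXRD32(adapter, mii_ind_reg)) && --retries)
+ *		udelay(10);
+ *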
+ * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle
+ * Bit 1 : scanning => 1:scan operation in progress, 0:idle
+ * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle
+ */
+#define netxen_get_gb_mii_mgmt_busy(config_word) \
+ _netxen_crb_get_bit(config_word, 0)
+#define netxen_get_gb_mii_mgmt_scanning(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+#define netxen_get_gb_mii_mgmt_notvalid(config_word) \
+ _netxen_crb_get_bit(config_word, 2)
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define netxen_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define netxen_xg_get_xg0_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 0)
+#define netxen_xg_get_xg1_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 3)
+
+#define netxen_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define netxen_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * NIU GB Pause Ctl Register
+ *
+ * Bit 0 : gb0_mask => 1:disable tx pause frames
+ * Bit 2 : gb1_mask => 1:disable tx pause frames
+ * Bit 4 : gb2_mask => 1:disable tx pause frames
+ * Bit 6 : gb3_mask => 1:disable tx pause frames
+ */
+#define netxen_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define netxen_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define netxen_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define netxen_gb_get_gb0_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 0)
+#define netxen_gb_get_gb1_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 2)
+#define netxen_gb_get_gb2_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_gb3_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 6)
+
+#define netxen_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define netxen_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define netxen_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define netxen_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+
+/*
+ * PHY-Specific MII control/status registers.
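+ *
+ * These constants are MII register numbers passed to the adapter's
+ * phy_read()/phy_write() hooks.  A sketch of the usual read pattern,
+ * mirroring netxen_nic_set_link_parameters() (the 'speed' local is
+ * illustrative):
+ *
+ *	__u32 status;
+ *	if (adapter->phy_read(adapter,
+ *			NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ *			&status) == 0 && netxen_get_phy_link(status))
+ *		speed = netxen_get_phy_speed(status);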
+ */ +#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL 0 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS 1 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 2 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 3 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG 4 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART 5 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE 6 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT 7 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE 8 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL 9 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS 10 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS 15 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL 16 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE 18 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS 19 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE 20 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT 21 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL 24 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE 25 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET 26 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE 27 + +/* + * PHY-Specific Status Register (reg 17). + * + * Bit 0 : jabber => 1:jabber detected, 0:not + * Bit 1 : polarity => 1:polarity reversed, 0:normal + * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled + * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled + * Bit 4 : energydetect => 1:sleep, 0:active + * Bit 5 : downshift => 1:downshift, 0:no downshift + * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) + * Bits 7-9 : cablelen => not valid in 10Mb/s mode + * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m + * Bit 10 : link => 1:link up, 0:link down + * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet + * Bit 12 : pagercvd => 1:page received, 0:page not received + * Bit 13 : duplex => 1:full duplex, 0:half duplex + * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd + */ + +#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) + +#define netxen_set_phy_speed(config_word, val) \ + ((config_word) |= ((val & 0x03) << 14)) +#define netxen_set_phy_duplex(config_word) \ + ((config_word) |= 1 << 13) +#define netxen_clear_phy_duplex(config_word) \ + ((config_word) &= ~(1 << 13)) + +#define netxen_get_phy_link(config_word) \ + _netxen_crb_get_bit(config_word, 10) +#define netxen_get_phy_duplex(config_word) \ + _netxen_crb_get_bit(config_word, 13) + +/* + * NIU Mode Register. 
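+ * One enable bit per MAC personality; only the 10/100/1000 bit has an
+ * accessor below (netxen_get_niu_enable_ge).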
+ * Bit 0 : enable FibreChannel + * Bit 1 : enable 10/100/1000 Ethernet + * Bit 2 : enable 10Gb Ethernet + */ + +#define netxen_get_niu_enable_ge(config_word) \ + _netxen_crb_get_bit(config_word, 1) + +#define NETXEN_NIU_NON_PROMISC_MODE 0 +#define NETXEN_NIU_PROMISC_MODE 1 +#define NETXEN_NIU_ALLMULTI_MODE 2 + +/* + * NIU XG MAC Config Register + * + * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable + * Bit 2 : rx_enable => 1:enable frame recv, 0:disable + * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op + * Bit 27: xaui_framer_reset + * Bit 28: xaui_rx_reset + * Bit 29: xaui_tx_reset + * Bit 30: xg_ingress_afifo_reset + * Bit 31: xg_egress_afifo_reset + */ + +#define netxen_xg_soft_reset(config_word) \ + ((config_word) |= 1 << 4) + +typedef struct { + unsigned valid; + unsigned start_128M; + unsigned end_128M; + unsigned start_2M; +} crb_128M_2M_sub_block_map_t; + +typedef struct { + crb_128M_2M_sub_block_map_t sub_block[16]; +} crb_128M_2M_block_map_t; + +#endif /* __NETXEN_NIC_HW_H_ */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c new file mode 100644 index 0000000..d6c6357 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -0,0 +1,1949 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, + * MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define NETXEN_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
+#define NETXEN_ADDR_ERROR (0xffffffff)
+
+#define crb_addr_transform(name) \
+ crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
+ NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
+
+#define NETXEN_NIC_XDMA_RESET 0x8000ff
+
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring);
+static int netxen_p3_has_mn(struct netxen_adapter *adapter);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void netxen_release_rx_buffers(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct netxen_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = &adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->state == NETXEN_BUFFER_FREE)
+ continue;
+ pci_unmap_single(adapter->pdev,
+ rx_buf->dma,
+ rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+ if (rx_buf->skb != NULL)
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void netxen_release_tx_buffers(struct netxen_adapter *adapter)
+{
+ struct netxen_cmd_buffer *cmd_buf;
+ struct netxen_skb_frag *buffrag;
+ int i, j;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(adapter->pdev, buffrag->dma,
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 0; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+}
+
+void netxen_free_sw_resources(struct
netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_tx_ring *tx_ring; + int ring; + + recv_ctx = &adapter->recv_ctx; + + if (recv_ctx->rds_rings == NULL) + goto skip_rds; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + vfree(rds_ring->rx_buf_arr); + rds_ring->rx_buf_arr = NULL; + } + kfree(recv_ctx->rds_rings); + +skip_rds: + if (adapter->tx_ring == NULL) + return; + + tx_ring = adapter->tx_ring; + vfree(tx_ring->cmd_buf_arr); + kfree(tx_ring); + adapter->tx_ring = NULL; +} + +int netxen_alloc_sw_resources(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + struct netxen_rx_buffer *rx_buf; + int ring, i, size; + + struct netxen_cmd_buffer *cmd_buf_arr; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + size = sizeof(struct nx_host_tx_ring); + tx_ring = kzalloc(size, GFP_KERNEL); + if (tx_ring == NULL) { + dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n", + netdev->name); + return -ENOMEM; + } + adapter->tx_ring = tx_ring; + + tx_ring->num_desc = adapter->num_txd; + tx_ring->txq = netdev_get_tx_queue(netdev, 0); + + cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); + if (cmd_buf_arr == NULL) { + dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", + netdev->name); + goto err_out; + } + tx_ring->cmd_buf_arr = cmd_buf_arr; + + recv_ctx = &adapter->recv_ctx; + + size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring); + rds_ring = kzalloc(size, GFP_KERNEL); + if (rds_ring == NULL) { + dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n", + netdev->name); + goto err_out; + } + recv_ctx->rds_rings = rds_ring; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + switch (ring) { + case RCV_RING_NORMAL: + rds_ring->num_desc = adapter->num_rxd; + if (adapter->ahw.cut_through) { + rds_ring->dma_size = + NX_CT_DEFAULT_RX_BUF_LEN; + rds_ring->skb_size = + NX_CT_DEFAULT_RX_BUF_LEN; + } else { + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + rds_ring->dma_size = + NX_P3_RX_BUF_MAX_LEN; + else + rds_ring->dma_size = + NX_P2_RX_BUF_MAX_LEN; + rds_ring->skb_size = + rds_ring->dma_size + NET_IP_ALIGN; + } + break; + + case RCV_RING_JUMBO: + rds_ring->num_desc = adapter->num_jumbo_rxd; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + rds_ring->dma_size = + NX_P3_RX_JUMBO_BUF_MAX_LEN; + else + rds_ring->dma_size = + NX_P2_RX_JUMBO_BUF_MAX_LEN; + + if (adapter->capabilities & NX_CAP0_HW_LRO) + rds_ring->dma_size += NX_LRO_BUFFER_EXTRA; + + rds_ring->skb_size = + rds_ring->dma_size + NET_IP_ALIGN; + break; + + case RCV_RING_LRO: + rds_ring->num_desc = adapter->num_lro_rxd; + rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH; + rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; + break; + + } + rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); + if (rds_ring->rx_buf_arr == NULL) { + printk(KERN_ERR "%s: Failed to allocate " + "rx buffer ring %d\n", + netdev->name, ring); + /* free whatever was already allocated */ + goto err_out; + } + INIT_LIST_HEAD(&rds_ring->free_list); + /* + * Now go through all of them, set reference handles + * and put them in the queues. 
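 *
 * Because ref_handle is set equal to the array index in the loop
 * below, the handle the firmware later echoes back in a status
 * descriptor is a direct index into rx_buf_arr. A minimal sketch of
 * the O(1) lookup this buys (netxen_process_rcv() is the hardened
 * version, with bounds checks on both ring and handle):
 *
 *	struct netxen_rx_buffer *buf = &rds_ring->rx_buf_arr[ref_handle];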
+ */
+		rx_buf = rds_ring->rx_buf_arr;
+		for (i = 0; i < rds_ring->num_desc; i++) {
+			list_add_tail(&rx_buf->list,
+					&rds_ring->free_list);
+			rx_buf->ref_handle = i;
+			rx_buf->state = NETXEN_BUFFER_FREE;
+			rx_buf++;
+		}
+		spin_lock_init(&rds_ring->lock);
+	}
+
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		sds_ring->irq = adapter->msix_entries[ring].vector;
+		sds_ring->adapter = adapter;
+		sds_ring->num_desc = adapter->num_rxd;
+
+		for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+			INIT_LIST_HEAD(&sds_ring->free_list[i]);
+	}
+
+	return 0;
+
+err_out:
+	netxen_free_sw_resources(adapter);
+	return -ENOMEM;
+}
+
+/*
+ * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
+ * address to external PCI CRB address.
+ */
+static u32 netxen_decode_crb_addr(u32 addr)
+{
+	int i;
+	u32 base_addr, offset, pci_base;
+
+	crb_addr_transform_setup();
+
+	pci_base = NETXEN_ADDR_ERROR;
+	base_addr = addr & 0xfff00000;
+	offset = addr & 0x000fffff;
+
+	for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
+		if (crb_addr_xform[i] == base_addr) {
+			pci_base = i << 20;
+			break;
+		}
+	}
+	if (pci_base == NETXEN_ADDR_ERROR)
+		return pci_base;
+	else
+		return pci_base + offset;
+}
+
+#define NETXEN_MAX_ROM_WAIT_USEC	100
+
+static int netxen_wait_rom_done(struct netxen_adapter *adapter)
+{
+	long timeout = 0;
+	long done = 0;
+
+	cond_resched();
+
+	while (done == 0) {
+		done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
+		done &= 2;
+		if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
+			dev_err(&adapter->pdev->dev,
+				"Timeout reached waiting for rom done\n");
+			return -EIO;
+		}
+		udelay(1);
+	}
+	return 0;
+}
+
+static int do_rom_fast_read(struct netxen_adapter *adapter,
+		int addr, int *valp)
+{
+	NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+	NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+	NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+	NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+	if (netxen_wait_rom_done(adapter)) {
+		printk(KERN_ERR "Error waiting for rom done\n");
+		return -EIO;
+	}
+	/* reset abyte_cnt and dummy_byte_cnt */
+	NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+	udelay(10);
+	NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+	*valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
+	return 0;
+}
+
+static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+		u8 *bytes, size_t size)
+{
+	int addridx;
+	int ret = 0;
+
+	for (addridx = addr; addridx < (addr + size); addridx += 4) {
+		int v;
+		ret = do_rom_fast_read(adapter, addridx, &v);
+		if (ret != 0)
+			break;
+		*(__le32 *)bytes = cpu_to_le32(v);
+		bytes += 4;
+	}
+
+	return ret;
+}
+
+int
+netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+		u8 *bytes, size_t size)
+{
+	int ret;
+
+	ret = netxen_rom_lock(adapter);
+	if (ret < 0)
+		return ret;
+
+	ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+	netxen_rom_unlock(adapter);
+	return ret;
+}
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
+{
+	int ret;
+
+	if (netxen_rom_lock(adapter) != 0)
+		return -EIO;
+
+	ret = do_rom_fast_read(adapter, addr, valp);
+	netxen_rom_unlock(adapter);
+	return ret;
+}
+
+#define NETXEN_BOARDTYPE	0x4008
+#define NETXEN_BOARDNUM		0x400c
+#define NETXEN_CHIPNUM		0x4010
+
+int netxen_pinit_from_rom(struct netxen_adapter *adapter)
+{
+	int addr, val;
+	int i, n, init_delay = 0;
+	struct crb_addr_pair *buf;
+	unsigned offset;
+	u32 off;
+
+	/* resetall */
+	netxen_rom_lock(adapter);
+	NXWR32(adapter,
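/*
 * ROM access protocol used by do_rom_fast_read() above, step by step
 * (all through the ROMUSB glue registers; callers must hold the rom
 * lock taken by netxen_rom_lock()):
 *
 *	NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);      // flash offset
 *	NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
 *	NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);       // 3 address bytes
 *	NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);  // fast-read opcode
 *	// poll NETXEN_ROMUSB_GLB_STATUS bit 1 for up to 100 us, then
 *	val = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
 */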
NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff); + netxen_rom_unlock(adapter); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (netxen_rom_fast_read(adapter, 0, &n) != 0 || + (n != 0xcafecafe) || + netxen_rom_fast_read(adapter, 4, &n) != 0) { + printk(KERN_ERR "%s: ERROR Reading crb_init area: " + "n: %08x\n", netxen_nic_driver_name, n); + return -EIO; + } + offset = n & 0xffffU; + n = (n >> 16) & 0xffffU; + } else { + if (netxen_rom_fast_read(adapter, 0, &n) != 0 || + !(n & 0x80000000)) { + printk(KERN_ERR "%s: ERROR Reading crb_init area: " + "n: %08x\n", netxen_nic_driver_name, n); + return -EIO; + } + offset = 1; + n &= ~0x80000000; + } + + if (n >= 1024) { + printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not" + " initialized.\n", __func__, n); + return -EIO; + } + + buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); + if (buf == NULL) { + printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n", + netxen_nic_driver_name); + return -ENOMEM; + } + + for (i = 0; i < n; i++) { + if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || + netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { + kfree(buf); + return -EIO; + } + + buf[i].addr = addr; + buf[i].data = val; + + } + + for (i = 0; i < n; i++) { + + off = netxen_decode_crb_addr(buf[i].addr); + if (off == NETXEN_ADDR_ERROR) { + printk(KERN_ERR"CRB init value out of range %x\n", + buf[i].addr); + continue; + } + off += NETXEN_PCI_CRBSPACE; + + if (off & 1) + continue; + + /* skipping cold reboot MAGIC */ + if (off == NETXEN_CAM_RAM(0x1fc)) + continue; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (off == (NETXEN_CRB_I2C0 + 0x1c)) + continue; + /* do not reset PCI */ + if (off == (ROMUSB_GLB + 0xbc)) + continue; + if (off == (ROMUSB_GLB + 0xa8)) + continue; + if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ + continue; + if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ + continue; + if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ + continue; + if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET) + continue; + if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) && + !NX_IS_REVISION_P3P(adapter->ahw.revision_id)) + buf[i].data = 0x1020; + /* skip the function enable register */ + if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION)) + continue; + if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2)) + continue; + if ((off & 0x0ff00000) == NETXEN_CRB_SMB) + continue; + } + + init_delay = 1; + /* After writing this register, HW needs time for CRB */ + /* to quiet down (else crb_window returns 0xffffffff) */ + if (off == NETXEN_ROMUSB_GLB_SW_RESET) { + init_delay = 1000; + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + /* hold xdma in reset also */ + buf[i].data = NETXEN_NIC_XDMA_RESET; + buf[i].data = 0x8000ff; + } + } + + NXWR32(adapter, off, buf[i].data); + + msleep(init_delay); + } + kfree(buf); + + /* disable_peg_cache_all */ + + /* unreset_net_cache */ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); + NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f)); + } + + /* p2dn replyCount */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); + /* disable_peg_cache 0 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8); + /* disable_peg_cache 1 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8); + + /* peg_clr_all */ + + /* peg_clr 0 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0); + /* peg_clr 1 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_1 
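/*
 * Shape of the flash-driven init that netxen_pinit_from_rom() replays
 * above: the ROM stores n (addr, value) pairs; each internal address
 * is translated to a PCI CRB offset and written back, skipping a
 * blacklist of registers that must survive (clock setup, PCI function
 * enables, the cold-reboot magic in CAM RAM), with a long settle delay
 * after the global soft reset. In pseudo-code, where blacklisted()
 * stands in for the chain of explicit compares in the loop:
 *
 *	for (i = 0; i < n; i++) {
 *		off = netxen_decode_crb_addr(buf[i].addr) + NETXEN_PCI_CRBSPACE;
 *		if (blacklisted(off))
 *			continue;
 *		NXWR32(adapter, off, buf[i].data);
 *		msleep(init_delay);	// 1000 ms after SW_RESET, else 1 ms
 *	}
 */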
+ 0xc, 0); + /* peg_clr 2 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0); + /* peg_clr 3 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0); + return 0; +} + +static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section) +{ + uint32_t i; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + __le32 entries = cpu_to_le32(directory->num_entries); + + for (i = 0; i < entries; i++) { + + __le32 offs = cpu_to_le32(directory->findex) + + (i * cpu_to_le32(directory->entry_size)); + __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); + + if (tab_type == section) + return (struct uni_table_desc *) &unirom[offs]; + } + + return NULL; +} + +#define QLCNIC_FILEHEADER_SIZE (14 * 4) + +static int +netxen_nic_validate_header(struct netxen_adapter *adapter) + { + const u8 *unirom = adapter->fw->data; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + u32 fw_file_size = adapter->fw->size; + u32 tab_size; + __le32 entries; + __le32 entry_size; + + if (fw_file_size < QLCNIC_FILEHEADER_SIZE) + return -EINVAL; + + entries = cpu_to_le32(directory->num_entries); + entry_size = cpu_to_le32(directory->entry_size); + tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); + + if (fw_file_size < tab_size) + return -EINVAL; + + return 0; +} + +static int +netxen_nic_validate_bootld(struct netxen_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + NX_UNI_BOOTLD_IDX_OFF)); + u32 offs; + u32 tab_size; + u32 data_size; + + tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + +static int +netxen_nic_validate_fw(struct netxen_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + NX_UNI_FIRMWARE_IDX_OFF)); + u32 offs; + u32 tab_size; + u32 data_size; + + tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + + +static int +netxen_nic_validate_product_offs(struct netxen_adapter *adapter) +{ + struct uni_table_desc *ptab_descr; + const u8 *unirom = adapter->fw->data; + int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ? 
+ 1 : netxen_p3_has_mn(adapter); + __le32 entries; + __le32 entry_size; + u32 tab_size; + u32 i; + + ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL); + if (ptab_descr == NULL) + return -EINVAL; + + entries = cpu_to_le32(ptab_descr->num_entries); + entry_size = cpu_to_le32(ptab_descr->entry_size); + tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); + + if (adapter->fw->size < tab_size) + return -EINVAL; + +nomn: + for (i = 0; i < entries; i++) { + + __le32 flags, file_chiprev, offs; + u8 chiprev = adapter->ahw.revision_id; + uint32_t flagbit; + + offs = cpu_to_le32(ptab_descr->findex) + + (i * cpu_to_le32(ptab_descr->entry_size)); + flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF)); + file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + + NX_UNI_CHIP_REV_OFF)); + + flagbit = mn_present ? 1 : 2; + + if ((chiprev == file_chiprev) && + ((1ULL << flagbit) & flags)) { + adapter->file_prd_off = offs; + return 0; + } + } + + if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + mn_present = 0; + goto nomn; + } + + return -EINVAL; +} + +static int +netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter) +{ + if (netxen_nic_validate_header(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: header validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_product_offs(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: product validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_bootld(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: bootld validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_fw(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: firmware validation failed\n"); + return -EINVAL; + } + + return 0; +} + +static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter, + u32 section, u32 idx_offset) +{ + const u8 *unirom = adapter->fw->data; + int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + idx_offset)); + struct uni_table_desc *tab_desc; + __le32 offs; + + tab_desc = nx_get_table_desc(unirom, section); + + if (tab_desc == NULL) + return NULL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * idx); + + return (struct uni_data_desc *)&unirom[offs]; +} + +static u8 * +nx_get_bootld_offs(struct netxen_adapter *adapter) +{ + u32 offs = NETXEN_BOOTLD_START; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) + offs = cpu_to_le32((nx_get_data_desc(adapter, + NX_UNI_DIR_SECT_BOOTLD, + NX_UNI_BOOTLD_IDX_OFF))->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static u8 * +nx_get_fw_offs(struct netxen_adapter *adapter) +{ + u32 offs = NETXEN_IMAGE_START; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) + offs = cpu_to_le32((nx_get_data_desc(adapter, + NX_UNI_DIR_SECT_FW, + NX_UNI_FIRMWARE_IDX_OFF))->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static __le32 +nx_get_fw_size(struct netxen_adapter *adapter) +{ + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) + return cpu_to_le32((nx_get_data_desc(adapter, + NX_UNI_DIR_SECT_FW, + NX_UNI_FIRMWARE_IDX_OFF))->size); + else + return cpu_to_le32( + *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]); +} + +static __le32 +nx_get_fw_version(struct netxen_adapter *adapter) +{ + struct uni_data_desc *fw_data_desc; + const struct firmware *fw = adapter->fw; + __le32 major, minor, sub; + const u8 *ver_str; + int i, ret = 0; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { + + fw_data_desc = nx_get_data_desc(adapter, 
+			NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
+		ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+				cpu_to_le32(fw_data_desc->size) - 17;
+
+		for (i = 0; i < 12; i++) {
+			if (!strncmp(&ver_str[i], "REV=", 4)) {
+				ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+						&major, &minor, &sub);
+				break;
+			}
+		}
+
+		if (ret != 3)
+			return 0;
+
+		return major + (minor << 8) + (sub << 16);
+
+	} else
+		return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+}
+
+static __le32
+nx_get_bios_version(struct netxen_adapter *adapter)
+{
+	const struct firmware *fw = adapter->fw;
+	__le32 bios_ver, prd_off = adapter->file_prd_off;
+
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+						+ NX_UNI_BIOS_VERSION_OFF));
+		return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
+							(bios_ver >> 24);
+	} else
+		return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
+
+}
+
+int
+netxen_need_fw_reset(struct netxen_adapter *adapter)
+{
+	u32 count, old_count;
+	u32 val, version, major, minor, build;
+	int i, timeout;
+	u8 fw_type;
+
+	/* NX2031 firmware doesn't support heartbeat */
+	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+		return 1;
+
+	if (adapter->need_fw_reset)
+		return 1;
+
+	/* last attempt had failed */
+	if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+		return 1;
+
+	old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+
+	for (i = 0; i < 10; i++) {
+
+		timeout = msleep_interruptible(200);
+		if (timeout) {
+			NXWR32(adapter, CRB_CMDPEG_STATE,
+					PHAN_INITIALIZE_FAILED);
+			return -EINTR;
+		}
+
+		count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+		if (count != old_count)
+			break;
+	}
+
+	/* firmware is dead */
+	if (count == old_count)
+		return 1;
+
+	/* check if we have a newer or different firmware file */
+	if (adapter->fw) {
+
+		val = nx_get_fw_version(adapter);
+
+		version = NETXEN_DECODE_VERSION(val);
+
+		major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+		minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+		build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+
+		if (version > NETXEN_VERSION_CODE(major, minor, build))
+			return 1;
+
+		if (version == NETXEN_VERSION_CODE(major, minor, build) &&
+			adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
+
+			val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
+			fw_type = (val & 0x4) ?
+				NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
+
+			if (adapter->fw_type != fw_type)
+				return 1;
+		}
+	}
+
+	return 0;
+}
+
+#define NETXEN_MIN_P3_FW_SUPP	NETXEN_VERSION_CODE(4, 0, 505)
+
+int
+netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
+{
+	u32 flash_fw_ver, min_fw_ver;
+
+	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+		return 0;
+
+	if (netxen_rom_fast_read(adapter,
+			NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+		dev_err(&adapter->pdev->dev, "Unable to read flash fw "
+			"version\n");
+		return -EIO;
+	}
+
+	flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+	min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
+	if (flash_fw_ver >= min_fw_ver)
+		return 0;
+
+	dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw "
+		"supported [4.0.505]. 
Please update firmware on flash\n", + _major(flash_fw_ver), _minor(flash_fw_ver), + _build(flash_fw_ver)); + return -EINVAL; +} + +static char *fw_name[] = { + NX_P2_MN_ROMIMAGE_NAME, + NX_P3_CT_ROMIMAGE_NAME, + NX_P3_MN_ROMIMAGE_NAME, + NX_UNIFIED_ROMIMAGE_NAME, + NX_FLASH_ROMIMAGE_NAME, +}; + +int +netxen_load_firmware(struct netxen_adapter *adapter) +{ + u64 *ptr64; + u32 i, flashaddr, size; + const struct firmware *fw = adapter->fw; + struct pci_dev *pdev = adapter->pdev; + + dev_info(&pdev->dev, "loading firmware from %s\n", + fw_name[adapter->fw_type]); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1); + + if (fw) { + __le64 data; + + size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; + + ptr64 = (u64 *)nx_get_bootld_offs(adapter); + flashaddr = NETXEN_BOOTLD_START; + + for (i = 0; i < size; i++) { + data = cpu_to_le64(ptr64[i]); + + if (adapter->pci_mem_write(adapter, flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = (__force u32)nx_get_fw_size(adapter) / 8; + + ptr64 = (u64 *)nx_get_fw_offs(adapter); + flashaddr = NETXEN_IMAGE_START; + + for (i = 0; i < size; i++) { + data = cpu_to_le64(ptr64[i]); + + if (adapter->pci_mem_write(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = (__force u32)nx_get_fw_size(adapter) % 8; + if (size) { + data = cpu_to_le64(ptr64[i]); + + if (adapter->pci_mem_write(adapter, + flashaddr, data)) + return -EIO; + } + + } else { + u64 data; + u32 hi, lo; + + size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; + flashaddr = NETXEN_BOOTLD_START; + + for (i = 0; i < size; i++) { + if (netxen_rom_fast_read(adapter, + flashaddr, (int *)&lo) != 0) + return -EIO; + if (netxen_rom_fast_read(adapter, + flashaddr + 4, (int *)&hi) != 0) + return -EIO; + + /* hi, lo are already in host endian byteorder */ + data = (((u64)hi << 32) | lo); + + if (adapter->pci_mem_write(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + } + msleep(1); + + if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { + NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020); + NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e); + } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d); + else { + NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); + NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0); + } + + return 0; +} + +static int +netxen_validate_firmware(struct netxen_adapter *adapter) +{ + __le32 val; + __le32 flash_fw_ver; + u32 file_fw_ver, min_ver, bios; + struct pci_dev *pdev = adapter->pdev; + const struct firmware *fw = adapter->fw; + u8 fw_type = adapter->fw_type; + u32 crbinit_fix_fw; + + if (fw_type == NX_UNIFIED_ROMIMAGE) { + if (netxen_nic_validate_unified_romimage(adapter)) + return -EINVAL; + } else { + val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); + if ((__force u32)val != NETXEN_BDINFO_MAGIC) + return -EINVAL; + + if (fw->size < NX_FW_MIN_SIZE) + return -EINVAL; + } + + val = nx_get_fw_version(adapter); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + min_ver = NETXEN_MIN_P3_FW_SUPP; + else + min_ver = NETXEN_VERSION_CODE(3, 4, 216); + + file_fw_ver = NETXEN_DECODE_VERSION(val); + + if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) || + (file_fw_ver < min_ver)) { + dev_err(&pdev->dev, + "%s: firmware version %d.%d.%d unsupported\n", + fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver), + _build(file_fw_ver)); + return -EINVAL; + } + + val = nx_get_bios_version(adapter); 
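/*
 * Worked example of the version arithmetic this function relies on,
 * assuming NETXEN_VERSION_CODE(a, b, c) packs as (a << 24) + (b << 16)
 * + (c) (its definition lives in netxen_nic.h, not in this hunk) and
 * NETXEN_DECODE_VERSION() inverts the raw flash layout of
 * major | minor << 8 | build << 16 that nx_get_fw_version() above
 * also returns:
 *
 *	u32 raw  = 0x01f90004;                  // fw 4.0.505 as stored
 *	u32 code = NETXEN_DECODE_VERSION(raw);  // 0x040001f9
 *	// code >= NETXEN_VERSION_CODE(4, 0, 505) now holds, so decoded
 *	// versions compare correctly as plain integers
 */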
+ netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); + if ((__force u32)val != bios) { + dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", + fw_name[fw_type]); + return -EINVAL; + } + + if (netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) { + dev_err(&pdev->dev, "Unable to read flash fw version\n"); + return -EIO; + } + flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); + + /* New fw from file is not allowed, if fw on flash is < 4.0.554 */ + crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554); + if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw && + NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + dev_err(&pdev->dev, "Incompatibility detected between driver " + "and firmware version on flash. This configuration " + "is not recommended. Please update the firmware on " + "flash immediately\n"); + return -EINVAL; + } + + /* check if flashed firmware is newer only for no-mn and P2 case*/ + if (!netxen_p3_has_mn(adapter) || + NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if (flash_fw_ver > file_fw_ver) { + dev_info(&pdev->dev, "%s: firmware is older than flash\n", + fw_name[fw_type]); + return -EINVAL; + } + } + + NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); + return 0; +} + +static void +nx_get_next_fwtype(struct netxen_adapter *adapter) +{ + u8 fw_type; + + switch (adapter->fw_type) { + case NX_UNKNOWN_ROMIMAGE: + fw_type = NX_UNIFIED_ROMIMAGE; + break; + + case NX_UNIFIED_ROMIMAGE: + if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) + fw_type = NX_FLASH_ROMIMAGE; + else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + fw_type = NX_P2_MN_ROMIMAGE; + else if (netxen_p3_has_mn(adapter)) + fw_type = NX_P3_MN_ROMIMAGE; + else + fw_type = NX_P3_CT_ROMIMAGE; + break; + + case NX_P3_MN_ROMIMAGE: + fw_type = NX_P3_CT_ROMIMAGE; + break; + + case NX_P2_MN_ROMIMAGE: + case NX_P3_CT_ROMIMAGE: + default: + fw_type = NX_FLASH_ROMIMAGE; + break; + } + + adapter->fw_type = fw_type; +} + +static int +netxen_p3_has_mn(struct netxen_adapter *adapter) +{ + u32 capability, flashed_ver; + capability = 0; + + /* NX2031 always had MN */ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 1; + + netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flashed_ver); + flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); + + if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) { + + capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY); + if (capability & NX_PEG_TUNE_MN_PRESENT) + return 1; + } + return 0; +} + +void netxen_request_firmware(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int rc = 0; + + adapter->fw_type = NX_UNKNOWN_ROMIMAGE; + +next: + nx_get_next_fwtype(adapter); + + if (adapter->fw_type == NX_FLASH_ROMIMAGE) { + adapter->fw = NULL; + } else { + rc = request_firmware(&adapter->fw, + fw_name[adapter->fw_type], &pdev->dev); + if (rc != 0) + goto next; + + rc = netxen_validate_firmware(adapter); + if (rc != 0) { + release_firmware(adapter->fw); + msleep(1); + goto next; + } + } +} + + +void +netxen_release_firmware(struct netxen_adapter *adapter) +{ + if (adapter->fw) + release_firmware(adapter->fw); + adapter->fw = NULL; +} + +int netxen_init_dummy_dma(struct netxen_adapter *adapter) +{ + u64 addr; + u32 hi, lo; + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev, + NETXEN_HOST_DUMMY_DMA_SIZE, + &adapter->dummy_dma.phys_addr); + if (adapter->dummy_dma.addr == NULL) { + dev_err(&adapter->pdev->dev, + 
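/*
 * The firmware selection above is a retry ladder driven by
 * nx_get_next_fwtype(): netxen_request_firmware() keeps asking for
 * the next candidate until one loads and validates. Per chip family
 * the order tried is:
 *
 *	P3P:            unified -> flash
 *	P2 (NX2031):    unified -> P2 MN image -> flash
 *	P3, MN fitted:  unified -> MN image -> CT image -> flash
 *	P3, no MN:      unified -> CT image -> flash
 *
 * where "flash" means adapter->fw stays NULL and the image already
 * resident on the card is booted instead.
 */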
"ERROR: Could not allocate dummy DMA memory\n"); + return -ENOMEM; + } + + addr = (uint64_t) adapter->dummy_dma.phys_addr; + hi = (addr >> 32) & 0xffffffff; + lo = addr & 0xffffffff; + + NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi); + NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo); + + return 0; +} + +/* + * NetXen DMA watchdog control: + * + * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive + * Bit 1 : disable_request => 1 req disable dma watchdog + * Bit 2 : enable_request => 1 req enable dma watchdog + * Bit 3-31 : unused + */ +void netxen_free_dummy_dma(struct netxen_adapter *adapter) +{ + int i = 100; + u32 ctrl; + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return; + + if (!adapter->dummy_dma.addr) + return; + + ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); + if ((ctrl & 0x1) != 0) { + NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2)); + + while ((ctrl & 0x1) != 0) { + + msleep(50); + + ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); + + if (--i == 0) + break; + } + } + + if (i) { + pci_free_consistent(adapter->pdev, + NETXEN_HOST_DUMMY_DMA_SIZE, + adapter->dummy_dma.addr, + adapter->dummy_dma.phys_addr); + adapter->dummy_dma.addr = NULL; + } else + dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n"); +} + +int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) +{ + u32 val = 0; + int retries = 60; + + if (pegtune_val) + return 0; + + do { + val = NXRD32(adapter, CRB_CMDPEG_STATE); + + switch (val) { + case PHAN_INITIALIZE_COMPLETE: + case PHAN_INITIALIZE_ACK: + return 0; + case PHAN_INITIALIZE_FAILED: + goto out_err; + default: + break; + } + + msleep(500); + + } while (--retries); + + NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); + +out_err: + dev_warn(&adapter->pdev->dev, "firmware init failed\n"); + return -EIO; +} + +static int +netxen_receive_peg_ready(struct netxen_adapter *adapter) +{ + u32 val = 0; + int retries = 2000; + + do { + val = NXRD32(adapter, CRB_RCVPEG_STATE); + + if (val == PHAN_PEG_RCV_INITIALIZED) + return 0; + + msleep(10); + + } while (--retries); + + if (!retries) { + printk(KERN_ERR "Receive Peg initialization not " + "complete, state: 0x%x.\n", val); + return -EIO; + } + + return 0; +} + +int netxen_init_firmware(struct netxen_adapter *adapter) +{ + int err; + + err = netxen_receive_peg_ready(adapter); + if (err) + return err; + + NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT); + NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE); + NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC); + + return err; +} + +static void +netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg) +{ + u32 cable_OUI; + u16 cable_len; + u16 link_speed; + u8 link_status, module, duplex, autoneg; + struct net_device *netdev = adapter->netdev; + + adapter->has_link_events = 1; + + cable_OUI = msg->body[1] & 0xffffffff; + cable_len = (msg->body[1] >> 32) & 0xffff; + link_speed = (msg->body[1] >> 48) & 0xffff; + + link_status = msg->body[2] & 0xff; + duplex = (msg->body[2] >> 16) & 0xff; + autoneg = (msg->body[2] >> 24) & 0xff; + + module = (msg->body[2] >> 8) & 0xff; + if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) { + printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n", + netdev->name, cable_OUI, cable_len); + } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) { + printk(KERN_INFO "%s: unsupported cable length 
%d\n", + netdev->name, cable_len); + } + + netxen_advert_link_change(adapter, link_status); + + /* update link parameters */ + if (duplex == LINKEVENT_FULL_DUPLEX) + adapter->link_duplex = DUPLEX_FULL; + else + adapter->link_duplex = DUPLEX_HALF; + adapter->module_type = module; + adapter->link_autoneg = autoneg; + adapter->link_speed = link_speed; +} + +static void +netxen_handle_fw_message(int desc_cnt, int index, + struct nx_host_sds_ring *sds_ring) +{ + nx_fw_msg_t msg; + struct status_desc *desc; + int i = 0, opcode; + + while (desc_cnt > 0 && i < 8) { + desc = &sds_ring->desc_head[index]; + msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); + msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); + + index = get_next_index(index, sds_ring->num_desc); + desc_cnt--; + } + + opcode = netxen_get_nic_msg_opcode(msg.body[0]); + switch (opcode) { + case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: + netxen_handle_linkevent(sds_ring->adapter, &msg); + break; + default: + break; + } +} + +static int +netxen_alloc_rx_skb(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring, + struct netxen_rx_buffer *buffer) +{ + struct sk_buff *skb; + dma_addr_t dma; + struct pci_dev *pdev = adapter->pdev; + + buffer->skb = dev_alloc_skb(rds_ring->skb_size); + if (!buffer->skb) + return 1; + + skb = buffer->skb; + + if (!adapter->ahw.cut_through) + skb_reserve(skb, 2); + + dma = pci_map_single(pdev, skb->data, + rds_ring->dma_size, PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(pdev, dma)) { + dev_kfree_skb_any(skb); + buffer->skb = NULL; + return 1; + } + + buffer->skb = skb; + buffer->dma = dma; + buffer->state = NETXEN_BUFFER_BUSY; + + return 0; +} + +static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum) +{ + struct netxen_rx_buffer *buffer; + struct sk_buff *skb; + + buffer = &rds_ring->rx_buf_arr[index]; + + pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, + PCI_DMA_FROMDEVICE); + + skb = buffer->skb; + if (!skb) + goto no_skb; + + if (likely((adapter->netdev->features & NETIF_F_RXCSUM) + && cksum == STATUS_CKSUM_OK)) { + adapter->stats.csummed++; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else + skb->ip_summed = CHECKSUM_NONE; + + skb->dev = adapter->netdev; + + buffer->skb = NULL; +no_skb: + buffer->state = NETXEN_BUFFER_FREE; + return skb; +} + +static struct netxen_rx_buffer * +netxen_process_rcv(struct netxen_adapter *adapter, + struct nx_host_sds_ring *sds_ring, + int ring, u64 sts_data0) +{ + struct net_device *netdev = adapter->netdev; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + struct netxen_rx_buffer *buffer; + struct sk_buff *skb; + struct nx_host_rds_ring *rds_ring; + int index, length, cksum, pkt_offset; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = netxen_get_sts_refhandle(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + length = netxen_get_sts_totallength(sts_data0); + cksum = netxen_get_sts_status(sts_data0); + pkt_offset = netxen_get_sts_pkt_offset(sts_data0); + + skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return buffer; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + + if (pkt_offset) + skb_pull(skb, pkt_offset); + + skb->protocol = eth_type_trans(skb, netdev); + + napi_gro_receive(&sds_ring->napi, skb); + + 
adapter->stats.rx_pkts++;
+	adapter->stats.rxbytes += length;
+
+	return buffer;
+}
+
+#define TCP_HDR_SIZE 20
+#define TCP_TS_OPTION_SIZE 12
+#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
+
+static struct netxen_rx_buffer *
+netxen_process_lro(struct netxen_adapter *adapter,
+		struct nx_host_sds_ring *sds_ring,
+		int ring, u64 sts_data0, u64 sts_data1)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+	struct netxen_rx_buffer *buffer;
+	struct sk_buff *skb;
+	struct nx_host_rds_ring *rds_ring;
+	struct iphdr *iph;
+	struct tcphdr *th;
+	bool push, timestamp;
+	int l2_hdr_offset, l4_hdr_offset;
+	int index;
+	u16 lro_length, length, data_offset;
+	u32 seq_number;
+	u8 vhdr_len = 0;
+
+	if (unlikely(ring > adapter->max_rds_rings))
+		return NULL;
+
+	rds_ring = &recv_ctx->rds_rings[ring];
+
+	index = netxen_get_lro_sts_refhandle(sts_data0);
+	if (unlikely(index > rds_ring->num_desc))
+		return NULL;
+
+	buffer = &rds_ring->rx_buf_arr[index];
+
+	timestamp = netxen_get_lro_sts_timestamp(sts_data0);
+	lro_length = netxen_get_lro_sts_length(sts_data0);
+	l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
+	l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
+	push = netxen_get_lro_sts_push_flag(sts_data0);
+	seq_number = netxen_get_lro_sts_seq_number(sts_data1);
+
+	skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+	if (!skb)
+		return buffer;
+
+	if (timestamp)
+		data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
+	else
+		data_offset = l4_hdr_offset + TCP_HDR_SIZE;
+
+	skb_put(skb, lro_length + data_offset);
+
+	skb_pull(skb, l2_hdr_offset);
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	if (skb->protocol == htons(ETH_P_8021Q))
+		vhdr_len = VLAN_HLEN;
+	iph = (struct iphdr *)(skb->data + vhdr_len);
+	th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
+
+	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+	iph->tot_len = htons(length);
+	iph->check = 0;
+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+	th->psh = push;
+	th->seq = htonl(seq_number);
+
+	length = skb->len;
+
+	netif_receive_skb(skb);
+
+	adapter->stats.lro_pkts++;
+	adapter->stats.rxbytes += length;
+
+	return buffer;
+}
+
+#define netxen_merge_rx_buffers(list, head) \
+	do { list_splice_tail_init(list, head); } while (0);
+
+int
+netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
+{
+	struct netxen_adapter *adapter = sds_ring->adapter;
+
+	struct list_head *cur;
+
+	struct status_desc *desc;
+	struct netxen_rx_buffer *rxbuf;
+
+	u32 consumer = sds_ring->consumer;
+
+	int count = 0;
+	u64 sts_data0, sts_data1;
+	int opcode, ring = 0, desc_cnt;
+
+	while (count < max) {
+		desc = &sds_ring->desc_head[consumer];
+		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+		if (!(sts_data0 & STATUS_OWNER_HOST))
+			break;
+
+		desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
+
+		opcode = netxen_get_sts_opcode(sts_data0);
+
+		switch (opcode) {
+		case NETXEN_NIC_RXPKT_DESC:
+		case NETXEN_OLD_RXPKT_DESC:
+		case NETXEN_NIC_SYN_OFFLOAD:
+			ring = netxen_get_sts_type(sts_data0);
+			rxbuf = netxen_process_rcv(adapter, sds_ring,
+					ring, sts_data0);
+			break;
+		case NETXEN_NIC_LRO_DESC:
+			ring = netxen_get_lro_sts_type(sts_data0);
+			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+			rxbuf = netxen_process_lro(adapter, sds_ring,
+					ring, sts_data0, sts_data1);
+			break;
+		case NETXEN_NIC_RESPONSE_DESC:
+			netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
+		default:
+			goto skip;
+		}
+
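/*
 * Ownership hand-off on the status ring consumed above, sketched
 * (STATUS_OWNER_* are the descriptor owner bits this loop tests and
 * sets):
 *
 *	host:     if (!(sts_data0 & STATUS_OWNER_HOST))
 *	                  break;              // still the firmware's
 *	          ... process packet / fw message ...
 *	          desc->status_desc_data[0] =
 *	                  cpu_to_le64(STATUS_OWNER_PHANTOM);  // give back
 *	firmware: fills the descriptor, flips the owner bit back to
 *	          HOST, raises the interrupt for this sds ring
 */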
WARN_ON(desc_cnt > 1); + + if (rxbuf) + list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); + +skip: + for (; desc_cnt > 0; desc_cnt--) { + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = + cpu_to_le64(STATUS_OWNER_PHANTOM); + consumer = get_next_index(consumer, sds_ring->num_desc); + } + count++; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + struct nx_host_rds_ring *rds_ring = + &adapter->recv_ctx.rds_rings[ring]; + + if (!list_empty(&sds_ring->free_list[ring])) { + list_for_each(cur, &sds_ring->free_list[ring]) { + rxbuf = list_entry(cur, + struct netxen_rx_buffer, list); + netxen_alloc_rx_skb(adapter, rds_ring, rxbuf); + } + spin_lock(&rds_ring->lock); + netxen_merge_rx_buffers(&sds_ring->free_list[ring], + &rds_ring->free_list); + spin_unlock(&rds_ring->lock); + } + + netxen_post_rx_buffers_nodb(adapter, rds_ring); + } + + if (count) { + sds_ring->consumer = consumer; + NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer); + } + + return count; +} + +/* Process Command status ring */ +int netxen_process_cmd_ring(struct netxen_adapter *adapter) +{ + u32 sw_consumer, hw_consumer; + int count = 0, i; + struct netxen_cmd_buffer *buffer; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct netxen_skb_frag *frag; + int done = 0; + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + + if (!spin_trylock(&adapter->tx_clean_lock)) + return 1; + + sw_consumer = tx_ring->sw_consumer; + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + + while (sw_consumer != hw_consumer) { + buffer = &tx_ring->cmd_buf_arr[sw_consumer]; + if (buffer->skb) { + frag = &buffer->frag_array[0]; + pci_unmap_single(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + for (i = 1; i < buffer->frag_count; i++) { + frag++; /* Get the next frag */ + pci_unmap_page(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + } + + adapter->stats.xmitfinished++; + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; + } + + sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); + if (++count >= MAX_STATUS_HANDLE) + break; + } + + if (count && netif_running(netdev)) { + tx_ring->sw_consumer = sw_consumer; + + smp_mb(); + + if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_wake_queue(netdev); + adapter->tx_timeo_cnt = 0; + } + /* + * If everything is freed up to consumer then check if the ring is full + * If the ring is full then check if more needs to be freed and + * schedule the call back again. + * + * This happens when there are 2 CPUs. One could be freeing and the + * other filling it. If the ring is full when we get out of here and + * the card has already interrupted the host then the host can miss the + * interrupt. + * + * There is still a possible race condition and the host could miss an + * interrupt. The card has to take care of this. 
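 *
 * Concretely, with two CPUs on the same tx ring:
 *
 *	CPU0 (xmit path)                 CPU1 (this function)
 *	----------------                 --------------------
 *	fills the last free slot,
 *	stops the queue
 *	                                 frees entries, updates
 *	                                 sw_consumer, wakes the queue
 *	refills the ring; the card has
 *	already raised its interrupt  -> that interrupt is lost to the host
 *
 * The smp_mb() above orders the sw_consumer store against the
 * queue-stopped/carrier checks, which covers the host side of this
 * dance; the residual window is the one the card itself must handle,
 * as noted.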
+ */ + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + done = (sw_consumer == hw_consumer); + spin_unlock(&adapter->tx_clean_lock); + + return done; +} + +void +netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, + struct nx_host_rds_ring *rds_ring) +{ + struct rcv_desc *pdesc; + struct netxen_rx_buffer *buffer; + int producer, count = 0; + netxen_ctx_msg msg = 0; + struct list_head *head; + + producer = rds_ring->producer; + + head = &rds_ring->free_list; + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct netxen_rx_buffer, list); + + if (!buffer->skb) { + if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + rds_ring->producer = producer; + NXWRIO(adapter, rds_ring->crb_rcv_producer, + (producer-1) & (rds_ring->num_desc-1)); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + /* + * Write a doorbell msg to tell phanmon of change in + * receive ring producer + * Only for firmware version < 4.0.0 + */ + netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); + netxen_set_msg_privid(msg); + netxen_set_msg_count(msg, + ((producer - 1) & + (rds_ring->num_desc - 1))); + netxen_set_msg_ctxid(msg, adapter->portnum); + netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); + NXWRIO(adapter, DB_NORMALIZE(adapter, + NETXEN_RCV_PRODUCER_OFFSET), msg); + } + } +} + +static void +netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring) +{ + struct rcv_desc *pdesc; + struct netxen_rx_buffer *buffer; + int producer, count = 0; + struct list_head *head; + + if (!spin_trylock(&rds_ring->lock)) + return; + + producer = rds_ring->producer; + + head = &rds_ring->free_list; + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct netxen_rx_buffer, list); + + if (!buffer->skb) { + if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + rds_ring->producer = producer; + NXWRIO(adapter, rds_ring->crb_rcv_producer, + (producer - 1) & (rds_ring->num_desc - 1)); + } + spin_unlock(&rds_ring->lock); +} + +void netxen_nic_clear_stats(struct netxen_adapter *adapter) +{ + memset(&adapter->stats, 0, sizeof(adapter->stats)); +} + diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c new file mode 100644 index 0000000..8c7fc32 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -0,0 +1,3100 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include "netxen_nic_hw.h"
+
+#include "netxen_nic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+
+MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
+
+char netxen_nic_driver_name[] = "netxen_nic";
+static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
+	NETXEN_NIC_LINUX_VERSIONID;
+
+static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+
+static int use_msi_x = 1;
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int __devinit netxen_nic_probe(struct pci_dev *pdev,
+		const struct pci_device_id *ent);
+static void __devexit netxen_nic_remove(struct pci_dev *pdev);
+static int netxen_nic_open(struct net_device *netdev);
+static int netxen_nic_close(struct net_device *netdev);
+static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
+		struct net_device *);
+static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
+static void netxen_fw_poll_work(struct work_struct *work);
+static void netxen_schedule_work(struct netxen_adapter *adapter,
+		work_func_t func, int delay);
+static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
+static int netxen_nic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev);
+#endif
+
+static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_create_diag_entries(struct netxen_adapter *adapter);
+static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
+static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
+static int netxen_can_start_firmware(struct netxen_adapter *adapter);
+
+static irqreturn_t netxen_intr(int irq, void *data);
+static irqreturn_t netxen_msi_intr(int irq, void *data);
+static irqreturn_t netxen_msix_intr(int irq, void *data);
+
+static void netxen_free_vlan_ip_list(struct netxen_adapter *);
+static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
+		struct rtnl_link_stats64 *stats);
+static int netxen_nic_set_mac(struct net_device *netdev, void *p);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+	{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
+	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+static
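/*
 * For reference, each ENTRY(dev) above expands (via the standard
 * PCI_DEVICE() helper) to an initializer of this shape, so the table
 * matches on vendor, device, and the Ethernet class code:
 *
 *	{ .vendor = PCI_VENDOR_ID_NETXEN, .device = (dev),
 *	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 *	  .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0 },
 */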
DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = { + ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), + ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), + ENTRY(PCI_DEVICE_ID_NX2031_4GCU), + ENTRY(PCI_DEVICE_ID_NX2031_IMEZ), + ENTRY(PCI_DEVICE_ID_NX2031_HMEZ), + ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT), + ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2), + ENTRY(PCI_DEVICE_ID_NX3031), + {0,} +}; + +MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); + +static uint32_t crb_cmd_producer[4] = { + CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1, + CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3 +}; + +void +netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, + struct nx_host_tx_ring *tx_ring) +{ + NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); +} + +static uint32_t crb_cmd_consumer[4] = { + CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1, + CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3 +}; + +static inline void +netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, + struct nx_host_tx_ring *tx_ring) +{ + NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer); +} + +static uint32_t msi_tgt_status[8] = { + ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, + ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, + ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, + ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 +}; + +static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; + +static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring) +{ + struct netxen_adapter *adapter = sds_ring->adapter; + + NXWRIO(adapter, sds_ring->crb_intr_mask, 0); +} + +static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring) +{ + struct netxen_adapter *adapter = sds_ring->adapter; + + NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1); + + if (!NETXEN_IS_MSI_FAMILY(adapter)) + NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff); +} + +static int +netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count) +{ + int size = sizeof(struct nx_host_sds_ring) * count; + + recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); + + return recv_ctx->sds_rings == NULL; +} + +static void +netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) +{ + if (recv_ctx->sds_rings != NULL) + kfree(recv_ctx->sds_rings); + + recv_ctx->sds_rings = NULL; +} + +static int +netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) + return -ENOMEM; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_add(netdev, &sds_ring->napi, + netxen_nic_poll, NETXEN_NETDEV_WEIGHT); + } + + return 0; +} + +static void +netxen_napi_del(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_del(&sds_ring->napi); + } + + netxen_free_sds_rings(&adapter->recv_ctx); +} + +static void +netxen_napi_enable(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + napi_enable(&sds_ring->napi); + netxen_nic_enable_int(sds_ring); + } +} + +static void 
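/*
 * The per-ring wiring in netxen_napi_add() above follows the standard
 * NAPI contract. A minimal poll callback of the shape registered there
 * would look like this (sketch only; the driver's real
 * netxen_nic_poll(), declared earlier, also reaps tx completions
 * before deciding whether it is done):
 *
 *	static int poll(struct napi_struct *napi, int budget)
 *	{
 *		struct nx_host_sds_ring *sds_ring =
 *			container_of(napi, struct nx_host_sds_ring, napi);
 *		int work = netxen_process_rcv_ring(sds_ring, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			netxen_nic_enable_int(sds_ring);	// re-arm
 *		}
 *		return work;
 *	}
 */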
+netxen_napi_disable(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netxen_nic_disable_int(sds_ring); + napi_synchronize(&sds_ring->napi); + napi_disable(&sds_ring->napi); + } +} + +static int nx_set_dma_mask(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + uint64_t mask, cmask; + + adapter->pci_using_dac = 0; + + mask = DMA_BIT_MASK(32); + cmask = DMA_BIT_MASK(32); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { +#ifndef CONFIG_IA64 + mask = DMA_BIT_MASK(35); +#endif + } else { + mask = DMA_BIT_MASK(39); + cmask = mask; + } + + if (pci_set_dma_mask(pdev, mask) == 0 && + pci_set_consistent_dma_mask(pdev, cmask) == 0) { + adapter->pci_using_dac = 1; + return 0; + } + + return -EIO; +} + +/* Update addressable range if firmware supports it */ +static int +nx_update_dma_mask(struct netxen_adapter *adapter) +{ + int change, shift, err; + uint64_t mask, old_mask, old_cmask; + struct pci_dev *pdev = adapter->pdev; + + change = 0; + + shift = NXRD32(adapter, CRB_DMA_SHIFT); + if (shift > 32) + return 0; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) + change = 1; + else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4)) + change = 1; + + if (change) { + old_mask = pdev->dma_mask; + old_cmask = pdev->dev.coherent_dma_mask; + + mask = DMA_BIT_MASK(32+shift); + + err = pci_set_dma_mask(pdev, mask); + if (err) + goto err_out; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + + err = pci_set_consistent_dma_mask(pdev, mask); + if (err) + goto err_out; + } + dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); + } + + return 0; + +err_out: + pci_set_dma_mask(pdev, old_mask); + pci_set_consistent_dma_mask(pdev, old_cmask); + return err; +} + +static int +netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) +{ + u32 val, timeout; + + if (first_boot == 0x55555555) { + /* This is the first boot after power up */ + NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + /* PCI bus master workaround */ + first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); + if (!(first_boot & 0x4)) { + first_boot |= 0x4; + NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); + NXRD32(adapter, NETXEN_PCIE_REG(0x4)); + } + + /* This is the first boot after power up */ + first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); + if (first_boot != 0x80000f) { + /* clear the register for future unloads/loads */ + NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0); + return -EIO; + } + + /* Start P2 boot loader */ + val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE); + NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1); + timeout = 0; + do { + msleep(1); + val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); + + if (++timeout > 5000) + return -EIO; + + } while (val == NETXEN_BDINFO_MAGIC); + } + return 0; +} + +static void netxen_set_port_mode(struct netxen_adapter *adapter) +{ + u32 val, data; + + val = adapter->ahw.board_type; + if ((val == NETXEN_BRDTYPE_P3_HMEZ) || + (val == NETXEN_BRDTYPE_P3_XG_LOM)) { + if (port_mode == NETXEN_PORT_MODE_802_3_AP) { + data = NETXEN_PORT_MODE_802_3_AP; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else if (port_mode == NETXEN_PORT_MODE_XG) { + data = NETXEN_PORT_MODE_XG; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else if 
(port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) { + data = NETXEN_PORT_MODE_AUTO_NEG_1G; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) { + data = NETXEN_PORT_MODE_AUTO_NEG_XG; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else { + data = NETXEN_PORT_MODE_AUTO_NEG; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } + + if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) && + (wol_port_mode != NETXEN_PORT_MODE_XG) && + (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) && + (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) { + wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG; + } + NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode); + } +} + +static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) +{ + u32 control; + int pos; + + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (pos) { + pci_read_config_dword(pdev, pos, &control); + if (enable) + control |= PCI_MSIX_FLAGS_ENABLE; + else + control = 0; + pci_write_config_dword(pdev, pos, control); + } +} + +static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count) +{ + int i; + + for (i = 0; i < count; i++) + adapter->msix_entries[i].entry = i; +} + +static int +netxen_read_mac_addr(struct netxen_adapter *adapter) +{ + int i; + unsigned char *p; + u64 mac_addr; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) + return -EIO; + } else { + if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) + return -EIO; + } + + p = (unsigned char *)&mac_addr; + for (i = 0; i < 6; i++) + netdev->dev_addr[i] = *(p + 5 - i); + + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); + memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); + + /* set station address */ + + if (!is_valid_ether_addr(netdev->perm_addr)) + dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); + + return 0; +} + +static int netxen_nic_set_mac(struct net_device *netdev, void *p) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + if (netif_running(netdev)) { + netif_device_detach(netdev); + netxen_napi_disable(adapter); + } + + memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + adapter->macaddr_set(adapter, addr->sa_data); + + if (netif_running(netdev)) { + netif_device_attach(netdev); + netxen_napi_enable(adapter); + } + return 0; +} + +static void netxen_set_multicast_list(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + + adapter->set_multi(dev); +} + +static u32 netxen_fix_features(struct net_device *dev, u32 features) +{ + if (!(features & NETIF_F_RXCSUM)) { + netdev_info(dev, "disabling LRO as RXCSUM is off\n"); + + features &= ~NETIF_F_LRO; + } + + return features; +} + +static int netxen_set_features(struct net_device *dev, u32 features) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int hw_lro; + + if (!((dev->features ^ features) & NETIF_F_LRO)) + return 0; + + hw_lro = (features & NETIF_F_LRO) ? 
NETXEN_NIC_LRO_ENABLED + : NETXEN_NIC_LRO_DISABLED; + + if (netxen_config_hw_lro(adapter, hw_lro)) + return -EIO; + + if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter)) + return -EIO; + + return 0; +} + +static const struct net_device_ops netxen_netdev_ops = { + .ndo_open = netxen_nic_open, + .ndo_stop = netxen_nic_close, + .ndo_start_xmit = netxen_nic_xmit_frame, + .ndo_get_stats64 = netxen_nic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = netxen_set_multicast_list, + .ndo_set_mac_address = netxen_nic_set_mac, + .ndo_change_mtu = netxen_nic_change_mtu, + .ndo_tx_timeout = netxen_tx_timeout, + .ndo_fix_features = netxen_fix_features, + .ndo_set_features = netxen_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = netxen_nic_poll_controller, +#endif +}; + +static void +netxen_setup_intr(struct netxen_adapter *adapter) +{ + struct netxen_legacy_intr_set *legacy_intrp; + struct pci_dev *pdev = adapter->pdev; + int err, num_msix; + + if (adapter->rss_supported) { + num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? + MSIX_ENTRIES_PER_ADAPTER : 2; + } else + num_msix = 1; + + adapter->max_sds_rings = 1; + + adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); + + if (adapter->ahw.revision_id >= NX_P3_B0) + legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; + else + legacy_intrp = &legacy_intr[0]; + + adapter->int_vec_bit = legacy_intrp->int_vec_bit; + adapter->tgt_status_reg = netxen_get_ioaddr(adapter, + legacy_intrp->tgt_status_reg); + adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, + legacy_intrp->tgt_mask_reg); + adapter->pci_int_reg = netxen_get_ioaddr(adapter, + legacy_intrp->pci_int_reg); + adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); + + if (adapter->ahw.revision_id >= NX_P3_B1) + adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, + ISR_INT_STATE_REG); + else + adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, + CRB_INT_VECTOR); + + netxen_set_msix_bit(pdev, 0); + + if (adapter->msix_supported) { + + netxen_init_msix_entries(adapter, num_msix); + err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); + if (err == 0) { + adapter->flags |= NETXEN_NIC_MSIX_ENABLED; + netxen_set_msix_bit(pdev, 1); + + if (adapter->rss_supported) + adapter->max_sds_rings = num_msix; + + dev_info(&pdev->dev, "using msi-x interrupts\n"); + return; + } + + if (err > 0) + pci_disable_msix(pdev); + + /* fall through for msi */ + } + + if (use_msi && !pci_enable_msi(pdev)) { + adapter->flags |= NETXEN_NIC_MSI_ENABLED; + adapter->tgt_status_reg = netxen_get_ioaddr(adapter, + msi_tgt_status[adapter->ahw.pci_func]); + dev_info(&pdev->dev, "using msi interrupts\n"); + adapter->msix_entries[0].vector = pdev->irq; + return; + } + + dev_info(&pdev->dev, "using legacy interrupts\n"); + adapter->msix_entries[0].vector = pdev->irq; +} + +static void +netxen_teardown_intr(struct netxen_adapter *adapter) +{ + if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) + pci_disable_msix(adapter->pdev); + if (adapter->flags & NETXEN_NIC_MSI_ENABLED) + pci_disable_msi(adapter->pdev); +} + +static void +netxen_cleanup_pci_map(struct netxen_adapter *adapter) +{ + if (adapter->ahw.db_base != NULL) + iounmap(adapter->ahw.db_base); + if (adapter->ahw.pci_base0 != NULL) + iounmap(adapter->ahw.pci_base0); + if (adapter->ahw.pci_base1 != NULL) + iounmap(adapter->ahw.pci_base1); + if (adapter->ahw.pci_base2 != NULL) + iounmap(adapter->ahw.pci_base2); +} + +static int +netxen_setup_pci_map(struct 
netxen_adapter *adapter) +{ + void __iomem *db_ptr = NULL; + + resource_size_t mem_base, db_base; + unsigned long mem_len, db_len = 0; + + struct pci_dev *pdev = adapter->pdev; + int pci_func = adapter->ahw.pci_func; + struct netxen_hardware_context *ahw = &adapter->ahw; + + int err = 0; + + /* + * Set the CRB window to invalid. If any register in window 0 is + * accessed it should set the window to 0 and then reset it to 1. + */ + adapter->ahw.crb_win = -1; + adapter->ahw.ocm_win = -1; + + /* remap phys address */ + mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ + mem_len = pci_resource_len(pdev, 0); + + /* 128 Meg of memory */ + if (mem_len == NETXEN_PCI_128MB_SIZE) { + + ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); + ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, + SECOND_PAGE_GROUP_SIZE); + ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, + THIRD_PAGE_GROUP_SIZE); + if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || + ahw->pci_base2 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + err = -EIO; + goto err_out; + } + + ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; + + } else if (mem_len == NETXEN_PCI_32MB_SIZE) { + + ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); + ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - + SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); + if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + err = -EIO; + goto err_out; + } + + } else if (mem_len == NETXEN_PCI_2MB_SIZE) { + + ahw->pci_base0 = pci_ioremap_bar(pdev, 0); + if (ahw->pci_base0 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + return -EIO; + } + ahw->pci_len0 = mem_len; + } else { + return -EIO; + } + + netxen_setup_hwops(adapter); + + dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); + + if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { + adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, + NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); + + } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, + NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func))); + } + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + goto skip_doorbell; + + db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ + db_len = pci_resource_len(pdev, 4); + + if (db_len == 0) { + printk(KERN_ERR "%s: doorbell is disabled\n", + netxen_nic_driver_name); + err = -EIO; + goto err_out; + } + + db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); + if (!db_ptr) { + printk(KERN_ERR "%s: Failed to allocate doorbell map.", + netxen_nic_driver_name); + err = -EIO; + goto err_out; + } + +skip_doorbell: + adapter->ahw.db_base = db_ptr; + adapter->ahw.db_len = db_len; + return 0; + +err_out: + netxen_cleanup_pci_map(adapter); + return err; +} + +static void +netxen_check_options(struct netxen_adapter *adapter) +{ + u32 fw_major, fw_minor, fw_build; + char brd_name[NETXEN_MAX_SHORT_NAME]; + char serial_num[32]; + int i, offset, val; + int *ptr32; + struct pci_dev *pdev = adapter->pdev; + + adapter->driver_mismatch = 0; + + ptr32 = (int *)&serial_num; + offset = NX_FW_SERIAL_NUM_OFFSET; + for (i = 0; i < 8; i++) { + if (netxen_rom_fast_read(adapter, offset, &val) == -1) { + dev_err(&pdev->dev, "error reading board info\n"); + adapter->driver_mismatch = 1; + return; + } + ptr32[i] = cpu_to_le32(val); + offset += sizeof(u32); + } + + fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); + fw_minor = 
NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+	fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+
+	adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+	if (adapter->portnum == 0) {
+		get_brd_name_by_type(adapter->ahw.board_type, brd_name);
+
+		pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
+				module_name(THIS_MODULE),
+				brd_name, serial_num, adapter->ahw.revision_id);
+	}
+
+	if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) {
+		adapter->driver_mismatch = 1;
+		dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
+				fw_major, fw_minor, fw_build);
+		return;
+	}
+
+	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+		i = NXRD32(adapter, NETXEN_SRE_MISC);
+		adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
+	}
+
+	dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
+			fw_major, fw_minor, fw_build,
+			adapter->ahw.cut_through ? "cut-through" : "legacy");
+
+	if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
+		adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+
+	if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+	} else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+	}
+
+	adapter->msix_supported = 0;
+	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+		adapter->msix_supported = !!use_msi_x;
+		adapter->rss_supported = !!use_msi_x;
+	} else {
+		u32 flashed_ver = 0;
+		netxen_rom_fast_read(adapter,
+				NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+		flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+		if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
+			switch (adapter->ahw.board_type) {
+			case NETXEN_BRDTYPE_P2_SB31_10G:
+			case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+				adapter->msix_supported = !!use_msi_x;
+				adapter->rss_supported = !!use_msi_x;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+		adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
+		adapter->max_rds_rings = 3;
+	} else {
+		adapter->num_lro_rxd = 0;
+		adapter->max_rds_rings = 2;
+	}
+}
+
+static int
+netxen_start_firmware(struct netxen_adapter *adapter)
+{
+	int val, err, first_boot;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* required for NX2031 dummy dma */
+	err = nx_set_dma_mask(adapter);
+	if (err)
+		return err;
+
+	if (!netxen_can_start_firmware(adapter))
+		goto wait_init;
+
+	first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
+
+	err = netxen_check_hw_init(adapter, first_boot);
+	if (err) {
+		dev_err(&pdev->dev, "error in HW init sequence\n");
+		return err;
+	}
+
+	netxen_request_firmware(adapter);
+
+	err = netxen_need_fw_reset(adapter);
+	if (err < 0)
+		goto err_out;
+	if (err == 0)
+		goto wait_init;
+
+	if (first_boot != 0x55555555) {
+		NXWR32(adapter, CRB_CMDPEG_STATE, 0);
+		netxen_pinit_from_rom(adapter);
+		msleep(1);
+	}
+
+	NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
+	NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0);
+	NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0);
+
+	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+		netxen_set_port_mode(adapter);
+
+	err = netxen_load_firmware(adapter);
+	if (err)
+		goto err_out;
+
+	netxen_release_firmware(adapter);
+
+	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+
+		/* Initialize multicast addr pool owners */
+		val = 0x7654;
+		if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
+			val |= 0x0f000000;
+		NXWR32(adapter, 
NETXEN_MAC_ADDR_CNTL_REG, val); + + } + + err = netxen_init_dummy_dma(adapter); + if (err) + goto err_out; + + /* + * Tell the hardware our version number. + */ + val = (_NETXEN_NIC_LINUX_MAJOR << 16) + | ((_NETXEN_NIC_LINUX_MINOR << 8)) + | (_NETXEN_NIC_LINUX_SUBVERSION); + NXWR32(adapter, CRB_DRIVER_VERSION, val); + +wait_init: + /* Handshake with the card before we register the devices. */ + err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); + if (err) { + netxen_free_dummy_dma(adapter); + goto err_out; + } + + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY); + + nx_update_dma_mask(adapter); + + netxen_check_options(adapter); + + adapter->need_fw_reset = 0; + + /* fall through and release firmware */ + +err_out: + netxen_release_firmware(adapter); + return err; +} + +static int +netxen_nic_request_irq(struct netxen_adapter *adapter) +{ + irq_handler_t handler; + struct nx_host_sds_ring *sds_ring; + int err, ring; + + unsigned long flags = 0; + struct net_device *netdev = adapter->netdev; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) + handler = netxen_msix_intr; + else if (adapter->flags & NETXEN_NIC_MSI_ENABLED) + handler = netxen_msi_intr; + else { + flags |= IRQF_SHARED; + handler = netxen_intr; + } + adapter->irq = netdev->irq; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); + err = request_irq(sds_ring->irq, handler, + flags, sds_ring->name, sds_ring); + if (err) + return err; + } + + return 0; +} + +static void +netxen_nic_free_irq(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + free_irq(sds_ring->irq, sds_ring); + } +} + +static void +netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter) +{ + adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; + adapter->coal.normal.data.rx_time_us = + NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; + adapter->coal.normal.data.rx_packets = + NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; + adapter->coal.normal.data.tx_time_us = + NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US; + adapter->coal.normal.data.tx_packets = + NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS; +} + +/* with rtnl_lock */ +static int +__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) +{ + int err; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return -EIO; + + err = adapter->init_port(adapter, adapter->physical_port); + if (err) { + printk(KERN_ERR "%s: Failed to initialize port %d\n", + netxen_nic_driver_name, adapter->portnum); + return err; + } + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + adapter->macaddr_set(adapter, adapter->mac_addr); + + adapter->set_multi(netdev); + adapter->set_mtu(adapter, netdev->mtu); + + adapter->ahw.linkup = 0; + + if (adapter->max_sds_rings > 1) + netxen_config_rss(adapter, 1); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netxen_config_intr_coalesce(adapter); + + if (netdev->features & NETIF_F_LRO) + netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED); + + netxen_napi_enable(adapter); + + if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) + netxen_linkevent_request(adapter, 1); + else + netxen_nic_set_link_parameters(adapter); + + set_bit(__NX_DEV_UP, &adapter->state); + return 0; +} + +/* Usage: During resume and firmware 
recovery. */
+
+static inline int
+netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+	int err = 0;
+
+	rtnl_lock();
+	if (netif_running(netdev))
+		err = __netxen_nic_up(adapter, netdev);
+	rtnl_unlock();
+
+	return err;
+}
+
+/* with rtnl_lock */
+static void
+__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+		return;
+
+	if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state))
+		return;
+
+	smp_mb();
+	spin_lock(&adapter->tx_clean_lock);
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+		netxen_linkevent_request(adapter, 0);
+
+	if (adapter->stop_port)
+		adapter->stop_port(adapter);
+
+	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+		netxen_p3_free_mac_list(adapter);
+
+	adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE);
+
+	netxen_napi_disable(adapter);
+
+	netxen_release_tx_buffers(adapter);
+	spin_unlock(&adapter->tx_clean_lock);
+}
+
+/* Usage: During suspend and firmware recovery. */
+
+static inline void
+netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+	rtnl_lock();
+	if (netif_running(netdev))
+		__netxen_nic_down(adapter, netdev);
+	rtnl_unlock();
+}
+
+static int
+netxen_nic_attach(struct netxen_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err, ring;
+	struct nx_host_rds_ring *rds_ring;
+	struct nx_host_tx_ring *tx_ring;
+
+	if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
+		return 0;
+
+	err = netxen_init_firmware(adapter);
+	if (err)
+		return err;
+
+	err = netxen_napi_add(adapter, netdev);
+	if (err)
+		return err;
+
+	err = netxen_alloc_sw_resources(adapter);
+	if (err) {
+		printk(KERN_ERR "%s: Error in setting sw resources\n",
+				netdev->name);
+		return err;
+	}
+
+	err = netxen_alloc_hw_resources(adapter);
+	if (err) {
+		printk(KERN_ERR "%s: Error in setting hw resources\n",
+				netdev->name);
+		goto err_out_free_sw;
+	}
+
+	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+		tx_ring = adapter->tx_ring;
+		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
+				crb_cmd_producer[adapter->portnum]);
+		tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter,
+				crb_cmd_consumer[adapter->portnum]);
+
+		tx_ring->producer = 0;
+		tx_ring->sw_consumer = 0;
+
+		netxen_nic_update_cmd_producer(adapter, tx_ring);
+		netxen_nic_update_cmd_consumer(adapter, tx_ring);
+	}
+
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &adapter->recv_ctx.rds_rings[ring];
+		netxen_post_rx_buffers(adapter, ring, rds_ring);
+	}
+
+	err = netxen_nic_request_irq(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
+				netdev->name);
+		goto err_out_free_rxbuf;
+	}
+
+	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+		netxen_nic_init_coalesce_defaults(adapter);
+
+	netxen_create_sysfs_entries(adapter);
+
+	adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
+	return 0;
+
+err_out_free_rxbuf:
+	netxen_release_rx_buffers(adapter);
+	netxen_free_hw_resources(adapter);
+err_out_free_sw:
+	netxen_free_sw_resources(adapter);
+	return err;
+}
+
+static void
+netxen_nic_detach(struct netxen_adapter *adapter)
+{
+	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+		return;
+
+	netxen_remove_sysfs_entries(adapter);
+
+	netxen_free_hw_resources(adapter);
+	netxen_release_rx_buffers(adapter);
+	netxen_nic_free_irq(adapter);
+	netxen_napi_del(adapter);
+	netxen_free_sw_resources(adapter);
+
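+	/*
+	 * Teardown runs roughly in the reverse order of netxen_nic_attach():
+	 * sysfs entries, hardware resources, RX buffers, IRQs, NAPI, then
+	 * the software rings.  The UP magic is cleared only as the last
+	 * step, so a repeated detach from the reset/suspend paths returns
+	 * early on the is_up check above.
+	 */
+	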
adapter->is_up = 0; +} + +int +netxen_nic_reset_context(struct netxen_adapter *adapter) +{ + int err = 0; + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__NX_RESETTING, &adapter->state)) + return -EBUSY; + + if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __netxen_nic_down(adapter, netdev); + + netxen_nic_detach(adapter); + + if (netif_running(netdev)) { + err = netxen_nic_attach(adapter); + if (!err) + err = __netxen_nic_up(adapter, netdev); + + if (err) + goto done; + } + + netif_device_attach(netdev); + } + +done: + clear_bit(__NX_RESETTING, &adapter->state); + return err; +} + +static int +netxen_setup_netdev(struct netxen_adapter *adapter, + struct net_device *netdev) +{ + int err = 0; + struct pci_dev *pdev = adapter->pdev; + + adapter->mc_enabled = 0; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + adapter->max_mc_count = 38; + else + adapter->max_mc_count = 16; + + netdev->netdev_ops = &netxen_netdev_ops; + netdev->watchdog_timeo = 5*HZ; + + netxen_nic_change_mtu(netdev, netdev->mtu); + + SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); + + netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + + netdev->vlan_features |= netdev->hw_features; + + if (adapter->pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) + netdev->hw_features |= NETIF_F_HW_VLAN_TX; + + if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) + netdev->hw_features |= NETIF_F_LRO; + + netdev->features |= netdev->hw_features; + + netdev->irq = adapter->msix_entries[0].vector; + + INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); + + if (netxen_read_mac_addr(adapter)) + dev_warn(&pdev->dev, "failed to read mac addr\n"); + + netif_carrier_off(netdev); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "failed to register net device\n"); + return err; + } + + return 0; +} + +#ifdef CONFIG_PCIEAER +static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *root = pdev->bus->self; + u32 aer_pos; + + if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && + adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) + return; + + if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT) + return; + + aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR); + if (!aer_pos) + return; + + pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff); +} +#endif + +static int __devinit +netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev = NULL; + struct netxen_adapter *adapter = NULL; + int i = 0, err; + int pci_func_id = PCI_FUNC(pdev->devfn); + uint8_t revision_id; + u32 val; + + if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { + pr_warning("%s: chip revisions between 0x%x-0x%x " + "will not be enabled.\n", + module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); + return -ENODEV; + } + + if ((err = pci_enable_device(pdev))) + return err; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) + goto err_out_disable_pdev; + + if (NX_IS_REVISION_P3(pdev->revision)) + pci_enable_pcie_error_reporting(pdev); + + 
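+	/*
+	 * Bus mastering is enabled before any device access; the actual DMA
+	 * masks are chosen later, from netxen_start_firmware() via
+	 * nx_set_dma_mask(), and may be widened by nx_update_dma_mask()
+	 * once the firmware reports its addressable range.
+	 */
+	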
pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct netxen_adapter)); + if(!netdev) { + dev_err(&pdev->dev, "failed to allocate net_device\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ahw.pci_func = pci_func_id; + + revision_id = pdev->revision; + adapter->ahw.revision_id = revision_id; + + rwlock_init(&adapter->ahw.crb_lock); + spin_lock_init(&adapter->ahw.mem_lock); + + spin_lock_init(&adapter->tx_clean_lock); + INIT_LIST_HEAD(&adapter->mac_list); + INIT_LIST_HEAD(&adapter->vlan_ip_list); + + err = netxen_setup_pci_map(adapter); + if (err) + goto err_out_free_netdev; + + /* This will be reset for mezz cards */ + adapter->portnum = pci_func_id; + + err = netxen_nic_get_board_info(adapter); + if (err) { + dev_err(&pdev->dev, "Error getting board config info.\n"); + goto err_out_iounmap; + } + +#ifdef CONFIG_PCIEAER + netxen_mask_aer_correctable(adapter); +#endif + + /* Mezz cards have PCI function 0,2,3 enabled */ + switch (adapter->ahw.board_type) { + case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: + if (pci_func_id >= 2) + adapter->portnum = pci_func_id - 2; + break; + default: + break; + } + + err = netxen_check_flash_fw_compatibility(adapter); + if (err) + goto err_out_iounmap; + + if (adapter->portnum == 0) { + val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + if (val != 0xffffffff && val != 0) { + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); + adapter->need_fw_reset = 1; + } + } + + err = netxen_start_firmware(adapter); + if (err) + goto err_out_decr_ref; + + /* + * See if the firmware gave us a virtual-physical port mapping. + */ + adapter->physical_port = adapter->portnum; + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + i = NXRD32(adapter, CRB_V2P(adapter->portnum)); + if (i != 0x55555555) + adapter->physical_port = i; + } + + netxen_nic_clear_stats(adapter); + + netxen_setup_intr(adapter); + + err = netxen_setup_netdev(adapter, netdev); + if (err) + goto err_out_disable_msi; + + pci_set_drvdata(pdev, adapter); + + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); + + switch (adapter->ahw.port_type) { + case NETXEN_NIC_GBE: + dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", + adapter->netdev->name); + break; + case NETXEN_NIC_XGBE: + dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", + adapter->netdev->name); + break; + } + + netxen_create_diag_entries(adapter); + + return 0; + +err_out_disable_msi: + netxen_teardown_intr(adapter); + + netxen_free_dummy_dma(adapter); + +err_out_decr_ref: + nx_decr_dev_ref_cnt(adapter); + +err_out_iounmap: + netxen_cleanup_pci_map(adapter); + +err_out_free_netdev: + free_netdev(netdev); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + return err; +} + +static void __devexit netxen_nic_remove(struct pci_dev *pdev) +{ + struct netxen_adapter *adapter; + struct net_device *netdev; + + adapter = pci_get_drvdata(pdev); + if (adapter == NULL) + return; + + netdev = adapter->netdev; + + netxen_cancel_fw_work(adapter); + + unregister_netdev(netdev); + + cancel_work_sync(&adapter->tx_timeout_task); + + netxen_free_vlan_ip_list(adapter); + netxen_nic_detach(adapter); + + nx_decr_dev_ref_cnt(adapter); + + if (adapter->portnum == 0) + netxen_free_dummy_dma(adapter); + + clear_bit(__NX_RESETTING, &adapter->state); + + netxen_teardown_intr(adapter); + + 
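+	/*
+	 * The diag entries hang off the PCI device rather than the netdev
+	 * (see netxen_create_diag_entries()), so they are removed here,
+	 * after the netdev is unregistered and before the BARs are unmapped
+	 * and released below.
+	 */
+	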
netxen_remove_diag_entries(adapter); + + netxen_cleanup_pci_map(adapter); + + netxen_release_firmware(adapter); + + if (NX_IS_REVISION_P3(pdev->revision)) + pci_disable_pcie_error_reporting(pdev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + free_netdev(netdev); +} + +static void netxen_nic_detach_func(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + + netxen_cancel_fw_work(adapter); + + if (netif_running(netdev)) + netxen_nic_down(adapter, netdev); + + cancel_work_sync(&adapter->tx_timeout_task); + + netxen_nic_detach(adapter); + + if (adapter->portnum == 0) + netxen_free_dummy_dma(adapter); + + nx_decr_dev_ref_cnt(adapter); + + clear_bit(__NX_RESETTING, &adapter->state); +} + +static int netxen_nic_attach_func(struct pci_dev *pdev) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_power_state(pdev, PCI_D0); + pci_set_master(pdev); + pci_restore_state(pdev); + + adapter->ahw.crb_win = -1; + adapter->ahw.ocm_win = -1; + + err = netxen_start_firmware(adapter); + if (err) { + dev_err(&pdev->dev, "failed to start firmware\n"); + return err; + } + + if (netif_running(netdev)) { + err = netxen_nic_attach(adapter); + if (err) + goto err_out; + + err = netxen_nic_up(adapter, netdev); + if (err) + goto err_out_detach; + + netxen_restore_indev_addr(netdev, NETDEV_UP); + } + + netif_device_attach(netdev); + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); + return 0; + +err_out_detach: + netxen_nic_detach(adapter); +err_out: + nx_decr_dev_ref_cnt(adapter); + return err; +} + +static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (nx_dev_request_aer(adapter)) + return PCI_ERS_RESULT_RECOVERED; + + netxen_nic_detach_func(adapter); + + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev) +{ + int err = 0; + + err = netxen_nic_attach_func(pdev); + + return err ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; +} + +static void netxen_io_resume(struct pci_dev *pdev) +{ + pci_cleanup_aer_uncorrect_error_status(pdev); +} + +static void netxen_nic_shutdown(struct pci_dev *pdev) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + + netxen_nic_detach_func(adapter); + + if (pci_save_state(pdev)) + return; + + if (netxen_nic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int +netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + int retval; + + netxen_nic_detach_func(adapter); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + if (netxen_nic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int +netxen_nic_resume(struct pci_dev *pdev) +{ + return netxen_nic_attach_func(pdev); +} +#endif + +static int netxen_nic_open(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (adapter->driver_mismatch) + return -EIO; + + err = netxen_nic_attach(adapter); + if (err) + return err; + + err = __netxen_nic_up(adapter, netdev); + if (err) + goto err_out; + + netif_start_queue(netdev); + + return 0; + +err_out: + netxen_nic_detach(adapter); + return err; +} + +/* + * netxen_nic_close - Disables a network interface entry point + */ +static int netxen_nic_close(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + __netxen_nic_down(adapter, netdev); + return 0; +} + +static void +netxen_tso_check(struct net_device *netdev, + struct nx_host_tx_ring *tx_ring, + struct cmd_desc_type0 *first_desc, + struct sk_buff *skb) +{ + u8 opcode = TX_ETHER_PKT; + __be16 protocol = skb->protocol; + u16 flags = 0, vid = 0; + u32 producer; + int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; + struct cmd_desc_type0 *hwdesc; + struct vlan_ethhdr *vh; + + if (protocol == cpu_to_be16(ETH_P_8021Q)) { + + vh = (struct vlan_ethhdr *)skb->data; + protocol = vh->h_vlan_encapsulated_proto; + flags = FLAGS_VLAN_TAGGED; + + } else if (vlan_tx_tag_present(skb)) { + + flags = FLAGS_VLAN_OOB; + vid = vlan_tx_tag_get(skb); + netxen_set_tx_vlan_tci(first_desc, vid); + vlan_oob = 1; + } + + if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && + skb_shinfo(skb)->gso_size > 0) { + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + first_desc->total_hdr_length = hdr_len; + if (vlan_oob) { + first_desc->total_hdr_length += VLAN_HLEN; + first_desc->tcp_hdr_offset = VLAN_HLEN; + first_desc->ip_hdr_offset = VLAN_HLEN; + /* Only in case of TSO on vlan device */ + flags |= FLAGS_VLAN_TAGGED; + } + + opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? 
+ TX_TCP_LSO6 : TX_TCP_LSO; + tso = 1; + + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 l4proto; + + if (protocol == cpu_to_be16(ETH_P_IP)) { + l4proto = ip_hdr(skb)->protocol; + + if (l4proto == IPPROTO_TCP) + opcode = TX_TCP_PKT; + else if(l4proto == IPPROTO_UDP) + opcode = TX_UDP_PKT; + } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { + l4proto = ipv6_hdr(skb)->nexthdr; + + if (l4proto == IPPROTO_TCP) + opcode = TX_TCPV6_PKT; + else if(l4proto == IPPROTO_UDP) + opcode = TX_UDPV6_PKT; + } + } + + first_desc->tcp_hdr_offset += skb_transport_offset(skb); + first_desc->ip_hdr_offset += skb_network_offset(skb); + netxen_set_tx_flags_opcode(first_desc, flags, opcode); + + if (!tso) + return; + + /* For LSO, we need to copy the MAC/IP/TCP headers into + * the descriptor ring + */ + producer = tx_ring->producer; + copied = 0; + offset = 2; + + if (vlan_oob) { + /* Create a TSO vlan header template for firmware */ + + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, + hdr_len + VLAN_HLEN); + + vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); + skb_copy_from_linear_data(skb, vh, 12); + vh->h_vlan_proto = htons(ETH_P_8021Q); + vh->h_vlan_TCI = htons(vid); + skb_copy_from_linear_data_offset(skb, 12, + (char *)vh + 16, copy_len - 16); + + copied = copy_len - VLAN_HLEN; + offset = 0; + + producer = get_next_index(producer, tx_ring->num_desc); + } + + while (copied < hdr_len) { + + copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, + (hdr_len - copied)); + + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + skb_copy_from_linear_data_offset(skb, copied, + (char *)hwdesc + offset, copy_len); + + copied += copy_len; + offset = 0; + + producer = get_next_index(producer, tx_ring->num_desc); + } + + tx_ring->producer = producer; + barrier(); +} + +static int +netxen_map_tx_skb(struct pci_dev *pdev, + struct sk_buff *skb, struct netxen_cmd_buffer *pbuf) +{ + struct netxen_skb_frag *nf; + struct skb_frag_struct *frag; + int i, nr_frags; + dma_addr_t map; + + nr_frags = skb_shinfo(skb)->nr_frags; + nf = &pbuf->frag_array[0]; + + map = pci_map_single(pdev, skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto out_err; + + nf->dma = map; + nf->length = skb_headlen(skb); + + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nf = &pbuf->frag_array[i+1]; + + map = pci_map_page(pdev, frag->page, frag->page_offset, + frag->size, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto unwind; + + nf->dma = map; + nf->length = frag->size; + } + + return 0; + +unwind: + while (--i >= 0) { + nf = &pbuf->frag_array[i+1]; + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + } + + nf = &pbuf->frag_array[0]; + pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + +out_err: + return -ENOMEM; +} + +static inline void +netxen_clear_cmddesc(u64 *desc) +{ + desc[0] = 0ULL; + desc[2] = 0ULL; +} + +static netdev_tx_t +netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + struct netxen_cmd_buffer *pbuf; + struct netxen_skb_frag *buffrag; + struct cmd_desc_type0 *hwdesc, *first_desc; + struct pci_dev *pdev; + int i, k; + int delta = 0; + struct skb_frag_struct *frag; + + u32 producer; + int frag_count, no_of_desc; + u32 num_txd = tx_ring->num_desc; 
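+
+	/*
+	 * The extra fragment counted below is the linear (header) part of
+	 * the skb, which netxen_map_tx_skb() maps separately from the page
+	 * frags; each command descriptor carries up to four buffer
+	 * addresses, hence the (frag_count + 3) >> 2 descriptor estimate.
+	 */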
+
+	frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+	/* 14 frags supported for normal packet and
+	 * 32 frags supported for TSO packet
+	 */
+	if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+		for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+			frag = &skb_shinfo(skb)->frags[i];
+			delta += frag->size;
+		}
+
+		if (!__pskb_pull_tail(skb, delta))
+			goto drop_packet;
+
+		frag_count = 1 + skb_shinfo(skb)->nr_frags;
+	}
+	/* 4 fragments per cmd desc */
+	no_of_desc = (frag_count + 3) >> 2;
+
+	if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+		netif_stop_queue(netdev);
+		smp_mb();
+		if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+			netif_start_queue(netdev);
+		else
+			return NETDEV_TX_BUSY;
+	}
+
+	producer = tx_ring->producer;
+	pbuf = &tx_ring->cmd_buf_arr[producer];
+
+	pdev = adapter->pdev;
+
+	if (netxen_map_tx_skb(pdev, skb, pbuf))
+		goto drop_packet;
+
+	pbuf->skb = skb;
+	pbuf->frag_count = frag_count;
+
+	first_desc = hwdesc = &tx_ring->desc_head[producer];
+	netxen_clear_cmddesc((u64 *)hwdesc);
+
+	netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
+	netxen_set_tx_port(first_desc, adapter->portnum);
+
+	for (i = 0; i < frag_count; i++) {
+
+		k = i % 4;
+
+		if ((k == 0) && (i > 0)) {
+			/* move to next desc.*/
+			producer = get_next_index(producer, num_txd);
+			hwdesc = &tx_ring->desc_head[producer];
+			netxen_clear_cmddesc((u64 *)hwdesc);
+			tx_ring->cmd_buf_arr[producer].skb = NULL;
+		}
+
+		buffrag = &pbuf->frag_array[i];
+
+		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+		switch (k) {
+		case 0:
+			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+			break;
+		case 1:
+			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+			break;
+		case 2:
+			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+			break;
+		case 3:
+			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+			break;
+		}
+	}
+
+	tx_ring->producer = get_next_index(producer, num_txd);
+
+	netxen_tso_check(netdev, tx_ring, first_desc, skb);
+
+	adapter->stats.txbytes += skb->len;
+	adapter->stats.xmitcalled++;
+
+	netxen_nic_update_cmd_producer(adapter, tx_ring);
+
+	return NETDEV_TX_OK;
+
+drop_packet:
+	adapter->stats.txdropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static int netxen_nic_check_temp(struct netxen_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	uint32_t temp, temp_state, temp_val;
+	int rv = 0;
+
+	temp = NXRD32(adapter, CRB_TEMP_STATE);
+
+	temp_state = nx_get_temp_state(temp);
+	temp_val = nx_get_temp_val(temp);
+
+	if (temp_state == NX_TEMP_PANIC) {
+		printk(KERN_ALERT
+				"%s: Device temperature %d degrees C exceeds"
+				" maximum allowed. Hardware has been shut down.\n",
+				netdev->name, temp_val);
+		rv = 1;
+	} else if (temp_state == NX_TEMP_WARN) {
+		if (adapter->temp == NX_TEMP_NORMAL) {
+			printk(KERN_ALERT
+				"%s: Device temperature %d degrees C "
+				"exceeds operating range."
+ " Immediate action needed.\n", + netdev->name, temp_val); + } + } else { + if (adapter->temp == NX_TEMP_WARN) { + printk(KERN_INFO + "%s: Device temperature is now %d degrees C" + " in normal range.\n", netdev->name, + temp_val); + } + } + adapter->temp = temp_state; + return rv; +} + +void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->ahw.linkup && !linkup) { + printk(KERN_INFO "%s: %s NIC Link is down\n", + netxen_nic_driver_name, netdev->name); + adapter->ahw.linkup = 0; + if (netif_running(netdev)) { + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + adapter->link_changed = !adapter->has_link_events; + } else if (!adapter->ahw.linkup && linkup) { + printk(KERN_INFO "%s: %s NIC Link is up\n", + netxen_nic_driver_name, netdev->name); + adapter->ahw.linkup = 1; + if (netif_running(netdev)) { + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + adapter->link_changed = !adapter->has_link_events; + } +} + +static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) +{ + u32 val, port, linkup; + + port = adapter->physical_port; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + val = NXRD32(adapter, CRB_XG_STATE_P3); + val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); + linkup = (val == XG_LINK_UP_P3); + } else { + val = NXRD32(adapter, CRB_XG_STATE); + val = (val >> port*8) & 0xff; + linkup = (val == XG_LINK_UP); + } + + netxen_advert_link_change(adapter, linkup); +} + +static void netxen_tx_timeout(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + if (test_bit(__NX_RESETTING, &adapter->state)) + return; + + dev_err(&netdev->dev, "transmit timeout, resetting.\n"); + schedule_work(&adapter->tx_timeout_task); +} + +static void netxen_tx_timeout_task(struct work_struct *work) +{ + struct netxen_adapter *adapter = + container_of(work, struct netxen_adapter, tx_timeout_task); + + if (!netif_running(adapter->netdev)) + return; + + if (test_and_set_bit(__NX_RESETTING, &adapter->state)) + return; + + if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) + goto request_reset; + + rtnl_lock(); + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + /* try to scrub interrupt */ + netxen_napi_disable(adapter); + + netxen_napi_enable(adapter); + + netif_wake_queue(adapter->netdev); + + clear_bit(__NX_RESETTING, &adapter->state); + } else { + clear_bit(__NX_RESETTING, &adapter->state); + if (netxen_nic_reset_context(adapter)) { + rtnl_unlock(); + goto request_reset; + } + } + adapter->netdev->trans_start = jiffies; + rtnl_unlock(); + return; + +request_reset: + adapter->need_fw_reset = 1; + clear_bit(__NX_RESETTING, &adapter->state); +} + +static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; + stats->tx_packets = adapter->stats.xmitfinished; + stats->rx_bytes = adapter->stats.rxbytes; + stats->tx_bytes = adapter->stats.txbytes; + stats->rx_dropped = adapter->stats.rxdropped; + stats->tx_dropped = adapter->stats.txdropped; + + return stats; +} + +static irqreturn_t netxen_intr(int irq, void *data) +{ + struct nx_host_sds_ring *sds_ring = data; + struct netxen_adapter *adapter = sds_ring->adapter; + u32 status = 0; + + status = readl(adapter->isr_int_vec); + + if (!(status & adapter->int_vec_bit)) + return IRQ_NONE; + + if 
(NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + /* check interrupt state machine, to be sure */ + status = readl(adapter->crb_int_state_reg); + if (!ISR_LEGACY_INT_TRIGGERED(status)) + return IRQ_NONE; + + } else { + unsigned long our_int = 0; + + our_int = readl(adapter->crb_int_state_reg); + + /* not our interrupt */ + if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) + return IRQ_NONE; + + /* claim interrupt */ + writel((our_int & 0xffffffff), adapter->crb_int_state_reg); + + /* clear interrupt */ + netxen_nic_disable_int(sds_ring); + } + + writel(0xffffffff, adapter->tgt_status_reg); + /* read twice to ensure write is flushed */ + readl(adapter->isr_int_vec); + readl(adapter->isr_int_vec); + + napi_schedule(&sds_ring->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t netxen_msi_intr(int irq, void *data) +{ + struct nx_host_sds_ring *sds_ring = data; + struct netxen_adapter *adapter = sds_ring->adapter; + + /* clear interrupt */ + writel(0xffffffff, adapter->tgt_status_reg); + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static irqreturn_t netxen_msix_intr(int irq, void *data) +{ + struct nx_host_sds_ring *sds_ring = data; + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static int netxen_nic_poll(struct napi_struct *napi, int budget) +{ + struct nx_host_sds_ring *sds_ring = + container_of(napi, struct nx_host_sds_ring, napi); + + struct netxen_adapter *adapter = sds_ring->adapter; + + int tx_complete; + int work_done; + + tx_complete = netxen_process_cmd_ring(adapter); + + work_done = netxen_process_rcv_ring(sds_ring, budget); + + if ((work_done < budget) && tx_complete) { + napi_complete(&sds_ring->napi); + if (test_bit(__NX_DEV_UP, &adapter->state)) + netxen_nic_enable_int(sds_ring); + } + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void netxen_nic_poll_controller(struct net_device *netdev) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + disable_irq(adapter->irq); + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netxen_intr(adapter->irq, sds_ring); + } + enable_irq(adapter->irq); +} +#endif + +static int +nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) +{ + int count; + if (netxen_api_lock(adapter)) + return -EIO; + + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); + + netxen_api_unlock(adapter); + return count; +} + +static int +nx_decr_dev_ref_cnt(struct netxen_adapter *adapter) +{ + int count; + if (netxen_api_lock(adapter)) + return -EIO; + + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + WARN_ON(count == 0); + + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); + + if (count == 0) + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); + + netxen_api_unlock(adapter); + return count; +} + +static int +nx_dev_request_aer(struct netxen_adapter *adapter) +{ + u32 state; + int ret = -EINVAL; + + if (netxen_api_lock(adapter)) + return ret; + + state = NXRD32(adapter, NX_CRB_DEV_STATE); + + if (state == NX_DEV_NEED_AER) + ret = 0; + else if (state == NX_DEV_READY) { + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER); + ret = 0; + } + + netxen_api_unlock(adapter); + return ret; +} + +static int +nx_dev_request_reset(struct netxen_adapter *adapter) +{ + u32 state; + int ret = -EINVAL; + + if (netxen_api_lock(adapter)) + return ret; + + state = NXRD32(adapter, NX_CRB_DEV_STATE); + + if (state 
== NX_DEV_NEED_RESET) + ret = 0; + else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) { + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); + ret = 0; + } + + netxen_api_unlock(adapter); + + return ret; +} + +static int +netxen_can_start_firmware(struct netxen_adapter *adapter) +{ + int count; + int can_start = 0; + + if (netxen_api_lock(adapter)) + return 0; + + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + + if ((count < 0) || (count >= NX_MAX_PCI_FUNC)) + count = 0; + + if (count == 0) { + can_start = 1; + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING); + } + + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); + + netxen_api_unlock(adapter); + + return can_start; +} + +static void +netxen_schedule_work(struct netxen_adapter *adapter, + work_func_t func, int delay) +{ + INIT_DELAYED_WORK(&adapter->fw_work, func); + schedule_delayed_work(&adapter->fw_work, delay); +} + +static void +netxen_cancel_fw_work(struct netxen_adapter *adapter) +{ + while (test_and_set_bit(__NX_RESETTING, &adapter->state)) + msleep(10); + + cancel_delayed_work_sync(&adapter->fw_work); +} + +static void +netxen_attach_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) { + err = netxen_nic_attach(adapter); + if (err) + goto done; + + err = netxen_nic_up(adapter, netdev); + if (err) { + netxen_nic_detach(adapter); + goto done; + } + + netxen_restore_indev_addr(netdev, NETDEV_UP); + } + + netif_device_attach(netdev); + +done: + adapter->fw_fail_cnt = 0; + clear_bit(__NX_RESETTING, &adapter->state); + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); +} + +static void +netxen_fwinit_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + int dev_state; + + dev_state = NXRD32(adapter, NX_CRB_DEV_STATE); + + switch (dev_state) { + case NX_DEV_COLD: + case NX_DEV_READY: + if (!netxen_start_firmware(adapter)) { + netxen_schedule_work(adapter, netxen_attach_work, 0); + return; + } + break; + + case NX_DEV_NEED_RESET: + case NX_DEV_INITALIZING: + if (++adapter->fw_wait_cnt < FW_POLL_THRESH) { + netxen_schedule_work(adapter, + netxen_fwinit_work, 2 * FW_POLL_DELAY); + return; + } + + case NX_DEV_FAILED: + default: + nx_incr_dev_ref_cnt(adapter); + break; + } + + clear_bit(__NX_RESETTING, &adapter->state); +} + +static void +netxen_detach_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + int ref_cnt, delay; + u32 status; + + netif_device_detach(netdev); + + netxen_nic_down(adapter, netdev); + + rtnl_lock(); + netxen_nic_detach(adapter); + rtnl_unlock(); + + status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); + + if (status & NX_RCODE_FATAL_ERROR) + goto err_ret; + + if (adapter->temp == NX_TEMP_PANIC) + goto err_ret; + + ref_cnt = nx_decr_dev_ref_cnt(adapter); + + if (ref_cnt == -EIO) + goto err_ret; + + delay = (ref_cnt == 0) ? 
0 : (2 * FW_POLL_DELAY);
+
+	adapter->fw_wait_cnt = 0;
+	netxen_schedule_work(adapter, netxen_fwinit_work, delay);
+
+	return;
+
+err_ret:
+	clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static int
+netxen_check_health(struct netxen_adapter *adapter)
+{
+	u32 state, heartbit;
+	struct net_device *netdev = adapter->netdev;
+
+	state = NXRD32(adapter, NX_CRB_DEV_STATE);
+	if (state == NX_DEV_NEED_AER)
+		return 0;
+
+	if (netxen_nic_check_temp(adapter))
+		goto detach;
+
+	if (adapter->need_fw_reset) {
+		if (nx_dev_request_reset(adapter))
+			return 0;
+		goto detach;
+	}
+
+	/* NX_DEV_NEED_RESET can be set in two cases:
+	 * 1. TX timeout  2. FW hang.
+	 * A request to destroy the context is sent only for a TX timeout
+	 * and is not required for a FW hang.
+	 */
+	if (state == NX_DEV_NEED_RESET) {
+		adapter->need_fw_reset = 1;
+		if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+			goto detach;
+	}
+
+	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+		return 0;
+
+	heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+	if (heartbit != adapter->heartbit) {
+		adapter->heartbit = heartbit;
+		adapter->fw_fail_cnt = 0;
+		if (adapter->need_fw_reset)
+			goto detach;
+		return 0;
+	}
+
+	if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+		return 0;
+
+	if (nx_dev_request_reset(adapter))
+		return 0;
+
+	clear_bit(__NX_FW_ATTACHED, &adapter->state);
+
+	dev_info(&netdev->dev, "firmware hang detected\n");
+
+detach:
+	if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+			!test_and_set_bit(__NX_RESETTING, &adapter->state))
+		netxen_schedule_work(adapter, netxen_detach_work, 0);
+	return 1;
+}
+
+static void
+netxen_fw_poll_work(struct work_struct *work)
+{
+	struct netxen_adapter *adapter = container_of(work,
+				struct netxen_adapter, fw_work.work);
+
+	if (test_bit(__NX_RESETTING, &adapter->state))
+		goto reschedule;
+
+	if (test_bit(__NX_DEV_UP, &adapter->state)) {
+		if (!adapter->has_link_events) {
+
+			netxen_nic_handle_phy_intr(adapter);
+
+			if (adapter->link_changed)
+				netxen_nic_set_link_parameters(adapter);
+		}
+	}
+
+	if (netxen_check_health(adapter))
+		return;
+
+reschedule:
+	netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+}
+
+static ssize_t
+netxen_store_bridged_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct net_device *net = to_net_dev(dev);
+	struct netxen_adapter *adapter = netdev_priv(net);
+	unsigned long new;
+	int ret = -EINVAL;
+
+	if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG))
+		goto err_out;
+
+	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+		goto err_out;
+
+	if (strict_strtoul(buf, 2, &new))
+		goto err_out;
+
+	if (!netxen_config_bridged_mode(adapter, !!new))
+		ret = len;
+
+err_out:
+	return ret;
+}
+
+static ssize_t
+netxen_show_bridged_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct net_device *net = to_net_dev(dev);
+	struct netxen_adapter *adapter;
+	int bridged_mode = 0;
+
+	adapter = netdev_priv(net);
+
+	if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
+		bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED);
+
+	return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+	.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+	.show = netxen_show_bridged_mode,
+	.store = netxen_store_bridged_mode,
+};
+
+static ssize_t
+netxen_store_diag_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct netxen_adapter *adapter = dev_get_drvdata(dev);
+	unsigned long 
new; + + if (strict_strtoul(buf, 2, &new)) + return -EINVAL; + + if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) + adapter->flags ^= NETXEN_NIC_DIAG_ENABLED; + + return len; +} + +static ssize_t +netxen_show_diag_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct netxen_adapter *adapter = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", + !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)); +} + +static struct device_attribute dev_attr_diag_mode = { + .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = netxen_show_diag_mode, + .store = netxen_store_diag_mode, +}; + +static int +netxen_sysfs_validate_crb(struct netxen_adapter *adapter, + loff_t offset, size_t size) +{ + size_t crb_size = 4; + + if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) + return -EIO; + + if (offset < NETXEN_PCI_CRBSPACE) { + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return -EINVAL; + + if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, + NETXEN_PCI_CAMQM_2M_END)) + crb_size = 8; + else + return -EINVAL; + } + + if ((size != crb_size) || (offset & (crb_size-1))) + return -EINVAL; + + return 0; +} + +static ssize_t +netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + u32 data; + u64 qmdata; + int ret; + + ret = netxen_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && + ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, + NETXEN_PCI_CAMQM_2M_END)) { + netxen_pci_camqm_read_2M(adapter, offset, &qmdata); + memcpy(buf, &qmdata, size); + } else { + data = NXRD32(adapter, offset); + memcpy(buf, &data, size); + } + + return size; +} + +static ssize_t +netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + u32 data; + u64 qmdata; + int ret; + + ret = netxen_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && + ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, + NETXEN_PCI_CAMQM_2M_END)) { + memcpy(&qmdata, buf, size); + netxen_pci_camqm_write_2M(adapter, offset, qmdata); + } else { + memcpy(&data, buf, size); + NXWR32(adapter, offset, data); + } + + return size; +} + +static int +netxen_sysfs_validate_mem(struct netxen_adapter *adapter, + loff_t offset, size_t size) +{ + if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) + return -EIO; + + if ((size != 8) || (offset & 0x7)) + return -EIO; + + return 0; +} + +static ssize_t +netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = netxen_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + if (adapter->pci_mem_read(adapter, offset, &data)) + return -EIO; + + memcpy(buf, &data, size); + + return size; +} + +static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct 
netxen_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = netxen_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + memcpy(&data, buf, size); + + if (adapter->pci_mem_write(adapter, offset, data)) + return -EIO; + + return size; +} + + +static struct bin_attribute bin_attr_crb = { + .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = netxen_sysfs_read_crb, + .write = netxen_sysfs_write_crb, +}; + +static struct bin_attribute bin_attr_mem = { + .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = netxen_sysfs_read_mem, + .write = netxen_sysfs_write_mem, +}; + + +static void +netxen_create_sysfs_entries(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &netdev->dev; + + if (adapter->capabilities & NX_FW_CAPABILITY_BDG) { + /* bridged_mode control */ + if (device_create_file(dev, &dev_attr_bridged_mode)) { + dev_warn(&netdev->dev, + "failed to create bridged_mode sysfs entry\n"); + } + } +} + +static void +netxen_remove_sysfs_entries(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &netdev->dev; + + if (adapter->capabilities & NX_FW_CAPABILITY_BDG) + device_remove_file(dev, &dev_attr_bridged_mode); +} + +static void +netxen_create_diag_entries(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct device *dev; + + dev = &pdev->dev; + if (device_create_file(dev, &dev_attr_diag_mode)) + dev_info(dev, "failed to create diag_mode sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_crb)) + dev_info(dev, "failed to create crb sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_mem)) + dev_info(dev, "failed to create mem sysfs entry\n"); +} + + +static void +netxen_remove_diag_entries(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct device *dev = &pdev->dev; + + device_remove_file(dev, &dev_attr_diag_mode); + device_remove_bin_file(dev, &bin_attr_crb); + device_remove_bin_file(dev, &bin_attr_mem); +} + +#ifdef CONFIG_INET + +#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops) + +static int +netxen_destip_supported(struct netxen_adapter *adapter) +{ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + if (adapter->ahw.cut_through) + return 0; + + return 1; +} + +static void +netxen_free_vlan_ip_list(struct netxen_adapter *adapter) +{ + struct nx_vlan_ip_list *cur; + struct list_head *head = &adapter->vlan_ip_list; + + while (!list_empty(head)) { + cur = list_entry(head->next, struct nx_vlan_ip_list, list); + netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); + list_del(&cur->list); + kfree(cur); + } + +} +static void +netxen_list_config_vlan_ip(struct netxen_adapter *adapter, + struct in_ifaddr *ifa, unsigned long event) +{ + struct net_device *dev; + struct nx_vlan_ip_list *cur, *tmp_cur; + struct list_head *head; + + dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; + + if (dev == NULL) + return; + + if (!is_vlan_dev(dev)) + return; + + switch (event) { + case NX_IP_UP: + list_for_each(head, &adapter->vlan_ip_list) { + cur = list_entry(head, struct nx_vlan_ip_list, list); + + if (cur->ip_addr == ifa->ifa_address) + return; + } + + cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC); + if (cur == NULL) { + printk(KERN_ERR "%s: failed to add vlan ip to list\n", + adapter->netdev->name); + return; + } + + cur->ip_addr = ifa->ifa_address; + list_add_tail(&cur->list, &adapter->vlan_ip_list); + break; + case NX_IP_DOWN: + list_for_each_entry_safe(cur, tmp_cur, + &adapter->vlan_ip_list, list) { + if (cur->ip_addr == ifa->ifa_address) { + list_del(&cur->list); + kfree(cur); + break; + } + } + } +} +static void +netxen_config_indev_addr(struct netxen_adapter *adapter, + struct net_device *dev, unsigned long event) +{ + struct in_device *indev; + + if (!netxen_destip_supported(adapter)) + return; + + indev = in_dev_get(dev); + if (!indev) + return; + + for_ifa(indev) { + switch (event) { + case NETDEV_UP: + netxen_config_ipaddr(adapter, + ifa->ifa_address, NX_IP_UP); + netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); + break; + case NETDEV_DOWN: + netxen_config_ipaddr(adapter, + ifa->ifa_address, NX_IP_DOWN); + netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); + break; + default: + break; + } + } endfor_ifa(indev); + + in_dev_put(indev); +} + +static void +netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) + +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct nx_vlan_ip_list *pos, *tmp_pos; + unsigned long ip_event; + + ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; + netxen_config_indev_addr(adapter, netdev, event); + + list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) { + netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); + } +} + +static int netxen_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct netxen_adapter *adapter; + struct net_device *dev = (struct net_device *)ptr; + struct net_device *orig_dev = dev; + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + + if (!is_netxen_netdev(dev)) + goto done; + + adapter = netdev_priv(dev); + + if (!adapter) + goto done; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + goto done; + + netxen_config_indev_addr(adapter, orig_dev, event); +done: + return NOTIFY_DONE; +} + +static int +netxen_inetaddr_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct netxen_adapter *adapter; + struct net_device *dev; + + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + + dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + + if (!is_netxen_netdev(dev)) + goto done; + + adapter = netdev_priv(dev); + + if (!adapter || !netxen_destip_supported(adapter)) + goto done; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + goto done; + + switch (event) { + case NETDEV_UP: + netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); + netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); + break; + case NETDEV_DOWN: + netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN); + netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); + break; + default: + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block netxen_netdev_cb = { + .notifier_call = netxen_netdev_event, +}; + +static struct notifier_block netxen_inetaddr_cb = { + .notifier_call = netxen_inetaddr_event, +}; +#else +static void +netxen_restore_indev_addr(struct net_device *dev, unsigned long event) +{ } +static void +netxen_free_vlan_ip_list(struct netxen_adapter *adapter) +{ } +#endif + +static struct pci_error_handlers netxen_err_handler = { + .error_detected = netxen_io_error_detected, + .slot_reset = netxen_io_slot_reset, + .resume = netxen_io_resume, +}; + +static struct pci_driver netxen_driver = { + .name = netxen_nic_driver_name, + .id_table = netxen_pci_tbl, + .probe = netxen_nic_probe, + .remove = __devexit_p(netxen_nic_remove), +#ifdef CONFIG_PM + .suspend = netxen_nic_suspend, + .resume = netxen_nic_resume, +#endif + .shutdown = netxen_nic_shutdown, + .err_handler = &netxen_err_handler +}; + +static int __init netxen_init_module(void) +{ + printk(KERN_INFO "%s\n", netxen_nic_driver_string); + +#ifdef CONFIG_INET + register_netdevice_notifier(&netxen_netdev_cb); + register_inetaddr_notifier(&netxen_inetaddr_cb); +#endif + return pci_register_driver(&netxen_driver); +} + +module_init(netxen_init_module); + +static void __exit netxen_exit_module(void) +{ + pci_unregister_driver(&netxen_driver); + +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&netxen_inetaddr_cb); + unregister_netdevice_notifier(&netxen_netdev_cb); +#endif +} + +module_exit(netxen_exit_module); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c new file mode 100644 index 0000000..ccde806 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -0,0 +1,3970 @@ +/* + * QLogic QLA3xxx NIC HBA Driver + * Copyright (c) 2003-2006 QLogic Corporation + * + * See LICENSE.qla3xxx for copyright and licensing details. 
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/prefetch.h>
+
+#include "qla3xxx.h"
+
+#define DRV_NAME	"qla3xxx"
+#define DRV_STRING	"QLogic ISP3XXX Network Driver"
+#define DRV_VERSION	"v2.03.00-k5"
+
+static const char ql3xxx_driver_name[] = DRV_NAME;
+static const char ql3xxx_driver_version[] = DRV_VERSION;
+
+#define TIMED_OUT_MSG \
+"Timed out waiting for management port to get free before issuing command\n"
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg
+    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1;		/* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int msi;
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
+
+static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
+
+/*
+ * These are the known PHYs which are used
+ */
+enum PHY_DEVICE_TYPE {
+	PHY_TYPE_UNKNOWN = 0,
+	PHY_VITESSE_VSC8211,
+	PHY_AGERE_ET1011C,
+	MAX_PHY_DEV_TYPES
+};
+
+struct PHY_DEVICE_INFO {
+	const enum PHY_DEVICE_TYPE phyDevice;
+	const u32 phyIdOUI;
+	const u16 phyIdModel;
+	const char *name;
+};
+
+static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
+	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
+	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
+	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
+};
+
+
+/*
+ * Caller must take hw_lock.
+ */
+static int ql_sem_spinlock(struct ql3_adapter *qdev,
+			   u32 sem_mask, u32 sem_bits)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 value;
+	unsigned int seconds = 3;
+
+	do {
+		writel((sem_mask | sem_bits),
+		       &port_regs->CommonRegs.semaphoreReg);
+		value = readl(&port_regs->CommonRegs.semaphoreReg);
+		if ((value & (sem_mask >> 16)) == sem_bits)
+			return 0;
+		ssleep(1);
+	} while (--seconds);
+	return -1;
+}
+
+static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
+	readl(&port_regs->CommonRegs.semaphoreReg);
+}
+
+static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 value;
+
+	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
+	value = readl(&port_regs->CommonRegs.semaphoreReg);
+	return ((value & (sem_mask >> 16)) == sem_bits);
+}
+
+/*
+ * Caller holds hw_lock.
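+ * The chip arbitrates shared resources through the semaphore register
+ * used above: ql_sem_lock() writes the mask plus the claim bits and
+ * reads back to learn whether this function actually won the claim.
+ * ql_wait_for_drvr_lock() below simply retries that claim, roughly
+ * once a second for ten seconds, before giving up.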
+ */ +static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) +{ + int i = 0; + + while (i < 10) { + if (i) + ssleep(1); + + if (ql_sem_lock(qdev, + QL_DRVR_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) + * 2) << 1)) { + netdev_printk(KERN_DEBUG, qdev->ndev, + "driver lock acquired\n"); + return 1; + } + } + + netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); + return 0; +} + +static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + writel(((ISP_CONTROL_NP_MASK << 16) | page), + &port_regs->CommonRegs.ispControlStatus); + readl(&port_regs->CommonRegs.ispControlStatus); + qdev->current_page = page; +} + +static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + u32 value; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + value = readl(reg); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + return value; +} + +static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + return readl(reg); +} + +static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + u32 value; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + if (qdev->current_page != 0) + ql_set_register_page(qdev, 0); + value = readl(reg); + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return value; +} + +static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + if (qdev->current_page != 0) + ql_set_register_page(qdev, 0); + return readl(reg); +} + +static void ql_write_common_reg_l(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + writel(value, reg); + readl(reg); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); +} + +static void ql_write_common_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + writel(value, reg); + readl(reg); +} + +static void ql_write_nvram_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + writel(value, reg); + readl(reg); + udelay(1); +} + +static void ql_write_page0_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + if (qdev->current_page != 0) + ql_set_register_page(qdev, 0); + writel(value, reg); + readl(reg); +} + +/* + * Caller holds hw_lock. Only called during init. + */ +static void ql_write_page1_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + if (qdev->current_page != 1) + ql_set_register_page(qdev, 1); + writel(value, reg); + readl(reg); +} + +/* + * Caller holds hw_lock. Only called during init. 
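+ * The ISP3XXX register file is windowed: ispControlStatus selects the
+ * current page, and the page helpers switch pages on demand.  That
+ * check-and-switch of qdev->current_page is why the caller must hold
+ * hw_lock around these accessors.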
+ */ +static void ql_write_page2_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + if (qdev->current_page != 2) + ql_set_register_page(qdev, 2); + writel(value, reg); + readl(reg); +} + +static void ql_disable_interrupts(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, + (ISP_IMR_ENABLE_INT << 16)); + +} + +static void ql_enable_interrupts(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, + ((0xff << 16) | ISP_IMR_ENABLE_INT)); + +} + +static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, + struct ql_rcv_buf_cb *lrg_buf_cb) +{ + dma_addr_t map; + int err; + lrg_buf_cb->next = NULL; + + if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ + qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; + } else { + qdev->lrg_buf_free_tail->next = lrg_buf_cb; + qdev->lrg_buf_free_tail = lrg_buf_cb; + } + + if (!lrg_buf_cb->skb) { + lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, + qdev->lrg_buffer_len); + if (unlikely(!lrg_buf_cb->skb)) { + netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); + qdev->lrg_buf_skb_check++; + } else { + /* + * We save some space to copy the ethhdr from first + * buffer + */ + skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + lrg_buf_cb->skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); + dev_kfree_skb(lrg_buf_cb->skb); + lrg_buf_cb->skb = NULL; + + qdev->lrg_buf_skb_check++; + return; + } + + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + } + } + + qdev->lrg_buf_free_count++; +} + +static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter + *qdev) +{ + struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; + + if (lrg_buf_cb != NULL) { + qdev->lrg_buf_free_head = lrg_buf_cb->next; + if (qdev->lrg_buf_free_head == NULL) + qdev->lrg_buf_free_tail = NULL; + qdev->lrg_buf_free_count--; + } + + return lrg_buf_cb; +} + +static u32 addrBits = EEPROM_NO_ADDR_BITS; +static u32 dataBits = EEPROM_NO_DATA_BITS; + +static void fm93c56a_deselect(struct ql3_adapter *qdev); +static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, + unsigned short *value); + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_select(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; + ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); + ql_write_nvram_reg(qdev, spir, + ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); +} + +/* + * Caller holds hw_lock. 
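+ * fm93c56a_cmd() below bit-bangs the serial EEPROM through the serial
+ * port interface register: each command and address bit is placed on
+ * the DO line, most significant bit first, and latched with an
+ * explicit clock-rise/clock-fall register write pair.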
+ */ +static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) +{ + int i; + u32 mask; + u32 dataBit; + u32 previousBit; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + /* Clock in a zero, then do the start bit */ + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); + + mask = 1 << (FM93C56A_CMD_BITS - 1); + /* Force the previous data bit to be different */ + previousBit = 0xffff; + for (i = 0; i < FM93C56A_CMD_BITS; i++) { + dataBit = (cmd & mask) + ? AUBURN_EEPROM_DO_1 + : AUBURN_EEPROM_DO_0; + if (previousBit != dataBit) { + /* If the bit changed, change the DO state to match */ + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit)); + previousBit = dataBit; + } + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_FALL)); + cmd = cmd << 1; + } + + mask = 1 << (addrBits - 1); + /* Force the previous data bit to be different */ + previousBit = 0xffff; + for (i = 0; i < addrBits; i++) { + dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 + : AUBURN_EEPROM_DO_0; + if (previousBit != dataBit) { + /* + * If the bit changed, then change the DO state to + * match + */ + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit)); + previousBit = dataBit; + } + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_FALL)); + eepromAddr = eepromAddr << 1; + } +} + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_deselect(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; + ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); +} + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) +{ + int i; + u32 data = 0; + u32 dataBit; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + /* Read the data bits */ + /* The first bit is a dummy. Clock right over it. */ + for (i = 0; i < dataBits; i++) { + ql_write_nvram_reg(qdev, spir, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_RISE); + ql_write_nvram_reg(qdev, spir, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_FALL); + dataBit = (ql_read_common_reg(qdev, spir) & + AUBURN_EEPROM_DI_1) ? 1 : 0; + data = (data << 1) | dataBit; + } + *value = (u16)data; +} + +/* + * Caller holds hw_lock. 
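+ * eeprom_readword() composes the helpers above: select the part,
+ * clock out the READ opcode and address with fm93c56a_cmd(), clock
+ * the data bits back in with fm93c56a_datain(), then deselect.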
+ */ +static void eeprom_readword(struct ql3_adapter *qdev, + u32 eepromAddr, unsigned short *value) +{ + fm93c56a_select(qdev); + fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); + fm93c56a_datain(qdev, value); + fm93c56a_deselect(qdev); +} + +static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) +{ + __le16 *p = (__le16 *)ndev->dev_addr; + p[0] = cpu_to_le16(addr[0]); + p[1] = cpu_to_le16(addr[1]); + p[2] = cpu_to_le16(addr[2]); +} + +static int ql_get_nvram_params(struct ql3_adapter *qdev) +{ + u16 *pEEPROMData; + u16 checksum = 0; + u32 index; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + pEEPROMData = (u16 *)&qdev->nvram_data; + qdev->eeprom_cmd_data = 0; + if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 10)) { + pr_err("%s: Failed ql_sem_spinlock()\n", __func__); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return -1; + } + + for (index = 0; index < EEPROM_SIZE; index++) { + eeprom_readword(qdev, index, pEEPROMData); + checksum += *pEEPROMData; + pEEPROMData++; + } + ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); + + if (checksum != 0) { + netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", + checksum); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return -1; + } + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return checksum; +} + +static const u32 PHYAddr[2] = { + PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS +}; + +static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 temp; + int count = 1000; + + while (count) { + temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); + if (!(temp & MAC_MII_STATUS_BSY)) + return 0; + udelay(10); + count--; + } + return -1; +} + +static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 scanControl; + + if (qdev->numPorts > 1) { + /* Auto scan will cycle through multiple ports */ + scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; + } else { + scanControl = MAC_MII_CONTROL_SC; + } + + /* + * Scan register 1 of PHY/PETBI, + * Set up to scan both devices + * The autoscan starts from the first register, completes + * the last one before rolling over to the first + */ + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + PHYAddr[0] | MII_SCAN_REGISTER); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (scanControl) | + ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); +} + +static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) +{ + u8 ret; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + /* See if scan mode is enabled before we turn it off */ + if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & + (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { + /* Scan is enabled */ + ret = 1; + } else { + /* Scan is disabled */ + ret = 0; + } + + /* + * When disabling scan mode you must first change the MII register + * address + */ + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + PHYAddr[0] | MII_SCAN_REGISTER); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | + MAC_MII_CONTROL_RC) << 16)); + + return ret; +} + +static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, + u16 regAddr, u16 value, u32 phyAddr) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u8 scanWasEnabled; + + 
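+	/*
+	 * MII autoscan polls the PHY continuously and would race a
+	 * manual management access, so it is paused here and restored
+	 * at the bottom if it was running.
+	 */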
scanWasEnabled = ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + phyAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); + + /* Wait for write to complete 9/10/04 SJP */ + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + if (scanWasEnabled) + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, + u16 *value, u32 phyAddr) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u8 scanWasEnabled; + u32 temp; + + scanWasEnabled = ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + phyAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16)); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); + + /* Wait for the read to complete */ + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); + *value = (u16) temp; + + if (scanWasEnabled) + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + qdev->PHYAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); + + /* Wait for write to complete. 
+ */
+	if (ql_wait_for_mii_ready(qdev)) {
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+		return -1;
+	}
+
+	ql_mii_enable_scan_mode(qdev);
+
+	return 0;
+}
+
+static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
+{
+	u32 temp;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+
+	ql_mii_disable_scan_mode(qdev);
+
+	if (ql_wait_for_mii_ready(qdev)) {
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+		return -1;
+	}
+
+	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+			   qdev->PHYAddr | regAddr);
+
+	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+			   (MAC_MII_CONTROL_RC << 16));
+
+	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+	/* Wait for the read to complete */
+	if (ql_wait_for_mii_ready(qdev)) {
+		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+		return -1;
+	}
+
+	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+	*value = (u16) temp;
+
+	ql_mii_enable_scan_mode(qdev);
+
+	return 0;
+}
+
+static void ql_petbi_reset(struct ql3_adapter *qdev)
+{
+	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
+}
+
+static void ql_petbi_start_neg(struct ql3_adapter *qdev)
+{
+	u16 reg;
+
+	/* Enable Auto-negotiation sense */
+	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
+	reg |= PETBI_TBI_AUTO_SENSE;
+	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
+
+	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
+			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
+
+	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
+			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
+
+}
+
+static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
+{
+	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
+			    PHYAddr[qdev->mac_index]);
+}
+
+static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
+{
+	u16 reg;
+
+	/* Enable Auto-negotiation sense */
+	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
+			   PHYAddr[qdev->mac_index]);
+	reg |= PETBI_TBI_AUTO_SENSE;
+	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
+			    PHYAddr[qdev->mac_index]);
+
+	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
+			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
+			    PHYAddr[qdev->mac_index]);
+
+	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
+			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
+			    PHYAddr[qdev->mac_index]);
+}
+
+static void ql_petbi_init(struct ql3_adapter *qdev)
+{
+	ql_petbi_reset(qdev);
+	ql_petbi_start_neg(qdev);
+}
+
+static void ql_petbi_init_ex(struct ql3_adapter *qdev)
+{
+	ql_petbi_reset_ex(qdev);
+	ql_petbi_start_neg_ex(qdev);
+}
+
+static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
+{
+	u16 reg;
+
+	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
+		return 0;
+
+	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
+}
+
+static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
+{
+	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
+	/* power down device bit 11 = 1 */
+	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
+	/* enable diagnostic mode bit 2 = 1 */
+	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
+	/* 1000MB amplitude adjust (see Agere errata) */
+	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
+	/* 1000MB amplitude adjust (see Agere errata) */
+	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
+	/* 100MB amplitude adjust (see Agere errata) */
+	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
+	/* 100MB amplitude adjust (see Agere errata) */
+	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
+	/* 10MB amplitude adjust (see Agere errata) */
+	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
+	/* 10MB amplitude adjust (see Agere errata) */
+	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
+	/* point to hidden reg 0x2806 */
+	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
+	/* Write new PHYAD w/bit 5 set */
+	ql_mii_write_reg_ex(qdev, 0x11,
+			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
+	/*
+	 * Disable diagnostic mode bit 2 = 0
+	 * Power up device bit 11 = 0
+	 * Link up (on) and activity (blink)
+	 */
+	ql_mii_write_reg(qdev, 0x12, 0x840a);
+	ql_mii_write_reg(qdev, 0x00, 0x1140);
+	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
+}
+
+static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
+				       u16 phyIdReg0, u16 phyIdReg1)
+{
+	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
+	u32 oui;
+	u16 model;
+	int i;
+
+	if (phyIdReg0 == 0xffff)
+		return result;
+
+	if (phyIdReg1 == 0xffff)
+		return result;
+
+	/* oui is split between two registers */
+	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
+
+	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
+
+	/* Scan table for this PHY */
+	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
+		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
+		    (model == PHY_DEVICES[i].phyIdModel)) {
+			netdev_info(qdev->ndev, "Phy: %s\n",
+				    PHY_DEVICES[i].name);
+			result = PHY_DEVICES[i].phyDevice;
+			break;
+		}
+	}
+
+	return result;
+}
+
+static int ql_phy_get_speed(struct ql3_adapter *qdev)
+{
+	u16 reg;
+
+	switch (qdev->phyType) {
+	case PHY_AGERE_ET1011C: {
+		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
+			return 0;
+
+		reg = (reg >> 8) & 3;
+		break;
+	}
+	default:
+		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+			return 0;
+
+		reg = (((reg & 0x18) >> 3) & 3);
+	}
+
+	switch (reg) {
+	case 2:
+		return SPEED_1000;
+	case 1:
+		return SPEED_100;
+	case 0:
+		return SPEED_10;
+	default:
+		return -1;
+	}
+}
+
+static int ql_is_full_dup(struct ql3_adapter *qdev)
+{
+	u16 reg;
+
+	switch (qdev->phyType) {
+	case PHY_AGERE_ET1011C: {
+		if (ql_mii_read_reg(qdev, 0x1A, &reg))
+			return 0;
+
+		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
+	}
+	case PHY_VITESSE_VSC8211:
+	default: {
+		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+			return 0;
+		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
+	}
+	}
+}
+
+static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
+{
+	u16 reg;
+
+	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
+		return 0;
+
+	return (reg & PHY_NEG_PAUSE) != 0;
+}
+
+static int PHY_Setup(struct ql3_adapter *qdev)
+{
+	u16 reg1;
+	u16 reg2;
+	bool agereAddrChangeNeeded = false;
+	u32 miiAddr = 0;
+	int err;
+
+	/* Determine the PHY we are using by reading the IDs */
+	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
+	if (err != 0) {
+		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
+		return err;
+	}
+
+	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
+	if (err != 0) {
+		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
+		return err;
+	}
+
+	/* Check if we have an Agere PHY */
+	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
+
+		/* Determine which MII address we should be using,
+		   based on the index of the card */
+		if (qdev->mac_index == 0)
+			miiAddr = MII_AGERE_ADDR_1;
+		else
+			miiAddr = MII_AGERE_ADDR_2;
+
+		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
+		if (err != 0) {
+			netdev_err(qdev->ndev,
+				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
+			return err;
+		}
+
+		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
+		if (err != 0) {
+			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
+			return err;
+		}
+
+		/* We need to remember to initialize the Agere PHY */
+		agereAddrChangeNeeded = true;
+	}
+
+	/* Determine the particular PHY we have on board to apply
+	   PHY specific initializations */
+	qdev->phyType = getPhyType(qdev, reg1, reg2);
+
+	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
+		/* need this here so address gets changed */
+		phyAgereSpecificInit(qdev, miiAddr);
+	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
+		netdev_err(qdev->ndev, "PHY is unknown\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 value;
+
+	if (enable)
+		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
+	else
+		value = (MAC_CONFIG_REG_PE << 16);
+
+	if (qdev->mac_index)
+		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+	else
+		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 value;
+
+	if (enable)
+		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
+	else
+		value = (MAC_CONFIG_REG_SR << 16);
+
+	if (qdev->mac_index)
+		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+	else
+		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 value;
+
+	if (enable)
+		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
+	else
+		value = (MAC_CONFIG_REG_GM << 16);
+
+	if (qdev->mac_index)
+		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+	else
+		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 value;
+
+	if (enable)
+		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
+	else
+		value = (MAC_CONFIG_REG_FD << 16);
+
+	if (qdev->mac_index)
+		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+	else
+		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 value;
+
+	if (enable)
+		value =
+		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
+		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
+	else
+		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
+
+	if (qdev->mac_index)
+		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+	else
+		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
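+ * The SM0/SM1 bits in portStatus report whether the port is fiber
+ * (PETBI) or copper (MII PHY); most of the link helpers that follow
+ * simply branch on ql_is_fiber().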
+ */
+static int ql_is_fiber(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 bitToCheck = 0;
+	u32 temp;
+
+	switch (qdev->mac_index) {
+	case 0:
+		bitToCheck = PORT_STATUS_SM0;
+		break;
+	case 1:
+		bitToCheck = PORT_STATUS_SM1;
+		break;
+	}
+
+	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+	return (temp & bitToCheck) != 0;
+}
+
+static int ql_is_auto_cfg(struct ql3_adapter *qdev)
+{
+	u16 reg;
+	ql_mii_read_reg(qdev, 0x00, &reg);
+	return (reg & 0x1000) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 bitToCheck = 0;
+	u32 temp;
+
+	switch (qdev->mac_index) {
+	case 0:
+		bitToCheck = PORT_STATUS_AC0;
+		break;
+	case 1:
+		bitToCheck = PORT_STATUS_AC1;
+		break;
+	}
+
+	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+	if (temp & bitToCheck) {
+		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
+		return 1;
+	}
+	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
+	return 0;
+}
+
+/*
+ *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
+ */
+static int ql_is_neg_pause(struct ql3_adapter *qdev)
+{
+	if (ql_is_fiber(qdev))
+		return ql_is_petbi_neg_pause(qdev);
+	else
+		return ql_is_phy_neg_pause(qdev);
+}
+
+static int ql_auto_neg_error(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 bitToCheck = 0;
+	u32 temp;
+
+	switch (qdev->mac_index) {
+	case 0:
+		bitToCheck = PORT_STATUS_AE0;
+		break;
+	case 1:
+		bitToCheck = PORT_STATUS_AE1;
+		break;
+	}
+	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+	return (temp & bitToCheck) != 0;
+}
+
+static u32 ql_get_link_speed(struct ql3_adapter *qdev)
+{
+	if (ql_is_fiber(qdev))
+		return SPEED_1000;
+	else
+		return ql_phy_get_speed(qdev);
+}
+
+static int ql_is_link_full_dup(struct ql3_adapter *qdev)
+{
+	if (ql_is_fiber(qdev))
+		return 1;
+	else
+		return ql_is_full_dup(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 bitToCheck = 0;
+	u32 temp;
+
+	switch (qdev->mac_index) {
+	case 0:
+		bitToCheck = ISP_CONTROL_LINK_DN_0;
+		break;
+	case 1:
+		bitToCheck = ISP_CONTROL_LINK_DN_1;
+		break;
+	}
+
+	temp =
+	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+	return (temp & bitToCheck) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+
+	switch (qdev->mac_index) {
+	case 0:
+		ql_write_common_reg(qdev,
+				    &port_regs->CommonRegs.ispControlStatus,
+				    (ISP_CONTROL_LINK_DN_0) |
+				    (ISP_CONTROL_LINK_DN_0 << 16));
+		break;
+
+	case 1:
+		ql_write_common_reg(qdev,
+				    &port_regs->CommonRegs.ispControlStatus,
+				    (ISP_CONTROL_LINK_DN_1) |
+				    (ISP_CONTROL_LINK_DN_1 << 16));
+		break;
+
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Caller holds hw_lock.
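+ * Only one PCI function is expected to own PHY bring-up for a port;
+ * the F1_ENABLED/F3_ENABLED bits in portStatus tell us whether this
+ * function is that "link master", and the link state machine defers
+ * MAC configuration to it.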
+ */
+static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 bitToCheck = 0;
+	u32 temp;
+
+	switch (qdev->mac_index) {
+	case 0:
+		bitToCheck = PORT_STATUS_F1_ENABLED;
+		break;
+	case 1:
+		bitToCheck = PORT_STATUS_F3_ENABLED;
+		break;
+	default:
+		break;
+	}
+
+	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+	if (temp & bitToCheck) {
+		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+			     "not link master\n");
+		return 0;
+	}
+
+	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
+	return 1;
+}
+
+static void ql_phy_reset_ex(struct ql3_adapter *qdev)
+{
+	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
+			    PHYAddr[qdev->mac_index]);
+}
+
+static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
+{
+	u16 reg;
+	u16 portConfiguration;
+
+	if (qdev->phyType == PHY_AGERE_ET1011C)
+		ql_mii_write_reg(qdev, 0x13, 0x0000);
+					/* turn off external loopback */
+
+	if (qdev->mac_index == 0)
+		portConfiguration =
+			qdev->nvram_data.macCfg_port0.portConfiguration;
+	else
+		portConfiguration =
+			qdev->nvram_data.macCfg_port1.portConfiguration;
+
+	/* Some HBAs in the field are set to 0 and they need to
+	   be reinterpreted with a default value */
+	if (portConfiguration == 0)
+		portConfiguration = PORT_CONFIG_DEFAULT;
+
+	/* Set the 1000 advertisements */
+	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
+			   PHYAddr[qdev->mac_index]);
+	reg &= ~PHY_GIG_ALL_PARAMS;
+
+	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
+		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
+			reg |= PHY_GIG_ADV_1000F;
+		else
+			reg |= PHY_GIG_ADV_1000H;
+	}
+
+	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
+			    PHYAddr[qdev->mac_index]);
+
+	/* Set the 10/100 & pause negotiation advertisements */
+	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
+			   PHYAddr[qdev->mac_index]);
+	reg &= ~PHY_NEG_ALL_PARAMS;
+
+	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
+		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
+
+	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
+		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
+			reg |= PHY_NEG_ADV_100F;
+
+		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
+			reg |= PHY_NEG_ADV_10F;
+	}
+
+	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
+		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
+			reg |= PHY_NEG_ADV_100H;
+
+		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
+			reg |= PHY_NEG_ADV_10H;
+	}
+
+	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
+		reg |= 1;
+
+	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
+			    PHYAddr[qdev->mac_index]);
+
+	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
+
+	ql_mii_write_reg_ex(qdev, CONTROL_REG,
+			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
+			    PHYAddr[qdev->mac_index]);
+}
+
+static void ql_phy_init_ex(struct ql3_adapter *qdev)
+{
+	ql_phy_reset_ex(qdev);
+	PHY_Setup(qdev);
+	ql_phy_start_neg_ex(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
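+ * Link state itself is just the UP0/UP1 bit in portStatus; the link
+ * state machine work function samples it through this helper and
+ * drives the LS_DOWN/LS_UP transitions from that single bit.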
+ */
+static u32 ql_get_link_state(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+	u32 bitToCheck = 0;
+	u32 temp, linkState;
+
+	switch (qdev->mac_index) {
+	case 0:
+		bitToCheck = PORT_STATUS_UP0;
+		break;
+	case 1:
+		bitToCheck = PORT_STATUS_UP1;
+		break;
+	}
+
+	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+	if (temp & bitToCheck)
+		linkState = LS_UP;
+	else
+		linkState = LS_DOWN;
+
+	return linkState;
+}
+
+static int ql_port_start(struct ql3_adapter *qdev)
+{
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			     2) << 7)) {
+		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
+		return -1;
+	}
+
+	if (ql_is_fiber(qdev)) {
+		ql_petbi_init(qdev);
+	} else {
+		/* Copper port */
+		ql_phy_init_ex(qdev);
+	}
+
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	return 0;
+}
+
+static int ql_finish_auto_neg(struct ql3_adapter *qdev)
+{
+
+	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			     2) << 7))
+		return -1;
+
+	if (!ql_auto_neg_error(qdev)) {
+		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+			/* configure the MAC */
+			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+				     "Configuring link\n");
+			ql_mac_cfg_soft_reset(qdev, 1);
+			ql_mac_cfg_gig(qdev,
+				       (ql_get_link_speed
+					(qdev) ==
+					SPEED_1000));
+			ql_mac_cfg_full_dup(qdev,
+					    ql_is_link_full_dup
+					    (qdev));
+			ql_mac_cfg_pause(qdev,
+					 ql_is_neg_pause
+					 (qdev));
+			ql_mac_cfg_soft_reset(qdev, 0);
+
+			/* enable the MAC */
+			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+				     "Enabling mac\n");
+			ql_mac_enable(qdev, 1);
+		}
+
+		qdev->port_link_state = LS_UP;
+		netif_start_queue(qdev->ndev);
+		netif_carrier_on(qdev->ndev);
+		netif_info(qdev, link, qdev->ndev,
+			   "Link is up at %d Mbps, %s duplex\n",
+			   ql_get_link_speed(qdev),
+			   ql_is_link_full_dup(qdev) ? "full" : "half");
+
+	} else {	/* Remote error detected */
+
+		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+				     "Remote error detected. Calling ql_port_start()\n");
+			/*
+			 * ql_port_start() is shared code and needs
+			 * to lock the PHY on its own.
+			 */
+			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+			if (ql_port_start(qdev))	/* Restart port */
+				return -1;
+			return 0;
+		}
+	}
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	return 0;
+}
+
+static void ql_link_state_machine_work(struct work_struct *work)
+{
+	struct ql3_adapter *qdev =
+		container_of(work, struct ql3_adapter, link_state_work.work);
+
+	u32 curr_link_state;
+	unsigned long hw_flags;
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+	curr_link_state = ql_get_link_state(qdev);
+
+	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
+		netif_info(qdev, link, qdev->ndev,
+			   "Reset in progress, skip processing link state\n");
+
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+		/* Restart timer on 2 second interval.
*/ + mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); + + return; + } + + switch (qdev->port_link_state) { + default: + if (test_bit(QL_LINK_MASTER, &qdev->flags)) + ql_port_start(qdev); + qdev->port_link_state = LS_DOWN; + /* Fall Through */ + + case LS_DOWN: + if (curr_link_state == LS_UP) { + netif_info(qdev, link, qdev->ndev, "Link is up\n"); + if (ql_is_auto_neg_complete(qdev)) + ql_finish_auto_neg(qdev); + + if (qdev->port_link_state == LS_UP) + ql_link_down_detect_clear(qdev); + + qdev->port_link_state = LS_UP; + } + break; + + case LS_UP: + /* + * See if the link is currently down or went down and came + * back up + */ + if (curr_link_state == LS_DOWN) { + netif_info(qdev, link, qdev->ndev, "Link is down\n"); + qdev->port_link_state = LS_DOWN; + } + if (ql_link_down_detect(qdev)) + qdev->port_link_state = LS_DOWN; + break; + } + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + /* Restart timer on 2 second interval. */ + mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); +} + +/* + * Caller must take hw_lock and QL_PHY_GIO_SEM. + */ +static void ql_get_phy_owner(struct ql3_adapter *qdev) +{ + if (ql_this_adapter_controls_port(qdev)) + set_bit(QL_LINK_MASTER, &qdev->flags); + else + clear_bit(QL_LINK_MASTER, &qdev->flags); +} + +/* + * Caller must take hw_lock and QL_PHY_GIO_SEM. + */ +static void ql_init_scan_mode(struct ql3_adapter *qdev) +{ + ql_mii_enable_scan_mode(qdev); + + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { + if (ql_this_adapter_controls_port(qdev)) + ql_petbi_init_ex(qdev); + } else { + if (ql_this_adapter_controls_port(qdev)) + ql_phy_init_ex(qdev); + } +} + +/* + * MII_Setup needs to be called before taking the PHY out of reset + * so that the management interface clock speed can be set properly. + * It would be better if we had a way to disable MDC until after the + * PHY is out of reset, but we don't have that capability. 
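+ * So ql_mii_setup() just grabs the PHY/GIO semaphore, programs the
+ * management clock divider (125MHz / 28 here) and drops the
+ * semaphore again before anything else touches the PHY.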
+ */ +static int ql_mii_setup(struct ql3_adapter *qdev) +{ + u32 reg; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) + return -1; + + if (qdev->device_id == QL3032_DEVICE_ID) + ql_write_page0_reg(qdev, + &port_regs->macMIIMgmtControlReg, 0x0f00000); + + /* Divide 125MHz clock by 28 to meet PHY timing requirements */ + reg = MAC_MII_CONTROL_CLK_SEL_DIV28; + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); + + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + return 0; +} + +#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ + SUPPORTED_FIBRE | \ + SUPPORTED_Autoneg) +#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ + SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | \ + SUPPORTED_100baseT_Full | \ + SUPPORTED_1000baseT_Half | \ + SUPPORTED_1000baseT_Full | \ + SUPPORTED_Autoneg | \ + SUPPORTED_TP) \ + +static u32 ql_supported_modes(struct ql3_adapter *qdev) +{ + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) + return SUPPORTED_OPTICAL_MODES; + + return SUPPORTED_TP_MODES; +} + +static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) +{ + int status; + unsigned long hw_flags; + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return 0; + } + status = ql_is_auto_cfg(qdev); + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return status; +} + +static u32 ql_get_speed(struct ql3_adapter *qdev) +{ + u32 status; + unsigned long hw_flags; + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return 0; + } + status = ql_get_link_speed(qdev); + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return status; +} + +static int ql_get_full_dup(struct ql3_adapter *qdev) +{ + int status; + unsigned long hw_flags; + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return 0; + } + status = ql_is_link_full_dup(qdev); + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return status; +} + +static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = ql_supported_modes(qdev); + + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { + ecmd->port = PORT_FIBRE; + } else { + ecmd->port = PORT_TP; + ecmd->phy_address = qdev->PHYAddr; + } + ecmd->advertising = ql_supported_modes(qdev); + ecmd->autoneg = ql_get_auto_cfg_status(qdev); + ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); + ecmd->duplex = ql_get_full_dup(qdev); + return 0; +} + +static void ql_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *drvinfo) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + strncpy(drvinfo->driver, ql3xxx_driver_name, 32); + strncpy(drvinfo->version, ql3xxx_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, 
pci_name(qdev->pdev), 32); + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +static u32 ql_get_msglevel(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + return qdev->msg_enable; +} + +static void ql_set_msglevel(struct net_device *ndev, u32 value) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + qdev->msg_enable = value; +} + +static void ql_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + u32 reg; + if (qdev->mac_index == 0) + reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); + else + reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); + + pause->autoneg = ql_get_auto_cfg_status(qdev); + pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; + pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; +} + +static const struct ethtool_ops ql3xxx_ethtool_ops = { + .get_settings = ql_get_settings, + .get_drvinfo = ql_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = ql_get_msglevel, + .set_msglevel = ql_set_msglevel, + .get_pauseparam = ql_get_pauseparam, +}; + +static int ql_populate_free_queue(struct ql3_adapter *qdev) +{ + struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; + dma_addr_t map; + int err; + + while (lrg_buf_cb) { + if (!lrg_buf_cb->skb) { + lrg_buf_cb->skb = + netdev_alloc_skb(qdev->ndev, + qdev->lrg_buffer_len); + if (unlikely(!lrg_buf_cb->skb)) { + netdev_printk(KERN_DEBUG, qdev->ndev, + "Failed netdev_alloc_skb()\n"); + break; + } else { + /* + * We save some space to copy the ethhdr from + * first buffer + */ + skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + lrg_buf_cb->skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); + dev_kfree_skb(lrg_buf_cb->skb); + lrg_buf_cb->skb = NULL; + break; + } + + + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + --qdev->lrg_buf_skb_check; + if (!qdev->lrg_buf_skb_check) + return 1; + } + } + lrg_buf_cb = lrg_buf_cb->next; + } + return 0; +} + +/* + * Caller holds hw_lock. + */ +static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + if (qdev->small_buf_release_cnt >= 16) { + while (qdev->small_buf_release_cnt >= 16) { + qdev->small_buf_q_producer_index++; + + if (qdev->small_buf_q_producer_index == + NUM_SBUFQ_ENTRIES) + qdev->small_buf_q_producer_index = 0; + qdev->small_buf_release_cnt -= 8; + } + wmb(); + writel(qdev->small_buf_q_producer_index, + &port_regs->CommonRegs.rxSmallQProducerIndex); + } +} + +/* + * Caller holds hw_lock. 
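+ * Large-buffer replenishment is batched: receive buffers are staged
+ * onto the buffer queue in groups of eight, and the chip's producer
+ * index register is written once at the end, behind a wmb(), rather
+ * than once per buffer.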
+ */
+static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
+{
+	struct bufq_addr_element *lrg_buf_q_ele;
+	int i;
+	struct ql_rcv_buf_cb *lrg_buf_cb;
+	struct ql3xxx_port_registers __iomem *port_regs =
+		qdev->mem_map_registers;
+
+	if ((qdev->lrg_buf_free_count >= 8) &&
+	    (qdev->lrg_buf_release_cnt >= 16)) {
+
+		if (qdev->lrg_buf_skb_check)
+			if (!ql_populate_free_queue(qdev))
+				return;
+
+		lrg_buf_q_ele = qdev->lrg_buf_next_free;
+
+		while ((qdev->lrg_buf_release_cnt >= 16) &&
+		       (qdev->lrg_buf_free_count >= 8)) {
+
+			for (i = 0; i < 8; i++) {
+				lrg_buf_cb =
+				    ql_get_from_lrg_buf_free_list(qdev);
+				lrg_buf_q_ele->addr_high =
+				    lrg_buf_cb->buf_phy_addr_high;
+				lrg_buf_q_ele->addr_low =
+				    lrg_buf_cb->buf_phy_addr_low;
+				lrg_buf_q_ele++;
+
+				qdev->lrg_buf_release_cnt--;
+			}
+
+			qdev->lrg_buf_q_producer_index++;
+
+			if (qdev->lrg_buf_q_producer_index ==
+			    qdev->num_lbufq_entries)
+				qdev->lrg_buf_q_producer_index = 0;
+
+			if (qdev->lrg_buf_q_producer_index ==
+			    (qdev->num_lbufq_entries - 1)) {
+				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
+			}
+		}
+		wmb();
+		qdev->lrg_buf_next_free = lrg_buf_q_ele;
+		writel(qdev->lrg_buf_q_producer_index,
+		       &port_regs->CommonRegs.rxLargeQProducerIndex);
+	}
+}
+
+static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
+				   struct ob_mac_iocb_rsp *mac_rsp)
+{
+	struct ql_tx_buf_cb *tx_cb;
+	int i;
+	int retval = 0;
+
+	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+		netdev_warn(qdev->ndev,
+			    "Frame too short but it was padded and sent\n");
+	}
+
+	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+
+	/*  Check the transmit response flags for any errors */
+	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+		netdev_err(qdev->ndev,
+			   "Frame too short to be legal, frame not sent\n");
+
+		qdev->ndev->stats.tx_errors++;
+		retval = -EIO;
+		goto frame_not_sent;
+	}
+
+	if (tx_cb->seg_count == 0) {
+		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
+			   mac_rsp->transaction_id);
+
+		qdev->ndev->stats.tx_errors++;
+		retval = -EIO;
+		goto invalid_seg_count;
+	}
+
+	pci_unmap_single(qdev->pdev,
+			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
+			 dma_unmap_len(&tx_cb->map[0], maplen),
+			 PCI_DMA_TODEVICE);
+	tx_cb->seg_count--;
+	if (tx_cb->seg_count) {
+		for (i = 1; i < tx_cb->seg_count; i++) {
+			pci_unmap_page(qdev->pdev,
+				       dma_unmap_addr(&tx_cb->map[i],
+						      mapaddr),
+				       dma_unmap_len(&tx_cb->map[i], maplen),
+				       PCI_DMA_TODEVICE);
+		}
+	}
+	qdev->ndev->stats.tx_packets++;
+	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
+
+frame_not_sent:
+	dev_kfree_skb_irq(tx_cb->skb);
+	tx_cb->skb = NULL;
+
+invalid_seg_count:
+	atomic_inc(&qdev->tx_count);
+}
+
+static void ql_get_sbuf(struct ql3_adapter *qdev)
+{
+	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+		qdev->small_buf_index = 0;
+	qdev->small_buf_release_cnt++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
+{
+	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
+	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
+	qdev->lrg_buf_release_cnt++;
+	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
+		qdev->lrg_buf_index = 0;
+	return lrg_buf_cb;
+}
+
+/*
+ * The difference between 3022 and 3032 for inbound completions:
+ * 3022 uses two buffers per completion.  The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data.  For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to buffer two.  Buffer two is then sent up while
+ * buffer one is returned to the hardware to be reused.
+ * 3032 receives all of its data and headers in one buffer for a
+ * simpler process.  3032 also supports checksum verification as
+ * can be seen in ql_process_macip_rx_intr().
+ */
+static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
+				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
+{
+	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+	struct sk_buff *skb;
+	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
+
+	/*
+	 * Get the inbound address list (small buffer).
+	 */
+	ql_get_sbuf(qdev);
+
+	if (qdev->device_id == QL3022_DEVICE_ID)
+		lrg_buf_cb1 = ql_get_lbuf(qdev);
+
+	/* start of second buffer */
+	lrg_buf_cb2 = ql_get_lbuf(qdev);
+	skb = lrg_buf_cb2->skb;
+
+	qdev->ndev->stats.rx_packets++;
+	qdev->ndev->stats.rx_bytes += length;
+
+	skb_put(skb, length);
+	pci_unmap_single(qdev->pdev,
+			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
+			 dma_unmap_len(lrg_buf_cb2, maplen),
+			 PCI_DMA_FROMDEVICE);
+	prefetch(skb->data);
+	skb_checksum_none_assert(skb);
+	skb->protocol = eth_type_trans(skb, qdev->ndev);
+
+	netif_receive_skb(skb);
+	lrg_buf_cb2->skb = NULL;
+
+	if (qdev->device_id == QL3022_DEVICE_ID)
+		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
+static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
+				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
+{
+	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+	struct sk_buff *skb1 = NULL, *skb2;
+	struct net_device *ndev = qdev->ndev;
+	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
+	u16 size = 0;
+
+	/*
+	 * Get the inbound address list (small buffer).
+	 */
+
+	ql_get_sbuf(qdev);
+
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/* start of first buffer on 3022 */
+		lrg_buf_cb1 = ql_get_lbuf(qdev);
+		skb1 = lrg_buf_cb1->skb;
+		size = ETH_HLEN;
+		if (*((u16 *) skb1->data) != 0xFFFF)
+			size += VLAN_ETH_HLEN - ETH_HLEN;
+	}
+
+	/* start of second buffer */
+	lrg_buf_cb2 = ql_get_lbuf(qdev);
+	skb2 = lrg_buf_cb2->skb;
+
+	skb_put(skb2, length);	/* Just the second buffer length here. */
+	pci_unmap_single(qdev->pdev,
+			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
+			 dma_unmap_len(lrg_buf_cb2, maplen),
+			 PCI_DMA_FROMDEVICE);
+	prefetch(skb2->data);
+
+	skb_checksum_none_assert(skb2);
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/*
+		 * Copy the ethhdr from first buffer to second. This
+		 * is necessary for 3022 IP completions.
+		 */
+		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
+						 skb_push(skb2, size), size);
+	} else {
+		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
+		if (checksum &
+		    (IB_IP_IOCB_RSP_3032_ICE |
+		     IB_IP_IOCB_RSP_3032_CE)) {
+			netdev_err(ndev,
+				   "%s: Bad checksum for this %s packet, checksum = %x\n",
+				   __func__,
+				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
+ "TCP" : "UDP"), checksum); + } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || + (checksum & IB_IP_IOCB_RSP_3032_UDP && + !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { + skb2->ip_summed = CHECKSUM_UNNECESSARY; + } + } + skb2->protocol = eth_type_trans(skb2, qdev->ndev); + + netif_receive_skb(skb2); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += length; + lrg_buf_cb2->skb = NULL; + + if (qdev->device_id == QL3022_DEVICE_ID) + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); +} + +static int ql_tx_rx_clean(struct ql3_adapter *qdev, + int *tx_cleaned, int *rx_cleaned, int work_to_do) +{ + struct net_rsp_iocb *net_rsp; + struct net_device *ndev = qdev->ndev; + int work_done = 0; + + /* While there are entries in the completion queue. */ + while ((le32_to_cpu(*(qdev->prsp_producer_index)) != + qdev->rsp_consumer_index) && (work_done < work_to_do)) { + + net_rsp = qdev->rsp_current; + rmb(); + /* + * Fix 4032 chip's undocumented "feature" where bit-8 is set + * if the inbound completion is for a VLAN. + */ + if (qdev->device_id == QL3032_DEVICE_ID) + net_rsp->opcode &= 0x7f; + switch (net_rsp->opcode) { + + case OPCODE_OB_MAC_IOCB_FN0: + case OPCODE_OB_MAC_IOCB_FN2: + ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) + net_rsp); + (*tx_cleaned)++; + break; + + case OPCODE_IB_MAC_IOCB: + case OPCODE_IB_3032_MAC_IOCB: + ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) + net_rsp); + (*rx_cleaned)++; + break; + + case OPCODE_IB_IP_IOCB: + case OPCODE_IB_3032_IP_IOCB: + ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) + net_rsp); + (*rx_cleaned)++; + break; + default: { + u32 *tmp = (u32 *)net_rsp; + netdev_err(ndev, + "Hit default case, not handled!\n" + " dropping the packet, opcode = %x\n" + "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", + net_rsp->opcode, + (unsigned long int)tmp[0], + (unsigned long int)tmp[1], + (unsigned long int)tmp[2], + (unsigned long int)tmp[3]); + } + } + + qdev->rsp_consumer_index++; + + if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { + qdev->rsp_consumer_index = 0; + qdev->rsp_current = qdev->rsp_q_virt_addr; + } else { + qdev->rsp_current++; + } + + work_done = *tx_cleaned + *rx_cleaned; + } + + return work_done; +} + +static int ql_poll(struct napi_struct *napi, int budget) +{ + struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); + int rx_cleaned = 0, tx_cleaned = 0; + unsigned long hw_flags; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); + + if (tx_cleaned + rx_cleaned != budget) { + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + __napi_complete(napi); + ql_update_small_bufq_prod_index(qdev); + ql_update_lrg_bufq_prod_index(qdev); + writel(qdev->rsp_consumer_index, + &port_regs->CommonRegs.rspQConsumerIndex); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + ql_enable_interrupts(qdev); + } + return tx_cleaned + rx_cleaned; +} + +static irqreturn_t ql3xxx_isr(int irq, void *dev_id) +{ + + struct net_device *ndev = dev_id; + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + int handled = 1; + u32 var; + + value = ql_read_common_reg_l(qdev, + &port_regs->CommonRegs.ispControlStatus); + + if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { + spin_lock(&qdev->adapter_lock); + netif_stop_queue(qdev->ndev); + netif_carrier_off(qdev->ndev); + ql_disable_interrupts(qdev); + qdev->port_link_state 
= LS_DOWN; + set_bit(QL_RESET_ACTIVE, &qdev->flags) ; + + if (value & ISP_CONTROL_FE) { + /* + * Chip Fatal Error. + */ + var = + ql_read_page0_reg_l(qdev, + &port_regs->PortFatalErrStatus); + netdev_warn(ndev, + "Resetting chip. PortFatalErrStatus register = 0x%x\n", + var); + set_bit(QL_RESET_START, &qdev->flags) ; + } else { + /* + * Soft Reset Requested. + */ + set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; + netdev_err(ndev, + "Another function issued a reset to the chip. ISR value = %x\n", + value); + } + queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); + spin_unlock(&qdev->adapter_lock); + } else if (value & ISP_IMR_DISABLE_CMPL_INT) { + ql_disable_interrupts(qdev); + if (likely(napi_schedule_prep(&qdev->napi))) + __napi_schedule(&qdev->napi); + } else + return IRQ_NONE; + + return IRQ_RETVAL(handled); +} + +/* + * Get the total number of segments needed for the given number of fragments. + * This is necessary because outbound address lists (OAL) will be used when + * more than two frags are given. Each address list has 5 addr/len pairs. + * The 5th pair in each OAL is used to point to the next OAL if more frags + * are coming. That is why the frags:segment count ratio is not linear. + */ +static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) +{ + if (qdev->device_id == QL3022_DEVICE_ID) + return 1; + + if (frags <= 2) + return frags + 1; + else if (frags <= 6) + return frags + 2; + else if (frags <= 10) + return frags + 3; + else if (frags <= 14) + return frags + 4; + else if (frags <= 18) + return frags + 5; + return -1; +} + +static void ql_hw_csum_setup(const struct sk_buff *skb, + struct ob_mac_iocb_req *mac_iocb_ptr) +{ + const struct iphdr *ip = ip_hdr(skb); + + mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); + mac_iocb_ptr->ip_hdr_len = ip->ihl; + + if (ip->protocol == IPPROTO_TCP) { + mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | + OB_3032MAC_IOCB_REQ_IC; + } else { + mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | + OB_3032MAC_IOCB_REQ_IC; + } + +} + +/* + * Map the buffers for this transmit. + * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. + */ +static int ql_send_map(struct ql3_adapter *qdev, + struct ob_mac_iocb_req *mac_iocb_ptr, + struct ql_tx_buf_cb *tx_cb, + struct sk_buff *skb) +{ + struct oal *oal; + struct oal_entry *oal_entry; + int len = skb_headlen(skb); + dma_addr_t map; + int err; + int completed_segs, i; + int seg_cnt, seg = 0; + int frag_cnt = (int)skb_shinfo(skb)->nr_frags; + + seg_cnt = tx_cb->seg_count; + /* + * Map the skb buffer first. + */ + map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", + err); + + return NETDEV_TX_BUSY; + } + + oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(len); + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, len); + seg++; + + if (seg_cnt == 1) { + /* Terminate the last segment. */ + oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); + return NETDEV_TX_OK; + } + oal = tx_cb->oal; + for (completed_segs = 0; + completed_segs < frag_cnt; + completed_segs++, seg++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; + oal_entry++; + /* + * Check for continuation requirements. + * It's strange but necessary. 
+ * Continuation entry points to outbound address list. + */ + if ((seg == 2 && seg_cnt > 3) || + (seg == 7 && seg_cnt > 8) || + (seg == 12 && seg_cnt > 13) || + (seg == 17 && seg_cnt > 18)) { + map = pci_map_single(qdev->pdev, oal, + sizeof(struct oal), + PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping outbound address list with error: %d\n", + err); + goto map_error; + } + + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(sizeof(struct oal) | + OAL_CONT_ENTRY); + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, + sizeof(struct oal)); + oal_entry = (struct oal_entry *)oal; + oal++; + seg++; + } + + map = pci_map_page(qdev->pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping frags failed with error: %d\n", + err); + goto map_error; + } + + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(frag->size); + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); + } + /* Terminate the last segment. */ + oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); + return NETDEV_TX_OK; + +map_error: + /* A PCI mapping failed and now we will need to back out + * We need to traverse through the oal's and associated pages which + * have been mapped and now we must unmap them to clean up properly + */ + + seg = 1; + oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; + oal = tx_cb->oal; + for (i = 0; i < completed_segs; i++, seg++) { + oal_entry++; + + /* + * Check for continuation requirements. + * It's strange but necessary. + */ + + if ((seg == 2 && seg_cnt > 3) || + (seg == 7 && seg_cnt > 8) || + (seg == 12 && seg_cnt > 13) || + (seg == 17 && seg_cnt > 18)) { + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_cb->map[seg], mapaddr), + dma_unmap_len(&tx_cb->map[seg], maplen), + PCI_DMA_TODEVICE); + oal++; + seg++; + } + + pci_unmap_page(qdev->pdev, + dma_unmap_addr(&tx_cb->map[seg], mapaddr), + dma_unmap_len(&tx_cb->map[seg], maplen), + PCI_DMA_TODEVICE); + } + + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_cb->map[0], mapaddr), + dma_unmap_addr(&tx_cb->map[0], maplen), + PCI_DMA_TODEVICE); + + return NETDEV_TX_BUSY; + +} + +/* + * The difference between 3022 and 3032 sends: + * 3022 only supports a simple single segment transmission. + * 3032 supports checksumming and scatter/gather lists (fragments). + * The 3032 supports sglists by using the 3 addr/len pairs (ALP) + * in the IOCB plus a chain of outbound address lists (OAL) that + * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) + * will be used to point to an OAL when more ALP entries are required. + * The IOCB is always the top of the chain followed by one or more + * OALs (when necessary). 
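To make the ALP/OAL arithmetic concrete: with nr_frags = 5 there are six data buffers (the linear head plus five fragments); the IOCB carries two of them plus one continuation entry, and a single OAL carries the remaining four, giving 5 + 2 = 7 segments, which is exactly what ql_get_seg_count() returns. A small user-space check of that staircase, with the thresholds copied from the function above:

/* User-space check of the OAL segment arithmetic in ql_get_seg_count().
 * Each continuation entry consumes one slot, hence the staircase. */
#include <assert.h>
#include <stdio.h>

static int seg_count(int frags)
{
	if (frags <= 2)  return frags + 1; /* head + frags fit in the 3 IOCB ALPs */
	if (frags <= 6)  return frags + 2; /* +1 continuation into the first OAL */
	if (frags <= 10) return frags + 3; /* +1 more to chain a second OAL */
	if (frags <= 14) return frags + 4;
	if (frags <= 18) return frags + 5;
	return -1;
}

int main(void)
{
	assert(seg_count(0) == 1);  /* linear skb: one ALP, no OAL */
	assert(seg_count(5) == 7);  /* 6 buffers + 1 continuation entry */
	assert(seg_count(8) == 11); /* 9 buffers + 2 continuation entries */
	printf("seg counts check out\n");
	return 0;
}

The same arithmetic explains the otherwise cryptic seg == 2/7/12/17 tests in ql_send_map(): those are precisely the slots where a continuation entry, rather than a data buffer, has to be written.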
+ */ +static netdev_tx_t ql3xxx_send(struct sk_buff *skb, + struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + struct ql_tx_buf_cb *tx_cb; + u32 tot_len = skb->len; + struct ob_mac_iocb_req *mac_iocb_ptr; + + if (unlikely(atomic_read(&qdev->tx_count) < 2)) + return NETDEV_TX_BUSY; + + tx_cb = &qdev->tx_buf[qdev->req_producer_index]; + tx_cb->seg_count = ql_get_seg_count(qdev, + skb_shinfo(skb)->nr_frags); + if (tx_cb->seg_count == -1) { + netdev_err(ndev, "%s: invalid segment count!\n", __func__); + return NETDEV_TX_OK; + } + + mac_iocb_ptr = tx_cb->queue_entry; + memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); + mac_iocb_ptr->opcode = qdev->mac_ob_opcode; + mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; + mac_iocb_ptr->flags |= qdev->mb_bit_mask; + mac_iocb_ptr->transaction_id = qdev->req_producer_index; + mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); + tx_cb->skb = skb; + if (qdev->device_id == QL3032_DEVICE_ID && + skb->ip_summed == CHECKSUM_PARTIAL) + ql_hw_csum_setup(skb, mac_iocb_ptr); + + if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { + netdev_err(ndev, "%s: Could not map the segments!\n", __func__); + return NETDEV_TX_BUSY; + } + + wmb(); + qdev->req_producer_index++; + if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) + qdev->req_producer_index = 0; + wmb(); + ql_write_common_reg_l(qdev, + &port_regs->CommonRegs.reqQProducerIndex, + qdev->req_producer_index); + + netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, + "tx queued, slot %d, len %d\n", + qdev->req_producer_index, skb->len); + + atomic_dec(&qdev->tx_count); + return NETDEV_TX_OK; +} + +static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) +{ + qdev->req_q_size = + (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); + + qdev->req_q_virt_addr = + pci_alloc_consistent(qdev->pdev, + (size_t) qdev->req_q_size, + &qdev->req_q_phy_addr); + + if ((qdev->req_q_virt_addr == NULL) || + LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { + netdev_err(qdev->ndev, "reqQ failed\n"); + return -ENOMEM; + } + + qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); + + qdev->rsp_q_virt_addr = + pci_alloc_consistent(qdev->pdev, + (size_t) qdev->rsp_q_size, + &qdev->rsp_q_phy_addr); + + if ((qdev->rsp_q_virt_addr == NULL) || + LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { + netdev_err(qdev->ndev, "rspQ allocation failed\n"); + pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, + qdev->req_q_virt_addr, + qdev->req_q_phy_addr); + return -ENOMEM; + } + + set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); + + return 0; +} + +static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) +{ + if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); + return; + } + + pci_free_consistent(qdev->pdev, + qdev->req_q_size, + qdev->req_q_virt_addr, qdev->req_q_phy_addr); + + qdev->req_q_virt_addr = NULL; + + pci_free_consistent(qdev->pdev, + qdev->rsp_q_size, + qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); + + qdev->rsp_q_virt_addr = NULL; + + clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); +} + +static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) +{ + /* Create Large Buffer Queue */ + qdev->lrg_buf_q_size = + qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); + if (qdev->lrg_buf_q_size < PAGE_SIZE) + qdev->lrg_buf_q_alloc_size = PAGE_SIZE; + else + qdev->lrg_buf_q_alloc_size = 
qdev->lrg_buf_q_size * 2;
+
+ qdev->lrg_buf =
+ kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
+ GFP_KERNEL);
+ if (qdev->lrg_buf == NULL) {
+ netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
+ return -ENOMEM;
+ }
+
+ qdev->lrg_buf_q_alloc_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->lrg_buf_q_alloc_size,
+ &qdev->lrg_buf_q_alloc_phy_addr);
+
+ if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "lBufQ failed\n");
+ return -ENOMEM;
+ }
+ qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
+ qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
+
+ /* Create Small Buffer Queue */
+ qdev->small_buf_q_size =
+ NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+ if (qdev->small_buf_q_size < PAGE_SIZE)
+ qdev->small_buf_q_alloc_size = PAGE_SIZE;
+ else
+ qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
+
+ qdev->small_buf_q_alloc_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->small_buf_q_alloc_size,
+ &qdev->small_buf_q_alloc_phy_addr);
+
+ if (qdev->small_buf_q_alloc_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
+ pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+ return -ENOMEM;
+ }
+
+ qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
+ qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
+ set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+ return 0;
+}
+
+static void ql_free_buffer_queues(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
+ netdev_info(qdev->ndev, "Already done\n");
+ return;
+ }
+ kfree(qdev->lrg_buf);
+ pci_free_consistent(qdev->pdev,
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+
+ qdev->lrg_buf_q_virt_addr = NULL;
+
+ pci_free_consistent(qdev->pdev,
+ qdev->small_buf_q_alloc_size,
+ qdev->small_buf_q_alloc_virt_addr,
+ qdev->small_buf_q_alloc_phy_addr);
+
+ qdev->small_buf_q_virt_addr = NULL;
+
+ clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+}
+
+static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct bufq_addr_element *small_buf_q_entry;
+
+ /* Currently we allocate one chunk of memory and use it for small buffers */
+ qdev->small_buf_total_size =
+ (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
+ QL_SMALL_BUFFER_SIZE);
+
+ qdev->small_buf_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->small_buf_total_size,
+ &qdev->small_buf_phy_addr);
+
+ if (qdev->small_buf_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
+ return -ENOMEM;
+ }
+
+ qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
+ qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
+
+ small_buf_q_entry = qdev->small_buf_q_virt_addr;
+
+ /* Initialize the small buffer queue.
*/ + for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { + small_buf_q_entry->addr_high = + cpu_to_le32(qdev->small_buf_phy_addr_high); + small_buf_q_entry->addr_low = + cpu_to_le32(qdev->small_buf_phy_addr_low + + (i * QL_SMALL_BUFFER_SIZE)); + small_buf_q_entry++; + } + qdev->small_buf_index = 0; + set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); + return 0; +} + +static void ql_free_small_buffers(struct ql3_adapter *qdev) +{ + if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); + return; + } + if (qdev->small_buf_virt_addr != NULL) { + pci_free_consistent(qdev->pdev, + qdev->small_buf_total_size, + qdev->small_buf_virt_addr, + qdev->small_buf_phy_addr); + + qdev->small_buf_virt_addr = NULL; + } +} + +static void ql_free_large_buffers(struct ql3_adapter *qdev) +{ + int i = 0; + struct ql_rcv_buf_cb *lrg_buf_cb; + + for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + if (lrg_buf_cb->skb) { + dev_kfree_skb(lrg_buf_cb->skb); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(lrg_buf_cb, mapaddr), + dma_unmap_len(lrg_buf_cb, maplen), + PCI_DMA_FROMDEVICE); + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + } else { + break; + } + } +} + +static void ql_init_large_buffers(struct ql3_adapter *qdev) +{ + int i; + struct ql_rcv_buf_cb *lrg_buf_cb; + struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; + + for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; + buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; + buf_addr_ele++; + } + qdev->lrg_buf_index = 0; + qdev->lrg_buf_skb_check = 0; +} + +static int ql_alloc_large_buffers(struct ql3_adapter *qdev) +{ + int i; + struct ql_rcv_buf_cb *lrg_buf_cb; + struct sk_buff *skb; + dma_addr_t map; + int err; + + for (i = 0; i < qdev->num_large_buffers; i++) { + skb = netdev_alloc_skb(qdev->ndev, + qdev->lrg_buffer_len); + if (unlikely(!skb)) { + /* Better luck next round */ + netdev_err(qdev->ndev, + "large buff alloc failed for %d bytes at index %d\n", + qdev->lrg_buffer_len * 2, i); + ql_free_large_buffers(qdev); + return -ENOMEM; + } else { + + lrg_buf_cb = &qdev->lrg_buf[i]; + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + lrg_buf_cb->index = i; + lrg_buf_cb->skb = skb; + /* + * We save some space to copy the ethhdr from first + * buffer + */ + skb_reserve(skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); + ql_free_large_buffers(qdev); + return -ENOMEM; + } + + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + } + } + return 0; +} + +static void ql_free_send_free_list(struct ql3_adapter *qdev) +{ + struct ql_tx_buf_cb *tx_cb; + int i; + + tx_cb = &qdev->tx_buf[0]; + for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { + kfree(tx_cb->oal); + tx_cb->oal = NULL; + tx_cb++; + } +} + +static int ql_create_send_free_list(struct ql3_adapter *qdev) +{ + struct ql_tx_buf_cb *tx_cb; + int i; + struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; + + /* Create free list of transmit buffers */ + for (i = 0; i < 
NUM_REQ_Q_ENTRIES; i++) { + + tx_cb = &qdev->tx_buf[i]; + tx_cb->skb = NULL; + tx_cb->queue_entry = req_q_curr; + req_q_curr++; + tx_cb->oal = kmalloc(512, GFP_KERNEL); + if (tx_cb->oal == NULL) + return -1; + } + return 0; +} + +static int ql_alloc_mem_resources(struct ql3_adapter *qdev) +{ + if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { + qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; + qdev->lrg_buffer_len = NORMAL_MTU_SIZE; + } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { + /* + * Bigger buffers, so less of them. + */ + qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; + qdev->lrg_buffer_len = JUMBO_MTU_SIZE; + } else { + netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", + qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); + return -ENOMEM; + } + qdev->num_large_buffers = + qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; + qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; + qdev->max_frame_size = + (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; + + /* + * First allocate a page of shared memory and use it for shadow + * locations of Network Request Queue Consumer Address Register and + * Network Completion Queue Producer Index Register + */ + qdev->shadow_reg_virt_addr = + pci_alloc_consistent(qdev->pdev, + PAGE_SIZE, &qdev->shadow_reg_phy_addr); + + if (qdev->shadow_reg_virt_addr != NULL) { + qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; + qdev->req_consumer_index_phy_addr_high = + MS_64BITS(qdev->shadow_reg_phy_addr); + qdev->req_consumer_index_phy_addr_low = + LS_64BITS(qdev->shadow_reg_phy_addr); + + qdev->prsp_producer_index = + (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); + qdev->rsp_producer_index_phy_addr_high = + qdev->req_consumer_index_phy_addr_high; + qdev->rsp_producer_index_phy_addr_low = + qdev->req_consumer_index_phy_addr_low + 8; + } else { + netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); + return -ENOMEM; + } + + if (ql_alloc_net_req_rsp_queues(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); + goto err_req_rsp; + } + + if (ql_alloc_buffer_queues(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); + goto err_buffer_queues; + } + + if (ql_alloc_small_buffers(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); + goto err_small_buffers; + } + + if (ql_alloc_large_buffers(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); + goto err_small_buffers; + } + + /* Initialize the large buffer queue. 
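It may help to trace the sizing arithmetic at the top of ql_alloc_mem_resources() with concrete numbers. The sketch below is a worked example, not driver code: it assumes NORMAL_MTU_SIZE is the standard 1500, and the values used for VLAN_ID_LEN and QL_HEADER_SPACE are invented (VLAN_ETH_HLEN is 18 in the kernel headers and ETHERNET_CRC_SIZE is 4 per qla3xxx.h):

/* Worked example of the receive-buffer sizing in ql_alloc_mem_resources().
 * VLAN_ID_LEN and QL_HEADER_SPACE values are assumptions for illustration. */
#include <stdio.h>

int main(void)
{
	int mtu = 1500;          /* assuming NORMAL_MTU_SIZE */
	int vlan_eth_hlen = 18;  /* VLAN_ETH_HLEN: 14-byte header + 4-byte tag */
	int vlan_id_len = 2;     /* assumed VLAN_ID_LEN */
	int header_space = 64;   /* assumed QL_HEADER_SPACE headroom */
	int crc = 4;             /* ETHERNET_CRC_SIZE */

	int lrg_buffer_len = mtu + vlan_eth_hlen + vlan_id_len + header_space;
	/* the headroom is never DMA'd into, so it drops back out here */
	int max_frame_size = (lrg_buffer_len - header_space) + crc;

	printf("lrg_buffer_len=%d max_frame_size=%d\n",
	       lrg_buffer_len, max_frame_size);
	return 0;
}

The point of the two expressions is that the buffer is padded for the prepend headroom, while the frame-length limit programmed into the MAC counts only what the wire can actually carry, CRC included.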
*/
+ ql_init_large_buffers(qdev);
+ if (ql_create_send_free_list(qdev))
+ goto err_free_list;
+
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+ return 0;
+err_free_list:
+ ql_free_send_free_list(qdev);
+err_small_buffers:
+ ql_free_buffer_queues(qdev);
+err_buffer_queues:
+ ql_free_net_req_rsp_queues(qdev);
+err_req_rsp:
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
+
+ return -ENOMEM;
+}
+
+static void ql_free_mem_resources(struct ql3_adapter *qdev)
+{
+ ql_free_send_free_list(qdev);
+ ql_free_large_buffers(qdev);
+ ql_free_small_buffers(qdev);
+ ql_free_buffer_queues(qdev);
+ ql_free_net_req_rsp_queues(qdev);
+ if (qdev->shadow_reg_virt_addr != NULL) {
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
+ qdev->shadow_reg_virt_addr = NULL;
+ }
+}
+
+static int ql_init_misc_registers(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_local_ram_registers __iomem *local_ram =
+ (void __iomem *)qdev->mem_map_registers;
+
+ if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 4))
+ return -1;
+
+ ql_write_page2_reg(qdev,
+ &local_ram->bufletSize, qdev->nvram_data.bufletSize);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->maxBufletCount,
+ qdev->nvram_data.bufletCount);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->freeBufletThresholdLow,
+ (qdev->nvram_data.tcpWindowThreshold25 << 16) |
+ (qdev->nvram_data.tcpWindowThreshold0));
+
+ ql_write_page2_reg(qdev,
+ &local_ram->freeBufletThresholdHigh,
+ qdev->nvram_data.tcpWindowThreshold50);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->ipHashTableBase,
+ (qdev->nvram_data.ipHashTableBaseHi << 16) |
+ qdev->nvram_data.ipHashTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->ipHashTableCount,
+ qdev->nvram_data.ipHashTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->tcpHashTableBase,
+ (qdev->nvram_data.tcpHashTableBaseHi << 16) |
+ qdev->nvram_data.tcpHashTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->tcpHashTableCount,
+ qdev->nvram_data.tcpHashTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->ncbBase,
+ (qdev->nvram_data.ncbTableBaseHi << 16) |
+ qdev->nvram_data.ncbTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->maxNcbCount,
+ qdev->nvram_data.ncbTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->drbBase,
+ (qdev->nvram_data.drbTableBaseHi << 16) |
+ qdev->nvram_data.drbTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->maxDrbCount,
+ qdev->nvram_data.drbTableSize);
+ ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
+ return 0;
+}
+
+static int ql_adapter_initialize(struct ql3_adapter *qdev)
+{
+ u32 value;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ struct ql3xxx_host_memory_registers __iomem *hmem_regs =
+ (void __iomem *)port_regs;
+ u32 delay = 10;
+ int status = 0;
+ unsigned long hw_flags = 0;
+
+ if (ql_mii_setup(qdev))
+ return -1;
+
+ /* Bring the PHY out of reset */
+ ql_write_common_reg(qdev, spir,
+ (ISP_SERIAL_PORT_IF_WE |
+ (ISP_SERIAL_PORT_IF_WE << 16)));
+ /* Give the PHY time to come out of reset. */
+ mdelay(100);
+ qdev->port_link_state = LS_DOWN;
+ netif_carrier_off(qdev->ndev);
+
+ /* V2 chip fix for ARS-39168.
*/ + ql_write_common_reg(qdev, spir, + (ISP_SERIAL_PORT_IF_SDE | + (ISP_SERIAL_PORT_IF_SDE << 16))); + + /* Request Queue Registers */ + *((u32 *)(qdev->preq_consumer_index)) = 0; + atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); + qdev->req_producer_index = 0; + + ql_write_page1_reg(qdev, + &hmem_regs->reqConsumerIndexAddrHigh, + qdev->req_consumer_index_phy_addr_high); + ql_write_page1_reg(qdev, + &hmem_regs->reqConsumerIndexAddrLow, + qdev->req_consumer_index_phy_addr_low); + + ql_write_page1_reg(qdev, + &hmem_regs->reqBaseAddrHigh, + MS_64BITS(qdev->req_q_phy_addr)); + ql_write_page1_reg(qdev, + &hmem_regs->reqBaseAddrLow, + LS_64BITS(qdev->req_q_phy_addr)); + ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); + + /* Response Queue Registers */ + *((__le16 *) (qdev->prsp_producer_index)) = 0; + qdev->rsp_consumer_index = 0; + qdev->rsp_current = qdev->rsp_q_virt_addr; + + ql_write_page1_reg(qdev, + &hmem_regs->rspProducerIndexAddrHigh, + qdev->rsp_producer_index_phy_addr_high); + + ql_write_page1_reg(qdev, + &hmem_regs->rspProducerIndexAddrLow, + qdev->rsp_producer_index_phy_addr_low); + + ql_write_page1_reg(qdev, + &hmem_regs->rspBaseAddrHigh, + MS_64BITS(qdev->rsp_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rspBaseAddrLow, + LS_64BITS(qdev->rsp_q_phy_addr)); + + ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); + + /* Large Buffer Queue */ + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQBaseAddrHigh, + MS_64BITS(qdev->lrg_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQBaseAddrLow, + LS_64BITS(qdev->lrg_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQLength, + qdev->num_lbufq_entries); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeBufferLength, + qdev->lrg_buffer_len); + + /* Small Buffer Queue */ + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallQBaseAddrHigh, + MS_64BITS(qdev->small_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallQBaseAddrLow, + LS_64BITS(qdev->small_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallBufferLength, + QL_SMALL_BUFFER_SIZE); + + qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; + qdev->small_buf_release_cnt = 8; + qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; + qdev->lrg_buf_release_cnt = 8; + qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; + qdev->small_buf_index = 0; + qdev->lrg_buf_index = 0; + qdev->lrg_buf_free_count = 0; + qdev->lrg_buf_free_head = NULL; + qdev->lrg_buf_free_tail = NULL; + + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + rxSmallQProducerIndex, + qdev->small_buf_q_producer_index); + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + rxLargeQProducerIndex, + qdev->lrg_buf_q_producer_index); + + /* + * Find out if the chip has already been initialized. If it has, then + * we skip some of the initialization. + */ + clear_bit(QL_LINK_MASTER, &qdev->flags); + value = ql_read_page0_reg(qdev, &port_regs->portStatus); + if ((value & PORT_STATUS_IC) == 0) { + + /* Chip has not been configured yet, so let it rip. 
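Every queue base address above is programmed as two 32-bit halves via the MS_64BITS()/LS_64BITS() macros. A user-space sketch of what those macros presumably do; their definitions here are assumptions for illustration, not copied from the header:

/* Sketch of splitting a 64-bit DMA address into the paired 32-bit
 * register writes seen above. Macro bodies are assumed. */
#include <stdint.h>
#include <stdio.h>

#define LS_64BITS(x) ((uint32_t)((uint64_t)(x) & 0xffffffffULL))
#define MS_64BITS(x) ((uint32_t)(((uint64_t)(x) >> 32) & 0xffffffffULL))

int main(void)
{
	uint64_t dma = 0x0000000123456000ULL; /* pretend queue base address */

	printf("high=0x%08x low=0x%08x\n", MS_64BITS(dma), LS_64BITS(dma));
	return 0;
}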
*/ + if (ql_init_misc_registers(qdev)) { + status = -1; + goto out; + } + + value = qdev->nvram_data.tcpMaxWindowSize; + ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); + + value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; + + if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) + * 2) << 13)) { + status = -1; + goto out; + } + ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); + ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, + (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << + 16) | (INTERNAL_CHIP_SD | + INTERNAL_CHIP_WE))); + ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); + } + + if (qdev->mac_index) + ql_write_page0_reg(qdev, + &port_regs->mac1MaxFrameLengthReg, + qdev->max_frame_size); + else + ql_write_page0_reg(qdev, + &port_regs->mac0MaxFrameLengthReg, + qdev->max_frame_size); + + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) { + status = -1; + goto out; + } + + PHY_Setup(qdev); + ql_init_scan_mode(qdev); + ql_get_phy_owner(qdev); + + /* Load the MAC Configuration */ + + /* Program lower 32 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((qdev->ndev->dev_addr[2] << 24) + | (qdev->ndev->dev_addr[3] << 16) + | (qdev->ndev->dev_addr[4] << 8) + | qdev->ndev->dev_addr[5])); + + /* Program top 16 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((qdev->ndev->dev_addr[0] << 8) + | qdev->ndev->dev_addr[1])); + + /* Enable Primary MAC */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | + MAC_ADDR_INDIRECT_PTR_REG_PE)); + + /* Clear Primary and Secondary IP addresses */ + ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, + ((IP_ADDR_INDEX_REG_MASK << 16) | + (qdev->mac_index << 2))); + ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); + + ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, + ((IP_ADDR_INDEX_REG_MASK << 16) | + ((qdev->mac_index << 2) + 1))); + ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); + + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + + /* Indicate Configuration Complete */ + ql_write_page0_reg(qdev, + &port_regs->portControl, + ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); + + do { + value = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (value & PORT_STATUS_IC) + break; + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + msleep(500); + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + } while (--delay); + + if (delay == 0) { + netdev_err(qdev->ndev, "Hw Initialization timeout\n"); + status = -1; + goto out; + } + + /* Enable Ethernet Function */ + if (qdev->device_id == QL3032_DEVICE_ID) { + value = + (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | + QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 | + QL3032_PORT_CONTROL_ET); + ql_write_page0_reg(qdev, &port_regs->functionControl, + ((value << 16) | value)); + } else { + value = + (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | + PORT_CONTROL_HH); + ql_write_page0_reg(qdev, &port_regs->portControl, + ((value << 16) | value)); + } + + +out: + return status; +} + +/* + * Caller holds hw_lock. 
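The configuration-complete poll just above drops hw_lock around each msleep() so that other contexts can reach the hardware while this function waits; the reset paths below make the same trade. A runnable pthread analog of that drop-the-lock-while-sleeping pattern, purely as a sketch:

/* User-space analog of the unlock/sleep/relock poll loop above. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static int port_status; /* stands in for the PORT_STATUS_IC bit */

int main(void)
{
	int delay = 10;

	pthread_mutex_lock(&hw_lock);
	do {
		if (port_status)        /* read only while holding the lock */
			break;
		pthread_mutex_unlock(&hw_lock); /* let others in... */
		usleep(500 * 1000);             /* ...while we wait */
		pthread_mutex_lock(&hw_lock);
		port_status = 1;        /* pretend the chip finished */
	} while (--delay);
	pthread_mutex_unlock(&hw_lock);

	printf(delay ? "configured\n" : "timed out\n");
	return 0;
}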
+ */
+static int ql_adapter_reset(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ int status = 0;
+ u16 value;
+ int max_wait_time;
+
+ set_bit(QL_RESET_ACTIVE, &qdev->flags);
+ clear_bit(QL_RESET_DONE, &qdev->flags);
+
+ /*
+ * Issue soft reset to chip.
+ */
+ netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
+
+ /* Wait up to 5 seconds for the reset to complete. */
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "Wait up to 5 seconds for reset to complete\n");
+
+ /* Wait until the firmware tells us the Soft Reset is done */
+ max_wait_time = 5;
+ do {
+ value =
+ ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus);
+ if ((value & ISP_CONTROL_SR) == 0)
+ break;
+
+ ssleep(1);
+ } while ((--max_wait_time));
+
+ /*
+ * Also, make sure that the Network Reset Interrupt bit has been
+ * cleared after the soft reset has taken place.
+ */
+ value =
+ ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+ if (value & ISP_CONTROL_RI) {
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "clearing RI after reset\n");
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+ }
+
+ if (max_wait_time == 0) {
+ /* Issue Force Soft Reset */
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_FSR << 16) |
+ ISP_CONTROL_FSR));
+ /*
+ * Wait until the firmware tells us the Force Soft Reset is
+ * done
+ */
+ max_wait_time = 5;
+ do {
+ value = ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus);
+ if ((value & ISP_CONTROL_FSR) == 0)
+ break;
+ ssleep(1);
+ } while ((--max_wait_time));
+ }
+ if (max_wait_time == 0)
+ status = 1;
+
+ clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+ set_bit(QL_RESET_DONE, &qdev->flags);
+ return status;
+}
+
+static void ql_set_mac_info(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value, port_status;
+ u8 func_number;
+
+ /* Get the function number */
+ value =
+ ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+ func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
+ port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ switch (value & ISP_CONTROL_FN_MASK) {
+ case ISP_CONTROL_FN0_NET:
+ qdev->mac_index = 0;
+ qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+ qdev->mb_bit_mask = FN0_MA_BITS_MASK;
+ qdev->PHYAddr = PORT0_PHY_ADDRESS;
+ if (port_status & PORT_STATUS_SM0)
+ set_bit(QL_LINK_OPTICAL, &qdev->flags);
+ else
+ clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+ break;
+
+ case ISP_CONTROL_FN1_NET:
+ qdev->mac_index = 1;
+ qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+ qdev->mb_bit_mask = FN1_MA_BITS_MASK;
+ qdev->PHYAddr = PORT1_PHY_ADDRESS;
+ if (port_status & PORT_STATUS_SM1)
+ set_bit(QL_LINK_OPTICAL, &qdev->flags);
+ else
+ clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+ break;
+
+ case ISP_CONTROL_FN0_SCSI:
+ case ISP_CONTROL_FN1_SCSI:
+ default:
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "Invalid function number, ispControlStatus = 0x%x\n",
+ value);
+ break;
+ }
+ qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct pci_dev *pdev = qdev->pdev;
+
+ netdev_info(ndev,
+ "%s Adapter %d RevisionID %d found %s on PCI 
slot %d\n", + DRV_NAME, qdev->index, qdev->chip_rev_id, + qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", + qdev->pci_slot); + netdev_info(ndev, "%s Interface\n", + test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); + + /* + * Print PCI bus width/type. + */ + netdev_info(ndev, "Bus interface is %s %s\n", + ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), + ((qdev->pci_x) ? "PCI-X" : "PCI")); + + netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", + qdev->mem_map_registers); + netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); + + netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); +} + +static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) +{ + struct net_device *ndev = qdev->ndev; + int retval = 0; + + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + clear_bit(QL_ADAPTER_UP, &qdev->flags); + clear_bit(QL_LINK_MASTER, &qdev->flags); + + ql_disable_interrupts(qdev); + + free_irq(qdev->pdev->irq, ndev); + + if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { + netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); + clear_bit(QL_MSI_ENABLED, &qdev->flags); + pci_disable_msi(qdev->pdev); + } + + del_timer_sync(&qdev->adapter_timer); + + napi_disable(&qdev->napi); + + if (do_reset) { + int soft_reset; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_wait_for_drvr_lock(qdev)) { + soft_reset = ql_adapter_reset(qdev); + if (soft_reset) { + netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", + qdev->index); + } + netdev_err(ndev, + "Releasing driver lock via chip reset\n"); + } else { + netdev_err(ndev, + "Could not acquire driver lock to do reset!\n"); + retval = -1; + } + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + } + ql_free_mem_resources(qdev); + return retval; +} + +static int ql_adapter_up(struct ql3_adapter *qdev) +{ + struct net_device *ndev = qdev->ndev; + int err; + unsigned long irq_flags = IRQF_SHARED; + unsigned long hw_flags; + + if (ql_alloc_mem_resources(qdev)) { + netdev_err(ndev, "Unable to allocate buffers\n"); + return -ENOMEM; + } + + if (qdev->msi) { + if (pci_enable_msi(qdev->pdev)) { + netdev_err(ndev, + "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); + qdev->msi = 0; + } else { + netdev_info(ndev, "MSI Enabled...\n"); + set_bit(QL_MSI_ENABLED, &qdev->flags); + irq_flags &= ~IRQF_SHARED; + } + } + + err = request_irq(qdev->pdev->irq, ql3xxx_isr, + irq_flags, ndev->name, ndev); + if (err) { + netdev_err(ndev, + "Failed to reserve interrupt %d - already in use\n", + qdev->pdev->irq); + goto err_irq; + } + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + err = ql_wait_for_drvr_lock(qdev); + if (err) { + err = ql_adapter_initialize(qdev); + if (err) { + netdev_err(ndev, "Unable to initialize adapter\n"); + goto err_init; + } + netdev_err(ndev, "Releasing driver lock\n"); + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); + } else { + netdev_err(ndev, "Could not acquire driver lock\n"); + goto err_lock; + } + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + set_bit(QL_ADAPTER_UP, &qdev->flags); + + mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); + + napi_enable(&qdev->napi); + ql_enable_interrupts(qdev); + return 0; + +err_init: + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); +err_lock: + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + free_irq(qdev->pdev->irq, ndev); +err_irq: + if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { + netdev_info(ndev, "calling pci_disable_msi()\n"); + clear_bit(QL_MSI_ENABLED, &qdev->flags); + pci_disable_msi(qdev->pdev); + } + return err; +} + +static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) +{ + if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { + netdev_err(qdev->ndev, + "Driver up/down cycle failed, closing device\n"); + rtnl_lock(); + dev_close(qdev->ndev); + rtnl_unlock(); + return -1; + } + return 0; +} + +static int ql3xxx_close(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + + /* + * Wait for device to recover from a reset. + * (Rarely happens, but possible.) + */ + while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) + msleep(50); + + ql_adapter_down(qdev, QL_DO_RESET); + return 0; +} + +static int ql3xxx_open(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + return ql_adapter_up(qdev); +} + +static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + struct sockaddr *addr = p; + unsigned long hw_flags; + + if (netif_running(ndev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + /* Program lower 32 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((ndev->dev_addr[2] << 24) | (ndev-> + dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); + + /* Program top 16 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + return 0; +} + +static void ql3xxx_tx_timeout(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + + netdev_err(ndev, "Resetting...\n"); + /* + * Stop the queues, we've got a problem. + */ + netif_stop_queue(ndev); + + /* + * Wake up the worker to process this event. 
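ql3xxx_tx_timeout() runs in a context that must not sleep, so it only parks the queue and defers the heavy lifting to tx_timeout_work; the worker can then sleep its way through a full adapter cycle. A minimal user-space analog of that hand-off, with invented names:

/* User-space analog of deferring recovery from a timeout handler
 * to a worker that is allowed to sleep. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *reset_worker(void *arg)
{
	(void)arg;
	sleep(1); /* the slow, sleeping part of the recovery */
	printf("adapter cycled\n");
	return NULL;
}

int main(void)
{
	pthread_t worker;

	/* the "timeout handler": hand off immediately and return */
	printf("tx timeout: stopping queue, queueing work\n");
	pthread_create(&worker, NULL, reset_worker, NULL);

	pthread_join(worker, NULL);
	return 0;
}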
+ */
+ queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
+}
+
+static void ql_reset_work(struct work_struct *work)
+{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, reset_work.work);
+ struct net_device *ndev = qdev->ndev;
+ u32 value;
+ struct ql_tx_buf_cb *tx_cb;
+ int max_wait_time, i;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ unsigned long hw_flags;
+
+ if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+ /*
+ * Loop through the active list and return the skb.
+ */
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+ int j;
+ tx_cb = &qdev->tx_buf[i];
+ if (tx_cb->skb) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "Freeing lost SKB\n");
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[0],
+ mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ PCI_DMA_TODEVICE);
+ for (j = 1; j < tx_cb->seg_count; j++) {
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[j],
+ mapaddr),
+ dma_unmap_len(&tx_cb->map[j],
+ maplen),
+ PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(tx_cb->skb);
+ tx_cb->skb = NULL;
+ }
+ }
+
+ netdev_err(ndev, "Clearing NRI after reset\n");
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+ /*
+ * Wait for the Soft Reset to Complete.
+ */
+ max_wait_time = 10;
+ do {
+ value = ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus);
+ if ((value & ISP_CONTROL_SR) == 0) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "reset completed\n");
+ break;
+ }
+
+ if (value & ISP_CONTROL_RI) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "clearing NRI after reset\n");
+ ql_write_common_reg(qdev,
+ &port_regs->
+ CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI <<
+ 16) | ISP_CONTROL_RI));
+ }
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ ssleep(1);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ } while (--max_wait_time);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ if (value & ISP_CONTROL_SR) {
+
+ /*
+ * Set the reset flags and clear the board again.
+ * Nothing else to do...
+ */ + netdev_err(ndev, + "Timed out waiting for reset to complete\n"); + netdev_err(ndev, "Do a reset\n"); + clear_bit(QL_RESET_PER_SCSI, &qdev->flags); + clear_bit(QL_RESET_START, &qdev->flags); + ql_cycle_adapter(qdev, QL_DO_RESET); + return; + } + + clear_bit(QL_RESET_ACTIVE, &qdev->flags); + clear_bit(QL_RESET_PER_SCSI, &qdev->flags); + clear_bit(QL_RESET_START, &qdev->flags); + ql_cycle_adapter(qdev, QL_NO_RESET); + } +} + +static void ql_tx_timeout_work(struct work_struct *work) +{ + struct ql3_adapter *qdev = + container_of(work, struct ql3_adapter, tx_timeout_work.work); + + ql_cycle_adapter(qdev, QL_DO_RESET); +} + +static void ql_get_board_info(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); + + qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); + if (value & PORT_STATUS_64) + qdev->pci_width = 64; + else + qdev->pci_width = 32; + if (value & PORT_STATUS_X) + qdev->pci_x = 1; + else + qdev->pci_x = 0; + qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); +} + +static void ql3xxx_timer(unsigned long ptr) +{ + struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; + queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); +} + +static const struct net_device_ops ql3xxx_netdev_ops = { + .ndo_open = ql3xxx_open, + .ndo_start_xmit = ql3xxx_send, + .ndo_stop = ql3xxx_close, + .ndo_set_multicast_list = NULL, /* not allowed on NIC side */ + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ql3xxx_set_mac_address, + .ndo_tx_timeout = ql3xxx_tx_timeout, +}; + +static int __devinit ql3xxx_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_entry) +{ + struct net_device *ndev = NULL; + struct ql3_adapter *qdev = NULL; + static int cards_found; + int uninitialized_var(pci_using_dac), err; + + err = pci_enable_device(pdev); + if (err) { + pr_err("%s cannot enable PCI device\n", pci_name(pdev)); + goto err_out; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + pci_using_dac = 0; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + if (err) { + pr_err("%s no usable DMA configuration\n", pci_name(pdev)); + goto err_out_free_regions; + } + + ndev = alloc_etherdev(sizeof(struct ql3_adapter)); + if (!ndev) { + pr_err("%s could not alloc etherdev\n", pci_name(pdev)); + err = -ENOMEM; + goto err_out_free_regions; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + pci_set_drvdata(pdev, ndev); + + qdev = netdev_priv(ndev); + qdev->index = cards_found; + qdev->ndev = ndev; + qdev->pdev = pdev; + qdev->device_id = pci_entry->device; + qdev->port_link_state = LS_DOWN; + if (msi) + qdev->msi = 1; + + qdev->msg_enable = netif_msg_init(debug, default_msg); + + if (pci_using_dac) + ndev->features |= NETIF_F_HIGHDMA; + if (qdev->device_id == QL3032_DEVICE_ID) + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; + + qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); + if (!qdev->mem_map_registers) { + pr_err("%s: cannot map device registers\n", pci_name(pdev)); + err = -EIO; + goto err_out_free_ndev; + } + + spin_lock_init(&qdev->adapter_lock); + 
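Wiring ql3xxx_netdev_ops into the net_device (just below) is what lets the core reach the driver: the stack calls through the ops table rather than through fixed symbols. A toy user-space version of the same function-pointer dispatch pattern, names invented:

/* User-space analog of the netdev_ops function-pointer dispatch. */
#include <stdio.h>

struct ops {
	int (*open)(void);
	int (*xmit)(const char *pkt);
};

static int my_open(void) { printf("open\n"); return 0; }
static int my_xmit(const char *pkt) { printf("xmit: %s\n", pkt); return 0; }

static const struct ops my_ops = {
	.open = my_open,
	.xmit = my_xmit, /* plays the role of .ndo_start_xmit = ql3xxx_send */
};

int main(void)
{
	/* the "core" only ever sees the table, never the functions by name */
	my_ops.open();
	my_ops.xmit("hello");
	return 0;
}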
spin_lock_init(&qdev->hw_lock); + + /* Set driver entry points */ + ndev->netdev_ops = &ql3xxx_netdev_ops; + SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); + ndev->watchdog_timeo = 5 * HZ; + + netif_napi_add(ndev, &qdev->napi, ql_poll, 64); + + ndev->irq = pdev->irq; + + /* make sure the EEPROM is good */ + if (ql_get_nvram_params(qdev)) { + pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", + __func__, qdev->index); + err = -EIO; + goto err_out_iounmap; + } + + ql_set_mac_info(qdev); + + /* Validate and set parameters */ + if (qdev->mac_index) { + ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; + ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); + } else { + ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; + ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); + } + memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); + + ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; + + /* Record PCI bus information. */ + ql_get_board_info(qdev); + + /* + * Set the Maximum Memory Read Byte Count value. We do this to handle + * jumbo frames. + */ + if (qdev->pci_x) + pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); + + err = register_netdev(ndev); + if (err) { + pr_err("%s: cannot register net device\n", pci_name(pdev)); + goto err_out_iounmap; + } + + /* we're going to reset, so assume we have no link for now */ + + netif_carrier_off(ndev); + netif_stop_queue(ndev); + + qdev->workqueue = create_singlethread_workqueue(ndev->name); + INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); + INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); + INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); + + init_timer(&qdev->adapter_timer); + qdev->adapter_timer.function = ql3xxx_timer; + qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ + qdev->adapter_timer.data = (unsigned long)qdev; + + if (!cards_found) { + pr_alert("%s\n", DRV_STRING); + pr_alert("Driver name: %s, Version: %s\n", + DRV_NAME, DRV_VERSION); + } + ql_display_dev_info(ndev); + + cards_found++; + return 0; + +err_out_iounmap: + iounmap(qdev->mem_map_registers); +err_out_free_ndev: + free_netdev(ndev); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +err_out: + return err; +} + +static void __devexit ql3xxx_remove(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql3_adapter *qdev = netdev_priv(ndev); + + unregister_netdev(ndev); + + ql_disable_interrupts(qdev); + + if (qdev->workqueue) { + cancel_delayed_work(&qdev->reset_work); + cancel_delayed_work(&qdev->tx_timeout_work); + destroy_workqueue(qdev->workqueue); + qdev->workqueue = NULL; + } + + iounmap(qdev->mem_map_registers); + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(ndev); +} + +static struct pci_driver ql3xxx_driver = { + + .name = DRV_NAME, + .id_table = ql3xxx_pci_tbl, + .probe = ql3xxx_probe, + .remove = __devexit_p(ql3xxx_remove), +}; + +static int __init ql3xxx_init_module(void) +{ + return pci_register_driver(&ql3xxx_driver); +} + +static void __exit ql3xxx_exit(void) +{ + pci_unregister_driver(&ql3xxx_driver); +} + +module_init(ql3xxx_init_module); +module_exit(ql3xxx_exit); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.h b/drivers/net/ethernet/qlogic/qla3xxx.h new file mode 100644 index 0000000..73e23436 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qla3xxx.h @@ -0,0 +1,1189 @@ +/* + * QLogic QLA3xxx NIC HBA Driver + * Copyright (c) 
2003-2006 QLogic Corporation + * + * See LICENSE.qla3xxx for copyright and licensing details. + */ +#ifndef _QLA3XXX_H_ +#define _QLA3XXX_H_ + +/* + * IOCB Definitions... + */ +#pragma pack(1) + +#define OPCODE_OB_MAC_IOCB_FN0 0x01 +#define OPCODE_OB_MAC_IOCB_FN2 0x21 + +#define OPCODE_IB_MAC_IOCB 0xF9 +#define OPCODE_IB_3032_MAC_IOCB 0x09 +#define OPCODE_IB_IP_IOCB 0xFA +#define OPCODE_IB_3032_IP_IOCB 0x0A + +#define OPCODE_FUNC_ID_MASK 0x30 +#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ + +#define FN0_MA_BITS_MASK 0x00 +#define FN1_MA_BITS_MASK 0x80 + +struct ob_mac_iocb_req { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_REQ_MA 0xe0 +#define OB_MAC_IOCB_REQ_F 0x10 +#define OB_MAC_IOCB_REQ_X 0x08 +#define OB_MAC_IOCB_REQ_D 0x02 +#define OB_MAC_IOCB_REQ_I 0x01 + u8 flags1; +#define OB_3032MAC_IOCB_REQ_IC 0x04 +#define OB_3032MAC_IOCB_REQ_TC 0x02 +#define OB_3032MAC_IOCB_REQ_UC 0x01 + u8 reserved0; + + u32 transaction_id; /* opaque for hardware */ + __le16 data_len; + u8 ip_hdr_off; + u8 ip_hdr_len; + __le32 reserved1; + __le32 reserved2; + __le32 buf_addr0_low; + __le32 buf_addr0_high; + __le32 buf_0_len; + __le32 buf_addr1_low; + __le32 buf_addr1_high; + __le32 buf_1_len; + __le32 buf_addr2_low; + __le32 buf_addr2_high; + __le32 buf_2_len; + __le32 reserved3; + __le32 reserved4; +}; +/* + * The following constants define control bits for buffer + * length fields for all IOCB's. + */ +#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */ +#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */ +#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */ +#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */ + +struct ob_mac_iocb_rsp { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_RSP_P 0x08 +#define OB_MAC_IOCB_RSP_L 0x04 +#define OB_MAC_IOCB_RSP_S 0x02 +#define OB_MAC_IOCB_RSP_I 0x01 + + __le16 reserved0; + u32 transaction_id; /* opaque for hardware */ + __le32 reserved1; + __le32 reserved2; +}; + +struct ib_mac_iocb_rsp { + u8 opcode; +#define IB_MAC_IOCB_RSP_V 0x80 + u8 flags; +#define IB_MAC_IOCB_RSP_S 0x80 +#define IB_MAC_IOCB_RSP_H1 0x40 +#define IB_MAC_IOCB_RSP_H0 0x20 +#define IB_MAC_IOCB_RSP_B 0x10 +#define IB_MAC_IOCB_RSP_M 0x08 +#define IB_MAC_IOCB_RSP_MA 0x07 + + __le16 length; + __le32 reserved; + __le32 ial_low; + __le32 ial_high; + +}; + +struct ob_ip_iocb_req { + u8 opcode; + __le16 flags; +#define OB_IP_IOCB_REQ_O 0x100 +#define OB_IP_IOCB_REQ_H 0x008 +#define OB_IP_IOCB_REQ_U 0x004 +#define OB_IP_IOCB_REQ_D 0x002 +#define OB_IP_IOCB_REQ_I 0x001 + + u8 reserved0; + + __le32 transaction_id; + __le16 data_len; + __le16 reserved1; + __le32 hncb_ptr_low; + __le32 hncb_ptr_high; + __le32 buf_addr0_low; + __le32 buf_addr0_high; + __le32 buf_0_len; + __le32 buf_addr1_low; + __le32 buf_addr1_high; + __le32 buf_1_len; + __le32 buf_addr2_low; + __le32 buf_addr2_high; + __le32 buf_2_len; + __le32 reserved2; + __le32 reserved3; +}; + +/* defines for BufferLength fields above */ +#define OB_IP_IOCB_REQ_E 0x80000000 +#define OB_IP_IOCB_REQ_C 0x40000000 +#define OB_IP_IOCB_REQ_L 0x20000000 +#define OB_IP_IOCB_REQ_R 0x10000000 + +struct ob_ip_iocb_rsp { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_RSP_H 0x10 +#define OB_MAC_IOCB_RSP_E 0x08 +#define OB_MAC_IOCB_RSP_L 0x04 +#define OB_MAC_IOCB_RSP_S 0x02 +#define OB_MAC_IOCB_RSP_I 0x01 + + __le16 reserved0; + __le32 transaction_id; + __le32 reserved1; + __le32 reserved2; +}; + +struct ib_ip_iocb_rsp { + u8 opcode; +#define IB_IP_IOCB_RSP_3032_V 0x80 +#define 
IB_IP_IOCB_RSP_3032_O 0x40 +#define IB_IP_IOCB_RSP_3032_I 0x20 +#define IB_IP_IOCB_RSP_3032_R 0x10 + u8 flags; +#define IB_IP_IOCB_RSP_S 0x80 +#define IB_IP_IOCB_RSP_H1 0x40 +#define IB_IP_IOCB_RSP_H0 0x20 +#define IB_IP_IOCB_RSP_B 0x10 +#define IB_IP_IOCB_RSP_M 0x08 +#define IB_IP_IOCB_RSP_MA 0x07 + + __le16 length; + __le16 checksum; +#define IB_IP_IOCB_RSP_3032_ICE 0x01 +#define IB_IP_IOCB_RSP_3032_CE 0x02 +#define IB_IP_IOCB_RSP_3032_NUC 0x04 +#define IB_IP_IOCB_RSP_3032_UDP 0x08 +#define IB_IP_IOCB_RSP_3032_TCP 0x10 +#define IB_IP_IOCB_RSP_3032_IPE 0x20 + __le16 reserved; +#define IB_IP_IOCB_RSP_R 0x01 + __le32 ial_low; + __le32 ial_high; +}; + +struct net_rsp_iocb { + u8 opcode; + u8 flags; + __le16 reserved0; + __le32 reserved[3]; +}; +#pragma pack() + +/* + * Register Definitions... + */ +#define PORT0_PHY_ADDRESS 0x1e00 +#define PORT1_PHY_ADDRESS 0x1f00 + +#define ETHERNET_CRC_SIZE 4 + +#define MII_SCAN_REGISTER 0x00000001 + +#define PHY_ID_0_REG 2 +#define PHY_ID_1_REG 3 + +#define PHY_OUI_1_MASK 0xfc00 +#define PHY_MODEL_MASK 0x03f0 + +/* Address for the Agere Phy */ +#define MII_AGERE_ADDR_1 0x00001000 +#define MII_AGERE_ADDR_2 0x00001100 + +/* 32-bit ispControlStatus */ +enum { + ISP_CONTROL_NP_MASK = 0x0003, + ISP_CONTROL_NP_PCSR = 0x0000, + ISP_CONTROL_NP_HMCR = 0x0001, + ISP_CONTROL_NP_LRAMCR = 0x0002, + ISP_CONTROL_NP_PSR = 0x0003, + ISP_CONTROL_RI = 0x0008, + ISP_CONTROL_CI = 0x0010, + ISP_CONTROL_PI = 0x0020, + ISP_CONTROL_IN = 0x0040, + ISP_CONTROL_BE = 0x0080, + ISP_CONTROL_FN_MASK = 0x0700, + ISP_CONTROL_FN0_NET = 0x0400, + ISP_CONTROL_FN0_SCSI = 0x0500, + ISP_CONTROL_FN1_NET = 0x0600, + ISP_CONTROL_FN1_SCSI = 0x0700, + ISP_CONTROL_LINK_DN_0 = 0x0800, + ISP_CONTROL_LINK_DN_1 = 0x1000, + ISP_CONTROL_FSR = 0x2000, + ISP_CONTROL_FE = 0x4000, + ISP_CONTROL_SR = 0x8000, +}; + +/* 32-bit ispInterruptMaskReg */ +enum { + ISP_IMR_ENABLE_INT = 0x0004, + ISP_IMR_DISABLE_RESET_INT = 0x0008, + ISP_IMR_DISABLE_CMPL_INT = 0x0010, + ISP_IMR_DISABLE_PROC_INT = 0x0020, +}; + +/* 32-bit serialPortInterfaceReg */ +enum { + ISP_SERIAL_PORT_IF_CLK = 0x0001, + ISP_SERIAL_PORT_IF_CS = 0x0002, + ISP_SERIAL_PORT_IF_D0 = 0x0004, + ISP_SERIAL_PORT_IF_DI = 0x0008, + ISP_NVRAM_MASK = (0x000F << 16), + ISP_SERIAL_PORT_IF_WE = 0x0010, + ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F, + ISP_SERIAL_PORT_IF_SCI = 0x0400, + ISP_SERIAL_PORT_IF_SC0 = 0x0800, + ISP_SERIAL_PORT_IF_SCE = 0x1000, + ISP_SERIAL_PORT_IF_SDI = 0x2000, + ISP_SERIAL_PORT_IF_SDO = 0x4000, + ISP_SERIAL_PORT_IF_SDE = 0x8000, + ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00, +}; + +/* semaphoreReg */ +enum { + QL_RESOURCE_MASK_BASE_CODE = 0x7, + QL_RESOURCE_BITS_BASE_CODE = 0x4, + QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1), + QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4), + QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7), + QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10), + QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13), + QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)), + QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)), + QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)), + QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)), + QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)), +}; + + /* + * QL3XXX memory-mapped registers + * QL3XXX has 4 "pages" of registers, each page occupying + * 256 bytes. Each page has a "common" area at the start and then + * page-specific registers after that. 
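One way to picture the paged layout described above: a page-select field (compare the ISP_CONTROL_NP_* codes earlier in this header) decides what the page-specific offsets decode to, while the common area behaves the same regardless of the selected page. A toy user-space model, purely illustrative; the split point and helper are inventions, not the driver's actual accessors:

/* Toy model of a paged register window: offsets below the common area
 * always hit the same registers, higher offsets depend on the page. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_REGS 256
#define COMMON_AREA 128 /* illustrative split point */

static uint32_t pages[4][PAGE_SIZE_REGS / 4]; /* 4 pages of registers */
static int current_page; /* plays the NP field of ispControlStatus */

static uint32_t read_reg(int offset)
{
	int page = (offset < COMMON_AREA) ? 0 : current_page;
	return pages[page][offset / 4];
}

int main(void)
{
	pages[0][4] = 0xaaaa;  /* common register, same on every page */
	pages[2][60] = 0xbbbb; /* page-2 specific register */

	current_page = 2;
	printf("common: 0x%x page2: 0x%x\n", read_reg(16), read_reg(240));
	return 0;
}

This is why the driver carries ql_read_page0_reg()/ql_write_page2_reg()-style helpers: the page selection has to happen before the page-specific offset means anything.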
+ */ +struct ql3xxx_common_registers { + u32 MB0; /* Offset 0x00 */ + u32 MB1; /* Offset 0x04 */ + u32 MB2; /* Offset 0x08 */ + u32 MB3; /* Offset 0x0c */ + u32 MB4; /* Offset 0x10 */ + u32 MB5; /* Offset 0x14 */ + u32 MB6; /* Offset 0x18 */ + u32 MB7; /* Offset 0x1c */ + u32 flashBiosAddr; + u32 flashBiosData; + u32 ispControlStatus; + u32 ispInterruptMaskReg; + u32 serialPortInterfaceReg; + u32 semaphoreReg; + u32 reqQProducerIndex; + u32 rspQConsumerIndex; + + u32 rxLargeQProducerIndex; + u32 rxSmallQProducerIndex; + u32 arcMadiCommand; + u32 arcMadiData; +}; + +enum { + EXT_HW_CONFIG_SP_MASK = 0x0006, + EXT_HW_CONFIG_SP_NONE = 0x0000, + EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002, + EXT_HW_CONFIG_SP_ECC = 0x0004, + EXT_HW_CONFIG_SP_ECCx = 0x0006, + EXT_HW_CONFIG_SIZE_MASK = 0x0060, + EXT_HW_CONFIG_SIZE_128M = 0x0000, + EXT_HW_CONFIG_SIZE_256M = 0x0020, + EXT_HW_CONFIG_SIZE_512M = 0x0040, + EXT_HW_CONFIG_SIZE_INVALID = 0x0060, + EXT_HW_CONFIG_PD = 0x0080, + EXT_HW_CONFIG_FW = 0x0200, + EXT_HW_CONFIG_US = 0x0400, + EXT_HW_CONFIG_DCS_MASK = 0x1800, + EXT_HW_CONFIG_DCS_9MA = 0x0000, + EXT_HW_CONFIG_DCS_15MA = 0x0800, + EXT_HW_CONFIG_DCS_18MA = 0x1000, + EXT_HW_CONFIG_DCS_24MA = 0x1800, + EXT_HW_CONFIG_DDS_MASK = 0x6000, + EXT_HW_CONFIG_DDS_9MA = 0x0000, + EXT_HW_CONFIG_DDS_15MA = 0x2000, + EXT_HW_CONFIG_DDS_18MA = 0x4000, + EXT_HW_CONFIG_DDS_24MA = 0x6000, +}; + +/* InternalChipConfig */ +enum { + INTERNAL_CHIP_DM = 0x0001, + INTERNAL_CHIP_SD = 0x0002, + INTERNAL_CHIP_RAP_MASK = 0x000C, + INTERNAL_CHIP_RAP_RR = 0x0000, + INTERNAL_CHIP_RAP_NRM = 0x0004, + INTERNAL_CHIP_RAP_ERM = 0x0008, + INTERNAL_CHIP_RAP_ERMx = 0x000C, + INTERNAL_CHIP_WE = 0x0010, + INTERNAL_CHIP_EF = 0x0020, + INTERNAL_CHIP_FR = 0x0040, + INTERNAL_CHIP_FW = 0x0080, + INTERNAL_CHIP_FI = 0x0100, + INTERNAL_CHIP_FT = 0x0200, +}; + +/* portControl */ +enum { + PORT_CONTROL_DS = 0x0001, + PORT_CONTROL_HH = 0x0002, + PORT_CONTROL_EI = 0x0004, + PORT_CONTROL_ET = 0x0008, + PORT_CONTROL_EF = 0x0010, + PORT_CONTROL_DRM = 0x0020, + PORT_CONTROL_RLB = 0x0040, + PORT_CONTROL_RCB = 0x0080, + PORT_CONTROL_MAC = 0x0100, + PORT_CONTROL_IPV = 0x0200, + PORT_CONTROL_IFP = 0x0400, + PORT_CONTROL_ITP = 0x0800, + PORT_CONTROL_FI = 0x1000, + PORT_CONTROL_DFP = 0x2000, + PORT_CONTROL_OI = 0x4000, + PORT_CONTROL_CC = 0x8000, +}; + +/* portStatus */ +enum { + PORT_STATUS_SM0 = 0x0001, + PORT_STATUS_SM1 = 0x0002, + PORT_STATUS_X = 0x0008, + PORT_STATUS_DL = 0x0080, + PORT_STATUS_IC = 0x0200, + PORT_STATUS_MRC = 0x0400, + PORT_STATUS_NL = 0x0800, + PORT_STATUS_REV_ID_MASK = 0x7000, + PORT_STATUS_REV_ID_1 = 0x1000, + PORT_STATUS_REV_ID_2 = 0x2000, + PORT_STATUS_REV_ID_3 = 0x3000, + PORT_STATUS_64 = 0x8000, + PORT_STATUS_UP0 = 0x10000, + PORT_STATUS_AC0 = 0x20000, + PORT_STATUS_AE0 = 0x40000, + PORT_STATUS_UP1 = 0x100000, + PORT_STATUS_AC1 = 0x200000, + PORT_STATUS_AE1 = 0x400000, + PORT_STATUS_F0_ENABLED = 0x1000000, + PORT_STATUS_F1_ENABLED = 0x2000000, + PORT_STATUS_F2_ENABLED = 0x4000000, + PORT_STATUS_F3_ENABLED = 0x8000000, +}; + +/* macMIIMgmtControlReg */ +enum { + MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003, + MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000, + MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001, + MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002, + MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003, + MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008, + MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010, + MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020, + MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040, + MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080, +}; + +/* macMIIMgmtControlReg */ +enum { + 
MAC_MII_CONTROL_RC = 0x0001, + MAC_MII_CONTROL_SC = 0x0002, + MAC_MII_CONTROL_AS = 0x0004, + MAC_MII_CONTROL_NP = 0x0008, + MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070, + MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000, + MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010, + MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020, + MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030, + MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040, + MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050, + MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060, + MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070, + MAC_MII_CONTROL_RM = 0x8000, +}; + +/* macMIIStatusReg */ +enum { + MAC_MII_STATUS_BSY = 0x0001, + MAC_MII_STATUS_SC = 0x0002, + MAC_MII_STATUS_NV = 0x0004, +}; + +enum { + MAC_CONFIG_REG_PE = 0x0001, + MAC_CONFIG_REG_TF = 0x0002, + MAC_CONFIG_REG_RF = 0x0004, + MAC_CONFIG_REG_FD = 0x0008, + MAC_CONFIG_REG_GM = 0x0010, + MAC_CONFIG_REG_LB = 0x0020, + MAC_CONFIG_REG_SR = 0x8000, +}; + +enum { + MAC_HALF_DUPLEX_REG_ED = 0x10000, + MAC_HALF_DUPLEX_REG_NB = 0x20000, + MAC_HALF_DUPLEX_REG_BNB = 0x40000, + MAC_HALF_DUPLEX_REG_ALT = 0x80000, +}; + +enum { + IP_ADDR_INDEX_REG_MASK = 0x000f, + IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000, + IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001, + IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002, + IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003, + IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004, + IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005, + IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006, + IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, + IP_ADDR_INDEX_REG_6 = 0x0008, + IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030, + IP_ADDR_INDEX_REG_E = 0x0040, +}; +enum { + QL3032_PORT_CONTROL_DS = 0x0001, + QL3032_PORT_CONTROL_HH = 0x0002, + QL3032_PORT_CONTROL_EIv6 = 0x0004, + QL3032_PORT_CONTROL_EIv4 = 0x0008, + QL3032_PORT_CONTROL_ET = 0x0010, + QL3032_PORT_CONTROL_EF = 0x0020, + QL3032_PORT_CONTROL_DRM = 0x0040, + QL3032_PORT_CONTROL_RLB = 0x0080, + QL3032_PORT_CONTROL_RCB = 0x0100, + QL3032_PORT_CONTROL_KIE = 0x0200, +}; + +enum { + PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f, + PROBE_MUX_ADDR_REG_SYSCLK = 0x0000, + PROBE_MUX_ADDR_REG_PCICLK = 0x0040, + PROBE_MUX_ADDR_REG_NRXCLK = 0x0080, + PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0, + PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00, + PROBE_MUX_ADDR_REG_UP = 0x4000, + PROBE_MUX_ADDR_REG_RE = 0x8000, +}; + +enum { + STATISTICS_INDEX_REG_MASK = 0x01ff, + STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000, + STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001, + STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002, + STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003, + STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004, + STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005, + STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006, + STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007, + STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008, + STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009, + STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a, + STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b, + STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c, + STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d, + STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e, + STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f, + STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010, + STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011, + STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012, + STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013, + STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014, + STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015, + STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016, + STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017, + STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018, + STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019, + STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a, + 
STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b, + STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c, + STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d, + STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e, + STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f, + STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020, + STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021, + STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022, + STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023, + STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024, + STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025, + STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026, + STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027, + STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028, + STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029, + STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030, + STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031, + STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032, + STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033, + STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034, + STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035, + STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036, + STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037, + STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038, + STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f, +}; + +enum { + PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001, + PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002, + PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004, + PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008, + PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010, + PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020, + PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040, + PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080, + PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100, + PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200, + PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400, + PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800, + PORT_FATAL_ERROR_STATUS_BLE = 0x00001000, + PORT_FATAL_ERROR_STATUS_SPE = 0x00002000, + PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000, + PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000, + PORT_FATAL_ERROR_STATUS_ICE = 0x00010000, + PORT_FATAL_ERROR_STATUS_ILE = 0x00020000, + PORT_FATAL_ERROR_STATUS_OPE = 0x00040000, + PORT_FATAL_ERROR_STATUS_TA = 0x00080000, + PORT_FATAL_ERROR_STATUS_MA = 0x00100000, + PORT_FATAL_ERROR_STATUS_SCE = 0x00200000, + PORT_FATAL_ERROR_STATUS_RPE = 0x00400000, + PORT_FATAL_ERROR_STATUS_MPE = 0x00800000, + PORT_FATAL_ERROR_STATUS_OCE = 0x01000000, +}; + +/* + * port control and status page - page 0 + */ + +struct ql3xxx_port_registers { + struct ql3xxx_common_registers CommonRegs; + + u32 ExternalHWConfig; + u32 InternalChipConfig; + u32 portControl; + u32 portStatus; + u32 macAddrIndirectPtrReg; + u32 macAddrDataReg; + u32 macMIIMgmtControlReg; + u32 macMIIMgmtAddrReg; + u32 macMIIMgmtDataReg; + u32 macMIIStatusReg; + u32 mac0ConfigReg; + u32 mac0IpgIfgReg; + u32 mac0HalfDuplexReg; + u32 mac0MaxFrameLengthReg; + u32 mac0PauseThresholdReg; + u32 mac1ConfigReg; + u32 mac1IpgIfgReg; + u32 mac1HalfDuplexReg; + u32 mac1MaxFrameLengthReg; + u32 mac1PauseThresholdReg; + u32 ipAddrIndexReg; + u32 ipAddrDataReg; + u32 ipReassemblyTimeout; + u32 tcpMaxWindow; + u32 currentTcpTimestamp[2]; + u32 internalRamRWAddrReg; + u32 internalRamWDataReg; + u32 reclaimedBufferAddrRegLow; + u32 reclaimedBufferAddrRegHigh; + u32 tcpConfiguration; + u32 functionControl; + u32 fpgaRevID; + u32 localRamAddr; + u32 localRamDataAutoIncr; + u32 localRamDataNonIncr; + u32 gpOutput; + u32 gpInput; + u32 probeMuxAddr; + u32 probeMuxData; + u32 statisticsIndexReg; + u32 statisticsReadDataRegAutoIncr; + u32 statisticsReadDataRegNoIncr; + u32 PortFatalErrStatus; +}; + +/* + * port host 
memory config page - page 1 + */ +struct ql3xxx_host_memory_registers { + struct ql3xxx_common_registers CommonRegs; + + u32 reserved[12]; + + /* Network Request Queue */ + u32 reqConsumerIndex; + u32 reqConsumerIndexAddrLow; + u32 reqConsumerIndexAddrHigh; + u32 reqBaseAddrLow; + u32 reqBaseAddrHigh; + u32 reqLength; + + /* Network Completion Queue */ + u32 rspProducerIndex; + u32 rspProducerIndexAddrLow; + u32 rspProducerIndexAddrHigh; + u32 rspBaseAddrLow; + u32 rspBaseAddrHigh; + u32 rspLength; + + /* RX Large Buffer Queue */ + u32 rxLargeQConsumerIndex; + u32 rxLargeQBaseAddrLow; + u32 rxLargeQBaseAddrHigh; + u32 rxLargeQLength; + u32 rxLargeBufferLength; + + /* RX Small Buffer Queue */ + u32 rxSmallQConsumerIndex; + u32 rxSmallQBaseAddrLow; + u32 rxSmallQBaseAddrHigh; + u32 rxSmallQLength; + u32 rxSmallBufferLength; + +}; + +/* + * port local RAM page - page 2 + */ +struct ql3xxx_local_ram_registers { + struct ql3xxx_common_registers CommonRegs; + u32 bufletSize; + u32 maxBufletCount; + u32 currentBufletCount; + u32 reserved; + u32 freeBufletThresholdLow; + u32 freeBufletThresholdHigh; + u32 ipHashTableBase; + u32 ipHashTableCount; + u32 tcpHashTableBase; + u32 tcpHashTableCount; + u32 ncbBase; + u32 maxNcbCount; + u32 currentNcbCount; + u32 drbBase; + u32 maxDrbCount; + u32 currentDrbCount; +}; + +/* + * definitions for Semaphore bits in Semaphore/Serial NVRAM interface register + */ + +#define LS_64BITS(x) (u32)(0xffffffff & ((u64)x)) +#define MS_64BITS(x) (u32)(0xffffffff & (((u64)x)>>16>>16) ) + +/* + * I/O register + */ + +enum { + CONTROL_REG = 0, + STATUS_REG = 1, + PHY_STAT_LINK_UP = 0x0004, + PHY_CTRL_LOOPBACK = 0x4000, + + PETBI_CONTROL_REG = 0x00, + PETBI_CTRL_ALL_PARAMS = 0x7140, + PETBI_CTRL_SOFT_RESET = 0x8000, + PETBI_CTRL_AUTO_NEG = 0x1000, + PETBI_CTRL_RESTART_NEG = 0x0200, + PETBI_CTRL_FULL_DUPLEX = 0x0100, + PETBI_CTRL_SPEED_1000 = 0x0040, + + PETBI_STATUS_REG = 0x01, + PETBI_STAT_NEG_DONE = 0x0020, + PETBI_STAT_LINK_UP = 0x0004, + + PETBI_NEG_ADVER = 0x04, + PETBI_NEG_PAUSE = 0x0080, + PETBI_NEG_PAUSE_MASK = 0x0180, + PETBI_NEG_DUPLEX = 0x0020, + PETBI_NEG_DUPLEX_MASK = 0x0060, + + PETBI_NEG_PARTNER = 0x05, + PETBI_NEG_ERROR_MASK = 0x3000, + + PETBI_EXPANSION_REG = 0x06, + PETBI_EXP_PAGE_RX = 0x0002, + + PHY_GIG_CONTROL = 9, + PHY_GIG_ENABLE_MAN = 0x1000, /* Enable Master/Slave Manual Config*/ + PHY_GIG_SET_MASTER = 0x0800, /* Set Master (slave if clear)*/ + PHY_GIG_ALL_PARAMS = 0x0300, + PHY_GIG_ADV_1000F = 0x0200, + PHY_GIG_ADV_1000H = 0x0100, + + PHY_NEG_ADVER = 4, + PHY_NEG_ALL_PARAMS = 0x0fe0, + PHY_NEG_ASY_PAUSE = 0x0800, + PHY_NEG_SYM_PAUSE = 0x0400, + PHY_NEG_ADV_SPEED = 0x01e0, + PHY_NEG_ADV_100F = 0x0100, + PHY_NEG_ADV_100H = 0x0080, + PHY_NEG_ADV_10F = 0x0040, + PHY_NEG_ADV_10H = 0x0020, + + PETBI_TBI_CTRL = 0x11, + PETBI_TBI_RESET = 0x8000, + PETBI_TBI_AUTO_SENSE = 0x0100, + PETBI_TBI_SERDES_MODE = 0x0010, + PETBI_TBI_SERDES_WRAP = 0x0002, + + AUX_CONTROL_STATUS = 0x1c, + PHY_AUX_NEG_DONE = 0x8000, + PHY_NEG_PARTNER = 5, + PHY_AUX_DUPLEX_STAT = 0x0020, + PHY_AUX_SPEED_STAT = 0x0018, + PHY_AUX_NO_HW_STRAP = 0x0004, + PHY_AUX_RESET_STICK = 0x0002, + PHY_NEG_PAUSE = 0x0400, + PHY_CTRL_SOFT_RESET = 0x8000, + PHY_CTRL_AUTO_NEG = 0x1000, + PHY_CTRL_RESTART_NEG = 0x0200, +}; +enum { +/* AM29LV Flash definitions */ + FM93C56A_START = 0x1, +/* Commands */ + FM93C56A_READ = 0x2, + FM93C56A_WEN = 0x0, + FM93C56A_WRITE = 0x1, + FM93C56A_WRITE_ALL = 0x0, + FM93C56A_WDS = 0x0, + FM93C56A_ERASE = 0x3, + FM93C56A_ERASE_ALL = 0x0, +/* Command Extensions */ + 
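+ /*
+  * Editor's note (hedged): 93C56-style serial EEPROMs frame every
+  * operation as a start bit (FM93C56A_START), FM93C56A_CMD_BITS (2)
+  * opcode bits, then the address bits. Opcode 0x0 is shared by
+  * several operations; the two most significant address bits carry
+  * the extension values below (e.g. write-enable shifts in
+  * FM93C56A_WEN_EXT = 0x3, write-disable FM93C56A_WDS_EXT = 0x0).
+  */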
FM93C56A_WEN_EXT = 0x3, + FM93C56A_WRITE_ALL_EXT = 0x1, + FM93C56A_WDS_EXT = 0x0, + FM93C56A_ERASE_ALL_EXT = 0x2, +/* Special Bits */ + FM93C56A_READ_DUMMY_BITS = 1, + FM93C56A_READY = 0, + FM93C56A_BUSY = 1, + FM93C56A_CMD_BITS = 2, +/* AM29LV Flash definitions */ + FM93C56A_SIZE_8 = 0x100, + FM93C56A_SIZE_16 = 0x80, + FM93C66A_SIZE_8 = 0x200, + FM93C66A_SIZE_16 = 0x100, + FM93C86A_SIZE_16 = 0x400, +/* Address Bits */ + FM93C56A_NO_ADDR_BITS_16 = 8, + FM93C56A_NO_ADDR_BITS_8 = 9, + FM93C86A_NO_ADDR_BITS_16 = 10, +/* Data Bits */ + FM93C56A_DATA_BITS_16 = 16, + FM93C56A_DATA_BITS_8 = 8, +}; +enum { +/* Auburn Bits */ + AUBURN_EEPROM_DI = 0x8, + AUBURN_EEPROM_DI_0 = 0x0, + AUBURN_EEPROM_DI_1 = 0x8, + AUBURN_EEPROM_DO = 0x4, + AUBURN_EEPROM_DO_0 = 0x0, + AUBURN_EEPROM_DO_1 = 0x4, + AUBURN_EEPROM_CS = 0x2, + AUBURN_EEPROM_CS_0 = 0x0, + AUBURN_EEPROM_CS_1 = 0x2, + AUBURN_EEPROM_CLK_RISE = 0x1, + AUBURN_EEPROM_CLK_FALL = 0x0, +}; +enum {EEPROM_SIZE = FM93C86A_SIZE_16, + EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16, + EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16, +}; + +/* + * MAC Config data structure + */ + struct eeprom_port_cfg { + u16 etherMtu_mac; + u16 pauseThreshold_mac; + u16 resumeThreshold_mac; + u16 portConfiguration; +#define PORT_CONFIG_DEFAULT 0xf700 +#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000 +#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000 +#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000 +#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000 +#define PORT_CONFIG_1000MB_SPEED 0x0400 +#define PORT_CONFIG_100MB_SPEED 0x0200 +#define PORT_CONFIG_10MB_SPEED 0x0100 +#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00 + u16 reserved[12]; + +}; + +/* + * BIOS data structure + */ +struct eeprom_bios_cfg { + u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12; + + u8 bootID0:7, boodID0Valid:1; + u8 bootLun0[8]; + + u8 bootID1:7, boodID1Valid:1; + u8 bootLun1[8]; + + u16 MaxLunsTrgt; + u8 reserved[10]; +}; + +/* + * Function Specific Data structure + */ +struct eeprom_function_cfg { + u8 reserved[30]; + u16 macAddress[3]; + u16 macAddressSecondary[3]; + + u16 subsysVendorId; + u16 subsysDeviceId; +}; + +/* + * EEPROM format + */ +struct eeprom_data { + u8 asicId[4]; + u16 version_and_numPorts; /* together to avoid endianness crap */ + u16 boardId; + +#define EEPROM_BOARDID_STR_SIZE 16 +#define EEPROM_SERIAL_NUM_SIZE 16 + + u8 boardIdStr[16]; + u8 serialNumber[16]; + u16 extHwConfig; + struct eeprom_port_cfg macCfg_port0; + struct eeprom_port_cfg macCfg_port1; + u16 bufletSize; + u16 bufletCount; + u16 tcpWindowThreshold50; + u16 tcpWindowThreshold25; + u16 tcpWindowThreshold0; + u16 ipHashTableBaseHi; + u16 ipHashTableBaseLo; + u16 ipHashTableSize; + u16 tcpHashTableBaseHi; + u16 tcpHashTableBaseLo; + u16 tcpHashTableSize; + u16 ncbTableBaseHi; + u16 ncbTableBaseLo; + u16 ncbTableSize; + u16 drbTableBaseHi; + u16 drbTableBaseLo; + u16 drbTableSize; + u16 reserved_142[4]; + u16 ipReassemblyTimeout; + u16 tcpMaxWindowSize; + u16 ipSecurity; +#define IPSEC_CONFIG_PRESENT 0x0001 + u8 reserved_156[294]; + u16 qDebug[8]; + struct eeprom_function_cfg funcCfg_fn0; + u16 reserved_510; + u8 oemSpace[432]; + struct eeprom_bios_cfg biosCfg_fn1; + struct eeprom_function_cfg funcCfg_fn1; + u16 reserved_1022; + u8 reserved_1024[464]; + struct eeprom_function_cfg funcCfg_fn2; + u16 reserved_1534; + u8 reserved_1536[432]; + struct eeprom_bios_cfg biosCfg_fn3; + struct eeprom_function_cfg funcCfg_fn3; + u16 checksum; +}; + +/* + * General definitions... 
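+ * Editor's note, a worked example of the buffer-queue arithmetic that
+ * follows: struct lrg_buf_q_entry holds eight low/high address pairs
+ * (64 bytes) and struct bufq_addr_element is one pair (8 bytes), so
+ * QL_ADDR_ELE_PER_BUFQ_ENTRY = 64 / 8 = 8 and NUM_SMALL_BUFFERS =
+ * NUM_SBUFQ_ENTRIES * 8 = 64 * 8 = 512. Beware that NUM_SMALL_BUFFERS
+ * expands without parentheses, so operator precedence can bite in
+ * arithmetic contexts.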
+ */ + +/* + * Below are a number compiler switches for controlling driver behavior. + * Some are not supported under certain conditions and are notated as such. + */ + +#define QL3XXX_VENDOR_ID 0x1077 +#define QL3022_DEVICE_ID 0x3022 +#define QL3032_DEVICE_ID 0x3032 + +/* MTU & Frame Size stuff */ +#define NORMAL_MTU_SIZE ETH_DATA_LEN +#define JUMBO_MTU_SIZE 9000 +#define VLAN_ID_LEN 2 + +/* Request Queue Related Definitions */ +#define NUM_REQ_Q_ENTRIES 256 /* so that 64 * 64 = 4096 (1 page) */ + +/* Response Queue Related Definitions */ +#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */ + +/* Transmit and Receive Buffers */ +#define NUM_LBUFQ_ENTRIES 128 +#define JUMBO_NUM_LBUFQ_ENTRIES 32 +#define NUM_SBUFQ_ENTRIES 64 +#define QL_SMALL_BUFFER_SIZE 32 +#define QL_ADDR_ELE_PER_BUFQ_ENTRY \ +(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element)) + /* Each send has at least control block. This is how many we keep. */ +#define NUM_SMALL_BUFFERS NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY + +#define QL_HEADER_SPACE 32 /* make header space at top of skb. */ +/* + * Large & Small Buffers for Receives + */ +struct lrg_buf_q_entry { + + __le32 addr0_lower; +#define IAL_LAST_ENTRY 0x00000001 +#define IAL_CONT_ENTRY 0x00000002 +#define IAL_FLAG_MASK 0x00000003 + __le32 addr0_upper; + __le32 addr1_lower; + __le32 addr1_upper; + __le32 addr2_lower; + __le32 addr2_upper; + __le32 addr3_lower; + __le32 addr3_upper; + __le32 addr4_lower; + __le32 addr4_upper; + __le32 addr5_lower; + __le32 addr5_upper; + __le32 addr6_lower; + __le32 addr6_upper; + __le32 addr7_lower; + __le32 addr7_upper; + +}; + +struct bufq_addr_element { + __le32 addr_low; + __le32 addr_high; +}; + +#define QL_NO_RESET 0 +#define QL_DO_RESET 1 + +enum link_state_t { + LS_UNKNOWN = 0, + LS_DOWN, + LS_DEGRADE, + LS_RECOVER, + LS_UP, +}; + +struct ql_rcv_buf_cb { + struct ql_rcv_buf_cb *next; + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); + __le32 buf_phy_addr_low; + __le32 buf_phy_addr_high; + int index; +}; + +/* + * Original IOCB has 3 sg entries: + * first points to skb-data area + * second points to first frag + * third points to next oal. + * OAL has 5 entries: + * 1 thru 4 point to frags + * fifth points to next oal. + */ +#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1) + +struct oal_entry { + __le32 dma_lo; + __le32 dma_hi; + __le32 len; +#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ +#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */ +}; + +struct oal { + struct oal_entry oal_entry[5]; +}; + +struct map_list { + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct ql_tx_buf_cb { + struct sk_buff *skb; + struct ob_mac_iocb_req *queue_entry ; + int seg_count; + struct oal *oal; + struct map_list map[MAX_SKB_FRAGS+1]; +}; + +/* definitions for type field */ +#define QL_BUF_TYPE_MACIOCB 0x01 +#define QL_BUF_TYPE_IPIOCB 0x02 +#define QL_BUF_TYPE_TCPIOCB 0x03 + +/* qdev->flags definitions. */ +enum { QL_RESET_DONE = 1, /* Reset finished. */ + QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */ + QL_RESET_START = 3, /* Please reset the chip. */ + QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */ + QL_TX_TIMEOUT = 5, /* Timeout in progress. */ + QL_LINK_MASTER = 6, /* This driver controls the link. */ + QL_ADAPTER_UP = 7, /* Adapter has been brought up. */ + QL_THREAD_UP = 8, /* This flag is available. */ + QL_LINK_UP = 9, /* Link Status. 
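+ * Editor's note: these enumerators are bit numbers, not masks; they
+ * are intended for the atomic bitops on the adapter's flags word,
+ * e.g. set_bit(QL_LINK_UP, &qdev->flags) and
+ * test_bit(QL_ADAPTER_UP, &qdev->flags).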
*/ + QL_ALLOC_REQ_RSP_Q_DONE = 10, + QL_ALLOC_BUFQS_DONE = 11, + QL_ALLOC_SMALL_BUF_DONE = 12, + QL_LINK_OPTICAL = 13, + QL_MSI_ENABLED = 14, +}; + +/* + * ql3_adapter - The main Adapter structure definition. + * This structure has all fields relevant to the hardware. + */ + +struct ql3_adapter { + u32 reserved_00; + unsigned long flags; + + /* PCI Configuration information for this device */ + struct pci_dev *pdev; + struct net_device *ndev; /* Parent NET device */ + + struct napi_struct napi; + + /* Hardware information */ + u8 chip_rev_id; + u8 pci_slot; + u8 pci_width; + u8 pci_x; + u32 msi; + int index; + struct timer_list adapter_timer; /* timer used for various functions */ + + spinlock_t adapter_lock; + spinlock_t hw_lock; + + /* PCI Bus Relative Register Addresses */ + u8 __iomem *mmap_virt_base; /* stores return value from ioremap() */ + struct ql3xxx_port_registers __iomem *mem_map_registers; + u32 current_page; /* tracks current register page */ + + u32 msg_enable; + u8 reserved_01[2]; + u8 reserved_02[2]; + + /* Page for Shadow Registers */ + void *shadow_reg_virt_addr; + dma_addr_t shadow_reg_phy_addr; + + /* Net Request Queue */ + u32 req_q_size; + u32 reserved_03; + struct ob_mac_iocb_req *req_q_virt_addr; + dma_addr_t req_q_phy_addr; + u16 req_producer_index; + u16 reserved_04; + u16 *preq_consumer_index; + u32 req_consumer_index_phy_addr_high; + u32 req_consumer_index_phy_addr_low; + atomic_t tx_count; + struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES]; + + /* Net Response Queue */ + u32 rsp_q_size; + u32 eeprom_cmd_data; + struct net_rsp_iocb *rsp_q_virt_addr; + dma_addr_t rsp_q_phy_addr; + struct net_rsp_iocb *rsp_current; + u16 rsp_consumer_index; + u16 reserved_06; + volatile __le32 *prsp_producer_index; + u32 rsp_producer_index_phy_addr_high; + u32 rsp_producer_index_phy_addr_low; + + /* Large Buffer Queue */ + u32 lrg_buf_q_alloc_size; + u32 lrg_buf_q_size; + void *lrg_buf_q_alloc_virt_addr; + void *lrg_buf_q_virt_addr; + dma_addr_t lrg_buf_q_alloc_phy_addr; + dma_addr_t lrg_buf_q_phy_addr; + u32 lrg_buf_q_producer_index; + u32 lrg_buf_release_cnt; + struct bufq_addr_element *lrg_buf_next_free; + u32 num_large_buffers; + u32 num_lbufq_entries; + + /* Large (Receive) Buffers */ + struct ql_rcv_buf_cb *lrg_buf; + struct ql_rcv_buf_cb *lrg_buf_free_head; + struct ql_rcv_buf_cb *lrg_buf_free_tail; + u32 lrg_buf_free_count; + u32 lrg_buffer_len; + u32 lrg_buf_index; + u32 lrg_buf_skb_check; + + /* Small Buffer Queue */ + u32 small_buf_q_alloc_size; + u32 small_buf_q_size; + u32 small_buf_q_producer_index; + void *small_buf_q_alloc_virt_addr; + void *small_buf_q_virt_addr; + dma_addr_t small_buf_q_alloc_phy_addr; + dma_addr_t small_buf_q_phy_addr; + u32 small_buf_index; + + /* Small (Receive) Buffers */ + void *small_buf_virt_addr; + dma_addr_t small_buf_phy_addr; + u32 small_buf_phy_addr_low; + u32 small_buf_phy_addr_high; + u32 small_buf_release_cnt; + u32 small_buf_total_size; + + struct eeprom_data nvram_data; + u32 port_link_state; + + /* 4022 specific */ + u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ + u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ + u32 mac_ob_opcode; /* Opcode to use on mac transmission */ + u32 mb_bit_mask; /* MA Bits mask to use on transmission */ + u32 numPorts; + struct workqueue_struct *workqueue; + struct delayed_work reset_work; + struct delayed_work tx_timeout_work; + struct delayed_work link_state_work; + u32 max_frame_size; + u32 device_id; + u16 phyType; 
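+	/*
+	 * Editor's note (hedged): mac_index, PHYAddr, mac_ob_opcode and
+	 * mb_bit_mask tie back to the per-function constants at the top
+	 * of this header; presumably function 0 transmits with
+	 * OPCODE_OB_MAC_IOCB_FN0 / FN0_MA_BITS_MASK (0x01 / 0x00) and
+	 * function 1 with OPCODE_OB_MAC_IOCB_FN2 / FN1_MA_BITS_MASK
+	 * (0x21 / 0x80).
+	 */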
+}; + +#endif /* _QLA3XXX_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile new file mode 100644 index 0000000..ddba83e --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices +# + +obj-$(CONFIG_QLCNIC) := qlcnic.o + +qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \ + qlcnic_ethtool.o qlcnic_ctx.o diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h new file mode 100644 index 0000000..53c6e5d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -0,0 +1,1555 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#ifndef _QLCNIC_H_ +#define _QLCNIC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include "qlcnic_hdr.h" + +#define _QLCNIC_LINUX_MAJOR 5 +#define _QLCNIC_LINUX_MINOR 0 +#define _QLCNIC_LINUX_SUBVERSION 22 +#define QLCNIC_LINUX_VERSIONID "5.0.22" +#define QLCNIC_DRV_IDC_VER 0x01 +#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ + (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) + +#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) +#define _major(v) (((v) >> 24) & 0xff) +#define _minor(v) (((v) >> 16) & 0xff) +#define _build(v) ((v) & 0xffff) + +/* version in image has weird encoding: + * 7:0 - major + * 15:8 - minor + * 31:16 - build (little endian) + */ +#define QLCNIC_DECODE_VERSION(v) \ + QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) + +#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2) +#define QLCNIC_NUM_FLASH_SECTORS (64) +#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024) +#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \ + * QLCNIC_FLASH_SECTOR_SIZE) + +#define RCV_DESC_RINGSIZE(rds_ring) \ + (sizeof(struct rcv_desc) * (rds_ring)->num_desc) +#define RCV_BUFF_RINGSIZE(rds_ring) \ + (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc) +#define STATUS_DESC_RINGSIZE(sds_ring) \ + (sizeof(struct status_desc) * (sds_ring)->num_desc) +#define TX_BUFF_RINGSIZE(tx_ring) \ + (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc) +#define TX_DESC_RINGSIZE(tx_ring) \ + (sizeof(struct cmd_desc_type0) * tx_ring->num_desc) + +#define QLCNIC_P3P_A0 0x50 + +#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0) + +#define FIRST_PAGE_GROUP_START 0 +#define FIRST_PAGE_GROUP_END 0x100000 + +#define P3P_MAX_MTU (9600) +#define P3P_MIN_MTU (68) +#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */ + +#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN) +#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU) +#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048 +#define QLCNIC_LRO_BUFFER_EXTRA 2048 + +/* Opcodes to be used with the commands */ +#define TX_ETHER_PKT 0x01 +#define TX_TCP_PKT 0x02 +#define TX_UDP_PKT 0x03 +#define TX_IP_PKT 0x04 +#define TX_TCP_LSO 0x05 +#define TX_TCP_LSO6 0x06 +#define TX_TCPV6_PKT 0x0b +#define TX_UDPV6_PKT 0x0c + +/* Tx defines */ +#define QLCNIC_MAX_FRAGS_PER_TX 14 +#define MAX_TSO_HEADER_DESC 2 +#define MGMT_CMD_DESC_RESV 4 +#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + + MGMT_CMD_DESC_RESV) +#define QLCNIC_MAX_TX_TIMEOUTS 2 + +/* 
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+#define NUM_RCV_DESC_RINGS 3
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_RCV_DESCRIPTORS_VF 2048
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+#define DEFAULT_RCV_DESCRIPTORS_VF 1024
+#define MAX_RDS_RINGS 2
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
+
+/*
+ * The following data structures describe the descriptors that will be used.
+ * The added fields tcpHdrSize and ipHdrSize are needed only when
+ * doing LSO (on packets above the 1500 byte size).
+ */
+
+#define FLAGS_VLAN_TAGGED 0x10
+#define FLAGS_VLAN_OOB 0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+ ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ ((_desc)->flags_opcode |= \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+ ((_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
+
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 reference_handle;
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ __le16 conn_id; /* IPSec offload only */
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ u8 eth_addr[ETH_ALEN];
+ __le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+} __packed;
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD 0x03
+#define QLCNIC_RXPKT_DESC 0x04
+#define QLCNIC_OLD_RXPKT_DESC 0x3f
+#define QLCNIC_RESPONSE_DESC 0x05
+#define QLCNIC_LRO_DESC 0x12
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_LOOP 0
+#define STATUS_CKSUM_OK 2
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
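+/*
+ * Editor's worked example (not part of the original patch): for a
+ * two-fragment, 1514-byte TX_ETHER_PKT,
+ * qlcnic_set_tx_flags_opcode(desc, 0, TX_ETHER_PKT) ORs in
+ * cpu_to_le16((0x01 & 0x3f) << 7) == cpu_to_le16(0x0080), and
+ * qlcnic_set_tx_frags_len(desc, 2, 1514) stores
+ * cpu_to_le32((1514 << 8) | 2) == cpu_to_le32(0x0005ea02), matching
+ * the "31:8 total len, 7:0 frag count" layout documented above.
+ */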
+#define qlcnic_get_sts_status(sts_data) \ + (((sts_data) >> 4) & 0x0F) +#define qlcnic_get_sts_type(sts_data) \ + (((sts_data) >> 8) & 0x0F) +#define qlcnic_get_sts_totallength(sts_data) \ + (((sts_data) >> 12) & 0xFFFF) +#define qlcnic_get_sts_refhandle(sts_data) \ + (((sts_data) >> 28) & 0xFFFF) +#define qlcnic_get_sts_prot(sts_data) \ + (((sts_data) >> 44) & 0x0F) +#define qlcnic_get_sts_pkt_offset(sts_data) \ + (((sts_data) >> 48) & 0x1F) +#define qlcnic_get_sts_desc_cnt(sts_data) \ + (((sts_data) >> 53) & 0x7) +#define qlcnic_get_sts_opcode(sts_data) \ + (((sts_data) >> 58) & 0x03F) + +#define qlcnic_get_lro_sts_refhandle(sts_data) \ + ((sts_data) & 0x0FFFF) +#define qlcnic_get_lro_sts_length(sts_data) \ + (((sts_data) >> 16) & 0x0FFFF) +#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \ + (((sts_data) >> 32) & 0x0FF) +#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \ + (((sts_data) >> 40) & 0x0FF) +#define qlcnic_get_lro_sts_timestamp(sts_data) \ + (((sts_data) >> 48) & 0x1) +#define qlcnic_get_lro_sts_type(sts_data) \ + (((sts_data) >> 49) & 0x7) +#define qlcnic_get_lro_sts_push_flag(sts_data) \ + (((sts_data) >> 52) & 0x1) +#define qlcnic_get_lro_sts_seq_number(sts_data) \ + ((sts_data) & 0x0FFFFFFFF) + + +struct status_desc { + __le64 status_desc_data[2]; +} __attribute__ ((aligned(16))); + +/* UNIFIED ROMIMAGE */ +#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000 +#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0 +#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6 +#define QLCNIC_UNI_DIR_SECT_FW 0x7 + +/*Offsets */ +#define QLCNIC_UNI_CHIP_REV_OFF 10 +#define QLCNIC_UNI_FLAGS_OFF 11 +#define QLCNIC_UNI_BIOS_VERSION_OFF 12 +#define QLCNIC_UNI_BOOTLD_IDX_OFF 27 +#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29 + +struct uni_table_desc{ + u32 findex; + u32 num_entries; + u32 entry_size; + u32 reserved[5]; +}; + +struct uni_data_desc{ + u32 findex; + u32 size; + u32 reserved[5]; +}; + +/* Flash Defines and Structures */ +#define QLCNIC_FLT_LOCATION 0x3F1000 +#define QLCNIC_FW_IMAGE_REGION 0x74 +#define QLCNIC_BOOTLD_REGION 0X72 +struct qlcnic_flt_header { + u16 version; + u16 len; + u16 checksum; + u16 reserved; +}; + +struct qlcnic_flt_entry { + u8 region; + u8 reserved0; + u8 attrib; + u8 reserved1; + u32 size; + u32 start_addr; + u32 end_addr; +}; + +/* Magic number to let user know flash is programmed */ +#define QLCNIC_BDINFO_MAGIC 0x12345678 + +#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021 +#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022 +#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023 +#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024 +#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025 +#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026 +#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027 +#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028 +#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029 +#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a +#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b +#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031 +#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032 +#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080 + +#define QLCNIC_MSIX_TABLE_OFFSET 0x44 + +/* Flash memory map */ +#define QLCNIC_BRDCFG_START 0x4000 /* board config */ +#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ +#define QLCNIC_IMAGE_START 0x43000 /* compressed image */ +#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ + +#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) +#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) +#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c) +#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c) + +#define QLCNIC_BRDTYPE_OFFSET 
(QLCNIC_BRDCFG_START+0x8) +#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128) + +#define QLCNIC_FW_MIN_SIZE (0x3fffff) +#define QLCNIC_UNIFIED_ROMIMAGE 0 +#define QLCNIC_FLASH_ROMIMAGE 1 +#define QLCNIC_UNKNOWN_ROMIMAGE 0xff + +#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin" +#define QLCNIC_FLASH_ROMIMAGE_NAME "flash" + +extern char qlcnic_driver_name[]; + +/* Number of status descriptors to handle per interrupt */ +#define MAX_STATUS_HANDLE (64) + +/* + * qlcnic_skb_frag{} is to contain mapping info for each SG list. This + * has to be freed when DMA is complete. This is part of qlcnic_tx_buffer{}. + */ +struct qlcnic_skb_frag { + u64 dma; + u64 length; +}; + +/* Following defines are for the state of the buffers */ +#define QLCNIC_BUFFER_FREE 0 +#define QLCNIC_BUFFER_BUSY 1 + +/* + * There will be one qlcnic_buffer per skb packet. These will be + * used to save the dma info for pci_unmap_page() + */ +struct qlcnic_cmd_buffer { + struct sk_buff *skb; + struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1]; + u32 frag_count; +}; + +/* In rx_buffer, we do not need multiple fragments as is a single buffer */ +struct qlcnic_rx_buffer { + u16 ref_handle; + struct sk_buff *skb; + struct list_head list; + u64 dma; +}; + +/* Board types */ +#define QLCNIC_GBE 0x01 +#define QLCNIC_XGBE 0x02 + +/* + * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is + * adjusted based on configured MTU. + */ +#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3 +#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256 + +#define QLCNIC_INTR_DEFAULT 0x04 +#define QLCNIC_CONFIG_INTR_COALESCE 3 + +struct qlcnic_nic_intr_coalesce { + u8 type; + u8 sts_ring_mask; + u16 rx_packets; + u16 rx_time_us; + u16 flag; + u32 timer_out; +}; + +struct qlcnic_dump_template_hdr { + __le32 type; + __le32 offset; + __le32 size; + __le32 cap_mask; + __le32 num_entries; + __le32 version; + __le32 timestamp; + __le32 checksum; + __le32 drv_cap_mask; + __le32 sys_info[3]; + __le32 saved_state[16]; + __le32 cap_sizes[8]; + __le32 rsvd[0]; +}; + +struct qlcnic_fw_dump { + u8 clr; /* flag to indicate if dump is cleared */ + u8 enable; /* enable/disable dump */ + u32 size; /* total size of the dump */ + void *data; /* dump data area */ + struct qlcnic_dump_template_hdr *tmpl_hdr; +}; + +/* + * One hardware_context{} per adapter + * contains interrupt info as well shared hardware info. + */ +struct qlcnic_hardware_context { + void __iomem *pci_base0; + void __iomem *ocm_win_crb; + + unsigned long pci_len0; + + rwlock_t crb_lock; + struct mutex mem_lock; + + u8 revision_id; + u8 pci_func; + u8 linkup; + u8 loopback_state; + u16 port_type; + u16 board_type; + + struct qlcnic_nic_intr_coalesce coal; + struct qlcnic_fw_dump fw_dump; +}; + +struct qlcnic_adapter_stats { + u64 xmitcalled; + u64 xmitfinished; + u64 rxdropped; + u64 txdropped; + u64 csummed; + u64 rx_pkts; + u64 lro_pkts; + u64 rxbytes; + u64 txbytes; + u64 lrobytes; + u64 lso_frames; + u64 xmit_on; + u64 xmit_off; + u64 skb_alloc_failure; + u64 null_rxbuf; + u64 rx_dma_map_error; + u64 tx_dma_map_error; +}; + +/* + * Rcv Descriptor Context. One such per Rcv Descriptor. There may + * be one Rcv Descriptor for normal packets, one for jumbo and may be others. 
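+ * Editor's sizing note (hedged): a 1G-port normal ring of
+ * MAX_RCV_DESCRIPTORS_1G (4096) descriptors needs RCV_DESC_RINGSIZE =
+ * 4096 * sizeof(struct rcv_desc) = 4096 * 16 = 64 KiB of coherent DMA
+ * memory, plus RCV_BUFF_RINGSIZE for the host-side qlcnic_rx_buffer
+ * bookkeeping array.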
+ */ +struct qlcnic_host_rds_ring { + void __iomem *crb_rcv_producer; + struct rcv_desc *desc_head; + struct qlcnic_rx_buffer *rx_buf_arr; + u32 num_desc; + u32 producer; + u32 dma_size; + u32 skb_size; + u32 flags; + struct list_head free_list; + spinlock_t lock; + dma_addr_t phys_addr; +} ____cacheline_internodealigned_in_smp; + +struct qlcnic_host_sds_ring { + u32 consumer; + u32 num_desc; + void __iomem *crb_sts_consumer; + + struct status_desc *desc_head; + struct qlcnic_adapter *adapter; + struct napi_struct napi; + struct list_head free_list[NUM_RCV_DESC_RINGS]; + + void __iomem *crb_intr_mask; + int irq; + + dma_addr_t phys_addr; + char name[IFNAMSIZ+4]; +} ____cacheline_internodealigned_in_smp; + +struct qlcnic_host_tx_ring { + u32 producer; + u32 sw_consumer; + u32 num_desc; + void __iomem *crb_cmd_producer; + struct cmd_desc_type0 *desc_head; + struct qlcnic_cmd_buffer *cmd_buf_arr; + __le32 *hw_consumer; + + dma_addr_t phys_addr; + dma_addr_t hw_cons_phys_addr; + struct netdev_queue *txq; +} ____cacheline_internodealigned_in_smp; + +/* + * Receive context. There is one such structure per instance of the + * receive processing. Any state information that is relevant to + * the receive, and is must be in this structure. The global data may be + * present elsewhere. + */ +struct qlcnic_recv_context { + struct qlcnic_host_rds_ring *rds_rings; + struct qlcnic_host_sds_ring *sds_rings; + u32 state; + u16 context_id; + u16 virt_port; + +}; + +/* HW context creation */ + +#define QLCNIC_OS_CRB_RETRY_COUNT 4000 +#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \ + (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) + +#define QLCNIC_CDRP_CMD_BIT 0x80000000 + +/* + * All responses must have the QLCNIC_CDRP_CMD_BIT cleared + * in the crb QLCNIC_CDRP_CRB_OFFSET. + */ +#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp) +#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0) + +#define QLCNIC_CDRP_RSP_OK 0x00000001 +#define QLCNIC_CDRP_RSP_FAIL 0x00000002 +#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003 + +/* + * All commands must have the QLCNIC_CDRP_CMD_BIT set in + * the crb QLCNIC_CDRP_CRB_OFFSET. 
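+ * Editor's worked example (values illustrative): for PCI function 0
+ * and IDC version 1, QLCNIC_CDRP_SIGNATURE_MAKE(0, 1) == 0xcafe0100,
+ * and QLCNIC_CDRP_FORM_CMD(QLCNIC_CDRP_CMD_SET_MTU) == 0x80000012.
+ * The firmware acknowledges by rewriting the CRB with the command bit
+ * cleared, at which point QLCNIC_CDRP_IS_RSP() is true.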
+ */ +#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd)) +#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0) + +#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 +#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 +#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 +#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 +#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 +#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 +#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007 +#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008 +#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009 +#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a +#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012 +#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013 +#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014 +#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015 +#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016 +#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017 +#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018 +#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019 +#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f + +#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020 +#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021 +#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022 +#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024 +#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025 +#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 +#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027 +#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028 +#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029 +#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a +#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E +#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f +#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030 + +#define QLCNIC_RCODE_SUCCESS 0 +#define QLCNIC_RCODE_NOT_SUPPORTED 9 +#define QLCNIC_RCODE_TIMEOUT 17 +#define QLCNIC_DESTROY_CTX_RESET 0 + +/* + * Capabilities Announced + */ +#define QLCNIC_CAP0_LEGACY_CONTEXT (1) +#define QLCNIC_CAP0_LEGACY_MN (1 << 2) +#define QLCNIC_CAP0_LSO (1 << 6) +#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7) +#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) +#define QLCNIC_CAP0_VALIDOFF (1 << 11) + +/* + * Context state + */ +#define QLCNIC_HOST_CTX_STATE_FREED 0 +#define QLCNIC_HOST_CTX_STATE_ACTIVE 2 + +/* + * Rx context + */ + +struct qlcnic_hostrq_sds_ring { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le16 msi_index; + __le16 rsvd; /* Padding */ +} __packed; + +struct qlcnic_hostrq_rds_ring { + __le64 host_phys_addr; /* Ring base addr */ + __le64 buff_size; /* Packet buffer size */ + __le32 ring_size; /* Ring entries */ + __le32 ring_kind; /* Class of ring */ +} __packed; + +struct qlcnic_hostrq_rx_ctx { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 host_rds_crb_mode; /* RDS crb usage */ + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 valid_field_offset; + u8 txrx_sds_binding; + u8 msix_handler; + u8 reserved[128]; /* reserve space for future expansion*/ + /* MUST BE 64-bit aligned. 
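+ * (Editor's sizing example, hedged: with 2 RDS and 4 SDS rings the
+ * trailing data[] adds 2 * sizeof(struct qlcnic_hostrq_rds_ring) +
+ * 4 * sizeof(struct qlcnic_hostrq_sds_ring) = 2 * 24 + 4 * 16 = 112
+ * bytes, which is exactly what SIZEOF_HOSTRQ_RX() below computes.)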
+ The following is packed: + - N hostrq_rds_rings + - N hostrq_sds_rings */ + char data[0]; +} __packed; + +struct qlcnic_cardrsp_rds_ring{ + __le32 host_producer_crb; /* Crb to use */ + __le32 rsvd1; /* Padding */ +} __packed; + +struct qlcnic_cardrsp_sds_ring { + __le32 host_consumer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} __packed; + +struct qlcnic_cardrsp_rx_ctx { + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le32 host_ctx_state; /* Starting State */ + __le32 num_fn_per_port; /* How many PCI fn share the port */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + u8 reserved[128]; /* save space for future expansion */ + /* MUST BE 64-bit aligned. + The following is packed: + - N cardrsp_rds_rings + - N cardrs_sds_rings */ + char data[0]; +} __packed; + +#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ + (sizeof(HOSTRQ_RX) + \ + (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \ + (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring))) + +#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \ + (sizeof(CARDRSP_RX) + \ + (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \ + (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring))) + +/* + * Tx context + */ + +struct qlcnic_hostrq_cds_ring { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le32 rsvd; /* Padding */ +} __packed; + +struct qlcnic_hostrq_tx_ctx { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le64 cmd_cons_dma_addr; /* */ + __le64 dummy_dma_addr; /* */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 rsvd1; /* Padding */ + __le16 rsvd2; /* Padding */ + __le16 interrupt_ctl; + __le16 msi_index; + __le16 rsvd3; /* Padding */ + struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */ + u8 reserved[128]; /* future expansion */ +} __packed; + +struct qlcnic_cardrsp_cds_ring { + __le32 host_producer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} __packed; + +struct qlcnic_cardrsp_tx_ctx { + __le32 host_ctx_state; /* Starting state */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */ + u8 reserved[128]; /* future expansion */ +} __packed; + +#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) +#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) + +/* CRB */ + +#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0 +#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1 +#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2 +#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3 + +#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0 +#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1 +#define QLCNIC_HOST_INT_CRB_MODE_NORX 2 +#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3 +#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4 + + +/* MAC */ + +#define MC_COUNT_P3P 38 + +#define QLCNIC_MAC_NOOP 0 +#define QLCNIC_MAC_ADD 1 +#define QLCNIC_MAC_DEL 2 +#define QLCNIC_MAC_VLAN_ADD 3 +#define QLCNIC_MAC_VLAN_DEL 4 + +struct qlcnic_mac_list_s { + struct list_head list; + uint8_t mac_addr[ETH_ALEN+2]; +}; + +#define QLCNIC_HOST_REQUEST 0x13 +#define QLCNIC_REQUEST 0x14 + +#define 
QLCNIC_MAC_EVENT 0x1 + +#define QLCNIC_IP_UP 2 +#define QLCNIC_IP_DOWN 3 + +#define QLCNIC_ILB_MODE 0x1 +#define QLCNIC_ELB_MODE 0x2 + +#define QLCNIC_LINKEVENT 0x1 +#define QLCNIC_LB_RESPONSE 0x2 +#define QLCNIC_IS_LB_CONFIGURED(VAL) \ + (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE)) + +/* + * Driver --> Firmware + */ +#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1 +#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3 +#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4 +#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7 +#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc +#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12 + +#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15 +#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17 +#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18 +#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13 + +/* + * Firmware --> Driver + */ + +#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f +#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 + +#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ +#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ +#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ + +#define QLCNIC_LRO_REQUEST_CLEANUP 4 + +/* Capabilites received */ +#define QLCNIC_FW_CAPABILITY_TSO BIT_1 +#define QLCNIC_FW_CAPABILITY_BDG BIT_8 +#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 +#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 +#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27 + +/* module types */ +#define LINKEVENT_MODULE_NOT_PRESENT 1 +#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 +#define LINKEVENT_MODULE_OPTICAL_SRLR 3 +#define LINKEVENT_MODULE_OPTICAL_LRM 4 +#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7 +#define LINKEVENT_MODULE_TWINAX 8 + +#define LINKSPEED_10GBPS 10000 +#define LINKSPEED_1GBPS 1000 +#define LINKSPEED_100MBPS 100 +#define LINKSPEED_10MBPS 10 + +#define LINKSPEED_ENCODED_10MBPS 0 +#define LINKSPEED_ENCODED_100MBPS 1 +#define LINKSPEED_ENCODED_1GBPS 2 + +#define LINKEVENT_AUTONEG_DISABLED 0 +#define LINKEVENT_AUTONEG_ENABLED 1 + +#define LINKEVENT_HALF_DUPLEX 0 +#define LINKEVENT_FULL_DUPLEX 1 + +#define LINKEVENT_LINKSPEED_MBPS 0 +#define LINKEVENT_LINKSPEED_ENCODED 1 + +/* firmware response header: + * 63:58 - message type + * 57:56 - owner + * 55:53 - desc count + * 52:48 - reserved + * 47:40 - completion id + * 39:32 - opcode + * 31:16 - error code + * 15:00 - reserved + */ +#define qlcnic_get_nic_msg_opcode(msg_hdr) \ + ((msg_hdr >> 32) & 0xFF) + +struct qlcnic_fw_msg { + union { + struct { + u64 hdr; + u64 body[7]; + }; + u64 words[8]; + }; +}; + +struct qlcnic_nic_req { + __le64 qhdr; + __le64 req_hdr; + __le64 words[6]; +} __packed; + +struct qlcnic_mac_req { + u8 op; + u8 tag; + u8 mac_addr[6]; +}; + +struct qlcnic_vlan_req { + __le16 vlan_id; + __le16 rsvd[3]; +} __packed; + +struct qlcnic_ipaddr { + __be32 ipv4; + __be32 ipv6[4]; +}; + +#define QLCNIC_MSI_ENABLED 0x02 +#define QLCNIC_MSIX_ENABLED 0x04 +#define QLCNIC_LRO_ENABLED 0x08 +#define QLCNIC_LRO_DISABLED 0x00 +#define QLCNIC_BRIDGE_ENABLED 0X10 +#define QLCNIC_DIAG_ENABLED 0x20 +#define QLCNIC_ESWITCH_ENABLED 0x40 +#define QLCNIC_ADAPTER_INITIALIZED 0x80 +#define QLCNIC_TAGGING_ENABLED 0x100 +#define QLCNIC_MACSPOOF 0x200 +#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400 +#define QLCNIC_PROMISC_DISABLED 0x800 +#define QLCNIC_NEED_FLR 0x1000 +#define QLCNIC_FW_RESET_OWNER 0x2000 +#define QLCNIC_FW_HANG 0x4000 +#define QLCNIC_IS_MSI_FAMILY(adapter) \ + ((adapter)->flags & (QLCNIC_MSI_ENABLED | 
QLCNIC_MSIX_ENABLED)) + +#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 +#define QLCNIC_MSIX_TBL_SPACE 8192 +#define QLCNIC_PCI_REG_MSIX_TBL 0x44 +#define QLCNIC_MSIX_TBL_PGSIZE 4096 + +#define QLCNIC_NETDEV_WEIGHT 128 +#define QLCNIC_ADAPTER_UP_MAGIC 777 + +#define __QLCNIC_FW_ATTACHED 0 +#define __QLCNIC_DEV_UP 1 +#define __QLCNIC_RESETTING 2 +#define __QLCNIC_START_FW 4 +#define __QLCNIC_AER 5 +#define __QLCNIC_DIAG_RES_ALLOC 6 + +#define QLCNIC_INTERRUPT_TEST 1 +#define QLCNIC_LOOPBACK_TEST 2 +#define QLCNIC_LED_TEST 3 + +#define QLCNIC_FILTER_AGE 80 +#define QLCNIC_READD_AGE 20 +#define QLCNIC_LB_MAX_FILTERS 64 + +/* QLCNIC Driver Error Code */ +#define QLCNIC_FW_NOT_RESPOND 51 +#define QLCNIC_TEST_IN_PROGRESS 52 +#define QLCNIC_UNDEFINED_ERROR 53 +#define QLCNIC_LB_CABLE_NOT_CONN 54 + +struct qlcnic_filter { + struct hlist_node fnode; + u8 faddr[ETH_ALEN]; + __le16 vlan_id; + unsigned long ftime; +}; + +struct qlcnic_filter_hash { + struct hlist_head *fhead; + u8 fnum; + u8 fmax; +}; + +struct qlcnic_adapter { + struct qlcnic_hardware_context *ahw; + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; + struct net_device *netdev; + struct pci_dev *pdev; + + unsigned long state; + u32 flags; + + u16 num_txd; + u16 num_rxd; + u16 num_jumbo_rxd; + u16 max_rxd; + u16 max_jumbo_rxd; + + u8 max_rds_rings; + u8 max_sds_rings; + u8 msix_supported; + u8 portnum; + u8 physical_port; + u8 reset_context; + + u8 mc_enabled; + u8 max_mc_count; + u8 fw_wait_cnt; + u8 fw_fail_cnt; + u8 tx_timeo_cnt; + u8 need_fw_reset; + + u8 has_link_events; + u8 fw_type; + u16 tx_context_id; + u16 is_up; + + u16 link_speed; + u16 link_duplex; + u16 link_autoneg; + u16 module_type; + + u16 op_mode; + u16 switch_mode; + u16 max_tx_ques; + u16 max_rx_ques; + u16 max_mtu; + u16 pvid; + + u32 fw_hal_version; + u32 capabilities; + u32 irq; + u32 temp; + + u32 int_vec_bit; + u32 heartbeat; + + u8 max_mac_filters; + u8 dev_state; + u8 diag_test; + char diag_cnt; + u8 reset_ack_timeo; + u8 dev_init_timeo; + u16 msg_enable; + + u8 mac_addr[ETH_ALEN]; + + u64 dev_rst_time; + u8 mac_learn; + unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct qlcnic_npar_info *npars; + struct qlcnic_eswitch *eswitch; + struct qlcnic_nic_template *nic_ops; + + struct qlcnic_adapter_stats stats; + struct list_head mac_list; + + void __iomem *tgt_mask_reg; + void __iomem *tgt_status_reg; + void __iomem *crb_int_state_reg; + void __iomem *isr_int_vec; + + struct msix_entry *msix_entries; + + struct delayed_work fw_work; + + + struct qlcnic_filter_hash fhash; + + spinlock_t tx_clean_lock; + spinlock_t mac_learn_lock; + __le32 file_prd_off; /*File fw product offset*/ + u32 fw_version; + const struct firmware *fw; +}; + +struct qlcnic_info { + __le16 pci_func; + __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */ + __le16 phys_port; + __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */ + + __le32 capabilities; + u8 max_mac_filters; + u8 reserved1; + __le16 max_mtu; + + __le16 max_tx_ques; + __le16 max_rx_ques; + __le16 min_tx_bw; + __le16 max_tx_bw; + u8 reserved2[104]; +} __packed; + +struct qlcnic_pci_info { + __le16 id; /* pci function id */ + __le16 active; /* 1 = Enabled */ + __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */ + __le16 default_port; /* default port number */ + + __le16 tx_min_bw; /* Multiple of 100mbpc */ + __le16 tx_max_bw; + __le16 reserved1[2]; + + u8 mac[ETH_ALEN]; + u8 reserved2[106]; +} __packed; + +struct qlcnic_npar_info { + u16 pvid; + u16 min_bw; + u16 max_bw; + u8 phy_port; + u8 type; + 
u8 active; + u8 enable_pm; + u8 dest_npar; + u8 discard_tagged; + u8 mac_override; + u8 mac_anti_spoof; + u8 promisc_mode; + u8 offload_flags; +}; + +struct qlcnic_eswitch { + u8 port; + u8 active_vports; + u8 active_vlans; + u8 active_ucast_filters; + u8 max_ucast_filters; + u8 max_active_vlans; + + u32 flags; +#define QLCNIC_SWITCH_ENABLE BIT_1 +#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2 +#define QLCNIC_SWITCH_PROMISC_MODE BIT_3 +#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4 +}; + + +/* Return codes for Error handling */ +#define QL_STATUS_INVALID_PARAM -1 + +#define MAX_BW 100 /* % of link speed */ +#define MAX_VLAN_ID 4095 +#define MIN_VLAN_ID 2 +#define DEFAULT_MAC_LEARN 1 + +#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) +#define IS_VALID_BW(bw) (bw <= MAX_BW) + +struct qlcnic_pci_func_cfg { + u16 func_type; + u16 min_bw; + u16 max_bw; + u16 port_num; + u8 pci_func; + u8 func_state; + u8 def_mac_addr[6]; +}; + +struct qlcnic_npar_func_cfg { + u32 fw_capab; + u16 port_num; + u16 min_bw; + u16 max_bw; + u16 max_tx_queues; + u16 max_rx_queues; + u8 pci_func; + u8 op_mode; +}; + +struct qlcnic_pm_func_cfg { + u8 pci_func; + u8 action; + u8 dest_npar; + u8 reserved[5]; +}; + +struct qlcnic_esw_func_cfg { + u16 vlan_id; + u8 op_mode; + u8 op_type; + u8 pci_func; + u8 host_vlan_tag; + u8 promisc_mode; + u8 discard_tagged; + u8 mac_override; + u8 mac_anti_spoof; + u8 offload_flags; + u8 reserved[5]; +}; + +#define QLCNIC_STATS_VERSION 1 +#define QLCNIC_STATS_PORT 1 +#define QLCNIC_STATS_ESWITCH 2 +#define QLCNIC_QUERY_RX_COUNTER 0 +#define QLCNIC_QUERY_TX_COUNTER 1 +#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL + +#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\ +do { \ + if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \ + ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ + (VAL1) = (VAL2); \ + else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \ + ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ + (VAL1) += (VAL2); \ +} while (0) + +struct __qlcnic_esw_statistics { + __le16 context_id; + __le16 version; + __le16 size; + __le16 unused; + __le64 unicast_frames; + __le64 multicast_frames; + __le64 broadcast_frames; + __le64 dropped_frames; + __le64 errors; + __le64 local_frames; + __le64 numbytes; + __le64 rsvd[3]; +} __packed; + +struct qlcnic_esw_statistics { + struct __qlcnic_esw_statistics rx; + struct __qlcnic_esw_statistics tx; +}; + +struct qlcnic_common_entry_hdr { + __le32 type; + __le32 offset; + __le32 cap_size; + u8 mask; + u8 rsvd[2]; + u8 flags; +} __packed; + +struct __crb { + __le32 addr; + u8 stride; + u8 rsvd1[3]; + __le32 data_size; + __le32 no_ops; + __le32 rsvd2[4]; +} __packed; + +struct __ctrl { + __le32 addr; + u8 stride; + u8 index_a; + __le16 timeout; + __le32 data_size; + __le32 no_ops; + u8 opcode; + u8 index_v; + u8 shl_val; + u8 shr_val; + __le32 val1; + __le32 val2; + __le32 val3; +} __packed; + +struct __cache { + __le32 addr; + __le16 stride; + __le16 init_tag_val; + __le32 size; + __le32 no_ops; + __le32 ctrl_addr; + __le32 ctrl_val; + __le32 read_addr; + u8 read_addr_stride; + u8 read_addr_num; + u8 rsvd1[2]; +} __packed; + +struct __ocm { + u8 rsvd[8]; + __le32 size; + __le32 no_ops; + u8 rsvd1[8]; + __le32 read_addr; + __le32 read_addr_stride; +} __packed; + +struct __mem { + u8 rsvd[24]; + __le32 addr; + __le32 size; +} __packed; + +struct __mux { + __le32 addr; + u8 rsvd[4]; + __le32 size; + __le32 no_ops; + __le32 val; + __le32 val_stride; + __le32 read_addr; + u8 rsvd2[4]; +} __packed; + +struct __queue { + __le32 sel_addr; + __le16 stride; + 
u8 rsvd[2]; + __le32 size; + __le32 no_ops; + u8 rsvd2[8]; + __le32 read_addr; + u8 read_addr_stride; + u8 read_addr_cnt; + u8 rsvd3[2]; +} __packed; + +struct qlcnic_dump_entry { + struct qlcnic_common_entry_hdr hdr; + union { + struct __crb crb; + struct __cache cache; + struct __ocm ocm; + struct __mem mem; + struct __mux mux; + struct __queue que; + struct __ctrl ctrl; + } region; +} __packed; + +enum op_codes { + QLCNIC_DUMP_NOP = 0, + QLCNIC_DUMP_READ_CRB = 1, + QLCNIC_DUMP_READ_MUX = 2, + QLCNIC_DUMP_QUEUE = 3, + QLCNIC_DUMP_BRD_CONFIG = 4, + QLCNIC_DUMP_READ_OCM = 6, + QLCNIC_DUMP_PEG_REG = 7, + QLCNIC_DUMP_L1_DTAG = 8, + QLCNIC_DUMP_L1_ITAG = 9, + QLCNIC_DUMP_L1_DATA = 11, + QLCNIC_DUMP_L1_INST = 12, + QLCNIC_DUMP_L2_DTAG = 21, + QLCNIC_DUMP_L2_ITAG = 22, + QLCNIC_DUMP_L2_DATA = 23, + QLCNIC_DUMP_L2_INST = 24, + QLCNIC_DUMP_READ_ROM = 71, + QLCNIC_DUMP_READ_MEM = 72, + QLCNIC_DUMP_READ_CTRL = 98, + QLCNIC_DUMP_TLHDR = 99, + QLCNIC_DUMP_RDEND = 255 +}; + +#define QLCNIC_DUMP_WCRB BIT_0 +#define QLCNIC_DUMP_RWCRB BIT_1 +#define QLCNIC_DUMP_ANDCRB BIT_2 +#define QLCNIC_DUMP_ORCRB BIT_3 +#define QLCNIC_DUMP_POLLCRB BIT_4 +#define QLCNIC_DUMP_RD_SAVE BIT_5 +#define QLCNIC_DUMP_WRT_SAVED BIT_6 +#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7 +#define QLCNIC_DUMP_SKIP BIT_7 + +#define QLCNIC_DUMP_MASK_MIN 3 +#define QLCNIC_DUMP_MASK_DEF 0x1f +#define QLCNIC_DUMP_MASK_MAX 0xff +#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed +#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed +#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed +#define QLCNIC_FORCE_FW_RESET 0xdeaddead + +struct qlcnic_dump_operations { + enum op_codes opcode; + u32 (*handler)(struct qlcnic_adapter *, + struct qlcnic_dump_entry *, u32 *); +}; + +int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); +int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config); + +u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off); +int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data); +int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data); +int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data); +void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *); +void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64); + +#define ADDR_IN_RANGE(addr, low, high) \ + (((addr) < (high)) && ((addr) >= (low))) + +#define QLCRD32(adapter, off) \ + (qlcnic_hw_read_wx_2M(adapter, off)) +#define QLCWR32(adapter, off, val) \ + (qlcnic_hw_write_wx_2M(adapter, off, val)) + +int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32); +void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int); + +#define qlcnic_rom_lock(a) \ + qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID) +#define qlcnic_rom_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 2) +#define qlcnic_phy_lock(a) \ + qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID) +#define qlcnic_phy_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 3) +#define qlcnic_api_lock(a) \ + qlcnic_pcie_sem_lock((a), 5, 0) +#define qlcnic_api_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 5) +#define qlcnic_sw_lock(a) \ + qlcnic_pcie_sem_lock((a), 6, 0) +#define qlcnic_sw_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 6) +#define crb_win_lock(a) \ + qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID) +#define crb_win_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 7) + +int qlcnic_get_board_info(struct qlcnic_adapter *adapter); +int qlcnic_wol_supported(struct qlcnic_adapter *adapter); +int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); +void 
qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter); +void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); +int qlcnic_dump_fw(struct qlcnic_adapter *); + +/* Functions from qlcnic_init.c */ +int qlcnic_load_firmware(struct qlcnic_adapter *adapter); +int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter); +void qlcnic_request_firmware(struct qlcnic_adapter *adapter); +void qlcnic_release_firmware(struct qlcnic_adapter *adapter); +int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); +int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter); +int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter); + +int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp); +int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, + u8 *bytes, size_t size); +int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter); +void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter); + +void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32); + +int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter); +void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter); + +int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter); +void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter); + +void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter); +void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); +void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); + +int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); +void qlcnic_watchdog_task(struct work_struct *work); +void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring); +int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); +void qlcnic_set_multi(struct net_device *netdev); +void qlcnic_free_mac_list(struct qlcnic_adapter *adapter); +int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32); +int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter); +int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable); +int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd); +int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable); +void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup); + +int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); +int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); +u32 qlcnic_fix_features(struct net_device *netdev, u32 features); +int qlcnic_set_features(struct net_device *netdev, u32 features); +int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); +int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); +int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); +void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring); +void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); +void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); +void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter); +int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode); + +/* Functions from qlcnic_ethtool.c */ +int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]); + +/* Functions from qlcnic_main.c */ +int qlcnic_reset_context(struct qlcnic_adapter *); +u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter, + u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd); +void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); +int 
qlcnic_diag_alloc_res(struct net_device *netdev, int test); +netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val); +int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data); +void qlcnic_dev_request_reset(struct qlcnic_adapter *); +void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); + +/* Management functions */ +int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); +int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); +int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); +int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); + +/* eSwitch management functions */ +int qlcnic_config_switch_port(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); +int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); +int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8); +int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8, + struct __qlcnic_esw_statistics *); +int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8, + struct __qlcnic_esw_statistics *); +int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8); +extern int qlcnic_config_tso; + +/* + * QLOGIC Board information + */ + +#define QLCNIC_MAX_BOARD_NAME_LEN 100 +struct qlcnic_brdinfo { + unsigned short vendor; + unsigned short device; + unsigned short sub_vendor; + unsigned short sub_device; + char short_name[QLCNIC_MAX_BOARD_NAME_LEN]; +}; + +static const struct qlcnic_brdinfo qlcnic_boards[] = { + {0x1077, 0x8020, 0x1077, 0x203, + "8200 Series Single Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)"}, + {0x1077, 0x8020, 0x1077, 0x207, + "8200 Series Dual Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)"}, + {0x1077, 0x8020, 0x1077, 0x20b, + "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"}, + {0x1077, 0x8020, 0x1077, 0x20c, + "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"}, + {0x1077, 0x8020, 0x1077, 0x20f, + "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, + {0x1077, 0x8020, 0x103c, 0x3733, + "NC523SFP 10Gb 2-port Server Adapter"}, + {0x1077, 0x8020, 0x103c, 0x3346, + "CN1000Q Dual Port Converged Network Adapter"}, + {0x1077, 0x8020, 0x1077, 0x210, + "QME8242-k 10GbE Dual Port Mezzanine Card"}, + {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, +}; + +#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards) + +static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) +{ + if (likely(tx_ring->producer < tx_ring->sw_consumer)) + return tx_ring->sw_consumer - tx_ring->producer; + else + return tx_ring->sw_consumer + tx_ring->num_desc - + tx_ring->producer; +} + +extern const struct ethtool_ops qlcnic_ethtool_ops; + +struct qlcnic_nic_template { + int (*config_bridged_mode) (struct qlcnic_adapter *, u32); + int (*config_led) (struct qlcnic_adapter *, u32, u32); + int (*start_firmware) (struct qlcnic_adapter *); +}; + +#define QLCDB(adapter, lvl, _fmt, _args...) 
do { \
+		if (NETIF_MSG_##lvl & adapter->msg_enable)	\
+			printk(KERN_INFO "%s: %s: " _fmt,	\
+				dev_name(&adapter->pdev->dev),	\
+				__func__, ##_args);		\
+	} while (0)
+
+#endif				/* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
new file mode 100644
index 0000000..b0d32dd
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,1117 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+
+static u32
+qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
+{
+	u32 rsp;
+	int timeout = 0;
+
+	do {
+		/* give at least 1ms for firmware to respond */
+		msleep(1);
+
+		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
+			return QLCNIC_CDRP_RSP_TIMEOUT;
+
+		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+	} while (!QLCNIC_CDRP_IS_RSP(rsp));
+
+	return rsp;
+}
+
+u32
+qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+{
+	u32 rsp;
+	u32 signature;
+	u32 rcode = QLCNIC_RCODE_SUCCESS;
+	struct pci_dev *pdev = adapter->pdev;
+
+	signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
+
+	/* Acquire semaphore before accessing CRB */
+	if (qlcnic_api_lock(adapter))
+		return QLCNIC_RCODE_TIMEOUT;
+
+	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
+	QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
+	QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
+	QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
+	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
+
+	rsp = qlcnic_poll_rsp(adapter);
+
+	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
+		dev_err(&pdev->dev, "card response timeout.\n");
+		rcode = QLCNIC_RCODE_TIMEOUT;
+	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
+		rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+		dev_err(&pdev->dev, "failed card response code:0x%x\n",
+				rcode);
+	}
+
+	/* Release semaphore */
+	qlcnic_api_unlock(adapter);
+
+	return rcode;
+}
+
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
+{
+	uint64_t sum = 0;
+	int count = temp_size / sizeof(uint32_t);
+	while (count-- > 0)
+		sum += *temp_buffer++;
+	while (sum >> 32)
+		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+	return ~sum;
+}
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+{
+	int err, i;
+	u16 temp_size;
+	void *tmp_addr;
+	u32 version, csum, *template, *tmp_buf;
+	struct qlcnic_hardware_context *ahw;
+	struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
+	dma_addr_t tmp_addr_t = 0;
+
+	ahw = adapter->ahw;
+	err = qlcnic_issue_cmd(adapter,
+		adapter->ahw->pci_func,
+		adapter->fw_hal_version,
+		0,
+		0,
+		0,
+		QLCNIC_CDRP_CMD_TEMP_SIZE);
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		err = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+		dev_info(&adapter->pdev->dev,
+			"Can't get template size %d\n", err);
+		err = -EIO;
+		return err;
+	}
+	version = QLCRD32(adapter, QLCNIC_ARG3_CRB_OFFSET);
+	temp_size = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+	if (!temp_size)
+		return -EIO;
+
+	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
+			&tmp_addr_t, GFP_KERNEL);
+	if (!tmp_addr) {
+		dev_err(&adapter->pdev->dev,
+			"Can't get memory for FW dump template\n");
+		return -ENOMEM;
+	}
+	err = qlcnic_issue_cmd(adapter,
+		adapter->ahw->pci_func,
+		adapter->fw_hal_version,
+		LSD(tmp_addr_t),
+		MSD(tmp_addr_t),
+		temp_size,
+		QLCNIC_CDRP_CMD_GET_TEMP_HDR);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+
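/* A CDRP failure here means the firmware could not return the
+		 * minidump template header; err holds the status code that
+		 * qlcnic_issue_cmd() read back from the firmware. */
+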
dev_err(&adapter->pdev->dev, + "Failed to get mini dump template header %d\n", err); + err = -EIO; + goto error; + } + tmp_tmpl = tmp_addr; + csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size); + if (csum) { + dev_err(&adapter->pdev->dev, + "Template header checksum validation failed\n"); + err = -EIO; + goto error; + } + ahw->fw_dump.tmpl_hdr = vzalloc(temp_size); + if (!ahw->fw_dump.tmpl_hdr) { + err = -EIO; + goto error; + } + tmp_buf = tmp_addr; + template = (u32 *) ahw->fw_dump.tmpl_hdr; + for (i = 0; i < temp_size/sizeof(u32); i++) + *template++ = __le32_to_cpu(*tmp_buf++); + + tmpl_hdr = ahw->fw_dump.tmpl_hdr; + tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; + ahw->fw_dump.enable = 1; +error: + dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t); + return err; +} + +int +qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { + if (qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + recv_ctx->context_id, + mtu, + 0, + QLCNIC_CDRP_CMD_SET_MTU)) { + + dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); + return -EIO; + } + } + + return 0; +} + +static int +qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) +{ + void *addr; + struct qlcnic_hostrq_rx_ctx *prq; + struct qlcnic_cardrsp_rx_ctx *prsp; + struct qlcnic_hostrq_rds_ring *prq_rds; + struct qlcnic_hostrq_sds_ring *prq_sds; + struct qlcnic_cardrsp_rds_ring *prsp_rds; + struct qlcnic_cardrsp_sds_ring *prsp_sds; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + + dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; + u64 phys_addr; + + u8 i, nrds_rings, nsds_rings; + size_t rq_size, rsp_size; + u32 cap, reg, val, reg2; + int err; + + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + nrds_rings = adapter->max_rds_rings; + nsds_rings = adapter->max_sds_rings; + + rq_size = + SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, + nsds_rings); + rsp_size = + SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, + nsds_rings); + + addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, + &hostrq_phys_addr, GFP_KERNEL); + if (addr == NULL) + return -ENOMEM; + prq = addr; + + addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, + &cardrsp_phys_addr, GFP_KERNEL); + if (addr == NULL) { + err = -ENOMEM; + goto out_free_rq; + } + prsp = addr; + + prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); + + cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN + | QLCNIC_CAP0_VALIDOFF); + cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); + + prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx, + msix_handler); + prq->txrx_sds_binding = nsds_rings - 1; + + prq->capabilities[0] = cpu_to_le32(cap); + prq->host_int_crb_mode = + cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); + prq->host_rds_crb_mode = + cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE); + + prq->num_rds_rings = cpu_to_le16(nrds_rings); + prq->num_sds_rings = cpu_to_le16(nsds_rings); + prq->rds_ring_offset = 0; + + val = le32_to_cpu(prq->rds_ring_offset) + + (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings); + prq->sds_ring_offset = cpu_to_le32(val); + + prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data + + le32_to_cpu(prq->rds_ring_offset)); + + for (i = 0; i < nrds_rings; i++) { + + rds_ring = &recv_ctx->rds_rings[i]; + rds_ring->producer = 0; + + prq_rds[i].host_phys_addr = 
cpu_to_le64(rds_ring->phys_addr); + prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); + prq_rds[i].ring_kind = cpu_to_le32(i); + prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); + } + + prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data + + le32_to_cpu(prq->sds_ring_offset)); + + for (i = 0; i < nsds_rings; i++) { + + sds_ring = &recv_ctx->sds_rings[i]; + sds_ring->consumer = 0; + memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); + + prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); + prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); + prq_sds[i].msi_index = cpu_to_le16(i); + } + + phys_addr = hostrq_phys_addr; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + (u32)(phys_addr >> 32), + (u32)(phys_addr & 0xffffffff), + rq_size, + QLCNIC_CDRP_CMD_CREATE_RX_CTX); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to create rx ctx in firmware%d\n", err); + goto out_free_rsp; + } + + + prsp_rds = ((struct qlcnic_cardrsp_rds_ring *) + &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { + rds_ring = &recv_ctx->rds_rings[i]; + + reg = le32_to_cpu(prsp_rds[i].host_producer_crb); + rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg; + } + + prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) + &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { + sds_ring = &recv_ctx->sds_rings[i]; + + reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); + reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); + + sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg; + sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2; + } + + recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); + recv_ctx->context_id = le16_to_cpu(prsp->context_id); + recv_ctx->virt_port = prsp->virt_port; + +out_free_rsp: + dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, + cardrsp_phys_addr); +out_free_rq: + dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); + return err; +} + +static void +qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + recv_ctx->context_id, + QLCNIC_DESTROY_CTX_RESET, + 0, + QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) { + + dev_err(&adapter->pdev->dev, + "Failed to destroy rx ctx in firmware\n"); + } + + recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; +} + +static int +qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hostrq_tx_ctx *prq; + struct qlcnic_hostrq_cds_ring *prq_cds; + struct qlcnic_cardrsp_tx_ctx *prsp; + void *rq_addr, *rsp_addr; + size_t rq_size, rsp_size; + u32 temp; + int err; + u64 phys_addr; + dma_addr_t rq_phys_addr, rsp_phys_addr; + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + + /* reset host resources */ + tx_ring->producer = 0; + tx_ring->sw_consumer = 0; + *(tx_ring->hw_consumer) = 0; + + rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); + rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, + &rq_phys_addr, GFP_KERNEL); + if (!rq_addr) + return -ENOMEM; + + rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); + rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, + &rsp_phys_addr, GFP_KERNEL); + if (!rsp_addr) { + err = -ENOMEM; + goto out_free_rq; + } + + memset(rq_addr, 0, rq_size); + prq = rq_addr; + + memset(rsp_addr, 0, rsp_size); + prsp = 
rsp_addr; + + prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); + + temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | + QLCNIC_CAP0_LSO); + prq->capabilities[0] = cpu_to_le32(temp); + + prq->host_int_crb_mode = + cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); + + prq->interrupt_ctl = 0; + prq->msi_index = 0; + prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); + + prq_cds = &prq->cds_ring; + + prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); + prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); + + phys_addr = rq_phys_addr; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + (u32)(phys_addr >> 32), + ((u32)phys_addr & 0xffffffff), + rq_size, + QLCNIC_CDRP_CMD_CREATE_TX_CTX); + + if (err == QLCNIC_RCODE_SUCCESS) { + temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); + tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; + + adapter->tx_context_id = + le16_to_cpu(prsp->context_id); + } else { + dev_err(&adapter->pdev->dev, + "Failed to create tx ctx in firmware%d\n", err); + err = -EIO; + } + + dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, + rsp_phys_addr); + +out_free_rq: + dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); + + return err; +} + +static void +qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter) +{ + if (qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + adapter->tx_context_id, + QLCNIC_DESTROY_CTX_RESET, + 0, + QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) { + + dev_err(&adapter->pdev->dev, + "Failed to destroy tx ctx in firmware\n"); + } +} + +int +qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) +{ + return qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + config, + 0, + 0, + QLCNIC_CDRP_CMD_CONFIG_PORT); +} + +int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) +{ + void *addr; + int err; + int ring; + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + + struct pci_dev *pdev = adapter->pdev; + + recv_ctx = adapter->recv_ctx; + tx_ring = adapter->tx_ring; + + tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev, + sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL); + if (tx_ring->hw_consumer == NULL) { + dev_err(&pdev->dev, "failed to allocate tx consumer\n"); + return -ENOMEM; + } + + /* cmd desc ring */ + addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), + &tx_ring->phys_addr, GFP_KERNEL); + + if (addr == NULL) { + dev_err(&pdev->dev, "failed to allocate tx desc ring\n"); + err = -ENOMEM; + goto err_out_free; + } + + tx_ring->desc_head = addr; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + addr = dma_alloc_coherent(&adapter->pdev->dev, + RCV_DESC_RINGSIZE(rds_ring), + &rds_ring->phys_addr, GFP_KERNEL); + if (addr == NULL) { + dev_err(&pdev->dev, + "failed to allocate rds ring [%d]\n", ring); + err = -ENOMEM; + goto err_out_free; + } + rds_ring->desc_head = addr; + + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + addr = dma_alloc_coherent(&adapter->pdev->dev, + STATUS_DESC_RINGSIZE(sds_ring), + &sds_ring->phys_addr, GFP_KERNEL); + if (addr == NULL) { + dev_err(&pdev->dev, + "failed to allocate sds ring [%d]\n", ring); + err = -ENOMEM; + goto err_out_free; + } + sds_ring->desc_head = addr; + } + + return 0; + 
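+/* All failure paths above land here; qlcnic_free_hw_resources() checks
+ * each descriptor-ring pointer for NULL, so a partial allocation can be
+ * unwound with this single error label. */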
+err_out_free: + qlcnic_free_hw_resources(adapter); + return err; +} + + +int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter) +{ + int err; + + if (adapter->flags & QLCNIC_NEED_FLR) { + pci_reset_function(adapter->pdev); + adapter->flags &= ~QLCNIC_NEED_FLR; + } + + err = qlcnic_fw_cmd_create_rx_ctx(adapter); + if (err) + return err; + + err = qlcnic_fw_cmd_create_tx_ctx(adapter); + if (err) { + qlcnic_fw_cmd_destroy_rx_ctx(adapter); + return err; + } + + set_bit(__QLCNIC_FW_ATTACHED, &adapter->state); + return 0; +} + +void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) +{ + if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { + qlcnic_fw_cmd_destroy_rx_ctx(adapter); + qlcnic_fw_cmd_destroy_tx_ctx(adapter); + + /* Allow dma queues to drain after context reset */ + msleep(20); + } +} + +void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + int ring; + + recv_ctx = adapter->recv_ctx; + + tx_ring = adapter->tx_ring; + if (tx_ring->hw_consumer != NULL) { + dma_free_coherent(&adapter->pdev->dev, + sizeof(u32), + tx_ring->hw_consumer, + tx_ring->hw_cons_phys_addr); + tx_ring->hw_consumer = NULL; + } + + if (tx_ring->desc_head != NULL) { + dma_free_coherent(&adapter->pdev->dev, + TX_DESC_RINGSIZE(tx_ring), + tx_ring->desc_head, tx_ring->phys_addr); + tx_ring->desc_head = NULL; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + if (rds_ring->desc_head != NULL) { + dma_free_coherent(&adapter->pdev->dev, + RCV_DESC_RINGSIZE(rds_ring), + rds_ring->desc_head, + rds_ring->phys_addr); + rds_ring->desc_head = NULL; + } + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (sds_ring->desc_head != NULL) { + dma_free_coherent(&adapter->pdev->dev, + STATUS_DESC_RINGSIZE(sds_ring), + sds_ring->desc_head, + sds_ring->phys_addr); + sds_ring->desc_head = NULL; + } + } +} + + +/* Get MAC address of a NIC partition */ +int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) +{ + int err; + u32 arg1; + + arg1 = adapter->ahw->pci_func | BIT_8; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + arg1, + 0, + 0, + QLCNIC_CDRP_CMD_MAC_ADDRESS); + + if (err == QLCNIC_RCODE_SUCCESS) + qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET, + QLCNIC_ARG2_CRB_OFFSET, 0, mac); + else { + dev_err(&adapter->pdev->dev, + "Failed to get mac address%d\n", err); + err = -EIO; + } + + return err; +} + +/* Get info of a NIC partition */ +int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info, u8 func_id) +{ + int err; + dma_addr_t nic_dma_t; + struct qlcnic_info *nic_info; + void *nic_info_addr; + size_t nic_size = sizeof(struct qlcnic_info); + + nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, + &nic_dma_t, GFP_KERNEL); + if (!nic_info_addr) + return -ENOMEM; + memset(nic_info_addr, 0, nic_size); + + nic_info = nic_info_addr; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + MSD(nic_dma_t), + LSD(nic_dma_t), + (func_id << 16 | nic_size), + QLCNIC_CDRP_CMD_GET_NIC_INFO); + + if (err == QLCNIC_RCODE_SUCCESS) { + npar_info->pci_func = le16_to_cpu(nic_info->pci_func); + npar_info->op_mode = le16_to_cpu(nic_info->op_mode); + npar_info->phys_port = le16_to_cpu(nic_info->phys_port); + 
npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode); + npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques); + npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); + npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw); + npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw); + npar_info->capabilities = le32_to_cpu(nic_info->capabilities); + npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); + + dev_info(&adapter->pdev->dev, + "phy port: %d switch_mode: %d,\n" + "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n" + "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n", + npar_info->phys_port, npar_info->switch_mode, + npar_info->max_tx_ques, npar_info->max_rx_ques, + npar_info->min_tx_bw, npar_info->max_tx_bw, + npar_info->max_mtu, npar_info->capabilities); + } else { + dev_err(&adapter->pdev->dev, + "Failed to get nic info%d\n", err); + err = -EIO; + } + + dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, + nic_dma_t); + return err; +} + +/* Configure a NIC partition */ +int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic) +{ + int err = -EIO; + dma_addr_t nic_dma_t; + void *nic_info_addr; + struct qlcnic_info *nic_info; + size_t nic_size = sizeof(struct qlcnic_info); + + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + return err; + + nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, + &nic_dma_t, GFP_KERNEL); + if (!nic_info_addr) + return -ENOMEM; + + memset(nic_info_addr, 0, nic_size); + nic_info = nic_info_addr; + + nic_info->pci_func = cpu_to_le16(nic->pci_func); + nic_info->op_mode = cpu_to_le16(nic->op_mode); + nic_info->phys_port = cpu_to_le16(nic->phys_port); + nic_info->switch_mode = cpu_to_le16(nic->switch_mode); + nic_info->capabilities = cpu_to_le32(nic->capabilities); + nic_info->max_mac_filters = nic->max_mac_filters; + nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques); + nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques); + nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw); + nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); + + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + MSD(nic_dma_t), + LSD(nic_dma_t), + ((nic->pci_func << 16) | nic_size), + QLCNIC_CDRP_CMD_SET_NIC_INFO); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to set nic info%d\n", err); + err = -EIO; + } + + dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, + nic_dma_t); + return err; +} + +/* Get PCI Info of a partition */ +int qlcnic_get_pci_info(struct qlcnic_adapter *adapter, + struct qlcnic_pci_info *pci_info) +{ + int err = 0, i; + dma_addr_t pci_info_dma_t; + struct qlcnic_pci_info *npar; + void *pci_info_addr; + size_t npar_size = sizeof(struct qlcnic_pci_info); + size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; + + pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, + &pci_info_dma_t, GFP_KERNEL); + if (!pci_info_addr) + return -ENOMEM; + memset(pci_info_addr, 0, pci_size); + + npar = pci_info_addr; + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + MSD(pci_info_dma_t), + LSD(pci_info_dma_t), + pci_size, + QLCNIC_CDRP_CMD_GET_PCI_INFO); + + if (err == QLCNIC_RCODE_SUCCESS) { + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) { + pci_info->id = le16_to_cpu(npar->id); + pci_info->active = le16_to_cpu(npar->active); + pci_info->type = le16_to_cpu(npar->type); + pci_info->default_port = + le16_to_cpu(npar->default_port); + pci_info->tx_min_bw = + 
le16_to_cpu(npar->tx_min_bw);
+			pci_info->tx_max_bw =
+				le16_to_cpu(npar->tx_max_bw);
+			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
+		}
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get PCI Info %d\n", err);
+		err = -EIO;
+	}
+
+	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
+		pci_info_dma_t);
+	return err;
+}
+
+/* Configure eSwitch for port mirroring */
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
+				u8 enable_mirroring, u8 pci_func)
+{
+	int err = -EIO;
+	u32 arg1;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
+		!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+		return err;
+
+	arg1 = id | (enable_mirroring ? BIT_4 : 0);
+	arg1 |= pci_func << 8;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw->pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
+
+	if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to configure port mirroring for func %d on eswitch %d\n",
+			pci_func, id);
+	} else {
+		dev_info(&adapter->pdev->dev,
+			"Configured eSwitch %d for port mirroring of func %d\n",
+			id, pci_func);
+	}
+
+	return err;
+}
+
+int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
+		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
+
+	size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
+	struct __qlcnic_esw_statistics *stats;
+	dma_addr_t stats_dma_t;
+	void *stats_addr;
+	u32 arg1;
+	int err;
+
+	if (esw_stats == NULL)
+		return -ENOMEM;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
+	    func != adapter->ahw->pci_func) {
+		dev_err(&adapter->pdev->dev,
+			"Not privileged to query stats for func=%d\n", func);
+		return -EIO;
+	}
+
+	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+			&stats_dma_t, GFP_KERNEL);
+	if (!stats_addr) {
+		dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+	memset(stats_addr, 0, stats_size);
+
+	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
+	arg1 |= rx_tx << 15 | stats_size << 16;
+
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw->pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			MSD(stats_dma_t),
+			LSD(stats_dma_t),
+			QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
+
+	if (!err) {
+		stats = stats_addr;
+		esw_stats->context_id = le16_to_cpu(stats->context_id);
+		esw_stats->version = le16_to_cpu(stats->version);
+		esw_stats->size = le16_to_cpu(stats->size);
+		esw_stats->multicast_frames =
+				le64_to_cpu(stats->multicast_frames);
+		esw_stats->broadcast_frames =
+				le64_to_cpu(stats->broadcast_frames);
+		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
+		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
+		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
+		esw_stats->errors = le64_to_cpu(stats->errors);
+		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
+	}
+
+	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+		stats_dma_t);
+	return err;
+}
+
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
+		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
+
+	struct __qlcnic_esw_statistics port_stats;
+	u8 i;
+	int ret = -EIO;
+
+	if (esw_stats == NULL)
+		return -ENOMEM;
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return -EIO;
+	if (adapter->npars == NULL)
+		return -EIO;
+
+	memset(esw_stats, 0, sizeof(u64));
+	esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+	esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+	esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+
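/* Each counter starts at the "not available" sentinel so that
+	 * QLCNIC_ADD_ESW_STATS() can distinguish an unreported counter
+	 * from a genuine zero when per-function stats are folded in. */
+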
esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+	esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL;
+	esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+	esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL;
+	esw_stats->context_id = eswitch;
+
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+		if (adapter->npars[i].phy_port != eswitch)
+			continue;
+
+		memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
+		if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
+			continue;
+
+		esw_stats->size = port_stats.size;
+		esw_stats->version = port_stats.version;
+		QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
+					port_stats.unicast_frames);
+		QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
+					port_stats.multicast_frames);
+		QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
+					port_stats.broadcast_frames);
+		QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
+					port_stats.dropped_frames);
+		QLCNIC_ADD_ESW_STATS(esw_stats->errors,
+					port_stats.errors);
+		QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
+					port_stats.local_frames);
+		QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
+					port_stats.numbytes);
+		ret = 0;
+	}
+	return ret;
+}
+
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
+		const u8 port, const u8 rx_tx)
+{
+
+	u32 arg1;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return -EIO;
+
+	if (func_esw == QLCNIC_STATS_PORT) {
+		if (port >= QLCNIC_MAX_PCI_FUNC)
+			goto err_ret;
+	} else if (func_esw == QLCNIC_STATS_ESWITCH) {
+		if (port >= QLCNIC_NIU_MAX_XG_PORTS)
+			goto err_ret;
+	} else {
+		goto err_ret;
+	}
+
+	if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
+		goto err_ret;
+
+	arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
+	arg1 |= BIT_14 | rx_tx << 15;
+
+	return qlcnic_issue_cmd(adapter,
+			adapter->ahw->pci_func,
+			adapter->fw_hal_version,
+			arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
+
+err_ret:
+	dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d "
+		"rx_tx=%d\n", func_esw, port, rx_tx);
+	return -EIO;
+}
+
+static int
+__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+					u32 *arg1, u32 *arg2)
+{
+	int err = -EIO;
+	u8 pci_func;
+	pci_func = (*arg1 >> 8);
+	err = qlcnic_issue_cmd(adapter,
+			adapter->ahw->pci_func,
+			adapter->fw_hal_version,
+			*arg1,
+			0,
+			0,
+			QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG);
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		*arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+		*arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+		dev_info(&adapter->pdev->dev,
+			"eSwitch port config for pci func %d\n", pci_func);
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get eswitch port config for pci func %d\n",
+			pci_func);
+	}
+	return err;
+}
+/* Configure eSwitch port
+op_mode = 0 for setting default port behavior
+op_mode = 1 for setting vlan id
+op_mode = 2 for deleting vlan id
+op_type = 0 for vlan_id
+op_type = 1 for port vlan_id
+*/
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
+		struct qlcnic_esw_func_cfg *esw_cfg)
+{
+	int err = -EIO;
+	u32 arg1, arg2 = 0;
+	u8 pci_func;
+
+	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+		return err;
+	pci_func = esw_cfg->pci_func;
+	arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
+	arg1 |= (pci_func << 8);
+
+	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+		return err;
+	arg1 &= ~(0x0ff << 8);
+	arg1 |= (pci_func << 8);
+	arg1 &= ~(BIT_2 | BIT_3);
+	switch (esw_cfg->op_mode) {
+	case QLCNIC_PORT_DEFAULTS:
+		arg1 |= (BIT_4 | BIT_6 | BIT_7);
+		arg2 |= (BIT_0 | BIT_1);
+		if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+
arg2 |= (BIT_2 | BIT_3); + if (!(esw_cfg->discard_tagged)) + arg1 &= ~BIT_4; + if (!(esw_cfg->promisc_mode)) + arg1 &= ~BIT_6; + if (!(esw_cfg->mac_override)) + arg1 &= ~BIT_7; + if (!(esw_cfg->mac_anti_spoof)) + arg2 &= ~BIT_0; + if (!(esw_cfg->offload_flags & BIT_0)) + arg2 &= ~(BIT_1 | BIT_2 | BIT_3); + if (!(esw_cfg->offload_flags & BIT_1)) + arg2 &= ~BIT_2; + if (!(esw_cfg->offload_flags & BIT_2)) + arg2 &= ~BIT_3; + break; + case QLCNIC_ADD_VLAN: + arg1 |= (BIT_2 | BIT_5); + arg1 |= (esw_cfg->vlan_id << 16); + break; + case QLCNIC_DEL_VLAN: + arg1 |= (BIT_3 | BIT_5); + arg1 &= ~(0x0ffff << 16); + break; + default: + return err; + } + + err = qlcnic_issue_cmd(adapter, + adapter->ahw->pci_func, + adapter->fw_hal_version, + arg1, + arg2, + 0, + QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to configure eswitch pci func %d\n", pci_func); + } else { + dev_info(&adapter->pdev->dev, + "Configured eSwitch for pci func %d\n", pci_func); + } + + return err; +} + +int +qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + u32 arg1, arg2; + u8 phy_port; + if (adapter->op_mode == QLCNIC_MGMT_FUNC) + phy_port = adapter->npars[esw_cfg->pci_func].phy_port; + else + phy_port = adapter->physical_port; + arg1 = phy_port; + arg1 |= (esw_cfg->pci_func << 8); + if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) + return -EIO; + + esw_cfg->discard_tagged = !!(arg1 & BIT_4); + esw_cfg->host_vlan_tag = !!(arg1 & BIT_5); + esw_cfg->promisc_mode = !!(arg1 & BIT_6); + esw_cfg->mac_override = !!(arg1 & BIT_7); + esw_cfg->vlan_id = LSW(arg1 >> 16); + esw_cfg->mac_anti_spoof = (arg2 & 0x1); + esw_cfg->offload_flags = ((arg2 >> 1) & 0x7); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c new file mode 100644 index 0000000..7c64f2f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -0,0 +1,1234 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "qlcnic.h"
+
+struct qlcnic_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
+#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+
+static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+	{"xmit_called",
+		QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
+	{"xmit_finished",
+		QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
+	{"rx_dropped",
+		QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+	{"tx_dropped",
+		QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+	{"csummed",
+		QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+	{"rx_pkts",
+		QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+	{"lro_pkts",
+		QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+	{"rx_bytes",
+		QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+	{"tx_bytes",
+		QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+	{"lrobytes",
+		QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+	{"lso_frames",
+		QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+	{"xmit_on",
+		QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+	{"xmit_off",
+		QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+	{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
+		QLC_OFF(stats.skb_alloc_failure)},
+	{"null rxbuf",
+		QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+	{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+		QLC_OFF(stats.rx_dma_map_error)},
+	{"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+		QLC_OFF(stats.tx_dma_map_error)},
+
+};
+
+static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"rx unicast frames",
+	"rx multicast frames",
+	"rx broadcast frames",
+	"rx dropped frames",
+	"rx errors",
+	"rx local frames",
+	"rx numbytes",
+	"tx unicast frames",
+	"tx multicast frames",
+	"tx broadcast frames",
+	"tx dropped frames",
+	"tx errors",
+	"tx local frames",
+	"tx numbytes",
+};
+
+#define QLCNIC_STATS_LEN	ARRAY_SIZE(qlcnic_gstrings_stats)
+#define QLCNIC_DEVICE_STATS_LEN	ARRAY_SIZE(qlcnic_device_gstrings_stats)
+
+static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register_Test_on_offline",
+	"Link_Test_on_offline",
+	"Interrupt_Test_offline",
+	"Internal_Loopback_offline",
+	"External_Loopback_offline"
+};
+
+#define QLCNIC_TEST_LEN	ARRAY_SIZE(qlcnic_gstrings_test)
+
+#define QLCNIC_RING_REGS_COUNT	20
+#define QLCNIC_RING_REGS_LEN	(QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define QLCNIC_MAX_EEPROM_LEN	1024
+
+static const u32 diag_registers[] = {
+	CRB_CMDPEG_STATE,
+	CRB_RCVPEG_STATE,
+	CRB_XG_STATE_P3P,
+	CRB_FW_CAPABILITIES_1,
+	ISR_INT_STATE_REG,
+	QLCNIC_CRB_DRV_ACTIVE,
+	QLCNIC_CRB_DEV_STATE,
+	QLCNIC_CRB_DRV_STATE,
+	QLCNIC_CRB_DRV_SCRATCH,
+	QLCNIC_CRB_DEV_PARTITION_INFO,
+	QLCNIC_CRB_DRV_IDC_VER,
+	QLCNIC_PEG_ALIVE_COUNTER,
+	QLCNIC_PEG_HALT_STATUS1,
+	QLCNIC_PEG_HALT_STATUS2,
+	QLCNIC_CRB_PEG_NET_0+0x3c,
+	QLCNIC_CRB_PEG_NET_1+0x3c,
+	QLCNIC_CRB_PEG_NET_2+0x3c,
+	QLCNIC_CRB_PEG_NET_4+0x3c,
+	-1
+};
+
+#define QLCNIC_MGMT_API_VERSION	2
+#define QLCNIC_DEV_INFO_SIZE	1
+#define QLCNIC_ETHTOOL_REGS_VER	2
+static int qlcnic_get_regs_len(struct net_device *dev)
+{
+	return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
+		QLCNIC_DEV_INFO_SIZE + 1;
+}
+
+static int qlcnic_get_eeprom_len(struct net_device *dev)
+{
+	return QLCNIC_FLASH_TOTAL_SIZE;
+}
+
+static void
+qlcnic_get_drvinfo(struct net_device *dev, struct
ethtool_drvinfo *drvinfo) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 fw_major, fw_minor, fw_build; + + fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); + fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); + fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB); + sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strlcpy(drvinfo->driver, qlcnic_driver_name, 32); + strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32); +} + +static int +qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int check_sfp_module = 0; + u16 pcifn = adapter->ahw->pci_func; + + /* read which mode */ + if (adapter->ahw->port_type == QLCNIC_GBE) { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + ecmd->advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + ecmd->duplex = adapter->link_duplex; + ecmd->autoneg = adapter->link_autoneg; + + } else if (adapter->ahw->port_type == QLCNIC_XGBE) { + u32 val; + + val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); + if (val == QLCNIC_PORT_MODE_802_3_AP) { + ecmd->supported = SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_1000baseT_Full; + } else { + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full; + } + + if (netif_running(dev) && adapter->has_link_events) { + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + ecmd->autoneg = adapter->link_autoneg; + ecmd->duplex = adapter->link_duplex; + goto skip; + } + + val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); + ethtool_cmd_speed_set(ecmd, P3P_LINK_SPEED_MHZ * + P3P_LINK_SPEED_VAL(pcifn, val)); + ecmd->duplex = DUPLEX_FULL; + ecmd->autoneg = AUTONEG_DISABLE; + } else + return -EIO; + +skip: + ecmd->phy_address = adapter->physical_port; + ecmd->transceiver = XCVR_EXTERNAL; + + switch (adapter->ahw->board_type) { + case QLCNIC_BRDTYPE_P3P_REF_QG: + case QLCNIC_BRDTYPE_P3P_4_GB: + case QLCNIC_BRDTYPE_P3P_4_GB_MM: + + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + case QLCNIC_BRDTYPE_P3P_10G_CX4: + case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: + case QLCNIC_BRDTYPE_P3P_10000_BASE_T: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->autoneg = adapter->link_autoneg; + break; + case QLCNIC_BRDTYPE_P3P_IMEZ: + case QLCNIC_BRDTYPE_P3P_XG_LOM: + case QLCNIC_BRDTYPE_P3P_HMEZ: + ecmd->supported |= SUPPORTED_MII; + ecmd->advertising |= ADVERTISED_MII; + ecmd->port = PORT_MII; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: + case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: + case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: + ecmd->advertising |= ADVERTISED_TP; + ecmd->supported |= SUPPORTED_TP; + check_sfp_module = netif_running(dev) && + adapter->has_link_events; + case QLCNIC_BRDTYPE_P3P_10G_XFP: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case QLCNIC_BRDTYPE_P3P_10G_TP: + if (adapter->ahw->port_type == QLCNIC_XGBE) { + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + ecmd->advertising |= + 
(ADVERTISED_FIBRE | ADVERTISED_TP); + ecmd->port = PORT_FIBRE; + check_sfp_module = netif_running(dev) && + adapter->has_link_events; + } else { + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + ecmd->advertising |= + (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->port = PORT_TP; + } + break; + default: + dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", + adapter->ahw->board_type); + return -EIO; + } + + if (check_sfp_module) { + switch (adapter->module_type) { + case LINKEVENT_MODULE_OPTICAL_UNKNOWN: + case LINKEVENT_MODULE_OPTICAL_SRLR: + case LINKEVENT_MODULE_OPTICAL_LRM: + case LINKEVENT_MODULE_OPTICAL_SFP_1G: + ecmd->port = PORT_FIBRE; + break; + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: + case LINKEVENT_MODULE_TWINAX: + ecmd->port = PORT_TP; + break; + default: + ecmd->port = PORT_OTHER; + } + } + + return 0; +} + +static int +qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u32 config = 0; + u32 ret = 0; + struct qlcnic_adapter *adapter = netdev_priv(dev); + + if (adapter->ahw->port_type != QLCNIC_GBE) + return -EOPNOTSUPP; + + /* read which mode */ + if (ecmd->duplex) + config |= 0x1; + + if (ecmd->autoneg) + config |= 0x2; + + switch (ethtool_cmd_speed(ecmd)) { + case SPEED_10: + config |= (0 << 8); + break; + case SPEED_100: + config |= (1 << 8); + break; + case SPEED_1000: + config |= (10 << 8); + break; + default: + return -EIO; + } + + ret = qlcnic_fw_cmd_set_port(adapter, config); + + if (ret == QLCNIC_RCODE_NOT_SUPPORTED) + return -EOPNOTSUPP; + else if (ret) + return -EIO; + + adapter->link_speed = ethtool_cmd_speed(ecmd); + adapter->link_duplex = ecmd->duplex; + adapter->link_autoneg = ecmd->autoneg; + + if (!netif_running(dev)) + return 0; + + dev->netdev_ops->ndo_stop(dev); + return dev->netdev_ops->ndo_open(dev); +} + +static void +qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_sds_ring *sds_ring; + u32 *regs_buff = p; + int ring, i = 0, j = 0; + + memset(p, 0, qlcnic_get_regs_len(dev)); + regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) | + (adapter->ahw->revision_id << 16) | (adapter->pdev)->device; + + regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff)); + regs_buff[1] = QLCNIC_MGMT_API_VERSION; + + for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) + regs_buff[i] = QLCRD32(adapter, diag_registers[j]); + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return; + + regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/ + + regs_buff[i++] = 1; /* No. of tx ring */ + regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); + regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer); + + regs_buff[i++] = 2; /* No. of rx ring */ + regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer); + regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer); + + regs_buff[i++] = adapter->max_sds_rings; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &(recv_ctx->sds_rings[ring]); + regs_buff[i++] = readl(sds_ring->crb_sts_consumer); + } +} + +static u32 qlcnic_test_link(struct net_device *dev) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 val; + + val = QLCRD32(adapter, CRB_XG_STATE_P3P); + val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val); + return (val == XG_LINK_UP_P3P) ? 
0 : 1; +} + +static int +qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int offset; + int ret; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = (adapter->pdev)->vendor | + ((adapter->pdev)->device << 16); + offset = eeprom->offset; + + ret = qlcnic_rom_fast_read_words(adapter, offset, bytes, + eeprom->len); + if (ret < 0) + return ret; + + return 0; +} + +static void +qlcnic_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + + ring->rx_pending = adapter->num_rxd; + ring->rx_jumbo_pending = adapter->num_jumbo_rxd; + ring->tx_pending = adapter->num_txd; + + ring->rx_max_pending = adapter->max_rxd; + ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd; + ring->tx_max_pending = MAX_CMD_DESCRIPTORS; + + ring->rx_mini_max_pending = 0; + ring->rx_mini_pending = 0; +} + +static u32 +qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) +{ + u32 num_desc; + num_desc = max(val, min); + num_desc = min(num_desc, max); + num_desc = roundup_pow_of_two(num_desc); + + if (val != num_desc) { + printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", + qlcnic_driver_name, r_name, num_desc, val); + } + + return num_desc; +} + +static int +qlcnic_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u16 num_rxd, num_jumbo_rxd, num_txd; + + if (ring->rx_mini_pending) + return -EOPNOTSUPP; + + num_rxd = qlcnic_validate_ringparam(ring->rx_pending, + MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx"); + + num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending, + MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd, + "rx jumbo"); + + num_txd = qlcnic_validate_ringparam(ring->tx_pending, + MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); + + if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && + num_jumbo_rxd == adapter->num_jumbo_rxd) + return 0; + + adapter->num_rxd = num_rxd; + adapter->num_jumbo_rxd = num_jumbo_rxd; + adapter->num_txd = num_txd; + + return qlcnic_reset_context(adapter); +} + +static void qlcnic_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + + channel->max_rx = rounddown_pow_of_two(min_t(int, + adapter->max_rx_ques, num_online_cpus())); + channel->max_tx = adapter->max_tx_ques; + + channel->rx_count = adapter->max_sds_rings; + channel->tx_count = adapter->max_tx_ques; +} + +static int qlcnic_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int err; + + if (channel->other_count || channel->combined_count || + channel->tx_count != channel->max_tx) + return -EINVAL; + + err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count); + if (err) + return err; + + err = qlcnic_set_max_rss(adapter, channel->rx_count); + netdev_info(dev, "allocated 0x%x sds rings\n", + adapter->max_sds_rings); + return err; +} + +static void +qlcnic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int port = adapter->physical_port; + __u32 val; + + if (adapter->ahw->port_type == QLCNIC_GBE) { + if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) + return; + /* get flow control settings */ + val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); + pause->rx_pause = 
qlcnic_gb_get_rx_flowctl(val); + val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); + switch (port) { + case 0: + pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val)); + break; + case 1: + pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val)); + break; + case 2: + pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val)); + break; + case 3: + default: + pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val)); + break; + } + } else if (adapter->ahw->port_type == QLCNIC_XGBE) { + if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) + return; + pause->rx_pause = 1; + val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); + if (port == 0) + pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val)); + else + pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val)); + } else { + dev_err(&netdev->dev, "Unknown board type: %x\n", + adapter->ahw->port_type); + } +} + +static int +qlcnic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int port = adapter->physical_port; + __u32 val; + + /* read mode */ + if (adapter->ahw->port_type == QLCNIC_GBE) { + if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) + return -EIO; + /* set flow control */ + val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); + + if (pause->rx_pause) + qlcnic_gb_rx_flowctl(val); + else + qlcnic_gb_unset_rx_flowctl(val); + + QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), + val); + /* set autoneg */ + val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); + switch (port) { + case 0: + if (pause->tx_pause) + qlcnic_gb_unset_gb0_mask(val); + else + qlcnic_gb_set_gb0_mask(val); + break; + case 1: + if (pause->tx_pause) + qlcnic_gb_unset_gb1_mask(val); + else + qlcnic_gb_set_gb1_mask(val); + break; + case 2: + if (pause->tx_pause) + qlcnic_gb_unset_gb2_mask(val); + else + qlcnic_gb_set_gb2_mask(val); + break; + case 3: + default: + if (pause->tx_pause) + qlcnic_gb_unset_gb3_mask(val); + else + qlcnic_gb_set_gb3_mask(val); + break; + } + QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val); + } else if (adapter->ahw->port_type == QLCNIC_XGBE) { + if (!pause->rx_pause || pause->autoneg) + return -EOPNOTSUPP; + + if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) + return -EIO; + + val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); + if (port == 0) { + if (pause->tx_pause) + qlcnic_xg_unset_xg0_mask(val); + else + qlcnic_xg_set_xg0_mask(val); + } else { + if (pause->tx_pause) + qlcnic_xg_unset_xg1_mask(val); + else + qlcnic_xg_set_xg1_mask(val); + } + QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val); + } else { + dev_err(&netdev->dev, "Unknown board type: %x\n", + adapter->ahw->port_type); + } + return 0; +} + +static int qlcnic_reg_test(struct net_device *dev) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 data_read; + + data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0)); + if ((data_read & 0xffff) != adapter->pdev->vendor) + return 1; + + return 0; +} + +static int qlcnic_get_sset_count(struct net_device *dev, int sset) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + switch (sset) { + case ETH_SS_TEST: + return QLCNIC_TEST_LEN; + case ETH_SS_STATS: + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN; + return QLCNIC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static int qlcnic_irq_test(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int max_sds_rings = adapter->max_sds_rings; + int ret; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EIO; + + ret = 
qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); + if (ret) + goto clear_it; + + adapter->diag_cnt = 0; + ret = qlcnic_issue_cmd(adapter, adapter->ahw->pci_func, + adapter->fw_hal_version, adapter->ahw->pci_func, + 0, 0, 0x00000011); + if (ret) + goto done; + + msleep(10); + + ret = !adapter->diag_cnt; + +done: + qlcnic_diag_free_res(netdev, max_sds_rings); + +clear_it: + adapter->max_sds_rings = max_sds_rings; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return ret; +} + +#define QLCNIC_ILB_PKT_SIZE 64 +#define QLCNIC_NUM_ILB_PKT 16 +#define QLCNIC_ILB_MAX_RCV_LOOP 10 + +static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[]) +{ + unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00}; + + memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE); + + memcpy(data, mac, ETH_ALEN); + memcpy(data + ETH_ALEN, mac, ETH_ALEN); + + memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data)); +} + +int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]) +{ + unsigned char buff[QLCNIC_ILB_PKT_SIZE]; + qlcnic_create_loopback_buff(buff, mac); + return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE); +} + +static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0]; + struct sk_buff *skb; + int i, loop, cnt = 0; + + for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { + skb = dev_alloc_skb(QLCNIC_ILB_PKT_SIZE); + qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); + skb_put(skb, QLCNIC_ILB_PKT_SIZE); + + adapter->diag_cnt = 0; + qlcnic_xmit_frame(skb, adapter->netdev); + + loop = 0; + do { + msleep(1); + qlcnic_process_rcv_ring_diag(sds_ring); + if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) + break; + } while (!adapter->diag_cnt); + + dev_kfree_skb_any(skb); + + if (!adapter->diag_cnt) + dev_warn(&adapter->pdev->dev, "LB Test: packet %d" + " not received\n", i + 1); + else + cnt++; + } + if (cnt != i) { + dev_warn(&adapter->pdev->dev, "LB Test failed\n"); + return -1; + } + return 0; +} + +static int qlcnic_loopback_test(struct net_device *netdev, u8 mode) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int max_sds_rings = adapter->max_sds_rings; + struct qlcnic_host_sds_ring *sds_ring; + int loop = 0; + int ret; + + if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) { + netdev_info(netdev, "Firmware is not loopback test capable\n"); + return -EOPNOTSUPP; + } + + netdev_info(netdev, "%s loopback test in progress\n", + mode == QLCNIC_ILB_MODE ? 
"internal" : "external"); + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { + netdev_warn(netdev, "Loopback test not supported for non-" + "privileged function\n"); + return 0; + } + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); + if (ret) + goto clear_it; + + sds_ring = &adapter->recv_ctx->sds_rings[0]; + + ret = qlcnic_set_lb_mode(adapter, mode); + if (ret) + goto free_res; + + adapter->diag_cnt = 0; + do { + msleep(500); + qlcnic_process_rcv_ring_diag(sds_ring); + if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { + netdev_info(netdev, "firmware didn't respond to loopback" + " configure request\n"); + ret = -QLCNIC_FW_NOT_RESPOND; + goto free_res; + } else if (adapter->diag_cnt) { + ret = adapter->diag_cnt; + goto free_res; + } + } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state)); + + ret = qlcnic_do_lb_test(adapter); + + qlcnic_clear_lb_mode(adapter); + + free_res: + qlcnic_diag_free_res(netdev, max_sds_rings); + + clear_it: + adapter->max_sds_rings = max_sds_rings; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return ret; +} + +static void +qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, + u64 *data) +{ + memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN); + + data[0] = qlcnic_reg_test(dev); + if (data[0]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[1] = (u64) qlcnic_test_link(dev); + if (data[1]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (eth_test->flags & ETH_TEST_FL_OFFLINE) { + data[2] = qlcnic_irq_test(dev); + if (data[2]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE); + if (data[3]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { + data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE); + if (data[4]) + eth_test->flags |= ETH_TEST_FL_FAILED; + eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + } +} + +static void +qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int index, i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *qlcnic_gstrings_test, + QLCNIC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (index = 0; index < QLCNIC_STATS_LEN; index++) { + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_gstrings_stats[index].stat_string, + ETH_GSTRING_LEN); + } + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) { + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_device_gstrings_stats[i], + ETH_GSTRING_LEN); + } + } +} + +#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \ + (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 
0 : VAL1) + +static void +qlcnic_fill_device_stats(int *index, u64 *data, + struct __qlcnic_esw_statistics *stats) +{ + int ind = *index; + + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes); + + *index = ind; +} + +static void +qlcnic_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + struct qlcnic_esw_statistics port_stats; + int index, ret; + + for (index = 0; index < QLCNIC_STATS_LEN; index++) { + char *p = + (char *)adapter + + qlcnic_gstrings_stats[index].stat_offset; + data[index] = + (qlcnic_gstrings_stats[index].sizeof_stat == + sizeof(u64)) ? *(u64 *)p:(*(u32 *)p); + } + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + + memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics)); + ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, + QLCNIC_QUERY_RX_COUNTER, &port_stats.rx); + if (ret) + return; + + qlcnic_fill_device_stats(&index, data, &port_stats.rx); + + ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, + QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); + if (ret) + return; + + qlcnic_fill_device_stats(&index, data, &port_stats.tx); +} + +static int qlcnic_set_led(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int max_sds_rings = adapter->max_sds_rings; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EIO; + + if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) { + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return -EIO; + } + set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); + } + + if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) + return 0; + + dev_err(&adapter->pdev->dev, + "Failed to set LED blink state.\n"); + break; + + case ETHTOOL_ID_INACTIVE: + if (adapter->nic_ops->config_led(adapter, 0, 0xf)) + dev_err(&adapter->pdev->dev, + "Failed to reset LED blink state.\n"); + + break; + + default: + return -EINVAL; + } + + if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) { + qlcnic_diag_free_res(dev, max_sds_rings); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + } + + return -EIO; +} + +static void +qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 wol_cfg; + + wol->supported = 0; + wol->wolopts = 0; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); + if (wol_cfg & (1UL << adapter->portnum)) + wol->supported |= WAKE_MAGIC; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); + if (wol_cfg & (1UL << adapter->portnum)) + wol->wolopts |= WAKE_MAGIC; +} + +static int +qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 wol_cfg; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); + if (!(wol_cfg & (1 << adapter->portnum))) + return -EOPNOTSUPP; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); + if (wol->wolopts & WAKE_MAGIC) + 
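/* QLCNIC_WOL_CONFIG_NV advertises per-port wake-on-LAN capability; QLCNIC_WOL_CONFIG carries the per-port magic-packet enable bit toggled here */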
wol_cfg |= 1UL << adapter->portnum; + else + wol_cfg &= ~(1UL << adapter->portnum); + + QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg); + + return 0; +} + +/* + * Set the coalescing parameters. Currently only normal is supported. + * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the + * firmware coalescing to default. + */ +static int qlcnic_set_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return -EINVAL; + + /* + * Return an error if unsupported values or parameters are set. + */ + if (ethcoal->rx_coalesce_usecs > 0xffff || + ethcoal->rx_max_coalesced_frames > 0xffff || + ethcoal->tx_coalesce_usecs || + ethcoal->tx_max_coalesced_frames || + ethcoal->rx_coalesce_usecs_irq || + ethcoal->rx_max_coalesced_frames_irq || + ethcoal->tx_coalesce_usecs_irq || + ethcoal->tx_max_coalesced_frames_irq || + ethcoal->stats_block_coalesce_usecs || + ethcoal->use_adaptive_rx_coalesce || + ethcoal->use_adaptive_tx_coalesce || + ethcoal->pkt_rate_low || + ethcoal->rx_coalesce_usecs_low || + ethcoal->rx_max_coalesced_frames_low || + ethcoal->tx_coalesce_usecs_low || + ethcoal->tx_max_coalesced_frames_low || + ethcoal->pkt_rate_high || + ethcoal->rx_coalesce_usecs_high || + ethcoal->rx_max_coalesced_frames_high || + ethcoal->tx_coalesce_usecs_high || + ethcoal->tx_max_coalesced_frames_high) + return -EINVAL; + + if (!ethcoal->rx_coalesce_usecs || + !ethcoal->rx_max_coalesced_frames) { + adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; + adapter->ahw->coal.rx_time_us = + QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; + adapter->ahw->coal.rx_packets = + QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; + } else { + adapter->ahw->coal.flag = 0; + adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs; + adapter->ahw->coal.rx_packets = + ethcoal->rx_max_coalesced_frames; + } + + qlcnic_config_intr_coalesce(adapter); + + return 0; +} + +static int qlcnic_get_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return -EINVAL; + + ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us; + ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets; + + return 0; +} + +static u32 qlcnic_get_msglevel(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = msglvl; +} + +static int +qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + if (fw_dump->clr) + dump->len = fw_dump->tmpl_hdr->size + fw_dump->size; + else + dump->len = 0; + dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; + dump->version = adapter->fw_version; + return 0; +} + +static int +qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer) +{ + int i, copy_sz; + u32 *hdr_ptr, *data; + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + if (!fw_dump->clr) { + netdev_info(netdev, "Dump not available\n"); + return -EINVAL; + } + /* Copy template header first */ + copy_sz 
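/* the dump blob is the template header, converted to little-endian 32-bit words, followed by the raw captured data; the capture buffer is freed below, so a dump can be retrieved only once */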
= fw_dump->tmpl_hdr->size; + hdr_ptr = (u32 *) fw_dump->tmpl_hdr; + data = buffer; + for (i = 0; i < copy_sz/sizeof(u32); i++) + *data++ = cpu_to_le32(*hdr_ptr++); + + /* Copy captured dump data */ + memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size); + dump->len = copy_sz + fw_dump->size; + dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; + + /* Free dump area once data has been captured */ + vfree(fw_dump->data); + fw_dump->data = NULL; + fw_dump->clr = 0; + + return 0; +} + +static int +qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) +{ + int ret = 0; + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + switch (val->flag) { + case QLCNIC_FORCE_FW_DUMP_KEY: + if (!fw_dump->enable) { + netdev_info(netdev, "FW dump not enabled\n"); + return ret; + } + if (fw_dump->clr) { + dev_info(&adapter->pdev->dev, + "Previous dump not cleared, not forcing dump\n"); + return ret; + } + netdev_info(netdev, "Forcing a FW dump\n"); + qlcnic_dev_request_reset(adapter); + break; + case QLCNIC_DISABLE_FW_DUMP: + if (fw_dump->enable) { + netdev_info(netdev, "Disabling FW dump\n"); + fw_dump->enable = 0; + } + break; + case QLCNIC_ENABLE_FW_DUMP: + if (!fw_dump->enable && fw_dump->tmpl_hdr) { + netdev_info(netdev, "Enabling FW dump\n"); + fw_dump->enable = 1; + } + break; + case QLCNIC_FORCE_FW_RESET: + netdev_info(netdev, "Forcing a FW reset\n"); + qlcnic_dev_request_reset(adapter); + adapter->flags &= ~QLCNIC_FW_RESET_OWNER; + break; + default: + if (val->flag > QLCNIC_DUMP_MASK_MAX || + val->flag < QLCNIC_DUMP_MASK_MIN) { + netdev_info(netdev, + "Invalid dump level: 0x%x\n", val->flag); + ret = -EINVAL; + goto out; + } + fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff; + netdev_info(netdev, "Driver mask changed to: 0x%x\n", + fw_dump->tmpl_hdr->drv_cap_mask); + } +out: + return ret; +} + +const struct ethtool_ops qlcnic_ethtool_ops = { + .get_settings = qlcnic_get_settings, + .set_settings = qlcnic_set_settings, + .get_drvinfo = qlcnic_get_drvinfo, + .get_regs_len = qlcnic_get_regs_len, + .get_regs = qlcnic_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = qlcnic_get_eeprom_len, + .get_eeprom = qlcnic_get_eeprom, + .get_ringparam = qlcnic_get_ringparam, + .set_ringparam = qlcnic_set_ringparam, + .get_channels = qlcnic_get_channels, + .set_channels = qlcnic_set_channels, + .get_pauseparam = qlcnic_get_pauseparam, + .set_pauseparam = qlcnic_set_pauseparam, + .get_wol = qlcnic_get_wol, + .set_wol = qlcnic_set_wol, + .self_test = qlcnic_diag_test, + .get_strings = qlcnic_get_strings, + .get_ethtool_stats = qlcnic_get_ethtool_stats, + .get_sset_count = qlcnic_get_sset_count, + .get_coalesce = qlcnic_get_intr_coalesce, + .set_coalesce = qlcnic_set_intr_coalesce, + .set_phys_id = qlcnic_set_led, + .set_msglevel = qlcnic_set_msglevel, + .get_msglevel = qlcnic_get_msglevel, + .get_dump_flag = qlcnic_get_dump_flag, + .get_dump_data = qlcnic_get_dump_data, + .set_dump = qlcnic_set_dump, +}; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h new file mode 100644 index 0000000..d14506f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h @@ -0,0 +1,1023 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#ifndef __QLCNIC_HDR_H_ +#define __QLCNIC_HDR_H_ + +#include <linux/kernel.h> +#include <linux/types.h> + +/* + * The basic unit of access when reading/writing control registers. + */ + +enum { + QLCNIC_HW_H0_CH_HUB_ADR = 0x05, + QLCNIC_HW_H1_CH_HUB_ADR = 0x0E, + QLCNIC_HW_H2_CH_HUB_ADR = 0x03, + QLCNIC_HW_H3_CH_HUB_ADR = 0x01, + QLCNIC_HW_H4_CH_HUB_ADR = 0x06, + QLCNIC_HW_H5_CH_HUB_ADR = 0x07, + QLCNIC_HW_H6_CH_HUB_ADR = 0x08 +}; + +/* Hub 0 */ +enum { + QLCNIC_HW_MN_CRB_AGT_ADR = 0x15, + QLCNIC_HW_MS_CRB_AGT_ADR = 0x25 +}; + +/* Hub 1 */ +enum { + QLCNIC_HW_PS_CRB_AGT_ADR = 0x73, + QLCNIC_HW_SS_CRB_AGT_ADR = 0x20, + QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b, + QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00, + QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01, + QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02, + QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03, + QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04, + QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58, + QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59, + QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a, + QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a, + QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c, + QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f, + QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12, + QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18 +}; + +/* Hub 2 */ +enum { + QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31, + QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19, + QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29, + + QLCNIC_HW_SN_CRB_AGT_ADR = 0x10, + QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20, + QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22, + QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21, + QLCNIC_HW_QM_CRB_AGT_ADR = 0x66, + QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60, + QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61, + QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62, + QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63, + QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09, + QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d, + QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e, + QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11 +}; + +/* Hub 3 */ +enum { + QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A, + QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50, + QLCNIC_HW_EG_CRB_AGT_ADR = 0x51, + QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08 +}; + +/* Hub 4 */ +enum { + QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40, + QLCNIC_HW_PEGN1_CRB_AGT_ADR, + QLCNIC_HW_PEGN2_CRB_AGT_ADR, + QLCNIC_HW_PEGN3_CRB_AGT_ADR, + QLCNIC_HW_PEGNI_CRB_AGT_ADR, + QLCNIC_HW_PEGND_CRB_AGT_ADR, + QLCNIC_HW_PEGNC_CRB_AGT_ADR, + QLCNIC_HW_PEGR0_CRB_AGT_ADR, + QLCNIC_HW_PEGR1_CRB_AGT_ADR, + QLCNIC_HW_PEGR2_CRB_AGT_ADR, + QLCNIC_HW_PEGR3_CRB_AGT_ADR, + QLCNIC_HW_PEGN4_CRB_AGT_ADR +}; + +/* Hub 5 */ +enum { + QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40, + QLCNIC_HW_PEGS1_CRB_AGT_ADR, + QLCNIC_HW_PEGS2_CRB_AGT_ADR, + QLCNIC_HW_PEGS3_CRB_AGT_ADR, + QLCNIC_HW_PEGSI_CRB_AGT_ADR, + QLCNIC_HW_PEGSD_CRB_AGT_ADR, + QLCNIC_HW_PEGSC_CRB_AGT_ADR +}; + +/* Hub 6 */ +enum { + QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46, + QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47, + QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48, + QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49, + QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16, + QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17, + QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05, + QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06, + QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07 +}; + +/* Floaters - non-existent modules */ +#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 + +/* This field defines PCI/X adr [25:20] of agents on the CRB */ +enum { + QLCNIC_HW_PX_MAP_CRB_PH = 0, + QLCNIC_HW_PX_MAP_CRB_PS, + QLCNIC_HW_PX_MAP_CRB_MN, + QLCNIC_HW_PX_MAP_CRB_MS, + QLCNIC_HW_PX_MAP_CRB_PGR1, + QLCNIC_HW_PX_MAP_CRB_SRE, + QLCNIC_HW_PX_MAP_CRB_NIU, + QLCNIC_HW_PX_MAP_CRB_QMN, + QLCNIC_HW_PX_MAP_CRB_SQN0, + QLCNIC_HW_PX_MAP_CRB_SQN1, + QLCNIC_HW_PX_MAP_CRB_SQN2, + QLCNIC_HW_PX_MAP_CRB_SQN3, + QLCNIC_HW_PX_MAP_CRB_QMS, + QLCNIC_HW_PX_MAP_CRB_SQS0, + QLCNIC_HW_PX_MAP_CRB_SQS1, + QLCNIC_HW_PX_MAP_CRB_SQS2, + 
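/* these enumerators double as window indices for QLCNIC_PCI_CRB_WINDOW() below, which maps each agent to a 1 MB CRB window */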
QLCNIC_HW_PX_MAP_CRB_SQS3, + QLCNIC_HW_PX_MAP_CRB_PGN0, + QLCNIC_HW_PX_MAP_CRB_PGN1, + QLCNIC_HW_PX_MAP_CRB_PGN2, + QLCNIC_HW_PX_MAP_CRB_PGN3, + QLCNIC_HW_PX_MAP_CRB_PGND, + QLCNIC_HW_PX_MAP_CRB_PGNI, + QLCNIC_HW_PX_MAP_CRB_PGS0, + QLCNIC_HW_PX_MAP_CRB_PGS1, + QLCNIC_HW_PX_MAP_CRB_PGS2, + QLCNIC_HW_PX_MAP_CRB_PGS3, + QLCNIC_HW_PX_MAP_CRB_PGSD, + QLCNIC_HW_PX_MAP_CRB_PGSI, + QLCNIC_HW_PX_MAP_CRB_SN, + QLCNIC_HW_PX_MAP_CRB_PGR2, + QLCNIC_HW_PX_MAP_CRB_EG, + QLCNIC_HW_PX_MAP_CRB_PH2, + QLCNIC_HW_PX_MAP_CRB_PS2, + QLCNIC_HW_PX_MAP_CRB_CAM, + QLCNIC_HW_PX_MAP_CRB_CAS0, + QLCNIC_HW_PX_MAP_CRB_CAS1, + QLCNIC_HW_PX_MAP_CRB_CAS2, + QLCNIC_HW_PX_MAP_CRB_C2C0, + QLCNIC_HW_PX_MAP_CRB_C2C1, + QLCNIC_HW_PX_MAP_CRB_TIMR, + QLCNIC_HW_PX_MAP_CRB_PGR3, + QLCNIC_HW_PX_MAP_CRB_RPMX1, + QLCNIC_HW_PX_MAP_CRB_RPMX2, + QLCNIC_HW_PX_MAP_CRB_RPMX3, + QLCNIC_HW_PX_MAP_CRB_RPMX4, + QLCNIC_HW_PX_MAP_CRB_RPMX5, + QLCNIC_HW_PX_MAP_CRB_RPMX6, + QLCNIC_HW_PX_MAP_CRB_RPMX7, + QLCNIC_HW_PX_MAP_CRB_XDMA, + QLCNIC_HW_PX_MAP_CRB_I2Q, + QLCNIC_HW_PX_MAP_CRB_ROMUSB, + QLCNIC_HW_PX_MAP_CRB_CAS3, + QLCNIC_HW_PX_MAP_CRB_RPMX0, + QLCNIC_HW_PX_MAP_CRB_RPMX8, + QLCNIC_HW_PX_MAP_CRB_RPMX9, + QLCNIC_HW_PX_MAP_CRB_OCM0, + QLCNIC_HW_PX_MAP_CRB_OCM1, + QLCNIC_HW_PX_MAP_CRB_SMB, + QLCNIC_HW_PX_MAP_CRB_I2C0, + QLCNIC_HW_PX_MAP_CRB_I2C1, + QLCNIC_HW_PX_MAP_CRB_LPC, + QLCNIC_HW_PX_MAP_CRB_PGNC, + QLCNIC_HW_PX_MAP_CRB_PGR0 +}; + +#define BIT_0 0x1 +#define BIT_1 0x2 +#define BIT_2 0x4 +#define BIT_3 0x8 +#define BIT_4 0x10 +#define BIT_5 0x20 +#define BIT_6 0x40 +#define BIT_7 0x80 +#define BIT_8 0x100 +#define BIT_9 0x200 +#define BIT_10 0x400 +#define BIT_11 0x800 +#define BIT_12 0x1000 +#define BIT_13 0x2000 +#define BIT_14 0x4000 +#define BIT_15 0x8000 +#define BIT_16 0x10000 +#define BIT_17 0x20000 +#define BIT_18 0x40000 +#define BIT_19 0x80000 +#define BIT_20 0x100000 +#define BIT_21 0x200000 +#define BIT_22 0x400000 +#define BIT_23 0x800000 +#define BIT_24 0x1000000 +#define BIT_25 0x2000000 +#define BIT_26 0x4000000 +#define BIT_27 0x8000000 +#define BIT_28 0x10000000 +#define BIT_29 0x20000000 +#define BIT_30 0x40000000 +#define BIT_31 0x80000000 + +/* This field defines CRB adr [31:20] of the agents */ + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \ + ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \ + ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \ + ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | 
QLCNIC_HW_C2C1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \ + ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \ + ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \ + ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR) +#define 
QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR) + +#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c) + +#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034) + +#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000) +#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000) + +#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) +#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) +#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) +#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) +#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) +#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) +#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) + +#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) + +#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) +#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) +#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) +#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) +#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) +#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) + +/* Lock IDs for ROM lock */ +#define ROM_LOCK_DRIVER 0x0d417340 + +/****************************************************************************** +* +* Definitions specific to M25P flash +* +******************************************************************************* +*/ + +/* all are 1MB windows */ + +#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000 +#define QLCNIC_PCI_CRB_WINDOW(A) \ + (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE) + +#define QLCNIC_CRB_NIU 
QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU) +#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE) +#define QLCNIC_CRB_ROMUSB \ + QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB) +#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q) +#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0) +#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB) +#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64) + +#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH) +#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2) +#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0) +#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1) +#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2) +#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3) +#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2) +#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND) +#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI) +#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN) +#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN) + +#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS) +#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD + +#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR)) +#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS)) +#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK)) +#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) +#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) +#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) +#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) +#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) +#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) +#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) +#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) +#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) +#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) +#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) +#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) +#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) +#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) + +#define QLCNIC_PCI_MN_2M (0) +#define QLCNIC_PCI_MS_2M (0x80000) +#define QLCNIC_PCI_OCM0_2M (0x000c0000UL) +#define QLCNIC_PCI_CRBSPACE (0x06000000UL) +#define QLCNIC_PCI_CAMQM (0x04800000UL) +#define QLCNIC_PCI_CAMQM_END (0x04800800UL) +#define QLCNIC_PCI_2MB_SIZE (0x00200000UL) +#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL) + +#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM) + +#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL) +#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL) +#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL) +#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL) +#define QLCNIC_ADDR_OCM1_MAX 
(0x00000002004fffffULL) +#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL) +#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL) + +/* + * Register offsets for MN + */ +#define QLCNIC_MIU_CONTROL (0x000) +#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL) + +/* 200ms delay in each loop */ +#define QLCNIC_NIU_PHY_WAITLEN 200000 +/* 10 seconds before we give up */ +#define QLCNIC_NIU_PHY_WAITMAX 50 +#define QLCNIC_NIU_MAX_GBE_PORTS 4 +#define QLCNIC_NIU_MAX_XG_PORTS 2 + +#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000) +#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c) +#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098) + +#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \ + (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000) +#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \ + (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000) + + +#define TEST_AGT_CTRL (0x00) + +#define TA_CTL_START BIT_0 +#define TA_CTL_ENABLE BIT_1 +#define TA_CTL_WRITE BIT_2 +#define TA_CTL_BUSY BIT_3 + +/* + * Register offsets for MN + */ +#define MIU_TEST_AGT_BASE (0x90) + +#define MIU_TEST_AGT_ADDR_LO (0x04) +#define MIU_TEST_AGT_ADDR_HI (0x08) +#define MIU_TEST_AGT_WRDATA_LO (0x10) +#define MIU_TEST_AGT_WRDATA_HI (0x14) +#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20) +#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24) +#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1))) +#define MIU_TEST_AGT_RDDATA_LO (0x18) +#define MIU_TEST_AGT_RDDATA_HI (0x1c) +#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28) +#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c) +#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1))) + +#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 +#define MIU_TEST_AGT_UPPER_ADDR(off) (0) + +/* + * Register offsets for MS + */ +#define SIU_TEST_AGT_BASE (0x60) + +#define SIU_TEST_AGT_ADDR_LO (0x04) +#define SIU_TEST_AGT_ADDR_HI (0x18) +#define SIU_TEST_AGT_WRDATA_LO (0x08) +#define SIU_TEST_AGT_WRDATA_HI (0x0c) +#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i))) +#define SIU_TEST_AGT_RDDATA_LO (0x10) +#define SIU_TEST_AGT_RDDATA_HI (0x14) +#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i))) + +#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 +#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) + +/* XG Link status */ +#define XG_LINK_UP 0x10 +#define XG_LINK_DOWN 0x20 + +#define XG_LINK_UP_P3P 0x01 +#define XG_LINK_DOWN_P3P 0x02 +#define XG_LINK_STATE_P3P_MASK 0xf +#define XG_LINK_STATE_P3P(pcifn, val) \ + (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK) + +#define P3P_LINK_SPEED_MHZ 100 +#define P3P_LINK_SPEED_MASK 0xff +#define P3P_LINK_SPEED_REG(pcifn) \ + (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) +#define P3P_LINK_SPEED_VAL(pcifn, reg) \ + (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK) + +#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000) +#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg)) +#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150)) +#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154)) +#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158)) +#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100)) +#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120)) +#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124)) + +#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200)) +#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700)) +#define QLCNIC_REG(X) (NIC_CRB_BASE+(X)) +#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X)) + +#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18)) +#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c)) +#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20)) +#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24)) +#define 
QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28)) + +#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50)) +#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c)) + +#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98)) +#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8)) +#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec)) + +#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4)) + +#define CRB_V2P_0 (QLCNIC_REG(0x290)) +#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) +#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0)) + +#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128)) +#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0)) + +/* + * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address + * which can be read by the Phantom host to get producer/consumer indexes from + * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following + * registers will be used for the addresses of the ring's shared memory + * on the Phantom. + */ + +#define qlcnic_get_temp_val(x) ((x) >> 16) +#define qlcnic_get_temp_state(x) ((x) & 0xffff) +#define qlcnic_encode_temp(val, state) (((val) << 16) | (state)) + +/* + * Temperature control. + */ +enum { + QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */ + QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */ + QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */ +}; + +/* Lock IDs for PHY lock */ +#define PHY_LOCK_DRIVER 0x44524956 + +/* Used for PS PCI Memory access */ +#define PCIX_PS_OP_ADDR_LO (0x10000) +/* via CRB (PS side only) */ +#define PCIX_PS_OP_ADDR_HI (0x10004) + +#define PCIX_INT_VECTOR (0x10100) +#define PCIX_INT_MASK (0x10104) + +#define PCIX_OCM_WINDOW (0x10800) +#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func)) + +#define PCIX_TARGET_STATUS (0x10118) +#define PCIX_TARGET_STATUS_F1 (0x10160) +#define PCIX_TARGET_STATUS_F2 (0x10164) +#define PCIX_TARGET_STATUS_F3 (0x10168) +#define PCIX_TARGET_STATUS_F4 (0x10360) +#define PCIX_TARGET_STATUS_F5 (0x10364) +#define PCIX_TARGET_STATUS_F6 (0x10368) +#define PCIX_TARGET_STATUS_F7 (0x1036c) + +#define PCIX_TARGET_MASK (0x10128) +#define PCIX_TARGET_MASK_F1 (0x10170) +#define PCIX_TARGET_MASK_F2 (0x10174) +#define PCIX_TARGET_MASK_F3 (0x10178) +#define PCIX_TARGET_MASK_F4 (0x10370) +#define PCIX_TARGET_MASK_F5 (0x10374) +#define PCIX_TARGET_MASK_F6 (0x10378) +#define PCIX_TARGET_MASK_F7 (0x1037c) + +#define PCIX_MSI_F(i) (0x13000+((i)*4)) + +#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg)) +#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg)) +#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg)) + +#define PCIE_SEM0_LOCK (0x1c000) +#define PCIE_SEM0_UNLOCK (0x1c004) +#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N)) +#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N)) + +#define PCIE_SETUP_FUNCTION (0x12040) +#define PCIE_SETUP_FUNCTION2 (0x12048) +#define PCIE_MISCCFG_RC (0x1206c) +#define PCIE_TGT_SPLIT_CHICKEN (0x12080) +#define PCIE_CHICKEN3 (0x120c8) + +#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC)) +#define PCIE_MAX_MASTER_SPLIT (0x14048) + +#define QLCNIC_PORT_MODE_NONE 0 +#define QLCNIC_PORT_MODE_XG 1 +#define QLCNIC_PORT_MODE_GB 2 +#define QLCNIC_PORT_MODE_802_3_AP 3 +#define QLCNIC_PORT_MODE_AUTO_NEG 4 +#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5 +#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6 +#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24)) +#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198)) + +#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184)) +#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188)) + +#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1 +#define 
QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c)) + +#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14)) +#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0)) +#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8)) +#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac)) +#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138)) +#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) + +#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) +#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) +#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) +#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174)) +#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c)) +#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) +#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) + +/* Device State */ +#define QLCNIC_DEV_COLD 0x1 +#define QLCNIC_DEV_INITIALIZING 0x2 +#define QLCNIC_DEV_READY 0x3 +#define QLCNIC_DEV_NEED_RESET 0x4 +#define QLCNIC_DEV_NEED_QUISCENT 0x5 +#define QLCNIC_DEV_FAILED 0x6 +#define QLCNIC_DEV_QUISCENT 0x7 + +#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */ +#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ +#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ + +#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4))) +#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) +#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) +#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) +#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4))) +#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4))) + +#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4))) +#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4)) + +#define QLCNIC_TYPE_NIC 1 +#define QLCNIC_TYPE_FCOE 2 +#define QLCNIC_TYPE_ISCSI 3 + +#define QLCNIC_RCODE_DRIVER_INFO 0x20000000 +#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30 +#define QLCNIC_RCODE_FATAL_ERROR BIT_31 +#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) +#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) + +#define FW_POLL_DELAY (1 * HZ) +#define FW_FAIL_THRESH 2 + +#define QLCNIC_RESET_TIMEOUT_SECS 10 +#define QLCNIC_INIT_TIMEOUT_SECS 30 +#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000 +#define QLCNIC_RCVPEG_CHECK_DELAY 10 +#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60 +#define QLCNIC_CMDPEG_CHECK_DELAY 500 +#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200 +#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45 + +#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) +#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) + +/* + * PCI Interrupt Vector Values. 
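+ * For PCI function n, the vector bit is 0x0080 << n, i.e. bit (7 + n).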
+ */ +#define PCIX_INT_VECTOR_BIT_F0 0x0080 +#define PCIX_INT_VECTOR_BIT_F1 0x0100 +#define PCIX_INT_VECTOR_BIT_F2 0x0200 +#define PCIX_INT_VECTOR_BIT_F3 0x0400 +#define PCIX_INT_VECTOR_BIT_F4 0x0800 +#define PCIX_INT_VECTOR_BIT_F5 0x1000 +#define PCIX_INT_VECTOR_BIT_F6 0x2000 +#define PCIX_INT_VECTOR_BIT_F7 0x4000 + +struct qlcnic_legacy_intr_set { + u32 int_vec_bit; + u32 tgt_status_reg; + u32 tgt_mask_reg; + u32 pci_int_reg; +}; + +#define QLCNIC_FW_API 0x1b216c +#define QLCNIC_DRV_OP_MODE 0x1b2170 +#define QLCNIC_MSIX_BASE 0x132110 +#define QLCNIC_MAX_PCI_FUNC 8 +#define QLCNIC_MAX_VLAN_FILTERS 64 + +/* FW dump defines */ +#define MIU_TEST_CTR 0x41000090 +#define MIU_TEST_ADDR_LO 0x41000094 +#define MIU_TEST_ADDR_HI 0x41000098 +#define FLASH_ROM_WINDOW 0x42110030 +#define FLASH_ROM_DATA 0x42150000 + +static const u32 MIU_TEST_READ_DATA[] = { + 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, }; + +#define QLCNIC_FW_DUMP_REG1 0x00130060 +#define QLCNIC_FW_DUMP_REG2 0x001e0000 +#define QLCNIC_FLASH_SEM2_LK 0x0013C010 +#define QLCNIC_FLASH_SEM2_ULK 0x0013C014 +#define QLCNIC_FLASH_LOCK_ID 0x001B2100 + +#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do { \ + writel((addr & 0xFFFF0000), (void *) (bar0 + \ + QLCNIC_FW_DUMP_REG1)); \ + readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \ + *data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + \ + LSW(addr))); \ +} while (0) + +#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do { \ + writel((addr & 0xFFFF0000), (void *) (bar0 + \ + QLCNIC_FW_DUMP_REG1)); \ + readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \ + writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\ + readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr))); \ +} while (0) + +/* PCI function operational mode */ +enum { + QLCNIC_MGMT_FUNC = 0, + QLCNIC_PRIV_FUNC = 1, + QLCNIC_NON_PRIV_FUNC = 2 +}; + +enum { + QLCNIC_PORT_DEFAULTS = 0, + QLCNIC_ADD_VLAN = 1, + QLCNIC_DEL_VLAN = 2 +}; + +#define QLC_DEV_DRV_DEFAULT 0x11111111 + +#define LSB(x) ((uint8_t)(x)) +#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8)) + +#define LSW(x) ((uint16_t)((uint32_t)(x))) +#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16)) + +#define LSD(x) ((uint32_t)((uint64_t)(x))) +#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) + +#define QLCNIC_LEGACY_INTR_CONFIG \ +{ \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ + .tgt_mask_reg = 
ISR_INT_TARGET_MASK_F6, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ +} + +/* NIU REGS */ + +#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1) + +/* + * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) + * + * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable + * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream + * Bit 2 : enable_rx => 1:enable frame recv, 0:disable + * Bit 3 : rx_synced => R/O: recv enable synched to recv stream + * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable + * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore + * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal + * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op + * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op + * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op + * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op + * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op + */ +#define qlcnic_gb_rx_flowctl(config_word) \ + ((config_word) |= 1 << 5) +#define qlcnic_gb_get_rx_flowctl(config_word) \ + _qlcnic_crb_get_bit((config_word), 5) +#define qlcnic_gb_unset_rx_flowctl(config_word) \ + ((config_word) &= ~(1 << 5)) + +/* + * NIU GB Pause Ctl Register + */ + +#define qlcnic_gb_set_gb0_mask(config_word) \ + ((config_word) |= 1 << 0) +#define qlcnic_gb_set_gb1_mask(config_word) \ + ((config_word) |= 1 << 2) +#define qlcnic_gb_set_gb2_mask(config_word) \ + ((config_word) |= 1 << 4) +#define qlcnic_gb_set_gb3_mask(config_word) \ + ((config_word) |= 1 << 6) + +#define qlcnic_gb_get_gb0_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 0) +#define qlcnic_gb_get_gb1_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 2) +#define qlcnic_gb_get_gb2_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 4) +#define qlcnic_gb_get_gb3_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 6) + +#define qlcnic_gb_unset_gb0_mask(config_word) \ + ((config_word) &= ~(1 << 0)) +#define qlcnic_gb_unset_gb1_mask(config_word) \ + ((config_word) &= ~(1 << 2)) +#define qlcnic_gb_unset_gb2_mask(config_word) \ + ((config_word) &= ~(1 << 4)) +#define qlcnic_gb_unset_gb3_mask(config_word) \ + ((config_word) &= ~(1 << 6)) + +/* + * NIU XG Pause Ctl Register + * + * Bit 0 : xg0_mask => 1:disable tx pause frames + * Bit 1 : xg0_request => 1:request single pause frame + * Bit 2 : xg0_on_off => 1:request is pause on, 0:off + * Bit 3 : xg1_mask => 1:disable tx pause frames + * Bit 4 : xg1_request => 1:request single pause frame + * Bit 5 : xg1_on_off => 1:request is pause on, 0:off + */ + +#define qlcnic_xg_set_xg0_mask(config_word) \ + ((config_word) |= 1 << 0) +#define qlcnic_xg_set_xg1_mask(config_word) \ + ((config_word) |= 1 << 3) + +#define qlcnic_xg_get_xg0_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 0) +#define qlcnic_xg_get_xg1_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 3) + +#define qlcnic_xg_unset_xg0_mask(config_word) \ + ((config_word) &= ~(1 << 0)) +#define qlcnic_xg_unset_xg1_mask(config_word) \ + ((config_word) &= ~(1 << 3)) + +/* + * NIU XG Pause Ctl Register + * + * Bit 0 : xg0_mask => 1:disable tx pause frames + * Bit 1 : xg0_request => 1:request single pause frame + * Bit 2 : xg0_on_off => 1:request is pause on, 0:off + * Bit 3 : xg1_mask => 
1:disable tx pause frames + * Bit 4 : xg1_request => 1:request single pause frame + * Bit 5 : xg1_on_off => 1:request is pause on, 0:off + */ + +/* + * PHY-Specific MII control/status registers. + */ +#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4 +#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17 + +/* + * PHY-Specific Status Register (reg 17). + * + * Bit 0 : jabber => 1:jabber detected, 0:not + * Bit 1 : polarity => 1:polarity reversed, 0:normal + * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled + * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled + * Bit 4 : energydetect => 1:sleep, 0:active + * Bit 5 : downshift => 1:downshift, 0:no downshift + * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) + * Bits 7-9 : cablelen => not valid in 10Mb/s mode + * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m + * Bit 10 : link => 1:link up, 0:link down + * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet + * Bit 12 : pagercvd => 1:page received, 0:page not received + * Bit 13 : duplex => 1:full duplex, 0:half duplex + * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd + */ + +#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) + +#define qlcnic_set_phy_speed(config_word, val) \ + ((config_word) |= ((val & 0x03) << 14)) +#define qlcnic_set_phy_duplex(config_word) \ + ((config_word) |= 1 << 13) +#define qlcnic_clear_phy_duplex(config_word) \ + ((config_word) &= ~(1 << 13)) + +#define qlcnic_get_phy_link(config_word) \ + _qlcnic_crb_get_bit(config_word, 10) +#define qlcnic_get_phy_duplex(config_word) \ + _qlcnic_crb_get_bit(config_word, 13) + +#define QLCNIC_NIU_NON_PROMISC_MODE 0 +#define QLCNIC_NIU_PROMISC_MODE 1 +#define QLCNIC_NIU_ALLMULTI_MODE 2 + +struct crb_128M_2M_sub_block_map { + unsigned valid; + unsigned start_128M; + unsigned end_128M; + unsigned start_2M; +}; + +struct crb_128M_2M_block_map{ + struct crb_128M_2M_sub_block_map sub_block[16]; +}; +#endif /* __QLCNIC_HDR_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c new file mode 100644 index 0000000..74e9d7b --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -0,0 +1,1787 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include "qlcnic.h" + +#include <linux/slab.h> +#include <net/ip.h> +#include <linux/bitops.h> + +#define MASK(n) ((1ULL<<(n))-1) +#define OCM_WIN_P3P(addr) (addr & 0xffc0000) + +#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) + +#define CRB_BLK(off) ((off >> 20) & 0x3f) +#define CRB_SUBBLK(off) ((off >> 16) & 0xf) +#define CRB_WINDOW_2M (0x130060) +#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) +#define CRB_INDIRECT_2M (0x1e0000UL) + + +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + return readl(addr) | (((u64) readl(addr + 4)) << 32LL); +} +#endif + +#ifndef writeq +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(((u32) (val)), (addr)); + writel(((u32) (val >> 32)), (addr + 4)); +} +#endif + +static const struct crb_128M_2M_block_map +crb_128M_2M_map[64] __cacheline_aligned_in_smp = { + {{{0, 0, 0, 0} } }, /* 0: PCI */ + {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ + {1, 0x0110000, 0x0120000, 0x130000}, + {1, 0x0120000, 0x0122000, 0x124000}, + {1, 0x0130000, 0x0132000, 0x126000}, + {1, 0x0140000, 0x0142000, 0x128000}, + {1, 0x0150000, 0x0152000, 0x12a000}, + {1, 0x0160000, 0x0170000, 0x110000}, + {1, 0x0170000, 0x0172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x01e0000, 0x01e0800, 0x122000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ + {{{0, 0, 0, 0} } }, /* 3: */ + {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ + {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ + {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ + {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ + {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x08f0000, 0x08f2000, 0x172000} } }, + {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x09f0000, 0x09f2000, 0x176000} } }, + {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, 
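+ /* each sub-block row reads {valid, start_128M, end_128M, start_2M}: a register range in the 128 MB BAR layout and its offset within the 2 MB window (see struct crb_128M_2M_sub_block_map in qlcnic_hdr.h) */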
+ {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0af0000, 0x0af2000, 0x17a000} } }, + {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, + {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ + {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ + {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ + {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ + {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ + {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ + {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ + {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ + {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ + {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ + {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ + {{{0, 0, 0, 0} } }, /* 23: */ + {{{0, 0, 0, 0} } }, /* 24: */ + {{{0, 0, 0, 0} } }, /* 25: */ + {{{0, 0, 0, 0} } }, /* 26: */ + {{{0, 0, 0, 0} } }, /* 27: */ + {{{0, 0, 0, 0} } }, /* 28: */ + {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ + {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ + {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ + {{{0} } }, /* 32: PCI */ + {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ + {1, 0x2110000, 0x2120000, 0x130000}, + {1, 0x2120000, 0x2122000, 0x124000}, + {1, 0x2130000, 0x2132000, 0x126000}, + {1, 0x2140000, 0x2142000, 0x128000}, + {1, 0x2150000, 0x2152000, 0x12a000}, + {1, 0x2160000, 0x2170000, 0x110000}, + {1, 0x2170000, 0x2172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ + {{{0} } }, /* 35: */ + {{{0} } }, /* 36: */ + {{{0} } }, /* 37: */ + {{{0} } }, /* 38: */ + {{{0} } }, /* 39: */ + {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ + {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ + {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ + {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ + {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ + {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ + {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ + {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ + {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ + {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ + {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ + {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ + {{{0} } }, /* 52: */ + {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ + {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: 
RPMX8 */ + {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ + {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ + {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ + {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ + {{{0} } }, /* 59: I2C0 */ + {{{0} } }, /* 60: I2C1 */ + {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ + {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ + {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ +}; + +/* + * top 12 bits of crb internal address (hub, agent) + */ +static const unsigned crb_hub_agt[64] = { + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PS, + QLCNIC_HW_CRB_HUB_AGT_ADR_MN, + QLCNIC_HW_CRB_HUB_AGT_ADR_MS, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_SRE, + QLCNIC_HW_CRB_HUB_AGT_ADR_NIU, + QLCNIC_HW_CRB_HUB_AGT_ADR_QMN, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, + QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, + QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4, + QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGND, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI, + QLCNIC_HW_CRB_HUB_AGT_ADR_SN, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_EG, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PS, + QLCNIC_HW_CRB_HUB_AGT_ADR_CAM, + 0, + 0, + 0, + 0, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7, + QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, + QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9, + QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_SMB, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC, + 0, +}; + +/* PCI Windowing for DDR regions. */ + +#define QLCNIC_PCIE_SEM_TIMEOUT 10000 + +int +qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) +{ + int done = 0, timeout = 0; + + while (!done) { + done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem))); + if (done == 1) + break; + if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { + dev_err(&adapter->pdev->dev, + "Failed to acquire sem=%d lock; holdby=%d\n", + sem, id_reg ? 
QLCRD32(adapter, id_reg) : -1); + return -EIO; + } + msleep(1); + } + + if (id_reg) + QLCWR32(adapter, id_reg, adapter->portnum); + + return 0; +} + +void +qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) +{ + QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem))); +} + +static int +qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, + struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) +{ + u32 i, producer, consumer; + struct qlcnic_cmd_buffer *pbuf; + struct cmd_desc_type0 *cmd_desc; + struct qlcnic_host_tx_ring *tx_ring; + + i = 0; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return -EIO; + + tx_ring = adapter->tx_ring; + __netif_tx_lock_bh(tx_ring->txq); + + producer = tx_ring->producer; + consumer = tx_ring->sw_consumer; + + if (nr_desc >= qlcnic_tx_avail(tx_ring)) { + netif_tx_stop_queue(tx_ring->txq); + smp_mb(); + if (qlcnic_tx_avail(tx_ring) > nr_desc) { + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_tx_wake_queue(tx_ring->txq); + } else { + adapter->stats.xmit_off++; + __netif_tx_unlock_bh(tx_ring->txq); + return -EBUSY; + } + } + + do { + cmd_desc = &cmd_desc_arr[i]; + + pbuf = &tx_ring->cmd_buf_arr[producer]; + pbuf->skb = NULL; + pbuf->frag_count = 0; + + memcpy(&tx_ring->desc_head[producer], + &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); + + producer = get_next_index(producer, tx_ring->num_desc); + i++; + + } while (i != nr_desc); + + tx_ring->producer = producer; + + qlcnic_update_cmd_producer(adapter, tx_ring); + + __netif_tx_unlock_bh(tx_ring->txq); + + return 0; +} + +static int +qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, + __le16 vlan_id, unsigned op) +{ + struct qlcnic_nic_req req; + struct qlcnic_mac_req *mac_req; + struct qlcnic_vlan_req *vlan_req; + u64 word; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); + + word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + mac_req = (struct qlcnic_mac_req *)&req.words[0]; + mac_req->op = op; + memcpy(mac_req->mac_addr, addr, 6); + + vlan_req = (struct qlcnic_vlan_req *)&req.words[1]; + vlan_req->vlan_id = vlan_id; + + return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); +} + +static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr) +{ + struct list_head *head; + struct qlcnic_mac_list_s *cur; + + /* look up if already exists */ + list_for_each(head, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_list_s, list); + if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) + return 0; + } + + cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC); + if (cur == NULL) { + dev_err(&adapter->netdev->dev, + "failed to add mac address filter\n"); + return -ENOMEM; + } + memcpy(cur->mac_addr, addr, ETH_ALEN); + + if (qlcnic_sre_macaddr_change(adapter, + cur->mac_addr, 0, QLCNIC_MAC_ADD)) { + kfree(cur); + return -EIO; + } + + list_add_tail(&cur->list, &adapter->mac_list); + return 0; +} + +void qlcnic_set_multi(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + static const u8 bcast_addr[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + u32 mode = VPORT_MISS_MODE_DROP; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return; + + qlcnic_nic_add_mac(adapter, adapter->mac_addr); + qlcnic_nic_add_mac(adapter, bcast_addr); + + if (netdev->flags & IFF_PROMISC) { + if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) + mode = 
VPORT_MISS_MODE_ACCEPT_ALL; + goto send_fw_cmd; + } + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > adapter->max_mc_count)) { + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + goto send_fw_cmd; + } + + if (!netdev_mc_empty(netdev)) { + netdev_for_each_mc_addr(ha, netdev) { + qlcnic_nic_add_mac(adapter, ha->addr); + } + } + +send_fw_cmd: + if (mode == VPORT_MISS_MODE_ACCEPT_ALL) { + qlcnic_alloc_lb_filters_mem(adapter); + adapter->mac_learn = 1; + } else { + adapter->mac_learn = 0; + } + + qlcnic_nic_set_promisc(adapter, mode); +} + +int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) +{ + struct qlcnic_nic_req req; + u64 word; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(mode); + + return qlcnic_send_cmd_descs(adapter, + (struct cmd_desc_type0 *)&req, 1); +} + +void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mac_list_s *cur; + struct list_head *head = &adapter->mac_list; + + while (!list_empty(head)) { + cur = list_entry(head->next, struct qlcnic_mac_list_s, list); + qlcnic_sre_macaddr_change(adapter, + cur->mac_addr, 0, QLCNIC_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + +void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) +{ + struct qlcnic_filter *tmp_fil; + struct hlist_node *tmp_hnode, *n; + struct hlist_head *head; + int i; + + for (i = 0; i < adapter->fhash.fmax; i++) { + head = &(adapter->fhash.fhead[i]); + + hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) + { + if (jiffies > + (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) { + qlcnic_sre_macaddr_change(adapter, + tmp_fil->faddr, tmp_fil->vlan_id, + tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : + QLCNIC_MAC_DEL); + spin_lock_bh(&adapter->mac_learn_lock); + adapter->fhash.fnum--; + hlist_del(&tmp_fil->fnode); + spin_unlock_bh(&adapter->mac_learn_lock); + kfree(tmp_fil); + } + } + } +} + +void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) +{ + struct qlcnic_filter *tmp_fil; + struct hlist_node *tmp_hnode, *n; + struct hlist_head *head; + int i; + + for (i = 0; i < adapter->fhash.fmax; i++) { + head = &(adapter->fhash.fhead[i]); + + hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr, + tmp_fil->vlan_id, tmp_fil->vlan_id ? + QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL); + spin_lock_bh(&adapter->mac_learn_lock); + adapter->fhash.fnum--; + hlist_del(&tmp_fil->fnode); + spin_unlock_bh(&adapter->mac_learn_lock); + kfree(tmp_fil); + } + } +} + +int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag) +{ + struct qlcnic_nic_req req; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK | + ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32)); + + req.words[0] = cpu_to_le64(flag); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n", + flag ? 
"Set" : "Reset"); + return rv; +} + +int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + if (qlcnic_set_fw_loopback(adapter, mode)) + return -EIO; + + if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) { + qlcnic_set_fw_loopback(adapter, mode); + return -EIO; + } + + msleep(1000); + return 0; +} + +void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter) +{ + int mode = VPORT_MISS_MODE_DROP; + struct net_device *netdev = adapter->netdev; + + qlcnic_set_fw_loopback(adapter, 0); + + if (netdev->flags & IFF_PROMISC) + mode = VPORT_MISS_MODE_ACCEPT_ALL; + else if (netdev->flags & IFF_ALLMULTI) + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + + qlcnic_nic_set_promisc(adapter, mode); + msleep(1000); +} + +/* + * Send the interrupt coalescing parameter set by ethtool to the card. + */ +int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter) +{ + struct qlcnic_nic_req req; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE | + ((u64) adapter->portnum << 16)); + + req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32); + req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets | + ((u64) adapter->ahw->coal.rx_time_us) << 16); + req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out | + ((u64) adapter->ahw->coal.type) << 32 | + ((u64) adapter->ahw->coal.sts_ring_mask) << 40); + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "Could not send interrupt coalescing parameters\n"); + return rv; +} + +int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "Could not send configure hw lro request\n"); + + return rv; +} + +int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable) + return 0; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "Could not send configure bridge mode request\n"); + + adapter->flags ^= QLCNIC_BRIDGE_ENABLED; + + return rv; +} + + +#define RSS_HASHTYPE_IP_TCP 0x3 + +int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable) +{ + struct qlcnic_nic_req req; + u64 word; + int i, rv; + + static const u64 key[] = { + 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, + 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, + 0x255b0ec26d5a56daULL + }; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + /* + * RSS request: + 
* bits 3-0: hash_method + * 5-4: hash_type_ipv4 + * 7-6: hash_type_ipv6 + * 8: enable + * 9: use indirection table + * 47-10: reserved + * 63-48: indirection table mask + */ + word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | + ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | + ((u64)(enable & 0x1) << 8) | + ((0x7ULL) << 48); + req.words[0] = cpu_to_le64(word); + for (i = 0; i < 5; i++) + req.words[i+1] = cpu_to_le64(key[i]); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, "could not configure RSS\n"); + + return rv; +} + +int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd) +{ + struct qlcnic_nic_req req; + struct qlcnic_ipaddr *ipa; + u64 word; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(cmd); + ipa = (struct qlcnic_ipaddr *)&req.words[1]; + ipa->ipv4 = ip; + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "could not notify %s IP 0x%x request\n", + (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip); + + return rv; +} + +int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + req.words[0] = cpu_to_le64(enable | (enable << 8)); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "could not configure link notification\n"); + + return rv; +} + +int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_LRO_REQUEST | + ((u64)adapter->portnum << 16) | + ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56); + + req.req_hdr = cpu_to_le64(word); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "could not cleanup lro flows\n"); + + return rv; +} + +/* + * qlcnic_change_mtu - Change the Maximum Transfer Unit + * @returns 0 on success, negative on failure + */ + +int qlcnic_change_mtu(struct net_device *netdev, int mtu) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int rc = 0; + + if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) { + dev_err(&adapter->netdev->dev, "mtu must be between %d and" + " %d bytes\n", P3P_MIN_MTU, P3P_MAX_MTU); + return -EINVAL; + } + + rc = qlcnic_fw_cmd_set_mtu(adapter, mtu); + + if (!rc) + netdev->mtu = mtu; + + return rc; +} + + +u32 qlcnic_fix_features(struct net_device *netdev, u32 features) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + u32 changed = features ^ netdev->features; + features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + } + + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + return features; +} + + +int qlcnic_set_features(struct net_device *netdev, u32 features) +{ + struct qlcnic_adapter *adapter = 
netdev_priv(netdev); + u32 changed = netdev->features ^ features; + int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0; + + if (!(changed & NETIF_F_LRO)) + return 0; + + netdev->features = features ^ NETIF_F_LRO; + + if (qlcnic_config_hw_lro(adapter, hw_lro)) + return -EIO; + + if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter)) + return -EIO; + + return 0; +} + +/* + * Changes the CRB window to the specified window. + */ + /* Returns < 0 if off is not valid, + * 1 if window access is needed. 'off' is set to offset from + * CRB space in 128M pci map + * 0 if no window access is needed. 'off' is set to 2M addr + * In: 'off' is offset from base in 128M pci map + */ +static int +qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter, + ulong off, void __iomem **addr) +{ + const struct crb_128M_2M_sub_block_map *m; + + if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE)) + return -EINVAL; + + off -= QLCNIC_PCI_CRBSPACE; + + /* + * Try direct map + */ + m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; + + if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { + *addr = adapter->ahw->pci_base0 + m->start_2M + + (off - m->start_128M); + return 0; + } + + /* + * Not in direct map, use crb window + */ + *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); + return 1; +} + +/* + * In: 'off' is offset from CRB space in 128M pci map + * Out: 'off' is 2M pci map addr + * side effect: lock crb window + */ +static int +qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) +{ + u32 window; + void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M; + + off -= QLCNIC_PCI_CRBSPACE; + + window = CRB_HI(off); + if (window == 0) { + dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off); + return -EIO; + } + + writel(window, addr); + if (readl(addr) != window) { + if (printk_ratelimit()) + dev_warn(&adapter->pdev->dev, + "failed to set CRB window to %d off 0x%lx\n", + window, off); + return -EIO; + } + return 0; +} + +int +qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data) +{ + unsigned long flags; + int rv; + void __iomem *addr = NULL; + + rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); + + if (rv == 0) { + writel(data, addr); + return 0; + } + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw->crb_lock, flags); + crb_win_lock(adapter); + rv = qlcnic_pci_set_crbwindow_2M(adapter, off); + if (!rv) + writel(data, addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); + return rv; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -EIO; +} + +u32 +qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) +{ + unsigned long flags; + int rv; + u32 data = -1; + void __iomem *addr = NULL; + + rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); + + if (rv == 0) + return readl(addr); + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw->crb_lock, flags); + crb_win_lock(adapter); + if (!qlcnic_pci_set_crbwindow_2M(adapter, off)) + data = readl(addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); + return data; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -1; +} + + +void __iomem * +qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset) +{ + void __iomem *addr = NULL; + + WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr)); 
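+ /* callers only pass direct-mapped CRB offsets; a nonzero return here (window access needed, or invalid offset) trips the WARN_ON */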
+ + return addr; +} + + +static int +qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter, + u64 addr, u32 *start) +{ + u32 window; + + window = OCM_WIN_P3P(addr); + + writel(window, adapter->ahw->ocm_win_crb); + /* read back to flush */ + readl(adapter->ahw->ocm_win_crb); + + *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); + return 0; +} + +static int +qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off, + u64 *data, int op) +{ + void __iomem *addr; + int ret; + u32 start; + + mutex_lock(&adapter->ahw->mem_lock); + + ret = qlcnic_pci_set_window_2M(adapter, off, &start); + if (ret != 0) + goto unlock; + + addr = adapter->ahw->pci_base0 + start; + + if (op == 0) /* read */ + *data = readq(addr); + else /* write */ + writeq(*data, addr); + +unlock: + mutex_unlock(&adapter->ahw->mem_lock); + + return ret; +} + +void +qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) +{ + void __iomem *addr = adapter->ahw->pci_base0 + + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); + + mutex_lock(&adapter->ahw->mem_lock); + *data = readq(addr); + mutex_unlock(&adapter->ahw->mem_lock); +} + +void +qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) +{ + void __iomem *addr = adapter->ahw->pci_base0 + + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); + + mutex_lock(&adapter->ahw->mem_lock); + writeq(data, addr); + mutex_unlock(&adapter->ahw->mem_lock); +} + +#define MAX_CTL_CHECK 1000 + +int +qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, + u64 off, u64 data) +{ + int i, j, ret; + u32 temp, off8; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P3 onward, test agent base for MIU and SIU is same */ + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, + QLCNIC_ADDR_QDR_NET_MAX)) { + mem_crb = qlcnic_get_ioaddr(adapter, + QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { + mem_crb = qlcnic_get_ioaddr(adapter, + QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) + return qlcnic_pci_mem_access_direct(adapter, off, &data, 1); + + return -EIO; + +correct: + off8 = off & ~0xf; + + mutex_lock(&adapter->ahw->mem_lock); + + writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); + + i = 0; + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE), + (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + ret = -EIO; + goto done; + } + + i = (off & 0xf) ? 0 : 2; + writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)), + mem_crb + MIU_TEST_AGT_WRDATA(i)); + writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)), + mem_crb + MIU_TEST_AGT_WRDATA(i+1)); + i = (off & 0xf) ? 
2 : 0; + + writel(data & 0xffffffff, + mem_crb + MIU_TEST_AGT_WRDATA(i)); + writel((data >> 32) & 0xffffffff, + mem_crb + MIU_TEST_AGT_WRDATA(i+1)); + + writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), + (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to write through agent\n"); + ret = -EIO; + } else + ret = 0; + +done: + mutex_unlock(&adapter->ahw->mem_lock); + + return ret; +} + +int +qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, + u64 off, u64 *data) +{ + int j, ret; + u32 temp, off8; + u64 val; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P3 onward, test agent base for MIU and SIU is same */ + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, + QLCNIC_ADDR_QDR_NET_MAX)) { + mem_crb = qlcnic_get_ioaddr(adapter, + QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { + mem_crb = qlcnic_get_ioaddr(adapter, + QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) { + return qlcnic_pci_mem_access_direct(adapter, + off, data, 0); + } + + return -EIO; + +correct: + off8 = off & ~0xf; + + mutex_lock(&adapter->ahw->mem_lock); + + writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EIO; + } else { + off8 = MIU_TEST_AGT_RDDATA_LO; + if (off & 0xf) + off8 = MIU_TEST_AGT_RDDATA_UPPER_LO; + + temp = readl(mem_crb + off8 + 4); + val = (u64)temp << 32; + val |= readl(mem_crb + off8); + *data = val; + ret = 0; + } + + mutex_unlock(&adapter->ahw->mem_lock); + + return ret; +} + +int qlcnic_get_board_info(struct qlcnic_adapter *adapter) +{ + int offset, board_type, magic; + struct pci_dev *pdev = adapter->pdev; + + offset = QLCNIC_FW_MAGIC_OFFSET; + if (qlcnic_rom_fast_read(adapter, offset, &magic)) + return -EIO; + + if (magic != QLCNIC_BDINFO_MAGIC) { + dev_err(&pdev->dev, "invalid board config, magic=%08x\n", + magic); + return -EIO; + } + + offset = QLCNIC_BRDTYPE_OFFSET; + if (qlcnic_rom_fast_read(adapter, offset, &board_type)) + return -EIO; + + adapter->ahw->board_type = board_type; + + if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { + u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I); + if ((gpio & 0x8000) == 0) + board_type = QLCNIC_BRDTYPE_P3P_10G_TP; + } + + switch (board_type) { + case QLCNIC_BRDTYPE_P3P_HMEZ: + case QLCNIC_BRDTYPE_P3P_XG_LOM: + case QLCNIC_BRDTYPE_P3P_10G_CX4: + case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: + case QLCNIC_BRDTYPE_P3P_IMEZ: + case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: + case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: + case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: + case QLCNIC_BRDTYPE_P3P_10G_XFP: + case QLCNIC_BRDTYPE_P3P_10000_BASE_T: + adapter->ahw->port_type = QLCNIC_XGBE; + break; + case QLCNIC_BRDTYPE_P3P_REF_QG: + case QLCNIC_BRDTYPE_P3P_4_GB: + case QLCNIC_BRDTYPE_P3P_4_GB_MM: + 
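+ /* 1G (GbE) board types */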
adapter->ahw->port_type = QLCNIC_GBE; + break; + case QLCNIC_BRDTYPE_P3P_10G_TP: + adapter->ahw->port_type = (adapter->portnum < 2) ? + QLCNIC_XGBE : QLCNIC_GBE; + break; + default: + dev_err(&pdev->dev, "unknown board type %x\n", board_type); + adapter->ahw->port_type = QLCNIC_XGBE; + break; + } + + return 0; +} + +int +qlcnic_wol_supported(struct qlcnic_adapter *adapter) +{ + u32 wol_cfg; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); + if (wol_cfg & (1UL << adapter->portnum)) { + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); + if (wol_cfg & (1 << adapter->portnum)) + return 1; + } + + return 0; +} + +int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) +{ + struct qlcnic_nic_req req; + int rv; + u64 word; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64((u64)rate << 32); + req.words[1] = cpu_to_le64(state); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv) + dev_err(&adapter->pdev->dev, "LED configuration failed.\n"); + + return rv; +} + +/* FW dump related functions */ +static u32 +qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, + u32 *buffer) +{ + int i; + u32 addr, data; + struct __crb *crb = &entry->region.crb; + void __iomem *base = adapter->ahw->pci_base0; + + addr = crb->addr; + + for (i = 0; i < crb->no_ops; i++) { + QLCNIC_RD_DUMP_REG(addr, base, &data); + *buffer++ = cpu_to_le32(addr); + *buffer++ = cpu_to_le32(data); + addr += crb->stride; + } + return crb->no_ops * 2 * sizeof(u32); +} + +static u32 +qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, u32 *buffer) +{ + int i, k, timeout = 0; + void __iomem *base = adapter->ahw->pci_base0; + u32 addr, data; + u8 opcode, no_ops; + struct __ctrl *ctr = &entry->region.ctrl; + struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr; + + addr = ctr->addr; + no_ops = ctr->no_ops; + + for (i = 0; i < no_ops; i++) { + k = 0; + opcode = 0; + for (k = 0; k < 8; k++) { + if (!(ctr->opcode & (1 << k))) + continue; + switch (1 << k) { + case QLCNIC_DUMP_WCRB: + QLCNIC_WR_DUMP_REG(addr, base, ctr->val1); + break; + case QLCNIC_DUMP_RWCRB: + QLCNIC_RD_DUMP_REG(addr, base, &data); + QLCNIC_WR_DUMP_REG(addr, base, data); + break; + case QLCNIC_DUMP_ANDCRB: + QLCNIC_RD_DUMP_REG(addr, base, &data); + QLCNIC_WR_DUMP_REG(addr, base, + (data & ctr->val2)); + break; + case QLCNIC_DUMP_ORCRB: + QLCNIC_RD_DUMP_REG(addr, base, &data); + QLCNIC_WR_DUMP_REG(addr, base, + (data | ctr->val3)); + break; + case QLCNIC_DUMP_POLLCRB: + while (timeout <= ctr->timeout) { + QLCNIC_RD_DUMP_REG(addr, base, &data); + if ((data & ctr->val2) == ctr->val1) + break; + msleep(1); + timeout++; + } + if (timeout > ctr->timeout) { + dev_info(&adapter->pdev->dev, + "Timed out, aborting poll CRB\n"); + return -EINVAL; + } + break; + case QLCNIC_DUMP_RD_SAVE: + if (ctr->index_a) + addr = t_hdr->saved_state[ctr->index_a]; + QLCNIC_RD_DUMP_REG(addr, base, &data); + t_hdr->saved_state[ctr->index_v] = data; + break; + case QLCNIC_DUMP_WRT_SAVED: + if (ctr->index_v) + data = t_hdr->saved_state[ctr->index_v]; + else + data = ctr->val1; + if (ctr->index_a) + addr = t_hdr->saved_state[ctr->index_a]; + QLCNIC_WR_DUMP_REG(addr, base, data); + break; + case QLCNIC_DUMP_MOD_SAVE_ST: + data = t_hdr->saved_state[ctr->index_v]; + data <<= ctr->shl_val; + 
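+ /* remaining saved-state transform: shift right, mask with val2, OR in val3, add val1 */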
data >>= ctr->shr_val; + if (ctr->val2) + data &= ctr->val2; + data |= ctr->val3; + data += ctr->val1; + t_hdr->saved_state[ctr->index_v] = data; + break; + default: + dev_info(&adapter->pdev->dev, + "Unknown opcode\n"); + break; + } + } + addr += ctr->stride; + } + return 0; +} + +static u32 +qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, + u32 *buffer) +{ + int loop; + u32 val, data = 0; + struct __mux *mux = &entry->region.mux; + void __iomem *base = adapter->ahw->pci_base0; + + val = mux->val; + for (loop = 0; loop < mux->no_ops; loop++) { + QLCNIC_WR_DUMP_REG(mux->addr, base, val); + QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data); + *buffer++ = cpu_to_le32(val); + *buffer++ = cpu_to_le32(data); + val += mux->val_stride; + } + return 2 * mux->no_ops * sizeof(u32); +} + +static u32 +qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, + u32 *buffer) +{ + int i, loop; + u32 cnt, addr, data, que_id = 0; + void __iomem *base = adapter->ahw->pci_base0; + struct __queue *que = &entry->region.que; + + addr = que->read_addr; + cnt = que->read_addr_cnt; + + for (loop = 0; loop < que->no_ops; loop++) { + QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id); + addr = que->read_addr; + for (i = 0; i < cnt; i++) { + QLCNIC_RD_DUMP_REG(addr, base, &data); + *buffer++ = cpu_to_le32(data); + addr += que->read_addr_stride; + } + que_id += que->stride; + } + return que->no_ops * cnt * sizeof(u32); +} + +static u32 +qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, + u32 *buffer) +{ + int i; + u32 data; + void __iomem *addr; + struct __ocm *ocm = &entry->region.ocm; + + addr = adapter->ahw->pci_base0 + ocm->read_addr; + for (i = 0; i < ocm->no_ops; i++) { + data = readl(addr); + *buffer++ = cpu_to_le32(data); + addr += ocm->read_addr_stride; + } + return ocm->no_ops * sizeof(u32); +} + +static u32 +qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, + u32 *buffer) +{ + int i, count = 0; + u32 fl_addr, size, val, lck_val, addr; + struct __mem *rom = &entry->region.mem; + void __iomem *base = adapter->ahw->pci_base0; + + fl_addr = rom->addr; + size = rom->size/4; +lock_try: + lck_val = readl(base + QLCNIC_FLASH_SEM2_LK); + if (!lck_val && count < MAX_CTL_CHECK) { + msleep(10); + count++; + goto lock_try; + } + writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID)); + for (i = 0; i < size; i++) { + addr = fl_addr & 0xFFFF0000; + QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr); + addr = LSW(fl_addr) + FLASH_ROM_DATA; + QLCNIC_RD_DUMP_REG(addr, base, &val); + fl_addr += 4; + *buffer++ = cpu_to_le32(val); + } + readl(base + QLCNIC_FLASH_SEM2_ULK); + return rom->size; +} + +static u32 +qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, u32 *buffer) +{ + int i; + u32 cnt, val, data, addr; + void __iomem *base = adapter->ahw->pci_base0; + struct __cache *l1 = &entry->region.cache; + + val = l1->init_tag_val; + + for (i = 0; i < l1->no_ops; i++) { + QLCNIC_WR_DUMP_REG(l1->addr, base, val); + QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val)); + addr = l1->read_addr; + cnt = l1->read_addr_num; + while (cnt) { + QLCNIC_RD_DUMP_REG(addr, base, &data); + *buffer++ = cpu_to_le32(data); + addr += l1->read_addr_stride; + cnt--; + } + val += l1->stride; + } + return l1->no_ops * l1->read_addr_num * sizeof(u32); +} + +static u32 +qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, u32 *buffer) +{ + int i; + u32 cnt, 
val, data, addr; + u8 poll_mask, poll_to, time_out = 0; + void __iomem *base = adapter->ahw->pci_base0; + struct __cache *l2 = &entry->region.cache; + + val = l2->init_tag_val; + poll_mask = LSB(MSW(l2->ctrl_val)); + poll_to = MSB(MSW(l2->ctrl_val)); + + for (i = 0; i < l2->no_ops; i++) { + QLCNIC_WR_DUMP_REG(l2->addr, base, val); + if (LSW(l2->ctrl_val)) + QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base, + LSW(l2->ctrl_val)); + if (!poll_mask) + goto skip_poll; + do { + QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data); + if (!(data & poll_mask)) + break; + msleep(1); + time_out++; + } while (time_out <= poll_to); + + if (time_out > poll_to) { + dev_err(&adapter->pdev->dev, + "Timeout exceeded in %s, aborting dump\n", + __func__); + return -EINVAL; + } +skip_poll: + addr = l2->read_addr; + cnt = l2->read_addr_num; + while (cnt) { + QLCNIC_RD_DUMP_REG(addr, base, &data); + *buffer++ = cpu_to_le32(data); + addr += l2->read_addr_stride; + cnt--; + } + val += l2->stride; + } + return l2->no_ops * l2->read_addr_num * sizeof(u32); +} + +static u32 +qlcnic_read_memory(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, u32 *buffer) +{ + u32 addr, data, test, ret = 0; + int i, reg_read; + struct __mem *mem = &entry->region.mem; + void __iomem *base = adapter->ahw->pci_base0; + + reg_read = mem->size; + addr = mem->addr; + /* check for data size of multiple of 16 and 16 byte alignment */ + if ((addr & 0xf) || (reg_read%16)) { + dev_info(&adapter->pdev->dev, + "Unaligned memory addr:0x%x size:0x%x\n", + addr, reg_read); + return -EINVAL; + } + + mutex_lock(&adapter->ahw->mem_lock); + + while (reg_read != 0) { + QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr); + QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0); + QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base, + TA_CTL_ENABLE | TA_CTL_START); + + for (i = 0; i < MAX_CTL_CHECK; i++) { + QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test); + if (!(test & TA_CTL_BUSY)) + break; + } + if (i == MAX_CTL_CHECK) { + if (printk_ratelimit()) { + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EINVAL; + goto out; + } + } + for (i = 0; i < 4; i++) { + QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data); + *buffer++ = cpu_to_le32(data); + } + addr += 16; + reg_read -= 16; + ret += 16; + } +out: + mutex_unlock(&adapter->ahw->mem_lock); + return mem->size; +} + +static u32 +qlcnic_dump_nop(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, u32 *buffer) +{ + entry->hdr.flags |= QLCNIC_DUMP_SKIP; + return 0; +} + +struct qlcnic_dump_operations fw_dump_ops[] = { + { QLCNIC_DUMP_NOP, qlcnic_dump_nop }, + { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb }, + { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux }, + { QLCNIC_DUMP_QUEUE, qlcnic_dump_que }, + { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom }, + { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm }, + { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl }, + { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache }, + { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache }, + { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache }, + { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache }, + { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache }, + { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache }, + { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache }, + { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache }, + { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom }, + { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory }, + { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl }, + { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop }, + { QLCNIC_DUMP_RDEND, qlcnic_dump_nop }, +}; + +/* Walk the template and collect dump for each entry in the 
dump template */ +static int +qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry, + u32 size) +{ + int ret = 1; + if (size != entry->hdr.cap_size) { + dev_info(dev, + "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n", + entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size); + dev_info(dev, "Aborting further dump capture\n"); + ret = 0; + } + return ret; +} + +int qlcnic_dump_fw(struct qlcnic_adapter *adapter) +{ + u32 *buffer; + char mesg[64]; + char *msg[] = {mesg, NULL}; + int i, k, ops_cnt, ops_index, dump_size = 0; + u32 entry_offset, dump, no_entries, buf_offset = 0; + struct qlcnic_dump_entry *entry; + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr; + + if (fw_dump->clr) { + dev_info(&adapter->pdev->dev, + "Previous dump not cleared, not capturing dump\n"); + return -EIO; + } + /* Calculate the size for dump data area only */ + for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++) + if (i & tmpl_hdr->drv_cap_mask) + dump_size += tmpl_hdr->cap_sizes[k]; + if (!dump_size) + return -EIO; + + fw_dump->data = vzalloc(dump_size); + if (!fw_dump->data) { + dev_info(&adapter->pdev->dev, + "Unable to allocate (%d KB) for fw dump\n", + dump_size/1024); + return -ENOMEM; + } + buffer = fw_dump->data; + fw_dump->size = dump_size; + no_entries = tmpl_hdr->num_entries; + ops_cnt = ARRAY_SIZE(fw_dump_ops); + entry_offset = tmpl_hdr->offset; + tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION; + tmpl_hdr->sys_info[1] = adapter->fw_version; + + for (i = 0; i < no_entries; i++) { + entry = (void *)tmpl_hdr + entry_offset; + if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) { + entry->hdr.flags |= QLCNIC_DUMP_SKIP; + entry_offset += entry->hdr.offset; + continue; + } + /* Find the handler for this entry */ + ops_index = 0; + while (ops_index < ops_cnt) { + if (entry->hdr.type == fw_dump_ops[ops_index].opcode) + break; + ops_index++; + } + if (ops_index == ops_cnt) { + dev_info(&adapter->pdev->dev, + "Invalid entry type %d, exiting dump\n", + entry->hdr.type); + goto error; + } + /* Collect dump for this entry */ + dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer); + if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, + dump)) + entry->hdr.flags |= QLCNIC_DUMP_SKIP; + buf_offset += entry->hdr.cap_size; + entry_offset += entry->hdr.offset; + buffer = fw_dump->data + buf_offset; + } + if (dump_size != buf_offset) { + dev_info(&adapter->pdev->dev, + "Captured(%d) and expected size(%d) do not match\n", + buf_offset, dump_size); + goto error; + } else { + fw_dump->clr = 1; + snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", + adapter->netdev->name); + dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n", + fw_dump->size); + /* Send a udev event to notify availability of FW dump */ + kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg); + return 0; + } +error: + vfree(fw_dump->data); + return -EINVAL; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c new file mode 100644 index 0000000..3b6741e --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -0,0 +1,1898 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include <linux/netdevice.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/if_vlan.h> +#include "qlcnic.h" + +struct crb_addr_pair { + u32 addr; + u32 data; +}; + +#define QLCNIC_MAX_CRB_XFORM 60 +static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM]; + +#define crb_addr_transform(name) \ + (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \ + QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20) + +#define QLCNIC_ADDR_ERROR (0xffffffff) + +static void +qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring); + +static int +qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter); + +static void crb_addr_transform_setup(void) +{ + crb_addr_transform(XDMA); + crb_addr_transform(TIMR); + crb_addr_transform(SRE); + crb_addr_transform(SQN3); + crb_addr_transform(SQN2); + crb_addr_transform(SQN1); + crb_addr_transform(SQN0); + crb_addr_transform(SQS3); + crb_addr_transform(SQS2); + crb_addr_transform(SQS1); + crb_addr_transform(SQS0); + crb_addr_transform(RPMX7); + crb_addr_transform(RPMX6); + crb_addr_transform(RPMX5); + crb_addr_transform(RPMX4); + crb_addr_transform(RPMX3); + crb_addr_transform(RPMX2); + crb_addr_transform(RPMX1); + crb_addr_transform(RPMX0); + crb_addr_transform(ROMUSB); + crb_addr_transform(SN); + crb_addr_transform(QMN); + crb_addr_transform(QMS); + crb_addr_transform(PGNI); + crb_addr_transform(PGND); + crb_addr_transform(PGN3); + crb_addr_transform(PGN2); + crb_addr_transform(PGN1); + crb_addr_transform(PGN0); + crb_addr_transform(PGSI); + crb_addr_transform(PGSD); + crb_addr_transform(PGS3); + crb_addr_transform(PGS2); + crb_addr_transform(PGS1); + crb_addr_transform(PGS0); + crb_addr_transform(PS); + crb_addr_transform(PH); + crb_addr_transform(NIU); + crb_addr_transform(I2Q); + crb_addr_transform(EG); + crb_addr_transform(MN); + crb_addr_transform(MS); + crb_addr_transform(CAS2); + crb_addr_transform(CAS1); + crb_addr_transform(CAS0); + crb_addr_transform(CAM); + crb_addr_transform(C2C1); + crb_addr_transform(C2C0); + crb_addr_transform(SMB); + crb_addr_transform(OCM0); + crb_addr_transform(I2C0); +} + +void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_rx_buffer *rx_buf; + int i, ring; + + recv_ctx = adapter->recv_ctx; + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + for (i = 0; i < rds_ring->num_desc; ++i) { + rx_buf = &(rds_ring->rx_buf_arr[i]); + if (rx_buf->skb == NULL) + continue; + + pci_unmap_single(adapter->pdev, + rx_buf->dma, + rds_ring->dma_size, + PCI_DMA_FROMDEVICE); + + dev_kfree_skb_any(rx_buf->skb); + } + } +} + +void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_rx_buffer *rx_buf; + int i, ring; + + recv_ctx = adapter->recv_ctx; + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + INIT_LIST_HEAD(&rds_ring->free_list); + + rx_buf = rds_ring->rx_buf_arr; + for (i = 0; i < rds_ring->num_desc; i++) { + list_add_tail(&rx_buf->list, + &rds_ring->free_list); + rx_buf++; + } + } +} + +void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_buffer *cmd_buf; + struct qlcnic_skb_frag *buffrag; + int i, j; + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + + cmd_buf = tx_ring->cmd_buf_arr; + for (i = 0; i < tx_ring->num_desc; i++) { + buffrag = cmd_buf->frag_array; + if (buffrag->dma) { 
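+ /* frag 0 maps the linear skb data via pci_map_single(); the page frags that follow are unmapped with pci_unmap_page() below */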
pci_unmap_single(adapter->pdev, buffrag->dma, + buffrag->length, PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } + for (j = 0; j < cmd_buf->frag_count; j++) { + buffrag++; + if (buffrag->dma) { + pci_unmap_page(adapter->pdev, buffrag->dma, + buffrag->length, + PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } + } + if (cmd_buf->skb) { + dev_kfree_skb_any(cmd_buf->skb); + cmd_buf->skb = NULL; + } + cmd_buf++; + } +} + +void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_tx_ring *tx_ring; + int ring; + + recv_ctx = adapter->recv_ctx; + + if (recv_ctx->rds_rings == NULL) + goto skip_rds; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + vfree(rds_ring->rx_buf_arr); + rds_ring->rx_buf_arr = NULL; + } + kfree(recv_ctx->rds_rings); + +skip_rds: + if (adapter->tx_ring == NULL) + return; + + tx_ring = adapter->tx_ring; + vfree(tx_ring->cmd_buf_arr); + tx_ring->cmd_buf_arr = NULL; + kfree(adapter->tx_ring); + adapter->tx_ring = NULL; +} + +int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_rx_buffer *rx_buf; + int ring, i, size; + + struct qlcnic_cmd_buffer *cmd_buf_arr; + struct net_device *netdev = adapter->netdev; + + size = sizeof(struct qlcnic_host_tx_ring); + tx_ring = kzalloc(size, GFP_KERNEL); + if (tx_ring == NULL) { + dev_err(&netdev->dev, "failed to allocate tx ring struct\n"); + return -ENOMEM; + } + adapter->tx_ring = tx_ring; + + tx_ring->num_desc = adapter->num_txd; + tx_ring->txq = netdev_get_tx_queue(netdev, 0); + + cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); + if (cmd_buf_arr == NULL) { + dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); + goto err_out; + } + tx_ring->cmd_buf_arr = cmd_buf_arr; + + recv_ctx = adapter->recv_ctx; + + size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring); + rds_ring = kzalloc(size, GFP_KERNEL); + if (rds_ring == NULL) { + dev_err(&netdev->dev, "failed to allocate rds ring struct\n"); + goto err_out; + } + recv_ctx->rds_rings = rds_ring; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + switch (ring) { + case RCV_RING_NORMAL: + rds_ring->num_desc = adapter->num_rxd; + rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN; + rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; + break; + + case RCV_RING_JUMBO: + rds_ring->num_desc = adapter->num_jumbo_rxd; + rds_ring->dma_size = + QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) + rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA; + + rds_ring->skb_size = + rds_ring->dma_size + NET_IP_ALIGN; + break; + } + rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); + if (rds_ring->rx_buf_arr == NULL) { + dev_err(&netdev->dev, "Failed to allocate " + "rx buffer ring %d\n", ring); + goto err_out; + } + INIT_LIST_HEAD(&rds_ring->free_list); + /* + * Now go through all of them, set reference handles + * and put them in the queues. 
+ */ + rx_buf = rds_ring->rx_buf_arr; + for (i = 0; i < rds_ring->num_desc; i++) { + list_add_tail(&rx_buf->list, + &rds_ring->free_list); + rx_buf->ref_handle = i; + rx_buf++; + } + spin_lock_init(&rds_ring->lock); + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + sds_ring->irq = adapter->msix_entries[ring].vector; + sds_ring->adapter = adapter; + sds_ring->num_desc = adapter->num_rxd; + + for (i = 0; i < NUM_RCV_DESC_RINGS; i++) + INIT_LIST_HEAD(&sds_ring->free_list[i]); + } + + return 0; + +err_out: + qlcnic_free_sw_resources(adapter); + return -ENOMEM; +} + +/* + * Utility to translate from internal Phantom CRB address + * to external PCI CRB address. + */ +static u32 qlcnic_decode_crb_addr(u32 addr) +{ + int i; + u32 base_addr, offset, pci_base; + + crb_addr_transform_setup(); + + pci_base = QLCNIC_ADDR_ERROR; + base_addr = addr & 0xfff00000; + offset = addr & 0x000fffff; + + for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) { + if (crb_addr_xform[i] == base_addr) { + pci_base = i << 20; + break; + } + } + if (pci_base == QLCNIC_ADDR_ERROR) + return pci_base; + else + return pci_base + offset; +} + +#define QLCNIC_MAX_ROM_WAIT_USEC 100 + +static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter) +{ + long timeout = 0; + long done = 0; + + cond_resched(); + + while (done == 0) { + done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS); + done &= 2; + if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) { + dev_err(&adapter->pdev->dev, + "Timeout reached waiting for rom done"); + return -EIO; + } + udelay(1); + } + return 0; +} + +static int do_rom_fast_read(struct qlcnic_adapter *adapter, + u32 addr, u32 *valp) +{ + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb); + if (qlcnic_wait_rom_done(adapter)) { + dev_err(&adapter->pdev->dev, "Error waiting for rom done\n"); + return -EIO; + } + /* reset abyte_cnt and dummy_byte_cnt */ + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0); + udelay(10); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + + *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA); + return 0; +} + +static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, + u8 *bytes, size_t size) +{ + int addridx; + int ret = 0; + + for (addridx = addr; addridx < (addr + size); addridx += 4) { + int v; + ret = do_rom_fast_read(adapter, addridx, &v); + if (ret != 0) + break; + *(__le32 *)bytes = cpu_to_le32(v); + bytes += 4; + } + + return ret; +} + +int +qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, + u8 *bytes, size_t size) +{ + int ret; + + ret = qlcnic_rom_lock(adapter); + if (ret < 0) + return ret; + + ret = do_rom_fast_read_words(adapter, addr, bytes, size); + + qlcnic_rom_unlock(adapter); + return ret; +} + +int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp) +{ + int ret; + + if (qlcnic_rom_lock(adapter) != 0) + return -EIO; + + ret = do_rom_fast_read(adapter, addr, valp); + qlcnic_rom_unlock(adapter); + return ret; +} + +int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) +{ + int addr, val; + int i, n, init_delay; + struct crb_addr_pair *buf; + unsigned offset; + u32 off; + struct pci_dev *pdev = adapter->pdev; + + QLCWR32(adapter, CRB_CMDPEG_STATE, 0); + QLCWR32(adapter, CRB_RCVPEG_STATE, 0); + + qlcnic_rom_lock(adapter); + QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); + 
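+ /* assert a global software reset so the CRB init list read from flash below is replayed against a quiesced chip */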
qlcnic_rom_unlock(adapter); + + /* Init HW CRB block */ + if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || + qlcnic_rom_fast_read(adapter, 4, &n) != 0) { + dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n); + return -EIO; + } + offset = n & 0xffffU; + n = (n >> 16) & 0xffffU; + + if (n >= 1024) { + dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n"); + return -EIO; + } + + buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); + if (buf == NULL) { + dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n"); + return -ENOMEM; + } + + for (i = 0; i < n; i++) { + if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || + qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { + kfree(buf); + return -EIO; + } + + buf[i].addr = addr; + buf[i].data = val; + } + + for (i = 0; i < n; i++) { + + off = qlcnic_decode_crb_addr(buf[i].addr); + if (off == QLCNIC_ADDR_ERROR) { + dev_err(&pdev->dev, "CRB init value out of range %x\n", + buf[i].addr); + continue; + } + off += QLCNIC_PCI_CRBSPACE; + + if (off & 1) + continue; + + /* skipping cold reboot MAGIC */ + if (off == QLCNIC_CAM_RAM(0x1fc)) + continue; + if (off == (QLCNIC_CRB_I2C0 + 0x1c)) + continue; + if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */ + continue; + if (off == (ROMUSB_GLB + 0xa8)) + continue; + if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ + continue; + if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ + continue; + if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ + continue; + if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET) + continue; + /* skip the function enable register */ + if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION)) + continue; + if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2)) + continue; + if ((off & 0x0ff00000) == QLCNIC_CRB_SMB) + continue; + + init_delay = 1; + /* After writing this register, HW needs time for CRB */ + /* to quiet down (else crb_window returns 0xffffffff) */ + if (off == QLCNIC_ROMUSB_GLB_SW_RESET) + init_delay = 1000; + + QLCWR32(adapter, off, buf[i].data); + + msleep(init_delay); + } + kfree(buf); + + /* Initialize protocol process engine */ + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); + msleep(1); + QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); + QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); + return 0; +} + +static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter) +{ + u32 val; + int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; + + do { + val = QLCRD32(adapter, CRB_CMDPEG_STATE); + + switch (val) { + case PHAN_INITIALIZE_COMPLETE: + case PHAN_INITIALIZE_ACK: + return 0; + case PHAN_INITIALIZE_FAILED: + goto out_err; + default: + break; + } + + msleep(QLCNIC_CMDPEG_CHECK_DELAY); + + } while (--retries); + + QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); + +out_err: + dev_err(&adapter->pdev->dev, "Command Peg initialization not " + "complete, state: 0x%x.\n", val); + return -EIO; +} + +static int 
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter) +{ + u32 val; + int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT; + + do { + val = QLCRD32(adapter, CRB_RCVPEG_STATE); + + if (val == PHAN_PEG_RCV_INITIALIZED) + return 0; + + msleep(QLCNIC_RCVPEG_CHECK_DELAY); + + } while (--retries); + + if (!retries) { + dev_err(&adapter->pdev->dev, "Receive Peg initialization not " + "complete, state: 0x%x.\n", val); + return -EIO; + } + + return 0; +} + +int +qlcnic_check_fw_status(struct qlcnic_adapter *adapter) +{ + int err; + + err = qlcnic_cmd_peg_ready(adapter); + if (err) + return err; + + err = qlcnic_receive_peg_ready(adapter); + if (err) + return err; + + QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); + + return err; +} + +int +qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { + + int timeo; + u32 val; + + val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); + val = QLC_DEV_GET_DRV(val, adapter->portnum); + if ((val & 0x3) != QLCNIC_TYPE_NIC) { + dev_err(&adapter->pdev->dev, + "Not an Ethernet NIC func=%u\n", val); + return -EIO; + } + adapter->physical_port = (val >> 2); + if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) + timeo = QLCNIC_INIT_TIMEOUT_SECS; + + adapter->dev_init_timeo = timeo; + + if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo)) + timeo = QLCNIC_RESET_TIMEOUT_SECS; + + adapter->reset_ack_timeo = timeo; + + return 0; +} + +static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, + struct qlcnic_flt_entry *region_entry) +{ + struct qlcnic_flt_header flt_hdr; + struct qlcnic_flt_entry *flt_entry; + int i = 0, ret; + u32 entry_size; + + memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, + (u8 *)&flt_hdr, + sizeof(struct qlcnic_flt_header)); + if (ret) { + dev_warn(&adapter->pdev->dev, + "error reading flash layout header\n"); + return -EIO; + } + + entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); + flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); + if (flt_entry == NULL) { + dev_warn(&adapter->pdev->dev, "error allocating memory\n"); + return -EIO; + } + + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + + sizeof(struct qlcnic_flt_header), + (u8 *)flt_entry, entry_size); + if (ret) { + dev_warn(&adapter->pdev->dev, + "error reading flash layout entries\n"); + goto err_out; + } + + while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { + if (flt_entry[i].region == region) + break; + i++; + } + if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { + dev_warn(&adapter->pdev->dev, + "region=%x not found in %d regions\n", region, i); + ret = -EIO; + goto err_out; + } + memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); + +err_out: + vfree(flt_entry); + return ret; +} + +int +qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) +{ + struct qlcnic_flt_entry fw_entry; + u32 ver = -1, min_ver; + int ret; + + ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry); + if (!ret) + /* 0-4:-signature, 4-8:-fw version */ + qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, + (int *)&ver); + else + qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, + (int *)&ver); + + ver = QLCNIC_DECODE_VERSION(ver); + min_ver = QLCNIC_MIN_FW_VERSION; + + if (ver < min_ver) { + dev_err(&adapter->pdev->dev, + "firmware version %d.%d.%d unsupported." 
+ "Min supported version %d.%d.%d\n", + _major(ver), _minor(ver), _build(ver), + _major(min_ver), _minor(min_ver), _build(min_ver)); + return -EINVAL; + } + + return 0; +} + +static int +qlcnic_has_mn(struct qlcnic_adapter *adapter) +{ + u32 capability; + capability = 0; + + capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY); + if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) + return 1; + + return 0; +} + +static +struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section) +{ + u32 i; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + __le32 entries = cpu_to_le32(directory->num_entries); + + for (i = 0; i < entries; i++) { + + __le32 offs = cpu_to_le32(directory->findex) + + (i * cpu_to_le32(directory->entry_size)); + __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); + + if (tab_type == section) + return (struct uni_table_desc *) &unirom[offs]; + } + + return NULL; +} + +#define FILEHEADER_SIZE (14 * 4) + +static int +qlcnic_validate_header(struct qlcnic_adapter *adapter) +{ + const u8 *unirom = adapter->fw->data; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + __le32 fw_file_size = adapter->fw->size; + __le32 entries; + __le32 entry_size; + __le32 tab_size; + + if (fw_file_size < FILEHEADER_SIZE) + return -EINVAL; + + entries = cpu_to_le32(directory->num_entries); + entry_size = cpu_to_le32(directory->entry_size); + tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); + + if (fw_file_size < tab_size) + return -EINVAL; + + return 0; +} + +static int +qlcnic_validate_bootld(struct qlcnic_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + QLCNIC_UNI_BOOTLD_IDX_OFF)); + __le32 offs; + __le32 tab_size; + __le32 data_size; + + tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + +static int +qlcnic_validate_fw(struct qlcnic_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + QLCNIC_UNI_FIRMWARE_IDX_OFF)); + __le32 offs; + __le32 tab_size; + __le32 data_size; + + tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + +static int +qlcnic_validate_product_offs(struct qlcnic_adapter *adapter) +{ + struct uni_table_desc *ptab_descr; + const u8 *unirom = adapter->fw->data; + int mn_present = qlcnic_has_mn(adapter); + __le32 entries; + __le32 entry_size; + 
__le32 tab_size; + u32 i; + + ptab_descr = qlcnic_get_table_desc(unirom, + QLCNIC_UNI_DIR_SECT_PRODUCT_TBL); + if (!ptab_descr) + return -EINVAL; + + entries = cpu_to_le32(ptab_descr->num_entries); + entry_size = cpu_to_le32(ptab_descr->entry_size); + tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); + + if (adapter->fw->size < tab_size) + return -EINVAL; + +nomn: + for (i = 0; i < entries; i++) { + + __le32 flags, file_chiprev, offs; + u8 chiprev = adapter->ahw->revision_id; + u32 flagbit; + + offs = cpu_to_le32(ptab_descr->findex) + + (i * cpu_to_le32(ptab_descr->entry_size)); + flags = cpu_to_le32(*((int *)&unirom[offs] + + QLCNIC_UNI_FLAGS_OFF)); + file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + + QLCNIC_UNI_CHIP_REV_OFF)); + + flagbit = mn_present ? 1 : 2; + + if ((chiprev == file_chiprev) && + ((1ULL << flagbit) & flags)) { + adapter->file_prd_off = offs; + return 0; + } + } + if (mn_present) { + mn_present = 0; + goto nomn; + } + return -EINVAL; +} + +static int +qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter) +{ + if (qlcnic_validate_header(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: header validation failed\n"); + return -EINVAL; + } + + if (qlcnic_validate_product_offs(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: product validation failed\n"); + return -EINVAL; + } + + if (qlcnic_validate_bootld(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: bootld validation failed\n"); + return -EINVAL; + } + + if (qlcnic_validate_fw(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: firmware validation failed\n"); + return -EINVAL; + } + + return 0; +} + +static +struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter, + u32 section, u32 idx_offset) +{ + const u8 *unirom = adapter->fw->data; + int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + idx_offset)); + struct uni_table_desc *tab_desc; + __le32 offs; + + tab_desc = qlcnic_get_table_desc(unirom, section); + + if (tab_desc == NULL) + return NULL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * idx); + + return (struct uni_data_desc *)&unirom[offs]; +} + +static u8 * +qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter) +{ + u32 offs = QLCNIC_BOOTLD_START; + + if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) + offs = cpu_to_le32((qlcnic_get_data_desc(adapter, + QLCNIC_UNI_DIR_SECT_BOOTLD, + QLCNIC_UNI_BOOTLD_IDX_OFF))->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static u8 * +qlcnic_get_fw_offs(struct qlcnic_adapter *adapter) +{ + u32 offs = QLCNIC_IMAGE_START; + + if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) + offs = cpu_to_le32((qlcnic_get_data_desc(adapter, + QLCNIC_UNI_DIR_SECT_FW, + QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static __le32 +qlcnic_get_fw_size(struct qlcnic_adapter *adapter) +{ + if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) + return cpu_to_le32((qlcnic_get_data_desc(adapter, + QLCNIC_UNI_DIR_SECT_FW, + QLCNIC_UNI_FIRMWARE_IDX_OFF))->size); + else + return cpu_to_le32( + *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]); +} + +static __le32 +qlcnic_get_fw_version(struct qlcnic_adapter *adapter) +{ + struct uni_data_desc *fw_data_desc; + const struct firmware *fw = adapter->fw; + __le32 major, minor, sub; + const u8 *ver_str; + int i, ret; + + if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) + return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]); + + fw_data_desc = 
qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, + QLCNIC_UNI_FIRMWARE_IDX_OFF); + ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) + + cpu_to_le32(fw_data_desc->size) - 17; + + for (i = 0; i < 12; i++) { + if (!strncmp(&ver_str[i], "REV=", 4)) { + ret = sscanf(&ver_str[i+4], "%u.%u.%u ", + &major, &minor, &sub); + if (ret != 3) + return 0; + else + return major + (minor << 8) + (sub << 16); + } + } + + return 0; +} + +static __le32 +qlcnic_get_bios_version(struct qlcnic_adapter *adapter) +{ + const struct firmware *fw = adapter->fw; + __le32 bios_ver, prd_off = adapter->file_prd_off; + + if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) + return cpu_to_le32( + *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]); + + bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) + + QLCNIC_UNI_BIOS_VERSION_OFF)); + + return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); +} + +static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter) +{ + if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID)) + dev_info(&adapter->pdev->dev, "Resetting rom_lock\n"); + + qlcnic_pcie_sem_unlock(adapter, 2); +} + +static int +qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter) +{ + u32 heartbeat, ret = -EIO; + int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; + + adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); + + do { + msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); + heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); + if (heartbeat != adapter->heartbeat) { + ret = QLCNIC_RCODE_SUCCESS; + break; + } + } while (--retries); + + return ret; +} + +int +qlcnic_need_fw_reset(struct qlcnic_adapter *adapter) +{ + if ((adapter->flags & QLCNIC_FW_HANG) || + qlcnic_check_fw_hearbeat(adapter)) { + qlcnic_rom_lock_recovery(adapter); + return 1; + } + + if (adapter->need_fw_reset) + return 1; + + if (adapter->fw) + return 1; + + return 0; +} + +static const char *fw_name[] = { + QLCNIC_UNIFIED_ROMIMAGE_NAME, + QLCNIC_FLASH_ROMIMAGE_NAME, +}; + +int +qlcnic_load_firmware(struct qlcnic_adapter *adapter) +{ + u64 *ptr64; + u32 i, flashaddr, size; + const struct firmware *fw = adapter->fw; + struct pci_dev *pdev = adapter->pdev; + + dev_info(&pdev->dev, "loading firmware from %s\n", + fw_name[adapter->fw_type]); + + if (fw) { + __le64 data; + + size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; + + ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter); + flashaddr = QLCNIC_BOOTLD_START; + + for (i = 0; i < size; i++) { + data = cpu_to_le64(ptr64[i]); + + if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = (__force u32)qlcnic_get_fw_size(adapter) / 8; + + ptr64 = (u64 *)qlcnic_get_fw_offs(adapter); + flashaddr = QLCNIC_IMAGE_START; + + for (i = 0; i < size; i++) { + data = cpu_to_le64(ptr64[i]); + + if (qlcnic_pci_mem_write_2M(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = (__force u32)qlcnic_get_fw_size(adapter) % 8; + if (size) { + data = cpu_to_le64(ptr64[i]); + + if (qlcnic_pci_mem_write_2M(adapter, + flashaddr, data)) + return -EIO; + } + + } else { + u64 data; + u32 hi, lo; + int ret; + struct qlcnic_flt_entry bootld_entry; + + ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION, + &bootld_entry); + if (!ret) { + size = bootld_entry.size / 8; + flashaddr = bootld_entry.start_addr; + } else { + size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; + flashaddr = QLCNIC_BOOTLD_START; + dev_info(&pdev->dev, + "using legacy method to get flash fw region"); + } + + for (i = 0; i < size; i++) { 
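+			/* Read each 8-byte word from flash as two
+			 * 32-bit halves and write it to adapter memory
+			 * at the same address.
+			 */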
+ if (qlcnic_rom_fast_read(adapter, + flashaddr, (int *)&lo) != 0) + return -EIO; + if (qlcnic_rom_fast_read(adapter, + flashaddr + 4, (int *)&hi) != 0) + return -EIO; + + data = (((u64)hi << 32) | lo); + + if (qlcnic_pci_mem_write_2M(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + } + msleep(1); + + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020); + QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e); + return 0; +} + +static int +qlcnic_validate_firmware(struct qlcnic_adapter *adapter) +{ + __le32 val; + u32 ver, bios, min_size; + struct pci_dev *pdev = adapter->pdev; + const struct firmware *fw = adapter->fw; + u8 fw_type = adapter->fw_type; + + if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) { + if (qlcnic_validate_unified_romimage(adapter)) + return -EINVAL; + + min_size = QLCNIC_UNI_FW_MIN_SIZE; + } else { + val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]); + if ((__force u32)val != QLCNIC_BDINFO_MAGIC) + return -EINVAL; + + min_size = QLCNIC_FW_MIN_SIZE; + } + + if (fw->size < min_size) + return -EINVAL; + + val = qlcnic_get_fw_version(adapter); + ver = QLCNIC_DECODE_VERSION(val); + + if (ver < QLCNIC_MIN_FW_VERSION) { + dev_err(&pdev->dev, + "%s: firmware version %d.%d.%d unsupported\n", + fw_name[fw_type], _major(ver), _minor(ver), _build(ver)); + return -EINVAL; + } + + val = qlcnic_get_bios_version(adapter); + qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios); + if ((__force u32)val != bios) { + dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", + fw_name[fw_type]); + return -EINVAL; + } + + QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC); + return 0; +} + +static void +qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter) +{ + u8 fw_type; + + switch (adapter->fw_type) { + case QLCNIC_UNKNOWN_ROMIMAGE: + fw_type = QLCNIC_UNIFIED_ROMIMAGE; + break; + + case QLCNIC_UNIFIED_ROMIMAGE: + default: + fw_type = QLCNIC_FLASH_ROMIMAGE; + break; + } + + adapter->fw_type = fw_type; +} + + + +void qlcnic_request_firmware(struct qlcnic_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int rc; + + adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE; + +next: + qlcnic_get_next_fwtype(adapter); + + if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) { + adapter->fw = NULL; + } else { + rc = request_firmware(&adapter->fw, + fw_name[adapter->fw_type], &pdev->dev); + if (rc != 0) + goto next; + + rc = qlcnic_validate_firmware(adapter); + if (rc != 0) { + release_firmware(adapter->fw); + msleep(1); + goto next; + } + } +} + + +void +qlcnic_release_firmware(struct qlcnic_adapter *adapter) +{ + if (adapter->fw) + release_firmware(adapter->fw); + adapter->fw = NULL; +} + +static void +qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, + struct qlcnic_fw_msg *msg) +{ + u32 cable_OUI; + u16 cable_len; + u16 link_speed; + u8 link_status, module, duplex, autoneg; + u8 lb_status = 0; + struct net_device *netdev = adapter->netdev; + + adapter->has_link_events = 1; + + cable_OUI = msg->body[1] & 0xffffffff; + cable_len = (msg->body[1] >> 32) & 0xffff; + link_speed = (msg->body[1] >> 48) & 0xffff; + + link_status = msg->body[2] & 0xff; + duplex = (msg->body[2] >> 16) & 0xff; + autoneg = (msg->body[2] >> 24) & 0xff; + lb_status = (msg->body[2] >> 32) & 0x3; + + module = (msg->body[2] >> 8) & 0xff; + if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) + dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, " + "length %d\n", cable_OUI, cable_len); + else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) + 
dev_info(&netdev->dev, "unsupported cable length %d\n", + cable_len); + + if (!link_status && (lb_status == QLCNIC_ILB_MODE || + lb_status == QLCNIC_ELB_MODE)) + adapter->ahw->loopback_state |= QLCNIC_LINKEVENT; + + qlcnic_advert_link_change(adapter, link_status); + + if (duplex == LINKEVENT_FULL_DUPLEX) + adapter->link_duplex = DUPLEX_FULL; + else + adapter->link_duplex = DUPLEX_HALF; + + adapter->module_type = module; + adapter->link_autoneg = autoneg; + adapter->link_speed = link_speed; +} + +static void +qlcnic_handle_fw_message(int desc_cnt, int index, + struct qlcnic_host_sds_ring *sds_ring) +{ + struct qlcnic_fw_msg msg; + struct status_desc *desc; + struct qlcnic_adapter *adapter; + struct device *dev; + int i = 0, opcode, ret; + + while (desc_cnt > 0 && i < 8) { + desc = &sds_ring->desc_head[index]; + msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); + msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); + + index = get_next_index(index, sds_ring->num_desc); + desc_cnt--; + } + + adapter = sds_ring->adapter; + dev = &adapter->pdev->dev; + opcode = qlcnic_get_nic_msg_opcode(msg.body[0]); + + switch (opcode) { + case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: + qlcnic_handle_linkevent(adapter, &msg); + break; + case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK: + ret = (u32)(msg.body[1]); + switch (ret) { + case 0: + adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE; + break; + case 1: + dev_info(dev, "loopback already in progress\n"); + adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS; + break; + case 2: + dev_info(dev, "loopback cable is not connected\n"); + adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; + break; + default: + dev_info(dev, "loopback configure request failed," + " ret %x\n", ret); + adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR; + break; + } + break; + default: + break; + } +} + +static int +qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring, + struct qlcnic_rx_buffer *buffer) +{ + struct sk_buff *skb; + dma_addr_t dma; + struct pci_dev *pdev = adapter->pdev; + + skb = dev_alloc_skb(rds_ring->skb_size); + if (!skb) { + adapter->stats.skb_alloc_failure++; + return -ENOMEM; + } + + skb_reserve(skb, NET_IP_ALIGN); + + dma = pci_map_single(pdev, skb->data, + rds_ring->dma_size, PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(pdev, dma)) { + adapter->stats.rx_dma_map_error++; + dev_kfree_skb_any(skb); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->dma = dma; + + return 0; +} + +static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum) +{ + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + + buffer = &rds_ring->rx_buf_arr[index]; + + if (unlikely(buffer->skb == NULL)) { + WARN_ON(1); + return NULL; + } + + pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, + PCI_DMA_FROMDEVICE); + + skb = buffer->skb; + + if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && + (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) { + adapter->stats.csummed++; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); + } + + skb->dev = adapter->netdev; + + buffer->skb = NULL; + + return skb; +} + +static inline int +qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb, + u16 *vlan_tag) +{ + struct ethhdr *eth_hdr; + + if (!__vlan_get_tag(skb, vlan_tag)) { + eth_hdr = (struct ethhdr *) skb->data; + memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + } 
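+	/* No port VLAN configured: accept the frame as-is. */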
+ if (!adapter->pvid) + return 0; + + if (*vlan_tag == adapter->pvid) { + /* Outer vlan tag. Packet should follow non-vlan path */ + *vlan_tag = 0xffff; + return 0; + } + if (adapter->flags & QLCNIC_TAGGING_ENABLED) + return 0; + + return -EINVAL; +} + +static struct qlcnic_rx_buffer * +qlcnic_process_rcv(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring, + int ring, u64 sts_data0) +{ + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + int index, length, cksum, pkt_offset; + u16 vid = 0xffff; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_get_sts_refhandle(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + length = qlcnic_get_sts_totallength(sts_data0); + cksum = qlcnic_get_sts_status(sts_data0); + pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return buffer; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + if (pkt_offset) + skb_pull(skb, pkt_offset); + + if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { + adapter->stats.rxdropped++; + dev_kfree_skb(skb); + return buffer; + } + + skb->protocol = eth_type_trans(skb, netdev); + + if (vid != 0xffff) + __vlan_hwaccel_put_tag(skb, vid); + + napi_gro_receive(&sds_ring->napi, skb); + + adapter->stats.rx_pkts++; + adapter->stats.rxbytes += length; + + return buffer; +} + +#define QLC_TCP_HDR_SIZE 20 +#define QLC_TCP_TS_OPTION_SIZE 12 +#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE) + +static struct qlcnic_rx_buffer * +qlcnic_process_lro(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring, + int ring, u64 sts_data0, u64 sts_data1) +{ + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + struct iphdr *iph; + struct tcphdr *th; + bool push, timestamp; + int l2_hdr_offset, l4_hdr_offset; + int index; + u16 lro_length, length, data_offset; + u32 seq_number; + u16 vid = 0xffff; + + if (unlikely(ring > adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_get_lro_sts_refhandle(sts_data0); + if (unlikely(index > rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + timestamp = qlcnic_get_lro_sts_timestamp(sts_data0); + lro_length = qlcnic_get_lro_sts_length(sts_data0); + l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0); + l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0); + push = qlcnic_get_lro_sts_push_flag(sts_data0); + seq_number = qlcnic_get_lro_sts_seq_number(sts_data1); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); + if (!skb) + return buffer; + + if (timestamp) + data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE; + else + data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; + + skb_put(skb, lro_length + data_offset); + + skb_pull(skb, l2_hdr_offset); + + if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { + adapter->stats.rxdropped++; + dev_kfree_skb(skb); + return buffer; + } + + skb->protocol = eth_type_trans(skb, netdev); + + iph = (struct iphdr 
*)skb->data; + th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); + + length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + iph->tot_len = htons(length); + iph->check = 0; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + th->psh = push; + th->seq = htonl(seq_number); + + length = skb->len; + + if (vid != 0xffff) + __vlan_hwaccel_put_tag(skb, vid); + netif_receive_skb(skb); + + adapter->stats.lro_pkts++; + adapter->stats.lrobytes += length; + + return buffer; +} + +int +qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) +{ + struct qlcnic_adapter *adapter = sds_ring->adapter; + struct list_head *cur; + struct status_desc *desc; + struct qlcnic_rx_buffer *rxbuf; + u64 sts_data0, sts_data1; + + int count = 0; + int opcode, ring, desc_cnt; + u32 consumer = sds_ring->consumer; + + while (count < max) { + desc = &sds_ring->desc_head[consumer]; + sts_data0 = le64_to_cpu(desc->status_desc_data[0]); + + if (!(sts_data0 & STATUS_OWNER_HOST)) + break; + + desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); + opcode = qlcnic_get_sts_opcode(sts_data0); + + switch (opcode) { + case QLCNIC_RXPKT_DESC: + case QLCNIC_OLD_RXPKT_DESC: + case QLCNIC_SYN_OFFLOAD: + ring = qlcnic_get_sts_type(sts_data0); + rxbuf = qlcnic_process_rcv(adapter, sds_ring, + ring, sts_data0); + break; + case QLCNIC_LRO_DESC: + ring = qlcnic_get_lro_sts_type(sts_data0); + sts_data1 = le64_to_cpu(desc->status_desc_data[1]); + rxbuf = qlcnic_process_lro(adapter, sds_ring, + ring, sts_data0, sts_data1); + break; + case QLCNIC_RESPONSE_DESC: + qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); + default: + goto skip; + } + + WARN_ON(desc_cnt > 1); + + if (likely(rxbuf)) + list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); + else + adapter->stats.null_rxbuf++; + +skip: + for (; desc_cnt > 0; desc_cnt--) { + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = + cpu_to_le64(STATUS_OWNER_PHANTOM); + consumer = get_next_index(consumer, sds_ring->num_desc); + } + count++; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + struct qlcnic_host_rds_ring *rds_ring = + &adapter->recv_ctx->rds_rings[ring]; + + if (!list_empty(&sds_ring->free_list[ring])) { + list_for_each(cur, &sds_ring->free_list[ring]) { + rxbuf = list_entry(cur, + struct qlcnic_rx_buffer, list); + qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); + } + spin_lock(&rds_ring->lock); + list_splice_tail_init(&sds_ring->free_list[ring], + &rds_ring->free_list); + spin_unlock(&rds_ring->lock); + } + + qlcnic_post_rx_buffers_nodb(adapter, rds_ring); + } + + if (count) { + sds_ring->consumer = consumer; + writel(consumer, sds_ring->crb_sts_consumer); + } + + return count; +} + +void +qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring) +{ + struct rcv_desc *pdesc; + struct qlcnic_rx_buffer *buffer; + int count = 0; + u32 producer; + struct list_head *head; + + producer = rds_ring->producer; + + head = &rds_ring->free_list; + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); + + if (!buffer->skb) { + if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + 
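+		/* Ring the doorbell: hardware expects (producer - 1)
+		 * masked by the ring size.
+		 */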
rds_ring->producer = producer; + writel((producer-1) & (rds_ring->num_desc-1), + rds_ring->crb_rcv_producer); + } +} + +static void +qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring) +{ + struct rcv_desc *pdesc; + struct qlcnic_rx_buffer *buffer; + int count = 0; + uint32_t producer; + struct list_head *head; + + if (!spin_trylock(&rds_ring->lock)) + return; + + producer = rds_ring->producer; + + head = &rds_ring->free_list; + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); + + if (!buffer->skb) { + if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + rds_ring->producer = producer; + writel((producer - 1) & (rds_ring->num_desc - 1), + rds_ring->crb_rcv_producer); + } + spin_unlock(&rds_ring->lock); +} + +static void dump_skb(struct sk_buff *skb) +{ + int i; + unsigned char *data = skb->data; + + printk(KERN_INFO "\n"); + for (i = 0; i < skb->len; i++) { + printk(KERN_INFO "%02x ", data[i]); + if ((i & 0x0f) == 8) + printk(KERN_INFO "\n"); + } +} + +void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring, + int ring, u64 sts_data0) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + int index, length, cksum, pkt_offset; + + if (unlikely(ring >= adapter->max_rds_rings)) + return; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_get_sts_refhandle(sts_data0); + length = qlcnic_get_sts_totallength(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return; + + cksum = qlcnic_get_sts_status(sts_data0); + pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + if (pkt_offset) + skb_pull(skb, pkt_offset); + + if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr)) + adapter->diag_cnt++; + else + dump_skb(skb); + + dev_kfree_skb_any(skb); + adapter->stats.rx_pkts++; + adapter->stats.rxbytes += length; + + return; +} + +void +qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) +{ + struct qlcnic_adapter *adapter = sds_ring->adapter; + struct status_desc *desc; + u64 sts_data0; + int ring, opcode, desc_cnt; + + u32 consumer = sds_ring->consumer; + + desc = &sds_ring->desc_head[consumer]; + sts_data0 = le64_to_cpu(desc->status_desc_data[0]); + + if (!(sts_data0 & STATUS_OWNER_HOST)) + return; + + desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); + opcode = qlcnic_get_sts_opcode(sts_data0); + switch (opcode) { + case QLCNIC_RESPONSE_DESC: + qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); + break; + default: + ring = qlcnic_get_sts_type(sts_data0); + qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0); + break; + } + + for (; desc_cnt > 0; desc_cnt--) { + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); + consumer = get_next_index(consumer, sds_ring->num_desc); + } + + sds_ring->consumer = consumer; + writel(consumer, 
sds_ring->crb_sts_consumer); +} + +void +qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2, + u8 alt_mac, u8 *mac) +{ + u32 mac_low, mac_high; + int i; + + mac_low = QLCRD32(adapter, off1); + mac_high = QLCRD32(adapter, off2); + + if (alt_mac) { + mac_low |= (mac_low >> 16) | (mac_high << 16); + mac_high >>= 16; + } + + for (i = 0; i < 2; i++) + mac[i] = (u8)(mac_high >> ((1 - i) * 8)); + for (i = 2; i < 6; i++) + mac[i] = (u8)(mac_low >> ((5 - i) * 8)); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c new file mode 100644 index 0000000..ec8ef72 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -0,0 +1,4390 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2010 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#include +#include +#include + +#include "qlcnic.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(QLCNIC_LINUX_VERSIONID); +MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME); + +char qlcnic_driver_name[] = "qlcnic"; +static const char qlcnic_driver_string[] = "QLogic 1/10 GbE " + "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID; + +static struct workqueue_struct *qlcnic_wq; +static int qlcnic_mac_learn; +module_param(qlcnic_mac_learn, int, 0444); +MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); + +static int use_msi = 1; +module_param(use_msi, int, 0444); +MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); + +static int use_msi_x = 1; +module_param(use_msi_x, int, 0444); +MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); + +static int auto_fw_reset = 1; +module_param(auto_fw_reset, int, 0644); +MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); + +static int load_fw_file; +module_param(load_fw_file, int, 0444); +MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); + +static int qlcnic_config_npars; +module_param(qlcnic_config_npars, int, 0444); +MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); + +static int __devinit qlcnic_probe(struct pci_dev *pdev, + const struct pci_device_id *ent); +static void __devexit qlcnic_remove(struct pci_dev *pdev); +static int qlcnic_open(struct net_device *netdev); +static int qlcnic_close(struct net_device *netdev); +static void qlcnic_tx_timeout(struct net_device *netdev); +static void qlcnic_attach_work(struct work_struct *work); +static void qlcnic_fwinit_work(struct work_struct *work); +static void qlcnic_fw_poll_work(struct work_struct *work); +static void qlcnic_schedule_work(struct qlcnic_adapter *adapter, + work_func_t func, int delay); +static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter); +static int qlcnic_poll(struct napi_struct *napi, int budget); +static int qlcnic_rx_poll(struct napi_struct *napi, int budget); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void qlcnic_poll_controller(struct net_device *netdev); +#endif + +static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter); +static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter); +static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); +static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); + +static void qlcnic_idc_debug_info(struct qlcnic_adapter 
*adapter, u8 encoding); +static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8); +static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); + +static irqreturn_t qlcnic_tmp_intr(int irq, void *data); +static irqreturn_t qlcnic_intr(int irq, void *data); +static irqreturn_t qlcnic_msi_intr(int irq, void *data); +static irqreturn_t qlcnic_msix_intr(int irq, void *data); + +static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); +static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long); +static int qlcnic_start_firmware(struct qlcnic_adapter *); + +static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); +static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); +static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); +static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); +static int qlcnicvf_start_firmware(struct qlcnic_adapter *); +static void qlcnic_set_netdev_features(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); +static void qlcnic_vlan_rx_add(struct net_device *, u16); +static void qlcnic_vlan_rx_del(struct net_device *, u16); + +/* PCI Device ID Table */ +#define ENTRY(device) \ + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} + +#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 + +static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = { + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), + {0,} +}; + +MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl); + + +inline void +qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + writel(tx_ring->producer, tx_ring->crb_cmd_producer); +} + +static const u32 msi_tgt_status[8] = { + ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, + ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, + ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, + ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 +}; + +static const +struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG; + +static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring) +{ + writel(0, sds_ring->crb_intr_mask); +} + +static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) +{ + struct qlcnic_adapter *adapter = sds_ring->adapter; + + writel(0x1, sds_ring->crb_intr_mask); + + if (!QLCNIC_IS_MSI_FAMILY(adapter)) + writel(0xfbff, adapter->tgt_mask_reg); +} + +static int +qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count) +{ + int size = sizeof(struct qlcnic_host_sds_ring) * count; + + recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); + + return recv_ctx->sds_rings == NULL; +} + +static void +qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) +{ + if (recv_ctx->sds_rings != NULL) + kfree(recv_ctx->sds_rings); + + recv_ctx->sds_rings = NULL; +} + +static int +qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) + return -ENOMEM; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (ring == adapter->max_sds_rings - 1) + netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, + QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings); + else + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2); + } + + return 0; +} + +static void 
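+/* Delete all NAPI contexts and free the SDS ring array. */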
+qlcnic_napi_del(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_del(&sds_ring->napi); + } + + qlcnic_free_sds_rings(adapter->recv_ctx); +} + +static void +qlcnic_napi_enable(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + napi_enable(&sds_ring->napi); + qlcnic_enable_int(sds_ring); + } +} + +static void +qlcnic_napi_disable(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + qlcnic_disable_int(sds_ring); + napi_synchronize(&sds_ring->napi); + napi_disable(&sds_ring->napi); + } +} + +static void qlcnic_clear_stats(struct qlcnic_adapter *adapter) +{ + memset(&adapter->stats, 0, sizeof(adapter->stats)); +} + +static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable) +{ + u32 control; + int pos; + + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (pos) { + pci_read_config_dword(pdev, pos, &control); + if (enable) + control |= PCI_MSIX_FLAGS_ENABLE; + else + control = 0; + pci_write_config_dword(pdev, pos, control); + } +} + +static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count) +{ + int i; + + for (i = 0; i < count; i++) + adapter->msix_entries[i].entry = i; +} + +static int +qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) +{ + u8 mac_addr[ETH_ALEN]; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + if (qlcnic_get_mac_address(adapter, mac_addr) != 0) + return -EIO; + + memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); + memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); + + /* set station address */ + + if (!is_valid_ether_addr(netdev->perm_addr)) + dev_warn(&pdev->dev, "Bad MAC address %pM.\n", + netdev->dev_addr); + + return 0; +} + +static int qlcnic_set_mac(struct net_device *netdev, void *p) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED)) + return -EOPNOTSUPP; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + netif_device_detach(netdev); + qlcnic_napi_disable(adapter); + } + + memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + qlcnic_set_multi(adapter->netdev); + + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + netif_device_attach(netdev); + qlcnic_napi_enable(adapter); + } + return 0; +} + +static const struct net_device_ops qlcnic_netdev_ops = { + .ndo_open = qlcnic_open, + .ndo_stop = qlcnic_close, + .ndo_start_xmit = qlcnic_xmit_frame, + .ndo_get_stats = qlcnic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = qlcnic_set_multi, + .ndo_set_mac_address = qlcnic_set_mac, + .ndo_change_mtu = qlcnic_change_mtu, + .ndo_fix_features = 
qlcnic_fix_features, + .ndo_set_features = qlcnic_set_features, + .ndo_tx_timeout = qlcnic_tx_timeout, + .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add, + .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = qlcnic_poll_controller, +#endif +}; + +static struct qlcnic_nic_template qlcnic_ops = { + .config_bridged_mode = qlcnic_config_bridged_mode, + .config_led = qlcnic_config_led, + .start_firmware = qlcnic_start_firmware +}; + +static struct qlcnic_nic_template qlcnic_vf_ops = { + .config_bridged_mode = qlcnicvf_config_bridged_mode, + .config_led = qlcnicvf_config_led, + .start_firmware = qlcnicvf_start_firmware +}; + +static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) +{ + struct pci_dev *pdev = adapter->pdev; + int err = -1; + + adapter->max_sds_rings = 1; + adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); + qlcnic_set_msix_bit(pdev, 0); + + if (adapter->msix_supported) { + enable_msix: + qlcnic_init_msix_entries(adapter, num_msix); + err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); + if (err == 0) { + adapter->flags |= QLCNIC_MSIX_ENABLED; + qlcnic_set_msix_bit(pdev, 1); + + adapter->max_sds_rings = num_msix; + + dev_info(&pdev->dev, "using msi-x interrupts\n"); + return err; + } + if (err > 0) { + num_msix = rounddown_pow_of_two(err); + if (num_msix) + goto enable_msix; + } + } + return err; +} + + +static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) +{ + const struct qlcnic_legacy_intr_set *legacy_intrp; + struct pci_dev *pdev = adapter->pdev; + + if (use_msi && !pci_enable_msi(pdev)) { + adapter->flags |= QLCNIC_MSI_ENABLED; + adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, + msi_tgt_status[adapter->ahw->pci_func]); + dev_info(&pdev->dev, "using msi interrupts\n"); + adapter->msix_entries[0].vector = pdev->irq; + return; + } + + legacy_intrp = &legacy_intr[adapter->ahw->pci_func]; + + adapter->int_vec_bit = legacy_intrp->int_vec_bit; + adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, + legacy_intrp->tgt_status_reg); + adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter, + legacy_intrp->tgt_mask_reg); + adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR); + + adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter, + ISR_INT_STATE_REG); + dev_info(&pdev->dev, "using legacy interrupts\n"); + adapter->msix_entries[0].vector = pdev->irq; +} + +static void +qlcnic_setup_intr(struct qlcnic_adapter *adapter) +{ + int num_msix; + + if (adapter->msix_supported) { + num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), + QLCNIC_DEF_NUM_STS_DESC_RINGS)); + } else + num_msix = 1; + + if (!qlcnic_enable_msix(adapter, num_msix)) + return; + + qlcnic_enable_msi_legacy(adapter); +} + +static void +qlcnic_teardown_intr(struct qlcnic_adapter *adapter) +{ + if (adapter->flags & QLCNIC_MSIX_ENABLED) + pci_disable_msix(adapter->pdev); + if (adapter->flags & QLCNIC_MSI_ENABLED) + pci_disable_msi(adapter->pdev); +} + +static void +qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter) +{ + if (adapter->ahw->pci_base0 != NULL) + iounmap(adapter->ahw->pci_base0); +} + +static int +qlcnic_init_pci_info(struct qlcnic_adapter *adapter) +{ + struct qlcnic_pci_info *pci_info; + int i, ret = 0; + u8 pfn; + + pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); + if (!pci_info) + return -ENOMEM; + + adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) * + QLCNIC_MAX_PCI_FUNC, GFP_KERNEL); + if (!adapter->npars) { + ret = -ENOMEM; + goto err_pci_info; 
+ } + + adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) * + QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); + if (!adapter->eswitch) { + ret = -ENOMEM; + goto err_npars; + } + + ret = qlcnic_get_pci_info(adapter, pci_info); + if (ret) + goto err_eswitch; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + pfn = pci_info[i].id; + if (pfn > QLCNIC_MAX_PCI_FUNC) { + ret = QL_STATUS_INVALID_PARAM; + goto err_eswitch; + } + adapter->npars[pfn].active = (u8)pci_info[i].active; + adapter->npars[pfn].type = (u8)pci_info[i].type; + adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port; + adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw; + adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw; + } + + for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) + adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; + + kfree(pci_info); + return 0; + +err_eswitch: + kfree(adapter->eswitch); + adapter->eswitch = NULL; +err_npars: + kfree(adapter->npars); + adapter->npars = NULL; +err_pci_info: + kfree(pci_info); + + return ret; +} + +static int +qlcnic_set_function_modes(struct qlcnic_adapter *adapter) +{ + u8 id; + u32 ref_count; + int i, ret = 1; + u32 data = QLCNIC_MGMT_FUNC; + void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; + + /* If other drivers are not in use set their privilege level */ + ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + ret = qlcnic_api_lock(adapter); + if (ret) + goto err_lock; + + if (qlcnic_config_npars) { + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + id = i; + if (adapter->npars[i].type != QLCNIC_TYPE_NIC || + id == adapter->ahw->pci_func) + continue; + data |= (qlcnic_config_npars & + QLC_DEV_SET_DRV(0xf, id)); + } + } else { + data = readl(priv_op); + data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) | + (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, + adapter->ahw->pci_func)); + } + writel(data, priv_op); + qlcnic_api_unlock(adapter); +err_lock: + return ret; +} + +static void +qlcnic_check_vf(struct qlcnic_adapter *adapter) +{ + void __iomem *msix_base_addr; + void __iomem *priv_op; + u32 func; + u32 msix_base; + u32 op_mode, priv_level; + + /* Determine FW API version */ + adapter->fw_hal_version = readl(adapter->ahw->pci_base0 + + QLCNIC_FW_API); + + /* Find PCI function number */ + pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); + msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE; + msix_base = readl(msix_base_addr); + func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; + adapter->ahw->pci_func = func; + + /* Determine function privilege level */ + priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; + op_mode = readl(priv_op); + if (op_mode == QLC_DEV_DRV_DEFAULT) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); + + if (priv_level == QLCNIC_NON_PRIV_FUNC) { + adapter->op_mode = QLCNIC_NON_PRIV_FUNC; + dev_info(&adapter->pdev->dev, + "HAL Version: %d Non Privileged function\n", + adapter->fw_hal_version); + adapter->nic_ops = &qlcnic_vf_ops; + } else + adapter->nic_ops = &qlcnic_ops; +} + +static int +qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) +{ + void __iomem *mem_ptr0 = NULL; + resource_size_t mem_base; + unsigned long mem_len, pci_len0 = 0; + + struct pci_dev *pdev = adapter->pdev; + + /* remap phys address */ + mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ + mem_len = pci_resource_len(pdev, 0); + + if (mem_len == QLCNIC_PCI_2MB_SIZE) { + + mem_ptr0 = pci_ioremap_bar(pdev, 0); + if (mem_ptr0 == NULL) { + dev_err(&pdev->dev, "failed to 
map PCI bar 0\n"); + return -EIO; + } + pci_len0 = mem_len; + } else { + return -EIO; + } + + dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); + + adapter->ahw->pci_base0 = mem_ptr0; + adapter->ahw->pci_len0 = pci_len0; + + qlcnic_check_vf(adapter); + + adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter, + QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG( + adapter->ahw->pci_func))); + + return 0; +} + +static void get_brd_name(struct qlcnic_adapter *adapter, char *name) +{ + struct pci_dev *pdev = adapter->pdev; + int i, found = 0; + + for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { + if (qlcnic_boards[i].vendor == pdev->vendor && + qlcnic_boards[i].device == pdev->device && + qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor && + qlcnic_boards[i].sub_device == pdev->subsystem_device) { + sprintf(name, "%pM: %s" , + adapter->mac_addr, + qlcnic_boards[i].short_name); + found = 1; + break; + } + + } + + if (!found) + sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr); +} + +static void +qlcnic_check_options(struct qlcnic_adapter *adapter) +{ + u32 fw_major, fw_minor, fw_build, prev_fw_version; + struct pci_dev *pdev = adapter->pdev; + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + prev_fw_version = adapter->fw_version; + + fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); + fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); + fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB); + + adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); + + if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) { + if (fw_dump->tmpl_hdr == NULL || + adapter->fw_version > prev_fw_version) { + if (fw_dump->tmpl_hdr) + vfree(fw_dump->tmpl_hdr); + if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) + dev_info(&pdev->dev, + "Supports FW dump capability\n"); + } + } + + dev_info(&pdev->dev, "firmware v%d.%d.%d\n", + fw_major, fw_minor, fw_build); + if (adapter->ahw->port_type == QLCNIC_XGBE) { + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF; + } else { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; + } + + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + + } else if (adapter->ahw->port_type == QLCNIC_GBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; + } + + adapter->msix_supported = !!use_msi_x; + + adapter->num_txd = MAX_CMD_DESCRIPTORS; + + adapter->max_rds_rings = MAX_RDS_RINGS; +} + +static int +qlcnic_initialize_nic(struct qlcnic_adapter *adapter) +{ + int err; + struct qlcnic_info nic_info; + + err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func); + if (err) + return err; + + adapter->physical_port = (u8)nic_info.phys_port; + adapter->switch_mode = nic_info.switch_mode; + adapter->max_tx_ques = nic_info.max_tx_ques; + adapter->max_rx_ques = nic_info.max_rx_ques; + adapter->capabilities = nic_info.capabilities; + adapter->max_mac_filters = nic_info.max_mac_filters; + adapter->max_mtu = nic_info.max_mtu; + + if (adapter->capabilities & BIT_6) + adapter->flags |= QLCNIC_ESWITCH_ENABLED; + else + adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; + + return err; +} + +static void +qlcnic_set_vlan_config(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + if 
(esw_cfg->discard_tagged) + adapter->flags &= ~QLCNIC_TAGGING_ENABLED; + else + adapter->flags |= QLCNIC_TAGGING_ENABLED; + + if (esw_cfg->vlan_id) + adapter->pvid = esw_cfg->vlan_id; + else + adapter->pvid = 0; +} + +static void +qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + set_bit(vid, adapter->vlans); +} + +static void +qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + qlcnic_restore_indev_addr(netdev, NETDEV_DOWN); + clear_bit(vid, adapter->vlans); +} + +static void +qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED | + QLCNIC_PROMISC_DISABLED); + + if (esw_cfg->mac_anti_spoof) + adapter->flags |= QLCNIC_MACSPOOF; + + if (!esw_cfg->mac_override) + adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED; + + if (!esw_cfg->promisc_mode) + adapter->flags |= QLCNIC_PROMISC_DISABLED; + + qlcnic_set_netdev_features(adapter, esw_cfg); +} + +static int +qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) +{ + struct qlcnic_esw_func_cfg esw_cfg; + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return 0; + + esw_cfg.pci_func = adapter->ahw->pci_func; + if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg)) + return -EIO; + qlcnic_set_vlan_config(adapter, &esw_cfg); + qlcnic_set_eswitch_port_features(adapter, &esw_cfg); + + return 0; +} + +static void +qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + struct net_device *netdev = adapter->netdev; + unsigned long features, vlan_features; + + features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_IPV6_CSUM | NETIF_F_GRO); + vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER); + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) { + features |= (NETIF_F_TSO | NETIF_F_TSO6); + vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); + } + + if (netdev->features & NETIF_F_LRO) + features |= NETIF_F_LRO; + + if (esw_cfg->offload_flags & BIT_0) { + netdev->features |= features; + if (!(esw_cfg->offload_flags & BIT_1)) + netdev->features &= ~NETIF_F_TSO; + if (!(esw_cfg->offload_flags & BIT_2)) + netdev->features &= ~NETIF_F_TSO6; + } else { + netdev->features &= ~features; + } + + netdev->vlan_features = (features & vlan_features); +} + +static int +qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter) +{ + void __iomem *priv_op; + u32 op_mode, priv_level; + int err = 0; + + err = qlcnic_initialize_nic(adapter); + if (err) + return err; + + if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED) + return 0; + + priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; + op_mode = readl(priv_op); + priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); + + if (op_mode == QLC_DEV_DRV_DEFAULT) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); + + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { + if (priv_level == QLCNIC_MGMT_FUNC) { + adapter->op_mode = QLCNIC_MGMT_FUNC; + err = qlcnic_init_pci_info(adapter); + if (err) + return err; + /* Set privilege level for other functions */ + qlcnic_set_function_modes(adapter); + dev_info(&adapter->pdev->dev, + "HAL Version: %d, Management function\n", + adapter->fw_hal_version); + } else if (priv_level == QLCNIC_PRIV_FUNC) { + adapter->op_mode = QLCNIC_PRIV_FUNC; + 
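+			/* Privileged, non-management function: skip
+			 * NPAR provisioning.
+			 */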
dev_info(&adapter->pdev->dev, + "HAL Version: %d, Privileged function\n", + adapter->fw_hal_version); + } + } + + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + + return err; +} + +static int +qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter) +{ + struct qlcnic_esw_func_cfg esw_cfg; + struct qlcnic_npar_info *npar; + u8 i; + + if (adapter->need_fw_reset) + return 0; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg)); + esw_cfg.pci_func = i; + esw_cfg.offload_flags = BIT_0; + esw_cfg.mac_override = BIT_0; + esw_cfg.promisc_mode = BIT_0; + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) + esw_cfg.offload_flags |= (BIT_1 | BIT_2); + if (qlcnic_config_switch_port(adapter, &esw_cfg)) + return -EIO; + npar = &adapter->npars[i]; + npar->pvid = esw_cfg.vlan_id; + npar->mac_override = esw_cfg.mac_override; + npar->mac_anti_spoof = esw_cfg.mac_anti_spoof; + npar->discard_tagged = esw_cfg.discard_tagged; + npar->promisc_mode = esw_cfg.promisc_mode; + npar->offload_flags = esw_cfg.offload_flags; + } + + return 0; +} + +static int +qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter, + struct qlcnic_npar_info *npar, int pci_func) +{ + struct qlcnic_esw_func_cfg esw_cfg; + esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS; + esw_cfg.pci_func = pci_func; + esw_cfg.vlan_id = npar->pvid; + esw_cfg.mac_override = npar->mac_override; + esw_cfg.discard_tagged = npar->discard_tagged; + esw_cfg.mac_anti_spoof = npar->mac_anti_spoof; + esw_cfg.offload_flags = npar->offload_flags; + esw_cfg.promisc_mode = npar->promisc_mode; + if (qlcnic_config_switch_port(adapter, &esw_cfg)) + return -EIO; + + esw_cfg.op_mode = QLCNIC_ADD_VLAN; + if (qlcnic_config_switch_port(adapter, &esw_cfg)) + return -EIO; + + return 0; +} + +static int +qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) +{ + int i, err; + struct qlcnic_npar_info *npar; + struct qlcnic_info nic_info; + + if (!adapter->need_fw_reset) + return 0; + + /* Set the NPAR config data after FW reset */ + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + npar = &adapter->npars[i]; + if (npar->type != QLCNIC_TYPE_NIC) + continue; + err = qlcnic_get_nic_info(adapter, &nic_info, i); + if (err) + return err; + nic_info.min_tx_bw = npar->min_bw; + nic_info.max_tx_bw = npar->max_bw; + err = qlcnic_set_nic_info(adapter, &nic_info); + if (err) + return err; + + if (npar->enable_pm) { + err = qlcnic_config_port_mirroring(adapter, + npar->dest_npar, 1, i); + if (err) + return err; + } + err = qlcnic_reset_eswitch_config(adapter, npar, i); + if (err) + return err; + } + return 0; +} + +static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter) +{ + u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO; + u32 npar_state; + + if (adapter->op_mode == QLCNIC_MGMT_FUNC) + return 0; + + npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) { + msleep(1000); + npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + } + if (!npar_opt_timeo) { + dev_err(&adapter->pdev->dev, + "Waiting for NPAR state to opertional timeout\n"); + return -EIO; + } + return 0; +} + +static int +qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter) +{ + int err; + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || + adapter->op_mode != QLCNIC_MGMT_FUNC) + return 0; + + err = qlcnic_set_default_offload_settings(adapter); + if (err) + return err; + + err = qlcnic_reset_npar_config(adapter); 
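+	/* A failure restoring NPAR configuration aborts firmware start. */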
+ if (err) + return err; + + qlcnic_dev_set_npar_ready(adapter); + + return err; +} + +static int +qlcnic_start_firmware(struct qlcnic_adapter *adapter) +{ + int err; + + err = qlcnic_can_start_firmware(adapter); + if (err < 0) + return err; + else if (!err) + goto check_fw_status; + + if (load_fw_file) + qlcnic_request_firmware(adapter); + else { + err = qlcnic_check_flash_fw_ver(adapter); + if (err) + goto err_out; + + adapter->fw_type = QLCNIC_FLASH_ROMIMAGE; + } + + err = qlcnic_need_fw_reset(adapter); + if (err == 0) + goto check_fw_status; + + err = qlcnic_pinit_from_rom(adapter); + if (err) + goto err_out; + + err = qlcnic_load_firmware(adapter); + if (err) + goto err_out; + + qlcnic_release_firmware(adapter); + QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION); + +check_fw_status: + err = qlcnic_check_fw_status(adapter); + if (err) + goto err_out; + + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); + qlcnic_idc_debug_info(adapter, 1); + + err = qlcnic_check_eswitch_mode(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Memory allocation failed for eswitch\n"); + goto err_out; + } + err = qlcnic_set_mgmt_operations(adapter); + if (err) + goto err_out; + + qlcnic_check_options(adapter); + adapter->need_fw_reset = 0; + + qlcnic_release_firmware(adapter); + return 0; + +err_out: + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); + dev_err(&adapter->pdev->dev, "Device state set to failed\n"); + + qlcnic_release_firmware(adapter); + return err; +} + +static int +qlcnic_request_irq(struct qlcnic_adapter *adapter) +{ + irq_handler_t handler; + struct qlcnic_host_sds_ring *sds_ring; + int err, ring; + + unsigned long flags = 0; + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { + handler = qlcnic_tmp_intr; + if (!QLCNIC_IS_MSI_FAMILY(adapter)) + flags |= IRQF_SHARED; + + } else { + if (adapter->flags & QLCNIC_MSIX_ENABLED) + handler = qlcnic_msix_intr; + else if (adapter->flags & QLCNIC_MSI_ENABLED) + handler = qlcnic_msi_intr; + else { + flags |= IRQF_SHARED; + handler = qlcnic_intr; + } + } + adapter->irq = netdev->irq; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); + err = request_irq(sds_ring->irq, handler, + flags, sds_ring->name, sds_ring); + if (err) + return err; + } + + return 0; +} + +static void +qlcnic_free_irq(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + free_irq(sds_ring->irq, sds_ring); + } +} + +static int +__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + int ring; + struct qlcnic_host_rds_ring *rds_ring; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return -EIO; + + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return 0; + if (qlcnic_set_eswitch_port_config(adapter)) + return -EIO; + + if (qlcnic_fw_create_ctx(adapter)) + return -EIO; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + qlcnic_post_rx_buffers(adapter, rds_ring); + } + + qlcnic_set_multi(netdev); + qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu); + + adapter->ahw->linkup = 0; + + if (adapter->max_sds_rings > 1) + qlcnic_config_rss(adapter, 1); + + 
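+	/* Program interrupt coalescing, and hardware LRO when enabled,
+	 * before NAPI polling starts.
+	 */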
qlcnic_config_intr_coalesce(adapter); + + if (netdev->features & NETIF_F_LRO) + qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); + + qlcnic_napi_enable(adapter); + + qlcnic_linkevent_request(adapter, 1); + + adapter->reset_context = 0; + set_bit(__QLCNIC_DEV_UP, &adapter->state); + return 0; +} + +/* Usage: During resume and firmware recovery module.*/ + +static int +qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + int err = 0; + + rtnl_lock(); + if (netif_running(netdev)) + err = __qlcnic_up(adapter, netdev); + rtnl_unlock(); + + return err; +} + +static void +__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) + return; + + smp_mb(); + spin_lock(&adapter->tx_clean_lock); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + qlcnic_free_mac_list(adapter); + + if (adapter->fhash.fnum) + qlcnic_delete_lb_filters(adapter); + + qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE); + + qlcnic_napi_disable(adapter); + + qlcnic_fw_destroy_ctx(adapter); + + qlcnic_reset_rx_buffers_list(adapter); + qlcnic_release_tx_buffers(adapter); + spin_unlock(&adapter->tx_clean_lock); +} + +/* Usage: During suspend and firmware recovery module */ + +static void +qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + rtnl_lock(); + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + rtnl_unlock(); + +} + +static int +qlcnic_attach(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err; + + if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) + return 0; + + err = qlcnic_napi_add(adapter, netdev); + if (err) + return err; + + err = qlcnic_alloc_sw_resources(adapter); + if (err) { + dev_err(&pdev->dev, "Error in setting sw resources\n"); + goto err_out_napi_del; + } + + err = qlcnic_alloc_hw_resources(adapter); + if (err) { + dev_err(&pdev->dev, "Error in setting hw resources\n"); + goto err_out_free_sw; + } + + err = qlcnic_request_irq(adapter); + if (err) { + dev_err(&pdev->dev, "failed to setup interrupt\n"); + goto err_out_free_hw; + } + + qlcnic_create_sysfs_entries(adapter); + + adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; + return 0; + +err_out_free_hw: + qlcnic_free_hw_resources(adapter); +err_out_free_sw: + qlcnic_free_sw_resources(adapter); +err_out_napi_del: + qlcnic_napi_del(adapter); + return err; +} + +static void +qlcnic_detach(struct qlcnic_adapter *adapter) +{ + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + qlcnic_remove_sysfs_entries(adapter); + + qlcnic_free_hw_resources(adapter); + qlcnic_release_rx_buffers(adapter); + qlcnic_free_irq(adapter); + qlcnic_napi_del(adapter); + qlcnic_free_sw_resources(adapter); + + adapter->is_up = 0; +} + +void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + int ring; + + clear_bit(__QLCNIC_DEV_UP, &adapter->state); + if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &adapter->recv_ctx->sds_rings[ring]; + qlcnic_disable_int(sds_ring); + } + } + + qlcnic_fw_destroy_ctx(adapter); + + qlcnic_detach(adapter); + + adapter->diag_test = 0; + adapter->max_sds_rings = max_sds_rings; + + if (qlcnic_attach(adapter)) + goto out; + + if (netif_running(netdev)) + __qlcnic_up(adapter, 
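/*
 * [Editor's note] Sketch, not part of the original patch.
 * qlcnic_diag_free_res() is the mirror image of qlcnic_diag_alloc_res()
 * further down: diagnostics temporarily rebuild the adapter with a
 * single SDS ring, then this path restores max_sds_rings and
 * re-attaches. The overall shape of a diagnostic run:
 *
 *	netif_device_detach(netdev);
 *	qlcnic_detach(adapter);		// tear down normal resources
 *	// ...run the test with one ring...
 *	qlcnic_attach(adapter);		// restore normal configuration
 *	netif_device_attach(netdev);
 */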
netdev); +out: + netif_device_attach(netdev); +} + +static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) +{ + int err = 0; + adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context), + GFP_KERNEL); + if (!adapter->ahw) { + dev_err(&adapter->pdev->dev, + "Failed to allocate recv ctx resources for adapter\n"); + err = -ENOMEM; + goto err_out; + } + adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context), + GFP_KERNEL); + if (!adapter->recv_ctx) { + dev_err(&adapter->pdev->dev, + "Failed to allocate recv ctx resources for adapter\n"); + kfree(adapter->ahw); + adapter->ahw = NULL; + err = -ENOMEM; + goto err_out; + } + /* Initialize interrupt coalesce parameters */ + adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; + adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; + adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; +err_out: + return err; +} + +static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter) +{ + kfree(adapter->recv_ctx); + adapter->recv_ctx = NULL; + + if (adapter->ahw->fw_dump.tmpl_hdr) { + vfree(adapter->ahw->fw_dump.tmpl_hdr); + adapter->ahw->fw_dump.tmpl_hdr = NULL; + } + kfree(adapter->ahw); + adapter->ahw = NULL; +} + +int qlcnic_diag_alloc_res(struct net_device *netdev, int test) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_rds_ring *rds_ring; + int ring; + int ret; + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + + adapter->max_sds_rings = 1; + adapter->diag_test = test; + + ret = qlcnic_attach(adapter); + if (ret) { + netif_device_attach(netdev); + return ret; + } + + ret = qlcnic_fw_create_ctx(adapter); + if (ret) { + qlcnic_detach(adapter); + netif_device_attach(netdev); + return ret; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + qlcnic_post_rx_buffers(adapter, rds_ring); + } + + if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &adapter->recv_ctx->sds_rings[ring]; + qlcnic_enable_int(sds_ring); + } + } + + if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) { + adapter->ahw->loopback_state = 0; + qlcnic_linkevent_request(adapter, 1); + } + + set_bit(__QLCNIC_DEV_UP, &adapter->state); + + return 0; +} + +/* Reset context in hardware only */ +static int +qlcnic_reset_hw_context(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + netif_device_detach(netdev); + + qlcnic_down(adapter, netdev); + + qlcnic_up(adapter, netdev); + + netif_device_attach(netdev); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return 0; +} + +int +qlcnic_reset_context(struct qlcnic_adapter *adapter) +{ + int err = 0; + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + + if (netif_running(netdev)) { + err = qlcnic_attach(adapter); + if (!err) + __qlcnic_up(adapter, netdev); + } + + netif_device_attach(netdev); + } + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return err; +} + +static int +qlcnic_setup_netdev(struct qlcnic_adapter *adapter, + 
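/*
 * [Editor's note] Sketch, not part of the original patch. The function
 * below advertises offloads through the (then new) netdev->hw_features
 * mask: hw_features lists what ethtool may toggle, while netdev->features
 * is the currently enabled subset plus fixed bits such as VLAN RX.
 * TSO_SUPPORTED below is a hypothetical capability test:
 *
 *	netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
 *	if (cap & TSO_SUPPORTED)	// hypothetical
 *		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
 *	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_RX;
 */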
struct net_device *netdev, u8 pci_using_dac) +{ + int err; + struct pci_dev *pdev = adapter->pdev; + + adapter->mc_enabled = 0; + adapter->max_mc_count = 38; + + netdev->netdev_ops = &qlcnic_netdev_ops; + netdev->watchdog_timeo = 5*HZ; + + qlcnic_change_mtu(netdev, netdev->mtu); + + SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); + + netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) + netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + if (pci_using_dac) + netdev->hw_features |= NETIF_F_HIGHDMA; + + netdev->vlan_features = netdev->hw_features; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX) + netdev->hw_features |= NETIF_F_HW_VLAN_TX; + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) + netdev->hw_features |= NETIF_F_LRO; + + netdev->features |= netdev->hw_features | + NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + + netdev->irq = adapter->msix_entries[0].vector; + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "failed to register net device\n"); + return err; + } + + return 0; +} + +static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac) +{ + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + *pci_using_dac = 1; + else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + *pci_using_dac = 0; + else { + dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n"); + return -EIO; + } + + return 0; +} + +static int +qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count) +{ + adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry), + GFP_KERNEL); + + if (adapter->msix_entries) + return 0; + + dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n"); + return -ENOMEM; +} + +static int __devinit +qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev = NULL; + struct qlcnic_adapter *adapter = NULL; + int err; + uint8_t revision_id; + uint8_t pci_using_dac; + char brd_name[QLCNIC_MAX_BOARD_NAME_LEN]; + + err = pci_enable_device(pdev); + if (err) + return err; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + err = -ENODEV; + goto err_out_disable_pdev; + } + + err = qlcnic_set_dma_mask(pdev, &pci_using_dac); + if (err) + goto err_out_disable_pdev; + + err = pci_request_regions(pdev, qlcnic_driver_name); + if (err) + goto err_out_disable_pdev; + + pci_set_master(pdev); + pci_enable_pcie_error_reporting(pdev); + + netdev = alloc_etherdev(sizeof(struct qlcnic_adapter)); + if (!netdev) { + dev_err(&pdev->dev, "failed to allocate net_device\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + + if (qlcnic_alloc_adapter_resources(adapter)) + goto err_out_free_netdev; + + adapter->dev_rst_time = jiffies; + revision_id = pdev->revision; + adapter->ahw->revision_id = revision_id; + adapter->mac_learn = qlcnic_mac_learn; + + rwlock_init(&adapter->ahw->crb_lock); + mutex_init(&adapter->ahw->mem_lock); + + spin_lock_init(&adapter->tx_clean_lock); + INIT_LIST_HEAD(&adapter->mac_list); + + err = qlcnic_setup_pci_map(adapter); + if (err) + goto err_out_free_hw; + + /* This will be reset for mezz cards */ + adapter->portnum = adapter->ahw->pci_func; + + err = qlcnic_get_board_info(adapter); + if (err) { + dev_err(&pdev->dev, "Error getting board 
config info.\n"); + goto err_out_iounmap; + } + + err = qlcnic_setup_idc_param(adapter); + if (err) + goto err_out_iounmap; + + adapter->flags |= QLCNIC_NEED_FLR; + + err = adapter->nic_ops->start_firmware(adapter); + if (err) { + dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); + goto err_out_decr_ref; + } + + if (qlcnic_read_mac_addr(adapter)) + dev_warn(&pdev->dev, "failed to read mac addr\n"); + + if (adapter->portnum == 0) { + get_brd_name(adapter, brd_name); + + pr_info("%s: %s Board Chip rev 0x%x\n", + module_name(THIS_MODULE), + brd_name, adapter->ahw->revision_id); + } + + qlcnic_clear_stats(adapter); + + err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques); + if (err) + goto err_out_decr_ref; + + qlcnic_setup_intr(adapter); + + err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); + if (err) + goto err_out_disable_msi; + + pci_set_drvdata(pdev, adapter); + + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); + + switch (adapter->ahw->port_type) { + case QLCNIC_GBE: + dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", + adapter->netdev->name); + break; + case QLCNIC_XGBE: + dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", + adapter->netdev->name); + break; + } + + if (adapter->mac_learn) + qlcnic_alloc_lb_filters_mem(adapter); + + qlcnic_create_diag_entries(adapter); + + return 0; + +err_out_disable_msi: + qlcnic_teardown_intr(adapter); + kfree(adapter->msix_entries); + +err_out_decr_ref: + qlcnic_clr_all_drv_state(adapter, 0); + +err_out_iounmap: + qlcnic_cleanup_pci_map(adapter); + +err_out_free_hw: + qlcnic_free_adapter_resources(adapter); + +err_out_free_netdev: + free_netdev(netdev); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + return err; +} + +static void __devexit qlcnic_remove(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter; + struct net_device *netdev; + + adapter = pci_get_drvdata(pdev); + if (adapter == NULL) + return; + + netdev = adapter->netdev; + + qlcnic_cancel_fw_work(adapter); + + unregister_netdev(netdev); + + qlcnic_detach(adapter); + + if (adapter->npars != NULL) + kfree(adapter->npars); + if (adapter->eswitch != NULL) + kfree(adapter->eswitch); + + qlcnic_clr_all_drv_state(adapter, 0); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + qlcnic_free_lb_filters_mem(adapter); + + qlcnic_teardown_intr(adapter); + kfree(adapter->msix_entries); + + qlcnic_remove_diag_entries(adapter); + + qlcnic_cleanup_pci_map(adapter); + + qlcnic_release_firmware(adapter); + + pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + qlcnic_free_adapter_resources(adapter); + free_netdev(netdev); +} +static int __qlcnic_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + + qlcnic_cancel_fw_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_clr_all_drv_state(adapter, 0); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + if (qlcnic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + return 0; +} + +static void qlcnic_shutdown(struct pci_dev *pdev) +{ + if (__qlcnic_shutdown(pdev)) + return; + + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM 
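/*
 * [Editor's note] Illustrative sketch, not part of the original patch.
 * The suspend path below reuses __qlcnic_shutdown(): detach the netdev,
 * stop traffic, save PCI state and arm wake-up, then drop to the
 * requested power state; resume reverses each step. The skeleton of a
 * legacy PCI PM pair for a hypothetical driver "foo":
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		int err = pci_save_state(pdev);
 *		if (err)
 *			return err;
 *		pci_enable_wake(pdev, PCI_D3hot, 1);	// if WoL supported
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		int err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;	// then re-init firmware, bring netdev up
 *	}
 */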
+static int +qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + + retval = __qlcnic_shutdown(pdev); + if (retval) + return retval; + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int +qlcnic_resume(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_power_state(pdev, PCI_D0); + pci_set_master(pdev); + pci_restore_state(pdev); + + err = adapter->nic_ops->start_firmware(adapter); + if (err) { + dev_err(&pdev->dev, "failed to start firmware\n"); + return err; + } + + if (netif_running(netdev)) { + err = qlcnic_up(adapter, netdev); + if (err) + goto done; + + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } +done: + netif_device_attach(netdev); + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); + return 0; +} +#endif + +static int qlcnic_open(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err; + + netif_carrier_off(netdev); + + err = qlcnic_attach(adapter); + if (err) + return err; + + err = __qlcnic_up(adapter, netdev); + if (err) + goto err_out; + + netif_start_queue(netdev); + + return 0; + +err_out: + qlcnic_detach(adapter); + return err; +} + +/* + * qlcnic_close - Disables a network interface entry point + */ +static int qlcnic_close(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + __qlcnic_down(adapter, netdev); + return 0; +} + +void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter) +{ + void *head; + int i; + + if (adapter->fhash.fmax && adapter->fhash.fhead) + return; + + spin_lock_init(&adapter->mac_learn_lock); + + head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head), + GFP_KERNEL); + if (!head) + return; + + adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS; + adapter->fhash.fhead = head; + + for (i = 0; i < adapter->fhash.fmax; i++) + INIT_HLIST_HEAD(&adapter->fhash.fhead[i]); +} + +static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter) +{ + if (adapter->fhash.fmax && adapter->fhash.fhead) + kfree(adapter->fhash.fhead); + + adapter->fhash.fhead = NULL; + adapter->fhash.fmax = 0; +} + +static void qlcnic_change_filter(struct qlcnic_adapter *adapter, + u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) +{ + struct cmd_desc_type0 *hwdesc; + struct qlcnic_nic_req *req; + struct qlcnic_mac_req *mac_req; + struct qlcnic_vlan_req *vlan_req; + u32 producer; + u64 word; + + producer = tx_ring->producer; + hwdesc = &tx_ring->desc_head[tx_ring->producer]; + + req = (struct qlcnic_nic_req *)hwdesc; + memset(req, 0, sizeof(struct qlcnic_nic_req)); + req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); + + word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16); + req->req_hdr = cpu_to_le64(word); + + mac_req = (struct qlcnic_mac_req *)&(req->words[0]); + mac_req->op = vlan_id ? 
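/*
 * [Editor's note] Sketch, not part of the original patch. The TX-path
 * MAC learning below (QLCNIC_MAC_HASH plus qlcnic_send_filter()) packs
 * the 6-byte source MAC into a u64, hashes it into adapter->fhash, and
 * re-sends the filter to firmware only when an entry is older than
 * QLCNIC_READD_AGE. The lookup-or-insert core, simplified:
 *
 *	hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
 *	hlist_for_each_entry(f, node, &fhash.fhead[hindex], fnode)
 *		if (!memcmp(f->faddr, &src_addr, ETH_ALEN))
 *			goto refresh;	// hit: just update f->ftime
 *	// miss: allocate with GFP_ATOMIC, the TX path cannot sleep
 */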
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; + memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN); + + vlan_req = (struct qlcnic_vlan_req *)&req->words[1]; + vlan_req->vlan_id = vlan_id; + + tx_ring->producer = get_next_index(producer, tx_ring->num_desc); + smp_mb(); +} + +#define QLCNIC_MAC_HASH(MAC)\ + ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25)) + +static void +qlcnic_send_filter(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring, + struct cmd_desc_type0 *first_desc, + struct sk_buff *skb) +{ + struct ethhdr *phdr = (struct ethhdr *)(skb->data); + struct qlcnic_filter *fil, *tmp_fil; + struct hlist_node *tmp_hnode, *n; + struct hlist_head *head; + u64 src_addr = 0; + __le16 vlan_id = 0; + u8 hindex; + + if (!compare_ether_addr(phdr->h_source, adapter->mac_addr)) + return; + + if (adapter->fhash.fnum >= adapter->fhash.fmax) + return; + + /* Only NPAR capable devices support vlan based learning*/ + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + vlan_id = first_desc->vlan_TCI; + memcpy(&src_addr, phdr->h_source, ETH_ALEN); + hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1); + head = &(adapter->fhash.fhead[hindex]); + + hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && + tmp_fil->vlan_id == vlan_id) { + + if (jiffies > + (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) + qlcnic_change_filter(adapter, src_addr, vlan_id, + tx_ring); + tmp_fil->ftime = jiffies; + return; + } + } + + fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); + if (!fil) + return; + + qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring); + + fil->ftime = jiffies; + fil->vlan_id = vlan_id; + memcpy(fil->faddr, &src_addr, ETH_ALEN); + spin_lock(&adapter->mac_learn_lock); + hlist_add_head(&(fil->fnode), head); + adapter->fhash.fnum++; + spin_unlock(&adapter->mac_learn_lock); +} + +static int +qlcnic_tx_pkt(struct qlcnic_adapter *adapter, + struct cmd_desc_type0 *first_desc, + struct sk_buff *skb) +{ + u8 opcode = 0, hdr_len = 0; + u16 flags = 0, vlan_tci = 0; + int copied, offset, copy_len; + struct cmd_desc_type0 *hwdesc; + struct vlan_ethhdr *vh; + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + u16 protocol = ntohs(skb->protocol); + u32 producer = tx_ring->producer; + + if (protocol == ETH_P_8021Q) { + vh = (struct vlan_ethhdr *)skb->data; + flags = FLAGS_VLAN_TAGGED; + vlan_tci = vh->h_vlan_TCI; + } else if (vlan_tx_tag_present(skb)) { + flags = FLAGS_VLAN_OOB; + vlan_tci = vlan_tx_tag_get(skb); + } + if (unlikely(adapter->pvid)) { + if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) + return -EIO; + if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) + goto set_flags; + + flags = FLAGS_VLAN_OOB; + vlan_tci = adapter->pvid; + } +set_flags: + qlcnic_set_tx_vlan_tci(first_desc, vlan_tci); + qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); + + if (*(skb->data) & BIT_0) { + flags |= BIT_0; + memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); + } + opcode = TX_ETHER_PKT; + if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && + skb_shinfo(skb)->gso_size > 0) { + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + first_desc->total_hdr_length = hdr_len; + + opcode = (protocol == ETH_P_IPV6) ? 
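/*
 * [Editor's note] Sketch, not part of the original patch. For LSO the
 * firmware needs a full image of the MAC/IP/TCP headers, so the driver
 * derives the header length from skb metadata and copies that many
 * bytes into additional command descriptors (the loop that follows).
 * Roughly:
 *
 *	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 *	descs = DIV_ROUND_UP(hdr_len, sizeof(struct cmd_desc_type0));
 */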
TX_TCP_LSO6 : TX_TCP_LSO; + + /* For LSO, we need to copy the MAC/IP/TCP headers into + * the descriptor ring */ + copied = 0; + offset = 2; + + if (flags & FLAGS_VLAN_OOB) { + first_desc->total_hdr_length += VLAN_HLEN; + first_desc->tcp_hdr_offset = VLAN_HLEN; + first_desc->ip_hdr_offset = VLAN_HLEN; + /* Only in case of TSO on vlan device */ + flags |= FLAGS_VLAN_TAGGED; + + /* Create a TSO vlan header template for firmware */ + + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + copy_len = min((int)sizeof(struct cmd_desc_type0) - + offset, hdr_len + VLAN_HLEN); + + vh = (struct vlan_ethhdr *)((char *) hwdesc + 2); + skb_copy_from_linear_data(skb, vh, 12); + vh->h_vlan_proto = htons(ETH_P_8021Q); + vh->h_vlan_TCI = htons(vlan_tci); + + skb_copy_from_linear_data_offset(skb, 12, + (char *)vh + 16, copy_len - 16); + + copied = copy_len - VLAN_HLEN; + offset = 0; + + producer = get_next_index(producer, tx_ring->num_desc); + } + + while (copied < hdr_len) { + + copy_len = min((int)sizeof(struct cmd_desc_type0) - + offset, (hdr_len - copied)); + + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + skb_copy_from_linear_data_offset(skb, copied, + (char *) hwdesc + offset, copy_len); + + copied += copy_len; + offset = 0; + + producer = get_next_index(producer, tx_ring->num_desc); + } + + tx_ring->producer = producer; + smp_mb(); + adapter->stats.lso_frames++; + + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 l4proto; + + if (protocol == ETH_P_IP) { + l4proto = ip_hdr(skb)->protocol; + + if (l4proto == IPPROTO_TCP) + opcode = TX_TCP_PKT; + else if (l4proto == IPPROTO_UDP) + opcode = TX_UDP_PKT; + } else if (protocol == ETH_P_IPV6) { + l4proto = ipv6_hdr(skb)->nexthdr; + + if (l4proto == IPPROTO_TCP) + opcode = TX_TCPV6_PKT; + else if (l4proto == IPPROTO_UDP) + opcode = TX_UDPV6_PKT; + } + } + first_desc->tcp_hdr_offset += skb_transport_offset(skb); + first_desc->ip_hdr_offset += skb_network_offset(skb); + qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); + + return 0; +} + +static int +qlcnic_map_tx_skb(struct pci_dev *pdev, + struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf) +{ + struct qlcnic_skb_frag *nf; + struct skb_frag_struct *frag; + int i, nr_frags; + dma_addr_t map; + + nr_frags = skb_shinfo(skb)->nr_frags; + nf = &pbuf->frag_array[0]; + + map = pci_map_single(pdev, skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto out_err; + + nf->dma = map; + nf->length = skb_headlen(skb); + + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nf = &pbuf->frag_array[i+1]; + + map = pci_map_page(pdev, frag->page, frag->page_offset, + frag->size, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto unwind; + + nf->dma = map; + nf->length = frag->size; + } + + return 0; + +unwind: + while (--i >= 0) { + nf = &pbuf->frag_array[i+1]; + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + } + + nf = &pbuf->frag_array[0]; + pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + +out_err: + return -ENOMEM; +} + +static void +qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb, + struct qlcnic_cmd_buffer *pbuf) +{ + struct qlcnic_skb_frag *nf = &pbuf->frag_array[0]; + int nr_frags = skb_shinfo(skb)->nr_frags; + int i; + + for (i = 0; i < nr_frags; i++) { + nf = &pbuf->frag_array[i+1]; + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + } + + nf = &pbuf->frag_array[0]; + 
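/*
 * [Editor's note] Sketch, not part of the original patch.
 * qlcnic_map_tx_skb() above is the canonical DMA mapping pattern: map
 * the linear part first, then each page fragment, and on any mapping
 * error unwind in reverse so no stale mappings leak:
 *
 *	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
 *			     PCI_DMA_TODEVICE);
 *	if (pci_dma_mapping_error(pdev, map))
 *		goto out_err;
 *	for (i = 0; i < nr_frags; i++) {
 *		// ...pci_map_page() for frag i...
 *		if (mapping_failed)
 *			goto unwind;	// unmap frags [0, i), then the head
 *	}
 */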
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + pbuf->skb = NULL; +} + +static inline void +qlcnic_clear_cmddesc(u64 *desc) +{ + desc[0] = 0ULL; + desc[2] = 0ULL; + desc[7] = 0ULL; +} + +netdev_tx_t +qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + struct qlcnic_cmd_buffer *pbuf; + struct qlcnic_skb_frag *buffrag; + struct cmd_desc_type0 *hwdesc, *first_desc; + struct pci_dev *pdev; + struct ethhdr *phdr; + int delta = 0; + int i, k; + + u32 producer; + int frag_count; + u32 num_txd = tx_ring->num_desc; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + if (adapter->flags & QLCNIC_MACSPOOF) { + phdr = (struct ethhdr *)skb->data; + if (compare_ether_addr(phdr->h_source, + adapter->mac_addr)) + goto drop_packet; + } + + frag_count = skb_shinfo(skb)->nr_frags + 1; + /* 14 frags supported for normal packet and + * 32 frags supported for TSO packet + */ + if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { + + for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) + delta += skb_shinfo(skb)->frags[i].size; + + if (!__pskb_pull_tail(skb, delta)) + goto drop_packet; + + frag_count = 1 + skb_shinfo(skb)->nr_frags; + } + + if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { + netif_stop_queue(netdev); + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_start_queue(netdev); + else { + adapter->stats.xmit_off++; + return NETDEV_TX_BUSY; + } + } + + producer = tx_ring->producer; + pbuf = &tx_ring->cmd_buf_arr[producer]; + + pdev = adapter->pdev; + + first_desc = hwdesc = &tx_ring->desc_head[producer]; + qlcnic_clear_cmddesc((u64 *)hwdesc); + + if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { + adapter->stats.tx_dma_map_error++; + goto drop_packet; + } + + pbuf->skb = skb; + pbuf->frag_count = frag_count; + + qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len); + qlcnic_set_tx_port(first_desc, adapter->portnum); + + for (i = 0; i < frag_count; i++) { + + k = i % 4; + + if ((k == 0) && (i > 0)) { + /* move to next desc.*/ + producer = get_next_index(producer, num_txd); + hwdesc = &tx_ring->desc_head[producer]; + qlcnic_clear_cmddesc((u64 *)hwdesc); + tx_ring->cmd_buf_arr[producer].skb = NULL; + } + + buffrag = &pbuf->frag_array[i]; + + hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); + switch (k) { + case 0: + hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); + break; + case 1: + hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); + break; + case 2: + hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); + break; + case 3: + hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); + break; + } + } + + tx_ring->producer = get_next_index(producer, num_txd); + smp_mb(); + + if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb))) + goto unwind_buff; + + if (adapter->mac_learn) + qlcnic_send_filter(adapter, tx_ring, first_desc, skb); + + adapter->stats.txbytes += skb->len; + adapter->stats.xmitcalled++; + + qlcnic_update_cmd_producer(adapter, tx_ring); + + return NETDEV_TX_OK; + +unwind_buff: + qlcnic_unmap_buffers(pdev, skb, pbuf); +drop_packet: + adapter->stats.txdropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static int qlcnic_check_temp(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 temp, temp_state, temp_val; + int rv = 0; + + temp = QLCRD32(adapter, CRB_TEMP_STATE); + + temp_state = 
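/*
 * [Editor's note] Sketch, not part of the original patch. CRB_TEMP_STATE
 * packs both a severity code and the temperature reading into a single
 * register; the qlcnic_get_temp_state()/qlcnic_get_temp_val() accessors
 * split it apart. Assuming the netxen-style layout (value in the high
 * half, state in the low half), they amount to:
 *
 *	#define qlcnic_get_temp_state(x)	((x) & 0xffff)
 *	#define qlcnic_get_temp_val(x)		(((x) >> 16) & 0xffff)
 */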
qlcnic_get_temp_state(temp); + temp_val = qlcnic_get_temp_val(temp); + + if (temp_state == QLCNIC_TEMP_PANIC) { + dev_err(&netdev->dev, + "Device temperature %d degrees C exceeds" + " maximum allowed. Hardware has been shut down.\n", + temp_val); + rv = 1; + } else if (temp_state == QLCNIC_TEMP_WARN) { + if (adapter->temp == QLCNIC_TEMP_NORMAL) { + dev_err(&netdev->dev, + "Device temperature %d degrees C " + "exceeds operating range." + " Immediate action needed.\n", + temp_val); + } + } else { + if (adapter->temp == QLCNIC_TEMP_WARN) { + dev_info(&netdev->dev, + "Device temperature is now %d degrees C" + " in normal range.\n", temp_val); + } + } + adapter->temp = temp_state; + return rv; +} + +void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->ahw->linkup && !linkup) { + netdev_info(netdev, "NIC Link is down\n"); + adapter->ahw->linkup = 0; + if (netif_running(netdev)) { + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } else if (!adapter->ahw->linkup && linkup) { + netdev_info(netdev, "NIC Link is up\n"); + adapter->ahw->linkup = 1; + if (netif_running(netdev)) { + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + } +} + +static void qlcnic_tx_timeout(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + return; + + dev_err(&netdev->dev, "transmit timeout, resetting.\n"); + + if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) + adapter->need_fw_reset = 1; + else + adapter->reset_context = 1; +} + +static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *stats = &netdev->stats; + + stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; + stats->tx_packets = adapter->stats.xmitfinished; + stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; + stats->tx_bytes = adapter->stats.txbytes; + stats->rx_dropped = adapter->stats.rxdropped; + stats->tx_dropped = adapter->stats.txdropped; + + return stats; +} + +static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter) +{ + u32 status; + + status = readl(adapter->isr_int_vec); + + if (!(status & adapter->int_vec_bit)) + return IRQ_NONE; + + /* check interrupt state machine, to be sure */ + status = readl(adapter->crb_int_state_reg); + if (!ISR_LEGACY_INT_TRIGGERED(status)) + return IRQ_NONE; + + writel(0xffffffff, adapter->tgt_status_reg); + /* read twice to ensure write is flushed */ + readl(adapter->isr_int_vec); + readl(adapter->isr_int_vec); + + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_tmp_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + goto done; + else if (adapter->flags & QLCNIC_MSI_ENABLED) { + writel(0xffffffff, adapter->tgt_status_reg); + goto done; + } + + if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) + return IRQ_NONE; + +done: + adapter->diag_cnt++; + qlcnic_enable_int(sds_ring); + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) + return IRQ_NONE; + + napi_schedule(&sds_ring->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_msi_intr(int 
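/*
 * [Editor's note] Sketch, not part of the original patch.
 * qlcnic_clear_legacy_intr() above shows the classic MMIO ack pattern
 * for a shared INTx line: confirm the interrupt really belongs to this
 * device, ack it, then read the register back so the posted write is
 * flushed before returning IRQ_HANDLED:
 *
 *	if (!(readl(isr_vec) & my_bit))
 *		return IRQ_NONE;		// not ours (shared line)
 *	writel(0xffffffff, status_reg);		// ack
 *	readl(isr_vec);				// flush the posted write
 */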
irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + /* clear interrupt */ + writel(0xffffffff, adapter->tgt_status_reg); + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_msix_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) +{ + u32 sw_consumer, hw_consumer; + int count = 0, i; + struct qlcnic_cmd_buffer *buffer; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct qlcnic_skb_frag *frag; + int done; + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + + if (!spin_trylock(&adapter->tx_clean_lock)) + return 1; + + sw_consumer = tx_ring->sw_consumer; + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + + while (sw_consumer != hw_consumer) { + buffer = &tx_ring->cmd_buf_arr[sw_consumer]; + if (buffer->skb) { + frag = &buffer->frag_array[0]; + pci_unmap_single(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + for (i = 1; i < buffer->frag_count; i++) { + frag++; + pci_unmap_page(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + } + + adapter->stats.xmitfinished++; + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; + } + + sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); + if (++count >= MAX_STATUS_HANDLE) + break; + } + + if (count && netif_running(netdev)) { + tx_ring->sw_consumer = sw_consumer; + + smp_mb(); + + if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { + netif_wake_queue(netdev); + adapter->stats.xmit_on++; + } + } + adapter->tx_timeo_cnt = 0; + } + /* + * If everything is freed up to consumer then check if the ring is full + * If the ring is full then check if more needs to be freed and + * schedule the call back again. + * + * This happens when there are 2 CPUs. One could be freeing and the + * other filling it. If the ring is full when we get out of here and + * the card has already interrupted the host then the host can miss the + * interrupt. + * + * There is still a possible race condition and the host could miss an + * interrupt. The card has to take care of this. 
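 *
 * [Editor's note -- illustration added by the editor, not part of the
 * original comment] The reaping side of this race is qlcnic_poll(),
 * which completes NAPI first and only then re-arms the interrupt:
 *
 *	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
 *	if (work_done < budget && tx_complete) {
 *		napi_complete(&sds_ring->napi);
 *		qlcnic_enable_int(sds_ring);	// re-arm only when idle
 *	}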
+ */ + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + done = (sw_consumer == hw_consumer); + spin_unlock(&adapter->tx_clean_lock); + + return done; +} + +static int qlcnic_poll(struct napi_struct *napi, int budget) +{ + struct qlcnic_host_sds_ring *sds_ring = + container_of(napi, struct qlcnic_host_sds_ring, napi); + + struct qlcnic_adapter *adapter = sds_ring->adapter; + + int tx_complete; + int work_done; + + tx_complete = qlcnic_process_cmd_ring(adapter); + + work_done = qlcnic_process_rcv_ring(sds_ring, budget); + + if ((work_done < budget) && tx_complete) { + napi_complete(&sds_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_int(sds_ring); + } + + return work_done; +} + +static int qlcnic_rx_poll(struct napi_struct *napi, int budget) +{ + struct qlcnic_host_sds_ring *sds_ring = + container_of(napi, struct qlcnic_host_sds_ring, napi); + + struct qlcnic_adapter *adapter = sds_ring->adapter; + int work_done; + + work_done = qlcnic_process_rcv_ring(sds_ring, budget); + + if (work_done < budget) { + napi_complete(&sds_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_int(sds_ring); + } + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void qlcnic_poll_controller(struct net_device *netdev) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + disable_irq(adapter->irq); + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + qlcnic_intr(adapter->irq, sds_ring); + } + enable_irq(adapter->irq); +} +#endif + +static void +qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding) +{ + u32 val; + + val = adapter->portnum & 0xf; + val |= encoding << 7; + val |= (jiffies - adapter->dev_rst_time) << 8; + + QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val); + adapter->dev_rst_time = jiffies; +} + +static int +qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state) +{ + u32 val; + + WARN_ON(state != QLCNIC_DEV_NEED_RESET && + state != QLCNIC_DEV_NEED_QUISCENT); + + if (qlcnic_api_lock(adapter)) + return -EIO; + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + + if (state == QLCNIC_DEV_NEED_RESET) + QLC_DEV_SET_RST_RDY(val, adapter->portnum); + else if (state == QLCNIC_DEV_NEED_QUISCENT) + QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum); + + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + qlcnic_api_unlock(adapter); + + return 0; +} + +static int +qlcnic_clr_drv_state(struct qlcnic_adapter *adapter) +{ + u32 val; + + if (qlcnic_api_lock(adapter)) + return -EBUSY; + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + qlcnic_api_unlock(adapter); + + return 0; +} + +static void +qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed) +{ + u32 val; + + if (qlcnic_api_lock(adapter)) + goto err; + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + QLC_DEV_CLR_REF_CNT(val, adapter->portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); + + if (failed) { + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); + dev_info(&adapter->pdev->dev, + "Device state set to Failed. 
Please Reboot\n"); + } else if (!(val & 0x11111111)) + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD); + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + qlcnic_api_unlock(adapter); +err: + adapter->fw_fail_cnt = 0; + adapter->flags &= ~QLCNIC_FW_HANG; + clear_bit(__QLCNIC_START_FW, &adapter->state); + clear_bit(__QLCNIC_RESETTING, &adapter->state); +} + +/* Grab api lock, before checking state */ +static int +qlcnic_check_drv_state(struct qlcnic_adapter *adapter) +{ + int act, state, active_mask; + + state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + + if (adapter->flags & QLCNIC_FW_RESET_OWNER) { + active_mask = (~(1 << (adapter->ahw->pci_func * 4))); + act = act & active_mask; + } + + if (((state & 0x11111111) == (act & 0x11111111)) || + ((act & 0x11111111) == ((state >> 1) & 0x11111111))) + return 0; + else + return 1; +} + +static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter) +{ + u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER); + + if (val != QLCNIC_DRV_IDC_VER) { + dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's" + " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val); + } + + return 0; +} + +static int +qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) +{ + u32 val, prev_state; + u8 dev_init_timeo = adapter->dev_init_timeo; + u8 portnum = adapter->portnum; + u8 ret; + + if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) + return 1; + + if (qlcnic_api_lock(adapter)) + return -1; + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + if (!(val & (1 << (portnum * 4)))) { + QLC_DEV_SET_REF_CNT(val, portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); + } + + prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + QLCDB(adapter, HW, "Device state = %u\n", prev_state); + + switch (prev_state) { + case QLCNIC_DEV_COLD: + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); + QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER); + qlcnic_idc_debug_info(adapter, 0); + qlcnic_api_unlock(adapter); + return 1; + + case QLCNIC_DEV_READY: + ret = qlcnic_check_idc_ver(adapter); + qlcnic_api_unlock(adapter); + return ret; + + case QLCNIC_DEV_NEED_RESET: + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_SET_RST_RDY(val, portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + break; + + case QLCNIC_DEV_NEED_QUISCENT: + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_SET_QSCNT_RDY(val, portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + break; + + case QLCNIC_DEV_FAILED: + dev_err(&adapter->pdev->dev, "Device in failed state.\n"); + qlcnic_api_unlock(adapter); + return -1; + + case QLCNIC_DEV_INITIALIZING: + case QLCNIC_DEV_QUISCENT: + break; + } + + qlcnic_api_unlock(adapter); + + do { + msleep(1000); + prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + + if (prev_state == QLCNIC_DEV_QUISCENT) + continue; + } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo); + + if (!dev_init_timeo) { + dev_err(&adapter->pdev->dev, + "Waiting for device to initialize timeout\n"); + return -1; + } + + if (qlcnic_api_lock(adapter)) + return -1; + + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_CLR_RST_QSCNT(val, portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + ret = qlcnic_check_idc_ver(adapter); + qlcnic_api_unlock(adapter); + + return ret; +} + +static void +qlcnic_fwinit_work(struct work_struct *work) +{ + struct qlcnic_adapter 
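/*
 * [Editor's note] Sketch, not part of the original patch. The IDC
 * handshake in qlcnic_can_start_firmware() above is a state machine in
 * a shared CRB register: the first function to claim the device moves
 * it from COLD to INITIALIZING, and every other function polls until it
 * reads READY or gives up. Simplified:
 *
 *	do {
 *		msleep(1000);
 *		state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 *	} while (state != QLCNIC_DEV_READY && --timeo);
 *	if (!timeo)
 *		return -1;	// device never became ready
 */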
*adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + u32 dev_state = 0xf; + u32 val; + + if (qlcnic_api_lock(adapter)) + goto err_ret; + + dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + if (dev_state == QLCNIC_DEV_QUISCENT || + dev_state == QLCNIC_DEV_NEED_QUISCENT) { + qlcnic_api_unlock(adapter); + qlcnic_schedule_work(adapter, qlcnic_fwinit_work, + FW_POLL_DELAY * 2); + return; + } + + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { + qlcnic_api_unlock(adapter); + goto wait_npar; + } + + if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { + dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n", + adapter->reset_ack_timeo); + goto skip_ack_check; + } + + if (!qlcnic_check_drv_state(adapter)) { +skip_ack_check: + dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + + if (dev_state == QLCNIC_DEV_NEED_RESET) { + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_INITIALIZING); + set_bit(__QLCNIC_START_FW, &adapter->state); + QLCDB(adapter, DRV, "Restarting fw\n"); + qlcnic_idc_debug_info(adapter, 0); + val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_SET_RST_RDY(val, adapter->portnum); + QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); + } + + qlcnic_api_unlock(adapter); + + rtnl_lock(); + if (adapter->ahw->fw_dump.enable && + (adapter->flags & QLCNIC_FW_RESET_OWNER)) { + QLCDB(adapter, DRV, "Take FW dump\n"); + qlcnic_dump_fw(adapter); + adapter->flags |= QLCNIC_FW_HANG; + } + rtnl_unlock(); + + adapter->flags &= ~QLCNIC_FW_RESET_OWNER; + if (!adapter->nic_ops->start_firmware(adapter)) { + qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); + adapter->fw_wait_cnt = 0; + return; + } + goto err_ret; + } + + qlcnic_api_unlock(adapter); + +wait_npar: + dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); + + switch (dev_state) { + case QLCNIC_DEV_READY: + if (!adapter->nic_ops->start_firmware(adapter)) { + qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); + adapter->fw_wait_cnt = 0; + return; + } + case QLCNIC_DEV_FAILED: + break; + default: + qlcnic_schedule_work(adapter, + qlcnic_fwinit_work, FW_POLL_DELAY); + return; + } + +err_ret: + dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u " + "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt); + netif_device_attach(adapter->netdev); + qlcnic_clr_all_drv_state(adapter, 0); +} + +static void +qlcnic_detach_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + u32 status; + + netif_device_detach(netdev); + + /* Dont grab rtnl lock during Quiscent mode */ + if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) { + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + } else + qlcnic_down(adapter, netdev); + + status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1); + + if (status & QLCNIC_RCODE_FATAL_ERROR) + goto err_ret; + + if (adapter->temp == QLCNIC_TEMP_PANIC) + goto err_ret; + /* Dont ack if this instance is the reset owner */ + if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) { + if (qlcnic_set_drv_state(adapter, adapter->dev_state)) + goto err_ret; + } + + adapter->fw_wait_cnt = 0; + + qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY); + + return; + +err_ret: + dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n", + status, adapter->temp); + netif_device_attach(netdev); + qlcnic_clr_all_drv_state(adapter, 1); +} + +/*Transit NPAR state to NON Operational */ +static 
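/*
 * [Editor's note] Sketch, not part of the original patch. Firmware
 * recovery is driven by one delayed work item that is re-targeted at
 * each stage, detach_work -> fwinit_work -> attach_work; each leg is
 * re-armed via qlcnic_schedule_work() below:
 *
 *	INIT_DELAYED_WORK(&adapter->fw_work, next_stage_fn);
 *	queue_delayed_work(qlcnic_wq, &adapter->fw_work,
 *			   round_jiffies_relative(delay));
 */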
void +qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter) +{ + u32 state; + + state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + if (state == QLCNIC_DEV_NPAR_NON_OPER) + return; + + if (qlcnic_api_lock(adapter)) + return; + QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER); + qlcnic_api_unlock(adapter); +} + +/*Transit to RESET state from READY state only */ +void +qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) +{ + u32 state; + + adapter->need_fw_reset = 1; + if (qlcnic_api_lock(adapter)) + return; + + state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + + if (state == QLCNIC_DEV_READY) { + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); + adapter->flags |= QLCNIC_FW_RESET_OWNER; + QLCDB(adapter, DRV, "NEED_RESET state set\n"); + qlcnic_idc_debug_info(adapter, 0); + } + + QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER); + qlcnic_api_unlock(adapter); +} + +/* Transit to NPAR READY state from NPAR NOT READY state */ +static void +qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter) +{ + if (qlcnic_api_lock(adapter)) + return; + + QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER); + QLCDB(adapter, DRV, "NPAR operational state set\n"); + + qlcnic_api_unlock(adapter); +} + +static void +qlcnic_schedule_work(struct qlcnic_adapter *adapter, + work_func_t func, int delay) +{ + if (test_bit(__QLCNIC_AER, &adapter->state)) + return; + + INIT_DELAYED_WORK(&adapter->fw_work, func); + queue_delayed_work(qlcnic_wq, &adapter->fw_work, + round_jiffies_relative(delay)); +} + +static void +qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter) +{ + while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + msleep(10); + + cancel_delayed_work_sync(&adapter->fw_work); +} + +static void +qlcnic_attach_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + u32 npar_state; + + if (adapter->op_mode != QLCNIC_MGMT_FUNC) { + npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO) + qlcnic_clr_all_drv_state(adapter, 0); + else if (npar_state != QLCNIC_DEV_NPAR_OPER) + qlcnic_schedule_work(adapter, qlcnic_attach_work, + FW_POLL_DELAY); + else + goto attach; + QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n"); + return; + } +attach: + if (netif_running(netdev)) { + if (qlcnic_up(adapter, netdev)) + goto done; + + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + +done: + netif_device_attach(netdev); + adapter->fw_fail_cnt = 0; + adapter->flags &= ~QLCNIC_FW_HANG; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + if (!qlcnic_clr_drv_state(adapter)) + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, + FW_POLL_DELAY); +} + +static int +qlcnic_check_health(struct qlcnic_adapter *adapter) +{ + u32 state = 0, heartbeat; + struct net_device *netdev = adapter->netdev; + + if (qlcnic_check_temp(adapter)) + goto detach; + + if (adapter->need_fw_reset) + qlcnic_dev_request_reset(adapter); + + state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + if (state == QLCNIC_DEV_NEED_RESET) { + qlcnic_set_npar_non_operational(adapter); + adapter->need_fw_reset = 1; + } else if (state == QLCNIC_DEV_NEED_QUISCENT) + goto detach; + + heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); + if (heartbeat != adapter->heartbeat) { + adapter->heartbeat = heartbeat; + adapter->fw_fail_cnt = 0; + if (adapter->need_fw_reset) + goto detach; + + if 
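/*
 * [Editor's note] Sketch, not part of the original patch. Firmware
 * health is inferred from QLCNIC_PEG_ALIVE_COUNTER: live firmware keeps
 * incrementing it, so a value unchanged across FW_FAIL_THRESH
 * successive polls is treated as a hang:
 *
 *	hb = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
 *	if (hb != adapter->heartbeat) {
 *		adapter->heartbeat = hb;	// alive: reset the count
 *		adapter->fw_fail_cnt = 0;
 *	} else if (++adapter->fw_fail_cnt >= FW_FAIL_THRESH) {
 *		// ...declare a hang and request a firmware reset...
 *	}
 */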
(adapter->reset_context && auto_fw_reset) { + qlcnic_reset_hw_context(adapter); + adapter->netdev->trans_start = jiffies; + } + + return 0; + } + + if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) + return 0; + + adapter->flags |= QLCNIC_FW_HANG; + + qlcnic_dev_request_reset(adapter); + + if (auto_fw_reset) + clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); + + dev_info(&netdev->dev, "firmware hang detected\n"); + dev_info(&adapter->pdev->dev, "Dumping hw/fw registers\n" + "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" + "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" + "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" + "PEG_NET_4_PC: 0x%x\n", + QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1), + QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c)); +detach: + adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state : + QLCNIC_DEV_NEED_RESET; + + if (auto_fw_reset && + !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { + + qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); + QLCDB(adapter, DRV, "fw recovery scheduled.\n"); + } + + return 1; +} + +static void +qlcnic_fw_poll_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + goto reschedule; + + + if (qlcnic_check_health(adapter)) + return; + + if (adapter->fhash.fnum) + qlcnic_prune_lb_filters(adapter); + +reschedule: + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); +} + +static int qlcnic_is_first_func(struct pci_dev *pdev) +{ + struct pci_dev *oth_pdev; + int val = pdev->devfn; + + while (val-- > 0) { + oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr + (pdev->bus), pdev->bus->number, + PCI_DEVFN(PCI_SLOT(pdev->devfn), val)); + if (!oth_pdev) + continue; + + if (oth_pdev->current_state != PCI_D3cold) { + pci_dev_put(oth_pdev); + return 0; + } + pci_dev_put(oth_pdev); + } + return 1; +} + +static int qlcnic_attach_func(struct pci_dev *pdev) +{ + int err, first_func; + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + pdev->error_state = pci_channel_io_normal; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_power_state(pdev, PCI_D0); + pci_set_master(pdev); + pci_restore_state(pdev); + + first_func = qlcnic_is_first_func(pdev); + + if (qlcnic_api_lock(adapter)) + return -EINVAL; + + if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) { + adapter->need_fw_reset = 1; + set_bit(__QLCNIC_START_FW, &adapter->state); + QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); + QLCDB(adapter, DRV, "Restarting fw\n"); + } + qlcnic_api_unlock(adapter); + + err = adapter->nic_ops->start_firmware(adapter); + if (err) + return err; + + qlcnic_clr_drv_state(adapter); + qlcnic_setup_intr(adapter); + + if (netif_running(netdev)) { + err = qlcnic_attach(adapter); + if (err) { + qlcnic_clr_all_drv_state(adapter, 1); + clear_bit(__QLCNIC_AER, &adapter->state); + netif_device_attach(netdev); + return err; + } + + err = qlcnic_up(adapter, netdev); + if (err) + goto done; + + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + done: + netif_device_attach(netdev); + return err; +} + +static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t 
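/*
 * [Editor's note] Sketch, not part of the original patch. The three AER
 * callbacks below are invoked in order by the PCI core after a bus
 * error: error_detected() quiesces the device, slot_reset()
 * re-initialises it after the link reset, and resume() restarts
 * traffic. They would be wired up roughly as follows (hypothetical
 * name; the actual registration is not shown in this section):
 *
 *	static struct pci_error_handlers foo_err_handler = {
 *		.error_detected	= qlcnic_io_error_detected,
 *		.slot_reset	= qlcnic_io_slot_reset,
 *		.resume		= qlcnic_io_resume,
 *	};
 */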
state) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (state == pci_channel_io_normal) + return PCI_ERS_RESULT_RECOVERED; + + set_bit(__QLCNIC_AER, &adapter->state); + netif_device_detach(netdev); + + cancel_delayed_work_sync(&adapter->fw_work); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + qlcnic_teardown_intr(adapter); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + pci_save_state(pdev); + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev) +{ + return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT : + PCI_ERS_RESULT_RECOVERED; +} + +static void qlcnic_io_resume(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + + pci_cleanup_aer_uncorrect_error_status(pdev); + + if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY && + test_and_clear_bit(__QLCNIC_AER, &adapter->state)) + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, + FW_POLL_DELAY); +} + +static int +qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) +{ + int err; + + err = qlcnic_can_start_firmware(adapter); + if (err) + return err; + + err = qlcnic_check_npar_opertional(adapter); + if (err) + return err; + + err = qlcnic_initialize_nic(adapter); + if (err) + return err; + + qlcnic_check_options(adapter); + + err = qlcnic_set_eswitch_port_config(adapter); + if (err) + return err; + + adapter->need_fw_reset = 0; + + return err; +} + +static int +qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) +{ + return -EOPNOTSUPP; +} + +static int +qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) +{ + return -EOPNOTSUPP; +} + +static ssize_t +qlcnic_store_bridged_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + unsigned long new; + int ret = -EINVAL; + + if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)) + goto err_out; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + goto err_out; + + if (strict_strtoul(buf, 2, &new)) + goto err_out; + + if (!adapter->nic_ops->config_bridged_mode(adapter, !!new)) + ret = len; + +err_out: + return ret; +} + +static ssize_t +qlcnic_show_bridged_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int bridged_mode = 0; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) + bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED); + + return sprintf(buf, "%d\n", bridged_mode); +} + +static struct device_attribute dev_attr_bridged_mode = { + .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = qlcnic_show_bridged_mode, + .store = qlcnic_store_bridged_mode, +}; + +static ssize_t +qlcnic_store_diag_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + unsigned long new; + + if (strict_strtoul(buf, 2, &new)) + return -EINVAL; + + if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED)) + adapter->flags ^= QLCNIC_DIAG_ENABLED; + + return len; +} + +static ssize_t +qlcnic_show_diag_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + + return 
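/*
 * [Editor's note] Sketch, not part of the original patch. The sysfs
 * store handlers in this block all parse the user buffer as a base-2
 * number and report success by returning the full length consumed:
 *
 *	if (strict_strtoul(buf, 2, &new))
 *		return -EINVAL;		// base-2 parse failed
 *	// ...apply !!new to the flag...
 *	return len;			// whole buffer consumed
 */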
sprintf(buf, "%d\n", + !!(adapter->flags & QLCNIC_DIAG_ENABLED)); +} + +static struct device_attribute dev_attr_diag_mode = { + .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = qlcnic_show_diag_mode, + .store = qlcnic_store_diag_mode, +}; + +int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val) +{ + if (!use_msi_x && !use_msi) { + netdev_info(netdev, "no msix or msi support, hence no rss\n"); + return -EINVAL; + } + + if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) { + netdev_info(netdev, "rss_ring valid range [2 - %x] in " + " powers of 2\n", max_hw); + return -EINVAL; + } + return 0; + +} + +int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + netif_device_detach(netdev); + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + qlcnic_detach(adapter); + qlcnic_teardown_intr(adapter); + + if (qlcnic_enable_msix(adapter, data)) { + netdev_info(netdev, "failed setting max_rss; rss disabled\n"); + qlcnic_enable_msi_legacy(adapter); + } + + if (netif_running(netdev)) { + err = qlcnic_attach(adapter); + if (err) + goto done; + err = __qlcnic_up(adapter, netdev); + if (err) + goto done; + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + done: + netif_device_attach(netdev); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return err; +} + +static int +qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter, + loff_t offset, size_t size) +{ + size_t crb_size = 4; + + if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) + return -EIO; + + if (offset < QLCNIC_PCI_CRBSPACE) { + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, + QLCNIC_PCI_CAMQM_END)) + crb_size = 8; + else + return -EINVAL; + } + + if ((size != crb_size) || (offset & (crb_size-1))) + return -EINVAL; + + return 0; +} + +static ssize_t +qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + u32 data; + u64 qmdata; + int ret; + + ret = qlcnic_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { + qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); + memcpy(buf, &qmdata, size); + } else { + data = QLCRD32(adapter, offset); + memcpy(buf, &data, size); + } + return size; +} + +static ssize_t +qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + u32 data; + u64 qmdata; + int ret; + + ret = qlcnic_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { + memcpy(&qmdata, buf, size); + qlcnic_pci_camqm_write_2M(adapter, offset, qmdata); + } else { + memcpy(&data, buf, size); + QLCWR32(adapter, offset, data); + } + return size; +} + +static int +qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter, + loff_t offset, size_t size) +{ + if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) + return -EIO; + + if ((size != 8) || (offset & 0x7)) + return -EIO; + + return 0; +} + +static ssize_t +qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, + 
struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = qlcnic_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + if (qlcnic_pci_mem_read_2M(adapter, offset, &data)) + return -EIO; + + memcpy(buf, &data, size); + + return size; +} + +static ssize_t +qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = qlcnic_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + memcpy(&data, buf, size); + + if (qlcnic_pci_mem_write_2M(adapter, offset, data)) + return -EIO; + + return size; +} + +static struct bin_attribute bin_attr_crb = { + .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_crb, + .write = qlcnic_sysfs_write_crb, +}; + +static struct bin_attribute bin_attr_mem = { + .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_mem, + .write = qlcnic_sysfs_write_mem, +}; + +static int +validate_pm_config(struct qlcnic_adapter *adapter, + struct qlcnic_pm_func_cfg *pm_cfg, int count) +{ + + u8 src_pci_func, s_esw_id, d_esw_id; + u8 dest_pci_func; + int i; + + for (i = 0; i < count; i++) { + src_pci_func = pm_cfg[i].pci_func; + dest_pci_func = pm_cfg[i].dest_npar; + if (src_pci_func >= QLCNIC_MAX_PCI_FUNC + || dest_pci_func >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC) + return QL_STATUS_INVALID_PARAM; + + if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC) + return QL_STATUS_INVALID_PARAM; + + s_esw_id = adapter->npars[src_pci_func].phy_port; + d_esw_id = adapter->npars[dest_pci_func].phy_port; + + if (s_esw_id != d_esw_id) + return QL_STATUS_INVALID_PARAM; + + } + return 0; + +} + +static ssize_t +qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pm_func_cfg *pm_cfg; + u32 id, action, pci_func; + int count, rem, i, ret; + + count = size / sizeof(struct qlcnic_pm_func_cfg); + rem = size % sizeof(struct qlcnic_pm_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + pm_cfg = (struct qlcnic_pm_func_cfg *) buf; + + ret = validate_pm_config(adapter, pm_cfg, count); + if (ret) + return ret; + for (i = 0; i < count; i++) { + pci_func = pm_cfg[i].pci_func; + action = !!pm_cfg[i].action; + id = adapter->npars[pci_func].phy_port; + ret = qlcnic_config_port_mirroring(adapter, id, + action, pci_func); + if (ret) + return ret; + } + + for (i = 0; i < count; i++) { + pci_func = pm_cfg[i].pci_func; + id = adapter->npars[pci_func].phy_port; + adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action; + adapter->npars[pci_func].dest_npar = id; + } + return size; +} + +static ssize_t +qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pm_func_cfg 
pm_cfg[QLCNIC_MAX_PCI_FUNC]; + int i; + + if (size != sizeof(pm_cfg)) + return QL_STATUS_INVALID_PARAM; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + pm_cfg[i].action = adapter->npars[i].enable_pm; + pm_cfg[i].dest_npar = 0; + pm_cfg[i].pci_func = i; + } + memcpy(buf, &pm_cfg, size); + + return size; +} + +static int +validate_esw_config(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg, int count) +{ + u32 op_mode; + u8 pci_func; + int i; + + op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE); + + for (i = 0; i < count; i++) { + pci_func = esw_cfg[i].pci_func; + if (pci_func >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + if (adapter->op_mode == QLCNIC_MGMT_FUNC) + if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) + return QL_STATUS_INVALID_PARAM; + + switch (esw_cfg[i].op_mode) { + case QLCNIC_PORT_DEFAULTS: + if (QLC_DEV_GET_DRV(op_mode, pci_func) != + QLCNIC_NON_PRIV_FUNC) { + if (esw_cfg[i].mac_anti_spoof != 0) + return QL_STATUS_INVALID_PARAM; + if (esw_cfg[i].mac_override != 1) + return QL_STATUS_INVALID_PARAM; + if (esw_cfg[i].promisc_mode != 1) + return QL_STATUS_INVALID_PARAM; + } + break; + case QLCNIC_ADD_VLAN: + if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) + return QL_STATUS_INVALID_PARAM; + if (!esw_cfg[i].op_type) + return QL_STATUS_INVALID_PARAM; + break; + case QLCNIC_DEL_VLAN: + if (!esw_cfg[i].op_type) + return QL_STATUS_INVALID_PARAM; + break; + default: + return QL_STATUS_INVALID_PARAM; + } + } + return 0; +} + +static ssize_t +qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_func_cfg *esw_cfg; + struct qlcnic_npar_info *npar; + int count, rem, i, ret; + u8 pci_func, op_mode = 0; + + count = size / sizeof(struct qlcnic_esw_func_cfg); + rem = size % sizeof(struct qlcnic_esw_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + esw_cfg = (struct qlcnic_esw_func_cfg *) buf; + ret = validate_esw_config(adapter, esw_cfg, count); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + if (adapter->op_mode == QLCNIC_MGMT_FUNC) + if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) + return QL_STATUS_INVALID_PARAM; + + if (adapter->ahw->pci_func != esw_cfg[i].pci_func) + continue; + + op_mode = esw_cfg[i].op_mode; + qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]); + esw_cfg[i].op_mode = op_mode; + esw_cfg[i].pci_func = adapter->ahw->pci_func; + + switch (esw_cfg[i].op_mode) { + case QLCNIC_PORT_DEFAULTS: + qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]); + break; + case QLCNIC_ADD_VLAN: + qlcnic_set_vlan_config(adapter, &esw_cfg[i]); + break; + case QLCNIC_DEL_VLAN: + esw_cfg[i].vlan_id = 0; + qlcnic_set_vlan_config(adapter, &esw_cfg[i]); + break; + } + } + + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + goto out; + + for (i = 0; i < count; i++) { + pci_func = esw_cfg[i].pci_func; + npar = &adapter->npars[pci_func]; + switch (esw_cfg[i].op_mode) { + case QLCNIC_PORT_DEFAULTS: + npar->promisc_mode = esw_cfg[i].promisc_mode; + npar->mac_override = esw_cfg[i].mac_override; + npar->offload_flags = esw_cfg[i].offload_flags; + npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof; + npar->discard_tagged = esw_cfg[i].discard_tagged; + break; + case QLCNIC_ADD_VLAN: + npar->pvid = esw_cfg[i].vlan_id; + break; + case 
QLCNIC_DEL_VLAN: + npar->pvid = 0; + break; + } + } +out: + return size; +} + +static ssize_t +qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC]; + u8 i; + + if (size != sizeof(esw_cfg)) + return QL_STATUS_INVALID_PARAM; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + esw_cfg[i].pci_func = i; + if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i])) + return QL_STATUS_INVALID_PARAM; + } + memcpy(buf, &esw_cfg, size); + + return size; +} + +static int +validate_npar_config(struct qlcnic_adapter *adapter, + struct qlcnic_npar_func_cfg *np_cfg, int count) +{ + u8 pci_func, i; + + for (i = 0; i < count; i++) { + pci_func = np_cfg[i].pci_func; + if (pci_func >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) + return QL_STATUS_INVALID_PARAM; + + if (!IS_VALID_BW(np_cfg[i].min_bw) || + !IS_VALID_BW(np_cfg[i].max_bw)) + return QL_STATUS_INVALID_PARAM; + } + return 0; +} + +static ssize_t +qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_info nic_info; + struct qlcnic_npar_func_cfg *np_cfg; + int i, count, rem, ret; + u8 pci_func; + + count = size / sizeof(struct qlcnic_npar_func_cfg); + rem = size % sizeof(struct qlcnic_npar_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + np_cfg = (struct qlcnic_npar_func_cfg *) buf; + ret = validate_npar_config(adapter, np_cfg, count); + if (ret) + return ret; + + for (i = 0; i < count ; i++) { + pci_func = np_cfg[i].pci_func; + ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func); + if (ret) + return ret; + nic_info.pci_func = pci_func; + nic_info.min_tx_bw = np_cfg[i].min_bw; + nic_info.max_tx_bw = np_cfg[i].max_bw; + ret = qlcnic_set_nic_info(adapter, &nic_info); + if (ret) + return ret; + adapter->npars[i].min_bw = nic_info.min_tx_bw; + adapter->npars[i].max_bw = nic_info.max_tx_bw; + } + + return size; + +} +static ssize_t +qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_info nic_info; + struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC]; + int i, ret; + + if (size != sizeof(np_cfg)) + return QL_STATUS_INVALID_PARAM; + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { + if (adapter->npars[i].type != QLCNIC_TYPE_NIC) + continue; + ret = qlcnic_get_nic_info(adapter, &nic_info, i); + if (ret) + return ret; + + np_cfg[i].pci_func = i; + np_cfg[i].op_mode = (u8)nic_info.op_mode; + np_cfg[i].port_num = nic_info.phys_port; + np_cfg[i].fw_capab = nic_info.capabilities; + np_cfg[i].min_bw = nic_info.min_tx_bw ; + np_cfg[i].max_bw = nic_info.max_tx_bw; + np_cfg[i].max_tx_queues = nic_info.max_tx_ques; + np_cfg[i].max_rx_queues = nic_info.max_rx_ques; + } + memcpy(buf, &np_cfg, size); + return size; +} + +static ssize_t +qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj, + struct 
bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_statistics port_stats; + int ret; + + if (size != sizeof(struct qlcnic_esw_statistics)) + return QL_STATUS_INVALID_PARAM; + + if (offset >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + memset(&port_stats, 0, size); + ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, + &port_stats.rx); + if (ret) + return ret; + + ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, + &port_stats.tx); + if (ret) + return ret; + + memcpy(buf, &port_stats, size); + return size; +} + +static ssize_t +qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_statistics esw_stats; + int ret; + + if (size != sizeof(struct qlcnic_esw_statistics)) + return QL_STATUS_INVALID_PARAM; + + if (offset >= QLCNIC_NIU_MAX_XG_PORTS) + return QL_STATUS_INVALID_PARAM; + + memset(&esw_stats, 0, size); + ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, + &esw_stats.rx); + if (ret) + return ret; + + ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, + &esw_stats.tx); + if (ret) + return ret; + + memcpy(buf, &esw_stats, size); + return size; +} + +static ssize_t +qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int ret; + + if (offset >= QLCNIC_NIU_MAX_XG_PORTS) + return QL_STATUS_INVALID_PARAM; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, + QLCNIC_QUERY_RX_COUNTER); + if (ret) + return ret; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, + QLCNIC_QUERY_TX_COUNTER); + if (ret) + return ret; + + return size; +} + +static ssize_t +qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int ret; + + if (offset >= QLCNIC_MAX_PCI_FUNC) + return QL_STATUS_INVALID_PARAM; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, + QLCNIC_QUERY_RX_COUNTER); + if (ret) + return ret; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, + QLCNIC_QUERY_TX_COUNTER); + if (ret) + return ret; + + return size; +} + +static ssize_t +qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC]; + struct qlcnic_pci_info *pci_info; + int i, ret; + + if (size != sizeof(pci_cfg)) + return QL_STATUS_INVALID_PARAM; + + pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); + if (!pci_info) + return -ENOMEM; + + ret = qlcnic_get_pci_info(adapter, pci_info); + if (ret) { + kfree(pci_info); + return ret; + } + + for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { + 
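/* Copy each firmware pci_info entry into the fixed-size + * configuration layout returned to userspace. */ +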
pci_cfg[i].pci_func = pci_info[i].id; + pci_cfg[i].func_type = pci_info[i].type; + pci_cfg[i].port_num = pci_info[i].default_port; + pci_cfg[i].min_bw = pci_info[i].tx_min_bw; + pci_cfg[i].max_bw = pci_info[i].tx_max_bw; + memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN); + } + memcpy(buf, &pci_cfg, size); + kfree(pci_info); + return size; +} +static struct bin_attribute bin_attr_npar_config = { + .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_npar_config, + .write = qlcnic_sysfs_write_npar_config, +}; + +static struct bin_attribute bin_attr_pci_config = { + .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_pci_config, + .write = NULL, +}; + +static struct bin_attribute bin_attr_port_stats = { + .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_get_port_stats, + .write = qlcnic_sysfs_clear_port_stats, +}; + +static struct bin_attribute bin_attr_esw_stats = { + .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_get_esw_stats, + .write = qlcnic_sysfs_clear_esw_stats, +}; + +static struct bin_attribute bin_attr_esw_config = { + .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_esw_config, + .write = qlcnic_sysfs_write_esw_config, +}; + +static struct bin_attribute bin_attr_pm_config = { + .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_pm_config, + .write = qlcnic_sysfs_write_pm_config, +}; + +static void +qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) + if (device_create_file(dev, &dev_attr_bridged_mode)) + dev_warn(dev, + "failed to create bridged_mode sysfs entry\n"); +} + +static void +qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) + device_remove_file(dev, &dev_attr_bridged_mode); +} + +static void +qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (device_create_bin_file(dev, &bin_attr_port_stats)) + dev_info(dev, "failed to create port stats sysfs entry"); + + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) + return; + if (device_create_file(dev, &dev_attr_diag_mode)) + dev_info(dev, "failed to create diag_mode sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_crb)) + dev_info(dev, "failed to create crb sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_mem)) + dev_info(dev, "failed to create mem sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_pci_config)) + dev_info(dev, "failed to create pci config sysfs entry"); + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + if (device_create_bin_file(dev, &bin_attr_esw_config)) + dev_info(dev, "failed to create esw config sysfs entry"); + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + return; + if (device_create_bin_file(dev, &bin_attr_npar_config)) + dev_info(dev, "failed to create npar config sysfs entry"); + if (device_create_bin_file(dev, &bin_attr_pm_config)) + dev_info(dev, "failed to create pm config sysfs entry"); + if (device_create_bin_file(dev, &bin_attr_esw_stats)) + dev_info(dev, "failed to create eswitch stats sysfs entry"); +} + +static void +qlcnic_remove_diag_entries(struct 
qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + device_remove_bin_file(dev, &bin_attr_port_stats); + + if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) + return; + device_remove_file(dev, &dev_attr_diag_mode); + device_remove_bin_file(dev, &bin_attr_crb); + device_remove_bin_file(dev, &bin_attr_mem); + device_remove_bin_file(dev, &bin_attr_pci_config); + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + device_remove_bin_file(dev, &bin_attr_esw_config); + if (adapter->op_mode != QLCNIC_MGMT_FUNC) + return; + device_remove_bin_file(dev, &bin_attr_npar_config); + device_remove_bin_file(dev, &bin_attr_pm_config); + device_remove_bin_file(dev, &bin_attr_esw_stats); +} + +#ifdef CONFIG_INET + +#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops) + +static void +qlcnic_config_indev_addr(struct qlcnic_adapter *adapter, + struct net_device *dev, unsigned long event) +{ + struct in_device *indev; + + indev = in_dev_get(dev); + if (!indev) + return; + + for_ifa(indev) { + switch (event) { + case NETDEV_UP: + qlcnic_config_ipaddr(adapter, + ifa->ifa_address, QLCNIC_IP_UP); + break; + case NETDEV_DOWN: + qlcnic_config_ipaddr(adapter, + ifa->ifa_address, QLCNIC_IP_DOWN); + break; + default: + break; + } + } endfor_ifa(indev); + + in_dev_put(indev); +} + +static void +qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct net_device *dev; + u16 vid; + + qlcnic_config_indev_addr(adapter, netdev, event); + + for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { + dev = __vlan_find_dev_deep(netdev, vid); + if (!dev) + continue; + qlcnic_config_indev_addr(adapter, dev, event); + } +} + +static int qlcnic_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct qlcnic_adapter *adapter; + struct net_device *dev = (struct net_device *)ptr; + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + + if (!is_qlcnic_netdev(dev)) + goto done; + + adapter = netdev_priv(dev); + + if (!adapter) + goto done; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + goto done; + + qlcnic_config_indev_addr(adapter, dev, event); +done: + return NOTIFY_DONE; +} + +static int +qlcnic_inetaddr_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct qlcnic_adapter *adapter; + struct net_device *dev; + + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + + dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + + if (!is_qlcnic_netdev(dev)) + goto done; + + adapter = netdev_priv(dev); + + if (!adapter) + goto done; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + goto done; + + switch (event) { + case NETDEV_UP: + qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP); + break; + case NETDEV_DOWN: + qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN); + break; + default: + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block qlcnic_netdev_cb = { + .notifier_call = qlcnic_netdev_event, +}; + +static struct notifier_block qlcnic_inetaddr_cb = { + .notifier_call = qlcnic_inetaddr_event, +}; +#else +static void +qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event) +{ } +#endif +static struct pci_error_handlers qlcnic_err_handler = { + .error_detected = qlcnic_io_error_detected, + .slot_reset = qlcnic_io_slot_reset, + .resume = qlcnic_io_resume, +}; + +static struct pci_driver qlcnic_driver = { + .name = qlcnic_driver_name, + .id_table = qlcnic_pci_tbl, + .probe = qlcnic_probe, + .remove = __devexit_p(qlcnic_remove), +#ifdef CONFIG_PM + .suspend = qlcnic_suspend, + .resume = qlcnic_resume, +#endif + .shutdown = qlcnic_shutdown, + .err_handler = &qlcnic_err_handler + +}; + +static int __init qlcnic_init_module(void) +{ + int ret; + + printk(KERN_INFO "%s\n", qlcnic_driver_string); + + qlcnic_wq = create_singlethread_workqueue("qlcnic"); + if (qlcnic_wq == NULL) { + printk(KERN_ERR "qlcnic: cannot create workqueue\n"); + return -ENOMEM; + } + +#ifdef CONFIG_INET + register_netdevice_notifier(&qlcnic_netdev_cb); + register_inetaddr_notifier(&qlcnic_inetaddr_cb); +#endif + + ret = pci_register_driver(&qlcnic_driver); + if (ret) { +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); + unregister_netdevice_notifier(&qlcnic_netdev_cb); +#endif + destroy_workqueue(qlcnic_wq); + } + + return ret; +} + +module_init(qlcnic_init_module); + +static void __exit qlcnic_exit_module(void) +{ + + pci_unregister_driver(&qlcnic_driver); + +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); + unregister_netdevice_notifier(&qlcnic_netdev_cb); +#endif + destroy_workqueue(qlcnic_wq); +} + +module_exit(qlcnic_exit_module); diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/net/ethernet/qlogic/qlge/Makefile new file mode 100644 index 0000000..8a19765 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Qlogic 10GbE PCI Express ethernet driver +# + +obj-$(CONFIG_QLGE) += qlge.o + +qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h new file mode 100644 index 0000000..8731f79 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -0,0 +1,2334 @@ +/* + * QLogic QLA41xx NIC HBA Driver + * Copyright (c) 2003-2006 QLogic Corporation + * + * See LICENSE.qlge for copyright and licensing details. + */ +#ifndef _QLGE_H_ +#define _QLGE_H_ + +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> +#include <linux/if_vlan.h> + +/* + * General definitions... 
+ */ +#define DRV_NAME "qlge" +#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " +#define DRV_VERSION "v1.00.00.29.00.00-01" + +#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ + +#define QLGE_VENDOR_ID 0x1077 +#define QLGE_DEVICE_ID_8012 0x8012 +#define QLGE_DEVICE_ID_8000 0x8000 +#define MAX_CPUS 8 +#define MAX_TX_RINGS MAX_CPUS +#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1) + +#define NUM_TX_RING_ENTRIES 256 +#define NUM_RX_RING_ENTRIES 256 + +#define NUM_SMALL_BUFFERS 512 +#define NUM_LARGE_BUFFERS 512 +#define DB_PAGE_SIZE 4096 + +/* Calculate the number of (4k) pages required to + * contain a buffer queue of the given length. + */ +#define MAX_DB_PAGES_PER_BQ(x) \ + (((x * sizeof(u64)) / DB_PAGE_SIZE) + \ + (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0)) + +#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ + MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ + MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) +#define LARGE_BUFFER_MAX_SIZE 8192 +#define LARGE_BUFFER_MIN_SIZE 2048 + +#define MAX_CQ 128 +#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ +#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */ +#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2) +#define UDELAY_COUNT 3 +#define UDELAY_DELAY 100 + + +#define TX_DESC_PER_IOCB 8 +/* The maximum number of frags we handle is based + * on PAGE_SIZE... + */ +#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */ +#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) +#else /* all other page sizes */ +#define TX_DESC_PER_OAL 0 +#endif + +/* Word shifting for converting 64-bit + * address to a series of 16-bit words. + * This is used for some MPI firmware + * mailbox commands. + */ +#define LSW(x) ((u16)(x)) +#define MSW(x) ((u16)((u32)(x) >> 16)) +#define LSD(x) ((u32)((u64)(x))) +#define MSD(x) ((u32)((((u64)(x)) >> 32))) + +/* MPI test register definitions. This register + * is used for determining alternate NIC function's + * PCI->func number. + */ +enum { + MPI_TEST_FUNC_PORT_CFG = 0x1002, + MPI_TEST_FUNC_PRB_CTL = 0x100e, + MPI_TEST_FUNC_PRB_EN = 0x18a20000, + MPI_TEST_FUNC_RST_STS = 0x100a, + MPI_TEST_FUNC_RST_FRC = 0x00000003, + MPI_TEST_NIC_FUNC_MASK = 0x00000007, + MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0), + MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e, + MPI_TEST_NIC1_FUNC_SHIFT = 1, + MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4), + MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0, + MPI_TEST_NIC2_FUNC_SHIFT = 5, + MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8), + MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00, + MPI_TEST_FC1_FUNCTION_SHIFT = 9, + MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12), + MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000, + MPI_TEST_FC2_FUNCTION_SHIFT = 13, + + MPI_NIC_READ = 0x00000000, + MPI_NIC_REG_BLOCK = 0x00020000, + MPI_NIC_FUNCTION_SHIFT = 6, +}; + +/* + * Processor Address Register (PROC_ADDR) bit definitions. + */ +enum { + + /* Misc. stuff */ + MAILBOX_COUNT = 16, + MAILBOX_TIMEOUT = 5, + + PROC_ADDR_RDY = (1 << 31), + PROC_ADDR_R = (1 << 30), + PROC_ADDR_ERR = (1 << 29), + PROC_ADDR_DA = (1 << 28), + PROC_ADDR_FUNC0_MBI = 0x00001180, + PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT), + PROC_ADDR_FUNC0_CTL = 0x000011a1, + PROC_ADDR_FUNC2_MBI = 0x00001280, + PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT), + PROC_ADDR_FUNC2_CTL = 0x000012a1, + PROC_ADDR_MPI_RISC = 0x00000000, + PROC_ADDR_MDE = 0x00010000, + PROC_ADDR_REGBLOCK = 0x00020000, + PROC_ADDR_RISC_REG = 0x00030000, +}; + +/* + * System Register (SYS) bit definitions. 
 + */ +enum { + SYS_EFE = (1 << 0), + SYS_FAE = (1 << 1), + SYS_MDC = (1 << 2), + SYS_DST = (1 << 3), + SYS_DWC = (1 << 4), + SYS_EVW = (1 << 5), + SYS_OMP_DLY_MASK = 0x3f000000, + /* + * There are no values defined as of edit #15. + */ + SYS_ODI = (1 << 14), +}; + +/* + * Reset/Failover Register (RST_FO) bit definitions. + */ +enum { + RST_FO_TFO = (1 << 0), + RST_FO_RR_MASK = 0x00060000, + RST_FO_RR_CQ_CAM = 0x00000000, + RST_FO_RR_DROP = 0x00000002, + RST_FO_RR_DQ = 0x00000004, + RST_FO_RR_RCV_FUNC_CQ = 0x00000006, + RST_FO_FRB = (1 << 12), + RST_FO_MOP = (1 << 13), + RST_FO_REG = (1 << 14), + RST_FO_FR = (1 << 15), +}; + +/* + * Function Specific Control Register (FSC) bit definitions. + */ +enum { + FSC_DBRST_MASK = 0x00070000, + FSC_DBRST_256 = 0x00000000, + FSC_DBRST_512 = 0x00000001, + FSC_DBRST_768 = 0x00000002, + FSC_DBRST_1024 = 0x00000003, + FSC_DBL_MASK = 0x00180000, + FSC_DBL_DBRST = 0x00000000, + FSC_DBL_MAX_PLD = 0x00000008, + FSC_DBL_MAX_BRST = 0x00000010, + FSC_DBL_128_BYTES = 0x00000018, + FSC_EC = (1 << 5), + FSC_EPC_MASK = 0x00c00000, + FSC_EPC_INBOUND = (1 << 6), + FSC_EPC_OUTBOUND = (1 << 7), + FSC_VM_PAGESIZE_MASK = 0x07000000, + FSC_VM_PAGE_2K = 0x00000100, + FSC_VM_PAGE_4K = 0x00000200, + FSC_VM_PAGE_8K = 0x00000300, + FSC_VM_PAGE_64K = 0x00000600, + FSC_SH = (1 << 11), + FSC_DSB = (1 << 12), + FSC_STE = (1 << 13), + FSC_FE = (1 << 15), +}; + +/* + * Host Command Status Register (CSR) bit definitions. + */ +enum { + CSR_ERR_STS_MASK = 0x0000003f, + /* + * There are no values defined as of edit #15. + */ + CSR_RR = (1 << 8), + CSR_HRI = (1 << 9), + CSR_RP = (1 << 10), + CSR_CMD_PARM_SHIFT = 22, + CSR_CMD_NOP = 0x00000000, + CSR_CMD_SET_RST = 0x10000000, + CSR_CMD_CLR_RST = 0x20000000, + CSR_CMD_SET_PAUSE = 0x30000000, + CSR_CMD_CLR_PAUSE = 0x40000000, + CSR_CMD_SET_H2R_INT = 0x50000000, + CSR_CMD_CLR_H2R_INT = 0x60000000, + CSR_CMD_PAR_EN = 0x70000000, + CSR_CMD_SET_BAD_PAR = 0x80000000, + CSR_CMD_CLR_BAD_PAR = 0x90000000, + CSR_CMD_CLR_R2PCI_INT = 0xa0000000, +}; + +/* + * Configuration Register (CFG) bit definitions. + */ +enum { + CFG_LRQ = (1 << 0), + CFG_DRQ = (1 << 1), + CFG_LR = (1 << 2), + CFG_DR = (1 << 3), + CFG_LE = (1 << 5), + CFG_LCQ = (1 << 6), + CFG_DCQ = (1 << 7), + CFG_Q_SHIFT = 8, + CFG_Q_MASK = 0x7f000000, +}; + +/* + * Status Register (STS) bit definitions. + */ +enum { + STS_FE = (1 << 0), + STS_PI = (1 << 1), + STS_PL0 = (1 << 2), + STS_PL1 = (1 << 3), + STS_PI0 = (1 << 4), + STS_PI1 = (1 << 5), + STS_FUNC_ID_MASK = 0x000000c0, + STS_FUNC_ID_SHIFT = 6, + STS_F0E = (1 << 8), + STS_F1E = (1 << 9), + STS_F2E = (1 << 10), + STS_F3E = (1 << 11), + STS_NFE = (1 << 12), +}; + +/* + * Interrupt Enable Register (INTR_EN) bit definitions. + */ +enum { + INTR_EN_INTR_MASK = 0x007f0000, + INTR_EN_TYPE_MASK = 0x03000000, + INTR_EN_TYPE_ENABLE = 0x00000100, + INTR_EN_TYPE_DISABLE = 0x00000200, + INTR_EN_TYPE_READ = 0x00000300, + INTR_EN_IHD = (1 << 13), + INTR_EN_IHD_MASK = (INTR_EN_IHD << 16), + INTR_EN_EI = (1 << 14), + INTR_EN_EN = (1 << 15), +}; + +/* + * Interrupt Mask Register (INTR_MASK) bit definitions. + */ +enum { + INTR_MASK_PI = (1 << 0), + INTR_MASK_HL0 = (1 << 1), + INTR_MASK_LH0 = (1 << 2), + INTR_MASK_HL1 = (1 << 3), + INTR_MASK_LH1 = (1 << 4), + INTR_MASK_SE = (1 << 5), + INTR_MASK_LSC = (1 << 6), + INTR_MASK_MC = (1 << 7), + INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC, +}; + +/* + * Register (REV_ID) bit definitions. 
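+ * Illustrative decode (sketch only; assumes the ql_read32() register + * helper declared later in this header): + * chip_rev = (ql_read32(qdev, REV_ID) >> REV_ID_CHIPREV_SHIFT) & + * REV_ID_MASK;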
+ */ +enum { + REV_ID_MASK = 0x0000000f, + REV_ID_NICROLL_SHIFT = 0, + REV_ID_NICREV_SHIFT = 4, + REV_ID_XGROLL_SHIFT = 8, + REV_ID_XGREV_SHIFT = 12, + REV_ID_CHIPREV_SHIFT = 28, +}; + +/* + * Force ECC Error Register (FRC_ECC_ERR) bit definitions. + */ +enum { + FRC_ECC_ERR_VW = (1 << 12), + FRC_ECC_ERR_VB = (1 << 13), + FRC_ECC_ERR_NI = (1 << 14), + FRC_ECC_ERR_NO = (1 << 15), + FRC_ECC_PFE_SHIFT = 16, + FRC_ECC_ERR_DO = (1 << 18), + FRC_ECC_P14 = (1 << 19), +}; + +/* + * Error Status Register (ERR_STS) bit definitions. + */ +enum { + ERR_STS_NOF = (1 << 0), + ERR_STS_NIF = (1 << 1), + ERR_STS_DRP = (1 << 2), + ERR_STS_XGP = (1 << 3), + ERR_STS_FOU = (1 << 4), + ERR_STS_FOC = (1 << 5), + ERR_STS_FOF = (1 << 6), + ERR_STS_FIU = (1 << 7), + ERR_STS_FIC = (1 << 8), + ERR_STS_FIF = (1 << 9), + ERR_STS_MOF = (1 << 10), + ERR_STS_TA = (1 << 11), + ERR_STS_MA = (1 << 12), + ERR_STS_MPE = (1 << 13), + ERR_STS_SCE = (1 << 14), + ERR_STS_STE = (1 << 15), + ERR_STS_FOW = (1 << 16), + ERR_STS_UE = (1 << 17), + ERR_STS_MCH = (1 << 26), + ERR_STS_LOC_SHIFT = 27, +}; + +/* + * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions. + */ +enum { + RAM_DBG_ADDR_FW = (1 << 30), + RAM_DBG_ADDR_FR = (1 << 31), +}; + +/* + * Semaphore Register (SEM) bit definitions. + */ +enum { + /* + * Example: + * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT) + */ + SEM_CLEAR = 0, + SEM_SET = 1, + SEM_FORCE = 3, + SEM_XGMAC0_SHIFT = 0, + SEM_XGMAC1_SHIFT = 2, + SEM_ICB_SHIFT = 4, + SEM_MAC_ADDR_SHIFT = 6, + SEM_FLASH_SHIFT = 8, + SEM_PROBE_SHIFT = 10, + SEM_RT_IDX_SHIFT = 12, + SEM_PROC_REG_SHIFT = 14, + SEM_XGMAC0_MASK = 0x00030000, + SEM_XGMAC1_MASK = 0x000c0000, + SEM_ICB_MASK = 0x00300000, + SEM_MAC_ADDR_MASK = 0x00c00000, + SEM_FLASH_MASK = 0x03000000, + SEM_PROBE_MASK = 0x0c000000, + SEM_RT_IDX_MASK = 0x30000000, + SEM_PROC_REG_MASK = 0xc0000000, +}; + +/* + * 10G MAC Address Register (XGMAC_ADDR) bit definitions. 
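+ * These offsets are reached indirectly: the driver holds the XGMAC + * semaphore (see SEM above), writes the offset to the XGMAC_ADDR + * control register (with XGMAC_ADDR_R set for a read), waits for + * XGMAC_ADDR_RDY, then moves data through XGMAC_DATA. Sketch, with + * the ql_write32()/ql_read32() helpers assumed: + * ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); + * (poll XGMAC_ADDR_RDY, then) data = ql_read32(qdev, XGMAC_DATA);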
+ */ +enum { + XGMAC_ADDR_RDY = (1 << 31), + XGMAC_ADDR_R = (1 << 30), + XGMAC_ADDR_XME = (1 << 29), + + /* XGMAC control registers */ + PAUSE_SRC_LO = 0x00000100, + PAUSE_SRC_HI = 0x00000104, + GLOBAL_CFG = 0x00000108, + GLOBAL_CFG_RESET = (1 << 0), + GLOBAL_CFG_JUMBO = (1 << 6), + GLOBAL_CFG_TX_STAT_EN = (1 << 10), + GLOBAL_CFG_RX_STAT_EN = (1 << 11), + TX_CFG = 0x0000010c, + TX_CFG_RESET = (1 << 0), + TX_CFG_EN = (1 << 1), + TX_CFG_PREAM = (1 << 2), + RX_CFG = 0x00000110, + RX_CFG_RESET = (1 << 0), + RX_CFG_EN = (1 << 1), + RX_CFG_PREAM = (1 << 2), + FLOW_CTL = 0x0000011c, + PAUSE_OPCODE = 0x00000120, + PAUSE_TIMER = 0x00000124, + PAUSE_FRM_DEST_LO = 0x00000128, + PAUSE_FRM_DEST_HI = 0x0000012c, + MAC_TX_PARAMS = 0x00000134, + MAC_TX_PARAMS_JUMBO = (1 << 31), + MAC_TX_PARAMS_SIZE_SHIFT = 16, + MAC_RX_PARAMS = 0x00000138, + MAC_SYS_INT = 0x00000144, + MAC_SYS_INT_MASK = 0x00000148, + MAC_MGMT_INT = 0x0000014c, + MAC_MGMT_IN_MASK = 0x00000150, + EXT_ARB_MODE = 0x000001fc, + + /* XGMAC TX statistics registers */ + TX_PKTS = 0x00000200, + TX_BYTES = 0x00000208, + TX_MCAST_PKTS = 0x00000210, + TX_BCAST_PKTS = 0x00000218, + TX_UCAST_PKTS = 0x00000220, + TX_CTL_PKTS = 0x00000228, + TX_PAUSE_PKTS = 0x00000230, + TX_64_PKT = 0x00000238, + TX_65_TO_127_PKT = 0x00000240, + TX_128_TO_255_PKT = 0x00000248, + TX_256_511_PKT = 0x00000250, + TX_512_TO_1023_PKT = 0x00000258, + TX_1024_TO_1518_PKT = 0x00000260, + TX_1519_TO_MAX_PKT = 0x00000268, + TX_UNDERSIZE_PKT = 0x00000270, + TX_OVERSIZE_PKT = 0x00000278, + + /* XGMAC statistics control registers */ + RX_HALF_FULL_DET = 0x000002a0, + TX_HALF_FULL_DET = 0x000002a4, + RX_OVERFLOW_DET = 0x000002a8, + TX_OVERFLOW_DET = 0x000002ac, + RX_HALF_FULL_MASK = 0x000002b0, + TX_HALF_FULL_MASK = 0x000002b4, + RX_OVERFLOW_MASK = 0x000002b8, + TX_OVERFLOW_MASK = 0x000002bc, + STAT_CNT_CTL = 0x000002c0, + STAT_CNT_CTL_CLEAR_TX = (1 << 0), + STAT_CNT_CTL_CLEAR_RX = (1 << 1), + AUX_RX_HALF_FULL_DET = 0x000002d0, + AUX_TX_HALF_FULL_DET = 0x000002d4, + AUX_RX_OVERFLOW_DET = 0x000002d8, + AUX_TX_OVERFLOW_DET = 0x000002dc, + AUX_RX_HALF_FULL_MASK = 0x000002f0, + AUX_TX_HALF_FULL_MASK = 0x000002f4, + AUX_RX_OVERFLOW_MASK = 0x000002f8, + AUX_TX_OVERFLOW_MASK = 0x000002fc, + + /* XGMAC RX statistics registers */ + RX_BYTES = 0x00000300, + RX_BYTES_OK = 0x00000308, + RX_PKTS = 0x00000310, + RX_PKTS_OK = 0x00000318, + RX_BCAST_PKTS = 0x00000320, + RX_MCAST_PKTS = 0x00000328, + RX_UCAST_PKTS = 0x00000330, + RX_UNDERSIZE_PKTS = 0x00000338, + RX_OVERSIZE_PKTS = 0x00000340, + RX_JABBER_PKTS = 0x00000348, + RX_UNDERSIZE_FCERR_PKTS = 0x00000350, + RX_DROP_EVENTS = 0x00000358, + RX_FCERR_PKTS = 0x00000360, + RX_ALIGN_ERR = 0x00000368, + RX_SYMBOL_ERR = 0x00000370, + RX_MAC_ERR = 0x00000378, + RX_CTL_PKTS = 0x00000380, + RX_PAUSE_PKTS = 0x00000388, + RX_64_PKTS = 0x00000390, + RX_65_TO_127_PKTS = 0x00000398, + RX_128_255_PKTS = 0x000003a0, + RX_256_511_PKTS = 0x000003a8, + RX_512_TO_1023_PKTS = 0x000003b0, + RX_1024_TO_1518_PKTS = 0x000003b8, + RX_1519_TO_MAX_PKTS = 0x000003c0, + RX_LEN_ERR_PKTS = 0x000003c8, + + /* XGMAC MDIO control registers */ + MDIO_TX_DATA = 0x00000400, + MDIO_RX_DATA = 0x00000410, + MDIO_CMD = 0x00000420, + MDIO_PHY_ADDR = 0x00000430, + MDIO_PORT = 0x00000440, + MDIO_STATUS = 0x00000450, + + XGMAC_REGISTER_END = 0x00000740, +}; + +/* + * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions. 
+ */ +enum { + ETS_QUEUE_SHIFT = 29, + ETS_REF = (1 << 26), + ETS_RS = (1 << 27), + ETS_P = (1 << 28), + ETS_FC_COS_SHIFT = 23, +}; + +/* + * Flash Address Register (FLASH_ADDR) bit definitions. + */ +enum { + FLASH_ADDR_RDY = (1 << 31), + FLASH_ADDR_R = (1 << 30), + FLASH_ADDR_ERR = (1 << 29), +}; + +/* + * Stop CQ Processing Register (CQ_STOP) bit definitions. + */ +enum { + CQ_STOP_QUEUE_MASK = (0x007f0000), + CQ_STOP_TYPE_MASK = (0x03000000), + CQ_STOP_TYPE_START = 0x00000100, + CQ_STOP_TYPE_STOP = 0x00000200, + CQ_STOP_TYPE_READ = 0x00000300, + CQ_STOP_EN = (1 << 15), +}; + +/* + * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions. + */ +enum { + MAC_ADDR_IDX_SHIFT = 4, + MAC_ADDR_TYPE_SHIFT = 16, + MAC_ADDR_TYPE_COUNT = 10, + MAC_ADDR_TYPE_MASK = 0x000f0000, + MAC_ADDR_TYPE_CAM_MAC = 0x00000000, + MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, + MAC_ADDR_TYPE_VLAN = 0x00020000, + MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000, + MAC_ADDR_TYPE_FC_MAC = 0x00040000, + MAC_ADDR_TYPE_MGMT_MAC = 0x00050000, + MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000, + MAC_ADDR_TYPE_MGMT_V4 = 0x00070000, + MAC_ADDR_TYPE_MGMT_V6 = 0x00080000, + MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000, + MAC_ADDR_ADR = (1 << 25), + MAC_ADDR_RS = (1 << 26), + MAC_ADDR_E = (1 << 27), + MAC_ADDR_MR = (1 << 30), + MAC_ADDR_MW = (1 << 31), + MAX_MULTICAST_ENTRIES = 32, + + /* Entry count and words per entry + * for each address type in the filter. + */ + MAC_ADDR_MAX_CAM_ENTRIES = 512, + MAC_ADDR_MAX_CAM_WCOUNT = 3, + MAC_ADDR_MAX_MULTICAST_ENTRIES = 32, + MAC_ADDR_MAX_MULTICAST_WCOUNT = 2, + MAC_ADDR_MAX_VLAN_ENTRIES = 4096, + MAC_ADDR_MAX_VLAN_WCOUNT = 1, + MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096, + MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1, + MAC_ADDR_MAX_FC_MAC_ENTRIES = 4, + MAC_ADDR_MAX_FC_MAC_WCOUNT = 2, + MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8, + MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2, + MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16, + MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1, + MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4, + MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1, + MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4, + MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4, + MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4, + MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1, +}; + +/* + * MAC Protocol Address Index Register (SPLT_HDR) bit definitions. + */ +enum { + SPLT_HDR_EP = (1 << 31), +}; + +/* + * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions. + */ +enum { + FC_RCV_CFG_ECT = (1 << 15), + FC_RCV_CFG_DFH = (1 << 20), + FC_RCV_CFG_DVF = (1 << 21), + FC_RCV_CFG_RCE = (1 << 27), + FC_RCV_CFG_RFE = (1 << 28), + FC_RCV_CFG_TEE = (1 << 29), + FC_RCV_CFG_TCE = (1 << 30), + FC_RCV_CFG_TFE = (1 << 31), +}; + +/* + * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions. + */ +enum { + NIC_RCV_CFG_PPE = (1 << 0), + NIC_RCV_CFG_VLAN_MASK = 0x00060000, + NIC_RCV_CFG_VLAN_ALL = 0x00000000, + NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002, + NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004, + NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006, + NIC_RCV_CFG_RV = (1 << 3), + NIC_RCV_CFG_DFQ_MASK = (0x7f000000), + NIC_RCV_CFG_DFQ_SHIFT = 8, + NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */ +}; + +/* + * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions. 
+ */ +enum { + MGMT_RCV_CFG_ARP = (1 << 0), + MGMT_RCV_CFG_DHC = (1 << 1), + MGMT_RCV_CFG_DHS = (1 << 2), + MGMT_RCV_CFG_NP = (1 << 3), + MGMT_RCV_CFG_I6N = (1 << 4), + MGMT_RCV_CFG_I6R = (1 << 5), + MGMT_RCV_CFG_DH6 = (1 << 6), + MGMT_RCV_CFG_UD1 = (1 << 7), + MGMT_RCV_CFG_UD0 = (1 << 8), + MGMT_RCV_CFG_BCT = (1 << 9), + MGMT_RCV_CFG_MCT = (1 << 10), + MGMT_RCV_CFG_DM = (1 << 11), + MGMT_RCV_CFG_RM = (1 << 12), + MGMT_RCV_CFG_STL = (1 << 13), + MGMT_RCV_CFG_VLAN_MASK = 0xc0000000, + MGMT_RCV_CFG_VLAN_ALL = 0x00000000, + MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000, + MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000, + MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000, +}; + +/* + * Routing Index Register (RT_IDX) bit definitions. + */ +enum { + RT_IDX_IDX_SHIFT = 8, + RT_IDX_TYPE_MASK = 0x000f0000, + RT_IDX_TYPE_SHIFT = 16, + RT_IDX_TYPE_RT = 0x00000000, + RT_IDX_TYPE_RT_INV = 0x00010000, + RT_IDX_TYPE_NICQ = 0x00020000, + RT_IDX_TYPE_NICQ_INV = 0x00030000, + RT_IDX_DST_MASK = 0x00700000, + RT_IDX_DST_RSS = 0x00000000, + RT_IDX_DST_CAM_Q = 0x00100000, + RT_IDX_DST_COS_Q = 0x00200000, + RT_IDX_DST_DFLT_Q = 0x00300000, + RT_IDX_DST_DEST_Q = 0x00400000, + RT_IDX_RS = (1 << 26), + RT_IDX_E = (1 << 27), + RT_IDX_MR = (1 << 30), + RT_IDX_MW = (1 << 31), + + /* Nic Queue format - type 2 bits */ + RT_IDX_BCAST = (1 << 0), + RT_IDX_MCAST = (1 << 1), + RT_IDX_MCAST_MATCH = (1 << 2), + RT_IDX_MCAST_REG_MATCH = (1 << 3), + RT_IDX_MCAST_HASH_MATCH = (1 << 4), + RT_IDX_FC_MACH = (1 << 5), + RT_IDX_ETH_FCOE = (1 << 6), + RT_IDX_CAM_HIT = (1 << 7), + RT_IDX_CAM_BIT0 = (1 << 8), + RT_IDX_CAM_BIT1 = (1 << 9), + RT_IDX_VLAN_TAG = (1 << 10), + RT_IDX_VLAN_MATCH = (1 << 11), + RT_IDX_VLAN_FILTER = (1 << 12), + RT_IDX_ETH_SKIP1 = (1 << 13), + RT_IDX_ETH_SKIP2 = (1 << 14), + RT_IDX_BCAST_MCAST_MATCH = (1 << 15), + RT_IDX_802_3 = (1 << 16), + RT_IDX_LLDP = (1 << 17), + RT_IDX_UNUSED018 = (1 << 18), + RT_IDX_UNUSED019 = (1 << 19), + RT_IDX_UNUSED20 = (1 << 20), + RT_IDX_UNUSED21 = (1 << 21), + RT_IDX_ERR = (1 << 22), + RT_IDX_VALID = (1 << 23), + RT_IDX_TU_CSUM_ERR = (1 << 24), + RT_IDX_IP_CSUM_ERR = (1 << 25), + RT_IDX_MAC_ERR = (1 << 26), + RT_IDX_RSS_TCP6 = (1 << 27), + RT_IDX_RSS_TCP4 = (1 << 28), + RT_IDX_RSS_IPV6 = (1 << 29), + RT_IDX_RSS_IPV4 = (1 << 30), + RT_IDX_RSS_MATCH = (1 << 31), + + /* Hierarchy for the NIC Queue Mask */ + RT_IDX_ALL_ERR_SLOT = 0, + RT_IDX_MAC_ERR_SLOT = 0, + RT_IDX_IP_CSUM_ERR_SLOT = 1, + RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2, + RT_IDX_BCAST_SLOT = 3, + RT_IDX_MCAST_MATCH_SLOT = 4, + RT_IDX_ALLMULTI_SLOT = 5, + RT_IDX_UNUSED6_SLOT = 6, + RT_IDX_UNUSED7_SLOT = 7, + RT_IDX_RSS_MATCH_SLOT = 8, + RT_IDX_RSS_IPV4_SLOT = 8, + RT_IDX_RSS_IPV6_SLOT = 9, + RT_IDX_RSS_TCP4_SLOT = 10, + RT_IDX_RSS_TCP6_SLOT = 11, + RT_IDX_CAM_HIT_SLOT = 12, + RT_IDX_UNUSED013 = 13, + RT_IDX_UNUSED014 = 14, + RT_IDX_PROMISCUOUS_SLOT = 15, + RT_IDX_MAX_RT_SLOTS = 8, + RT_IDX_MAX_NIC_SLOTS = 16, +}; + +/* + * Serdes Address Register (XG_SERDES_ADDR) bit definitions. + */ +enum { + XG_SERDES_ADDR_RDY = (1 << 31), + XG_SERDES_ADDR_R = (1 << 30), + + XG_SERDES_ADDR_STS = 0x00001E06, + XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005, + XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a, + XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001, + + /* Serdes coredump definitions. 
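+ * Each START/END pair below bounds a contiguous range of serdes + * register addresses captured in sequence by the coredump support.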
*/ + XG_SERDES_XAUI_AN_START = 0x00000000, + XG_SERDES_XAUI_AN_END = 0x00000034, + XG_SERDES_XAUI_HSS_PCS_START = 0x00000800, + XG_SERDES_XAUI_HSS_PCS_END = 0x0000880, + XG_SERDES_XFI_AN_START = 0x00001000, + XG_SERDES_XFI_AN_END = 0x00001034, + XG_SERDES_XFI_TRAIN_START = 0x10001050, + XG_SERDES_XFI_TRAIN_END = 0x1000107C, + XG_SERDES_XFI_HSS_PCS_START = 0x00001800, + XG_SERDES_XFI_HSS_PCS_END = 0x00001838, + XG_SERDES_XFI_HSS_TX_START = 0x00001c00, + XG_SERDES_XFI_HSS_TX_END = 0x00001c1f, + XG_SERDES_XFI_HSS_RX_START = 0x00001c40, + XG_SERDES_XFI_HSS_RX_END = 0x00001c5f, + XG_SERDES_XFI_HSS_PLL_START = 0x00001e00, + XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f, +}; + +/* + * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions. + */ +enum { + PRB_MX_ADDR_ARE = (1 << 16), + PRB_MX_ADDR_UP = (1 << 15), + PRB_MX_ADDR_SWP = (1 << 14), + + /* Module select values. */ + PRB_MX_ADDR_MAX_MODS = 21, + PRB_MX_ADDR_MOD_SEL_SHIFT = 9, + PRB_MX_ADDR_MOD_SEL_TBD = 0, + PRB_MX_ADDR_MOD_SEL_IDE1 = 1, + PRB_MX_ADDR_MOD_SEL_IDE2 = 2, + PRB_MX_ADDR_MOD_SEL_FRB = 3, + PRB_MX_ADDR_MOD_SEL_ODE1 = 4, + PRB_MX_ADDR_MOD_SEL_ODE2 = 5, + PRB_MX_ADDR_MOD_SEL_DA1 = 6, + PRB_MX_ADDR_MOD_SEL_DA2 = 7, + PRB_MX_ADDR_MOD_SEL_IMP1 = 8, + PRB_MX_ADDR_MOD_SEL_IMP2 = 9, + PRB_MX_ADDR_MOD_SEL_OMP1 = 10, + PRB_MX_ADDR_MOD_SEL_OMP2 = 11, + PRB_MX_ADDR_MOD_SEL_ORS1 = 12, + PRB_MX_ADDR_MOD_SEL_ORS2 = 13, + PRB_MX_ADDR_MOD_SEL_REG = 14, + PRB_MX_ADDR_MOD_SEL_MAC1 = 16, + PRB_MX_ADDR_MOD_SEL_MAC2 = 17, + PRB_MX_ADDR_MOD_SEL_VQM1 = 18, + PRB_MX_ADDR_MOD_SEL_VQM2 = 19, + PRB_MX_ADDR_MOD_SEL_MOP = 20, + /* Bit fields indicating which modules + * are valid for each clock domain. + */ + PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7, + PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1, + PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309, + PRB_MX_ADDR_VALID_FC_MOD = 0x00003001, + PRB_MX_ADDR_VALID_TOTAL = 34, + + /* Clock domain values. 
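+ * A probe mux address combines a module select, a clock domain and + * a mux line; sketch using the fields in this enum: + * probe = (mod_sel << PRB_MX_ADDR_MOD_SEL_SHIFT) | + * (clock << PRB_MX_ADDR_CLOCK_SHIFT) | mux_sel;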
*/ + PRB_MX_ADDR_CLOCK_SHIFT = 6, + PRB_MX_ADDR_SYS_CLOCK = 0, + PRB_MX_ADDR_PCI_CLOCK = 2, + PRB_MX_ADDR_FC_CLOCK = 5, + PRB_MX_ADDR_XGM_CLOCK = 6, + + PRB_MX_ADDR_MAX_MUX = 64, +}; + +/* + * Control Register Set Map + */ +enum { + PROC_ADDR = 0, /* Use semaphore */ + PROC_DATA = 0x04, /* Use semaphore */ + SYS = 0x08, + RST_FO = 0x0c, + FSC = 0x10, + CSR = 0x14, + LED = 0x18, + ICB_RID = 0x1c, /* Use semaphore */ + ICB_L = 0x20, /* Use semaphore */ + ICB_H = 0x24, /* Use semaphore */ + CFG = 0x28, + BIOS_ADDR = 0x2c, + STS = 0x30, + INTR_EN = 0x34, + INTR_MASK = 0x38, + ISR1 = 0x3c, + ISR2 = 0x40, + ISR3 = 0x44, + ISR4 = 0x48, + REV_ID = 0x4c, + FRC_ECC_ERR = 0x50, + ERR_STS = 0x54, + RAM_DBG_ADDR = 0x58, + RAM_DBG_DATA = 0x5c, + ECC_ERR_CNT = 0x60, + SEM = 0x64, + GPIO_1 = 0x68, /* Use semaphore */ + GPIO_2 = 0x6c, /* Use semaphore */ + GPIO_3 = 0x70, /* Use semaphore */ + RSVD2 = 0x74, + XGMAC_ADDR = 0x78, /* Use semaphore */ + XGMAC_DATA = 0x7c, /* Use semaphore */ + NIC_ETS = 0x80, + CNA_ETS = 0x84, + FLASH_ADDR = 0x88, /* Use semaphore */ + FLASH_DATA = 0x8c, /* Use semaphore */ + CQ_STOP = 0x90, + PAGE_TBL_RID = 0x94, + WQ_PAGE_TBL_LO = 0x98, + WQ_PAGE_TBL_HI = 0x9c, + CQ_PAGE_TBL_LO = 0xa0, + CQ_PAGE_TBL_HI = 0xa4, + MAC_ADDR_IDX = 0xa8, /* Use semaphore */ + MAC_ADDR_DATA = 0xac, /* Use semaphore */ + COS_DFLT_CQ1 = 0xb0, + COS_DFLT_CQ2 = 0xb4, + ETYPE_SKIP1 = 0xb8, + ETYPE_SKIP2 = 0xbc, + SPLT_HDR = 0xc0, + FC_PAUSE_THRES = 0xc4, + NIC_PAUSE_THRES = 0xc8, + FC_ETHERTYPE = 0xcc, + FC_RCV_CFG = 0xd0, + NIC_RCV_CFG = 0xd4, + FC_COS_TAGS = 0xd8, + NIC_COS_TAGS = 0xdc, + MGMT_RCV_CFG = 0xe0, + RT_IDX = 0xe4, + RT_DATA = 0xe8, + RSVD7 = 0xec, + XG_SERDES_ADDR = 0xf0, + XG_SERDES_DATA = 0xf4, + PRB_MX_ADDR = 0xf8, /* Use semaphore */ + PRB_MX_DATA = 0xfc, /* Use semaphore */ +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#define SMALL_BUFFER_SIZE 256 +#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE +#define SPLT_SETTING FSC_DBRST_1024 +#define SPLT_LEN 0 +#define QLGE_SB_PAD 0 +#else +#define SMALL_BUFFER_SIZE 512 +#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2) +#define SPLT_SETTING FSC_SH +#define SPLT_LEN (SPLT_HDR_EP | \ + min(SMALL_BUF_MAP_SIZE, 1023)) +#define QLGE_SB_PAD 32 +#endif + +/* + * CAM output format. + */ +enum { + CAM_OUT_ROUTE_FC = 0, + CAM_OUT_ROUTE_NIC = 1, + CAM_OUT_FUNC_SHIFT = 2, + CAM_OUT_RV = (1 << 4), + CAM_OUT_SH = (1 << 15), + CAM_OUT_CQ_ID_SHIFT = 5, +}; + +/* + * Mailbox definitions + */ +enum { + /* Asynchronous Event Notifications */ + AEN_SYS_ERR = 0x00008002, + AEN_LINK_UP = 0x00008011, + AEN_LINK_DOWN = 0x00008012, + AEN_IDC_CMPLT = 0x00008100, + AEN_IDC_REQ = 0x00008101, + AEN_IDC_EXT = 0x00008102, + AEN_DCBX_CHG = 0x00008110, + AEN_AEN_LOST = 0x00008120, + AEN_AEN_SFP_IN = 0x00008130, + AEN_AEN_SFP_OUT = 0x00008131, + AEN_FW_INIT_DONE = 0x00008400, + AEN_FW_INIT_FAIL = 0x00008401, + + /* Mailbox Command Opcodes. 
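+ * A command is issued by loading an opcode and its parameters into + * mbox_in[] of struct mbox_params (defined after this enum) and + * reading the completion status, one of the MB_CMD_STS_* values + * below, back from mbox_out[].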
*/ + MB_CMD_NOP = 0x00000000, + MB_CMD_EX_FW = 0x00000002, + MB_CMD_MB_TEST = 0x00000006, + MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */ + MB_CMD_ABOUT_FW = 0x00000008, + MB_CMD_COPY_RISC_RAM = 0x0000000a, + MB_CMD_LOAD_RISC_RAM = 0x0000000b, + MB_CMD_DUMP_RISC_RAM = 0x0000000c, + MB_CMD_WRITE_RAM = 0x0000000d, + MB_CMD_INIT_RISC_RAM = 0x0000000e, + MB_CMD_READ_RAM = 0x0000000f, + MB_CMD_STOP_FW = 0x00000014, + MB_CMD_MAKE_SYS_ERR = 0x0000002a, + MB_CMD_WRITE_SFP = 0x00000030, + MB_CMD_READ_SFP = 0x00000031, + MB_CMD_INIT_FW = 0x00000060, + MB_CMD_GET_IFCB = 0x00000061, + MB_CMD_GET_FW_STATE = 0x00000069, + MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */ + MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */ + MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */ + MB_WOL_DISABLE = 0, + MB_WOL_MAGIC_PKT = (1 << 1), + MB_WOL_FLTR = (1 << 2), + MB_WOL_UCAST = (1 << 3), + MB_WOL_MCAST = (1 << 4), + MB_WOL_BCAST = (1 << 5), + MB_WOL_LINK_UP = (1 << 6), + MB_WOL_LINK_DOWN = (1 << 7), + MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */ + MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ + MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ + MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ + MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */ + MB_CMD_SET_WOL_IMMED = 0x00000115, + MB_CMD_PORT_RESET = 0x00000120, + MB_CMD_SET_PORT_CFG = 0x00000122, + MB_CMD_GET_PORT_CFG = 0x00000123, + MB_CMD_GET_LINK_STS = 0x00000124, + MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */ + QL_LED_BLINK = 0x03e803e8, + MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */ + MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ + MB_SET_MPI_TFK_STOP = (1 << 0), + MB_SET_MPI_TFK_RESUME = (1 << 1), + MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ + MB_GET_MPI_TFK_STOPPED = (1 << 0), + MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), + /* Sub-commands for IDC request. + * This describes the reason for the + * IDC request. + */ + MB_CMD_IOP_NONE = 0x0000, + MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001, + MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002, + MB_CMD_IOP_PREP_LINK_DOWN = 0x0010, + MB_CMD_IOP_DVR_START = 0x0100, + MB_CMD_IOP_FLASH_ACC = 0x0101, + MB_CMD_IOP_RESTART_MPI = 0x0102, + MB_CMD_IOP_CORE_DUMP_MPI = 0x0103, + + /* Mailbox Command Status. */ + MB_CMD_STS_GOOD = 0x00004000, /* Success. */ + MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */ + MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */ + MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */ + MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */ + MB_CMD_STS_ERR = 0x00004005, /* System Error. */ + MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */ +}; + +struct mbox_params { + u32 mbox_in[MAILBOX_COUNT]; + u32 mbox_out[MAILBOX_COUNT]; + int in_count; + int out_count; +}; + +struct flash_params_8012 { + u8 dev_id_str[4]; + __le16 size; + __le16 csum; + __le16 ver; + __le16 sub_dev_id; + u8 mac_addr[6]; + __le16 res; +}; + +/* 8000 device's flash is a different structure + * at a different offset in flash. + */ +#define FUNC0_FLASH_OFFSET 0x140200 +#define FUNC1_FLASH_OFFSET 0x140600 + +/* Flash related data structures. 
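+ * Both layouts begin with a 4-byte dev_id_str (documented below as + * "8000" for the 8000 layout) identifying the flash image; on 8000 + * devices the per-function copy sits at FUNC0/FUNC1_FLASH_OFFSET + * above.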
*/ +struct flash_params_8000 { + u8 dev_id_str[4]; /* "8000" */ + __le16 ver; + __le16 size; + __le16 csum; + __le16 reserved0; + __le16 total_size; + __le16 entry_count; + u8 data_type0; + u8 data_size0; + u8 mac_addr[6]; + u8 data_type1; + u8 data_size1; + u8 mac_addr1[6]; + u8 data_type2; + u8 data_size2; + __le16 vlan_id; + u8 data_type3; + u8 data_size3; + __le16 last; + u8 reserved1[464]; + __le16 subsys_ven_id; + __le16 subsys_dev_id; + u8 reserved2[4]; +}; + +union flash_params { + struct flash_params_8012 flash_params_8012; + struct flash_params_8000 flash_params_8000; +}; + +/* + * doorbell space for the rx ring context + */ +struct rx_doorbell_context { + u32 cnsmr_idx; /* 0x00 */ + u32 valid; /* 0x04 */ + u32 reserved[4]; /* 0x08-0x14 */ + u32 lbq_prod_idx; /* 0x18 */ + u32 sbq_prod_idx; /* 0x1c */ +}; + +/* + * doorbell space for the tx ring context + */ +struct tx_doorbell_context { + u32 prod_idx; /* 0x00 */ + u32 valid; /* 0x04 */ + u32 reserved[4]; /* 0x08-0x14 */ + u32 lbq_prod_idx; /* 0x18 */ + u32 sbq_prod_idx; /* 0x1c */ +}; + +/* DATA STRUCTURES SHARED WITH HARDWARE. */ +struct tx_buf_desc { + __le64 addr; + __le32 len; +#define TX_DESC_LEN_MASK 0x000fffff +#define TX_DESC_C 0x40000000 +#define TX_DESC_E 0x80000000 +} __packed; + +/* + * IOCB Definitions... + */ + +#define OPCODE_OB_MAC_IOCB 0x01 +#define OPCODE_OB_MAC_TSO_IOCB 0x02 +#define OPCODE_IB_MAC_IOCB 0x20 +#define OPCODE_IB_MPI_IOCB 0x21 +#define OPCODE_IB_AE_IOCB 0x3f + +struct ob_mac_iocb_req { + u8 opcode; + u8 flags1; +#define OB_MAC_IOCB_REQ_OI 0x01 +#define OB_MAC_IOCB_REQ_I 0x02 +#define OB_MAC_IOCB_REQ_D 0x08 +#define OB_MAC_IOCB_REQ_F 0x10 + u8 flags2; + u8 flags3; +#define OB_MAC_IOCB_DFP 0x02 +#define OB_MAC_IOCB_V 0x04 + __le32 reserved1[2]; + __le16 frame_len; +#define OB_MAC_IOCB_LEN_MASK 0x3ffff + __le16 reserved2; + u32 tid; + u32 txq_idx; + __le32 reserved3; + __le16 vlan_tci; + __le16 reserved4; + struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; +} __packed; + +struct ob_mac_iocb_rsp { + u8 opcode; /* */ + u8 flags1; /* */ +#define OB_MAC_IOCB_RSP_OI 0x01 /* */ +#define OB_MAC_IOCB_RSP_I 0x02 /* */ +#define OB_MAC_IOCB_RSP_E 0x08 /* */ +#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */ +#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */ +#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */ + u8 flags2; /* */ + u8 flags3; /* */ +#define OB_MAC_IOCB_RSP_B 0x80 /* */ + u32 tid; + u32 txq_idx; + __le32 reserved[13]; +} __packed; + +struct ob_mac_tso_iocb_req { + u8 opcode; + u8 flags1; +#define OB_MAC_TSO_IOCB_OI 0x01 +#define OB_MAC_TSO_IOCB_I 0x02 +#define OB_MAC_TSO_IOCB_D 0x08 +#define OB_MAC_TSO_IOCB_IP4 0x40 +#define OB_MAC_TSO_IOCB_IP6 0x80 + u8 flags2; +#define OB_MAC_TSO_IOCB_LSO 0x20 +#define OB_MAC_TSO_IOCB_UC 0x40 +#define OB_MAC_TSO_IOCB_TC 0x80 + u8 flags3; +#define OB_MAC_TSO_IOCB_IC 0x01 +#define OB_MAC_TSO_IOCB_DFP 0x02 +#define OB_MAC_TSO_IOCB_V 0x04 + __le32 reserved1[2]; + __le32 frame_len; + u32 tid; + u32 txq_idx; + __le16 total_hdrs_len; + __le16 net_trans_offset; +#define OB_MAC_TRANSPORT_HDR_SHIFT 6 + __le16 vlan_tci; + __le16 mss; + struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; +} __packed; + +struct ob_mac_tso_iocb_rsp { + u8 opcode; + u8 flags1; +#define OB_MAC_TSO_IOCB_RSP_OI 0x01 +#define OB_MAC_TSO_IOCB_RSP_I 0x02 +#define OB_MAC_TSO_IOCB_RSP_E 0x08 +#define OB_MAC_TSO_IOCB_RSP_S 0x10 +#define OB_MAC_TSO_IOCB_RSP_L 0x20 +#define OB_MAC_TSO_IOCB_RSP_P 0x40 + u8 flags2; /* */ + u8 flags3; /* */ +#define OB_MAC_TSO_IOCB_RSP_B 0x8000 + u32 tid; + u32 txq_idx; + __le32 reserved2[13]; +} 
__packed; + +struct ib_mac_iocb_rsp { + u8 opcode; /* 0x20 */ + u8 flags1; +#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */ +#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */ +#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ +#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ +#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ +#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */ +#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */ +#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */ +#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */ +#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */ +#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */ +#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */ + u8 flags2; +#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */ +#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */ +#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */ +#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04 +#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08 +#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10 +#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14 +#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18 +#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c +#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */ +#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */ +#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */ + u8 flags3; +#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */ +#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */ +#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */ +#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */ +#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */ +#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */ +#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */ +#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */ +#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */ +#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */ +#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */ + __le32 data_len; /* */ + __le64 data_addr; /* */ + __le32 rss; /* */ + __le16 vlan_id; /* 12 bits */ +#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */ +#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */ +#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff + + __le16 reserved1; + __le32 reserved2[6]; + u8 reserved3[3]; + u8 flags4; +#define IB_MAC_IOCB_RSP_HV 0x20 +#define IB_MAC_IOCB_RSP_HS 0x40 +#define IB_MAC_IOCB_RSP_HL 0x80 + __le32 hdr_len; /* */ + __le64 hdr_addr; /* */ +} __packed; + +struct ib_ae_iocb_rsp { + u8 opcode; + u8 flags1; +#define IB_AE_IOCB_RSP_OI 0x01 +#define IB_AE_IOCB_RSP_I 0x02 + u8 event; +#define LINK_UP_EVENT 0x00 +#define LINK_DOWN_EVENT 0x01 +#define CAM_LOOKUP_ERR_EVENT 0x06 +#define SOFT_ECC_ERROR_EVENT 0x07 +#define MGMT_ERR_EVENT 0x08 +#define TEN_GIG_MAC_EVENT 0x09 +#define GPI0_H2L_EVENT 0x10 +#define GPI0_L2H_EVENT 0x20 +#define GPI1_H2L_EVENT 0x11 +#define GPI1_L2H_EVENT 0x21 +#define PCI_ERR_ANON_BUF_RD 0x40 + u8 q_id; + __le32 reserved[15]; +} __packed; + +/* + * These three structures are for generic + * handling of ib and ob iocbs. + */ +struct ql_net_rsp_iocb { + u8 opcode; + u8 flags0; + __le16 length; + __le32 tid; + __le32 reserved[14]; +} __packed; + +struct net_req_iocb { + u8 opcode; + u8 flags0; + __le16 flags1; + __le32 tid; + __le32 reserved1[30]; +} __packed; + +/* + * tx ring initialization control block for chip. 
+ * It is defined as: + * "Work Queue Initialization Control Block" + */ +struct wqicb { + __le16 len; +#define Q_LEN_V (1 << 4) +#define Q_LEN_CPP_CONT 0x0000 +#define Q_LEN_CPP_16 0x0001 +#define Q_LEN_CPP_32 0x0002 +#define Q_LEN_CPP_64 0x0003 +#define Q_LEN_CPP_512 0x0006 + __le16 flags; +#define Q_PRI_SHIFT 1 +#define Q_FLAGS_LC 0x1000 +#define Q_FLAGS_LB 0x2000 +#define Q_FLAGS_LI 0x4000 +#define Q_FLAGS_LO 0x8000 + __le16 cq_id_rss; +#define Q_CQ_ID_RSS_RV 0x8000 + __le16 rid; + __le64 addr; + __le64 cnsmr_idx_addr; +} __packed; + +/* + * rx ring initialization control block for chip. + * It is defined as: + * "Completion Queue Initialization Control Block" + */ +struct cqicb { + u8 msix_vect; + u8 reserved1; + u8 reserved2; + u8 flags; +#define FLAGS_LV 0x08 +#define FLAGS_LS 0x10 +#define FLAGS_LL 0x20 +#define FLAGS_LI 0x40 +#define FLAGS_LC 0x80 + __le16 len; +#define LEN_V (1 << 4) +#define LEN_CPP_CONT 0x0000 +#define LEN_CPP_32 0x0001 +#define LEN_CPP_64 0x0002 +#define LEN_CPP_128 0x0003 + __le16 rid; + __le64 addr; + __le64 prod_idx_addr; + __le16 pkt_delay; + __le16 irq_delay; + __le64 lbq_addr; + __le16 lbq_buf_size; + __le16 lbq_len; /* entry count */ + __le64 sbq_addr; + __le16 sbq_buf_size; + __le16 sbq_len; /* entry count */ +} __packed; + +struct ricb { + u8 base_cq; +#define RSS_L4K 0x80 + u8 flags; +#define RSS_L6K 0x01 +#define RSS_LI 0x02 +#define RSS_LB 0x04 +#define RSS_LM 0x08 +#define RSS_RI4 0x10 +#define RSS_RT4 0x20 +#define RSS_RI6 0x40 +#define RSS_RT6 0x80 + __le16 mask; + u8 hash_cq_id[1024]; + __le32 ipv6_hash_key[10]; + __le32 ipv4_hash_key[4]; +} __packed; + +/* SOFTWARE/DRIVER DATA STRUCTURES. */ + +struct oal { + struct tx_buf_desc oal[TX_DESC_PER_OAL]; +}; + +struct map_list { + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct tx_ring_desc { + struct sk_buff *skb; + struct ob_mac_iocb_req *queue_entry; + u32 index; + struct oal oal; + struct map_list map[MAX_SKB_FRAGS + 1]; + int map_cnt; + struct tx_ring_desc *next; +}; + +struct page_chunk { + struct page *page; /* master page */ + char *va; /* virt addr for this chunk */ + u64 map; /* mapping for master */ + unsigned int offset; /* offset for this chunk */ + unsigned int last_flag; /* flag set for last chunk in page */ +}; + +struct bq_desc { + union { + struct page_chunk pg_chunk; + struct sk_buff *skb; + } p; + __le64 *addr; + u32 index; + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) + +struct tx_ring { + /* + * queue info. + */ + struct wqicb wqicb; /* structure used to inform chip of new queue */ + void *wq_base; /* pci_alloc:virtual addr for tx */ + dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */ + __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */ + dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */ + u32 wq_size; /* size in bytes of queue area */ + u32 wq_len; /* number of entries in queue */ + void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */ + void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */ + u16 prod_idx; /* current value for prod idx */ + u16 cq_id; /* completion (rx) queue for tx completions */ + u8 wq_id; /* queue id for this entry */ + u8 reserved1[3]; + struct tx_ring_desc *q; /* descriptor list for the queue */ + spinlock_t lock; + atomic_t tx_count; /* counts down for every outstanding IO */ + atomic_t queue_stopped; /* Turns queue off when full. 
*/ + struct delayed_work tx_work; + struct ql_adapter *qdev; + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; +}; + +/* + * Type of inbound queue. + */ +enum { + DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */ + TX_Q = 3, /* Handles outbound completions. */ + RX_Q = 4, /* Handles inbound completions. */ +}; + +struct rx_ring { + struct cqicb cqicb; /* The chip's completion queue init control block. */ + + /* Completion queue elements. */ + void *cq_base; + dma_addr_t cq_base_dma; + u32 cq_size; + u32 cq_len; + u16 cq_id; + __le32 *prod_idx_sh_reg; /* Shadowed producer register. */ + dma_addr_t prod_idx_sh_reg_dma; + void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */ + u32 cnsmr_idx; /* current sw idx */ + struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */ + void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */ + + /* Large buffer queue elements. */ + u32 lbq_len; /* entry count */ + u32 lbq_size; /* size in bytes of queue */ + u32 lbq_buf_size; + void *lbq_base; + dma_addr_t lbq_base_dma; + void *lbq_base_indirect; + dma_addr_t lbq_base_indirect_dma; + struct page_chunk pg_chunk; /* current page for chunks */ + struct bq_desc *lbq; /* array of control blocks */ + void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ + u32 lbq_prod_idx; /* current sw prod idx */ + u32 lbq_curr_idx; /* next entry we expect */ + u32 lbq_clean_idx; /* beginning of new descs */ + u32 lbq_free_cnt; /* free buffer desc cnt */ + + /* Small buffer queue elements. */ + u32 sbq_len; /* entry count */ + u32 sbq_size; /* size in bytes of queue */ + u32 sbq_buf_size; + void *sbq_base; + dma_addr_t sbq_base_dma; + void *sbq_base_indirect; + dma_addr_t sbq_base_indirect_dma; + struct bq_desc *sbq; /* array of control blocks */ + void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */ + u32 sbq_prod_idx; /* current sw prod idx */ + u32 sbq_curr_idx; /* next entry we expect */ + u32 sbq_clean_idx; /* beginning of new descs */ + u32 sbq_free_cnt; /* free buffer desc cnt */ + + /* Misc. handler elements. */ + u32 type; /* Type of queue, tx, rx. */ + u32 irq; /* Which vector this ring is assigned. */ + u32 cpu; /* Which CPU this should run on. */ + char name[IFNAMSIZ + 5]; + struct napi_struct napi; + u8 reserved; + struct ql_adapter *qdev; + u64 rx_packets; + u64 rx_multicast; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; +}; + +/* + * RSS Initialization Control Block + */ +struct hash_id { + u8 value[4]; +}; + +struct nic_stats { + /* + * These stats come from offset 200h to 278h + * in the XGMAC register. + */ + u64 tx_pkts; + u64 tx_bytes; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_ucast_pkts; + u64 tx_ctl_pkts; + u64 tx_pause_pkts; + u64 tx_64_pkt; + u64 tx_65_to_127_pkt; + u64 tx_128_to_255_pkt; + u64 tx_256_511_pkt; + u64 tx_512_to_1023_pkt; + u64 tx_1024_to_1518_pkt; + u64 tx_1519_to_max_pkt; + u64 tx_undersize_pkt; + u64 tx_oversize_pkt; + + /* + * These stats come from offset 300h to 3C8h + * in the XGMAC register. 
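 + * Like the TX counters above, these are 64-bit XGMAC registers and + * are fetched with ql_read_xgmac_reg64(), declared near the end of + * this header.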
+ */ + u64 rx_bytes; + u64 rx_bytes_ok; + u64 rx_pkts; + u64 rx_pkts_ok; + u64 rx_bcast_pkts; + u64 rx_mcast_pkts; + u64 rx_ucast_pkts; + u64 rx_undersize_pkts; + u64 rx_oversize_pkts; + u64 rx_jabber_pkts; + u64 rx_undersize_fcerr_pkts; + u64 rx_drop_events; + u64 rx_fcerr_pkts; + u64 rx_align_err; + u64 rx_symbol_err; + u64 rx_mac_err; + u64 rx_ctl_pkts; + u64 rx_pause_pkts; + u64 rx_64_pkts; + u64 rx_65_to_127_pkts; + u64 rx_128_255_pkts; + u64 rx_256_511_pkts; + u64 rx_512_to_1023_pkts; + u64 rx_1024_to_1518_pkts; + u64 rx_1519_to_max_pkts; + u64 rx_len_err_pkts; + /* + * These stats come from offset 500h to 5C8h + * in the XGMAC register. + */ + u64 tx_cbfc_pause_frames0; + u64 tx_cbfc_pause_frames1; + u64 tx_cbfc_pause_frames2; + u64 tx_cbfc_pause_frames3; + u64 tx_cbfc_pause_frames4; + u64 tx_cbfc_pause_frames5; + u64 tx_cbfc_pause_frames6; + u64 tx_cbfc_pause_frames7; + u64 rx_cbfc_pause_frames0; + u64 rx_cbfc_pause_frames1; + u64 rx_cbfc_pause_frames2; + u64 rx_cbfc_pause_frames3; + u64 rx_cbfc_pause_frames4; + u64 rx_cbfc_pause_frames5; + u64 rx_cbfc_pause_frames6; + u64 rx_cbfc_pause_frames7; + u64 rx_nic_fifo_drop; +}; + +/* Firmware coredump internal register address/length pairs. */ +enum { + MPI_CORE_REGS_ADDR = 0x00030000, + MPI_CORE_REGS_CNT = 127, + MPI_CORE_SH_REGS_CNT = 16, + TEST_REGS_ADDR = 0x00001000, + TEST_REGS_CNT = 23, + RMII_REGS_ADDR = 0x00001040, + RMII_REGS_CNT = 64, + FCMAC1_REGS_ADDR = 0x00001080, + FCMAC2_REGS_ADDR = 0x000010c0, + FCMAC_REGS_CNT = 64, + FC1_MBX_REGS_ADDR = 0x00001100, + FC2_MBX_REGS_ADDR = 0x00001240, + FC_MBX_REGS_CNT = 64, + IDE_REGS_ADDR = 0x00001140, + IDE_REGS_CNT = 64, + NIC1_MBX_REGS_ADDR = 0x00001180, + NIC2_MBX_REGS_ADDR = 0x00001280, + NIC_MBX_REGS_CNT = 64, + SMBUS_REGS_ADDR = 0x00001200, + SMBUS_REGS_CNT = 64, + I2C_REGS_ADDR = 0x00001fc0, + I2C_REGS_CNT = 64, + MEMC_REGS_ADDR = 0x00003000, + MEMC_REGS_CNT = 256, + PBUS_REGS_ADDR = 0x00007c00, + PBUS_REGS_CNT = 256, + MDE_REGS_ADDR = 0x00010000, + MDE_REGS_CNT = 6, + CODE_RAM_ADDR = 0x00020000, + CODE_RAM_CNT = 0x2000, + MEMC_RAM_ADDR = 0x00100000, + MEMC_RAM_CNT = 0x2000, +}; + +#define MPI_COREDUMP_COOKIE 0x5555aaaa +struct mpi_coredump_global_header { + u32 cookie; + u8 idString[16]; + u32 timeLo; + u32 timeHi; + u32 imageSize; + u32 headerSize; + u8 info[220]; +}; + +struct mpi_coredump_segment_header { + u32 cookie; + u32 segNum; + u32 segSize; + u32 extra; + u8 description[16]; +}; + +/* Firmware coredump header segment numbers. 
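 + * Each segment appears in the dump as a mpi_coredump_segment_header + * (cookie, segNum, segSize, description) followed immediately by its + * data; segSize counts the header plus the data, as filled in by + * ql_build_coredump_seg_header() in qlge_dbg.c below.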
*/ +enum { + CORE_SEG_NUM = 1, + TEST_LOGIC_SEG_NUM = 2, + RMII_SEG_NUM = 3, + FCMAC1_SEG_NUM = 4, + FCMAC2_SEG_NUM = 5, + FC1_MBOX_SEG_NUM = 6, + IDE_SEG_NUM = 7, + NIC1_MBOX_SEG_NUM = 8, + SMBUS_SEG_NUM = 9, + FC2_MBOX_SEG_NUM = 10, + NIC2_MBOX_SEG_NUM = 11, + I2C_SEG_NUM = 12, + MEMC_SEG_NUM = 13, + PBUS_SEG_NUM = 14, + MDE_SEG_NUM = 15, + NIC1_CONTROL_SEG_NUM = 16, + NIC2_CONTROL_SEG_NUM = 17, + NIC1_XGMAC_SEG_NUM = 18, + NIC2_XGMAC_SEG_NUM = 19, + WCS_RAM_SEG_NUM = 20, + MEMC_RAM_SEG_NUM = 21, + XAUI_AN_SEG_NUM = 22, + XAUI_HSS_PCS_SEG_NUM = 23, + XFI_AN_SEG_NUM = 24, + XFI_TRAIN_SEG_NUM = 25, + XFI_HSS_PCS_SEG_NUM = 26, + XFI_HSS_TX_SEG_NUM = 27, + XFI_HSS_RX_SEG_NUM = 28, + XFI_HSS_PLL_SEG_NUM = 29, + MISC_NIC_INFO_SEG_NUM = 30, + INTR_STATES_SEG_NUM = 31, + CAM_ENTRIES_SEG_NUM = 32, + ROUTING_WORDS_SEG_NUM = 33, + ETS_SEG_NUM = 34, + PROBE_DUMP_SEG_NUM = 35, + ROUTING_INDEX_SEG_NUM = 36, + MAC_PROTOCOL_SEG_NUM = 37, + XAUI2_AN_SEG_NUM = 38, + XAUI2_HSS_PCS_SEG_NUM = 39, + XFI2_AN_SEG_NUM = 40, + XFI2_TRAIN_SEG_NUM = 41, + XFI2_HSS_PCS_SEG_NUM = 42, + XFI2_HSS_TX_SEG_NUM = 43, + XFI2_HSS_RX_SEG_NUM = 44, + XFI2_HSS_PLL_SEG_NUM = 45, + SEM_REGS_SEG_NUM = 50 + +}; + +/* There are 64 generic NIC registers. */ +#define NIC_REGS_DUMP_WORD_COUNT 64 +/* XGMAC word count. */ +#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4) +/* Word counts for the SERDES blocks. */ +#define XG_SERDES_XAUI_AN_COUNT 14 +#define XG_SERDES_XAUI_HSS_PCS_COUNT 33 +#define XG_SERDES_XFI_AN_COUNT 14 +#define XG_SERDES_XFI_TRAIN_COUNT 12 +#define XG_SERDES_XFI_HSS_PCS_COUNT 15 +#define XG_SERDES_XFI_HSS_TX_COUNT 32 +#define XG_SERDES_XFI_HSS_RX_COUNT 32 +#define XG_SERDES_XFI_HSS_PLL_COUNT 32 + +/* There are 2 CNA ETS and 8 NIC ETS registers. */ +#define ETS_REGS_DUMP_WORD_COUNT 10 + +/* Each probe mux entry stores the probe type plus 64 entries + * that are each 64 bits in length. There are a total of + * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes. + */ +#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2)) +#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \ + PRB_MX_ADDR_VALID_TOTAL) +/* Each routing entry consists of 4 32-bit words. + * They are route type, index, index word, and result. + * There are 2 route blocks with 8 entries each and + * 2 NIC blocks with 16 entries each. + * The total is 48 entries with 4 words each. + */ +#define RT_IDX_DUMP_ENTRIES 48 +#define RT_IDX_DUMP_WORDS_PER_ENTRY 4 +#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \ + RT_IDX_DUMP_WORDS_PER_ENTRY) +/* There are 10 address blocks in filter, each with + * different entry counts and different word-count-per-entry. 
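 + * MAC_ADDR_DUMP_ENTRIES below sums entries times words over all ten + * blocks, and the dump then stores two words (index, data) per entry, + * hence MAC_ADDR_DUMP_WORDS_PER_ENTRY.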
 + */ +#define MAC_ADDR_DUMP_ENTRIES \ + ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \ + (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \ + (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \ + (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \ + (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \ + (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \ + (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \ + (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \ + (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \ + (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT)) +#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2 +#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \ + MAC_ADDR_DUMP_WORDS_PER_ENTRY) +/* Maximum of 4 functions whose semaphore registers are + * in the coredump. + */ +#define MAX_SEMAPHORE_FUNCTIONS 4 +/* Defines for accessing the MPI shadow registers. */ +#define RISC_124 0x0003007c +#define RISC_127 0x0003007f +#define SHADOW_OFFSET 0xb0000000 +#define SHADOW_REG_SHIFT 20 + +struct ql_nic_misc { + u32 rx_ring_count; + u32 tx_ring_count; + u32 intr_count; + u32 function; +}; + +struct ql_reg_dump { + + /* segment 0 */ + struct mpi_coredump_global_header mpi_global_header; + + /* segment 16 */ + struct mpi_coredump_segment_header nic_regs_seg_hdr; + u32 nic_regs[64]; + + /* segment 30 */ + struct mpi_coredump_segment_header misc_nic_seg_hdr; + struct ql_nic_misc misc_nic_info; + + /* segment 31 */ + /* one interrupt state for each CQ */ + struct mpi_coredump_segment_header intr_states_seg_hdr; + u32 intr_states[MAX_CPUS]; + + /* segment 32 */ + /* 3 cam words each for 16 unicast, + * 2 cam words for each of 32 multicast. 
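 + * (The array below reserves three words per multicast entry as well, + * so the third word of each multicast triplet goes unused.)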
+ */ + struct mpi_coredump_segment_header cam_entries_seg_hdr; + u32 cam_entries[(16 * 3) + (32 * 3)]; + + /* segment 33 */ + struct mpi_coredump_segment_header nic_routing_words_seg_hdr; + u32 nic_routing_words[16]; + + /* segment 34 */ + struct mpi_coredump_segment_header ets_seg_hdr; + u32 ets[8+2]; +}; + +struct ql_mpi_coredump { + /* segment 0 */ + struct mpi_coredump_global_header mpi_global_header; + + /* segment 1 */ + struct mpi_coredump_segment_header core_regs_seg_hdr; + u32 mpi_core_regs[MPI_CORE_REGS_CNT]; + u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT]; + + /* segment 2 */ + struct mpi_coredump_segment_header test_logic_regs_seg_hdr; + u32 test_logic_regs[TEST_REGS_CNT]; + + /* segment 3 */ + struct mpi_coredump_segment_header rmii_regs_seg_hdr; + u32 rmii_regs[RMII_REGS_CNT]; + + /* segment 4 */ + struct mpi_coredump_segment_header fcmac1_regs_seg_hdr; + u32 fcmac1_regs[FCMAC_REGS_CNT]; + + /* segment 5 */ + struct mpi_coredump_segment_header fcmac2_regs_seg_hdr; + u32 fcmac2_regs[FCMAC_REGS_CNT]; + + /* segment 6 */ + struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr; + u32 fc1_mbx_regs[FC_MBX_REGS_CNT]; + + /* segment 7 */ + struct mpi_coredump_segment_header ide_regs_seg_hdr; + u32 ide_regs[IDE_REGS_CNT]; + + /* segment 8 */ + struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr; + u32 nic1_mbx_regs[NIC_MBX_REGS_CNT]; + + /* segment 9 */ + struct mpi_coredump_segment_header smbus_regs_seg_hdr; + u32 smbus_regs[SMBUS_REGS_CNT]; + + /* segment 10 */ + struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr; + u32 fc2_mbx_regs[FC_MBX_REGS_CNT]; + + /* segment 11 */ + struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr; + u32 nic2_mbx_regs[NIC_MBX_REGS_CNT]; + + /* segment 12 */ + struct mpi_coredump_segment_header i2c_regs_seg_hdr; + u32 i2c_regs[I2C_REGS_CNT]; + /* segment 13 */ + struct mpi_coredump_segment_header memc_regs_seg_hdr; + u32 memc_regs[MEMC_REGS_CNT]; + + /* segment 14 */ + struct mpi_coredump_segment_header pbus_regs_seg_hdr; + u32 pbus_regs[PBUS_REGS_CNT]; + + /* segment 15 */ + struct mpi_coredump_segment_header mde_regs_seg_hdr; + u32 mde_regs[MDE_REGS_CNT]; + + /* segment 16 */ + struct mpi_coredump_segment_header nic_regs_seg_hdr; + u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT]; + + /* segment 17 */ + struct mpi_coredump_segment_header nic2_regs_seg_hdr; + u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT]; + + /* segment 18 */ + struct mpi_coredump_segment_header xgmac1_seg_hdr; + u32 xgmac1[XGMAC_DUMP_WORD_COUNT]; + + /* segment 19 */ + struct mpi_coredump_segment_header xgmac2_seg_hdr; + u32 xgmac2[XGMAC_DUMP_WORD_COUNT]; + + /* segment 20 */ + struct mpi_coredump_segment_header code_ram_seg_hdr; + u32 code_ram[CODE_RAM_CNT]; + + /* segment 21 */ + struct mpi_coredump_segment_header memc_ram_seg_hdr; + u32 memc_ram[MEMC_RAM_CNT]; + + /* segment 22 */ + struct mpi_coredump_segment_header xaui_an_hdr; + u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT]; + + /* segment 23 */ + struct mpi_coredump_segment_header xaui_hss_pcs_hdr; + u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; + + /* segment 24 */ + struct mpi_coredump_segment_header xfi_an_hdr; + u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT]; + + /* segment 25 */ + struct mpi_coredump_segment_header xfi_train_hdr; + u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; + + /* segment 26 */ + struct mpi_coredump_segment_header xfi_hss_pcs_hdr; + u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; + + /* segment 27 */ + struct mpi_coredump_segment_header xfi_hss_tx_hdr; + u32 
serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT]; + + /* segment 28 */ + struct mpi_coredump_segment_header xfi_hss_rx_hdr; + u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT]; + + /* segment 29 */ + struct mpi_coredump_segment_header xfi_hss_pll_hdr; + u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT]; + + /* segment 30 */ + struct mpi_coredump_segment_header misc_nic_seg_hdr; + struct ql_nic_misc misc_nic_info; + + /* segment 31 */ + /* one interrupt state for each CQ */ + struct mpi_coredump_segment_header intr_states_seg_hdr; + u32 intr_states[MAX_RX_RINGS]; + + /* segment 32 */ + /* 3 cam words each for 16 unicast, + * 2 cam words for each of 32 multicast. + */ + struct mpi_coredump_segment_header cam_entries_seg_hdr; + u32 cam_entries[(16 * 3) + (32 * 3)]; + + /* segment 33 */ + struct mpi_coredump_segment_header nic_routing_words_seg_hdr; + u32 nic_routing_words[16]; + /* segment 34 */ + struct mpi_coredump_segment_header ets_seg_hdr; + u32 ets[ETS_REGS_DUMP_WORD_COUNT]; + + /* segment 35 */ + struct mpi_coredump_segment_header probe_dump_seg_hdr; + u32 probe_dump[PRB_MX_DUMP_TOT_COUNT]; + + /* segment 36 */ + struct mpi_coredump_segment_header routing_reg_seg_hdr; + u32 routing_regs[RT_IDX_DUMP_TOT_WORDS]; + + /* segment 37 */ + struct mpi_coredump_segment_header mac_prot_reg_seg_hdr; + u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS]; + + /* segment 38 */ + struct mpi_coredump_segment_header xaui2_an_hdr; + u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT]; + + /* segment 39 */ + struct mpi_coredump_segment_header xaui2_hss_pcs_hdr; + u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; + + /* segment 40 */ + struct mpi_coredump_segment_header xfi2_an_hdr; + u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT]; + + /* segment 41 */ + struct mpi_coredump_segment_header xfi2_train_hdr; + u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; + + /* segment 42 */ + struct mpi_coredump_segment_header xfi2_hss_pcs_hdr; + u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; + + /* segment 43 */ + struct mpi_coredump_segment_header xfi2_hss_tx_hdr; + u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT]; + + /* segment 44 */ + struct mpi_coredump_segment_header xfi2_hss_rx_hdr; + u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT]; + + /* segment 45 */ + struct mpi_coredump_segment_header xfi2_hss_pll_hdr; + u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT]; + + /* segment 50 */ + /* semaphore register for each of the 4 functions */ + struct mpi_coredump_segment_header sem_regs_seg_hdr; + u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS]; +}; + +/* + * intr_context structure is used during initialization + * to hook the interrupts. It is also used in a single + * irq environment as a context to the ISR. + */ +struct intr_context { + struct ql_adapter *qdev; + u32 intr; + u32 irq_mask; /* Mask of which rings the vector services. */ + u32 hooked; + u32 intr_en_mask; /* value/mask used to enable this intr */ + u32 intr_dis_mask; /* value/mask used to disable this intr */ + u32 intr_read_mask; /* value/mask used to read this intr */ + char name[IFNAMSIZ * 2]; + atomic_t irq_cnt; /* irq_cnt is used in single vector + * environment. It's incremented for each + * irq handler that is scheduled. When each + * handler finishes it decrements irq_cnt and + * enables interrupts if it's zero. */ + irq_handler_t handler; +}; + +/* adapter flags definitions. */ +enum { + QL_ADAPTER_UP = 0, /* Adapter has been brought up. 
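 + * These flag values are bit numbers, tested and set with test_bit() + * and friends on qdev->flags.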
*/ + QL_LEGACY_ENABLED = 1, + QL_MSI_ENABLED = 2, + QL_MSIX_ENABLED = 3, + QL_DMA64 = 4, + QL_PROMISCUOUS = 5, + QL_ALLMULTI = 6, + QL_PORT_CFG = 7, + QL_CAM_RT_SET = 8, + QL_SELFTEST = 9, + QL_LB_LINK_UP = 10, + QL_FRC_COREDUMP = 11, + QL_EEH_FATAL = 12, + QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */ +}; + +/* link_status bit definitions */ +enum { + STS_LOOPBACK_MASK = 0x00000700, + STS_LOOPBACK_PCS = 0x00000100, + STS_LOOPBACK_HSS = 0x00000200, + STS_LOOPBACK_EXT = 0x00000300, + STS_PAUSE_MASK = 0x000000c0, + STS_PAUSE_STD = 0x00000040, + STS_PAUSE_PRI = 0x00000080, + STS_SPEED_MASK = 0x00000038, + STS_SPEED_100Mb = 0x00000000, + STS_SPEED_1Gb = 0x00000008, + STS_SPEED_10Gb = 0x00000010, + STS_LINK_TYPE_MASK = 0x00000007, + STS_LINK_TYPE_XFI = 0x00000001, + STS_LINK_TYPE_XAUI = 0x00000002, + STS_LINK_TYPE_XFI_BP = 0x00000003, + STS_LINK_TYPE_XAUI_BP = 0x00000004, + STS_LINK_TYPE_10GBASET = 0x00000005, +}; + +/* link_config bit definitions */ +enum { + CFG_JUMBO_FRAME_SIZE = 0x00010000, + CFG_PAUSE_MASK = 0x00000060, + CFG_PAUSE_STD = 0x00000020, + CFG_PAUSE_PRI = 0x00000040, + CFG_DCBX = 0x00000010, + CFG_LOOPBACK_MASK = 0x00000007, + CFG_LOOPBACK_PCS = 0x00000002, + CFG_LOOPBACK_HSS = 0x00000004, + CFG_LOOPBACK_EXT = 0x00000006, + CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580, +}; + +struct nic_operations { + + int (*get_flash) (struct ql_adapter *); + int (*port_initialize) (struct ql_adapter *); +}; + +/* + * The main Adapter structure definition. + * This structure has all fields relevant to the hardware. + */ +struct ql_adapter { + struct ricb ricb; + unsigned long flags; + u32 wol; + + struct nic_stats nic_stats; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + /* PCI Configuration information for this device */ + struct pci_dev *pdev; + struct net_device *ndev; /* Parent NET device */ + + /* Hardware information */ + u32 chip_rev_id; + u32 fw_rev_id; + u32 func; /* PCI function for this adapter */ + u32 alt_func; /* PCI function for alternate adapter */ + u32 port; /* Port number for this adapter */ + + spinlock_t adapter_lock; + spinlock_t hw_lock; + spinlock_t stats_lock; + + /* PCI Bus Relative Register Addresses */ + void __iomem *reg_base; + void __iomem *doorbell_area; + u32 doorbell_area_size; + + u32 msg_enable; + + /* Page for Shadow Registers */ + void *rx_ring_shadow_reg_area; + dma_addr_t rx_ring_shadow_reg_dma; + void *tx_ring_shadow_reg_area; + dma_addr_t tx_ring_shadow_reg_dma; + + u32 mailbox_in; + u32 mailbox_out; + struct mbox_params idc_mbc; + struct mutex mpi_mutex; + + int tx_ring_size; + int rx_ring_size; + u32 intr_count; + struct msix_entry *msi_x_entry; + struct intr_context intr_context[MAX_RX_RINGS]; + + int tx_ring_count; /* One per online CPU. */ + u32 rss_ring_count; /* One per irq vector. 
*/ + /* + * rx_ring_count = + * (CPU count * outbound completion rx_ring) + + * (irq_vector_cnt * inbound (RSS) completion rx_ring) + */ + int rx_ring_count; + int ring_mem_size; + void *ring_mem; + + struct rx_ring rx_ring[MAX_RX_RINGS]; + struct tx_ring tx_ring[MAX_TX_RINGS]; + unsigned int lbq_buf_order; + + int rx_csum; + u32 default_rx_queue; + + u16 rx_coalesce_usecs; /* cqicb->int_delay */ + u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */ + u16 tx_coalesce_usecs; /* cqicb->int_delay */ + u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */ + + u32 xg_sem_mask; + u32 port_link_up; + u32 port_init; + u32 link_status; + struct ql_mpi_coredump *mpi_coredump; + u32 core_is_dumped; + u32 link_config; + u32 led_config; + u32 max_frame_size; + + union flash_params flash; + + struct workqueue_struct *workqueue; + struct delayed_work asic_reset_work; + struct delayed_work mpi_reset_work; + struct delayed_work mpi_work; + struct delayed_work mpi_port_cfg_work; + struct delayed_work mpi_idc_work; + struct delayed_work mpi_core_to_log; + struct completion ide_completion; + const struct nic_operations *nic_ops; + u16 device_id; + struct timer_list timer; + atomic_t lb_count; + /* Keep local copy of current mac address. */ + char current_mac_addr[6]; +}; + +/* + * Typical Register accessor for memory mapped device. + */ +static inline u32 ql_read32(const struct ql_adapter *qdev, int reg) +{ + return readl(qdev->reg_base + reg); +} + +/* + * Typical Register accessor for memory mapped device. + */ +static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val) +{ + writel(val, qdev->reg_base + reg); +} + +/* + * Doorbell Registers: + * Doorbell registers are virtual registers in the PCI memory space. + * The space is allocated by the chip during PCI initialization. The + * device driver finds the doorbell address in BAR 3 in PCI config space. + * The registers are used to control outbound and inbound queues. For + * example, the producer index for an outbound queue. Each queue uses + * 1 4k chunk of memory. The lower half of the space is for outbound + * queues. The upper half is for inbound queues. + */ +static inline void ql_write_db_reg(u32 val, void __iomem *addr) +{ + writel(val, addr); + mmiowb(); +} + +/* + * Shadow Registers: + * Outbound queues have a consumer index that is maintained by the chip. + * Inbound queues have a producer index that is maintained by the chip. + * For lower overhead, these registers are "shadowed" to host memory + * which allows the device driver to track the queue progress without + * PCI reads. When an entry is placed on an inbound queue, the chip will + * update the relevant index register and then copy the value to the + * shadow register in host memory. 
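 + * A completion routine can therefore poll the shadow instead of doing + * a PCI read, roughly (a sketch; field names from struct rx_ring + * above): + * + *	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + * + *	while (prod != rx_ring->cnsmr_idx) { + *		... handle rx_ring->curr_entry ... + *		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + *	}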
+ */ +static inline u32 ql_read_sh_reg(__le32 *addr) +{ + u32 reg; + reg = le32_to_cpu(*addr); + rmb(); + return reg; +} + +extern char qlge_driver_name[]; +extern const char qlge_driver_version[]; +extern const struct ethtool_ops qlge_ethtool_ops; + +extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); +extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); +extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); +extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, + u32 *value); +extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); +extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, + u16 q_id); +void ql_queue_fw_error(struct ql_adapter *qdev); +void ql_mpi_work(struct work_struct *work); +void ql_mpi_reset_work(struct work_struct *work); +void ql_mpi_core_to_log(struct work_struct *work); +int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); +void ql_queue_asic_error(struct ql_adapter *qdev); +u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); +void ql_set_ethtool_ops(struct net_device *ndev); +int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); +void ql_mpi_idc_work(struct work_struct *work); +void ql_mpi_port_cfg_work(struct work_struct *work); +int ql_mb_get_fw_state(struct ql_adapter *qdev); +int ql_cam_route_initialize(struct ql_adapter *qdev); +int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); +int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data); +int ql_unpause_mpi_risc(struct ql_adapter *qdev); +int ql_pause_mpi_risc(struct ql_adapter *qdev); +int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); +int ql_soft_reset_mpi_risc(struct ql_adapter *qdev); +int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, + u32 ram_addr, int word_count); +int ql_core_dump(struct ql_adapter *qdev, + struct ql_mpi_coredump *mpi_coredump); +int ql_mb_about_fw(struct ql_adapter *qdev); +int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); +int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); +int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config); +int ql_mb_get_led_cfg(struct ql_adapter *qdev); +void ql_link_on(struct ql_adapter *qdev); +void ql_link_off(struct ql_adapter *qdev); +int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); +int ql_mb_get_port_cfg(struct ql_adapter *qdev); +int ql_mb_set_port_cfg(struct ql_adapter *qdev); +int ql_wait_fifo_empty(struct ql_adapter *qdev); +void ql_get_dump(struct ql_adapter *qdev, void *buff); +void ql_gen_reg_dump(struct ql_adapter *qdev, + struct ql_reg_dump *mpi_coredump); +netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); +void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); +int ql_own_firmware(struct ql_adapter *qdev); +int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); + +/* #define QL_ALL_DUMP */ +/* #define QL_REG_DUMP */ +/* #define QL_DEV_DUMP */ +/* #define QL_CB_DUMP */ +/* #define QL_IB_DUMP */ +/* #define QL_OB_DUMP */ + +#ifdef QL_REG_DUMP +extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); +extern void ql_dump_routing_entries(struct ql_adapter *qdev); +extern void ql_dump_regs(struct ql_adapter *qdev); +#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev) +#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev) +#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev) +#else +#define QL_DUMP_REGS(qdev) +#define 
QL_DUMP_ROUTE(qdev) +#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) +#endif + +#ifdef QL_STAT_DUMP +extern void ql_dump_stat(struct ql_adapter *qdev); +#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev) +#else +#define QL_DUMP_STAT(qdev) +#endif + +#ifdef QL_DEV_DUMP +extern void ql_dump_qdev(struct ql_adapter *qdev); +#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev) +#else +#define QL_DUMP_QDEV(qdev) +#endif + +#ifdef QL_CB_DUMP +extern void ql_dump_wqicb(struct wqicb *wqicb); +extern void ql_dump_tx_ring(struct tx_ring *tx_ring); +extern void ql_dump_ricb(struct ricb *ricb); +extern void ql_dump_cqicb(struct cqicb *cqicb); +extern void ql_dump_rx_ring(struct rx_ring *rx_ring); +extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id); +#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb) +#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb) +#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring) +#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb) +#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring) +#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \ + ql_dump_hw_cb(qdev, size, bit, q_id) +#else +#define QL_DUMP_RICB(ricb) +#define QL_DUMP_WQICB(wqicb) +#define QL_DUMP_TX_RING(tx_ring) +#define QL_DUMP_CQICB(cqicb) +#define QL_DUMP_RX_RING(rx_ring) +#define QL_DUMP_HW_CB(qdev, size, bit, q_id) +#endif + +#ifdef QL_OB_DUMP +extern void ql_dump_tx_desc(struct tx_buf_desc *tbd); +extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb); +extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp); +#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb) +#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp) +#else +#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) +#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) +#endif + +#ifdef QL_IB_DUMP +extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp); +#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp) +#else +#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) +#endif + +#ifdef QL_ALL_DUMP +extern void ql_dump_all(struct ql_adapter *qdev); +#define QL_DUMP_ALL(qdev) ql_dump_all(qdev) +#else +#define QL_DUMP_ALL(qdev) +#endif + +#endif /* _QLGE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c new file mode 100644 index 0000000..fca804f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c @@ -0,0 +1,2044 @@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> + +#include "qlge.h" + +/* Read a NIC register from the alternate function. */ +static u32 ql_read_other_func_reg(struct ql_adapter *qdev, + u32 reg) +{ + u32 register_to_read; + u32 reg_val; + unsigned int status = 0; + + register_to_read = MPI_NIC_REG_BLOCK + | MPI_NIC_READ + | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) + | reg; + status = ql_read_mpi_reg(qdev, register_to_read, &reg_val); + if (status != 0) + return 0xffffffff; + + return reg_val; +} + +/* Write a NIC register on the alternate function. 
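 + * As in ql_read_other_func_reg() above, the access is routed through + * the MPI processor by composing MPI_NIC_REG_BLOCK, the alternate + * function number and the register offset.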
*/ +static int ql_write_other_func_reg(struct ql_adapter *qdev, + u32 reg, u32 reg_val) +{ + u32 register_to_read; + int status = 0; + + register_to_read = MPI_NIC_REG_BLOCK + | MPI_NIC_READ + | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) + | reg; + status = ql_write_mpi_reg(qdev, register_to_read, reg_val); + + return status; +} + +static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, + u32 bit, u32 err_bit) +{ + u32 temp; + int count = 10; + + while (count) { + temp = ql_read_other_func_reg(qdev, reg); + + /* check for errors */ + if (temp & err_bit) + return -1; + else if (temp & bit) + return 0; + mdelay(10); + count--; + } + return -1; +} + +static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, + u32 *data) +{ + int status; + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, + XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* set up for reg read */ + ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R); + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, + XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* get the data */ + *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4)); +exit: + return status; +} + +/* Read out the SERDES registers */ +static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data) +{ + int status; + + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* set up for reg read */ + ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R); + + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* get the data */ + *data = ql_read32(qdev, XG_SERDES_DATA); +exit: + return status; +} + +static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, + u32 *direct_ptr, u32 *indirect_ptr, + unsigned int direct_valid, unsigned int indirect_valid) +{ + unsigned int status; + + status = 1; + if (direct_valid) + status = ql_read_serdes_reg(qdev, addr, direct_ptr); + /* Dead fill any failures or invalids. */ + if (status) + *direct_ptr = 0xDEADBEEF; + + status = 1; + if (indirect_valid) + status = ql_read_other_func_serdes_reg( + qdev, addr, indirect_ptr); + /* Dead fill any failures or invalids. 
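 + * The 0xDEADBEEF marker makes unreadable words easy to spot when + * scanning the dump.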
*/ + if (status) + *indirect_ptr = 0xDEADBEEF; +} + +static int ql_get_serdes_regs(struct ql_adapter *qdev, + struct ql_mpi_coredump *mpi_coredump) +{ + int status; + unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid; + unsigned int xaui_indirect_valid, i; + u32 *direct_ptr, temp; + u32 *indirect_ptr; + + xfi_direct_valid = xfi_indirect_valid = 0; + xaui_direct_valid = xaui_indirect_valid = 1; + + /* The XAUI needs to be read out per port */ + if (qdev->func & 1) { + /* We are NIC 2 */ + status = ql_read_other_func_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_indirect_valid = 0; + + status = ql_read_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_direct_valid = 0; + } else { + /* We are NIC 1 */ + status = ql_read_other_func_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_indirect_valid = 0; + + status = ql_read_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_direct_valid = 0; + } + + /* + * XFI register is shared so only need to read one + * function and then check the bits. + */ + status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp); + if (status) + temp = 0; + + if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) == + XG_SERDES_ADDR_XFI1_PWR_UP) { + /* now see if I'm NIC 1 or NIC 2 */ + if (qdev->func & 1) + /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ + xfi_indirect_valid = 1; + else + xfi_direct_valid = 1; + } + if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) == + XG_SERDES_ADDR_XFI2_PWR_UP) { + /* now see if I'm NIC 1 or NIC 2 */ + if (qdev->func & 1) + /* I'm NIC 2, so the direct (NIC2) xfi is up. */ + xfi_direct_valid = 1; + else + xfi_indirect_valid = 1; + } + + /* Get XAUI_AN register block. */ + if (qdev->func & 1) { + /* Function 2 is direct */ + direct_ptr = mpi_coredump->serdes2_xaui_an; + indirect_ptr = mpi_coredump->serdes_xaui_an; + } else { + /* Function 1 is direct */ + direct_ptr = mpi_coredump->serdes_xaui_an; + indirect_ptr = mpi_coredump->serdes2_xaui_an; + } + + for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xaui_direct_valid, xaui_indirect_valid); + + /* Get XAUI_HSS_PCS register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xaui_hss_pcs; + indirect_ptr = + mpi_coredump->serdes_xaui_hss_pcs; + } else { + direct_ptr = + mpi_coredump->serdes_xaui_hss_pcs; + indirect_ptr = + mpi_coredump->serdes2_xaui_hss_pcs; + } + + for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xaui_direct_valid, xaui_indirect_valid); + + /* Get XAUI_XFI_AN register block. 
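 + * As with the XAUI blocks above, the direct and indirect destination + * arrays swap according to which PCI function this instance is.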
*/ + if (qdev->func & 1) { + direct_ptr = mpi_coredump->serdes2_xfi_an; + indirect_ptr = mpi_coredump->serdes_xfi_an; + } else { + direct_ptr = mpi_coredump->serdes_xfi_an; + indirect_ptr = mpi_coredump->serdes2_xfi_an; + } + + for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_TRAIN register block. */ + if (qdev->func & 1) { + direct_ptr = mpi_coredump->serdes2_xfi_train; + indirect_ptr = + mpi_coredump->serdes_xfi_train; + } else { + direct_ptr = mpi_coredump->serdes_xfi_train; + indirect_ptr = + mpi_coredump->serdes2_xfi_train; + } + + for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_HSS_PCS register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_pcs; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_pcs; + } else { + direct_ptr = + mpi_coredump->serdes_xfi_hss_pcs; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_pcs; + } + + for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_HSS_TX register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_tx; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_tx; + } else { + direct_ptr = mpi_coredump->serdes_xfi_hss_tx; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_tx; + } + for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_HSS_RX register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_rx; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_rx; + } else { + direct_ptr = mpi_coredump->serdes_xfi_hss_rx; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_rx; + } + + for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + + /* Get XAUI_XFI_HSS_PLL register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_pll; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_pll; + } else { + direct_ptr = + mpi_coredump->serdes_xfi_hss_pll; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_pll; + } + for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + return 0; +} + +static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg, + u32 *data) +{ + int status = 0; + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, + XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + if (status) + goto exit; + + /* set up for reg read */ + ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R); + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, + XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + if (status) + goto exit; + + /* get the data */ + *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4); +exit: + return status; +} + +/* Read the 400 xgmac control/statistics registers + * skipping unused locations. 
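 + * When other_function is set the registers are fetched through the + * MPI indirection (ql_read_other_func_xgmac_reg()); words that fail + * to read are dead-filled with 0xdeadbeef.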
 + */ +static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf, + unsigned int other_function) +{ + int status = 0; + int i; + + for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) { + /* We're reading 400 xgmac registers, but we filter out + * several locations that are non-responsive to reads. + */ + if ((i == 0x00000114) || + (i == 0x00000118) || + (i == 0x0000013c) || + (i == 0x00000140) || + (i > 0x00000150 && i < 0x000001fc) || + (i > 0x00000278 && i < 0x000002a0) || + (i > 0x000002c0 && i < 0x000002cf) || + (i > 0x000002dc && i < 0x000002f0) || + (i > 0x000003c8 && i < 0x00000400) || + (i > 0x00000400 && i < 0x00000410) || + (i > 0x00000410 && i < 0x00000420) || + (i > 0x00000420 && i < 0x00000430) || + (i > 0x00000430 && i < 0x00000440) || + (i > 0x00000440 && i < 0x00000450) || + (i > 0x00000450 && i < 0x00000500) || + (i > 0x0000054c && i < 0x00000568) || + (i > 0x000005c8 && i < 0x00000600)) { + if (other_function) + status = + ql_read_other_func_xgmac_reg(qdev, i, buf); + else + status = ql_read_xgmac_reg(qdev, i, buf); + + if (status) + *buf = 0xdeadbeef; + break; + } + } + return status; +} + +static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) +{ + int status = 0; + int i; + + for (i = 0; i < 8; i++, buf++) { + ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000); + *buf = ql_read32(qdev, NIC_ETS); + } + + for (i = 0; i < 2; i++, buf++) { + ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000); + *buf = ql_read32(qdev, CNA_ETS); + } + + return status; +} + +static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf) +{ + int i; + + for (i = 0; i < qdev->rx_ring_count; i++, buf++) { + ql_write32(qdev, INTR_EN, + qdev->intr_context[i].intr_read_mask); + *buf = ql_read32(qdev, INTR_EN); + } +} + +static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) +{ + int i, status; + u32 value[3]; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return status; + + for (i = 0; i < 16; i++) { + status = ql_get_mac_addr_reg(qdev, + MAC_ADDR_TYPE_CAM_MAC, i, value); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed read of mac index register\n"); + goto err; + } + *buf++ = value[0]; /* lower MAC address */ + *buf++ = value[1]; /* upper MAC address */ + *buf++ = value[2]; /* output */ + } + for (i = 0; i < 32; i++) { + status = ql_get_mac_addr_reg(qdev, + MAC_ADDR_TYPE_MULTI_MAC, i, value); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed read of mac index register\n"); + goto err; + } + *buf++ = value[0]; /* lower Mcast address */ + *buf++ = value[1]; /* upper Mcast address */ + } +err: + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + return status; +} + +static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf) +{ + int status; + u32 value, i; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + + for (i = 0; i < 16; i++) { + status = ql_get_routing_reg(qdev, i, &value); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed read of routing index register\n"); + goto err; + } else { + *buf++ = value; + } + } +err: + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +/* Read the MPI Processor shadow registers */ +static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf) +{ + u32 i; + int status; + + for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) { + status = ql_write_mpi_reg(qdev, RISC_124, + (SHADOW_OFFSET | i << SHADOW_REG_SHIFT)); + if (status) + goto end; + status = ql_read_mpi_reg(qdev, RISC_127, buf); + if (status) + goto end; + } +end: + 
return status; +} + +/* Read the MPI Processor core registers */ +static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf, + u32 offset, u32 count) +{ + int i, status = 0; + for (i = 0; i < count; i++, buf++) { + status = ql_read_mpi_reg(qdev, offset + i, buf); + if (status) + return status; + } + return status; +} + +/* Read the ASIC probe dump */ +static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock, + u32 valid, u32 *buf) +{ + u32 module, mux_sel, probe, lo_val, hi_val; + + for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) { + if (!((valid >> module) & 1)) + continue; + for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) { + probe = clock + | PRB_MX_ADDR_ARE + | mux_sel + | (module << PRB_MX_ADDR_MOD_SEL_SHIFT); + ql_write32(qdev, PRB_MX_ADDR, probe); + lo_val = ql_read32(qdev, PRB_MX_DATA); + if (mux_sel == 0) { + *buf = probe; + buf++; + } + probe |= PRB_MX_ADDR_UP; + ql_write32(qdev, PRB_MX_ADDR, probe); + hi_val = ql_read32(qdev, PRB_MX_DATA); + *buf = lo_val; + buf++; + *buf = hi_val; + buf++; + } + } + return buf; +} + +static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) +{ + /* First we have to enable the probe mux */ + ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); + buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, + PRB_MX_ADDR_VALID_SYS_MOD, buf); + buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, + PRB_MX_ADDR_VALID_PCI_MOD, buf); + buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, + PRB_MX_ADDR_VALID_XGM_MOD, buf); + buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, + PRB_MX_ADDR_VALID_FC_MOD, buf); + return 0; + +} + +/* Read out the routing index registers */ +static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf) +{ + int status; + u32 type, index, index_max; + u32 result_index; + u32 result_data; + u32 val; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + + for (type = 0; type < 4; type++) { + if (type < 2) + index_max = 8; + else + index_max = 16; + for (index = 0; index < index_max; index++) { + val = RT_IDX_RS + | (type << RT_IDX_TYPE_SHIFT) + | (index << RT_IDX_IDX_SHIFT); + ql_write32(qdev, RT_IDX, val); + result_index = 0; + while ((result_index & RT_IDX_MR) == 0) + result_index = ql_read32(qdev, RT_IDX); + result_data = ql_read32(qdev, RT_DATA); + *buf = type; + buf++; + *buf = index; + buf++; + *buf = result_index; + buf++; + *buf = result_data; + buf++; + } + } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +/* Read out the MAC protocol registers */ +static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) +{ + u32 result_index, result_data; + u32 type; + u32 index; + u32 offset; + u32 val; + u32 initial_val = MAC_ADDR_RS; + u32 max_index; + u32 max_offset; + + for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) { + switch (type) { + + case 0: /* CAM */ + initial_val |= MAC_ADDR_ADR; + max_index = MAC_ADDR_MAX_CAM_ENTRIES; + max_offset = MAC_ADDR_MAX_CAM_WCOUNT; + break; + case 1: /* Multicast MAC Address */ + max_index = MAC_ADDR_MAX_CAM_WCOUNT; + max_offset = MAC_ADDR_MAX_CAM_WCOUNT; + break; + case 2: /* VLAN filter mask */ + case 3: /* MC filter mask */ + max_index = MAC_ADDR_MAX_CAM_WCOUNT; + max_offset = MAC_ADDR_MAX_CAM_WCOUNT; + break; + case 4: /* FC MAC addresses */ + max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES; + max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT; + break; + case 5: /* Mgmt MAC addresses */ + max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT; + 
break; + case 6: /* Mgmt VLAN addresses */ + max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT; + break; + case 7: /* Mgmt IPv4 address */ + max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT; + break; + case 8: /* Mgmt IPv6 address */ + max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT; + break; + case 9: /* Mgmt TCP/UDP Dest port */ + max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT; + break; + default: + pr_err("Bad type!!! 0x%08x\n", type); + max_index = 0; + max_offset = 0; + break; + } + for (index = 0; index < max_index; index++) { + for (offset = 0; offset < max_offset; offset++) { + val = initial_val + | (type << MAC_ADDR_TYPE_SHIFT) + | (index << MAC_ADDR_IDX_SHIFT) + | (offset); + ql_write32(qdev, MAC_ADDR_IDX, val); + result_index = 0; + while ((result_index & MAC_ADDR_MR) == 0) { + result_index = ql_read32(qdev, + MAC_ADDR_IDX); + } + result_data = ql_read32(qdev, MAC_ADDR_DATA); + *buf = result_index; + buf++; + *buf = result_data; + buf++; + } + } +} + +static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf) +{ + u32 func_num, reg, reg_val; + int status; + + for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) { + reg = MPI_NIC_REG_BLOCK + | (func_num << MPI_NIC_FUNCTION_SHIFT) + | (SEM / 4); + status = ql_read_mpi_reg(qdev, reg, &reg_val); + *buf = reg_val; + /* if the read failed then dead fill the element. */ + if (status) + *buf = 0xdeadbeef; + buf++; + } +} + +/* Create a coredump segment header */ +static void ql_build_coredump_seg_header( + struct mpi_coredump_segment_header *seg_hdr, + u32 seg_number, u32 seg_size, u8 *desc) +{ + memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header)); + seg_hdr->cookie = MPI_COREDUMP_COOKIE; + seg_hdr->segNum = seg_number; + seg_hdr->segSize = seg_size; + memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); +} + +/* + * This function should be called when a coredump / probedump + * is to be extracted from the HBA. It is assumed there is a + * qdev structure that contains the base address of the register + * space for this function as well as a coredump structure that + * will contain the dump. + */ +int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) +{ + int status; + int i; + + if (!mpi_coredump) { + netif_err(qdev, drv, qdev->ndev, "No memory available\n"); + return -ENOMEM; + } + + /* Try to get the spinlock, but don't worry if + * it isn't available. If the firmware died it + * might be holding the sem. + */ + ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); + + status = ql_pause_mpi_risc(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed RISC pause. 
Status = 0x%.08x\n", status); + goto err; + } + + /* Insert the global header */ + memset(&(mpi_coredump->mpi_global_header), 0, + sizeof(struct mpi_coredump_global_header)); + mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; + mpi_coredump->mpi_global_header.headerSize = + sizeof(struct mpi_coredump_global_header); + mpi_coredump->mpi_global_header.imageSize = + sizeof(struct ql_mpi_coredump); + memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + sizeof(mpi_coredump->mpi_global_header.idString)); + + /* Get generic NIC reg dump */ + ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, + NIC1_CONTROL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, + NIC2_CONTROL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); + + /* Get XGMac registers. (Segment 18, Rev C. step 21) */ + ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, + NIC1_XGMAC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, + NIC2_XGMAC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); + + if (qdev->func & 1) { + /* Odd means our function is NIC 2 */ + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic2_regs[i] = + ql_read32(qdev, i * sizeof(u32)); + + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic_regs[i] = + ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); + + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0); + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1); + } else { + /* Even means our function is NIC 1 */ + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic_regs[i] = + ql_read32(qdev, i * sizeof(u32)); + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic2_regs[i] = + ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); + + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0); + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1); + } + + /* Rev C. Step 20a */ + ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, + XAUI_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xaui_an), + "XAUI AN Registers"); + + /* Rev C. 
Step 20b */ + ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, + XAUI_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xaui_hss_pcs), + "XAUI HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_an), + "XFI AN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, + XFI_TRAIN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_train), + "XFI TRAIN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, + XFI_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_pcs), + "XFI HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, + XFI_HSS_TX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_tx), + "XFI HSS TX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, + XFI_HSS_RX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_rx), + "XFI HSS RX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, + XFI_HSS_PLL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_pll), + "XFI HSS PLL Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, + XAUI2_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xaui_an), + "XAUI2 AN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, + XAUI2_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xaui_hss_pcs), + "XAUI2 HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, + XFI2_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_an), + "XFI2 AN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, + XFI2_TRAIN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_train), + "XFI2 TRAIN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, + XFI2_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_pcs), + "XFI2 HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, + XFI2_HSS_TX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_tx), + "XFI2 HSS TX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, + XFI2_HSS_RX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_rx), + "XFI2 HSS RX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, + XFI2_HSS_PLL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_pll), + "XFI2 HSS PLL Registers"); + + status = ql_get_serdes_regs(qdev, mpi_coredump); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed Dump of Serdes Registers. 
Status = 0x%.08x\n", + status); + goto err; + } + + ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, + CORE_SEG_NUM, + sizeof(mpi_coredump->core_regs_seg_hdr) + + sizeof(mpi_coredump->mpi_core_regs) + + sizeof(mpi_coredump->mpi_core_sh_regs), + "Core Registers"); + + /* Get the MPI Core Registers */ + status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0], + MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT); + if (status) + goto err; + /* Get the 16 MPI shadow registers */ + status = ql_get_mpi_shadow_regs(qdev, + &mpi_coredump->mpi_core_sh_regs[0]); + if (status) + goto err; + + /* Get the Test Logic Registers */ + ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, + TEST_LOGIC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->test_logic_regs), + "Test Logic Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0], + TEST_REGS_ADDR, TEST_REGS_CNT); + if (status) + goto err; + + /* Get the RMII Registers */ + ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, + RMII_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->rmii_regs), + "RMII Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0], + RMII_REGS_ADDR, RMII_REGS_CNT); + if (status) + goto err; + + /* Get the FCMAC1 Registers */ + ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, + FCMAC1_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fcmac1_regs), + "FCMAC1 Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0], + FCMAC1_REGS_ADDR, FCMAC_REGS_CNT); + if (status) + goto err; + + /* Get the FCMAC2 Registers */ + + ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, + FCMAC2_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fcmac2_regs), + "FCMAC2 Registers"); + + status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0], + FCMAC2_REGS_ADDR, FCMAC_REGS_CNT); + if (status) + goto err; + + /* Get the FC1 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, + FC1_MBOX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fc1_mbx_regs), + "FC1 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0], + FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the IDE Registers */ + ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, + IDE_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->ide_regs), + "IDE Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0], + IDE_REGS_ADDR, IDE_REGS_CNT); + if (status) + goto err; + + /* Get the NIC1 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, + NIC1_MBOX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic1_mbx_regs), + "NIC1 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0], + NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the SMBus Registers */ + ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, + SMBUS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->smbus_regs), + "SMBus Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0], + SMBUS_REGS_ADDR, SMBUS_REGS_CNT); + if (status) + goto err; + + /* Get the FC2 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, + FC2_MBOX_SEG_NUM, + 
sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fc2_mbx_regs), + "FC2 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0], + FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the NIC2 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, + NIC2_MBOX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic2_mbx_regs), + "NIC2 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0], + NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the I2C Registers */ + ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, + I2C_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->i2c_regs), + "I2C Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0], + I2C_REGS_ADDR, I2C_REGS_CNT); + if (status) + goto err; + + /* Get the MEMC Registers */ + ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, + MEMC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->memc_regs), + "MEMC Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0], + MEMC_REGS_ADDR, MEMC_REGS_CNT); + if (status) + goto err; + + /* Get the PBus Registers */ + ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, + PBUS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->pbus_regs), + "PBUS Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0], + PBUS_REGS_ADDR, PBUS_REGS_CNT); + if (status) + goto err; + + /* Get the MDE Registers */ + ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, + MDE_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->mde_regs), + "MDE Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0], + MDE_REGS_ADDR, MDE_REGS_CNT); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, + MISC_NIC_INFO_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->misc_nic_info), + "MISC NIC INFO"); + mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; + mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; + mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; + mpi_coredump->misc_nic_info.function = qdev->func; + + /* Segment 31 */ + /* Get indexed register values. */ + ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, + INTR_STATES_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->intr_states), + "INTR States"); + ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); + + ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, + CAM_ENTRIES_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->cam_entries), + "CAM Entries"); + status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, + ROUTING_WORDS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic_routing_words), + "Routing Words"); + status = ql_get_routing_entries(qdev, + &mpi_coredump->nic_routing_words[0]); + if (status) + goto err; + + /* Segment 34 (Rev C. 
step 23) */ + ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, + ETS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->ets), + "ETS Registers"); + status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr, + PROBE_DUMP_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->probe_dump), + "Probe Dump"); + ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]); + + ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr, + ROUTING_INDEX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->routing_regs), + "Routing Regs"); + status = ql_get_routing_index_registers(qdev, + &mpi_coredump->routing_regs[0]); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr, + MAC_PROTOCOL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->mac_prot_regs), + "MAC Prot Regs"); + ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]); + + /* Get the semaphore registers for all 5 functions */ + ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr, + SEM_REGS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->sem_regs), "Sem Registers"); + + ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]); + + /* Prevent the mpi restarting while we dump the memory.*/ + ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC); + + /* clear the pause */ + status = ql_unpause_mpi_risc(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed RISC unpause. Status = 0x%.08x\n", status); + goto err; + } + + /* Reset the RISC so we can dump RAM */ + status = ql_hard_reset_mpi_risc(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed RISC reset. Status = 0x%.08x\n", status); + goto err; + } + + ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr, + WCS_RAM_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->code_ram), + "WCS RAM"); + status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0], + CODE_RAM_ADDR, CODE_RAM_CNT); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed Dump of CODE RAM. Status = 0x%.08x\n", + status); + goto err; + } + + /* Insert the segment header */ + ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr, + MEMC_RAM_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->memc_ram), + "MEMC RAM"); + status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0], + MEMC_RAM_ADDR, MEMC_RAM_CNT); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed Dump of MEMC RAM. 
Status = 0x%.08x\n", + status); + goto err; + } +err: + ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ + return status; + +} + +static void ql_get_core_dump(struct ql_adapter *qdev) +{ + if (!ql_own_firmware(qdev)) { + netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); + return; + } + + if (!netif_running(qdev->ndev)) { + netif_err(qdev, ifup, qdev->ndev, + "Force Coredump can only be done from interface that is up\n"); + return; + } + ql_queue_fw_error(qdev); +} + +void ql_gen_reg_dump(struct ql_adapter *qdev, + struct ql_reg_dump *mpi_coredump) +{ + int i, status; + + + memset(&(mpi_coredump->mpi_global_header), 0, + sizeof(struct mpi_coredump_global_header)); + mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; + mpi_coredump->mpi_global_header.headerSize = + sizeof(struct mpi_coredump_global_header); + mpi_coredump->mpi_global_header.imageSize = + sizeof(struct ql_reg_dump); + memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + sizeof(mpi_coredump->mpi_global_header.idString)); + + + /* segment 16 */ + ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, + MISC_NIC_INFO_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->misc_nic_info), + "MISC NIC INFO"); + mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; + mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; + mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; + mpi_coredump->misc_nic_info.function = qdev->func; + + /* Segment 16, Rev C. Step 18 */ + ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, + NIC1_CONTROL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic_regs), + "NIC Registers"); + /* Get generic reg dump */ + for (i = 0; i < 64; i++) + mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32)); + + /* Segment 31 */ + /* Get indexed register values. */ + ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, + INTR_STATES_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->intr_states), + "INTR States"); + ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); + + ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, + CAM_ENTRIES_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->cam_entries), + "CAM Entries"); + status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); + if (status) + return; + + ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, + ROUTING_WORDS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic_routing_words), + "Routing Words"); + status = ql_get_routing_entries(qdev, + &mpi_coredump->nic_routing_words[0]); + if (status) + return; + + /* Segment 34 (Rev C. step 23) */ + ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, + ETS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->ets), + "ETS Registers"); + status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); + if (status) + return; +} + +void ql_get_dump(struct ql_adapter *qdev, void *buff) +{ + /* + * If the dump has already been taken and is stored + * in our internal buffer and if force dump is set then + * just start the spool to dump it to the log file + * and also, take a snapshot of the general regs to + * to the user's buffer or else take complete dump + * to the user's buffer if force is not set. 
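+	 *
+	 * In short, reading the logic below: if QL_FRC_COREDUMP is
+	 * clear, take a full coredump straight into the user's buffer
+	 * and soft-reset the RISC; if it is set, copy only the general
+	 * registers to the user's buffer and queue a firmware error so
+	 * the previously stored core is spooled to the log by the
+	 * worker thread.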
+ */ + + if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) { + if (!ql_core_dump(qdev, buff)) + ql_soft_reset_mpi_risc(qdev); + else + netif_err(qdev, drv, qdev->ndev, "coredump failed!\n"); + } else { + ql_gen_reg_dump(qdev, buff); + ql_get_core_dump(qdev); + } +} + +/* Coredump to messages log file using separate worker thread */ +void ql_mpi_core_to_log(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_core_to_log.work); + u32 *tmp, count; + int i; + + count = sizeof(struct ql_mpi_coredump) / sizeof(u32); + tmp = (u32 *)qdev->mpi_coredump; + netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, + "Core is dumping to log file!\n"); + + for (i = 0; i < count; i += 8) { + pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x " + "%.08x %.08x %.08x\n", i, + tmp[i + 0], + tmp[i + 1], + tmp[i + 2], + tmp[i + 3], + tmp[i + 4], + tmp[i + 5], + tmp[i + 6], + tmp[i + 7]); + msleep(5); + } +} + +#ifdef QL_REG_DUMP +static void ql_dump_intr_states(struct ql_adapter *qdev) +{ + int i; + u32 value; + for (i = 0; i < qdev->intr_count; i++) { + ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); + value = ql_read32(qdev, INTR_EN); + pr_err("%s: Interrupt %d is %s\n", + qdev->ndev->name, i, + (value & INTR_EN_EN ? "enabled" : "disabled")); + } +} + +#define DUMP_XGMAC(qdev, reg) \ +do { \ + u32 data; \ + ql_read_xgmac_reg(qdev, reg, &data); \ + pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \ +} while (0) + +void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) +{ + if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { + pr_err("%s: Couldn't get xgmac sem\n", __func__); + return; + } + DUMP_XGMAC(qdev, PAUSE_SRC_LO); + DUMP_XGMAC(qdev, PAUSE_SRC_HI); + DUMP_XGMAC(qdev, GLOBAL_CFG); + DUMP_XGMAC(qdev, TX_CFG); + DUMP_XGMAC(qdev, RX_CFG); + DUMP_XGMAC(qdev, FLOW_CTL); + DUMP_XGMAC(qdev, PAUSE_OPCODE); + DUMP_XGMAC(qdev, PAUSE_TIMER); + DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO); + DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI); + DUMP_XGMAC(qdev, MAC_TX_PARAMS); + DUMP_XGMAC(qdev, MAC_RX_PARAMS); + DUMP_XGMAC(qdev, MAC_SYS_INT); + DUMP_XGMAC(qdev, MAC_SYS_INT_MASK); + DUMP_XGMAC(qdev, MAC_MGMT_INT); + DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK); + DUMP_XGMAC(qdev, EXT_ARB_MODE); + ql_sem_unlock(qdev, qdev->xg_sem_mask); +} + +static void ql_dump_ets_regs(struct ql_adapter *qdev) +{ +} + +static void ql_dump_cam_entries(struct ql_adapter *qdev) +{ + int i; + u32 value[3]; + + i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (i) + return; + for (i = 0; i < 4; i++) { + if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { + pr_err("%s: Failed read of mac index register\n", + __func__); + return; + } else { + if (value[0]) + pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n", + qdev->ndev->name, i, value[1], value[0], + value[2]); + } + } + for (i = 0; i < 32; i++) { + if (ql_get_mac_addr_reg + (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { + pr_err("%s: Failed read of mac index register\n", + __func__); + return; + } else { + if (value[0]) + pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n", + qdev->ndev->name, i, value[1], value[0]); + } + } + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); +} + +void ql_dump_routing_entries(struct ql_adapter *qdev) +{ + int i; + u32 value; + i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (i) + return; + for (i = 0; i < 16; i++) { + value = 0; + if (ql_get_routing_reg(qdev, i, &value)) { + pr_err("%s: Failed read of routing index register\n", + __func__); + return; + } else { + if (value) + 
pr_err("%s: Routing Mask %d = 0x%.08x\n", + qdev->ndev->name, i, value); + } + } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); +} + +#define DUMP_REG(qdev, reg) \ + pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) + +void ql_dump_regs(struct ql_adapter *qdev) +{ + pr_err("reg dump for function #%d\n", qdev->func); + DUMP_REG(qdev, SYS); + DUMP_REG(qdev, RST_FO); + DUMP_REG(qdev, FSC); + DUMP_REG(qdev, CSR); + DUMP_REG(qdev, ICB_RID); + DUMP_REG(qdev, ICB_L); + DUMP_REG(qdev, ICB_H); + DUMP_REG(qdev, CFG); + DUMP_REG(qdev, BIOS_ADDR); + DUMP_REG(qdev, STS); + DUMP_REG(qdev, INTR_EN); + DUMP_REG(qdev, INTR_MASK); + DUMP_REG(qdev, ISR1); + DUMP_REG(qdev, ISR2); + DUMP_REG(qdev, ISR3); + DUMP_REG(qdev, ISR4); + DUMP_REG(qdev, REV_ID); + DUMP_REG(qdev, FRC_ECC_ERR); + DUMP_REG(qdev, ERR_STS); + DUMP_REG(qdev, RAM_DBG_ADDR); + DUMP_REG(qdev, RAM_DBG_DATA); + DUMP_REG(qdev, ECC_ERR_CNT); + DUMP_REG(qdev, SEM); + DUMP_REG(qdev, GPIO_1); + DUMP_REG(qdev, GPIO_2); + DUMP_REG(qdev, GPIO_3); + DUMP_REG(qdev, XGMAC_ADDR); + DUMP_REG(qdev, XGMAC_DATA); + DUMP_REG(qdev, NIC_ETS); + DUMP_REG(qdev, CNA_ETS); + DUMP_REG(qdev, FLASH_ADDR); + DUMP_REG(qdev, FLASH_DATA); + DUMP_REG(qdev, CQ_STOP); + DUMP_REG(qdev, PAGE_TBL_RID); + DUMP_REG(qdev, WQ_PAGE_TBL_LO); + DUMP_REG(qdev, WQ_PAGE_TBL_HI); + DUMP_REG(qdev, CQ_PAGE_TBL_LO); + DUMP_REG(qdev, CQ_PAGE_TBL_HI); + DUMP_REG(qdev, COS_DFLT_CQ1); + DUMP_REG(qdev, COS_DFLT_CQ2); + DUMP_REG(qdev, SPLT_HDR); + DUMP_REG(qdev, FC_PAUSE_THRES); + DUMP_REG(qdev, NIC_PAUSE_THRES); + DUMP_REG(qdev, FC_ETHERTYPE); + DUMP_REG(qdev, FC_RCV_CFG); + DUMP_REG(qdev, NIC_RCV_CFG); + DUMP_REG(qdev, FC_COS_TAGS); + DUMP_REG(qdev, NIC_COS_TAGS); + DUMP_REG(qdev, MGMT_RCV_CFG); + DUMP_REG(qdev, XG_SERDES_ADDR); + DUMP_REG(qdev, XG_SERDES_DATA); + DUMP_REG(qdev, PRB_MX_ADDR); + DUMP_REG(qdev, PRB_MX_DATA); + ql_dump_intr_states(qdev); + ql_dump_xgmac_control_regs(qdev); + ql_dump_ets_regs(qdev); + ql_dump_cam_entries(qdev); + ql_dump_routing_entries(qdev); +} +#endif + +#ifdef QL_STAT_DUMP + +#define DUMP_STAT(qdev, stat) \ + pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat) + +void ql_dump_stat(struct ql_adapter *qdev) +{ + pr_err("%s: Enter\n", __func__); + DUMP_STAT(qdev, tx_pkts); + DUMP_STAT(qdev, tx_bytes); + DUMP_STAT(qdev, tx_mcast_pkts); + DUMP_STAT(qdev, tx_bcast_pkts); + DUMP_STAT(qdev, tx_ucast_pkts); + DUMP_STAT(qdev, tx_ctl_pkts); + DUMP_STAT(qdev, tx_pause_pkts); + DUMP_STAT(qdev, tx_64_pkt); + DUMP_STAT(qdev, tx_65_to_127_pkt); + DUMP_STAT(qdev, tx_128_to_255_pkt); + DUMP_STAT(qdev, tx_256_511_pkt); + DUMP_STAT(qdev, tx_512_to_1023_pkt); + DUMP_STAT(qdev, tx_1024_to_1518_pkt); + DUMP_STAT(qdev, tx_1519_to_max_pkt); + DUMP_STAT(qdev, tx_undersize_pkt); + DUMP_STAT(qdev, tx_oversize_pkt); + DUMP_STAT(qdev, rx_bytes); + DUMP_STAT(qdev, rx_bytes_ok); + DUMP_STAT(qdev, rx_pkts); + DUMP_STAT(qdev, rx_pkts_ok); + DUMP_STAT(qdev, rx_bcast_pkts); + DUMP_STAT(qdev, rx_mcast_pkts); + DUMP_STAT(qdev, rx_ucast_pkts); + DUMP_STAT(qdev, rx_undersize_pkts); + DUMP_STAT(qdev, rx_oversize_pkts); + DUMP_STAT(qdev, rx_jabber_pkts); + DUMP_STAT(qdev, rx_undersize_fcerr_pkts); + DUMP_STAT(qdev, rx_drop_events); + DUMP_STAT(qdev, rx_fcerr_pkts); + DUMP_STAT(qdev, rx_align_err); + DUMP_STAT(qdev, rx_symbol_err); + DUMP_STAT(qdev, rx_mac_err); + DUMP_STAT(qdev, rx_ctl_pkts); + DUMP_STAT(qdev, rx_pause_pkts); + DUMP_STAT(qdev, rx_64_pkts); + DUMP_STAT(qdev, rx_65_to_127_pkts); + DUMP_STAT(qdev, rx_128_255_pkts); + DUMP_STAT(qdev, rx_256_511_pkts); + DUMP_STAT(qdev, 
rx_512_to_1023_pkts); + DUMP_STAT(qdev, rx_1024_to_1518_pkts); + DUMP_STAT(qdev, rx_1519_to_max_pkts); + DUMP_STAT(qdev, rx_len_err_pkts); +}; +#endif + +#ifdef QL_DEV_DUMP + +#define DUMP_QDEV_FIELD(qdev, type, field) \ + pr_err("qdev->%-24s = " type "\n", #field, qdev->field) +#define DUMP_QDEV_DMA_FIELD(qdev, field) \ + pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field) +#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \ + pr_err("%s[%d].%s = " type "\n", \ + #array, index, #field, qdev->array[index].field); +void ql_dump_qdev(struct ql_adapter *qdev) +{ + int i; + DUMP_QDEV_FIELD(qdev, "%lx", flags); + DUMP_QDEV_FIELD(qdev, "%p", vlgrp); + DUMP_QDEV_FIELD(qdev, "%p", pdev); + DUMP_QDEV_FIELD(qdev, "%p", ndev); + DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id); + DUMP_QDEV_FIELD(qdev, "%p", reg_base); + DUMP_QDEV_FIELD(qdev, "%p", doorbell_area); + DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size); + DUMP_QDEV_FIELD(qdev, "%x", msg_enable); + DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area); + DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma); + DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area); + DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma); + DUMP_QDEV_FIELD(qdev, "%d", intr_count); + if (qdev->msi_x_entry) + for (i = 0; i < qdev->intr_count; i++) { + DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector); + DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry); + } + for (i = 0; i < qdev->intr_count; i++) { + DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev); + DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr); + DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask); + } + DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count); + DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count); + DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size); + DUMP_QDEV_FIELD(qdev, "%p", ring_mem); + DUMP_QDEV_FIELD(qdev, "%d", intr_count); + DUMP_QDEV_FIELD(qdev, "%p", tx_ring); + DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count); + DUMP_QDEV_FIELD(qdev, "%p", rx_ring); + DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue); + DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask); + DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up); + DUMP_QDEV_FIELD(qdev, "0x%08x", port_init); +} +#endif + +#ifdef QL_CB_DUMP +void ql_dump_wqicb(struct wqicb *wqicb) +{ + pr_err("Dumping wqicb stuff...\n"); + pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len)); + pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags)); + pr_err("wqicb->cq_id_rss = %d\n", + le16_to_cpu(wqicb->cq_id_rss)); + pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid)); + pr_err("wqicb->wq_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(wqicb->addr)); + pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); +} + +void ql_dump_tx_ring(struct tx_ring *tx_ring) +{ + if (tx_ring == NULL) + return; + pr_err("===================== Dumping tx_ring %d ===============\n", + tx_ring->wq_id); + pr_err("tx_ring->base = %p\n", tx_ring->wq_base); + pr_err("tx_ring->base_dma = 0x%llx\n", + (unsigned long long) tx_ring->wq_base_dma); + pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n", + tx_ring->cnsmr_idx_sh_reg, + tx_ring->cnsmr_idx_sh_reg + ? 
ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); + pr_err("tx_ring->size = %d\n", tx_ring->wq_size); + pr_err("tx_ring->len = %d\n", tx_ring->wq_len); + pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg); + pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg); + pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx); + pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id); + pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id); + pr_err("tx_ring->q = %p\n", tx_ring->q); + pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count)); +} + +void ql_dump_ricb(struct ricb *ricb) +{ + int i; + pr_err("===================== Dumping ricb ===============\n"); + pr_err("Dumping ricb stuff...\n"); + + pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f); + pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n", + ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", + ricb->flags & RSS_L6K ? "RSS_L6K " : "", + ricb->flags & RSS_LI ? "RSS_LI " : "", + ricb->flags & RSS_LB ? "RSS_LB " : "", + ricb->flags & RSS_LM ? "RSS_LM " : "", + ricb->flags & RSS_RI4 ? "RSS_RI4 " : "", + ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", + ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", + ricb->flags & RSS_RT6 ? "RSS_RT6 " : ""); + pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask)); + for (i = 0; i < 16; i++) + pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i, + le32_to_cpu(ricb->hash_cq_id[i])); + for (i = 0; i < 10; i++) + pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i, + le32_to_cpu(ricb->ipv6_hash_key[i])); + for (i = 0; i < 4; i++) + pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i, + le32_to_cpu(ricb->ipv4_hash_key[i])); +} + +void ql_dump_cqicb(struct cqicb *cqicb) +{ + pr_err("Dumping cqicb stuff...\n"); + + pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect); + pr_err("cqicb->flags = %x\n", cqicb->flags); + pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len)); + pr_err("cqicb->addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->addr)); + pr_err("cqicb->prod_idx_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); + pr_err("cqicb->pkt_delay = 0x%.04x\n", + le16_to_cpu(cqicb->pkt_delay)); + pr_err("cqicb->irq_delay = 0x%.04x\n", + le16_to_cpu(cqicb->irq_delay)); + pr_err("cqicb->lbq_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); + pr_err("cqicb->lbq_buf_size = 0x%.04x\n", + le16_to_cpu(cqicb->lbq_buf_size)); + pr_err("cqicb->lbq_len = 0x%.04x\n", + le16_to_cpu(cqicb->lbq_len)); + pr_err("cqicb->sbq_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); + pr_err("cqicb->sbq_buf_size = 0x%.04x\n", + le16_to_cpu(cqicb->sbq_buf_size)); + pr_err("cqicb->sbq_len = 0x%.04x\n", + le16_to_cpu(cqicb->sbq_len)); +} + +void ql_dump_rx_ring(struct rx_ring *rx_ring) +{ + if (rx_ring == NULL) + return; + pr_err("===================== Dumping rx_ring %d ===============\n", + rx_ring->cq_id); + pr_err("Dumping rx_ring %d, type = %s%s%s\n", + rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", + rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", + rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); + pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); + pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); + pr_err("rx_ring->cq_base_dma = %llx\n", + (unsigned long long) rx_ring->cq_base_dma); + pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size); + pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len); + pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n", + rx_ring->prod_idx_sh_reg, + rx_ring->prod_idx_sh_reg + ? 
ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); + pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n", + (unsigned long long) rx_ring->prod_idx_sh_reg_dma); + pr_err("rx_ring->cnsmr_idx_db_reg = %p\n", + rx_ring->cnsmr_idx_db_reg); + pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx); + pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); + pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); + + pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); + pr_err("rx_ring->lbq_base_dma = %llx\n", + (unsigned long long) rx_ring->lbq_base_dma); + pr_err("rx_ring->lbq_base_indirect = %p\n", + rx_ring->lbq_base_indirect); + pr_err("rx_ring->lbq_base_indirect_dma = %llx\n", + (unsigned long long) rx_ring->lbq_base_indirect_dma); + pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); + pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); + pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); + pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n", + rx_ring->lbq_prod_idx_db_reg); + pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); + pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); + pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); + pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); + pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size); + + pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); + pr_err("rx_ring->sbq_base_dma = %llx\n", + (unsigned long long) rx_ring->sbq_base_dma); + pr_err("rx_ring->sbq_base_indirect = %p\n", + rx_ring->sbq_base_indirect); + pr_err("rx_ring->sbq_base_indirect_dma = %llx\n", + (unsigned long long) rx_ring->sbq_base_indirect_dma); + pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); + pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); + pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); + pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n", + rx_ring->sbq_prod_idx_db_reg); + pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); + pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); + pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); + pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); + pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size); + pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); + pr_err("rx_ring->irq = %d\n", rx_ring->irq); + pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); + pr_err("rx_ring->qdev = %p\n", rx_ring->qdev); +} + +void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) +{ + void *ptr; + + pr_err("%s: Enter\n", __func__); + + ptr = kmalloc(size, GFP_ATOMIC); + if (ptr == NULL) { + pr_err("%s: Couldn't allocate a buffer\n", __func__); + return; + } + + if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { + pr_err("%s: Failed to upload control block!\n", __func__); + goto fail_it; + } + switch (bit) { + case CFG_DRQ: + ql_dump_wqicb((struct wqicb *)ptr); + break; + case CFG_DCQ: + ql_dump_cqicb((struct cqicb *)ptr); + break; + case CFG_DR: + ql_dump_ricb((struct ricb *)ptr); + break; + default: + pr_err("%s: Invalid bit value = %x\n", __func__, bit); + break; + } +fail_it: + kfree(ptr); +} +#endif + +#ifdef QL_OB_DUMP +void ql_dump_tx_desc(struct tx_buf_desc *tbd) +{ + pr_err("tbd->addr = 0x%llx\n", + le64_to_cpu((u64) tbd->addr)); + pr_err("tbd->len = %d\n", + le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); + pr_err("tbd->flags = %s %s\n", + tbd->len & TX_DESC_C ? "C" : ".", + tbd->len & TX_DESC_E ? 
"E" : "."); + tbd++; + pr_err("tbd->addr = 0x%llx\n", + le64_to_cpu((u64) tbd->addr)); + pr_err("tbd->len = %d\n", + le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); + pr_err("tbd->flags = %s %s\n", + tbd->len & TX_DESC_C ? "C" : ".", + tbd->len & TX_DESC_E ? "E" : "."); + tbd++; + pr_err("tbd->addr = 0x%llx\n", + le64_to_cpu((u64) tbd->addr)); + pr_err("tbd->len = %d\n", + le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); + pr_err("tbd->flags = %s %s\n", + tbd->len & TX_DESC_C ? "C" : ".", + tbd->len & TX_DESC_E ? "E" : "."); + +} + +void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) +{ + struct ob_mac_tso_iocb_req *ob_mac_tso_iocb = + (struct ob_mac_tso_iocb_req *)ob_mac_iocb; + struct tx_buf_desc *tbd; + u16 frame_len; + + pr_err("%s\n", __func__); + pr_err("opcode = %s\n", + (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); + pr_err("flags1 = %s %s %s %s %s\n", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); + pr_err("flags2 = %s %s %s\n", + ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", + ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", + ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); + pr_err("flags3 = %s %s %s\n", + ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", + ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", + ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); + pr_err("tid = %x\n", ob_mac_iocb->tid); + pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx); + pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); + if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { + pr_err("frame_len = %d\n", + le32_to_cpu(ob_mac_tso_iocb->frame_len)); + pr_err("mss = %d\n", + le16_to_cpu(ob_mac_tso_iocb->mss)); + pr_err("prot_hdr_len = %d\n", + le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); + pr_err("hdr_offset = 0x%.04x\n", + le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); + frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); + } else { + pr_err("frame_len = %d\n", + le16_to_cpu(ob_mac_iocb->frame_len)); + frame_len = le16_to_cpu(ob_mac_iocb->frame_len); + } + tbd = &ob_mac_iocb->tbd[0]; + ql_dump_tx_desc(tbd); +} + +void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) +{ + pr_err("%s\n", __func__); + pr_err("opcode = %d\n", ob_mac_rsp->opcode); + pr_err("flags = %s %s %s %s %s %s %s\n", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", + ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); + pr_err("tid = %x\n", ob_mac_rsp->tid); +} +#endif + +#ifdef QL_IB_DUMP +void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) +{ + pr_err("%s\n", __func__); + pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode); + pr_err("flags1 = %s%s%s%s%s%s\n", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? 
"B " : ""); + + if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) + pr_err("%s%s%s Multicast\n", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); + + pr_err("flags2 = %s%s%s%s%s\n", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); + + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) + pr_err("%s%s%s%s%s error\n", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); + + pr_err("flags3 = %s%s\n", + ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", + ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); + + if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) + pr_err("RSS flags = %s%s%s%s\n", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : ""); + + pr_err("data_len = %d\n", + le32_to_cpu(ib_mac_rsp->data_len)); + pr_err("data_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); + if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) + pr_err("rss = %x\n", + le32_to_cpu(ib_mac_rsp->rss)); + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) + pr_err("vlan_id = %x\n", + le16_to_cpu(ib_mac_rsp->vlan_id)); + + pr_err("flags4 = %s%s%s\n", + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? 
"HL " : ""); + + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { + pr_err("hdr length = %d\n", + le32_to_cpu(ib_mac_rsp->hdr_len)); + pr_err("hdr addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); + } +} +#endif + +#ifdef QL_ALL_DUMP +void ql_dump_all(struct ql_adapter *qdev) +{ + int i; + + QL_DUMP_REGS(qdev); + QL_DUMP_QDEV(qdev); + for (i = 0; i < qdev->tx_ring_count; i++) { + QL_DUMP_TX_RING(&qdev->tx_ring[i]); + QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]); + } + for (i = 0; i < qdev->rx_ring_count; i++) { + QL_DUMP_RX_RING(&qdev->rx_ring[i]); + QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]); + } +} +#endif diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c new file mode 100644 index 0000000..9b67bfe --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -0,0 +1,688 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include "qlge.h" + +static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { + "Loopback test (offline)" +}; +#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) + +static int ql_update_ring_coalescing(struct ql_adapter *qdev) +{ + int i, status = 0; + struct rx_ring *rx_ring; + struct cqicb *cqicb; + + if (!netif_running(qdev->ndev)) + return status; + + /* Skip the default queue, and update the outbound handler + * queues if they changed. + */ + cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count]; + if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs || + le16_to_cpu(cqicb->pkt_delay) != + qdev->tx_max_coalesced_frames) { + for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + cqicb = (struct cqicb *)rx_ring; + cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); + cqicb->pkt_delay = + cpu_to_le16(qdev->tx_max_coalesced_frames); + cqicb->flags = FLAGS_LI; + status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), + CFG_LCQ, rx_ring->cq_id); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to load CQICB.\n"); + goto exit; + } + } + } + + /* Update the inbound (RSS) handler queues if they changed. */ + cqicb = (struct cqicb *)&qdev->rx_ring[0]; + if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs || + le16_to_cpu(cqicb->pkt_delay) != + qdev->rx_max_coalesced_frames) { + for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { + rx_ring = &qdev->rx_ring[i]; + cqicb = (struct cqicb *)rx_ring; + cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); + cqicb->pkt_delay = + cpu_to_le16(qdev->rx_max_coalesced_frames); + cqicb->flags = FLAGS_LI; + status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), + CFG_LCQ, rx_ring->cq_id); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to load CQICB.\n"); + goto exit; + } + } + } +exit: + return status; +} + +static void ql_update_stats(struct ql_adapter *qdev) +{ + u32 i; + u64 data; + u64 *iter = &qdev->nic_stats.tx_pkts; + + spin_lock(&qdev->stats_lock); + if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { + netif_err(qdev, drv, qdev->ndev, + "Couldn't get xgmac sem.\n"); + goto quit; + } + /* + * Get TX statistics. 
+ */ + for (i = 0x200; i < 0x280; i += 8) { + if (ql_read_xgmac_reg64(qdev, i, &data)) { + netif_err(qdev, drv, qdev->ndev, + "Error reading status register 0x%.04x.\n", + i); + goto end; + } else + *iter = data; + iter++; + } + + /* + * Get RX statistics. + */ + for (i = 0x300; i < 0x3d0; i += 8) { + if (ql_read_xgmac_reg64(qdev, i, &data)) { + netif_err(qdev, drv, qdev->ndev, + "Error reading status register 0x%.04x.\n", + i); + goto end; + } else + *iter = data; + iter++; + } + + /* + * Get Per-priority TX pause frame counter statistics. + */ + for (i = 0x500; i < 0x540; i += 8) { + if (ql_read_xgmac_reg64(qdev, i, &data)) { + netif_err(qdev, drv, qdev->ndev, + "Error reading status register 0x%.04x.\n", + i); + goto end; + } else + *iter = data; + iter++; + } + + /* + * Get Per-priority RX pause frame counter statistics. + */ + for (i = 0x568; i < 0x5a8; i += 8) { + if (ql_read_xgmac_reg64(qdev, i, &data)) { + netif_err(qdev, drv, qdev->ndev, + "Error reading status register 0x%.04x.\n", + i); + goto end; + } else + *iter = data; + iter++; + } + + /* + * Get RX NIC FIFO DROP statistics. + */ + if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) { + netif_err(qdev, drv, qdev->ndev, + "Error reading status register 0x%.04x.\n", i); + goto end; + } else + *iter = data; +end: + ql_sem_unlock(qdev, qdev->xg_sem_mask); +quit: + spin_unlock(&qdev->stats_lock); + + QL_DUMP_STAT(qdev); +} + +static char ql_stats_str_arr[][ETH_GSTRING_LEN] = { + {"tx_pkts"}, + {"tx_bytes"}, + {"tx_mcast_pkts"}, + {"tx_bcast_pkts"}, + {"tx_ucast_pkts"}, + {"tx_ctl_pkts"}, + {"tx_pause_pkts"}, + {"tx_64_pkts"}, + {"tx_65_to_127_pkts"}, + {"tx_128_to_255_pkts"}, + {"tx_256_511_pkts"}, + {"tx_512_to_1023_pkts"}, + {"tx_1024_to_1518_pkts"}, + {"tx_1519_to_max_pkts"}, + {"tx_undersize_pkts"}, + {"tx_oversize_pkts"}, + {"rx_bytes"}, + {"rx_bytes_ok"}, + {"rx_pkts"}, + {"rx_pkts_ok"}, + {"rx_bcast_pkts"}, + {"rx_mcast_pkts"}, + {"rx_ucast_pkts"}, + {"rx_undersize_pkts"}, + {"rx_oversize_pkts"}, + {"rx_jabber_pkts"}, + {"rx_undersize_fcerr_pkts"}, + {"rx_drop_events"}, + {"rx_fcerr_pkts"}, + {"rx_align_err"}, + {"rx_symbol_err"}, + {"rx_mac_err"}, + {"rx_ctl_pkts"}, + {"rx_pause_pkts"}, + {"rx_64_pkts"}, + {"rx_65_to_127_pkts"}, + {"rx_128_255_pkts"}, + {"rx_256_511_pkts"}, + {"rx_512_to_1023_pkts"}, + {"rx_1024_to_1518_pkts"}, + {"rx_1519_to_max_pkts"}, + {"rx_len_err_pkts"}, + {"tx_cbfc_pause_frames0"}, + {"tx_cbfc_pause_frames1"}, + {"tx_cbfc_pause_frames2"}, + {"tx_cbfc_pause_frames3"}, + {"tx_cbfc_pause_frames4"}, + {"tx_cbfc_pause_frames5"}, + {"tx_cbfc_pause_frames6"}, + {"tx_cbfc_pause_frames7"}, + {"rx_cbfc_pause_frames0"}, + {"rx_cbfc_pause_frames1"}, + {"rx_cbfc_pause_frames2"}, + {"rx_cbfc_pause_frames3"}, + {"rx_cbfc_pause_frames4"}, + {"rx_cbfc_pause_frames5"}, + {"rx_cbfc_pause_frames6"}, + {"rx_cbfc_pause_frames7"}, + {"rx_nic_fifo_drop"}, +}; + +static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr)); + break; + } +} + +static int ql_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return QLGE_TEST_LEN; + case ETH_SS_STATS: + return ARRAY_SIZE(ql_stats_str_arr); + default: + return -EOPNOTSUPP; + } +} + +static void +ql_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + struct nic_stats *s = &qdev->nic_stats; + + ql_update_stats(qdev); + + *data++ = s->tx_pkts; + 
*data++ = s->tx_bytes; + *data++ = s->tx_mcast_pkts; + *data++ = s->tx_bcast_pkts; + *data++ = s->tx_ucast_pkts; + *data++ = s->tx_ctl_pkts; + *data++ = s->tx_pause_pkts; + *data++ = s->tx_64_pkt; + *data++ = s->tx_65_to_127_pkt; + *data++ = s->tx_128_to_255_pkt; + *data++ = s->tx_256_511_pkt; + *data++ = s->tx_512_to_1023_pkt; + *data++ = s->tx_1024_to_1518_pkt; + *data++ = s->tx_1519_to_max_pkt; + *data++ = s->tx_undersize_pkt; + *data++ = s->tx_oversize_pkt; + *data++ = s->rx_bytes; + *data++ = s->rx_bytes_ok; + *data++ = s->rx_pkts; + *data++ = s->rx_pkts_ok; + *data++ = s->rx_bcast_pkts; + *data++ = s->rx_mcast_pkts; + *data++ = s->rx_ucast_pkts; + *data++ = s->rx_undersize_pkts; + *data++ = s->rx_oversize_pkts; + *data++ = s->rx_jabber_pkts; + *data++ = s->rx_undersize_fcerr_pkts; + *data++ = s->rx_drop_events; + *data++ = s->rx_fcerr_pkts; + *data++ = s->rx_align_err; + *data++ = s->rx_symbol_err; + *data++ = s->rx_mac_err; + *data++ = s->rx_ctl_pkts; + *data++ = s->rx_pause_pkts; + *data++ = s->rx_64_pkts; + *data++ = s->rx_65_to_127_pkts; + *data++ = s->rx_128_255_pkts; + *data++ = s->rx_256_511_pkts; + *data++ = s->rx_512_to_1023_pkts; + *data++ = s->rx_1024_to_1518_pkts; + *data++ = s->rx_1519_to_max_pkts; + *data++ = s->rx_len_err_pkts; + *data++ = s->tx_cbfc_pause_frames0; + *data++ = s->tx_cbfc_pause_frames1; + *data++ = s->tx_cbfc_pause_frames2; + *data++ = s->tx_cbfc_pause_frames3; + *data++ = s->tx_cbfc_pause_frames4; + *data++ = s->tx_cbfc_pause_frames5; + *data++ = s->tx_cbfc_pause_frames6; + *data++ = s->tx_cbfc_pause_frames7; + *data++ = s->rx_cbfc_pause_frames0; + *data++ = s->rx_cbfc_pause_frames1; + *data++ = s->rx_cbfc_pause_frames2; + *data++ = s->rx_cbfc_pause_frames3; + *data++ = s->rx_cbfc_pause_frames4; + *data++ = s->rx_cbfc_pause_frames5; + *data++ = s->rx_cbfc_pause_frames6; + *data++ = s->rx_cbfc_pause_frames7; + *data++ = s->rx_nic_fifo_drop; +} + +static int ql_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full; + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->transceiver = XCVR_EXTERNAL; + if ((qdev->link_status & STS_LINK_TYPE_MASK) == + STS_LINK_TYPE_10GBASET) { + ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->port = PORT_TP; + } else { + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + } + + ethtool_cmd_speed_set(ecmd, SPEED_10000); + ecmd->duplex = DUPLEX_FULL; + + return 0; +} + +static void ql_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *drvinfo) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + strncpy(drvinfo->driver, qlge_driver_name, 32); + strncpy(drvinfo->version, qlge_driver_version, 32); + snprintf(drvinfo->fw_version, 32, "v%d.%d.%d", + (qdev->fw_rev_id & 0x00ff0000) >> 16, + (qdev->fw_rev_id & 0x0000ff00) >> 8, + (qdev->fw_rev_id & 0x000000ff)); + strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) + drvinfo->regdump_len = sizeof(struct ql_mpi_coredump); + else + drvinfo->regdump_len = sizeof(struct ql_reg_dump); + drvinfo->eedump_len = 0; +} + +static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + /* What we support. 
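+	 * Only magic-packet wake is advertised; ql_set_wol() below
+	 * rejects any other wolopts bit with -EINVAL.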
*/ + wol->supported = WAKE_MAGIC; + /* What we've currently got set. */ + wol->wolopts = qdev->wol; +} + +static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + qdev->wol = wol->wolopts; + + netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol); + if (!qdev->wol) { + u32 wol = 0; + status = ql_mb_wol_mode(qdev, wol); + netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n", + status == 0 ? "cleared successfully" : "clear failed", + wol); + } + + return 0; +} + +static int ql_set_phys_id(struct net_device *ndev, + enum ethtool_phys_id_state state) + +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + /* Save the current LED settings */ + if (ql_mb_get_led_cfg(qdev)) + return -EIO; + + /* Start blinking */ + ql_mb_set_led_cfg(qdev, QL_LED_BLINK); + return 0; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + if (ql_mb_set_led_cfg(qdev, qdev->led_config)) + return -EIO; + return 0; + + default: + return -EINVAL; + } +} + +static int ql_start_loopback(struct ql_adapter *qdev) +{ + if (netif_carrier_ok(qdev->ndev)) { + set_bit(QL_LB_LINK_UP, &qdev->flags); + netif_carrier_off(qdev->ndev); + } else + clear_bit(QL_LB_LINK_UP, &qdev->flags); + qdev->link_config |= CFG_LOOPBACK_PCS; + return ql_mb_set_port_cfg(qdev); +} + +static void ql_stop_loopback(struct ql_adapter *qdev) +{ + qdev->link_config &= ~CFG_LOOPBACK_PCS; + ql_mb_set_port_cfg(qdev); + if (test_bit(QL_LB_LINK_UP, &qdev->flags)) { + netif_carrier_on(qdev->ndev); + clear_bit(QL_LB_LINK_UP, &qdev->flags); + } +} + +static void ql_create_lb_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +void ql_check_lb_frame(struct ql_adapter *qdev, + struct sk_buff *skb) +{ + unsigned int frame_size = skb->len; + + if ((*(skb->data + 3) == 0xFF) && + (*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + atomic_dec(&qdev->lb_count); + return; + } +} + +static int ql_run_loopback_test(struct ql_adapter *qdev) +{ + int i; + netdev_tx_t rc; + struct sk_buff *skb; + unsigned int size = SMALL_BUF_MAP_SIZE; + + for (i = 0; i < 64; i++) { + skb = netdev_alloc_skb(qdev->ndev, size); + if (!skb) + return -ENOMEM; + + skb->queue_mapping = 0; + skb_put(skb, size); + ql_create_lb_frame(skb, size); + rc = ql_lb_send(skb, qdev->ndev); + if (rc != NETDEV_TX_OK) + return -EPIPE; + atomic_inc(&qdev->lb_count); + } + /* Give queue time to settle before testing results. */ + msleep(2); + ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); + return atomic_read(&qdev->lb_count) ? 
-EIO : 0; +} + +static int ql_loopback_test(struct ql_adapter *qdev, u64 *data) +{ + *data = ql_start_loopback(qdev); + if (*data) + goto out; + *data = ql_run_loopback_test(qdev); +out: + ql_stop_loopback(qdev); + return *data; +} + +static void ql_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + if (netif_running(ndev)) { + set_bit(QL_SELFTEST, &qdev->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + if (ql_loopback_test(qdev, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + } else { + /* Online tests */ + data[0] = 0; + } + clear_bit(QL_SELFTEST, &qdev->flags); + /* Give link time to come up after + * port configuration changes. + */ + msleep_interruptible(4 * 1000); + } else { + netif_err(qdev, drv, qdev->ndev, + "is down, Loopback test will fail.\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + } +} + +static int ql_get_regs_len(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) + return sizeof(struct ql_mpi_coredump); + else + return sizeof(struct ql_reg_dump); +} + +static void ql_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *p) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + ql_get_dump(qdev, p); + qdev->core_is_dumped = 0; + if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) + regs->len = sizeof(struct ql_mpi_coredump); + else + regs->len = sizeof(struct ql_reg_dump); +} + +static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + struct ql_adapter *qdev = netdev_priv(dev); + + c->rx_coalesce_usecs = qdev->rx_coalesce_usecs; + c->tx_coalesce_usecs = qdev->tx_coalesce_usecs; + + /* This chip coalesces as follows: + * If a packet arrives, hold off interrupts until + * cqicb->int_delay expires, but if no other packets arrive don't + * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a + * timer to coalesce on a frame basis. So, we have to take ethtool's + * max_coalesced_frames value and convert it to a delay in microseconds. + * We do this by using a basic thoughput of 1,000,000 frames per + * second @ (1024 bytes). This means one frame per usec. So it's a + * simple one to one ratio. + */ + c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames; + c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames; + + return 0; +} + +static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + /* Validate user parameters. */ + if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2) + return -EINVAL; + /* Don't wait more than 10 usec. */ + if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) + return -EINVAL; + if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2) + return -EINVAL; + if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) + return -EINVAL; + + /* Verify a change took place before updating the hardware. 
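+	 * (Reloading the CQICBs via ql_update_ring_coalescing()
+	 * touches every affected completion queue, so skip the
+	 * hardware update entirely when all four values are
+	 * unchanged.)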
*/ + if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs && + qdev->tx_coalesce_usecs == c->tx_coalesce_usecs && + qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames && + qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames) + return 0; + + qdev->rx_coalesce_usecs = c->rx_coalesce_usecs; + qdev->tx_coalesce_usecs = c->tx_coalesce_usecs; + qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames; + qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames; + + return ql_update_ring_coalescing(qdev); +} + +static void ql_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ql_adapter *qdev = netdev_priv(netdev); + + ql_mb_get_port_cfg(qdev); + if (qdev->link_config & CFG_PAUSE_STD) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ql_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ql_adapter *qdev = netdev_priv(netdev); + int status = 0; + + if ((pause->rx_pause) && (pause->tx_pause)) + qdev->link_config |= CFG_PAUSE_STD; + else if (!pause->rx_pause && !pause->tx_pause) + qdev->link_config &= ~CFG_PAUSE_STD; + else + return -EINVAL; + + status = ql_mb_set_port_cfg(qdev); + return status; +} + +static u32 ql_get_msglevel(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + return qdev->msg_enable; +} + +static void ql_set_msglevel(struct net_device *ndev, u32 value) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + qdev->msg_enable = value; +} + +const struct ethtool_ops qlge_ethtool_ops = { + .get_settings = ql_get_settings, + .get_drvinfo = ql_get_drvinfo, + .get_wol = ql_get_wol, + .set_wol = ql_set_wol, + .get_regs_len = ql_get_regs_len, + .get_regs = ql_get_regs, + .get_msglevel = ql_get_msglevel, + .set_msglevel = ql_set_msglevel, + .get_link = ethtool_op_get_link, + .set_phys_id = ql_set_phys_id, + .self_test = ql_self_test, + .get_pauseparam = ql_get_pauseparam, + .set_pauseparam = ql_set_pauseparam, + .get_coalesce = ql_get_coalesce, + .set_coalesce = ql_set_coalesce, + .get_sset_count = ql_get_sset_count, + .get_strings = ql_get_strings, + .get_ethtool_stats = ql_get_ethtool_stats, +}; + diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c new file mode 100644 index 0000000..f07e96e --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -0,0 +1,4987 @@ +/* + * QLogic qlge NIC HBA Driver + * Copyright (c) 2003-2008 QLogic Corporation + * See LICENSE.qlge for copyright and licensing details. 
+ * Author: Linux qlge network device driver by + * Ron Mercer + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qlge.h" + +char qlge_driver_name[] = DRV_NAME; +const char qlge_driver_version[] = DRV_VERSION; + +MODULE_AUTHOR("Ron Mercer "); +MODULE_DESCRIPTION(DRV_STRING " "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg = + NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | +/* NETIF_MSG_TIMER | */ + NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | + NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | +/* NETIF_MSG_TX_QUEUED | */ +/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */ +/* NETIF_MSG_PKTDATA | */ + NETIF_MSG_HW | NETIF_MSG_WOL | 0; + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0664); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +#define MSIX_IRQ 0 +#define MSI_IRQ 1 +#define LEG_IRQ 2 +static int qlge_irq_type = MSIX_IRQ; +module_param(qlge_irq_type, int, 0664); +MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); + +static int qlge_mpi_coredump; +module_param(qlge_mpi_coredump, int, 0); +MODULE_PARM_DESC(qlge_mpi_coredump, + "Option to enable MPI firmware dump. " + "Default is OFF - Do Not allocate memory. "); + +static int qlge_force_coredump; +module_param(qlge_force_coredump, int, 0); +MODULE_PARM_DESC(qlge_force_coredump, + "Option to allow force of firmware core dump. " + "Default is OFF - Do not allow."); + +static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); + +static int ql_wol(struct ql_adapter *qdev); +static void qlge_set_multicast_list(struct net_device *ndev); + +/* This hardware semaphore causes exclusive access to + * resources shared between the NIC driver, MPI firmware, + * FCOE firmware and the FC driver. 
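+ *
+ * Typical usage, as in the register-dump paths above:
+ *
+ *	if (ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK))
+ *		return;		/* semaphore never came free */
+ *	... access the shared MAC address registers ...
+ *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ *
+ * ql_sem_trylock() makes a single attempt; ql_sem_spinlock() retries
+ * for up to 30 periods of 100us before giving up with -ETIMEDOUT.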
+ */ +static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) +{ + u32 sem_bits = 0; + + switch (sem_mask) { + case SEM_XGMAC0_MASK: + sem_bits = SEM_SET << SEM_XGMAC0_SHIFT; + break; + case SEM_XGMAC1_MASK: + sem_bits = SEM_SET << SEM_XGMAC1_SHIFT; + break; + case SEM_ICB_MASK: + sem_bits = SEM_SET << SEM_ICB_SHIFT; + break; + case SEM_MAC_ADDR_MASK: + sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT; + break; + case SEM_FLASH_MASK: + sem_bits = SEM_SET << SEM_FLASH_SHIFT; + break; + case SEM_PROBE_MASK: + sem_bits = SEM_SET << SEM_PROBE_SHIFT; + break; + case SEM_RT_IDX_MASK: + sem_bits = SEM_SET << SEM_RT_IDX_SHIFT; + break; + case SEM_PROC_REG_MASK: + sem_bits = SEM_SET << SEM_PROC_REG_SHIFT; + break; + default: + netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); + return -EINVAL; + } + + ql_write32(qdev, SEM, sem_bits | sem_mask); + return !(ql_read32(qdev, SEM) & sem_bits); +} + +int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) +{ + unsigned int wait_count = 30; + do { + if (!ql_sem_trylock(qdev, sem_mask)) + return 0; + udelay(100); + } while (--wait_count); + return -ETIMEDOUT; +} + +void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) +{ + ql_write32(qdev, SEM, sem_mask); + ql_read32(qdev, SEM); /* flush */ +} + +/* This function waits for a specific bit to come ready + * in a given register. It is used mostly by the initialize + * process, but is also used in kernel thread API such as + * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid. + */ +int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) +{ + u32 temp; + int count = UDELAY_COUNT; + + while (count) { + temp = ql_read32(qdev, reg); + + /* check for errors */ + if (temp & err_bit) { + netif_alert(qdev, probe, qdev->ndev, + "register 0x%.08x access error, value = 0x%.08x!.\n", + reg, temp); + return -EIO; + } else if (temp & bit) + return 0; + udelay(UDELAY_DELAY); + count--; + } + netif_alert(qdev, probe, qdev->ndev, + "Timed out waiting for reg %x to come ready.\n", reg); + return -ETIMEDOUT; +} + +/* The CFG register is used to download TX and RX control blocks + * to the chip. This function waits for an operation to complete. + */ +static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) +{ + int count = UDELAY_COUNT; + u32 temp; + + while (count) { + temp = ql_read32(qdev, CFG); + if (temp & CFG_LE) + return -EIO; + if (!(temp & bit)) + return 0; + udelay(UDELAY_DELAY); + count--; + } + return -ETIMEDOUT; +} + + +/* Used to issue init control blocks to hw. Maps control block, + * sets address, triggers download, waits for completion. + */ +int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, + u16 q_id) +{ + u64 map; + int status = 0; + int direction; + u32 mask; + u32 value; + + direction = + (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? 
PCI_DMA_TODEVICE : + PCI_DMA_FROMDEVICE; + + map = pci_map_single(qdev->pdev, ptr, size, direction); + if (pci_dma_mapping_error(qdev->pdev, map)) { + netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n"); + return -ENOMEM; + } + + status = ql_sem_spinlock(qdev, SEM_ICB_MASK); + if (status) + return status; + + status = ql_wait_cfg(qdev, bit); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Timed out waiting for CFG to come ready.\n"); + goto exit; + } + + ql_write32(qdev, ICB_L, (u32) map); + ql_write32(qdev, ICB_H, (u32) (map >> 32)); + + mask = CFG_Q_MASK | (bit << 16); + value = bit | (q_id << CFG_Q_SHIFT); + ql_write32(qdev, CFG, (mask | value)); + + /* + * Wait for the bit to clear after signaling hw. + */ + status = ql_wait_cfg(qdev, bit); +exit: + ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ + pci_unmap_single(qdev->pdev, map, size, direction); + return status; +} + +/* Get a specific MAC address from the CAM. Used for debug and reg dump. */ +int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, + u32 *value) +{ + u32 offset = 0; + int status; + + switch (type) { + case MAC_ADDR_TYPE_MULTI_MAC: + case MAC_ADDR_TYPE_CAM_MAC: + { + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MR, 0); + if (status) + goto exit; + *value++ = ql_read32(qdev, MAC_ADDR_DATA); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MR, 0); + if (status) + goto exit; + *value++ = ql_read32(qdev, MAC_ADDR_DATA); + if (type == MAC_ADDR_TYPE_CAM_MAC) { + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ + status = + ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, + MAC_ADDR_MR, 0); + if (status) + goto exit; + *value++ = ql_read32(qdev, MAC_ADDR_DATA); + } + break; + } + case MAC_ADDR_TYPE_VLAN: + case MAC_ADDR_TYPE_MULTI_FLTR: + default: + netif_crit(qdev, ifup, qdev->ndev, + "Address type %d not yet supported.\n", type); + status = -EPERM; + } +exit: + return status; +} + +/* Set up a MAC, multicast or VLAN address for the + * inbound frame matching. 
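
ql_get_mac_addr_reg() above shows the chip's indirect register convention: select a word through an index register, poll a ready bit, then read the data register. A compilable sketch of that index/data idiom against a simulated device follows; the register names, the ready bit and the backing CAM array are all invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define IDX_READY (1u << 31)

static uint32_t idx_reg = IDX_READY;    /* index/status register */
static uint32_t data_reg;               /* data register */
static uint32_t cam[64] = { [5] = 0xdeadbeef };

/* Poll the ready bit; a real device drops it while an access is in
 * flight, and the driver returns -ETIMEDOUT if it never returns. */
static int wait_ready(void)
{
        int spins = 1000;

        while (!(idx_reg & IDX_READY))
                if (!--spins)
                        return -1;
        return 0;
}

static int indirect_read(uint32_t index, uint32_t *value)
{
        if (wait_ready())
                return -1;
        idx_reg = IDX_READY | index;    /* select the word to fetch */
        data_reg = cam[index & 63];     /* device latches the data */
        if (wait_ready())
                return -1;
        *value = data_reg;
        return 0;
}

int main(void)
{
        uint32_t v;

        if (!indirect_read(5, &v))
                printf("word 5 = 0x%08x\n", v);
        return 0;
}
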
+ */ +static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, + u16 index) +{ + u32 offset = 0; + int status = 0; + + switch (type) { + case MAC_ADDR_TYPE_MULTI_MAC: + { + u32 upper = (addr[0] << 8) | addr[1]; + u32 lower = (addr[2] << 24) | (addr[3] << 16) | + (addr[4] << 8) | (addr[5]); + + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | + (index << MAC_ADDR_IDX_SHIFT) | + type | MAC_ADDR_E); + ql_write32(qdev, MAC_ADDR_DATA, lower); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | + (index << MAC_ADDR_IDX_SHIFT) | + type | MAC_ADDR_E); + + ql_write32(qdev, MAC_ADDR_DATA, upper); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + break; + } + case MAC_ADDR_TYPE_CAM_MAC: + { + u32 cam_output; + u32 upper = (addr[0] << 8) | addr[1]; + u32 lower = + (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | + (addr[5]); + + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Adding %s address %pM at index %d in the CAM.\n", + type == MAC_ADDR_TYPE_MULTI_MAC ? + "MULTICAST" : "UNICAST", + addr, index); + + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type); /* type */ + ql_write32(qdev, MAC_ADDR_DATA, lower); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type); /* type */ + ql_write32(qdev, MAC_ADDR_DATA, upper); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type); /* type */ + /* This field should also include the queue id + and possibly the function id. Right now we hardcode + the route field to NIC core. + */ + cam_output = (CAM_OUT_ROUTE_NIC | + (qdev-> + func << CAM_OUT_FUNC_SHIFT) | + (0 << CAM_OUT_CQ_ID_SHIFT)); + if (qdev->ndev->features & NETIF_F_HW_VLAN_RX) + cam_output |= CAM_OUT_RV; + /* route to NIC core */ + ql_write32(qdev, MAC_ADDR_DATA, cam_output); + break; + } + case MAC_ADDR_TYPE_VLAN: + { + u32 enable_bit = *((u32 *) &addr[0]); + /* For VLAN, the addr actually holds a bit that + * either enables or disables the vlan id we are + * addressing. It's either MAC_ADDR_E on or off. + * That's bit-27 we're talking about. + */ + netif_info(qdev, ifup, qdev->ndev, + "%s VLAN ID %d %s the CAM.\n", + enable_bit ? "Adding" : "Removing", + index, + enable_bit ? "to" : "from"); + + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type | /* type */ + enable_bit); /* enable/disable */ + break; + } + case MAC_ADDR_TYPE_MULTI_FLTR: + default: + netif_crit(qdev, ifup, qdev->ndev, + "Address type %d not yet supported.\n", type); + status = -EPERM; + } +exit: + return status; +} + +/* Set or clear MAC address in hardware. We sometimes + * have to clear it to prevent wrong frame routing + * especially in a bonding environment. 
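
The CAM writes in ql_set_mac_addr_reg() above pack a six-byte MAC address into a 16-bit "upper" word and a 32-bit "lower" word before handing them to MAC_ADDR_DATA. A quick, runnable check of that packing (the example address is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };

        /* Same split as the driver: addr[0..1] form the upper word,
         * addr[2..5] the lower word. */
        uint32_t upper = (addr[0] << 8) | addr[1];
        uint32_t lower = ((uint32_t)addr[2] << 24) |
                         ((uint32_t)addr[3] << 16) |
                         (addr[4] << 8) | addr[5];

        printf("upper = 0x%04x lower = 0x%08x\n", upper, lower);
        return 0;
}

The multicast path uses the identical split, so a byte-order mistake here would misfile both unicast and multicast filters.
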
+ */ +static int ql_set_mac_addr(struct ql_adapter *qdev, int set) +{ + int status; + char zero_mac_addr[ETH_ALEN]; + char *addr; + + if (set) { + addr = &qdev->current_mac_addr[0]; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Set Mac addr %pM\n", addr); + } else { + memset(zero_mac_addr, 0, ETH_ALEN); + addr = &zero_mac_addr[0]; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Clearing MAC address\n"); + } + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return status; + status = ql_set_mac_addr_reg(qdev, (u8 *) addr, + MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + netif_err(qdev, ifup, qdev->ndev, + "Failed to init mac address.\n"); + return status; +} + +void ql_link_on(struct ql_adapter *qdev) +{ + netif_err(qdev, link, qdev->ndev, "Link is up.\n"); + netif_carrier_on(qdev->ndev); + ql_set_mac_addr(qdev, 1); +} + +void ql_link_off(struct ql_adapter *qdev) +{ + netif_err(qdev, link, qdev->ndev, "Link is down.\n"); + netif_carrier_off(qdev->ndev); + ql_set_mac_addr(qdev, 0); +} + +/* Get a specific frame routing value from the CAM. + * Used for debug and reg dump. + */ +int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) +{ + int status = 0; + + status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); + if (status) + goto exit; + + ql_write32(qdev, RT_IDX, + RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); + status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); + if (status) + goto exit; + *value = ql_read32(qdev, RT_DATA); +exit: + return status; +} + +/* The NIC function for this chip has 16 routing indexes. Each one can be used + * to route different frame types to various inbound queues. We send broadcast/ + * multicast/error frames to the default queue for slow handling, + * and CAM hit/RSS frames to the fast handling queues. + */ +static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, + int enable) +{ + int status = -EINVAL; /* Return error if no mask match. */ + u32 value = 0; + + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s %s mask %s the routing reg.\n", + enable ? "Adding" : "Removing", + index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" : + index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" : + index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" : + index == RT_IDX_BCAST_SLOT ? "BROADCAST" : + index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" : + index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" : + index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" : + index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" : + index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" : + index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" : + index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" : + index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" : + index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" : + index == RT_IDX_UNUSED013 ? "UNUSED13" : + index == RT_IDX_UNUSED014 ? "UNUSED14" : + index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" : + "(Bad index != RT_IDX)", + enable ? "to" : "from"); + + switch (mask) { + case RT_IDX_CAM_HIT: + { + value = RT_IDX_DST_CAM_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_VALID: /* Promiscuous Mode frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. 
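
The debug message in ql_set_routing_reg() above maps a slot index to a human-readable name through a long conditional chain, while the switch continuing below supplies the matching register values. A designated-initializer table is a compact alternative for the name lookup; the indexes here are illustrative stand-ins for the RT_IDX_*_SLOT constants.

#include <stdio.h>

static const char * const rt_slot_names[16] = {
        [0]  = "MAC ERROR/ALL ERROR",
        [1]  = "IP CSUM ERROR",
        [2]  = "TCP/UDP CSUM ERROR",
        [3]  = "BROADCAST",
        [4]  = "MULTICAST MATCH",
        [5]  = "ALL MULTICAST MATCH",
        [8]  = "RSS ALL/IPV4 MATCH",
        [14] = "PROMISCUOUS",
};

static const char *rt_slot_name(unsigned int idx)
{
        const char *name = idx < 16 ? rt_slot_names[idx] : NULL;

        return name ? name : "(Bad index != RT_IDX)";
}

int main(void)
{
        printf("%s\n", rt_slot_name(3));        /* BROADCAST */
        printf("%s\n", rt_slot_name(42));       /* out of range */
        return 0;
}

The chain in the driver behaves the same way; a table simply keeps the slot-to-name mapping in one place.
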
*/ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_IP_CSUM_ERR_SLOT << + RT_IDX_IDX_SHIFT); /* index */ + break; + } + case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << + RT_IDX_IDX_SHIFT); /* index */ + break; + } + case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_MCAST: /* Pass up All Multicast frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */ + { + value = RT_IDX_DST_RSS | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case 0: /* Clear the E-bit on an entry. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (index << RT_IDX_IDX_SHIFT);/* index */ + break; + } + default: + netif_err(qdev, ifup, qdev->ndev, + "Mask type %d not yet supported.\n", mask); + status = -EPERM; + goto exit; + } + + if (value) { + status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); + if (status) + goto exit; + value |= (enable ? RT_IDX_E : 0); + ql_write32(qdev, RT_IDX, value); + ql_write32(qdev, RT_DATA, enable ? mask : 0); + } +exit: + return status; +} + +static void ql_enable_interrupts(struct ql_adapter *qdev) +{ + ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); +} + +static void ql_disable_interrupts(struct ql_adapter *qdev) +{ + ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); +} + +/* If we're running with multiple MSI-X vectors then we enable on the fly. + * Otherwise, we may have multiple outstanding workers and don't want to + * enable until the last one finishes. In this case, the irq_cnt gets + * incremented every time we queue a worker and decremented every time + * a worker finishes. Once it hits zero we enable the interrupt. + */ +u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) +{ + u32 var = 0; + unsigned long hw_flags = 0; + struct intr_context *ctx = qdev->intr_context + intr; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { + /* Always enable if we're MSIX multi interrupts and + * it's not the default (zeroeth) interrupt. + */ + ql_write32(qdev, INTR_EN, + ctx->intr_en_mask); + var = ql_read32(qdev, STS); + return var; + } + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (atomic_dec_and_test(&ctx->irq_cnt)) { + ql_write32(qdev, INTR_EN, + ctx->intr_en_mask); + var = ql_read32(qdev, STS); + } + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return var; +} + +static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) +{ + u32 var = 0; + struct intr_context *ctx; + + /* HW disables for us if we're MSIX multi interrupts and + * it's not the default (zeroeth) interrupt. 
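
The irq_cnt dance in ql_enable_completion_interrupt() above, and in the disable path just below, is a reference count that keeps the interrupt masked while any worker is outstanding: the count is precharged to 1, every disable increments it, and only the enable that brings it back to zero performs the actual register write. A userspace model of that idiom (plain ints here; the driver uses atomics under hw_lock):

#include <stdio.h>

static int irq_cnt = 1;         /* precharged: starts masked */

static void completion_irq_enable(void)
{
        if (--irq_cnt == 0)
                printf("INTR_EN write: unmask\n");
}

static void completion_irq_disable(void)
{
        if (irq_cnt == 0)
                printf("INTR_EN write: mask\n");
        irq_cnt++;
}

int main(void)
{
        completion_irq_enable();        /* 1 -> 0: unmask */
        completion_irq_disable();       /* 0 -> 1: mask */
        completion_irq_disable();       /* 1 -> 2: already masked */
        completion_irq_enable();        /* 2 -> 1: still masked */
        completion_irq_enable();        /* 1 -> 0: unmask again */
        return 0;
}
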
+ */ + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) + return 0; + + ctx = qdev->intr_context + intr; + spin_lock(&qdev->hw_lock); + if (!atomic_read(&ctx->irq_cnt)) { + ql_write32(qdev, INTR_EN, + ctx->intr_dis_mask); + var = ql_read32(qdev, STS); + } + atomic_inc(&ctx->irq_cnt); + spin_unlock(&qdev->hw_lock); + return var; +} + +static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) +{ + int i; + for (i = 0; i < qdev->intr_count; i++) { + /* The enable call does a atomic_dec_and_test + * and enables only if the result is zero. + * So we precharge it here. + */ + if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || + i == 0)) + atomic_set(&qdev->intr_context[i].irq_cnt, 1); + ql_enable_completion_interrupt(qdev, i); + } + +} + +static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) +{ + int status, i; + u16 csum = 0; + __le16 *flash = (__le16 *)&qdev->flash; + + status = strncmp((char *)&qdev->flash, str, 4); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n"); + return status; + } + + for (i = 0; i < size; i++) + csum += le16_to_cpu(*flash++); + + if (csum) + netif_err(qdev, ifup, qdev->ndev, + "Invalid flash checksum, csum = 0x%.04x.\n", csum); + + return csum; +} + +static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) +{ + int status = 0; + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, + FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); + if (status) + goto exit; + /* set up for reg read */ + ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, + FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); + if (status) + goto exit; + /* This data is stored on flash as an array of + * __le32. Since ql_read32() returns cpu endian + * we need to swap it back. + */ + *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA)); +exit: + return status; +} + +static int ql_get_8000_flash_params(struct ql_adapter *qdev) +{ + u32 i, size; + int status; + __le32 *p = (__le32 *)&qdev->flash; + u32 offset; + u8 mac_addr[6]; + + /* Get flash offset for function and adjust + * for dword access. + */ + if (!qdev->port) + offset = FUNC0_FLASH_OFFSET / sizeof(u32); + else + offset = FUNC1_FLASH_OFFSET / sizeof(u32); + + if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) + return -ETIMEDOUT; + + size = sizeof(struct flash_params_8000) / sizeof(u32); + for (i = 0; i < size; i++, p++) { + status = ql_read_flash_word(qdev, i+offset, p); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Error reading flash.\n"); + goto exit; + } + } + + status = ql_validate_flash(qdev, + sizeof(struct flash_params_8000) / sizeof(u16), + "8000"); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); + status = -EINVAL; + goto exit; + } + + /* Extract either manufacturer or BOFM modified + * MAC address. 
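
ql_validate_flash() above accepts a flash image only when its 16-bit words sum to zero. A runnable model of that rule; the example forges a tiny "image" whose final word compensates for the rest. (The driver additionally byte-swaps each word with le16_to_cpu(), which this sketch ignores.)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t image[8] = { 0x3830, 0x3030, 0x1234, 0xabcd, 5, 6, 7 };
        uint16_t sum = 0;
        size_t i;

        for (i = 0; i < 7; i++)
                sum += image[i];
        image[7] = (uint16_t)-sum;      /* compensating checksum word */

        sum = 0;
        for (i = 0; i < 8; i++)
                sum += image[i];

        printf("csum = 0x%04x (%s)\n", sum, sum ? "corrupt" : "ok");
        return 0;
}
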
+ */
+	if (qdev->flash.flash_params_8000.data_type1 == 2)
+		memcpy(mac_addr,
+			qdev->flash.flash_params_8000.mac_addr1,
+			qdev->ndev->addr_len);
+	else
+		memcpy(mac_addr,
+			qdev->flash.flash_params_8000.mac_addr,
+			qdev->ndev->addr_len);
+
+	if (!is_valid_ether_addr(mac_addr)) {
+		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
+		status = -EINVAL;
+		goto exit;
+	}
+
+	memcpy(qdev->ndev->dev_addr,
+		mac_addr,
+		qdev->ndev->addr_len);
+
+exit:
+	ql_sem_unlock(qdev, SEM_FLASH_MASK);
+	return status;
+}
+
+static int ql_get_8012_flash_params(struct ql_adapter *qdev)
+{
+	int i;
+	int status;
+	__le32 *p = (__le32 *)&qdev->flash;
+	u32 offset = 0;
+	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
+
+	/* Second function's parameters follow the first
+	 * function's.
+	 */
+	if (qdev->port)
+		offset = size;
+
+	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+		return -ETIMEDOUT;
+
+	for (i = 0; i < size; i++, p++) {
+		status = ql_read_flash_word(qdev, i+offset, p);
+		if (status) {
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Error reading flash.\n");
+			goto exit;
+		}
+
+	}
+
+	status = ql_validate_flash(qdev,
+			sizeof(struct flash_params_8012) / sizeof(u16),
+			"8012");
+	if (status) {
+		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
+		status = -EINVAL;
+		goto exit;
+	}
+
+	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
+		status = -EINVAL;
+		goto exit;
+	}
+
+	memcpy(qdev->ndev->dev_addr,
+		qdev->flash.flash_params_8012.mac_addr,
+		qdev->ndev->addr_len);
+
+exit:
+	ql_sem_unlock(qdev, SEM_FLASH_MASK);
+	return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair. Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		return status;
+	/* write the data to the data reg */
+	ql_write32(qdev, XGMAC_DATA, data);
+	/* trigger the write */
+	ql_write32(qdev, XGMAC_ADDR, reg);
+	return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair. Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, XGMAC_DATA);
+exit:
+	return status;
+}
+
+/* This is used for reading the 64-bit statistics regs. */
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
+{
+	int status = 0;
+	u32 hi = 0;
+	u32 lo = 0;
+
+	status = ql_read_xgmac_reg(qdev, reg, &lo);
+	if (status)
+		goto exit;
+
+	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
+	if (status)
+		goto exit;
+
+	*data = (u64) lo | ((u64) hi << 32);
+
+exit:
+	return status;
+}
+
+static int ql_8000_port_initialize(struct ql_adapter *qdev)
+{
+	int status;
+	/*
+	 * Get MPI firmware version for driver banner
+	 * and ethtool info.
+ */
+	status = ql_mb_about_fw(qdev);
+	if (status)
+		goto exit;
+	status = ql_mb_get_fw_state(qdev);
+	if (status)
+		goto exit;
+	/* Wake up a worker to get/set the TX/RX frame sizes. */
+	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
+exit:
+	return status;
+}
+
+/* Take the MAC Core out of reset.
+ * Enable statistics counting.
+ * Take the transmitter/receiver out of reset.
+ * This functionality may be done in the MPI firmware at a
+ * later date.
+ */
+static int ql_8012_port_initialize(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 data;
+
+	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
+		/* Another function has the semaphore, so
+		 * wait for the port init bit to come ready.
+		 */
+		netif_info(qdev, link, qdev->ndev,
+			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
+		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
+		if (status) {
+			netif_crit(qdev, link, qdev->ndev,
+				   "Port initialize timed out.\n");
+		}
+		return status;
+	}
+
+	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
+	/* Set the core reset. */
+	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+	if (status)
+		goto end;
+	data |= GLOBAL_CFG_RESET;
+	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+	if (status)
+		goto end;
+
+	/* Clear the core reset and turn on jumbo for receiver. */
+	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
+	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
+	data |= GLOBAL_CFG_TX_STAT_EN;
+	data |= GLOBAL_CFG_RX_STAT_EN;
+	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+	if (status)
+		goto end;
+
+	/* Enable transmitter, and clear its reset. */
+	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
+	if (status)
+		goto end;
+	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
+	data |= TX_CFG_EN;	/* Enable the transmitter. */
+	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
+	if (status)
+		goto end;
+
+	/* Enable receiver and clear its reset. */
+	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
+	if (status)
+		goto end;
+	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
+	data |= RX_CFG_EN;	/* Enable the receiver. */
+	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
+	if (status)
+		goto end;
+
+	/* Turn on jumbo. */
+	status =
+		ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
+	if (status)
+		goto end;
+	status =
+		ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
+	if (status)
+		goto end;
+
+	/* Signal to the world that the port is enabled. */
+	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
+end:
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+	return status;
+}
+
+static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+{
+	return PAGE_SIZE << qdev->lbq_buf_order;
+}
+
+/* Get the next large buffer. */
+static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
+	rx_ring->lbq_curr_idx++;
+	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
+		rx_ring->lbq_curr_idx = 0;
+	rx_ring->lbq_free_cnt++;
+	return lbq_desc;
+}
+
+static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+		struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+
+	pci_dma_sync_single_for_cpu(qdev->pdev,
+				    dma_unmap_addr(lbq_desc, mapaddr),
+				    rx_ring->lbq_buf_size,
+				    PCI_DMA_FROMDEVICE);
+
+	/* If it's the last chunk of our master page then
+	 * we unmap it.
+ */ + if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) + == ql_lbq_block_size(qdev)) + pci_unmap_page(qdev->pdev, + lbq_desc->p.pg_chunk.map, + ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + return lbq_desc; +} + +/* Get the next small buffer. */ +static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) +{ + struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; + rx_ring->sbq_curr_idx++; + if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) + rx_ring->sbq_curr_idx = 0; + rx_ring->sbq_free_cnt++; + return sbq_desc; +} + +/* Update an rx ring index. */ +static void ql_update_cq(struct rx_ring *rx_ring) +{ + rx_ring->cnsmr_idx++; + rx_ring->curr_entry++; + if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { + rx_ring->cnsmr_idx = 0; + rx_ring->curr_entry = rx_ring->cq_base; + } +} + +static void ql_write_cq_idx(struct rx_ring *rx_ring) +{ + ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); +} + +static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, + struct bq_desc *lbq_desc) +{ + if (!rx_ring->pg_chunk.page) { + u64 map; + rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP | + GFP_ATOMIC, + qdev->lbq_buf_order); + if (unlikely(!rx_ring->pg_chunk.page)) { + netif_err(qdev, drv, qdev->ndev, + "page allocation failed.\n"); + return -ENOMEM; + } + rx_ring->pg_chunk.offset = 0; + map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, + 0, ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(qdev->pdev, map)) { + __free_pages(rx_ring->pg_chunk.page, + qdev->lbq_buf_order); + netif_err(qdev, drv, qdev->ndev, + "PCI mapping failed.\n"); + return -ENOMEM; + } + rx_ring->pg_chunk.map = map; + rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); + } + + /* Copy the current master pg_chunk info + * to the current descriptor. + */ + lbq_desc->p.pg_chunk = rx_ring->pg_chunk; + + /* Adjust the master page chunk for next + * buffer get. + */ + rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; + if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { + rx_ring->pg_chunk.page = NULL; + lbq_desc->p.pg_chunk.last_flag = 1; + } else { + rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; + get_page(rx_ring->pg_chunk.page); + lbq_desc->p.pg_chunk.last_flag = 0; + } + return 0; +} +/* Process (refill) a large buffer queue. 
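
ql_lbq_block_size() and ql_get_next_chunk() above carve one high-order page allocation into fixed-size receive buffers, taking a page reference for every chunk handed out except the last, whose consumption instead tears down the DMA mapping. A sketch of that carving with illustrative numbers (4 KiB pages, order 2, 4 KiB buffers):

#include <stdio.h>

int main(void)
{
        unsigned int page_size = 4096, lbq_buf_order = 2;
        unsigned int lbq_buf_size = 4096;
        unsigned int block = page_size << lbq_buf_order;        /* 16 KiB */
        unsigned int offset;

        for (offset = 0; offset < block; offset += lbq_buf_size) {
                int last = (offset + lbq_buf_size) == block;

                /* get_page() for every chunk but the last, which
                 * unmaps the whole block instead. */
                printf("chunk @%5u%s\n", offset,
                       last ? "  <- last: unmap the block" : "");
        }
        return 0;
}
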
*/ +static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + u32 clean_idx = rx_ring->lbq_clean_idx; + u32 start_idx = clean_idx; + struct bq_desc *lbq_desc; + u64 map; + int i; + + while (rx_ring->lbq_free_cnt > 32) { + for (i = 0; i < 16; i++) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "lbq: try cleaning clean_idx = %d.\n", + clean_idx); + lbq_desc = &rx_ring->lbq[clean_idx]; + if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { + netif_err(qdev, ifup, qdev->ndev, + "Could not get a page chunk.\n"); + return; + } + + map = lbq_desc->p.pg_chunk.map + + lbq_desc->p.pg_chunk.offset; + dma_unmap_addr_set(lbq_desc, mapaddr, map); + dma_unmap_len_set(lbq_desc, maplen, + rx_ring->lbq_buf_size); + *lbq_desc->addr = cpu_to_le64(map); + + pci_dma_sync_single_for_device(qdev->pdev, map, + rx_ring->lbq_buf_size, + PCI_DMA_FROMDEVICE); + clean_idx++; + if (clean_idx == rx_ring->lbq_len) + clean_idx = 0; + } + + rx_ring->lbq_clean_idx = clean_idx; + rx_ring->lbq_prod_idx += 16; + if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) + rx_ring->lbq_prod_idx = 0; + rx_ring->lbq_free_cnt -= 16; + } + + if (start_idx != clean_idx) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "lbq: updating prod idx = %d.\n", + rx_ring->lbq_prod_idx); + ql_write_db_reg(rx_ring->lbq_prod_idx, + rx_ring->lbq_prod_idx_db_reg); + } +} + +/* Process (refill) a small buffer queue. */ +static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + u32 clean_idx = rx_ring->sbq_clean_idx; + u32 start_idx = clean_idx; + struct bq_desc *sbq_desc; + u64 map; + int i; + + while (rx_ring->sbq_free_cnt > 16) { + for (i = 0; i < 16; i++) { + sbq_desc = &rx_ring->sbq[clean_idx]; + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "sbq: try cleaning clean_idx = %d.\n", + clean_idx); + if (sbq_desc->p.skb == NULL) { + netif_printk(qdev, rx_status, KERN_DEBUG, + qdev->ndev, + "sbq: getting new skb for index %d.\n", + sbq_desc->index); + sbq_desc->p.skb = + netdev_alloc_skb(qdev->ndev, + SMALL_BUFFER_SIZE); + if (sbq_desc->p.skb == NULL) { + netif_err(qdev, probe, qdev->ndev, + "Couldn't get an skb.\n"); + rx_ring->sbq_clean_idx = clean_idx; + return; + } + skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); + map = pci_map_single(qdev->pdev, + sbq_desc->p.skb->data, + rx_ring->sbq_buf_size, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(qdev->pdev, map)) { + netif_err(qdev, ifup, qdev->ndev, + "PCI mapping failed.\n"); + rx_ring->sbq_clean_idx = clean_idx; + dev_kfree_skb_any(sbq_desc->p.skb); + sbq_desc->p.skb = NULL; + return; + } + dma_unmap_addr_set(sbq_desc, mapaddr, map); + dma_unmap_len_set(sbq_desc, maplen, + rx_ring->sbq_buf_size); + *sbq_desc->addr = cpu_to_le64(map); + } + + clean_idx++; + if (clean_idx == rx_ring->sbq_len) + clean_idx = 0; + } + rx_ring->sbq_clean_idx = clean_idx; + rx_ring->sbq_prod_idx += 16; + if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) + rx_ring->sbq_prod_idx = 0; + rx_ring->sbq_free_cnt -= 16; + } + + if (start_idx != clean_idx) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "sbq: updating prod idx = %d.\n", + rx_ring->sbq_prod_idx); + ql_write_db_reg(rx_ring->sbq_prod_idx, + rx_ring->sbq_prod_idx_db_reg); + } +} + +static void ql_update_buffer_queues(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + ql_update_sbq(qdev, rx_ring); + ql_update_lbq(qdev, rx_ring); +} + +/* Unmaps tx buffers. Can be called from send() if a pci mapping + * fails at some stage, or from the interrupt when a tx completes. 
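
Both refill loops above (ql_update_lbq() and ql_update_sbq()) post buffers sixteen at a time and ring the producer doorbell once per call rather than once per buffer, trading a little refill latency for far fewer MMIO writes. The shape of that batching, with made-up ring numbers:

#include <stdio.h>

#define RING_LEN 64

int main(void)
{
        unsigned int prod = 8, free_cnt = 50;
        unsigned int start = prod;

        while (free_cnt > 16) {
                for (int i = 0; i < 16; i++)
                        prod = (prod + 1) % RING_LEN;   /* post a buffer */
                free_cnt -= 16;
        }

        if (prod != start)      /* one doorbell write for the whole batch */
                printf("doorbell: prod=%u (posted %u)\n",
                       prod, (prod - start + RING_LEN) % RING_LEN);
        return 0;
}
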
+ */ +static void ql_unmap_send(struct ql_adapter *qdev, + struct tx_ring_desc *tx_ring_desc, int mapped) +{ + int i; + for (i = 0; i < mapped; i++) { + if (i == 0 || (i == 7 && mapped > 7)) { + /* + * Unmap the skb->data area, or the + * external sglist (AKA the Outbound + * Address List (OAL)). + * If its the zeroeth element, then it's + * the skb->data area. If it's the 7th + * element and there is more than 6 frags, + * then its an OAL. + */ + if (i == 7) { + netif_printk(qdev, tx_done, KERN_DEBUG, + qdev->ndev, + "unmapping OAL area.\n"); + } + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_ring_desc->map[i], + mapaddr), + dma_unmap_len(&tx_ring_desc->map[i], + maplen), + PCI_DMA_TODEVICE); + } else { + netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, + "unmapping frag %d.\n", i); + pci_unmap_page(qdev->pdev, + dma_unmap_addr(&tx_ring_desc->map[i], + mapaddr), + dma_unmap_len(&tx_ring_desc->map[i], + maplen), PCI_DMA_TODEVICE); + } + } + +} + +/* Map the buffers for this transmit. This will return + * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. + */ +static int ql_map_send(struct ql_adapter *qdev, + struct ob_mac_iocb_req *mac_iocb_ptr, + struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc) +{ + int len = skb_headlen(skb); + dma_addr_t map; + int frag_idx, err, map_idx = 0; + struct tx_buf_desc *tbd = mac_iocb_ptr->tbd; + int frag_cnt = skb_shinfo(skb)->nr_frags; + + if (frag_cnt) { + netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, + "frag_cnt = %d.\n", frag_cnt); + } + /* + * Map the skb buffer first. + */ + map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netif_err(qdev, tx_queued, qdev->ndev, + "PCI mapping failed with error: %d\n", err); + + return NETDEV_TX_BUSY; + } + + tbd->len = cpu_to_le32(len); + tbd->addr = cpu_to_le64(map); + dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); + dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); + map_idx++; + + /* + * This loop fills the remainder of the 8 address descriptors + * in the IOCB. If there are more than 7 fragments, then the + * eighth address desc will point to an external list (OAL). + * When this happens, the remainder of the frags will be stored + * in this list. + */ + for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx]; + tbd++; + if (frag_idx == 6 && frag_cnt > 7) { + /* Let's tack on an sglist. + * Our control block will now + * look like this: + * iocb->seg[0] = skb->data + * iocb->seg[1] = frag[0] + * iocb->seg[2] = frag[1] + * iocb->seg[3] = frag[2] + * iocb->seg[4] = frag[3] + * iocb->seg[5] = frag[4] + * iocb->seg[6] = frag[5] + * iocb->seg[7] = ptr to OAL (external sglist) + * oal->seg[0] = frag[6] + * oal->seg[1] = frag[7] + * oal->seg[2] = frag[8] + * oal->seg[3] = frag[9] + * oal->seg[4] = frag[10] + * etc... + */ + /* Tack on the OAL in the eighth segment of IOCB. */ + map = pci_map_single(qdev->pdev, &tx_ring_desc->oal, + sizeof(struct oal), + PCI_DMA_TODEVICE); + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netif_err(qdev, tx_queued, qdev->ndev, + "PCI mapping outbound address list with error: %d\n", + err); + goto map_error; + } + + tbd->addr = cpu_to_le64(map); + /* + * The length is the number of fragments + * that remain to be mapped times the length + * of our sglist (OAL). 
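
ql_map_send() above gives each IOCB eight address slots: slot 0 carries skb->data, and when a packet has more than seven fragments, slot 7 is repurposed as a pointer to the external OAL that holds the remainder. A small placement model (counts only, no DMA; the function name is invented):

#include <stdio.h>

static void place(int frag_cnt)
{
        /* Slots 1..7 hold frags directly unless slot 7 must point at
         * the OAL, which happens only when frag_cnt > 7. */
        int in_iocb = frag_cnt > 7 ? 6 : frag_cnt;
        int in_oal = frag_cnt - in_iocb;

        printf("%2d frags: %d in the IOCB, %d in the OAL%s\n",
               frag_cnt, in_iocb, in_oal,
               in_oal ? " (slot 7 -> OAL)" : "");
}

int main(void)
{
        place(3);       /* all inline */
        place(7);       /* still fits: slots 1..7 */
        place(10);      /* 6 inline, 4 spill to the OAL */
        return 0;
}

The OAL descriptor's length field in the code above encodes exactly that spill count, sizeof(struct tx_buf_desc) * (frag_cnt - frag_idx).
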
+ */ + tbd->len = + cpu_to_le32((sizeof(struct tx_buf_desc) * + (frag_cnt - frag_idx)) | TX_DESC_C); + dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, + map); + dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, + sizeof(struct oal)); + tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; + map_idx++; + } + + map = + pci_map_page(qdev->pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netif_err(qdev, tx_queued, qdev->ndev, + "PCI mapping frags failed with error: %d.\n", + err); + goto map_error; + } + + tbd->addr = cpu_to_le64(map); + tbd->len = cpu_to_le32(frag->size); + dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); + dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, + frag->size); + + } + /* Save the number of segments we've mapped. */ + tx_ring_desc->map_cnt = map_idx; + /* Terminate the last segment. */ + tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E); + return NETDEV_TX_OK; + +map_error: + /* + * If the first frag mapping failed, then i will be zero. + * This causes the unmap of the skb->data area. Otherwise + * we pass in the number of frags that mapped successfully + * so they can be umapped. + */ + ql_unmap_send(qdev, tx_ring_desc, map_idx); + return NETDEV_TX_BUSY; +} + +/* Process an inbound completion from an rx ring. */ +static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, + struct rx_ring *rx_ring, + struct ib_mac_iocb_rsp *ib_mac_rsp, + u32 length, + u16 vlan_id) +{ + struct sk_buff *skb; + struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + struct skb_frag_struct *rx_frag; + int nr_frags; + struct napi_struct *napi = &rx_ring->napi; + + napi->dev = qdev->ndev; + + skb = napi_get_frags(napi); + if (!skb) { + netif_err(qdev, drv, qdev->ndev, + "Couldn't get an skb, exiting.\n"); + rx_ring->rx_dropped++; + put_page(lbq_desc->p.pg_chunk.page); + return; + } + prefetch(lbq_desc->p.pg_chunk.va); + rx_frag = skb_shinfo(skb)->frags; + nr_frags = skb_shinfo(skb)->nr_frags; + rx_frag += nr_frags; + rx_frag->page = lbq_desc->p.pg_chunk.page; + rx_frag->page_offset = lbq_desc->p.pg_chunk.offset; + rx_frag->size = length; + + skb->len += length; + skb->data_len += length; + skb->truesize += length; + skb_shinfo(skb)->nr_frags++; + + rx_ring->rx_packets++; + rx_ring->rx_bytes += length; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rx_ring->cq_id); + if (vlan_id != 0xffff) + __vlan_hwaccel_put_tag(skb, vlan_id); + napi_gro_frags(napi); +} + +/* Process an inbound completion from an rx ring. */ +static void ql_process_mac_rx_page(struct ql_adapter *qdev, + struct rx_ring *rx_ring, + struct ib_mac_iocb_rsp *ib_mac_rsp, + u32 length, + u16 vlan_id) +{ + struct net_device *ndev = qdev->ndev; + struct sk_buff *skb = NULL; + void *addr; + struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + struct napi_struct *napi = &rx_ring->napi; + + skb = netdev_alloc_skb(ndev, length); + if (!skb) { + netif_err(qdev, drv, qdev->ndev, + "Couldn't get an skb, need to unwind!.\n"); + rx_ring->rx_dropped++; + put_page(lbq_desc->p.pg_chunk.page); + return; + } + + addr = lbq_desc->p.pg_chunk.va; + prefetch(addr); + + + /* Frame error, so drop the packet. 
*/ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + netif_info(qdev, drv, qdev->ndev, + "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); + rx_ring->rx_errors++; + goto err_out; + } + + /* The max framesize filter on this chip is set higher than + * MTU since FCoE uses 2k frames. + */ + if (skb->len > ndev->mtu + ETH_HLEN) { + netif_err(qdev, drv, qdev->ndev, + "Segment too small, dropping.\n"); + rx_ring->rx_dropped++; + goto err_out; + } + memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN); + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", + length); + skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset+ETH_HLEN, + length-ETH_HLEN); + skb->len += length-ETH_HLEN; + skb->data_len += length-ETH_HLEN; + skb->truesize += length-ETH_HLEN; + + rx_ring->rx_packets++; + rx_ring->rx_bytes += skb->len; + skb->protocol = eth_type_trans(skb, ndev); + skb_checksum_none_assert(skb); + + if ((ndev->features & NETIF_F_RXCSUM) && + !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { + /* TCP frame. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "TCP checksum done!\n"); + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && + (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { + /* Unfragmented ipv4 UDP frame. */ + struct iphdr *iph = (struct iphdr *) skb->data; + if (!(iph->frag_off & + cpu_to_be16(IP_MF|IP_OFFSET))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + netif_printk(qdev, rx_status, KERN_DEBUG, + qdev->ndev, + "TCP checksum done!\n"); + } + } + } + + skb_record_rx_queue(skb, rx_ring->cq_id); + if (vlan_id != 0xffff) + __vlan_hwaccel_put_tag(skb, vlan_id); + if (skb->ip_summed == CHECKSUM_UNNECESSARY) + napi_gro_receive(napi, skb); + else + netif_receive_skb(skb); + return; +err_out: + dev_kfree_skb_any(skb); + put_page(lbq_desc->p.pg_chunk.page); +} + +/* Process an inbound completion from an rx ring. */ +static void ql_process_mac_rx_skb(struct ql_adapter *qdev, + struct rx_ring *rx_ring, + struct ib_mac_iocb_rsp *ib_mac_rsp, + u32 length, + u16 vlan_id) +{ + struct net_device *ndev = qdev->ndev; + struct sk_buff *skb = NULL; + struct sk_buff *new_skb = NULL; + struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); + + skb = sbq_desc->p.skb; + /* Allocate new_skb and copy */ + new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); + if (new_skb == NULL) { + netif_err(qdev, probe, qdev->ndev, + "No skb available, drop the packet.\n"); + rx_ring->rx_dropped++; + return; + } + skb_reserve(new_skb, NET_IP_ALIGN); + memcpy(skb_put(new_skb, length), skb->data, length); + skb = new_skb; + + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + netif_info(qdev, drv, qdev->ndev, + "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); + dev_kfree_skb_any(skb); + rx_ring->rx_errors++; + return; + } + + /* loopback self test for ethtool */ + if (test_bit(QL_SELFTEST, &qdev->flags)) { + ql_check_lb_frame(qdev, skb); + dev_kfree_skb_any(skb); + return; + } + + /* The max framesize filter on this chip is set higher than + * MTU since FCoE uses 2k frames. 
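
The receive-checksum blocks above trust the hardware result for TCP unconditionally, but for UDP only on unfragmented IPv4, since a fragment's checksum covers only the first piece. A compilable distillation of that decision; the flag bits are stand-ins for the IB_MAC_IOCB_RSP_* values and the IP_MF/IP_OFFSET masks are defined locally:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define F_TCP   0x1
#define F_UDP   0x2
#define F_IPV4  0x4

#define IP_OFFSET 0x1FFF        /* fragment offset field */
#define IP_MF     0x2000        /* "more fragments" flag */

static int csum_unnecessary(unsigned int flags, uint16_t frag_off_be)
{
        if (flags & F_TCP)
                return 1;
        if ((flags & F_UDP) && (flags & F_IPV4))
                return !(frag_off_be & htons(IP_MF | IP_OFFSET));
        return 0;
}

int main(void)
{
        printf("tcp:           %d\n", csum_unnecessary(F_TCP, 0));
        printf("udp, whole:    %d\n", csum_unnecessary(F_UDP | F_IPV4, 0));
        printf("udp, fragment: %d\n",
               csum_unnecessary(F_UDP | F_IPV4, htons(IP_MF)));
        return 0;
}

frag_off arrives big-endian off the wire, which is why the driver masks it with ntohs()/cpu_to_be16() constants rather than host-order values.
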
+ */
+	if (skb->len > ndev->mtu + ETH_HLEN) {
+		dev_kfree_skb_any(skb);
+		rx_ring->rx_dropped++;
+		return;
+	}
+
+	prefetch(skb->data);
+	skb->dev = ndev;
+	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "%s Multicast.\n",
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+	}
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "Promiscuous Packet.\n");
+
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += skb->len;
+	skb->protocol = eth_type_trans(skb, ndev);
+	skb_checksum_none_assert(skb);
+
+	/* If rx checksum is on, and there are no
+	 * csum or frame errors.
+	 */
+	if ((ndev->features & NETIF_F_RXCSUM) &&
+		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+		/* TCP frame. */
+		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "TCP checksum done!\n");
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+			/* Unfragmented ipv4 UDP frame. */
+			struct iphdr *iph = (struct iphdr *) skb->data;
+			if (!(iph->frag_off &
+				ntohs(IP_MF|IP_OFFSET))) {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				netif_printk(qdev, rx_status, KERN_DEBUG,
+					     qdev->ndev,
+					     "TCP checksum done!\n");
+			}
+		}
+	}
+
+	skb_record_rx_queue(skb, rx_ring->cq_id);
+	if (vlan_id != 0xffff)
+		__vlan_hwaccel_put_tag(skb, vlan_id);
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+		napi_gro_receive(&rx_ring->napi, skb);
+	else
+		netif_receive_skb(skb);
+}
+
+static void ql_realign_skb(struct sk_buff *skb, int len)
+{
+	void *temp_addr = skb->data;
+
+	/* Undo the skb_reserve(skb,32) we did before
+	 * giving to hardware, and realign data on
+	 * a 2-byte boundary.
+	 */
+	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
+	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
+	skb_copy_to_linear_data(skb, temp_addr,
+		(unsigned int)len);
+}
+
+/*
+ * This function builds an skb for the given inbound
+ * completion. It will be rewritten for readability in the near
+ * future, but for now it works well.
+ */
+static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
+				       struct rx_ring *rx_ring,
+				       struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	struct bq_desc *lbq_desc;
+	struct bq_desc *sbq_desc;
+	struct sk_buff *skb = NULL;
+	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+
+	/*
+	 * Handle the header buffer if present.
+	 */
+	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
+	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "Header of %d bytes in small buffer.\n", hdr_len);
+		/*
+		 * Headers fit nicely into a small buffer.
+		 */
+		sbq_desc = ql_get_curr_sbuf(rx_ring);
+		pci_unmap_single(qdev->pdev,
+				 dma_unmap_addr(sbq_desc, mapaddr),
+				 dma_unmap_len(sbq_desc, maplen),
+				 PCI_DMA_FROMDEVICE);
+		skb = sbq_desc->p.skb;
+		ql_realign_skb(skb, hdr_len);
+		skb_put(skb, hdr_len);
+		sbq_desc->p.skb = NULL;
+	}
+
+	/*
+	 * Handle the data buffer(s).
+	 */
+	if (unlikely(!length)) {	/* Is there data too?
*/ + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "No Data buffer in this packet.\n"); + return skb; + } + + if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Headers in small, data of %d bytes in small, combine them.\n", + length); + /* + * Data is less than small buffer size so it's + * stuffed in a small buffer. + * For this case we append the data + * from the "data" small buffer to the "header" small + * buffer. + */ + sbq_desc = ql_get_curr_sbuf(rx_ring); + pci_dma_sync_single_for_cpu(qdev->pdev, + dma_unmap_addr + (sbq_desc, mapaddr), + dma_unmap_len + (sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + memcpy(skb_put(skb, length), + sbq_desc->p.skb->data, length); + pci_dma_sync_single_for_device(qdev->pdev, + dma_unmap_addr + (sbq_desc, + mapaddr), + dma_unmap_len + (sbq_desc, + maplen), + PCI_DMA_FROMDEVICE); + } else { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%d bytes in a single small buffer.\n", + length); + sbq_desc = ql_get_curr_sbuf(rx_ring); + skb = sbq_desc->p.skb; + ql_realign_skb(skb, length); + skb_put(skb, length); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(sbq_desc, + mapaddr), + dma_unmap_len(sbq_desc, + maplen), + PCI_DMA_FROMDEVICE); + sbq_desc->p.skb = NULL; + } + } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Header in small, %d bytes in large. Chain large to small!\n", + length); + /* + * The data is in a single large buffer. We + * chain it to the header buffer's skb and let + * it rip. + */ + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Chaining page at offset = %d, for %d bytes to skb.\n", + lbq_desc->p.pg_chunk.offset, length); + skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + length); + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } else { + /* + * The headers and data are in a single large buffer. We + * copy it to a new skb and let it go. This can happen with + * jumbo mtu on a non-TCP/UDP frame. + */ + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + skb = netdev_alloc_skb(qdev->ndev, length); + if (skb == NULL) { + netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, + "No skb available, drop the packet.\n"); + return NULL; + } + pci_unmap_page(qdev->pdev, + dma_unmap_addr(lbq_desc, + mapaddr), + dma_unmap_len(lbq_desc, maplen), + PCI_DMA_FROMDEVICE); + skb_reserve(skb, NET_IP_ALIGN); + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", + length); + skb_fill_page_desc(skb, 0, + lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + length); + skb->len += length; + skb->data_len += length; + skb->truesize += length; + length -= length; + __pskb_pull_tail(skb, + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? + VLAN_ETH_HLEN : ETH_HLEN); + } + } else { + /* + * The data is in a chain of large buffers + * pointed to by a small buffer. We loop + * thru and chain them to the our small header + * buffer's skb. + * frags: There are 18 max frags and our small + * buffer will hold 32 of them. The thing is, + * we'll use 3 max for our 9000 byte jumbo + * frames. If the MTU goes up we could + * eventually be in trouble. 
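
The worry in the comment above is easy to quantify: the number of chained large-buffer chunks is simply the frame size divided by the buffer size, rounded up. With the illustrative 4 KiB buffers used in the earlier sketches:

#include <stdio.h>

int main(void)
{
        unsigned int lbq_buf_size = 4096, mtu_frame = 9000;
        unsigned int frags = (mtu_frame + lbq_buf_size - 1) / lbq_buf_size;

        printf("%u-byte frame -> %u large-buffer chunks\n",
               mtu_frame, frags);      /* 3, matching the comment */
        return 0;
}

So 9000-byte jumbos stay well under the 18-fragment budget, but a much larger MTU with small buffers eventually would not.
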
+ */ + int size, i = 0; + sbq_desc = ql_get_curr_sbuf(rx_ring); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { + /* + * This is an non TCP/UDP IP frame, so + * the headers aren't split into a small + * buffer. We have to use the small buffer + * that contains our sg list as our skb to + * send upstairs. Copy the sg list here to + * a local buffer and use it to find the + * pages to chain. + */ + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%d bytes of headers & data in chain of large.\n", + length); + skb = sbq_desc->p.skb; + sbq_desc->p.skb = NULL; + skb_reserve(skb, NET_IP_ALIGN); + } + while (length > 0) { + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + size = (length < rx_ring->lbq_buf_size) ? length : + rx_ring->lbq_buf_size; + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Adding page %d to skb for %d bytes.\n", + i, size); + skb_fill_page_desc(skb, i, + lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + size); + skb->len += size; + skb->data_len += size; + skb->truesize += size; + length -= size; + i++; + } + __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? + VLAN_ETH_HLEN : ETH_HLEN); + } + return skb; +} + +/* Process an inbound completion from an rx ring. */ +static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, + struct rx_ring *rx_ring, + struct ib_mac_iocb_rsp *ib_mac_rsp, + u16 vlan_id) +{ + struct net_device *ndev = qdev->ndev; + struct sk_buff *skb = NULL; + + QL_DUMP_IB_MAC_RSP(ib_mac_rsp); + + skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); + if (unlikely(!skb)) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "No skb available, drop packet.\n"); + rx_ring->rx_dropped++; + return; + } + + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + netif_info(qdev, drv, qdev->ndev, + "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); + dev_kfree_skb_any(skb); + rx_ring->rx_errors++; + return; + } + + /* The max framesize filter on this chip is set higher than + * MTU since FCoE uses 2k frames. + */ + if (skb->len > ndev->mtu + ETH_HLEN) { + dev_kfree_skb_any(skb); + rx_ring->rx_dropped++; + return; + } + + /* loopback self test for ethtool */ + if (test_bit(QL_SELFTEST, &qdev->flags)) { + ql_check_lb_frame(qdev, skb); + dev_kfree_skb_any(skb); + return; + } + + prefetch(skb->data); + skb->dev = ndev; + if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_HASH ? "Hash" : + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_REG ? "Registered" : + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); + rx_ring->rx_multicast++; + } + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Promiscuous Packet.\n"); + } + + skb->protocol = eth_type_trans(skb, ndev); + skb_checksum_none_assert(skb); + + /* If rx checksum is on, and there are no + * csum or frame errors. + */ + if ((ndev->features & NETIF_F_RXCSUM) && + !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { + /* TCP frame. 
+ */
+		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "TCP checksum done!\n");
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+			/* Unfragmented ipv4 UDP frame. */
+			struct iphdr *iph = (struct iphdr *) skb->data;
+			if (!(iph->frag_off &
+				ntohs(IP_MF|IP_OFFSET))) {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+					     "TCP checksum done!\n");
+			}
+		}
+	}
+
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += skb->len;
+	skb_record_rx_queue(skb, rx_ring->cq_id);
+	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+		__vlan_hwaccel_put_tag(skb, vlan_id);
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+		napi_gro_receive(&rx_ring->napi, skb);
+	else
+		netif_receive_skb(skb);
+}
+
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+					    struct rx_ring *rx_ring,
+					    struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+			((le16_to_cpu(ib_mac_rsp->vlan_id) &
+			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+		/* The data and headers are split into
+		 * separate buffers.
+		 */
+		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+						vlan_id);
+	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+		/* The data fit in a single small buffer.
+		 * Allocate a new skb, copy the data and
+		 * return the buffer to the free pool.
+		 */
+		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+						length, vlan_id);
+	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
+		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+		/* TCP packet in a page chunk that's been checksummed.
+		 * Tack it on to our GRO skb and let it go.
+		 */
+		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
+						length, vlan_id);
+	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+		/* Non-TCP packet in a page chunk. Allocate an
+		 * skb, tack it on frags, and send it up.
+		 */
+		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+						length, vlan_id);
+	} else {
+		/* Non-TCP/UDP large frames that span multiple buffers
+		 * can be processed correctly by the split frame logic.
+		 */
+		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+						vlan_id);
+	}
+
+	return (unsigned long)length;
+}
+
+/* Process an outbound completion from an rx ring.
*/ +static void ql_process_mac_tx_intr(struct ql_adapter *qdev, + struct ob_mac_iocb_rsp *mac_rsp) +{ + struct tx_ring *tx_ring; + struct tx_ring_desc *tx_ring_desc; + + QL_DUMP_OB_MAC_RSP(mac_rsp); + tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; + tx_ring_desc = &tx_ring->q[mac_rsp->tid]; + ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); + tx_ring->tx_bytes += (tx_ring_desc->skb)->len; + tx_ring->tx_packets++; + dev_kfree_skb(tx_ring_desc->skb); + tx_ring_desc->skb = NULL; + + if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | + OB_MAC_IOCB_RSP_S | + OB_MAC_IOCB_RSP_L | + OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { + netif_warn(qdev, tx_done, qdev->ndev, + "Total descriptor length did not match transfer length.\n"); + } + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { + netif_warn(qdev, tx_done, qdev->ndev, + "Frame too short to be valid, not sent.\n"); + } + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { + netif_warn(qdev, tx_done, qdev->ndev, + "Frame too long, but sent anyway.\n"); + } + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { + netif_warn(qdev, tx_done, qdev->ndev, + "PCI backplane error. Frame not sent.\n"); + } + } + atomic_inc(&tx_ring->tx_count); +} + +/* Fire up a handler to reset the MPI processor. */ +void ql_queue_fw_error(struct ql_adapter *qdev) +{ + ql_link_off(qdev); + queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); +} + +void ql_queue_asic_error(struct ql_adapter *qdev) +{ + ql_link_off(qdev); + ql_disable_interrupts(qdev); + /* Clear adapter up bit to signal the recovery + * process that it shouldn't kill the reset worker + * thread + */ + clear_bit(QL_ADAPTER_UP, &qdev->flags); + /* Set asic recovery bit to indicate reset process that we are + * in fatal error recovery process rather than normal close + */ + set_bit(QL_ASIC_RECOVERY, &qdev->flags); + queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); +} + +static void ql_process_chip_ae_intr(struct ql_adapter *qdev, + struct ib_ae_iocb_rsp *ib_ae_rsp) +{ + switch (ib_ae_rsp->event) { + case MGMT_ERR_EVENT: + netif_err(qdev, rx_err, qdev->ndev, + "Management Processor Fatal Error.\n"); + ql_queue_fw_error(qdev); + return; + + case CAM_LOOKUP_ERR_EVENT: + netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n"); + netdev_err(qdev->ndev, "This event shouldn't occur.\n"); + ql_queue_asic_error(qdev); + return; + + case SOFT_ECC_ERROR_EVENT: + netdev_err(qdev->ndev, "Soft ECC error detected.\n"); + ql_queue_asic_error(qdev); + break; + + case PCI_ERR_ANON_BUF_RD: + netdev_err(qdev->ndev, "PCI error occurred when reading " + "anonymous buffers from rx_ring %d.\n", + ib_ae_rsp->q_id); + ql_queue_asic_error(qdev); + break; + + default: + netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", + ib_ae_rsp->event); + ql_queue_asic_error(qdev); + break; + } +} + +static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) +{ + struct ql_adapter *qdev = rx_ring->qdev; + u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + struct ob_mac_iocb_rsp *net_rsp = NULL; + int count = 0; + + struct tx_ring *tx_ring; + /* While there are entries in the completion queue. 
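
The completion loop that follows (and its inbound twin further down) walks a consumer index toward a hardware-maintained shadow producer index. A minimal model of that drain loop; the driver additionally re-reads the producer from shared memory after each entry and issues rmb() before touching an entry, which this static sketch omits:

#include <stdio.h>

#define CQ_LEN 16

int main(void)
{
        unsigned int prod = 5, cnsmr = 1, count = 0;

        while (prod != cnsmr) {
                /* handle the completion at cnsmr ... */
                cnsmr = (cnsmr + 1) % CQ_LEN;
                count++;
        }
        printf("handled %u completions, cnsmr now %u\n", count, cnsmr);
        return 0;
}
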
*/ + while (prod != rx_ring->cnsmr_idx) { + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "cq_id = %d, prod = %d, cnsmr = %d.\n.", + rx_ring->cq_id, prod, rx_ring->cnsmr_idx); + + net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; + rmb(); + switch (net_rsp->opcode) { + + case OPCODE_OB_MAC_TSO_IOCB: + case OPCODE_OB_MAC_IOCB: + ql_process_mac_tx_intr(qdev, net_rsp); + break; + default: + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Hit default case, not handled! dropping the packet, opcode = %x.\n", + net_rsp->opcode); + } + count++; + ql_update_cq(rx_ring); + prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + } + if (!net_rsp) + return 0; + ql_write_cq_idx(rx_ring); + tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; + if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { + if (atomic_read(&tx_ring->queue_stopped) && + (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) + /* + * The queue got stopped because the tx_ring was full. + * Wake it up, because it's now at least 25% empty. + */ + netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); + } + + return count; +} + +static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) +{ + struct ql_adapter *qdev = rx_ring->qdev; + u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + struct ql_net_rsp_iocb *net_rsp; + int count = 0; + + /* While there are entries in the completion queue. */ + while (prod != rx_ring->cnsmr_idx) { + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "cq_id = %d, prod = %d, cnsmr = %d.\n.", + rx_ring->cq_id, prod, rx_ring->cnsmr_idx); + + net_rsp = rx_ring->curr_entry; + rmb(); + switch (net_rsp->opcode) { + case OPCODE_IB_MAC_IOCB: + ql_process_mac_rx_intr(qdev, rx_ring, + (struct ib_mac_iocb_rsp *) + net_rsp); + break; + + case OPCODE_IB_AE_IOCB: + ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) + net_rsp); + break; + default: + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Hit default case, not handled! dropping the packet, opcode = %x.\n", + net_rsp->opcode); + break; + } + count++; + ql_update_cq(rx_ring); + prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + if (count == budget) + break; + } + ql_update_buffer_queues(qdev, rx_ring); + ql_write_cq_idx(rx_ring); + return count; +} + +static int ql_napi_poll_msix(struct napi_struct *napi, int budget) +{ + struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); + struct ql_adapter *qdev = rx_ring->qdev; + struct rx_ring *trx_ring; + int i, work_done = 0; + struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); + + /* Service the TX rings first. They start + * right after the RSS rings. */ + for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { + trx_ring = &qdev->rx_ring[i]; + /* If this TX completion ring belongs to this vector and + * it's not empty then service it. + */ + if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && + (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != + trx_ring->cnsmr_idx)) { + netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, + "%s: Servicing TX completion ring %d.\n", + __func__, trx_ring->cq_id); + ql_clean_outbound_rx_ring(trx_ring); + } + } + + /* + * Now service the RSS ring if it's active. 
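+	 * Only inbound work counts against the NAPI budget; the TX
+	 * completion rings above are always drained in full. Roughly:
+	 *
+	 *	work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
+	 *	if (work_done < budget) {
+	 *		napi_complete(napi);
+	 *		ql_enable_completion_interrupt(qdev, rx_ring->irq);
+	 *	}
+	 *
+	 * so the vector's interrupt is only re-armed once the ring is
+	 * drained below budget.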
+ */ + if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != + rx_ring->cnsmr_idx) { + netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, + "%s: Servicing RX completion ring %d.\n", + __func__, rx_ring->cq_id); + work_done = ql_clean_inbound_rx_ring(rx_ring, budget); + } + + if (work_done < budget) { + napi_complete(napi); + ql_enable_completion_interrupt(qdev, rx_ring->irq); + } + return work_done; +} + +static void qlge_vlan_mode(struct net_device *ndev, u32 features) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + if (features & NETIF_F_HW_VLAN_RX) { + netif_printk(qdev, ifup, KERN_DEBUG, ndev, + "Turning on VLAN in NIC_RCV_CFG.\n"); + ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | + NIC_RCV_CFG_VLAN_MATCH_AND_NON); + } else { + netif_printk(qdev, ifup, KERN_DEBUG, ndev, + "Turning off VLAN in NIC_RCV_CFG.\n"); + ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); + } +} + +static u32 qlge_fix_features(struct net_device *ndev, u32 features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_RX) + features |= NETIF_F_HW_VLAN_TX; + else + features &= ~NETIF_F_HW_VLAN_TX; + + return features; +} + +static int qlge_set_features(struct net_device *ndev, u32 features) +{ + u32 changed = ndev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_RX) + qlge_vlan_mode(ndev, features); + + return 0; +} + +static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) +{ + u32 enable_bit = MAC_ADDR_E; + + if (ql_set_mac_addr_reg + (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init vlan address.\n"); + } +} + +static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return; + + __qlge_vlan_rx_add_vid(qdev, vid); + set_bit(vid, qdev->active_vlans); + + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); +} + +static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) +{ + u32 enable_bit = 0; + + if (ql_set_mac_addr_reg + (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to clear vlan address.\n"); + } +} + +static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return; + + __qlge_vlan_rx_kill_vid(qdev, vid); + clear_bit(vid, qdev->active_vlans); + + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); +} + +static void qlge_restore_vlan(struct ql_adapter *qdev) +{ + int status; + u16 vid; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return; + + for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID) + __qlge_vlan_rx_add_vid(qdev, vid); + + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); +} + +/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ +static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) +{ + struct rx_ring *rx_ring = dev_id; + napi_schedule(&rx_ring->napi); + return IRQ_HANDLED; +} + +/* This handles a fatal error, MPI activity, and the default + * rx_ring in an MSI-X multiple vector environment. + * In MSI/Legacy environment it also process the rest of + * the rx_rings. 
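+ *
+ * The dispatch is driven by ISR1, a bit-mask of queues with work
+ * pending, compared against the mask this vector services, roughly:
+ *
+ *	var = ql_read32(qdev, ISR1);
+ *	if (var & intr_context->irq_mask)
+ *		napi_schedule(&rx_ring->napi);
+ *
+ * Fatal chip errors (STS_FE) and MPI events (STS_PI) are checked first
+ * and pushed off to worker threads rather than handled in irq context.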
+ */ +static irqreturn_t qlge_isr(int irq, void *dev_id) +{ + struct rx_ring *rx_ring = dev_id; + struct ql_adapter *qdev = rx_ring->qdev; + struct intr_context *intr_context = &qdev->intr_context[0]; + u32 var; + int work_done = 0; + + spin_lock(&qdev->hw_lock); + if (atomic_read(&qdev->intr_context[0].irq_cnt)) { + netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, + "Shared Interrupt, Not ours!\n"); + spin_unlock(&qdev->hw_lock); + return IRQ_NONE; + } + spin_unlock(&qdev->hw_lock); + + var = ql_disable_completion_interrupt(qdev, intr_context->intr); + + /* + * Check for fatal error. + */ + if (var & STS_FE) { + ql_queue_asic_error(qdev); + netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); + var = ql_read32(qdev, ERR_STS); + netdev_err(qdev->ndev, "Resetting chip. " + "Error Status Register = 0x%x\n", var); + return IRQ_HANDLED; + } + + /* + * Check MPI processor activity. + */ + if ((var & STS_PI) && + (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { + /* + * We've got an async event or mailbox completion. + * Handle it and clear the source of the interrupt. + */ + netif_err(qdev, intr, qdev->ndev, + "Got MPI processor interrupt.\n"); + ql_disable_completion_interrupt(qdev, intr_context->intr); + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + queue_delayed_work_on(smp_processor_id(), + qdev->workqueue, &qdev->mpi_work, 0); + work_done++; + } + + /* + * Get the bit-mask that shows the active queues for this + * pass. Compare it to the queues that this irq services + * and call napi if there's a match. + */ + var = ql_read32(qdev, ISR1); + if (var & intr_context->irq_mask) { + netif_info(qdev, intr, qdev->ndev, + "Waking handler for rx_ring[0].\n"); + ql_disable_completion_interrupt(qdev, intr_context->intr); + napi_schedule(&rx_ring->napi); + work_done++; + } + ql_enable_completion_interrupt(qdev, intr_context->intr); + return work_done ? 
IRQ_HANDLED : IRQ_NONE;
+}
+
+static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+
+	if (skb_is_gso(skb)) {
+		int err;
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
+		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
+		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
+		mac_iocb_ptr->net_trans_offset =
+		    cpu_to_le16(skb_network_offset(skb) |
+				skb_transport_offset(skb)
+				<< OB_MAC_TRANSPORT_HDR_SHIFT);
+		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
+		if (likely(skb->protocol == htons(ETH_P_IP))) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->check = 0;
+			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
+			tcp_hdr(skb)->check =
+			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+					     &ipv6_hdr(skb)->daddr,
+					     0, IPPROTO_TCP, 0);
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+	int len;
+	struct iphdr *iph = ip_hdr(skb);
+	__sum16 *check;
+	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+	mac_iocb_ptr->net_trans_offset =
+		cpu_to_le16(skb_network_offset(skb) |
+			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
+
+	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
+	if (likely(iph->protocol == IPPROTO_TCP)) {
+		check = &(tcp_hdr(skb)->check);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) +
+				(tcp_hdr(skb)->doff << 2));
+	} else {
+		check = &(udp_hdr(skb)->check);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
+		mac_iocb_ptr->total_hdrs_len =
+		    cpu_to_le16(skb_transport_offset(skb) +
+				sizeof(struct udphdr));
+	}
+	*check = ~csum_tcpudp_magic(iph->saddr,
+				    iph->daddr, len, iph->protocol, 0);
+}
+
+static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct tx_ring_desc *tx_ring_desc;
+	struct ob_mac_iocb_req *mac_iocb_ptr;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int tso;
+	struct tx_ring *tx_ring;
+	u32 tx_ring_idx = (u32) skb->queue_mapping;
+
+	tx_ring = &qdev->tx_ring[tx_ring_idx];
+
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
+	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+		netif_info(qdev, tx_queued, qdev->ndev,
+			   "%s: shutting down tx queue %d due to lack of resources.\n",
+			   __func__, tx_ring_idx);
+		netif_stop_subqueue(ndev, tx_ring->wq_id);
+		atomic_inc(&tx_ring->queue_stopped);
+		tx_ring->tx_errors++;
+		return NETDEV_TX_BUSY;
+	}
+	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
+	mac_iocb_ptr = tx_ring_desc->queue_entry;
+	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
+
+	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
+	mac_iocb_ptr->tid = tx_ring_desc->index;
+	/* We use the upper 32-bits to store the tx queue for this IO.
+	 * When we get the completion we can use it to establish the context.
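+	 * Once the frame is mapped, the producer index is advanced with
+	 * wrap and published to the chip through the doorbell register,
+	 * as done at the end of this function:
+	 *
+	 *	if (++tx_ring->prod_idx == tx_ring->wq_len)
+	 *		tx_ring->prod_idx = 0;
+	 *	wmb();
+	 *	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+	 *
+	 * with wmb() ordering the IOCB writes ahead of the doorbell write.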
+ */ + mac_iocb_ptr->txq_idx = tx_ring_idx; + tx_ring_desc->skb = skb; + + mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); + + if (vlan_tx_tag_present(skb)) { + netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, + "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); + mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; + mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); + } + tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { + ql_hw_csum_setup(skb, + (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); + } + if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != + NETDEV_TX_OK) { + netif_err(qdev, tx_queued, qdev->ndev, + "Could not map the segments.\n"); + tx_ring->tx_errors++; + return NETDEV_TX_BUSY; + } + QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); + tx_ring->prod_idx++; + if (tx_ring->prod_idx == tx_ring->wq_len) + tx_ring->prod_idx = 0; + wmb(); + + ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); + netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, + "tx queued, slot %d, len %d\n", + tx_ring->prod_idx, skb->len); + + atomic_dec(&tx_ring->tx_count); + return NETDEV_TX_OK; +} + + +static void ql_free_shadow_space(struct ql_adapter *qdev) +{ + if (qdev->rx_ring_shadow_reg_area) { + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->rx_ring_shadow_reg_area, + qdev->rx_ring_shadow_reg_dma); + qdev->rx_ring_shadow_reg_area = NULL; + } + if (qdev->tx_ring_shadow_reg_area) { + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->tx_ring_shadow_reg_area, + qdev->tx_ring_shadow_reg_dma); + qdev->tx_ring_shadow_reg_area = NULL; + } +} + +static int ql_alloc_shadow_space(struct ql_adapter *qdev) +{ + qdev->rx_ring_shadow_reg_area = + pci_alloc_consistent(qdev->pdev, + PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); + if (qdev->rx_ring_shadow_reg_area == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Allocation of RX shadow space failed.\n"); + return -ENOMEM; + } + memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); + qdev->tx_ring_shadow_reg_area = + pci_alloc_consistent(qdev->pdev, PAGE_SIZE, + &qdev->tx_ring_shadow_reg_dma); + if (qdev->tx_ring_shadow_reg_area == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Allocation of TX shadow space failed.\n"); + goto err_wqp_sh_area; + } + memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE); + return 0; + +err_wqp_sh_area: + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->rx_ring_shadow_reg_area, + qdev->rx_ring_shadow_reg_dma); + return -ENOMEM; +} + +static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) +{ + struct tx_ring_desc *tx_ring_desc; + int i; + struct ob_mac_iocb_req *mac_iocb_ptr; + + mac_iocb_ptr = tx_ring->wq_base; + tx_ring_desc = tx_ring->q; + for (i = 0; i < tx_ring->wq_len; i++) { + tx_ring_desc->index = i; + tx_ring_desc->skb = NULL; + tx_ring_desc->queue_entry = mac_iocb_ptr; + mac_iocb_ptr++; + tx_ring_desc++; + } + atomic_set(&tx_ring->tx_count, tx_ring->wq_len); + atomic_set(&tx_ring->queue_stopped, 0); +} + +static void ql_free_tx_resources(struct ql_adapter *qdev, + struct tx_ring *tx_ring) +{ + if (tx_ring->wq_base) { + pci_free_consistent(qdev->pdev, tx_ring->wq_size, + tx_ring->wq_base, tx_ring->wq_base_dma); + tx_ring->wq_base = NULL; + } + kfree(tx_ring->q); + tx_ring->q = NULL; +} + +static int ql_alloc_tx_resources(struct ql_adapter *qdev, + struct tx_ring *tx_ring) +{ + tx_ring->wq_base = + pci_alloc_consistent(qdev->pdev, 
tx_ring->wq_size, + &tx_ring->wq_base_dma); + + if ((tx_ring->wq_base == NULL) || + tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { + netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); + return -ENOMEM; + } + tx_ring->q = + kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); + if (tx_ring->q == NULL) + goto err; + + return 0; +err: + pci_free_consistent(qdev->pdev, tx_ring->wq_size, + tx_ring->wq_base, tx_ring->wq_base_dma); + return -ENOMEM; +} + +static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + struct bq_desc *lbq_desc; + + uint32_t curr_idx, clean_idx; + + curr_idx = rx_ring->lbq_curr_idx; + clean_idx = rx_ring->lbq_clean_idx; + while (curr_idx != clean_idx) { + lbq_desc = &rx_ring->lbq[curr_idx]; + + if (lbq_desc->p.pg_chunk.last_flag) { + pci_unmap_page(qdev->pdev, + lbq_desc->p.pg_chunk.map, + ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + lbq_desc->p.pg_chunk.last_flag = 0; + } + + put_page(lbq_desc->p.pg_chunk.page); + lbq_desc->p.pg_chunk.page = NULL; + + if (++curr_idx == rx_ring->lbq_len) + curr_idx = 0; + + } +} + +static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + int i; + struct bq_desc *sbq_desc; + + for (i = 0; i < rx_ring->sbq_len; i++) { + sbq_desc = &rx_ring->sbq[i]; + if (sbq_desc == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "sbq_desc %d is NULL.\n", i); + return; + } + if (sbq_desc->p.skb) { + pci_unmap_single(qdev->pdev, + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(sbq_desc->p.skb); + sbq_desc->p.skb = NULL; + } + } +} + +/* Free all large and small rx buffers associated + * with the completion queues for this device. + */ +static void ql_free_rx_buffers(struct ql_adapter *qdev) +{ + int i; + struct rx_ring *rx_ring; + + for (i = 0; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + if (rx_ring->lbq) + ql_free_lbq_buffers(qdev, rx_ring); + if (rx_ring->sbq) + ql_free_sbq_buffers(qdev, rx_ring); + } +} + +static void ql_alloc_rx_buffers(struct ql_adapter *qdev) +{ + struct rx_ring *rx_ring; + int i; + + for (i = 0; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + if (rx_ring->type != TX_Q) + ql_update_buffer_queues(qdev, rx_ring); + } +} + +static void ql_init_lbq_ring(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + int i; + struct bq_desc *lbq_desc; + __le64 *bq = rx_ring->lbq_base; + + memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); + for (i = 0; i < rx_ring->lbq_len; i++) { + lbq_desc = &rx_ring->lbq[i]; + memset(lbq_desc, 0, sizeof(*lbq_desc)); + lbq_desc->index = i; + lbq_desc->addr = bq; + bq++; + } +} + +static void ql_init_sbq_ring(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + int i; + struct bq_desc *sbq_desc; + __le64 *bq = rx_ring->sbq_base; + + memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc)); + for (i = 0; i < rx_ring->sbq_len; i++) { + sbq_desc = &rx_ring->sbq[i]; + memset(sbq_desc, 0, sizeof(*sbq_desc)); + sbq_desc->index = i; + sbq_desc->addr = bq; + bq++; + } +} + +static void ql_free_rx_resources(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + /* Free the small buffer queue. */ + if (rx_ring->sbq_base) { + pci_free_consistent(qdev->pdev, + rx_ring->sbq_size, + rx_ring->sbq_base, rx_ring->sbq_base_dma); + rx_ring->sbq_base = NULL; + } + + /* Free the small buffer queue control blocks. */ + kfree(rx_ring->sbq); + rx_ring->sbq = NULL; + + /* Free the large buffer queue. 
*/ + if (rx_ring->lbq_base) { + pci_free_consistent(qdev->pdev, + rx_ring->lbq_size, + rx_ring->lbq_base, rx_ring->lbq_base_dma); + rx_ring->lbq_base = NULL; + } + + /* Free the large buffer queue control blocks. */ + kfree(rx_ring->lbq); + rx_ring->lbq = NULL; + + /* Free the rx queue. */ + if (rx_ring->cq_base) { + pci_free_consistent(qdev->pdev, + rx_ring->cq_size, + rx_ring->cq_base, rx_ring->cq_base_dma); + rx_ring->cq_base = NULL; + } +} + +/* Allocate queues and buffers for this completions queue based + * on the values in the parameter structure. */ +static int ql_alloc_rx_resources(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + + /* + * Allocate the completion queue for this rx_ring. + */ + rx_ring->cq_base = + pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, + &rx_ring->cq_base_dma); + + if (rx_ring->cq_base == NULL) { + netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); + return -ENOMEM; + } + + if (rx_ring->sbq_len) { + /* + * Allocate small buffer queue. + */ + rx_ring->sbq_base = + pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, + &rx_ring->sbq_base_dma); + + if (rx_ring->sbq_base == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Small buffer queue allocation failed.\n"); + goto err_mem; + } + + /* + * Allocate small buffer queue control blocks. + */ + rx_ring->sbq = + kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), + GFP_KERNEL); + if (rx_ring->sbq == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Small buffer queue control block allocation failed.\n"); + goto err_mem; + } + + ql_init_sbq_ring(qdev, rx_ring); + } + + if (rx_ring->lbq_len) { + /* + * Allocate large buffer queue. + */ + rx_ring->lbq_base = + pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, + &rx_ring->lbq_base_dma); + + if (rx_ring->lbq_base == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Large buffer queue allocation failed.\n"); + goto err_mem; + } + /* + * Allocate large buffer queue control blocks. + */ + rx_ring->lbq = + kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), + GFP_KERNEL); + if (rx_ring->lbq == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Large buffer queue control block allocation failed.\n"); + goto err_mem; + } + + ql_init_lbq_ring(qdev, rx_ring); + } + + return 0; + +err_mem: + ql_free_rx_resources(qdev, rx_ring); + return -ENOMEM; +} + +static void ql_tx_ring_clean(struct ql_adapter *qdev) +{ + struct tx_ring *tx_ring; + struct tx_ring_desc *tx_ring_desc; + int i, j; + + /* + * Loop through all queues and free + * any resources. + */ + for (j = 0; j < qdev->tx_ring_count; j++) { + tx_ring = &qdev->tx_ring[j]; + for (i = 0; i < tx_ring->wq_len; i++) { + tx_ring_desc = &tx_ring->q[i]; + if (tx_ring_desc && tx_ring_desc->skb) { + netif_err(qdev, ifdown, qdev->ndev, + "Freeing lost SKB %p, from queue %d, index %d.\n", + tx_ring_desc->skb, j, + tx_ring_desc->index); + ql_unmap_send(qdev, tx_ring_desc, + tx_ring_desc->map_cnt); + dev_kfree_skb(tx_ring_desc->skb); + tx_ring_desc->skb = NULL; + } + } + } +} + +static void ql_free_mem_resources(struct ql_adapter *qdev) +{ + int i; + + for (i = 0; i < qdev->tx_ring_count; i++) + ql_free_tx_resources(qdev, &qdev->tx_ring[i]); + for (i = 0; i < qdev->rx_ring_count; i++) + ql_free_rx_resources(qdev, &qdev->rx_ring[i]); + ql_free_shadow_space(qdev); +} + +static int ql_alloc_mem_resources(struct ql_adapter *qdev) +{ + int i; + + /* Allocate space for our shadow registers and such. 
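+	 * Two page-sized DMA-coherent areas are carved up by hand: the rx
+	 * area holds a producer-index word plus the lbq/sbq indirection
+	 * lists for each completion queue, and the tx area holds one
+	 * consumer-index word per work queue. ql_start_rx_ring() indexes
+	 * into it roughly as:
+	 *
+	 *	shadow_reg = qdev->rx_ring_shadow_reg_area +
+	 *		     (rx_ring->cq_id * RX_RING_SHADOW_SPACE);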
*/ + if (ql_alloc_shadow_space(qdev)) + return -ENOMEM; + + for (i = 0; i < qdev->rx_ring_count; i++) { + if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { + netif_err(qdev, ifup, qdev->ndev, + "RX resource allocation failed.\n"); + goto err_mem; + } + } + /* Allocate tx queue resources */ + for (i = 0; i < qdev->tx_ring_count; i++) { + if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { + netif_err(qdev, ifup, qdev->ndev, + "TX resource allocation failed.\n"); + goto err_mem; + } + } + return 0; + +err_mem: + ql_free_mem_resources(qdev); + return -ENOMEM; +} + +/* Set up the rx ring control block and pass it to the chip. + * The control block is defined as + * "Completion Queue Initialization Control Block", or cqicb. + */ +static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + struct cqicb *cqicb = &rx_ring->cqicb; + void *shadow_reg = qdev->rx_ring_shadow_reg_area + + (rx_ring->cq_id * RX_RING_SHADOW_SPACE); + u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + + (rx_ring->cq_id * RX_RING_SHADOW_SPACE); + void __iomem *doorbell_area = + qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); + int err = 0; + u16 bq_len; + u64 tmp; + __le64 *base_indirect_ptr; + int page_entries; + + /* Set up the shadow registers for this ring. */ + rx_ring->prod_idx_sh_reg = shadow_reg; + rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; + *rx_ring->prod_idx_sh_reg = 0; + shadow_reg += sizeof(u64); + shadow_reg_dma += sizeof(u64); + rx_ring->lbq_base_indirect = shadow_reg; + rx_ring->lbq_base_indirect_dma = shadow_reg_dma; + shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); + shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); + rx_ring->sbq_base_indirect = shadow_reg; + rx_ring->sbq_base_indirect_dma = shadow_reg_dma; + + /* PCI doorbell mem area + 0x00 for consumer index register */ + rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; + rx_ring->cnsmr_idx = 0; + rx_ring->curr_entry = rx_ring->cq_base; + + /* PCI doorbell mem area + 0x04 for valid register */ + rx_ring->valid_db_reg = doorbell_area + 0x04; + + /* PCI doorbell mem area + 0x18 for large buffer consumer */ + rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); + + /* PCI doorbell mem area + 0x1c */ + rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); + + memset((void *)cqicb, 0, sizeof(struct cqicb)); + cqicb->msix_vect = rx_ring->irq; + + bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; + cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); + + cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); + + cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); + + /* + * Set up the control block load flags. + */ + cqicb->flags = FLAGS_LC | /* Load queue base address */ + FLAGS_LV | /* Load MSI-X vector */ + FLAGS_LI; /* Load irq delay values */ + if (rx_ring->lbq_len) { + cqicb->flags |= FLAGS_LL; /* Load lbq values */ + tmp = (u64)rx_ring->lbq_base_dma; + base_indirect_ptr = rx_ring->lbq_base_indirect; + page_entries = 0; + do { + *base_indirect_ptr = cpu_to_le64(tmp); + tmp += DB_PAGE_SIZE; + base_indirect_ptr++; + page_entries++; + } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); + cqicb->lbq_addr = + cpu_to_le64(rx_ring->lbq_base_indirect_dma); + bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : + (u16) rx_ring->lbq_buf_size; + cqicb->lbq_buf_size = cpu_to_le16(bq_len); + bq_len = (rx_ring->lbq_len == 65536) ? 
0 : + (u16) rx_ring->lbq_len; + cqicb->lbq_len = cpu_to_le16(bq_len); + rx_ring->lbq_prod_idx = 0; + rx_ring->lbq_curr_idx = 0; + rx_ring->lbq_clean_idx = 0; + rx_ring->lbq_free_cnt = rx_ring->lbq_len; + } + if (rx_ring->sbq_len) { + cqicb->flags |= FLAGS_LS; /* Load sbq values */ + tmp = (u64)rx_ring->sbq_base_dma; + base_indirect_ptr = rx_ring->sbq_base_indirect; + page_entries = 0; + do { + *base_indirect_ptr = cpu_to_le64(tmp); + tmp += DB_PAGE_SIZE; + base_indirect_ptr++; + page_entries++; + } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); + cqicb->sbq_addr = + cpu_to_le64(rx_ring->sbq_base_indirect_dma); + cqicb->sbq_buf_size = + cpu_to_le16((u16)(rx_ring->sbq_buf_size)); + bq_len = (rx_ring->sbq_len == 65536) ? 0 : + (u16) rx_ring->sbq_len; + cqicb->sbq_len = cpu_to_le16(bq_len); + rx_ring->sbq_prod_idx = 0; + rx_ring->sbq_curr_idx = 0; + rx_ring->sbq_clean_idx = 0; + rx_ring->sbq_free_cnt = rx_ring->sbq_len; + } + switch (rx_ring->type) { + case TX_Q: + cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); + cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); + break; + case RX_Q: + /* Inbound completion handling rx_rings run in + * separate NAPI contexts. + */ + netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, + 64); + cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); + cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); + break; + default: + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Invalid rx_ring->type = %d.\n", rx_ring->type); + } + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Initializing rx work queue.\n"); + err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), + CFG_LCQ, rx_ring->cq_id); + if (err) { + netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); + return err; + } + return err; +} + +static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) +{ + struct wqicb *wqicb = (struct wqicb *)tx_ring; + void __iomem *doorbell_area = + qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); + void *shadow_reg = qdev->tx_ring_shadow_reg_area + + (tx_ring->wq_id * sizeof(u64)); + u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + + (tx_ring->wq_id * sizeof(u64)); + int err = 0; + + /* + * Assign doorbell registers for this tx_ring. + */ + /* TX PCI doorbell mem area for tx producer index */ + tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; + tx_ring->prod_idx = 0; + /* TX PCI doorbell mem area + 0x04 */ + tx_ring->valid_db_reg = doorbell_area + 0x04; + + /* + * Assign shadow registers for this tx_ring. 
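+	 * The chip DMAs this work queue's consumer index into
+	 * *cnsmr_idx_sh_reg; the bus address is handed to the chip below
+	 * via
+	 *
+	 *	wqicb->cnsmr_idx_addr =
+	 *		cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
+	 *
+	 * so the driver can check TX progress without an MMIO read.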
+ */ + tx_ring->cnsmr_idx_sh_reg = shadow_reg; + tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; + + wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); + wqicb->flags = cpu_to_le16(Q_FLAGS_LC | + Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); + wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); + wqicb->rid = 0; + wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); + + wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); + + ql_init_tx_ring(qdev, tx_ring); + + err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, + (u16) tx_ring->wq_id); + if (err) { + netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); + return err; + } + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Successfully loaded WQICB.\n"); + return err; +} + +static void ql_disable_msix(struct ql_adapter *qdev) +{ + if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { + pci_disable_msix(qdev->pdev); + clear_bit(QL_MSIX_ENABLED, &qdev->flags); + kfree(qdev->msi_x_entry); + qdev->msi_x_entry = NULL; + } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { + pci_disable_msi(qdev->pdev); + clear_bit(QL_MSI_ENABLED, &qdev->flags); + } +} + +/* We start by trying to get the number of vectors + * stored in qdev->intr_count. If we don't get that + * many then we reduce the count and try again. + */ +static void ql_enable_msix(struct ql_adapter *qdev) +{ + int i, err; + + /* Get the MSIX vectors. */ + if (qlge_irq_type == MSIX_IRQ) { + /* Try to alloc space for the msix struct, + * if it fails then go to MSI/legacy. + */ + qdev->msi_x_entry = kcalloc(qdev->intr_count, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!qdev->msi_x_entry) { + qlge_irq_type = MSI_IRQ; + goto msi; + } + + for (i = 0; i < qdev->intr_count; i++) + qdev->msi_x_entry[i].entry = i; + + /* Loop to get our vectors. We start with + * what we want and settle for what we get. + */ + do { + err = pci_enable_msix(qdev->pdev, + qdev->msi_x_entry, qdev->intr_count); + if (err > 0) + qdev->intr_count = err; + } while (err > 0); + + if (err < 0) { + kfree(qdev->msi_x_entry); + qdev->msi_x_entry = NULL; + netif_warn(qdev, ifup, qdev->ndev, + "MSI-X Enable failed, trying MSI.\n"); + qdev->intr_count = 1; + qlge_irq_type = MSI_IRQ; + } else if (err == 0) { + set_bit(QL_MSIX_ENABLED, &qdev->flags); + netif_info(qdev, ifup, qdev->ndev, + "MSI-X Enabled, got %d vectors.\n", + qdev->intr_count); + return; + } + } +msi: + qdev->intr_count = 1; + if (qlge_irq_type == MSI_IRQ) { + if (!pci_enable_msi(qdev->pdev)) { + set_bit(QL_MSI_ENABLED, &qdev->flags); + netif_info(qdev, ifup, qdev->ndev, + "Running with MSI interrupts.\n"); + return; + } + } + qlge_irq_type = LEG_IRQ; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Running with legacy interrupts.\n"); +} + +/* Each vector services 1 RSS ring and and 1 or more + * TX completion rings. This function loops through + * the TX completion rings and assigns the vector that + * will service it. An example would be if there are + * 2 vectors (so 2 RSS rings) and 8 TX completion rings. + * This would mean that vector 0 would service RSS ring 0 + * and TX completion rings 0,1,2 and 3. Vector 1 would + * service RSS ring 1 and TX completion rings 4,5,6 and 7. 
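+ *
+ * In general, with t = tx_ring_count / intr_count TX rings per vector,
+ * vector v services RSS ring v and TX completion rings
+ * [v * t, (v + 1) * t), which is what the loop below computes from
+ *
+ *	tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;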
+ */ +static void ql_set_tx_vect(struct ql_adapter *qdev) +{ + int i, j, vect; + u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { + /* Assign irq vectors to TX rx_rings.*/ + for (vect = 0, j = 0, i = qdev->rss_ring_count; + i < qdev->rx_ring_count; i++) { + if (j == tx_rings_per_vector) { + vect++; + j = 0; + } + qdev->rx_ring[i].irq = vect; + j++; + } + } else { + /* For single vector all rings have an irq + * of zero. + */ + for (i = 0; i < qdev->rx_ring_count; i++) + qdev->rx_ring[i].irq = 0; + } +} + +/* Set the interrupt mask for this vector. Each vector + * will service 1 RSS ring and 1 or more TX completion + * rings. This function sets up a bit mask per vector + * that indicates which rings it services. + */ +static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx) +{ + int j, vect = ctx->intr; + u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { + /* Add the RSS ring serviced by this vector + * to the mask. + */ + ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); + /* Add the TX ring(s) serviced by this vector + * to the mask. */ + for (j = 0; j < tx_rings_per_vector; j++) { + ctx->irq_mask |= + (1 << qdev->rx_ring[qdev->rss_ring_count + + (vect * tx_rings_per_vector) + j].cq_id); + } + } else { + /* For single vector we just shift each queue's + * ID into the mask. + */ + for (j = 0; j < qdev->rx_ring_count; j++) + ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); + } +} + +/* + * Here we build the intr_context structures based on + * our rx_ring count and intr vector count. + * The intr_context structure is used to hook each vector + * to possibly different handlers. + */ +static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) +{ + int i = 0; + struct intr_context *intr_context = &qdev->intr_context[0]; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { + /* Each rx_ring has it's + * own intr_context since we have separate + * vectors for each queue. + */ + for (i = 0; i < qdev->intr_count; i++, intr_context++) { + qdev->rx_ring[i].irq = i; + intr_context->intr = i; + intr_context->qdev = qdev; + /* Set up this vector's bit-mask that indicates + * which queues it services. + */ + ql_set_irq_mask(qdev, intr_context); + /* + * We set up each vectors enable/disable/read bits so + * there's no bit/mask calculations in the critical path. + */ + intr_context->intr_en_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD + | i; + intr_context->intr_dis_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | + INTR_EN_IHD | i; + intr_context->intr_read_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | + i; + if (i == 0) { + /* The first vector/queue handles + * broadcast/multicast, fatal errors, + * and firmware events. This in addition + * to normal inbound NAPI processing. + */ + intr_context->handler = qlge_isr; + sprintf(intr_context->name, "%s-rx-%d", + qdev->ndev->name, i); + } else { + /* + * Inbound queues handle unicast frames only. + */ + intr_context->handler = qlge_msix_rx_isr; + sprintf(intr_context->name, "%s-rx-%d", + qdev->ndev->name, i); + } + } + } else { + /* + * All rx_rings use the same intr_context since + * there is only one vector. 
+ */ + intr_context->intr = 0; + intr_context->qdev = qdev; + /* + * We set up each vectors enable/disable/read bits so + * there's no bit/mask calculations in the critical path. + */ + intr_context->intr_en_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; + intr_context->intr_dis_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_DISABLE; + intr_context->intr_read_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; + /* + * Single interrupt means one handler for all rings. + */ + intr_context->handler = qlge_isr; + sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); + /* Set up this vector's bit-mask that indicates + * which queues it services. In this case there is + * a single vector so it will service all RSS and + * TX completion rings. + */ + ql_set_irq_mask(qdev, intr_context); + } + /* Tell the TX completion rings which MSIx vector + * they will be using. + */ + ql_set_tx_vect(qdev); +} + +static void ql_free_irq(struct ql_adapter *qdev) +{ + int i; + struct intr_context *intr_context = &qdev->intr_context[0]; + + for (i = 0; i < qdev->intr_count; i++, intr_context++) { + if (intr_context->hooked) { + if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { + free_irq(qdev->msi_x_entry[i].vector, + &qdev->rx_ring[i]); + netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev, + "freeing msix interrupt %d.\n", i); + } else { + free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); + netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev, + "freeing msi interrupt %d.\n", i); + } + } + } + ql_disable_msix(qdev); +} + +static int ql_request_irq(struct ql_adapter *qdev) +{ + int i; + int status = 0; + struct pci_dev *pdev = qdev->pdev; + struct intr_context *intr_context = &qdev->intr_context[0]; + + ql_resolve_queues_to_irqs(qdev); + + for (i = 0; i < qdev->intr_count; i++, intr_context++) { + atomic_set(&intr_context->irq_cnt, 0); + if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { + status = request_irq(qdev->msi_x_entry[i].vector, + intr_context->handler, + 0, + intr_context->name, + &qdev->rx_ring[i]); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed request for MSIX interrupt %d.\n", + i); + goto err_irq; + } else { + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Hooked intr %d, queue type %s, with name %s.\n", + i, + qdev->rx_ring[i].type == DEFAULT_Q ? + "DEFAULT_Q" : + qdev->rx_ring[i].type == TX_Q ? + "TX_Q" : + qdev->rx_ring[i].type == RX_Q ? + "RX_Q" : "", + intr_context->name); + } + } else { + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "trying msi or legacy interrupts.\n"); + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s: irq = %d.\n", __func__, pdev->irq); + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s: context->name = %s.\n", __func__, + intr_context->name); + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s: dev_id = 0x%p.\n", __func__, + &qdev->rx_ring[0]); + status = + request_irq(pdev->irq, qlge_isr, + test_bit(QL_MSI_ENABLED, + &qdev-> + flags) ? 0 : IRQF_SHARED, + intr_context->name, &qdev->rx_ring[0]); + if (status) + goto err_irq; + + netif_err(qdev, ifup, qdev->ndev, + "Hooked intr %d, queue type %s, with name %s.\n", + i, + qdev->rx_ring[0].type == DEFAULT_Q ? + "DEFAULT_Q" : + qdev->rx_ring[0].type == TX_Q ? "TX_Q" : + qdev->rx_ring[0].type == RX_Q ? 
"RX_Q" : "", + intr_context->name); + } + intr_context->hooked = 1; + } + return status; +err_irq: + netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n"); + ql_free_irq(qdev); + return status; +} + +static int ql_start_rss(struct ql_adapter *qdev) +{ + static const u8 init_hash_seed[] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa + }; + struct ricb *ricb = &qdev->ricb; + int status = 0; + int i; + u8 *hash_id = (u8 *) ricb->hash_cq_id; + + memset((void *)ricb, 0, sizeof(*ricb)); + + ricb->base_cq = RSS_L4K; + ricb->flags = + (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); + ricb->mask = cpu_to_le16((u16)(0x3ff)); + + /* + * Fill out the Indirection Table. + */ + for (i = 0; i < 1024; i++) + hash_id[i] = (i & (qdev->rss_ring_count - 1)); + + memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); + memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); + + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n"); + + status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); + return status; + } + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Successfully loaded RICB.\n"); + return status; +} + +static int ql_clear_routing_entries(struct ql_adapter *qdev) +{ + int i, status = 0; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + /* Clear all the entries in the routing table. */ + for (i = 0; i < 16; i++) { + status = ql_set_routing_reg(qdev, i, 0, 0); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for CAM packets.\n"); + break; + } + } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +/* Initialize the frame-to-queue routing. */ +static int ql_route_initialize(struct ql_adapter *qdev) +{ + int status = 0; + + /* Clear all the entries in the routing table. */ + status = ql_clear_routing_entries(qdev); + if (status) + return status; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + + status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, + RT_IDX_IP_CSUM_ERR, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register " + "for IP CSUM error packets.\n"); + goto exit; + } + status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, + RT_IDX_TU_CSUM_ERR, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register " + "for TCP/UDP CSUM error packets.\n"); + goto exit; + } + status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for broadcast packets.\n"); + goto exit; + } + /* If we have more than one inbound queue, then turn on RSS in the + * routing block. 
+ */ + if (qdev->rss_ring_count > 1) { + status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, + RT_IDX_RSS_MATCH, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for MATCH RSS packets.\n"); + goto exit; + } + } + + status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, + RT_IDX_CAM_HIT, 1); + if (status) + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for CAM packets.\n"); +exit: + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +int ql_cam_route_initialize(struct ql_adapter *qdev) +{ + int status, set; + + /* If check if the link is up and use to + * determine if we are setting or clearing + * the MAC address in the CAM. + */ + set = ql_read32(qdev, STS); + set &= qdev->port_link_up; + status = ql_set_mac_addr(qdev, set); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); + return status; + } + + status = ql_route_initialize(qdev); + if (status) + netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n"); + + return status; +} + +static int ql_adapter_initialize(struct ql_adapter *qdev) +{ + u32 value, mask; + int i; + int status = 0; + + /* + * Set up the System register to halt on errors. + */ + value = SYS_EFE | SYS_FAE; + mask = value << 16; + ql_write32(qdev, SYS, mask | value); + + /* Set the default queue, and VLAN behavior. */ + value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV; + mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16); + ql_write32(qdev, NIC_RCV_CFG, (mask | value)); + + /* Set the MPI interrupt to enabled. */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); + + /* Enable the function, set pagesize, enable error checking. */ + value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | + FSC_EC | FSC_VM_PAGE_4K; + value |= SPLT_SETTING; + + /* Set/clear header splitting. */ + mask = FSC_VM_PAGESIZE_MASK | + FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); + ql_write32(qdev, FSC, mask | value); + + ql_write32(qdev, SPLT_HDR, SPLT_LEN); + + /* Set RX packet routing to use port/pci function on which the + * packet arrived on in addition to usual frame routing. + * This is helpful on bonding where both interfaces can have + * the same MAC address. + */ + ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); + /* Reroute all packets to our Interface. + * They may have been routed to MPI firmware + * due to WOL. + */ + value = ql_read32(qdev, MGMT_RCV_CFG); + value &= ~MGMT_RCV_CFG_RM; + mask = 0xffff0000; + + /* Sticky reg needs clearing due to WOL. */ + ql_write32(qdev, MGMT_RCV_CFG, mask); + ql_write32(qdev, MGMT_RCV_CFG, mask | value); + + /* Default WOL is enable on Mezz cards */ + if (qdev->pdev->subsystem_device == 0x0068 || + qdev->pdev->subsystem_device == 0x0180) + qdev->wol = WAKE_MAGIC; + + /* Start up the rx queues. */ + for (i = 0; i < qdev->rx_ring_count; i++) { + status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to start rx ring[%d].\n", i); + return status; + } + } + + /* If there is more than one inbound completion queue + * then download a RICB to configure RSS. + */ + if (qdev->rss_ring_count > 1) { + status = ql_start_rss(qdev); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); + return status; + } + } + + /* Start up the tx queues. 
 */
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
+		if (status) {
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Failed to start tx ring[%d].\n", i);
+			return status;
+		}
+	}
+
+	/* Initialize the port and set the max framesize. */
+	status = qdev->nic_ops->port_initialize(qdev);
+	if (status)
+		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
+
+	/* Set up the MAC address and frame routing filter. */
+	status = ql_cam_route_initialize(qdev);
+	if (status) {
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Failed to init CAM/Routing tables.\n");
+		return status;
+	}
+
+	/* Start NAPI for the RSS queues. */
+	for (i = 0; i < qdev->rss_ring_count; i++) {
+		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+			     "Enabling NAPI for rx_ring[%d].\n", i);
+		napi_enable(&qdev->rx_ring[i].napi);
+	}
+
+	return status;
+}
+
+/* Issue soft reset to chip. */
+static int ql_adapter_reset(struct ql_adapter *qdev)
+{
+	u32 value;
+	int status = 0;
+	unsigned long end_jiffies;
+
+	/* Clear all the entries in the routing table. */
+	status = ql_clear_routing_entries(qdev);
+	if (status) {
+		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
+		return status;
+	}
+
+	end_jiffies = jiffies +
+		max((unsigned long)1, usecs_to_jiffies(30));
+
+	/* Check if bit is set then skip the mailbox command and
+	 * clear the bit, else we are in normal reset process.
+	 */
+	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
+		/* Stop management traffic. */
+		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+
+		/* Wait for the NIC and MGMNT FIFOs to empty. */
+		ql_wait_fifo_empty(qdev);
+	} else
+		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
+
+	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+
+	do {
+		value = ql_read32(qdev, RST_FO);
+		if ((value & RST_FO_FR) == 0)
+			break;
+		cpu_relax();
+	} while (time_before(jiffies, end_jiffies));
+
+	if (value & RST_FO_FR) {
+		netif_err(qdev, ifdown, qdev->ndev,
+			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
+		status = -ETIMEDOUT;
+	}
+
+	/* Resume management traffic. */
+	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
+	return status;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	netif_info(qdev, probe, qdev->ndev,
+		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
+		   "XG Roll = %d, XG Rev = %d.\n",
+		   qdev->func,
+		   qdev->port,
+		   qdev->chip_rev_id & 0x0000000f,
+		   qdev->chip_rev_id >> 4 & 0x0000000f,
+		   qdev->chip_rev_id >> 8 & 0x0000000f,
+		   qdev->chip_rev_id >> 12 & 0x0000000f);
+	netif_info(qdev, probe, qdev->ndev,
+		   "MAC address %pM\n", ndev->dev_addr);
+}
+
+static int ql_wol(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 wol = MB_WOL_DISABLE;
+
+	/* The CAM is still intact after a reset, but if we
+	 * are doing WOL, then we may need to program the
+	 * routing regs. We would also need to issue the mailbox
+	 * commands to instruct the MPI what to do per the ethtool
+	 * settings.
+	 */
+
+	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
+			WAKE_MCAST | WAKE_BCAST)) {
+		netif_err(qdev, ifdown, qdev->ndev,
+			  "Unsupported WOL parameter.
qdev->wol = 0x%x.\n", + qdev->wol); + return -EINVAL; + } + + if (qdev->wol & WAKE_MAGIC) { + status = ql_mb_wol_set_magic(qdev, 1); + if (status) { + netif_err(qdev, ifdown, qdev->ndev, + "Failed to set magic packet on %s.\n", + qdev->ndev->name); + return status; + } else + netif_info(qdev, drv, qdev->ndev, + "Enabled magic packet successfully on %s.\n", + qdev->ndev->name); + + wol |= MB_WOL_MAGIC_PKT; + } + + if (qdev->wol) { + wol |= MB_WOL_MODE_ON; + status = ql_mb_wol_mode(qdev, wol); + netif_err(qdev, drv, qdev->ndev, + "WOL %s (wol code 0x%x) on %s\n", + (status == 0) ? "Successfully set" : "Failed", + wol, qdev->ndev->name); + } + + return status; +} + +static void ql_cancel_all_work_sync(struct ql_adapter *qdev) +{ + + /* Don't kill the reset worker thread if we + * are in the process of recovery. + */ + if (test_bit(QL_ADAPTER_UP, &qdev->flags)) + cancel_delayed_work_sync(&qdev->asic_reset_work); + cancel_delayed_work_sync(&qdev->mpi_reset_work); + cancel_delayed_work_sync(&qdev->mpi_work); + cancel_delayed_work_sync(&qdev->mpi_idc_work); + cancel_delayed_work_sync(&qdev->mpi_core_to_log); + cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); +} + +static int ql_adapter_down(struct ql_adapter *qdev) +{ + int i, status = 0; + + ql_link_off(qdev); + + ql_cancel_all_work_sync(qdev); + + for (i = 0; i < qdev->rss_ring_count; i++) + napi_disable(&qdev->rx_ring[i].napi); + + clear_bit(QL_ADAPTER_UP, &qdev->flags); + + ql_disable_interrupts(qdev); + + ql_tx_ring_clean(qdev); + + /* Call netif_napi_del() from common point. + */ + for (i = 0; i < qdev->rss_ring_count; i++) + netif_napi_del(&qdev->rx_ring[i].napi); + + status = ql_adapter_reset(qdev); + if (status) + netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", + qdev->func); + ql_free_rx_buffers(qdev); + + return status; +} + +static int ql_adapter_up(struct ql_adapter *qdev) +{ + int err = 0; + + err = ql_adapter_initialize(qdev); + if (err) { + netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); + goto err_init; + } + set_bit(QL_ADAPTER_UP, &qdev->flags); + ql_alloc_rx_buffers(qdev); + /* If the port is initialized and the + * link is up the turn on the carrier. + */ + if ((ql_read32(qdev, STS) & qdev->port_init) && + (ql_read32(qdev, STS) & qdev->port_link_up)) + ql_link_on(qdev); + /* Restore rx mode. */ + clear_bit(QL_ALLMULTI, &qdev->flags); + clear_bit(QL_PROMISCUOUS, &qdev->flags); + qlge_set_multicast_list(qdev->ndev); + + /* Restore vlan setting. */ + qlge_restore_vlan(qdev); + + ql_enable_interrupts(qdev); + ql_enable_all_completion_interrupts(qdev); + netif_tx_start_all_queues(qdev->ndev); + + return 0; +err_init: + ql_adapter_reset(qdev); + return err; +} + +static void ql_release_adapter_resources(struct ql_adapter *qdev) +{ + ql_free_mem_resources(qdev); + ql_free_irq(qdev); +} + +static int ql_get_adapter_resources(struct ql_adapter *qdev) +{ + int status = 0; + + if (ql_alloc_mem_resources(qdev)) { + netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); + return -ENOMEM; + } + status = ql_request_irq(qdev); + return status; +} + +static int qlge_close(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + /* If we hit pci_channel_io_perm_failure + * failure condition, then we already + * brought the adapter down. + */ + if (test_bit(QL_EEH_FATAL, &qdev->flags)) { + netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); + clear_bit(QL_EEH_FATAL, &qdev->flags); + return 0; + } + + /* + * Wait for device to recover from a reset. 
+ * (Rarely happens, but possible.) + */ + while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) + msleep(1); + ql_adapter_down(qdev); + ql_release_adapter_resources(qdev); + return 0; +} + +static int ql_configure_rings(struct ql_adapter *qdev) +{ + int i; + struct rx_ring *rx_ring; + struct tx_ring *tx_ring; + int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); + unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? + LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; + + qdev->lbq_buf_order = get_order(lbq_buf_len); + + /* In a perfect world we have one RSS ring for each CPU + * and each has it's own vector. To do that we ask for + * cpu_cnt vectors. ql_enable_msix() will adjust the + * vector count to what we actually get. We then + * allocate an RSS ring for each. + * Essentially, we are doing min(cpu_count, msix_vector_count). + */ + qdev->intr_count = cpu_cnt; + ql_enable_msix(qdev); + /* Adjust the RSS ring count to the actual vector count. */ + qdev->rss_ring_count = qdev->intr_count; + qdev->tx_ring_count = cpu_cnt; + qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; + + for (i = 0; i < qdev->tx_ring_count; i++) { + tx_ring = &qdev->tx_ring[i]; + memset((void *)tx_ring, 0, sizeof(*tx_ring)); + tx_ring->qdev = qdev; + tx_ring->wq_id = i; + tx_ring->wq_len = qdev->tx_ring_size; + tx_ring->wq_size = + tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); + + /* + * The completion queue ID for the tx rings start + * immediately after the rss rings. + */ + tx_ring->cq_id = qdev->rss_ring_count + i; + } + + for (i = 0; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + memset((void *)rx_ring, 0, sizeof(*rx_ring)); + rx_ring->qdev = qdev; + rx_ring->cq_id = i; + rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ + if (i < qdev->rss_ring_count) { + /* + * Inbound (RSS) queues. + */ + rx_ring->cq_len = qdev->rx_ring_size; + rx_ring->cq_size = + rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); + rx_ring->lbq_len = NUM_LARGE_BUFFERS; + rx_ring->lbq_size = + rx_ring->lbq_len * sizeof(__le64); + rx_ring->lbq_buf_size = (u16)lbq_buf_len; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "lbq_buf_size %d, order = %d\n", + rx_ring->lbq_buf_size, + qdev->lbq_buf_order); + rx_ring->sbq_len = NUM_SMALL_BUFFERS; + rx_ring->sbq_size = + rx_ring->sbq_len * sizeof(__le64); + rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; + rx_ring->type = RX_Q; + } else { + /* + * Outbound queue handles outbound completions only. + */ + /* outbound cq is same size as tx_ring it services. */ + rx_ring->cq_len = qdev->tx_ring_size; + rx_ring->cq_size = + rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); + rx_ring->lbq_len = 0; + rx_ring->lbq_size = 0; + rx_ring->lbq_buf_size = 0; + rx_ring->sbq_len = 0; + rx_ring->sbq_size = 0; + rx_ring->sbq_buf_size = 0; + rx_ring->type = TX_Q; + } + } + return 0; +} + +static int qlge_open(struct net_device *ndev) +{ + int err = 0; + struct ql_adapter *qdev = netdev_priv(ndev); + + err = ql_adapter_reset(qdev); + if (err) + return err; + + err = ql_configure_rings(qdev); + if (err) + return err; + + err = ql_get_adapter_resources(qdev); + if (err) + goto error_up; + + err = ql_adapter_up(qdev); + if (err) + goto error_up; + + return err; + +error_up: + ql_release_adapter_resources(qdev); + return err; +} + +static int ql_change_rx_buffers(struct ql_adapter *qdev) +{ + struct rx_ring *rx_ring; + int i, status; + u32 lbq_buf_len; + + /* Wait for an outstanding reset to complete. 
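+	 * The wait below is bounded (3 x 1 second). The resize itself is
+	 * a full down/up cycle; e.g. for an MTU above 1500:
+	 *
+	 *	lbq_buf_len = LARGE_BUFFER_MAX_SIZE;
+	 *	qdev->lbq_buf_order = get_order(lbq_buf_len);
+	 *
+	 * and each RSS ring's lbq_buf_size is set to the new length before
+	 * the adapter is brought back up.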
*/ + if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { + int i = 3; + while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { + netif_err(qdev, ifup, qdev->ndev, + "Waiting for adapter UP...\n"); + ssleep(1); + } + + if (!i) { + netif_err(qdev, ifup, qdev->ndev, + "Timed out waiting for adapter UP\n"); + return -ETIMEDOUT; + } + } + + status = ql_adapter_down(qdev); + if (status) + goto error; + + /* Get the new rx buffer size. */ + lbq_buf_len = (qdev->ndev->mtu > 1500) ? + LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; + qdev->lbq_buf_order = get_order(lbq_buf_len); + + for (i = 0; i < qdev->rss_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + /* Set the new size. */ + rx_ring->lbq_buf_size = lbq_buf_len; + } + + status = ql_adapter_up(qdev); + if (status) + goto error; + + return status; +error: + netif_alert(qdev, ifup, qdev->ndev, + "Driver up/down cycle failed, closing device.\n"); + set_bit(QL_ADAPTER_UP, &qdev->flags); + dev_close(qdev->ndev); + return status; +} + +static int qlge_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + + if (ndev->mtu == 1500 && new_mtu == 9000) { + netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); + } else if (ndev->mtu == 9000 && new_mtu == 1500) { + netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); + } else + return -EINVAL; + + queue_delayed_work(qdev->workqueue, + &qdev->mpi_port_cfg_work, 3*HZ); + + ndev->mtu = new_mtu; + + if (!netif_running(qdev->ndev)) { + return 0; + } + + status = ql_change_rx_buffers(qdev); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Changing MTU failed.\n"); + } + + return status; +} + +static struct net_device_stats *qlge_get_stats(struct net_device + *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + struct rx_ring *rx_ring = &qdev->rx_ring[0]; + struct tx_ring *tx_ring = &qdev->tx_ring[0]; + unsigned long pkts, mcast, dropped, errors, bytes; + int i; + + /* Get RX stats. */ + pkts = mcast = dropped = errors = bytes = 0; + for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { + pkts += rx_ring->rx_packets; + bytes += rx_ring->rx_bytes; + dropped += rx_ring->rx_dropped; + errors += rx_ring->rx_errors; + mcast += rx_ring->rx_multicast; + } + ndev->stats.rx_packets = pkts; + ndev->stats.rx_bytes = bytes; + ndev->stats.rx_dropped = dropped; + ndev->stats.rx_errors = errors; + ndev->stats.multicast = mcast; + + /* Get TX stats. */ + pkts = errors = bytes = 0; + for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { + pkts += tx_ring->tx_packets; + bytes += tx_ring->tx_bytes; + errors += tx_ring->tx_errors; + } + ndev->stats.tx_packets = pkts; + ndev->stats.tx_bytes = bytes; + ndev->stats.tx_errors = errors; + return &ndev->stats; +} + +static void qlge_set_multicast_list(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + struct netdev_hw_addr *ha; + int i, status; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return; + /* + * Set or clear promiscuous mode if a + * transition is taking place. 
+	 */
+	if (ndev->flags & IFF_PROMISC) {
+		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
+				netif_err(qdev, hw, qdev->ndev,
+					  "Failed to set promiscuous mode.\n");
+			} else {
+				set_bit(QL_PROMISCUOUS, &qdev->flags);
+			}
+		}
+	} else {
+		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
+				netif_err(qdev, hw, qdev->ndev,
+					  "Failed to clear promiscuous mode.\n");
+			} else {
+				clear_bit(QL_PROMISCUOUS, &qdev->flags);
+			}
+		}
+	}
+
+	/*
+	 * Set or clear all multicast mode if a
+	 * transition is taking place.
+	 */
+	if ((ndev->flags & IFF_ALLMULTI) ||
+	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
+		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
+				netif_err(qdev, hw, qdev->ndev,
+					  "Failed to set all-multi mode.\n");
+			} else {
+				set_bit(QL_ALLMULTI, &qdev->flags);
+			}
+		}
+	} else {
+		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
+			if (ql_set_routing_reg
+			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
+				netif_err(qdev, hw, qdev->ndev,
+					  "Failed to clear all-multi mode.\n");
+			} else {
+				clear_bit(QL_ALLMULTI, &qdev->flags);
+			}
+		}
+	}
+
+	if (!netdev_mc_empty(ndev)) {
+		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+		if (status)
+			goto exit;
+		i = 0;
+		netdev_for_each_mc_addr(ha, ndev) {
+			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
+						MAC_ADDR_TYPE_MULTI_MAC, i)) {
+				netif_err(qdev, hw, qdev->ndev,
+					  "Failed to load multicast address.\n");
+				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+				goto exit;
+			}
+			i++;
+		}
+		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+		if (ql_set_routing_reg
+		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
+			netif_err(qdev, hw, qdev->ndev,
+				  "Failed to set multicast match mode.\n");
+		} else {
+			set_bit(QL_ALLMULTI, &qdev->flags);
+		}
+	}
+exit:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+}
+
+static int qlge_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	struct sockaddr *addr = p;
+	int status;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+	/* Update local copy of current mac address. */
+	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+	if (status)
+		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	return status;
+}
+
+static void qlge_tx_timeout(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	ql_queue_asic_error(qdev);
+}
+
+static void ql_asic_reset_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+		container_of(work, struct ql_adapter, asic_reset_work.work);
+	int status;
+	rtnl_lock();
+	status = ql_adapter_down(qdev);
+	if (status)
+		goto error;
+
+	status = ql_adapter_up(qdev);
+	if (status)
+		goto error;
+
+	/* Restore rx mode.
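+	 * Clearing the mode flags first forces qlge_set_multicast_list()
+	 * to reprogram the promiscuous and all-multi routing entries
+	 * after the reset.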
*/ + clear_bit(QL_ALLMULTI, &qdev->flags); + clear_bit(QL_PROMISCUOUS, &qdev->flags); + qlge_set_multicast_list(qdev->ndev); + + rtnl_unlock(); + return; +error: + netif_alert(qdev, ifup, qdev->ndev, + "Driver up/down cycle failed, closing device\n"); + + set_bit(QL_ADAPTER_UP, &qdev->flags); + dev_close(qdev->ndev); + rtnl_unlock(); +} + +static const struct nic_operations qla8012_nic_ops = { + .get_flash = ql_get_8012_flash_params, + .port_initialize = ql_8012_port_initialize, +}; + +static const struct nic_operations qla8000_nic_ops = { + .get_flash = ql_get_8000_flash_params, + .port_initialize = ql_8000_port_initialize, +}; + +/* Find the pcie function number for the other NIC + * on this chip. Since both NIC functions share a + * common firmware we have the lowest enabled function + * do any common work. Examples would be resetting + * after a fatal firmware error, or doing a firmware + * coredump. + */ +static int ql_get_alt_pcie_func(struct ql_adapter *qdev) +{ + int status = 0; + u32 temp; + u32 nic_func1, nic_func2; + + status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, + &temp); + if (status) + return status; + + nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) & + MPI_TEST_NIC_FUNC_MASK); + nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) & + MPI_TEST_NIC_FUNC_MASK); + + if (qdev->func == nic_func1) + qdev->alt_func = nic_func2; + else if (qdev->func == nic_func2) + qdev->alt_func = nic_func1; + else + status = -EIO; + + return status; +} + +static int ql_get_board_info(struct ql_adapter *qdev) +{ + int status; + qdev->func = + (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; + if (qdev->func > 3) + return -EIO; + + status = ql_get_alt_pcie_func(qdev); + if (status) + return status; + + qdev->port = (qdev->func < qdev->alt_func) ? 
0 : 1; + if (qdev->port) { + qdev->xg_sem_mask = SEM_XGMAC1_MASK; + qdev->port_link_up = STS_PL1; + qdev->port_init = STS_PI1; + qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; + qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; + } else { + qdev->xg_sem_mask = SEM_XGMAC0_MASK; + qdev->port_link_up = STS_PL0; + qdev->port_init = STS_PI0; + qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; + qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; + } + qdev->chip_rev_id = ql_read32(qdev, REV_ID); + qdev->device_id = qdev->pdev->device; + if (qdev->device_id == QLGE_DEVICE_ID_8012) + qdev->nic_ops = &qla8012_nic_ops; + else if (qdev->device_id == QLGE_DEVICE_ID_8000) + qdev->nic_ops = &qla8000_nic_ops; + return status; +} + +static void ql_release_all(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + + if (qdev->workqueue) { + destroy_workqueue(qdev->workqueue); + qdev->workqueue = NULL; + } + + if (qdev->reg_base) + iounmap(qdev->reg_base); + if (qdev->doorbell_area) + iounmap(qdev->doorbell_area); + vfree(qdev->mpi_coredump); + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int __devinit ql_init_device(struct pci_dev *pdev, + struct net_device *ndev, int cards_found) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int err = 0; + + memset((void *)qdev, 0, sizeof(*qdev)); + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "PCI device enable failed.\n"); + return err; + } + + qdev->ndev = ndev; + qdev->pdev = pdev; + pci_set_drvdata(pdev, ndev); + + /* Set PCIe read request size */ + err = pcie_set_readrq(pdev, 4096); + if (err) { + dev_err(&pdev->dev, "Set readrq failed.\n"); + goto err_out1; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "PCI region request failed.\n"); + return err; + } + + pci_set_master(pdev); + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + set_bit(QL_DMA64, &qdev->flags); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration.\n"); + goto err_out2; + } + + /* Set PCIe reset type for EEH to fundamental. 
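+	 * Setting needs_freset tells the PCI error-recovery core that a
+	 * hot reset is not enough for this adapter and a fundamental
+	 * (link-level) reset must be used instead.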
*/ + pdev->needs_freset = 1; + pci_save_state(pdev); + qdev->reg_base = + ioremap_nocache(pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1)); + if (!qdev->reg_base) { + dev_err(&pdev->dev, "Register mapping failed.\n"); + err = -ENOMEM; + goto err_out2; + } + + qdev->doorbell_area_size = pci_resource_len(pdev, 3); + qdev->doorbell_area = + ioremap_nocache(pci_resource_start(pdev, 3), + pci_resource_len(pdev, 3)); + if (!qdev->doorbell_area) { + dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); + err = -ENOMEM; + goto err_out2; + } + + err = ql_get_board_info(qdev); + if (err) { + dev_err(&pdev->dev, "Register access failed.\n"); + err = -EIO; + goto err_out2; + } + qdev->msg_enable = netif_msg_init(debug, default_msg); + spin_lock_init(&qdev->hw_lock); + spin_lock_init(&qdev->stats_lock); + + if (qlge_mpi_coredump) { + qdev->mpi_coredump = + vmalloc(sizeof(struct ql_mpi_coredump)); + if (qdev->mpi_coredump == NULL) { + dev_err(&pdev->dev, "Coredump alloc failed.\n"); + err = -ENOMEM; + goto err_out2; + } + if (qlge_force_coredump) + set_bit(QL_FRC_COREDUMP, &qdev->flags); + } + /* make sure the EEPROM is good */ + err = qdev->nic_ops->get_flash(qdev); + if (err) { + dev_err(&pdev->dev, "Invalid FLASH.\n"); + goto err_out2; + } + + memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); + /* Keep local copy of current mac address. */ + memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); + + /* Set up the default ring sizes. */ + qdev->tx_ring_size = NUM_TX_RING_ENTRIES; + qdev->rx_ring_size = NUM_RX_RING_ENTRIES; + + /* Set up the coalescing parameters. */ + qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; + qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; + qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; + qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; + + /* + * Set up the operating parameters. 
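+	 * All of the MPI work items below share one single-threaded
+	 * workqueue, so firmware housekeeping for this adapter is
+	 * serialized.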
+ */ + qdev->workqueue = create_singlethread_workqueue(ndev->name); + INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); + INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); + INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); + INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); + INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); + INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); + init_completion(&qdev->ide_completion); + mutex_init(&qdev->mpi_mutex); + + if (!cards_found) { + dev_info(&pdev->dev, "%s\n", DRV_STRING); + dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", + DRV_NAME, DRV_VERSION); + } + return 0; +err_out2: + ql_release_all(pdev); +err_out1: + pci_disable_device(pdev); + return err; +} + +static const struct net_device_ops qlge_netdev_ops = { + .ndo_open = qlge_open, + .ndo_stop = qlge_close, + .ndo_start_xmit = qlge_send, + .ndo_change_mtu = qlge_change_mtu, + .ndo_get_stats = qlge_get_stats, + .ndo_set_multicast_list = qlge_set_multicast_list, + .ndo_set_mac_address = qlge_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = qlge_tx_timeout, + .ndo_fix_features = qlge_fix_features, + .ndo_set_features = qlge_set_features, + .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, +}; + +static void ql_timer(unsigned long data) +{ + struct ql_adapter *qdev = (struct ql_adapter *)data; + u32 var = 0; + + var = ql_read32(qdev, STS); + if (pci_channel_offline(qdev->pdev)) { + netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); + return; + } + + mod_timer(&qdev->timer, jiffies + (5*HZ)); +} + +static int __devinit qlge_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_entry) +{ + struct net_device *ndev = NULL; + struct ql_adapter *qdev = NULL; + static int cards_found = 0; + int err = 0; + + ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), + min(MAX_CPUS, (int)num_online_cpus())); + if (!ndev) + return -ENOMEM; + + err = ql_init_device(pdev, ndev, cards_found); + if (err < 0) { + free_netdev(ndev); + return err; + } + + qdev = netdev_priv(ndev); + SET_NETDEV_DEV(ndev, &pdev->dev); + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | + NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; + ndev->features = ndev->hw_features | + NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + + if (test_bit(QL_DMA64, &qdev->flags)) + ndev->features |= NETIF_F_HIGHDMA; + + /* + * Set up net_device structure. 
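+	 * tx_queue_len is matched to the TX ring size so the qdisc
+	 * backlog stays in line with what one hardware ring can hold.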
+	 */
+	ndev->tx_queue_len = qdev->tx_ring_size;
+	ndev->irq = pdev->irq;
+
+	ndev->netdev_ops = &qlge_netdev_ops;
+	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+	ndev->watchdog_timeo = 10 * HZ;
+
+	err = register_netdev(ndev);
+	if (err) {
+		dev_err(&pdev->dev, "net device registration failed.\n");
+		ql_release_all(pdev);
+		pci_disable_device(pdev);
+		return err;
+	}
+	/* Start up the timer to trigger EEH if
+	 * the bus goes dead
+	 */
+	init_timer_deferrable(&qdev->timer);
+	qdev->timer.data = (unsigned long)qdev;
+	qdev->timer.function = ql_timer;
+	qdev->timer.expires = jiffies + (5*HZ);
+	add_timer(&qdev->timer);
+	ql_link_off(qdev);
+	ql_display_dev_info(ndev);
+	atomic_set(&qdev->lb_count, 0);
+	cards_found++;
+	return 0;
+}
+
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
+{
+	return qlge_send(skb, ndev);
+}
+
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+	return ql_clean_inbound_rx_ring(rx_ring, budget);
+}
+
+static void __devexit qlge_remove(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	del_timer_sync(&qdev->timer);
+	ql_cancel_all_work_sync(qdev);
+	unregister_netdev(ndev);
+	ql_release_all(pdev);
+	pci_disable_device(pdev);
+	free_netdev(ndev);
+}
+
+/* Clean up resources without touching hardware. */
+static void ql_eeh_close(struct net_device *ndev)
+{
+	int i;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (netif_carrier_ok(ndev)) {
+		netif_carrier_off(ndev);
+		netif_stop_queue(ndev);
+	}
+
+	/* Disabling the timer */
+	del_timer_sync(&qdev->timer);
+	ql_cancel_all_work_sync(qdev);
+
+	for (i = 0; i < qdev->rss_ring_count; i++)
+		netif_napi_del(&qdev->rx_ring[i].napi);
+
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
+	ql_tx_ring_clean(qdev);
+	ql_free_rx_buffers(qdev);
+	ql_release_adapter_resources(qdev);
+}
+
+/*
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
+					       enum pci_channel_state state)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	switch (state) {
+	case pci_channel_io_normal:
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		netif_device_detach(ndev);
+		if (netif_running(ndev))
+			ql_eeh_close(ndev);
+		pci_disable_device(pdev);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		dev_err(&pdev->dev,
+			"%s: pci_channel_io_perm_failure.\n", __func__);
+		ql_eeh_close(ndev);
+		set_bit(QL_EEH_FATAL, &qdev->flags);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code;
+ * it resembles the first half of the () routine.
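+ * On success the EEH core follows up with qlge_io_resume(), which
+ * reopens the interface if it was running before the error.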
+ */
+static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	pdev->error_state = pci_channel_io_normal;
+
+	pci_restore_state(pdev);
+	if (pci_enable_device(pdev)) {
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_set_master(pdev);
+
+	if (ql_adapter_reset(qdev)) {
+		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
+		set_bit(QL_EEH_FATAL, &qdev->flags);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void qlge_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err = 0;
+
+	if (netif_running(ndev)) {
+		err = qlge_open(ndev);
+		if (err) {
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Device initialization failed after reset.\n");
+			return;
+		}
+	} else {
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Device was not running prior to EEH.\n");
+	}
+	mod_timer(&qdev->timer, jiffies + (5*HZ));
+	netif_device_attach(ndev);
+}
+
+static struct pci_error_handlers qlge_err_handler = {
+	.error_detected = qlge_io_error_detected,
+	.slot_reset = qlge_io_slot_reset,
+	.resume = qlge_io_resume,
+};
+
+static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err;
+
+	netif_device_detach(ndev);
+	del_timer_sync(&qdev->timer);
+
+	if (netif_running(ndev)) {
+		err = ql_adapter_down(qdev);
+		if (err)
+			return err;
+	}
+
+	ql_wol(qdev);
+	err = pci_save_state(pdev);
+	if (err)
+		return err;
+
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int qlge_resume(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	err = pci_enable_device(pdev);
+	if (err) {
+		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (netif_running(ndev)) {
+		err = ql_adapter_up(qdev);
+		if (err)
+			return err;
+	}
+
+	mod_timer(&qdev->timer, jiffies + (5*HZ));
+	netif_device_attach(ndev);
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static void qlge_shutdown(struct pci_dev *pdev)
+{
+	qlge_suspend(pdev, PMSG_SUSPEND);
+}
+
+static struct pci_driver qlge_driver = {
+	.name = DRV_NAME,
+	.id_table = qlge_pci_tbl,
+	.probe = qlge_probe,
+	.remove = __devexit_p(qlge_remove),
+#ifdef CONFIG_PM
+	.suspend = qlge_suspend,
+	.resume = qlge_resume,
+#endif
+	.shutdown = qlge_shutdown,
+	.err_handler = &qlge_err_handler
+};
+
+static int __init qlge_init_module(void)
+{
+	return pci_register_driver(&qlge_driver);
+}
+
+static void __exit qlge_exit(void)
+{
+	pci_unregister_driver(&qlge_driver);
+}
+
+module_init(qlge_init_module);
+module_exit(qlge_exit);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
new file mode 100644
index 0000000..ff2bf8a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -0,0 +1,1284 @@
+#include "qlge.h"
+
+int ql_unpause_mpi_risc(struct ql_adapter *qdev)
+{
+	u32 tmp;
+
+	/* Un-pause the RISC */
+	tmp = ql_read32(qdev, CSR);
+	if (!(tmp & CSR_RP))
+		return -EIO;
+
+	ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+	return 0;
+}
+
+int ql_pause_mpi_risc(struct ql_adapter *qdev)
+{
+	u32 tmp;
+	int count = UDELAY_COUNT;
+
+	/* Pause the RISC */
+	ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+	do {
+		tmp = ql_read32(qdev, CSR);
+		if (tmp & CSR_RP)
+			break;
+		mdelay(UDELAY_DELAY);
+		count--;
+	} while (count);
+	return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+{
+	u32 tmp;
+	int count = UDELAY_COUNT;
+
+	/* Reset the RISC */
+	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+	do {
+		tmp = ql_read32(qdev, CSR);
+		if (tmp & CSR_RR) {
+			ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+			break;
+		}
+		mdelay(UDELAY_DELAY);
+		count--;
+	} while (count);
+	return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, PROC_DATA);
+exit:
+	return status;
+}
+
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* write the data to the data reg */
+	ql_write32(qdev, PROC_DATA, data);
+	/* trigger the write */
+	ql_write32(qdev, PROC_ADDR, reg);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+exit:
+	return status;
+}
+
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+{
+	int status;
+	status = ql_write_mpi_reg(qdev, 0x00001010, 1);
+	return status;
+}
+
+/* Determine if we are in charge of the firmware. If
+ * we are the lower of the 2 NIC pcie functions, or if
+ * we are the higher function and the lower function
+ * is not enabled.
+ */
+int ql_own_firmware(struct ql_adapter *qdev)
+{
+	u32 temp;
+
+	/* If we are the lower of the 2 NIC functions
+	 * on the chip then we are responsible for
+	 * core dump and firmware reset after an error.
+	 */
+	if (qdev->func < qdev->alt_func)
+		return 1;
+
+	/* If we are the higher of the 2 NIC functions
+	 * on the chip and the lower function is not
+	 * enabled, then we are responsible for
+	 * core dump and firmware reset after an error.
+	 */
+	temp = ql_read32(qdev, STS);
+	if (!(temp & (1 << (8 + qdev->alt_func))))
+		return 1;
+
+	return 0;
+}
+
+static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int i, status;
+
+	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+	if (status)
+		return -EBUSY;
+	for (i = 0; i < mbcp->out_count; i++) {
+		status =
+		    ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
+				    &mbcp->mbox_out[i]);
+		if (status) {
+			netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
+			break;
+		}
+	}
+	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);	/* does flush too */
+	return status;
+}
+
+/* Wait for a single mailbox command to complete.
+ * Returns zero on success.
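+ * Completion is signaled by the firmware raising the processor
+ * interrupt bit (STS_PI); we poll for it because mailbox commands
+ * are run with the MPI interrupt masked (polled mode).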
+ */
+static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
+{
+	int count = 100;
+	u32 value;
+
+	do {
+		value = ql_read32(qdev, STS);
+		if (value & STS_PI)
+			return 0;
+		mdelay(UDELAY_DELAY); /* 100ms */
+	} while (--count);
+	return -ETIMEDOUT;
+}
+
+/* Execute a single mailbox command.
+ * Caller must hold PROC_ADDR semaphore.
+ */
+static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int i, status;
+
+	/*
+	 * Make sure there's nothing pending.
+	 * This shouldn't happen.
+	 */
+	if (ql_read32(qdev, CSR) & CSR_HRI)
+		return -EIO;
+
+	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+	if (status)
+		return status;
+
+	/*
+	 * Fill the outbound mailboxes.
+	 */
+	for (i = 0; i < mbcp->in_count; i++) {
+		status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
+					  mbcp->mbox_in[i]);
+		if (status)
+			goto end;
+	}
+	/*
+	 * Wake up the MPI firmware.
+	 */
+	ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
+end:
+	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
+	return status;
+}
+
+/* We are being asked by firmware to accept
+ * a change to the port.  This is only
+ * a change to max frame sizes (Tx/Rx), pause
+ * parameters, or loopback mode.  We wake up a worker
+ * to handle processing this since a mailbox command
+ * will need to be sent to ACK the request.
+ */
+static int ql_idc_req_aen(struct ql_adapter *qdev)
+{
+	int status;
+	struct mbox_params *mbcp = &qdev->idc_mbc;
+
+	netif_err(qdev, drv, qdev->ndev, "Enter!\n");
+	/* Get the status data and start up a thread to
+	 * handle the request.
+	 */
+	mbcp = &qdev->idc_mbc;
+	mbcp->out_count = 4;
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Could not read MPI, resetting ASIC!\n");
+		ql_queue_asic_error(qdev);
+	} else {
+		/* Begin polled mode early so
+		 * we don't get another interrupt
+		 * when we leave mpi_worker.
+		 */
+		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+		queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
+	}
+	return status;
+}
+
+/* Process an inter-device event completion.
+ * If good, signal the caller's completion.
+ */
+static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
+{
+	int status;
+	struct mbox_params *mbcp = &qdev->idc_mbc;
+	mbcp->out_count = 4;
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Could not read MPI, resetting RISC!\n");
+		ql_queue_fw_error(qdev);
+	} else
+		/* Wake up the sleeping mpi_idc_work thread that is
+		 * waiting for this event.
+		 */
+		complete(&qdev->ide_completion);
+
+	return status;
+}
+
+static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+	mbcp->out_count = 2;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "%s: Could not get mailbox status.\n", __func__);
+		return;
+	}
+
+	qdev->link_status = mbcp->mbox_out[1];
+	netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
+
+	/* If we're coming back from an IDC event
+	 * then set up the CAM and frame routing.
+	 */
+	if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
+		status = ql_cam_route_initialize(qdev);
+		if (status) {
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Failed to init CAM/Routing tables.\n");
+			return;
+		} else
+			clear_bit(QL_CAM_RT_SET, &qdev->flags);
+	}
+
+	/* Queue up a worker to check the frame
+	 * size information, and fix it if it's not
+	 * to our liking.
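+	 * The worker sleeps on mailbox completions, which is why this is
+	 * deferred to mpi_port_cfg_work instead of being done here in
+	 * the AEN handler.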
+	 */
+	if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
+		netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
+		set_bit(QL_PORT_CFG, &qdev->flags);
+		/* Begin polled mode early so
+		 * we don't get another interrupt
+		 * when we leave mpi_worker dpc.
+		 */
+		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+		queue_delayed_work(qdev->workqueue,
+				   &qdev->mpi_port_cfg_work, 0);
+	}
+
+	ql_link_on(qdev);
+}
+
+static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 3;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status)
+		netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
+
+	ql_link_off(qdev);
+}
+
+static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 5;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status)
+		netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
+	else
+		netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
+
+	return status;
+}
+
+static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 1;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status)
+		netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
+	else
+		netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
+
+	return status;
+}
+
+static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 6;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status)
+		netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
+	else {
+		int i;
+		netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
+		for (i = 0; i < mbcp->out_count; i++)
+			netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
+				  i, mbcp->mbox_out[i]);
+	}
+
+	return status;
+}
+
+static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 2;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status) {
+		netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
+	} else {
+		netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
+			  mbcp->mbox_out[1]);
+		qdev->fw_rev_id = mbcp->mbox_out[1];
+		status = ql_cam_route_initialize(qdev);
+		if (status)
+			netif_err(qdev, ifup, qdev->ndev,
+				  "Failed to init CAM/Routing tables.\n");
+	}
+}
+
+/* Process an async event and clear it unless it's an
+ * error condition.
+ * This can get called iteratively from the mpi_work thread
+ * when events arrive via an interrupt.
+ * It also gets called when a mailbox command is polling for
+ * its completion.
+ */
+static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+	int orig_count = mbcp->out_count;
+
+	/* Just get mailbox zero for now. */
+	mbcp->out_count = 1;
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Could not read MPI, resetting ASIC!\n");
+		ql_queue_asic_error(qdev);
+		goto end;
+	}
+
+	switch (mbcp->mbox_out[0]) {
+
+	/* This case is only active when we arrive here
+	 * as a result of issuing a mailbox command to
+	 * the firmware.
+	 */
+	case MB_CMD_STS_INTRMDT:
+	case MB_CMD_STS_GOOD:
+	case MB_CMD_STS_INVLD_CMD:
+	case MB_CMD_STS_XFC_ERR:
+	case MB_CMD_STS_CSUM_ERR:
+	case MB_CMD_STS_ERR:
+	case MB_CMD_STS_PARAM_ERR:
+		/* We can only get mailbox status if we're polling from an
+		 * unfinished command.  Get the rest of the status data and
+		 * return back to the caller.
+		 * We only end up here when we're polling for a mailbox
+		 * command completion.
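+		 * The full status words are handed back to
+		 * ql_mailbox_command(), which checks them against
+		 * MB_CMD_STS_GOOD/MB_CMD_STS_INTRMDT.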
+		 */
+		mbcp->out_count = orig_count;
+		status = ql_get_mb_sts(qdev, mbcp);
+		return status;
+
+	/* We are being asked by firmware to accept
+	 * a change to the port.  This is only
+	 * a change to max frame sizes (Tx/Rx), pause
+	 * parameters, or loopback mode.
+	 */
+	case AEN_IDC_REQ:
+		status = ql_idc_req_aen(qdev);
+		break;
+
+	/* Process an inbound IDC event.
+	 * This will happen when we're trying to
+	 * change tx/rx max frame size, change pause
+	 * parameters or loopback mode.
+	 */
+	case AEN_IDC_CMPLT:
+	case AEN_IDC_EXT:
+		status = ql_idc_cmplt_aen(qdev);
+		break;
+
+	case AEN_LINK_UP:
+		ql_link_up(qdev, mbcp);
+		break;
+
+	case AEN_LINK_DOWN:
+		ql_link_down(qdev, mbcp);
+		break;
+
+	case AEN_FW_INIT_DONE:
+		/* If we're in the process of executing the firmware,
+		 * then convert the status to normal mailbox status.
+		 */
+		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+			mbcp->out_count = orig_count;
+			status = ql_get_mb_sts(qdev, mbcp);
+			mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
+			return status;
+		}
+		ql_init_fw_done(qdev, mbcp);
+		break;
+
+	case AEN_AEN_SFP_IN:
+		ql_sfp_in(qdev, mbcp);
+		break;
+
+	case AEN_AEN_SFP_OUT:
+		ql_sfp_out(qdev, mbcp);
+		break;
+
+	/* This event can arrive at boot time or after an
+	 * MPI reset if the firmware failed to initialize.
+	 */
+	case AEN_FW_INIT_FAIL:
+		/* If we're in the process of executing the firmware,
+		 * then convert the status to normal mailbox status.
+		 */
+		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+			mbcp->out_count = orig_count;
+			status = ql_get_mb_sts(qdev, mbcp);
+			mbcp->mbox_out[0] = MB_CMD_STS_ERR;
+			return status;
+		}
+		netif_err(qdev, drv, qdev->ndev,
+			  "Firmware initialization failed.\n");
+		status = -EIO;
+		ql_queue_fw_error(qdev);
+		break;
+
+	case AEN_SYS_ERR:
+		netif_err(qdev, drv, qdev->ndev, "System Error.\n");
+		ql_queue_fw_error(qdev);
+		status = -EIO;
+		break;
+
+	case AEN_AEN_LOST:
+		ql_aen_lost(qdev, mbcp);
+		break;
+
+	case AEN_DCBX_CHG:
+		/* Need to support AEN 8110 */
+		break;
+	default:
+		netif_err(qdev, drv, qdev->ndev,
+			  "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
+		/* Clear the MPI firmware status. */
+	}
+end:
+	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+	/* Restore the original mailbox count to
+	 * what the caller asked for.  This can get
+	 * changed when a mailbox command is waiting
+	 * for a response and an AEN arrives and
+	 * is handled.
+	 */
+	mbcp->out_count = orig_count;
+	return status;
+}
+
+/* Execute a single mailbox command.
+ * mbcp is a pointer to an array of u32.  Each
+ * element in the array contains the value for its
+ * respective mailbox register.
+ */
+static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+	unsigned long count;
+
+	mutex_lock(&qdev->mpi_mutex);
+
+	/* Begin polled mode for MPI */
+	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+
+	/* Load the mailbox registers and wake up MPI RISC. */
+	status = ql_exec_mb_cmd(qdev, mbcp);
+	if (status)
+		goto end;
+
+	/* If we're generating a system error, then there's nothing
+	 * to wait for.
+	 */
+	if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
+		goto end;
+
+	/* Wait for the command to complete.  We loop
+	 * here because some AEN might arrive while
+	 * we're waiting for the mailbox command to
+	 * complete.  If more than 5 seconds expire we can
+	 * assume something is wrong.
+	 */
+	count = jiffies + HZ * MAILBOX_TIMEOUT;
+	do {
+		/* Wait for the interrupt to come in. */
+		status = ql_wait_mbx_cmd_cmplt(qdev);
+		if (status)
+			continue;
+
+		/* Process the event.
+		 * If it's an AEN, it
+		 * will be handled in-line or a worker
+		 * will be spawned.  If it's our completion
+		 * we will catch it below.
+		 */
+		status = ql_mpi_handler(qdev, mbcp);
+		if (status)
+			goto end;
+
+		/* It's either the completion for our mailbox
+		 * command complete or an AEN.  If it's our
+		 * completion then get out.
+		 */
+		if (((mbcp->mbox_out[0] & 0x0000f000) ==
+		     MB_CMD_STS_GOOD) ||
+		    ((mbcp->mbox_out[0] & 0x0000f000) ==
+		     MB_CMD_STS_INTRMDT))
+			goto done;
+	} while (time_before(jiffies, count));
+
+	netif_err(qdev, drv, qdev->ndev,
+		  "Timed out waiting for mailbox complete.\n");
+	status = -ETIMEDOUT;
+	goto end;
+
+done:
+
+	/* Now we can clear the interrupt condition
+	 * and look at our status.
+	 */
+	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+
+	if (((mbcp->mbox_out[0] & 0x0000f000) !=
+	     MB_CMD_STS_GOOD) &&
+	    ((mbcp->mbox_out[0] & 0x0000f000) !=
+	     MB_CMD_STS_INTRMDT)) {
+		status = -EIO;
+	}
+end:
+	/* End polled mode for MPI */
+	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+	mutex_unlock(&qdev->mpi_mutex);
+	return status;
+}
+
+/* Get MPI firmware version. This will be used for
+ * driver banner and for ethtool info.
+ * Returns zero on success.
+ */
+int ql_mb_about_fw(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status = 0;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 3;
+
+	mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Failed about firmware command\n");
+		status = -EIO;
+	}
+
+	/* Store the firmware version */
+	qdev->fw_rev_id = mbcp->mbox_out[1];
+
+	return status;
+}
+
+/* Get functional state for MPI firmware.
+ * Returns zero on success.
+ */
+int ql_mb_get_fw_state(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status = 0;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 2;
+
+	mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Failed Get Firmware State.\n");
+		status = -EIO;
+	}
+
+	/* If bit zero is set in mbx 1 then the firmware is
+	 * running, but not initialized.  This should never
+	 * happen.
+	 */
+	if (mbcp->mbox_out[1] & 1) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Firmware waiting for initialization.\n");
+		status = -EIO;
+	}
+
+	return status;
+}
+
+/* Send an ACK mailbox command to the firmware to
+ * let it continue with the change.
+ */
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status = 0;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 5;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
+	mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
+	mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
+	mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
+	mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
+/* Set link settings and maximum frame size settings
+ * for the current port.
+ * Most likely will block.
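+ * On an intermediate status the change is completed through the IDC
+ * handshake; see ql_set_port_cfg(), which follows this call with
+ * ql_idc_wait().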
+ */ +int ql_mb_set_port_cfg(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 3; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG; + mbcp->mbox_in[1] = qdev->link_config; + mbcp->mbox_in[2] = qdev->max_frame_size; + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) { + netif_err(qdev, drv, qdev->ndev, + "Port Config sent, wait for IDC.\n"); + } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed Set Port Configuration.\n"); + status = -EIO; + } + return status; +} + +static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr, + u32 size) +{ + int status = 0; + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 9; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM; + mbcp->mbox_in[1] = LSW(addr); + mbcp->mbox_in[2] = MSW(req_dma); + mbcp->mbox_in[3] = LSW(req_dma); + mbcp->mbox_in[4] = MSW(size); + mbcp->mbox_in[5] = LSW(size); + mbcp->mbox_in[6] = MSW(MSD(req_dma)); + mbcp->mbox_in[7] = LSW(MSD(req_dma)); + mbcp->mbox_in[8] = MSW(addr); + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n"); + status = -EIO; + } + return status; +} + +/* Issue a mailbox command to dump RISC RAM. */ +int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, + u32 ram_addr, int word_count) +{ + int status; + char *my_buf; + dma_addr_t buf_dma; + + my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32), + &buf_dma); + if (!my_buf) + return -EIO; + + status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count); + if (!status) + memcpy(buf, my_buf, word_count * sizeof(u32)); + + pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf, + buf_dma); + return status; +} + +/* Get link settings and maximum frame size settings + * for the current port. + * Most likely will block. 
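+ * On success the results are cached in qdev->link_config and
+ * qdev->max_frame_size.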
+ */
+int ql_mb_get_port_cfg(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status = 0;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 3;
+
+	mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Failed Get Port Configuration.\n");
+		status = -EIO;
+	} else {
+		netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+			     "Passed Get Port Configuration.\n");
+		qdev->link_config = mbcp->mbox_out[1];
+		qdev->max_frame_size = mbcp->mbox_out[2];
+	}
+	return status;
+}
+
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 2;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
+	mbcp->mbox_in[1] = wol;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+	u8 *addr = qdev->ndev->dev_addr;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 8;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
+	if (enable_wol) {
+		mbcp->mbox_in[1] = (u32)addr[0];
+		mbcp->mbox_in[2] = (u32)addr[1];
+		mbcp->mbox_in[3] = (u32)addr[2];
+		mbcp->mbox_in[4] = (u32)addr[3];
+		mbcp->mbox_in[5] = (u32)addr[4];
+		mbcp->mbox_in[6] = (u32)addr[5];
+		mbcp->mbox_in[7] = 0;
+	} else {
+		mbcp->mbox_in[1] = 0;
+		mbcp->mbox_in[2] = 1;
+		mbcp->mbox_in[3] = 1;
+		mbcp->mbox_in[4] = 1;
+		mbcp->mbox_in[5] = 1;
+		mbcp->mbox_in[6] = 1;
+		mbcp->mbox_in[7] = 0;
+	}
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
+/* IDC - Inter Device Communication...
+ * Some firmware commands require consent of adjacent FCOE
+ * function.  This function waits for the OK, or a
+ * counter-request for a little more time.
+ * The firmware will complete the request if the other
+ * function doesn't respond.
+ */
+static int ql_idc_wait(struct ql_adapter *qdev)
+{
+	int status = -ETIMEDOUT;
+	long wait_time = 1 * HZ;
+	struct mbox_params *mbcp = &qdev->idc_mbc;
+	do {
+		/* Wait here for the command to complete
+		 * via the IDC process.
+		 */
+		wait_time =
+			wait_for_completion_timeout(&qdev->ide_completion,
+						    wait_time);
+		if (!wait_time) {
+			netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
+			break;
+		}
+		/* Now examine the response from the IDC process.
+		 * We might have a good completion or a request for
+		 * more wait time.
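+		 * AEN_IDC_EXT extends the wait using the time encoded in
+		 * mbox_out[1]; AEN_IDC_CMPLT means the other function
+		 * consented and the request has completed.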
+ */ + if (mbcp->mbox_out[0] == AEN_IDC_EXT) { + netif_err(qdev, drv, qdev->ndev, + "IDC Time Extension from function.\n"); + wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f; + } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) { + netif_err(qdev, drv, qdev->ndev, "IDC Success.\n"); + status = 0; + break; + } else { + netif_err(qdev, drv, qdev->ndev, + "IDC: Invalid State 0x%.04x.\n", + mbcp->mbox_out[0]); + status = -EIO; + break; + } + } while (wait_time); + + return status; +} + +int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 2; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG; + mbcp->mbox_in[1] = led_config; + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed to set LED Configuration.\n"); + status = -EIO; + } + + return status; +} + +int ql_mb_get_led_cfg(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed to get LED Configuration.\n"); + status = -EIO; + } else + qdev->led_config = mbcp->mbox_out[1]; + + return status; +} + +int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL; + mbcp->mbox_in[1] = control; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { + netif_err(qdev, drv, qdev->ndev, + "Command not supported by firmware.\n"); + status = -EINVAL; + } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { + /* This indicates that the firmware is + * already in the state we are trying to + * change it to. + */ + netif_err(qdev, drv, qdev->ndev, + "Command parameters make no change.\n"); + } + return status; +} + +/* Returns a negative error code or the mailbox command status. 
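+ * On MB_CMD_STS_GOOD the firmware's traffic control word is passed
+ * back through *control.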
+ */
+static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+	*control = 0;
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 2;
+
+	mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
+		*control = mbcp->mbox_out[1];
+		return status;
+	}
+
+	if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Command not supported by firmware.\n");
+		status = -EINVAL;
+	} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Failed to get MPI traffic control.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
+int ql_wait_fifo_empty(struct ql_adapter *qdev)
+{
+	int count = 5;
+	u32 mgmnt_fifo_empty;
+	u32 nic_fifo_empty;
+
+	do {
+		nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
+		ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
+		mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
+		if (nic_fifo_empty && mgmnt_fifo_empty)
+			return 0;
+		msleep(100);
+	} while (count-- > 0);
+	return -ETIMEDOUT;
+}
+
+/* API called in work thread context to set new TX/RX
+ * maximum frame size values to match MTU.
+ */
+static int ql_set_port_cfg(struct ql_adapter *qdev)
+{
+	int status;
+	status = ql_mb_set_port_cfg(qdev);
+	if (status)
+		return status;
+	status = ql_idc_wait(qdev);
+	return status;
+}
+
+/* The following routines are worker threads that process
+ * events that may sleep waiting for completion.
+ */
+
+/* This thread gets the maximum TX and RX frame size values
+ * from the firmware and, if necessary, changes them to match
+ * the MTU setting.
+ */
+void ql_mpi_port_cfg_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+		container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
+	int status;
+
+	status = ql_mb_get_port_cfg(qdev);
+	if (status) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Bug: Failed to get port config data.\n");
+		goto err;
+	}
+
+	if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
+	    qdev->max_frame_size == CFG_DEFAULT_MAX_FRAME_SIZE)
+		goto end;
+
+	qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
+	qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
+	status = ql_set_port_cfg(qdev);
+	if (status) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Bug: Failed to set port config data.\n");
+		goto err;
+	}
+end:
+	clear_bit(QL_PORT_CFG, &qdev->flags);
+	return;
+err:
+	ql_queue_fw_error(qdev);
+	goto end;
+}
+
+/* Process an inter-device request.  This is issued by
+ * the firmware in response to another function requesting
+ * a change to the port.  We set a flag to indicate a change
+ * has been made and then send a mailbox command ACKing
+ * the change request.
+ */
+void ql_mpi_idc_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+		container_of(work, struct ql_adapter, mpi_idc_work.work);
+	int status;
+	struct mbox_params *mbcp = &qdev->idc_mbc;
+	u32 aen;
+	int timeout;
+
+	aen = mbcp->mbox_out[1] >> 16;
+	timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
+
+	switch (aen) {
+	default:
+		netif_err(qdev, drv, qdev->ndev,
+			  "Bug: Unhandled IDC action.\n");
+		break;
+	case MB_CMD_PORT_RESET:
+	case MB_CMD_STOP_FW:
+		ql_link_off(qdev);
+	case MB_CMD_SET_PORT_CFG:
+		/* Signal the resulting link up AEN
+		 * that the frame routing and mac addr
+		 * needs to be set.
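+		 * The actual reprogramming happens in ql_link_up() once
+		 * the link returns; it tests QL_CAM_RT_SET and calls
+		 * ql_cam_route_initialize().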
+ * */ + set_bit(QL_CAM_RT_SET, &qdev->flags); + /* Do ACK if required */ + if (timeout) { + status = ql_mb_idc_ack(qdev); + if (status) + netif_err(qdev, drv, qdev->ndev, + "Bug: No pending IDC!\n"); + } else { + netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, + "IDC ACK not required\n"); + status = 0; /* success */ + } + break; + + /* These sub-commands issued by another (FCoE) + * function are requesting to do an operation + * on the shared resource (MPI environment). + * We currently don't issue these so we just + * ACK the request. + */ + case MB_CMD_IOP_RESTART_MPI: + case MB_CMD_IOP_PREP_LINK_DOWN: + /* Drop the link, reload the routing + * table when link comes up. + */ + ql_link_off(qdev); + set_bit(QL_CAM_RT_SET, &qdev->flags); + /* Fall through. */ + case MB_CMD_IOP_DVR_START: + case MB_CMD_IOP_FLASH_ACC: + case MB_CMD_IOP_CORE_DUMP_MPI: + case MB_CMD_IOP_PREP_UPDATE_MPI: + case MB_CMD_IOP_COMP_UPDATE_MPI: + case MB_CMD_IOP_NONE: /* an IDC without params */ + /* Do ACK if required */ + if (timeout) { + status = ql_mb_idc_ack(qdev); + if (status) + netif_err(qdev, drv, qdev->ndev, + "Bug: No pending IDC!\n"); + } else { + netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, + "IDC ACK not required\n"); + status = 0; /* success */ + } + break; + } +} + +void ql_mpi_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_work.work); + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int err = 0; + + mutex_lock(&qdev->mpi_mutex); + /* Begin polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + + while (ql_read32(qdev, STS) & STS_PI) { + memset(mbcp, 0, sizeof(struct mbox_params)); + mbcp->out_count = 1; + /* Don't continue if an async event + * did not complete properly. + */ + err = ql_mpi_handler(qdev, mbcp); + if (err) + break; + } + + /* End polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); + mutex_unlock(&qdev->mpi_mutex); + ql_enable_completion_interrupt(qdev, 0); +} + +void ql_mpi_reset_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_reset_work.work); + cancel_delayed_work_sync(&qdev->mpi_work); + cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); + cancel_delayed_work_sync(&qdev->mpi_idc_work); + /* If we're not the dominant NIC function, + * then there is nothing to do. + */ + if (!ql_own_firmware(qdev)) { + netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); + return; + } + + if (!ql_core_dump(qdev, qdev->mpi_coredump)) { + netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); + qdev->core_is_dumped = 1; + queue_delayed_work(qdev->workqueue, + &qdev->mpi_core_to_log, 5 * HZ); + } + ql_soft_reset_mpi_risc(qdev); +} diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile deleted file mode 100644 index 861a059..0000000 --- a/drivers/net/netxen/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2003 - 2009 NetXen, Inc. -# Copyright (C) 2009 - QLogic Corporation. -# All rights reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, -# MA 02111-1307, USA. -# -# The full GNU General Public License is included in this distribution -# in the file called "COPYING". -# -# - - -obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o - -netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \ - netxen_nic_ethtool.o netxen_nic_ctx.o diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h deleted file mode 100644 index 196b660..0000000 --- a/drivers/net/netxen/netxen_nic.h +++ /dev/null @@ -1,1441 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". - * - */ - -#ifndef _NETXEN_NIC_H_ -#define _NETXEN_NIC_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include -#include - -#include "netxen_nic_hdr.h" -#include "netxen_nic_hw.h" - -#define _NETXEN_NIC_LINUX_MAJOR 4 -#define _NETXEN_NIC_LINUX_MINOR 0 -#define _NETXEN_NIC_LINUX_SUBVERSION 76 -#define NETXEN_NIC_LINUX_VERSIONID "4.0.76" - -#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) -#define _major(v) (((v) >> 24) & 0xff) -#define _minor(v) (((v) >> 16) & 0xff) -#define _build(v) ((v) & 0xffff) - -/* version in image has weird encoding: - * 7:0 - major - * 15:8 - minor - * 31:16 - build (little endian) - */ -#define NETXEN_DECODE_VERSION(v) \ - NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) - -#define NETXEN_NUM_FLASH_SECTORS (64) -#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024) -#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \ - * NETXEN_FLASH_SECTOR_SIZE) - -#define RCV_DESC_RINGSIZE(rds_ring) \ - (sizeof(struct rcv_desc) * (rds_ring)->num_desc) -#define RCV_BUFF_RINGSIZE(rds_ring) \ - (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc) -#define STATUS_DESC_RINGSIZE(sds_ring) \ - (sizeof(struct status_desc) * (sds_ring)->num_desc) -#define TX_BUFF_RINGSIZE(tx_ring) \ - (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc) -#define TX_DESC_RINGSIZE(tx_ring) \ - (sizeof(struct cmd_desc_type0) * tx_ring->num_desc) - -#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) - -#define NETXEN_RCV_PRODUCER_OFFSET 0 -#define NETXEN_RCV_PEG_DB_ID 2 -#define NETXEN_HOST_DUMMY_DMA_SIZE 1024 -#define FLASH_SUCCESS 0 - -#define ADDR_IN_WINDOW1(off) \ - ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 
1 : 0 - -#define ADDR_IN_RANGE(addr, low, high) \ - (((addr) < (high)) && ((addr) >= (low))) - -/* - * normalize a 64MB crb address to 32MB PCI window - * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1 - */ -#define NETXEN_CRB_NORMAL(reg) \ - ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST) - -#define NETXEN_CRB_NORMALIZE(adapter, reg) \ - pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg)) - -#define DB_NORMALIZE(adapter, off) \ - (adapter->ahw.db_base + (off)) - -#define NX_P2_C0 0x24 -#define NX_P2_C1 0x25 -#define NX_P3_A0 0x30 -#define NX_P3_A2 0x30 -#define NX_P3_B0 0x40 -#define NX_P3_B1 0x41 -#define NX_P3_B2 0x42 -#define NX_P3P_A0 0x50 - -#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1) -#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0) -#define NX_IS_REVISION_P3P(REVISION) (REVISION >= NX_P3P_A0) - -#define FIRST_PAGE_GROUP_START 0 -#define FIRST_PAGE_GROUP_END 0x100000 - -#define SECOND_PAGE_GROUP_START 0x6000000 -#define SECOND_PAGE_GROUP_END 0x68BC000 - -#define THIRD_PAGE_GROUP_START 0x70E4000 -#define THIRD_PAGE_GROUP_END 0x8000000 - -#define FIRST_PAGE_GROUP_SIZE FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START -#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START -#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START - -#define P2_MAX_MTU (8000) -#define P3_MAX_MTU (9600) -#define NX_ETHERMTU 1500 -#define NX_MAX_ETHERHDR 32 /* This contains some padding */ - -#define NX_P2_RX_BUF_MAX_LEN 1760 -#define NX_P3_RX_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU) -#define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU) -#define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU) -#define NX_CT_DEFAULT_RX_BUF_LEN 2048 -#define NX_LRO_BUFFER_EXTRA 2048 - -#define NX_RX_LRO_BUFFER_LENGTH (8060) - -/* - * Maximum number of ring contexts - */ -#define MAX_RING_CTX 1 - -/* Opcodes to be used with the commands */ -#define TX_ETHER_PKT 0x01 -#define TX_TCP_PKT 0x02 -#define TX_UDP_PKT 0x03 -#define TX_IP_PKT 0x04 -#define TX_TCP_LSO 0x05 -#define TX_TCP_LSO6 0x06 -#define TX_IPSEC 0x07 -#define TX_IPSEC_CMD 0x0a -#define TX_TCPV6_PKT 0x0b -#define TX_UDPV6_PKT 0x0c - -/* The following opcodes are for internal consumption. */ -#define NETXEN_CONTROL_OP 0x10 -#define PEGNET_REQUEST 0x11 - -#define MAX_NUM_CARDS 4 - -#define NETXEN_MAX_FRAGS_PER_TX 14 -#define MAX_TSO_HEADER_DESC 2 -#define MGMT_CMD_DESC_RESV 4 -#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ - + MGMT_CMD_DESC_RESV) -#define NX_MAX_TX_TIMEOUTS 2 - -/* - * Following are the states of the Phantom. Phantom will set them and - * Host will read to check if the fields are correct. 
- */ -#define PHAN_INITIALIZE_START 0xff00 -#define PHAN_INITIALIZE_FAILED 0xffff -#define PHAN_INITIALIZE_COMPLETE 0xff01 - -/* Host writes the following to notify that it has done the init-handshake */ -#define PHAN_INITIALIZE_ACK 0xf00f - -#define NUM_RCV_DESC_RINGS 3 -#define NUM_STS_DESC_RINGS 4 - -#define RCV_RING_NORMAL 0 -#define RCV_RING_JUMBO 1 -#define RCV_RING_LRO 2 - -#define MIN_CMD_DESCRIPTORS 64 -#define MIN_RCV_DESCRIPTORS 64 -#define MIN_JUMBO_DESCRIPTORS 32 - -#define MAX_CMD_DESCRIPTORS 1024 -#define MAX_RCV_DESCRIPTORS_1G 4096 -#define MAX_RCV_DESCRIPTORS_10G 8192 -#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512 -#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024 -#define MAX_LRO_RCV_DESCRIPTORS 8 - -#define DEFAULT_RCV_DESCRIPTORS_1G 2048 -#define DEFAULT_RCV_DESCRIPTORS_10G 4096 - -#define NETXEN_CTX_SIGNATURE 0xdee0 -#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0 -#define NETXEN_CTX_RESET 0xbad0 -#define NETXEN_CTX_D3_RESET 0xacc0 -#define NETXEN_RCV_PRODUCER(ringid) (ringid) - -#define PHAN_PEG_RCV_INITIALIZED 0xff01 -#define PHAN_PEG_RCV_START_INITIALIZE 0xff00 - -#define get_next_index(index, length) \ - (((index) + 1) & ((length) - 1)) - -#define get_index_range(index,length,count) \ - (((index) + (count)) & ((length) - 1)) - -#define MPORT_SINGLE_FUNCTION_MODE 0x1111 -#define MPORT_MULTI_FUNCTION_MODE 0x2222 - -#define NX_MAX_PCI_FUNC 8 - -/* - * NetXen host-peg signal message structure - * - * Bit 0-1 : peg_id => 0x2 for tx and 01 for rx - * Bit 2 : priv_id => must be 1 - * Bit 3-17 : count => for doorbell - * Bit 18-27 : ctx_id => Context id - * Bit 28-31 : opcode - */ - -typedef u32 netxen_ctx_msg; - -#define netxen_set_msg_peg_id(config_word, val) \ - ((config_word) &= ~3, (config_word) |= val & 3) -#define netxen_set_msg_privid(config_word) \ - ((config_word) |= 1 << 2) -#define netxen_set_msg_count(config_word, val) \ - ((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3) -#define netxen_set_msg_ctxid(config_word, val) \ - ((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18) -#define netxen_set_msg_opcode(config_word, val) \ - ((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28) - -struct netxen_rcv_ring { - __le64 addr; - __le32 size; - __le32 rsrvd; -}; - -struct netxen_sts_ring { - __le64 addr; - __le32 size; - __le16 msi_index; - __le16 rsvd; -} ; - -struct netxen_ring_ctx { - - /* one command ring */ - __le64 cmd_consumer_offset; - __le64 cmd_ring_addr; - __le32 cmd_ring_size; - __le32 rsrvd; - - /* three receive rings */ - struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS]; - - __le64 sts_ring_addr; - __le32 sts_ring_size; - - __le32 ctx_id; - - __le64 rsrvd_2[3]; - __le32 sts_ring_count; - __le32 rsrvd_3; - struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS]; - -} __attribute__ ((aligned(64))); - -/* - * Following data structures describe the descriptors that will be used. - * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when - * we are doing LSO (above the 1500 size packet) only. 
- */ - -/* - * The size of reference handle been changed to 16 bits to pass the MSS fields - * for the LSO packet - */ - -#define FLAGS_CHECKSUM_ENABLED 0x01 -#define FLAGS_LSO_ENABLED 0x02 -#define FLAGS_IPSEC_SA_ADD 0x04 -#define FLAGS_IPSEC_SA_DELETE 0x08 -#define FLAGS_VLAN_TAGGED 0x10 -#define FLAGS_VLAN_OOB 0x40 - -#define netxen_set_tx_vlan_tci(cmd_desc, v) \ - (cmd_desc)->vlan_TCI = cpu_to_le16(v); - -#define netxen_set_cmd_desc_port(cmd_desc, var) \ - ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) -#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \ - ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0)) - -#define netxen_set_tx_port(_desc, _port) \ - (_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0) - -#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \ - (_desc)->flags_opcode = \ - cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)) - -#define netxen_set_tx_frags_len(_desc, _frags, _len) \ - (_desc)->nfrags__length = \ - cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)) - -struct cmd_desc_type0 { - u8 tcp_hdr_offset; /* For LSO only */ - u8 ip_hdr_offset; /* For LSO only */ - __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */ - __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */ - - __le64 addr_buffer2; - - __le16 reference_handle; - __le16 mss; - u8 port_ctxid; /* 7:4 ctxid 3:0 port */ - u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ - __le16 conn_id; /* IPSec offoad only */ - - __le64 addr_buffer3; - __le64 addr_buffer1; - - __le16 buffer_length[4]; - - __le64 addr_buffer4; - - __le32 reserved2; - __le16 reserved; - __le16 vlan_TCI; - -} __attribute__ ((aligned(64))); - -/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ -struct rcv_desc { - __le16 reference_handle; - __le16 reserved; - __le32 buffer_length; /* allocated buffer length (usually 2K) */ - __le64 addr_buffer; -}; - -/* opcode field in status_desc */ -#define NETXEN_NIC_SYN_OFFLOAD 0x03 -#define NETXEN_NIC_RXPKT_DESC 0x04 -#define NETXEN_OLD_RXPKT_DESC 0x3f -#define NETXEN_NIC_RESPONSE_DESC 0x05 -#define NETXEN_NIC_LRO_DESC 0x12 - -/* for status field in status_desc */ -#define STATUS_NEED_CKSUM (1) -#define STATUS_CKSUM_OK (2) - -/* owner bits of status_desc */ -#define STATUS_OWNER_HOST (0x1ULL << 56) -#define STATUS_OWNER_PHANTOM (0x2ULL << 56) - -/* Status descriptor: - 0-3 port, 4-7 status, 8-11 type, 12-27 total_length - 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset - 53-55 desc_cnt, 56-57 owner, 58-63 opcode - */ -#define netxen_get_sts_port(sts_data) \ - ((sts_data) & 0x0F) -#define netxen_get_sts_status(sts_data) \ - (((sts_data) >> 4) & 0x0F) -#define netxen_get_sts_type(sts_data) \ - (((sts_data) >> 8) & 0x0F) -#define netxen_get_sts_totallength(sts_data) \ - (((sts_data) >> 12) & 0xFFFF) -#define netxen_get_sts_refhandle(sts_data) \ - (((sts_data) >> 28) & 0xFFFF) -#define netxen_get_sts_prot(sts_data) \ - (((sts_data) >> 44) & 0x0F) -#define netxen_get_sts_pkt_offset(sts_data) \ - (((sts_data) >> 48) & 0x1F) -#define netxen_get_sts_desc_cnt(sts_data) \ - (((sts_data) >> 53) & 0x7) -#define netxen_get_sts_opcode(sts_data) \ - (((sts_data) >> 58) & 0x03F) - -#define netxen_get_lro_sts_refhandle(sts_data) \ - ((sts_data) & 0x0FFFF) -#define netxen_get_lro_sts_length(sts_data) \ - (((sts_data) >> 16) & 0x0FFFF) -#define netxen_get_lro_sts_l2_hdr_offset(sts_data) \ - (((sts_data) >> 32) & 0x0FF) -#define netxen_get_lro_sts_l4_hdr_offset(sts_data) \ - (((sts_data) >> 40) & 0x0FF) -#define netxen_get_lro_sts_timestamp(sts_data) 
\ - (((sts_data) >> 48) & 0x1) -#define netxen_get_lro_sts_type(sts_data) \ - (((sts_data) >> 49) & 0x7) -#define netxen_get_lro_sts_push_flag(sts_data) \ - (((sts_data) >> 52) & 0x1) -#define netxen_get_lro_sts_seq_number(sts_data) \ - ((sts_data) & 0x0FFFFFFFF) - - -struct status_desc { - __le64 status_desc_data[2]; -} __attribute__ ((aligned(16))); - -/* UNIFIED ROMIMAGE *************************/ -#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 -#define NX_UNI_DIR_SECT_BOOTLD 0x6 -#define NX_UNI_DIR_SECT_FW 0x7 - -/*Offsets */ -#define NX_UNI_CHIP_REV_OFF 10 -#define NX_UNI_FLAGS_OFF 11 -#define NX_UNI_BIOS_VERSION_OFF 12 -#define NX_UNI_BOOTLD_IDX_OFF 27 -#define NX_UNI_FIRMWARE_IDX_OFF 29 - -struct uni_table_desc{ - uint32_t findex; - uint32_t num_entries; - uint32_t entry_size; - uint32_t reserved[5]; -}; - -struct uni_data_desc{ - uint32_t findex; - uint32_t size; - uint32_t reserved[5]; -}; - -/* UNIFIED ROMIMAGE *************************/ - -/* The version of the main data structure */ -#define NETXEN_BDINFO_VERSION 1 - -/* Magic number to let user know flash is programmed */ -#define NETXEN_BDINFO_MAGIC 0x12345678 - -/* Max number of Gig ports on a Phantom board */ -#define NETXEN_MAX_PORTS 4 - -#define NETXEN_BRDTYPE_P1_BD 0x0000 -#define NETXEN_BRDTYPE_P1_SB 0x0001 -#define NETXEN_BRDTYPE_P1_SMAX 0x0002 -#define NETXEN_BRDTYPE_P1_SOCK 0x0003 - -#define NETXEN_BRDTYPE_P2_SOCK_31 0x0008 -#define NETXEN_BRDTYPE_P2_SOCK_35 0x0009 -#define NETXEN_BRDTYPE_P2_SB35_4G 0x000a -#define NETXEN_BRDTYPE_P2_SB31_10G 0x000b -#define NETXEN_BRDTYPE_P2_SB31_2G 0x000c - -#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ 0x000d -#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ 0x000e -#define NETXEN_BRDTYPE_P2_SB31_10G_CX4 0x000f - -#define NETXEN_BRDTYPE_P3_REF_QG 0x0021 -#define NETXEN_BRDTYPE_P3_HMEZ 0x0022 -#define NETXEN_BRDTYPE_P3_10G_CX4_LP 0x0023 -#define NETXEN_BRDTYPE_P3_4_GB 0x0024 -#define NETXEN_BRDTYPE_P3_IMEZ 0x0025 -#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026 -#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027 -#define NETXEN_BRDTYPE_P3_XG_LOM 0x0028 -#define NETXEN_BRDTYPE_P3_4_GB_MM 0x0029 -#define NETXEN_BRDTYPE_P3_10G_SFP_CT 0x002a -#define NETXEN_BRDTYPE_P3_10G_SFP_QT 0x002b -#define NETXEN_BRDTYPE_P3_10G_CX4 0x0031 -#define NETXEN_BRDTYPE_P3_10G_XFP 0x0032 -#define NETXEN_BRDTYPE_P3_10G_TP 0x0080 - -/* Flash memory map */ -#define NETXEN_CRBINIT_START 0 /* crbinit section */ -#define NETXEN_BRDCFG_START 0x4000 /* board config */ -#define NETXEN_INITCODE_START 0x6000 /* pegtune code */ -#define NETXEN_BOOTLD_START 0x10000 /* bootld */ -#define NETXEN_IMAGE_START 0x43000 /* compressed image */ -#define NETXEN_SECONDARY_START 0x200000 /* backup images */ -#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ -#define NETXEN_USER_START 0x3E8000 /* Firmare info */ -#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ -#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ - -#define NX_OLD_MAC_ADDR_OFFSET (NETXEN_USER_START) -#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408) -#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c) -#define NX_FW_MAC_ADDR_OFFSET (NETXEN_USER_START+0x418) -#define NX_FW_SERIAL_NUM_OFFSET (NETXEN_USER_START+0x81c) -#define NX_BIOS_VERSION_OFFSET (NETXEN_USER_START+0x83c) - -#define NX_HDR_VERSION_OFFSET (NETXEN_BRDCFG_START) -#define NX_BRDTYPE_OFFSET (NETXEN_BRDCFG_START+0x8) -#define NX_FW_MAGIC_OFFSET (NETXEN_BRDCFG_START+0x128) - -#define NX_FW_MIN_SIZE (0x3fffff) -#define NX_P2_MN_ROMIMAGE 0 -#define NX_P3_CT_ROMIMAGE 1 -#define NX_P3_MN_ROMIMAGE 2 
-#define NX_UNIFIED_ROMIMAGE 3 -#define NX_FLASH_ROMIMAGE 4 -#define NX_UNKNOWN_ROMIMAGE 0xff - -#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin" -#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin" -#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin" -#define NX_UNIFIED_ROMIMAGE_NAME "phanfw.bin" -#define NX_FLASH_ROMIMAGE_NAME "flash" - -extern char netxen_nic_driver_name[]; - -/* Number of status descriptors to handle per interrupt */ -#define MAX_STATUS_HANDLE (64) - -/* - * netxen_skb_frag{} is to contain mapping info for each SG list. This - * has to be freed when DMA is complete. This is part of netxen_tx_buffer{}. - */ -struct netxen_skb_frag { - u64 dma; - u64 length; -}; - -struct netxen_recv_crb { - u32 crb_rcv_producer[NUM_RCV_DESC_RINGS]; - u32 crb_sts_consumer[NUM_STS_DESC_RINGS]; - u32 sw_int_mask[NUM_STS_DESC_RINGS]; -}; - -/* Following defines are for the state of the buffers */ -#define NETXEN_BUFFER_FREE 0 -#define NETXEN_BUFFER_BUSY 1 - -/* - * There will be one netxen_buffer per skb packet. These will be - * used to save the dma info for pci_unmap_page() - */ -struct netxen_cmd_buffer { - struct sk_buff *skb; - struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1]; - u32 frag_count; -}; - -/* In rx_buffer, we do not need multiple fragments as is a single buffer */ -struct netxen_rx_buffer { - struct list_head list; - struct sk_buff *skb; - u64 dma; - u16 ref_handle; - u16 state; -}; - -/* Board types */ -#define NETXEN_NIC_GBE 0x01 -#define NETXEN_NIC_XGBE 0x02 - -/* - * One hardware_context{} per adapter - * contains interrupt info as well shared hardware info. - */ -struct netxen_hardware_context { - void __iomem *pci_base0; - void __iomem *pci_base1; - void __iomem *pci_base2; - void __iomem *db_base; - void __iomem *ocm_win_crb; - - unsigned long db_len; - unsigned long pci_len0; - - u32 ocm_win; - u32 crb_win; - - rwlock_t crb_lock; - spinlock_t mem_lock; - - u8 cut_through; - u8 revision_id; - u8 pci_func; - u8 linkup; - u16 port_type; - u16 board_type; -}; - -#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ -#define ETHERNET_FCS_SIZE 4 - -struct netxen_adapter_stats { - u64 xmitcalled; - u64 xmitfinished; - u64 rxdropped; - u64 txdropped; - u64 csummed; - u64 rx_pkts; - u64 lro_pkts; - u64 rxbytes; - u64 txbytes; -}; - -/* - * Rcv Descriptor Context. One such per Rcv Descriptor. There may - * be one Rcv Descriptor for normal packets, one for jumbo and may be others. - */ -struct nx_host_rds_ring { - u32 producer; - u32 num_desc; - u32 dma_size; - u32 skb_size; - u32 flags; - void __iomem *crb_rcv_producer; - struct rcv_desc *desc_head; - struct netxen_rx_buffer *rx_buf_arr; - struct list_head free_list; - spinlock_t lock; - dma_addr_t phys_addr; -}; - -struct nx_host_sds_ring { - u32 consumer; - u32 num_desc; - void __iomem *crb_sts_consumer; - void __iomem *crb_intr_mask; - - struct status_desc *desc_head; - struct netxen_adapter *adapter; - struct napi_struct napi; - struct list_head free_list[NUM_RCV_DESC_RINGS]; - - int irq; - - dma_addr_t phys_addr; - char name[IFNAMSIZ+4]; -}; - -struct nx_host_tx_ring { - u32 producer; - __le32 *hw_consumer; - u32 sw_consumer; - void __iomem *crb_cmd_producer; - void __iomem *crb_cmd_consumer; - u32 num_desc; - - struct netdev_queue *txq; - - struct netxen_cmd_buffer *cmd_buf_arr; - struct cmd_desc_type0 *desc_head; - dma_addr_t phys_addr; -}; - -/* - * Receive context. There is one such structure per instance of the - * receive processing. Any state information that is relevant to - * the receive, and is must be in this structure. 
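
[Not part of the patch: the ring structures above all use power-of-two sizes, so the get_next_index() helper earlier in this header can wrap an index with a mask instead of a modulo. A consumer-side walk over a status ring then looks roughly like the sketch below; NXWRIO() is declared later in this header, and the descriptor processing itself is elided:]

	static void nx_consume_status_ring(struct netxen_adapter *adapter,
					   struct nx_host_sds_ring *sds_ring,
					   int count)
	{
		u32 consumer = sds_ring->consumer;

		while (count--) {
			struct status_desc *desc = &sds_ring->desc_head[consumer];

			/* ... process one completion descriptor ... */
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		sds_ring->consumer = consumer;
		/* publish the new consumer index back to the card */
		NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
	}
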
The global data may be - * present elsewhere. - */ -struct netxen_recv_context { - u32 state; - u16 context_id; - u16 virt_port; - - struct nx_host_rds_ring *rds_rings; - struct nx_host_sds_ring *sds_rings; - - struct netxen_ring_ctx *hwctx; - dma_addr_t phys_addr; -}; - -/* New HW context creation */ - -#define NX_OS_CRB_RETRY_COUNT 4000 -#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \ - (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) - -#define NX_CDRP_CLEAR 0x00000000 -#define NX_CDRP_CMD_BIT 0x80000000 - -/* - * All responses must have the NX_CDRP_CMD_BIT cleared - * in the crb NX_CDRP_CRB_OFFSET. - */ -#define NX_CDRP_FORM_RSP(rsp) (rsp) -#define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0) - -#define NX_CDRP_RSP_OK 0x00000001 -#define NX_CDRP_RSP_FAIL 0x00000002 -#define NX_CDRP_RSP_TIMEOUT 0x00000003 - -/* - * All commands must have the NX_CDRP_CMD_BIT set in - * the crb NX_CDRP_CRB_OFFSET. - */ -#define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd)) -#define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0) - -#define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 -#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 -#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 -#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 -#define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 -#define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 -#define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007 -#define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008 -#define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009 -#define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a -#define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e -#define NX_CDRP_CMD_GET_STATISTICS 0x0000000f -#define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010 -#define NX_CDRP_CMD_SET_MTU 0x00000012 -#define NX_CDRP_CMD_READ_PHY 0x00000013 -#define NX_CDRP_CMD_WRITE_PHY 0x00000014 -#define NX_CDRP_CMD_READ_HW_REG 0x00000015 -#define NX_CDRP_CMD_GET_FLOW_CTL 0x00000016 -#define NX_CDRP_CMD_SET_FLOW_CTL 0x00000017 -#define NX_CDRP_CMD_READ_MAX_MTU 0x00000018 -#define NX_CDRP_CMD_READ_MAX_LRO 0x00000019 -#define NX_CDRP_CMD_CONFIGURE_TOE 0x0000001a -#define NX_CDRP_CMD_FUNC_ATTRIB 0x0000001b -#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c -#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d -#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e -#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f -#define NX_CDRP_CMD_MAX 0x00000020 - -#define NX_RCODE_SUCCESS 0 -#define NX_RCODE_NO_HOST_MEM 1 -#define NX_RCODE_NO_HOST_RESOURCE 2 -#define NX_RCODE_NO_CARD_CRB 3 -#define NX_RCODE_NO_CARD_MEM 4 -#define NX_RCODE_NO_CARD_RESOURCE 5 -#define NX_RCODE_INVALID_ARGS 6 -#define NX_RCODE_INVALID_ACTION 7 -#define NX_RCODE_INVALID_STATE 8 -#define NX_RCODE_NOT_SUPPORTED 9 -#define NX_RCODE_NOT_PERMITTED 10 -#define NX_RCODE_NOT_READY 11 -#define NX_RCODE_DOES_NOT_EXIST 12 -#define NX_RCODE_ALREADY_EXISTS 13 -#define NX_RCODE_BAD_SIGNATURE 14 -#define NX_RCODE_CMD_NOT_IMPL 15 -#define NX_RCODE_CMD_INVALID 16 -#define NX_RCODE_TIMEOUT 17 -#define NX_RCODE_CMD_FAILED 18 -#define NX_RCODE_MAX_EXCEEDED 19 -#define NX_RCODE_MAX 20 - -#define NX_DESTROY_CTX_RESET 0 -#define NX_DESTROY_CTX_D3_RESET 1 -#define NX_DESTROY_CTX_MAX 2 - -/* - * Capabilities - */ -#define NX_CAP_BIT(class, bit) (1 << bit) -#define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0) -#define NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1) -#define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2) -#define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3) -#define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4) -#define NX_CAP0_LRO NX_CAP_BIT(0, 5) -#define NX_CAP0_LSO NX_CAP_BIT(0, 
6) -#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7) -#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8) -#define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10) - -/* - * Context state - */ -#define NX_HOST_CTX_STATE_FREED 0 -#define NX_HOST_CTX_STATE_ALLOCATED 1 -#define NX_HOST_CTX_STATE_ACTIVE 2 -#define NX_HOST_CTX_STATE_DISABLED 3 -#define NX_HOST_CTX_STATE_QUIESCED 4 -#define NX_HOST_CTX_STATE_MAX 5 - -/* - * Rx context - */ - -typedef struct { - __le64 host_phys_addr; /* Ring base addr */ - __le32 ring_size; /* Ring entries */ - __le16 msi_index; - __le16 rsvd; /* Padding */ -} nx_hostrq_sds_ring_t; - -typedef struct { - __le64 host_phys_addr; /* Ring base addr */ - __le64 buff_size; /* Packet buffer size */ - __le32 ring_size; /* Ring entries */ - __le32 ring_kind; /* Class of ring */ -} nx_hostrq_rds_ring_t; - -typedef struct { - __le64 host_rsp_dma_addr; /* Response dma'd here */ - __le32 capabilities[4]; /* Flag bit vector */ - __le32 host_int_crb_mode; /* Interrupt crb usage */ - __le32 host_rds_crb_mode; /* RDS crb usage */ - /* These ring offsets are relative to data[0] below */ - __le32 rds_ring_offset; /* Offset to RDS config */ - __le32 sds_ring_offset; /* Offset to SDS config */ - __le16 num_rds_rings; /* Count of RDS rings */ - __le16 num_sds_rings; /* Count of SDS rings */ - __le16 rsvd1; /* Padding */ - __le16 rsvd2; /* Padding */ - u8 reserved[128]; /* reserve space for future expansion*/ - /* MUST BE 64-bit aligned. - The following is packed: - - N hostrq_rds_rings - - N hostrq_sds_rings */ - char data[0]; -} nx_hostrq_rx_ctx_t; - -typedef struct { - __le32 host_producer_crb; /* Crb to use */ - __le32 rsvd1; /* Padding */ -} nx_cardrsp_rds_ring_t; - -typedef struct { - __le32 host_consumer_crb; /* Crb to use */ - __le32 interrupt_crb; /* Crb to use */ -} nx_cardrsp_sds_ring_t; - -typedef struct { - /* These ring offsets are relative to data[0] below */ - __le32 rds_ring_offset; /* Offset to RDS config */ - __le32 sds_ring_offset; /* Offset to SDS config */ - __le32 host_ctx_state; /* Starting State */ - __le32 num_fn_per_port; /* How many PCI fn share the port */ - __le16 num_rds_rings; /* Count of RDS rings */ - __le16 num_sds_rings; /* Count of SDS rings */ - __le16 context_id; /* Handle for context */ - u8 phys_port; /* Physical id of port */ - u8 virt_port; /* Virtual/Logical id of port */ - u8 reserved[128]; /* save space for future expansion */ - /* MUST BE 64-bit aligned. 
- The following is packed: - - N cardrsp_rds_rings - - N cardrs_sds_rings */ - char data[0]; -} nx_cardrsp_rx_ctx_t; - -#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ - (sizeof(HOSTRQ_RX) + \ - (rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) + \ - (sds_rings)*(sizeof(nx_hostrq_sds_ring_t))) - -#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \ - (sizeof(CARDRSP_RX) + \ - (rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) + \ - (sds_rings)*(sizeof(nx_cardrsp_sds_ring_t))) - -/* - * Tx context - */ - -typedef struct { - __le64 host_phys_addr; /* Ring base addr */ - __le32 ring_size; /* Ring entries */ - __le32 rsvd; /* Padding */ -} nx_hostrq_cds_ring_t; - -typedef struct { - __le64 host_rsp_dma_addr; /* Response dma'd here */ - __le64 cmd_cons_dma_addr; /* */ - __le64 dummy_dma_addr; /* */ - __le32 capabilities[4]; /* Flag bit vector */ - __le32 host_int_crb_mode; /* Interrupt crb usage */ - __le32 rsvd1; /* Padding */ - __le16 rsvd2; /* Padding */ - __le16 interrupt_ctl; - __le16 msi_index; - __le16 rsvd3; /* Padding */ - nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */ - u8 reserved[128]; /* future expansion */ -} nx_hostrq_tx_ctx_t; - -typedef struct { - __le32 host_producer_crb; /* Crb to use */ - __le32 interrupt_crb; /* Crb to use */ -} nx_cardrsp_cds_ring_t; - -typedef struct { - __le32 host_ctx_state; /* Starting state */ - __le16 context_id; /* Handle for context */ - u8 phys_port; /* Physical id of port */ - u8 virt_port; /* Virtual/Logical id of port */ - nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */ - u8 reserved[128]; /* future expansion */ -} nx_cardrsp_tx_ctx_t; - -#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) -#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) - -/* CRB */ - -#define NX_HOST_RDS_CRB_MODE_UNIQUE 0 -#define NX_HOST_RDS_CRB_MODE_SHARED 1 -#define NX_HOST_RDS_CRB_MODE_CUSTOM 2 -#define NX_HOST_RDS_CRB_MODE_MAX 3 - -#define NX_HOST_INT_CRB_MODE_UNIQUE 0 -#define NX_HOST_INT_CRB_MODE_SHARED 1 -#define NX_HOST_INT_CRB_MODE_NORX 2 -#define NX_HOST_INT_CRB_MODE_NOTX 3 -#define NX_HOST_INT_CRB_MODE_NORXTX 4 - - -/* MAC */ - -#define MC_COUNT_P2 16 -#define MC_COUNT_P3 38 - -#define NETXEN_MAC_NOOP 0 -#define NETXEN_MAC_ADD 1 -#define NETXEN_MAC_DEL 2 - -typedef struct nx_mac_list_s { - struct list_head list; - uint8_t mac_addr[ETH_ALEN+2]; -} nx_mac_list_t; - -struct nx_vlan_ip_list { - struct list_head list; - u32 ip_addr; -}; - -/* - * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is - * adjusted based on configured MTU. 
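
[Not part of the patch: a sketch of seeding the "normal" bucket of the coalescing structure defined a few lines below from the defaults defined just below; the function name is illustrative:]

	static void nx_init_coalesce_defaults(nx_nic_intr_coalesce_t *coal)
	{
		/* defaults assume a 1500-byte MTU, per the comment above */
		coal->normal.data.rx_time_us =
			NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
		coal->normal.data.rx_packets =
			NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
		coal->normal.data.tx_time_us =
			NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
		coal->normal.data.tx_packets =
			NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
	}
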
- */ -#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3 -#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256 -#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64 -#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4 - -#define NETXEN_NIC_INTR_DEFAULT 0x04 - -typedef union { - struct { - uint16_t rx_packets; - uint16_t rx_time_us; - uint16_t tx_packets; - uint16_t tx_time_us; - } data; - uint64_t word; -} nx_nic_intr_coalesce_data_t; - -typedef struct { - uint16_t stats_time_us; - uint16_t rate_sample_time; - uint16_t flags; - uint16_t rsvd_1; - uint32_t low_threshold; - uint32_t high_threshold; - nx_nic_intr_coalesce_data_t normal; - nx_nic_intr_coalesce_data_t low; - nx_nic_intr_coalesce_data_t high; - nx_nic_intr_coalesce_data_t irq; -} nx_nic_intr_coalesce_t; - -#define NX_HOST_REQUEST 0x13 -#define NX_NIC_REQUEST 0x14 - -#define NX_MAC_EVENT 0x1 - -#define NX_IP_UP 2 -#define NX_IP_DOWN 3 - -/* - * Driver --> Firmware - */ -#define NX_NIC_H2C_OPCODE_START 0 -#define NX_NIC_H2C_OPCODE_CONFIG_RSS 1 -#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL 2 -#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3 -#define NX_NIC_H2C_OPCODE_CONFIG_LED 4 -#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5 -#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC 6 -#define NX_NIC_H2C_OPCODE_LRO_REQUEST 7 -#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS 8 -#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST 9 -#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST 10 -#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU 11 -#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12 -#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13 -#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14 -#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15 -#define NX_NIC_H2C_OPCODE_GET_NET_STATS 16 -#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V 17 -#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR 18 -#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK 19 -#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE 20 -#define NX_NIC_H2C_OPCODE_GET_LINKEVENT 21 -#define NX_NIC_C2C_OPCODE 22 -#define NX_NIC_H2C_OPCODE_CONFIG_BRIDGING 23 -#define NX_NIC_H2C_OPCODE_CONFIG_HW_LRO 24 -#define NX_NIC_H2C_OPCODE_LAST 25 - -/* - * Firmware --> Driver - */ - -#define NX_NIC_C2H_OPCODE_START 128 -#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129 -#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130 -#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131 -#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132 -#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133 -#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134 -#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135 -#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS 136 -#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137 -#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138 -#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139 -#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140 -#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 -#define NX_NIC_C2H_OPCODE_LAST 142 - -#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ -#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ -#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ - -#define NX_NIC_LRO_REQUEST_FIRST 0 -#define NX_NIC_LRO_REQUEST_ADD_FLOW 1 -#define NX_NIC_LRO_REQUEST_DELETE_FLOW 2 -#define NX_NIC_LRO_REQUEST_TIMER 3 -#define NX_NIC_LRO_REQUEST_CLEANUP 4 -#define NX_NIC_LRO_REQUEST_ADD_FLOW_SCHEDULED 5 -#define NX_TOE_LRO_REQUEST_ADD_FLOW 6 -#define NX_TOE_LRO_REQUEST_ADD_FLOW_RESPONSE 7 -#define NX_TOE_LRO_REQUEST_DELETE_FLOW 8 -#define NX_TOE_LRO_REQUEST_DELETE_FLOW_RESPONSE 9 -#define 
NX_TOE_LRO_REQUEST_TIMER 10 -#define NX_NIC_LRO_REQUEST_LAST 11 - -#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5) -#define NX_FW_CAPABILITY_SWITCHING (1 << 6) -#define NX_FW_CAPABILITY_PEXQ (1 << 7) -#define NX_FW_CAPABILITY_BDG (1 << 8) -#define NX_FW_CAPABILITY_FVLANTX (1 << 9) -#define NX_FW_CAPABILITY_HW_LRO (1 << 10) -#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11) - -/* module types */ -#define LINKEVENT_MODULE_NOT_PRESENT 1 -#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 -#define LINKEVENT_MODULE_OPTICAL_SRLR 3 -#define LINKEVENT_MODULE_OPTICAL_LRM 4 -#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5 -#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6 -#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7 -#define LINKEVENT_MODULE_TWINAX 8 - -#define LINKSPEED_10GBPS 10000 -#define LINKSPEED_1GBPS 1000 -#define LINKSPEED_100MBPS 100 -#define LINKSPEED_10MBPS 10 - -#define LINKSPEED_ENCODED_10MBPS 0 -#define LINKSPEED_ENCODED_100MBPS 1 -#define LINKSPEED_ENCODED_1GBPS 2 - -#define LINKEVENT_AUTONEG_DISABLED 0 -#define LINKEVENT_AUTONEG_ENABLED 1 - -#define LINKEVENT_HALF_DUPLEX 0 -#define LINKEVENT_FULL_DUPLEX 1 - -#define LINKEVENT_LINKSPEED_MBPS 0 -#define LINKEVENT_LINKSPEED_ENCODED 1 - -#define AUTO_FW_RESET_ENABLED 0xEF10AF12 -#define AUTO_FW_RESET_DISABLED 0xDCBAAF12 - -/* firmware response header: - * 63:58 - message type - * 57:56 - owner - * 55:53 - desc count - * 52:48 - reserved - * 47:40 - completion id - * 39:32 - opcode - * 31:16 - error code - * 15:00 - reserved - */ -#define netxen_get_nic_msgtype(msg_hdr) \ - ((msg_hdr >> 58) & 0x3F) -#define netxen_get_nic_msg_compid(msg_hdr) \ - ((msg_hdr >> 40) & 0xFF) -#define netxen_get_nic_msg_opcode(msg_hdr) \ - ((msg_hdr >> 32) & 0xFF) -#define netxen_get_nic_msg_errcode(msg_hdr) \ - ((msg_hdr >> 16) & 0xFFFF) - -typedef struct { - union { - struct { - u64 hdr; - u64 body[7]; - }; - u64 words[8]; - }; -} nx_fw_msg_t; - -typedef struct { - __le64 qhdr; - __le64 req_hdr; - __le64 words[6]; -} nx_nic_req_t; - -typedef struct { - u8 op; - u8 tag; - u8 mac_addr[6]; -} nx_mac_req_t; - -#define MAX_PENDING_DESC_BLOCK_SIZE 64 - -#define NETXEN_NIC_MSI_ENABLED 0x02 -#define NETXEN_NIC_MSIX_ENABLED 0x04 -#define NETXEN_NIC_LRO_ENABLED 0x08 -#define NETXEN_NIC_LRO_DISABLED 0x00 -#define NETXEN_NIC_BRIDGE_ENABLED 0X10 -#define NETXEN_NIC_DIAG_ENABLED 0x20 -#define NETXEN_IS_MSI_FAMILY(adapter) \ - ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) - -#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS -#define NETXEN_MSIX_TBL_SPACE 8192 -#define NETXEN_PCI_REG_MSIX_TBL 0x44 - -#define NETXEN_DB_MAPSIZE_BYTES 0x1000 - -#define NETXEN_NETDEV_WEIGHT 128 -#define NETXEN_ADAPTER_UP_MAGIC 777 -#define NETXEN_NIC_PEG_TUNE 0 - -#define __NX_FW_ATTACHED 0 -#define __NX_DEV_UP 1 -#define __NX_RESETTING 2 - -struct netxen_dummy_dma { - void *addr; - dma_addr_t phys_addr; -}; - -struct netxen_adapter { - struct netxen_hardware_context ahw; - - struct net_device *netdev; - struct pci_dev *pdev; - struct list_head mac_list; - struct list_head vlan_ip_list; - - spinlock_t tx_clean_lock; - - u16 num_txd; - u16 num_rxd; - u16 num_jumbo_rxd; - u16 num_lro_rxd; - - u8 max_rds_rings; - u8 max_sds_rings; - u8 driver_mismatch; - u8 msix_supported; - u8 __pad; - u8 pci_using_dac; - u8 portnum; - u8 physical_port; - - u8 mc_enabled; - u8 max_mc_count; - u8 rss_supported; - u8 link_changed; - u8 fw_wait_cnt; - u8 fw_fail_cnt; - u8 tx_timeo_cnt; - u8 need_fw_reset; - - u8 has_link_events; - u8 fw_type; - u16 tx_context_id; - u16 mtu; - u16 is_up; 
- - u16 link_speed; - u16 link_duplex; - u16 link_autoneg; - u16 module_type; - - u32 capabilities; - u32 flags; - u32 irq; - u32 temp; - - u32 int_vec_bit; - u32 heartbit; - - u8 mac_addr[ETH_ALEN]; - - struct netxen_adapter_stats stats; - - struct netxen_recv_context recv_ctx; - struct nx_host_tx_ring *tx_ring; - - int (*macaddr_set) (struct netxen_adapter *, u8 *); - int (*set_mtu) (struct netxen_adapter *, int); - int (*set_promisc) (struct netxen_adapter *, u32); - void (*set_multi) (struct net_device *); - int (*phy_read) (struct netxen_adapter *, u32 reg, u32 *); - int (*phy_write) (struct netxen_adapter *, u32 reg, u32 val); - int (*init_port) (struct netxen_adapter *, int); - int (*stop_port) (struct netxen_adapter *); - - u32 (*crb_read)(struct netxen_adapter *, ulong); - int (*crb_write)(struct netxen_adapter *, ulong, u32); - - int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *); - int (*pci_mem_write)(struct netxen_adapter *, u64, u64); - - int (*pci_set_window)(struct netxen_adapter *, u64, u32 *); - - u32 (*io_read)(struct netxen_adapter *, void __iomem *); - void (*io_write)(struct netxen_adapter *, void __iomem *, u32); - - void __iomem *tgt_mask_reg; - void __iomem *pci_int_reg; - void __iomem *tgt_status_reg; - void __iomem *crb_int_state_reg; - void __iomem *isr_int_vec; - - struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER]; - - struct netxen_dummy_dma dummy_dma; - - struct delayed_work fw_work; - - struct work_struct tx_timeout_task; - - nx_nic_intr_coalesce_t coal; - - unsigned long state; - __le32 file_prd_off; /*File fw product offset*/ - u32 fw_version; - const struct firmware *fw; -}; - -int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val); -int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val); - -#define NXRD32(adapter, off) \ - (adapter->crb_read(adapter, off)) -#define NXWR32(adapter, off, val) \ - (adapter->crb_write(adapter, off, val)) -#define NXRDIO(adapter, addr) \ - (adapter->io_read(adapter, addr)) -#define NXWRIO(adapter, addr, val) \ - (adapter->io_write(adapter, addr, val)) - -int netxen_pcie_sem_lock(struct netxen_adapter *, int, u32); -void netxen_pcie_sem_unlock(struct netxen_adapter *, int); - -#define netxen_rom_lock(a) \ - netxen_pcie_sem_lock((a), 2, NETXEN_ROM_LOCK_ID) -#define netxen_rom_unlock(a) \ - netxen_pcie_sem_unlock((a), 2) -#define netxen_phy_lock(a) \ - netxen_pcie_sem_lock((a), 3, NETXEN_PHY_LOCK_ID) -#define netxen_phy_unlock(a) \ - netxen_pcie_sem_unlock((a), 3) -#define netxen_api_lock(a) \ - netxen_pcie_sem_lock((a), 5, 0) -#define netxen_api_unlock(a) \ - netxen_pcie_sem_unlock((a), 5) -#define netxen_sw_lock(a) \ - netxen_pcie_sem_lock((a), 6, 0) -#define netxen_sw_unlock(a) \ - netxen_pcie_sem_unlock((a), 6) -#define crb_win_lock(a) \ - netxen_pcie_sem_lock((a), 7, NETXEN_CRB_WIN_LOCK_ID) -#define crb_win_unlock(a) \ - netxen_pcie_sem_unlock((a), 7) - -int netxen_nic_get_board_info(struct netxen_adapter *adapter); -int netxen_nic_wol_supported(struct netxen_adapter *adapter); - -/* Functions from netxen_nic_init.c */ -int netxen_init_dummy_dma(struct netxen_adapter *adapter); -void netxen_free_dummy_dma(struct netxen_adapter *adapter); - -int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter); -int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); -int netxen_load_firmware(struct netxen_adapter *adapter); -int netxen_need_fw_reset(struct netxen_adapter *adapter); -void netxen_request_firmware(struct netxen_adapter *adapter); -void 
netxen_release_firmware(struct netxen_adapter *adapter); -int netxen_pinit_from_rom(struct netxen_adapter *adapter); - -int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); -int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, - u8 *bytes, size_t size); -int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr, - u8 *bytes, size_t size); -int netxen_flash_unlock(struct netxen_adapter *adapter); -int netxen_backup_crbinit(struct netxen_adapter *adapter); -int netxen_flash_erase_secondary(struct netxen_adapter *adapter); -int netxen_flash_erase_primary(struct netxen_adapter *adapter); -void netxen_halt_pegs(struct netxen_adapter *adapter); - -int netxen_rom_se(struct netxen_adapter *adapter, int addr); - -int netxen_alloc_sw_resources(struct netxen_adapter *adapter); -void netxen_free_sw_resources(struct netxen_adapter *adapter); - -void netxen_setup_hwops(struct netxen_adapter *adapter); -void __iomem *netxen_get_ioaddr(struct netxen_adapter *, u32); - -int netxen_alloc_hw_resources(struct netxen_adapter *adapter); -void netxen_free_hw_resources(struct netxen_adapter *adapter); - -void netxen_release_rx_buffers(struct netxen_adapter *adapter); -void netxen_release_tx_buffers(struct netxen_adapter *adapter); - -int netxen_init_firmware(struct netxen_adapter *adapter); -void netxen_nic_clear_stats(struct netxen_adapter *adapter); -void netxen_watchdog_task(struct work_struct *work); -void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, - struct nx_host_rds_ring *rds_ring); -int netxen_process_cmd_ring(struct netxen_adapter *adapter); -int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max); - -void netxen_p3_free_mac_list(struct netxen_adapter *adapter); -int netxen_config_intr_coalesce(struct netxen_adapter *adapter); -int netxen_config_rss(struct netxen_adapter *adapter, int enable); -int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd); -int netxen_linkevent_request(struct netxen_adapter *adapter, int enable); -void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup); -void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *); -void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64); - -int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, - u32 speed, u32 duplex, u32 autoneg); -int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); -int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); -int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable); -int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable); -int netxen_send_lro_cleanup(struct netxen_adapter *adapter); - -void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, - struct nx_host_tx_ring *tx_ring); - -/* Functions from netxen_nic_main.c */ -int netxen_nic_reset_context(struct netxen_adapter *); - -/* - * NetXen Board information - */ - -#define NETXEN_MAX_SHORT_NAME 32 -struct netxen_brdinfo { - int brdtype; /* type of board */ - long ports; /* max no of physical ports */ - char short_name[NETXEN_MAX_SHORT_NAME]; -}; - -static const struct netxen_brdinfo netxen_boards[] = { - {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"}, - {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"}, - {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"}, - {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"}, - {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"}, - {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"}, - {NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig 
"}, - {NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"}, - {NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"}, - {NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"}, - {NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"}, - {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, - {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, - {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, - {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"}, - {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"}, - {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"}, - {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, - {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} -}; - -#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) - -static inline void get_brd_name_by_type(u32 type, char *name) -{ - int i, found = 0; - for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { - if (netxen_boards[i].brdtype == type) { - strcpy(name, netxen_boards[i].short_name); - found = 1; - break; - } - - } - if (!found) - name = "Unknown"; -} - -static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) -{ - smp_mb(); - return find_diff_among(tx_ring->producer, - tx_ring->sw_consumer, tx_ring->num_desc); - -} - -int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac); -int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac); -extern void netxen_change_ringparam(struct netxen_adapter *adapter); -extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, - int *valp); - -extern const struct ethtool_ops netxen_nic_ethtool_ops; - -#endif /* __NETXEN_NIC_H_ */ diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c deleted file mode 100644 index a925392..0000000 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ /dev/null @@ -1,793 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". 
- * - */ - -#include "netxen_nic_hw.h" -#include "netxen_nic.h" - -#define NXHAL_VERSION 1 - -static u32 -netxen_poll_rsp(struct netxen_adapter *adapter) -{ - u32 rsp = NX_CDRP_RSP_OK; - int timeout = 0; - - do { - /* give atleast 1ms for firmware to respond */ - msleep(1); - - if (++timeout > NX_OS_CRB_RETRY_COUNT) - return NX_CDRP_RSP_TIMEOUT; - - rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET); - } while (!NX_CDRP_IS_RSP(rsp)); - - return rsp; -} - -static u32 -netxen_issue_cmd(struct netxen_adapter *adapter, - u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd) -{ - u32 rsp; - u32 signature = 0; - u32 rcode = NX_RCODE_SUCCESS; - - signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version); - - /* Acquire semaphore before accessing CRB */ - if (netxen_api_lock(adapter)) - return NX_RCODE_TIMEOUT; - - NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature); - - NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1); - - NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2); - - NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3); - - NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd)); - - rsp = netxen_poll_rsp(adapter); - - if (rsp == NX_CDRP_RSP_TIMEOUT) { - printk(KERN_ERR "%s: card response timeout.\n", - netxen_nic_driver_name); - - rcode = NX_RCODE_TIMEOUT; - } else if (rsp == NX_CDRP_RSP_FAIL) { - rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET); - - printk(KERN_ERR "%s: failed card response code:0x%x\n", - netxen_nic_driver_name, rcode); - } - - /* Release semaphore */ - netxen_api_unlock(adapter); - - return rcode; -} - -int -nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) -{ - u32 rcode = NX_RCODE_SUCCESS; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) - rcode = netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - recv_ctx->context_id, - mtu, - 0, - NX_CDRP_CMD_SET_MTU); - - if (rcode != NX_RCODE_SUCCESS) - return -EIO; - - return 0; -} - -int -nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, - u32 speed, u32 duplex, u32 autoneg) -{ - - return netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - speed, - duplex, - autoneg, - NX_CDRP_CMD_CONFIG_GBE_PORT); - -} - -static int -nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter) -{ - void *addr; - nx_hostrq_rx_ctx_t *prq; - nx_cardrsp_rx_ctx_t *prsp; - nx_hostrq_rds_ring_t *prq_rds; - nx_hostrq_sds_ring_t *prq_sds; - nx_cardrsp_rds_ring_t *prsp_rds; - nx_cardrsp_sds_ring_t *prsp_sds; - struct nx_host_rds_ring *rds_ring; - struct nx_host_sds_ring *sds_ring; - - dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; - u64 phys_addr; - - int i, nrds_rings, nsds_rings; - size_t rq_size, rsp_size; - u32 cap, reg, val; - - int err; - - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - nrds_rings = adapter->max_rds_rings; - nsds_rings = adapter->max_sds_rings; - - rq_size = - SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings); - rsp_size = - SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings); - - addr = pci_alloc_consistent(adapter->pdev, - rq_size, &hostrq_phys_addr); - if (addr == NULL) - return -ENOMEM; - prq = addr; - - addr = pci_alloc_consistent(adapter->pdev, - rsp_size, &cardrsp_phys_addr); - if (addr == NULL) { - err = -ENOMEM; - goto out_free_rq; - } - prsp = addr; - - prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); - - cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN); - cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS); - - prq->capabilities[0] = cpu_to_le32(cap); - prq->host_int_crb_mode = - 
cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); - prq->host_rds_crb_mode = - cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE); - - prq->num_rds_rings = cpu_to_le16(nrds_rings); - prq->num_sds_rings = cpu_to_le16(nsds_rings); - prq->rds_ring_offset = cpu_to_le32(0); - - val = le32_to_cpu(prq->rds_ring_offset) + - (sizeof(nx_hostrq_rds_ring_t) * nrds_rings); - prq->sds_ring_offset = cpu_to_le32(val); - - prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + - le32_to_cpu(prq->rds_ring_offset)); - - for (i = 0; i < nrds_rings; i++) { - - rds_ring = &recv_ctx->rds_rings[i]; - - prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); - prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); - prq_rds[i].ring_kind = cpu_to_le32(i); - prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); - } - - prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + - le32_to_cpu(prq->sds_ring_offset)); - - for (i = 0; i < nsds_rings; i++) { - - sds_ring = &recv_ctx->sds_rings[i]; - - prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); - prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); - prq_sds[i].msi_index = cpu_to_le16(i); - } - - phys_addr = hostrq_phys_addr; - err = netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - (u32)(phys_addr >> 32), - (u32)(phys_addr & 0xffffffff), - rq_size, - NX_CDRP_CMD_CREATE_RX_CTX); - if (err) { - printk(KERN_WARNING - "Failed to create rx ctx in firmware%d\n", err); - goto out_free_rsp; - } - - - prsp_rds = ((nx_cardrsp_rds_ring_t *) - &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); - - for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { - rds_ring = &recv_ctx->rds_rings[i]; - - reg = le32_to_cpu(prsp_rds[i].host_producer_crb); - rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter, - NETXEN_NIC_REG(reg - 0x200)); - } - - prsp_sds = ((nx_cardrsp_sds_ring_t *) - &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); - - for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { - sds_ring = &recv_ctx->sds_rings[i]; - - reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); - sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter, - NETXEN_NIC_REG(reg - 0x200)); - - reg = le32_to_cpu(prsp_sds[i].interrupt_crb); - sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter, - NETXEN_NIC_REG(reg - 0x200)); - } - - recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); - recv_ctx->context_id = le16_to_cpu(prsp->context_id); - recv_ctx->virt_port = prsp->virt_port; - -out_free_rsp: - pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr); -out_free_rq: - pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr); - return err; -} - -static void -nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - if (netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - recv_ctx->context_id, - NX_DESTROY_CTX_RESET, - 0, - NX_CDRP_CMD_DESTROY_RX_CTX)) { - - printk(KERN_WARNING - "%s: Failed to destroy rx ctx in firmware\n", - netxen_nic_driver_name); - } -} - -static int -nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter) -{ - nx_hostrq_tx_ctx_t *prq; - nx_hostrq_cds_ring_t *prq_cds; - nx_cardrsp_tx_ctx_t *prsp; - void *rq_addr, *rsp_addr; - size_t rq_size, rsp_size; - u32 temp; - int err = 0; - u64 offset, phys_addr; - dma_addr_t rq_phys_addr, rsp_phys_addr; - struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t); - rq_addr = pci_alloc_consistent(adapter->pdev, - rq_size, 
&rq_phys_addr); - if (!rq_addr) - return -ENOMEM; - - rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t); - rsp_addr = pci_alloc_consistent(adapter->pdev, - rsp_size, &rsp_phys_addr); - if (!rsp_addr) { - err = -ENOMEM; - goto out_free_rq; - } - - memset(rq_addr, 0, rq_size); - prq = rq_addr; - - memset(rsp_addr, 0, rsp_size); - prsp = rsp_addr; - - prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); - - temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO); - prq->capabilities[0] = cpu_to_le32(temp); - - prq->host_int_crb_mode = - cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); - - prq->interrupt_ctl = 0; - prq->msi_index = 0; - - prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr); - - offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx); - prq->cmd_cons_dma_addr = cpu_to_le64(offset); - - prq_cds = &prq->cds_ring; - - prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); - prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); - - phys_addr = rq_phys_addr; - err = netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - (u32)(phys_addr >> 32), - ((u32)phys_addr & 0xffffffff), - rq_size, - NX_CDRP_CMD_CREATE_TX_CTX); - - if (err == NX_RCODE_SUCCESS) { - temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); - tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, - NETXEN_NIC_REG(temp - 0x200)); -#if 0 - adapter->tx_state = - le32_to_cpu(prsp->host_ctx_state); -#endif - adapter->tx_context_id = - le16_to_cpu(prsp->context_id); - } else { - printk(KERN_WARNING - "Failed to create tx ctx in firmware%d\n", err); - err = -EIO; - } - - pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr); - -out_free_rq: - pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr); - - return err; -} - -static void -nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter) -{ - if (netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - adapter->tx_context_id, - NX_DESTROY_CTX_RESET, - 0, - NX_CDRP_CMD_DESTROY_TX_CTX)) { - - printk(KERN_WARNING - "%s: Failed to destroy tx ctx in firmware\n", - netxen_nic_driver_name); - } -} - -int -nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val) -{ - u32 rcode; - - rcode = netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - reg, - 0, - 0, - NX_CDRP_CMD_READ_PHY); - - if (rcode != NX_RCODE_SUCCESS) - return -EIO; - - return NXRD32(adapter, NX_ARG1_CRB_OFFSET); -} - -int -nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val) -{ - u32 rcode; - - rcode = netxen_issue_cmd(adapter, - adapter->ahw.pci_func, - NXHAL_VERSION, - reg, - val, - 0, - NX_CDRP_CMD_WRITE_PHY); - - if (rcode != NX_RCODE_SUCCESS) - return -EIO; - - return 0; -} - -static u64 ctx_addr_sig_regs[][3] = { - {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)}, - {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)}, - {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)}, - {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)} -}; - -#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0]) -#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2]) -#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1]) - -#define lower32(x) ((u32)((x) & 0xffffffff)) -#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff)) - -static struct netxen_recv_crb recv_crb_registers[] = { - /* Instance 0 */ - { - /* crb_rcv_producer: */ - { - NETXEN_NIC_REG(0x100), - /* Jumbo frames */ - 
NETXEN_NIC_REG(0x110), - /* LRO */ - NETXEN_NIC_REG(0x120) - }, - /* crb_sts_consumer: */ - { - NETXEN_NIC_REG(0x138), - NETXEN_NIC_REG_2(0x000), - NETXEN_NIC_REG_2(0x004), - NETXEN_NIC_REG_2(0x008), - }, - /* sw_int_mask */ - { - CRB_SW_INT_MASK_0, - NETXEN_NIC_REG_2(0x044), - NETXEN_NIC_REG_2(0x048), - NETXEN_NIC_REG_2(0x04c), - }, - }, - /* Instance 1 */ - { - /* crb_rcv_producer: */ - { - NETXEN_NIC_REG(0x144), - /* Jumbo frames */ - NETXEN_NIC_REG(0x154), - /* LRO */ - NETXEN_NIC_REG(0x164) - }, - /* crb_sts_consumer: */ - { - NETXEN_NIC_REG(0x17c), - NETXEN_NIC_REG_2(0x020), - NETXEN_NIC_REG_2(0x024), - NETXEN_NIC_REG_2(0x028), - }, - /* sw_int_mask */ - { - CRB_SW_INT_MASK_1, - NETXEN_NIC_REG_2(0x064), - NETXEN_NIC_REG_2(0x068), - NETXEN_NIC_REG_2(0x06c), - }, - }, - /* Instance 2 */ - { - /* crb_rcv_producer: */ - { - NETXEN_NIC_REG(0x1d8), - /* Jumbo frames */ - NETXEN_NIC_REG(0x1f8), - /* LRO */ - NETXEN_NIC_REG(0x208) - }, - /* crb_sts_consumer: */ - { - NETXEN_NIC_REG(0x220), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - }, - /* sw_int_mask */ - { - CRB_SW_INT_MASK_2, - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - }, - }, - /* Instance 3 */ - { - /* crb_rcv_producer: */ - { - NETXEN_NIC_REG(0x22c), - /* Jumbo frames */ - NETXEN_NIC_REG(0x23c), - /* LRO */ - NETXEN_NIC_REG(0x24c) - }, - /* crb_sts_consumer: */ - { - NETXEN_NIC_REG(0x264), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - }, - /* sw_int_mask */ - { - CRB_SW_INT_MASK_3, - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - NETXEN_NIC_REG_2(0x03c), - }, - }, -}; - -static int -netxen_init_old_ctx(struct netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct nx_host_sds_ring *sds_ring; - struct nx_host_tx_ring *tx_ring; - int ring; - int port = adapter->portnum; - struct netxen_ring_ctx *hwctx; - u32 signature; - - tx_ring = adapter->tx_ring; - recv_ctx = &adapter->recv_ctx; - hwctx = recv_ctx->hwctx; - - hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr); - hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc); - - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - - hwctx->rcv_rings[ring].addr = - cpu_to_le64(rds_ring->phys_addr); - hwctx->rcv_rings[ring].size = - cpu_to_le32(rds_ring->num_desc); - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - if (ring == 0) { - hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr); - hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc); - } - hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr); - hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc); - hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring); - } - hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings); - - signature = (adapter->max_sds_rings > 1) ? 
- NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE; - - NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port), - lower32(recv_ctx->phys_addr)); - NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port), - upper32(recv_ctx->phys_addr)); - NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), - signature | port); - return 0; -} - -int netxen_alloc_hw_resources(struct netxen_adapter *adapter) -{ - void *addr; - int err = 0; - int ring; - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct nx_host_sds_ring *sds_ring; - struct nx_host_tx_ring *tx_ring; - - struct pci_dev *pdev = adapter->pdev; - struct net_device *netdev = adapter->netdev; - int port = adapter->portnum; - - recv_ctx = &adapter->recv_ctx; - tx_ring = adapter->tx_ring; - - addr = pci_alloc_consistent(pdev, - sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), - &recv_ctx->phys_addr); - if (addr == NULL) { - dev_err(&pdev->dev, "failed to allocate hw context\n"); - return -ENOMEM; - } - - memset(addr, 0, sizeof(struct netxen_ring_ctx)); - recv_ctx->hwctx = addr; - recv_ctx->hwctx->ctx_id = cpu_to_le32(port); - recv_ctx->hwctx->cmd_consumer_offset = - cpu_to_le64(recv_ctx->phys_addr + - sizeof(struct netxen_ring_ctx)); - tx_ring->hw_consumer = - (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx)); - - /* cmd desc ring */ - addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring), - &tx_ring->phys_addr); - - if (addr == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n", - netdev->name); - err = -ENOMEM; - goto err_out_free; - } - - tx_ring->desc_head = addr; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - addr = pci_alloc_consistent(adapter->pdev, - RCV_DESC_RINGSIZE(rds_ring), - &rds_ring->phys_addr); - if (addr == NULL) { - dev_err(&pdev->dev, - "%s: failed to allocate rds ring [%d]\n", - netdev->name, ring); - err = -ENOMEM; - goto err_out_free; - } - rds_ring->desc_head = addr; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - rds_ring->crb_rcv_producer = - netxen_get_ioaddr(adapter, - recv_crb_registers[port].crb_rcv_producer[ring]); - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - addr = pci_alloc_consistent(adapter->pdev, - STATUS_DESC_RINGSIZE(sds_ring), - &sds_ring->phys_addr); - if (addr == NULL) { - dev_err(&pdev->dev, - "%s: failed to allocate sds ring [%d]\n", - netdev->name, ring); - err = -ENOMEM; - goto err_out_free; - } - sds_ring->desc_head = addr; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - sds_ring->crb_sts_consumer = - netxen_get_ioaddr(adapter, - recv_crb_registers[port].crb_sts_consumer[ring]); - - sds_ring->crb_intr_mask = - netxen_get_ioaddr(adapter, - recv_crb_registers[port].sw_int_mask[ring]); - } - } - - - if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state)) - goto done; - err = nx_fw_cmd_create_rx_ctx(adapter); - if (err) - goto err_out_free; - err = nx_fw_cmd_create_tx_ctx(adapter); - if (err) - goto err_out_free; - } else { - err = netxen_init_old_ctx(adapter); - if (err) - goto err_out_free; - } - -done: - return 0; - -err_out_free: - netxen_free_hw_resources(adapter); - return err; -} - -void netxen_free_hw_resources(struct netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct nx_host_sds_ring *sds_ring; - struct nx_host_tx_ring *tx_ring; - int ring; - - int port = adapter->portnum; - - if 
(!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state)) - goto done; - - nx_fw_cmd_destroy_rx_ctx(adapter); - nx_fw_cmd_destroy_tx_ctx(adapter); - } else { - netxen_api_lock(adapter); - NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), - NETXEN_CTX_D3_RESET | port); - netxen_api_unlock(adapter); - } - - /* Allow dma queues to drain after context reset */ - msleep(20); - -done: - recv_ctx = &adapter->recv_ctx; - - if (recv_ctx->hwctx != NULL) { - pci_free_consistent(adapter->pdev, - sizeof(struct netxen_ring_ctx) + - sizeof(uint32_t), - recv_ctx->hwctx, - recv_ctx->phys_addr); - recv_ctx->hwctx = NULL; - } - - tx_ring = adapter->tx_ring; - if (tx_ring->desc_head != NULL) { - pci_free_consistent(adapter->pdev, - TX_DESC_RINGSIZE(tx_ring), - tx_ring->desc_head, tx_ring->phys_addr); - tx_ring->desc_head = NULL; - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - - if (rds_ring->desc_head != NULL) { - pci_free_consistent(adapter->pdev, - RCV_DESC_RINGSIZE(rds_ring), - rds_ring->desc_head, - rds_ring->phys_addr); - rds_ring->desc_head = NULL; - } - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - if (sds_ring->desc_head != NULL) { - pci_free_consistent(adapter->pdev, - STATUS_DESC_RINGSIZE(sds_ring), - sds_ring->desc_head, - sds_ring->phys_addr); - sds_ring->desc_head = NULL; - } - } -} - diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c deleted file mode 100644 index b34fb74..0000000 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ /dev/null @@ -1,835 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". 
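
[Not part of the patch: the NETXEN_NIC_STAT() macro defined just below records a (size, offset) pair for each counter in struct netxen_adapter, which lets the ethtool stats callback walk netxen_nic_gstrings_stats generically instead of naming each field. A sketch of that loop, with an illustrative function name:]

	static void nx_fill_ethtool_stats(struct netxen_adapter *adapter,
					  u64 *data)
	{
		int i;

		for (i = 0; i < NETXEN_NIC_STATS_LEN; i++) {
			const struct netxen_nic_stats *s =
				&netxen_nic_gstrings_stats[i];
			char *p = (char *)adapter + s->stat_offset;

			/* counters are either u64 or u32; pick by size */
			data[i] = (s->sizeof_stat == sizeof(u64)) ?
				*(u64 *)p : *(u32 *)p;
		}
	}
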
- * - */ - -#include -#include -#include -#include -#include -#include - -#include "netxen_nic.h" -#include "netxen_nic_hw.h" - -struct netxen_nic_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; - -#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_adapter *)0)->m), \ - offsetof(struct netxen_adapter, m) - -#define NETXEN_NIC_PORT_WINDOW 0x10000 -#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF - -static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = { - {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)}, - {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)}, - {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)}, - {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)}, - {"csummed", NETXEN_NIC_STAT(stats.csummed)}, - {"rx_pkts", NETXEN_NIC_STAT(stats.rx_pkts)}, - {"lro_pkts", NETXEN_NIC_STAT(stats.lro_pkts)}, - {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)}, - {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)}, -}; - -#define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats) - -static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = { - "Register_Test_on_offline", - "Link_Test_on_offline" -}; - -#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test) - -#define NETXEN_NIC_REGS_COUNT 30 -#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32)) -#define NETXEN_MAX_EEPROM_LEN 1024 - -static int netxen_nic_get_eeprom_len(struct net_device *dev) -{ - return NETXEN_FLASH_TOTAL_SIZE; -} - -static void -netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 fw_major = 0; - u32 fw_minor = 0; - u32 fw_build = 0; - - strncpy(drvinfo->driver, netxen_nic_driver_name, 32); - strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); - fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); - fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); - fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); - sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); - - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->regdump_len = NETXEN_NIC_REGS_LEN; - drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev); -} - -static int -netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - int check_sfp_module = 0; - - /* read which mode */ - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); - - ecmd->advertising = (ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); - - ecmd->port = PORT_TP; - - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->duplex = adapter->link_duplex; - ecmd->autoneg = adapter->link_autoneg; - - } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - u32 val; - - val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); - if (val == NETXEN_PORT_MODE_802_3_AP) { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; - } else { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; - } - - if (netif_running(dev) && adapter->has_link_events) { - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->autoneg = adapter->link_autoneg; - ecmd->duplex = adapter->link_duplex; - goto skip; - } - - ecmd->port = PORT_TP; - - if 
(NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - u16 pcifn = adapter->ahw.pci_func; - - val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn)); - ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ * - P3_LINK_SPEED_VAL(pcifn, val)); - } else - ethtool_cmd_speed_set(ecmd, SPEED_10000); - - ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_DISABLE; - } else - return -EIO; - -skip: - ecmd->phy_address = adapter->physical_port; - ecmd->transceiver = XCVR_EXTERNAL; - - switch (adapter->ahw.board_type) { - case NETXEN_BRDTYPE_P2_SB35_4G: - case NETXEN_BRDTYPE_P2_SB31_2G: - case NETXEN_BRDTYPE_P3_REF_QG: - case NETXEN_BRDTYPE_P3_4_GB: - case NETXEN_BRDTYPE_P3_4_GB_MM: - - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; - case NETXEN_BRDTYPE_P2_SB31_10G_CX4: - case NETXEN_BRDTYPE_P3_10G_CX4: - case NETXEN_BRDTYPE_P3_10G_CX4_LP: - case NETXEN_BRDTYPE_P3_10000_BASE_T: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->autoneg = (adapter->ahw.board_type == - NETXEN_BRDTYPE_P2_SB31_10G_CX4) ? - (AUTONEG_DISABLE) : (adapter->link_autoneg); - break; - case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: - case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: - case NETXEN_BRDTYPE_P3_IMEZ: - case NETXEN_BRDTYPE_P3_XG_LOM: - case NETXEN_BRDTYPE_P3_HMEZ: - ecmd->supported |= SUPPORTED_MII; - ecmd->advertising |= ADVERTISED_MII; - ecmd->port = PORT_MII; - ecmd->autoneg = AUTONEG_DISABLE; - break; - case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: - case NETXEN_BRDTYPE_P3_10G_SFP_CT: - case NETXEN_BRDTYPE_P3_10G_SFP_QT: - ecmd->advertising |= ADVERTISED_TP; - ecmd->supported |= SUPPORTED_TP; - check_sfp_module = netif_running(dev) && - adapter->has_link_events; - case NETXEN_BRDTYPE_P2_SB31_10G: - case NETXEN_BRDTYPE_P3_10G_XFP: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; - break; - case NETXEN_BRDTYPE_P3_10G_TP: - if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= - (ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(dev) && - adapter->has_link_events; - } else { - ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg); - ecmd->advertising |= - (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; - } - break; - default: - printk(KERN_ERR "netxen-nic: Unsupported board model %d\n", - adapter->ahw.board_type); - return -EIO; - } - - if (check_sfp_module) { - switch (adapter->module_type) { - case LINKEVENT_MODULE_OPTICAL_UNKNOWN: - case LINKEVENT_MODULE_OPTICAL_SRLR: - case LINKEVENT_MODULE_OPTICAL_LRM: - case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ecmd->port = PORT_FIBRE; - break; - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: - case LINKEVENT_MODULE_TWINAX: - ecmd->port = PORT_TP; - break; - default: - ecmd->port = -1; - } - } - - return 0; -} - -static int -netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(ecmd); - int ret; - - if (adapter->ahw.port_type != NETXEN_NIC_GBE) - return -EOPNOTSUPP; - - if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG)) - return -EOPNOTSUPP; - - ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex, - ecmd->autoneg); - if (ret == NX_RCODE_NOT_SUPPORTED) - return -EOPNOTSUPP; - else if (ret) - return -EIO; - - 
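	/*
	 * A minimal reading of the code path that follows: the firmware
	 * accepted the new GbE link parameters, so they are cached on the
	 * adapter and, if the interface is running, it is bounced through
	 * ndo_stop/ndo_open so the hardware context picks up the change.
	 */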
adapter->link_speed = speed; - adapter->link_duplex = ecmd->duplex; - adapter->link_autoneg = ecmd->autoneg; - - if (!netif_running(dev)) - return 0; - - dev->netdev_ops->ndo_stop(dev); - return dev->netdev_ops->ndo_open(dev); -} - -static int netxen_nic_get_regs_len(struct net_device *dev) -{ - return NETXEN_NIC_REGS_LEN; -} - -static void -netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - struct nx_host_sds_ring *sds_ring; - u32 *regs_buff = p; - int ring, i = 0; - int port = adapter->physical_port; - - memset(p, 0, NETXEN_NIC_REGS_LEN); - - regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | - (adapter->pdev)->device; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return; - - regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE); - regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE); - regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1); - regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg); - regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); - regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE); - regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); - regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); - regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2); - - regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c); - regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c); - regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c); - regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - - regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c); - i += 2; - - regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3); - regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); - - } else { - i++; - - regs_buff[i++] = NXRD32(adapter, - NETXEN_NIU_XGE_CONFIG_0+(0x10000*port)); - regs_buff[i++] = NXRD32(adapter, - NETXEN_NIU_XGE_CONFIG_1+(0x10000*port)); - - regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE); - regs_buff[i++] = NXRDIO(adapter, - adapter->tx_ring->crb_cmd_consumer); - } - - regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer); - - regs_buff[i++] = NXRDIO(adapter, - recv_ctx->rds_rings[0].crb_rcv_producer); - regs_buff[i++] = NXRDIO(adapter, - recv_ctx->rds_rings[1].crb_rcv_producer); - - regs_buff[i++] = adapter->max_sds_rings; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &(recv_ctx->sds_rings[ring]); - regs_buff[i++] = NXRDIO(adapter, - sds_ring->crb_sts_consumer); - } -} - -static u32 netxen_nic_test_link(struct net_device *dev) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 val, port; - - port = adapter->physical_port; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - val = NXRD32(adapter, CRB_XG_STATE_P3); - val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); - return (val == XG_LINK_UP_P3) ? 0 : 1; - } else { - val = NXRD32(adapter, CRB_XG_STATE); - val = (val >> port*8) & 0xff; - return (val == XG_LINK_UP) ? 
0 : 1; - } -} - -static int -netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, - u8 * bytes) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - int offset; - int ret; - - if (eeprom->len == 0) - return -EINVAL; - - eeprom->magic = (adapter->pdev)->vendor | - ((adapter->pdev)->device << 16); - offset = eeprom->offset; - - ret = netxen_rom_fast_read_words(adapter, offset, bytes, - eeprom->len); - if (ret < 0) - return ret; - - return 0; -} - -static void -netxen_nic_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - - ring->rx_pending = adapter->num_rxd; - ring->rx_jumbo_pending = adapter->num_jumbo_rxd; - ring->rx_jumbo_pending += adapter->num_lro_rxd; - ring->tx_pending = adapter->num_txd; - - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G; - ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G; - } else { - ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G; - ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G; - } - - ring->tx_max_pending = MAX_CMD_DESCRIPTORS; - - ring->rx_mini_max_pending = 0; - ring->rx_mini_pending = 0; -} - -static u32 -netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) -{ - u32 num_desc; - num_desc = max(val, min); - num_desc = min(num_desc, max); - num_desc = roundup_pow_of_two(num_desc); - - if (val != num_desc) { - printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", - netxen_nic_driver_name, r_name, num_desc, val); - } - - return num_desc; -} - -static int -netxen_nic_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G; - u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; - u16 num_rxd, num_jumbo_rxd, num_txd; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return -EOPNOTSUPP; - - if (ring->rx_mini_pending) - return -EOPNOTSUPP; - - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - max_rcv_desc = MAX_RCV_DESCRIPTORS_1G; - max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; - } - - num_rxd = netxen_validate_ringparam(ring->rx_pending, - MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx"); - - num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending, - MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo"); - - num_txd = netxen_validate_ringparam(ring->tx_pending, - MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); - - if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && - num_jumbo_rxd == adapter->num_jumbo_rxd) - return 0; - - adapter->num_rxd = num_rxd; - adapter->num_jumbo_rxd = num_jumbo_rxd; - adapter->num_txd = num_txd; - - return netxen_nic_reset_context(adapter); -} - -static void -netxen_nic_get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *pause) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - __u32 val; - int port = adapter->physical_port; - - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) - return; - /* get flow control settings */ - val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); - pause->rx_pause = netxen_gb_get_rx_flowctl(val); - val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); - switch (port) { - case 0: - pause->tx_pause = !(netxen_gb_get_gb0_mask(val)); - break; - case 1: - pause->tx_pause = !(netxen_gb_get_gb1_mask(val)); - break; - case 2: - pause->tx_pause = !(netxen_gb_get_gb2_mask(val)); - break; - case 3: - default: - 
pause->tx_pause = !(netxen_gb_get_gb3_mask(val)); - break; - } - } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) - return; - pause->rx_pause = 1; - val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); - if (port == 0) - pause->tx_pause = !(netxen_xg_get_xg0_mask(val)); - else - pause->tx_pause = !(netxen_xg_get_xg1_mask(val)); - } else { - printk(KERN_ERR"%s: Unknown board type: %x\n", - netxen_nic_driver_name, adapter->ahw.port_type); - } -} - -static int -netxen_nic_set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *pause) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - __u32 val; - int port = adapter->physical_port; - /* read mode */ - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) - return -EIO; - /* set flow control */ - val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); - - if (pause->rx_pause) - netxen_gb_rx_flowctl(val); - else - netxen_gb_unset_rx_flowctl(val); - - NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), - val); - /* set autoneg */ - val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); - switch (port) { - case 0: - if (pause->tx_pause) - netxen_gb_unset_gb0_mask(val); - else - netxen_gb_set_gb0_mask(val); - break; - case 1: - if (pause->tx_pause) - netxen_gb_unset_gb1_mask(val); - else - netxen_gb_set_gb1_mask(val); - break; - case 2: - if (pause->tx_pause) - netxen_gb_unset_gb2_mask(val); - else - netxen_gb_set_gb2_mask(val); - break; - case 3: - default: - if (pause->tx_pause) - netxen_gb_unset_gb3_mask(val); - else - netxen_gb_set_gb3_mask(val); - break; - } - NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); - } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) - return -EIO; - val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); - if (port == 0) { - if (pause->tx_pause) - netxen_xg_unset_xg0_mask(val); - else - netxen_xg_set_xg0_mask(val); - } else { - if (pause->tx_pause) - netxen_xg_unset_xg1_mask(val); - else - netxen_xg_set_xg1_mask(val); - } - NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val); - } else { - printk(KERN_ERR "%s: Unknown board type: %x\n", - netxen_nic_driver_name, - adapter->ahw.port_type); - } - return 0; -} - -static int netxen_nic_reg_test(struct net_device *dev) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 data_read, data_written; - - data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0)); - if ((data_read & 0xffff) != adapter->pdev->vendor) - return 1; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - return 0; - - data_written = (u32)0xa5a5a5a5; - - NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written); - data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST); - if (data_written != data_read) - return 1; - - return 0; -} - -static int netxen_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return NETXEN_NIC_TEST_LEN; - case ETH_SS_STATS: - return NETXEN_NIC_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static void -netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, - u64 * data) -{ - memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN); - if ((data[0] = netxen_nic_reg_test(dev))) - eth_test->flags |= ETH_TEST_FL_FAILED; - /* link test */ - if ((data[1] = (u64) netxen_nic_test_link(dev))) - eth_test->flags |= ETH_TEST_FL_FAILED; -} - -static void -netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data) -{ - int index; - - switch (stringset) { - case 
ETH_SS_TEST: - memcpy(data, *netxen_nic_gstrings_test, - NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { - memcpy(data + index * ETH_GSTRING_LEN, - netxen_nic_gstrings_stats[index].stat_string, - ETH_GSTRING_LEN); - } - break; - } -} - -static void -netxen_nic_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 * data) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - int index; - - for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { - char *p = - (char *)adapter + - netxen_nic_gstrings_stats[index].stat_offset; - data[index] = - (netxen_nic_gstrings_stats[index].sizeof_stat == - sizeof(u64)) ? *(u64 *) p : *(u32 *) p; - } -} - -static void -netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 wol_cfg = 0; - - wol->supported = 0; - wol->wolopts = 0; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return; - - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); - if (wol_cfg & (1UL << adapter->portnum)) - wol->supported |= WAKE_MAGIC; - - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); - if (wol_cfg & (1UL << adapter->portnum)) - wol->wolopts |= WAKE_MAGIC; -} - -static int -netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - u32 wol_cfg = 0; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return -EOPNOTSUPP; - - if (wol->wolopts & ~WAKE_MAGIC) - return -EOPNOTSUPP; - - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); - if (!(wol_cfg & (1 << adapter->portnum))) - return -EOPNOTSUPP; - - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); - if (wol->wolopts & WAKE_MAGIC) - wol_cfg |= 1UL << adapter->portnum; - else - wol_cfg &= ~(1UL << adapter->portnum); - NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg); - - return 0; -} - -/* - * Set the coalescing parameters. Currently only normal is supported. - * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the - * firmware coalescing to default. - */ -static int netxen_set_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - - if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) - return -EINVAL; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return -EINVAL; - - /* - * Return Error if unsupported values or - * unsupported parameters are set. 
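 * Only the plain rx/tx usec and frame limits are honoured, and each must
 * fit in 16 bits; the adaptive, per-IRQ, stats-block and packet-rate
 * fields are all rejected below. From user space this corresponds to,
 * e.g., "ethtool -C ethX rx-usecs 3 rx-frames 64", while "rx-usecs 0"
 * reverts to the firmware defaults.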
- */ - if (ethcoal->rx_coalesce_usecs > 0xffff || - ethcoal->rx_max_coalesced_frames > 0xffff || - ethcoal->tx_coalesce_usecs > 0xffff || - ethcoal->tx_max_coalesced_frames > 0xffff || - ethcoal->rx_coalesce_usecs_irq || - ethcoal->rx_max_coalesced_frames_irq || - ethcoal->tx_coalesce_usecs_irq || - ethcoal->tx_max_coalesced_frames_irq || - ethcoal->stats_block_coalesce_usecs || - ethcoal->use_adaptive_rx_coalesce || - ethcoal->use_adaptive_tx_coalesce || - ethcoal->pkt_rate_low || - ethcoal->rx_coalesce_usecs_low || - ethcoal->rx_max_coalesced_frames_low || - ethcoal->tx_coalesce_usecs_low || - ethcoal->tx_max_coalesced_frames_low || - ethcoal->pkt_rate_high || - ethcoal->rx_coalesce_usecs_high || - ethcoal->rx_max_coalesced_frames_high || - ethcoal->tx_coalesce_usecs_high || - ethcoal->tx_max_coalesced_frames_high) - return -EINVAL; - - if (!ethcoal->rx_coalesce_usecs || - !ethcoal->rx_max_coalesced_frames) { - adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; - adapter->coal.normal.data.rx_time_us = - NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; - adapter->coal.normal.data.rx_packets = - NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; - } else { - adapter->coal.flags = 0; - adapter->coal.normal.data.rx_time_us = - ethcoal->rx_coalesce_usecs; - adapter->coal.normal.data.rx_packets = - ethcoal->rx_max_coalesced_frames; - } - adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs; - adapter->coal.normal.data.tx_packets = - ethcoal->tx_max_coalesced_frames; - - netxen_config_intr_coalesce(adapter); - - return 0; -} - -static int netxen_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - - if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) - return -EINVAL; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return -EINVAL; - - ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us; - ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us; - ethcoal->rx_max_coalesced_frames = - adapter->coal.normal.data.rx_packets; - ethcoal->tx_max_coalesced_frames = - adapter->coal.normal.data.tx_packets; - - return 0; -} - -const struct ethtool_ops netxen_nic_ethtool_ops = { - .get_settings = netxen_nic_get_settings, - .set_settings = netxen_nic_set_settings, - .get_drvinfo = netxen_nic_get_drvinfo, - .get_regs_len = netxen_nic_get_regs_len, - .get_regs = netxen_nic_get_regs, - .get_link = ethtool_op_get_link, - .get_eeprom_len = netxen_nic_get_eeprom_len, - .get_eeprom = netxen_nic_get_eeprom, - .get_ringparam = netxen_nic_get_ringparam, - .set_ringparam = netxen_nic_set_ringparam, - .get_pauseparam = netxen_nic_get_pauseparam, - .set_pauseparam = netxen_nic_set_pauseparam, - .get_wol = netxen_nic_get_wol, - .set_wol = netxen_nic_set_wol, - .self_test = netxen_nic_diag_test, - .get_strings = netxen_nic_get_strings, - .get_ethtool_stats = netxen_nic_get_ethtool_stats, - .get_sset_count = netxen_get_sset_count, - .get_coalesce = netxen_get_intr_coalesce, - .set_coalesce = netxen_set_intr_coalesce, -}; diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h deleted file mode 100644 index dc1967c..0000000 --- a/drivers/net/netxen/netxen_nic_hdr.h +++ /dev/null @@ -1,1050 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". - * - */ - -#ifndef __NETXEN_NIC_HDR_H_ -#define __NETXEN_NIC_HDR_H_ - -#include -#include - -/* - * The basic unit of access when reading/writing control registers. - */ - -typedef __le32 netxen_crbword_t; /* single word in CRB space */ - -enum { - NETXEN_HW_H0_CH_HUB_ADR = 0x05, - NETXEN_HW_H1_CH_HUB_ADR = 0x0E, - NETXEN_HW_H2_CH_HUB_ADR = 0x03, - NETXEN_HW_H3_CH_HUB_ADR = 0x01, - NETXEN_HW_H4_CH_HUB_ADR = 0x06, - NETXEN_HW_H5_CH_HUB_ADR = 0x07, - NETXEN_HW_H6_CH_HUB_ADR = 0x08 -}; - -/* Hub 0 */ -enum { - NETXEN_HW_MN_CRB_AGT_ADR = 0x15, - NETXEN_HW_MS_CRB_AGT_ADR = 0x25 -}; - -/* Hub 1 */ -enum { - NETXEN_HW_PS_CRB_AGT_ADR = 0x73, - NETXEN_HW_SS_CRB_AGT_ADR = 0x20, - NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b, - NETXEN_HW_QMS_CRB_AGT_ADR = 0x00, - NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01, - NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02, - NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03, - NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04, - NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58, - NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59, - NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a, - NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a, - NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c, - NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f, - NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12, - NETXEN_HW_SMB_CRB_AGT_ADR = 0x18 -}; - -/* Hub 2 */ -enum { - NETXEN_HW_NIU_CRB_AGT_ADR = 0x31, - NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19, - NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29, - - NETXEN_HW_SN_CRB_AGT_ADR = 0x10, - NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20, - NETXEN_HW_LPC_CRB_AGT_ADR = 0x22, - NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21, - NETXEN_HW_QM_CRB_AGT_ADR = 0x66, - NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60, - NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61, - NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62, - NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63, - NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09, - NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d, - NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e, - NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11 -}; - -/* Hub 3 */ -enum { - NETXEN_HW_PH_CRB_AGT_ADR = 0x1A, - NETXEN_HW_SRE_CRB_AGT_ADR = 0x50, - NETXEN_HW_EG_CRB_AGT_ADR = 0x51, - NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08 -}; - -/* Hub 4 */ -enum { - NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40, - NETXEN_HW_PEGN1_CRB_AGT_ADR, - NETXEN_HW_PEGN2_CRB_AGT_ADR, - NETXEN_HW_PEGN3_CRB_AGT_ADR, - NETXEN_HW_PEGNI_CRB_AGT_ADR, - NETXEN_HW_PEGND_CRB_AGT_ADR, - NETXEN_HW_PEGNC_CRB_AGT_ADR, - NETXEN_HW_PEGR0_CRB_AGT_ADR, - NETXEN_HW_PEGR1_CRB_AGT_ADR, - NETXEN_HW_PEGR2_CRB_AGT_ADR, - NETXEN_HW_PEGR3_CRB_AGT_ADR, - NETXEN_HW_PEGN4_CRB_AGT_ADR -}; - -/* Hub 5 */ -enum { - NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40, - NETXEN_HW_PEGS1_CRB_AGT_ADR, - NETXEN_HW_PEGS2_CRB_AGT_ADR, - NETXEN_HW_PEGS3_CRB_AGT_ADR, - NETXEN_HW_PEGSI_CRB_AGT_ADR, - NETXEN_HW_PEGSD_CRB_AGT_ADR, - NETXEN_HW_PEGSC_CRB_AGT_ADR -}; - -/* Hub 6 */ -enum { - NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46, 
- NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47, - NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48, - NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49, - NETXEN_HW_NCM_CRB_AGT_ADR = 0x16, - NETXEN_HW_TMR_CRB_AGT_ADR = 0x17, - NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05, - NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06, - NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07 -}; - -/* Floaters - non existent modules */ -#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 - -/* This field defines PCI/X adr [25:20] of agents on the CRB */ -enum { - NETXEN_HW_PX_MAP_CRB_PH = 0, - NETXEN_HW_PX_MAP_CRB_PS, - NETXEN_HW_PX_MAP_CRB_MN, - NETXEN_HW_PX_MAP_CRB_MS, - NETXEN_HW_PX_MAP_CRB_PGR1, - NETXEN_HW_PX_MAP_CRB_SRE, - NETXEN_HW_PX_MAP_CRB_NIU, - NETXEN_HW_PX_MAP_CRB_QMN, - NETXEN_HW_PX_MAP_CRB_SQN0, - NETXEN_HW_PX_MAP_CRB_SQN1, - NETXEN_HW_PX_MAP_CRB_SQN2, - NETXEN_HW_PX_MAP_CRB_SQN3, - NETXEN_HW_PX_MAP_CRB_QMS, - NETXEN_HW_PX_MAP_CRB_SQS0, - NETXEN_HW_PX_MAP_CRB_SQS1, - NETXEN_HW_PX_MAP_CRB_SQS2, - NETXEN_HW_PX_MAP_CRB_SQS3, - NETXEN_HW_PX_MAP_CRB_PGN0, - NETXEN_HW_PX_MAP_CRB_PGN1, - NETXEN_HW_PX_MAP_CRB_PGN2, - NETXEN_HW_PX_MAP_CRB_PGN3, - NETXEN_HW_PX_MAP_CRB_PGND, - NETXEN_HW_PX_MAP_CRB_PGNI, - NETXEN_HW_PX_MAP_CRB_PGS0, - NETXEN_HW_PX_MAP_CRB_PGS1, - NETXEN_HW_PX_MAP_CRB_PGS2, - NETXEN_HW_PX_MAP_CRB_PGS3, - NETXEN_HW_PX_MAP_CRB_PGSD, - NETXEN_HW_PX_MAP_CRB_PGSI, - NETXEN_HW_PX_MAP_CRB_SN, - NETXEN_HW_PX_MAP_CRB_PGR2, - NETXEN_HW_PX_MAP_CRB_EG, - NETXEN_HW_PX_MAP_CRB_PH2, - NETXEN_HW_PX_MAP_CRB_PS2, - NETXEN_HW_PX_MAP_CRB_CAM, - NETXEN_HW_PX_MAP_CRB_CAS0, - NETXEN_HW_PX_MAP_CRB_CAS1, - NETXEN_HW_PX_MAP_CRB_CAS2, - NETXEN_HW_PX_MAP_CRB_C2C0, - NETXEN_HW_PX_MAP_CRB_C2C1, - NETXEN_HW_PX_MAP_CRB_TIMR, - NETXEN_HW_PX_MAP_CRB_PGR3, - NETXEN_HW_PX_MAP_CRB_RPMX1, - NETXEN_HW_PX_MAP_CRB_RPMX2, - NETXEN_HW_PX_MAP_CRB_RPMX3, - NETXEN_HW_PX_MAP_CRB_RPMX4, - NETXEN_HW_PX_MAP_CRB_RPMX5, - NETXEN_HW_PX_MAP_CRB_RPMX6, - NETXEN_HW_PX_MAP_CRB_RPMX7, - NETXEN_HW_PX_MAP_CRB_XDMA, - NETXEN_HW_PX_MAP_CRB_I2Q, - NETXEN_HW_PX_MAP_CRB_ROMUSB, - NETXEN_HW_PX_MAP_CRB_CAS3, - NETXEN_HW_PX_MAP_CRB_RPMX0, - NETXEN_HW_PX_MAP_CRB_RPMX8, - NETXEN_HW_PX_MAP_CRB_RPMX9, - NETXEN_HW_PX_MAP_CRB_OCM0, - NETXEN_HW_PX_MAP_CRB_OCM1, - NETXEN_HW_PX_MAP_CRB_SMB, - NETXEN_HW_PX_MAP_CRB_I2C0, - NETXEN_HW_PX_MAP_CRB_I2C1, - NETXEN_HW_PX_MAP_CRB_LPC, - NETXEN_HW_PX_MAP_CRB_PGNC, - NETXEN_HW_PX_MAP_CRB_PGR0 -}; - -/* This field defines CRB adr [31:20] of the agents */ - -#define NETXEN_HW_CRB_HUB_AGT_ADR_MN \ - ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PH \ - ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_MS \ - ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_PS \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SS \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR) -#define 
NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9 \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB \ - ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU \ - ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \ - ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \ - ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_EG \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \ - ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \ - 
((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \ - ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \ - ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR) - -#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_SN \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR) -#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \ - ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR) - -#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c) -#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034) -#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014) -#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000) -#define NETXEN_SRE_L2RE_CTL (NETXEN_CRB_SRE + 0x05000) -#define NETXEN_SRE_BUF_CTL (NETXEN_CRB_SRE + 0x01000) - -#define NETXEN_DMA_BASE(U) (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16)) -#define NETXEN_DMA_COMMAND(U) (NETXEN_DMA_BASE(U) + 0x00008) - -#define NETXEN_I2Q_CLR_PCI_HI (NETXEN_CRB_I2Q + 0x00034) - -#define PEG_NETWORK_BASE(N) (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20)) -#define CRB_REG_EX_PC 0x3c - -#define ROMUSB_GLB (NETXEN_CRB_ROMUSB + 0x00000) -#define ROMUSB_ROM (NETXEN_CRB_ROMUSB + 0x10000) - -#define NETXEN_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) -#define NETXEN_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) -#define NETXEN_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) -#define NETXEN_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) -#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) -#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) -#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) - -#define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) - -#define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) -#define NETXEN_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) -#define 
NETXEN_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) -#define NETXEN_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) -#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) -#define NETXEN_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) - -/* Lock IDs for ROM lock */ -#define ROM_LOCK_DRIVER 0x0d417340 - -/****************************************************************************** -* -* Definitions specific to M25P flash -* -******************************************************************************* -* Instructions -*/ -#define M25P_INSTR_WREN 0x06 -#define M25P_INSTR_WRDI 0x04 -#define M25P_INSTR_RDID 0x9f -#define M25P_INSTR_RDSR 0x05 -#define M25P_INSTR_WRSR 0x01 -#define M25P_INSTR_READ 0x03 -#define M25P_INSTR_FAST_READ 0x0b -#define M25P_INSTR_PP 0x02 -#define M25P_INSTR_SE 0xd8 -#define M25P_INSTR_BE 0xc7 -#define M25P_INSTR_DP 0xb9 -#define M25P_INSTR_RES 0xab - -/* all are 1MB windows */ - -#define NETXEN_PCI_CRB_WINDOWSIZE 0x00100000 -#define NETXEN_PCI_CRB_WINDOW(A) \ - (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE) - -#define NETXEN_CRB_NIU NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU) -#define NETXEN_CRB_SRE NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE) -#define NETXEN_CRB_ROMUSB \ - NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) -#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) -#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0) -#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) -#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) - -#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH) -#define NETXEN_CRB_PCIX_HOST2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2) -#define NETXEN_CRB_PEG_NET_0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0) -#define NETXEN_CRB_PEG_NET_1 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1) -#define NETXEN_CRB_PEG_NET_2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2) -#define NETXEN_CRB_PEG_NET_3 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3) -#define NETXEN_CRB_PEG_NET_4 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SQS2) -#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND) -#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI) -#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN) -#define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN) - -#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS) -#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD - -#define ISR_INT_VECTOR (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR)) -#define ISR_INT_MASK (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) -#define ISR_INT_MASK_SLOW (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) -#define ISR_INT_TARGET_STATUS (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS)) -#define ISR_INT_TARGET_MASK (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK)) -#define ISR_INT_TARGET_STATUS_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) -#define ISR_INT_TARGET_MASK_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) -#define ISR_INT_TARGET_STATUS_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) -#define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) -#define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) -#define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) -#define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) -#define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) -#define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) -#define 
ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) -#define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) -#define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) -#define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) -#define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) - -#define NETXEN_PCI_MAPSIZE 128 -#define NETXEN_PCI_DDR_NET (0x00000000UL) -#define NETXEN_PCI_QDR_NET (0x04000000UL) -#define NETXEN_PCI_DIRECT_CRB (0x04400000UL) -#define NETXEN_PCI_CAMQM (0x04800000UL) -#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL) -#define NETXEN_PCI_OCM0 (0x05000000UL) -#define NETXEN_PCI_OCM0_MAX (0x050fffffUL) -#define NETXEN_PCI_OCM1 (0x05100000UL) -#define NETXEN_PCI_OCM1_MAX (0x051fffffUL) -#define NETXEN_PCI_CRBSPACE (0x06000000UL) -#define NETXEN_PCI_128MB_SIZE (0x08000000UL) -#define NETXEN_PCI_32MB_SIZE (0x02000000UL) -#define NETXEN_PCI_2MB_SIZE (0x00200000UL) - -#define NETXEN_PCI_MN_2M (0) -#define NETXEN_PCI_MS_2M (0x80000) -#define NETXEN_PCI_OCM0_2M (0x000c0000UL) -#define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL) -#define NETXEN_PCI_CAMQM_2M_END (0x04800800UL) - -#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM) - -#define NETXEN_ADDR_DDR_NET (0x0000000000000000ULL) -#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL) -#define NETXEN_ADDR_OCM0 (0x0000000200000000ULL) -#define NETXEN_ADDR_OCM0_MAX (0x00000002000fffffULL) -#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL) -#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL) -#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL) -#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL) -#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL) - -/* - * Register offsets for MN - */ -#define NETXEN_MIU_CONTROL (0x000) -#define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL) - - /* 200ms delay in each loop */ -#define NETXEN_NIU_PHY_WAITLEN 200000 - /* 10 seconds before we give up */ -#define NETXEN_NIU_PHY_WAITMAX 50 -#define NETXEN_NIU_MAX_GBE_PORTS 4 -#define NETXEN_NIU_MAX_XG_PORTS 2 - -#define NETXEN_NIU_MODE (NETXEN_CRB_NIU + 0x00000) - -#define NETXEN_NIU_XG_SINGLE_TERM (NETXEN_CRB_NIU + 0x00004) -#define NETXEN_NIU_XG_DRIVE_HI (NETXEN_CRB_NIU + 0x00008) -#define NETXEN_NIU_XG_DRIVE_LO (NETXEN_CRB_NIU + 0x0000c) -#define NETXEN_NIU_XG_DTX (NETXEN_CRB_NIU + 0x00010) -#define NETXEN_NIU_XG_DEQ (NETXEN_CRB_NIU + 0x00014) -#define NETXEN_NIU_XG_WORD_ALIGN (NETXEN_CRB_NIU + 0x00018) -#define NETXEN_NIU_XG_RESET (NETXEN_CRB_NIU + 0x0001c) -#define NETXEN_NIU_XG_POWER_DOWN (NETXEN_CRB_NIU + 0x00020) -#define NETXEN_NIU_XG_RESET_PLL (NETXEN_CRB_NIU + 0x00024) -#define NETXEN_NIU_XG_SERDES_LOOPBACK (NETXEN_CRB_NIU + 0x00028) -#define NETXEN_NIU_XG_DO_BYTE_ALIGN (NETXEN_CRB_NIU + 0x0002c) -#define NETXEN_NIU_XG_TX_ENABLE (NETXEN_CRB_NIU + 0x00030) -#define NETXEN_NIU_XG_RX_ENABLE (NETXEN_CRB_NIU + 0x00034) -#define NETXEN_NIU_XG_STATUS (NETXEN_CRB_NIU + 0x00038) -#define NETXEN_NIU_XG_PAUSE_THRESHOLD (NETXEN_CRB_NIU + 0x0003c) -#define NETXEN_NIU_INT_MASK (NETXEN_CRB_NIU + 0x00040) -#define NETXEN_NIU_ACTIVE_INT (NETXEN_CRB_NIU + 0x00044) -#define NETXEN_NIU_MASKABLE_INT (NETXEN_CRB_NIU + 0x00048) - -#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER (NETXEN_CRB_NIU + 0x0004c) - -#define NETXEN_NIU_GB_SERDES_RESET (NETXEN_CRB_NIU + 0x00050) -#define NETXEN_NIU_GB0_GMII_MODE (NETXEN_CRB_NIU + 0x00054) -#define NETXEN_NIU_GB0_MII_MODE (NETXEN_CRB_NIU + 0x00058) -#define NETXEN_NIU_GB1_GMII_MODE (NETXEN_CRB_NIU + 0x0005c) 
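/*
 * Mode selects for the four GbE MACs come in GMII/MII pairs, one pair
 * per port. The per-port MAC register banks defined further down are
 * strided 0x10000 apart, e.g. NETXEN_NIU_GB_MAC_CONFIG_0(2) resolves
 * to NETXEN_CRB_NIU + 0x50000.
 */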
-#define NETXEN_NIU_GB1_MII_MODE (NETXEN_CRB_NIU + 0x00060) -#define NETXEN_NIU_GB2_GMII_MODE (NETXEN_CRB_NIU + 0x00064) -#define NETXEN_NIU_GB2_MII_MODE (NETXEN_CRB_NIU + 0x00068) -#define NETXEN_NIU_GB3_GMII_MODE (NETXEN_CRB_NIU + 0x0006c) -#define NETXEN_NIU_GB3_MII_MODE (NETXEN_CRB_NIU + 0x00070) -#define NETXEN_NIU_REMOTE_LOOPBACK (NETXEN_CRB_NIU + 0x00074) -#define NETXEN_NIU_GB0_HALF_DUPLEX (NETXEN_CRB_NIU + 0x00078) -#define NETXEN_NIU_GB1_HALF_DUPLEX (NETXEN_CRB_NIU + 0x0007c) -#define NETXEN_NIU_RESET_SYS_FIFOS (NETXEN_CRB_NIU + 0x00088) -#define NETXEN_NIU_GB_CRC_DROP (NETXEN_CRB_NIU + 0x0008c) -#define NETXEN_NIU_GB_DROP_WRONGADDR (NETXEN_CRB_NIU + 0x00090) -#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094) -#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098) -#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc) -#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac) -#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0) -#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128) -#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c) - -#define NETXEN_NIU_FULL_LEVEL_XG (NETXEN_CRB_NIU + 0x00450) - -#define NETXEN_NIU_XG1_RESET (NETXEN_CRB_NIU + 0x0011c) -#define NETXEN_NIU_XG1_POWER_DOWN (NETXEN_CRB_NIU + 0x00120) -#define NETXEN_NIU_XG1_RESET_PLL (NETXEN_CRB_NIU + 0x00124) - -#define NETXEN_MAC_ADDR_CNTL_REG (NETXEN_CRB_NIU + 0x1000) - -#define NETXEN_MULTICAST_ADDR_HI_0 (NETXEN_CRB_NIU + 0x1010) -#define NETXEN_MULTICAST_ADDR_HI_1 (NETXEN_CRB_NIU + 0x1014) -#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018) -#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c) - -#define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080) -#define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100) - -#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \ - (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000) -#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \ - (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000) -#define NETXEN_NIU_GB_MAC_IPG_IFG(I) \ - (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000) -#define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \ - (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000) -#define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \ - (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000) -#define NETXEN_NIU_GB_TEST_REG(I) \ - (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_CONFIG(I) \ - (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_COMMAND(I) \ - (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_ADDR(I) \ - (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_CTRL(I) \ - (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_STATUS(I) \ - (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000) -#define NETXEN_NIU_GB_MII_MGMT_INDICATE(I) \ - (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000) -#define NETXEN_NIU_GB_INTERFACE_CTRL(I) \ - (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000) -#define NETXEN_NIU_GB_INTERFACE_STATUS(I) \ - (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000) -#define NETXEN_NIU_GB_STATION_ADDR_0(I) \ - (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000) -#define NETXEN_NIU_GB_STATION_ADDR_1(I) \ - (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000) - -#define NETXEN_NIU_XGE_CONFIG_0 (NETXEN_CRB_NIU + 0x70000) -#define NETXEN_NIU_XGE_CONFIG_1 (NETXEN_CRB_NIU + 0x70004) -#define NETXEN_NIU_XGE_IPG (NETXEN_CRB_NIU + 0x70008) -#define NETXEN_NIU_XGE_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x7000c) -#define NETXEN_NIU_XGE_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x70010) -#define NETXEN_NIU_XGE_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x70014) -#define 
NETXEN_NIU_XGE_STATUS (NETXEN_CRB_NIU + 0x70018) -#define NETXEN_NIU_XGE_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x7001c) -#define NETXEN_NIU_XGE_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x70020) -#define NETXEN_NIU_XGE_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x70024) -#define NETXEN_NIU_XGE_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x70028) -#define NETXEN_NIU_XGE_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x7002c) -#define NETXEN_NIU_XGE_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x70030) -#define NETXEN_NIU_XGE_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x70034) -#define NETXEN_NIU_XGE_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x70038) -#define NETXEN_NIU_XGE_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x7003c) -#define NETXEN_NIU_XGE_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x70040) -#define NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70044) -#define NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70048) -#define NETXEN_NIU_XGE_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x7004c) -#define NETXEN_NIU_XGE_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x70050) -#define NETXEN_NIU_XGE_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x70054) -#define NETXEN_NIU_XGE_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x70058) -#define NETXEN_NIU_XG1_CONFIG_0 (NETXEN_CRB_NIU + 0x80000) -#define NETXEN_NIU_XG1_CONFIG_1 (NETXEN_CRB_NIU + 0x80004) -#define NETXEN_NIU_XG1_IPG (NETXEN_CRB_NIU + 0x80008) -#define NETXEN_NIU_XG1_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x8000c) -#define NETXEN_NIU_XG1_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x80010) -#define NETXEN_NIU_XG1_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x80014) -#define NETXEN_NIU_XG1_STATUS (NETXEN_CRB_NIU + 0x80018) -#define NETXEN_NIU_XG1_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x8001c) -#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x80020) -#define NETXEN_NIU_XG1_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x80024) -#define NETXEN_NIU_XG1_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x80028) -#define NETXEN_NIU_XG1_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x8002c) -#define NETXEN_NIU_XG1_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x80030) -#define NETXEN_NIU_XG1_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x80034) -#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x80038) -#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x8003c) -#define NETXEN_NIU_XG1_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x80040) -#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80044) -#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80048) -#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x8004c) -#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x80050) -#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054) -#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058) - -/* P3 802.3ap */ -#define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000) -#define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000) -#define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000) -#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000) -#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000) -#define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000) -#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) 
(NETXEN_CRB_NIU+0xa0034+(I)*0x10000) -#define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000) -#define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000) -#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000) -#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000) - - -#define TEST_AGT_CTRL (0x00) - -#define TA_CTL_START 1 -#define TA_CTL_ENABLE 2 -#define TA_CTL_WRITE 4 -#define TA_CTL_BUSY 8 - -/* - * Register offsets for MN - */ -#define MIU_TEST_AGT_BASE (0x90) - -#define MIU_TEST_AGT_ADDR_LO (0x04) -#define MIU_TEST_AGT_ADDR_HI (0x08) -#define MIU_TEST_AGT_WRDATA_LO (0x10) -#define MIU_TEST_AGT_WRDATA_HI (0x14) -#define MIU_TEST_AGT_RDDATA_LO (0x18) -#define MIU_TEST_AGT_RDDATA_HI (0x1c) - -#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 -#define MIU_TEST_AGT_UPPER_ADDR(off) (0) - -/* - * Register offsets for MS - */ -#define SIU_TEST_AGT_BASE (0x60) - -#define SIU_TEST_AGT_ADDR_LO (0x04) -#define SIU_TEST_AGT_ADDR_HI (0x18) -#define SIU_TEST_AGT_WRDATA_LO (0x08) -#define SIU_TEST_AGT_WRDATA_HI (0x0c) -#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i))) -#define SIU_TEST_AGT_RDDATA_LO (0x10) -#define SIU_TEST_AGT_RDDATA_HI (0x14) -#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i))) - -#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 -#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) - -/* XG Link status */ -#define XG_LINK_UP 0x10 -#define XG_LINK_DOWN 0x20 - -#define XG_LINK_UP_P3 0x01 -#define XG_LINK_DOWN_P3 0x02 -#define XG_LINK_STATE_P3_MASK 0xf -#define XG_LINK_STATE_P3(pcifn,val) \ - (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) - -#define P3_LINK_SPEED_MHZ 100 -#define P3_LINK_SPEED_MASK 0xff -#define P3_LINK_SPEED_REG(pcifn) \ - (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) -#define P3_LINK_SPEED_VAL(pcifn, reg) \ - (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK) - -#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) -#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) -#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) -#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154)) -#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158)) -#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100)) -#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120)) -#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124)) - -#define NIC_CRB_BASE (NETXEN_CAM_RAM(0x200)) -#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700)) -#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X)) -#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X)) - -#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18)) -#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c)) -#define NX_ARG2_CRB_OFFSET (NETXEN_NIC_REG(0x20)) -#define NX_ARG3_CRB_OFFSET (NETXEN_NIC_REG(0x24)) -#define NX_SIGN_CRB_OFFSET (NETXEN_NIC_REG(0x28)) - -#define CRB_HOST_DUMMY_BUF_ADDR_HI (NETXEN_NIC_REG(0x3c)) -#define CRB_HOST_DUMMY_BUF_ADDR_LO (NETXEN_NIC_REG(0x40)) - -#define CRB_CMDPEG_STATE (NETXEN_NIC_REG(0x50)) -#define CRB_RCVPEG_STATE (NETXEN_NIC_REG(0x13c)) - -#define CRB_XG_STATE (NETXEN_NIC_REG(0x94)) -#define CRB_XG_STATE_P3 (NETXEN_NIC_REG(0x98)) -#define CRB_PF_LINK_SPEED_1 (NETXEN_NIC_REG(0xe8)) -#define CRB_PF_LINK_SPEED_2 (NETXEN_NIC_REG(0xec)) - -#define CRB_MPORT_MODE (NETXEN_NIC_REG(0xc4)) -#define CRB_DMA_SHIFT (NETXEN_NIC_REG(0xcc)) -#define CRB_INT_VECTOR (NETXEN_NIC_REG(0xd4)) - -#define CRB_CMD_PRODUCER_OFFSET (NETXEN_NIC_REG(0x08)) -#define CRB_CMD_CONSUMER_OFFSET (NETXEN_NIC_REG(0x0c)) -#define CRB_CMD_PRODUCER_OFFSET_1 (NETXEN_NIC_REG(0x1ac)) -#define CRB_CMD_CONSUMER_OFFSET_1 
(NETXEN_NIC_REG(0x1b0)) -#define CRB_CMD_PRODUCER_OFFSET_2 (NETXEN_NIC_REG(0x1b8)) -#define CRB_CMD_CONSUMER_OFFSET_2 (NETXEN_NIC_REG(0x1bc)) -#define CRB_CMD_PRODUCER_OFFSET_3 (NETXEN_NIC_REG(0x1d0)) -#define CRB_CMD_CONSUMER_OFFSET_3 (NETXEN_NIC_REG(0x1d4)) -#define CRB_TEMP_STATE (NETXEN_NIC_REG(0x1b4)) - -#define CRB_V2P_0 (NETXEN_NIC_REG(0x290)) -#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) -#define CRB_DRIVER_VERSION (NETXEN_NIC_REG(0x2a0)) - -#define CRB_SW_INT_MASK_0 (NETXEN_NIC_REG(0x1d8)) -#define CRB_SW_INT_MASK_1 (NETXEN_NIC_REG(0x1e0)) -#define CRB_SW_INT_MASK_2 (NETXEN_NIC_REG(0x1e4)) -#define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8)) - -#define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128)) -#define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0)) - -/* - * capabilities register, can be used to selectively enable/disable features - * for backward compatibility - */ -#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8) -#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270) - -#define INTR_SCHEME_PERPORT 0x1 -#define MSI_MODE_MULTIFUNC 0x1 - -/* used for ethtool tests */ -#define CRB_SCRATCHPAD_TEST NETXEN_NIC_REG(0x280) - -/* - * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address - * which can be read by the Phantom host to get producer/consumer indexes from - * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following - * registers will be used for the addresses of the ring's shared memory - * on the Phantom. - */ - -#define nx_get_temp_val(x) ((x) >> 16) -#define nx_get_temp_state(x) ((x) & 0xffff) -#define nx_encode_temp(val, state) (((val) << 16) | (state)) - -/* - * Temperature control. - */ -enum { - NX_TEMP_NORMAL = 0x1, /* Normal operating range */ - NX_TEMP_WARN, /* Sound alert, temperature getting high */ - NX_TEMP_PANIC /* Fatal error, hardware has shut down. */ -}; - -/* Lock IDs for PHY lock */ -#define PHY_LOCK_DRIVER 0x44524956 - -/* Used for PS PCI Memory access */ -#define PCIX_PS_OP_ADDR_LO (0x10000) -/* via CRB (PS side only) */ -#define PCIX_PS_OP_ADDR_HI (0x10004) - -#define PCIX_INT_VECTOR (0x10100) -#define PCIX_INT_MASK (0x10104) - -#define PCIX_CRB_WINDOW (0x10210) -#define PCIX_CRB_WINDOW_F0 (0x10210) -#define PCIX_CRB_WINDOW_F1 (0x10230) -#define PCIX_CRB_WINDOW_F2 (0x10250) -#define PCIX_CRB_WINDOW_F3 (0x10270) -#define PCIX_CRB_WINDOW_F4 (0x102ac) -#define PCIX_CRB_WINDOW_F5 (0x102bc) -#define PCIX_CRB_WINDOW_F6 (0x102cc) -#define PCIX_CRB_WINDOW_F7 (0x102dc) -#define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \ - (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\ - (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4)))) - -#define PCIX_MN_WINDOW (0x10200) -#define PCIX_MN_WINDOW_F0 (0x10200) -#define PCIX_MN_WINDOW_F1 (0x10220) -#define PCIX_MN_WINDOW_F2 (0x10240) -#define PCIX_MN_WINDOW_F3 (0x10260) -#define PCIX_MN_WINDOW_F4 (0x102a0) -#define PCIX_MN_WINDOW_F5 (0x102b0) -#define PCIX_MN_WINDOW_F6 (0x102c0) -#define PCIX_MN_WINDOW_F7 (0x102d0) -#define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \ - (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\ - (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4)))) - -#define PCIX_SN_WINDOW (0x10208) -#define PCIX_SN_WINDOW_F0 (0x10208) -#define PCIX_SN_WINDOW_F1 (0x10228) -#define PCIX_SN_WINDOW_F2 (0x10248) -#define PCIX_SN_WINDOW_F3 (0x10268) -#define PCIX_SN_WINDOW_F4 (0x102a8) -#define PCIX_SN_WINDOW_F5 (0x102b8) -#define PCIX_SN_WINDOW_F6 (0x102c8) -#define PCIX_SN_WINDOW_F7 (0x102d8) -#define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? 
\ - (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\ - (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4)))) - -#define PCIX_OCM_WINDOW (0x10800) -#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func)) - -#define PCIX_TARGET_STATUS (0x10118) -#define PCIX_TARGET_STATUS_F1 (0x10160) -#define PCIX_TARGET_STATUS_F2 (0x10164) -#define PCIX_TARGET_STATUS_F3 (0x10168) -#define PCIX_TARGET_STATUS_F4 (0x10360) -#define PCIX_TARGET_STATUS_F5 (0x10364) -#define PCIX_TARGET_STATUS_F6 (0x10368) -#define PCIX_TARGET_STATUS_F7 (0x1036c) - -#define PCIX_TARGET_MASK (0x10128) -#define PCIX_TARGET_MASK_F1 (0x10170) -#define PCIX_TARGET_MASK_F2 (0x10174) -#define PCIX_TARGET_MASK_F3 (0x10178) -#define PCIX_TARGET_MASK_F4 (0x10370) -#define PCIX_TARGET_MASK_F5 (0x10374) -#define PCIX_TARGET_MASK_F6 (0x10378) -#define PCIX_TARGET_MASK_F7 (0x1037c) - -#define PCIX_MSI_F0 (0x13000) -#define PCIX_MSI_F1 (0x13004) -#define PCIX_MSI_F2 (0x13008) -#define PCIX_MSI_F3 (0x1300c) -#define PCIX_MSI_F4 (0x13010) -#define PCIX_MSI_F5 (0x13014) -#define PCIX_MSI_F6 (0x13018) -#define PCIX_MSI_F7 (0x1301c) -#define PCIX_MSI_F(i) (0x13000+((i)*4)) - -#define PCIX_PS_MEM_SPACE (0x90000) - -#define NETXEN_PCIX_PH_REG(reg) (NETXEN_CRB_PCIE + (reg)) -#define NETXEN_PCIX_PS_REG(reg) (NETXEN_CRB_PCIX_MD + (reg)) - -#define NETXEN_PCIE_REG(reg) (NETXEN_CRB_PCIE + (reg)) - -#define PCIE_MAX_DMA_XFER_SIZE (0x1404c) - -#define PCIE_DCR 0x00d8 - -#define PCIE_SEM0_LOCK (0x1c000) -#define PCIE_SEM0_UNLOCK (0x1c004) -#define PCIE_SEM1_LOCK (0x1c008) -#define PCIE_SEM1_UNLOCK (0x1c00c) -#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */ -#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */ -#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */ -#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */ -#define PCIE_SEM4_LOCK (0x1c020) -#define PCIE_SEM4_UNLOCK (0x1c024) -#define PCIE_SEM5_LOCK (0x1c028) /* API lock */ -#define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */ -#define PCIE_SEM6_LOCK (0x1c030) /* sw lock */ -#define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */ -#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */ -#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/ -#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N)) -#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N)) - -#define PCIE_SETUP_FUNCTION (0x12040) -#define PCIE_SETUP_FUNCTION2 (0x12048) -#define PCIE_MISCCFG_RC (0x1206c) -#define PCIE_TGT_SPLIT_CHICKEN (0x12080) -#define PCIE_CHICKEN3 (0x120c8) - -#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC)) -#define PCIE_MAX_MASTER_SPLIT (0x14048) - -#define NETXEN_PORT_MODE_NONE 0 -#define NETXEN_PORT_MODE_XG 1 -#define NETXEN_PORT_MODE_GB 2 -#define NETXEN_PORT_MODE_802_3_AP 3 -#define NETXEN_PORT_MODE_AUTO_NEG 4 -#define NETXEN_PORT_MODE_AUTO_NEG_1G 5 -#define NETXEN_PORT_MODE_AUTO_NEG_XG 6 -#define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24)) -#define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198)) - -#define NETXEN_WOL_CONFIG_NV (NETXEN_CAM_RAM(0x184)) -#define NETXEN_WOL_CONFIG (NETXEN_CAM_RAM(0x188)) - -#define NX_PEG_TUNE_MN_PRESENT 0x1 -#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c)) - -#define NETXEN_DMA_WATCHDOG_CTRL (NETXEN_CAM_RAM(0x14)) -#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0)) -#define NETXEN_PEG_HALT_STATUS1 (NETXEN_CAM_RAM(0xa8)) -#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac)) -#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138)) -#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140)) - -/* Device State */ -#define NX_DEV_COLD 1 -#define NX_DEV_INITALIZING 2 -#define NX_DEV_READY 3 -#define 
NX_DEV_NEED_RESET 4 -#define NX_DEV_NEED_QUISCENT 5 -#define NX_DEV_NEED_AER 6 -#define NX_DEV_FAILED 7 - -#define NX_RCODE_DRIVER_INFO 0x20000000 -#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000 -#define NX_RCODE_FATAL_ERROR 0x80000000 -#define NX_FWERROR_PEGNUM(code) ((code) & 0xff) -#define NX_FWERROR_CODE(code) ((code >> 8) & 0xfffff) - -#define FW_POLL_DELAY (2 * HZ) -#define FW_FAIL_THRESH 3 -#define FW_POLL_THRESH 10 - -#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) -#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) - -/* - * PCI Interrupt Vector Values. - */ -#define PCIX_INT_VECTOR_BIT_F0 0x0080 -#define PCIX_INT_VECTOR_BIT_F1 0x0100 -#define PCIX_INT_VECTOR_BIT_F2 0x0200 -#define PCIX_INT_VECTOR_BIT_F3 0x0400 -#define PCIX_INT_VECTOR_BIT_F4 0x0800 -#define PCIX_INT_VECTOR_BIT_F5 0x1000 -#define PCIX_INT_VECTOR_BIT_F6 0x2000 -#define PCIX_INT_VECTOR_BIT_F7 0x4000 - -struct netxen_legacy_intr_set { - uint32_t int_vec_bit; - uint32_t tgt_status_reg; - uint32_t tgt_mask_reg; - uint32_t pci_int_reg; -}; - -#define NX_LEGACY_INTR_CONFIG \ -{ \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ -} - -#endif /* __NETXEN_NIC_HDR_H_ */ diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c deleted file mode 100644 index 3f89e57..0000000 --- a/drivers/net/netxen/netxen_nic_hw.c +++ /dev/null @@ -1,1976 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
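The device-state, firmware-error, and temperature words defined in the header above are plain bit-packed u32 values. As a quick illustration, a small userspace sketch (not driver code; the decode macros are copied from the header, with parentheses added around the code argument, and the sample values are invented):

#include <stdio.h>
#include <stdint.h>

#define nx_get_temp_val(x)	((x) >> 16)
#define nx_get_temp_state(x)	((x) & 0xffff)
#define NX_FWERROR_PEGNUM(code)	((code) & 0xff)
#define NX_FWERROR_CODE(code)	(((code) >> 8) & 0xfffff)

int main(void)
{
	uint32_t temp = (55 << 16) | 0x1;	/* 55 C, NX_TEMP_NORMAL */
	uint32_t halt = 0x80000203;		/* fatal bit set, peg 3, code 2 */

	printf("temp=%u state=%u\n",
	       nx_get_temp_val(temp), nx_get_temp_state(temp));
	printf("peg=%u code=0x%x fatal=%d\n",
	       NX_FWERROR_PEGNUM(halt), NX_FWERROR_CODE(halt),
	       !!(halt & 0x80000000));	/* NX_RCODE_FATAL_ERROR */
	return 0;
}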
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". - * - */ - -#include -#include "netxen_nic.h" -#include "netxen_nic_hw.h" - -#include - -#define MASK(n) ((1ULL<<(n))-1) -#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) -#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) -#define MS_WIN(addr) (addr & 0x0ffc0000) - -#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) - -#define CRB_BLK(off) ((off >> 20) & 0x3f) -#define CRB_SUBBLK(off) ((off >> 16) & 0xf) -#define CRB_WINDOW_2M (0x130060) -#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) -#define CRB_INDIRECT_2M (0x1e0000UL) - -static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, - void __iomem *addr, u32 data); -static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, - void __iomem *addr); - -#ifndef readq -static inline u64 readq(void __iomem *addr) -{ - return readl(addr) | (((u64) readl(addr + 4)) << 32LL); -} -#endif - -#ifndef writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel(((u32) (val)), (addr)); - writel(((u32) (val >> 32)), (addr + 4)); -} -#endif - -#define PCI_OFFSET_FIRST_RANGE(adapter, off) \ - ((adapter)->ahw.pci_base0 + (off)) -#define PCI_OFFSET_SECOND_RANGE(adapter, off) \ - ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START) -#define PCI_OFFSET_THIRD_RANGE(adapter, off) \ - ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START) - -static void __iomem *pci_base_offset(struct netxen_adapter *adapter, - unsigned long off) -{ - if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END)) - return PCI_OFFSET_FIRST_RANGE(adapter, off); - - if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END)) - return PCI_OFFSET_SECOND_RANGE(adapter, off); - - if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END)) - return PCI_OFFSET_THIRD_RANGE(adapter, off); - - return NULL; -} - -static crb_128M_2M_block_map_t -crb_128M_2M_map[64] __cacheline_aligned_in_smp = { - {{{0, 0, 0, 0} } }, /* 0: PCI */ - {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ - {1, 0x0110000, 0x0120000, 0x130000}, - {1, 0x0120000, 0x0122000, 0x124000}, - {1, 0x0130000, 0x0132000, 0x126000}, - {1, 0x0140000, 0x0142000, 0x128000}, - {1, 0x0150000, 0x0152000, 0x12a000}, - {1, 0x0160000, 0x0170000, 0x110000}, - {1, 0x0170000, 0x0172000, 0x12e000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x01e0000, 0x01e0800, 0x122000}, - {0, 0x0000000, 0x0000000, 0x000000} } }, - {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ - {{{0, 0, 0, 0} } }, /* 3: */ - {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ - {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ - {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ - {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ - {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - 
{0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x08f0000, 0x08f2000, 0x172000} } }, - {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x09f0000, 0x09f2000, 0x176000} } }, - {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x0af0000, 0x0af2000, 0x17a000} } }, - {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, - {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ - {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ - {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ - {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ - {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ - {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ - {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ - {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ - {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ - {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ - {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ - {{{0, 0, 0, 0} } }, /* 23: */ - {{{0, 0, 0, 0} } }, /* 24: */ - {{{0, 0, 0, 0} } }, /* 25: */ - {{{0, 0, 0, 0} } }, /* 26: */ - {{{0, 0, 0, 0} } }, /* 27: */ - {{{0, 0, 0, 0} } }, /* 28: */ - {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ - {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ - {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ - {{{0} } }, /* 32: PCI */ - {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ - {1, 0x2110000, 0x2120000, 0x130000}, - {1, 0x2120000, 0x2122000, 0x124000}, - {1, 0x2130000, 0x2132000, 0x126000}, - {1, 
0x2140000, 0x2142000, 0x128000}, - {1, 0x2150000, 0x2152000, 0x12a000}, - {1, 0x2160000, 0x2170000, 0x110000}, - {1, 0x2170000, 0x2172000, 0x12e000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000} } }, - {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ - {{{0} } }, /* 35: */ - {{{0} } }, /* 36: */ - {{{0} } }, /* 37: */ - {{{0} } }, /* 38: */ - {{{0} } }, /* 39: */ - {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ - {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ - {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ - {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ - {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ - {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ - {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ - {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ - {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ - {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ - {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ - {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ - {{{0} } }, /* 52: */ - {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ - {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ - {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ - {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ - {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ - {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ - {{{0} } }, /* 59: I2C0 */ - {{{0} } }, /* 60: I2C1 */ - {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ - {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ - {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ -}; - -/* - * top 12 bits of crb internal address (hub, agent) - */ -static unsigned crb_hub_agt[64] = -{ - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_PS, - NETXEN_HW_CRB_HUB_AGT_ADR_MN, - NETXEN_HW_CRB_HUB_AGT_ADR_MS, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_SRE, - NETXEN_HW_CRB_HUB_AGT_ADR_NIU, - NETXEN_HW_CRB_HUB_AGT_ADR_QMN, - NETXEN_HW_CRB_HUB_AGT_ADR_SQN0, - NETXEN_HW_CRB_HUB_AGT_ADR_SQN1, - NETXEN_HW_CRB_HUB_AGT_ADR_SQN2, - NETXEN_HW_CRB_HUB_AGT_ADR_SQN3, - NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, - NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, - NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, - NETXEN_HW_CRB_HUB_AGT_ADR_PGN4, - NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, - NETXEN_HW_CRB_HUB_AGT_ADR_PGN0, - NETXEN_HW_CRB_HUB_AGT_ADR_PGN1, - NETXEN_HW_CRB_HUB_AGT_ADR_PGN2, - NETXEN_HW_CRB_HUB_AGT_ADR_PGN3, - NETXEN_HW_CRB_HUB_AGT_ADR_PGND, - NETXEN_HW_CRB_HUB_AGT_ADR_PGNI, - NETXEN_HW_CRB_HUB_AGT_ADR_PGS0, - NETXEN_HW_CRB_HUB_AGT_ADR_PGS1, - NETXEN_HW_CRB_HUB_AGT_ADR_PGS2, - NETXEN_HW_CRB_HUB_AGT_ADR_PGS3, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_PGSI, - NETXEN_HW_CRB_HUB_AGT_ADR_SN, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_EG, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_PS, - NETXEN_HW_CRB_HUB_AGT_ADR_CAM, - 0, - 0, - 0, - 0, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7, - NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, - NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, - NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, - 0, - 
NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8, - NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9, - NETXEN_HW_CRB_HUB_AGT_ADR_OCM0, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_SMB, - NETXEN_HW_CRB_HUB_AGT_ADR_I2C0, - NETXEN_HW_CRB_HUB_AGT_ADR_I2C1, - 0, - NETXEN_HW_CRB_HUB_AGT_ADR_PGNC, - 0, -}; - -/* PCI Windowing for DDR regions. */ - -#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ - -#define NETXEN_PCIE_SEM_TIMEOUT 10000 - -static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); - -int -netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg) -{ - int done = 0, timeout = 0; - - while (!done) { - done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem))); - if (done == 1) - break; - if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT) - return -EIO; - msleep(1); - } - - if (id_reg) - NXWR32(adapter, id_reg, adapter->portnum); - - return 0; -} - -void -netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) -{ - NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); -} - -static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) -{ - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); - NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5); - } - - return 0; -} - -/* Disable an XG interface */ -static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) -{ - __u32 mac_cfg; - u32 port = adapter->physical_port; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - return 0; - - if (port > NETXEN_NIU_MAX_XG_PORTS) - return -EINVAL; - - mac_cfg = 0; - if (NXWR32(adapter, - NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg)) - return -EIO; - return 0; -} - -#define NETXEN_UNICAST_ADDR(port, index) \ - (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) -#define NETXEN_MCAST_ADDR(port, index) \ - (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8)) -#define MAC_HI(addr) \ - ((addr[2] << 16) | (addr[1] << 8) | (addr[0])) -#define MAC_LO(addr) \ - ((addr[5] << 16) | (addr[4] << 8) | (addr[3])) - -static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) -{ - u32 mac_cfg; - u32 cnt = 0; - __u32 reg = 0x0200; - u32 port = adapter->physical_port; - u16 board_type = adapter->ahw.board_type; - - if (port > NETXEN_NIU_MAX_XG_PORTS) - return -EINVAL; - - mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port)); - mac_cfg &= ~0x4; - NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); - - if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) || - (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ)) - reg = (0x20 << port); - - NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg); - - mdelay(10); - - while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20) - mdelay(10); - - if (cnt < 20) { - - reg = NXRD32(adapter, - NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port)); - - if (mode == NETXEN_NIU_PROMISC_MODE) - reg = (reg | 0x2000UL); - else - reg = (reg & ~0x2000UL); - - if (mode == NETXEN_NIU_ALLMULTI_MODE) - reg = (reg | 0x1000UL); - else - reg = (reg & ~0x1000UL); - - NXWR32(adapter, - NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); - } - - mac_cfg |= 0x4; - NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); - - return 0; -} - -static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) -{ - u32 mac_hi, mac_lo; - u32 reg_hi, reg_lo; - - u8 phy = adapter->physical_port; - - if (phy >= NETXEN_NIU_MAX_XG_PORTS) - return -EINVAL; - - mac_lo = ((u32)addr[0] << 16) | 
((u32)addr[1] << 24); - mac_hi = addr[2] | ((u32)addr[3] << 8) | - ((u32)addr[4] << 16) | ((u32)addr[5] << 24); - - reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy); - reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy); - - /* write twice to flush */ - if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) - return -EIO; - if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) - return -EIO; - - return 0; -} - -static int -netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter) -{ - u32 val = 0; - u16 port = adapter->physical_port; - u8 *addr = adapter->mac_addr; - - if (adapter->mc_enabled) - return 0; - - val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); - val |= (1UL << (28+port)); - NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); - - /* add broadcast addr to filter */ - val = 0xffffff; - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); - - /* add station addr to filter */ - val = MAC_HI(addr); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val); - val = MAC_LO(addr); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val); - - adapter->mc_enabled = 1; - return 0; -} - -static int -netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter) -{ - u32 val = 0; - u16 port = adapter->physical_port; - u8 *addr = adapter->mac_addr; - - if (!adapter->mc_enabled) - return 0; - - val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); - val &= ~(1UL << (28+port)); - NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); - - val = MAC_HI(addr); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); - val = MAC_LO(addr); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); - - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0); - NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0); - - adapter->mc_enabled = 0; - return 0; -} - -static int -netxen_nic_set_mcast_addr(struct netxen_adapter *adapter, - int index, u8 *addr) -{ - u32 hi = 0, lo = 0; - u16 port = adapter->physical_port; - - lo = MAC_LO(addr); - hi = MAC_HI(addr); - - NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi); - NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo); - - return 0; -} - -static void netxen_p2_nic_set_multi(struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - struct netdev_hw_addr *ha; - u8 null_addr[6]; - int i; - - memset(null_addr, 0, 6); - - if (netdev->flags & IFF_PROMISC) { - - adapter->set_promisc(adapter, - NETXEN_NIU_PROMISC_MODE); - - /* Full promiscuous mode */ - netxen_nic_disable_mcast_filter(adapter); - - return; - } - - if (netdev_mc_empty(netdev)) { - adapter->set_promisc(adapter, - NETXEN_NIU_NON_PROMISC_MODE); - netxen_nic_disable_mcast_filter(adapter); - return; - } - - adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE); - if (netdev->flags & IFF_ALLMULTI || - netdev_mc_count(netdev) > adapter->max_mc_count) { - netxen_nic_disable_mcast_filter(adapter); - return; - } - - netxen_nic_enable_mcast_filter(adapter); - - i = 0; - netdev_for_each_mc_addr(ha, netdev) - netxen_nic_set_mcast_addr(adapter, i++, ha->addr); - - /* Clear out remaining addresses */ - while (i < adapter->max_mc_count) - netxen_nic_set_mcast_addr(adapter, i++, null_addr); -} - -static int -netxen_send_cmd_descs(struct netxen_adapter *adapter, - struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) -{ - u32 i, producer, consumer; - struct netxen_cmd_buffer *pbuf; - struct cmd_desc_type0 *cmd_desc; - struct nx_host_tx_ring *tx_ring; - - i = 0; - - if (adapter->is_up != 
NETXEN_ADAPTER_UP_MAGIC) - return -EIO; - - tx_ring = adapter->tx_ring; - __netif_tx_lock_bh(tx_ring->txq); - - producer = tx_ring->producer; - consumer = tx_ring->sw_consumer; - - if (nr_desc >= netxen_tx_avail(tx_ring)) { - netif_tx_stop_queue(tx_ring->txq); - smp_mb(); - if (netxen_tx_avail(tx_ring) > nr_desc) { - if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) - netif_tx_wake_queue(tx_ring->txq); - } else { - __netif_tx_unlock_bh(tx_ring->txq); - return -EBUSY; - } - } - - do { - cmd_desc = &cmd_desc_arr[i]; - - pbuf = &tx_ring->cmd_buf_arr[producer]; - pbuf->skb = NULL; - pbuf->frag_count = 0; - - memcpy(&tx_ring->desc_head[producer], - &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); - - producer = get_next_index(producer, tx_ring->num_desc); - i++; - - } while (i != nr_desc); - - tx_ring->producer = producer; - - netxen_nic_update_cmd_producer(adapter, tx_ring); - - __netif_tx_unlock_bh(tx_ring->txq); - - return 0; -} - -static int -nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op) -{ - nx_nic_req_t req; - nx_mac_req_t *mac_req; - u64 word; - - memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23); - - word = NX_MAC_EVENT | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - mac_req = (nx_mac_req_t *)&req.words[0]; - mac_req->op = op; - memcpy(mac_req->mac_addr, addr, 6); - - return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); -} - -static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, - const u8 *addr, struct list_head *del_list) -{ - struct list_head *head; - nx_mac_list_t *cur; - - /* look up if already exists */ - list_for_each(head, del_list) { - cur = list_entry(head, nx_mac_list_t, list); - - if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) { - list_move_tail(head, &adapter->mac_list); - return 0; - } - } - - cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC); - if (cur == NULL) { - printk(KERN_ERR "%s: failed to add mac address filter\n", - adapter->netdev->name); - return -ENOMEM; - } - memcpy(cur->mac_addr, addr, ETH_ALEN); - list_add_tail(&cur->list, &adapter->mac_list); - return nx_p3_sre_macaddr_change(adapter, - cur->mac_addr, NETXEN_MAC_ADD); -} - -static void netxen_p3_nic_set_multi(struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - struct netdev_hw_addr *ha; - static const u8 bcast_addr[ETH_ALEN] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - }; - u32 mode = VPORT_MISS_MODE_DROP; - LIST_HEAD(del_list); - struct list_head *head; - nx_mac_list_t *cur; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return; - - list_splice_tail_init(&adapter->mac_list, &del_list); - - nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list); - nx_p3_nic_add_mac(adapter, bcast_addr, &del_list); - - if (netdev->flags & IFF_PROMISC) { - mode = VPORT_MISS_MODE_ACCEPT_ALL; - goto send_fw_cmd; - } - - if ((netdev->flags & IFF_ALLMULTI) || - (netdev_mc_count(netdev) > adapter->max_mc_count)) { - mode = VPORT_MISS_MODE_ACCEPT_MULTI; - goto send_fw_cmd; - } - - if (!netdev_mc_empty(netdev)) { - netdev_for_each_mc_addr(ha, netdev) - nx_p3_nic_add_mac(adapter, ha->addr, &del_list); - } - -send_fw_cmd: - adapter->set_promisc(adapter, mode); - head = &del_list; - while (!list_empty(head)) { - cur = list_entry(head->next, nx_mac_list_t, list); - - nx_p3_sre_macaddr_change(adapter, - cur->mac_addr, NETXEN_MAC_DEL); - list_del(&cur->list); - kfree(cur); - } -} - -static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) -{ - nx_nic_req_t 
req; - u64 word; - - memset(&req, 0, sizeof(nx_nic_req_t)); - - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE | - ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(mode); - - return netxen_send_cmd_descs(adapter, - (struct cmd_desc_type0 *)&req, 1); -} - -void netxen_p3_free_mac_list(struct netxen_adapter *adapter) -{ - nx_mac_list_t *cur; - struct list_head *head = &adapter->mac_list; - - while (!list_empty(head)) { - cur = list_entry(head->next, nx_mac_list_t, list); - nx_p3_sre_macaddr_change(adapter, - cur->mac_addr, NETXEN_MAC_DEL); - list_del(&cur->list); - kfree(cur); - } -} - -static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) -{ - /* assuming caller has already copied new addr to netdev */ - netxen_p3_nic_set_multi(adapter->netdev); - return 0; -} - -#define NETXEN_CONFIG_INTR_COALESCE 3 - -/* - * Send the interrupt coalescing parameter set by ethtool to the card. - */ -int netxen_config_intr_coalesce(struct netxen_adapter *adapter) -{ - nx_nic_req_t req; - u64 word[6]; - int rv, i; - - memset(&req, 0, sizeof(nx_nic_req_t)); - memset(word, 0, sizeof(word)); - - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word[0]); - - memcpy(&word[0], &adapter->coal, sizeof(adapter->coal)); - for (i = 0; i < 6; i++) - req.words[i] = cpu_to_le64(word[i]); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "ERROR. Could not send " - "interrupt coalescing parameters\n"); - } - - return rv; -} - -int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable) -{ - nx_nic_req_t req; - u64 word; - int rv = 0; - - if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) - return 0; - - memset(&req, 0, sizeof(nx_nic_req_t)); - - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(enable); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "ERROR. Could not send " - "configure hw lro request\n"); - } - - return rv; -} - -int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable) -{ - nx_nic_req_t req; - u64 word; - int rv = 0; - - if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable) - return rv; - - memset(&req, 0, sizeof(nx_nic_req_t)); - - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING | - ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(enable); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "ERROR. 
Could not send " - "configure bridge mode request\n"); - } - - adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED; - - return rv; -} - - -#define RSS_HASHTYPE_IP_TCP 0x3 - -int netxen_config_rss(struct netxen_adapter *adapter, int enable) -{ - nx_nic_req_t req; - u64 word; - int i, rv; - - static const u64 key[] = { - 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, - 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, - 0x255b0ec26d5a56daULL - }; - - - memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - /* - * RSS request: - * bits 3-0: hash_method - * 5-4: hash_type_ipv4 - * 7-6: hash_type_ipv6 - * 8: enable - * 9: use indirection table - * 47-10: reserved - * 63-48: indirection table mask - */ - word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | - ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | - ((u64)(enable & 0x1) << 8) | - ((0x7ULL) << 48); - req.words[0] = cpu_to_le64(word); - for (i = 0; i < ARRAY_SIZE(key); i++) - req.words[i+1] = cpu_to_le64(key[i]); - - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "%s: could not configure RSS\n", - adapter->netdev->name); - } - - return rv; -} - -int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd) -{ - nx_nic_req_t req; - u64 word; - int rv; - - memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(cmd); - req.words[1] = cpu_to_le64(ip); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "%s: could not notify %s IP 0x%x reuqest\n", - adapter->netdev->name, - (cmd == NX_IP_UP) ? 
"Add" : "Remove", ip); - } - return rv; -} - -int netxen_linkevent_request(struct netxen_adapter *adapter, int enable) -{ - nx_nic_req_t req; - u64 word; - int rv; - - memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - req.words[0] = cpu_to_le64(enable | (enable << 8)); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "%s: could not configure link notification\n", - adapter->netdev->name); - } - - return rv; -} - -int netxen_send_lro_cleanup(struct netxen_adapter *adapter) -{ - nx_nic_req_t req; - u64 word; - int rv; - - if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) - return 0; - - memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); - - word = NX_NIC_H2C_OPCODE_LRO_REQUEST | - ((u64)adapter->portnum << 16) | - ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56) ; - - req.req_hdr = cpu_to_le64(word); - - rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) { - printk(KERN_ERR "%s: could not cleanup lro flows\n", - adapter->netdev->name); - } - return rv; -} - -/* - * netxen_nic_change_mtu - Change the Maximum Transfer Unit - * @returns 0 on success, negative on failure - */ - -#define MTU_FUDGE_FACTOR 100 - -int netxen_nic_change_mtu(struct net_device *netdev, int mtu) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - int max_mtu; - int rc = 0; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - max_mtu = P3_MAX_MTU; - else - max_mtu = P2_MAX_MTU; - - if (mtu > max_mtu) { - printk(KERN_ERR "%s: mtu > %d bytes unsupported\n", - netdev->name, max_mtu); - return -EINVAL; - } - - if (adapter->set_mtu) - rc = adapter->set_mtu(adapter, mtu); - - if (!rc) - netdev->mtu = mtu; - - return rc; -} - -static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, - int size, __le32 * buf) -{ - int i, v, addr; - __le32 *ptr32; - - addr = base; - ptr32 = buf; - for (i = 0; i < size / sizeof(u32); i++) { - if (netxen_rom_fast_read(adapter, addr, &v) == -1) - return -1; - *ptr32 = cpu_to_le32(v); - ptr32++; - addr += sizeof(u32); - } - if ((char *)buf + size > (char *)ptr32) { - __le32 local; - if (netxen_rom_fast_read(adapter, addr, &v) == -1) - return -1; - local = cpu_to_le32(v); - memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); - } - - return 0; -} - -int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac) -{ - __le32 *pmac = (__le32 *) mac; - u32 offset; - - offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); - - if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) - return -1; - - if (*mac == cpu_to_le64(~0ULL)) { - - offset = NX_OLD_MAC_ADDR_OFFSET + - (adapter->portnum * sizeof(u64)); - - if (netxen_get_flash_block(adapter, - offset, sizeof(u64), pmac) == -1) - return -1; - - if (*mac == cpu_to_le64(~0ULL)) - return -1; - } - return 0; -} - -int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac) -{ - uint32_t crbaddr, mac_hi, mac_lo; - int pci_func = adapter->ahw.pci_func; - - crbaddr = CRB_MAC_BLOCK_START + - (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); - - mac_lo = NXRD32(adapter, crbaddr); - mac_hi = NXRD32(adapter, crbaddr+4); - - if (pci_func & 1) - *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); - else - *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32)); - - return 0; -} - -/* - * Changes the CRB window to the 
specified window. - */ -static void -netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter, - u32 window) -{ - void __iomem *offset; - int count = 10; - u8 func = adapter->ahw.pci_func; - - if (adapter->ahw.crb_win == window) - return; - - offset = PCI_OFFSET_SECOND_RANGE(adapter, - NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func))); - - writel(window, offset); - do { - if (window == readl(offset)) - break; - - if (printk_ratelimit()) - dev_warn(&adapter->pdev->dev, - "failed to set CRB window to %d\n", - (window == NETXEN_WINDOW_ONE)); - udelay(1); - - } while (--count > 0); - - if (count > 0) - adapter->ahw.crb_win = window; -} - -/* - * Returns < 0 if off is not valid, - * 1 if window access is needed. 'off' is set to offset from - * CRB space in 128M pci map - * 0 if no window access is needed. 'off' is set to 2M addr - * In: 'off' is offset from base in 128M pci map - */ -static int -netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, - ulong off, void __iomem **addr) -{ - crb_128M_2M_sub_block_map_t *m; - - - if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE)) - return -EINVAL; - - off -= NETXEN_PCI_CRBSPACE; - - /* - * Try direct map - */ - m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; - - if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { - *addr = adapter->ahw.pci_base0 + m->start_2M + - (off - m->start_128M); - return 0; - } - - /* - * Not in direct map, use crb window - */ - *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + - (off & MASK(16)); - return 1; -} - -/* - * In: 'off' is offset from CRB space in 128M pci map - * Out: 'off' is 2M pci map addr - * side effect: lock crb window - */ -static void -netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off) -{ - u32 window; - void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M; - - off -= NETXEN_PCI_CRBSPACE; - - window = CRB_HI(off); - - writel(window, addr); - if (readl(addr) != window) { - if (printk_ratelimit()) - dev_warn(&adapter->pdev->dev, - "failed to set CRB window to %d off 0x%lx\n", - window, off); - } -} - -static void __iomem * -netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter, - ulong win_off, void __iomem **mem_ptr) -{ - ulong off = win_off; - void __iomem *addr; - resource_size_t mem_base; - - if (ADDR_IN_WINDOW1(win_off)) - off = NETXEN_CRB_NORMAL(win_off); - - addr = pci_base_offset(adapter, off); - if (addr) - return addr; - - if (adapter->ahw.pci_len0 == 0) - off -= NETXEN_PCI_CRBSPACE; - - mem_base = pci_resource_start(adapter->pdev, 0); - *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE); - if (*mem_ptr) - addr = *mem_ptr + (off & (PAGE_SIZE - 1)); - - return addr; -} - -static int -netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data) -{ - unsigned long flags; - void __iomem *addr, *mem_ptr = NULL; - - addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); - if (!addr) - return -EIO; - - if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ - netxen_nic_io_write_128M(adapter, addr, data); - } else { /* Window 0 */ - write_lock_irqsave(&adapter->ahw.crb_lock, flags); - netxen_nic_pci_set_crbwindow_128M(adapter, 0); - writel(data, addr); - netxen_nic_pci_set_crbwindow_128M(adapter, - NETXEN_WINDOW_ONE); - write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); - } - - if (mem_ptr) - iounmap(mem_ptr); - - return 0; -} - -static u32 -netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off) -{ - unsigned long flags; - void __iomem *addr, *mem_ptr = NULL; - 
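/*
 * Host-side model only, not driver code: the comment above
 * netxen_nic_pci_get_crb_addr_2M() is the key to the 2M access paths.
 * An offset (already rebased below NETXEN_PCI_CRBSPACE) either falls in
 * a directly mapped sub-block (return 0, plain readl/writel) or must be
 * reached through the 64K indirect window at CRB_INDIRECT_2M (return 1),
 * which is why the *_wx_2M routines take crb_lock and program
 * CRB_WINDOW_2M first. A simplified one-entry model of that dispatch,
 * with names of our own choosing:
 */
struct sub_block_model {
	int valid;
	unsigned long start_128M, end_128M, start_2M;
};

static int crb_addr_model(const struct sub_block_model *m,
			  unsigned long off, unsigned long *addr_2M)
{
	if (m->valid && m->start_128M <= off && m->end_128M > off) {
		*addr_2M = m->start_2M + (off - m->start_128M);
		return 0;				/* direct map */
	}
	*addr_2M = 0x1e0000UL + (off & 0xffffUL);	/* CRB_INDIRECT_2M + low 16 bits */
	return 1;					/* CRB window access needed */
}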
u32 data; - - addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); - if (!addr) - return -EIO; - - if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ - data = netxen_nic_io_read_128M(adapter, addr); - } else { /* Window 0 */ - write_lock_irqsave(&adapter->ahw.crb_lock, flags); - netxen_nic_pci_set_crbwindow_128M(adapter, 0); - data = readl(addr); - netxen_nic_pci_set_crbwindow_128M(adapter, - NETXEN_WINDOW_ONE); - write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); - } - - if (mem_ptr) - iounmap(mem_ptr); - - return data; -} - -static int -netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data) -{ - unsigned long flags; - int rv; - void __iomem *addr = NULL; - - rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); - - if (rv == 0) { - writel(data, addr); - return 0; - } - - if (rv > 0) { - /* indirect access */ - write_lock_irqsave(&adapter->ahw.crb_lock, flags); - crb_win_lock(adapter); - netxen_nic_pci_set_crbwindow_2M(adapter, off); - writel(data, addr); - crb_win_unlock(adapter); - write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); - return 0; - } - - dev_err(&adapter->pdev->dev, - "%s: invalid offset: 0x%016lx\n", __func__, off); - dump_stack(); - return -EIO; -} - -static u32 -netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off) -{ - unsigned long flags; - int rv; - u32 data; - void __iomem *addr = NULL; - - rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); - - if (rv == 0) - return readl(addr); - - if (rv > 0) { - /* indirect access */ - write_lock_irqsave(&adapter->ahw.crb_lock, flags); - crb_win_lock(adapter); - netxen_nic_pci_set_crbwindow_2M(adapter, off); - data = readl(addr); - crb_win_unlock(adapter); - write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); - return data; - } - - dev_err(&adapter->pdev->dev, - "%s: invalid offset: 0x%016lx\n", __func__, off); - dump_stack(); - return -1; -} - -/* window 1 registers only */ -static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, - void __iomem *addr, u32 data) -{ - read_lock(&adapter->ahw.crb_lock); - writel(data, addr); - read_unlock(&adapter->ahw.crb_lock); -} - -static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, - void __iomem *addr) -{ - u32 val; - - read_lock(&adapter->ahw.crb_lock); - val = readl(addr); - read_unlock(&adapter->ahw.crb_lock); - - return val; -} - -static void netxen_nic_io_write_2M(struct netxen_adapter *adapter, - void __iomem *addr, u32 data) -{ - writel(data, addr); -} - -static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter, - void __iomem *addr) -{ - return readl(addr); -} - -void __iomem * -netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset) -{ - void __iomem *addr = NULL; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - if ((offset < NETXEN_CRB_PCIX_HOST2) && - (offset > NETXEN_CRB_PCIX_HOST)) - addr = PCI_OFFSET_SECOND_RANGE(adapter, offset); - else - addr = NETXEN_CRB_NORMALIZE(adapter, offset); - } else { - WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter, - offset, &addr)); - } - - return addr; -} - -static int -netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter, - u64 addr, u32 *start) -{ - if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { - *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0); - return 0; - } else if (ADDR_IN_RANGE(addr, - NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { - *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1); - return 0; - } - - return -EIO; -} - -static int -netxen_nic_pci_set_window_2M(struct netxen_adapter 
*adapter, - u64 addr, u32 *start) -{ - u32 window; - - window = OCM_WIN(addr); - - writel(window, adapter->ahw.ocm_win_crb); - /* read back to flush */ - readl(adapter->ahw.ocm_win_crb); - - adapter->ahw.ocm_win = window; - *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); - return 0; -} - -static int -netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off, - u64 *data, int op) -{ - void __iomem *addr, *mem_ptr = NULL; - resource_size_t mem_base; - int ret; - u32 start; - - spin_lock(&adapter->ahw.mem_lock); - - ret = adapter->pci_set_window(adapter, off, &start); - if (ret != 0) - goto unlock; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - addr = adapter->ahw.pci_base0 + start; - } else { - addr = pci_base_offset(adapter, start); - if (addr) - goto noremap; - - mem_base = pci_resource_start(adapter->pdev, 0) + - (start & PAGE_MASK); - mem_ptr = ioremap(mem_base, PAGE_SIZE); - if (mem_ptr == NULL) { - ret = -EIO; - goto unlock; - } - - addr = mem_ptr + (start & (PAGE_SIZE-1)); - } -noremap: - if (op == 0) /* read */ - *data = readq(addr); - else /* write */ - writeq(*data, addr); - -unlock: - spin_unlock(&adapter->ahw.mem_lock); - - if (mem_ptr) - iounmap(mem_ptr); - return ret; -} - -void -netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data) -{ - void __iomem *addr = adapter->ahw.pci_base0 + - NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); - - spin_lock(&adapter->ahw.mem_lock); - *data = readq(addr); - spin_unlock(&adapter->ahw.mem_lock); -} - -void -netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data) -{ - void __iomem *addr = adapter->ahw.pci_base0 + - NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); - - spin_lock(&adapter->ahw.mem_lock); - writeq(data, addr); - spin_unlock(&adapter->ahw.mem_lock); -} - -#define MAX_CTL_CHECK 1000 - -static int -netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, - u64 off, u64 data) -{ - int j, ret; - u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P2 has different SIU and MIU test agent base addr */ - if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, - NETXEN_ADDR_QDR_NET_MAX_P2)) { - mem_crb = pci_base_offset(adapter, - NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); - addr_hi = SIU_TEST_AGT_ADDR_HI; - data_lo = SIU_TEST_AGT_WRDATA_LO; - data_hi = SIU_TEST_AGT_WRDATA_HI; - off_lo = off & SIU_TEST_AGT_ADDR_MASK; - off_hi = SIU_TEST_AGT_UPPER_ADDR(off); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { - mem_crb = pci_base_offset(adapter, - NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); - addr_hi = MIU_TEST_AGT_ADDR_HI; - data_lo = MIU_TEST_AGT_WRDATA_LO; - data_hi = MIU_TEST_AGT_WRDATA_HI; - off_lo = off & MIU_TEST_AGT_ADDR_MASK; - off_hi = 0; - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || - ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { - if (adapter->ahw.pci_len0 != 0) { - return netxen_nic_pci_mem_access_direct(adapter, - off, &data, 1); - } - } - - return -EIO; - -correct: - spin_lock(&adapter->ahw.mem_lock); - netxen_nic_pci_set_crbwindow_128M(adapter, 0); - - writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(off_hi, (mem_crb + addr_hi)); - writel(data & 0xffffffff, (mem_crb + data_lo)); - writel((data >> 32) & 0xffffffff, (mem_crb + data_hi)); - writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE | 
TA_CTL_WRITE), - (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl((mem_crb + TEST_AGT_CTRL)); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to write through agent\n"); - ret = -EIO; - } else - ret = 0; - - netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); - spin_unlock(&adapter->ahw.mem_lock); - return ret; -} - -static int -netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, - u64 off, u64 *data) -{ - int j, ret; - u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; - u64 val; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P2 has different SIU and MIU test agent base addr */ - if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, - NETXEN_ADDR_QDR_NET_MAX_P2)) { - mem_crb = pci_base_offset(adapter, - NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); - addr_hi = SIU_TEST_AGT_ADDR_HI; - data_lo = SIU_TEST_AGT_RDDATA_LO; - data_hi = SIU_TEST_AGT_RDDATA_HI; - off_lo = off & SIU_TEST_AGT_ADDR_MASK; - off_hi = SIU_TEST_AGT_UPPER_ADDR(off); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { - mem_crb = pci_base_offset(adapter, - NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); - addr_hi = MIU_TEST_AGT_ADDR_HI; - data_lo = MIU_TEST_AGT_RDDATA_LO; - data_hi = MIU_TEST_AGT_RDDATA_HI; - off_lo = off & MIU_TEST_AGT_ADDR_MASK; - off_hi = 0; - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || - ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { - if (adapter->ahw.pci_len0 != 0) { - return netxen_nic_pci_mem_access_direct(adapter, - off, data, 0); - } - } - - return -EIO; - -correct: - spin_lock(&adapter->ahw.mem_lock); - netxen_nic_pci_set_crbwindow_128M(adapter, 0); - - writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(off_hi, (mem_crb + addr_hi)); - writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to read through agent\n"); - ret = -EIO; - } else { - - temp = readl(mem_crb + data_hi); - val = ((u64)temp << 32); - val |= readl(mem_crb + data_lo); - *data = val; - ret = 0; - } - - netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); - spin_unlock(&adapter->ahw.mem_lock); - - return ret; -} - -static int -netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, - u64 off, u64 data) -{ - int j, ret; - u32 temp, off8; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P3 onward, test agent base for MIU and SIU is same */ - if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, - NETXEN_ADDR_QDR_NET_MAX_P3)) { - mem_crb = netxen_get_ioaddr(adapter, - NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { - mem_crb = netxen_get_ioaddr(adapter, - NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) - return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1); - - return -EIO; - -correct: - off8 = off & 0xfffffff8; - - spin_lock(&adapter->ahw.mem_lock); - - writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(0, (mem_crb + 
MIU_TEST_AGT_ADDR_HI)); - - writel(data & 0xffffffff, - mem_crb + MIU_TEST_AGT_WRDATA_LO); - writel((data >> 32) & 0xffffffff, - mem_crb + MIU_TEST_AGT_WRDATA_HI); - - writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), - (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to write through agent\n"); - ret = -EIO; - } else - ret = 0; - - spin_unlock(&adapter->ahw.mem_lock); - - return ret; -} - -static int -netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, - u64 off, u64 *data) -{ - int j, ret; - u32 temp, off8; - u64 val; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P3 onward, test agent base for MIU and SIU is same */ - if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, - NETXEN_ADDR_QDR_NET_MAX_P3)) { - mem_crb = netxen_get_ioaddr(adapter, - NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { - mem_crb = netxen_get_ioaddr(adapter, - NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { - return netxen_nic_pci_mem_access_direct(adapter, - off, data, 0); - } - - return -EIO; - -correct: - off8 = off & 0xfffffff8; - - spin_lock(&adapter->ahw.mem_lock); - - writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); - writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to read through agent\n"); - ret = -EIO; - } else { - val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32; - val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO); - *data = val; - ret = 0; - } - - spin_unlock(&adapter->ahw.mem_lock); - - return ret; -} - -void -netxen_setup_hwops(struct netxen_adapter *adapter) -{ - adapter->init_port = netxen_niu_xg_init_port; - adapter->stop_port = netxen_niu_disable_xg_port; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - adapter->crb_read = netxen_nic_hw_read_wx_128M, - adapter->crb_write = netxen_nic_hw_write_wx_128M, - adapter->pci_set_window = netxen_nic_pci_set_window_128M, - adapter->pci_mem_read = netxen_nic_pci_mem_read_128M, - adapter->pci_mem_write = netxen_nic_pci_mem_write_128M, - adapter->io_read = netxen_nic_io_read_128M, - adapter->io_write = netxen_nic_io_write_128M, - - adapter->macaddr_set = netxen_p2_nic_set_mac_addr; - adapter->set_multi = netxen_p2_nic_set_multi; - adapter->set_mtu = netxen_nic_set_mtu_xgb; - adapter->set_promisc = netxen_p2_nic_set_promisc; - - } else { - adapter->crb_read = netxen_nic_hw_read_wx_2M, - adapter->crb_write = netxen_nic_hw_write_wx_2M, - adapter->pci_set_window = netxen_nic_pci_set_window_2M, - adapter->pci_mem_read = netxen_nic_pci_mem_read_2M, - adapter->pci_mem_write = netxen_nic_pci_mem_write_2M, - adapter->io_read = netxen_nic_io_read_2M, - adapter->io_write = netxen_nic_io_write_2M, - - adapter->set_mtu = nx_fw_cmd_set_mtu; - adapter->set_promisc = netxen_p3_nic_set_promisc; - adapter->macaddr_set = 
netxen_p3_nic_set_mac_addr; - adapter->set_multi = netxen_p3_nic_set_multi; - - adapter->phy_read = nx_fw_cmd_query_phy; - adapter->phy_write = nx_fw_cmd_set_phy; - } -} - -int netxen_nic_get_board_info(struct netxen_adapter *adapter) -{ - int offset, board_type, magic; - struct pci_dev *pdev = adapter->pdev; - - offset = NX_FW_MAGIC_OFFSET; - if (netxen_rom_fast_read(adapter, offset, &magic)) - return -EIO; - - if (magic != NETXEN_BDINFO_MAGIC) { - dev_err(&pdev->dev, "invalid board config, magic=%08x\n", - magic); - return -EIO; - } - - offset = NX_BRDTYPE_OFFSET; - if (netxen_rom_fast_read(adapter, offset, &board_type)) - return -EIO; - - if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { - u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I); - if ((gpio & 0x8000) == 0) - board_type = NETXEN_BRDTYPE_P3_10G_TP; - } - - adapter->ahw.board_type = board_type; - - switch (board_type) { - case NETXEN_BRDTYPE_P2_SB35_4G: - adapter->ahw.port_type = NETXEN_NIC_GBE; - break; - case NETXEN_BRDTYPE_P2_SB31_10G: - case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: - case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: - case NETXEN_BRDTYPE_P2_SB31_10G_CX4: - case NETXEN_BRDTYPE_P3_HMEZ: - case NETXEN_BRDTYPE_P3_XG_LOM: - case NETXEN_BRDTYPE_P3_10G_CX4: - case NETXEN_BRDTYPE_P3_10G_CX4_LP: - case NETXEN_BRDTYPE_P3_IMEZ: - case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: - case NETXEN_BRDTYPE_P3_10G_SFP_CT: - case NETXEN_BRDTYPE_P3_10G_SFP_QT: - case NETXEN_BRDTYPE_P3_10G_XFP: - case NETXEN_BRDTYPE_P3_10000_BASE_T: - adapter->ahw.port_type = NETXEN_NIC_XGBE; - break; - case NETXEN_BRDTYPE_P1_BD: - case NETXEN_BRDTYPE_P1_SB: - case NETXEN_BRDTYPE_P1_SMAX: - case NETXEN_BRDTYPE_P1_SOCK: - case NETXEN_BRDTYPE_P3_REF_QG: - case NETXEN_BRDTYPE_P3_4_GB: - case NETXEN_BRDTYPE_P3_4_GB_MM: - adapter->ahw.port_type = NETXEN_NIC_GBE; - break; - case NETXEN_BRDTYPE_P3_10G_TP: - adapter->ahw.port_type = (adapter->portnum < 2) ? 
- NETXEN_NIC_XGBE : NETXEN_NIC_GBE; - break; - default: - dev_err(&pdev->dev, "unknown board type %x\n", board_type); - adapter->ahw.port_type = NETXEN_NIC_XGBE; - break; - } - - return 0; -} - -/* NIU access sections */ -static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) -{ - new_mtu += MTU_FUDGE_FACTOR; - if (adapter->physical_port == 0) - NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); - else - NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu); - return 0; -} - -void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) -{ - __u32 status; - __u32 autoneg; - __u32 port_mode; - - if (!netif_carrier_ok(adapter->netdev)) { - adapter->link_speed = 0; - adapter->link_duplex = -1; - adapter->link_autoneg = AUTONEG_ENABLE; - return; - } - - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); - if (port_mode == NETXEN_PORT_MODE_802_3_AP) { - adapter->link_speed = SPEED_1000; - adapter->link_duplex = DUPLEX_FULL; - adapter->link_autoneg = AUTONEG_DISABLE; - return; - } - - if (adapter->phy_read && - adapter->phy_read(adapter, - NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, - &status) == 0) { - if (netxen_get_phy_link(status)) { - switch (netxen_get_phy_speed(status)) { - case 0: - adapter->link_speed = SPEED_10; - break; - case 1: - adapter->link_speed = SPEED_100; - break; - case 2: - adapter->link_speed = SPEED_1000; - break; - default: - adapter->link_speed = 0; - break; - } - switch (netxen_get_phy_duplex(status)) { - case 0: - adapter->link_duplex = DUPLEX_HALF; - break; - case 1: - adapter->link_duplex = DUPLEX_FULL; - break; - default: - adapter->link_duplex = -1; - break; - } - if (adapter->phy_read && - adapter->phy_read(adapter, - NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, - &autoneg) != 0) - adapter->link_autoneg = autoneg; - } else - goto link_down; - } else { - link_down: - adapter->link_speed = 0; - adapter->link_duplex = -1; - } - } -} - -int -netxen_nic_wol_supported(struct netxen_adapter *adapter) -{ - u32 wol_cfg; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 0; - - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); - if (wol_cfg & (1UL << adapter->portnum)) { - wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); - if (wol_cfg & (1 << adapter->portnum)) - return 1; - } - - return 0; -} diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h deleted file mode 100644 index e2c5b6f..0000000 --- a/drivers/net/netxen/netxen_nic_hw.h +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". 
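The wake-on-LAN probe in netxen_nic_wol_supported() above is a two-level bitmask check: the port must be provisioned in the non-volatile word and enabled in the active word. A minimal standalone sketch, assuming both register reads (NETXEN_WOL_CONFIG_NV and NETXEN_WOL_CONFIG) were already done, with a function name of our own:

#include <stdint.h>

static int wol_supported_model(uint32_t wol_cfg_nv, uint32_t wol_cfg,
			       int portnum)
{
	if (!(wol_cfg_nv & (1u << portnum)))
		return 0;			/* not provisioned in NV config */
	return !!(wol_cfg & (1u << portnum));	/* currently enabled */
}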
- * - */ - -#ifndef __NETXEN_NIC_HW_H_ -#define __NETXEN_NIC_HW_H_ - -/* Hardware memory size of 128 meg */ -#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024) - -struct netxen_adapter; - -#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20) - -void netxen_nic_set_link_parameters(struct netxen_adapter *adapter); - -/* Nibble or Byte mode for phy interface (GbE mode only) */ - -#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1) - -/* - * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) - * - * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable - * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream - * Bit 2 : enable_rx => 1:enable frame recv, 0:disable - * Bit 3 : rx_synced => R/O: recv enable synched to recv stream - * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable - * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore - * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal - * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op - * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op - * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op - * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op - * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op - */ - -#define netxen_gb_tx_flowctl(config_word) \ - ((config_word) |= 1 << 4) -#define netxen_gb_rx_flowctl(config_word) \ - ((config_word) |= 1 << 5) -#define netxen_gb_tx_reset_pb(config_word) \ - ((config_word) |= 1 << 16) -#define netxen_gb_rx_reset_pb(config_word) \ - ((config_word) |= 1 << 17) -#define netxen_gb_tx_reset_mac(config_word) \ - ((config_word) |= 1 << 18) -#define netxen_gb_rx_reset_mac(config_word) \ - ((config_word) |= 1 << 19) - -#define netxen_gb_unset_tx_flowctl(config_word) \ - ((config_word) &= ~(1 << 4)) -#define netxen_gb_unset_rx_flowctl(config_word) \ - ((config_word) &= ~(1 << 5)) - -#define netxen_gb_get_tx_synced(config_word) \ - _netxen_crb_get_bit((config_word), 1) -#define netxen_gb_get_rx_synced(config_word) \ - _netxen_crb_get_bit((config_word), 3) -#define netxen_gb_get_tx_flowctl(config_word) \ - _netxen_crb_get_bit((config_word), 4) -#define netxen_gb_get_rx_flowctl(config_word) \ - _netxen_crb_get_bit((config_word), 5) -#define netxen_gb_get_soft_reset(config_word) \ - _netxen_crb_get_bit((config_word), 31) - -#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16) - -#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \ - ((config_word) |= ((val) & 0x07)) -#define netxen_gb_mii_mgmt_reset(config_word) \ - ((config_word) |= 1 << 31) -#define netxen_gb_mii_mgmt_unset(config_word) \ - ((config_word) &= ~(1 << 31)) - -/* - * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3) - * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op - * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op - */ - -#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \ - ((config_word) |= 1 << 0) -#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \ - ((config_word) |= ((val) & 0x1F)) -#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \ - ((config_word) |= (((val) & 0x1F) << 8)) - -/* - * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3) - * Read-only register. 
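As a sanity check on the GB MAC config bit map above, a tiny userspace program (macro definitions copied verbatim from this header; everything else is ours):

#include <stdio.h>
#include <stdint.h>

#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1)
#define netxen_gb_tx_flowctl(config_word) \
	((config_word) |= 1 << 4)
#define netxen_gb_rx_flowctl(config_word) \
	((config_word) |= 1 << 5)
#define netxen_gb_tx_reset_pb(config_word) \
	((config_word) |= 1 << 16)

int main(void)
{
	uint32_t w = 0;

	netxen_gb_tx_flowctl(w);	/* bit 4: generate pause frames */
	netxen_gb_rx_flowctl(w);	/* bit 5: honor received pause frames */
	netxen_gb_tx_reset_pb(w);	/* bit 16: reset xmit protocol block */
	printf("config=0x%08x tx_flowctl=%u\n", w, _netxen_crb_get_bit(w, 4));
	return 0;			/* prints config=0x00010030 tx_flowctl=1 */
}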
- * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle
- * Bit 1 : scanning => 1:scan operation in progress, 0:idle
- * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle
- */
-#define netxen_get_gb_mii_mgmt_busy(config_word) \
-	_netxen_crb_get_bit(config_word, 0)
-#define netxen_get_gb_mii_mgmt_scanning(config_word) \
-	_netxen_crb_get_bit(config_word, 1)
-#define netxen_get_gb_mii_mgmt_notvalid(config_word) \
-	_netxen_crb_get_bit(config_word, 2)
-/*
- * NIU XG Pause Ctl Register
- *
- * Bit 0 : xg0_mask => 1:disable tx pause frames
- * Bit 1 : xg0_request => 1:request single pause frame
- * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
- * Bit 3 : xg1_mask => 1:disable tx pause frames
- * Bit 4 : xg1_request => 1:request single pause frame
- * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
- */
-
-#define netxen_xg_set_xg0_mask(config_word) \
-	((config_word) |= 1 << 0)
-#define netxen_xg_set_xg1_mask(config_word) \
-	((config_word) |= 1 << 3)
-
-#define netxen_xg_get_xg0_mask(config_word) \
-	_netxen_crb_get_bit((config_word), 0)
-#define netxen_xg_get_xg1_mask(config_word) \
-	_netxen_crb_get_bit((config_word), 3)
-
-#define netxen_xg_unset_xg0_mask(config_word) \
-	((config_word) &= ~(1 << 0))
-#define netxen_xg_unset_xg1_mask(config_word) \
-	((config_word) &= ~(1 << 3))
-
-/*
- * NIU GB Pause Ctl Register
- *
- * Bit 0 : gb0_mask => 1:disable tx pause frames
- * Bit 2 : gb1_mask => 1:disable tx pause frames
- * Bit 4 : gb2_mask => 1:disable tx pause frames
- * Bit 6 : gb3_mask => 1:disable tx pause frames
- */
-#define netxen_gb_set_gb0_mask(config_word) \
-	((config_word) |= 1 << 0)
-#define netxen_gb_set_gb1_mask(config_word) \
-	((config_word) |= 1 << 2)
-#define netxen_gb_set_gb2_mask(config_word) \
-	((config_word) |= 1 << 4)
-#define netxen_gb_set_gb3_mask(config_word) \
-	((config_word) |= 1 << 6)
-
-#define netxen_gb_get_gb0_mask(config_word) \
-	_netxen_crb_get_bit((config_word), 0)
-#define netxen_gb_get_gb1_mask(config_word) \
-	_netxen_crb_get_bit((config_word), 2)
-#define netxen_gb_get_gb2_mask(config_word) \
-	_netxen_crb_get_bit((config_word), 4)
-#define netxen_gb_get_gb3_mask(config_word) \
-	_netxen_crb_get_bit((config_word), 6)
-
-#define netxen_gb_unset_gb0_mask(config_word) \
-	((config_word) &= ~(1 << 0))
-#define netxen_gb_unset_gb1_mask(config_word) \
-	((config_word) &= ~(1 << 2))
-#define netxen_gb_unset_gb2_mask(config_word) \
-	((config_word) &= ~(1 << 4))
-#define netxen_gb_unset_gb3_mask(config_word) \
-	((config_word) &= ~(1 << 6))
-
-
-/*
- * PHY-Specific MII control/status registers.
- */ -#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL 0 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS 1 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 2 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 3 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG 4 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART 5 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE 6 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT 7 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE 8 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL 9 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS 10 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS 15 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL 16 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE 18 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS 19 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE 20 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT 21 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL 24 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE 25 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET 26 -#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE 27 - -/* - * PHY-Specific Status Register (reg 17). - * - * Bit 0 : jabber => 1:jabber detected, 0:not - * Bit 1 : polarity => 1:polarity reversed, 0:normal - * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled - * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled - * Bit 4 : energydetect => 1:sleep, 0:active - * Bit 5 : downshift => 1:downshift, 0:no downshift - * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) - * Bits 7-9 : cablelen => not valid in 10Mb/s mode - * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m - * Bit 10 : link => 1:link up, 0:link down - * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet - * Bit 12 : pagercvd => 1:page received, 0:page not received - * Bit 13 : duplex => 1:full duplex, 0:half duplex - * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd - */ - -#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) - -#define netxen_set_phy_speed(config_word, val) \ - ((config_word) |= ((val & 0x03) << 14)) -#define netxen_set_phy_duplex(config_word) \ - ((config_word) |= 1 << 13) -#define netxen_clear_phy_duplex(config_word) \ - ((config_word) &= ~(1 << 13)) - -#define netxen_get_phy_link(config_word) \ - _netxen_crb_get_bit(config_word, 10) -#define netxen_get_phy_duplex(config_word) \ - _netxen_crb_get_bit(config_word, 13) - -/* - * NIU Mode Register. 
- * Bit 0 : enable FibreChannel - * Bit 1 : enable 10/100/1000 Ethernet - * Bit 2 : enable 10Gb Ethernet - */ - -#define netxen_get_niu_enable_ge(config_word) \ - _netxen_crb_get_bit(config_word, 1) - -#define NETXEN_NIU_NON_PROMISC_MODE 0 -#define NETXEN_NIU_PROMISC_MODE 1 -#define NETXEN_NIU_ALLMULTI_MODE 2 - -/* - * NIU XG MAC Config Register - * - * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable - * Bit 2 : rx_enable => 1:enable frame recv, 0:disable - * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op - * Bit 27: xaui_framer_reset - * Bit 28: xaui_rx_reset - * Bit 29: xaui_tx_reset - * Bit 30: xg_ingress_afifo_reset - * Bit 31: xg_egress_afifo_reset - */ - -#define netxen_xg_soft_reset(config_word) \ - ((config_word) |= 1 << 4) - -typedef struct { - unsigned valid; - unsigned start_128M; - unsigned end_128M; - unsigned start_2M; -} crb_128M_2M_sub_block_map_t; - -typedef struct { - crb_128M_2M_sub_block_map_t sub_block[16]; -} crb_128M_2M_block_map_t; - -#endif /* __NETXEN_NIC_HW_H_ */ diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c deleted file mode 100644 index d6c6357..0000000 --- a/drivers/net/netxen/netxen_nic_init.c +++ /dev/null @@ -1,1949 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, - * MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution - * in the file called "COPYING". 
- * - */ - -#include -#include -#include -#include -#include "netxen_nic.h" -#include "netxen_nic_hw.h" - -struct crb_addr_pair { - u32 addr; - u32 data; -}; - -#define NETXEN_MAX_CRB_XFORM 60 -static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM]; -#define NETXEN_ADDR_ERROR (0xffffffff) - -#define crb_addr_transform(name) \ - crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \ - NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20 - -#define NETXEN_NIC_XDMA_RESET 0x8000ff - -static void -netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, - struct nx_host_rds_ring *rds_ring); -static int netxen_p3_has_mn(struct netxen_adapter *adapter); - -static void crb_addr_transform_setup(void) -{ - crb_addr_transform(XDMA); - crb_addr_transform(TIMR); - crb_addr_transform(SRE); - crb_addr_transform(SQN3); - crb_addr_transform(SQN2); - crb_addr_transform(SQN1); - crb_addr_transform(SQN0); - crb_addr_transform(SQS3); - crb_addr_transform(SQS2); - crb_addr_transform(SQS1); - crb_addr_transform(SQS0); - crb_addr_transform(RPMX7); - crb_addr_transform(RPMX6); - crb_addr_transform(RPMX5); - crb_addr_transform(RPMX4); - crb_addr_transform(RPMX3); - crb_addr_transform(RPMX2); - crb_addr_transform(RPMX1); - crb_addr_transform(RPMX0); - crb_addr_transform(ROMUSB); - crb_addr_transform(SN); - crb_addr_transform(QMN); - crb_addr_transform(QMS); - crb_addr_transform(PGNI); - crb_addr_transform(PGND); - crb_addr_transform(PGN3); - crb_addr_transform(PGN2); - crb_addr_transform(PGN1); - crb_addr_transform(PGN0); - crb_addr_transform(PGSI); - crb_addr_transform(PGSD); - crb_addr_transform(PGS3); - crb_addr_transform(PGS2); - crb_addr_transform(PGS1); - crb_addr_transform(PGS0); - crb_addr_transform(PS); - crb_addr_transform(PH); - crb_addr_transform(NIU); - crb_addr_transform(I2Q); - crb_addr_transform(EG); - crb_addr_transform(MN); - crb_addr_transform(MS); - crb_addr_transform(CAS2); - crb_addr_transform(CAS1); - crb_addr_transform(CAS0); - crb_addr_transform(CAM); - crb_addr_transform(C2C1); - crb_addr_transform(C2C0); - crb_addr_transform(SMB); - crb_addr_transform(OCM0); - crb_addr_transform(I2C0); -} - -void netxen_release_rx_buffers(struct netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct netxen_rx_buffer *rx_buf; - int i, ring; - - recv_ctx = &adapter->recv_ctx; - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - for (i = 0; i < rds_ring->num_desc; ++i) { - rx_buf = &(rds_ring->rx_buf_arr[i]); - if (rx_buf->state == NETXEN_BUFFER_FREE) - continue; - pci_unmap_single(adapter->pdev, - rx_buf->dma, - rds_ring->dma_size, - PCI_DMA_FROMDEVICE); - if (rx_buf->skb != NULL) - dev_kfree_skb_any(rx_buf->skb); - } - } -} - -void netxen_release_tx_buffers(struct netxen_adapter *adapter) -{ - struct netxen_cmd_buffer *cmd_buf; - struct netxen_skb_frag *buffrag; - int i, j; - struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - - cmd_buf = tx_ring->cmd_buf_arr; - for (i = 0; i < tx_ring->num_desc; i++) { - buffrag = cmd_buf->frag_array; - if (buffrag->dma) { - pci_unmap_single(adapter->pdev, buffrag->dma, - buffrag->length, PCI_DMA_TODEVICE); - buffrag->dma = 0ULL; - } - for (j = 0; j < cmd_buf->frag_count; j++) { - buffrag++; - if (buffrag->dma) { - pci_unmap_page(adapter->pdev, buffrag->dma, - buffrag->length, - PCI_DMA_TODEVICE); - buffrag->dma = 0ULL; - } - } - if (cmd_buf->skb) { - dev_kfree_skb_any(cmd_buf->skb); - cmd_buf->skb = NULL; - } - cmd_buf++; - } -} - -void netxen_free_sw_resources(struct 
netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct nx_host_tx_ring *tx_ring; - int ring; - - recv_ctx = &adapter->recv_ctx; - - if (recv_ctx->rds_rings == NULL) - goto skip_rds; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - vfree(rds_ring->rx_buf_arr); - rds_ring->rx_buf_arr = NULL; - } - kfree(recv_ctx->rds_rings); - -skip_rds: - if (adapter->tx_ring == NULL) - return; - - tx_ring = adapter->tx_ring; - vfree(tx_ring->cmd_buf_arr); - kfree(tx_ring); - adapter->tx_ring = NULL; -} - -int netxen_alloc_sw_resources(struct netxen_adapter *adapter) -{ - struct netxen_recv_context *recv_ctx; - struct nx_host_rds_ring *rds_ring; - struct nx_host_sds_ring *sds_ring; - struct nx_host_tx_ring *tx_ring; - struct netxen_rx_buffer *rx_buf; - int ring, i, size; - - struct netxen_cmd_buffer *cmd_buf_arr; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - size = sizeof(struct nx_host_tx_ring); - tx_ring = kzalloc(size, GFP_KERNEL); - if (tx_ring == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n", - netdev->name); - return -ENOMEM; - } - adapter->tx_ring = tx_ring; - - tx_ring->num_desc = adapter->num_txd; - tx_ring->txq = netdev_get_tx_queue(netdev, 0); - - cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); - if (cmd_buf_arr == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", - netdev->name); - goto err_out; - } - tx_ring->cmd_buf_arr = cmd_buf_arr; - - recv_ctx = &adapter->recv_ctx; - - size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring); - rds_ring = kzalloc(size, GFP_KERNEL); - if (rds_ring == NULL) { - dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n", - netdev->name); - goto err_out; - } - recv_ctx->rds_rings = rds_ring; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - switch (ring) { - case RCV_RING_NORMAL: - rds_ring->num_desc = adapter->num_rxd; - if (adapter->ahw.cut_through) { - rds_ring->dma_size = - NX_CT_DEFAULT_RX_BUF_LEN; - rds_ring->skb_size = - NX_CT_DEFAULT_RX_BUF_LEN; - } else { - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - rds_ring->dma_size = - NX_P3_RX_BUF_MAX_LEN; - else - rds_ring->dma_size = - NX_P2_RX_BUF_MAX_LEN; - rds_ring->skb_size = - rds_ring->dma_size + NET_IP_ALIGN; - } - break; - - case RCV_RING_JUMBO: - rds_ring->num_desc = adapter->num_jumbo_rxd; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - rds_ring->dma_size = - NX_P3_RX_JUMBO_BUF_MAX_LEN; - else - rds_ring->dma_size = - NX_P2_RX_JUMBO_BUF_MAX_LEN; - - if (adapter->capabilities & NX_CAP0_HW_LRO) - rds_ring->dma_size += NX_LRO_BUFFER_EXTRA; - - rds_ring->skb_size = - rds_ring->dma_size + NET_IP_ALIGN; - break; - - case RCV_RING_LRO: - rds_ring->num_desc = adapter->num_lro_rxd; - rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH; - rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; - break; - - } - rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); - if (rds_ring->rx_buf_arr == NULL) { - printk(KERN_ERR "%s: Failed to allocate " - "rx buffer ring %d\n", - netdev->name, ring); - /* free whatever was already allocated */ - goto err_out; - } - INIT_LIST_HEAD(&rds_ring->free_list); - /* - * Now go through all of them, set reference handles - * and put them in the queues. 
- */
-		rx_buf = rds_ring->rx_buf_arr;
-		for (i = 0; i < rds_ring->num_desc; i++) {
-			list_add_tail(&rx_buf->list,
-					&rds_ring->free_list);
-			rx_buf->ref_handle = i;
-			rx_buf->state = NETXEN_BUFFER_FREE;
-			rx_buf++;
-		}
-		spin_lock_init(&rds_ring->lock);
-	}
-
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-		sds_ring = &recv_ctx->sds_rings[ring];
-		sds_ring->irq = adapter->msix_entries[ring].vector;
-		sds_ring->adapter = adapter;
-		sds_ring->num_desc = adapter->num_rxd;
-
-		for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
-			INIT_LIST_HEAD(&sds_ring->free_list[i]);
-	}
-
-	return 0;
-
-err_out:
-	netxen_free_sw_resources(adapter);
-	return -ENOMEM;
-}
-
-/*
- * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
- * address to external PCI CRB address.
- */
-static u32 netxen_decode_crb_addr(u32 addr)
-{
-	int i;
-	u32 base_addr, offset, pci_base;
-
-	crb_addr_transform_setup();
-
-	pci_base = NETXEN_ADDR_ERROR;
-	base_addr = addr & 0xfff00000;
-	offset = addr & 0x000fffff;
-
-	for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
-		if (crb_addr_xform[i] == base_addr) {
-			pci_base = i << 20;
-			break;
-		}
-	}
-	if (pci_base == NETXEN_ADDR_ERROR)
-		return pci_base;
-	else
-		return pci_base + offset;
-}
-
-#define NETXEN_MAX_ROM_WAIT_USEC	100
-
-static int netxen_wait_rom_done(struct netxen_adapter *adapter)
-{
-	long timeout = 0;
-	long done = 0;
-
-	cond_resched();
-
-	while (done == 0) {
-		done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
-		done &= 2;
-		if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
-			dev_err(&adapter->pdev->dev,
-				"Timeout reached waiting for rom done\n");
-			return -EIO;
-		}
-		udelay(1);
-	}
-	return 0;
-}
-
-static int do_rom_fast_read(struct netxen_adapter *adapter,
-			    int addr, int *valp)
-{
-	NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
-	NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
-	NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
-	NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
-	if (netxen_wait_rom_done(adapter)) {
-		printk(KERN_ERR "Error waiting for rom done\n");
-		return -EIO;
-	}
-	/* reset abyte_cnt and dummy_byte_cnt */
-	NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
-	udelay(10);
-	NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
-
-	*valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
-	return 0;
-}
-
-static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
-				  u8 *bytes, size_t size)
-{
-	int addridx;
-	int ret = 0;
-
-	for (addridx = addr; addridx < (addr + size); addridx += 4) {
-		int v;
-		ret = do_rom_fast_read(adapter, addridx, &v);
-		if (ret != 0)
-			break;
-		*(__le32 *)bytes = cpu_to_le32(v);
-		bytes += 4;
-	}
-
-	return ret;
-}
-
-int
-netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
-			   u8 *bytes, size_t size)
-{
-	int ret;
-
-	ret = netxen_rom_lock(adapter);
-	if (ret < 0)
-		return ret;
-
-	ret = do_rom_fast_read_words(adapter, addr, bytes, size);
-
-	netxen_rom_unlock(adapter);
-	return ret;
-}
-
-int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
-{
-	int ret;
-
-	if (netxen_rom_lock(adapter) != 0)
-		return -EIO;
-
-	ret = do_rom_fast_read(adapter, addr, valp);
-	netxen_rom_unlock(adapter);
-	return ret;
-}
-
-#define NETXEN_BOARDTYPE	0x4008
-#define NETXEN_BOARDNUM		0x400c
-#define NETXEN_CHIPNUM		0x4010
-
-int netxen_pinit_from_rom(struct netxen_adapter *adapter)
-{
-	int addr, val;
-	int i, n, init_delay = 0;
-	struct crb_addr_pair *buf;
-	unsigned offset;
-	u32 off;
-
-	/* resetall */
-	netxen_rom_lock(adapter);
-	NXWR32(adapter,
NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff); - netxen_rom_unlock(adapter); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - if (netxen_rom_fast_read(adapter, 0, &n) != 0 || - (n != 0xcafecafe) || - netxen_rom_fast_read(adapter, 4, &n) != 0) { - printk(KERN_ERR "%s: ERROR Reading crb_init area: " - "n: %08x\n", netxen_nic_driver_name, n); - return -EIO; - } - offset = n & 0xffffU; - n = (n >> 16) & 0xffffU; - } else { - if (netxen_rom_fast_read(adapter, 0, &n) != 0 || - !(n & 0x80000000)) { - printk(KERN_ERR "%s: ERROR Reading crb_init area: " - "n: %08x\n", netxen_nic_driver_name, n); - return -EIO; - } - offset = 1; - n &= ~0x80000000; - } - - if (n >= 1024) { - printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not" - " initialized.\n", __func__, n); - return -EIO; - } - - buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); - if (buf == NULL) { - printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n", - netxen_nic_driver_name); - return -ENOMEM; - } - - for (i = 0; i < n; i++) { - if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || - netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { - kfree(buf); - return -EIO; - } - - buf[i].addr = addr; - buf[i].data = val; - - } - - for (i = 0; i < n; i++) { - - off = netxen_decode_crb_addr(buf[i].addr); - if (off == NETXEN_ADDR_ERROR) { - printk(KERN_ERR"CRB init value out of range %x\n", - buf[i].addr); - continue; - } - off += NETXEN_PCI_CRBSPACE; - - if (off & 1) - continue; - - /* skipping cold reboot MAGIC */ - if (off == NETXEN_CAM_RAM(0x1fc)) - continue; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - if (off == (NETXEN_CRB_I2C0 + 0x1c)) - continue; - /* do not reset PCI */ - if (off == (ROMUSB_GLB + 0xbc)) - continue; - if (off == (ROMUSB_GLB + 0xa8)) - continue; - if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ - continue; - if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ - continue; - if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ - continue; - if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET) - continue; - if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) && - !NX_IS_REVISION_P3P(adapter->ahw.revision_id)) - buf[i].data = 0x1020; - /* skip the function enable register */ - if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION)) - continue; - if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2)) - continue; - if ((off & 0x0ff00000) == NETXEN_CRB_SMB) - continue; - } - - init_delay = 1; - /* After writing this register, HW needs time for CRB */ - /* to quiet down (else crb_window returns 0xffffffff) */ - if (off == NETXEN_ROMUSB_GLB_SW_RESET) { - init_delay = 1000; - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - /* hold xdma in reset also */ - buf[i].data = NETXEN_NIC_XDMA_RESET; - buf[i].data = 0x8000ff; - } - } - - NXWR32(adapter, off, buf[i].data); - - msleep(init_delay); - } - kfree(buf); - - /* disable_peg_cache_all */ - - /* unreset_net_cache */ - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); - NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f)); - } - - /* p2dn replyCount */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); - /* disable_peg_cache 0 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8); - /* disable_peg_cache 1 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8); - - /* peg_clr_all */ - - /* peg_clr 0 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0); - NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0); - /* peg_clr 1 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0); - NXWR32(adapter, NETXEN_CRB_PEG_NET_1 
+ 0xc, 0); - /* peg_clr 2 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0); - NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0); - /* peg_clr 3 */ - NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0); - NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0); - return 0; -} - -static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section) -{ - uint32_t i; - struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; - __le32 entries = cpu_to_le32(directory->num_entries); - - for (i = 0; i < entries; i++) { - - __le32 offs = cpu_to_le32(directory->findex) + - (i * cpu_to_le32(directory->entry_size)); - __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); - - if (tab_type == section) - return (struct uni_table_desc *) &unirom[offs]; - } - - return NULL; -} - -#define QLCNIC_FILEHEADER_SIZE (14 * 4) - -static int -netxen_nic_validate_header(struct netxen_adapter *adapter) - { - const u8 *unirom = adapter->fw->data; - struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; - u32 fw_file_size = adapter->fw->size; - u32 tab_size; - __le32 entries; - __le32 entry_size; - - if (fw_file_size < QLCNIC_FILEHEADER_SIZE) - return -EINVAL; - - entries = cpu_to_le32(directory->num_entries); - entry_size = cpu_to_le32(directory->entry_size); - tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); - - if (fw_file_size < tab_size) - return -EINVAL; - - return 0; -} - -static int -netxen_nic_validate_bootld(struct netxen_adapter *adapter) -{ - struct uni_table_desc *tab_desc; - struct uni_data_desc *descr; - const u8 *unirom = adapter->fw->data; - __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - NX_UNI_BOOTLD_IDX_OFF)); - u32 offs; - u32 tab_size; - u32 data_size; - - tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD); - - if (!tab_desc) - return -EINVAL; - - tab_size = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); - - if (adapter->fw->size < tab_size) - return -EINVAL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx)); - descr = (struct uni_data_desc *)&unirom[offs]; - - data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); - - if (adapter->fw->size < data_size) - return -EINVAL; - - return 0; -} - -static int -netxen_nic_validate_fw(struct netxen_adapter *adapter) -{ - struct uni_table_desc *tab_desc; - struct uni_data_desc *descr; - const u8 *unirom = adapter->fw->data; - __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - NX_UNI_FIRMWARE_IDX_OFF)); - u32 offs; - u32 tab_size; - u32 data_size; - - tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW); - - if (!tab_desc) - return -EINVAL; - - tab_size = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); - - if (adapter->fw->size < tab_size) - return -EINVAL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx)); - descr = (struct uni_data_desc *)&unirom[offs]; - data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); - - if (adapter->fw->size < data_size) - return -EINVAL; - - return 0; -} - - -static int -netxen_nic_validate_product_offs(struct netxen_adapter *adapter) -{ - struct uni_table_desc *ptab_descr; - const u8 *unirom = adapter->fw->data; - int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ? 
- 1 : netxen_p3_has_mn(adapter); - __le32 entries; - __le32 entry_size; - u32 tab_size; - u32 i; - - ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL); - if (ptab_descr == NULL) - return -EINVAL; - - entries = cpu_to_le32(ptab_descr->num_entries); - entry_size = cpu_to_le32(ptab_descr->entry_size); - tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); - - if (adapter->fw->size < tab_size) - return -EINVAL; - -nomn: - for (i = 0; i < entries; i++) { - - __le32 flags, file_chiprev, offs; - u8 chiprev = adapter->ahw.revision_id; - uint32_t flagbit; - - offs = cpu_to_le32(ptab_descr->findex) + - (i * cpu_to_le32(ptab_descr->entry_size)); - flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF)); - file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + - NX_UNI_CHIP_REV_OFF)); - - flagbit = mn_present ? 1 : 2; - - if ((chiprev == file_chiprev) && - ((1ULL << flagbit) & flags)) { - adapter->file_prd_off = offs; - return 0; - } - } - - if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - mn_present = 0; - goto nomn; - } - - return -EINVAL; -} - -static int -netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter) -{ - if (netxen_nic_validate_header(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: header validation failed\n"); - return -EINVAL; - } - - if (netxen_nic_validate_product_offs(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: product validation failed\n"); - return -EINVAL; - } - - if (netxen_nic_validate_bootld(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: bootld validation failed\n"); - return -EINVAL; - } - - if (netxen_nic_validate_fw(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: firmware validation failed\n"); - return -EINVAL; - } - - return 0; -} - -static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter, - u32 section, u32 idx_offset) -{ - const u8 *unirom = adapter->fw->data; - int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - idx_offset)); - struct uni_table_desc *tab_desc; - __le32 offs; - - tab_desc = nx_get_table_desc(unirom, section); - - if (tab_desc == NULL) - return NULL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * idx); - - return (struct uni_data_desc *)&unirom[offs]; -} - -static u8 * -nx_get_bootld_offs(struct netxen_adapter *adapter) -{ - u32 offs = NETXEN_BOOTLD_START; - - if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) - offs = cpu_to_le32((nx_get_data_desc(adapter, - NX_UNI_DIR_SECT_BOOTLD, - NX_UNI_BOOTLD_IDX_OFF))->findex); - - return (u8 *)&adapter->fw->data[offs]; -} - -static u8 * -nx_get_fw_offs(struct netxen_adapter *adapter) -{ - u32 offs = NETXEN_IMAGE_START; - - if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) - offs = cpu_to_le32((nx_get_data_desc(adapter, - NX_UNI_DIR_SECT_FW, - NX_UNI_FIRMWARE_IDX_OFF))->findex); - - return (u8 *)&adapter->fw->data[offs]; -} - -static __le32 -nx_get_fw_size(struct netxen_adapter *adapter) -{ - if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) - return cpu_to_le32((nx_get_data_desc(adapter, - NX_UNI_DIR_SECT_FW, - NX_UNI_FIRMWARE_IDX_OFF))->size); - else - return cpu_to_le32( - *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]); -} - -static __le32 -nx_get_fw_version(struct netxen_adapter *adapter) -{ - struct uni_data_desc *fw_data_desc; - const struct firmware *fw = adapter->fw; - __le32 major, minor, sub; - const u8 *ver_str; - int i, ret = 0; - - if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { - - fw_data_desc = nx_get_data_desc(adapter, 
-				NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
-		ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
-				cpu_to_le32(fw_data_desc->size) - 17;
-
-		for (i = 0; i < 12; i++) {
-			if (!strncmp(&ver_str[i], "REV=", 4)) {
-				ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
-						&major, &minor, &sub);
-				break;
-			}
-		}
-
-		if (ret != 3)
-			return 0;
-
-		return major + (minor << 8) + (sub << 16);
-
-	} else
-		return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
-}
-
-static __le32
-nx_get_bios_version(struct netxen_adapter *adapter)
-{
-	const struct firmware *fw = adapter->fw;
-	__le32 bios_ver, prd_off = adapter->file_prd_off;
-
-	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
-		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
-						+ NX_UNI_BIOS_VERSION_OFF));
-		return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
-							(bios_ver >> 24);
-	} else
-		return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
-
-}
-
-int
-netxen_need_fw_reset(struct netxen_adapter *adapter)
-{
-	u32 count, old_count;
-	u32 val, version, major, minor, build;
-	int i, timeout;
-	u8 fw_type;
-
-	/* NX2031 firmware doesn't support heartbeat */
-	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
-		return 1;
-
-	if (adapter->need_fw_reset)
-		return 1;
-
-	/* last attempt had failed */
-	if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
-		return 1;
-
-	old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
-
-	for (i = 0; i < 10; i++) {
-
-		timeout = msleep_interruptible(200);
-		if (timeout) {
-			NXWR32(adapter, CRB_CMDPEG_STATE,
-					PHAN_INITIALIZE_FAILED);
-			return -EINTR;
-		}
-
-		count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
-		if (count != old_count)
-			break;
-	}
-
-	/* firmware is dead */
-	if (count == old_count)
-		return 1;
-
-	/* check if we have got newer or different file firmware */
-	if (adapter->fw) {
-
-		val = nx_get_fw_version(adapter);
-
-		version = NETXEN_DECODE_VERSION(val);
-
-		major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
-		minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
-		build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
-
-		if (version > NETXEN_VERSION_CODE(major, minor, build))
-			return 1;
-
-		if (version == NETXEN_VERSION_CODE(major, minor, build) &&
-			adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
-
-			val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
-			fw_type = (val & 0x4) ?
-				NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
-
-			if (adapter->fw_type != fw_type)
-				return 1;
-		}
-	}
-
-	return 0;
-}
-
-#define NETXEN_MIN_P3_FW_SUPP	NETXEN_VERSION_CODE(4, 0, 505)
-
-int
-netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
-{
-	u32 flash_fw_ver, min_fw_ver;
-
-	if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
-		return 0;
-
-	if (netxen_rom_fast_read(adapter,
-			NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
-		dev_err(&adapter->pdev->dev, "Unable to read flash fw "
-			"version\n");
-		return -EIO;
-	}
-
-	flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
-	min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
-	if (flash_fw_ver >= min_fw_ver)
-		return 0;
-
-	dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported "
-		"[4.0.505]. 
Please update firmware on flash\n", - _major(flash_fw_ver), _minor(flash_fw_ver), - _build(flash_fw_ver)); - return -EINVAL; -} - -static char *fw_name[] = { - NX_P2_MN_ROMIMAGE_NAME, - NX_P3_CT_ROMIMAGE_NAME, - NX_P3_MN_ROMIMAGE_NAME, - NX_UNIFIED_ROMIMAGE_NAME, - NX_FLASH_ROMIMAGE_NAME, -}; - -int -netxen_load_firmware(struct netxen_adapter *adapter) -{ - u64 *ptr64; - u32 i, flashaddr, size; - const struct firmware *fw = adapter->fw; - struct pci_dev *pdev = adapter->pdev; - - dev_info(&pdev->dev, "loading firmware from %s\n", - fw_name[adapter->fw_type]); - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1); - - if (fw) { - __le64 data; - - size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; - - ptr64 = (u64 *)nx_get_bootld_offs(adapter); - flashaddr = NETXEN_BOOTLD_START; - - for (i = 0; i < size; i++) { - data = cpu_to_le64(ptr64[i]); - - if (adapter->pci_mem_write(adapter, flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - - size = (__force u32)nx_get_fw_size(adapter) / 8; - - ptr64 = (u64 *)nx_get_fw_offs(adapter); - flashaddr = NETXEN_IMAGE_START; - - for (i = 0; i < size; i++) { - data = cpu_to_le64(ptr64[i]); - - if (adapter->pci_mem_write(adapter, - flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - - size = (__force u32)nx_get_fw_size(adapter) % 8; - if (size) { - data = cpu_to_le64(ptr64[i]); - - if (adapter->pci_mem_write(adapter, - flashaddr, data)) - return -EIO; - } - - } else { - u64 data; - u32 hi, lo; - - size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; - flashaddr = NETXEN_BOOTLD_START; - - for (i = 0; i < size; i++) { - if (netxen_rom_fast_read(adapter, - flashaddr, (int *)&lo) != 0) - return -EIO; - if (netxen_rom_fast_read(adapter, - flashaddr + 4, (int *)&hi) != 0) - return -EIO; - - /* hi, lo are already in host endian byteorder */ - data = (((u64)hi << 32) | lo); - - if (adapter->pci_mem_write(adapter, - flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - } - msleep(1); - - if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { - NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020); - NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e); - } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d); - else { - NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); - NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0); - } - - return 0; -} - -static int -netxen_validate_firmware(struct netxen_adapter *adapter) -{ - __le32 val; - __le32 flash_fw_ver; - u32 file_fw_ver, min_ver, bios; - struct pci_dev *pdev = adapter->pdev; - const struct firmware *fw = adapter->fw; - u8 fw_type = adapter->fw_type; - u32 crbinit_fix_fw; - - if (fw_type == NX_UNIFIED_ROMIMAGE) { - if (netxen_nic_validate_unified_romimage(adapter)) - return -EINVAL; - } else { - val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); - if ((__force u32)val != NETXEN_BDINFO_MAGIC) - return -EINVAL; - - if (fw->size < NX_FW_MIN_SIZE) - return -EINVAL; - } - - val = nx_get_fw_version(adapter); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - min_ver = NETXEN_MIN_P3_FW_SUPP; - else - min_ver = NETXEN_VERSION_CODE(3, 4, 216); - - file_fw_ver = NETXEN_DECODE_VERSION(val); - - if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) || - (file_fw_ver < min_ver)) { - dev_err(&pdev->dev, - "%s: firmware version %d.%d.%d unsupported\n", - fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver), - _build(file_fw_ver)); - return -EINVAL; - } - - val = nx_get_bios_version(adapter); 
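A note on the version arithmetic used throughout this file: nx_get_fw_version() above returns the raw on-flash layout, major + (minor << 8) + (sub << 16), while the comparisons in netxen_need_fw_reset() and netxen_validate_firmware() go through NETXEN_DECODE_VERSION()/NETXEN_VERSION_CODE(), whose definitions live in netxen_nic.h and are not part of this hunk. The standalone sketch below is illustrative only; pack_comparable() is an assumption about what any monotonically comparable code must look like, not the driver's exact macro. It shows why the raw layout cannot be compared directly and a most-significant-field-first re-packing can:

	/* Illustrative sketch, not driver code. Assumes 8-bit major/minor
	 * and a build number below 2^16, as the raw layout implies. */
	#include <stdint.h>
	#include <stdio.h>

	/* Raw on-flash layout, as returned by nx_get_fw_version() */
	static uint32_t pack_raw(uint32_t major, uint32_t minor, uint32_t sub)
	{
		return major + (minor << 8) + (sub << 16);
	}

	/* Comparable code: most-significant field in the highest bits */
	static uint32_t pack_comparable(uint32_t major, uint32_t minor,
					uint32_t build)
	{
		return (major << 24) + (minor << 16) + build;
	}

	int main(void)
	{
		/* 4.1.0 is newer than 4.0.505, but the raw layout sorts
		 * it lower; the re-packed code sorts it correctly. */
		printf("raw: %d\n",
		       pack_raw(4, 1, 0) > pack_raw(4, 0, 505)); /* 0 */
		printf("comparable: %d\n",
		       pack_comparable(4, 1, 0) >
		       pack_comparable(4, 0, 505)); /* 1 */
		return 0;
	}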
- netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); - if ((__force u32)val != bios) { - dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", - fw_name[fw_type]); - return -EINVAL; - } - - if (netxen_rom_fast_read(adapter, - NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) { - dev_err(&pdev->dev, "Unable to read flash fw version\n"); - return -EIO; - } - flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); - - /* New fw from file is not allowed, if fw on flash is < 4.0.554 */ - crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554); - if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw && - NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - dev_err(&pdev->dev, "Incompatibility detected between driver " - "and firmware version on flash. This configuration " - "is not recommended. Please update the firmware on " - "flash immediately\n"); - return -EINVAL; - } - - /* check if flashed firmware is newer only for no-mn and P2 case*/ - if (!netxen_p3_has_mn(adapter) || - NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - if (flash_fw_ver > file_fw_ver) { - dev_info(&pdev->dev, "%s: firmware is older than flash\n", - fw_name[fw_type]); - return -EINVAL; - } - } - - NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); - return 0; -} - -static void -nx_get_next_fwtype(struct netxen_adapter *adapter) -{ - u8 fw_type; - - switch (adapter->fw_type) { - case NX_UNKNOWN_ROMIMAGE: - fw_type = NX_UNIFIED_ROMIMAGE; - break; - - case NX_UNIFIED_ROMIMAGE: - if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) - fw_type = NX_FLASH_ROMIMAGE; - else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - fw_type = NX_P2_MN_ROMIMAGE; - else if (netxen_p3_has_mn(adapter)) - fw_type = NX_P3_MN_ROMIMAGE; - else - fw_type = NX_P3_CT_ROMIMAGE; - break; - - case NX_P3_MN_ROMIMAGE: - fw_type = NX_P3_CT_ROMIMAGE; - break; - - case NX_P2_MN_ROMIMAGE: - case NX_P3_CT_ROMIMAGE: - default: - fw_type = NX_FLASH_ROMIMAGE; - break; - } - - adapter->fw_type = fw_type; -} - -static int -netxen_p3_has_mn(struct netxen_adapter *adapter) -{ - u32 capability, flashed_ver; - capability = 0; - - /* NX2031 always had MN */ - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 1; - - netxen_rom_fast_read(adapter, - NX_FW_VERSION_OFFSET, (int *)&flashed_ver); - flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); - - if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) { - - capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY); - if (capability & NX_PEG_TUNE_MN_PRESENT) - return 1; - } - return 0; -} - -void netxen_request_firmware(struct netxen_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - int rc = 0; - - adapter->fw_type = NX_UNKNOWN_ROMIMAGE; - -next: - nx_get_next_fwtype(adapter); - - if (adapter->fw_type == NX_FLASH_ROMIMAGE) { - adapter->fw = NULL; - } else { - rc = request_firmware(&adapter->fw, - fw_name[adapter->fw_type], &pdev->dev); - if (rc != 0) - goto next; - - rc = netxen_validate_firmware(adapter); - if (rc != 0) { - release_firmware(adapter->fw); - msleep(1); - goto next; - } - } -} - - -void -netxen_release_firmware(struct netxen_adapter *adapter) -{ - if (adapter->fw) - release_firmware(adapter->fw); - adapter->fw = NULL; -} - -int netxen_init_dummy_dma(struct netxen_adapter *adapter) -{ - u64 addr; - u32 hi, lo; - - if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 0; - - adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev, - NETXEN_HOST_DUMMY_DMA_SIZE, - &adapter->dummy_dma.phys_addr); - if (adapter->dummy_dma.addr == NULL) { - dev_err(&adapter->pdev->dev, - 
"ERROR: Could not allocate dummy DMA memory\n"); - return -ENOMEM; - } - - addr = (uint64_t) adapter->dummy_dma.phys_addr; - hi = (addr >> 32) & 0xffffffff; - lo = addr & 0xffffffff; - - NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi); - NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo); - - return 0; -} - -/* - * NetXen DMA watchdog control: - * - * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive - * Bit 1 : disable_request => 1 req disable dma watchdog - * Bit 2 : enable_request => 1 req enable dma watchdog - * Bit 3-31 : unused - */ -void netxen_free_dummy_dma(struct netxen_adapter *adapter) -{ - int i = 100; - u32 ctrl; - - if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return; - - if (!adapter->dummy_dma.addr) - return; - - ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); - if ((ctrl & 0x1) != 0) { - NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2)); - - while ((ctrl & 0x1) != 0) { - - msleep(50); - - ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); - - if (--i == 0) - break; - } - } - - if (i) { - pci_free_consistent(adapter->pdev, - NETXEN_HOST_DUMMY_DMA_SIZE, - adapter->dummy_dma.addr, - adapter->dummy_dma.phys_addr); - adapter->dummy_dma.addr = NULL; - } else - dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n"); -} - -int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) -{ - u32 val = 0; - int retries = 60; - - if (pegtune_val) - return 0; - - do { - val = NXRD32(adapter, CRB_CMDPEG_STATE); - - switch (val) { - case PHAN_INITIALIZE_COMPLETE: - case PHAN_INITIALIZE_ACK: - return 0; - case PHAN_INITIALIZE_FAILED: - goto out_err; - default: - break; - } - - msleep(500); - - } while (--retries); - - NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); - -out_err: - dev_warn(&adapter->pdev->dev, "firmware init failed\n"); - return -EIO; -} - -static int -netxen_receive_peg_ready(struct netxen_adapter *adapter) -{ - u32 val = 0; - int retries = 2000; - - do { - val = NXRD32(adapter, CRB_RCVPEG_STATE); - - if (val == PHAN_PEG_RCV_INITIALIZED) - return 0; - - msleep(10); - - } while (--retries); - - if (!retries) { - printk(KERN_ERR "Receive Peg initialization not " - "complete, state: 0x%x.\n", val); - return -EIO; - } - - return 0; -} - -int netxen_init_firmware(struct netxen_adapter *adapter) -{ - int err; - - err = netxen_receive_peg_ready(adapter); - if (err) - return err; - - NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT); - NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE); - NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC); - - return err; -} - -static void -netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg) -{ - u32 cable_OUI; - u16 cable_len; - u16 link_speed; - u8 link_status, module, duplex, autoneg; - struct net_device *netdev = adapter->netdev; - - adapter->has_link_events = 1; - - cable_OUI = msg->body[1] & 0xffffffff; - cable_len = (msg->body[1] >> 32) & 0xffff; - link_speed = (msg->body[1] >> 48) & 0xffff; - - link_status = msg->body[2] & 0xff; - duplex = (msg->body[2] >> 16) & 0xff; - autoneg = (msg->body[2] >> 24) & 0xff; - - module = (msg->body[2] >> 8) & 0xff; - if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) { - printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n", - netdev->name, cable_OUI, cable_len); - } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) { - printk(KERN_INFO "%s: unsupported cable length 
%d\n", - netdev->name, cable_len); - } - - netxen_advert_link_change(adapter, link_status); - - /* update link parameters */ - if (duplex == LINKEVENT_FULL_DUPLEX) - adapter->link_duplex = DUPLEX_FULL; - else - adapter->link_duplex = DUPLEX_HALF; - adapter->module_type = module; - adapter->link_autoneg = autoneg; - adapter->link_speed = link_speed; -} - -static void -netxen_handle_fw_message(int desc_cnt, int index, - struct nx_host_sds_ring *sds_ring) -{ - nx_fw_msg_t msg; - struct status_desc *desc; - int i = 0, opcode; - - while (desc_cnt > 0 && i < 8) { - desc = &sds_ring->desc_head[index]; - msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); - msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); - - index = get_next_index(index, sds_ring->num_desc); - desc_cnt--; - } - - opcode = netxen_get_nic_msg_opcode(msg.body[0]); - switch (opcode) { - case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: - netxen_handle_linkevent(sds_ring->adapter, &msg); - break; - default: - break; - } -} - -static int -netxen_alloc_rx_skb(struct netxen_adapter *adapter, - struct nx_host_rds_ring *rds_ring, - struct netxen_rx_buffer *buffer) -{ - struct sk_buff *skb; - dma_addr_t dma; - struct pci_dev *pdev = adapter->pdev; - - buffer->skb = dev_alloc_skb(rds_ring->skb_size); - if (!buffer->skb) - return 1; - - skb = buffer->skb; - - if (!adapter->ahw.cut_through) - skb_reserve(skb, 2); - - dma = pci_map_single(pdev, skb->data, - rds_ring->dma_size, PCI_DMA_FROMDEVICE); - - if (pci_dma_mapping_error(pdev, dma)) { - dev_kfree_skb_any(skb); - buffer->skb = NULL; - return 1; - } - - buffer->skb = skb; - buffer->dma = dma; - buffer->state = NETXEN_BUFFER_BUSY; - - return 0; -} - -static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter, - struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum) -{ - struct netxen_rx_buffer *buffer; - struct sk_buff *skb; - - buffer = &rds_ring->rx_buf_arr[index]; - - pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, - PCI_DMA_FROMDEVICE); - - skb = buffer->skb; - if (!skb) - goto no_skb; - - if (likely((adapter->netdev->features & NETIF_F_RXCSUM) - && cksum == STATUS_CKSUM_OK)) { - adapter->stats.csummed++; - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else - skb->ip_summed = CHECKSUM_NONE; - - skb->dev = adapter->netdev; - - buffer->skb = NULL; -no_skb: - buffer->state = NETXEN_BUFFER_FREE; - return skb; -} - -static struct netxen_rx_buffer * -netxen_process_rcv(struct netxen_adapter *adapter, - struct nx_host_sds_ring *sds_ring, - int ring, u64 sts_data0) -{ - struct net_device *netdev = adapter->netdev; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - struct netxen_rx_buffer *buffer; - struct sk_buff *skb; - struct nx_host_rds_ring *rds_ring; - int index, length, cksum, pkt_offset; - - if (unlikely(ring >= adapter->max_rds_rings)) - return NULL; - - rds_ring = &recv_ctx->rds_rings[ring]; - - index = netxen_get_sts_refhandle(sts_data0); - if (unlikely(index >= rds_ring->num_desc)) - return NULL; - - buffer = &rds_ring->rx_buf_arr[index]; - - length = netxen_get_sts_totallength(sts_data0); - cksum = netxen_get_sts_status(sts_data0); - pkt_offset = netxen_get_sts_pkt_offset(sts_data0); - - skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum); - if (!skb) - return buffer; - - if (length > rds_ring->skb_size) - skb_put(skb, rds_ring->skb_size); - else - skb_put(skb, length); - - - if (pkt_offset) - skb_pull(skb, pkt_offset); - - skb->protocol = eth_type_trans(skb, netdev); - - napi_gro_receive(&sds_ring->napi, skb); - - 
adapter->stats.rx_pkts++; - adapter->stats.rxbytes += length; - - return buffer; -} - -#define TCP_HDR_SIZE 20 -#define TCP_TS_OPTION_SIZE 12 -#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE) - -static struct netxen_rx_buffer * -netxen_process_lro(struct netxen_adapter *adapter, - struct nx_host_sds_ring *sds_ring, - int ring, u64 sts_data0, u64 sts_data1) -{ - struct net_device *netdev = adapter->netdev; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - struct netxen_rx_buffer *buffer; - struct sk_buff *skb; - struct nx_host_rds_ring *rds_ring; - struct iphdr *iph; - struct tcphdr *th; - bool push, timestamp; - int l2_hdr_offset, l4_hdr_offset; - int index; - u16 lro_length, length, data_offset; - u32 seq_number; - u8 vhdr_len; - - if (unlikely(ring > adapter->max_rds_rings)) - return NULL; - - rds_ring = &recv_ctx->rds_rings[ring]; - - index = netxen_get_lro_sts_refhandle(sts_data0); - if (unlikely(index > rds_ring->num_desc)) - return NULL; - - buffer = &rds_ring->rx_buf_arr[index]; - - timestamp = netxen_get_lro_sts_timestamp(sts_data0); - lro_length = netxen_get_lro_sts_length(sts_data0); - l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0); - l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0); - push = netxen_get_lro_sts_push_flag(sts_data0); - seq_number = netxen_get_lro_sts_seq_number(sts_data1); - - skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); - if (!skb) - return buffer; - - if (timestamp) - data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE; - else - data_offset = l4_hdr_offset + TCP_HDR_SIZE; - - skb_put(skb, lro_length + data_offset); - - skb_pull(skb, l2_hdr_offset); - skb->protocol = eth_type_trans(skb, netdev); - - if (skb->protocol == htons(ETH_P_8021Q)) - vhdr_len = VLAN_HLEN; - iph = (struct iphdr *)(skb->data + vhdr_len); - th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2)); - - length = (iph->ihl << 2) + (th->doff << 2) + lro_length; - iph->tot_len = htons(length); - iph->check = 0; - iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); - th->psh = push; - th->seq = htonl(seq_number); - - length = skb->len; - - netif_receive_skb(skb); - - adapter->stats.lro_pkts++; - adapter->stats.rxbytes += length; - - return buffer; -} - -#define netxen_merge_rx_buffers(list, head) \ - do { list_splice_tail_init(list, head); } while (0); - -int -netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max) -{ - struct netxen_adapter *adapter = sds_ring->adapter; - - struct list_head *cur; - - struct status_desc *desc; - struct netxen_rx_buffer *rxbuf; - - u32 consumer = sds_ring->consumer; - - int count = 0; - u64 sts_data0, sts_data1; - int opcode, ring = 0, desc_cnt; - - while (count < max) { - desc = &sds_ring->desc_head[consumer]; - sts_data0 = le64_to_cpu(desc->status_desc_data[0]); - - if (!(sts_data0 & STATUS_OWNER_HOST)) - break; - - desc_cnt = netxen_get_sts_desc_cnt(sts_data0); - - opcode = netxen_get_sts_opcode(sts_data0); - - switch (opcode) { - case NETXEN_NIC_RXPKT_DESC: - case NETXEN_OLD_RXPKT_DESC: - case NETXEN_NIC_SYN_OFFLOAD: - ring = netxen_get_sts_type(sts_data0); - rxbuf = netxen_process_rcv(adapter, sds_ring, - ring, sts_data0); - break; - case NETXEN_NIC_LRO_DESC: - ring = netxen_get_lro_sts_type(sts_data0); - sts_data1 = le64_to_cpu(desc->status_desc_data[1]); - rxbuf = netxen_process_lro(adapter, sds_ring, - ring, sts_data0, sts_data1); - break; - case NETXEN_NIC_RESPONSE_DESC: - netxen_handle_fw_message(desc_cnt, consumer, sds_ring); - default: - goto skip; - } - - 
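An aside on the ring bookkeeping in netxen_process_rcv_ring(): consumed status descriptors are handed back to the firmware by rewriting their ownership word to STATUS_OWNER_PHANTOM, and the consumer index advances with get_next_index(), which is defined in netxen_nic.h rather than in this file. Assuming ring sizes are powers of two, which the (num_desc - 1) producer masks used later in this file imply, the advance reduces to a mask instead of a modulo, sketched below:

	/* Sketch only: the real get_next_index() lives in netxen_nic.h.
	 * Assumes num_desc is a power of two, per the (num_desc - 1)
	 * masking seen in netxen_post_rx_buffers(). */
	#include <stdint.h>

	static inline uint32_t ring_next(uint32_t index, uint32_t num_desc)
	{
		return (index + 1) & (num_desc - 1); /* wraps at num_desc */
	}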
WARN_ON(desc_cnt > 1); - - if (rxbuf) - list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); - -skip: - for (; desc_cnt > 0; desc_cnt--) { - desc = &sds_ring->desc_head[consumer]; - desc->status_desc_data[0] = - cpu_to_le64(STATUS_OWNER_PHANTOM); - consumer = get_next_index(consumer, sds_ring->num_desc); - } - count++; - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - struct nx_host_rds_ring *rds_ring = - &adapter->recv_ctx.rds_rings[ring]; - - if (!list_empty(&sds_ring->free_list[ring])) { - list_for_each(cur, &sds_ring->free_list[ring]) { - rxbuf = list_entry(cur, - struct netxen_rx_buffer, list); - netxen_alloc_rx_skb(adapter, rds_ring, rxbuf); - } - spin_lock(&rds_ring->lock); - netxen_merge_rx_buffers(&sds_ring->free_list[ring], - &rds_ring->free_list); - spin_unlock(&rds_ring->lock); - } - - netxen_post_rx_buffers_nodb(adapter, rds_ring); - } - - if (count) { - sds_ring->consumer = consumer; - NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer); - } - - return count; -} - -/* Process Command status ring */ -int netxen_process_cmd_ring(struct netxen_adapter *adapter) -{ - u32 sw_consumer, hw_consumer; - int count = 0, i; - struct netxen_cmd_buffer *buffer; - struct pci_dev *pdev = adapter->pdev; - struct net_device *netdev = adapter->netdev; - struct netxen_skb_frag *frag; - int done = 0; - struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - - if (!spin_trylock(&adapter->tx_clean_lock)) - return 1; - - sw_consumer = tx_ring->sw_consumer; - hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); - - while (sw_consumer != hw_consumer) { - buffer = &tx_ring->cmd_buf_arr[sw_consumer]; - if (buffer->skb) { - frag = &buffer->frag_array[0]; - pci_unmap_single(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); - frag->dma = 0ULL; - for (i = 1; i < buffer->frag_count; i++) { - frag++; /* Get the next frag */ - pci_unmap_page(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); - frag->dma = 0ULL; - } - - adapter->stats.xmitfinished++; - dev_kfree_skb_any(buffer->skb); - buffer->skb = NULL; - } - - sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); - if (++count >= MAX_STATUS_HANDLE) - break; - } - - if (count && netif_running(netdev)) { - tx_ring->sw_consumer = sw_consumer; - - smp_mb(); - - if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) - if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) - netif_wake_queue(netdev); - adapter->tx_timeo_cnt = 0; - } - /* - * If everything is freed up to consumer then check if the ring is full - * If the ring is full then check if more needs to be freed and - * schedule the call back again. - * - * This happens when there are 2 CPUs. One could be freeing and the - * other filling it. If the ring is full when we get out of here and - * the card has already interrupted the host then the host can miss the - * interrupt. - * - * There is still a possible race condition and the host could miss an - * interrupt. The card has to take care of this. 
- */ - hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); - done = (sw_consumer == hw_consumer); - spin_unlock(&adapter->tx_clean_lock); - - return done; -} - -void -netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, - struct nx_host_rds_ring *rds_ring) -{ - struct rcv_desc *pdesc; - struct netxen_rx_buffer *buffer; - int producer, count = 0; - netxen_ctx_msg msg = 0; - struct list_head *head; - - producer = rds_ring->producer; - - head = &rds_ring->free_list; - while (!list_empty(head)) { - - buffer = list_entry(head->next, struct netxen_rx_buffer, list); - - if (!buffer->skb) { - if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) - break; - } - - count++; - list_del(&buffer->list); - - /* make a rcv descriptor */ - pdesc = &rds_ring->desc_head[producer]; - pdesc->addr_buffer = cpu_to_le64(buffer->dma); - pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); - pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); - - producer = get_next_index(producer, rds_ring->num_desc); - } - - if (count) { - rds_ring->producer = producer; - NXWRIO(adapter, rds_ring->crb_rcv_producer, - (producer-1) & (rds_ring->num_desc-1)); - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - /* - * Write a doorbell msg to tell phanmon of change in - * receive ring producer - * Only for firmware version < 4.0.0 - */ - netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); - netxen_set_msg_privid(msg); - netxen_set_msg_count(msg, - ((producer - 1) & - (rds_ring->num_desc - 1))); - netxen_set_msg_ctxid(msg, adapter->portnum); - netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); - NXWRIO(adapter, DB_NORMALIZE(adapter, - NETXEN_RCV_PRODUCER_OFFSET), msg); - } - } -} - -static void -netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, - struct nx_host_rds_ring *rds_ring) -{ - struct rcv_desc *pdesc; - struct netxen_rx_buffer *buffer; - int producer, count = 0; - struct list_head *head; - - if (!spin_trylock(&rds_ring->lock)) - return; - - producer = rds_ring->producer; - - head = &rds_ring->free_list; - while (!list_empty(head)) { - - buffer = list_entry(head->next, struct netxen_rx_buffer, list); - - if (!buffer->skb) { - if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) - break; - } - - count++; - list_del(&buffer->list); - - /* make a rcv descriptor */ - pdesc = &rds_ring->desc_head[producer]; - pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); - pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); - pdesc->addr_buffer = cpu_to_le64(buffer->dma); - - producer = get_next_index(producer, rds_ring->num_desc); - } - - if (count) { - rds_ring->producer = producer; - NXWRIO(adapter, rds_ring->crb_rcv_producer, - (producer - 1) & (rds_ring->num_desc - 1)); - } - spin_unlock(&rds_ring->lock); -} - -void netxen_nic_clear_stats(struct netxen_adapter *adapter) -{ - memset(&adapter->stats, 0, sizeof(adapter->stats)); -} - diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c deleted file mode 100644 index 8c7fc32..0000000 --- a/drivers/net/netxen/netxen_nic_main.c +++ /dev/null @@ -1,3100 +0,0 @@ -/* - * Copyright (C) 2003 - 2009 NetXen, Inc. - * Copyright (C) 2009 - QLogic Corporation. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
- *
- */
-
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include "netxen_nic_hw.h"
-
-#include "netxen_nic.h"
-
-#include <linux/dma-mapping.h>
-#include <linux/if_vlan.h>
-#include <net/ip.h>
-#include <linux/ipv6.h>
-#include <linux/inetdevice.h>
-#include <linux/sysfs.h>
-#include <linux/aer.h>
-
-MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
-
-char netxen_nic_driver_name[] = "netxen_nic";
-static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
-		NETXEN_NIC_LINUX_VERSIONID;
-
-static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
-
-/* Default to restricted 1G auto-neg mode */
-static int wol_port_mode = 5;
-
-static int use_msi = 1;
-
-static int use_msi_x = 1;
-
-static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
-module_param(auto_fw_reset, int, 0644);
-MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
-
-static int __devinit netxen_nic_probe(struct pci_dev *pdev,
-		const struct pci_device_id *ent);
-static void __devexit netxen_nic_remove(struct pci_dev *pdev);
-static int netxen_nic_open(struct net_device *netdev);
-static int netxen_nic_close(struct net_device *netdev);
-static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
-		struct net_device *);
-static void netxen_tx_timeout(struct net_device *netdev);
-static void netxen_tx_timeout_task(struct work_struct *work);
-static void netxen_fw_poll_work(struct work_struct *work);
-static void netxen_schedule_work(struct netxen_adapter *adapter,
-		work_func_t func, int delay);
-static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
-static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
-
-static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
-static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
-static void netxen_create_diag_entries(struct netxen_adapter *adapter);
-static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
-
-static int nx_dev_request_aer(struct netxen_adapter *adapter);
-static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
-static int netxen_can_start_firmware(struct netxen_adapter *adapter);
-
-static irqreturn_t netxen_intr(int irq, void *data);
-static irqreturn_t netxen_msi_intr(int irq, void *data);
-static irqreturn_t netxen_msix_intr(int irq, void *data);
-
-static void netxen_free_vlan_ip_list(struct netxen_adapter *);
-static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
-static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
-		struct rtnl_link_stats64 *stats);
-static int netxen_nic_set_mac(struct net_device *netdev, void *p);
-
-/* PCI Device ID Table */
-#define ENTRY(device) \
-	{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
-	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-
-static
DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = { - ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), - ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), - ENTRY(PCI_DEVICE_ID_NX2031_4GCU), - ENTRY(PCI_DEVICE_ID_NX2031_IMEZ), - ENTRY(PCI_DEVICE_ID_NX2031_HMEZ), - ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT), - ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2), - ENTRY(PCI_DEVICE_ID_NX3031), - {0,} -}; - -MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); - -static uint32_t crb_cmd_producer[4] = { - CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1, - CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3 -}; - -void -netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, - struct nx_host_tx_ring *tx_ring) -{ - NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); -} - -static uint32_t crb_cmd_consumer[4] = { - CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1, - CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3 -}; - -static inline void -netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, - struct nx_host_tx_ring *tx_ring) -{ - NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer); -} - -static uint32_t msi_tgt_status[8] = { - ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, - ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, - ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, - ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 -}; - -static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; - -static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring) -{ - struct netxen_adapter *adapter = sds_ring->adapter; - - NXWRIO(adapter, sds_ring->crb_intr_mask, 0); -} - -static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring) -{ - struct netxen_adapter *adapter = sds_ring->adapter; - - NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1); - - if (!NETXEN_IS_MSI_FAMILY(adapter)) - NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff); -} - -static int -netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count) -{ - int size = sizeof(struct nx_host_sds_ring) * count; - - recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); - - return recv_ctx->sds_rings == NULL; -} - -static void -netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) -{ - if (recv_ctx->sds_rings != NULL) - kfree(recv_ctx->sds_rings); - - recv_ctx->sds_rings = NULL; -} - -static int -netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) - return -ENOMEM; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netif_napi_add(netdev, &sds_ring->napi, - netxen_nic_poll, NETXEN_NETDEV_WEIGHT); - } - - return 0; -} - -static void -netxen_napi_del(struct netxen_adapter *adapter) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netif_napi_del(&sds_ring->napi); - } - - netxen_free_sds_rings(&adapter->recv_ctx); -} - -static void -netxen_napi_enable(struct netxen_adapter *adapter) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - napi_enable(&sds_ring->napi); - netxen_nic_enable_int(sds_ring); - } -} - -static void 
-netxen_napi_disable(struct netxen_adapter *adapter) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netxen_nic_disable_int(sds_ring); - napi_synchronize(&sds_ring->napi); - napi_disable(&sds_ring->napi); - } -} - -static int nx_set_dma_mask(struct netxen_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - uint64_t mask, cmask; - - adapter->pci_using_dac = 0; - - mask = DMA_BIT_MASK(32); - cmask = DMA_BIT_MASK(32); - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { -#ifndef CONFIG_IA64 - mask = DMA_BIT_MASK(35); -#endif - } else { - mask = DMA_BIT_MASK(39); - cmask = mask; - } - - if (pci_set_dma_mask(pdev, mask) == 0 && - pci_set_consistent_dma_mask(pdev, cmask) == 0) { - adapter->pci_using_dac = 1; - return 0; - } - - return -EIO; -} - -/* Update addressable range if firmware supports it */ -static int -nx_update_dma_mask(struct netxen_adapter *adapter) -{ - int change, shift, err; - uint64_t mask, old_mask, old_cmask; - struct pci_dev *pdev = adapter->pdev; - - change = 0; - - shift = NXRD32(adapter, CRB_DMA_SHIFT); - if (shift > 32) - return 0; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) - change = 1; - else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4)) - change = 1; - - if (change) { - old_mask = pdev->dma_mask; - old_cmask = pdev->dev.coherent_dma_mask; - - mask = DMA_BIT_MASK(32+shift); - - err = pci_set_dma_mask(pdev, mask); - if (err) - goto err_out; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - - err = pci_set_consistent_dma_mask(pdev, mask); - if (err) - goto err_out; - } - dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); - } - - return 0; - -err_out: - pci_set_dma_mask(pdev, old_mask); - pci_set_consistent_dma_mask(pdev, old_cmask); - return err; -} - -static int -netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) -{ - u32 val, timeout; - - if (first_boot == 0x55555555) { - /* This is the first boot after power up */ - NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); - - if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 0; - - /* PCI bus master workaround */ - first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); - if (!(first_boot & 0x4)) { - first_boot |= 0x4; - NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); - NXRD32(adapter, NETXEN_PCIE_REG(0x4)); - } - - /* This is the first boot after power up */ - first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); - if (first_boot != 0x80000f) { - /* clear the register for future unloads/loads */ - NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0); - return -EIO; - } - - /* Start P2 boot loader */ - val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE); - NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1); - timeout = 0; - do { - msleep(1); - val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); - - if (++timeout > 5000) - return -EIO; - - } while (val == NETXEN_BDINFO_MAGIC); - } - return 0; -} - -static void netxen_set_port_mode(struct netxen_adapter *adapter) -{ - u32 val, data; - - val = adapter->ahw.board_type; - if ((val == NETXEN_BRDTYPE_P3_HMEZ) || - (val == NETXEN_BRDTYPE_P3_XG_LOM)) { - if (port_mode == NETXEN_PORT_MODE_802_3_AP) { - data = NETXEN_PORT_MODE_802_3_AP; - NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); - } else if (port_mode == NETXEN_PORT_MODE_XG) { - data = NETXEN_PORT_MODE_XG; - NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); - } else if 
(port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) { - data = NETXEN_PORT_MODE_AUTO_NEG_1G; - NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); - } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) { - data = NETXEN_PORT_MODE_AUTO_NEG_XG; - NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); - } else { - data = NETXEN_PORT_MODE_AUTO_NEG; - NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); - } - - if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) && - (wol_port_mode != NETXEN_PORT_MODE_XG) && - (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) && - (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) { - wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG; - } - NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode); - } -} - -static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) -{ - u32 control; - int pos; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_dword(pdev, pos, &control); - if (enable) - control |= PCI_MSIX_FLAGS_ENABLE; - else - control = 0; - pci_write_config_dword(pdev, pos, control); - } -} - -static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count) -{ - int i; - - for (i = 0; i < count; i++) - adapter->msix_entries[i].entry = i; -} - -static int -netxen_read_mac_addr(struct netxen_adapter *adapter) -{ - int i; - unsigned char *p; - u64 mac_addr; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) - return -EIO; - } else { - if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) - return -EIO; - } - - p = (unsigned char *)&mac_addr; - for (i = 0; i < 6; i++) - netdev->dev_addr[i] = *(p + 5 - i); - - memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); - - /* set station address */ - - if (!is_valid_ether_addr(netdev->perm_addr)) - dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); - - return 0; -} - -static int netxen_nic_set_mac(struct net_device *netdev, void *p) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - - if (netif_running(netdev)) { - netif_device_detach(netdev); - netxen_napi_disable(adapter); - } - - memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - adapter->macaddr_set(adapter, addr->sa_data); - - if (netif_running(netdev)) { - netif_device_attach(netdev); - netxen_napi_enable(adapter); - } - return 0; -} - -static void netxen_set_multicast_list(struct net_device *dev) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - - adapter->set_multi(dev); -} - -static u32 netxen_fix_features(struct net_device *dev, u32 features) -{ - if (!(features & NETIF_F_RXCSUM)) { - netdev_info(dev, "disabling LRO as RXCSUM is off\n"); - - features &= ~NETIF_F_LRO; - } - - return features; -} - -static int netxen_set_features(struct net_device *dev, u32 features) -{ - struct netxen_adapter *adapter = netdev_priv(dev); - int hw_lro; - - if (!((dev->features ^ features) & NETIF_F_LRO)) - return 0; - - hw_lro = (features & NETIF_F_LRO) ? 
NETXEN_NIC_LRO_ENABLED - : NETXEN_NIC_LRO_DISABLED; - - if (netxen_config_hw_lro(adapter, hw_lro)) - return -EIO; - - if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter)) - return -EIO; - - return 0; -} - -static const struct net_device_ops netxen_netdev_ops = { - .ndo_open = netxen_nic_open, - .ndo_stop = netxen_nic_close, - .ndo_start_xmit = netxen_nic_xmit_frame, - .ndo_get_stats64 = netxen_nic_get_stats, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = netxen_set_multicast_list, - .ndo_set_mac_address = netxen_nic_set_mac, - .ndo_change_mtu = netxen_nic_change_mtu, - .ndo_tx_timeout = netxen_tx_timeout, - .ndo_fix_features = netxen_fix_features, - .ndo_set_features = netxen_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = netxen_nic_poll_controller, -#endif -}; - -static void -netxen_setup_intr(struct netxen_adapter *adapter) -{ - struct netxen_legacy_intr_set *legacy_intrp; - struct pci_dev *pdev = adapter->pdev; - int err, num_msix; - - if (adapter->rss_supported) { - num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? - MSIX_ENTRIES_PER_ADAPTER : 2; - } else - num_msix = 1; - - adapter->max_sds_rings = 1; - - adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); - - if (adapter->ahw.revision_id >= NX_P3_B0) - legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; - else - legacy_intrp = &legacy_intr[0]; - - adapter->int_vec_bit = legacy_intrp->int_vec_bit; - adapter->tgt_status_reg = netxen_get_ioaddr(adapter, - legacy_intrp->tgt_status_reg); - adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, - legacy_intrp->tgt_mask_reg); - adapter->pci_int_reg = netxen_get_ioaddr(adapter, - legacy_intrp->pci_int_reg); - adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); - - if (adapter->ahw.revision_id >= NX_P3_B1) - adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, - ISR_INT_STATE_REG); - else - adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, - CRB_INT_VECTOR); - - netxen_set_msix_bit(pdev, 0); - - if (adapter->msix_supported) { - - netxen_init_msix_entries(adapter, num_msix); - err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); - if (err == 0) { - adapter->flags |= NETXEN_NIC_MSIX_ENABLED; - netxen_set_msix_bit(pdev, 1); - - if (adapter->rss_supported) - adapter->max_sds_rings = num_msix; - - dev_info(&pdev->dev, "using msi-x interrupts\n"); - return; - } - - if (err > 0) - pci_disable_msix(pdev); - - /* fall through for msi */ - } - - if (use_msi && !pci_enable_msi(pdev)) { - adapter->flags |= NETXEN_NIC_MSI_ENABLED; - adapter->tgt_status_reg = netxen_get_ioaddr(adapter, - msi_tgt_status[adapter->ahw.pci_func]); - dev_info(&pdev->dev, "using msi interrupts\n"); - adapter->msix_entries[0].vector = pdev->irq; - return; - } - - dev_info(&pdev->dev, "using legacy interrupts\n"); - adapter->msix_entries[0].vector = pdev->irq; -} - -static void -netxen_teardown_intr(struct netxen_adapter *adapter) -{ - if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) - pci_disable_msix(adapter->pdev); - if (adapter->flags & NETXEN_NIC_MSI_ENABLED) - pci_disable_msi(adapter->pdev); -} - -static void -netxen_cleanup_pci_map(struct netxen_adapter *adapter) -{ - if (adapter->ahw.db_base != NULL) - iounmap(adapter->ahw.db_base); - if (adapter->ahw.pci_base0 != NULL) - iounmap(adapter->ahw.pci_base0); - if (adapter->ahw.pci_base1 != NULL) - iounmap(adapter->ahw.pci_base1); - if (adapter->ahw.pci_base2 != NULL) - iounmap(adapter->ahw.pci_base2); -} - -static int -netxen_setup_pci_map(struct 
netxen_adapter *adapter) -{ - void __iomem *db_ptr = NULL; - - resource_size_t mem_base, db_base; - unsigned long mem_len, db_len = 0; - - struct pci_dev *pdev = adapter->pdev; - int pci_func = adapter->ahw.pci_func; - struct netxen_hardware_context *ahw = &adapter->ahw; - - int err = 0; - - /* - * Set the CRB window to invalid. If any register in window 0 is - * accessed it should set the window to 0 and then reset it to 1. - */ - adapter->ahw.crb_win = -1; - adapter->ahw.ocm_win = -1; - - /* remap phys address */ - mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ - mem_len = pci_resource_len(pdev, 0); - - /* 128 Meg of memory */ - if (mem_len == NETXEN_PCI_128MB_SIZE) { - - ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); - ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, - SECOND_PAGE_GROUP_SIZE); - ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, - THIRD_PAGE_GROUP_SIZE); - if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || - ahw->pci_base2 == NULL) { - dev_err(&pdev->dev, "failed to map PCI bar 0\n"); - err = -EIO; - goto err_out; - } - - ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; - - } else if (mem_len == NETXEN_PCI_32MB_SIZE) { - - ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); - ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - - SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); - if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { - dev_err(&pdev->dev, "failed to map PCI bar 0\n"); - err = -EIO; - goto err_out; - } - - } else if (mem_len == NETXEN_PCI_2MB_SIZE) { - - ahw->pci_base0 = pci_ioremap_bar(pdev, 0); - if (ahw->pci_base0 == NULL) { - dev_err(&pdev->dev, "failed to map PCI bar 0\n"); - return -EIO; - } - ahw->pci_len0 = mem_len; - } else { - return -EIO; - } - - netxen_setup_hwops(adapter); - - dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); - - if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { - adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, - NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); - - } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, - NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func))); - } - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - goto skip_doorbell; - - db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ - db_len = pci_resource_len(pdev, 4); - - if (db_len == 0) { - printk(KERN_ERR "%s: doorbell is disabled\n", - netxen_nic_driver_name); - err = -EIO; - goto err_out; - } - - db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); - if (!db_ptr) { - printk(KERN_ERR "%s: Failed to allocate doorbell map.", - netxen_nic_driver_name); - err = -EIO; - goto err_out; - } - -skip_doorbell: - adapter->ahw.db_base = db_ptr; - adapter->ahw.db_len = db_len; - return 0; - -err_out: - netxen_cleanup_pci_map(adapter); - return err; -} - -static void -netxen_check_options(struct netxen_adapter *adapter) -{ - u32 fw_major, fw_minor, fw_build; - char brd_name[NETXEN_MAX_SHORT_NAME]; - char serial_num[32]; - int i, offset, val; - int *ptr32; - struct pci_dev *pdev = adapter->pdev; - - adapter->driver_mismatch = 0; - - ptr32 = (int *)&serial_num; - offset = NX_FW_SERIAL_NUM_OFFSET; - for (i = 0; i < 8; i++) { - if (netxen_rom_fast_read(adapter, offset, &val) == -1) { - dev_err(&pdev->dev, "error reading board info\n"); - adapter->driver_mismatch = 1; - return; - } - ptr32[i] = cpu_to_le32(val); - offset += sizeof(u32); - } - - fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); - fw_minor = 
NXRD32(adapter, NETXEN_FW_VERSION_MINOR); - fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); - - adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build); - - if (adapter->portnum == 0) { - get_brd_name_by_type(adapter->ahw.board_type, brd_name); - - pr_info("%s: %s Board S/N %s Chip rev 0x%x\n", - module_name(THIS_MODULE), - brd_name, serial_num, adapter->ahw.revision_id); - } - - if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) { - adapter->driver_mismatch = 1; - dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n", - fw_major, fw_minor, fw_build); - return; - } - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - i = NXRD32(adapter, NETXEN_SRE_MISC); - adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0; - } - - dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n", - fw_major, fw_minor, fw_build, - adapter->ahw.cut_through ? "cut-through" : "legacy"); - - if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) - adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1); - - if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; - adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; - } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; - adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; - } - - adapter->msix_supported = 0; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - adapter->msix_supported = !!use_msi_x; - adapter->rss_supported = !!use_msi_x; - } else { - u32 flashed_ver = 0; - netxen_rom_fast_read(adapter, - NX_FW_VERSION_OFFSET, (int *)&flashed_ver); - flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); - - if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) { - switch (adapter->ahw.board_type) { - case NETXEN_BRDTYPE_P2_SB31_10G: - case NETXEN_BRDTYPE_P2_SB31_10G_CX4: - adapter->msix_supported = !!use_msi_x; - adapter->rss_supported = !!use_msi_x; - break; - default: - break; - } - } - } - - adapter->num_txd = MAX_CMD_DESCRIPTORS; - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS; - adapter->max_rds_rings = 3; - } else { - adapter->num_lro_rxd = 0; - adapter->max_rds_rings = 2; - } -} - -static int -netxen_start_firmware(struct netxen_adapter *adapter) -{ - int val, err, first_boot; - struct pci_dev *pdev = adapter->pdev; - - /* required for NX2031 dummy dma */ - err = nx_set_dma_mask(adapter); - if (err) - return err; - - if (!netxen_can_start_firmware(adapter)) - goto wait_init; - - first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); - - err = netxen_check_hw_init(adapter, first_boot); - if (err) { - dev_err(&pdev->dev, "error in init HW init sequence\n"); - return err; - } - - netxen_request_firmware(adapter); - - err = netxen_need_fw_reset(adapter); - if (err < 0) - goto err_out; - if (err == 0) - goto wait_init; - - if (first_boot != 0x55555555) { - NXWR32(adapter, CRB_CMDPEG_STATE, 0); - netxen_pinit_from_rom(adapter); - msleep(1); - } - - NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555); - NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0); - NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - netxen_set_port_mode(adapter); - - err = netxen_load_firmware(adapter); - if (err) - goto err_out; - - netxen_release_firmware(adapter); - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - - /* Initialize multicast addr pool owners */ - val = 0x7654; - if (adapter->ahw.port_type == NETXEN_NIC_XGBE) - val |= 0x0f000000; - NXWR32(adapter, 
NETXEN_MAC_ADDR_CNTL_REG, val); - - } - - err = netxen_init_dummy_dma(adapter); - if (err) - goto err_out; - - /* - * Tell the hardware our version number. - */ - val = (_NETXEN_NIC_LINUX_MAJOR << 16) - | ((_NETXEN_NIC_LINUX_MINOR << 8)) - | (_NETXEN_NIC_LINUX_SUBVERSION); - NXWR32(adapter, CRB_DRIVER_VERSION, val); - -wait_init: - /* Handshake with the card before we register the devices. */ - err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); - if (err) { - netxen_free_dummy_dma(adapter); - goto err_out; - } - - NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY); - - nx_update_dma_mask(adapter); - - netxen_check_options(adapter); - - adapter->need_fw_reset = 0; - - /* fall through and release firmware */ - -err_out: - netxen_release_firmware(adapter); - return err; -} - -static int -netxen_nic_request_irq(struct netxen_adapter *adapter) -{ - irq_handler_t handler; - struct nx_host_sds_ring *sds_ring; - int err, ring; - - unsigned long flags = 0; - struct net_device *netdev = adapter->netdev; - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) - handler = netxen_msix_intr; - else if (adapter->flags & NETXEN_NIC_MSI_ENABLED) - handler = netxen_msi_intr; - else { - flags |= IRQF_SHARED; - handler = netxen_intr; - } - adapter->irq = netdev->irq; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); - err = request_irq(sds_ring->irq, handler, - flags, sds_ring->name, sds_ring); - if (err) - return err; - } - - return 0; -} - -static void -netxen_nic_free_irq(struct netxen_adapter *adapter) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - free_irq(sds_ring->irq, sds_ring); - } -} - -static void -netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter) -{ - adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; - adapter->coal.normal.data.rx_time_us = - NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; - adapter->coal.normal.data.rx_packets = - NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; - adapter->coal.normal.data.tx_time_us = - NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US; - adapter->coal.normal.data.tx_packets = - NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS; -} - -/* with rtnl_lock */ -static int -__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) -{ - int err; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return -EIO; - - err = adapter->init_port(adapter, adapter->physical_port); - if (err) { - printk(KERN_ERR "%s: Failed to initialize port %d\n", - netxen_nic_driver_name, adapter->portnum); - return err; - } - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - adapter->macaddr_set(adapter, adapter->mac_addr); - - adapter->set_multi(netdev); - adapter->set_mtu(adapter, netdev->mtu); - - adapter->ahw.linkup = 0; - - if (adapter->max_sds_rings > 1) - netxen_config_rss(adapter, 1); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - netxen_config_intr_coalesce(adapter); - - if (netdev->features & NETIF_F_LRO) - netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED); - - netxen_napi_enable(adapter); - - if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) - netxen_linkevent_request(adapter, 1); - else - netxen_nic_set_link_parameters(adapter); - - set_bit(__NX_DEV_UP, &adapter->state); - return 0; -} - -/* Usage: During resume and firmware 
recovery module.*/ - -static inline int -netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) -{ - int err = 0; - - rtnl_lock(); - if (netif_running(netdev)) - err = __netxen_nic_up(adapter, netdev); - rtnl_unlock(); - - return err; -} - -/* with rtnl_lock */ -static void -__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) -{ - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return; - - if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state)) - return; - - smp_mb(); - spin_lock(&adapter->tx_clean_lock); - netif_carrier_off(netdev); - netif_tx_disable(netdev); - - if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) - netxen_linkevent_request(adapter, 0); - - if (adapter->stop_port) - adapter->stop_port(adapter); - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - netxen_p3_free_mac_list(adapter); - - adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE); - - netxen_napi_disable(adapter); - - netxen_release_tx_buffers(adapter); - spin_unlock(&adapter->tx_clean_lock); -} - -/* Usage: During suspend and firmware recovery module */ - -static inline void -netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) -{ - rtnl_lock(); - if (netif_running(netdev)) - __netxen_nic_down(adapter, netdev); - rtnl_unlock(); - -} - -static int -netxen_nic_attach(struct netxen_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - int err, ring; - struct nx_host_rds_ring *rds_ring; - struct nx_host_tx_ring *tx_ring; - - if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) - return 0; - - err = netxen_init_firmware(adapter); - if (err) - return err; - - err = netxen_napi_add(adapter, netdev); - if (err) - return err; - - err = netxen_alloc_sw_resources(adapter); - if (err) { - printk(KERN_ERR "%s: Error in setting sw resources\n", - netdev->name); - return err; - } - - err = netxen_alloc_hw_resources(adapter); - if (err) { - printk(KERN_ERR "%s: Error in setting hw resources\n", - netdev->name); - goto err_out_free_sw; - } - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - tx_ring = adapter->tx_ring; - tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, - crb_cmd_producer[adapter->portnum]); - tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter, - crb_cmd_consumer[adapter->portnum]); - - tx_ring->producer = 0; - tx_ring->sw_consumer = 0; - - netxen_nic_update_cmd_producer(adapter, tx_ring); - netxen_nic_update_cmd_consumer(adapter, tx_ring); - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &adapter->recv_ctx.rds_rings[ring]; - netxen_post_rx_buffers(adapter, ring, rds_ring); - } - - err = netxen_nic_request_irq(adapter); - if (err) { - dev_err(&pdev->dev, "%s: failed to setup interrupt\n", - netdev->name); - goto err_out_free_rxbuf; - } - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - netxen_nic_init_coalesce_defaults(adapter); - - netxen_create_sysfs_entries(adapter); - - adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; - return 0; - -err_out_free_rxbuf: - netxen_release_rx_buffers(adapter); - netxen_free_hw_resources(adapter); -err_out_free_sw: - netxen_free_sw_resources(adapter); - return err; -} - -static void -netxen_nic_detach(struct netxen_adapter *adapter) -{ - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - return; - - netxen_remove_sysfs_entries(adapter); - - netxen_free_hw_resources(adapter); - netxen_release_rx_buffers(adapter); - netxen_nic_free_irq(adapter); - netxen_napi_del(adapter); - netxen_free_sw_resources(adapter); - - 
adapter->is_up = 0; -} - -int -netxen_nic_reset_context(struct netxen_adapter *adapter) -{ - int err = 0; - struct net_device *netdev = adapter->netdev; - - if (test_and_set_bit(__NX_RESETTING, &adapter->state)) - return -EBUSY; - - if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { - - netif_device_detach(netdev); - - if (netif_running(netdev)) - __netxen_nic_down(adapter, netdev); - - netxen_nic_detach(adapter); - - if (netif_running(netdev)) { - err = netxen_nic_attach(adapter); - if (!err) - err = __netxen_nic_up(adapter, netdev); - - if (err) - goto done; - } - - netif_device_attach(netdev); - } - -done: - clear_bit(__NX_RESETTING, &adapter->state); - return err; -} - -static int -netxen_setup_netdev(struct netxen_adapter *adapter, - struct net_device *netdev) -{ - int err = 0; - struct pci_dev *pdev = adapter->pdev; - - adapter->mc_enabled = 0; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - adapter->max_mc_count = 38; - else - adapter->max_mc_count = 16; - - netdev->netdev_ops = &netxen_netdev_ops; - netdev->watchdog_timeo = 5*HZ; - - netxen_nic_change_mtu(netdev, netdev->mtu); - - SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); - - netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_RXCSUM; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; - - netdev->vlan_features |= netdev->hw_features; - - if (adapter->pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } - - if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) - netdev->hw_features |= NETIF_F_HW_VLAN_TX; - - if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) - netdev->hw_features |= NETIF_F_LRO; - - netdev->features |= netdev->hw_features; - - netdev->irq = adapter->msix_entries[0].vector; - - INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); - - if (netxen_read_mac_addr(adapter)) - dev_warn(&pdev->dev, "failed to read mac addr\n"); - - netif_carrier_off(netdev); - - err = register_netdev(netdev); - if (err) { - dev_err(&pdev->dev, "failed to register net device\n"); - return err; - } - - return 0; -} - -#ifdef CONFIG_PCIEAER -static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct pci_dev *root = pdev->bus->self; - u32 aer_pos; - - if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && - adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) - return; - - if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT) - return; - - aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR); - if (!aer_pos) - return; - - pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff); -} -#endif - -static int __devinit -netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *netdev = NULL; - struct netxen_adapter *adapter = NULL; - int i = 0, err; - int pci_func_id = PCI_FUNC(pdev->devfn); - uint8_t revision_id; - u32 val; - - if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { - pr_warning("%s: chip revisions between 0x%x-0x%x " - "will not be enabled.\n", - module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); - return -ENODEV; - } - - if ((err = pci_enable_device(pdev))) - return err; - - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - err = -ENODEV; - goto err_out_disable_pdev; - } - - if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) - goto err_out_disable_pdev; - - if (NX_IS_REVISION_P3(pdev->revision)) - pci_enable_pcie_error_reporting(pdev); - - 
pci_set_master(pdev); - - netdev = alloc_etherdev(sizeof(struct netxen_adapter)); - if(!netdev) { - dev_err(&pdev->dev, "failed to allocate net_device\n"); - err = -ENOMEM; - goto err_out_free_res; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - adapter->ahw.pci_func = pci_func_id; - - revision_id = pdev->revision; - adapter->ahw.revision_id = revision_id; - - rwlock_init(&adapter->ahw.crb_lock); - spin_lock_init(&adapter->ahw.mem_lock); - - spin_lock_init(&adapter->tx_clean_lock); - INIT_LIST_HEAD(&adapter->mac_list); - INIT_LIST_HEAD(&adapter->vlan_ip_list); - - err = netxen_setup_pci_map(adapter); - if (err) - goto err_out_free_netdev; - - /* This will be reset for mezz cards */ - adapter->portnum = pci_func_id; - - err = netxen_nic_get_board_info(adapter); - if (err) { - dev_err(&pdev->dev, "Error getting board config info.\n"); - goto err_out_iounmap; - } - -#ifdef CONFIG_PCIEAER - netxen_mask_aer_correctable(adapter); -#endif - - /* Mezz cards have PCI function 0,2,3 enabled */ - switch (adapter->ahw.board_type) { - case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: - case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: - if (pci_func_id >= 2) - adapter->portnum = pci_func_id - 2; - break; - default: - break; - } - - err = netxen_check_flash_fw_compatibility(adapter); - if (err) - goto err_out_iounmap; - - if (adapter->portnum == 0) { - val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); - if (val != 0xffffffff && val != 0) { - NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); - adapter->need_fw_reset = 1; - } - } - - err = netxen_start_firmware(adapter); - if (err) - goto err_out_decr_ref; - - /* - * See if the firmware gave us a virtual-physical port mapping. - */ - adapter->physical_port = adapter->portnum; - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - i = NXRD32(adapter, CRB_V2P(adapter->portnum)); - if (i != 0x55555555) - adapter->physical_port = i; - } - - netxen_nic_clear_stats(adapter); - - netxen_setup_intr(adapter); - - err = netxen_setup_netdev(adapter, netdev); - if (err) - goto err_out_disable_msi; - - pci_set_drvdata(pdev, adapter); - - netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); - - switch (adapter->ahw.port_type) { - case NETXEN_NIC_GBE: - dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", - adapter->netdev->name); - break; - case NETXEN_NIC_XGBE: - dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", - adapter->netdev->name); - break; - } - - netxen_create_diag_entries(adapter); - - return 0; - -err_out_disable_msi: - netxen_teardown_intr(adapter); - - netxen_free_dummy_dma(adapter); - -err_out_decr_ref: - nx_decr_dev_ref_cnt(adapter); - -err_out_iounmap: - netxen_cleanup_pci_map(adapter); - -err_out_free_netdev: - free_netdev(netdev); - -err_out_free_res: - pci_release_regions(pdev); - -err_out_disable_pdev: - pci_set_drvdata(pdev, NULL); - pci_disable_device(pdev); - return err; -} - -static void __devexit netxen_nic_remove(struct pci_dev *pdev) -{ - struct netxen_adapter *adapter; - struct net_device *netdev; - - adapter = pci_get_drvdata(pdev); - if (adapter == NULL) - return; - - netdev = adapter->netdev; - - netxen_cancel_fw_work(adapter); - - unregister_netdev(netdev); - - cancel_work_sync(&adapter->tx_timeout_task); - - netxen_free_vlan_ip_list(adapter); - netxen_nic_detach(adapter); - - nx_decr_dev_ref_cnt(adapter); - - if (adapter->portnum == 0) - netxen_free_dummy_dma(adapter); - - clear_bit(__NX_RESETTING, &adapter->state); - - netxen_teardown_intr(adapter); - - 
netxen_remove_diag_entries(adapter); - - netxen_cleanup_pci_map(adapter); - - netxen_release_firmware(adapter); - - if (NX_IS_REVISION_P3(pdev->revision)) - pci_disable_pcie_error_reporting(pdev); - - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - - free_netdev(netdev); -} - -static void netxen_nic_detach_func(struct netxen_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - netif_device_detach(netdev); - - netxen_cancel_fw_work(adapter); - - if (netif_running(netdev)) - netxen_nic_down(adapter, netdev); - - cancel_work_sync(&adapter->tx_timeout_task); - - netxen_nic_detach(adapter); - - if (adapter->portnum == 0) - netxen_free_dummy_dma(adapter); - - nx_decr_dev_ref_cnt(adapter); - - clear_bit(__NX_RESETTING, &adapter->state); -} - -static int netxen_nic_attach_func(struct pci_dev *pdev) -{ - struct netxen_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - int err; - - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_power_state(pdev, PCI_D0); - pci_set_master(pdev); - pci_restore_state(pdev); - - adapter->ahw.crb_win = -1; - adapter->ahw.ocm_win = -1; - - err = netxen_start_firmware(adapter); - if (err) { - dev_err(&pdev->dev, "failed to start firmware\n"); - return err; - } - - if (netif_running(netdev)) { - err = netxen_nic_attach(adapter); - if (err) - goto err_out; - - err = netxen_nic_up(adapter, netdev); - if (err) - goto err_out_detach; - - netxen_restore_indev_addr(netdev, NETDEV_UP); - } - - netif_device_attach(netdev); - netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); - return 0; - -err_out_detach: - netxen_nic_detach(adapter); -err_out: - nx_decr_dev_ref_cnt(adapter); - return err; -} - -static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct netxen_adapter *adapter = pci_get_drvdata(pdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (nx_dev_request_aer(adapter)) - return PCI_ERS_RESULT_RECOVERED; - - netxen_nic_detach_func(adapter); - - pci_disable_device(pdev); - - return PCI_ERS_RESULT_NEED_RESET; -} - -static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev) -{ - int err = 0; - - err = netxen_nic_attach_func(pdev); - - return err ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; -} - -static void netxen_io_resume(struct pci_dev *pdev) -{ - pci_cleanup_aer_uncorrect_error_status(pdev); -} - -static void netxen_nic_shutdown(struct pci_dev *pdev) -{ - struct netxen_adapter *adapter = pci_get_drvdata(pdev); - - netxen_nic_detach_func(adapter); - - if (pci_save_state(pdev)) - return; - - if (netxen_nic_wol_supported(adapter)) { - pci_enable_wake(pdev, PCI_D3cold, 1); - pci_enable_wake(pdev, PCI_D3hot, 1); - } - - pci_disable_device(pdev); -} - -#ifdef CONFIG_PM -static int -netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct netxen_adapter *adapter = pci_get_drvdata(pdev); - int retval; - - netxen_nic_detach_func(adapter); - - retval = pci_save_state(pdev); - if (retval) - return retval; - - if (netxen_nic_wol_supported(adapter)) { - pci_enable_wake(pdev, PCI_D3cold, 1); - pci_enable_wake(pdev, PCI_D3hot, 1); - } - - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static int -netxen_nic_resume(struct pci_dev *pdev) -{ - return netxen_nic_attach_func(pdev); -} -#endif - -static int netxen_nic_open(struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - int err = 0; - - if (adapter->driver_mismatch) - return -EIO; - - err = netxen_nic_attach(adapter); - if (err) - return err; - - err = __netxen_nic_up(adapter, netdev); - if (err) - goto err_out; - - netif_start_queue(netdev); - - return 0; - -err_out: - netxen_nic_detach(adapter); - return err; -} - -/* - * netxen_nic_close - Disables a network interface entry point - */ -static int netxen_nic_close(struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - - __netxen_nic_down(adapter, netdev); - return 0; -} - -static void -netxen_tso_check(struct net_device *netdev, - struct nx_host_tx_ring *tx_ring, - struct cmd_desc_type0 *first_desc, - struct sk_buff *skb) -{ - u8 opcode = TX_ETHER_PKT; - __be16 protocol = skb->protocol; - u16 flags = 0, vid = 0; - u32 producer; - int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; - struct cmd_desc_type0 *hwdesc; - struct vlan_ethhdr *vh; - - if (protocol == cpu_to_be16(ETH_P_8021Q)) { - - vh = (struct vlan_ethhdr *)skb->data; - protocol = vh->h_vlan_encapsulated_proto; - flags = FLAGS_VLAN_TAGGED; - - } else if (vlan_tx_tag_present(skb)) { - - flags = FLAGS_VLAN_OOB; - vid = vlan_tx_tag_get(skb); - netxen_set_tx_vlan_tci(first_desc, vid); - vlan_oob = 1; - } - - if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && - skb_shinfo(skb)->gso_size > 0) { - - hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - - first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - first_desc->total_hdr_length = hdr_len; - if (vlan_oob) { - first_desc->total_hdr_length += VLAN_HLEN; - first_desc->tcp_hdr_offset = VLAN_HLEN; - first_desc->ip_hdr_offset = VLAN_HLEN; - /* Only in case of TSO on vlan device */ - flags |= FLAGS_VLAN_TAGGED; - } - - opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? 
- TX_TCP_LSO6 : TX_TCP_LSO; - tso = 1; - - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 l4proto; - - if (protocol == cpu_to_be16(ETH_P_IP)) { - l4proto = ip_hdr(skb)->protocol; - - if (l4proto == IPPROTO_TCP) - opcode = TX_TCP_PKT; - else if(l4proto == IPPROTO_UDP) - opcode = TX_UDP_PKT; - } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { - l4proto = ipv6_hdr(skb)->nexthdr; - - if (l4proto == IPPROTO_TCP) - opcode = TX_TCPV6_PKT; - else if(l4proto == IPPROTO_UDP) - opcode = TX_UDPV6_PKT; - } - } - - first_desc->tcp_hdr_offset += skb_transport_offset(skb); - first_desc->ip_hdr_offset += skb_network_offset(skb); - netxen_set_tx_flags_opcode(first_desc, flags, opcode); - - if (!tso) - return; - - /* For LSO, we need to copy the MAC/IP/TCP headers into - * the descriptor ring - */ - producer = tx_ring->producer; - copied = 0; - offset = 2; - - if (vlan_oob) { - /* Create a TSO vlan header template for firmware */ - - hwdesc = &tx_ring->desc_head[producer]; - tx_ring->cmd_buf_arr[producer].skb = NULL; - - copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, - hdr_len + VLAN_HLEN); - - vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); - skb_copy_from_linear_data(skb, vh, 12); - vh->h_vlan_proto = htons(ETH_P_8021Q); - vh->h_vlan_TCI = htons(vid); - skb_copy_from_linear_data_offset(skb, 12, - (char *)vh + 16, copy_len - 16); - - copied = copy_len - VLAN_HLEN; - offset = 0; - - producer = get_next_index(producer, tx_ring->num_desc); - } - - while (copied < hdr_len) { - - copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, - (hdr_len - copied)); - - hwdesc = &tx_ring->desc_head[producer]; - tx_ring->cmd_buf_arr[producer].skb = NULL; - - skb_copy_from_linear_data_offset(skb, copied, - (char *)hwdesc + offset, copy_len); - - copied += copy_len; - offset = 0; - - producer = get_next_index(producer, tx_ring->num_desc); - } - - tx_ring->producer = producer; - barrier(); -} - -static int -netxen_map_tx_skb(struct pci_dev *pdev, - struct sk_buff *skb, struct netxen_cmd_buffer *pbuf) -{ - struct netxen_skb_frag *nf; - struct skb_frag_struct *frag; - int i, nr_frags; - dma_addr_t map; - - nr_frags = skb_shinfo(skb)->nr_frags; - nf = &pbuf->frag_array[0]; - - map = pci_map_single(pdev, skb->data, - skb_headlen(skb), PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, map)) - goto out_err; - - nf->dma = map; - nf->length = skb_headlen(skb); - - for (i = 0; i < nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - nf = &pbuf->frag_array[i+1]; - - map = pci_map_page(pdev, frag->page, frag->page_offset, - frag->size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, map)) - goto unwind; - - nf->dma = map; - nf->length = frag->size; - } - - return 0; - -unwind: - while (--i >= 0) { - nf = &pbuf->frag_array[i+1]; - pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); - } - - nf = &pbuf->frag_array[0]; - pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); - -out_err: - return -ENOMEM; -} - -static inline void -netxen_clear_cmddesc(u64 *desc) -{ - desc[0] = 0ULL; - desc[2] = 0ULL; -} - -static netdev_tx_t -netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - struct netxen_cmd_buffer *pbuf; - struct netxen_skb_frag *buffrag; - struct cmd_desc_type0 *hwdesc, *first_desc; - struct pci_dev *pdev; - int i, k; - int delta = 0; - struct skb_frag_struct *frag; - - u32 producer; - int frag_count, no_of_desc; - u32 num_txd = tx_ring->num_desc; 
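/*
 * Note on the descriptor math in netxen_nic_xmit_frame() below:
 * each cmd_desc_type0 carries up to four buffer address/length
 * pairs (addr_buffer1..addr_buffer4), so a packet mapped into
 * frag_count fragments consumes ceil(frag_count / 4) descriptors,
 * computed with the shift idiom (frag_count + 3) >> 2. The linear
 * skb data occupies frag_array[0], hence frag_count starts at
 * nr_frags + 1; non-TSO skbs exceeding NETXEN_MAX_FRAGS_PER_TX
 * fragments are first linearized with __pskb_pull_tail().
 */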
- - frag_count = skb_shinfo(skb)->nr_frags + 1; - - /* 14 frags supported for normal packet and - * 32 frags supported for TSO packet - */ - if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) { - - for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { - frag = &skb_shinfo(skb)->frags[i]; - delta += frag->size; - } - - if (!__pskb_pull_tail(skb, delta)) - goto drop_packet; - - frag_count = 1 + skb_shinfo(skb)->nr_frags; - } - /* 4 fragments per cmd des */ - no_of_desc = (frag_count + 3) >> 2; - - if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { - netif_stop_queue(netdev); - smp_mb(); - if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) - netif_start_queue(netdev); - else - return NETDEV_TX_BUSY; - } - - producer = tx_ring->producer; - pbuf = &tx_ring->cmd_buf_arr[producer]; - - pdev = adapter->pdev; - - if (netxen_map_tx_skb(pdev, skb, pbuf)) - goto drop_packet; - - pbuf->skb = skb; - pbuf->frag_count = frag_count; - - first_desc = hwdesc = &tx_ring->desc_head[producer]; - netxen_clear_cmddesc((u64 *)hwdesc); - - netxen_set_tx_frags_len(first_desc, frag_count, skb->len); - netxen_set_tx_port(first_desc, adapter->portnum); - - for (i = 0; i < frag_count; i++) { - - k = i % 4; - - if ((k == 0) && (i > 0)) { - /* move to next desc.*/ - producer = get_next_index(producer, num_txd); - hwdesc = &tx_ring->desc_head[producer]; - netxen_clear_cmddesc((u64 *)hwdesc); - tx_ring->cmd_buf_arr[producer].skb = NULL; - } - - buffrag = &pbuf->frag_array[i]; - - hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); - switch (k) { - case 0: - hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); - break; - case 1: - hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); - break; - case 2: - hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); - break; - case 3: - hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); - break; - } - } - - tx_ring->producer = get_next_index(producer, num_txd); - - netxen_tso_check(netdev, tx_ring, first_desc, skb); - - adapter->stats.txbytes += skb->len; - adapter->stats.xmitcalled++; - - netxen_nic_update_cmd_producer(adapter, tx_ring); - - return NETDEV_TX_OK; - -drop_packet: - adapter->stats.txdropped++; - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - -static int netxen_nic_check_temp(struct netxen_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - uint32_t temp, temp_state, temp_val; - int rv = 0; - - temp = NXRD32(adapter, CRB_TEMP_STATE); - - temp_state = nx_get_temp_state(temp); - temp_val = nx_get_temp_val(temp); - - if (temp_state == NX_TEMP_PANIC) { - printk(KERN_ALERT - "%s: Device temperature %d degrees C exceeds" - " maximum allowed. Hardware has been shut down.\n", - netdev->name, temp_val); - rv = 1; - } else if (temp_state == NX_TEMP_WARN) { - if (adapter->temp == NX_TEMP_NORMAL) { - printk(KERN_ALERT - "%s: Device temperature %d degrees C " - "exceeds operating range." 
- " Immediate action needed.\n", - netdev->name, temp_val); - } - } else { - if (adapter->temp == NX_TEMP_WARN) { - printk(KERN_INFO - "%s: Device temperature is now %d degrees C" - " in normal range.\n", netdev->name, - temp_val); - } - } - adapter->temp = temp_state; - return rv; -} - -void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) -{ - struct net_device *netdev = adapter->netdev; - - if (adapter->ahw.linkup && !linkup) { - printk(KERN_INFO "%s: %s NIC Link is down\n", - netxen_nic_driver_name, netdev->name); - adapter->ahw.linkup = 0; - if (netif_running(netdev)) { - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } - adapter->link_changed = !adapter->has_link_events; - } else if (!adapter->ahw.linkup && linkup) { - printk(KERN_INFO "%s: %s NIC Link is up\n", - netxen_nic_driver_name, netdev->name); - adapter->ahw.linkup = 1; - if (netif_running(netdev)) { - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } - adapter->link_changed = !adapter->has_link_events; - } -} - -static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) -{ - u32 val, port, linkup; - - port = adapter->physical_port; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - val = NXRD32(adapter, CRB_XG_STATE_P3); - val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); - linkup = (val == XG_LINK_UP_P3); - } else { - val = NXRD32(adapter, CRB_XG_STATE); - val = (val >> port*8) & 0xff; - linkup = (val == XG_LINK_UP); - } - - netxen_advert_link_change(adapter, linkup); -} - -static void netxen_tx_timeout(struct net_device *netdev) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - - if (test_bit(__NX_RESETTING, &adapter->state)) - return; - - dev_err(&netdev->dev, "transmit timeout, resetting.\n"); - schedule_work(&adapter->tx_timeout_task); -} - -static void netxen_tx_timeout_task(struct work_struct *work) -{ - struct netxen_adapter *adapter = - container_of(work, struct netxen_adapter, tx_timeout_task); - - if (!netif_running(adapter->netdev)) - return; - - if (test_and_set_bit(__NX_RESETTING, &adapter->state)) - return; - - if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) - goto request_reset; - - rtnl_lock(); - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { - /* try to scrub interrupt */ - netxen_napi_disable(adapter); - - netxen_napi_enable(adapter); - - netif_wake_queue(adapter->netdev); - - clear_bit(__NX_RESETTING, &adapter->state); - } else { - clear_bit(__NX_RESETTING, &adapter->state); - if (netxen_nic_reset_context(adapter)) { - rtnl_unlock(); - goto request_reset; - } - } - adapter->netdev->trans_start = jiffies; - rtnl_unlock(); - return; - -request_reset: - adapter->need_fw_reset = 1; - clear_bit(__NX_RESETTING, &adapter->state); -} - -static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *stats) -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - - stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; - stats->tx_packets = adapter->stats.xmitfinished; - stats->rx_bytes = adapter->stats.rxbytes; - stats->tx_bytes = adapter->stats.txbytes; - stats->rx_dropped = adapter->stats.rxdropped; - stats->tx_dropped = adapter->stats.txdropped; - - return stats; -} - -static irqreturn_t netxen_intr(int irq, void *data) -{ - struct nx_host_sds_ring *sds_ring = data; - struct netxen_adapter *adapter = sds_ring->adapter; - u32 status = 0; - - status = readl(adapter->isr_int_vec); - - if (!(status & adapter->int_vec_bit)) - return IRQ_NONE; - - if 
(NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - /* check interrupt state machine, to be sure */ - status = readl(adapter->crb_int_state_reg); - if (!ISR_LEGACY_INT_TRIGGERED(status)) - return IRQ_NONE; - - } else { - unsigned long our_int = 0; - - our_int = readl(adapter->crb_int_state_reg); - - /* not our interrupt */ - if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) - return IRQ_NONE; - - /* claim interrupt */ - writel((our_int & 0xffffffff), adapter->crb_int_state_reg); - - /* clear interrupt */ - netxen_nic_disable_int(sds_ring); - } - - writel(0xffffffff, adapter->tgt_status_reg); - /* read twice to ensure write is flushed */ - readl(adapter->isr_int_vec); - readl(adapter->isr_int_vec); - - napi_schedule(&sds_ring->napi); - - return IRQ_HANDLED; -} - -static irqreturn_t netxen_msi_intr(int irq, void *data) -{ - struct nx_host_sds_ring *sds_ring = data; - struct netxen_adapter *adapter = sds_ring->adapter; - - /* clear interrupt */ - writel(0xffffffff, adapter->tgt_status_reg); - - napi_schedule(&sds_ring->napi); - return IRQ_HANDLED; -} - -static irqreturn_t netxen_msix_intr(int irq, void *data) -{ - struct nx_host_sds_ring *sds_ring = data; - - napi_schedule(&sds_ring->napi); - return IRQ_HANDLED; -} - -static int netxen_nic_poll(struct napi_struct *napi, int budget) -{ - struct nx_host_sds_ring *sds_ring = - container_of(napi, struct nx_host_sds_ring, napi); - - struct netxen_adapter *adapter = sds_ring->adapter; - - int tx_complete; - int work_done; - - tx_complete = netxen_process_cmd_ring(adapter); - - work_done = netxen_process_rcv_ring(sds_ring, budget); - - if ((work_done < budget) && tx_complete) { - napi_complete(&sds_ring->napi); - if (test_bit(__NX_DEV_UP, &adapter->state)) - netxen_nic_enable_int(sds_ring); - } - - return work_done; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void netxen_nic_poll_controller(struct net_device *netdev) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_adapter *adapter = netdev_priv(netdev); - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - disable_irq(adapter->irq); - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netxen_intr(adapter->irq, sds_ring); - } - enable_irq(adapter->irq); -} -#endif - -static int -nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) -{ - int count; - if (netxen_api_lock(adapter)) - return -EIO; - - count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); - - NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); - - netxen_api_unlock(adapter); - return count; -} - -static int -nx_decr_dev_ref_cnt(struct netxen_adapter *adapter) -{ - int count; - if (netxen_api_lock(adapter)) - return -EIO; - - count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); - WARN_ON(count == 0); - - NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); - - if (count == 0) - NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); - - netxen_api_unlock(adapter); - return count; -} - -static int -nx_dev_request_aer(struct netxen_adapter *adapter) -{ - u32 state; - int ret = -EINVAL; - - if (netxen_api_lock(adapter)) - return ret; - - state = NXRD32(adapter, NX_CRB_DEV_STATE); - - if (state == NX_DEV_NEED_AER) - ret = 0; - else if (state == NX_DEV_READY) { - NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER); - ret = 0; - } - - netxen_api_unlock(adapter); - return ret; -} - -static int -nx_dev_request_reset(struct netxen_adapter *adapter) -{ - u32 state; - int ret = -EINVAL; - - if (netxen_api_lock(adapter)) - return ret; - - state = NXRD32(adapter, NX_CRB_DEV_STATE); - - if (state 
== NX_DEV_NEED_RESET) - ret = 0; - else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) { - NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); - ret = 0; - } - - netxen_api_unlock(adapter); - - return ret; -} - -static int -netxen_can_start_firmware(struct netxen_adapter *adapter) -{ - int count; - int can_start = 0; - - if (netxen_api_lock(adapter)) - return 0; - - count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); - - if ((count < 0) || (count >= NX_MAX_PCI_FUNC)) - count = 0; - - if (count == 0) { - can_start = 1; - NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING); - } - - NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); - - netxen_api_unlock(adapter); - - return can_start; -} - -static void -netxen_schedule_work(struct netxen_adapter *adapter, - work_func_t func, int delay) -{ - INIT_DELAYED_WORK(&adapter->fw_work, func); - schedule_delayed_work(&adapter->fw_work, delay); -} - -static void -netxen_cancel_fw_work(struct netxen_adapter *adapter) -{ - while (test_and_set_bit(__NX_RESETTING, &adapter->state)) - msleep(10); - - cancel_delayed_work_sync(&adapter->fw_work); -} - -static void -netxen_attach_work(struct work_struct *work) -{ - struct netxen_adapter *adapter = container_of(work, - struct netxen_adapter, fw_work.work); - struct net_device *netdev = adapter->netdev; - int err = 0; - - if (netif_running(netdev)) { - err = netxen_nic_attach(adapter); - if (err) - goto done; - - err = netxen_nic_up(adapter, netdev); - if (err) { - netxen_nic_detach(adapter); - goto done; - } - - netxen_restore_indev_addr(netdev, NETDEV_UP); - } - - netif_device_attach(netdev); - -done: - adapter->fw_fail_cnt = 0; - clear_bit(__NX_RESETTING, &adapter->state); - netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); -} - -static void -netxen_fwinit_work(struct work_struct *work) -{ - struct netxen_adapter *adapter = container_of(work, - struct netxen_adapter, fw_work.work); - int dev_state; - - dev_state = NXRD32(adapter, NX_CRB_DEV_STATE); - - switch (dev_state) { - case NX_DEV_COLD: - case NX_DEV_READY: - if (!netxen_start_firmware(adapter)) { - netxen_schedule_work(adapter, netxen_attach_work, 0); - return; - } - break; - - case NX_DEV_NEED_RESET: - case NX_DEV_INITALIZING: - if (++adapter->fw_wait_cnt < FW_POLL_THRESH) { - netxen_schedule_work(adapter, - netxen_fwinit_work, 2 * FW_POLL_DELAY); - return; - } - - case NX_DEV_FAILED: - default: - nx_incr_dev_ref_cnt(adapter); - break; - } - - clear_bit(__NX_RESETTING, &adapter->state); -} - -static void -netxen_detach_work(struct work_struct *work) -{ - struct netxen_adapter *adapter = container_of(work, - struct netxen_adapter, fw_work.work); - struct net_device *netdev = adapter->netdev; - int ref_cnt, delay; - u32 status; - - netif_device_detach(netdev); - - netxen_nic_down(adapter, netdev); - - rtnl_lock(); - netxen_nic_detach(adapter); - rtnl_unlock(); - - status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); - - if (status & NX_RCODE_FATAL_ERROR) - goto err_ret; - - if (adapter->temp == NX_TEMP_PANIC) - goto err_ret; - - ref_cnt = nx_decr_dev_ref_cnt(adapter); - - if (ref_cnt == -EIO) - goto err_ret; - - delay = (ref_cnt == 0) ? 
0 : (2 * FW_POLL_DELAY); - - adapter->fw_wait_cnt = 0; - netxen_schedule_work(adapter, netxen_fwinit_work, delay); - - return; - -err_ret: - clear_bit(__NX_RESETTING, &adapter->state); -} - -static int -netxen_check_health(struct netxen_adapter *adapter) -{ - u32 state, heartbit; - struct net_device *netdev = adapter->netdev; - - state = NXRD32(adapter, NX_CRB_DEV_STATE); - if (state == NX_DEV_NEED_AER) - return 0; - - if (netxen_nic_check_temp(adapter)) - goto detach; - - if (adapter->need_fw_reset) { - if (nx_dev_request_reset(adapter)) - return 0; - goto detach; - } - - /* NX_DEV_NEED_RESET can be set in two cases: - * 1. Tx timeout 2. FW hang - * A request to destroy the context is sent only for a Tx timeout; - * it is not required for a FW hang. - */ - if (state == NX_DEV_NEED_RESET) { - adapter->need_fw_reset = 1; - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - goto detach; - } - - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 0; - - heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); - if (heartbit != adapter->heartbit) { - adapter->heartbit = heartbit; - adapter->fw_fail_cnt = 0; - if (adapter->need_fw_reset) - goto detach; - return 0; - } - - if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) - return 0; - - if (nx_dev_request_reset(adapter)) - return 0; - - clear_bit(__NX_FW_ATTACHED, &adapter->state); - - dev_info(&netdev->dev, "firmware hang detected\n"); - -detach: - if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && - !test_and_set_bit(__NX_RESETTING, &adapter->state)) - netxen_schedule_work(adapter, netxen_detach_work, 0); - return 1; -} - -static void -netxen_fw_poll_work(struct work_struct *work) -{ - struct netxen_adapter *adapter = container_of(work, - struct netxen_adapter, fw_work.work); - - if (test_bit(__NX_RESETTING, &adapter->state)) - goto reschedule; - - if (test_bit(__NX_DEV_UP, &adapter->state)) { - if (!adapter->has_link_events) { - - netxen_nic_handle_phy_intr(adapter); - - if (adapter->link_changed) - netxen_nic_set_link_parameters(adapter); - } - } - - if (netxen_check_health(adapter)) - return; - -reschedule: - netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); -} - -static ssize_t -netxen_store_bridged_mode(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct net_device *net = to_net_dev(dev); - struct netxen_adapter *adapter = netdev_priv(net); - unsigned long new; - int ret = -EINVAL; - - if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG)) - goto err_out; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - goto err_out; - - if (strict_strtoul(buf, 2, &new)) - goto err_out; - - if (!netxen_config_bridged_mode(adapter, !!new)) - ret = len; - -err_out: - return ret; -} - -static ssize_t -netxen_show_bridged_mode(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct net_device *net = to_net_dev(dev); - struct netxen_adapter *adapter; - int bridged_mode = 0; - - adapter = netdev_priv(net); - - if (adapter->capabilities & NX_FW_CAPABILITY_BDG) - bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED); - - return sprintf(buf, "%d\n", bridged_mode); -} - -static struct device_attribute dev_attr_bridged_mode = { - .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, - .show = netxen_show_bridged_mode, - .store = netxen_store_bridged_mode, -}; - -static ssize_t -netxen_store_diag_mode(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct netxen_adapter *adapter = dev_get_drvdata(dev); - unsigned long 
new; - - if (strict_strtoul(buf, 2, &new)) - return -EINVAL; - - if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) - adapter->flags ^= NETXEN_NIC_DIAG_ENABLED; - - return len; -} - -static ssize_t -netxen_show_diag_mode(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct netxen_adapter *adapter = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", - !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)); -} - -static struct device_attribute dev_attr_diag_mode = { - .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, - .show = netxen_show_diag_mode, - .store = netxen_store_diag_mode, -}; - -static int -netxen_sysfs_validate_crb(struct netxen_adapter *adapter, - loff_t offset, size_t size) -{ - size_t crb_size = 4; - - if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) - return -EIO; - - if (offset < NETXEN_PCI_CRBSPACE) { - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return -EINVAL; - - if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, - NETXEN_PCI_CAMQM_2M_END)) - crb_size = 8; - else - return -EINVAL; - } - - if ((size != crb_size) || (offset & (crb_size-1))) - return -EINVAL; - - return 0; -} - -static ssize_t -netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct netxen_adapter *adapter = dev_get_drvdata(dev); - u32 data; - u64 qmdata; - int ret; - - ret = netxen_sysfs_validate_crb(adapter, offset, size); - if (ret != 0) - return ret; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && - ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, - NETXEN_PCI_CAMQM_2M_END)) { - netxen_pci_camqm_read_2M(adapter, offset, &qmdata); - memcpy(buf, &qmdata, size); - } else { - data = NXRD32(adapter, offset); - memcpy(buf, &data, size); - } - - return size; -} - -static ssize_t -netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct netxen_adapter *adapter = dev_get_drvdata(dev); - u32 data; - u64 qmdata; - int ret; - - ret = netxen_sysfs_validate_crb(adapter, offset, size); - if (ret != 0) - return ret; - - if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && - ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, - NETXEN_PCI_CAMQM_2M_END)) { - memcpy(&qmdata, buf, size); - netxen_pci_camqm_write_2M(adapter, offset, qmdata); - } else { - memcpy(&data, buf, size); - NXWR32(adapter, offset, data); - } - - return size; -} - -static int -netxen_sysfs_validate_mem(struct netxen_adapter *adapter, - loff_t offset, size_t size) -{ - if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) - return -EIO; - - if ((size != 8) || (offset & 0x7)) - return -EIO; - - return 0; -} - -static ssize_t -netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct netxen_adapter *adapter = dev_get_drvdata(dev); - u64 data; - int ret; - - ret = netxen_sysfs_validate_mem(adapter, offset, size); - if (ret != 0) - return ret; - - if (adapter->pci_mem_read(adapter, offset, &data)) - return -EIO; - - memcpy(buf, &data, size); - - return size; -} - -static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct 
netxen_adapter *adapter = dev_get_drvdata(dev); - u64 data; - int ret; - - ret = netxen_sysfs_validate_mem(adapter, offset, size); - if (ret != 0) - return ret; - - memcpy(&data, buf, size); - - if (adapter->pci_mem_write(adapter, offset, data)) - return -EIO; - - return size; -} - - -static struct bin_attribute bin_attr_crb = { - .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = netxen_sysfs_read_crb, - .write = netxen_sysfs_write_crb, -}; - -static struct bin_attribute bin_attr_mem = { - .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = netxen_sysfs_read_mem, - .write = netxen_sysfs_write_mem, -}; - - -static void -netxen_create_sysfs_entries(struct netxen_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct device *dev = &netdev->dev; - - if (adapter->capabilities & NX_FW_CAPABILITY_BDG) { - /* bridged_mode control */ - if (device_create_file(dev, &dev_attr_bridged_mode)) { - dev_warn(&netdev->dev, - "failed to create bridged_mode sysfs entry\n"); - } - } -} - -static void -netxen_remove_sysfs_entries(struct netxen_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct device *dev = &netdev->dev; - - if (adapter->capabilities & NX_FW_CAPABILITY_BDG) - device_remove_file(dev, &dev_attr_bridged_mode); -} - -static void -netxen_create_diag_entries(struct netxen_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct device *dev; - - dev = &pdev->dev; - if (device_create_file(dev, &dev_attr_diag_mode)) - dev_info(dev, "failed to create diag_mode sysfs entry\n"); - if (device_create_bin_file(dev, &bin_attr_crb)) - dev_info(dev, "failed to create crb sysfs entry\n"); - if (device_create_bin_file(dev, &bin_attr_mem)) - dev_info(dev, "failed to create mem sysfs entry\n"); -} - - -static void -netxen_remove_diag_entries(struct netxen_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct device *dev = &pdev->dev; - - device_remove_file(dev, &dev_attr_diag_mode); - device_remove_bin_file(dev, &bin_attr_crb); - device_remove_bin_file(dev, &bin_attr_mem); -} - -#ifdef CONFIG_INET - -#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops) - -static int -netxen_destip_supported(struct netxen_adapter *adapter) -{ - if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) - return 0; - - if (adapter->ahw.cut_through) - return 0; - - return 1; -} - -static void -netxen_free_vlan_ip_list(struct netxen_adapter *adapter) -{ - struct nx_vlan_ip_list *cur; - struct list_head *head = &adapter->vlan_ip_list; - - while (!list_empty(head)) { - cur = list_entry(head->next, struct nx_vlan_ip_list, list); - netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); - list_del(&cur->list); - kfree(cur); - } - -} -static void -netxen_list_config_vlan_ip(struct netxen_adapter *adapter, - struct in_ifaddr *ifa, unsigned long event) -{ - struct net_device *dev; - struct nx_vlan_ip_list *cur, *tmp_cur; - struct list_head *head; - - dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; - - if (dev == NULL) - return; - - if (!is_vlan_dev(dev)) - return; - - switch (event) { - case NX_IP_UP: - list_for_each(head, &adapter->vlan_ip_list) { - cur = list_entry(head, struct nx_vlan_ip_list, list); - - if (cur->ip_addr == ifa->ifa_address) - return; - } - - cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC); - if (cur == NULL) { - printk(KERN_ERR "%s: failed to add vlan ip to list\n", - adapter->netdev->name); - return; - } - - cur->ip_addr = ifa->ifa_address; - list_add_tail(&cur->list, &adapter->vlan_ip_list); - break; - case NX_IP_DOWN: - list_for_each_entry_safe(cur, tmp_cur, - &adapter->vlan_ip_list, list) { - if (cur->ip_addr == ifa->ifa_address) { - list_del(&cur->list); - kfree(cur); - break; - } - } - } -} -static void -netxen_config_indev_addr(struct netxen_adapter *adapter, - struct net_device *dev, unsigned long event) -{ - struct in_device *indev; - - if (!netxen_destip_supported(adapter)) - return; - - indev = in_dev_get(dev); - if (!indev) - return; - - for_ifa(indev) { - switch (event) { - case NETDEV_UP: - netxen_config_ipaddr(adapter, - ifa->ifa_address, NX_IP_UP); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); - break; - case NETDEV_DOWN: - netxen_config_ipaddr(adapter, - ifa->ifa_address, NX_IP_DOWN); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); - break; - default: - break; - } - } endfor_ifa(indev); - - in_dev_put(indev); -} - -static void -netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) - -{ - struct netxen_adapter *adapter = netdev_priv(netdev); - struct nx_vlan_ip_list *pos, *tmp_pos; - unsigned long ip_event; - - ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; - netxen_config_indev_addr(adapter, netdev, event); - - list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) { - netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); - } -} - -static int netxen_netdev_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct netxen_adapter *adapter; - struct net_device *dev = (struct net_device *)ptr; - struct net_device *orig_dev = dev; - -recheck: - if (dev == NULL) - goto done; - - if (dev->priv_flags & IFF_802_1Q_VLAN) { - dev = vlan_dev_real_dev(dev); - goto recheck; - } - - if (!is_netxen_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter) - goto done; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - goto done; - - netxen_config_indev_addr(adapter, orig_dev, event); -done: - return NOTIFY_DONE; -} - -static int -netxen_inetaddr_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct netxen_adapter *adapter; - struct net_device *dev; - - struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; - - dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; - -recheck: - if (dev == NULL) - goto done; - - if (dev->priv_flags & IFF_802_1Q_VLAN) { - dev = vlan_dev_real_dev(dev); - goto recheck; - } - - if (!is_netxen_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter || !netxen_destip_supported(adapter)) - goto done; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - goto done; - - switch (event) { - case NETDEV_UP: - netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); - break; - case NETDEV_DOWN: - netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); - break; - default: - break; - } - -done: - return NOTIFY_DONE; -} - -static struct notifier_block netxen_netdev_cb = { - .notifier_call = netxen_netdev_event, -}; - -static struct notifier_block netxen_inetaddr_cb = { - .notifier_call = netxen_inetaddr_event, -}; -#else -static void -netxen_restore_indev_addr(struct net_device *dev, unsigned long event) -{ } -static void -netxen_free_vlan_ip_list(struct netxen_adapter *adapter) -{ } -#endif - -static struct pci_error_handlers netxen_err_handler = { - .error_detected = netxen_io_error_detected, - .slot_reset = netxen_io_slot_reset, - .resume = netxen_io_resume, -}; - -static struct pci_driver netxen_driver = { - .name = netxen_nic_driver_name, - .id_table = netxen_pci_tbl, - .probe = netxen_nic_probe, - .remove = __devexit_p(netxen_nic_remove), -#ifdef CONFIG_PM - .suspend = netxen_nic_suspend, - .resume = netxen_nic_resume, -#endif - .shutdown = netxen_nic_shutdown, - .err_handler = &netxen_err_handler -}; - -static int __init netxen_init_module(void) -{ - printk(KERN_INFO "%s\n", netxen_nic_driver_string); - -#ifdef CONFIG_INET - register_netdevice_notifier(&netxen_netdev_cb); - register_inetaddr_notifier(&netxen_inetaddr_cb); -#endif - return pci_register_driver(&netxen_driver); -} - -module_init(netxen_init_module); - -static void __exit netxen_exit_module(void) -{ - pci_unregister_driver(&netxen_driver); - -#ifdef CONFIG_INET - unregister_inetaddr_notifier(&netxen_inetaddr_cb); - unregister_netdevice_notifier(&netxen_netdev_cb); -#endif -} - -module_exit(netxen_exit_module); diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c deleted file mode 100644 index ccde806..0000000 --- a/drivers/net/qla3xxx.c +++ /dev/null @@ -1,3970 +0,0 @@ -/* - * QLogic QLA3xxx NIC HBA Driver - * Copyright (c) 2003-2006 QLogic Corporation - * - * See LICENSE.qla3xxx for copyright and licensing details. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "qla3xxx.h" - -#define DRV_NAME "qla3xxx" -#define DRV_STRING "QLogic ISP3XXX Network Driver" -#define DRV_VERSION "v2.03.00-k5" - -static const char ql3xxx_driver_name[] = DRV_NAME; -static const char ql3xxx_driver_version[] = DRV_VERSION; - -#define TIMED_OUT_MSG \ -"Timed out waiting for management port to get free before issuing command\n" - -MODULE_AUTHOR("QLogic Corporation"); -MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -static const u32 default_msg - = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK - | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; - -static int debug = -1; /* defaults above */ -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -static int msi; -module_param(msi, int, 0); -MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); - -static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = { - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, - /* required last entry */ - {0,} -}; - -MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); - -/* - * These are the known PHY's which are used - */ -enum PHY_DEVICE_TYPE { - PHY_TYPE_UNKNOWN = 0, - PHY_VITESSE_VSC8211, - PHY_AGERE_ET1011C, - MAX_PHY_DEV_TYPES -}; - -struct PHY_DEVICE_INFO { - const enum PHY_DEVICE_TYPE phyDevice; - const u32 phyIdOUI; - const u16 phyIdModel; - const char *name; -}; - -static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { - {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, - {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, - {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, -}; - - -/* - * Caller must take hw_lock. - */ -static int ql_sem_spinlock(struct ql3_adapter *qdev, - u32 sem_mask, u32 sem_bits) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - unsigned int seconds = 3; - - do { - writel((sem_mask | sem_bits), - &port_regs->CommonRegs.semaphoreReg); - value = readl(&port_regs->CommonRegs.semaphoreReg); - if ((value & (sem_mask >> 16)) == sem_bits) - return 0; - ssleep(1); - } while (--seconds); - return -1; -} - -static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); - readl(&port_regs->CommonRegs.semaphoreReg); -} - -static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); - value = readl(&port_regs->CommonRegs.semaphoreReg); - return ((value & (sem_mask >> 16)) == sem_bits); -} - -/* - * Caller holds hw_lock. 
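- * Makes up to ten attempts, one second apart, to take the driver - * semaphore (QL_DRVR_SEM_MASK); returns 1 once the lock is acquired - * and 0 on timeout.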
- */ -static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) -{ - int i = 0; - - while (i < 10) { - if (i) - ssleep(1); - - if (ql_sem_lock(qdev, - QL_DRVR_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) - * 2) << 1)) { - netdev_printk(KERN_DEBUG, qdev->ndev, - "driver lock acquired\n"); - return 1; - } - } - - netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); - return 0; -} - -static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - writel(((ISP_CONTROL_NP_MASK << 16) | page), - &port_regs->CommonRegs.ispControlStatus); - readl(&port_regs->CommonRegs.ispControlStatus); - qdev->current_page = page; -} - -static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) -{ - u32 value; - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - value = readl(reg); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - return value; -} - -static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) -{ - return readl(reg); -} - -static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) -{ - u32 value; - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - - if (qdev->current_page != 0) - ql_set_register_page(qdev, 0); - value = readl(reg); - - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return value; -} - -static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) -{ - if (qdev->current_page != 0) - ql_set_register_page(qdev, 0); - return readl(reg); -} - -static void ql_write_common_reg_l(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - writel(value, reg); - readl(reg); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); -} - -static void ql_write_common_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - writel(value, reg); - readl(reg); -} - -static void ql_write_nvram_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - writel(value, reg); - readl(reg); - udelay(1); -} - -static void ql_write_page0_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - if (qdev->current_page != 0) - ql_set_register_page(qdev, 0); - writel(value, reg); - readl(reg); -} - -/* - * Caller holds hw_lock. Only called during init. - */ -static void ql_write_page1_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - if (qdev->current_page != 1) - ql_set_register_page(qdev, 1); - writel(value, reg); - readl(reg); -} - -/* - * Caller holds hw_lock. Only called during init. 
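- * The chip's registers are windowed into pages; as with the page 0 - * and page 1 helpers above, the window is switched first via - * ql_set_register_page() whenever a different page is currently mapped.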
- */ -static void ql_write_page2_reg(struct ql3_adapter *qdev, - u32 __iomem *reg, u32 value) -{ - if (qdev->current_page != 2) - ql_set_register_page(qdev, 2); - writel(value, reg); - readl(reg); -} - -static void ql_disable_interrupts(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, - (ISP_IMR_ENABLE_INT << 16)); - -} - -static void ql_enable_interrupts(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, - ((0xff << 16) | ISP_IMR_ENABLE_INT)); - -} - -static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, - struct ql_rcv_buf_cb *lrg_buf_cb) -{ - dma_addr_t map; - int err; - lrg_buf_cb->next = NULL; - - if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ - qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; - } else { - qdev->lrg_buf_free_tail->next = lrg_buf_cb; - qdev->lrg_buf_free_tail = lrg_buf_cb; - } - - if (!lrg_buf_cb->skb) { - lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, - qdev->lrg_buffer_len); - if (unlikely(!lrg_buf_cb->skb)) { - netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); - qdev->lrg_buf_skb_check++; - } else { - /* - * We save some space to copy the ethhdr from first - * buffer - */ - skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); - map = pci_map_single(qdev->pdev, - lrg_buf_cb->skb->data, - qdev->lrg_buffer_len - - QL_HEADER_SPACE, - PCI_DMA_FROMDEVICE); - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, - "PCI mapping failed with error: %d\n", - err); - dev_kfree_skb(lrg_buf_cb->skb); - lrg_buf_cb->skb = NULL; - - qdev->lrg_buf_skb_check++; - return; - } - - lrg_buf_cb->buf_phy_addr_low = - cpu_to_le32(LS_64BITS(map)); - lrg_buf_cb->buf_phy_addr_high = - cpu_to_le32(MS_64BITS(map)); - dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); - dma_unmap_len_set(lrg_buf_cb, maplen, - qdev->lrg_buffer_len - - QL_HEADER_SPACE); - } - } - - qdev->lrg_buf_free_count++; -} - -static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter - *qdev) -{ - struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; - - if (lrg_buf_cb != NULL) { - qdev->lrg_buf_free_head = lrg_buf_cb->next; - if (qdev->lrg_buf_free_head == NULL) - qdev->lrg_buf_free_tail = NULL; - qdev->lrg_buf_free_count--; - } - - return lrg_buf_cb; -} - -static u32 addrBits = EEPROM_NO_ADDR_BITS; -static u32 dataBits = EEPROM_NO_DATA_BITS; - -static void fm93c56a_deselect(struct ql3_adapter *qdev); -static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, - unsigned short *value); - -/* - * Caller holds hw_lock. - */ -static void fm93c56a_select(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; - - qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; - ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); - ql_write_nvram_reg(qdev, spir, - ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); -} - -/* - * Caller holds hw_lock. 
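- * Bit-bangs the EEPROM opcode and address out MSB-first. The DO level - * is rewritten only when the next bit differs from the previous one; - * each bit is then latched with a CLK_RISE write followed by a - * CLK_FALL write while the bit is held, roughly: - * write(base | bit); write(base | bit | CLK_RISE); - * write(base | bit | CLK_FALL); - * with base = ISP_NVRAM_MASK | qdev->eeprom_cmd_data.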
- */ -static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) -{ - int i; - u32 mask; - u32 dataBit; - u32 previousBit; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; - - /* Clock in a zero, then do the start bit */ - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_DO_1)); - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); - - mask = 1 << (FM93C56A_CMD_BITS - 1); - /* Force the previous data bit to be different */ - previousBit = 0xffff; - for (i = 0; i < FM93C56A_CMD_BITS; i++) { - dataBit = (cmd & mask) - ? AUBURN_EEPROM_DO_1 - : AUBURN_EEPROM_DO_0; - if (previousBit != dataBit) { - /* If the bit changed, change the DO state to match */ - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | - qdev->eeprom_cmd_data | dataBit)); - previousBit = dataBit; - } - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - dataBit | AUBURN_EEPROM_CLK_RISE)); - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - dataBit | AUBURN_EEPROM_CLK_FALL)); - cmd = cmd << 1; - } - - mask = 1 << (addrBits - 1); - /* Force the previous data bit to be different */ - previousBit = 0xffff; - for (i = 0; i < addrBits; i++) { - dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 - : AUBURN_EEPROM_DO_0; - if (previousBit != dataBit) { - /* - * If the bit changed, then change the DO state to - * match - */ - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | - qdev->eeprom_cmd_data | dataBit)); - previousBit = dataBit; - } - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - dataBit | AUBURN_EEPROM_CLK_RISE)); - ql_write_nvram_reg(qdev, spir, - (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - dataBit | AUBURN_EEPROM_CLK_FALL)); - eepromAddr = eepromAddr << 1; - } -} - -/* - * Caller holds hw_lock. - */ -static void fm93c56a_deselect(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; - - qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; - ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); -} - -/* - * Caller holds hw_lock. - */ -static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) -{ - int i; - u32 data = 0; - u32 dataBit; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; - - /* Read the data bits */ - /* The first bit is a dummy. Clock right over it. */ - for (i = 0; i < dataBits; i++) { - ql_write_nvram_reg(qdev, spir, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_CLK_RISE); - ql_write_nvram_reg(qdev, spir, - ISP_NVRAM_MASK | qdev->eeprom_cmd_data | - AUBURN_EEPROM_CLK_FALL); - dataBit = (ql_read_common_reg(qdev, spir) & - AUBURN_EEPROM_DI_1) ? 1 : 0; - data = (data << 1) | dataBit; - } - *value = (u16)data; -} - -/* - * Caller holds hw_lock. 
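- * One word read = select the part, clock in the READ opcode plus the - * address via fm93c56a_cmd(), clock the data bits back out via - * fm93c56a_datain(), then deselect.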
- */ -static void eeprom_readword(struct ql3_adapter *qdev, - u32 eepromAddr, unsigned short *value) -{ - fm93c56a_select(qdev); - fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); - fm93c56a_datain(qdev, value); - fm93c56a_deselect(qdev); -} - -static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) -{ - __le16 *p = (__le16 *)ndev->dev_addr; - p[0] = cpu_to_le16(addr[0]); - p[1] = cpu_to_le16(addr[1]); - p[2] = cpu_to_le16(addr[2]); -} - -static int ql_get_nvram_params(struct ql3_adapter *qdev) -{ - u16 *pEEPROMData; - u16 checksum = 0; - u32 index; - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - - pEEPROMData = (u16 *)&qdev->nvram_data; - qdev->eeprom_cmd_data = 0; - if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 10)) { - pr_err("%s: Failed ql_sem_spinlock()\n", __func__); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return -1; - } - - for (index = 0; index < EEPROM_SIZE; index++) { - eeprom_readword(qdev, index, pEEPROMData); - checksum += *pEEPROMData; - pEEPROMData++; - } - ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); - - if (checksum != 0) { - netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", - checksum); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return -1; - } - - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return checksum; -} - -static const u32 PHYAddr[2] = { - PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS -}; - -static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 temp; - int count = 1000; - - while (count) { - temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); - if (!(temp & MAC_MII_STATUS_BSY)) - return 0; - udelay(10); - count--; - } - return -1; -} - -static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 scanControl; - - if (qdev->numPorts > 1) { - /* Auto scan will cycle through multiple ports */ - scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; - } else { - scanControl = MAC_MII_CONTROL_SC; - } - - /* - * Scan register 1 of PHY/PETBI, - * Set up to scan both devices - * The autoscan starts from the first register, completes - * the last one before rolling over to the first - */ - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - PHYAddr[0] | MII_SCAN_REGISTER); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (scanControl) | - ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); -} - -static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) -{ - u8 ret; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - /* See if scan mode is enabled before we turn it off */ - if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & - (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { - /* Scan is enabled */ - ret = 1; - } else { - /* Scan is disabled */ - ret = 0; - } - - /* - * When disabling scan mode you must first change the MII register - * address - */ - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - PHYAddr[0] | MII_SCAN_REGISTER); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | - MAC_MII_CONTROL_RC) << 16)); - - return ret; -} - -static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, - u16 regAddr, u16 value, u32 phyAddr) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u8 scanWasEnabled; - - 
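- /* Auto-scan must be off while the management interface is driven - * directly; remember whether it was running so it can be restored - * once the write completes. */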
scanWasEnabled = ql_mii_disable_scan_mode(qdev); - - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - phyAddr | regAddr); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); - - /* Wait for write to complete 9/10/04 SJP */ - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - if (scanWasEnabled) - ql_mii_enable_scan_mode(qdev); - - return 0; -} - -static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, - u16 *value, u32 phyAddr) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u8 scanWasEnabled; - u32 temp; - - scanWasEnabled = ql_mii_disable_scan_mode(qdev); - - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - phyAddr | regAddr); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (MAC_MII_CONTROL_RC << 16)); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); - - /* Wait for the read to complete */ - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); - *value = (u16) temp; - - if (scanWasEnabled) - ql_mii_enable_scan_mode(qdev); - - return 0; -} - -static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - ql_mii_disable_scan_mode(qdev); - - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - qdev->PHYAddr | regAddr); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); - - /* Wait for write to complete. 
*/ - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - ql_mii_enable_scan_mode(qdev); - - return 0; -} - -static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) -{ - u32 temp; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - ql_mii_disable_scan_mode(qdev); - - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, - qdev->PHYAddr | regAddr); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (MAC_MII_CONTROL_RC << 16)); - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); - - /* Wait for the read to complete */ - if (ql_wait_for_mii_ready(qdev)) { - netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); - return -1; - } - - temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); - *value = (u16) temp; - - ql_mii_enable_scan_mode(qdev); - - return 0; -} - -static void ql_petbi_reset(struct ql3_adapter *qdev) -{ - ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); -} - -static void ql_petbi_start_neg(struct ql3_adapter *qdev) -{ - u16 reg; - - /* Enable Auto-negotiation sense */ - ql_mii_read_reg(qdev, PETBI_TBI_CTRL, ®); - reg |= PETBI_TBI_AUTO_SENSE; - ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); - - ql_mii_write_reg(qdev, PETBI_NEG_ADVER, - PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); - - ql_mii_write_reg(qdev, PETBI_CONTROL_REG, - PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | - PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); - -} - -static void ql_petbi_reset_ex(struct ql3_adapter *qdev) -{ - ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, - PHYAddr[qdev->mac_index]); -} - -static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) -{ - u16 reg; - - /* Enable Auto-negotiation sense */ - ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, ®, - PHYAddr[qdev->mac_index]); - reg |= PETBI_TBI_AUTO_SENSE; - ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, - PHYAddr[qdev->mac_index]); - - ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, - PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, - PHYAddr[qdev->mac_index]); - - ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, - PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | - PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, - PHYAddr[qdev->mac_index]); -} - -static void ql_petbi_init(struct ql3_adapter *qdev) -{ - ql_petbi_reset(qdev); - ql_petbi_start_neg(qdev); -} - -static void ql_petbi_init_ex(struct ql3_adapter *qdev) -{ - ql_petbi_reset_ex(qdev); - ql_petbi_start_neg_ex(qdev); -} - -static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) -{ - u16 reg; - - if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, ®) < 0) - return 0; - - return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; -} - -static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) -{ - netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); - /* power down device bit 11 = 1 */ - ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); - /* enable diagnostic mode bit 2 = 1 */ - ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); - /* 1000MB amplitude adjust (see Agere errata) */ - ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); - /* 1000MB amplitude adjust (see Agere errata) */ - ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); - /* 100MB amplitude adjust (see Agere errata) */ - ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); - /* 100MB amplitude adjust (see Agere errata) */ - 
ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); - /* 10MB amplitude adjust (see Agere errata) */ - ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); - /* 10MB amplitude adjust (see Agere errata) */ - ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); - /* point to hidden reg 0x2806 */ - ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); - /* Write new PHYAD w/bit 5 set */ - ql_mii_write_reg_ex(qdev, 0x11, - 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); - /* - * Disable diagnostic mode bit 2 = 0 - * Power up device bit 11 = 0 - * Link up (on) and activity (blink) - */ - ql_mii_write_reg(qdev, 0x12, 0x840a); - ql_mii_write_reg(qdev, 0x00, 0x1140); - ql_mii_write_reg(qdev, 0x1c, 0xfaf0); -} - -static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, - u16 phyIdReg0, u16 phyIdReg1) -{ - enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; - u32 oui; - u16 model; - int i; - - if (phyIdReg0 == 0xffff) - return result; - - if (phyIdReg1 == 0xffff) - return result; - - /* oui is split between two registers */ - oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); - - model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; - - /* Scan table for this PHY */ - for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { - if ((oui == PHY_DEVICES[i].phyIdOUI) && - (model == PHY_DEVICES[i].phyIdModel)) { - netdev_info(qdev->ndev, "Phy: %s\n", - PHY_DEVICES[i].name); - result = PHY_DEVICES[i].phyDevice; - break; - } - } - - return result; -} - -static int ql_phy_get_speed(struct ql3_adapter *qdev) -{ - u16 reg; - - switch (qdev->phyType) { - case PHY_AGERE_ET1011C: { - if (ql_mii_read_reg(qdev, 0x1A, ®) < 0) - return 0; - - reg = (reg >> 8) & 3; - break; - } - default: - if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) - return 0; - - reg = (((reg & 0x18) >> 3) & 3); - } - - switch (reg) { - case 2: - return SPEED_1000; - case 1: - return SPEED_100; - case 0: - return SPEED_10; - default: - return -1; - } -} - -static int ql_is_full_dup(struct ql3_adapter *qdev) -{ - u16 reg; - - switch (qdev->phyType) { - case PHY_AGERE_ET1011C: { - if (ql_mii_read_reg(qdev, 0x1A, ®)) - return 0; - - return ((reg & 0x0080) && (reg & 0x1000)) != 0; - } - case PHY_VITESSE_VSC8211: - default: { - if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) - return 0; - return (reg & PHY_AUX_DUPLEX_STAT) != 0; - } - } -} - -static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) -{ - u16 reg; - - if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, ®) < 0) - return 0; - - return (reg & PHY_NEG_PAUSE) != 0; -} - -static int PHY_Setup(struct ql3_adapter *qdev) -{ - u16 reg1; - u16 reg2; - bool agereAddrChangeNeeded = false; - u32 miiAddr = 0; - int err; - - /* Determine the PHY we are using by reading the ID's */ - err = ql_mii_read_reg(qdev, PHY_ID_0_REG, ®1); - if (err != 0) { - netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); - return err; - } - - err = ql_mii_read_reg(qdev, PHY_ID_1_REG, ®2); - if (err != 0) { - netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); - return err; - } - - /* Check if we have a Agere PHY */ - if ((reg1 == 0xffff) || (reg2 == 0xffff)) { - - /* Determine which MII address we should be using - determined by the index of the card */ - if (qdev->mac_index == 0) - miiAddr = MII_AGERE_ADDR_1; - else - miiAddr = MII_AGERE_ADDR_2; - - err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); - if (err != 0) { - netdev_err(qdev->ndev, - "Could not read from reg PHY_ID_0_REG after Agere detected\n"); - return err; - } - - err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, ®2, miiAddr); - if (err != 
0) { - netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); - return err; - } - - /* We need to remember to initialize the Agere PHY */ - agereAddrChangeNeeded = true; - } - - /* Determine the particular PHY we have on board to apply - PHY specific initializations */ - qdev->phyType = getPhyType(qdev, reg1, reg2); - - if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { - /* need this here so address gets changed */ - phyAgereSpecificInit(qdev, miiAddr); - } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { - netdev_err(qdev->ndev, "PHY is unknown\n"); - return -EIO; - } - - return 0; -} - -/* - * Caller holds hw_lock. - */ -static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - if (enable) - value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); - else - value = (MAC_CONFIG_REG_PE << 16); - - if (qdev->mac_index) - ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); - else - ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); -} - -/* - * Caller holds hw_lock. - */ -static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - if (enable) - value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); - else - value = (MAC_CONFIG_REG_SR << 16); - - if (qdev->mac_index) - ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); - else - ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); -} - -/* - * Caller holds hw_lock. - */ -static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - if (enable) - value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); - else - value = (MAC_CONFIG_REG_GM << 16); - - if (qdev->mac_index) - ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); - else - ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); -} - -/* - * Caller holds hw_lock. - */ -static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - if (enable) - value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); - else - value = (MAC_CONFIG_REG_FD << 16); - - if (qdev->mac_index) - ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); - else - ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); -} - -/* - * Caller holds hw_lock. - */ -static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - if (enable) - value = - ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | - ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); - else - value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); - - if (qdev->mac_index) - ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); - else - ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); -} - -/* - * Caller holds hw_lock. 
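- * Media sense: tests the PORT_STATUS_SM0/SM1 bit in portStatus for - * this function's port; nonzero means a fiber (PETBI) port rather - * than copper.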
- */ -static int ql_is_fiber(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 bitToCheck = 0; - u32 temp; - - switch (qdev->mac_index) { - case 0: - bitToCheck = PORT_STATUS_SM0; - break; - case 1: - bitToCheck = PORT_STATUS_SM1; - break; - } - - temp = ql_read_page0_reg(qdev, &port_regs->portStatus); - return (temp & bitToCheck) != 0; -} - -static int ql_is_auto_cfg(struct ql3_adapter *qdev) -{ - u16 reg; - ql_mii_read_reg(qdev, 0x00, ®); - return (reg & 0x1000) != 0; -} - -/* - * Caller holds hw_lock. - */ -static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 bitToCheck = 0; - u32 temp; - - switch (qdev->mac_index) { - case 0: - bitToCheck = PORT_STATUS_AC0; - break; - case 1: - bitToCheck = PORT_STATUS_AC1; - break; - } - - temp = ql_read_page0_reg(qdev, &port_regs->portStatus); - if (temp & bitToCheck) { - netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); - return 1; - } - netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); - return 0; -} - -/* - * ql_is_neg_pause() returns 1 if pause was negotiated to be on - */ -static int ql_is_neg_pause(struct ql3_adapter *qdev) -{ - if (ql_is_fiber(qdev)) - return ql_is_petbi_neg_pause(qdev); - else - return ql_is_phy_neg_pause(qdev); -} - -static int ql_auto_neg_error(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 bitToCheck = 0; - u32 temp; - - switch (qdev->mac_index) { - case 0: - bitToCheck = PORT_STATUS_AE0; - break; - case 1: - bitToCheck = PORT_STATUS_AE1; - break; - } - temp = ql_read_page0_reg(qdev, &port_regs->portStatus); - return (temp & bitToCheck) != 0; -} - -static u32 ql_get_link_speed(struct ql3_adapter *qdev) -{ - if (ql_is_fiber(qdev)) - return SPEED_1000; - else - return ql_phy_get_speed(qdev); -} - -static int ql_is_link_full_dup(struct ql3_adapter *qdev) -{ - if (ql_is_fiber(qdev)) - return 1; - else - return ql_is_full_dup(qdev); -} - -/* - * Caller holds hw_lock. - */ -static int ql_link_down_detect(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 bitToCheck = 0; - u32 temp; - - switch (qdev->mac_index) { - case 0: - bitToCheck = ISP_CONTROL_LINK_DN_0; - break; - case 1: - bitToCheck = ISP_CONTROL_LINK_DN_1; - break; - } - - temp = - ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); - return (temp & bitToCheck) != 0; -} - -/* - * Caller holds hw_lock. - */ -static int ql_link_down_detect_clear(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - switch (qdev->mac_index) { - case 0: - ql_write_common_reg(qdev, - &port_regs->CommonRegs.ispControlStatus, - (ISP_CONTROL_LINK_DN_0) | - (ISP_CONTROL_LINK_DN_0 << 16)); - break; - - case 1: - ql_write_common_reg(qdev, - &port_regs->CommonRegs.ispControlStatus, - (ISP_CONTROL_LINK_DN_1) | - (ISP_CONTROL_LINK_DN_1 << 16)); - break; - - default: - return 1; - } - - return 0; -} - -/* - * Caller holds hw_lock. 
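- * Tests the PORT_STATUS_F1_ENABLED/F3_ENABLED bit in portStatus to - * decide whether this function acts as link master for the shared - * PHY.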
- */ -static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 bitToCheck = 0; - u32 temp; - - switch (qdev->mac_index) { - case 0: - bitToCheck = PORT_STATUS_F1_ENABLED; - break; - case 1: - bitToCheck = PORT_STATUS_F3_ENABLED; - break; - default: - break; - } - - temp = ql_read_page0_reg(qdev, &port_regs->portStatus); - if (temp & bitToCheck) { - netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, - "not link master\n"); - return 0; - } - - netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n"); - return 1; -} - -static void ql_phy_reset_ex(struct ql3_adapter *qdev) -{ - ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, - PHYAddr[qdev->mac_index]); -} - -static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) -{ - u16 reg; - u16 portConfiguration; - - if (qdev->phyType == PHY_AGERE_ET1011C) - ql_mii_write_reg(qdev, 0x13, 0x0000); - /* turn off external loopback */ - - if (qdev->mac_index == 0) - portConfiguration = - qdev->nvram_data.macCfg_port0.portConfiguration; - else - portConfiguration = - qdev->nvram_data.macCfg_port1.portConfiguration; - - /* Some HBA's in the field are set to 0 and they need to - be reinterpreted with a default value */ - if (portConfiguration == 0) - portConfiguration = PORT_CONFIG_DEFAULT; - - /* Set the 1000 advertisements */ - ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, ®, - PHYAddr[qdev->mac_index]); - reg &= ~PHY_GIG_ALL_PARAMS; - - if (portConfiguration & PORT_CONFIG_1000MB_SPEED) { - if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) - reg |= PHY_GIG_ADV_1000F; - else - reg |= PHY_GIG_ADV_1000H; - } - - ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, - PHYAddr[qdev->mac_index]); - - /* Set the 10/100 & pause negotiation advertisements */ - ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, ®, - PHYAddr[qdev->mac_index]); - reg &= ~PHY_NEG_ALL_PARAMS; - - if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) - reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; - - if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { - if (portConfiguration & PORT_CONFIG_100MB_SPEED) - reg |= PHY_NEG_ADV_100F; - - if (portConfiguration & PORT_CONFIG_10MB_SPEED) - reg |= PHY_NEG_ADV_10F; - } - - if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { - if (portConfiguration & PORT_CONFIG_100MB_SPEED) - reg |= PHY_NEG_ADV_100H; - - if (portConfiguration & PORT_CONFIG_10MB_SPEED) - reg |= PHY_NEG_ADV_10H; - } - - if (portConfiguration & PORT_CONFIG_1000MB_SPEED) - reg |= 1; - - ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, - PHYAddr[qdev->mac_index]); - - ql_mii_read_reg_ex(qdev, CONTROL_REG, ®, PHYAddr[qdev->mac_index]); - - ql_mii_write_reg_ex(qdev, CONTROL_REG, - reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG, - PHYAddr[qdev->mac_index]); -} - -static void ql_phy_init_ex(struct ql3_adapter *qdev) -{ - ql_phy_reset_ex(qdev); - PHY_Setup(qdev); - ql_phy_start_neg_ex(qdev); -} - -/* - * Caller holds hw_lock. 
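- * Maps the PORT_STATUS_UP0/UP1 bit in portStatus for this function's - * port to LS_UP or LS_DOWN.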
- */ -static u32 ql_get_link_state(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 bitToCheck = 0; - u32 temp, linkState; - - switch (qdev->mac_index) { - case 0: - bitToCheck = PORT_STATUS_UP0; - break; - case 1: - bitToCheck = PORT_STATUS_UP1; - break; - } - - temp = ql_read_page0_reg(qdev, &port_regs->portStatus); - if (temp & bitToCheck) - linkState = LS_UP; - else - linkState = LS_DOWN; - - return linkState; -} - -static int ql_port_start(struct ql3_adapter *qdev) -{ - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) { - netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); - return -1; - } - - if (ql_is_fiber(qdev)) { - ql_petbi_init(qdev); - } else { - /* Copper port */ - ql_phy_init_ex(qdev); - } - - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - return 0; -} - -static int ql_finish_auto_neg(struct ql3_adapter *qdev) -{ - - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) - return -1; - - if (!ql_auto_neg_error(qdev)) { - if (test_bit(QL_LINK_MASTER, &qdev->flags)) { - /* configure the MAC */ - netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, - "Configuring link\n"); - ql_mac_cfg_soft_reset(qdev, 1); - ql_mac_cfg_gig(qdev, - (ql_get_link_speed - (qdev) == - SPEED_1000)); - ql_mac_cfg_full_dup(qdev, - ql_is_link_full_dup - (qdev)); - ql_mac_cfg_pause(qdev, - ql_is_neg_pause - (qdev)); - ql_mac_cfg_soft_reset(qdev, 0); - - /* enable the MAC */ - netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, - "Enabling mac\n"); - ql_mac_enable(qdev, 1); - } - - qdev->port_link_state = LS_UP; - netif_start_queue(qdev->ndev); - netif_carrier_on(qdev->ndev); - netif_info(qdev, link, qdev->ndev, - "Link is up at %d Mbps, %s duplex\n", - ql_get_link_speed(qdev), - ql_is_link_full_dup(qdev) ? "full" : "half"); - - } else { /* Remote error detected */ - - if (test_bit(QL_LINK_MASTER, &qdev->flags)) { - netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, - "Remote error detected. Calling ql_port_start()\n"); - /* - * ql_port_start() is shared code and needs - * to lock the PHY on it's own. - */ - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - if (ql_port_start(qdev)) /* Restart port */ - return -1; - return 0; - } - } - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - return 0; -} - -static void ql_link_state_machine_work(struct work_struct *work) -{ - struct ql3_adapter *qdev = - container_of(work, struct ql3_adapter, link_state_work.work); - - u32 curr_link_state; - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - - curr_link_state = ql_get_link_state(qdev); - - if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { - netif_info(qdev, link, qdev->ndev, - "Reset in progress, skip processing link state\n"); - - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - /* Restart timer on 2 second interval. 
*/ - mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); - - return; - } - - switch (qdev->port_link_state) { - default: - if (test_bit(QL_LINK_MASTER, &qdev->flags)) - ql_port_start(qdev); - qdev->port_link_state = LS_DOWN; - /* Fall Through */ - - case LS_DOWN: - if (curr_link_state == LS_UP) { - netif_info(qdev, link, qdev->ndev, "Link is up\n"); - if (ql_is_auto_neg_complete(qdev)) - ql_finish_auto_neg(qdev); - - if (qdev->port_link_state == LS_UP) - ql_link_down_detect_clear(qdev); - - qdev->port_link_state = LS_UP; - } - break; - - case LS_UP: - /* - * See if the link is currently down or went down and came - * back up - */ - if (curr_link_state == LS_DOWN) { - netif_info(qdev, link, qdev->ndev, "Link is down\n"); - qdev->port_link_state = LS_DOWN; - } - if (ql_link_down_detect(qdev)) - qdev->port_link_state = LS_DOWN; - break; - } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - /* Restart timer on 2 second interval. */ - mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); -} - -/* - * Caller must take hw_lock and QL_PHY_GIO_SEM. - */ -static void ql_get_phy_owner(struct ql3_adapter *qdev) -{ - if (ql_this_adapter_controls_port(qdev)) - set_bit(QL_LINK_MASTER, &qdev->flags); - else - clear_bit(QL_LINK_MASTER, &qdev->flags); -} - -/* - * Caller must take hw_lock and QL_PHY_GIO_SEM. - */ -static void ql_init_scan_mode(struct ql3_adapter *qdev) -{ - ql_mii_enable_scan_mode(qdev); - - if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { - if (ql_this_adapter_controls_port(qdev)) - ql_petbi_init_ex(qdev); - } else { - if (ql_this_adapter_controls_port(qdev)) - ql_phy_init_ex(qdev); - } -} - -/* - * MII_Setup needs to be called before taking the PHY out of reset - * so that the management interface clock speed can be set properly. - * It would be better if we had a way to disable MDC until after the - * PHY is out of reset, but we don't have that capability. 
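- * The divider itself is programmed below by writing - * MAC_MII_CONTROL_CLK_SEL_DIV28 to macMIIMgmtControlReg with the - * CLK_SEL field mask shifted into the upper 16 bits; that is the - * write-enable-mask convention these control registers use - * throughout the driver.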
- */ -static int ql_mii_setup(struct ql3_adapter *qdev) -{ - u32 reg; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) - return -1; - - if (qdev->device_id == QL3032_DEVICE_ID) - ql_write_page0_reg(qdev, - &port_regs->macMIIMgmtControlReg, 0x0f00000); - - /* Divide 125MHz clock by 28 to meet PHY timing requirements */ - reg = MAC_MII_CONTROL_CLK_SEL_DIV28; - - ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, - reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); - - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - return 0; -} - -#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ - SUPPORTED_FIBRE | \ - SUPPORTED_Autoneg) -#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ - SUPPORTED_10baseT_Full | \ - SUPPORTED_100baseT_Half | \ - SUPPORTED_100baseT_Full | \ - SUPPORTED_1000baseT_Half | \ - SUPPORTED_1000baseT_Full | \ - SUPPORTED_Autoneg | \ - SUPPORTED_TP) \ - -static u32 ql_supported_modes(struct ql3_adapter *qdev) -{ - if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) - return SUPPORTED_OPTICAL_MODES; - - return SUPPORTED_TP_MODES; -} - -static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) -{ - int status; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | - (qdev->mac_index) * 2) << 7)) { - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return 0; - } - status = ql_is_auto_cfg(qdev); - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return status; -} - -static u32 ql_get_speed(struct ql3_adapter *qdev) -{ - u32 status; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | - (qdev->mac_index) * 2) << 7)) { - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return 0; - } - status = ql_get_link_speed(qdev); - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return status; -} - -static int ql_get_full_dup(struct ql3_adapter *qdev) -{ - int status; - unsigned long hw_flags; - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | - (qdev->mac_index) * 2) << 7)) { - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return 0; - } - status = ql_is_link_full_dup(qdev); - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return status; -} - -static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = ql_supported_modes(qdev); - - if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { - ecmd->port = PORT_FIBRE; - } else { - ecmd->port = PORT_TP; - ecmd->phy_address = qdev->PHYAddr; - } - ecmd->advertising = ql_supported_modes(qdev); - ecmd->autoneg = ql_get_auto_cfg_status(qdev); - ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); - ecmd->duplex = ql_get_full_dup(qdev); - return 0; -} - -static void ql_get_drvinfo(struct net_device *ndev, - struct ethtool_drvinfo *drvinfo) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - strncpy(drvinfo->driver, ql3xxx_driver_name, 32); - strncpy(drvinfo->version, ql3xxx_driver_version, 32); - strncpy(drvinfo->fw_version, "N/A", 32); - strncpy(drvinfo->bus_info, 
pci_name(qdev->pdev), 32); - drvinfo->regdump_len = 0; - drvinfo->eedump_len = 0; -} - -static u32 ql_get_msglevel(struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - return qdev->msg_enable; -} - -static void ql_set_msglevel(struct net_device *ndev, u32 value) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - qdev->msg_enable = value; -} - -static void ql_get_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pause) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - u32 reg; - if (qdev->mac_index == 0) - reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); - else - reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); - - pause->autoneg = ql_get_auto_cfg_status(qdev); - pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; - pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; -} - -static const struct ethtool_ops ql3xxx_ethtool_ops = { - .get_settings = ql_get_settings, - .get_drvinfo = ql_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_msglevel = ql_get_msglevel, - .set_msglevel = ql_set_msglevel, - .get_pauseparam = ql_get_pauseparam, -}; - -static int ql_populate_free_queue(struct ql3_adapter *qdev) -{ - struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; - dma_addr_t map; - int err; - - while (lrg_buf_cb) { - if (!lrg_buf_cb->skb) { - lrg_buf_cb->skb = - netdev_alloc_skb(qdev->ndev, - qdev->lrg_buffer_len); - if (unlikely(!lrg_buf_cb->skb)) { - netdev_printk(KERN_DEBUG, qdev->ndev, - "Failed netdev_alloc_skb()\n"); - break; - } else { - /* - * We save some space to copy the ethhdr from - * first buffer - */ - skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); - map = pci_map_single(qdev->pdev, - lrg_buf_cb->skb->data, - qdev->lrg_buffer_len - - QL_HEADER_SPACE, - PCI_DMA_FROMDEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, - "PCI mapping failed with error: %d\n", - err); - dev_kfree_skb(lrg_buf_cb->skb); - lrg_buf_cb->skb = NULL; - break; - } - - - lrg_buf_cb->buf_phy_addr_low = - cpu_to_le32(LS_64BITS(map)); - lrg_buf_cb->buf_phy_addr_high = - cpu_to_le32(MS_64BITS(map)); - dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); - dma_unmap_len_set(lrg_buf_cb, maplen, - qdev->lrg_buffer_len - - QL_HEADER_SPACE); - --qdev->lrg_buf_skb_check; - if (!qdev->lrg_buf_skb_check) - return 1; - } - } - lrg_buf_cb = lrg_buf_cb->next; - } - return 0; -} - -/* - * Caller holds hw_lock. - */ -static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - if (qdev->small_buf_release_cnt >= 16) { - while (qdev->small_buf_release_cnt >= 16) { - qdev->small_buf_q_producer_index++; - - if (qdev->small_buf_q_producer_index == - NUM_SBUFQ_ENTRIES) - qdev->small_buf_q_producer_index = 0; - qdev->small_buf_release_cnt -= 8; - } - wmb(); - writel(qdev->small_buf_q_producer_index, - &port_regs->CommonRegs.rxSmallQProducerIndex); - } -} - -/* - * Caller holds hw_lock. 
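ql_update_small_bufq_prod_index above batches its doorbell: judging by the arithmetic, each small-buffer queue entry carries eight buffer addresses, and the producer index advances one entry per eight released buffers, but only once at least sixteen have accumulated. A standalone model of that batching and wraparound, with an assumed queue depth:

#include <stdint.h>
#include <stdio.h>

#define ADDRS_PER_ENTRY   8     /* inferred from the 16/8 arithmetic */
#define NUM_SBUFQ_ENTRIES 32    /* assumed queue depth */

struct sbufq {
    uint32_t producer;          /* next entry to hand to the chip */
    uint32_t released;          /* buffers the chip has consumed */
};

/* Advance the producer one entry per ADDRS_PER_ENTRY released
 * buffers, but only once a two-entry batch has accumulated; wrap at
 * the end of the ring.  Returns nonzero if the doorbell must ring. */
static int sbufq_advance(struct sbufq *q)
{
    int moved = 0;

    if (q->released < 2 * ADDRS_PER_ENTRY)
        return 0;
    while (q->released >= 2 * ADDRS_PER_ENTRY) {
        q->producer = (q->producer + 1) % NUM_SBUFQ_ENTRIES;
        q->released -= ADDRS_PER_ENTRY;
        moved = 1;
    }
    return moved;   /* caller would wmb() and writel() the index */
}

int main(void)
{
    struct sbufq q = { .producer = 30, .released = 20 };

    if (sbufq_advance(&q))
        printf("producer=%u released=%u\n",
               (unsigned)q.producer, (unsigned)q.released);
    return 0;
}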
- */ -static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) -{ - struct bufq_addr_element *lrg_buf_q_ele; - int i; - struct ql_rcv_buf_cb *lrg_buf_cb; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - if ((qdev->lrg_buf_free_count >= 8) && - (qdev->lrg_buf_release_cnt >= 16)) { - - if (qdev->lrg_buf_skb_check) - if (!ql_populate_free_queue(qdev)) - return; - - lrg_buf_q_ele = qdev->lrg_buf_next_free; - - while ((qdev->lrg_buf_release_cnt >= 16) && - (qdev->lrg_buf_free_count >= 8)) { - - for (i = 0; i < 8; i++) { - lrg_buf_cb = - ql_get_from_lrg_buf_free_list(qdev); - lrg_buf_q_ele->addr_high = - lrg_buf_cb->buf_phy_addr_high; - lrg_buf_q_ele->addr_low = - lrg_buf_cb->buf_phy_addr_low; - lrg_buf_q_ele++; - - qdev->lrg_buf_release_cnt--; - } - - qdev->lrg_buf_q_producer_index++; - - if (qdev->lrg_buf_q_producer_index == - qdev->num_lbufq_entries) - qdev->lrg_buf_q_producer_index = 0; - - if (qdev->lrg_buf_q_producer_index == - (qdev->num_lbufq_entries - 1)) { - lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; - } - } - wmb(); - qdev->lrg_buf_next_free = lrg_buf_q_ele; - writel(qdev->lrg_buf_q_producer_index, - &port_regs->CommonRegs.rxLargeQProducerIndex); - } -} - -static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, - struct ob_mac_iocb_rsp *mac_rsp) -{ - struct ql_tx_buf_cb *tx_cb; - int i; - int retval = 0; - - if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { - netdev_warn(qdev->ndev, - "Frame too short but it was padded and sent\n"); - } - - tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; - - /* Check the transmit response flags for any errors */ - if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { - netdev_err(qdev->ndev, - "Frame too short to be legal, frame not sent\n"); - - qdev->ndev->stats.tx_errors++; - retval = -EIO; - goto frame_not_sent; - } - - if (tx_cb->seg_count == 0) { - netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", - mac_rsp->transaction_id); - - qdev->ndev->stats.tx_errors++; - retval = -EIO; - goto invalid_seg_count; - } - - pci_unmap_single(qdev->pdev, - dma_unmap_addr(&tx_cb->map[0], mapaddr), - dma_unmap_len(&tx_cb->map[0], maplen), - PCI_DMA_TODEVICE); - tx_cb->seg_count--; - if (tx_cb->seg_count) { - for (i = 1; i < tx_cb->seg_count; i++) { - pci_unmap_page(qdev->pdev, - dma_unmap_addr(&tx_cb->map[i], - mapaddr), - dma_unmap_len(&tx_cb->map[i], maplen), - PCI_DMA_TODEVICE); - } - } - qdev->ndev->stats.tx_packets++; - qdev->ndev->stats.tx_bytes += tx_cb->skb->len; - -frame_not_sent: - dev_kfree_skb_irq(tx_cb->skb); - tx_cb->skb = NULL; - -invalid_seg_count: - atomic_inc(&qdev->tx_count); -} - -static void ql_get_sbuf(struct ql3_adapter *qdev) -{ - if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) - qdev->small_buf_index = 0; - qdev->small_buf_release_cnt++; -} - -static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) -{ - struct ql_rcv_buf_cb *lrg_buf_cb = NULL; - lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index]; - qdev->lrg_buf_release_cnt++; - if (++qdev->lrg_buf_index == qdev->num_large_buffers) - qdev->lrg_buf_index = 0; - return lrg_buf_cb; -} - -/* - * The difference between 3022 and 3032 for inbound completions: - * 3022 uses two buffers per completion. The first buffer contains - * (some) header info, the second the remainder of the headers plus - * the data. For this chip we reserve some space at the top of the - * receive buffer so that the header info in buffer one can be - * prepended to the buffer two. Buffer two is the sent up while - * buffer one is returned to the hardware to be reused. 
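ql_process_mac_tx_intr above leans on the common kernel goto idiom: the error labels are ordered so the success path falls through them, letting every path share the skb free and the tx-slot return. A compact standalone model of that control flow, with hypothetical names and the bookkeeping reduced to printfs:

#include <stdio.h>

/* Model of the goto-based completion handler: errors jump past the
 * statistics update but still share the skb free and slot return. */
static int complete_tx(int too_short, int seg_count)
{
    int ret = 0;

    if (too_short) {
        ret = -1;               /* frame never went out */
        goto frame_not_sent;
    }
    if (seg_count == 0) {
        ret = -1;               /* bogus descriptor */
        goto invalid_seg_count;
    }
    printf("tx ok\n");          /* stats update on success */
frame_not_sent:
    printf("free skb\n");       /* success and short-frame paths */
invalid_seg_count:
    printf("tx slot returned\n");   /* all paths */
    return ret;
}

int main(void)
{
    complete_tx(0, 3);  /* success: all three steps run */
    complete_tx(1, 3);  /* error: stats update is skipped */
    return 0;
}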
- * 3032 receives all of it's data and headers in one buffer for a - * simpler process. 3032 also supports checksum verification as - * can be seen in ql_process_macip_rx_intr(). - */ -static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, - struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) -{ - struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; - struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; - struct sk_buff *skb; - u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); - - /* - * Get the inbound address list (small buffer). - */ - ql_get_sbuf(qdev); - - if (qdev->device_id == QL3022_DEVICE_ID) - lrg_buf_cb1 = ql_get_lbuf(qdev); - - /* start of second buffer */ - lrg_buf_cb2 = ql_get_lbuf(qdev); - skb = lrg_buf_cb2->skb; - - qdev->ndev->stats.rx_packets++; - qdev->ndev->stats.rx_bytes += length; - - skb_put(skb, length); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(lrg_buf_cb2, mapaddr), - dma_unmap_len(lrg_buf_cb2, maplen), - PCI_DMA_FROMDEVICE); - prefetch(skb->data); - skb_checksum_none_assert(skb); - skb->protocol = eth_type_trans(skb, qdev->ndev); - - netif_receive_skb(skb); - lrg_buf_cb2->skb = NULL; - - if (qdev->device_id == QL3022_DEVICE_ID) - ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); - ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); -} - -static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, - struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) -{ - struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; - struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; - struct sk_buff *skb1 = NULL, *skb2; - struct net_device *ndev = qdev->ndev; - u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); - u16 size = 0; - - /* - * Get the inbound address list (small buffer). - */ - - ql_get_sbuf(qdev); - - if (qdev->device_id == QL3022_DEVICE_ID) { - /* start of first buffer on 3022 */ - lrg_buf_cb1 = ql_get_lbuf(qdev); - skb1 = lrg_buf_cb1->skb; - size = ETH_HLEN; - if (*((u16 *) skb1->data) != 0xFFFF) - size += VLAN_ETH_HLEN - ETH_HLEN; - } - - /* start of second buffer */ - lrg_buf_cb2 = ql_get_lbuf(qdev); - skb2 = lrg_buf_cb2->skb; - - skb_put(skb2, length); /* Just the second buffer length here. */ - pci_unmap_single(qdev->pdev, - dma_unmap_addr(lrg_buf_cb2, mapaddr), - dma_unmap_len(lrg_buf_cb2, maplen), - PCI_DMA_FROMDEVICE); - prefetch(skb2->data); - - skb_checksum_none_assert(skb2); - if (qdev->device_id == QL3022_DEVICE_ID) { - /* - * Copy the ethhdr from first buffer to second. This - * is necessary for 3022 IP completions. - */ - skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, - skb_push(skb2, size), size); - } else { - u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); - if (checksum & - (IB_IP_IOCB_RSP_3032_ICE | - IB_IP_IOCB_RSP_3032_CE)) { - netdev_err(ndev, - "%s: Bad checksum for this %s packet, checksum = %x\n", - __func__, - ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 
- "TCP" : "UDP"), checksum); - } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || - (checksum & IB_IP_IOCB_RSP_3032_UDP && - !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { - skb2->ip_summed = CHECKSUM_UNNECESSARY; - } - } - skb2->protocol = eth_type_trans(skb2, qdev->ndev); - - netif_receive_skb(skb2); - ndev->stats.rx_packets++; - ndev->stats.rx_bytes += length; - lrg_buf_cb2->skb = NULL; - - if (qdev->device_id == QL3022_DEVICE_ID) - ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); - ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); -} - -static int ql_tx_rx_clean(struct ql3_adapter *qdev, - int *tx_cleaned, int *rx_cleaned, int work_to_do) -{ - struct net_rsp_iocb *net_rsp; - struct net_device *ndev = qdev->ndev; - int work_done = 0; - - /* While there are entries in the completion queue. */ - while ((le32_to_cpu(*(qdev->prsp_producer_index)) != - qdev->rsp_consumer_index) && (work_done < work_to_do)) { - - net_rsp = qdev->rsp_current; - rmb(); - /* - * Fix 4032 chip's undocumented "feature" where bit-8 is set - * if the inbound completion is for a VLAN. - */ - if (qdev->device_id == QL3032_DEVICE_ID) - net_rsp->opcode &= 0x7f; - switch (net_rsp->opcode) { - - case OPCODE_OB_MAC_IOCB_FN0: - case OPCODE_OB_MAC_IOCB_FN2: - ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) - net_rsp); - (*tx_cleaned)++; - break; - - case OPCODE_IB_MAC_IOCB: - case OPCODE_IB_3032_MAC_IOCB: - ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) - net_rsp); - (*rx_cleaned)++; - break; - - case OPCODE_IB_IP_IOCB: - case OPCODE_IB_3032_IP_IOCB: - ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) - net_rsp); - (*rx_cleaned)++; - break; - default: { - u32 *tmp = (u32 *)net_rsp; - netdev_err(ndev, - "Hit default case, not handled!\n" - " dropping the packet, opcode = %x\n" - "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", - net_rsp->opcode, - (unsigned long int)tmp[0], - (unsigned long int)tmp[1], - (unsigned long int)tmp[2], - (unsigned long int)tmp[3]); - } - } - - qdev->rsp_consumer_index++; - - if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { - qdev->rsp_consumer_index = 0; - qdev->rsp_current = qdev->rsp_q_virt_addr; - } else { - qdev->rsp_current++; - } - - work_done = *tx_cleaned + *rx_cleaned; - } - - return work_done; -} - -static int ql_poll(struct napi_struct *napi, int budget) -{ - struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); - int rx_cleaned = 0, tx_cleaned = 0; - unsigned long hw_flags; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - - ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); - - if (tx_cleaned + rx_cleaned != budget) { - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - __napi_complete(napi); - ql_update_small_bufq_prod_index(qdev); - ql_update_lrg_bufq_prod_index(qdev); - writel(qdev->rsp_consumer_index, - &port_regs->CommonRegs.rspQConsumerIndex); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - ql_enable_interrupts(qdev); - } - return tx_cleaned + rx_cleaned; -} - -static irqreturn_t ql3xxx_isr(int irq, void *dev_id) -{ - - struct net_device *ndev = dev_id; - struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - int handled = 1; - u32 var; - - value = ql_read_common_reg_l(qdev, - &port_regs->CommonRegs.ispControlStatus); - - if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { - spin_lock(&qdev->adapter_lock); - netif_stop_queue(qdev->ndev); - netif_carrier_off(qdev->ndev); - ql_disable_interrupts(qdev); - qdev->port_link_state 
= LS_DOWN; - set_bit(QL_RESET_ACTIVE, &qdev->flags) ; - - if (value & ISP_CONTROL_FE) { - /* - * Chip Fatal Error. - */ - var = - ql_read_page0_reg_l(qdev, - &port_regs->PortFatalErrStatus); - netdev_warn(ndev, - "Resetting chip. PortFatalErrStatus register = 0x%x\n", - var); - set_bit(QL_RESET_START, &qdev->flags) ; - } else { - /* - * Soft Reset Requested. - */ - set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; - netdev_err(ndev, - "Another function issued a reset to the chip. ISR value = %x\n", - value); - } - queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); - spin_unlock(&qdev->adapter_lock); - } else if (value & ISP_IMR_DISABLE_CMPL_INT) { - ql_disable_interrupts(qdev); - if (likely(napi_schedule_prep(&qdev->napi))) - __napi_schedule(&qdev->napi); - } else - return IRQ_NONE; - - return IRQ_RETVAL(handled); -} - -/* - * Get the total number of segments needed for the given number of fragments. - * This is necessary because outbound address lists (OAL) will be used when - * more than two frags are given. Each address list has 5 addr/len pairs. - * The 5th pair in each OAL is used to point to the next OAL if more frags - * are coming. That is why the frags:segment count ratio is not linear. - */ -static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) -{ - if (qdev->device_id == QL3022_DEVICE_ID) - return 1; - - if (frags <= 2) - return frags + 1; - else if (frags <= 6) - return frags + 2; - else if (frags <= 10) - return frags + 3; - else if (frags <= 14) - return frags + 4; - else if (frags <= 18) - return frags + 5; - return -1; -} - -static void ql_hw_csum_setup(const struct sk_buff *skb, - struct ob_mac_iocb_req *mac_iocb_ptr) -{ - const struct iphdr *ip = ip_hdr(skb); - - mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); - mac_iocb_ptr->ip_hdr_len = ip->ihl; - - if (ip->protocol == IPPROTO_TCP) { - mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | - OB_3032MAC_IOCB_REQ_IC; - } else { - mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | - OB_3032MAC_IOCB_REQ_IC; - } - -} - -/* - * Map the buffers for this transmit. - * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. - */ -static int ql_send_map(struct ql3_adapter *qdev, - struct ob_mac_iocb_req *mac_iocb_ptr, - struct ql_tx_buf_cb *tx_cb, - struct sk_buff *skb) -{ - struct oal *oal; - struct oal_entry *oal_entry; - int len = skb_headlen(skb); - dma_addr_t map; - int err; - int completed_segs, i; - int seg_cnt, seg = 0; - int frag_cnt = (int)skb_shinfo(skb)->nr_frags; - - seg_cnt = tx_cb->seg_count; - /* - * Map the skb buffer first. - */ - map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", - err); - - return NETDEV_TX_BUSY; - } - - oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; - oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); - oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); - oal_entry->len = cpu_to_le32(len); - dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); - dma_unmap_len_set(&tx_cb->map[seg], maplen, len); - seg++; - - if (seg_cnt == 1) { - /* Terminate the last segment. */ - oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); - return NETDEV_TX_OK; - } - oal = tx_cb->oal; - for (completed_segs = 0; - completed_segs < frag_cnt; - completed_segs++, seg++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; - oal_entry++; - /* - * Check for continuation requirements. - * It's strange but necessary. 
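The continuation checks here, and ql_get_seg_count earlier in this block, both encode the same OAL geometry: the IOCB itself carries three address/length pairs, each chained OAL carries five, and the last pair of a full IOCB or OAL is spent on the chain pointer, so every continuation costs one extra segment slot. A standalone restatement of that arithmetic (the QL3022 path, which always answers one segment, is omitted):

#include <assert.h>
#include <stdio.h>

/* Segments needed for a head buffer plus 'frags' page fragments.
 * The IOCB holds 3 address/length pairs and each chained OAL holds
 * 5; the last pair of a full IOCB/OAL becomes the chain pointer, so
 * each continuation consumes one extra slot. */
static int seg_count(int frags)
{
    if (frags <= 2)
        return frags + 1;       /* fits in the IOCB alone */
    if (frags <= 6)
        return frags + 2;       /* + 1 OAL */
    if (frags <= 10)
        return frags + 3;       /* + 2 OALs */
    if (frags <= 14)
        return frags + 4;       /* + 3 OALs */
    if (frags <= 18)
        return frags + 5;       /* + 4 OALs */
    return -1;                  /* beyond what the chain supports */
}

int main(void)
{
    /* 2 frags + head = 3 segments: exactly the 3 IOCB pairs. */
    assert(seg_count(2) == 3);
    /* 3 frags force a chain: 4 data segments + 1 continuation. */
    assert(seg_count(3) == 5);
    printf("seg_count(18) = %d\n", seg_count(18));  /* 23 */
    return 0;
}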
- * Continuation entry points to outbound address list. - */ - if ((seg == 2 && seg_cnt > 3) || - (seg == 7 && seg_cnt > 8) || - (seg == 12 && seg_cnt > 13) || - (seg == 17 && seg_cnt > 18)) { - map = pci_map_single(qdev->pdev, oal, - sizeof(struct oal), - PCI_DMA_TODEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, - "PCI mapping outbound address list with error: %d\n", - err); - goto map_error; - } - - oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); - oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); - oal_entry->len = cpu_to_le32(sizeof(struct oal) | - OAL_CONT_ENTRY); - dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); - dma_unmap_len_set(&tx_cb->map[seg], maplen, - sizeof(struct oal)); - oal_entry = (struct oal_entry *)oal; - oal++; - seg++; - } - - map = pci_map_page(qdev->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, - "PCI mapping frags failed with error: %d\n", - err); - goto map_error; - } - - oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); - oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); - oal_entry->len = cpu_to_le32(frag->size); - dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); - dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); - } - /* Terminate the last segment. */ - oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); - return NETDEV_TX_OK; - -map_error: - /* A PCI mapping failed and now we will need to back out - * We need to traverse through the oal's and associated pages which - * have been mapped and now we must unmap them to clean up properly - */ - - seg = 1; - oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; - oal = tx_cb->oal; - for (i = 0; i < completed_segs; i++, seg++) { - oal_entry++; - - /* - * Check for continuation requirements. - * It's strange but necessary. - */ - - if ((seg == 2 && seg_cnt > 3) || - (seg == 7 && seg_cnt > 8) || - (seg == 12 && seg_cnt > 13) || - (seg == 17 && seg_cnt > 18)) { - pci_unmap_single(qdev->pdev, - dma_unmap_addr(&tx_cb->map[seg], mapaddr), - dma_unmap_len(&tx_cb->map[seg], maplen), - PCI_DMA_TODEVICE); - oal++; - seg++; - } - - pci_unmap_page(qdev->pdev, - dma_unmap_addr(&tx_cb->map[seg], mapaddr), - dma_unmap_len(&tx_cb->map[seg], maplen), - PCI_DMA_TODEVICE); - } - - pci_unmap_single(qdev->pdev, - dma_unmap_addr(&tx_cb->map[0], mapaddr), - dma_unmap_addr(&tx_cb->map[0], maplen), - PCI_DMA_TODEVICE); - - return NETDEV_TX_BUSY; - -} - -/* - * The difference between 3022 and 3032 sends: - * 3022 only supports a simple single segment transmission. - * 3032 supports checksumming and scatter/gather lists (fragments). - * The 3032 supports sglists by using the 3 addr/len pairs (ALP) - * in the IOCB plus a chain of outbound address lists (OAL) that - * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) - * will be used to point to an OAL when more ALP entries are required. - * The IOCB is always the top of the chain followed by one or more - * OALs (when necessary). 
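The map_error path above must replay exactly the walk the forward loop took, continuation slots included, so that every index it unmaps is one that was actually mapped. Stripped of the DMA details, this is the classic acquire-then-unwind pattern, sketched standalone below with hypothetical names and the resources modeled as an array:

#include <stdio.h>

#define MAX_SEGS 8

/* Acquire resources one by one; on failure, release only the ones
 * already acquired, walking the same indices the forward pass did. */
static int map_all(int n, int fail_at, int mapped[MAX_SEGS])
{
    int i, done = 0;

    for (i = 0; i < n; i++) {
        if (i == fail_at)
            goto map_error;     /* simulate a pci_map_* failure */
        mapped[i] = 1;
        done++;
    }
    return 0;

map_error:
    for (i = 0; i < done; i++)  /* back out the completed work */
        mapped[i] = 0;
    return -1;
}

int main(void)
{
    int mapped[MAX_SEGS] = { 0 };

    if (map_all(5, 3, mapped) < 0)
        printf("failed; mapped[0..4]=%d%d%d%d%d\n",
               mapped[0], mapped[1], mapped[2], mapped[3], mapped[4]);
    return 0;
}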
- */ -static netdev_tx_t ql3xxx_send(struct sk_buff *skb, - struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - struct ql_tx_buf_cb *tx_cb; - u32 tot_len = skb->len; - struct ob_mac_iocb_req *mac_iocb_ptr; - - if (unlikely(atomic_read(&qdev->tx_count) < 2)) - return NETDEV_TX_BUSY; - - tx_cb = &qdev->tx_buf[qdev->req_producer_index]; - tx_cb->seg_count = ql_get_seg_count(qdev, - skb_shinfo(skb)->nr_frags); - if (tx_cb->seg_count == -1) { - netdev_err(ndev, "%s: invalid segment count!\n", __func__); - return NETDEV_TX_OK; - } - - mac_iocb_ptr = tx_cb->queue_entry; - memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); - mac_iocb_ptr->opcode = qdev->mac_ob_opcode; - mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; - mac_iocb_ptr->flags |= qdev->mb_bit_mask; - mac_iocb_ptr->transaction_id = qdev->req_producer_index; - mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); - tx_cb->skb = skb; - if (qdev->device_id == QL3032_DEVICE_ID && - skb->ip_summed == CHECKSUM_PARTIAL) - ql_hw_csum_setup(skb, mac_iocb_ptr); - - if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { - netdev_err(ndev, "%s: Could not map the segments!\n", __func__); - return NETDEV_TX_BUSY; - } - - wmb(); - qdev->req_producer_index++; - if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) - qdev->req_producer_index = 0; - wmb(); - ql_write_common_reg_l(qdev, - &port_regs->CommonRegs.reqQProducerIndex, - qdev->req_producer_index); - - netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, - "tx queued, slot %d, len %d\n", - qdev->req_producer_index, skb->len); - - atomic_dec(&qdev->tx_count); - return NETDEV_TX_OK; -} - -static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) -{ - qdev->req_q_size = - (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); - - qdev->req_q_virt_addr = - pci_alloc_consistent(qdev->pdev, - (size_t) qdev->req_q_size, - &qdev->req_q_phy_addr); - - if ((qdev->req_q_virt_addr == NULL) || - LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { - netdev_err(qdev->ndev, "reqQ failed\n"); - return -ENOMEM; - } - - qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); - - qdev->rsp_q_virt_addr = - pci_alloc_consistent(qdev->pdev, - (size_t) qdev->rsp_q_size, - &qdev->rsp_q_phy_addr); - - if ((qdev->rsp_q_virt_addr == NULL) || - LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { - netdev_err(qdev->ndev, "rspQ allocation failed\n"); - pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, - qdev->req_q_virt_addr, - qdev->req_q_phy_addr); - return -ENOMEM; - } - - set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); - - return 0; -} - -static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) -{ - if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { - netdev_info(qdev->ndev, "Already done\n"); - return; - } - - pci_free_consistent(qdev->pdev, - qdev->req_q_size, - qdev->req_q_virt_addr, qdev->req_q_phy_addr); - - qdev->req_q_virt_addr = NULL; - - pci_free_consistent(qdev->pdev, - qdev->rsp_q_size, - qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); - - qdev->rsp_q_virt_addr = NULL; - - clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); -} - -static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) -{ - /* Create Large Buffer Queue */ - qdev->lrg_buf_q_size = - qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); - if (qdev->lrg_buf_q_size < PAGE_SIZE) - qdev->lrg_buf_q_alloc_size = PAGE_SIZE; - else - qdev->lrg_buf_q_alloc_size = 
qdev->lrg_buf_q_size * 2; - - qdev->lrg_buf = - kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), - GFP_KERNEL); - if (qdev->lrg_buf == NULL) { - netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n"); - return -ENOMEM; - } - - qdev->lrg_buf_q_alloc_virt_addr = - pci_alloc_consistent(qdev->pdev, - qdev->lrg_buf_q_alloc_size, - &qdev->lrg_buf_q_alloc_phy_addr); - - if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { - netdev_err(qdev->ndev, "lBufQ failed\n"); - return -ENOMEM; - } - qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; - qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; - - /* Create Small Buffer Queue */ - qdev->small_buf_q_size = - NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); - if (qdev->small_buf_q_size < PAGE_SIZE) - qdev->small_buf_q_alloc_size = PAGE_SIZE; - else - qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; - - qdev->small_buf_q_alloc_virt_addr = - pci_alloc_consistent(qdev->pdev, - qdev->small_buf_q_alloc_size, - &qdev->small_buf_q_alloc_phy_addr); - - if (qdev->small_buf_q_alloc_virt_addr == NULL) { - netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); - pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, - qdev->lrg_buf_q_alloc_virt_addr, - qdev->lrg_buf_q_alloc_phy_addr); - return -ENOMEM; - } - - qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; - qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; - set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); - return 0; -} - -static void ql_free_buffer_queues(struct ql3_adapter *qdev) -{ - if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { - netdev_info(qdev->ndev, "Already done\n"); - return; - } - kfree(qdev->lrg_buf); - pci_free_consistent(qdev->pdev, - qdev->lrg_buf_q_alloc_size, - qdev->lrg_buf_q_alloc_virt_addr, - qdev->lrg_buf_q_alloc_phy_addr); - - qdev->lrg_buf_q_virt_addr = NULL; - - pci_free_consistent(qdev->pdev, - qdev->small_buf_q_alloc_size, - qdev->small_buf_q_alloc_virt_addr, - qdev->small_buf_q_alloc_phy_addr); - - qdev->small_buf_q_virt_addr = NULL; - - clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); -} - -static int ql_alloc_small_buffers(struct ql3_adapter *qdev) -{ - int i; - struct bufq_addr_element *small_buf_q_entry; - - /* Currently we allocate on one of memory and use it for smallbuffers */ - qdev->small_buf_total_size = - (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * - QL_SMALL_BUFFER_SIZE); - - qdev->small_buf_virt_addr = - pci_alloc_consistent(qdev->pdev, - qdev->small_buf_total_size, - &qdev->small_buf_phy_addr); - - if (qdev->small_buf_virt_addr == NULL) { - netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); - return -ENOMEM; - } - - qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); - qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); - - small_buf_q_entry = qdev->small_buf_q_virt_addr; - - /* Initialize the small buffer queue. 
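All of the small receive buffers come from one coherent allocation; the loop that follows slices it into QL_SMALL_BUFFER_SIZE pieces and stores each slice's bus address as two 32-bit halves. The sketch below does the same arithmetic standalone, computing the full 64-bit address per slice for clarity (the driver keeps the high word fixed and offsets only the low word); the buffer size is an assumed value:

#include <stdint.h>
#include <stdio.h>

#define SMALL_BUFFER_SIZE 256   /* assumed QL_SMALL_BUFFER_SIZE */

struct bufq_addr_element {
    uint32_t addr_low;
    uint32_t addr_high;
};

/* Fill 'n' queue entries with the bus addresses of consecutive
 * SMALL_BUFFER_SIZE slices of one contiguous block at 'base'. */
static void fill_small_bufq(struct bufq_addr_element *q,
                            uint64_t base, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        uint64_t addr = base + (uint64_t)i * SMALL_BUFFER_SIZE;

        q[i].addr_low  = (uint32_t)addr;         /* LS_64BITS() */
        q[i].addr_high = (uint32_t)(addr >> 32); /* MS_64BITS() */
    }
}

int main(void)
{
    struct bufq_addr_element q[4];

    fill_small_bufq(q, 0x1ffffff00ULL, 4);
    /* the third slice crosses the 4 GiB line: high word becomes 2 */
    printf("q[2] = %08x:%08x\n",
           (unsigned)q[2].addr_high, (unsigned)q[2].addr_low);
    return 0;
}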
*/ - for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { - small_buf_q_entry->addr_high = - cpu_to_le32(qdev->small_buf_phy_addr_high); - small_buf_q_entry->addr_low = - cpu_to_le32(qdev->small_buf_phy_addr_low + - (i * QL_SMALL_BUFFER_SIZE)); - small_buf_q_entry++; - } - qdev->small_buf_index = 0; - set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); - return 0; -} - -static void ql_free_small_buffers(struct ql3_adapter *qdev) -{ - if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { - netdev_info(qdev->ndev, "Already done\n"); - return; - } - if (qdev->small_buf_virt_addr != NULL) { - pci_free_consistent(qdev->pdev, - qdev->small_buf_total_size, - qdev->small_buf_virt_addr, - qdev->small_buf_phy_addr); - - qdev->small_buf_virt_addr = NULL; - } -} - -static void ql_free_large_buffers(struct ql3_adapter *qdev) -{ - int i = 0; - struct ql_rcv_buf_cb *lrg_buf_cb; - - for (i = 0; i < qdev->num_large_buffers; i++) { - lrg_buf_cb = &qdev->lrg_buf[i]; - if (lrg_buf_cb->skb) { - dev_kfree_skb(lrg_buf_cb->skb); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(lrg_buf_cb, mapaddr), - dma_unmap_len(lrg_buf_cb, maplen), - PCI_DMA_FROMDEVICE); - memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); - } else { - break; - } - } -} - -static void ql_init_large_buffers(struct ql3_adapter *qdev) -{ - int i; - struct ql_rcv_buf_cb *lrg_buf_cb; - struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; - - for (i = 0; i < qdev->num_large_buffers; i++) { - lrg_buf_cb = &qdev->lrg_buf[i]; - buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; - buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; - buf_addr_ele++; - } - qdev->lrg_buf_index = 0; - qdev->lrg_buf_skb_check = 0; -} - -static int ql_alloc_large_buffers(struct ql3_adapter *qdev) -{ - int i; - struct ql_rcv_buf_cb *lrg_buf_cb; - struct sk_buff *skb; - dma_addr_t map; - int err; - - for (i = 0; i < qdev->num_large_buffers; i++) { - skb = netdev_alloc_skb(qdev->ndev, - qdev->lrg_buffer_len); - if (unlikely(!skb)) { - /* Better luck next round */ - netdev_err(qdev->ndev, - "large buff alloc failed for %d bytes at index %d\n", - qdev->lrg_buffer_len * 2, i); - ql_free_large_buffers(qdev); - return -ENOMEM; - } else { - - lrg_buf_cb = &qdev->lrg_buf[i]; - memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); - lrg_buf_cb->index = i; - lrg_buf_cb->skb = skb; - /* - * We save some space to copy the ethhdr from first - * buffer - */ - skb_reserve(skb, QL_HEADER_SPACE); - map = pci_map_single(qdev->pdev, - skb->data, - qdev->lrg_buffer_len - - QL_HEADER_SPACE, - PCI_DMA_FROMDEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netdev_err(qdev->ndev, - "PCI mapping failed with error: %d\n", - err); - ql_free_large_buffers(qdev); - return -ENOMEM; - } - - dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); - dma_unmap_len_set(lrg_buf_cb, maplen, - qdev->lrg_buffer_len - - QL_HEADER_SPACE); - lrg_buf_cb->buf_phy_addr_low = - cpu_to_le32(LS_64BITS(map)); - lrg_buf_cb->buf_phy_addr_high = - cpu_to_le32(MS_64BITS(map)); - } - } - return 0; -} - -static void ql_free_send_free_list(struct ql3_adapter *qdev) -{ - struct ql_tx_buf_cb *tx_cb; - int i; - - tx_cb = &qdev->tx_buf[0]; - for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { - kfree(tx_cb->oal); - tx_cb->oal = NULL; - tx_cb++; - } -} - -static int ql_create_send_free_list(struct ql3_adapter *qdev) -{ - struct ql_tx_buf_cb *tx_cb; - int i; - struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; - - /* Create free list of transmit buffers */ - for (i = 0; i < 
NUM_REQ_Q_ENTRIES; i++) { - - tx_cb = &qdev->tx_buf[i]; - tx_cb->skb = NULL; - tx_cb->queue_entry = req_q_curr; - req_q_curr++; - tx_cb->oal = kmalloc(512, GFP_KERNEL); - if (tx_cb->oal == NULL) - return -1; - } - return 0; -} - -static int ql_alloc_mem_resources(struct ql3_adapter *qdev) -{ - if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { - qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; - qdev->lrg_buffer_len = NORMAL_MTU_SIZE; - } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { - /* - * Bigger buffers, so less of them. - */ - qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; - qdev->lrg_buffer_len = JUMBO_MTU_SIZE; - } else { - netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", - qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); - return -ENOMEM; - } - qdev->num_large_buffers = - qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; - qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; - qdev->max_frame_size = - (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; - - /* - * First allocate a page of shared memory and use it for shadow - * locations of Network Request Queue Consumer Address Register and - * Network Completion Queue Producer Index Register - */ - qdev->shadow_reg_virt_addr = - pci_alloc_consistent(qdev->pdev, - PAGE_SIZE, &qdev->shadow_reg_phy_addr); - - if (qdev->shadow_reg_virt_addr != NULL) { - qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; - qdev->req_consumer_index_phy_addr_high = - MS_64BITS(qdev->shadow_reg_phy_addr); - qdev->req_consumer_index_phy_addr_low = - LS_64BITS(qdev->shadow_reg_phy_addr); - - qdev->prsp_producer_index = - (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); - qdev->rsp_producer_index_phy_addr_high = - qdev->req_consumer_index_phy_addr_high; - qdev->rsp_producer_index_phy_addr_low = - qdev->req_consumer_index_phy_addr_low + 8; - } else { - netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); - return -ENOMEM; - } - - if (ql_alloc_net_req_rsp_queues(qdev) != 0) { - netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); - goto err_req_rsp; - } - - if (ql_alloc_buffer_queues(qdev) != 0) { - netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); - goto err_buffer_queues; - } - - if (ql_alloc_small_buffers(qdev) != 0) { - netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); - goto err_small_buffers; - } - - if (ql_alloc_large_buffers(qdev) != 0) { - netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); - goto err_small_buffers; - } - - /* Initialize the large buffer queue. 
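ql_alloc_mem_resources above derives two related lengths from the MTU: the receive-buffer length adds VLAN overhead plus the headroom reserved for the 3022's split-header copy, while the MAC's maximum frame length swaps that software-only headroom for the on-wire FCS. A standalone restatement of the arithmetic; the constant values here are assumptions for illustration:

#include <stdio.h>

/* Assumed values for the driver's sizing constants. */
#define VLAN_ETH_HLEN      18   /* dst + src + VLAN tag + type */
#define VLAN_ID_LEN         2
#define QL_HEADER_SPACE     4   /* assumed per-buffer headroom */
#define ETHERNET_CRC_SIZE   4

/* Receive-buffer length: payload MTU plus link-layer overhead plus
 * the headroom reserved for prepending the split header. */
static int lrg_buffer_len(int mtu)
{
    return mtu + VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
}

/* Max frame the MAC accepts: the same on-wire bytes plus FCS, minus
 * the headroom, which never appears on the wire. */
static int max_frame_size(int mtu)
{
    return (lrg_buffer_len(mtu) - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
}

int main(void)
{
    int mtu = 1500;     /* NORMAL_MTU_SIZE */

    printf("buf=%d frame=%d\n",
           lrg_buffer_len(mtu), max_frame_size(mtu));
    return 0;
}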
*/ - ql_init_large_buffers(qdev); - if (ql_create_send_free_list(qdev)) - goto err_free_list; - - qdev->rsp_current = qdev->rsp_q_virt_addr; - - return 0; -err_free_list: - ql_free_send_free_list(qdev); -err_small_buffers: - ql_free_buffer_queues(qdev); -err_buffer_queues: - ql_free_net_req_rsp_queues(qdev); -err_req_rsp: - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->shadow_reg_virt_addr, - qdev->shadow_reg_phy_addr); - - return -ENOMEM; -} - -static void ql_free_mem_resources(struct ql3_adapter *qdev) -{ - ql_free_send_free_list(qdev); - ql_free_large_buffers(qdev); - ql_free_small_buffers(qdev); - ql_free_buffer_queues(qdev); - ql_free_net_req_rsp_queues(qdev); - if (qdev->shadow_reg_virt_addr != NULL) { - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->shadow_reg_virt_addr, - qdev->shadow_reg_phy_addr); - qdev->shadow_reg_virt_addr = NULL; - } -} - -static int ql_init_misc_registers(struct ql3_adapter *qdev) -{ - struct ql3xxx_local_ram_registers __iomem *local_ram = - (void __iomem *)qdev->mem_map_registers; - - if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 4)) - return -1; - - ql_write_page2_reg(qdev, - &local_ram->bufletSize, qdev->nvram_data.bufletSize); - - ql_write_page2_reg(qdev, - &local_ram->maxBufletCount, - qdev->nvram_data.bufletCount); - - ql_write_page2_reg(qdev, - &local_ram->freeBufletThresholdLow, - (qdev->nvram_data.tcpWindowThreshold25 << 16) | - (qdev->nvram_data.tcpWindowThreshold0)); - - ql_write_page2_reg(qdev, - &local_ram->freeBufletThresholdHigh, - qdev->nvram_data.tcpWindowThreshold50); - - ql_write_page2_reg(qdev, - &local_ram->ipHashTableBase, - (qdev->nvram_data.ipHashTableBaseHi << 16) | - qdev->nvram_data.ipHashTableBaseLo); - ql_write_page2_reg(qdev, - &local_ram->ipHashTableCount, - qdev->nvram_data.ipHashTableSize); - ql_write_page2_reg(qdev, - &local_ram->tcpHashTableBase, - (qdev->nvram_data.tcpHashTableBaseHi << 16) | - qdev->nvram_data.tcpHashTableBaseLo); - ql_write_page2_reg(qdev, - &local_ram->tcpHashTableCount, - qdev->nvram_data.tcpHashTableSize); - ql_write_page2_reg(qdev, - &local_ram->ncbBase, - (qdev->nvram_data.ncbTableBaseHi << 16) | - qdev->nvram_data.ncbTableBaseLo); - ql_write_page2_reg(qdev, - &local_ram->maxNcbCount, - qdev->nvram_data.ncbTableSize); - ql_write_page2_reg(qdev, - &local_ram->drbBase, - (qdev->nvram_data.drbTableBaseHi << 16) | - qdev->nvram_data.drbTableBaseLo); - ql_write_page2_reg(qdev, - &local_ram->maxDrbCount, - qdev->nvram_data.drbTableSize); - ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); - return 0; -} - -static int ql_adapter_initialize(struct ql3_adapter *qdev) -{ - u32 value; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; - struct ql3xxx_host_memory_registers __iomem *hmem_regs = - (void __iomem *)port_regs; - u32 delay = 10; - int status = 0; - unsigned long hw_flags = 0; - - if (ql_mii_setup(qdev)) - return -1; - - /* Bring out PHY out of reset */ - ql_write_common_reg(qdev, spir, - (ISP_SERIAL_PORT_IF_WE | - (ISP_SERIAL_PORT_IF_WE << 16))); - /* Give the PHY time to come out of reset. */ - mdelay(100); - qdev->port_link_state = LS_DOWN; - netif_carrier_off(qdev->ndev); - - /* V2 chip fix for ARS-39168. 
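Many registers touched in ql_adapter_initialize (the serial-port interface register here, and ispControlStatus and portControl elsewhere in this file) appear to take their data in the low 16 bits and a per-bit write-enable mask in the high 16, which is why almost every write has the (bits << 16) | bits shape. Two hypothetical helpers make the idiom explicit; the bit value in the example is assumed:

#include <stdint.h>
#include <stdio.h>

/* Setting mask == value asserts exactly those bits and leaves every
 * other bit of the register untouched. */
static inline uint32_t masked_set(uint16_t bits)
{
    return ((uint32_t)bits << 16) | bits;
}

/* Clearing: enable the bits in the mask but write zeroes as data. */
static inline uint32_t masked_clear(uint16_t bits)
{
    return (uint32_t)bits << 16;
}

int main(void)
{
    uint16_t ISP_CONTROL_SR = 0x0100;   /* assumed bit position */

    printf("set   = 0x%08x\n", (unsigned)masked_set(ISP_CONTROL_SR));
    printf("clear = 0x%08x\n", (unsigned)masked_clear(ISP_CONTROL_SR));
    return 0;
}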
*/ - ql_write_common_reg(qdev, spir, - (ISP_SERIAL_PORT_IF_SDE | - (ISP_SERIAL_PORT_IF_SDE << 16))); - - /* Request Queue Registers */ - *((u32 *)(qdev->preq_consumer_index)) = 0; - atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); - qdev->req_producer_index = 0; - - ql_write_page1_reg(qdev, - &hmem_regs->reqConsumerIndexAddrHigh, - qdev->req_consumer_index_phy_addr_high); - ql_write_page1_reg(qdev, - &hmem_regs->reqConsumerIndexAddrLow, - qdev->req_consumer_index_phy_addr_low); - - ql_write_page1_reg(qdev, - &hmem_regs->reqBaseAddrHigh, - MS_64BITS(qdev->req_q_phy_addr)); - ql_write_page1_reg(qdev, - &hmem_regs->reqBaseAddrLow, - LS_64BITS(qdev->req_q_phy_addr)); - ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); - - /* Response Queue Registers */ - *((__le16 *) (qdev->prsp_producer_index)) = 0; - qdev->rsp_consumer_index = 0; - qdev->rsp_current = qdev->rsp_q_virt_addr; - - ql_write_page1_reg(qdev, - &hmem_regs->rspProducerIndexAddrHigh, - qdev->rsp_producer_index_phy_addr_high); - - ql_write_page1_reg(qdev, - &hmem_regs->rspProducerIndexAddrLow, - qdev->rsp_producer_index_phy_addr_low); - - ql_write_page1_reg(qdev, - &hmem_regs->rspBaseAddrHigh, - MS_64BITS(qdev->rsp_q_phy_addr)); - - ql_write_page1_reg(qdev, - &hmem_regs->rspBaseAddrLow, - LS_64BITS(qdev->rsp_q_phy_addr)); - - ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); - - /* Large Buffer Queue */ - ql_write_page1_reg(qdev, - &hmem_regs->rxLargeQBaseAddrHigh, - MS_64BITS(qdev->lrg_buf_q_phy_addr)); - - ql_write_page1_reg(qdev, - &hmem_regs->rxLargeQBaseAddrLow, - LS_64BITS(qdev->lrg_buf_q_phy_addr)); - - ql_write_page1_reg(qdev, - &hmem_regs->rxLargeQLength, - qdev->num_lbufq_entries); - - ql_write_page1_reg(qdev, - &hmem_regs->rxLargeBufferLength, - qdev->lrg_buffer_len); - - /* Small Buffer Queue */ - ql_write_page1_reg(qdev, - &hmem_regs->rxSmallQBaseAddrHigh, - MS_64BITS(qdev->small_buf_q_phy_addr)); - - ql_write_page1_reg(qdev, - &hmem_regs->rxSmallQBaseAddrLow, - LS_64BITS(qdev->small_buf_q_phy_addr)); - - ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); - ql_write_page1_reg(qdev, - &hmem_regs->rxSmallBufferLength, - QL_SMALL_BUFFER_SIZE); - - qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; - qdev->small_buf_release_cnt = 8; - qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; - qdev->lrg_buf_release_cnt = 8; - qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; - qdev->small_buf_index = 0; - qdev->lrg_buf_index = 0; - qdev->lrg_buf_free_count = 0; - qdev->lrg_buf_free_head = NULL; - qdev->lrg_buf_free_tail = NULL; - - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - rxSmallQProducerIndex, - qdev->small_buf_q_producer_index); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - rxLargeQProducerIndex, - qdev->lrg_buf_q_producer_index); - - /* - * Find out if the chip has already been initialized. If it has, then - * we skip some of the initialization. - */ - clear_bit(QL_LINK_MASTER, &qdev->flags); - value = ql_read_page0_reg(qdev, &port_regs->portStatus); - if ((value & PORT_STATUS_IC) == 0) { - - /* Chip has not been configured yet, so let it rip. 
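The branch this comment guards runs the one-time local-RAM and flash setup only when no other PCI function has completed it yet (PORT_STATUS_IC clear in the portStatus read just above). Reduced to its essentials it is an idempotence guard shared by the two functions; the sketch below simulates the chip latching the completion bit, with the bit value assumed:

#include <stdio.h>

#define PORT_STATUS_IC 0x0001   /* assumed "init complete" bit */

static unsigned port_status;    /* stands in for the register */

/* Both PCI functions run this path, but only the first one to
 * arrive performs the shared one-time configuration. */
static void adapter_initialize(void)
{
    if (!(port_status & PORT_STATUS_IC)) {
        printf("configuring shared chip state\n");
        port_status |= PORT_STATUS_IC;  /* chip latches completion */
    } else {
        printf("already configured, skipping\n");
    }
    printf("per-function setup\n");     /* always runs */
}

int main(void)
{
    adapter_initialize();   /* function 0: does the one-time work */
    adapter_initialize();   /* function 1: skips it */
    return 0;
}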
*/ - if (ql_init_misc_registers(qdev)) { - status = -1; - goto out; - } - - value = qdev->nvram_data.tcpMaxWindowSize; - ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); - - value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; - - if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) - * 2) << 13)) { - status = -1; - goto out; - } - ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); - ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, - (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << - 16) | (INTERNAL_CHIP_SD | - INTERNAL_CHIP_WE))); - ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); - } - - if (qdev->mac_index) - ql_write_page0_reg(qdev, - &port_regs->mac1MaxFrameLengthReg, - qdev->max_frame_size); - else - ql_write_page0_reg(qdev, - &port_regs->mac0MaxFrameLengthReg, - qdev->max_frame_size); - - if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, - (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * - 2) << 7)) { - status = -1; - goto out; - } - - PHY_Setup(qdev); - ql_init_scan_mode(qdev); - ql_get_phy_owner(qdev); - - /* Load the MAC Configuration */ - - /* Program lower 32 bits of the MAC address */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); - ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((qdev->ndev->dev_addr[2] << 24) - | (qdev->ndev->dev_addr[3] << 16) - | (qdev->ndev->dev_addr[4] << 8) - | qdev->ndev->dev_addr[5])); - - /* Program top 16 bits of the MAC address */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); - ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((qdev->ndev->dev_addr[0] << 8) - | qdev->ndev->dev_addr[1])); - - /* Enable Primary MAC */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | - MAC_ADDR_INDIRECT_PTR_REG_PE)); - - /* Clear Primary and Secondary IP addresses */ - ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, - ((IP_ADDR_INDEX_REG_MASK << 16) | - (qdev->mac_index << 2))); - ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); - - ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, - ((IP_ADDR_INDEX_REG_MASK << 16) | - ((qdev->mac_index << 2) + 1))); - ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); - - ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); - - /* Indicate Configuration Complete */ - ql_write_page0_reg(qdev, - &port_regs->portControl, - ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); - - do { - value = ql_read_page0_reg(qdev, &port_regs->portStatus); - if (value & PORT_STATUS_IC) - break; - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - msleep(500); - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - } while (--delay); - - if (delay == 0) { - netdev_err(qdev->ndev, "Hw Initialization timeout\n"); - status = -1; - goto out; - } - - /* Enable Ethernet Function */ - if (qdev->device_id == QL3032_DEVICE_ID) { - value = - (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | - QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 | - QL3032_PORT_CONTROL_ET); - ql_write_page0_reg(qdev, &port_regs->functionControl, - ((value << 16) | value)); - } else { - value = - (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | - PORT_CONTROL_HH); - ql_write_page0_reg(qdev, &port_regs->portControl, - ((value << 16) | value)); - } - - -out: - return status; -} - -/* - * Caller holds hw_lock. 
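The MAC address programming just above goes through an indirect pointer register in two data writes: bytes 2..5 of the address as the low 32 bits, then bytes 0..1 as the top 16 bits (the second write selects the upper half via the index's low bit). The packing, restated standalone:

#include <stdint.h>
#include <stdio.h>

/* Low 32 bits of the programmed address: bytes 2..5, big-endian. */
static uint32_t mac_lo32(const uint8_t a[6])
{
    return ((uint32_t)a[2] << 24) | ((uint32_t)a[3] << 16) |
           ((uint32_t)a[4] << 8)  | a[5];
}

/* Top 16 bits: bytes 0..1. */
static uint32_t mac_hi16(const uint8_t a[6])
{
    return ((uint32_t)a[0] << 8) | a[1];
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0xc0, 0xdd, 0x01, 0x02, 0x03 };

    printf("hi16=0x%04x lo32=0x%08x\n",
           (unsigned)mac_hi16(mac), (unsigned)mac_lo32(mac));
    return 0;
}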
- */ -static int ql_adapter_reset(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - int status = 0; - u16 value; - int max_wait_time; - - set_bit(QL_RESET_ACTIVE, &qdev->flags); - clear_bit(QL_RESET_DONE, &qdev->flags); - - /* - * Issue soft reset to chip. - */ - netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); - ql_write_common_reg(qdev, - &port_regs->CommonRegs.ispControlStatus, - ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); - - /* Wait 3 seconds for reset to complete. */ - netdev_printk(KERN_DEBUG, qdev->ndev, - "Wait 10 milliseconds for reset to complete\n"); - - /* Wait until the firmware tells us the Soft Reset is done */ - max_wait_time = 5; - do { - value = - ql_read_common_reg(qdev, - &port_regs->CommonRegs.ispControlStatus); - if ((value & ISP_CONTROL_SR) == 0) - break; - - ssleep(1); - } while ((--max_wait_time)); - - /* - * Also, make sure that the Network Reset Interrupt bit has been - * cleared after the soft reset has taken place. - */ - value = - ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); - if (value & ISP_CONTROL_RI) { - netdev_printk(KERN_DEBUG, qdev->ndev, - "clearing RI after reset\n"); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus, - ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); - } - - if (max_wait_time == 0) { - /* Issue Force Soft Reset */ - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus, - ((ISP_CONTROL_FSR << 16) | - ISP_CONTROL_FSR)); - /* - * Wait until the firmware tells us the Force Soft Reset is - * done - */ - max_wait_time = 5; - do { - value = ql_read_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus); - if ((value & ISP_CONTROL_FSR) == 0) - break; - ssleep(1); - } while ((--max_wait_time)); - } - if (max_wait_time == 0) - status = 1; - - clear_bit(QL_RESET_ACTIVE, &qdev->flags); - set_bit(QL_RESET_DONE, &qdev->flags); - return status; -} - -static void ql_set_mac_info(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value, port_status; - u8 func_number; - - /* Get the function number */ - value = - ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); - func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); - port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); - switch (value & ISP_CONTROL_FN_MASK) { - case ISP_CONTROL_FN0_NET: - qdev->mac_index = 0; - qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; - qdev->mb_bit_mask = FN0_MA_BITS_MASK; - qdev->PHYAddr = PORT0_PHY_ADDRESS; - if (port_status & PORT_STATUS_SM0) - set_bit(QL_LINK_OPTICAL, &qdev->flags); - else - clear_bit(QL_LINK_OPTICAL, &qdev->flags); - break; - - case ISP_CONTROL_FN1_NET: - qdev->mac_index = 1; - qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; - qdev->mb_bit_mask = FN1_MA_BITS_MASK; - qdev->PHYAddr = PORT1_PHY_ADDRESS; - if (port_status & PORT_STATUS_SM1) - set_bit(QL_LINK_OPTICAL, &qdev->flags); - else - clear_bit(QL_LINK_OPTICAL, &qdev->flags); - break; - - case ISP_CONTROL_FN0_SCSI: - case ISP_CONTROL_FN1_SCSI: - default: - netdev_printk(KERN_DEBUG, qdev->ndev, - "Invalid function number, ispControlStatus = 0x%x\n", - value); - break; - } - qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; -} - -static void ql_display_dev_info(struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - struct pci_dev *pdev = qdev->pdev; - - netdev_info(ndev, - "%s Adapter %d RevisionID %d found %s on PCI 
slot %d\n", - DRV_NAME, qdev->index, qdev->chip_rev_id, - qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", - qdev->pci_slot); - netdev_info(ndev, "%s Interface\n", - test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); - - /* - * Print PCI bus width/type. - */ - netdev_info(ndev, "Bus interface is %s %s\n", - ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), - ((qdev->pci_x) ? "PCI-X" : "PCI")); - - netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", - qdev->mem_map_registers); - netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); - - netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); -} - -static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) -{ - struct net_device *ndev = qdev->ndev; - int retval = 0; - - netif_stop_queue(ndev); - netif_carrier_off(ndev); - - clear_bit(QL_ADAPTER_UP, &qdev->flags); - clear_bit(QL_LINK_MASTER, &qdev->flags); - - ql_disable_interrupts(qdev); - - free_irq(qdev->pdev->irq, ndev); - - if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { - netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); - clear_bit(QL_MSI_ENABLED, &qdev->flags); - pci_disable_msi(qdev->pdev); - } - - del_timer_sync(&qdev->adapter_timer); - - napi_disable(&qdev->napi); - - if (do_reset) { - int soft_reset; - unsigned long hw_flags; - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (ql_wait_for_drvr_lock(qdev)) { - soft_reset = ql_adapter_reset(qdev); - if (soft_reset) { - netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", - qdev->index); - } - netdev_err(ndev, - "Releasing driver lock via chip reset\n"); - } else { - netdev_err(ndev, - "Could not acquire driver lock to do reset!\n"); - retval = -1; - } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - } - ql_free_mem_resources(qdev); - return retval; -} - -static int ql_adapter_up(struct ql3_adapter *qdev) -{ - struct net_device *ndev = qdev->ndev; - int err; - unsigned long irq_flags = IRQF_SHARED; - unsigned long hw_flags; - - if (ql_alloc_mem_resources(qdev)) { - netdev_err(ndev, "Unable to allocate buffers\n"); - return -ENOMEM; - } - - if (qdev->msi) { - if (pci_enable_msi(qdev->pdev)) { - netdev_err(ndev, - "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); - qdev->msi = 0; - } else { - netdev_info(ndev, "MSI Enabled...\n"); - set_bit(QL_MSI_ENABLED, &qdev->flags); - irq_flags &= ~IRQF_SHARED; - } - } - - err = request_irq(qdev->pdev->irq, ql3xxx_isr, - irq_flags, ndev->name, ndev); - if (err) { - netdev_err(ndev, - "Failed to reserve interrupt %d - already in use\n", - qdev->pdev->irq); - goto err_irq; - } - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - - err = ql_wait_for_drvr_lock(qdev); - if (err) { - err = ql_adapter_initialize(qdev); - if (err) { - netdev_err(ndev, "Unable to initialize adapter\n"); - goto err_init; - } - netdev_err(ndev, "Releasing driver lock\n"); - ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); - } else { - netdev_err(ndev, "Could not acquire driver lock\n"); - goto err_lock; - } - - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - set_bit(QL_ADAPTER_UP, &qdev->flags); - - mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); - - napi_enable(&qdev->napi); - ql_enable_interrupts(qdev); - return 0; - -err_init: - ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); -err_lock: - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - free_irq(qdev->pdev->irq, ndev); -err_irq: - if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { - netdev_info(ndev, "calling pci_disable_msi()\n"); - clear_bit(QL_MSI_ENABLED, &qdev->flags); - pci_disable_msi(qdev->pdev); - } - return err; -} - -static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) -{ - if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { - netdev_err(qdev->ndev, - "Driver up/down cycle failed, closing device\n"); - rtnl_lock(); - dev_close(qdev->ndev); - rtnl_unlock(); - return -1; - } - return 0; -} - -static int ql3xxx_close(struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - - /* - * Wait for device to recover from a reset. - * (Rarely happens, but possible.) - */ - while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) - msleep(50); - - ql_adapter_down(qdev, QL_DO_RESET); - return 0; -} - -static int ql3xxx_open(struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - return ql_adapter_up(qdev); -} - -static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - struct sockaddr *addr = p; - unsigned long hw_flags; - - if (netif_running(ndev)) - return -EBUSY; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - /* Program lower 32 bits of the MAC address */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); - ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((ndev->dev_addr[2] << 24) | (ndev-> - dev_addr[3] << 16) | - (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); - - /* Program top 16 bits of the MAC address */ - ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, - ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); - ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, - ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - return 0; -} - -static void ql3xxx_tx_timeout(struct net_device *ndev) -{ - struct ql3_adapter *qdev = netdev_priv(ndev); - - netdev_err(ndev, "Resetting...\n"); - /* - * Stop the queues, we've got a problem. - */ - netif_stop_queue(ndev); - - /* - * Wake up the worker to process this event. 
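ql_adapter_up above shows the usual MSI-with-fallback shape: try pci_enable_msi(), and when the platform refuses, stay on the legacy interrupt line, which then has to remain shareable (IRQF_SHARED). A userspace model of just the flag selection; the flag value is assumed for the model only:

#include <stdio.h>

#define IRQF_SHARED 0x80    /* assumed value, for the model only */

static int msi_available;   /* toggles to simulate pci_enable_msi() */

/* Prefer MSI; fall back to a shared legacy line when it fails. */
static unsigned long pick_irq_flags(int want_msi)
{
    unsigned long flags = IRQF_SHARED;

    if (want_msi) {
        if (!msi_available) {
            printf("MSI failed, continuing without it\n");
        } else {
            printf("MSI enabled\n");
            flags &= ~IRQF_SHARED;  /* MSI vectors are exclusive */
        }
    }
    return flags;
}

int main(void)
{
    msi_available = 0;
    printf("flags=%#lx\n", pick_irq_flags(1));  /* shared legacy */
    msi_available = 1;
    printf("flags=%#lx\n", pick_irq_flags(1));  /* exclusive MSI */
    return 0;
}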
- */ - queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); -} - -static void ql_reset_work(struct work_struct *work) -{ - struct ql3_adapter *qdev = - container_of(work, struct ql3_adapter, reset_work.work); - struct net_device *ndev = qdev->ndev; - u32 value; - struct ql_tx_buf_cb *tx_cb; - int max_wait_time, i; - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - unsigned long hw_flags; - - if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { - clear_bit(QL_LINK_MASTER, &qdev->flags); - - /* - * Loop through the active list and return the skb. - */ - for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { - int j; - tx_cb = &qdev->tx_buf[i]; - if (tx_cb->skb) { - netdev_printk(KERN_DEBUG, ndev, - "Freeing lost SKB\n"); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(&tx_cb->map[0], - mapaddr), - dma_unmap_len(&tx_cb->map[0], maplen), - PCI_DMA_TODEVICE); - for (j = 1; j < tx_cb->seg_count; j++) { - pci_unmap_page(qdev->pdev, - dma_unmap_addr(&tx_cb->map[j], - mapaddr), - dma_unmap_len(&tx_cb->map[j], - maplen), - PCI_DMA_TODEVICE); - } - dev_kfree_skb(tx_cb->skb); - tx_cb->skb = NULL; - } - } - - netdev_err(ndev, "Clearing NRI after reset\n"); - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - ql_write_common_reg(qdev, - &port_regs->CommonRegs. - ispControlStatus, - ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); - /* - * Wait the for Soft Reset to Complete. - */ - max_wait_time = 10; - do { - value = ql_read_common_reg(qdev, - &port_regs->CommonRegs. - - ispControlStatus); - if ((value & ISP_CONTROL_SR) == 0) { - netdev_printk(KERN_DEBUG, ndev, - "reset completed\n"); - break; - } - - if (value & ISP_CONTROL_RI) { - netdev_printk(KERN_DEBUG, ndev, - "clearing NRI after reset\n"); - ql_write_common_reg(qdev, - &port_regs-> - CommonRegs. - ispControlStatus, - ((ISP_CONTROL_RI << - 16) | ISP_CONTROL_RI)); - } - - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - ssleep(1); - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - } while (--max_wait_time); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - - if (value & ISP_CONTROL_SR) { - - /* - * Set the reset flags and clear the board again. - * Nothing else to do... 
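The reset poll above follows a drop-lock-and-sleep pattern: hw_lock cannot be held across ssleep(), so each iteration releases it, sleeps a second, retakes it, and re-reads ispControlStatus, giving up after a bounded number of tries. The skeleton, standalone and with hypothetical names (the locking is kept as a comment):

#include <stdio.h>

/* Bounded polling: re-check the status each pass and give up after
 * max_wait tries, letting the caller escalate to a harder reset. */
static int wait_for_clear(int (*read_status)(void), int max_wait)
{
    while (max_wait--) {
        if (read_status() == 0)
            return 0;   /* condition met */
        /* unlock(); sleep 1s; lock();  as in ql_reset_work */
    }
    return -1;          /* timed out */
}

static int countdown = 3;
static int fake_status(void) { return --countdown; }

int main(void)
{
    printf("%s\n",
           wait_for_clear(fake_status, 10) ? "timeout" : "done");
    return 0;
}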
- */ - netdev_err(ndev, - "Timed out waiting for reset to complete\n"); - netdev_err(ndev, "Do a reset\n"); - clear_bit(QL_RESET_PER_SCSI, &qdev->flags); - clear_bit(QL_RESET_START, &qdev->flags); - ql_cycle_adapter(qdev, QL_DO_RESET); - return; - } - - clear_bit(QL_RESET_ACTIVE, &qdev->flags); - clear_bit(QL_RESET_PER_SCSI, &qdev->flags); - clear_bit(QL_RESET_START, &qdev->flags); - ql_cycle_adapter(qdev, QL_NO_RESET); - } -} - -static void ql_tx_timeout_work(struct work_struct *work) -{ - struct ql3_adapter *qdev = - container_of(work, struct ql3_adapter, tx_timeout_work.work); - - ql_cycle_adapter(qdev, QL_DO_RESET); -} - -static void ql_get_board_info(struct ql3_adapter *qdev) -{ - struct ql3xxx_port_registers __iomem *port_regs = - qdev->mem_map_registers; - u32 value; - - value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); - - qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); - if (value & PORT_STATUS_64) - qdev->pci_width = 64; - else - qdev->pci_width = 32; - if (value & PORT_STATUS_X) - qdev->pci_x = 1; - else - qdev->pci_x = 0; - qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); -} - -static void ql3xxx_timer(unsigned long ptr) -{ - struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; - queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); -} - -static const struct net_device_ops ql3xxx_netdev_ops = { - .ndo_open = ql3xxx_open, - .ndo_start_xmit = ql3xxx_send, - .ndo_stop = ql3xxx_close, - .ndo_set_multicast_list = NULL, /* not allowed on NIC side */ - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = ql3xxx_set_mac_address, - .ndo_tx_timeout = ql3xxx_tx_timeout, -}; - -static int __devinit ql3xxx_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_entry) -{ - struct net_device *ndev = NULL; - struct ql3_adapter *qdev = NULL; - static int cards_found; - int uninitialized_var(pci_using_dac), err; - - err = pci_enable_device(pdev); - if (err) { - pr_err("%s cannot enable PCI device\n", pci_name(pdev)); - goto err_out; - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) { - pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); - goto err_out_disable_pdev; - } - - pci_set_master(pdev); - - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - pci_using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { - pci_using_dac = 0; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - } - - if (err) { - pr_err("%s no usable DMA configuration\n", pci_name(pdev)); - goto err_out_free_regions; - } - - ndev = alloc_etherdev(sizeof(struct ql3_adapter)); - if (!ndev) { - pr_err("%s could not alloc etherdev\n", pci_name(pdev)); - err = -ENOMEM; - goto err_out_free_regions; - } - - SET_NETDEV_DEV(ndev, &pdev->dev); - - pci_set_drvdata(pdev, ndev); - - qdev = netdev_priv(ndev); - qdev->index = cards_found; - qdev->ndev = ndev; - qdev->pdev = pdev; - qdev->device_id = pci_entry->device; - qdev->port_link_state = LS_DOWN; - if (msi) - qdev->msi = 1; - - qdev->msg_enable = netif_msg_init(debug, default_msg); - - if (pci_using_dac) - ndev->features |= NETIF_F_HIGHDMA; - if (qdev->device_id == QL3032_DEVICE_ID) - ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; - - qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); - if (!qdev->mem_map_registers) { - pr_err("%s: cannot map device registers\n", pci_name(pdev)); - err = -EIO; - goto err_out_free_ndev; - } - - spin_lock_init(&qdev->adapter_lock); - 
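The probe path above negotiates DMA addressing the standard way: prefer a 64-bit mask (remembering the outcome so NETIF_F_HIGHDMA can be set later), fall back to 32-bit, and abort the probe if neither works. A userspace model of that negotiation, with the platform's capability simulated:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static uint64_t platform_limit = DMA_BIT_MASK(32);  /* simulated */

/* 0 on success, mirroring pci_set_dma_mask()'s convention. */
static int set_dma_mask(uint64_t mask)
{
    return mask <= platform_limit ? 0 : -1;
}

int main(void)
{
    int using_dac = 0, err;

    if (!set_dma_mask(DMA_BIT_MASK(64))) {
        using_dac = 1;      /* full 64-bit addressing available */
        err = 0;
    } else if (!(err = set_dma_mask(DMA_BIT_MASK(32)))) {
        using_dac = 0;      /* constrained to 32-bit addresses */
    }
    if (err)
        printf("no usable DMA configuration\n");
    else
        printf("using_dac=%d\n", using_dac);
    return 0;
}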
spin_lock_init(&qdev->hw_lock); - - /* Set driver entry points */ - ndev->netdev_ops = &ql3xxx_netdev_ops; - SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); - ndev->watchdog_timeo = 5 * HZ; - - netif_napi_add(ndev, &qdev->napi, ql_poll, 64); - - ndev->irq = pdev->irq; - - /* make sure the EEPROM is good */ - if (ql_get_nvram_params(qdev)) { - pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", - __func__, qdev->index); - err = -EIO; - goto err_out_iounmap; - } - - ql_set_mac_info(qdev); - - /* Validate and set parameters */ - if (qdev->mac_index) { - ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; - ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); - } else { - ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; - ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); - } - memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); - - ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; - - /* Record PCI bus information. */ - ql_get_board_info(qdev); - - /* - * Set the Maximum Memory Read Byte Count value. We do this to handle - * jumbo frames. - */ - if (qdev->pci_x) - pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); - - err = register_netdev(ndev); - if (err) { - pr_err("%s: cannot register net device\n", pci_name(pdev)); - goto err_out_iounmap; - } - - /* we're going to reset, so assume we have no link for now */ - - netif_carrier_off(ndev); - netif_stop_queue(ndev); - - qdev->workqueue = create_singlethread_workqueue(ndev->name); - INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); - INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); - INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); - - init_timer(&qdev->adapter_timer); - qdev->adapter_timer.function = ql3xxx_timer; - qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ - qdev->adapter_timer.data = (unsigned long)qdev; - - if (!cards_found) { - pr_alert("%s\n", DRV_STRING); - pr_alert("Driver name: %s, Version: %s\n", - DRV_NAME, DRV_VERSION); - } - ql_display_dev_info(ndev); - - cards_found++; - return 0; - -err_out_iounmap: - iounmap(qdev->mem_map_registers); -err_out_free_ndev: - free_netdev(ndev); -err_out_free_regions: - pci_release_regions(pdev); -err_out_disable_pdev: - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); -err_out: - return err; -} - -static void __devexit ql3xxx_remove(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql3_adapter *qdev = netdev_priv(ndev); - - unregister_netdev(ndev); - - ql_disable_interrupts(qdev); - - if (qdev->workqueue) { - cancel_delayed_work(&qdev->reset_work); - cancel_delayed_work(&qdev->tx_timeout_work); - destroy_workqueue(qdev->workqueue); - qdev->workqueue = NULL; - } - - iounmap(qdev->mem_map_registers); - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); - free_netdev(ndev); -} - -static struct pci_driver ql3xxx_driver = { - - .name = DRV_NAME, - .id_table = ql3xxx_pci_tbl, - .probe = ql3xxx_probe, - .remove = __devexit_p(ql3xxx_remove), -}; - -static int __init ql3xxx_init_module(void) -{ - return pci_register_driver(&ql3xxx_driver); -} - -static void __exit ql3xxx_exit(void) -{ - pci_unregister_driver(&ql3xxx_driver); -} - -module_init(ql3xxx_init_module); -module_exit(ql3xxx_exit); diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h deleted file mode 100644 index 73e23436..0000000 --- a/drivers/net/qla3xxx.h +++ /dev/null @@ -1,1189 +0,0 @@ -/* - * QLogic QLA3xxx NIC HBA Driver - * Copyright (c) 2003-2006 QLogic Corporation - * - * See 
LICENSE.qla3xxx for copyright and licensing details. - */ -#ifndef _QLA3XXX_H_ -#define _QLA3XXX_H_ - -/* - * IOCB Definitions... - */ -#pragma pack(1) - -#define OPCODE_OB_MAC_IOCB_FN0 0x01 -#define OPCODE_OB_MAC_IOCB_FN2 0x21 - -#define OPCODE_IB_MAC_IOCB 0xF9 -#define OPCODE_IB_3032_MAC_IOCB 0x09 -#define OPCODE_IB_IP_IOCB 0xFA -#define OPCODE_IB_3032_IP_IOCB 0x0A - -#define OPCODE_FUNC_ID_MASK 0x30 -#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ - -#define FN0_MA_BITS_MASK 0x00 -#define FN1_MA_BITS_MASK 0x80 - -struct ob_mac_iocb_req { - u8 opcode; - u8 flags; -#define OB_MAC_IOCB_REQ_MA 0xe0 -#define OB_MAC_IOCB_REQ_F 0x10 -#define OB_MAC_IOCB_REQ_X 0x08 -#define OB_MAC_IOCB_REQ_D 0x02 -#define OB_MAC_IOCB_REQ_I 0x01 - u8 flags1; -#define OB_3032MAC_IOCB_REQ_IC 0x04 -#define OB_3032MAC_IOCB_REQ_TC 0x02 -#define OB_3032MAC_IOCB_REQ_UC 0x01 - u8 reserved0; - - u32 transaction_id; /* opaque for hardware */ - __le16 data_len; - u8 ip_hdr_off; - u8 ip_hdr_len; - __le32 reserved1; - __le32 reserved2; - __le32 buf_addr0_low; - __le32 buf_addr0_high; - __le32 buf_0_len; - __le32 buf_addr1_low; - __le32 buf_addr1_high; - __le32 buf_1_len; - __le32 buf_addr2_low; - __le32 buf_addr2_high; - __le32 buf_2_len; - __le32 reserved3; - __le32 reserved4; -}; -/* - * The following constants define control bits for buffer - * length fields for all IOCB's. - */ -#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */ -#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */ -#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */ -#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */ - -struct ob_mac_iocb_rsp { - u8 opcode; - u8 flags; -#define OB_MAC_IOCB_RSP_P 0x08 -#define OB_MAC_IOCB_RSP_L 0x04 -#define OB_MAC_IOCB_RSP_S 0x02 -#define OB_MAC_IOCB_RSP_I 0x01 - - __le16 reserved0; - u32 transaction_id; /* opaque for hardware */ - __le32 reserved1; - __le32 reserved2; -}; - -struct ib_mac_iocb_rsp { - u8 opcode; -#define IB_MAC_IOCB_RSP_V 0x80 - u8 flags; -#define IB_MAC_IOCB_RSP_S 0x80 -#define IB_MAC_IOCB_RSP_H1 0x40 -#define IB_MAC_IOCB_RSP_H0 0x20 -#define IB_MAC_IOCB_RSP_B 0x10 -#define IB_MAC_IOCB_RSP_M 0x08 -#define IB_MAC_IOCB_RSP_MA 0x07 - - __le16 length; - __le32 reserved; - __le32 ial_low; - __le32 ial_high; - -}; - -struct ob_ip_iocb_req { - u8 opcode; - __le16 flags; -#define OB_IP_IOCB_REQ_O 0x100 -#define OB_IP_IOCB_REQ_H 0x008 -#define OB_IP_IOCB_REQ_U 0x004 -#define OB_IP_IOCB_REQ_D 0x002 -#define OB_IP_IOCB_REQ_I 0x001 - - u8 reserved0; - - __le32 transaction_id; - __le16 data_len; - __le16 reserved1; - __le32 hncb_ptr_low; - __le32 hncb_ptr_high; - __le32 buf_addr0_low; - __le32 buf_addr0_high; - __le32 buf_0_len; - __le32 buf_addr1_low; - __le32 buf_addr1_high; - __le32 buf_1_len; - __le32 buf_addr2_low; - __le32 buf_addr2_high; - __le32 buf_2_len; - __le32 reserved2; - __le32 reserved3; -}; - -/* defines for BufferLength fields above */ -#define OB_IP_IOCB_REQ_E 0x80000000 -#define OB_IP_IOCB_REQ_C 0x40000000 -#define OB_IP_IOCB_REQ_L 0x20000000 -#define OB_IP_IOCB_REQ_R 0x10000000 - -struct ob_ip_iocb_rsp { - u8 opcode; - u8 flags; -#define OB_MAC_IOCB_RSP_H 0x10 -#define OB_MAC_IOCB_RSP_E 0x08 -#define OB_MAC_IOCB_RSP_L 0x04 -#define OB_MAC_IOCB_RSP_S 0x02 -#define OB_MAC_IOCB_RSP_I 0x01 - - __le16 reserved0; - __le32 transaction_id; - __le32 reserved1; - __le32 reserved2; -}; - -struct ib_ip_iocb_rsp { - u8 opcode; -#define IB_IP_IOCB_RSP_3032_V 0x80 -#define IB_IP_IOCB_RSP_3032_O 0x40 -#define 
IB_IP_IOCB_RSP_3032_I 0x20 -#define IB_IP_IOCB_RSP_3032_R 0x10 - u8 flags; -#define IB_IP_IOCB_RSP_S 0x80 -#define IB_IP_IOCB_RSP_H1 0x40 -#define IB_IP_IOCB_RSP_H0 0x20 -#define IB_IP_IOCB_RSP_B 0x10 -#define IB_IP_IOCB_RSP_M 0x08 -#define IB_IP_IOCB_RSP_MA 0x07 - - __le16 length; - __le16 checksum; -#define IB_IP_IOCB_RSP_3032_ICE 0x01 -#define IB_IP_IOCB_RSP_3032_CE 0x02 -#define IB_IP_IOCB_RSP_3032_NUC 0x04 -#define IB_IP_IOCB_RSP_3032_UDP 0x08 -#define IB_IP_IOCB_RSP_3032_TCP 0x10 -#define IB_IP_IOCB_RSP_3032_IPE 0x20 - __le16 reserved; -#define IB_IP_IOCB_RSP_R 0x01 - __le32 ial_low; - __le32 ial_high; -}; - -struct net_rsp_iocb { - u8 opcode; - u8 flags; - __le16 reserved0; - __le32 reserved[3]; -}; -#pragma pack() - -/* - * Register Definitions... - */ -#define PORT0_PHY_ADDRESS 0x1e00 -#define PORT1_PHY_ADDRESS 0x1f00 - -#define ETHERNET_CRC_SIZE 4 - -#define MII_SCAN_REGISTER 0x00000001 - -#define PHY_ID_0_REG 2 -#define PHY_ID_1_REG 3 - -#define PHY_OUI_1_MASK 0xfc00 -#define PHY_MODEL_MASK 0x03f0 - -/* Address for the Agere Phy */ -#define MII_AGERE_ADDR_1 0x00001000 -#define MII_AGERE_ADDR_2 0x00001100 - -/* 32-bit ispControlStatus */ -enum { - ISP_CONTROL_NP_MASK = 0x0003, - ISP_CONTROL_NP_PCSR = 0x0000, - ISP_CONTROL_NP_HMCR = 0x0001, - ISP_CONTROL_NP_LRAMCR = 0x0002, - ISP_CONTROL_NP_PSR = 0x0003, - ISP_CONTROL_RI = 0x0008, - ISP_CONTROL_CI = 0x0010, - ISP_CONTROL_PI = 0x0020, - ISP_CONTROL_IN = 0x0040, - ISP_CONTROL_BE = 0x0080, - ISP_CONTROL_FN_MASK = 0x0700, - ISP_CONTROL_FN0_NET = 0x0400, - ISP_CONTROL_FN0_SCSI = 0x0500, - ISP_CONTROL_FN1_NET = 0x0600, - ISP_CONTROL_FN1_SCSI = 0x0700, - ISP_CONTROL_LINK_DN_0 = 0x0800, - ISP_CONTROL_LINK_DN_1 = 0x1000, - ISP_CONTROL_FSR = 0x2000, - ISP_CONTROL_FE = 0x4000, - ISP_CONTROL_SR = 0x8000, -}; - -/* 32-bit ispInterruptMaskReg */ -enum { - ISP_IMR_ENABLE_INT = 0x0004, - ISP_IMR_DISABLE_RESET_INT = 0x0008, - ISP_IMR_DISABLE_CMPL_INT = 0x0010, - ISP_IMR_DISABLE_PROC_INT = 0x0020, -}; - -/* 32-bit serialPortInterfaceReg */ -enum { - ISP_SERIAL_PORT_IF_CLK = 0x0001, - ISP_SERIAL_PORT_IF_CS = 0x0002, - ISP_SERIAL_PORT_IF_D0 = 0x0004, - ISP_SERIAL_PORT_IF_DI = 0x0008, - ISP_NVRAM_MASK = (0x000F << 16), - ISP_SERIAL_PORT_IF_WE = 0x0010, - ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F, - ISP_SERIAL_PORT_IF_SCI = 0x0400, - ISP_SERIAL_PORT_IF_SC0 = 0x0800, - ISP_SERIAL_PORT_IF_SCE = 0x1000, - ISP_SERIAL_PORT_IF_SDI = 0x2000, - ISP_SERIAL_PORT_IF_SDO = 0x4000, - ISP_SERIAL_PORT_IF_SDE = 0x8000, - ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00, -}; - -/* semaphoreReg */ -enum { - QL_RESOURCE_MASK_BASE_CODE = 0x7, - QL_RESOURCE_BITS_BASE_CODE = 0x4, - QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1), - QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4), - QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7), - QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10), - QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13), - QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)), - QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)), - QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)), - QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)), - QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)), -}; - - /* - * QL3XXX memory-mapped registers - * QL3XXX has 4 "pages" of registers, each page occupying - * 256 bytes. Each page has a "common" area at the start and then - * page-specific registers after that. 
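The ISP_CONTROL_NP_* field in the ispControlStatus enum above acts as the page select for the page-specific register sets described here, and struct ql3_adapter later in this header carries a current_page field that caches it. A hypothetical helper sketching that caching pattern, reusing the mask-in-upper-16-bits write convention visible in ql_reset_work() (an assumption-laden sketch, not the driver's actual routine):

static void ql_select_register_page(struct ql3_adapter *qdev,
				    struct ql3xxx_port_registers __iomem *regs,
				    u32 page)
{
	if (qdev->current_page == page)		/* already mapped in */
		return;
	/* upper 16 bits = write-enable mask, lower bits = new NP value */
	ql_write_common_reg(qdev, &regs->CommonRegs.ispControlStatus,
			    (ISP_CONTROL_NP_MASK << 16) | page);
	qdev->current_page = page;
}

Caching the page avoids a register write on every access when consecutive reads and writes land in the same page.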
- */ -struct ql3xxx_common_registers { - u32 MB0; /* Offset 0x00 */ - u32 MB1; /* Offset 0x04 */ - u32 MB2; /* Offset 0x08 */ - u32 MB3; /* Offset 0x0c */ - u32 MB4; /* Offset 0x10 */ - u32 MB5; /* Offset 0x14 */ - u32 MB6; /* Offset 0x18 */ - u32 MB7; /* Offset 0x1c */ - u32 flashBiosAddr; - u32 flashBiosData; - u32 ispControlStatus; - u32 ispInterruptMaskReg; - u32 serialPortInterfaceReg; - u32 semaphoreReg; - u32 reqQProducerIndex; - u32 rspQConsumerIndex; - - u32 rxLargeQProducerIndex; - u32 rxSmallQProducerIndex; - u32 arcMadiCommand; - u32 arcMadiData; -}; - -enum { - EXT_HW_CONFIG_SP_MASK = 0x0006, - EXT_HW_CONFIG_SP_NONE = 0x0000, - EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002, - EXT_HW_CONFIG_SP_ECC = 0x0004, - EXT_HW_CONFIG_SP_ECCx = 0x0006, - EXT_HW_CONFIG_SIZE_MASK = 0x0060, - EXT_HW_CONFIG_SIZE_128M = 0x0000, - EXT_HW_CONFIG_SIZE_256M = 0x0020, - EXT_HW_CONFIG_SIZE_512M = 0x0040, - EXT_HW_CONFIG_SIZE_INVALID = 0x0060, - EXT_HW_CONFIG_PD = 0x0080, - EXT_HW_CONFIG_FW = 0x0200, - EXT_HW_CONFIG_US = 0x0400, - EXT_HW_CONFIG_DCS_MASK = 0x1800, - EXT_HW_CONFIG_DCS_9MA = 0x0000, - EXT_HW_CONFIG_DCS_15MA = 0x0800, - EXT_HW_CONFIG_DCS_18MA = 0x1000, - EXT_HW_CONFIG_DCS_24MA = 0x1800, - EXT_HW_CONFIG_DDS_MASK = 0x6000, - EXT_HW_CONFIG_DDS_9MA = 0x0000, - EXT_HW_CONFIG_DDS_15MA = 0x2000, - EXT_HW_CONFIG_DDS_18MA = 0x4000, - EXT_HW_CONFIG_DDS_24MA = 0x6000, -}; - -/* InternalChipConfig */ -enum { - INTERNAL_CHIP_DM = 0x0001, - INTERNAL_CHIP_SD = 0x0002, - INTERNAL_CHIP_RAP_MASK = 0x000C, - INTERNAL_CHIP_RAP_RR = 0x0000, - INTERNAL_CHIP_RAP_NRM = 0x0004, - INTERNAL_CHIP_RAP_ERM = 0x0008, - INTERNAL_CHIP_RAP_ERMx = 0x000C, - INTERNAL_CHIP_WE = 0x0010, - INTERNAL_CHIP_EF = 0x0020, - INTERNAL_CHIP_FR = 0x0040, - INTERNAL_CHIP_FW = 0x0080, - INTERNAL_CHIP_FI = 0x0100, - INTERNAL_CHIP_FT = 0x0200, -}; - -/* portControl */ -enum { - PORT_CONTROL_DS = 0x0001, - PORT_CONTROL_HH = 0x0002, - PORT_CONTROL_EI = 0x0004, - PORT_CONTROL_ET = 0x0008, - PORT_CONTROL_EF = 0x0010, - PORT_CONTROL_DRM = 0x0020, - PORT_CONTROL_RLB = 0x0040, - PORT_CONTROL_RCB = 0x0080, - PORT_CONTROL_MAC = 0x0100, - PORT_CONTROL_IPV = 0x0200, - PORT_CONTROL_IFP = 0x0400, - PORT_CONTROL_ITP = 0x0800, - PORT_CONTROL_FI = 0x1000, - PORT_CONTROL_DFP = 0x2000, - PORT_CONTROL_OI = 0x4000, - PORT_CONTROL_CC = 0x8000, -}; - -/* portStatus */ -enum { - PORT_STATUS_SM0 = 0x0001, - PORT_STATUS_SM1 = 0x0002, - PORT_STATUS_X = 0x0008, - PORT_STATUS_DL = 0x0080, - PORT_STATUS_IC = 0x0200, - PORT_STATUS_MRC = 0x0400, - PORT_STATUS_NL = 0x0800, - PORT_STATUS_REV_ID_MASK = 0x7000, - PORT_STATUS_REV_ID_1 = 0x1000, - PORT_STATUS_REV_ID_2 = 0x2000, - PORT_STATUS_REV_ID_3 = 0x3000, - PORT_STATUS_64 = 0x8000, - PORT_STATUS_UP0 = 0x10000, - PORT_STATUS_AC0 = 0x20000, - PORT_STATUS_AE0 = 0x40000, - PORT_STATUS_UP1 = 0x100000, - PORT_STATUS_AC1 = 0x200000, - PORT_STATUS_AE1 = 0x400000, - PORT_STATUS_F0_ENABLED = 0x1000000, - PORT_STATUS_F1_ENABLED = 0x2000000, - PORT_STATUS_F2_ENABLED = 0x4000000, - PORT_STATUS_F3_ENABLED = 0x8000000, -}; - -/* macMIIMgmtControlReg */ -enum { - MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003, - MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000, - MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001, - MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002, - MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003, - MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008, - MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010, - MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020, - MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040, - MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080, -}; - -/* macMIIMgmtControlReg */ -enum { - 
MAC_MII_CONTROL_RC = 0x0001, - MAC_MII_CONTROL_SC = 0x0002, - MAC_MII_CONTROL_AS = 0x0004, - MAC_MII_CONTROL_NP = 0x0008, - MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070, - MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000, - MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010, - MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020, - MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030, - MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040, - MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050, - MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060, - MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070, - MAC_MII_CONTROL_RM = 0x8000, -}; - -/* macMIIStatusReg */ -enum { - MAC_MII_STATUS_BSY = 0x0001, - MAC_MII_STATUS_SC = 0x0002, - MAC_MII_STATUS_NV = 0x0004, -}; - -enum { - MAC_CONFIG_REG_PE = 0x0001, - MAC_CONFIG_REG_TF = 0x0002, - MAC_CONFIG_REG_RF = 0x0004, - MAC_CONFIG_REG_FD = 0x0008, - MAC_CONFIG_REG_GM = 0x0010, - MAC_CONFIG_REG_LB = 0x0020, - MAC_CONFIG_REG_SR = 0x8000, -}; - -enum { - MAC_HALF_DUPLEX_REG_ED = 0x10000, - MAC_HALF_DUPLEX_REG_NB = 0x20000, - MAC_HALF_DUPLEX_REG_BNB = 0x40000, - MAC_HALF_DUPLEX_REG_ALT = 0x80000, -}; - -enum { - IP_ADDR_INDEX_REG_MASK = 0x000f, - IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000, - IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001, - IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002, - IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003, - IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004, - IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005, - IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006, - IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, - IP_ADDR_INDEX_REG_6 = 0x0008, - IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030, - IP_ADDR_INDEX_REG_E = 0x0040, -}; -enum { - QL3032_PORT_CONTROL_DS = 0x0001, - QL3032_PORT_CONTROL_HH = 0x0002, - QL3032_PORT_CONTROL_EIv6 = 0x0004, - QL3032_PORT_CONTROL_EIv4 = 0x0008, - QL3032_PORT_CONTROL_ET = 0x0010, - QL3032_PORT_CONTROL_EF = 0x0020, - QL3032_PORT_CONTROL_DRM = 0x0040, - QL3032_PORT_CONTROL_RLB = 0x0080, - QL3032_PORT_CONTROL_RCB = 0x0100, - QL3032_PORT_CONTROL_KIE = 0x0200, -}; - -enum { - PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f, - PROBE_MUX_ADDR_REG_SYSCLK = 0x0000, - PROBE_MUX_ADDR_REG_PCICLK = 0x0040, - PROBE_MUX_ADDR_REG_NRXCLK = 0x0080, - PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0, - PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00, - PROBE_MUX_ADDR_REG_UP = 0x4000, - PROBE_MUX_ADDR_REG_RE = 0x8000, -}; - -enum { - STATISTICS_INDEX_REG_MASK = 0x01ff, - STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000, - STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001, - STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002, - STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003, - STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004, - STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005, - STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006, - STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007, - STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008, - STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009, - STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a, - STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b, - STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c, - STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d, - STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e, - STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f, - STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010, - STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011, - STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012, - STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013, - STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014, - STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015, - STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016, - STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017, - STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018, - STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019, - STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a, - 
STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b, - STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c, - STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d, - STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e, - STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f, - STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020, - STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021, - STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022, - STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023, - STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024, - STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025, - STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026, - STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027, - STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028, - STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029, - STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030, - STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031, - STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032, - STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033, - STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034, - STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035, - STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036, - STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037, - STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038, - STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f, -}; - -enum { - PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001, - PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002, - PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004, - PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008, - PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010, - PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020, - PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040, - PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080, - PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100, - PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200, - PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400, - PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800, - PORT_FATAL_ERROR_STATUS_BLE = 0x00001000, - PORT_FATAL_ERROR_STATUS_SPE = 0x00002000, - PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000, - PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000, - PORT_FATAL_ERROR_STATUS_ICE = 0x00010000, - PORT_FATAL_ERROR_STATUS_ILE = 0x00020000, - PORT_FATAL_ERROR_STATUS_OPE = 0x00040000, - PORT_FATAL_ERROR_STATUS_TA = 0x00080000, - PORT_FATAL_ERROR_STATUS_MA = 0x00100000, - PORT_FATAL_ERROR_STATUS_SCE = 0x00200000, - PORT_FATAL_ERROR_STATUS_RPE = 0x00400000, - PORT_FATAL_ERROR_STATUS_MPE = 0x00800000, - PORT_FATAL_ERROR_STATUS_OCE = 0x01000000, -}; - -/* - * port control and status page - page 0 - */ - -struct ql3xxx_port_registers { - struct ql3xxx_common_registers CommonRegs; - - u32 ExternalHWConfig; - u32 InternalChipConfig; - u32 portControl; - u32 portStatus; - u32 macAddrIndirectPtrReg; - u32 macAddrDataReg; - u32 macMIIMgmtControlReg; - u32 macMIIMgmtAddrReg; - u32 macMIIMgmtDataReg; - u32 macMIIStatusReg; - u32 mac0ConfigReg; - u32 mac0IpgIfgReg; - u32 mac0HalfDuplexReg; - u32 mac0MaxFrameLengthReg; - u32 mac0PauseThresholdReg; - u32 mac1ConfigReg; - u32 mac1IpgIfgReg; - u32 mac1HalfDuplexReg; - u32 mac1MaxFrameLengthReg; - u32 mac1PauseThresholdReg; - u32 ipAddrIndexReg; - u32 ipAddrDataReg; - u32 ipReassemblyTimeout; - u32 tcpMaxWindow; - u32 currentTcpTimestamp[2]; - u32 internalRamRWAddrReg; - u32 internalRamWDataReg; - u32 reclaimedBufferAddrRegLow; - u32 reclaimedBufferAddrRegHigh; - u32 tcpConfiguration; - u32 functionControl; - u32 fpgaRevID; - u32 localRamAddr; - u32 localRamDataAutoIncr; - u32 localRamDataNonIncr; - u32 gpOutput; - u32 gpInput; - u32 probeMuxAddr; - u32 probeMuxData; - u32 statisticsIndexReg; - u32 statisticsReadDataRegAutoIncr; - u32 statisticsReadDataRegNoIncr; - u32 PortFatalErrStatus; -}; - -/* - * port host 
memory config page - page 1 - */ -struct ql3xxx_host_memory_registers { - struct ql3xxx_common_registers CommonRegs; - - u32 reserved[12]; - - /* Network Request Queue */ - u32 reqConsumerIndex; - u32 reqConsumerIndexAddrLow; - u32 reqConsumerIndexAddrHigh; - u32 reqBaseAddrLow; - u32 reqBaseAddrHigh; - u32 reqLength; - - /* Network Completion Queue */ - u32 rspProducerIndex; - u32 rspProducerIndexAddrLow; - u32 rspProducerIndexAddrHigh; - u32 rspBaseAddrLow; - u32 rspBaseAddrHigh; - u32 rspLength; - - /* RX Large Buffer Queue */ - u32 rxLargeQConsumerIndex; - u32 rxLargeQBaseAddrLow; - u32 rxLargeQBaseAddrHigh; - u32 rxLargeQLength; - u32 rxLargeBufferLength; - - /* RX Small Buffer Queue */ - u32 rxSmallQConsumerIndex; - u32 rxSmallQBaseAddrLow; - u32 rxSmallQBaseAddrHigh; - u32 rxSmallQLength; - u32 rxSmallBufferLength; - -}; - -/* - * port local RAM page - page 2 - */ -struct ql3xxx_local_ram_registers { - struct ql3xxx_common_registers CommonRegs; - u32 bufletSize; - u32 maxBufletCount; - u32 currentBufletCount; - u32 reserved; - u32 freeBufletThresholdLow; - u32 freeBufletThresholdHigh; - u32 ipHashTableBase; - u32 ipHashTableCount; - u32 tcpHashTableBase; - u32 tcpHashTableCount; - u32 ncbBase; - u32 maxNcbCount; - u32 currentNcbCount; - u32 drbBase; - u32 maxDrbCount; - u32 currentDrbCount; -}; - -/* - * definitions for Semaphore bits in Semaphore/Serial NVRAM interface register - */ - -#define LS_64BITS(x) (u32)(0xffffffff & ((u64)x)) -#define MS_64BITS(x) (u32)(0xffffffff & (((u64)x)>>16>>16) ) - -/* - * I/O register - */ - -enum { - CONTROL_REG = 0, - STATUS_REG = 1, - PHY_STAT_LINK_UP = 0x0004, - PHY_CTRL_LOOPBACK = 0x4000, - - PETBI_CONTROL_REG = 0x00, - PETBI_CTRL_ALL_PARAMS = 0x7140, - PETBI_CTRL_SOFT_RESET = 0x8000, - PETBI_CTRL_AUTO_NEG = 0x1000, - PETBI_CTRL_RESTART_NEG = 0x0200, - PETBI_CTRL_FULL_DUPLEX = 0x0100, - PETBI_CTRL_SPEED_1000 = 0x0040, - - PETBI_STATUS_REG = 0x01, - PETBI_STAT_NEG_DONE = 0x0020, - PETBI_STAT_LINK_UP = 0x0004, - - PETBI_NEG_ADVER = 0x04, - PETBI_NEG_PAUSE = 0x0080, - PETBI_NEG_PAUSE_MASK = 0x0180, - PETBI_NEG_DUPLEX = 0x0020, - PETBI_NEG_DUPLEX_MASK = 0x0060, - - PETBI_NEG_PARTNER = 0x05, - PETBI_NEG_ERROR_MASK = 0x3000, - - PETBI_EXPANSION_REG = 0x06, - PETBI_EXP_PAGE_RX = 0x0002, - - PHY_GIG_CONTROL = 9, - PHY_GIG_ENABLE_MAN = 0x1000, /* Enable Master/Slave Manual Config*/ - PHY_GIG_SET_MASTER = 0x0800, /* Set Master (slave if clear)*/ - PHY_GIG_ALL_PARAMS = 0x0300, - PHY_GIG_ADV_1000F = 0x0200, - PHY_GIG_ADV_1000H = 0x0100, - - PHY_NEG_ADVER = 4, - PHY_NEG_ALL_PARAMS = 0x0fe0, - PHY_NEG_ASY_PAUSE = 0x0800, - PHY_NEG_SYM_PAUSE = 0x0400, - PHY_NEG_ADV_SPEED = 0x01e0, - PHY_NEG_ADV_100F = 0x0100, - PHY_NEG_ADV_100H = 0x0080, - PHY_NEG_ADV_10F = 0x0040, - PHY_NEG_ADV_10H = 0x0020, - - PETBI_TBI_CTRL = 0x11, - PETBI_TBI_RESET = 0x8000, - PETBI_TBI_AUTO_SENSE = 0x0100, - PETBI_TBI_SERDES_MODE = 0x0010, - PETBI_TBI_SERDES_WRAP = 0x0002, - - AUX_CONTROL_STATUS = 0x1c, - PHY_AUX_NEG_DONE = 0x8000, - PHY_NEG_PARTNER = 5, - PHY_AUX_DUPLEX_STAT = 0x0020, - PHY_AUX_SPEED_STAT = 0x0018, - PHY_AUX_NO_HW_STRAP = 0x0004, - PHY_AUX_RESET_STICK = 0x0002, - PHY_NEG_PAUSE = 0x0400, - PHY_CTRL_SOFT_RESET = 0x8000, - PHY_CTRL_AUTO_NEG = 0x1000, - PHY_CTRL_RESTART_NEG = 0x0200, -}; -enum { -/* AM29LV Flash definitions */ - FM93C56A_START = 0x1, -/* Commands */ - FM93C56A_READ = 0x2, - FM93C56A_WEN = 0x0, - FM93C56A_WRITE = 0x1, - FM93C56A_WRITE_ALL = 0x0, - FM93C56A_WDS = 0x0, - FM93C56A_ERASE = 0x3, - FM93C56A_ERASE_ALL = 0x0, -/* Command Extensions */ - 
FM93C56A_WEN_EXT = 0x3, - FM93C56A_WRITE_ALL_EXT = 0x1, - FM93C56A_WDS_EXT = 0x0, - FM93C56A_ERASE_ALL_EXT = 0x2, -/* Special Bits */ - FM93C56A_READ_DUMMY_BITS = 1, - FM93C56A_READY = 0, - FM93C56A_BUSY = 1, - FM93C56A_CMD_BITS = 2, -/* AM29LV Flash definitions */ - FM93C56A_SIZE_8 = 0x100, - FM93C56A_SIZE_16 = 0x80, - FM93C66A_SIZE_8 = 0x200, - FM93C66A_SIZE_16 = 0x100, - FM93C86A_SIZE_16 = 0x400, -/* Address Bits */ - FM93C56A_NO_ADDR_BITS_16 = 8, - FM93C56A_NO_ADDR_BITS_8 = 9, - FM93C86A_NO_ADDR_BITS_16 = 10, -/* Data Bits */ - FM93C56A_DATA_BITS_16 = 16, - FM93C56A_DATA_BITS_8 = 8, -}; -enum { -/* Auburn Bits */ - AUBURN_EEPROM_DI = 0x8, - AUBURN_EEPROM_DI_0 = 0x0, - AUBURN_EEPROM_DI_1 = 0x8, - AUBURN_EEPROM_DO = 0x4, - AUBURN_EEPROM_DO_0 = 0x0, - AUBURN_EEPROM_DO_1 = 0x4, - AUBURN_EEPROM_CS = 0x2, - AUBURN_EEPROM_CS_0 = 0x0, - AUBURN_EEPROM_CS_1 = 0x2, - AUBURN_EEPROM_CLK_RISE = 0x1, - AUBURN_EEPROM_CLK_FALL = 0x0, -}; -enum {EEPROM_SIZE = FM93C86A_SIZE_16, - EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16, - EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16, -}; - -/* - * MAC Config data structure - */ - struct eeprom_port_cfg { - u16 etherMtu_mac; - u16 pauseThreshold_mac; - u16 resumeThreshold_mac; - u16 portConfiguration; -#define PORT_CONFIG_DEFAULT 0xf700 -#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000 -#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000 -#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000 -#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000 -#define PORT_CONFIG_1000MB_SPEED 0x0400 -#define PORT_CONFIG_100MB_SPEED 0x0200 -#define PORT_CONFIG_10MB_SPEED 0x0100 -#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00 - u16 reserved[12]; - -}; - -/* - * BIOS data structure - */ -struct eeprom_bios_cfg { - u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12; - - u8 bootID0:7, boodID0Valid:1; - u8 bootLun0[8]; - - u8 bootID1:7, boodID1Valid:1; - u8 bootLun1[8]; - - u16 MaxLunsTrgt; - u8 reserved[10]; -}; - -/* - * Function Specific Data structure - */ -struct eeprom_function_cfg { - u8 reserved[30]; - u16 macAddress[3]; - u16 macAddressSecondary[3]; - - u16 subsysVendorId; - u16 subsysDeviceId; -}; - -/* - * EEPROM format - */ -struct eeprom_data { - u8 asicId[4]; - u16 version_and_numPorts; /* together to avoid endianness crap */ - u16 boardId; - -#define EEPROM_BOARDID_STR_SIZE 16 -#define EEPROM_SERIAL_NUM_SIZE 16 - - u8 boardIdStr[16]; - u8 serialNumber[16]; - u16 extHwConfig; - struct eeprom_port_cfg macCfg_port0; - struct eeprom_port_cfg macCfg_port1; - u16 bufletSize; - u16 bufletCount; - u16 tcpWindowThreshold50; - u16 tcpWindowThreshold25; - u16 tcpWindowThreshold0; - u16 ipHashTableBaseHi; - u16 ipHashTableBaseLo; - u16 ipHashTableSize; - u16 tcpHashTableBaseHi; - u16 tcpHashTableBaseLo; - u16 tcpHashTableSize; - u16 ncbTableBaseHi; - u16 ncbTableBaseLo; - u16 ncbTableSize; - u16 drbTableBaseHi; - u16 drbTableBaseLo; - u16 drbTableSize; - u16 reserved_142[4]; - u16 ipReassemblyTimeout; - u16 tcpMaxWindowSize; - u16 ipSecurity; -#define IPSEC_CONFIG_PRESENT 0x0001 - u8 reserved_156[294]; - u16 qDebug[8]; - struct eeprom_function_cfg funcCfg_fn0; - u16 reserved_510; - u8 oemSpace[432]; - struct eeprom_bios_cfg biosCfg_fn1; - struct eeprom_function_cfg funcCfg_fn1; - u16 reserved_1022; - u8 reserved_1024[464]; - struct eeprom_function_cfg funcCfg_fn2; - u16 reserved_1534; - u8 reserved_1536[432]; - struct eeprom_bios_cfg biosCfg_fn3; - struct eeprom_function_cfg funcCfg_fn3; - u16 checksum; -}; - -/* - * General definitions... 
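struct eeprom_data below ends in a 16-bit checksum word; NVRAM images of this shape are conventionally validated by summing every 16-bit word, checksum included, and requiring the result to be zero. A self-contained sketch of that check (hypothetical helper in plain user-space C, not taken from the driver):

#include <stdint.h>
#include <stddef.h>

static int eeprom_image_valid(const uint16_t *words, size_t nwords)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		sum += words[i];	/* checksum word drives the sum to 0 */

	return sum == 0;
}

Called as eeprom_image_valid((const uint16_t *)&nvram, sizeof(nvram) / 2), this is the kind of gate a probe routine applies before trusting MAC addresses and port configuration read from NVRAM.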
- */ - -/* - * Below are a number compiler switches for controlling driver behavior. - * Some are not supported under certain conditions and are notated as such. - */ - -#define QL3XXX_VENDOR_ID 0x1077 -#define QL3022_DEVICE_ID 0x3022 -#define QL3032_DEVICE_ID 0x3032 - -/* MTU & Frame Size stuff */ -#define NORMAL_MTU_SIZE ETH_DATA_LEN -#define JUMBO_MTU_SIZE 9000 -#define VLAN_ID_LEN 2 - -/* Request Queue Related Definitions */ -#define NUM_REQ_Q_ENTRIES 256 /* so that 64 * 64 = 4096 (1 page) */ - -/* Response Queue Related Definitions */ -#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */ - -/* Transmit and Receive Buffers */ -#define NUM_LBUFQ_ENTRIES 128 -#define JUMBO_NUM_LBUFQ_ENTRIES 32 -#define NUM_SBUFQ_ENTRIES 64 -#define QL_SMALL_BUFFER_SIZE 32 -#define QL_ADDR_ELE_PER_BUFQ_ENTRY \ -(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element)) - /* Each send has at least control block. This is how many we keep. */ -#define NUM_SMALL_BUFFERS NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY - -#define QL_HEADER_SPACE 32 /* make header space at top of skb. */ -/* - * Large & Small Buffers for Receives - */ -struct lrg_buf_q_entry { - - __le32 addr0_lower; -#define IAL_LAST_ENTRY 0x00000001 -#define IAL_CONT_ENTRY 0x00000002 -#define IAL_FLAG_MASK 0x00000003 - __le32 addr0_upper; - __le32 addr1_lower; - __le32 addr1_upper; - __le32 addr2_lower; - __le32 addr2_upper; - __le32 addr3_lower; - __le32 addr3_upper; - __le32 addr4_lower; - __le32 addr4_upper; - __le32 addr5_lower; - __le32 addr5_upper; - __le32 addr6_lower; - __le32 addr6_upper; - __le32 addr7_lower; - __le32 addr7_upper; - -}; - -struct bufq_addr_element { - __le32 addr_low; - __le32 addr_high; -}; - -#define QL_NO_RESET 0 -#define QL_DO_RESET 1 - -enum link_state_t { - LS_UNKNOWN = 0, - LS_DOWN, - LS_DEGRADE, - LS_RECOVER, - LS_UP, -}; - -struct ql_rcv_buf_cb { - struct ql_rcv_buf_cb *next; - struct sk_buff *skb; - DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); - __le32 buf_phy_addr_low; - __le32 buf_phy_addr_high; - int index; -}; - -/* - * Original IOCB has 3 sg entries: - * first points to skb-data area - * second points to first frag - * third points to next oal. - * OAL has 5 entries: - * 1 thru 4 point to frags - * fifth points to next oal. - */ -#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1) - -struct oal_entry { - __le32 dma_lo; - __le32 dma_hi; - __le32 len; -#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ -#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */ -}; - -struct oal { - struct oal_entry oal_entry[5]; -}; - -struct map_list { - DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); -}; - -struct ql_tx_buf_cb { - struct sk_buff *skb; - struct ob_mac_iocb_req *queue_entry ; - int seg_count; - struct oal *oal; - struct map_list map[MAX_SKB_FRAGS+1]; -}; - -/* definitions for type field */ -#define QL_BUF_TYPE_MACIOCB 0x01 -#define QL_BUF_TYPE_IPIOCB 0x02 -#define QL_BUF_TYPE_TCPIOCB 0x03 - -/* qdev->flags definitions. */ -enum { QL_RESET_DONE = 1, /* Reset finished. */ - QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */ - QL_RESET_START = 3, /* Please reset the chip. */ - QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */ - QL_TX_TIMEOUT = 5, /* Timeout in progress. */ - QL_LINK_MASTER = 6, /* This driver controls the link. */ - QL_ADAPTER_UP = 7, /* Adapter has been brought up. */ - QL_THREAD_UP = 8, /* This flag is available. */ - QL_LINK_UP = 9, /* Link Status. 
*/ - QL_ALLOC_REQ_RSP_Q_DONE = 10, - QL_ALLOC_BUFQS_DONE = 11, - QL_ALLOC_SMALL_BUF_DONE = 12, - QL_LINK_OPTICAL = 13, - QL_MSI_ENABLED = 14, -}; - -/* - * ql3_adapter - The main Adapter structure definition. - * This structure has all fields relevant to the hardware. - */ - -struct ql3_adapter { - u32 reserved_00; - unsigned long flags; - - /* PCI Configuration information for this device */ - struct pci_dev *pdev; - struct net_device *ndev; /* Parent NET device */ - - struct napi_struct napi; - - /* Hardware information */ - u8 chip_rev_id; - u8 pci_slot; - u8 pci_width; - u8 pci_x; - u32 msi; - int index; - struct timer_list adapter_timer; /* timer used for various functions */ - - spinlock_t adapter_lock; - spinlock_t hw_lock; - - /* PCI Bus Relative Register Addresses */ - u8 __iomem *mmap_virt_base; /* stores return value from ioremap() */ - struct ql3xxx_port_registers __iomem *mem_map_registers; - u32 current_page; /* tracks current register page */ - - u32 msg_enable; - u8 reserved_01[2]; - u8 reserved_02[2]; - - /* Page for Shadow Registers */ - void *shadow_reg_virt_addr; - dma_addr_t shadow_reg_phy_addr; - - /* Net Request Queue */ - u32 req_q_size; - u32 reserved_03; - struct ob_mac_iocb_req *req_q_virt_addr; - dma_addr_t req_q_phy_addr; - u16 req_producer_index; - u16 reserved_04; - u16 *preq_consumer_index; - u32 req_consumer_index_phy_addr_high; - u32 req_consumer_index_phy_addr_low; - atomic_t tx_count; - struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES]; - - /* Net Response Queue */ - u32 rsp_q_size; - u32 eeprom_cmd_data; - struct net_rsp_iocb *rsp_q_virt_addr; - dma_addr_t rsp_q_phy_addr; - struct net_rsp_iocb *rsp_current; - u16 rsp_consumer_index; - u16 reserved_06; - volatile __le32 *prsp_producer_index; - u32 rsp_producer_index_phy_addr_high; - u32 rsp_producer_index_phy_addr_low; - - /* Large Buffer Queue */ - u32 lrg_buf_q_alloc_size; - u32 lrg_buf_q_size; - void *lrg_buf_q_alloc_virt_addr; - void *lrg_buf_q_virt_addr; - dma_addr_t lrg_buf_q_alloc_phy_addr; - dma_addr_t lrg_buf_q_phy_addr; - u32 lrg_buf_q_producer_index; - u32 lrg_buf_release_cnt; - struct bufq_addr_element *lrg_buf_next_free; - u32 num_large_buffers; - u32 num_lbufq_entries; - - /* Large (Receive) Buffers */ - struct ql_rcv_buf_cb *lrg_buf; - struct ql_rcv_buf_cb *lrg_buf_free_head; - struct ql_rcv_buf_cb *lrg_buf_free_tail; - u32 lrg_buf_free_count; - u32 lrg_buffer_len; - u32 lrg_buf_index; - u32 lrg_buf_skb_check; - - /* Small Buffer Queue */ - u32 small_buf_q_alloc_size; - u32 small_buf_q_size; - u32 small_buf_q_producer_index; - void *small_buf_q_alloc_virt_addr; - void *small_buf_q_virt_addr; - dma_addr_t small_buf_q_alloc_phy_addr; - dma_addr_t small_buf_q_phy_addr; - u32 small_buf_index; - - /* Small (Receive) Buffers */ - void *small_buf_virt_addr; - dma_addr_t small_buf_phy_addr; - u32 small_buf_phy_addr_low; - u32 small_buf_phy_addr_high; - u32 small_buf_release_cnt; - u32 small_buf_total_size; - - struct eeprom_data nvram_data; - u32 port_link_state; - - /* 4022 specific */ - u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ - u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ - u32 mac_ob_opcode; /* Opcode to use on mac transmission */ - u32 mb_bit_mask; /* MA Bits mask to use on transmission */ - u32 numPorts; - struct workqueue_struct *workqueue; - struct delayed_work reset_work; - struct delayed_work tx_timeout_work; - struct delayed_work link_state_work; - u32 max_frame_size; - u32 device_id; - u16 phyType; 
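The request-queue bookkeeping above (the driver-owned req_producer_index against the hardware-updated *preq_consumer_index, over the 256-entry NUM_REQ_Q_ENTRIES ring) relies on power-of-two index arithmetic; the qlcnic get_next_index() macro later in this patch is the same idiom. A self-contained sketch with illustrative names:

#include <stdint.h>

#define RING_ENTRIES 256	/* mirrors NUM_REQ_Q_ENTRIES, a power of two */

static inline uint16_t ring_next(uint16_t idx)
{
	return (idx + 1) & (RING_ENTRIES - 1);	/* wrap without a branch */
}

static inline uint16_t ring_used(uint16_t producer, uint16_t consumer)
{
	/* unsigned subtraction handles producer wrapping past consumer */
	return (producer - consumer) & (RING_ENTRIES - 1);
}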
-}; - -#endif /* _QLA3XXX_H_ */ diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile deleted file mode 100644 index ddba83e..0000000 --- a/drivers/net/qlcnic/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -# -# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices -# - -obj-$(CONFIG_QLCNIC) := qlcnic.o - -qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \ - qlcnic_ethtool.o qlcnic_ctx.o diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h deleted file mode 100644 index 53c6e5d..0000000 --- a/drivers/net/qlcnic/qlcnic.h +++ /dev/null @@ -1,1555 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. - */ - -#ifndef _QLCNIC_H_ -#define _QLCNIC_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include -#include -#include -#include - -#include "qlcnic_hdr.h" - -#define _QLCNIC_LINUX_MAJOR 5 -#define _QLCNIC_LINUX_MINOR 0 -#define _QLCNIC_LINUX_SUBVERSION 22 -#define QLCNIC_LINUX_VERSIONID "5.0.22" -#define QLCNIC_DRV_IDC_VER 0x01 -#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ - (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) - -#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) -#define _major(v) (((v) >> 24) & 0xff) -#define _minor(v) (((v) >> 16) & 0xff) -#define _build(v) ((v) & 0xffff) - -/* version in image has weird encoding: - * 7:0 - major - * 15:8 - minor - * 31:16 - build (little endian) - */ -#define QLCNIC_DECODE_VERSION(v) \ - QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) - -#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2) -#define QLCNIC_NUM_FLASH_SECTORS (64) -#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024) -#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \ - * QLCNIC_FLASH_SECTOR_SIZE) - -#define RCV_DESC_RINGSIZE(rds_ring) \ - (sizeof(struct rcv_desc) * (rds_ring)->num_desc) -#define RCV_BUFF_RINGSIZE(rds_ring) \ - (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc) -#define STATUS_DESC_RINGSIZE(sds_ring) \ - (sizeof(struct status_desc) * (sds_ring)->num_desc) -#define TX_BUFF_RINGSIZE(tx_ring) \ - (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc) -#define TX_DESC_RINGSIZE(tx_ring) \ - (sizeof(struct cmd_desc_type0) * tx_ring->num_desc) - -#define QLCNIC_P3P_A0 0x50 - -#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0) - -#define FIRST_PAGE_GROUP_START 0 -#define FIRST_PAGE_GROUP_END 0x100000 - -#define P3P_MAX_MTU (9600) -#define P3P_MIN_MTU (68) -#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */ - -#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN) -#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU) -#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048 -#define QLCNIC_LRO_BUFFER_EXTRA 2048 - -/* Opcodes to be used with the commands */ -#define TX_ETHER_PKT 0x01 -#define TX_TCP_PKT 0x02 -#define TX_UDP_PKT 0x03 -#define TX_IP_PKT 0x04 -#define TX_TCP_LSO 0x05 -#define TX_TCP_LSO6 0x06 -#define TX_TCPV6_PKT 0x0b -#define TX_UDPV6_PKT 0x0c - -/* Tx defines */ -#define QLCNIC_MAX_FRAGS_PER_TX 14 -#define MAX_TSO_HEADER_DESC 2 -#define MGMT_CMD_DESC_RESV 4 -#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ - + MGMT_CMD_DESC_RESV) -#define QLCNIC_MAX_TX_TIMEOUTS 2 - -/* - * Following are the states of the Phantom. 
Phantom will set them and - * Host will read to check if the fields are correct. - */ -#define PHAN_INITIALIZE_FAILED 0xffff -#define PHAN_INITIALIZE_COMPLETE 0xff01 - -/* Host writes the following to notify that it has done the init-handshake */ -#define PHAN_INITIALIZE_ACK 0xf00f -#define PHAN_PEG_RCV_INITIALIZED 0xff01 - -#define NUM_RCV_DESC_RINGS 3 - -#define RCV_RING_NORMAL 0 -#define RCV_RING_JUMBO 1 - -#define MIN_CMD_DESCRIPTORS 64 -#define MIN_RCV_DESCRIPTORS 64 -#define MIN_JUMBO_DESCRIPTORS 32 - -#define MAX_CMD_DESCRIPTORS 1024 -#define MAX_RCV_DESCRIPTORS_1G 4096 -#define MAX_RCV_DESCRIPTORS_10G 8192 -#define MAX_RCV_DESCRIPTORS_VF 2048 -#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512 -#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024 - -#define DEFAULT_RCV_DESCRIPTORS_1G 2048 -#define DEFAULT_RCV_DESCRIPTORS_10G 4096 -#define DEFAULT_RCV_DESCRIPTORS_VF 1024 -#define MAX_RDS_RINGS 2 - -#define get_next_index(index, length) \ - (((index) + 1) & ((length) - 1)) - -/* - * Following data structures describe the descriptors that will be used. - * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when - * we are doing LSO (above the 1500 size packet) only. - */ - -#define FLAGS_VLAN_TAGGED 0x10 -#define FLAGS_VLAN_OOB 0x40 - -#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \ - (cmd_desc)->vlan_TCI = cpu_to_le16(v); -#define qlcnic_set_cmd_desc_port(cmd_desc, var) \ - ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) -#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \ - ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0)) - -#define qlcnic_set_tx_port(_desc, _port) \ - ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)) - -#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \ - ((_desc)->flags_opcode |= \ - cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))) - -#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \ - ((_desc)->nfrags__length = \ - cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))) - -struct cmd_desc_type0 { - u8 tcp_hdr_offset; /* For LSO only */ - u8 ip_hdr_offset; /* For LSO only */ - __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */ - __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */ - - __le64 addr_buffer2; - - __le16 reference_handle; - __le16 mss; - u8 port_ctxid; /* 7:4 ctxid 3:0 port */ - u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ - __le16 conn_id; /* IPSec offoad only */ - - __le64 addr_buffer3; - __le64 addr_buffer1; - - __le16 buffer_length[4]; - - __le64 addr_buffer4; - - u8 eth_addr[ETH_ALEN]; - __le16 vlan_TCI; - -} __attribute__ ((aligned(64))); - -/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ -struct rcv_desc { - __le16 reference_handle; - __le16 reserved; - __le32 buffer_length; /* allocated buffer length (usually 2K) */ - __le64 addr_buffer; -} __packed; - -/* opcode field in status_desc */ -#define QLCNIC_SYN_OFFLOAD 0x03 -#define QLCNIC_RXPKT_DESC 0x04 -#define QLCNIC_OLD_RXPKT_DESC 0x3f -#define QLCNIC_RESPONSE_DESC 0x05 -#define QLCNIC_LRO_DESC 0x12 - -/* for status field in status_desc */ -#define STATUS_CKSUM_LOOP 0 -#define STATUS_CKSUM_OK 2 - -/* owner bits of status_desc */ -#define STATUS_OWNER_HOST (0x1ULL << 56) -#define STATUS_OWNER_PHANTOM (0x2ULL << 56) - -/* Status descriptor: - 0-3 port, 4-7 status, 8-11 type, 12-27 total_length - 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset - 53-55 desc_cnt, 56-57 owner, 58-63 opcode - */ -#define qlcnic_get_sts_port(sts_data) \ - ((sts_data) & 0x0F) -#define qlcnic_get_sts_status(sts_data) \ - 
(((sts_data) >> 4) & 0x0F) -#define qlcnic_get_sts_type(sts_data) \ - (((sts_data) >> 8) & 0x0F) -#define qlcnic_get_sts_totallength(sts_data) \ - (((sts_data) >> 12) & 0xFFFF) -#define qlcnic_get_sts_refhandle(sts_data) \ - (((sts_data) >> 28) & 0xFFFF) -#define qlcnic_get_sts_prot(sts_data) \ - (((sts_data) >> 44) & 0x0F) -#define qlcnic_get_sts_pkt_offset(sts_data) \ - (((sts_data) >> 48) & 0x1F) -#define qlcnic_get_sts_desc_cnt(sts_data) \ - (((sts_data) >> 53) & 0x7) -#define qlcnic_get_sts_opcode(sts_data) \ - (((sts_data) >> 58) & 0x03F) - -#define qlcnic_get_lro_sts_refhandle(sts_data) \ - ((sts_data) & 0x0FFFF) -#define qlcnic_get_lro_sts_length(sts_data) \ - (((sts_data) >> 16) & 0x0FFFF) -#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \ - (((sts_data) >> 32) & 0x0FF) -#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \ - (((sts_data) >> 40) & 0x0FF) -#define qlcnic_get_lro_sts_timestamp(sts_data) \ - (((sts_data) >> 48) & 0x1) -#define qlcnic_get_lro_sts_type(sts_data) \ - (((sts_data) >> 49) & 0x7) -#define qlcnic_get_lro_sts_push_flag(sts_data) \ - (((sts_data) >> 52) & 0x1) -#define qlcnic_get_lro_sts_seq_number(sts_data) \ - ((sts_data) & 0x0FFFFFFFF) - - -struct status_desc { - __le64 status_desc_data[2]; -} __attribute__ ((aligned(16))); - -/* UNIFIED ROMIMAGE */ -#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000 -#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0 -#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6 -#define QLCNIC_UNI_DIR_SECT_FW 0x7 - -/*Offsets */ -#define QLCNIC_UNI_CHIP_REV_OFF 10 -#define QLCNIC_UNI_FLAGS_OFF 11 -#define QLCNIC_UNI_BIOS_VERSION_OFF 12 -#define QLCNIC_UNI_BOOTLD_IDX_OFF 27 -#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29 - -struct uni_table_desc{ - u32 findex; - u32 num_entries; - u32 entry_size; - u32 reserved[5]; -}; - -struct uni_data_desc{ - u32 findex; - u32 size; - u32 reserved[5]; -}; - -/* Flash Defines and Structures */ -#define QLCNIC_FLT_LOCATION 0x3F1000 -#define QLCNIC_FW_IMAGE_REGION 0x74 -#define QLCNIC_BOOTLD_REGION 0X72 -struct qlcnic_flt_header { - u16 version; - u16 len; - u16 checksum; - u16 reserved; -}; - -struct qlcnic_flt_entry { - u8 region; - u8 reserved0; - u8 attrib; - u8 reserved1; - u32 size; - u32 start_addr; - u32 end_addr; -}; - -/* Magic number to let user know flash is programmed */ -#define QLCNIC_BDINFO_MAGIC 0x12345678 - -#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021 -#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022 -#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023 -#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024 -#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025 -#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026 -#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027 -#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028 -#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029 -#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a -#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b -#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031 -#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032 -#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080 - -#define QLCNIC_MSIX_TABLE_OFFSET 0x44 - -/* Flash memory map */ -#define QLCNIC_BRDCFG_START 0x4000 /* board config */ -#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ -#define QLCNIC_IMAGE_START 0x43000 /* compressed image */ -#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ - -#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) -#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) -#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c) -#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c) - -#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8) -#define QLCNIC_FW_MAGIC_OFFSET 
(QLCNIC_BRDCFG_START+0x128) - -#define QLCNIC_FW_MIN_SIZE (0x3fffff) -#define QLCNIC_UNIFIED_ROMIMAGE 0 -#define QLCNIC_FLASH_ROMIMAGE 1 -#define QLCNIC_UNKNOWN_ROMIMAGE 0xff - -#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin" -#define QLCNIC_FLASH_ROMIMAGE_NAME "flash" - -extern char qlcnic_driver_name[]; - -/* Number of status descriptors to handle per interrupt */ -#define MAX_STATUS_HANDLE (64) - -/* - * qlcnic_skb_frag{} is to contain mapping info for each SG list. This - * has to be freed when DMA is complete. This is part of qlcnic_tx_buffer{}. - */ -struct qlcnic_skb_frag { - u64 dma; - u64 length; -}; - -/* Following defines are for the state of the buffers */ -#define QLCNIC_BUFFER_FREE 0 -#define QLCNIC_BUFFER_BUSY 1 - -/* - * There will be one qlcnic_buffer per skb packet. These will be - * used to save the dma info for pci_unmap_page() - */ -struct qlcnic_cmd_buffer { - struct sk_buff *skb; - struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1]; - u32 frag_count; -}; - -/* In rx_buffer, we do not need multiple fragments as is a single buffer */ -struct qlcnic_rx_buffer { - u16 ref_handle; - struct sk_buff *skb; - struct list_head list; - u64 dma; -}; - -/* Board types */ -#define QLCNIC_GBE 0x01 -#define QLCNIC_XGBE 0x02 - -/* - * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is - * adjusted based on configured MTU. - */ -#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3 -#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256 - -#define QLCNIC_INTR_DEFAULT 0x04 -#define QLCNIC_CONFIG_INTR_COALESCE 3 - -struct qlcnic_nic_intr_coalesce { - u8 type; - u8 sts_ring_mask; - u16 rx_packets; - u16 rx_time_us; - u16 flag; - u32 timer_out; -}; - -struct qlcnic_dump_template_hdr { - __le32 type; - __le32 offset; - __le32 size; - __le32 cap_mask; - __le32 num_entries; - __le32 version; - __le32 timestamp; - __le32 checksum; - __le32 drv_cap_mask; - __le32 sys_info[3]; - __le32 saved_state[16]; - __le32 cap_sizes[8]; - __le32 rsvd[0]; -}; - -struct qlcnic_fw_dump { - u8 clr; /* flag to indicate if dump is cleared */ - u8 enable; /* enable/disable dump */ - u32 size; /* total size of the dump */ - void *data; /* dump data area */ - struct qlcnic_dump_template_hdr *tmpl_hdr; -}; - -/* - * One hardware_context{} per adapter - * contains interrupt info as well shared hardware info. - */ -struct qlcnic_hardware_context { - void __iomem *pci_base0; - void __iomem *ocm_win_crb; - - unsigned long pci_len0; - - rwlock_t crb_lock; - struct mutex mem_lock; - - u8 revision_id; - u8 pci_func; - u8 linkup; - u8 loopback_state; - u16 port_type; - u16 board_type; - - struct qlcnic_nic_intr_coalesce coal; - struct qlcnic_fw_dump fw_dump; -}; - -struct qlcnic_adapter_stats { - u64 xmitcalled; - u64 xmitfinished; - u64 rxdropped; - u64 txdropped; - u64 csummed; - u64 rx_pkts; - u64 lro_pkts; - u64 rxbytes; - u64 txbytes; - u64 lrobytes; - u64 lso_frames; - u64 xmit_on; - u64 xmit_off; - u64 skb_alloc_failure; - u64 null_rxbuf; - u64 rx_dma_map_error; - u64 tx_dma_map_error; -}; - -/* - * Rcv Descriptor Context. One such per Rcv Descriptor. There may - * be one Rcv Descriptor for normal packets, one for jumbo and may be others. 
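For scale: struct rcv_desc earlier in this header is 16 packed bytes (2 + 2 + 4 + 8), so RCV_DESC_RINGSIZE() for the 1G default of 2048 receive descriptors (DEFAULT_RCV_DESCRIPTORS_1G) comes to 32 KiB of coherent DMA memory per RDS ring. A self-contained check of that arithmetic:

#include <assert.h>

int main(void)
{
	unsigned int desc_size = 16;	/* sizeof(struct rcv_desc), packed */
	unsigned int num_desc  = 2048;	/* DEFAULT_RCV_DESCRIPTORS_1G */

	/* what RCV_DESC_RINGSIZE() evaluates to for this ring depth */
	assert(desc_size * num_desc == 32 * 1024);
	return 0;
}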
- */ -struct qlcnic_host_rds_ring { - void __iomem *crb_rcv_producer; - struct rcv_desc *desc_head; - struct qlcnic_rx_buffer *rx_buf_arr; - u32 num_desc; - u32 producer; - u32 dma_size; - u32 skb_size; - u32 flags; - struct list_head free_list; - spinlock_t lock; - dma_addr_t phys_addr; -} ____cacheline_internodealigned_in_smp; - -struct qlcnic_host_sds_ring { - u32 consumer; - u32 num_desc; - void __iomem *crb_sts_consumer; - - struct status_desc *desc_head; - struct qlcnic_adapter *adapter; - struct napi_struct napi; - struct list_head free_list[NUM_RCV_DESC_RINGS]; - - void __iomem *crb_intr_mask; - int irq; - - dma_addr_t phys_addr; - char name[IFNAMSIZ+4]; -} ____cacheline_internodealigned_in_smp; - -struct qlcnic_host_tx_ring { - u32 producer; - u32 sw_consumer; - u32 num_desc; - void __iomem *crb_cmd_producer; - struct cmd_desc_type0 *desc_head; - struct qlcnic_cmd_buffer *cmd_buf_arr; - __le32 *hw_consumer; - - dma_addr_t phys_addr; - dma_addr_t hw_cons_phys_addr; - struct netdev_queue *txq; -} ____cacheline_internodealigned_in_smp; - -/* - * Receive context. There is one such structure per instance of the - * receive processing. Any state information that is relevant to - * the receive, and is must be in this structure. The global data may be - * present elsewhere. - */ -struct qlcnic_recv_context { - struct qlcnic_host_rds_ring *rds_rings; - struct qlcnic_host_sds_ring *sds_rings; - u32 state; - u16 context_id; - u16 virt_port; - -}; - -/* HW context creation */ - -#define QLCNIC_OS_CRB_RETRY_COUNT 4000 -#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \ - (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) - -#define QLCNIC_CDRP_CMD_BIT 0x80000000 - -/* - * All responses must have the QLCNIC_CDRP_CMD_BIT cleared - * in the crb QLCNIC_CDRP_CRB_OFFSET. - */ -#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp) -#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0) - -#define QLCNIC_CDRP_RSP_OK 0x00000001 -#define QLCNIC_CDRP_RSP_FAIL 0x00000002 -#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003 - -/* - * All commands must have the QLCNIC_CDRP_CMD_BIT set in - * the crb QLCNIC_CDRP_CRB_OFFSET. 
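The QLCNIC_CDRP_FORM_CMD()/QLCNIC_CDRP_IS_RSP() pair around this comment encodes a simple mailbox handshake: the host writes the command with bit 31 set, and the firmware signals completion by rewriting the same CRB with bit 31 clear, leaving one of the QLCNIC_CDRP_RSP_* codes behind. A condensed user-space-style sketch; the crb_read()/crb_write() accessors are hypothetical, and the real sequence also deposits a signature and command arguments in neighbouring CRBs first:

#include <stdint.h>

#define CDRP_CMD_BIT	0x80000000u	/* QLCNIC_CDRP_CMD_BIT */
#define CDRP_RSP_OK	0x00000001u	/* QLCNIC_CDRP_RSP_OK */

/* hypothetical 32-bit CRB accessors, assumed for the sketch */
uint32_t crb_read(uint32_t off);
void crb_write(uint32_t off, uint32_t val);

static int cdrp_issue(uint32_t crb_off, uint32_t cmd, int retries)
{
	uint32_t rsp;

	crb_write(crb_off, CDRP_CMD_BIT | cmd);	/* QLCNIC_CDRP_FORM_CMD() */
	do {
		rsp = crb_read(crb_off);
		if ((rsp & CDRP_CMD_BIT) == 0)	/* QLCNIC_CDRP_IS_RSP() */
			return rsp == CDRP_RSP_OK ? 0 : -1;
	} while (--retries);

	return -1;				/* firmware never answered */
}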
- */ -#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd)) -#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0) - -#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 -#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 -#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 -#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 -#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 -#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 -#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007 -#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008 -#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009 -#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a -#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012 -#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013 -#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014 -#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015 -#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016 -#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017 -#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018 -#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019 -#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f - -#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020 -#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021 -#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024 -#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 -#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027 -#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029 -#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a -#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E -#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f -#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030 - -#define QLCNIC_RCODE_SUCCESS 0 -#define QLCNIC_RCODE_NOT_SUPPORTED 9 -#define QLCNIC_RCODE_TIMEOUT 17 -#define QLCNIC_DESTROY_CTX_RESET 0 - -/* - * Capabilities Announced - */ -#define QLCNIC_CAP0_LEGACY_CONTEXT (1) -#define QLCNIC_CAP0_LEGACY_MN (1 << 2) -#define QLCNIC_CAP0_LSO (1 << 6) -#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7) -#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) -#define QLCNIC_CAP0_VALIDOFF (1 << 11) - -/* - * Context state - */ -#define QLCNIC_HOST_CTX_STATE_FREED 0 -#define QLCNIC_HOST_CTX_STATE_ACTIVE 2 - -/* - * Rx context - */ - -struct qlcnic_hostrq_sds_ring { - __le64 host_phys_addr; /* Ring base addr */ - __le32 ring_size; /* Ring entries */ - __le16 msi_index; - __le16 rsvd; /* Padding */ -} __packed; - -struct qlcnic_hostrq_rds_ring { - __le64 host_phys_addr; /* Ring base addr */ - __le64 buff_size; /* Packet buffer size */ - __le32 ring_size; /* Ring entries */ - __le32 ring_kind; /* Class of ring */ -} __packed; - -struct qlcnic_hostrq_rx_ctx { - __le64 host_rsp_dma_addr; /* Response dma'd here */ - __le32 capabilities[4]; /* Flag bit vector */ - __le32 host_int_crb_mode; /* Interrupt crb usage */ - __le32 host_rds_crb_mode; /* RDS crb usage */ - /* These ring offsets are relative to data[0] below */ - __le32 rds_ring_offset; /* Offset to RDS config */ - __le32 sds_ring_offset; /* Offset to SDS config */ - __le16 num_rds_rings; /* Count of RDS rings */ - __le16 num_sds_rings; /* Count of SDS rings */ - __le16 valid_field_offset; - u8 txrx_sds_binding; - u8 msix_handler; - u8 reserved[128]; /* reserve space for future expansion*/ - /* MUST BE 64-bit aligned. 
- The following is packed: - - N hostrq_rds_rings - - N hostrq_sds_rings */ - char data[0]; -} __packed; - -struct qlcnic_cardrsp_rds_ring{ - __le32 host_producer_crb; /* Crb to use */ - __le32 rsvd1; /* Padding */ -} __packed; - -struct qlcnic_cardrsp_sds_ring { - __le32 host_consumer_crb; /* Crb to use */ - __le32 interrupt_crb; /* Crb to use */ -} __packed; - -struct qlcnic_cardrsp_rx_ctx { - /* These ring offsets are relative to data[0] below */ - __le32 rds_ring_offset; /* Offset to RDS config */ - __le32 sds_ring_offset; /* Offset to SDS config */ - __le32 host_ctx_state; /* Starting State */ - __le32 num_fn_per_port; /* How many PCI fn share the port */ - __le16 num_rds_rings; /* Count of RDS rings */ - __le16 num_sds_rings; /* Count of SDS rings */ - __le16 context_id; /* Handle for context */ - u8 phys_port; /* Physical id of port */ - u8 virt_port; /* Virtual/Logical id of port */ - u8 reserved[128]; /* save space for future expansion */ - /* MUST BE 64-bit aligned. - The following is packed: - - N cardrsp_rds_rings - - N cardrs_sds_rings */ - char data[0]; -} __packed; - -#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ - (sizeof(HOSTRQ_RX) + \ - (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \ - (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring))) - -#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \ - (sizeof(CARDRSP_RX) + \ - (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \ - (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring))) - -/* - * Tx context - */ - -struct qlcnic_hostrq_cds_ring { - __le64 host_phys_addr; /* Ring base addr */ - __le32 ring_size; /* Ring entries */ - __le32 rsvd; /* Padding */ -} __packed; - -struct qlcnic_hostrq_tx_ctx { - __le64 host_rsp_dma_addr; /* Response dma'd here */ - __le64 cmd_cons_dma_addr; /* */ - __le64 dummy_dma_addr; /* */ - __le32 capabilities[4]; /* Flag bit vector */ - __le32 host_int_crb_mode; /* Interrupt crb usage */ - __le32 rsvd1; /* Padding */ - __le16 rsvd2; /* Padding */ - __le16 interrupt_ctl; - __le16 msi_index; - __le16 rsvd3; /* Padding */ - struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */ - u8 reserved[128]; /* future expansion */ -} __packed; - -struct qlcnic_cardrsp_cds_ring { - __le32 host_producer_crb; /* Crb to use */ - __le32 interrupt_crb; /* Crb to use */ -} __packed; - -struct qlcnic_cardrsp_tx_ctx { - __le32 host_ctx_state; /* Starting state */ - __le16 context_id; /* Handle for context */ - u8 phys_port; /* Physical id of port */ - u8 virt_port; /* Virtual/Logical id of port */ - struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */ - u8 reserved[128]; /* future expansion */ -} __packed; - -#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) -#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) - -/* CRB */ - -#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0 -#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1 -#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2 -#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3 - -#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0 -#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1 -#define QLCNIC_HOST_INT_CRB_MODE_NORX 2 -#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3 -#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4 - - -/* MAC */ - -#define MC_COUNT_P3P 38 - -#define QLCNIC_MAC_NOOP 0 -#define QLCNIC_MAC_ADD 1 -#define QLCNIC_MAC_DEL 2 -#define QLCNIC_MAC_VLAN_ADD 3 -#define QLCNIC_MAC_VLAN_DEL 4 - -struct qlcnic_mac_list_s { - struct list_head list; - uint8_t mac_addr[ETH_ALEN+2]; -}; - -#define QLCNIC_HOST_REQUEST 0x13 -#define QLCNIC_REQUEST 0x14 - -#define 
QLCNIC_MAC_EVENT 0x1 - -#define QLCNIC_IP_UP 2 -#define QLCNIC_IP_DOWN 3 - -#define QLCNIC_ILB_MODE 0x1 -#define QLCNIC_ELB_MODE 0x2 - -#define QLCNIC_LINKEVENT 0x1 -#define QLCNIC_LB_RESPONSE 0x2 -#define QLCNIC_IS_LB_CONFIGURED(VAL) \ - (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE)) - -/* - * Driver --> Firmware - */ -#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1 -#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3 -#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4 -#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7 -#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc -#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12 - -#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15 -#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17 -#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18 -#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13 - -/* - * Firmware --> Driver - */ - -#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f -#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 - -#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ -#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ -#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ - -#define QLCNIC_LRO_REQUEST_CLEANUP 4 - -/* Capabilites received */ -#define QLCNIC_FW_CAPABILITY_TSO BIT_1 -#define QLCNIC_FW_CAPABILITY_BDG BIT_8 -#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 -#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 -#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27 - -/* module types */ -#define LINKEVENT_MODULE_NOT_PRESENT 1 -#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 -#define LINKEVENT_MODULE_OPTICAL_SRLR 3 -#define LINKEVENT_MODULE_OPTICAL_LRM 4 -#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5 -#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6 -#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7 -#define LINKEVENT_MODULE_TWINAX 8 - -#define LINKSPEED_10GBPS 10000 -#define LINKSPEED_1GBPS 1000 -#define LINKSPEED_100MBPS 100 -#define LINKSPEED_10MBPS 10 - -#define LINKSPEED_ENCODED_10MBPS 0 -#define LINKSPEED_ENCODED_100MBPS 1 -#define LINKSPEED_ENCODED_1GBPS 2 - -#define LINKEVENT_AUTONEG_DISABLED 0 -#define LINKEVENT_AUTONEG_ENABLED 1 - -#define LINKEVENT_HALF_DUPLEX 0 -#define LINKEVENT_FULL_DUPLEX 1 - -#define LINKEVENT_LINKSPEED_MBPS 0 -#define LINKEVENT_LINKSPEED_ENCODED 1 - -/* firmware response header: - * 63:58 - message type - * 57:56 - owner - * 55:53 - desc count - * 52:48 - reserved - * 47:40 - completion id - * 39:32 - opcode - * 31:16 - error code - * 15:00 - reserved - */ -#define qlcnic_get_nic_msg_opcode(msg_hdr) \ - ((msg_hdr >> 32) & 0xFF) - -struct qlcnic_fw_msg { - union { - struct { - u64 hdr; - u64 body[7]; - }; - u64 words[8]; - }; -}; - -struct qlcnic_nic_req { - __le64 qhdr; - __le64 req_hdr; - __le64 words[6]; -} __packed; - -struct qlcnic_mac_req { - u8 op; - u8 tag; - u8 mac_addr[6]; -}; - -struct qlcnic_vlan_req { - __le16 vlan_id; - __le16 rsvd[3]; -} __packed; - -struct qlcnic_ipaddr { - __be32 ipv4; - __be32 ipv6[4]; -}; - -#define QLCNIC_MSI_ENABLED 0x02 -#define QLCNIC_MSIX_ENABLED 0x04 -#define QLCNIC_LRO_ENABLED 0x08 -#define QLCNIC_LRO_DISABLED 0x00 -#define QLCNIC_BRIDGE_ENABLED 0X10 -#define QLCNIC_DIAG_ENABLED 0x20 -#define QLCNIC_ESWITCH_ENABLED 0x40 -#define QLCNIC_ADAPTER_INITIALIZED 0x80 -#define QLCNIC_TAGGING_ENABLED 0x100 -#define QLCNIC_MACSPOOF 0x200 -#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400 -#define QLCNIC_PROMISC_DISABLED 0x800 -#define QLCNIC_NEED_FLR 0x1000 -#define QLCNIC_FW_RESET_OWNER 0x2000 -#define QLCNIC_FW_HANG 0x4000 -#define QLCNIC_IS_MSI_FAMILY(adapter) \ - ((adapter)->flags & (QLCNIC_MSI_ENABLED | 
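/*
 * Illustrative, standalone sketch (not from the driver) decoding the
 * 64-bit firmware response header documented above.  Only the opcode
 * accessor exists in the driver; the other extractors are written the
 * same way here purely for illustration of the documented bit layout.
 */
#include <stdint.h>
#include <stdio.h>

#define MSG_TYPE(h)     (((h) >> 58) & 0x3F)    /* bits 63:58 */
#define MSG_COMPL_ID(h) (((h) >> 40) & 0xFF)    /* bits 47:40 */
#define MSG_OPCODE(h)   (((h) >> 32) & 0xFF)    /* bits 39:32 */
#define MSG_ERRCODE(h)  (((h) >> 16) & 0xFFFF)  /* bits 31:16 */

int main(void)
{
	uint64_t hdr = 0x0000008d00000000ull;   /* opcode 141, linkevent response */

	printf("type=%u opcode=%u err=%u\n",
	       (unsigned)MSG_TYPE(hdr), (unsigned)MSG_OPCODE(hdr),
	       (unsigned)MSG_ERRCODE(hdr));
	return 0;
}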
QLCNIC_MSIX_ENABLED)) - -#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 -#define QLCNIC_MSIX_TBL_SPACE 8192 -#define QLCNIC_PCI_REG_MSIX_TBL 0x44 -#define QLCNIC_MSIX_TBL_PGSIZE 4096 - -#define QLCNIC_NETDEV_WEIGHT 128 -#define QLCNIC_ADAPTER_UP_MAGIC 777 - -#define __QLCNIC_FW_ATTACHED 0 -#define __QLCNIC_DEV_UP 1 -#define __QLCNIC_RESETTING 2 -#define __QLCNIC_START_FW 4 -#define __QLCNIC_AER 5 -#define __QLCNIC_DIAG_RES_ALLOC 6 - -#define QLCNIC_INTERRUPT_TEST 1 -#define QLCNIC_LOOPBACK_TEST 2 -#define QLCNIC_LED_TEST 3 - -#define QLCNIC_FILTER_AGE 80 -#define QLCNIC_READD_AGE 20 -#define QLCNIC_LB_MAX_FILTERS 64 - -/* QLCNIC Driver Error Code */ -#define QLCNIC_FW_NOT_RESPOND 51 -#define QLCNIC_TEST_IN_PROGRESS 52 -#define QLCNIC_UNDEFINED_ERROR 53 -#define QLCNIC_LB_CABLE_NOT_CONN 54 - -struct qlcnic_filter { - struct hlist_node fnode; - u8 faddr[ETH_ALEN]; - __le16 vlan_id; - unsigned long ftime; -}; - -struct qlcnic_filter_hash { - struct hlist_head *fhead; - u8 fnum; - u8 fmax; -}; - -struct qlcnic_adapter { - struct qlcnic_hardware_context *ahw; - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_tx_ring *tx_ring; - struct net_device *netdev; - struct pci_dev *pdev; - - unsigned long state; - u32 flags; - - u16 num_txd; - u16 num_rxd; - u16 num_jumbo_rxd; - u16 max_rxd; - u16 max_jumbo_rxd; - - u8 max_rds_rings; - u8 max_sds_rings; - u8 msix_supported; - u8 portnum; - u8 physical_port; - u8 reset_context; - - u8 mc_enabled; - u8 max_mc_count; - u8 fw_wait_cnt; - u8 fw_fail_cnt; - u8 tx_timeo_cnt; - u8 need_fw_reset; - - u8 has_link_events; - u8 fw_type; - u16 tx_context_id; - u16 is_up; - - u16 link_speed; - u16 link_duplex; - u16 link_autoneg; - u16 module_type; - - u16 op_mode; - u16 switch_mode; - u16 max_tx_ques; - u16 max_rx_ques; - u16 max_mtu; - u16 pvid; - - u32 fw_hal_version; - u32 capabilities; - u32 irq; - u32 temp; - - u32 int_vec_bit; - u32 heartbeat; - - u8 max_mac_filters; - u8 dev_state; - u8 diag_test; - char diag_cnt; - u8 reset_ack_timeo; - u8 dev_init_timeo; - u16 msg_enable; - - u8 mac_addr[ETH_ALEN]; - - u64 dev_rst_time; - u8 mac_learn; - unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)]; - - struct qlcnic_npar_info *npars; - struct qlcnic_eswitch *eswitch; - struct qlcnic_nic_template *nic_ops; - - struct qlcnic_adapter_stats stats; - struct list_head mac_list; - - void __iomem *tgt_mask_reg; - void __iomem *tgt_status_reg; - void __iomem *crb_int_state_reg; - void __iomem *isr_int_vec; - - struct msix_entry *msix_entries; - - struct delayed_work fw_work; - - - struct qlcnic_filter_hash fhash; - - spinlock_t tx_clean_lock; - spinlock_t mac_learn_lock; - __le32 file_prd_off; /*File fw product offset*/ - u32 fw_version; - const struct firmware *fw; -}; - -struct qlcnic_info { - __le16 pci_func; - __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */ - __le16 phys_port; - __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */ - - __le32 capabilities; - u8 max_mac_filters; - u8 reserved1; - __le16 max_mtu; - - __le16 max_tx_ques; - __le16 max_rx_ques; - __le16 min_tx_bw; - __le16 max_tx_bw; - u8 reserved2[104]; -} __packed; - -struct qlcnic_pci_info { - __le16 id; /* pci function id */ - __le16 active; /* 1 = Enabled */ - __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */ - __le16 default_port; /* default port number */ - - __le16 tx_min_bw; /* Multiple of 100mbpc */ - __le16 tx_max_bw; - __le16 reserved1[2]; - - u8 mac[ETH_ALEN]; - u8 reserved2[106]; -} __packed; - -struct qlcnic_npar_info { - u16 pvid; - u16 min_bw; - u16 max_bw; - u8 phy_port; - u8 type; - 
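/*
 * Tiny standalone sketch (not from the driver) of the adapter->flags
 * tests such as QLCNIC_IS_MSI_FAMILY() above: MSI and MSI-X are
 * distinct flag bits, and "MSI family" means either one is set.
 */
#include <stdint.h>
#include <stdio.h>

#define MSI_ENABLED      0x02u
#define MSIX_ENABLED     0x04u
#define IS_MSI_FAMILY(f) ((f) & (MSI_ENABLED | MSIX_ENABLED))

int main(void)
{
	uint32_t flags = MSIX_ENABLED;

	printf("msi family: %s\n", IS_MSI_FAMILY(flags) ? "yes" : "no");
	return 0;
}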
u8 active; - u8 enable_pm; - u8 dest_npar; - u8 discard_tagged; - u8 mac_override; - u8 mac_anti_spoof; - u8 promisc_mode; - u8 offload_flags; -}; - -struct qlcnic_eswitch { - u8 port; - u8 active_vports; - u8 active_vlans; - u8 active_ucast_filters; - u8 max_ucast_filters; - u8 max_active_vlans; - - u32 flags; -#define QLCNIC_SWITCH_ENABLE BIT_1 -#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2 -#define QLCNIC_SWITCH_PROMISC_MODE BIT_3 -#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4 -}; - - -/* Return codes for Error handling */ -#define QL_STATUS_INVALID_PARAM -1 - -#define MAX_BW 100 /* % of link speed */ -#define MAX_VLAN_ID 4095 -#define MIN_VLAN_ID 2 -#define DEFAULT_MAC_LEARN 1 - -#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) -#define IS_VALID_BW(bw) (bw <= MAX_BW) - -struct qlcnic_pci_func_cfg { - u16 func_type; - u16 min_bw; - u16 max_bw; - u16 port_num; - u8 pci_func; - u8 func_state; - u8 def_mac_addr[6]; -}; - -struct qlcnic_npar_func_cfg { - u32 fw_capab; - u16 port_num; - u16 min_bw; - u16 max_bw; - u16 max_tx_queues; - u16 max_rx_queues; - u8 pci_func; - u8 op_mode; -}; - -struct qlcnic_pm_func_cfg { - u8 pci_func; - u8 action; - u8 dest_npar; - u8 reserved[5]; -}; - -struct qlcnic_esw_func_cfg { - u16 vlan_id; - u8 op_mode; - u8 op_type; - u8 pci_func; - u8 host_vlan_tag; - u8 promisc_mode; - u8 discard_tagged; - u8 mac_override; - u8 mac_anti_spoof; - u8 offload_flags; - u8 reserved[5]; -}; - -#define QLCNIC_STATS_VERSION 1 -#define QLCNIC_STATS_PORT 1 -#define QLCNIC_STATS_ESWITCH 2 -#define QLCNIC_QUERY_RX_COUNTER 0 -#define QLCNIC_QUERY_TX_COUNTER 1 -#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL - -#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\ -do { \ - if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \ - ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ - (VAL1) = (VAL2); \ - else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \ - ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ - (VAL1) += (VAL2); \ -} while (0) - -struct __qlcnic_esw_statistics { - __le16 context_id; - __le16 version; - __le16 size; - __le16 unused; - __le64 unicast_frames; - __le64 multicast_frames; - __le64 broadcast_frames; - __le64 dropped_frames; - __le64 errors; - __le64 local_frames; - __le64 numbytes; - __le64 rsvd[3]; -} __packed; - -struct qlcnic_esw_statistics { - struct __qlcnic_esw_statistics rx; - struct __qlcnic_esw_statistics tx; -}; - -struct qlcnic_common_entry_hdr { - __le32 type; - __le32 offset; - __le32 cap_size; - u8 mask; - u8 rsvd[2]; - u8 flags; -} __packed; - -struct __crb { - __le32 addr; - u8 stride; - u8 rsvd1[3]; - __le32 data_size; - __le32 no_ops; - __le32 rsvd2[4]; -} __packed; - -struct __ctrl { - __le32 addr; - u8 stride; - u8 index_a; - __le16 timeout; - __le32 data_size; - __le32 no_ops; - u8 opcode; - u8 index_v; - u8 shl_val; - u8 shr_val; - __le32 val1; - __le32 val2; - __le32 val3; -} __packed; - -struct __cache { - __le32 addr; - __le16 stride; - __le16 init_tag_val; - __le32 size; - __le32 no_ops; - __le32 ctrl_addr; - __le32 ctrl_val; - __le32 read_addr; - u8 read_addr_stride; - u8 read_addr_num; - u8 rsvd1[2]; -} __packed; - -struct __ocm { - u8 rsvd[8]; - __le32 size; - __le32 no_ops; - u8 rsvd1[8]; - __le32 read_addr; - __le32 read_addr_stride; -} __packed; - -struct __mem { - u8 rsvd[24]; - __le32 addr; - __le32 size; -} __packed; - -struct __mux { - __le32 addr; - u8 rsvd[4]; - __le32 size; - __le32 no_ops; - __le32 val; - __le32 val_stride; - __le32 read_addr; - u8 rsvd2[4]; -} __packed; - -struct __queue { - __le32 sel_addr; - __le16 stride; - 
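/*
 * Illustrative, standalone sketch (not from the driver) of the
 * QLCNIC_ADD_ESW_STATS() rule above: 0xffffffffffffffffULL marks a
 * counter the firmware could not report, so it is ignored as a
 * sample, and the first real sample replaces the sentinel before
 * later samples accumulate.
 */
#include <stdint.h>
#include <stdio.h>

#define STATS_NOT_AVAIL 0xffffffffffffffffULL

static void add_esw_stat(uint64_t *acc, uint64_t sample)
{
	if (sample == STATS_NOT_AVAIL)
		return;                 /* unavailable sample: skip     */
	if (*acc == STATS_NOT_AVAIL)
		*acc = sample;          /* first real sample seeds acc  */
	else
		*acc += sample;         /* later samples accumulate     */
}

int main(void)
{
	uint64_t rx = STATS_NOT_AVAIL;

	add_esw_stat(&rx, STATS_NOT_AVAIL);
	add_esw_stat(&rx, 100);
	add_esw_stat(&rx, 42);
	printf("rx frames: %llu\n", (unsigned long long)rx);  /* 142 */
	return 0;
}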
u8 rsvd[2]; - __le32 size; - __le32 no_ops; - u8 rsvd2[8]; - __le32 read_addr; - u8 read_addr_stride; - u8 read_addr_cnt; - u8 rsvd3[2]; -} __packed; - -struct qlcnic_dump_entry { - struct qlcnic_common_entry_hdr hdr; - union { - struct __crb crb; - struct __cache cache; - struct __ocm ocm; - struct __mem mem; - struct __mux mux; - struct __queue que; - struct __ctrl ctrl; - } region; -} __packed; - -enum op_codes { - QLCNIC_DUMP_NOP = 0, - QLCNIC_DUMP_READ_CRB = 1, - QLCNIC_DUMP_READ_MUX = 2, - QLCNIC_DUMP_QUEUE = 3, - QLCNIC_DUMP_BRD_CONFIG = 4, - QLCNIC_DUMP_READ_OCM = 6, - QLCNIC_DUMP_PEG_REG = 7, - QLCNIC_DUMP_L1_DTAG = 8, - QLCNIC_DUMP_L1_ITAG = 9, - QLCNIC_DUMP_L1_DATA = 11, - QLCNIC_DUMP_L1_INST = 12, - QLCNIC_DUMP_L2_DTAG = 21, - QLCNIC_DUMP_L2_ITAG = 22, - QLCNIC_DUMP_L2_DATA = 23, - QLCNIC_DUMP_L2_INST = 24, - QLCNIC_DUMP_READ_ROM = 71, - QLCNIC_DUMP_READ_MEM = 72, - QLCNIC_DUMP_READ_CTRL = 98, - QLCNIC_DUMP_TLHDR = 99, - QLCNIC_DUMP_RDEND = 255 -}; - -#define QLCNIC_DUMP_WCRB BIT_0 -#define QLCNIC_DUMP_RWCRB BIT_1 -#define QLCNIC_DUMP_ANDCRB BIT_2 -#define QLCNIC_DUMP_ORCRB BIT_3 -#define QLCNIC_DUMP_POLLCRB BIT_4 -#define QLCNIC_DUMP_RD_SAVE BIT_5 -#define QLCNIC_DUMP_WRT_SAVED BIT_6 -#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7 -#define QLCNIC_DUMP_SKIP BIT_7 - -#define QLCNIC_DUMP_MASK_MIN 3 -#define QLCNIC_DUMP_MASK_DEF 0x1f -#define QLCNIC_DUMP_MASK_MAX 0xff -#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed -#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed -#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed -#define QLCNIC_FORCE_FW_RESET 0xdeaddead - -struct qlcnic_dump_operations { - enum op_codes opcode; - u32 (*handler)(struct qlcnic_adapter *, - struct qlcnic_dump_entry *, u32 *); -}; - -int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); -int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config); - -u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off); -int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data); -int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data); -int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data); -void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *); -void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64); - -#define ADDR_IN_RANGE(addr, low, high) \ - (((addr) < (high)) && ((addr) >= (low))) - -#define QLCRD32(adapter, off) \ - (qlcnic_hw_read_wx_2M(adapter, off)) -#define QLCWR32(adapter, off, val) \ - (qlcnic_hw_write_wx_2M(adapter, off, val)) - -int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32); -void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int); - -#define qlcnic_rom_lock(a) \ - qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID) -#define qlcnic_rom_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 2) -#define qlcnic_phy_lock(a) \ - qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID) -#define qlcnic_phy_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 3) -#define qlcnic_api_lock(a) \ - qlcnic_pcie_sem_lock((a), 5, 0) -#define qlcnic_api_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 5) -#define qlcnic_sw_lock(a) \ - qlcnic_pcie_sem_lock((a), 6, 0) -#define qlcnic_sw_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 6) -#define crb_win_lock(a) \ - qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID) -#define crb_win_unlock(a) \ - qlcnic_pcie_sem_unlock((a), 7) - -int qlcnic_get_board_info(struct qlcnic_adapter *adapter); -int qlcnic_wol_supported(struct qlcnic_adapter *adapter); -int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); -void 
qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter); -void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); -int qlcnic_dump_fw(struct qlcnic_adapter *); - -/* Functions from qlcnic_init.c */ -int qlcnic_load_firmware(struct qlcnic_adapter *adapter); -int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter); -void qlcnic_request_firmware(struct qlcnic_adapter *adapter); -void qlcnic_release_firmware(struct qlcnic_adapter *adapter); -int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); -int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter); -int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter); - -int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp); -int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, - u8 *bytes, size_t size); -int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter); -void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter); - -void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32); - -int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter); -void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter); - -int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter); -void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter); - -void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter); -void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); -void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); - -int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); -void qlcnic_watchdog_task(struct work_struct *work); -void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring); -int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); -void qlcnic_set_multi(struct net_device *netdev); -void qlcnic_free_mac_list(struct qlcnic_adapter *adapter); -int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32); -int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter); -int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable); -int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd); -int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable); -void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup); - -int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); -int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); -u32 qlcnic_fix_features(struct net_device *netdev, u32 features); -int qlcnic_set_features(struct net_device *netdev, u32 features); -int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); -int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); -int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); -void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, - struct qlcnic_host_tx_ring *tx_ring); -void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); -void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); -void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter); -int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode); - -/* Functions from qlcnic_ethtool.c */ -int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]); - -/* Functions from qlcnic_main.c */ -int qlcnic_reset_context(struct qlcnic_adapter *); -u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter, - u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd); -void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); -int 
qlcnic_diag_alloc_res(struct net_device *netdev, int test); -netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val); -int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data); -void qlcnic_dev_request_reset(struct qlcnic_adapter *); -void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); - -/* Management functions */ -int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); -int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); -int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); -int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); - -/* eSwitch management functions */ -int qlcnic_config_switch_port(struct qlcnic_adapter *, - struct qlcnic_esw_func_cfg *); -int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *, - struct qlcnic_esw_func_cfg *); -int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8); -int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8, - struct __qlcnic_esw_statistics *); -int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8, - struct __qlcnic_esw_statistics *); -int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8); -extern int qlcnic_config_tso; - -/* - * QLOGIC Board information - */ - -#define QLCNIC_MAX_BOARD_NAME_LEN 100 -struct qlcnic_brdinfo { - unsigned short vendor; - unsigned short device; - unsigned short sub_vendor; - unsigned short sub_device; - char short_name[QLCNIC_MAX_BOARD_NAME_LEN]; -}; - -static const struct qlcnic_brdinfo qlcnic_boards[] = { - {0x1077, 0x8020, 0x1077, 0x203, - "8200 Series Single Port 10GbE Converged Network Adapter " - "(TCP/IP Networking)"}, - {0x1077, 0x8020, 0x1077, 0x207, - "8200 Series Dual Port 10GbE Converged Network Adapter " - "(TCP/IP Networking)"}, - {0x1077, 0x8020, 0x1077, 0x20b, - "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"}, - {0x1077, 0x8020, 0x1077, 0x20c, - "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"}, - {0x1077, 0x8020, 0x1077, 0x20f, - "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, - {0x1077, 0x8020, 0x103c, 0x3733, - "NC523SFP 10Gb 2-port Server Adapter"}, - {0x1077, 0x8020, 0x103c, 0x3346, - "CN1000Q Dual Port Converged Network Adapter"}, - {0x1077, 0x8020, 0x1077, 0x210, - "QME8242-k 10GbE Dual Port Mezzanine Card"}, - {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, -}; - -#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards) - -static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) -{ - if (likely(tx_ring->producer < tx_ring->sw_consumer)) - return tx_ring->sw_consumer - tx_ring->producer; - else - return tx_ring->sw_consumer + tx_ring->num_desc - - tx_ring->producer; -} - -extern const struct ethtool_ops qlcnic_ethtool_ops; - -struct qlcnic_nic_template { - int (*config_bridged_mode) (struct qlcnic_adapter *, u32); - int (*config_led) (struct qlcnic_adapter *, u32, u32); - int (*start_firmware) (struct qlcnic_adapter *); -}; - -#define QLCDB(adapter, lvl, _fmt, _args...) 
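/*
 * Illustrative, standalone sketch (not from the driver) of the
 * qlcnic_tx_avail() ring arithmetic above: the number of free
 * descriptors between the producer and the software consumer on a
 * circular ring of num_desc entries.  Note that producer ==
 * sw_consumer reports a full ring's worth of space, i.e. the ring is
 * treated as empty.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t tx_avail(uint32_t producer, uint32_t sw_consumer,
			 uint32_t num_desc)
{
	if (producer < sw_consumer)
		return sw_consumer - producer;
	return sw_consumer + num_desc - producer;   /* wrapped case */
}

int main(void)
{
	printf("%u\n", tx_avail(10, 50, 256));    /* 40 free          */
	printf("%u\n", tx_avail(200, 10, 256));   /* wrapped: 66 free */
	return 0;
}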
do { \ - if (NETIF_MSG_##lvl & adapter->msg_enable) \ - printk(KERN_INFO "%s: %s: " _fmt, \ - dev_name(&adapter->pdev->dev), \ - __func__, ##_args); \ - } while (0) - -#endif /* __QLCNIC_H_ */ diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c deleted file mode 100644 index b0d32dd..0000000 --- a/drivers/net/qlcnic/qlcnic_ctx.c +++ /dev/null @@ -1,1117 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. - */ - -#include "qlcnic.h" - -static u32 -qlcnic_poll_rsp(struct qlcnic_adapter *adapter) -{ - u32 rsp; - int timeout = 0; - - do { - /* give atleast 1ms for firmware to respond */ - msleep(1); - - if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) - return QLCNIC_CDRP_RSP_TIMEOUT; - - rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET); - } while (!QLCNIC_CDRP_IS_RSP(rsp)); - - return rsp; -} - -u32 -qlcnic_issue_cmd(struct qlcnic_adapter *adapter, - u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd) -{ - u32 rsp; - u32 signature; - u32 rcode = QLCNIC_RCODE_SUCCESS; - struct pci_dev *pdev = adapter->pdev; - - signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version); - - /* Acquire semaphore before accessing CRB */ - if (qlcnic_api_lock(adapter)) - return QLCNIC_RCODE_TIMEOUT; - - QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature); - QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1); - QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2); - QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3); - QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd)); - - rsp = qlcnic_poll_rsp(adapter); - - if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) { - dev_err(&pdev->dev, "card response timeout.\n"); - rcode = QLCNIC_RCODE_TIMEOUT; - } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { - rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); - dev_err(&pdev->dev, "failed card response code:0x%x\n", - rcode); - } - - /* Release semaphore */ - qlcnic_api_unlock(adapter); - - return rcode; -} - -static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size) -{ - uint64_t sum = 0; - int count = temp_size / sizeof(uint32_t); - while (count-- > 0) - sum += *temp_buffer++; - while (sum >> 32) - sum = (sum & 0xFFFFFFFF) + (sum >> 32); - return ~sum; -} - -int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) -{ - int err, i; - u16 temp_size; - void *tmp_addr; - u32 version, csum, *template, *tmp_buf; - struct qlcnic_hardware_context *ahw; - struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl; - dma_addr_t tmp_addr_t = 0; - - ahw = adapter->ahw; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - 0, - 0, - 0, - QLCNIC_CDRP_CMD_TEMP_SIZE); - if (err != QLCNIC_RCODE_SUCCESS) { - err = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); - dev_info(&adapter->pdev->dev, - "Can't get template size %d\n", err); - err = -EIO; - return err; - } - version = QLCRD32(adapter, QLCNIC_ARG3_CRB_OFFSET); - temp_size = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET); - if (!temp_size) - return -EIO; - - tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size, - &tmp_addr_t, GFP_KERNEL); - if (!tmp_addr) { - dev_err(&adapter->pdev->dev, - "Can't get memory for FW dump template\n"); - return -ENOMEM; - } - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - LSD(tmp_addr_t), - MSD(tmp_addr_t), - temp_size, - QLCNIC_CDRP_CMD_GET_TEMP_HDR); - - if (err != QLCNIC_RCODE_SUCCESS) { - dev_err(&adapter->pdev->dev, - "Failed to get mini dump template 
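/*
 * Illustrative, standalone sketch (not from the driver) of the
 * minidump-template checksum computed by qlcnic_temp_checksum() in
 * the hunk below: 32-bit words are summed into 64 bits, the carries
 * are folded back in, and the one's complement (truncated to the
 * 32-bit return type) must be zero for a valid template.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t temp_checksum(const uint32_t *buf, uint16_t size)
{
	uint64_t sum = 0;
	int count = size / sizeof(uint32_t);

	while (count-- > 0)
		sum += *buf++;
	while (sum >> 32)                   /* fold carries back in */
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
	return ~sum;                        /* 0 iff folded sum == 0xFFFFFFFF */
}

int main(void)
{
	/* Three payload words plus a checksum word chosen so the folded
	 * sum is 0xFFFFFFFF (no carries occur with these values). */
	uint32_t tmpl[4] = { 0x11111111, 0x22222222, 0x33333333, 0 };

	tmpl[3] = 0xFFFFFFFFu - (tmpl[0] + tmpl[1] + tmpl[2]);
	printf("template ok: %d\n", temp_checksum(tmpl, sizeof(tmpl)) == 0);
	return 0;
}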
header %d\n", err); - err = -EIO; - goto error; - } - tmp_tmpl = tmp_addr; - csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size); - if (csum) { - dev_err(&adapter->pdev->dev, - "Template header checksum validation failed\n"); - err = -EIO; - goto error; - } - ahw->fw_dump.tmpl_hdr = vzalloc(temp_size); - if (!ahw->fw_dump.tmpl_hdr) { - err = -EIO; - goto error; - } - tmp_buf = tmp_addr; - template = (u32 *) ahw->fw_dump.tmpl_hdr; - for (i = 0; i < temp_size/sizeof(u32); i++) - *template++ = __le32_to_cpu(*tmp_buf++); - - tmpl_hdr = ahw->fw_dump.tmpl_hdr; - tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; - ahw->fw_dump.enable = 1; -error: - dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t); - return err; -} - -int -qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) -{ - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { - if (qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - recv_ctx->context_id, - mtu, - 0, - QLCNIC_CDRP_CMD_SET_MTU)) { - - dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); - return -EIO; - } - } - - return 0; -} - -static int -qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) -{ - void *addr; - struct qlcnic_hostrq_rx_ctx *prq; - struct qlcnic_cardrsp_rx_ctx *prsp; - struct qlcnic_hostrq_rds_ring *prq_rds; - struct qlcnic_hostrq_sds_ring *prq_sds; - struct qlcnic_cardrsp_rds_ring *prsp_rds; - struct qlcnic_cardrsp_sds_ring *prsp_sds; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_sds_ring *sds_ring; - - dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; - u64 phys_addr; - - u8 i, nrds_rings, nsds_rings; - size_t rq_size, rsp_size; - u32 cap, reg, val, reg2; - int err; - - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - nrds_rings = adapter->max_rds_rings; - nsds_rings = adapter->max_sds_rings; - - rq_size = - SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, - nsds_rings); - rsp_size = - SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, - nsds_rings); - - addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, - &hostrq_phys_addr, GFP_KERNEL); - if (addr == NULL) - return -ENOMEM; - prq = addr; - - addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, - &cardrsp_phys_addr, GFP_KERNEL); - if (addr == NULL) { - err = -ENOMEM; - goto out_free_rq; - } - prsp = addr; - - prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); - - cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN - | QLCNIC_CAP0_VALIDOFF); - cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); - - prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx, - msix_handler); - prq->txrx_sds_binding = nsds_rings - 1; - - prq->capabilities[0] = cpu_to_le32(cap); - prq->host_int_crb_mode = - cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); - prq->host_rds_crb_mode = - cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE); - - prq->num_rds_rings = cpu_to_le16(nrds_rings); - prq->num_sds_rings = cpu_to_le16(nsds_rings); - prq->rds_ring_offset = 0; - - val = le32_to_cpu(prq->rds_ring_offset) + - (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings); - prq->sds_ring_offset = cpu_to_le32(val); - - prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data + - le32_to_cpu(prq->rds_ring_offset)); - - for (i = 0; i < nrds_rings; i++) { - - rds_ring = &recv_ctx->rds_rings[i]; - rds_ring->producer = 0; - - prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); - prq_rds[i].ring_size = 
cpu_to_le32(rds_ring->num_desc); - prq_rds[i].ring_kind = cpu_to_le32(i); - prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); - } - - prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data + - le32_to_cpu(prq->sds_ring_offset)); - - for (i = 0; i < nsds_rings; i++) { - - sds_ring = &recv_ctx->sds_rings[i]; - sds_ring->consumer = 0; - memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); - - prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); - prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); - prq_sds[i].msi_index = cpu_to_le16(i); - } - - phys_addr = hostrq_phys_addr; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - (u32)(phys_addr >> 32), - (u32)(phys_addr & 0xffffffff), - rq_size, - QLCNIC_CDRP_CMD_CREATE_RX_CTX); - if (err) { - dev_err(&adapter->pdev->dev, - "Failed to create rx ctx in firmware%d\n", err); - goto out_free_rsp; - } - - - prsp_rds = ((struct qlcnic_cardrsp_rds_ring *) - &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); - - for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { - rds_ring = &recv_ctx->rds_rings[i]; - - reg = le32_to_cpu(prsp_rds[i].host_producer_crb); - rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg; - } - - prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) - &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); - - for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { - sds_ring = &recv_ctx->sds_rings[i]; - - reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); - reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); - - sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg; - sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2; - } - - recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); - recv_ctx->context_id = le16_to_cpu(prsp->context_id); - recv_ctx->virt_port = prsp->virt_port; - -out_free_rsp: - dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, - cardrsp_phys_addr); -out_free_rq: - dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); - return err; -} - -static void -qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - recv_ctx->context_id, - QLCNIC_DESTROY_CTX_RESET, - 0, - QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) { - - dev_err(&adapter->pdev->dev, - "Failed to destroy rx ctx in firmware\n"); - } - - recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; -} - -static int -qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter) -{ - struct qlcnic_hostrq_tx_ctx *prq; - struct qlcnic_hostrq_cds_ring *prq_cds; - struct qlcnic_cardrsp_tx_ctx *prsp; - void *rq_addr, *rsp_addr; - size_t rq_size, rsp_size; - u32 temp; - int err; - u64 phys_addr; - dma_addr_t rq_phys_addr, rsp_phys_addr; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; - - /* reset host resources */ - tx_ring->producer = 0; - tx_ring->sw_consumer = 0; - *(tx_ring->hw_consumer) = 0; - - rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); - rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, - &rq_phys_addr, GFP_KERNEL); - if (!rq_addr) - return -ENOMEM; - - rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); - rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, - &rsp_phys_addr, GFP_KERNEL); - if (!rsp_addr) { - err = -ENOMEM; - goto out_free_rq; - } - - memset(rq_addr, 0, rq_size); - prq = rq_addr; - - memset(rsp_addr, 0, rsp_size); - prsp = rsp_addr; - - prq->host_rsp_dma_addr = 
cpu_to_le64(rsp_phys_addr); - - temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | - QLCNIC_CAP0_LSO); - prq->capabilities[0] = cpu_to_le32(temp); - - prq->host_int_crb_mode = - cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); - - prq->interrupt_ctl = 0; - prq->msi_index = 0; - prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); - - prq_cds = &prq->cds_ring; - - prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); - prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); - - phys_addr = rq_phys_addr; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - (u32)(phys_addr >> 32), - ((u32)phys_addr & 0xffffffff), - rq_size, - QLCNIC_CDRP_CMD_CREATE_TX_CTX); - - if (err == QLCNIC_RCODE_SUCCESS) { - temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); - tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; - - adapter->tx_context_id = - le16_to_cpu(prsp->context_id); - } else { - dev_err(&adapter->pdev->dev, - "Failed to create tx ctx in firmware%d\n", err); - err = -EIO; - } - - dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, - rsp_phys_addr); - -out_free_rq: - dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); - - return err; -} - -static void -qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter) -{ - if (qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - adapter->tx_context_id, - QLCNIC_DESTROY_CTX_RESET, - 0, - QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) { - - dev_err(&adapter->pdev->dev, - "Failed to destroy tx ctx in firmware\n"); - } -} - -int -qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) -{ - return qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - config, - 0, - 0, - QLCNIC_CDRP_CMD_CONFIG_PORT); -} - -int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) -{ - void *addr; - int err; - int ring; - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_host_tx_ring *tx_ring; - - struct pci_dev *pdev = adapter->pdev; - - recv_ctx = adapter->recv_ctx; - tx_ring = adapter->tx_ring; - - tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev, - sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL); - if (tx_ring->hw_consumer == NULL) { - dev_err(&pdev->dev, "failed to allocate tx consumer\n"); - return -ENOMEM; - } - - /* cmd desc ring */ - addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), - &tx_ring->phys_addr, GFP_KERNEL); - - if (addr == NULL) { - dev_err(&pdev->dev, "failed to allocate tx desc ring\n"); - err = -ENOMEM; - goto err_out_free; - } - - tx_ring->desc_head = addr; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - addr = dma_alloc_coherent(&adapter->pdev->dev, - RCV_DESC_RINGSIZE(rds_ring), - &rds_ring->phys_addr, GFP_KERNEL); - if (addr == NULL) { - dev_err(&pdev->dev, - "failed to allocate rds ring [%d]\n", ring); - err = -ENOMEM; - goto err_out_free; - } - rds_ring->desc_head = addr; - - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - addr = dma_alloc_coherent(&adapter->pdev->dev, - STATUS_DESC_RINGSIZE(sds_ring), - &sds_ring->phys_addr, GFP_KERNEL); - if (addr == NULL) { - dev_err(&pdev->dev, - "failed to allocate sds ring [%d]\n", ring); - err = -ENOMEM; - goto err_out_free; - } - sds_ring->desc_head = addr; - } - - return 0; - -err_out_free: - qlcnic_free_hw_resources(adapter); 
- return err; -} - - -int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter) -{ - int err; - - if (adapter->flags & QLCNIC_NEED_FLR) { - pci_reset_function(adapter->pdev); - adapter->flags &= ~QLCNIC_NEED_FLR; - } - - err = qlcnic_fw_cmd_create_rx_ctx(adapter); - if (err) - return err; - - err = qlcnic_fw_cmd_create_tx_ctx(adapter); - if (err) { - qlcnic_fw_cmd_destroy_rx_ctx(adapter); - return err; - } - - set_bit(__QLCNIC_FW_ATTACHED, &adapter->state); - return 0; -} - -void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) -{ - if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { - qlcnic_fw_cmd_destroy_rx_ctx(adapter); - qlcnic_fw_cmd_destroy_tx_ctx(adapter); - - /* Allow dma queues to drain after context reset */ - msleep(20); - } -} - -void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_host_tx_ring *tx_ring; - int ring; - - recv_ctx = adapter->recv_ctx; - - tx_ring = adapter->tx_ring; - if (tx_ring->hw_consumer != NULL) { - dma_free_coherent(&adapter->pdev->dev, - sizeof(u32), - tx_ring->hw_consumer, - tx_ring->hw_cons_phys_addr); - tx_ring->hw_consumer = NULL; - } - - if (tx_ring->desc_head != NULL) { - dma_free_coherent(&adapter->pdev->dev, - TX_DESC_RINGSIZE(tx_ring), - tx_ring->desc_head, tx_ring->phys_addr); - tx_ring->desc_head = NULL; - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - - if (rds_ring->desc_head != NULL) { - dma_free_coherent(&adapter->pdev->dev, - RCV_DESC_RINGSIZE(rds_ring), - rds_ring->desc_head, - rds_ring->phys_addr); - rds_ring->desc_head = NULL; - } - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - if (sds_ring->desc_head != NULL) { - dma_free_coherent(&adapter->pdev->dev, - STATUS_DESC_RINGSIZE(sds_ring), - sds_ring->desc_head, - sds_ring->phys_addr); - sds_ring->desc_head = NULL; - } - } -} - - -/* Get MAC address of a NIC partition */ -int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) -{ - int err; - u32 arg1; - - arg1 = adapter->ahw->pci_func | BIT_8; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - arg1, - 0, - 0, - QLCNIC_CDRP_CMD_MAC_ADDRESS); - - if (err == QLCNIC_RCODE_SUCCESS) - qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET, - QLCNIC_ARG2_CRB_OFFSET, 0, mac); - else { - dev_err(&adapter->pdev->dev, - "Failed to get mac address%d\n", err); - err = -EIO; - } - - return err; -} - -/* Get info of a NIC partition */ -int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, - struct qlcnic_info *npar_info, u8 func_id) -{ - int err; - dma_addr_t nic_dma_t; - struct qlcnic_info *nic_info; - void *nic_info_addr; - size_t nic_size = sizeof(struct qlcnic_info); - - nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, - &nic_dma_t, GFP_KERNEL); - if (!nic_info_addr) - return -ENOMEM; - memset(nic_info_addr, 0, nic_size); - - nic_info = nic_info_addr; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - MSD(nic_dma_t), - LSD(nic_dma_t), - (func_id << 16 | nic_size), - QLCNIC_CDRP_CMD_GET_NIC_INFO); - - if (err == QLCNIC_RCODE_SUCCESS) { - npar_info->pci_func = le16_to_cpu(nic_info->pci_func); - npar_info->op_mode = le16_to_cpu(nic_info->op_mode); - npar_info->phys_port = le16_to_cpu(nic_info->phys_port); - npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode); - 
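/*
 * Illustrative, standalone sketch (not from the driver) of the
 * MSD()/LSD() argument split used by the CDRP calls above: a 64-bit
 * DMA address is handed to firmware as two 32-bit CRB arguments.  The
 * helper definitions below are assumed equivalents, not copied from
 * the driver headers.
 */
#include <stdint.h>
#include <stdio.h>

#define LSD(x) ((uint32_t)((uint64_t)(x) & 0xffffffffu))
#define MSD(x) ((uint32_t)(((uint64_t)(x) >> 32) & 0xffffffffu))

int main(void)
{
	uint64_t dma = 0x0000000123400000ull;   /* hypothetical address */

	printf("arg1 = 0x%08x (MSD), arg2 = 0x%08x (LSD)\n",
	       MSD(dma), LSD(dma));
	return 0;
}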
npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques); - npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); - npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw); - npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw); - npar_info->capabilities = le32_to_cpu(nic_info->capabilities); - npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); - - dev_info(&adapter->pdev->dev, - "phy port: %d switch_mode: %d,\n" - "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n" - "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n", - npar_info->phys_port, npar_info->switch_mode, - npar_info->max_tx_ques, npar_info->max_rx_ques, - npar_info->min_tx_bw, npar_info->max_tx_bw, - npar_info->max_mtu, npar_info->capabilities); - } else { - dev_err(&adapter->pdev->dev, - "Failed to get nic info%d\n", err); - err = -EIO; - } - - dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, - nic_dma_t); - return err; -} - -/* Configure a NIC partition */ -int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic) -{ - int err = -EIO; - dma_addr_t nic_dma_t; - void *nic_info_addr; - struct qlcnic_info *nic_info; - size_t nic_size = sizeof(struct qlcnic_info); - - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return err; - - nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, - &nic_dma_t, GFP_KERNEL); - if (!nic_info_addr) - return -ENOMEM; - - memset(nic_info_addr, 0, nic_size); - nic_info = nic_info_addr; - - nic_info->pci_func = cpu_to_le16(nic->pci_func); - nic_info->op_mode = cpu_to_le16(nic->op_mode); - nic_info->phys_port = cpu_to_le16(nic->phys_port); - nic_info->switch_mode = cpu_to_le16(nic->switch_mode); - nic_info->capabilities = cpu_to_le32(nic->capabilities); - nic_info->max_mac_filters = nic->max_mac_filters; - nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques); - nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques); - nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw); - nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); - - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - MSD(nic_dma_t), - LSD(nic_dma_t), - ((nic->pci_func << 16) | nic_size), - QLCNIC_CDRP_CMD_SET_NIC_INFO); - - if (err != QLCNIC_RCODE_SUCCESS) { - dev_err(&adapter->pdev->dev, - "Failed to set nic info%d\n", err); - err = -EIO; - } - - dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, - nic_dma_t); - return err; -} - -/* Get PCI Info of a partition */ -int qlcnic_get_pci_info(struct qlcnic_adapter *adapter, - struct qlcnic_pci_info *pci_info) -{ - int err = 0, i; - dma_addr_t pci_info_dma_t; - struct qlcnic_pci_info *npar; - void *pci_info_addr; - size_t npar_size = sizeof(struct qlcnic_pci_info); - size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; - - pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, - &pci_info_dma_t, GFP_KERNEL); - if (!pci_info_addr) - return -ENOMEM; - memset(pci_info_addr, 0, pci_size); - - npar = pci_info_addr; - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - MSD(pci_info_dma_t), - LSD(pci_info_dma_t), - pci_size, - QLCNIC_CDRP_CMD_GET_PCI_INFO); - - if (err == QLCNIC_RCODE_SUCCESS) { - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) { - pci_info->id = le16_to_cpu(npar->id); - pci_info->active = le16_to_cpu(npar->active); - pci_info->type = le16_to_cpu(npar->type); - pci_info->default_port = - le16_to_cpu(npar->default_port); - pci_info->tx_min_bw = - le16_to_cpu(npar->tx_min_bw); - pci_info->tx_max_bw = - 
le16_to_cpu(npar->tx_max_bw); - memcpy(pci_info->mac, npar->mac, ETH_ALEN); - } - } else { - dev_err(&adapter->pdev->dev, - "Failed to get PCI Info%d\n", err); - err = -EIO; - } - - dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr, - pci_info_dma_t); - return err; -} - -/* Configure eSwitch for port mirroring */ -int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, - u8 enable_mirroring, u8 pci_func) -{ - int err = -EIO; - u32 arg1; - - if (adapter->op_mode != QLCNIC_MGMT_FUNC || - !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) - return err; - - arg1 = id | (enable_mirroring ? BIT_4 : 0); - arg1 |= pci_func << 8; - - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - arg1, - 0, - 0, - QLCNIC_CDRP_CMD_SET_PORTMIRRORING); - - if (err != QLCNIC_RCODE_SUCCESS) { - dev_err(&adapter->pdev->dev, - "Failed to configure port mirroring%d on eswitch:%d\n", - pci_func, id); - } else { - dev_info(&adapter->pdev->dev, - "Configured eSwitch %d for port mirroring:%d\n", - id, pci_func); - } - - return err; -} - -int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, - const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { - - size_t stats_size = sizeof(struct __qlcnic_esw_statistics); - struct __qlcnic_esw_statistics *stats; - dma_addr_t stats_dma_t; - void *stats_addr; - u32 arg1; - int err; - - if (esw_stats == NULL) - return -ENOMEM; - - if (adapter->op_mode != QLCNIC_MGMT_FUNC && - func != adapter->ahw->pci_func) { - dev_err(&adapter->pdev->dev, - "Not privilege to query stats for func=%d", func); - return -EIO; - } - - stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, - &stats_dma_t, GFP_KERNEL); - if (!stats_addr) { - dev_err(&adapter->pdev->dev, "Unable to allocate memory\n"); - return -ENOMEM; - } - memset(stats_addr, 0, stats_size); - - arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; - arg1 |= rx_tx << 15 | stats_size << 16; - - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - arg1, - MSD(stats_dma_t), - LSD(stats_dma_t), - QLCNIC_CDRP_CMD_GET_ESWITCH_STATS); - - if (!err) { - stats = stats_addr; - esw_stats->context_id = le16_to_cpu(stats->context_id); - esw_stats->version = le16_to_cpu(stats->version); - esw_stats->size = le16_to_cpu(stats->size); - esw_stats->multicast_frames = - le64_to_cpu(stats->multicast_frames); - esw_stats->broadcast_frames = - le64_to_cpu(stats->broadcast_frames); - esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames); - esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames); - esw_stats->local_frames = le64_to_cpu(stats->local_frames); - esw_stats->errors = le64_to_cpu(stats->errors); - esw_stats->numbytes = le64_to_cpu(stats->numbytes); - } - - dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, - stats_dma_t); - return err; -} - -int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, - const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { - - struct __qlcnic_esw_statistics port_stats; - u8 i; - int ret = -EIO; - - if (esw_stats == NULL) - return -ENOMEM; - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return -EIO; - if (adapter->npars == NULL) - return -EIO; - - memset(esw_stats, 0, sizeof(u64)); - esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - 
esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL; - esw_stats->context_id = eswitch; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - if (adapter->npars[i].phy_port != eswitch) - continue; - - memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics)); - if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats)) - continue; - - esw_stats->size = port_stats.size; - esw_stats->version = port_stats.version; - QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames, - port_stats.unicast_frames); - QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames, - port_stats.multicast_frames); - QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames, - port_stats.broadcast_frames); - QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames, - port_stats.dropped_frames); - QLCNIC_ADD_ESW_STATS(esw_stats->errors, - port_stats.errors); - QLCNIC_ADD_ESW_STATS(esw_stats->local_frames, - port_stats.local_frames); - QLCNIC_ADD_ESW_STATS(esw_stats->numbytes, - port_stats.numbytes); - ret = 0; - } - return ret; -} - -int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, - const u8 port, const u8 rx_tx) -{ - - u32 arg1; - - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return -EIO; - - if (func_esw == QLCNIC_STATS_PORT) { - if (port >= QLCNIC_MAX_PCI_FUNC) - goto err_ret; - } else if (func_esw == QLCNIC_STATS_ESWITCH) { - if (port >= QLCNIC_NIU_MAX_XG_PORTS) - goto err_ret; - } else { - goto err_ret; - } - - if (rx_tx > QLCNIC_QUERY_TX_COUNTER) - goto err_ret; - - arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; - arg1 |= BIT_14 | rx_tx << 15; - - return qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - arg1, - 0, - 0, - QLCNIC_CDRP_CMD_GET_ESWITCH_STATS); - -err_ret: - dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d" - "rx_ctx=%d\n", func_esw, port, rx_tx); - return -EIO; -} - -static int -__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, - u32 *arg1, u32 *arg2) -{ - int err = -EIO; - u8 pci_func; - pci_func = (*arg1 >> 8); - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - *arg1, - 0, - 0, - QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG); - - if (err == QLCNIC_RCODE_SUCCESS) { - *arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET); - *arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET); - dev_info(&adapter->pdev->dev, - "eSwitch port config for pci func %d\n", pci_func); - } else { - dev_err(&adapter->pdev->dev, - "Failed to get eswitch port config for pci func %d\n", - pci_func); - } - return err; -} -/* Configure eSwitch port -op_mode = 0 for setting default port behavior -op_mode = 1 for setting vlan id -op_mode = 2 for deleting vlan id -op_type = 0 for vlan_id -op_type = 1 for port vlan_id -*/ -int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg) -{ - int err = -EIO; - u32 arg1, arg2 = 0; - u8 pci_func; - - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return err; - pci_func = esw_cfg->pci_func; - arg1 = (adapter->npars[pci_func].phy_port & BIT_0); - arg1 |= (pci_func << 8); - - if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) - return err; - arg1 &= ~(0x0ff << 8); - arg1 |= (pci_func << 8); - arg1 &= ~(BIT_2 | BIT_3); - switch (esw_cfg->op_mode) { - case QLCNIC_PORT_DEFAULTS: - arg1 |= (BIT_4 | BIT_6 | BIT_7); - arg2 |= (BIT_0 | BIT_1); - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) - arg2 |= (BIT_2 | BIT_3); - if (!(esw_cfg->discard_tagged)) - 
arg1 &= ~BIT_4; - if (!(esw_cfg->promisc_mode)) - arg1 &= ~BIT_6; - if (!(esw_cfg->mac_override)) - arg1 &= ~BIT_7; - if (!(esw_cfg->mac_anti_spoof)) - arg2 &= ~BIT_0; - if (!(esw_cfg->offload_flags & BIT_0)) - arg2 &= ~(BIT_1 | BIT_2 | BIT_3); - if (!(esw_cfg->offload_flags & BIT_1)) - arg2 &= ~BIT_2; - if (!(esw_cfg->offload_flags & BIT_2)) - arg2 &= ~BIT_3; - break; - case QLCNIC_ADD_VLAN: - arg1 |= (BIT_2 | BIT_5); - arg1 |= (esw_cfg->vlan_id << 16); - break; - case QLCNIC_DEL_VLAN: - arg1 |= (BIT_3 | BIT_5); - arg1 &= ~(0x0ffff << 16); - break; - default: - return err; - } - - err = qlcnic_issue_cmd(adapter, - adapter->ahw->pci_func, - adapter->fw_hal_version, - arg1, - arg2, - 0, - QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH); - - if (err != QLCNIC_RCODE_SUCCESS) { - dev_err(&adapter->pdev->dev, - "Failed to configure eswitch pci func %d\n", pci_func); - } else { - dev_info(&adapter->pdev->dev, - "Configured eSwitch for pci func %d\n", pci_func); - } - - return err; -} - -int -qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg) -{ - u32 arg1, arg2; - u8 phy_port; - if (adapter->op_mode == QLCNIC_MGMT_FUNC) - phy_port = adapter->npars[esw_cfg->pci_func].phy_port; - else - phy_port = adapter->physical_port; - arg1 = phy_port; - arg1 |= (esw_cfg->pci_func << 8); - if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) - return -EIO; - - esw_cfg->discard_tagged = !!(arg1 & BIT_4); - esw_cfg->host_vlan_tag = !!(arg1 & BIT_5); - esw_cfg->promisc_mode = !!(arg1 & BIT_6); - esw_cfg->mac_override = !!(arg1 & BIT_7); - esw_cfg->vlan_id = LSW(arg1 >> 16); - esw_cfg->mac_anti_spoof = (arg2 & 0x1); - esw_cfg->offload_flags = ((arg2 >> 1) & 0x7); - - return 0; -} diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c deleted file mode 100644 index 7c64f2f..0000000 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ /dev/null @@ -1,1234 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. 
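/*
 * Illustrative, standalone sketch (not from the driver) of the
 * eSwitch port-config decode at the end of
 * qlcnic_get_eswitch_port_config() above: feature bits live in arg1,
 * the VLAN id in arg1[31:16], MAC anti-spoofing in arg2 bit 0 and the
 * offload flags in arg2[3:1].
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	uint32_t arg1 = BIT(4) | BIT(6) | (100u << 16);  /* drop tagged, promisc, VLAN 100 */
	uint32_t arg2 = 0x1;                             /* MAC anti-spoof on */

	printf("discard_tagged=%u promisc=%u mac_override=%u "
	       "vlan=%u anti_spoof=%u offload=0x%x\n",
	       !!(arg1 & BIT(4)), !!(arg1 & BIT(6)), !!(arg1 & BIT(7)),
	       (arg1 >> 16) & 0xffff, arg2 & 0x1, (arg2 >> 1) & 0x7);
	return 0;
}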
- */ - -#include -#include -#include -#include -#include -#include - -#include "qlcnic.h" - -struct qlcnic_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; - -#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m) -#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m) - -static const struct qlcnic_stats qlcnic_gstrings_stats[] = { - {"xmit_called", - QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)}, - {"xmit_finished", - QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)}, - {"rx_dropped", - QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)}, - {"tx_dropped", - QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)}, - {"csummed", - QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)}, - {"rx_pkts", - QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)}, - {"lro_pkts", - QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, - {"rx_bytes", - QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)}, - {"tx_bytes", - QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)}, - {"lrobytes", - QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)}, - {"lso_frames", - QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)}, - {"xmit_on", - QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)}, - {"xmit_off", - QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, - {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), - QLC_OFF(stats.skb_alloc_failure)}, - {"null rxbuf", - QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)}, - {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error), - QLC_OFF(stats.rx_dma_map_error)}, - {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error), - QLC_OFF(stats.tx_dma_map_error)}, - -}; - -static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { - "rx unicast frames", - "rx multicast frames", - "rx broadcast frames", - "rx dropped frames", - "rx errors", - "rx local frames", - "rx numbytes", - "tx unicast frames", - "tx multicast frames", - "tx broadcast frames", - "tx dropped frames", - "tx errors", - "tx local frames", - "tx numbytes", -}; - -#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) -#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats) - -static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { - "Register_Test_on_offline", - "Link_Test_on_offline", - "Interrupt_Test_offline", - "Internal_Loopback_offline", - "External_Loopback_offline" -}; - -#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) - -#define QLCNIC_RING_REGS_COUNT 20 -#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32)) -#define QLCNIC_MAX_EEPROM_LEN 1024 - -static const u32 diag_registers[] = { - CRB_CMDPEG_STATE, - CRB_RCVPEG_STATE, - CRB_XG_STATE_P3P, - CRB_FW_CAPABILITIES_1, - ISR_INT_STATE_REG, - QLCNIC_CRB_DRV_ACTIVE, - QLCNIC_CRB_DEV_STATE, - QLCNIC_CRB_DRV_STATE, - QLCNIC_CRB_DRV_SCRATCH, - QLCNIC_CRB_DEV_PARTITION_INFO, - QLCNIC_CRB_DRV_IDC_VER, - QLCNIC_PEG_ALIVE_COUNTER, - QLCNIC_PEG_HALT_STATUS1, - QLCNIC_PEG_HALT_STATUS2, - QLCNIC_CRB_PEG_NET_0+0x3c, - QLCNIC_CRB_PEG_NET_1+0x3c, - QLCNIC_CRB_PEG_NET_2+0x3c, - QLCNIC_CRB_PEG_NET_4+0x3c, - -1 -}; - -#define QLCNIC_MGMT_API_VERSION 2 -#define QLCNIC_DEV_INFO_SIZE 1 -#define QLCNIC_ETHTOOL_REGS_VER 2 -static int qlcnic_get_regs_len(struct net_device *dev) -{ - return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN + - QLCNIC_DEV_INFO_SIZE + 1; -} - -static int qlcnic_get_eeprom_len(struct net_device *dev) -{ - return QLCNIC_FLASH_TOTAL_SIZE; -} - -static void -qlcnic_get_drvinfo(struct net_device *dev, struct 
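/*
 * Illustrative, standalone sketch (not from the driver) of the
 * QLC_SIZEOF()/QLC_OFF() table pattern above: each entry records the
 * offset and width of a statistics field so values can be copied out
 * of the adapter structure generically.  The structures and field
 * names below are local stand-ins, and the generic copy-out loop is
 * an assumed consumer of such a table.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct stats   { uint64_t xmitcalled, rxbytes; };
struct adapter { int pad; struct stats stats; };

struct stat_desc { const char *name; int size; int off; };

#define DESC(n, m) { n, (int)sizeof(((struct adapter *)0)->m), \
		     (int)offsetof(struct adapter, m) }

static const struct stat_desc tbl[] = {
	DESC("xmit_called", stats.xmitcalled),
	DESC("rx_bytes",    stats.rxbytes),
};

int main(void)
{
	struct adapter a = { .stats = { 5, 1234 } };

	for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		uint64_t v = 0;

		memcpy(&v, (const char *)&a + tbl[i].off, tbl[i].size);
		printf("%s = %llu\n", tbl[i].name, (unsigned long long)v);
	}
	return 0;
}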
ethtool_drvinfo *drvinfo) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u32 fw_major, fw_minor, fw_build; - - fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); - fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); - fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB); - sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); - - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - strlcpy(drvinfo->driver, qlcnic_driver_name, 32); - strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32); -} - -static int -qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - int check_sfp_module = 0; - u16 pcifn = adapter->ahw->pci_func; - - /* read which mode */ - if (adapter->ahw->port_type == QLCNIC_GBE) { - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); - - ecmd->advertising = (ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); - - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->duplex = adapter->link_duplex; - ecmd->autoneg = adapter->link_autoneg; - - } else if (adapter->ahw->port_type == QLCNIC_XGBE) { - u32 val; - - val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); - if (val == QLCNIC_PORT_MODE_802_3_AP) { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; - } else { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; - } - - if (netif_running(dev) && adapter->has_link_events) { - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->autoneg = adapter->link_autoneg; - ecmd->duplex = adapter->link_duplex; - goto skip; - } - - val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); - ethtool_cmd_speed_set(ecmd, P3P_LINK_SPEED_MHZ * - P3P_LINK_SPEED_VAL(pcifn, val)); - ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_DISABLE; - } else - return -EIO; - -skip: - ecmd->phy_address = adapter->physical_port; - ecmd->transceiver = XCVR_EXTERNAL; - - switch (adapter->ahw->board_type) { - case QLCNIC_BRDTYPE_P3P_REF_QG: - case QLCNIC_BRDTYPE_P3P_4_GB: - case QLCNIC_BRDTYPE_P3P_4_GB_MM: - - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; - case QLCNIC_BRDTYPE_P3P_10G_CX4: - case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: - case QLCNIC_BRDTYPE_P3P_10000_BASE_T: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->autoneg = adapter->link_autoneg; - break; - case QLCNIC_BRDTYPE_P3P_IMEZ: - case QLCNIC_BRDTYPE_P3P_XG_LOM: - case QLCNIC_BRDTYPE_P3P_HMEZ: - ecmd->supported |= SUPPORTED_MII; - ecmd->advertising |= ADVERTISED_MII; - ecmd->port = PORT_MII; - ecmd->autoneg = AUTONEG_DISABLE; - break; - case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: - case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: - case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: - ecmd->advertising |= ADVERTISED_TP; - ecmd->supported |= SUPPORTED_TP; - check_sfp_module = netif_running(dev) && - adapter->has_link_events; - case QLCNIC_BRDTYPE_P3P_10G_XFP: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; - break; - case QLCNIC_BRDTYPE_P3P_10G_TP: - if (adapter->ahw->port_type == QLCNIC_XGBE) { - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= - 
(ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(dev) && - adapter->has_link_events; - } else { - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= - (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; - } - break; - default: - dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", - adapter->ahw->board_type); - return -EIO; - } - - if (check_sfp_module) { - switch (adapter->module_type) { - case LINKEVENT_MODULE_OPTICAL_UNKNOWN: - case LINKEVENT_MODULE_OPTICAL_SRLR: - case LINKEVENT_MODULE_OPTICAL_LRM: - case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ecmd->port = PORT_FIBRE; - break; - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: - case LINKEVENT_MODULE_TWINAX: - ecmd->port = PORT_TP; - break; - default: - ecmd->port = PORT_OTHER; - } - } - - return 0; -} - -static int -qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - u32 config = 0; - u32 ret = 0; - struct qlcnic_adapter *adapter = netdev_priv(dev); - - if (adapter->ahw->port_type != QLCNIC_GBE) - return -EOPNOTSUPP; - - /* read which mode */ - if (ecmd->duplex) - config |= 0x1; - - if (ecmd->autoneg) - config |= 0x2; - - switch (ethtool_cmd_speed(ecmd)) { - case SPEED_10: - config |= (0 << 8); - break; - case SPEED_100: - config |= (1 << 8); - break; - case SPEED_1000: - config |= (10 << 8); - break; - default: - return -EIO; - } - - ret = qlcnic_fw_cmd_set_port(adapter, config); - - if (ret == QLCNIC_RCODE_NOT_SUPPORTED) - return -EOPNOTSUPP; - else if (ret) - return -EIO; - - adapter->link_speed = ethtool_cmd_speed(ecmd); - adapter->link_duplex = ecmd->duplex; - adapter->link_autoneg = ecmd->autoneg; - - if (!netif_running(dev)) - return 0; - - dev->netdev_ops->ndo_stop(dev); - return dev->netdev_ops->ndo_open(dev); -} - -static void -qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - struct qlcnic_host_sds_ring *sds_ring; - u32 *regs_buff = p; - int ring, i = 0, j = 0; - - memset(p, 0, qlcnic_get_regs_len(dev)); - regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) | - (adapter->ahw->revision_id << 16) | (adapter->pdev)->device; - - regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff)); - regs_buff[1] = QLCNIC_MGMT_API_VERSION; - - for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) - regs_buff[i] = QLCRD32(adapter, diag_registers[j]); - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) - return; - - regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/ - - regs_buff[i++] = 1; /* No. of tx ring */ - regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); - regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer); - - regs_buff[i++] = 2; /* No. of rx ring */ - regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer); - regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer); - - regs_buff[i++] = adapter->max_sds_rings; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &(recv_ctx->sds_rings[ring]); - regs_buff[i++] = readl(sds_ring->crb_sts_consumer); - } -} - -static u32 qlcnic_test_link(struct net_device *dev) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u32 val; - - val = QLCRD32(adapter, CRB_XG_STATE_P3P); - val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val); - return (val == XG_LINK_UP_P3P) ? 
0 : 1; -} - -static int -qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, - u8 *bytes) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - int offset; - int ret; - - if (eeprom->len == 0) - return -EINVAL; - - eeprom->magic = (adapter->pdev)->vendor | - ((adapter->pdev)->device << 16); - offset = eeprom->offset; - - ret = qlcnic_rom_fast_read_words(adapter, offset, bytes, - eeprom->len); - if (ret < 0) - return ret; - - return 0; -} - -static void -qlcnic_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - - ring->rx_pending = adapter->num_rxd; - ring->rx_jumbo_pending = adapter->num_jumbo_rxd; - ring->tx_pending = adapter->num_txd; - - ring->rx_max_pending = adapter->max_rxd; - ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd; - ring->tx_max_pending = MAX_CMD_DESCRIPTORS; - - ring->rx_mini_max_pending = 0; - ring->rx_mini_pending = 0; -} - -static u32 -qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) -{ - u32 num_desc; - num_desc = max(val, min); - num_desc = min(num_desc, max); - num_desc = roundup_pow_of_two(num_desc); - - if (val != num_desc) { - printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", - qlcnic_driver_name, r_name, num_desc, val); - } - - return num_desc; -} - -static int -qlcnic_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u16 num_rxd, num_jumbo_rxd, num_txd; - - if (ring->rx_mini_pending) - return -EOPNOTSUPP; - - num_rxd = qlcnic_validate_ringparam(ring->rx_pending, - MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx"); - - num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending, - MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd, - "rx jumbo"); - - num_txd = qlcnic_validate_ringparam(ring->tx_pending, - MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); - - if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && - num_jumbo_rxd == adapter->num_jumbo_rxd) - return 0; - - adapter->num_rxd = num_rxd; - adapter->num_jumbo_rxd = num_jumbo_rxd; - adapter->num_txd = num_txd; - - return qlcnic_reset_context(adapter); -} - -static void qlcnic_get_channels(struct net_device *dev, - struct ethtool_channels *channel) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - - channel->max_rx = rounddown_pow_of_two(min_t(int, - adapter->max_rx_ques, num_online_cpus())); - channel->max_tx = adapter->max_tx_ques; - - channel->rx_count = adapter->max_sds_rings; - channel->tx_count = adapter->max_tx_ques; -} - -static int qlcnic_set_channels(struct net_device *dev, - struct ethtool_channels *channel) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - int err; - - if (channel->other_count || channel->combined_count || - channel->tx_count != channel->max_tx) - return -EINVAL; - - err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count); - if (err) - return err; - - err = qlcnic_set_max_rss(adapter, channel->rx_count); - netdev_info(dev, "allocated 0x%x sds rings\n", - adapter->max_sds_rings); - return err; -} - -static void -qlcnic_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int port = adapter->physical_port; - __u32 val; - - if (adapter->ahw->port_type == QLCNIC_GBE) { - if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) - return; - /* get flow control settings */ - val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); - pause->rx_pause = 
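
qlcnic_validate_ringparam() above first clamps the requested descriptor count to the [min, max] range and then rounds it up to a power of two, which means the final value can exceed a maximum that is not itself a power of two. A standalone sketch of that clamp-then-round policy with made-up limits, the kernel's max()/min()/roundup_pow_of_two() replaced by portable equivalents:

#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow_of_two32(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}

static uint32_t validate_ring(uint32_t val, uint32_t lo, uint32_t hi)
{
    uint32_t n = val < lo ? lo : val;   /* clamp to the floor ... */

    if (n > hi)                         /* ... then to the ceiling */
        n = hi;
    return roundup_pow_of_two32(n);     /* round up last */
}

int main(void)
{
    /* A request for 300 descriptors survives the clamp and is then
     * rounded up to the next power of two, 512. */
    printf("%u\n", validate_ring(300, 64, 1024));   /* prints 512 */
    return 0;
}
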
qlcnic_gb_get_rx_flowctl(val); - val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); - switch (port) { - case 0: - pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val)); - break; - case 1: - pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val)); - break; - case 2: - pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val)); - break; - case 3: - default: - pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val)); - break; - } - } else if (adapter->ahw->port_type == QLCNIC_XGBE) { - if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) - return; - pause->rx_pause = 1; - val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); - if (port == 0) - pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val)); - else - pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val)); - } else { - dev_err(&netdev->dev, "Unknown board type: %x\n", - adapter->ahw->port_type); - } -} - -static int -qlcnic_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int port = adapter->physical_port; - __u32 val; - - /* read mode */ - if (adapter->ahw->port_type == QLCNIC_GBE) { - if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) - return -EIO; - /* set flow control */ - val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port)); - - if (pause->rx_pause) - qlcnic_gb_rx_flowctl(val); - else - qlcnic_gb_unset_rx_flowctl(val); - - QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), - val); - /* set autoneg */ - val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL); - switch (port) { - case 0: - if (pause->tx_pause) - qlcnic_gb_unset_gb0_mask(val); - else - qlcnic_gb_set_gb0_mask(val); - break; - case 1: - if (pause->tx_pause) - qlcnic_gb_unset_gb1_mask(val); - else - qlcnic_gb_set_gb1_mask(val); - break; - case 2: - if (pause->tx_pause) - qlcnic_gb_unset_gb2_mask(val); - else - qlcnic_gb_set_gb2_mask(val); - break; - case 3: - default: - if (pause->tx_pause) - qlcnic_gb_unset_gb3_mask(val); - else - qlcnic_gb_set_gb3_mask(val); - break; - } - QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val); - } else if (adapter->ahw->port_type == QLCNIC_XGBE) { - if (!pause->rx_pause || pause->autoneg) - return -EOPNOTSUPP; - - if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) - return -EIO; - - val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL); - if (port == 0) { - if (pause->tx_pause) - qlcnic_xg_unset_xg0_mask(val); - else - qlcnic_xg_set_xg0_mask(val); - } else { - if (pause->tx_pause) - qlcnic_xg_unset_xg1_mask(val); - else - qlcnic_xg_set_xg1_mask(val); - } - QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val); - } else { - dev_err(&netdev->dev, "Unknown board type: %x\n", - adapter->ahw->port_type); - } - return 0; -} - -static int qlcnic_reg_test(struct net_device *dev) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u32 data_read; - - data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0)); - if ((data_read & 0xffff) != adapter->pdev->vendor) - return 1; - - return 0; -} - -static int qlcnic_get_sset_count(struct net_device *dev, int sset) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - switch (sset) { - case ETH_SS_TEST: - return QLCNIC_TEST_LEN; - case ETH_SS_STATS: - if (adapter->flags & QLCNIC_ESWITCH_ENABLED) - return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN; - return QLCNIC_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static int qlcnic_irq_test(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int max_sds_rings = adapter->max_sds_rings; - int ret; - - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EIO; - - ret = 
qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
-    if (ret)
-        goto clear_it;
-
-    adapter->diag_cnt = 0;
-    ret = qlcnic_issue_cmd(adapter, adapter->ahw->pci_func,
-            adapter->fw_hal_version, adapter->ahw->pci_func,
-            0, 0, 0x00000011);
-    if (ret)
-        goto done;
-
-    msleep(10);
-
-    ret = !adapter->diag_cnt;
-
-done:
-    qlcnic_diag_free_res(netdev, max_sds_rings);
-
-clear_it:
-    adapter->max_sds_rings = max_sds_rings;
-    clear_bit(__QLCNIC_RESETTING, &adapter->state);
-    return ret;
-}
-
-#define QLCNIC_ILB_PKT_SIZE 64
-#define QLCNIC_NUM_ILB_PKT 16
-#define QLCNIC_ILB_MAX_RCV_LOOP 10
-
-static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
-{
-    unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
-
-    memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE);
-
-    memcpy(data, mac, ETH_ALEN);
-    memcpy(data + ETH_ALEN, mac, ETH_ALEN);
-
-    memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data));
-}
-
-int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
-{
-    unsigned char buff[QLCNIC_ILB_PKT_SIZE];
-    qlcnic_create_loopback_buff(buff, mac);
-    return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
-}
-
-static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter)
-{
-    struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-    struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
-    struct sk_buff *skb;
-    int i, loop, cnt = 0;
-
-    for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
-        skb = dev_alloc_skb(QLCNIC_ILB_PKT_SIZE);
-        qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
-        skb_put(skb, QLCNIC_ILB_PKT_SIZE);
-
-        adapter->diag_cnt = 0;
-        qlcnic_xmit_frame(skb, adapter->netdev);
-
-        loop = 0;
-        do {
-            msleep(1);
-            qlcnic_process_rcv_ring_diag(sds_ring);
-            if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP)
-                break;
-        } while (!adapter->diag_cnt);
-
-        dev_kfree_skb_any(skb);
-
-        if (!adapter->diag_cnt)
-            dev_warn(&adapter->pdev->dev, "LB Test: %dth packet"
-                " not received\n", i + 1);
-        else
-            cnt++;
-    }
-    if (cnt != i) {
-        dev_warn(&adapter->pdev->dev, "LB Test failed\n");
-        return -1;
-    }
-    return 0;
-}
-
-static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
-{
-    struct qlcnic_adapter *adapter = netdev_priv(netdev);
-    int max_sds_rings = adapter->max_sds_rings;
-    struct qlcnic_host_sds_ring *sds_ring;
-    int loop = 0;
-    int ret;
-
-    if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
-        netdev_info(netdev, "Firmware is not loopback test capable\n");
-        return -EOPNOTSUPP;
-    }
-
-    netdev_info(netdev, "%s loopback test in progress\n",
-        mode == QLCNIC_ILB_MODE ? "internal" : "external");
-    if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
-        netdev_warn(netdev, "Loopback test not supported for "
-            "non-privileged function\n");
-        return 0;
-    }
-
-    if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
-        return -EBUSY;
-
-    ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
-    if (ret)
-        goto clear_it;
-
-    sds_ring = &adapter->recv_ctx->sds_rings[0];
-
-    ret = qlcnic_set_lb_mode(adapter, mode);
-    if (ret)
-        goto free_res;
-
-    adapter->diag_cnt = 0;
-    do {
-        msleep(500);
-        qlcnic_process_rcv_ring_diag(sds_ring);
-        if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
-            netdev_info(netdev, "firmware didn't respond to loopback"
-                " configure request\n");
-            ret = -QLCNIC_FW_NOT_RESPOND;
-            goto free_res;
-        } else if (adapter->diag_cnt) {
-            ret = adapter->diag_cnt;
-            goto free_res;
-        }
-    } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
-
-    ret = qlcnic_do_lb_test(adapter);
-
-    qlcnic_clear_lb_mode(adapter);
-
- free_res:
-    qlcnic_diag_free_res(netdev, max_sds_rings);
-
- clear_it:
-    adapter->max_sds_rings = max_sds_rings;
-    clear_bit(__QLCNIC_RESETTING, &adapter->state);
-    return ret;
-}
-
-static void
-qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
-             u64 *data)
-{
-    memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
-
-    data[0] = qlcnic_reg_test(dev);
-    if (data[0])
-        eth_test->flags |= ETH_TEST_FL_FAILED;
-
-    data[1] = (u64) qlcnic_test_link(dev);
-    if (data[1])
-        eth_test->flags |= ETH_TEST_FL_FAILED;
-
-    if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
-        data[2] = qlcnic_irq_test(dev);
-        if (data[2])
-            eth_test->flags |= ETH_TEST_FL_FAILED;
-
-        data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
-        if (data[3])
-            eth_test->flags |= ETH_TEST_FL_FAILED;
-
-        if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
-            data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
-            if (data[4])
-                eth_test->flags |= ETH_TEST_FL_FAILED;
-            eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
-        }
-    }
-}
-
-static void
-qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
-{
-    struct qlcnic_adapter *adapter = netdev_priv(dev);
-    int index, i;
-
-    switch (stringset) {
-    case ETH_SS_TEST:
-        memcpy(data, *qlcnic_gstrings_test,
-               QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
-        break;
-    case ETH_SS_STATS:
-        for (index = 0; index < QLCNIC_STATS_LEN; index++) {
-            memcpy(data + index * ETH_GSTRING_LEN,
-                   qlcnic_gstrings_stats[index].stat_string,
-                   ETH_GSTRING_LEN);
-        }
-        if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
-            return;
-        for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
-            memcpy(data + index * ETH_GSTRING_LEN,
-                   qlcnic_device_gstrings_stats[i],
-                   ETH_GSTRING_LEN);
-        }
-    }
-}
-
-#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \
-    (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ?
0 : VAL1) - -static void -qlcnic_fill_device_stats(int *index, u64 *data, - struct __qlcnic_esw_statistics *stats) -{ - int ind = *index; - - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames); - data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes); - - *index = ind; -} - -static void -qlcnic_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 * data) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - struct qlcnic_esw_statistics port_stats; - int index, ret; - - for (index = 0; index < QLCNIC_STATS_LEN; index++) { - char *p = - (char *)adapter + - qlcnic_gstrings_stats[index].stat_offset; - data[index] = - (qlcnic_gstrings_stats[index].sizeof_stat == - sizeof(u64)) ? *(u64 *)p:(*(u32 *)p); - } - - if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) - return; - - memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics)); - ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, - QLCNIC_QUERY_RX_COUNTER, &port_stats.rx); - if (ret) - return; - - qlcnic_fill_device_stats(&index, data, &port_stats.rx); - - ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, - QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); - if (ret) - return; - - qlcnic_fill_device_stats(&index, data, &port_stats.tx); -} - -static int qlcnic_set_led(struct net_device *dev, - enum ethtool_phys_id_state state) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - int max_sds_rings = adapter->max_sds_rings; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EIO; - - if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) { - clear_bit(__QLCNIC_RESETTING, &adapter->state); - return -EIO; - } - set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); - } - - if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) - return 0; - - dev_err(&adapter->pdev->dev, - "Failed to set LED blink state.\n"); - break; - - case ETHTOOL_ID_INACTIVE: - if (adapter->nic_ops->config_led(adapter, 0, 0xf)) - dev_err(&adapter->pdev->dev, - "Failed to reset LED blink state.\n"); - - break; - - default: - return -EINVAL; - } - - if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) { - qlcnic_diag_free_res(dev, max_sds_rings); - clear_bit(__QLCNIC_RESETTING, &adapter->state); - } - - return -EIO; -} - -static void -qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u32 wol_cfg; - - wol->supported = 0; - wol->wolopts = 0; - - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); - if (wol_cfg & (1UL << adapter->portnum)) - wol->supported |= WAKE_MAGIC; - - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); - if (wol_cfg & (1UL << adapter->portnum)) - wol->wolopts |= WAKE_MAGIC; -} - -static int -qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct qlcnic_adapter *adapter = netdev_priv(dev); - u32 wol_cfg; - - if (wol->wolopts & ~WAKE_MAGIC) - return -EOPNOTSUPP; - - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); - if (!(wol_cfg & (1 << adapter->portnum))) - return -EOPNOTSUPP; - - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); - if (wol->wolopts & WAKE_MAGIC) - 
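
qlcnic_get_wol()/qlcnic_set_wol() above treat QLCNIC_WOL_CONFIG_NV and QLCNIC_WOL_CONFIG as bitmaps indexed by port number: bit N of the NV word advertises magic-packet capability for port N, and bit N of the config word enables it. A worked standalone example of that bit discipline (plain ISO C, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t wol_cfg = 0;
    int portnum = 1;

    wol_cfg |= 1UL << portnum;      /* enable WAKE_MAGIC for port 1 */
    assert(wol_cfg & (1UL << portnum));
    wol_cfg &= ~(1UL << portnum);   /* disable it again */
    assert(wol_cfg == 0);
    return 0;
}
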
wol_cfg |= 1UL << adapter->portnum; - else - wol_cfg &= ~(1UL << adapter->portnum); - - QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg); - - return 0; -} - -/* - * Set the coalescing parameters. Currently only normal is supported. - * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the - * firmware coalescing to default. - */ -static int qlcnic_set_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) - return -EINVAL; - - /* - * Return Error if unsupported values or - * unsupported parameters are set. - */ - if (ethcoal->rx_coalesce_usecs > 0xffff || - ethcoal->rx_max_coalesced_frames > 0xffff || - ethcoal->tx_coalesce_usecs || - ethcoal->tx_max_coalesced_frames || - ethcoal->rx_coalesce_usecs_irq || - ethcoal->rx_max_coalesced_frames_irq || - ethcoal->tx_coalesce_usecs_irq || - ethcoal->tx_max_coalesced_frames_irq || - ethcoal->stats_block_coalesce_usecs || - ethcoal->use_adaptive_rx_coalesce || - ethcoal->use_adaptive_tx_coalesce || - ethcoal->pkt_rate_low || - ethcoal->rx_coalesce_usecs_low || - ethcoal->rx_max_coalesced_frames_low || - ethcoal->tx_coalesce_usecs_low || - ethcoal->tx_max_coalesced_frames_low || - ethcoal->pkt_rate_high || - ethcoal->rx_coalesce_usecs_high || - ethcoal->rx_max_coalesced_frames_high || - ethcoal->tx_coalesce_usecs_high || - ethcoal->tx_max_coalesced_frames_high) - return -EINVAL; - - if (!ethcoal->rx_coalesce_usecs || - !ethcoal->rx_max_coalesced_frames) { - adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; - adapter->ahw->coal.rx_time_us = - QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; - adapter->ahw->coal.rx_packets = - QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; - } else { - adapter->ahw->coal.flag = 0; - adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs; - adapter->ahw->coal.rx_packets = - ethcoal->rx_max_coalesced_frames; - } - - qlcnic_config_intr_coalesce(adapter); - - return 0; -} - -static int qlcnic_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return -EINVAL; - - ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us; - ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets; - - return 0; -} - -static u32 qlcnic_get_msglevel(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - return adapter->msg_enable; -} - -static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - adapter->msg_enable = msglvl; -} - -static int -qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - - if (fw_dump->clr) - dump->len = fw_dump->tmpl_hdr->size + fw_dump->size; - else - dump->len = 0; - dump->flag = fw_dump->tmpl_hdr->drv_cap_mask; - dump->version = adapter->fw_version; - return 0; -} - -static int -qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, - void *buffer) -{ - int i, copy_sz; - u32 *hdr_ptr, *data; - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - - if (!fw_dump->clr) { - netdev_info(netdev, "Dump not available\n"); - qlcnic_api_unlock(adapter); - return -EINVAL; - } - /* Copy template header first */ - copy_sz 
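
The comment above spells out the coalescing contract: only rx_coalesce_usecs and rx_max_coalesced_frames are honoured, both are capped at 0xffff, a zero in either selects the firmware defaults, and every other struct ethtool_coalesce field must be zero or the call fails with -EINVAL. A standalone sketch of that contract; the struct layout and the default values here are placeholders, not the driver's:

#include <stdint.h>
#include <errno.h>

struct coal_req {
    uint32_t rx_coalesce_usecs;
    uint32_t rx_max_coalesced_frames;
    uint32_t anything_else;     /* stands in for every other field */
};

struct coal_cfg {
    uint32_t rx_time_us;
    uint32_t rx_packets;
};

static int set_coalesce(const struct coal_req *req, struct coal_cfg *cfg)
{
    /* Reject any field outside the supported pair, and out-of-range
     * values of the supported pair. */
    if (req->anything_else ||
        req->rx_coalesce_usecs > 0xffff ||
        req->rx_max_coalesced_frames > 0xffff)
        return -EINVAL;

    if (!req->rx_coalesce_usecs || !req->rx_max_coalesced_frames) {
        cfg->rx_time_us = 3;    /* placeholder firmware default */
        cfg->rx_packets = 256;  /* placeholder firmware default */
    } else {
        cfg->rx_time_us = req->rx_coalesce_usecs;
        cfg->rx_packets = req->rx_max_coalesced_frames;
    }
    return 0;
}

int main(void)
{
    struct coal_req req = { 0, 0, 0 };  /* zeros select the defaults */
    struct coal_cfg cfg;

    return set_coalesce(&req, &cfg);    /* returns 0 on success */
}
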
= fw_dump->tmpl_hdr->size;
-    hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
-    data = buffer;
-    for (i = 0; i < copy_sz/sizeof(u32); i++)
-        *data++ = cpu_to_le32(*hdr_ptr++);
-
-    /* Copy captured dump data */
-    memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
-    dump->len = copy_sz + fw_dump->size;
-    dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
-
-    /* Free dump area once data has been captured */
-    vfree(fw_dump->data);
-    fw_dump->data = NULL;
-    fw_dump->clr = 0;
-
-    return 0;
-}
-
-static int
-qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
-{
-    int ret = 0;
-    struct qlcnic_adapter *adapter = netdev_priv(netdev);
-    struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
-
-    switch (val->flag) {
-    case QLCNIC_FORCE_FW_DUMP_KEY:
-        if (!fw_dump->enable) {
-            netdev_info(netdev, "FW dump not enabled\n");
-            return ret;
-        }
-        if (fw_dump->clr) {
-            dev_info(&adapter->pdev->dev,
-            "Previous dump not cleared, not forcing dump\n");
-            return ret;
-        }
-        netdev_info(netdev, "Forcing a FW dump\n");
-        qlcnic_dev_request_reset(adapter);
-        break;
-    case QLCNIC_DISABLE_FW_DUMP:
-        if (fw_dump->enable) {
-            netdev_info(netdev, "Disabling FW dump\n");
-            fw_dump->enable = 0;
-        }
-        break;
-    case QLCNIC_ENABLE_FW_DUMP:
-        if (!fw_dump->enable && fw_dump->tmpl_hdr) {
-            netdev_info(netdev, "Enabling FW dump\n");
-            fw_dump->enable = 1;
-        }
-        break;
-    case QLCNIC_FORCE_FW_RESET:
-        netdev_info(netdev, "Forcing a FW reset\n");
-        qlcnic_dev_request_reset(adapter);
-        adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
-        break;
-    default:
-        if (val->flag > QLCNIC_DUMP_MASK_MAX ||
-            val->flag < QLCNIC_DUMP_MASK_MIN) {
-                netdev_info(netdev,
-                "Invalid dump level: 0x%x\n", val->flag);
-                ret = -EINVAL;
-                goto out;
-        }
-        fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff;
-        netdev_info(netdev, "Driver mask changed to: 0x%x\n",
-            fw_dump->tmpl_hdr->drv_cap_mask);
-    }
-out:
-    return ret;
-}
-
-const struct ethtool_ops qlcnic_ethtool_ops = {
-    .get_settings = qlcnic_get_settings,
-    .set_settings = qlcnic_set_settings,
-    .get_drvinfo = qlcnic_get_drvinfo,
-    .get_regs_len = qlcnic_get_regs_len,
-    .get_regs = qlcnic_get_regs,
-    .get_link = ethtool_op_get_link,
-    .get_eeprom_len = qlcnic_get_eeprom_len,
-    .get_eeprom = qlcnic_get_eeprom,
-    .get_ringparam = qlcnic_get_ringparam,
-    .set_ringparam = qlcnic_set_ringparam,
-    .get_channels = qlcnic_get_channels,
-    .set_channels = qlcnic_set_channels,
-    .get_pauseparam = qlcnic_get_pauseparam,
-    .set_pauseparam = qlcnic_set_pauseparam,
-    .get_wol = qlcnic_get_wol,
-    .set_wol = qlcnic_set_wol,
-    .self_test = qlcnic_diag_test,
-    .get_strings = qlcnic_get_strings,
-    .get_ethtool_stats = qlcnic_get_ethtool_stats,
-    .get_sset_count = qlcnic_get_sset_count,
-    .get_coalesce = qlcnic_get_intr_coalesce,
-    .set_coalesce = qlcnic_set_intr_coalesce,
-    .set_phys_id = qlcnic_set_led,
-    .set_msglevel = qlcnic_set_msglevel,
-    .get_msglevel = qlcnic_get_msglevel,
-    .get_dump_flag = qlcnic_get_dump_flag,
-    .get_dump_data = qlcnic_get_dump_data,
-    .set_dump = qlcnic_set_dump,
-};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
deleted file mode 100644
index d14506f..0000000
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ /dev/null
@@ -1,1023 +0,0 @@
-/*
- * QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
- */
-
-#ifndef __QLCNIC_HDR_H_
-#define __QLCNIC_HDR_H_
-
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-
-/*
- * The basic unit of access when reading/writing control registers.
- */ - -enum { - QLCNIC_HW_H0_CH_HUB_ADR = 0x05, - QLCNIC_HW_H1_CH_HUB_ADR = 0x0E, - QLCNIC_HW_H2_CH_HUB_ADR = 0x03, - QLCNIC_HW_H3_CH_HUB_ADR = 0x01, - QLCNIC_HW_H4_CH_HUB_ADR = 0x06, - QLCNIC_HW_H5_CH_HUB_ADR = 0x07, - QLCNIC_HW_H6_CH_HUB_ADR = 0x08 -}; - -/* Hub 0 */ -enum { - QLCNIC_HW_MN_CRB_AGT_ADR = 0x15, - QLCNIC_HW_MS_CRB_AGT_ADR = 0x25 -}; - -/* Hub 1 */ -enum { - QLCNIC_HW_PS_CRB_AGT_ADR = 0x73, - QLCNIC_HW_SS_CRB_AGT_ADR = 0x20, - QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b, - QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00, - QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01, - QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02, - QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03, - QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04, - QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58, - QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59, - QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a, - QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a, - QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c, - QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f, - QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12, - QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18 -}; - -/* Hub 2 */ -enum { - QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31, - QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19, - QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29, - - QLCNIC_HW_SN_CRB_AGT_ADR = 0x10, - QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20, - QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22, - QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21, - QLCNIC_HW_QM_CRB_AGT_ADR = 0x66, - QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60, - QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61, - QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62, - QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63, - QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09, - QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d, - QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e, - QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11 -}; - -/* Hub 3 */ -enum { - QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A, - QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50, - QLCNIC_HW_EG_CRB_AGT_ADR = 0x51, - QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08 -}; - -/* Hub 4 */ -enum { - QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40, - QLCNIC_HW_PEGN1_CRB_AGT_ADR, - QLCNIC_HW_PEGN2_CRB_AGT_ADR, - QLCNIC_HW_PEGN3_CRB_AGT_ADR, - QLCNIC_HW_PEGNI_CRB_AGT_ADR, - QLCNIC_HW_PEGND_CRB_AGT_ADR, - QLCNIC_HW_PEGNC_CRB_AGT_ADR, - QLCNIC_HW_PEGR0_CRB_AGT_ADR, - QLCNIC_HW_PEGR1_CRB_AGT_ADR, - QLCNIC_HW_PEGR2_CRB_AGT_ADR, - QLCNIC_HW_PEGR3_CRB_AGT_ADR, - QLCNIC_HW_PEGN4_CRB_AGT_ADR -}; - -/* Hub 5 */ -enum { - QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40, - QLCNIC_HW_PEGS1_CRB_AGT_ADR, - QLCNIC_HW_PEGS2_CRB_AGT_ADR, - QLCNIC_HW_PEGS3_CRB_AGT_ADR, - QLCNIC_HW_PEGSI_CRB_AGT_ADR, - QLCNIC_HW_PEGSD_CRB_AGT_ADR, - QLCNIC_HW_PEGSC_CRB_AGT_ADR -}; - -/* Hub 6 */ -enum { - QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46, - QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47, - QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48, - QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49, - QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16, - QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17, - QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05, - QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06, - QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07 -}; - -/* Floaters - non existent modules */ -#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 - -/* This field defines PCI/X adr [25:20] of agents on the CRB */ -enum { - QLCNIC_HW_PX_MAP_CRB_PH = 0, - QLCNIC_HW_PX_MAP_CRB_PS, - QLCNIC_HW_PX_MAP_CRB_MN, - QLCNIC_HW_PX_MAP_CRB_MS, - QLCNIC_HW_PX_MAP_CRB_PGR1, - QLCNIC_HW_PX_MAP_CRB_SRE, - QLCNIC_HW_PX_MAP_CRB_NIU, - QLCNIC_HW_PX_MAP_CRB_QMN, - QLCNIC_HW_PX_MAP_CRB_SQN0, - QLCNIC_HW_PX_MAP_CRB_SQN1, - QLCNIC_HW_PX_MAP_CRB_SQN2, - QLCNIC_HW_PX_MAP_CRB_SQN3, - QLCNIC_HW_PX_MAP_CRB_QMS, - QLCNIC_HW_PX_MAP_CRB_SQS0, - QLCNIC_HW_PX_MAP_CRB_SQS1, - QLCNIC_HW_PX_MAP_CRB_SQS2, - QLCNIC_HW_PX_MAP_CRB_SQS3, - QLCNIC_HW_PX_MAP_CRB_PGN0, - QLCNIC_HW_PX_MAP_CRB_PGN1, - QLCNIC_HW_PX_MAP_CRB_PGN2, - QLCNIC_HW_PX_MAP_CRB_PGN3, - 
QLCNIC_HW_PX_MAP_CRB_PGND, - QLCNIC_HW_PX_MAP_CRB_PGNI, - QLCNIC_HW_PX_MAP_CRB_PGS0, - QLCNIC_HW_PX_MAP_CRB_PGS1, - QLCNIC_HW_PX_MAP_CRB_PGS2, - QLCNIC_HW_PX_MAP_CRB_PGS3, - QLCNIC_HW_PX_MAP_CRB_PGSD, - QLCNIC_HW_PX_MAP_CRB_PGSI, - QLCNIC_HW_PX_MAP_CRB_SN, - QLCNIC_HW_PX_MAP_CRB_PGR2, - QLCNIC_HW_PX_MAP_CRB_EG, - QLCNIC_HW_PX_MAP_CRB_PH2, - QLCNIC_HW_PX_MAP_CRB_PS2, - QLCNIC_HW_PX_MAP_CRB_CAM, - QLCNIC_HW_PX_MAP_CRB_CAS0, - QLCNIC_HW_PX_MAP_CRB_CAS1, - QLCNIC_HW_PX_MAP_CRB_CAS2, - QLCNIC_HW_PX_MAP_CRB_C2C0, - QLCNIC_HW_PX_MAP_CRB_C2C1, - QLCNIC_HW_PX_MAP_CRB_TIMR, - QLCNIC_HW_PX_MAP_CRB_PGR3, - QLCNIC_HW_PX_MAP_CRB_RPMX1, - QLCNIC_HW_PX_MAP_CRB_RPMX2, - QLCNIC_HW_PX_MAP_CRB_RPMX3, - QLCNIC_HW_PX_MAP_CRB_RPMX4, - QLCNIC_HW_PX_MAP_CRB_RPMX5, - QLCNIC_HW_PX_MAP_CRB_RPMX6, - QLCNIC_HW_PX_MAP_CRB_RPMX7, - QLCNIC_HW_PX_MAP_CRB_XDMA, - QLCNIC_HW_PX_MAP_CRB_I2Q, - QLCNIC_HW_PX_MAP_CRB_ROMUSB, - QLCNIC_HW_PX_MAP_CRB_CAS3, - QLCNIC_HW_PX_MAP_CRB_RPMX0, - QLCNIC_HW_PX_MAP_CRB_RPMX8, - QLCNIC_HW_PX_MAP_CRB_RPMX9, - QLCNIC_HW_PX_MAP_CRB_OCM0, - QLCNIC_HW_PX_MAP_CRB_OCM1, - QLCNIC_HW_PX_MAP_CRB_SMB, - QLCNIC_HW_PX_MAP_CRB_I2C0, - QLCNIC_HW_PX_MAP_CRB_I2C1, - QLCNIC_HW_PX_MAP_CRB_LPC, - QLCNIC_HW_PX_MAP_CRB_PGNC, - QLCNIC_HW_PX_MAP_CRB_PGR0 -}; - -#define BIT_0 0x1 -#define BIT_1 0x2 -#define BIT_2 0x4 -#define BIT_3 0x8 -#define BIT_4 0x10 -#define BIT_5 0x20 -#define BIT_6 0x40 -#define BIT_7 0x80 -#define BIT_8 0x100 -#define BIT_9 0x200 -#define BIT_10 0x400 -#define BIT_11 0x800 -#define BIT_12 0x1000 -#define BIT_13 0x2000 -#define BIT_14 0x4000 -#define BIT_15 0x8000 -#define BIT_16 0x10000 -#define BIT_17 0x20000 -#define BIT_18 0x40000 -#define BIT_19 0x80000 -#define BIT_20 0x100000 -#define BIT_21 0x200000 -#define BIT_22 0x400000 -#define BIT_23 0x800000 -#define BIT_24 0x1000000 -#define BIT_25 0x2000000 -#define BIT_26 0x4000000 -#define BIT_27 0x8000000 -#define BIT_28 0x10000000 -#define BIT_29 0x20000000 -#define BIT_30 0x40000000 -#define BIT_31 0x80000000 - -/* This field defines CRB adr [31:20] of the agents */ - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \ - ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \ - ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \ - ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR) -#define 
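
The QLCNIC_HW_CRB_HUB_AGT_ADR_* macros above compose a CRB agent address by placing the hub id at bit 7 and the 7-bit agent id below it. A worked standalone example for the MN agent on hub H0, using the constant values defined earlier in this header:

#include <assert.h>

#define HUB_H0  0x05    /* QLCNIC_HW_H0_CH_HUB_ADR */
#define AGT_MN  0x15    /* QLCNIC_HW_MN_CRB_AGT_ADR */

int main(void)
{
    /* agent id occupies bits 6:0, hub id starts at bit 7:
     * (0x05 << 7) | 0x15 = 0x280 | 0x15 = 0x295 */
    assert(((HUB_H0 << 7) | AGT_MN) == 0x295);
    return 0;
}
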
QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \ - ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \ - ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \ - ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \ - ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \ - ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \ - 
((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \ - ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \ - ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR) - -#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR) -#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \ - ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR) - -#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c) - -#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034) - -#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000) -#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000) - -#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) -#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) -#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) -#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) -#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) -#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) -#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) - -#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) - -#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) -#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) -#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) -#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) -#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) -#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) - -/* Lock IDs for ROM lock */ -#define ROM_LOCK_DRIVER 0x0d417340 - -/****************************************************************************** -* -* Definitions specific to M25P flash -* -******************************************************************************* -*/ - -/* all are 1MB windows */ - -#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000 -#define QLCNIC_PCI_CRB_WINDOW(A) \ - (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE) - -#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU) -#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE) -#define QLCNIC_CRB_ROMUSB \ - 
QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB) -#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q) -#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0) -#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB) -#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64) - -#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH) -#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2) -#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0) -#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1) -#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2) -#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3) -#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2) -#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND) -#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI) -#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN) -#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN) - -#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS) -#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD - -#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR)) -#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK)) -#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK)) -#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS)) -#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK)) -#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) -#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) -#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) -#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) -#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) -#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) -#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) -#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) -#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) -#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) -#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) -#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) -#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) -#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) - -#define QLCNIC_PCI_MN_2M (0) -#define QLCNIC_PCI_MS_2M (0x80000) -#define QLCNIC_PCI_OCM0_2M (0x000c0000UL) -#define QLCNIC_PCI_CRBSPACE (0x06000000UL) -#define QLCNIC_PCI_CAMQM (0x04800000UL) -#define QLCNIC_PCI_CAMQM_END (0x04800800UL) -#define QLCNIC_PCI_2MB_SIZE (0x00200000UL) -#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL) - -#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM) - -#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL) -#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL) -#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL) -#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL) -#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL) -#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL) -#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL) -#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL) - -/* - * Register offsets 
for MN - */ -#define QLCNIC_MIU_CONTROL (0x000) -#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL) - -/* 200ms delay in each loop */ -#define QLCNIC_NIU_PHY_WAITLEN 200000 -/* 10 seconds before we give up */ -#define QLCNIC_NIU_PHY_WAITMAX 50 -#define QLCNIC_NIU_MAX_GBE_PORTS 4 -#define QLCNIC_NIU_MAX_XG_PORTS 2 - -#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000) -#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c) -#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098) - -#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \ - (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000) -#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \ - (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000) - - -#define TEST_AGT_CTRL (0x00) - -#define TA_CTL_START BIT_0 -#define TA_CTL_ENABLE BIT_1 -#define TA_CTL_WRITE BIT_2 -#define TA_CTL_BUSY BIT_3 - -/* - * Register offsets for MN - */ -#define MIU_TEST_AGT_BASE (0x90) - -#define MIU_TEST_AGT_ADDR_LO (0x04) -#define MIU_TEST_AGT_ADDR_HI (0x08) -#define MIU_TEST_AGT_WRDATA_LO (0x10) -#define MIU_TEST_AGT_WRDATA_HI (0x14) -#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20) -#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24) -#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1))) -#define MIU_TEST_AGT_RDDATA_LO (0x18) -#define MIU_TEST_AGT_RDDATA_HI (0x1c) -#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28) -#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c) -#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1))) - -#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 -#define MIU_TEST_AGT_UPPER_ADDR(off) (0) - -/* - * Register offsets for MS - */ -#define SIU_TEST_AGT_BASE (0x60) - -#define SIU_TEST_AGT_ADDR_LO (0x04) -#define SIU_TEST_AGT_ADDR_HI (0x18) -#define SIU_TEST_AGT_WRDATA_LO (0x08) -#define SIU_TEST_AGT_WRDATA_HI (0x0c) -#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i))) -#define SIU_TEST_AGT_RDDATA_LO (0x10) -#define SIU_TEST_AGT_RDDATA_HI (0x14) -#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i))) - -#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 -#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) - -/* XG Link status */ -#define XG_LINK_UP 0x10 -#define XG_LINK_DOWN 0x20 - -#define XG_LINK_UP_P3P 0x01 -#define XG_LINK_DOWN_P3P 0x02 -#define XG_LINK_STATE_P3P_MASK 0xf -#define XG_LINK_STATE_P3P(pcifn, val) \ - (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK) - -#define P3P_LINK_SPEED_MHZ 100 -#define P3P_LINK_SPEED_MASK 0xff -#define P3P_LINK_SPEED_REG(pcifn) \ - (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) -#define P3P_LINK_SPEED_VAL(pcifn, reg) \ - (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK) - -#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000) -#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg)) -#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150)) -#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154)) -#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158)) -#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100)) -#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120)) -#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124)) - -#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200)) -#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700)) -#define QLCNIC_REG(X) (NIC_CRB_BASE+(X)) -#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X)) - -#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18)) -#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c)) -#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20)) -#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24)) -#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28)) - -#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50)) -#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c)) - -#define CRB_XG_STATE_P3P 
(QLCNIC_REG(0x98)) -#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8)) -#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec)) - -#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4)) - -#define CRB_V2P_0 (QLCNIC_REG(0x290)) -#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) -#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0)) - -#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128)) -#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0)) - -/* - * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address - * which can be read by the Phantom host to get producer/consumer indexes from - * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following - * registers will be used for the addresses of the ring's shared memory - * on the Phantom. - */ - -#define qlcnic_get_temp_val(x) ((x) >> 16) -#define qlcnic_get_temp_state(x) ((x) & 0xffff) -#define qlcnic_encode_temp(val, state) (((val) << 16) | (state)) - -/* - * Temperature control. - */ -enum { - QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */ - QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */ - QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */ -}; - -/* Lock IDs for PHY lock */ -#define PHY_LOCK_DRIVER 0x44524956 - -/* Used for PS PCI Memory access */ -#define PCIX_PS_OP_ADDR_LO (0x10000) -/* via CRB (PS side only) */ -#define PCIX_PS_OP_ADDR_HI (0x10004) - -#define PCIX_INT_VECTOR (0x10100) -#define PCIX_INT_MASK (0x10104) - -#define PCIX_OCM_WINDOW (0x10800) -#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func)) - -#define PCIX_TARGET_STATUS (0x10118) -#define PCIX_TARGET_STATUS_F1 (0x10160) -#define PCIX_TARGET_STATUS_F2 (0x10164) -#define PCIX_TARGET_STATUS_F3 (0x10168) -#define PCIX_TARGET_STATUS_F4 (0x10360) -#define PCIX_TARGET_STATUS_F5 (0x10364) -#define PCIX_TARGET_STATUS_F6 (0x10368) -#define PCIX_TARGET_STATUS_F7 (0x1036c) - -#define PCIX_TARGET_MASK (0x10128) -#define PCIX_TARGET_MASK_F1 (0x10170) -#define PCIX_TARGET_MASK_F2 (0x10174) -#define PCIX_TARGET_MASK_F3 (0x10178) -#define PCIX_TARGET_MASK_F4 (0x10370) -#define PCIX_TARGET_MASK_F5 (0x10374) -#define PCIX_TARGET_MASK_F6 (0x10378) -#define PCIX_TARGET_MASK_F7 (0x1037c) - -#define PCIX_MSI_F(i) (0x13000+((i)*4)) - -#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg)) -#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg)) -#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg)) - -#define PCIE_SEM0_LOCK (0x1c000) -#define PCIE_SEM0_UNLOCK (0x1c004) -#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N)) -#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N)) - -#define PCIE_SETUP_FUNCTION (0x12040) -#define PCIE_SETUP_FUNCTION2 (0x12048) -#define PCIE_MISCCFG_RC (0x1206c) -#define PCIE_TGT_SPLIT_CHICKEN (0x12080) -#define PCIE_CHICKEN3 (0x120c8) - -#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC)) -#define PCIE_MAX_MASTER_SPLIT (0x14048) - -#define QLCNIC_PORT_MODE_NONE 0 -#define QLCNIC_PORT_MODE_XG 1 -#define QLCNIC_PORT_MODE_GB 2 -#define QLCNIC_PORT_MODE_802_3_AP 3 -#define QLCNIC_PORT_MODE_AUTO_NEG 4 -#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5 -#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6 -#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24)) -#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198)) - -#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184)) -#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188)) - -#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1 -#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c)) - -#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14)) -#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0)) -#define 
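
The qlcnic_get_temp_val()/qlcnic_get_temp_state()/qlcnic_encode_temp() macros above pack the temperature reading into the high 16 bits of one word and the QLCNIC_TEMP_* state code into the low 16 bits. A worked standalone example of the encode/decode round trip:

#include <assert.h>
#include <stdint.h>

#define TEMP_NORMAL 0x1     /* QLCNIC_TEMP_NORMAL */

int main(void)
{
    uint32_t word = (55u << 16) | TEMP_NORMAL;  /* encode 55 C, normal */

    assert((word >> 16) == 55);             /* qlcnic_get_temp_val */
    assert((word & 0xffff) == TEMP_NORMAL); /* qlcnic_get_temp_state */
    return 0;
}
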
QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8)) -#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac)) -#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138)) -#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) - -#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) -#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) -#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) -#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174)) -#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c)) -#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) -#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) - -/* Device State */ -#define QLCNIC_DEV_COLD 0x1 -#define QLCNIC_DEV_INITIALIZING 0x2 -#define QLCNIC_DEV_READY 0x3 -#define QLCNIC_DEV_NEED_RESET 0x4 -#define QLCNIC_DEV_NEED_QUISCENT 0x5 -#define QLCNIC_DEV_FAILED 0x6 -#define QLCNIC_DEV_QUISCENT 0x7 - -#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */ -#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ -#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ - -#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4))) -#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) -#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) -#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) -#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4))) -#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4))) - -#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4))) -#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4)) - -#define QLCNIC_TYPE_NIC 1 -#define QLCNIC_TYPE_FCOE 2 -#define QLCNIC_TYPE_ISCSI 3 - -#define QLCNIC_RCODE_DRIVER_INFO 0x20000000 -#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30 -#define QLCNIC_RCODE_FATAL_ERROR BIT_31 -#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) -#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) - -#define FW_POLL_DELAY (1 * HZ) -#define FW_FAIL_THRESH 2 - -#define QLCNIC_RESET_TIMEOUT_SECS 10 -#define QLCNIC_INIT_TIMEOUT_SECS 30 -#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000 -#define QLCNIC_RCVPEG_CHECK_DELAY 10 -#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60 -#define QLCNIC_CMDPEG_CHECK_DELAY 500 -#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200 -#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45 - -#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) -#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) - -/* - * PCI Interrupt Vector Values. 
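
The QLC_DEV_* macros above give each PCI function a private 4-bit field in a shared 32-bit state word: bit 0 of the nibble is reset-ready, bit 1 is quiescent-ready, and QLC_DEV_GET_DRV() extracts the whole nibble. A worked standalone example for function 2 (nibble at bits 11:8):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t val = 0;
    int fn = 2;

    val |= 1u << (fn * 4);      /* QLC_DEV_SET_RST_RDY */
    assert(val == 0x100);
    val |= 2u << (fn * 4);      /* QLC_DEV_SET_QSCNT_RDY */
    assert((0xf & (val >> (fn * 4))) == 0x3);   /* QLC_DEV_GET_DRV */
    val &= ~(3u << (fn * 4));   /* QLC_DEV_CLR_RST_QSCNT */
    assert(val == 0);
    return 0;
}
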
- */ -#define PCIX_INT_VECTOR_BIT_F0 0x0080 -#define PCIX_INT_VECTOR_BIT_F1 0x0100 -#define PCIX_INT_VECTOR_BIT_F2 0x0200 -#define PCIX_INT_VECTOR_BIT_F3 0x0400 -#define PCIX_INT_VECTOR_BIT_F4 0x0800 -#define PCIX_INT_VECTOR_BIT_F5 0x1000 -#define PCIX_INT_VECTOR_BIT_F6 0x2000 -#define PCIX_INT_VECTOR_BIT_F7 0x4000 - -struct qlcnic_legacy_intr_set { - u32 int_vec_bit; - u32 tgt_status_reg; - u32 tgt_mask_reg; - u32 pci_int_reg; -}; - -#define QLCNIC_FW_API 0x1b216c -#define QLCNIC_DRV_OP_MODE 0x1b2170 -#define QLCNIC_MSIX_BASE 0x132110 -#define QLCNIC_MAX_PCI_FUNC 8 -#define QLCNIC_MAX_VLAN_FILTERS 64 - -/* FW dump defines */ -#define MIU_TEST_CTR 0x41000090 -#define MIU_TEST_ADDR_LO 0x41000094 -#define MIU_TEST_ADDR_HI 0x41000098 -#define FLASH_ROM_WINDOW 0x42110030 -#define FLASH_ROM_DATA 0x42150000 - -static const u32 MIU_TEST_READ_DATA[] = { - 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, }; - -#define QLCNIC_FW_DUMP_REG1 0x00130060 -#define QLCNIC_FW_DUMP_REG2 0x001e0000 -#define QLCNIC_FLASH_SEM2_LK 0x0013C010 -#define QLCNIC_FLASH_SEM2_ULK 0x0013C014 -#define QLCNIC_FLASH_LOCK_ID 0x001B2100 - -#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do { \ - writel((addr & 0xFFFF0000), (void *) (bar0 + \ - QLCNIC_FW_DUMP_REG1)); \ - readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \ - *data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + \ - LSW(addr))); \ -} while (0) - -#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do { \ - writel((addr & 0xFFFF0000), (void *) (bar0 + \ - QLCNIC_FW_DUMP_REG1)); \ - readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \ - writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\ - readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr))); \ -} while (0) - -/* PCI function operational mode */ -enum { - QLCNIC_MGMT_FUNC = 0, - QLCNIC_PRIV_FUNC = 1, - QLCNIC_NON_PRIV_FUNC = 2 -}; - -enum { - QLCNIC_PORT_DEFAULTS = 0, - QLCNIC_ADD_VLAN = 1, - QLCNIC_DEL_VLAN = 2 -}; - -#define QLC_DEV_DRV_DEFAULT 0x11111111 - -#define LSB(x) ((uint8_t)(x)) -#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8)) - -#define LSW(x) ((uint16_t)((uint32_t)(x))) -#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16)) - -#define LSD(x) ((uint32_t)((uint64_t)(x))) -#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) - -#define QLCNIC_LEGACY_INTR_CONFIG \ -{ \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ - .tgt_mask_reg = 
ISR_INT_TARGET_MASK_F6, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ - \ - { \ - .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ - .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ - .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ - .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ -} - -/* NIU REGS */ - -#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1) - -/* - * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) - * - * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable - * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream - * Bit 2 : enable_rx => 1:enable frame recv, 0:disable - * Bit 3 : rx_synced => R/O: recv enable synched to recv stream - * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable - * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore - * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal - * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op - * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op - * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op - * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op - * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op - */ -#define qlcnic_gb_rx_flowctl(config_word) \ - ((config_word) |= 1 << 5) -#define qlcnic_gb_get_rx_flowctl(config_word) \ - _qlcnic_crb_get_bit((config_word), 5) -#define qlcnic_gb_unset_rx_flowctl(config_word) \ - ((config_word) &= ~(1 << 5)) - -/* - * NIU GB Pause Ctl Register - */ - -#define qlcnic_gb_set_gb0_mask(config_word) \ - ((config_word) |= 1 << 0) -#define qlcnic_gb_set_gb1_mask(config_word) \ - ((config_word) |= 1 << 2) -#define qlcnic_gb_set_gb2_mask(config_word) \ - ((config_word) |= 1 << 4) -#define qlcnic_gb_set_gb3_mask(config_word) \ - ((config_word) |= 1 << 6) - -#define qlcnic_gb_get_gb0_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 0) -#define qlcnic_gb_get_gb1_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 2) -#define qlcnic_gb_get_gb2_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 4) -#define qlcnic_gb_get_gb3_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 6) - -#define qlcnic_gb_unset_gb0_mask(config_word) \ - ((config_word) &= ~(1 << 0)) -#define qlcnic_gb_unset_gb1_mask(config_word) \ - ((config_word) &= ~(1 << 2)) -#define qlcnic_gb_unset_gb2_mask(config_word) \ - ((config_word) &= ~(1 << 4)) -#define qlcnic_gb_unset_gb3_mask(config_word) \ - ((config_word) &= ~(1 << 6)) - -/* - * NIU XG Pause Ctl Register - * - * Bit 0 : xg0_mask => 1:disable tx pause frames - * Bit 1 : xg0_request => 1:request single pause frame - * Bit 2 : xg0_on_off => 1:request is pause on, 0:off - * Bit 3 : xg1_mask => 1:disable tx pause frames - * Bit 4 : xg1_request => 1:request single pause frame - * Bit 5 : xg1_on_off => 1:request is pause on, 0:off - */ - -#define qlcnic_xg_set_xg0_mask(config_word) \ - ((config_word) |= 1 << 0) -#define qlcnic_xg_set_xg1_mask(config_word) \ - ((config_word) |= 1 << 3) - -#define qlcnic_xg_get_xg0_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 0) -#define qlcnic_xg_get_xg1_mask(config_word) \ - _qlcnic_crb_get_bit((config_word), 3) - -#define qlcnic_xg_unset_xg0_mask(config_word) \ - ((config_word) &= ~(1 << 0)) -#define qlcnic_xg_unset_xg1_mask(config_word) \ - ((config_word) &= ~(1 << 3)) - -/* - * NIU XG Pause Ctl Register - * - * Bit 0 : xg0_mask => 1:disable tx pause frames - * Bit 1 : xg0_request => 1:request single pause frame - * Bit 2 : xg0_on_off => 1:request is pause on, 0:off - * Bit 3 : xg1_mask => 
1:disable tx pause frames - * Bit 4 : xg1_request => 1:request single pause frame - * Bit 5 : xg1_on_off => 1:request is pause on, 0:off - */ - -/* - * PHY-Specific MII control/status registers. - */ -#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4 -#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17 - -/* - * PHY-Specific Status Register (reg 17). - * - * Bit 0 : jabber => 1:jabber detected, 0:not - * Bit 1 : polarity => 1:polarity reversed, 0:normal - * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled - * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled - * Bit 4 : energydetect => 1:sleep, 0:active - * Bit 5 : downshift => 1:downshift, 0:no downshift - * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) - * Bits 7-9 : cablelen => not valid in 10Mb/s mode - * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m - * Bit 10 : link => 1:link up, 0:link down - * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet - * Bit 12 : pagercvd => 1:page received, 0:page not received - * Bit 13 : duplex => 1:full duplex, 0:half duplex - * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd - */ - -#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) - -#define qlcnic_set_phy_speed(config_word, val) \ - ((config_word) |= ((val & 0x03) << 14)) -#define qlcnic_set_phy_duplex(config_word) \ - ((config_word) |= 1 << 13) -#define qlcnic_clear_phy_duplex(config_word) \ - ((config_word) &= ~(1 << 13)) - -#define qlcnic_get_phy_link(config_word) \ - _qlcnic_crb_get_bit(config_word, 10) -#define qlcnic_get_phy_duplex(config_word) \ - _qlcnic_crb_get_bit(config_word, 13) - -#define QLCNIC_NIU_NON_PROMISC_MODE 0 -#define QLCNIC_NIU_PROMISC_MODE 1 -#define QLCNIC_NIU_ALLMULTI_MODE 2 - -struct crb_128M_2M_sub_block_map { - unsigned valid; - unsigned start_128M; - unsigned end_128M; - unsigned start_2M; -}; - -struct crb_128M_2M_block_map{ - struct crb_128M_2M_sub_block_map sub_block[16]; -}; -#endif /* __QLCNIC_HDR_H_ */ diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c deleted file mode 100644 index 74e9d7b..0000000 --- a/drivers/net/qlcnic/qlcnic_hw.c +++ /dev/null @@ -1,1787 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. 
- */ - -#include "qlcnic.h" - -#include -#include -#include - -#define MASK(n) ((1ULL<<(n))-1) -#define OCM_WIN_P3P(addr) (addr & 0xffc0000) - -#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) - -#define CRB_BLK(off) ((off >> 20) & 0x3f) -#define CRB_SUBBLK(off) ((off >> 16) & 0xf) -#define CRB_WINDOW_2M (0x130060) -#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) -#define CRB_INDIRECT_2M (0x1e0000UL) - - -#ifndef readq -static inline u64 readq(void __iomem *addr) -{ - return readl(addr) | (((u64) readl(addr + 4)) << 32LL); -} -#endif - -#ifndef writeq -static inline void writeq(u64 val, void __iomem *addr) -{ - writel(((u32) (val)), (addr)); - writel(((u32) (val >> 32)), (addr + 4)); -} -#endif - -static const struct crb_128M_2M_block_map -crb_128M_2M_map[64] __cacheline_aligned_in_smp = { - {{{0, 0, 0, 0} } }, /* 0: PCI */ - {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ - {1, 0x0110000, 0x0120000, 0x130000}, - {1, 0x0120000, 0x0122000, 0x124000}, - {1, 0x0130000, 0x0132000, 0x126000}, - {1, 0x0140000, 0x0142000, 0x128000}, - {1, 0x0150000, 0x0152000, 0x12a000}, - {1, 0x0160000, 0x0170000, 0x110000}, - {1, 0x0170000, 0x0172000, 0x12e000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x01e0000, 0x01e0800, 0x122000}, - {0, 0x0000000, 0x0000000, 0x000000} } }, - {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ - {{{0, 0, 0, 0} } }, /* 3: */ - {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ - {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ - {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ - {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ - {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x08f0000, 0x08f2000, 0x172000} } }, - {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x09f0000, 0x09f2000, 0x176000} } }, - {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, 
- {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x0af0000, 0x0af2000, 0x17a000} } }, - {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, - {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ - {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ - {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ - {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ - {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ - {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ - {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ - {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ - {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ - {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ - {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ - {{{0, 0, 0, 0} } }, /* 23: */ - {{{0, 0, 0, 0} } }, /* 24: */ - {{{0, 0, 0, 0} } }, /* 25: */ - {{{0, 0, 0, 0} } }, /* 26: */ - {{{0, 0, 0, 0} } }, /* 27: */ - {{{0, 0, 0, 0} } }, /* 28: */ - {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ - {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ - {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ - {{{0} } }, /* 32: PCI */ - {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ - {1, 0x2110000, 0x2120000, 0x130000}, - {1, 0x2120000, 0x2122000, 0x124000}, - {1, 0x2130000, 0x2132000, 0x126000}, - {1, 0x2140000, 0x2142000, 0x128000}, - {1, 0x2150000, 0x2152000, 0x12a000}, - {1, 0x2160000, 0x2170000, 0x110000}, - {1, 0x2170000, 0x2172000, 0x12e000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000}, - {0, 0x0000000, 0x0000000, 0x000000} } }, - {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ - {{{0} } }, /* 35: */ - {{{0} } }, /* 36: */ - {{{0} } }, /* 37: */ - {{{0} } }, /* 38: */ - {{{0} } }, /* 39: */ - {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ - {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ - {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ - {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ - {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ - {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ - {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ - {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ - {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ - {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ - {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ - {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ - {{{0} } }, /* 52: */ - {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ - {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: 
RPMX8 */ - {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ - {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ - {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ - {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ - {{{0} } }, /* 59: I2C0 */ - {{{0} } }, /* 60: I2C1 */ - {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ - {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ - {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ -}; - -/* - * top 12 bits of crb internal address (hub, agent) - */ -static const unsigned crb_hub_agt[64] = { - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PS, - QLCNIC_HW_CRB_HUB_AGT_ADR_MN, - QLCNIC_HW_CRB_HUB_AGT_ADR_MS, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_SRE, - QLCNIC_HW_CRB_HUB_AGT_ADR_NIU, - QLCNIC_HW_CRB_HUB_AGT_ADR_QMN, - QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0, - QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1, - QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2, - QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3, - QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, - QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, - QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4, - QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGND, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI, - QLCNIC_HW_CRB_HUB_AGT_ADR_SN, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_EG, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PS, - QLCNIC_HW_CRB_HUB_AGT_ADR_CAM, - 0, - 0, - 0, - 0, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7, - QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, - QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, - QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8, - QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9, - QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_SMB, - QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0, - QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1, - 0, - QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC, - 0, -}; - -/* PCI Windowing for DDR regions. */ - -#define QLCNIC_PCIE_SEM_TIMEOUT 10000 - -int -qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) -{ - int done = 0, timeout = 0; - - while (!done) { - done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem))); - if (done == 1) - break; - if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { - dev_err(&adapter->pdev->dev, - "Failed to acquire sem=%d lock; holdby=%d\n", - sem, id_reg ? 
QLCRD32(adapter, id_reg) : -1); - return -EIO; - } - msleep(1); - } - - if (id_reg) - QLCWR32(adapter, id_reg, adapter->portnum); - - return 0; -} - -void -qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) -{ - QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem))); -} - -static int -qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, - struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) -{ - u32 i, producer, consumer; - struct qlcnic_cmd_buffer *pbuf; - struct cmd_desc_type0 *cmd_desc; - struct qlcnic_host_tx_ring *tx_ring; - - i = 0; - - if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) - return -EIO; - - tx_ring = adapter->tx_ring; - __netif_tx_lock_bh(tx_ring->txq); - - producer = tx_ring->producer; - consumer = tx_ring->sw_consumer; - - if (nr_desc >= qlcnic_tx_avail(tx_ring)) { - netif_tx_stop_queue(tx_ring->txq); - smp_mb(); - if (qlcnic_tx_avail(tx_ring) > nr_desc) { - if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) - netif_tx_wake_queue(tx_ring->txq); - } else { - adapter->stats.xmit_off++; - __netif_tx_unlock_bh(tx_ring->txq); - return -EBUSY; - } - } - - do { - cmd_desc = &cmd_desc_arr[i]; - - pbuf = &tx_ring->cmd_buf_arr[producer]; - pbuf->skb = NULL; - pbuf->frag_count = 0; - - memcpy(&tx_ring->desc_head[producer], - &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); - - producer = get_next_index(producer, tx_ring->num_desc); - i++; - - } while (i != nr_desc); - - tx_ring->producer = producer; - - qlcnic_update_cmd_producer(adapter, tx_ring); - - __netif_tx_unlock_bh(tx_ring->txq); - - return 0; -} - -static int -qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, - __le16 vlan_id, unsigned op) -{ - struct qlcnic_nic_req req; - struct qlcnic_mac_req *mac_req; - struct qlcnic_vlan_req *vlan_req; - u64 word; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); - - word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - mac_req = (struct qlcnic_mac_req *)&req.words[0]; - mac_req->op = op; - memcpy(mac_req->mac_addr, addr, 6); - - vlan_req = (struct qlcnic_vlan_req *)&req.words[1]; - vlan_req->vlan_id = vlan_id; - - return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); -} - -static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr) -{ - struct list_head *head; - struct qlcnic_mac_list_s *cur; - - /* look up if already exists */ - list_for_each(head, &adapter->mac_list) { - cur = list_entry(head, struct qlcnic_mac_list_s, list); - if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) - return 0; - } - - cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC); - if (cur == NULL) { - dev_err(&adapter->netdev->dev, - "failed to add mac address filter\n"); - return -ENOMEM; - } - memcpy(cur->mac_addr, addr, ETH_ALEN); - - if (qlcnic_sre_macaddr_change(adapter, - cur->mac_addr, 0, QLCNIC_MAC_ADD)) { - kfree(cur); - return -EIO; - } - - list_add_tail(&cur->list, &adapter->mac_list); - return 0; -} - -void qlcnic_set_multi(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct netdev_hw_addr *ha; - static const u8 bcast_addr[ETH_ALEN] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - }; - u32 mode = VPORT_MISS_MODE_DROP; - - if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) - return; - - qlcnic_nic_add_mac(adapter, adapter->mac_addr); - qlcnic_nic_add_mac(adapter, bcast_addr); - - if (netdev->flags & IFF_PROMISC) { - if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) - mode = 
VPORT_MISS_MODE_ACCEPT_ALL; - goto send_fw_cmd; - } - - if ((netdev->flags & IFF_ALLMULTI) || - (netdev_mc_count(netdev) > adapter->max_mc_count)) { - mode = VPORT_MISS_MODE_ACCEPT_MULTI; - goto send_fw_cmd; - } - - if (!netdev_mc_empty(netdev)) { - netdev_for_each_mc_addr(ha, netdev) { - qlcnic_nic_add_mac(adapter, ha->addr); - } - } - -send_fw_cmd: - if (mode == VPORT_MISS_MODE_ACCEPT_ALL) { - qlcnic_alloc_lb_filters_mem(adapter); - adapter->mac_learn = 1; - } else { - adapter->mac_learn = 0; - } - - qlcnic_nic_set_promisc(adapter, mode); -} - -int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) -{ - struct qlcnic_nic_req req; - u64 word; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE | - ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(mode); - - return qlcnic_send_cmd_descs(adapter, - (struct cmd_desc_type0 *)&req, 1); -} - -void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) -{ - struct qlcnic_mac_list_s *cur; - struct list_head *head = &adapter->mac_list; - - while (!list_empty(head)) { - cur = list_entry(head->next, struct qlcnic_mac_list_s, list); - qlcnic_sre_macaddr_change(adapter, - cur->mac_addr, 0, QLCNIC_MAC_DEL); - list_del(&cur->list); - kfree(cur); - } -} - -void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) -{ - struct qlcnic_filter *tmp_fil; - struct hlist_node *tmp_hnode, *n; - struct hlist_head *head; - int i; - - for (i = 0; i < adapter->fhash.fmax; i++) { - head = &(adapter->fhash.fhead[i]); - - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) - { - if (jiffies > - (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) { - qlcnic_sre_macaddr_change(adapter, - tmp_fil->faddr, tmp_fil->vlan_id, - tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : - QLCNIC_MAC_DEL); - spin_lock_bh(&adapter->mac_learn_lock); - adapter->fhash.fnum--; - hlist_del(&tmp_fil->fnode); - spin_unlock_bh(&adapter->mac_learn_lock); - kfree(tmp_fil); - } - } - } -} - -void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) -{ - struct qlcnic_filter *tmp_fil; - struct hlist_node *tmp_hnode, *n; - struct hlist_head *head; - int i; - - for (i = 0; i < adapter->fhash.fmax; i++) { - head = &(adapter->fhash.fhead[i]); - - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { - qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr, - tmp_fil->vlan_id, tmp_fil->vlan_id ? - QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL); - spin_lock_bh(&adapter->mac_learn_lock); - adapter->fhash.fnum--; - hlist_del(&tmp_fil->fnode); - spin_unlock_bh(&adapter->mac_learn_lock); - kfree(tmp_fil); - } - } -} - -int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag) -{ - struct qlcnic_nic_req req; - int rv; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK | - ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32)); - - req.words[0] = cpu_to_le64(flag); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n", - flag ? 
"Set" : "Reset"); - return rv; -} - -int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) -{ - if (qlcnic_set_fw_loopback(adapter, mode)) - return -EIO; - - if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) { - qlcnic_set_fw_loopback(adapter, mode); - return -EIO; - } - - msleep(1000); - return 0; -} - -void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter) -{ - int mode = VPORT_MISS_MODE_DROP; - struct net_device *netdev = adapter->netdev; - - qlcnic_set_fw_loopback(adapter, 0); - - if (netdev->flags & IFF_PROMISC) - mode = VPORT_MISS_MODE_ACCEPT_ALL; - else if (netdev->flags & IFF_ALLMULTI) - mode = VPORT_MISS_MODE_ACCEPT_MULTI; - - qlcnic_nic_set_promisc(adapter, mode); - msleep(1000); -} - -/* - * Send the interrupt coalescing parameter set by ethtool to the card. - */ -int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter) -{ - struct qlcnic_nic_req req; - int rv; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE | - ((u64) adapter->portnum << 16)); - - req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32); - req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets | - ((u64) adapter->ahw->coal.rx_time_us) << 16); - req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out | - ((u64) adapter->ahw->coal.type) << 32 | - ((u64) adapter->ahw->coal.sts_ring_mask) << 40); - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->netdev->dev, - "Could not send interrupt coalescing parameters\n"); - return rv; -} - -int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable) -{ - struct qlcnic_nic_req req; - u64 word; - int rv; - - if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) - return 0; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(enable); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->netdev->dev, - "Could not send configure hw lro request\n"); - - return rv; -} - -int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) -{ - struct qlcnic_nic_req req; - u64 word; - int rv; - - if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable) - return 0; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING | - ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64(enable); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv != 0) - dev_err(&adapter->netdev->dev, - "Could not send configure bridge mode request\n"); - - adapter->flags ^= QLCNIC_BRIDGE_ENABLED; - - return rv; -} - - -#define RSS_HASHTYPE_IP_TCP 0x3 - -int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable) -{ - struct qlcnic_nic_req req; - u64 word; - int i, rv; - - static const u64 key[] = { - 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, - 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, - 0x255b0ec26d5a56daULL - }; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - /* - * RSS request: - 
* bits 3-0: hash_method
- *	5-4: hash_type_ipv4
- *	7-6: hash_type_ipv6
- *	8: enable
- *	9: use indirection table
- *	47-10: reserved
- *	63-48: indirection table mask
- */
-	word =  ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
-		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
-		((u64)(enable & 0x1) << 8) |
-		((0x7ULL) << 48);
-	req.words[0] = cpu_to_le64(word);
-	for (i = 0; i < 5; i++)
-		req.words[i+1] = cpu_to_le64(key[i]);
-
-	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
-	if (rv != 0)
-		dev_err(&adapter->netdev->dev, "could not configure RSS\n");
-
-	return rv;
-}
-
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
-{
-	struct qlcnic_nic_req req;
-	struct qlcnic_ipaddr *ipa;
-	u64 word;
-	int rv;
-
-	memset(&req, 0, sizeof(struct qlcnic_nic_req));
-	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
-
-	word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
-	req.req_hdr = cpu_to_le64(word);
-
-	req.words[0] = cpu_to_le64(cmd);
-	ipa = (struct qlcnic_ipaddr *)&req.words[1];
-	ipa->ipv4 = ip;
-
-	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
-	if (rv != 0)
-		dev_err(&adapter->netdev->dev,
-			"could not notify %s IP 0x%x request\n",
-			(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
-
-	return rv;
-}
-
-int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
-{
-	struct qlcnic_nic_req req;
-	u64 word;
-	int rv;
-
-	memset(&req, 0, sizeof(struct qlcnic_nic_req));
-	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
-
-	word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
-	req.req_hdr = cpu_to_le64(word);
-	req.words[0] = cpu_to_le64(enable | (enable << 8));
-
-	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
-	if (rv != 0)
-		dev_err(&adapter->netdev->dev,
-			"could not configure link notification\n");
-
-	return rv;
-}
-
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
-{
-	struct qlcnic_nic_req req;
-	u64 word;
-	int rv;
-
-	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
-		return 0;
-
-	memset(&req, 0, sizeof(struct qlcnic_nic_req));
-	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
-
-	word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
-		((u64)adapter->portnum << 16) |
-		((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
-
-	req.req_hdr = cpu_to_le64(word);
-
-	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
-	if (rv != 0)
-		dev_err(&adapter->netdev->dev,
-			"could not cleanup lro flows\n");
-
-	return rv;
-}
-
-/*
- * qlcnic_change_mtu - Change the Maximum Transfer Unit
- * @returns 0 on success, negative on failure
- */
-
-int qlcnic_change_mtu(struct net_device *netdev, int mtu)
-{
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	int rc = 0;
-
-	if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
-		dev_err(&adapter->netdev->dev,
-			"mtu must be between %d and %d bytes\n",
-			P3P_MIN_MTU, P3P_MAX_MTU);
-		return -EINVAL;
-	}
-
-	rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
-
-	if (!rc)
-		netdev->mtu = mtu;
-
-	return rc;
-}
-
-
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
-{
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-
-	if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
-		u32 changed = features ^ netdev->features;
-		features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
-	}
-
-	if (!(features & NETIF_F_RXCSUM))
-		features &= ~NETIF_F_LRO;
-
-	return features;
-}
-
-
-int qlcnic_set_features(struct net_device *netdev, u32 features)
-{
-	struct qlcnic_adapter *adapter =
netdev_priv(netdev); - u32 changed = netdev->features ^ features; - int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0; - - if (!(changed & NETIF_F_LRO)) - return 0; - - netdev->features = features ^ NETIF_F_LRO; - - if (qlcnic_config_hw_lro(adapter, hw_lro)) - return -EIO; - - if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter)) - return -EIO; - - return 0; -} - -/* - * Changes the CRB window to the specified window. - */ - /* Returns < 0 if off is not valid, - * 1 if window access is needed. 'off' is set to offset from - * CRB space in 128M pci map - * 0 if no window access is needed. 'off' is set to 2M addr - * In: 'off' is offset from base in 128M pci map - */ -static int -qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter, - ulong off, void __iomem **addr) -{ - const struct crb_128M_2M_sub_block_map *m; - - if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE)) - return -EINVAL; - - off -= QLCNIC_PCI_CRBSPACE; - - /* - * Try direct map - */ - m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; - - if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { - *addr = adapter->ahw->pci_base0 + m->start_2M + - (off - m->start_128M); - return 0; - } - - /* - * Not in direct map, use crb window - */ - *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); - return 1; -} - -/* - * In: 'off' is offset from CRB space in 128M pci map - * Out: 'off' is 2M pci map addr - * side effect: lock crb window - */ -static int -qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) -{ - u32 window; - void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M; - - off -= QLCNIC_PCI_CRBSPACE; - - window = CRB_HI(off); - if (window == 0) { - dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off); - return -EIO; - } - - writel(window, addr); - if (readl(addr) != window) { - if (printk_ratelimit()) - dev_warn(&adapter->pdev->dev, - "failed to set CRB window to %d off 0x%lx\n", - window, off); - return -EIO; - } - return 0; -} - -int -qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data) -{ - unsigned long flags; - int rv; - void __iomem *addr = NULL; - - rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); - - if (rv == 0) { - writel(data, addr); - return 0; - } - - if (rv > 0) { - /* indirect access */ - write_lock_irqsave(&adapter->ahw->crb_lock, flags); - crb_win_lock(adapter); - rv = qlcnic_pci_set_crbwindow_2M(adapter, off); - if (!rv) - writel(data, addr); - crb_win_unlock(adapter); - write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); - return rv; - } - - dev_err(&adapter->pdev->dev, - "%s: invalid offset: 0x%016lx\n", __func__, off); - dump_stack(); - return -EIO; -} - -u32 -qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off) -{ - unsigned long flags; - int rv; - u32 data = -1; - void __iomem *addr = NULL; - - rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); - - if (rv == 0) - return readl(addr); - - if (rv > 0) { - /* indirect access */ - write_lock_irqsave(&adapter->ahw->crb_lock, flags); - crb_win_lock(adapter); - if (!qlcnic_pci_set_crbwindow_2M(adapter, off)) - data = readl(addr); - crb_win_unlock(adapter); - write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); - return data; - } - - dev_err(&adapter->pdev->dev, - "%s: invalid offset: 0x%016lx\n", __func__, off); - dump_stack(); - return -1; -} - - -void __iomem * -qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset) -{ - void __iomem *addr = NULL; - - WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr)); 
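
Both register accessors above branch on the three-way return of
qlcnic_pci_get_crb_addr_2M(): a negative value means the offset lies
outside CRB space, 0 means it falls in the direct 2M map and can be
touched with plain readl()/writel(), and 1 means the CRB window must
first be switched under crb_lock. A minimal stand-alone sketch of that
contract (the bounds and the even/odd block rule below are invented
placeholders for the real crb_128M_2M_map lookup, not driver code):

#include <stdio.h>

#define CRB_BASE 0x04000000UL	/* assumed stand-in for QLCNIC_PCI_CRBSPACE */
#define CRB_MAX  0x08000000UL	/* assumed stand-in for QLCNIC_CRB_MAX */

/* Same contract as qlcnic_pci_get_crb_addr_2M():
 * <0 invalid, 0 direct 2M map, 1 window access needed.
 */
static int get_crb_addr(unsigned long off)
{
	if (off >= CRB_MAX || off < CRB_BASE)
		return -1;
	/* invented rule: even 1MB blocks direct-mapped, odd ones windowed */
	return (int)(((off - CRB_BASE) >> 20) & 1);
}

int main(void)
{
	switch (get_crb_addr(CRB_BASE + 0x130060)) {
	case 0:
		printf("direct map: plain readl/writel\n");
		break;
	case 1:
		printf("indirect: lock, set CRB_WINDOW_2M, access, unlock\n");
		break;
	default:
		printf("invalid CRB offset\n");
	}
	return 0;
}

Centralizing the invalid/direct/indirect decision in one helper is what
lets the read and write paths share the same locking policy instead of
each duplicating the map walk.
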
- - return addr; -} - - -static int -qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter, - u64 addr, u32 *start) -{ - u32 window; - - window = OCM_WIN_P3P(addr); - - writel(window, adapter->ahw->ocm_win_crb); - /* read back to flush */ - readl(adapter->ahw->ocm_win_crb); - - *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); - return 0; -} - -static int -qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off, - u64 *data, int op) -{ - void __iomem *addr; - int ret; - u32 start; - - mutex_lock(&adapter->ahw->mem_lock); - - ret = qlcnic_pci_set_window_2M(adapter, off, &start); - if (ret != 0) - goto unlock; - - addr = adapter->ahw->pci_base0 + start; - - if (op == 0) /* read */ - *data = readq(addr); - else /* write */ - writeq(*data, addr); - -unlock: - mutex_unlock(&adapter->ahw->mem_lock); - - return ret; -} - -void -qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) -{ - void __iomem *addr = adapter->ahw->pci_base0 + - QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); - - mutex_lock(&adapter->ahw->mem_lock); - *data = readq(addr); - mutex_unlock(&adapter->ahw->mem_lock); -} - -void -qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) -{ - void __iomem *addr = adapter->ahw->pci_base0 + - QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); - - mutex_lock(&adapter->ahw->mem_lock); - writeq(data, addr); - mutex_unlock(&adapter->ahw->mem_lock); -} - -#define MAX_CTL_CHECK 1000 - -int -qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, - u64 off, u64 data) -{ - int i, j, ret; - u32 temp, off8; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P3 onward, test agent base for MIU and SIU is same */ - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, - QLCNIC_ADDR_QDR_NET_MAX)) { - mem_crb = qlcnic_get_ioaddr(adapter, - QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { - mem_crb = qlcnic_get_ioaddr(adapter, - QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) - return qlcnic_pci_mem_access_direct(adapter, off, &data, 1); - - return -EIO; - -correct: - off8 = off & ~0xf; - - mutex_lock(&adapter->ahw->mem_lock); - - writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); - - i = 0; - writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE), - (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - ret = -EIO; - goto done; - } - - i = (off & 0xf) ? 0 : 2; - writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)), - mem_crb + MIU_TEST_AGT_WRDATA(i)); - writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)), - mem_crb + MIU_TEST_AGT_WRDATA(i+1)); - i = (off & 0xf) ? 
2 : 0; - - writel(data & 0xffffffff, - mem_crb + MIU_TEST_AGT_WRDATA(i)); - writel((data >> 32) & 0xffffffff, - mem_crb + MIU_TEST_AGT_WRDATA(i+1)); - - writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), - (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to write through agent\n"); - ret = -EIO; - } else - ret = 0; - -done: - mutex_unlock(&adapter->ahw->mem_lock); - - return ret; -} - -int -qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, - u64 off, u64 *data) -{ - int j, ret; - u32 temp, off8; - u64 val; - void __iomem *mem_crb; - - /* Only 64-bit aligned access */ - if (off & 7) - return -EIO; - - /* P3 onward, test agent base for MIU and SIU is same */ - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, - QLCNIC_ADDR_QDR_NET_MAX)) { - mem_crb = qlcnic_get_ioaddr(adapter, - QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { - mem_crb = qlcnic_get_ioaddr(adapter, - QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE); - goto correct; - } - - if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) { - return qlcnic_pci_mem_access_direct(adapter, - off, data, 0); - } - - return -EIO; - -correct: - off8 = off & ~0xf; - - mutex_lock(&adapter->ahw->mem_lock); - - writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); - writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); - writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&adapter->pdev->dev, - "failed to read through agent\n"); - ret = -EIO; - } else { - off8 = MIU_TEST_AGT_RDDATA_LO; - if (off & 0xf) - off8 = MIU_TEST_AGT_RDDATA_UPPER_LO; - - temp = readl(mem_crb + off8 + 4); - val = (u64)temp << 32; - val |= readl(mem_crb + off8); - *data = val; - ret = 0; - } - - mutex_unlock(&adapter->ahw->mem_lock); - - return ret; -} - -int qlcnic_get_board_info(struct qlcnic_adapter *adapter) -{ - int offset, board_type, magic; - struct pci_dev *pdev = adapter->pdev; - - offset = QLCNIC_FW_MAGIC_OFFSET; - if (qlcnic_rom_fast_read(adapter, offset, &magic)) - return -EIO; - - if (magic != QLCNIC_BDINFO_MAGIC) { - dev_err(&pdev->dev, "invalid board config, magic=%08x\n", - magic); - return -EIO; - } - - offset = QLCNIC_BRDTYPE_OFFSET; - if (qlcnic_rom_fast_read(adapter, offset, &board_type)) - return -EIO; - - adapter->ahw->board_type = board_type; - - if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { - u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I); - if ((gpio & 0x8000) == 0) - board_type = QLCNIC_BRDTYPE_P3P_10G_TP; - } - - switch (board_type) { - case QLCNIC_BRDTYPE_P3P_HMEZ: - case QLCNIC_BRDTYPE_P3P_XG_LOM: - case QLCNIC_BRDTYPE_P3P_10G_CX4: - case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: - case QLCNIC_BRDTYPE_P3P_IMEZ: - case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: - case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: - case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: - case QLCNIC_BRDTYPE_P3P_10G_XFP: - case QLCNIC_BRDTYPE_P3P_10000_BASE_T: - adapter->ahw->port_type = QLCNIC_XGBE; - break; - case QLCNIC_BRDTYPE_P3P_REF_QG: - case QLCNIC_BRDTYPE_P3P_4_GB: - case QLCNIC_BRDTYPE_P3P_4_GB_MM: - 
adapter->ahw->port_type = QLCNIC_GBE; - break; - case QLCNIC_BRDTYPE_P3P_10G_TP: - adapter->ahw->port_type = (adapter->portnum < 2) ? - QLCNIC_XGBE : QLCNIC_GBE; - break; - default: - dev_err(&pdev->dev, "unknown board type %x\n", board_type); - adapter->ahw->port_type = QLCNIC_XGBE; - break; - } - - return 0; -} - -int -qlcnic_wol_supported(struct qlcnic_adapter *adapter) -{ - u32 wol_cfg; - - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV); - if (wol_cfg & (1UL << adapter->portnum)) { - wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG); - if (wol_cfg & (1 << adapter->portnum)) - return 1; - } - - return 0; -} - -int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) -{ - struct qlcnic_nic_req req; - int rv; - u64 word; - - memset(&req, 0, sizeof(struct qlcnic_nic_req)); - req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); - - word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16); - req.req_hdr = cpu_to_le64(word); - - req.words[0] = cpu_to_le64((u64)rate << 32); - req.words[1] = cpu_to_le64(state); - - rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); - if (rv) - dev_err(&adapter->pdev->dev, "LED configuration failed.\n"); - - return rv; -} - -/* FW dump related functions */ -static u32 -qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, - u32 *buffer) -{ - int i; - u32 addr, data; - struct __crb *crb = &entry->region.crb; - void __iomem *base = adapter->ahw->pci_base0; - - addr = crb->addr; - - for (i = 0; i < crb->no_ops; i++) { - QLCNIC_RD_DUMP_REG(addr, base, &data); - *buffer++ = cpu_to_le32(addr); - *buffer++ = cpu_to_le32(data); - addr += crb->stride; - } - return crb->no_ops * 2 * sizeof(u32); -} - -static u32 -qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, - struct qlcnic_dump_entry *entry, u32 *buffer) -{ - int i, k, timeout = 0; - void __iomem *base = adapter->ahw->pci_base0; - u32 addr, data; - u8 opcode, no_ops; - struct __ctrl *ctr = &entry->region.ctrl; - struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr; - - addr = ctr->addr; - no_ops = ctr->no_ops; - - for (i = 0; i < no_ops; i++) { - k = 0; - opcode = 0; - for (k = 0; k < 8; k++) { - if (!(ctr->opcode & (1 << k))) - continue; - switch (1 << k) { - case QLCNIC_DUMP_WCRB: - QLCNIC_WR_DUMP_REG(addr, base, ctr->val1); - break; - case QLCNIC_DUMP_RWCRB: - QLCNIC_RD_DUMP_REG(addr, base, &data); - QLCNIC_WR_DUMP_REG(addr, base, data); - break; - case QLCNIC_DUMP_ANDCRB: - QLCNIC_RD_DUMP_REG(addr, base, &data); - QLCNIC_WR_DUMP_REG(addr, base, - (data & ctr->val2)); - break; - case QLCNIC_DUMP_ORCRB: - QLCNIC_RD_DUMP_REG(addr, base, &data); - QLCNIC_WR_DUMP_REG(addr, base, - (data | ctr->val3)); - break; - case QLCNIC_DUMP_POLLCRB: - while (timeout <= ctr->timeout) { - QLCNIC_RD_DUMP_REG(addr, base, &data); - if ((data & ctr->val2) == ctr->val1) - break; - msleep(1); - timeout++; - } - if (timeout > ctr->timeout) { - dev_info(&adapter->pdev->dev, - "Timed out, aborting poll CRB\n"); - return -EINVAL; - } - break; - case QLCNIC_DUMP_RD_SAVE: - if (ctr->index_a) - addr = t_hdr->saved_state[ctr->index_a]; - QLCNIC_RD_DUMP_REG(addr, base, &data); - t_hdr->saved_state[ctr->index_v] = data; - break; - case QLCNIC_DUMP_WRT_SAVED: - if (ctr->index_v) - data = t_hdr->saved_state[ctr->index_v]; - else - data = ctr->val1; - if (ctr->index_a) - addr = t_hdr->saved_state[ctr->index_a]; - QLCNIC_WR_DUMP_REG(addr, base, data); - break; - case QLCNIC_DUMP_MOD_SAVE_ST: - data = t_hdr->saved_state[ctr->index_v]; - data <<= ctr->shl_val; - 
data >>= ctr->shr_val; - if (ctr->val2) - data &= ctr->val2; - data |= ctr->val3; - data += ctr->val1; - t_hdr->saved_state[ctr->index_v] = data; - break; - default: - dev_info(&adapter->pdev->dev, - "Unknown opcode\n"); - break; - } - } - addr += ctr->stride; - } - return 0; -} - -static u32 -qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, - u32 *buffer) -{ - int loop; - u32 val, data = 0; - struct __mux *mux = &entry->region.mux; - void __iomem *base = adapter->ahw->pci_base0; - - val = mux->val; - for (loop = 0; loop < mux->no_ops; loop++) { - QLCNIC_WR_DUMP_REG(mux->addr, base, val); - QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data); - *buffer++ = cpu_to_le32(val); - *buffer++ = cpu_to_le32(data); - val += mux->val_stride; - } - return 2 * mux->no_ops * sizeof(u32); -} - -static u32 -qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, - u32 *buffer) -{ - int i, loop; - u32 cnt, addr, data, que_id = 0; - void __iomem *base = adapter->ahw->pci_base0; - struct __queue *que = &entry->region.que; - - addr = que->read_addr; - cnt = que->read_addr_cnt; - - for (loop = 0; loop < que->no_ops; loop++) { - QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id); - addr = que->read_addr; - for (i = 0; i < cnt; i++) { - QLCNIC_RD_DUMP_REG(addr, base, &data); - *buffer++ = cpu_to_le32(data); - addr += que->read_addr_stride; - } - que_id += que->stride; - } - return que->no_ops * cnt * sizeof(u32); -} - -static u32 -qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, - u32 *buffer) -{ - int i; - u32 data; - void __iomem *addr; - struct __ocm *ocm = &entry->region.ocm; - - addr = adapter->ahw->pci_base0 + ocm->read_addr; - for (i = 0; i < ocm->no_ops; i++) { - data = readl(addr); - *buffer++ = cpu_to_le32(data); - addr += ocm->read_addr_stride; - } - return ocm->no_ops * sizeof(u32); -} - -static u32 -qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, - u32 *buffer) -{ - int i, count = 0; - u32 fl_addr, size, val, lck_val, addr; - struct __mem *rom = &entry->region.mem; - void __iomem *base = adapter->ahw->pci_base0; - - fl_addr = rom->addr; - size = rom->size/4; -lock_try: - lck_val = readl(base + QLCNIC_FLASH_SEM2_LK); - if (!lck_val && count < MAX_CTL_CHECK) { - msleep(10); - count++; - goto lock_try; - } - writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID)); - for (i = 0; i < size; i++) { - addr = fl_addr & 0xFFFF0000; - QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr); - addr = LSW(fl_addr) + FLASH_ROM_DATA; - QLCNIC_RD_DUMP_REG(addr, base, &val); - fl_addr += 4; - *buffer++ = cpu_to_le32(val); - } - readl(base + QLCNIC_FLASH_SEM2_ULK); - return rom->size; -} - -static u32 -qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter, - struct qlcnic_dump_entry *entry, u32 *buffer) -{ - int i; - u32 cnt, val, data, addr; - void __iomem *base = adapter->ahw->pci_base0; - struct __cache *l1 = &entry->region.cache; - - val = l1->init_tag_val; - - for (i = 0; i < l1->no_ops; i++) { - QLCNIC_WR_DUMP_REG(l1->addr, base, val); - QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val)); - addr = l1->read_addr; - cnt = l1->read_addr_num; - while (cnt) { - QLCNIC_RD_DUMP_REG(addr, base, &data); - *buffer++ = cpu_to_le32(data); - addr += l1->read_addr_stride; - cnt--; - } - val += l1->stride; - } - return l1->no_ops * l1->read_addr_num * sizeof(u32); -} - -static u32 -qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter, - struct qlcnic_dump_entry *entry, u32 *buffer) -{ - int i; - u32 cnt, 
val, data, addr;
-	u8 poll_mask, poll_to, time_out = 0;
-	void __iomem *base = adapter->ahw->pci_base0;
-	struct __cache *l2 = &entry->region.cache;
-
-	val = l2->init_tag_val;
-	poll_mask = LSB(MSW(l2->ctrl_val));
-	poll_to = MSB(MSW(l2->ctrl_val));
-
-	for (i = 0; i < l2->no_ops; i++) {
-		QLCNIC_WR_DUMP_REG(l2->addr, base, val);
-		if (LSW(l2->ctrl_val))
-			QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
-				LSW(l2->ctrl_val));
-		if (!poll_mask)
-			goto skip_poll;
-		do {
-			QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
-			if (!(data & poll_mask))
-				break;
-			msleep(1);
-			time_out++;
-		} while (time_out <= poll_to);
-
-		if (time_out > poll_to) {
-			dev_err(&adapter->pdev->dev,
-				"Timeout exceeded in %s, aborting dump\n",
-				__func__);
-			return -EINVAL;
-		}
-skip_poll:
-		addr = l2->read_addr;
-		cnt = l2->read_addr_num;
-		while (cnt) {
-			QLCNIC_RD_DUMP_REG(addr, base, &data);
-			*buffer++ = cpu_to_le32(data);
-			addr += l2->read_addr_stride;
-			cnt--;
-		}
-		val += l2->stride;
-	}
-	return l2->no_ops * l2->read_addr_num * sizeof(u32);
-}
-
-static u32
-qlcnic_read_memory(struct qlcnic_adapter *adapter,
-	struct qlcnic_dump_entry *entry, u32 *buffer)
-{
-	u32 addr, data, test, ret = 0;
-	int i, reg_read;
-	struct __mem *mem = &entry->region.mem;
-	void __iomem *base = adapter->ahw->pci_base0;
-
-	reg_read = mem->size;
-	addr = mem->addr;
-	/* check for data size of multiple of 16 and 16 byte alignment */
-	if ((addr & 0xf) || (reg_read%16)) {
-		dev_info(&adapter->pdev->dev,
-			"Unaligned memory addr:0x%x size:0x%x\n",
-			addr, reg_read);
-		return -EINVAL;
-	}
-
-	mutex_lock(&adapter->ahw->mem_lock);
-
-	while (reg_read != 0) {
-		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
-		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
-		QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
-			TA_CTL_ENABLE | TA_CTL_START);
-
-		for (i = 0; i < MAX_CTL_CHECK; i++) {
-			QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
-			if (!(test & TA_CTL_BUSY))
-				break;
-		}
-		if (i == MAX_CTL_CHECK) {
-			if (printk_ratelimit())
-				dev_err(&adapter->pdev->dev,
-					"failed to read through agent\n");
-			/* fail the dump even when the message is ratelimited */
-			ret = -EINVAL;
-			goto out;
-		}
-		for (i = 0; i < 4; i++) {
-			QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
-			*buffer++ = cpu_to_le32(data);
-		}
-		addr += 16;
-		reg_read -= 16;
-		ret += 16;
-	}
-out:
-	mutex_unlock(&adapter->ahw->mem_lock);
-	/* ret equals mem->size on success, -EINVAL on agent timeout */
-	return ret;
-}
-
-static u32
-qlcnic_dump_nop(struct qlcnic_adapter *adapter,
-	struct qlcnic_dump_entry *entry, u32 *buffer)
-{
-	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
-	return 0;
-}
-
-struct qlcnic_dump_operations fw_dump_ops[] = {
-	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
-	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
-	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
-	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
-	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
-	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
-	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
-	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
-	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
-	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
-	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
-	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
-	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
-	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
-	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
-	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
-	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
-	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
-	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
-	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
-};
-
-/* Walk the template and collect dump for each entry in the dump template */
-static int
-qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
-	u32 size)
-{
-	int ret = 1;
-	if (size != entry->hdr.cap_size) {
-		dev_info(dev,
-			"Invalid dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
-			entry->hdr.type, entry->hdr.mask, size,
-			entry->hdr.cap_size);
-		dev_info(dev, "Aborting further dump capture\n");
-		ret = 0;
-	}
-	return ret;
-}
-
-int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
-{
-	u32 *buffer;
-	char mesg[64];
-	char *msg[] = {mesg, NULL};
-	int i, k, ops_cnt, ops_index, dump_size = 0;
-	u32 entry_offset, dump, no_entries, buf_offset = 0;
-	struct qlcnic_dump_entry *entry;
-	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
-	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
-
-	if (fw_dump->clr) {
-		dev_info(&adapter->pdev->dev,
-			"Previous dump not cleared, not capturing dump\n");
-		return -EIO;
-	}
-	/* Calculate the size for dump data area only */
-	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
-		if (i & tmpl_hdr->drv_cap_mask)
-			dump_size += tmpl_hdr->cap_sizes[k];
-	if (!dump_size)
-		return -EIO;
-
-	fw_dump->data = vzalloc(dump_size);
-	if (!fw_dump->data) {
-		dev_info(&adapter->pdev->dev,
-			"Unable to allocate (%d KB) for fw dump\n",
-			dump_size/1024);
-		return -ENOMEM;
-	}
-	buffer = fw_dump->data;
-	fw_dump->size = dump_size;
-	no_entries = tmpl_hdr->num_entries;
-	ops_cnt = ARRAY_SIZE(fw_dump_ops);
-	entry_offset = tmpl_hdr->offset;
-	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
-	tmpl_hdr->sys_info[1] = adapter->fw_version;
-
-	for (i = 0; i < no_entries; i++) {
-		entry = (void *)tmpl_hdr + entry_offset;
-		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
-			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
-			entry_offset += entry->hdr.offset;
-			continue;
-		}
-		/* Find the handler for this entry */
-		ops_index = 0;
-		while (ops_index < ops_cnt) {
-			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
-				break;
-			ops_index++;
-		}
-		if (ops_index == ops_cnt) {
-			dev_info(&adapter->pdev->dev,
-				"Invalid entry type %d, exiting dump\n",
-				entry->hdr.type);
-			goto error;
-		}
-		/* Collect dump for this entry */
-		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
-		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
-			dump))
-			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
-		buf_offset += entry->hdr.cap_size;
-		entry_offset += entry->hdr.offset;
-		buffer = fw_dump->data + buf_offset;
-	}
-	if (dump_size != buf_offset) {
-		dev_info(&adapter->pdev->dev,
-			"Captured(%d) and expected size(%d) do not match\n",
-			buf_offset, dump_size);
-		goto error;
-	} else {
-		fw_dump->clr = 1;
-		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
-			adapter->netdev->name);
-		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
-			fw_dump->size);
-		/* Send a udev event to notify availability of FW dump */
-		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
-		return 0;
-	}
-error:
-	vfree(fw_dump->data);
-	return -EINVAL;
-}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
deleted file mode 100644
index 3b6741e..0000000
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ /dev/null
@@ -1,1898 +0,0 @@
-/*
- * QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
- */ - -#include -#include -#include -#include -#include "qlcnic.h" - -struct crb_addr_pair { - u32 addr; - u32 data; -}; - -#define QLCNIC_MAX_CRB_XFORM 60 -static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM]; - -#define crb_addr_transform(name) \ - (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \ - QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20) - -#define QLCNIC_ADDR_ERROR (0xffffffff) - -static void -qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring); - -static int -qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter); - -static void crb_addr_transform_setup(void) -{ - crb_addr_transform(XDMA); - crb_addr_transform(TIMR); - crb_addr_transform(SRE); - crb_addr_transform(SQN3); - crb_addr_transform(SQN2); - crb_addr_transform(SQN1); - crb_addr_transform(SQN0); - crb_addr_transform(SQS3); - crb_addr_transform(SQS2); - crb_addr_transform(SQS1); - crb_addr_transform(SQS0); - crb_addr_transform(RPMX7); - crb_addr_transform(RPMX6); - crb_addr_transform(RPMX5); - crb_addr_transform(RPMX4); - crb_addr_transform(RPMX3); - crb_addr_transform(RPMX2); - crb_addr_transform(RPMX1); - crb_addr_transform(RPMX0); - crb_addr_transform(ROMUSB); - crb_addr_transform(SN); - crb_addr_transform(QMN); - crb_addr_transform(QMS); - crb_addr_transform(PGNI); - crb_addr_transform(PGND); - crb_addr_transform(PGN3); - crb_addr_transform(PGN2); - crb_addr_transform(PGN1); - crb_addr_transform(PGN0); - crb_addr_transform(PGSI); - crb_addr_transform(PGSD); - crb_addr_transform(PGS3); - crb_addr_transform(PGS2); - crb_addr_transform(PGS1); - crb_addr_transform(PGS0); - crb_addr_transform(PS); - crb_addr_transform(PH); - crb_addr_transform(NIU); - crb_addr_transform(I2Q); - crb_addr_transform(EG); - crb_addr_transform(MN); - crb_addr_transform(MS); - crb_addr_transform(CAS2); - crb_addr_transform(CAS1); - crb_addr_transform(CAS0); - crb_addr_transform(CAM); - crb_addr_transform(C2C1); - crb_addr_transform(C2C0); - crb_addr_transform(SMB); - crb_addr_transform(OCM0); - crb_addr_transform(I2C0); -} - -void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_rx_buffer *rx_buf; - int i, ring; - - recv_ctx = adapter->recv_ctx; - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - for (i = 0; i < rds_ring->num_desc; ++i) { - rx_buf = &(rds_ring->rx_buf_arr[i]); - if (rx_buf->skb == NULL) - continue; - - pci_unmap_single(adapter->pdev, - rx_buf->dma, - rds_ring->dma_size, - PCI_DMA_FROMDEVICE); - - dev_kfree_skb_any(rx_buf->skb); - } - } -} - -void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_rx_buffer *rx_buf; - int i, ring; - - recv_ctx = adapter->recv_ctx; - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - - INIT_LIST_HEAD(&rds_ring->free_list); - - rx_buf = rds_ring->rx_buf_arr; - for (i = 0; i < rds_ring->num_desc; i++) { - list_add_tail(&rx_buf->list, - &rds_ring->free_list); - rx_buf++; - } - } -} - -void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter) -{ - struct qlcnic_cmd_buffer *cmd_buf; - struct qlcnic_skb_frag *buffrag; - int i, j; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; - - cmd_buf = tx_ring->cmd_buf_arr; - for (i = 0; i < tx_ring->num_desc; i++) { - buffrag = cmd_buf->frag_array; - if (buffrag->dma) { - 
pci_unmap_single(adapter->pdev, buffrag->dma, - buffrag->length, PCI_DMA_TODEVICE); - buffrag->dma = 0ULL; - } - for (j = 0; j < cmd_buf->frag_count; j++) { - buffrag++; - if (buffrag->dma) { - pci_unmap_page(adapter->pdev, buffrag->dma, - buffrag->length, - PCI_DMA_TODEVICE); - buffrag->dma = 0ULL; - } - } - if (cmd_buf->skb) { - dev_kfree_skb_any(cmd_buf->skb); - cmd_buf->skb = NULL; - } - cmd_buf++; - } -} - -void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_tx_ring *tx_ring; - int ring; - - recv_ctx = adapter->recv_ctx; - - if (recv_ctx->rds_rings == NULL) - goto skip_rds; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - vfree(rds_ring->rx_buf_arr); - rds_ring->rx_buf_arr = NULL; - } - kfree(recv_ctx->rds_rings); - -skip_rds: - if (adapter->tx_ring == NULL) - return; - - tx_ring = adapter->tx_ring; - vfree(tx_ring->cmd_buf_arr); - tx_ring->cmd_buf_arr = NULL; - kfree(adapter->tx_ring); - adapter->tx_ring = NULL; -} - -int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) -{ - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_rds_ring *rds_ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_host_tx_ring *tx_ring; - struct qlcnic_rx_buffer *rx_buf; - int ring, i, size; - - struct qlcnic_cmd_buffer *cmd_buf_arr; - struct net_device *netdev = adapter->netdev; - - size = sizeof(struct qlcnic_host_tx_ring); - tx_ring = kzalloc(size, GFP_KERNEL); - if (tx_ring == NULL) { - dev_err(&netdev->dev, "failed to allocate tx ring struct\n"); - return -ENOMEM; - } - adapter->tx_ring = tx_ring; - - tx_ring->num_desc = adapter->num_txd; - tx_ring->txq = netdev_get_tx_queue(netdev, 0); - - cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); - if (cmd_buf_arr == NULL) { - dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); - goto err_out; - } - tx_ring->cmd_buf_arr = cmd_buf_arr; - - recv_ctx = adapter->recv_ctx; - - size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring); - rds_ring = kzalloc(size, GFP_KERNEL); - if (rds_ring == NULL) { - dev_err(&netdev->dev, "failed to allocate rds ring struct\n"); - goto err_out; - } - recv_ctx->rds_rings = rds_ring; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &recv_ctx->rds_rings[ring]; - switch (ring) { - case RCV_RING_NORMAL: - rds_ring->num_desc = adapter->num_rxd; - rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN; - rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; - break; - - case RCV_RING_JUMBO: - rds_ring->num_desc = adapter->num_jumbo_rxd; - rds_ring->dma_size = - QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) - rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA; - - rds_ring->skb_size = - rds_ring->dma_size + NET_IP_ALIGN; - break; - } - rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); - if (rds_ring->rx_buf_arr == NULL) { - dev_err(&netdev->dev, "Failed to allocate " - "rx buffer ring %d\n", ring); - goto err_out; - } - INIT_LIST_HEAD(&rds_ring->free_list); - /* - * Now go through all of them, set reference handles - * and put them in the queues. 
- */ - rx_buf = rds_ring->rx_buf_arr; - for (i = 0; i < rds_ring->num_desc; i++) { - list_add_tail(&rx_buf->list, - &rds_ring->free_list); - rx_buf->ref_handle = i; - rx_buf++; - } - spin_lock_init(&rds_ring->lock); - } - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - sds_ring->irq = adapter->msix_entries[ring].vector; - sds_ring->adapter = adapter; - sds_ring->num_desc = adapter->num_rxd; - - for (i = 0; i < NUM_RCV_DESC_RINGS; i++) - INIT_LIST_HEAD(&sds_ring->free_list[i]); - } - - return 0; - -err_out: - qlcnic_free_sw_resources(adapter); - return -ENOMEM; -} - -/* - * Utility to translate from internal Phantom CRB address - * to external PCI CRB address. - */ -static u32 qlcnic_decode_crb_addr(u32 addr) -{ - int i; - u32 base_addr, offset, pci_base; - - crb_addr_transform_setup(); - - pci_base = QLCNIC_ADDR_ERROR; - base_addr = addr & 0xfff00000; - offset = addr & 0x000fffff; - - for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) { - if (crb_addr_xform[i] == base_addr) { - pci_base = i << 20; - break; - } - } - if (pci_base == QLCNIC_ADDR_ERROR) - return pci_base; - else - return pci_base + offset; -} - -#define QLCNIC_MAX_ROM_WAIT_USEC 100 - -static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter) -{ - long timeout = 0; - long done = 0; - - cond_resched(); - - while (done == 0) { - done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS); - done &= 2; - if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) { - dev_err(&adapter->pdev->dev, - "Timeout reached waiting for rom done"); - return -EIO; - } - udelay(1); - } - return 0; -} - -static int do_rom_fast_read(struct qlcnic_adapter *adapter, - u32 addr, u32 *valp) -{ - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3); - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb); - if (qlcnic_wait_rom_done(adapter)) { - dev_err(&adapter->pdev->dev, "Error waiting for rom done\n"); - return -EIO; - } - /* reset abyte_cnt and dummy_byte_cnt */ - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0); - udelay(10); - QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); - - *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA); - return 0; -} - -static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, - u8 *bytes, size_t size) -{ - int addridx; - int ret = 0; - - for (addridx = addr; addridx < (addr + size); addridx += 4) { - int v; - ret = do_rom_fast_read(adapter, addridx, &v); - if (ret != 0) - break; - *(__le32 *)bytes = cpu_to_le32(v); - bytes += 4; - } - - return ret; -} - -int -qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, - u8 *bytes, size_t size) -{ - int ret; - - ret = qlcnic_rom_lock(adapter); - if (ret < 0) - return ret; - - ret = do_rom_fast_read_words(adapter, addr, bytes, size); - - qlcnic_rom_unlock(adapter); - return ret; -} - -int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp) -{ - int ret; - - if (qlcnic_rom_lock(adapter) != 0) - return -EIO; - - ret = do_rom_fast_read(adapter, addr, valp); - qlcnic_rom_unlock(adapter); - return ret; -} - -int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) -{ - int addr, val; - int i, n, init_delay; - struct crb_addr_pair *buf; - unsigned offset; - u32 off; - struct pci_dev *pdev = adapter->pdev; - - QLCWR32(adapter, CRB_CMDPEG_STATE, 0); - QLCWR32(adapter, CRB_RCVPEG_STATE, 0); - - qlcnic_rom_lock(adapter); - QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); - 
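/*
 * qlcnic_decode_crb_addr() above splits an internal address into a
 * 12-bit window base and a 20-bit offset, then reverse-maps the base
 * through the table built by crb_addr_transform_setup(). A standalone
 * sketch (table contents assumed already populated at init time):
 */
#include <stdint.h>

#define MAX_CRB_XFORM 60
#define ADDR_ERROR    0xffffffffu

static uint32_t crb_xform[MAX_CRB_XFORM];	/* filled at init time */

static uint32_t decode_crb_addr(uint32_t addr)
{
	uint32_t base = addr & 0xfff00000u;	/* hub agent window */
	uint32_t off  = addr & 0x000fffffu;	/* offset inside it */
	uint32_t i;

	for (i = 0; i < MAX_CRB_XFORM; i++)
		if (crb_xform[i] == base)
			return (i << 20) + off;	/* external PCI address */
	return ADDR_ERROR;
}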
qlcnic_rom_unlock(adapter); - - /* Init HW CRB block */ - if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || - qlcnic_rom_fast_read(adapter, 4, &n) != 0) { - dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n); - return -EIO; - } - offset = n & 0xffffU; - n = (n >> 16) & 0xffffU; - - if (n >= 1024) { - dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n"); - return -EIO; - } - - buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); - if (buf == NULL) { - dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n"); - return -ENOMEM; - } - - for (i = 0; i < n; i++) { - if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || - qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { - kfree(buf); - return -EIO; - } - - buf[i].addr = addr; - buf[i].data = val; - } - - for (i = 0; i < n; i++) { - - off = qlcnic_decode_crb_addr(buf[i].addr); - if (off == QLCNIC_ADDR_ERROR) { - dev_err(&pdev->dev, "CRB init value out of range %x\n", - buf[i].addr); - continue; - } - off += QLCNIC_PCI_CRBSPACE; - - if (off & 1) - continue; - - /* skipping cold reboot MAGIC */ - if (off == QLCNIC_CAM_RAM(0x1fc)) - continue; - if (off == (QLCNIC_CRB_I2C0 + 0x1c)) - continue; - if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */ - continue; - if (off == (ROMUSB_GLB + 0xa8)) - continue; - if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ - continue; - if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ - continue; - if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ - continue; - if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET) - continue; - /* skip the function enable register */ - if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION)) - continue; - if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2)) - continue; - if ((off & 0x0ff00000) == QLCNIC_CRB_SMB) - continue; - - init_delay = 1; - /* After writing this register, HW needs time for CRB */ - /* to quiet down (else crb_window returns 0xffffffff) */ - if (off == QLCNIC_ROMUSB_GLB_SW_RESET) - init_delay = 1000; - - QLCWR32(adapter, off, buf[i].data); - - msleep(init_delay); - } - kfree(buf); - - /* Initialize protocol process engine */ - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0); - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); - msleep(1); - QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); - QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); - return 0; -} - -static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter) -{ - u32 val; - int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; - - do { - val = QLCRD32(adapter, CRB_CMDPEG_STATE); - - switch (val) { - case PHAN_INITIALIZE_COMPLETE: - case PHAN_INITIALIZE_ACK: - return 0; - case PHAN_INITIALIZE_FAILED: - goto out_err; - default: - break; - } - - msleep(QLCNIC_CMDPEG_CHECK_DELAY); - - } while (--retries); - - QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); - -out_err: - dev_err(&adapter->pdev->dev, "Command Peg initialization not " - "complete, state: 0x%x.\n", val); - return -EIO; -} - -static int 
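/*
 * qlcnic_cmd_peg_ready() above is a bounded poll: re-read a state
 * register, succeed on "complete" or "ack", fail fast on an explicit
 * failure code, and give up after N sleeps. A user-space model with a
 * stubbed register read; the constants and delay are illustrative, not
 * the driver's values:
 */
#include <stdio.h>
#include <unistd.h>

#define INIT_COMPLETE 0xff01u
#define INIT_ACK      0xf00fu
#define INIT_FAILED   0xffffu

static unsigned read_cmdpeg_state(void)
{
	return INIT_COMPLETE;		/* stub for the QLCRD32() read */
}

static int wait_cmd_peg(int retries, unsigned delay_ms)
{
	unsigned val = 0;

	do {
		val = read_cmdpeg_state();
		switch (val) {
		case INIT_COMPLETE:
		case INIT_ACK:
			return 0;
		case INIT_FAILED:
			goto out_err;
		}
		usleep(delay_ms * 1000);
	} while (--retries);

out_err:
	fprintf(stderr, "init not complete, state: 0x%x\n", val);
	return -1;
}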
-qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter) -{ - u32 val; - int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT; - - do { - val = QLCRD32(adapter, CRB_RCVPEG_STATE); - - if (val == PHAN_PEG_RCV_INITIALIZED) - return 0; - - msleep(QLCNIC_RCVPEG_CHECK_DELAY); - - } while (--retries); - - if (!retries) { - dev_err(&adapter->pdev->dev, "Receive Peg initialization not " - "complete, state: 0x%x.\n", val); - return -EIO; - } - - return 0; -} - -int -qlcnic_check_fw_status(struct qlcnic_adapter *adapter) -{ - int err; - - err = qlcnic_cmd_peg_ready(adapter); - if (err) - return err; - - err = qlcnic_receive_peg_ready(adapter); - if (err) - return err; - - QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); - - return err; -} - -int -qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { - - int timeo; - u32 val; - - val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); - val = QLC_DEV_GET_DRV(val, adapter->portnum); - if ((val & 0x3) != QLCNIC_TYPE_NIC) { - dev_err(&adapter->pdev->dev, - "Not an Ethernet NIC func=%u\n", val); - return -EIO; - } - adapter->physical_port = (val >> 2); - if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) - timeo = QLCNIC_INIT_TIMEOUT_SECS; - - adapter->dev_init_timeo = timeo; - - if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo)) - timeo = QLCNIC_RESET_TIMEOUT_SECS; - - adapter->reset_ack_timeo = timeo; - - return 0; -} - -static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, - struct qlcnic_flt_entry *region_entry) -{ - struct qlcnic_flt_header flt_hdr; - struct qlcnic_flt_entry *flt_entry; - int i = 0, ret; - u32 entry_size; - - memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); - ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, - (u8 *)&flt_hdr, - sizeof(struct qlcnic_flt_header)); - if (ret) { - dev_warn(&adapter->pdev->dev, - "error reading flash layout header\n"); - return -EIO; - } - - entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); - flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); - if (flt_entry == NULL) { - dev_warn(&adapter->pdev->dev, "error allocating memory\n"); - return -EIO; - } - - ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + - sizeof(struct qlcnic_flt_header), - (u8 *)flt_entry, entry_size); - if (ret) { - dev_warn(&adapter->pdev->dev, - "error reading flash layout entries\n"); - goto err_out; - } - - while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { - if (flt_entry[i].region == region) - break; - i++; - } - if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { - dev_warn(&adapter->pdev->dev, - "region=%x not found in %d regions\n", region, i); - ret = -EIO; - goto err_out; - } - memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); - -err_out: - vfree(flt_entry); - return ret; -} - -int -qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) -{ - struct qlcnic_flt_entry fw_entry; - u32 ver = -1, min_ver; - int ret; - - ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry); - if (!ret) - /* 0-4:-signature, 4-8:-fw version */ - qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, - (int *)&ver); - else - qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, - (int *)&ver); - - ver = QLCNIC_DECODE_VERSION(ver); - min_ver = QLCNIC_MIN_FW_VERSION; - - if (ver < min_ver) { - dev_err(&adapter->pdev->dev, - "firmware version %d.%d.%d unsupported." 
- "Min supported version %d.%d.%d\n", - _major(ver), _minor(ver), _build(ver), - _major(min_ver), _minor(min_ver), _build(min_ver)); - return -EINVAL; - } - - return 0; -} - -static int -qlcnic_has_mn(struct qlcnic_adapter *adapter) -{ - u32 capability; - capability = 0; - - capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY); - if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) - return 1; - - return 0; -} - -static -struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section) -{ - u32 i; - struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; - __le32 entries = cpu_to_le32(directory->num_entries); - - for (i = 0; i < entries; i++) { - - __le32 offs = cpu_to_le32(directory->findex) + - (i * cpu_to_le32(directory->entry_size)); - __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); - - if (tab_type == section) - return (struct uni_table_desc *) &unirom[offs]; - } - - return NULL; -} - -#define FILEHEADER_SIZE (14 * 4) - -static int -qlcnic_validate_header(struct qlcnic_adapter *adapter) -{ - const u8 *unirom = adapter->fw->data; - struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; - __le32 fw_file_size = adapter->fw->size; - __le32 entries; - __le32 entry_size; - __le32 tab_size; - - if (fw_file_size < FILEHEADER_SIZE) - return -EINVAL; - - entries = cpu_to_le32(directory->num_entries); - entry_size = cpu_to_le32(directory->entry_size); - tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); - - if (fw_file_size < tab_size) - return -EINVAL; - - return 0; -} - -static int -qlcnic_validate_bootld(struct qlcnic_adapter *adapter) -{ - struct uni_table_desc *tab_desc; - struct uni_data_desc *descr; - const u8 *unirom = adapter->fw->data; - int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - QLCNIC_UNI_BOOTLD_IDX_OFF)); - __le32 offs; - __le32 tab_size; - __le32 data_size; - - tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD); - - if (!tab_desc) - return -EINVAL; - - tab_size = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); - - if (adapter->fw->size < tab_size) - return -EINVAL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx)); - descr = (struct uni_data_desc *)&unirom[offs]; - - data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); - - if (adapter->fw->size < data_size) - return -EINVAL; - - return 0; -} - -static int -qlcnic_validate_fw(struct qlcnic_adapter *adapter) -{ - struct uni_table_desc *tab_desc; - struct uni_data_desc *descr; - const u8 *unirom = adapter->fw->data; - int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - QLCNIC_UNI_FIRMWARE_IDX_OFF)); - __le32 offs; - __le32 tab_size; - __le32 data_size; - - tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW); - - if (!tab_desc) - return -EINVAL; - - tab_size = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); - - if (adapter->fw->size < tab_size) - return -EINVAL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * (idx)); - descr = (struct uni_data_desc *)&unirom[offs]; - data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); - - if (adapter->fw->size < data_size) - return -EINVAL; - - return 0; -} - -static int -qlcnic_validate_product_offs(struct qlcnic_adapter *adapter) -{ - struct uni_table_desc *ptab_descr; - const u8 *unirom = adapter->fw->data; - int mn_present = qlcnic_has_mn(adapter); - __le32 entries; - __le32 entry_size; - 
__le32 tab_size; - u32 i; - - ptab_descr = qlcnic_get_table_desc(unirom, - QLCNIC_UNI_DIR_SECT_PRODUCT_TBL); - if (!ptab_descr) - return -EINVAL; - - entries = cpu_to_le32(ptab_descr->num_entries); - entry_size = cpu_to_le32(ptab_descr->entry_size); - tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); - - if (adapter->fw->size < tab_size) - return -EINVAL; - -nomn: - for (i = 0; i < entries; i++) { - - __le32 flags, file_chiprev, offs; - u8 chiprev = adapter->ahw->revision_id; - u32 flagbit; - - offs = cpu_to_le32(ptab_descr->findex) + - (i * cpu_to_le32(ptab_descr->entry_size)); - flags = cpu_to_le32(*((int *)&unirom[offs] + - QLCNIC_UNI_FLAGS_OFF)); - file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + - QLCNIC_UNI_CHIP_REV_OFF)); - - flagbit = mn_present ? 1 : 2; - - if ((chiprev == file_chiprev) && - ((1ULL << flagbit) & flags)) { - adapter->file_prd_off = offs; - return 0; - } - } - if (mn_present) { - mn_present = 0; - goto nomn; - } - return -EINVAL; -} - -static int -qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter) -{ - if (qlcnic_validate_header(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: header validation failed\n"); - return -EINVAL; - } - - if (qlcnic_validate_product_offs(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: product validation failed\n"); - return -EINVAL; - } - - if (qlcnic_validate_bootld(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: bootld validation failed\n"); - return -EINVAL; - } - - if (qlcnic_validate_fw(adapter)) { - dev_err(&adapter->pdev->dev, - "unified image: firmware validation failed\n"); - return -EINVAL; - } - - return 0; -} - -static -struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter, - u32 section, u32 idx_offset) -{ - const u8 *unirom = adapter->fw->data; - int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + - idx_offset)); - struct uni_table_desc *tab_desc; - __le32 offs; - - tab_desc = qlcnic_get_table_desc(unirom, section); - - if (tab_desc == NULL) - return NULL; - - offs = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size) * idx); - - return (struct uni_data_desc *)&unirom[offs]; -} - -static u8 * -qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter) -{ - u32 offs = QLCNIC_BOOTLD_START; - - if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) - offs = cpu_to_le32((qlcnic_get_data_desc(adapter, - QLCNIC_UNI_DIR_SECT_BOOTLD, - QLCNIC_UNI_BOOTLD_IDX_OFF))->findex); - - return (u8 *)&adapter->fw->data[offs]; -} - -static u8 * -qlcnic_get_fw_offs(struct qlcnic_adapter *adapter) -{ - u32 offs = QLCNIC_IMAGE_START; - - if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) - offs = cpu_to_le32((qlcnic_get_data_desc(adapter, - QLCNIC_UNI_DIR_SECT_FW, - QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex); - - return (u8 *)&adapter->fw->data[offs]; -} - -static __le32 -qlcnic_get_fw_size(struct qlcnic_adapter *adapter) -{ - if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) - return cpu_to_le32((qlcnic_get_data_desc(adapter, - QLCNIC_UNI_DIR_SECT_FW, - QLCNIC_UNI_FIRMWARE_IDX_OFF))->size); - else - return cpu_to_le32( - *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]); -} - -static __le32 -qlcnic_get_fw_version(struct qlcnic_adapter *adapter) -{ - struct uni_data_desc *fw_data_desc; - const struct firmware *fw = adapter->fw; - __le32 major, minor, sub; - const u8 *ver_str; - int i, ret; - - if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) - return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]); - - fw_data_desc = 
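/*
 * The validate_*() helpers above all follow one rule: before touching
 * entry `idx` of a table embedded in the firmware blob, prove the whole
 * entry lies inside the file. A sketch of that bounds check; the 64-bit
 * arithmetic is a deliberate hedge against the overflow a pure 32-bit
 * multiply could hide:
 */
#include <stdint.h>
#include <stddef.h>

struct table_desc {
	uint32_t findex;	/* table start within the blob */
	uint32_t entry_size;
	uint32_t num_entries;
};

static const void *table_entry(const uint8_t *blob, size_t blob_len,
			       const struct table_desc *tab, uint32_t idx)
{
	uint64_t end = (uint64_t)tab->findex +
		       (uint64_t)tab->entry_size * ((uint64_t)idx + 1);

	if (idx >= tab->num_entries || end > blob_len)
		return NULL;		/* would read past the image */

	return blob + tab->findex + (uint64_t)tab->entry_size * idx;
}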
qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, - QLCNIC_UNI_FIRMWARE_IDX_OFF); - ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) + - cpu_to_le32(fw_data_desc->size) - 17; - - for (i = 0; i < 12; i++) { - if (!strncmp(&ver_str[i], "REV=", 4)) { - ret = sscanf(&ver_str[i+4], "%u.%u.%u ", - &major, &minor, &sub); - if (ret != 3) - return 0; - else - return major + (minor << 8) + (sub << 16); - } - } - - return 0; -} - -static __le32 -qlcnic_get_bios_version(struct qlcnic_adapter *adapter) -{ - const struct firmware *fw = adapter->fw; - __le32 bios_ver, prd_off = adapter->file_prd_off; - - if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) - return cpu_to_le32( - *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]); - - bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) - + QLCNIC_UNI_BIOS_VERSION_OFF)); - - return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); -} - -static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter) -{ - if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID)) - dev_info(&adapter->pdev->dev, "Resetting rom_lock\n"); - - qlcnic_pcie_sem_unlock(adapter, 2); -} - -static int -qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter) -{ - u32 heartbeat, ret = -EIO; - int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; - - adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); - - do { - msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); - heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); - if (heartbeat != adapter->heartbeat) { - ret = QLCNIC_RCODE_SUCCESS; - break; - } - } while (--retries); - - return ret; -} - -int -qlcnic_need_fw_reset(struct qlcnic_adapter *adapter) -{ - if ((adapter->flags & QLCNIC_FW_HANG) || - qlcnic_check_fw_hearbeat(adapter)) { - qlcnic_rom_lock_recovery(adapter); - return 1; - } - - if (adapter->need_fw_reset) - return 1; - - if (adapter->fw) - return 1; - - return 0; -} - -static const char *fw_name[] = { - QLCNIC_UNIFIED_ROMIMAGE_NAME, - QLCNIC_FLASH_ROMIMAGE_NAME, -}; - -int -qlcnic_load_firmware(struct qlcnic_adapter *adapter) -{ - u64 *ptr64; - u32 i, flashaddr, size; - const struct firmware *fw = adapter->fw; - struct pci_dev *pdev = adapter->pdev; - - dev_info(&pdev->dev, "loading firmware from %s\n", - fw_name[adapter->fw_type]); - - if (fw) { - __le64 data; - - size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; - - ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter); - flashaddr = QLCNIC_BOOTLD_START; - - for (i = 0; i < size; i++) { - data = cpu_to_le64(ptr64[i]); - - if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - - size = (__force u32)qlcnic_get_fw_size(adapter) / 8; - - ptr64 = (u64 *)qlcnic_get_fw_offs(adapter); - flashaddr = QLCNIC_IMAGE_START; - - for (i = 0; i < size; i++) { - data = cpu_to_le64(ptr64[i]); - - if (qlcnic_pci_mem_write_2M(adapter, - flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - - size = (__force u32)qlcnic_get_fw_size(adapter) % 8; - if (size) { - data = cpu_to_le64(ptr64[i]); - - if (qlcnic_pci_mem_write_2M(adapter, - flashaddr, data)) - return -EIO; - } - - } else { - u64 data; - u32 hi, lo; - int ret; - struct qlcnic_flt_entry bootld_entry; - - ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION, - &bootld_entry); - if (!ret) { - size = bootld_entry.size / 8; - flashaddr = bootld_entry.start_addr; - } else { - size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; - flashaddr = QLCNIC_BOOTLD_START; - dev_info(&pdev->dev, - "using legacy method to get flash fw region"); - } - - for (i = 0; i < size; i++) { 
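/*
 * qlcnic_check_fw_hearbeat() above (the misspelling is the driver's)
 * decides liveness purely by watching a counter the firmware bumps:
 * sample it, sleep, and see whether it moved within a few periods. A
 * user-space model with a stubbed counter read:
 */
#include <unistd.h>

static unsigned read_alive_counter(void)
{
	static unsigned c;
	return c++;		/* stub: live firmware increments this */
}

static int fw_hung(int samples, unsigned period_ms)
{
	unsigned prev = read_alive_counter();

	while (samples--) {
		usleep(period_ms * 1000);
		if (read_alive_counter() != prev)
			return 0;	/* counter moved: firmware alive */
	}
	return 1;			/* stuck: recover the lock and reset */
}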
- if (qlcnic_rom_fast_read(adapter, - flashaddr, (int *)&lo) != 0) - return -EIO; - if (qlcnic_rom_fast_read(adapter, - flashaddr + 4, (int *)&hi) != 0) - return -EIO; - - data = (((u64)hi << 32) | lo); - - if (qlcnic_pci_mem_write_2M(adapter, - flashaddr, data)) - return -EIO; - - flashaddr += 8; - } - } - msleep(1); - - QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020); - QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e); - return 0; -} - -static int -qlcnic_validate_firmware(struct qlcnic_adapter *adapter) -{ - __le32 val; - u32 ver, bios, min_size; - struct pci_dev *pdev = adapter->pdev; - const struct firmware *fw = adapter->fw; - u8 fw_type = adapter->fw_type; - - if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) { - if (qlcnic_validate_unified_romimage(adapter)) - return -EINVAL; - - min_size = QLCNIC_UNI_FW_MIN_SIZE; - } else { - val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]); - if ((__force u32)val != QLCNIC_BDINFO_MAGIC) - return -EINVAL; - - min_size = QLCNIC_FW_MIN_SIZE; - } - - if (fw->size < min_size) - return -EINVAL; - - val = qlcnic_get_fw_version(adapter); - ver = QLCNIC_DECODE_VERSION(val); - - if (ver < QLCNIC_MIN_FW_VERSION) { - dev_err(&pdev->dev, - "%s: firmware version %d.%d.%d unsupported\n", - fw_name[fw_type], _major(ver), _minor(ver), _build(ver)); - return -EINVAL; - } - - val = qlcnic_get_bios_version(adapter); - qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios); - if ((__force u32)val != bios) { - dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", - fw_name[fw_type]); - return -EINVAL; - } - - QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC); - return 0; -} - -static void -qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter) -{ - u8 fw_type; - - switch (adapter->fw_type) { - case QLCNIC_UNKNOWN_ROMIMAGE: - fw_type = QLCNIC_UNIFIED_ROMIMAGE; - break; - - case QLCNIC_UNIFIED_ROMIMAGE: - default: - fw_type = QLCNIC_FLASH_ROMIMAGE; - break; - } - - adapter->fw_type = fw_type; -} - - - -void qlcnic_request_firmware(struct qlcnic_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - int rc; - - adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE; - -next: - qlcnic_get_next_fwtype(adapter); - - if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) { - adapter->fw = NULL; - } else { - rc = request_firmware(&adapter->fw, - fw_name[adapter->fw_type], &pdev->dev); - if (rc != 0) - goto next; - - rc = qlcnic_validate_firmware(adapter); - if (rc != 0) { - release_firmware(adapter->fw); - msleep(1); - goto next; - } - } -} - - -void -qlcnic_release_firmware(struct qlcnic_adapter *adapter) -{ - if (adapter->fw) - release_firmware(adapter->fw); - adapter->fw = NULL; -} - -static void -qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, - struct qlcnic_fw_msg *msg) -{ - u32 cable_OUI; - u16 cable_len; - u16 link_speed; - u8 link_status, module, duplex, autoneg; - u8 lb_status = 0; - struct net_device *netdev = adapter->netdev; - - adapter->has_link_events = 1; - - cable_OUI = msg->body[1] & 0xffffffff; - cable_len = (msg->body[1] >> 32) & 0xffff; - link_speed = (msg->body[1] >> 48) & 0xffff; - - link_status = msg->body[2] & 0xff; - duplex = (msg->body[2] >> 16) & 0xff; - autoneg = (msg->body[2] >> 24) & 0xff; - lb_status = (msg->body[2] >> 32) & 0x3; - - module = (msg->body[2] >> 8) & 0xff; - if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) - dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, " - "length %d\n", cable_OUI, cable_len); - else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) - 
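/*
 * The flash branch of qlcnic_load_firmware() above streams the image
 * eight bytes at a time, assembling each 64-bit word from two 32-bit
 * flash reads, low word first. A sketch with the I/O abstracted behind
 * callbacks (the callback signatures are hypothetical):
 */
#include <stdint.h>

static int stream_image(int (*read32)(uint32_t addr, uint32_t *val),
			int (*write64)(uint32_t addr, uint64_t data),
			uint32_t addr, uint32_t nwords)
{
	uint32_t lo, hi, i;

	for (i = 0; i < nwords; i++, addr += 8) {
		if (read32(addr, &lo) || read32(addr + 4, &hi))
			return -1;	/* flash read failed */
		if (write64(addr, ((uint64_t)hi << 32) | lo))
			return -1;	/* device memory write failed */
	}
	return 0;
}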
dev_info(&netdev->dev, "unsupported cable length %d\n", - cable_len); - - if (!link_status && (lb_status == QLCNIC_ILB_MODE || - lb_status == QLCNIC_ELB_MODE)) - adapter->ahw->loopback_state |= QLCNIC_LINKEVENT; - - qlcnic_advert_link_change(adapter, link_status); - - if (duplex == LINKEVENT_FULL_DUPLEX) - adapter->link_duplex = DUPLEX_FULL; - else - adapter->link_duplex = DUPLEX_HALF; - - adapter->module_type = module; - adapter->link_autoneg = autoneg; - adapter->link_speed = link_speed; -} - -static void -qlcnic_handle_fw_message(int desc_cnt, int index, - struct qlcnic_host_sds_ring *sds_ring) -{ - struct qlcnic_fw_msg msg; - struct status_desc *desc; - struct qlcnic_adapter *adapter; - struct device *dev; - int i = 0, opcode, ret; - - while (desc_cnt > 0 && i < 8) { - desc = &sds_ring->desc_head[index]; - msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); - msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); - - index = get_next_index(index, sds_ring->num_desc); - desc_cnt--; - } - - adapter = sds_ring->adapter; - dev = &adapter->pdev->dev; - opcode = qlcnic_get_nic_msg_opcode(msg.body[0]); - - switch (opcode) { - case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: - qlcnic_handle_linkevent(adapter, &msg); - break; - case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK: - ret = (u32)(msg.body[1]); - switch (ret) { - case 0: - adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE; - break; - case 1: - dev_info(dev, "loopback already in progress\n"); - adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS; - break; - case 2: - dev_info(dev, "loopback cable is not connected\n"); - adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; - break; - default: - dev_info(dev, "loopback configure request failed," - " ret %x\n", ret); - adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR; - break; - } - break; - default: - break; - } -} - -static int -qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring, - struct qlcnic_rx_buffer *buffer) -{ - struct sk_buff *skb; - dma_addr_t dma; - struct pci_dev *pdev = adapter->pdev; - - skb = dev_alloc_skb(rds_ring->skb_size); - if (!skb) { - adapter->stats.skb_alloc_failure++; - return -ENOMEM; - } - - skb_reserve(skb, NET_IP_ALIGN); - - dma = pci_map_single(pdev, skb->data, - rds_ring->dma_size, PCI_DMA_FROMDEVICE); - - if (pci_dma_mapping_error(pdev, dma)) { - adapter->stats.rx_dma_map_error++; - dev_kfree_skb_any(skb); - return -ENOMEM; - } - - buffer->skb = skb; - buffer->dma = dma; - - return 0; -} - -static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum) -{ - struct qlcnic_rx_buffer *buffer; - struct sk_buff *skb; - - buffer = &rds_ring->rx_buf_arr[index]; - - if (unlikely(buffer->skb == NULL)) { - WARN_ON(1); - return NULL; - } - - pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, - PCI_DMA_FROMDEVICE); - - skb = buffer->skb; - - if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && - (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) { - adapter->stats.csummed++; - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { - skb_checksum_none_assert(skb); - } - - skb->dev = adapter->netdev; - - buffer->skb = NULL; - - return skb; -} - -static inline int -qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb, - u16 *vlan_tag) -{ - struct ethhdr *eth_hdr; - - if (!__vlan_get_tag(skb, vlan_tag)) { - eth_hdr = (struct ethhdr *) skb->data; - memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); - skb_pull(skb, VLAN_HLEN); - } 
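/*
 * qlcnic_handle_linkevent() above unpacks everything from two 64-bit
 * message words with shifts and masks. A standalone version of that
 * unpacking; the field layout is copied from the code above, the struct
 * is invented for the sketch:
 */
#include <stdint.h>

struct link_event {
	uint32_t cable_oui;
	uint16_t cable_len;
	uint16_t speed;
	uint8_t  status;
	uint8_t  module;
	uint8_t  duplex;
	uint8_t  autoneg;
};

static struct link_event parse_link_event(const uint64_t body[3])
{
	struct link_event ev;

	ev.cable_oui = (uint32_t)(body[1] & 0xffffffffu);
	ev.cable_len = (uint16_t)((body[1] >> 32) & 0xffff);
	ev.speed     = (uint16_t)((body[1] >> 48) & 0xffff);
	ev.status    = (uint8_t)(body[2] & 0xff);
	ev.module    = (uint8_t)((body[2] >> 8) & 0xff);
	ev.duplex    = (uint8_t)((body[2] >> 16) & 0xff);
	ev.autoneg   = (uint8_t)((body[2] >> 24) & 0xff);
	return ev;
}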
- if (!adapter->pvid) - return 0; - - if (*vlan_tag == adapter->pvid) { - /* Outer vlan tag. Packet should follow non-vlan path */ - *vlan_tag = 0xffff; - return 0; - } - if (adapter->flags & QLCNIC_TAGGING_ENABLED) - return 0; - - return -EINVAL; -} - -static struct qlcnic_rx_buffer * -qlcnic_process_rcv(struct qlcnic_adapter *adapter, - struct qlcnic_host_sds_ring *sds_ring, - int ring, u64 sts_data0) -{ - struct net_device *netdev = adapter->netdev; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - struct qlcnic_rx_buffer *buffer; - struct sk_buff *skb; - struct qlcnic_host_rds_ring *rds_ring; - int index, length, cksum, pkt_offset; - u16 vid = 0xffff; - - if (unlikely(ring >= adapter->max_rds_rings)) - return NULL; - - rds_ring = &recv_ctx->rds_rings[ring]; - - index = qlcnic_get_sts_refhandle(sts_data0); - if (unlikely(index >= rds_ring->num_desc)) - return NULL; - - buffer = &rds_ring->rx_buf_arr[index]; - - length = qlcnic_get_sts_totallength(sts_data0); - cksum = qlcnic_get_sts_status(sts_data0); - pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); - - skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); - if (!skb) - return buffer; - - if (length > rds_ring->skb_size) - skb_put(skb, rds_ring->skb_size); - else - skb_put(skb, length); - - if (pkt_offset) - skb_pull(skb, pkt_offset); - - if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { - adapter->stats.rxdropped++; - dev_kfree_skb(skb); - return buffer; - } - - skb->protocol = eth_type_trans(skb, netdev); - - if (vid != 0xffff) - __vlan_hwaccel_put_tag(skb, vid); - - napi_gro_receive(&sds_ring->napi, skb); - - adapter->stats.rx_pkts++; - adapter->stats.rxbytes += length; - - return buffer; -} - -#define QLC_TCP_HDR_SIZE 20 -#define QLC_TCP_TS_OPTION_SIZE 12 -#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE) - -static struct qlcnic_rx_buffer * -qlcnic_process_lro(struct qlcnic_adapter *adapter, - struct qlcnic_host_sds_ring *sds_ring, - int ring, u64 sts_data0, u64 sts_data1) -{ - struct net_device *netdev = adapter->netdev; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - struct qlcnic_rx_buffer *buffer; - struct sk_buff *skb; - struct qlcnic_host_rds_ring *rds_ring; - struct iphdr *iph; - struct tcphdr *th; - bool push, timestamp; - int l2_hdr_offset, l4_hdr_offset; - int index; - u16 lro_length, length, data_offset; - u32 seq_number; - u16 vid = 0xffff; - - if (unlikely(ring > adapter->max_rds_rings)) - return NULL; - - rds_ring = &recv_ctx->rds_rings[ring]; - - index = qlcnic_get_lro_sts_refhandle(sts_data0); - if (unlikely(index > rds_ring->num_desc)) - return NULL; - - buffer = &rds_ring->rx_buf_arr[index]; - - timestamp = qlcnic_get_lro_sts_timestamp(sts_data0); - lro_length = qlcnic_get_lro_sts_length(sts_data0); - l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0); - l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0); - push = qlcnic_get_lro_sts_push_flag(sts_data0); - seq_number = qlcnic_get_lro_sts_seq_number(sts_data1); - - skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); - if (!skb) - return buffer; - - if (timestamp) - data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE; - else - data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; - - skb_put(skb, lro_length + data_offset); - - skb_pull(skb, l2_hdr_offset); - - if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { - adapter->stats.rxdropped++; - dev_kfree_skb(skb); - return buffer; - } - - skb->protocol = eth_type_trans(skb, netdev); - - iph = (struct iphdr 
*)skb->data; - th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); - - length = (iph->ihl << 2) + (th->doff << 2) + lro_length; - iph->tot_len = htons(length); - iph->check = 0; - iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); - th->psh = push; - th->seq = htonl(seq_number); - - length = skb->len; - - if (vid != 0xffff) - __vlan_hwaccel_put_tag(skb, vid); - netif_receive_skb(skb); - - adapter->stats.lro_pkts++; - adapter->stats.lrobytes += length; - - return buffer; -} - -int -qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) -{ - struct qlcnic_adapter *adapter = sds_ring->adapter; - struct list_head *cur; - struct status_desc *desc; - struct qlcnic_rx_buffer *rxbuf; - u64 sts_data0, sts_data1; - - int count = 0; - int opcode, ring, desc_cnt; - u32 consumer = sds_ring->consumer; - - while (count < max) { - desc = &sds_ring->desc_head[consumer]; - sts_data0 = le64_to_cpu(desc->status_desc_data[0]); - - if (!(sts_data0 & STATUS_OWNER_HOST)) - break; - - desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); - opcode = qlcnic_get_sts_opcode(sts_data0); - - switch (opcode) { - case QLCNIC_RXPKT_DESC: - case QLCNIC_OLD_RXPKT_DESC: - case QLCNIC_SYN_OFFLOAD: - ring = qlcnic_get_sts_type(sts_data0); - rxbuf = qlcnic_process_rcv(adapter, sds_ring, - ring, sts_data0); - break; - case QLCNIC_LRO_DESC: - ring = qlcnic_get_lro_sts_type(sts_data0); - sts_data1 = le64_to_cpu(desc->status_desc_data[1]); - rxbuf = qlcnic_process_lro(adapter, sds_ring, - ring, sts_data0, sts_data1); - break; - case QLCNIC_RESPONSE_DESC: - qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); - default: - goto skip; - } - - WARN_ON(desc_cnt > 1); - - if (likely(rxbuf)) - list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); - else - adapter->stats.null_rxbuf++; - -skip: - for (; desc_cnt > 0; desc_cnt--) { - desc = &sds_ring->desc_head[consumer]; - desc->status_desc_data[0] = - cpu_to_le64(STATUS_OWNER_PHANTOM); - consumer = get_next_index(consumer, sds_ring->num_desc); - } - count++; - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - struct qlcnic_host_rds_ring *rds_ring = - &adapter->recv_ctx->rds_rings[ring]; - - if (!list_empty(&sds_ring->free_list[ring])) { - list_for_each(cur, &sds_ring->free_list[ring]) { - rxbuf = list_entry(cur, - struct qlcnic_rx_buffer, list); - qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); - } - spin_lock(&rds_ring->lock); - list_splice_tail_init(&sds_ring->free_list[ring], - &rds_ring->free_list); - spin_unlock(&rds_ring->lock); - } - - qlcnic_post_rx_buffers_nodb(adapter, rds_ring); - } - - if (count) { - sds_ring->consumer = consumer; - writel(consumer, sds_ring->crb_sts_consumer); - } - - return count; -} - -void -qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring) -{ - struct rcv_desc *pdesc; - struct qlcnic_rx_buffer *buffer; - int count = 0; - u32 producer; - struct list_head *head; - - producer = rds_ring->producer; - - head = &rds_ring->free_list; - while (!list_empty(head)) { - - buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); - - if (!buffer->skb) { - if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) - break; - } - - count++; - list_del(&buffer->list); - - /* make a rcv descriptor */ - pdesc = &rds_ring->desc_head[producer]; - pdesc->addr_buffer = cpu_to_le64(buffer->dma); - pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); - pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); - - producer = get_next_index(producer, rds_ring->num_desc); - } - - if (count) { - 
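/*
 * qlcnic_process_rcv_ring() above is a classic ownership-bit consumer:
 * take descriptors while the owner field says "host", then flip each
 * slot back to the NIC before advancing. A sketch with hypothetical bit
 * positions and a power-of-two ring:
 */
#include <stdint.h>

#define OWNER_HOST    (1ull << 56)	/* bit positions illustrative */
#define OWNER_PHANTOM (1ull << 57)

struct status_desc { uint64_t data; };

static int consume_ring(struct status_desc *ring, unsigned num_desc,
			unsigned *consumer, int budget)
{
	int count = 0;

	while (count < budget) {
		struct status_desc *d = &ring[*consumer];

		if (!(d->data & OWNER_HOST))
			break;			/* NIC still owns this slot */

		/* ... process the packet described by d->data ... */

		d->data = OWNER_PHANTOM;	/* hand the slot back */
		*consumer = (*consumer + 1) & (num_desc - 1);
		count++;
	}
	return count;	/* caller writes *consumer to the consumer register */
}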
rds_ring->producer = producer; - writel((producer-1) & (rds_ring->num_desc-1), - rds_ring->crb_rcv_producer); - } -} - -static void -qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, - struct qlcnic_host_rds_ring *rds_ring) -{ - struct rcv_desc *pdesc; - struct qlcnic_rx_buffer *buffer; - int count = 0; - uint32_t producer; - struct list_head *head; - - if (!spin_trylock(&rds_ring->lock)) - return; - - producer = rds_ring->producer; - - head = &rds_ring->free_list; - while (!list_empty(head)) { - - buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); - - if (!buffer->skb) { - if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) - break; - } - - count++; - list_del(&buffer->list); - - /* make a rcv descriptor */ - pdesc = &rds_ring->desc_head[producer]; - pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); - pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); - pdesc->addr_buffer = cpu_to_le64(buffer->dma); - - producer = get_next_index(producer, rds_ring->num_desc); - } - - if (count) { - rds_ring->producer = producer; - writel((producer - 1) & (rds_ring->num_desc - 1), - rds_ring->crb_rcv_producer); - } - spin_unlock(&rds_ring->lock); -} - -static void dump_skb(struct sk_buff *skb) -{ - int i; - unsigned char *data = skb->data; - - printk(KERN_INFO "\n"); - for (i = 0; i < skb->len; i++) { - printk(KERN_INFO "%02x ", data[i]); - if ((i & 0x0f) == 8) - printk(KERN_INFO "\n"); - } -} - -void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, - struct qlcnic_host_sds_ring *sds_ring, - int ring, u64 sts_data0) -{ - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - struct sk_buff *skb; - struct qlcnic_host_rds_ring *rds_ring; - int index, length, cksum, pkt_offset; - - if (unlikely(ring >= adapter->max_rds_rings)) - return; - - rds_ring = &recv_ctx->rds_rings[ring]; - - index = qlcnic_get_sts_refhandle(sts_data0); - length = qlcnic_get_sts_totallength(sts_data0); - if (unlikely(index >= rds_ring->num_desc)) - return; - - cksum = qlcnic_get_sts_status(sts_data0); - pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); - - skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); - if (!skb) - return; - - if (length > rds_ring->skb_size) - skb_put(skb, rds_ring->skb_size); - else - skb_put(skb, length); - - if (pkt_offset) - skb_pull(skb, pkt_offset); - - if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr)) - adapter->diag_cnt++; - else - dump_skb(skb); - - dev_kfree_skb_any(skb); - adapter->stats.rx_pkts++; - adapter->stats.rxbytes += length; - - return; -} - -void -qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) -{ - struct qlcnic_adapter *adapter = sds_ring->adapter; - struct status_desc *desc; - u64 sts_data0; - int ring, opcode, desc_cnt; - - u32 consumer = sds_ring->consumer; - - desc = &sds_ring->desc_head[consumer]; - sts_data0 = le64_to_cpu(desc->status_desc_data[0]); - - if (!(sts_data0 & STATUS_OWNER_HOST)) - return; - - desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); - opcode = qlcnic_get_sts_opcode(sts_data0); - switch (opcode) { - case QLCNIC_RESPONSE_DESC: - qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); - break; - default: - ring = qlcnic_get_sts_type(sts_data0); - qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0); - break; - } - - for (; desc_cnt > 0; desc_cnt--) { - desc = &sds_ring->desc_head[consumer]; - desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); - consumer = get_next_index(consumer, sds_ring->num_desc); - } - - sds_ring->consumer = consumer; - writel(consumer, 
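/*
 * Both refill paths above share the same index math: slots wrap with a
 * power-of-two mask, and the doorbell gets (producer - 1) so it names
 * the last descriptor actually posted, not the next free one. A minimal
 * sketch of that producer side:
 */
static unsigned next_index(unsigned idx, unsigned num_desc)
{
	return (idx + 1) & (num_desc - 1);	/* num_desc must be 2^n */
}

static unsigned post_buffers(unsigned producer, unsigned num_desc,
			     int avail, volatile unsigned *doorbell)
{
	int count = 0;

	while (avail--) {
		/* ... fill the rcv descriptor at ring[producer] ... */
		producer = next_index(producer, num_desc);
		count++;
	}
	if (count)
		*doorbell = (producer - 1) & (num_desc - 1);
	return producer;
}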
sds_ring->crb_sts_consumer); -} - -void -qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2, - u8 alt_mac, u8 *mac) -{ - u32 mac_low, mac_high; - int i; - - mac_low = QLCRD32(adapter, off1); - mac_high = QLCRD32(adapter, off2); - - if (alt_mac) { - mac_low |= (mac_low >> 16) | (mac_high << 16); - mac_high >>= 16; - } - - for (i = 0; i < 2; i++) - mac[i] = (u8)(mac_high >> ((1 - i) * 8)); - for (i = 2; i < 6; i++) - mac[i] = (u8)(mac_low >> ((5 - i) * 8)); -} diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c deleted file mode 100644 index ec8ef72..0000000 --- a/drivers/net/qlcnic/qlcnic_main.c +++ /dev/null @@ -1,4390 +0,0 @@ -/* - * QLogic qlcnic NIC Driver - * Copyright (c) 2009-2010 QLogic Corporation - * - * See LICENSE.qlcnic for copyright and licensing details. - */ - -#include -#include -#include - -#include "qlcnic.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(QLCNIC_LINUX_VERSIONID); -MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME); - -char qlcnic_driver_name[] = "qlcnic"; -static const char qlcnic_driver_string[] = "QLogic 1/10 GbE " - "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID; - -static struct workqueue_struct *qlcnic_wq; -static int qlcnic_mac_learn; -module_param(qlcnic_mac_learn, int, 0444); -MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); - -static int use_msi = 1; -module_param(use_msi, int, 0444); -MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); - -static int use_msi_x = 1; -module_param(use_msi_x, int, 0444); -MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); - -static int auto_fw_reset = 1; -module_param(auto_fw_reset, int, 0644); -MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); - -static int load_fw_file; -module_param(load_fw_file, int, 0444); -MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); - -static int qlcnic_config_npars; -module_param(qlcnic_config_npars, int, 0444); -MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); - -static int __devinit qlcnic_probe(struct pci_dev *pdev, - const struct pci_device_id *ent); -static void __devexit qlcnic_remove(struct pci_dev *pdev); -static int qlcnic_open(struct net_device *netdev); -static int qlcnic_close(struct net_device *netdev); -static void qlcnic_tx_timeout(struct net_device *netdev); -static void qlcnic_attach_work(struct work_struct *work); -static void qlcnic_fwinit_work(struct work_struct *work); -static void qlcnic_fw_poll_work(struct work_struct *work); -static void qlcnic_schedule_work(struct qlcnic_adapter *adapter, - work_func_t func, int delay); -static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter); -static int qlcnic_poll(struct napi_struct *napi, int budget); -static int qlcnic_rx_poll(struct napi_struct *napi, int budget); -#ifdef CONFIG_NET_POLL_CONTROLLER -static void qlcnic_poll_controller(struct net_device *netdev); -#endif - -static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter); -static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter); -static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); -static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); - -static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); -static void 
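/*
 * qlcnic_fetch_mac() above rebuilds the station address from two 32-bit
 * registers, most-significant bytes first. The same extraction, free of
 * driver types:
 */
#include <stdint.h>

static void fetch_mac(uint32_t mac_low, uint32_t mac_high, uint8_t mac[6])
{
	int i;

	for (i = 0; i < 2; i++)		/* bytes 0-1 from the high word */
		mac[i] = (uint8_t)(mac_high >> ((1 - i) * 8));
	for (i = 2; i < 6; i++)		/* bytes 2-5 from the low word */
		mac[i] = (uint8_t)(mac_low >> ((5 - i) * 8));
}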
qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8); -static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); - -static irqreturn_t qlcnic_tmp_intr(int irq, void *data); -static irqreturn_t qlcnic_intr(int irq, void *data); -static irqreturn_t qlcnic_msi_intr(int irq, void *data); -static irqreturn_t qlcnic_msix_intr(int irq, void *data); - -static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); -static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long); -static int qlcnic_start_firmware(struct qlcnic_adapter *); - -static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); -static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); -static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); -static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); -static int qlcnicvf_start_firmware(struct qlcnic_adapter *); -static void qlcnic_set_netdev_features(struct qlcnic_adapter *, - struct qlcnic_esw_func_cfg *); -static void qlcnic_vlan_rx_add(struct net_device *, u16); -static void qlcnic_vlan_rx_del(struct net_device *, u16); - -/* PCI Device ID Table */ -#define ENTRY(device) \ - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ - .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} - -#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 - -static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = { - ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), - {0,} -}; - -MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl); - - -inline void -qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, - struct qlcnic_host_tx_ring *tx_ring) -{ - writel(tx_ring->producer, tx_ring->crb_cmd_producer); -} - -static const u32 msi_tgt_status[8] = { - ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, - ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, - ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, - ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 -}; - -static const -struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG; - -static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring) -{ - writel(0, sds_ring->crb_intr_mask); -} - -static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) -{ - struct qlcnic_adapter *adapter = sds_ring->adapter; - - writel(0x1, sds_ring->crb_intr_mask); - - if (!QLCNIC_IS_MSI_FAMILY(adapter)) - writel(0xfbff, adapter->tgt_mask_reg); -} - -static int -qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count) -{ - int size = sizeof(struct qlcnic_host_sds_ring) * count; - - recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); - - return recv_ctx->sds_rings == NULL; -} - -static void -qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) -{ - if (recv_ctx->sds_rings != NULL) - kfree(recv_ctx->sds_rings); - - recv_ctx->sds_rings = NULL; -} - -static int -qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) -{ - int ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) - return -ENOMEM; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - - if (ring == adapter->max_sds_rings - 1) - netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, - QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings); - else - netif_napi_add(netdev, &sds_ring->napi, - qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2); - } - - return 0; -} - -static void -qlcnic_napi_del(struct qlcnic_adapter *adapter) -{ 
- int ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netif_napi_del(&sds_ring->napi); - } - - qlcnic_free_sds_rings(adapter->recv_ctx); -} - -static void -qlcnic_napi_enable(struct qlcnic_adapter *adapter) -{ - int ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - napi_enable(&sds_ring->napi); - qlcnic_enable_int(sds_ring); - } -} - -static void -qlcnic_napi_disable(struct qlcnic_adapter *adapter) -{ - int ring; - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - qlcnic_disable_int(sds_ring); - napi_synchronize(&sds_ring->napi); - napi_disable(&sds_ring->napi); - } -} - -static void qlcnic_clear_stats(struct qlcnic_adapter *adapter) -{ - memset(&adapter->stats, 0, sizeof(adapter->stats)); -} - -static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable) -{ - u32 control; - int pos; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_dword(pdev, pos, &control); - if (enable) - control |= PCI_MSIX_FLAGS_ENABLE; - else - control = 0; - pci_write_config_dword(pdev, pos, control); - } -} - -static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count) -{ - int i; - - for (i = 0; i < count; i++) - adapter->msix_entries[i].entry = i; -} - -static int -qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) -{ - u8 mac_addr[ETH_ALEN]; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - if (qlcnic_get_mac_address(adapter, mac_addr) != 0) - return -EIO; - - memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); - memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); - - /* set station address */ - - if (!is_valid_ether_addr(netdev->perm_addr)) - dev_warn(&pdev->dev, "Bad MAC address %pM.\n", - netdev->dev_addr); - - return 0; -} - -static int qlcnic_set_mac(struct net_device *netdev, void *p) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct sockaddr *addr = p; - - if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED)) - return -EOPNOTSUPP; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; - - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { - netif_device_detach(netdev); - qlcnic_napi_disable(adapter); - } - - memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - qlcnic_set_multi(adapter->netdev); - - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { - netif_device_attach(netdev); - qlcnic_napi_enable(adapter); - } - return 0; -} - -static const struct net_device_ops qlcnic_netdev_ops = { - .ndo_open = qlcnic_open, - .ndo_stop = qlcnic_close, - .ndo_start_xmit = qlcnic_xmit_frame, - .ndo_get_stats = qlcnic_get_stats, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = qlcnic_set_multi, - .ndo_set_mac_address = qlcnic_set_mac, - .ndo_change_mtu = qlcnic_change_mtu, - .ndo_fix_features = qlcnic_fix_features, - .ndo_set_features = 
qlcnic_set_features, - .ndo_tx_timeout = qlcnic_tx_timeout, - .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add, - .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = qlcnic_poll_controller, -#endif -}; - -static struct qlcnic_nic_template qlcnic_ops = { - .config_bridged_mode = qlcnic_config_bridged_mode, - .config_led = qlcnic_config_led, - .start_firmware = qlcnic_start_firmware -}; - -static struct qlcnic_nic_template qlcnic_vf_ops = { - .config_bridged_mode = qlcnicvf_config_bridged_mode, - .config_led = qlcnicvf_config_led, - .start_firmware = qlcnicvf_start_firmware -}; - -static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) -{ - struct pci_dev *pdev = adapter->pdev; - int err = -1; - - adapter->max_sds_rings = 1; - adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); - qlcnic_set_msix_bit(pdev, 0); - - if (adapter->msix_supported) { - enable_msix: - qlcnic_init_msix_entries(adapter, num_msix); - err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); - if (err == 0) { - adapter->flags |= QLCNIC_MSIX_ENABLED; - qlcnic_set_msix_bit(pdev, 1); - - adapter->max_sds_rings = num_msix; - - dev_info(&pdev->dev, "using msi-x interrupts\n"); - return err; - } - if (err > 0) { - num_msix = rounddown_pow_of_two(err); - if (num_msix) - goto enable_msix; - } - } - return err; -} - - -static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) -{ - const struct qlcnic_legacy_intr_set *legacy_intrp; - struct pci_dev *pdev = adapter->pdev; - - if (use_msi && !pci_enable_msi(pdev)) { - adapter->flags |= QLCNIC_MSI_ENABLED; - adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, - msi_tgt_status[adapter->ahw->pci_func]); - dev_info(&pdev->dev, "using msi interrupts\n"); - adapter->msix_entries[0].vector = pdev->irq; - return; - } - - legacy_intrp = &legacy_intr[adapter->ahw->pci_func]; - - adapter->int_vec_bit = legacy_intrp->int_vec_bit; - adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, - legacy_intrp->tgt_status_reg); - adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter, - legacy_intrp->tgt_mask_reg); - adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR); - - adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter, - ISR_INT_STATE_REG); - dev_info(&pdev->dev, "using legacy interrupts\n"); - adapter->msix_entries[0].vector = pdev->irq; -} - -static void -qlcnic_setup_intr(struct qlcnic_adapter *adapter) -{ - int num_msix; - - if (adapter->msix_supported) { - num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), - QLCNIC_DEF_NUM_STS_DESC_RINGS)); - } else - num_msix = 1; - - if (!qlcnic_enable_msix(adapter, num_msix)) - return; - - qlcnic_enable_msi_legacy(adapter); -} - -static void -qlcnic_teardown_intr(struct qlcnic_adapter *adapter) -{ - if (adapter->flags & QLCNIC_MSIX_ENABLED) - pci_disable_msix(adapter->pdev); - if (adapter->flags & QLCNIC_MSI_ENABLED) - pci_disable_msi(adapter->pdev); -} - -static void -qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter) -{ - if (adapter->ahw->pci_base0 != NULL) - iounmap(adapter->ahw->pci_base0); -} - -static int -qlcnic_init_pci_info(struct qlcnic_adapter *adapter) -{ - struct qlcnic_pci_info *pci_info; - int i, ret = 0; - u8 pfn; - - pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); - if (!pci_info) - return -ENOMEM; - - adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) * - QLCNIC_MAX_PCI_FUNC, GFP_KERNEL); - if (!adapter->npars) { - ret = -ENOMEM; - goto err_pci_info; - } - - adapter->eswitch = 
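/*
 * qlcnic_enable_msix() above leans on the old pci_enable_msix() return
 * convention: 0 means all vectors granted, a positive value is how many
 * are actually available, negative is a hard error. The driver retries
 * with the available count rounded down to a power of two. A user-space
 * model of that loop; the callback stands in for the PCI call:
 */
static unsigned rounddown_pow2(unsigned n)
{
	while (n & (n - 1))
		n &= n - 1;	/* clear low set bits until one remains */
	return n;
}

static int enable_msix(int (*try_enable)(unsigned nvec), unsigned want)
{
	int err;

	while (want) {
		err = try_enable(want);
		if (err == 0)
			return (int)want;	/* got everything asked for */
		if (err < 0)
			return err;		/* hard failure: fall back to MSI */
		want = rounddown_pow2((unsigned)err);
	}
	return -1;
}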
kzalloc(sizeof(struct qlcnic_eswitch) * - QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); - if (!adapter->eswitch) { - ret = -ENOMEM; - goto err_npars; - } - - ret = qlcnic_get_pci_info(adapter, pci_info); - if (ret) - goto err_eswitch; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - pfn = pci_info[i].id; - if (pfn > QLCNIC_MAX_PCI_FUNC) { - ret = QL_STATUS_INVALID_PARAM; - goto err_eswitch; - } - adapter->npars[pfn].active = (u8)pci_info[i].active; - adapter->npars[pfn].type = (u8)pci_info[i].type; - adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port; - adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw; - adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw; - } - - for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) - adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; - - kfree(pci_info); - return 0; - -err_eswitch: - kfree(adapter->eswitch); - adapter->eswitch = NULL; -err_npars: - kfree(adapter->npars); - adapter->npars = NULL; -err_pci_info: - kfree(pci_info); - - return ret; -} - -static int -qlcnic_set_function_modes(struct qlcnic_adapter *adapter) -{ - u8 id; - u32 ref_count; - int i, ret = 1; - u32 data = QLCNIC_MGMT_FUNC; - void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; - - /* If other drivers are not in use set their privilege level */ - ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); - ret = qlcnic_api_lock(adapter); - if (ret) - goto err_lock; - - if (qlcnic_config_npars) { - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - id = i; - if (adapter->npars[i].type != QLCNIC_TYPE_NIC || - id == adapter->ahw->pci_func) - continue; - data |= (qlcnic_config_npars & - QLC_DEV_SET_DRV(0xf, id)); - } - } else { - data = readl(priv_op); - data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) | - (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, - adapter->ahw->pci_func)); - } - writel(data, priv_op); - qlcnic_api_unlock(adapter); -err_lock: - return ret; -} - -static void -qlcnic_check_vf(struct qlcnic_adapter *adapter) -{ - void __iomem *msix_base_addr; - void __iomem *priv_op; - u32 func; - u32 msix_base; - u32 op_mode, priv_level; - - /* Determine FW API version */ - adapter->fw_hal_version = readl(adapter->ahw->pci_base0 + - QLCNIC_FW_API); - - /* Find PCI function number */ - pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); - msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE; - msix_base = readl(msix_base_addr); - func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; - adapter->ahw->pci_func = func; - - /* Determine function privilege level */ - priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; - op_mode = readl(priv_op); - if (op_mode == QLC_DEV_DRV_DEFAULT) - priv_level = QLCNIC_MGMT_FUNC; - else - priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); - - if (priv_level == QLCNIC_NON_PRIV_FUNC) { - adapter->op_mode = QLCNIC_NON_PRIV_FUNC; - dev_info(&adapter->pdev->dev, - "HAL Version: %d Non Privileged function\n", - adapter->fw_hal_version); - adapter->nic_ops = &qlcnic_vf_ops; - } else - adapter->nic_ops = &qlcnic_ops; -} - -static int -qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) -{ - void __iomem *mem_ptr0 = NULL; - resource_size_t mem_base; - unsigned long mem_len, pci_len0 = 0; - - struct pci_dev *pdev = adapter->pdev; - - /* remap phys address */ - mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ - mem_len = pci_resource_len(pdev, 0); - - if (mem_len == QLCNIC_PCI_2MB_SIZE) { - - mem_ptr0 = pci_ioremap_bar(pdev, 0); - if (mem_ptr0 == NULL) { - dev_err(&pdev->dev, "failed to map PCI bar 0\n"); - 
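/*
 * qlcnic_init_pci_info() above unwinds with the err_eswitch/err_npars/
 * err_pci_info ladder: each later failure frees everything allocated
 * before it, in reverse order, through a single exit path per
 * allocation. The shape of that idiom, reduced to three buffers:
 */
#include <stdlib.h>

static int init_three(void **a, void **b, void **c)
{
	*a = malloc(64);
	if (!*a)
		return -1;

	*b = malloc(64);
	if (!*b)
		goto err_a;

	*c = malloc(64);
	if (!*c)
		goto err_b;

	return 0;

err_b:
	free(*b);
	*b = NULL;
err_a:
	free(*a);
	*a = NULL;
	return -1;
}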
-			return -EIO;
-		}
-		pci_len0 = mem_len;
-	} else {
-		return -EIO;
-	}
-
-	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
-
-	adapter->ahw->pci_base0 = mem_ptr0;
-	adapter->ahw->pci_len0 = pci_len0;
-
-	qlcnic_check_vf(adapter);
-
-	adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
-		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
-			adapter->ahw->pci_func)));
-
-	return 0;
-}
-
-static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
-{
-	struct pci_dev *pdev = adapter->pdev;
-	int i, found = 0;
-
-	for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
-		if (qlcnic_boards[i].vendor == pdev->vendor &&
-			qlcnic_boards[i].device == pdev->device &&
-			qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
-			qlcnic_boards[i].sub_device == pdev->subsystem_device) {
-			sprintf(name, "%pM: %s",
-				adapter->mac_addr,
-				qlcnic_boards[i].short_name);
-			found = 1;
-			break;
-		}
-
-	}
-
-	if (!found)
-		sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
-}
-
-static void
-qlcnic_check_options(struct qlcnic_adapter *adapter)
-{
-	u32 fw_major, fw_minor, fw_build, prev_fw_version;
-	struct pci_dev *pdev = adapter->pdev;
-	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
-
-	prev_fw_version = adapter->fw_version;
-
-	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
-	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
-	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
-
-	adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
-
-	if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) {
-		if (fw_dump->tmpl_hdr == NULL ||
-				adapter->fw_version > prev_fw_version) {
-			if (fw_dump->tmpl_hdr)
-				vfree(fw_dump->tmpl_hdr);
-			if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
-				dev_info(&pdev->dev,
-					"Supports FW dump capability\n");
-		}
-	}
-
-	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
-			fw_major, fw_minor, fw_build);
-	if (adapter->ahw->port_type == QLCNIC_XGBE) {
-		if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
-			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
-			adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
-		} else {
-			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
-			adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
-		}
-
-		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
-		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
-
-	} else if (adapter->ahw->port_type == QLCNIC_GBE) {
-		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
-		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
-		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
-		adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
-	}
-
-	adapter->msix_supported = !!use_msi_x;
-
-	adapter->num_txd = MAX_CMD_DESCRIPTORS;
-
-	adapter->max_rds_rings = MAX_RDS_RINGS;
-}
-
-static int
-qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
-{
-	int err;
-	struct qlcnic_info nic_info;
-
-	err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
-	if (err)
-		return err;
-
-	adapter->physical_port = (u8)nic_info.phys_port;
-	adapter->switch_mode = nic_info.switch_mode;
-	adapter->max_tx_ques = nic_info.max_tx_ques;
-	adapter->max_rx_ques = nic_info.max_rx_ques;
-	adapter->capabilities = nic_info.capabilities;
-	adapter->max_mac_filters = nic_info.max_mac_filters;
-	adapter->max_mtu = nic_info.max_mtu;
-
-	if (adapter->capabilities & BIT_6)
-		adapter->flags |= QLCNIC_ESWITCH_ENABLED;
-	else
-		adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
-
-	return err;
-}
-
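The firmware version test in qlcnic_check_options() above works because QLCNIC_VERSION_CODE() packs (major, minor, sub) into a single integer that compares in version order. The exact field widths are the driver's choice; the sketch below uses a KERNEL_VERSION-style layout purely for illustration.

#include <stdio.h>

#define VERSION_CODE(a, b, c)	(((a) << 16) | ((b) << 8) | (c))

int main(void)
{
	unsigned int prev = VERSION_CODE(4, 0, 65);
	unsigned int curr = VERSION_CODE(4, 9, 3);

	/* newer firmware => the minidump template is refetched */
	printf("refetch template: %s\n", curr > prev ? "yes" : "no");
	return 0;
}

The packing only orders correctly while each component fits its field, which is why such codes reserve a full byte or more per component.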
-static void
-qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
-		struct qlcnic_esw_func_cfg *esw_cfg)
-{
-	if (esw_cfg->discard_tagged)
-		adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
-	else
-		adapter->flags |= QLCNIC_TAGGING_ENABLED;
-
-	if (esw_cfg->vlan_id)
-		adapter->pvid = esw_cfg->vlan_id;
-	else
-		adapter->pvid = 0;
-}
-
-static void
-qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
-{
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	set_bit(vid, adapter->vlans);
-}
-
-static void
-qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
-{
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-
-	qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
-	clear_bit(vid, adapter->vlans);
-}
-
-static void
-qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
-		struct qlcnic_esw_func_cfg *esw_cfg)
-{
-	adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
-				QLCNIC_PROMISC_DISABLED);
-
-	if (esw_cfg->mac_anti_spoof)
-		adapter->flags |= QLCNIC_MACSPOOF;
-
-	if (!esw_cfg->mac_override)
-		adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
-
-	if (!esw_cfg->promisc_mode)
-		adapter->flags |= QLCNIC_PROMISC_DISABLED;
-
-	qlcnic_set_netdev_features(adapter, esw_cfg);
-}
-
-static int
-qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
-{
-	struct qlcnic_esw_func_cfg esw_cfg;
-
-	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
-		return 0;
-
-	esw_cfg.pci_func = adapter->ahw->pci_func;
-	if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
-		return -EIO;
-	qlcnic_set_vlan_config(adapter, &esw_cfg);
-	qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
-
-	return 0;
-}
-
-static void
-qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
-		struct qlcnic_esw_func_cfg *esw_cfg)
-{
-	struct net_device *netdev = adapter->netdev;
-	unsigned long features, vlan_features;
-
-	features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-			NETIF_F_IPV6_CSUM | NETIF_F_GRO);
-	vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
-			NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
-
-	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
-		features |= (NETIF_F_TSO | NETIF_F_TSO6);
-		vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
-	}
-
-	if (netdev->features & NETIF_F_LRO)
-		features |= NETIF_F_LRO;
-
-	if (esw_cfg->offload_flags & BIT_0) {
-		netdev->features |= features;
-		if (!(esw_cfg->offload_flags & BIT_1))
-			netdev->features &= ~NETIF_F_TSO;
-		if (!(esw_cfg->offload_flags & BIT_2))
-			netdev->features &= ~NETIF_F_TSO6;
-	} else {
-		netdev->features &= ~features;
-	}
-
-	netdev->vlan_features = (features & vlan_features);
-}
-
-static int
-qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
-{
-	void __iomem *priv_op;
-	u32 op_mode, priv_level;
-	int err = 0;
-
-	err = qlcnic_initialize_nic(adapter);
-	if (err)
-		return err;
-
-	if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
-		return 0;
-
-	priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
-	op_mode = readl(priv_op);
-	priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
-
-	if (op_mode == QLC_DEV_DRV_DEFAULT)
-		priv_level = QLCNIC_MGMT_FUNC;
-	else
-		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
-
-	if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
-		if (priv_level == QLCNIC_MGMT_FUNC) {
-			adapter->op_mode = QLCNIC_MGMT_FUNC;
-			err = qlcnic_init_pci_info(adapter);
-			if (err)
-				return err;
-			/* Set privilege level for other functions */
-			qlcnic_set_function_modes(adapter);
-			dev_info(&adapter->pdev->dev,
-				"HAL Version: %d, Management function\n",
-				adapter->fw_hal_version);
-		} else if (priv_level == QLCNIC_PRIV_FUNC) {
-			adapter->op_mode = QLCNIC_PRIV_FUNC;
-			dev_info(&adapter->pdev->dev,
-				"HAL Version: %d, Privileged function\n",
-				adapter->fw_hal_version);
-		}
-	}
-
-	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
-
-	return err;
-}
-
-static int
-qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
-{
-	struct qlcnic_esw_func_cfg esw_cfg;
-	struct qlcnic_npar_info *npar;
-	u8 i;
-
-	if (adapter->need_fw_reset)
-		return 0;
-
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
-			continue;
-		memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
-		esw_cfg.pci_func = i;
-		esw_cfg.offload_flags = BIT_0;
-		esw_cfg.mac_override = BIT_0;
-		esw_cfg.promisc_mode = BIT_0;
-		if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
-			esw_cfg.offload_flags |= (BIT_1 | BIT_2);
-		if (qlcnic_config_switch_port(adapter, &esw_cfg))
-			return -EIO;
-		npar = &adapter->npars[i];
-		npar->pvid = esw_cfg.vlan_id;
-		npar->mac_override = esw_cfg.mac_override;
-		npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
-		npar->discard_tagged = esw_cfg.discard_tagged;
-		npar->promisc_mode = esw_cfg.promisc_mode;
-		npar->offload_flags = esw_cfg.offload_flags;
-	}
-
-	return 0;
-}
-
-static int
-qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
-			struct qlcnic_npar_info *npar, int pci_func)
-{
-	struct qlcnic_esw_func_cfg esw_cfg;
-	esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
-	esw_cfg.pci_func = pci_func;
-	esw_cfg.vlan_id = npar->pvid;
-	esw_cfg.mac_override = npar->mac_override;
-	esw_cfg.discard_tagged = npar->discard_tagged;
-	esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
-	esw_cfg.offload_flags = npar->offload_flags;
-	esw_cfg.promisc_mode = npar->promisc_mode;
-	if (qlcnic_config_switch_port(adapter, &esw_cfg))
-		return -EIO;
-
-	esw_cfg.op_mode = QLCNIC_ADD_VLAN;
-	if (qlcnic_config_switch_port(adapter, &esw_cfg))
-		return -EIO;
-
-	return 0;
-}
-
-static int
-qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
-{
-	int i, err;
-	struct qlcnic_npar_info *npar;
-	struct qlcnic_info nic_info;
-
-	if (!adapter->need_fw_reset)
-		return 0;
-
-	/* Set the NPAR config data after FW reset */
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-		npar = &adapter->npars[i];
-		if (npar->type != QLCNIC_TYPE_NIC)
-			continue;
-		err = qlcnic_get_nic_info(adapter, &nic_info, i);
-		if (err)
-			return err;
-		nic_info.min_tx_bw = npar->min_bw;
-		nic_info.max_tx_bw = npar->max_bw;
-		err = qlcnic_set_nic_info(adapter, &nic_info);
-		if (err)
-			return err;
-
-		if (npar->enable_pm) {
-			err = qlcnic_config_port_mirroring(adapter,
-					npar->dest_npar, 1, i);
-			if (err)
-				return err;
-		}
-		err = qlcnic_reset_eswitch_config(adapter, npar, i);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
-{
-	u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
-	u32 npar_state;
-
-	if (adapter->op_mode == QLCNIC_MGMT_FUNC)
-		return 0;
-
-	npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
-	while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
-		msleep(1000);
-		npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
-	}
-	if (!npar_opt_timeo) {
-		dev_err(&adapter->pdev->dev,
-			"Timed out waiting for NPAR state to become operational\n");
-		return -EIO;
-	}
-	return 0;
-}
-
-static int
-qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
-{
-	int err;
-
-	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
-		adapter->op_mode != QLCNIC_MGMT_FUNC)
-		return 0;
-
-	err = qlcnic_set_default_offload_settings(adapter);
-	if (err)
-		return err;
-
-	err = qlcnic_reset_npar_config(adapter);
-	if (err)
-		return err;
-
-
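qlcnic_set_default_offload_settings() and qlcnic_set_netdev_features() above treat esw_cfg.offload_flags as a small bitmask: BIT_0 gates offloads as a whole, while BIT_1 and BIT_2 gate TSO and TSO6. A minimal decoder, assuming exactly that layout:

#include <stdio.h>

#define OFFLOAD_EN	(1u << 0)	/* BIT_0: offloads on/off */
#define TSO_EN		(1u << 1)	/* BIT_1: IPv4 TSO */
#define TSO6_EN		(1u << 2)	/* BIT_2: IPv6 TSO */

static void show(unsigned int flags)
{
	if (!(flags & OFFLOAD_EN)) {
		printf("0x%x: all offloads disabled\n", flags);
		return;
	}
	printf("0x%x: csum%s%s\n", flags,
	       (flags & TSO_EN) ? " +tso" : "",
	       (flags & TSO6_EN) ? " +tso6" : "");
}

int main(void)
{
	show(0x1);	/* checksum offload only */
	show(0x7);	/* the TSO-capable default set above */
	return 0;
}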
qlcnic_dev_set_npar_ready(adapter); - - return err; -} - -static int -qlcnic_start_firmware(struct qlcnic_adapter *adapter) -{ - int err; - - err = qlcnic_can_start_firmware(adapter); - if (err < 0) - return err; - else if (!err) - goto check_fw_status; - - if (load_fw_file) - qlcnic_request_firmware(adapter); - else { - err = qlcnic_check_flash_fw_ver(adapter); - if (err) - goto err_out; - - adapter->fw_type = QLCNIC_FLASH_ROMIMAGE; - } - - err = qlcnic_need_fw_reset(adapter); - if (err == 0) - goto check_fw_status; - - err = qlcnic_pinit_from_rom(adapter); - if (err) - goto err_out; - - err = qlcnic_load_firmware(adapter); - if (err) - goto err_out; - - qlcnic_release_firmware(adapter); - QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION); - -check_fw_status: - err = qlcnic_check_fw_status(adapter); - if (err) - goto err_out; - - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); - qlcnic_idc_debug_info(adapter, 1); - - err = qlcnic_check_eswitch_mode(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "Memory allocation failed for eswitch\n"); - goto err_out; - } - err = qlcnic_set_mgmt_operations(adapter); - if (err) - goto err_out; - - qlcnic_check_options(adapter); - adapter->need_fw_reset = 0; - - qlcnic_release_firmware(adapter); - return 0; - -err_out: - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); - dev_err(&adapter->pdev->dev, "Device state set to failed\n"); - - qlcnic_release_firmware(adapter); - return err; -} - -static int -qlcnic_request_irq(struct qlcnic_adapter *adapter) -{ - irq_handler_t handler; - struct qlcnic_host_sds_ring *sds_ring; - int err, ring; - - unsigned long flags = 0; - struct net_device *netdev = adapter->netdev; - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { - handler = qlcnic_tmp_intr; - if (!QLCNIC_IS_MSI_FAMILY(adapter)) - flags |= IRQF_SHARED; - - } else { - if (adapter->flags & QLCNIC_MSIX_ENABLED) - handler = qlcnic_msix_intr; - else if (adapter->flags & QLCNIC_MSI_ENABLED) - handler = qlcnic_msi_intr; - else { - flags |= IRQF_SHARED; - handler = qlcnic_intr; - } - } - adapter->irq = netdev->irq; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); - err = request_irq(sds_ring->irq, handler, - flags, sds_ring->name, sds_ring); - if (err) - return err; - } - - return 0; -} - -static void -qlcnic_free_irq(struct qlcnic_adapter *adapter) -{ - int ring; - struct qlcnic_host_sds_ring *sds_ring; - - struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - free_irq(sds_ring->irq, sds_ring); - } -} - -static int -__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) -{ - int ring; - struct qlcnic_host_rds_ring *rds_ring; - - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return -EIO; - - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) - return 0; - if (qlcnic_set_eswitch_port_config(adapter)) - return -EIO; - - if (qlcnic_fw_create_ctx(adapter)) - return -EIO; - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &adapter->recv_ctx->rds_rings[ring]; - qlcnic_post_rx_buffers(adapter, rds_ring); - } - - qlcnic_set_multi(netdev); - qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu); - - adapter->ahw->linkup = 0; - - if (adapter->max_sds_rings > 1) - qlcnic_config_rss(adapter, 1); - - qlcnic_config_intr_coalesce(adapter); - 
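The handler selection in qlcnic_request_irq() above reduces to a small table: one ISR per interrupt mode, with IRQF_SHARED added only when the line can actually be shared, i.e. legacy INTx (and the interrupt-test path on non-MSI parts). A standalone sketch of that mapping:

#include <stdio.h>

enum intr_mode { INTR_MSIX, INTR_MSI, INTR_LEGACY };

struct intr_choice {
	const char *handler;	/* which ISR qlcnic_request_irq() installs */
	int shared;		/* does request_irq() get IRQF_SHARED? */
};

static const struct intr_choice choice[] = {
	[INTR_MSIX]   = { "qlcnic_msix_intr", 0 },
	[INTR_MSI]    = { "qlcnic_msi_intr",  0 },
	[INTR_LEGACY] = { "qlcnic_intr",      1 },	/* INTx may be shared */
};

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("%s%s\n", choice[i].handler,
		       choice[i].shared ? " (IRQF_SHARED)" : "");
	return 0;
}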
- if (netdev->features & NETIF_F_LRO) - qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); - - qlcnic_napi_enable(adapter); - - qlcnic_linkevent_request(adapter, 1); - - adapter->reset_context = 0; - set_bit(__QLCNIC_DEV_UP, &adapter->state); - return 0; -} - -/* Usage: During resume and firmware recovery module.*/ - -static int -qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) -{ - int err = 0; - - rtnl_lock(); - if (netif_running(netdev)) - err = __qlcnic_up(adapter, netdev); - rtnl_unlock(); - - return err; -} - -static void -__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) -{ - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return; - - if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) - return; - - smp_mb(); - spin_lock(&adapter->tx_clean_lock); - netif_carrier_off(netdev); - netif_tx_disable(netdev); - - qlcnic_free_mac_list(adapter); - - if (adapter->fhash.fnum) - qlcnic_delete_lb_filters(adapter); - - qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE); - - qlcnic_napi_disable(adapter); - - qlcnic_fw_destroy_ctx(adapter); - - qlcnic_reset_rx_buffers_list(adapter); - qlcnic_release_tx_buffers(adapter); - spin_unlock(&adapter->tx_clean_lock); -} - -/* Usage: During suspend and firmware recovery module */ - -static void -qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) -{ - rtnl_lock(); - if (netif_running(netdev)) - __qlcnic_down(adapter, netdev); - rtnl_unlock(); - -} - -static int -qlcnic_attach(struct qlcnic_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - int err; - - if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) - return 0; - - err = qlcnic_napi_add(adapter, netdev); - if (err) - return err; - - err = qlcnic_alloc_sw_resources(adapter); - if (err) { - dev_err(&pdev->dev, "Error in setting sw resources\n"); - goto err_out_napi_del; - } - - err = qlcnic_alloc_hw_resources(adapter); - if (err) { - dev_err(&pdev->dev, "Error in setting hw resources\n"); - goto err_out_free_sw; - } - - err = qlcnic_request_irq(adapter); - if (err) { - dev_err(&pdev->dev, "failed to setup interrupt\n"); - goto err_out_free_hw; - } - - qlcnic_create_sysfs_entries(adapter); - - adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; - return 0; - -err_out_free_hw: - qlcnic_free_hw_resources(adapter); -err_out_free_sw: - qlcnic_free_sw_resources(adapter); -err_out_napi_del: - qlcnic_napi_del(adapter); - return err; -} - -static void -qlcnic_detach(struct qlcnic_adapter *adapter) -{ - if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) - return; - - qlcnic_remove_sysfs_entries(adapter); - - qlcnic_free_hw_resources(adapter); - qlcnic_release_rx_buffers(adapter); - qlcnic_free_irq(adapter); - qlcnic_napi_del(adapter); - qlcnic_free_sw_resources(adapter); - - adapter->is_up = 0; -} - -void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_host_sds_ring *sds_ring; - int ring; - - clear_bit(__QLCNIC_DEV_UP, &adapter->state); - if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &adapter->recv_ctx->sds_rings[ring]; - qlcnic_disable_int(sds_ring); - } - } - - qlcnic_fw_destroy_ctx(adapter); - - qlcnic_detach(adapter); - - adapter->diag_test = 0; - adapter->max_sds_rings = max_sds_rings; - - if (qlcnic_attach(adapter)) - goto out; - - if (netif_running(netdev)) - __qlcnic_up(adapter, netdev); -out: - 
netif_device_attach(netdev); -} - -static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) -{ - int err = 0; - adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context), - GFP_KERNEL); - if (!adapter->ahw) { - dev_err(&adapter->pdev->dev, - "Failed to allocate recv ctx resources for adapter\n"); - err = -ENOMEM; - goto err_out; - } - adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context), - GFP_KERNEL); - if (!adapter->recv_ctx) { - dev_err(&adapter->pdev->dev, - "Failed to allocate recv ctx resources for adapter\n"); - kfree(adapter->ahw); - adapter->ahw = NULL; - err = -ENOMEM; - goto err_out; - } - /* Initialize interrupt coalesce parameters */ - adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; - adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; - adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; -err_out: - return err; -} - -static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter) -{ - kfree(adapter->recv_ctx); - adapter->recv_ctx = NULL; - - if (adapter->ahw->fw_dump.tmpl_hdr) { - vfree(adapter->ahw->fw_dump.tmpl_hdr); - adapter->ahw->fw_dump.tmpl_hdr = NULL; - } - kfree(adapter->ahw); - adapter->ahw = NULL; -} - -int qlcnic_diag_alloc_res(struct net_device *netdev, int test) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_host_rds_ring *rds_ring; - int ring; - int ret; - - netif_device_detach(netdev); - - if (netif_running(netdev)) - __qlcnic_down(adapter, netdev); - - qlcnic_detach(adapter); - - adapter->max_sds_rings = 1; - adapter->diag_test = test; - - ret = qlcnic_attach(adapter); - if (ret) { - netif_device_attach(netdev); - return ret; - } - - ret = qlcnic_fw_create_ctx(adapter); - if (ret) { - qlcnic_detach(adapter); - netif_device_attach(netdev); - return ret; - } - - for (ring = 0; ring < adapter->max_rds_rings; ring++) { - rds_ring = &adapter->recv_ctx->rds_rings[ring]; - qlcnic_post_rx_buffers(adapter, rds_ring); - } - - if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &adapter->recv_ctx->sds_rings[ring]; - qlcnic_enable_int(sds_ring); - } - } - - if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) { - adapter->ahw->loopback_state = 0; - qlcnic_linkevent_request(adapter, 1); - } - - set_bit(__QLCNIC_DEV_UP, &adapter->state); - - return 0; -} - -/* Reset context in hardware only */ -static int -qlcnic_reset_hw_context(struct qlcnic_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EBUSY; - - netif_device_detach(netdev); - - qlcnic_down(adapter, netdev); - - qlcnic_up(adapter, netdev); - - netif_device_attach(netdev); - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - return 0; -} - -int -qlcnic_reset_context(struct qlcnic_adapter *adapter) -{ - int err = 0; - struct net_device *netdev = adapter->netdev; - - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EBUSY; - - if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { - - netif_device_detach(netdev); - - if (netif_running(netdev)) - __qlcnic_down(adapter, netdev); - - qlcnic_detach(adapter); - - if (netif_running(netdev)) { - err = qlcnic_attach(adapter); - if (!err) - __qlcnic_up(adapter, netdev); - } - - netif_device_attach(netdev); - } - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - return err; -} - -static int -qlcnic_setup_netdev(struct qlcnic_adapter *adapter, - struct net_device 
*netdev, u8 pci_using_dac) -{ - int err; - struct pci_dev *pdev = adapter->pdev; - - adapter->mc_enabled = 0; - adapter->max_mc_count = 38; - - netdev->netdev_ops = &qlcnic_netdev_ops; - netdev->watchdog_timeo = 5*HZ; - - qlcnic_change_mtu(netdev, netdev->mtu); - - SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); - - netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) - netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - if (pci_using_dac) - netdev->hw_features |= NETIF_F_HIGHDMA; - - netdev->vlan_features = netdev->hw_features; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX) - netdev->hw_features |= NETIF_F_HW_VLAN_TX; - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) - netdev->hw_features |= NETIF_F_LRO; - - netdev->features |= netdev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; - - netdev->irq = adapter->msix_entries[0].vector; - - err = register_netdev(netdev); - if (err) { - dev_err(&pdev->dev, "failed to register net device\n"); - return err; - } - - return 0; -} - -static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac) -{ - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) - *pci_using_dac = 1; - else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) - *pci_using_dac = 0; - else { - dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n"); - return -EIO; - } - - return 0; -} - -static int -qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count) -{ - adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry), - GFP_KERNEL); - - if (adapter->msix_entries) - return 0; - - dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n"); - return -ENOMEM; -} - -static int __devinit -qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *netdev = NULL; - struct qlcnic_adapter *adapter = NULL; - int err; - uint8_t revision_id; - uint8_t pci_using_dac; - char brd_name[QLCNIC_MAX_BOARD_NAME_LEN]; - - err = pci_enable_device(pdev); - if (err) - return err; - - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - err = -ENODEV; - goto err_out_disable_pdev; - } - - err = qlcnic_set_dma_mask(pdev, &pci_using_dac); - if (err) - goto err_out_disable_pdev; - - err = pci_request_regions(pdev, qlcnic_driver_name); - if (err) - goto err_out_disable_pdev; - - pci_set_master(pdev); - pci_enable_pcie_error_reporting(pdev); - - netdev = alloc_etherdev(sizeof(struct qlcnic_adapter)); - if (!netdev) { - dev_err(&pdev->dev, "failed to allocate net_device\n"); - err = -ENOMEM; - goto err_out_free_res; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - - if (qlcnic_alloc_adapter_resources(adapter)) - goto err_out_free_netdev; - - adapter->dev_rst_time = jiffies; - revision_id = pdev->revision; - adapter->ahw->revision_id = revision_id; - adapter->mac_learn = qlcnic_mac_learn; - - rwlock_init(&adapter->ahw->crb_lock); - mutex_init(&adapter->ahw->mem_lock); - - spin_lock_init(&adapter->tx_clean_lock); - INIT_LIST_HEAD(&adapter->mac_list); - - err = qlcnic_setup_pci_map(adapter); - if (err) - goto err_out_free_hw; - - /* This will be reset for mezz cards */ - adapter->portnum = adapter->ahw->pci_func; - - err = qlcnic_get_board_info(adapter); - if (err) { - dev_err(&pdev->dev, "Error getting board config info.\n"); 
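qlcnic_probe() relies on the usual kernel unwind ladder: each failure jumps to the label that releases everything acquired so far, and the labels below the success return run in reverse acquisition order. A stripped-down, self-contained sketch of the idiom (the acquire/release helpers are hypothetical):

#include <stdio.h>

static int fail_at_b;				/* set to 1 to exercise the unwind */

static int acquire_a(void) { puts("acquire a"); return 0; }
static int acquire_b(void) { puts("acquire b"); return fail_at_b ? -1 : 0; }
static int acquire_c(void) { puts("acquire c"); return 0; }
static void release_b(void) { puts("release b"); }
static void release_a(void) { puts("release a"); }

static int probe_sketch(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto err_out_release_a;

	err = acquire_c();
	if (err)
		goto err_out_release_b;

	return 0;				/* success: keep a, b and c */

err_out_release_b:				/* later failures fall through */
	release_b();
err_out_release_a:
	release_a();
	return err;
}

int main(void)
{
	return probe_sketch();
}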
-		goto err_out_iounmap;
-	}
-
-	err = qlcnic_setup_idc_param(adapter);
-	if (err)
-		goto err_out_iounmap;
-
-	adapter->flags |= QLCNIC_NEED_FLR;
-
-	err = adapter->nic_ops->start_firmware(adapter);
-	if (err) {
-		dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n");
-		goto err_out_decr_ref;
-	}
-
-	if (qlcnic_read_mac_addr(adapter))
-		dev_warn(&pdev->dev, "failed to read mac addr\n");
-
-	if (adapter->portnum == 0) {
-		get_brd_name(adapter, brd_name);
-
-		pr_info("%s: %s Board Chip rev 0x%x\n",
-				module_name(THIS_MODULE),
-				brd_name, adapter->ahw->revision_id);
-	}
-
-	qlcnic_clear_stats(adapter);
-
-	err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
-	if (err)
-		goto err_out_decr_ref;
-
-	qlcnic_setup_intr(adapter);
-
-	err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
-	if (err)
-		goto err_out_disable_msi;
-
-	pci_set_drvdata(pdev, adapter);
-
-	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
-
-	switch (adapter->ahw->port_type) {
-	case QLCNIC_GBE:
-		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
-				adapter->netdev->name);
-		break;
-	case QLCNIC_XGBE:
-		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
-				adapter->netdev->name);
-		break;
-	}
-
-	if (adapter->mac_learn)
-		qlcnic_alloc_lb_filters_mem(adapter);
-
-	qlcnic_create_diag_entries(adapter);
-
-	return 0;
-
-err_out_disable_msi:
-	qlcnic_teardown_intr(adapter);
-	kfree(adapter->msix_entries);
-
-err_out_decr_ref:
-	qlcnic_clr_all_drv_state(adapter, 0);
-
-err_out_iounmap:
-	qlcnic_cleanup_pci_map(adapter);
-
-err_out_free_hw:
-	qlcnic_free_adapter_resources(adapter);
-
-err_out_free_netdev:
-	free_netdev(netdev);
-
-err_out_free_res:
-	pci_release_regions(pdev);
-
-err_out_disable_pdev:
-	pci_set_drvdata(pdev, NULL);
-	pci_disable_device(pdev);
-	return err;
-}
-
-static void __devexit qlcnic_remove(struct pci_dev *pdev)
-{
-	struct qlcnic_adapter *adapter;
-	struct net_device *netdev;
-
-	adapter = pci_get_drvdata(pdev);
-	if (adapter == NULL)
-		return;
-
-	netdev = adapter->netdev;
-
-	qlcnic_cancel_fw_work(adapter);
-
-	unregister_netdev(netdev);
-
-	qlcnic_detach(adapter);
-
-	if (adapter->npars != NULL)
-		kfree(adapter->npars);
-	if (adapter->eswitch != NULL)
-		kfree(adapter->eswitch);
-
-	qlcnic_clr_all_drv_state(adapter, 0);
-
-	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-
-	qlcnic_free_lb_filters_mem(adapter);
-
-	qlcnic_teardown_intr(adapter);
-	kfree(adapter->msix_entries);
-
-	qlcnic_remove_diag_entries(adapter);
-
-	qlcnic_cleanup_pci_map(adapter);
-
-	qlcnic_release_firmware(adapter);
-
-	pci_disable_pcie_error_reporting(pdev);
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
-
-	qlcnic_free_adapter_resources(adapter);
-	free_netdev(netdev);
-}
-static int __qlcnic_shutdown(struct pci_dev *pdev)
-{
-	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
-	struct net_device *netdev = adapter->netdev;
-	int retval;
-
-	netif_device_detach(netdev);
-
-	qlcnic_cancel_fw_work(adapter);
-
-	if (netif_running(netdev))
-		qlcnic_down(adapter, netdev);
-
-	qlcnic_clr_all_drv_state(adapter, 0);
-
-	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-
-	if (qlcnic_wol_supported(adapter)) {
-		pci_enable_wake(pdev, PCI_D3cold, 1);
-		pci_enable_wake(pdev, PCI_D3hot, 1);
-	}
-
-	return 0;
-}
-
-static void qlcnic_shutdown(struct pci_dev *pdev)
-{
-	if (__qlcnic_shutdown(pdev))
-		return;
-
-	pci_disable_device(pdev);
-}
-
-#ifdef CONFIG_PM
-static int
-qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) -{ - int retval; - - retval = __qlcnic_shutdown(pdev); - if (retval) - return retval; - - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; -} - -static int -qlcnic_resume(struct pci_dev *pdev) -{ - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - int err; - - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_power_state(pdev, PCI_D0); - pci_set_master(pdev); - pci_restore_state(pdev); - - err = adapter->nic_ops->start_firmware(adapter); - if (err) { - dev_err(&pdev->dev, "failed to start firmware\n"); - return err; - } - - if (netif_running(netdev)) { - err = qlcnic_up(adapter, netdev); - if (err) - goto done; - - qlcnic_restore_indev_addr(netdev, NETDEV_UP); - } -done: - netif_device_attach(netdev); - qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); - return 0; -} -#endif - -static int qlcnic_open(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - int err; - - netif_carrier_off(netdev); - - err = qlcnic_attach(adapter); - if (err) - return err; - - err = __qlcnic_up(adapter, netdev); - if (err) - goto err_out; - - netif_start_queue(netdev); - - return 0; - -err_out: - qlcnic_detach(adapter); - return err; -} - -/* - * qlcnic_close - Disables a network interface entry point - */ -static int qlcnic_close(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - __qlcnic_down(adapter, netdev); - return 0; -} - -void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter) -{ - void *head; - int i; - - if (adapter->fhash.fmax && adapter->fhash.fhead) - return; - - spin_lock_init(&adapter->mac_learn_lock); - - head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head), - GFP_KERNEL); - if (!head) - return; - - adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS; - adapter->fhash.fhead = head; - - for (i = 0; i < adapter->fhash.fmax; i++) - INIT_HLIST_HEAD(&adapter->fhash.fhead[i]); -} - -static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter) -{ - if (adapter->fhash.fmax && adapter->fhash.fhead) - kfree(adapter->fhash.fhead); - - adapter->fhash.fhead = NULL; - adapter->fhash.fmax = 0; -} - -static void qlcnic_change_filter(struct qlcnic_adapter *adapter, - u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) -{ - struct cmd_desc_type0 *hwdesc; - struct qlcnic_nic_req *req; - struct qlcnic_mac_req *mac_req; - struct qlcnic_vlan_req *vlan_req; - u32 producer; - u64 word; - - producer = tx_ring->producer; - hwdesc = &tx_ring->desc_head[tx_ring->producer]; - - req = (struct qlcnic_nic_req *)hwdesc; - memset(req, 0, sizeof(struct qlcnic_nic_req)); - req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); - - word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16); - req->req_hdr = cpu_to_le64(word); - - mac_req = (struct qlcnic_mac_req *)&(req->words[0]); - mac_req->op = vlan_id ? 
		QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
-	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
-
-	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
-	vlan_req->vlan_id = vlan_id;
-
-	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
-	smp_mb();
-}
-
-#define QLCNIC_MAC_HASH(MAC)\
-	((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
-
-static void
-qlcnic_send_filter(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_tx_ring *tx_ring,
-		struct cmd_desc_type0 *first_desc,
-		struct sk_buff *skb)
-{
-	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
-	struct qlcnic_filter *fil, *tmp_fil;
-	struct hlist_node *tmp_hnode, *n;
-	struct hlist_head *head;
-	u64 src_addr = 0;
-	__le16 vlan_id = 0;
-	u8 hindex;
-
-	if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
-		return;
-
-	if (adapter->fhash.fnum >= adapter->fhash.fmax)
-		return;
-
-	/* Only NPAR-capable devices support VLAN-based learning */
-	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
-		vlan_id = first_desc->vlan_TCI;
-	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
-	hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
-	head = &(adapter->fhash.fhead[hindex]);
-
-	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
-		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
-			    tmp_fil->vlan_id == vlan_id) {
-
-			if (jiffies >
-			    (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
-				qlcnic_change_filter(adapter, src_addr, vlan_id,
-								tx_ring);
-			tmp_fil->ftime = jiffies;
-			return;
-		}
-	}
-
-	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
-	if (!fil)
-		return;
-
-	qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
-
-	fil->ftime = jiffies;
-	fil->vlan_id = vlan_id;
-	memcpy(fil->faddr, &src_addr, ETH_ALEN);
-	spin_lock(&adapter->mac_learn_lock);
-	hlist_add_head(&(fil->fnode), head);
-	adapter->fhash.fnum++;
-	spin_unlock(&adapter->mac_learn_lock);
-}
-
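QLCNIC_MAC_HASH, defined above, folds two 3-bit fields of the 64-bit MAC value into a 6-bit bucket index. Since the address is memcpy()ed into the low bytes of a u64, on a little-endian host bits 16-18 are the low bits of the third address byte and bits 40-42 the low bits of the sixth. A standalone check of that reading:

#include <stdio.h>
#include <string.h>

#define MAC_HASH(mac)	\
	((((mac) & 0x70000) >> 0x10) | (((mac) & 0x70000000000ULL) >> 0x25))

int main(void)
{
	unsigned char addr[6] = { 0x00, 0x0e, 0x1e, 0x30, 0x5a, 0xc4 };
	unsigned long long v = 0;

	memcpy(&v, addr, 6);		/* little-endian host assumed */
	/* equivalent byte form: (addr[2] & 7) | ((addr[5] & 7) << 3) */
	printf("bucket %llu\n", MAC_HASH(v));	/* (0x1e&7)|((0xc4&7)<<3) = 38 */
	return 0;
}

The result is then masked with QLCNIC_LB_MAX_FILTERS - 1 by the caller, so the table size must stay a power of two.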
-static int
-qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
-		struct cmd_desc_type0 *first_desc,
-		struct sk_buff *skb)
-{
-	u8 opcode = 0, hdr_len = 0;
-	u16 flags = 0, vlan_tci = 0;
-	int copied, offset, copy_len;
-	struct cmd_desc_type0 *hwdesc;
-	struct vlan_ethhdr *vh;
-	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-	u16 protocol = ntohs(skb->protocol);
-	u32 producer = tx_ring->producer;
-
-	if (protocol == ETH_P_8021Q) {
-		vh = (struct vlan_ethhdr *)skb->data;
-		flags = FLAGS_VLAN_TAGGED;
-		vlan_tci = vh->h_vlan_TCI;
-	} else if (vlan_tx_tag_present(skb)) {
-		flags = FLAGS_VLAN_OOB;
-		vlan_tci = vlan_tx_tag_get(skb);
-	}
-	if (unlikely(adapter->pvid)) {
-		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
-			return -EIO;
-		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
-			goto set_flags;
-
-		flags = FLAGS_VLAN_OOB;
-		vlan_tci = adapter->pvid;
-	}
-set_flags:
-	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
-	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
-
-	if (*(skb->data) & BIT_0) {
-		flags |= BIT_0;
-		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
-	}
-	opcode = TX_ETHER_PKT;
-	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
-			skb_shinfo(skb)->gso_size > 0) {
-
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-
-		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-		first_desc->total_hdr_length = hdr_len;
-
-		opcode = (protocol == ETH_P_IPV6) ?
-				TX_TCP_LSO6 : TX_TCP_LSO;
-
-		/* For LSO, we need to copy the MAC/IP/TCP headers into
-		 * the descriptor ring */
-		copied = 0;
-		offset = 2;
-
-		if (flags & FLAGS_VLAN_OOB) {
-			first_desc->total_hdr_length += VLAN_HLEN;
-			first_desc->tcp_hdr_offset = VLAN_HLEN;
-			first_desc->ip_hdr_offset = VLAN_HLEN;
-			/* Only in case of TSO on vlan device */
-			flags |= FLAGS_VLAN_TAGGED;
-
-			/* Create a TSO vlan header template for firmware */
-
-			hwdesc = &tx_ring->desc_head[producer];
-			tx_ring->cmd_buf_arr[producer].skb = NULL;
-
-			copy_len = min((int)sizeof(struct cmd_desc_type0) -
-				offset, hdr_len + VLAN_HLEN);
-
-			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
-			skb_copy_from_linear_data(skb, vh, 12);
-			vh->h_vlan_proto = htons(ETH_P_8021Q);
-			vh->h_vlan_TCI = htons(vlan_tci);
-
-			skb_copy_from_linear_data_offset(skb, 12,
-				(char *)vh + 16, copy_len - 16);
-
-			copied = copy_len - VLAN_HLEN;
-			offset = 0;
-
-			producer = get_next_index(producer, tx_ring->num_desc);
-		}
-
-		while (copied < hdr_len) {
-
-			copy_len = min((int)sizeof(struct cmd_desc_type0) -
-				offset, (hdr_len - copied));
-
-			hwdesc = &tx_ring->desc_head[producer];
-			tx_ring->cmd_buf_arr[producer].skb = NULL;
-
-			skb_copy_from_linear_data_offset(skb, copied,
-				 (char *) hwdesc + offset, copy_len);
-
-			copied += copy_len;
-			offset = 0;
-
-			producer = get_next_index(producer, tx_ring->num_desc);
-		}
-
-		tx_ring->producer = producer;
-		smp_mb();
-		adapter->stats.lso_frames++;
-
-	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		u8 l4proto;
-
-		if (protocol == ETH_P_IP) {
-			l4proto = ip_hdr(skb)->protocol;
-
-			if (l4proto == IPPROTO_TCP)
-				opcode = TX_TCP_PKT;
-			else if (l4proto == IPPROTO_UDP)
-				opcode = TX_UDP_PKT;
-		} else if (protocol == ETH_P_IPV6) {
-			l4proto = ipv6_hdr(skb)->nexthdr;
-
-			if (l4proto == IPPROTO_TCP)
-				opcode = TX_TCPV6_PKT;
-			else if (l4proto == IPPROTO_UDP)
-				opcode = TX_UDPV6_PKT;
-		}
-	}
-	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
-	first_desc->ip_hdr_offset += skb_network_offset(skb);
-	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
-
-	return 0;
-}
-
-static int
-qlcnic_map_tx_skb(struct pci_dev *pdev,
-		struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
-{
-	struct qlcnic_skb_frag *nf;
-	struct skb_frag_struct *frag;
-	int i, nr_frags;
-	dma_addr_t map;
-
-	nr_frags = skb_shinfo(skb)->nr_frags;
-	nf = &pbuf->frag_array[0];
-
-	map = pci_map_single(pdev, skb->data,
-			skb_headlen(skb), PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, map))
-		goto out_err;
-
-	nf->dma = map;
-	nf->length = skb_headlen(skb);
-
-	for (i = 0; i < nr_frags; i++) {
-		frag = &skb_shinfo(skb)->frags[i];
-		nf = &pbuf->frag_array[i+1];
-
-		map = pci_map_page(pdev, frag->page, frag->page_offset,
-				frag->size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, map))
-			goto unwind;
-
-		nf->dma = map;
-		nf->length = frag->size;
-	}
-
-	return 0;
-
-unwind:
-	while (--i >= 0) {
-		nf = &pbuf->frag_array[i+1];
-		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
-	}
-
-	nf = &pbuf->frag_array[0];
-	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
-
-out_err:
-	return -ENOMEM;
-}
-
-static void
-qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
-		struct qlcnic_cmd_buffer *pbuf)
-{
-	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
-	int nr_frags = skb_shinfo(skb)->nr_frags;
-	int i;
-
-	for (i = 0; i < nr_frags; i++) {
-		nf = &pbuf->frag_array[i+1];
-		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
-	}
-
-	nf = &pbuf->frag_array[0];
-
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); - pbuf->skb = NULL; -} - -static inline void -qlcnic_clear_cmddesc(u64 *desc) -{ - desc[0] = 0ULL; - desc[2] = 0ULL; - desc[7] = 0ULL; -} - -netdev_tx_t -qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; - struct qlcnic_cmd_buffer *pbuf; - struct qlcnic_skb_frag *buffrag; - struct cmd_desc_type0 *hwdesc, *first_desc; - struct pci_dev *pdev; - struct ethhdr *phdr; - int delta = 0; - int i, k; - - u32 producer; - int frag_count; - u32 num_txd = tx_ring->num_desc; - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { - netif_stop_queue(netdev); - return NETDEV_TX_BUSY; - } - - if (adapter->flags & QLCNIC_MACSPOOF) { - phdr = (struct ethhdr *)skb->data; - if (compare_ether_addr(phdr->h_source, - adapter->mac_addr)) - goto drop_packet; - } - - frag_count = skb_shinfo(skb)->nr_frags + 1; - /* 14 frags supported for normal packet and - * 32 frags supported for TSO packet - */ - if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { - - for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) - delta += skb_shinfo(skb)->frags[i].size; - - if (!__pskb_pull_tail(skb, delta)) - goto drop_packet; - - frag_count = 1 + skb_shinfo(skb)->nr_frags; - } - - if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { - netif_stop_queue(netdev); - if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) - netif_start_queue(netdev); - else { - adapter->stats.xmit_off++; - return NETDEV_TX_BUSY; - } - } - - producer = tx_ring->producer; - pbuf = &tx_ring->cmd_buf_arr[producer]; - - pdev = adapter->pdev; - - first_desc = hwdesc = &tx_ring->desc_head[producer]; - qlcnic_clear_cmddesc((u64 *)hwdesc); - - if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { - adapter->stats.tx_dma_map_error++; - goto drop_packet; - } - - pbuf->skb = skb; - pbuf->frag_count = frag_count; - - qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len); - qlcnic_set_tx_port(first_desc, adapter->portnum); - - for (i = 0; i < frag_count; i++) { - - k = i % 4; - - if ((k == 0) && (i > 0)) { - /* move to next desc.*/ - producer = get_next_index(producer, num_txd); - hwdesc = &tx_ring->desc_head[producer]; - qlcnic_clear_cmddesc((u64 *)hwdesc); - tx_ring->cmd_buf_arr[producer].skb = NULL; - } - - buffrag = &pbuf->frag_array[i]; - - hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); - switch (k) { - case 0: - hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); - break; - case 1: - hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); - break; - case 2: - hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); - break; - case 3: - hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); - break; - } - } - - tx_ring->producer = get_next_index(producer, num_txd); - smp_mb(); - - if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb))) - goto unwind_buff; - - if (adapter->mac_learn) - qlcnic_send_filter(adapter, tx_ring, first_desc, skb); - - adapter->stats.txbytes += skb->len; - adapter->stats.xmitcalled++; - - qlcnic_update_cmd_producer(adapter, tx_ring); - - return NETDEV_TX_OK; - -unwind_buff: - qlcnic_unmap_buffers(pdev, skb, pbuf); -drop_packet: - adapter->stats.txdropped++; - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - -static int qlcnic_check_temp(struct qlcnic_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - u32 temp, temp_state, temp_val; - int rv = 0; - - temp = QLCRD32(adapter, CRB_TEMP_STATE); - - temp_state = 
qlcnic_get_temp_state(temp); - temp_val = qlcnic_get_temp_val(temp); - - if (temp_state == QLCNIC_TEMP_PANIC) { - dev_err(&netdev->dev, - "Device temperature %d degrees C exceeds" - " maximum allowed. Hardware has been shut down.\n", - temp_val); - rv = 1; - } else if (temp_state == QLCNIC_TEMP_WARN) { - if (adapter->temp == QLCNIC_TEMP_NORMAL) { - dev_err(&netdev->dev, - "Device temperature %d degrees C " - "exceeds operating range." - " Immediate action needed.\n", - temp_val); - } - } else { - if (adapter->temp == QLCNIC_TEMP_WARN) { - dev_info(&netdev->dev, - "Device temperature is now %d degrees C" - " in normal range.\n", temp_val); - } - } - adapter->temp = temp_state; - return rv; -} - -void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) -{ - struct net_device *netdev = adapter->netdev; - - if (adapter->ahw->linkup && !linkup) { - netdev_info(netdev, "NIC Link is down\n"); - adapter->ahw->linkup = 0; - if (netif_running(netdev)) { - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } - } else if (!adapter->ahw->linkup && linkup) { - netdev_info(netdev, "NIC Link is up\n"); - adapter->ahw->linkup = 1; - if (netif_running(netdev)) { - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } - } -} - -static void qlcnic_tx_timeout(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - - if (test_bit(__QLCNIC_RESETTING, &adapter->state)) - return; - - dev_err(&netdev->dev, "transmit timeout, resetting.\n"); - - if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) - adapter->need_fw_reset = 1; - else - adapter->reset_context = 1; -} - -static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct net_device_stats *stats = &netdev->stats; - - stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; - stats->tx_packets = adapter->stats.xmitfinished; - stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; - stats->tx_bytes = adapter->stats.txbytes; - stats->rx_dropped = adapter->stats.rxdropped; - stats->tx_dropped = adapter->stats.txdropped; - - return stats; -} - -static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter) -{ - u32 status; - - status = readl(adapter->isr_int_vec); - - if (!(status & adapter->int_vec_bit)) - return IRQ_NONE; - - /* check interrupt state machine, to be sure */ - status = readl(adapter->crb_int_state_reg); - if (!ISR_LEGACY_INT_TRIGGERED(status)) - return IRQ_NONE; - - writel(0xffffffff, adapter->tgt_status_reg); - /* read twice to ensure write is flushed */ - readl(adapter->isr_int_vec); - readl(adapter->isr_int_vec); - - return IRQ_HANDLED; -} - -static irqreturn_t qlcnic_tmp_intr(int irq, void *data) -{ - struct qlcnic_host_sds_ring *sds_ring = data; - struct qlcnic_adapter *adapter = sds_ring->adapter; - - if (adapter->flags & QLCNIC_MSIX_ENABLED) - goto done; - else if (adapter->flags & QLCNIC_MSI_ENABLED) { - writel(0xffffffff, adapter->tgt_status_reg); - goto done; - } - - if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) - return IRQ_NONE; - -done: - adapter->diag_cnt++; - qlcnic_enable_int(sds_ring); - return IRQ_HANDLED; -} - -static irqreturn_t qlcnic_intr(int irq, void *data) -{ - struct qlcnic_host_sds_ring *sds_ring = data; - struct qlcnic_adapter *adapter = sds_ring->adapter; - - if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) - return IRQ_NONE; - - napi_schedule(&sds_ring->napi); - - return IRQ_HANDLED; -} - -static irqreturn_t qlcnic_msi_intr(int 
irq, void *data) -{ - struct qlcnic_host_sds_ring *sds_ring = data; - struct qlcnic_adapter *adapter = sds_ring->adapter; - - /* clear interrupt */ - writel(0xffffffff, adapter->tgt_status_reg); - - napi_schedule(&sds_ring->napi); - return IRQ_HANDLED; -} - -static irqreturn_t qlcnic_msix_intr(int irq, void *data) -{ - struct qlcnic_host_sds_ring *sds_ring = data; - - napi_schedule(&sds_ring->napi); - return IRQ_HANDLED; -} - -static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter) -{ - u32 sw_consumer, hw_consumer; - int count = 0, i; - struct qlcnic_cmd_buffer *buffer; - struct pci_dev *pdev = adapter->pdev; - struct net_device *netdev = adapter->netdev; - struct qlcnic_skb_frag *frag; - int done; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; - - if (!spin_trylock(&adapter->tx_clean_lock)) - return 1; - - sw_consumer = tx_ring->sw_consumer; - hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); - - while (sw_consumer != hw_consumer) { - buffer = &tx_ring->cmd_buf_arr[sw_consumer]; - if (buffer->skb) { - frag = &buffer->frag_array[0]; - pci_unmap_single(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); - frag->dma = 0ULL; - for (i = 1; i < buffer->frag_count; i++) { - frag++; - pci_unmap_page(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); - frag->dma = 0ULL; - } - - adapter->stats.xmitfinished++; - dev_kfree_skb_any(buffer->skb); - buffer->skb = NULL; - } - - sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); - if (++count >= MAX_STATUS_HANDLE) - break; - } - - if (count && netif_running(netdev)) { - tx_ring->sw_consumer = sw_consumer; - - smp_mb(); - - if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { - if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { - netif_wake_queue(netdev); - adapter->stats.xmit_on++; - } - } - adapter->tx_timeo_cnt = 0; - } - /* - * If everything is freed up to consumer then check if the ring is full - * If the ring is full then check if more needs to be freed and - * schedule the call back again. - * - * This happens when there are 2 CPUs. One could be freeing and the - * other filling it. If the ring is full when we get out of here and - * the card has already interrupted the host then the host can miss the - * interrupt. - * - * There is still a possible race condition and the host could miss an - * interrupt. The card has to take care of this. 
- */
-	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
-	done = (sw_consumer == hw_consumer);
-	spin_unlock(&adapter->tx_clean_lock);
-
-	return done;
-}
-
-static int qlcnic_poll(struct napi_struct *napi, int budget)
-{
-	struct qlcnic_host_sds_ring *sds_ring =
-		container_of(napi, struct qlcnic_host_sds_ring, napi);
-
-	struct qlcnic_adapter *adapter = sds_ring->adapter;
-
-	int tx_complete;
-	int work_done;
-
-	tx_complete = qlcnic_process_cmd_ring(adapter);
-
-	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
-	if ((work_done < budget) && tx_complete) {
-		napi_complete(&sds_ring->napi);
-		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
-			qlcnic_enable_int(sds_ring);
-	}
-
-	return work_done;
-}
-
-static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
-{
-	struct qlcnic_host_sds_ring *sds_ring =
-		container_of(napi, struct qlcnic_host_sds_ring, napi);
-
-	struct qlcnic_adapter *adapter = sds_ring->adapter;
-	int work_done;
-
-	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
-	if (work_done < budget) {
-		napi_complete(&sds_ring->napi);
-		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
-			qlcnic_enable_int(sds_ring);
-	}
-
-	return work_done;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
-	int ring;
-	struct qlcnic_host_sds_ring *sds_ring;
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
-	disable_irq(adapter->irq);
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-		sds_ring = &recv_ctx->sds_rings[ring];
-		qlcnic_intr(adapter->irq, sds_ring);
-	}
-	enable_irq(adapter->irq);
-}
-#endif
-
-static void
-qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
-{
-	u32 val;
-
-	val = adapter->portnum & 0xf;
-	val |= encoding << 7;
-	val |= (jiffies - adapter->dev_rst_time) << 8;
-
-	QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
-	adapter->dev_rst_time = jiffies;
-}
-
-static int
-qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
-{
-	u32 val;
-
-	WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
-		state != QLCNIC_DEV_NEED_QUISCENT);
-
-	if (qlcnic_api_lock(adapter))
-		return -EIO;
-
-	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
-
-	if (state == QLCNIC_DEV_NEED_RESET)
-		QLC_DEV_SET_RST_RDY(val, adapter->portnum);
-	else if (state == QLCNIC_DEV_NEED_QUISCENT)
-		QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
-
-	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
-
-	qlcnic_api_unlock(adapter);
-
-	return 0;
-}
-
-static int
-qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
-{
-	u32 val;
-
-	if (qlcnic_api_lock(adapter))
-		return -EBUSY;
-
-	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
-	QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
-	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
-
-	qlcnic_api_unlock(adapter);
-
-	return 0;
-}
-
-static void
-qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
-{
-	u32 val;
-
-	if (qlcnic_api_lock(adapter))
-		goto err;
-
-	val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
-	QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
-	QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
-
-	if (failed) {
-		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
-		dev_info(&adapter->pdev->dev,
-				"Device state set to Failed. Please Reboot\n");
-	} else if (!(val & 0x11111111))
-		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
-
-	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
-	QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
-	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
-
-	qlcnic_api_unlock(adapter);
-err:
-	adapter->fw_fail_cnt = 0;
-	adapter->flags &= ~QLCNIC_FW_HANG;
-	clear_bit(__QLCNIC_START_FW, &adapter->state);
-	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-}
-
-/* Grab API lock, before checking state */
-static int
-qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
-{
-	int act, state, active_mask;
-
-	state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
-	act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
-
-	if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
-		active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
-		act = act & active_mask;
-	}
-
-	if (((state & 0x11111111) == (act & 0x11111111)) ||
-			((act & 0x11111111) == ((state >> 1) & 0x11111111)))
-		return 0;
-	else
-		return 1;
-}
-
-static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
-{
-	u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
-
-	if (val != QLCNIC_DRV_IDC_VER) {
-		dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
-			" idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
-	}
-
-	return 0;
-}
-
-static int
-qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
-{
-	u32 val, prev_state;
-	u8 dev_init_timeo = adapter->dev_init_timeo;
-	u8 portnum = adapter->portnum;
-	u8 ret;
-
-	if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
-		return 1;
-
-	if (qlcnic_api_lock(adapter))
-		return -1;
-
-	val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
-	if (!(val & (1 << (portnum * 4)))) {
-		QLC_DEV_SET_REF_CNT(val, portnum);
-		QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
-	}
-
-	prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-	QLCDB(adapter, HW, "Device state = %u\n", prev_state);
-
-	switch (prev_state) {
-	case QLCNIC_DEV_COLD:
-		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
-		QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
-		qlcnic_idc_debug_info(adapter, 0);
-		qlcnic_api_unlock(adapter);
-		return 1;
-
-	case QLCNIC_DEV_READY:
-		ret = qlcnic_check_idc_ver(adapter);
-		qlcnic_api_unlock(adapter);
-		return ret;
-
-	case QLCNIC_DEV_NEED_RESET:
-		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
-		QLC_DEV_SET_RST_RDY(val, portnum);
-		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
-		break;
-
-	case QLCNIC_DEV_NEED_QUISCENT:
-		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
-		QLC_DEV_SET_QSCNT_RDY(val, portnum);
-		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
-		break;
-
-	case QLCNIC_DEV_FAILED:
-		dev_err(&adapter->pdev->dev, "Device in failed state.\n");
-		qlcnic_api_unlock(adapter);
-		return -1;
-
-	case QLCNIC_DEV_INITIALIZING:
-	case QLCNIC_DEV_QUISCENT:
-		break;
-	}
-
-	qlcnic_api_unlock(adapter);
-
-	do {
-		msleep(1000);
-		prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-
-		if (prev_state == QLCNIC_DEV_QUISCENT)
-			continue;
-	} while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
-
-	if (!dev_init_timeo) {
-		dev_err(&adapter->pdev->dev,
-			"Timed out waiting for device to initialize\n");
-		return -1;
-	}
-
-	if (qlcnic_api_lock(adapter))
-		return -1;
-
-	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
-	QLC_DEV_CLR_RST_QSCNT(val, portnum);
-	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
-
-	ret = qlcnic_check_idc_ver(adapter);
-	qlcnic_api_unlock(adapter);
-
-	return ret;
-}
-
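The IDC handshake above keeps one 4-bit field per PCI function in the shared DRV_ACTIVE/DRV_STATE registers; the (1 << (portnum * 4)) reference-count test and the repeated 0x11111111 masks suggest that bit 0 of each nibble carries the per-function flag, so ANDing with 0x11111111 gathers that bit for all eight functions at once. A sketch of the bookkeeping under that assumption (names are illustrative, not the driver's):

#include <stdio.h>

#define FN_BIT(fn)	(1u << ((fn) * 4))	/* bit 0 of fn's nibble */
#define ALL_FN_MASK	0x11111111u

static unsigned int set_ready(unsigned int state, int fn)
{
	return state | FN_BIT(fn);
}

int main(void)
{
	unsigned int active = FN_BIT(0) | FN_BIT(2);	/* funcs 0 and 2 loaded */
	unsigned int state  = 0;

	state = set_ready(state, 0);
	state = set_ready(state, 2);

	/* all active functions acked the reset? (cf. qlcnic_check_drv_state) */
	printf("%s\n", ((state & ALL_FN_MASK) == (active & ALL_FN_MASK)) ?
	       "all acked" : "waiting");
	return 0;
}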
*adapter = container_of(work, - struct qlcnic_adapter, fw_work.work); - u32 dev_state = 0xf; - u32 val; - - if (qlcnic_api_lock(adapter)) - goto err_ret; - - dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - if (dev_state == QLCNIC_DEV_QUISCENT || - dev_state == QLCNIC_DEV_NEED_QUISCENT) { - qlcnic_api_unlock(adapter); - qlcnic_schedule_work(adapter, qlcnic_fwinit_work, - FW_POLL_DELAY * 2); - return; - } - - if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { - qlcnic_api_unlock(adapter); - goto wait_npar; - } - - if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { - dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n", - adapter->reset_ack_timeo); - goto skip_ack_check; - } - - if (!qlcnic_check_drv_state(adapter)) { -skip_ack_check: - dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - - if (dev_state == QLCNIC_DEV_NEED_RESET) { - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, - QLCNIC_DEV_INITIALIZING); - set_bit(__QLCNIC_START_FW, &adapter->state); - QLCDB(adapter, DRV, "Restarting fw\n"); - qlcnic_idc_debug_info(adapter, 0); - val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); - QLC_DEV_SET_RST_RDY(val, adapter->portnum); - QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); - } - - qlcnic_api_unlock(adapter); - - rtnl_lock(); - if (adapter->ahw->fw_dump.enable && - (adapter->flags & QLCNIC_FW_RESET_OWNER)) { - QLCDB(adapter, DRV, "Take FW dump\n"); - qlcnic_dump_fw(adapter); - adapter->flags |= QLCNIC_FW_HANG; - } - rtnl_unlock(); - - adapter->flags &= ~QLCNIC_FW_RESET_OWNER; - if (!adapter->nic_ops->start_firmware(adapter)) { - qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); - adapter->fw_wait_cnt = 0; - return; - } - goto err_ret; - } - - qlcnic_api_unlock(adapter); - -wait_npar: - dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); - QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); - - switch (dev_state) { - case QLCNIC_DEV_READY: - if (!adapter->nic_ops->start_firmware(adapter)) { - qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); - adapter->fw_wait_cnt = 0; - return; - } - case QLCNIC_DEV_FAILED: - break; - default: - qlcnic_schedule_work(adapter, - qlcnic_fwinit_work, FW_POLL_DELAY); - return; - } - -err_ret: - dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u " - "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt); - netif_device_attach(adapter->netdev); - qlcnic_clr_all_drv_state(adapter, 0); -} - -static void -qlcnic_detach_work(struct work_struct *work) -{ - struct qlcnic_adapter *adapter = container_of(work, - struct qlcnic_adapter, fw_work.work); - struct net_device *netdev = adapter->netdev; - u32 status; - - netif_device_detach(netdev); - - /* Dont grab rtnl lock during Quiscent mode */ - if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) { - if (netif_running(netdev)) - __qlcnic_down(adapter, netdev); - } else - qlcnic_down(adapter, netdev); - - status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1); - - if (status & QLCNIC_RCODE_FATAL_ERROR) - goto err_ret; - - if (adapter->temp == QLCNIC_TEMP_PANIC) - goto err_ret; - /* Dont ack if this instance is the reset owner */ - if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) { - if (qlcnic_set_drv_state(adapter, adapter->dev_state)) - goto err_ret; - } - - adapter->fw_wait_cnt = 0; - - qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY); - - return; - -err_ret: - dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n", - status, adapter->temp); - netif_device_attach(netdev); - qlcnic_clr_all_drv_state(adapter, 1); -} - -/*Transit NPAR state to NON Operational */ -static 
-qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
-{
- u32 state;
-
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
- if (state == QLCNIC_DEV_NPAR_NON_OPER)
- return;
-
- if (qlcnic_api_lock(adapter))
- return;
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
- qlcnic_api_unlock(adapter);
-}
-
-/* Transition to RESET state from READY state only */
-void
-qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
-{
- u32 state;
-
- adapter->need_fw_reset = 1;
- if (qlcnic_api_lock(adapter))
- return;
-
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-
- if (state == QLCNIC_DEV_READY) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
- adapter->flags |= QLCNIC_FW_RESET_OWNER;
- QLCDB(adapter, DRV, "NEED_RESET state set\n");
- qlcnic_idc_debug_info(adapter, 0);
- }
-
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
- qlcnic_api_unlock(adapter);
-}
-
-/* Transition to NPAR READY state from NPAR NOT READY state */
-static void
-qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
-{
- if (qlcnic_api_lock(adapter))
- return;
-
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
- QLCDB(adapter, DRV, "NPAR operational state set\n");
-
- qlcnic_api_unlock(adapter);
-}
-
-static void
-qlcnic_schedule_work(struct qlcnic_adapter *adapter,
- work_func_t func, int delay)
-{
- if (test_bit(__QLCNIC_AER, &adapter->state))
- return;
-
- INIT_DELAYED_WORK(&adapter->fw_work, func);
- queue_delayed_work(qlcnic_wq, &adapter->fw_work,
- round_jiffies_relative(delay));
-}
-
-static void
-qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
-{
- while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- msleep(10);
-
- cancel_delayed_work_sync(&adapter->fw_work);
-}
-
-static void
-qlcnic_attach_work(struct work_struct *work)
-{
- struct qlcnic_adapter *adapter = container_of(work,
- struct qlcnic_adapter, fw_work.work);
- struct net_device *netdev = adapter->netdev;
- u32 npar_state;
-
- if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
- if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
- qlcnic_clr_all_drv_state(adapter, 0);
- else if (npar_state != QLCNIC_DEV_NPAR_OPER)
- qlcnic_schedule_work(adapter, qlcnic_attach_work,
- FW_POLL_DELAY);
- else
- goto attach;
- QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
- return;
- }
-attach:
- if (netif_running(netdev)) {
- if (qlcnic_up(adapter, netdev))
- goto done;
-
- qlcnic_restore_indev_addr(netdev, NETDEV_UP);
- }
-
-done:
- netif_device_attach(netdev);
- adapter->fw_fail_cnt = 0;
- adapter->flags &= ~QLCNIC_FW_HANG;
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
-
- if (!qlcnic_clr_drv_state(adapter))
- qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
- FW_POLL_DELAY);
-}
-
-static int
-qlcnic_check_health(struct qlcnic_adapter *adapter)
-{
- u32 state = 0, heartbeat;
- struct net_device *netdev = adapter->netdev;
-
- if (qlcnic_check_temp(adapter))
- goto detach;
-
- if (adapter->need_fw_reset)
- qlcnic_dev_request_reset(adapter);
-
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_NEED_RESET) {
- qlcnic_set_npar_non_operational(adapter);
- adapter->need_fw_reset = 1;
- } else if (state == QLCNIC_DEV_NEED_QUISCENT)
- goto detach;
-
- heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
- if (heartbeat != adapter->heartbeat) {
- adapter->heartbeat = heartbeat;
- adapter->fw_fail_cnt = 0;
- if (adapter->need_fw_reset)
- goto detach;
-
- if 
(adapter->reset_context && auto_fw_reset) { - qlcnic_reset_hw_context(adapter); - adapter->netdev->trans_start = jiffies; - } - - return 0; - } - - if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) - return 0; - - adapter->flags |= QLCNIC_FW_HANG; - - qlcnic_dev_request_reset(adapter); - - if (auto_fw_reset) - clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); - - dev_info(&netdev->dev, "firmware hang detected\n"); - dev_info(&adapter->pdev->dev, "Dumping hw/fw registers\n" - "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" - "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" - "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" - "PEG_NET_4_PC: 0x%x\n", - QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1), - QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2), - QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c), - QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c), - QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c), - QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c), - QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c)); -detach: - adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state : - QLCNIC_DEV_NEED_RESET; - - if (auto_fw_reset && - !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { - - qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); - QLCDB(adapter, DRV, "fw recovery scheduled.\n"); - } - - return 1; -} - -static void -qlcnic_fw_poll_work(struct work_struct *work) -{ - struct qlcnic_adapter *adapter = container_of(work, - struct qlcnic_adapter, fw_work.work); - - if (test_bit(__QLCNIC_RESETTING, &adapter->state)) - goto reschedule; - - - if (qlcnic_check_health(adapter)) - return; - - if (adapter->fhash.fnum) - qlcnic_prune_lb_filters(adapter); - -reschedule: - qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); -} - -static int qlcnic_is_first_func(struct pci_dev *pdev) -{ - struct pci_dev *oth_pdev; - int val = pdev->devfn; - - while (val-- > 0) { - oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr - (pdev->bus), pdev->bus->number, - PCI_DEVFN(PCI_SLOT(pdev->devfn), val)); - if (!oth_pdev) - continue; - - if (oth_pdev->current_state != PCI_D3cold) { - pci_dev_put(oth_pdev); - return 0; - } - pci_dev_put(oth_pdev); - } - return 1; -} - -static int qlcnic_attach_func(struct pci_dev *pdev) -{ - int err, first_func; - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - - pdev->error_state = pci_channel_io_normal; - - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_power_state(pdev, PCI_D0); - pci_set_master(pdev); - pci_restore_state(pdev); - - first_func = qlcnic_is_first_func(pdev); - - if (qlcnic_api_lock(adapter)) - return -EINVAL; - - if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) { - adapter->need_fw_reset = 1; - set_bit(__QLCNIC_START_FW, &adapter->state); - QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); - QLCDB(adapter, DRV, "Restarting fw\n"); - } - qlcnic_api_unlock(adapter); - - err = adapter->nic_ops->start_firmware(adapter); - if (err) - return err; - - qlcnic_clr_drv_state(adapter); - qlcnic_setup_intr(adapter); - - if (netif_running(netdev)) { - err = qlcnic_attach(adapter); - if (err) { - qlcnic_clr_all_drv_state(adapter, 1); - clear_bit(__QLCNIC_AER, &adapter->state); - netif_device_attach(netdev); - return err; - } - - err = qlcnic_up(adapter, netdev); - if (err) - goto done; - - qlcnic_restore_indev_addr(netdev, NETDEV_UP); - } - done: - netif_device_attach(netdev); - return err; -} - -static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t 
state) -{ - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (state == pci_channel_io_normal) - return PCI_ERS_RESULT_RECOVERED; - - set_bit(__QLCNIC_AER, &adapter->state); - netif_device_detach(netdev); - - cancel_delayed_work_sync(&adapter->fw_work); - - if (netif_running(netdev)) - qlcnic_down(adapter, netdev); - - qlcnic_detach(adapter); - qlcnic_teardown_intr(adapter); - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - - pci_save_state(pdev); - pci_disable_device(pdev); - - return PCI_ERS_RESULT_NEED_RESET; -} - -static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev) -{ - return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT : - PCI_ERS_RESULT_RECOVERED; -} - -static void qlcnic_io_resume(struct pci_dev *pdev) -{ - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - - pci_cleanup_aer_uncorrect_error_status(pdev); - - if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY && - test_and_clear_bit(__QLCNIC_AER, &adapter->state)) - qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, - FW_POLL_DELAY); -} - -static int -qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) -{ - int err; - - err = qlcnic_can_start_firmware(adapter); - if (err) - return err; - - err = qlcnic_check_npar_opertional(adapter); - if (err) - return err; - - err = qlcnic_initialize_nic(adapter); - if (err) - return err; - - qlcnic_check_options(adapter); - - err = qlcnic_set_eswitch_port_config(adapter); - if (err) - return err; - - adapter->need_fw_reset = 0; - - return err; -} - -static int -qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) -{ - return -EOPNOTSUPP; -} - -static int -qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) -{ - return -EOPNOTSUPP; -} - -static ssize_t -qlcnic_store_bridged_mode(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - unsigned long new; - int ret = -EINVAL; - - if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)) - goto err_out; - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) - goto err_out; - - if (strict_strtoul(buf, 2, &new)) - goto err_out; - - if (!adapter->nic_ops->config_bridged_mode(adapter, !!new)) - ret = len; - -err_out: - return ret; -} - -static ssize_t -qlcnic_show_bridged_mode(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - int bridged_mode = 0; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) - bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED); - - return sprintf(buf, "%d\n", bridged_mode); -} - -static struct device_attribute dev_attr_bridged_mode = { - .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, - .show = qlcnic_show_bridged_mode, - .store = qlcnic_store_bridged_mode, -}; - -static ssize_t -qlcnic_store_diag_mode(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - unsigned long new; - - if (strict_strtoul(buf, 2, &new)) - return -EINVAL; - - if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED)) - adapter->flags ^= QLCNIC_DIAG_ENABLED; - - return len; -} - -static ssize_t -qlcnic_show_diag_mode(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - - return 
sprintf(buf, "%d\n",
- !!(adapter->flags & QLCNIC_DIAG_ENABLED));
-}
-
-static struct device_attribute dev_attr_diag_mode = {
- .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
- .show = qlcnic_show_diag_mode,
- .store = qlcnic_store_diag_mode,
-};
-
-int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
-{
- if (!use_msi_x && !use_msi) {
- netdev_info(netdev, "no msix or msi support, hence no rss\n");
- return -EINVAL;
- }
-
- if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
- netdev_info(netdev, "rss_ring valid range [2 - %x] in "
- "powers of 2\n", max_hw);
- return -EINVAL;
- }
- return 0;
-
-}
-
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
-{
- struct net_device *netdev = adapter->netdev;
- int err = 0;
-
- if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- return -EBUSY;
-
- netif_device_detach(netdev);
- if (netif_running(netdev))
- __qlcnic_down(adapter, netdev);
- qlcnic_detach(adapter);
- qlcnic_teardown_intr(adapter);
-
- if (qlcnic_enable_msix(adapter, data)) {
- netdev_info(netdev, "failed setting max_rss; rss disabled\n");
- qlcnic_enable_msi_legacy(adapter);
- }
-
- if (netif_running(netdev)) {
- err = qlcnic_attach(adapter);
- if (err)
- goto done;
- err = __qlcnic_up(adapter, netdev);
- if (err)
- goto done;
- qlcnic_restore_indev_addr(netdev, NETDEV_UP);
- }
- done:
- netif_device_attach(netdev);
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- return err;
-}
-
-static int
-qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
- loff_t offset, size_t size)
-{
- size_t crb_size = 4;
-
- if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
- return -EIO;
-
- if (offset < QLCNIC_PCI_CRBSPACE) {
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
- QLCNIC_PCI_CAMQM_END))
- crb_size = 8;
- else
- return -EINVAL;
- }
-
- if ((size != crb_size) || (offset & (crb_size-1)))
- return -EINVAL;
-
- return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
- int ret;
-
- ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
- if (ret != 0)
- return ret;
-
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
- memcpy(buf, &qmdata, size);
- } else {
- data = QLCRD32(adapter, offset);
- memcpy(buf, &data, size);
- }
- return size;
-}
-
-static ssize_t
-qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
- int ret;
-
- ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
- if (ret != 0)
- return ret;
-
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- memcpy(&qmdata, buf, size);
- qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
- } else {
- memcpy(&data, buf, size);
- QLCWR32(adapter, offset, data);
- }
- return size;
-}
-
-static int
-qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
- loff_t offset, size_t size)
-{
- if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
- return -EIO;
-
- if ((size != 8) || (offset & 0x7))
- return -EIO;
-
- return 0;
-}
-
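The validate helpers above define the access contract for these diagnostic
bin attributes: CRB accesses must be exactly 4 bytes on a 4-byte boundary
(8 bytes inside the CAMQM window), and "mem" accesses must be exactly
8 bytes on an 8-byte boundary, otherwise the driver returns -EINVAL or
-EIO. A userspace reader honoring that contract could look like the sketch
below; the sysfs path is hypothetical, and a real offset must also fall in
a range the driver accepts.

	#include <fcntl.h>
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		/* Hypothetical sysfs path; the PCI address is system-specific. */
		const char *path = "/sys/bus/pci/devices/0000:03:00.0/crb";
		uint32_t val;
		off_t off;
		int fd;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <crb-offset>\n", argv[0]);
			return 1;
		}
		off = (off_t)strtoull(argv[1], NULL, 0);
		if (off & 3) {
			fprintf(stderr, "offset must be 4-byte aligned\n");
			return 1;
		}

		fd = open(path, O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Read exactly one 4-byte CRB word at the given offset;
		 * any other size is rejected by the validate helper. */
		if (pread(fd, &val, sizeof(val), off) != (ssize_t)sizeof(val)) {
			perror("pread");
			close(fd);
			return 1;
		}
		printf("CRB[0x%jx] = 0x%08" PRIx32 "\n", (uintmax_t)off, val);
		close(fd);
		return 0;
	}

-static ssize_t
-qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- 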
struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u64 data; - int ret; - - ret = qlcnic_sysfs_validate_mem(adapter, offset, size); - if (ret != 0) - return ret; - - if (qlcnic_pci_mem_read_2M(adapter, offset, &data)) - return -EIO; - - memcpy(buf, &data, size); - - return size; -} - -static ssize_t -qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - u64 data; - int ret; - - ret = qlcnic_sysfs_validate_mem(adapter, offset, size); - if (ret != 0) - return ret; - - memcpy(&data, buf, size); - - if (qlcnic_pci_mem_write_2M(adapter, offset, data)) - return -EIO; - - return size; -} - -static struct bin_attribute bin_attr_crb = { - .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_crb, - .write = qlcnic_sysfs_write_crb, -}; - -static struct bin_attribute bin_attr_mem = { - .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_mem, - .write = qlcnic_sysfs_write_mem, -}; - -static int -validate_pm_config(struct qlcnic_adapter *adapter, - struct qlcnic_pm_func_cfg *pm_cfg, int count) -{ - - u8 src_pci_func, s_esw_id, d_esw_id; - u8 dest_pci_func; - int i; - - for (i = 0; i < count; i++) { - src_pci_func = pm_cfg[i].pci_func; - dest_pci_func = pm_cfg[i].dest_npar; - if (src_pci_func >= QLCNIC_MAX_PCI_FUNC - || dest_pci_func >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC) - return QL_STATUS_INVALID_PARAM; - - if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC) - return QL_STATUS_INVALID_PARAM; - - s_esw_id = adapter->npars[src_pci_func].phy_port; - d_esw_id = adapter->npars[dest_pci_func].phy_port; - - if (s_esw_id != d_esw_id) - return QL_STATUS_INVALID_PARAM; - - } - return 0; - -} - -static ssize_t -qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_pm_func_cfg *pm_cfg; - u32 id, action, pci_func; - int count, rem, i, ret; - - count = size / sizeof(struct qlcnic_pm_func_cfg); - rem = size % sizeof(struct qlcnic_pm_func_cfg); - if (rem) - return QL_STATUS_INVALID_PARAM; - - pm_cfg = (struct qlcnic_pm_func_cfg *) buf; - - ret = validate_pm_config(adapter, pm_cfg, count); - if (ret) - return ret; - for (i = 0; i < count; i++) { - pci_func = pm_cfg[i].pci_func; - action = !!pm_cfg[i].action; - id = adapter->npars[pci_func].phy_port; - ret = qlcnic_config_port_mirroring(adapter, id, - action, pci_func); - if (ret) - return ret; - } - - for (i = 0; i < count; i++) { - pci_func = pm_cfg[i].pci_func; - id = adapter->npars[pci_func].phy_port; - adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action; - adapter->npars[pci_func].dest_npar = id; - } - return size; -} - -static ssize_t -qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_pm_func_cfg 
pm_cfg[QLCNIC_MAX_PCI_FUNC]; - int i; - - if (size != sizeof(pm_cfg)) - return QL_STATUS_INVALID_PARAM; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - if (adapter->npars[i].type != QLCNIC_TYPE_NIC) - continue; - pm_cfg[i].action = adapter->npars[i].enable_pm; - pm_cfg[i].dest_npar = 0; - pm_cfg[i].pci_func = i; - } - memcpy(buf, &pm_cfg, size); - - return size; -} - -static int -validate_esw_config(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg, int count) -{ - u32 op_mode; - u8 pci_func; - int i; - - op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE); - - for (i = 0; i < count; i++) { - pci_func = esw_cfg[i].pci_func; - if (pci_func >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - if (adapter->op_mode == QLCNIC_MGMT_FUNC) - if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) - return QL_STATUS_INVALID_PARAM; - - switch (esw_cfg[i].op_mode) { - case QLCNIC_PORT_DEFAULTS: - if (QLC_DEV_GET_DRV(op_mode, pci_func) != - QLCNIC_NON_PRIV_FUNC) { - if (esw_cfg[i].mac_anti_spoof != 0) - return QL_STATUS_INVALID_PARAM; - if (esw_cfg[i].mac_override != 1) - return QL_STATUS_INVALID_PARAM; - if (esw_cfg[i].promisc_mode != 1) - return QL_STATUS_INVALID_PARAM; - } - break; - case QLCNIC_ADD_VLAN: - if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) - return QL_STATUS_INVALID_PARAM; - if (!esw_cfg[i].op_type) - return QL_STATUS_INVALID_PARAM; - break; - case QLCNIC_DEL_VLAN: - if (!esw_cfg[i].op_type) - return QL_STATUS_INVALID_PARAM; - break; - default: - return QL_STATUS_INVALID_PARAM; - } - } - return 0; -} - -static ssize_t -qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_esw_func_cfg *esw_cfg; - struct qlcnic_npar_info *npar; - int count, rem, i, ret; - u8 pci_func, op_mode = 0; - - count = size / sizeof(struct qlcnic_esw_func_cfg); - rem = size % sizeof(struct qlcnic_esw_func_cfg); - if (rem) - return QL_STATUS_INVALID_PARAM; - - esw_cfg = (struct qlcnic_esw_func_cfg *) buf; - ret = validate_esw_config(adapter, esw_cfg, count); - if (ret) - return ret; - - for (i = 0; i < count; i++) { - if (adapter->op_mode == QLCNIC_MGMT_FUNC) - if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) - return QL_STATUS_INVALID_PARAM; - - if (adapter->ahw->pci_func != esw_cfg[i].pci_func) - continue; - - op_mode = esw_cfg[i].op_mode; - qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]); - esw_cfg[i].op_mode = op_mode; - esw_cfg[i].pci_func = adapter->ahw->pci_func; - - switch (esw_cfg[i].op_mode) { - case QLCNIC_PORT_DEFAULTS: - qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]); - break; - case QLCNIC_ADD_VLAN: - qlcnic_set_vlan_config(adapter, &esw_cfg[i]); - break; - case QLCNIC_DEL_VLAN: - esw_cfg[i].vlan_id = 0; - qlcnic_set_vlan_config(adapter, &esw_cfg[i]); - break; - } - } - - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - goto out; - - for (i = 0; i < count; i++) { - pci_func = esw_cfg[i].pci_func; - npar = &adapter->npars[pci_func]; - switch (esw_cfg[i].op_mode) { - case QLCNIC_PORT_DEFAULTS: - npar->promisc_mode = esw_cfg[i].promisc_mode; - npar->mac_override = esw_cfg[i].mac_override; - npar->offload_flags = esw_cfg[i].offload_flags; - npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof; - npar->discard_tagged = esw_cfg[i].discard_tagged; - break; - case QLCNIC_ADD_VLAN: - npar->pvid = esw_cfg[i].vlan_id; - break; - case 
QLCNIC_DEL_VLAN: - npar->pvid = 0; - break; - } - } -out: - return size; -} - -static ssize_t -qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC]; - u8 i; - - if (size != sizeof(esw_cfg)) - return QL_STATUS_INVALID_PARAM; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { - if (adapter->npars[i].type != QLCNIC_TYPE_NIC) - continue; - esw_cfg[i].pci_func = i; - if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i])) - return QL_STATUS_INVALID_PARAM; - } - memcpy(buf, &esw_cfg, size); - - return size; -} - -static int -validate_npar_config(struct qlcnic_adapter *adapter, - struct qlcnic_npar_func_cfg *np_cfg, int count) -{ - u8 pci_func, i; - - for (i = 0; i < count; i++) { - pci_func = np_cfg[i].pci_func; - if (pci_func >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) - return QL_STATUS_INVALID_PARAM; - - if (!IS_VALID_BW(np_cfg[i].min_bw) || - !IS_VALID_BW(np_cfg[i].max_bw)) - return QL_STATUS_INVALID_PARAM; - } - return 0; -} - -static ssize_t -qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_info nic_info; - struct qlcnic_npar_func_cfg *np_cfg; - int i, count, rem, ret; - u8 pci_func; - - count = size / sizeof(struct qlcnic_npar_func_cfg); - rem = size % sizeof(struct qlcnic_npar_func_cfg); - if (rem) - return QL_STATUS_INVALID_PARAM; - - np_cfg = (struct qlcnic_npar_func_cfg *) buf; - ret = validate_npar_config(adapter, np_cfg, count); - if (ret) - return ret; - - for (i = 0; i < count ; i++) { - pci_func = np_cfg[i].pci_func; - ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func); - if (ret) - return ret; - nic_info.pci_func = pci_func; - nic_info.min_tx_bw = np_cfg[i].min_bw; - nic_info.max_tx_bw = np_cfg[i].max_bw; - ret = qlcnic_set_nic_info(adapter, &nic_info); - if (ret) - return ret; - adapter->npars[i].min_bw = nic_info.min_tx_bw; - adapter->npars[i].max_bw = nic_info.max_tx_bw; - } - - return size; - -} -static ssize_t -qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_info nic_info; - struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC]; - int i, ret; - - if (size != sizeof(np_cfg)) - return QL_STATUS_INVALID_PARAM; - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { - if (adapter->npars[i].type != QLCNIC_TYPE_NIC) - continue; - ret = qlcnic_get_nic_info(adapter, &nic_info, i); - if (ret) - return ret; - - np_cfg[i].pci_func = i; - np_cfg[i].op_mode = (u8)nic_info.op_mode; - np_cfg[i].port_num = nic_info.phys_port; - np_cfg[i].fw_capab = nic_info.capabilities; - np_cfg[i].min_bw = nic_info.min_tx_bw ; - np_cfg[i].max_bw = nic_info.max_tx_bw; - np_cfg[i].max_tx_queues = nic_info.max_tx_ques; - np_cfg[i].max_rx_queues = nic_info.max_rx_ques; - } - memcpy(buf, &np_cfg, size); - return size; -} - -static ssize_t -qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj, - struct 
bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_esw_statistics port_stats; - int ret; - - if (size != sizeof(struct qlcnic_esw_statistics)) - return QL_STATUS_INVALID_PARAM; - - if (offset >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - memset(&port_stats, 0, size); - ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, - &port_stats.rx); - if (ret) - return ret; - - ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, - &port_stats.tx); - if (ret) - return ret; - - memcpy(buf, &port_stats, size); - return size; -} - -static ssize_t -qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_esw_statistics esw_stats; - int ret; - - if (size != sizeof(struct qlcnic_esw_statistics)) - return QL_STATUS_INVALID_PARAM; - - if (offset >= QLCNIC_NIU_MAX_XG_PORTS) - return QL_STATUS_INVALID_PARAM; - - memset(&esw_stats, 0, size); - ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, - &esw_stats.rx); - if (ret) - return ret; - - ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, - &esw_stats.tx); - if (ret) - return ret; - - memcpy(buf, &esw_stats, size); - return size; -} - -static ssize_t -qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - int ret; - - if (offset >= QLCNIC_NIU_MAX_XG_PORTS) - return QL_STATUS_INVALID_PARAM; - - ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, - QLCNIC_QUERY_RX_COUNTER); - if (ret) - return ret; - - ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, - QLCNIC_QUERY_TX_COUNTER); - if (ret) - return ret; - - return size; -} - -static ssize_t -qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - int ret; - - if (offset >= QLCNIC_MAX_PCI_FUNC) - return QL_STATUS_INVALID_PARAM; - - ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, - QLCNIC_QUERY_RX_COUNTER); - if (ret) - return ret; - - ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, - QLCNIC_QUERY_TX_COUNTER); - if (ret) - return ret; - - return size; -} - -static ssize_t -qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t offset, size_t size) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC]; - struct qlcnic_pci_info *pci_info; - int i, ret; - - if (size != sizeof(pci_cfg)) - return QL_STATUS_INVALID_PARAM; - - pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); - if (!pci_info) - return -ENOMEM; - - ret = qlcnic_get_pci_info(adapter, pci_info); - if (ret) { - kfree(pci_info); - return ret; - } - - for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { - 
pci_cfg[i].pci_func = pci_info[i].id; - pci_cfg[i].func_type = pci_info[i].type; - pci_cfg[i].port_num = pci_info[i].default_port; - pci_cfg[i].min_bw = pci_info[i].tx_min_bw; - pci_cfg[i].max_bw = pci_info[i].tx_max_bw; - memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN); - } - memcpy(buf, &pci_cfg, size); - kfree(pci_info); - return size; -} -static struct bin_attribute bin_attr_npar_config = { - .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_npar_config, - .write = qlcnic_sysfs_write_npar_config, -}; - -static struct bin_attribute bin_attr_pci_config = { - .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_pci_config, - .write = NULL, -}; - -static struct bin_attribute bin_attr_port_stats = { - .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_get_port_stats, - .write = qlcnic_sysfs_clear_port_stats, -}; - -static struct bin_attribute bin_attr_esw_stats = { - .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_get_esw_stats, - .write = qlcnic_sysfs_clear_esw_stats, -}; - -static struct bin_attribute bin_attr_esw_config = { - .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_esw_config, - .write = qlcnic_sysfs_write_esw_config, -}; - -static struct bin_attribute bin_attr_pm_config = { - .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)}, - .size = 0, - .read = qlcnic_sysfs_read_pm_config, - .write = qlcnic_sysfs_write_pm_config, -}; - -static void -qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) - if (device_create_file(dev, &dev_attr_bridged_mode)) - dev_warn(dev, - "failed to create bridged_mode sysfs entry\n"); -} - -static void -qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - - if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG) - device_remove_file(dev, &dev_attr_bridged_mode); -} - -static void -qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - - if (device_create_bin_file(dev, &bin_attr_port_stats)) - dev_info(dev, "failed to create port stats sysfs entry"); - - if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) - return; - if (device_create_file(dev, &dev_attr_diag_mode)) - dev_info(dev, "failed to create diag_mode sysfs entry\n"); - if (device_create_bin_file(dev, &bin_attr_crb)) - dev_info(dev, "failed to create crb sysfs entry\n"); - if (device_create_bin_file(dev, &bin_attr_mem)) - dev_info(dev, "failed to create mem sysfs entry\n"); - if (device_create_bin_file(dev, &bin_attr_pci_config)) - dev_info(dev, "failed to create pci config sysfs entry"); - if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) - return; - if (device_create_bin_file(dev, &bin_attr_esw_config)) - dev_info(dev, "failed to create esw config sysfs entry"); - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return; - if (device_create_bin_file(dev, &bin_attr_npar_config)) - dev_info(dev, "failed to create npar config sysfs entry"); - if (device_create_bin_file(dev, &bin_attr_pm_config)) - dev_info(dev, "failed to create pm config sysfs entry"); - if (device_create_bin_file(dev, &bin_attr_esw_stats)) - dev_info(dev, "failed to create eswitch stats sysfs entry"); -} - -static void -qlcnic_remove_diag_entries(struct 
qlcnic_adapter *adapter) -{ - struct device *dev = &adapter->pdev->dev; - - device_remove_bin_file(dev, &bin_attr_port_stats); - - if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) - return; - device_remove_file(dev, &dev_attr_diag_mode); - device_remove_bin_file(dev, &bin_attr_crb); - device_remove_bin_file(dev, &bin_attr_mem); - device_remove_bin_file(dev, &bin_attr_pci_config); - if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) - return; - device_remove_bin_file(dev, &bin_attr_esw_config); - if (adapter->op_mode != QLCNIC_MGMT_FUNC) - return; - device_remove_bin_file(dev, &bin_attr_npar_config); - device_remove_bin_file(dev, &bin_attr_pm_config); - device_remove_bin_file(dev, &bin_attr_esw_stats); -} - -#ifdef CONFIG_INET - -#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops) - -static void -qlcnic_config_indev_addr(struct qlcnic_adapter *adapter, - struct net_device *dev, unsigned long event) -{ - struct in_device *indev; - - indev = in_dev_get(dev); - if (!indev) - return; - - for_ifa(indev) { - switch (event) { - case NETDEV_UP: - qlcnic_config_ipaddr(adapter, - ifa->ifa_address, QLCNIC_IP_UP); - break; - case NETDEV_DOWN: - qlcnic_config_ipaddr(adapter, - ifa->ifa_address, QLCNIC_IP_DOWN); - break; - default: - break; - } - } endfor_ifa(indev); - - in_dev_put(indev); -} - -static void -qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct net_device *dev; - u16 vid; - - qlcnic_config_indev_addr(adapter, netdev, event); - - for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { - dev = __vlan_find_dev_deep(netdev, vid); - if (!dev) - continue; - qlcnic_config_indev_addr(adapter, dev, event); - } -} - -static int qlcnic_netdev_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct qlcnic_adapter *adapter; - struct net_device *dev = (struct net_device *)ptr; - -recheck: - if (dev == NULL) - goto done; - - if (dev->priv_flags & IFF_802_1Q_VLAN) { - dev = vlan_dev_real_dev(dev); - goto recheck; - } - - if (!is_qlcnic_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter) - goto done; - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) - goto done; - - qlcnic_config_indev_addr(adapter, dev, event); -done: - return NOTIFY_DONE; -} - -static int -qlcnic_inetaddr_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct qlcnic_adapter *adapter; - struct net_device *dev; - - struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; - - dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL;
-
-recheck:
- if (dev == NULL)
- goto done;
-
- if (dev->priv_flags & IFF_802_1Q_VLAN) {
- dev = vlan_dev_real_dev(dev);
- goto recheck;
- }
-
- if (!is_qlcnic_netdev(dev))
- goto done;
-
- adapter = netdev_priv(dev);
-
- if (!adapter)
- goto done;
-
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
- goto done;
-
- switch (event) {
- case NETDEV_UP:
- qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
- break;
- case NETDEV_DOWN:
- qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
- break;
- default:
- break;
- }
-
-done:
- return NOTIFY_DONE;
-}
-
-static struct notifier_block qlcnic_netdev_cb = {
- .notifier_call = qlcnic_netdev_event,
-};
-
-static struct notifier_block qlcnic_inetaddr_cb = {
- .notifier_call = qlcnic_inetaddr_event,
-};
-#else
-static void
-qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
-{ }
-#endif
-static struct pci_error_handlers qlcnic_err_handler = {
- .error_detected = qlcnic_io_error_detected,
- .slot_reset = qlcnic_io_slot_reset,
- .resume = qlcnic_io_resume,
-};
-
-static struct pci_driver qlcnic_driver = {
- .name = qlcnic_driver_name,
- .id_table = qlcnic_pci_tbl,
- .probe = qlcnic_probe,
- .remove = __devexit_p(qlcnic_remove),
-#ifdef CONFIG_PM
- .suspend = qlcnic_suspend,
- .resume = qlcnic_resume,
-#endif
- .shutdown = qlcnic_shutdown,
- .err_handler = &qlcnic_err_handler
-
-};
-
-static int __init qlcnic_init_module(void)
-{
- int ret;
-
- printk(KERN_INFO "%s\n", qlcnic_driver_string);
-
- qlcnic_wq = create_singlethread_workqueue("qlcnic");
- if (qlcnic_wq == NULL) {
- printk(KERN_ERR "qlcnic: cannot create workqueue\n");
- return -ENOMEM;
- }
-
-#ifdef CONFIG_INET
- register_netdevice_notifier(&qlcnic_netdev_cb);
- register_inetaddr_notifier(&qlcnic_inetaddr_cb);
-#endif
-
- ret = pci_register_driver(&qlcnic_driver);
- if (ret) {
-#ifdef CONFIG_INET
- unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
- unregister_netdevice_notifier(&qlcnic_netdev_cb);
-#endif
- destroy_workqueue(qlcnic_wq);
- }
-
- return ret;
-}
-
-module_init(qlcnic_init_module);
-
-static void __exit qlcnic_exit_module(void)
-{
-
- pci_unregister_driver(&qlcnic_driver);
-
-#ifdef CONFIG_INET
- unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
- unregister_netdevice_notifier(&qlcnic_netdev_cb);
-#endif
- destroy_workqueue(qlcnic_wq);
-}
-
-module_exit(qlcnic_exit_module);
diff --git a/drivers/net/qlge/Makefile b/drivers/net/qlge/Makefile
deleted file mode 100644
index 8a19765..0000000
--- a/drivers/net/qlge/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the Qlogic 10GbE PCI Express ethernet driver
-#
-
-obj-$(CONFIG_QLGE) += qlge.o
-
-qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
deleted file mode 100644
index 8731f79..0000000
--- a/drivers/net/qlge/qlge.h
+++ /dev/null
@@ -1,2334 +0,0 @@
-/*
- * QLogic QLA41xx NIC HBA Driver
- * Copyright (c) 2003-2006 QLogic Corporation
- *
- * See LICENSE.qlge for copyright and licensing details.
- */
-#ifndef _QLGE_H_
-#define _QLGE_H_
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_vlan.h>
-
-/*
- * General definitions...
- */
-#define DRV_NAME "qlge"
-#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.29.00.00-01"
-
-#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
-
-#define QLGE_VENDOR_ID 0x1077
-#define QLGE_DEVICE_ID_8012 0x8012
-#define QLGE_DEVICE_ID_8000 0x8000
-#define MAX_CPUS 8
-#define MAX_TX_RINGS MAX_CPUS
-#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
-
-#define NUM_TX_RING_ENTRIES 256
-#define NUM_RX_RING_ENTRIES 256
-
-#define NUM_SMALL_BUFFERS 512
-#define NUM_LARGE_BUFFERS 512
-#define DB_PAGE_SIZE 4096
-
-/* Calculate the number of (4k) pages required to
- * contain a buffer queue of the given length.
- */
-#define MAX_DB_PAGES_PER_BQ(x) \
- (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
- (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
-
-#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
-#define LARGE_BUFFER_MAX_SIZE 8192
-#define LARGE_BUFFER_MIN_SIZE 2048
-
-#define MAX_CQ 128
-#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
-#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
-#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
-#define UDELAY_COUNT 3
-#define UDELAY_DELAY 100
-
-
-#define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
-#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
-#else /* all other page sizes */
-#define TX_DESC_PER_OAL 0
-#endif
-
-/* Word shifting for converting 64-bit
- * address to a series of 16-bit words.
- * This is used for some MPI firmware
- * mailbox commands.
- */
-#define LSW(x) ((u16)(x))
-#define MSW(x) ((u16)((u32)(x) >> 16))
-#define LSD(x) ((u32)((u64)(x)))
-#define MSD(x) ((u32)((((u64)(x)) >> 32)))
-
-/* MPI test register definitions. This register
- * is used for determining alternate NIC function's
- * PCI->func number.
- */
-enum {
- MPI_TEST_FUNC_PORT_CFG = 0x1002,
- MPI_TEST_FUNC_PRB_CTL = 0x100e,
- MPI_TEST_FUNC_PRB_EN = 0x18a20000,
- MPI_TEST_FUNC_RST_STS = 0x100a,
- MPI_TEST_FUNC_RST_FRC = 0x00000003,
- MPI_TEST_NIC_FUNC_MASK = 0x00000007,
- MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
- MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
- MPI_TEST_NIC1_FUNC_SHIFT = 1,
- MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
- MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
- MPI_TEST_NIC2_FUNC_SHIFT = 5,
- MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
- MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
- MPI_TEST_FC1_FUNCTION_SHIFT = 9,
- MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
- MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
- MPI_TEST_FC2_FUNCTION_SHIFT = 13,
-
- MPI_NIC_READ = 0x00000000,
- MPI_NIC_REG_BLOCK = 0x00020000,
- MPI_NIC_FUNCTION_SHIFT = 6,
-};
-
-/*
- * Processor Address Register (PROC_ADDR) bit definitions.
- */
-enum {
-
- /* Misc. stuff */
- MAILBOX_COUNT = 16,
- MAILBOX_TIMEOUT = 5,
-
- PROC_ADDR_RDY = (1 << 31),
- PROC_ADDR_R = (1 << 30),
- PROC_ADDR_ERR = (1 << 29),
- PROC_ADDR_DA = (1 << 28),
- PROC_ADDR_FUNC0_MBI = 0x00001180,
- PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
- PROC_ADDR_FUNC0_CTL = 0x000011a1,
- PROC_ADDR_FUNC2_MBI = 0x00001280,
- PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
- PROC_ADDR_FUNC2_CTL = 0x000012a1,
- PROC_ADDR_MPI_RISC = 0x00000000,
- PROC_ADDR_MDE = 0x00010000,
- PROC_ADDR_REGBLOCK = 0x00020000,
- PROC_ADDR_RISC_REG = 0x00030000,
-};
-
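To make the buffer-queue page math above (MAX_DB_PAGES_PER_BQ()) concrete:
queue entries are 64-bit addresses, so a 512-entry queue occupies exactly
512 * 8 = 4096 bytes, one DB page, while a 513-entry queue spills 8 bytes
into a second page. A small self-checking sketch of the same arithmetic in
plain C, with uint64_t standing in for the kernel's u64:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DB_PAGE_SIZE 4096

	/* Same arithmetic as the driver's MAX_DB_PAGES_PER_BQ() above. */
	#define MAX_DB_PAGES_PER_BQ(x) \
		((((x) * sizeof(uint64_t)) / DB_PAGE_SIZE) + \
		((((x) * sizeof(uint64_t)) % DB_PAGE_SIZE) ? 1 : 0))

	int main(void)
	{
		assert(MAX_DB_PAGES_PER_BQ(512) == 1);	/* 512 * 8 == 4096: one page */
		assert(MAX_DB_PAGES_PER_BQ(513) == 2);	/* 513 * 8 == 4104: spills over */
		printf("512 entries -> %zu page(s), 513 entries -> %zu page(s)\n",
		       MAX_DB_PAGES_PER_BQ(512), MAX_DB_PAGES_PER_BQ(513));
		return 0;
	}

-/*
- * System Register (SYS) bit definitions.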
- */ -enum { - SYS_EFE = (1 << 0), - SYS_FAE = (1 << 1), - SYS_MDC = (1 << 2), - SYS_DST = (1 << 3), - SYS_DWC = (1 << 4), - SYS_EVW = (1 << 5), - SYS_OMP_DLY_MASK = 0x3f000000, - /* - * There are no values defined as of edit #15. - */ - SYS_ODI = (1 << 14), -}; - -/* - * Reset/Failover Register (RST_FO) bit definitions. - */ -enum { - RST_FO_TFO = (1 << 0), - RST_FO_RR_MASK = 0x00060000, - RST_FO_RR_CQ_CAM = 0x00000000, - RST_FO_RR_DROP = 0x00000002, - RST_FO_RR_DQ = 0x00000004, - RST_FO_RR_RCV_FUNC_CQ = 0x00000006, - RST_FO_FRB = (1 << 12), - RST_FO_MOP = (1 << 13), - RST_FO_REG = (1 << 14), - RST_FO_FR = (1 << 15), -}; - -/* - * Function Specific Control Register (FSC) bit definitions. - */ -enum { - FSC_DBRST_MASK = 0x00070000, - FSC_DBRST_256 = 0x00000000, - FSC_DBRST_512 = 0x00000001, - FSC_DBRST_768 = 0x00000002, - FSC_DBRST_1024 = 0x00000003, - FSC_DBL_MASK = 0x00180000, - FSC_DBL_DBRST = 0x00000000, - FSC_DBL_MAX_PLD = 0x00000008, - FSC_DBL_MAX_BRST = 0x00000010, - FSC_DBL_128_BYTES = 0x00000018, - FSC_EC = (1 << 5), - FSC_EPC_MASK = 0x00c00000, - FSC_EPC_INBOUND = (1 << 6), - FSC_EPC_OUTBOUND = (1 << 7), - FSC_VM_PAGESIZE_MASK = 0x07000000, - FSC_VM_PAGE_2K = 0x00000100, - FSC_VM_PAGE_4K = 0x00000200, - FSC_VM_PAGE_8K = 0x00000300, - FSC_VM_PAGE_64K = 0x00000600, - FSC_SH = (1 << 11), - FSC_DSB = (1 << 12), - FSC_STE = (1 << 13), - FSC_FE = (1 << 15), -}; - -/* - * Host Command Status Register (CSR) bit definitions. - */ -enum { - CSR_ERR_STS_MASK = 0x0000003f, - /* - * There are no valued defined as of edit #15. - */ - CSR_RR = (1 << 8), - CSR_HRI = (1 << 9), - CSR_RP = (1 << 10), - CSR_CMD_PARM_SHIFT = 22, - CSR_CMD_NOP = 0x00000000, - CSR_CMD_SET_RST = 0x10000000, - CSR_CMD_CLR_RST = 0x20000000, - CSR_CMD_SET_PAUSE = 0x30000000, - CSR_CMD_CLR_PAUSE = 0x40000000, - CSR_CMD_SET_H2R_INT = 0x50000000, - CSR_CMD_CLR_H2R_INT = 0x60000000, - CSR_CMD_PAR_EN = 0x70000000, - CSR_CMD_SET_BAD_PAR = 0x80000000, - CSR_CMD_CLR_BAD_PAR = 0x90000000, - CSR_CMD_CLR_R2PCI_INT = 0xa0000000, -}; - -/* - * Configuration Register (CFG) bit definitions. - */ -enum { - CFG_LRQ = (1 << 0), - CFG_DRQ = (1 << 1), - CFG_LR = (1 << 2), - CFG_DR = (1 << 3), - CFG_LE = (1 << 5), - CFG_LCQ = (1 << 6), - CFG_DCQ = (1 << 7), - CFG_Q_SHIFT = 8, - CFG_Q_MASK = 0x7f000000, -}; - -/* - * Status Register (STS) bit definitions. - */ -enum { - STS_FE = (1 << 0), - STS_PI = (1 << 1), - STS_PL0 = (1 << 2), - STS_PL1 = (1 << 3), - STS_PI0 = (1 << 4), - STS_PI1 = (1 << 5), - STS_FUNC_ID_MASK = 0x000000c0, - STS_FUNC_ID_SHIFT = 6, - STS_F0E = (1 << 8), - STS_F1E = (1 << 9), - STS_F2E = (1 << 10), - STS_F3E = (1 << 11), - STS_NFE = (1 << 12), -}; - -/* - * Interrupt Enable Register (INTR_EN) bit definitions. - */ -enum { - INTR_EN_INTR_MASK = 0x007f0000, - INTR_EN_TYPE_MASK = 0x03000000, - INTR_EN_TYPE_ENABLE = 0x00000100, - INTR_EN_TYPE_DISABLE = 0x00000200, - INTR_EN_TYPE_READ = 0x00000300, - INTR_EN_IHD = (1 << 13), - INTR_EN_IHD_MASK = (INTR_EN_IHD << 16), - INTR_EN_EI = (1 << 14), - INTR_EN_EN = (1 << 15), -}; - -/* - * Interrupt Mask Register (INTR_MASK) bit definitions. - */ -enum { - INTR_MASK_PI = (1 << 0), - INTR_MASK_HL0 = (1 << 1), - INTR_MASK_LH0 = (1 << 2), - INTR_MASK_HL1 = (1 << 3), - INTR_MASK_LH1 = (1 << 4), - INTR_MASK_SE = (1 << 5), - INTR_MASK_LSC = (1 << 6), - INTR_MASK_MC = (1 << 7), - INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC, -}; - -/* - * Register (REV_ID) bit definitions. 
- */ -enum { - REV_ID_MASK = 0x0000000f, - REV_ID_NICROLL_SHIFT = 0, - REV_ID_NICREV_SHIFT = 4, - REV_ID_XGROLL_SHIFT = 8, - REV_ID_XGREV_SHIFT = 12, - REV_ID_CHIPREV_SHIFT = 28, -}; - -/* - * Force ECC Error Register (FRC_ECC_ERR) bit definitions. - */ -enum { - FRC_ECC_ERR_VW = (1 << 12), - FRC_ECC_ERR_VB = (1 << 13), - FRC_ECC_ERR_NI = (1 << 14), - FRC_ECC_ERR_NO = (1 << 15), - FRC_ECC_PFE_SHIFT = 16, - FRC_ECC_ERR_DO = (1 << 18), - FRC_ECC_P14 = (1 << 19), -}; - -/* - * Error Status Register (ERR_STS) bit definitions. - */ -enum { - ERR_STS_NOF = (1 << 0), - ERR_STS_NIF = (1 << 1), - ERR_STS_DRP = (1 << 2), - ERR_STS_XGP = (1 << 3), - ERR_STS_FOU = (1 << 4), - ERR_STS_FOC = (1 << 5), - ERR_STS_FOF = (1 << 6), - ERR_STS_FIU = (1 << 7), - ERR_STS_FIC = (1 << 8), - ERR_STS_FIF = (1 << 9), - ERR_STS_MOF = (1 << 10), - ERR_STS_TA = (1 << 11), - ERR_STS_MA = (1 << 12), - ERR_STS_MPE = (1 << 13), - ERR_STS_SCE = (1 << 14), - ERR_STS_STE = (1 << 15), - ERR_STS_FOW = (1 << 16), - ERR_STS_UE = (1 << 17), - ERR_STS_MCH = (1 << 26), - ERR_STS_LOC_SHIFT = 27, -}; - -/* - * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions. - */ -enum { - RAM_DBG_ADDR_FW = (1 << 30), - RAM_DBG_ADDR_FR = (1 << 31), -}; - -/* - * Semaphore Register (SEM) bit definitions. - */ -enum { - /* - * Example: - * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT) - */ - SEM_CLEAR = 0, - SEM_SET = 1, - SEM_FORCE = 3, - SEM_XGMAC0_SHIFT = 0, - SEM_XGMAC1_SHIFT = 2, - SEM_ICB_SHIFT = 4, - SEM_MAC_ADDR_SHIFT = 6, - SEM_FLASH_SHIFT = 8, - SEM_PROBE_SHIFT = 10, - SEM_RT_IDX_SHIFT = 12, - SEM_PROC_REG_SHIFT = 14, - SEM_XGMAC0_MASK = 0x00030000, - SEM_XGMAC1_MASK = 0x000c0000, - SEM_ICB_MASK = 0x00300000, - SEM_MAC_ADDR_MASK = 0x00c00000, - SEM_FLASH_MASK = 0x03000000, - SEM_PROBE_MASK = 0x0c000000, - SEM_RT_IDX_MASK = 0x30000000, - SEM_PROC_REG_MASK = 0xc0000000, -}; - -/* - * 10G MAC Address Register (XGMAC_ADDR) bit definitions. 
- */ -enum { - XGMAC_ADDR_RDY = (1 << 31), - XGMAC_ADDR_R = (1 << 30), - XGMAC_ADDR_XME = (1 << 29), - - /* XGMAC control registers */ - PAUSE_SRC_LO = 0x00000100, - PAUSE_SRC_HI = 0x00000104, - GLOBAL_CFG = 0x00000108, - GLOBAL_CFG_RESET = (1 << 0), - GLOBAL_CFG_JUMBO = (1 << 6), - GLOBAL_CFG_TX_STAT_EN = (1 << 10), - GLOBAL_CFG_RX_STAT_EN = (1 << 11), - TX_CFG = 0x0000010c, - TX_CFG_RESET = (1 << 0), - TX_CFG_EN = (1 << 1), - TX_CFG_PREAM = (1 << 2), - RX_CFG = 0x00000110, - RX_CFG_RESET = (1 << 0), - RX_CFG_EN = (1 << 1), - RX_CFG_PREAM = (1 << 2), - FLOW_CTL = 0x0000011c, - PAUSE_OPCODE = 0x00000120, - PAUSE_TIMER = 0x00000124, - PAUSE_FRM_DEST_LO = 0x00000128, - PAUSE_FRM_DEST_HI = 0x0000012c, - MAC_TX_PARAMS = 0x00000134, - MAC_TX_PARAMS_JUMBO = (1 << 31), - MAC_TX_PARAMS_SIZE_SHIFT = 16, - MAC_RX_PARAMS = 0x00000138, - MAC_SYS_INT = 0x00000144, - MAC_SYS_INT_MASK = 0x00000148, - MAC_MGMT_INT = 0x0000014c, - MAC_MGMT_IN_MASK = 0x00000150, - EXT_ARB_MODE = 0x000001fc, - - /* XGMAC TX statistics registers */ - TX_PKTS = 0x00000200, - TX_BYTES = 0x00000208, - TX_MCAST_PKTS = 0x00000210, - TX_BCAST_PKTS = 0x00000218, - TX_UCAST_PKTS = 0x00000220, - TX_CTL_PKTS = 0x00000228, - TX_PAUSE_PKTS = 0x00000230, - TX_64_PKT = 0x00000238, - TX_65_TO_127_PKT = 0x00000240, - TX_128_TO_255_PKT = 0x00000248, - TX_256_511_PKT = 0x00000250, - TX_512_TO_1023_PKT = 0x00000258, - TX_1024_TO_1518_PKT = 0x00000260, - TX_1519_TO_MAX_PKT = 0x00000268, - TX_UNDERSIZE_PKT = 0x00000270, - TX_OVERSIZE_PKT = 0x00000278, - - /* XGMAC statistics control registers */ - RX_HALF_FULL_DET = 0x000002a0, - TX_HALF_FULL_DET = 0x000002a4, - RX_OVERFLOW_DET = 0x000002a8, - TX_OVERFLOW_DET = 0x000002ac, - RX_HALF_FULL_MASK = 0x000002b0, - TX_HALF_FULL_MASK = 0x000002b4, - RX_OVERFLOW_MASK = 0x000002b8, - TX_OVERFLOW_MASK = 0x000002bc, - STAT_CNT_CTL = 0x000002c0, - STAT_CNT_CTL_CLEAR_TX = (1 << 0), - STAT_CNT_CTL_CLEAR_RX = (1 << 1), - AUX_RX_HALF_FULL_DET = 0x000002d0, - AUX_TX_HALF_FULL_DET = 0x000002d4, - AUX_RX_OVERFLOW_DET = 0x000002d8, - AUX_TX_OVERFLOW_DET = 0x000002dc, - AUX_RX_HALF_FULL_MASK = 0x000002f0, - AUX_TX_HALF_FULL_MASK = 0x000002f4, - AUX_RX_OVERFLOW_MASK = 0x000002f8, - AUX_TX_OVERFLOW_MASK = 0x000002fc, - - /* XGMAC RX statistics registers */ - RX_BYTES = 0x00000300, - RX_BYTES_OK = 0x00000308, - RX_PKTS = 0x00000310, - RX_PKTS_OK = 0x00000318, - RX_BCAST_PKTS = 0x00000320, - RX_MCAST_PKTS = 0x00000328, - RX_UCAST_PKTS = 0x00000330, - RX_UNDERSIZE_PKTS = 0x00000338, - RX_OVERSIZE_PKTS = 0x00000340, - RX_JABBER_PKTS = 0x00000348, - RX_UNDERSIZE_FCERR_PKTS = 0x00000350, - RX_DROP_EVENTS = 0x00000358, - RX_FCERR_PKTS = 0x00000360, - RX_ALIGN_ERR = 0x00000368, - RX_SYMBOL_ERR = 0x00000370, - RX_MAC_ERR = 0x00000378, - RX_CTL_PKTS = 0x00000380, - RX_PAUSE_PKTS = 0x00000388, - RX_64_PKTS = 0x00000390, - RX_65_TO_127_PKTS = 0x00000398, - RX_128_255_PKTS = 0x000003a0, - RX_256_511_PKTS = 0x000003a8, - RX_512_TO_1023_PKTS = 0x000003b0, - RX_1024_TO_1518_PKTS = 0x000003b8, - RX_1519_TO_MAX_PKTS = 0x000003c0, - RX_LEN_ERR_PKTS = 0x000003c8, - - /* XGMAC MDIO control registers */ - MDIO_TX_DATA = 0x00000400, - MDIO_RX_DATA = 0x00000410, - MDIO_CMD = 0x00000420, - MDIO_PHY_ADDR = 0x00000430, - MDIO_PORT = 0x00000440, - MDIO_STATUS = 0x00000450, - - XGMAC_REGISTER_END = 0x00000740, -}; - -/* - * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions. 
- */ -enum { - ETS_QUEUE_SHIFT = 29, - ETS_REF = (1 << 26), - ETS_RS = (1 << 27), - ETS_P = (1 << 28), - ETS_FC_COS_SHIFT = 23, -}; - -/* - * Flash Address Register (FLASH_ADDR) bit definitions. - */ -enum { - FLASH_ADDR_RDY = (1 << 31), - FLASH_ADDR_R = (1 << 30), - FLASH_ADDR_ERR = (1 << 29), -}; - -/* - * Stop CQ Processing Register (CQ_STOP) bit definitions. - */ -enum { - CQ_STOP_QUEUE_MASK = (0x007f0000), - CQ_STOP_TYPE_MASK = (0x03000000), - CQ_STOP_TYPE_START = 0x00000100, - CQ_STOP_TYPE_STOP = 0x00000200, - CQ_STOP_TYPE_READ = 0x00000300, - CQ_STOP_EN = (1 << 15), -}; - -/* - * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions. - */ -enum { - MAC_ADDR_IDX_SHIFT = 4, - MAC_ADDR_TYPE_SHIFT = 16, - MAC_ADDR_TYPE_COUNT = 10, - MAC_ADDR_TYPE_MASK = 0x000f0000, - MAC_ADDR_TYPE_CAM_MAC = 0x00000000, - MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, - MAC_ADDR_TYPE_VLAN = 0x00020000, - MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000, - MAC_ADDR_TYPE_FC_MAC = 0x00040000, - MAC_ADDR_TYPE_MGMT_MAC = 0x00050000, - MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000, - MAC_ADDR_TYPE_MGMT_V4 = 0x00070000, - MAC_ADDR_TYPE_MGMT_V6 = 0x00080000, - MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000, - MAC_ADDR_ADR = (1 << 25), - MAC_ADDR_RS = (1 << 26), - MAC_ADDR_E = (1 << 27), - MAC_ADDR_MR = (1 << 30), - MAC_ADDR_MW = (1 << 31), - MAX_MULTICAST_ENTRIES = 32, - - /* Entry count and words per entry - * for each address type in the filter. - */ - MAC_ADDR_MAX_CAM_ENTRIES = 512, - MAC_ADDR_MAX_CAM_WCOUNT = 3, - MAC_ADDR_MAX_MULTICAST_ENTRIES = 32, - MAC_ADDR_MAX_MULTICAST_WCOUNT = 2, - MAC_ADDR_MAX_VLAN_ENTRIES = 4096, - MAC_ADDR_MAX_VLAN_WCOUNT = 1, - MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096, - MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1, - MAC_ADDR_MAX_FC_MAC_ENTRIES = 4, - MAC_ADDR_MAX_FC_MAC_WCOUNT = 2, - MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8, - MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2, - MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16, - MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1, - MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4, - MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1, - MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4, - MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4, - MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4, - MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1, -}; - -/* - * MAC Protocol Address Index Register (SPLT_HDR) bit definitions. - */ -enum { - SPLT_HDR_EP = (1 << 31), -}; - -/* - * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions. - */ -enum { - FC_RCV_CFG_ECT = (1 << 15), - FC_RCV_CFG_DFH = (1 << 20), - FC_RCV_CFG_DVF = (1 << 21), - FC_RCV_CFG_RCE = (1 << 27), - FC_RCV_CFG_RFE = (1 << 28), - FC_RCV_CFG_TEE = (1 << 29), - FC_RCV_CFG_TCE = (1 << 30), - FC_RCV_CFG_TFE = (1 << 31), -}; - -/* - * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions. - */ -enum { - NIC_RCV_CFG_PPE = (1 << 0), - NIC_RCV_CFG_VLAN_MASK = 0x00060000, - NIC_RCV_CFG_VLAN_ALL = 0x00000000, - NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002, - NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004, - NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006, - NIC_RCV_CFG_RV = (1 << 3), - NIC_RCV_CFG_DFQ_MASK = (0x7f000000), - NIC_RCV_CFG_DFQ_SHIFT = 8, - NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */ -}; - -/* - * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions. 
- */ -enum { - MGMT_RCV_CFG_ARP = (1 << 0), - MGMT_RCV_CFG_DHC = (1 << 1), - MGMT_RCV_CFG_DHS = (1 << 2), - MGMT_RCV_CFG_NP = (1 << 3), - MGMT_RCV_CFG_I6N = (1 << 4), - MGMT_RCV_CFG_I6R = (1 << 5), - MGMT_RCV_CFG_DH6 = (1 << 6), - MGMT_RCV_CFG_UD1 = (1 << 7), - MGMT_RCV_CFG_UD0 = (1 << 8), - MGMT_RCV_CFG_BCT = (1 << 9), - MGMT_RCV_CFG_MCT = (1 << 10), - MGMT_RCV_CFG_DM = (1 << 11), - MGMT_RCV_CFG_RM = (1 << 12), - MGMT_RCV_CFG_STL = (1 << 13), - MGMT_RCV_CFG_VLAN_MASK = 0xc0000000, - MGMT_RCV_CFG_VLAN_ALL = 0x00000000, - MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000, - MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000, - MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000, -}; - -/* - * Routing Index Register (RT_IDX) bit definitions. - */ -enum { - RT_IDX_IDX_SHIFT = 8, - RT_IDX_TYPE_MASK = 0x000f0000, - RT_IDX_TYPE_SHIFT = 16, - RT_IDX_TYPE_RT = 0x00000000, - RT_IDX_TYPE_RT_INV = 0x00010000, - RT_IDX_TYPE_NICQ = 0x00020000, - RT_IDX_TYPE_NICQ_INV = 0x00030000, - RT_IDX_DST_MASK = 0x00700000, - RT_IDX_DST_RSS = 0x00000000, - RT_IDX_DST_CAM_Q = 0x00100000, - RT_IDX_DST_COS_Q = 0x00200000, - RT_IDX_DST_DFLT_Q = 0x00300000, - RT_IDX_DST_DEST_Q = 0x00400000, - RT_IDX_RS = (1 << 26), - RT_IDX_E = (1 << 27), - RT_IDX_MR = (1 << 30), - RT_IDX_MW = (1 << 31), - - /* Nic Queue format - type 2 bits */ - RT_IDX_BCAST = (1 << 0), - RT_IDX_MCAST = (1 << 1), - RT_IDX_MCAST_MATCH = (1 << 2), - RT_IDX_MCAST_REG_MATCH = (1 << 3), - RT_IDX_MCAST_HASH_MATCH = (1 << 4), - RT_IDX_FC_MACH = (1 << 5), - RT_IDX_ETH_FCOE = (1 << 6), - RT_IDX_CAM_HIT = (1 << 7), - RT_IDX_CAM_BIT0 = (1 << 8), - RT_IDX_CAM_BIT1 = (1 << 9), - RT_IDX_VLAN_TAG = (1 << 10), - RT_IDX_VLAN_MATCH = (1 << 11), - RT_IDX_VLAN_FILTER = (1 << 12), - RT_IDX_ETH_SKIP1 = (1 << 13), - RT_IDX_ETH_SKIP2 = (1 << 14), - RT_IDX_BCAST_MCAST_MATCH = (1 << 15), - RT_IDX_802_3 = (1 << 16), - RT_IDX_LLDP = (1 << 17), - RT_IDX_UNUSED018 = (1 << 18), - RT_IDX_UNUSED019 = (1 << 19), - RT_IDX_UNUSED20 = (1 << 20), - RT_IDX_UNUSED21 = (1 << 21), - RT_IDX_ERR = (1 << 22), - RT_IDX_VALID = (1 << 23), - RT_IDX_TU_CSUM_ERR = (1 << 24), - RT_IDX_IP_CSUM_ERR = (1 << 25), - RT_IDX_MAC_ERR = (1 << 26), - RT_IDX_RSS_TCP6 = (1 << 27), - RT_IDX_RSS_TCP4 = (1 << 28), - RT_IDX_RSS_IPV6 = (1 << 29), - RT_IDX_RSS_IPV4 = (1 << 30), - RT_IDX_RSS_MATCH = (1 << 31), - - /* Hierarchy for the NIC Queue Mask */ - RT_IDX_ALL_ERR_SLOT = 0, - RT_IDX_MAC_ERR_SLOT = 0, - RT_IDX_IP_CSUM_ERR_SLOT = 1, - RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2, - RT_IDX_BCAST_SLOT = 3, - RT_IDX_MCAST_MATCH_SLOT = 4, - RT_IDX_ALLMULTI_SLOT = 5, - RT_IDX_UNUSED6_SLOT = 6, - RT_IDX_UNUSED7_SLOT = 7, - RT_IDX_RSS_MATCH_SLOT = 8, - RT_IDX_RSS_IPV4_SLOT = 8, - RT_IDX_RSS_IPV6_SLOT = 9, - RT_IDX_RSS_TCP4_SLOT = 10, - RT_IDX_RSS_TCP6_SLOT = 11, - RT_IDX_CAM_HIT_SLOT = 12, - RT_IDX_UNUSED013 = 13, - RT_IDX_UNUSED014 = 14, - RT_IDX_PROMISCUOUS_SLOT = 15, - RT_IDX_MAX_RT_SLOTS = 8, - RT_IDX_MAX_NIC_SLOTS = 16, -}; - -/* - * Serdes Address Register (XG_SERDES_ADDR) bit definitions. - */ -enum { - XG_SERDES_ADDR_RDY = (1 << 31), - XG_SERDES_ADDR_R = (1 << 30), - - XG_SERDES_ADDR_STS = 0x00001E06, - XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005, - XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a, - XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001, - - /* Serdes coredump definitions. 
*/ - XG_SERDES_XAUI_AN_START = 0x00000000, - XG_SERDES_XAUI_AN_END = 0x00000034, - XG_SERDES_XAUI_HSS_PCS_START = 0x00000800, - XG_SERDES_XAUI_HSS_PCS_END = 0x0000880, - XG_SERDES_XFI_AN_START = 0x00001000, - XG_SERDES_XFI_AN_END = 0x00001034, - XG_SERDES_XFI_TRAIN_START = 0x10001050, - XG_SERDES_XFI_TRAIN_END = 0x1000107C, - XG_SERDES_XFI_HSS_PCS_START = 0x00001800, - XG_SERDES_XFI_HSS_PCS_END = 0x00001838, - XG_SERDES_XFI_HSS_TX_START = 0x00001c00, - XG_SERDES_XFI_HSS_TX_END = 0x00001c1f, - XG_SERDES_XFI_HSS_RX_START = 0x00001c40, - XG_SERDES_XFI_HSS_RX_END = 0x00001c5f, - XG_SERDES_XFI_HSS_PLL_START = 0x00001e00, - XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f, -}; - -/* - * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions. - */ -enum { - PRB_MX_ADDR_ARE = (1 << 16), - PRB_MX_ADDR_UP = (1 << 15), - PRB_MX_ADDR_SWP = (1 << 14), - - /* Module select values. */ - PRB_MX_ADDR_MAX_MODS = 21, - PRB_MX_ADDR_MOD_SEL_SHIFT = 9, - PRB_MX_ADDR_MOD_SEL_TBD = 0, - PRB_MX_ADDR_MOD_SEL_IDE1 = 1, - PRB_MX_ADDR_MOD_SEL_IDE2 = 2, - PRB_MX_ADDR_MOD_SEL_FRB = 3, - PRB_MX_ADDR_MOD_SEL_ODE1 = 4, - PRB_MX_ADDR_MOD_SEL_ODE2 = 5, - PRB_MX_ADDR_MOD_SEL_DA1 = 6, - PRB_MX_ADDR_MOD_SEL_DA2 = 7, - PRB_MX_ADDR_MOD_SEL_IMP1 = 8, - PRB_MX_ADDR_MOD_SEL_IMP2 = 9, - PRB_MX_ADDR_MOD_SEL_OMP1 = 10, - PRB_MX_ADDR_MOD_SEL_OMP2 = 11, - PRB_MX_ADDR_MOD_SEL_ORS1 = 12, - PRB_MX_ADDR_MOD_SEL_ORS2 = 13, - PRB_MX_ADDR_MOD_SEL_REG = 14, - PRB_MX_ADDR_MOD_SEL_MAC1 = 16, - PRB_MX_ADDR_MOD_SEL_MAC2 = 17, - PRB_MX_ADDR_MOD_SEL_VQM1 = 18, - PRB_MX_ADDR_MOD_SEL_VQM2 = 19, - PRB_MX_ADDR_MOD_SEL_MOP = 20, - /* Bit fields indicating which modules - * are valid for each clock domain. - */ - PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7, - PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1, - PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309, - PRB_MX_ADDR_VALID_FC_MOD = 0x00003001, - PRB_MX_ADDR_VALID_TOTAL = 34, - - /* Clock domain values. 
*/ - PRB_MX_ADDR_CLOCK_SHIFT = 6, - PRB_MX_ADDR_SYS_CLOCK = 0, - PRB_MX_ADDR_PCI_CLOCK = 2, - PRB_MX_ADDR_FC_CLOCK = 5, - PRB_MX_ADDR_XGM_CLOCK = 6, - - PRB_MX_ADDR_MAX_MUX = 64, -}; - -/* - * Control Register Set Map - */ -enum { - PROC_ADDR = 0, /* Use semaphore */ - PROC_DATA = 0x04, /* Use semaphore */ - SYS = 0x08, - RST_FO = 0x0c, - FSC = 0x10, - CSR = 0x14, - LED = 0x18, - ICB_RID = 0x1c, /* Use semaphore */ - ICB_L = 0x20, /* Use semaphore */ - ICB_H = 0x24, /* Use semaphore */ - CFG = 0x28, - BIOS_ADDR = 0x2c, - STS = 0x30, - INTR_EN = 0x34, - INTR_MASK = 0x38, - ISR1 = 0x3c, - ISR2 = 0x40, - ISR3 = 0x44, - ISR4 = 0x48, - REV_ID = 0x4c, - FRC_ECC_ERR = 0x50, - ERR_STS = 0x54, - RAM_DBG_ADDR = 0x58, - RAM_DBG_DATA = 0x5c, - ECC_ERR_CNT = 0x60, - SEM = 0x64, - GPIO_1 = 0x68, /* Use semaphore */ - GPIO_2 = 0x6c, /* Use semaphore */ - GPIO_3 = 0x70, /* Use semaphore */ - RSVD2 = 0x74, - XGMAC_ADDR = 0x78, /* Use semaphore */ - XGMAC_DATA = 0x7c, /* Use semaphore */ - NIC_ETS = 0x80, - CNA_ETS = 0x84, - FLASH_ADDR = 0x88, /* Use semaphore */ - FLASH_DATA = 0x8c, /* Use semaphore */ - CQ_STOP = 0x90, - PAGE_TBL_RID = 0x94, - WQ_PAGE_TBL_LO = 0x98, - WQ_PAGE_TBL_HI = 0x9c, - CQ_PAGE_TBL_LO = 0xa0, - CQ_PAGE_TBL_HI = 0xa4, - MAC_ADDR_IDX = 0xa8, /* Use semaphore */ - MAC_ADDR_DATA = 0xac, /* Use semaphore */ - COS_DFLT_CQ1 = 0xb0, - COS_DFLT_CQ2 = 0xb4, - ETYPE_SKIP1 = 0xb8, - ETYPE_SKIP2 = 0xbc, - SPLT_HDR = 0xc0, - FC_PAUSE_THRES = 0xc4, - NIC_PAUSE_THRES = 0xc8, - FC_ETHERTYPE = 0xcc, - FC_RCV_CFG = 0xd0, - NIC_RCV_CFG = 0xd4, - FC_COS_TAGS = 0xd8, - NIC_COS_TAGS = 0xdc, - MGMT_RCV_CFG = 0xe0, - RT_IDX = 0xe4, - RT_DATA = 0xe8, - RSVD7 = 0xec, - XG_SERDES_ADDR = 0xf0, - XG_SERDES_DATA = 0xf4, - PRB_MX_ADDR = 0xf8, /* Use semaphore */ - PRB_MX_DATA = 0xfc, /* Use semaphore */ -}; - -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -#define SMALL_BUFFER_SIZE 256 -#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE -#define SPLT_SETTING FSC_DBRST_1024 -#define SPLT_LEN 0 -#define QLGE_SB_PAD 0 -#else -#define SMALL_BUFFER_SIZE 512 -#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2) -#define SPLT_SETTING FSC_SH -#define SPLT_LEN (SPLT_HDR_EP | \ - min(SMALL_BUF_MAP_SIZE, 1023)) -#define QLGE_SB_PAD 32 -#endif - -/* - * CAM output format. - */ -enum { - CAM_OUT_ROUTE_FC = 0, - CAM_OUT_ROUTE_NIC = 1, - CAM_OUT_FUNC_SHIFT = 2, - CAM_OUT_RV = (1 << 4), - CAM_OUT_SH = (1 << 15), - CAM_OUT_CQ_ID_SHIFT = 5, -}; - -/* - * Mailbox definitions - */ -enum { - /* Asynchronous Event Notifications */ - AEN_SYS_ERR = 0x00008002, - AEN_LINK_UP = 0x00008011, - AEN_LINK_DOWN = 0x00008012, - AEN_IDC_CMPLT = 0x00008100, - AEN_IDC_REQ = 0x00008101, - AEN_IDC_EXT = 0x00008102, - AEN_DCBX_CHG = 0x00008110, - AEN_AEN_LOST = 0x00008120, - AEN_AEN_SFP_IN = 0x00008130, - AEN_AEN_SFP_OUT = 0x00008131, - AEN_FW_INIT_DONE = 0x00008400, - AEN_FW_INIT_FAIL = 0x00008401, - - /* Mailbox Command Opcodes. 
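Every register marked "Use semaphore" above is shared between PCI functions, so access is bracketed by the SEM helpers declared near the end of this header. A usage sketch (SEM_MAC_ADDR_MASK is the same mask the debug code further down uses):

/* Sketch: semaphore-bracketed access to a shared register. */
static int example_read_mac_addr_idx(struct ql_adapter *qdev, u32 *val)
{
	int status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);

	if (status)
		return status;	/* hardware semaphore not granted */
	*val = ql_read32(qdev, MAC_ADDR_IDX);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return 0;
}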
*/ - MB_CMD_NOP = 0x00000000, - MB_CMD_EX_FW = 0x00000002, - MB_CMD_MB_TEST = 0x00000006, - MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */ - MB_CMD_ABOUT_FW = 0x00000008, - MB_CMD_COPY_RISC_RAM = 0x0000000a, - MB_CMD_LOAD_RISC_RAM = 0x0000000b, - MB_CMD_DUMP_RISC_RAM = 0x0000000c, - MB_CMD_WRITE_RAM = 0x0000000d, - MB_CMD_INIT_RISC_RAM = 0x0000000e, - MB_CMD_READ_RAM = 0x0000000f, - MB_CMD_STOP_FW = 0x00000014, - MB_CMD_MAKE_SYS_ERR = 0x0000002a, - MB_CMD_WRITE_SFP = 0x00000030, - MB_CMD_READ_SFP = 0x00000031, - MB_CMD_INIT_FW = 0x00000060, - MB_CMD_GET_IFCB = 0x00000061, - MB_CMD_GET_FW_STATE = 0x00000069, - MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */ - MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */ - MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */ - MB_WOL_DISABLE = 0, - MB_WOL_MAGIC_PKT = (1 << 1), - MB_WOL_FLTR = (1 << 2), - MB_WOL_UCAST = (1 << 3), - MB_WOL_MCAST = (1 << 4), - MB_WOL_BCAST = (1 << 5), - MB_WOL_LINK_UP = (1 << 6), - MB_WOL_LINK_DOWN = (1 << 7), - MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */ - MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ - MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ - MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ - MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */ - MB_CMD_SET_WOL_IMMED = 0x00000115, - MB_CMD_PORT_RESET = 0x00000120, - MB_CMD_SET_PORT_CFG = 0x00000122, - MB_CMD_GET_PORT_CFG = 0x00000123, - MB_CMD_GET_LINK_STS = 0x00000124, - MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */ - QL_LED_BLINK = 0x03e803e8, - MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */ - MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ - MB_SET_MPI_TFK_STOP = (1 << 0), - MB_SET_MPI_TFK_RESUME = (1 << 1), - MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ - MB_GET_MPI_TFK_STOPPED = (1 << 0), - MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), - /* Sub-commands for IDC request. - * This describes the reason for the - * IDC request. - */ - MB_CMD_IOP_NONE = 0x0000, - MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001, - MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002, - MB_CMD_IOP_PREP_LINK_DOWN = 0x0010, - MB_CMD_IOP_DVR_START = 0x0100, - MB_CMD_IOP_FLASH_ACC = 0x0101, - MB_CMD_IOP_RESTART_MPI = 0x0102, - MB_CMD_IOP_CORE_DUMP_MPI = 0x0103, - - /* Mailbox Command Status. */ - MB_CMD_STS_GOOD = 0x00004000, /* Success. */ - MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */ - MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */ - MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */ - MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */ - MB_CMD_STS_ERR = 0x00004005, /* System Error. */ - MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */ -}; - -struct mbox_params { - u32 mbox_in[MAILBOX_COUNT]; - u32 mbox_out[MAILBOX_COUNT]; - int in_count; - int out_count; -}; - -struct flash_params_8012 { - u8 dev_id_str[4]; - __le16 size; - __le16 csum; - __le16 ver; - __le16 sub_dev_id; - u8 mac_addr[6]; - __le16 res; -}; - -/* 8000 device's flash is a different structure - * at a different offset in flash. - */ -#define FUNC0_FLASH_OFFSET 0x140200 -#define FUNC1_FLASH_OFFSET 0x140600 - -/* Flash related data structures. 
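The opcodes and MB_CMD_STS_* codes above pair with struct mbox_params: the caller loads mbox_in[] plus the expected in/out counts, runs the exchange, and judges mbox_out[0]. A sketch of the calling convention; ql_mailbox_command() here only stands in for the driver's internal dispatch routine, which is not part of this header:

/* Sketch: query the firmware version with MB_CMD_ABOUT_FW.  The
 * dispatch helper name is assumed for illustration only.
 */
static int example_mb_about_fw(struct ql_adapter *qdev)
{
	struct mbox_params mbc = {};

	mbc.in_count = 1;
	mbc.out_count = 3;
	mbc.mbox_in[0] = MB_CMD_ABOUT_FW;

	if (ql_mailbox_command(qdev, &mbc))
		return -EIO;
	if (mbc.mbox_out[0] != MB_CMD_STS_GOOD)
		return -EIO;		/* firmware rejected the command */
	qdev->fw_rev_id = mbc.mbox_out[1];
	return 0;
}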
*/ -struct flash_params_8000 { - u8 dev_id_str[4]; /* "8000" */ - __le16 ver; - __le16 size; - __le16 csum; - __le16 reserved0; - __le16 total_size; - __le16 entry_count; - u8 data_type0; - u8 data_size0; - u8 mac_addr[6]; - u8 data_type1; - u8 data_size1; - u8 mac_addr1[6]; - u8 data_type2; - u8 data_size2; - __le16 vlan_id; - u8 data_type3; - u8 data_size3; - __le16 last; - u8 reserved1[464]; - __le16 subsys_ven_id; - __le16 subsys_dev_id; - u8 reserved2[4]; -}; - -union flash_params { - struct flash_params_8012 flash_params_8012; - struct flash_params_8000 flash_params_8000; -}; - -/* - * doorbell space for the rx ring context - */ -struct rx_doorbell_context { - u32 cnsmr_idx; /* 0x00 */ - u32 valid; /* 0x04 */ - u32 reserved[4]; /* 0x08-0x14 */ - u32 lbq_prod_idx; /* 0x18 */ - u32 sbq_prod_idx; /* 0x1c */ -}; - -/* - * doorbell space for the tx ring context - */ -struct tx_doorbell_context { - u32 prod_idx; /* 0x00 */ - u32 valid; /* 0x04 */ - u32 reserved[4]; /* 0x08-0x14 */ - u32 lbq_prod_idx; /* 0x18 */ - u32 sbq_prod_idx; /* 0x1c */ -}; - -/* DATA STRUCTURES SHARED WITH HARDWARE. */ -struct tx_buf_desc { - __le64 addr; - __le32 len; -#define TX_DESC_LEN_MASK 0x000fffff -#define TX_DESC_C 0x40000000 -#define TX_DESC_E 0x80000000 -} __packed; - -/* - * IOCB Definitions... - */ - -#define OPCODE_OB_MAC_IOCB 0x01 -#define OPCODE_OB_MAC_TSO_IOCB 0x02 -#define OPCODE_IB_MAC_IOCB 0x20 -#define OPCODE_IB_MPI_IOCB 0x21 -#define OPCODE_IB_AE_IOCB 0x3f - -struct ob_mac_iocb_req { - u8 opcode; - u8 flags1; -#define OB_MAC_IOCB_REQ_OI 0x01 -#define OB_MAC_IOCB_REQ_I 0x02 -#define OB_MAC_IOCB_REQ_D 0x08 -#define OB_MAC_IOCB_REQ_F 0x10 - u8 flags2; - u8 flags3; -#define OB_MAC_IOCB_DFP 0x02 -#define OB_MAC_IOCB_V 0x04 - __le32 reserved1[2]; - __le16 frame_len; -#define OB_MAC_IOCB_LEN_MASK 0x3ffff - __le16 reserved2; - u32 tid; - u32 txq_idx; - __le32 reserved3; - __le16 vlan_tci; - __le16 reserved4; - struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; -} __packed; - -struct ob_mac_iocb_rsp { - u8 opcode; /* */ - u8 flags1; /* */ -#define OB_MAC_IOCB_RSP_OI 0x01 /* */ -#define OB_MAC_IOCB_RSP_I 0x02 /* */ -#define OB_MAC_IOCB_RSP_E 0x08 /* */ -#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */ -#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */ -#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */ - u8 flags2; /* */ - u8 flags3; /* */ -#define OB_MAC_IOCB_RSP_B 0x80 /* */ - u32 tid; - u32 txq_idx; - __le32 reserved[13]; -} __packed; - -struct ob_mac_tso_iocb_req { - u8 opcode; - u8 flags1; -#define OB_MAC_TSO_IOCB_OI 0x01 -#define OB_MAC_TSO_IOCB_I 0x02 -#define OB_MAC_TSO_IOCB_D 0x08 -#define OB_MAC_TSO_IOCB_IP4 0x40 -#define OB_MAC_TSO_IOCB_IP6 0x80 - u8 flags2; -#define OB_MAC_TSO_IOCB_LSO 0x20 -#define OB_MAC_TSO_IOCB_UC 0x40 -#define OB_MAC_TSO_IOCB_TC 0x80 - u8 flags3; -#define OB_MAC_TSO_IOCB_IC 0x01 -#define OB_MAC_TSO_IOCB_DFP 0x02 -#define OB_MAC_TSO_IOCB_V 0x04 - __le32 reserved1[2]; - __le32 frame_len; - u32 tid; - u32 txq_idx; - __le16 total_hdrs_len; - __le16 net_trans_offset; -#define OB_MAC_TRANSPORT_HDR_SHIFT 6 - __le16 vlan_tci; - __le16 mss; - struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; -} __packed; - -struct ob_mac_tso_iocb_rsp { - u8 opcode; - u8 flags1; -#define OB_MAC_TSO_IOCB_RSP_OI 0x01 -#define OB_MAC_TSO_IOCB_RSP_I 0x02 -#define OB_MAC_TSO_IOCB_RSP_E 0x08 -#define OB_MAC_TSO_IOCB_RSP_S 0x10 -#define OB_MAC_TSO_IOCB_RSP_L 0x20 -#define OB_MAC_TSO_IOCB_RSP_P 0x40 - u8 flags2; /* */ - u8 flags3; /* */ -#define OB_MAC_TSO_IOCB_RSP_B 0x8000 - u32 tid; - u32 txq_idx; - __le32 reserved2[13]; -} 
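The C (continue) and E (end) bits in tx_buf_desc.len chain and terminate a transmit scatter list: a descriptor that points at an overflow (OAL) list sets TX_DESC_C, and the final fragment sets TX_DESC_E. A sketch for a simple two-fragment frame (the dma/len arguments stand for already-mapped buffers):

/* Sketch: fill a two-fragment TBD list per the flag definitions above. */
static void example_fill_tbd(struct tx_buf_desc *tbd,
			     dma_addr_t dma0, u32 len0,
			     dma_addr_t dma1, u32 len1)
{
	tbd[0].addr = cpu_to_le64(dma0);
	tbd[0].len  = cpu_to_le32(len0 & TX_DESC_LEN_MASK);
	tbd[1].addr = cpu_to_le64(dma1);
	/* TX_DESC_E marks the last descriptor of the frame. */
	tbd[1].len  = cpu_to_le32((len1 & TX_DESC_LEN_MASK) | TX_DESC_E);
}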
__packed; - -struct ib_mac_iocb_rsp { - u8 opcode; /* 0x20 */ - u8 flags1; -#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */ -#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */ -#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ -#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ -#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ -#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */ -#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */ -#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */ -#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */ -#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */ -#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */ -#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */ - u8 flags2; -#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */ -#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */ -#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */ -#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04 -#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08 -#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10 -#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14 -#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18 -#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c -#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */ -#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */ -#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */ - u8 flags3; -#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */ -#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */ -#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */ -#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */ -#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */ -#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */ -#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */ -#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */ -#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */ -#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */ -#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */ - __le32 data_len; /* */ - __le64 data_addr; /* */ - __le32 rss; /* */ - __le16 vlan_id; /* 12 bits */ -#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */ -#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */ -#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff - - __le16 reserved1; - __le32 reserved2[6]; - u8 reserved3[3]; - u8 flags4; -#define IB_MAC_IOCB_RSP_HV 0x20 -#define IB_MAC_IOCB_RSP_HS 0x40 -#define IB_MAC_IOCB_RSP_HL 0x80 - __le32 hdr_len; /* */ - __le64 hdr_addr; /* */ -} __packed; - -struct ib_ae_iocb_rsp { - u8 opcode; - u8 flags1; -#define IB_AE_IOCB_RSP_OI 0x01 -#define IB_AE_IOCB_RSP_I 0x02 - u8 event; -#define LINK_UP_EVENT 0x00 -#define LINK_DOWN_EVENT 0x01 -#define CAM_LOOKUP_ERR_EVENT 0x06 -#define SOFT_ECC_ERROR_EVENT 0x07 -#define MGMT_ERR_EVENT 0x08 -#define TEN_GIG_MAC_EVENT 0x09 -#define GPI0_H2L_EVENT 0x10 -#define GPI0_L2H_EVENT 0x20 -#define GPI1_H2L_EVENT 0x11 -#define GPI1_L2H_EVENT 0x21 -#define PCI_ERR_ANON_BUF_RD 0x40 - u8 q_id; - __le32 reserved[15]; -} __packed; - -/* - * These three structures are for generic - * handling of ib and ob iocbs. - */ -struct ql_net_rsp_iocb { - u8 opcode; - u8 flags0; - __le16 length; - __le32 tid; - __le32 reserved[14]; -} __packed; - -struct net_req_iocb { - u8 opcode; - u8 flags0; - __le16 flags1; - __le32 tid; - __le32 reserved1[30]; -} __packed; - -/* - * tx ring initialization control block for chip.
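On receive these flag bytes are what the completion handler decodes; checksum trust, for example, comes from flags1 together with the protocol bits in flags2. A decoding sketch:

/* Sketch: was the TCP/UDP checksum verified by hardware?  Follows the
 * IB_MAC_* definitions above.
 */
static bool example_rx_csum_ok(const struct ib_mac_iocb_rsp *rsp)
{
	/* Any error bit under the mask means the csum can't be trusted. */
	if (rsp->flags1 & IB_MAC_CSUM_ERR_MASK)
		return false;
	/* Only TCP or UDP frames carry a hardware-verified checksum. */
	return (rsp->flags2 & (IB_MAC_IOCB_RSP_T | IB_MAC_IOCB_RSP_U)) != 0;
}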
- * It is defined as: - * "Work Queue Initialization Control Block" - */ -struct wqicb { - __le16 len; -#define Q_LEN_V (1 << 4) -#define Q_LEN_CPP_CONT 0x0000 -#define Q_LEN_CPP_16 0x0001 -#define Q_LEN_CPP_32 0x0002 -#define Q_LEN_CPP_64 0x0003 -#define Q_LEN_CPP_512 0x0006 - __le16 flags; -#define Q_PRI_SHIFT 1 -#define Q_FLAGS_LC 0x1000 -#define Q_FLAGS_LB 0x2000 -#define Q_FLAGS_LI 0x4000 -#define Q_FLAGS_LO 0x8000 - __le16 cq_id_rss; -#define Q_CQ_ID_RSS_RV 0x8000 - __le16 rid; - __le64 addr; - __le64 cnsmr_idx_addr; -} __packed; - -/* - * rx ring initialization control block for chip. - * It is defined as: - * "Completion Queue Initialization Control Block" - */ -struct cqicb { - u8 msix_vect; - u8 reserved1; - u8 reserved2; - u8 flags; -#define FLAGS_LV 0x08 -#define FLAGS_LS 0x10 -#define FLAGS_LL 0x20 -#define FLAGS_LI 0x40 -#define FLAGS_LC 0x80 - __le16 len; -#define LEN_V (1 << 4) -#define LEN_CPP_CONT 0x0000 -#define LEN_CPP_32 0x0001 -#define LEN_CPP_64 0x0002 -#define LEN_CPP_128 0x0003 - __le16 rid; - __le64 addr; - __le64 prod_idx_addr; - __le16 pkt_delay; - __le16 irq_delay; - __le64 lbq_addr; - __le16 lbq_buf_size; - __le16 lbq_len; /* entry count */ - __le64 sbq_addr; - __le16 sbq_buf_size; - __le16 sbq_len; /* entry count */ -} __packed; - -struct ricb { - u8 base_cq; -#define RSS_L4K 0x80 - u8 flags; -#define RSS_L6K 0x01 -#define RSS_LI 0x02 -#define RSS_LB 0x04 -#define RSS_LM 0x08 -#define RSS_RI4 0x10 -#define RSS_RT4 0x20 -#define RSS_RI6 0x40 -#define RSS_RT6 0x80 - __le16 mask; - u8 hash_cq_id[1024]; - __le32 ipv6_hash_key[10]; - __le32 ipv4_hash_key[4]; -} __packed; - -/* SOFTWARE/DRIVER DATA STRUCTURES. */ - -struct oal { - struct tx_buf_desc oal[TX_DESC_PER_OAL]; -}; - -struct map_list { - DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); -}; - -struct tx_ring_desc { - struct sk_buff *skb; - struct ob_mac_iocb_req *queue_entry; - u32 index; - struct oal oal; - struct map_list map[MAX_SKB_FRAGS + 1]; - int map_cnt; - struct tx_ring_desc *next; -}; - -struct page_chunk { - struct page *page; /* master page */ - char *va; /* virt addr for this chunk */ - u64 map; /* mapping for master */ - unsigned int offset; /* offset for this chunk */ - unsigned int last_flag; /* flag set for last chunk in page */ -}; - -struct bq_desc { - union { - struct page_chunk pg_chunk; - struct sk_buff *skb; - } p; - __le64 *addr; - u32 index; - DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); -}; - -#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) - -struct tx_ring { - /* - * queue info. - */ - struct wqicb wqicb; /* structure used to inform chip of new queue */ - void *wq_base; /* pci_alloc:virtual addr for tx */ - dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */ - __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */ - dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */ - u32 wq_size; /* size in bytes of queue area */ - u32 wq_len; /* number of entries in queue */ - void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */ - void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */ - u16 prod_idx; /* current value for prod idx */ - u16 cq_id; /* completion (rx) queue for tx completions */ - u8 wq_id; /* queue id for this entry */ - u8 reserved1[3]; - struct tx_ring_desc *q; /* descriptor list for the queue */ - spinlock_t lock; - atomic_t tx_count; /* counts down for every outstanding IO */ - atomic_t queue_stopped; /* Turns queue off when full. 
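QL_TXQ_IDX spreads transmit work across rings by the submitting CPU, so with one tx ring per online CPU a given processor keeps hitting the same ring. A usage sketch (hypothetical function name, built directly on the macro above):

/* Sketch: ring selection at the top of a transmit path. */
static struct tx_ring *example_pick_tx_ring(struct ql_adapter *qdev,
					    struct sk_buff *skb)
{
	return &qdev->tx_ring[QL_TXQ_IDX(qdev, skb)];
}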
*/ - struct delayed_work tx_work; - struct ql_adapter *qdev; - u64 tx_packets; - u64 tx_bytes; - u64 tx_errors; -}; - -/* - * Type of inbound queue. - */ -enum { - DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */ - TX_Q = 3, /* Handles outbound completions. */ - RX_Q = 4, /* Handles inbound completions. */ -}; - -struct rx_ring { - struct cqicb cqicb; /* The chip's completion queue init control block. */ - - /* Completion queue elements. */ - void *cq_base; - dma_addr_t cq_base_dma; - u32 cq_size; - u32 cq_len; - u16 cq_id; - __le32 *prod_idx_sh_reg; /* Shadowed producer register. */ - dma_addr_t prod_idx_sh_reg_dma; - void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */ - u32 cnsmr_idx; /* current sw idx */ - struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */ - void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */ - - /* Large buffer queue elements. */ - u32 lbq_len; /* entry count */ - u32 lbq_size; /* size in bytes of queue */ - u32 lbq_buf_size; - void *lbq_base; - dma_addr_t lbq_base_dma; - void *lbq_base_indirect; - dma_addr_t lbq_base_indirect_dma; - struct page_chunk pg_chunk; /* current page for chunks */ - struct bq_desc *lbq; /* array of control blocks */ - void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ - u32 lbq_prod_idx; /* current sw prod idx */ - u32 lbq_curr_idx; /* next entry we expect */ - u32 lbq_clean_idx; /* beginning of new descs */ - u32 lbq_free_cnt; /* free buffer desc cnt */ - - /* Small buffer queue elements. */ - u32 sbq_len; /* entry count */ - u32 sbq_size; /* size in bytes of queue */ - u32 sbq_buf_size; - void *sbq_base; - dma_addr_t sbq_base_dma; - void *sbq_base_indirect; - dma_addr_t sbq_base_indirect_dma; - struct bq_desc *sbq; /* array of control blocks */ - void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */ - u32 sbq_prod_idx; /* current sw prod idx */ - u32 sbq_curr_idx; /* next entry we expect */ - u32 sbq_clean_idx; /* beginning of new descs */ - u32 sbq_free_cnt; /* free buffer desc cnt */ - - /* Misc. handler elements. */ - u32 type; /* Type of queue, tx, rx. */ - u32 irq; /* Which vector this ring is assigned. */ - u32 cpu; /* Which CPU this should run on. */ - char name[IFNAMSIZ + 5]; - struct napi_struct napi; - u8 reserved; - struct ql_adapter *qdev; - u64 rx_packets; - u64 rx_multicast; - u64 rx_bytes; - u64 rx_dropped; - u64 rx_errors; -}; - -/* - * RSS Initialization Control Block - */ -struct hash_id { - u8 value[4]; -}; - -struct nic_stats { - /* - * These stats come from offset 200h to 278h - * in the XGMAC register. - */ - u64 tx_pkts; - u64 tx_bytes; - u64 tx_mcast_pkts; - u64 tx_bcast_pkts; - u64 tx_ucast_pkts; - u64 tx_ctl_pkts; - u64 tx_pause_pkts; - u64 tx_64_pkt; - u64 tx_65_to_127_pkt; - u64 tx_128_to_255_pkt; - u64 tx_256_511_pkt; - u64 tx_512_to_1023_pkt; - u64 tx_1024_to_1518_pkt; - u64 tx_1519_to_max_pkt; - u64 tx_undersize_pkt; - u64 tx_oversize_pkt; - - /* - * These stats come from offset 300h to 3C8h - * in the XGMAC register. 
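The lbq/sbq index trio above (prod/curr/clean plus a free count) implements a classic producer ring against the chip: after refilling buffer descriptors, the new producer index is published through the queue's doorbell. A minimal sketch (the real refill path also maintains clean_idx and free_cnt):

/* Sketch: publish a refilled large-buffer producer index. */
static void example_publish_lbq(struct rx_ring *rx_ring, u32 refilled)
{
	rx_ring->lbq_prod_idx += refilled;
	if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len)
		rx_ring->lbq_prod_idx -= rx_ring->lbq_len;	/* wrap */
	ql_write_db_reg(rx_ring->lbq_prod_idx, rx_ring->lbq_prod_idx_db_reg);
}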
- */ - u64 rx_bytes; - u64 rx_bytes_ok; - u64 rx_pkts; - u64 rx_pkts_ok; - u64 rx_bcast_pkts; - u64 rx_mcast_pkts; - u64 rx_ucast_pkts; - u64 rx_undersize_pkts; - u64 rx_oversize_pkts; - u64 rx_jabber_pkts; - u64 rx_undersize_fcerr_pkts; - u64 rx_drop_events; - u64 rx_fcerr_pkts; - u64 rx_align_err; - u64 rx_symbol_err; - u64 rx_mac_err; - u64 rx_ctl_pkts; - u64 rx_pause_pkts; - u64 rx_64_pkts; - u64 rx_65_to_127_pkts; - u64 rx_128_255_pkts; - u64 rx_256_511_pkts; - u64 rx_512_to_1023_pkts; - u64 rx_1024_to_1518_pkts; - u64 rx_1519_to_max_pkts; - u64 rx_len_err_pkts; - /* - * These stats come from offset 500h to 5C8h - * in the XGMAC register. - */ - u64 tx_cbfc_pause_frames0; - u64 tx_cbfc_pause_frames1; - u64 tx_cbfc_pause_frames2; - u64 tx_cbfc_pause_frames3; - u64 tx_cbfc_pause_frames4; - u64 tx_cbfc_pause_frames5; - u64 tx_cbfc_pause_frames6; - u64 tx_cbfc_pause_frames7; - u64 rx_cbfc_pause_frames0; - u64 rx_cbfc_pause_frames1; - u64 rx_cbfc_pause_frames2; - u64 rx_cbfc_pause_frames3; - u64 rx_cbfc_pause_frames4; - u64 rx_cbfc_pause_frames5; - u64 rx_cbfc_pause_frames6; - u64 rx_cbfc_pause_frames7; - u64 rx_nic_fifo_drop; -}; - -/* Firmware coredump internal register address/length pairs. */ -enum { - MPI_CORE_REGS_ADDR = 0x00030000, - MPI_CORE_REGS_CNT = 127, - MPI_CORE_SH_REGS_CNT = 16, - TEST_REGS_ADDR = 0x00001000, - TEST_REGS_CNT = 23, - RMII_REGS_ADDR = 0x00001040, - RMII_REGS_CNT = 64, - FCMAC1_REGS_ADDR = 0x00001080, - FCMAC2_REGS_ADDR = 0x000010c0, - FCMAC_REGS_CNT = 64, - FC1_MBX_REGS_ADDR = 0x00001100, - FC2_MBX_REGS_ADDR = 0x00001240, - FC_MBX_REGS_CNT = 64, - IDE_REGS_ADDR = 0x00001140, - IDE_REGS_CNT = 64, - NIC1_MBX_REGS_ADDR = 0x00001180, - NIC2_MBX_REGS_ADDR = 0x00001280, - NIC_MBX_REGS_CNT = 64, - SMBUS_REGS_ADDR = 0x00001200, - SMBUS_REGS_CNT = 64, - I2C_REGS_ADDR = 0x00001fc0, - I2C_REGS_CNT = 64, - MEMC_REGS_ADDR = 0x00003000, - MEMC_REGS_CNT = 256, - PBUS_REGS_ADDR = 0x00007c00, - PBUS_REGS_CNT = 256, - MDE_REGS_ADDR = 0x00010000, - MDE_REGS_CNT = 6, - CODE_RAM_ADDR = 0x00020000, - CODE_RAM_CNT = 0x2000, - MEMC_RAM_ADDR = 0x00100000, - MEMC_RAM_CNT = 0x2000, -}; - -#define MPI_COREDUMP_COOKIE 0x5555aaaa -struct mpi_coredump_global_header { - u32 cookie; - u8 idString[16]; - u32 timeLo; - u32 timeHi; - u32 imageSize; - u32 headerSize; - u8 info[220]; -}; - -struct mpi_coredump_segment_header { - u32 cookie; - u32 segNum; - u32 segSize; - u32 extra; - u8 description[16]; -}; - -/* Firmware coredump header segment numbers. 
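As a cross-check on the offset comments above: 200h through 278h in 8-byte steps is (0x278 - 0x200) / 8 + 1 = 16 registers, matching the sixteen tx_* counters, and 300h through 3C8h gives (0x3C8 - 0x300) / 8 + 1 = 26, matching the twenty-six rx_* counters.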
*/ -enum { - CORE_SEG_NUM = 1, - TEST_LOGIC_SEG_NUM = 2, - RMII_SEG_NUM = 3, - FCMAC1_SEG_NUM = 4, - FCMAC2_SEG_NUM = 5, - FC1_MBOX_SEG_NUM = 6, - IDE_SEG_NUM = 7, - NIC1_MBOX_SEG_NUM = 8, - SMBUS_SEG_NUM = 9, - FC2_MBOX_SEG_NUM = 10, - NIC2_MBOX_SEG_NUM = 11, - I2C_SEG_NUM = 12, - MEMC_SEG_NUM = 13, - PBUS_SEG_NUM = 14, - MDE_SEG_NUM = 15, - NIC1_CONTROL_SEG_NUM = 16, - NIC2_CONTROL_SEG_NUM = 17, - NIC1_XGMAC_SEG_NUM = 18, - NIC2_XGMAC_SEG_NUM = 19, - WCS_RAM_SEG_NUM = 20, - MEMC_RAM_SEG_NUM = 21, - XAUI_AN_SEG_NUM = 22, - XAUI_HSS_PCS_SEG_NUM = 23, - XFI_AN_SEG_NUM = 24, - XFI_TRAIN_SEG_NUM = 25, - XFI_HSS_PCS_SEG_NUM = 26, - XFI_HSS_TX_SEG_NUM = 27, - XFI_HSS_RX_SEG_NUM = 28, - XFI_HSS_PLL_SEG_NUM = 29, - MISC_NIC_INFO_SEG_NUM = 30, - INTR_STATES_SEG_NUM = 31, - CAM_ENTRIES_SEG_NUM = 32, - ROUTING_WORDS_SEG_NUM = 33, - ETS_SEG_NUM = 34, - PROBE_DUMP_SEG_NUM = 35, - ROUTING_INDEX_SEG_NUM = 36, - MAC_PROTOCOL_SEG_NUM = 37, - XAUI2_AN_SEG_NUM = 38, - XAUI2_HSS_PCS_SEG_NUM = 39, - XFI2_AN_SEG_NUM = 40, - XFI2_TRAIN_SEG_NUM = 41, - XFI2_HSS_PCS_SEG_NUM = 42, - XFI2_HSS_TX_SEG_NUM = 43, - XFI2_HSS_RX_SEG_NUM = 44, - XFI2_HSS_PLL_SEG_NUM = 45, - SEM_REGS_SEG_NUM = 50 - -}; - -/* There are 64 generic NIC registers. */ -#define NIC_REGS_DUMP_WORD_COUNT 64 -/* XGMAC word count. */ -#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4) -/* Word counts for the SERDES blocks. */ -#define XG_SERDES_XAUI_AN_COUNT 14 -#define XG_SERDES_XAUI_HSS_PCS_COUNT 33 -#define XG_SERDES_XFI_AN_COUNT 14 -#define XG_SERDES_XFI_TRAIN_COUNT 12 -#define XG_SERDES_XFI_HSS_PCS_COUNT 15 -#define XG_SERDES_XFI_HSS_TX_COUNT 32 -#define XG_SERDES_XFI_HSS_RX_COUNT 32 -#define XG_SERDES_XFI_HSS_PLL_COUNT 32 - -/* There are 2 CNA ETS and 8 NIC ETS registers. */ -#define ETS_REGS_DUMP_WORD_COUNT 10 - -/* Each probe mux entry stores the probe type plus 64 entries - * that are each 64 bits in length. There are a total of - * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes. - */ -#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2)) -#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \ - PRB_MX_ADDR_VALID_TOTAL) -/* Each routing entry consists of 4 32-bit words. - * They are route type, index, index word, and result. - * There are 2 route blocks with 8 entries each and - * 2 NIC blocks with 16 entries each. - * The total is 48 entries with 4 words each. - */ -#define RT_IDX_DUMP_ENTRIES 48 -#define RT_IDX_DUMP_WORDS_PER_ENTRY 4 -#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \ - RT_IDX_DUMP_WORDS_PER_ENTRY) -/* There are 10 address blocks in filter, each with - * different entry counts and different word-count-per-entry.
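Spelled out, the probe segment sizing is 1 + 64 * 2 = 129 words per valid module/clock pair, and 129 * 34 pairs = 4386 words; the routing segment is 48 entries * 4 words = 192 words.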
- */ -#define MAC_ADDR_DUMP_ENTRIES \ - ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \ - (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \ - (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \ - (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \ - (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \ - (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \ - (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \ - (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \ - (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \ - (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT)) -#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2 -#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \ - MAC_ADDR_DUMP_WORDS_PER_ENTRY) -/* Maximum of 4 functions whose semaphore registers are - * in the coredump. - */ -#define MAX_SEMAPHORE_FUNCTIONS 4 -/* Defines for accessing the MPI shadow registers. */ -#define RISC_124 0x0003007c -#define RISC_127 0x0003007f -#define SHADOW_OFFSET 0xb0000000 -#define SHADOW_REG_SHIFT 20 - -struct ql_nic_misc { - u32 rx_ring_count; - u32 tx_ring_count; - u32 intr_count; - u32 function; -}; - -struct ql_reg_dump { - - /* segment 0 */ - struct mpi_coredump_global_header mpi_global_header; - - /* segment 16 */ - struct mpi_coredump_segment_header nic_regs_seg_hdr; - u32 nic_regs[64]; - - /* segment 30 */ - struct mpi_coredump_segment_header misc_nic_seg_hdr; - struct ql_nic_misc misc_nic_info; - - /* segment 31 */ - /* one interrupt state for each CQ */ - struct mpi_coredump_segment_header intr_states_seg_hdr; - u32 intr_states[MAX_CPUS]; - - /* segment 32 */ - /* 3 cam words each for 16 unicast, - * 2 cam words for each of 32 multicast.
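The four shadow defines are consumed together: shadow register i is selected by writing (SHADOW_OFFSET | i << SHADOW_REG_SHIFT) to MPI register RISC_124, after which the shadowed value is read back through RISC_127, which is exactly the sequence ql_get_mpi_shadow_regs() performs in the qlge_dbg.c hunk further down.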
- */ - struct mpi_coredump_segment_header cam_entries_seg_hdr; - u32 cam_entries[(16 * 3) + (32 * 3)]; - - /* segment 33 */ - struct mpi_coredump_segment_header nic_routing_words_seg_hdr; - u32 nic_routing_words[16]; - - /* segment 34 */ - struct mpi_coredump_segment_header ets_seg_hdr; - u32 ets[8+2]; -}; - -struct ql_mpi_coredump { - /* segment 0 */ - struct mpi_coredump_global_header mpi_global_header; - - /* segment 1 */ - struct mpi_coredump_segment_header core_regs_seg_hdr; - u32 mpi_core_regs[MPI_CORE_REGS_CNT]; - u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT]; - - /* segment 2 */ - struct mpi_coredump_segment_header test_logic_regs_seg_hdr; - u32 test_logic_regs[TEST_REGS_CNT]; - - /* segment 3 */ - struct mpi_coredump_segment_header rmii_regs_seg_hdr; - u32 rmii_regs[RMII_REGS_CNT]; - - /* segment 4 */ - struct mpi_coredump_segment_header fcmac1_regs_seg_hdr; - u32 fcmac1_regs[FCMAC_REGS_CNT]; - - /* segment 5 */ - struct mpi_coredump_segment_header fcmac2_regs_seg_hdr; - u32 fcmac2_regs[FCMAC_REGS_CNT]; - - /* segment 6 */ - struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr; - u32 fc1_mbx_regs[FC_MBX_REGS_CNT]; - - /* segment 7 */ - struct mpi_coredump_segment_header ide_regs_seg_hdr; - u32 ide_regs[IDE_REGS_CNT]; - - /* segment 8 */ - struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr; - u32 nic1_mbx_regs[NIC_MBX_REGS_CNT]; - - /* segment 9 */ - struct mpi_coredump_segment_header smbus_regs_seg_hdr; - u32 smbus_regs[SMBUS_REGS_CNT]; - - /* segment 10 */ - struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr; - u32 fc2_mbx_regs[FC_MBX_REGS_CNT]; - - /* segment 11 */ - struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr; - u32 nic2_mbx_regs[NIC_MBX_REGS_CNT]; - - /* segment 12 */ - struct mpi_coredump_segment_header i2c_regs_seg_hdr; - u32 i2c_regs[I2C_REGS_CNT]; - /* segment 13 */ - struct mpi_coredump_segment_header memc_regs_seg_hdr; - u32 memc_regs[MEMC_REGS_CNT]; - - /* segment 14 */ - struct mpi_coredump_segment_header pbus_regs_seg_hdr; - u32 pbus_regs[PBUS_REGS_CNT]; - - /* segment 15 */ - struct mpi_coredump_segment_header mde_regs_seg_hdr; - u32 mde_regs[MDE_REGS_CNT]; - - /* segment 16 */ - struct mpi_coredump_segment_header nic_regs_seg_hdr; - u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT]; - - /* segment 17 */ - struct mpi_coredump_segment_header nic2_regs_seg_hdr; - u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT]; - - /* segment 18 */ - struct mpi_coredump_segment_header xgmac1_seg_hdr; - u32 xgmac1[XGMAC_DUMP_WORD_COUNT]; - - /* segment 19 */ - struct mpi_coredump_segment_header xgmac2_seg_hdr; - u32 xgmac2[XGMAC_DUMP_WORD_COUNT]; - - /* segment 20 */ - struct mpi_coredump_segment_header code_ram_seg_hdr; - u32 code_ram[CODE_RAM_CNT]; - - /* segment 21 */ - struct mpi_coredump_segment_header memc_ram_seg_hdr; - u32 memc_ram[MEMC_RAM_CNT]; - - /* segment 22 */ - struct mpi_coredump_segment_header xaui_an_hdr; - u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT]; - - /* segment 23 */ - struct mpi_coredump_segment_header xaui_hss_pcs_hdr; - u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; - - /* segment 24 */ - struct mpi_coredump_segment_header xfi_an_hdr; - u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT]; - - /* segment 25 */ - struct mpi_coredump_segment_header xfi_train_hdr; - u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; - - /* segment 26 */ - struct mpi_coredump_segment_header xfi_hss_pcs_hdr; - u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; - - /* segment 27 */ - struct mpi_coredump_segment_header xfi_hss_tx_hdr; - u32 
serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT]; - - /* segment 28 */ - struct mpi_coredump_segment_header xfi_hss_rx_hdr; - u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT]; - - /* segment 29 */ - struct mpi_coredump_segment_header xfi_hss_pll_hdr; - u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT]; - - /* segment 30 */ - struct mpi_coredump_segment_header misc_nic_seg_hdr; - struct ql_nic_misc misc_nic_info; - - /* segment 31 */ - /* one interrupt state for each CQ */ - struct mpi_coredump_segment_header intr_states_seg_hdr; - u32 intr_states[MAX_RX_RINGS]; - - /* segment 32 */ - /* 3 cam words each for 16 unicast, - * 2 cam words for each of 32 multicast. - */ - struct mpi_coredump_segment_header cam_entries_seg_hdr; - u32 cam_entries[(16 * 3) + (32 * 3)]; - - /* segment 33 */ - struct mpi_coredump_segment_header nic_routing_words_seg_hdr; - u32 nic_routing_words[16]; - /* segment 34 */ - struct mpi_coredump_segment_header ets_seg_hdr; - u32 ets[ETS_REGS_DUMP_WORD_COUNT]; - - /* segment 35 */ - struct mpi_coredump_segment_header probe_dump_seg_hdr; - u32 probe_dump[PRB_MX_DUMP_TOT_COUNT]; - - /* segment 36 */ - struct mpi_coredump_segment_header routing_reg_seg_hdr; - u32 routing_regs[RT_IDX_DUMP_TOT_WORDS]; - - /* segment 37 */ - struct mpi_coredump_segment_header mac_prot_reg_seg_hdr; - u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS]; - - /* segment 38 */ - struct mpi_coredump_segment_header xaui2_an_hdr; - u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT]; - - /* segment 39 */ - struct mpi_coredump_segment_header xaui2_hss_pcs_hdr; - u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; - - /* segment 40 */ - struct mpi_coredump_segment_header xfi2_an_hdr; - u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT]; - - /* segment 41 */ - struct mpi_coredump_segment_header xfi2_train_hdr; - u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; - - /* segment 42 */ - struct mpi_coredump_segment_header xfi2_hss_pcs_hdr; - u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; - - /* segment 43 */ - struct mpi_coredump_segment_header xfi2_hss_tx_hdr; - u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT]; - - /* segment 44 */ - struct mpi_coredump_segment_header xfi2_hss_rx_hdr; - u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT]; - - /* segment 45 */ - struct mpi_coredump_segment_header xfi2_hss_pll_hdr; - u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT]; - - /* segment 50 */ - /* semaphore register for all 5 functions */ - struct mpi_coredump_segment_header sem_regs_seg_hdr; - u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS]; -}; - -/* - * intr_context structure is used during initialization - * to hook the interrupts. It is also used in a single - * irq environment as a context to the ISR. - */ -struct intr_context { - struct ql_adapter *qdev; - u32 intr; - u32 irq_mask; /* Mask of which rings the vector services. */ - u32 hooked; - u32 intr_en_mask; /* value/mask used to enable this intr */ - u32 intr_dis_mask; /* value/mask used to disable this intr */ - u32 intr_read_mask; /* value/mask used to read this intr */ - char name[IFNAMSIZ * 2]; - atomic_t irq_cnt; /* irq_cnt is used in single vector - * environment. It's incremented for each - * irq handler that is scheduled. When each - * handler finishes it decrements irq_cnt and - * enables interrupts if it's zero. */ - irq_handler_t handler; -}; - -/* adapter flags definitions. */ -enum { - QL_ADAPTER_UP = 0, /* Adapter has been brought up. 
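The irq_cnt comment above describes the single-vector discipline: interrupts stay masked while any handler is outstanding and are re-enabled only when the count drains to zero. A sketch of that idea (the real logic lives in ql_enable_completion_interrupt(), declared later in this header):

/* Sketch: single-vector accounting per the irq_cnt comment above. */
static void example_intr_handler_done(struct intr_context *ctx)
{
	if (atomic_dec_and_test(&ctx->irq_cnt))
		/* Last outstanding handler: re-enable the vector. */
		ql_write32(ctx->qdev, INTR_EN, ctx->intr_en_mask);
}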
*/ - QL_LEGACY_ENABLED = 1, - QL_MSI_ENABLED = 2, - QL_MSIX_ENABLED = 3, - QL_DMA64 = 4, - QL_PROMISCUOUS = 5, - QL_ALLMULTI = 6, - QL_PORT_CFG = 7, - QL_CAM_RT_SET = 8, - QL_SELFTEST = 9, - QL_LB_LINK_UP = 10, - QL_FRC_COREDUMP = 11, - QL_EEH_FATAL = 12, - QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */ -}; - -/* link_status bit definitions */ -enum { - STS_LOOPBACK_MASK = 0x00000700, - STS_LOOPBACK_PCS = 0x00000100, - STS_LOOPBACK_HSS = 0x00000200, - STS_LOOPBACK_EXT = 0x00000300, - STS_PAUSE_MASK = 0x000000c0, - STS_PAUSE_STD = 0x00000040, - STS_PAUSE_PRI = 0x00000080, - STS_SPEED_MASK = 0x00000038, - STS_SPEED_100Mb = 0x00000000, - STS_SPEED_1Gb = 0x00000008, - STS_SPEED_10Gb = 0x00000010, - STS_LINK_TYPE_MASK = 0x00000007, - STS_LINK_TYPE_XFI = 0x00000001, - STS_LINK_TYPE_XAUI = 0x00000002, - STS_LINK_TYPE_XFI_BP = 0x00000003, - STS_LINK_TYPE_XAUI_BP = 0x00000004, - STS_LINK_TYPE_10GBASET = 0x00000005, -}; - -/* link_config bit definitions */ -enum { - CFG_JUMBO_FRAME_SIZE = 0x00010000, - CFG_PAUSE_MASK = 0x00000060, - CFG_PAUSE_STD = 0x00000020, - CFG_PAUSE_PRI = 0x00000040, - CFG_DCBX = 0x00000010, - CFG_LOOPBACK_MASK = 0x00000007, - CFG_LOOPBACK_PCS = 0x00000002, - CFG_LOOPBACK_HSS = 0x00000004, - CFG_LOOPBACK_EXT = 0x00000006, - CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580, -}; - -struct nic_operations { - - int (*get_flash) (struct ql_adapter *); - int (*port_initialize) (struct ql_adapter *); -}; - -/* - * The main Adapter structure definition. - * This structure has all fields relevant to the hardware. - */ -struct ql_adapter { - struct ricb ricb; - unsigned long flags; - u32 wol; - - struct nic_stats nic_stats; - - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - - /* PCI Configuration information for this device */ - struct pci_dev *pdev; - struct net_device *ndev; /* Parent NET device */ - - /* Hardware information */ - u32 chip_rev_id; - u32 fw_rev_id; - u32 func; /* PCI function for this adapter */ - u32 alt_func; /* PCI function for alternate adapter */ - u32 port; /* Port number for this adapter */ - - spinlock_t adapter_lock; - spinlock_t hw_lock; - spinlock_t stats_lock; - - /* PCI Bus Relative Register Addresses */ - void __iomem *reg_base; - void __iomem *doorbell_area; - u32 doorbell_area_size; - - u32 msg_enable; - - /* Page for Shadow Registers */ - void *rx_ring_shadow_reg_area; - dma_addr_t rx_ring_shadow_reg_dma; - void *tx_ring_shadow_reg_area; - dma_addr_t tx_ring_shadow_reg_dma; - - u32 mailbox_in; - u32 mailbox_out; - struct mbox_params idc_mbc; - struct mutex mpi_mutex; - - int tx_ring_size; - int rx_ring_size; - u32 intr_count; - struct msix_entry *msi_x_entry; - struct intr_context intr_context[MAX_RX_RINGS]; - - int tx_ring_count; /* One per online CPU. */ - u32 rss_ring_count; /* One per irq vector.
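The STS_* masks decode the firmware-reported link word; the speed field, for instance, is three bits wide. A decoding sketch:

/* Sketch: map link_status speed bits to a printable string. */
static const char *example_link_speed(u32 link_status)
{
	switch (link_status & STS_SPEED_MASK) {
	case STS_SPEED_100Mb:	return "100Mb";
	case STS_SPEED_1Gb:	return "1Gb";
	case STS_SPEED_10Gb:	return "10Gb";
	default:		return "unknown";
	}
}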
*/ - /* - * rx_ring_count = - * (CPU count * outbound completion rx_ring) + - * (irq_vector_cnt * inbound (RSS) completion rx_ring) - */ - int rx_ring_count; - int ring_mem_size; - void *ring_mem; - - struct rx_ring rx_ring[MAX_RX_RINGS]; - struct tx_ring tx_ring[MAX_TX_RINGS]; - unsigned int lbq_buf_order; - - int rx_csum; - u32 default_rx_queue; - - u16 rx_coalesce_usecs; /* cqicb->int_delay */ - u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */ - u16 tx_coalesce_usecs; /* cqicb->int_delay */ - u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */ - - u32 xg_sem_mask; - u32 port_link_up; - u32 port_init; - u32 link_status; - struct ql_mpi_coredump *mpi_coredump; - u32 core_is_dumped; - u32 link_config; - u32 led_config; - u32 max_frame_size; - - union flash_params flash; - - struct workqueue_struct *workqueue; - struct delayed_work asic_reset_work; - struct delayed_work mpi_reset_work; - struct delayed_work mpi_work; - struct delayed_work mpi_port_cfg_work; - struct delayed_work mpi_idc_work; - struct delayed_work mpi_core_to_log; - struct completion ide_completion; - const struct nic_operations *nic_ops; - u16 device_id; - struct timer_list timer; - atomic_t lb_count; - /* Keep local copy of current mac address. */ - char current_mac_addr[6]; -}; - -/* - * Typical Register accessor for memory mapped device. - */ -static inline u32 ql_read32(const struct ql_adapter *qdev, int reg) -{ - return readl(qdev->reg_base + reg); -} - -/* - * Typical Register accessor for memory mapped device. - */ -static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val) -{ - writel(val, qdev->reg_base + reg); -} - -/* - * Doorbell Registers: - * Doorbell registers are virtual registers in the PCI memory space. - * The space is allocated by the chip during PCI initialization. The - * device driver finds the doorbell address in BAR 3 in PCI config space. - * The registers are used to control outbound and inbound queues. For - * example, the producer index for an outbound queue. Each queue uses - * 1 4k chunk of memory. The lower half of the space is for outbound - * queues. The upper half is for inbound queues. - */ -static inline void ql_write_db_reg(u32 val, void __iomem *addr) -{ - writel(val, addr); - mmiowb(); -} - -/* - * Shadow Registers: - * Outbound queues have a consumer index that is maintained by the chip. - * Inbound queues have a producer index that is maintained by the chip. - * For lower overhead, these registers are "shadowed" to host memory - * which allows the device driver to track the queue progress without - * PCI reads. When an entry is placed on an inbound queue, the chip will - * update the relevant index register and then copy the value to the - * shadow register in host memory. 
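Doorbells and shadow registers are the two halves of the fast path: producer indices go to the chip through ql_write_db_reg(), and chip-maintained indices come back through the host-memory shadows read by ql_read_sh_reg() just below, so steady-state operation needs no PCI reads. A combined usage sketch against the ring fields defined earlier:

/* Sketch: tx fast-path bookkeeping with the two helpers here. */
static void example_tx_kick(struct tx_ring *tx_ring)
{
	/* Publish the new producer index to the chip. */
	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
}

static u32 example_tx_consumed(struct tx_ring *tx_ring)
{
	/* Poll the chip's consumer index from its host-memory shadow. */
	return ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg);
}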
- */ -static inline u32 ql_read_sh_reg(__le32 *addr) -{ - u32 reg; - reg = le32_to_cpu(*addr); - rmb(); - return reg; -} - -extern char qlge_driver_name[]; -extern const char qlge_driver_version[]; -extern const struct ethtool_ops qlge_ethtool_ops; - -extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); -extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); -extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); -extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, - u32 *value); -extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); -extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, - u16 q_id); -void ql_queue_fw_error(struct ql_adapter *qdev); -void ql_mpi_work(struct work_struct *work); -void ql_mpi_reset_work(struct work_struct *work); -void ql_mpi_core_to_log(struct work_struct *work); -int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); -void ql_queue_asic_error(struct ql_adapter *qdev); -u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); -void ql_set_ethtool_ops(struct net_device *ndev); -int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); -void ql_mpi_idc_work(struct work_struct *work); -void ql_mpi_port_cfg_work(struct work_struct *work); -int ql_mb_get_fw_state(struct ql_adapter *qdev); -int ql_cam_route_initialize(struct ql_adapter *qdev); -int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); -int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data); -int ql_unpause_mpi_risc(struct ql_adapter *qdev); -int ql_pause_mpi_risc(struct ql_adapter *qdev); -int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); -int ql_soft_reset_mpi_risc(struct ql_adapter *qdev); -int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, - u32 ram_addr, int word_count); -int ql_core_dump(struct ql_adapter *qdev, - struct ql_mpi_coredump *mpi_coredump); -int ql_mb_about_fw(struct ql_adapter *qdev); -int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); -int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); -int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config); -int ql_mb_get_led_cfg(struct ql_adapter *qdev); -void ql_link_on(struct ql_adapter *qdev); -void ql_link_off(struct ql_adapter *qdev); -int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); -int ql_mb_get_port_cfg(struct ql_adapter *qdev); -int ql_mb_set_port_cfg(struct ql_adapter *qdev); -int ql_wait_fifo_empty(struct ql_adapter *qdev); -void ql_get_dump(struct ql_adapter *qdev, void *buff); -void ql_gen_reg_dump(struct ql_adapter *qdev, - struct ql_reg_dump *mpi_coredump); -netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); -void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); -int ql_own_firmware(struct ql_adapter *qdev); -int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); - -/* #define QL_ALL_DUMP */ -/* #define QL_REG_DUMP */ -/* #define QL_DEV_DUMP */ -/* #define QL_CB_DUMP */ -/* #define QL_IB_DUMP */ -/* #define QL_OB_DUMP */ - -#ifdef QL_REG_DUMP -extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); -extern void ql_dump_routing_entries(struct ql_adapter *qdev); -extern void ql_dump_regs(struct ql_adapter *qdev); -#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev) -#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev) -#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev) -#else -#define QL_DUMP_REGS(qdev) -#define 
QL_DUMP_ROUTE(qdev) -#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) -#endif - -#ifdef QL_STAT_DUMP -extern void ql_dump_stat(struct ql_adapter *qdev); -#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev) -#else -#define QL_DUMP_STAT(qdev) -#endif - -#ifdef QL_DEV_DUMP -extern void ql_dump_qdev(struct ql_adapter *qdev); -#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev) -#else -#define QL_DUMP_QDEV(qdev) -#endif - -#ifdef QL_CB_DUMP -extern void ql_dump_wqicb(struct wqicb *wqicb); -extern void ql_dump_tx_ring(struct tx_ring *tx_ring); -extern void ql_dump_ricb(struct ricb *ricb); -extern void ql_dump_cqicb(struct cqicb *cqicb); -extern void ql_dump_rx_ring(struct rx_ring *rx_ring); -extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id); -#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb) -#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb) -#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring) -#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb) -#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring) -#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \ - ql_dump_hw_cb(qdev, size, bit, q_id) -#else -#define QL_DUMP_RICB(ricb) -#define QL_DUMP_WQICB(wqicb) -#define QL_DUMP_TX_RING(tx_ring) -#define QL_DUMP_CQICB(cqicb) -#define QL_DUMP_RX_RING(rx_ring) -#define QL_DUMP_HW_CB(qdev, size, bit, q_id) -#endif - -#ifdef QL_OB_DUMP -extern void ql_dump_tx_desc(struct tx_buf_desc *tbd); -extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb); -extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp); -#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb) -#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp) -#else -#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) -#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) -#endif - -#ifdef QL_IB_DUMP -extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp); -#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp) -#else -#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) -#endif - -#ifdef QL_ALL_DUMP -extern void ql_dump_all(struct ql_adapter *qdev); -#define QL_DUMP_ALL(qdev) ql_dump_all(qdev) -#else -#define QL_DUMP_ALL(qdev) -#endif - -#endif /* _QLGE_H_ */ diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c deleted file mode 100644 index fca804f..0000000 --- a/drivers/net/qlge/qlge_dbg.c +++ /dev/null @@ -1,2044 +0,0 @@ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/slab.h> - -#include "qlge.h" - -/* Read a NIC register from the alternate function. */ -static u32 ql_read_other_func_reg(struct ql_adapter *qdev, - u32 reg) -{ - u32 register_to_read; - u32 reg_val; - unsigned int status = 0; - - register_to_read = MPI_NIC_REG_BLOCK - | MPI_NIC_READ - | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) - | reg; - status = ql_read_mpi_reg(qdev, register_to_read, &reg_val); - if (status != 0) - return 0xffffffff; - - return reg_val; -} - -/* Write a NIC register from the alternate function.
*/ -static int ql_write_other_func_reg(struct ql_adapter *qdev, - u32 reg, u32 reg_val) -{ - u32 register_to_read; - int status = 0; - - register_to_read = MPI_NIC_REG_BLOCK - | MPI_NIC_READ - | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) - | reg; - status = ql_write_mpi_reg(qdev, register_to_read, reg_val); - - return status; -} - -static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, - u32 bit, u32 err_bit) -{ - u32 temp; - int count = 10; - - while (count) { - temp = ql_read_other_func_reg(qdev, reg); - - /* check for errors */ - if (temp & err_bit) - return -1; - else if (temp & bit) - return 0; - mdelay(10); - count--; - } - return -1; -} - -static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, - u32 *data) -{ - int status; - - /* wait for reg to come ready */ - status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, - XG_SERDES_ADDR_RDY, 0); - if (status) - goto exit; - - /* set up for reg read */ - ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R); - - /* wait for reg to come ready */ - status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, - XG_SERDES_ADDR_RDY, 0); - if (status) - goto exit; - - /* get the data */ - *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4)); -exit: - return status; -} - -/* Read out the SERDES registers */ -static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data) -{ - int status; - - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); - if (status) - goto exit; - - /* set up for reg read */ - ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R); - - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); - if (status) - goto exit; - - /* get the data */ - *data = ql_read32(qdev, XG_SERDES_DATA); -exit: - return status; -} - -static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, - u32 *direct_ptr, u32 *indirect_ptr, - unsigned int direct_valid, unsigned int indirect_valid) -{ - unsigned int status; - - status = 1; - if (direct_valid) - status = ql_read_serdes_reg(qdev, addr, direct_ptr); - /* Dead fill any failures or invalids. */ - if (status) - *direct_ptr = 0xDEADBEEF; - - status = 1; - if (indirect_valid) - status = ql_read_other_func_serdes_reg( - qdev, addr, indirect_ptr); - /* Dead fill any failures or invalids. 
*/ - if (status) - *indirect_ptr = 0xDEADBEEF; -} - -static int ql_get_serdes_regs(struct ql_adapter *qdev, - struct ql_mpi_coredump *mpi_coredump) -{ - int status; - unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid; - unsigned int xaui_indirect_valid, i; - u32 *direct_ptr, temp; - u32 *indirect_ptr; - - xfi_direct_valid = xfi_indirect_valid = 0; - xaui_direct_valid = xaui_indirect_valid = 1; - - /* The XAUI needs to be read out per port */ - if (qdev->func & 1) { - /* We are NIC 2 */ - status = ql_read_other_func_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_indirect_valid = 0; - - status = ql_read_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_direct_valid = 0; - } else { - /* We are NIC 1 */ - status = ql_read_other_func_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_indirect_valid = 0; - - status = ql_read_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_direct_valid = 0; - } - - /* - * XFI register is shared so only need to read one - * function and then check the bits. - */ - status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp); - if (status) - temp = 0; - - if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) == - XG_SERDES_ADDR_XFI1_PWR_UP) { - /* now see if I'm NIC 1 or NIC 2 */ - if (qdev->func & 1) - /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ - xfi_indirect_valid = 1; - else - xfi_direct_valid = 1; - } - if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) == - XG_SERDES_ADDR_XFI2_PWR_UP) { - /* now see if I'm NIC 1 or NIC 2 */ - if (qdev->func & 1) - /* I'm NIC 2, so the direct xfi is up. */ - xfi_direct_valid = 1; - else - xfi_indirect_valid = 1; - } - - /* Get XAUI_AN register block. */ - if (qdev->func & 1) { - /* Function 2 is direct */ - direct_ptr = mpi_coredump->serdes2_xaui_an; - indirect_ptr = mpi_coredump->serdes_xaui_an; - } else { - /* Function 1 is direct */ - direct_ptr = mpi_coredump->serdes_xaui_an; - indirect_ptr = mpi_coredump->serdes2_xaui_an; - } - - for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xaui_direct_valid, xaui_indirect_valid); - - /* Get XAUI_HSS_PCS register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xaui_hss_pcs; - indirect_ptr = - mpi_coredump->serdes_xaui_hss_pcs; - } else { - direct_ptr = - mpi_coredump->serdes_xaui_hss_pcs; - indirect_ptr = - mpi_coredump->serdes2_xaui_hss_pcs; - } - - for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xaui_direct_valid, xaui_indirect_valid); - - /* Get XAUI_XFI_AN register block.
*/ - if (qdev->func & 1) { - direct_ptr = mpi_coredump->serdes2_xfi_an; - indirect_ptr = mpi_coredump->serdes_xfi_an; - } else { - direct_ptr = mpi_coredump->serdes_xfi_an; - indirect_ptr = mpi_coredump->serdes2_xfi_an; - } - - for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_TRAIN register block. */ - if (qdev->func & 1) { - direct_ptr = mpi_coredump->serdes2_xfi_train; - indirect_ptr = - mpi_coredump->serdes_xfi_train; - } else { - direct_ptr = mpi_coredump->serdes_xfi_train; - indirect_ptr = - mpi_coredump->serdes2_xfi_train; - } - - for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_HSS_PCS register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xfi_hss_pcs; - indirect_ptr = - mpi_coredump->serdes_xfi_hss_pcs; - } else { - direct_ptr = - mpi_coredump->serdes_xfi_hss_pcs; - indirect_ptr = - mpi_coredump->serdes2_xfi_hss_pcs; - } - - for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_HSS_TX register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xfi_hss_tx; - indirect_ptr = - mpi_coredump->serdes_xfi_hss_tx; - } else { - direct_ptr = mpi_coredump->serdes_xfi_hss_tx; - indirect_ptr = - mpi_coredump->serdes2_xfi_hss_tx; - } - for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_HSS_RX register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xfi_hss_rx; - indirect_ptr = - mpi_coredump->serdes_xfi_hss_rx; - } else { - direct_ptr = mpi_coredump->serdes_xfi_hss_rx; - indirect_ptr = - mpi_coredump->serdes2_xfi_hss_rx; - } - - for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - - /* Get XAUI_XFI_HSS_PLL register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xfi_hss_pll; - indirect_ptr = - mpi_coredump->serdes_xfi_hss_pll; - } else { - direct_ptr = - mpi_coredump->serdes_xfi_hss_pll; - indirect_ptr = - mpi_coredump->serdes2_xfi_hss_pll; - } - for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - return 0; -} - -static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg, - u32 *data) -{ - int status = 0; - - /* wait for reg to come ready */ - status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, - XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - goto exit; - - /* set up for reg read */ - ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R); - - /* wait for reg to come ready */ - status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, - XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - goto exit; - - /* get the data */ - *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4); -exit: - return status; -} - -/* Read the 400 xgmac control/statistics registers - * skipping unused locations. 
- */ -static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf, - unsigned int other_function) -{ - int status = 0; - int i; - - for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) { - /* We're reading 400 xgmac registers, but we filter out - * several locations that are non-responsive to reads. - */ - if ((i == 0x00000114) || - (i == 0x00000118) || - (i == 0x0000013c) || - (i == 0x00000140) || - (i > 0x00000150 && i < 0x000001fc) || - (i > 0x00000278 && i < 0x000002a0) || - (i > 0x000002c0 && i < 0x000002cf) || - (i > 0x000002dc && i < 0x000002f0) || - (i > 0x000003c8 && i < 0x00000400) || - (i > 0x00000400 && i < 0x00000410) || - (i > 0x00000410 && i < 0x00000420) || - (i > 0x00000420 && i < 0x00000430) || - (i > 0x00000430 && i < 0x00000440) || - (i > 0x00000440 && i < 0x00000450) || - (i > 0x00000450 && i < 0x00000500) || - (i > 0x0000054c && i < 0x00000568) || - (i > 0x000005c8 && i < 0x00000600)) { - if (other_function) - status = - ql_read_other_func_xgmac_reg(qdev, i, buf); - else - status = ql_read_xgmac_reg(qdev, i, buf); - - if (status) - *buf = 0xdeadbeef; - break; - } - } - return status; -} - -static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) -{ - int status = 0; - int i; - - for (i = 0; i < 8; i++, buf++) { - ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000); - *buf = ql_read32(qdev, NIC_ETS); - } - - for (i = 0; i < 2; i++, buf++) { - ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000); - *buf = ql_read32(qdev, CNA_ETS); - } - - return status; -} - -static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf) -{ - int i; - - for (i = 0; i < qdev->rx_ring_count; i++, buf++) { - ql_write32(qdev, INTR_EN, - qdev->intr_context[i].intr_read_mask); - *buf = ql_read32(qdev, INTR_EN); - } -} - -static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) -{ - int i, status; - u32 value[3]; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - - for (i = 0; i < 16; i++) { - status = ql_get_mac_addr_reg(qdev, - MAC_ADDR_TYPE_CAM_MAC, i, value); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed read of mac index register\n"); - goto err; - } - *buf++ = value[0]; /* lower MAC address */ - *buf++ = value[1]; /* upper MAC address */ - *buf++ = value[2]; /* output */ - } - for (i = 0; i < 32; i++) { - status = ql_get_mac_addr_reg(qdev, - MAC_ADDR_TYPE_MULTI_MAC, i, value); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed read of mac index register\n"); - goto err; - } - *buf++ = value[0]; /* lower Mcast address */ - *buf++ = value[1]; /* upper Mcast address */ - } -err: - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - return status; -} - -static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf) -{ - int status; - u32 value, i; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return status; - - for (i = 0; i < 16; i++) { - status = ql_get_routing_reg(qdev, i, &value); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed read of routing index register\n"); - goto err; - } else { - *buf++ = value; - } - } -err: - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); - return status; -} - -/* Read the MPI Processor shadow registers */ -static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf) -{ - u32 i; - int status; - - for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) { - status = ql_write_mpi_reg(qdev, RISC_124, - (SHADOW_OFFSET | i << SHADOW_REG_SHIFT)); - if (status) - goto end; - status = ql_read_mpi_reg(qdev, RISC_127, buf); - if (status) - goto end; - } -end: -
return status; -} - -/* Read the MPI Processor core registers */ -static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf, - u32 offset, u32 count) -{ - int i, status = 0; - for (i = 0; i < count; i++, buf++) { - status = ql_read_mpi_reg(qdev, offset + i, buf); - if (status) - return status; - } - return status; -} - -/* Read the ASIC probe dump */ -static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock, - u32 valid, u32 *buf) -{ - u32 module, mux_sel, probe, lo_val, hi_val; - - for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) { - if (!((valid >> module) & 1)) - continue; - for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) { - probe = clock - | PRB_MX_ADDR_ARE - | mux_sel - | (module << PRB_MX_ADDR_MOD_SEL_SHIFT); - ql_write32(qdev, PRB_MX_ADDR, probe); - lo_val = ql_read32(qdev, PRB_MX_DATA); - if (mux_sel == 0) { - *buf = probe; - buf++; - } - probe |= PRB_MX_ADDR_UP; - ql_write32(qdev, PRB_MX_ADDR, probe); - hi_val = ql_read32(qdev, PRB_MX_DATA); - *buf = lo_val; - buf++; - *buf = hi_val; - buf++; - } - } - return buf; -} - -static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) -{ - /* First we have to enable the probe mux */ - ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); - buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, - PRB_MX_ADDR_VALID_SYS_MOD, buf); - buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, - PRB_MX_ADDR_VALID_PCI_MOD, buf); - buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, - PRB_MX_ADDR_VALID_XGM_MOD, buf); - buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, - PRB_MX_ADDR_VALID_FC_MOD, buf); - return 0; - -} - -/* Read out the routing index registers */ -static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf) -{ - int status; - u32 type, index, index_max; - u32 result_index; - u32 result_data; - u32 val; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return status; - - for (type = 0; type < 4; type++) { - if (type < 2) - index_max = 8; - else - index_max = 16; - for (index = 0; index < index_max; index++) { - val = RT_IDX_RS - | (type << RT_IDX_TYPE_SHIFT) - | (index << RT_IDX_IDX_SHIFT); - ql_write32(qdev, RT_IDX, val); - result_index = 0; - while ((result_index & RT_IDX_MR) == 0) - result_index = ql_read32(qdev, RT_IDX); - result_data = ql_read32(qdev, RT_DATA); - *buf = type; - buf++; - *buf = index; - buf++; - *buf = result_index; - buf++; - *buf = result_data; - buf++; - } - } - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); - return status; -} - -/* Read out the MAC protocol registers */ -static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) -{ - u32 result_index, result_data; - u32 type; - u32 index; - u32 offset; - u32 val; - u32 initial_val = MAC_ADDR_RS; - u32 max_index; - u32 max_offset; - - for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) { - switch (type) { - - case 0: /* CAM */ - initial_val |= MAC_ADDR_ADR; - max_index = MAC_ADDR_MAX_CAM_ENTRIES; - max_offset = MAC_ADDR_MAX_CAM_WCOUNT; - break; - case 1: /* Multicast MAC Address */ - max_index = MAC_ADDR_MAX_CAM_WCOUNT; - max_offset = MAC_ADDR_MAX_CAM_WCOUNT; - break; - case 2: /* VLAN filter mask */ - case 3: /* MC filter mask */ - max_index = MAC_ADDR_MAX_CAM_WCOUNT; - max_offset = MAC_ADDR_MAX_CAM_WCOUNT; - break; - case 4: /* FC MAC addresses */ - max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES; - max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT; - break; - case 5: /* Mgmt MAC addresses */ - max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT; - 
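The routing-index dump above and the MAC-protocol dump in progress here share one indirect-access idiom: post a read request through an index register, busy-wait for the hardware to raise its "master ready" bit, then collect the result from the paired data register. A condensed sketch of the routing variant (an illustrative helper; like the original it polls without a timeout, so it assumes the hardware eventually responds):

	/* One indirect read through the RT_IDX/RT_DATA register pair. */
	static u32 ql_read_one_routing_slot(struct ql_adapter *qdev,
					    u32 type, u32 index)
	{
		u32 val = RT_IDX_RS
			| (type << RT_IDX_TYPE_SHIFT)
			| (index << RT_IDX_IDX_SHIFT);

		ql_write32(qdev, RT_IDX, val);	/* post the read request */
		while ((ql_read32(qdev, RT_IDX) & RT_IDX_MR) == 0)
			;			/* spin until "ready" */
		return ql_read32(qdev, RT_DATA);
	}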
break; - case 6: /* Mgmt VLAN addresses */ - max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT; - break; - case 7: /* Mgmt IPv4 address */ - max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT; - break; - case 8: /* Mgmt IPv6 address */ - max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT; - break; - case 9: /* Mgmt TCP/UDP Dest port */ - max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT; - break; - default: - pr_err("Bad type!!! 0x%08x\n", type); - max_index = 0; - max_offset = 0; - break; - } - for (index = 0; index < max_index; index++) { - for (offset = 0; offset < max_offset; offset++) { - val = initial_val - | (type << MAC_ADDR_TYPE_SHIFT) - | (index << MAC_ADDR_IDX_SHIFT) - | (offset); - ql_write32(qdev, MAC_ADDR_IDX, val); - result_index = 0; - while ((result_index & MAC_ADDR_MR) == 0) { - result_index = ql_read32(qdev, - MAC_ADDR_IDX); - } - result_data = ql_read32(qdev, MAC_ADDR_DATA); - *buf = result_index; - buf++; - *buf = result_data; - buf++; - } - } - } -} - -static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf) -{ - u32 func_num, reg, reg_val; - int status; - - for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS; func_num++) { - reg = MPI_NIC_REG_BLOCK - | (func_num << MPI_NIC_FUNCTION_SHIFT) - | (SEM / 4); - status = ql_read_mpi_reg(qdev, reg, &reg_val); - *buf = reg_val; - /* if the read failed then dead fill the element. */ - if (status) - *buf = 0xdeadbeef; - buf++; - } -} - -/* Create a coredump segment header */ -static void ql_build_coredump_seg_header( - struct mpi_coredump_segment_header *seg_hdr, - u32 seg_number, u32 seg_size, u8 *desc) -{ - memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header)); - seg_hdr->cookie = MPI_COREDUMP_COOKIE; - seg_hdr->segNum = seg_number; - seg_hdr->segSize = seg_size; - memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); -} - -/* - * This function should be called when a coredump / probedump - * is to be extracted from the HBA. It is assumed there is a - * qdev structure that contains the base address of the register - * space for this function as well as a coredump structure that - * will contain the dump. - */ -int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) -{ - int status; - int i; - - if (!mpi_coredump) { - netif_err(qdev, drv, qdev->ndev, "No memory available\n"); - return -ENOMEM; - } - - /* Try to get the spinlock, but don't worry if - * it isn't available. If the firmware died it - * might be holding the sem. - */ - ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); - - status = ql_pause_mpi_risc(qdev); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed RISC pause. 
Status = 0x%.08x\n", status); - goto err; - } - - /* Insert the global header */ - memset(&(mpi_coredump->mpi_global_header), 0, - sizeof(struct mpi_coredump_global_header)); - mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; - mpi_coredump->mpi_global_header.headerSize = - sizeof(struct mpi_coredump_global_header); - mpi_coredump->mpi_global_header.imageSize = - sizeof(struct ql_mpi_coredump); - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", - sizeof(mpi_coredump->mpi_global_header.idString)); - - /* Get generic NIC reg dump */ - ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, - NIC1_CONTROL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, - NIC2_CONTROL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); - - /* Get XGMac registers. (Segment 18, Rev C. step 21) */ - ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, - NIC1_XGMAC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, - NIC2_XGMAC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); - - if (qdev->func & 1) { - /* Odd means our function is NIC 2 */ - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic2_regs[i] = - ql_read32(qdev, i * sizeof(u32)); - - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic_regs[i] = - ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); - - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0); - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1); - } else { - /* Even means our function is NIC 1 */ - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic_regs[i] = - ql_read32(qdev, i * sizeof(u32)); - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic2_regs[i] = - ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); - - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0); - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1); - } - - /* Rev C. Step 20a */ - ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, - XAUI_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xaui_an), - "XAUI AN Registers"); - - /* Rev C. 
Step 20b */ - ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, - XAUI_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xaui_hss_pcs), - "XAUI HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_an), - "XFI AN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, - XFI_TRAIN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_train), - "XFI TRAIN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, - XFI_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_pcs), - "XFI HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, - XFI_HSS_TX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_tx), - "XFI HSS TX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, - XFI_HSS_RX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_rx), - "XFI HSS RX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, - XFI_HSS_PLL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_pll), - "XFI HSS PLL Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, - XAUI2_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xaui_an), - "XAUI2 AN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, - XAUI2_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xaui_hss_pcs), - "XAUI2 HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, - XFI2_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_an), - "XFI2 AN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, - XFI2_TRAIN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_train), - "XFI2 TRAIN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, - XFI2_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_pcs), - "XFI2 HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, - XFI2_HSS_TX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_tx), - "XFI2 HSS TX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, - XFI2_HSS_RX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_rx), - "XFI2 HSS RX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, - XFI2_HSS_PLL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_pll), - "XFI2 HSS PLL Registers"); - - status = ql_get_serdes_regs(qdev, mpi_coredump); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed Dump of Serdes Registers. 
Status = 0x%.08x\n", - status); - goto err; - } - - ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, - CORE_SEG_NUM, - sizeof(mpi_coredump->core_regs_seg_hdr) + - sizeof(mpi_coredump->mpi_core_regs) + - sizeof(mpi_coredump->mpi_core_sh_regs), - "Core Registers"); - - /* Get the MPI Core Registers */ - status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0], - MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT); - if (status) - goto err; - /* Get the 16 MPI shadow registers */ - status = ql_get_mpi_shadow_regs(qdev, - &mpi_coredump->mpi_core_sh_regs[0]); - if (status) - goto err; - - /* Get the Test Logic Registers */ - ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, - TEST_LOGIC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->test_logic_regs), - "Test Logic Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0], - TEST_REGS_ADDR, TEST_REGS_CNT); - if (status) - goto err; - - /* Get the RMII Registers */ - ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, - RMII_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->rmii_regs), - "RMII Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0], - RMII_REGS_ADDR, RMII_REGS_CNT); - if (status) - goto err; - - /* Get the FCMAC1 Registers */ - ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, - FCMAC1_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fcmac1_regs), - "FCMAC1 Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0], - FCMAC1_REGS_ADDR, FCMAC_REGS_CNT); - if (status) - goto err; - - /* Get the FCMAC2 Registers */ - - ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, - FCMAC2_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fcmac2_regs), - "FCMAC2 Registers"); - - status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0], - FCMAC2_REGS_ADDR, FCMAC_REGS_CNT); - if (status) - goto err; - - /* Get the FC1 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, - FC1_MBOX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fc1_mbx_regs), - "FC1 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0], - FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the IDE Registers */ - ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, - IDE_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->ide_regs), - "IDE Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0], - IDE_REGS_ADDR, IDE_REGS_CNT); - if (status) - goto err; - - /* Get the NIC1 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, - NIC1_MBOX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic1_mbx_regs), - "NIC1 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0], - NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the SMBus Registers */ - ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, - SMBUS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->smbus_regs), - "SMBus Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0], - SMBUS_REGS_ADDR, SMBUS_REGS_CNT); - if (status) - goto err; - - /* Get the FC2 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, - FC2_MBOX_SEG_NUM, - 
sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fc2_mbx_regs), - "FC2 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0], - FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the NIC2 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, - NIC2_MBOX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic2_mbx_regs), - "NIC2 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0], - NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the I2C Registers */ - ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, - I2C_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->i2c_regs), - "I2C Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0], - I2C_REGS_ADDR, I2C_REGS_CNT); - if (status) - goto err; - - /* Get the MEMC Registers */ - ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, - MEMC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->memc_regs), - "MEMC Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0], - MEMC_REGS_ADDR, MEMC_REGS_CNT); - if (status) - goto err; - - /* Get the PBus Registers */ - ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, - PBUS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->pbus_regs), - "PBUS Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0], - PBUS_REGS_ADDR, PBUS_REGS_CNT); - if (status) - goto err; - - /* Get the MDE Registers */ - ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, - MDE_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->mde_regs), - "MDE Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0], - MDE_REGS_ADDR, MDE_REGS_CNT); - if (status) - goto err; - - ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, - MISC_NIC_INFO_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->misc_nic_info), - "MISC NIC INFO"); - mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; - mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; - mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; - mpi_coredump->misc_nic_info.function = qdev->func; - - /* Segment 31 */ - /* Get indexed register values. */ - ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, - INTR_STATES_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->intr_states), - "INTR States"); - ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); - - ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, - CAM_ENTRIES_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->cam_entries), - "CAM Entries"); - status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); - if (status) - goto err; - - ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, - ROUTING_WORDS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic_routing_words), - "Routing Words"); - status = ql_get_routing_entries(qdev, - &mpi_coredump->nic_routing_words[0]); - if (status) - goto err; - - /* Segment 34 (Rev C. 
step 23) */ - ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, - ETS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->ets), - "ETS Registers"); - status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); - if (status) - goto err; - - ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr, - PROBE_DUMP_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->probe_dump), - "Probe Dump"); - ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]); - - ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr, - ROUTING_INDEX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->routing_regs), - "Routing Regs"); - status = ql_get_routing_index_registers(qdev, - &mpi_coredump->routing_regs[0]); - if (status) - goto err; - - ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr, - MAC_PROTOCOL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->mac_prot_regs), - "MAC Prot Regs"); - ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]); - - /* Get the semaphore registers for all 5 functions */ - ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr, - SEM_REGS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->sem_regs), "Sem Registers"); - - ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]); - - /* Prevent the mpi restarting while we dump the memory.*/ - ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC); - - /* clear the pause */ - status = ql_unpause_mpi_risc(qdev); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed RISC unpause. Status = 0x%.08x\n", status); - goto err; - } - - /* Reset the RISC so we can dump RAM */ - status = ql_hard_reset_mpi_risc(qdev); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed RISC reset. Status = 0x%.08x\n", status); - goto err; - } - - ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr, - WCS_RAM_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->code_ram), - "WCS RAM"); - status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0], - CODE_RAM_ADDR, CODE_RAM_CNT); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed Dump of CODE RAM. Status = 0x%.08x\n", - status); - goto err; - } - - /* Insert the segment header */ - ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr, - MEMC_RAM_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->memc_ram), - "MEMC RAM"); - status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0], - MEMC_RAM_ADDR, MEMC_RAM_CNT); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed Dump of MEMC RAM. 
Status = 0x%.08x\n", - status); - goto err; - } -err: - ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ - return status; - -} - -static void ql_get_core_dump(struct ql_adapter *qdev) -{ - if (!ql_own_firmware(qdev)) { - netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); - return; - } - - if (!netif_running(qdev->ndev)) { - netif_err(qdev, ifup, qdev->ndev, - "Force Coredump can only be done from an interface that is up\n"); - return; - } - ql_queue_fw_error(qdev); -} - -void ql_gen_reg_dump(struct ql_adapter *qdev, - struct ql_reg_dump *mpi_coredump) -{ - int i, status; - - memset(&(mpi_coredump->mpi_global_header), 0, - sizeof(struct mpi_coredump_global_header)); - mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; - mpi_coredump->mpi_global_header.headerSize = - sizeof(struct mpi_coredump_global_header); - mpi_coredump->mpi_global_header.imageSize = - sizeof(struct ql_reg_dump); - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", - sizeof(mpi_coredump->mpi_global_header.idString)); - - /* segment 16 */ - ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, - MISC_NIC_INFO_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->misc_nic_info), - "MISC NIC INFO"); - mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; - mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; - mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; - mpi_coredump->misc_nic_info.function = qdev->func; - - /* Segment 16, Rev C. Step 18 */ - ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, - NIC1_CONTROL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic_regs), - "NIC Registers"); - /* Get generic reg dump */ - for (i = 0; i < 64; i++) - mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32)); - - /* Segment 31 */ - /* Get indexed register values. */ - ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, - INTR_STATES_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->intr_states), - "INTR States"); - ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); - - ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, - CAM_ENTRIES_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->cam_entries), - "CAM Entries"); - status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); - if (status) - return; - - ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, - ROUTING_WORDS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic_routing_words), - "Routing Words"); - status = ql_get_routing_entries(qdev, - &mpi_coredump->nic_routing_words[0]); - if (status) - return; - - /* Segment 34 (Rev C. step 23) */ - ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, - ETS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->ets), - "ETS Registers"); - status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); - if (status) - return; -} - -void ql_get_dump(struct ql_adapter *qdev, void *buff) -{ - /* - * If the force-coredump flag (QL_FRC_COREDUMP) is clear, take a - * complete coredump straight into the user's buffer. If the flag - * is set, the core is spooled to the log file instead: give the - * user a snapshot of the general registers and then queue the - * full core dump to the log.
- */ - - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) { - if (!ql_core_dump(qdev, buff)) - ql_soft_reset_mpi_risc(qdev); - else - netif_err(qdev, drv, qdev->ndev, "coredump failed!\n"); - } else { - ql_gen_reg_dump(qdev, buff); - ql_get_core_dump(qdev); - } -} - -/* Coredump to messages log file using separate worker thread */ -void ql_mpi_core_to_log(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_core_to_log.work); - u32 *tmp, count; - int i; - - count = sizeof(struct ql_mpi_coredump) / sizeof(u32); - tmp = (u32 *)qdev->mpi_coredump; - netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, - "Core is dumping to log file!\n"); - - for (i = 0; i < count; i += 8) { - pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x " - "%.08x %.08x %.08x\n", i, - tmp[i + 0], - tmp[i + 1], - tmp[i + 2], - tmp[i + 3], - tmp[i + 4], - tmp[i + 5], - tmp[i + 6], - tmp[i + 7]); - msleep(5); - } -} - -#ifdef QL_REG_DUMP -static void ql_dump_intr_states(struct ql_adapter *qdev) -{ - int i; - u32 value; - for (i = 0; i < qdev->intr_count; i++) { - ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); - value = ql_read32(qdev, INTR_EN); - pr_err("%s: Interrupt %d is %s\n", - qdev->ndev->name, i, - (value & INTR_EN_EN ? "enabled" : "disabled")); - } -} - -#define DUMP_XGMAC(qdev, reg) \ -do { \ - u32 data; \ - ql_read_xgmac_reg(qdev, reg, &data); \ - pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \ -} while (0) - -void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) -{ - if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { - pr_err("%s: Couldn't get xgmac sem\n", __func__); - return; - } - DUMP_XGMAC(qdev, PAUSE_SRC_LO); - DUMP_XGMAC(qdev, PAUSE_SRC_HI); - DUMP_XGMAC(qdev, GLOBAL_CFG); - DUMP_XGMAC(qdev, TX_CFG); - DUMP_XGMAC(qdev, RX_CFG); - DUMP_XGMAC(qdev, FLOW_CTL); - DUMP_XGMAC(qdev, PAUSE_OPCODE); - DUMP_XGMAC(qdev, PAUSE_TIMER); - DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO); - DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI); - DUMP_XGMAC(qdev, MAC_TX_PARAMS); - DUMP_XGMAC(qdev, MAC_RX_PARAMS); - DUMP_XGMAC(qdev, MAC_SYS_INT); - DUMP_XGMAC(qdev, MAC_SYS_INT_MASK); - DUMP_XGMAC(qdev, MAC_MGMT_INT); - DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK); - DUMP_XGMAC(qdev, EXT_ARB_MODE); - ql_sem_unlock(qdev, qdev->xg_sem_mask); -} - -static void ql_dump_ets_regs(struct ql_adapter *qdev) -{ -} - -static void ql_dump_cam_entries(struct ql_adapter *qdev) -{ - int i; - u32 value[3]; - - i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (i) - return; - for (i = 0; i < 4; i++) { - if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { - pr_err("%s: Failed read of mac index register\n", - __func__); - return; - } else { - if (value[0]) - pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n", - qdev->ndev->name, i, value[1], value[0], - value[2]); - } - } - for (i = 0; i < 32; i++) { - if (ql_get_mac_addr_reg - (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { - pr_err("%s: Failed read of mac index register\n", - __func__); - return; - } else { - if (value[0]) - pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n", - qdev->ndev->name, i, value[1], value[0]); - } - } - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); -} - -void ql_dump_routing_entries(struct ql_adapter *qdev) -{ - int i; - u32 value; - i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (i) - return; - for (i = 0; i < 16; i++) { - value = 0; - if (ql_get_routing_reg(qdev, i, &value)) { - pr_err("%s: Failed read of routing index register\n", - __func__); - return; - } else { - if (value) - 
pr_err("%s: Routing Mask %d = 0x%.08x\n", - qdev->ndev->name, i, value); - } - } - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); -} - -#define DUMP_REG(qdev, reg) \ - pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) - -void ql_dump_regs(struct ql_adapter *qdev) -{ - pr_err("reg dump for function #%d\n", qdev->func); - DUMP_REG(qdev, SYS); - DUMP_REG(qdev, RST_FO); - DUMP_REG(qdev, FSC); - DUMP_REG(qdev, CSR); - DUMP_REG(qdev, ICB_RID); - DUMP_REG(qdev, ICB_L); - DUMP_REG(qdev, ICB_H); - DUMP_REG(qdev, CFG); - DUMP_REG(qdev, BIOS_ADDR); - DUMP_REG(qdev, STS); - DUMP_REG(qdev, INTR_EN); - DUMP_REG(qdev, INTR_MASK); - DUMP_REG(qdev, ISR1); - DUMP_REG(qdev, ISR2); - DUMP_REG(qdev, ISR3); - DUMP_REG(qdev, ISR4); - DUMP_REG(qdev, REV_ID); - DUMP_REG(qdev, FRC_ECC_ERR); - DUMP_REG(qdev, ERR_STS); - DUMP_REG(qdev, RAM_DBG_ADDR); - DUMP_REG(qdev, RAM_DBG_DATA); - DUMP_REG(qdev, ECC_ERR_CNT); - DUMP_REG(qdev, SEM); - DUMP_REG(qdev, GPIO_1); - DUMP_REG(qdev, GPIO_2); - DUMP_REG(qdev, GPIO_3); - DUMP_REG(qdev, XGMAC_ADDR); - DUMP_REG(qdev, XGMAC_DATA); - DUMP_REG(qdev, NIC_ETS); - DUMP_REG(qdev, CNA_ETS); - DUMP_REG(qdev, FLASH_ADDR); - DUMP_REG(qdev, FLASH_DATA); - DUMP_REG(qdev, CQ_STOP); - DUMP_REG(qdev, PAGE_TBL_RID); - DUMP_REG(qdev, WQ_PAGE_TBL_LO); - DUMP_REG(qdev, WQ_PAGE_TBL_HI); - DUMP_REG(qdev, CQ_PAGE_TBL_LO); - DUMP_REG(qdev, CQ_PAGE_TBL_HI); - DUMP_REG(qdev, COS_DFLT_CQ1); - DUMP_REG(qdev, COS_DFLT_CQ2); - DUMP_REG(qdev, SPLT_HDR); - DUMP_REG(qdev, FC_PAUSE_THRES); - DUMP_REG(qdev, NIC_PAUSE_THRES); - DUMP_REG(qdev, FC_ETHERTYPE); - DUMP_REG(qdev, FC_RCV_CFG); - DUMP_REG(qdev, NIC_RCV_CFG); - DUMP_REG(qdev, FC_COS_TAGS); - DUMP_REG(qdev, NIC_COS_TAGS); - DUMP_REG(qdev, MGMT_RCV_CFG); - DUMP_REG(qdev, XG_SERDES_ADDR); - DUMP_REG(qdev, XG_SERDES_DATA); - DUMP_REG(qdev, PRB_MX_ADDR); - DUMP_REG(qdev, PRB_MX_DATA); - ql_dump_intr_states(qdev); - ql_dump_xgmac_control_regs(qdev); - ql_dump_ets_regs(qdev); - ql_dump_cam_entries(qdev); - ql_dump_routing_entries(qdev); -} -#endif - -#ifdef QL_STAT_DUMP - -#define DUMP_STAT(qdev, stat) \ - pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat) - -void ql_dump_stat(struct ql_adapter *qdev) -{ - pr_err("%s: Enter\n", __func__); - DUMP_STAT(qdev, tx_pkts); - DUMP_STAT(qdev, tx_bytes); - DUMP_STAT(qdev, tx_mcast_pkts); - DUMP_STAT(qdev, tx_bcast_pkts); - DUMP_STAT(qdev, tx_ucast_pkts); - DUMP_STAT(qdev, tx_ctl_pkts); - DUMP_STAT(qdev, tx_pause_pkts); - DUMP_STAT(qdev, tx_64_pkt); - DUMP_STAT(qdev, tx_65_to_127_pkt); - DUMP_STAT(qdev, tx_128_to_255_pkt); - DUMP_STAT(qdev, tx_256_511_pkt); - DUMP_STAT(qdev, tx_512_to_1023_pkt); - DUMP_STAT(qdev, tx_1024_to_1518_pkt); - DUMP_STAT(qdev, tx_1519_to_max_pkt); - DUMP_STAT(qdev, tx_undersize_pkt); - DUMP_STAT(qdev, tx_oversize_pkt); - DUMP_STAT(qdev, rx_bytes); - DUMP_STAT(qdev, rx_bytes_ok); - DUMP_STAT(qdev, rx_pkts); - DUMP_STAT(qdev, rx_pkts_ok); - DUMP_STAT(qdev, rx_bcast_pkts); - DUMP_STAT(qdev, rx_mcast_pkts); - DUMP_STAT(qdev, rx_ucast_pkts); - DUMP_STAT(qdev, rx_undersize_pkts); - DUMP_STAT(qdev, rx_oversize_pkts); - DUMP_STAT(qdev, rx_jabber_pkts); - DUMP_STAT(qdev, rx_undersize_fcerr_pkts); - DUMP_STAT(qdev, rx_drop_events); - DUMP_STAT(qdev, rx_fcerr_pkts); - DUMP_STAT(qdev, rx_align_err); - DUMP_STAT(qdev, rx_symbol_err); - DUMP_STAT(qdev, rx_mac_err); - DUMP_STAT(qdev, rx_ctl_pkts); - DUMP_STAT(qdev, rx_pause_pkts); - DUMP_STAT(qdev, rx_64_pkts); - DUMP_STAT(qdev, rx_65_to_127_pkts); - DUMP_STAT(qdev, rx_128_255_pkts); - DUMP_STAT(qdev, rx_256_511_pkts); - DUMP_STAT(qdev, 
rx_512_to_1023_pkts); - DUMP_STAT(qdev, rx_1024_to_1518_pkts); - DUMP_STAT(qdev, rx_1519_to_max_pkts); - DUMP_STAT(qdev, rx_len_err_pkts); -}; -#endif - -#ifdef QL_DEV_DUMP - -#define DUMP_QDEV_FIELD(qdev, type, field) \ - pr_err("qdev->%-24s = " type "\n", #field, qdev->field) -#define DUMP_QDEV_DMA_FIELD(qdev, field) \ - pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field) -#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \ - pr_err("%s[%d].%s = " type "\n", \ - #array, index, #field, qdev->array[index].field); -void ql_dump_qdev(struct ql_adapter *qdev) -{ - int i; - DUMP_QDEV_FIELD(qdev, "%lx", flags); - DUMP_QDEV_FIELD(qdev, "%p", vlgrp); - DUMP_QDEV_FIELD(qdev, "%p", pdev); - DUMP_QDEV_FIELD(qdev, "%p", ndev); - DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id); - DUMP_QDEV_FIELD(qdev, "%p", reg_base); - DUMP_QDEV_FIELD(qdev, "%p", doorbell_area); - DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size); - DUMP_QDEV_FIELD(qdev, "%x", msg_enable); - DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area); - DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma); - DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area); - DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma); - DUMP_QDEV_FIELD(qdev, "%d", intr_count); - if (qdev->msi_x_entry) - for (i = 0; i < qdev->intr_count; i++) { - DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector); - DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry); - } - for (i = 0; i < qdev->intr_count; i++) { - DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev); - DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr); - DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked); - DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask); - DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask); - DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask); - } - DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count); - DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count); - DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size); - DUMP_QDEV_FIELD(qdev, "%p", ring_mem); - DUMP_QDEV_FIELD(qdev, "%d", intr_count); - DUMP_QDEV_FIELD(qdev, "%p", tx_ring); - DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count); - DUMP_QDEV_FIELD(qdev, "%p", rx_ring); - DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue); - DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask); - DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up); - DUMP_QDEV_FIELD(qdev, "0x%08x", port_init); -} -#endif - -#ifdef QL_CB_DUMP -void ql_dump_wqicb(struct wqicb *wqicb) -{ - pr_err("Dumping wqicb stuff...\n"); - pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len)); - pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags)); - pr_err("wqicb->cq_id_rss = %d\n", - le16_to_cpu(wqicb->cq_id_rss)); - pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid)); - pr_err("wqicb->wq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(wqicb->addr)); - pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); -} - -void ql_dump_tx_ring(struct tx_ring *tx_ring) -{ - if (tx_ring == NULL) - return; - pr_err("===================== Dumping tx_ring %d ===============\n", - tx_ring->wq_id); - pr_err("tx_ring->base = %p\n", tx_ring->wq_base); - pr_err("tx_ring->base_dma = 0x%llx\n", - (unsigned long long) tx_ring->wq_base_dma); - pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n", - tx_ring->cnsmr_idx_sh_reg, - tx_ring->cnsmr_idx_sh_reg - ? 
ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); - pr_err("tx_ring->size = %d\n", tx_ring->wq_size); - pr_err("tx_ring->len = %d\n", tx_ring->wq_len); - pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg); - pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg); - pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx); - pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id); - pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id); - pr_err("tx_ring->q = %p\n", tx_ring->q); - pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count)); -} - -void ql_dump_ricb(struct ricb *ricb) -{ - int i; - pr_err("===================== Dumping ricb ===============\n"); - pr_err("Dumping ricb stuff...\n"); - - pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f); - pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n", - ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", - ricb->flags & RSS_L6K ? "RSS_L6K " : "", - ricb->flags & RSS_LI ? "RSS_LI " : "", - ricb->flags & RSS_LB ? "RSS_LB " : "", - ricb->flags & RSS_LM ? "RSS_LM " : "", - ricb->flags & RSS_RI4 ? "RSS_RI4 " : "", - ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", - ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", - ricb->flags & RSS_RT6 ? "RSS_RT6 " : ""); - pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask)); - for (i = 0; i < 16; i++) - pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i, - le32_to_cpu(ricb->hash_cq_id[i])); - for (i = 0; i < 10; i++) - pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i, - le32_to_cpu(ricb->ipv6_hash_key[i])); - for (i = 0; i < 4; i++) - pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i, - le32_to_cpu(ricb->ipv4_hash_key[i])); -} - -void ql_dump_cqicb(struct cqicb *cqicb) -{ - pr_err("Dumping cqicb stuff...\n"); - - pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect); - pr_err("cqicb->flags = %x\n", cqicb->flags); - pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len)); - pr_err("cqicb->addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->addr)); - pr_err("cqicb->prod_idx_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); - pr_err("cqicb->pkt_delay = 0x%.04x\n", - le16_to_cpu(cqicb->pkt_delay)); - pr_err("cqicb->irq_delay = 0x%.04x\n", - le16_to_cpu(cqicb->irq_delay)); - pr_err("cqicb->lbq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); - pr_err("cqicb->lbq_buf_size = 0x%.04x\n", - le16_to_cpu(cqicb->lbq_buf_size)); - pr_err("cqicb->lbq_len = 0x%.04x\n", - le16_to_cpu(cqicb->lbq_len)); - pr_err("cqicb->sbq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); - pr_err("cqicb->sbq_buf_size = 0x%.04x\n", - le16_to_cpu(cqicb->sbq_buf_size)); - pr_err("cqicb->sbq_len = 0x%.04x\n", - le16_to_cpu(cqicb->sbq_len)); -} - -void ql_dump_rx_ring(struct rx_ring *rx_ring) -{ - if (rx_ring == NULL) - return; - pr_err("===================== Dumping rx_ring %d ===============\n", - rx_ring->cq_id); - pr_err("Dumping rx_ring %d, type = %s%s%s\n", - rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", - rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", - rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); - pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); - pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); - pr_err("rx_ring->cq_base_dma = %llx\n", - (unsigned long long) rx_ring->cq_base_dma); - pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size); - pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len); - pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n", - rx_ring->prod_idx_sh_reg, - rx_ring->prod_idx_sh_reg - ? 
ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); - pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n", - (unsigned long long) rx_ring->prod_idx_sh_reg_dma); - pr_err("rx_ring->cnsmr_idx_db_reg = %p\n", - rx_ring->cnsmr_idx_db_reg); - pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx); - pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); - pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); - - pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); - pr_err("rx_ring->lbq_base_dma = %llx\n", - (unsigned long long) rx_ring->lbq_base_dma); - pr_err("rx_ring->lbq_base_indirect = %p\n", - rx_ring->lbq_base_indirect); - pr_err("rx_ring->lbq_base_indirect_dma = %llx\n", - (unsigned long long) rx_ring->lbq_base_indirect_dma); - pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); - pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); - pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); - pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n", - rx_ring->lbq_prod_idx_db_reg); - pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); - pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); - pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); - pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); - pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size); - - pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); - pr_err("rx_ring->sbq_base_dma = %llx\n", - (unsigned long long) rx_ring->sbq_base_dma); - pr_err("rx_ring->sbq_base_indirect = %p\n", - rx_ring->sbq_base_indirect); - pr_err("rx_ring->sbq_base_indirect_dma = %llx\n", - (unsigned long long) rx_ring->sbq_base_indirect_dma); - pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); - pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); - pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); - pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n", - rx_ring->sbq_prod_idx_db_reg); - pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); - pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); - pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); - pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); - pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size); - pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); - pr_err("rx_ring->irq = %d\n", rx_ring->irq); - pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); - pr_err("rx_ring->qdev = %p\n", rx_ring->qdev); -} - -void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) -{ - void *ptr; - - pr_err("%s: Enter\n", __func__); - - ptr = kmalloc(size, GFP_ATOMIC); - if (ptr == NULL) { - pr_err("%s: Couldn't allocate a buffer\n", __func__); - return; - } - - if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { - pr_err("%s: Failed to upload control block!\n", __func__); - goto fail_it; - } - switch (bit) { - case CFG_DRQ: - ql_dump_wqicb((struct wqicb *)ptr); - break; - case CFG_DCQ: - ql_dump_cqicb((struct cqicb *)ptr); - break; - case CFG_DR: - ql_dump_ricb((struct ricb *)ptr); - break; - default: - pr_err("%s: Invalid bit value = %x\n", __func__, bit); - break; - } -fail_it: - kfree(ptr); -} -#endif - -#ifdef QL_OB_DUMP -void ql_dump_tx_desc(struct tx_buf_desc *tbd) -{ - pr_err("tbd->addr = 0x%llx\n", - le64_to_cpu((u64) tbd->addr)); - pr_err("tbd->len = %d\n", - le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - pr_err("tbd->flags = %s %s\n", - tbd->len & TX_DESC_C ? "C" : ".", - tbd->len & TX_DESC_E ? 
"E" : "."); - tbd++; - pr_err("tbd->addr = 0x%llx\n", - le64_to_cpu((u64) tbd->addr)); - pr_err("tbd->len = %d\n", - le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - pr_err("tbd->flags = %s %s\n", - tbd->len & TX_DESC_C ? "C" : ".", - tbd->len & TX_DESC_E ? "E" : "."); - tbd++; - pr_err("tbd->addr = 0x%llx\n", - le64_to_cpu((u64) tbd->addr)); - pr_err("tbd->len = %d\n", - le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - pr_err("tbd->flags = %s %s\n", - tbd->len & TX_DESC_C ? "C" : ".", - tbd->len & TX_DESC_E ? "E" : "."); - -} - -void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) -{ - struct ob_mac_tso_iocb_req *ob_mac_tso_iocb = - (struct ob_mac_tso_iocb_req *)ob_mac_iocb; - struct tx_buf_desc *tbd; - u16 frame_len; - - pr_err("%s\n", __func__); - pr_err("opcode = %s\n", - (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); - pr_err("flags1 = %s %s %s %s %s\n", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); - pr_err("flags2 = %s %s %s\n", - ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", - ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", - ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); - pr_err("flags3 = %s %s %s\n", - ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", - ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", - ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); - pr_err("tid = %x\n", ob_mac_iocb->tid); - pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx); - pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); - if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { - pr_err("frame_len = %d\n", - le32_to_cpu(ob_mac_tso_iocb->frame_len)); - pr_err("mss = %d\n", - le16_to_cpu(ob_mac_tso_iocb->mss)); - pr_err("prot_hdr_len = %d\n", - le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); - pr_err("hdr_offset = 0x%.04x\n", - le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); - frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); - } else { - pr_err("frame_len = %d\n", - le16_to_cpu(ob_mac_iocb->frame_len)); - frame_len = le16_to_cpu(ob_mac_iocb->frame_len); - } - tbd = &ob_mac_iocb->tbd[0]; - ql_dump_tx_desc(tbd); -} - -void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) -{ - pr_err("%s\n", __func__); - pr_err("opcode = %d\n", ob_mac_rsp->opcode); - pr_err("flags = %s %s %s %s %s %s %s\n", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", - ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); - pr_err("tid = %x\n", ob_mac_rsp->tid); -} -#endif - -#ifdef QL_IB_DUMP -void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) -{ - pr_err("%s\n", __func__); - pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode); - pr_err("flags1 = %s%s%s%s%s%s\n", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? 
"B " : ""); - - if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) - pr_err("%s%s%s Multicast\n", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); - - pr_err("flags2 = %s%s%s%s%s\n", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); - - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) - pr_err("%s%s%s%s%s error\n", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); - - pr_err("flags3 = %s%s\n", - ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", - ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); - - if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) - pr_err("RSS flags = %s%s%s%s\n", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : ""); - - pr_err("data_len = %d\n", - le32_to_cpu(ib_mac_rsp->data_len)); - pr_err("data_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); - if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) - pr_err("rss = %x\n", - le32_to_cpu(ib_mac_rsp->rss)); - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) - pr_err("vlan_id = %x\n", - le16_to_cpu(ib_mac_rsp->vlan_id)); - - pr_err("flags4 = %s%s%s\n", - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? 
"HL " : ""); - - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { - pr_err("hdr length = %d\n", - le32_to_cpu(ib_mac_rsp->hdr_len)); - pr_err("hdr addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); - } -} -#endif - -#ifdef QL_ALL_DUMP -void ql_dump_all(struct ql_adapter *qdev) -{ - int i; - - QL_DUMP_REGS(qdev); - QL_DUMP_QDEV(qdev); - for (i = 0; i < qdev->tx_ring_count; i++) { - QL_DUMP_TX_RING(&qdev->tx_ring[i]); - QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]); - } - for (i = 0; i < qdev->rx_ring_count; i++) { - QL_DUMP_RX_RING(&qdev->rx_ring[i]); - QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]); - } -} -#endif diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c deleted file mode 100644 index 9b67bfe..0000000 --- a/drivers/net/qlge/qlge_ethtool.c +++ /dev/null @@ -1,688 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#include "qlge.h" - -static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { - "Loopback test (offline)" -}; -#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) - -static int ql_update_ring_coalescing(struct ql_adapter *qdev) -{ - int i, status = 0; - struct rx_ring *rx_ring; - struct cqicb *cqicb; - - if (!netif_running(qdev->ndev)) - return status; - - /* Skip the default queue, and update the outbound handler - * queues if they changed. - */ - cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count]; - if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs || - le16_to_cpu(cqicb->pkt_delay) != - qdev->tx_max_coalesced_frames) { - for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - cqicb = (struct cqicb *)rx_ring; - cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); - cqicb->pkt_delay = - cpu_to_le16(qdev->tx_max_coalesced_frames); - cqicb->flags = FLAGS_LI; - status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), - CFG_LCQ, rx_ring->cq_id); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to load CQICB.\n"); - goto exit; - } - } - } - - /* Update the inbound (RSS) handler queues if they changed. */ - cqicb = (struct cqicb *)&qdev->rx_ring[0]; - if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs || - le16_to_cpu(cqicb->pkt_delay) != - qdev->rx_max_coalesced_frames) { - for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { - rx_ring = &qdev->rx_ring[i]; - cqicb = (struct cqicb *)rx_ring; - cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); - cqicb->pkt_delay = - cpu_to_le16(qdev->rx_max_coalesced_frames); - cqicb->flags = FLAGS_LI; - status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), - CFG_LCQ, rx_ring->cq_id); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to load CQICB.\n"); - goto exit; - } - } - } -exit: - return status; -} - -static void ql_update_stats(struct ql_adapter *qdev) -{ - u32 i; - u64 data; - u64 *iter = &qdev->nic_stats.tx_pkts; - - spin_lock(&qdev->stats_lock); - if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get xgmac sem.\n"); - goto quit; - } - /* - * Get TX statistics. 
- */ - for (i = 0x200; i < 0x280; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get RX statistics. - */ - for (i = 0x300; i < 0x3d0; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get Per-priority TX pause frame counter statistics. - */ - for (i = 0x500; i < 0x540; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get Per-priority RX pause frame counter statistics. - */ - for (i = 0x568; i < 0x5a8; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get RX NIC FIFO DROP statistics. - */ - if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", i); - goto end; - } else - *iter = data; -end: - ql_sem_unlock(qdev, qdev->xg_sem_mask); -quit: - spin_unlock(&qdev->stats_lock); - - QL_DUMP_STAT(qdev); -} - -static char ql_stats_str_arr[][ETH_GSTRING_LEN] = { - {"tx_pkts"}, - {"tx_bytes"}, - {"tx_mcast_pkts"}, - {"tx_bcast_pkts"}, - {"tx_ucast_pkts"}, - {"tx_ctl_pkts"}, - {"tx_pause_pkts"}, - {"tx_64_pkts"}, - {"tx_65_to_127_pkts"}, - {"tx_128_to_255_pkts"}, - {"tx_256_511_pkts"}, - {"tx_512_to_1023_pkts"}, - {"tx_1024_to_1518_pkts"}, - {"tx_1519_to_max_pkts"}, - {"tx_undersize_pkts"}, - {"tx_oversize_pkts"}, - {"rx_bytes"}, - {"rx_bytes_ok"}, - {"rx_pkts"}, - {"rx_pkts_ok"}, - {"rx_bcast_pkts"}, - {"rx_mcast_pkts"}, - {"rx_ucast_pkts"}, - {"rx_undersize_pkts"}, - {"rx_oversize_pkts"}, - {"rx_jabber_pkts"}, - {"rx_undersize_fcerr_pkts"}, - {"rx_drop_events"}, - {"rx_fcerr_pkts"}, - {"rx_align_err"}, - {"rx_symbol_err"}, - {"rx_mac_err"}, - {"rx_ctl_pkts"}, - {"rx_pause_pkts"}, - {"rx_64_pkts"}, - {"rx_65_to_127_pkts"}, - {"rx_128_255_pkts"}, - {"rx_256_511_pkts"}, - {"rx_512_to_1023_pkts"}, - {"rx_1024_to_1518_pkts"}, - {"rx_1519_to_max_pkts"}, - {"rx_len_err_pkts"}, - {"tx_cbfc_pause_frames0"}, - {"tx_cbfc_pause_frames1"}, - {"tx_cbfc_pause_frames2"}, - {"tx_cbfc_pause_frames3"}, - {"tx_cbfc_pause_frames4"}, - {"tx_cbfc_pause_frames5"}, - {"tx_cbfc_pause_frames6"}, - {"tx_cbfc_pause_frames7"}, - {"rx_cbfc_pause_frames0"}, - {"rx_cbfc_pause_frames1"}, - {"rx_cbfc_pause_frames2"}, - {"rx_cbfc_pause_frames3"}, - {"rx_cbfc_pause_frames4"}, - {"rx_cbfc_pause_frames5"}, - {"rx_cbfc_pause_frames6"}, - {"rx_cbfc_pause_frames7"}, - {"rx_nic_fifo_drop"}, -}; - -static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) -{ - switch (stringset) { - case ETH_SS_STATS: - memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr)); - break; - } -} - -static int ql_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return QLGE_TEST_LEN; - case ETH_SS_STATS: - return ARRAY_SIZE(ql_stats_str_arr); - default: - return -EOPNOTSUPP; - } -} - -static void -ql_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct nic_stats *s = &qdev->nic_stats; - - ql_update_stats(qdev); - - *data++ = s->tx_pkts; - 
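Three things must stay in lockstep through this copy-out: the XGMAC register order walked by ql_update_stats(), the field order of struct nic_stats, and the string order in ql_stats_str_arr, because the sequence of *data++ assignments encodes the mapping implicitly. A table keyed by offsetof() would make the coupling explicit; a minimal sketch (QLGE_STAT, qlge_stat_desc and qlge_fill_stats are illustrative names, not driver API):

	struct qlge_stat_desc {
		char name[ETH_GSTRING_LEN];
		size_t offset;		/* offset into struct nic_stats */
	};

	#define QLGE_STAT(f)	{ #f, offsetof(struct nic_stats, f) }

	static const struct qlge_stat_desc qlge_gstrings_stats[] = {
		QLGE_STAT(tx_pkts),
		QLGE_STAT(tx_bytes),
		/* ... one entry per counter, in dump order ... */
		QLGE_STAT(rx_nic_fifo_drop),
	};

	/* The copy-out then becomes a single loop over the table. */
	static void qlge_fill_stats(struct nic_stats *s, u64 *data)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(qlge_gstrings_stats); i++)
			data[i] = *(u64 *)((u8 *)s +
					   qlge_gstrings_stats[i].offset);
	}

With such a table, ql_get_strings() and ql_get_ethtool_stats() would iterate the same array and could not drift apart.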
*data++ = s->tx_bytes; - *data++ = s->tx_mcast_pkts; - *data++ = s->tx_bcast_pkts; - *data++ = s->tx_ucast_pkts; - *data++ = s->tx_ctl_pkts; - *data++ = s->tx_pause_pkts; - *data++ = s->tx_64_pkt; - *data++ = s->tx_65_to_127_pkt; - *data++ = s->tx_128_to_255_pkt; - *data++ = s->tx_256_511_pkt; - *data++ = s->tx_512_to_1023_pkt; - *data++ = s->tx_1024_to_1518_pkt; - *data++ = s->tx_1519_to_max_pkt; - *data++ = s->tx_undersize_pkt; - *data++ = s->tx_oversize_pkt; - *data++ = s->rx_bytes; - *data++ = s->rx_bytes_ok; - *data++ = s->rx_pkts; - *data++ = s->rx_pkts_ok; - *data++ = s->rx_bcast_pkts; - *data++ = s->rx_mcast_pkts; - *data++ = s->rx_ucast_pkts; - *data++ = s->rx_undersize_pkts; - *data++ = s->rx_oversize_pkts; - *data++ = s->rx_jabber_pkts; - *data++ = s->rx_undersize_fcerr_pkts; - *data++ = s->rx_drop_events; - *data++ = s->rx_fcerr_pkts; - *data++ = s->rx_align_err; - *data++ = s->rx_symbol_err; - *data++ = s->rx_mac_err; - *data++ = s->rx_ctl_pkts; - *data++ = s->rx_pause_pkts; - *data++ = s->rx_64_pkts; - *data++ = s->rx_65_to_127_pkts; - *data++ = s->rx_128_255_pkts; - *data++ = s->rx_256_511_pkts; - *data++ = s->rx_512_to_1023_pkts; - *data++ = s->rx_1024_to_1518_pkts; - *data++ = s->rx_1519_to_max_pkts; - *data++ = s->rx_len_err_pkts; - *data++ = s->tx_cbfc_pause_frames0; - *data++ = s->tx_cbfc_pause_frames1; - *data++ = s->tx_cbfc_pause_frames2; - *data++ = s->tx_cbfc_pause_frames3; - *data++ = s->tx_cbfc_pause_frames4; - *data++ = s->tx_cbfc_pause_frames5; - *data++ = s->tx_cbfc_pause_frames6; - *data++ = s->tx_cbfc_pause_frames7; - *data++ = s->rx_cbfc_pause_frames0; - *data++ = s->rx_cbfc_pause_frames1; - *data++ = s->rx_cbfc_pause_frames2; - *data++ = s->rx_cbfc_pause_frames3; - *data++ = s->rx_cbfc_pause_frames4; - *data++ = s->rx_cbfc_pause_frames5; - *data++ = s->rx_cbfc_pause_frames6; - *data++ = s->rx_cbfc_pause_frames7; - *data++ = s->rx_nic_fifo_drop; -} - -static int ql_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->transceiver = XCVR_EXTERNAL; - if ((qdev->link_status & STS_LINK_TYPE_MASK) == - STS_LINK_TYPE_10GBASET) { - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; - } else { - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - } - - ethtool_cmd_speed_set(ecmd, SPEED_10000); - ecmd->duplex = DUPLEX_FULL; - - return 0; -} - -static void ql_get_drvinfo(struct net_device *ndev, - struct ethtool_drvinfo *drvinfo) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - strncpy(drvinfo->driver, qlge_driver_name, 32); - strncpy(drvinfo->version, qlge_driver_version, 32); - snprintf(drvinfo->fw_version, 32, "v%d.%d.%d", - (qdev->fw_rev_id & 0x00ff0000) >> 16, - (qdev->fw_rev_id & 0x0000ff00) >> 8, - (qdev->fw_rev_id & 0x000000ff)); - strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); - drvinfo->n_stats = 0; - drvinfo->testinfo_len = 0; - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) - drvinfo->regdump_len = sizeof(struct ql_mpi_coredump); - else - drvinfo->regdump_len = sizeof(struct ql_reg_dump); - drvinfo->eedump_len = 0; -} - -static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - /* What we support. 
*/ - wol->supported = WAKE_MAGIC; - /* What we've currently got set. */ - wol->wolopts = qdev->wol; -} - -static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - - if (wol->wolopts & ~WAKE_MAGIC) - return -EINVAL; - qdev->wol = wol->wolopts; - - netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol); - if (!qdev->wol) { - u32 wol = 0; - status = ql_mb_wol_mode(qdev, wol); - netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n", - status == 0 ? "cleared successfully" : "clear failed", - wol); - } - - return 0; -} - -static int ql_set_phys_id(struct net_device *ndev, - enum ethtool_phys_id_state state) - -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - switch (state) { - case ETHTOOL_ID_ACTIVE: - /* Save the current LED settings */ - if (ql_mb_get_led_cfg(qdev)) - return -EIO; - - /* Start blinking */ - ql_mb_set_led_cfg(qdev, QL_LED_BLINK); - return 0; - - case ETHTOOL_ID_INACTIVE: - /* Restore LED settings */ - if (ql_mb_set_led_cfg(qdev, qdev->led_config)) - return -EIO; - return 0; - - default: - return -EINVAL; - } -} - -static int ql_start_loopback(struct ql_adapter *qdev) -{ - if (netif_carrier_ok(qdev->ndev)) { - set_bit(QL_LB_LINK_UP, &qdev->flags); - netif_carrier_off(qdev->ndev); - } else - clear_bit(QL_LB_LINK_UP, &qdev->flags); - qdev->link_config |= CFG_LOOPBACK_PCS; - return ql_mb_set_port_cfg(qdev); -} - -static void ql_stop_loopback(struct ql_adapter *qdev) -{ - qdev->link_config &= ~CFG_LOOPBACK_PCS; - ql_mb_set_port_cfg(qdev); - if (test_bit(QL_LB_LINK_UP, &qdev->flags)) { - netif_carrier_on(qdev->ndev); - clear_bit(QL_LB_LINK_UP, &qdev->flags); - } -} - -static void ql_create_lb_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - memset(skb->data, 0xFF, frame_size); - frame_size &= ~1; - memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); -} - -void ql_check_lb_frame(struct ql_adapter *qdev, - struct sk_buff *skb) -{ - unsigned int frame_size = skb->len; - - if ((*(skb->data + 3) == 0xFF) && - (*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) { - atomic_dec(&qdev->lb_count); - return; - } -} - -static int ql_run_loopback_test(struct ql_adapter *qdev) -{ - int i; - netdev_tx_t rc; - struct sk_buff *skb; - unsigned int size = SMALL_BUF_MAP_SIZE; - - for (i = 0; i < 64; i++) { - skb = netdev_alloc_skb(qdev->ndev, size); - if (!skb) - return -ENOMEM; - - skb->queue_mapping = 0; - skb_put(skb, size); - ql_create_lb_frame(skb, size); - rc = ql_lb_send(skb, qdev->ndev); - if (rc != NETDEV_TX_OK) - return -EPIPE; - atomic_inc(&qdev->lb_count); - } - /* Give queue time to settle before testing results. */ - msleep(2); - ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); - return atomic_read(&qdev->lb_count) ? 
-EIO : 0; -} - -static int ql_loopback_test(struct ql_adapter *qdev, u64 *data) -{ - *data = ql_start_loopback(qdev); - if (*data) - goto out; - *data = ql_run_loopback_test(qdev); -out: - ql_stop_loopback(qdev); - return *data; -} - -static void ql_self_test(struct net_device *ndev, - struct ethtool_test *eth_test, u64 *data) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - if (netif_running(ndev)) { - set_bit(QL_SELFTEST, &qdev->flags); - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - if (ql_loopback_test(qdev, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - } else { - /* Online tests */ - data[0] = 0; - } - clear_bit(QL_SELFTEST, &qdev->flags); - /* Give link time to come up after - * port configuration changes. - */ - msleep_interruptible(4 * 1000); - } else { - netif_err(qdev, drv, qdev->ndev, - "is down, Loopback test will fail.\n"); - eth_test->flags |= ETH_TEST_FL_FAILED; - } -} - -static int ql_get_regs_len(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) - return sizeof(struct ql_mpi_coredump); - else - return sizeof(struct ql_reg_dump); -} - -static void ql_get_regs(struct net_device *ndev, - struct ethtool_regs *regs, void *p) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - ql_get_dump(qdev, p); - qdev->core_is_dumped = 0; - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) - regs->len = sizeof(struct ql_mpi_coredump); - else - regs->len = sizeof(struct ql_reg_dump); -} - -static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - struct ql_adapter *qdev = netdev_priv(dev); - - c->rx_coalesce_usecs = qdev->rx_coalesce_usecs; - c->tx_coalesce_usecs = qdev->tx_coalesce_usecs; - - /* This chip coalesces as follows: - * If a packet arrives, hold off interrupts until - * cqicb->int_delay expires, but if no other packets arrive don't - * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a - * timer to coalesce on a frame basis. So, we have to take ethtool's - * max_coalesced_frames value and convert it to a delay in microseconds. - * We do this by using a basic thoughput of 1,000,000 frames per - * second @ (1024 bytes). This means one frame per usec. So it's a - * simple one to one ratio. - */ - c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames; - c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames; - - return 0; -} - -static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - /* Validate user parameters. */ - if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2) - return -EINVAL; - /* Don't wait more than 10 usec. */ - if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) - return -EINVAL; - if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2) - return -EINVAL; - if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) - return -EINVAL; - - /* Verify a change took place before updating the hardware. 
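[The comment in ql_get_coalesce() above rests on the claim that at 10 Gb/s one 1024-byte frame takes roughly one microsecond, which is what lets the driver treat ethtool's max_coalesced_frames count and the chip's microsecond delay as a one-to-one mapping. A quick check of that arithmetic:

#include <stdio.h>

int main(void)
{
        double bytes_per_sec = 10e9 / 8.0;      /* 10 Gb/s line rate */
        double frames_per_sec = bytes_per_sec / 1024.0;

        /* ~1.22 million 1024-byte frames per second, i.e. about one
         * frame per microsecond, hence the 1:1 frames<->usecs mapping. */
        printf("%.2fM frames/sec\n", frames_per_sec / 1e6);
        return 0;
}
]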
*/ - if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs && - qdev->tx_coalesce_usecs == c->tx_coalesce_usecs && - qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames && - qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames) - return 0; - - qdev->rx_coalesce_usecs = c->rx_coalesce_usecs; - qdev->tx_coalesce_usecs = c->tx_coalesce_usecs; - qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames; - qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames; - - return ql_update_ring_coalescing(qdev); -} - -static void ql_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ql_adapter *qdev = netdev_priv(netdev); - - ql_mb_get_port_cfg(qdev); - if (qdev->link_config & CFG_PAUSE_STD) { - pause->rx_pause = 1; - pause->tx_pause = 1; - } -} - -static int ql_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ql_adapter *qdev = netdev_priv(netdev); - int status = 0; - - if ((pause->rx_pause) && (pause->tx_pause)) - qdev->link_config |= CFG_PAUSE_STD; - else if (!pause->rx_pause && !pause->tx_pause) - qdev->link_config &= ~CFG_PAUSE_STD; - else - return -EINVAL; - - status = ql_mb_set_port_cfg(qdev); - return status; -} - -static u32 ql_get_msglevel(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - return qdev->msg_enable; -} - -static void ql_set_msglevel(struct net_device *ndev, u32 value) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - qdev->msg_enable = value; -} - -const struct ethtool_ops qlge_ethtool_ops = { - .get_settings = ql_get_settings, - .get_drvinfo = ql_get_drvinfo, - .get_wol = ql_get_wol, - .set_wol = ql_set_wol, - .get_regs_len = ql_get_regs_len, - .get_regs = ql_get_regs, - .get_msglevel = ql_get_msglevel, - .set_msglevel = ql_set_msglevel, - .get_link = ethtool_op_get_link, - .set_phys_id = ql_set_phys_id, - .self_test = ql_self_test, - .get_pauseparam = ql_get_pauseparam, - .set_pauseparam = ql_set_pauseparam, - .get_coalesce = ql_get_coalesce, - .set_coalesce = ql_set_coalesce, - .get_sset_count = ql_get_sset_count, - .get_strings = ql_get_strings, - .get_ethtool_stats = ql_get_ethtool_stats, -}; - diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c deleted file mode 100644 index f07e96e..0000000 --- a/drivers/net/qlge/qlge_main.c +++ /dev/null @@ -1,4987 +0,0 @@ -/* - * QLogic qlge NIC HBA Driver - * Copyright (c) 2003-2008 QLogic Corporation - * See LICENSE.qlge for copyright and licensing details. 
- * Author: Linux qlge network device driver by - * Ron Mercer - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "qlge.h" - -char qlge_driver_name[] = DRV_NAME; -const char qlge_driver_version[] = DRV_VERSION; - -MODULE_AUTHOR("Ron Mercer "); -MODULE_DESCRIPTION(DRV_STRING " "); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -static const u32 default_msg = - NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | -/* NETIF_MSG_TIMER | */ - NETIF_MSG_IFDOWN | - NETIF_MSG_IFUP | - NETIF_MSG_RX_ERR | - NETIF_MSG_TX_ERR | -/* NETIF_MSG_TX_QUEUED | */ -/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */ -/* NETIF_MSG_PKTDATA | */ - NETIF_MSG_HW | NETIF_MSG_WOL | 0; - -static int debug = -1; /* defaults above */ -module_param(debug, int, 0664); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -#define MSIX_IRQ 0 -#define MSI_IRQ 1 -#define LEG_IRQ 2 -static int qlge_irq_type = MSIX_IRQ; -module_param(qlge_irq_type, int, 0664); -MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); - -static int qlge_mpi_coredump; -module_param(qlge_mpi_coredump, int, 0); -MODULE_PARM_DESC(qlge_mpi_coredump, - "Option to enable MPI firmware dump. " - "Default is OFF - Do Not allocate memory. "); - -static int qlge_force_coredump; -module_param(qlge_force_coredump, int, 0); -MODULE_PARM_DESC(qlge_force_coredump, - "Option to allow force of firmware core dump. " - "Default is OFF - Do not allow."); - -static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = { - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, - /* required last entry */ - {0,} -}; - -MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); - -static int ql_wol(struct ql_adapter *qdev); -static void qlge_set_multicast_list(struct net_device *ndev); - -/* This hardware semaphore causes exclusive access to - * resources shared between the NIC driver, MPI firmware, - * FCOE firmware and the FC driver. 
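[Every caller brackets its shared-register accesses with the semaphore helpers defined just below. A minimal sketch of the intended acquire/access/release shape, reusing this file's ql_sem_spinlock(), ql_sem_unlock() and ql_read32(); the FLASH_DATA read is only an illustration, not a new driver entry point:

/* Sketch: exclusive access to a shared resource via the hardware
 * semaphore, using helpers defined later in this file. */
static int example_read_shared(struct ql_adapter *qdev, u32 *out)
{
        int status;

        status = ql_sem_spinlock(qdev, SEM_FLASH_MASK);
        if (status)
                return status;          /* -ETIMEDOUT: don't touch the regs */

        *out = ql_read32(qdev, FLASH_DATA);     /* exclusive section */

        ql_sem_unlock(qdev, SEM_FLASH_MASK);    /* always release */
        return 0;
}
]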
- */ -static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) -{ - u32 sem_bits = 0; - - switch (sem_mask) { - case SEM_XGMAC0_MASK: - sem_bits = SEM_SET << SEM_XGMAC0_SHIFT; - break; - case SEM_XGMAC1_MASK: - sem_bits = SEM_SET << SEM_XGMAC1_SHIFT; - break; - case SEM_ICB_MASK: - sem_bits = SEM_SET << SEM_ICB_SHIFT; - break; - case SEM_MAC_ADDR_MASK: - sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT; - break; - case SEM_FLASH_MASK: - sem_bits = SEM_SET << SEM_FLASH_SHIFT; - break; - case SEM_PROBE_MASK: - sem_bits = SEM_SET << SEM_PROBE_SHIFT; - break; - case SEM_RT_IDX_MASK: - sem_bits = SEM_SET << SEM_RT_IDX_SHIFT; - break; - case SEM_PROC_REG_MASK: - sem_bits = SEM_SET << SEM_PROC_REG_SHIFT; - break; - default: - netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); - return -EINVAL; - } - - ql_write32(qdev, SEM, sem_bits | sem_mask); - return !(ql_read32(qdev, SEM) & sem_bits); -} - -int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) -{ - unsigned int wait_count = 30; - do { - if (!ql_sem_trylock(qdev, sem_mask)) - return 0; - udelay(100); - } while (--wait_count); - return -ETIMEDOUT; -} - -void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) -{ - ql_write32(qdev, SEM, sem_mask); - ql_read32(qdev, SEM); /* flush */ -} - -/* This function waits for a specific bit to come ready - * in a given register. It is used mostly by the initialize - * process, but is also used in kernel thread API such as - * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid. - */ -int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) -{ - u32 temp; - int count = UDELAY_COUNT; - - while (count) { - temp = ql_read32(qdev, reg); - - /* check for errors */ - if (temp & err_bit) { - netif_alert(qdev, probe, qdev->ndev, - "register 0x%.08x access error, value = 0x%.08x!.\n", - reg, temp); - return -EIO; - } else if (temp & bit) - return 0; - udelay(UDELAY_DELAY); - count--; - } - netif_alert(qdev, probe, qdev->ndev, - "Timed out waiting for reg %x to come ready.\n", reg); - return -ETIMEDOUT; -} - -/* The CFG register is used to download TX and RX control blocks - * to the chip. This function waits for an operation to complete. - */ -static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) -{ - int count = UDELAY_COUNT; - u32 temp; - - while (count) { - temp = ql_read32(qdev, CFG); - if (temp & CFG_LE) - return -EIO; - if (!(temp & bit)) - return 0; - udelay(UDELAY_DELAY); - count--; - } - return -ETIMEDOUT; -} - - -/* Used to issue init control blocks to hw. Maps control block, - * sets address, triggers download, waits for completion. - */ -int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, - u16 q_id) -{ - u64 map; - int status = 0; - int direction; - u32 mask; - u32 value; - - direction = - (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? 
PCI_DMA_TODEVICE : - PCI_DMA_FROMDEVICE; - - map = pci_map_single(qdev->pdev, ptr, size, direction); - if (pci_dma_mapping_error(qdev->pdev, map)) { - netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n"); - return -ENOMEM; - } - - status = ql_sem_spinlock(qdev, SEM_ICB_MASK); - if (status) - return status; - - status = ql_wait_cfg(qdev, bit); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Timed out waiting for CFG to come ready.\n"); - goto exit; - } - - ql_write32(qdev, ICB_L, (u32) map); - ql_write32(qdev, ICB_H, (u32) (map >> 32)); - - mask = CFG_Q_MASK | (bit << 16); - value = bit | (q_id << CFG_Q_SHIFT); - ql_write32(qdev, CFG, (mask | value)); - - /* - * Wait for the bit to clear after signaling hw. - */ - status = ql_wait_cfg(qdev, bit); -exit: - ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ - pci_unmap_single(qdev->pdev, map, size, direction); - return status; -} - -/* Get a specific MAC address from the CAM. Used for debug and reg dump. */ -int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, - u32 *value) -{ - u32 offset = 0; - int status; - - switch (type) { - case MAC_ADDR_TYPE_MULTI_MAC: - case MAC_ADDR_TYPE_CAM_MAC: - { - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MR, 0); - if (status) - goto exit; - *value++ = ql_read32(qdev, MAC_ADDR_DATA); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MR, 0); - if (status) - goto exit; - *value++ = ql_read32(qdev, MAC_ADDR_DATA); - if (type == MAC_ADDR_TYPE_CAM_MAC) { - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ - status = - ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, - MAC_ADDR_MR, 0); - if (status) - goto exit; - *value++ = ql_read32(qdev, MAC_ADDR_DATA); - } - break; - } - case MAC_ADDR_TYPE_VLAN: - case MAC_ADDR_TYPE_MULTI_FLTR: - default: - netif_crit(qdev, ifup, qdev->ndev, - "Address type %d not yet supported.\n", type); - status = -EPERM; - } -exit: - return status; -} - -/* Set up a MAC, multicast or VLAN address for the - * inbound frame matching. 
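[ql_get_mac_addr_reg() above repeats the same four-step sequence once per 32-bit word: wait for write-ready, write the index register, wait for read-ready, read the data register. A sketch of that sequence factored into one helper, assuming this file's ql_wait_reg_rdy(), ql_write32() and ql_read32() primitives:

/* Sketch only: one indexed CAM word read. idx_bits carries the
 * offset/index/type fields the callers above assemble. */
static int example_read_cam_word(struct ql_adapter *qdev, u32 idx_bits,
                                 u32 *out)
{
        int status;

        status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
        if (status)
                return status;
        ql_write32(qdev, MAC_ADDR_IDX, idx_bits | MAC_ADDR_ADR | MAC_ADDR_RS);
        status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
        if (status)
                return status;
        *out = ql_read32(qdev, MAC_ADDR_DATA);
        return 0;
}
]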
- */ -static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, - u16 index) -{ - u32 offset = 0; - int status = 0; - - switch (type) { - case MAC_ADDR_TYPE_MULTI_MAC: - { - u32 upper = (addr[0] << 8) | addr[1]; - u32 lower = (addr[2] << 24) | (addr[3] << 16) | - (addr[4] << 8) | (addr[5]); - - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | - (index << MAC_ADDR_IDX_SHIFT) | - type | MAC_ADDR_E); - ql_write32(qdev, MAC_ADDR_DATA, lower); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | - (index << MAC_ADDR_IDX_SHIFT) | - type | MAC_ADDR_E); - - ql_write32(qdev, MAC_ADDR_DATA, upper); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - break; - } - case MAC_ADDR_TYPE_CAM_MAC: - { - u32 cam_output; - u32 upper = (addr[0] << 8) | addr[1]; - u32 lower = - (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | - (addr[5]); - - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Adding %s address %pM at index %d in the CAM.\n", - type == MAC_ADDR_TYPE_MULTI_MAC ? - "MULTICAST" : "UNICAST", - addr, index); - - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type); /* type */ - ql_write32(qdev, MAC_ADDR_DATA, lower); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type); /* type */ - ql_write32(qdev, MAC_ADDR_DATA, upper); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type); /* type */ - /* This field should also include the queue id - and possibly the function id. Right now we hardcode - the route field to NIC core. - */ - cam_output = (CAM_OUT_ROUTE_NIC | - (qdev-> - func << CAM_OUT_FUNC_SHIFT) | - (0 << CAM_OUT_CQ_ID_SHIFT)); - if (qdev->ndev->features & NETIF_F_HW_VLAN_RX) - cam_output |= CAM_OUT_RV; - /* route to NIC core */ - ql_write32(qdev, MAC_ADDR_DATA, cam_output); - break; - } - case MAC_ADDR_TYPE_VLAN: - { - u32 enable_bit = *((u32 *) &addr[0]); - /* For VLAN, the addr actually holds a bit that - * either enables or disables the vlan id we are - * addressing. It's either MAC_ADDR_E on or off. - * That's bit-27 we're talking about. - */ - netif_info(qdev, ifup, qdev->ndev, - "%s VLAN ID %d %s the CAM.\n", - enable_bit ? "Adding" : "Removing", - index, - enable_bit ? "to" : "from"); - - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type | /* type */ - enable_bit); /* enable/disable */ - break; - } - case MAC_ADDR_TYPE_MULTI_FLTR: - default: - netif_crit(qdev, ifup, qdev->ndev, - "Address type %d not yet supported.\n", type); - status = -EPERM; - } -exit: - return status; -} - -/* Set or clear MAC address in hardware. We sometimes - * have to clear it to prevent wrong frame routing - * especially in a bonding environment. 
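[The upper/lower split used throughout ql_set_mac_addr_reg() above packs a six-byte MAC into a 16-bit high word and a 32-bit low word. A self-contained example with a concrete, made-up address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical MAC 00:e0:8b:01:02:03 */
        uint8_t addr[6] = { 0x00, 0xe0, 0x8b, 0x01, 0x02, 0x03 };

        uint32_t upper = (addr[0] << 8) | addr[1];
        uint32_t lower = ((uint32_t)addr[2] << 24) | (addr[3] << 16) |
                         (addr[4] << 8) | addr[5];

        printf("upper=0x%04x lower=0x%08x\n", upper, lower);
        /* prints: upper=0x00e0 lower=0x8b010203 */
        return 0;
}
]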
- */ -static int ql_set_mac_addr(struct ql_adapter *qdev, int set) -{ - int status; - char zero_mac_addr[ETH_ALEN]; - char *addr; - - if (set) { - addr = &qdev->current_mac_addr[0]; - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Set Mac addr %pM\n", addr); - } else { - memset(zero_mac_addr, 0, ETH_ALEN); - addr = &zero_mac_addr[0]; - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Clearing MAC address\n"); - } - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - status = ql_set_mac_addr_reg(qdev, (u8 *) addr, - MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - netif_err(qdev, ifup, qdev->ndev, - "Failed to init mac address.\n"); - return status; -} - -void ql_link_on(struct ql_adapter *qdev) -{ - netif_err(qdev, link, qdev->ndev, "Link is up.\n"); - netif_carrier_on(qdev->ndev); - ql_set_mac_addr(qdev, 1); -} - -void ql_link_off(struct ql_adapter *qdev) -{ - netif_err(qdev, link, qdev->ndev, "Link is down.\n"); - netif_carrier_off(qdev->ndev); - ql_set_mac_addr(qdev, 0); -} - -/* Get a specific frame routing value from the CAM. - * Used for debug and reg dump. - */ -int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) -{ - int status = 0; - - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); - if (status) - goto exit; - - ql_write32(qdev, RT_IDX, - RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); - if (status) - goto exit; - *value = ql_read32(qdev, RT_DATA); -exit: - return status; -} - -/* The NIC function for this chip has 16 routing indexes. Each one can be used - * to route different frame types to various inbound queues. We send broadcast/ - * multicast/error frames to the default queue for slow handling, - * and CAM hit/RSS frames to the fast handling queues. - */ -static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, - int enable) -{ - int status = -EINVAL; /* Return error if no mask match. */ - u32 value = 0; - - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s %s mask %s the routing reg.\n", - enable ? "Adding" : "Removing", - index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" : - index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" : - index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" : - index == RT_IDX_BCAST_SLOT ? "BROADCAST" : - index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" : - index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" : - index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" : - index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" : - index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" : - index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" : - index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" : - index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" : - index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" : - index == RT_IDX_UNUSED013 ? "UNUSED13" : - index == RT_IDX_UNUSED014 ? "UNUSED14" : - index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" : - "(Bad index != RT_IDX)", - enable ? "to" : "from"); - - switch (mask) { - case RT_IDX_CAM_HIT: - { - value = RT_IDX_DST_CAM_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_VALID: /* Promiscuous Mode frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. 
*/ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_IP_CSUM_ERR_SLOT << - RT_IDX_IDX_SHIFT); /* index */ - break; - } - case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << - RT_IDX_IDX_SHIFT); /* index */ - break; - } - case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_MCAST: /* Pass up All Multicast frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */ - { - value = RT_IDX_DST_RSS | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case 0: /* Clear the E-bit on an entry. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (index << RT_IDX_IDX_SHIFT);/* index */ - break; - } - default: - netif_err(qdev, ifup, qdev->ndev, - "Mask type %d not yet supported.\n", mask); - status = -EPERM; - goto exit; - } - - if (value) { - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); - if (status) - goto exit; - value |= (enable ? RT_IDX_E : 0); - ql_write32(qdev, RT_IDX, value); - ql_write32(qdev, RT_DATA, enable ? mask : 0); - } -exit: - return status; -} - -static void ql_enable_interrupts(struct ql_adapter *qdev) -{ - ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); -} - -static void ql_disable_interrupts(struct ql_adapter *qdev) -{ - ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); -} - -/* If we're running with multiple MSI-X vectors then we enable on the fly. - * Otherwise, we may have multiple outstanding workers and don't want to - * enable until the last one finishes. In this case, the irq_cnt gets - * incremented every time we queue a worker and decremented every time - * a worker finishes. Once it hits zero we enable the interrupt. - */ -u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) -{ - u32 var = 0; - unsigned long hw_flags = 0; - struct intr_context *ctx = qdev->intr_context + intr; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { - /* Always enable if we're MSIX multi interrupts and - * it's not the default (zeroeth) interrupt. - */ - ql_write32(qdev, INTR_EN, - ctx->intr_en_mask); - var = ql_read32(qdev, STS); - return var; - } - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (atomic_dec_and_test(&ctx->irq_cnt)) { - ql_write32(qdev, INTR_EN, - ctx->intr_en_mask); - var = ql_read32(qdev, STS); - } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return var; -} - -static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) -{ - u32 var = 0; - struct intr_context *ctx; - - /* HW disables for us if we're MSIX multi interrupts and - * it's not the default (zeroeth) interrupt. 
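[The irq_cnt scheme above is easy to misread: the counter is "precharged" to 1, each queued worker bumps it, and the interrupt is only re-enabled by whichever path brings it back to zero. A compact user-space model of that gate, with C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int irq_cnt;

static void precharge(void)     { atomic_store(&irq_cnt, 1); }
static void worker_queued(void) { atomic_fetch_add(&irq_cnt, 1); }

/* Mirrors atomic_dec_and_test(): only the final completion returns
 * true, and only that caller re-enables the interrupt. */
static bool worker_done_should_enable(void)
{
        return atomic_fetch_sub(&irq_cnt, 1) == 1;
}
]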
- */ - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) - return 0; - - ctx = qdev->intr_context + intr; - spin_lock(&qdev->hw_lock); - if (!atomic_read(&ctx->irq_cnt)) { - ql_write32(qdev, INTR_EN, - ctx->intr_dis_mask); - var = ql_read32(qdev, STS); - } - atomic_inc(&ctx->irq_cnt); - spin_unlock(&qdev->hw_lock); - return var; -} - -static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) -{ - int i; - for (i = 0; i < qdev->intr_count; i++) { - /* The enable call does a atomic_dec_and_test - * and enables only if the result is zero. - * So we precharge it here. - */ - if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || - i == 0)) - atomic_set(&qdev->intr_context[i].irq_cnt, 1); - ql_enable_completion_interrupt(qdev, i); - } - -} - -static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) -{ - int status, i; - u16 csum = 0; - __le16 *flash = (__le16 *)&qdev->flash; - - status = strncmp((char *)&qdev->flash, str, 4); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n"); - return status; - } - - for (i = 0; i < size; i++) - csum += le16_to_cpu(*flash++); - - if (csum) - netif_err(qdev, ifup, qdev->ndev, - "Invalid flash checksum, csum = 0x%.04x.\n", csum); - - return csum; -} - -static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) -{ - int status = 0; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); - if (status) - goto exit; - /* set up for reg read */ - ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); - if (status) - goto exit; - /* This data is stored on flash as an array of - * __le32. Since ql_read32() returns cpu endian - * we need to swap it back. - */ - *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA)); -exit: - return status; -} - -static int ql_get_8000_flash_params(struct ql_adapter *qdev) -{ - u32 i, size; - int status; - __le32 *p = (__le32 *)&qdev->flash; - u32 offset; - u8 mac_addr[6]; - - /* Get flash offset for function and adjust - * for dword access. - */ - if (!qdev->port) - offset = FUNC0_FLASH_OFFSET / sizeof(u32); - else - offset = FUNC1_FLASH_OFFSET / sizeof(u32); - - if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) - return -ETIMEDOUT; - - size = sizeof(struct flash_params_8000) / sizeof(u32); - for (i = 0; i < size; i++, p++) { - status = ql_read_flash_word(qdev, i+offset, p); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Error reading flash.\n"); - goto exit; - } - } - - status = ql_validate_flash(qdev, - sizeof(struct flash_params_8000) / sizeof(u16), - "8000"); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); - status = -EINVAL; - goto exit; - } - - /* Extract either manufacturer or BOFM modified - * MAC address. 
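[ql_validate_flash() above sums the image as little-endian 16-bit words and treats anything but a wrap-around to zero as corruption. A standalone sketch of the same check; the sample words are made up, with the last one chosen to compensate:

#include <stdio.h>
#include <stdint.h>

static uint16_t csum16(const uint16_t *words, int n)
{
        uint16_t csum = 0;
        int i;

        for (i = 0; i < n; i++)         /* assumes a little-endian host */
                csum += words[i];
        return csum;                    /* 0 means "valid" */
}

int main(void)
{
        uint16_t image[4] = { 0x1234, 0x5678, 0x9abc, 0 };

        image[3] = (uint16_t)-(image[0] + image[1] + image[2]);
        printf("csum = 0x%04x\n", csum16(image, 4));    /* 0x0000 */
        return 0;
}
]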
- */ - if (qdev->flash.flash_params_8000.data_type1 == 2) - memcpy(mac_addr, - qdev->flash.flash_params_8000.mac_addr1, - qdev->ndev->addr_len); - else - memcpy(mac_addr, - qdev->flash.flash_params_8000.mac_addr, - qdev->ndev->addr_len); - - if (!is_valid_ether_addr(mac_addr)) { - netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n"); - status = -EINVAL; - goto exit; - } - - memcpy(qdev->ndev->dev_addr, - mac_addr, - qdev->ndev->addr_len); - -exit: - ql_sem_unlock(qdev, SEM_FLASH_MASK); - return status; -} - -static int ql_get_8012_flash_params(struct ql_adapter *qdev) -{ - int i; - int status; - __le32 *p = (__le32 *)&qdev->flash; - u32 offset = 0; - u32 size = sizeof(struct flash_params_8012) / sizeof(u32); - - /* Second function's parameters follow the first - * function's. - */ - if (qdev->port) - offset = size; - - if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) - return -ETIMEDOUT; - - for (i = 0; i < size; i++, p++) { - status = ql_read_flash_word(qdev, i+offset, p); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Error reading flash.\n"); - goto exit; - } - - } - - status = ql_validate_flash(qdev, - sizeof(struct flash_params_8012) / sizeof(u16), - "8012"); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); - status = -EINVAL; - goto exit; - } - - if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) { - status = -EINVAL; - goto exit; - } - - memcpy(qdev->ndev->dev_addr, - qdev->flash.flash_params_8012.mac_addr, - qdev->ndev->addr_len); - -exit: - ql_sem_unlock(qdev, SEM_FLASH_MASK); - return status; -} - -/* xgmac register are located behind the xgmac_addr and xgmac_data - * register pair. Each read/write requires us to wait for the ready - * bit before reading/writing the data. - */ -static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data) -{ - int status; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - return status; - /* write the data to the data reg */ - ql_write32(qdev, XGMAC_DATA, data); - /* trigger the write */ - ql_write32(qdev, XGMAC_ADDR, reg); - return status; -} - -/* xgmac register are located behind the xgmac_addr and xgmac_data - * register pair. Each read/write requires us to wait for the ready - * bit before reading/writing the data. - */ -int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) -{ - int status = 0; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - goto exit; - /* set up for reg read */ - ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - goto exit; - /* get the data */ - *data = ql_read32(qdev, XGMAC_DATA); -exit: - return status; -} - -/* This is used for reading the 64-bit statistics regs. */ -int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data) -{ - int status = 0; - u32 hi = 0; - u32 lo = 0; - - status = ql_read_xgmac_reg(qdev, reg, &lo); - if (status) - goto exit; - - status = ql_read_xgmac_reg(qdev, reg + 4, &hi); - if (status) - goto exit; - - *data = (u64) lo | ((u64) hi << 32); - -exit: - return status; -} - -static int ql_8000_port_initialize(struct ql_adapter *qdev) -{ - int status; - /* - * Get MPI firmware version for driver banner - * and ethool info. 
- */ - status = ql_mb_about_fw(qdev); - if (status) - goto exit; - status = ql_mb_get_fw_state(qdev); - if (status) - goto exit; - /* Wake up a worker to get/set the TX/RX frame sizes. */ - queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0); -exit: - return status; -} - -/* Take the MAC Core out of reset. - * Enable statistics counting. - * Take the transmitter/receiver out of reset. - * This functionality may be done in the MPI firmware at a - * later date. - */ -static int ql_8012_port_initialize(struct ql_adapter *qdev) -{ - int status = 0; - u32 data; - - if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) { - /* Another function has the semaphore, so - * wait for the port init bit to come ready. - */ - netif_info(qdev, link, qdev->ndev, - "Another function has the semaphore, so wait for the port init bit to come ready.\n"); - status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0); - if (status) { - netif_crit(qdev, link, qdev->ndev, - "Port initialize timed out.\n"); - } - return status; - } - - netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n"); - /* Set the core reset. */ - status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); - if (status) - goto end; - data |= GLOBAL_CFG_RESET; - status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); - if (status) - goto end; - - /* Clear the core reset and turn on jumbo for receiver. */ - data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */ - data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */ - data |= GLOBAL_CFG_TX_STAT_EN; - data |= GLOBAL_CFG_RX_STAT_EN; - status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); - if (status) - goto end; - - /* Enable transmitter, and clear it's reset. */ - status = ql_read_xgmac_reg(qdev, TX_CFG, &data); - if (status) - goto end; - data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */ - data |= TX_CFG_EN; /* Enable the transmitter. */ - status = ql_write_xgmac_reg(qdev, TX_CFG, data); - if (status) - goto end; - - /* Enable receiver and clear it's reset. */ - status = ql_read_xgmac_reg(qdev, RX_CFG, &data); - if (status) - goto end; - data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */ - data |= RX_CFG_EN; /* Enable the receiver. */ - status = ql_write_xgmac_reg(qdev, RX_CFG, data); - if (status) - goto end; - - /* Turn on jumbo. */ - status = - ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16)); - if (status) - goto end; - status = - ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580); - if (status) - goto end; - - /* Signal to the world that the port is enabled. */ - ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init)); -end: - ql_sem_unlock(qdev, qdev->xg_sem_mask); - return status; -} - -static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev) -{ - return PAGE_SIZE << qdev->lbq_buf_order; -} - -/* Get the next large buffer. */ -static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) -{ - struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; - rx_ring->lbq_curr_idx++; - if (rx_ring->lbq_curr_idx == rx_ring->lbq_len) - rx_ring->lbq_curr_idx = 0; - rx_ring->lbq_free_cnt++; - return lbq_desc; -} - -static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); - - pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr(lbq_desc, mapaddr), - rx_ring->lbq_buf_size, - PCI_DMA_FROMDEVICE); - - /* If it's the last chunk of our master page then - * we unmap it. 
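[ql_lbq_block_size() and ql_get_curr_lchunk() above carve one high-order page allocation into several receive buffers and unmap the page only when its last chunk is consumed. The arithmetic, worked with assumed values (4 KiB pages, order 1, 2 KiB buffers):

#include <stdio.h>

int main(void)
{
        unsigned int page_size = 4096;  /* assumed PAGE_SIZE */
        unsigned int order = 1;         /* assumed lbq_buf_order */
        unsigned int buf_size = 2048;   /* assumed lbq_buf_size */

        unsigned int block = page_size << order;        /* 8192 */

        /* Chunks sit at offsets 0, 2048, 4096, 6144; the test
         * "offset + buf_size == block" is true only for the last one,
         * which is when the master page gets unmapped. */
        printf("%u chunks per block\n", block / buf_size);      /* 4 */
        return 0;
}
]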
- */ - if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) - == ql_lbq_block_size(qdev)) - pci_unmap_page(qdev->pdev, - lbq_desc->p.pg_chunk.map, - ql_lbq_block_size(qdev), - PCI_DMA_FROMDEVICE); - return lbq_desc; -} - -/* Get the next small buffer. */ -static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) -{ - struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; - rx_ring->sbq_curr_idx++; - if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) - rx_ring->sbq_curr_idx = 0; - rx_ring->sbq_free_cnt++; - return sbq_desc; -} - -/* Update an rx ring index. */ -static void ql_update_cq(struct rx_ring *rx_ring) -{ - rx_ring->cnsmr_idx++; - rx_ring->curr_entry++; - if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { - rx_ring->cnsmr_idx = 0; - rx_ring->curr_entry = rx_ring->cq_base; - } -} - -static void ql_write_cq_idx(struct rx_ring *rx_ring) -{ - ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); -} - -static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, - struct bq_desc *lbq_desc) -{ - if (!rx_ring->pg_chunk.page) { - u64 map; - rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP | - GFP_ATOMIC, - qdev->lbq_buf_order); - if (unlikely(!rx_ring->pg_chunk.page)) { - netif_err(qdev, drv, qdev->ndev, - "page allocation failed.\n"); - return -ENOMEM; - } - rx_ring->pg_chunk.offset = 0; - map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, - 0, ql_lbq_block_size(qdev), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(qdev->pdev, map)) { - __free_pages(rx_ring->pg_chunk.page, - qdev->lbq_buf_order); - netif_err(qdev, drv, qdev->ndev, - "PCI mapping failed.\n"); - return -ENOMEM; - } - rx_ring->pg_chunk.map = map; - rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); - } - - /* Copy the current master pg_chunk info - * to the current descriptor. - */ - lbq_desc->p.pg_chunk = rx_ring->pg_chunk; - - /* Adjust the master page chunk for next - * buffer get. - */ - rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; - if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { - rx_ring->pg_chunk.page = NULL; - lbq_desc->p.pg_chunk.last_flag = 1; - } else { - rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; - get_page(rx_ring->pg_chunk.page); - lbq_desc->p.pg_chunk.last_flag = 0; - } - return 0; -} -/* Process (refill) a large buffer queue. 
*/ -static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - u32 clean_idx = rx_ring->lbq_clean_idx; - u32 start_idx = clean_idx; - struct bq_desc *lbq_desc; - u64 map; - int i; - - while (rx_ring->lbq_free_cnt > 32) { - for (i = 0; i < 16; i++) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "lbq: try cleaning clean_idx = %d.\n", - clean_idx); - lbq_desc = &rx_ring->lbq[clean_idx]; - if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { - netif_err(qdev, ifup, qdev->ndev, - "Could not get a page chunk.\n"); - return; - } - - map = lbq_desc->p.pg_chunk.map + - lbq_desc->p.pg_chunk.offset; - dma_unmap_addr_set(lbq_desc, mapaddr, map); - dma_unmap_len_set(lbq_desc, maplen, - rx_ring->lbq_buf_size); - *lbq_desc->addr = cpu_to_le64(map); - - pci_dma_sync_single_for_device(qdev->pdev, map, - rx_ring->lbq_buf_size, - PCI_DMA_FROMDEVICE); - clean_idx++; - if (clean_idx == rx_ring->lbq_len) - clean_idx = 0; - } - - rx_ring->lbq_clean_idx = clean_idx; - rx_ring->lbq_prod_idx += 16; - if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) - rx_ring->lbq_prod_idx = 0; - rx_ring->lbq_free_cnt -= 16; - } - - if (start_idx != clean_idx) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "lbq: updating prod idx = %d.\n", - rx_ring->lbq_prod_idx); - ql_write_db_reg(rx_ring->lbq_prod_idx, - rx_ring->lbq_prod_idx_db_reg); - } -} - -/* Process (refill) a small buffer queue. */ -static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - u32 clean_idx = rx_ring->sbq_clean_idx; - u32 start_idx = clean_idx; - struct bq_desc *sbq_desc; - u64 map; - int i; - - while (rx_ring->sbq_free_cnt > 16) { - for (i = 0; i < 16; i++) { - sbq_desc = &rx_ring->sbq[clean_idx]; - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "sbq: try cleaning clean_idx = %d.\n", - clean_idx); - if (sbq_desc->p.skb == NULL) { - netif_printk(qdev, rx_status, KERN_DEBUG, - qdev->ndev, - "sbq: getting new skb for index %d.\n", - sbq_desc->index); - sbq_desc->p.skb = - netdev_alloc_skb(qdev->ndev, - SMALL_BUFFER_SIZE); - if (sbq_desc->p.skb == NULL) { - netif_err(qdev, probe, qdev->ndev, - "Couldn't get an skb.\n"); - rx_ring->sbq_clean_idx = clean_idx; - return; - } - skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); - map = pci_map_single(qdev->pdev, - sbq_desc->p.skb->data, - rx_ring->sbq_buf_size, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(qdev->pdev, map)) { - netif_err(qdev, ifup, qdev->ndev, - "PCI mapping failed.\n"); - rx_ring->sbq_clean_idx = clean_idx; - dev_kfree_skb_any(sbq_desc->p.skb); - sbq_desc->p.skb = NULL; - return; - } - dma_unmap_addr_set(sbq_desc, mapaddr, map); - dma_unmap_len_set(sbq_desc, maplen, - rx_ring->sbq_buf_size); - *sbq_desc->addr = cpu_to_le64(map); - } - - clean_idx++; - if (clean_idx == rx_ring->sbq_len) - clean_idx = 0; - } - rx_ring->sbq_clean_idx = clean_idx; - rx_ring->sbq_prod_idx += 16; - if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) - rx_ring->sbq_prod_idx = 0; - rx_ring->sbq_free_cnt -= 16; - } - - if (start_idx != clean_idx) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "sbq: updating prod idx = %d.\n", - rx_ring->sbq_prod_idx); - ql_write_db_reg(rx_ring->sbq_prod_idx, - rx_ring->sbq_prod_idx_db_reg); - } -} - -static void ql_update_buffer_queues(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - ql_update_sbq(qdev, rx_ring); - ql_update_lbq(qdev, rx_ring); -} - -/* Unmaps tx buffers. Can be called from send() if a pci mapping - * fails at some stage, or from the interrupt when a tx completes. 
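[Both refill paths above batch their work (16 buffers at a time, with some hysteresis before starting) and ring the producer-index doorbell once at the end rather than once per buffer, since the doorbell is an MMIO write. A toy model of that shape, with the ring reduced to counters:

#include <stdio.h>

#define BATCH 16

static void refill(unsigned int *free_cnt, unsigned int *prod_idx,
                   unsigned int ring_len)
{
        unsigned int start = *prod_idx;

        while (*free_cnt > 2 * BATCH) {         /* hysteresis, as in lbq */
                *prod_idx = (*prod_idx + BATCH) % ring_len;
                *free_cnt -= BATCH;
        }
        if (*prod_idx != start)                 /* one doorbell per call */
                printf("doorbell: prod_idx = %u\n", *prod_idx);
}

int main(void)
{
        unsigned int free_cnt = 100, prod_idx = 0;

        refill(&free_cnt, &prod_idx, 128);      /* prints once */
        return 0;
}
]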
- */ -static void ql_unmap_send(struct ql_adapter *qdev, - struct tx_ring_desc *tx_ring_desc, int mapped) -{ - int i; - for (i = 0; i < mapped; i++) { - if (i == 0 || (i == 7 && mapped > 7)) { - /* - * Unmap the skb->data area, or the - * external sglist (AKA the Outbound - * Address List (OAL)). - * If its the zeroeth element, then it's - * the skb->data area. If it's the 7th - * element and there is more than 6 frags, - * then its an OAL. - */ - if (i == 7) { - netif_printk(qdev, tx_done, KERN_DEBUG, - qdev->ndev, - "unmapping OAL area.\n"); - } - pci_unmap_single(qdev->pdev, - dma_unmap_addr(&tx_ring_desc->map[i], - mapaddr), - dma_unmap_len(&tx_ring_desc->map[i], - maplen), - PCI_DMA_TODEVICE); - } else { - netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, - "unmapping frag %d.\n", i); - pci_unmap_page(qdev->pdev, - dma_unmap_addr(&tx_ring_desc->map[i], - mapaddr), - dma_unmap_len(&tx_ring_desc->map[i], - maplen), PCI_DMA_TODEVICE); - } - } - -} - -/* Map the buffers for this transmit. This will return - * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. - */ -static int ql_map_send(struct ql_adapter *qdev, - struct ob_mac_iocb_req *mac_iocb_ptr, - struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc) -{ - int len = skb_headlen(skb); - dma_addr_t map; - int frag_idx, err, map_idx = 0; - struct tx_buf_desc *tbd = mac_iocb_ptr->tbd; - int frag_cnt = skb_shinfo(skb)->nr_frags; - - if (frag_cnt) { - netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, - "frag_cnt = %d.\n", frag_cnt); - } - /* - * Map the skb buffer first. - */ - map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netif_err(qdev, tx_queued, qdev->ndev, - "PCI mapping failed with error: %d\n", err); - - return NETDEV_TX_BUSY; - } - - tbd->len = cpu_to_le32(len); - tbd->addr = cpu_to_le64(map); - dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); - dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); - map_idx++; - - /* - * This loop fills the remainder of the 8 address descriptors - * in the IOCB. If there are more than 7 fragments, then the - * eighth address desc will point to an external list (OAL). - * When this happens, the remainder of the frags will be stored - * in this list. - */ - for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx]; - tbd++; - if (frag_idx == 6 && frag_cnt > 7) { - /* Let's tack on an sglist. - * Our control block will now - * look like this: - * iocb->seg[0] = skb->data - * iocb->seg[1] = frag[0] - * iocb->seg[2] = frag[1] - * iocb->seg[3] = frag[2] - * iocb->seg[4] = frag[3] - * iocb->seg[5] = frag[4] - * iocb->seg[6] = frag[5] - * iocb->seg[7] = ptr to OAL (external sglist) - * oal->seg[0] = frag[6] - * oal->seg[1] = frag[7] - * oal->seg[2] = frag[8] - * oal->seg[3] = frag[9] - * oal->seg[4] = frag[10] - * etc... - */ - /* Tack on the OAL in the eighth segment of IOCB. */ - map = pci_map_single(qdev->pdev, &tx_ring_desc->oal, - sizeof(struct oal), - PCI_DMA_TODEVICE); - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netif_err(qdev, tx_queued, qdev->ndev, - "PCI mapping outbound address list with error: %d\n", - err); - goto map_error; - } - - tbd->addr = cpu_to_le64(map); - /* - * The length is the number of fragments - * that remain to be mapped times the length - * of our sglist (OAL). 
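[Concretely, for the length computation the comment above describes: with an assumed 10-fragment skb, the OAL pointer goes into the eighth IOCB slot at frag_idx 6, leaving four descriptors (frags 6..9) still to come. Assuming a 12-byte tx_buf_desc (8-byte address plus 4-byte length) and treating the TX_DESC_C flag value as illustrative:

#include <stdio.h>
#include <stdint.h>

#define TX_DESC_C 0x40000000u   /* continuation flag; value illustrative */

int main(void)
{
        int frag_cnt = 10, frag_idx = 6;
        unsigned int desc_size = 12;    /* assumed sizeof(struct tx_buf_desc) */

        uint32_t len = desc_size * (frag_cnt - frag_idx);       /* 48 bytes */

        printf("OAL len field = 0x%08x\n", len | TX_DESC_C);
        /* prints 0x40000030: 48 bytes of descriptors, continuation set */
        return 0;
}
]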
- */ - tbd->len = - cpu_to_le32((sizeof(struct tx_buf_desc) * - (frag_cnt - frag_idx)) | TX_DESC_C); - dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, - map); - dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, - sizeof(struct oal)); - tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; - map_idx++; - } - - map = - pci_map_page(qdev->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netif_err(qdev, tx_queued, qdev->ndev, - "PCI mapping frags failed with error: %d.\n", - err); - goto map_error; - } - - tbd->addr = cpu_to_le64(map); - tbd->len = cpu_to_le32(frag->size); - dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); - dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, - frag->size); - - } - /* Save the number of segments we've mapped. */ - tx_ring_desc->map_cnt = map_idx; - /* Terminate the last segment. */ - tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E); - return NETDEV_TX_OK; - -map_error: - /* - * If the first frag mapping failed, then i will be zero. - * This causes the unmap of the skb->data area. Otherwise - * we pass in the number of frags that mapped successfully - * so they can be umapped. - */ - ql_unmap_send(qdev, tx_ring_desc, map_idx); - return NETDEV_TX_BUSY; -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u32 length, - u16 vlan_id) -{ - struct sk_buff *skb; - struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - struct skb_frag_struct *rx_frag; - int nr_frags; - struct napi_struct *napi = &rx_ring->napi; - - napi->dev = qdev->ndev; - - skb = napi_get_frags(napi); - if (!skb) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get an skb, exiting.\n"); - rx_ring->rx_dropped++; - put_page(lbq_desc->p.pg_chunk.page); - return; - } - prefetch(lbq_desc->p.pg_chunk.va); - rx_frag = skb_shinfo(skb)->frags; - nr_frags = skb_shinfo(skb)->nr_frags; - rx_frag += nr_frags; - rx_frag->page = lbq_desc->p.pg_chunk.page; - rx_frag->page_offset = lbq_desc->p.pg_chunk.offset; - rx_frag->size = length; - - skb->len += length; - skb->data_len += length; - skb->truesize += length; - skb_shinfo(skb)->nr_frags++; - - rx_ring->rx_packets++; - rx_ring->rx_bytes += length; - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb_record_rx_queue(skb, rx_ring->cq_id); - if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, vlan_id); - napi_gro_frags(napi); -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_rx_page(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u32 length, - u16 vlan_id) -{ - struct net_device *ndev = qdev->ndev; - struct sk_buff *skb = NULL; - void *addr; - struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - struct napi_struct *napi = &rx_ring->napi; - - skb = netdev_alloc_skb(ndev, length); - if (!skb) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get an skb, need to unwind!.\n"); - rx_ring->rx_dropped++; - put_page(lbq_desc->p.pg_chunk.page); - return; - } - - addr = lbq_desc->p.pg_chunk.va; - prefetch(addr); - - - /* Frame error, so drop the packet. 
*/ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - netif_info(qdev, drv, qdev->ndev, - "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); - rx_ring->rx_errors++; - goto err_out; - } - - /* The max framesize filter on this chip is set higher than - * MTU since FCoE uses 2k frames. - */ - if (skb->len > ndev->mtu + ETH_HLEN) { - netif_err(qdev, drv, qdev->ndev, - "Segment too small, dropping.\n"); - rx_ring->rx_dropped++; - goto err_out; - } - memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN); - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", - length); - skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset+ETH_HLEN, - length-ETH_HLEN); - skb->len += length-ETH_HLEN; - skb->data_len += length-ETH_HLEN; - skb->truesize += length-ETH_HLEN; - - rx_ring->rx_packets++; - rx_ring->rx_bytes += skb->len; - skb->protocol = eth_type_trans(skb, ndev); - skb_checksum_none_assert(skb); - - if ((ndev->features & NETIF_F_RXCSUM) && - !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { - /* TCP frame. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "TCP checksum done!\n"); - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && - (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { - /* Unfragmented ipv4 UDP frame. */ - struct iphdr *iph = (struct iphdr *) skb->data; - if (!(iph->frag_off & - cpu_to_be16(IP_MF|IP_OFFSET))) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_printk(qdev, rx_status, KERN_DEBUG, - qdev->ndev, - "TCP checksum done!\n"); - } - } - } - - skb_record_rx_queue(skb, rx_ring->cq_id); - if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, vlan_id); - if (skb->ip_summed == CHECKSUM_UNNECESSARY) - napi_gro_receive(napi, skb); - else - netif_receive_skb(skb); - return; -err_out: - dev_kfree_skb_any(skb); - put_page(lbq_desc->p.pg_chunk.page); -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_rx_skb(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u32 length, - u16 vlan_id) -{ - struct net_device *ndev = qdev->ndev; - struct sk_buff *skb = NULL; - struct sk_buff *new_skb = NULL; - struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); - - skb = sbq_desc->p.skb; - /* Allocate new_skb and copy */ - new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); - if (new_skb == NULL) { - netif_err(qdev, probe, qdev->ndev, - "No skb available, drop the packet.\n"); - rx_ring->rx_dropped++; - return; - } - skb_reserve(new_skb, NET_IP_ALIGN); - memcpy(skb_put(new_skb, length), skb->data, length); - skb = new_skb; - - /* Frame error, so drop the packet. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - netif_info(qdev, drv, qdev->ndev, - "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); - dev_kfree_skb_any(skb); - rx_ring->rx_errors++; - return; - } - - /* loopback self test for ethtool */ - if (test_bit(QL_SELFTEST, &qdev->flags)) { - ql_check_lb_frame(qdev, skb); - dev_kfree_skb_any(skb); - return; - } - - /* The max framesize filter on this chip is set higher than - * MTU since FCoE uses 2k frames. 
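[The UDP branch of the checksum logic above (and in the copies of it below) only trusts the hardware checksum when the IPv4 packet is unfragmented, i.e. when neither the more-fragments bit nor a nonzero fragment offset is set. A self-contained version of that predicate; the driver spells the byte swap as cpu_to_be16() in one copy and ntohs() in another, but both reduce to the same swap of the constant:

#include <stdint.h>
#include <stdbool.h>
#include <arpa/inet.h>

#define IP_OFFSET 0x1FFF        /* fragment offset mask */
#define IP_MF     0x2000        /* "more fragments" flag */

/* frag_off_be comes straight from the IPv4 header, big-endian. */
static bool is_unfragmented(uint16_t frag_off_be)
{
        return (frag_off_be & htons(IP_MF | IP_OFFSET)) == 0;
}
]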
- */ - if (skb->len > ndev->mtu + ETH_HLEN) { - dev_kfree_skb_any(skb); - rx_ring->rx_dropped++; - return; - } - - prefetch(skb->data); - skb->dev = ndev; - if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%s Multicast.\n", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_HASH ? "Hash" : - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_REG ? "Registered" : - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); - } - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Promiscuous Packet.\n"); - - rx_ring->rx_packets++; - rx_ring->rx_bytes += skb->len; - skb->protocol = eth_type_trans(skb, ndev); - skb_checksum_none_assert(skb); - - /* If rx checksum is on, and there are no - * csum or frame errors. - */ - if ((ndev->features & NETIF_F_RXCSUM) && - !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { - /* TCP frame. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "TCP checksum done!\n"); - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && - (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { - /* Unfragmented ipv4 UDP frame. */ - struct iphdr *iph = (struct iphdr *) skb->data; - if (!(iph->frag_off & - ntohs(IP_MF|IP_OFFSET))) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_printk(qdev, rx_status, KERN_DEBUG, - qdev->ndev, - "TCP checksum done!\n"); - } - } - } - - skb_record_rx_queue(skb, rx_ring->cq_id); - if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, vlan_id); - if (skb->ip_summed == CHECKSUM_UNNECESSARY) - napi_gro_receive(&rx_ring->napi, skb); - else - netif_receive_skb(skb); -} - -static void ql_realign_skb(struct sk_buff *skb, int len) -{ - void *temp_addr = skb->data; - - /* Undo the skb_reserve(skb,32) we did before - * giving to hardware, and realign data on - * a 2-byte boundary. - */ - skb->data -= QLGE_SB_PAD - NET_IP_ALIGN; - skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN; - skb_copy_to_linear_data(skb, temp_addr, - (unsigned int)len); -} - -/* - * This function builds an skb for the given inbound - * completion. It will be rewritten for readability in the near - * future, but for not it works well. - */ -static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp) -{ - struct bq_desc *lbq_desc; - struct bq_desc *sbq_desc; - struct sk_buff *skb = NULL; - u32 length = le32_to_cpu(ib_mac_rsp->data_len); - u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); - - /* - * Handle the header buffer if present. - */ - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV && - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Header of %d bytes in small buffer.\n", hdr_len); - /* - * Headers fit nicely into a small buffer. - */ - sbq_desc = ql_get_curr_sbuf(rx_ring); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - skb = sbq_desc->p.skb; - ql_realign_skb(skb, hdr_len); - skb_put(skb, hdr_len); - sbq_desc->p.skb = NULL; - } - - /* - * Handle the data buffer(s). - */ - if (unlikely(!length)) { /* Is there data too? 
*/ - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "No Data buffer in this packet.\n"); - return skb; - } - - if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Headers in small, data of %d bytes in small, combine them.\n", - length); - /* - * Data is less than small buffer size so it's - * stuffed in a small buffer. - * For this case we append the data - * from the "data" small buffer to the "header" small - * buffer. - */ - sbq_desc = ql_get_curr_sbuf(rx_ring); - pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr - (sbq_desc, mapaddr), - dma_unmap_len - (sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - memcpy(skb_put(skb, length), - sbq_desc->p.skb->data, length); - pci_dma_sync_single_for_device(qdev->pdev, - dma_unmap_addr - (sbq_desc, - mapaddr), - dma_unmap_len - (sbq_desc, - maplen), - PCI_DMA_FROMDEVICE); - } else { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes in a single small buffer.\n", - length); - sbq_desc = ql_get_curr_sbuf(rx_ring); - skb = sbq_desc->p.skb; - ql_realign_skb(skb, length); - skb_put(skb, length); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, - mapaddr), - dma_unmap_len(sbq_desc, - maplen), - PCI_DMA_FROMDEVICE); - sbq_desc->p.skb = NULL; - } - } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Header in small, %d bytes in large. Chain large to small!\n", - length); - /* - * The data is in a single large buffer. We - * chain it to the header buffer's skb and let - * it rip. - */ - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Chaining page at offset = %d, for %d bytes to skb.\n", - lbq_desc->p.pg_chunk.offset, length); - skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset, - length); - skb->len += length; - skb->data_len += length; - skb->truesize += length; - } else { - /* - * The headers and data are in a single large buffer. We - * copy it to a new skb and let it go. This can happen with - * jumbo mtu on a non-TCP/UDP frame. - */ - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - skb = netdev_alloc_skb(qdev->ndev, length); - if (skb == NULL) { - netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, - "No skb available, drop the packet.\n"); - return NULL; - } - pci_unmap_page(qdev->pdev, - dma_unmap_addr(lbq_desc, - mapaddr), - dma_unmap_len(lbq_desc, maplen), - PCI_DMA_FROMDEVICE); - skb_reserve(skb, NET_IP_ALIGN); - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", - length); - skb_fill_page_desc(skb, 0, - lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset, - length); - skb->len += length; - skb->data_len += length; - skb->truesize += length; - length -= length; - __pskb_pull_tail(skb, - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? - VLAN_ETH_HLEN : ETH_HLEN); - } - } else { - /* - * The data is in a chain of large buffers - * pointed to by a small buffer. We loop - * thru and chain them to the our small header - * buffer's skb. - * frags: There are 18 max frags and our small - * buffer will hold 32 of them. The thing is, - * we'll use 3 max for our 9000 byte jumbo - * frames. If the MTU goes up we could - * eventually be in trouble. 
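[To put numbers on the comment above: with an assumed 4 KiB large-buffer size, a 9000-byte jumbo frame chains across exactly three page chunks, which is the "3 max" the comment mentions; growing the MTU (or shrinking lbq_buf_size) raises that count toward the frag limit.

#include <stdio.h>

int main(void)
{
        unsigned int length = 9000;     /* jumbo frame */
        unsigned int buf = 4096;        /* assumed lbq_buf_size */

        /* Mirrors the while (length > 0) chaining loop below:
         * 4096 + 4096 + 808 bytes -> 3 frags. */
        printf("%u chunks\n", (length + buf - 1) / buf);        /* 3 */
        return 0;
}
]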
- */ - int size, i = 0; - sbq_desc = ql_get_curr_sbuf(rx_ring); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { - /* - * This is an non TCP/UDP IP frame, so - * the headers aren't split into a small - * buffer. We have to use the small buffer - * that contains our sg list as our skb to - * send upstairs. Copy the sg list here to - * a local buffer and use it to find the - * pages to chain. - */ - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes of headers & data in chain of large.\n", - length); - skb = sbq_desc->p.skb; - sbq_desc->p.skb = NULL; - skb_reserve(skb, NET_IP_ALIGN); - } - while (length > 0) { - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - size = (length < rx_ring->lbq_buf_size) ? length : - rx_ring->lbq_buf_size; - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Adding page %d to skb for %d bytes.\n", - i, size); - skb_fill_page_desc(skb, i, - lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset, - size); - skb->len += size; - skb->data_len += size; - skb->truesize += size; - length -= size; - i++; - } - __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? - VLAN_ETH_HLEN : ETH_HLEN); - } - return skb; -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u16 vlan_id) -{ - struct net_device *ndev = qdev->ndev; - struct sk_buff *skb = NULL; - - QL_DUMP_IB_MAC_RSP(ib_mac_rsp); - - skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); - if (unlikely(!skb)) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "No skb available, drop packet.\n"); - rx_ring->rx_dropped++; - return; - } - - /* Frame error, so drop the packet. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - netif_info(qdev, drv, qdev->ndev, - "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); - dev_kfree_skb_any(skb); - rx_ring->rx_errors++; - return; - } - - /* The max framesize filter on this chip is set higher than - * MTU since FCoE uses 2k frames. - */ - if (skb->len > ndev->mtu + ETH_HLEN) { - dev_kfree_skb_any(skb); - rx_ring->rx_dropped++; - return; - } - - /* loopback self test for ethtool */ - if (test_bit(QL_SELFTEST, &qdev->flags)) { - ql_check_lb_frame(qdev, skb); - dev_kfree_skb_any(skb); - return; - } - - prefetch(skb->data); - skb->dev = ndev; - if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_HASH ? "Hash" : - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_REG ? "Registered" : - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); - rx_ring->rx_multicast++; - } - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Promiscuous Packet.\n"); - } - - skb->protocol = eth_type_trans(skb, ndev); - skb_checksum_none_assert(skb); - - /* If rx checksum is on, and there are no - * csum or frame errors. - */ - if ((ndev->features & NETIF_F_RXCSUM) && - !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { - /* TCP frame. 
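- *
- * Aside (illustration only): the "unfragmented" test just below
- * works because the more-fragments flag and the fragment offset
- * share one 16-bit header field; both are zero only for a complete
- * datagram. A standalone version, using the standard bit layout:
- *
- *	#include <stdio.h>
- *	#include <arpa/inet.h>
- *
- *	#define IP_MF     0x2000	/* more-fragments flag */
- *	#define IP_OFFSET 0x1fff	/* fragment offset mask */
- *
- *	/* frag_off is network byte order, as read from the header */
- *	static int is_unfragmented(unsigned short frag_off)
- *	{
- *		return !(frag_off & htons(IP_MF | IP_OFFSET));
- *	}
- *
- *	int main(void)
- *	{
- *		printf("%d\n", is_unfragmented(htons(0x4000)));	/* DF: 1 */
- *		printf("%d\n", is_unfragmented(htons(0x2000)));	/* MF: 0 */
- *		return 0;
- *	}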
 */
-	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
-		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-			     "TCP checksum done!\n");
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
-		   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
-		/* Unfragmented ipv4 UDP frame. */
-		struct iphdr *iph = (struct iphdr *) skb->data;
-		if (!(iph->frag_off &
-		      ntohs(IP_MF|IP_OFFSET))) {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-				     "TCP checksum done!\n");
-		}
-	}
-	}
-
-	rx_ring->rx_packets++;
-	rx_ring->rx_bytes += skb->len;
-	skb_record_rx_queue(skb, rx_ring->cq_id);
-	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
-		__vlan_hwaccel_put_tag(skb, vlan_id);
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
-		napi_gro_receive(&rx_ring->napi, skb);
-	else
-		netif_receive_skb(skb);
-}
-
-/* Process an inbound completion from an rx ring. */
-static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
-					    struct rx_ring *rx_ring,
-					    struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
-	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-			((le16_to_cpu(ib_mac_rsp->vlan_id) &
-			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
-
-	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
-
-	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
-		/* The data and headers are split into
-		 * separate buffers.
-		 */
-		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
-					     vlan_id);
-	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
-		/* The data fit in a single small buffer.
-		 * Allocate a new skb, copy the data and
-		 * return the buffer to the free pool.
-		 */
-		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
-				      length, vlan_id);
-	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
-		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
-		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
-		/* TCP packet in a page chunk that's been checksummed.
-		 * Tack it on to our GRO skb and let it go.
-		 */
-		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
-					   length, vlan_id);
-	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
-		/* Non-TCP packet in a page chunk. Allocate an
-		 * skb, tack it on frags, and send it up.
-		 */
-		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
-				       length, vlan_id);
-	} else {
-		/* Non-TCP/UDP large frames that span multiple buffers
-		 * can be processed correctly by the split frame logic.
-		 */
-		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
-					     vlan_id);
-	}
-
-	return (unsigned long)length;
-}
-
-/* Process an outbound completion from an rx ring.
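- *
- * Aside (illustration only): the error handling below is a plain
- * flag decode. A standalone sketch, with made-up bit positions
- * standing in for the real OB_MAC_IOCB_RSP_* constants:
- *
- *	#include <stdio.h>
- *
- *	#define RSP_E (1u << 0)	/* hypothetical: length mismatch */
- *	#define RSP_S (1u << 1)	/* hypothetical: frame too short */
- *	#define RSP_L (1u << 2)	/* hypothetical: frame too long */
- *	#define RSP_B (1u << 3)	/* hypothetical: backplane error */
- *
- *	static void decode_tx_flags(unsigned int flags1)
- *	{
- *		if (flags1 & RSP_E)
- *			puts("descriptor/transfer length mismatch");
- *		if (flags1 & RSP_S)
- *			puts("frame too short, not sent");
- *		if (flags1 & RSP_L)
- *			puts("frame too long, but sent anyway");
- *		if (flags1 & RSP_B)
- *			puts("PCI backplane error, frame not sent");
- *	}
- *
- *	int main(void) { decode_tx_flags(RSP_E | RSP_B); return 0; }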
*/ -static void ql_process_mac_tx_intr(struct ql_adapter *qdev, - struct ob_mac_iocb_rsp *mac_rsp) -{ - struct tx_ring *tx_ring; - struct tx_ring_desc *tx_ring_desc; - - QL_DUMP_OB_MAC_RSP(mac_rsp); - tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; - tx_ring_desc = &tx_ring->q[mac_rsp->tid]; - ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); - tx_ring->tx_bytes += (tx_ring_desc->skb)->len; - tx_ring->tx_packets++; - dev_kfree_skb(tx_ring_desc->skb); - tx_ring_desc->skb = NULL; - - if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | - OB_MAC_IOCB_RSP_S | - OB_MAC_IOCB_RSP_L | - OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { - netif_warn(qdev, tx_done, qdev->ndev, - "Total descriptor length did not match transfer length.\n"); - } - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { - netif_warn(qdev, tx_done, qdev->ndev, - "Frame too short to be valid, not sent.\n"); - } - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { - netif_warn(qdev, tx_done, qdev->ndev, - "Frame too long, but sent anyway.\n"); - } - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { - netif_warn(qdev, tx_done, qdev->ndev, - "PCI backplane error. Frame not sent.\n"); - } - } - atomic_inc(&tx_ring->tx_count); -} - -/* Fire up a handler to reset the MPI processor. */ -void ql_queue_fw_error(struct ql_adapter *qdev) -{ - ql_link_off(qdev); - queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); -} - -void ql_queue_asic_error(struct ql_adapter *qdev) -{ - ql_link_off(qdev); - ql_disable_interrupts(qdev); - /* Clear adapter up bit to signal the recovery - * process that it shouldn't kill the reset worker - * thread - */ - clear_bit(QL_ADAPTER_UP, &qdev->flags); - /* Set asic recovery bit to indicate reset process that we are - * in fatal error recovery process rather than normal close - */ - set_bit(QL_ASIC_RECOVERY, &qdev->flags); - queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); -} - -static void ql_process_chip_ae_intr(struct ql_adapter *qdev, - struct ib_ae_iocb_rsp *ib_ae_rsp) -{ - switch (ib_ae_rsp->event) { - case MGMT_ERR_EVENT: - netif_err(qdev, rx_err, qdev->ndev, - "Management Processor Fatal Error.\n"); - ql_queue_fw_error(qdev); - return; - - case CAM_LOOKUP_ERR_EVENT: - netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n"); - netdev_err(qdev->ndev, "This event shouldn't occur.\n"); - ql_queue_asic_error(qdev); - return; - - case SOFT_ECC_ERROR_EVENT: - netdev_err(qdev->ndev, "Soft ECC error detected.\n"); - ql_queue_asic_error(qdev); - break; - - case PCI_ERR_ANON_BUF_RD: - netdev_err(qdev->ndev, "PCI error occurred when reading " - "anonymous buffers from rx_ring %d.\n", - ib_ae_rsp->q_id); - ql_queue_asic_error(qdev); - break; - - default: - netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", - ib_ae_rsp->event); - ql_queue_asic_error(qdev); - break; - } -} - -static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) -{ - struct ql_adapter *qdev = rx_ring->qdev; - u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - struct ob_mac_iocb_rsp *net_rsp = NULL; - int count = 0; - - struct tx_ring *tx_ring; - /* While there are entries in the completion queue. 
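- *
- * Aside (illustration only): this is the classic producer/consumer
- * walk -- hardware advances a producer index in the shadow area and
- * the driver chases it with a consumer index that wraps at the end
- * of the ring. A minimal standalone model:
- *
- *	#include <stdio.h>
- *
- *	#define CQ_LEN 16	/* assumed ring size */
- *
- *	int main(void)
- *	{
- *		unsigned int prod = 5;	/* written by hardware */
- *		unsigned int cons = 2;	/* advanced by the driver */
- *		while (prod != cons) {
- *			/* process entry[cons] here */
- *			cons = (cons + 1) % CQ_LEN;	/* wrap around */
- *		}
- *		printf("caught up at %u\n", cons);	/* prints 5 */
- *		return 0;
- *	}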
*/ - while (prod != rx_ring->cnsmr_idx) { - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", - rx_ring->cq_id, prod, rx_ring->cnsmr_idx); - - net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; - rmb(); - switch (net_rsp->opcode) { - - case OPCODE_OB_MAC_TSO_IOCB: - case OPCODE_OB_MAC_IOCB: - ql_process_mac_tx_intr(qdev, net_rsp); - break; - default: - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Hit default case, not handled! dropping the packet, opcode = %x.\n", - net_rsp->opcode); - } - count++; - ql_update_cq(rx_ring); - prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - } - if (!net_rsp) - return 0; - ql_write_cq_idx(rx_ring); - tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; - if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { - if (atomic_read(&tx_ring->queue_stopped) && - (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) - /* - * The queue got stopped because the tx_ring was full. - * Wake it up, because it's now at least 25% empty. - */ - netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); - } - - return count; -} - -static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) -{ - struct ql_adapter *qdev = rx_ring->qdev; - u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - struct ql_net_rsp_iocb *net_rsp; - int count = 0; - - /* While there are entries in the completion queue. */ - while (prod != rx_ring->cnsmr_idx) { - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", - rx_ring->cq_id, prod, rx_ring->cnsmr_idx); - - net_rsp = rx_ring->curr_entry; - rmb(); - switch (net_rsp->opcode) { - case OPCODE_IB_MAC_IOCB: - ql_process_mac_rx_intr(qdev, rx_ring, - (struct ib_mac_iocb_rsp *) - net_rsp); - break; - - case OPCODE_IB_AE_IOCB: - ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) - net_rsp); - break; - default: - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Hit default case, not handled! dropping the packet, opcode = %x.\n", - net_rsp->opcode); - break; - } - count++; - ql_update_cq(rx_ring); - prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - if (count == budget) - break; - } - ql_update_buffer_queues(qdev, rx_ring); - ql_write_cq_idx(rx_ring); - return count; -} - -static int ql_napi_poll_msix(struct napi_struct *napi, int budget) -{ - struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); - struct ql_adapter *qdev = rx_ring->qdev; - struct rx_ring *trx_ring; - int i, work_done = 0; - struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); - - /* Service the TX rings first. They start - * right after the RSS rings. */ - for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { - trx_ring = &qdev->rx_ring[i]; - /* If this TX completion ring belongs to this vector and - * it's not empty then service it. - */ - if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && - (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != - trx_ring->cnsmr_idx)) { - netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, - "%s: Servicing TX completion ring %d.\n", - __func__, trx_ring->cq_id); - ql_clean_outbound_rx_ring(trx_ring); - } - } - - /* - * Now service the RSS ring if it's active. 
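- *
- * Aside (illustration only): ql_clean_inbound_rx_ring() above
- * follows the usual NAPI contract -- consume at most 'budget'
- * entries, and only complete the poll and re-enable the interrupt
- * when it finishes under budget. A standalone model:
- *
- *	#include <stdio.h>
- *
- *	static int pending = 37;	/* fake completion backlog */
- *
- *	static int poll(int budget)
- *	{
- *		int work_done = 0;
- *		while (work_done < budget && pending > 0) {
- *			pending--;	/* process one completion */
- *			work_done++;
- *		}
- *		/* if (work_done < budget): napi_complete() + irq on */
- *		return work_done;
- *	}
- *
- *	int main(void)
- *	{
- *		printf("%d\n", poll(64));	/* 37: finished early */
- *		return 0;
- *	}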
- */ - if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != - rx_ring->cnsmr_idx) { - netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, - "%s: Servicing RX completion ring %d.\n", - __func__, rx_ring->cq_id); - work_done = ql_clean_inbound_rx_ring(rx_ring, budget); - } - - if (work_done < budget) { - napi_complete(napi); - ql_enable_completion_interrupt(qdev, rx_ring->irq); - } - return work_done; -} - -static void qlge_vlan_mode(struct net_device *ndev, u32 features) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - if (features & NETIF_F_HW_VLAN_RX) { - netif_printk(qdev, ifup, KERN_DEBUG, ndev, - "Turning on VLAN in NIC_RCV_CFG.\n"); - ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | - NIC_RCV_CFG_VLAN_MATCH_AND_NON); - } else { - netif_printk(qdev, ifup, KERN_DEBUG, ndev, - "Turning off VLAN in NIC_RCV_CFG.\n"); - ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); - } -} - -static u32 qlge_fix_features(struct net_device *ndev, u32 features) -{ - /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. - */ - if (features & NETIF_F_HW_VLAN_RX) - features |= NETIF_F_HW_VLAN_TX; - else - features &= ~NETIF_F_HW_VLAN_TX; - - return features; -} - -static int qlge_set_features(struct net_device *ndev, u32 features) -{ - u32 changed = ndev->features ^ features; - - if (changed & NETIF_F_HW_VLAN_RX) - qlge_vlan_mode(ndev, features); - - return 0; -} - -static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) -{ - u32 enable_bit = MAC_ADDR_E; - - if (ql_set_mac_addr_reg - (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init vlan address.\n"); - } -} - -static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return; - - __qlge_vlan_rx_add_vid(qdev, vid); - set_bit(vid, qdev->active_vlans); - - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); -} - -static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) -{ - u32 enable_bit = 0; - - if (ql_set_mac_addr_reg - (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to clear vlan address.\n"); - } -} - -static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return; - - __qlge_vlan_rx_kill_vid(qdev, vid); - clear_bit(vid, qdev->active_vlans); - - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); -} - -static void qlge_restore_vlan(struct ql_adapter *qdev) -{ - int status; - u16 vid; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return; - - for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID) - __qlge_vlan_rx_add_vid(qdev, vid); - - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); -} - -/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ -static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) -{ - struct rx_ring *rx_ring = dev_id; - napi_schedule(&rx_ring->napi); - return IRQ_HANDLED; -} - -/* This handles a fatal error, MPI activity, and the default - * rx_ring in an MSI-X multiple vector environment. - * In MSI/Legacy environment it also process the rest of - * the rx_rings. 
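- *
- * Aside (illustration only): qlge_fix_features() above slaves the
- * TX VLAN flag to the RX one because the hardware has no separate
- * toggle. As a pure function it is easy to model with stand-in
- * flag bits:
- *
- *	#include <stdio.h>
- *
- *	#define F_VLAN_RX (1u << 0)	/* stand-in for NETIF_F_HW_VLAN_RX */
- *	#define F_VLAN_TX (1u << 1)	/* stand-in for NETIF_F_HW_VLAN_TX */
- *
- *	static unsigned int fix_features(unsigned int f)
- *	{
- *		if (f & F_VLAN_RX)
- *			f |= F_VLAN_TX;		/* force tx on with rx */
- *		else
- *			f &= ~F_VLAN_TX;	/* and off with it */
- *		return f;
- *	}
- *
- *	int main(void)
- *	{
- *		printf("0x%x\n", fix_features(F_VLAN_RX));	/* 0x3 */
- *		printf("0x%x\n", fix_features(F_VLAN_TX));	/* 0x0 */
- *		return 0;
- *	}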
- */ -static irqreturn_t qlge_isr(int irq, void *dev_id) -{ - struct rx_ring *rx_ring = dev_id; - struct ql_adapter *qdev = rx_ring->qdev; - struct intr_context *intr_context = &qdev->intr_context[0]; - u32 var; - int work_done = 0; - - spin_lock(&qdev->hw_lock); - if (atomic_read(&qdev->intr_context[0].irq_cnt)) { - netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, - "Shared Interrupt, Not ours!\n"); - spin_unlock(&qdev->hw_lock); - return IRQ_NONE; - } - spin_unlock(&qdev->hw_lock); - - var = ql_disable_completion_interrupt(qdev, intr_context->intr); - - /* - * Check for fatal error. - */ - if (var & STS_FE) { - ql_queue_asic_error(qdev); - netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); - var = ql_read32(qdev, ERR_STS); - netdev_err(qdev->ndev, "Resetting chip. " - "Error Status Register = 0x%x\n", var); - return IRQ_HANDLED; - } - - /* - * Check MPI processor activity. - */ - if ((var & STS_PI) && - (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { - /* - * We've got an async event or mailbox completion. - * Handle it and clear the source of the interrupt. - */ - netif_err(qdev, intr, qdev->ndev, - "Got MPI processor interrupt.\n"); - ql_disable_completion_interrupt(qdev, intr_context->intr); - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - queue_delayed_work_on(smp_processor_id(), - qdev->workqueue, &qdev->mpi_work, 0); - work_done++; - } - - /* - * Get the bit-mask that shows the active queues for this - * pass. Compare it to the queues that this irq services - * and call napi if there's a match. - */ - var = ql_read32(qdev, ISR1); - if (var & intr_context->irq_mask) { - netif_info(qdev, intr, qdev->ndev, - "Waking handler for rx_ring[0].\n"); - ql_disable_completion_interrupt(qdev, intr_context->intr); - napi_schedule(&rx_ring->napi); - work_done++; - } - ql_enable_completion_interrupt(qdev, intr_context->intr); - return work_done ? 
IRQ_HANDLED : IRQ_NONE; -} - -static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) -{ - - if (skb_is_gso(skb)) { - int err; - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } - - mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; - mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; - mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); - mac_iocb_ptr->total_hdrs_len = - cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); - mac_iocb_ptr->net_trans_offset = - cpu_to_le16(skb_network_offset(skb) | - skb_transport_offset(skb) - << OB_MAC_TRANSPORT_HDR_SHIFT); - mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; - if (likely(skb->protocol == htons(ETH_P_IP))) { - struct iphdr *iph = ip_hdr(skb); - iph->check = 0; - mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - } else if (skb->protocol == htons(ETH_P_IPV6)) { - mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - } - return 1; - } - return 0; -} - -static void ql_hw_csum_setup(struct sk_buff *skb, - struct ob_mac_tso_iocb_req *mac_iocb_ptr) -{ - int len; - struct iphdr *iph = ip_hdr(skb); - __sum16 *check; - mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; - mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); - mac_iocb_ptr->net_trans_offset = - cpu_to_le16(skb_network_offset(skb) | - skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); - - mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; - len = (ntohs(iph->tot_len) - (iph->ihl << 2)); - if (likely(iph->protocol == IPPROTO_TCP)) { - check = &(tcp_hdr(skb)->check); - mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC; - mac_iocb_ptr->total_hdrs_len = - cpu_to_le16(skb_transport_offset(skb) + - (tcp_hdr(skb)->doff << 2)); - } else { - check = &(udp_hdr(skb)->check); - mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC; - mac_iocb_ptr->total_hdrs_len = - cpu_to_le16(skb_transport_offset(skb) + - sizeof(struct udphdr)); - } - *check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, len, iph->protocol, 0); -} - -static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) -{ - struct tx_ring_desc *tx_ring_desc; - struct ob_mac_iocb_req *mac_iocb_ptr; - struct ql_adapter *qdev = netdev_priv(ndev); - int tso; - struct tx_ring *tx_ring; - u32 tx_ring_idx = (u32) skb->queue_mapping; - - tx_ring = &qdev->tx_ring[tx_ring_idx]; - - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - - if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { - netif_info(qdev, tx_queued, qdev->ndev, - "%s: shutting down tx queue %d du to lack of resources.\n", - __func__, tx_ring_idx); - netif_stop_subqueue(ndev, tx_ring->wq_id); - atomic_inc(&tx_ring->queue_stopped); - tx_ring->tx_errors++; - return NETDEV_TX_BUSY; - } - tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; - mac_iocb_ptr = tx_ring_desc->queue_entry; - memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr)); - - mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB; - mac_iocb_ptr->tid = tx_ring_desc->index; - /* We use the upper 32-bits to store the tx queue for this IO. - * When we get the completion we can use it to establish the context. 
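- *
- * Aside (illustration only): ql_tso() and ql_hw_csum_setup() above
- * seed the TCP/UDP checksum field with the IPv4 pseudo-header sum
- * so the hardware only has to add in the payload. Ignoring byte
- * order and the final complement, the arithmetic is one's-complement
- * folding:
- *
- *	#include <stdio.h>
- *	#include <stdint.h>
- *
- *	static uint16_t csum_fold(uint32_t sum)
- *	{
- *		while (sum >> 16)	/* fold carries back in */
- *			sum = (sum & 0xffff) + (sum >> 16);
- *		return (uint16_t)sum;
- *	}
- *
- *	/* pseudo-header: saddr, daddr, protocol, L4 length */
- *	static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
- *				       uint8_t proto, uint16_t len)
- *	{
- *		uint32_t sum = (saddr >> 16) + (saddr & 0xffff) +
- *			       (daddr >> 16) + (daddr & 0xffff) +
- *			       proto + len;
- *		return csum_fold(sum);
- *	}
- *
- *	int main(void)
- *	{
- *		printf("0x%04x\n", pseudo_hdr_sum(0x0a000001, 0x0a000002,
- *						  6, 1460));	/* TCP */
- *		return 0;
- *	}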
- */ - mac_iocb_ptr->txq_idx = tx_ring_idx; - tx_ring_desc->skb = skb; - - mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); - - if (vlan_tx_tag_present(skb)) { - netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, - "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); - mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; - mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); - } - tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { - ql_hw_csum_setup(skb, - (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); - } - if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != - NETDEV_TX_OK) { - netif_err(qdev, tx_queued, qdev->ndev, - "Could not map the segments.\n"); - tx_ring->tx_errors++; - return NETDEV_TX_BUSY; - } - QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); - tx_ring->prod_idx++; - if (tx_ring->prod_idx == tx_ring->wq_len) - tx_ring->prod_idx = 0; - wmb(); - - ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); - netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, - "tx queued, slot %d, len %d\n", - tx_ring->prod_idx, skb->len); - - atomic_dec(&tx_ring->tx_count); - return NETDEV_TX_OK; -} - - -static void ql_free_shadow_space(struct ql_adapter *qdev) -{ - if (qdev->rx_ring_shadow_reg_area) { - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->rx_ring_shadow_reg_area, - qdev->rx_ring_shadow_reg_dma); - qdev->rx_ring_shadow_reg_area = NULL; - } - if (qdev->tx_ring_shadow_reg_area) { - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->tx_ring_shadow_reg_area, - qdev->tx_ring_shadow_reg_dma); - qdev->tx_ring_shadow_reg_area = NULL; - } -} - -static int ql_alloc_shadow_space(struct ql_adapter *qdev) -{ - qdev->rx_ring_shadow_reg_area = - pci_alloc_consistent(qdev->pdev, - PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); - if (qdev->rx_ring_shadow_reg_area == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Allocation of RX shadow space failed.\n"); - return -ENOMEM; - } - memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); - qdev->tx_ring_shadow_reg_area = - pci_alloc_consistent(qdev->pdev, PAGE_SIZE, - &qdev->tx_ring_shadow_reg_dma); - if (qdev->tx_ring_shadow_reg_area == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Allocation of TX shadow space failed.\n"); - goto err_wqp_sh_area; - } - memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE); - return 0; - -err_wqp_sh_area: - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->rx_ring_shadow_reg_area, - qdev->rx_ring_shadow_reg_dma); - return -ENOMEM; -} - -static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) -{ - struct tx_ring_desc *tx_ring_desc; - int i; - struct ob_mac_iocb_req *mac_iocb_ptr; - - mac_iocb_ptr = tx_ring->wq_base; - tx_ring_desc = tx_ring->q; - for (i = 0; i < tx_ring->wq_len; i++) { - tx_ring_desc->index = i; - tx_ring_desc->skb = NULL; - tx_ring_desc->queue_entry = mac_iocb_ptr; - mac_iocb_ptr++; - tx_ring_desc++; - } - atomic_set(&tx_ring->tx_count, tx_ring->wq_len); - atomic_set(&tx_ring->queue_stopped, 0); -} - -static void ql_free_tx_resources(struct ql_adapter *qdev, - struct tx_ring *tx_ring) -{ - if (tx_ring->wq_base) { - pci_free_consistent(qdev->pdev, tx_ring->wq_size, - tx_ring->wq_base, tx_ring->wq_base_dma); - tx_ring->wq_base = NULL; - } - kfree(tx_ring->q); - tx_ring->q = NULL; -} - -static int ql_alloc_tx_resources(struct ql_adapter *qdev, - struct tx_ring *tx_ring) -{ - tx_ring->wq_base = - pci_alloc_consistent(qdev->pdev, 
tx_ring->wq_size, - &tx_ring->wq_base_dma); - - if ((tx_ring->wq_base == NULL) || - tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { - netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); - return -ENOMEM; - } - tx_ring->q = - kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); - if (tx_ring->q == NULL) - goto err; - - return 0; -err: - pci_free_consistent(qdev->pdev, tx_ring->wq_size, - tx_ring->wq_base, tx_ring->wq_base_dma); - return -ENOMEM; -} - -static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - struct bq_desc *lbq_desc; - - uint32_t curr_idx, clean_idx; - - curr_idx = rx_ring->lbq_curr_idx; - clean_idx = rx_ring->lbq_clean_idx; - while (curr_idx != clean_idx) { - lbq_desc = &rx_ring->lbq[curr_idx]; - - if (lbq_desc->p.pg_chunk.last_flag) { - pci_unmap_page(qdev->pdev, - lbq_desc->p.pg_chunk.map, - ql_lbq_block_size(qdev), - PCI_DMA_FROMDEVICE); - lbq_desc->p.pg_chunk.last_flag = 0; - } - - put_page(lbq_desc->p.pg_chunk.page); - lbq_desc->p.pg_chunk.page = NULL; - - if (++curr_idx == rx_ring->lbq_len) - curr_idx = 0; - - } -} - -static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - int i; - struct bq_desc *sbq_desc; - - for (i = 0; i < rx_ring->sbq_len; i++) { - sbq_desc = &rx_ring->sbq[i]; - if (sbq_desc == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "sbq_desc %d is NULL.\n", i); - return; - } - if (sbq_desc->p.skb) { - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - dev_kfree_skb(sbq_desc->p.skb); - sbq_desc->p.skb = NULL; - } - } -} - -/* Free all large and small rx buffers associated - * with the completion queues for this device. - */ -static void ql_free_rx_buffers(struct ql_adapter *qdev) -{ - int i; - struct rx_ring *rx_ring; - - for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - if (rx_ring->lbq) - ql_free_lbq_buffers(qdev, rx_ring); - if (rx_ring->sbq) - ql_free_sbq_buffers(qdev, rx_ring); - } -} - -static void ql_alloc_rx_buffers(struct ql_adapter *qdev) -{ - struct rx_ring *rx_ring; - int i; - - for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - if (rx_ring->type != TX_Q) - ql_update_buffer_queues(qdev, rx_ring); - } -} - -static void ql_init_lbq_ring(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - int i; - struct bq_desc *lbq_desc; - __le64 *bq = rx_ring->lbq_base; - - memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); - for (i = 0; i < rx_ring->lbq_len; i++) { - lbq_desc = &rx_ring->lbq[i]; - memset(lbq_desc, 0, sizeof(*lbq_desc)); - lbq_desc->index = i; - lbq_desc->addr = bq; - bq++; - } -} - -static void ql_init_sbq_ring(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - int i; - struct bq_desc *sbq_desc; - __le64 *bq = rx_ring->sbq_base; - - memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc)); - for (i = 0; i < rx_ring->sbq_len; i++) { - sbq_desc = &rx_ring->sbq[i]; - memset(sbq_desc, 0, sizeof(*sbq_desc)); - sbq_desc->index = i; - sbq_desc->addr = bq; - bq++; - } -} - -static void ql_free_rx_resources(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - /* Free the small buffer queue. */ - if (rx_ring->sbq_base) { - pci_free_consistent(qdev->pdev, - rx_ring->sbq_size, - rx_ring->sbq_base, rx_ring->sbq_base_dma); - rx_ring->sbq_base = NULL; - } - - /* Free the small buffer queue control blocks. */ - kfree(rx_ring->sbq); - rx_ring->sbq = NULL; - - /* Free the large buffer queue. 
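- *
- * Aside (illustration only): the allocators in this file use the
- * kernel's goto-unwind idiom -- on failure, release only what was
- * already acquired, in reverse order. A standalone sketch:
- *
- *	#include <stdlib.h>
- *
- *	struct rings { void *a; void *b; };
- *
- *	static int rings_alloc(struct rings *r)
- *	{
- *		r->a = malloc(4096);
- *		if (!r->a)
- *			goto err_a;
- *		r->b = malloc(4096);
- *		if (!r->b)
- *			goto err_b;
- *		return 0;
- *	err_b:
- *		free(r->a);	/* undo the first allocation only */
- *	err_a:
- *		return -1;	/* would be -ENOMEM in kernel code */
- *	}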
*/ - if (rx_ring->lbq_base) { - pci_free_consistent(qdev->pdev, - rx_ring->lbq_size, - rx_ring->lbq_base, rx_ring->lbq_base_dma); - rx_ring->lbq_base = NULL; - } - - /* Free the large buffer queue control blocks. */ - kfree(rx_ring->lbq); - rx_ring->lbq = NULL; - - /* Free the rx queue. */ - if (rx_ring->cq_base) { - pci_free_consistent(qdev->pdev, - rx_ring->cq_size, - rx_ring->cq_base, rx_ring->cq_base_dma); - rx_ring->cq_base = NULL; - } -} - -/* Allocate queues and buffers for this completions queue based - * on the values in the parameter structure. */ -static int ql_alloc_rx_resources(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - - /* - * Allocate the completion queue for this rx_ring. - */ - rx_ring->cq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, - &rx_ring->cq_base_dma); - - if (rx_ring->cq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); - return -ENOMEM; - } - - if (rx_ring->sbq_len) { - /* - * Allocate small buffer queue. - */ - rx_ring->sbq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, - &rx_ring->sbq_base_dma); - - if (rx_ring->sbq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Small buffer queue allocation failed.\n"); - goto err_mem; - } - - /* - * Allocate small buffer queue control blocks. - */ - rx_ring->sbq = - kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), - GFP_KERNEL); - if (rx_ring->sbq == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Small buffer queue control block allocation failed.\n"); - goto err_mem; - } - - ql_init_sbq_ring(qdev, rx_ring); - } - - if (rx_ring->lbq_len) { - /* - * Allocate large buffer queue. - */ - rx_ring->lbq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, - &rx_ring->lbq_base_dma); - - if (rx_ring->lbq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Large buffer queue allocation failed.\n"); - goto err_mem; - } - /* - * Allocate large buffer queue control blocks. - */ - rx_ring->lbq = - kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), - GFP_KERNEL); - if (rx_ring->lbq == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Large buffer queue control block allocation failed.\n"); - goto err_mem; - } - - ql_init_lbq_ring(qdev, rx_ring); - } - - return 0; - -err_mem: - ql_free_rx_resources(qdev, rx_ring); - return -ENOMEM; -} - -static void ql_tx_ring_clean(struct ql_adapter *qdev) -{ - struct tx_ring *tx_ring; - struct tx_ring_desc *tx_ring_desc; - int i, j; - - /* - * Loop through all queues and free - * any resources. - */ - for (j = 0; j < qdev->tx_ring_count; j++) { - tx_ring = &qdev->tx_ring[j]; - for (i = 0; i < tx_ring->wq_len; i++) { - tx_ring_desc = &tx_ring->q[i]; - if (tx_ring_desc && tx_ring_desc->skb) { - netif_err(qdev, ifdown, qdev->ndev, - "Freeing lost SKB %p, from queue %d, index %d.\n", - tx_ring_desc->skb, j, - tx_ring_desc->index); - ql_unmap_send(qdev, tx_ring_desc, - tx_ring_desc->map_cnt); - dev_kfree_skb(tx_ring_desc->skb); - tx_ring_desc->skb = NULL; - } - } - } -} - -static void ql_free_mem_resources(struct ql_adapter *qdev) -{ - int i; - - for (i = 0; i < qdev->tx_ring_count; i++) - ql_free_tx_resources(qdev, &qdev->tx_ring[i]); - for (i = 0; i < qdev->rx_ring_count; i++) - ql_free_rx_resources(qdev, &qdev->rx_ring[i]); - ql_free_shadow_space(qdev); -} - -static int ql_alloc_mem_resources(struct ql_adapter *qdev) -{ - int i; - - /* Allocate space for our shadow registers and such. 
*/ - if (ql_alloc_shadow_space(qdev)) - return -ENOMEM; - - for (i = 0; i < qdev->rx_ring_count; i++) { - if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { - netif_err(qdev, ifup, qdev->ndev, - "RX resource allocation failed.\n"); - goto err_mem; - } - } - /* Allocate tx queue resources */ - for (i = 0; i < qdev->tx_ring_count; i++) { - if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { - netif_err(qdev, ifup, qdev->ndev, - "TX resource allocation failed.\n"); - goto err_mem; - } - } - return 0; - -err_mem: - ql_free_mem_resources(qdev); - return -ENOMEM; -} - -/* Set up the rx ring control block and pass it to the chip. - * The control block is defined as - * "Completion Queue Initialization Control Block", or cqicb. - */ -static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - struct cqicb *cqicb = &rx_ring->cqicb; - void *shadow_reg = qdev->rx_ring_shadow_reg_area + - (rx_ring->cq_id * RX_RING_SHADOW_SPACE); - u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + - (rx_ring->cq_id * RX_RING_SHADOW_SPACE); - void __iomem *doorbell_area = - qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); - int err = 0; - u16 bq_len; - u64 tmp; - __le64 *base_indirect_ptr; - int page_entries; - - /* Set up the shadow registers for this ring. */ - rx_ring->prod_idx_sh_reg = shadow_reg; - rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; - *rx_ring->prod_idx_sh_reg = 0; - shadow_reg += sizeof(u64); - shadow_reg_dma += sizeof(u64); - rx_ring->lbq_base_indirect = shadow_reg; - rx_ring->lbq_base_indirect_dma = shadow_reg_dma; - shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - rx_ring->sbq_base_indirect = shadow_reg; - rx_ring->sbq_base_indirect_dma = shadow_reg_dma; - - /* PCI doorbell mem area + 0x00 for consumer index register */ - rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; - rx_ring->cnsmr_idx = 0; - rx_ring->curr_entry = rx_ring->cq_base; - - /* PCI doorbell mem area + 0x04 for valid register */ - rx_ring->valid_db_reg = doorbell_area + 0x04; - - /* PCI doorbell mem area + 0x18 for large buffer consumer */ - rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); - - /* PCI doorbell mem area + 0x1c */ - rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); - - memset((void *)cqicb, 0, sizeof(struct cqicb)); - cqicb->msix_vect = rx_ring->irq; - - bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; - cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); - - cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); - - cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); - - /* - * Set up the control block load flags. - */ - cqicb->flags = FLAGS_LC | /* Load queue base address */ - FLAGS_LV | /* Load MSI-X vector */ - FLAGS_LI; /* Load irq delay values */ - if (rx_ring->lbq_len) { - cqicb->flags |= FLAGS_LL; /* Load lbq values */ - tmp = (u64)rx_ring->lbq_base_dma; - base_indirect_ptr = rx_ring->lbq_base_indirect; - page_entries = 0; - do { - *base_indirect_ptr = cpu_to_le64(tmp); - tmp += DB_PAGE_SIZE; - base_indirect_ptr++; - page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - cqicb->lbq_addr = - cpu_to_le64(rx_ring->lbq_base_indirect_dma); - bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : - (u16) rx_ring->lbq_buf_size; - cqicb->lbq_buf_size = cpu_to_le16(bq_len); - bq_len = (rx_ring->lbq_len == 65536) ? 
0 : - (u16) rx_ring->lbq_len; - cqicb->lbq_len = cpu_to_le16(bq_len); - rx_ring->lbq_prod_idx = 0; - rx_ring->lbq_curr_idx = 0; - rx_ring->lbq_clean_idx = 0; - rx_ring->lbq_free_cnt = rx_ring->lbq_len; - } - if (rx_ring->sbq_len) { - cqicb->flags |= FLAGS_LS; /* Load sbq values */ - tmp = (u64)rx_ring->sbq_base_dma; - base_indirect_ptr = rx_ring->sbq_base_indirect; - page_entries = 0; - do { - *base_indirect_ptr = cpu_to_le64(tmp); - tmp += DB_PAGE_SIZE; - base_indirect_ptr++; - page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); - cqicb->sbq_addr = - cpu_to_le64(rx_ring->sbq_base_indirect_dma); - cqicb->sbq_buf_size = - cpu_to_le16((u16)(rx_ring->sbq_buf_size)); - bq_len = (rx_ring->sbq_len == 65536) ? 0 : - (u16) rx_ring->sbq_len; - cqicb->sbq_len = cpu_to_le16(bq_len); - rx_ring->sbq_prod_idx = 0; - rx_ring->sbq_curr_idx = 0; - rx_ring->sbq_clean_idx = 0; - rx_ring->sbq_free_cnt = rx_ring->sbq_len; - } - switch (rx_ring->type) { - case TX_Q: - cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); - cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); - break; - case RX_Q: - /* Inbound completion handling rx_rings run in - * separate NAPI contexts. - */ - netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, - 64); - cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); - cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); - break; - default: - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Invalid rx_ring->type = %d.\n", rx_ring->type); - } - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Initializing rx work queue.\n"); - err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), - CFG_LCQ, rx_ring->cq_id); - if (err) { - netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); - return err; - } - return err; -} - -static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) -{ - struct wqicb *wqicb = (struct wqicb *)tx_ring; - void __iomem *doorbell_area = - qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); - void *shadow_reg = qdev->tx_ring_shadow_reg_area + - (tx_ring->wq_id * sizeof(u64)); - u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + - (tx_ring->wq_id * sizeof(u64)); - int err = 0; - - /* - * Assign doorbell registers for this tx_ring. - */ - /* TX PCI doorbell mem area for tx producer index */ - tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; - tx_ring->prod_idx = 0; - /* TX PCI doorbell mem area + 0x04 */ - tx_ring->valid_db_reg = doorbell_area + 0x04; - - /* - * Assign shadow registers for this tx_ring. 
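- *
- * Aside (illustration only): the (x == 65536 ? 0 : x) encodings
- * above exist because the queue-length fields are 16 bits wide; a
- * full 64K-entry queue cannot be represented, so 0 stands in for
- * it (the hardware interpretation is inferred from the pattern).
- * Standalone:
- *
- *	#include <stdio.h>
- *	#include <stdint.h>
- *
- *	static uint16_t encode_qlen(uint32_t len)
- *	{
- *		return (len == 65536) ? 0 : (uint16_t)len;
- *	}
- *
- *	int main(void)
- *	{
- *		/* prints "1024 0" */
- *		printf("%u %u\n", encode_qlen(1024), encode_qlen(65536));
- *		return 0;
- *	}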
- */
-	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
-	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
-
-	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
-	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
-				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
-	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
-	wqicb->rid = 0;
-	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
-
-	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
-
-	ql_init_tx_ring(qdev, tx_ring);
-
-	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
-			   (u16) tx_ring->wq_id);
-	if (err) {
-		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
-		return err;
-	}
-	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
-		     "Successfully loaded WQICB.\n");
-	return err;
-}
-
-static void ql_disable_msix(struct ql_adapter *qdev)
-{
-	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
-		pci_disable_msix(qdev->pdev);
-		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
-		kfree(qdev->msi_x_entry);
-		qdev->msi_x_entry = NULL;
-	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
-		pci_disable_msi(qdev->pdev);
-		clear_bit(QL_MSI_ENABLED, &qdev->flags);
-	}
-}
-
-/* We start by trying to get the number of vectors
- * stored in qdev->intr_count. If we don't get that
- * many then we reduce the count and try again.
- */
-static void ql_enable_msix(struct ql_adapter *qdev)
-{
-	int i, err;
-
-	/* Get the MSIX vectors. */
-	if (qlge_irq_type == MSIX_IRQ) {
-		/* Try to alloc space for the msix struct,
-		 * if it fails then go to MSI/legacy.
-		 */
-		qdev->msi_x_entry = kcalloc(qdev->intr_count,
-					    sizeof(struct msix_entry),
-					    GFP_KERNEL);
-		if (!qdev->msi_x_entry) {
-			qlge_irq_type = MSI_IRQ;
-			goto msi;
-		}
-
-		for (i = 0; i < qdev->intr_count; i++)
-			qdev->msi_x_entry[i].entry = i;
-
-		/* Loop to get our vectors. We start with
-		 * what we want and settle for what we get.
-		 */
-		do {
-			err = pci_enable_msix(qdev->pdev,
-				qdev->msi_x_entry, qdev->intr_count);
-			if (err > 0)
-				qdev->intr_count = err;
-		} while (err > 0);
-
-		if (err < 0) {
-			kfree(qdev->msi_x_entry);
-			qdev->msi_x_entry = NULL;
-			netif_warn(qdev, ifup, qdev->ndev,
-				   "MSI-X Enable failed, trying MSI.\n");
-			qdev->intr_count = 1;
-			qlge_irq_type = MSI_IRQ;
-		} else if (err == 0) {
-			set_bit(QL_MSIX_ENABLED, &qdev->flags);
-			netif_info(qdev, ifup, qdev->ndev,
-				   "MSI-X Enabled, got %d vectors.\n",
-				   qdev->intr_count);
-			return;
-		}
-	}
-msi:
-	qdev->intr_count = 1;
-	if (qlge_irq_type == MSI_IRQ) {
-		if (!pci_enable_msi(qdev->pdev)) {
-			set_bit(QL_MSI_ENABLED, &qdev->flags);
-			netif_info(qdev, ifup, qdev->ndev,
-				   "Running with MSI interrupts.\n");
-			return;
-		}
-	}
-	qlge_irq_type = LEG_IRQ;
-	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
-		     "Running with legacy interrupts.\n");
-}
-
-/* Each vector services 1 RSS ring and 1 or more
- * TX completion rings. This function loops through
- * the TX completion rings and assigns the vector that
- * will service it. An example would be if there are
- * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
- * This would mean that vector 0 would service RSS ring 0
- * and TX completion rings 0,1,2 and 3. Vector 1 would
- * service RSS ring 1 and TX completion rings 4,5,6 and 7.
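- *
- * Aside (illustration only): the mapping is integer division. A
- * standalone check of the example above:
- *
- *	#include <stdio.h>
- *
- *	int main(void)
- *	{
- *		int intr_count = 2, tx_rings = 8, i;
- *		int per_vec = tx_rings / intr_count;	/* 4 */
- *		/* RSS ring i is serviced by vector i itself */
- *		for (i = 0; i < tx_rings; i++)
- *			printf("tx ring %d -> vector %d\n", i, i / per_vec);
- *		return 0;	/* rings 0-3 -> 0, rings 4-7 -> 1 */
- *	}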
- */ -static void ql_set_tx_vect(struct ql_adapter *qdev) -{ - int i, j, vect; - u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { - /* Assign irq vectors to TX rx_rings.*/ - for (vect = 0, j = 0, i = qdev->rss_ring_count; - i < qdev->rx_ring_count; i++) { - if (j == tx_rings_per_vector) { - vect++; - j = 0; - } - qdev->rx_ring[i].irq = vect; - j++; - } - } else { - /* For single vector all rings have an irq - * of zero. - */ - for (i = 0; i < qdev->rx_ring_count; i++) - qdev->rx_ring[i].irq = 0; - } -} - -/* Set the interrupt mask for this vector. Each vector - * will service 1 RSS ring and 1 or more TX completion - * rings. This function sets up a bit mask per vector - * that indicates which rings it services. - */ -static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx) -{ - int j, vect = ctx->intr; - u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { - /* Add the RSS ring serviced by this vector - * to the mask. - */ - ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); - /* Add the TX ring(s) serviced by this vector - * to the mask. */ - for (j = 0; j < tx_rings_per_vector; j++) { - ctx->irq_mask |= - (1 << qdev->rx_ring[qdev->rss_ring_count + - (vect * tx_rings_per_vector) + j].cq_id); - } - } else { - /* For single vector we just shift each queue's - * ID into the mask. - */ - for (j = 0; j < qdev->rx_ring_count; j++) - ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); - } -} - -/* - * Here we build the intr_context structures based on - * our rx_ring count and intr vector count. - * The intr_context structure is used to hook each vector - * to possibly different handlers. - */ -static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) -{ - int i = 0; - struct intr_context *intr_context = &qdev->intr_context[0]; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { - /* Each rx_ring has it's - * own intr_context since we have separate - * vectors for each queue. - */ - for (i = 0; i < qdev->intr_count; i++, intr_context++) { - qdev->rx_ring[i].irq = i; - intr_context->intr = i; - intr_context->qdev = qdev; - /* Set up this vector's bit-mask that indicates - * which queues it services. - */ - ql_set_irq_mask(qdev, intr_context); - /* - * We set up each vectors enable/disable/read bits so - * there's no bit/mask calculations in the critical path. - */ - intr_context->intr_en_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD - | i; - intr_context->intr_dis_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | - INTR_EN_IHD | i; - intr_context->intr_read_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | - i; - if (i == 0) { - /* The first vector/queue handles - * broadcast/multicast, fatal errors, - * and firmware events. This in addition - * to normal inbound NAPI processing. - */ - intr_context->handler = qlge_isr; - sprintf(intr_context->name, "%s-rx-%d", - qdev->ndev->name, i); - } else { - /* - * Inbound queues handle unicast frames only. - */ - intr_context->handler = qlge_msix_rx_isr; - sprintf(intr_context->name, "%s-rx-%d", - qdev->ndev->name, i); - } - } - } else { - /* - * All rx_rings use the same intr_context since - * there is only one vector. 
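- *
- * Aside (illustration only): the mask is one bit per completion
- * queue the vector services. Reusing the 2-vector, 8-TX-ring
- * example from above, vector 1 ends up with cq 1 plus cqs 6..9:
- *
- *	#include <stdio.h>
- *
- *	int main(void)
- *	{
- *		int rss_rings = 2, tx_per_vec = 4, vect = 1, j;
- *		unsigned int mask = 1u << vect;	/* its own RSS ring */
- *		for (j = 0; j < tx_per_vec; j++)
- *			mask |= 1u << (rss_rings + vect * tx_per_vec + j);
- *		printf("0x%x\n", mask);	/* prints 0x3c2 */
- *		return 0;
- *	}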
- */ - intr_context->intr = 0; - intr_context->qdev = qdev; - /* - * We set up each vectors enable/disable/read bits so - * there's no bit/mask calculations in the critical path. - */ - intr_context->intr_en_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; - intr_context->intr_dis_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_DISABLE; - intr_context->intr_read_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; - /* - * Single interrupt means one handler for all rings. - */ - intr_context->handler = qlge_isr; - sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); - /* Set up this vector's bit-mask that indicates - * which queues it services. In this case there is - * a single vector so it will service all RSS and - * TX completion rings. - */ - ql_set_irq_mask(qdev, intr_context); - } - /* Tell the TX completion rings which MSIx vector - * they will be using. - */ - ql_set_tx_vect(qdev); -} - -static void ql_free_irq(struct ql_adapter *qdev) -{ - int i; - struct intr_context *intr_context = &qdev->intr_context[0]; - - for (i = 0; i < qdev->intr_count; i++, intr_context++) { - if (intr_context->hooked) { - if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { - free_irq(qdev->msi_x_entry[i].vector, - &qdev->rx_ring[i]); - netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev, - "freeing msix interrupt %d.\n", i); - } else { - free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); - netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev, - "freeing msi interrupt %d.\n", i); - } - } - } - ql_disable_msix(qdev); -} - -static int ql_request_irq(struct ql_adapter *qdev) -{ - int i; - int status = 0; - struct pci_dev *pdev = qdev->pdev; - struct intr_context *intr_context = &qdev->intr_context[0]; - - ql_resolve_queues_to_irqs(qdev); - - for (i = 0; i < qdev->intr_count; i++, intr_context++) { - atomic_set(&intr_context->irq_cnt, 0); - if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { - status = request_irq(qdev->msi_x_entry[i].vector, - intr_context->handler, - 0, - intr_context->name, - &qdev->rx_ring[i]); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed request for MSIX interrupt %d.\n", - i); - goto err_irq; - } else { - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Hooked intr %d, queue type %s, with name %s.\n", - i, - qdev->rx_ring[i].type == DEFAULT_Q ? - "DEFAULT_Q" : - qdev->rx_ring[i].type == TX_Q ? - "TX_Q" : - qdev->rx_ring[i].type == RX_Q ? - "RX_Q" : "", - intr_context->name); - } - } else { - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "trying msi or legacy interrupts.\n"); - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s: irq = %d.\n", __func__, pdev->irq); - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s: context->name = %s.\n", __func__, - intr_context->name); - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s: dev_id = 0x%p.\n", __func__, - &qdev->rx_ring[0]); - status = - request_irq(pdev->irq, qlge_isr, - test_bit(QL_MSI_ENABLED, - &qdev-> - flags) ? 0 : IRQF_SHARED, - intr_context->name, &qdev->rx_ring[0]); - if (status) - goto err_irq; - - netif_err(qdev, ifup, qdev->ndev, - "Hooked intr %d, queue type %s, with name %s.\n", - i, - qdev->rx_ring[0].type == DEFAULT_Q ? - "DEFAULT_Q" : - qdev->rx_ring[0].type == TX_Q ? "TX_Q" : - qdev->rx_ring[0].type == RX_Q ? 
"RX_Q" : "", - intr_context->name); - } - intr_context->hooked = 1; - } - return status; -err_irq: - netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n"); - ql_free_irq(qdev); - return status; -} - -static int ql_start_rss(struct ql_adapter *qdev) -{ - static const u8 init_hash_seed[] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, - 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, - 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, - 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, - 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa - }; - struct ricb *ricb = &qdev->ricb; - int status = 0; - int i; - u8 *hash_id = (u8 *) ricb->hash_cq_id; - - memset((void *)ricb, 0, sizeof(*ricb)); - - ricb->base_cq = RSS_L4K; - ricb->flags = - (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); - ricb->mask = cpu_to_le16((u16)(0x3ff)); - - /* - * Fill out the Indirection Table. - */ - for (i = 0; i < 1024; i++) - hash_id[i] = (i & (qdev->rss_ring_count - 1)); - - memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); - memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); - - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n"); - - status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); - return status; - } - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Successfully loaded RICB.\n"); - return status; -} - -static int ql_clear_routing_entries(struct ql_adapter *qdev) -{ - int i, status = 0; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return status; - /* Clear all the entries in the routing table. */ - for (i = 0; i < 16; i++) { - status = ql_set_routing_reg(qdev, i, 0, 0); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register for CAM packets.\n"); - break; - } - } - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); - return status; -} - -/* Initialize the frame-to-queue routing. */ -static int ql_route_initialize(struct ql_adapter *qdev) -{ - int status = 0; - - /* Clear all the entries in the routing table. */ - status = ql_clear_routing_entries(qdev); - if (status) - return status; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return status; - - status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, - RT_IDX_IP_CSUM_ERR, 1); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register " - "for IP CSUM error packets.\n"); - goto exit; - } - status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, - RT_IDX_TU_CSUM_ERR, 1); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register " - "for TCP/UDP CSUM error packets.\n"); - goto exit; - } - status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init routing register for broadcast packets.\n"); - goto exit; - } - /* If we have more than one inbound queue, then turn on RSS in the - * routing block. 
- */
-	if (qdev->rss_ring_count > 1) {
-		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
-					    RT_IDX_RSS_MATCH, 1);
-		if (status) {
-			netif_err(qdev, ifup, qdev->ndev,
-				  "Failed to init routing register for MATCH RSS packets.\n");
-			goto exit;
-		}
-	}
-
-	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
-				    RT_IDX_CAM_HIT, 1);
-	if (status)
-		netif_err(qdev, ifup, qdev->ndev,
-			  "Failed to init routing register for CAM packets.\n");
-exit:
-	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
-	return status;
-}
-
-int ql_cam_route_initialize(struct ql_adapter *qdev)
-{
-	int status, set;
-
-	/* Check if the link is up and use that to
-	 * determine if we are setting or clearing
-	 * the MAC address in the CAM.
-	 */
-	set = ql_read32(qdev, STS);
-	set &= qdev->port_link_up;
-	status = ql_set_mac_addr(qdev, set);
-	if (status) {
-		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
-		return status;
-	}
-
-	status = ql_route_initialize(qdev);
-	if (status)
-		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
-
-	return status;
-}
-
-static int ql_adapter_initialize(struct ql_adapter *qdev)
-{
-	u32 value, mask;
-	int i;
-	int status = 0;
-
-	/*
-	 * Set up the System register to halt on errors.
-	 */
-	value = SYS_EFE | SYS_FAE;
-	mask = value << 16;
-	ql_write32(qdev, SYS, mask | value);
-
-	/* Set the default queue, and VLAN behavior. */
-	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
-	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
-	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
-
-	/* Enable the MPI interrupt. */
-	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
-
-	/* Enable the function, set pagesize, enable error checking. */
-	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
-		FSC_EC | FSC_VM_PAGE_4K;
-	value |= SPLT_SETTING;
-
-	/* Set/clear header splitting. */
-	mask = FSC_VM_PAGESIZE_MASK |
-		FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
-	ql_write32(qdev, FSC, mask | value);
-
-	ql_write32(qdev, SPLT_HDR, SPLT_LEN);
-
-	/* Set RX packet routing to use the port/pci function on which the
-	 * packet arrived, in addition to the usual frame routing.
-	 * This is helpful on bonding where both interfaces can have
-	 * the same MAC address.
-	 */
-	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
-	/* Reroute all packets to our interface.
-	 * They may have been routed to MPI firmware
-	 * due to WOL.
-	 */
-	value = ql_read32(qdev, MGMT_RCV_CFG);
-	value &= ~MGMT_RCV_CFG_RM;
-	mask = 0xffff0000;
-
-	/* Sticky reg needs clearing due to WOL. */
-	ql_write32(qdev, MGMT_RCV_CFG, mask);
-	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
-
-	/* Default WOL is enabled on Mezz cards */
-	if (qdev->pdev->subsystem_device == 0x0068 ||
-	    qdev->pdev->subsystem_device == 0x0180)
-		qdev->wol = WAKE_MAGIC;
-
-	/* Start up the rx queues. */
-	for (i = 0; i < qdev->rx_ring_count; i++) {
-		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
-		if (status) {
-			netif_err(qdev, ifup, qdev->ndev,
-				  "Failed to start rx ring[%d].\n", i);
-			return status;
-		}
-	}
-
-	/* If there is more than one inbound completion queue
-	 * then download a RICB to configure RSS.
-	 */
-	if (qdev->rss_ring_count > 1) {
-		status = ql_start_rss(qdev);
-		if (status) {
-			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
-			return status;
-		}
-	}
-
-	/* Start up the tx queues.
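- *
- * Aside (illustration only): several registers above are written as
- * "mask | value", with the upper 16 bits apparently selecting which
- * of the lower 16 bits the write may change (inferred from the
- * value << 16 pattern, not from a datasheet). A software model:
- *
- *	#include <stdio.h>
- *
- *	static unsigned int reg;	/* stands in for a NIC register */
- *
- *	static void write_masked(unsigned int mv)
- *	{
- *		unsigned int mask = mv >> 16, val = mv & 0xffff;
- *		reg = (reg & ~mask) | (val & mask);
- *	}
- *
- *	int main(void)
- *	{
- *		unsigned int value = 0x0005, mask = value << 16;
- *		write_masked(mask | value);	/* touch bits 0 and 2 */
- *		printf("0x%x\n", reg);		/* prints 0x5 */
- *		return 0;
- *	}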
*/ - for (i = 0; i < qdev->tx_ring_count; i++) { - status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to start tx ring[%d].\n", i); - return status; - } - } - - /* Initialize the port and set the max framesize. */ - status = qdev->nic_ops->port_initialize(qdev); - if (status) - netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n"); - - /* Set up the MAC address and frame routing filter. */ - status = ql_cam_route_initialize(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init CAM/Routing tables.\n"); - return status; - } - - /* Start NAPI for the RSS queues. */ - for (i = 0; i < qdev->rss_ring_count; i++) { - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Enabling NAPI for rx_ring[%d].\n", i); - napi_enable(&qdev->rx_ring[i].napi); - } - - return status; -} - -/* Issue soft reset to chip. */ -static int ql_adapter_reset(struct ql_adapter *qdev) -{ - u32 value; - int status = 0; - unsigned long end_jiffies; - - /* Clear all the entries in the routing table. */ - status = ql_clear_routing_entries(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n"); - return status; - } - - end_jiffies = jiffies + - max((unsigned long)1, usecs_to_jiffies(30)); - - /* Check if bit is set then skip the mailbox command and - * clear the bit, else we are in normal reset process. - */ - if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) { - /* Stop management traffic. */ - ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); - - /* Wait for the NIC and MGMNT FIFOs to empty. */ - ql_wait_fifo_empty(qdev); - } else - clear_bit(QL_ASIC_RECOVERY, &qdev->flags); - - ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); - - do { - value = ql_read32(qdev, RST_FO); - if ((value & RST_FO_FR) == 0) - break; - cpu_relax(); - } while (time_before(jiffies, end_jiffies)); - - if (value & RST_FO_FR) { - netif_err(qdev, ifdown, qdev->ndev, - "ETIMEDOUT!!! errored out of resetting the chip!\n"); - status = -ETIMEDOUT; - } - - /* Resume management traffic. */ - ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); - return status; -} - -static void ql_display_dev_info(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - netif_info(qdev, probe, qdev->ndev, - "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " - "XG Roll = %d, XG Rev = %d.\n", - qdev->func, - qdev->port, - qdev->chip_rev_id & 0x0000000f, - qdev->chip_rev_id >> 4 & 0x0000000f, - qdev->chip_rev_id >> 8 & 0x0000000f, - qdev->chip_rev_id >> 12 & 0x0000000f); - netif_info(qdev, probe, qdev->ndev, - "MAC address %pM\n", ndev->dev_addr); -} - -static int ql_wol(struct ql_adapter *qdev) -{ - int status = 0; - u32 wol = MB_WOL_DISABLE; - - /* The CAM is still intact after a reset, but if we - * are doing WOL, then we may need to program the - * routing regs. We would also need to issue the mailbox - * commands to instruct the MPI what to do per the ethtool - * settings. - */ - - if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | - WAKE_MCAST | WAKE_BCAST)) { - netif_err(qdev, ifdown, qdev->ndev, - "Unsupported WOL paramter. 
qdev->wol = 0x%x.\n", - qdev->wol); - return -EINVAL; - } - - if (qdev->wol & WAKE_MAGIC) { - status = ql_mb_wol_set_magic(qdev, 1); - if (status) { - netif_err(qdev, ifdown, qdev->ndev, - "Failed to set magic packet on %s.\n", - qdev->ndev->name); - return status; - } else - netif_info(qdev, drv, qdev->ndev, - "Enabled magic packet successfully on %s.\n", - qdev->ndev->name); - - wol |= MB_WOL_MAGIC_PKT; - } - - if (qdev->wol) { - wol |= MB_WOL_MODE_ON; - status = ql_mb_wol_mode(qdev, wol); - netif_err(qdev, drv, qdev->ndev, - "WOL %s (wol code 0x%x) on %s\n", - (status == 0) ? "Successfully set" : "Failed", - wol, qdev->ndev->name); - } - - return status; -} - -static void ql_cancel_all_work_sync(struct ql_adapter *qdev) -{ - - /* Don't kill the reset worker thread if we - * are in the process of recovery. - */ - if (test_bit(QL_ADAPTER_UP, &qdev->flags)) - cancel_delayed_work_sync(&qdev->asic_reset_work); - cancel_delayed_work_sync(&qdev->mpi_reset_work); - cancel_delayed_work_sync(&qdev->mpi_work); - cancel_delayed_work_sync(&qdev->mpi_idc_work); - cancel_delayed_work_sync(&qdev->mpi_core_to_log); - cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); -} - -static int ql_adapter_down(struct ql_adapter *qdev) -{ - int i, status = 0; - - ql_link_off(qdev); - - ql_cancel_all_work_sync(qdev); - - for (i = 0; i < qdev->rss_ring_count; i++) - napi_disable(&qdev->rx_ring[i].napi); - - clear_bit(QL_ADAPTER_UP, &qdev->flags); - - ql_disable_interrupts(qdev); - - ql_tx_ring_clean(qdev); - - /* Call netif_napi_del() from common point. - */ - for (i = 0; i < qdev->rss_ring_count; i++) - netif_napi_del(&qdev->rx_ring[i].napi); - - status = ql_adapter_reset(qdev); - if (status) - netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", - qdev->func); - ql_free_rx_buffers(qdev); - - return status; -} - -static int ql_adapter_up(struct ql_adapter *qdev) -{ - int err = 0; - - err = ql_adapter_initialize(qdev); - if (err) { - netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); - goto err_init; - } - set_bit(QL_ADAPTER_UP, &qdev->flags); - ql_alloc_rx_buffers(qdev); - /* If the port is initialized and the - * link is up the turn on the carrier. - */ - if ((ql_read32(qdev, STS) & qdev->port_init) && - (ql_read32(qdev, STS) & qdev->port_link_up)) - ql_link_on(qdev); - /* Restore rx mode. */ - clear_bit(QL_ALLMULTI, &qdev->flags); - clear_bit(QL_PROMISCUOUS, &qdev->flags); - qlge_set_multicast_list(qdev->ndev); - - /* Restore vlan setting. */ - qlge_restore_vlan(qdev); - - ql_enable_interrupts(qdev); - ql_enable_all_completion_interrupts(qdev); - netif_tx_start_all_queues(qdev->ndev); - - return 0; -err_init: - ql_adapter_reset(qdev); - return err; -} - -static void ql_release_adapter_resources(struct ql_adapter *qdev) -{ - ql_free_mem_resources(qdev); - ql_free_irq(qdev); -} - -static int ql_get_adapter_resources(struct ql_adapter *qdev) -{ - int status = 0; - - if (ql_alloc_mem_resources(qdev)) { - netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); - return -ENOMEM; - } - status = ql_request_irq(qdev); - return status; -} - -static int qlge_close(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - /* If we hit pci_channel_io_perm_failure - * failure condition, then we already - * brought the adapter down. - */ - if (test_bit(QL_EEH_FATAL, &qdev->flags)) { - netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); - clear_bit(QL_EEH_FATAL, &qdev->flags); - return 0; - } - - /* - * Wait for device to recover from a reset. 
- * (Rarely happens, but possible.) - */ - while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) - msleep(1); - ql_adapter_down(qdev); - ql_release_adapter_resources(qdev); - return 0; -} - -static int ql_configure_rings(struct ql_adapter *qdev) -{ - int i; - struct rx_ring *rx_ring; - struct tx_ring *tx_ring; - int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); - unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? - LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; - - qdev->lbq_buf_order = get_order(lbq_buf_len); - - /* In a perfect world we have one RSS ring for each CPU - * and each has it's own vector. To do that we ask for - * cpu_cnt vectors. ql_enable_msix() will adjust the - * vector count to what we actually get. We then - * allocate an RSS ring for each. - * Essentially, we are doing min(cpu_count, msix_vector_count). - */ - qdev->intr_count = cpu_cnt; - ql_enable_msix(qdev); - /* Adjust the RSS ring count to the actual vector count. */ - qdev->rss_ring_count = qdev->intr_count; - qdev->tx_ring_count = cpu_cnt; - qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; - - for (i = 0; i < qdev->tx_ring_count; i++) { - tx_ring = &qdev->tx_ring[i]; - memset((void *)tx_ring, 0, sizeof(*tx_ring)); - tx_ring->qdev = qdev; - tx_ring->wq_id = i; - tx_ring->wq_len = qdev->tx_ring_size; - tx_ring->wq_size = - tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); - - /* - * The completion queue ID for the tx rings start - * immediately after the rss rings. - */ - tx_ring->cq_id = qdev->rss_ring_count + i; - } - - for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - memset((void *)rx_ring, 0, sizeof(*rx_ring)); - rx_ring->qdev = qdev; - rx_ring->cq_id = i; - rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ - if (i < qdev->rss_ring_count) { - /* - * Inbound (RSS) queues. - */ - rx_ring->cq_len = qdev->rx_ring_size; - rx_ring->cq_size = - rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); - rx_ring->lbq_len = NUM_LARGE_BUFFERS; - rx_ring->lbq_size = - rx_ring->lbq_len * sizeof(__le64); - rx_ring->lbq_buf_size = (u16)lbq_buf_len; - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "lbq_buf_size %d, order = %d\n", - rx_ring->lbq_buf_size, - qdev->lbq_buf_order); - rx_ring->sbq_len = NUM_SMALL_BUFFERS; - rx_ring->sbq_size = - rx_ring->sbq_len * sizeof(__le64); - rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; - rx_ring->type = RX_Q; - } else { - /* - * Outbound queue handles outbound completions only. - */ - /* outbound cq is same size as tx_ring it services. */ - rx_ring->cq_len = qdev->tx_ring_size; - rx_ring->cq_size = - rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); - rx_ring->lbq_len = 0; - rx_ring->lbq_size = 0; - rx_ring->lbq_buf_size = 0; - rx_ring->sbq_len = 0; - rx_ring->sbq_size = 0; - rx_ring->sbq_buf_size = 0; - rx_ring->type = TX_Q; - } - } - return 0; -} - -static int qlge_open(struct net_device *ndev) -{ - int err = 0; - struct ql_adapter *qdev = netdev_priv(ndev); - - err = ql_adapter_reset(qdev); - if (err) - return err; - - err = ql_configure_rings(qdev); - if (err) - return err; - - err = ql_get_adapter_resources(qdev); - if (err) - goto error_up; - - err = ql_adapter_up(qdev); - if (err) - goto error_up; - - return err; - -error_up: - ql_release_adapter_resources(qdev); - return err; -} - -static int ql_change_rx_buffers(struct ql_adapter *qdev) -{ - struct rx_ring *rx_ring; - int i, status; - u32 lbq_buf_len; - - /* Wait for an outstanding reset to complete. 
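To make the ring arithmetic in ql_configure_rings() above concrete: the driver asks for one vector per CPU (capped at MAX_CPUS), lets ql_enable_msix() trim that to what the bus actually grants, sizes the RSS ring set to the granted vector count, and then numbers the TX completion queues starting right after the RSS ones. A standalone sketch of just that bookkeeping, with msix_granted standing in for the negotiated vector count:

	#include <stdio.h>

	#define MAX_CPUS 8

	static int min_int(int a, int b) { return a < b ? a : b; }

	int main(void)
	{
		int online_cpus = 16;
		int msix_granted = 4;	/* assumed result of vector negotiation */

		int cpu_cnt = min_int(MAX_CPUS, online_cpus);
		int intr_count = min_int(cpu_cnt, msix_granted);

		int rss_ring_count = intr_count;	/* one RSS ring per vector */
		int tx_ring_count = cpu_cnt;
		int rx_ring_count = tx_ring_count + rss_ring_count;

		/* TX completion queue IDs start right after the RSS CQs. */
		for (int i = 0; i < tx_ring_count; i++)
			printf("tx_ring[%d] -> cq_id %d\n", i, rss_ring_count + i);

		printf("rx_ring_count = %d\n", rx_ring_count);
		return 0;
	}
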
*/ - if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { - int i = 3; - while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { - netif_err(qdev, ifup, qdev->ndev, - "Waiting for adapter UP...\n"); - ssleep(1); - } - - if (!i) { - netif_err(qdev, ifup, qdev->ndev, - "Timed out waiting for adapter UP\n"); - return -ETIMEDOUT; - } - } - - status = ql_adapter_down(qdev); - if (status) - goto error; - - /* Get the new rx buffer size. */ - lbq_buf_len = (qdev->ndev->mtu > 1500) ? - LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; - qdev->lbq_buf_order = get_order(lbq_buf_len); - - for (i = 0; i < qdev->rss_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - /* Set the new size. */ - rx_ring->lbq_buf_size = lbq_buf_len; - } - - status = ql_adapter_up(qdev); - if (status) - goto error; - - return status; -error: - netif_alert(qdev, ifup, qdev->ndev, - "Driver up/down cycle failed, closing device.\n"); - set_bit(QL_ADAPTER_UP, &qdev->flags); - dev_close(qdev->ndev); - return status; -} - -static int qlge_change_mtu(struct net_device *ndev, int new_mtu) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - - if (ndev->mtu == 1500 && new_mtu == 9000) { - netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); - } else if (ndev->mtu == 9000 && new_mtu == 1500) { - netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); - } else - return -EINVAL; - - queue_delayed_work(qdev->workqueue, - &qdev->mpi_port_cfg_work, 3*HZ); - - ndev->mtu = new_mtu; - - if (!netif_running(qdev->ndev)) { - return 0; - } - - status = ql_change_rx_buffers(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Changing MTU failed.\n"); - } - - return status; -} - -static struct net_device_stats *qlge_get_stats(struct net_device - *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct rx_ring *rx_ring = &qdev->rx_ring[0]; - struct tx_ring *tx_ring = &qdev->tx_ring[0]; - unsigned long pkts, mcast, dropped, errors, bytes; - int i; - - /* Get RX stats. */ - pkts = mcast = dropped = errors = bytes = 0; - for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { - pkts += rx_ring->rx_packets; - bytes += rx_ring->rx_bytes; - dropped += rx_ring->rx_dropped; - errors += rx_ring->rx_errors; - mcast += rx_ring->rx_multicast; - } - ndev->stats.rx_packets = pkts; - ndev->stats.rx_bytes = bytes; - ndev->stats.rx_dropped = dropped; - ndev->stats.rx_errors = errors; - ndev->stats.multicast = mcast; - - /* Get TX stats. */ - pkts = errors = bytes = 0; - for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { - pkts += tx_ring->tx_packets; - bytes += tx_ring->tx_bytes; - errors += tx_ring->tx_errors; - } - ndev->stats.tx_packets = pkts; - ndev->stats.tx_bytes = bytes; - ndev->stats.tx_errors = errors; - return &ndev->stats; -} - -static void qlge_set_multicast_list(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct netdev_hw_addr *ha; - int i, status; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return; - /* - * Set or clear promiscuous mode if a - * transition is taking place. 
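The promiscuous and all-multi paths that follow only touch the routing registers on a state transition, using the QL_PROMISCUOUS/QL_ALLMULTI flag bits as cached state and updating them only when the hardware write succeeds. A toy model of that transition-only update:

	#include <stdbool.h>
	#include <stdio.h>

	static bool promisc_enabled;	/* stands in for test_bit(QL_PROMISCUOUS) */

	static int set_routing_reg(bool on)
	{
		printf("routing register write: promiscuous %s\n", on ? "on" : "off");
		return 0;	/* pretend the hardware write succeeded */
	}

	static void update_promisc(bool want)
	{
		if (want == promisc_enabled)
			return;			/* no transition, no register I/O */
		if (set_routing_reg(want) == 0)
			promisc_enabled = want;	/* cache only on success */
	}

	int main(void)
	{
		update_promisc(true);	/* writes the register */
		update_promisc(true);	/* no-op */
		update_promisc(false);	/* writes the register */
		return 0;
	}
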
- */ - if (ndev->flags & IFF_PROMISC) { - if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to set promiscuous mode.\n"); - } else { - set_bit(QL_PROMISCUOUS, &qdev->flags); - } - } - } else { - if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to clear promiscuous mode.\n"); - } else { - clear_bit(QL_PROMISCUOUS, &qdev->flags); - } - } - } - - /* - * Set or clear all multicast mode if a - * transition is taking place. - */ - if ((ndev->flags & IFF_ALLMULTI) || - (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) { - if (!test_bit(QL_ALLMULTI, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to set all-multi mode.\n"); - } else { - set_bit(QL_ALLMULTI, &qdev->flags); - } - } - } else { - if (test_bit(QL_ALLMULTI, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to clear all-multi mode.\n"); - } else { - clear_bit(QL_ALLMULTI, &qdev->flags); - } - } - } - - if (!netdev_mc_empty(ndev)) { - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - goto exit; - i = 0; - netdev_for_each_mc_addr(ha, ndev) { - if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, - MAC_ADDR_TYPE_MULTI_MAC, i)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to loadmulticast address.\n"); - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - goto exit; - } - i++; - } - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - if (ql_set_routing_reg - (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to set multicast match mode.\n"); - } else { - set_bit(QL_ALLMULTI, &qdev->flags); - } - } -exit: - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); -} - -static int qlge_set_mac_address(struct net_device *ndev, void *p) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct sockaddr *addr = p; - int status; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); - /* Update local copy of current mac address. */ - memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, - MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); - if (status) - netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n"); - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - return status; -} - -static void qlge_tx_timeout(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - ql_queue_asic_error(qdev); -} - -static void ql_asic_reset_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, asic_reset_work.work); - int status; - rtnl_lock(); - status = ql_adapter_down(qdev); - if (status) - goto error; - - status = ql_adapter_up(qdev); - if (status) - goto error; - - /* Restore rx mode. 
*/ - clear_bit(QL_ALLMULTI, &qdev->flags); - clear_bit(QL_PROMISCUOUS, &qdev->flags); - qlge_set_multicast_list(qdev->ndev); - - rtnl_unlock(); - return; -error: - netif_alert(qdev, ifup, qdev->ndev, - "Driver up/down cycle failed, closing device\n"); - - set_bit(QL_ADAPTER_UP, &qdev->flags); - dev_close(qdev->ndev); - rtnl_unlock(); -} - -static const struct nic_operations qla8012_nic_ops = { - .get_flash = ql_get_8012_flash_params, - .port_initialize = ql_8012_port_initialize, -}; - -static const struct nic_operations qla8000_nic_ops = { - .get_flash = ql_get_8000_flash_params, - .port_initialize = ql_8000_port_initialize, -}; - -/* Find the pcie function number for the other NIC - * on this chip. Since both NIC functions share a - * common firmware we have the lowest enabled function - * do any common work. Examples would be resetting - * after a fatal firmware error, or doing a firmware - * coredump. - */ -static int ql_get_alt_pcie_func(struct ql_adapter *qdev) -{ - int status = 0; - u32 temp; - u32 nic_func1, nic_func2; - - status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, - &temp); - if (status) - return status; - - nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) & - MPI_TEST_NIC_FUNC_MASK); - nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) & - MPI_TEST_NIC_FUNC_MASK); - - if (qdev->func == nic_func1) - qdev->alt_func = nic_func2; - else if (qdev->func == nic_func2) - qdev->alt_func = nic_func1; - else - status = -EIO; - - return status; -} - -static int ql_get_board_info(struct ql_adapter *qdev) -{ - int status; - qdev->func = - (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; - if (qdev->func > 3) - return -EIO; - - status = ql_get_alt_pcie_func(qdev); - if (status) - return status; - - qdev->port = (qdev->func < qdev->alt_func) ? 
0 : 1; - if (qdev->port) { - qdev->xg_sem_mask = SEM_XGMAC1_MASK; - qdev->port_link_up = STS_PL1; - qdev->port_init = STS_PI1; - qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; - qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; - } else { - qdev->xg_sem_mask = SEM_XGMAC0_MASK; - qdev->port_link_up = STS_PL0; - qdev->port_init = STS_PI0; - qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; - qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; - } - qdev->chip_rev_id = ql_read32(qdev, REV_ID); - qdev->device_id = qdev->pdev->device; - if (qdev->device_id == QLGE_DEVICE_ID_8012) - qdev->nic_ops = &qla8012_nic_ops; - else if (qdev->device_id == QLGE_DEVICE_ID_8000) - qdev->nic_ops = &qla8000_nic_ops; - return status; -} - -static void ql_release_all(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - - if (qdev->workqueue) { - destroy_workqueue(qdev->workqueue); - qdev->workqueue = NULL; - } - - if (qdev->reg_base) - iounmap(qdev->reg_base); - if (qdev->doorbell_area) - iounmap(qdev->doorbell_area); - vfree(qdev->mpi_coredump); - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); -} - -static int __devinit ql_init_device(struct pci_dev *pdev, - struct net_device *ndev, int cards_found) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int err = 0; - - memset((void *)qdev, 0, sizeof(*qdev)); - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "PCI device enable failed.\n"); - return err; - } - - qdev->ndev = ndev; - qdev->pdev = pdev; - pci_set_drvdata(pdev, ndev); - - /* Set PCIe read request size */ - err = pcie_set_readrq(pdev, 4096); - if (err) { - dev_err(&pdev->dev, "Set readrq failed.\n"); - goto err_out1; - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) { - dev_err(&pdev->dev, "PCI region request failed.\n"); - return err; - } - - pci_set_master(pdev); - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - set_bit(QL_DMA64, &qdev->flags); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - } - - if (err) { - dev_err(&pdev->dev, "No usable DMA configuration.\n"); - goto err_out2; - } - - /* Set PCIe reset type for EEH to fundamental. 
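ql_get_alt_pcie_func() and ql_get_board_info() above derive everything from the ordering of the two NIC functions: the MPI test register carries both function numbers, and whichever function is lower-numbered becomes port 0. A compilable sketch of that mapping (the shift and mask constants are stand-ins for the real MPI_TEST_* definitions):

	#include <stdint.h>
	#include <stdio.h>

	#define NIC1_FUNC_SHIFT 0	/* assumed stand-ins, not the real */
	#define NIC2_FUNC_SHIFT 4	/* MPI_TEST_NIC*_FUNC_SHIFT values  */
	#define NIC_FUNC_MASK   0x7

	static int port_for_func(uint32_t test_cfg, uint32_t func, uint32_t *alt)
	{
		uint32_t f1 = (test_cfg >> NIC1_FUNC_SHIFT) & NIC_FUNC_MASK;
		uint32_t f2 = (test_cfg >> NIC2_FUNC_SHIFT) & NIC_FUNC_MASK;

		if (func == f1)
			*alt = f2;
		else if (func == f2)
			*alt = f1;
		else
			return -1;		/* not one of the two NIC functions */

		return func < *alt ? 0 : 1;	/* lower-numbered function is port 0 */
	}

	int main(void)
	{
		uint32_t alt;
		int port = port_for_func(0x30, 0, &alt);	/* functions 0 and 3 */

		printf("port %d, alternate function %u\n", port, alt);
		return 0;
	}
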
*/ - pdev->needs_freset = 1; - pci_save_state(pdev); - qdev->reg_base = - ioremap_nocache(pci_resource_start(pdev, 1), - pci_resource_len(pdev, 1)); - if (!qdev->reg_base) { - dev_err(&pdev->dev, "Register mapping failed.\n"); - err = -ENOMEM; - goto err_out2; - } - - qdev->doorbell_area_size = pci_resource_len(pdev, 3); - qdev->doorbell_area = - ioremap_nocache(pci_resource_start(pdev, 3), - pci_resource_len(pdev, 3)); - if (!qdev->doorbell_area) { - dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); - err = -ENOMEM; - goto err_out2; - } - - err = ql_get_board_info(qdev); - if (err) { - dev_err(&pdev->dev, "Register access failed.\n"); - err = -EIO; - goto err_out2; - } - qdev->msg_enable = netif_msg_init(debug, default_msg); - spin_lock_init(&qdev->hw_lock); - spin_lock_init(&qdev->stats_lock); - - if (qlge_mpi_coredump) { - qdev->mpi_coredump = - vmalloc(sizeof(struct ql_mpi_coredump)); - if (qdev->mpi_coredump == NULL) { - dev_err(&pdev->dev, "Coredump alloc failed.\n"); - err = -ENOMEM; - goto err_out2; - } - if (qlge_force_coredump) - set_bit(QL_FRC_COREDUMP, &qdev->flags); - } - /* make sure the EEPROM is good */ - err = qdev->nic_ops->get_flash(qdev); - if (err) { - dev_err(&pdev->dev, "Invalid FLASH.\n"); - goto err_out2; - } - - memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); - /* Keep local copy of current mac address. */ - memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); - - /* Set up the default ring sizes. */ - qdev->tx_ring_size = NUM_TX_RING_ENTRIES; - qdev->rx_ring_size = NUM_RX_RING_ENTRIES; - - /* Set up the coalescing parameters. */ - qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; - qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; - qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; - qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; - - /* - * Set up the operating parameters. 
- */ - qdev->workqueue = create_singlethread_workqueue(ndev->name); - INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); - INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); - INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); - INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); - INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); - INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); - init_completion(&qdev->ide_completion); - mutex_init(&qdev->mpi_mutex); - - if (!cards_found) { - dev_info(&pdev->dev, "%s\n", DRV_STRING); - dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", - DRV_NAME, DRV_VERSION); - } - return 0; -err_out2: - ql_release_all(pdev); -err_out1: - pci_disable_device(pdev); - return err; -} - -static const struct net_device_ops qlge_netdev_ops = { - .ndo_open = qlge_open, - .ndo_stop = qlge_close, - .ndo_start_xmit = qlge_send, - .ndo_change_mtu = qlge_change_mtu, - .ndo_get_stats = qlge_get_stats, - .ndo_set_multicast_list = qlge_set_multicast_list, - .ndo_set_mac_address = qlge_set_mac_address, - .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = qlge_tx_timeout, - .ndo_fix_features = qlge_fix_features, - .ndo_set_features = qlge_set_features, - .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, -}; - -static void ql_timer(unsigned long data) -{ - struct ql_adapter *qdev = (struct ql_adapter *)data; - u32 var = 0; - - var = ql_read32(qdev, STS); - if (pci_channel_offline(qdev->pdev)) { - netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); - return; - } - - mod_timer(&qdev->timer, jiffies + (5*HZ)); -} - -static int __devinit qlge_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_entry) -{ - struct net_device *ndev = NULL; - struct ql_adapter *qdev = NULL; - static int cards_found = 0; - int err = 0; - - ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), - min(MAX_CPUS, (int)num_online_cpus())); - if (!ndev) - return -ENOMEM; - - err = ql_init_device(pdev, ndev, cards_found); - if (err < 0) { - free_netdev(ndev); - return err; - } - - qdev = netdev_priv(ndev); - SET_NETDEV_DEV(ndev, &pdev->dev); - ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | - NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; - ndev->features = ndev->hw_features | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; - - if (test_bit(QL_DMA64, &qdev->flags)) - ndev->features |= NETIF_F_HIGHDMA; - - /* - * Set up net_device structure. 
- */ - ndev->tx_queue_len = qdev->tx_ring_size; - ndev->irq = pdev->irq; - - ndev->netdev_ops = &qlge_netdev_ops; - SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); - ndev->watchdog_timeo = 10 * HZ; - - err = register_netdev(ndev); - if (err) { - dev_err(&pdev->dev, "net device registration failed.\n"); - ql_release_all(pdev); - pci_disable_device(pdev); - return err; - } - /* Start up the timer to trigger EEH if - * the bus goes dead - */ - init_timer_deferrable(&qdev->timer); - qdev->timer.data = (unsigned long)qdev; - qdev->timer.function = ql_timer; - qdev->timer.expires = jiffies + (5*HZ); - add_timer(&qdev->timer); - ql_link_off(qdev); - ql_display_dev_info(ndev); - atomic_set(&qdev->lb_count, 0); - cards_found++; - return 0; -} - -netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev) -{ - return qlge_send(skb, ndev); -} - -int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) -{ - return ql_clean_inbound_rx_ring(rx_ring, budget); -} - -static void __devexit qlge_remove(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - del_timer_sync(&qdev->timer); - ql_cancel_all_work_sync(qdev); - unregister_netdev(ndev); - ql_release_all(pdev); - pci_disable_device(pdev); - free_netdev(ndev); -} - -/* Clean up resources without touching hardware. */ -static void ql_eeh_close(struct net_device *ndev) -{ - int i; - struct ql_adapter *qdev = netdev_priv(ndev); - - if (netif_carrier_ok(ndev)) { - netif_carrier_off(ndev); - netif_stop_queue(ndev); - } - - /* Disabling the timer */ - del_timer_sync(&qdev->timer); - ql_cancel_all_work_sync(qdev); - - for (i = 0; i < qdev->rss_ring_count; i++) - netif_napi_del(&qdev->rx_ring[i].napi); - - clear_bit(QL_ADAPTER_UP, &qdev->flags); - ql_tx_ring_clean(qdev); - ql_free_rx_buffers(qdev); - ql_release_adapter_resources(qdev); -} - -/* - * This callback is called by the PCI subsystem whenever - * a PCI bus error is detected. - */ -static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - - switch (state) { - case pci_channel_io_normal: - return PCI_ERS_RESULT_CAN_RECOVER; - case pci_channel_io_frozen: - netif_device_detach(ndev); - if (netif_running(ndev)) - ql_eeh_close(ndev); - pci_disable_device(pdev); - return PCI_ERS_RESULT_NEED_RESET; - case pci_channel_io_perm_failure: - dev_err(&pdev->dev, - "%s: pci_channel_io_perm_failure.\n", __func__); - ql_eeh_close(ndev); - set_bit(QL_EEH_FATAL, &qdev->flags); - return PCI_ERS_RESULT_DISCONNECT; - } - - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/* - * This callback is called after the PCI buss has been reset. - * Basically, this tries to restart the card from scratch. - * This is a shortened version of the device probe/discovery code, - * it resembles the first-half of the () routine. 
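qlge_io_error_detected() above is, at its core, a three-way mapping from PCI channel state to a recovery verdict, with the teardown work as a side effect. The shape of that mapping, with stand-in enums for the kernel's pci_channel_state and pci_ers_result_t:

	#include <stdio.h>

	/* Stand-ins for the kernel's pci_channel_state / pci_ers_result_t. */
	enum channel_state { IO_NORMAL, IO_FROZEN, IO_PERM_FAILURE };
	enum ers_result { CAN_RECOVER, NEED_RESET, DISCONNECT };

	static enum ers_result error_detected(enum channel_state state)
	{
		switch (state) {
		case IO_NORMAL:
			return CAN_RECOVER;	/* transient; no teardown needed */
		case IO_FROZEN:
			/* real driver: detach netdev, close, disable PCI device */
			return NEED_RESET;	/* ask the core for a slot reset */
		case IO_PERM_FAILURE:
			/* real driver: close and mark QL_EEH_FATAL */
			return DISCONNECT;	/* device is gone for good */
		}
		return NEED_RESET;		/* unknown states: request a reset */
	}

	int main(void)
	{
		printf("frozen -> %d\n", error_detected(IO_FROZEN));
		return 0;
	}
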
- */ -static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - - pdev->error_state = pci_channel_io_normal; - - pci_restore_state(pdev); - if (pci_enable_device(pdev)) { - netif_err(qdev, ifup, qdev->ndev, - "Cannot re-enable PCI device after reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - pci_set_master(pdev); - - if (ql_adapter_reset(qdev)) { - netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); - set_bit(QL_EEH_FATAL, &qdev->flags); - return PCI_ERS_RESULT_DISCONNECT; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static void qlge_io_resume(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - int err = 0; - - if (netif_running(ndev)) { - err = qlge_open(ndev); - if (err) { - netif_err(qdev, ifup, qdev->ndev, - "Device initialization failed after reset.\n"); - return; - } - } else { - netif_err(qdev, ifup, qdev->ndev, - "Device was not running prior to EEH.\n"); - } - mod_timer(&qdev->timer, jiffies + (5*HZ)); - netif_device_attach(ndev); -} - -static struct pci_error_handlers qlge_err_handler = { - .error_detected = qlge_io_error_detected, - .slot_reset = qlge_io_slot_reset, - .resume = qlge_io_resume, -}; - -static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - int err; - - netif_device_detach(ndev); - del_timer_sync(&qdev->timer); - - if (netif_running(ndev)) { - err = ql_adapter_down(qdev); - if (!err) - return err; - } - - ql_wol(qdev); - err = pci_save_state(pdev); - if (err) - return err; - - pci_disable_device(pdev); - - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -#ifdef CONFIG_PM -static int qlge_resume(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - int err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - err = pci_enable_device(pdev); - if (err) { - netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n"); - return err; - } - pci_set_master(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - if (netif_running(ndev)) { - err = ql_adapter_up(qdev); - if (err) - return err; - } - - mod_timer(&qdev->timer, jiffies + (5*HZ)); - netif_device_attach(ndev); - - return 0; -} -#endif /* CONFIG_PM */ - -static void qlge_shutdown(struct pci_dev *pdev) -{ - qlge_suspend(pdev, PMSG_SUSPEND); -} - -static struct pci_driver qlge_driver = { - .name = DRV_NAME, - .id_table = qlge_pci_tbl, - .probe = qlge_probe, - .remove = __devexit_p(qlge_remove), -#ifdef CONFIG_PM - .suspend = qlge_suspend, - .resume = qlge_resume, -#endif - .shutdown = qlge_shutdown, - .err_handler = &qlge_err_handler -}; - -static int __init qlge_init_module(void) -{ - return pci_register_driver(&qlge_driver); -} - -static void __exit qlge_exit(void) -{ - pci_unregister_driver(&qlge_driver); -} - -module_init(qlge_init_module); -module_exit(qlge_exit); diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c deleted file mode 100644 index ff2bf8a..0000000 --- a/drivers/net/qlge/qlge_mpi.c +++ /dev/null @@ -1,1284 +0,0 @@ -#include "qlge.h" - -int ql_unpause_mpi_risc(struct ql_adapter *qdev) -{ - u32 tmp; - - /* Un-pause the RISC */ - tmp = ql_read32(qdev, CSR); - if (!(tmp & CSR_RP)) - return -EIO; - - ql_write32(qdev, CSR, 
CSR_CMD_CLR_PAUSE); - return 0; -} - -int ql_pause_mpi_risc(struct ql_adapter *qdev) -{ - u32 tmp; - int count = UDELAY_COUNT; - - /* Pause the RISC */ - ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE); - do { - tmp = ql_read32(qdev, CSR); - if (tmp & CSR_RP) - break; - mdelay(UDELAY_DELAY); - count--; - } while (count); - return (count == 0) ? -ETIMEDOUT : 0; -} - -int ql_hard_reset_mpi_risc(struct ql_adapter *qdev) -{ - u32 tmp; - int count = UDELAY_COUNT; - - /* Reset the RISC */ - ql_write32(qdev, CSR, CSR_CMD_SET_RST); - do { - tmp = ql_read32(qdev, CSR); - if (tmp & CSR_RR) { - ql_write32(qdev, CSR, CSR_CMD_CLR_RST); - break; - } - mdelay(UDELAY_DELAY); - count--; - } while (count); - return (count == 0) ? -ETIMEDOUT : 0; -} - -int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) -{ - int status; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; - /* set up for reg read */ - ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; - /* get the data */ - *data = ql_read32(qdev, PROC_DATA); -exit: - return status; -} - -int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data) -{ - int status = 0; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; - /* write the data to the data reg */ - ql_write32(qdev, PROC_DATA, data); - /* trigger the write */ - ql_write32(qdev, PROC_ADDR, reg); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; -exit: - return status; -} - -int ql_soft_reset_mpi_risc(struct ql_adapter *qdev) -{ - int status; - status = ql_write_mpi_reg(qdev, 0x00001010, 1); - return status; -} - -/* Determine if we are in charge of the firwmare. If - * we are the lower of the 2 NIC pcie functions, or if - * we are the higher function and the lower function - * is not enabled. - */ -int ql_own_firmware(struct ql_adapter *qdev) -{ - u32 temp; - - /* If we are the lower of the 2 NIC functions - * on the chip the we are responsible for - * core dump and firmware reset after an error. - */ - if (qdev->func < qdev->alt_func) - return 1; - - /* If we are the higher of the 2 NIC functions - * on the chip and the lower function is not - * enabled, then we are responsible for - * core dump and firmware reset after an error. - */ - temp = ql_read32(qdev, STS); - if (!(temp & (1 << (8 + qdev->alt_func)))) - return 1; - - return 0; - -} - -static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int i, status; - - status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); - if (status) - return -EBUSY; - for (i = 0; i < mbcp->out_count; i++) { - status = - ql_read_mpi_reg(qdev, qdev->mailbox_out + i, - &mbcp->mbox_out[i]); - if (status) { - netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n"); - break; - } - } - ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ - return status; -} - -/* Wait for a single mailbox command to complete. - * Returns zero on success. - */ -static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev) -{ - int count = 100; - u32 value; - - do { - value = ql_read32(qdev, STS); - if (value & STS_PI) - return 0; - mdelay(UDELAY_DELAY); /* 100ms */ - } while (--count); - return -ETIMEDOUT; -} - -/* Execute a single mailbox command. 
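The pause and hard-reset helpers above share one idiom: write a CSR command, then poll a status bit a bounded number of times with a fixed delay, and report -ETIMEDOUT if it never appears. The same loop against a fake register (delay and retry values are arbitrary here):

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	static uint32_t fake_csr;		/* stands in for the CSR register */

	static void delay_ms(long ms)
	{
		struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
		nanosleep(&ts, NULL);
	}

	/* Poll for `bit` in the fake CSR, up to `count` tries `ms` apart.
	 * Mirrors the UDELAY_COUNT/UDELAY_DELAY loops in qlge_mpi.c. */
	static int poll_bit(uint32_t bit, int count, long ms)
	{
		do {
			if (fake_csr & bit)
				return 0;
			delay_ms(ms);
		} while (--count);
		return -ETIMEDOUT;
	}

	int main(void)
	{
		fake_csr = 0x2;			/* pretend hardware set the bit */
		printf("poll -> %d\n", poll_bit(0x2, 10, 10));
		return 0;
	}
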
- * Caller must hold PROC_ADDR semaphore. - */ -static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int i, status; - - /* - * Make sure there's nothing pending. - * This shouldn't happen. - */ - if (ql_read32(qdev, CSR) & CSR_HRI) - return -EIO; - - status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); - if (status) - return status; - - /* - * Fill the outbound mailboxes. - */ - for (i = 0; i < mbcp->in_count; i++) { - status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i, - mbcp->mbox_in[i]); - if (status) - goto end; - } - /* - * Wake up the MPI firmware. - */ - ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT); -end: - ql_sem_unlock(qdev, SEM_PROC_REG_MASK); - return status; -} - -/* We are being asked by firmware to accept - * a change to the port. This is only - * a change to max frame sizes (Tx/Rx), pause - * parameters, or loopback mode. We wake up a worker - * to handler processing this since a mailbox command - * will need to be sent to ACK the request. - */ -static int ql_idc_req_aen(struct ql_adapter *qdev) -{ - int status; - struct mbox_params *mbcp = &qdev->idc_mbc; - - netif_err(qdev, drv, qdev->ndev, "Enter!\n"); - /* Get the status data and start up a thread to - * handle the request. - */ - mbcp = &qdev->idc_mbc; - mbcp->out_count = 4; - status = ql_get_mb_sts(qdev, mbcp); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Could not read MPI, resetting ASIC!\n"); - ql_queue_asic_error(qdev); - } else { - /* Begin polled mode early so - * we don't get another interrupt - * when we leave mpi_worker. - */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0); - } - return status; -} - -/* Process an inter-device event completion. - * If good, signal the caller's completion. - */ -static int ql_idc_cmplt_aen(struct ql_adapter *qdev) -{ - int status; - struct mbox_params *mbcp = &qdev->idc_mbc; - mbcp->out_count = 4; - status = ql_get_mb_sts(qdev, mbcp); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Could not read MPI, resetting RISC!\n"); - ql_queue_fw_error(qdev); - } else - /* Wake up the sleeping mpi_idc_work thread that is - * waiting for this event. - */ - complete(&qdev->ide_completion); - - return status; -} - -static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - mbcp->out_count = 2; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "%s: Could not get mailbox status.\n", __func__); - return; - } - - qdev->link_status = mbcp->mbox_out[1]; - netif_err(qdev, drv, qdev->ndev, "Link Up.\n"); - - /* If we're coming back from an IDC event - * then set up the CAM and frame routing. - */ - if (test_bit(QL_CAM_RT_SET, &qdev->flags)) { - status = ql_cam_route_initialize(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init CAM/Routing tables.\n"); - return; - } else - clear_bit(QL_CAM_RT_SET, &qdev->flags); - } - - /* Queue up a worker to check the frame - * size information, and fix it if it's not - * to our liking. - */ - if (!test_bit(QL_PORT_CFG, &qdev->flags)) { - netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n"); - set_bit(QL_PORT_CFG, &qdev->flags); - /* Begin polled mode early so - * we don't get another interrupt - * when we leave mpi_worker dpc. 
- */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - queue_delayed_work(qdev->workqueue, - &qdev->mpi_port_cfg_work, 0); - } - - ql_link_on(qdev); -} - -static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - - mbcp->out_count = 3; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) - netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n"); - - ql_link_off(qdev); -} - -static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - - mbcp->out_count = 5; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) - netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n"); - else - netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n"); - - return status; -} - -static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - - mbcp->out_count = 1; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) - netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n"); - else - netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n"); - - return status; -} - -static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - - mbcp->out_count = 6; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) - netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n"); - else { - int i; - netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n"); - for (i = 0; i < mbcp->out_count; i++) - netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n", - i, mbcp->mbox_out[i]); - - } - - return status; -} - -static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - - mbcp->out_count = 2; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) { - netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n"); - } else { - netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n", - mbcp->mbox_out[1]); - qdev->fw_rev_id = mbcp->mbox_out[1]; - status = ql_cam_route_initialize(qdev); - if (status) - netif_err(qdev, ifup, qdev->ndev, - "Failed to init CAM/Routing tables.\n"); - } -} - -/* Process an async event and clear it unless it's an - * error condition. - * This can get called iteratively from the mpi_work thread - * when events arrive via an interrupt. - * It also gets called when a mailbox command is polling for - * it's completion. */ -static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - int orig_count = mbcp->out_count; - - /* Just get mailbox zero for now. */ - mbcp->out_count = 1; - status = ql_get_mb_sts(qdev, mbcp); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Could not read MPI, resetting ASIC!\n"); - ql_queue_asic_error(qdev); - goto end; - } - - switch (mbcp->mbox_out[0]) { - - /* This case is only active when we arrive here - * as a result of issuing a mailbox command to - * the firmware. - */ - case MB_CMD_STS_INTRMDT: - case MB_CMD_STS_GOOD: - case MB_CMD_STS_INVLD_CMD: - case MB_CMD_STS_XFC_ERR: - case MB_CMD_STS_CSUM_ERR: - case MB_CMD_STS_ERR: - case MB_CMD_STS_PARAM_ERR: - /* We can only get mailbox status if we're polling from an - * unfinished command. Get the rest of the status data and - * return back to the caller. - * We only end up here when we're polling for a mailbox - * command completion. - */ - mbcp->out_count = orig_count; - status = ql_get_mb_sts(qdev, mbcp); - return status; - - /* We are being asked by firmware to accept - * a change to the port. This is only - * a change to max frame sizes (Tx/Rx), pause - * parameters, or loopback mode. 
- */ - case AEN_IDC_REQ: - status = ql_idc_req_aen(qdev); - break; - - /* Process and inbound IDC event. - * This will happen when we're trying to - * change tx/rx max frame size, change pause - * parameters or loopback mode. - */ - case AEN_IDC_CMPLT: - case AEN_IDC_EXT: - status = ql_idc_cmplt_aen(qdev); - break; - - case AEN_LINK_UP: - ql_link_up(qdev, mbcp); - break; - - case AEN_LINK_DOWN: - ql_link_down(qdev, mbcp); - break; - - case AEN_FW_INIT_DONE: - /* If we're in process on executing the firmware, - * then convert the status to normal mailbox status. - */ - if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { - mbcp->out_count = orig_count; - status = ql_get_mb_sts(qdev, mbcp); - mbcp->mbox_out[0] = MB_CMD_STS_GOOD; - return status; - } - ql_init_fw_done(qdev, mbcp); - break; - - case AEN_AEN_SFP_IN: - ql_sfp_in(qdev, mbcp); - break; - - case AEN_AEN_SFP_OUT: - ql_sfp_out(qdev, mbcp); - break; - - /* This event can arrive at boot time or after an - * MPI reset if the firmware failed to initialize. - */ - case AEN_FW_INIT_FAIL: - /* If we're in process on executing the firmware, - * then convert the status to normal mailbox status. - */ - if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { - mbcp->out_count = orig_count; - status = ql_get_mb_sts(qdev, mbcp); - mbcp->mbox_out[0] = MB_CMD_STS_ERR; - return status; - } - netif_err(qdev, drv, qdev->ndev, - "Firmware initialization failed.\n"); - status = -EIO; - ql_queue_fw_error(qdev); - break; - - case AEN_SYS_ERR: - netif_err(qdev, drv, qdev->ndev, "System Error.\n"); - ql_queue_fw_error(qdev); - status = -EIO; - break; - - case AEN_AEN_LOST: - ql_aen_lost(qdev, mbcp); - break; - - case AEN_DCBX_CHG: - /* Need to support AEN 8110 */ - break; - default: - netif_err(qdev, drv, qdev->ndev, - "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); - /* Clear the MPI firmware status. */ - } -end: - ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); - /* Restore the original mailbox count to - * what the caller asked for. This can get - * changed when a mailbox command is waiting - * for a response and an AEN arrives and - * is handled. - * */ - mbcp->out_count = orig_count; - return status; -} - -/* Execute a single mailbox command. - * mbcp is a pointer to an array of u32. Each - * element in the array contains the value for it's - * respective mailbox register. - */ -static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - unsigned long count; - - mutex_lock(&qdev->mpi_mutex); - - /* Begin polled mode for MPI */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - - /* Load the mailbox registers and wake up MPI RISC. */ - status = ql_exec_mb_cmd(qdev, mbcp); - if (status) - goto end; - - - /* If we're generating a system error, then there's nothing - * to wait for. - */ - if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR) - goto end; - - /* Wait for the command to complete. We loop - * here because some AEN might arrive while - * we're waiting for the mailbox command to - * complete. If more than 5 seconds expire we can - * assume something is wrong. */ - count = jiffies + HZ * MAILBOX_TIMEOUT; - do { - /* Wait for the interrupt to come in. */ - status = ql_wait_mbx_cmd_cmplt(qdev); - if (status) - continue; - - /* Process the event. If it's an AEN, it - * will be handled in-line or a worker - * will be spawned. If it's our completion - * we will catch it below. - */ - status = ql_mpi_handler(qdev, mbcp); - if (status) - goto end; - - /* It's either the completion for our mailbox - * command complete or an AEN. 
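The command-wait loop continued below tells "my command completed" apart from "an async event arrived" by looking only at the 0x0000f000 nibble of mailbox 0. A compact model of that test (the two status values are assumed stand-ins for MB_CMD_STS_GOOD and MB_CMD_STS_INTRMDT, not the real encodings):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define STS_GOOD	0x4000	/* assumed stand-in values */
	#define STS_INTRMDT	0x1000

	static bool is_cmd_completion(uint32_t mbox0)
	{
		uint32_t cls = mbox0 & 0x0000f000;

		return cls == STS_GOOD || cls == STS_INTRMDT;
	}

	int main(void)
	{
		printf("0x4001 -> %d\n", is_cmd_completion(0x4001));	/* completion */
		printf("0x8011 -> %d\n", is_cmd_completion(0x8011));	/* an AEN    */
		return 0;
	}
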
If it's our - * completion then get out. - */ - if (((mbcp->mbox_out[0] & 0x0000f000) == - MB_CMD_STS_GOOD) || - ((mbcp->mbox_out[0] & 0x0000f000) == - MB_CMD_STS_INTRMDT)) - goto done; - } while (time_before(jiffies, count)); - - netif_err(qdev, drv, qdev->ndev, - "Timed out waiting for mailbox complete.\n"); - status = -ETIMEDOUT; - goto end; - -done: - - /* Now we can clear the interrupt condition - * and look at our status. - */ - ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); - - if (((mbcp->mbox_out[0] & 0x0000f000) != - MB_CMD_STS_GOOD) && - ((mbcp->mbox_out[0] & 0x0000f000) != - MB_CMD_STS_INTRMDT)) { - status = -EIO; - } -end: - /* End polled mode for MPI */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); - mutex_unlock(&qdev->mpi_mutex); - return status; -} - -/* Get MPI firmware version. This will be used for - * driver banner and for ethtool info. - * Returns zero on success. - */ -int ql_mb_about_fw(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status = 0; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 3; - - mbcp->mbox_in[0] = MB_CMD_ABOUT_FW; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed about firmware command\n"); - status = -EIO; - } - - /* Store the firmware version */ - qdev->fw_rev_id = mbcp->mbox_out[1]; - - return status; -} - -/* Get functional state for MPI firmware. - * Returns zero on success. - */ -int ql_mb_get_fw_state(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status = 0; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 2; - - mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed Get Firmware State.\n"); - status = -EIO; - } - - /* If bit zero is set in mbx 1 then the firmware is - * running, but not initialized. This should never - * happen. - */ - if (mbcp->mbox_out[1] & 1) { - netif_err(qdev, drv, qdev->ndev, - "Firmware waiting for initialization.\n"); - status = -EIO; - } - - return status; -} - -/* Send and ACK mailbox command to the firmware to - * let it continue with the change. - */ -static int ql_mb_idc_ack(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status = 0; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 5; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_IDC_ACK; - mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1]; - mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2]; - mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3]; - mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4]; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n"); - status = -EIO; - } - return status; -} - -/* Get link settings and maximum frame size settings - * for the current port. - * Most likely will block. 
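Every ql_mb_* wrapper above follows the same contract: zero a mbox_params, fill in_count inbound words, declare how many outbound words to collect, issue the command, then treat anything but MB_CMD_STS_GOOD in mbox_out[0] as failure. Stripped to its skeleton, with a fake transport and an assumed command id:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define MAX_MBOX 16
	#define STS_GOOD 0x4000		/* assumed stand-in for MB_CMD_STS_GOOD */

	struct mbox_params {
		uint32_t mbox_in[MAX_MBOX];
		uint32_t mbox_out[MAX_MBOX];
		int in_count;
		int out_count;
	};

	/* Fake transport: echo a GOOD status and a made-up firmware rev. */
	static int mailbox_command(struct mbox_params *mbcp)
	{
		mbcp->mbox_out[0] = STS_GOOD;
		mbcp->mbox_out[1] = 0x01020304;
		return 0;
	}

	static int mb_about_fw(uint32_t *fw_rev)
	{
		struct mbox_params mbc;

		memset(&mbc, 0, sizeof(mbc));
		mbc.in_count = 1;		/* one request word ...     */
		mbc.out_count = 3;		/* ... three response words */
		mbc.mbox_in[0] = 0x08;		/* assumed command id       */

		if (mailbox_command(&mbc))
			return -1;
		if (mbc.mbox_out[0] != STS_GOOD)
			return -1;
		*fw_rev = mbc.mbox_out[1];
		return 0;
	}

	int main(void)
	{
		uint32_t rev;

		if (!mb_about_fw(&rev))
			printf("fw rev 0x%08x\n", rev);
		return 0;
	}
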
- */ -int ql_mb_set_port_cfg(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status = 0; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 3; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG; - mbcp->mbox_in[1] = qdev->link_config; - mbcp->mbox_in[2] = qdev->max_frame_size; - - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) { - netif_err(qdev, drv, qdev->ndev, - "Port Config sent, wait for IDC.\n"); - } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed Set Port Configuration.\n"); - status = -EIO; - } - return status; -} - -static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr, - u32 size) -{ - int status = 0; - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 9; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM; - mbcp->mbox_in[1] = LSW(addr); - mbcp->mbox_in[2] = MSW(req_dma); - mbcp->mbox_in[3] = LSW(req_dma); - mbcp->mbox_in[4] = MSW(size); - mbcp->mbox_in[5] = LSW(size); - mbcp->mbox_in[6] = MSW(MSD(req_dma)); - mbcp->mbox_in[7] = LSW(MSD(req_dma)); - mbcp->mbox_in[8] = MSW(addr); - - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n"); - status = -EIO; - } - return status; -} - -/* Issue a mailbox command to dump RISC RAM. */ -int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, - u32 ram_addr, int word_count) -{ - int status; - char *my_buf; - dma_addr_t buf_dma; - - my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32), - &buf_dma); - if (!my_buf) - return -EIO; - - status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count); - if (!status) - memcpy(buf, my_buf, word_count * sizeof(u32)); - - pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf, - buf_dma); - return status; -} - -/* Get link settings and maximum frame size settings - * for the current port. - * Most likely will block. 
- */ -int ql_mb_get_port_cfg(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status = 0; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 3; - - mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed Get Port Configuration.\n"); - status = -EIO; - } else { - netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, - "Passed Get Port Configuration.\n"); - qdev->link_config = mbcp->mbox_out[1]; - qdev->max_frame_size = mbcp->mbox_out[2]; - } - return status; -} - -int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 2; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE; - mbcp->mbox_in[1] = wol; - - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); - status = -EIO; - } - return status; -} - -int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - u8 *addr = qdev->ndev->dev_addr; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 8; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC; - if (enable_wol) { - mbcp->mbox_in[1] = (u32)addr[0]; - mbcp->mbox_in[2] = (u32)addr[1]; - mbcp->mbox_in[3] = (u32)addr[2]; - mbcp->mbox_in[4] = (u32)addr[3]; - mbcp->mbox_in[5] = (u32)addr[4]; - mbcp->mbox_in[6] = (u32)addr[5]; - mbcp->mbox_in[7] = 0; - } else { - mbcp->mbox_in[1] = 0; - mbcp->mbox_in[2] = 1; - mbcp->mbox_in[3] = 1; - mbcp->mbox_in[4] = 1; - mbcp->mbox_in[5] = 1; - mbcp->mbox_in[6] = 1; - mbcp->mbox_in[7] = 0; - } - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); - status = -EIO; - } - return status; -} - -/* IDC - Inter Device Communication... - * Some firmware commands require consent of adjacent FCOE - * function. This function waits for the OK, or a - * counter-request for a little more time.i - * The firmware will complete the request if the other - * function doesn't respond. - */ -static int ql_idc_wait(struct ql_adapter *qdev) -{ - int status = -ETIMEDOUT; - long wait_time = 1 * HZ; - struct mbox_params *mbcp = &qdev->idc_mbc; - do { - /* Wait here for the command to complete - * via the IDC process. - */ - wait_time = - wait_for_completion_timeout(&qdev->ide_completion, - wait_time); - if (!wait_time) { - netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n"); - break; - } - /* Now examine the response from the IDC process. - * We might have a good completion or a request for - * more wait time. 
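ql_idc_wait(), continued below, sleeps on a completion but tolerates the peer function asking for more time: an AEN_IDC_EXT response stretches the deadline rather than failing the wait. A loose model of that control flow, with the completion replaced by a canned response sequence:

	#include <stdio.h>

	enum idc_resp { IDC_CMPLT, IDC_EXT, IDC_BAD };

	/* Canned responses standing in for wait_for_completion_timeout()
	 * plus the AEN left in idc_mbc by the MPI handler. */
	static enum idc_resp next_resp(int i)
	{
		return i == 0 ? IDC_EXT : IDC_CMPLT;
	}

	static int idc_wait(void)
	{
		long wait_time = 1;		/* waiting budget */
		int i = 0;

		do {
			enum idc_resp r = next_resp(i++);

			if (r == IDC_EXT) {
				puts("peer asked for more time");
				wait_time += 1;	/* extend, keep waiting */
			} else if (r == IDC_CMPLT) {
				puts("IDC success");
				return 0;
			} else {
				return -1;	/* invalid state */
			}
		} while (wait_time--);

		return -2;			/* timed out */
	}

	int main(void)
	{
		return idc_wait();
	}
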
- */ - if (mbcp->mbox_out[0] == AEN_IDC_EXT) { - netif_err(qdev, drv, qdev->ndev, - "IDC Time Extension from function.\n"); - wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f; - } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) { - netif_err(qdev, drv, qdev->ndev, "IDC Success.\n"); - status = 0; - break; - } else { - netif_err(qdev, drv, qdev->ndev, - "IDC: Invalid State 0x%.04x.\n", - mbcp->mbox_out[0]); - status = -EIO; - break; - } - } while (wait_time); - - return status; -} - -int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 2; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG; - mbcp->mbox_in[1] = led_config; - - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed to set LED Configuration.\n"); - status = -EIO; - } - - return status; -} - -int ql_mb_get_led_cfg(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 2; - - mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed to get LED Configuration.\n"); - status = -EIO; - } else - qdev->led_config = mbcp->mbox_out[1]; - - return status; -} - -int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 2; - - mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL; - mbcp->mbox_in[1] = control; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) - return status; - - if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { - netif_err(qdev, drv, qdev->ndev, - "Command not supported by firmware.\n"); - status = -EINVAL; - } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { - /* This indicates that the firmware is - * already in the state we are trying to - * change it to. - */ - netif_err(qdev, drv, qdev->ndev, - "Command parameters make no change.\n"); - } - return status; -} - -/* Returns a negative error code or the mailbox command status. 
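ql_wait_fifo_empty(), defined a little further below, combines a NIC-side status bit with a firmware-side mailbox query and retries until both FIFOs report empty. The same retry shape with both probes stubbed out:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stubs standing in for the STS_NFE read and the
	 * MB_GET_MPI_TFK_FIFO_EMPTY mailbox query. */
	static int tries;
	static bool nic_fifo_empty(void)  { return tries > 1; }
	static bool mgmt_fifo_empty(void) { return tries > 2; }

	static int wait_fifo_empty(void)
	{
		int count = 5;

		do {
			tries++;
			if (nic_fifo_empty() && mgmt_fifo_empty())
				return 0;
			/* real driver: msleep(100) between polls */
		} while (count-- > 0);
		return -1;		/* -ETIMEDOUT in the driver */
	}

	int main(void)
	{
		int rc = wait_fifo_empty();

		printf("rc %d after %d polls\n", rc, tries);
		return 0;
	}
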
*/ -static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - *control = 0; - - mbcp->in_count = 1; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) { - *control = mbcp->mbox_in[1]; - return status; - } - - if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { - netif_err(qdev, drv, qdev->ndev, - "Command not supported by firmware.\n"); - status = -EINVAL; - } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { - netif_err(qdev, drv, qdev->ndev, - "Failed to get MPI traffic control.\n"); - status = -EIO; - } - return status; -} - -int ql_wait_fifo_empty(struct ql_adapter *qdev) -{ - int count = 5; - u32 mgmnt_fifo_empty; - u32 nic_fifo_empty; - - do { - nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE; - ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty); - mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY; - if (nic_fifo_empty && mgmnt_fifo_empty) - return 0; - msleep(100); - } while (count-- > 0); - return -ETIMEDOUT; -} - -/* API called in work thread context to set new TX/RX - * maximum frame size values to match MTU. - */ -static int ql_set_port_cfg(struct ql_adapter *qdev) -{ - int status; - status = ql_mb_set_port_cfg(qdev); - if (status) - return status; - status = ql_idc_wait(qdev); - return status; -} - -/* The following routines are worker threads that process - * events that may sleep waiting for completion. - */ - -/* This thread gets the maximum TX and RX frame size values - * from the firmware and, if necessary, changes them to match - * the MTU setting. - */ -void ql_mpi_port_cfg_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_port_cfg_work.work); - int status; - - status = ql_mb_get_port_cfg(qdev); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Bug: Failed to get port config data.\n"); - goto err; - } - - if (qdev->link_config & CFG_JUMBO_FRAME_SIZE && - qdev->max_frame_size == - CFG_DEFAULT_MAX_FRAME_SIZE) - goto end; - - qdev->link_config |= CFG_JUMBO_FRAME_SIZE; - qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE; - status = ql_set_port_cfg(qdev); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Bug: Failed to set port config data.\n"); - goto err; - } -end: - clear_bit(QL_PORT_CFG, &qdev->flags); - return; -err: - ql_queue_fw_error(qdev); - goto end; -} - -/* Process an inter-device request. This is issues by - * the firmware in response to another function requesting - * a change to the port. We set a flag to indicate a change - * has been made and then send a mailbox command ACKing - * the change request. - */ -void ql_mpi_idc_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_idc_work.work); - int status; - struct mbox_params *mbcp = &qdev->idc_mbc; - u32 aen; - int timeout; - - aen = mbcp->mbox_out[1] >> 16; - timeout = (mbcp->mbox_out[1] >> 8) & 0xf; - - switch (aen) { - default: - netif_err(qdev, drv, qdev->ndev, - "Bug: Unhandled IDC action.\n"); - break; - case MB_CMD_PORT_RESET: - case MB_CMD_STOP_FW: - ql_link_off(qdev); - case MB_CMD_SET_PORT_CFG: - /* Signal the resulting link up AEN - * that the frame routing and mac addr - * needs to be set. 
- * */ - set_bit(QL_CAM_RT_SET, &qdev->flags); - /* Do ACK if required */ - if (timeout) { - status = ql_mb_idc_ack(qdev); - if (status) - netif_err(qdev, drv, qdev->ndev, - "Bug: No pending IDC!\n"); - } else { - netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, - "IDC ACK not required\n"); - status = 0; /* success */ - } - break; - - /* These sub-commands issued by another (FCoE) - * function are requesting to do an operation - * on the shared resource (MPI environment). - * We currently don't issue these so we just - * ACK the request. - */ - case MB_CMD_IOP_RESTART_MPI: - case MB_CMD_IOP_PREP_LINK_DOWN: - /* Drop the link, reload the routing - * table when link comes up. - */ - ql_link_off(qdev); - set_bit(QL_CAM_RT_SET, &qdev->flags); - /* Fall through. */ - case MB_CMD_IOP_DVR_START: - case MB_CMD_IOP_FLASH_ACC: - case MB_CMD_IOP_CORE_DUMP_MPI: - case MB_CMD_IOP_PREP_UPDATE_MPI: - case MB_CMD_IOP_COMP_UPDATE_MPI: - case MB_CMD_IOP_NONE: /* an IDC without params */ - /* Do ACK if required */ - if (timeout) { - status = ql_mb_idc_ack(qdev); - if (status) - netif_err(qdev, drv, qdev->ndev, - "Bug: No pending IDC!\n"); - } else { - netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, - "IDC ACK not required\n"); - status = 0; /* success */ - } - break; - } -} - -void ql_mpi_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_work.work); - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int err = 0; - - mutex_lock(&qdev->mpi_mutex); - /* Begin polled mode for MPI */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - - while (ql_read32(qdev, STS) & STS_PI) { - memset(mbcp, 0, sizeof(struct mbox_params)); - mbcp->out_count = 1; - /* Don't continue if an async event - * did not complete properly. - */ - err = ql_mpi_handler(qdev, mbcp); - if (err) - break; - } - - /* End polled mode for MPI */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); - mutex_unlock(&qdev->mpi_mutex); - ql_enable_completion_interrupt(qdev, 0); -} - -void ql_mpi_reset_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_reset_work.work); - cancel_delayed_work_sync(&qdev->mpi_work); - cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); - cancel_delayed_work_sync(&qdev->mpi_idc_work); - /* If we're not the dominant NIC function, - * then there is nothing to do. - */ - if (!ql_own_firmware(qdev)) { - netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); - return; - } - - if (!ql_core_dump(qdev, qdev->mpi_coredump)) { - netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); - qdev->core_is_dumped = 1; - queue_delayed_work(qdev->workqueue, - &qdev->mpi_core_to_log, 5 * HZ); - } - ql_soft_reset_mpi_risc(qdev); -} -- cgit v0.10.2 From ae150435b59e68de00546330241727f2fec54517 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 12 May 2011 20:21:07 -0700 Subject: smsc: Move the SMC (SMSC) drivers Moves the SMC (SMSC) drivers into drivers/net/ethernet/smsc/ and the necessary Kconfig and Makefile changes. Also did some cleanup of NET_VENDOR_SMC Kconfig tag for the 8390 based drivers. 
CC: Nicolas Pitre CC: Donald Becker CC: Erik Stahlman CC: Dustin McIntire CC: Steve Glendinning CC: David Hinds Signed-off-by: Jeff Kirsher diff --git a/MAINTAINERS b/MAINTAINERS index 8f2821c..7c0294c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5869,7 +5869,7 @@ F: mm/sl?b.c SMC91x ETHERNET DRIVER M: Nicolas Pitre S: Odd Fixes -F: drivers/net/smc91x.* +F: drivers/net/ethernet/smsc/smc91x.* SMM665 HARDWARE MONITOR DRIVER M: Guenter Roeck @@ -5904,13 +5904,13 @@ M: Steve Glendinning L: netdev@vger.kernel.org S: Supported F: include/linux/smsc911x.h -F: drivers/net/smsc911x.* +F: drivers/net/ethernet/smsc/smsc911x.* SMSC9420 PCI ETHERNET DRIVER M: Steve Glendinning L: netdev@vger.kernel.org S: Supported -F: drivers/net/smsc9420.* +F: drivers/net/ethernet/smsc/smsc9420.* SN-IA64 (Itanium) SUB-PLATFORM M: Jes Sorensen diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index de2293d..6499186 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -565,39 +565,6 @@ config BFIN_MAC_USE_HWSTAMP help To support the IEEE 1588 Precision Time Protocol (PTP), select y here -config SMC9194 - tristate "SMC 9194 support" - depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN) - select CRC32 - ---help--- - This is support for the SMC9xxx based Ethernet cards. Choose this - option if you have a DELL laptop with the docking station, or - another SMC9192/9194 based chipset. Say Y if you want it compiled - into the kernel, and read the file - and the Ethernet-HOWTO, - available from . - - To compile this driver as a module, choose M here. The module - will be called smc9194. - -config SMC91X - tristate "SMC 91C9x/91C1xxx support" - select CRC32 - select MII - depends on ARM || M32R || SUPERH || \ - MIPS || BLACKFIN || MN10300 || COLDFIRE - help - This is a driver for SMC's 91x series of Ethernet chipsets, - including the SMC91C94 and the SMC91C111. Say Y if you want it - compiled into the kernel, and read the file - and the Ethernet-HOWTO, - available from . - - This driver is also available as a module ( = code which can be - inserted in and removed from the running kernel whenever you want). - The module will be called smc91x. If you want to compile it as a - module, say M here and read . - config PXA168_ETH tristate "Marvell pxa168 ethernet support" depends on CPU_PXA168 @@ -712,44 +679,6 @@ config GRETH help Say Y here if you want to use the Aeroflex Gaisler GRETH Ethernet MAC. -config SMC911X - tristate "SMSC LAN911[5678] support" - select CRC32 - select MII - depends on ARM || SUPERH || MN10300 - help - This is a driver for SMSC's LAN911x series of Ethernet chipsets - including the new LAN9115, LAN9116, LAN9117, and LAN9118. - Say Y if you want it compiled into the kernel, - and read the Ethernet-HOWTO, available from - . - - This driver is also available as a module. The module will be - called smc911x. If you want to compile it as a module, say M - here and read - -config SMSC911X - tristate "SMSC LAN911x/LAN921x families embedded ethernet support" - depends on ARM || SUPERH || BLACKFIN || MIPS || MN10300 - select CRC32 - select MII - select PHYLIB - ---help--- - Say Y here if you want support for SMSC LAN911x and LAN921x families - of ethernet controllers. - - To compile this driver as a module, choose M here and read - . The module - will be called smsc911x. 
- -config SMSC911X_ARCH_HOOKS - def_bool n - depends on SMSC911X - help - If the arch enables this, it allows the arch to implement various - hooks for more comprehensive interrupt control and also to override - the source of the MAC address. - config NET_VENDOR_RACAL bool "Racal-Interlan (Micom) NI cards" depends on ISA @@ -1148,33 +1077,6 @@ config SIS900 To compile this driver as a module, choose M here: the module will be called sis900. This is recommended. -config EPIC100 - tristate "SMC EtherPower II" - depends on NET_PCI && PCI - select CRC32 - select MII - help - This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC, - which is based on the SMC83c17x (EPIC/100). - More specific information and updates are available from - . - -config SMSC9420 - tristate "SMSC LAN9420 PCI ethernet adapter support" - depends on NET_PCI && PCI - select CRC32 - select PHYLIB - select SMSC_PHY - help - This is a driver for SMSC's LAN9420 PCI ethernet adapter. - Say Y if you want it compiled into the kernel, - and read the Ethernet-HOWTO, available from - . - - This driver is also available as a module. The module will be - called smsc9420. If you want to compile it as a module, say M - here and read - config SUNDANCE tristate "Sundance Alta support" depends on NET_PCI && PCI @@ -1891,13 +1793,6 @@ config MYRI10GE_DCA driver. DCA is a method for warming the CPU cache before data is used, with the intent of lessening the impact of cache misses. -config NETXEN_NIC - tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC" - depends on PCI - select FW_LOADER - help - This enables the support for NetXen's Gigabit Ethernet card. - config NIU tristate "Sun Neptune 10Gbit Ethernet support" depends on PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index a58a9f0..e74b424 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -55,8 +55,6 @@ obj-$(CONFIG_MACE) += mace.o obj-$(CONFIG_BMAC) += bmac.o obj-$(CONFIG_TLAN) += tlan.o -obj-$(CONFIG_EPIC100) += epic100.o -obj-$(CONFIG_SMSC9420) += smsc9420.o obj-$(CONFIG_SIS190) += sis190.o obj-$(CONFIG_SIS900) += sis900.o obj-$(CONFIG_R6040) += r6040.o @@ -95,7 +93,6 @@ obj-$(CONFIG_NET) += Space.o loopback.o obj-$(CONFIG_SEEQ8005) += seeq8005.o obj-$(CONFIG_NET_SB1000) += sb1000.o obj-$(CONFIG_HP100) += hp100.o -obj-$(CONFIG_SMC9194) += smc9194.o obj-$(CONFIG_FEC) += fec.o obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) @@ -182,9 +179,6 @@ obj-$(CONFIG_IBMVETH) += ibmveth.o obj-$(CONFIG_S2IO) += s2io.o obj-$(CONFIG_VXGE) += vxge/ obj-$(CONFIG_MYRI10GE) += myri10ge/ -obj-$(CONFIG_SMC91X) += smc91x.o -obj-$(CONFIG_SMC911X) += smc911x.o -obj-$(CONFIG_SMSC911X) += smsc911x.o obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_BFIN_MAC) += bfin_mac.o obj-$(CONFIG_DM9000) += dm9000.o @@ -231,7 +225,6 @@ obj-$(CONFIG_NETCONSOLE) += netconsole.o obj-$(CONFIG_FS_ENET) += fs_enet/ -obj-$(CONFIG_NETXEN_NIC) += netxen/ obj-$(CONFIG_NIU) += niu.o obj-$(CONFIG_VIRTIO_NET) += virtio_net.o obj-$(CONFIG_SFC) += sfc/ diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c deleted file mode 100644 index 814c187..0000000 --- a/drivers/net/epic100.c +++ /dev/null @@ -1,1609 +0,0 @@ -/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */ -/* - Written/copyright 1997-2001 by Donald Becker. - - This software may be used and distributed according to the terms of - the GNU General Public License (GPL), incorporated herein by reference. 
- Drivers based on or derived from this code fall under the GPL and must - retain the authorship, copyright and license notice. This file is not - a complete program and may only be used when the entire operating - system is licensed under the GPL. - - This driver is for the SMC83c170/175 "EPIC" series, as used on the - SMC EtherPower II 9432 PCI adapter, and several CardBus cards. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - Information and updates available at - http://www.scyld.com/network/epic100.html - [this link no longer provides anything useful -jgarzik] - - --------------------------------------------------------------------- - -*/ - -#define DRV_NAME "epic100" -#define DRV_VERSION "2.1" -#define DRV_RELDATE "Sept 11, 2006" - -/* The user-configurable values. - These may be modified when a driver module is loaded.*/ - -static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ - -/* Used to pass the full-duplex flag, etc. */ -#define MAX_UNITS 8 /* More are supported, limit only on options */ -static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; -static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; - -/* Set the copy breakpoint for the copy-only-tiny-frames scheme. - Setting to > 1518 effectively disables this feature. */ -static int rx_copybreak; - -/* Operational parameters that are set at compile time. */ - -/* Keep the ring sizes a power of two for operational efficiency. - The compiler will convert '%'<2^N> into a bit mask. - Making the Tx ring too large decreases the effectiveness of channel - bonding and packet priority. - There are no ill effects from too-large receive rings. */ -#define TX_RING_SIZE 256 -#define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */ -#define RX_RING_SIZE 256 -#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc) -#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc) - -/* Operational parameters that usually are not changed. */ -/* Time in jiffies before concluding the transmitter is hung. */ -#define TX_TIMEOUT (2*HZ) - -#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ - -/* Bytes transferred to chip before transmission starts. */ -/* Initial threshold, increased on underflow, rounded down to 4 byte units. */ -#define TX_FIFO_THRESH 256 -#define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* These identify the driver base version and may not be removed. 
*/ -static char version[] __devinitdata = -DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker \n"; -static char version2[] __devinitdata = -" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; - -MODULE_AUTHOR("Donald Becker "); -MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver"); -MODULE_LICENSE("GPL"); - -module_param(debug, int, 0); -module_param(rx_copybreak, int, 0); -module_param_array(options, int, NULL, 0); -module_param_array(full_duplex, int, NULL, 0); -MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)"); -MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex"); -MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames"); -MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)"); - -/* - Theory of Operation - -I. Board Compatibility - -This device driver is designed for the SMC "EPIC/100", the SMC -single-chip Ethernet controllers for PCI. This chip is used on -the SMC EtherPower II boards. - -II. Board-specific settings - -PCI bus devices are configured by the system at boot time, so no jumpers -need to be set on the board. The system BIOS will assign the -PCI INTA signal to a (preferably otherwise unused) system IRQ line. -Note: Kernel versions earlier than 1.3.73 do not support shared PCI -interrupt lines. - -III. Driver operation - -IIIa. Ring buffers - -IVb. References - -http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf -http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf -http://scyld.com/expert/NWay.html -http://www.national.com/pf/DP/DP83840A.html - -IVc. Errata - -*/ - - -enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 }; - -#define EPIC_TOTAL_SIZE 0x100 -#define USE_IO_OPS 1 - -typedef enum { - SMSC_83C170_0, - SMSC_83C170, - SMSC_83C175, -} chip_t; - - -struct epic_chip_info { - const char *name; - int drv_flags; /* Driver use, intended as capability flags. */ -}; - - -/* indexed by chip_t */ -static const struct epic_chip_info pci_id_tbl[] = { - { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN }, - { "SMSC EPIC/100 83c170", TYPE2_INTR }, - { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN }, -}; - - -static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = { - { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 }, - { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 }, - { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 }, - { 0,} -}; -MODULE_DEVICE_TABLE (pci, epic_pci_tbl); - - -#ifndef USE_IO_OPS -#undef inb -#undef inw -#undef inl -#undef outb -#undef outw -#undef outl -#define inb readb -#define inw readw -#define inl readl -#define outb writeb -#define outw writew -#define outl writel -#endif - -/* Offsets to registers, using the (ugh) SMC names. */ -enum epic_registers { - COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14, - PCIBurstCnt=0x18, - TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */ - MIICtrl=0x30, MIIData=0x34, MIICfg=0x38, - LAN0=64, /* MAC address. */ - MC0=80, /* Multicast filter table. */ - RxCtrl=96, TxCtrl=112, TxSTAT=0x74, - PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC, -}; - -/* Interrupt register bits, using my own meaningful names. 
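
The #ifndef USE_IO_OPS block above lets one code base drive the chip through either x86-style port I/O or memory-mapped I/O by redefining inb()/outl() and friends; the driver defaults to port I/O (#define USE_IO_OPS 1). The same choice written out as an explicit helper, a sketch only, with struct my_dev as a hypothetical device context:

#include <asm/io.h>

struct my_dev {
	long ioaddr;			/* port base when USE_IO_OPS is set */
	void __iomem *mmio;		/* ioremap()ed BAR otherwise */
};

static inline u32 my_read32(struct my_dev *md, int reg)
{
#ifdef USE_IO_OPS
	return inl(md->ioaddr + reg);	/* port I/O through BAR 0 */
#else
	return readl(md->mmio + reg);	/* MMIO through BAR 1 */
#endif
}
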
*/ -enum IntrStatus { - TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000, - PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000, - RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100, - TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010, - RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001, -}; -enum CommandBits { - StopRx=1, StartRx=2, TxQueued=4, RxQueued=8, - StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80, -}; - -#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */ - -#define EpicNapiEvent (TxEmpty | TxDone | \ - RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull) -#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent) - -static const u16 media2miictl[16] = { - 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 }; - -/* - * The EPIC100 Rx and Tx buffer descriptors. Note that these - * really ARE host-endian; it's not a misannotation. We tell - * the card to byteswap them internally on big-endian hosts - - * look for #ifdef __BIG_ENDIAN in epic_open(). - */ - -struct epic_tx_desc { - u32 txstatus; - u32 bufaddr; - u32 buflength; - u32 next; -}; - -struct epic_rx_desc { - u32 rxstatus; - u32 bufaddr; - u32 buflength; - u32 next; -}; - -enum desc_status_bits { - DescOwn=0x8000, -}; - -#define PRIV_ALIGN 15 /* Required alignment mask */ -struct epic_private { - struct epic_rx_desc *rx_ring; - struct epic_tx_desc *tx_ring; - /* The saved address of a sent-in-place packet/buffer, for skfree(). */ - struct sk_buff* tx_skbuff[TX_RING_SIZE]; - /* The addresses of receive-in-place skbuffs. */ - struct sk_buff* rx_skbuff[RX_RING_SIZE]; - - dma_addr_t tx_ring_dma; - dma_addr_t rx_ring_dma; - - /* Ring pointers. */ - spinlock_t lock; /* Group with Tx control cache line. */ - spinlock_t napi_lock; - struct napi_struct napi; - unsigned int reschedule_in_poll; - unsigned int cur_tx, dirty_tx; - - unsigned int cur_rx, dirty_rx; - u32 irq_mask; - unsigned int rx_buf_sz; /* Based on MTU+slack. */ - - struct pci_dev *pci_dev; /* PCI bus location. */ - int chip_id, chip_flags; - - struct timer_list timer; /* Media selection timer. */ - int tx_threshold; - unsigned char mc_filter[8]; - signed char phys[4]; /* MII device addresses. */ - u16 advertising; /* NWay media advertisement */ - int mii_phy_cnt; - struct mii_if_info mii; - unsigned int tx_full:1; /* The Tx queue is full. */ - unsigned int default_port:4; /* Last dev->if_port value. 
*/ -}; - -static int epic_open(struct net_device *dev); -static int read_eeprom(long ioaddr, int location); -static int mdio_read(struct net_device *dev, int phy_id, int location); -static void mdio_write(struct net_device *dev, int phy_id, int loc, int val); -static void epic_restart(struct net_device *dev); -static void epic_timer(unsigned long data); -static void epic_tx_timeout(struct net_device *dev); -static void epic_init_ring(struct net_device *dev); -static netdev_tx_t epic_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static int epic_rx(struct net_device *dev, int budget); -static int epic_poll(struct napi_struct *napi, int budget); -static irqreturn_t epic_interrupt(int irq, void *dev_instance); -static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static const struct ethtool_ops netdev_ethtool_ops; -static int epic_close(struct net_device *dev); -static struct net_device_stats *epic_get_stats(struct net_device *dev); -static void set_rx_mode(struct net_device *dev); - -static const struct net_device_ops epic_netdev_ops = { - .ndo_open = epic_open, - .ndo_stop = epic_close, - .ndo_start_xmit = epic_start_xmit, - .ndo_tx_timeout = epic_tx_timeout, - .ndo_get_stats = epic_get_stats, - .ndo_set_multicast_list = set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __devinit epic_init_one (struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - static int card_idx = -1; - long ioaddr; - int chip_idx = (int) ent->driver_data; - int irq; - struct net_device *dev; - struct epic_private *ep; - int i, ret, option = 0, duplex = 0; - void *ring_space; - dma_addr_t ring_dma; - -/* when built into the kernel, we only print version if device is found */ -#ifndef MODULE - static int printed_version; - if (!printed_version++) - printk(KERN_INFO "%s%s", version, version2); -#endif - - card_idx++; - - ret = pci_enable_device(pdev); - if (ret) - goto out; - irq = pdev->irq; - - if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) { - dev_err(&pdev->dev, "no PCI region space\n"); - ret = -ENODEV; - goto err_out_disable; - } - - pci_set_master(pdev); - - ret = pci_request_regions(pdev, DRV_NAME); - if (ret < 0) - goto err_out_disable; - - ret = -ENOMEM; - - dev = alloc_etherdev(sizeof (*ep)); - if (!dev) { - dev_err(&pdev->dev, "no memory for eth device\n"); - goto err_out_free_res; - } - SET_NETDEV_DEV(dev, &pdev->dev); - -#ifdef USE_IO_OPS - ioaddr = pci_resource_start (pdev, 0); -#else - ioaddr = pci_resource_start (pdev, 1); - ioaddr = (long) pci_ioremap_bar(pdev, 1); - if (!ioaddr) { - dev_err(&pdev->dev, "ioremap failed\n"); - goto err_out_free_netdev; - } -#endif - - pci_set_drvdata(pdev, dev); - ep = netdev_priv(dev); - ep->mii.dev = dev; - ep->mii.mdio_read = mdio_read; - ep->mii.mdio_write = mdio_write; - ep->mii.phy_id_mask = 0x1f; - ep->mii.reg_num_mask = 0x1f; - - ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); - if (!ring_space) - goto err_out_iounmap; - ep->tx_ring = ring_space; - ep->tx_ring_dma = ring_dma; - - ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); - if (!ring_space) - goto err_out_unmap_tx; - ep->rx_ring = ring_space; - ep->rx_ring_dma = ring_dma; - - if (dev->mem_start) { - option = dev->mem_start; - duplex = (dev->mem_start & 16) ? 
1 : 0; - } else if (card_idx >= 0 && card_idx < MAX_UNITS) { - if (options[card_idx] >= 0) - option = options[card_idx]; - if (full_duplex[card_idx] >= 0) - duplex = full_duplex[card_idx]; - } - - dev->base_addr = ioaddr; - dev->irq = irq; - - spin_lock_init(&ep->lock); - spin_lock_init(&ep->napi_lock); - ep->reschedule_in_poll = 0; - - /* Bring the chip out of low-power mode. */ - outl(0x4200, ioaddr + GENCTL); - /* Magic?! If we don't set this bit the MII interface won't work. */ - /* This magic is documented in SMSC app note 7.15 */ - for (i = 16; i > 0; i--) - outl(0x0008, ioaddr + TEST1); - - /* Turn on the MII transceiver. */ - outl(0x12, ioaddr + MIICfg); - if (chip_idx == 1) - outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); - outl(0x0200, ioaddr + GENCTL); - - /* Note: the '175 does not have a serial EEPROM. */ - for (i = 0; i < 3; i++) - ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4)); - - if (debug > 2) { - dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n"); - for (i = 0; i < 64; i++) - printk(" %4.4x%s", read_eeprom(ioaddr, i), - i % 16 == 15 ? "\n" : ""); - } - - ep->pci_dev = pdev; - ep->chip_id = chip_idx; - ep->chip_flags = pci_id_tbl[chip_idx].drv_flags; - ep->irq_mask = - (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) - | CntFull | TxUnderrun | EpicNapiEvent; - - /* Find the connected MII xcvrs. - Doing this in open() would allow detecting external xcvrs later, but - takes much time and no cards have external MII. */ - { - int phy, phy_idx = 0; - for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) { - int mii_status = mdio_read(dev, phy, MII_BMSR); - if (mii_status != 0xffff && mii_status != 0x0000) { - ep->phys[phy_idx++] = phy; - dev_info(&pdev->dev, - "MII transceiver #%d control " - "%4.4x status %4.4x.\n", - phy, mdio_read(dev, phy, 0), mii_status); - } - } - ep->mii_phy_cnt = phy_idx; - if (phy_idx != 0) { - phy = ep->phys[0]; - ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE); - dev_info(&pdev->dev, - "Autonegotiation advertising %4.4x link " - "partner %4.4x.\n", - ep->mii.advertising, mdio_read(dev, phy, 5)); - } else if ( ! (ep->chip_flags & NO_MII)) { - dev_warn(&pdev->dev, - "***WARNING***: No MII transceiver found!\n"); - /* Use the known PHY address of the EPII. */ - ep->phys[0] = 3; - } - ep->mii.phy_id = ep->phys[0]; - } - - /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */ - if (ep->chip_flags & MII_PWRDWN) - outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL); - outl(0x0008, ioaddr + GENCTL); - - /* The lower four bits are the media type. */ - if (duplex) { - ep->mii.force_media = ep->mii.full_duplex = 1; - dev_info(&pdev->dev, "Forced full duplex requested.\n"); - } - dev->if_port = ep->default_port = option; - - /* The Epic-specific entries in the device structure. 
*/ - dev->netdev_ops = &epic_netdev_ops; - dev->ethtool_ops = &netdev_ethtool_ops; - dev->watchdog_timeo = TX_TIMEOUT; - netif_napi_add(dev, &ep->napi, epic_poll, 64); - - ret = register_netdev(dev); - if (ret < 0) - goto err_out_unmap_rx; - - printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n", - dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq, - dev->dev_addr); - -out: - return ret; - -err_out_unmap_rx: - pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); -err_out_unmap_tx: - pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); -err_out_iounmap: -#ifndef USE_IO_OPS - iounmap(ioaddr); -err_out_free_netdev: -#endif - free_netdev(dev); -err_out_free_res: - pci_release_regions(pdev); -err_out_disable: - pci_disable_device(pdev); - goto out; -} - -/* Serial EEPROM section. */ - -/* EEPROM_Ctrl bits. */ -#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ -#define EE_CS 0x02 /* EEPROM chip select. */ -#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */ -#define EE_WRITE_0 0x01 -#define EE_WRITE_1 0x09 -#define EE_DATA_READ 0x10 /* EEPROM chip data out. */ -#define EE_ENB (0x0001 | EE_CS) - -/* Delay between EEPROM clock transitions. - This serves to flush the operation to the PCI bus. - */ - -#define eeprom_delay() inl(ee_addr) - -/* The EEPROM commands include the alway-set leading bit. */ -#define EE_WRITE_CMD (5 << 6) -#define EE_READ64_CMD (6 << 6) -#define EE_READ256_CMD (6 << 8) -#define EE_ERASE_CMD (7 << 6) - -static void epic_disable_int(struct net_device *dev, struct epic_private *ep) -{ - long ioaddr = dev->base_addr; - - outl(0x00000000, ioaddr + INTMASK); -} - -static inline void __epic_pci_commit(long ioaddr) -{ -#ifndef USE_IO_OPS - inl(ioaddr + INTMASK); -#endif -} - -static inline void epic_napi_irq_off(struct net_device *dev, - struct epic_private *ep) -{ - long ioaddr = dev->base_addr; - - outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK); - __epic_pci_commit(ioaddr); -} - -static inline void epic_napi_irq_on(struct net_device *dev, - struct epic_private *ep) -{ - long ioaddr = dev->base_addr; - - /* No need to commit possible posted write */ - outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK); -} - -static int __devinit read_eeprom(long ioaddr, int location) -{ - int i; - int retval = 0; - long ee_addr = ioaddr + EECTL; - int read_cmd = location | - (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD); - - outl(EE_ENB & ~EE_CS, ee_addr); - outl(EE_ENB, ee_addr); - - /* Shift the read command bits out. */ - for (i = 12; i >= 0; i--) { - short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0; - outl(EE_ENB | dataval, ee_addr); - eeprom_delay(); - outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); - eeprom_delay(); - } - outl(EE_ENB, ee_addr); - - for (i = 16; i > 0; i--) { - outl(EE_ENB | EE_SHIFT_CLK, ee_addr); - eeprom_delay(); - retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0); - outl(EE_ENB, ee_addr); - eeprom_delay(); - } - - /* Terminate the EEPROM access. */ - outl(EE_ENB & ~EE_CS, ee_addr); - return retval; -} - -#define MII_READOP 1 -#define MII_WRITEOP 2 -static int mdio_read(struct net_device *dev, int phy_id, int location) -{ - long ioaddr = dev->base_addr; - int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP; - int i; - - outl(read_cmd, ioaddr + MIICtrl); - /* Typical operation takes 25 loops. */ - for (i = 400; i > 0; i--) { - barrier(); - if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) { - /* Work around read failure bug. 
*/ - if (phy_id == 1 && location < 6 && - inw(ioaddr + MIIData) == 0xffff) { - outl(read_cmd, ioaddr + MIICtrl); - continue; - } - return inw(ioaddr + MIIData); - } - } - return 0xffff; -} - -static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) -{ - long ioaddr = dev->base_addr; - int i; - - outw(value, ioaddr + MIIData); - outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl); - for (i = 10000; i > 0; i--) { - barrier(); - if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0) - break; - } -} - - -static int epic_open(struct net_device *dev) -{ - struct epic_private *ep = netdev_priv(dev); - long ioaddr = dev->base_addr; - int i; - int retval; - - /* Soft reset the chip. */ - outl(0x4001, ioaddr + GENCTL); - - napi_enable(&ep->napi); - if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) { - napi_disable(&ep->napi); - return retval; - } - - epic_init_ring(dev); - - outl(0x4000, ioaddr + GENCTL); - /* This magic is documented in SMSC app note 7.15 */ - for (i = 16; i > 0; i--) - outl(0x0008, ioaddr + TEST1); - - /* Pull the chip out of low-power mode, enable interrupts, and set for - PCI read multiple. The MIIcfg setting and strange write order are - required by the details of which bits are reset and the transceiver - wiring on the Ositech CardBus card. - */ -#if 0 - outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); -#endif - if (ep->chip_flags & MII_PWRDWN) - outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); - - /* Tell the chip to byteswap descriptors on big-endian hosts */ -#ifdef __BIG_ENDIAN - outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); - inl(ioaddr + GENCTL); - outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); -#else - outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); - inl(ioaddr + GENCTL); - outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); -#endif - - udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */ - - for (i = 0; i < 3; i++) - outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); - - ep->tx_threshold = TX_FIFO_THRESH; - outl(ep->tx_threshold, ioaddr + TxThresh); - - if (media2miictl[dev->if_port & 15]) { - if (ep->mii_phy_cnt) - mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]); - if (dev->if_port == 1) { - if (debug > 1) - printk(KERN_INFO "%s: Using the 10base2 transceiver, MII " - "status %4.4x.\n", - dev->name, mdio_read(dev, ep->phys[0], MII_BMSR)); - } - } else { - int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA); - if (mii_lpa != 0xffff) { - if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL) - ep->mii.full_duplex = 1; - else if (! (mii_lpa & LPA_LPACK)) - mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); - if (debug > 1) - printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d" - " register read of %4.4x.\n", dev->name, - ep->mii.full_duplex ? "full" : "half", - ep->phys[0], mii_lpa); - } - } - - outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); - outl(ep->rx_ring_dma, ioaddr + PRxCDAR); - outl(ep->tx_ring_dma, ioaddr + PTxCDAR); - - /* Start the chip's Rx process. */ - set_rx_mode(dev); - outl(StartRx | RxQueued, ioaddr + COMMAND); - - netif_start_queue(dev); - - /* Enable interrupts by setting the interrupt mask. */ - outl((ep->chip_flags & TYPE2_INTR ? 
PCIBusErr175 : PCIBusErr170) - | CntFull | TxUnderrun - | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); - - if (debug > 1) - printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x " - "%s-duplex.\n", - dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL), - ep->mii.full_duplex ? "full" : "half"); - - /* Set the timer to switch to check for link beat and perhaps switch - to an alternate media type. */ - init_timer(&ep->timer); - ep->timer.expires = jiffies + 3*HZ; - ep->timer.data = (unsigned long)dev; - ep->timer.function = epic_timer; /* timer handler */ - add_timer(&ep->timer); - - return 0; -} - -/* Reset the chip to recover from a PCI transaction error. - This may occur at interrupt time. */ -static void epic_pause(struct net_device *dev) -{ - long ioaddr = dev->base_addr; - - netif_stop_queue (dev); - - /* Disable interrupts by clearing the interrupt mask. */ - outl(0x00000000, ioaddr + INTMASK); - /* Stop the chip's Tx and Rx DMA processes. */ - outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND); - - /* Update the error counts. */ - if (inw(ioaddr + COMMAND) != 0xffff) { - dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); - dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); - dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); - } - - /* Remove the packets on the Rx queue. */ - epic_rx(dev, RX_RING_SIZE); -} - -static void epic_restart(struct net_device *dev) -{ - long ioaddr = dev->base_addr; - struct epic_private *ep = netdev_priv(dev); - int i; - - /* Soft reset the chip. */ - outl(0x4001, ioaddr + GENCTL); - - printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n", - dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); - udelay(1); - - /* This magic is documented in SMSC app note 7.15 */ - for (i = 16; i > 0; i--) - outl(0x0008, ioaddr + TEST1); - -#ifdef __BIG_ENDIAN - outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); -#else - outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); -#endif - outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); - if (ep->chip_flags & MII_PWRDWN) - outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); - - for (i = 0; i < 3; i++) - outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); - - ep->tx_threshold = TX_FIFO_THRESH; - outl(ep->tx_threshold, ioaddr + TxThresh); - outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); - outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)* - sizeof(struct epic_rx_desc), ioaddr + PRxCDAR); - outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)* - sizeof(struct epic_tx_desc), ioaddr + PTxCDAR); - - /* Start the chip's Rx process. */ - set_rx_mode(dev); - outl(StartRx | RxQueued, ioaddr + COMMAND); - - /* Enable interrupts by setting the interrupt mask. */ - outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) - | CntFull | TxUnderrun - | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); - - printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x" - " interrupt %4.4x.\n", - dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL), - (int)inl(ioaddr + INTSTAT)); -} - -static void check_media(struct net_device *dev) -{ - struct epic_private *ep = netdev_priv(dev); - long ioaddr = dev->base_addr; - int mii_lpa = ep->mii_phy_cnt ? 
mdio_read(dev, ep->phys[0], MII_LPA) : 0; - int negotiated = mii_lpa & ep->mii.advertising; - int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; - - if (ep->mii.force_media) - return; - if (mii_lpa == 0xffff) /* Bogus read */ - return; - if (ep->mii.full_duplex != duplex) { - ep->mii.full_duplex = duplex; - printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link" - " partner capability of %4.4x.\n", dev->name, - ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa); - outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); - } -} - -static void epic_timer(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct epic_private *ep = netdev_priv(dev); - long ioaddr = dev->base_addr; - int next_tick = 5*HZ; - - if (debug > 3) { - printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n", - dev->name, (int)inl(ioaddr + TxSTAT)); - printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x " - "IntStatus %4.4x RxStatus %4.4x.\n", - dev->name, (int)inl(ioaddr + INTMASK), - (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT)); - } - - check_media(dev); - - ep->timer.expires = jiffies + next_tick; - add_timer(&ep->timer); -} - -static void epic_tx_timeout(struct net_device *dev) -{ - struct epic_private *ep = netdev_priv(dev); - long ioaddr = dev->base_addr; - - if (debug > 0) { - printk(KERN_WARNING "%s: Transmit timeout using MII device, " - "Tx status %4.4x.\n", - dev->name, (int)inw(ioaddr + TxSTAT)); - if (debug > 1) { - printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n", - dev->name, ep->dirty_tx, ep->cur_tx); - } - } - if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */ - dev->stats.tx_fifo_errors++; - outl(RestartTx, ioaddr + COMMAND); - } else { - epic_restart(dev); - outl(TxQueued, dev->base_addr + COMMAND); - } - - dev->trans_start = jiffies; /* prevent tx timeout */ - dev->stats.tx_errors++; - if (!ep->tx_full) - netif_wake_queue(dev); -} - -/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ -static void epic_init_ring(struct net_device *dev) -{ - struct epic_private *ep = netdev_priv(dev); - int i; - - ep->tx_full = 0; - ep->dirty_tx = ep->cur_tx = 0; - ep->cur_rx = ep->dirty_rx = 0; - ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); - - /* Initialize all Rx descriptors. */ - for (i = 0; i < RX_RING_SIZE; i++) { - ep->rx_ring[i].rxstatus = 0; - ep->rx_ring[i].buflength = ep->rx_buf_sz; - ep->rx_ring[i].next = ep->rx_ring_dma + - (i+1)*sizeof(struct epic_rx_desc); - ep->rx_skbuff[i] = NULL; - } - /* Mark the last entry as wrapping the ring. */ - ep->rx_ring[i-1].next = ep->rx_ring_dma; - - /* Fill in the Rx buffers. Handle allocation failure gracefully. */ - for (i = 0; i < RX_RING_SIZE; i++) { - struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2); - ep->rx_skbuff[i] = skb; - if (skb == NULL) - break; - skb_reserve(skb, 2); /* 16 byte align the IP header. */ - ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, - skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); - ep->rx_ring[i].rxstatus = DescOwn; - } - ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE); - - /* The Tx buffer descriptor is filled in as needed, but we - do need to clear the ownership bit. 
*/ - for (i = 0; i < TX_RING_SIZE; i++) { - ep->tx_skbuff[i] = NULL; - ep->tx_ring[i].txstatus = 0x0000; - ep->tx_ring[i].next = ep->tx_ring_dma + - (i+1)*sizeof(struct epic_tx_desc); - } - ep->tx_ring[i-1].next = ep->tx_ring_dma; -} - -static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct epic_private *ep = netdev_priv(dev); - int entry, free_count; - u32 ctrl_word; - unsigned long flags; - - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - - /* Caution: the write order is important here, set the field with the - "ownership" bit last. */ - - /* Calculate the next Tx descriptor entry. */ - spin_lock_irqsave(&ep->lock, flags); - free_count = ep->cur_tx - ep->dirty_tx; - entry = ep->cur_tx % TX_RING_SIZE; - - ep->tx_skbuff[entry] = skb; - ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data, - skb->len, PCI_DMA_TODEVICE); - if (free_count < TX_QUEUE_LEN/2) {/* Typical path */ - ctrl_word = 0x100000; /* No interrupt */ - } else if (free_count == TX_QUEUE_LEN/2) { - ctrl_word = 0x140000; /* Tx-done intr. */ - } else if (free_count < TX_QUEUE_LEN - 1) { - ctrl_word = 0x100000; /* No Tx-done intr. */ - } else { - /* Leave room for an additional entry. */ - ctrl_word = 0x140000; /* Tx-done intr. */ - ep->tx_full = 1; - } - ep->tx_ring[entry].buflength = ctrl_word | skb->len; - ep->tx_ring[entry].txstatus = - ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16) - | DescOwn; - - ep->cur_tx++; - if (ep->tx_full) - netif_stop_queue(dev); - - spin_unlock_irqrestore(&ep->lock, flags); - /* Trigger an immediate transmit demand. */ - outl(TxQueued, dev->base_addr + COMMAND); - - if (debug > 4) - printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, " - "flag %2.2x Tx status %8.8x.\n", - dev->name, (int)skb->len, entry, ctrl_word, - (int)inl(dev->base_addr + TxSTAT)); - - return NETDEV_TX_OK; -} - -static void epic_tx_error(struct net_device *dev, struct epic_private *ep, - int status) -{ - struct net_device_stats *stats = &dev->stats; - -#ifndef final_version - /* There was an major error, log it. */ - if (debug > 1) - printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", - dev->name, status); -#endif - stats->tx_errors++; - if (status & 0x1050) - stats->tx_aborted_errors++; - if (status & 0x0008) - stats->tx_carrier_errors++; - if (status & 0x0040) - stats->tx_window_errors++; - if (status & 0x0010) - stats->tx_fifo_errors++; -} - -static void epic_tx(struct net_device *dev, struct epic_private *ep) -{ - unsigned int dirty_tx, cur_tx; - - /* - * Note: if this lock becomes a problem we can narrow the locked - * region at the cost of occasionally grabbing the lock more times. - */ - cur_tx = ep->cur_tx; - for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) { - struct sk_buff *skb; - int entry = dirty_tx % TX_RING_SIZE; - int txstatus = ep->tx_ring[entry].txstatus; - - if (txstatus & DescOwn) - break; /* It still hasn't been Txed */ - - if (likely(txstatus & 0x0001)) { - dev->stats.collisions += (txstatus >> 8) & 15; - dev->stats.tx_packets++; - dev->stats.tx_bytes += ep->tx_skbuff[entry]->len; - } else - epic_tx_error(dev, ep, txstatus); - - /* Free the original skb. */ - skb = ep->tx_skbuff[entry]; - pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, - skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); - ep->tx_skbuff[entry] = NULL; - } - -#ifndef final_version - if (cur_tx - dirty_tx > TX_RING_SIZE) { - printk(KERN_WARNING - "%s: Out-of-sync dirty pointer, %d vs. 
%d, full=%d.\n", - dev->name, dirty_tx, cur_tx, ep->tx_full); - dirty_tx += TX_RING_SIZE; - } -#endif - ep->dirty_tx = dirty_tx; - if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) { - /* The ring is no longer full, allow new TX entries. */ - ep->tx_full = 0; - netif_wake_queue(dev); - } -} - -/* The interrupt handler does all of the Rx thread work and cleans up - after the Tx thread. */ -static irqreturn_t epic_interrupt(int irq, void *dev_instance) -{ - struct net_device *dev = dev_instance; - struct epic_private *ep = netdev_priv(dev); - long ioaddr = dev->base_addr; - unsigned int handled = 0; - int status; - - status = inl(ioaddr + INTSTAT); - /* Acknowledge all of the current interrupt sources ASAP. */ - outl(status & EpicNormalEvent, ioaddr + INTSTAT); - - if (debug > 4) { - printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new " - "intstat=%#8.8x.\n", dev->name, status, - (int)inl(ioaddr + INTSTAT)); - } - - if ((status & IntrSummary) == 0) - goto out; - - handled = 1; - - if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { - spin_lock(&ep->napi_lock); - if (napi_schedule_prep(&ep->napi)) { - epic_napi_irq_off(dev, ep); - __napi_schedule(&ep->napi); - } else - ep->reschedule_in_poll++; - spin_unlock(&ep->napi_lock); - } - status &= ~EpicNapiEvent; - - /* Check uncommon events all at once. */ - if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) { - if (status == EpicRemoved) - goto out; - - /* Always update the error counts to avoid overhead later. */ - dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); - dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); - dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); - - if (status & TxUnderrun) { /* Tx FIFO underflow. */ - dev->stats.tx_fifo_errors++; - outl(ep->tx_threshold += 128, ioaddr + TxThresh); - /* Restart the transmit process. */ - outl(RestartTx, ioaddr + COMMAND); - } - if (status & PCIBusErr170) { - printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n", - dev->name, status); - epic_pause(dev); - epic_restart(dev); - } - /* Clear all error sources. */ - outl(status & 0x7f18, ioaddr + INTSTAT); - } - -out: - if (debug > 3) { - printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n", - dev->name, status); - } - - return IRQ_RETVAL(handled); -} - -static int epic_rx(struct net_device *dev, int budget) -{ - struct epic_private *ep = netdev_priv(dev); - int entry = ep->cur_rx % RX_RING_SIZE; - int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx; - int work_done = 0; - - if (debug > 4) - printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry, - ep->rx_ring[entry].rxstatus); - - if (rx_work_limit > budget) - rx_work_limit = budget; - - /* If we own the next entry, it's a new packet. Send it up. */ - while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) { - int status = ep->rx_ring[entry].rxstatus; - - if (debug > 4) - printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status); - if (--rx_work_limit < 0) - break; - if (status & 0x2006) { - if (debug > 2) - printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n", - dev->name, status); - if (status & 0x2000) { - printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " - "multiple buffers, status %4.4x!\n", dev->name, status); - dev->stats.rx_length_errors++; - } else if (status & 0x0006) - /* Rx Frame errors are counted in hardware. */ - dev->stats.rx_errors++; - } else { - /* Malloc up new buffer, compatible with net-2e. */ - /* Omit the four octet CRC from the length. 
*/ - short pkt_len = (status >> 16) - 4; - struct sk_buff *skb; - - if (pkt_len > PKT_BUF_SZ - 4) { - printk(KERN_ERR "%s: Oversized Ethernet frame, status %x " - "%d bytes.\n", - dev->name, status, pkt_len); - pkt_len = 1514; - } - /* Check if the packet is long enough to accept without copying - to a minimally-sized skbuff. */ - if (pkt_len < rx_copybreak && - (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { - skb_reserve(skb, 2); /* 16 byte align the IP header */ - pci_dma_sync_single_for_cpu(ep->pci_dev, - ep->rx_ring[entry].bufaddr, - ep->rx_buf_sz, - PCI_DMA_FROMDEVICE); - skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len); - skb_put(skb, pkt_len); - pci_dma_sync_single_for_device(ep->pci_dev, - ep->rx_ring[entry].bufaddr, - ep->rx_buf_sz, - PCI_DMA_FROMDEVICE); - } else { - pci_unmap_single(ep->pci_dev, - ep->rx_ring[entry].bufaddr, - ep->rx_buf_sz, PCI_DMA_FROMDEVICE); - skb_put(skb = ep->rx_skbuff[entry], pkt_len); - ep->rx_skbuff[entry] = NULL; - } - skb->protocol = eth_type_trans(skb, dev); - netif_receive_skb(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - work_done++; - entry = (++ep->cur_rx) % RX_RING_SIZE; - } - - /* Refill the Rx ring buffers. */ - for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) { - entry = ep->dirty_rx % RX_RING_SIZE; - if (ep->rx_skbuff[entry] == NULL) { - struct sk_buff *skb; - skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2); - if (skb == NULL) - break; - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, - skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); - work_done++; - } - /* AV: shouldn't we add a barrier here? */ - ep->rx_ring[entry].rxstatus = DescOwn; - } - return work_done; -} - -static void epic_rx_err(struct net_device *dev, struct epic_private *ep) -{ - long ioaddr = dev->base_addr; - int status; - - status = inl(ioaddr + INTSTAT); - - if (status == EpicRemoved) - return; - if (status & RxOverflow) /* Missed a Rx frame. */ - dev->stats.rx_errors++; - if (status & (RxOverflow | RxFull)) - outw(RxQueued, ioaddr + COMMAND); -} - -static int epic_poll(struct napi_struct *napi, int budget) -{ - struct epic_private *ep = container_of(napi, struct epic_private, napi); - struct net_device *dev = ep->mii.dev; - int work_done = 0; - long ioaddr = dev->base_addr; - -rx_action: - - epic_tx(dev, ep); - - work_done += epic_rx(dev, budget); - - epic_rx_err(dev, ep); - - if (work_done < budget) { - unsigned long flags; - int more; - - /* A bit baroque but it avoids a (space hungry) spin_unlock */ - - spin_lock_irqsave(&ep->napi_lock, flags); - - more = ep->reschedule_in_poll; - if (!more) { - __napi_complete(napi); - outl(EpicNapiEvent, ioaddr + INTSTAT); - epic_napi_irq_on(dev, ep); - } else - ep->reschedule_in_poll--; - - spin_unlock_irqrestore(&ep->napi_lock, flags); - - if (more) - goto rx_action; - } - - return work_done; -} - -static int epic_close(struct net_device *dev) -{ - long ioaddr = dev->base_addr; - struct epic_private *ep = netdev_priv(dev); - struct sk_buff *skb; - int i; - - netif_stop_queue(dev); - napi_disable(&ep->napi); - - if (debug > 1) - printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", - dev->name, (int)inl(ioaddr + INTSTAT)); - - del_timer_sync(&ep->timer); - - epic_disable_int(dev, ep); - - free_irq(dev->irq, dev); - - epic_pause(dev); - - /* Free all the skbuffs in the Rx queue. 
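
epic_poll() above predates today's NAPI helpers but is recognizably the canonical shape; the reschedule_in_poll counter only papers over an IRQ/poll race specific to this chip. The usual skeleton, as a sketch: struct my_priv, my_clean_rx() and my_enable_irq() are hypothetical.

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_clean_rx(priv, budget);	/* processes <= budget frames */

	if (work_done < budget) {
		napi_complete(napi);	/* leave polled mode ... */
		my_enable_irq(priv);	/* ... and re-arm the device interrupt */
	}
	return work_done;		/* returning budget keeps us on the poll list */
}
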
*/ - for (i = 0; i < RX_RING_SIZE; i++) { - skb = ep->rx_skbuff[i]; - ep->rx_skbuff[i] = NULL; - ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ - ep->rx_ring[i].buflength = 0; - if (skb) { - pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr, - ep->rx_buf_sz, PCI_DMA_FROMDEVICE); - dev_kfree_skb(skb); - } - ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */ - } - for (i = 0; i < TX_RING_SIZE; i++) { - skb = ep->tx_skbuff[i]; - ep->tx_skbuff[i] = NULL; - if (!skb) - continue; - pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr, - skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb(skb); - } - - /* Green! Leave the chip in low-power mode. */ - outl(0x0008, ioaddr + GENCTL); - - return 0; -} - -static struct net_device_stats *epic_get_stats(struct net_device *dev) -{ - long ioaddr = dev->base_addr; - - if (netif_running(dev)) { - /* Update the error counts. */ - dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); - dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); - dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); - } - - return &dev->stats; -} - -/* Set or clear the multicast filter for this adaptor. - Note that we only use exclusion around actually queueing the - new frame, not around filling ep->setup_frame. This is non-deterministic - when re-entered but still correct. */ - -static void set_rx_mode(struct net_device *dev) -{ - long ioaddr = dev->base_addr; - struct epic_private *ep = netdev_priv(dev); - unsigned char mc_filter[8]; /* Multicast hash filter */ - int i; - - if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ - outl(0x002C, ioaddr + RxCtrl); - /* Unconditionally log net taps. */ - memset(mc_filter, 0xff, sizeof(mc_filter)); - } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) { - /* There is apparently a chip bug, so the multicast filter - is never enabled. */ - /* Too many to filter perfectly -- accept all multicasts. */ - memset(mc_filter, 0xff, sizeof(mc_filter)); - outl(0x000C, ioaddr + RxCtrl); - } else if (netdev_mc_empty(dev)) { - outl(0x0004, ioaddr + RxCtrl); - return; - } else { /* Never executed, for now. */ - struct netdev_hw_addr *ha; - - memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(ha, dev) { - unsigned int bit_nr = - ether_crc_le(ETH_ALEN, ha->addr) & 0x3f; - mc_filter[bit_nr >> 3] |= (1 << bit_nr); - } - } - /* ToDo: perhaps we need to stop the Tx and Rx process here? 
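
set_rx_mode() above hashes each multicast address into a 64-bin filter: the low 6 bits of the little-endian CRC-32 of the MAC address select one bit in an 8-byte table. A standalone sketch of that computation; note the deleted code shifts by the full bit_nr, in a branch whose own comment says it is never executed, whereas the usual form masks with 7 as below.

#include <linux/crc32.h>
#include <linux/etherdevice.h>

static void my_hash_mc_addr(u8 mc_filter[8], const u8 addr[ETH_ALEN])
{
	unsigned int bit_nr = ether_crc_le(ETH_ALEN, addr) & 0x3f;

	mc_filter[bit_nr >> 3] |= 1 << (bit_nr & 7);	/* pick byte, then bit */
}
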
*/ - if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) { - for (i = 0; i < 4; i++) - outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4); - memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); - } -} - -static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) -{ - struct epic_private *np = netdev_priv(dev); - - strcpy (info->driver, DRV_NAME); - strcpy (info->version, DRV_VERSION); - strcpy (info->bus_info, pci_name(np->pci_dev)); -} - -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct epic_private *np = netdev_priv(dev); - int rc; - - spin_lock_irq(&np->lock); - rc = mii_ethtool_gset(&np->mii, cmd); - spin_unlock_irq(&np->lock); - - return rc; -} - -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct epic_private *np = netdev_priv(dev); - int rc; - - spin_lock_irq(&np->lock); - rc = mii_ethtool_sset(&np->mii, cmd); - spin_unlock_irq(&np->lock); - - return rc; -} - -static int netdev_nway_reset(struct net_device *dev) -{ - struct epic_private *np = netdev_priv(dev); - return mii_nway_restart(&np->mii); -} - -static u32 netdev_get_link(struct net_device *dev) -{ - struct epic_private *np = netdev_priv(dev); - return mii_link_ok(&np->mii); -} - -static u32 netdev_get_msglevel(struct net_device *dev) -{ - return debug; -} - -static void netdev_set_msglevel(struct net_device *dev, u32 value) -{ - debug = value; -} - -static int ethtool_begin(struct net_device *dev) -{ - unsigned long ioaddr = dev->base_addr; - /* power-up, if interface is down */ - if (! netif_running(dev)) { - outl(0x0200, ioaddr + GENCTL); - outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); - } - return 0; -} - -static void ethtool_complete(struct net_device *dev) -{ - unsigned long ioaddr = dev->base_addr; - /* power-down, if interface is down */ - if (! netif_running(dev)) { - outl(0x0008, ioaddr + GENCTL); - outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); - } -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, - .nway_reset = netdev_nway_reset, - .get_link = netdev_get_link, - .get_msglevel = netdev_get_msglevel, - .set_msglevel = netdev_set_msglevel, - .begin = ethtool_begin, - .complete = ethtool_complete -}; - -static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct epic_private *np = netdev_priv(dev); - long ioaddr = dev->base_addr; - struct mii_ioctl_data *data = if_mii(rq); - int rc; - - /* power-up, if interface is down */ - if (! netif_running(dev)) { - outl(0x0200, ioaddr + GENCTL); - outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); - } - - /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */ - spin_lock_irq(&np->lock); - rc = generic_mii_ioctl(&np->mii, data, cmd, NULL); - spin_unlock_irq(&np->lock); - - /* power-down, if interface is down */ - if (! 
netif_running(dev)) { - outl(0x0008, ioaddr + GENCTL); - outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); - } - return rc; -} - - -static void __devexit epic_remove_one (struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct epic_private *ep = netdev_priv(dev); - - pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); - pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); - unregister_netdev(dev); -#ifndef USE_IO_OPS - iounmap((void*) dev->base_addr); -#endif - pci_release_regions(pdev); - free_netdev(dev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - /* pci_power_off(pdev, -1); */ -} - - -#ifdef CONFIG_PM - -static int epic_suspend (struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - long ioaddr = dev->base_addr; - - if (!netif_running(dev)) - return 0; - epic_pause(dev); - /* Put the chip into low-power mode. */ - outl(0x0008, ioaddr + GENCTL); - /* pci_power_off(pdev, -1); */ - return 0; -} - - -static int epic_resume (struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - if (!netif_running(dev)) - return 0; - epic_restart(dev); - /* pci_power_on(pdev); */ - return 0; -} - -#endif /* CONFIG_PM */ - - -static struct pci_driver epic_driver = { - .name = DRV_NAME, - .id_table = epic_pci_tbl, - .probe = epic_init_one, - .remove = __devexit_p(epic_remove_one), -#ifdef CONFIG_PM - .suspend = epic_suspend, - .resume = epic_resume, -#endif /* CONFIG_PM */ -}; - - -static int __init epic_init (void) -{ -/* when a module, this is printed whether or not devices are found in probe */ -#ifdef MODULE - printk (KERN_INFO "%s%s", - version, version2); -#endif - - return pci_register_driver(&epic_driver); -} - - -static void __exit epic_cleanup (void) -{ - pci_unregister_driver (&epic_driver); -} - - -module_init(epic_init); -module_exit(epic_cleanup); diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 5cd53f1..f1b9bdd 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -264,22 +264,9 @@ config STNIC If unsure, say N. -config NET_VENDOR_SMC - bool "Western Digital/SMC cards" - depends on (ISA || MCA || EISA || MAC) - ---help--- - If you have a network (Ethernet) card belonging to this class, say Y - and read the Ethernet-HOWTO, available from - . - - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about Western Digital cards. If you say Y, you will be - asked for your specific card in the following questions. 
-
 config ULTRAMCA
 	tristate "SMC Ultra MCA support"
-	depends on NET_VENDOR_SMC && MCA
+	depends on MCA
 	select CRC32
 	---help---
 	  If you have a network (Ethernet) card of this type and are running
@@ -291,7 +278,7 @@ config ULTRAMCA
 
 config ULTRA
 	tristate "SMC Ultra support"
-	depends on NET_VENDOR_SMC && ISA
+	depends on ISA
 	select CRC32
 	---help---
 	  If you have a network (Ethernet) card of this type, say Y and read
@@ -310,7 +297,7 @@ config ULTRA
 
 config ULTRA32
 	tristate "SMC Ultra32 EISA support"
-	depends on NET_VENDOR_SMC && EISA
+	depends on EISA
 	select CRC32
 	---help---
 	  If you have a network (Ethernet) card of this type, say Y and read
@@ -322,7 +309,7 @@ config ULTRA32
 
 config WD80x3
 	tristate "WD80*3 support"
-	depends on NET_VENDOR_SMC && ISA
+	depends on ISA
 	select CRC32
 	---help---
 	  If you have a network (Ethernet) card of this type, say Y and read
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index ab591bb..ed5836c 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -18,5 +18,6 @@ source "drivers/net/ethernet/broadcom/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
 source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/smsc/Kconfig"
 
 endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index d8cf120..983fd27 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
 obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
+obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
new file mode 100644
index 0000000..702efe6
--- /dev/null
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -0,0 +1,131 @@
+#
+# Western Digital/SMC network device configuration
+#
+
+config NET_VENDOR_SMSC
+	bool "SMC (SMSC)/Western Digital devices"
+	depends on ARM || ISA || MAC || MIPS || M32R || SUPERH || \
+		   BLACKFIN || MN10300 || COLDFIRE || PCI || PCMCIA
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  .
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about SMC/Western Digital cards. If you say Y, you will
+	  be asked for your specific card in the following questions.
+
+if NET_VENDOR_SMSC
+
+config SMC9194
+	tristate "SMC 9194 support"
+	depends on (ISA || MAC && BROKEN)
+	select CRC32
+	---help---
+	  This is support for the SMC9xxx based Ethernet cards. Choose this
+	  option if you have a DELL laptop with the docking station, or
+	  another SMC9192/9194 based chipset. Say Y if you want it compiled
+	  into the kernel, and read the file
+	  and the Ethernet-HOWTO,
+	  available from .
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called smc9194.
+
+config SMC91X
+	tristate "SMC 91C9x/91C1xxx support"
+	select CRC32
+	select MII
+	depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
+		    MN10300 || COLDFIRE)
+	---help---
+	  This is a driver for SMC's 91x series of Ethernet chipsets,
+	  including the SMC91C94 and the SMC91C111. Say Y if you want it
+	  compiled into the kernel, and read the file
+	  and the Ethernet-HOWTO,
+	  available from .
+ + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called smc91x. If you want to compile it as a + module, say M here and read . + +config PCMCIA_SMC91C92 + tristate "SMC 91Cxx PCMCIA support" + depends on PCMCIA + select CRC32 + select MII + ---help--- + Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA + (PC-card) Ethernet or Fast Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called smc91c92_cs. If unsure, say N. + +config EPIC100 + tristate "SMC EtherPower II" + depends on PCI + select CRC32 + select MII + ---help--- + This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC, + which is based on the SMC83c17x (EPIC/100). + More specific information and updates are available from + . + +config SMC911X + tristate "SMSC LAN911[5678] support" + select CRC32 + select MII + depends on (ARM || SUPERH || MN10300) + ---help--- + This is a driver for SMSC's LAN911x series of Ethernet chipsets + including the new LAN9115, LAN9116, LAN9117, and LAN9118. + Say Y if you want it compiled into the kernel, + and read the Ethernet-HOWTO, available from + . + + This driver is also available as a module. The module will be + called smc911x. If you want to compile it as a module, say M + here and read + +config SMSC911X + tristate "SMSC LAN911x/LAN921x families embedded ethernet support" + depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300) + select CRC32 + select MII + select PHYLIB + ---help--- + Say Y here if you want support for SMSC LAN911x and LAN921x families + of ethernet controllers. + + To compile this driver as a module, choose M here and read + . The module + will be called smsc911x. + +config SMSC911X_ARCH_HOOKS + def_bool n + depends on SMSC911X + ---help--- + If the arch enables this, it allows the arch to implement various + hooks for more comprehensive interrupt control and also to override + the source of the MAC address. + +config SMSC9420 + tristate "SMSC LAN9420 PCI ethernet adapter support" + depends on PCI + select CRC32 + select PHYLIB + select SMSC_PHY + ---help--- + This is a driver for SMSC's LAN9420 PCI ethernet adapter. + Say Y if you want it compiled into the kernel, + and read the Ethernet-HOWTO, available from + . + + This driver is also available as a module. The module will be + called smsc9420. If you want to compile it as a module, say M + here and read + +endif # NET_VENDOR_SMSC diff --git a/drivers/net/ethernet/smsc/Makefile b/drivers/net/ethernet/smsc/Makefile new file mode 100644 index 0000000..f3438de --- /dev/null +++ b/drivers/net/ethernet/smsc/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the SMSC network device drivers. +# + +obj-$(CONFIG_SMC9194) += smc9194.o +obj-$(CONFIG_SMC91X) += smc91x.o +obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o +obj-$(CONFIG_EPIC100) += epic100.o +obj-$(CONFIG_SMSC9420) += smsc9420.o +obj-$(CONFIG_SMC911X) += smc911x.o +obj-$(CONFIG_SMSC911X) += smsc911x.o diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c new file mode 100644 index 0000000..814c187 --- /dev/null +++ b/drivers/net/ethernet/smsc/epic100.c @@ -0,0 +1,1609 @@ +/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */ +/* + Written/copyright 1997-2001 by Donald Becker. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. 
+  Drivers based on or derived from this code fall under the GPL and must
+  retain the authorship, copyright and license notice.  This file is not
+  a complete program and may only be used when the entire operating
+  system is licensed under the GPL.
+
+  This driver is for the SMC83c170/175 "EPIC" series, as used on the
+  SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
+
+  The author may be reached as becker@scyld.com, or C/O
+  Scyld Computing Corporation
+  410 Severn Ave., Suite 210
+  Annapolis MD 21403
+
+  Information and updates available at
+  http://www.scyld.com/network/epic100.html
+  [this link no longer provides anything useful -jgarzik]
+
+  ---------------------------------------------------------------------
+
+*/
+
+#define DRV_NAME        "epic100"
+#define DRV_VERSION     "2.1"
+#define DRV_RELDATE     "Sept 11, 2006"
+
+/* The user-configurable values.
+   These may be modified when a driver module is loaded.*/
+
+static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
+
+/* Used to pass the full-duplex flag, etc. */
+#define MAX_UNITS 8		/* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for operational efficiency.
+   The compiler will convert '%'<2^N> into a bit mask.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE	256
+#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
+#define RX_RING_SIZE	256
+#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct epic_tx_desc)
+#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct epic_rx_desc)
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT	(2*HZ)
+
+#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer.*/
+
+/* Bytes transferred to chip before transmission starts. */
+/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
+#define TX_FIFO_THRESH 256
+#define RX_FIFO_THRESH 1		/* 0-3, 0==32, 64,96, or 3==128 bytes  */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+
+/* These identify the driver base version and may not be removed.
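Two idioms in the constants above do real work: TX_QUEUE_LEN is kept below TX_RING_SIZE so the ring never completely fills, and the ring sizes are powers of two so the '%' in the index math reduces to a bit mask while the free-running cur/dirty counters stay correct across 32-bit wraparound. A minimal userspace sketch of that arithmetic (hypothetical counter values, not driver code):

#include <stdio.h>

#define RING_SIZE 256u            /* power of two, as in TX/RX_RING_SIZE */

int main(void)
{
    unsigned int cur = 4294967290u;     /* counter about to wrap */
    unsigned int dirty = 4294967285u;

    /* Unsigned subtraction stays correct across wraparound... */
    printf("in flight: %u\n", cur - dirty);    /* prints 5 */
    /* ...and '% RING_SIZE' is a cheap mask for power-of-two sizes. */
    printf("slot: %u\n", cur % RING_SIZE);     /* same as cur & (RING_SIZE-1) */
    return 0;
}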
*/ +static char version[] __devinitdata = +DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker \n"; +static char version2[] __devinitdata = +" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(debug, int, 0); +module_param(rx_copybreak, int, 0); +module_param_array(options, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)"); +MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex"); +MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)"); + +/* + Theory of Operation + +I. Board Compatibility + +This device driver is designed for the SMC "EPIC/100", the SMC +single-chip Ethernet controllers for PCI. This chip is used on +the SMC EtherPower II boards. + +II. Board-specific settings + +PCI bus devices are configured by the system at boot time, so no jumpers +need to be set on the board. The system BIOS will assign the +PCI INTA signal to a (preferably otherwise unused) system IRQ line. +Note: Kernel versions earlier than 1.3.73 do not support shared PCI +interrupt lines. + +III. Driver operation + +IIIa. Ring buffers + +IVb. References + +http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf +http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf +http://scyld.com/expert/NWay.html +http://www.national.com/pf/DP/DP83840A.html + +IVc. Errata + +*/ + + +enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 }; + +#define EPIC_TOTAL_SIZE 0x100 +#define USE_IO_OPS 1 + +typedef enum { + SMSC_83C170_0, + SMSC_83C170, + SMSC_83C175, +} chip_t; + + +struct epic_chip_info { + const char *name; + int drv_flags; /* Driver use, intended as capability flags. */ +}; + + +/* indexed by chip_t */ +static const struct epic_chip_info pci_id_tbl[] = { + { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN }, + { "SMSC EPIC/100 83c170", TYPE2_INTR }, + { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN }, +}; + + +static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = { + { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 }, + { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 }, + { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 }, + { 0,} +}; +MODULE_DEVICE_TABLE (pci, epic_pci_tbl); + + +#ifndef USE_IO_OPS +#undef inb +#undef inw +#undef inl +#undef outb +#undef outw +#undef outl +#define inb readb +#define inw readw +#define inl readl +#define outb writeb +#define outw writew +#define outl writel +#endif + +/* Offsets to registers, using the (ugh) SMC names. */ +enum epic_registers { + COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14, + PCIBurstCnt=0x18, + TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */ + MIICtrl=0x30, MIIData=0x34, MIICfg=0x38, + LAN0=64, /* MAC address. */ + MC0=80, /* Multicast filter table. */ + RxCtrl=96, TxCtrl=112, TxSTAT=0x74, + PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC, +}; + +/* Interrupt register bits, using my own meaningful names. 
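The PCI probe glue above follows the standard match-table pattern: each epic_pci_tbl entry carries a chip_t value in driver_data, and the probe routine uses it to index pci_id_tbl for the human-readable name and capability flags. A kernel-style fragment of the idea (example_probe is a hypothetical stand-in, reusing the driver's tables, and builds only in-tree):

static int __devinit example_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	/* driver_data carries the chip_t index chosen at match time */
	int chip_idx = (int)ent->driver_data;

	dev_info(&pdev->dev, "matched as %s, flags %#x\n",
		 pci_id_tbl[chip_idx].name, pci_id_tbl[chip_idx].drv_flags);
	return 0;
}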
*/ +enum IntrStatus { + TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000, + PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000, + RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100, + TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010, + RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001, +}; +enum CommandBits { + StopRx=1, StartRx=2, TxQueued=4, RxQueued=8, + StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80, +}; + +#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */ + +#define EpicNapiEvent (TxEmpty | TxDone | \ + RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull) +#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent) + +static const u16 media2miictl[16] = { + 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* + * The EPIC100 Rx and Tx buffer descriptors. Note that these + * really ARE host-endian; it's not a misannotation. We tell + * the card to byteswap them internally on big-endian hosts - + * look for #ifdef __BIG_ENDIAN in epic_open(). + */ + +struct epic_tx_desc { + u32 txstatus; + u32 bufaddr; + u32 buflength; + u32 next; +}; + +struct epic_rx_desc { + u32 rxstatus; + u32 bufaddr; + u32 buflength; + u32 next; +}; + +enum desc_status_bits { + DescOwn=0x8000, +}; + +#define PRIV_ALIGN 15 /* Required alignment mask */ +struct epic_private { + struct epic_rx_desc *rx_ring; + struct epic_tx_desc *tx_ring; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ + struct sk_buff* tx_skbuff[TX_RING_SIZE]; + /* The addresses of receive-in-place skbuffs. */ + struct sk_buff* rx_skbuff[RX_RING_SIZE]; + + dma_addr_t tx_ring_dma; + dma_addr_t rx_ring_dma; + + /* Ring pointers. */ + spinlock_t lock; /* Group with Tx control cache line. */ + spinlock_t napi_lock; + struct napi_struct napi; + unsigned int reschedule_in_poll; + unsigned int cur_tx, dirty_tx; + + unsigned int cur_rx, dirty_rx; + u32 irq_mask; + unsigned int rx_buf_sz; /* Based on MTU+slack. */ + + struct pci_dev *pci_dev; /* PCI bus location. */ + int chip_id, chip_flags; + + struct timer_list timer; /* Media selection timer. */ + int tx_threshold; + unsigned char mc_filter[8]; + signed char phys[4]; /* MII device addresses. */ + u16 advertising; /* NWay media advertisement */ + int mii_phy_cnt; + struct mii_if_info mii; + unsigned int tx_full:1; /* The Tx queue is full. */ + unsigned int default_port:4; /* Last dev->if_port value. 
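The DescOwn bit declared above is the whole synchronization protocol between host and NIC: whoever owns a descriptor may touch it, and ownership moves with a single status write (on real hardware paired with a memory barrier, a point the driver itself raises later in epic_rx). A runnable userspace sketch of the handshake, using a made-up status value:

#include <stdint.h>
#include <stdio.h>

#define DESC_OWN 0x8000u                 /* mirrors DescOwn above */

struct rx_desc { uint32_t rxstatus; };

/* The NIC writes status and clears DESC_OWN when a frame lands;
 * the host processes the slot and hands it back by setting the bit. */
static int host_owns(const struct rx_desc *d)
{
    return !(d->rxstatus & DESC_OWN);
}

int main(void)
{
    struct rx_desc d = { .rxstatus = 0x004A0001 };   /* pretend NIC wrote this */

    if (host_owns(&d)) {
        printf("frame len %u\n", (unsigned)(d.rxstatus >> 16));
        d.rxstatus = DESC_OWN;           /* return the slot to the NIC */
    }
    return 0;
}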
*/ +}; + +static int epic_open(struct net_device *dev); +static int read_eeprom(long ioaddr, int location); +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *dev, int phy_id, int loc, int val); +static void epic_restart(struct net_device *dev); +static void epic_timer(unsigned long data); +static void epic_tx_timeout(struct net_device *dev); +static void epic_init_ring(struct net_device *dev); +static netdev_tx_t epic_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int epic_rx(struct net_device *dev, int budget); +static int epic_poll(struct napi_struct *napi, int budget); +static irqreturn_t epic_interrupt(int irq, void *dev_instance); +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static const struct ethtool_ops netdev_ethtool_ops; +static int epic_close(struct net_device *dev); +static struct net_device_stats *epic_get_stats(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); + +static const struct net_device_ops epic_netdev_ops = { + .ndo_open = epic_open, + .ndo_stop = epic_close, + .ndo_start_xmit = epic_start_xmit, + .ndo_tx_timeout = epic_tx_timeout, + .ndo_get_stats = epic_get_stats, + .ndo_set_multicast_list = set_rx_mode, + .ndo_do_ioctl = netdev_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __devinit epic_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + static int card_idx = -1; + long ioaddr; + int chip_idx = (int) ent->driver_data; + int irq; + struct net_device *dev; + struct epic_private *ep; + int i, ret, option = 0, duplex = 0; + void *ring_space; + dma_addr_t ring_dma; + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + static int printed_version; + if (!printed_version++) + printk(KERN_INFO "%s%s", version, version2); +#endif + + card_idx++; + + ret = pci_enable_device(pdev); + if (ret) + goto out; + irq = pdev->irq; + + if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) { + dev_err(&pdev->dev, "no PCI region space\n"); + ret = -ENODEV; + goto err_out_disable; + } + + pci_set_master(pdev); + + ret = pci_request_regions(pdev, DRV_NAME); + if (ret < 0) + goto err_out_disable; + + ret = -ENOMEM; + + dev = alloc_etherdev(sizeof (*ep)); + if (!dev) { + dev_err(&pdev->dev, "no memory for eth device\n"); + goto err_out_free_res; + } + SET_NETDEV_DEV(dev, &pdev->dev); + +#ifdef USE_IO_OPS + ioaddr = pci_resource_start (pdev, 0); +#else + ioaddr = pci_resource_start (pdev, 1); + ioaddr = (long) pci_ioremap_bar(pdev, 1); + if (!ioaddr) { + dev_err(&pdev->dev, "ioremap failed\n"); + goto err_out_free_netdev; + } +#endif + + pci_set_drvdata(pdev, dev); + ep = netdev_priv(dev); + ep->mii.dev = dev; + ep->mii.mdio_read = mdio_read; + ep->mii.mdio_write = mdio_write; + ep->mii.phy_id_mask = 0x1f; + ep->mii.reg_num_mask = 0x1f; + + ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_iounmap; + ep->tx_ring = ring_space; + ep->tx_ring_dma = ring_dma; + + ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_unmap_tx; + ep->rx_ring = ring_space; + ep->rx_ring_dma = ring_dma; + + if (dev->mem_start) { + option = dev->mem_start; + duplex = (dev->mem_start & 16) ? 
1 : 0; + } else if (card_idx >= 0 && card_idx < MAX_UNITS) { + if (options[card_idx] >= 0) + option = options[card_idx]; + if (full_duplex[card_idx] >= 0) + duplex = full_duplex[card_idx]; + } + + dev->base_addr = ioaddr; + dev->irq = irq; + + spin_lock_init(&ep->lock); + spin_lock_init(&ep->napi_lock); + ep->reschedule_in_poll = 0; + + /* Bring the chip out of low-power mode. */ + outl(0x4200, ioaddr + GENCTL); + /* Magic?! If we don't set this bit the MII interface won't work. */ + /* This magic is documented in SMSC app note 7.15 */ + for (i = 16; i > 0; i--) + outl(0x0008, ioaddr + TEST1); + + /* Turn on the MII transceiver. */ + outl(0x12, ioaddr + MIICfg); + if (chip_idx == 1) + outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); + outl(0x0200, ioaddr + GENCTL); + + /* Note: the '175 does not have a serial EEPROM. */ + for (i = 0; i < 3; i++) + ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4)); + + if (debug > 2) { + dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n"); + for (i = 0; i < 64; i++) + printk(" %4.4x%s", read_eeprom(ioaddr, i), + i % 16 == 15 ? "\n" : ""); + } + + ep->pci_dev = pdev; + ep->chip_id = chip_idx; + ep->chip_flags = pci_id_tbl[chip_idx].drv_flags; + ep->irq_mask = + (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) + | CntFull | TxUnderrun | EpicNapiEvent; + + /* Find the connected MII xcvrs. + Doing this in open() would allow detecting external xcvrs later, but + takes much time and no cards have external MII. */ + { + int phy, phy_idx = 0; + for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) { + int mii_status = mdio_read(dev, phy, MII_BMSR); + if (mii_status != 0xffff && mii_status != 0x0000) { + ep->phys[phy_idx++] = phy; + dev_info(&pdev->dev, + "MII transceiver #%d control " + "%4.4x status %4.4x.\n", + phy, mdio_read(dev, phy, 0), mii_status); + } + } + ep->mii_phy_cnt = phy_idx; + if (phy_idx != 0) { + phy = ep->phys[0]; + ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE); + dev_info(&pdev->dev, + "Autonegotiation advertising %4.4x link " + "partner %4.4x.\n", + ep->mii.advertising, mdio_read(dev, phy, 5)); + } else if ( ! (ep->chip_flags & NO_MII)) { + dev_warn(&pdev->dev, + "***WARNING***: No MII transceiver found!\n"); + /* Use the known PHY address of the EPII. */ + ep->phys[0] = 3; + } + ep->mii.phy_id = ep->phys[0]; + } + + /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */ + if (ep->chip_flags & MII_PWRDWN) + outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL); + outl(0x0008, ioaddr + GENCTL); + + /* The lower four bits are the media type. */ + if (duplex) { + ep->mii.force_media = ep->mii.full_duplex = 1; + dev_info(&pdev->dev, "Forced full duplex requested.\n"); + } + dev->if_port = ep->default_port = option; + + /* The Epic-specific entries in the device structure. 
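The probe path above allocates both descriptor rings from coherent (consistent) DMA memory and unwinds in reverse order on failure. A kernel-style sketch of that pairing with the same 2.6-era pci_alloc_consistent/pci_free_consistent API (example_alloc_rings is a hypothetical helper, not a driver function; modern code would use dma_alloc_coherent):

static int example_alloc_rings(struct pci_dev *pdev, struct epic_private *ep)
{
	ep->tx_ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE,
					   &ep->tx_ring_dma);
	if (!ep->tx_ring)
		return -ENOMEM;

	ep->rx_ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE,
					   &ep->rx_ring_dma);
	if (!ep->rx_ring) {
		/* unwind in reverse order, exactly as the goto chain does */
		pci_free_consistent(pdev, TX_TOTAL_SIZE,
				    ep->tx_ring, ep->tx_ring_dma);
		return -ENOMEM;
	}
	return 0;
}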
*/ + dev->netdev_ops = &epic_netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + netif_napi_add(dev, &ep->napi, epic_poll, 64); + + ret = register_netdev(dev); + if (ret < 0) + goto err_out_unmap_rx; + + printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n", + dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq, + dev->dev_addr); + +out: + return ret; + +err_out_unmap_rx: + pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); +err_out_unmap_tx: + pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); +err_out_iounmap: +#ifndef USE_IO_OPS + iounmap(ioaddr); +err_out_free_netdev: +#endif + free_netdev(dev); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); + goto out; +} + +/* Serial EEPROM section. */ + +/* EEPROM_Ctrl bits. */ +#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ +#define EE_CS 0x02 /* EEPROM chip select. */ +#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */ +#define EE_WRITE_0 0x01 +#define EE_WRITE_1 0x09 +#define EE_DATA_READ 0x10 /* EEPROM chip data out. */ +#define EE_ENB (0x0001 | EE_CS) + +/* Delay between EEPROM clock transitions. + This serves to flush the operation to the PCI bus. + */ + +#define eeprom_delay() inl(ee_addr) + +/* The EEPROM commands include the alway-set leading bit. */ +#define EE_WRITE_CMD (5 << 6) +#define EE_READ64_CMD (6 << 6) +#define EE_READ256_CMD (6 << 8) +#define EE_ERASE_CMD (7 << 6) + +static void epic_disable_int(struct net_device *dev, struct epic_private *ep) +{ + long ioaddr = dev->base_addr; + + outl(0x00000000, ioaddr + INTMASK); +} + +static inline void __epic_pci_commit(long ioaddr) +{ +#ifndef USE_IO_OPS + inl(ioaddr + INTMASK); +#endif +} + +static inline void epic_napi_irq_off(struct net_device *dev, + struct epic_private *ep) +{ + long ioaddr = dev->base_addr; + + outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK); + __epic_pci_commit(ioaddr); +} + +static inline void epic_napi_irq_on(struct net_device *dev, + struct epic_private *ep) +{ + long ioaddr = dev->base_addr; + + /* No need to commit possible posted write */ + outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK); +} + +static int __devinit read_eeprom(long ioaddr, int location) +{ + int i; + int retval = 0; + long ee_addr = ioaddr + EECTL; + int read_cmd = location | + (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD); + + outl(EE_ENB & ~EE_CS, ee_addr); + outl(EE_ENB, ee_addr); + + /* Shift the read command bits out. */ + for (i = 12; i >= 0; i--) { + short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0; + outl(EE_ENB | dataval, ee_addr); + eeprom_delay(); + outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); + eeprom_delay(); + } + outl(EE_ENB, ee_addr); + + for (i = 16; i > 0; i--) { + outl(EE_ENB | EE_SHIFT_CLK, ee_addr); + eeprom_delay(); + retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0); + outl(EE_ENB, ee_addr); + eeprom_delay(); + } + + /* Terminate the EEPROM access. */ + outl(EE_ENB & ~EE_CS, ee_addr); + return retval; +} + +#define MII_READOP 1 +#define MII_WRITEOP 2 +static int mdio_read(struct net_device *dev, int phy_id, int location) +{ + long ioaddr = dev->base_addr; + int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP; + int i; + + outl(read_cmd, ioaddr + MIICtrl); + /* Typical operation takes 25 loops. */ + for (i = 400; i > 0; i--) { + barrier(); + if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) { + /* Work around read failure bug. 
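mdio_read() above is a classic bounded-poll loop: kick off a management read, then spin a generous but finite number of times until the busy bit clears, returning 0xffff on timeout so callers can treat the PHY as absent. A runnable userspace sketch of the shape (fake_reg stands in for the MIICtrl register; a real driver polls with inl()):

#include <stdio.h>
#include <stdint.h>

#define MII_READOP 1u

static uint32_t fake_reg = MII_READOP;   /* busy at first... */

static uint32_t read_ctrl(void)
{
    static int polls;
    if (++polls >= 25)                   /* ...idle after ~25 polls, as the
                                          * driver's comment observes */
        fake_reg &= ~MII_READOP;
    return fake_reg;
}

int main(void)
{
    for (int i = 400; i > 0; i--) {      /* generous bound, like the driver */
        if (!(read_ctrl() & MII_READOP)) {
            printf("MII idle, data register now valid\n");
            return 0;
        }
    }
    printf("timed out; return 0xffff like the driver\n");
    return 1;
}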
*/ + if (phy_id == 1 && location < 6 && + inw(ioaddr + MIIData) == 0xffff) { + outl(read_cmd, ioaddr + MIICtrl); + continue; + } + return inw(ioaddr + MIIData); + } + } + return 0xffff; +} + +static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) +{ + long ioaddr = dev->base_addr; + int i; + + outw(value, ioaddr + MIIData); + outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl); + for (i = 10000; i > 0; i--) { + barrier(); + if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0) + break; + } +} + + +static int epic_open(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + long ioaddr = dev->base_addr; + int i; + int retval; + + /* Soft reset the chip. */ + outl(0x4001, ioaddr + GENCTL); + + napi_enable(&ep->napi); + if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) { + napi_disable(&ep->napi); + return retval; + } + + epic_init_ring(dev); + + outl(0x4000, ioaddr + GENCTL); + /* This magic is documented in SMSC app note 7.15 */ + for (i = 16; i > 0; i--) + outl(0x0008, ioaddr + TEST1); + + /* Pull the chip out of low-power mode, enable interrupts, and set for + PCI read multiple. The MIIcfg setting and strange write order are + required by the details of which bits are reset and the transceiver + wiring on the Ositech CardBus card. + */ +#if 0 + outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); +#endif + if (ep->chip_flags & MII_PWRDWN) + outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); + + /* Tell the chip to byteswap descriptors on big-endian hosts */ +#ifdef __BIG_ENDIAN + outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); + inl(ioaddr + GENCTL); + outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); +#else + outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); + inl(ioaddr + GENCTL); + outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); +#endif + + udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */ + + for (i = 0; i < 3; i++) + outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); + + ep->tx_threshold = TX_FIFO_THRESH; + outl(ep->tx_threshold, ioaddr + TxThresh); + + if (media2miictl[dev->if_port & 15]) { + if (ep->mii_phy_cnt) + mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]); + if (dev->if_port == 1) { + if (debug > 1) + printk(KERN_INFO "%s: Using the 10base2 transceiver, MII " + "status %4.4x.\n", + dev->name, mdio_read(dev, ep->phys[0], MII_BMSR)); + } + } else { + int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA); + if (mii_lpa != 0xffff) { + if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL) + ep->mii.full_duplex = 1; + else if (! (mii_lpa & LPA_LPACK)) + mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); + if (debug > 1) + printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d" + " register read of %4.4x.\n", dev->name, + ep->mii.full_duplex ? "full" : "half", + ep->phys[0], mii_lpa); + } + } + + outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); + outl(ep->rx_ring_dma, ioaddr + PRxCDAR); + outl(ep->tx_ring_dma, ioaddr + PTxCDAR); + + /* Start the chip's Rx process. */ + set_rx_mode(dev); + outl(StartRx | RxQueued, ioaddr + COMMAND); + + netif_start_queue(dev); + + /* Enable interrupts by setting the interrupt mask. */ + outl((ep->chip_flags & TYPE2_INTR ? 
PCIBusErr175 : PCIBusErr170) + | CntFull | TxUnderrun + | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); + + if (debug > 1) + printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x " + "%s-duplex.\n", + dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL), + ep->mii.full_duplex ? "full" : "half"); + + /* Set the timer to switch to check for link beat and perhaps switch + to an alternate media type. */ + init_timer(&ep->timer); + ep->timer.expires = jiffies + 3*HZ; + ep->timer.data = (unsigned long)dev; + ep->timer.function = epic_timer; /* timer handler */ + add_timer(&ep->timer); + + return 0; +} + +/* Reset the chip to recover from a PCI transaction error. + This may occur at interrupt time. */ +static void epic_pause(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + + netif_stop_queue (dev); + + /* Disable interrupts by clearing the interrupt mask. */ + outl(0x00000000, ioaddr + INTMASK); + /* Stop the chip's Tx and Rx DMA processes. */ + outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND); + + /* Update the error counts. */ + if (inw(ioaddr + COMMAND) != 0xffff) { + dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); + dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); + dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); + } + + /* Remove the packets on the Rx queue. */ + epic_rx(dev, RX_RING_SIZE); +} + +static void epic_restart(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + struct epic_private *ep = netdev_priv(dev); + int i; + + /* Soft reset the chip. */ + outl(0x4001, ioaddr + GENCTL); + + printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n", + dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); + udelay(1); + + /* This magic is documented in SMSC app note 7.15 */ + for (i = 16; i > 0; i--) + outl(0x0008, ioaddr + TEST1); + +#ifdef __BIG_ENDIAN + outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); +#else + outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); +#endif + outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); + if (ep->chip_flags & MII_PWRDWN) + outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); + + for (i = 0; i < 3; i++) + outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); + + ep->tx_threshold = TX_FIFO_THRESH; + outl(ep->tx_threshold, ioaddr + TxThresh); + outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); + outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)* + sizeof(struct epic_rx_desc), ioaddr + PRxCDAR); + outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)* + sizeof(struct epic_tx_desc), ioaddr + PTxCDAR); + + /* Start the chip's Rx process. */ + set_rx_mode(dev); + outl(StartRx | RxQueued, ioaddr + COMMAND); + + /* Enable interrupts by setting the interrupt mask. */ + outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) + | CntFull | TxUnderrun + | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); + + printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x" + " interrupt %4.4x.\n", + dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL), + (int)inl(ioaddr + INTSTAT)); +} + +static void check_media(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + long ioaddr = dev->base_addr; + int mii_lpa = ep->mii_phy_cnt ? 
mdio_read(dev, ep->phys[0], MII_LPA) : 0; + int negotiated = mii_lpa & ep->mii.advertising; + int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; + + if (ep->mii.force_media) + return; + if (mii_lpa == 0xffff) /* Bogus read */ + return; + if (ep->mii.full_duplex != duplex) { + ep->mii.full_duplex = duplex; + printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link" + " partner capability of %4.4x.\n", dev->name, + ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa); + outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); + } +} + +static void epic_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct epic_private *ep = netdev_priv(dev); + long ioaddr = dev->base_addr; + int next_tick = 5*HZ; + + if (debug > 3) { + printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n", + dev->name, (int)inl(ioaddr + TxSTAT)); + printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x " + "IntStatus %4.4x RxStatus %4.4x.\n", + dev->name, (int)inl(ioaddr + INTMASK), + (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT)); + } + + check_media(dev); + + ep->timer.expires = jiffies + next_tick; + add_timer(&ep->timer); +} + +static void epic_tx_timeout(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + long ioaddr = dev->base_addr; + + if (debug > 0) { + printk(KERN_WARNING "%s: Transmit timeout using MII device, " + "Tx status %4.4x.\n", + dev->name, (int)inw(ioaddr + TxSTAT)); + if (debug > 1) { + printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n", + dev->name, ep->dirty_tx, ep->cur_tx); + } + } + if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */ + dev->stats.tx_fifo_errors++; + outl(RestartTx, ioaddr + COMMAND); + } else { + epic_restart(dev); + outl(TxQueued, dev->base_addr + COMMAND); + } + + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + if (!ep->tx_full) + netif_wake_queue(dev); +} + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void epic_init_ring(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + int i; + + ep->tx_full = 0; + ep->dirty_tx = ep->cur_tx = 0; + ep->cur_rx = ep->dirty_rx = 0; + ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); + + /* Initialize all Rx descriptors. */ + for (i = 0; i < RX_RING_SIZE; i++) { + ep->rx_ring[i].rxstatus = 0; + ep->rx_ring[i].buflength = ep->rx_buf_sz; + ep->rx_ring[i].next = ep->rx_ring_dma + + (i+1)*sizeof(struct epic_rx_desc); + ep->rx_skbuff[i] = NULL; + } + /* Mark the last entry as wrapping the ring. */ + ep->rx_ring[i-1].next = ep->rx_ring_dma; + + /* Fill in the Rx buffers. Handle allocation failure gracefully. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2); + ep->rx_skbuff[i] = skb; + if (skb == NULL) + break; + skb_reserve(skb, 2); /* 16 byte align the IP header. */ + ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, + skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); + ep->rx_ring[i].rxstatus = DescOwn; + } + ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + + /* The Tx buffer descriptor is filled in as needed, but we + do need to clear the ownership bit. 
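epic_init_ring() above builds the hardware's view of the ring by chaining each descriptor's next pointer to its successor's DMA address and pointing the last one back at the first, so the chip can walk buffers forever without host help. A runnable userspace sketch with a pretend base address:

#include <stdio.h>
#include <stdint.h>

#define RING 8

struct desc { uint32_t next; };

int main(void)
{
    struct desc ring[RING];
    uint32_t base = 0x10000;                 /* pretend DMA address */

    for (int i = 0; i < RING; i++)           /* chain slot i -> slot i+1 ... */
        ring[i].next = base + (i + 1) * sizeof(struct desc);
    ring[RING - 1].next = base;              /* ...and close the loop */

    printf("last slot points back to %#x\n", (unsigned)ring[RING - 1].next);
    return 0;
}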
*/ + for (i = 0; i < TX_RING_SIZE; i++) { + ep->tx_skbuff[i] = NULL; + ep->tx_ring[i].txstatus = 0x0000; + ep->tx_ring[i].next = ep->tx_ring_dma + + (i+1)*sizeof(struct epic_tx_desc); + } + ep->tx_ring[i-1].next = ep->tx_ring_dma; +} + +static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + int entry, free_count; + u32 ctrl_word; + unsigned long flags; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + /* Caution: the write order is important here, set the field with the + "ownership" bit last. */ + + /* Calculate the next Tx descriptor entry. */ + spin_lock_irqsave(&ep->lock, flags); + free_count = ep->cur_tx - ep->dirty_tx; + entry = ep->cur_tx % TX_RING_SIZE; + + ep->tx_skbuff[entry] = skb; + ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data, + skb->len, PCI_DMA_TODEVICE); + if (free_count < TX_QUEUE_LEN/2) {/* Typical path */ + ctrl_word = 0x100000; /* No interrupt */ + } else if (free_count == TX_QUEUE_LEN/2) { + ctrl_word = 0x140000; /* Tx-done intr. */ + } else if (free_count < TX_QUEUE_LEN - 1) { + ctrl_word = 0x100000; /* No Tx-done intr. */ + } else { + /* Leave room for an additional entry. */ + ctrl_word = 0x140000; /* Tx-done intr. */ + ep->tx_full = 1; + } + ep->tx_ring[entry].buflength = ctrl_word | skb->len; + ep->tx_ring[entry].txstatus = + ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16) + | DescOwn; + + ep->cur_tx++; + if (ep->tx_full) + netif_stop_queue(dev); + + spin_unlock_irqrestore(&ep->lock, flags); + /* Trigger an immediate transmit demand. */ + outl(TxQueued, dev->base_addr + COMMAND); + + if (debug > 4) + printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, " + "flag %2.2x Tx status %8.8x.\n", + dev->name, (int)skb->len, entry, ctrl_word, + (int)inl(dev->base_addr + TxSTAT)); + + return NETDEV_TX_OK; +} + +static void epic_tx_error(struct net_device *dev, struct epic_private *ep, + int status) +{ + struct net_device_stats *stats = &dev->stats; + +#ifndef final_version + /* There was an major error, log it. */ + if (debug > 1) + printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", + dev->name, status); +#endif + stats->tx_errors++; + if (status & 0x1050) + stats->tx_aborted_errors++; + if (status & 0x0008) + stats->tx_carrier_errors++; + if (status & 0x0040) + stats->tx_window_errors++; + if (status & 0x0010) + stats->tx_fifo_errors++; +} + +static void epic_tx(struct net_device *dev, struct epic_private *ep) +{ + unsigned int dirty_tx, cur_tx; + + /* + * Note: if this lock becomes a problem we can narrow the locked + * region at the cost of occasionally grabbing the lock more times. + */ + cur_tx = ep->cur_tx; + for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) { + struct sk_buff *skb; + int entry = dirty_tx % TX_RING_SIZE; + int txstatus = ep->tx_ring[entry].txstatus; + + if (txstatus & DescOwn) + break; /* It still hasn't been Txed */ + + if (likely(txstatus & 0x0001)) { + dev->stats.collisions += (txstatus >> 8) & 15; + dev->stats.tx_packets++; + dev->stats.tx_bytes += ep->tx_skbuff[entry]->len; + } else + epic_tx_error(dev, ep, txstatus); + + /* Free the original skb. */ + skb = ep->tx_skbuff[entry]; + pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + ep->tx_skbuff[entry] = NULL; + } + +#ifndef final_version + if (cur_tx - dirty_tx > TX_RING_SIZE) { + printk(KERN_WARNING + "%s: Out-of-sync dirty pointer, %d vs. 
%d, full=%d.\n", + dev->name, dirty_tx, cur_tx, ep->tx_full); + dirty_tx += TX_RING_SIZE; + } +#endif + ep->dirty_tx = dirty_tx; + if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) { + /* The ring is no longer full, allow new TX entries. */ + ep->tx_full = 0; + netif_wake_queue(dev); + } +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static irqreturn_t epic_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct epic_private *ep = netdev_priv(dev); + long ioaddr = dev->base_addr; + unsigned int handled = 0; + int status; + + status = inl(ioaddr + INTSTAT); + /* Acknowledge all of the current interrupt sources ASAP. */ + outl(status & EpicNormalEvent, ioaddr + INTSTAT); + + if (debug > 4) { + printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new " + "intstat=%#8.8x.\n", dev->name, status, + (int)inl(ioaddr + INTSTAT)); + } + + if ((status & IntrSummary) == 0) + goto out; + + handled = 1; + + if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { + spin_lock(&ep->napi_lock); + if (napi_schedule_prep(&ep->napi)) { + epic_napi_irq_off(dev, ep); + __napi_schedule(&ep->napi); + } else + ep->reschedule_in_poll++; + spin_unlock(&ep->napi_lock); + } + status &= ~EpicNapiEvent; + + /* Check uncommon events all at once. */ + if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) { + if (status == EpicRemoved) + goto out; + + /* Always update the error counts to avoid overhead later. */ + dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); + dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); + dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); + + if (status & TxUnderrun) { /* Tx FIFO underflow. */ + dev->stats.tx_fifo_errors++; + outl(ep->tx_threshold += 128, ioaddr + TxThresh); + /* Restart the transmit process. */ + outl(RestartTx, ioaddr + COMMAND); + } + if (status & PCIBusErr170) { + printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n", + dev->name, status); + epic_pause(dev); + epic_restart(dev); + } + /* Clear all error sources. */ + outl(status & 0x7f18, ioaddr + INTSTAT); + } + +out: + if (debug > 3) { + printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n", + dev->name, status); + } + + return IRQ_RETVAL(handled); +} + +static int epic_rx(struct net_device *dev, int budget) +{ + struct epic_private *ep = netdev_priv(dev); + int entry = ep->cur_rx % RX_RING_SIZE; + int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx; + int work_done = 0; + + if (debug > 4) + printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry, + ep->rx_ring[entry].rxstatus); + + if (rx_work_limit > budget) + rx_work_limit = budget; + + /* If we own the next entry, it's a new packet. Send it up. */ + while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) { + int status = ep->rx_ring[entry].rxstatus; + + if (debug > 4) + printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status); + if (--rx_work_limit < 0) + break; + if (status & 0x2006) { + if (debug > 2) + printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n", + dev->name, status); + if (status & 0x2000) { + printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " + "multiple buffers, status %4.4x!\n", dev->name, status); + dev->stats.rx_length_errors++; + } else if (status & 0x0006) + /* Rx Frame errors are counted in hardware. */ + dev->stats.rx_errors++; + } else { + /* Malloc up new buffer, compatible with net-2e. */ + /* Omit the four octet CRC from the length. 
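epic_interrupt() above shows the standard NAPI hand-off: acknowledge the hardware, mask the event sources the poll routine will service, and schedule the softirq; napi_schedule_prep() guarantees only one scheduling wins. A kernel-style fragment of the same shape (example_priv and example_mask_napi_irqs are hypothetical stand-ins for ep and epic_napi_irq_off, using the 2.6.29+ NAPI API):

struct example_priv {
	struct napi_struct napi;
	/* device registers, locks, ... */
};

static inline void example_mask_napi_irqs(struct example_priv *p)
{
	/* would clear the Rx/Tx bits in the device's interrupt mask */
}

static irqreturn_t example_irq(int irq, void *dev_instance)
{
	struct example_priv *p = dev_instance;

	if (napi_schedule_prep(&p->napi)) {
		example_mask_napi_irqs(p);   /* stop what poll() will handle */
		__napi_schedule(&p->napi);   /* poll() then runs in softirq */
	}
	return IRQ_HANDLED;
}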
*/ + short pkt_len = (status >> 16) - 4; + struct sk_buff *skb; + + if (pkt_len > PKT_BUF_SZ - 4) { + printk(KERN_ERR "%s: Oversized Ethernet frame, status %x " + "%d bytes.\n", + dev->name, status, pkt_len); + pkt_len = 1514; + } + /* Check if the packet is long enough to accept without copying + to a minimally-sized skbuff. */ + if (pkt_len < rx_copybreak && + (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { + skb_reserve(skb, 2); /* 16 byte align the IP header */ + pci_dma_sync_single_for_cpu(ep->pci_dev, + ep->rx_ring[entry].bufaddr, + ep->rx_buf_sz, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len); + skb_put(skb, pkt_len); + pci_dma_sync_single_for_device(ep->pci_dev, + ep->rx_ring[entry].bufaddr, + ep->rx_buf_sz, + PCI_DMA_FROMDEVICE); + } else { + pci_unmap_single(ep->pci_dev, + ep->rx_ring[entry].bufaddr, + ep->rx_buf_sz, PCI_DMA_FROMDEVICE); + skb_put(skb = ep->rx_skbuff[entry], pkt_len); + ep->rx_skbuff[entry] = NULL; + } + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + work_done++; + entry = (++ep->cur_rx) % RX_RING_SIZE; + } + + /* Refill the Rx ring buffers. */ + for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) { + entry = ep->dirty_rx % RX_RING_SIZE; + if (ep->rx_skbuff[entry] == NULL) { + struct sk_buff *skb; + skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2); + if (skb == NULL) + break; + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, + skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); + work_done++; + } + /* AV: shouldn't we add a barrier here? */ + ep->rx_ring[entry].rxstatus = DescOwn; + } + return work_done; +} + +static void epic_rx_err(struct net_device *dev, struct epic_private *ep) +{ + long ioaddr = dev->base_addr; + int status; + + status = inl(ioaddr + INTSTAT); + + if (status == EpicRemoved) + return; + if (status & RxOverflow) /* Missed a Rx frame. */ + dev->stats.rx_errors++; + if (status & (RxOverflow | RxFull)) + outw(RxQueued, ioaddr + COMMAND); +} + +static int epic_poll(struct napi_struct *napi, int budget) +{ + struct epic_private *ep = container_of(napi, struct epic_private, napi); + struct net_device *dev = ep->mii.dev; + int work_done = 0; + long ioaddr = dev->base_addr; + +rx_action: + + epic_tx(dev, ep); + + work_done += epic_rx(dev, budget); + + epic_rx_err(dev, ep); + + if (work_done < budget) { + unsigned long flags; + int more; + + /* A bit baroque but it avoids a (space hungry) spin_unlock */ + + spin_lock_irqsave(&ep->napi_lock, flags); + + more = ep->reschedule_in_poll; + if (!more) { + __napi_complete(napi); + outl(EpicNapiEvent, ioaddr + INTSTAT); + epic_napi_irq_on(dev, ep); + } else + ep->reschedule_in_poll--; + + spin_unlock_irqrestore(&ep->napi_lock, flags); + + if (more) + goto rx_action; + } + + return work_done; +} + +static int epic_close(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + struct epic_private *ep = netdev_priv(dev); + struct sk_buff *skb; + int i; + + netif_stop_queue(dev); + napi_disable(&ep->napi); + + if (debug > 1) + printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", + dev->name, (int)inl(ioaddr + INTSTAT)); + + del_timer_sync(&ep->timer); + + epic_disable_int(dev, ep); + + free_irq(dev->irq, dev); + + epic_pause(dev); + + /* Free all the skbuffs in the Rx queue. 
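The rx_copybreak test in epic_rx() above trades one memcpy for keeping the large mapped buffer in the ring: short frames are copied into a right-sized buffer, long ones are handed up in place and the slot refilled. A runnable userspace sketch of the policy (hypothetical threshold and frame):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define RX_COPYBREAK 200                 /* hypothetical threshold */

static void deliver(const uint8_t *frame, int len, int copied)
{
    printf("%d-byte frame delivered (%s)\n", len,
           copied ? "copied, ring buffer reused"
                  : "in place, slot refilled");
}

int main(void)
{
    uint8_t ring_buf[1536] = { 0 };      /* stands in for the mapped skb */
    int pkt_len = 60;

    if (pkt_len < RX_COPYBREAK) {
        uint8_t small[RX_COPYBREAK];
        memcpy(small, ring_buf, pkt_len);    /* cheap for short frames */
        deliver(small, pkt_len, 1);
    } else {
        deliver(ring_buf, pkt_len, 0);       /* avoid copying big payloads */
    }
    return 0;
}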
*/ + for (i = 0; i < RX_RING_SIZE; i++) { + skb = ep->rx_skbuff[i]; + ep->rx_skbuff[i] = NULL; + ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ + ep->rx_ring[i].buflength = 0; + if (skb) { + pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr, + ep->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb(skb); + } + ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */ + } + for (i = 0; i < TX_RING_SIZE; i++) { + skb = ep->tx_skbuff[i]; + ep->tx_skbuff[i] = NULL; + if (!skb) + continue; + pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + } + + /* Green! Leave the chip in low-power mode. */ + outl(0x0008, ioaddr + GENCTL); + + return 0; +} + +static struct net_device_stats *epic_get_stats(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + + if (netif_running(dev)) { + /* Update the error counts. */ + dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); + dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); + dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); + } + + return &dev->stats; +} + +/* Set or clear the multicast filter for this adaptor. + Note that we only use exclusion around actually queueing the + new frame, not around filling ep->setup_frame. This is non-deterministic + when re-entered but still correct. */ + +static void set_rx_mode(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + struct epic_private *ep = netdev_priv(dev); + unsigned char mc_filter[8]; /* Multicast hash filter */ + int i; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + outl(0x002C, ioaddr + RxCtrl); + /* Unconditionally log net taps. */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) { + /* There is apparently a chip bug, so the multicast filter + is never enabled. */ + /* Too many to filter perfectly -- accept all multicasts. */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + outl(0x000C, ioaddr + RxCtrl); + } else if (netdev_mc_empty(dev)) { + outl(0x0004, ioaddr + RxCtrl); + return; + } else { /* Never executed, for now. */ + struct netdev_hw_addr *ha; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + unsigned int bit_nr = + ether_crc_le(ETH_ALEN, ha->addr) & 0x3f; + mc_filter[bit_nr >> 3] |= (1 << bit_nr); + } + } + /* ToDo: perhaps we need to stop the Tx and Rx process here? 
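The dead multicast branch in set_rx_mode() above computes the conventional 64-bin hash: the low 6 bits of a little-endian CRC-32 of the MAC address select one bit of the 8-byte filter. A runnable userspace version; note it masks the shift with & 7, which the driver's never-executed branch omits:

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32, same computation as the kernel's ether_crc_le(). */
static uint32_t ether_crc_le(int len, const uint8_t *d)
{
    uint32_t crc = 0xffffffff;
    while (len--) {
        crc ^= *d++;
        for (int k = 0; k < 8; k++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
    }
    return crc;
}

int main(void)
{
    uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    uint8_t mc_filter[8] = { 0 };
    unsigned bit = ether_crc_le(6, mac) & 0x3f;   /* 1 of 64 bins */

    mc_filter[bit >> 3] |= 1u << (bit & 7);
    printf("hash bit %u -> filter byte %u\n", bit, bit >> 3);
    return 0;
}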
*/ + if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) { + for (i = 0; i < 4; i++) + outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4); + memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); + } +} + +static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct epic_private *np = netdev_priv(dev); + + strcpy (info->driver, DRV_NAME); + strcpy (info->version, DRV_VERSION); + strcpy (info->bus_info, pci_name(np->pci_dev)); +} + +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct epic_private *np = netdev_priv(dev); + int rc; + + spin_lock_irq(&np->lock); + rc = mii_ethtool_gset(&np->mii, cmd); + spin_unlock_irq(&np->lock); + + return rc; +} + +static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct epic_private *np = netdev_priv(dev); + int rc; + + spin_lock_irq(&np->lock); + rc = mii_ethtool_sset(&np->mii, cmd); + spin_unlock_irq(&np->lock); + + return rc; +} + +static int netdev_nway_reset(struct net_device *dev) +{ + struct epic_private *np = netdev_priv(dev); + return mii_nway_restart(&np->mii); +} + +static u32 netdev_get_link(struct net_device *dev) +{ + struct epic_private *np = netdev_priv(dev); + return mii_link_ok(&np->mii); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + return debug; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 value) +{ + debug = value; +} + +static int ethtool_begin(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + /* power-up, if interface is down */ + if (! netif_running(dev)) { + outl(0x0200, ioaddr + GENCTL); + outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); + } + return 0; +} + +static void ethtool_complete(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + /* power-down, if interface is down */ + if (! netif_running(dev)) { + outl(0x0008, ioaddr + GENCTL); + outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); + } +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_settings = netdev_get_settings, + .set_settings = netdev_set_settings, + .nway_reset = netdev_nway_reset, + .get_link = netdev_get_link, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, + .begin = ethtool_begin, + .complete = ethtool_complete +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct epic_private *np = netdev_priv(dev); + long ioaddr = dev->base_addr; + struct mii_ioctl_data *data = if_mii(rq); + int rc; + + /* power-up, if interface is down */ + if (! netif_running(dev)) { + outl(0x0200, ioaddr + GENCTL); + outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); + } + + /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */ + spin_lock_irq(&np->lock); + rc = generic_mii_ioctl(&np->mii, data, cmd, NULL); + spin_unlock_irq(&np->lock); + + /* power-down, if interface is down */ + if (! 
netif_running(dev)) { + outl(0x0008, ioaddr + GENCTL); + outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); + } + return rc; +} + + +static void __devexit epic_remove_one (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct epic_private *ep = netdev_priv(dev); + + pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); + pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); + unregister_netdev(dev); +#ifndef USE_IO_OPS + iounmap((void*) dev->base_addr); +#endif + pci_release_regions(pdev); + free_netdev(dev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + /* pci_power_off(pdev, -1); */ +} + + +#ifdef CONFIG_PM + +static int epic_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + long ioaddr = dev->base_addr; + + if (!netif_running(dev)) + return 0; + epic_pause(dev); + /* Put the chip into low-power mode. */ + outl(0x0008, ioaddr + GENCTL); + /* pci_power_off(pdev, -1); */ + return 0; +} + + +static int epic_resume (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (!netif_running(dev)) + return 0; + epic_restart(dev); + /* pci_power_on(pdev); */ + return 0; +} + +#endif /* CONFIG_PM */ + + +static struct pci_driver epic_driver = { + .name = DRV_NAME, + .id_table = epic_pci_tbl, + .probe = epic_init_one, + .remove = __devexit_p(epic_remove_one), +#ifdef CONFIG_PM + .suspend = epic_suspend, + .resume = epic_resume, +#endif /* CONFIG_PM */ +}; + + +static int __init epic_init (void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + printk (KERN_INFO "%s%s", + version, version2); +#endif + + return pci_register_driver(&epic_driver); +} + + +static void __exit epic_cleanup (void) +{ + pci_unregister_driver (&epic_driver); +} + + +module_init(epic_init); +module_exit(epic_cleanup); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c new file mode 100644 index 0000000..a91fe17 --- /dev/null +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -0,0 +1,2210 @@ +/* + * smc911x.c + * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices. + * + * Copyright (C) 2005 Sensoria Corp + * Derived from the unified SMC91x driver by Nicolas Pitre + * and the smsc911x.c reference driver by SMSC + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Arguments:
+ *	 watchdog  = TX watchdog timeout
+ *	 tx_fifo_kb = Size of TX FIFO in KB
+ *
+ * History:
+ *	  04/16/05	Dustin McIntire		 Initial version
+ */
+static const char version[] =
+	 "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";
+
+/* Debugging options */
+#define ENABLE_SMC_DEBUG_RX		0
+#define ENABLE_SMC_DEBUG_TX		0
+#define ENABLE_SMC_DEBUG_DMA		0
+#define ENABLE_SMC_DEBUG_PKTS		0
+#define ENABLE_SMC_DEBUG_MISC		0
+#define ENABLE_SMC_DEBUG_FUNC		0
+
+#define SMC_DEBUG_RX		((ENABLE_SMC_DEBUG_RX	? 1 : 0) << 0)
+#define SMC_DEBUG_TX		((ENABLE_SMC_DEBUG_TX	? 1 : 0) << 1)
+#define SMC_DEBUG_DMA		((ENABLE_SMC_DEBUG_DMA	? 1 : 0) << 2)
+#define SMC_DEBUG_PKTS		((ENABLE_SMC_DEBUG_PKTS	? 1 : 0) << 3)
+#define SMC_DEBUG_MISC		((ENABLE_SMC_DEBUG_MISC	? 1 : 0) << 4)
+#define SMC_DEBUG_FUNC		((ENABLE_SMC_DEBUG_FUNC	? 1 : 0) << 5)
+
+#ifndef SMC_DEBUG
+#define SMC_DEBUG	( SMC_DEBUG_RX	| \
+			  SMC_DEBUG_TX	| \
+			  SMC_DEBUG_DMA	| \
+			  SMC_DEBUG_PKTS | \
+			  SMC_DEBUG_MISC | \
+			  SMC_DEBUG_FUNC \
+			)
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/workqueue.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/dmaengine.h>
+
+#include <asm/io.h>
+
+#include "smc911x.h"
+
+/*
+ * Transmit timeout, default 5 seconds.
+ */
+static int watchdog = 5000;
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+
+static int tx_fifo_kb=8;
+module_param(tx_fifo_kb, int, 0400);
+MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:smc911x");
+
+#define CARDNAME "smc911x"
+
+/*
+ * Use power-down feature of the chip
+ */
+#define POWER_DOWN		1
+
+#if SMC_DEBUG > 0
+#define DBG(n, args...) \
+	do { \
+		if (SMC_DEBUG & (n)) \
+			printk(args); \
+	} while (0)
+
+#define PRINTK(args...) printk(args)
+#else
+#define DBG(n, args...) do { } while (0)
+#define PRINTK(args...) printk(KERN_DEBUG args)
+#endif
+
+#if SMC_DEBUG_PKTS > 0
+static void PRINT_PKT(u_char *buf, int length)
+{
+	int i;
+	int remainder;
+	int lines;
+
+	lines = length / 16;
+	remainder = length % 16;
+
+	for (i = 0; i < lines ; i ++) {
+		int cur;
+		for (cur = 0; cur < 8; cur++) {
+			u_char a, b;
+			a = *buf++;
+			b = *buf++;
+			printk("%02x%02x ", a, b);
+		}
+		printk("\n");
+	}
+	for (i = 0; i < remainder/2 ; i++) {
+		u_char a, b;
+		a = *buf++;
+		b = *buf++;
+		printk("%02x%02x ", a, b);
+	}
+	printk("\n");
+}
+#else
+#define PRINT_PKT(x...)
do { } while (0) +#endif + + +/* this enables an interrupt in the interrupt mask register */ +#define SMC_ENABLE_INT(lp, x) do { \ + unsigned int __mask; \ + __mask = SMC_GET_INT_EN((lp)); \ + __mask |= (x); \ + SMC_SET_INT_EN((lp), __mask); \ +} while (0) + +/* this disables an interrupt from the interrupt mask register */ +#define SMC_DISABLE_INT(lp, x) do { \ + unsigned int __mask; \ + __mask = SMC_GET_INT_EN((lp)); \ + __mask &= ~(x); \ + SMC_SET_INT_EN((lp), __mask); \ +} while (0) + +/* + * this does a soft reset on the device + */ +static void smc911x_reset(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int reg, timeout=0, resets=1, irq_cfg; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + + /* Take out of PM setting first */ + if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) { + /* Write to the bytetest will take out of powerdown */ + SMC_SET_BYTE_TEST(lp, 0); + timeout=10; + do { + udelay(10); + reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_; + } while (--timeout && !reg); + if (timeout == 0) { + PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name); + return; + } + } + + /* Disable all interrupts */ + spin_lock_irqsave(&lp->lock, flags); + SMC_SET_INT_EN(lp, 0); + spin_unlock_irqrestore(&lp->lock, flags); + + while (resets--) { + SMC_SET_HW_CFG(lp, HW_CFG_SRST_); + timeout=10; + do { + udelay(10); + reg = SMC_GET_HW_CFG(lp); + /* If chip indicates reset timeout then try again */ + if (reg & HW_CFG_SRST_TO_) { + PRINTK("%s: chip reset timeout, retrying...\n", dev->name); + resets++; + break; + } + } while (--timeout && (reg & HW_CFG_SRST_)); + } + if (timeout == 0) { + PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name); + return; + } + + /* make sure EEPROM has finished loading before setting GPIO_CFG */ + timeout=1000; + while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_)) + udelay(10); + + if (timeout == 0){ + PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name); + return; + } + + /* Initialize interrupts */ + SMC_SET_INT_EN(lp, 0); + SMC_ACK_INT(lp, -1); + + /* Reset the FIFO level and flow control settings */ + SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16); +//TODO: Figure out what appropriate pause time is + SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_); + SMC_SET_AFC_CFG(lp, lp->afc_cfg); + + + /* Set to LED outputs */ + SMC_SET_GPIO_CFG(lp, 0x70070000); + + /* + * Deassert IRQ for 1*10us for edge type interrupts + * and drive IRQ pin push-pull + */ + irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_; +#ifdef SMC_DYNAMIC_BUS_CONFIG + if (lp->cfg.irq_polarity) + irq_cfg |= INT_CFG_IRQ_POL_; +#endif + SMC_SET_IRQ_CFG(lp, irq_cfg); + + /* clear anything saved */ + if (lp->pending_tx_skb != NULL) { + dev_kfree_skb (lp->pending_tx_skb); + lp->pending_tx_skb = NULL; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + } +} + +/* + * Enable Interrupts, Receive, and Transmit + */ +static void smc911x_enable(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned mask, cfg, cr; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + + spin_lock_irqsave(&lp->lock, flags); + + SMC_SET_MAC_ADDR(lp, dev->dev_addr); + + /* Enable TX */ + cfg = SMC_GET_HW_CFG(lp); + cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF; + cfg |= HW_CFG_SF_; + SMC_SET_HW_CFG(lp, cfg); + SMC_SET_FIFO_TDA(lp, 0xFF); + /* Update TX stats on every 64 packets received or every 1 sec */ + SMC_SET_FIFO_TSL(lp, 64); + 
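SMC_ENABLE_INT()/SMC_DISABLE_INT() above are plain read-modify-write cycles on the interrupt-enable register, which is why callers such as smc911x_reset() wrap them in lp->lock: two unlocked writers could drop each other's bits. A runnable userspace sketch of the pattern, with a plain variable standing in for the INT_EN register:

#include <stdint.h>
#include <stdio.h>

static uint32_t int_en;                        /* stand-in for INT_EN */

static void enable_int(uint32_t bits)  { int_en |=  bits; }   /* read-modify-write */
static void disable_int(uint32_t bits) { int_en &= ~bits; }

int main(void)
{
    enable_int(0x1 | 0x8);     /* arm two sources */
    disable_int(0x8);          /* mask one again  */
    printf("mask now %#x\n", (unsigned)int_en);   /* prints 0x1 */
    return 0;
}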
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); + + SMC_GET_MAC_CR(lp, cr); + cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; + SMC_SET_MAC_CR(lp, cr); + SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_); + + /* Add 2 byte padding to start of packets */ + SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_); + + /* Turn on receiver and enable RX */ + if (cr & MAC_CR_RXEN_) + DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name); + + SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_); + + /* Interrupt on every received packet */ + SMC_SET_FIFO_RSA(lp, 0x01); + SMC_SET_FIFO_RSL(lp, 0x00); + + /* now, enable interrupts */ + mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ | + INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ | + INT_EN_PHY_INT_EN_; + if (IS_REV_A(lp->revision)) + mask|=INT_EN_RDFL_EN_; + else { + mask|=INT_EN_RDFO_EN_; + } + SMC_ENABLE_INT(lp, mask); + + spin_unlock_irqrestore(&lp->lock, flags); +} + +/* + * this puts the device in an inactive state + */ +static void smc911x_shutdown(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned cr; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__); + + /* Disable IRQ's */ + SMC_SET_INT_EN(lp, 0); + + /* Turn of Rx and TX */ + spin_lock_irqsave(&lp->lock, flags); + SMC_GET_MAC_CR(lp, cr); + cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); + SMC_SET_MAC_CR(lp, cr); + SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_); + spin_unlock_irqrestore(&lp->lock, flags); +} + +static inline void smc911x_drop_pkt(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int fifo_count, timeout, reg; + + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__); + fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF; + if (fifo_count <= 4) { + /* Manually dump the packet data */ + while (fifo_count--) + SMC_GET_RX_FIFO(lp); + } else { + /* Fast forward through the bad packet */ + SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_); + timeout=50; + do { + udelay(10); + reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_; + } while (--timeout && reg); + if (timeout == 0) { + PRINTK("%s: timeout waiting for RX fast forward\n", dev->name); + } + } +} + +/* + * This is the procedure to handle the receipt of a packet. + * It should be called after checking for packet presence in + * the RX status FIFO. It must be called with the spin lock + * already held. 
+ */
+static inline void smc911x_rcv(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int pkt_len, status;
+ struct sk_buff *skb;
+ unsigned char *data;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
+ dev->name, __func__);
+ status = SMC_GET_RX_STS_FIFO(lp);
+ DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
+ dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
+ pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
+ if (status & RX_STS_ES_) {
+ /* Deal with a bad packet */
+ dev->stats.rx_errors++;
+ if (status & RX_STS_CRC_ERR_)
+ dev->stats.rx_crc_errors++;
+ else {
+ if (status & RX_STS_LEN_ERR_)
+ dev->stats.rx_length_errors++;
+ if (status & RX_STS_MCAST_)
+ dev->stats.multicast++;
+ }
+ /* Remove the bad packet data from the RX FIFO */
+ smc911x_drop_pkt(dev);
+ } else {
+ /* Receive a valid packet */
+ /* Alloc a buffer with extra room for DMA alignment */
+ skb=dev_alloc_skb(pkt_len+32);
+ if (unlikely(skb == NULL)) {
+ PRINTK( "%s: Low memory, rcvd packet dropped.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ smc911x_drop_pkt(dev);
+ return;
+ }
+ /* Align IP header to 32 bits
+ * Note that the device is configured to add a 2
+ * byte padding to the packet start, so we really
+ * want to write to the original data pointer */
+ data = skb->data;
+ skb_reserve(skb, 2);
+ skb_put(skb,pkt_len-4);
+#ifdef SMC_USE_DMA
+ {
+ unsigned int fifo;
+ /* Lower the FIFO threshold if possible */
+ fifo = SMC_GET_FIFO_INT(lp);
+ if (fifo & 0xFF) fifo--;
+ DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
+ dev->name, fifo & 0xff);
+ SMC_SET_FIFO_INT(lp, fifo);
+ /* Setup RX DMA */
+ SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
+ lp->rxdma_active = 1;
+ lp->current_rx_skb = skb;
+ SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15);
+ /* Packet processing deferred to DMA RX interrupt */
+ }
+#else
+ SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
+ SMC_PULL_DATA(lp, data, pkt_len+2+3);
+
+ DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
+ PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len-4;
+#endif
+ }
+}
+
+/*
+ * This is called to actually send a packet to the chip.
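+ *
+ * (Editor's illustration of the PIO alignment math used below, with an
+ * assumed address: an skb->data ending in 0x1236 gives buf ending in
+ * 0x1234, a start offset of 2 in cmdA bits [20:16], and len rounded up
+ * so the trailing partial word is still pushed.)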
+ */
+static void smc911x_hardware_send_pkt(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ struct sk_buff *skb;
+ unsigned int cmdA, cmdB, len;
+ unsigned char *buf;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
+ BUG_ON(lp->pending_tx_skb == NULL);
+
+ skb = lp->pending_tx_skb;
+ lp->pending_tx_skb = NULL;
+
+ /* cmdA [25:24] data alignment [20:16] start offset [10:0] buffer length */
+ /* cmdB [31:16] pkt tag [10:0] length */
+#ifdef SMC_USE_DMA
+ /* 16 byte buffer alignment mode */
+ buf = (char*)((u32)(skb->data) & ~0xF);
+ len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
+ cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
+ TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
+ skb->len;
+#else
+ buf = (char*)((u32)skb->data & ~0x3);
+ len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
+ cmdA = (((u32)skb->data & 0x3) << 16) |
+ TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
+ skb->len;
+#endif
+ /* tag is packet length so we can use this in stats update later */
+ cmdB = (skb->len << 16) | (skb->len & 0x7FF);
+
+ DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
+ dev->name, len, len, buf, cmdA, cmdB);
+ SMC_SET_TX_FIFO(lp, cmdA);
+ SMC_SET_TX_FIFO(lp, cmdB);
+
+ DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
+ PRINT_PKT(buf, len <= 64 ? len : 64);
+
+ /* Send pkt via PIO or DMA */
+#ifdef SMC_USE_DMA
+ lp->current_tx_skb = skb;
+ SMC_PUSH_DATA(lp, buf, len);
+ /* DMA complete IRQ will free buffer and set jiffies */
+#else
+ SMC_PUSH_DATA(lp, buf, len);
+ dev->trans_start = jiffies;
+ dev_kfree_skb_irq(skb);
+#endif
+ if (!lp->tx_throttle) {
+ netif_wake_queue(dev);
+ }
+ SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
+}
+
+/*
+ * Since I am not sure if I will have enough room in the chip's ram
+ * to store the packet, I call this routine which either sends it
+ * now, or sets the card to generate an interrupt when it is ready
+ * for the packet.
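+ *
+ * (Editor's worked example of the space check below: a frame needs
+ * skb->len + 8 + 15 + 15 bytes of TX data FIFO, so a full 1514-byte
+ * frame is dropped unless at least 1552 bytes are free.)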
+ */ +static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int free; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", + dev->name, __func__); + + spin_lock_irqsave(&lp->lock, flags); + + BUG_ON(lp->pending_tx_skb != NULL); + + free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_; + DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free); + + /* Turn off the flow when running out of space in FIFO */ + if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { + DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n", + dev->name, free); + /* Reenable when at least 1 packet of size MTU present */ + SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64); + lp->tx_throttle = 1; + netif_stop_queue(dev); + } + + /* Drop packets when we run out of space in TX FIFO + * Account for overhead required for: + * + * Tx command words 8 bytes + * Start offset 15 bytes + * End padding 15 bytes + */ + if (unlikely(free < (skb->len + 8 + 15 + 15))) { + printk("%s: No Tx free space %d < %d\n", + dev->name, free, skb->len); + lp->pending_tx_skb = NULL; + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + spin_unlock_irqrestore(&lp->lock, flags); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + +#ifdef SMC_USE_DMA + { + /* If the DMA is already running then defer this packet Tx until + * the DMA IRQ starts it + */ + if (lp->txdma_active) { + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name); + lp->pending_tx_skb = skb; + netif_stop_queue(dev); + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; + } else { + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name); + lp->txdma_active = 1; + } + } +#endif + lp->pending_tx_skb = skb; + smc911x_hardware_send_pkt(dev); + spin_unlock_irqrestore(&lp->lock, flags); + + return NETDEV_TX_OK; +} + +/* + * This handles a TX status interrupt, which is only called when: + * - a TX error occurred, or + * - TX of a packet completed. 
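+ *
+ * (Editor's note: the tag field in bits 31:16 of each status word was
+ * loaded with skb->len in cmdB at transmit time, which is why the loop
+ * below can credit tx_bytes with tx_status >> 16.)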
+ */
+static void smc911x_tx(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int tx_status;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
+ dev->name, __func__);
+
+ /* Collect the TX status */
+ while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
+ DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
+ dev->name,
+ (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
+ tx_status = SMC_GET_TX_STS_FIFO(lp);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes+=tx_status>>16;
+ DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
+ dev->name, (tx_status & 0xffff0000) >> 16,
+ tx_status & 0x0000ffff);
+ /* count Tx errors, but ignore lost carrier errors when in
+ * full-duplex mode */
+ if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
+ !(tx_status & 0x00000306))) {
+ dev->stats.tx_errors++;
+ }
+ if (tx_status & TX_STS_MANY_COLL_) {
+ dev->stats.collisions+=16;
+ dev->stats.tx_aborted_errors++;
+ } else {
+ dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
+ }
+ /* carrier error only has meaning for half-duplex communication */
+ if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
+ !lp->ctl_rfduplx) {
+ dev->stats.tx_carrier_errors++;
+ }
+ if (tx_status & TX_STS_LATE_COLL_) {
+ dev->stats.collisions++;
+ dev->stats.tx_aborted_errors++;
+ }
+ }
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+/*
+ * Reads a register from the MII Management serial interface
+ */
+
+static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int phydata;
+
+ SMC_GET_MII(lp, phyreg, phyaddr, phydata);
+
+ DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
+ __func__, phyaddr, phyreg, phydata);
+ return phydata;
+}
+
+
+/*
+ * Writes a register to the MII Management serial interface
+ */
+static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
+ int phydata)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+
+ DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
+
+ SMC_SET_MII(lp, phyreg, phyaddr, phydata);
+}
+
+/*
+ * Finds and reports the PHY address (115 and 117 have an external
+ * PHY interface; 118 has internal only)
+ */
+static void smc911x_phy_detect(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int phyaddr;
+ unsigned int cfg, id1, id2;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ lp->phy_type = 0;
+
+ /*
+ * Scan all 32 PHY addresses if necessary, starting at
+ * PHY#1 to PHY#31, and then PHY#0 last.
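+ *
+ * (Editor's note: an address is accepted below only if neither ID
+ * register reads 0x0000, 0xffff or 0x8000, the patterns a floating or
+ * absent MII bus would typically return.)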
+ */ + switch(lp->version) { + case CHIP_9115: + case CHIP_9117: + case CHIP_9215: + case CHIP_9217: + cfg = SMC_GET_HW_CFG(lp); + if (cfg & HW_CFG_EXT_PHY_DET_) { + cfg &= ~HW_CFG_PHY_CLK_SEL_; + cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; + SMC_SET_HW_CFG(lp, cfg); + udelay(10); /* Wait for clocks to stop */ + + cfg |= HW_CFG_EXT_PHY_EN_; + SMC_SET_HW_CFG(lp, cfg); + udelay(10); /* Wait for clocks to stop */ + + cfg &= ~HW_CFG_PHY_CLK_SEL_; + cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; + SMC_SET_HW_CFG(lp, cfg); + udelay(10); /* Wait for clocks to stop */ + + cfg |= HW_CFG_SMI_SEL_; + SMC_SET_HW_CFG(lp, cfg); + + for (phyaddr = 1; phyaddr < 32; ++phyaddr) { + + /* Read the PHY identifiers */ + SMC_GET_PHY_ID1(lp, phyaddr & 31, id1); + SMC_GET_PHY_ID2(lp, phyaddr & 31, id2); + + /* Make sure it is a valid identifier */ + if (id1 != 0x0000 && id1 != 0xffff && + id1 != 0x8000 && id2 != 0x0000 && + id2 != 0xffff && id2 != 0x8000) { + /* Save the PHY's address */ + lp->mii.phy_id = phyaddr & 31; + lp->phy_type = id1 << 16 | id2; + break; + } + } + if (phyaddr < 32) + /* Found an external PHY */ + break; + } + default: + /* Internal media only */ + SMC_GET_PHY_ID1(lp, 1, id1); + SMC_GET_PHY_ID2(lp, 1, id2); + /* Save the PHY's address */ + lp->mii.phy_id = 1; + lp->phy_type = id1 << 16 | id2; + } + + DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n", + dev->name, id1, id2, lp->mii.phy_id); +} + +/* + * Sets the PHY to a configuration as determined by the user. + * Called with spin_lock held. + */ +static int smc911x_phy_fixed(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + int phyaddr = lp->mii.phy_id; + int bmcr; + + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + + /* Enter Link Disable state */ + SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); + bmcr |= BMCR_PDOWN; + SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); + + /* + * Set our fixed capabilities + * Disable auto-negotiation + */ + bmcr &= ~BMCR_ANENABLE; + if (lp->ctl_rfduplx) + bmcr |= BMCR_FULLDPLX; + + if (lp->ctl_rspeed == 100) + bmcr |= BMCR_SPEED100; + + /* Write our capabilities to the phy control register */ + SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); + + /* Re-Configure the Receive/Phy Control register */ + bmcr &= ~BMCR_PDOWN; + SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); + + return 1; +} + +/* + * smc911x_phy_reset - reset the phy + * @dev: net device + * @phy: phy address + * + * Issue a software reset for the specified PHY and + * wait up to 100ms for the reset to complete. We should + * not access the PHY for 50ms after issuing the reset. + * + * The time to wait appears to be dependent on the PHY. + * + */ +static int smc911x_phy_reset(struct net_device *dev, int phy) +{ + struct smc911x_local *lp = netdev_priv(dev); + int timeout; + unsigned long flags; + unsigned int reg; + + DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__); + + spin_lock_irqsave(&lp->lock, flags); + reg = SMC_GET_PMT_CTRL(lp); + reg &= ~0xfffff030; + reg |= PMT_CTRL_PHY_RST_; + SMC_SET_PMT_CTRL(lp, reg); + spin_unlock_irqrestore(&lp->lock, flags); + for (timeout = 2; timeout; timeout--) { + msleep(50); + spin_lock_irqsave(&lp->lock, flags); + reg = SMC_GET_PMT_CTRL(lp); + spin_unlock_irqrestore(&lp->lock, flags); + if (!(reg & PMT_CTRL_PHY_RST_)) { + /* extra delay required because the phy may + * not be completed with its reset + * when PHY_BCR_RESET_ is cleared. 
256us + * should suffice, but use 500us to be safe + */ + udelay(500); + break; + } + } + + return reg & PMT_CTRL_PHY_RST_; +} + +/* + * smc911x_phy_powerdown - powerdown phy + * @dev: net device + * @phy: phy address + * + * Power down the specified PHY + */ +static void smc911x_phy_powerdown(struct net_device *dev, int phy) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int bmcr; + + /* Enter Link Disable state */ + SMC_GET_PHY_BMCR(lp, phy, bmcr); + bmcr |= BMCR_PDOWN; + SMC_SET_PHY_BMCR(lp, phy, bmcr); +} + +/* + * smc911x_phy_check_media - check the media status and adjust BMCR + * @dev: net device + * @init: set true for initialisation + * + * Select duplex mode depending on negotiation state. This + * also updates our carrier state. + */ +static void smc911x_phy_check_media(struct net_device *dev, int init) +{ + struct smc911x_local *lp = netdev_priv(dev); + int phyaddr = lp->mii.phy_id; + unsigned int bmcr, cr; + + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + + if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { + /* duplex state has changed */ + SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); + SMC_GET_MAC_CR(lp, cr); + if (lp->mii.full_duplex) { + DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name); + bmcr |= BMCR_FULLDPLX; + cr |= MAC_CR_RCVOWN_; + } else { + DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name); + bmcr &= ~BMCR_FULLDPLX; + cr &= ~MAC_CR_RCVOWN_; + } + SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); + SMC_SET_MAC_CR(lp, cr); + } +} + +/* + * Configures the specified PHY through the MII management interface + * using Autonegotiation. + * Calls smc911x_phy_fixed() if the user has requested a certain config. + * If RPC ANEG bit is set, the media selection is dependent purely on + * the selection by the MII (either in the MII BMCR reg or the result + * of autonegotiation.) If the RPC ANEG bit is cleared, the selection + * is controlled by the RPC SPEED and RPC DPLX bits. + */ +static void smc911x_phy_configure(struct work_struct *work) +{ + struct smc911x_local *lp = container_of(work, struct smc911x_local, + phy_configure); + struct net_device *dev = lp->netdev; + int phyaddr = lp->mii.phy_id; + int my_phy_caps; /* My PHY capabilities */ + int my_ad_caps; /* My Advertised capabilities */ + int status; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__); + + /* + * We should not be called if phy_type is zero. 
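+ *
+ * (Editor's note: the body below copies the PHY's BMSR capability bits
+ * into an ADVERTISE mask, then strips the 100 Mbit and full-duplex
+ * modes when ctl_rspeed or ctl_rfduplx rule them out, before
+ * restarting auto-negotiation.)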
+ */ + if (lp->phy_type == 0) + return; + + if (smc911x_phy_reset(dev, phyaddr)) { + printk("%s: PHY reset timed out\n", dev->name); + return; + } + spin_lock_irqsave(&lp->lock, flags); + + /* + * Enable PHY Interrupts (for register 18) + * Interrupts listed here are enabled + */ + SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ | + PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ | + PHY_INT_MASK_LINK_DOWN_); + + /* If the user requested no auto neg, then go set his request */ + if (lp->mii.force_media) { + smc911x_phy_fixed(dev); + goto smc911x_phy_configure_exit; + } + + /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ + SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps); + if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { + printk(KERN_INFO "Auto negotiation NOT supported\n"); + smc911x_phy_fixed(dev); + goto smc911x_phy_configure_exit; + } + + /* CSMA capable w/ both pauses */ + my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + if (my_phy_caps & BMSR_100BASE4) + my_ad_caps |= ADVERTISE_100BASE4; + if (my_phy_caps & BMSR_100FULL) + my_ad_caps |= ADVERTISE_100FULL; + if (my_phy_caps & BMSR_100HALF) + my_ad_caps |= ADVERTISE_100HALF; + if (my_phy_caps & BMSR_10FULL) + my_ad_caps |= ADVERTISE_10FULL; + if (my_phy_caps & BMSR_10HALF) + my_ad_caps |= ADVERTISE_10HALF; + + /* Disable capabilities not selected by our user */ + if (lp->ctl_rspeed != 100) + my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); + + if (!lp->ctl_rfduplx) + my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); + + /* Update our Auto-Neg Advertisement Register */ + SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps); + lp->mii.advertising = my_ad_caps; + + /* + * Read the register back. Without this, it appears that when + * auto-negotiation is restarted, sometimes it isn't ready and + * the link does not come up. + */ + udelay(10); + SMC_GET_PHY_MII_ADV(lp, phyaddr, status); + + DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps); + DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps); + + /* Restart auto-negotiation process in order to advertise my caps */ + SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART); + + smc911x_phy_check_media(dev, 1); + +smc911x_phy_configure_exit: + spin_unlock_irqrestore(&lp->lock, flags); +} + +/* + * smc911x_phy_interrupt + * + * Purpose: Handle interrupts relating to PHY register 18. This is + * called from the "hard" interrupt handler under our private spinlock. + */ +static void smc911x_phy_interrupt(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + int phyaddr = lp->mii.phy_id; + int status; + + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + + if (lp->phy_type == 0) + return; + + smc911x_phy_check_media(dev, 0); + /* read to clear status bits */ + SMC_GET_PHY_INT_SRC(lp, phyaddr,status); + DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n", + dev->name, status & 0xffff); + DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n", + dev->name, SMC_GET_AFC_CFG(lp)); +} + +/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ + +/* + * This is the main routine of the driver, to handle the device when + * it needs some attention. 
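+ *
+ * (Editor's note: the handler below snapshots INT_EN into "mask",
+ * disables all sources, then loops at most 8 times servicing only the
+ * sources that were enabled, and restores the mask on exit.)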
+ */ +static irqreturn_t smc911x_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smc911x_local *lp = netdev_priv(dev); + unsigned int status, mask, timeout; + unsigned int rx_overrun=0, cr, pkts; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + + spin_lock_irqsave(&lp->lock, flags); + + /* Spurious interrupt check */ + if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != + (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { + spin_unlock_irqrestore(&lp->lock, flags); + return IRQ_NONE; + } + + mask = SMC_GET_INT_EN(lp); + SMC_SET_INT_EN(lp, 0); + + /* set a timeout value, so I don't stay here forever */ + timeout = 8; + + + do { + status = SMC_GET_INT(lp); + + DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n", + dev->name, status, mask, status & ~mask); + + status &= mask; + if (!status) + break; + + /* Handle SW interrupt condition */ + if (status & INT_STS_SW_INT_) { + SMC_ACK_INT(lp, INT_STS_SW_INT_); + mask &= ~INT_EN_SW_INT_EN_; + } + /* Handle various error conditions */ + if (status & INT_STS_RXE_) { + SMC_ACK_INT(lp, INT_STS_RXE_); + dev->stats.rx_errors++; + } + if (status & INT_STS_RXDFH_INT_) { + SMC_ACK_INT(lp, INT_STS_RXDFH_INT_); + dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp); + } + /* Undocumented interrupt-what is the right thing to do here? */ + if (status & INT_STS_RXDF_INT_) { + SMC_ACK_INT(lp, INT_STS_RXDF_INT_); + } + + /* Rx Data FIFO exceeds set level */ + if (status & INT_STS_RDFL_) { + if (IS_REV_A(lp->revision)) { + rx_overrun=1; + SMC_GET_MAC_CR(lp, cr); + cr &= ~MAC_CR_RXEN_; + SMC_SET_MAC_CR(lp, cr); + DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + } + SMC_ACK_INT(lp, INT_STS_RDFL_); + } + if (status & INT_STS_RDFO_) { + if (!IS_REV_A(lp->revision)) { + SMC_GET_MAC_CR(lp, cr); + cr &= ~MAC_CR_RXEN_; + SMC_SET_MAC_CR(lp, cr); + rx_overrun=1; + DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + } + SMC_ACK_INT(lp, INT_STS_RDFO_); + } + /* Handle receive condition */ + if ((status & INT_STS_RSFL_) || rx_overrun) { + unsigned int fifo; + DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name); + fifo = SMC_GET_RX_FIFO_INF(lp); + pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16; + DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n", + dev->name, pkts, fifo & 0xFFFF ); + if (pkts != 0) { +#ifdef SMC_USE_DMA + unsigned int fifo; + if (lp->rxdma_active){ + DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, + "%s: RX DMA active\n", dev->name); + /* The DMA is already running so up the IRQ threshold */ + fifo = SMC_GET_FIFO_INT(lp) & ~0xFF; + fifo |= pkts & 0xFF; + DBG(SMC_DEBUG_RX, + "%s: Setting RX stat FIFO threshold to %d\n", + dev->name, fifo & 0xff); + SMC_SET_FIFO_INT(lp, fifo); + } else +#endif + smc911x_rcv(dev); + } + SMC_ACK_INT(lp, INT_STS_RSFL_); + } + /* Handle transmit FIFO available */ + if (status & INT_STS_TDFA_) { + DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name); + SMC_SET_FIFO_TDA(lp, 0xFF); + lp->tx_throttle = 0; +#ifdef SMC_USE_DMA + if (!lp->txdma_active) +#endif + netif_wake_queue(dev); + SMC_ACK_INT(lp, INT_STS_TDFA_); + } + /* Handle transmit done condition */ +#if 1 + if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) { + DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, + "%s: Tx stat FIFO limit (%d) /GPT irq\n", + dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16); + smc911x_tx(dev); + SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); + SMC_ACK_INT(lp, 
INT_STS_TSFL_);
+ SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_);
+ }
+#else
+ if (status & INT_STS_TSFL_) {
+ DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
+ smc911x_tx(dev);
+ SMC_ACK_INT(lp, INT_STS_TSFL_);
+ }
+
+ if (status & INT_STS_GPT_INT_) {
+ DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
+ dev->name,
+ SMC_GET_IRQ_CFG(lp),
+ SMC_GET_FIFO_INT(lp),
+ SMC_GET_RX_CFG(lp));
+ DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
+ "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
+ dev->name,
+ (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
+ SMC_GET_RX_FIFO_INF(lp) & 0xffff,
+ SMC_GET_RX_STS_FIFO_PEEK(lp));
+ SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
+ SMC_ACK_INT(lp, INT_STS_GPT_INT_);
+ }
+#endif
+
+ /* Handle PHY interrupt condition */
+ if (status & INT_STS_PHY_INT_) {
+ DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
+ smc911x_phy_interrupt(dev);
+ SMC_ACK_INT(lp, INT_STS_PHY_INT_);
+ }
+ } while (--timeout);
+
+ /* restore mask state */
+ SMC_SET_INT_EN(lp, mask);
+
+ DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
+ dev->name, 8-timeout);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef SMC_USE_DMA
+static void
+smc911x_tx_dma_irq(int dma, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct smc911x_local *lp = netdev_priv(dev);
+ struct sk_buff *skb = lp->current_tx_skb;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
+ /* Clear the DMA interrupt sources */
+ SMC_DMA_ACK_IRQ(dev, dma);
+ BUG_ON(skb == NULL);
+ dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
+ dev->trans_start = jiffies;
+ dev_kfree_skb_irq(skb);
+ lp->current_tx_skb = NULL;
+ if (lp->pending_tx_skb != NULL)
+ smc911x_hardware_send_pkt(dev);
+ else {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
+ "%s: No pending Tx packets. DMA disabled\n", dev->name);
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->txdma_active = 0;
+ if (!lp->tx_throttle) {
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
+ "%s: TX DMA irq completed\n", dev->name);
+}
+static void
+smc911x_rx_dma_irq(int dma, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ struct sk_buff *skb = lp->current_rx_skb;
+ unsigned long flags;
+ unsigned int pkts;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
+ /* Clear the DMA interrupt sources */
+ SMC_DMA_ACK_IRQ(dev, dma);
+ dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
+ BUG_ON(skb == NULL);
+ lp->current_rx_skb = NULL;
+ PRINT_PKT(skb->data, skb->len);
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
+ if (pkts != 0) {
+ smc911x_rcv(dev);
+ } else {
+ lp->rxdma_active = 0;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
+ "%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n",
+ dev->name, pkts);
+}
+#endif /* SMC_USE_DMA */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void smc911x_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ smc911x_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/* Our watchdog timed out. Called by the networking layer */
+static void smc911x_timeout(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int status, mask;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ status = SMC_GET_INT(lp);
+ mask = SMC_GET_INT_EN(lp);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
+ dev->name, status, mask);
+
+ /* Dump the current TX FIFO contents and restart */
+ mask = SMC_GET_TX_CFG(lp);
+ SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
+ /*
+ * Reconfiguring the PHY doesn't seem like a bad idea here, but
+ * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
+ * which calls schedule(). Hence we use a work queue.
+ */
+ if (lp->phy_type != 0)
+ schedule_work(&lp->phy_configure);
+
+ /* We can accept TX packets again */
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void smc911x_set_multicast_list(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int multicast_table[2];
+ unsigned int mcr, update_multicast = 0;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CR(lp, mcr);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (dev->flags & IFF_PROMISC) {
+
+ DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name);
+ mcr |= MAC_CR_PRMS_;
+ }
+ /*
+ * Here, I am setting this to accept all multicast packets.
+ * I don't need to zero the multicast table, because the flag is
+ * checked before the table is used
+ */
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
+ DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
+ mcr |= MAC_CR_MCPAS_;
+ }
+
+ /*
+ * This sets the internal hardware table to filter out unwanted
+ * multicast packets before they take up memory.
+ *
+ * The SMC chip uses a hash table where the high 6 bits of the CRC of
+ * the address are the offset into the table. If that bit is 1, then the
+ * multicast packet is accepted. Otherwise, it's dropped silently.
+ *
+ * To use the 6 bits as an offset into the table, the high 1 bit is
+ * the number of the 32 bit register, while the low 5 bits are the bit
+ * within that register.
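+ *
+ * (Editor's worked example with an assumed CRC: if ether_crc() for a
+ * multicast address yields a value whose top 6 bits are 101111b, then
+ * position = 0x2F, position >> 5 = 1 selects multicast_table[1], and
+ * bit position & 0x1F = 15 is set, i.e. table[1] |= 1 << 15.)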
+ */
+ else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+
+ /* Set the Hash perfect mode */
+ mcr |= MAC_CR_HPFILT_;
+
+ /* start with a table of all zeros: reject all */
+ memset(multicast_table, 0, sizeof(multicast_table));
+
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 position;
+
+ /* upper 6 bits are used as hash index */
+ position = ether_crc(ETH_ALEN, ha->addr)>>26;
+
+ multicast_table[position>>5] |= 1 << (position&0x1f);
+ }
+
+ /* be sure I get rid of flags I might have set */
+ mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+ /* now, the table can be loaded into the chipset */
+ update_multicast = 1;
+ } else {
+ DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n",
+ dev->name);
+ mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+ /*
+ * since I'm disabling all multicast entirely, I need to
+ * clear the multicast list
+ */
+ memset(multicast_table, 0, sizeof(multicast_table));
+ update_multicast = 1;
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_SET_MAC_CR(lp, mcr);
+ if (update_multicast) {
+ DBG(SMC_DEBUG_MISC,
+ "%s: update mcast hash table 0x%08x 0x%08x\n",
+ dev->name, multicast_table[0], multicast_table[1]);
+ SMC_SET_HASHL(lp, multicast_table[0]);
+ SMC_SET_HASHH(lp, multicast_table[1]);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int
+smc911x_open(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ /*
+ * Check that the address is valid. If it's not, refuse
+ * to bring the device up. The user must specify an
+ * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
+ */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ PRINTK("%s: no valid ethernet hw addr\n", __func__);
+ return -EINVAL;
+ }
+
+ /* reset the hardware */
+ smc911x_reset(dev);
+
+ /* Configure the PHY, initialize the link state */
+ smc911x_phy_configure(&lp->phy_configure);
+
+ /* Turn on Tx + Rx */
+ smc911x_enable(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * smc911x_close
+ *
+ * this makes the board clean up everything that it can
+ * and not talk to the outside world. Caused by
+ * an 'ifconfig ethX down'
+ */
+static int smc911x_close(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ /* clear everything */
+ smc911x_shutdown(dev);
+
+ if (lp->phy_type != 0) {
+ /* We need to ensure that no calls to
+ * smc911x_phy_configure are pending.
+ */ + cancel_work_sync(&lp->phy_configure); + smc911x_phy_powerdown(dev, lp->mii.phy_id); + } + + if (lp->pending_tx_skb) { + dev_kfree_skb(lp->pending_tx_skb); + lp->pending_tx_skb = NULL; + } + + return 0; +} + +/* + * Ethtool support + */ +static int +smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smc911x_local *lp = netdev_priv(dev); + int ret, status; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + + if (lp->phy_type != 0) { + spin_lock_irqsave(&lp->lock, flags); + ret = mii_ethtool_gset(&lp->mii, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + } else { + cmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP | SUPPORTED_AUI; + + if (lp->ctl_rspeed == 10) + ethtool_cmd_speed_set(cmd, SPEED_10); + else if (lp->ctl_rspeed == 100) + ethtool_cmd_speed_set(cmd, SPEED_100); + + cmd->autoneg = AUTONEG_DISABLE; + if (lp->mii.phy_id==1) + cmd->transceiver = XCVR_INTERNAL; + else + cmd->transceiver = XCVR_EXTERNAL; + cmd->port = 0; + SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status); + cmd->duplex = + (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ? + DUPLEX_FULL : DUPLEX_HALF; + ret = 0; + } + + return ret; +} + +static int +smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smc911x_local *lp = netdev_priv(dev); + int ret; + unsigned long flags; + + if (lp->phy_type != 0) { + spin_lock_irqsave(&lp->lock, flags); + ret = mii_ethtool_sset(&lp->mii, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + } else { + if (cmd->autoneg != AUTONEG_DISABLE || + cmd->speed != SPEED_10 || + (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || + (cmd->port != PORT_TP && cmd->port != PORT_AUI)) + return -EINVAL; + + lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; + + ret = 0; + } + + return ret; +} + +static void +smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strncpy(info->driver, CARDNAME, sizeof(info->driver)); + strncpy(info->version, version, sizeof(info->version)); + strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); +} + +static int smc911x_ethtool_nwayreset(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + int ret = -EINVAL; + unsigned long flags; + + if (lp->phy_type != 0) { + spin_lock_irqsave(&lp->lock, flags); + ret = mii_nway_restart(&lp->mii); + spin_unlock_irqrestore(&lp->lock, flags); + } + + return ret; +} + +static u32 smc911x_ethtool_getmsglevel(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + return lp->msg_enable; +} + +static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct smc911x_local *lp = netdev_priv(dev); + lp->msg_enable = level; +} + +static int smc911x_ethtool_getregslen(struct net_device *dev) +{ + /* System regs + MAC regs + PHY regs */ + return (((E2P_CMD - ID_REV)/4 + 1) + + (WUCSR - MAC_CR)+1 + 32) * sizeof(u32); +} + +static void smc911x_ethtool_getregs(struct net_device *dev, + struct ethtool_regs* regs, void *buf) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned long flags; + u32 reg,i,j=0; + u32 *data = (u32*)buf; + + regs->version = lp->version; + for(i=ID_REV;i<=E2P_CMD;i+=4) { + data[j++] = SMC_inl(lp, i); + } + for(i=MAC_CR;i<=WUCSR;i++) { + spin_lock_irqsave(&lp->lock, flags); + SMC_GET_MAC_CSR(lp, i, reg); + spin_unlock_irqrestore(&lp->lock, flags); + data[j++] = reg; + } + for(i=0;i<=31;i++) { 
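+ /* (Editor's note: dump all 32 MII registers of the current
+ * PHY, each read under the lock like the MAC CSRs above) */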
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ data[j++] = reg & 0xFFFF;
+ }
+}
+
+static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int timeout;
+ int e2p_cmd;
+
+ e2p_cmd = SMC_GET_E2P_CMD(lp);
+ for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
+ if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
+ PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
+ dev->name, __func__);
+ return -EFAULT;
+ }
+ mdelay(1);
+ e2p_cmd = SMC_GET_E2P_CMD(lp);
+ }
+ if (timeout == 0) {
+ PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
+ dev->name, __func__);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
+ int cmd, int addr)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
+ ((cmd) & (0x7<<28)) |
+ ((addr) & 0xFF));
+ return 0;
+}
+
+static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
+ u8 *data)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ *data = SMC_GET_E2P_DATA(lp);
+ return 0;
+}
+
+static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
+ u8 data)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ SMC_SET_E2P_DATA(lp, data);
+ return 0;
+}
+
+static int smc911x_ethtool_geteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ u8 eebuf[SMC911X_EEPROM_LEN];
+ int i, ret;
+
+ for(i=0;i<SMC911X_EEPROM_LEN;i++) {
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0)
+ return ret;
+ if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
+ return ret;
+ }
+ memcpy(data, eebuf+eeprom->offset, eeprom->len);
+ return 0;
+}
+
+static int smc911x_ethtool_seteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int i, ret;
+
+ /* Enable erase */
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0)
+ return ret;
+ for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) {
+ /* erase byte */
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0)
+ return ret;
+ /* write byte */
+ if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
+ return ret;
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
+ return ret;
+ }
+ return 0;
+}
+
+static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
+{
+ return SMC911X_EEPROM_LEN;
+}
+
+static const struct ethtool_ops smc911x_ethtool_ops = {
+ .get_settings = smc911x_ethtool_getsettings,
+ .set_settings = smc911x_ethtool_setsettings,
+ .get_drvinfo = smc911x_ethtool_getdrvinfo,
+ .get_msglevel = smc911x_ethtool_getmsglevel,
+ .set_msglevel = smc911x_ethtool_setmsglevel,
+ .nway_reset = smc911x_ethtool_nwayreset,
+ .get_link = ethtool_op_get_link,
+ .get_regs_len = smc911x_ethtool_getregslen,
+ .get_regs = smc911x_ethtool_getregs,
+ .get_eeprom_len = smc911x_ethtool_geteeprom_len,
+ .get_eeprom = smc911x_ethtool_geteeprom,
+ .set_eeprom = smc911x_ethtool_seteeprom,
+};
+
+/*
+ * smc911x_findirq
+ *
+ * This routine has a simple purpose -- make the SMC chip generate an
+ * interrupt, so an auto-detect routine can detect it, and find the IRQ.
+ */
+static int __devinit smc911x_findirq(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int timeout = 20;
+ unsigned long cookie;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n",
__func__);
+
+ cookie = probe_irq_on();
+
+ /*
+ * Force a SW interrupt
+ */
+
+ SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_);
+
+ /*
+ * Wait until positive that the interrupt has been generated
+ */
+ do {
+ int int_status;
+ udelay(10);
+ int_status = SMC_GET_INT_EN(lp);
+ if (int_status & INT_EN_SW_INT_EN_)
+ break; /* got the interrupt */
+ } while (--timeout);
+
+ /*
+ * there is really nothing that I can do here if timeout fails,
+ * as autoirq_report will return a 0 anyway, which is what I
+ * want in this case. Plus, the clean up is needed in both
+ * cases.
+ */
+
+ /* and disable all interrupts again */
+ SMC_SET_INT_EN(lp, 0);
+
+ /* and return what I found */
+ return probe_irq_off(cookie);
+}
+
+static const struct net_device_ops smc911x_netdev_ops = {
+ .ndo_open = smc911x_open,
+ .ndo_stop = smc911x_close,
+ .ndo_start_xmit = smc911x_hard_start_xmit,
+ .ndo_tx_timeout = smc911x_timeout,
+ .ndo_set_multicast_list = smc911x_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = smc911x_poll_controller,
+#endif
+};
+
+/*
+ * Function: smc911x_probe(unsigned long ioaddr)
+ *
+ * Purpose:
+ * Tests to see if a given ioaddr points to an SMC911x chip.
+ * Returns a 0 on success
+ *
+ * Algorithm:
+ * (1) see if the endian word is OK
+ * (2) see if I recognize the chip ID in the appropriate register
+ *
+ * Here I do typical initialization tasks.
+ *
+ * o Initialize the structure if needed
+ * o print out my vanity message if not done so already
+ * o print out what type of hardware is detected
+ * o print out the ethernet address
+ * o find the IRQ
+ * o set up my private data
+ * o configure the dev structure with my subroutines
+ * o actually GRAB the irq.
+ * o GRAB the region
+ */
+static int __devinit smc911x_probe(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int i, retval;
+ unsigned int val, chip_id, revision;
+ const char *version_string;
+ unsigned long irq_flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ /* First, see if the endian word is recognized */
+ val = SMC_GET_BYTE_TEST(lp);
+ DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
+ if (val != 0x87654321) {
+ printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /*
+ * check if the revision register is something that I
+ * recognize. These might need to be added to later,
+ * as future revisions could be added.
+ */
+ chip_id = SMC_GET_PN(lp);
+ DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
+ for(i=0;chip_ids[i].id != 0; i++) {
+ if (chip_ids[i].id == chip_id) break;
+ }
+ if (!chip_ids[i].id) {
+ printk(KERN_ERR "Unknown chip ID %04x\n", chip_id);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ version_string = chip_ids[i].name;
+
+ revision = SMC_GET_REV(lp);
+ DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
+
+ /* At this point I'll assume that the chip is an SMC911x.
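+ * (Editor's check of the FIFO sizing below, tx_fifo_kb = 6 assumed:
+ * tx_fifo_size = (6 << 10) - 512 = 5632 and rx_fifo_size =
+ * ((0x4000 - 512 - 5632) / 16) * 15 = 9600, matching the "9600 Rx
+ * Data Fifo Size" row of the AFC table.)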
*/ + DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name); + + /* Validate the TX FIFO size requested */ + if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) { + printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb); + retval = -EINVAL; + goto err_out; + } + + /* fill in some of the fields */ + lp->version = chip_ids[i].id; + lp->revision = revision; + lp->tx_fifo_kb = tx_fifo_kb; + /* Reverse calculate the RX FIFO size from the TX */ + lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512; + lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15; + + /* Set the automatic flow control values */ + switch(lp->tx_fifo_kb) { + /* + * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64 + * AFC_LO is AFC_HI/2 + * BACK_DUR is about 5uS*(AFC_LO) rounded down + */ + case 2:/* 13440 Rx Data Fifo Size */ + lp->afc_cfg=0x008C46AF;break; + case 3:/* 12480 Rx Data Fifo Size */ + lp->afc_cfg=0x0082419F;break; + case 4:/* 11520 Rx Data Fifo Size */ + lp->afc_cfg=0x00783C9F;break; + case 5:/* 10560 Rx Data Fifo Size */ + lp->afc_cfg=0x006E374F;break; + case 6:/* 9600 Rx Data Fifo Size */ + lp->afc_cfg=0x0064328F;break; + case 7:/* 8640 Rx Data Fifo Size */ + lp->afc_cfg=0x005A2D7F;break; + case 8:/* 7680 Rx Data Fifo Size */ + lp->afc_cfg=0x0050287F;break; + case 9:/* 6720 Rx Data Fifo Size */ + lp->afc_cfg=0x0046236F;break; + case 10:/* 5760 Rx Data Fifo Size */ + lp->afc_cfg=0x003C1E6F;break; + case 11:/* 4800 Rx Data Fifo Size */ + lp->afc_cfg=0x0032195F;break; + /* + * AFC_HI is ~1520 bytes less than RX Data Fifo Size + * AFC_LO is AFC_HI/2 + * BACK_DUR is about 5uS*(AFC_LO) rounded down + */ + case 12:/* 3840 Rx Data Fifo Size */ + lp->afc_cfg=0x0024124F;break; + case 13:/* 2880 Rx Data Fifo Size */ + lp->afc_cfg=0x0015073F;break; + case 14:/* 1920 Rx Data Fifo Size */ + lp->afc_cfg=0x0006032F;break; + default: + PRINTK("%s: ERROR -- no AFC_CFG setting found", + dev->name); + break; + } + + DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, + "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME, + lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg); + + spin_lock_init(&lp->lock); + + /* Get the MAC address */ + SMC_GET_MAC_ADDR(lp, dev->dev_addr); + + /* now, reset the chip, and put it into a known state */ + smc911x_reset(dev); + + /* + * If dev->irq is 0, then the device has to be banged on to see + * what the IRQ is. + * + * Specifying an IRQ is done with the assumption that the user knows + * what (s)he is doing. No checking is done!!!! + */ + if (dev->irq < 1) { + int trials; + + trials = 3; + while (trials--) { + dev->irq = smc911x_findirq(dev); + if (dev->irq) + break; + /* kick the card and try again */ + smc911x_reset(dev); + } + } + if (dev->irq == 0) { + printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n", + dev->name); + retval = -ENODEV; + goto err_out; + } + dev->irq = irq_canonicalize(dev->irq); + + /* Fill in the fields of the device structure with ethernet values. */ + ether_setup(dev); + + dev->netdev_ops = &smc911x_netdev_ops; + dev->watchdog_timeo = msecs_to_jiffies(watchdog); + dev->ethtool_ops = &smc911x_ethtool_ops; + + INIT_WORK(&lp->phy_configure, smc911x_phy_configure); + lp->mii.phy_id_mask = 0x1f; + lp->mii.reg_num_mask = 0x1f; + lp->mii.force_media = 0; + lp->mii.full_duplex = 0; + lp->mii.dev = dev; + lp->mii.mdio_read = smc911x_phy_read; + lp->mii.mdio_write = smc911x_phy_write; + + /* + * Locate the phy, if any. 
+ */ + smc911x_phy_detect(dev); + + /* Set default parameters */ + lp->msg_enable = NETIF_MSG_LINK; + lp->ctl_rfduplx = 1; + lp->ctl_rspeed = 100; + +#ifdef SMC_DYNAMIC_BUS_CONFIG + irq_flags = lp->cfg.irq_flags; +#else + irq_flags = IRQF_SHARED | SMC_IRQ_SENSE; +#endif + + /* Grab the IRQ */ + retval = request_irq(dev->irq, smc911x_interrupt, + irq_flags, dev->name, dev); + if (retval) + goto err_out; + +#ifdef SMC_USE_DMA + lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq); + lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq); + lp->rxdma_active = 0; + lp->txdma_active = 0; + dev->dma = lp->rxdma; +#endif + + retval = register_netdev(dev); + if (retval == 0) { + /* now, print out the card info, in a short format.. */ + printk("%s: %s (rev %d) at %#lx IRQ %d", + dev->name, version_string, lp->revision, + dev->base_addr, dev->irq); + +#ifdef SMC_USE_DMA + if (lp->rxdma != -1) + printk(" RXDMA %d ", lp->rxdma); + + if (lp->txdma != -1) + printk("TXDMA %d", lp->txdma); +#endif + printk("\n"); + if (!is_valid_ether_addr(dev->dev_addr)) { + printk("%s: Invalid ethernet MAC address. Please " + "set using ifconfig\n", dev->name); + } else { + /* Print the Ethernet address */ + printk("%s: Ethernet addr: %pM\n", + dev->name, dev->dev_addr); + } + + if (lp->phy_type == 0) { + PRINTK("%s: No PHY found\n", dev->name); + } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) { + PRINTK("%s: LAN911x Internal PHY\n", dev->name); + } else { + PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type); + } + } + +err_out: +#ifdef SMC_USE_DMA + if (retval) { + if (lp->rxdma != -1) { + SMC_DMA_FREE(dev, lp->rxdma); + } + if (lp->txdma != -1) { + SMC_DMA_FREE(dev, lp->txdma); + } + } +#endif + return retval; +} + +/* + * smc911x_init(void) + * + * Output: + * 0 --> there is a device + * anything else, error + */ +static int __devinit smc911x_drv_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + struct resource *res; + struct smc911x_local *lp; + unsigned int *addr; + int ret; + + DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENODEV; + goto out; + } + + /* + * Request the regions. 
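+ *
+ * (Editor's note: SMC911X_IO_EXTENT is the 0x100-byte register window
+ * defined in smc911x.h; it is claimed before the netdev is allocated
+ * so a conflicting region fails fast with -EBUSY.)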
+ */ + if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) { + ret = -EBUSY; + goto out; + } + + ndev = alloc_etherdev(sizeof(struct smc911x_local)); + if (!ndev) { + printk("%s: could not allocate device.\n", CARDNAME); + ret = -ENOMEM; + goto release_1; + } + SET_NETDEV_DEV(ndev, &pdev->dev); + + ndev->dma = (unsigned char)-1; + ndev->irq = platform_get_irq(pdev, 0); + lp = netdev_priv(ndev); + lp->netdev = ndev; +#ifdef SMC_DYNAMIC_BUS_CONFIG + { + struct smc911x_platdata *pd = pdev->dev.platform_data; + if (!pd) { + ret = -EINVAL; + goto release_both; + } + memcpy(&lp->cfg, pd, sizeof(lp->cfg)); + } +#endif + + addr = ioremap(res->start, SMC911X_IO_EXTENT); + if (!addr) { + ret = -ENOMEM; + goto release_both; + } + + platform_set_drvdata(pdev, ndev); + lp->base = addr; + ndev->base_addr = res->start; + ret = smc911x_probe(ndev); + if (ret != 0) { + platform_set_drvdata(pdev, NULL); + iounmap(addr); +release_both: + free_netdev(ndev); +release_1: + release_mem_region(res->start, SMC911X_IO_EXTENT); +out: + printk("%s: not found (%d).\n", CARDNAME, ret); + } +#ifdef SMC_USE_DMA + else { + lp->physaddr = res->start; + lp->dev = &pdev->dev; + } +#endif + + return ret; +} + +static int __devexit smc911x_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smc911x_local *lp = netdev_priv(ndev); + struct resource *res; + + DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); + platform_set_drvdata(pdev, NULL); + + unregister_netdev(ndev); + + free_irq(ndev->irq, ndev); + +#ifdef SMC_USE_DMA + { + if (lp->rxdma != -1) { + SMC_DMA_FREE(dev, lp->rxdma); + } + if (lp->txdma != -1) { + SMC_DMA_FREE(dev, lp->txdma); + } + } +#endif + iounmap(lp->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, SMC911X_IO_EXTENT); + + free_netdev(ndev); + return 0; +} + +static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(dev); + struct smc911x_local *lp = netdev_priv(ndev); + + DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); + if (ndev) { + if (netif_running(ndev)) { + netif_device_detach(ndev); + smc911x_shutdown(ndev); +#if POWER_DOWN + /* Set D2 - Energy detect only setting */ + SMC_SET_PMT_CTRL(lp, 2<<12); +#endif + } + } + return 0; +} + +static int smc911x_drv_resume(struct platform_device *dev) +{ + struct net_device *ndev = platform_get_drvdata(dev); + + DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); + if (ndev) { + struct smc911x_local *lp = netdev_priv(ndev); + + if (netif_running(ndev)) { + smc911x_reset(ndev); + if (lp->phy_type != 0) + smc911x_phy_configure(&lp->phy_configure); + smc911x_enable(ndev); + netif_device_attach(ndev); + } + } + return 0; +} + +static struct platform_driver smc911x_driver = { + .probe = smc911x_drv_probe, + .remove = __devexit_p(smc911x_drv_remove), + .suspend = smc911x_drv_suspend, + .resume = smc911x_drv_resume, + .driver = { + .name = CARDNAME, + .owner = THIS_MODULE, + }, +}; + +static int __init smc911x_init(void) +{ + return platform_driver_register(&smc911x_driver); +} + +static void __exit smc911x_cleanup(void) +{ + platform_driver_unregister(&smc911x_driver); +} + +module_init(smc911x_init); +module_exit(smc911x_cleanup); diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h new file mode 100644 index 0000000..3269292 --- /dev/null +++ b/drivers/net/ethernet/smsc/smc911x.h @@ -0,0 +1,924 @@ 
+/*------------------------------------------------------------------------
+ . smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device.
+ .
+ . Copyright (C) 2005 Sensoria Corp.
+ . Derived from the unified SMC91x driver by Nicolas Pitre
+ .
+ . This program is free software; you can redistribute it and/or modify
+ . it under the terms of the GNU General Public License as published by
+ . the Free Software Foundation; either version 2 of the License, or
+ . (at your option) any later version.
+ .
+ . This program is distributed in the hope that it will be useful,
+ . but WITHOUT ANY WARRANTY; without even the implied warranty of
+ . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ . GNU General Public License for more details.
+ .
+ . You should have received a copy of the GNU General Public License
+ . along with this program; if not, write to the Free Software
+ . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ .
+ . Information contained in this file was obtained from the LAN9118
+ . manual from SMC. To get a copy, if you really want one, you can find
+ . information under www.smsc.com.
+ .
+ . Authors
+ . Dustin McIntire
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC911X_H_
+#define _SMC911X_H_
+
+#include <linux/smc911x.h>
+/*
+ * Use the DMA feature on PXA chips
+ */
+#ifdef CONFIG_ARCH_PXA
+ #define SMC_USE_PXA_DMA 1
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING
+#elif defined(CONFIG_SH_MAGIC_PANEL_R2)
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+#elif defined(CONFIG_ARCH_OMAP3)
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+ #define SMC_MEM_RESERVED 1
+#elif defined(CONFIG_ARCH_OMAP2)
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+ #define SMC_MEM_RESERVED 1
+#else
+/*
+ * Default configuration
+ */
+
+#define SMC_DYNAMIC_BUS_CONFIG
+#endif
+
+#ifdef SMC_USE_PXA_DMA
+#define SMC_USE_DMA
+#endif
+
+/* store this information for the driver.. */
+struct smc911x_local {
+ /*
+ * If I have to wait until the DMA is finished and ready to reload a
+ * packet, I will store the skbuff here. Then, the DMA will send it
+ * out and free it.
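+ *
+ * (Editor's note: this same field doubles as the deferral slot in
+ * smc911x_hard_start_xmit(), which parks the skb here when a TX DMA
+ * is already running and lets the DMA completion IRQ send it.)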
+ */
+ struct sk_buff *pending_tx_skb;
+
+ /* version/revision of the SMC911x chip */
+ u16 version;
+ u16 revision;
+
+ /* FIFO sizes */
+ int tx_fifo_kb;
+ int tx_fifo_size;
+ int rx_fifo_size;
+ int afc_cfg;
+
+ /* Contains the current active receive/phy mode */
+ int ctl_rfduplx;
+ int ctl_rspeed;
+
+ u32 msg_enable;
+ u32 phy_type;
+ struct mii_if_info mii;
+
+ /* work queue */
+ struct work_struct phy_configure;
+
+ int tx_throttle;
+ spinlock_t lock;
+
+ struct net_device *netdev;
+
+#ifdef SMC_USE_DMA
+ /* DMA needs the physical address of the chip */
+ u_long physaddr;
+ int rxdma;
+ int txdma;
+ int rxdma_active;
+ int txdma_active;
+ struct sk_buff *current_rx_skb;
+ struct sk_buff *current_tx_skb;
+ struct device *dev;
+#endif
+ void __iomem *base;
+#ifdef SMC_DYNAMIC_BUS_CONFIG
+ struct smc911x_platdata cfg;
+#endif
+};
+
+/*
+ * Define the bus width specific IO macros
+ */
+
+#ifdef SMC_DYNAMIC_BUS_CONFIG
+static inline unsigned int SMC_inl(struct smc911x_local *lp, int reg)
+{
+ void __iomem *ioaddr = lp->base + reg;
+
+ if (lp->cfg.flags & SMC911X_USE_32BIT)
+ return readl(ioaddr);
+
+ if (lp->cfg.flags & SMC911X_USE_16BIT)
+ return readw(ioaddr) | (readw(ioaddr + 2) << 16);
+
+ BUG();
+}
+
+static inline void SMC_outl(unsigned int value, struct smc911x_local *lp,
+ int reg)
+{
+ void __iomem *ioaddr = lp->base + reg;
+
+ if (lp->cfg.flags & SMC911X_USE_32BIT) {
+ writel(value, ioaddr);
+ return;
+ }
+
+ if (lp->cfg.flags & SMC911X_USE_16BIT) {
+ writew(value & 0xffff, ioaddr);
+ writew(value >> 16, ioaddr + 2);
+ return;
+ }
+
+ BUG();
+}
+
+static inline void SMC_insl(struct smc911x_local *lp, int reg,
+ void *addr, unsigned int count)
+{
+ void __iomem *ioaddr = lp->base + reg;
+
+ if (lp->cfg.flags & SMC911X_USE_32BIT) {
+ readsl(ioaddr, addr, count);
+ return;
+ }
+
+ if (lp->cfg.flags & SMC911X_USE_16BIT) {
+ readsw(ioaddr, addr, count * 2);
+ return;
+ }
+
+ BUG();
+}
+
+static inline void SMC_outsl(struct smc911x_local *lp, int reg,
+ void *addr, unsigned int count)
+{
+ void __iomem *ioaddr = lp->base + reg;
+
+ if (lp->cfg.flags & SMC911X_USE_32BIT) {
+ writesl(ioaddr, addr, count);
+ return;
+ }
+
+ if (lp->cfg.flags & SMC911X_USE_16BIT) {
+ writesw(ioaddr, addr, count * 2);
+ return;
+ }
+
+ BUG();
+}
+#else
+#if SMC_USE_16BIT
+#define SMC_inl(lp, r) ((readw((lp)->base + (r)) & 0xFFFF) + (readw((lp)->base + (r) + 2) << 16))
+#define SMC_outl(v, lp, r) \
+ do{ \
+ writew(v & 0xFFFF, (lp)->base + (r)); \
+ writew(v >> 16, (lp)->base + (r) + 2); \
+ } while (0)
+#define SMC_insl(lp, r, p, l) readsw((short*)((lp)->base + (r)), p, l*2)
+#define SMC_outsl(lp, r, p, l) writesw((short*)((lp)->base + (r)), p, l*2)
+
+#elif SMC_USE_32BIT
+#define SMC_inl(lp, r) readl((lp)->base + (r))
+#define SMC_outl(v, lp, r) writel(v, (lp)->base + (r))
+#define SMC_insl(lp, r, p, l) readsl((int*)((lp)->base + (r)), p, l)
+#define SMC_outsl(lp, r, p, l) writesl((int*)((lp)->base + (r)), p, l)
+
+#endif /* SMC_USE_16BIT */
+#endif /* SMC_DYNAMIC_BUS_CONFIG */
+
+
+#ifdef SMC_USE_PXA_DMA
+
+#include <mach/dma.h>
+
+/*
+ * Define the request and free functions
+ * These are unfortunately architecture specific as no generic allocation
+ * mechanism exists
+ */
+#define SMC_DMA_REQUEST(dev, handler) \
+ pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev)
+
+#define SMC_DMA_FREE(dev, dma) \
+ pxa_free_dma(dma)
+
+#define SMC_DMA_ACK_IRQ(dev, dma) \
+{ \
+ if (DCSR(dma) & DCSR_BUSERR) { \
+ printk("%s: DMA %d bus error!\n", dev->name, dma); \
+ } \
+ DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
\
+}
+
+/*
+ * Use a DMA for RX and TX packets.
+ */
+#include <linux/dma-mapping.h>
+
+static dma_addr_t rx_dmabuf, tx_dmabuf;
+static int rx_dmalen, tx_dmalen;
+
+#ifdef SMC_insl
+#undef SMC_insl
+#define SMC_insl(lp, r, p, l) \
+ smc_pxa_dma_insl(lp, lp->physaddr, r, lp->rxdma, p, l)
+
+static inline void
+smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
+ if ((long)buf & 4) {
+ *((u32 *)buf) = SMC_inl(lp, reg);
+ buf += 4;
+ len--;
+ }
+
+ len *= 4;
+ rx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_FROM_DEVICE);
+ rx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DTADR(dma) = rx_dmabuf;
+ DSADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+ DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_outsl
+#undef SMC_outsl
+#define SMC_outsl(lp, r, p, l) \
+ smc_pxa_dma_outsl(lp, lp->physaddr, r, lp->txdma, p, l)
+
+static inline void
+smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
+ if ((long)buf & 4) {
+ SMC_outl(*((u32 *)buf), lp, reg);
+ buf += 4;
+ len--;
+ }
+
+ len *= 4;
+ tx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_TO_DEVICE);
+ tx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DSADR(dma) = tx_dmabuf;
+ DTADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
+ DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+#endif /* SMC_USE_PXA_DMA */
+
+
+/* Chip Parameters and Register Definitions */
+
+#define SMC911X_TX_FIFO_LOW_THRESHOLD (1536*2)
+
+#define SMC911X_IO_EXTENT 0x100
+
+#define SMC911X_EEPROM_LEN 7
+
+/* Below are the register offsets and bit definitions
+ * of the Lan911x memory space
+ */
+#define RX_DATA_FIFO (0x00)
+
+#define TX_DATA_FIFO (0x20)
+#define TX_CMD_A_INT_ON_COMP_ (0x80000000)
+#define TX_CMD_A_INT_BUF_END_ALGN_ (0x03000000)
+#define TX_CMD_A_INT_4_BYTE_ALGN_ (0x00000000)
+#define TX_CMD_A_INT_16_BYTE_ALGN_ (0x01000000)
+#define TX_CMD_A_INT_32_BYTE_ALGN_ (0x02000000)
+#define TX_CMD_A_INT_DATA_OFFSET_ (0x001F0000)
+#define TX_CMD_A_INT_FIRST_SEG_ (0x00002000)
+#define TX_CMD_A_INT_LAST_SEG_ (0x00001000)
+#define TX_CMD_A_BUF_SIZE_ (0x000007FF)
+#define TX_CMD_B_PKT_TAG_ (0xFFFF0000)
+#define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000)
+#define TX_CMD_B_DISABLE_PADDING_ (0x00001000)
+#define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF)
+
+#define RX_STATUS_FIFO (0x40)
+#define RX_STS_PKT_LEN_ (0x3FFF0000)
+#define RX_STS_ES_ (0x00008000)
+#define RX_STS_BCST_ (0x00002000)
+#define RX_STS_LEN_ERR_ (0x00001000)
+#define RX_STS_RUNT_ERR_ (0x00000800)
+#define RX_STS_MCAST_ (0x00000400)
+#define RX_STS_TOO_LONG_ (0x00000080)
+#define RX_STS_COLL_ (0x00000040)
+#define RX_STS_ETH_TYPE_ (0x00000020)
+#define RX_STS_WDOG_TMT_ (0x00000010)
+#define RX_STS_MII_ERR_ (0x00000008)
+#define RX_STS_DRIBBLING_ (0x00000004)
+#define RX_STS_CRC_ERR_ (0x00000002)
+#define RX_STATUS_FIFO_PEEK (0x44)
+#define TX_STATUS_FIFO (0x48)
+#define TX_STS_TAG_ (0xFFFF0000)
+#define TX_STS_ES_ (0x00008000)
+#define TX_STS_LOC_ (0x00000800)
+#define TX_STS_NO_CARR_ (0x00000400)
+#define TX_STS_LATE_COLL_ (0x00000200)
+#define TX_STS_MANY_COLL_ (0x00000100)
+#define TX_STS_COLL_CNT_ (0x00000078)
+#define TX_STS_MANY_DEFER_ (0x00000004)
+#define TX_STS_UNDERRUN_ (0x00000002)
+#define TX_STS_DEFERRED_ (0x00000001)
+#define
TX_STATUS_FIFO_PEEK (0x4C) +#define ID_REV (0x50) +#define ID_REV_CHIP_ID_ (0xFFFF0000) /* RO */ +#define ID_REV_REV_ID_ (0x0000FFFF) /* RO */ + +#define INT_CFG (0x54) +#define INT_CFG_INT_DEAS_ (0xFF000000) /* R/W */ +#define INT_CFG_INT_DEAS_CLR_ (0x00004000) +#define INT_CFG_INT_DEAS_STS_ (0x00002000) +#define INT_CFG_IRQ_INT_ (0x00001000) /* RO */ +#define INT_CFG_IRQ_EN_ (0x00000100) /* R/W */ +#define INT_CFG_IRQ_POL_ (0x00000010) /* R/W Not Affected by SW Reset */ +#define INT_CFG_IRQ_TYPE_ (0x00000001) /* R/W Not Affected by SW Reset */ + +#define INT_STS (0x58) +#define INT_STS_SW_INT_ (0x80000000) /* R/WC */ +#define INT_STS_TXSTOP_INT_ (0x02000000) /* R/WC */ +#define INT_STS_RXSTOP_INT_ (0x01000000) /* R/WC */ +#define INT_STS_RXDFH_INT_ (0x00800000) /* R/WC */ +#define INT_STS_RXDF_INT_ (0x00400000) /* R/WC */ +#define INT_STS_TX_IOC_ (0x00200000) /* R/WC */ +#define INT_STS_RXD_INT_ (0x00100000) /* R/WC */ +#define INT_STS_GPT_INT_ (0x00080000) /* R/WC */ +#define INT_STS_PHY_INT_ (0x00040000) /* RO */ +#define INT_STS_PME_INT_ (0x00020000) /* R/WC */ +#define INT_STS_TXSO_ (0x00010000) /* R/WC */ +#define INT_STS_RWT_ (0x00008000) /* R/WC */ +#define INT_STS_RXE_ (0x00004000) /* R/WC */ +#define INT_STS_TXE_ (0x00002000) /* R/WC */ +//#define INT_STS_ERX_ (0x00001000) /* R/WC */ +#define INT_STS_TDFU_ (0x00000800) /* R/WC */ +#define INT_STS_TDFO_ (0x00000400) /* R/WC */ +#define INT_STS_TDFA_ (0x00000200) /* R/WC */ +#define INT_STS_TSFF_ (0x00000100) /* R/WC */ +#define INT_STS_TSFL_ (0x00000080) /* R/WC */ +//#define INT_STS_RXDF_ (0x00000040) /* R/WC */ +#define INT_STS_RDFO_ (0x00000040) /* R/WC */ +#define INT_STS_RDFL_ (0x00000020) /* R/WC */ +#define INT_STS_RSFF_ (0x00000010) /* R/WC */ +#define INT_STS_RSFL_ (0x00000008) /* R/WC */ +#define INT_STS_GPIO2_INT_ (0x00000004) /* R/WC */ +#define INT_STS_GPIO1_INT_ (0x00000002) /* R/WC */ +#define INT_STS_GPIO0_INT_ (0x00000001) /* R/WC */ + +#define INT_EN (0x5C) +#define INT_EN_SW_INT_EN_ (0x80000000) /* R/W */ +#define INT_EN_TXSTOP_INT_EN_ (0x02000000) /* R/W */ +#define INT_EN_RXSTOP_INT_EN_ (0x01000000) /* R/W */ +#define INT_EN_RXDFH_INT_EN_ (0x00800000) /* R/W */ +//#define INT_EN_RXDF_INT_EN_ (0x00400000) /* R/W */ +#define INT_EN_TIOC_INT_EN_ (0x00200000) /* R/W */ +#define INT_EN_RXD_INT_EN_ (0x00100000) /* R/W */ +#define INT_EN_GPT_INT_EN_ (0x00080000) /* R/W */ +#define INT_EN_PHY_INT_EN_ (0x00040000) /* R/W */ +#define INT_EN_PME_INT_EN_ (0x00020000) /* R/W */ +#define INT_EN_TXSO_EN_ (0x00010000) /* R/W */ +#define INT_EN_RWT_EN_ (0x00008000) /* R/W */ +#define INT_EN_RXE_EN_ (0x00004000) /* R/W */ +#define INT_EN_TXE_EN_ (0x00002000) /* R/W */ +//#define INT_EN_ERX_EN_ (0x00001000) /* R/W */ +#define INT_EN_TDFU_EN_ (0x00000800) /* R/W */ +#define INT_EN_TDFO_EN_ (0x00000400) /* R/W */ +#define INT_EN_TDFA_EN_ (0x00000200) /* R/W */ +#define INT_EN_TSFF_EN_ (0x00000100) /* R/W */ +#define INT_EN_TSFL_EN_ (0x00000080) /* R/W */ +//#define INT_EN_RXDF_EN_ (0x00000040) /* R/W */ +#define INT_EN_RDFO_EN_ (0x00000040) /* R/W */ +#define INT_EN_RDFL_EN_ (0x00000020) /* R/W */ +#define INT_EN_RSFF_EN_ (0x00000010) /* R/W */ +#define INT_EN_RSFL_EN_ (0x00000008) /* R/W */ +#define INT_EN_GPIO2_INT_ (0x00000004) /* R/W */ +#define INT_EN_GPIO1_INT_ (0x00000002) /* R/W */ +#define INT_EN_GPIO0_INT_ (0x00000001) /* R/W */ + +#define BYTE_TEST (0x64) +#define FIFO_INT (0x68) +#define FIFO_INT_TX_AVAIL_LEVEL_ (0xFF000000) /* R/W */ +#define FIFO_INT_TX_STS_LEVEL_ (0x00FF0000) /* R/W */ +#define 
FIFO_INT_RX_AVAIL_LEVEL_ (0x0000FF00) /* R/W */ +#define FIFO_INT_RX_STS_LEVEL_ (0x000000FF) /* R/W */ + +#define RX_CFG (0x6C) +#define RX_CFG_RX_END_ALGN_ (0xC0000000) /* R/W */ +#define RX_CFG_RX_END_ALGN4_ (0x00000000) /* R/W */ +#define RX_CFG_RX_END_ALGN16_ (0x40000000) /* R/W */ +#define RX_CFG_RX_END_ALGN32_ (0x80000000) /* R/W */ +#define RX_CFG_RX_DMA_CNT_ (0x0FFF0000) /* R/W */ +#define RX_CFG_RX_DUMP_ (0x00008000) /* R/W */ +#define RX_CFG_RXDOFF_ (0x00001F00) /* R/W */ +//#define RX_CFG_RXBAD_ (0x00000001) /* R/W */ + +#define TX_CFG (0x70) +//#define TX_CFG_TX_DMA_LVL_ (0xE0000000) /* R/W */ +//#define TX_CFG_TX_DMA_CNT_ (0x0FFF0000) /* R/W Self Clearing */ +#define TX_CFG_TXS_DUMP_ (0x00008000) /* Self Clearing */ +#define TX_CFG_TXD_DUMP_ (0x00004000) /* Self Clearing */ +#define TX_CFG_TXSAO_ (0x00000004) /* R/W */ +#define TX_CFG_TX_ON_ (0x00000002) /* R/W */ +#define TX_CFG_STOP_TX_ (0x00000001) /* Self Clearing */ + +#define HW_CFG (0x74) +#define HW_CFG_TTM_ (0x00200000) /* R/W */ +#define HW_CFG_SF_ (0x00100000) /* R/W */ +#define HW_CFG_TX_FIF_SZ_ (0x000F0000) /* R/W */ +#define HW_CFG_TR_ (0x00003000) /* R/W */ +#define HW_CFG_PHY_CLK_SEL_ (0x00000060) /* R/W */ +#define HW_CFG_PHY_CLK_SEL_INT_PHY_ (0x00000000) /* R/W */ +#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ (0x00000020) /* R/W */ +#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ (0x00000040) /* R/W */ +#define HW_CFG_SMI_SEL_ (0x00000010) /* R/W */ +#define HW_CFG_EXT_PHY_DET_ (0x00000008) /* RO */ +#define HW_CFG_EXT_PHY_EN_ (0x00000004) /* R/W */ +#define HW_CFG_32_16_BIT_MODE_ (0x00000004) /* RO */ +#define HW_CFG_SRST_TO_ (0x00000002) /* RO */ +#define HW_CFG_SRST_ (0x00000001) /* Self Clearing */ + +#define RX_DP_CTRL (0x78) +#define RX_DP_CTRL_RX_FFWD_ (0x80000000) /* R/W */ +#define RX_DP_CTRL_FFWD_BUSY_ (0x80000000) /* RO */ + +#define RX_FIFO_INF (0x7C) +#define RX_FIFO_INF_RXSUSED_ (0x00FF0000) /* RO */ +#define RX_FIFO_INF_RXDUSED_ (0x0000FFFF) /* RO */ + +#define TX_FIFO_INF (0x80) +#define TX_FIFO_INF_TSUSED_ (0x00FF0000) /* RO */ +#define TX_FIFO_INF_TDFREE_ (0x0000FFFF) /* RO */ + +#define PMT_CTRL (0x84) +#define PMT_CTRL_PM_MODE_ (0x00003000) /* Self Clearing */ +#define PMT_CTRL_PHY_RST_ (0x00000400) /* Self Clearing */ +#define PMT_CTRL_WOL_EN_ (0x00000200) /* R/W */ +#define PMT_CTRL_ED_EN_ (0x00000100) /* R/W */ +#define PMT_CTRL_PME_TYPE_ (0x00000040) /* R/W Not Affected by SW Reset */ +#define PMT_CTRL_WUPS_ (0x00000030) /* R/WC */ +#define PMT_CTRL_WUPS_NOWAKE_ (0x00000000) /* R/WC */ +#define PMT_CTRL_WUPS_ED_ (0x00000010) /* R/WC */ +#define PMT_CTRL_WUPS_WOL_ (0x00000020) /* R/WC */ +#define PMT_CTRL_WUPS_MULTI_ (0x00000030) /* R/WC */ +#define PMT_CTRL_PME_IND_ (0x00000008) /* R/W */ +#define PMT_CTRL_PME_POL_ (0x00000004) /* R/W */ +#define PMT_CTRL_PME_EN_ (0x00000002) /* R/W Not Affected by SW Reset */ +#define PMT_CTRL_READY_ (0x00000001) /* RO */ + +#define GPIO_CFG (0x88) +#define GPIO_CFG_LED3_EN_ (0x40000000) /* R/W */ +#define GPIO_CFG_LED2_EN_ (0x20000000) /* R/W */ +#define GPIO_CFG_LED1_EN_ (0x10000000) /* R/W */ +#define GPIO_CFG_GPIO2_INT_POL_ (0x04000000) /* R/W */ +#define GPIO_CFG_GPIO1_INT_POL_ (0x02000000) /* R/W */ +#define GPIO_CFG_GPIO0_INT_POL_ (0x01000000) /* R/W */ +#define GPIO_CFG_EEPR_EN_ (0x00700000) /* R/W */ +#define GPIO_CFG_GPIOBUF2_ (0x00040000) /* R/W */ +#define GPIO_CFG_GPIOBUF1_ (0x00020000) /* R/W */ +#define GPIO_CFG_GPIOBUF0_ (0x00010000) /* R/W */ +#define GPIO_CFG_GPIODIR2_ (0x00000400) /* R/W */ +#define GPIO_CFG_GPIODIR1_ (0x00000200) /* R/W */ +#define 
GPIO_CFG_GPIODIR0_ (0x00000100) /* R/W */ +#define GPIO_CFG_GPIOD4_ (0x00000010) /* R/W */ +#define GPIO_CFG_GPIOD3_ (0x00000008) /* R/W */ +#define GPIO_CFG_GPIOD2_ (0x00000004) /* R/W */ +#define GPIO_CFG_GPIOD1_ (0x00000002) /* R/W */ +#define GPIO_CFG_GPIOD0_ (0x00000001) /* R/W */ + +#define GPT_CFG (0x8C) +#define GPT_CFG_TIMER_EN_ (0x20000000) /* R/W */ +#define GPT_CFG_GPT_LOAD_ (0x0000FFFF) /* R/W */ + +#define GPT_CNT (0x90) +#define GPT_CNT_GPT_CNT_ (0x0000FFFF) /* RO */ + +#define ENDIAN (0x98) +#define FREE_RUN (0x9C) +#define RX_DROP (0xA0) +#define MAC_CSR_CMD (0xA4) +#define MAC_CSR_CMD_CSR_BUSY_ (0x80000000) /* Self Clearing */ +#define MAC_CSR_CMD_R_NOT_W_ (0x40000000) /* R/W */ +#define MAC_CSR_CMD_CSR_ADDR_ (0x000000FF) /* R/W */ + +#define MAC_CSR_DATA (0xA8) +#define AFC_CFG (0xAC) +#define AFC_CFG_AFC_HI_ (0x00FF0000) /* R/W */ +#define AFC_CFG_AFC_LO_ (0x0000FF00) /* R/W */ +#define AFC_CFG_BACK_DUR_ (0x000000F0) /* R/W */ +#define AFC_CFG_FCMULT_ (0x00000008) /* R/W */ +#define AFC_CFG_FCBRD_ (0x00000004) /* R/W */ +#define AFC_CFG_FCADD_ (0x00000002) /* R/W */ +#define AFC_CFG_FCANY_ (0x00000001) /* R/W */ + +#define E2P_CMD (0xB0) +#define E2P_CMD_EPC_BUSY_ (0x80000000) /* Self Clearing */ +#define E2P_CMD_EPC_CMD_ (0x70000000) /* R/W */ +#define E2P_CMD_EPC_CMD_READ_ (0x00000000) /* R/W */ +#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) /* R/W */ +#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) /* R/W */ +#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) /* R/W */ +#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) /* R/W */ +#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) /* R/W */ +#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) /* R/W */ +#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) /* R/W */ +#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) /* RO */ +#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) /* RO */ +#define E2P_CMD_EPC_ADDR_ (0x000000FF) /* R/W */ + +#define E2P_DATA (0xB4) +#define E2P_DATA_EEPROM_DATA_ (0x000000FF) /* R/W */ +/* end of LAN register offsets and bit definitions */ + +/* + **************************************************************************** + **************************************************************************** + * MAC Control and Status Register (Indirect Address) + * Offset (through the MAC_CSR CMD and DATA port) + **************************************************************************** + **************************************************************************** + * + */ +#define MAC_CR (0x01) /* R/W */ + +/* MAC_CR - MAC Control Register */ +#define MAC_CR_RXALL_ (0x80000000) +// TODO: delete this bit? It is not described in the data sheet. 
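The remaining MAC_CR bits continue just below. Before them, a note on access: every register in this indirect block is reached through the MAC_CSR_CMD/MAC_CSR_DATA pair defined above, gated by the self-clearing busy bit. A minimal sketch of that handshake, assuming hypothetical reg_read32()/reg_write32() helpers in place of the driver's SMC_inl()/SMC_outl() accessors; it mirrors what the SMC_GET_MAC_CSR macro later in this header does.

#include <stdint.h>

/* Hypothetical stand-ins for SMC_inl()/SMC_outl() on a mapped chip. */
extern uint32_t reg_read32(unsigned int off);
extern void     reg_write32(unsigned int off, uint32_t val);

#define MAC_CSR_CMD            0xA4
#define MAC_CSR_DATA           0xA8
#define MAC_CSR_CMD_CSR_BUSY_  0x80000000u
#define MAC_CSR_CMD_R_NOT_W_   0x40000000u

/* Read an indirect MAC register: wait for !BUSY, issue a read command
 * with BUSY set, wait for BUSY to self-clear, then fetch the result
 * from the data port. */
static uint32_t mac_csr_read(uint8_t csr_addr)
{
	while (reg_read32(MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
		;
	reg_write32(MAC_CSR_CMD, MAC_CSR_CMD_CSR_BUSY_ |
				 MAC_CSR_CMD_R_NOT_W_ | csr_addr);
	while (reg_read32(MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
		;
	return reg_read32(MAC_CSR_DATA);
}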
+#define MAC_CR_HBDIS_ (0x10000000) +#define MAC_CR_RCVOWN_ (0x00800000) +#define MAC_CR_LOOPBK_ (0x00200000) +#define MAC_CR_FDPX_ (0x00100000) +#define MAC_CR_MCPAS_ (0x00080000) +#define MAC_CR_PRMS_ (0x00040000) +#define MAC_CR_INVFILT_ (0x00020000) +#define MAC_CR_PASSBAD_ (0x00010000) +#define MAC_CR_HFILT_ (0x00008000) +#define MAC_CR_HPFILT_ (0x00002000) +#define MAC_CR_LCOLL_ (0x00001000) +#define MAC_CR_BCAST_ (0x00000800) +#define MAC_CR_DISRTY_ (0x00000400) +#define MAC_CR_PADSTR_ (0x00000100) +#define MAC_CR_BOLMT_MASK_ (0x000000C0) +#define MAC_CR_DFCHK_ (0x00000020) +#define MAC_CR_TXEN_ (0x00000008) +#define MAC_CR_RXEN_ (0x00000004) + +#define ADDRH (0x02) /* R/W mask 0x0000FFFFUL */ +#define ADDRL (0x03) /* R/W mask 0xFFFFFFFFUL */ +#define HASHH (0x04) /* R/W */ +#define HASHL (0x05) /* R/W */ + +#define MII_ACC (0x06) /* R/W */ +#define MII_ACC_PHY_ADDR_ (0x0000F800) +#define MII_ACC_MIIRINDA_ (0x000007C0) +#define MII_ACC_MII_WRITE_ (0x00000002) +#define MII_ACC_MII_BUSY_ (0x00000001) + +#define MII_DATA (0x07) /* R/W mask 0x0000FFFFUL */ + +#define FLOW (0x08) /* R/W */ +#define FLOW_FCPT_ (0xFFFF0000) +#define FLOW_FCPASS_ (0x00000004) +#define FLOW_FCEN_ (0x00000002) +#define FLOW_FCBSY_ (0x00000001) + +#define VLAN1 (0x09) /* R/W mask 0x0000FFFFUL */ +#define VLAN1_VTI1_ (0x0000ffff) + +#define VLAN2 (0x0A) /* R/W mask 0x0000FFFFUL */ +#define VLAN2_VTI2_ (0x0000ffff) + +#define WUFF (0x0B) /* WO */ + +#define WUCSR (0x0C) /* R/W */ +#define WUCSR_GUE_ (0x00000200) +#define WUCSR_WUFR_ (0x00000040) +#define WUCSR_MPR_ (0x00000020) +#define WUCSR_WAKE_EN_ (0x00000004) +#define WUCSR_MPEN_ (0x00000002) + +/* + **************************************************************************** + * Chip Specific MII Defines + **************************************************************************** + * + * Phy register offsets and bit definitions + * + */ + +#define PHY_MODE_CTRL_STS ((u32)17) /* Mode Control/Status Register */ +//#define MODE_CTRL_STS_FASTRIP_ ((u16)0x4000) +#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000) +//#define MODE_CTRL_STS_LOWSQEN_ ((u16)0x0800) +//#define MODE_CTRL_STS_MDPREBP_ ((u16)0x0400) +//#define MODE_CTRL_STS_FARLOOPBACK_ ((u16)0x0200) +//#define MODE_CTRL_STS_FASTEST_ ((u16)0x0100) +//#define MODE_CTRL_STS_REFCLKEN_ ((u16)0x0010) +//#define MODE_CTRL_STS_PHYADBP_ ((u16)0x0008) +//#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004) +#define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002) + +#define PHY_INT_SRC ((u32)29) +#define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080) +#define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040) +#define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020) +#define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010) +#define PHY_INT_SRC_ANEG_LP_ACK_ ((u16)0x0008) +#define PHY_INT_SRC_PAR_DET_FAULT_ ((u16)0x0004) +#define PHY_INT_SRC_ANEG_PGRX_ ((u16)0x0002) + +#define PHY_INT_MASK ((u32)30) +#define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080) +#define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040) +#define PHY_INT_MASK_REMOTE_FAULT_ ((u16)0x0020) +#define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010) +#define PHY_INT_MASK_ANEG_LP_ACK_ ((u16)0x0008) +#define PHY_INT_MASK_PAR_DET_FAULT_ ((u16)0x0004) +#define PHY_INT_MASK_ANEG_PGRX_ ((u16)0x0002) + +#define PHY_SPECIAL ((u32)31) +#define PHY_SPECIAL_ANEG_DONE_ ((u16)0x1000) +#define PHY_SPECIAL_RES_ ((u16)0x0040) +#define PHY_SPECIAL_RES_MASK_ ((u16)0x0FE1) +#define PHY_SPECIAL_SPD_ ((u16)0x001C) +#define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004) +#define PHY_SPECIAL_SPD_10FULL_ ((u16)0x0014) +#define PHY_SPECIAL_SPD_100HALF_ ((u16)0x0008) 
+#define PHY_SPECIAL_SPD_100FULL_ ((u16)0x0018) + +#define LAN911X_INTERNAL_PHY_ID (0x0007C000) + +/* Chip ID values */ +#define CHIP_9115 0x0115 +#define CHIP_9116 0x0116 +#define CHIP_9117 0x0117 +#define CHIP_9118 0x0118 +#define CHIP_9211 0x9211 +#define CHIP_9215 0x115A +#define CHIP_9217 0x117A +#define CHIP_9218 0x118A + +struct chip_id { + u16 id; + char *name; +}; + +static const struct chip_id chip_ids[] = { + { CHIP_9115, "LAN9115" }, + { CHIP_9116, "LAN9116" }, + { CHIP_9117, "LAN9117" }, + { CHIP_9118, "LAN9118" }, + { CHIP_9211, "LAN9211" }, + { CHIP_9215, "LAN9215" }, + { CHIP_9217, "LAN9217" }, + { CHIP_9218, "LAN9218" }, + { 0, NULL }, +}; + +#define IS_REV_A(x) ((x & 0xFFFF)==0) + +/* + * Macros to abstract register access according to the data bus + * capabilities. Please use those and not the in/out primitives. + */ +/* FIFO read/write macros */ +#define SMC_PUSH_DATA(lp, p, l) SMC_outsl( lp, TX_DATA_FIFO, p, (l) >> 2 ) +#define SMC_PULL_DATA(lp, p, l) SMC_insl ( lp, RX_DATA_FIFO, p, (l) >> 2 ) +#define SMC_SET_TX_FIFO(lp, x) SMC_outl( x, lp, TX_DATA_FIFO ) +#define SMC_GET_RX_FIFO(lp) SMC_inl( lp, RX_DATA_FIFO ) + + +/* I/O mapped register read/write macros */ +#define SMC_GET_TX_STS_FIFO(lp) SMC_inl( lp, TX_STATUS_FIFO ) +#define SMC_GET_RX_STS_FIFO(lp) SMC_inl( lp, RX_STATUS_FIFO ) +#define SMC_GET_RX_STS_FIFO_PEEK(lp) SMC_inl( lp, RX_STATUS_FIFO_PEEK ) +#define SMC_GET_PN(lp) (SMC_inl( lp, ID_REV ) >> 16) +#define SMC_GET_REV(lp) (SMC_inl( lp, ID_REV ) & 0xFFFF) +#define SMC_GET_IRQ_CFG(lp) SMC_inl( lp, INT_CFG ) +#define SMC_SET_IRQ_CFG(lp, x) SMC_outl( x, lp, INT_CFG ) +#define SMC_GET_INT(lp) SMC_inl( lp, INT_STS ) +#define SMC_ACK_INT(lp, x) SMC_outl( x, lp, INT_STS ) +#define SMC_GET_INT_EN(lp) SMC_inl( lp, INT_EN ) +#define SMC_SET_INT_EN(lp, x) SMC_outl( x, lp, INT_EN ) +#define SMC_GET_BYTE_TEST(lp) SMC_inl( lp, BYTE_TEST ) +#define SMC_SET_BYTE_TEST(lp, x) SMC_outl( x, lp, BYTE_TEST ) +#define SMC_GET_FIFO_INT(lp) SMC_inl( lp, FIFO_INT ) +#define SMC_SET_FIFO_INT(lp, x) SMC_outl( x, lp, FIFO_INT ) +#define SMC_SET_FIFO_TDA(lp, x) \ + do { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<24); \ + SMC_SET_FIFO_INT( (lp), __mask | (x)<<24 ); \ + local_irq_restore(__flags); \ + } while (0) +#define SMC_SET_FIFO_TSL(lp, x) \ + do { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<16); \ + SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<16)); \ + local_irq_restore(__flags); \ + } while (0) +#define SMC_SET_FIFO_RSA(lp, x) \ + do { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<8); \ + SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<8)); \ + local_irq_restore(__flags); \ + } while (0) +#define SMC_SET_FIFO_RSL(lp, x) \ + do { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_GET_FIFO_INT((lp)) & ~0xFF; \ + SMC_SET_FIFO_INT( (lp),__mask | ((x) & 0xFF)); \ + local_irq_restore(__flags); \ + } while (0) +#define SMC_GET_RX_CFG(lp) SMC_inl( lp, RX_CFG ) +#define SMC_SET_RX_CFG(lp, x) SMC_outl( x, lp, RX_CFG ) +#define SMC_GET_TX_CFG(lp) SMC_inl( lp, TX_CFG ) +#define SMC_SET_TX_CFG(lp, x) SMC_outl( x, lp, TX_CFG ) +#define SMC_GET_HW_CFG(lp) SMC_inl( lp, HW_CFG ) +#define SMC_SET_HW_CFG(lp, x) SMC_outl( x, lp, HW_CFG ) +#define SMC_GET_RX_DP_CTRL(lp) SMC_inl( lp, RX_DP_CTRL ) +#define SMC_SET_RX_DP_CTRL(lp, x) SMC_outl( x, 
lp, RX_DP_CTRL ) +#define SMC_GET_PMT_CTRL(lp) SMC_inl( lp, PMT_CTRL ) +#define SMC_SET_PMT_CTRL(lp, x) SMC_outl( x, lp, PMT_CTRL ) +#define SMC_GET_GPIO_CFG(lp) SMC_inl( lp, GPIO_CFG ) +#define SMC_SET_GPIO_CFG(lp, x) SMC_outl( x, lp, GPIO_CFG ) +#define SMC_GET_RX_FIFO_INF(lp) SMC_inl( lp, RX_FIFO_INF ) +#define SMC_SET_RX_FIFO_INF(lp, x) SMC_outl( x, lp, RX_FIFO_INF ) +#define SMC_GET_TX_FIFO_INF(lp) SMC_inl( lp, TX_FIFO_INF ) +#define SMC_SET_TX_FIFO_INF(lp, x) SMC_outl( x, lp, TX_FIFO_INF ) +#define SMC_GET_GPT_CFG(lp) SMC_inl( lp, GPT_CFG ) +#define SMC_SET_GPT_CFG(lp, x) SMC_outl( x, lp, GPT_CFG ) +#define SMC_GET_RX_DROP(lp) SMC_inl( lp, RX_DROP ) +#define SMC_SET_RX_DROP(lp, x) SMC_outl( x, lp, RX_DROP ) +#define SMC_GET_MAC_CMD(lp) SMC_inl( lp, MAC_CSR_CMD ) +#define SMC_SET_MAC_CMD(lp, x) SMC_outl( x, lp, MAC_CSR_CMD ) +#define SMC_GET_MAC_DATA(lp) SMC_inl( lp, MAC_CSR_DATA ) +#define SMC_SET_MAC_DATA(lp, x) SMC_outl( x, lp, MAC_CSR_DATA ) +#define SMC_GET_AFC_CFG(lp) SMC_inl( lp, AFC_CFG ) +#define SMC_SET_AFC_CFG(lp, x) SMC_outl( x, lp, AFC_CFG ) +#define SMC_GET_E2P_CMD(lp) SMC_inl( lp, E2P_CMD ) +#define SMC_SET_E2P_CMD(lp, x) SMC_outl( x, lp, E2P_CMD ) +#define SMC_GET_E2P_DATA(lp) SMC_inl( lp, E2P_DATA ) +#define SMC_SET_E2P_DATA(lp, x) SMC_outl( x, lp, E2P_DATA ) + +/* MAC register read/write macros */ +#define SMC_GET_MAC_CSR(lp,a,v) \ + do { \ + while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + SMC_SET_MAC_CMD((lp),MAC_CSR_CMD_CSR_BUSY_ | \ + MAC_CSR_CMD_R_NOT_W_ | (a) ); \ + while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + v = SMC_GET_MAC_DATA((lp)); \ + } while (0) +#define SMC_SET_MAC_CSR(lp,a,v) \ + do { \ + while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + SMC_SET_MAC_DATA((lp), v); \ + SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_CSR_BUSY_ | (a) ); \ + while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + } while (0) +#define SMC_GET_MAC_CR(lp, x) SMC_GET_MAC_CSR( (lp), MAC_CR, x ) +#define SMC_SET_MAC_CR(lp, x) SMC_SET_MAC_CSR( (lp), MAC_CR, x ) +#define SMC_GET_ADDRH(lp, x) SMC_GET_MAC_CSR( (lp), ADDRH, x ) +#define SMC_SET_ADDRH(lp, x) SMC_SET_MAC_CSR( (lp), ADDRH, x ) +#define SMC_GET_ADDRL(lp, x) SMC_GET_MAC_CSR( (lp), ADDRL, x ) +#define SMC_SET_ADDRL(lp, x) SMC_SET_MAC_CSR( (lp), ADDRL, x ) +#define SMC_GET_HASHH(lp, x) SMC_GET_MAC_CSR( (lp), HASHH, x ) +#define SMC_SET_HASHH(lp, x) SMC_SET_MAC_CSR( (lp), HASHH, x ) +#define SMC_GET_HASHL(lp, x) SMC_GET_MAC_CSR( (lp), HASHL, x ) +#define SMC_SET_HASHL(lp, x) SMC_SET_MAC_CSR( (lp), HASHL, x ) +#define SMC_GET_MII_ACC(lp, x) SMC_GET_MAC_CSR( (lp), MII_ACC, x ) +#define SMC_SET_MII_ACC(lp, x) SMC_SET_MAC_CSR( (lp), MII_ACC, x ) +#define SMC_GET_MII_DATA(lp, x) SMC_GET_MAC_CSR( (lp), MII_DATA, x ) +#define SMC_SET_MII_DATA(lp, x) SMC_SET_MAC_CSR( (lp), MII_DATA, x ) +#define SMC_GET_FLOW(lp, x) SMC_GET_MAC_CSR( (lp), FLOW, x ) +#define SMC_SET_FLOW(lp, x) SMC_SET_MAC_CSR( (lp), FLOW, x ) +#define SMC_GET_VLAN1(lp, x) SMC_GET_MAC_CSR( (lp), VLAN1, x ) +#define SMC_SET_VLAN1(lp, x) SMC_SET_MAC_CSR( (lp), VLAN1, x ) +#define SMC_GET_VLAN2(lp, x) SMC_GET_MAC_CSR( (lp), VLAN2, x ) +#define SMC_SET_VLAN2(lp, x) SMC_SET_MAC_CSR( (lp), VLAN2, x ) +#define SMC_SET_WUFF(lp, x) SMC_SET_MAC_CSR( (lp), WUFF, x ) +#define SMC_GET_WUCSR(lp, x) SMC_GET_MAC_CSR( (lp), WUCSR, x ) +#define SMC_SET_WUCSR(lp, x) SMC_SET_MAC_CSR( (lp), WUCSR, x ) + +/* PHY register read/write macros */ +#define SMC_GET_MII(lp,a,phy,v) \ + do { \ + u32 __v; \ + do { \ + SMC_GET_MII_ACC((lp), __v); \ + } while ( __v & 
MII_ACC_MII_BUSY_ ); \ + SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \ + MII_ACC_MII_BUSY_); \ + do { \ + SMC_GET_MII_ACC( (lp), __v); \ + } while ( __v & MII_ACC_MII_BUSY_ ); \ + SMC_GET_MII_DATA((lp), v); \ + } while (0) +#define SMC_SET_MII(lp,a,phy,v) \ + do { \ + u32 __v; \ + do { \ + SMC_GET_MII_ACC((lp), __v); \ + } while ( __v & MII_ACC_MII_BUSY_ ); \ + SMC_SET_MII_DATA((lp), v); \ + SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \ + MII_ACC_MII_BUSY_ | \ + MII_ACC_MII_WRITE_ ); \ + do { \ + SMC_GET_MII_ACC((lp), __v); \ + } while ( __v & MII_ACC_MII_BUSY_ ); \ + } while (0) +#define SMC_GET_PHY_BMCR(lp,phy,x) SMC_GET_MII( (lp), MII_BMCR, phy, x ) +#define SMC_SET_PHY_BMCR(lp,phy,x) SMC_SET_MII( (lp), MII_BMCR, phy, x ) +#define SMC_GET_PHY_BMSR(lp,phy,x) SMC_GET_MII( (lp), MII_BMSR, phy, x ) +#define SMC_GET_PHY_ID1(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID1, phy, x ) +#define SMC_GET_PHY_ID2(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID2, phy, x ) +#define SMC_GET_PHY_MII_ADV(lp,phy,x) SMC_GET_MII( (lp), MII_ADVERTISE, phy, x ) +#define SMC_SET_PHY_MII_ADV(lp,phy,x) SMC_SET_MII( (lp), MII_ADVERTISE, phy, x ) +#define SMC_GET_PHY_MII_LPA(lp,phy,x) SMC_GET_MII( (lp), MII_LPA, phy, x ) +#define SMC_SET_PHY_MII_LPA(lp,phy,x) SMC_SET_MII( (lp), MII_LPA, phy, x ) +#define SMC_GET_PHY_CTRL_STS(lp,phy,x) SMC_GET_MII( (lp), PHY_MODE_CTRL_STS, phy, x ) +#define SMC_SET_PHY_CTRL_STS(lp,phy,x) SMC_SET_MII( (lp), PHY_MODE_CTRL_STS, phy, x ) +#define SMC_GET_PHY_INT_SRC(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_SRC, phy, x ) +#define SMC_SET_PHY_INT_SRC(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_SRC, phy, x ) +#define SMC_GET_PHY_INT_MASK(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_MASK, phy, x ) +#define SMC_SET_PHY_INT_MASK(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_MASK, phy, x ) +#define SMC_GET_PHY_SPECIAL(lp,phy,x) SMC_GET_MII( (lp), PHY_SPECIAL, phy, x ) + + + +/* Misc read/write macros */ + +#ifndef SMC_GET_MAC_ADDR +#define SMC_GET_MAC_ADDR(lp, addr) \ + do { \ + unsigned int __v; \ + \ + SMC_GET_MAC_CSR((lp), ADDRL, __v); \ + addr[0] = __v; addr[1] = __v >> 8; \ + addr[2] = __v >> 16; addr[3] = __v >> 24; \ + SMC_GET_MAC_CSR((lp), ADDRH, __v); \ + addr[4] = __v; addr[5] = __v >> 8; \ + } while (0) +#endif + +#define SMC_SET_MAC_ADDR(lp, addr) \ + do { \ + SMC_SET_MAC_CSR((lp), ADDRL, \ + addr[0] | \ + (addr[1] << 8) | \ + (addr[2] << 16) | \ + (addr[3] << 24)); \ + SMC_SET_MAC_CSR((lp), ADDRH, addr[4]|(addr[5] << 8));\ + } while (0) + + +#define SMC_WRITE_EEPROM_CMD(lp, cmd, addr) \ + do { \ + while (SMC_GET_E2P_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_R_NOT_W_ | a ); \ + while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + } while (0) + +#endif /* _SMC911X_H_ */ diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c new file mode 100644 index 0000000..5b65ac4 --- /dev/null +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -0,0 +1,1589 @@ +/*------------------------------------------------------------------------ + . smc9194.c + . This is a driver for SMC's 9000 series of Ethernet cards. + . + . Copyright (C) 1996 by Erik Stahlman + . This software may be used and distributed according to the terms + . of the GNU General Public License, incorporated herein by reference. + . + . "Features" of the SMC chip: + . 4608 byte packet memory. ( for the 91C92. Others have more ) + . EEPROM for configuration + . AUI/TP selection ( mine has 10Base2/10BaseT select ) + . + . Arguments: + . io = for the base address + . irq = for the IRQ + . 
ifport = 0 for autodetect, 1 for TP, 2 for AUI ( or 10base2 )
+ .
+ . author:
+ .	Erik Stahlman	( erik@vt.edu )
+ . contributors:
+ .	Arnaldo Carvalho de Melo
+ .
+ . Hardware multicast code from Peter Cammaert ( pc@denkart.be )
+ .
+ . Sources:
+ .    o   SMC databook
+ .    o   skeleton.c by Donald Becker ( becker@scyld.com )
+ .    o   ( a LOT of advice from Becker as well )
+ .
+ . History:
+ .	12/07/95  Erik Stahlman  written, got receive/xmit handled
+ .	01/03/96  Erik Stahlman  worked out some bugs, actually usable!!! :-)
+ .	01/06/96  Erik Stahlman	 cleaned up some, better testing, etc
+ .	01/29/96  Erik Stahlman	 fixed autoirq, added multicast
+ .	02/01/96  Erik Stahlman	 1. disabled all interrupts in smc_reset
+ .				 2. got rid of post-decrementing bug -- UGH.
+ .	02/13/96  Erik Stahlman	 Tried to fix autoirq failure.  Added more
+ .				 descriptive error messages.
+ .	02/15/96  Erik Stahlman	 Fixed typo that caused detection failure
+ .	02/23/96  Erik Stahlman	 Modified it to fit into kernel tree
+ .				 Added support to change hardware address
+ .				 Cleared stats on opens
+ .	02/26/96  Erik Stahlman	 Trial support for Kernel 1.2.13
+ .				 Kludge for automatic IRQ detection
+ .	03/04/96  Erik Stahlman	 Fixed kernel 1.3.70 +
+ .				 Fixed bug reported by Gardner Buchanan in
+ .				 smc_enable, with outw instead of outb
+ .	03/06/96  Erik Stahlman	 Added hardware multicast from Peter Cammaert
+ .	04/14/00  Heiko Pruessing (SMA Regelsysteme)  Fixed bug in chip memory
+ .				 allocation
+ .	08/20/00  Arnaldo Melo	 fix kfree(skb) in smc_hardware_send_packet
+ .	12/15/00  Christian Jullien fix "Warning: kfree_skb on hard IRQ"
+ .	11/08/01  Matt Domsch	 Use common crc32 function
+ ----------------------------------------------------------------------------*/
+
+static const char version[] =
+	"smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+
+#include "smc9194.h"
+
+#define DRV_NAME "smc9194"
+
+/*------------------------------------------------------------------------
+ .
+ . Configuration options, for the experienced user to change.
+ .
+ -------------------------------------------------------------------------*/
+
+/*
+ . Do you want to use 32 bit xfers?  This should work on all chips, as
+ . the chipset is designed to accommodate them.
+*/
+#ifdef __sh__
+#undef USE_32_BIT
+#else
+#define USE_32_BIT 1
+#endif
+
+#if defined(__H8300H__) || defined(__H8300S__)
+#define NO_AUTOPROBE
+#undef insl
+#undef outsl
+#define insl(a,b,l)  io_insl_noswap(a,b,l)
+#define outsl(a,b,l) io_outsl_noswap(a,b,l)
+#endif
+
+/*
+ .the SMC9194 can be at any of the following port addresses.  To change,
+ .for a slightly different card, you can add it to the array.  Keep in
+ .mind that the array must end in zero.
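The probe loop that consumes this table relies only on the zero sentinel, never on an element count. A minimal sketch of the pattern, with fake_probe() standing in for smc_probe(); the struct mirrors the devlist defined just below.

struct devlist {
	unsigned int port;
	unsigned int irq;
};

/* Hypothetical stand-in for smc_probe(); returns 0 on success. */
extern int fake_probe(unsigned int port);

static const struct devlist probe_list[] = {
	{ .port = 0x200, .irq = 0 },
	{ .port = 0x220, .irq = 0 },
	{ .port = 0,     .irq = 0 },	/* terminator: port == 0 */
};

static unsigned int probe_all(void)
{
	const struct devlist *d;

	for (d = probe_list; d->port; d++)
		if (fake_probe(d->port) == 0)
			return d->port;	/* found a chip here */
	return 0;			/* nothing answered */
}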
+*/ + +struct devlist { + unsigned int port; + unsigned int irq; +}; + +#if defined(CONFIG_H8S_EDOSK2674) +static struct devlist smc_devlist[] __initdata = { + {.port = 0xf80000, .irq = 16}, + {.port = 0, .irq = 0 }, +}; +#else +static struct devlist smc_devlist[] __initdata = { + {.port = 0x200, .irq = 0}, + {.port = 0x220, .irq = 0}, + {.port = 0x240, .irq = 0}, + {.port = 0x260, .irq = 0}, + {.port = 0x280, .irq = 0}, + {.port = 0x2A0, .irq = 0}, + {.port = 0x2C0, .irq = 0}, + {.port = 0x2E0, .irq = 0}, + {.port = 0x300, .irq = 0}, + {.port = 0x320, .irq = 0}, + {.port = 0x340, .irq = 0}, + {.port = 0x360, .irq = 0}, + {.port = 0x380, .irq = 0}, + {.port = 0x3A0, .irq = 0}, + {.port = 0x3C0, .irq = 0}, + {.port = 0x3E0, .irq = 0}, + {.port = 0, .irq = 0}, +}; +#endif +/* + . Wait time for memory to be free. This probably shouldn't be + . tuned that much, as waiting for this means nothing else happens + . in the system +*/ +#define MEMORY_WAIT_TIME 16 + +/* + . DEBUGGING LEVELS + . + . 0 for normal operation + . 1 for slightly more details + . >2 for various levels of increasingly useless information + . 2 for interrupt tracking, status flags + . 3 for packet dumps, etc. +*/ +#define SMC_DEBUG 0 + +#if (SMC_DEBUG > 2 ) +#define PRINTK3(x) printk x +#else +#define PRINTK3(x) +#endif + +#if SMC_DEBUG > 1 +#define PRINTK2(x) printk x +#else +#define PRINTK2(x) +#endif + +#ifdef SMC_DEBUG +#define PRINTK(x) printk x +#else +#define PRINTK(x) +#endif + + +/*------------------------------------------------------------------------ + . + . The internal workings of the driver. If you are changing anything + . here with the SMC stuff, you should have the datasheet and known + . what you are doing. + . + -------------------------------------------------------------------------*/ +#define CARDNAME "SMC9194" + + +/* store this information for the driver.. */ +struct smc_local { + /* + If I have to wait until memory is available to send + a packet, I will store the skbuff here, until I get the + desired memory. Then, I'll send it out and free it. + */ + struct sk_buff * saved_skb; + + /* + . This keeps track of how many packets that I have + . sent out. When an TX_EMPTY interrupt comes, I know + . that all of these have been sent. + */ + int packets_waiting; +}; + + +/*----------------------------------------------------------------- + . + . The driver can be entered at any of the following entry points. + . + .------------------------------------------------------------------ */ + +/* + . This is called by register_netdev(). It is responsible for + . checking the portlist for the SMC9000 series chipset. If it finds + . one, then it will initialize the device, find the hardware information, + . and sets up the appropriate device parameters. + . NOTE: Interrupts are *OFF* when this procedure is called. + . + . NB:This shouldn't be static since it is referred to externally. +*/ +struct net_device *smc_init(int unit); + +/* + . The kernel calls this function when someone wants to use the device, + . typically 'ifconfig ethX up'. +*/ +static int smc_open(struct net_device *dev); + +/* + . Our watchdog timed out. Called by the networking layer +*/ +static void smc_timeout(struct net_device *dev); + +/* + . This is called by the kernel in response to 'ifconfig ethX down'. It + . is responsible for cleaning up everything that the open routine + . does, and maybe putting the card into a powerdown state. +*/ +static int smc_close(struct net_device *dev); + +/* + . 
Finally, a call to set promiscuous mode ( for TCPDUMP and related + . programs ) and multicast modes. +*/ +static void smc_set_multicast_list(struct net_device *dev); + + +/*--------------------------------------------------------------- + . + . Interrupt level calls.. + . + ----------------------------------------------------------------*/ + +/* + . Handles the actual interrupt +*/ +static irqreturn_t smc_interrupt(int irq, void *); +/* + . This is a separate procedure to handle the receipt of a packet, to + . leave the interrupt code looking slightly cleaner +*/ +static inline void smc_rcv( struct net_device *dev ); +/* + . This handles a TX interrupt, which is only called when an error + . relating to a packet is sent. +*/ +static inline void smc_tx( struct net_device * dev ); + +/* + ------------------------------------------------------------ + . + . Internal routines + . + ------------------------------------------------------------ +*/ + +/* + . Test if a given location contains a chip, trying to cause as + . little damage as possible if it's not a SMC chip. +*/ +static int smc_probe(struct net_device *dev, int ioaddr); + +/* + . A rather simple routine to print out a packet for debugging purposes. +*/ +#if SMC_DEBUG > 2 +static void print_packet( byte *, int ); +#endif + +#define tx_done(dev) 1 + +/* this is called to actually send the packet to the chip */ +static void smc_hardware_send_packet( struct net_device * dev ); + +/* Since I am not sure if I will have enough room in the chip's ram + . to store the packet, I call this routine, which either sends it + . now, or generates an interrupt when the card is ready for the + . packet */ +static netdev_tx_t smc_wait_to_send_packet( struct sk_buff * skb, + struct net_device *dev ); + +/* this does a soft reset on the device */ +static void smc_reset( int ioaddr ); + +/* Enable Interrupts, Receive, and Transmit */ +static void smc_enable( int ioaddr ); + +/* this puts the device in an inactive state */ +static void smc_shutdown( int ioaddr ); + +/* This routine will find the IRQ of the driver if one is not + . specified in the input to the device. */ +static int smc_findirq( int ioaddr ); + +/* + . Function: smc_reset( int ioaddr ) + . Purpose: + . This sets the SMC91xx chip to its normal state, hopefully from whatever + . mess that any other DOS driver has put it in. + . + . Maybe I should reset more registers to defaults in here? SOFTRESET should + . do that for me. + . + . Method: + . 1. send a SOFT RESET + . 2. wait for it to finish + . 3. enable autorelease mode + . 4. reset the memory management unit + . 5. clear all interrupts + . +*/ +static void smc_reset( int ioaddr ) +{ + /* This resets the registers mostly to defaults, but doesn't + affect EEPROM. That seems unnecessary */ + SMC_SELECT_BANK( 0 ); + outw( RCR_SOFTRESET, ioaddr + RCR ); + + /* this should pause enough for the chip to be happy */ + SMC_DELAY( ); + + /* Set the transmit and receive configuration registers to + default values */ + outw( RCR_CLEAR, ioaddr + RCR ); + outw( TCR_CLEAR, ioaddr + TCR ); + + /* set the control register to automatically + release successfully transmitted packets, to make the best + use out of our limited memory */ + SMC_SELECT_BANK( 1 ); + outw( inw( ioaddr + CONTROL ) | CTL_AUTO_RELEASE , ioaddr + CONTROL ); + + /* Reset the MMU */ + SMC_SELECT_BANK( 2 ); + outw( MC_RESET, ioaddr + MMU_CMD ); + + /* Note: It doesn't seem that waiting for the MMU busy is needed here, + but this is a place where future chipsets _COULD_ break. 
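The caution in the note above can be made mechanical: the 91C9x MMU command register carries a busy flag that can be polled before the next command is issued. A hedged sketch under those assumptions — MC_BUSY taken to be bit 0 of MMU_CMD as the databook describes, bank 2 already selected, and inw()/udelay() available as in the surrounding driver.

/* Poll the MMU busy bit before issuing a second MMU command; bounded
 * so a dead chip cannot hang the caller.  Assumes bank 2 is selected
 * and MMU_CMD is the register name from smc9194.h. */
#define MC_BUSY 0x0001	/* assumed: bit 0 of MMU_CMD */

static int smc_wait_mmu_ready(int ioaddr)
{
	int timeout = 1000;

	while ((inw(ioaddr + MMU_CMD) & MC_BUSY) && --timeout)
		udelay(1);
	return timeout ? 0 : -1;	/* 0 when ready, -1 on timeout */
}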
Be wary
+   of issuing another MMU command right after this */
+
+	outb( 0, ioaddr + INT_MASK );
+}
+
+/*
+ . Function: smc_enable
+ . Purpose: let the chip talk to the outside world
+ . Method:
+ .	1.  Enable the transmitter
+ .	2.  Enable the receiver
+ .	3.  Enable interrupts
+*/
+static void smc_enable( int ioaddr )
+{
+	SMC_SELECT_BANK( 0 );
+	/* see the header file for options in TCR/RCR NORMAL*/
+	outw( TCR_NORMAL, ioaddr + TCR );
+	outw( RCR_NORMAL, ioaddr + RCR );
+
+	/* now, enable interrupts */
+	SMC_SELECT_BANK( 2 );
+	outb( SMC_INTERRUPT_MASK, ioaddr + INT_MASK );
+}
+
+/*
+ . Function: smc_shutdown
+ . Purpose:  closes down the SMC91xxx chip.
+ . Method:
+ .	1. zero the interrupt mask
+ .	2. clear the enable receive flag
+ .	3. clear the enable xmit flags
+ .
+ . TODO:
+ .   (1) maybe utilize power down mode.
+ .	Why not yet?  Because while the chip will go into power down mode,
+ .	the manual says that it will wake up in response to any I/O requests
+ .	in the register space.  Empirical results do not show this working.
+*/
+static void smc_shutdown( int ioaddr )
+{
+	/* no more interrupts for me */
+	SMC_SELECT_BANK( 2 );
+	outb( 0, ioaddr + INT_MASK );
+
+	/* and tell the card to stay away from that nasty outside world */
+	SMC_SELECT_BANK( 0 );
+	outb( RCR_CLEAR, ioaddr + RCR );
+	outb( TCR_CLEAR, ioaddr + TCR );
+#if 0
+	/* finally, shut the chip down */
+	SMC_SELECT_BANK( 1 );
+	outw( inw( ioaddr + CONTROL ), CTL_POWERDOWN, ioaddr + CONTROL );
+#endif
+}
+
+
+/*
+ . Function: smc_setmulticast( int ioaddr, struct net_device *dev )
+ . Purpose:
+ .    This sets the internal hardware table to filter out unwanted multicast
+ .    packets before they take up memory.
+ .
+ .    The SMC chip uses a hash table where the high 6 bits of the CRC of
+ .    address are the offset into the table.  If that bit is 1, then the
+ .    multicast packet is accepted.  Otherwise, it's dropped silently.
+ .
+ .    To use the 6 bits as an offset into the table, the high 3 bits are the
+ .    number of the 8 bit register, while the low 3 bits are the bit within
+ .    that register.
+ .
+ . This routine is based very heavily on the one provided by Peter Cammaert.
+*/
+
+
+static void smc_setmulticast(int ioaddr, struct net_device *dev)
+{
+	int i;
+	unsigned char multicast_table[ 8 ];
+	struct netdev_hw_addr *ha;
+	/* table for flipping the order of 3 bits */
+	unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
+
+	/* start with a table of all zeros: reject all */
+	memset( multicast_table, 0, sizeof( multicast_table ) );
+
+	netdev_for_each_mc_addr(ha, dev) {
+		int position;
+
+		/* only use the low order bits */
+		position = ether_crc_le(6, ha->addr) & 0x3f;
+
+		/* do some messy swapping to put the bit in the right spot */
+		multicast_table[invert3[position&7]] |=
+			(1 << invert3[(position >> 3) & 7]);
+
+	}
+	/* now, the table can be loaded into the chipset */
+	SMC_SELECT_BANK( 3 );
+
+	for ( i = 0; i < 8 ; i++ ) {
+		outb( multicast_table[i], ioaddr + MULTICAST1 + i );
+	}
+}
+
+/*
+ . Function: smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * )
+ . Purpose:
+ .    Attempt to allocate memory for a packet, if chip-memory is not
+ .    available, then tell the card to generate an interrupt when it
+ .    is available.
+ .
+ . Algorithm:
+ .
+ . o	if the saved_skb is not currently null, then drop this packet
+ .	on the floor.  This should never happen, because of TBUSY.
+ . o	if the saved_skb is null, then replace it with the current packet,
+ . o	See if I can send it now.
+ .
o (NO): Enable interrupts and let the interrupt handler deal with it. + . o (YES):Send it now. +*/ +static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + word length; + unsigned short numPages; + word time_out; + + netif_stop_queue(dev); + /* Well, I want to send the packet.. but I don't know + if I can send it right now... */ + + if ( lp->saved_skb) { + /* THIS SHOULD NEVER HAPPEN. */ + dev->stats.tx_aborted_errors++; + printk(CARDNAME": Bad Craziness - sent packet while busy.\n" ); + return NETDEV_TX_BUSY; + } + lp->saved_skb = skb; + + length = skb->len; + + if (length < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) { + netif_wake_queue(dev); + return NETDEV_TX_OK; + } + length = ETH_ZLEN; + } + + /* + ** The MMU wants the number of pages to be the number of 256 bytes + ** 'pages', minus 1 ( since a packet can't ever have 0 pages :) ) + ** + ** Pkt size for allocating is data length +6 (for additional status words, + ** length and ctl!) If odd size last byte is included in this header. + */ + numPages = ((length & 0xfffe) + 6) / 256; + + if (numPages > 7 ) { + printk(CARDNAME": Far too big packet error.\n"); + /* freeing the packet is a good thing here... but should + . any packets of this size get down here? */ + dev_kfree_skb (skb); + lp->saved_skb = NULL; + /* this IS an error, but, i don't want the skb saved */ + netif_wake_queue(dev); + return NETDEV_TX_OK; + } + /* either way, a packet is waiting now */ + lp->packets_waiting++; + + /* now, try to allocate the memory */ + SMC_SELECT_BANK( 2 ); + outw( MC_ALLOC | numPages, ioaddr + MMU_CMD ); + /* + . Performance Hack + . + . wait a short amount of time.. if I can send a packet now, I send + . it now. Otherwise, I enable an interrupt and wait for one to be + . available. + . + . I could have handled this a slightly different way, by checking to + . see if any memory was available in the FREE MEMORY register. However, + . either way, I need to generate an allocation, and the allocation works + . no matter what, so I saw no point in checking free memory. + */ + time_out = MEMORY_WAIT_TIME; + do { + word status; + + status = inb( ioaddr + INTERRUPT ); + if ( status & IM_ALLOC_INT ) { + /* acknowledge the interrupt */ + outb( IM_ALLOC_INT, ioaddr + INTERRUPT ); + break; + } + } while ( -- time_out ); + + if ( !time_out ) { + /* oh well, wait until the chip finds memory later */ + SMC_ENABLE_INT( IM_ALLOC_INT ); + PRINTK2((CARDNAME": memory allocation deferred.\n")); + /* it's deferred, but I'll handle it later */ + return NETDEV_TX_OK; + } + /* or YES! I can send the packet now.. */ + smc_hardware_send_packet(dev); + netif_wake_queue(dev); + return NETDEV_TX_OK; +} + +/* + . Function: smc_hardware_send_packet(struct net_device * ) + . Purpose: + . This sends the actual packet to the SMC9xxx chip. + . + . Algorithm: + . First, see if a saved_skb is available. + . ( this should NOT be called if there is no 'saved_skb' + . Now, find the packet number that the chip allocated + . Point the data pointers at it in memory + . Set the length word in the chip's memory + . Dump the packet to chip memory + . Check if a last byte is needed ( odd length packet ) + . if so, set the control flag right + . Tell the card to send it + . Enable the transmit interrupt, so I know if it failed + . Free the kernel data if I actually sent it. 
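Stripped of its error paths, the routine above is a small state machine: request chip memory, poll briefly, and hand the rest to the interrupt handler if the chip is still busy. A condensed sketch under those assumptions — poll_alloc_int() is a hypothetical helper wrapping the MEMORY_WAIT_TIME loop, and a single page is requested for brevity.

/* Hypothetical helper: returns nonzero once IM_ALLOC_INT has fired. */
extern int poll_alloc_int(int ioaddr);

/* Condensed control flow of smc_wait_to_send_packet(): the MMU
 * allocation either completes within a short busy-wait, or the ALLOC
 * interrupt is unmasked and the send finishes in the ISR. */
static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct smc_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	netif_stop_queue(dev);
	lp->saved_skb = skb;

	SMC_SELECT_BANK(2);
	outw(MC_ALLOC | 1, ioaddr + MMU_CMD);	/* ask for chip memory */

	if (!poll_alloc_int(ioaddr)) {
		SMC_ENABLE_INT(IM_ALLOC_INT);	/* defer to the ISR */
		return NETDEV_TX_OK;
	}
	smc_hardware_send_packet(dev);		/* memory arrived at once */
	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}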
+*/ +static void smc_hardware_send_packet( struct net_device * dev ) +{ + struct smc_local *lp = netdev_priv(dev); + byte packet_no; + struct sk_buff * skb = lp->saved_skb; + word length; + unsigned int ioaddr; + byte * buf; + + ioaddr = dev->base_addr; + + if ( !skb ) { + PRINTK((CARDNAME": In XMIT with no packet to send\n")); + return; + } + length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + buf = skb->data; + + /* If I get here, I _know_ there is a packet slot waiting for me */ + packet_no = inb( ioaddr + PNR_ARR + 1 ); + if ( packet_no & 0x80 ) { + /* or isn't there? BAD CHIP! */ + printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n"); + dev_kfree_skb_any(skb); + lp->saved_skb = NULL; + netif_wake_queue(dev); + return; + } + + /* we have a packet address, so tell the card to use it */ + outb( packet_no, ioaddr + PNR_ARR ); + + /* point to the beginning of the packet */ + outw( PTR_AUTOINC , ioaddr + POINTER ); + + PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length )); +#if SMC_DEBUG > 2 + print_packet( buf, length ); +#endif + + /* send the packet length ( +6 for status, length and ctl byte ) + and the status word ( set to zeros ) */ +#ifdef USE_32_BIT + outl( (length +6 ) << 16 , ioaddr + DATA_1 ); +#else + outw( 0, ioaddr + DATA_1 ); + /* send the packet length ( +6 for status words, length, and ctl*/ + outb( (length+6) & 0xFF,ioaddr + DATA_1 ); + outb( (length+6) >> 8 , ioaddr + DATA_1 ); +#endif + + /* send the actual data + . I _think_ it's faster to send the longs first, and then + . mop up by sending the last word. It depends heavily + . on alignment, at least on the 486. Maybe it would be + . a good idea to check which is optimal? But that could take + . almost as much time as is saved? + */ +#ifdef USE_32_BIT + if ( length & 0x2 ) { + outsl(ioaddr + DATA_1, buf, length >> 2 ); +#if !defined(__H8300H__) && !defined(__H8300S__) + outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1); +#else + ctrl_outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1); +#endif + } + else + outsl(ioaddr + DATA_1, buf, length >> 2 ); +#else + outsw(ioaddr + DATA_1 , buf, (length ) >> 1); +#endif + /* Send the last byte, if there is one. 
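A side note on the 32-bit path above: the single outl() of (length + 6) << 16 is exactly the two 16-bit writes of the fallback branch, because on the little-endian data path this driver assumes, the low half of the long lands at DATA_1 (the zeroed status word) and the high half at DATA_1 + 2 (the length word). A self-contained check of that layout:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint16_t length = 60;		/* minimal Ethernet frame */
	uint32_t word = (uint32_t)(length + 6) << 16;

	assert((uint16_t)(word & 0xffff) == 0);		/* status word */
	assert((uint16_t)(word >> 16) == length + 6);	/* length word */
	return 0;
}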
*/ + + if ( (length & 1) == 0 ) { + outw( 0, ioaddr + DATA_1 ); + } else { + outb( buf[length -1 ], ioaddr + DATA_1 ); + outb( 0x20, ioaddr + DATA_1); + } + + /* enable the interrupts */ + SMC_ENABLE_INT( (IM_TX_INT | IM_TX_EMPTY_INT) ); + + /* and let the chipset deal with it */ + outw( MC_ENQUEUE , ioaddr + MMU_CMD ); + + PRINTK2((CARDNAME": Sent packet of length %d\n", length)); + + lp->saved_skb = NULL; + dev_kfree_skb_any (skb); + + dev->trans_start = jiffies; + + /* we can send another packet */ + netif_wake_queue(dev); +} + +/*------------------------------------------------------------------------- + | + | smc_init(int unit) + | Input parameters: + | dev->base_addr == 0, try to find all possible locations + | dev->base_addr == 1, return failure code + | dev->base_addr == 2, always allocate space, and return success + | dev->base_addr == this is the address to check + | + | Output: + | pointer to net_device or ERR_PTR(error) + | + --------------------------------------------------------------------------- +*/ +static int io; +static int irq; +static int ifport; + +struct net_device * __init smc_init(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct smc_local)); + struct devlist *smcdev = smc_devlist; + int err = 0; + + if (!dev) + return ERR_PTR(-ENODEV); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + io = dev->base_addr; + irq = dev->irq; + } + + if (io > 0x1ff) { /* Check a single specified location. */ + err = smc_probe(dev, io); + } else if (io != 0) { /* Don't probe at all. */ + err = -ENXIO; + } else { + for (;smcdev->port; smcdev++) { + if (smc_probe(dev, smcdev->port) == 0) + break; + } + if (!smcdev->port) + err = -ENODEV; + } + if (err) + goto out; + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: + free_irq(dev->irq, dev); + release_region(dev->base_addr, SMC_IO_EXTENT); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +/*---------------------------------------------------------------------- + . smc_findirq + . + . This routine has a simple purpose -- make the SMC chip generate an + . interrupt, so an auto-detect routine can detect it, and find the IRQ, + ------------------------------------------------------------------------ +*/ +static int __init smc_findirq(int ioaddr) +{ +#ifndef NO_AUTOPROBE + int timeout = 20; + unsigned long cookie; + + + cookie = probe_irq_on(); + + /* + * What I try to do here is trigger an ALLOC_INT. This is done + * by allocating a small chunk of memory, which will give an interrupt + * when done. + */ + + + SMC_SELECT_BANK(2); + /* enable ALLOCation interrupts ONLY */ + outb( IM_ALLOC_INT, ioaddr + INT_MASK ); + + /* + . Allocate 512 bytes of memory. Note that the chip was just + . reset so all the memory is available + */ + outw( MC_ALLOC | 1, ioaddr + MMU_CMD ); + + /* + . Wait until positive that the interrupt has been generated + */ + while ( timeout ) { + byte int_status; + + int_status = inb( ioaddr + INTERRUPT ); + + if ( int_status & IM_ALLOC_INT ) + break; /* got the interrupt */ + timeout--; + } + /* there is really nothing that I can do here if timeout fails, + as probe_irq_off will return a 0 anyway, which is what I + want in this case. Plus, the clean up is needed in both + cases. */ + + /* DELAY HERE! + On a fast machine, the status might change before the interrupt + is given to the processor. This means that the interrupt was + never detected, and probe_irq_off fails to report anything. + This should fix probe_irq_* problems. 
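smc_findirq() is an instance of a generic kernel idiom: arm detection with probe_irq_on(), provoke the device into interrupting, then ask probe_irq_off() which line fired. A minimal sketch of that shape, with trigger_device_irq() as a hypothetical stand-in for the MMU-allocation trick used above:

/* Hypothetical stand-in: make the device at ioaddr raise its IRQ,
 * e.g. via a small MMU allocation as smc_findirq() does. */
extern void trigger_device_irq(int ioaddr);

static int autoprobe_irq(int ioaddr)
{
	unsigned long cookie = probe_irq_on();

	trigger_device_irq(ioaddr);
	udelay(100);			/* let the line settle */
	return probe_irq_off(cookie);	/* 0 if nothing was detected */
}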
+ */ + SMC_DELAY(); + SMC_DELAY(); + + /* and disable all interrupts again */ + outb( 0, ioaddr + INT_MASK ); + + /* and return what I found */ + return probe_irq_off(cookie); +#else /* NO_AUTOPROBE */ + struct devlist *smcdev; + for (smcdev = smc_devlist; smcdev->port; smcdev++) { + if (smcdev->port == ioaddr) + return smcdev->irq; + } + return 0; +#endif +} + +static const struct net_device_ops smc_netdev_ops = { + .ndo_open = smc_open, + .ndo_stop = smc_close, + .ndo_start_xmit = smc_wait_to_send_packet, + .ndo_tx_timeout = smc_timeout, + .ndo_set_multicast_list = smc_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/*---------------------------------------------------------------------- + . Function: smc_probe( int ioaddr ) + . + . Purpose: + . Tests to see if a given ioaddr points to an SMC9xxx chip. + . Returns a 0 on success + . + . Algorithm: + . (1) see if the high byte of BANK_SELECT is 0x33 + . (2) compare the ioaddr with the base register's address + . (3) see if I recognize the chip ID in the appropriate register + . + .--------------------------------------------------------------------- + */ + +/*--------------------------------------------------------------- + . Here I do typical initialization tasks. + . + . o Initialize the structure if needed + . o print out my vanity message if not done so already + . o print out what type of hardware is detected + . o print out the ethernet address + . o find the IRQ + . o set up my private data + . o configure the dev structure with my subroutines + . o actually GRAB the irq. + . o GRAB the region + .----------------------------------------------------------------- +*/ +static int __init smc_probe(struct net_device *dev, int ioaddr) +{ + int i, memory, retval; + static unsigned version_printed; + unsigned int bank; + + const char *version_string; + const char *if_string; + + /* registers */ + word revision_register; + word base_address_register; + word configuration_register; + word memory_info_register; + word memory_cfg_register; + + /* Grab the region so that no one else tries to probe our ioports. */ + if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + dev->irq = irq; + dev->if_port = ifport; + + /* First, see if the high byte is 0x33 */ + bank = inw( ioaddr + BANK_SELECT ); + if ( (bank & 0xFF00) != 0x3300 ) { + retval = -ENODEV; + goto err_out; + } + /* The above MIGHT indicate a device, but I need to write to further + test this. */ + outw( 0x0, ioaddr + BANK_SELECT ); + bank = inw( ioaddr + BANK_SELECT ); + if ( (bank & 0xFF00 ) != 0x3300 ) { + retval = -ENODEV; + goto err_out; + } +#if !defined(CONFIG_H8S_EDOSK2674) + /* well, we've already written once, so hopefully another time won't + hurt. This time, I need to switch the bank register to bank 1, + so I can access the base address register */ + SMC_SELECT_BANK(1); + base_address_register = inw( ioaddr + BASE ); + if ( ioaddr != ( base_address_register >> 3 & 0x3E0 ) ) { + printk(CARDNAME ": IOADDR %x doesn't match configuration (%x). " + "Probably not a SMC chip\n", + ioaddr, base_address_register >> 3 & 0x3E0 ); + /* well, the base address register didn't match. Must not have + been a SMC chip after all. */ + retval = -ENODEV; + goto err_out; + } +#else + (void)base_address_register; /* Warning suppression */ +#endif + + + /* check if the revision register is something that I recognize. 
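Concretely, the check that follows treats REVISION as two packed nibbles: bits 7:4 index the chip_ids[] name table and bits 3:0 carry the chip revision. A tiny self-contained decode, with an assumed sample value:

#include <stdio.h>

int main(void)
{
	unsigned int revision_register = 0x3349;	/* assumed sample */
	unsigned int chip = (revision_register >> 4) & 0xF;
	unsigned int rev  = revision_register & 0xF;

	printf("chip_ids index %u, revision %u\n", chip, rev);
	return 0;
}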
+ These might need to be added to later, as future revisions + could be added. */ + SMC_SELECT_BANK(3); + revision_register = inw( ioaddr + REVISION ); + if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) { + /* I don't recognize this chip, so... */ + printk(CARDNAME ": IO %x: Unrecognized revision register:" + " %x, Contact author.\n", ioaddr, revision_register); + + retval = -ENODEV; + goto err_out; + } + + /* at this point I'll assume that the chip is an SMC9xxx. + It might be prudent to check a listing of MAC addresses + against the hardware address, or do some other tests. */ + + if (version_printed++ == 0) + printk("%s", version); + + /* fill in some of the fields */ + dev->base_addr = ioaddr; + + /* + . Get the MAC address ( bank 1, regs 4 - 9 ) + */ + SMC_SELECT_BANK( 1 ); + for ( i = 0; i < 6; i += 2 ) { + word address; + + address = inw( ioaddr + ADDR0 + i ); + dev->dev_addr[ i + 1] = address >> 8; + dev->dev_addr[ i ] = address & 0xFF; + } + + /* get the memory information */ + + SMC_SELECT_BANK( 0 ); + memory_info_register = inw( ioaddr + MIR ); + memory_cfg_register = inw( ioaddr + MCR ); + memory = ( memory_cfg_register >> 9 ) & 0x7; /* multiplier */ + memory *= 256 * ( memory_info_register & 0xFF ); + + /* + Now, I want to find out more about the chip. This is sort of + redundant, but it's cleaner to have it in both, rather than having + one VERY long probe procedure. + */ + SMC_SELECT_BANK(3); + revision_register = inw( ioaddr + REVISION ); + version_string = chip_ids[ ( revision_register >> 4 ) & 0xF ]; + if ( !version_string ) { + /* I shouldn't get here because this call was done before.... */ + retval = -ENODEV; + goto err_out; + } + + /* is it using AUI or 10BaseT ? */ + if ( dev->if_port == 0 ) { + SMC_SELECT_BANK(1); + configuration_register = inw( ioaddr + CONFIG ); + if ( configuration_register & CFG_AUI_SELECT ) + dev->if_port = 2; + else + dev->if_port = 1; + } + if_string = interfaces[ dev->if_port - 1 ]; + + /* now, reset the chip, and put it into a known state */ + smc_reset( ioaddr ); + + /* + . If dev->irq is 0, then the device has to be banged on to see + . what the IRQ is. + . + . This banging doesn't always detect the IRQ, for unknown reasons. + . a workaround is to reset the chip and try again. + . + . Interestingly, the DOS packet driver *SETS* the IRQ on the card to + . be what is requested on the command line. I don't do that, mostly + . because the card that I have uses a non-standard method of accessing + . the IRQs, and because this _should_ work in most configurations. + . + . Specifying an IRQ is done with the assumption that the user knows + . what (s)he is doing. No checking is done!!!! + . + */ + if ( dev->irq < 2 ) { + int trials; + + trials = 3; + while ( trials-- ) { + dev->irq = smc_findirq( ioaddr ); + if ( dev->irq ) + break; + /* kick the card and try again */ + smc_reset( ioaddr ); + } + } + if (dev->irq == 0 ) { + printk(CARDNAME": Couldn't autodetect your IRQ. Use irq=xx.\n"); + retval = -ENODEV; + goto err_out; + } + + /* now, print out the card info, in a short format.. */ + + printk("%s: %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", dev->name, + version_string, revision_register & 0xF, ioaddr, dev->irq, + if_string, memory ); + /* + . 
Print the Ethernet address + */ + printk("ADDR: %pM\n", dev->dev_addr); + + /* Grab the IRQ */ + retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev); + if (retval) { + printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME, + dev->irq, retval); + goto err_out; + } + + dev->netdev_ops = &smc_netdev_ops; + dev->watchdog_timeo = HZ/20; + + return 0; + +err_out: + release_region(ioaddr, SMC_IO_EXTENT); + return retval; +} + +#if SMC_DEBUG > 2 +static void print_packet( byte * buf, int length ) +{ +#if 0 + int i; + int remainder; + int lines; + + printk("Packet of length %d\n", length); + lines = length / 16; + remainder = length % 16; + + for ( i = 0; i < lines ; i ++ ) { + int cur; + + for ( cur = 0; cur < 8; cur ++ ) { + byte a, b; + + a = *(buf ++ ); + b = *(buf ++ ); + printk("%02x%02x ", a, b ); + } + printk("\n"); + } + for ( i = 0; i < remainder/2 ; i++ ) { + byte a, b; + + a = *(buf ++ ); + b = *(buf ++ ); + printk("%02x%02x ", a, b ); + } + printk("\n"); +#endif +} +#endif + + +/* + * Open and Initialize the board + * + * Set up everything, reset the card, etc .. + * + */ +static int smc_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + int i; /* used to set hw ethernet address */ + + /* clear out all the junk that was put here before... */ + memset(netdev_priv(dev), 0, sizeof(struct smc_local)); + + /* reset the hardware */ + + smc_reset( ioaddr ); + smc_enable( ioaddr ); + + /* Select which interface to use */ + + SMC_SELECT_BANK( 1 ); + if ( dev->if_port == 1 ) { + outw( inw( ioaddr + CONFIG ) & ~CFG_AUI_SELECT, + ioaddr + CONFIG ); + } + else if ( dev->if_port == 2 ) { + outw( inw( ioaddr + CONFIG ) | CFG_AUI_SELECT, + ioaddr + CONFIG ); + } + + /* + According to Becker, I have to set the hardware address + at this point, because the (l)user can set it with an + ioctl. Easily done... + */ + SMC_SELECT_BANK( 1 ); + for ( i = 0; i < 6; i += 2 ) { + word address; + + address = dev->dev_addr[ i + 1 ] << 8 ; + address |= dev->dev_addr[ i ]; + outw( address, ioaddr + ADDR0 + i ); + } + + netif_start_queue(dev); + return 0; +} + +/*-------------------------------------------------------- + . Called by the kernel to send a packet out into the void + . of the net. This routine is largely based on + . skeleton.c, from Becker. + .-------------------------------------------------------- +*/ + +static void smc_timeout(struct net_device *dev) +{ + /* If we get here, some higher level has decided we are broken. + There should really be a "kick me" function call instead. */ + printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n", + tx_done(dev) ? "IRQ conflict" : + "network cable problem"); + /* "kick" the adaptor */ + smc_reset( dev->base_addr ); + smc_enable( dev->base_addr ); + dev->trans_start = jiffies; /* prevent tx timeout */ + /* clear anything saved */ + ((struct smc_local *)netdev_priv(dev))->saved_skb = NULL; + netif_wake_queue(dev); +} + +/*------------------------------------------------------------- + . + . smc_rcv - receive a packet from the card + . + . There is ( at least ) a packet waiting to be read from + . chip-memory. + . + . o Read the status + . o If an error, record it + . 
o otherwise, read in the packet + -------------------------------------------------------------- +*/ +static void smc_rcv(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + int packet_number; + word status; + word packet_length; + + /* assume bank 2 */ + + packet_number = inw( ioaddr + FIFO_PORTS ); + + if ( packet_number & FP_RXEMPTY ) { + /* we got called , but nothing was on the FIFO */ + PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n")); + /* don't need to restore anything */ + return; + } + + /* start reading from the start of the packet */ + outw( PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER ); + + /* First two words are status and packet_length */ + status = inw( ioaddr + DATA_1 ); + packet_length = inw( ioaddr + DATA_1 ); + + packet_length &= 0x07ff; /* mask off top bits */ + + PRINTK2(("RCV: STATUS %4x LENGTH %4x\n", status, packet_length )); + /* + . the packet length contains 3 extra words : + . status, length, and an extra word with an odd byte . + */ + packet_length -= 6; + + if ( !(status & RS_ERRORS ) ){ + /* do stuff to make a new packet */ + struct sk_buff * skb; + byte * data; + + /* read one extra byte */ + if ( status & RS_ODDFRAME ) + packet_length++; + + /* set multicast stats */ + if ( status & RS_MULTICAST ) + dev->stats.multicast++; + + skb = dev_alloc_skb( packet_length + 5); + + if ( skb == NULL ) { + printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n"); + dev->stats.rx_dropped++; + goto done; + } + + /* + ! This should work without alignment, but it could be + ! in the worse case + */ + + skb_reserve( skb, 2 ); /* 16 bit alignment */ + + data = skb_put( skb, packet_length); + +#ifdef USE_32_BIT + /* QUESTION: Like in the TX routine, do I want + to send the DWORDs or the bytes first, or some + mixture. A mixture might improve already slow PIO + performance */ + PRINTK3((" Reading %d dwords (and %d bytes)\n", + packet_length >> 2, packet_length & 3 )); + insl(ioaddr + DATA_1 , data, packet_length >> 2 ); + /* read the left over bytes */ + insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC), + packet_length & 0x3 ); +#else + PRINTK3((" Reading %d words and %d byte(s)\n", + (packet_length >> 1 ), packet_length & 1 )); + insw(ioaddr + DATA_1 , data, packet_length >> 1); + if ( packet_length & 1 ) { + data += packet_length & ~1; + *(data++) = inb( ioaddr + DATA_1 ); + } +#endif +#if SMC_DEBUG > 2 + print_packet( data, packet_length ); +#endif + + skb->protocol = eth_type_trans(skb, dev ); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_length; + } else { + /* error ... */ + dev->stats.rx_errors++; + + if ( status & RS_ALGNERR ) dev->stats.rx_frame_errors++; + if ( status & (RS_TOOSHORT | RS_TOOLONG ) ) + dev->stats.rx_length_errors++; + if ( status & RS_BADCRC) dev->stats.rx_crc_errors++; + } + +done: + /* error or good, tell the card to get rid of this packet */ + outw( MC_RELEASE, ioaddr + MMU_CMD ); +} + + +/************************************************************************* + . smc_tx + . + . Purpose: Handle a transmit error message. This will only be called + . when an error, because of the AUTO_RELEASE mode. + . + . Algorithm: + . Save pointer and packet no + . Get the packet no from the top of the queue + . check if it's valid ( if not, is this an error??? ) + . read the status word + . record the error + . ( resend? Not really, since we don't want old packets around ) + . 
Restore saved values + ************************************************************************/ +static void smc_tx( struct net_device * dev ) +{ + int ioaddr = dev->base_addr; + struct smc_local *lp = netdev_priv(dev); + byte saved_packet; + byte packet_no; + word tx_status; + + + /* assume bank 2 */ + + saved_packet = inb( ioaddr + PNR_ARR ); + packet_no = inw( ioaddr + FIFO_PORTS ); + packet_no &= 0x7F; + + /* select this as the packet to read from */ + outb( packet_no, ioaddr + PNR_ARR ); + + /* read the first word from this packet */ + outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER ); + + tx_status = inw( ioaddr + DATA_1 ); + PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status)); + + dev->stats.tx_errors++; + if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++; + if ( tx_status & TS_LATCOL ) { + printk(KERN_DEBUG CARDNAME + ": Late collision occurred on last xmit.\n"); + dev->stats.tx_window_errors++; + } +#if 0 + if ( tx_status & TS_16COL ) { ... } +#endif + + if ( tx_status & TS_SUCCESS ) { + printk(CARDNAME": Successful packet caused interrupt\n"); + } + /* re-enable transmit */ + SMC_SELECT_BANK( 0 ); + outw( inw( ioaddr + TCR ) | TCR_ENABLE, ioaddr + TCR ); + + /* kill the packet */ + SMC_SELECT_BANK( 2 ); + outw( MC_FREEPKT, ioaddr + MMU_CMD ); + + /* one less packet waiting for me */ + lp->packets_waiting--; + + outb( saved_packet, ioaddr + PNR_ARR ); +} + +/*-------------------------------------------------------------------- + . + . This is the main routine of the driver, to handle the device when + . it needs some attention. + . + . So: + . first, save state of the chipset + . branch off into routines to handle each case, and acknowledge + . each to the interrupt register + . and finally restore state. + . + ---------------------------------------------------------------------*/ + +static irqreturn_t smc_interrupt(int irq, void * dev_id) +{ + struct net_device *dev = dev_id; + int ioaddr = dev->base_addr; + struct smc_local *lp = netdev_priv(dev); + + byte status; + word card_stats; + byte mask; + int timeout; + /* state registers */ + word saved_bank; + word saved_pointer; + int handled = 0; + + + PRINTK3((CARDNAME": SMC interrupt started\n")); + + saved_bank = inw( ioaddr + BANK_SELECT ); + + SMC_SELECT_BANK(2); + saved_pointer = inw( ioaddr + POINTER ); + + mask = inb( ioaddr + INT_MASK ); + /* clear all interrupts */ + outb( 0, ioaddr + INT_MASK ); + + + /* set a timeout value, so I don't stay here forever */ + timeout = 4; + + PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask)); + do { + /* read the status flag, and mask it */ + status = inb( ioaddr + INTERRUPT ) & mask; + if (!status ) + break; + + handled = 1; + + PRINTK3((KERN_WARNING CARDNAME + ": Handling interrupt status %x\n", status)); + + if (status & IM_RCV_INT) { + /* Got a packet(s). 
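+			   One smc_rcv() call drains a single
+			   frame; the surrounding do/while loop
+			   re-reads the status and comes back for
+			   any others.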
*/ + PRINTK2((KERN_WARNING CARDNAME + ": Receive Interrupt\n")); + smc_rcv(dev); + } else if (status & IM_TX_INT ) { + PRINTK2((KERN_WARNING CARDNAME + ": TX ERROR handled\n")); + smc_tx(dev); + outb(IM_TX_INT, ioaddr + INTERRUPT ); + } else if (status & IM_TX_EMPTY_INT ) { + /* update stats */ + SMC_SELECT_BANK( 0 ); + card_stats = inw( ioaddr + COUNTER ); + /* single collisions */ + dev->stats.collisions += card_stats & 0xF; + card_stats >>= 4; + /* multiple collisions */ + dev->stats.collisions += card_stats & 0xF; + + /* these are for when linux supports these statistics */ + + SMC_SELECT_BANK( 2 ); + PRINTK2((KERN_WARNING CARDNAME + ": TX_BUFFER_EMPTY handled\n")); + outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT ); + mask &= ~IM_TX_EMPTY_INT; + dev->stats.tx_packets += lp->packets_waiting; + lp->packets_waiting = 0; + + } else if (status & IM_ALLOC_INT ) { + PRINTK2((KERN_DEBUG CARDNAME + ": Allocation interrupt\n")); + /* clear this interrupt so it doesn't happen again */ + mask &= ~IM_ALLOC_INT; + + smc_hardware_send_packet( dev ); + + /* enable xmit interrupts based on this */ + mask |= ( IM_TX_EMPTY_INT | IM_TX_INT ); + + /* and let the card send more packets to me */ + netif_wake_queue(dev); + + PRINTK2((CARDNAME": Handoff done successfully.\n")); + } else if (status & IM_RX_OVRN_INT ) { + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT ); + } else if (status & IM_EPH_INT ) { + PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n")); + } else if (status & IM_ERCV_INT ) { + PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n")); + outb( IM_ERCV_INT, ioaddr + INTERRUPT ); + } + } while ( timeout -- ); + + + /* restore state register */ + SMC_SELECT_BANK( 2 ); + outb( mask, ioaddr + INT_MASK ); + + PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask)); + outw( saved_pointer, ioaddr + POINTER ); + + SMC_SELECT_BANK( saved_bank ); + + PRINTK3((CARDNAME ": Interrupt done\n")); + return IRQ_RETVAL(handled); +} + + +/*---------------------------------------------------- + . smc_close + . + . this makes the board clean up everything that it can + . and not talk to the outside world. Caused by + . an 'ifconfig ethX down' + . + -----------------------------------------------------*/ +static int smc_close(struct net_device *dev) +{ + netif_stop_queue(dev); + /* clear everything */ + smc_shutdown( dev->base_addr ); + + /* Update the statistics here. */ + return 0; +} + +/*----------------------------------------------------------- + . smc_set_multicast_list + . + . This routine will, depending on the values passed to it, + . either make it accept multicast packets, go into + . promiscuous mode ( for TCPDUMP and cousins ) or accept + . a select set of multicast packets +*/ +static void smc_set_multicast_list(struct net_device *dev) +{ + short ioaddr = dev->base_addr; + + SMC_SELECT_BANK(0); + if ( dev->flags & IFF_PROMISC ) + outw( inw(ioaddr + RCR ) | RCR_PROMISC, ioaddr + RCR ); + +/* BUG? I never disable promiscuous mode if multicasting was turned on. + Now, I turn off promiscuous mode, but I don't do anything to multicasting + when promiscuous mode is turned on. +*/ + + /* Here, I am setting this to accept all multicast packets. + I don't need to zero the multicast table, because the flag is + checked before the table is + */ + else if (dev->flags & IFF_ALLMULTI) + outw( inw(ioaddr + RCR ) | RCR_ALMUL, ioaddr + RCR ); + + /* We just get all multicast packets even if we only want them + . from one source. This will be changed at some future + . 
point. */ + else if (!netdev_mc_empty(dev)) { + /* support hardware multicasting */ + + /* be sure I get rid of flags I might have set */ + outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL), + ioaddr + RCR ); + /* NOTE: this has to set the bank, so make sure it is the + last thing called. The bank is set to zero at the top */ + smc_setmulticast(ioaddr, dev); + } + else { + outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL), + ioaddr + RCR ); + + /* + since I'm disabling all multicast entirely, I need to + clear the multicast list + */ + SMC_SELECT_BANK( 3 ); + outw( 0, ioaddr + MULTICAST1 ); + outw( 0, ioaddr + MULTICAST2 ); + outw( 0, ioaddr + MULTICAST3 ); + outw( 0, ioaddr + MULTICAST4 ); + } +} + +#ifdef MODULE + +static struct net_device *devSMC9194; +MODULE_LICENSE("GPL"); + +module_param(io, int, 0); +module_param(irq, int, 0); +module_param(ifport, int, 0); +MODULE_PARM_DESC(io, "SMC 99194 I/O base address"); +MODULE_PARM_DESC(irq, "SMC 99194 IRQ number"); +MODULE_PARM_DESC(ifport, "SMC 99194 interface port (0-default, 1-TP, 2-AUI)"); + +int __init init_module(void) +{ + if (io == 0) + printk(KERN_WARNING + CARDNAME": You shouldn't use auto-probing with insmod!\n" ); + + /* copy the parameters from insmod into the device structure */ + devSMC9194 = smc_init(-1); + if (IS_ERR(devSMC9194)) + return PTR_ERR(devSMC9194); + return 0; +} + +void __exit cleanup_module(void) +{ + unregister_netdev(devSMC9194); + free_irq(devSMC9194->irq, devSMC9194); + release_region(devSMC9194->base_addr, SMC_IO_EXTENT); + free_netdev(devSMC9194); +} + +#endif /* MODULE */ diff --git a/drivers/net/ethernet/smsc/smc9194.h b/drivers/net/ethernet/smsc/smc9194.h new file mode 100644 index 0000000..cf69d0a --- /dev/null +++ b/drivers/net/ethernet/smsc/smc9194.h @@ -0,0 +1,241 @@ +/*------------------------------------------------------------------------ + . smc9194.h + . Copyright (C) 1996 by Erik Stahlman + . + . This software may be used and distributed according to the terms + . of the GNU General Public License, incorporated herein by reference. + . + . This file contains register information and access macros for + . the SMC91xxx chipset. + . + . Information contained in this file was obtained from the SMC91C94 + . manual from SMC. To get a copy, if you really want one, you can find + . information under www.smc.com in the components division. + . ( this thanks to advice from Donald Becker ). + . + . Authors + . Erik Stahlman ( erik@vt.edu ) + . + . History + . 01/06/96 Erik Stahlman moved definitions here from main .c file + . 01/19/96 Erik Stahlman polished this up some, and added better + . error handling + . + ---------------------------------------------------------------------------*/ +#ifndef _SMC9194_H_ +#define _SMC9194_H_ + +/* I want some simple types */ + +typedef unsigned char byte; +typedef unsigned short word; +typedef unsigned long int dword; + + +/* Because of bank switching, the SMC91xxx uses only 16 I/O ports */ + +#define SMC_IO_EXTENT 16 + + +/*--------------------------------------------------------------- + . + . A description of the SMC registers is probably in order here, + . although for details, the SMC datasheet is invaluable. + . + . Basically, the chip has 4 banks of registers ( 0 to 3 ), which + . are accessed by writing a number into the BANK_SELECT register + . ( I also use a SMC_SELECT_BANK macro for this ). + . + . The banks are configured so that for most purposes, bank 2 is all + . that is needed for simple run time tasks. 
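+ .
+ . As a sketch of the access pattern ( illustrative only; it assumes
+ . a local "ioaddr" holding the adapter I/O base, which every routine
+ . in this driver keeps ):
+ .
+ .	SMC_SELECT_BANK( 0 );			/* map bank 0	      */
+ .	status = inw( ioaddr + EPH_STATUS );	/* read a bank-0 word */
+ .
+ . The bank selection is global chip state, so code that changes it
+ . must restore it ( as the interrupt handler does ) or be certain
+ . nothing else depends on the old selection.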
+ -----------------------------------------------------------------------*/ + +/* + . Bank Select Register: + . + . yyyy yyyy 0000 00xx + . xx = bank number + . yyyy yyyy = 0x33, for identification purposes. +*/ +#define BANK_SELECT 14 + +/* BANK 0 */ + +#define TCR 0 /* transmit control register */ +#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */ +#define TCR_FDUPLX 0x0800 /* receive packets sent out */ +#define TCR_STP_SQET 0x1000 /* stop transmitting if Signal quality error */ +#define TCR_MON_CNS 0x0400 /* monitors the carrier status */ +#define TCR_PAD_ENABLE 0x0080 /* pads short packets to 64 bytes */ + +#define TCR_CLEAR 0 /* do NOTHING */ +/* the normal settings for the TCR register : */ +/* QUESTION: do I want to enable padding of short packets ? */ +#define TCR_NORMAL TCR_ENABLE + + +#define EPH_STATUS 2 +#define ES_LINK_OK 0x4000 /* is the link integrity ok ? */ + +#define RCR 4 +#define RCR_SOFTRESET 0x8000 /* resets the chip */ +#define RCR_STRIP_CRC 0x200 /* strips CRC */ +#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */ +#define RCR_ALMUL 0x4 /* receive all multicast packets */ +#define RCR_PROMISC 0x2 /* enable promiscuous mode */ + +/* the normal settings for the RCR register : */ +#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE) +#define RCR_CLEAR 0x0 /* set it to a base state */ + +#define COUNTER 6 +#define MIR 8 +#define MCR 10 +/* 12 is reserved */ + +/* BANK 1 */ +#define CONFIG 0 +#define CFG_AUI_SELECT 0x100 +#define BASE 2 +#define ADDR0 4 +#define ADDR1 6 +#define ADDR2 8 +#define GENERAL 10 +#define CONTROL 12 +#define CTL_POWERDOWN 0x2000 +#define CTL_LE_ENABLE 0x80 +#define CTL_CR_ENABLE 0x40 +#define CTL_TE_ENABLE 0x0020 +#define CTL_AUTO_RELEASE 0x0800 +#define CTL_EPROM_ACCESS 0x0003 /* high if Eprom is being read */ + +/* BANK 2 */ +#define MMU_CMD 0 +#define MC_BUSY 1 /* only readable bit in the register */ +#define MC_NOP 0 +#define MC_ALLOC 0x20 /* or with number of 256 byte packets */ +#define MC_RESET 0x40 +#define MC_REMOVE 0x60 /* remove the current rx packet */ +#define MC_RELEASE 0x80 /* remove and release the current rx packet */ +#define MC_FREEPKT 0xA0 /* Release packet in PNR register */ +#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */ + +#define PNR_ARR 2 +#define FIFO_PORTS 4 + +#define FP_RXEMPTY 0x8000 +#define FP_TXEMPTY 0x80 + +#define POINTER 6 +#define PTR_READ 0x2000 +#define PTR_RCV 0x8000 +#define PTR_AUTOINC 0x4000 +#define PTR_AUTO_INC 0x0040 + +#define DATA_1 8 +#define DATA_2 10 +#define INTERRUPT 12 + +#define INT_MASK 13 +#define IM_RCV_INT 0x1 +#define IM_TX_INT 0x2 +#define IM_TX_EMPTY_INT 0x4 +#define IM_ALLOC_INT 0x8 +#define IM_RX_OVRN_INT 0x10 +#define IM_EPH_INT 0x20 +#define IM_ERCV_INT 0x40 /* not on SMC9192 */ + +/* BANK 3 */ +#define MULTICAST1 0 +#define MULTICAST2 2 +#define MULTICAST3 4 +#define MULTICAST4 6 +#define MGMT 8 +#define REVISION 10 /* ( hi: chip id low: rev # ) */ + + +/* this is NOT on SMC9192 */ +#define ERCV 12 + +#define CHIP_9190 3 +#define CHIP_9194 4 +#define CHIP_9195 5 +#define CHIP_91100 7 + +static const char * chip_ids[ 15 ] = { + NULL, NULL, NULL, + /* 3 */ "SMC91C90/91C92", + /* 4 */ "SMC91C94", + /* 5 */ "SMC91C95", + NULL, + /* 7 */ "SMC91C100", + /* 8 */ "SMC91C100FD", + NULL, NULL, NULL, + NULL, NULL, NULL}; + +/* + . Transmit status bits +*/ +#define TS_SUCCESS 0x0001 +#define TS_LOSTCAR 0x0400 +#define TS_LATCOL 0x0200 +#define TS_16COL 0x0010 + +/* + . 
Receive status bits +*/ +#define RS_ALGNERR 0x8000 +#define RS_BADCRC 0x2000 +#define RS_ODDFRAME 0x1000 +#define RS_TOOLONG 0x0800 +#define RS_TOOSHORT 0x0400 +#define RS_MULTICAST 0x0001 +#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) + +static const char * interfaces[ 2 ] = { "TP", "AUI" }; + +/*------------------------------------------------------------------------- + . I define some macros to make it easier to do somewhat common + . or slightly complicated, repeated tasks. + --------------------------------------------------------------------------*/ + +/* select a register bank, 0 to 3 */ + +#define SMC_SELECT_BANK(x) { outw( x, ioaddr + BANK_SELECT ); } + +/* define a small delay for the reset */ +#define SMC_DELAY() { inw( ioaddr + RCR );\ + inw( ioaddr + RCR );\ + inw( ioaddr + RCR ); } + +/* this enables an interrupt in the interrupt mask register */ +#define SMC_ENABLE_INT(x) {\ + unsigned char mask;\ + SMC_SELECT_BANK(2);\ + mask = inb( ioaddr + INT_MASK );\ + mask |= (x);\ + outb( mask, ioaddr + INT_MASK ); \ +} + +/* this disables an interrupt from the interrupt mask register */ + +#define SMC_DISABLE_INT(x) {\ + unsigned char mask;\ + SMC_SELECT_BANK(2);\ + mask = inb( ioaddr + INT_MASK );\ + mask &= ~(x);\ + outb( mask, ioaddr + INT_MASK ); \ +} + +/*---------------------------------------------------------------------- + . Define the interrupts that I want to receive from the card + . + . I want: + . IM_EPH_INT, for nasty errors + . IM_RCV_INT, for happy received packets + . IM_RX_OVRN_INT, because I have to kick the receiver + --------------------------------------------------------------------------*/ +#define SMC_INTERRUPT_MASK (IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT) + +#endif /* _SMC_9194_H_ */ + diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c new file mode 100644 index 0000000..cffbc03 --- /dev/null +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -0,0 +1,2070 @@ +/*====================================================================== + + A PCMCIA ethernet driver for SMC91c92-based cards. + + This driver supports Megahertz PCMCIA ethernet cards; and + Megahertz, Motorola, Ositech, and Psion Dacom ethernet/modem + multifunction cards. + + Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net + + smc91c92_cs.c 1.122 2002/10/25 06:26:39 + + This driver contains code written by Donald Becker + (becker@scyld.com), Rowan Hughes (x-csrdh@jcu.edu.au), + David Hinds (dahinds@users.sourceforge.net), and Erik Stahlman + (erik@vt.edu). Donald wrote the SMC 91c92 code using parts of + Erik's SMC 91c94 driver. Rowan wrote a similar driver, and I've + incorporated some parts of his driver here. I (Dave) wrote most + of the PCMCIA glue code, and the Ositech support code. Kelly + Stephens (kstephen@holli.com) added support for the Motorola + Mariner, with help from Allen Brost. + + This software may be used and distributed according to the terms of + the GNU General Public License, incorporated herein by reference. 
+
+======================================================================*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/jiffies.h>
+#include <linux/firmware.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/ss.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/*====================================================================*/
+
+static const char *if_names[] = { "auto", "10baseT", "10base2"};
+
+/* Firmware name */
+#define FIRMWARE_NAME		"ositech/Xilinx7OD.bin"
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("SMC 91c92 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_NAME);
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/*
+  Transceiver/media type.
+   0 = auto
+   1 = 10baseT (and autoselect if #define AUTOSELECT),
+   2 = AUI/10base2,
+*/
+INT_MODULE_PARM(if_port, 0);
+
+
+#define DRV_NAME	"smc91c92_cs"
+#define DRV_VERSION	"1.123"
+
+/*====================================================================*/
+
+/* Operational parameters that are not usually changed. */
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT		((400*HZ)/1000)
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+#define INTR_WORK		4
+
+/* Times to check the chip before concluding that it doesn't
+   currently have room for another Tx packet. */
+#define MEMORY_WAIT_TIME	8
+
+struct smc_private {
+	struct pcmcia_device	*p_dev;
+	spinlock_t		lock;
+	u_short			manfid;
+	u_short			cardid;
+
+	struct sk_buff		*saved_skb;
+	int			packets_waiting;
+	void __iomem		*base;
+	u_short			cfg;
+	struct timer_list	media;
+	int			watchdog, tx_err;
+	u_short			media_status;
+	u_short			fast_poll;
+	u_short			link_status;
+	struct mii_if_info	mii_if;
+	int			duplex;
+	int			rx_ovrn;
+};
+
+/* Special definitions for Megahertz multifunction cards */
+#define MEGAHERTZ_ISR		0x0380
+
+/* Special function registers for Motorola Mariner */
+#define MOT_LAN			0x0000
+#define MOT_UART		0x0020
+#define MOT_EEPROM		0x20
+
+#define MOT_NORMAL \
+(COR_LEVEL_REQ | COR_FUNC_ENA | COR_ADDR_DECODE | COR_IREQ_ENA)
+
+/* Special function registers for Ositech cards */
+#define OSITECH_AUI_CTL		0x0c
+#define OSITECH_PWRDOWN		0x0d
+#define OSITECH_RESET		0x0e
+#define OSITECH_ISR		0x0f
+#define OSITECH_AUI_PWR		0x0c
+#define OSITECH_RESET_ISR	0x0e
+
+#define OSI_AUI_PWR		0x40
+#define OSI_LAN_PWRDOWN		0x02
+#define OSI_MODEM_PWRDOWN	0x01
+#define OSI_LAN_RESET		0x02
+#define OSI_MODEM_RESET		0x01
+
+/* Symbolic constants for the SMC91c9* series chips, from Erik Stahlman. */
+#define BANK_SELECT		14		/* Window select register. */
+#define SMC_SELECT_BANK(x)	{ outw(x, ioaddr + BANK_SELECT); }
+
+/* Bank 0 registers. */
+#define TCR		0	/* transmit control register */
+#define  TCR_CLEAR	0	/* do NOTHING */
+#define  TCR_ENABLE	0x0001	/* if this is 1, we can transmit */
+#define  TCR_PAD_EN	0x0080	/* pads short packets to 64 bytes */
+#define  TCR_MONCSN	0x0400	/* Monitor Carrier. */
+#define  TCR_FDUPLX	0x0800	/* Full duplex mode. */
+#define  TCR_NORMAL	TCR_ENABLE | TCR_PAD_EN
+
+#define EPH		2	/* Ethernet Protocol Handler report.
*/ +#define EPH_TX_SUC 0x0001 +#define EPH_SNGLCOL 0x0002 +#define EPH_MULCOL 0x0004 +#define EPH_LTX_MULT 0x0008 +#define EPH_16COL 0x0010 +#define EPH_SQET 0x0020 +#define EPH_LTX_BRD 0x0040 +#define EPH_TX_DEFR 0x0080 +#define EPH_LAT_COL 0x0200 +#define EPH_LOST_CAR 0x0400 +#define EPH_EXC_DEF 0x0800 +#define EPH_CTR_ROL 0x1000 +#define EPH_RX_OVRN 0x2000 +#define EPH_LINK_OK 0x4000 +#define EPH_TX_UNRN 0x8000 +#define MEMINFO 8 /* Memory Information Register */ +#define MEMCFG 10 /* Memory Configuration Register */ + +/* Bank 1 registers. */ +#define CONFIG 0 +#define CFG_MII_SELECT 0x8000 /* 91C100 only */ +#define CFG_NO_WAIT 0x1000 +#define CFG_FULL_STEP 0x0400 +#define CFG_SET_SQLCH 0x0200 +#define CFG_AUI_SELECT 0x0100 +#define CFG_16BIT 0x0080 +#define CFG_DIS_LINK 0x0040 +#define CFG_STATIC 0x0030 +#define CFG_IRQ_SEL_1 0x0004 +#define CFG_IRQ_SEL_0 0x0002 +#define BASE_ADDR 2 +#define ADDR0 4 +#define GENERAL 10 +#define CONTROL 12 +#define CTL_STORE 0x0001 +#define CTL_RELOAD 0x0002 +#define CTL_EE_SELECT 0x0004 +#define CTL_TE_ENABLE 0x0020 +#define CTL_CR_ENABLE 0x0040 +#define CTL_LE_ENABLE 0x0080 +#define CTL_AUTO_RELEASE 0x0800 +#define CTL_POWERDOWN 0x2000 + +/* Bank 2 registers. */ +#define MMU_CMD 0 +#define MC_ALLOC 0x20 /* or with number of 256 byte packets */ +#define MC_RESET 0x40 +#define MC_RELEASE 0x80 /* remove and release the current rx packet */ +#define MC_FREEPKT 0xA0 /* Release packet in PNR register */ +#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */ +#define PNR_ARR 2 +#define FIFO_PORTS 4 +#define FP_RXEMPTY 0x8000 +#define POINTER 6 +#define PTR_AUTO_INC 0x0040 +#define PTR_READ 0x2000 +#define PTR_AUTOINC 0x4000 +#define PTR_RCV 0x8000 +#define DATA_1 8 +#define INTERRUPT 12 +#define IM_RCV_INT 0x1 +#define IM_TX_INT 0x2 +#define IM_TX_EMPTY_INT 0x4 +#define IM_ALLOC_INT 0x8 +#define IM_RX_OVRN_INT 0x10 +#define IM_EPH_INT 0x20 + +#define RCR 4 +enum RxCfg { RxAllMulti = 0x0004, RxPromisc = 0x0002, + RxEnable = 0x0100, RxStripCRC = 0x0200}; +#define RCR_SOFTRESET 0x8000 /* resets the chip */ +#define RCR_STRIP_CRC 0x200 /* strips CRC */ +#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */ +#define RCR_ALMUL 0x4 /* receive all multicast packets */ +#define RCR_PROMISC 0x2 /* enable promiscuous mode */ + +/* the normal settings for the RCR register : */ +#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE) +#define RCR_CLEAR 0x0 /* set it to a base state */ +#define COUNTER 6 + +/* BANK 3 -- not the same values as in smc9194! */ +#define MULTICAST0 0 +#define MULTICAST2 2 +#define MULTICAST4 4 +#define MULTICAST6 6 +#define MGMT 8 +#define REVISION 0x0a + +/* Transmit status bits. */ +#define TS_SUCCESS 0x0001 +#define TS_16COL 0x0010 +#define TS_LATCOL 0x0200 +#define TS_LOSTCAR 0x0400 + +/* Receive status bits. 
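+   ( same bit layout as the RS_* set in smc9194.h above )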
*/ +#define RS_ALGNERR 0x8000 +#define RS_BADCRC 0x2000 +#define RS_ODDFRAME 0x1000 +#define RS_TOOLONG 0x0800 +#define RS_TOOSHORT 0x0400 +#define RS_MULTICAST 0x0001 +#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) + +#define set_bits(v, p) outw(inw(p)|(v), (p)) +#define mask_bits(v, p) outw(inw(p)&(v), (p)) + +/*====================================================================*/ + +static void smc91c92_detach(struct pcmcia_device *p_dev); +static int smc91c92_config(struct pcmcia_device *link); +static void smc91c92_release(struct pcmcia_device *link); + +static int smc_open(struct net_device *dev); +static int smc_close(struct net_device *dev); +static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static void smc_tx_timeout(struct net_device *dev); +static netdev_tx_t smc_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t smc_interrupt(int irq, void *dev_id); +static void smc_rx(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static int s9k_config(struct net_device *dev, struct ifmap *map); +static void smc_set_xcvr(struct net_device *dev, int if_port); +static void smc_reset(struct net_device *dev); +static void media_check(u_long arg); +static void mdio_sync(unsigned int addr); +static int mdio_read(struct net_device *dev, int phy_id, int loc); +static void mdio_write(struct net_device *dev, int phy_id, int loc, int value); +static int smc_link_ok(struct net_device *dev); +static const struct ethtool_ops ethtool_ops; + +static const struct net_device_ops smc_netdev_ops = { + .ndo_open = smc_open, + .ndo_stop = smc_close, + .ndo_start_xmit = smc_start_xmit, + .ndo_tx_timeout = smc_tx_timeout, + .ndo_set_config = s9k_config, + .ndo_set_multicast_list = set_rx_mode, + .ndo_do_ioctl = smc_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int smc91c92_probe(struct pcmcia_device *link) +{ + struct smc_private *smc; + struct net_device *dev; + + dev_dbg(&link->dev, "smc91c92_attach()\n"); + + /* Create new ethernet device */ + dev = alloc_etherdev(sizeof(struct smc_private)); + if (!dev) + return -ENOMEM; + smc = netdev_priv(dev); + smc->p_dev = link; + link->priv = dev; + + spin_lock_init(&smc->lock); + + /* The SMC91c92-specific entries in the device structure. */ + dev->netdev_ops = &smc_netdev_ops; + SET_ETHTOOL_OPS(dev, ðtool_ops); + dev->watchdog_timeo = TX_TIMEOUT; + + smc->mii_if.dev = dev; + smc->mii_if.mdio_read = mdio_read; + smc->mii_if.mdio_write = mdio_write; + smc->mii_if.phy_id_mask = 0x1f; + smc->mii_if.reg_num_mask = 0x1f; + + return smc91c92_config(link); +} /* smc91c92_attach */ + +static void smc91c92_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "smc91c92_detach\n"); + + unregister_netdev(dev); + + smc91c92_release(link); + + free_netdev(dev); +} /* smc91c92_detach */ + +/*====================================================================*/ + +static int cvt_ascii_address(struct net_device *dev, char *s) +{ + int i, j, da, c; + + if (strlen(s) != 12) + return -1; + for (i = 0; i < 6; i++) { + da = 0; + for (j = 0; j < 2; j++) { + c = *s++; + da <<= 4; + da += ((c >= '0') && (c <= '9')) ? 
+ (c - '0') : ((c & 0x0f) + 9); + } + dev->dev_addr[i] = da; + } + return 0; +} + +/*==================================================================== + + Configuration stuff for Megahertz cards + + mhz_3288_power() is used to power up a 3288's ethernet chip. + mhz_mfc_config() handles socket setup for multifunction (1144 + and 3288) cards. mhz_setup() gets a card's hardware ethernet + address. + +======================================================================*/ + +static int mhz_3288_power(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + u_char tmp; + + /* Read the ISR twice... */ + readb(smc->base+MEGAHERTZ_ISR); + udelay(5); + readb(smc->base+MEGAHERTZ_ISR); + + /* Pause 200ms... */ + mdelay(200); + + /* Now read and write the COR... */ + tmp = readb(smc->base + link->config_base + CISREG_COR); + udelay(5); + writeb(tmp, smc->base + link->config_base + CISREG_COR); + + return 0; +} + +static int mhz_mfc_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + int k; + p_dev->io_lines = 16; + p_dev->resource[1]->start = p_dev->resource[0]->start; + p_dev->resource[1]->end = 8; + p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->end = 16; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + for (k = 0; k < 0x400; k += 0x10) { + if (k & 0x80) + continue; + p_dev->resource[0]->start = k ^ 0x300; + if (!pcmcia_request_io(p_dev)) + return 0; + } + return -ENODEV; +} + +static int mhz_mfc_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + unsigned int offset; + int i; + + link->config_flags |= CONF_ENABLE_SPKR | CONF_ENABLE_IRQ | + CONF_AUTO_SET_IO; + + /* The Megahertz combo cards have modem-like CIS entries, so + we have to explicitly try a bunch of port combinations. */ + if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL)) + return -ENODEV; + + dev->base_addr = link->resource[0]->start; + + /* Allocate a memory window, for accessing the ISR */ + link->resource[2]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; + link->resource[2]->start = link->resource[2]->end = 0; + i = pcmcia_request_window(link, link->resource[2], 0); + if (i != 0) + return -ENODEV; + + smc->base = ioremap(link->resource[2]->start, + resource_size(link->resource[2])); + offset = (smc->manfid == MANFID_MOTOROLA) ? link->config_base : 0; + i = pcmcia_map_mem_page(link, link->resource[2], offset); + if ((i == 0) && + (smc->manfid == MANFID_MEGAHERTZ) && + (smc->cardid == PRODID_MEGAHERTZ_EM3288)) + mhz_3288_power(link); + + return 0; +} + +static int pcmcia_get_versmac(struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv) +{ + struct net_device *dev = priv; + cisparse_t parse; + u8 *buf; + + if (pcmcia_parse_tuple(tuple, &parse)) + return -EINVAL; + + buf = parse.version_1.str + parse.version_1.ofs[3]; + + if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0)) + return 0; + + return -EINVAL; +}; + +static int mhz_setup(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + size_t len; + u8 *buf; + int rc; + + /* Read the station address from the CIS. It is stored as the last + (fourth) string in the Version 1 Version/ID tuple. */ + if ((link->prod_id[3]) && + (cvt_ascii_address(dev, link->prod_id[3]) == 0)) + return 0; + + /* Workarounds for broken cards start here. 
*/ + /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */ + if (!pcmcia_loop_tuple(link, CISTPL_VERS_1, pcmcia_get_versmac, dev)) + return 0; + + /* Another possibility: for the EM3288, in a special tuple */ + rc = -1; + len = pcmcia_get_tuple(link, 0x81, &buf); + if (buf && len >= 13) { + buf[12] = '\0'; + if (cvt_ascii_address(dev, buf) == 0) + rc = 0; + } + kfree(buf); + + return rc; +}; + +/*====================================================================== + + Configuration stuff for the Motorola Mariner + + mot_config() writes directly to the Mariner configuration + registers because the CIS is just bogus. + +======================================================================*/ + +static void mot_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + unsigned int iouart = link->resource[1]->start; + + /* Set UART base address and force map with COR bit 1 */ + writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0); + writeb((iouart >> 8) & 0xff, smc->base + MOT_UART + CISREG_IOBASE_1); + writeb(MOT_NORMAL, smc->base + MOT_UART + CISREG_COR); + + /* Set SMC base address and force map with COR bit 1 */ + writeb(ioaddr & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_0); + writeb((ioaddr >> 8) & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_1); + writeb(MOT_NORMAL, smc->base + MOT_LAN + CISREG_COR); + + /* Wait for things to settle down */ + mdelay(100); +} + +static int mot_setup(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + unsigned int ioaddr = dev->base_addr; + int i, wait, loop; + u_int addr; + + /* Read Ethernet address from Serial EEPROM */ + + for (i = 0; i < 3; i++) { + SMC_SELECT_BANK(2); + outw(MOT_EEPROM + i, ioaddr + POINTER); + SMC_SELECT_BANK(1); + outw((CTL_RELOAD | CTL_EE_SELECT), ioaddr + CONTROL); + + for (loop = wait = 0; loop < 200; loop++) { + udelay(10); + wait = ((CTL_RELOAD | CTL_STORE) & inw(ioaddr + CONTROL)); + if (wait == 0) break; + } + + if (wait) + return -1; + + addr = inw(ioaddr + GENERAL); + dev->dev_addr[2*i] = addr & 0xff; + dev->dev_addr[2*i+1] = (addr >> 8) & 0xff; + } + + return 0; +} + +/*====================================================================*/ + +static int smc_configcheck(struct pcmcia_device *p_dev, void *priv_data) +{ + p_dev->resource[0]->end = 16; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + + return pcmcia_request_io(p_dev); +} + +static int smc_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + int i; + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + + i = pcmcia_loop_config(link, smc_configcheck, NULL); + if (!i) + dev->base_addr = link->resource[0]->start; + + return i; +} + + +static int smc_setup(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + /* Check for a LAN function extension tuple */ + if (!pcmcia_get_mac_from_cis(link, dev)) + return 0; + + /* Try the third string in the Version 1 Version/ID tuple. 
*/ + if (link->prod_id[2]) { + if (cvt_ascii_address(dev, link->prod_id[2]) == 0) + return 0; + } + return -1; +} + +/*====================================================================*/ + +static int osi_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + static const unsigned int com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; + int i, j; + + link->config_flags |= CONF_ENABLE_SPKR | CONF_ENABLE_IRQ; + link->resource[0]->end = 64; + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + link->resource[1]->end = 8; + + /* Enable Hard Decode, LAN, Modem */ + link->io_lines = 16; + link->config_index = 0x23; + + for (i = j = 0; j < 4; j++) { + link->resource[1]->start = com[j]; + i = pcmcia_request_io(link); + if (i == 0) + break; + } + if (i != 0) { + /* Fallback: turn off hard decode */ + link->config_index = 0x03; + link->resource[1]->end = 0; + i = pcmcia_request_io(link); + } + dev->base_addr = link->resource[0]->start + 0x10; + return i; +} + +static int osi_load_firmware(struct pcmcia_device *link) +{ + const struct firmware *fw; + int i, err; + + err = request_firmware(&fw, FIRMWARE_NAME, &link->dev); + if (err) { + pr_err("Failed to load firmware \"%s\"\n", FIRMWARE_NAME); + return err; + } + + /* Download the Seven of Diamonds firmware */ + for (i = 0; i < fw->size; i++) { + outb(fw->data[i], link->resource[0]->start + 2); + udelay(50); + } + release_firmware(fw); + return err; +} + +static int pcmcia_osi_mac(struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv) +{ + struct net_device *dev = priv; + int i; + + if (tuple->TupleDataLen < 8) + return -EINVAL; + if (tuple->TupleData[0] != 0x04) + return -EINVAL; + for (i = 0; i < 6; i++) + dev->dev_addr[i] = tuple->TupleData[i+2]; + return 0; +}; + + +static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid) +{ + struct net_device *dev = link->priv; + int rc; + + /* Read the station address from tuple 0x90, subtuple 0x04 */ + if (pcmcia_loop_tuple(link, 0x90, pcmcia_osi_mac, dev)) + return -1; + + if (((manfid == MANFID_OSITECH) && + (cardid == PRODID_OSITECH_SEVEN)) || + ((manfid == MANFID_PSION) && + (cardid == PRODID_PSION_NET100))) { + rc = osi_load_firmware(link); + if (rc) + return rc; + } else if (manfid == MANFID_OSITECH) { + /* Make sure both functions are powered up */ + set_bits(0x300, link->resource[0]->start + OSITECH_AUI_PWR); + /* Now, turn on the interrupt for both card functions */ + set_bits(0x300, link->resource[0]->start + OSITECH_RESET_ISR); + dev_dbg(&link->dev, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n", + inw(link->resource[0]->start + OSITECH_AUI_PWR), + inw(link->resource[0]->start + OSITECH_RESET_ISR)); + } + return 0; +} + +static int smc91c92_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int smc91c92_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + int i; + + if ((smc->manfid == MANFID_MEGAHERTZ) && + (smc->cardid == PRODID_MEGAHERTZ_EM3288)) + mhz_3288_power(link); + if (smc->manfid == MANFID_MOTOROLA) + mot_config(link); + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) { + /* Power up the card and enable interrupts */ + set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR); + set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR); + } + if (((smc->manfid == MANFID_OSITECH) && + (smc->cardid == PRODID_OSITECH_SEVEN)) || + ((smc->manfid == 
MANFID_PSION) && + (smc->cardid == PRODID_PSION_NET100))) { + i = osi_load_firmware(link); + if (i) { + pr_err("smc91c92_cs: Failed to load firmware\n"); + return i; + } + } + if (link->open) { + smc_reset(dev); + netif_device_attach(dev); + } + + return 0; +} + + +/*====================================================================== + + This verifies that the chip is some SMC91cXX variant, and returns + the revision code if successful. Otherwise, it returns -ENODEV. + +======================================================================*/ + +static int check_sig(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + unsigned int ioaddr = dev->base_addr; + int width; + u_short s; + + SMC_SELECT_BANK(1); + if (inw(ioaddr + BANK_SELECT) >> 8 != 0x33) { + /* Try powering up the chip */ + outw(0, ioaddr + CONTROL); + mdelay(55); + } + + /* Try setting bus width */ + width = (link->resource[0]->flags == IO_DATA_PATH_WIDTH_AUTO); + s = inb(ioaddr + CONFIG); + if (width) + s |= CFG_16BIT; + else + s &= ~CFG_16BIT; + outb(s, ioaddr + CONFIG); + + /* Check Base Address Register to make sure bus width is OK */ + s = inw(ioaddr + BASE_ADDR); + if ((inw(ioaddr + BANK_SELECT) >> 8 == 0x33) && + ((s >> 8) != (s & 0xff))) { + SMC_SELECT_BANK(3); + s = inw(ioaddr + REVISION); + return s & 0xff; + } + + if (width) { + pr_info("using 8-bit IO window\n"); + + smc91c92_suspend(link); + pcmcia_fixup_iowidth(link); + smc91c92_resume(link); + return check_sig(link); + } + return -ENODEV; +} + +static int smc91c92_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + char *name; + int i, rev, j = 0; + unsigned int ioaddr; + u_long mir; + + dev_dbg(&link->dev, "smc91c92_config\n"); + + smc->manfid = link->manf_id; + smc->cardid = link->card_id; + + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) { + i = osi_config(link); + } else if ((smc->manfid == MANFID_MOTOROLA) || + ((smc->manfid == MANFID_MEGAHERTZ) && + ((smc->cardid == PRODID_MEGAHERTZ_VARIOUS) || + (smc->cardid == PRODID_MEGAHERTZ_EM3288)))) { + i = mhz_mfc_config(link); + } else { + i = smc_config(link); + } + if (i) + goto config_failed; + + i = pcmcia_request_irq(link, smc_interrupt); + if (i) + goto config_failed; + i = pcmcia_enable_device(link); + if (i) + goto config_failed; + + if (smc->manfid == MANFID_MOTOROLA) + mot_config(link); + + dev->irq = link->irq; + + if ((if_port >= 0) && (if_port <= 2)) + dev->if_port = if_port; + else + dev_notice(&link->dev, "invalid if_port requested\n"); + + switch (smc->manfid) { + case MANFID_OSITECH: + case MANFID_PSION: + i = osi_setup(link, smc->manfid, smc->cardid); break; + case MANFID_SMC: + case MANFID_NEW_MEDIA: + i = smc_setup(link); break; + case 0x128: /* For broken Megahertz cards */ + case MANFID_MEGAHERTZ: + i = mhz_setup(link); break; + case MANFID_MOTOROLA: + default: /* get the hw address from EEPROM */ + i = mot_setup(link); break; + } + + if (i != 0) { + dev_notice(&link->dev, "Unable to find hardware address.\n"); + goto config_failed; + } + + smc->duplex = 0; + smc->rx_ovrn = 0; + + rev = check_sig(link); + name = "???"; + if (rev > 0) + switch (rev >> 4) { + case 3: name = "92"; break; + case 4: name = ((rev & 15) >= 6) ? 
"96" : "94"; break; + case 5: name = "95"; break; + case 7: name = "100"; break; + case 8: name = "100-FD"; break; + case 9: name = "110"; break; + } + + ioaddr = dev->base_addr; + if (rev > 0) { + u_long mcr; + SMC_SELECT_BANK(0); + mir = inw(ioaddr + MEMINFO) & 0xff; + if (mir == 0xff) mir++; + /* Get scale factor for memory size */ + mcr = ((rev >> 4) > 3) ? inw(ioaddr + MEMCFG) : 0x0200; + mir *= 128 * (1<<((mcr >> 9) & 7)); + SMC_SELECT_BANK(1); + smc->cfg = inw(ioaddr + CONFIG) & ~CFG_AUI_SELECT; + smc->cfg |= CFG_NO_WAIT | CFG_16BIT | CFG_STATIC; + if (smc->manfid == MANFID_OSITECH) + smc->cfg |= CFG_IRQ_SEL_1 | CFG_IRQ_SEL_0; + if ((rev >> 4) >= 7) + smc->cfg |= CFG_MII_SELECT; + } else + mir = 0; + + if (smc->cfg & CFG_MII_SELECT) { + SMC_SELECT_BANK(3); + + for (i = 0; i < 32; i++) { + j = mdio_read(dev, i, 1); + if ((j != 0) && (j != 0xffff)) break; + } + smc->mii_if.phy_id = (i < 32) ? i : -1; + + SMC_SELECT_BANK(0); + } + + SET_NETDEV_DEV(dev, &link->dev); + + if (register_netdev(dev) != 0) { + dev_err(&link->dev, "register_netdev() failed\n"); + goto config_undo; + } + + netdev_info(dev, "smc91c%s rev %d: io %#3lx, irq %d, hw_addr %pM\n", + name, (rev & 0x0f), dev->base_addr, dev->irq, dev->dev_addr); + + if (rev > 0) { + if (mir & 0x3ff) + netdev_info(dev, " %lu byte", mir); + else + netdev_info(dev, " %lu kb", mir>>10); + pr_cont(" buffer, %s xcvr\n", + (smc->cfg & CFG_MII_SELECT) ? "MII" : if_names[dev->if_port]); + } + + if (smc->cfg & CFG_MII_SELECT) { + if (smc->mii_if.phy_id != -1) { + netdev_dbg(dev, " MII transceiver at index %d, status %x\n", + smc->mii_if.phy_id, j); + } else { + netdev_notice(dev, " No MII transceivers found!\n"); + } + } + return 0; + +config_undo: + unregister_netdev(dev); +config_failed: + smc91c92_release(link); + free_netdev(dev); + return -ENODEV; +} /* smc91c92_config */ + +static void smc91c92_release(struct pcmcia_device *link) +{ + dev_dbg(&link->dev, "smc91c92_release\n"); + if (link->resource[2]->end) { + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + iounmap(smc->base); + } + pcmcia_disable_device(link); +} + +/*====================================================================== + + MII interface support for SMC91cXX based cards +======================================================================*/ + +#define MDIO_SHIFT_CLK 0x04 +#define MDIO_DATA_OUT 0x01 +#define MDIO_DIR_WRITE 0x08 +#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE) +#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT) +#define MDIO_DATA_READ 0x02 + +static void mdio_sync(unsigned int addr) +{ + int bits; + for (bits = 0; bits < 32; bits++) { + outb(MDIO_DATA_WRITE1, addr); + outb(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr); + } +} + +static int mdio_read(struct net_device *dev, int phy_id, int loc) +{ + unsigned int addr = dev->base_addr + MGMT; + u_int cmd = (0x06<<10)|(phy_id<<5)|loc; + int i, retval = 0; + + mdio_sync(addr); + for (i = 13; i >= 0; i--) { + int dat = (cmd&(1< 0; i--) { + outb(0, addr); + retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0); + outb(MDIO_SHIFT_CLK, addr); + } + return (retval>>1) & 0xffff; +} + +static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) +{ + unsigned int addr = dev->base_addr + MGMT; + u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value; + int i; + + mdio_sync(addr); + for (i = 31; i >= 0; i--) { + int dat = (cmd&(1<= 0; i--) { + outb(0, addr); + outb(MDIO_SHIFT_CLK, addr); + } +} + 
+/*====================================================================== + + The driver core code, most of which should be common with a + non-PCMCIA implementation. + +======================================================================*/ + +#ifdef PCMCIA_DEBUG +static void smc_dump(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + u_short i, w, save; + save = inw(ioaddr + BANK_SELECT); + for (w = 0; w < 4; w++) { + SMC_SELECT_BANK(w); + netdev_printk(KERN_DEBUG, dev, "bank %d: ", w); + for (i = 0; i < 14; i += 2) + pr_cont(" %04x", inw(ioaddr + i)); + pr_cont("\n"); + } + outw(save, ioaddr + BANK_SELECT); +} +#endif + +static int smc_open(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + struct pcmcia_device *link = smc->p_dev; + + dev_dbg(&link->dev, "%s: smc_open(%p), ID/Window %4.4x.\n", + dev->name, dev, inw(dev->base_addr + BANK_SELECT)); +#ifdef PCMCIA_DEBUG + smc_dump(dev); +#endif + + /* Check that the PCMCIA card is still here. */ + if (!pcmcia_dev_present(link)) + return -ENODEV; + /* Physical device present signature. */ + if (check_sig(link) < 0) { + netdev_info(dev, "Yikes! Bad chip signature!\n"); + return -ENODEV; + } + link->open++; + + netif_start_queue(dev); + smc->saved_skb = NULL; + smc->packets_waiting = 0; + + smc_reset(dev); + init_timer(&smc->media); + smc->media.function = media_check; + smc->media.data = (u_long) dev; + smc->media.expires = jiffies + HZ; + add_timer(&smc->media); + + return 0; +} /* smc_open */ + +/*====================================================================*/ + +static int smc_close(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + struct pcmcia_device *link = smc->p_dev; + unsigned int ioaddr = dev->base_addr; + + dev_dbg(&link->dev, "%s: smc_close(), status %4.4x.\n", + dev->name, inw(ioaddr + BANK_SELECT)); + + netif_stop_queue(dev); + + /* Shut off all interrupts, and turn off the Tx and Rx sections. + Don't bother to check for chip present. */ + SMC_SELECT_BANK(2); /* Nominally paranoia, but do no assume... */ + outw(0, ioaddr + INTERRUPT); + SMC_SELECT_BANK(0); + mask_bits(0xff00, ioaddr + RCR); + mask_bits(0xff00, ioaddr + TCR); + + /* Put the chip into power-down mode. */ + SMC_SELECT_BANK(1); + outw(CTL_POWERDOWN, ioaddr + CONTROL ); + + link->open--; + del_timer_sync(&smc->media); + + return 0; +} /* smc_close */ + +/*====================================================================== + + Transfer a packet to the hardware and trigger the packet send. + This may be called at either from either the Tx queue code + or the interrupt handler. + +======================================================================*/ + +static void smc_hardware_send_packet(struct net_device * dev) +{ + struct smc_private *smc = netdev_priv(dev); + struct sk_buff *skb = smc->saved_skb; + unsigned int ioaddr = dev->base_addr; + u_char packet_no; + + if (!skb) { + netdev_err(dev, "In XMIT with no packet to send\n"); + return; + } + + /* There should be a packet slot waiting. */ + packet_no = inw(ioaddr + PNR_ARR) >> 8; + if (packet_no & 0x80) { + /* If not, there is a hardware problem! Likely an ejected card. */ + netdev_warn(dev, "hardware Tx buffer allocation failed, status %#2.2x\n", + packet_no); + dev_kfree_skb_irq(skb); + smc->saved_skb = NULL; + netif_start_queue(dev); + return; + } + + dev->stats.tx_bytes += skb->len; + /* The card should use the just-allocated buffer. 
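+       Writing the page number to PNR_ARR makes that page current for
+       the POINTER and DATA_1 accesses that follow.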
*/ + outw(packet_no, ioaddr + PNR_ARR); + /* point to the beginning of the packet */ + outw(PTR_AUTOINC , ioaddr + POINTER); + + /* Send the packet length (+6 for status, length and ctl byte) + and the status word (set to zeros). */ + { + u_char *buf = skb->data; + u_int length = skb->len; /* The chip will pad to ethernet min. */ + + netdev_dbg(dev, "Trying to xmit packet of length %d\n", length); + + /* send the packet length: +6 for status word, length, and ctl */ + outw(0, ioaddr + DATA_1); + outw(length + 6, ioaddr + DATA_1); + outsw(ioaddr + DATA_1, buf, length >> 1); + + /* The odd last byte, if there is one, goes in the control word. */ + outw((length & 1) ? 0x2000 | buf[length-1] : 0, ioaddr + DATA_1); + } + + /* Enable the Tx interrupts, both Tx (TxErr) and TxEmpty. */ + outw(((IM_TX_INT|IM_TX_EMPTY_INT)<<8) | + (inw(ioaddr + INTERRUPT) & 0xff00), + ioaddr + INTERRUPT); + + /* The chip does the rest of the work. */ + outw(MC_ENQUEUE , ioaddr + MMU_CMD); + + smc->saved_skb = NULL; + dev_kfree_skb_irq(skb); + dev->trans_start = jiffies; + netif_start_queue(dev); +} + +/*====================================================================*/ + +static void smc_tx_timeout(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + + netdev_notice(dev, "transmit timed out, Tx_status %2.2x status %4.4x.\n", + inw(ioaddr)&0xff, inw(ioaddr + 2)); + dev->stats.tx_errors++; + smc_reset(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + smc->saved_skb = NULL; + netif_wake_queue(dev); +} + +static netdev_tx_t smc_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u_short num_pages; + short time_out, ir; + unsigned long flags; + + netif_stop_queue(dev); + + netdev_dbg(dev, "smc_start_xmit(length = %d) called, status %04x\n", + skb->len, inw(ioaddr + 2)); + + if (smc->saved_skb) { + /* THIS SHOULD NEVER HAPPEN. */ + dev->stats.tx_aborted_errors++; + netdev_printk(KERN_DEBUG, dev, + "Internal error -- sent packet while busy\n"); + return NETDEV_TX_BUSY; + } + smc->saved_skb = skb; + + num_pages = skb->len >> 8; + + if (num_pages > 7) { + netdev_err(dev, "Far too big packet error: %d pages\n", num_pages); + dev_kfree_skb (skb); + smc->saved_skb = NULL; + dev->stats.tx_dropped++; + return NETDEV_TX_OK; /* Do not re-queue this packet. */ + } + /* A packet is now waiting. */ + smc->packets_waiting++; + + spin_lock_irqsave(&smc->lock, flags); + SMC_SELECT_BANK(2); /* Paranoia, we should always be in window 2 */ + + /* need MC_RESET to keep the memory consistent. errata? */ + if (smc->rx_ovrn) { + outw(MC_RESET, ioaddr + MMU_CMD); + smc->rx_ovrn = 0; + } + + /* Allocate the memory; send the packet now if we win. */ + outw(MC_ALLOC | num_pages, ioaddr + MMU_CMD); + for (time_out = MEMORY_WAIT_TIME; time_out >= 0; time_out--) { + ir = inw(ioaddr+INTERRUPT); + if (ir & IM_ALLOC_INT) { + /* Acknowledge the interrupt, send the packet. */ + outw((ir&0xff00) | IM_ALLOC_INT, ioaddr + INTERRUPT); + smc_hardware_send_packet(dev); /* Send the packet now.. */ + spin_unlock_irqrestore(&smc->lock, flags); + return NETDEV_TX_OK; + } + } + + /* Otherwise defer until the Tx-space-allocated interrupt. 
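+       smc_hardware_send_packet() will then run from the IM_ALLOC_INT
+       arm of smc_interrupt() once the MMU grants the pages.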
*/ + pr_debug("%s: memory allocation deferred.\n", dev->name); + outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT); + spin_unlock_irqrestore(&smc->lock, flags); + + return NETDEV_TX_OK; +} + +/*====================================================================== + + Handle a Tx anomalous event. Entered while in Window 2. + +======================================================================*/ + +static void smc_tx_err(struct net_device * dev) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + int saved_packet = inw(ioaddr + PNR_ARR) & 0xff; + int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f; + int tx_status; + + /* select this as the packet to read from */ + outw(packet_no, ioaddr + PNR_ARR); + + /* read the first word from this packet */ + outw(PTR_AUTOINC | PTR_READ | 0, ioaddr + POINTER); + + tx_status = inw(ioaddr + DATA_1); + + dev->stats.tx_errors++; + if (tx_status & TS_LOSTCAR) dev->stats.tx_carrier_errors++; + if (tx_status & TS_LATCOL) dev->stats.tx_window_errors++; + if (tx_status & TS_16COL) { + dev->stats.tx_aborted_errors++; + smc->tx_err++; + } + + if (tx_status & TS_SUCCESS) { + netdev_notice(dev, "Successful packet caused error interrupt?\n"); + } + /* re-enable transmit */ + SMC_SELECT_BANK(0); + outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR); + SMC_SELECT_BANK(2); + + outw(MC_FREEPKT, ioaddr + MMU_CMD); /* Free the packet memory. */ + + /* one less packet waiting for me */ + smc->packets_waiting--; + + outw(saved_packet, ioaddr + PNR_ARR); +} + +/*====================================================================*/ + +static void smc_eph_irq(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u_short card_stats, ephs; + + SMC_SELECT_BANK(0); + ephs = inw(ioaddr + EPH); + pr_debug("%s: Ethernet protocol handler interrupt, status" + " %4.4x.\n", dev->name, ephs); + /* Could be a counter roll-over warning: update stats. */ + card_stats = inw(ioaddr + COUNTER); + /* single collisions */ + dev->stats.collisions += card_stats & 0xF; + card_stats >>= 4; + /* multiple collisions */ + dev->stats.collisions += card_stats & 0xF; +#if 0 /* These are for when linux supports these statistics */ + card_stats >>= 4; /* deferred */ + card_stats >>= 4; /* excess deferred */ +#endif + /* If we had a transmit error we must re-enable the transmitter. */ + outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR); + + /* Clear a link error interrupt. */ + SMC_SELECT_BANK(1); + outw(CTL_AUTO_RELEASE | 0x0000, ioaddr + CONTROL); + outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE, + ioaddr + CONTROL); + SMC_SELECT_BANK(2); +} + +/*====================================================================*/ + +static irqreturn_t smc_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr; + u_short saved_bank, saved_pointer, mask, status; + unsigned int handled = 1; + char bogus_cnt = INTR_WORK; /* Work we are willing to do. */ + + if (!netif_device_present(dev)) + return IRQ_NONE; + + ioaddr = dev->base_addr; + + pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name, + irq, ioaddr); + + spin_lock(&smc->lock); + smc->watchdog = 0; + saved_bank = inw(ioaddr + BANK_SELECT); + if ((saved_bank & 0xff00) != 0x3300) { + /* The device does not exist -- the card could be off-line, or + maybe it has been ejected. 
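+	   ( a live chip always returns 0x33 in the high byte of
+	   BANK_SELECT; check_sig() keys off the same signature )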
*/ + pr_debug("%s: SMC91c92 interrupt %d for non-existent" + "/ejected device.\n", dev->name, irq); + handled = 0; + goto irq_done; + } + + SMC_SELECT_BANK(2); + saved_pointer = inw(ioaddr + POINTER); + mask = inw(ioaddr + INTERRUPT) >> 8; + /* clear all interrupts */ + outw(0, ioaddr + INTERRUPT); + + do { /* read the status flag, and mask it */ + status = inw(ioaddr + INTERRUPT) & 0xff; + pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name, + status, mask); + if ((status & mask) == 0) { + if (bogus_cnt == INTR_WORK) + handled = 0; + break; + } + if (status & IM_RCV_INT) { + /* Got a packet(s). */ + smc_rx(dev); + } + if (status & IM_TX_INT) { + smc_tx_err(dev); + outw(IM_TX_INT, ioaddr + INTERRUPT); + } + status &= mask; + if (status & IM_TX_EMPTY_INT) { + outw(IM_TX_EMPTY_INT, ioaddr + INTERRUPT); + mask &= ~IM_TX_EMPTY_INT; + dev->stats.tx_packets += smc->packets_waiting; + smc->packets_waiting = 0; + } + if (status & IM_ALLOC_INT) { + /* Clear this interrupt so it doesn't happen again */ + mask &= ~IM_ALLOC_INT; + + smc_hardware_send_packet(dev); + + /* enable xmit interrupts based on this */ + mask |= (IM_TX_EMPTY_INT | IM_TX_INT); + + /* and let the card send more packets to me */ + netif_wake_queue(dev); + } + if (status & IM_RX_OVRN_INT) { + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + if (smc->duplex) + smc->rx_ovrn = 1; /* need MC_RESET outside smc_interrupt */ + outw(IM_RX_OVRN_INT, ioaddr + INTERRUPT); + } + if (status & IM_EPH_INT) + smc_eph_irq(dev); + } while (--bogus_cnt); + + pr_debug(" Restoring saved registers mask %2.2x bank %4.4x" + " pointer %4.4x.\n", mask, saved_bank, saved_pointer); + + /* restore state register */ + outw((mask<<8), ioaddr + INTERRUPT); + outw(saved_pointer, ioaddr + POINTER); + SMC_SELECT_BANK(saved_bank); + + pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq); + +irq_done: + + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) { + /* Retrigger interrupt if needed */ + mask_bits(0x00ff, ioaddr-0x10+OSITECH_RESET_ISR); + set_bits(0x0300, ioaddr-0x10+OSITECH_RESET_ISR); + } + if (smc->manfid == MANFID_MOTOROLA) { + u_char cor; + cor = readb(smc->base + MOT_UART + CISREG_COR); + writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_UART + CISREG_COR); + writeb(cor, smc->base + MOT_UART + CISREG_COR); + cor = readb(smc->base + MOT_LAN + CISREG_COR); + writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_LAN + CISREG_COR); + writeb(cor, smc->base + MOT_LAN + CISREG_COR); + } + + if ((smc->base != NULL) && /* Megahertz MFC's */ + (smc->manfid == MANFID_MEGAHERTZ) && + (smc->cardid == PRODID_MEGAHERTZ_EM3288)) { + + u_char tmp; + tmp = readb(smc->base+MEGAHERTZ_ISR); + tmp = readb(smc->base+MEGAHERTZ_ISR); + + /* Retrigger interrupt if needed */ + writeb(tmp, smc->base + MEGAHERTZ_ISR); + writeb(tmp, smc->base + MEGAHERTZ_ISR); + } + + spin_unlock(&smc->lock); + return IRQ_RETVAL(handled); +} + +/*====================================================================*/ + +static void smc_rx(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int rx_status; + int packet_length; /* Caution: not frame length, rather words + to transfer from the chip. */ + + /* Assertion: we are in Window 2. */ + + if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) { + netdev_err(dev, "smc_rx() with nothing on Rx FIFO\n"); + return; + } + + /* Reset the read pointer, and read the status and packet length. 
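+       PTR_AUTOINC advances the pointer on each access, so the two
+       inw() calls below return the status word and then the length
+       word without repositioning.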
*/ + outw(PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER); + rx_status = inw(ioaddr + DATA_1); + packet_length = inw(ioaddr + DATA_1) & 0x07ff; + + pr_debug("%s: Receive status %4.4x length %d.\n", + dev->name, rx_status, packet_length); + + if (!(rx_status & RS_ERRORS)) { + /* do stuff to make a new packet */ + struct sk_buff *skb; + + /* Note: packet_length adds 5 or 6 extra bytes here! */ + skb = dev_alloc_skb(packet_length+2); + + if (skb == NULL) { + pr_debug("%s: Low memory, packet dropped.\n", dev->name); + dev->stats.rx_dropped++; + outw(MC_RELEASE, ioaddr + MMU_CMD); + return; + } + + packet_length -= (rx_status & RS_ODDFRAME ? 5 : 6); + skb_reserve(skb, 2); + insw(ioaddr+DATA_1, skb_put(skb, packet_length), + (packet_length+1)>>1); + skb->protocol = eth_type_trans(skb, dev); + + netif_rx(skb); + dev->last_rx = jiffies; + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_length; + if (rx_status & RS_MULTICAST) + dev->stats.multicast++; + } else { + /* error ... */ + dev->stats.rx_errors++; + + if (rx_status & RS_ALGNERR) dev->stats.rx_frame_errors++; + if (rx_status & (RS_TOOSHORT | RS_TOOLONG)) + dev->stats.rx_length_errors++; + if (rx_status & RS_BADCRC) dev->stats.rx_crc_errors++; + } + /* Let the MMU free the memory of this packet. */ + outw(MC_RELEASE, ioaddr + MMU_CMD); +} + +/*====================================================================== + + Set the receive mode. + + This routine is used by both the protocol level to notify us of + promiscuous/multicast mode changes, and by the open/reset code to + initialize the Rx registers. We always set the multicast list and + leave the receiver running. + +======================================================================*/ + +static void set_rx_mode(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct smc_private *smc = netdev_priv(dev); + unsigned char multicast_table[8]; + unsigned long flags; + u_short rx_cfg_setting; + int i; + + memset(multicast_table, 0, sizeof(multicast_table)); + + if (dev->flags & IFF_PROMISC) { + rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; + } else if (dev->flags & IFF_ALLMULTI) + rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti; + else { + if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + u_int position = ether_crc(6, ha->addr); + multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); + } + } + rx_cfg_setting = RxStripCRC | RxEnable; + } + + /* Load MC table and Rx setting into the chip without interrupts. */ + spin_lock_irqsave(&smc->lock, flags); + SMC_SELECT_BANK(3); + for (i = 0; i < 8; i++) + outb(multicast_table[i], ioaddr + MULTICAST0 + i); + SMC_SELECT_BANK(0); + outw(rx_cfg_setting, ioaddr + RCR); + SMC_SELECT_BANK(2); + spin_unlock_irqrestore(&smc->lock, flags); +} + +/*====================================================================== + + Senses when a card's config changes. Here, it's coax or TP. 
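+
+  The port numbering matches the if_port module parameter and the
+  if_names[] table at the top of the file: 0 = auto, 1 = 10baseT,
+  2 = 10base2/AUI.  A hypothetical request, just to show the
+  encoding:
+
+	struct ifmap map = { .port = 2 };  /* ask for the coax port */
+	s9k_config(dev, &map);             /* resets the chip on success */
+
+  Requests are refused with -EOPNOTSUPP when the MII transceiver
+  owns media selection, and with -EINVAL for ports above 2.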
+ +======================================================================*/ + +static int s9k_config(struct net_device *dev, struct ifmap *map) +{ + struct smc_private *smc = netdev_priv(dev); + if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { + if (smc->cfg & CFG_MII_SELECT) + return -EOPNOTSUPP; + else if (map->port > 2) + return -EINVAL; + dev->if_port = map->port; + netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); + smc_reset(dev); + } + return 0; +} + +/*====================================================================== + + Reset the chip, reloading every register that might be corrupted. + +======================================================================*/ + +/* + Set transceiver type, perhaps to something other than what the user + specified in dev->if_port. +*/ +static void smc_set_xcvr(struct net_device *dev, int if_port) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u_short saved_bank; + + saved_bank = inw(ioaddr + BANK_SELECT); + SMC_SELECT_BANK(1); + if (if_port == 2) { + outw(smc->cfg | CFG_AUI_SELECT, ioaddr + CONFIG); + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) + set_bits(OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR); + smc->media_status = ((dev->if_port == 0) ? 0x0001 : 0x0002); + } else { + outw(smc->cfg, ioaddr + CONFIG); + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) + mask_bits(~OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR); + smc->media_status = ((dev->if_port == 0) ? 0x0012 : 0x4001); + } + SMC_SELECT_BANK(saved_bank); +} + +static void smc_reset(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct smc_private *smc = netdev_priv(dev); + int i; + + pr_debug("%s: smc91c92 reset called.\n", dev->name); + + /* The first interaction must be a write to bring the chip out + of sleep mode. */ + SMC_SELECT_BANK(0); + /* Reset the chip. */ + outw(RCR_SOFTRESET, ioaddr + RCR); + udelay(10); + + /* Clear the transmit and receive configuration registers. */ + outw(RCR_CLEAR, ioaddr + RCR); + outw(TCR_CLEAR, ioaddr + TCR); + + /* Set the Window 1 control, configuration and station addr registers. + No point in writing the I/O base register ;-> */ + SMC_SELECT_BANK(1); + /* Automatically release successfully transmitted packets, + Accept link errors, counter and Tx error interrupts. */ + outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE, + ioaddr + CONTROL); + smc_set_xcvr(dev, dev->if_port); + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) + outw((dev->if_port == 2 ? OSI_AUI_PWR : 0) | + (inw(ioaddr-0x10+OSITECH_AUI_PWR) & 0xff00), + ioaddr - 0x10 + OSITECH_AUI_PWR); + + /* Fill in the physical address. The databook is wrong about the order! */ + for (i = 0; i < 6; i += 2) + outw((dev->dev_addr[i+1]<<8)+dev->dev_addr[i], + ioaddr + ADDR0 + i); + + /* Reset the MMU */ + SMC_SELECT_BANK(2); + outw(MC_RESET, ioaddr + MMU_CMD); + outw(0, ioaddr + INTERRUPT); + + /* Re-enable the chip. */ + SMC_SELECT_BANK(0); + outw(((smc->cfg & CFG_MII_SELECT) ? 
0 : TCR_MONCSN) | + TCR_ENABLE | TCR_PAD_EN | smc->duplex, ioaddr + TCR); + set_rx_mode(dev); + + if (smc->cfg & CFG_MII_SELECT) { + SMC_SELECT_BANK(3); + + /* Reset MII */ + mdio_write(dev, smc->mii_if.phy_id, 0, 0x8000); + + /* Advertise 100F, 100H, 10F, 10H */ + mdio_write(dev, smc->mii_if.phy_id, 4, 0x01e1); + + /* Restart MII autonegotiation */ + mdio_write(dev, smc->mii_if.phy_id, 0, 0x0000); + mdio_write(dev, smc->mii_if.phy_id, 0, 0x1200); + } + + /* Enable interrupts. */ + SMC_SELECT_BANK(2); + outw((IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT) << 8, + ioaddr + INTERRUPT); +} + +/*====================================================================== + + Media selection timer routine + +======================================================================*/ + +static void media_check(u_long arg) +{ + struct net_device *dev = (struct net_device *) arg; + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u_short i, media, saved_bank; + u_short link; + unsigned long flags; + + spin_lock_irqsave(&smc->lock, flags); + + saved_bank = inw(ioaddr + BANK_SELECT); + + if (!netif_device_present(dev)) + goto reschedule; + + SMC_SELECT_BANK(2); + + /* need MC_RESET to keep the memory consistent. errata? */ + if (smc->rx_ovrn) { + outw(MC_RESET, ioaddr + MMU_CMD); + smc->rx_ovrn = 0; + } + i = inw(ioaddr + INTERRUPT); + SMC_SELECT_BANK(0); + media = inw(ioaddr + EPH) & EPH_LINK_OK; + SMC_SELECT_BANK(1); + media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; + + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); + + /* Check for pending interrupt with watchdog flag set: with + this, we can limp along even if the interrupt is blocked */ + if (smc->watchdog++ && ((i>>8) & i)) { + if (!smc->fast_poll) + netdev_info(dev, "interrupt(s) dropped!\n"); + local_irq_save(flags); + smc_interrupt(dev->irq, dev); + local_irq_restore(flags); + smc->fast_poll = HZ; + } + if (smc->fast_poll) { + smc->fast_poll--; + smc->media.expires = jiffies + HZ/100; + add_timer(&smc->media); + return; + } + + spin_lock_irqsave(&smc->lock, flags); + + saved_bank = inw(ioaddr + BANK_SELECT); + + if (smc->cfg & CFG_MII_SELECT) { + if (smc->mii_if.phy_id < 0) + goto reschedule; + + SMC_SELECT_BANK(3); + link = mdio_read(dev, smc->mii_if.phy_id, 1); + if (!link || (link == 0xffff)) { + netdev_info(dev, "MII is missing!\n"); + smc->mii_if.phy_id = -1; + goto reschedule; + } + + link &= 0x0004; + if (link != smc->link_status) { + u_short p = mdio_read(dev, smc->mii_if.phy_id, 5); + netdev_info(dev, "%s link beat\n", link ? "found" : "lost"); + smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40)) + ? TCR_FDUPLX : 0); + if (link) { + netdev_info(dev, "autonegotiation complete: " + "%dbaseT-%cD selected\n", + (p & 0x0180) ? 100 : 10, smc->duplex ? 'F' : 'H'); + } + SMC_SELECT_BANK(0); + outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR); + smc->link_status = link; + } + goto reschedule; + } + + /* Ignore collisions unless we've had no rx's recently */ + if (time_after(jiffies, dev->last_rx + HZ)) { + if (smc->tx_err || (smc->media_status & EPH_16COL)) + media |= EPH_16COL; + } + smc->tx_err = 0; + + if (media != smc->media_status) { + if ((media & smc->media_status & 1) && + ((smc->media_status ^ media) & EPH_LINK_OK)) + netdev_info(dev, "%s link beat\n", + smc->media_status & EPH_LINK_OK ? "lost" : "found"); + else if ((media & smc->media_status & 2) && + ((smc->media_status ^ media) & EPH_16COL)) + netdev_info(dev, "coax cable %s\n", + media & EPH_16COL ? 
"problem" : "ok"); + if (dev->if_port == 0) { + if (media & 1) { + if (media & EPH_LINK_OK) + netdev_info(dev, "flipped to 10baseT\n"); + else + smc_set_xcvr(dev, 2); + } else { + if (media & EPH_16COL) + smc_set_xcvr(dev, 1); + else + netdev_info(dev, "flipped to 10base2\n"); + } + } + smc->media_status = media; + } + +reschedule: + smc->media.expires = jiffies + HZ; + add_timer(&smc->media); + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); +} + +static int smc_link_ok(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct smc_private *smc = netdev_priv(dev); + + if (smc->cfg & CFG_MII_SELECT) { + return mii_link_ok(&smc->mii_if); + } else { + SMC_SELECT_BANK(0); + return inw(ioaddr + EPH) & EPH_LINK_OK; + } +} + +static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u16 tmp; + unsigned int ioaddr = dev->base_addr; + + ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI | + SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); + + SMC_SELECT_BANK(1); + tmp = inw(ioaddr + CONFIG); + ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP; + ecmd->transceiver = XCVR_INTERNAL; + ethtool_cmd_speed_set(ecmd, SPEED_10); + ecmd->phy_address = ioaddr + MGMT; + + SMC_SELECT_BANK(0); + tmp = inw(ioaddr + TCR); + ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF; + + return 0; +} + +static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u16 tmp; + unsigned int ioaddr = dev->base_addr; + + if (ethtool_cmd_speed(ecmd) != SPEED_10) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI) + return -EINVAL; + if (ecmd->transceiver != XCVR_INTERNAL) + return -EINVAL; + + if (ecmd->port == PORT_AUI) + smc_set_xcvr(dev, 1); + else + smc_set_xcvr(dev, 0); + + SMC_SELECT_BANK(0); + tmp = inw(ioaddr + TCR); + if (ecmd->duplex == DUPLEX_FULL) + tmp |= TCR_FDUPLX; + else + tmp &= ~TCR_FDUPLX; + outw(tmp, ioaddr + TCR); + + return 0; +} + +static int check_if_running(struct net_device *dev) +{ + if (!netif_running(dev)) + return -EINVAL; + return 0; +} + +static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); +} + +static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u16 saved_bank = inw(ioaddr + BANK_SELECT); + int ret; + unsigned long flags; + + spin_lock_irqsave(&smc->lock, flags); + SMC_SELECT_BANK(3); + if (smc->cfg & CFG_MII_SELECT) + ret = mii_ethtool_gset(&smc->mii_if, ecmd); + else + ret = smc_netdev_get_ecmd(dev, ecmd); + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); + return ret; +} + +static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u16 saved_bank = inw(ioaddr + BANK_SELECT); + int ret; + unsigned long flags; + + spin_lock_irqsave(&smc->lock, flags); + SMC_SELECT_BANK(3); + if (smc->cfg & CFG_MII_SELECT) + ret = mii_ethtool_sset(&smc->mii_if, ecmd); + else + ret = smc_netdev_set_ecmd(dev, ecmd); + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); + return ret; +} + +static u32 smc_get_link(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u16 
saved_bank = inw(ioaddr + BANK_SELECT); + u32 ret; + unsigned long flags; + + spin_lock_irqsave(&smc->lock, flags); + SMC_SELECT_BANK(3); + ret = smc_link_ok(dev); + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); + return ret; +} + +static int smc_nway_reset(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + if (smc->cfg & CFG_MII_SELECT) { + unsigned int ioaddr = dev->base_addr; + u16 saved_bank = inw(ioaddr + BANK_SELECT); + int res; + + SMC_SELECT_BANK(3); + res = mii_nway_restart(&smc->mii_if); + SMC_SELECT_BANK(saved_bank); + + return res; + } else + return -EOPNOTSUPP; +} + +static const struct ethtool_ops ethtool_ops = { + .begin = check_if_running, + .get_drvinfo = smc_get_drvinfo, + .get_settings = smc_get_settings, + .set_settings = smc_set_settings, + .get_link = smc_get_link, + .nway_reset = smc_nway_reset, +}; + +static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct smc_private *smc = netdev_priv(dev); + struct mii_ioctl_data *mii = if_mii(rq); + int rc = 0; + u16 saved_bank; + unsigned int ioaddr = dev->base_addr; + unsigned long flags; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irqsave(&smc->lock, flags); + saved_bank = inw(ioaddr + BANK_SELECT); + SMC_SELECT_BANK(3); + rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); + return rc; +} + +static const struct pcmcia_device_id smc91c92_ids[] = { + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0109, 0x0501), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0140, 0x000a), + PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "CC/XJEM3288", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x04cd2988, 0x46a52d63), + PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "CC/XJEM3336", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x0143b773, 0x46a52d63), + PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "EM1144T", "PCMCIA MODEM", 0xf510db04, 0x856d66c8, 0xbd6c43ef), + PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "XJEM1144/CCEM1144", "PCMCIA MODEM", 0xf510db04, 0x52d21e1e, 0xbd6c43ef), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed), + PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x016c, 0x0020), + PCMCIA_DEVICE_MANF_CARD(0x016c, 0x0023), + PCMCIA_DEVICE_PROD_ID123("BASICS by New Media Corporation", "Ethernet", "SMC91C94", 0x23c78a9d, 0x00b2e941, 0xcef397fb), + PCMCIA_DEVICE_PROD_ID12("ARGOSY", "Fast Ethernet PCCard", 0x78f308dc, 0xdcea68bc), + PCMCIA_DEVICE_PROD_ID12("dit Co., Ltd.", "PC Card-10/100BTX", 0xe59365c8, 0x6a2161d1), + PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L100C", 0x6a26d1cf, 0xc16ce9c5), + PCMCIA_DEVICE_PROD_ID12("Farallon", "Farallon Enet", 0x58d93fc4, 0x244734e9), + PCMCIA_DEVICE_PROD_ID12("Megahertz", "CC10BT/2", 0x33234748, 0x3c95b953), + PCMCIA_DEVICE_PROD_ID12("MELCO/SMC", "LPC-TX", 0xa2cd8e6d, 0x42da662a), + PCMCIA_DEVICE_PROD_ID12("Ositech", "Trumpcard:Four of Diamonds Ethernet", 0xc2f80cd, 0xb3466314), + PCMCIA_DEVICE_PROD_ID12("Ositech", "Trumpcard:Seven of Diamonds Ethernet", 0xc2f80cd, 0x194b650a), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Fast Ethernet PCCard", 0x281f1c5d, 0xdcea68bc), + PCMCIA_DEVICE_PROD_ID12("Psion", "10Mb Ethernet", 0x4ef00b21, 
0x844be9e9), + PCMCIA_DEVICE_PROD_ID12("SMC", "EtherEZ Ethernet 8020", 0xc4f8b18b, 0x4a0eeb2d), + /* These conflict with other cards! */ + /* PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0100), */ + /* PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), */ + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, smc91c92_ids); + +static struct pcmcia_driver smc91c92_cs_driver = { + .owner = THIS_MODULE, + .name = "smc91c92_cs", + .probe = smc91c92_probe, + .remove = smc91c92_detach, + .id_table = smc91c92_ids, + .suspend = smc91c92_suspend, + .resume = smc91c92_resume, +}; + +static int __init init_smc91c92_cs(void) +{ + return pcmcia_register_driver(&smc91c92_cs_driver); +} + +static void __exit exit_smc91c92_cs(void) +{ + pcmcia_unregister_driver(&smc91c92_cs_driver); +} + +module_init(init_smc91c92_cs); +module_exit(exit_smc91c92_cs); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c new file mode 100644 index 0000000..2b1d254 --- /dev/null +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -0,0 +1,2431 @@ +/* + * smc91x.c + * This is a driver for SMSC's 91C9x/91C1xx single-chip Ethernet devices. + * + * Copyright (C) 1996 by Erik Stahlman + * Copyright (C) 2001 Standard Microsystems Corporation + * Developed by Simple Network Magic Corporation + * Copyright (C) 2003 Monta Vista Software, Inc. + * Unified SMC91x driver by Nicolas Pitre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Arguments: + * io = for the base address + * irq = for the IRQ + * nowait = 0 for normal wait states, 1 eliminates additional wait states + * + * original author: + * Erik Stahlman + * + * hardware multicast code: + * Peter Cammaert + * + * contributors: + * Daris A Nevil + * Nicolas Pitre + * Russell King + * + * History: + * 08/20/00 Arnaldo Melo fix kfree(skb) in smc_hardware_send_packet + * 12/15/00 Christian Jullien fix "Warning: kfree_skb on hard IRQ" + * 03/16/01 Daris A Nevil modified smc9194.c for use with LAN91C111 + * 08/22/01 Scott Anderson merge changes from smc9194 to smc91111 + * 08/21/01 Pramod B Bhardwaj added support for RevB of LAN91C111 + * 12/20/01 Jeff Sutherland initial port to Xscale PXA with DMA support + * 04/07/03 Nicolas Pitre unified SMC91x driver, killed irq races, + * more bus abstraction, big cleanup, etc. 
+ * 29/09/03 Russell King - add driver model support + * - ethtool support + * - convert to use generic MII interface + * - add link up/down notification + * - don't try to handle full negotiation in + * smc_phy_configure + * - clean up (and fix stack overrun) in PHY + * MII read/write functions + * 22/09/04 Nicolas Pitre big update (see commit log for details) + */ +static const char version[] = + "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre \n"; + +/* Debugging level */ +#ifndef SMC_DEBUG +#define SMC_DEBUG 0 +#endif + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "smc91x.h" + +#ifndef SMC_NOWAIT +# define SMC_NOWAIT 0 +#endif +static int nowait = SMC_NOWAIT; +module_param(nowait, int, 0400); +MODULE_PARM_DESC(nowait, "set to 1 for no wait state"); + +/* + * Transmit timeout, default 5 seconds. + */ +static int watchdog = 1000; +module_param(watchdog, int, 0400); +MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:smc91x"); + +/* + * The internal workings of the driver. If you are changing anything + * here with the SMC stuff, you should have the datasheet and know + * what you are doing. + */ +#define CARDNAME "smc91x" + +/* + * Use power-down feature of the chip + */ +#define POWER_DOWN 1 + +/* + * Wait time for memory to be free. This probably shouldn't be + * tuned that much, as waiting for this means nothing else happens + * in the system + */ +#define MEMORY_WAIT_TIME 16 + +/* + * The maximum number of processing loops allowed for each call to the + * IRQ handler. + */ +#define MAX_IRQ_LOOPS 8 + +/* + * This selects whether TX packets are sent one by one to the SMC91x internal + * memory and throttled until transmission completes. This may prevent + * RX overruns a litle by keeping much of the memory free for RX packets + * but to the expense of reduced TX throughput and increased IRQ overhead. + * Note this is not a cure for a too slow data bus or too high IRQ latency. + */ +#define THROTTLE_TX_PKTS 0 + +/* + * The MII clock high/low times. 2x this number gives the MII clock period + * in microseconds. (was 50, but this gives 6.4ms for each MII transaction!) + */ +#define MII_DELAY 1 + +#if SMC_DEBUG > 0 +#define DBG(n, args...) \ + do { \ + if (SMC_DEBUG >= (n)) \ + printk(args); \ + } while (0) + +#define PRINTK(args...) printk(args) +#else +#define DBG(n, args...) do { } while(0) +#define PRINTK(args...) printk(KERN_DEBUG args) +#endif + +#if SMC_DEBUG > 3 +static void PRINT_PKT(u_char *buf, int length) +{ + int i; + int remainder; + int lines; + + lines = length / 16; + remainder = length % 16; + + for (i = 0; i < lines ; i ++) { + int cur; + for (cur = 0; cur < 8; cur++) { + u_char a, b; + a = *buf++; + b = *buf++; + printk("%02x%02x ", a, b); + } + printk("\n"); + } + for (i = 0; i < remainder/2 ; i++) { + u_char a, b; + a = *buf++; + b = *buf++; + printk("%02x%02x ", a, b); + } + printk("\n"); +} +#else +#define PRINT_PKT(x...) 
do { } while(0) +#endif + + +/* this enables an interrupt in the interrupt mask register */ +#define SMC_ENABLE_INT(lp, x) do { \ + unsigned char mask; \ + unsigned long smc_enable_flags; \ + spin_lock_irqsave(&lp->lock, smc_enable_flags); \ + mask = SMC_GET_INT_MASK(lp); \ + mask |= (x); \ + SMC_SET_INT_MASK(lp, mask); \ + spin_unlock_irqrestore(&lp->lock, smc_enable_flags); \ +} while (0) + +/* this disables an interrupt from the interrupt mask register */ +#define SMC_DISABLE_INT(lp, x) do { \ + unsigned char mask; \ + unsigned long smc_disable_flags; \ + spin_lock_irqsave(&lp->lock, smc_disable_flags); \ + mask = SMC_GET_INT_MASK(lp); \ + mask &= ~(x); \ + SMC_SET_INT_MASK(lp, mask); \ + spin_unlock_irqrestore(&lp->lock, smc_disable_flags); \ +} while (0) + +/* + * Wait while MMU is busy. This is usually in the order of a few nanosecs + * if at all, but let's avoid deadlocking the system if the hardware + * decides to go south. + */ +#define SMC_WAIT_MMU_BUSY(lp) do { \ + if (unlikely(SMC_GET_MMU_CMD(lp) & MC_BUSY)) { \ + unsigned long timeout = jiffies + 2; \ + while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \ + if (time_after(jiffies, timeout)) { \ + printk("%s: timeout %s line %d\n", \ + dev->name, __FILE__, __LINE__); \ + break; \ + } \ + cpu_relax(); \ + } \ + } \ +} while (0) + + +/* + * this does a soft reset on the device + */ +static void smc_reset(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int ctl, cfg; + struct sk_buff *pending_skb; + + DBG(2, "%s: %s\n", dev->name, __func__); + + /* Disable all interrupts, block TX tasklet */ + spin_lock_irq(&lp->lock); + SMC_SELECT_BANK(lp, 2); + SMC_SET_INT_MASK(lp, 0); + pending_skb = lp->pending_tx_skb; + lp->pending_tx_skb = NULL; + spin_unlock_irq(&lp->lock); + + /* free any pending tx skb */ + if (pending_skb) { + dev_kfree_skb(pending_skb); + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + } + + /* + * This resets the registers mostly to defaults, but doesn't + * affect EEPROM. That seems unnecessary + */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_RCR(lp, RCR_SOFTRST); + + /* + * Setup the Configuration Register + * This is necessary because the CONFIG_REG is not affected + * by a soft reset + */ + SMC_SELECT_BANK(lp, 1); + + cfg = CONFIG_DEFAULT; + + /* + * Setup for fast accesses if requested. If the card/system + * can't handle it then there will be no recovery except for + * a hard reset or power cycle + */ + if (lp->cfg.flags & SMC91X_NOWAIT) + cfg |= CONFIG_NO_WAIT; + + /* + * Release from possible power-down state + * Configuration register is not affected by Soft Reset + */ + cfg |= CONFIG_EPH_POWER_EN; + + SMC_SET_CONFIG(lp, cfg); + + /* this should pause enough for the chip to be happy */ + /* + * elaborate? What does the chip _need_? --jgarzik + * + * This seems to be undocumented, but something the original + * driver(s) have always done. Suspect undocumented timing + * info/determined empirically. 
--rmk + */ + udelay(1); + + /* Disable transmit and receive functionality */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_RCR(lp, RCR_CLEAR); + SMC_SET_TCR(lp, TCR_CLEAR); + + SMC_SELECT_BANK(lp, 1); + ctl = SMC_GET_CTL(lp) | CTL_LE_ENABLE; + + /* + * Set the control register to automatically release successfully + * transmitted packets, to make the best use out of our limited + * memory + */ + if(!THROTTLE_TX_PKTS) + ctl |= CTL_AUTO_RELEASE; + else + ctl &= ~CTL_AUTO_RELEASE; + SMC_SET_CTL(lp, ctl); + + /* Reset the MMU */ + SMC_SELECT_BANK(lp, 2); + SMC_SET_MMU_CMD(lp, MC_RESET); + SMC_WAIT_MMU_BUSY(lp); +} + +/* + * Enable Interrupts, Receive, and Transmit + */ +static void smc_enable(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + int mask; + + DBG(2, "%s: %s\n", dev->name, __func__); + + /* see the header file for options in TCR/RCR DEFAULT */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_TCR(lp, lp->tcr_cur_mode); + SMC_SET_RCR(lp, lp->rcr_cur_mode); + + SMC_SELECT_BANK(lp, 1); + SMC_SET_MAC_ADDR(lp, dev->dev_addr); + + /* now, enable interrupts */ + mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT; + if (lp->version >= (CHIP_91100 << 4)) + mask |= IM_MDINT; + SMC_SELECT_BANK(lp, 2); + SMC_SET_INT_MASK(lp, mask); + + /* + * From this point the register bank must _NOT_ be switched away + * to something else than bank 2 without proper locking against + * races with any tasklet or interrupt handlers until smc_shutdown() + * or smc_reset() is called. + */ +} + +/* + * this puts the device in an inactive state + */ +static void smc_shutdown(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + struct sk_buff *pending_skb; + + DBG(2, "%s: %s\n", CARDNAME, __func__); + + /* no more interrupts for me */ + spin_lock_irq(&lp->lock); + SMC_SELECT_BANK(lp, 2); + SMC_SET_INT_MASK(lp, 0); + pending_skb = lp->pending_tx_skb; + lp->pending_tx_skb = NULL; + spin_unlock_irq(&lp->lock); + if (pending_skb) + dev_kfree_skb(pending_skb); + + /* and tell the card to stay away from that nasty outside world */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_RCR(lp, RCR_CLEAR); + SMC_SET_TCR(lp, TCR_CLEAR); + +#ifdef POWER_DOWN + /* finally, shut the chip down */ + SMC_SELECT_BANK(lp, 1); + SMC_SET_CONFIG(lp, SMC_GET_CONFIG(lp) & ~CONFIG_EPH_POWER_EN); +#endif +} + +/* + * This is the procedure to handle the receipt of a packet. 
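+ *
+ * On-chip, a received frame consists of a status word, a length word,
+ * the payload, and a final control word whose low byte may carry an
+ * odd trailing data byte. The length counts all of these, hence the
+ * packet_len - 6 (or - 5 for odd-sized frames) arithmetic below.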
+ */ +static inline void smc_rcv(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int packet_number, status, packet_len; + + DBG(3, "%s: %s\n", dev->name, __func__); + + packet_number = SMC_GET_RXFIFO(lp); + if (unlikely(packet_number & RXFIFO_REMPTY)) { + PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name); + return; + } + + /* read from start of packet */ + SMC_SET_PTR(lp, PTR_READ | PTR_RCV | PTR_AUTOINC); + + /* First two words are status and packet length */ + SMC_GET_PKT_HDR(lp, status, packet_len); + packet_len &= 0x07ff; /* mask off top bits */ + DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n", + dev->name, packet_number, status, + packet_len, packet_len); + + back: + if (unlikely(packet_len < 6 || status & RS_ERRORS)) { + if (status & RS_TOOLONG && packet_len <= (1514 + 4 + 6)) { + /* accept VLAN packets */ + status &= ~RS_TOOLONG; + goto back; + } + if (packet_len < 6) { + /* bloody hardware */ + printk(KERN_ERR "%s: fubar (rxlen %u status %x\n", + dev->name, packet_len, status); + status |= RS_TOOSHORT; + } + SMC_WAIT_MMU_BUSY(lp); + SMC_SET_MMU_CMD(lp, MC_RELEASE); + dev->stats.rx_errors++; + if (status & RS_ALGNERR) + dev->stats.rx_frame_errors++; + if (status & (RS_TOOSHORT | RS_TOOLONG)) + dev->stats.rx_length_errors++; + if (status & RS_BADCRC) + dev->stats.rx_crc_errors++; + } else { + struct sk_buff *skb; + unsigned char *data; + unsigned int data_len; + + /* set multicast stats */ + if (status & RS_MULTICAST) + dev->stats.multicast++; + + /* + * Actual payload is packet_len - 6 (or 5 if odd byte). + * We want skb_reserve(2) and the final ctrl word + * (2 bytes, possibly containing the payload odd byte). + * Furthermore, we add 2 bytes to allow rounding up to + * multiple of 4 bytes on 32 bit buses. + * Hence packet_len - 6 + 2 + 2 + 2. + */ + skb = dev_alloc_skb(packet_len); + if (unlikely(skb == NULL)) { + printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", + dev->name); + SMC_WAIT_MMU_BUSY(lp); + SMC_SET_MMU_CMD(lp, MC_RELEASE); + dev->stats.rx_dropped++; + return; + } + + /* Align IP header to 32 bits */ + skb_reserve(skb, 2); + + /* BUG: the LAN91C111 rev A never sets this bit. Force it. */ + if (lp->version == 0x90) + status |= RS_ODDFRAME; + + /* + * If odd length: packet_len - 5, + * otherwise packet_len - 6. + * With the trailing ctrl byte it's packet_len - 4. + */ + data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6); + data = skb_put(skb, data_len); + SMC_PULL_DATA(lp, data, packet_len - 4); + + SMC_WAIT_MMU_BUSY(lp); + SMC_SET_MMU_CMD(lp, MC_RELEASE); + + PRINT_PKT(data, packet_len - 4); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += data_len; + } +} + +#ifdef CONFIG_SMP +/* + * On SMP we have the following problem: + * + * A = smc_hardware_send_pkt() + * B = smc_hard_start_xmit() + * C = smc_interrupt() + * + * A and B can never be executed simultaneously. However, at least on UP, + * it is possible (and even desirable) for C to interrupt execution of + * A or B in order to have better RX reliability and avoid overruns. + * C, just like A and B, must have exclusive access to the chip and + * each of them must lock against any other concurrent access. + * Unfortunately this is not possible to have C suspend execution of A or + * B taking place on another CPU. 
On UP this is no an issue since A and B + * are run from softirq context and C from hard IRQ context, and there is + * no other CPU where concurrent access can happen. + * If ever there is a way to force at least B and C to always be executed + * on the same CPU then we could use read/write locks to protect against + * any other concurrent access and C would always interrupt B. But life + * isn't that easy in a SMP world... + */ +#define smc_special_trylock(lock, flags) \ +({ \ + int __ret; \ + local_irq_save(flags); \ + __ret = spin_trylock(lock); \ + if (!__ret) \ + local_irq_restore(flags); \ + __ret; \ +}) +#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags) +#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags) +#else +#define smc_special_trylock(lock, flags) (flags == flags) +#define smc_special_lock(lock, flags) do { flags = 0; } while (0) +#define smc_special_unlock(lock, flags) do { flags = 0; } while (0) +#endif + +/* + * This is called to actually send a packet to the chip. + */ +static void smc_hardware_send_pkt(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + struct sk_buff *skb; + unsigned int packet_no, len; + unsigned char *buf; + unsigned long flags; + + DBG(3, "%s: %s\n", dev->name, __func__); + + if (!smc_special_trylock(&lp->lock, flags)) { + netif_stop_queue(dev); + tasklet_schedule(&lp->tx_task); + return; + } + + skb = lp->pending_tx_skb; + if (unlikely(!skb)) { + smc_special_unlock(&lp->lock, flags); + return; + } + lp->pending_tx_skb = NULL; + + packet_no = SMC_GET_AR(lp); + if (unlikely(packet_no & AR_FAILED)) { + printk("%s: Memory allocation failed.\n", dev->name); + dev->stats.tx_errors++; + dev->stats.tx_fifo_errors++; + smc_special_unlock(&lp->lock, flags); + goto done; + } + + /* point to the beginning of the packet */ + SMC_SET_PN(lp, packet_no); + SMC_SET_PTR(lp, PTR_AUTOINC); + + buf = skb->data; + len = skb->len; + DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n", + dev->name, packet_no, len, len, buf); + PRINT_PKT(buf, len); + + /* + * Send the packet length (+6 for status words, length, and ctl. + * The card will pad to 64 bytes with zeroes if packet is too small. + */ + SMC_PUT_PKT_HDR(lp, 0, len + 6); + + /* send the actual data */ + SMC_PUSH_DATA(lp, buf, len & ~1); + + /* Send final ctl word with the last byte if there is one */ + SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG(lp)); + + /* + * If THROTTLE_TX_PKTS is set, we stop the queue here. This will + * have the effect of having at most one packet queued for TX + * in the chip's memory at all time. + * + * If THROTTLE_TX_PKTS is not set then the queue is stopped only + * when memory allocation (MC_ALLOC) does not succeed right away. + */ + if (THROTTLE_TX_PKTS) + netif_stop_queue(dev); + + /* queue the packet for TX */ + SMC_SET_MMU_CMD(lp, MC_ENQUEUE); + smc_special_unlock(&lp->lock, flags); + + dev->trans_start = jiffies; + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; + + SMC_ENABLE_INT(lp, IM_TX_INT | IM_TX_EMPTY_INT); + +done: if (!THROTTLE_TX_PKTS) + netif_wake_queue(dev); + + dev_kfree_skb(skb); +} + +/* + * Since I am not sure if I will have enough room in the chip's ram + * to store the packet, I call this routine which either sends it + * now, or set the card to generates an interrupt when ready + * for the packet. 
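+ *
+ * Concretely: MC_ALLOC is issued and polled for a short while; if the
+ * chip cannot supply memory in time, the queue is stopped and the
+ * deferred IM_ALLOC_INT interrupt later runs smc_hardware_send_pkt()
+ * from the TX tasklet.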
+ */ +static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int numPages, poll_count, status; + unsigned long flags; + + DBG(3, "%s: %s\n", dev->name, __func__); + + BUG_ON(lp->pending_tx_skb != NULL); + + /* + * The MMU wants the number of pages to be the number of 256 bytes + * 'pages', minus 1 (since a packet can't ever have 0 pages :)) + * + * The 91C111 ignores the size bits, but earlier models don't. + * + * Pkt size for allocating is data length +6 (for additional status + * words, length and ctl) + * + * If odd size then last byte is included in ctl word. + */ + numPages = ((skb->len & ~1) + (6 - 1)) >> 8; + if (unlikely(numPages > 7)) { + printk("%s: Far too big packet error.\n", dev->name); + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + smc_special_lock(&lp->lock, flags); + + /* now, try to allocate the memory */ + SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages); + + /* + * Poll the chip for a short amount of time in case the + * allocation succeeds quickly. + */ + poll_count = MEMORY_WAIT_TIME; + do { + status = SMC_GET_INT(lp); + if (status & IM_ALLOC_INT) { + SMC_ACK_INT(lp, IM_ALLOC_INT); + break; + } + } while (--poll_count); + + smc_special_unlock(&lp->lock, flags); + + lp->pending_tx_skb = skb; + if (!poll_count) { + /* oh well, wait until the chip finds memory later */ + netif_stop_queue(dev); + DBG(2, "%s: TX memory allocation deferred.\n", dev->name); + SMC_ENABLE_INT(lp, IM_ALLOC_INT); + } else { + /* + * Allocation succeeded: push packet to the chip's own memory + * immediately. + */ + smc_hardware_send_pkt((unsigned long)dev); + } + + return NETDEV_TX_OK; +} + +/* + * This handles a TX interrupt, which is only called when: + * - a TX error occurred, or + * - CTL_AUTO_RELEASE is not set and TX of a packet completed. + */ +static void smc_tx(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int saved_packet, packet_no, tx_status, pkt_len; + + DBG(3, "%s: %s\n", dev->name, __func__); + + /* If the TX FIFO is empty then nothing to do */ + packet_no = SMC_GET_TXFIFO(lp); + if (unlikely(packet_no & TXFIFO_TEMPTY)) { + PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name); + return; + } + + /* select packet to read from */ + saved_packet = SMC_GET_PN(lp); + SMC_SET_PN(lp, packet_no); + + /* read the first word (status word) from this packet */ + SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ); + SMC_GET_PKT_HDR(lp, tx_status, pkt_len); + DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n", + dev->name, tx_status, packet_no); + + if (!(tx_status & ES_TX_SUC)) + dev->stats.tx_errors++; + + if (tx_status & ES_LOSTCARR) + dev->stats.tx_carrier_errors++; + + if (tx_status & (ES_LATCOL | ES_16COL)) { + PRINTK("%s: %s occurred on last xmit\n", dev->name, + (tx_status & ES_LATCOL) ? + "late collision" : "too many collisions"); + dev->stats.tx_window_errors++; + if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) { + printk(KERN_INFO "%s: unexpectedly large number of " + "bad collisions. 
Please check duplex " + "setting.\n", dev->name); + } + } + + /* kill the packet */ + SMC_WAIT_MMU_BUSY(lp); + SMC_SET_MMU_CMD(lp, MC_FREEPKT); + + /* Don't restore Packet Number Reg until busy bit is cleared */ + SMC_WAIT_MMU_BUSY(lp); + SMC_SET_PN(lp, saved_packet); + + /* re-enable transmit */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_TCR(lp, lp->tcr_cur_mode); + SMC_SELECT_BANK(lp, 2); +} + + +/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ + +static void smc_mii_out(struct net_device *dev, unsigned int val, int bits) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int mii_reg, mask; + + mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO); + mii_reg |= MII_MDOE; + + for (mask = 1 << (bits - 1); mask; mask >>= 1) { + if (val & mask) + mii_reg |= MII_MDO; + else + mii_reg &= ~MII_MDO; + + SMC_SET_MII(lp, mii_reg); + udelay(MII_DELAY); + SMC_SET_MII(lp, mii_reg | MII_MCLK); + udelay(MII_DELAY); + } +} + +static unsigned int smc_mii_in(struct net_device *dev, int bits) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int mii_reg, mask, val; + + mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO); + SMC_SET_MII(lp, mii_reg); + + for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) { + if (SMC_GET_MII(lp) & MII_MDI) + val |= mask; + + SMC_SET_MII(lp, mii_reg); + udelay(MII_DELAY); + SMC_SET_MII(lp, mii_reg | MII_MCLK); + udelay(MII_DELAY); + } + + return val; +} + +/* + * Reads a register from the MII Management serial interface + */ +static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int phydata; + + SMC_SELECT_BANK(lp, 3); + + /* Idle - 32 ones */ + smc_mii_out(dev, 0xffffffff, 32); + + /* Start code (01) + read (10) + phyaddr + phyreg */ + smc_mii_out(dev, 6 << 10 | phyaddr << 5 | phyreg, 14); + + /* Turnaround (2bits) + phydata */ + phydata = smc_mii_in(dev, 18); + + /* Return to idle state */ + SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); + + DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", + __func__, phyaddr, phyreg, phydata); + + SMC_SELECT_BANK(lp, 2); + return phydata; +} + +/* + * Writes a register to the MII Management serial interface + */ +static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg, + int phydata) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + SMC_SELECT_BANK(lp, 3); + + /* Idle - 32 ones */ + smc_mii_out(dev, 0xffffffff, 32); + + /* Start code (01) + write (01) + phyaddr + phyreg + turnaround + phydata */ + smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32); + + /* Return to idle state */ + SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); + + DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", + __func__, phyaddr, phyreg, phydata); + + SMC_SELECT_BANK(lp, 2); +} + +/* + * Finds and reports the PHY address + */ +static void smc_phy_detect(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + int phyaddr; + + DBG(2, "%s: %s\n", dev->name, __func__); + + lp->phy_type = 0; + + /* + * Scan all 32 PHY addresses if necessary, starting at + * PHY#1 to PHY#31, and then PHY#0 last. 
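+ * (The loop achieves this by running phyaddr from 1 to 32 and masking
+ * with 31, so the last iteration wraps around to address 0.)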
+ */ + for (phyaddr = 1; phyaddr < 33; ++phyaddr) { + unsigned int id1, id2; + + /* Read the PHY identifiers */ + id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1); + id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2); + + DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n", + dev->name, id1, id2); + + /* Make sure it is a valid identifier */ + if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 && + id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) { + /* Save the PHY's address */ + lp->mii.phy_id = phyaddr & 31; + lp->phy_type = id1 << 16 | id2; + break; + } + } +} + +/* + * Sets the PHY to a configuration as determined by the user + */ +static int smc_phy_fixed(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + int phyaddr = lp->mii.phy_id; + int bmcr, cfg1; + + DBG(3, "%s: %s\n", dev->name, __func__); + + /* Enter Link Disable state */ + cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG); + cfg1 |= PHY_CFG1_LNKDIS; + smc_phy_write(dev, phyaddr, PHY_CFG1_REG, cfg1); + + /* + * Set our fixed capabilities + * Disable auto-negotiation + */ + bmcr = 0; + + if (lp->ctl_rfduplx) + bmcr |= BMCR_FULLDPLX; + + if (lp->ctl_rspeed == 100) + bmcr |= BMCR_SPEED100; + + /* Write our capabilities to the phy control register */ + smc_phy_write(dev, phyaddr, MII_BMCR, bmcr); + + /* Re-Configure the Receive/Phy Control register */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_RPC(lp, lp->rpc_cur_mode); + SMC_SELECT_BANK(lp, 2); + + return 1; +} + +/* + * smc_phy_reset - reset the phy + * @dev: net device + * @phy: phy address + * + * Issue a software reset for the specified PHY and + * wait up to 100ms for the reset to complete. We should + * not access the PHY for 50ms after issuing the reset. + * + * The time to wait appears to be dependent on the PHY. + * + * Must be called with lp->lock locked. + */ +static int smc_phy_reset(struct net_device *dev, int phy) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned int bmcr; + int timeout; + + smc_phy_write(dev, phy, MII_BMCR, BMCR_RESET); + + for (timeout = 2; timeout; timeout--) { + spin_unlock_irq(&lp->lock); + msleep(50); + spin_lock_irq(&lp->lock); + + bmcr = smc_phy_read(dev, phy, MII_BMCR); + if (!(bmcr & BMCR_RESET)) + break; + } + + return bmcr & BMCR_RESET; +} + +/* + * smc_phy_powerdown - powerdown phy + * @dev: net device + * + * Power down the specified PHY + */ +static void smc_phy_powerdown(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned int bmcr; + int phy = lp->mii.phy_id; + + if (lp->phy_type == 0) + return; + + /* We need to ensure that no calls to smc_phy_configure are + pending. + */ + cancel_work_sync(&lp->phy_configure); + + bmcr = smc_phy_read(dev, phy, MII_BMCR); + smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); +} + +/* + * smc_phy_check_media - check the media status and adjust TCR + * @dev: net device + * @init: set true for initialisation + * + * Select duplex mode depending on negotiation state. This + * also updates our carrier state. 
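+ * When mii_check_media() reports a duplex change, TCR_SWFDUP is set or
+ * cleared in tcr_cur_mode and the TCR register is rewritten to match.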
+ */ +static void smc_phy_check_media(struct net_device *dev, int init) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { + /* duplex state has changed */ + if (lp->mii.full_duplex) { + lp->tcr_cur_mode |= TCR_SWFDUP; + } else { + lp->tcr_cur_mode &= ~TCR_SWFDUP; + } + + SMC_SELECT_BANK(lp, 0); + SMC_SET_TCR(lp, lp->tcr_cur_mode); + } +} + +/* + * Configures the specified PHY through the MII management interface + * using Autonegotiation. + * Calls smc_phy_fixed() if the user has requested a certain config. + * If RPC ANEG bit is set, the media selection is dependent purely on + * the selection by the MII (either in the MII BMCR reg or the result + * of autonegotiation.) If the RPC ANEG bit is cleared, the selection + * is controlled by the RPC SPEED and RPC DPLX bits. + */ +static void smc_phy_configure(struct work_struct *work) +{ + struct smc_local *lp = + container_of(work, struct smc_local, phy_configure); + struct net_device *dev = lp->dev; + void __iomem *ioaddr = lp->base; + int phyaddr = lp->mii.phy_id; + int my_phy_caps; /* My PHY capabilities */ + int my_ad_caps; /* My Advertised capabilities */ + int status; + + DBG(3, "%s:smc_program_phy()\n", dev->name); + + spin_lock_irq(&lp->lock); + + /* + * We should not be called if phy_type is zero. + */ + if (lp->phy_type == 0) + goto smc_phy_configure_exit; + + if (smc_phy_reset(dev, phyaddr)) { + printk("%s: PHY reset timed out\n", dev->name); + goto smc_phy_configure_exit; + } + + /* + * Enable PHY Interrupts (for register 18) + * Interrupts listed here are disabled + */ + smc_phy_write(dev, phyaddr, PHY_MASK_REG, + PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD | + PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB | + PHY_INT_SPDDET | PHY_INT_DPLXDET); + + /* Configure the Receive/Phy Control register */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_RPC(lp, lp->rpc_cur_mode); + + /* If the user requested no auto neg, then go set his request */ + if (lp->mii.force_media) { + smc_phy_fixed(dev); + goto smc_phy_configure_exit; + } + + /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ + my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR); + + if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { + printk(KERN_INFO "Auto negotiation NOT supported\n"); + smc_phy_fixed(dev); + goto smc_phy_configure_exit; + } + + my_ad_caps = ADVERTISE_CSMA; /* I am CSMA capable */ + + if (my_phy_caps & BMSR_100BASE4) + my_ad_caps |= ADVERTISE_100BASE4; + if (my_phy_caps & BMSR_100FULL) + my_ad_caps |= ADVERTISE_100FULL; + if (my_phy_caps & BMSR_100HALF) + my_ad_caps |= ADVERTISE_100HALF; + if (my_phy_caps & BMSR_10FULL) + my_ad_caps |= ADVERTISE_10FULL; + if (my_phy_caps & BMSR_10HALF) + my_ad_caps |= ADVERTISE_10HALF; + + /* Disable capabilities not selected by our user */ + if (lp->ctl_rspeed != 100) + my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); + + if (!lp->ctl_rfduplx) + my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); + + /* Update our Auto-Neg Advertisement Register */ + smc_phy_write(dev, phyaddr, MII_ADVERTISE, my_ad_caps); + lp->mii.advertising = my_ad_caps; + + /* + * Read the register back. Without this, it appears that when + * auto-negotiation is restarted, sometimes it isn't ready and + * the link does not come up. 
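+ * The value read back is not actually used; the read itself seems to
+ * give the PHY time to settle before BMCR_ANRESTART is written.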
+ */ + status = smc_phy_read(dev, phyaddr, MII_ADVERTISE); + + DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps); + DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps); + + /* Restart auto-negotiation process in order to advertise my caps */ + smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART); + + smc_phy_check_media(dev, 1); + +smc_phy_configure_exit: + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); +} + +/* + * smc_phy_interrupt + * + * Purpose: Handle interrupts relating to PHY register 18. This is + * called from the "hard" interrupt handler under our private spinlock. + */ +static void smc_phy_interrupt(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + int phyaddr = lp->mii.phy_id; + int phy18; + + DBG(2, "%s: %s\n", dev->name, __func__); + + if (lp->phy_type == 0) + return; + + for(;;) { + smc_phy_check_media(dev, 0); + + /* Read PHY Register 18, Status Output */ + phy18 = smc_phy_read(dev, phyaddr, PHY_INT_REG); + if ((phy18 & PHY_INT_INT) == 0) + break; + } +} + +/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ + +static void smc_10bt_check_media(struct net_device *dev, int init) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int old_carrier, new_carrier; + + old_carrier = netif_carrier_ok(dev) ? 1 : 0; + + SMC_SELECT_BANK(lp, 0); + new_carrier = (SMC_GET_EPH_STATUS(lp) & ES_LINK_OK) ? 1 : 0; + SMC_SELECT_BANK(lp, 2); + + if (init || (old_carrier != new_carrier)) { + if (!new_carrier) { + netif_carrier_off(dev); + } else { + netif_carrier_on(dev); + } + if (netif_msg_link(lp)) + printk(KERN_INFO "%s: link %s\n", dev->name, + new_carrier ? "up" : "down"); + } +} + +static void smc_eph_interrupt(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int ctl; + + smc_10bt_check_media(dev, 0); + + SMC_SELECT_BANK(lp, 1); + ctl = SMC_GET_CTL(lp); + SMC_SET_CTL(lp, ctl & ~CTL_LE_ENABLE); + SMC_SET_CTL(lp, ctl); + SMC_SELECT_BANK(lp, 2); +} + +/* + * This is the main routine of the driver, to handle the device when + * it needs some attention. + */ +static irqreturn_t smc_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + int status, mask, timeout, card_stats; + int saved_pointer; + + DBG(3, "%s: %s\n", dev->name, __func__); + + spin_lock(&lp->lock); + + /* A preamble may be used when there is a potential race + * between the interruptible transmit functions and this + * ISR. 
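+ * (SMC_INTERRUPT_PREAMBLE is a hook left to the platform header,
+ * smc91x.h; on platforms with no such race it is expected to expand
+ * to nothing.)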
*/ + SMC_INTERRUPT_PREAMBLE; + + saved_pointer = SMC_GET_PTR(lp); + mask = SMC_GET_INT_MASK(lp); + SMC_SET_INT_MASK(lp, 0); + + /* set a timeout value, so I don't stay here forever */ + timeout = MAX_IRQ_LOOPS; + + do { + status = SMC_GET_INT(lp); + + DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n", + dev->name, status, mask, + ({ int meminfo; SMC_SELECT_BANK(lp, 0); + meminfo = SMC_GET_MIR(lp); + SMC_SELECT_BANK(lp, 2); meminfo; }), + SMC_GET_FIFO(lp)); + + status &= mask; + if (!status) + break; + + if (status & IM_TX_INT) { + /* do this before RX as it will free memory quickly */ + DBG(3, "%s: TX int\n", dev->name); + smc_tx(dev); + SMC_ACK_INT(lp, IM_TX_INT); + if (THROTTLE_TX_PKTS) + netif_wake_queue(dev); + } else if (status & IM_RCV_INT) { + DBG(3, "%s: RX irq\n", dev->name); + smc_rcv(dev); + } else if (status & IM_ALLOC_INT) { + DBG(3, "%s: Allocation irq\n", dev->name); + tasklet_hi_schedule(&lp->tx_task); + mask &= ~IM_ALLOC_INT; + } else if (status & IM_TX_EMPTY_INT) { + DBG(3, "%s: TX empty\n", dev->name); + mask &= ~IM_TX_EMPTY_INT; + + /* update stats */ + SMC_SELECT_BANK(lp, 0); + card_stats = SMC_GET_COUNTER(lp); + SMC_SELECT_BANK(lp, 2); + + /* single collisions */ + dev->stats.collisions += card_stats & 0xF; + card_stats >>= 4; + + /* multiple collisions */ + dev->stats.collisions += card_stats & 0xF; + } else if (status & IM_RX_OVRN_INT) { + DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name, + ({ int eph_st; SMC_SELECT_BANK(lp, 0); + eph_st = SMC_GET_EPH_STATUS(lp); + SMC_SELECT_BANK(lp, 2); eph_st; })); + SMC_ACK_INT(lp, IM_RX_OVRN_INT); + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + } else if (status & IM_EPH_INT) { + smc_eph_interrupt(dev); + } else if (status & IM_MDINT) { + SMC_ACK_INT(lp, IM_MDINT); + smc_phy_interrupt(dev); + } else if (status & IM_ERCV_INT) { + SMC_ACK_INT(lp, IM_ERCV_INT); + PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name); + } + } while (--timeout); + + /* restore register states */ + SMC_SET_PTR(lp, saved_pointer); + SMC_SET_INT_MASK(lp, mask); + spin_unlock(&lp->lock); + +#ifndef CONFIG_NET_POLL_CONTROLLER + if (timeout == MAX_IRQ_LOOPS) + PRINTK("%s: spurious interrupt (mask = 0x%02x)\n", + dev->name, mask); +#endif + DBG(3, "%s: Interrupt done (%d loops)\n", + dev->name, MAX_IRQ_LOOPS - timeout); + + /* + * We return IRQ_HANDLED unconditionally here even if there was + * nothing to do. There is a possibility that a packet might + * get enqueued into the chip right after TX_EMPTY_INT is raised + * but just before the CPU acknowledges the IRQ. + * Better take an unneeded IRQ in some occasions than complexifying + * the code for all cases. + */ + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void smc_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + smc_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/* Our watchdog timed out. 
Called by the networking layer */ +static void smc_timeout(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + int status, mask, eph_st, meminfo, fifo; + + DBG(2, "%s: %s\n", dev->name, __func__); + + spin_lock_irq(&lp->lock); + status = SMC_GET_INT(lp); + mask = SMC_GET_INT_MASK(lp); + fifo = SMC_GET_FIFO(lp); + SMC_SELECT_BANK(lp, 0); + eph_st = SMC_GET_EPH_STATUS(lp); + meminfo = SMC_GET_MIR(lp); + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); + PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x " + "MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n", + dev->name, status, mask, meminfo, fifo, eph_st ); + + smc_reset(dev); + smc_enable(dev); + + /* + * Reconfiguring the PHY doesn't seem like a bad idea here, but + * smc_phy_configure() calls msleep() which calls schedule_timeout() + * which calls schedule(). Hence we use a work queue. + */ + if (lp->phy_type != 0) + schedule_work(&lp->phy_configure); + + /* We can accept TX packets again */ + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* + * This routine will, depending on the values passed to it, + * either make it accept multicast packets, go into + * promiscuous mode (for TCPDUMP and cousins) or accept + * a select set of multicast packets + */ +static void smc_set_multicast_list(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned char multicast_table[8]; + int update_multicast = 0; + + DBG(2, "%s: %s\n", dev->name, __func__); + + if (dev->flags & IFF_PROMISC) { + DBG(2, "%s: RCR_PRMS\n", dev->name); + lp->rcr_cur_mode |= RCR_PRMS; + } + +/* BUG? I never disable promiscuous mode if multicasting was turned on. + Now, I turn off promiscuous mode, but I don't do anything to multicasting + when promiscuous mode is turned on. +*/ + + /* + * Here, I am setting this to accept all multicast packets. + * I don't need to zero the multicast table, because the flag is + * checked before the table is + */ + else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) { + DBG(2, "%s: RCR_ALMUL\n", dev->name); + lp->rcr_cur_mode |= RCR_ALMUL; + } + + /* + * This sets the internal hardware table to filter out unwanted + * multicast packets before they take up memory. + * + * The SMC chip uses a hash table where the high 6 bits of the CRC of + * address are the offset into the table. If that bit is 1, then the + * multicast packet is accepted. Otherwise, it's dropped silently. + * + * To use the 6 bits as an offset into the table, the high 3 bits are + * the number of the 8 bit register, while the low 3 bits are the bit + * within that register. 
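+ *
+ * (crc32_le() below computes the CRC in the reverse bit order from the
+ * one the chip expects; the invert3[] table compensates by reversing
+ * each 3-bit group before it is used as an index.)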
 + */ + else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + + /* table for flipping the order of 3 bits */ + static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7}; + + /* start with a table of all zeros: reject all */ + memset(multicast_table, 0, sizeof(multicast_table)); + + netdev_for_each_mc_addr(ha, dev) { + int position; + + /* only use the low order bits */ + position = crc32_le(~0, ha->addr, 6) & 0x3f; + + /* do some messy swapping to put the bit in the right spot */ + multicast_table[invert3[position&7]] |= + (1<<invert3[(position>>3)&7]); + } + + /* be sure I get rid of flags I might have set */ + lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL); + + /* now, the table can be loaded into the chipset */ + update_multicast = 1; + } else { + DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name); + lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL); + + /* + * since I'm disabling all multicast entirely, I need to + * clear the multicast list + */ + memset(multicast_table, 0, sizeof(multicast_table)); + update_multicast = 1; + } + + spin_lock_irq(&lp->lock); + SMC_SELECT_BANK(lp, 0); + SMC_SET_RCR(lp, lp->rcr_cur_mode); + if (update_multicast) { + SMC_SELECT_BANK(lp, 3); + SMC_SET_MCAST(lp, multicast_table); + } + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); +} + + +/* + * Open and Initialize the board + * + * Set up everything, reset the card, etc.. + */ +static int +smc_open(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + + DBG(2, "%s: %s\n", dev->name, __func__); + + /* + * Check that the address is valid. If it's not, refuse + * to bring the device up. The user must specify an + * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx + */ + if (!is_valid_ether_addr(dev->dev_addr)) { + PRINTK("%s: no valid ethernet hw addr\n", __func__); + return -EINVAL; + } + + /* Setup the default Register Modes */ + lp->tcr_cur_mode = TCR_DEFAULT; + lp->rcr_cur_mode = RCR_DEFAULT; + lp->rpc_cur_mode = RPC_DEFAULT | + lp->cfg.leda << RPC_LSXA_SHFT | + lp->cfg.ledb << RPC_LSXB_SHFT; + + /* + * If we are not using a MII interface, we need to + * monitor our own carrier signal to detect faults. + */ + if (lp->phy_type == 0) + lp->tcr_cur_mode |= TCR_MON_CSN; + + /* reset the hardware */ + smc_reset(dev); + smc_enable(dev); + + /* Configure the PHY, initialize the link state */ + if (lp->phy_type != 0) + smc_phy_configure(&lp->phy_configure); + else { + spin_lock_irq(&lp->lock); + smc_10bt_check_media(dev, 1); + spin_unlock_irq(&lp->lock); + } + + netif_start_queue(dev); + return 0; +} + +/* + * smc_close + * + * this makes the board clean up everything that it can + * and not talk to the outside world. 
Caused by + * an 'ifconfig ethX down' + */ +static int smc_close(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + + DBG(2, "%s: %s\n", dev->name, __func__); + + netif_stop_queue(dev); + netif_carrier_off(dev); + + /* clear everything */ + smc_shutdown(dev); + tasklet_kill(&lp->tx_task); + smc_phy_powerdown(dev); + return 0; +} + +/* + * Ethtool support + */ +static int +smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smc_local *lp = netdev_priv(dev); + int ret; + + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + + if (lp->phy_type != 0) { + spin_lock_irq(&lp->lock); + ret = mii_ethtool_gset(&lp->mii, cmd); + spin_unlock_irq(&lp->lock); + } else { + cmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP | SUPPORTED_AUI; + + if (lp->ctl_rspeed == 10) + ethtool_cmd_speed_set(cmd, SPEED_10); + else if (lp->ctl_rspeed == 100) + ethtool_cmd_speed_set(cmd, SPEED_100); + + cmd->autoneg = AUTONEG_DISABLE; + cmd->transceiver = XCVR_INTERNAL; + cmd->port = 0; + cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF; + + ret = 0; + } + + return ret; +} + +static int +smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smc_local *lp = netdev_priv(dev); + int ret; + + if (lp->phy_type != 0) { + spin_lock_irq(&lp->lock); + ret = mii_ethtool_sset(&lp->mii, cmd); + spin_unlock_irq(&lp->lock); + } else { + if (cmd->autoneg != AUTONEG_DISABLE || + cmd->speed != SPEED_10 || + (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || + (cmd->port != PORT_TP && cmd->port != PORT_AUI)) + return -EINVAL; + +// lp->port = cmd->port; + lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; + +// if (netif_running(dev)) +// smc_set_port(dev); + + ret = 0; + } + + return ret; +} + +static void +smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strncpy(info->driver, CARDNAME, sizeof(info->driver)); + strncpy(info->version, version, sizeof(info->version)); + strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); +} + +static int smc_ethtool_nwayreset(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + int ret = -EINVAL; + + if (lp->phy_type != 0) { + spin_lock_irq(&lp->lock); + ret = mii_nway_restart(&lp->mii); + spin_unlock_irq(&lp->lock); + } + + return ret; +} + +static u32 smc_ethtool_getmsglevel(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + return lp->msg_enable; +} + +static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct smc_local *lp = netdev_priv(dev); + lp->msg_enable = level; +} + +static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word) +{ + u16 ctl; + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + spin_lock_irq(&lp->lock); + /* load word into GP register */ + SMC_SELECT_BANK(lp, 1); + SMC_SET_GP(lp, word); + /* set the address to put the data in EEPROM */ + SMC_SELECT_BANK(lp, 2); + SMC_SET_PTR(lp, addr); + /* tell it to write */ + SMC_SELECT_BANK(lp, 1); + ctl = SMC_GET_CTL(lp); + SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE)); + /* wait for it to finish */ + do { + udelay(1); + } while (SMC_GET_CTL(lp) & CTL_STORE); + /* clean up */ + SMC_SET_CTL(lp, ctl); + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); + return 0; +} + +static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word) +{ + u16 ctl; + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; 
+ + spin_lock_irq(&lp->lock); + /* set the EEPROM address to get the data from */ + SMC_SELECT_BANK(lp, 2); + SMC_SET_PTR(lp, addr | PTR_READ); + /* tell it to load */ + SMC_SELECT_BANK(lp, 1); + SMC_SET_GP(lp, 0xffff); /* init to known */ + ctl = SMC_GET_CTL(lp); + SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD)); + /* wait for it to finish */ + do { + udelay(1); + } while (SMC_GET_CTL(lp) & CTL_RELOAD); + /* read word from GP register */ + *word = SMC_GET_GP(lp); + /* clean up */ + SMC_SET_CTL(lp, ctl); + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); + return 0; +} + +static int smc_ethtool_geteeprom_len(struct net_device *dev) +{ + return 0x23 * 2; +} + +static int smc_ethtool_geteeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int i; + int imax; + + DBG(1, "Reading %d bytes at %d(0x%x)\n", + eeprom->len, eeprom->offset, eeprom->offset); + imax = smc_ethtool_geteeprom_len(dev); + for (i = 0; i < eeprom->len; i += 2) { + int ret; + u16 wbuf; + int offset = i + eeprom->offset; + if (offset > imax) + break; + ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf); + if (ret != 0) + return ret; + DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1); + data[i] = (wbuf >> 8) & 0xff; + data[i+1] = wbuf & 0xff; + } + return 0; +} + +static int smc_ethtool_seteeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int i; + int imax; + + DBG(1, "Writing %d bytes to %d(0x%x)\n", + eeprom->len, eeprom->offset, eeprom->offset); + imax = smc_ethtool_geteeprom_len(dev); + for (i = 0; i < eeprom->len; i += 2) { + int ret; + u16 wbuf; + int offset = i + eeprom->offset; + if (offset > imax) + break; + wbuf = (data[i] << 8) | data[i + 1]; + DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1); + ret = smc_write_eeprom_word(dev, offset >> 1, wbuf); + if (ret != 0) + return ret; + } + return 0; +} + + +static const struct ethtool_ops smc_ethtool_ops = { + .get_settings = smc_ethtool_getsettings, + .set_settings = smc_ethtool_setsettings, + .get_drvinfo = smc_ethtool_getdrvinfo, + + .get_msglevel = smc_ethtool_getmsglevel, + .set_msglevel = smc_ethtool_setmsglevel, + .nway_reset = smc_ethtool_nwayreset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = smc_ethtool_geteeprom_len, + .get_eeprom = smc_ethtool_geteeprom, + .set_eeprom = smc_ethtool_seteeprom, +}; + +static const struct net_device_ops smc_netdev_ops = { + .ndo_open = smc_open, + .ndo_stop = smc_close, + .ndo_start_xmit = smc_hard_start_xmit, + .ndo_tx_timeout = smc_timeout, + .ndo_set_multicast_list = smc_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = smc_poll_controller, +#endif +}; + +/* + * smc_findirq + * + * This routine has a simple purpose -- make the SMC chip generate an + * interrupt, so an auto-detect routine can detect it, and find the IRQ, + */ +/* + * does this still work? + * + * I just deleted auto_irq.c, since it was never built... + * --jgarzik + */ +static int __devinit smc_findirq(struct smc_local *lp) +{ + void __iomem *ioaddr = lp->base; + int timeout = 20; + unsigned long cookie; + + DBG(2, "%s: %s\n", CARDNAME, __func__); + + cookie = probe_irq_on(); + + /* + * What I try to do here is trigger an ALLOC_INT. This is done + * by allocating a small chunk of memory, which will give an interrupt + * when done. 
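+	 *
+	 * probe_irq_on() arms the kernel's IRQ autoprobe: any unassigned
+	 * interrupt line that fires between here and probe_irq_off() is
+	 * recorded, and probe_irq_off(cookie) returns that line (or 0 if
+	 * no single line fired unambiguously).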
+ */ + /* enable ALLOCation interrupts ONLY */ + SMC_SELECT_BANK(lp, 2); + SMC_SET_INT_MASK(lp, IM_ALLOC_INT); + + /* + * Allocate 512 bytes of memory. Note that the chip was just + * reset so all the memory is available + */ + SMC_SET_MMU_CMD(lp, MC_ALLOC | 1); + + /* + * Wait until positive that the interrupt has been generated + */ + do { + int int_status; + udelay(10); + int_status = SMC_GET_INT(lp); + if (int_status & IM_ALLOC_INT) + break; /* got the interrupt */ + } while (--timeout); + + /* + * there is really nothing that I can do here if timeout fails, + * as autoirq_report will return a 0 anyway, which is what I + * want in this case. Plus, the clean up is needed in both + * cases. + */ + + /* and disable all interrupts again */ + SMC_SET_INT_MASK(lp, 0); + + /* and return what I found */ + return probe_irq_off(cookie); +} + +/* + * Function: smc_probe(unsigned long ioaddr) + * + * Purpose: + * Tests to see if a given ioaddr points to an SMC91x chip. + * Returns a 0 on success + * + * Algorithm: + * (1) see if the high byte of BANK_SELECT is 0x33 + * (2) compare the ioaddr with the base register's address + * (3) see if I recognize the chip ID in the appropriate register + * + * Here I do typical initialization tasks. + * + * o Initialize the structure if needed + * o print out my vanity message if not done so already + * o print out what type of hardware is detected + * o print out the ethernet address + * o find the IRQ + * o set up my private data + * o configure the dev structure with my subroutines + * o actually GRAB the irq. + * o GRAB the region + */ +static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr, + unsigned long irq_flags) +{ + struct smc_local *lp = netdev_priv(dev); + static int version_printed = 0; + int retval; + unsigned int val, revision_register; + const char *version_string; + + DBG(2, "%s: %s\n", CARDNAME, __func__); + + /* First, see if the high byte is 0x33 */ + val = SMC_CURRENT_BANK(lp); + DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val); + if ((val & 0xFF00) != 0x3300) { + if ((val & 0xFF) == 0x33) { + printk(KERN_WARNING + "%s: Detected possible byte-swapped interface" + " at IOADDR %p\n", CARDNAME, ioaddr); + } + retval = -ENODEV; + goto err_out; + } + + /* + * The above MIGHT indicate a device, but I need to write to + * further test this. + */ + SMC_SELECT_BANK(lp, 0); + val = SMC_CURRENT_BANK(lp); + if ((val & 0xFF00) != 0x3300) { + retval = -ENODEV; + goto err_out; + } + + /* + * well, we've already written once, so hopefully another + * time won't hurt. This time, I need to switch the bank + * register to bank 1, so I can access the base address + * register + */ + SMC_SELECT_BANK(lp, 1); + val = SMC_GET_BASE(lp); + val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT; + if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) { + printk("%s: IOADDR %p doesn't match configuration (%x).\n", + CARDNAME, ioaddr, val); + } + + /* + * check if the revision register is something that I + * recognize. These might need to be added to later, + * as future revisions could be added. + */ + SMC_SELECT_BANK(lp, 3); + revision_register = SMC_GET_REV(lp); + DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register); + version_string = chip_ids[ (revision_register >> 4) & 0xF]; + if (!version_string || (revision_register & 0xff00) != 0x3300) { + /* I don't recognize this chip, so... 
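+		   report the raw revision register in the printk below;
+		   that value is what a new chip_ids[] entry would need.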
*/ + printk("%s: IO %p: Unrecognized revision register 0x%04x" + ", Contact author.\n", CARDNAME, + ioaddr, revision_register); + + retval = -ENODEV; + goto err_out; + } + + /* At this point I'll assume that the chip is an SMC91x. */ + if (version_printed++ == 0) + printk("%s", version); + + /* fill in some of the fields */ + dev->base_addr = (unsigned long)ioaddr; + lp->base = ioaddr; + lp->version = revision_register & 0xff; + spin_lock_init(&lp->lock); + + /* Get the MAC address */ + SMC_SELECT_BANK(lp, 1); + SMC_GET_MAC_ADDR(lp, dev->dev_addr); + + /* now, reset the chip, and put it into a known state */ + smc_reset(dev); + + /* + * If dev->irq is 0, then the device has to be banged on to see + * what the IRQ is. + * + * This banging doesn't always detect the IRQ, for unknown reasons. + * a workaround is to reset the chip and try again. + * + * Interestingly, the DOS packet driver *SETS* the IRQ on the card to + * be what is requested on the command line. I don't do that, mostly + * because the card that I have uses a non-standard method of accessing + * the IRQs, and because this _should_ work in most configurations. + * + * Specifying an IRQ is done with the assumption that the user knows + * what (s)he is doing. No checking is done!!!! + */ + if (dev->irq < 1) { + int trials; + + trials = 3; + while (trials--) { + dev->irq = smc_findirq(lp); + if (dev->irq) + break; + /* kick the card and try again */ + smc_reset(dev); + } + } + if (dev->irq == 0) { + printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n", + dev->name); + retval = -ENODEV; + goto err_out; + } + dev->irq = irq_canonicalize(dev->irq); + + /* Fill in the fields of the device structure with ethernet values. */ + ether_setup(dev); + + dev->watchdog_timeo = msecs_to_jiffies(watchdog); + dev->netdev_ops = &smc_netdev_ops; + dev->ethtool_ops = &smc_ethtool_ops; + + tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); + INIT_WORK(&lp->phy_configure, smc_phy_configure); + lp->dev = dev; + lp->mii.phy_id_mask = 0x1f; + lp->mii.reg_num_mask = 0x1f; + lp->mii.force_media = 0; + lp->mii.full_duplex = 0; + lp->mii.dev = dev; + lp->mii.mdio_read = smc_phy_read; + lp->mii.mdio_write = smc_phy_write; + + /* + * Locate the phy, if any. + */ + if (lp->version >= (CHIP_91100 << 4)) + smc_phy_detect(dev); + + /* then shut everything down to save power */ + smc_shutdown(dev); + smc_phy_powerdown(dev); + + /* Set default parameters */ + lp->msg_enable = NETIF_MSG_LINK; + lp->ctl_rfduplx = 0; + lp->ctl_rspeed = 10; + + if (lp->version >= (CHIP_91100 << 4)) { + lp->ctl_rfduplx = 1; + lp->ctl_rspeed = 100; + } + + /* Grab the IRQ */ + retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev); + if (retval) + goto err_out; + +#ifdef CONFIG_ARCH_PXA +# ifdef SMC_USE_PXA_DMA + lp->cfg.flags |= SMC91X_USE_DMA; +# endif + if (lp->cfg.flags & SMC91X_USE_DMA) { + int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, + smc_pxa_dma_irq, NULL); + if (dma >= 0) + dev->dma = dma; + } +#endif + + retval = register_netdev(dev); + if (retval == 0) { + /* now, print out the card info, in a short format.. */ + printk("%s: %s (rev %d) at %p IRQ %d", + dev->name, version_string, revision_register & 0x0f, + lp->base, dev->irq); + + if (dev->dma != (unsigned char)-1) + printk(" DMA %d", dev->dma); + + printk("%s%s\n", + lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "", + THROTTLE_TX_PKTS ? " [throttle_tx]" : ""); + + if (!is_valid_ether_addr(dev->dev_addr)) { + printk("%s: Invalid ethernet MAC address. 
Please " + "set using ifconfig\n", dev->name); + } else { + /* Print the Ethernet address */ + printk("%s: Ethernet addr: %pM\n", + dev->name, dev->dev_addr); + } + + if (lp->phy_type == 0) { + PRINTK("%s: No PHY found\n", dev->name); + } else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) { + PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name); + } else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) { + PRINTK("%s: PHY LAN83C180\n", dev->name); + } + } + +err_out: +#ifdef CONFIG_ARCH_PXA + if (retval && dev->dma != (unsigned char)-1) + pxa_free_dma(dev->dma); +#endif + return retval; +} + +static int smc_enable_device(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smc_local *lp = netdev_priv(ndev); + unsigned long flags; + unsigned char ecor, ecsr; + void __iomem *addr; + struct resource * res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); + if (!res) + return 0; + + /* + * Map the attribute space. This is overkill, but clean. + */ + addr = ioremap(res->start, ATTRIB_SIZE); + if (!addr) + return -ENOMEM; + + /* + * Reset the device. We must disable IRQs around this + * since a reset causes the IRQ line become active. + */ + local_irq_save(flags); + ecor = readb(addr + (ECOR << SMC_IO_SHIFT)) & ~ECOR_RESET; + writeb(ecor | ECOR_RESET, addr + (ECOR << SMC_IO_SHIFT)); + readb(addr + (ECOR << SMC_IO_SHIFT)); + + /* + * Wait 100us for the chip to reset. + */ + udelay(100); + + /* + * The device will ignore all writes to the enable bit while + * reset is asserted, even if the reset bit is cleared in the + * same write. Must clear reset first, then enable the device. + */ + writeb(ecor, addr + (ECOR << SMC_IO_SHIFT)); + writeb(ecor | ECOR_ENABLE, addr + (ECOR << SMC_IO_SHIFT)); + + /* + * Set the appropriate byte/word mode. + */ + ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; + if (!SMC_16BIT(lp)) + ecsr |= ECSR_IOIS8; + writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); + local_irq_restore(flags); + + iounmap(addr); + + /* + * Wait for the chip to wake up. We could poll the control + * register in the main register space, but that isn't mapped + * yet. We know this is going to take 750us. 
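+	 * The msleep(1) below sleeps for at least one millisecond,
+	 * which comfortably covers that.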
+ */ + msleep(1); + + return 0; +} + +static int smc_request_attrib(struct platform_device *pdev, + struct net_device *ndev) +{ + struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); + struct smc_local *lp __maybe_unused = netdev_priv(ndev); + + if (!res) + return 0; + + if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME)) + return -EBUSY; + + return 0; +} + +static void smc_release_attrib(struct platform_device *pdev, + struct net_device *ndev) +{ + struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); + struct smc_local *lp __maybe_unused = netdev_priv(ndev); + + if (res) + release_mem_region(res->start, ATTRIB_SIZE); +} + +static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) +{ + if (SMC_CAN_USE_DATACS) { + struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); + struct smc_local *lp = netdev_priv(ndev); + + if (!res) + return; + + if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { + printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME); + return; + } + + lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); + } +} + +static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) +{ + if (SMC_CAN_USE_DATACS) { + struct smc_local *lp = netdev_priv(ndev); + struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); + + if (lp->datacs) + iounmap(lp->datacs); + + lp->datacs = NULL; + + if (res) + release_mem_region(res->start, SMC_DATA_EXTENT); + } +} + +/* + * smc_init(void) + * Input parameters: + * dev->base_addr == 0, try to find all possible locations + * dev->base_addr > 0x1ff, this is the address to check + * dev->base_addr == , return failure code + * + * Output: + * 0 --> there is a device + * anything else, error + */ +static int __devinit smc_drv_probe(struct platform_device *pdev) +{ + struct smc91x_platdata *pd = pdev->dev.platform_data; + struct smc_local *lp; + struct net_device *ndev; + struct resource *res, *ires; + unsigned int __iomem *addr; + unsigned long irq_flags = SMC_IRQ_FLAGS; + int ret; + + ndev = alloc_etherdev(sizeof(struct smc_local)); + if (!ndev) { + printk("%s: could not allocate device.\n", CARDNAME); + ret = -ENOMEM; + goto out; + } + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* get configuration from platform data, only allow use of + * bus width if both SMC_CAN_USE_xxx and SMC91X_USE_xxx are set. + */ + + lp = netdev_priv(ndev); + + if (pd) { + memcpy(&lp->cfg, pd, sizeof(lp->cfg)); + lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); + } else { + lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; + lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; + lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; + lp->cfg.flags |= (nowait) ? 
SMC91X_NOWAIT : 0; + } + + if (!lp->cfg.leda && !lp->cfg.ledb) { + lp->cfg.leda = RPC_LSA_DEFAULT; + lp->cfg.ledb = RPC_LSB_DEFAULT; + } + + ndev->dma = (unsigned char)-1; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENODEV; + goto out_free_netdev; + } + + + if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) { + ret = -EBUSY; + goto out_free_netdev; + } + + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!ires) { + ret = -ENODEV; + goto out_release_io; + } + + ndev->irq = ires->start; + + if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) + irq_flags = ires->flags & IRQF_TRIGGER_MASK; + + ret = smc_request_attrib(pdev, ndev); + if (ret) + goto out_release_io; +#if defined(CONFIG_SA1100_ASSABET) + NCR_0 |= NCR_ENET_OSC_EN; +#endif + platform_set_drvdata(pdev, ndev); + ret = smc_enable_device(pdev); + if (ret) + goto out_release_attrib; + + addr = ioremap(res->start, SMC_IO_EXTENT); + if (!addr) { + ret = -ENOMEM; + goto out_release_attrib; + } + +#ifdef CONFIG_ARCH_PXA + { + struct smc_local *lp = netdev_priv(ndev); + lp->device = &pdev->dev; + lp->physaddr = res->start; + } +#endif + + ret = smc_probe(ndev, addr, irq_flags); + if (ret != 0) + goto out_iounmap; + + smc_request_datacs(pdev, ndev); + + return 0; + + out_iounmap: + platform_set_drvdata(pdev, NULL); + iounmap(addr); + out_release_attrib: + smc_release_attrib(pdev, ndev); + out_release_io: + release_mem_region(res->start, SMC_IO_EXTENT); + out_free_netdev: + free_netdev(ndev); + out: + printk("%s: not found (%d).\n", CARDNAME, ret); + + return ret; +} + +static int __devexit smc_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smc_local *lp = netdev_priv(ndev); + struct resource *res; + + platform_set_drvdata(pdev, NULL); + + unregister_netdev(ndev); + + free_irq(ndev->irq, ndev); + +#ifdef CONFIG_ARCH_PXA + if (ndev->dma != (unsigned char)-1) + pxa_free_dma(ndev->dma); +#endif + iounmap(lp->base); + + smc_release_datacs(pdev,ndev); + smc_release_attrib(pdev,ndev); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, SMC_IO_EXTENT); + + free_netdev(ndev); + + return 0; +} + +static int smc_drv_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + + if (ndev) { + if (netif_running(ndev)) { + netif_device_detach(ndev); + smc_shutdown(ndev); + smc_phy_powerdown(ndev); + } + } + return 0; +} + +static int smc_drv_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + + if (ndev) { + struct smc_local *lp = netdev_priv(ndev); + smc_enable_device(pdev); + if (netif_running(ndev)) { + smc_reset(ndev); + smc_enable(ndev); + if (lp->phy_type != 0) + smc_phy_configure(&lp->phy_configure); + netif_device_attach(ndev); + } + } + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id smc91x_match[] = { + { .compatible = "smsc,lan91c94", }, + { .compatible = "smsc,lan91c111", }, + {}, +}; +MODULE_DEVICE_TABLE(of, smc91x_match); +#else +#define smc91x_match NULL +#endif + +static struct dev_pm_ops smc_drv_pm_ops = { + .suspend = smc_drv_suspend, + .resume = smc_drv_resume, +}; + +static struct platform_driver smc_driver = 
{ + .probe = smc_drv_probe, + .remove = __devexit_p(smc_drv_remove), + .driver = { + .name = CARDNAME, + .owner = THIS_MODULE, + .pm = &smc_drv_pm_ops, + .of_match_table = smc91x_match, + }, +}; + +static int __init smc_init(void) +{ + return platform_driver_register(&smc_driver); +} + +static void __exit smc_cleanup(void) +{ + platform_driver_unregister(&smc_driver); +} + +module_init(smc_init); +module_exit(smc_cleanup); diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h new file mode 100644 index 0000000..5f53fbb --- /dev/null +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -0,0 +1,1180 @@ +/*------------------------------------------------------------------------ + . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device. + . + . Copyright (C) 1996 by Erik Stahlman + . Copyright (C) 2001 Standard Microsystems Corporation + . Developed by Simple Network Magic Corporation + . Copyright (C) 2003 Monta Vista Software, Inc. + . Unified SMC91x driver by Nicolas Pitre + . + . This program is free software; you can redistribute it and/or modify + . it under the terms of the GNU General Public License as published by + . the Free Software Foundation; either version 2 of the License, or + . (at your option) any later version. + . + . This program is distributed in the hope that it will be useful, + . but WITHOUT ANY WARRANTY; without even the implied warranty of + . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + . GNU General Public License for more details. + . + . You should have received a copy of the GNU General Public License + . along with this program; if not, write to the Free Software + . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + . + . Information contained in this file was obtained from the LAN91C111 + . manual from SMC. To get a copy, if you really want one, you can find + . information under www.smsc.com. + . + . Authors + . Erik Stahlman + . Daris A Nevil + . Nicolas Pitre + . + ---------------------------------------------------------------------------*/ +#ifndef _SMC91X_H_ +#define _SMC91X_H_ + +#include + +/* + * Define your architecture specific bus configuration parameters here. 
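+ *
+ * Each platform block below settles four things: which bus widths the
+ * board glue can handle (SMC_CAN_USE_8BIT/16BIT/32BIT), how register
+ * offsets are strided (SMC_IO_SHIFT), the raw accessor macros
+ * (SMC_inb/SMC_outw/SMC_insw/...), and the IRQ trigger type
+ * (SMC_IRQ_FLAGS, where -1 means "take it from the platform resource").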
+ */ + +#if defined(CONFIG_ARCH_LUBBOCK) ||\ + defined(CONFIG_MACH_MAINSTONE) ||\ + defined(CONFIG_MACH_ZYLONITE) ||\ + defined(CONFIG_MACH_LITTLETON) ||\ + defined(CONFIG_MACH_ZYLONITE2) ||\ + defined(CONFIG_ARCH_VIPER) ||\ + defined(CONFIG_MACH_STARGATE2) + +#include + +/* Now the bus width is specified in the platform data + * pretend here to support all I/O access types + */ +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_NOWAIT 1 + +#define SMC_IO_SHIFT (lp->io_shift) + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) +#define SMC_IRQ_FLAGS (-1) /* from resource */ + +/* We actually can't write halfwords properly if not word aligned */ +static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) +{ + if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { + unsigned int v = val << 16; + v |= readl(ioaddr + (reg & ~2)) & 0xffff; + writel(v, ioaddr + (reg & ~2)); + } else { + writew(val, ioaddr + reg); + } +} + +#elif defined(CONFIG_SA1100_PLEB) +/* We can only do 16-bit reads and writes in the static memory space. */ +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 +#define SMC_IO_SHIFT 0 +#define SMC_NOWAIT 1 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) + +#define SMC_IRQ_FLAGS (-1) + +#elif defined(CONFIG_SA1100_ASSABET) + +#include + +/* We can only do 8-bit reads and writes in the static memory space. */ +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 0 +#define SMC_CAN_USE_32BIT 0 +#define SMC_NOWAIT 1 + +/* The first two address lines aren't connected... 
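+   every register therefore sits on a 4-byte boundary, hence the
+   shift of 2 below.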
*/ +#define SMC_IO_SHIFT 2 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) +#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) +#define SMC_IRQ_FLAGS (-1) /* from resource */ + +#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \ + defined(CONFIG_MACH_NOMADIK_8815NHK) + +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 +#define SMC_IO_SHIFT 0 +#define SMC_NOWAIT 1 + +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) + +#elif defined(CONFIG_ARCH_INNOKOM) || \ + defined(CONFIG_ARCH_PXA_IDP) || \ + defined(CONFIG_ARCH_RAMSES) || \ + defined(CONFIG_ARCH_PCM027) + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_IO_SHIFT 0 +#define SMC_NOWAIT 1 +#define SMC_USE_PXA_DMA 1 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) +#define SMC_IRQ_FLAGS (-1) /* from resource */ + +/* We actually can't write halfwords properly if not word aligned */ +static inline void +SMC_outw(u16 val, void __iomem *ioaddr, int reg) +{ + if (reg & 2) { + unsigned int v = val << 16; + v |= readl(ioaddr + (reg & ~2)) & 0xffff; + writel(v, ioaddr + (reg & ~2)); + } else { + writew(val, ioaddr + reg); + } +} + +#elif defined(CONFIG_SH_SH4202_MICRODEV) + +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 + +#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000) +#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000) +#define SMC_inl(a, r) inl((a) + (r) - 0xa0000000) +#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000) +#define SMC_outw(v, a, r) outw(v, (a) + (r) - 0xa0000000) +#define SMC_outl(v, a, r) outl(v, (a) + (r) - 0xa0000000) +#define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l) +#define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l) +#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l) +#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l) + +#define SMC_IRQ_FLAGS (0) + +#elif defined(CONFIG_M32R) + +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 + +#define SMC_inb(a, r) inb(((u32)a) + (r)) +#define SMC_inw(a, r) inw(((u32)a) + (r)) +#define SMC_outb(v, a, r) outb(v, ((u32)a) + (r)) +#define SMC_outw(v, a, r) outw(v, ((u32)a) + (r)) +#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l) +#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l) + +#define SMC_IRQ_FLAGS (0) + +#define RPC_LSA_DEFAULT RPC_LED_TX_RX +#define RPC_LSB_DEFAULT RPC_LED_100_10 + +#elif defined(CONFIG_ARCH_VERSATILE) + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_NOWAIT 1 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) +#define SMC_IRQ_FLAGS (-1) /* from 
resource */ + +#elif defined(CONFIG_MN10300) + +/* + * MN10300/AM33 configuration + */ + +#include + +#elif defined(CONFIG_ARCH_MSM) + +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 +#define SMC_NOWAIT 1 + +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) + +#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH + +#elif defined(CONFIG_COLDFIRE) + +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 +#define SMC_NOWAIT 1 + +static inline void mcf_insw(void *a, unsigned char *p, int l) +{ + u16 *wp = (u16 *) p; + while (l-- > 0) + *wp++ = readw(a); +} + +static inline void mcf_outsw(void *a, unsigned char *p, int l) +{ + u16 *wp = (u16 *) p; + while (l-- > 0) + writew(*wp++, a); +} + +#define SMC_inw(a, r) _swapw(readw((a) + (r))) +#define SMC_outw(v, a, r) writew(_swapw(v), (a) + (r)) +#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l) +#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l) + +#define SMC_IRQ_FLAGS (IRQF_DISABLED) + +#else + +/* + * Default configuration + */ + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_NOWAIT 1 + +#define SMC_IO_SHIFT (lp->io_shift) + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) + +#define RPC_LSA_DEFAULT RPC_LED_100_10 +#define RPC_LSB_DEFAULT RPC_LED_TX_RX + +#endif + + +/* store this information for the driver.. */ +struct smc_local { + /* + * If I have to wait until memory is available to send a + * packet, I will store the skbuff here, until I get the + * desired memory. Then, I'll send it out and free it. + */ + struct sk_buff *pending_tx_skb; + struct tasklet_struct tx_task; + + /* version/revision of the SMC91x chip */ + int version; + + /* Contains the current active transmission mode */ + int tcr_cur_mode; + + /* Contains the current active receive mode */ + int rcr_cur_mode; + + /* Contains the current active receive/phy mode */ + int rpc_cur_mode; + int ctl_rfduplx; + int ctl_rspeed; + + u32 msg_enable; + u32 phy_type; + struct mii_if_info mii; + + /* work queue */ + struct work_struct phy_configure; + struct net_device *dev; + int work_pending; + + spinlock_t lock; + +#ifdef CONFIG_ARCH_PXA + /* DMA needs the physical address of the chip */ + u_long physaddr; + struct device *device; +#endif + void __iomem *base; + void __iomem *datacs; + + /* the low address lines on some platforms aren't connected... */ + int io_shift; + + struct smc91x_platdata cfg; +}; + +#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT) +#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT) +#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT) + +#ifdef CONFIG_ARCH_PXA +/* + * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is + * always happening in irq context so no need to worry about races. TX is + * different and probably not worth it for that reason, and not as critical + * as RX which can overrun memory and lose packets. 
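+ *
+ * Mechanically, SMC_insl/SMC_insw are redefined below to helpers that
+ * map the receive buffer with dma_map_single(), program a
+ * descriptor-less PXA channel (DCSR/DSADR/DTADR/DCMD), spin on
+ * DCSR_STOPSTATE until the burst from the chip's data port completes,
+ * and fall back to readsl()/readsw() when no DMA channel was allocated.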
+ */ +#include +#include + +#ifdef SMC_insl +#undef SMC_insl +#define SMC_insl(a, r, p, l) \ + smc_pxa_dma_insl(a, lp, r, dev->dma, p, l) +static inline void +smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, + u_char *buf, int len) +{ + u_long physaddr = lp->physaddr; + dma_addr_t dmabuf; + + /* fallback if no DMA available */ + if (dma == (unsigned char)-1) { + readsl(ioaddr + reg, buf, len); + return; + } + + /* 64 bit alignment is required for memory to memory DMA */ + if ((long)buf & 4) { + *((u32 *)buf) = SMC_inl(ioaddr, reg); + buf += 4; + len--; + } + + len *= 4; + dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE); + DCSR(dma) = DCSR_NODESC; + DTADR(dma) = dmabuf; + DSADR(dma) = physaddr + reg; + DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | + DCMD_WIDTH4 | (DCMD_LENGTH & len)); + DCSR(dma) = DCSR_NODESC | DCSR_RUN; + while (!(DCSR(dma) & DCSR_STOPSTATE)) + cpu_relax(); + DCSR(dma) = 0; + dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE); +} +#endif + +#ifdef SMC_insw +#undef SMC_insw +#define SMC_insw(a, r, p, l) \ + smc_pxa_dma_insw(a, lp, r, dev->dma, p, l) +static inline void +smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, + u_char *buf, int len) +{ + u_long physaddr = lp->physaddr; + dma_addr_t dmabuf; + + /* fallback if no DMA available */ + if (dma == (unsigned char)-1) { + readsw(ioaddr + reg, buf, len); + return; + } + + /* 64 bit alignment is required for memory to memory DMA */ + while ((long)buf & 6) { + *((u16 *)buf) = SMC_inw(ioaddr, reg); + buf += 2; + len--; + } + + len *= 2; + dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE); + DCSR(dma) = DCSR_NODESC; + DTADR(dma) = dmabuf; + DSADR(dma) = physaddr + reg; + DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | + DCMD_WIDTH2 | (DCMD_LENGTH & len)); + DCSR(dma) = DCSR_NODESC | DCSR_RUN; + while (!(DCSR(dma) & DCSR_STOPSTATE)) + cpu_relax(); + DCSR(dma) = 0; + dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE); +} +#endif + +static void +smc_pxa_dma_irq(int dma, void *dummy) +{ + DCSR(dma) = 0; +} +#endif /* CONFIG_ARCH_PXA */ + + +/* + * Everything a particular hardware setup needs should have been defined + * at this point. Add stubs for the undefined cases, mainly to avoid + * compilation warnings since they'll be optimized away, or to prevent buggy + * use of them. + */ + +#if ! SMC_CAN_USE_32BIT +#define SMC_inl(ioaddr, reg) ({ BUG(); 0; }) +#define SMC_outl(x, ioaddr, reg) BUG() +#define SMC_insl(a, r, p, l) BUG() +#define SMC_outsl(a, r, p, l) BUG() +#endif + +#if !defined(SMC_insl) || !defined(SMC_outsl) +#define SMC_insl(a, r, p, l) BUG() +#define SMC_outsl(a, r, p, l) BUG() +#endif + +#if ! SMC_CAN_USE_16BIT + +/* + * Any 16-bit access is performed with two 8-bit accesses if the hardware + * can't do it directly. Most registers are 16-bit so those are mandatory. + */ +#define SMC_outw(x, ioaddr, reg) \ + do { \ + unsigned int __val16 = (x); \ + SMC_outb( __val16, ioaddr, reg ); \ + SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ + } while (0) +#define SMC_inw(ioaddr, reg) \ + ({ \ + unsigned int __val16; \ + __val16 = SMC_inb( ioaddr, reg ); \ + __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \ + __val16; \ + }) + +#define SMC_insw(a, r, p, l) BUG() +#define SMC_outsw(a, r, p, l) BUG() + +#endif + +#if !defined(SMC_insw) || !defined(SMC_outsw) +#define SMC_insw(a, r, p, l) BUG() +#define SMC_outsw(a, r, p, l) BUG() +#endif + +#if ! 
SMC_CAN_USE_8BIT +#define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) +#define SMC_outb(x, ioaddr, reg) BUG() +#define SMC_insb(a, r, p, l) BUG() +#define SMC_outsb(a, r, p, l) BUG() +#endif + +#if !defined(SMC_insb) || !defined(SMC_outsb) +#define SMC_insb(a, r, p, l) BUG() +#define SMC_outsb(a, r, p, l) BUG() +#endif + +#ifndef SMC_CAN_USE_DATACS +#define SMC_CAN_USE_DATACS 0 +#endif + +#ifndef SMC_IO_SHIFT +#define SMC_IO_SHIFT 0 +#endif + +#ifndef SMC_IRQ_FLAGS +#define SMC_IRQ_FLAGS IRQF_TRIGGER_RISING +#endif + +#ifndef SMC_INTERRUPT_PREAMBLE +#define SMC_INTERRUPT_PREAMBLE +#endif + + +/* Because of bank switching, the LAN91x uses only 16 I/O ports */ +#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT) +#define SMC_DATA_EXTENT (4) + +/* + . Bank Select Register: + . + . yyyy yyyy 0000 00xx + . xx = bank number + . yyyy yyyy = 0x33, for identification purposes. +*/ +#define BANK_SELECT (14 << SMC_IO_SHIFT) + + +// Transmit Control Register +/* BANK 0 */ +#define TCR_REG(lp) SMC_REG(lp, 0x0000, 0) +#define TCR_ENABLE 0x0001 // When 1 we can transmit +#define TCR_LOOP 0x0002 // Controls output pin LBK +#define TCR_FORCOL 0x0004 // When 1 will force a collision +#define TCR_PAD_EN 0x0080 // When 1 will pad tx frames < 64 bytes w/0 +#define TCR_NOCRC 0x0100 // When 1 will not append CRC to tx frames +#define TCR_MON_CSN 0x0400 // When 1 tx monitors carrier +#define TCR_FDUPLX 0x0800 // When 1 enables full duplex operation +#define TCR_STP_SQET 0x1000 // When 1 stops tx if Signal Quality Error +#define TCR_EPH_LOOP 0x2000 // When 1 enables EPH block loopback +#define TCR_SWFDUP 0x8000 // When 1 enables Switched Full Duplex mode + +#define TCR_CLEAR 0 /* do NOTHING */ +/* the default settings for the TCR register : */ +#define TCR_DEFAULT (TCR_ENABLE | TCR_PAD_EN) + + +// EPH Status Register +/* BANK 0 */ +#define EPH_STATUS_REG(lp) SMC_REG(lp, 0x0002, 0) +#define ES_TX_SUC 0x0001 // Last TX was successful +#define ES_SNGL_COL 0x0002 // Single collision detected for last tx +#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx +#define ES_LTX_MULT 0x0008 // Last tx was a multicast +#define ES_16COL 0x0010 // 16 Collisions Reached +#define ES_SQET 0x0020 // Signal Quality Error Test +#define ES_LTXBRD 0x0040 // Last tx was a broadcast +#define ES_TXDEFR 0x0080 // Transmit Deferred +#define ES_LATCOL 0x0200 // Late collision detected on last tx +#define ES_LOSTCARR 0x0400 // Lost Carrier Sense +#define ES_EXC_DEF 0x0800 // Excessive Deferral +#define ES_CTR_ROL 0x1000 // Counter Roll Over indication +#define ES_LINK_OK 0x4000 // Driven by inverted value of nLNK pin +#define ES_TXUNRN 0x8000 // Tx Underrun + + +// Receive Control Register +/* BANK 0 */ +#define RCR_REG(lp) SMC_REG(lp, 0x0004, 0) +#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted +#define RCR_PRMS 0x0002 // Enable promiscuous mode +#define RCR_ALMUL 0x0004 // When set accepts all multicast frames +#define RCR_RXEN 0x0100 // IFF this is set, we can receive packets +#define RCR_STRIP_CRC 0x0200 // When set strips CRC from rx packets +#define RCR_ABORT_ENB 0x0200 // When set will abort rx on collision +#define RCR_FILT_CAR 0x0400 // When set filters leading 12 bit s of carrier +#define RCR_SOFTRST 0x8000 // resets the chip + +/* the normal settings for the RCR register : */ +#define RCR_DEFAULT (RCR_STRIP_CRC | RCR_RXEN) +#define RCR_CLEAR 0x0 // set it to a base state + + +// Counter Register +/* BANK 0 */ +#define COUNTER_REG(lp) SMC_REG(lp, 0x0006, 0) + + +// Memory Information Register +/* BANK 0 */ +#define 
MIR_REG(lp) SMC_REG(lp, 0x0008, 0) + + +// Receive/Phy Control Register +/* BANK 0 */ +#define RPC_REG(lp) SMC_REG(lp, 0x000A, 0) +#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode. +#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode +#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode +#define RPC_LSXA_SHFT 5 // Bits to shift LS2A,LS1A,LS0A to lsb +#define RPC_LSXB_SHFT 2 // Bits to get LS2B,LS1B,LS0B to lsb + +#ifndef RPC_LSA_DEFAULT +#define RPC_LSA_DEFAULT RPC_LED_100 +#endif +#ifndef RPC_LSB_DEFAULT +#define RPC_LSB_DEFAULT RPC_LED_FD +#endif + +#define RPC_DEFAULT (RPC_ANEG | RPC_SPEED | RPC_DPLX) + + +/* Bank 0 0x0C is reserved */ + +// Bank Select Register +/* All Banks */ +#define BSR_REG 0x000E + + +// Configuration Reg +/* BANK 1 */ +#define CONFIG_REG(lp) SMC_REG(lp, 0x0000, 1) +#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy +#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL +#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus +#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode. + +// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low +#define CONFIG_DEFAULT (CONFIG_EPH_POWER_EN) + + +// Base Address Register +/* BANK 1 */ +#define BASE_REG(lp) SMC_REG(lp, 0x0002, 1) + + +// Individual Address Registers +/* BANK 1 */ +#define ADDR0_REG(lp) SMC_REG(lp, 0x0004, 1) +#define ADDR1_REG(lp) SMC_REG(lp, 0x0006, 1) +#define ADDR2_REG(lp) SMC_REG(lp, 0x0008, 1) + + +// General Purpose Register +/* BANK 1 */ +#define GP_REG(lp) SMC_REG(lp, 0x000A, 1) + + +// Control Register +/* BANK 1 */ +#define CTL_REG(lp) SMC_REG(lp, 0x000C, 1) +#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received +#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically +#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt +#define CTL_CR_ENABLE 0x0040 // When 1 enables Counter Rollover interrupt +#define CTL_TE_ENABLE 0x0020 // When 1 enables Transmit Error interrupt +#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store +#define CTL_RELOAD 0x0002 // When set reads EEPROM into registers +#define CTL_STORE 0x0001 // When set stores registers into EEPROM + + +// MMU Command Register +/* BANK 2 */ +#define MMU_CMD_REG(lp) SMC_REG(lp, 0x0000, 2) +#define MC_BUSY 1 // When 1 the last release has not completed +#define MC_NOP (0<<5) // No Op +#define MC_ALLOC (1<<5) // OR with number of 256 byte packets +#define MC_RESET (2<<5) // Reset MMU to initial state +#define MC_REMOVE (3<<5) // Remove the current rx packet +#define MC_RELEASE (4<<5) // Remove and release the current rx packet +#define MC_FREEPKT (5<<5) // Release packet in PNR register +#define MC_ENQUEUE (6<<5) // Enqueue the packet for transmit +#define MC_RSTTXFIFO (7<<5) // Reset the TX FIFOs + + +// Packet Number Register +/* BANK 2 */ +#define PN_REG(lp) SMC_REG(lp, 0x0002, 2) + + +// Allocation Result Register +/* BANK 2 */ +#define AR_REG(lp) SMC_REG(lp, 0x0003, 2) +#define AR_FAILED 0x80 // Alocation Failed + + +// TX FIFO Ports Register +/* BANK 2 */ +#define TXFIFO_REG(lp) SMC_REG(lp, 0x0004, 2) +#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty + +// RX FIFO Ports Register +/* BANK 2 */ +#define RXFIFO_REG(lp) SMC_REG(lp, 0x0005, 2) +#define RXFIFO_REMPTY 0x80 // RX FIFO Empty + +#define FIFO_REG(lp) SMC_REG(lp, 0x0004, 2) + +// Pointer Register +/* BANK 2 */ +#define PTR_REG(lp) SMC_REG(lp, 0x0006, 2) +#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area +#define 
PTR_AUTOINC 0x4000 // Auto increment the pointer on each access +#define PTR_READ 0x2000 // When 1 the operation is a read + + +// Data Register +/* BANK 2 */ +#define DATA_REG(lp) SMC_REG(lp, 0x0008, 2) + + +// Interrupt Status/Acknowledge Register +/* BANK 2 */ +#define INT_REG(lp) SMC_REG(lp, 0x000C, 2) + + +// Interrupt Mask Register +/* BANK 2 */ +#define IM_REG(lp) SMC_REG(lp, 0x000D, 2) +#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt +#define IM_ERCV_INT 0x40 // Early Receive Interrupt +#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section +#define IM_RX_OVRN_INT 0x10 // Set by Receiver Overruns +#define IM_ALLOC_INT 0x08 // Set when allocation request is completed +#define IM_TX_EMPTY_INT 0x04 // Set if the TX FIFO goes empty +#define IM_TX_INT 0x02 // Transmit Interrupt +#define IM_RCV_INT 0x01 // Receive Interrupt + + +// Multicast Table Registers +/* BANK 3 */ +#define MCAST_REG1(lp) SMC_REG(lp, 0x0000, 3) +#define MCAST_REG2(lp) SMC_REG(lp, 0x0002, 3) +#define MCAST_REG3(lp) SMC_REG(lp, 0x0004, 3) +#define MCAST_REG4(lp) SMC_REG(lp, 0x0006, 3) + + +// Management Interface Register (MII) +/* BANK 3 */ +#define MII_REG(lp) SMC_REG(lp, 0x0008, 3) +#define MII_MSK_CRS100 0x4000 // Disables CRS100 detection during tx half dup +#define MII_MDOE 0x0008 // MII Output Enable +#define MII_MCLK 0x0004 // MII Clock, pin MDCLK +#define MII_MDI 0x0002 // MII Input, pin MDI +#define MII_MDO 0x0001 // MII Output, pin MDO + + +// Revision Register +/* BANK 3 */ +/* ( hi: chip id low: rev # ) */ +#define REV_REG(lp) SMC_REG(lp, 0x000A, 3) + + +// Early RCV Register +/* BANK 3 */ +/* this is NOT on SMC9192 */ +#define ERCV_REG(lp) SMC_REG(lp, 0x000C, 3) +#define ERCV_RCV_DISCRD 0x0080 // When 1 discards a packet being received +#define ERCV_THRESHOLD 0x001F // ERCV Threshold Mask + + +// External Register +/* BANK 7 */ +#define EXT_REG(lp) SMC_REG(lp, 0x0000, 7) + + +#define CHIP_9192 3 +#define CHIP_9194 4 +#define CHIP_9195 5 +#define CHIP_9196 6 +#define CHIP_91100 7 +#define CHIP_91100FD 8 +#define CHIP_91111FD 9 + +static const char * chip_ids[ 16 ] = { + NULL, NULL, NULL, + /* 3 */ "SMC91C90/91C92", + /* 4 */ "SMC91C94", + /* 5 */ "SMC91C95", + /* 6 */ "SMC91C96", + /* 7 */ "SMC91C100", + /* 8 */ "SMC91C100FD", + /* 9 */ "SMC91C11xFD", + NULL, NULL, NULL, + NULL, NULL, NULL}; + + +/* + . Receive status bits +*/ +#define RS_ALGNERR 0x8000 +#define RS_BRODCAST 0x4000 +#define RS_BADCRC 0x2000 +#define RS_ODDFRAME 0x1000 +#define RS_TOOLONG 0x0800 +#define RS_TOOSHORT 0x0400 +#define RS_MULTICAST 0x0001 +#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) + + +/* + * PHY IDs + * LAN83C183 == LAN91C111 Internal PHY + */ +#define PHY_LAN83C183 0x0016f840 +#define PHY_LAN83C180 0x02821c50 + +/* + * PHY Register Addresses (LAN91C111 Internal PHY) + * + * Generic PHY registers can be found in + * + * These phy registers are specific to our on-board phy. 
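+ * Vendor registers 0x10-0x13 below cover configuration, interrupt
+ * status, and interrupt masking for the LAN91C111's embedded PHY.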
+ */
+
+// PHY Configuration Register 1
+#define PHY_CFG1_REG		0x10
+#define PHY_CFG1_LNKDIS		0x8000	// 1=Rx Link Detect Function disabled
+#define PHY_CFG1_XMTDIS		0x4000	// 1=TP Transmitter Disabled
+#define PHY_CFG1_XMTPDN		0x2000	// 1=TP Transmitter Powered Down
+#define PHY_CFG1_BYPSCR		0x0400	// 1=Bypass scrambler/descrambler
+#define PHY_CFG1_UNSCDS		0x0200	// 1=Unscramble Idle Reception Disable
+#define PHY_CFG1_EQLZR		0x0100	// 1=Rx Equalizer Disabled
+#define PHY_CFG1_CABLE		0x0080	// 1=STP(150ohm), 0=UTP(100ohm)
+#define PHY_CFG1_RLVL0		0x0040	// 1=Rx Squelch level reduced by 4.5dB
+#define PHY_CFG1_TLVL_SHIFT	2	// Transmit Output Level Adjust
+#define PHY_CFG1_TLVL_MASK	0x003C
+#define PHY_CFG1_TRF_MASK	0x0003	// Transmitter Rise/Fall time
+
+
+// PHY Configuration Register 2
+#define PHY_CFG2_REG		0x11
+#define PHY_CFG2_APOLDIS	0x0020	// 1=Auto Polarity Correction disabled
+#define PHY_CFG2_JABDIS		0x0010	// 1=Jabber disabled
+#define PHY_CFG2_MREG		0x0008	// 1=Multiple register access (MII mgt)
+#define PHY_CFG2_INTMDIO	0x0004	// 1=Interrupt signaled with MDIO pulse
+
+// PHY Status Output (and Interrupt status) Register
+#define PHY_INT_REG		0x12	// Status Output (Interrupt Status)
+#define PHY_INT_INT		0x8000	// 1=bits have changed since last read
+#define PHY_INT_LNKFAIL		0x4000	// 1=Link Not detected
+#define PHY_INT_LOSSSYNC	0x2000	// 1=Descrambler has lost sync
+#define PHY_INT_CWRD		0x1000	// 1=Invalid 4B5B code detected on rx
+#define PHY_INT_SSD		0x0800	// 1=No Start Of Stream detected on rx
+#define PHY_INT_ESD		0x0400	// 1=No End Of Stream detected on rx
+#define PHY_INT_RPOL		0x0200	// 1=Reverse Polarity detected
+#define PHY_INT_JAB		0x0100	// 1=Jabber detected
+#define PHY_INT_SPDDET		0x0080	// 1=100Base-TX mode, 0=10Base-T mode
+#define PHY_INT_DPLXDET		0x0040	// 1=Device in Full Duplex
+
+// PHY Interrupt/Status Mask Register
+#define PHY_MASK_REG		0x13	// Interrupt Mask
+// Uses the same bit definitions as PHY_INT_REG
+
+
+/*
+ * SMC91C96 ethernet config and status registers.
+ * These are in the "attribute" space.
+ */
+#define ECOR			0x8000
+#define ECOR_RESET		0x80
+#define ECOR_LEVEL_IRQ		0x40
+#define ECOR_WR_ATTRIB		0x04
+#define ECOR_ENABLE		0x01
+
+#define ECSR			0x8002
+#define ECSR_IOIS8		0x20
+#define ECSR_PWRDWN		0x04
+#define ECSR_INT		0x02
+
+#define ATTRIB_SIZE		((64*1024) << SMC_IO_SHIFT)
+
+
+/*
+ * Macros to abstract register access according to the data bus
+ * capabilities. Please use those and not the in/out primitives.
+ * Note: the following macros do *not* select the bank -- this must
+ * be done separately as needed in the main code. The SMC_REG() macro
+ * only uses the bank argument for debugging purposes (when enabled).
+ *
+ * Note: despite inline functions being safer, everything leading to this
+ * should preferably be macros to let BUG() display the line number in
+ * the core source code since we're interested in the top call site
+ * not in any inline function location.
+ */
+
+#if SMC_DEBUG > 0
+#define SMC_REG(lp, reg, bank)					\
+	({								\
+		int __b = SMC_CURRENT_BANK(lp);				\
+		if (unlikely((__b & ~0xf0) != (0x3300 | bank))) {	\
+			printk( "%s: bank reg screwed (0x%04x)\n",	\
+				CARDNAME, __b );			\
+			BUG();						\
+		}							\
+		reg<<SMC_IO_SHIFT;					\
+	})
+#else
+#define SMC_REG(lp, reg, bank)	(reg<<SMC_IO_SHIFT)
+#endif
+
+#define SMC_MUST_ALIGN_WRITE(lp)	SMC_32BIT(lp)
+
+#define SMC_GET_PN(lp)						\
+	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, PN_REG(lp)))		\
+				: (SMC_inw(ioaddr, PN_REG(lp)) & 0xFF))
+
+#define SMC_SET_PN(lp, x)						\
+	do {								\
+		if (SMC_MUST_ALIGN_WRITE(lp))				\
+			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 0, 2));	\
+		else if (SMC_8BIT(lp))					\
+			SMC_outb(x, ioaddr, PN_REG(lp));		\
+		else							\
+			SMC_outw(x, ioaddr, PN_REG(lp));		\
+	} while (0)
+
+#define SMC_GET_AR(lp)						\
+	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, AR_REG(lp)))		\
+				: (SMC_inw(ioaddr, AR_REG(lp)) >> 8))
+
+#define SMC_GET_TXFIFO(lp)						\
+	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, TXFIFO_REG(lp)))	\
+				: (SMC_inw(ioaddr, TXFIFO_REG(lp)) & 0xFF))
+
+#define SMC_GET_RXFIFO(lp)						\
+	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, RXFIFO_REG(lp)))	\
+				: (SMC_inw(ioaddr, TXFIFO_REG(lp)) >> 8))
+
+#define SMC_GET_INT(lp)						\
+	(SMC_8BIT(lp)	?
(SMC_inb(ioaddr, INT_REG(lp))) \ + : (SMC_inw(ioaddr, INT_REG(lp)) & 0xFF)) + +#define SMC_ACK_INT(lp, x) \ + do { \ + if (SMC_8BIT(lp)) \ + SMC_outb(x, ioaddr, INT_REG(lp)); \ + else { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff; \ + SMC_outw(__mask | (x), ioaddr, INT_REG(lp)); \ + local_irq_restore(__flags); \ + } \ + } while (0) + +#define SMC_GET_INT_MASK(lp) \ + (SMC_8BIT(lp) ? (SMC_inb(ioaddr, IM_REG(lp))) \ + : (SMC_inw(ioaddr, INT_REG(lp)) >> 8)) + +#define SMC_SET_INT_MASK(lp, x) \ + do { \ + if (SMC_8BIT(lp)) \ + SMC_outb(x, ioaddr, IM_REG(lp)); \ + else \ + SMC_outw((x) << 8, ioaddr, INT_REG(lp)); \ + } while (0) + +#define SMC_CURRENT_BANK(lp) SMC_inw(ioaddr, BANK_SELECT) + +#define SMC_SELECT_BANK(lp, x) \ + do { \ + if (SMC_MUST_ALIGN_WRITE(lp)) \ + SMC_outl((x)<<16, ioaddr, 12<> 8; \ + __v = SMC_inw(ioaddr, ADDR1_REG(lp)); \ + addr[2] = __v; addr[3] = __v >> 8; \ + __v = SMC_inw(ioaddr, ADDR2_REG(lp)); \ + addr[4] = __v; addr[5] = __v >> 8; \ + } while (0) +#endif + +#define SMC_SET_MAC_ADDR(lp, addr) \ + do { \ + SMC_outw(addr[0]|(addr[1] << 8), ioaddr, ADDR0_REG(lp)); \ + SMC_outw(addr[2]|(addr[3] << 8), ioaddr, ADDR1_REG(lp)); \ + SMC_outw(addr[4]|(addr[5] << 8), ioaddr, ADDR2_REG(lp)); \ + } while (0) + +#define SMC_SET_MCAST(lp, x) \ + do { \ + const unsigned char *mt = (x); \ + SMC_outw(mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \ + SMC_outw(mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \ + SMC_outw(mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \ + SMC_outw(mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \ + } while (0) + +#define SMC_PUT_PKT_HDR(lp, status, length) \ + do { \ + if (SMC_32BIT(lp)) \ + SMC_outl((status) | (length)<<16, ioaddr, \ + DATA_REG(lp)); \ + else { \ + SMC_outw(status, ioaddr, DATA_REG(lp)); \ + SMC_outw(length, ioaddr, DATA_REG(lp)); \ + } \ + } while (0) + +#define SMC_GET_PKT_HDR(lp, status, length) \ + do { \ + if (SMC_32BIT(lp)) { \ + unsigned int __val = SMC_inl(ioaddr, DATA_REG(lp)); \ + (status) = __val & 0xffff; \ + (length) = __val >> 16; \ + } else { \ + (status) = SMC_inw(ioaddr, DATA_REG(lp)); \ + (length) = SMC_inw(ioaddr, DATA_REG(lp)); \ + } \ + } while (0) + +#define SMC_PUSH_DATA(lp, p, l) \ + do { \ + if (SMC_32BIT(lp)) { \ + void *__ptr = (p); \ + int __len = (l); \ + void __iomem *__ioaddr = ioaddr; \ + if (__len >= 2 && (unsigned long)__ptr & 2) { \ + __len -= 2; \ + SMC_outw(*(u16 *)__ptr, ioaddr, \ + DATA_REG(lp)); \ + __ptr += 2; \ + } \ + if (SMC_CAN_USE_DATACS && lp->datacs) \ + __ioaddr = lp->datacs; \ + SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ + if (__len & 2) { \ + __ptr += (__len & ~3); \ + SMC_outw(*((u16 *)__ptr), ioaddr, \ + DATA_REG(lp)); \ + } \ + } else if (SMC_16BIT(lp)) \ + SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ + else if (SMC_8BIT(lp)) \ + SMC_outsb(ioaddr, DATA_REG(lp), p, l); \ + } while (0) + +#define SMC_PULL_DATA(lp, p, l) \ + do { \ + if (SMC_32BIT(lp)) { \ + void *__ptr = (p); \ + int __len = (l); \ + void __iomem *__ioaddr = ioaddr; \ + if ((unsigned long)__ptr & 2) { \ + /* \ + * We want 32bit alignment here. \ + * Since some buses perform a full \ + * 32bit fetch even for 16bit data \ + * we can't use SMC_inw() here. \ + * Back both source (on-chip) and \ + * destination pointers of 2 bytes. \ + * This is possible since the call to \ + * SMC_GET_PKT_HDR() already advanced \ + * the source pointer of 4 bytes, and \ + * the skb_reserve(skb, 2) advanced \ + * the destination pointer of 2 bytes. 
\ + */ \ + __ptr -= 2; \ + __len += 2; \ + SMC_SET_PTR(lp, \ + 2|PTR_READ|PTR_RCV|PTR_AUTOINC); \ + } \ + if (SMC_CAN_USE_DATACS && lp->datacs) \ + __ioaddr = lp->datacs; \ + __len += 2; \ + SMC_insl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ + } else if (SMC_16BIT(lp)) \ + SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ + else if (SMC_8BIT(lp)) \ + SMC_insb(ioaddr, DATA_REG(lp), p, l); \ + } while (0) + +#endif /* _SMC91X_H_ */ diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c new file mode 100644 index 0000000..75c08a5 --- /dev/null +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -0,0 +1,2404 @@ +/*************************************************************************** + * + * Copyright (C) 2004-2008 SMSC + * Copyright (C) 2005-2008 ARM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + *************************************************************************** + * Rewritten, heavily based on smsc911x simple driver by SMSC. + * Partly uses io macros from smc91x.c by Nicolas Pitre + * + * Supported devices: + * LAN9115, LAN9116, LAN9117, LAN9118 + * LAN9215, LAN9216, LAN9217, LAN9218 + * LAN9210, LAN9211 + * LAN9220, LAN9221 + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "smsc911x.h" + +#define SMSC_CHIPNAME "smsc911x" +#define SMSC_MDIONAME "smsc911x-mdio" +#define SMSC_DRV_VERSION "2008-10-21" + +MODULE_LICENSE("GPL"); +MODULE_VERSION(SMSC_DRV_VERSION); +MODULE_ALIAS("platform:smsc911x"); + +#if USE_DEBUG > 0 +static int debug = 16; +#else +static int debug = 3; +#endif + +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +struct smsc911x_data; + +struct smsc911x_ops { + u32 (*reg_read)(struct smsc911x_data *pdata, u32 reg); + void (*reg_write)(struct smsc911x_data *pdata, u32 reg, u32 val); + void (*rx_readfifo)(struct smsc911x_data *pdata, + unsigned int *buf, unsigned int wordcount); + void (*tx_writefifo)(struct smsc911x_data *pdata, + unsigned int *buf, unsigned int wordcount); +}; + +struct smsc911x_data { + void __iomem *ioaddr; + + unsigned int idrev; + + /* used to decide which workarounds apply */ + unsigned int generation; + + /* device configuration (copied from platform_data during probe) */ + struct smsc911x_platform_config config; + + /* This needs to be acquired before calling any of below: + * smsc911x_mac_read(), smsc911x_mac_write() + */ + spinlock_t mac_lock; + + /* spinlock to ensure register accesses are serialised */ + spinlock_t dev_lock; + + struct phy_device *phy_dev; + struct mii_bus *mii_bus; + int phy_irq[PHY_MAX_ADDR]; + unsigned 
int using_extphy; + int last_duplex; + int last_carrier; + + u32 msg_enable; + unsigned int gpio_setting; + unsigned int gpio_orig_setting; + struct net_device *dev; + struct napi_struct napi; + + unsigned int software_irq_signal; + +#ifdef USE_PHY_WORK_AROUND +#define MIN_PACKET_SIZE (64) + char loopback_tx_pkt[MIN_PACKET_SIZE]; + char loopback_rx_pkt[MIN_PACKET_SIZE]; + unsigned int resetcount; +#endif + + /* Members for Multicast filter workaround */ + unsigned int multicast_update_pending; + unsigned int set_bits_mask; + unsigned int clear_bits_mask; + unsigned int hashhi; + unsigned int hashlo; + + /* register access functions */ + const struct smsc911x_ops *ops; +}; + +/* Easy access to information */ +#define __smsc_shift(pdata, reg) ((reg) << ((pdata)->config.shift)) + +static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) + return readl(pdata->ioaddr + reg); + + if (pdata->config.flags & SMSC911X_USE_16BIT) + return ((readw(pdata->ioaddr + reg) & 0xFFFF) | + ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16)); + + BUG(); + return 0; +} + +static inline u32 +__smsc911x_reg_read_shift(struct smsc911x_data *pdata, u32 reg) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) + return readl(pdata->ioaddr + __smsc_shift(pdata, reg)); + + if (pdata->config.flags & SMSC911X_USE_16BIT) + return (readw(pdata->ioaddr + + __smsc_shift(pdata, reg)) & 0xFFFF) | + ((readw(pdata->ioaddr + + __smsc_shift(pdata, reg + 2)) & 0xFFFF) << 16); + + BUG(); + return 0; +} + +static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) +{ + u32 data; + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + data = pdata->ops->reg_read(pdata, reg); + spin_unlock_irqrestore(&pdata->dev_lock, flags); + + return data; +} + +static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writel(val, pdata->ioaddr + reg); + return; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + writew(val & 0xFFFF, pdata->ioaddr + reg); + writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2); + return; + } + + BUG(); +} + +static inline void +__smsc911x_reg_write_shift(struct smsc911x_data *pdata, u32 reg, u32 val) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writel(val, pdata->ioaddr + __smsc_shift(pdata, reg)); + return; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + writew(val & 0xFFFF, + pdata->ioaddr + __smsc_shift(pdata, reg)); + writew((val >> 16) & 0xFFFF, + pdata->ioaddr + __smsc_shift(pdata, reg + 2)); + return; + } + + BUG(); +} + +static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + pdata->ops->reg_write(pdata, reg, val); + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Writes a packet to the TX_DATA_FIFO */ +static inline void +smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + __smsc911x_reg_write(pdata, TX_DATA_FIFO, + swab32(*buf++)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + __smsc911x_reg_write(pdata, TX_DATA_FIFO, 
*buf++); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Writes a packet to the TX_DATA_FIFO - shifted version */ +static inline void +smsc911x_tx_writefifo_shift(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + __smsc911x_reg_write_shift(pdata, TX_DATA_FIFO, + swab32(*buf++)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writesl(pdata->ioaddr + __smsc_shift(pdata, + TX_DATA_FIFO), buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + __smsc911x_reg_write_shift(pdata, + TX_DATA_FIFO, *buf++); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Reads a packet out of the RX_DATA_FIFO */ +static inline void +smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + *buf++ = swab32(__smsc911x_reg_read(pdata, + RX_DATA_FIFO)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + *buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Reads a packet out of the RX_DATA_FIFO - shifted version */ +static inline void +smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + *buf++ = swab32(__smsc911x_reg_read_shift(pdata, + RX_DATA_FIFO)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + readsl(pdata->ioaddr + __smsc_shift(pdata, + RX_DATA_FIFO), buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + *buf++ = __smsc911x_reg_read_shift(pdata, + RX_DATA_FIFO); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read + * and smsc911x_mac_write, so assumes mac_lock is held */ +static int smsc911x_mac_complete(struct smsc911x_data *pdata) +{ + int i; + u32 val; + + SMSC_ASSERT_MAC_LOCK(pdata); + + for (i = 0; i < 40; i++) { + val = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (!(val & MAC_CSR_CMD_CSR_BUSY_)) + return 0; + } + SMSC_WARN(pdata, hw, "Timed out waiting for MAC not BUSY. " + "MAC_CSR_CMD: 0x%08X", val); + return -EIO; +} + +/* Fetches a MAC register value. 
Assumes mac_lock is acquired */ +static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset) +{ + unsigned int temp; + + SMSC_ASSERT_MAC_LOCK(pdata); + + temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { + SMSC_WARN(pdata, hw, "MAC busy at entry"); + return 0xFFFFFFFF; + } + + /* Send the MAC cmd */ + smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) | + MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_)); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + /* Wait for the read to complete */ + if (likely(smsc911x_mac_complete(pdata) == 0)) + return smsc911x_reg_read(pdata, MAC_CSR_DATA); + + SMSC_WARN(pdata, hw, "MAC busy after read"); + return 0xFFFFFFFF; +} + +/* Set a mac register, mac_lock must be acquired before calling */ +static void smsc911x_mac_write(struct smsc911x_data *pdata, + unsigned int offset, u32 val) +{ + unsigned int temp; + + SMSC_ASSERT_MAC_LOCK(pdata); + + temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { + SMSC_WARN(pdata, hw, + "smsc911x_mac_write failed, MAC busy at entry"); + return; + } + + /* Send data to write */ + smsc911x_reg_write(pdata, MAC_CSR_DATA, val); + + /* Write the actual data */ + smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) | + MAC_CSR_CMD_CSR_BUSY_)); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + /* Wait for the write to complete */ + if (likely(smsc911x_mac_complete(pdata) == 0)) + return; + + SMSC_WARN(pdata, hw, "smsc911x_mac_write failed, MAC busy after write"); +} + +/* Get a phy register */ +static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx) +{ + struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; + unsigned long flags; + unsigned int addr; + int i, reg; + + spin_lock_irqsave(&pdata->mac_lock, flags); + + /* Confirm MII not busy */ + if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_read???"); + reg = -EIO; + goto out; + } + + /* Set the address, index & direction (read from PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6); + smsc911x_mac_write(pdata, MII_ACC, addr); + + /* Wait for read to complete w/ timeout */ + for (i = 0; i < 100; i++) + if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + reg = smsc911x_mac_read(pdata, MII_DATA); + goto out; + } + + SMSC_WARN(pdata, hw, "Timed out waiting for MII read to finish"); + reg = -EIO; + +out: + spin_unlock_irqrestore(&pdata->mac_lock, flags); + return reg; +} + +/* Set a phy register */ +static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 val) +{ + struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; + unsigned long flags; + unsigned int addr; + int i, reg; + + spin_lock_irqsave(&pdata->mac_lock, flags); + + /* Confirm MII not busy */ + if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_write???"); + reg = -EIO; + goto out; + } + + /* Put the data to write in the MAC */ + smsc911x_mac_write(pdata, MII_DATA, val); + + /* Set the address, index & direction (write to PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | + MII_ACC_MII_WRITE_; + smsc911x_mac_write(pdata, MII_ACC, addr); + + /* Wait for write to complete w/ timeout */ + for (i = 0; i < 100; i++) + if 
(!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + reg = 0; + goto out; + } + + SMSC_WARN(pdata, hw, "Timed out waiting for MII write to finish"); + reg = -EIO; + +out: + spin_unlock_irqrestore(&pdata->mac_lock, flags); + return reg; +} + +/* Switch to external phy. Assumes tx and rx are stopped. */ +static void smsc911x_phy_enable_external(struct smsc911x_data *pdata) +{ + unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); + + /* Disable phy clocks to the MAC */ + hwcfg &= (~HW_CFG_PHY_CLK_SEL_); + hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + udelay(10); /* Enough time for clocks to stop */ + + /* Switch to external phy */ + hwcfg |= HW_CFG_EXT_PHY_EN_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + + /* Enable phy clocks to the MAC */ + hwcfg &= (~HW_CFG_PHY_CLK_SEL_); + hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + udelay(10); /* Enough time for clocks to restart */ + + hwcfg |= HW_CFG_SMI_SEL_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); +} + +/* Autodetects and enables external phy if present on supported chips. + * autodetection can be overridden by specifying SMSC911X_FORCE_INTERNAL_PHY + * or SMSC911X_FORCE_EXTERNAL_PHY in the platform_data flags. */ +static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata) +{ + unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); + + if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) { + SMSC_TRACE(pdata, hw, "Forcing internal PHY"); + pdata->using_extphy = 0; + } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) { + SMSC_TRACE(pdata, hw, "Forcing external PHY"); + smsc911x_phy_enable_external(pdata); + pdata->using_extphy = 1; + } else if (hwcfg & HW_CFG_EXT_PHY_DET_) { + SMSC_TRACE(pdata, hw, + "HW_CFG EXT_PHY_DET set, using external PHY"); + smsc911x_phy_enable_external(pdata); + pdata->using_extphy = 1; + } else { + SMSC_TRACE(pdata, hw, + "HW_CFG EXT_PHY_DET clear, using internal PHY"); + pdata->using_extphy = 0; + } +} + +/* Fetches a tx status out of the status fifo */ +static unsigned int smsc911x_tx_get_txstatus(struct smsc911x_data *pdata) +{ + unsigned int result = + smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TSUSED_; + + if (result != 0) + result = smsc911x_reg_read(pdata, TX_STATUS_FIFO); + + return result; +} + +/* Fetches the next rx status */ +static unsigned int smsc911x_rx_get_rxstatus(struct smsc911x_data *pdata) +{ + unsigned int result = + smsc911x_reg_read(pdata, RX_FIFO_INF) & RX_FIFO_INF_RXSUSED_; + + if (result != 0) + result = smsc911x_reg_read(pdata, RX_STATUS_FIFO); + + return result; +} + +#ifdef USE_PHY_WORK_AROUND +static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata) +{ + unsigned int tries; + u32 wrsz; + u32 rdsz; + ulong bufp; + + for (tries = 0; tries < 10; tries++) { + unsigned int txcmd_a; + unsigned int txcmd_b; + unsigned int status; + unsigned int pktlength; + unsigned int i; + + /* Zero-out rx packet memory */ + memset(pdata->loopback_rx_pkt, 0, MIN_PACKET_SIZE); + + /* Write tx packet to 118 */ + txcmd_a = (u32)((ulong)pdata->loopback_tx_pkt & 0x03) << 16; + txcmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; + txcmd_a |= MIN_PACKET_SIZE; + + txcmd_b = MIN_PACKET_SIZE << 16 | MIN_PACKET_SIZE; + + smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_a); + smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_b); + + bufp = (ulong)pdata->loopback_tx_pkt & (~0x3); + wrsz = MIN_PACKET_SIZE + 3; + wrsz += (u32)((ulong)pdata->loopback_tx_pkt & 0x3); + wrsz >>= 2; + + 
pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); + + /* Wait till transmit is done */ + i = 60; + do { + udelay(5); + status = smsc911x_tx_get_txstatus(pdata); + } while ((i--) && (!status)); + + if (!status) { + SMSC_WARN(pdata, hw, + "Failed to transmit during loopback test"); + continue; + } + if (status & TX_STS_ES_) { + SMSC_WARN(pdata, hw, + "Transmit encountered errors during loopback test"); + continue; + } + + /* Wait till receive is done */ + i = 60; + do { + udelay(5); + status = smsc911x_rx_get_rxstatus(pdata); + } while ((i--) && (!status)); + + if (!status) { + SMSC_WARN(pdata, hw, + "Failed to receive during loopback test"); + continue; + } + if (status & RX_STS_ES_) { + SMSC_WARN(pdata, hw, + "Receive encountered errors during loopback test"); + continue; + } + + pktlength = ((status & 0x3FFF0000UL) >> 16); + bufp = (ulong)pdata->loopback_rx_pkt; + rdsz = pktlength + 3; + rdsz += (u32)((ulong)pdata->loopback_rx_pkt & 0x3); + rdsz >>= 2; + + pdata->ops->rx_readfifo(pdata, (unsigned int *)bufp, rdsz); + + if (pktlength != (MIN_PACKET_SIZE + 4)) { + SMSC_WARN(pdata, hw, "Unexpected packet size " + "during loop back test, size=%d, will retry", + pktlength); + } else { + unsigned int j; + int mismatch = 0; + for (j = 0; j < MIN_PACKET_SIZE; j++) { + if (pdata->loopback_tx_pkt[j] + != pdata->loopback_rx_pkt[j]) { + mismatch = 1; + break; + } + } + if (!mismatch) { + SMSC_TRACE(pdata, hw, "Successfully verified " + "loopback packet"); + return 0; + } else { + SMSC_WARN(pdata, hw, "Data mismatch " + "during loop back test, will retry"); + } + } + } + + return -EIO; +} + +static int smsc911x_phy_reset(struct smsc911x_data *pdata) +{ + struct phy_device *phy_dev = pdata->phy_dev; + unsigned int temp; + unsigned int i = 100000; + + BUG_ON(!phy_dev); + BUG_ON(!phy_dev->bus); + + SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset"); + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET); + do { + msleep(1); + temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, + MII_BMCR); + } while ((i--) && (temp & BMCR_RESET)); + + if (temp & BMCR_RESET) { + SMSC_WARN(pdata, hw, "PHY reset failed to complete"); + return -EIO; + } + /* Extra delay required because the phy may not be completed with + * its reset when BMCR_RESET is cleared. 
Specs say 256 uS is + * enough delay but using 1ms here to be safe */ + msleep(1); + + return 0; +} + +static int smsc911x_phy_loopbacktest(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + int result = -EIO; + unsigned int i, val; + unsigned long flags; + + /* Initialise tx packet using broadcast destination address */ + memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN); + + /* Use incrementing source address */ + for (i = 6; i < 12; i++) + pdata->loopback_tx_pkt[i] = (char)i; + + /* Set length type field */ + pdata->loopback_tx_pkt[12] = 0x00; + pdata->loopback_tx_pkt[13] = 0x00; + + for (i = 14; i < MIN_PACKET_SIZE; i++) + pdata->loopback_tx_pkt[i] = (char)i; + + val = smsc911x_reg_read(pdata, HW_CFG); + val &= HW_CFG_TX_FIF_SZ_; + val |= HW_CFG_SF_; + smsc911x_reg_write(pdata, HW_CFG, val); + + smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_); + smsc911x_reg_write(pdata, RX_CFG, + (u32)((ulong)pdata->loopback_rx_pkt & 0x03) << 8); + + for (i = 0; i < 10; i++) { + /* Set PHY to 10/FD, no ANEG, and loopback mode */ + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, + BMCR_LOOPBACK | BMCR_FULLDPLX); + + /* Enable MAC tx/rx, FD */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, MAC_CR_FDPX_ + | MAC_CR_TXEN_ | MAC_CR_RXEN_); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + if (smsc911x_phy_check_loopbackpkt(pdata) == 0) { + result = 0; + break; + } + pdata->resetcount++; + + /* Disable MAC rx */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, 0); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_phy_reset(pdata); + } + + /* Disable MAC */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, 0); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + /* Cancel PHY loopback mode */ + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, 0); + + smsc911x_reg_write(pdata, TX_CFG, 0); + smsc911x_reg_write(pdata, RX_CFG, 0); + + return result; +} +#endif /* USE_PHY_WORK_AROUND */ + +static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata) +{ + struct phy_device *phy_dev = pdata->phy_dev; + u32 afc = smsc911x_reg_read(pdata, AFC_CFG); + u32 flow; + unsigned long flags; + + if (phy_dev->duplex == DUPLEX_FULL) { + u16 lcladv = phy_read(phy_dev, MII_ADVERTISE); + u16 rmtadv = phy_read(phy_dev, MII_LPA); + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + + if (cap & FLOW_CTRL_RX) + flow = 0xFFFF0002; + else + flow = 0; + + if (cap & FLOW_CTRL_TX) + afc |= 0xF; + else + afc &= ~0xF; + + SMSC_TRACE(pdata, hw, "rx pause %s, tx pause %s", + (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), + (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); + } else { + SMSC_TRACE(pdata, hw, "half duplex"); + flow = 0; + afc |= 0xF; + } + + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, FLOW, flow); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_reg_write(pdata, AFC_CFG, afc); +} + +/* Update link mode if anything has changed. Called periodically when the + * PHY is in polling mode, even if nothing has changed. 
*/ +static void smsc911x_phy_adjust_link(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + unsigned long flags; + int carrier; + + if (phy_dev->duplex != pdata->last_duplex) { + unsigned int mac_cr; + SMSC_TRACE(pdata, hw, "duplex state has changed"); + + spin_lock_irqsave(&pdata->mac_lock, flags); + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + if (phy_dev->duplex) { + SMSC_TRACE(pdata, hw, + "configuring for full duplex mode"); + mac_cr |= MAC_CR_FDPX_; + } else { + SMSC_TRACE(pdata, hw, + "configuring for half duplex mode"); + mac_cr &= ~MAC_CR_FDPX_; + } + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_phy_update_flowcontrol(pdata); + pdata->last_duplex = phy_dev->duplex; + } + + carrier = netif_carrier_ok(dev); + if (carrier != pdata->last_carrier) { + SMSC_TRACE(pdata, hw, "carrier state has changed"); + if (carrier) { + SMSC_TRACE(pdata, hw, "configuring for carrier OK"); + if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) && + (!pdata->using_extphy)) { + /* Restore original GPIO configuration */ + pdata->gpio_setting = pdata->gpio_orig_setting; + smsc911x_reg_write(pdata, GPIO_CFG, + pdata->gpio_setting); + } + } else { + SMSC_TRACE(pdata, hw, "configuring for no carrier"); + /* Check global setting that LED1 + * usage is 10/100 indicator */ + pdata->gpio_setting = smsc911x_reg_read(pdata, + GPIO_CFG); + if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) && + (!pdata->using_extphy)) { + /* Force 10/100 LED off, after saving + * original GPIO configuration */ + pdata->gpio_orig_setting = pdata->gpio_setting; + + pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_; + pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_ + | GPIO_CFG_GPIODIR0_ + | GPIO_CFG_GPIOD0_); + smsc911x_reg_write(pdata, GPIO_CFG, + pdata->gpio_setting); + } + } + pdata->last_carrier = carrier; + } +} + +static int smsc911x_mii_probe(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phydev = NULL; + int ret; + + /* find the first phy */ + phydev = phy_find_first(pdata->mii_bus); + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -ENODEV; + } + + SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X", + phydev->addr, phydev->phy_id); + + ret = phy_connect_direct(dev, phydev, + &smsc911x_phy_adjust_link, 0, + pdata->config.phy_interface); + + if (ret) { + netdev_err(dev, "Could not attach to PHY\n"); + return ret; + } + + netdev_info(dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + /* mask with MAC supported features */ + phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phydev->advertising = phydev->supported; + + pdata->phy_dev = phydev; + pdata->last_duplex = -1; + pdata->last_carrier = -1; + +#ifdef USE_PHY_WORK_AROUND + if (smsc911x_phy_loopbacktest(dev) < 0) { + SMSC_WARN(pdata, hw, "Failed Loop Back Test"); + return -ENODEV; + } + SMSC_TRACE(pdata, hw, "Passed Loop Back Test"); +#endif /* USE_PHY_WORK_AROUND */ + + SMSC_TRACE(pdata, hw, "phy initialised successfully"); + return 0; +} + +static int __devinit smsc911x_mii_init(struct platform_device *pdev, + struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + int err = -ENXIO, i; + + pdata->mii_bus = mdiobus_alloc(); + if (!pdata->mii_bus) { + err = -ENOMEM; + goto err_out_1; + } + + pdata->mii_bus->name = SMSC_MDIONAME; + 
snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ pdata->mii_bus->priv = pdata;
+ pdata->mii_bus->read = smsc911x_mii_read;
+ pdata->mii_bus->write = smsc911x_mii_write;
+ pdata->mii_bus->irq = pdata->phy_irq;
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ pdata->mii_bus->irq[i] = PHY_POLL;
+
+ pdata->mii_bus->parent = &pdev->dev;
+
+ switch (pdata->idrev & 0xFFFF0000) {
+ case 0x01170000:
+ case 0x01150000:
+ case 0x117A0000:
+ case 0x115A0000:
+ /* External PHY supported, try to autodetect */
+ smsc911x_phy_initialise_external(pdata);
+ break;
+ default:
+ SMSC_TRACE(pdata, hw, "External PHY is not supported, " "using internal PHY");
+ pdata->using_extphy = 0;
+ break;
+ }
+
+ if (!pdata->using_extphy) {
+ /* Mask all PHYs except ID 1 (internal) */
+ pdata->mii_bus->phy_mask = ~(1 << 1);
+ }
+
+ if (mdiobus_register(pdata->mii_bus)) {
+ SMSC_WARN(pdata, probe, "Error registering mii bus");
+ goto err_out_free_bus_2;
+ }
+
+ if (smsc911x_mii_probe(dev) < 0) {
+ SMSC_WARN(pdata, probe, "Error probing mii bus");
+ goto err_out_unregister_bus_3;
+ }
+
+ return 0;
+
+err_out_unregister_bus_3:
+ mdiobus_unregister(pdata->mii_bus);
+err_out_free_bus_2:
+ mdiobus_free(pdata->mii_bus);
+err_out_1:
+ return err;
+}
+
+/* Gets the number of tx statuses in the fifo */
+static unsigned int smsc911x_tx_get_txstatcount(struct smsc911x_data *pdata)
+{
+ return (smsc911x_reg_read(pdata, TX_FIFO_INF)
+ & TX_FIFO_INF_TSUSED_) >> 16;
+}
+
+/* Reads tx statuses and increments counters where necessary */
+static void smsc911x_tx_update_txcounters(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int tx_stat;
+
+ while ((tx_stat = smsc911x_tx_get_txstatus(pdata)) != 0) {
+ if (unlikely(tx_stat & 0x80000000)) {
+ /* In this driver the packet tag is used as the packet
+ * length. Since a packet length can never reach the
+ * size of 0x8000, this bit is reserved. It is worth
+ * noting that the "reserved bit" in the warning below
+ * does not reference a hardware defined reserved bit
+ * but rather a driver defined one.
+ */ + SMSC_WARN(pdata, hw, "Packet tag reserved bit is high"); + } else { + if (unlikely(tx_stat & TX_STS_ES_)) { + dev->stats.tx_errors++; + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += (tx_stat >> 16); + } + if (unlikely(tx_stat & TX_STS_EXCESS_COL_)) { + dev->stats.collisions += 16; + dev->stats.tx_aborted_errors += 1; + } else { + dev->stats.collisions += + ((tx_stat >> 3) & 0xF); + } + if (unlikely(tx_stat & TX_STS_LOST_CARRIER_)) + dev->stats.tx_carrier_errors += 1; + if (unlikely(tx_stat & TX_STS_LATE_COL_)) { + dev->stats.collisions++; + dev->stats.tx_aborted_errors++; + } + } + } +} + +/* Increments the Rx error counters */ +static void +smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat) +{ + int crc_err = 0; + + if (unlikely(rxstat & RX_STS_ES_)) { + dev->stats.rx_errors++; + if (unlikely(rxstat & RX_STS_CRC_ERR_)) { + dev->stats.rx_crc_errors++; + crc_err = 1; + } + } + if (likely(!crc_err)) { + if (unlikely((rxstat & RX_STS_FRAME_TYPE_) && + (rxstat & RX_STS_LENGTH_ERR_))) + dev->stats.rx_length_errors++; + if (rxstat & RX_STS_MCAST_) + dev->stats.multicast++; + } +} + +/* Quickly dumps bad packets */ +static void +smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) +{ + unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2; + + if (likely(pktwords >= 4)) { + unsigned int timeout = 500; + unsigned int val; + smsc911x_reg_write(pdata, RX_DP_CTRL, RX_DP_CTRL_RX_FFWD_); + do { + udelay(1); + val = smsc911x_reg_read(pdata, RX_DP_CTRL); + } while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout); + + if (unlikely(timeout == 0)) + SMSC_WARN(pdata, hw, "Timed out waiting for " + "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val); + } else { + unsigned int temp; + while (pktwords--) + temp = smsc911x_reg_read(pdata, RX_DATA_FIFO); + } +} + +/* NAPI poll function */ +static int smsc911x_poll(struct napi_struct *napi, int budget) +{ + struct smsc911x_data *pdata = + container_of(napi, struct smsc911x_data, napi); + struct net_device *dev = pdata->dev; + int npackets = 0; + + while (npackets < budget) { + unsigned int pktlength; + unsigned int pktwords; + struct sk_buff *skb; + unsigned int rxstat = smsc911x_rx_get_rxstatus(pdata); + + if (!rxstat) { + unsigned int temp; + /* We processed all packets available. Tell NAPI it can + * stop polling then re-enable rx interrupts */ + smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_); + napi_complete(napi); + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= INT_EN_RSFL_EN_; + smsc911x_reg_write(pdata, INT_EN, temp); + break; + } + + /* Count packet for NAPI scheduling, even if it has an error. 
+ * Error packets still require cycles to discard */ + npackets++; + + pktlength = ((rxstat & 0x3FFF0000) >> 16); + pktwords = (pktlength + NET_IP_ALIGN + 3) >> 2; + smsc911x_rx_counterrors(dev, rxstat); + + if (unlikely(rxstat & RX_STS_ES_)) { + SMSC_WARN(pdata, rx_err, + "Discarding packet with error bit set"); + /* Packet has an error, discard it and continue with + * the next */ + smsc911x_rx_fastforward(pdata, pktwords); + dev->stats.rx_dropped++; + continue; + } + + skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); + if (unlikely(!skb)) { + SMSC_WARN(pdata, rx_err, + "Unable to allocate skb for rx packet"); + /* Drop the packet and stop this polling iteration */ + smsc911x_rx_fastforward(pdata, pktwords); + dev->stats.rx_dropped++; + break; + } + + skb->data = skb->head; + skb_reset_tail_pointer(skb); + + /* Align IP on 16B boundary */ + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, pktlength - 4); + pdata->ops->rx_readfifo(pdata, + (unsigned int *)skb->head, pktwords); + skb->protocol = eth_type_trans(skb, dev); + skb_checksum_none_assert(skb); + netif_receive_skb(skb); + + /* Update counters */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += (pktlength - 4); + } + + /* Return total received packets */ + return npackets; +} + +/* Returns hash bit number for given MAC address + * Example: + * 01 00 5E 00 00 01 -> returns bit number 31 */ +static unsigned int smsc911x_hash(char addr[ETH_ALEN]) +{ + return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f; +} + +static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata) +{ + /* Performs the multicast & mac_cr update. This is called when + * safe on the current hardware, and with the mac_lock held */ + unsigned int mac_cr; + + SMSC_ASSERT_MAC_LOCK(pdata); + + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + mac_cr |= pdata->set_bits_mask; + mac_cr &= ~(pdata->clear_bits_mask); + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + smsc911x_mac_write(pdata, HASHH, pdata->hashhi); + smsc911x_mac_write(pdata, HASHL, pdata->hashlo); + SMSC_TRACE(pdata, hw, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X", + mac_cr, pdata->hashhi, pdata->hashlo); +} + +static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) +{ + unsigned int mac_cr; + + /* This function is only called for older LAN911x devices + * (revA or revB), where MAC_CR, HASHH and HASHL should not + * be modified during Rx - newer devices immediately update the + * registers. 
+ * + * This is called from interrupt context */ + + spin_lock(&pdata->mac_lock); + + /* Check Rx has stopped */ + if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_) + SMSC_WARN(pdata, drv, "Rx not stopped"); + + /* Perform the update - safe to do now Rx has stopped */ + smsc911x_rx_multicast_update(pdata); + + /* Re-enable Rx */ + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + mac_cr |= MAC_CR_RXEN_; + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + + pdata->multicast_update_pending = 0; + + spin_unlock(&pdata->mac_lock); +} + +static int smsc911x_soft_reset(struct smsc911x_data *pdata) +{ + unsigned int timeout; + unsigned int temp; + + /* Reset the LAN911x */ + smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_); + timeout = 10; + do { + udelay(10); + temp = smsc911x_reg_read(pdata, HW_CFG); + } while ((--timeout) && (temp & HW_CFG_SRST_)); + + if (unlikely(temp & HW_CFG_SRST_)) { + SMSC_WARN(pdata, drv, "Failed to complete reset"); + return -EIO; + } + return 0; +} + +/* Sets the device MAC address to dev_addr, called with mac_lock held */ +static void +smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6]) +{ + u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; + u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + + SMSC_ASSERT_MAC_LOCK(pdata); + + smsc911x_mac_write(pdata, ADDRH, mac_high16); + smsc911x_mac_write(pdata, ADDRL, mac_low32); +} + +static int smsc911x_open(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int timeout; + unsigned int temp; + unsigned int intcfg; + + /* if the phy is not yet registered, retry later*/ + if (!pdata->phy_dev) { + SMSC_WARN(pdata, hw, "phy_dev is NULL"); + return -EAGAIN; + } + + if (!is_valid_ether_addr(dev->dev_addr)) { + SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address"); + return -EADDRNOTAVAIL; + } + + /* Reset the LAN911x */ + if (smsc911x_soft_reset(pdata)) { + SMSC_WARN(pdata, hw, "soft reset failed"); + return -EIO; + } + + smsc911x_reg_write(pdata, HW_CFG, 0x00050000); + smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); + + /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */ + spin_lock_irq(&pdata->mac_lock); + smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q); + spin_unlock_irq(&pdata->mac_lock); + + /* Make sure EEPROM has finished loading before setting GPIO_CFG */ + timeout = 50; + while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && + --timeout) { + udelay(10); + } + + if (unlikely(timeout == 0)) + SMSC_WARN(pdata, ifup, + "Timed out waiting for EEPROM busy bit to clear"); + + smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000); + + /* The soft reset above cleared the device's MAC address, + * restore it from local copy (set in probe) */ + spin_lock_irq(&pdata->mac_lock); + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + spin_unlock_irq(&pdata->mac_lock); + + /* Initialise irqs, but leave all sources disabled */ + smsc911x_reg_write(pdata, INT_EN, 0); + smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); + + /* Set interrupt deassertion to 100uS */ + intcfg = ((10 << 24) | INT_CFG_IRQ_EN_); + + if (pdata->config.irq_polarity) { + SMSC_TRACE(pdata, ifup, "irq polarity: active high"); + intcfg |= INT_CFG_IRQ_POL_; + } else { + SMSC_TRACE(pdata, ifup, "irq polarity: active low"); + } + + if (pdata->config.irq_type) { + SMSC_TRACE(pdata, ifup, "irq type: push-pull"); + intcfg |= INT_CFG_IRQ_TYPE_; + } else { + SMSC_TRACE(pdata, ifup, "irq type: open drain"); + } + + smsc911x_reg_write(pdata, INT_CFG, 
intcfg); + + SMSC_TRACE(pdata, ifup, "Testing irq handler using IRQ %d", dev->irq); + pdata->software_irq_signal = 0; + smp_wmb(); + + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= INT_EN_SW_INT_EN_; + smsc911x_reg_write(pdata, INT_EN, temp); + + timeout = 1000; + while (timeout--) { + if (pdata->software_irq_signal) + break; + msleep(1); + } + + if (!pdata->software_irq_signal) { + netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", + dev->irq); + return -ENODEV; + } + SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", + dev->irq); + + netdev_info(dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n", + (unsigned long)pdata->ioaddr, dev->irq); + + /* Reset the last known duplex and carrier */ + pdata->last_duplex = -1; + pdata->last_carrier = -1; + + /* Bring the PHY up */ + phy_start(pdata->phy_dev); + + temp = smsc911x_reg_read(pdata, HW_CFG); + /* Preserve TX FIFO size and external PHY configuration */ + temp &= (HW_CFG_TX_FIF_SZ_|0x00000FFF); + temp |= HW_CFG_SF_; + smsc911x_reg_write(pdata, HW_CFG, temp); + + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp |= FIFO_INT_TX_AVAIL_LEVEL_; + temp &= ~(FIFO_INT_RX_STS_LEVEL_); + smsc911x_reg_write(pdata, FIFO_INT, temp); + + /* set RX Data offset to 2 bytes for alignment */ + smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); + + /* enable NAPI polling before enabling RX interrupts */ + napi_enable(&pdata->napi); + + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_ | INT_EN_RXSTOP_INT_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + + spin_lock_irq(&pdata->mac_lock); + temp = smsc911x_mac_read(pdata, MAC_CR); + temp |= (MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); + smsc911x_mac_write(pdata, MAC_CR, temp); + spin_unlock_irq(&pdata->mac_lock); + + smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_); + + netif_start_queue(dev); + return 0; +} + +/* Entry point for stopping the interface */ +static int smsc911x_stop(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int temp; + + /* Disable all device interrupts */ + temp = smsc911x_reg_read(pdata, INT_CFG); + temp &= ~INT_CFG_IRQ_EN_; + smsc911x_reg_write(pdata, INT_CFG, temp); + + /* Stop Tx and Rx polling */ + netif_stop_queue(dev); + napi_disable(&pdata->napi); + + /* At this point all Rx and Tx activity is stopped */ + dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); + smsc911x_tx_update_txcounters(dev); + + /* Bring the PHY down */ + if (pdata->phy_dev) + phy_stop(pdata->phy_dev); + + SMSC_TRACE(pdata, ifdown, "Interface stopped"); + return 0; +} + +/* Entry point for transmitting a packet */ +static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int freespace; + unsigned int tx_cmd_a; + unsigned int tx_cmd_b; + unsigned int temp; + u32 wrsz; + ulong bufp; + + freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_; + + if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD)) + SMSC_WARN(pdata, tx_err, + "Tx data fifo low, space available: %d", freespace); + + /* Word alignment adjustment */ + tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16; + tx_cmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; + tx_cmd_a |= (unsigned int)skb->len; + + tx_cmd_b = ((unsigned int)skb->len) << 16; + tx_cmd_b |= (unsigned int)skb->len; + + smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_a); + smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_b); + + bufp = (ulong)skb->data & (~0x3); + wrsz = (u32)skb->len + 3; 
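+ /* Round the transfer up to whole 32-bit words: the FIFO write starts
+ * at the word-aligned address bufp, so the in-word offset of skb->data
+ * is added before dividing by four. For example, a 60-byte frame whose
+ * data starts 2 bytes into a word needs (60 + 3 + 2) >> 2 = 16 words,
+ * i.e. all 64 bytes from bufp. */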
+ wrsz += (u32)((ulong)skb->data & 0x3); + wrsz >>= 2; + + pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); + freespace -= (skb->len + 32); + skb_tx_timestamp(skb); + dev_kfree_skb(skb); + + if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30)) + smsc911x_tx_update_txcounters(dev); + + if (freespace < TX_FIFO_LOW_THRESHOLD) { + netif_stop_queue(dev); + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp &= 0x00FFFFFF; + temp |= 0x32000000; + smsc911x_reg_write(pdata, FIFO_INT, temp); + } + + return NETDEV_TX_OK; +} + +/* Entry point for getting status counters */ +static struct net_device_stats *smsc911x_get_stats(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + smsc911x_tx_update_txcounters(dev); + dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); + return &dev->stats; +} + +/* Entry point for setting addressing modes */ +static void smsc911x_set_multicast_list(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned long flags; + + if (dev->flags & IFF_PROMISC) { + /* Enabling promiscuous mode */ + pdata->set_bits_mask = MAC_CR_PRMS_; + pdata->clear_bits_mask = (MAC_CR_MCPAS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } else if (dev->flags & IFF_ALLMULTI) { + /* Enabling all multicast mode */ + pdata->set_bits_mask = MAC_CR_MCPAS_; + pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } else if (!netdev_mc_empty(dev)) { + /* Enabling specific multicast addresses */ + unsigned int hash_high = 0; + unsigned int hash_low = 0; + struct netdev_hw_addr *ha; + + pdata->set_bits_mask = MAC_CR_HPFILT_; + pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_); + + netdev_for_each_mc_addr(ha, dev) { + unsigned int bitnum = smsc911x_hash(ha->addr); + unsigned int mask = 0x01 << (bitnum & 0x1F); + + if (bitnum & 0x20) + hash_high |= mask; + else + hash_low |= mask; + } + + pdata->hashhi = hash_high; + pdata->hashlo = hash_low; + } else { + /* Enabling local MAC address only */ + pdata->set_bits_mask = 0; + pdata->clear_bits_mask = + (MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } + + spin_lock_irqsave(&pdata->mac_lock, flags); + + if (pdata->generation <= 1) { + /* Older hardware revision - cannot change these flags while + * receiving data */ + if (!pdata->multicast_update_pending) { + unsigned int temp; + SMSC_TRACE(pdata, hw, "scheduling mcast update"); + pdata->multicast_update_pending = 1; + + /* Request the hardware to stop, then perform the + * update when we get an RX_STOP interrupt */ + temp = smsc911x_mac_read(pdata, MAC_CR); + temp &= ~(MAC_CR_RXEN_); + smsc911x_mac_write(pdata, MAC_CR, temp); + } else { + /* There is another update pending, this should now + * use the newer values */ + } + } else { + /* Newer hardware revision - can write immediately */ + smsc911x_rx_multicast_update(pdata); + } + + spin_unlock_irqrestore(&pdata->mac_lock, flags); +} + +static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smsc911x_data *pdata = netdev_priv(dev); + u32 intsts = smsc911x_reg_read(pdata, INT_STS); + u32 inten = smsc911x_reg_read(pdata, INT_EN); + int serviced = IRQ_NONE; + u32 temp; + + if (unlikely(intsts & inten & INT_STS_SW_INT_)) { + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_SW_INT_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); + pdata->software_irq_signal = 1; 
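+ /* The barrier below publishes software_irq_signal before the handler
+ * returns; smsc911x_open() polls this flag (after raising the software
+ * interrupt) to verify that this handler is attached to the IRQ line. */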
+ smp_wmb(); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { + /* Called when there is a multicast update scheduled and + * it is now safe to complete the update */ + SMSC_TRACE(pdata, intr, "RX Stop interrupt"); + smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); + if (pdata->multicast_update_pending) + smsc911x_rx_multicast_update_workaround(pdata); + serviced = IRQ_HANDLED; + } + + if (intsts & inten & INT_STS_TDFA_) { + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp |= FIFO_INT_TX_AVAIL_LEVEL_; + smsc911x_reg_write(pdata, FIFO_INT, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); + netif_wake_queue(dev); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXE_)) { + SMSC_TRACE(pdata, intr, "RX Error interrupt"); + smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); + serviced = IRQ_HANDLED; + } + + if (likely(intsts & inten & INT_STS_RSFL_)) { + if (likely(napi_schedule_prep(&pdata->napi))) { + /* Disable Rx interrupts */ + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_RSFL_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + /* Schedule a NAPI poll */ + __napi_schedule(&pdata->napi); + } else { + SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); + } + serviced = IRQ_HANDLED; + } + + return serviced; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void smsc911x_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + smsc911x_irqhandler(0, dev); + enable_irq(dev->irq); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int smsc911x_set_mac_address(struct net_device *dev, void *p) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct sockaddr *addr = p; + + /* On older hardware revisions we cannot change the mac address + * registers while receiving data. Newer devices can safely change + * this at any time. 
*/ + if (pdata->generation <= 1 && netif_running(dev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + spin_lock_irq(&pdata->mac_lock); + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + spin_unlock_irq(&pdata->mac_lock); + + netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr); + + return 0; +} + +/* Standard ioctls for mii-tool */ +static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + if (!netif_running(dev) || !pdata->phy_dev) + return -EINVAL; + + return phy_mii_ioctl(pdata->phy_dev, ifr, cmd); +} + +static int +smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + return phy_ethtool_gset(pdata->phy_dev, cmd); +} + +static int +smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + return phy_ethtool_sset(pdata->phy_dev, cmd); +} + +static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver)); + strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); +} + +static int smsc911x_ethtool_nwayreset(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + return phy_start_aneg(pdata->phy_dev); +} + +static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + return pdata->msg_enable; +} + +static void smsc911x_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + pdata->msg_enable = level; +} + +static int smsc911x_ethtool_getregslen(struct net_device *dev) +{ + return (((E2P_DATA - ID_REV) / 4 + 1) + (WUCSR - MAC_CR) + 1 + 32) * + sizeof(u32); +} + +static void +smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + unsigned long flags; + unsigned int i; + unsigned int j = 0; + u32 *data = buf; + + regs->version = pdata->idrev; + for (i = ID_REV; i <= E2P_DATA; i += (sizeof(u32))) + data[j++] = smsc911x_reg_read(pdata, i); + + for (i = MAC_CR; i <= WUCSR; i++) { + spin_lock_irqsave(&pdata->mac_lock, flags); + data[j++] = smsc911x_mac_read(pdata, i); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + } + + for (i = 0; i <= 31; i++) + data[j++] = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, i); +} + +static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata) +{ + unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG); + temp &= ~GPIO_CFG_EEPR_EN_; + smsc911x_reg_write(pdata, GPIO_CFG, temp); + msleep(1); +} + +static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op) +{ + int timeout = 100; + u32 e2cmd; + + SMSC_TRACE(pdata, drv, "op 0x%08x", op); + if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) { + SMSC_WARN(pdata, drv, "Busy at start"); + return -EBUSY; + } + + e2cmd = op | E2P_CMD_EPC_BUSY_; + smsc911x_reg_write(pdata, E2P_CMD, e2cmd); + + do { + msleep(1); + e2cmd = smsc911x_reg_read(pdata, E2P_CMD); + } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); + + if (!timeout) { + SMSC_TRACE(pdata, drv, "TIMED OUT"); 
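+ /* Roughly 100ms have elapsed (100 polls with msleep(1) between reads);
+ * -EAGAIN signals the caller that the EEPROM command may be retried. */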
+ return -EAGAIN; + } + + if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { + SMSC_TRACE(pdata, drv, "Error occurred during eeprom operation"); + return -EINVAL; + } + + return 0; +} + +static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata, + u8 address, u8 *data) +{ + u32 op = E2P_CMD_EPC_CMD_READ_ | address; + int ret; + + SMSC_TRACE(pdata, drv, "address 0x%x", address); + ret = smsc911x_eeprom_send_cmd(pdata, op); + + if (!ret) + data[address] = smsc911x_reg_read(pdata, E2P_DATA); + + return ret; +} + +static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata, + u8 address, u8 data) +{ + u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; + u32 temp; + int ret; + + SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data); + ret = smsc911x_eeprom_send_cmd(pdata, op); + + if (!ret) { + op = E2P_CMD_EPC_CMD_WRITE_ | address; + smsc911x_reg_write(pdata, E2P_DATA, (u32)data); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + ret = smsc911x_eeprom_send_cmd(pdata, op); + } + + return ret; +} + +static int smsc911x_ethtool_get_eeprom_len(struct net_device *dev) +{ + return SMSC911X_EEPROM_SIZE; +} + +static int smsc911x_ethtool_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + u8 eeprom_data[SMSC911X_EEPROM_SIZE]; + int len; + int i; + + smsc911x_eeprom_enable_access(pdata); + + len = min(eeprom->len, SMSC911X_EEPROM_SIZE); + for (i = 0; i < len; i++) { + int ret = smsc911x_eeprom_read_location(pdata, i, eeprom_data); + if (ret < 0) { + eeprom->len = 0; + return ret; + } + } + + memcpy(data, &eeprom_data[eeprom->offset], len); + eeprom->len = len; + return 0; +} + +static int smsc911x_ethtool_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int ret; + struct smsc911x_data *pdata = netdev_priv(dev); + + smsc911x_eeprom_enable_access(pdata); + smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWEN_); + ret = smsc911x_eeprom_write_location(pdata, eeprom->offset, *data); + smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWDS_); + + /* Single byte write, according to man page */ + eeprom->len = 1; + + return ret; +} + +static const struct ethtool_ops smsc911x_ethtool_ops = { + .get_settings = smsc911x_ethtool_getsettings, + .set_settings = smsc911x_ethtool_setsettings, + .get_link = ethtool_op_get_link, + .get_drvinfo = smsc911x_ethtool_getdrvinfo, + .nway_reset = smsc911x_ethtool_nwayreset, + .get_msglevel = smsc911x_ethtool_getmsglevel, + .set_msglevel = smsc911x_ethtool_setmsglevel, + .get_regs_len = smsc911x_ethtool_getregslen, + .get_regs = smsc911x_ethtool_getregs, + .get_eeprom_len = smsc911x_ethtool_get_eeprom_len, + .get_eeprom = smsc911x_ethtool_get_eeprom, + .set_eeprom = smsc911x_ethtool_set_eeprom, +}; + +static const struct net_device_ops smsc911x_netdev_ops = { + .ndo_open = smsc911x_open, + .ndo_stop = smsc911x_stop, + .ndo_start_xmit = smsc911x_hard_start_xmit, + .ndo_get_stats = smsc911x_get_stats, + .ndo_set_multicast_list = smsc911x_set_multicast_list, + .ndo_do_ioctl = smsc911x_do_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = smsc911x_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = smsc911x_poll_controller, +#endif +}; + +/* copies the current mac address from hardware to dev->dev_addr */ +static void __devinit smsc911x_read_mac_address(struct net_device *dev) +{ + struct smsc911x_data *pdata = 
netdev_priv(dev); + u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); + u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); + + dev->dev_addr[0] = (u8)(mac_low32); + dev->dev_addr[1] = (u8)(mac_low32 >> 8); + dev->dev_addr[2] = (u8)(mac_low32 >> 16); + dev->dev_addr[3] = (u8)(mac_low32 >> 24); + dev->dev_addr[4] = (u8)(mac_high16); + dev->dev_addr[5] = (u8)(mac_high16 >> 8); +} + +/* Initializing private device structures, only called from probe */ +static int __devinit smsc911x_init(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int byte_test; + + SMSC_TRACE(pdata, probe, "Driver Parameters:"); + SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX", + (unsigned long)pdata->ioaddr); + SMSC_TRACE(pdata, probe, "IRQ: %d", dev->irq); + SMSC_TRACE(pdata, probe, "PHY will be autodetected."); + + spin_lock_init(&pdata->dev_lock); + spin_lock_init(&pdata->mac_lock); + + if (pdata->ioaddr == 0) { + SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000"); + return -ENODEV; + } + + /* Check byte ordering */ + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test); + if (byte_test == 0x43218765) { + SMSC_TRACE(pdata, probe, "BYTE_TEST looks swapped, " + "applying WORD_SWAP"); + smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff); + + /* 1 dummy read of BYTE_TEST is needed after a write to + * WORD_SWAP before its contents are valid */ + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + } + + if (byte_test != 0x87654321) { + SMSC_WARN(pdata, drv, "BYTE_TEST: 0x%08X", byte_test); + if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) { + SMSC_WARN(pdata, probe, + "top 16 bits equal to bottom 16 bits"); + SMSC_TRACE(pdata, probe, + "This may mean the chip is set " + "for 32 bit while the bus is reading 16 bit"); + } + return -ENODEV; + } + + /* Default generation to zero (all workarounds apply) */ + pdata->generation = 0; + + pdata->idrev = smsc911x_reg_read(pdata, ID_REV); + switch (pdata->idrev & 0xFFFF0000) { + case 0x01180000: + case 0x01170000: + case 0x01160000: + case 0x01150000: + /* LAN911[5678] family */ + pdata->generation = pdata->idrev & 0x0000FFFF; + break; + + case 0x118A0000: + case 0x117A0000: + case 0x116A0000: + case 0x115A0000: + /* LAN921[5678] family */ + pdata->generation = 3; + break; + + case 0x92100000: + case 0x92110000: + case 0x92200000: + case 0x92210000: + /* LAN9210/LAN9211/LAN9220/LAN9221 */ + pdata->generation = 4; + break; + + default: + SMSC_WARN(pdata, probe, "LAN911x not identified, idrev: 0x%08X", + pdata->idrev); + return -ENODEV; + } + + SMSC_TRACE(pdata, probe, + "LAN911x identified, idrev: 0x%08X, generation: %d", + pdata->idrev, pdata->generation); + + if (pdata->generation == 0) + SMSC_WARN(pdata, probe, + "This driver is not intended for this chip revision"); + + /* workaround for platforms without an eeprom, where the mac address + * is stored elsewhere and set by the bootloader. 
This saves the
+ * mac address before resetting the device */
+ if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
+ spin_lock_irq(&pdata->mac_lock);
+ smsc911x_read_mac_address(dev);
+ spin_unlock_irq(&pdata->mac_lock);
+ }
+
+ /* Reset the LAN911x */
+ if (smsc911x_soft_reset(pdata))
+ return -ENODEV;
+
+ /* Disable all interrupt sources until we bring the device up */
+ smsc911x_reg_write(pdata, INT_EN, 0);
+
+ ether_setup(dev);
+ dev->flags |= IFF_MULTICAST;
+ netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
+ dev->netdev_ops = &smsc911x_netdev_ops;
+ dev->ethtool_ops = &smsc911x_ethtool_ops;
+
+ return 0;
+}
+
+static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct smsc911x_data *pdata;
+ struct resource *res;
+
+ dev = platform_get_drvdata(pdev);
+ BUG_ON(!dev);
+ pdata = netdev_priv(dev);
+ BUG_ON(!pdata);
+ BUG_ON(!pdata->ioaddr);
+ BUG_ON(!pdata->phy_dev);
+
+ SMSC_TRACE(pdata, ifdown, "Stopping driver");
+
+ phy_disconnect(pdata->phy_dev);
+ pdata->phy_dev = NULL;
+ mdiobus_unregister(pdata->mii_bus);
+ mdiobus_free(pdata->mii_bus);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smsc911x-memory");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ release_mem_region(res->start, resource_size(res));
+
+ iounmap(pdata->ioaddr);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+/* standard register access */
+static const struct smsc911x_ops standard_smsc911x_ops = {
+ .reg_read = __smsc911x_reg_read,
+ .reg_write = __smsc911x_reg_write,
+ .rx_readfifo = smsc911x_rx_readfifo,
+ .tx_writefifo = smsc911x_tx_writefifo,
+};
+
+/* shifted register access */
+static const struct smsc911x_ops shifted_smsc911x_ops = {
+ .reg_read = __smsc911x_reg_read_shift,
+ .reg_write = __smsc911x_reg_write_shift,
+ .rx_readfifo = smsc911x_rx_readfifo_shift,
+ .tx_writefifo = smsc911x_tx_writefifo_shift,
+};
+
+#ifdef CONFIG_OF
+static int __devinit smsc911x_probe_config_dt(
+ struct smsc911x_platform_config *config,
+ struct device_node *np)
+{
+ const char *mac;
+ u32 width = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ config->phy_interface = of_get_phy_mode(np);
+
+ mac = of_get_mac_address(np);
+ if (mac)
+ memcpy(config->mac, mac, ETH_ALEN);
+
+ of_property_read_u32(np, "reg-shift", &config->shift);
+
+ of_property_read_u32(np, "reg-io-width", &width);
+ if (width == 4)
+ config->flags |= SMSC911X_USE_32BIT;
+
+ if (of_get_property(np, "smsc,irq-active-high", NULL))
+ config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
+
+ if (of_get_property(np, "smsc,irq-push-pull", NULL))
+ config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL;
+
+ if (of_get_property(np, "smsc,force-internal-phy", NULL))
+ config->flags |= SMSC911X_FORCE_INTERNAL_PHY;
+
+ if (of_get_property(np, "smsc,force-external-phy", NULL))
+ config->flags |= SMSC911X_FORCE_EXTERNAL_PHY;
+
+ if (of_get_property(np, "smsc,save-mac-address", NULL))
+ config->flags |= SMSC911X_SAVE_MAC_ADDRESS;
+
+ return 0;
+}
+#else
+static inline int smsc911x_probe_config_dt(
+ struct smsc911x_platform_config *config,
+ struct device_node *np)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_OF */
+
+static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct net_device *dev;
+ struct smsc911x_data *pdata;
+ struct smsc911x_platform_config *config = pdev->dev.platform_data;
+ struct resource *res, *irq_res;
+
unsigned int intcfg = 0; + int res_size, irq_flags; + int retval; + + pr_info("Driver version %s\n", SMSC_DRV_VERSION); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "smsc911x-memory"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + pr_warn("Could not allocate resource\n"); + retval = -ENODEV; + goto out_0; + } + res_size = resource_size(res); + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq_res) { + pr_warn("Could not allocate irq resource\n"); + retval = -ENODEV; + goto out_0; + } + + if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) { + retval = -EBUSY; + goto out_0; + } + + dev = alloc_etherdev(sizeof(struct smsc911x_data)); + if (!dev) { + pr_warn("Could not allocate device\n"); + retval = -ENOMEM; + goto out_release_io_1; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + pdata = netdev_priv(dev); + + dev->irq = irq_res->start; + irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; + pdata->ioaddr = ioremap_nocache(res->start, res_size); + + pdata->dev = dev; + pdata->msg_enable = ((1 << debug) - 1); + + if (pdata->ioaddr == NULL) { + SMSC_WARN(pdata, probe, "Error smsc911x base address invalid"); + retval = -ENOMEM; + goto out_free_netdev_2; + } + + retval = smsc911x_probe_config_dt(&pdata->config, np); + if (retval && config) { + /* copy config parameters across to pdata */ + memcpy(&pdata->config, config, sizeof(pdata->config)); + retval = 0; + } + + if (retval) { + SMSC_WARN(pdata, probe, "Error smsc911x config not found"); + goto out_unmap_io_3; + } + + /* assume standard, non-shifted, access to HW registers */ + pdata->ops = &standard_smsc911x_ops; + /* apply the right access if shifting is needed */ + if (pdata->config.shift) + pdata->ops = &shifted_smsc911x_ops; + + retval = smsc911x_init(dev); + if (retval < 0) + goto out_unmap_io_3; + + /* configure irq polarity and type before connecting isr */ + if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH) + intcfg |= INT_CFG_IRQ_POL_; + + if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL) + intcfg |= INT_CFG_IRQ_TYPE_; + + smsc911x_reg_write(pdata, INT_CFG, intcfg); + + /* Ensure interrupts are globally disabled before connecting ISR */ + smsc911x_reg_write(pdata, INT_EN, 0); + smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); + + retval = request_irq(dev->irq, smsc911x_irqhandler, + irq_flags | IRQF_SHARED, dev->name, dev); + if (retval) { + SMSC_WARN(pdata, probe, + "Unable to claim requested irq: %d", dev->irq); + goto out_unmap_io_3; + } + + platform_set_drvdata(pdev, dev); + + retval = register_netdev(dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i registering device", retval); + goto out_unset_drvdata_4; + } else { + SMSC_TRACE(pdata, probe, + "Network interface: \"%s\"", dev->name); + } + + retval = smsc911x_mii_init(pdev, dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); + goto out_unregister_netdev_5; + } + + spin_lock_irq(&pdata->mac_lock); + + /* Check if mac address has been specified when bringing interface up */ + if (is_valid_ether_addr(dev->dev_addr)) { + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + SMSC_TRACE(pdata, probe, + "MAC Address is specified by configuration"); + } else if (is_valid_ether_addr(pdata->config.mac)) { + memcpy(dev->dev_addr, pdata->config.mac, 6); + SMSC_TRACE(pdata, probe, + "MAC Address specified by platform data"); + } else { + /* Try reading mac address from device. 
If EEPROM is present
+ * it will already have been set */
+ smsc_get_mac(dev);
+
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ /* eeprom values are valid so use them */
+ SMSC_TRACE(pdata, probe,
+ "Mac Address is read from LAN911x EEPROM");
+ } else {
+ /* eeprom values are invalid, generate random MAC */
+ random_ether_addr(dev->dev_addr);
+ smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
+ SMSC_TRACE(pdata, probe,
+ "MAC Address is set to random_ether_addr");
+ }
+ }
+
+ spin_unlock_irq(&pdata->mac_lock);
+
+ netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
+
+ return 0;
+
+out_unregister_netdev_5:
+ unregister_netdev(dev);
+out_unset_drvdata_4:
+ platform_set_drvdata(pdev, NULL);
+ free_irq(dev->irq, dev);
+out_unmap_io_3:
+ iounmap(pdata->ioaddr);
+out_free_netdev_2:
+ free_netdev(dev);
+out_release_io_1:
+ release_mem_region(res->start, resource_size(res));
+out_0:
+ return retval;
+}
+
+#ifdef CONFIG_PM
+/* This implementation assumes the device remains powered on its VDDVARIO
+ * pins during suspend. */
+
+/* TODO: implement freeze/thaw callbacks for hibernation. */
+
+static int smsc911x_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+
+ /* enable wake on LAN, energy detection and the external PME
+ * signal. */
+ smsc911x_reg_write(pdata, PMT_CTRL,
+ PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
+ PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
+
+ return 0;
+}
+
+static int smsc911x_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ unsigned int to = 100;
+
+ /* Note 3.11 from the datasheet:
+ * "When the LAN9220 is in a power saving state, a write of any
+ * data to the BYTE_TEST register will wake-up the device."
+ */
+ smsc911x_reg_write(pdata, BYTE_TEST, 0);
+
+ /* poll the READY bit in PMT_CTRL. Any other access to the device is
+ * forbidden while this bit isn't set. Try for 100ms and return -EIO
+ * if it failed. */
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+ udelay(1000);
+
+ return (to == 0) ?
-EIO : 0; +} + +static const struct dev_pm_ops smsc911x_pm_ops = { + .suspend = smsc911x_suspend, + .resume = smsc911x_resume, +}; + +#define SMSC911X_PM_OPS (&smsc911x_pm_ops) + +#else +#define SMSC911X_PM_OPS NULL +#endif + +static const struct of_device_id smsc911x_dt_ids[] = { + { .compatible = "smsc,lan9115", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, smsc911x_dt_ids); + +static struct platform_driver smsc911x_driver = { + .probe = smsc911x_drv_probe, + .remove = __devexit_p(smsc911x_drv_remove), + .driver = { + .name = SMSC_CHIPNAME, + .owner = THIS_MODULE, + .pm = SMSC911X_PM_OPS, + .of_match_table = smsc911x_dt_ids, + }, +}; + +/* Entry point for loading the module */ +static int __init smsc911x_init_module(void) +{ + SMSC_INITIALIZE(); + return platform_driver_register(&smsc911x_driver); +} + +/* entry point for unloading the module */ +static void __exit smsc911x_cleanup_module(void) +{ + platform_driver_unregister(&smsc911x_driver); +} + +module_init(smsc911x_init_module); +module_exit(smsc911x_cleanup_module); diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h new file mode 100644 index 0000000..8d67aac --- /dev/null +++ b/drivers/net/ethernet/smsc/smsc911x.h @@ -0,0 +1,404 @@ +/*************************************************************************** + * + * Copyright (C) 2004-2008 SMSC + * Copyright (C) 2005-2008 ARM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + ***************************************************************************/ +#ifndef __SMSC911X_H__ +#define __SMSC911X_H__ + +#define TX_FIFO_LOW_THRESHOLD ((u32)1600) +#define SMSC911X_EEPROM_SIZE ((u32)128) +#define USE_DEBUG 0 + +/* This is the maximum number of packets to be received every + * NAPI poll */ +#define SMSC_NAPI_WEIGHT 16 + +/* implements a PHY loopback test at initialisation time, to ensure a packet + * can be successfully looped back */ +#define USE_PHY_WORK_AROUND + +#if USE_DEBUG >= 1 +#define SMSC_WARN(pdata, nlevel, fmt, args...) \ + netif_warn(pdata, nlevel, (pdata)->dev, \ + "%s: " fmt "\n", __func__, ##args) +#else +#define SMSC_WARN(pdata, nlevel, fmt, args...) \ + no_printk(fmt "\n", ##args) +#endif + +#if USE_DEBUG >= 2 +#define SMSC_TRACE(pdata, nlevel, fmt, args...) \ + netif_info(pdata, nlevel, pdata->dev, fmt "\n", ##args) +#else +#define SMSC_TRACE(pdata, nlevel, fmt, args...) 
\ + no_printk(fmt "\n", ##args) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +#define SMSC_ASSERT_MAC_LOCK(pdata) \ + WARN_ON(!spin_is_locked(&pdata->mac_lock)) +#else +#define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0) +#endif /* CONFIG_DEBUG_SPINLOCK */ + +/* SMSC911x registers and bitfields */ +#define RX_DATA_FIFO 0x00 + +#define TX_DATA_FIFO 0x20 +#define TX_CMD_A_ON_COMP_ 0x80000000 +#define TX_CMD_A_BUF_END_ALGN_ 0x03000000 +#define TX_CMD_A_4_BYTE_ALGN_ 0x00000000 +#define TX_CMD_A_16_BYTE_ALGN_ 0x01000000 +#define TX_CMD_A_32_BYTE_ALGN_ 0x02000000 +#define TX_CMD_A_DATA_OFFSET_ 0x001F0000 +#define TX_CMD_A_FIRST_SEG_ 0x00002000 +#define TX_CMD_A_LAST_SEG_ 0x00001000 +#define TX_CMD_A_BUF_SIZE_ 0x000007FF +#define TX_CMD_B_PKT_TAG_ 0xFFFF0000 +#define TX_CMD_B_ADD_CRC_DISABLE_ 0x00002000 +#define TX_CMD_B_DISABLE_PADDING_ 0x00001000 +#define TX_CMD_B_PKT_BYTE_LENGTH_ 0x000007FF + +#define RX_STATUS_FIFO 0x40 +#define RX_STS_ES_ 0x00008000 +#define RX_STS_LENGTH_ERR_ 0x00001000 +#define RX_STS_MCAST_ 0x00000400 +#define RX_STS_FRAME_TYPE_ 0x00000020 +#define RX_STS_CRC_ERR_ 0x00000002 + +#define RX_STATUS_FIFO_PEEK 0x44 + +#define TX_STATUS_FIFO 0x48 +#define TX_STS_ES_ 0x00008000 +#define TX_STS_LOST_CARRIER_ 0x00000800 +#define TX_STS_NO_CARRIER_ 0x00000400 +#define TX_STS_LATE_COL_ 0x00000200 +#define TX_STS_EXCESS_COL_ 0x00000100 + +#define TX_STATUS_FIFO_PEEK 0x4C + +#define ID_REV 0x50 +#define ID_REV_CHIP_ID_ 0xFFFF0000 +#define ID_REV_REV_ID_ 0x0000FFFF + +#define INT_CFG 0x54 +#define INT_CFG_INT_DEAS_ 0xFF000000 +#define INT_CFG_INT_DEAS_CLR_ 0x00004000 +#define INT_CFG_INT_DEAS_STS_ 0x00002000 +#define INT_CFG_IRQ_INT_ 0x00001000 +#define INT_CFG_IRQ_EN_ 0x00000100 +#define INT_CFG_IRQ_POL_ 0x00000010 +#define INT_CFG_IRQ_TYPE_ 0x00000001 + +#define INT_STS 0x58 +#define INT_STS_SW_INT_ 0x80000000 +#define INT_STS_TXSTOP_INT_ 0x02000000 +#define INT_STS_RXSTOP_INT_ 0x01000000 +#define INT_STS_RXDFH_INT_ 0x00800000 +#define INT_STS_RXDF_INT_ 0x00400000 +#define INT_STS_TX_IOC_ 0x00200000 +#define INT_STS_RXD_INT_ 0x00100000 +#define INT_STS_GPT_INT_ 0x00080000 +#define INT_STS_PHY_INT_ 0x00040000 +#define INT_STS_PME_INT_ 0x00020000 +#define INT_STS_TXSO_ 0x00010000 +#define INT_STS_RWT_ 0x00008000 +#define INT_STS_RXE_ 0x00004000 +#define INT_STS_TXE_ 0x00002000 +#define INT_STS_TDFU_ 0x00000800 +#define INT_STS_TDFO_ 0x00000400 +#define INT_STS_TDFA_ 0x00000200 +#define INT_STS_TSFF_ 0x00000100 +#define INT_STS_TSFL_ 0x00000080 +#define INT_STS_RXDF_ 0x00000040 +#define INT_STS_RDFL_ 0x00000020 +#define INT_STS_RSFF_ 0x00000010 +#define INT_STS_RSFL_ 0x00000008 +#define INT_STS_GPIO2_INT_ 0x00000004 +#define INT_STS_GPIO1_INT_ 0x00000002 +#define INT_STS_GPIO0_INT_ 0x00000001 + +#define INT_EN 0x5C +#define INT_EN_SW_INT_EN_ 0x80000000 +#define INT_EN_TXSTOP_INT_EN_ 0x02000000 +#define INT_EN_RXSTOP_INT_EN_ 0x01000000 +#define INT_EN_RXDFH_INT_EN_ 0x00800000 +#define INT_EN_TIOC_INT_EN_ 0x00200000 +#define INT_EN_RXD_INT_EN_ 0x00100000 +#define INT_EN_GPT_INT_EN_ 0x00080000 +#define INT_EN_PHY_INT_EN_ 0x00040000 +#define INT_EN_PME_INT_EN_ 0x00020000 +#define INT_EN_TXSO_EN_ 0x00010000 +#define INT_EN_RWT_EN_ 0x00008000 +#define INT_EN_RXE_EN_ 0x00004000 +#define INT_EN_TXE_EN_ 0x00002000 +#define INT_EN_TDFU_EN_ 0x00000800 +#define INT_EN_TDFO_EN_ 0x00000400 +#define INT_EN_TDFA_EN_ 0x00000200 +#define INT_EN_TSFF_EN_ 0x00000100 +#define INT_EN_TSFL_EN_ 0x00000080 +#define INT_EN_RXDF_EN_ 0x00000040 +#define INT_EN_RDFL_EN_ 0x00000020 +#define INT_EN_RSFF_EN_ 0x00000010 
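+/* RSFF = RX status FIFO full; RSFL = RX status FIFO has reached the level programmed in FIFO_INT */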
+#define INT_EN_RSFL_EN_ 0x00000008 +#define INT_EN_GPIO2_INT_ 0x00000004 +#define INT_EN_GPIO1_INT_ 0x00000002 +#define INT_EN_GPIO0_INT_ 0x00000001 + +#define BYTE_TEST 0x64 + +#define FIFO_INT 0x68 +#define FIFO_INT_TX_AVAIL_LEVEL_ 0xFF000000 +#define FIFO_INT_TX_STS_LEVEL_ 0x00FF0000 +#define FIFO_INT_RX_AVAIL_LEVEL_ 0x0000FF00 +#define FIFO_INT_RX_STS_LEVEL_ 0x000000FF + +#define RX_CFG 0x6C +#define RX_CFG_RX_END_ALGN_ 0xC0000000 +#define RX_CFG_RX_END_ALGN4_ 0x00000000 +#define RX_CFG_RX_END_ALGN16_ 0x40000000 +#define RX_CFG_RX_END_ALGN32_ 0x80000000 +#define RX_CFG_RX_DMA_CNT_ 0x0FFF0000 +#define RX_CFG_RX_DUMP_ 0x00008000 +#define RX_CFG_RXDOFF_ 0x00001F00 + +#define TX_CFG 0x70 +#define TX_CFG_TXS_DUMP_ 0x00008000 +#define TX_CFG_TXD_DUMP_ 0x00004000 +#define TX_CFG_TXSAO_ 0x00000004 +#define TX_CFG_TX_ON_ 0x00000002 +#define TX_CFG_STOP_TX_ 0x00000001 + +#define HW_CFG 0x74 +#define HW_CFG_TTM_ 0x00200000 +#define HW_CFG_SF_ 0x00100000 +#define HW_CFG_TX_FIF_SZ_ 0x000F0000 +#define HW_CFG_TR_ 0x00003000 +#define HW_CFG_SRST_ 0x00000001 + +/* only available on 115/117 */ +#define HW_CFG_PHY_CLK_SEL_ 0x00000060 +#define HW_CFG_PHY_CLK_SEL_INT_PHY_ 0x00000000 +#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ 0x00000020 +#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ 0x00000040 +#define HW_CFG_SMI_SEL_ 0x00000010 +#define HW_CFG_EXT_PHY_DET_ 0x00000008 +#define HW_CFG_EXT_PHY_EN_ 0x00000004 +#define HW_CFG_SRST_TO_ 0x00000002 + +/* only available on 116/118 */ +#define HW_CFG_32_16_BIT_MODE_ 0x00000004 + +#define RX_DP_CTRL 0x78 +#define RX_DP_CTRL_RX_FFWD_ 0x80000000 + +#define RX_FIFO_INF 0x7C +#define RX_FIFO_INF_RXSUSED_ 0x00FF0000 +#define RX_FIFO_INF_RXDUSED_ 0x0000FFFF + +#define TX_FIFO_INF 0x80 +#define TX_FIFO_INF_TSUSED_ 0x00FF0000 +#define TX_FIFO_INF_TDFREE_ 0x0000FFFF + +#define PMT_CTRL 0x84 +#define PMT_CTRL_PM_MODE_ 0x00003000 +#define PMT_CTRL_PM_MODE_D0_ 0x00000000 +#define PMT_CTRL_PM_MODE_D1_ 0x00001000 +#define PMT_CTRL_PM_MODE_D2_ 0x00002000 +#define PMT_CTRL_PM_MODE_D3_ 0x00003000 +#define PMT_CTRL_PHY_RST_ 0x00000400 +#define PMT_CTRL_WOL_EN_ 0x00000200 +#define PMT_CTRL_ED_EN_ 0x00000100 +#define PMT_CTRL_PME_TYPE_ 0x00000040 +#define PMT_CTRL_WUPS_ 0x00000030 +#define PMT_CTRL_WUPS_NOWAKE_ 0x00000000 +#define PMT_CTRL_WUPS_ED_ 0x00000010 +#define PMT_CTRL_WUPS_WOL_ 0x00000020 +#define PMT_CTRL_WUPS_MULTI_ 0x00000030 +#define PMT_CTRL_PME_IND_ 0x00000008 +#define PMT_CTRL_PME_POL_ 0x00000004 +#define PMT_CTRL_PME_EN_ 0x00000002 +#define PMT_CTRL_READY_ 0x00000001 + +#define GPIO_CFG 0x88 +#define GPIO_CFG_LED3_EN_ 0x40000000 +#define GPIO_CFG_LED2_EN_ 0x20000000 +#define GPIO_CFG_LED1_EN_ 0x10000000 +#define GPIO_CFG_GPIO2_INT_POL_ 0x04000000 +#define GPIO_CFG_GPIO1_INT_POL_ 0x02000000 +#define GPIO_CFG_GPIO0_INT_POL_ 0x01000000 +#define GPIO_CFG_EEPR_EN_ 0x00700000 +#define GPIO_CFG_GPIOBUF2_ 0x00040000 +#define GPIO_CFG_GPIOBUF1_ 0x00020000 +#define GPIO_CFG_GPIOBUF0_ 0x00010000 +#define GPIO_CFG_GPIODIR2_ 0x00000400 +#define GPIO_CFG_GPIODIR1_ 0x00000200 +#define GPIO_CFG_GPIODIR0_ 0x00000100 +#define GPIO_CFG_GPIOD4_ 0x00000020 +#define GPIO_CFG_GPIOD3_ 0x00000010 +#define GPIO_CFG_GPIOD2_ 0x00000004 +#define GPIO_CFG_GPIOD1_ 0x00000002 +#define GPIO_CFG_GPIOD0_ 0x00000001 + +#define GPT_CFG 0x8C +#define GPT_CFG_TIMER_EN_ 0x20000000 +#define GPT_CFG_GPT_LOAD_ 0x0000FFFF + +#define GPT_CNT 0x90 +#define GPT_CNT_GPT_CNT_ 0x0000FFFF + +#define WORD_SWAP 0x98 + +#define FREE_RUN 0x9C + +#define RX_DROP 0xA0 + +#define MAC_CSR_CMD 0xA4 +#define MAC_CSR_CMD_CSR_BUSY_ 0x80000000 
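+/* indirect access to the MAC registers: set CSR_ADDR_ (plus R_NOT_W_ to read) along with CSR_BUSY_ in this register, then poll CSR_BUSY_ until it clears; data moves through MAC_CSR_DATA */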
+#define MAC_CSR_CMD_R_NOT_W_ 0x40000000 +#define MAC_CSR_CMD_CSR_ADDR_ 0x000000FF + +#define MAC_CSR_DATA 0xA8 + +#define AFC_CFG 0xAC +#define AFC_CFG_AFC_HI_ 0x00FF0000 +#define AFC_CFG_AFC_LO_ 0x0000FF00 +#define AFC_CFG_BACK_DUR_ 0x000000F0 +#define AFC_CFG_FCMULT_ 0x00000008 +#define AFC_CFG_FCBRD_ 0x00000004 +#define AFC_CFG_FCADD_ 0x00000002 +#define AFC_CFG_FCANY_ 0x00000001 + +#define E2P_CMD 0xB0 +#define E2P_CMD_EPC_BUSY_ 0x80000000 +#define E2P_CMD_EPC_CMD_ 0x70000000 +#define E2P_CMD_EPC_CMD_READ_ 0x00000000 +#define E2P_CMD_EPC_CMD_EWDS_ 0x10000000 +#define E2P_CMD_EPC_CMD_EWEN_ 0x20000000 +#define E2P_CMD_EPC_CMD_WRITE_ 0x30000000 +#define E2P_CMD_EPC_CMD_WRAL_ 0x40000000 +#define E2P_CMD_EPC_CMD_ERASE_ 0x50000000 +#define E2P_CMD_EPC_CMD_ERAL_ 0x60000000 +#define E2P_CMD_EPC_CMD_RELOAD_ 0x70000000 +#define E2P_CMD_EPC_TIMEOUT_ 0x00000200 +#define E2P_CMD_MAC_ADDR_LOADED_ 0x00000100 +#define E2P_CMD_EPC_ADDR_ 0x000000FF + +#define E2P_DATA 0xB4 +#define E2P_DATA_EEPROM_DATA_ 0x000000FF +#define LAN_REGISTER_EXTENT 0x00000100 + +/* + * MAC Control and Status Register (Indirect Address) + * Offset (through the MAC_CSR CMD and DATA port) + */ +#define MAC_CR 0x01 +#define MAC_CR_RXALL_ 0x80000000 +#define MAC_CR_HBDIS_ 0x10000000 +#define MAC_CR_RCVOWN_ 0x00800000 +#define MAC_CR_LOOPBK_ 0x00200000 +#define MAC_CR_FDPX_ 0x00100000 +#define MAC_CR_MCPAS_ 0x00080000 +#define MAC_CR_PRMS_ 0x00040000 +#define MAC_CR_INVFILT_ 0x00020000 +#define MAC_CR_PASSBAD_ 0x00010000 +#define MAC_CR_HFILT_ 0x00008000 +#define MAC_CR_HPFILT_ 0x00002000 +#define MAC_CR_LCOLL_ 0x00001000 +#define MAC_CR_BCAST_ 0x00000800 +#define MAC_CR_DISRTY_ 0x00000400 +#define MAC_CR_PADSTR_ 0x00000100 +#define MAC_CR_BOLMT_MASK_ 0x000000C0 +#define MAC_CR_DFCHK_ 0x00000020 +#define MAC_CR_TXEN_ 0x00000008 +#define MAC_CR_RXEN_ 0x00000004 + +#define ADDRH 0x02 + +#define ADDRL 0x03 + +#define HASHH 0x04 + +#define HASHL 0x05 + +#define MII_ACC 0x06 +#define MII_ACC_PHY_ADDR_ 0x0000F800 +#define MII_ACC_MIIRINDA_ 0x000007C0 +#define MII_ACC_MII_WRITE_ 0x00000002 +#define MII_ACC_MII_BUSY_ 0x00000001 + +#define MII_DATA 0x07 + +#define FLOW 0x08 +#define FLOW_FCPT_ 0xFFFF0000 +#define FLOW_FCPASS_ 0x00000004 +#define FLOW_FCEN_ 0x00000002 +#define FLOW_FCBSY_ 0x00000001 + +#define VLAN1 0x09 + +#define VLAN2 0x0A + +#define WUFF 0x0B + +#define WUCSR 0x0C +#define WUCSR_GUE_ 0x00000200 +#define WUCSR_WUFR_ 0x00000040 +#define WUCSR_MPR_ 0x00000020 +#define WUCSR_WAKE_EN_ 0x00000004 +#define WUCSR_MPEN_ 0x00000002 + +/* + * Phy definitions (vendor-specific) + */ +#define LAN9118_PHY_ID 0x00C0001C + +#define MII_INTSTS 0x1D + +#define MII_INTMSK 0x1E +#define PHY_INTMSK_AN_RCV_ (1 << 1) +#define PHY_INTMSK_PDFAULT_ (1 << 2) +#define PHY_INTMSK_AN_ACK_ (1 << 3) +#define PHY_INTMSK_LNKDOWN_ (1 << 4) +#define PHY_INTMSK_RFAULT_ (1 << 5) +#define PHY_INTMSK_AN_COMP_ (1 << 6) +#define PHY_INTMSK_ENERGYON_ (1 << 7) +#define PHY_INTMSK_DEFAULT_ (PHY_INTMSK_ENERGYON_ | \ + PHY_INTMSK_AN_COMP_ | \ + PHY_INTMSK_RFAULT_ | \ + PHY_INTMSK_LNKDOWN_) + +#define ADVERTISE_PAUSE_ALL (ADVERTISE_PAUSE_CAP | \ + ADVERTISE_PAUSE_ASYM) + +#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \ + LPA_PAUSE_ASYM) + +/* + * Provide hooks to let the arch add to the initialisation procedure + * and to override the source of the MAC address. 
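+ * SMSC_INITIALIZE() is invoked once from smsc911x_init_module(); the
+ * arch hooks header included below can supply replacements for both.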
+ */ +#define SMSC_INITIALIZE() do {} while (0) +#define smsc_get_mac(dev) smsc911x_read_mac_address((dev)) + +#ifdef CONFIG_SMSC911X_ARCH_HOOKS +#include <asm/smsc911x.h> +#endif + +#endif /* __SMSC911X_H__ */ diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c new file mode 100644 index 0000000..459726f --- /dev/null +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -0,0 +1,1763 @@ + /*************************************************************************** + * + * Copyright (C) 2007,2008 SMSC + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + *************************************************************************** + */ + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/pci.h> +#include <linux/if_vlan.h> +#include <linux/dma-mapping.h> +#include <linux/crc32.h> +#include <linux/slab.h> +#include <asm/unaligned.h> +#include "smsc9420.h" + +#define DRV_NAME "smsc9420" +#define PFX DRV_NAME ": " +#define DRV_MDIONAME "smsc9420-mdio" +#define DRV_DESCRIPTION "SMSC LAN9420 driver" +#define DRV_VERSION "1.01" + +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +struct smsc9420_dma_desc { + u32 status; + u32 length; + u32 buffer1; + u32 buffer2; +}; + +struct smsc9420_ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; + +struct smsc9420_pdata { + void __iomem *base_addr; + struct pci_dev *pdev; + struct net_device *dev; + + struct smsc9420_dma_desc *rx_ring; + struct smsc9420_dma_desc *tx_ring; + struct smsc9420_ring_info *tx_buffers; + struct smsc9420_ring_info *rx_buffers; + dma_addr_t rx_dma_addr; + dma_addr_t tx_dma_addr; + int tx_ring_head, tx_ring_tail; + int rx_ring_head, rx_ring_tail; + + spinlock_t int_lock; + spinlock_t phy_lock; + + struct napi_struct napi; + + bool software_irq_signal; + bool rx_csum; + u32 msg_enable; + + struct phy_device *phy_dev; + struct mii_bus *mii_bus; + int phy_irq[PHY_MAX_ADDR]; + int last_duplex; + int last_carrier; +}; + +static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = { + { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, smsc9420_id_table); + +#define SMSC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +static uint smsc_debug; +static uint debug = -1; +module_param(debug, uint, 0); +MODULE_PARM_DESC(debug, "debug level"); + +#define smsc_dbg(TYPE, f, a...) \ +do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \ + printk(KERN_DEBUG PFX f "\n", ## a); \ +} while (0) + +#define smsc_info(TYPE, f, a...) \ +do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \ + printk(KERN_INFO PFX f "\n", ## a); \ +} while (0) + +#define smsc_warn(TYPE, f, a...) 
\ +do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \ + printk(KERN_WARNING PFX f "\n", ## a); \ +} while (0) + +static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset) +{ + return ioread32(pd->base_addr + offset); +} + +static inline void +smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value) +{ + iowrite32(value, pd->base_addr + offset); +} + +static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd) +{ + /* to ensure PCI write completion, we must perform a PCI read */ + smsc9420_reg_read(pd, ID_REV); +} + +static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx) +{ + struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; + unsigned long flags; + u32 addr; + int i, reg = -EIO; + + spin_lock_irqsave(&pd->phy_lock, flags); + + /* confirm MII not busy */ + if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { + smsc_warn(DRV, "MII is busy???"); + goto out; + } + + /* set the address, index & direction (read from PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | + MII_ACCESS_MII_READ_; + smsc9420_reg_write(pd, MII_ACCESS, addr); + + /* wait for read to complete with 50us timeout */ + for (i = 0; i < 5; i++) { + if (!(smsc9420_reg_read(pd, MII_ACCESS) & + MII_ACCESS_MII_BUSY_)) { + reg = (u16)smsc9420_reg_read(pd, MII_DATA); + goto out; + } + udelay(10); + } + + smsc_warn(DRV, "MII busy timeout!"); + +out: + spin_unlock_irqrestore(&pd->phy_lock, flags); + return reg; +} + +static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 val) +{ + struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; + unsigned long flags; + u32 addr; + int i, reg = -EIO; + + spin_lock_irqsave(&pd->phy_lock, flags); + + /* confirm MII not busy */ + if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { + smsc_warn(DRV, "MII is busy???"); + goto out; + } + + /* put the data to write in the MAC */ + smsc9420_reg_write(pd, MII_DATA, (u32)val); + + /* set the address, index & direction (write to PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | + MII_ACCESS_MII_WRITE_; + smsc9420_reg_write(pd, MII_ACCESS, addr); + + /* wait for write to complete with 50us timeout */ + for (i = 0; i < 5; i++) { + if (!(smsc9420_reg_read(pd, MII_ACCESS) & + MII_ACCESS_MII_BUSY_)) { + reg = 0; + goto out; + } + udelay(10); + } + + smsc_warn(DRV, "MII busy timeout!"); + +out: + spin_unlock_irqrestore(&pd->phy_lock, flags); + return reg; +} + +/* Returns hash bit number for given MAC address + * Example: + * 01 00 5E 00 00 01 -> returns bit number 31 */ +static u32 smsc9420_hash(u8 addr[ETH_ALEN]) +{ + return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f; +} + +static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd) +{ + int timeout = 100000; + + BUG_ON(!pd); + + if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { + smsc_dbg(DRV, "smsc9420_eeprom_reload: Eeprom busy"); + return -EIO; + } + + smsc9420_reg_write(pd, E2P_CMD, + (E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_RELOAD_)); + + do { + udelay(10); + if (!(smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_)) + return 0; + } while (timeout--); + + smsc_warn(DRV, "smsc9420_eeprom_reload: Eeprom timed out"); + return -EIO; +} + +/* Standard ioctls for mii-tool */ +static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + + if (!netif_running(dev) || !pd->phy_dev) + return -EINVAL; + + return phy_mii_ioctl(pd->phy_dev, ifr, cmd); +} + +static int 
smsc9420_ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + + if (!pd->phy_dev) + return -ENODEV; + + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + return phy_ethtool_gset(pd->phy_dev, cmd); +} + +static int smsc9420_ethtool_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + + if (!pd->phy_dev) + return -ENODEV; + + return phy_ethtool_sset(pd->phy_dev, cmd); +} + +static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct smsc9420_pdata *pd = netdev_priv(netdev); + + strcpy(drvinfo->driver, DRV_NAME); + strcpy(drvinfo->bus_info, pci_name(pd->pdev)); + strcpy(drvinfo->version, DRV_VERSION); +} + +static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev) +{ + struct smsc9420_pdata *pd = netdev_priv(netdev); + return pd->msg_enable; +} + +static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct smsc9420_pdata *pd = netdev_priv(netdev); + pd->msg_enable = data; +} + +static int smsc9420_ethtool_nway_reset(struct net_device *netdev) +{ + struct smsc9420_pdata *pd = netdev_priv(netdev); + + if (!pd->phy_dev) + return -ENODEV; + + return phy_start_aneg(pd->phy_dev); +} + +static int smsc9420_ethtool_getregslen(struct net_device *dev) +{ + /* all smsc9420 registers plus all phy registers */ + return 0x100 + (32 * sizeof(u32)); +} + +static void +smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + struct phy_device *phy_dev = pd->phy_dev; + unsigned int i, j = 0; + u32 *data = buf; + + regs->version = smsc9420_reg_read(pd, ID_REV); + for (i = 0; i < 0x100; i += (sizeof(u32))) + data[j++] = smsc9420_reg_read(pd, i); + + // cannot read phy registers if the net device is down + if (!phy_dev) + return; + + for (i = 0; i <= 31; i++) + data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i); +} + +static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd) +{ + unsigned int temp = smsc9420_reg_read(pd, GPIO_CFG); + temp &= ~GPIO_CFG_EEPR_EN_; + smsc9420_reg_write(pd, GPIO_CFG, temp); + msleep(1); +} + +static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op) +{ + int timeout = 100; + u32 e2cmd; + + smsc_dbg(HW, "op 0x%08x", op); + if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { + smsc_warn(HW, "Busy at start"); + return -EBUSY; + } + + e2cmd = op | E2P_CMD_EPC_BUSY_; + smsc9420_reg_write(pd, E2P_CMD, e2cmd); + + do { + msleep(1); + e2cmd = smsc9420_reg_read(pd, E2P_CMD); + } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); + + if (!timeout) { + smsc_info(HW, "TIMED OUT"); + return -EAGAIN; + } + + if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { + smsc_info(HW, "Error occurred during eeprom operation"); + return -EINVAL; + } + + return 0; +} + +static int smsc9420_eeprom_read_location(struct smsc9420_pdata *pd, + u8 address, u8 *data) +{ + u32 op = E2P_CMD_EPC_CMD_READ_ | address; + int ret; + + smsc_dbg(HW, "address 0x%x", address); + ret = smsc9420_eeprom_send_cmd(pd, op); + + if (!ret) + data[address] = smsc9420_reg_read(pd, E2P_DATA); + + return ret; +} + +static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd, + u8 address, u8 data) +{ + u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; + int ret; + + smsc_dbg(HW, "address 0x%x, data 0x%x", address, data); + ret = smsc9420_eeprom_send_cmd(pd, op); + + if (!ret) { + op = E2P_CMD_EPC_CMD_WRITE_ | 
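/* the target byte was erased above; now program the new value */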
address; + smsc9420_reg_write(pd, E2P_DATA, (u32)data); + ret = smsc9420_eeprom_send_cmd(pd, op); + } + + return ret; +} + +static int smsc9420_ethtool_get_eeprom_len(struct net_device *dev) +{ + return SMSC9420_EEPROM_SIZE; +} + +static int smsc9420_ethtool_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u8 eeprom_data[SMSC9420_EEPROM_SIZE]; + int len, i; + + smsc9420_eeprom_enable_access(pd); + + len = min(eeprom->len, SMSC9420_EEPROM_SIZE); + for (i = 0; i < len; i++) { + int ret = smsc9420_eeprom_read_location(pd, i, eeprom_data); + if (ret < 0) { + eeprom->len = 0; + return ret; + } + } + + memcpy(data, &eeprom_data[eeprom->offset], len); + eeprom->magic = SMSC9420_EEPROM_MAGIC; + eeprom->len = len; + return 0; +} + +static int smsc9420_ethtool_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + int ret; + + if (eeprom->magic != SMSC9420_EEPROM_MAGIC) + return -EINVAL; + + smsc9420_eeprom_enable_access(pd); + smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_); + ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data); + smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWDS_); + + /* Single byte write, according to man page */ + eeprom->len = 1; + + return ret; +} + +static const struct ethtool_ops smsc9420_ethtool_ops = { + .get_settings = smsc9420_ethtool_get_settings, + .set_settings = smsc9420_ethtool_set_settings, + .get_drvinfo = smsc9420_ethtool_get_drvinfo, + .get_msglevel = smsc9420_ethtool_get_msglevel, + .set_msglevel = smsc9420_ethtool_set_msglevel, + .nway_reset = smsc9420_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = smsc9420_ethtool_get_eeprom_len, + .get_eeprom = smsc9420_ethtool_get_eeprom, + .set_eeprom = smsc9420_ethtool_set_eeprom, + .get_regs_len = smsc9420_ethtool_getregslen, + .get_regs = smsc9420_ethtool_getregs, +}; + +/* Sets the device MAC address to dev_addr */ +static void smsc9420_set_mac_address(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u8 *dev_addr = dev->dev_addr; + u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; + u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + + smsc9420_reg_write(pd, ADDRH, mac_high16); + smsc9420_reg_write(pd, ADDRL, mac_low32); +} + +static void smsc9420_check_mac_address(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + + /* Check if mac address has been specified when bringing interface up */ + if (is_valid_ether_addr(dev->dev_addr)) { + smsc9420_set_mac_address(dev); + smsc_dbg(PROBE, "MAC Address is specified by configuration"); + } else { + /* Try reading mac address from device. 
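+ * (the LAN9420 exposes it in the ADDRH/ADDRL MAC registers read just below).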
If EEPROM is present + * it will already have been set */ + u32 mac_high16 = smsc9420_reg_read(pd, ADDRH); + u32 mac_low32 = smsc9420_reg_read(pd, ADDRL); + dev->dev_addr[0] = (u8)(mac_low32); + dev->dev_addr[1] = (u8)(mac_low32 >> 8); + dev->dev_addr[2] = (u8)(mac_low32 >> 16); + dev->dev_addr[3] = (u8)(mac_low32 >> 24); + dev->dev_addr[4] = (u8)(mac_high16); + dev->dev_addr[5] = (u8)(mac_high16 >> 8); + + if (is_valid_ether_addr(dev->dev_addr)) { + /* eeprom values are valid so use them */ + smsc_dbg(PROBE, "MAC Address is read from EEPROM"); + } else { + /* eeprom values are invalid, generate random MAC */ + random_ether_addr(dev->dev_addr); + smsc9420_set_mac_address(dev); + smsc_dbg(PROBE, + "MAC Address is set to random_ether_addr"); + } + } +} + +static void smsc9420_stop_tx(struct smsc9420_pdata *pd) +{ + u32 dmac_control, mac_cr, dma_intr_ena; + int timeout = 1000; + + /* disable TX DMAC */ + dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); + dmac_control &= (~DMAC_CONTROL_ST_); + smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); + + /* Wait max 10ms for transmit process to stop */ + while (--timeout) { + if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_TS_) + break; + udelay(10); + } + + if (!timeout) + smsc_warn(IFDOWN, "TX DMAC failed to stop"); + + /* ACK Tx DMAC stop bit */ + smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_); + + /* mask TX DMAC interrupts */ + dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); + dma_intr_ena &= ~(DMAC_INTR_ENA_TX_); + smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); + smsc9420_pci_flush_write(pd); + + /* stop MAC TX */ + mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_TXEN_); + smsc9420_reg_write(pd, MAC_CR, mac_cr); + smsc9420_pci_flush_write(pd); +} + +static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd) +{ + int i; + + BUG_ON(!pd->tx_ring); + + if (!pd->tx_buffers) + return; + + for (i = 0; i < TX_RING_SIZE; i++) { + struct sk_buff *skb = pd->tx_buffers[i].skb; + + if (skb) { + BUG_ON(!pd->tx_buffers[i].mapping); + pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + } + + pd->tx_ring[i].status = 0; + pd->tx_ring[i].length = 0; + pd->tx_ring[i].buffer1 = 0; + pd->tx_ring[i].buffer2 = 0; + } + wmb(); + + kfree(pd->tx_buffers); + pd->tx_buffers = NULL; + + pd->tx_ring_head = 0; + pd->tx_ring_tail = 0; +} + +static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd) +{ + int i; + + BUG_ON(!pd->rx_ring); + + if (!pd->rx_buffers) + return; + + for (i = 0; i < RX_RING_SIZE; i++) { + if (pd->rx_buffers[i].skb) + dev_kfree_skb_any(pd->rx_buffers[i].skb); + + if (pd->rx_buffers[i].mapping) + pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + + pd->rx_ring[i].status = 0; + pd->rx_ring[i].length = 0; + pd->rx_ring[i].buffer1 = 0; + pd->rx_ring[i].buffer2 = 0; + } + wmb(); + + kfree(pd->rx_buffers); + pd->rx_buffers = NULL; + + pd->rx_ring_head = 0; + pd->rx_ring_tail = 0; +} + +static void smsc9420_stop_rx(struct smsc9420_pdata *pd) +{ + int timeout = 1000; + u32 mac_cr, dmac_control, dma_intr_ena; + + /* mask RX DMAC interrupts */ + dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); + dma_intr_ena &= (~DMAC_INTR_ENA_RX_); + smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); + smsc9420_pci_flush_write(pd); + + /* stop RX MAC prior to stopping DMA */ + mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_RXEN_); + smsc9420_reg_write(pd, MAC_CR, mac_cr); + smsc9420_pci_flush_write(pd); + + /* stop RX DMAC */ + dmac_control = 
smsc9420_reg_read(pd, DMAC_CONTROL); + dmac_control &= (~DMAC_CONTROL_SR_); + smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); + smsc9420_pci_flush_write(pd); + + /* wait up to 10ms for receive to stop */ + while (--timeout) { + if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_RS_) + break; + udelay(10); + } + + if (!timeout) + smsc_warn(IFDOWN, "RX DMAC did not stop! timeout."); + + /* ACK the Rx DMAC stop bit */ + smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_); +} + +static irqreturn_t smsc9420_isr(int irq, void *dev_id) +{ + struct smsc9420_pdata *pd = dev_id; + u32 int_cfg, int_sts, int_ctl; + irqreturn_t ret = IRQ_NONE; + ulong flags; + + BUG_ON(!pd); + BUG_ON(!pd->base_addr); + + int_cfg = smsc9420_reg_read(pd, INT_CFG); + + /* check if it's our interrupt */ + if ((int_cfg & (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) != + (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) + return IRQ_NONE; + + int_sts = smsc9420_reg_read(pd, INT_STAT); + + if (likely(INT_STAT_DMAC_INT_ & int_sts)) { + u32 status = smsc9420_reg_read(pd, DMAC_STATUS); + u32 ints_to_clear = 0; + + if (status & DMAC_STS_TX_) { + ints_to_clear |= (DMAC_STS_TX_ | DMAC_STS_NIS_); + netif_wake_queue(pd->dev); + } + + if (status & DMAC_STS_RX_) { + /* mask RX DMAC interrupts */ + u32 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); + dma_intr_ena &= (~DMAC_INTR_ENA_RX_); + smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); + smsc9420_pci_flush_write(pd); + + ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_); + napi_schedule(&pd->napi); + } + + if (ints_to_clear) + smsc9420_reg_write(pd, DMAC_STATUS, ints_to_clear); + + ret = IRQ_HANDLED; + } + + if (unlikely(INT_STAT_SW_INT_ & int_sts)) { + /* mask software interrupt */ + spin_lock_irqsave(&pd->int_lock, flags); + int_ctl = smsc9420_reg_read(pd, INT_CTL); + int_ctl &= (~INT_CTL_SW_INT_EN_); + smsc9420_reg_write(pd, INT_CTL, int_ctl); + spin_unlock_irqrestore(&pd->int_lock, flags); + + smsc9420_reg_write(pd, INT_STAT, INT_STAT_SW_INT_); + pd->software_irq_signal = true; + smp_wmb(); + + ret = IRQ_HANDLED; + } + + /* to ensure PCI write completion, we must perform a PCI read */ + smsc9420_pci_flush_write(pd); + + return ret; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void smsc9420_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + smsc9420_isr(0, dev); + enable_irq(dev->irq); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd) +{ + smsc9420_reg_write(pd, BUS_MODE, BUS_MODE_SWR_); + smsc9420_reg_read(pd, BUS_MODE); + udelay(2); + if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_) + smsc_warn(DRV, "Software reset not cleared"); +} + +static int smsc9420_stop(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u32 int_cfg; + ulong flags; + + BUG_ON(!pd); + BUG_ON(!pd->phy_dev); + + /* disable master interrupt */ + spin_lock_irqsave(&pd->int_lock, flags); + int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); + smsc9420_reg_write(pd, INT_CFG, int_cfg); + spin_unlock_irqrestore(&pd->int_lock, flags); + + netif_tx_disable(dev); + napi_disable(&pd->napi); + + smsc9420_stop_tx(pd); + smsc9420_free_tx_ring(pd); + + smsc9420_stop_rx(pd); + smsc9420_free_rx_ring(pd); + + free_irq(dev->irq, pd); + + smsc9420_dmac_soft_reset(pd); + + phy_stop(pd->phy_dev); + + phy_disconnect(pd->phy_dev); + pd->phy_dev = NULL; + mdiobus_unregister(pd->mii_bus); + mdiobus_free(pd->mii_bus); + + return 0; +} + +static void smsc9420_rx_count_stats(struct net_device *dev, u32 desc_status) +{ + if 
(unlikely(desc_status & RDES0_ERROR_SUMMARY_)) { + dev->stats.rx_errors++; + if (desc_status & RDES0_DESCRIPTOR_ERROR_) + dev->stats.rx_over_errors++; + else if (desc_status & (RDES0_FRAME_TOO_LONG_ | + RDES0_RUNT_FRAME_ | RDES0_COLLISION_SEEN_)) + dev->stats.rx_frame_errors++; + else if (desc_status & RDES0_CRC_ERROR_) + dev->stats.rx_crc_errors++; + } + + if (unlikely(desc_status & RDES0_LENGTH_ERROR_)) + dev->stats.rx_length_errors++; + + if (unlikely(!((desc_status & RDES0_LAST_DESCRIPTOR_) && + (desc_status & RDES0_FIRST_DESCRIPTOR_)))) + dev->stats.rx_length_errors++; + + if (desc_status & RDES0_MULTICAST_FRAME_) + dev->stats.multicast++; +} + +static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index, + const u32 status) +{ + struct net_device *dev = pd->dev; + struct sk_buff *skb; + u16 packet_length = (status & RDES0_FRAME_LENGTH_MASK_) + >> RDES0_FRAME_LENGTH_SHFT_; + + /* remove crc from packet length */ + packet_length -= 4; + + if (pd->rx_csum) + packet_length -= 2; + + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_length; + + pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + pd->rx_buffers[index].mapping = 0; + + skb = pd->rx_buffers[index].skb; + pd->rx_buffers[index].skb = NULL; + + if (pd->rx_csum) { + u16 hw_csum = get_unaligned_le16(skb_tail_pointer(skb) + + NET_IP_ALIGN + packet_length + 4); + put_unaligned_le16(hw_csum, &skb->csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, packet_length); + + skb->protocol = eth_type_trans(skb, dev); + + netif_receive_skb(skb); +} + +static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index) +{ + struct sk_buff *skb = netdev_alloc_skb(pd->dev, PKT_BUF_SZ); + dma_addr_t mapping; + + BUG_ON(pd->rx_buffers[index].skb); + BUG_ON(pd->rx_buffers[index].mapping); + + if (unlikely(!skb)) { + smsc_warn(RX_ERR, "Failed to allocate new skb!"); + return -ENOMEM; + } + + skb->dev = pd->dev; + + mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb), + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pd->pdev, mapping)) { + dev_kfree_skb_any(skb); + smsc_warn(RX_ERR, "pci_map_single failed!"); + return -ENOMEM; + } + + pd->rx_buffers[index].skb = skb; + pd->rx_buffers[index].mapping = mapping; + pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN; + pd->rx_ring[index].status = RDES0_OWN_; + wmb(); + + return 0; +} + +static void smsc9420_alloc_new_rx_buffers(struct smsc9420_pdata *pd) +{ + while (pd->rx_ring_tail != pd->rx_ring_head) { + if (smsc9420_alloc_rx_buffer(pd, pd->rx_ring_tail)) + break; + + pd->rx_ring_tail = (pd->rx_ring_tail + 1) % RX_RING_SIZE; + } +} + +static int smsc9420_rx_poll(struct napi_struct *napi, int budget) +{ + struct smsc9420_pdata *pd = + container_of(napi, struct smsc9420_pdata, napi); + struct net_device *dev = pd->dev; + u32 drop_frame_cnt, dma_intr_ena, status; + int work_done; + + for (work_done = 0; work_done < budget; work_done++) { + rmb(); + status = pd->rx_ring[pd->rx_ring_head].status; + + /* stop if DMAC owns this dma descriptor */ + if (status & RDES0_OWN_) + break; + + smsc9420_rx_count_stats(dev, status); + smsc9420_rx_handoff(pd, pd->rx_ring_head, status); + pd->rx_ring_head = (pd->rx_ring_head + 1) % RX_RING_SIZE; + smsc9420_alloc_new_rx_buffers(pd); + } + + drop_frame_cnt = smsc9420_reg_read(pd, MISS_FRAME_CNTR); + dev->stats.rx_dropped += + (drop_frame_cnt & 0xFFFF) + ((drop_frame_cnt >> 17) & 0x3FF); + + /* Kick RXDMA */ + smsc9420_reg_write(pd, 
RX_POLL_DEMAND, 1); + smsc9420_pci_flush_write(pd); + + if (work_done < budget) { + napi_complete(&pd->napi); + + /* re-enable RX DMA interrupts */ + dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); + dma_intr_ena |= (DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_); + smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); + smsc9420_pci_flush_write(pd); + } + return work_done; +} + +static void +smsc9420_tx_update_stats(struct net_device *dev, u32 status, u32 length) +{ + if (unlikely(status & TDES0_ERROR_SUMMARY_)) { + dev->stats.tx_errors++; + if (status & (TDES0_EXCESSIVE_DEFERRAL_ | + TDES0_EXCESSIVE_COLLISIONS_)) + dev->stats.tx_aborted_errors++; + + if (status & (TDES0_LOSS_OF_CARRIER_ | TDES0_NO_CARRIER_)) + dev->stats.tx_carrier_errors++; + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += (length & 0x7FF); + } + + if (unlikely(status & TDES0_EXCESSIVE_COLLISIONS_)) { + dev->stats.collisions += 16; + } else { + dev->stats.collisions += + (status & TDES0_COLLISION_COUNT_MASK_) >> + TDES0_COLLISION_COUNT_SHFT_; + } + + if (unlikely(status & TDES0_HEARTBEAT_FAIL_)) + dev->stats.tx_heartbeat_errors++; +} + +/* Check for completed dma transfers, update stats and free skbs */ +static void smsc9420_complete_tx(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + + while (pd->tx_ring_tail != pd->tx_ring_head) { + int index = pd->tx_ring_tail; + u32 status, length; + + rmb(); + status = pd->tx_ring[index].status; + length = pd->tx_ring[index].length; + + /* Check if DMA still owns this descriptor */ + if (unlikely(TDES0_OWN_ & status)) + break; + + smsc9420_tx_update_stats(dev, status, length); + + BUG_ON(!pd->tx_buffers[index].skb); + BUG_ON(!pd->tx_buffers[index].mapping); + + pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping, + pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE); + pd->tx_buffers[index].mapping = 0; + + dev_kfree_skb_any(pd->tx_buffers[index].skb); + pd->tx_buffers[index].skb = NULL; + + pd->tx_ring[index].buffer1 = 0; + wmb(); + + pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE; + } +} + +static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + dma_addr_t mapping; + int index = pd->tx_ring_head; + u32 tmp_desc1; + bool about_to_take_last_desc = + (((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail); + + smsc9420_complete_tx(dev); + + rmb(); + BUG_ON(pd->tx_ring[index].status & TDES0_OWN_); + BUG_ON(pd->tx_buffers[index].skb); + BUG_ON(pd->tx_buffers[index].mapping); + + mapping = pci_map_single(pd->pdev, skb->data, + skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pd->pdev, mapping)) { + smsc_warn(TX_ERR, "pci_map_single failed, dropping packet"); + return NETDEV_TX_BUSY; + } + + pd->tx_buffers[index].skb = skb; + pd->tx_buffers[index].mapping = mapping; + + tmp_desc1 = (TDES1_LS_ | ((u32)skb->len & 0x7FF)); + if (unlikely(about_to_take_last_desc)) { + tmp_desc1 |= TDES1_IC_; + netif_stop_queue(pd->dev); + } + + /* check if we are at the last descriptor and need to set EOR */ + if (unlikely(index == (TX_RING_SIZE - 1))) + tmp_desc1 |= TDES1_TER_; + + pd->tx_ring[index].buffer1 = mapping; + pd->tx_ring[index].length = tmp_desc1; + wmb(); + + /* increment head */ + pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE; + + /* assign ownership to DMAC */ + pd->tx_ring[index].status = TDES0_OWN_; + wmb(); + + skb_tx_timestamp(skb); + + /* kick the DMA */ + smsc9420_reg_write(pd, TX_POLL_DEMAND, 1); + 
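/* a poll-demand write makes the DMAC re-read the current TX descriptor immediately instead of waiting for its next poll cycle */ +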
smsc9420_pci_flush_write(pd); + + return NETDEV_TX_OK; +} + +static struct net_device_stats *smsc9420_get_stats(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u32 counter = smsc9420_reg_read(pd, MISS_FRAME_CNTR); + dev->stats.rx_dropped += + (counter & 0x0000FFFF) + ((counter >> 17) & 0x000003FF); + return &dev->stats; +} + +static void smsc9420_set_multicast_list(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u32 mac_cr = smsc9420_reg_read(pd, MAC_CR); + + if (dev->flags & IFF_PROMISC) { + smsc_dbg(HW, "Promiscuous Mode Enabled"); + mac_cr |= MAC_CR_PRMS_; + mac_cr &= (~MAC_CR_MCPAS_); + mac_cr &= (~MAC_CR_HPFILT_); + } else if (dev->flags & IFF_ALLMULTI) { + smsc_dbg(HW, "Receive all Multicast Enabled"); + mac_cr &= (~MAC_CR_PRMS_); + mac_cr |= MAC_CR_MCPAS_; + mac_cr &= (~MAC_CR_HPFILT_); + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + u32 hash_lo = 0, hash_hi = 0; + + smsc_dbg(HW, "Multicast filter enabled"); + netdev_for_each_mc_addr(ha, dev) { + u32 bit_num = smsc9420_hash(ha->addr); + u32 mask = 1 << (bit_num & 0x1F); + + if (bit_num & 0x20) + hash_hi |= mask; + else + hash_lo |= mask; + + } + smsc9420_reg_write(pd, HASHH, hash_hi); + smsc9420_reg_write(pd, HASHL, hash_lo); + + mac_cr &= (~MAC_CR_PRMS_); + mac_cr &= (~MAC_CR_MCPAS_); + mac_cr |= MAC_CR_HPFILT_; + } else { + smsc_dbg(HW, "Receive own packets only."); + smsc9420_reg_write(pd, HASHH, 0); + smsc9420_reg_write(pd, HASHL, 0); + + mac_cr &= (~MAC_CR_PRMS_); + mac_cr &= (~MAC_CR_MCPAS_); + mac_cr &= (~MAC_CR_HPFILT_); + } + + smsc9420_reg_write(pd, MAC_CR, mac_cr); + smsc9420_pci_flush_write(pd); +} + +static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd) +{ + struct phy_device *phy_dev = pd->phy_dev; + u32 flow; + + if (phy_dev->duplex == DUPLEX_FULL) { + u16 lcladv = phy_read(phy_dev, MII_ADVERTISE); + u16 rmtadv = phy_read(phy_dev, MII_LPA); + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + + if (cap & FLOW_CTRL_RX) + flow = 0xFFFF0002; + else + flow = 0; + + smsc_info(LINK, "rx pause %s, tx pause %s", + (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), + (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); + } else { + smsc_info(LINK, "half duplex"); + flow = 0; + } + + smsc9420_reg_write(pd, FLOW, flow); +} + +/* Update link mode if anything has changed. Called periodically when the + * PHY is in polling mode, even if nothing has changed. 
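+ * Only duplex and carrier transitions are acted on here; MAC_CR has no
+ * speed field to program.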
*/ +static void smsc9420_phy_adjust_link(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + struct phy_device *phy_dev = pd->phy_dev; + int carrier; + + if (phy_dev->duplex != pd->last_duplex) { + u32 mac_cr = smsc9420_reg_read(pd, MAC_CR); + if (phy_dev->duplex) { + smsc_dbg(LINK, "full duplex mode"); + mac_cr |= MAC_CR_FDPX_; + } else { + smsc_dbg(LINK, "half duplex mode"); + mac_cr &= ~MAC_CR_FDPX_; + } + smsc9420_reg_write(pd, MAC_CR, mac_cr); + + smsc9420_phy_update_flowcontrol(pd); + pd->last_duplex = phy_dev->duplex; + } + + carrier = netif_carrier_ok(dev); + if (carrier != pd->last_carrier) { + if (carrier) + smsc_dbg(LINK, "carrier OK"); + else + smsc_dbg(LINK, "no carrier"); + pd->last_carrier = carrier; + } +} + +static int smsc9420_mii_probe(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + struct phy_device *phydev = NULL; + + BUG_ON(pd->phy_dev); + + /* Device only supports internal PHY at address 1 */ + if (!pd->mii_bus->phy_map[1]) { + pr_err("%s: no PHY found at address 1\n", dev->name); + return -ENODEV; + } + + phydev = pd->mii_bus->phy_map[1]; + smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr, + phydev->phy_id); + + phydev = phy_connect(dev, dev_name(&phydev->dev), + smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + pr_err("%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + /* mask with MAC supported features */ + phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phydev->advertising = phydev->supported; + + pd->phy_dev = phydev; + pd->last_duplex = -1; + pd->last_carrier = -1; + + return 0; +} + +static int smsc9420_mii_init(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + int err = -ENXIO, i; + + pd->mii_bus = mdiobus_alloc(); + if (!pd->mii_bus) { + err = -ENOMEM; + goto err_out_1; + } + pd->mii_bus->name = DRV_MDIONAME; + snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x", + (pd->pdev->bus->number << 8) | pd->pdev->devfn); + pd->mii_bus->priv = pd; + pd->mii_bus->read = smsc9420_mii_read; + pd->mii_bus->write = smsc9420_mii_write; + pd->mii_bus->irq = pd->phy_irq; + for (i = 0; i < PHY_MAX_ADDR; ++i) + pd->mii_bus->irq[i] = PHY_POLL; + + /* Mask all PHYs except ID 1 (internal) */ + pd->mii_bus->phy_mask = ~(1 << 1); + + if (mdiobus_register(pd->mii_bus)) { + smsc_warn(PROBE, "Error registering mii bus"); + goto err_out_free_bus_2; + } + + if (smsc9420_mii_probe(dev) < 0) { + smsc_warn(PROBE, "Error probing mii bus"); + goto err_out_unregister_bus_3; + } + + return 0; + +err_out_unregister_bus_3: + mdiobus_unregister(pd->mii_bus); +err_out_free_bus_2: + mdiobus_free(pd->mii_bus); +err_out_1: + return err; +} + +static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd) +{ + int i; + + BUG_ON(!pd->tx_ring); + + pd->tx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) * + TX_RING_SIZE), GFP_KERNEL); + if (!pd->tx_buffers) { + smsc_warn(IFUP, "Failed to allocate tx_buffers"); + return -ENOMEM; + } + + /* Initialize the TX Ring */ + for (i = 0; i < TX_RING_SIZE; i++) { + pd->tx_buffers[i].skb = NULL; + pd->tx_buffers[i].mapping = 0; + pd->tx_ring[i].status = 0; + pd->tx_ring[i].length = 0; + pd->tx_ring[i].buffer1 = 0; + pd->tx_ring[i].buffer2 = 0; + } + pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_; + wmb(); + + pd->tx_ring_head = 0; 
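+ /* head: next descriptor to queue on; tail: next to reclaim. head == tail means the ring is empty */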
+ pd->tx_ring_tail = 0; + + smsc9420_reg_write(pd, TX_BASE_ADDR, pd->tx_dma_addr); + smsc9420_pci_flush_write(pd); + + return 0; +} + +static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd) +{ + int i; + + BUG_ON(!pd->rx_ring); + + pd->rx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) * + RX_RING_SIZE), GFP_KERNEL); + if (pd->rx_buffers == NULL) { + smsc_warn(IFUP, "Failed to allocate rx_buffers"); + goto out; + } + + /* initialize the rx ring */ + for (i = 0; i < RX_RING_SIZE; i++) { + pd->rx_ring[i].status = 0; + pd->rx_ring[i].length = PKT_BUF_SZ; + pd->rx_ring[i].buffer2 = 0; + pd->rx_buffers[i].skb = NULL; + pd->rx_buffers[i].mapping = 0; + } + pd->rx_ring[RX_RING_SIZE - 1].length = (PKT_BUF_SZ | RDES1_RER_); + + /* now allocate the entire ring of skbs */ + for (i = 0; i < RX_RING_SIZE; i++) { + if (smsc9420_alloc_rx_buffer(pd, i)) { + smsc_warn(IFUP, "failed to allocate rx skb %d", i); + goto out_free_rx_skbs; + } + } + + pd->rx_ring_head = 0; + pd->rx_ring_tail = 0; + + smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q); + smsc_dbg(IFUP, "VLAN1 = 0x%08x", smsc9420_reg_read(pd, VLAN1)); + + if (pd->rx_csum) { + /* Enable RX COE */ + u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN; + smsc9420_reg_write(pd, COE_CR, coe); + smsc_dbg(IFUP, "COE_CR = 0x%08x", coe); + } + + smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr); + smsc9420_pci_flush_write(pd); + + return 0; + +out_free_rx_skbs: + smsc9420_free_rx_ring(pd); +out: + return -ENOMEM; +} + +static int smsc9420_open(struct net_device *dev) +{ + struct smsc9420_pdata *pd; + u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl; + unsigned long flags; + int result = 0, timeout; + + BUG_ON(!dev); + pd = netdev_priv(dev); + BUG_ON(!pd); + + if (!is_valid_ether_addr(dev->dev_addr)) { + smsc_warn(IFUP, "dev_addr is not a valid MAC address"); + result = -EADDRNOTAVAIL; + goto out_0; + } + + netif_carrier_off(dev); + + /* disable, mask and acknowledge all interrupts */ + spin_lock_irqsave(&pd->int_lock, flags); + int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); + smsc9420_reg_write(pd, INT_CFG, int_cfg); + smsc9420_reg_write(pd, INT_CTL, 0); + spin_unlock_irqrestore(&pd->int_lock, flags); + smsc9420_reg_write(pd, DMAC_INTR_ENA, 0); + smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); + smsc9420_pci_flush_write(pd); + + if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED, + DRV_NAME, pd)) { + smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq); + result = -ENODEV; + goto out_0; + } + + smsc9420_dmac_soft_reset(pd); + + /* make sure MAC_CR is sane */ + smsc9420_reg_write(pd, MAC_CR, 0); + + smsc9420_set_mac_address(dev); + + /* Configure GPIO pins to drive LEDs */ + smsc9420_reg_write(pd, GPIO_CFG, + (GPIO_CFG_LED_3_ | GPIO_CFG_LED_2_ | GPIO_CFG_LED_1_)); + + bus_mode = BUS_MODE_DMA_BURST_LENGTH_16; + +#ifdef __BIG_ENDIAN + bus_mode |= BUS_MODE_DBO_; +#endif + + smsc9420_reg_write(pd, BUS_MODE, bus_mode); + + smsc9420_pci_flush_write(pd); + + /* set bus master bridge arbitration priority for Rx and TX DMA */ + smsc9420_reg_write(pd, BUS_CFG, BUS_CFG_RXTXWEIGHT_4_1); + + smsc9420_reg_write(pd, DMAC_CONTROL, + (DMAC_CONTROL_SF_ | DMAC_CONTROL_OSF_)); + + smsc9420_pci_flush_write(pd); + + /* test the IRQ connection to the ISR */ + smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq); + pd->software_irq_signal = false; + + spin_lock_irqsave(&pd->int_lock, flags); + /* configure interrupt deassertion timer and enable interrupts */ + int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_; + 
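/* the deassertion interval (INT_DEAS_TIME, in 10us units) holds the IRQ line off briefly between assertions */ +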
int_cfg &= ~(INT_CFG_INT_DEAS_MASK); + int_cfg |= (INT_DEAS_TIME & INT_CFG_INT_DEAS_MASK); + smsc9420_reg_write(pd, INT_CFG, int_cfg); + + /* unmask software interrupt */ + int_ctl = smsc9420_reg_read(pd, INT_CTL) | INT_CTL_SW_INT_EN_; + smsc9420_reg_write(pd, INT_CTL, int_ctl); + spin_unlock_irqrestore(&pd->int_lock, flags); + smsc9420_pci_flush_write(pd); + + timeout = 1000; + while (timeout--) { + if (pd->software_irq_signal) + break; + msleep(1); + } + + /* disable interrupts */ + spin_lock_irqsave(&pd->int_lock, flags); + int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); + smsc9420_reg_write(pd, INT_CFG, int_cfg); + spin_unlock_irqrestore(&pd->int_lock, flags); + + if (!pd->software_irq_signal) { + smsc_warn(IFUP, "ISR failed signaling test"); + result = -ENODEV; + goto out_free_irq_1; + } + + smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq); + + result = smsc9420_alloc_tx_ring(pd); + if (result) { + smsc_warn(IFUP, "Failed to Initialize tx dma ring"); + result = -ENOMEM; + goto out_free_irq_1; + } + + result = smsc9420_alloc_rx_ring(pd); + if (result) { + smsc_warn(IFUP, "Failed to Initialize rx dma ring"); + result = -ENOMEM; + goto out_free_tx_ring_2; + } + + result = smsc9420_mii_init(dev); + if (result) { + smsc_warn(IFUP, "Failed to initialize Phy"); + result = -ENODEV; + goto out_free_rx_ring_3; + } + + /* Bring the PHY up */ + phy_start(pd->phy_dev); + + napi_enable(&pd->napi); + + /* start tx and rx */ + mac_cr = smsc9420_reg_read(pd, MAC_CR) | MAC_CR_TXEN_ | MAC_CR_RXEN_; + smsc9420_reg_write(pd, MAC_CR, mac_cr); + + dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); + dmac_control |= DMAC_CONTROL_ST_ | DMAC_CONTROL_SR_; + smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); + smsc9420_pci_flush_write(pd); + + dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); + dma_intr_ena |= + (DMAC_INTR_ENA_TX_ | DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_); + smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); + smsc9420_pci_flush_write(pd); + + netif_wake_queue(dev); + + smsc9420_reg_write(pd, RX_POLL_DEMAND, 1); + + /* enable interrupts */ + spin_lock_irqsave(&pd->int_lock, flags); + int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_; + smsc9420_reg_write(pd, INT_CFG, int_cfg); + spin_unlock_irqrestore(&pd->int_lock, flags); + + return 0; + +out_free_rx_ring_3: + smsc9420_free_rx_ring(pd); +out_free_tx_ring_2: + smsc9420_free_tx_ring(pd); +out_free_irq_1: + free_irq(dev->irq, pd); +out_0: + return result; +} + +#ifdef CONFIG_PM + +static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct smsc9420_pdata *pd = netdev_priv(dev); + u32 int_cfg; + ulong flags; + + /* disable interrupts */ + spin_lock_irqsave(&pd->int_lock, flags); + int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); + smsc9420_reg_write(pd, INT_CFG, int_cfg); + spin_unlock_irqrestore(&pd->int_lock, flags); + + if (netif_running(dev)) { + netif_tx_disable(dev); + smsc9420_stop_tx(pd); + smsc9420_free_tx_ring(pd); + + napi_disable(&pd->napi); + smsc9420_stop_rx(pd); + smsc9420_free_rx_ring(pd); + + free_irq(dev->irq, pd); + + netif_device_detach(dev); + } + + pci_save_state(pdev); + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int smsc9420_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct smsc9420_pdata *pd = netdev_priv(dev); + int err; + + 
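/* bring the device back to D0 and restore config space before touching any BARs */ +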
pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_master(pdev); + + err = pci_enable_wake(pdev, 0, 0); + if (err) + smsc_warn(IFUP, "pci_enable_wake failed: %d", err); + + if (netif_running(dev)) { + err = smsc9420_open(dev); + netif_device_attach(dev); + } + return err; +} + +#endif /* CONFIG_PM */ + +static const struct net_device_ops smsc9420_netdev_ops = { + .ndo_open = smsc9420_open, + .ndo_stop = smsc9420_stop, + .ndo_start_xmit = smsc9420_hard_start_xmit, + .ndo_get_stats = smsc9420_get_stats, + .ndo_set_multicast_list = smsc9420_set_multicast_list, + .ndo_do_ioctl = smsc9420_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = smsc9420_poll_controller, +#endif /* CONFIG_NET_POLL_CONTROLLER */ +}; + +static int __devinit +smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct net_device *dev; + struct smsc9420_pdata *pd; + void __iomem *virt_addr; + int result = 0; + u32 id_rev; + + printk(KERN_INFO DRV_DESCRIPTION " version " DRV_VERSION "\n"); + + /* First do the PCI initialisation */ + result = pci_enable_device(pdev); + if (unlikely(result)) { + printk(KERN_ERR "Cannot enable smsc9420\n"); + goto out_0; + } + + pci_set_master(pdev); + + dev = alloc_etherdev(sizeof(*pd)); + if (!dev) { + printk(KERN_ERR "ether device alloc failed\n"); + goto out_disable_pci_device_1; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) { + printk(KERN_ERR "Cannot find PCI device base address\n"); + goto out_free_netdev_2; + } + + if ((pci_request_regions(pdev, DRV_NAME))) { + printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n"); + goto out_free_netdev_2; + } + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + printk(KERN_ERR "No usable DMA configuration, aborting.\n"); + goto out_free_regions_3; + } + + virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR), + pci_resource_len(pdev, SMSC_BAR)); + if (!virt_addr) { + printk(KERN_ERR "Cannot map device registers, aborting.\n"); + goto out_free_regions_3; + } + + /* registers are double mapped with 0 offset for LE and 0x200 for BE */ + virt_addr += LAN9420_CPSR_ENDIAN_OFFSET; + + dev->base_addr = (ulong)virt_addr; + + pd = netdev_priv(dev); + + /* pci descriptors are created in the PCI consistent area */ + pd->rx_ring = pci_alloc_consistent(pdev, + sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE + + sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE, + &pd->rx_dma_addr); + + if (!pd->rx_ring) + goto out_free_io_4; + + /* descriptors are aligned due to the nature of pci_alloc_consistent */ + pd->tx_ring = (struct smsc9420_dma_desc *) + (pd->rx_ring + RX_RING_SIZE); + pd->tx_dma_addr = pd->rx_dma_addr + + sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE; + + pd->pdev = pdev; + pd->dev = dev; + pd->base_addr = virt_addr; + pd->msg_enable = smsc_debug; + pd->rx_csum = true; + + smsc_dbg(PROBE, "lan_base=0x%08lx", (ulong)virt_addr); + + id_rev = smsc9420_reg_read(pd, ID_REV); + switch (id_rev & 0xFFFF0000) { + case 0x94200000: + smsc_info(PROBE, "LAN9420 identified, ID_REV=0x%08X", id_rev); + break; + default: + smsc_warn(PROBE, "LAN9420 NOT identified"); + smsc_warn(PROBE, "ID_REV=0x%08X", id_rev); + goto out_free_dmadesc_5; + } + + smsc9420_dmac_soft_reset(pd); + smsc9420_eeprom_reload(pd); + smsc9420_check_mac_address(dev); + + dev->netdev_ops = &smsc9420_netdev_ops; + dev->ethtool_ops = 
&smsc9420_ethtool_ops; + dev->irq = pdev->irq; + + netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT); + + result = register_netdev(dev); + if (result) { + smsc_warn(PROBE, "error %i registering device", result); + goto out_free_dmadesc_5; + } + + pci_set_drvdata(pdev, dev); + + spin_lock_init(&pd->int_lock); + spin_lock_init(&pd->phy_lock); + + dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr); + + return 0; + +out_free_dmadesc_5: + pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) * + (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); +out_free_io_4: + iounmap(virt_addr - LAN9420_CPSR_ENDIAN_OFFSET); +out_free_regions_3: + pci_release_regions(pdev); +out_free_netdev_2: + free_netdev(dev); +out_disable_pci_device_1: + pci_disable_device(pdev); +out_0: + return -ENODEV; +} + +static void __devexit smsc9420_remove(struct pci_dev *pdev) +{ + struct net_device *dev; + struct smsc9420_pdata *pd; + + dev = pci_get_drvdata(pdev); + if (!dev) + return; + + pci_set_drvdata(pdev, NULL); + + pd = netdev_priv(dev); + unregister_netdev(dev); + + /* tx_buffers and rx_buffers are freed in stop */ + BUG_ON(pd->tx_buffers); + BUG_ON(pd->rx_buffers); + + BUG_ON(!pd->tx_ring); + BUG_ON(!pd->rx_ring); + + pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) * + (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); + + iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET); + pci_release_regions(pdev); + free_netdev(dev); + pci_disable_device(pdev); +} + +static struct pci_driver smsc9420_driver = { + .name = DRV_NAME, + .id_table = smsc9420_id_table, + .probe = smsc9420_probe, + .remove = __devexit_p(smsc9420_remove), +#ifdef CONFIG_PM + .suspend = smsc9420_suspend, + .resume = smsc9420_resume, +#endif /* CONFIG_PM */ +}; + +static int __init smsc9420_init_module(void) +{ + smsc_debug = netif_msg_init(debug, SMSC_MSG_DEFAULT); + + return pci_register_driver(&smsc9420_driver); +} + +static void __exit smsc9420_exit_module(void) +{ + pci_unregister_driver(&smsc9420_driver); +} + +module_init(smsc9420_init_module); +module_exit(smsc9420_exit_module); diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h new file mode 100644 index 0000000..e441402 --- /dev/null +++ b/drivers/net/ethernet/smsc/smsc9420.h @@ -0,0 +1,276 @@ + /*************************************************************************** + * + * Copyright (C) 2007,2008 SMSC + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + *************************************************************************** + */ + +#ifndef _SMSC9420_H +#define _SMSC9420_H + +#define TX_RING_SIZE (32) +#define RX_RING_SIZE (128) + +/* interrupt deassertion in multiples of 10us */ +#define INT_DEAS_TIME (50) + +#define NAPI_WEIGHT (64) +#define SMSC_BAR (3) + +#ifdef __BIG_ENDIAN +/* Register set is duplicated for BE at an offset of 0x200 */ +#define LAN9420_CPSR_ENDIAN_OFFSET (0x200) +#else +#define LAN9420_CPSR_ENDIAN_OFFSET (0) +#endif + +#define PCI_VENDOR_ID_9420 (0x1055) +#define PCI_DEVICE_ID_9420 (0xE420) + +#define LAN_REGISTER_EXTENT (0x400) + +#define SMSC9420_EEPROM_SIZE ((u32)11) +#define SMSC9420_EEPROM_MAGIC (0x9420) + +#define PKT_BUF_SZ (VLAN_ETH_FRAME_LEN + NET_IP_ALIGN + 4) + +/***********************************************/ +/* DMA Controller Control and Status Registers */ +/***********************************************/ +#define BUS_MODE (0x00) +#define BUS_MODE_SWR_ (BIT(0)) +#define BUS_MODE_DMA_BURST_LENGTH_1 (BIT(8)) +#define BUS_MODE_DMA_BURST_LENGTH_2 (BIT(9)) +#define BUS_MODE_DMA_BURST_LENGTH_4 (BIT(10)) +#define BUS_MODE_DMA_BURST_LENGTH_8 (BIT(11)) +#define BUS_MODE_DMA_BURST_LENGTH_16 (BIT(12)) +#define BUS_MODE_DMA_BURST_LENGTH_32 (BIT(13)) +#define BUS_MODE_DBO_ (BIT(20)) + +#define TX_POLL_DEMAND (0x04) + +#define RX_POLL_DEMAND (0x08) + +#define RX_BASE_ADDR (0x0C) + +#define TX_BASE_ADDR (0x10) + +#define DMAC_STATUS (0x14) +#define DMAC_STS_TS_ (7 << 20) +#define DMAC_STS_RS_ (7 << 17) +#define DMAC_STS_NIS_ (BIT(16)) +#define DMAC_STS_AIS_ (BIT(15)) +#define DMAC_STS_RWT_ (BIT(9)) +#define DMAC_STS_RXPS_ (BIT(8)) +#define DMAC_STS_RXBU_ (BIT(7)) +#define DMAC_STS_RX_ (BIT(6)) +#define DMAC_STS_TXUNF_ (BIT(5)) +#define DMAC_STS_TXBU_ (BIT(2)) +#define DMAC_STS_TXPS_ (BIT(1)) +#define DMAC_STS_TX_ (BIT(0)) + +#define DMAC_CONTROL (0x18) +#define DMAC_CONTROL_TTM_ (BIT(22)) +#define DMAC_CONTROL_SF_ (BIT(21)) +#define DMAC_CONTROL_ST_ (BIT(13)) +#define DMAC_CONTROL_OSF_ (BIT(2)) +#define DMAC_CONTROL_SR_ (BIT(1)) + +#define DMAC_INTR_ENA (0x1C) +#define DMAC_INTR_ENA_NIS_ (BIT(16)) +#define DMAC_INTR_ENA_AIS_ (BIT(15)) +#define DMAC_INTR_ENA_RWT_ (BIT(9)) +#define DMAC_INTR_ENA_RXPS_ (BIT(8)) +#define DMAC_INTR_ENA_RXBU_ (BIT(7)) +#define DMAC_INTR_ENA_RX_ (BIT(6)) +#define DMAC_INTR_ENA_TXBU_ (BIT(2)) +#define DMAC_INTR_ENA_TXPS_ (BIT(1)) +#define DMAC_INTR_ENA_TX_ (BIT(0)) + +#define MISS_FRAME_CNTR (0x20) + +#define TX_BUFF_ADDR (0x50) + +#define RX_BUFF_ADDR (0x54) + +/* Transmit Descriptor Bit Defs */ +#define TDES0_OWN_ (0x80000000) +#define TDES0_ERROR_SUMMARY_ (0x00008000) +#define TDES0_LOSS_OF_CARRIER_ (0x00000800) +#define TDES0_NO_CARRIER_ (0x00000400) +#define TDES0_LATE_COLLISION_ (0x00000200) +#define TDES0_EXCESSIVE_COLLISIONS_ (0x00000100) +#define TDES0_HEARTBEAT_FAIL_ (0x00000080) +#define TDES0_COLLISION_COUNT_MASK_ (0x00000078) +#define TDES0_COLLISION_COUNT_SHFT_ (3) +#define TDES0_EXCESSIVE_DEFERRAL_ (0x00000004) +#define TDES0_DEFERRED_ (0x00000001) + +#define TDES1_IC_ 0x80000000 +#define TDES1_LS_ 0x40000000 +#define TDES1_FS_ 0x20000000 +#define TDES1_TXCSEN_ 0x08000000 +#define TDES1_TER_ (BIT(25)) +#define TDES1_TCH_ 0x01000000 + +/* Receive Descriptor 0 Bit Defs */ +#define RDES0_OWN_ (0x80000000) +#define RDES0_FRAME_LENGTH_MASK_ (0x07FF0000) +#define RDES0_FRAME_LENGTH_SHFT_ (16) +#define RDES0_ERROR_SUMMARY_ (0x00008000) +#define RDES0_DESCRIPTOR_ERROR_ (0x00004000) +#define RDES0_LENGTH_ERROR_ (0x00001000) +#define RDES0_RUNT_FRAME_ (0x00000800) +#define 
RDES0_MULTICAST_FRAME_ (0x00000400) +#define RDES0_FIRST_DESCRIPTOR_ (0x00000200) +#define RDES0_LAST_DESCRIPTOR_ (0x00000100) +#define RDES0_FRAME_TOO_LONG_ (0x00000080) +#define RDES0_COLLISION_SEEN_ (0x00000040) +#define RDES0_FRAME_TYPE_ (0x00000020) +#define RDES0_WATCHDOG_TIMEOUT_ (0x00000010) +#define RDES0_MII_ERROR_ (0x00000008) +#define RDES0_DRIBBLING_BIT_ (0x00000004) +#define RDES0_CRC_ERROR_ (0x00000002) + +/* Receive Descriptor 1 Bit Defs */ +#define RDES1_RER_ (0x02000000) + +/***********************************************/ +/* MAC Control and Status Registers */ +/***********************************************/ +#define MAC_CR (0x80) +#define MAC_CR_RXALL_ (0x80000000) +#define MAC_CR_DIS_RXOWN_ (0x00800000) +#define MAC_CR_LOOPBK_ (0x00200000) +#define MAC_CR_FDPX_ (0x00100000) +#define MAC_CR_MCPAS_ (0x00080000) +#define MAC_CR_PRMS_ (0x00040000) +#define MAC_CR_INVFILT_ (0x00020000) +#define MAC_CR_PASSBAD_ (0x00010000) +#define MAC_CR_HFILT_ (0x00008000) +#define MAC_CR_HPFILT_ (0x00002000) +#define MAC_CR_LCOLL_ (0x00001000) +#define MAC_CR_DIS_BCAST_ (0x00000800) +#define MAC_CR_DIS_RTRY_ (0x00000400) +#define MAC_CR_PADSTR_ (0x00000100) +#define MAC_CR_BOLMT_MSK (0x000000C0) +#define MAC_CR_MFCHK_ (0x00000020) +#define MAC_CR_TXEN_ (0x00000008) +#define MAC_CR_RXEN_ (0x00000004) + +#define ADDRH (0x84) + +#define ADDRL (0x88) + +#define HASHH (0x8C) + +#define HASHL (0x90) + +#define MII_ACCESS (0x94) +#define MII_ACCESS_MII_BUSY_ (0x00000001) +#define MII_ACCESS_MII_WRITE_ (0x00000002) +#define MII_ACCESS_MII_READ_ (0x00000000) +#define MII_ACCESS_INDX_MSK_ (0x000007C0) +#define MII_ACCESS_PHYADDR_MSK_ (0x0000F8C0) +#define MII_ACCESS_INDX_SHFT_CNT (6) +#define MII_ACCESS_PHYADDR_SHFT_CNT (11) + +#define MII_DATA (0x98) + +#define FLOW (0x9C) + +#define VLAN1 (0xA0) + +#define VLAN2 (0xA4) + +#define WUFF (0xA8) + +#define WUCSR (0xAC) + +#define COE_CR (0xB0) +#define TX_COE_EN (0x00010000) +#define RX_COE_MODE (0x00000002) +#define RX_COE_EN (0x00000001) + +/***********************************************/ +/* System Control and Status Registers */ +/***********************************************/ +#define ID_REV (0xC0) + +#define INT_CTL (0xC4) +#define INT_CTL_SW_INT_EN_ (0x00008000) +#define INT_CTL_SBERR_INT_EN_ (1 << 12) +#define INT_CTL_MBERR_INT_EN_ (1 << 13) +#define INT_CTL_GPT_INT_EN_ (0x00000008) +#define INT_CTL_PHY_INT_EN_ (0x00000004) +#define INT_CTL_WAKE_INT_EN_ (0x00000002) + +#define INT_STAT (0xC8) +#define INT_STAT_SW_INT_ (1 << 15) +#define INT_STAT_MBERR_INT_ (1 << 13) +#define INT_STAT_SBERR_INT_ (1 << 12) +#define INT_STAT_GPT_INT_ (1 << 3) +#define INT_STAT_PHY_INT_ (0x00000004) +#define INT_STAT_WAKE_INT_ (0x00000002) +#define INT_STAT_DMAC_INT_ (0x00000001) + +#define INT_CFG (0xCC) +#define INT_CFG_IRQ_INT_ (0x00080000) +#define INT_CFG_IRQ_EN_ (0x00040000) +#define INT_CFG_INT_DEAS_CLR_ (0x00000200) +#define INT_CFG_INT_DEAS_MASK (0x000000FF) + +#define GPIO_CFG (0xD0) +#define GPIO_CFG_LED_3_ (0x40000000) +#define GPIO_CFG_LED_2_ (0x20000000) +#define GPIO_CFG_LED_1_ (0x10000000) +#define GPIO_CFG_EEPR_EN_ (0x00700000) + +#define GPT_CFG (0xD4) +#define GPT_CFG_TIMER_EN_ (0x20000000) + +#define GPT_CNT (0xD8) + +#define BUS_CFG (0xDC) +#define BUS_CFG_RXTXWEIGHT_1_1 (0 << 25) +#define BUS_CFG_RXTXWEIGHT_2_1 (1 << 25) +#define BUS_CFG_RXTXWEIGHT_3_1 (2 << 25) +#define BUS_CFG_RXTXWEIGHT_4_1 (3 << 25) + +#define PMT_CTRL (0xE0) + +#define FREE_RUN (0xF4) + +#define E2P_CMD (0xF8) +#define E2P_CMD_EPC_BUSY_ (0x80000000) +#define 
E2P_CMD_EPC_CMD_ (0x70000000) +#define E2P_CMD_EPC_CMD_READ_ (0x00000000) +#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) +#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) +#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) +#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) +#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) +#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) +#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) +#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) +#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) +#define E2P_CMD_EPC_ADDR_ (0x000000FF) + +#define E2P_DATA (0xFC) +#define E2P_DATA_EEPROM_DATA_ (0x000000FF) + +#endif /* _SMSC9420_H */ diff --git a/drivers/net/pcmcia/Kconfig b/drivers/net/pcmcia/Kconfig index 72aa257..f5a738f 100644 --- a/drivers/net/pcmcia/Kconfig +++ b/drivers/net/pcmcia/Kconfig @@ -31,17 +31,6 @@ config PCMCIA_FMVJ18X To compile this driver as a module, choose M here: the module will be called fmvj18x_cs. If unsure, say N. -config PCMCIA_SMC91C92 - tristate "SMC 91Cxx PCMCIA support" - select CRC32 - select MII - help - Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA - (PC-card) Ethernet or Fast Ethernet card to your computer. - - To compile this driver as a module, choose M here: the module will be - called smc91c92_cs. If unsure, say N. - config PCMCIA_XIRC2PS tristate "Xircom 16-bit PCMCIA support" help diff --git a/drivers/net/pcmcia/Makefile b/drivers/net/pcmcia/Makefile index c2b8b44..f9c9883 100644 --- a/drivers/net/pcmcia/Makefile +++ b/drivers/net/pcmcia/Makefile @@ -4,7 +4,6 @@ # 16-bit client drivers obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o -obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o obj-$(CONFIG_PCMCIA_XIRC2PS) += xirc2ps_cs.o obj-$(CONFIG_ARCNET_COM20020_CS)+= com20020_cs.o diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c deleted file mode 100644 index cffbc03..0000000 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ /dev/null @@ -1,2070 +0,0 @@ -/*====================================================================== - - A PCMCIA ethernet driver for SMC91c92-based cards. - - This driver supports Megahertz PCMCIA ethernet cards; and - Megahertz, Motorola, Ositech, and Psion Dacom ethernet/modem - multifunction cards. - - Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net - - smc91c92_cs.c 1.122 2002/10/25 06:26:39 - - This driver contains code written by Donald Becker - (becker@scyld.com), Rowan Hughes (x-csrdh@jcu.edu.au), - David Hinds (dahinds@users.sourceforge.net), and Erik Stahlman - (erik@vt.edu). Donald wrote the SMC 91c92 code using parts of - Erik's SMC 91c94 driver. Rowan wrote a similar driver, and I've - incorporated some parts of his driver here. I (Dave) wrote most - of the PCMCIA glue code, and the Ositech support code. Kelly - Stephens (kstephen@holli.com) added support for the Motorola - Mariner, with help from Allen Brost. - - This software may be used and distributed according to the terms of - the GNU General Public License, incorporated herein by reference. 
- -======================================================================*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/timer.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/crc32.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/if_arp.h> -#include <linux/ioport.h> -#include <linux/ethtool.h> -#include <linux/mii.h> -#include <linux/jiffies.h> -#include <linux/firmware.h> - -#include <pcmcia/cistpl.h> -#include <pcmcia/cisreg.h> -#include <pcmcia/ciscode.h> -#include <pcmcia/ds.h> -#include <pcmcia/ss.h> - -#include <asm/io.h> -#include <asm/system.h> -#include <asm/uaccess.h> - -/*====================================================================*/ - -static const char *if_names[] = { "auto", "10baseT", "10base2"}; - -/* Firmware name */ -#define FIRMWARE_NAME "ositech/Xilinx7OD.bin" - -/* Module parameters */ - -MODULE_DESCRIPTION("SMC 91c92 series PCMCIA ethernet driver"); -MODULE_LICENSE("GPL"); -MODULE_FIRMWARE(FIRMWARE_NAME); - -#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) - -/* - Transceiver/media type. - 0 = auto - 1 = 10baseT (and autoselect if #define AUTOSELECT), - 2 = AUI/10base2, -*/ -INT_MODULE_PARM(if_port, 0); - - -#define DRV_NAME "smc91c92_cs" -#define DRV_VERSION "1.123" - -/*====================================================================*/ - -/* Operational parameters that usually are not changed. */ - -/* Time in jiffies before concluding Tx hung */ -#define TX_TIMEOUT ((400*HZ)/1000) - -/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ -#define INTR_WORK 4 - -/* Times to check the chip before concluding that it doesn't - currently have room for another Tx packet. */ -#define MEMORY_WAIT_TIME 8 - -struct smc_private { - struct pcmcia_device *p_dev; - spinlock_t lock; - u_short manfid; - u_short cardid; - - struct sk_buff *saved_skb; - int packets_waiting; - void __iomem *base; - u_short cfg; - struct timer_list media; - int watchdog, tx_err; - u_short media_status; - u_short fast_poll; - u_short link_status; - struct mii_if_info mii_if; - int duplex; - int rx_ovrn; -}; - -/* Special definitions for Megahertz multifunction cards */ -#define MEGAHERTZ_ISR 0x0380 - -/* Special function registers for Motorola Mariner */ -#define MOT_LAN 0x0000 -#define MOT_UART 0x0020 -#define MOT_EEPROM 0x20 - -#define MOT_NORMAL \ -(COR_LEVEL_REQ | COR_FUNC_ENA | COR_ADDR_DECODE | COR_IREQ_ENA) - -/* Special function registers for Ositech cards */ -#define OSITECH_AUI_CTL 0x0c -#define OSITECH_PWRDOWN 0x0d -#define OSITECH_RESET 0x0e -#define OSITECH_ISR 0x0f -#define OSITECH_AUI_PWR 0x0c -#define OSITECH_RESET_ISR 0x0e - -#define OSI_AUI_PWR 0x40 -#define OSI_LAN_PWRDOWN 0x02 -#define OSI_MODEM_PWRDOWN 0x01 -#define OSI_LAN_RESET 0x02 -#define OSI_MODEM_RESET 0x01 - -/* Symbolic constants for the SMC91c9* series chips, from Erik Stahlman. */ -#define BANK_SELECT 14 /* Window select register. */ -#define SMC_SELECT_BANK(x) { outw(x, ioaddr + BANK_SELECT); } - -/* Bank 0 registers. */ -#define TCR 0 /* transmit control register */ -#define TCR_CLEAR 0 /* do NOTHING */ -#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */ -#define TCR_PAD_EN 0x0080 /* pads short packets to 64 bytes */ -#define TCR_MONCSN 0x0400 /* Monitor Carrier. */ -#define TCR_FDUPLX 0x0800 /* Full duplex mode. */ -#define TCR_NORMAL TCR_ENABLE | TCR_PAD_EN - -#define EPH 2 /* Ethernet Protocol Handler report.
*/ -#define EPH_TX_SUC 0x0001 -#define EPH_SNGLCOL 0x0002 -#define EPH_MULCOL 0x0004 -#define EPH_LTX_MULT 0x0008 -#define EPH_16COL 0x0010 -#define EPH_SQET 0x0020 -#define EPH_LTX_BRD 0x0040 -#define EPH_TX_DEFR 0x0080 -#define EPH_LAT_COL 0x0200 -#define EPH_LOST_CAR 0x0400 -#define EPH_EXC_DEF 0x0800 -#define EPH_CTR_ROL 0x1000 -#define EPH_RX_OVRN 0x2000 -#define EPH_LINK_OK 0x4000 -#define EPH_TX_UNRN 0x8000 -#define MEMINFO 8 /* Memory Information Register */ -#define MEMCFG 10 /* Memory Configuration Register */ - -/* Bank 1 registers. */ -#define CONFIG 0 -#define CFG_MII_SELECT 0x8000 /* 91C100 only */ -#define CFG_NO_WAIT 0x1000 -#define CFG_FULL_STEP 0x0400 -#define CFG_SET_SQLCH 0x0200 -#define CFG_AUI_SELECT 0x0100 -#define CFG_16BIT 0x0080 -#define CFG_DIS_LINK 0x0040 -#define CFG_STATIC 0x0030 -#define CFG_IRQ_SEL_1 0x0004 -#define CFG_IRQ_SEL_0 0x0002 -#define BASE_ADDR 2 -#define ADDR0 4 -#define GENERAL 10 -#define CONTROL 12 -#define CTL_STORE 0x0001 -#define CTL_RELOAD 0x0002 -#define CTL_EE_SELECT 0x0004 -#define CTL_TE_ENABLE 0x0020 -#define CTL_CR_ENABLE 0x0040 -#define CTL_LE_ENABLE 0x0080 -#define CTL_AUTO_RELEASE 0x0800 -#define CTL_POWERDOWN 0x2000 - -/* Bank 2 registers. */ -#define MMU_CMD 0 -#define MC_ALLOC 0x20 /* or with number of 256 byte packets */ -#define MC_RESET 0x40 -#define MC_RELEASE 0x80 /* remove and release the current rx packet */ -#define MC_FREEPKT 0xA0 /* Release packet in PNR register */ -#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */ -#define PNR_ARR 2 -#define FIFO_PORTS 4 -#define FP_RXEMPTY 0x8000 -#define POINTER 6 -#define PTR_AUTO_INC 0x0040 -#define PTR_READ 0x2000 -#define PTR_AUTOINC 0x4000 -#define PTR_RCV 0x8000 -#define DATA_1 8 -#define INTERRUPT 12 -#define IM_RCV_INT 0x1 -#define IM_TX_INT 0x2 -#define IM_TX_EMPTY_INT 0x4 -#define IM_ALLOC_INT 0x8 -#define IM_RX_OVRN_INT 0x10 -#define IM_EPH_INT 0x20 - -#define RCR 4 -enum RxCfg { RxAllMulti = 0x0004, RxPromisc = 0x0002, - RxEnable = 0x0100, RxStripCRC = 0x0200}; -#define RCR_SOFTRESET 0x8000 /* resets the chip */ -#define RCR_STRIP_CRC 0x200 /* strips CRC */ -#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */ -#define RCR_ALMUL 0x4 /* receive all multicast packets */ -#define RCR_PROMISC 0x2 /* enable promiscuous mode */ - -/* the normal settings for the RCR register : */ -#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE) -#define RCR_CLEAR 0x0 /* set it to a base state */ -#define COUNTER 6 - -/* BANK 3 -- not the same values as in smc9194! */ -#define MULTICAST0 0 -#define MULTICAST2 2 -#define MULTICAST4 4 -#define MULTICAST6 6 -#define MGMT 8 -#define REVISION 0x0a - -/* Transmit status bits. */ -#define TS_SUCCESS 0x0001 -#define TS_16COL 0x0010 -#define TS_LATCOL 0x0200 -#define TS_LOSTCAR 0x0400 - -/* Receive status bits. 
 */ -#define RS_ALGNERR 0x8000 -#define RS_BADCRC 0x2000 -#define RS_ODDFRAME 0x1000 -#define RS_TOOLONG 0x0800 -#define RS_TOOSHORT 0x0400 -#define RS_MULTICAST 0x0001 -#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) - -#define set_bits(v, p) outw(inw(p)|(v), (p)) -#define mask_bits(v, p) outw(inw(p)&(v), (p)) - -/*====================================================================*/ - -static void smc91c92_detach(struct pcmcia_device *p_dev); -static int smc91c92_config(struct pcmcia_device *link); -static void smc91c92_release(struct pcmcia_device *link); - -static int smc_open(struct net_device *dev); -static int smc_close(struct net_device *dev); -static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static void smc_tx_timeout(struct net_device *dev); -static netdev_tx_t smc_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t smc_interrupt(int irq, void *dev_id); -static void smc_rx(struct net_device *dev); -static void set_rx_mode(struct net_device *dev); -static int s9k_config(struct net_device *dev, struct ifmap *map); -static void smc_set_xcvr(struct net_device *dev, int if_port); -static void smc_reset(struct net_device *dev); -static void media_check(u_long arg); -static void mdio_sync(unsigned int addr); -static int mdio_read(struct net_device *dev, int phy_id, int loc); -static void mdio_write(struct net_device *dev, int phy_id, int loc, int value); -static int smc_link_ok(struct net_device *dev); -static const struct ethtool_ops ethtool_ops; - -static const struct net_device_ops smc_netdev_ops = { - .ndo_open = smc_open, - .ndo_stop = smc_close, - .ndo_start_xmit = smc_start_xmit, - .ndo_tx_timeout = smc_tx_timeout, - .ndo_set_config = s9k_config, - .ndo_set_multicast_list = set_rx_mode, - .ndo_do_ioctl = smc_ioctl, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int smc91c92_probe(struct pcmcia_device *link) -{ - struct smc_private *smc; - struct net_device *dev; - - dev_dbg(&link->dev, "smc91c92_attach()\n"); - - /* Create new ethernet device */ - dev = alloc_etherdev(sizeof(struct smc_private)); - if (!dev) - return -ENOMEM; - smc = netdev_priv(dev); - smc->p_dev = link; - link->priv = dev; - - spin_lock_init(&smc->lock); - - /* The SMC91c92-specific entries in the device structure. */ - dev->netdev_ops = &smc_netdev_ops; - SET_ETHTOOL_OPS(dev, &ethtool_ops); - dev->watchdog_timeo = TX_TIMEOUT; - - smc->mii_if.dev = dev; - smc->mii_if.mdio_read = mdio_read; - smc->mii_if.mdio_write = mdio_write; - smc->mii_if.phy_id_mask = 0x1f; - smc->mii_if.reg_num_mask = 0x1f; - - return smc91c92_config(link); -} /* smc91c92_attach */ - -static void smc91c92_detach(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - dev_dbg(&link->dev, "smc91c92_detach\n"); - - unregister_netdev(dev); - - smc91c92_release(link); - - free_netdev(dev); -} /* smc91c92_detach */ - -/*====================================================================*/ - -static int cvt_ascii_address(struct net_device *dev, char *s) -{ - int i, j, da, c; - - if (strlen(s) != 12) - return -1; - for (i = 0; i < 6; i++) { - da = 0; - for (j = 0; j < 2; j++) { - c = *s++; - da <<= 4; - da += ((c >= '0') && (c <= '9')) ?
- (c - '0') : ((c & 0x0f) + 9); - } - dev->dev_addr[i] = da; - } - return 0; -} - -/*==================================================================== - - Configuration stuff for Megahertz cards - - mhz_3288_power() is used to power up a 3288's ethernet chip. - mhz_mfc_config() handles socket setup for multifunction (1144 - and 3288) cards. mhz_setup() gets a card's hardware ethernet - address. - -======================================================================*/ - -static int mhz_3288_power(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - struct smc_private *smc = netdev_priv(dev); - u_char tmp; - - /* Read the ISR twice... */ - readb(smc->base+MEGAHERTZ_ISR); - udelay(5); - readb(smc->base+MEGAHERTZ_ISR); - - /* Pause 200ms... */ - mdelay(200); - - /* Now read and write the COR... */ - tmp = readb(smc->base + link->config_base + CISREG_COR); - udelay(5); - writeb(tmp, smc->base + link->config_base + CISREG_COR); - - return 0; -} - -static int mhz_mfc_config_check(struct pcmcia_device *p_dev, void *priv_data) -{ - int k; - p_dev->io_lines = 16; - p_dev->resource[1]->start = p_dev->resource[0]->start; - p_dev->resource[1]->end = 8; - p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; - p_dev->resource[0]->end = 16; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - for (k = 0; k < 0x400; k += 0x10) { - if (k & 0x80) - continue; - p_dev->resource[0]->start = k ^ 0x300; - if (!pcmcia_request_io(p_dev)) - return 0; - } - return -ENODEV; -} - -static int mhz_mfc_config(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - struct smc_private *smc = netdev_priv(dev); - unsigned int offset; - int i; - - link->config_flags |= CONF_ENABLE_SPKR | CONF_ENABLE_IRQ | - CONF_AUTO_SET_IO; - - /* The Megahertz combo cards have modem-like CIS entries, so - we have to explicitly try a bunch of port combinations. */ - if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL)) - return -ENODEV; - - dev->base_addr = link->resource[0]->start; - - /* Allocate a memory window, for accessing the ISR */ - link->resource[2]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; - link->resource[2]->start = link->resource[2]->end = 0; - i = pcmcia_request_window(link, link->resource[2], 0); - if (i != 0) - return -ENODEV; - - smc->base = ioremap(link->resource[2]->start, - resource_size(link->resource[2])); - offset = (smc->manfid == MANFID_MOTOROLA) ? link->config_base : 0; - i = pcmcia_map_mem_page(link, link->resource[2], offset); - if ((i == 0) && - (smc->manfid == MANFID_MEGAHERTZ) && - (smc->cardid == PRODID_MEGAHERTZ_EM3288)) - mhz_3288_power(link); - - return 0; -} - -static int pcmcia_get_versmac(struct pcmcia_device *p_dev, - tuple_t *tuple, - void *priv) -{ - struct net_device *dev = priv; - cisparse_t parse; - u8 *buf; - - if (pcmcia_parse_tuple(tuple, &parse)) - return -EINVAL; - - buf = parse.version_1.str + parse.version_1.ofs[3]; - - if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0)) - return 0; - - return -EINVAL; -}; - -static int mhz_setup(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - size_t len; - u8 *buf; - int rc; - - /* Read the station address from the CIS. It is stored as the last - (fourth) string in the Version 1 Version/ID tuple. */ - if ((link->prod_id[3]) && - (cvt_ascii_address(dev, link->prod_id[3]) == 0)) - return 0; - - /* Workarounds for broken cards start here. 
*/ - /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */ - if (!pcmcia_loop_tuple(link, CISTPL_VERS_1, pcmcia_get_versmac, dev)) - return 0; - - /* Another possibility: for the EM3288, in a special tuple */ - rc = -1; - len = pcmcia_get_tuple(link, 0x81, &buf); - if (buf && len >= 13) { - buf[12] = '\0'; - if (cvt_ascii_address(dev, buf) == 0) - rc = 0; - } - kfree(buf); - - return rc; -}; - -/*====================================================================== - - Configuration stuff for the Motorola Mariner - - mot_config() writes directly to the Mariner configuration - registers because the CIS is just bogus. - -======================================================================*/ - -static void mot_config(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - unsigned int iouart = link->resource[1]->start; - - /* Set UART base address and force map with COR bit 1 */ - writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0); - writeb((iouart >> 8) & 0xff, smc->base + MOT_UART + CISREG_IOBASE_1); - writeb(MOT_NORMAL, smc->base + MOT_UART + CISREG_COR); - - /* Set SMC base address and force map with COR bit 1 */ - writeb(ioaddr & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_0); - writeb((ioaddr >> 8) & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_1); - writeb(MOT_NORMAL, smc->base + MOT_LAN + CISREG_COR); - - /* Wait for things to settle down */ - mdelay(100); -} - -static int mot_setup(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - unsigned int ioaddr = dev->base_addr; - int i, wait, loop; - u_int addr; - - /* Read Ethernet address from Serial EEPROM */ - - for (i = 0; i < 3; i++) { - SMC_SELECT_BANK(2); - outw(MOT_EEPROM + i, ioaddr + POINTER); - SMC_SELECT_BANK(1); - outw((CTL_RELOAD | CTL_EE_SELECT), ioaddr + CONTROL); - - for (loop = wait = 0; loop < 200; loop++) { - udelay(10); - wait = ((CTL_RELOAD | CTL_STORE) & inw(ioaddr + CONTROL)); - if (wait == 0) break; - } - - if (wait) - return -1; - - addr = inw(ioaddr + GENERAL); - dev->dev_addr[2*i] = addr & 0xff; - dev->dev_addr[2*i+1] = (addr >> 8) & 0xff; - } - - return 0; -} - -/*====================================================================*/ - -static int smc_configcheck(struct pcmcia_device *p_dev, void *priv_data) -{ - p_dev->resource[0]->end = 16; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - - return pcmcia_request_io(p_dev); -} - -static int smc_config(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - int i; - - link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; - - i = pcmcia_loop_config(link, smc_configcheck, NULL); - if (!i) - dev->base_addr = link->resource[0]->start; - - return i; -} - - -static int smc_setup(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - /* Check for a LAN function extension tuple */ - if (!pcmcia_get_mac_from_cis(link, dev)) - return 0; - - /* Try the third string in the Version 1 Version/ID tuple. 
*/ - if (link->prod_id[2]) { - if (cvt_ascii_address(dev, link->prod_id[2]) == 0) - return 0; - } - return -1; -} - -/*====================================================================*/ - -static int osi_config(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - static const unsigned int com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; - int i, j; - - link->config_flags |= CONF_ENABLE_SPKR | CONF_ENABLE_IRQ; - link->resource[0]->end = 64; - link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[1]->end = 8; - - /* Enable Hard Decode, LAN, Modem */ - link->io_lines = 16; - link->config_index = 0x23; - - for (i = j = 0; j < 4; j++) { - link->resource[1]->start = com[j]; - i = pcmcia_request_io(link); - if (i == 0) - break; - } - if (i != 0) { - /* Fallback: turn off hard decode */ - link->config_index = 0x03; - link->resource[1]->end = 0; - i = pcmcia_request_io(link); - } - dev->base_addr = link->resource[0]->start + 0x10; - return i; -} - -static int osi_load_firmware(struct pcmcia_device *link) -{ - const struct firmware *fw; - int i, err; - - err = request_firmware(&fw, FIRMWARE_NAME, &link->dev); - if (err) { - pr_err("Failed to load firmware \"%s\"\n", FIRMWARE_NAME); - return err; - } - - /* Download the Seven of Diamonds firmware */ - for (i = 0; i < fw->size; i++) { - outb(fw->data[i], link->resource[0]->start + 2); - udelay(50); - } - release_firmware(fw); - return err; -} - -static int pcmcia_osi_mac(struct pcmcia_device *p_dev, - tuple_t *tuple, - void *priv) -{ - struct net_device *dev = priv; - int i; - - if (tuple->TupleDataLen < 8) - return -EINVAL; - if (tuple->TupleData[0] != 0x04) - return -EINVAL; - for (i = 0; i < 6; i++) - dev->dev_addr[i] = tuple->TupleData[i+2]; - return 0; -}; - - -static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid) -{ - struct net_device *dev = link->priv; - int rc; - - /* Read the station address from tuple 0x90, subtuple 0x04 */ - if (pcmcia_loop_tuple(link, 0x90, pcmcia_osi_mac, dev)) - return -1; - - if (((manfid == MANFID_OSITECH) && - (cardid == PRODID_OSITECH_SEVEN)) || - ((manfid == MANFID_PSION) && - (cardid == PRODID_PSION_NET100))) { - rc = osi_load_firmware(link); - if (rc) - return rc; - } else if (manfid == MANFID_OSITECH) { - /* Make sure both functions are powered up */ - set_bits(0x300, link->resource[0]->start + OSITECH_AUI_PWR); - /* Now, turn on the interrupt for both card functions */ - set_bits(0x300, link->resource[0]->start + OSITECH_RESET_ISR); - dev_dbg(&link->dev, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n", - inw(link->resource[0]->start + OSITECH_AUI_PWR), - inw(link->resource[0]->start + OSITECH_RESET_ISR)); - } - return 0; -} - -static int smc91c92_suspend(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - - if (link->open) - netif_device_detach(dev); - - return 0; -} - -static int smc91c92_resume(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - struct smc_private *smc = netdev_priv(dev); - int i; - - if ((smc->manfid == MANFID_MEGAHERTZ) && - (smc->cardid == PRODID_MEGAHERTZ_EM3288)) - mhz_3288_power(link); - if (smc->manfid == MANFID_MOTOROLA) - mot_config(link); - if ((smc->manfid == MANFID_OSITECH) && - (smc->cardid != PRODID_OSITECH_SEVEN)) { - /* Power up the card and enable interrupts */ - set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR); - set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR); - } - if (((smc->manfid == MANFID_OSITECH) && - (smc->cardid == PRODID_OSITECH_SEVEN)) || - ((smc->manfid == 
 MANFID_PSION) && - (smc->cardid == PRODID_PSION_NET100))) { - i = osi_load_firmware(link); - if (i) { - pr_err("smc91c92_cs: Failed to load firmware\n"); - return i; - } - } - if (link->open) { - smc_reset(dev); - netif_device_attach(dev); - } - - return 0; -} - - -/*====================================================================== - - This verifies that the chip is some SMC91cXX variant, and returns - the revision code if successful. Otherwise, it returns -ENODEV. - -======================================================================*/ - -static int check_sig(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - unsigned int ioaddr = dev->base_addr; - int width; - u_short s; - - SMC_SELECT_BANK(1); - if (inw(ioaddr + BANK_SELECT) >> 8 != 0x33) { - /* Try powering up the chip */ - outw(0, ioaddr + CONTROL); - mdelay(55); - } - - /* Try setting bus width */ - width = (link->resource[0]->flags == IO_DATA_PATH_WIDTH_AUTO); - s = inb(ioaddr + CONFIG); - if (width) - s |= CFG_16BIT; - else - s &= ~CFG_16BIT; - outb(s, ioaddr + CONFIG); - - /* Check Base Address Register to make sure bus width is OK */ - s = inw(ioaddr + BASE_ADDR); - if ((inw(ioaddr + BANK_SELECT) >> 8 == 0x33) && - ((s >> 8) != (s & 0xff))) { - SMC_SELECT_BANK(3); - s = inw(ioaddr + REVISION); - return s & 0xff; - } - - if (width) { - pr_info("using 8-bit IO window\n"); - - smc91c92_suspend(link); - pcmcia_fixup_iowidth(link); - smc91c92_resume(link); - return check_sig(link); - } - return -ENODEV; -} - -static int smc91c92_config(struct pcmcia_device *link) -{ - struct net_device *dev = link->priv; - struct smc_private *smc = netdev_priv(dev); - char *name; - int i, rev, j = 0; - unsigned int ioaddr; - u_long mir; - - dev_dbg(&link->dev, "smc91c92_config\n"); - - smc->manfid = link->manf_id; - smc->cardid = link->card_id; - - if ((smc->manfid == MANFID_OSITECH) && - (smc->cardid != PRODID_OSITECH_SEVEN)) { - i = osi_config(link); - } else if ((smc->manfid == MANFID_MOTOROLA) || - ((smc->manfid == MANFID_MEGAHERTZ) && - ((smc->cardid == PRODID_MEGAHERTZ_VARIOUS) || - (smc->cardid == PRODID_MEGAHERTZ_EM3288)))) { - i = mhz_mfc_config(link); - } else { - i = smc_config(link); - } - if (i) - goto config_failed; - - i = pcmcia_request_irq(link, smc_interrupt); - if (i) - goto config_failed; - i = pcmcia_enable_device(link); - if (i) - goto config_failed; - - if (smc->manfid == MANFID_MOTOROLA) - mot_config(link); - - dev->irq = link->irq; - - if ((if_port >= 0) && (if_port <= 2)) - dev->if_port = if_port; - else - dev_notice(&link->dev, "invalid if_port requested\n"); - - switch (smc->manfid) { - case MANFID_OSITECH: - case MANFID_PSION: - i = osi_setup(link, smc->manfid, smc->cardid); break; - case MANFID_SMC: - case MANFID_NEW_MEDIA: - i = smc_setup(link); break; - case 0x128: /* For broken Megahertz cards */ - case MANFID_MEGAHERTZ: - i = mhz_setup(link); break; - case MANFID_MOTOROLA: - default: /* get the hw address from EEPROM */ - i = mot_setup(link); break; - } - - if (i != 0) { - dev_notice(&link->dev, "Unable to find hardware address.\n"); - goto config_failed; - } - - smc->duplex = 0; - smc->rx_ovrn = 0; - - rev = check_sig(link); - name = "???"; - if (rev > 0) - switch (rev >> 4) { - case 3: name = "92"; break; - case 4: name = ((rev & 15) >= 6) ? "96" : "94"; break; - case 5: name = "95"; break; - case 7: name = "100"; break; - case 8: name = "100-FD"; break; - case 9: name = "110"; break; - } - - ioaddr = dev->base_addr; - if (rev > 0) { - u_long mcr; - SMC_SELECT_BANK(0); - mir = inw(ioaddr + MEMINFO) & 0xff; - if (mir == 0xff) mir++; - /* Get scale factor for memory size */ - mcr = ((rev >> 4) > 3) ? inw(ioaddr + MEMCFG) : 0x0200; - mir *= 128 * (1<<((mcr >> 9) & 7)); - SMC_SELECT_BANK(1); - smc->cfg = inw(ioaddr + CONFIG) & ~CFG_AUI_SELECT; - smc->cfg |= CFG_NO_WAIT | CFG_16BIT | CFG_STATIC; - if (smc->manfid == MANFID_OSITECH) - smc->cfg |= CFG_IRQ_SEL_1 | CFG_IRQ_SEL_0; - if ((rev >> 4) >= 7) - smc->cfg |= CFG_MII_SELECT; - } else - mir = 0; - - if (smc->cfg & CFG_MII_SELECT) { - SMC_SELECT_BANK(3); - - for (i = 0; i < 32; i++) { - j = mdio_read(dev, i, 1); - if ((j != 0) && (j != 0xffff)) break; - } - smc->mii_if.phy_id = (i < 32) ? i : -1; - - SMC_SELECT_BANK(0); - } - - SET_NETDEV_DEV(dev, &link->dev); - - if (register_netdev(dev) != 0) { - dev_err(&link->dev, "register_netdev() failed\n"); - goto config_undo; - } - - netdev_info(dev, "smc91c%s rev %d: io %#3lx, irq %d, hw_addr %pM\n", - name, (rev & 0x0f), dev->base_addr, dev->irq, dev->dev_addr); - - if (rev > 0) { - if (mir & 0x3ff) - netdev_info(dev, " %lu byte", mir); - else - netdev_info(dev, " %lu kb", mir>>10); - pr_cont(" buffer, %s xcvr\n", - (smc->cfg & CFG_MII_SELECT) ? "MII" : if_names[dev->if_port]); - } - - if (smc->cfg & CFG_MII_SELECT) { - if (smc->mii_if.phy_id != -1) { - netdev_dbg(dev, " MII transceiver at index %d, status %x\n", - smc->mii_if.phy_id, j); - } else { - netdev_notice(dev, " No MII transceivers found!\n"); - } - } - return 0; - -config_undo: - unregister_netdev(dev); -config_failed: - smc91c92_release(link); - free_netdev(dev); - return -ENODEV; -} /* smc91c92_config */ - -static void smc91c92_release(struct pcmcia_device *link) -{ - dev_dbg(&link->dev, "smc91c92_release\n"); - if (link->resource[2]->end) { - struct net_device *dev = link->priv; - struct smc_private *smc = netdev_priv(dev); - iounmap(smc->base); - } - pcmcia_disable_device(link); -} - -/*====================================================================== - - MII interface support for SMC91cXX based cards -======================================================================*/ - -#define MDIO_SHIFT_CLK 0x04 -#define MDIO_DATA_OUT 0x01 -#define MDIO_DIR_WRITE 0x08 -#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE) -#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT) -#define MDIO_DATA_READ 0x02 - -static void mdio_sync(unsigned int addr) -{ - int bits; - for (bits = 0; bits < 32; bits++) { - outb(MDIO_DATA_WRITE1, addr); - outb(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr); - } -} - -static int mdio_read(struct net_device *dev, int phy_id, int loc) -{ - unsigned int addr = dev->base_addr + MGMT; - u_int cmd = (0x06<<10)|(phy_id<<5)|loc; - int i, retval = 0; - - mdio_sync(addr); - for (i = 13; i >= 0; i--) { - int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; - outb(dat, addr); - outb(dat | MDIO_SHIFT_CLK, addr); - } - for (i = 19; i > 0; i--) { - outb(0, addr); - retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0); - outb(MDIO_SHIFT_CLK, addr); - } - return (retval>>1) & 0xffff; -} - -static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) -{ - unsigned int addr = dev->base_addr + MGMT; - u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value; - int i; - - mdio_sync(addr); - for (i = 31; i >= 0; i--) { - int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; - outb(dat, addr); - outb(dat | MDIO_SHIFT_CLK, addr); - } - for (i = 1; i >= 0; i--) { - outb(0, addr); - outb(MDIO_SHIFT_CLK, addr); - } -} -
 -/*====================================================================== - - The driver core code, most of which should be common with a - non-PCMCIA implementation. - -======================================================================*/ - -#ifdef PCMCIA_DEBUG -static void smc_dump(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - u_short i, w, save; - save = inw(ioaddr + BANK_SELECT); - for (w = 0; w < 4; w++) { - SMC_SELECT_BANK(w); - netdev_printk(KERN_DEBUG, dev, "bank %d: ", w); - for (i = 0; i < 14; i += 2) - pr_cont(" %04x", inw(ioaddr + i)); - pr_cont("\n"); - } - outw(save, ioaddr + BANK_SELECT); -} -#endif - -static int smc_open(struct net_device *dev) -{ - struct smc_private *smc = netdev_priv(dev); - struct pcmcia_device *link = smc->p_dev; - - dev_dbg(&link->dev, "%s: smc_open(%p), ID/Window %4.4x.\n", - dev->name, dev, inw(dev->base_addr + BANK_SELECT)); -#ifdef PCMCIA_DEBUG - smc_dump(dev); -#endif - - /* Check that the PCMCIA card is still here. */ - if (!pcmcia_dev_present(link)) - return -ENODEV; - /* Physical device present signature. */ - if (check_sig(link) < 0) { - netdev_info(dev, "Yikes! Bad chip signature!\n"); - return -ENODEV; - } - link->open++; - - netif_start_queue(dev); - smc->saved_skb = NULL; - smc->packets_waiting = 0; - - smc_reset(dev); - init_timer(&smc->media); - smc->media.function = media_check; - smc->media.data = (u_long) dev; - smc->media.expires = jiffies + HZ; - add_timer(&smc->media); - - return 0; -} /* smc_open */ - -/*====================================================================*/ - -static int smc_close(struct net_device *dev) -{ - struct smc_private *smc = netdev_priv(dev); - struct pcmcia_device *link = smc->p_dev; - unsigned int ioaddr = dev->base_addr; - - dev_dbg(&link->dev, "%s: smc_close(), status %4.4x.\n", - dev->name, inw(ioaddr + BANK_SELECT)); - - netif_stop_queue(dev); - - /* Shut off all interrupts, and turn off the Tx and Rx sections. - Don't bother to check for chip present. */ - SMC_SELECT_BANK(2); /* Nominally paranoia, but do not assume... */ - outw(0, ioaddr + INTERRUPT); - SMC_SELECT_BANK(0); - mask_bits(0xff00, ioaddr + RCR); - mask_bits(0xff00, ioaddr + TCR); - - /* Put the chip into power-down mode. */ - SMC_SELECT_BANK(1); - outw(CTL_POWERDOWN, ioaddr + CONTROL ); - - link->open--; - del_timer_sync(&smc->media); - - return 0; -} /* smc_close */ -
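Everything in this driver goes through the 91c92's banked register window: BANK_SELECT at offset 14 chooses which of four register banks the other I/O offsets address, which is why smc_open()/smc_close() above, and the interrupt handler further down, save the current bank, switch as needed, and restore it before returning. Reduced to a hypothetical helper (a sketch assuming the driver's BANK_SELECT definition and x86-style inw/outw; the real code inlines this pattern):

    /* Read one register from a given bank, preserving the caller's
     * bank selection. */
    static u_short smc_read_banked(unsigned int ioaddr, int bank, int reg)
    {
            u_short saved = inw(ioaddr + BANK_SELECT); /* remember bank */
            u_short val;

            outw(bank, ioaddr + BANK_SELECT);          /* map wanted bank */
            val = inw(ioaddr + reg);
            outw(saved, ioaddr + BANK_SELECT);         /* restore */
            return val;
    }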
-/*====================================================================== - - Transfer a packet to the hardware and trigger the packet send. - This may be called from either the Tx queue code - or the interrupt handler. - -======================================================================*/ - -static void smc_hardware_send_packet(struct net_device * dev) -{ - struct smc_private *smc = netdev_priv(dev); - struct sk_buff *skb = smc->saved_skb; - unsigned int ioaddr = dev->base_addr; - u_char packet_no; - - if (!skb) { - netdev_err(dev, "In XMIT with no packet to send\n"); - return; - } - - /* There should be a packet slot waiting. */ - packet_no = inw(ioaddr + PNR_ARR) >> 8; - if (packet_no & 0x80) { - /* If not, there is a hardware problem! Likely an ejected card. */ - netdev_warn(dev, "hardware Tx buffer allocation failed, status %#2.2x\n", - packet_no); - dev_kfree_skb_irq(skb); - smc->saved_skb = NULL; - netif_start_queue(dev); - return; - } - - dev->stats.tx_bytes += skb->len; - /* The card should use the just-allocated buffer. */ - outw(packet_no, ioaddr + PNR_ARR); - /* point to the beginning of the packet */ - outw(PTR_AUTOINC , ioaddr + POINTER); - - /* Send the packet length (+6 for status, length and ctl byte) - and the status word (set to zeros). */ - { - u_char *buf = skb->data; - u_int length = skb->len; /* The chip will pad to ethernet min. */ - - netdev_dbg(dev, "Trying to xmit packet of length %d\n", length); - - /* send the packet length: +6 for status word, length, and ctl */ - outw(0, ioaddr + DATA_1); - outw(length + 6, ioaddr + DATA_1); - outsw(ioaddr + DATA_1, buf, length >> 1); - - /* The odd last byte, if there is one, goes in the control word. */ - outw((length & 1) ? 0x2000 | buf[length-1] : 0, ioaddr + DATA_1); - } - - /* Enable the Tx interrupts, both Tx (TxErr) and TxEmpty. */ - outw(((IM_TX_INT|IM_TX_EMPTY_INT)<<8) | - (inw(ioaddr + INTERRUPT) & 0xff00), - ioaddr + INTERRUPT); - - /* The chip does the rest of the work. */ - outw(MC_ENQUEUE , ioaddr + MMU_CMD); - - smc->saved_skb = NULL; - dev_kfree_skb_irq(skb); - dev->trans_start = jiffies; - netif_start_queue(dev); -} - -/*====================================================================*/ - -static void smc_tx_timeout(struct net_device *dev) -{ - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - - netdev_notice(dev, "transmit timed out, Tx_status %2.2x status %4.4x.\n", - inw(ioaddr)&0xff, inw(ioaddr + 2)); - dev->stats.tx_errors++; - smc_reset(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ - smc->saved_skb = NULL; - netif_wake_queue(dev); -} - -static netdev_tx_t smc_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - u_short num_pages; - short time_out, ir; - unsigned long flags; - - netif_stop_queue(dev); - - netdev_dbg(dev, "smc_start_xmit(length = %d) called, status %04x\n", - skb->len, inw(ioaddr + 2)); - - if (smc->saved_skb) { - /* THIS SHOULD NEVER HAPPEN. */ - dev->stats.tx_aborted_errors++; - netdev_printk(KERN_DEBUG, dev, - "Internal error -- sent packet while busy\n"); - return NETDEV_TX_BUSY; - } - smc->saved_skb = skb; - - num_pages = skb->len >> 8; - - if (num_pages > 7) { - netdev_err(dev, "Far too big packet error: %d pages\n", num_pages); - dev_kfree_skb (skb); - smc->saved_skb = NULL; - dev->stats.tx_dropped++; - return NETDEV_TX_OK; /* Do not re-queue this packet. */ - } - /* A packet is now waiting. */ - smc->packets_waiting++; - - spin_lock_irqsave(&smc->lock, flags); - SMC_SELECT_BANK(2); /* Paranoia, we should always be in window 2 */ - - /* need MC_RESET to keep the memory consistent. errata? */ - if (smc->rx_ovrn) { - outw(MC_RESET, ioaddr + MMU_CMD); - smc->rx_ovrn = 0; - } - - /* Allocate the memory; send the packet now if we win. */ - outw(MC_ALLOC | num_pages, ioaddr + MMU_CMD); - for (time_out = MEMORY_WAIT_TIME; time_out >= 0; time_out--) { - ir = inw(ioaddr+INTERRUPT); - if (ir & IM_ALLOC_INT) { - /* Acknowledge the interrupt, send the packet. */ - outw((ir&0xff00) | IM_ALLOC_INT, ioaddr + INTERRUPT); - smc_hardware_send_packet(dev); /* Send the packet now.. */ - spin_unlock_irqrestore(&smc->lock, flags); - return NETDEV_TX_OK; - } - } - - /* Otherwise defer until the Tx-space-allocated interrupt.
*/ - pr_debug("%s: memory allocation deferred.\n", dev->name); - outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT); - spin_unlock_irqrestore(&smc->lock, flags); - - return NETDEV_TX_OK; -} - -/*====================================================================== - - Handle a Tx anomalous event. Entered while in Window 2. - -======================================================================*/ - -static void smc_tx_err(struct net_device * dev) -{ - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - int saved_packet = inw(ioaddr + PNR_ARR) & 0xff; - int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f; - int tx_status; - - /* select this as the packet to read from */ - outw(packet_no, ioaddr + PNR_ARR); - - /* read the first word from this packet */ - outw(PTR_AUTOINC | PTR_READ | 0, ioaddr + POINTER); - - tx_status = inw(ioaddr + DATA_1); - - dev->stats.tx_errors++; - if (tx_status & TS_LOSTCAR) dev->stats.tx_carrier_errors++; - if (tx_status & TS_LATCOL) dev->stats.tx_window_errors++; - if (tx_status & TS_16COL) { - dev->stats.tx_aborted_errors++; - smc->tx_err++; - } - - if (tx_status & TS_SUCCESS) { - netdev_notice(dev, "Successful packet caused error interrupt?\n"); - } - /* re-enable transmit */ - SMC_SELECT_BANK(0); - outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR); - SMC_SELECT_BANK(2); - - outw(MC_FREEPKT, ioaddr + MMU_CMD); /* Free the packet memory. */ - - /* one less packet waiting for me */ - smc->packets_waiting--; - - outw(saved_packet, ioaddr + PNR_ARR); -} - -/*====================================================================*/ - -static void smc_eph_irq(struct net_device *dev) -{ - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - u_short card_stats, ephs; - - SMC_SELECT_BANK(0); - ephs = inw(ioaddr + EPH); - pr_debug("%s: Ethernet protocol handler interrupt, status" - " %4.4x.\n", dev->name, ephs); - /* Could be a counter roll-over warning: update stats. */ - card_stats = inw(ioaddr + COUNTER); - /* single collisions */ - dev->stats.collisions += card_stats & 0xF; - card_stats >>= 4; - /* multiple collisions */ - dev->stats.collisions += card_stats & 0xF; -#if 0 /* These are for when linux supports these statistics */ - card_stats >>= 4; /* deferred */ - card_stats >>= 4; /* excess deferred */ -#endif - /* If we had a transmit error we must re-enable the transmitter. */ - outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR); - - /* Clear a link error interrupt. */ - SMC_SELECT_BANK(1); - outw(CTL_AUTO_RELEASE | 0x0000, ioaddr + CONTROL); - outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE, - ioaddr + CONTROL); - SMC_SELECT_BANK(2); -} - -/*====================================================================*/ - -static irqreturn_t smc_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr; - u_short saved_bank, saved_pointer, mask, status; - unsigned int handled = 1; - char bogus_cnt = INTR_WORK; /* Work we are willing to do. */ - - if (!netif_device_present(dev)) - return IRQ_NONE; - - ioaddr = dev->base_addr; - - pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name, - irq, ioaddr); - - spin_lock(&smc->lock); - smc->watchdog = 0; - saved_bank = inw(ioaddr + BANK_SELECT); - if ((saved_bank & 0xff00) != 0x3300) { - /* The device does not exist -- the card could be off-line, or - maybe it has been ejected. 
*/ - pr_debug("%s: SMC91c92 interrupt %d for non-existent" - "/ejected device.\n", dev->name, irq); - handled = 0; - goto irq_done; - } - - SMC_SELECT_BANK(2); - saved_pointer = inw(ioaddr + POINTER); - mask = inw(ioaddr + INTERRUPT) >> 8; - /* clear all interrupts */ - outw(0, ioaddr + INTERRUPT); - - do { /* read the status flag, and mask it */ - status = inw(ioaddr + INTERRUPT) & 0xff; - pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name, - status, mask); - if ((status & mask) == 0) { - if (bogus_cnt == INTR_WORK) - handled = 0; - break; - } - if (status & IM_RCV_INT) { - /* Got a packet(s). */ - smc_rx(dev); - } - if (status & IM_TX_INT) { - smc_tx_err(dev); - outw(IM_TX_INT, ioaddr + INTERRUPT); - } - status &= mask; - if (status & IM_TX_EMPTY_INT) { - outw(IM_TX_EMPTY_INT, ioaddr + INTERRUPT); - mask &= ~IM_TX_EMPTY_INT; - dev->stats.tx_packets += smc->packets_waiting; - smc->packets_waiting = 0; - } - if (status & IM_ALLOC_INT) { - /* Clear this interrupt so it doesn't happen again */ - mask &= ~IM_ALLOC_INT; - - smc_hardware_send_packet(dev); - - /* enable xmit interrupts based on this */ - mask |= (IM_TX_EMPTY_INT | IM_TX_INT); - - /* and let the card send more packets to me */ - netif_wake_queue(dev); - } - if (status & IM_RX_OVRN_INT) { - dev->stats.rx_errors++; - dev->stats.rx_fifo_errors++; - if (smc->duplex) - smc->rx_ovrn = 1; /* need MC_RESET outside smc_interrupt */ - outw(IM_RX_OVRN_INT, ioaddr + INTERRUPT); - } - if (status & IM_EPH_INT) - smc_eph_irq(dev); - } while (--bogus_cnt); - - pr_debug(" Restoring saved registers mask %2.2x bank %4.4x" - " pointer %4.4x.\n", mask, saved_bank, saved_pointer); - - /* restore state register */ - outw((mask<<8), ioaddr + INTERRUPT); - outw(saved_pointer, ioaddr + POINTER); - SMC_SELECT_BANK(saved_bank); - - pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq); - -irq_done: - - if ((smc->manfid == MANFID_OSITECH) && - (smc->cardid != PRODID_OSITECH_SEVEN)) { - /* Retrigger interrupt if needed */ - mask_bits(0x00ff, ioaddr-0x10+OSITECH_RESET_ISR); - set_bits(0x0300, ioaddr-0x10+OSITECH_RESET_ISR); - } - if (smc->manfid == MANFID_MOTOROLA) { - u_char cor; - cor = readb(smc->base + MOT_UART + CISREG_COR); - writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_UART + CISREG_COR); - writeb(cor, smc->base + MOT_UART + CISREG_COR); - cor = readb(smc->base + MOT_LAN + CISREG_COR); - writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_LAN + CISREG_COR); - writeb(cor, smc->base + MOT_LAN + CISREG_COR); - } - - if ((smc->base != NULL) && /* Megahertz MFC's */ - (smc->manfid == MANFID_MEGAHERTZ) && - (smc->cardid == PRODID_MEGAHERTZ_EM3288)) { - - u_char tmp; - tmp = readb(smc->base+MEGAHERTZ_ISR); - tmp = readb(smc->base+MEGAHERTZ_ISR); - - /* Retrigger interrupt if needed */ - writeb(tmp, smc->base + MEGAHERTZ_ISR); - writeb(tmp, smc->base + MEGAHERTZ_ISR); - } - - spin_unlock(&smc->lock); - return IRQ_RETVAL(handled); -} - -/*====================================================================*/ - -static void smc_rx(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - int rx_status; - int packet_length; /* Caution: not frame length, rather words - to transfer from the chip. */ - - /* Assertion: we are in Window 2. */ - - if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) { - netdev_err(dev, "smc_rx() with nothing on Rx FIFO\n"); - return; - } - - /* Reset the read pointer, and read the status and packet length. 
 */ - outw(PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER); - rx_status = inw(ioaddr + DATA_1); - packet_length = inw(ioaddr + DATA_1) & 0x07ff; - - pr_debug("%s: Receive status %4.4x length %d.\n", - dev->name, rx_status, packet_length); - - if (!(rx_status & RS_ERRORS)) { - /* do stuff to make a new packet */ - struct sk_buff *skb; - - /* Note: packet_length adds 5 or 6 extra bytes here! */ - skb = dev_alloc_skb(packet_length+2); - - if (skb == NULL) { - pr_debug("%s: Low memory, packet dropped.\n", dev->name); - dev->stats.rx_dropped++; - outw(MC_RELEASE, ioaddr + MMU_CMD); - return; - } - - packet_length -= (rx_status & RS_ODDFRAME ? 5 : 6); - skb_reserve(skb, 2); - insw(ioaddr+DATA_1, skb_put(skb, packet_length), - (packet_length+1)>>1); - skb->protocol = eth_type_trans(skb, dev); - - netif_rx(skb); - dev->last_rx = jiffies; - dev->stats.rx_packets++; - dev->stats.rx_bytes += packet_length; - if (rx_status & RS_MULTICAST) - dev->stats.multicast++; - } else { - /* error ... */ - dev->stats.rx_errors++; - - if (rx_status & RS_ALGNERR) dev->stats.rx_frame_errors++; - if (rx_status & (RS_TOOSHORT | RS_TOOLONG)) - dev->stats.rx_length_errors++; - if (rx_status & RS_BADCRC) dev->stats.rx_crc_errors++; - } - /* Let the MMU free the memory of this packet. */ - outw(MC_RELEASE, ioaddr + MMU_CMD); -} - -/*====================================================================== - - Set the receive mode. - - This routine is used by both the protocol level to notify us of - promiscuous/multicast mode changes, and by the open/reset code to - initialize the Rx registers. We always set the multicast list and - leave the receiver running. - -======================================================================*/ - -static void set_rx_mode(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - struct smc_private *smc = netdev_priv(dev); - unsigned char multicast_table[8]; - unsigned long flags; - u_short rx_cfg_setting; - int i; - - memset(multicast_table, 0, sizeof(multicast_table)); - - if (dev->flags & IFF_PROMISC) { - rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; - } else if (dev->flags & IFF_ALLMULTI) - rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti; - else { - if (!netdev_mc_empty(dev)) { - struct netdev_hw_addr *ha; - - netdev_for_each_mc_addr(ha, dev) { - u_int position = ether_crc(6, ha->addr); - multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); - } - } - rx_cfg_setting = RxStripCRC | RxEnable; - } - - /* Load MC table and Rx setting into the chip without interrupts. */ - spin_lock_irqsave(&smc->lock, flags); - SMC_SELECT_BANK(3); - for (i = 0; i < 8; i++) - outb(multicast_table[i], ioaddr + MULTICAST0 + i); - SMC_SELECT_BANK(0); - outw(rx_cfg_setting, ioaddr + RCR); - SMC_SELECT_BANK(2); - spin_unlock_irqrestore(&smc->lock, flags); -} -
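set_rx_mode() above derives the chip's 64-bit multicast filter from the Ethernet CRC of each address: ether_crc() yields a 32-bit CRC whose top three bits select one of the eight filter bytes written at MULTICAST0 + i, and whose next three bits select the bit within that byte. The same index math as a hypothetical standalone helper (a sketch; assumes <linux/crc32.h> for ether_crc() and <linux/if_ether.h> for ETH_ALEN, which is 6):

    /* Set the hash-filter bit for one multicast address. */
    static void smc_hash_set(u8 table[8], const u8 *addr)
    {
            u32 crc = ether_crc(ETH_ALEN, addr);

            /* top 3 CRC bits pick the byte, next 3 pick the bit */
            table[crc >> 29] |= 1 << ((crc >> 26) & 7);
    }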
-/*====================================================================== - - Senses when a card's config changes. Here, it's coax or TP. - -======================================================================*/ - -static int s9k_config(struct net_device *dev, struct ifmap *map) -{ - struct smc_private *smc = netdev_priv(dev); - if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { - if (smc->cfg & CFG_MII_SELECT) - return -EOPNOTSUPP; - else if (map->port > 2) - return -EINVAL; - dev->if_port = map->port; - netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); - smc_reset(dev); - } - return 0; -} - -/*====================================================================== - - Reset the chip, reloading every register that might be corrupted. - -======================================================================*/ - -/* - Set transceiver type, perhaps to something other than what the user - specified in dev->if_port. -*/ -static void smc_set_xcvr(struct net_device *dev, int if_port) -{ - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - u_short saved_bank; - - saved_bank = inw(ioaddr + BANK_SELECT); - SMC_SELECT_BANK(1); - if (if_port == 2) { - outw(smc->cfg | CFG_AUI_SELECT, ioaddr + CONFIG); - if ((smc->manfid == MANFID_OSITECH) && - (smc->cardid != PRODID_OSITECH_SEVEN)) - set_bits(OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR); - smc->media_status = ((dev->if_port == 0) ? 0x0001 : 0x0002); - } else { - outw(smc->cfg, ioaddr + CONFIG); - if ((smc->manfid == MANFID_OSITECH) && - (smc->cardid != PRODID_OSITECH_SEVEN)) - mask_bits(~OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR); - smc->media_status = ((dev->if_port == 0) ? 0x0012 : 0x4001); - } - SMC_SELECT_BANK(saved_bank); -} - -static void smc_reset(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - struct smc_private *smc = netdev_priv(dev); - int i; - - pr_debug("%s: smc91c92 reset called.\n", dev->name); - - /* The first interaction must be a write to bring the chip out - of sleep mode. */ - SMC_SELECT_BANK(0); - /* Reset the chip. */ - outw(RCR_SOFTRESET, ioaddr + RCR); - udelay(10); - - /* Clear the transmit and receive configuration registers. */ - outw(RCR_CLEAR, ioaddr + RCR); - outw(TCR_CLEAR, ioaddr + TCR); - - /* Set the Window 1 control, configuration and station addr registers. - No point in writing the I/O base register ;-> */ - SMC_SELECT_BANK(1); - /* Automatically release successfully transmitted packets, - Accept link errors, counter and Tx error interrupts. */ - outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE, - ioaddr + CONTROL); - smc_set_xcvr(dev, dev->if_port); - if ((smc->manfid == MANFID_OSITECH) && - (smc->cardid != PRODID_OSITECH_SEVEN)) - outw((dev->if_port == 2 ? OSI_AUI_PWR : 0) | - (inw(ioaddr-0x10+OSITECH_AUI_PWR) & 0xff00), - ioaddr - 0x10 + OSITECH_AUI_PWR); - - /* Fill in the physical address. The databook is wrong about the order! */ - for (i = 0; i < 6; i += 2) - outw((dev->dev_addr[i+1]<<8)+dev->dev_addr[i], - ioaddr + ADDR0 + i); - - /* Reset the MMU */ - SMC_SELECT_BANK(2); - outw(MC_RESET, ioaddr + MMU_CMD); - outw(0, ioaddr + INTERRUPT); - - /* Re-enable the chip. */ - SMC_SELECT_BANK(0); - outw(((smc->cfg & CFG_MII_SELECT) ?
0 : TCR_MONCSN) | - TCR_ENABLE | TCR_PAD_EN | smc->duplex, ioaddr + TCR); - set_rx_mode(dev); - - if (smc->cfg & CFG_MII_SELECT) { - SMC_SELECT_BANK(3); - - /* Reset MII */ - mdio_write(dev, smc->mii_if.phy_id, 0, 0x8000); - - /* Advertise 100F, 100H, 10F, 10H */ - mdio_write(dev, smc->mii_if.phy_id, 4, 0x01e1); - - /* Restart MII autonegotiation */ - mdio_write(dev, smc->mii_if.phy_id, 0, 0x0000); - mdio_write(dev, smc->mii_if.phy_id, 0, 0x1200); - } - - /* Enable interrupts. */ - SMC_SELECT_BANK(2); - outw((IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT) << 8, - ioaddr + INTERRUPT); -} - -/*====================================================================== - - Media selection timer routine - -======================================================================*/ - -static void media_check(u_long arg) -{ - struct net_device *dev = (struct net_device *) arg; - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - u_short i, media, saved_bank; - u_short link; - unsigned long flags; - - spin_lock_irqsave(&smc->lock, flags); - - saved_bank = inw(ioaddr + BANK_SELECT); - - if (!netif_device_present(dev)) - goto reschedule; - - SMC_SELECT_BANK(2); - - /* need MC_RESET to keep the memory consistent. errata? */ - if (smc->rx_ovrn) { - outw(MC_RESET, ioaddr + MMU_CMD); - smc->rx_ovrn = 0; - } - i = inw(ioaddr + INTERRUPT); - SMC_SELECT_BANK(0); - media = inw(ioaddr + EPH) & EPH_LINK_OK; - SMC_SELECT_BANK(1); - media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; - - SMC_SELECT_BANK(saved_bank); - spin_unlock_irqrestore(&smc->lock, flags); - - /* Check for pending interrupt with watchdog flag set: with - this, we can limp along even if the interrupt is blocked */ - if (smc->watchdog++ && ((i>>8) & i)) { - if (!smc->fast_poll) - netdev_info(dev, "interrupt(s) dropped!\n"); - local_irq_save(flags); - smc_interrupt(dev->irq, dev); - local_irq_restore(flags); - smc->fast_poll = HZ; - } - if (smc->fast_poll) { - smc->fast_poll--; - smc->media.expires = jiffies + HZ/100; - add_timer(&smc->media); - return; - } - - spin_lock_irqsave(&smc->lock, flags); - - saved_bank = inw(ioaddr + BANK_SELECT); - - if (smc->cfg & CFG_MII_SELECT) { - if (smc->mii_if.phy_id < 0) - goto reschedule; - - SMC_SELECT_BANK(3); - link = mdio_read(dev, smc->mii_if.phy_id, 1); - if (!link || (link == 0xffff)) { - netdev_info(dev, "MII is missing!\n"); - smc->mii_if.phy_id = -1; - goto reschedule; - } - - link &= 0x0004; - if (link != smc->link_status) { - u_short p = mdio_read(dev, smc->mii_if.phy_id, 5); - netdev_info(dev, "%s link beat\n", link ? "found" : "lost"); - smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40)) - ? TCR_FDUPLX : 0); - if (link) { - netdev_info(dev, "autonegotiation complete: " - "%dbaseT-%cD selected\n", - (p & 0x0180) ? 100 : 10, smc->duplex ? 'F' : 'H'); - } - SMC_SELECT_BANK(0); - outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR); - smc->link_status = link; - } - goto reschedule; - } - - /* Ignore collisions unless we've had no rx's recently */ - if (time_after(jiffies, dev->last_rx + HZ)) { - if (smc->tx_err || (smc->media_status & EPH_16COL)) - media |= EPH_16COL; - } - smc->tx_err = 0; - - if (media != smc->media_status) { - if ((media & smc->media_status & 1) && - ((smc->media_status ^ media) & EPH_LINK_OK)) - netdev_info(dev, "%s link beat\n", - smc->media_status & EPH_LINK_OK ? "lost" : "found"); - else if ((media & smc->media_status & 2) && - ((smc->media_status ^ media) & EPH_16COL)) - netdev_info(dev, "coax cable %s\n", - media & EPH_16COL ? 
"problem" : "ok"); - if (dev->if_port == 0) { - if (media & 1) { - if (media & EPH_LINK_OK) - netdev_info(dev, "flipped to 10baseT\n"); - else - smc_set_xcvr(dev, 2); - } else { - if (media & EPH_16COL) - smc_set_xcvr(dev, 1); - else - netdev_info(dev, "flipped to 10base2\n"); - } - } - smc->media_status = media; - } - -reschedule: - smc->media.expires = jiffies + HZ; - add_timer(&smc->media); - SMC_SELECT_BANK(saved_bank); - spin_unlock_irqrestore(&smc->lock, flags); -} - -static int smc_link_ok(struct net_device *dev) -{ - unsigned int ioaddr = dev->base_addr; - struct smc_private *smc = netdev_priv(dev); - - if (smc->cfg & CFG_MII_SELECT) { - return mii_link_ok(&smc->mii_if); - } else { - SMC_SELECT_BANK(0); - return inw(ioaddr + EPH) & EPH_LINK_OK; - } -} - -static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - u16 tmp; - unsigned int ioaddr = dev->base_addr; - - ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI | - SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); - - SMC_SELECT_BANK(1); - tmp = inw(ioaddr + CONFIG); - ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP; - ecmd->transceiver = XCVR_INTERNAL; - ethtool_cmd_speed_set(ecmd, SPEED_10); - ecmd->phy_address = ioaddr + MGMT; - - SMC_SELECT_BANK(0); - tmp = inw(ioaddr + TCR); - ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF; - - return 0; -} - -static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - u16 tmp; - unsigned int ioaddr = dev->base_addr; - - if (ethtool_cmd_speed(ecmd) != SPEED_10) - return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) - return -EINVAL; - if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI) - return -EINVAL; - if (ecmd->transceiver != XCVR_INTERNAL) - return -EINVAL; - - if (ecmd->port == PORT_AUI) - smc_set_xcvr(dev, 1); - else - smc_set_xcvr(dev, 0); - - SMC_SELECT_BANK(0); - tmp = inw(ioaddr + TCR); - if (ecmd->duplex == DUPLEX_FULL) - tmp |= TCR_FDUPLX; - else - tmp &= ~TCR_FDUPLX; - outw(tmp, ioaddr + TCR); - - return 0; -} - -static int check_if_running(struct net_device *dev) -{ - if (!netif_running(dev)) - return -EINVAL; - return 0; -} - -static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); -} - -static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - u16 saved_bank = inw(ioaddr + BANK_SELECT); - int ret; - unsigned long flags; - - spin_lock_irqsave(&smc->lock, flags); - SMC_SELECT_BANK(3); - if (smc->cfg & CFG_MII_SELECT) - ret = mii_ethtool_gset(&smc->mii_if, ecmd); - else - ret = smc_netdev_get_ecmd(dev, ecmd); - SMC_SELECT_BANK(saved_bank); - spin_unlock_irqrestore(&smc->lock, flags); - return ret; -} - -static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) -{ - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - u16 saved_bank = inw(ioaddr + BANK_SELECT); - int ret; - unsigned long flags; - - spin_lock_irqsave(&smc->lock, flags); - SMC_SELECT_BANK(3); - if (smc->cfg & CFG_MII_SELECT) - ret = mii_ethtool_sset(&smc->mii_if, ecmd); - else - ret = smc_netdev_set_ecmd(dev, ecmd); - SMC_SELECT_BANK(saved_bank); - spin_unlock_irqrestore(&smc->lock, flags); - return ret; -} - -static u32 smc_get_link(struct net_device *dev) -{ - struct smc_private *smc = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - u16 
saved_bank = inw(ioaddr + BANK_SELECT); - u32 ret; - unsigned long flags; - - spin_lock_irqsave(&smc->lock, flags); - SMC_SELECT_BANK(3); - ret = smc_link_ok(dev); - SMC_SELECT_BANK(saved_bank); - spin_unlock_irqrestore(&smc->lock, flags); - return ret; -} - -static int smc_nway_reset(struct net_device *dev) -{ - struct smc_private *smc = netdev_priv(dev); - if (smc->cfg & CFG_MII_SELECT) { - unsigned int ioaddr = dev->base_addr; - u16 saved_bank = inw(ioaddr + BANK_SELECT); - int res; - - SMC_SELECT_BANK(3); - res = mii_nway_restart(&smc->mii_if); - SMC_SELECT_BANK(saved_bank); - - return res; - } else - return -EOPNOTSUPP; -} - -static const struct ethtool_ops ethtool_ops = { - .begin = check_if_running, - .get_drvinfo = smc_get_drvinfo, - .get_settings = smc_get_settings, - .set_settings = smc_set_settings, - .get_link = smc_get_link, - .nway_reset = smc_nway_reset, -}; - -static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct smc_private *smc = netdev_priv(dev); - struct mii_ioctl_data *mii = if_mii(rq); - int rc = 0; - u16 saved_bank; - unsigned int ioaddr = dev->base_addr; - unsigned long flags; - - if (!netif_running(dev)) - return -EINVAL; - - spin_lock_irqsave(&smc->lock, flags); - saved_bank = inw(ioaddr + BANK_SELECT); - SMC_SELECT_BANK(3); - rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); - SMC_SELECT_BANK(saved_bank); - spin_unlock_irqrestore(&smc->lock, flags); - return rc; -} - -static const struct pcmcia_device_id smc91c92_ids[] = { - PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0109, 0x0501), - PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0140, 0x000a), - PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "CC/XJEM3288", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x04cd2988, 0x46a52d63), - PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "CC/XJEM3336", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x0143b773, 0x46a52d63), - PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "EM1144T", "PCMCIA MODEM", 0xf510db04, 0x856d66c8, 0xbd6c43ef), - PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "XJEM1144/CCEM1144", "PCMCIA MODEM", 0xf510db04, 0x52d21e1e, 0xbd6c43ef), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9), - PCMCIA_PFC_DEVICE_PROD_ID12(0, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed), - PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x016c, 0x0020), - PCMCIA_DEVICE_MANF_CARD(0x016c, 0x0023), - PCMCIA_DEVICE_PROD_ID123("BASICS by New Media Corporation", "Ethernet", "SMC91C94", 0x23c78a9d, 0x00b2e941, 0xcef397fb), - PCMCIA_DEVICE_PROD_ID12("ARGOSY", "Fast Ethernet PCCard", 0x78f308dc, 0xdcea68bc), - PCMCIA_DEVICE_PROD_ID12("dit Co., Ltd.", "PC Card-10/100BTX", 0xe59365c8, 0x6a2161d1), - PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L100C", 0x6a26d1cf, 0xc16ce9c5), - PCMCIA_DEVICE_PROD_ID12("Farallon", "Farallon Enet", 0x58d93fc4, 0x244734e9), - PCMCIA_DEVICE_PROD_ID12("Megahertz", "CC10BT/2", 0x33234748, 0x3c95b953), - PCMCIA_DEVICE_PROD_ID12("MELCO/SMC", "LPC-TX", 0xa2cd8e6d, 0x42da662a), - PCMCIA_DEVICE_PROD_ID12("Ositech", "Trumpcard:Four of Diamonds Ethernet", 0xc2f80cd, 0xb3466314), - PCMCIA_DEVICE_PROD_ID12("Ositech", "Trumpcard:Seven of Diamonds Ethernet", 0xc2f80cd, 0x194b650a), - PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Fast Ethernet PCCard", 0x281f1c5d, 0xdcea68bc), - PCMCIA_DEVICE_PROD_ID12("Psion", "10Mb Ethernet", 0x4ef00b21, 
0x844be9e9),
-	PCMCIA_DEVICE_PROD_ID12("SMC", "EtherEZ Ethernet 8020", 0xc4f8b18b, 0x4a0eeb2d),
-	/* These conflict with other cards! */
-	/* PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0100), */
-	/* PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), */
-	PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, smc91c92_ids);
-
-static struct pcmcia_driver smc91c92_cs_driver = {
-	.owner		= THIS_MODULE,
-	.name		= "smc91c92_cs",
-	.probe		= smc91c92_probe,
-	.remove		= smc91c92_detach,
-	.id_table	= smc91c92_ids,
-	.suspend	= smc91c92_suspend,
-	.resume		= smc91c92_resume,
-};
-
-static int __init init_smc91c92_cs(void)
-{
-	return pcmcia_register_driver(&smc91c92_cs_driver);
-}
-
-static void __exit exit_smc91c92_cs(void)
-{
-	pcmcia_unregister_driver(&smc91c92_cs_driver);
-}
-
-module_init(init_smc91c92_cs);
-module_exit(exit_smc91c92_cs);
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
deleted file mode 100644
index a91fe17..0000000
--- a/drivers/net/smc911x.c
+++ /dev/null
@@ -1,2210 +0,0 @@
-/*
- * smc911x.c
- * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
- *
- * Copyright (C) 2005 Sensoria Corp
- * Derived from the unified SMC91x driver by Nicolas Pitre
- * and the smsc911x.c reference driver by SMSC
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Arguments:
- *	 watchdog  = TX watchdog timeout
- *	 tx_fifo_kb = Size of TX FIFO in KB
- *
- * History:
- *	  04/16/05	Dustin McIntire		Initial version
- */
-static const char version[] =
-	 "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";
-
-/* Debugging options */
-#define ENABLE_SMC_DEBUG_RX		0
-#define ENABLE_SMC_DEBUG_TX		0
-#define ENABLE_SMC_DEBUG_DMA		0
-#define ENABLE_SMC_DEBUG_PKTS		0
-#define ENABLE_SMC_DEBUG_MISC		0
-#define ENABLE_SMC_DEBUG_FUNC		0
-
-#define SMC_DEBUG_RX		((ENABLE_SMC_DEBUG_RX	? 1 : 0) << 0)
-#define SMC_DEBUG_TX		((ENABLE_SMC_DEBUG_TX	? 1 : 0) << 1)
-#define SMC_DEBUG_DMA		((ENABLE_SMC_DEBUG_DMA	? 1 : 0) << 2)
-#define SMC_DEBUG_PKTS		((ENABLE_SMC_DEBUG_PKTS	? 1 : 0) << 3)
-#define SMC_DEBUG_MISC		((ENABLE_SMC_DEBUG_MISC	? 1 : 0) << 4)
-#define SMC_DEBUG_FUNC		((ENABLE_SMC_DEBUG_FUNC	? 1 : 0) << 5)
-
-#ifndef SMC_DEBUG
-#define SMC_DEBUG	( SMC_DEBUG_RX	| \
-			  SMC_DEBUG_TX	| \
-			  SMC_DEBUG_DMA	| \
-			  SMC_DEBUG_PKTS	| \
-			  SMC_DEBUG_MISC	| \
-			  SMC_DEBUG_FUNC	\
-			)
-#endif
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/crc32.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/workqueue.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <asm/io.h>
-
-#include "smc911x.h"
-
-/*
- * Transmit timeout, default 5 seconds.
- */
-static int watchdog = 5000;
-module_param(watchdog, int, 0400);
-MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
-
-static int tx_fifo_kb=8;
-module_param(tx_fifo_kb, int, 0400);
-MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)");
-
-#if SMC_DEBUG > 0
-#define DBG(n, args...)
\ - do { \ - if (SMC_DEBUG & (n)) \ - printk(args); \ - } while (0) - -#define PRINTK(args...) printk(args) -#else -#define DBG(n, args...) do { } while (0) -#define PRINTK(args...) printk(KERN_DEBUG args) -#endif - -#if SMC_DEBUG_PKTS > 0 -static void PRINT_PKT(u_char *buf, int length) -{ - int i; - int remainder; - int lines; - - lines = length / 16; - remainder = length % 16; - - for (i = 0; i < lines ; i ++) { - int cur; - for (cur = 0; cur < 8; cur++) { - u_char a, b; - a = *buf++; - b = *buf++; - printk("%02x%02x ", a, b); - } - printk("\n"); - } - for (i = 0; i < remainder/2 ; i++) { - u_char a, b; - a = *buf++; - b = *buf++; - printk("%02x%02x ", a, b); - } - printk("\n"); -} -#else -#define PRINT_PKT(x...) do { } while (0) -#endif - - -/* this enables an interrupt in the interrupt mask register */ -#define SMC_ENABLE_INT(lp, x) do { \ - unsigned int __mask; \ - __mask = SMC_GET_INT_EN((lp)); \ - __mask |= (x); \ - SMC_SET_INT_EN((lp), __mask); \ -} while (0) - -/* this disables an interrupt from the interrupt mask register */ -#define SMC_DISABLE_INT(lp, x) do { \ - unsigned int __mask; \ - __mask = SMC_GET_INT_EN((lp)); \ - __mask &= ~(x); \ - SMC_SET_INT_EN((lp), __mask); \ -} while (0) - -/* - * this does a soft reset on the device - */ -static void smc911x_reset(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int reg, timeout=0, resets=1, irq_cfg; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - /* Take out of PM setting first */ - if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) { - /* Write to the bytetest will take out of powerdown */ - SMC_SET_BYTE_TEST(lp, 0); - timeout=10; - do { - udelay(10); - reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_; - } while (--timeout && !reg); - if (timeout == 0) { - PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name); - return; - } - } - - /* Disable all interrupts */ - spin_lock_irqsave(&lp->lock, flags); - SMC_SET_INT_EN(lp, 0); - spin_unlock_irqrestore(&lp->lock, flags); - - while (resets--) { - SMC_SET_HW_CFG(lp, HW_CFG_SRST_); - timeout=10; - do { - udelay(10); - reg = SMC_GET_HW_CFG(lp); - /* If chip indicates reset timeout then try again */ - if (reg & HW_CFG_SRST_TO_) { - PRINTK("%s: chip reset timeout, retrying...\n", dev->name); - resets++; - break; - } - } while (--timeout && (reg & HW_CFG_SRST_)); - } - if (timeout == 0) { - PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name); - return; - } - - /* make sure EEPROM has finished loading before setting GPIO_CFG */ - timeout=1000; - while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_)) - udelay(10); - - if (timeout == 0){ - PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name); - return; - } - - /* Initialize interrupts */ - SMC_SET_INT_EN(lp, 0); - SMC_ACK_INT(lp, -1); - - /* Reset the FIFO level and flow control settings */ - SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16); -//TODO: Figure out what appropriate pause time is - SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_); - SMC_SET_AFC_CFG(lp, lp->afc_cfg); - - - /* Set to LED outputs */ - SMC_SET_GPIO_CFG(lp, 0x70070000); - - /* - * Deassert IRQ for 1*10us for edge type interrupts - * and drive IRQ pin push-pull - */ - irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_; -#ifdef SMC_DYNAMIC_BUS_CONFIG - if (lp->cfg.irq_polarity) - irq_cfg |= INT_CFG_IRQ_POL_; -#endif - SMC_SET_IRQ_CFG(lp, irq_cfg); - - /* clear anything saved */ - if (lp->pending_tx_skb != NULL) { - dev_kfree_skb 
(lp->pending_tx_skb); - lp->pending_tx_skb = NULL; - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - } -} - -/* - * Enable Interrupts, Receive, and Transmit - */ -static void smc911x_enable(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned mask, cfg, cr; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - spin_lock_irqsave(&lp->lock, flags); - - SMC_SET_MAC_ADDR(lp, dev->dev_addr); - - /* Enable TX */ - cfg = SMC_GET_HW_CFG(lp); - cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF; - cfg |= HW_CFG_SF_; - SMC_SET_HW_CFG(lp, cfg); - SMC_SET_FIFO_TDA(lp, 0xFF); - /* Update TX stats on every 64 packets received or every 1 sec */ - SMC_SET_FIFO_TSL(lp, 64); - SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); - - SMC_GET_MAC_CR(lp, cr); - cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; - SMC_SET_MAC_CR(lp, cr); - SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_); - - /* Add 2 byte padding to start of packets */ - SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_); - - /* Turn on receiver and enable RX */ - if (cr & MAC_CR_RXEN_) - DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name); - - SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_); - - /* Interrupt on every received packet */ - SMC_SET_FIFO_RSA(lp, 0x01); - SMC_SET_FIFO_RSL(lp, 0x00); - - /* now, enable interrupts */ - mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ | - INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ | - INT_EN_PHY_INT_EN_; - if (IS_REV_A(lp->revision)) - mask|=INT_EN_RDFL_EN_; - else { - mask|=INT_EN_RDFO_EN_; - } - SMC_ENABLE_INT(lp, mask); - - spin_unlock_irqrestore(&lp->lock, flags); -} - -/* - * this puts the device in an inactive state - */ -static void smc911x_shutdown(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned cr; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__); - - /* Disable IRQ's */ - SMC_SET_INT_EN(lp, 0); - - /* Turn of Rx and TX */ - spin_lock_irqsave(&lp->lock, flags); - SMC_GET_MAC_CR(lp, cr); - cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); - SMC_SET_MAC_CR(lp, cr); - SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_); - spin_unlock_irqrestore(&lp->lock, flags); -} - -static inline void smc911x_drop_pkt(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int fifo_count, timeout, reg; - - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__); - fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF; - if (fifo_count <= 4) { - /* Manually dump the packet data */ - while (fifo_count--) - SMC_GET_RX_FIFO(lp); - } else { - /* Fast forward through the bad packet */ - SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_); - timeout=50; - do { - udelay(10); - reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_; - } while (--timeout && reg); - if (timeout == 0) { - PRINTK("%s: timeout waiting for RX fast forward\n", dev->name); - } - } -} - -/* - * This is the procedure to handle the receipt of a packet. - * It should be called after checking for packet presence in - * the RX status FIFO. It must be called with the spin lock - * already held. 
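smc911x_rcv(), which follows, starts by popping a 32-bit word from the RX status FIFO; bits 30:16 carry the packet length and bit 15 is the error summary. A standalone sketch of that decode, with mask values taken from my reading of smc911x.h and an arbitrary example status word:

#include <stdio.h>
#include <stdint.h>

#define RX_STS_PKT_LEN_	0x3fff0000	/* bits 30:16: packet length */
#define RX_STS_ES_	0x00008000	/* bit 15: error summary */

int main(void)
{
	uint32_t status = 0x00400000;	/* example: 64-byte frame, no errors */
	unsigned int pkt_len = (status & RX_STS_PKT_LEN_) >> 16;

	printf("len=%u error=%s\n", pkt_len,
	       (status & RX_STS_ES_) ? "yes" : "no");
	return 0;
}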
- */ -static inline void smc911x_rcv(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int pkt_len, status; - struct sk_buff *skb; - unsigned char *data; - - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", - dev->name, __func__); - status = SMC_GET_RX_STS_FIFO(lp); - DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n", - dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); - pkt_len = (status & RX_STS_PKT_LEN_) >> 16; - if (status & RX_STS_ES_) { - /* Deal with a bad packet */ - dev->stats.rx_errors++; - if (status & RX_STS_CRC_ERR_) - dev->stats.rx_crc_errors++; - else { - if (status & RX_STS_LEN_ERR_) - dev->stats.rx_length_errors++; - if (status & RX_STS_MCAST_) - dev->stats.multicast++; - } - /* Remove the bad packet data from the RX FIFO */ - smc911x_drop_pkt(dev); - } else { - /* Receive a valid packet */ - /* Alloc a buffer with extra room for DMA alignment */ - skb=dev_alloc_skb(pkt_len+32); - if (unlikely(skb == NULL)) { - PRINTK( "%s: Low memory, rcvd packet dropped.\n", - dev->name); - dev->stats.rx_dropped++; - smc911x_drop_pkt(dev); - return; - } - /* Align IP header to 32 bits - * Note that the device is configured to add a 2 - * byte padding to the packet start, so we really - * want to write to the orignal data pointer */ - data = skb->data; - skb_reserve(skb, 2); - skb_put(skb,pkt_len-4); -#ifdef SMC_USE_DMA - { - unsigned int fifo; - /* Lower the FIFO threshold if possible */ - fifo = SMC_GET_FIFO_INT(lp); - if (fifo & 0xFF) fifo--; - DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n", - dev->name, fifo & 0xff); - SMC_SET_FIFO_INT(lp, fifo); - /* Setup RX DMA */ - SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_)); - lp->rxdma_active = 1; - lp->current_rx_skb = skb; - SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15); - /* Packet processing deferred to DMA RX interrupt */ - } -#else - SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_)); - SMC_PULL_DATA(lp, data, pkt_len+2+3); - - DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name); - PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len-4; -#endif - } -} - -/* - * This is called to actually send a packet to the chip. 
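smc911x_hardware_send_pkt(), next, describes each transmit to the chip with two command words: cmdA packs the buffer alignment offset and length, cmdB a tag (reused as the length) plus the on-wire length. A standalone sketch of the PIO (4-byte alignment) encoding, with assumed flag values and an example misaligned buffer:

#include <stdio.h>
#include <stdint.h>

/* Assumed values for the first/last-segment interrupt flags in cmdA;
 * check smc911x.h before relying on them. */
#define TX_CMD_A_INT_FIRST_SEG_	0x00002000
#define TX_CMD_A_INT_LAST_SEG_	0x00001000

int main(void)
{
	uintptr_t data = 0x10000002;	/* example skb->data, 2 bytes misaligned */
	unsigned int len = 60;		/* example skb->len */

	/* PIO path: 4-byte alignment, start offset in cmdA bits 17:16 */
	uint32_t cmdA = ((uint32_t)(data & 0x3) << 16) |
			TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
			len;
	/* tag (here: the length again) on top, true length below */
	uint32_t cmdB = ((uint32_t)len << 16) | (len & 0x7FF);

	printf("cmdA=0x%08x cmdB=0x%08x\n", (unsigned)cmdA, (unsigned)cmdB);
	return 0;
}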
- */ -static void smc911x_hardware_send_pkt(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - struct sk_buff *skb; - unsigned int cmdA, cmdB, len; - unsigned char *buf; - - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__); - BUG_ON(lp->pending_tx_skb == NULL); - - skb = lp->pending_tx_skb; - lp->pending_tx_skb = NULL; - - /* cmdA {25:24] data alignment [20:16] start offset [10:0] buffer length */ - /* cmdB {31:16] pkt tag [10:0] length */ -#ifdef SMC_USE_DMA - /* 16 byte buffer alignment mode */ - buf = (char*)((u32)(skb->data) & ~0xF); - len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF; - cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) | - TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | - skb->len; -#else - buf = (char*)((u32)skb->data & ~0x3); - len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3; - cmdA = (((u32)skb->data & 0x3) << 16) | - TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | - skb->len; -#endif - /* tag is packet length so we can use this in stats update later */ - cmdB = (skb->len << 16) | (skb->len & 0x7FF); - - DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n", - dev->name, len, len, buf, cmdA, cmdB); - SMC_SET_TX_FIFO(lp, cmdA); - SMC_SET_TX_FIFO(lp, cmdB); - - DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name); - PRINT_PKT(buf, len <= 64 ? len : 64); - - /* Send pkt via PIO or DMA */ -#ifdef SMC_USE_DMA - lp->current_tx_skb = skb; - SMC_PUSH_DATA(lp, buf, len); - /* DMA complete IRQ will free buffer and set jiffies */ -#else - SMC_PUSH_DATA(lp, buf, len); - dev->trans_start = jiffies; - dev_kfree_skb_irq(skb); -#endif - if (!lp->tx_throttle) { - netif_wake_queue(dev); - } - SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_); -} - -/* - * Since I am not sure if I will have enough room in the chip's ram - * to store the packet, I call this routine which either sends it - * now, or set the card to generates an interrupt when ready - * for the packet. 
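The transmit entry point that follows reserves FIFO space for more than the payload: 8 bytes of command words plus up to 15 bytes each of start-offset and end padding. A trivial standalone check of that worst-case arithmetic for a full-size frame:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 1514;	/* example full Ethernet frame */
	/* 8 bytes of TX command words + up to 15 bytes start offset
	 * + up to 15 bytes end padding, mirroring the check below. */
	unsigned int needed = skb_len + 8 + 15 + 15;

	printf("worst-case FIFO space for a %u-byte frame: %u bytes\n",
	       skb_len, needed);
	return 0;
}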
- */ -static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int free; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", - dev->name, __func__); - - spin_lock_irqsave(&lp->lock, flags); - - BUG_ON(lp->pending_tx_skb != NULL); - - free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_; - DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free); - - /* Turn off the flow when running out of space in FIFO */ - if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { - DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n", - dev->name, free); - /* Reenable when at least 1 packet of size MTU present */ - SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64); - lp->tx_throttle = 1; - netif_stop_queue(dev); - } - - /* Drop packets when we run out of space in TX FIFO - * Account for overhead required for: - * - * Tx command words 8 bytes - * Start offset 15 bytes - * End padding 15 bytes - */ - if (unlikely(free < (skb->len + 8 + 15 + 15))) { - printk("%s: No Tx free space %d < %d\n", - dev->name, free, skb->len); - lp->pending_tx_skb = NULL; - dev->stats.tx_errors++; - dev->stats.tx_dropped++; - spin_unlock_irqrestore(&lp->lock, flags); - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - -#ifdef SMC_USE_DMA - { - /* If the DMA is already running then defer this packet Tx until - * the DMA IRQ starts it - */ - if (lp->txdma_active) { - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name); - lp->pending_tx_skb = skb; - netif_stop_queue(dev); - spin_unlock_irqrestore(&lp->lock, flags); - return NETDEV_TX_OK; - } else { - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name); - lp->txdma_active = 1; - } - } -#endif - lp->pending_tx_skb = skb; - smc911x_hardware_send_pkt(dev); - spin_unlock_irqrestore(&lp->lock, flags); - - return NETDEV_TX_OK; -} - -/* - * This handles a TX status interrupt, which is only called when: - * - a TX error occurred, or - * - TX of a packet completed. 
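smc911x_tx(), below, drains TX status words whose upper half is the tag stored at submit time (the packet length) and whose lower half holds error and collision bits. A standalone sketch of the fields it extracts; the mask values follow my reading of smc911x.h and the status word is an invented example:

#include <stdio.h>
#include <stdint.h>

#define TX_STS_ES_		0x00008000	/* error summary */
#define TX_STS_COLL_CNT_	0x00000078	/* bits 6:3: collision count */

int main(void)
{
	uint32_t tx_status = (60u << 16) | 0x0018;	/* tag=60, 3 collisions */

	printf("tag(len)=%u collisions=%u error=%s\n",
	       (unsigned)(tx_status >> 16),
	       (unsigned)((tx_status & TX_STS_COLL_CNT_) >> 3),
	       (tx_status & TX_STS_ES_) ? "yes" : "no");
	return 0;
}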
- */ -static void smc911x_tx(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int tx_status; - - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", - dev->name, __func__); - - /* Collect the TX status */ - while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) { - DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n", - dev->name, - (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16); - tx_status = SMC_GET_TX_STS_FIFO(lp); - dev->stats.tx_packets++; - dev->stats.tx_bytes+=tx_status>>16; - DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n", - dev->name, (tx_status & 0xffff0000) >> 16, - tx_status & 0x0000ffff); - /* count Tx errors, but ignore lost carrier errors when in - * full-duplex mode */ - if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx && - !(tx_status & 0x00000306))) { - dev->stats.tx_errors++; - } - if (tx_status & TX_STS_MANY_COLL_) { - dev->stats.collisions+=16; - dev->stats.tx_aborted_errors++; - } else { - dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; - } - /* carrier error only has meaning for half-duplex communication */ - if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) && - !lp->ctl_rfduplx) { - dev->stats.tx_carrier_errors++; - } - if (tx_status & TX_STS_LATE_COLL_) { - dev->stats.collisions++; - dev->stats.tx_aborted_errors++; - } - } -} - - -/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ -/* - * Reads a register from the MII Management serial interface - */ - -static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int phydata; - - SMC_GET_MII(lp, phyreg, phyaddr, phydata); - - DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", - __func__, phyaddr, phyreg, phydata); - return phydata; -} - - -/* - * Writes a register to the MII Management serial interface - */ -static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg, - int phydata) -{ - struct smc911x_local *lp = netdev_priv(dev); - - DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", - __func__, phyaddr, phyreg, phydata); - - SMC_SET_MII(lp, phyreg, phyaddr, phydata); -} - -/* - * Finds and reports the PHY address (115 and 117 have external - * PHY interface 118 has internal only - */ -static void smc911x_phy_detect(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int phyaddr; - unsigned int cfg, id1, id2; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - lp->phy_type = 0; - - /* - * Scan all 32 PHY addresses if necessary, starting at - * PHY#1 to PHY#31, and then PHY#0 last. 
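Per the comment above, external PHY detection probes addresses 1 through 31 before falling back to 0. A standalone sketch of that wrap-around visiting order, with the actual ID-register reads stubbed out:

#include <stdio.h>

/* Visit PHY addresses 1..31 first and 0 last, as the scan comment
 * above describes; a real probe would read the ID registers here. */
int main(void)
{
	for (int i = 1; i <= 32; i++) {
		int phyaddr = i & 31;	/* 1, 2, ..., 31, 0 */
		printf("probe PHY addr %d\n", phyaddr);
	}
	return 0;
}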
- */ - switch(lp->version) { - case CHIP_9115: - case CHIP_9117: - case CHIP_9215: - case CHIP_9217: - cfg = SMC_GET_HW_CFG(lp); - if (cfg & HW_CFG_EXT_PHY_DET_) { - cfg &= ~HW_CFG_PHY_CLK_SEL_; - cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; - SMC_SET_HW_CFG(lp, cfg); - udelay(10); /* Wait for clocks to stop */ - - cfg |= HW_CFG_EXT_PHY_EN_; - SMC_SET_HW_CFG(lp, cfg); - udelay(10); /* Wait for clocks to stop */ - - cfg &= ~HW_CFG_PHY_CLK_SEL_; - cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; - SMC_SET_HW_CFG(lp, cfg); - udelay(10); /* Wait for clocks to stop */ - - cfg |= HW_CFG_SMI_SEL_; - SMC_SET_HW_CFG(lp, cfg); - - for (phyaddr = 1; phyaddr < 32; ++phyaddr) { - - /* Read the PHY identifiers */ - SMC_GET_PHY_ID1(lp, phyaddr & 31, id1); - SMC_GET_PHY_ID2(lp, phyaddr & 31, id2); - - /* Make sure it is a valid identifier */ - if (id1 != 0x0000 && id1 != 0xffff && - id1 != 0x8000 && id2 != 0x0000 && - id2 != 0xffff && id2 != 0x8000) { - /* Save the PHY's address */ - lp->mii.phy_id = phyaddr & 31; - lp->phy_type = id1 << 16 | id2; - break; - } - } - if (phyaddr < 32) - /* Found an external PHY */ - break; - } - default: - /* Internal media only */ - SMC_GET_PHY_ID1(lp, 1, id1); - SMC_GET_PHY_ID2(lp, 1, id2); - /* Save the PHY's address */ - lp->mii.phy_id = 1; - lp->phy_type = id1 << 16 | id2; - } - - DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n", - dev->name, id1, id2, lp->mii.phy_id); -} - -/* - * Sets the PHY to a configuration as determined by the user. - * Called with spin_lock held. - */ -static int smc911x_phy_fixed(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int phyaddr = lp->mii.phy_id; - int bmcr; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - /* Enter Link Disable state */ - SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); - bmcr |= BMCR_PDOWN; - SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); - - /* - * Set our fixed capabilities - * Disable auto-negotiation - */ - bmcr &= ~BMCR_ANENABLE; - if (lp->ctl_rfduplx) - bmcr |= BMCR_FULLDPLX; - - if (lp->ctl_rspeed == 100) - bmcr |= BMCR_SPEED100; - - /* Write our capabilities to the phy control register */ - SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); - - /* Re-Configure the Receive/Phy Control register */ - bmcr &= ~BMCR_PDOWN; - SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); - - return 1; -} - -/* - * smc911x_phy_reset - reset the phy - * @dev: net device - * @phy: phy address - * - * Issue a software reset for the specified PHY and - * wait up to 100ms for the reset to complete. We should - * not access the PHY for 50ms after issuing the reset. - * - * The time to wait appears to be dependent on the PHY. - * - */ -static int smc911x_phy_reset(struct net_device *dev, int phy) -{ - struct smc911x_local *lp = netdev_priv(dev); - int timeout; - unsigned long flags; - unsigned int reg; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__); - - spin_lock_irqsave(&lp->lock, flags); - reg = SMC_GET_PMT_CTRL(lp); - reg &= ~0xfffff030; - reg |= PMT_CTRL_PHY_RST_; - SMC_SET_PMT_CTRL(lp, reg); - spin_unlock_irqrestore(&lp->lock, flags); - for (timeout = 2; timeout; timeout--) { - msleep(50); - spin_lock_irqsave(&lp->lock, flags); - reg = SMC_GET_PMT_CTRL(lp); - spin_unlock_irqrestore(&lp->lock, flags); - if (!(reg & PMT_CTRL_PHY_RST_)) { - /* extra delay required because the phy may - * not be completed with its reset - * when PHY_BCR_RESET_ is cleared. 
256us - * should suffice, but use 500us to be safe - */ - udelay(500); - break; - } - } - - return reg & PMT_CTRL_PHY_RST_; -} - -/* - * smc911x_phy_powerdown - powerdown phy - * @dev: net device - * @phy: phy address - * - * Power down the specified PHY - */ -static void smc911x_phy_powerdown(struct net_device *dev, int phy) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int bmcr; - - /* Enter Link Disable state */ - SMC_GET_PHY_BMCR(lp, phy, bmcr); - bmcr |= BMCR_PDOWN; - SMC_SET_PHY_BMCR(lp, phy, bmcr); -} - -/* - * smc911x_phy_check_media - check the media status and adjust BMCR - * @dev: net device - * @init: set true for initialisation - * - * Select duplex mode depending on negotiation state. This - * also updates our carrier state. - */ -static void smc911x_phy_check_media(struct net_device *dev, int init) -{ - struct smc911x_local *lp = netdev_priv(dev); - int phyaddr = lp->mii.phy_id; - unsigned int bmcr, cr; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { - /* duplex state has changed */ - SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); - SMC_GET_MAC_CR(lp, cr); - if (lp->mii.full_duplex) { - DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name); - bmcr |= BMCR_FULLDPLX; - cr |= MAC_CR_RCVOWN_; - } else { - DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name); - bmcr &= ~BMCR_FULLDPLX; - cr &= ~MAC_CR_RCVOWN_; - } - SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); - SMC_SET_MAC_CR(lp, cr); - } -} - -/* - * Configures the specified PHY through the MII management interface - * using Autonegotiation. - * Calls smc911x_phy_fixed() if the user has requested a certain config. - * If RPC ANEG bit is set, the media selection is dependent purely on - * the selection by the MII (either in the MII BMCR reg or the result - * of autonegotiation.) If the RPC ANEG bit is cleared, the selection - * is controlled by the RPC SPEED and RPC DPLX bits. - */ -static void smc911x_phy_configure(struct work_struct *work) -{ - struct smc911x_local *lp = container_of(work, struct smc911x_local, - phy_configure); - struct net_device *dev = lp->netdev; - int phyaddr = lp->mii.phy_id; - int my_phy_caps; /* My PHY capabilities */ - int my_ad_caps; /* My Advertised capabilities */ - int status; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__); - - /* - * We should not be called if phy_type is zero. 
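smc911x_phy_configure(), which follows, builds the autonegotiation advertisement by mirroring the PHY's BMSR capability bits into the ADVERTISE register. A standalone sketch of that mapping; the constants reproduce the standard linux/mii.h values so the example compiles on its own:

#include <stdio.h>
#include <stdint.h>

/* Standard MII bits (values as in linux/mii.h). */
#define BMSR_10HALF		0x0800
#define BMSR_10FULL		0x1000
#define BMSR_100HALF		0x2000
#define BMSR_100FULL		0x4000
#define ADVERTISE_CSMA		0x0001
#define ADVERTISE_10HALF	0x0020
#define ADVERTISE_10FULL	0x0040
#define ADVERTISE_100HALF	0x0080
#define ADVERTISE_100FULL	0x0100

int main(void)
{
	uint16_t bmsr = BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL;
	uint16_t adv = ADVERTISE_CSMA;

	if (bmsr & BMSR_100FULL) adv |= ADVERTISE_100FULL;
	if (bmsr & BMSR_100HALF) adv |= ADVERTISE_100HALF;
	if (bmsr & BMSR_10FULL)  adv |= ADVERTISE_10FULL;
	if (bmsr & BMSR_10HALF)  adv |= ADVERTISE_10HALF;

	printf("advertising 0x%04x\n", (unsigned)adv);
	return 0;
}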
- */ - if (lp->phy_type == 0) - return; - - if (smc911x_phy_reset(dev, phyaddr)) { - printk("%s: PHY reset timed out\n", dev->name); - return; - } - spin_lock_irqsave(&lp->lock, flags); - - /* - * Enable PHY Interrupts (for register 18) - * Interrupts listed here are enabled - */ - SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ | - PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ | - PHY_INT_MASK_LINK_DOWN_); - - /* If the user requested no auto neg, then go set his request */ - if (lp->mii.force_media) { - smc911x_phy_fixed(dev); - goto smc911x_phy_configure_exit; - } - - /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ - SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps); - if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { - printk(KERN_INFO "Auto negotiation NOT supported\n"); - smc911x_phy_fixed(dev); - goto smc911x_phy_configure_exit; - } - - /* CSMA capable w/ both pauses */ - my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - - if (my_phy_caps & BMSR_100BASE4) - my_ad_caps |= ADVERTISE_100BASE4; - if (my_phy_caps & BMSR_100FULL) - my_ad_caps |= ADVERTISE_100FULL; - if (my_phy_caps & BMSR_100HALF) - my_ad_caps |= ADVERTISE_100HALF; - if (my_phy_caps & BMSR_10FULL) - my_ad_caps |= ADVERTISE_10FULL; - if (my_phy_caps & BMSR_10HALF) - my_ad_caps |= ADVERTISE_10HALF; - - /* Disable capabilities not selected by our user */ - if (lp->ctl_rspeed != 100) - my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); - - if (!lp->ctl_rfduplx) - my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); - - /* Update our Auto-Neg Advertisement Register */ - SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps); - lp->mii.advertising = my_ad_caps; - - /* - * Read the register back. Without this, it appears that when - * auto-negotiation is restarted, sometimes it isn't ready and - * the link does not come up. - */ - udelay(10); - SMC_GET_PHY_MII_ADV(lp, phyaddr, status); - - DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps); - DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps); - - /* Restart auto-negotiation process in order to advertise my caps */ - SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART); - - smc911x_phy_check_media(dev, 1); - -smc911x_phy_configure_exit: - spin_unlock_irqrestore(&lp->lock, flags); -} - -/* - * smc911x_phy_interrupt - * - * Purpose: Handle interrupts relating to PHY register 18. This is - * called from the "hard" interrupt handler under our private spinlock. - */ -static void smc911x_phy_interrupt(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int phyaddr = lp->mii.phy_id; - int status; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - if (lp->phy_type == 0) - return; - - smc911x_phy_check_media(dev, 0); - /* read to clear status bits */ - SMC_GET_PHY_INT_SRC(lp, phyaddr,status); - DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n", - dev->name, status & 0xffff); - DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n", - dev->name, SMC_GET_AFC_CFG(lp)); -} - -/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ - -/* - * This is the main routine of the driver, to handle the device when - * it needs some attention. 
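The handler that follows uses a common ISR shape: reject spurious interrupts, save and clear the enable mask, then service the status register in a bounded loop, acknowledging each source as it goes. A condensed standalone model of that control flow, with the hardware reads faked by a counter:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for reading the chip's interrupt status register:
 * pretend two events arrive, then the line goes quiet. */
static uint32_t fake_status(void)
{
	static int pending = 2;
	return pending-- > 0 ? 0x1 : 0x0;
}

int main(void)
{
	uint32_t mask = 0x1;	/* sources we enabled */
	int timeout = 8;	/* bounded loop, as in smc911x_interrupt() */
	int loops = 0;

	do {
		uint32_t status = fake_status() & mask;
		if (!status)
			break;
		loops++;
		printf("handle and ack status 0x%x\n", (unsigned)status);
	} while (--timeout);

	printf("done after %d loop(s)\n", loops);
	return 0;
}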
- */ -static irqreturn_t smc911x_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct smc911x_local *lp = netdev_priv(dev); - unsigned int status, mask, timeout; - unsigned int rx_overrun=0, cr, pkts; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - spin_lock_irqsave(&lp->lock, flags); - - /* Spurious interrupt check */ - if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != - (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { - spin_unlock_irqrestore(&lp->lock, flags); - return IRQ_NONE; - } - - mask = SMC_GET_INT_EN(lp); - SMC_SET_INT_EN(lp, 0); - - /* set a timeout value, so I don't stay here forever */ - timeout = 8; - - - do { - status = SMC_GET_INT(lp); - - DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n", - dev->name, status, mask, status & ~mask); - - status &= mask; - if (!status) - break; - - /* Handle SW interrupt condition */ - if (status & INT_STS_SW_INT_) { - SMC_ACK_INT(lp, INT_STS_SW_INT_); - mask &= ~INT_EN_SW_INT_EN_; - } - /* Handle various error conditions */ - if (status & INT_STS_RXE_) { - SMC_ACK_INT(lp, INT_STS_RXE_); - dev->stats.rx_errors++; - } - if (status & INT_STS_RXDFH_INT_) { - SMC_ACK_INT(lp, INT_STS_RXDFH_INT_); - dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp); - } - /* Undocumented interrupt-what is the right thing to do here? */ - if (status & INT_STS_RXDF_INT_) { - SMC_ACK_INT(lp, INT_STS_RXDF_INT_); - } - - /* Rx Data FIFO exceeds set level */ - if (status & INT_STS_RDFL_) { - if (IS_REV_A(lp->revision)) { - rx_overrun=1; - SMC_GET_MAC_CR(lp, cr); - cr &= ~MAC_CR_RXEN_; - SMC_SET_MAC_CR(lp, cr); - DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); - dev->stats.rx_errors++; - dev->stats.rx_fifo_errors++; - } - SMC_ACK_INT(lp, INT_STS_RDFL_); - } - if (status & INT_STS_RDFO_) { - if (!IS_REV_A(lp->revision)) { - SMC_GET_MAC_CR(lp, cr); - cr &= ~MAC_CR_RXEN_; - SMC_SET_MAC_CR(lp, cr); - rx_overrun=1; - DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); - dev->stats.rx_errors++; - dev->stats.rx_fifo_errors++; - } - SMC_ACK_INT(lp, INT_STS_RDFO_); - } - /* Handle receive condition */ - if ((status & INT_STS_RSFL_) || rx_overrun) { - unsigned int fifo; - DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name); - fifo = SMC_GET_RX_FIFO_INF(lp); - pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16; - DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n", - dev->name, pkts, fifo & 0xFFFF ); - if (pkts != 0) { -#ifdef SMC_USE_DMA - unsigned int fifo; - if (lp->rxdma_active){ - DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, - "%s: RX DMA active\n", dev->name); - /* The DMA is already running so up the IRQ threshold */ - fifo = SMC_GET_FIFO_INT(lp) & ~0xFF; - fifo |= pkts & 0xFF; - DBG(SMC_DEBUG_RX, - "%s: Setting RX stat FIFO threshold to %d\n", - dev->name, fifo & 0xff); - SMC_SET_FIFO_INT(lp, fifo); - } else -#endif - smc911x_rcv(dev); - } - SMC_ACK_INT(lp, INT_STS_RSFL_); - } - /* Handle transmit FIFO available */ - if (status & INT_STS_TDFA_) { - DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name); - SMC_SET_FIFO_TDA(lp, 0xFF); - lp->tx_throttle = 0; -#ifdef SMC_USE_DMA - if (!lp->txdma_active) -#endif - netif_wake_queue(dev); - SMC_ACK_INT(lp, INT_STS_TDFA_); - } - /* Handle transmit done condition */ -#if 1 - if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) { - DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, - "%s: Tx stat FIFO limit (%d) /GPT irq\n", - dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16); - smc911x_tx(dev); - SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); - SMC_ACK_INT(lp, 
INT_STS_TSFL_); - SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_); - } -#else - if (status & INT_STS_TSFL_) { - DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, ); - smc911x_tx(dev); - SMC_ACK_INT(lp, INT_STS_TSFL_); - } - - if (status & INT_STS_GPT_INT_) { - DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n", - dev->name, - SMC_GET_IRQ_CFG(lp), - SMC_GET_FIFO_INT(lp), - SMC_GET_RX_CFG(lp)); - DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x " - "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n", - dev->name, - (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16, - SMC_GET_RX_FIFO_INF(lp) & 0xffff, - SMC_GET_RX_STS_FIFO_PEEK(lp)); - SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); - SMC_ACK_INT(lp, INT_STS_GPT_INT_); - } -#endif - - /* Handle PHY interrupt condition */ - if (status & INT_STS_PHY_INT_) { - DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name); - smc911x_phy_interrupt(dev); - SMC_ACK_INT(lp, INT_STS_PHY_INT_); - } - } while (--timeout); - - /* restore mask state */ - SMC_SET_INT_EN(lp, mask); - - DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n", - dev->name, 8-timeout); - - spin_unlock_irqrestore(&lp->lock, flags); - - return IRQ_HANDLED; -} - -#ifdef SMC_USE_DMA -static void -smc911x_tx_dma_irq(int dma, void *data) -{ - struct net_device *dev = (struct net_device *)data; - struct smc911x_local *lp = netdev_priv(dev); - struct sk_buff *skb = lp->current_tx_skb; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name); - /* Clear the DMA interrupt sources */ - SMC_DMA_ACK_IRQ(dev, dma); - BUG_ON(skb == NULL); - dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); - dev->trans_start = jiffies; - dev_kfree_skb_irq(skb); - lp->current_tx_skb = NULL; - if (lp->pending_tx_skb != NULL) - smc911x_hardware_send_pkt(dev); - else { - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, - "%s: No pending Tx packets. DMA disabled\n", dev->name); - spin_lock_irqsave(&lp->lock, flags); - lp->txdma_active = 0; - if (!lp->tx_throttle) { - netif_wake_queue(dev); - } - spin_unlock_irqrestore(&lp->lock, flags); - } - - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, - "%s: TX DMA irq completed\n", dev->name); -} -static void -smc911x_rx_dma_irq(int dma, void *data) -{ - struct net_device *dev = (struct net_device *)data; - unsigned long ioaddr = dev->base_addr; - struct smc911x_local *lp = netdev_priv(dev); - struct sk_buff *skb = lp->current_rx_skb; - unsigned long flags; - unsigned int pkts; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name); - /* Clear the DMA interrupt sources */ - SMC_DMA_ACK_IRQ(dev, dma); - dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); - BUG_ON(skb == NULL); - lp->current_rx_skb = NULL; - PRINT_PKT(skb->data, skb->len); - skb->protocol = eth_type_trans(skb, dev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; - netif_rx(skb); - - spin_lock_irqsave(&lp->lock, flags); - pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16; - if (pkts != 0) { - smc911x_rcv(dev); - }else { - lp->rxdma_active = 0; - } - spin_unlock_irqrestore(&lp->lock, flags); - DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, - "%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n", - dev->name, pkts); -} -#endif /* SMC_USE_DMA */ - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling receive - used by netconsole and other diagnostic tools - * to allow network i/o with interrupts disabled. 
- */ -static void smc911x_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - smc911x_interrupt(dev->irq, dev); - enable_irq(dev->irq); -} -#endif - -/* Our watchdog timed out. Called by the networking layer */ -static void smc911x_timeout(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int status, mask; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - spin_lock_irqsave(&lp->lock, flags); - status = SMC_GET_INT(lp); - mask = SMC_GET_INT_EN(lp); - spin_unlock_irqrestore(&lp->lock, flags); - DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n", - dev->name, status, mask); - - /* Dump the current TX FIFO contents and restart */ - mask = SMC_GET_TX_CFG(lp); - SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_); - /* - * Reconfiguring the PHY doesn't seem like a bad idea here, but - * smc911x_phy_configure() calls msleep() which calls schedule_timeout() - * which calls schedule(). Hence we use a work queue. - */ - if (lp->phy_type != 0) - schedule_work(&lp->phy_configure); - - /* We can accept TX packets again */ - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue(dev); -} - -/* - * This routine will, depending on the values passed to it, - * either make it accept multicast packets, go into - * promiscuous mode (for TCPDUMP and cousins) or accept - * a select set of multicast packets - */ -static void smc911x_set_multicast_list(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned int multicast_table[2]; - unsigned int mcr, update_multicast = 0; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - spin_lock_irqsave(&lp->lock, flags); - SMC_GET_MAC_CR(lp, mcr); - spin_unlock_irqrestore(&lp->lock, flags); - - if (dev->flags & IFF_PROMISC) { - - DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name); - mcr |= MAC_CR_PRMS_; - } - /* - * Here, I am setting this to accept all multicast packets. - * I don't need to zero the multicast table, because the flag is - * checked before the table is - */ - else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) { - DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name); - mcr |= MAC_CR_MCPAS_; - } - - /* - * This sets the internal hardware table to filter out unwanted - * multicast packets before they take up memory. - * - * The SMC chip uses a hash table where the high 6 bits of the CRC of - * address are the offset into the table. If that bit is 1, then the - * multicast packet is accepted. Otherwise, it's dropped silently. - * - * To use the 6 bits as an offset into the table, the high 1 bit is - * the number of the 32 bit register, while the low 5 bits are the bit - * within that register. 
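As the comment above explains, the LAN911x multicast filter hashes on the upper six CRC bits: bit 5 of the hash picks the HASHH or HASHL register and bits 4:0 pick the bit within it. A minimal standalone sketch of that indexing, again with an arbitrary stand-in for ether_crc():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t multicast_table[2] = { 0, 0 };	/* HASHL, HASHH */
	uint32_t position = 0xd85f3c6au >> 26;	/* upper 6 bits of the CRC */

	multicast_table[position >> 5] |= 1u << (position & 0x1f);

	printf("HASHL=0x%08x HASHH=0x%08x\n",
	       (unsigned)multicast_table[0], (unsigned)multicast_table[1]);
	return 0;
}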
- */ - else if (!netdev_mc_empty(dev)) { - struct netdev_hw_addr *ha; - - /* Set the Hash perfec mode */ - mcr |= MAC_CR_HPFILT_; - - /* start with a table of all zeros: reject all */ - memset(multicast_table, 0, sizeof(multicast_table)); - - netdev_for_each_mc_addr(ha, dev) { - u32 position; - - /* upper 6 bits are used as hash index */ - position = ether_crc(ETH_ALEN, ha->addr)>>26; - - multicast_table[position>>5] |= 1 << (position&0x1f); - } - - /* be sure I get rid of flags I might have set */ - mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); - - /* now, the table can be loaded into the chipset */ - update_multicast = 1; - } else { - DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n", - dev->name); - mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); - - /* - * since I'm disabling all multicast entirely, I need to - * clear the multicast list - */ - memset(multicast_table, 0, sizeof(multicast_table)); - update_multicast = 1; - } - - spin_lock_irqsave(&lp->lock, flags); - SMC_SET_MAC_CR(lp, mcr); - if (update_multicast) { - DBG(SMC_DEBUG_MISC, - "%s: update mcast hash table 0x%08x 0x%08x\n", - dev->name, multicast_table[0], multicast_table[1]); - SMC_SET_HASHL(lp, multicast_table[0]); - SMC_SET_HASHH(lp, multicast_table[1]); - } - spin_unlock_irqrestore(&lp->lock, flags); -} - - -/* - * Open and Initialize the board - * - * Set up everything, reset the card, etc.. - */ -static int -smc911x_open(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - /* - * Check that the address is valid. If its not, refuse - * to bring the device up. The user must specify an - * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx - */ - if (!is_valid_ether_addr(dev->dev_addr)) { - PRINTK("%s: no valid ethernet hw addr\n", __func__); - return -EINVAL; - } - - /* reset the hardware */ - smc911x_reset(dev); - - /* Configure the PHY, initialize the link state */ - smc911x_phy_configure(&lp->phy_configure); - - /* Turn on Tx + Rx */ - smc911x_enable(dev); - - netif_start_queue(dev); - - return 0; -} - -/* - * smc911x_close - * - * this makes the board clean up everything that it can - * and not talk to the outside world. Caused by - * an 'ifconfig ethX down' - */ -static int smc911x_close(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - netif_stop_queue(dev); - netif_carrier_off(dev); - - /* clear everything */ - smc911x_shutdown(dev); - - if (lp->phy_type != 0) { - /* We need to ensure that no calls to - * smc911x_phy_configure are pending. 
- */ - cancel_work_sync(&lp->phy_configure); - smc911x_phy_powerdown(dev, lp->mii.phy_id); - } - - if (lp->pending_tx_skb) { - dev_kfree_skb(lp->pending_tx_skb); - lp->pending_tx_skb = NULL; - } - - return 0; -} - -/* - * Ethtool support - */ -static int -smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct smc911x_local *lp = netdev_priv(dev); - int ret, status; - unsigned long flags; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; - - if (lp->phy_type != 0) { - spin_lock_irqsave(&lp->lock, flags); - ret = mii_ethtool_gset(&lp->mii, cmd); - spin_unlock_irqrestore(&lp->lock, flags); - } else { - cmd->supported = SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_TP | SUPPORTED_AUI; - - if (lp->ctl_rspeed == 10) - ethtool_cmd_speed_set(cmd, SPEED_10); - else if (lp->ctl_rspeed == 100) - ethtool_cmd_speed_set(cmd, SPEED_100); - - cmd->autoneg = AUTONEG_DISABLE; - if (lp->mii.phy_id==1) - cmd->transceiver = XCVR_INTERNAL; - else - cmd->transceiver = XCVR_EXTERNAL; - cmd->port = 0; - SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status); - cmd->duplex = - (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ? - DUPLEX_FULL : DUPLEX_HALF; - ret = 0; - } - - return ret; -} - -static int -smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct smc911x_local *lp = netdev_priv(dev); - int ret; - unsigned long flags; - - if (lp->phy_type != 0) { - spin_lock_irqsave(&lp->lock, flags); - ret = mii_ethtool_sset(&lp->mii, cmd); - spin_unlock_irqrestore(&lp->lock, flags); - } else { - if (cmd->autoneg != AUTONEG_DISABLE || - cmd->speed != SPEED_10 || - (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || - (cmd->port != PORT_TP && cmd->port != PORT_AUI)) - return -EINVAL; - - lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; - - ret = 0; - } - - return ret; -} - -static void -smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - strncpy(info->driver, CARDNAME, sizeof(info->driver)); - strncpy(info->version, version, sizeof(info->version)); - strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); -} - -static int smc911x_ethtool_nwayreset(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int ret = -EINVAL; - unsigned long flags; - - if (lp->phy_type != 0) { - spin_lock_irqsave(&lp->lock, flags); - ret = mii_nway_restart(&lp->mii); - spin_unlock_irqrestore(&lp->lock, flags); - } - - return ret; -} - -static u32 smc911x_ethtool_getmsglevel(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - return lp->msg_enable; -} - -static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level) -{ - struct smc911x_local *lp = netdev_priv(dev); - lp->msg_enable = level; -} - -static int smc911x_ethtool_getregslen(struct net_device *dev) -{ - /* System regs + MAC regs + PHY regs */ - return (((E2P_CMD - ID_REV)/4 + 1) + - (WUCSR - MAC_CR)+1 + 32) * sizeof(u32); -} - -static void smc911x_ethtool_getregs(struct net_device *dev, - struct ethtool_regs* regs, void *buf) -{ - struct smc911x_local *lp = netdev_priv(dev); - unsigned long flags; - u32 reg,i,j=0; - u32 *data = (u32*)buf; - - regs->version = lp->version; - for(i=ID_REV;i<=E2P_CMD;i+=4) { - data[j++] = SMC_inl(lp, i); - } - for(i=MAC_CR;i<=WUCSR;i++) { - spin_lock_irqsave(&lp->lock, flags); - SMC_GET_MAC_CSR(lp, i, reg); - spin_unlock_irqrestore(&lp->lock, flags); - data[j++] = reg; - } - for(i=0;i<=31;i++) { 
-		spin_lock_irqsave(&lp->lock, flags);
-		SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
-		spin_unlock_irqrestore(&lp->lock, flags);
-		data[j++] = reg & 0xFFFF;
-	}
-}
-
-static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
-{
-	struct smc911x_local *lp = netdev_priv(dev);
-	unsigned int timeout;
-	int e2p_cmd;
-
-	e2p_cmd = SMC_GET_E2P_CMD(lp);
-	for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
-		if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
-			PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
-				dev->name, __func__);
-			return -EFAULT;
-		}
-		mdelay(1);
-		e2p_cmd = SMC_GET_E2P_CMD(lp);
-	}
-	if (timeout == 0) {
-		PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
-			dev->name, __func__);
-		return -ETIMEDOUT;
-	}
-	return 0;
-}
-
-static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
-					int cmd, int addr)
-{
-	struct smc911x_local *lp = netdev_priv(dev);
-	int ret;
-
-	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
-		return ret;
-	SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
-		((cmd) & (0x7<<28)) |
-		((addr) & 0xFF));
-	return 0;
-}
-
-static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
-					u8 *data)
-{
-	struct smc911x_local *lp = netdev_priv(dev);
-	int ret;
-
-	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
-		return ret;
-	*data = SMC_GET_E2P_DATA(lp);
-	return 0;
-}
-
-static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
-					u8 data)
-{
-	struct smc911x_local *lp = netdev_priv(dev);
-	int ret;
-
-	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
-		return ret;
-	SMC_SET_E2P_DATA(lp, data);
-	return 0;
-}
-
-static int smc911x_ethtool_geteeprom(struct net_device *dev,
-					struct ethtool_eeprom *eeprom, u8 *data)
-{
-	u8 eebuf[SMC911X_EEPROM_LEN];
-	int i, ret;
-
-	for(i=0;i<SMC911X_EEPROM_LEN;i++) {
-		if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0)
-			return ret;
-		if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
-			return ret;
-	}
-	memcpy(data, eebuf+eeprom->offset, eeprom->len);
-	return 0;
-}
-
-static int smc911x_ethtool_seteeprom(struct net_device *dev,
-					struct ethtool_eeprom *eeprom, u8 *data)
-{
-	int i, ret;
-
-	/* Enable erase */
-	if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0)
-		return ret;
-	for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) {
-		/* erase byte */
-		if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0)
-			return ret;
-		/* write byte */
-		if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
-			return ret;
-		if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
-			return ret;
-	}
-	return 0;
-}
-
-static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
-{
-	return SMC911X_EEPROM_LEN;
-}
-
-static const struct ethtool_ops smc911x_ethtool_ops = {
-	.get_settings	= smc911x_ethtool_getsettings,
-	.set_settings	= smc911x_ethtool_setsettings,
-	.get_drvinfo	= smc911x_ethtool_getdrvinfo,
-	.get_msglevel	= smc911x_ethtool_getmsglevel,
-	.set_msglevel	= smc911x_ethtool_setmsglevel,
-	.nway_reset	= smc911x_ethtool_nwayreset,
-	.get_link	= ethtool_op_get_link,
-	.get_regs_len	= smc911x_ethtool_getregslen,
-	.get_regs	= smc911x_ethtool_getregs,
-	.get_eeprom_len	= smc911x_ethtool_geteeprom_len,
-	.get_eeprom	= smc911x_ethtool_geteeprom,
-	.set_eeprom	= smc911x_ethtool_seteeprom,
-};
-
-/*
- * smc911x_findirq
- *
- * This routine has a simple purpose -- make the SMC chip generate an
- * interrupt, so an auto-detect routine can detect it, and find the IRQ,
- */
-static int __devinit smc911x_findirq(struct net_device *dev)
-{
-	struct smc911x_local *lp = netdev_priv(dev);
-	int timeout = 20;
-	unsigned long cookie;
-
-	DBG(SMC_DEBUG_FUNC, "--> %s\n",
__func__); - - cookie = probe_irq_on(); - - /* - * Force a SW interrupt - */ - - SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_); - - /* - * Wait until positive that the interrupt has been generated - */ - do { - int int_status; - udelay(10); - int_status = SMC_GET_INT_EN(lp); - if (int_status & INT_EN_SW_INT_EN_) - break; /* got the interrupt */ - } while (--timeout); - - /* - * there is really nothing that I can do here if timeout fails, - * as autoirq_report will return a 0 anyway, which is what I - * want in this case. Plus, the clean up is needed in both - * cases. - */ - - /* and disable all interrupts again */ - SMC_SET_INT_EN(lp, 0); - - /* and return what I found */ - return probe_irq_off(cookie); -} - -static const struct net_device_ops smc911x_netdev_ops = { - .ndo_open = smc911x_open, - .ndo_stop = smc911x_close, - .ndo_start_xmit = smc911x_hard_start_xmit, - .ndo_tx_timeout = smc911x_timeout, - .ndo_set_multicast_list = smc911x_set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = smc911x_poll_controller, -#endif -}; - -/* - * Function: smc911x_probe(unsigned long ioaddr) - * - * Purpose: - * Tests to see if a given ioaddr points to an SMC911x chip. - * Returns a 0 on success - * - * Algorithm: - * (1) see if the endian word is OK - * (1) see if I recognize the chip ID in the appropriate register - * - * Here I do typical initialization tasks. - * - * o Initialize the structure if needed - * o print out my vanity message if not done so already - * o print out what type of hardware is detected - * o print out the ethernet address - * o find the IRQ - * o set up my private data - * o configure the dev structure with my subroutines - * o actually GRAB the irq. - * o GRAB the region - */ -static int __devinit smc911x_probe(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - int i, retval; - unsigned int val, chip_id, revision; - const char *version_string; - unsigned long irq_flags; - - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - - /* First, see if the endian word is recognized */ - val = SMC_GET_BYTE_TEST(lp); - DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val); - if (val != 0x87654321) { - printk(KERN_ERR "Invalid chip endian 0x%08x\n",val); - retval = -ENODEV; - goto err_out; - } - - /* - * check if the revision register is something that I - * recognize. These might need to be added to later, - * as future revisions could be added. - */ - chip_id = SMC_GET_PN(lp); - DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id); - for(i=0;chip_ids[i].id != 0; i++) { - if (chip_ids[i].id == chip_id) break; - } - if (!chip_ids[i].id) { - printk(KERN_ERR "Unknown chip ID %04x\n", chip_id); - retval = -ENODEV; - goto err_out; - } - version_string = chip_ids[i].name; - - revision = SMC_GET_REV(lp); - DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision); - - /* At this point I'll assume that the chip is an SMC911x. 
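 * (the BYTE_TEST read at the top of this probe is what guards that
 * assumption: the register reads back as 0x87654321 only when the bus
 * width and byte lanes are wired correctly)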
*/ - DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name); - - /* Validate the TX FIFO size requested */ - if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) { - printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb); - retval = -EINVAL; - goto err_out; - } - - /* fill in some of the fields */ - lp->version = chip_ids[i].id; - lp->revision = revision; - lp->tx_fifo_kb = tx_fifo_kb; - /* Reverse calculate the RX FIFO size from the TX */ - lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512; - lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15; - - /* Set the automatic flow control values */ - switch(lp->tx_fifo_kb) { - /* - * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64 - * AFC_LO is AFC_HI/2 - * BACK_DUR is about 5uS*(AFC_LO) rounded down - */ - case 2:/* 13440 Rx Data Fifo Size */ - lp->afc_cfg=0x008C46AF;break; - case 3:/* 12480 Rx Data Fifo Size */ - lp->afc_cfg=0x0082419F;break; - case 4:/* 11520 Rx Data Fifo Size */ - lp->afc_cfg=0x00783C9F;break; - case 5:/* 10560 Rx Data Fifo Size */ - lp->afc_cfg=0x006E374F;break; - case 6:/* 9600 Rx Data Fifo Size */ - lp->afc_cfg=0x0064328F;break; - case 7:/* 8640 Rx Data Fifo Size */ - lp->afc_cfg=0x005A2D7F;break; - case 8:/* 7680 Rx Data Fifo Size */ - lp->afc_cfg=0x0050287F;break; - case 9:/* 6720 Rx Data Fifo Size */ - lp->afc_cfg=0x0046236F;break; - case 10:/* 5760 Rx Data Fifo Size */ - lp->afc_cfg=0x003C1E6F;break; - case 11:/* 4800 Rx Data Fifo Size */ - lp->afc_cfg=0x0032195F;break; - /* - * AFC_HI is ~1520 bytes less than RX Data Fifo Size - * AFC_LO is AFC_HI/2 - * BACK_DUR is about 5uS*(AFC_LO) rounded down - */ - case 12:/* 3840 Rx Data Fifo Size */ - lp->afc_cfg=0x0024124F;break; - case 13:/* 2880 Rx Data Fifo Size */ - lp->afc_cfg=0x0015073F;break; - case 14:/* 1920 Rx Data Fifo Size */ - lp->afc_cfg=0x0006032F;break; - default: - PRINTK("%s: ERROR -- no AFC_CFG setting found", - dev->name); - break; - } - - DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, - "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME, - lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg); - - spin_lock_init(&lp->lock); - - /* Get the MAC address */ - SMC_GET_MAC_ADDR(lp, dev->dev_addr); - - /* now, reset the chip, and put it into a known state */ - smc911x_reset(dev); - - /* - * If dev->irq is 0, then the device has to be banged on to see - * what the IRQ is. - * - * Specifying an IRQ is done with the assumption that the user knows - * what (s)he is doing. No checking is done!!!! - */ - if (dev->irq < 1) { - int trials; - - trials = 3; - while (trials--) { - dev->irq = smc911x_findirq(dev); - if (dev->irq) - break; - /* kick the card and try again */ - smc911x_reset(dev); - } - } - if (dev->irq == 0) { - printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n", - dev->name); - retval = -ENODEV; - goto err_out; - } - dev->irq = irq_canonicalize(dev->irq); - - /* Fill in the fields of the device structure with ethernet values. */ - ether_setup(dev); - - dev->netdev_ops = &smc911x_netdev_ops; - dev->watchdog_timeo = msecs_to_jiffies(watchdog); - dev->ethtool_ops = &smc911x_ethtool_ops; - - INIT_WORK(&lp->phy_configure, smc911x_phy_configure); - lp->mii.phy_id_mask = 0x1f; - lp->mii.reg_num_mask = 0x1f; - lp->mii.force_media = 0; - lp->mii.full_duplex = 0; - lp->mii.dev = dev; - lp->mii.mdio_read = smc911x_phy_read; - lp->mii.mdio_write = smc911x_phy_write; - - /* - * Locate the phy, if any. 
- */ - smc911x_phy_detect(dev); - - /* Set default parameters */ - lp->msg_enable = NETIF_MSG_LINK; - lp->ctl_rfduplx = 1; - lp->ctl_rspeed = 100; - -#ifdef SMC_DYNAMIC_BUS_CONFIG - irq_flags = lp->cfg.irq_flags; -#else - irq_flags = IRQF_SHARED | SMC_IRQ_SENSE; -#endif - - /* Grab the IRQ */ - retval = request_irq(dev->irq, smc911x_interrupt, - irq_flags, dev->name, dev); - if (retval) - goto err_out; - -#ifdef SMC_USE_DMA - lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq); - lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq); - lp->rxdma_active = 0; - lp->txdma_active = 0; - dev->dma = lp->rxdma; -#endif - - retval = register_netdev(dev); - if (retval == 0) { - /* now, print out the card info, in a short format.. */ - printk("%s: %s (rev %d) at %#lx IRQ %d", - dev->name, version_string, lp->revision, - dev->base_addr, dev->irq); - -#ifdef SMC_USE_DMA - if (lp->rxdma != -1) - printk(" RXDMA %d ", lp->rxdma); - - if (lp->txdma != -1) - printk("TXDMA %d", lp->txdma); -#endif - printk("\n"); - if (!is_valid_ether_addr(dev->dev_addr)) { - printk("%s: Invalid ethernet MAC address. Please " - "set using ifconfig\n", dev->name); - } else { - /* Print the Ethernet address */ - printk("%s: Ethernet addr: %pM\n", - dev->name, dev->dev_addr); - } - - if (lp->phy_type == 0) { - PRINTK("%s: No PHY found\n", dev->name); - } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) { - PRINTK("%s: LAN911x Internal PHY\n", dev->name); - } else { - PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type); - } - } - -err_out: -#ifdef SMC_USE_DMA - if (retval) { - if (lp->rxdma != -1) { - SMC_DMA_FREE(dev, lp->rxdma); - } - if (lp->txdma != -1) { - SMC_DMA_FREE(dev, lp->txdma); - } - } -#endif - return retval; -} - -/* - * smc911x_init(void) - * - * Output: - * 0 --> there is a device - * anything else, error - */ -static int __devinit smc911x_drv_probe(struct platform_device *pdev) -{ - struct net_device *ndev; - struct resource *res; - struct smc911x_local *lp; - unsigned int *addr; - int ret; - - DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENODEV; - goto out; - } - - /* - * Request the regions. 
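	 * (SMC911X_IO_EXTENT is 0x100 -- the size of the chip's register
	 * window -- so this reserves exactly the register file)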
- */ - if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) { - ret = -EBUSY; - goto out; - } - - ndev = alloc_etherdev(sizeof(struct smc911x_local)); - if (!ndev) { - printk("%s: could not allocate device.\n", CARDNAME); - ret = -ENOMEM; - goto release_1; - } - SET_NETDEV_DEV(ndev, &pdev->dev); - - ndev->dma = (unsigned char)-1; - ndev->irq = platform_get_irq(pdev, 0); - lp = netdev_priv(ndev); - lp->netdev = ndev; -#ifdef SMC_DYNAMIC_BUS_CONFIG - { - struct smc911x_platdata *pd = pdev->dev.platform_data; - if (!pd) { - ret = -EINVAL; - goto release_both; - } - memcpy(&lp->cfg, pd, sizeof(lp->cfg)); - } -#endif - - addr = ioremap(res->start, SMC911X_IO_EXTENT); - if (!addr) { - ret = -ENOMEM; - goto release_both; - } - - platform_set_drvdata(pdev, ndev); - lp->base = addr; - ndev->base_addr = res->start; - ret = smc911x_probe(ndev); - if (ret != 0) { - platform_set_drvdata(pdev, NULL); - iounmap(addr); -release_both: - free_netdev(ndev); -release_1: - release_mem_region(res->start, SMC911X_IO_EXTENT); -out: - printk("%s: not found (%d).\n", CARDNAME, ret); - } -#ifdef SMC_USE_DMA - else { - lp->physaddr = res->start; - lp->dev = &pdev->dev; - } -#endif - - return ret; -} - -static int __devexit smc911x_drv_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct smc911x_local *lp = netdev_priv(ndev); - struct resource *res; - - DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); - platform_set_drvdata(pdev, NULL); - - unregister_netdev(ndev); - - free_irq(ndev->irq, ndev); - -#ifdef SMC_USE_DMA - { - if (lp->rxdma != -1) { - SMC_DMA_FREE(dev, lp->rxdma); - } - if (lp->txdma != -1) { - SMC_DMA_FREE(dev, lp->txdma); - } - } -#endif - iounmap(lp->base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, SMC911X_IO_EXTENT); - - free_netdev(ndev); - return 0; -} - -static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state) -{ - struct net_device *ndev = platform_get_drvdata(dev); - struct smc911x_local *lp = netdev_priv(ndev); - - DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); - if (ndev) { - if (netif_running(ndev)) { - netif_device_detach(ndev); - smc911x_shutdown(ndev); -#if POWER_DOWN - /* Set D2 - Energy detect only setting */ - SMC_SET_PMT_CTRL(lp, 2<<12); -#endif - } - } - return 0; -} - -static int smc911x_drv_resume(struct platform_device *dev) -{ - struct net_device *ndev = platform_get_drvdata(dev); - - DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); - if (ndev) { - struct smc911x_local *lp = netdev_priv(ndev); - - if (netif_running(ndev)) { - smc911x_reset(ndev); - if (lp->phy_type != 0) - smc911x_phy_configure(&lp->phy_configure); - smc911x_enable(ndev); - netif_device_attach(ndev); - } - } - return 0; -} - -static struct platform_driver smc911x_driver = { - .probe = smc911x_drv_probe, - .remove = __devexit_p(smc911x_drv_remove), - .suspend = smc911x_drv_suspend, - .resume = smc911x_drv_resume, - .driver = { - .name = CARDNAME, - .owner = THIS_MODULE, - }, -}; - -static int __init smc911x_init(void) -{ - return platform_driver_register(&smc911x_driver); -} - -static void __exit smc911x_cleanup(void) -{ - platform_driver_unregister(&smc911x_driver); -} - -module_init(smc911x_init); -module_exit(smc911x_cleanup); diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h deleted file mode 100644 index 3269292..0000000 --- a/drivers/net/smc911x.h +++ /dev/null @@ -1,924 +0,0 @@ -/*------------------------------------------------------------------------ - . 
smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device.
- .
- . Copyright (C) 2005 Sensoria Corp.
- . Derived from the unified SMC91x driver by Nicolas Pitre
- .
- . This program is free software; you can redistribute it and/or modify
- . it under the terms of the GNU General Public License as published by
- . the Free Software Foundation; either version 2 of the License, or
- . (at your option) any later version.
- .
- . This program is distributed in the hope that it will be useful,
- . but WITHOUT ANY WARRANTY; without even the implied warranty of
- . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- . GNU General Public License for more details.
- .
- . You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- .
- . Information contained in this file was obtained from the LAN9118
- . manual from SMC.  To get a copy, if you really want one, you can find
- . information under www.smsc.com.
- .
- . Authors
- .	 Dustin McIntire
- .
- ---------------------------------------------------------------------------*/
-#ifndef _SMC911X_H_
-#define _SMC911X_H_
-
-#include <linux/smc911x.h>
-/*
- * Use the DMA feature on PXA chips
- */
-#ifdef CONFIG_ARCH_PXA
-  #define SMC_USE_PXA_DMA	1
-  #define SMC_USE_16BIT		0
-  #define SMC_USE_32BIT		1
-  #define SMC_IRQ_SENSE		IRQF_TRIGGER_FALLING
-#elif defined(CONFIG_SH_MAGIC_PANEL_R2)
-  #define SMC_USE_16BIT		0
-  #define SMC_USE_32BIT		1
-  #define SMC_IRQ_SENSE		IRQF_TRIGGER_LOW
-#elif defined(CONFIG_ARCH_OMAP3)
-  #define SMC_USE_16BIT		0
-  #define SMC_USE_32BIT		1
-  #define SMC_IRQ_SENSE		IRQF_TRIGGER_LOW
-  #define SMC_MEM_RESERVED	1
-#elif defined(CONFIG_ARCH_OMAP2)
-  #define SMC_USE_16BIT		0
-  #define SMC_USE_32BIT		1
-  #define SMC_IRQ_SENSE		IRQF_TRIGGER_LOW
-  #define SMC_MEM_RESERVED	1
-#else
-/*
- * Default configuration
- */
-
-#define SMC_DYNAMIC_BUS_CONFIG
-#endif
-
-#ifdef SMC_USE_PXA_DMA
-#define SMC_USE_DMA
-#endif
-
-/* store this information for the driver.. */
-struct smc911x_local {
-	/*
-	 * If I have to wait until the DMA is finished and ready to reload a
-	 * packet, I will store the skbuff here. Then, the DMA will send it
-	 * out and free it.
-	 */
-	struct sk_buff *pending_tx_skb;
-
-	/* version/revision of the SMC911x chip */
-	u16 version;
-	u16 revision;
-
-	/* FIFO sizes */
-	int tx_fifo_kb;
-	int tx_fifo_size;
-	int rx_fifo_size;
-	int afc_cfg;
-
-	/* Contains the current active receive/phy mode */
-	int ctl_rfduplx;
-	int ctl_rspeed;
-
-	u32 msg_enable;
-	u32 phy_type;
-	struct mii_if_info mii;
-
-	/* work queue */
-	struct work_struct phy_configure;
-
-	int tx_throttle;
-	spinlock_t lock;
-
-	struct net_device *netdev;
-
-#ifdef SMC_USE_DMA
-	/* DMA needs the physical address of the chip */
-	u_long physaddr;
-	int rxdma;
-	int txdma;
-	int rxdma_active;
-	int txdma_active;
-	struct sk_buff *current_rx_skb;
-	struct sk_buff *current_tx_skb;
-	struct device *dev;
-#endif
-	void __iomem *base;
-#ifdef SMC_DYNAMIC_BUS_CONFIG
-	struct smc911x_platdata cfg;
-#endif
-};
-
-/*
- * Define the bus width specific IO macros
- */
-
-#ifdef SMC_DYNAMIC_BUS_CONFIG
-static inline unsigned int SMC_inl(struct smc911x_local *lp, int reg)
-{
-	void __iomem *ioaddr = lp->base + reg;
-
-	if (lp->cfg.flags & SMC911X_USE_32BIT)
-		return readl(ioaddr);
-
-	if (lp->cfg.flags & SMC911X_USE_16BIT)
-		return readw(ioaddr) | (readw(ioaddr + 2) << 16);
-
-	BUG();
-}
-
-static inline void SMC_outl(unsigned int value, struct smc911x_local *lp,
-			    int reg)
-{
-	void __iomem *ioaddr = lp->base + reg;
-
-	if (lp->cfg.flags & SMC911X_USE_32BIT) {
-		writel(value, ioaddr);
-		return;
-	}
-
-	if (lp->cfg.flags & SMC911X_USE_16BIT) {
-		writew(value & 0xffff, ioaddr);
-		writew(value >> 16, ioaddr + 2);
-		return;
-	}
-
-	BUG();
-}
-
-static inline void SMC_insl(struct smc911x_local *lp, int reg,
-			    void *addr, unsigned int count)
-{
-	void __iomem *ioaddr = lp->base + reg;
-
-	if (lp->cfg.flags & SMC911X_USE_32BIT) {
-		readsl(ioaddr, addr, count);
-		return;
-	}
-
-	if (lp->cfg.flags & SMC911X_USE_16BIT) {
-		readsw(ioaddr, addr, count * 2);
-		return;
-	}
-
-	BUG();
-}
-
-static inline void SMC_outsl(struct smc911x_local *lp, int reg,
-			     void *addr, unsigned int count)
-{
-	void __iomem *ioaddr = lp->base + reg;
-
-	if (lp->cfg.flags & SMC911X_USE_32BIT) {
-		writesl(ioaddr, addr, count);
-		return;
-	}
-
-	if (lp->cfg.flags & SMC911X_USE_16BIT) {
-		writesw(ioaddr, addr, count * 2);
-		return;
-	}
-
-	BUG();
-}
-#else
-#if	SMC_USE_16BIT
-#define SMC_inl(lp, r)		 ((readw((lp)->base + (r)) & 0xFFFF) + (readw((lp)->base + (r) + 2) << 16))
-#define SMC_outl(v, lp, r) \
-	do{ \
-		 writew(v & 0xFFFF, (lp)->base + (r)); \
-		 writew(v >> 16, (lp)->base + (r) + 2); \
-	 } while (0)
-#define SMC_insl(lp, r, p, l)	 readsw((short*)((lp)->base + (r)), p, l*2)
-#define SMC_outsl(lp, r, p, l)	 writesw((short*)((lp)->base + (r)), p, l*2)
-
-#elif	SMC_USE_32BIT
-#define SMC_inl(lp, r)		 readl((lp)->base + (r))
-#define SMC_outl(v, lp, r)	 writel(v, (lp)->base + (r))
-#define SMC_insl(lp, r, p, l)	 readsl((int*)((lp)->base + (r)), p, l)
-#define SMC_outsl(lp, r, p, l)	 writesl((int*)((lp)->base + (r)), p, l)
-
-#endif /* SMC_USE_16BIT */
-#endif /* SMC_DYNAMIC_BUS_CONFIG */
-
-
-#ifdef SMC_USE_PXA_DMA
-
-#include <mach/dma.h>
-
-/*
- * Define the request and free functions
- * These are unfortunately architecture specific as no generic allocation
- * mechanism exists
- */
-#define SMC_DMA_REQUEST(dev, handler) \
-	 pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev)
-
-#define SMC_DMA_FREE(dev, dma) \
-	 pxa_free_dma(dma)
-
-#define SMC_DMA_ACK_IRQ(dev, dma) \
-{ \
-	if (DCSR(dma) & DCSR_BUSERR) { \
-		printk("%s: DMA %d bus error!\n", dev->name, dma); \
-	} \
-	DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \
-}
-
-/*
- * Use a DMA for RX and TX packets.
- */
-#include <linux/dma-mapping.h>
-
-static dma_addr_t rx_dmabuf, tx_dmabuf;
-static int rx_dmalen, tx_dmalen;
-
-#ifdef SMC_insl
-#undef SMC_insl
-#define SMC_insl(lp, r, p, l) \
-	smc_pxa_dma_insl(lp, lp->physaddr, r, lp->rxdma, p, l)
-
-static inline void
-smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr,
-		 int reg, int dma, u_char *buf, int len)
-{
-	/* 64 bit alignment is required for memory to memory DMA */
-	if ((long)buf & 4) {
-		*((u32 *)buf) = SMC_inl(lp, reg);
-		buf += 4;
-		len--;
-	}
-
-	len *= 4;
-	rx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_FROM_DEVICE);
-	rx_dmalen = len;
-	DCSR(dma) = DCSR_NODESC;
-	DTADR(dma) = rx_dmabuf;
-	DSADR(dma) = physaddr + reg;
-	DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
-		DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
-	DCSR(dma) = DCSR_NODESC | DCSR_RUN;
-}
-#endif
-
-#ifdef SMC_outsl
-#undef SMC_outsl
-#define SMC_outsl(lp, r, p, l) \
-	smc_pxa_dma_outsl(lp, lp->physaddr, r, lp->txdma, p, l)
-
-static inline void
-smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr,
-		  int reg, int dma, u_char *buf, int len)
-{
-	/* 64 bit alignment is required for memory to memory DMA */
-	if ((long)buf & 4) {
-		SMC_outl(*((u32 *)buf), lp, reg);
-		buf += 4;
-		len--;
-	}
-
-	len *= 4;
-	tx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_TO_DEVICE);
-	tx_dmalen = len;
-	DCSR(dma) = DCSR_NODESC;
-	DSADR(dma) = tx_dmabuf;
-	DTADR(dma) = physaddr + reg;
-	DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
-		DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
-	DCSR(dma) = DCSR_NODESC | DCSR_RUN;
-}
-#endif
-#endif /* SMC_USE_PXA_DMA */
-
-
-/* Chip Parameters and Register Definitions */
-
-#define SMC911X_TX_FIFO_LOW_THRESHOLD	(1536*2)
-
-#define SMC911X_IO_EXTENT	0x100
-
-#define SMC911X_EEPROM_LEN	7
-
-/* Below are the register offsets and bit definitions
- * of the Lan911x memory space
- */
-#define RX_DATA_FIFO		(0x00)
-
-#define TX_DATA_FIFO		(0x20)
-#define	TX_CMD_A_INT_ON_COMP_		(0x80000000)
-#define	TX_CMD_A_INT_BUF_END_ALGN_	(0x03000000)
-#define	TX_CMD_A_INT_4_BYTE_ALGN_	(0x00000000)
-#define	TX_CMD_A_INT_16_BYTE_ALGN_	(0x01000000)
-#define	TX_CMD_A_INT_32_BYTE_ALGN_	(0x02000000)
-#define	TX_CMD_A_INT_DATA_OFFSET_	(0x001F0000)
-#define	TX_CMD_A_INT_FIRST_SEG_		(0x00002000)
-#define	TX_CMD_A_INT_LAST_SEG_		(0x00001000)
-#define	TX_CMD_A_BUF_SIZE_		(0x000007FF)
-#define	TX_CMD_B_PKT_TAG_		(0xFFFF0000)
-#define	TX_CMD_B_ADD_CRC_DISABLE_	(0x00002000)
-#define	TX_CMD_B_DISABLE_PADDING_	(0x00001000)
-#define	TX_CMD_B_PKT_BYTE_LENGTH_	(0x000007FF)
-
-#define RX_STATUS_FIFO		(0x40)
-#define	RX_STS_PKT_LEN_			(0x3FFF0000)
-#define	RX_STS_ES_			(0x00008000)
-#define	RX_STS_BCST_			(0x00002000)
-#define	RX_STS_LEN_ERR_			(0x00001000)
-#define	RX_STS_RUNT_ERR_		(0x00000800)
-#define	RX_STS_MCAST_			(0x00000400)
-#define	RX_STS_TOO_LONG_		(0x00000080)
-#define	RX_STS_COLL_			(0x00000040)
-#define	RX_STS_ETH_TYPE_		(0x00000020)
-#define	RX_STS_WDOG_TMT_		(0x00000010)
-#define	RX_STS_MII_ERR_			(0x00000008)
-#define	RX_STS_DRIBBLING_		(0x00000004)
-#define	RX_STS_CRC_ERR_			(0x00000002)
-#define RX_STATUS_FIFO_PEEK	(0x44)
-#define TX_STATUS_FIFO		(0x48)
-#define	TX_STS_TAG_			(0xFFFF0000)
-#define	TX_STS_ES_			(0x00008000)
-#define	TX_STS_LOC_			(0x00000800)
-#define	TX_STS_NO_CARR_			(0x00000400)
-#define	TX_STS_LATE_COLL_		(0x00000200)
-#define	TX_STS_MANY_COLL_		(0x00000100)
-#define	TX_STS_COLL_CNT_		(0x00000078)
-#define	TX_STS_MANY_DEFER_		(0x00000004)
-#define	TX_STS_UNDERRUN_		(0x00000002)
-#define	TX_STS_DEFERRED_		(0x00000001)
-#define
TX_STATUS_FIFO_PEEK (0x4C) -#define ID_REV (0x50) -#define ID_REV_CHIP_ID_ (0xFFFF0000) /* RO */ -#define ID_REV_REV_ID_ (0x0000FFFF) /* RO */ - -#define INT_CFG (0x54) -#define INT_CFG_INT_DEAS_ (0xFF000000) /* R/W */ -#define INT_CFG_INT_DEAS_CLR_ (0x00004000) -#define INT_CFG_INT_DEAS_STS_ (0x00002000) -#define INT_CFG_IRQ_INT_ (0x00001000) /* RO */ -#define INT_CFG_IRQ_EN_ (0x00000100) /* R/W */ -#define INT_CFG_IRQ_POL_ (0x00000010) /* R/W Not Affected by SW Reset */ -#define INT_CFG_IRQ_TYPE_ (0x00000001) /* R/W Not Affected by SW Reset */ - -#define INT_STS (0x58) -#define INT_STS_SW_INT_ (0x80000000) /* R/WC */ -#define INT_STS_TXSTOP_INT_ (0x02000000) /* R/WC */ -#define INT_STS_RXSTOP_INT_ (0x01000000) /* R/WC */ -#define INT_STS_RXDFH_INT_ (0x00800000) /* R/WC */ -#define INT_STS_RXDF_INT_ (0x00400000) /* R/WC */ -#define INT_STS_TX_IOC_ (0x00200000) /* R/WC */ -#define INT_STS_RXD_INT_ (0x00100000) /* R/WC */ -#define INT_STS_GPT_INT_ (0x00080000) /* R/WC */ -#define INT_STS_PHY_INT_ (0x00040000) /* RO */ -#define INT_STS_PME_INT_ (0x00020000) /* R/WC */ -#define INT_STS_TXSO_ (0x00010000) /* R/WC */ -#define INT_STS_RWT_ (0x00008000) /* R/WC */ -#define INT_STS_RXE_ (0x00004000) /* R/WC */ -#define INT_STS_TXE_ (0x00002000) /* R/WC */ -//#define INT_STS_ERX_ (0x00001000) /* R/WC */ -#define INT_STS_TDFU_ (0x00000800) /* R/WC */ -#define INT_STS_TDFO_ (0x00000400) /* R/WC */ -#define INT_STS_TDFA_ (0x00000200) /* R/WC */ -#define INT_STS_TSFF_ (0x00000100) /* R/WC */ -#define INT_STS_TSFL_ (0x00000080) /* R/WC */ -//#define INT_STS_RXDF_ (0x00000040) /* R/WC */ -#define INT_STS_RDFO_ (0x00000040) /* R/WC */ -#define INT_STS_RDFL_ (0x00000020) /* R/WC */ -#define INT_STS_RSFF_ (0x00000010) /* R/WC */ -#define INT_STS_RSFL_ (0x00000008) /* R/WC */ -#define INT_STS_GPIO2_INT_ (0x00000004) /* R/WC */ -#define INT_STS_GPIO1_INT_ (0x00000002) /* R/WC */ -#define INT_STS_GPIO0_INT_ (0x00000001) /* R/WC */ - -#define INT_EN (0x5C) -#define INT_EN_SW_INT_EN_ (0x80000000) /* R/W */ -#define INT_EN_TXSTOP_INT_EN_ (0x02000000) /* R/W */ -#define INT_EN_RXSTOP_INT_EN_ (0x01000000) /* R/W */ -#define INT_EN_RXDFH_INT_EN_ (0x00800000) /* R/W */ -//#define INT_EN_RXDF_INT_EN_ (0x00400000) /* R/W */ -#define INT_EN_TIOC_INT_EN_ (0x00200000) /* R/W */ -#define INT_EN_RXD_INT_EN_ (0x00100000) /* R/W */ -#define INT_EN_GPT_INT_EN_ (0x00080000) /* R/W */ -#define INT_EN_PHY_INT_EN_ (0x00040000) /* R/W */ -#define INT_EN_PME_INT_EN_ (0x00020000) /* R/W */ -#define INT_EN_TXSO_EN_ (0x00010000) /* R/W */ -#define INT_EN_RWT_EN_ (0x00008000) /* R/W */ -#define INT_EN_RXE_EN_ (0x00004000) /* R/W */ -#define INT_EN_TXE_EN_ (0x00002000) /* R/W */ -//#define INT_EN_ERX_EN_ (0x00001000) /* R/W */ -#define INT_EN_TDFU_EN_ (0x00000800) /* R/W */ -#define INT_EN_TDFO_EN_ (0x00000400) /* R/W */ -#define INT_EN_TDFA_EN_ (0x00000200) /* R/W */ -#define INT_EN_TSFF_EN_ (0x00000100) /* R/W */ -#define INT_EN_TSFL_EN_ (0x00000080) /* R/W */ -//#define INT_EN_RXDF_EN_ (0x00000040) /* R/W */ -#define INT_EN_RDFO_EN_ (0x00000040) /* R/W */ -#define INT_EN_RDFL_EN_ (0x00000020) /* R/W */ -#define INT_EN_RSFF_EN_ (0x00000010) /* R/W */ -#define INT_EN_RSFL_EN_ (0x00000008) /* R/W */ -#define INT_EN_GPIO2_INT_ (0x00000004) /* R/W */ -#define INT_EN_GPIO1_INT_ (0x00000002) /* R/W */ -#define INT_EN_GPIO0_INT_ (0x00000001) /* R/W */ - -#define BYTE_TEST (0x64) -#define FIFO_INT (0x68) -#define FIFO_INT_TX_AVAIL_LEVEL_ (0xFF000000) /* R/W */ -#define FIFO_INT_TX_STS_LEVEL_ (0x00FF0000) /* R/W */ -#define 
FIFO_INT_RX_AVAIL_LEVEL_ (0x0000FF00) /* R/W */ -#define FIFO_INT_RX_STS_LEVEL_ (0x000000FF) /* R/W */ - -#define RX_CFG (0x6C) -#define RX_CFG_RX_END_ALGN_ (0xC0000000) /* R/W */ -#define RX_CFG_RX_END_ALGN4_ (0x00000000) /* R/W */ -#define RX_CFG_RX_END_ALGN16_ (0x40000000) /* R/W */ -#define RX_CFG_RX_END_ALGN32_ (0x80000000) /* R/W */ -#define RX_CFG_RX_DMA_CNT_ (0x0FFF0000) /* R/W */ -#define RX_CFG_RX_DUMP_ (0x00008000) /* R/W */ -#define RX_CFG_RXDOFF_ (0x00001F00) /* R/W */ -//#define RX_CFG_RXBAD_ (0x00000001) /* R/W */ - -#define TX_CFG (0x70) -//#define TX_CFG_TX_DMA_LVL_ (0xE0000000) /* R/W */ -//#define TX_CFG_TX_DMA_CNT_ (0x0FFF0000) /* R/W Self Clearing */ -#define TX_CFG_TXS_DUMP_ (0x00008000) /* Self Clearing */ -#define TX_CFG_TXD_DUMP_ (0x00004000) /* Self Clearing */ -#define TX_CFG_TXSAO_ (0x00000004) /* R/W */ -#define TX_CFG_TX_ON_ (0x00000002) /* R/W */ -#define TX_CFG_STOP_TX_ (0x00000001) /* Self Clearing */ - -#define HW_CFG (0x74) -#define HW_CFG_TTM_ (0x00200000) /* R/W */ -#define HW_CFG_SF_ (0x00100000) /* R/W */ -#define HW_CFG_TX_FIF_SZ_ (0x000F0000) /* R/W */ -#define HW_CFG_TR_ (0x00003000) /* R/W */ -#define HW_CFG_PHY_CLK_SEL_ (0x00000060) /* R/W */ -#define HW_CFG_PHY_CLK_SEL_INT_PHY_ (0x00000000) /* R/W */ -#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ (0x00000020) /* R/W */ -#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ (0x00000040) /* R/W */ -#define HW_CFG_SMI_SEL_ (0x00000010) /* R/W */ -#define HW_CFG_EXT_PHY_DET_ (0x00000008) /* RO */ -#define HW_CFG_EXT_PHY_EN_ (0x00000004) /* R/W */ -#define HW_CFG_32_16_BIT_MODE_ (0x00000004) /* RO */ -#define HW_CFG_SRST_TO_ (0x00000002) /* RO */ -#define HW_CFG_SRST_ (0x00000001) /* Self Clearing */ - -#define RX_DP_CTRL (0x78) -#define RX_DP_CTRL_RX_FFWD_ (0x80000000) /* R/W */ -#define RX_DP_CTRL_FFWD_BUSY_ (0x80000000) /* RO */ - -#define RX_FIFO_INF (0x7C) -#define RX_FIFO_INF_RXSUSED_ (0x00FF0000) /* RO */ -#define RX_FIFO_INF_RXDUSED_ (0x0000FFFF) /* RO */ - -#define TX_FIFO_INF (0x80) -#define TX_FIFO_INF_TSUSED_ (0x00FF0000) /* RO */ -#define TX_FIFO_INF_TDFREE_ (0x0000FFFF) /* RO */ - -#define PMT_CTRL (0x84) -#define PMT_CTRL_PM_MODE_ (0x00003000) /* Self Clearing */ -#define PMT_CTRL_PHY_RST_ (0x00000400) /* Self Clearing */ -#define PMT_CTRL_WOL_EN_ (0x00000200) /* R/W */ -#define PMT_CTRL_ED_EN_ (0x00000100) /* R/W */ -#define PMT_CTRL_PME_TYPE_ (0x00000040) /* R/W Not Affected by SW Reset */ -#define PMT_CTRL_WUPS_ (0x00000030) /* R/WC */ -#define PMT_CTRL_WUPS_NOWAKE_ (0x00000000) /* R/WC */ -#define PMT_CTRL_WUPS_ED_ (0x00000010) /* R/WC */ -#define PMT_CTRL_WUPS_WOL_ (0x00000020) /* R/WC */ -#define PMT_CTRL_WUPS_MULTI_ (0x00000030) /* R/WC */ -#define PMT_CTRL_PME_IND_ (0x00000008) /* R/W */ -#define PMT_CTRL_PME_POL_ (0x00000004) /* R/W */ -#define PMT_CTRL_PME_EN_ (0x00000002) /* R/W Not Affected by SW Reset */ -#define PMT_CTRL_READY_ (0x00000001) /* RO */ - -#define GPIO_CFG (0x88) -#define GPIO_CFG_LED3_EN_ (0x40000000) /* R/W */ -#define GPIO_CFG_LED2_EN_ (0x20000000) /* R/W */ -#define GPIO_CFG_LED1_EN_ (0x10000000) /* R/W */ -#define GPIO_CFG_GPIO2_INT_POL_ (0x04000000) /* R/W */ -#define GPIO_CFG_GPIO1_INT_POL_ (0x02000000) /* R/W */ -#define GPIO_CFG_GPIO0_INT_POL_ (0x01000000) /* R/W */ -#define GPIO_CFG_EEPR_EN_ (0x00700000) /* R/W */ -#define GPIO_CFG_GPIOBUF2_ (0x00040000) /* R/W */ -#define GPIO_CFG_GPIOBUF1_ (0x00020000) /* R/W */ -#define GPIO_CFG_GPIOBUF0_ (0x00010000) /* R/W */ -#define GPIO_CFG_GPIODIR2_ (0x00000400) /* R/W */ -#define GPIO_CFG_GPIODIR1_ (0x00000200) /* R/W */ -#define 
GPIO_CFG_GPIODIR0_ (0x00000100) /* R/W */ -#define GPIO_CFG_GPIOD4_ (0x00000010) /* R/W */ -#define GPIO_CFG_GPIOD3_ (0x00000008) /* R/W */ -#define GPIO_CFG_GPIOD2_ (0x00000004) /* R/W */ -#define GPIO_CFG_GPIOD1_ (0x00000002) /* R/W */ -#define GPIO_CFG_GPIOD0_ (0x00000001) /* R/W */ - -#define GPT_CFG (0x8C) -#define GPT_CFG_TIMER_EN_ (0x20000000) /* R/W */ -#define GPT_CFG_GPT_LOAD_ (0x0000FFFF) /* R/W */ - -#define GPT_CNT (0x90) -#define GPT_CNT_GPT_CNT_ (0x0000FFFF) /* RO */ - -#define ENDIAN (0x98) -#define FREE_RUN (0x9C) -#define RX_DROP (0xA0) -#define MAC_CSR_CMD (0xA4) -#define MAC_CSR_CMD_CSR_BUSY_ (0x80000000) /* Self Clearing */ -#define MAC_CSR_CMD_R_NOT_W_ (0x40000000) /* R/W */ -#define MAC_CSR_CMD_CSR_ADDR_ (0x000000FF) /* R/W */ - -#define MAC_CSR_DATA (0xA8) -#define AFC_CFG (0xAC) -#define AFC_CFG_AFC_HI_ (0x00FF0000) /* R/W */ -#define AFC_CFG_AFC_LO_ (0x0000FF00) /* R/W */ -#define AFC_CFG_BACK_DUR_ (0x000000F0) /* R/W */ -#define AFC_CFG_FCMULT_ (0x00000008) /* R/W */ -#define AFC_CFG_FCBRD_ (0x00000004) /* R/W */ -#define AFC_CFG_FCADD_ (0x00000002) /* R/W */ -#define AFC_CFG_FCANY_ (0x00000001) /* R/W */ - -#define E2P_CMD (0xB0) -#define E2P_CMD_EPC_BUSY_ (0x80000000) /* Self Clearing */ -#define E2P_CMD_EPC_CMD_ (0x70000000) /* R/W */ -#define E2P_CMD_EPC_CMD_READ_ (0x00000000) /* R/W */ -#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) /* R/W */ -#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) /* R/W */ -#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) /* R/W */ -#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) /* R/W */ -#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) /* R/W */ -#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) /* R/W */ -#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) /* R/W */ -#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) /* RO */ -#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) /* RO */ -#define E2P_CMD_EPC_ADDR_ (0x000000FF) /* R/W */ - -#define E2P_DATA (0xB4) -#define E2P_DATA_EEPROM_DATA_ (0x000000FF) /* R/W */ -/* end of LAN register offsets and bit definitions */ - -/* - **************************************************************************** - **************************************************************************** - * MAC Control and Status Register (Indirect Address) - * Offset (through the MAC_CSR CMD and DATA port) - **************************************************************************** - **************************************************************************** - * - */ -#define MAC_CR (0x01) /* R/W */ - -/* MAC_CR - MAC Control Register */ -#define MAC_CR_RXALL_ (0x80000000) -// TODO: delete this bit? It is not described in the data sheet. 
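The MAC registers from MAC_CR onward are indirect: software reaches them through the MAC_CSR_CMD/MAC_CSR_DATA window defined above rather than through the memory map. A minimal sketch of a read, assuming only the SMC_inl/SMC_outl accessors from earlier in this header (the driver's own SMC_GET_MAC_CSR macro, further down, performs the same sequence; the function name here is illustrative only):

static inline u32 mac_csr_read(struct smc911x_local *lp, int reg)
{
	/* wait for any CSR access already in flight to finish */
	while (SMC_inl(lp, MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
		cpu_relax();
	/* post a read command naming the indirect register */
	SMC_outl(MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_ |
		 (reg & MAC_CSR_CMD_CSR_ADDR_), lp, MAC_CSR_CMD);
	/* BUSY self-clears once MAC_CSR_DATA holds the result */
	while (SMC_inl(lp, MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
		cpu_relax();
	return SMC_inl(lp, MAC_CSR_DATA);
}

A write is symmetric: load MAC_CSR_DATA first, then issue the command without MAC_CSR_CMD_R_NOT_W_.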
-#define MAC_CR_HBDIS_ (0x10000000) -#define MAC_CR_RCVOWN_ (0x00800000) -#define MAC_CR_LOOPBK_ (0x00200000) -#define MAC_CR_FDPX_ (0x00100000) -#define MAC_CR_MCPAS_ (0x00080000) -#define MAC_CR_PRMS_ (0x00040000) -#define MAC_CR_INVFILT_ (0x00020000) -#define MAC_CR_PASSBAD_ (0x00010000) -#define MAC_CR_HFILT_ (0x00008000) -#define MAC_CR_HPFILT_ (0x00002000) -#define MAC_CR_LCOLL_ (0x00001000) -#define MAC_CR_BCAST_ (0x00000800) -#define MAC_CR_DISRTY_ (0x00000400) -#define MAC_CR_PADSTR_ (0x00000100) -#define MAC_CR_BOLMT_MASK_ (0x000000C0) -#define MAC_CR_DFCHK_ (0x00000020) -#define MAC_CR_TXEN_ (0x00000008) -#define MAC_CR_RXEN_ (0x00000004) - -#define ADDRH (0x02) /* R/W mask 0x0000FFFFUL */ -#define ADDRL (0x03) /* R/W mask 0xFFFFFFFFUL */ -#define HASHH (0x04) /* R/W */ -#define HASHL (0x05) /* R/W */ - -#define MII_ACC (0x06) /* R/W */ -#define MII_ACC_PHY_ADDR_ (0x0000F800) -#define MII_ACC_MIIRINDA_ (0x000007C0) -#define MII_ACC_MII_WRITE_ (0x00000002) -#define MII_ACC_MII_BUSY_ (0x00000001) - -#define MII_DATA (0x07) /* R/W mask 0x0000FFFFUL */ - -#define FLOW (0x08) /* R/W */ -#define FLOW_FCPT_ (0xFFFF0000) -#define FLOW_FCPASS_ (0x00000004) -#define FLOW_FCEN_ (0x00000002) -#define FLOW_FCBSY_ (0x00000001) - -#define VLAN1 (0x09) /* R/W mask 0x0000FFFFUL */ -#define VLAN1_VTI1_ (0x0000ffff) - -#define VLAN2 (0x0A) /* R/W mask 0x0000FFFFUL */ -#define VLAN2_VTI2_ (0x0000ffff) - -#define WUFF (0x0B) /* WO */ - -#define WUCSR (0x0C) /* R/W */ -#define WUCSR_GUE_ (0x00000200) -#define WUCSR_WUFR_ (0x00000040) -#define WUCSR_MPR_ (0x00000020) -#define WUCSR_WAKE_EN_ (0x00000004) -#define WUCSR_MPEN_ (0x00000002) - -/* - **************************************************************************** - * Chip Specific MII Defines - **************************************************************************** - * - * Phy register offsets and bit definitions - * - */ - -#define PHY_MODE_CTRL_STS ((u32)17) /* Mode Control/Status Register */ -//#define MODE_CTRL_STS_FASTRIP_ ((u16)0x4000) -#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000) -//#define MODE_CTRL_STS_LOWSQEN_ ((u16)0x0800) -//#define MODE_CTRL_STS_MDPREBP_ ((u16)0x0400) -//#define MODE_CTRL_STS_FARLOOPBACK_ ((u16)0x0200) -//#define MODE_CTRL_STS_FASTEST_ ((u16)0x0100) -//#define MODE_CTRL_STS_REFCLKEN_ ((u16)0x0010) -//#define MODE_CTRL_STS_PHYADBP_ ((u16)0x0008) -//#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004) -#define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002) - -#define PHY_INT_SRC ((u32)29) -#define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080) -#define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040) -#define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020) -#define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010) -#define PHY_INT_SRC_ANEG_LP_ACK_ ((u16)0x0008) -#define PHY_INT_SRC_PAR_DET_FAULT_ ((u16)0x0004) -#define PHY_INT_SRC_ANEG_PGRX_ ((u16)0x0002) - -#define PHY_INT_MASK ((u32)30) -#define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080) -#define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040) -#define PHY_INT_MASK_REMOTE_FAULT_ ((u16)0x0020) -#define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010) -#define PHY_INT_MASK_ANEG_LP_ACK_ ((u16)0x0008) -#define PHY_INT_MASK_PAR_DET_FAULT_ ((u16)0x0004) -#define PHY_INT_MASK_ANEG_PGRX_ ((u16)0x0002) - -#define PHY_SPECIAL ((u32)31) -#define PHY_SPECIAL_ANEG_DONE_ ((u16)0x1000) -#define PHY_SPECIAL_RES_ ((u16)0x0040) -#define PHY_SPECIAL_RES_MASK_ ((u16)0x0FE1) -#define PHY_SPECIAL_SPD_ ((u16)0x001C) -#define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004) -#define PHY_SPECIAL_SPD_10FULL_ ((u16)0x0014) -#define PHY_SPECIAL_SPD_100HALF_ ((u16)0x0008) 
-#define PHY_SPECIAL_SPD_100FULL_ ((u16)0x0018) - -#define LAN911X_INTERNAL_PHY_ID (0x0007C000) - -/* Chip ID values */ -#define CHIP_9115 0x0115 -#define CHIP_9116 0x0116 -#define CHIP_9117 0x0117 -#define CHIP_9118 0x0118 -#define CHIP_9211 0x9211 -#define CHIP_9215 0x115A -#define CHIP_9217 0x117A -#define CHIP_9218 0x118A - -struct chip_id { - u16 id; - char *name; -}; - -static const struct chip_id chip_ids[] = { - { CHIP_9115, "LAN9115" }, - { CHIP_9116, "LAN9116" }, - { CHIP_9117, "LAN9117" }, - { CHIP_9118, "LAN9118" }, - { CHIP_9211, "LAN9211" }, - { CHIP_9215, "LAN9215" }, - { CHIP_9217, "LAN9217" }, - { CHIP_9218, "LAN9218" }, - { 0, NULL }, -}; - -#define IS_REV_A(x) ((x & 0xFFFF)==0) - -/* - * Macros to abstract register access according to the data bus - * capabilities. Please use those and not the in/out primitives. - */ -/* FIFO read/write macros */ -#define SMC_PUSH_DATA(lp, p, l) SMC_outsl( lp, TX_DATA_FIFO, p, (l) >> 2 ) -#define SMC_PULL_DATA(lp, p, l) SMC_insl ( lp, RX_DATA_FIFO, p, (l) >> 2 ) -#define SMC_SET_TX_FIFO(lp, x) SMC_outl( x, lp, TX_DATA_FIFO ) -#define SMC_GET_RX_FIFO(lp) SMC_inl( lp, RX_DATA_FIFO ) - - -/* I/O mapped register read/write macros */ -#define SMC_GET_TX_STS_FIFO(lp) SMC_inl( lp, TX_STATUS_FIFO ) -#define SMC_GET_RX_STS_FIFO(lp) SMC_inl( lp, RX_STATUS_FIFO ) -#define SMC_GET_RX_STS_FIFO_PEEK(lp) SMC_inl( lp, RX_STATUS_FIFO_PEEK ) -#define SMC_GET_PN(lp) (SMC_inl( lp, ID_REV ) >> 16) -#define SMC_GET_REV(lp) (SMC_inl( lp, ID_REV ) & 0xFFFF) -#define SMC_GET_IRQ_CFG(lp) SMC_inl( lp, INT_CFG ) -#define SMC_SET_IRQ_CFG(lp, x) SMC_outl( x, lp, INT_CFG ) -#define SMC_GET_INT(lp) SMC_inl( lp, INT_STS ) -#define SMC_ACK_INT(lp, x) SMC_outl( x, lp, INT_STS ) -#define SMC_GET_INT_EN(lp) SMC_inl( lp, INT_EN ) -#define SMC_SET_INT_EN(lp, x) SMC_outl( x, lp, INT_EN ) -#define SMC_GET_BYTE_TEST(lp) SMC_inl( lp, BYTE_TEST ) -#define SMC_SET_BYTE_TEST(lp, x) SMC_outl( x, lp, BYTE_TEST ) -#define SMC_GET_FIFO_INT(lp) SMC_inl( lp, FIFO_INT ) -#define SMC_SET_FIFO_INT(lp, x) SMC_outl( x, lp, FIFO_INT ) -#define SMC_SET_FIFO_TDA(lp, x) \ - do { \ - unsigned long __flags; \ - int __mask; \ - local_irq_save(__flags); \ - __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<24); \ - SMC_SET_FIFO_INT( (lp), __mask | (x)<<24 ); \ - local_irq_restore(__flags); \ - } while (0) -#define SMC_SET_FIFO_TSL(lp, x) \ - do { \ - unsigned long __flags; \ - int __mask; \ - local_irq_save(__flags); \ - __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<16); \ - SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<16)); \ - local_irq_restore(__flags); \ - } while (0) -#define SMC_SET_FIFO_RSA(lp, x) \ - do { \ - unsigned long __flags; \ - int __mask; \ - local_irq_save(__flags); \ - __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<8); \ - SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<8)); \ - local_irq_restore(__flags); \ - } while (0) -#define SMC_SET_FIFO_RSL(lp, x) \ - do { \ - unsigned long __flags; \ - int __mask; \ - local_irq_save(__flags); \ - __mask = SMC_GET_FIFO_INT((lp)) & ~0xFF; \ - SMC_SET_FIFO_INT( (lp),__mask | ((x) & 0xFF)); \ - local_irq_restore(__flags); \ - } while (0) -#define SMC_GET_RX_CFG(lp) SMC_inl( lp, RX_CFG ) -#define SMC_SET_RX_CFG(lp, x) SMC_outl( x, lp, RX_CFG ) -#define SMC_GET_TX_CFG(lp) SMC_inl( lp, TX_CFG ) -#define SMC_SET_TX_CFG(lp, x) SMC_outl( x, lp, TX_CFG ) -#define SMC_GET_HW_CFG(lp) SMC_inl( lp, HW_CFG ) -#define SMC_SET_HW_CFG(lp, x) SMC_outl( x, lp, HW_CFG ) -#define SMC_GET_RX_DP_CTRL(lp) SMC_inl( lp, RX_DP_CTRL ) -#define SMC_SET_RX_DP_CTRL(lp, x) SMC_outl( x, 
lp, RX_DP_CTRL ) -#define SMC_GET_PMT_CTRL(lp) SMC_inl( lp, PMT_CTRL ) -#define SMC_SET_PMT_CTRL(lp, x) SMC_outl( x, lp, PMT_CTRL ) -#define SMC_GET_GPIO_CFG(lp) SMC_inl( lp, GPIO_CFG ) -#define SMC_SET_GPIO_CFG(lp, x) SMC_outl( x, lp, GPIO_CFG ) -#define SMC_GET_RX_FIFO_INF(lp) SMC_inl( lp, RX_FIFO_INF ) -#define SMC_SET_RX_FIFO_INF(lp, x) SMC_outl( x, lp, RX_FIFO_INF ) -#define SMC_GET_TX_FIFO_INF(lp) SMC_inl( lp, TX_FIFO_INF ) -#define SMC_SET_TX_FIFO_INF(lp, x) SMC_outl( x, lp, TX_FIFO_INF ) -#define SMC_GET_GPT_CFG(lp) SMC_inl( lp, GPT_CFG ) -#define SMC_SET_GPT_CFG(lp, x) SMC_outl( x, lp, GPT_CFG ) -#define SMC_GET_RX_DROP(lp) SMC_inl( lp, RX_DROP ) -#define SMC_SET_RX_DROP(lp, x) SMC_outl( x, lp, RX_DROP ) -#define SMC_GET_MAC_CMD(lp) SMC_inl( lp, MAC_CSR_CMD ) -#define SMC_SET_MAC_CMD(lp, x) SMC_outl( x, lp, MAC_CSR_CMD ) -#define SMC_GET_MAC_DATA(lp) SMC_inl( lp, MAC_CSR_DATA ) -#define SMC_SET_MAC_DATA(lp, x) SMC_outl( x, lp, MAC_CSR_DATA ) -#define SMC_GET_AFC_CFG(lp) SMC_inl( lp, AFC_CFG ) -#define SMC_SET_AFC_CFG(lp, x) SMC_outl( x, lp, AFC_CFG ) -#define SMC_GET_E2P_CMD(lp) SMC_inl( lp, E2P_CMD ) -#define SMC_SET_E2P_CMD(lp, x) SMC_outl( x, lp, E2P_CMD ) -#define SMC_GET_E2P_DATA(lp) SMC_inl( lp, E2P_DATA ) -#define SMC_SET_E2P_DATA(lp, x) SMC_outl( x, lp, E2P_DATA ) - -/* MAC register read/write macros */ -#define SMC_GET_MAC_CSR(lp,a,v) \ - do { \ - while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - SMC_SET_MAC_CMD((lp),MAC_CSR_CMD_CSR_BUSY_ | \ - MAC_CSR_CMD_R_NOT_W_ | (a) ); \ - while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - v = SMC_GET_MAC_DATA((lp)); \ - } while (0) -#define SMC_SET_MAC_CSR(lp,a,v) \ - do { \ - while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - SMC_SET_MAC_DATA((lp), v); \ - SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_CSR_BUSY_ | (a) ); \ - while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - } while (0) -#define SMC_GET_MAC_CR(lp, x) SMC_GET_MAC_CSR( (lp), MAC_CR, x ) -#define SMC_SET_MAC_CR(lp, x) SMC_SET_MAC_CSR( (lp), MAC_CR, x ) -#define SMC_GET_ADDRH(lp, x) SMC_GET_MAC_CSR( (lp), ADDRH, x ) -#define SMC_SET_ADDRH(lp, x) SMC_SET_MAC_CSR( (lp), ADDRH, x ) -#define SMC_GET_ADDRL(lp, x) SMC_GET_MAC_CSR( (lp), ADDRL, x ) -#define SMC_SET_ADDRL(lp, x) SMC_SET_MAC_CSR( (lp), ADDRL, x ) -#define SMC_GET_HASHH(lp, x) SMC_GET_MAC_CSR( (lp), HASHH, x ) -#define SMC_SET_HASHH(lp, x) SMC_SET_MAC_CSR( (lp), HASHH, x ) -#define SMC_GET_HASHL(lp, x) SMC_GET_MAC_CSR( (lp), HASHL, x ) -#define SMC_SET_HASHL(lp, x) SMC_SET_MAC_CSR( (lp), HASHL, x ) -#define SMC_GET_MII_ACC(lp, x) SMC_GET_MAC_CSR( (lp), MII_ACC, x ) -#define SMC_SET_MII_ACC(lp, x) SMC_SET_MAC_CSR( (lp), MII_ACC, x ) -#define SMC_GET_MII_DATA(lp, x) SMC_GET_MAC_CSR( (lp), MII_DATA, x ) -#define SMC_SET_MII_DATA(lp, x) SMC_SET_MAC_CSR( (lp), MII_DATA, x ) -#define SMC_GET_FLOW(lp, x) SMC_GET_MAC_CSR( (lp), FLOW, x ) -#define SMC_SET_FLOW(lp, x) SMC_SET_MAC_CSR( (lp), FLOW, x ) -#define SMC_GET_VLAN1(lp, x) SMC_GET_MAC_CSR( (lp), VLAN1, x ) -#define SMC_SET_VLAN1(lp, x) SMC_SET_MAC_CSR( (lp), VLAN1, x ) -#define SMC_GET_VLAN2(lp, x) SMC_GET_MAC_CSR( (lp), VLAN2, x ) -#define SMC_SET_VLAN2(lp, x) SMC_SET_MAC_CSR( (lp), VLAN2, x ) -#define SMC_SET_WUFF(lp, x) SMC_SET_MAC_CSR( (lp), WUFF, x ) -#define SMC_GET_WUCSR(lp, x) SMC_GET_MAC_CSR( (lp), WUCSR, x ) -#define SMC_SET_WUCSR(lp, x) SMC_SET_MAC_CSR( (lp), WUCSR, x ) - -/* PHY register read/write macros */ -#define SMC_GET_MII(lp,a,phy,v) \ - do { \ - u32 __v; \ - do { \ - SMC_GET_MII_ACC((lp), __v); \ - } while ( __v & 
MII_ACC_MII_BUSY_ ); \ - SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \ - MII_ACC_MII_BUSY_); \ - do { \ - SMC_GET_MII_ACC( (lp), __v); \ - } while ( __v & MII_ACC_MII_BUSY_ ); \ - SMC_GET_MII_DATA((lp), v); \ - } while (0) -#define SMC_SET_MII(lp,a,phy,v) \ - do { \ - u32 __v; \ - do { \ - SMC_GET_MII_ACC((lp), __v); \ - } while ( __v & MII_ACC_MII_BUSY_ ); \ - SMC_SET_MII_DATA((lp), v); \ - SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \ - MII_ACC_MII_BUSY_ | \ - MII_ACC_MII_WRITE_ ); \ - do { \ - SMC_GET_MII_ACC((lp), __v); \ - } while ( __v & MII_ACC_MII_BUSY_ ); \ - } while (0) -#define SMC_GET_PHY_BMCR(lp,phy,x) SMC_GET_MII( (lp), MII_BMCR, phy, x ) -#define SMC_SET_PHY_BMCR(lp,phy,x) SMC_SET_MII( (lp), MII_BMCR, phy, x ) -#define SMC_GET_PHY_BMSR(lp,phy,x) SMC_GET_MII( (lp), MII_BMSR, phy, x ) -#define SMC_GET_PHY_ID1(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID1, phy, x ) -#define SMC_GET_PHY_ID2(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID2, phy, x ) -#define SMC_GET_PHY_MII_ADV(lp,phy,x) SMC_GET_MII( (lp), MII_ADVERTISE, phy, x ) -#define SMC_SET_PHY_MII_ADV(lp,phy,x) SMC_SET_MII( (lp), MII_ADVERTISE, phy, x ) -#define SMC_GET_PHY_MII_LPA(lp,phy,x) SMC_GET_MII( (lp), MII_LPA, phy, x ) -#define SMC_SET_PHY_MII_LPA(lp,phy,x) SMC_SET_MII( (lp), MII_LPA, phy, x ) -#define SMC_GET_PHY_CTRL_STS(lp,phy,x) SMC_GET_MII( (lp), PHY_MODE_CTRL_STS, phy, x ) -#define SMC_SET_PHY_CTRL_STS(lp,phy,x) SMC_SET_MII( (lp), PHY_MODE_CTRL_STS, phy, x ) -#define SMC_GET_PHY_INT_SRC(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_SRC, phy, x ) -#define SMC_SET_PHY_INT_SRC(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_SRC, phy, x ) -#define SMC_GET_PHY_INT_MASK(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_MASK, phy, x ) -#define SMC_SET_PHY_INT_MASK(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_MASK, phy, x ) -#define SMC_GET_PHY_SPECIAL(lp,phy,x) SMC_GET_MII( (lp), PHY_SPECIAL, phy, x ) - - - -/* Misc read/write macros */ - -#ifndef SMC_GET_MAC_ADDR -#define SMC_GET_MAC_ADDR(lp, addr) \ - do { \ - unsigned int __v; \ - \ - SMC_GET_MAC_CSR((lp), ADDRL, __v); \ - addr[0] = __v; addr[1] = __v >> 8; \ - addr[2] = __v >> 16; addr[3] = __v >> 24; \ - SMC_GET_MAC_CSR((lp), ADDRH, __v); \ - addr[4] = __v; addr[5] = __v >> 8; \ - } while (0) -#endif - -#define SMC_SET_MAC_ADDR(lp, addr) \ - do { \ - SMC_SET_MAC_CSR((lp), ADDRL, \ - addr[0] | \ - (addr[1] << 8) | \ - (addr[2] << 16) | \ - (addr[3] << 24)); \ - SMC_SET_MAC_CSR((lp), ADDRH, addr[4]|(addr[5] << 8));\ - } while (0) - - -#define SMC_WRITE_EEPROM_CMD(lp, cmd, addr) \ - do { \ - while (SMC_GET_E2P_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_R_NOT_W_ | a ); \ - while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ - } while (0) - -#endif /* _SMC911X_H_ */ diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c deleted file mode 100644 index 5b65ac4..0000000 --- a/drivers/net/smc9194.c +++ /dev/null @@ -1,1589 +0,0 @@ -/*------------------------------------------------------------------------ - . smc9194.c - . This is a driver for SMC's 9000 series of Ethernet cards. - . - . Copyright (C) 1996 by Erik Stahlman - . This software may be used and distributed according to the terms - . of the GNU General Public License, incorporated herein by reference. - . - . "Features" of the SMC chip: - . 4608 byte packet memory. ( for the 91C92. Others have more ) - . EEPROM for configuration - . AUI/TP selection ( mine has 10Base2/10BaseT select ) - . - . Arguments: - . io = for the base address - . irq = for the IRQ - . 
ifport = 0 for autodetect, 1 for TP, 2 for AUI ( or 10base2 )
- .
- . author:
- .	 Erik Stahlman  ( erik@vt.edu )
- . contributors:
- .	 Arnaldo Carvalho de Melo
- .
- . Hardware multicast code from Peter Cammaert ( pc@denkart.be )
- .
- . Sources:
- .    o   SMC databook
- .    o   skeleton.c by Donald Becker ( becker@scyld.com )
- .    o   ( a LOT of advice from Becker as well )
- .
- . History:
- .	12/07/95  Erik Stahlman  written, got receive/xmit handled
- .	01/03/96  Erik Stahlman  worked out some bugs, actually usable!!! :-)
- .	01/06/96  Erik Stahlman  cleaned up some, better testing, etc
- .	01/29/96  Erik Stahlman  fixed autoirq, added multicast
- .	02/01/96  Erik Stahlman  1. disabled all interrupts in smc_reset
- .				 2. got rid of post-decrementing bug -- UGH.
- .	02/13/96  Erik Stahlman  Tried to fix autoirq failure.  Added more
- .				 descriptive error messages.
- .	02/15/96  Erik Stahlman  Fixed typo that caused detection failure
- .	02/23/96  Erik Stahlman  Modified it to fit into kernel tree
- .				 Added support to change hardware address
- .				 Cleared stats on opens
- .	02/26/96  Erik Stahlman  Trial support for Kernel 1.2.13
- .				 Kludge for automatic IRQ detection
- .	03/04/96  Erik Stahlman  Fixed kernel 1.3.70 +
- .				 Fixed bug reported by Gardner Buchanan in
- .				 smc_enable, with outw instead of outb
- .	03/06/96  Erik Stahlman  Added hardware multicast from Peter Cammaert
- .	04/14/00  Heiko Pruessing (SMA Regelsysteme)  Fixed bug in chip memory
- .				 allocation
- .	08/20/00  Arnaldo Melo   fix kfree(skb) in smc_hardware_send_packet
- .	12/15/00  Christian Jullien fix "Warning: kfree_skb on hard IRQ"
- .	11/08/01  Matt Domsch    Use common crc32 function
- ----------------------------------------------------------------------------*/
-
-static const char version[] =
-	"smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-
-#include "smc9194.h"
-
-#define DRV_NAME "smc9194"
-
-/*------------------------------------------------------------------------
- .
- . Configuration options, for the experienced user to change.
- .
- -------------------------------------------------------------------------*/
-
-/*
- . Do you want to use 32 bit xfers?  This should work on all chips, as
- . the chipset is designed to accommodate them.
-*/
-#ifdef __sh__
-#undef USE_32_BIT
-#else
-#define USE_32_BIT 1
-#endif
-
-#if defined(__H8300H__) || defined(__H8300S__)
-#define NO_AUTOPROBE
-#undef insl
-#undef outsl
-#define insl(a,b,l)	io_insl_noswap(a,b,l)
-#define outsl(a,b,l)	io_outsl_noswap(a,b,l)
-#endif
-
-/*
- .the SMC9194 can be at any of the following port addresses.  To change,
- .for a slightly different card, you can add it to the array.  Keep in
- .mind that the array must end in zero.
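 . For instance, a card strapped to a port not probed by default, with a
 . known IRQ (the values here are purely illustrative), would get one
 . extra line:
 .    {.port = 0x250, .irq = 5},
 . inserted anywhere before the terminating {.port = 0, .irq = 0} entry.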
-*/ - -struct devlist { - unsigned int port; - unsigned int irq; -}; - -#if defined(CONFIG_H8S_EDOSK2674) -static struct devlist smc_devlist[] __initdata = { - {.port = 0xf80000, .irq = 16}, - {.port = 0, .irq = 0 }, -}; -#else -static struct devlist smc_devlist[] __initdata = { - {.port = 0x200, .irq = 0}, - {.port = 0x220, .irq = 0}, - {.port = 0x240, .irq = 0}, - {.port = 0x260, .irq = 0}, - {.port = 0x280, .irq = 0}, - {.port = 0x2A0, .irq = 0}, - {.port = 0x2C0, .irq = 0}, - {.port = 0x2E0, .irq = 0}, - {.port = 0x300, .irq = 0}, - {.port = 0x320, .irq = 0}, - {.port = 0x340, .irq = 0}, - {.port = 0x360, .irq = 0}, - {.port = 0x380, .irq = 0}, - {.port = 0x3A0, .irq = 0}, - {.port = 0x3C0, .irq = 0}, - {.port = 0x3E0, .irq = 0}, - {.port = 0, .irq = 0}, -}; -#endif -/* - . Wait time for memory to be free. This probably shouldn't be - . tuned that much, as waiting for this means nothing else happens - . in the system -*/ -#define MEMORY_WAIT_TIME 16 - -/* - . DEBUGGING LEVELS - . - . 0 for normal operation - . 1 for slightly more details - . >2 for various levels of increasingly useless information - . 2 for interrupt tracking, status flags - . 3 for packet dumps, etc. -*/ -#define SMC_DEBUG 0 - -#if (SMC_DEBUG > 2 ) -#define PRINTK3(x) printk x -#else -#define PRINTK3(x) -#endif - -#if SMC_DEBUG > 1 -#define PRINTK2(x) printk x -#else -#define PRINTK2(x) -#endif - -#ifdef SMC_DEBUG -#define PRINTK(x) printk x -#else -#define PRINTK(x) -#endif - - -/*------------------------------------------------------------------------ - . - . The internal workings of the driver. If you are changing anything - . here with the SMC stuff, you should have the datasheet and known - . what you are doing. - . - -------------------------------------------------------------------------*/ -#define CARDNAME "SMC9194" - - -/* store this information for the driver.. */ -struct smc_local { - /* - If I have to wait until memory is available to send - a packet, I will store the skbuff here, until I get the - desired memory. Then, I'll send it out and free it. - */ - struct sk_buff * saved_skb; - - /* - . This keeps track of how many packets that I have - . sent out. When an TX_EMPTY interrupt comes, I know - . that all of these have been sent. - */ - int packets_waiting; -}; - - -/*----------------------------------------------------------------- - . - . The driver can be entered at any of the following entry points. - . - .------------------------------------------------------------------ */ - -/* - . This is called by register_netdev(). It is responsible for - . checking the portlist for the SMC9000 series chipset. If it finds - . one, then it will initialize the device, find the hardware information, - . and sets up the appropriate device parameters. - . NOTE: Interrupts are *OFF* when this procedure is called. - . - . NB:This shouldn't be static since it is referred to externally. -*/ -struct net_device *smc_init(int unit); - -/* - . The kernel calls this function when someone wants to use the device, - . typically 'ifconfig ethX up'. -*/ -static int smc_open(struct net_device *dev); - -/* - . Our watchdog timed out. Called by the networking layer -*/ -static void smc_timeout(struct net_device *dev); - -/* - . This is called by the kernel in response to 'ifconfig ethX down'. It - . is responsible for cleaning up everything that the open routine - . does, and maybe putting the card into a powerdown state. -*/ -static int smc_close(struct net_device *dev); - -/* - . 
Finally, a call to set promiscuous mode ( for TCPDUMP and related - . programs ) and multicast modes. -*/ -static void smc_set_multicast_list(struct net_device *dev); - - -/*--------------------------------------------------------------- - . - . Interrupt level calls.. - . - ----------------------------------------------------------------*/ - -/* - . Handles the actual interrupt -*/ -static irqreturn_t smc_interrupt(int irq, void *); -/* - . This is a separate procedure to handle the receipt of a packet, to - . leave the interrupt code looking slightly cleaner -*/ -static inline void smc_rcv( struct net_device *dev ); -/* - . This handles a TX interrupt, which is only called when an error - . relating to a packet is sent. -*/ -static inline void smc_tx( struct net_device * dev ); - -/* - ------------------------------------------------------------ - . - . Internal routines - . - ------------------------------------------------------------ -*/ - -/* - . Test if a given location contains a chip, trying to cause as - . little damage as possible if it's not a SMC chip. -*/ -static int smc_probe(struct net_device *dev, int ioaddr); - -/* - . A rather simple routine to print out a packet for debugging purposes. -*/ -#if SMC_DEBUG > 2 -static void print_packet( byte *, int ); -#endif - -#define tx_done(dev) 1 - -/* this is called to actually send the packet to the chip */ -static void smc_hardware_send_packet( struct net_device * dev ); - -/* Since I am not sure if I will have enough room in the chip's ram - . to store the packet, I call this routine, which either sends it - . now, or generates an interrupt when the card is ready for the - . packet */ -static netdev_tx_t smc_wait_to_send_packet( struct sk_buff * skb, - struct net_device *dev ); - -/* this does a soft reset on the device */ -static void smc_reset( int ioaddr ); - -/* Enable Interrupts, Receive, and Transmit */ -static void smc_enable( int ioaddr ); - -/* this puts the device in an inactive state */ -static void smc_shutdown( int ioaddr ); - -/* This routine will find the IRQ of the driver if one is not - . specified in the input to the device. */ -static int smc_findirq( int ioaddr ); - -/* - . Function: smc_reset( int ioaddr ) - . Purpose: - . This sets the SMC91xx chip to its normal state, hopefully from whatever - . mess that any other DOS driver has put it in. - . - . Maybe I should reset more registers to defaults in here? SOFTRESET should - . do that for me. - . - . Method: - . 1. send a SOFT RESET - . 2. wait for it to finish - . 3. enable autorelease mode - . 4. reset the memory management unit - . 5. clear all interrupts - . -*/ -static void smc_reset( int ioaddr ) -{ - /* This resets the registers mostly to defaults, but doesn't - affect EEPROM. That seems unnecessary */ - SMC_SELECT_BANK( 0 ); - outw( RCR_SOFTRESET, ioaddr + RCR ); - - /* this should pause enough for the chip to be happy */ - SMC_DELAY( ); - - /* Set the transmit and receive configuration registers to - default values */ - outw( RCR_CLEAR, ioaddr + RCR ); - outw( TCR_CLEAR, ioaddr + TCR ); - - /* set the control register to automatically - release successfully transmitted packets, to make the best - use out of our limited memory */ - SMC_SELECT_BANK( 1 ); - outw( inw( ioaddr + CONTROL ) | CTL_AUTO_RELEASE , ioaddr + CONTROL ); - - /* Reset the MMU */ - SMC_SELECT_BANK( 2 ); - outw( MC_RESET, ioaddr + MMU_CMD ); - - /* Note: It doesn't seem that waiting for the MMU busy is needed here, - but this is a place where future chipsets _COULD_ break. 
Be wary
-	   of issuing another MMU command right after this */
-
-	outb( 0, ioaddr + INT_MASK );
-}
-
-/*
- . Function: smc_enable
- . Purpose: let the chip talk to the outside world
- . Method:
- .	1.  Enable the transmitter
- .	2.  Enable the receiver
- .	3.  Enable interrupts
-*/
-static void smc_enable( int ioaddr )
-{
-	SMC_SELECT_BANK( 0 );
-	/* see the header file for options in TCR/RCR NORMAL*/
-	outw( TCR_NORMAL, ioaddr + TCR );
-	outw( RCR_NORMAL, ioaddr + RCR );
-
-	/* now, enable interrupts */
-	SMC_SELECT_BANK( 2 );
-	outb( SMC_INTERRUPT_MASK, ioaddr + INT_MASK );
-}
-
-/*
- . Function: smc_shutdown
- . Purpose:  closes down the SMC91xxx chip.
- . Method:
- .	1. zero the interrupt mask
- .	2. clear the enable receive flag
- .	3. clear the enable xmit flags
- .
- . TODO:
- .   (1) maybe utilize power down mode.
- .	Why not yet?  Because while the chip will go into power down mode,
- .	the manual says that it will wake up in response to any I/O requests
- .	in the register space.  Empirical results do not show this working.
-*/
-static void smc_shutdown( int ioaddr )
-{
-	/* no more interrupts for me */
-	SMC_SELECT_BANK( 2 );
-	outb( 0, ioaddr + INT_MASK );
-
-	/* and tell the card to stay away from that nasty outside world */
-	SMC_SELECT_BANK( 0 );
-	outb( RCR_CLEAR, ioaddr + RCR );
-	outb( TCR_CLEAR, ioaddr + TCR );
-#if 0
-	/* finally, shut the chip down */
-	SMC_SELECT_BANK( 1 );
-	outw( inw( ioaddr + CONTROL ), CTL_POWERDOWN, ioaddr + CONTROL );
-#endif
-}
-
-
-/*
- . Function: smc_setmulticast( int ioaddr, struct net_device *dev )
- . Purpose:
- .    This sets the internal hardware table to filter out unwanted multicast
- .    packets before they take up memory.
- .
- .    The SMC chip uses a hash table where the high 6 bits of the CRC of
- .    address are the offset into the table.  If that bit is 1, then the
- .    multicast packet is accepted.  Otherwise, it's dropped silently.
- .
- .    To use the 6 bits as an offset into the table, the high 3 bits are the
- .    number of the 8 bit register, while the low 3 bits are the bit within
- .    that register.
- .
- . This routine is based very heavily on the one provided by Peter Cammaert.
-*/
-
-
-static void smc_setmulticast(int ioaddr, struct net_device *dev)
-{
-	int i;
-	unsigned char multicast_table[ 8 ];
-	struct netdev_hw_addr *ha;
-	/* table for flipping the order of 3 bits */
-	unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
-
-	/* start with a table of all zeros: reject all */
-	memset( multicast_table, 0, sizeof( multicast_table ) );
-
-	netdev_for_each_mc_addr(ha, dev) {
-		int position;
-
-		/* only use the low order bits */
-		position = ether_crc_le(6, ha->addr) & 0x3f;
-
-		/* do some messy swapping to put the bit in the right spot */
-		multicast_table[invert3[position&7]] |=
-			(1<<invert3[(position>>3)&7]);
-
-	}
-	/* now, the table can be loaded into the chipset */
-	SMC_SELECT_BANK( 3 );
-
-	for ( i = 0; i < 8 ; i++ ) {
-		outb( multicast_table[i], ioaddr + MULTICAST1 + i );
-	}
-}
-
-/*
- . Function: smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * )
- . Purpose:
- .    Attempt to allocate memory for a packet, if chip-memory is not
- .    available, then tell the card to generate an interrupt when it
- .    is available.
- .
- . Algorithm:
- .
- . o	if the saved_skb is not currently null, then drop this packet
- .	on the floor.  This should never happen, because of TBUSY.
- . o	if the saved_skb is null, then replace it with the current packet,
- . o	See if I can send it now.
- .
o (NO): Enable interrupts and let the interrupt handler deal with it. - . o (YES):Send it now. -*/ -static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - unsigned int ioaddr = dev->base_addr; - word length; - unsigned short numPages; - word time_out; - - netif_stop_queue(dev); - /* Well, I want to send the packet.. but I don't know - if I can send it right now... */ - - if ( lp->saved_skb) { - /* THIS SHOULD NEVER HAPPEN. */ - dev->stats.tx_aborted_errors++; - printk(CARDNAME": Bad Craziness - sent packet while busy.\n" ); - return NETDEV_TX_BUSY; - } - lp->saved_skb = skb; - - length = skb->len; - - if (length < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) { - netif_wake_queue(dev); - return NETDEV_TX_OK; - } - length = ETH_ZLEN; - } - - /* - ** The MMU wants the number of pages to be the number of 256 bytes - ** 'pages', minus 1 ( since a packet can't ever have 0 pages :) ) - ** - ** Pkt size for allocating is data length +6 (for additional status words, - ** length and ctl!) If odd size last byte is included in this header. - */ - numPages = ((length & 0xfffe) + 6) / 256; - - if (numPages > 7 ) { - printk(CARDNAME": Far too big packet error.\n"); - /* freeing the packet is a good thing here... but should - . any packets of this size get down here? */ - dev_kfree_skb (skb); - lp->saved_skb = NULL; - /* this IS an error, but, i don't want the skb saved */ - netif_wake_queue(dev); - return NETDEV_TX_OK; - } - /* either way, a packet is waiting now */ - lp->packets_waiting++; - - /* now, try to allocate the memory */ - SMC_SELECT_BANK( 2 ); - outw( MC_ALLOC | numPages, ioaddr + MMU_CMD ); - /* - . Performance Hack - . - . wait a short amount of time.. if I can send a packet now, I send - . it now. Otherwise, I enable an interrupt and wait for one to be - . available. - . - . I could have handled this a slightly different way, by checking to - . see if any memory was available in the FREE MEMORY register. However, - . either way, I need to generate an allocation, and the allocation works - . no matter what, so I saw no point in checking free memory. - */ - time_out = MEMORY_WAIT_TIME; - do { - word status; - - status = inb( ioaddr + INTERRUPT ); - if ( status & IM_ALLOC_INT ) { - /* acknowledge the interrupt */ - outb( IM_ALLOC_INT, ioaddr + INTERRUPT ); - break; - } - } while ( -- time_out ); - - if ( !time_out ) { - /* oh well, wait until the chip finds memory later */ - SMC_ENABLE_INT( IM_ALLOC_INT ); - PRINTK2((CARDNAME": memory allocation deferred.\n")); - /* it's deferred, but I'll handle it later */ - return NETDEV_TX_OK; - } - /* or YES! I can send the packet now.. */ - smc_hardware_send_packet(dev); - netif_wake_queue(dev); - return NETDEV_TX_OK; -} - -/* - . Function: smc_hardware_send_packet(struct net_device * ) - . Purpose: - . This sends the actual packet to the SMC9xxx chip. - . - . Algorithm: - . First, see if a saved_skb is available. - . ( this should NOT be called if there is no 'saved_skb' - . Now, find the packet number that the chip allocated - . Point the data pointers at it in memory - . Set the length word in the chip's memory - . Dump the packet to chip memory - . Check if a last byte is needed ( odd length packet ) - . if so, set the control flag right - . Tell the card to send it - . Enable the transmit interrupt, so I know if it failed - . Free the kernel data if I actually sent it. 
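To make the page arithmetic in smc_wait_to_send_packet() above concrete: the MMU thinks in 256-byte pages and wants the page count minus one, with 6 bytes of status/length/control overhead charged against the packet. Worked through the formula used above (plain arithmetic, no new driver code):

    numPages = ((length & 0xfffe) + 6) / 256;

    /* length =   60 (min frame): (  60 + 6) / 256 = 0 -> MC_ALLOC | 0, one page  */
    /* length = 1514 (max frame): (1514 + 6) / 256 = 5 -> MC_ALLOC | 5, six pages */
    /* an even length of 2042 or more gives numPages > 7: the "Far too big" case  */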
-*/ -static void smc_hardware_send_packet( struct net_device * dev ) -{ - struct smc_local *lp = netdev_priv(dev); - byte packet_no; - struct sk_buff * skb = lp->saved_skb; - word length; - unsigned int ioaddr; - byte * buf; - - ioaddr = dev->base_addr; - - if ( !skb ) { - PRINTK((CARDNAME": In XMIT with no packet to send\n")); - return; - } - length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; - buf = skb->data; - - /* If I get here, I _know_ there is a packet slot waiting for me */ - packet_no = inb( ioaddr + PNR_ARR + 1 ); - if ( packet_no & 0x80 ) { - /* or isn't there? BAD CHIP! */ - printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n"); - dev_kfree_skb_any(skb); - lp->saved_skb = NULL; - netif_wake_queue(dev); - return; - } - - /* we have a packet address, so tell the card to use it */ - outb( packet_no, ioaddr + PNR_ARR ); - - /* point to the beginning of the packet */ - outw( PTR_AUTOINC , ioaddr + POINTER ); - - PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length )); -#if SMC_DEBUG > 2 - print_packet( buf, length ); -#endif - - /* send the packet length ( +6 for status, length and ctl byte ) - and the status word ( set to zeros ) */ -#ifdef USE_32_BIT - outl( (length +6 ) << 16 , ioaddr + DATA_1 ); -#else - outw( 0, ioaddr + DATA_1 ); - /* send the packet length ( +6 for status words, length, and ctl*/ - outb( (length+6) & 0xFF,ioaddr + DATA_1 ); - outb( (length+6) >> 8 , ioaddr + DATA_1 ); -#endif - - /* send the actual data - . I _think_ it's faster to send the longs first, and then - . mop up by sending the last word. It depends heavily - . on alignment, at least on the 486. Maybe it would be - . a good idea to check which is optimal? But that could take - . almost as much time as is saved? - */ -#ifdef USE_32_BIT - if ( length & 0x2 ) { - outsl(ioaddr + DATA_1, buf, length >> 2 ); -#if !defined(__H8300H__) && !defined(__H8300S__) - outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1); -#else - ctrl_outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1); -#endif - } - else - outsl(ioaddr + DATA_1, buf, length >> 2 ); -#else - outsw(ioaddr + DATA_1 , buf, (length ) >> 1); -#endif - /* Send the last byte, if there is one. 
*/ - - if ( (length & 1) == 0 ) { - outw( 0, ioaddr + DATA_1 ); - } else { - outb( buf[length -1 ], ioaddr + DATA_1 ); - outb( 0x20, ioaddr + DATA_1); - } - - /* enable the interrupts */ - SMC_ENABLE_INT( (IM_TX_INT | IM_TX_EMPTY_INT) ); - - /* and let the chipset deal with it */ - outw( MC_ENQUEUE , ioaddr + MMU_CMD ); - - PRINTK2((CARDNAME": Sent packet of length %d\n", length)); - - lp->saved_skb = NULL; - dev_kfree_skb_any (skb); - - dev->trans_start = jiffies; - - /* we can send another packet */ - netif_wake_queue(dev); -} - -/*------------------------------------------------------------------------- - | - | smc_init(int unit) - | Input parameters: - | dev->base_addr == 0, try to find all possible locations - | dev->base_addr == 1, return failure code - | dev->base_addr == 2, always allocate space, and return success - | dev->base_addr == this is the address to check - | - | Output: - | pointer to net_device or ERR_PTR(error) - | - --------------------------------------------------------------------------- -*/ -static int io; -static int irq; -static int ifport; - -struct net_device * __init smc_init(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct smc_local)); - struct devlist *smcdev = smc_devlist; - int err = 0; - - if (!dev) - return ERR_PTR(-ENODEV); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - } - - if (io > 0x1ff) { /* Check a single specified location. */ - err = smc_probe(dev, io); - } else if (io != 0) { /* Don't probe at all. */ - err = -ENXIO; - } else { - for (;smcdev->port; smcdev++) { - if (smc_probe(dev, smcdev->port) == 0) - break; - } - if (!smcdev->port) - err = -ENODEV; - } - if (err) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - free_irq(dev->irq, dev); - release_region(dev->base_addr, SMC_IO_EXTENT); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -/*---------------------------------------------------------------------- - . smc_findirq - . - . This routine has a simple purpose -- make the SMC chip generate an - . interrupt, so an auto-detect routine can detect it, and find the IRQ, - ------------------------------------------------------------------------ -*/ -static int __init smc_findirq(int ioaddr) -{ -#ifndef NO_AUTOPROBE - int timeout = 20; - unsigned long cookie; - - - cookie = probe_irq_on(); - - /* - * What I try to do here is trigger an ALLOC_INT. This is done - * by allocating a small chunk of memory, which will give an interrupt - * when done. - */ - - - SMC_SELECT_BANK(2); - /* enable ALLOCation interrupts ONLY */ - outb( IM_ALLOC_INT, ioaddr + INT_MASK ); - - /* - . Allocate 512 bytes of memory. Note that the chip was just - . reset so all the memory is available - */ - outw( MC_ALLOC | 1, ioaddr + MMU_CMD ); - - /* - . Wait until positive that the interrupt has been generated - */ - while ( timeout ) { - byte int_status; - - int_status = inb( ioaddr + INTERRUPT ); - - if ( int_status & IM_ALLOC_INT ) - break; /* got the interrupt */ - timeout--; - } - /* there is really nothing that I can do here if timeout fails, - as probe_irq_off will return a 0 anyway, which is what I - want in this case. Plus, the clean up is needed in both - cases. */ - - /* DELAY HERE! - On a fast machine, the status might change before the interrupt - is given to the processor. This means that the interrupt was - never detected, and probe_irq_off fails to report anything. - This should fix probe_irq_* problems. 
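The IRQ hunt above is the kernel's stock probe_irq_on()/probe_irq_off() pattern; stripped of the chip specifics it reduces to the sketch below. Only the two probe_irq_*() calls are real kernel API here; the trigger step is whatever makes the device raise exactly one interrupt:

    unsigned long cookie;
    int irq;

    cookie = probe_irq_on();        /* arm detection on all unassigned IRQs  */

    /* ... make the device fire one interrupt (above: a 1-page MC_ALLOC),    */
    /* ... then wait long enough for it to actually reach the interrupt pin  */

    irq = probe_irq_off(cookie);    /* > 0: the IRQ; <= 0: none or ambiguous */

That is also why the SMC_DELAY() calls matter: if the status is polled and cleared before the interrupt propagates, probe_irq_off() reports nothing.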
- */ - SMC_DELAY(); - SMC_DELAY(); - - /* and disable all interrupts again */ - outb( 0, ioaddr + INT_MASK ); - - /* and return what I found */ - return probe_irq_off(cookie); -#else /* NO_AUTOPROBE */ - struct devlist *smcdev; - for (smcdev = smc_devlist; smcdev->port; smcdev++) { - if (smcdev->port == ioaddr) - return smcdev->irq; - } - return 0; -#endif -} - -static const struct net_device_ops smc_netdev_ops = { - .ndo_open = smc_open, - .ndo_stop = smc_close, - .ndo_start_xmit = smc_wait_to_send_packet, - .ndo_tx_timeout = smc_timeout, - .ndo_set_multicast_list = smc_set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/*---------------------------------------------------------------------- - . Function: smc_probe( int ioaddr ) - . - . Purpose: - . Tests to see if a given ioaddr points to an SMC9xxx chip. - . Returns a 0 on success - . - . Algorithm: - . (1) see if the high byte of BANK_SELECT is 0x33 - . (2) compare the ioaddr with the base register's address - . (3) see if I recognize the chip ID in the appropriate register - . - .--------------------------------------------------------------------- - */ - -/*--------------------------------------------------------------- - . Here I do typical initialization tasks. - . - . o Initialize the structure if needed - . o print out my vanity message if not done so already - . o print out what type of hardware is detected - . o print out the ethernet address - . o find the IRQ - . o set up my private data - . o configure the dev structure with my subroutines - . o actually GRAB the irq. - . o GRAB the region - .----------------------------------------------------------------- -*/ -static int __init smc_probe(struct net_device *dev, int ioaddr) -{ - int i, memory, retval; - static unsigned version_printed; - unsigned int bank; - - const char *version_string; - const char *if_string; - - /* registers */ - word revision_register; - word base_address_register; - word configuration_register; - word memory_info_register; - word memory_cfg_register; - - /* Grab the region so that no one else tries to probe our ioports. */ - if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME)) - return -EBUSY; - - dev->irq = irq; - dev->if_port = ifport; - - /* First, see if the high byte is 0x33 */ - bank = inw( ioaddr + BANK_SELECT ); - if ( (bank & 0xFF00) != 0x3300 ) { - retval = -ENODEV; - goto err_out; - } - /* The above MIGHT indicate a device, but I need to write to further - test this. */ - outw( 0x0, ioaddr + BANK_SELECT ); - bank = inw( ioaddr + BANK_SELECT ); - if ( (bank & 0xFF00 ) != 0x3300 ) { - retval = -ENODEV; - goto err_out; - } -#if !defined(CONFIG_H8S_EDOSK2674) - /* well, we've already written once, so hopefully another time won't - hurt. This time, I need to switch the bank register to bank 1, - so I can access the base address register */ - SMC_SELECT_BANK(1); - base_address_register = inw( ioaddr + BASE ); - if ( ioaddr != ( base_address_register >> 3 & 0x3E0 ) ) { - printk(CARDNAME ": IOADDR %x doesn't match configuration (%x). " - "Probably not a SMC chip\n", - ioaddr, base_address_register >> 3 & 0x3E0 ); - /* well, the base address register didn't match. Must not have - been a SMC chip after all. */ - retval = -ENODEV; - goto err_out; - } -#else - (void)base_address_register; /* Warning suppression */ -#endif - - - /* check if the revision register is something that I recognize. 
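As a concrete decode of the check that follows: REVISION carries the chip id in the upper nibble of its low byte and the silicon revision in the lower nibble. Assuming a hypothetical readback of 0x3349:

    word rev  = inw(ioaddr + REVISION);     /* say 0x3349                     */
    int  chip = (rev >> 4) & 0xF;           /* 4 -> chip_ids[4] == "SMC91C94" */
    int  step =  rev       & 0xF;           /* 9 -> revision 9 (the "r:%d")   */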
- These might need to be added to later, as future revisions - could be added. */ - SMC_SELECT_BANK(3); - revision_register = inw( ioaddr + REVISION ); - if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) { - /* I don't recognize this chip, so... */ - printk(CARDNAME ": IO %x: Unrecognized revision register:" - " %x, Contact author.\n", ioaddr, revision_register); - - retval = -ENODEV; - goto err_out; - } - - /* at this point I'll assume that the chip is an SMC9xxx. - It might be prudent to check a listing of MAC addresses - against the hardware address, or do some other tests. */ - - if (version_printed++ == 0) - printk("%s", version); - - /* fill in some of the fields */ - dev->base_addr = ioaddr; - - /* - . Get the MAC address ( bank 1, regs 4 - 9 ) - */ - SMC_SELECT_BANK( 1 ); - for ( i = 0; i < 6; i += 2 ) { - word address; - - address = inw( ioaddr + ADDR0 + i ); - dev->dev_addr[ i + 1] = address >> 8; - dev->dev_addr[ i ] = address & 0xFF; - } - - /* get the memory information */ - - SMC_SELECT_BANK( 0 ); - memory_info_register = inw( ioaddr + MIR ); - memory_cfg_register = inw( ioaddr + MCR ); - memory = ( memory_cfg_register >> 9 ) & 0x7; /* multiplier */ - memory *= 256 * ( memory_info_register & 0xFF ); - - /* - Now, I want to find out more about the chip. This is sort of - redundant, but it's cleaner to have it in both, rather than having - one VERY long probe procedure. - */ - SMC_SELECT_BANK(3); - revision_register = inw( ioaddr + REVISION ); - version_string = chip_ids[ ( revision_register >> 4 ) & 0xF ]; - if ( !version_string ) { - /* I shouldn't get here because this call was done before.... */ - retval = -ENODEV; - goto err_out; - } - - /* is it using AUI or 10BaseT ? */ - if ( dev->if_port == 0 ) { - SMC_SELECT_BANK(1); - configuration_register = inw( ioaddr + CONFIG ); - if ( configuration_register & CFG_AUI_SELECT ) - dev->if_port = 2; - else - dev->if_port = 1; - } - if_string = interfaces[ dev->if_port - 1 ]; - - /* now, reset the chip, and put it into a known state */ - smc_reset( ioaddr ); - - /* - . If dev->irq is 0, then the device has to be banged on to see - . what the IRQ is. - . - . This banging doesn't always detect the IRQ, for unknown reasons. - . a workaround is to reset the chip and try again. - . - . Interestingly, the DOS packet driver *SETS* the IRQ on the card to - . be what is requested on the command line. I don't do that, mostly - . because the card that I have uses a non-standard method of accessing - . the IRQs, and because this _should_ work in most configurations. - . - . Specifying an IRQ is done with the assumption that the user knows - . what (s)he is doing. No checking is done!!!! - . - */ - if ( dev->irq < 2 ) { - int trials; - - trials = 3; - while ( trials-- ) { - dev->irq = smc_findirq( ioaddr ); - if ( dev->irq ) - break; - /* kick the card and try again */ - smc_reset( ioaddr ); - } - } - if (dev->irq == 0 ) { - printk(CARDNAME": Couldn't autodetect your IRQ. Use irq=xx.\n"); - retval = -ENODEV; - goto err_out; - } - - /* now, print out the card info, in a short format.. */ - - printk("%s: %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", dev->name, - version_string, revision_register & 0xF, ioaddr, dev->irq, - if_string, memory ); - /* - . 
Print the Ethernet address - */ - printk("ADDR: %pM\n", dev->dev_addr); - - /* Grab the IRQ */ - retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev); - if (retval) { - printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME, - dev->irq, retval); - goto err_out; - } - - dev->netdev_ops = &smc_netdev_ops; - dev->watchdog_timeo = HZ/20; - - return 0; - -err_out: - release_region(ioaddr, SMC_IO_EXTENT); - return retval; -} - -#if SMC_DEBUG > 2 -static void print_packet( byte * buf, int length ) -{ -#if 0 - int i; - int remainder; - int lines; - - printk("Packet of length %d\n", length); - lines = length / 16; - remainder = length % 16; - - for ( i = 0; i < lines ; i ++ ) { - int cur; - - for ( cur = 0; cur < 8; cur ++ ) { - byte a, b; - - a = *(buf ++ ); - b = *(buf ++ ); - printk("%02x%02x ", a, b ); - } - printk("\n"); - } - for ( i = 0; i < remainder/2 ; i++ ) { - byte a, b; - - a = *(buf ++ ); - b = *(buf ++ ); - printk("%02x%02x ", a, b ); - } - printk("\n"); -#endif -} -#endif - - -/* - * Open and Initialize the board - * - * Set up everything, reset the card, etc .. - * - */ -static int smc_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - int i; /* used to set hw ethernet address */ - - /* clear out all the junk that was put here before... */ - memset(netdev_priv(dev), 0, sizeof(struct smc_local)); - - /* reset the hardware */ - - smc_reset( ioaddr ); - smc_enable( ioaddr ); - - /* Select which interface to use */ - - SMC_SELECT_BANK( 1 ); - if ( dev->if_port == 1 ) { - outw( inw( ioaddr + CONFIG ) & ~CFG_AUI_SELECT, - ioaddr + CONFIG ); - } - else if ( dev->if_port == 2 ) { - outw( inw( ioaddr + CONFIG ) | CFG_AUI_SELECT, - ioaddr + CONFIG ); - } - - /* - According to Becker, I have to set the hardware address - at this point, because the (l)user can set it with an - ioctl. Easily done... - */ - SMC_SELECT_BANK( 1 ); - for ( i = 0; i < 6; i += 2 ) { - word address; - - address = dev->dev_addr[ i + 1 ] << 8 ; - address |= dev->dev_addr[ i ]; - outw( address, ioaddr + ADDR0 + i ); - } - - netif_start_queue(dev); - return 0; -} - -/*-------------------------------------------------------- - . Called by the kernel to send a packet out into the void - . of the net. This routine is largely based on - . skeleton.c, from Becker. - .-------------------------------------------------------- -*/ - -static void smc_timeout(struct net_device *dev) -{ - /* If we get here, some higher level has decided we are broken. - There should really be a "kick me" function call instead. */ - printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n", - tx_done(dev) ? "IRQ conflict" : - "network cable problem"); - /* "kick" the adaptor */ - smc_reset( dev->base_addr ); - smc_enable( dev->base_addr ); - dev->trans_start = jiffies; /* prevent tx timeout */ - /* clear anything saved */ - ((struct smc_local *)netdev_priv(dev))->saved_skb = NULL; - netif_wake_queue(dev); -} - -/*------------------------------------------------------------- - . - . smc_rcv - receive a packet from the card - . - . There is ( at least ) a packet waiting to be read from - . chip-memory. - . - . o Read the status - . o If an error, record it - . 
o otherwise, read in the packet - -------------------------------------------------------------- -*/ -static void smc_rcv(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - int packet_number; - word status; - word packet_length; - - /* assume bank 2 */ - - packet_number = inw( ioaddr + FIFO_PORTS ); - - if ( packet_number & FP_RXEMPTY ) { - /* we got called , but nothing was on the FIFO */ - PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n")); - /* don't need to restore anything */ - return; - } - - /* start reading from the start of the packet */ - outw( PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER ); - - /* First two words are status and packet_length */ - status = inw( ioaddr + DATA_1 ); - packet_length = inw( ioaddr + DATA_1 ); - - packet_length &= 0x07ff; /* mask off top bits */ - - PRINTK2(("RCV: STATUS %4x LENGTH %4x\n", status, packet_length )); - /* - . the packet length contains 3 extra words : - . status, length, and an extra word with an odd byte . - */ - packet_length -= 6; - - if ( !(status & RS_ERRORS ) ){ - /* do stuff to make a new packet */ - struct sk_buff * skb; - byte * data; - - /* read one extra byte */ - if ( status & RS_ODDFRAME ) - packet_length++; - - /* set multicast stats */ - if ( status & RS_MULTICAST ) - dev->stats.multicast++; - - skb = dev_alloc_skb( packet_length + 5); - - if ( skb == NULL ) { - printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n"); - dev->stats.rx_dropped++; - goto done; - } - - /* - ! This should work without alignment, but it could be - ! in the worse case - */ - - skb_reserve( skb, 2 ); /* 16 bit alignment */ - - data = skb_put( skb, packet_length); - -#ifdef USE_32_BIT - /* QUESTION: Like in the TX routine, do I want - to send the DWORDs or the bytes first, or some - mixture. A mixture might improve already slow PIO - performance */ - PRINTK3((" Reading %d dwords (and %d bytes)\n", - packet_length >> 2, packet_length & 3 )); - insl(ioaddr + DATA_1 , data, packet_length >> 2 ); - /* read the left over bytes */ - insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC), - packet_length & 0x3 ); -#else - PRINTK3((" Reading %d words and %d byte(s)\n", - (packet_length >> 1 ), packet_length & 1 )); - insw(ioaddr + DATA_1 , data, packet_length >> 1); - if ( packet_length & 1 ) { - data += packet_length & ~1; - *(data++) = inb( ioaddr + DATA_1 ); - } -#endif -#if SMC_DEBUG > 2 - print_packet( data, packet_length ); -#endif - - skb->protocol = eth_type_trans(skb, dev ); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += packet_length; - } else { - /* error ... */ - dev->stats.rx_errors++; - - if ( status & RS_ALGNERR ) dev->stats.rx_frame_errors++; - if ( status & (RS_TOOSHORT | RS_TOOLONG ) ) - dev->stats.rx_length_errors++; - if ( status & RS_BADCRC) dev->stats.rx_crc_errors++; - } - -done: - /* error or good, tell the card to get rid of this packet */ - outw( MC_RELEASE, ioaddr + MMU_CMD ); -} - - -/************************************************************************* - . smc_tx - . - . Purpose: Handle a transmit error message. This will only be called - . when an error, because of the AUTO_RELEASE mode. - . - . Algorithm: - . Save pointer and packet no - . Get the packet no from the top of the queue - . check if it's valid ( if not, is this an error??? ) - . read the status word - . record the error - . ( resend? Not really, since we don't want old packets around ) - . 
Restore saved values - ************************************************************************/ -static void smc_tx( struct net_device * dev ) -{ - int ioaddr = dev->base_addr; - struct smc_local *lp = netdev_priv(dev); - byte saved_packet; - byte packet_no; - word tx_status; - - - /* assume bank 2 */ - - saved_packet = inb( ioaddr + PNR_ARR ); - packet_no = inw( ioaddr + FIFO_PORTS ); - packet_no &= 0x7F; - - /* select this as the packet to read from */ - outb( packet_no, ioaddr + PNR_ARR ); - - /* read the first word from this packet */ - outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER ); - - tx_status = inw( ioaddr + DATA_1 ); - PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status)); - - dev->stats.tx_errors++; - if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++; - if ( tx_status & TS_LATCOL ) { - printk(KERN_DEBUG CARDNAME - ": Late collision occurred on last xmit.\n"); - dev->stats.tx_window_errors++; - } -#if 0 - if ( tx_status & TS_16COL ) { ... } -#endif - - if ( tx_status & TS_SUCCESS ) { - printk(CARDNAME": Successful packet caused interrupt\n"); - } - /* re-enable transmit */ - SMC_SELECT_BANK( 0 ); - outw( inw( ioaddr + TCR ) | TCR_ENABLE, ioaddr + TCR ); - - /* kill the packet */ - SMC_SELECT_BANK( 2 ); - outw( MC_FREEPKT, ioaddr + MMU_CMD ); - - /* one less packet waiting for me */ - lp->packets_waiting--; - - outb( saved_packet, ioaddr + PNR_ARR ); -} - -/*-------------------------------------------------------------------- - . - . This is the main routine of the driver, to handle the device when - . it needs some attention. - . - . So: - . first, save state of the chipset - . branch off into routines to handle each case, and acknowledge - . each to the interrupt register - . and finally restore state. - . - ---------------------------------------------------------------------*/ - -static irqreturn_t smc_interrupt(int irq, void * dev_id) -{ - struct net_device *dev = dev_id; - int ioaddr = dev->base_addr; - struct smc_local *lp = netdev_priv(dev); - - byte status; - word card_stats; - byte mask; - int timeout; - /* state registers */ - word saved_bank; - word saved_pointer; - int handled = 0; - - - PRINTK3((CARDNAME": SMC interrupt started\n")); - - saved_bank = inw( ioaddr + BANK_SELECT ); - - SMC_SELECT_BANK(2); - saved_pointer = inw( ioaddr + POINTER ); - - mask = inb( ioaddr + INT_MASK ); - /* clear all interrupts */ - outb( 0, ioaddr + INT_MASK ); - - - /* set a timeout value, so I don't stay here forever */ - timeout = 4; - - PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask)); - do { - /* read the status flag, and mask it */ - status = inb( ioaddr + INTERRUPT ) & mask; - if (!status ) - break; - - handled = 1; - - PRINTK3((KERN_WARNING CARDNAME - ": Handling interrupt status %x\n", status)); - - if (status & IM_RCV_INT) { - /* Got a packet(s). 
*/ - PRINTK2((KERN_WARNING CARDNAME - ": Receive Interrupt\n")); - smc_rcv(dev); - } else if (status & IM_TX_INT ) { - PRINTK2((KERN_WARNING CARDNAME - ": TX ERROR handled\n")); - smc_tx(dev); - outb(IM_TX_INT, ioaddr + INTERRUPT ); - } else if (status & IM_TX_EMPTY_INT ) { - /* update stats */ - SMC_SELECT_BANK( 0 ); - card_stats = inw( ioaddr + COUNTER ); - /* single collisions */ - dev->stats.collisions += card_stats & 0xF; - card_stats >>= 4; - /* multiple collisions */ - dev->stats.collisions += card_stats & 0xF; - - /* these are for when linux supports these statistics */ - - SMC_SELECT_BANK( 2 ); - PRINTK2((KERN_WARNING CARDNAME - ": TX_BUFFER_EMPTY handled\n")); - outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT ); - mask &= ~IM_TX_EMPTY_INT; - dev->stats.tx_packets += lp->packets_waiting; - lp->packets_waiting = 0; - - } else if (status & IM_ALLOC_INT ) { - PRINTK2((KERN_DEBUG CARDNAME - ": Allocation interrupt\n")); - /* clear this interrupt so it doesn't happen again */ - mask &= ~IM_ALLOC_INT; - - smc_hardware_send_packet( dev ); - - /* enable xmit interrupts based on this */ - mask |= ( IM_TX_EMPTY_INT | IM_TX_INT ); - - /* and let the card send more packets to me */ - netif_wake_queue(dev); - - PRINTK2((CARDNAME": Handoff done successfully.\n")); - } else if (status & IM_RX_OVRN_INT ) { - dev->stats.rx_errors++; - dev->stats.rx_fifo_errors++; - outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT ); - } else if (status & IM_EPH_INT ) { - PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n")); - } else if (status & IM_ERCV_INT ) { - PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n")); - outb( IM_ERCV_INT, ioaddr + INTERRUPT ); - } - } while ( timeout -- ); - - - /* restore state register */ - SMC_SELECT_BANK( 2 ); - outb( mask, ioaddr + INT_MASK ); - - PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask)); - outw( saved_pointer, ioaddr + POINTER ); - - SMC_SELECT_BANK( saved_bank ); - - PRINTK3((CARDNAME ": Interrupt done\n")); - return IRQ_RETVAL(handled); -} - - -/*---------------------------------------------------- - . smc_close - . - . this makes the board clean up everything that it can - . and not talk to the outside world. Caused by - . an 'ifconfig ethX down' - . - -----------------------------------------------------*/ -static int smc_close(struct net_device *dev) -{ - netif_stop_queue(dev); - /* clear everything */ - smc_shutdown( dev->base_addr ); - - /* Update the statistics here. */ - return 0; -} - -/*----------------------------------------------------------- - . smc_set_multicast_list - . - . This routine will, depending on the values passed to it, - . either make it accept multicast packets, go into - . promiscuous mode ( for TCPDUMP and cousins ) or accept - . a select set of multicast packets -*/ -static void smc_set_multicast_list(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - - SMC_SELECT_BANK(0); - if ( dev->flags & IFF_PROMISC ) - outw( inw(ioaddr + RCR ) | RCR_PROMISC, ioaddr + RCR ); - -/* BUG? I never disable promiscuous mode if multicasting was turned on. - Now, I turn off promiscuous mode, but I don't do anything to multicasting - when promiscuous mode is turned on. -*/ - - /* Here, I am setting this to accept all multicast packets. - I don't need to zero the multicast table, because the flag is - checked before the table is - */ - else if (dev->flags & IFF_ALLMULTI) - outw( inw(ioaddr + RCR ) | RCR_ALMUL, ioaddr + RCR ); - - /* We just get all multicast packets even if we only want them - . from one source. This will be changed at some future - . 
point. */ - else if (!netdev_mc_empty(dev)) { - /* support hardware multicasting */ - - /* be sure I get rid of flags I might have set */ - outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL), - ioaddr + RCR ); - /* NOTE: this has to set the bank, so make sure it is the - last thing called. The bank is set to zero at the top */ - smc_setmulticast(ioaddr, dev); - } - else { - outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL), - ioaddr + RCR ); - - /* - since I'm disabling all multicast entirely, I need to - clear the multicast list - */ - SMC_SELECT_BANK( 3 ); - outw( 0, ioaddr + MULTICAST1 ); - outw( 0, ioaddr + MULTICAST2 ); - outw( 0, ioaddr + MULTICAST3 ); - outw( 0, ioaddr + MULTICAST4 ); - } -} - -#ifdef MODULE - -static struct net_device *devSMC9194; -MODULE_LICENSE("GPL"); - -module_param(io, int, 0); -module_param(irq, int, 0); -module_param(ifport, int, 0); -MODULE_PARM_DESC(io, "SMC 99194 I/O base address"); -MODULE_PARM_DESC(irq, "SMC 99194 IRQ number"); -MODULE_PARM_DESC(ifport, "SMC 99194 interface port (0-default, 1-TP, 2-AUI)"); - -int __init init_module(void) -{ - if (io == 0) - printk(KERN_WARNING - CARDNAME": You shouldn't use auto-probing with insmod!\n" ); - - /* copy the parameters from insmod into the device structure */ - devSMC9194 = smc_init(-1); - if (IS_ERR(devSMC9194)) - return PTR_ERR(devSMC9194); - return 0; -} - -void __exit cleanup_module(void) -{ - unregister_netdev(devSMC9194); - free_irq(devSMC9194->irq, devSMC9194); - release_region(devSMC9194->base_addr, SMC_IO_EXTENT); - free_netdev(devSMC9194); -} - -#endif /* MODULE */ diff --git a/drivers/net/smc9194.h b/drivers/net/smc9194.h deleted file mode 100644 index cf69d0a..0000000 --- a/drivers/net/smc9194.h +++ /dev/null @@ -1,241 +0,0 @@ -/*------------------------------------------------------------------------ - . smc9194.h - . Copyright (C) 1996 by Erik Stahlman - . - . This software may be used and distributed according to the terms - . of the GNU General Public License, incorporated herein by reference. - . - . This file contains register information and access macros for - . the SMC91xxx chipset. - . - . Information contained in this file was obtained from the SMC91C94 - . manual from SMC. To get a copy, if you really want one, you can find - . information under www.smc.com in the components division. - . ( this thanks to advice from Donald Becker ). - . - . Authors - . Erik Stahlman ( erik@vt.edu ) - . - . History - . 01/06/96 Erik Stahlman moved definitions here from main .c file - . 01/19/96 Erik Stahlman polished this up some, and added better - . error handling - . - ---------------------------------------------------------------------------*/ -#ifndef _SMC9194_H_ -#define _SMC9194_H_ - -/* I want some simple types */ - -typedef unsigned char byte; -typedef unsigned short word; -typedef unsigned long int dword; - - -/* Because of bank switching, the SMC91xxx uses only 16 I/O ports */ - -#define SMC_IO_EXTENT 16 - - -/*--------------------------------------------------------------- - . - . A description of the SMC registers is probably in order here, - . although for details, the SMC datasheet is invaluable. - . - . Basically, the chip has 4 banks of registers ( 0 to 3 ), which - . are accessed by writing a number into the BANK_SELECT register - . ( I also use a SMC_SELECT_BANK macro for this ). - . - . The banks are configured so that for most purposes, bank 2 is all - . that is needed for simple run time tasks. 
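Since every register below exists only relative to the currently selected bank, one compact illustration of the convention, using this header's own macros and offsets (fifo and rcr are scratch variables for the example):

    word fifo, rcr;

    SMC_SELECT_BANK(2);                 /* bank 2: MMU, FIFOs, data, interrupts */
    fifo = inw(ioaddr + FIFO_PORTS);    /* offset 4 means the FIFO ports...     */

    SMC_SELECT_BANK(0);
    rcr = inw(ioaddr + RCR);            /* ...while offset 4 in bank 0 is RCR   */

Forgetting to re-select the bank after an interrupt is exactly why the drivers in this patch save and restore BANK_SELECT around the IRQ handler.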
- -----------------------------------------------------------------------*/ - -/* - . Bank Select Register: - . - . yyyy yyyy 0000 00xx - . xx = bank number - . yyyy yyyy = 0x33, for identification purposes. -*/ -#define BANK_SELECT 14 - -/* BANK 0 */ - -#define TCR 0 /* transmit control register */ -#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */ -#define TCR_FDUPLX 0x0800 /* receive packets sent out */ -#define TCR_STP_SQET 0x1000 /* stop transmitting if Signal quality error */ -#define TCR_MON_CNS 0x0400 /* monitors the carrier status */ -#define TCR_PAD_ENABLE 0x0080 /* pads short packets to 64 bytes */ - -#define TCR_CLEAR 0 /* do NOTHING */ -/* the normal settings for the TCR register : */ -/* QUESTION: do I want to enable padding of short packets ? */ -#define TCR_NORMAL TCR_ENABLE - - -#define EPH_STATUS 2 -#define ES_LINK_OK 0x4000 /* is the link integrity ok ? */ - -#define RCR 4 -#define RCR_SOFTRESET 0x8000 /* resets the chip */ -#define RCR_STRIP_CRC 0x200 /* strips CRC */ -#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */ -#define RCR_ALMUL 0x4 /* receive all multicast packets */ -#define RCR_PROMISC 0x2 /* enable promiscuous mode */ - -/* the normal settings for the RCR register : */ -#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE) -#define RCR_CLEAR 0x0 /* set it to a base state */ - -#define COUNTER 6 -#define MIR 8 -#define MCR 10 -/* 12 is reserved */ - -/* BANK 1 */ -#define CONFIG 0 -#define CFG_AUI_SELECT 0x100 -#define BASE 2 -#define ADDR0 4 -#define ADDR1 6 -#define ADDR2 8 -#define GENERAL 10 -#define CONTROL 12 -#define CTL_POWERDOWN 0x2000 -#define CTL_LE_ENABLE 0x80 -#define CTL_CR_ENABLE 0x40 -#define CTL_TE_ENABLE 0x0020 -#define CTL_AUTO_RELEASE 0x0800 -#define CTL_EPROM_ACCESS 0x0003 /* high if Eprom is being read */ - -/* BANK 2 */ -#define MMU_CMD 0 -#define MC_BUSY 1 /* only readable bit in the register */ -#define MC_NOP 0 -#define MC_ALLOC 0x20 /* or with number of 256 byte packets */ -#define MC_RESET 0x40 -#define MC_REMOVE 0x60 /* remove the current rx packet */ -#define MC_RELEASE 0x80 /* remove and release the current rx packet */ -#define MC_FREEPKT 0xA0 /* Release packet in PNR register */ -#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */ - -#define PNR_ARR 2 -#define FIFO_PORTS 4 - -#define FP_RXEMPTY 0x8000 -#define FP_TXEMPTY 0x80 - -#define POINTER 6 -#define PTR_READ 0x2000 -#define PTR_RCV 0x8000 -#define PTR_AUTOINC 0x4000 -#define PTR_AUTO_INC 0x0040 - -#define DATA_1 8 -#define DATA_2 10 -#define INTERRUPT 12 - -#define INT_MASK 13 -#define IM_RCV_INT 0x1 -#define IM_TX_INT 0x2 -#define IM_TX_EMPTY_INT 0x4 -#define IM_ALLOC_INT 0x8 -#define IM_RX_OVRN_INT 0x10 -#define IM_EPH_INT 0x20 -#define IM_ERCV_INT 0x40 /* not on SMC9192 */ - -/* BANK 3 */ -#define MULTICAST1 0 -#define MULTICAST2 2 -#define MULTICAST3 4 -#define MULTICAST4 6 -#define MGMT 8 -#define REVISION 10 /* ( hi: chip id low: rev # ) */ - - -/* this is NOT on SMC9192 */ -#define ERCV 12 - -#define CHIP_9190 3 -#define CHIP_9194 4 -#define CHIP_9195 5 -#define CHIP_91100 7 - -static const char * chip_ids[ 15 ] = { - NULL, NULL, NULL, - /* 3 */ "SMC91C90/91C92", - /* 4 */ "SMC91C94", - /* 5 */ "SMC91C95", - NULL, - /* 7 */ "SMC91C100", - /* 8 */ "SMC91C100FD", - NULL, NULL, NULL, - NULL, NULL, NULL}; - -/* - . Transmit status bits -*/ -#define TS_SUCCESS 0x0001 -#define TS_LOSTCAR 0x0400 -#define TS_LATCOL 0x0200 -#define TS_16COL 0x0010 - -/* - . 
Receive status bits -*/ -#define RS_ALGNERR 0x8000 -#define RS_BADCRC 0x2000 -#define RS_ODDFRAME 0x1000 -#define RS_TOOLONG 0x0800 -#define RS_TOOSHORT 0x0400 -#define RS_MULTICAST 0x0001 -#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) - -static const char * interfaces[ 2 ] = { "TP", "AUI" }; - -/*------------------------------------------------------------------------- - . I define some macros to make it easier to do somewhat common - . or slightly complicated, repeated tasks. - --------------------------------------------------------------------------*/ - -/* select a register bank, 0 to 3 */ - -#define SMC_SELECT_BANK(x) { outw( x, ioaddr + BANK_SELECT ); } - -/* define a small delay for the reset */ -#define SMC_DELAY() { inw( ioaddr + RCR );\ - inw( ioaddr + RCR );\ - inw( ioaddr + RCR ); } - -/* this enables an interrupt in the interrupt mask register */ -#define SMC_ENABLE_INT(x) {\ - unsigned char mask;\ - SMC_SELECT_BANK(2);\ - mask = inb( ioaddr + INT_MASK );\ - mask |= (x);\ - outb( mask, ioaddr + INT_MASK ); \ -} - -/* this disables an interrupt from the interrupt mask register */ - -#define SMC_DISABLE_INT(x) {\ - unsigned char mask;\ - SMC_SELECT_BANK(2);\ - mask = inb( ioaddr + INT_MASK );\ - mask &= ~(x);\ - outb( mask, ioaddr + INT_MASK ); \ -} - -/*---------------------------------------------------------------------- - . Define the interrupts that I want to receive from the card - . - . I want: - . IM_EPH_INT, for nasty errors - . IM_RCV_INT, for happy received packets - . IM_RX_OVRN_INT, because I have to kick the receiver - --------------------------------------------------------------------------*/ -#define SMC_INTERRUPT_MASK (IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT) - -#endif /* _SMC_9194_H_ */ - diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c deleted file mode 100644 index 2b1d254..0000000 --- a/drivers/net/smc91x.c +++ /dev/null @@ -1,2431 +0,0 @@ -/* - * smc91x.c - * This is a driver for SMSC's 91C9x/91C1xx single-chip Ethernet devices. - * - * Copyright (C) 1996 by Erik Stahlman - * Copyright (C) 2001 Standard Microsystems Corporation - * Developed by Simple Network Magic Corporation - * Copyright (C) 2003 Monta Vista Software, Inc. - * Unified SMC91x driver by Nicolas Pitre - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- * Arguments:
- *	io	= for the base address
- *	irq	= for the IRQ
- *	nowait	= 0 for normal wait states, 1 eliminates additional wait states
- *
- * original author:
- *	Erik Stahlman
- *
- * hardware multicast code:
- *	Peter Cammaert
- *
- * contributors:
- *	Daris A Nevil
- *	Nicolas Pitre
- *	Russell King
- *
- * History:
- *   08/20/00  Arnaldo Melo       fix kfree(skb) in smc_hardware_send_packet
- *   12/15/00  Christian Jullien  fix "Warning: kfree_skb on hard IRQ"
- *   03/16/01  Daris A Nevil      modified smc9194.c for use with LAN91C111
- *   08/22/01  Scott Anderson     merge changes from smc9194 to smc91111
- *   08/21/01  Pramod B Bhardwaj  added support for RevB of LAN91C111
- *   12/20/01  Jeff Sutherland    initial port to Xscale PXA with DMA support
- *   04/07/03  Nicolas Pitre      unified SMC91x driver, killed irq races,
- *                                more bus abstraction, big cleanup, etc.
- *   29/09/03  Russell King       - add driver model support
- *                                - ethtool support
- *                                - convert to use generic MII interface
- *                                - add link up/down notification
- *                                - don't try to handle full negotiation in
- *                                  smc_phy_configure
- *                                - clean up (and fix stack overrun) in PHY
- *                                  MII read/write functions
- *   22/09/04  Nicolas Pitre      big update (see commit log for details)
- */
-static const char version[] =
-	"smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre \n";
-
-/* Debugging level */
-#ifndef SMC_DEBUG
-#define SMC_DEBUG 0
-#endif
-
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/crc32.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/workqueue.h>
-#include <linux/of.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <asm/io.h>
-
-#include "smc91x.h"
-
-#ifndef SMC_NOWAIT
-# define SMC_NOWAIT		0
-#endif
-static int nowait = SMC_NOWAIT;
-module_param(nowait, int, 0400);
-MODULE_PARM_DESC(nowait, "set to 1 for no wait state");
-
-/*
- * Transmit timeout, default 1000 milliseconds.
- */
-static int watchdog = 1000;
-module_param(watchdog, int, 0400);
-MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:smc91x");
-
-/*
- * The internal workings of the driver.  If you are changing anything
- * here with the SMC stuff, you should have the datasheet and know
- * what you are doing.
- */
-#define CARDNAME "smc91x"
-
-/*
- * Use power-down feature of the chip
- */
-#define POWER_DOWN		1
-
-/*
- * Wait time for memory to be free.  This probably shouldn't be
- * tuned that much, as waiting for this means nothing else happens
- * in the system
- */
-#define MEMORY_WAIT_TIME	16
-
-/*
- * The maximum number of processing loops allowed for each call to the
- * IRQ handler.
- */
-#define MAX_IRQ_LOOPS		8
-
-/*
- * This selects whether TX packets are sent one by one to the SMC91x internal
- * memory and throttled until transmission completes.  This may prevent
- * RX overruns a little by keeping much of the memory free for RX packets
- * but at the expense of reduced TX throughput and increased IRQ overhead.
- * Note this is not a cure for a too slow data bus or too high IRQ latency.
- */
-#define THROTTLE_TX_PKTS	0
-
-/*
- * The MII clock high/low times.  2x this number gives the MII clock period
- * in microseconds. (was 50, but this gives 6.4ms for each MII transaction!)
- */
-#define MII_DELAY		1
-
-#if SMC_DEBUG > 0
-#define DBG(n, args...)
\ - do { \ - if (SMC_DEBUG >= (n)) \ - printk(args); \ - } while (0) - -#define PRINTK(args...) printk(args) -#else -#define DBG(n, args...) do { } while(0) -#define PRINTK(args...) printk(KERN_DEBUG args) -#endif - -#if SMC_DEBUG > 3 -static void PRINT_PKT(u_char *buf, int length) -{ - int i; - int remainder; - int lines; - - lines = length / 16; - remainder = length % 16; - - for (i = 0; i < lines ; i ++) { - int cur; - for (cur = 0; cur < 8; cur++) { - u_char a, b; - a = *buf++; - b = *buf++; - printk("%02x%02x ", a, b); - } - printk("\n"); - } - for (i = 0; i < remainder/2 ; i++) { - u_char a, b; - a = *buf++; - b = *buf++; - printk("%02x%02x ", a, b); - } - printk("\n"); -} -#else -#define PRINT_PKT(x...) do { } while(0) -#endif - - -/* this enables an interrupt in the interrupt mask register */ -#define SMC_ENABLE_INT(lp, x) do { \ - unsigned char mask; \ - unsigned long smc_enable_flags; \ - spin_lock_irqsave(&lp->lock, smc_enable_flags); \ - mask = SMC_GET_INT_MASK(lp); \ - mask |= (x); \ - SMC_SET_INT_MASK(lp, mask); \ - spin_unlock_irqrestore(&lp->lock, smc_enable_flags); \ -} while (0) - -/* this disables an interrupt from the interrupt mask register */ -#define SMC_DISABLE_INT(lp, x) do { \ - unsigned char mask; \ - unsigned long smc_disable_flags; \ - spin_lock_irqsave(&lp->lock, smc_disable_flags); \ - mask = SMC_GET_INT_MASK(lp); \ - mask &= ~(x); \ - SMC_SET_INT_MASK(lp, mask); \ - spin_unlock_irqrestore(&lp->lock, smc_disable_flags); \ -} while (0) - -/* - * Wait while MMU is busy. This is usually in the order of a few nanosecs - * if at all, but let's avoid deadlocking the system if the hardware - * decides to go south. - */ -#define SMC_WAIT_MMU_BUSY(lp) do { \ - if (unlikely(SMC_GET_MMU_CMD(lp) & MC_BUSY)) { \ - unsigned long timeout = jiffies + 2; \ - while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \ - if (time_after(jiffies, timeout)) { \ - printk("%s: timeout %s line %d\n", \ - dev->name, __FILE__, __LINE__); \ - break; \ - } \ - cpu_relax(); \ - } \ - } \ -} while (0) - - -/* - * this does a soft reset on the device - */ -static void smc_reset(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - unsigned int ctl, cfg; - struct sk_buff *pending_skb; - - DBG(2, "%s: %s\n", dev->name, __func__); - - /* Disable all interrupts, block TX tasklet */ - spin_lock_irq(&lp->lock); - SMC_SELECT_BANK(lp, 2); - SMC_SET_INT_MASK(lp, 0); - pending_skb = lp->pending_tx_skb; - lp->pending_tx_skb = NULL; - spin_unlock_irq(&lp->lock); - - /* free any pending tx skb */ - if (pending_skb) { - dev_kfree_skb(pending_skb); - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - } - - /* - * This resets the registers mostly to defaults, but doesn't - * affect EEPROM. That seems unnecessary - */ - SMC_SELECT_BANK(lp, 0); - SMC_SET_RCR(lp, RCR_SOFTRST); - - /* - * Setup the Configuration Register - * This is necessary because the CONFIG_REG is not affected - * by a soft reset - */ - SMC_SELECT_BANK(lp, 1); - - cfg = CONFIG_DEFAULT; - - /* - * Setup for fast accesses if requested. If the card/system - * can't handle it then there will be no recovery except for - * a hard reset or power cycle - */ - if (lp->cfg.flags & SMC91X_NOWAIT) - cfg |= CONFIG_NO_WAIT; - - /* - * Release from possible power-down state - * Configuration register is not affected by Soft Reset - */ - cfg |= CONFIG_EPH_POWER_EN; - - SMC_SET_CONFIG(lp, cfg); - - /* this should pause enough for the chip to be happy */ - /* - * elaborate? What does the chip _need_? 
--jgarzik - * - * This seems to be undocumented, but something the original - * driver(s) have always done. Suspect undocumented timing - * info/determined empirically. --rmk - */ - udelay(1); - - /* Disable transmit and receive functionality */ - SMC_SELECT_BANK(lp, 0); - SMC_SET_RCR(lp, RCR_CLEAR); - SMC_SET_TCR(lp, TCR_CLEAR); - - SMC_SELECT_BANK(lp, 1); - ctl = SMC_GET_CTL(lp) | CTL_LE_ENABLE; - - /* - * Set the control register to automatically release successfully - * transmitted packets, to make the best use out of our limited - * memory - */ - if(!THROTTLE_TX_PKTS) - ctl |= CTL_AUTO_RELEASE; - else - ctl &= ~CTL_AUTO_RELEASE; - SMC_SET_CTL(lp, ctl); - - /* Reset the MMU */ - SMC_SELECT_BANK(lp, 2); - SMC_SET_MMU_CMD(lp, MC_RESET); - SMC_WAIT_MMU_BUSY(lp); -} - -/* - * Enable Interrupts, Receive, and Transmit - */ -static void smc_enable(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - int mask; - - DBG(2, "%s: %s\n", dev->name, __func__); - - /* see the header file for options in TCR/RCR DEFAULT */ - SMC_SELECT_BANK(lp, 0); - SMC_SET_TCR(lp, lp->tcr_cur_mode); - SMC_SET_RCR(lp, lp->rcr_cur_mode); - - SMC_SELECT_BANK(lp, 1); - SMC_SET_MAC_ADDR(lp, dev->dev_addr); - - /* now, enable interrupts */ - mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT; - if (lp->version >= (CHIP_91100 << 4)) - mask |= IM_MDINT; - SMC_SELECT_BANK(lp, 2); - SMC_SET_INT_MASK(lp, mask); - - /* - * From this point the register bank must _NOT_ be switched away - * to something else than bank 2 without proper locking against - * races with any tasklet or interrupt handlers until smc_shutdown() - * or smc_reset() is called. - */ -} - -/* - * this puts the device in an inactive state - */ -static void smc_shutdown(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - struct sk_buff *pending_skb; - - DBG(2, "%s: %s\n", CARDNAME, __func__); - - /* no more interrupts for me */ - spin_lock_irq(&lp->lock); - SMC_SELECT_BANK(lp, 2); - SMC_SET_INT_MASK(lp, 0); - pending_skb = lp->pending_tx_skb; - lp->pending_tx_skb = NULL; - spin_unlock_irq(&lp->lock); - if (pending_skb) - dev_kfree_skb(pending_skb); - - /* and tell the card to stay away from that nasty outside world */ - SMC_SELECT_BANK(lp, 0); - SMC_SET_RCR(lp, RCR_CLEAR); - SMC_SET_TCR(lp, TCR_CLEAR); - -#ifdef POWER_DOWN - /* finally, shut the chip down */ - SMC_SELECT_BANK(lp, 1); - SMC_SET_CONFIG(lp, SMC_GET_CONFIG(lp) & ~CONFIG_EPH_POWER_EN); -#endif -} - -/* - * This is the procedure to handle the receipt of a packet. 
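Both smc_reset() and smc_shutdown() above use the same small discipline for the pending TX skb, and it is worth isolating: detach the shared pointer while holding the lock, free it only after the lock is dropped. As a pattern, with the names this driver uses:

    struct sk_buff *pending;

    spin_lock_irq(&lp->lock);
    pending = lp->pending_tx_skb;   /* detach while holding the lock         */
    lp->pending_tx_skb = NULL;
    spin_unlock_irq(&lp->lock);

    if (pending)
            dev_kfree_skb(pending); /* potentially slow work, done unlocked  */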
- */ -static inline void smc_rcv(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - unsigned int packet_number, status, packet_len; - - DBG(3, "%s: %s\n", dev->name, __func__); - - packet_number = SMC_GET_RXFIFO(lp); - if (unlikely(packet_number & RXFIFO_REMPTY)) { - PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name); - return; - } - - /* read from start of packet */ - SMC_SET_PTR(lp, PTR_READ | PTR_RCV | PTR_AUTOINC); - - /* First two words are status and packet length */ - SMC_GET_PKT_HDR(lp, status, packet_len); - packet_len &= 0x07ff; /* mask off top bits */ - DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n", - dev->name, packet_number, status, - packet_len, packet_len); - - back: - if (unlikely(packet_len < 6 || status & RS_ERRORS)) { - if (status & RS_TOOLONG && packet_len <= (1514 + 4 + 6)) { - /* accept VLAN packets */ - status &= ~RS_TOOLONG; - goto back; - } - if (packet_len < 6) { - /* bloody hardware */ - printk(KERN_ERR "%s: fubar (rxlen %u status %x\n", - dev->name, packet_len, status); - status |= RS_TOOSHORT; - } - SMC_WAIT_MMU_BUSY(lp); - SMC_SET_MMU_CMD(lp, MC_RELEASE); - dev->stats.rx_errors++; - if (status & RS_ALGNERR) - dev->stats.rx_frame_errors++; - if (status & (RS_TOOSHORT | RS_TOOLONG)) - dev->stats.rx_length_errors++; - if (status & RS_BADCRC) - dev->stats.rx_crc_errors++; - } else { - struct sk_buff *skb; - unsigned char *data; - unsigned int data_len; - - /* set multicast stats */ - if (status & RS_MULTICAST) - dev->stats.multicast++; - - /* - * Actual payload is packet_len - 6 (or 5 if odd byte). - * We want skb_reserve(2) and the final ctrl word - * (2 bytes, possibly containing the payload odd byte). - * Furthermore, we add 2 bytes to allow rounding up to - * multiple of 4 bytes on 32 bit buses. - * Hence packet_len - 6 + 2 + 2 + 2. - */ - skb = dev_alloc_skb(packet_len); - if (unlikely(skb == NULL)) { - printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", - dev->name); - SMC_WAIT_MMU_BUSY(lp); - SMC_SET_MMU_CMD(lp, MC_RELEASE); - dev->stats.rx_dropped++; - return; - } - - /* Align IP header to 32 bits */ - skb_reserve(skb, 2); - - /* BUG: the LAN91C111 rev A never sets this bit. Force it. */ - if (lp->version == 0x90) - status |= RS_ODDFRAME; - - /* - * If odd length: packet_len - 5, - * otherwise packet_len - 6. - * With the trailing ctrl byte it's packet_len - 4. - */ - data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6); - data = skb_put(skb, data_len); - SMC_PULL_DATA(lp, data, packet_len - 4); - - SMC_WAIT_MMU_BUSY(lp); - SMC_SET_MMU_CMD(lp, MC_RELEASE); - - PRINT_PKT(data, packet_len - 4); - - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += data_len; - } -} - -#ifdef CONFIG_SMP -/* - * On SMP we have the following problem: - * - * A = smc_hardware_send_pkt() - * B = smc_hard_start_xmit() - * C = smc_interrupt() - * - * A and B can never be executed simultaneously. However, at least on UP, - * it is possible (and even desirable) for C to interrupt execution of - * A or B in order to have better RX reliability and avoid overruns. - * C, just like A and B, must have exclusive access to the chip and - * each of them must lock against any other concurrent access. - * Unfortunately this is not possible to have C suspend execution of A or - * B taking place on another CPU. 
On UP this is not an issue since A and B
- * are run from softirq context and C from hard IRQ context, and there is
- * no other CPU where concurrent access can happen.
- * If ever there is a way to force at least B and C to always be executed
- * on the same CPU then we could use read/write locks to protect against
- * any other concurrent access and C would always interrupt B. But life
- * isn't that easy in an SMP world...
- */
-#define smc_special_trylock(lock, flags)				\
-({									\
-	int __ret;							\
-	local_irq_save(flags);						\
-	__ret = spin_trylock(lock);					\
-	if (!__ret)							\
-		local_irq_restore(flags);				\
-	__ret;								\
-})
-#define smc_special_lock(lock, flags)	spin_lock_irqsave(lock, flags)
-#define smc_special_unlock(lock, flags)	spin_unlock_irqrestore(lock, flags)
-#else
-#define smc_special_trylock(lock, flags)	(flags == flags)
-#define smc_special_lock(lock, flags)	do { flags = 0; } while (0)
-#define smc_special_unlock(lock, flags)	do { flags = 0; } while (0)
-#endif
-
-/*
- * This is called to actually send a packet to the chip.
- */
-static void smc_hardware_send_pkt(unsigned long data)
-{
-	struct net_device *dev = (struct net_device *)data;
-	struct smc_local *lp = netdev_priv(dev);
-	void __iomem *ioaddr = lp->base;
-	struct sk_buff *skb;
-	unsigned int packet_no, len;
-	unsigned char *buf;
-	unsigned long flags;
-
-	DBG(3, "%s: %s\n", dev->name, __func__);
-
-	if (!smc_special_trylock(&lp->lock, flags)) {
-		netif_stop_queue(dev);
-		tasklet_schedule(&lp->tx_task);
-		return;
-	}
-
-	skb = lp->pending_tx_skb;
-	if (unlikely(!skb)) {
-		smc_special_unlock(&lp->lock, flags);
-		return;
-	}
-	lp->pending_tx_skb = NULL;
-
-	packet_no = SMC_GET_AR(lp);
-	if (unlikely(packet_no & AR_FAILED)) {
-		printk("%s: Memory allocation failed.\n", dev->name);
-		dev->stats.tx_errors++;
-		dev->stats.tx_fifo_errors++;
-		smc_special_unlock(&lp->lock, flags);
-		goto done;
-	}
-
-	/* point to the beginning of the packet */
-	SMC_SET_PN(lp, packet_no);
-	SMC_SET_PTR(lp, PTR_AUTOINC);
-
-	buf = skb->data;
-	len = skb->len;
-	DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
-		dev->name, packet_no, len, len, buf);
-	PRINT_PKT(buf, len);
-
-	/*
-	 * Send the packet length (+6 for status words, length, and ctl).
-	 * The card will pad to 64 bytes with zeroes if packet is too small.
-	 */
-	SMC_PUT_PKT_HDR(lp, 0, len + 6);
-
-	/* send the actual data */
-	SMC_PUSH_DATA(lp, buf, len & ~1);
-
-	/* Send final ctl word with the last byte if there is one */
-	SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG(lp));
-
-	/*
-	 * If THROTTLE_TX_PKTS is set, we stop the queue here. This will
-	 * have the effect of having at most one packet queued for TX
-	 * in the chip's memory at all times.
-	 *
-	 * If THROTTLE_TX_PKTS is not set then the queue is stopped only
-	 * when memory allocation (MC_ALLOC) does not succeed right away.
-	 */
-	if (THROTTLE_TX_PKTS)
-		netif_stop_queue(dev);
-
-	/* queue the packet for TX */
-	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
-	smc_special_unlock(&lp->lock, flags);
-
-	dev->trans_start = jiffies;
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += len;
-
-	SMC_ENABLE_INT(lp, IM_TX_INT | IM_TX_EMPTY_INT);
-
-done:	if (!THROTTLE_TX_PKTS)
-		netif_wake_queue(dev);
-
-	dev_kfree_skb(skb);
-}
-
-/*
- * Since I am not sure if I will have enough room in the chip's ram
- * to store the packet, I call this routine which either sends it
- * now, or sets the card up to generate an interrupt when it is ready
- * for the packet.
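Back in smc_hardware_send_pkt() above, the single SMC_outw() packs the chip's closing control word: bit 13 (0x2000) marks an odd-length frame, and the low byte then carries the final data byte. For example:

    /* len = 61, buf[60] = 0xAB:  ctl word = 0x2000 | 0xAB = 0x20AB */
    /* len = 60 (even length):    ctl word = 0x0000                 */

This is the same encoding the old smc9194 driver earlier in this patch produced with two outb() calls: the odd data byte first, then 0x20 into the high byte of the data port.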
- */ -static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - unsigned int numPages, poll_count, status; - unsigned long flags; - - DBG(3, "%s: %s\n", dev->name, __func__); - - BUG_ON(lp->pending_tx_skb != NULL); - - /* - * The MMU wants the number of pages to be the number of 256 bytes - * 'pages', minus 1 (since a packet can't ever have 0 pages :)) - * - * The 91C111 ignores the size bits, but earlier models don't. - * - * Pkt size for allocating is data length +6 (for additional status - * words, length and ctl) - * - * If odd size then last byte is included in ctl word. - */ - numPages = ((skb->len & ~1) + (6 - 1)) >> 8; - if (unlikely(numPages > 7)) { - printk("%s: Far too big packet error.\n", dev->name); - dev->stats.tx_errors++; - dev->stats.tx_dropped++; - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - smc_special_lock(&lp->lock, flags); - - /* now, try to allocate the memory */ - SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages); - - /* - * Poll the chip for a short amount of time in case the - * allocation succeeds quickly. - */ - poll_count = MEMORY_WAIT_TIME; - do { - status = SMC_GET_INT(lp); - if (status & IM_ALLOC_INT) { - SMC_ACK_INT(lp, IM_ALLOC_INT); - break; - } - } while (--poll_count); - - smc_special_unlock(&lp->lock, flags); - - lp->pending_tx_skb = skb; - if (!poll_count) { - /* oh well, wait until the chip finds memory later */ - netif_stop_queue(dev); - DBG(2, "%s: TX memory allocation deferred.\n", dev->name); - SMC_ENABLE_INT(lp, IM_ALLOC_INT); - } else { - /* - * Allocation succeeded: push packet to the chip's own memory - * immediately. - */ - smc_hardware_send_pkt((unsigned long)dev); - } - - return NETDEV_TX_OK; -} - -/* - * This handles a TX interrupt, which is only called when: - * - a TX error occurred, or - * - CTL_AUTO_RELEASE is not set and TX of a packet completed. - */ -static void smc_tx(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - unsigned int saved_packet, packet_no, tx_status, pkt_len; - - DBG(3, "%s: %s\n", dev->name, __func__); - - /* If the TX FIFO is empty then nothing to do */ - packet_no = SMC_GET_TXFIFO(lp); - if (unlikely(packet_no & TXFIFO_TEMPTY)) { - PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name); - return; - } - - /* select packet to read from */ - saved_packet = SMC_GET_PN(lp); - SMC_SET_PN(lp, packet_no); - - /* read the first word (status word) from this packet */ - SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ); - SMC_GET_PKT_HDR(lp, tx_status, pkt_len); - DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n", - dev->name, tx_status, packet_no); - - if (!(tx_status & ES_TX_SUC)) - dev->stats.tx_errors++; - - if (tx_status & ES_LOSTCARR) - dev->stats.tx_carrier_errors++; - - if (tx_status & (ES_LATCOL | ES_16COL)) { - PRINTK("%s: %s occurred on last xmit\n", dev->name, - (tx_status & ES_LATCOL) ? - "late collision" : "too many collisions"); - dev->stats.tx_window_errors++; - if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) { - printk(KERN_INFO "%s: unexpectedly large number of " - "bad collisions. 
Please check duplex " - "setting.\n", dev->name); - } - } - - /* kill the packet */ - SMC_WAIT_MMU_BUSY(lp); - SMC_SET_MMU_CMD(lp, MC_FREEPKT); - - /* Don't restore Packet Number Reg until busy bit is cleared */ - SMC_WAIT_MMU_BUSY(lp); - SMC_SET_PN(lp, saved_packet); - - /* re-enable transmit */ - SMC_SELECT_BANK(lp, 0); - SMC_SET_TCR(lp, lp->tcr_cur_mode); - SMC_SELECT_BANK(lp, 2); -} - - -/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ - -static void smc_mii_out(struct net_device *dev, unsigned int val, int bits) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - unsigned int mii_reg, mask; - - mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO); - mii_reg |= MII_MDOE; - - for (mask = 1 << (bits - 1); mask; mask >>= 1) { - if (val & mask) - mii_reg |= MII_MDO; - else - mii_reg &= ~MII_MDO; - - SMC_SET_MII(lp, mii_reg); - udelay(MII_DELAY); - SMC_SET_MII(lp, mii_reg | MII_MCLK); - udelay(MII_DELAY); - } -} - -static unsigned int smc_mii_in(struct net_device *dev, int bits) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - unsigned int mii_reg, mask, val; - - mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO); - SMC_SET_MII(lp, mii_reg); - - for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) { - if (SMC_GET_MII(lp) & MII_MDI) - val |= mask; - - SMC_SET_MII(lp, mii_reg); - udelay(MII_DELAY); - SMC_SET_MII(lp, mii_reg | MII_MCLK); - udelay(MII_DELAY); - } - - return val; -} - -/* - * Reads a register from the MII Management serial interface - */ -static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - unsigned int phydata; - - SMC_SELECT_BANK(lp, 3); - - /* Idle - 32 ones */ - smc_mii_out(dev, 0xffffffff, 32); - - /* Start code (01) + read (10) + phyaddr + phyreg */ - smc_mii_out(dev, 6 << 10 | phyaddr << 5 | phyreg, 14); - - /* Turnaround (2bits) + phydata */ - phydata = smc_mii_in(dev, 18); - - /* Return to idle state */ - SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); - - DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", - __func__, phyaddr, phyreg, phydata); - - SMC_SELECT_BANK(lp, 2); - return phydata; -} - -/* - * Writes a register to the MII Management serial interface - */ -static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg, - int phydata) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - - SMC_SELECT_BANK(lp, 3); - - /* Idle - 32 ones */ - smc_mii_out(dev, 0xffffffff, 32); - - /* Start code (01) + write (01) + phyaddr + phyreg + turnaround + phydata */ - smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32); - - /* Return to idle state */ - SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); - - DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", - __func__, phyaddr, phyreg, phydata); - - SMC_SELECT_BANK(lp, 2); -} - -/* - * Finds and reports the PHY address - */ -static void smc_phy_detect(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - int phyaddr; - - DBG(2, "%s: %s\n", dev->name, __func__); - - lp->phy_type = 0; - - /* - * Scan all 32 PHY addresses if necessary, starting at - * PHY#1 to PHY#31, and then PHY#0 last. 
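The wrap-around in the scan below is worth seeing in isolation: the loop counter runs from 1 to 32, and masking with 31 makes address 32 alias to 0, which is what defers PHY#0 to the end. A quick standalone check:

#include <stdio.h>

int main(void)
{
	int phyaddr;

	/* same bounds as smc_phy_detect(): 1..32 masked with 31
	 * visits addresses 1..31 first and address 0 last */
	for (phyaddr = 1; phyaddr < 33; ++phyaddr)
		printf("%d ", phyaddr & 31);
	printf("\n");
	return 0;
}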
- */ - for (phyaddr = 1; phyaddr < 33; ++phyaddr) { - unsigned int id1, id2; - - /* Read the PHY identifiers */ - id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1); - id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2); - - DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n", - dev->name, id1, id2); - - /* Make sure it is a valid identifier */ - if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 && - id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) { - /* Save the PHY's address */ - lp->mii.phy_id = phyaddr & 31; - lp->phy_type = id1 << 16 | id2; - break; - } - } -} - -/* - * Sets the PHY to a configuration as determined by the user - */ -static int smc_phy_fixed(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - int phyaddr = lp->mii.phy_id; - int bmcr, cfg1; - - DBG(3, "%s: %s\n", dev->name, __func__); - - /* Enter Link Disable state */ - cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG); - cfg1 |= PHY_CFG1_LNKDIS; - smc_phy_write(dev, phyaddr, PHY_CFG1_REG, cfg1); - - /* - * Set our fixed capabilities - * Disable auto-negotiation - */ - bmcr = 0; - - if (lp->ctl_rfduplx) - bmcr |= BMCR_FULLDPLX; - - if (lp->ctl_rspeed == 100) - bmcr |= BMCR_SPEED100; - - /* Write our capabilities to the phy control register */ - smc_phy_write(dev, phyaddr, MII_BMCR, bmcr); - - /* Re-Configure the Receive/Phy Control register */ - SMC_SELECT_BANK(lp, 0); - SMC_SET_RPC(lp, lp->rpc_cur_mode); - SMC_SELECT_BANK(lp, 2); - - return 1; -} - -/* - * smc_phy_reset - reset the phy - * @dev: net device - * @phy: phy address - * - * Issue a software reset for the specified PHY and - * wait up to 100ms for the reset to complete. We should - * not access the PHY for 50ms after issuing the reset. - * - * The time to wait appears to be dependent on the PHY. - * - * Must be called with lp->lock locked. - */ -static int smc_phy_reset(struct net_device *dev, int phy) -{ - struct smc_local *lp = netdev_priv(dev); - unsigned int bmcr; - int timeout; - - smc_phy_write(dev, phy, MII_BMCR, BMCR_RESET); - - for (timeout = 2; timeout; timeout--) { - spin_unlock_irq(&lp->lock); - msleep(50); - spin_lock_irq(&lp->lock); - - bmcr = smc_phy_read(dev, phy, MII_BMCR); - if (!(bmcr & BMCR_RESET)) - break; - } - - return bmcr & BMCR_RESET; -} - -/* - * smc_phy_powerdown - powerdown phy - * @dev: net device - * - * Power down the specified PHY - */ -static void smc_phy_powerdown(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - unsigned int bmcr; - int phy = lp->mii.phy_id; - - if (lp->phy_type == 0) - return; - - /* We need to ensure that no calls to smc_phy_configure are - pending. - */ - cancel_work_sync(&lp->phy_configure); - - bmcr = smc_phy_read(dev, phy, MII_BMCR); - smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); -} - -/* - * smc_phy_check_media - check the media status and adjust TCR - * @dev: net device - * @init: set true for initialisation - * - * Select duplex mode depending on negotiation state. This - * also updates our carrier state. 
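Stepping back to smc_phy_fixed() above: with auto-negotiation disabled, the BMCR value it writes reduces to two bits derived from the requested speed and duplex. A standalone sketch, using the bit values from linux/mii.h:

#include <stdio.h>
#include <stdint.h>

#define BMCR_FULLDPLX	0x0100	/* values as in linux/mii.h */
#define BMCR_SPEED100	0x2000

/* build a fixed-configuration BMCR; autoneg (bit 12) stays clear */
static uint16_t fixed_bmcr(int speed, int full_duplex)
{
	uint16_t bmcr = 0;

	if (full_duplex)
		bmcr |= BMCR_FULLDPLX;
	if (speed == 100)
		bmcr |= BMCR_SPEED100;
	return bmcr;
}

int main(void)
{
	printf("100/full -> BMCR 0x%04x\n", fixed_bmcr(100, 1)); /* 0x2100 */
	printf("10/half  -> BMCR 0x%04x\n", fixed_bmcr(10, 0));  /* 0x0000 */
	return 0;
}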
- */ -static void smc_phy_check_media(struct net_device *dev, int init) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - - if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { - /* duplex state has changed */ - if (lp->mii.full_duplex) { - lp->tcr_cur_mode |= TCR_SWFDUP; - } else { - lp->tcr_cur_mode &= ~TCR_SWFDUP; - } - - SMC_SELECT_BANK(lp, 0); - SMC_SET_TCR(lp, lp->tcr_cur_mode); - } -} - -/* - * Configures the specified PHY through the MII management interface - * using Autonegotiation. - * Calls smc_phy_fixed() if the user has requested a certain config. - * If RPC ANEG bit is set, the media selection is dependent purely on - * the selection by the MII (either in the MII BMCR reg or the result - * of autonegotiation.) If the RPC ANEG bit is cleared, the selection - * is controlled by the RPC SPEED and RPC DPLX bits. - */ -static void smc_phy_configure(struct work_struct *work) -{ - struct smc_local *lp = - container_of(work, struct smc_local, phy_configure); - struct net_device *dev = lp->dev; - void __iomem *ioaddr = lp->base; - int phyaddr = lp->mii.phy_id; - int my_phy_caps; /* My PHY capabilities */ - int my_ad_caps; /* My Advertised capabilities */ - int status; - - DBG(3, "%s:smc_program_phy()\n", dev->name); - - spin_lock_irq(&lp->lock); - - /* - * We should not be called if phy_type is zero. - */ - if (lp->phy_type == 0) - goto smc_phy_configure_exit; - - if (smc_phy_reset(dev, phyaddr)) { - printk("%s: PHY reset timed out\n", dev->name); - goto smc_phy_configure_exit; - } - - /* - * Enable PHY Interrupts (for register 18) - * Interrupts listed here are disabled - */ - smc_phy_write(dev, phyaddr, PHY_MASK_REG, - PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD | - PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB | - PHY_INT_SPDDET | PHY_INT_DPLXDET); - - /* Configure the Receive/Phy Control register */ - SMC_SELECT_BANK(lp, 0); - SMC_SET_RPC(lp, lp->rpc_cur_mode); - - /* If the user requested no auto neg, then go set his request */ - if (lp->mii.force_media) { - smc_phy_fixed(dev); - goto smc_phy_configure_exit; - } - - /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ - my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR); - - if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { - printk(KERN_INFO "Auto negotiation NOT supported\n"); - smc_phy_fixed(dev); - goto smc_phy_configure_exit; - } - - my_ad_caps = ADVERTISE_CSMA; /* I am CSMA capable */ - - if (my_phy_caps & BMSR_100BASE4) - my_ad_caps |= ADVERTISE_100BASE4; - if (my_phy_caps & BMSR_100FULL) - my_ad_caps |= ADVERTISE_100FULL; - if (my_phy_caps & BMSR_100HALF) - my_ad_caps |= ADVERTISE_100HALF; - if (my_phy_caps & BMSR_10FULL) - my_ad_caps |= ADVERTISE_10FULL; - if (my_phy_caps & BMSR_10HALF) - my_ad_caps |= ADVERTISE_10HALF; - - /* Disable capabilities not selected by our user */ - if (lp->ctl_rspeed != 100) - my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); - - if (!lp->ctl_rfduplx) - my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); - - /* Update our Auto-Neg Advertisement Register */ - smc_phy_write(dev, phyaddr, MII_ADVERTISE, my_ad_caps); - lp->mii.advertising = my_ad_caps; - - /* - * Read the register back. Without this, it appears that when - * auto-negotiation is restarted, sometimes it isn't ready and - * the link does not come up. 
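The BMSR-to-ADVERTISE mirroring above is mechanical; as a standalone sketch (constants as in linux/mii.h), here is the advertisement word a 100/full-capable PHY ends up with before the user's speed/duplex restrictions are applied:

#include <stdio.h>
#include <stdint.h>

#define BMSR_10HALF		0x0800	/* values as in linux/mii.h */
#define BMSR_10FULL		0x1000
#define BMSR_100HALF		0x2000
#define BMSR_100FULL		0x4000
#define ADVERTISE_CSMA		0x0001
#define ADVERTISE_10HALF	0x0020
#define ADVERTISE_10FULL	0x0040
#define ADVERTISE_100HALF	0x0080
#define ADVERTISE_100FULL	0x0100

/* mirror PHY capabilities into an autoneg advertisement word */
static uint16_t caps_to_adv(uint16_t bmsr)
{
	uint16_t adv = ADVERTISE_CSMA;

	if (bmsr & BMSR_10HALF)  adv |= ADVERTISE_10HALF;
	if (bmsr & BMSR_10FULL)  adv |= ADVERTISE_10FULL;
	if (bmsr & BMSR_100HALF) adv |= ADVERTISE_100HALF;
	if (bmsr & BMSR_100FULL) adv |= ADVERTISE_100FULL;
	return adv;
}

int main(void)
{
	/* a PHY reporting all four media types advertises 0x01e1 */
	printf("ADVERTISE = 0x%04x\n", caps_to_adv(0x7800));
	return 0;
}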
- */ - status = smc_phy_read(dev, phyaddr, MII_ADVERTISE); - - DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps); - DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps); - - /* Restart auto-negotiation process in order to advertise my caps */ - smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART); - - smc_phy_check_media(dev, 1); - -smc_phy_configure_exit: - SMC_SELECT_BANK(lp, 2); - spin_unlock_irq(&lp->lock); -} - -/* - * smc_phy_interrupt - * - * Purpose: Handle interrupts relating to PHY register 18. This is - * called from the "hard" interrupt handler under our private spinlock. - */ -static void smc_phy_interrupt(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - int phyaddr = lp->mii.phy_id; - int phy18; - - DBG(2, "%s: %s\n", dev->name, __func__); - - if (lp->phy_type == 0) - return; - - for(;;) { - smc_phy_check_media(dev, 0); - - /* Read PHY Register 18, Status Output */ - phy18 = smc_phy_read(dev, phyaddr, PHY_INT_REG); - if ((phy18 & PHY_INT_INT) == 0) - break; - } -} - -/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ - -static void smc_10bt_check_media(struct net_device *dev, int init) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - unsigned int old_carrier, new_carrier; - - old_carrier = netif_carrier_ok(dev) ? 1 : 0; - - SMC_SELECT_BANK(lp, 0); - new_carrier = (SMC_GET_EPH_STATUS(lp) & ES_LINK_OK) ? 1 : 0; - SMC_SELECT_BANK(lp, 2); - - if (init || (old_carrier != new_carrier)) { - if (!new_carrier) { - netif_carrier_off(dev); - } else { - netif_carrier_on(dev); - } - if (netif_msg_link(lp)) - printk(KERN_INFO "%s: link %s\n", dev->name, - new_carrier ? "up" : "down"); - } -} - -static void smc_eph_interrupt(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - unsigned int ctl; - - smc_10bt_check_media(dev, 0); - - SMC_SELECT_BANK(lp, 1); - ctl = SMC_GET_CTL(lp); - SMC_SET_CTL(lp, ctl & ~CTL_LE_ENABLE); - SMC_SET_CTL(lp, ctl); - SMC_SELECT_BANK(lp, 2); -} - -/* - * This is the main routine of the driver, to handle the device when - * it needs some attention. - */ -static irqreturn_t smc_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - int status, mask, timeout, card_stats; - int saved_pointer; - - DBG(3, "%s: %s\n", dev->name, __func__); - - spin_lock(&lp->lock); - - /* A preamble may be used when there is a potential race - * between the interruptible transmit functions and this - * ISR. 
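One detail of the service loop below: in the IM_TX_EMPTY_INT branch the chip's counter register packs the single- and multiple-collision counts into two 4-bit nibbles, which the handler folds into dev->stats.collisions. The unpacking in isolation:

#include <stdio.h>

int main(void)
{
	unsigned int card_stats = 0x73;	/* hypothetical counter value */
	unsigned long collisions = 0;

	collisions += card_stats & 0xF;	/* single collisions: 3 */
	card_stats >>= 4;
	collisions += card_stats & 0xF;	/* multiple collisions: 7 */
	printf("collisions += %lu\n", collisions);	/* 10 */
	return 0;
}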
*/ - SMC_INTERRUPT_PREAMBLE; - - saved_pointer = SMC_GET_PTR(lp); - mask = SMC_GET_INT_MASK(lp); - SMC_SET_INT_MASK(lp, 0); - - /* set a timeout value, so I don't stay here forever */ - timeout = MAX_IRQ_LOOPS; - - do { - status = SMC_GET_INT(lp); - - DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n", - dev->name, status, mask, - ({ int meminfo; SMC_SELECT_BANK(lp, 0); - meminfo = SMC_GET_MIR(lp); - SMC_SELECT_BANK(lp, 2); meminfo; }), - SMC_GET_FIFO(lp)); - - status &= mask; - if (!status) - break; - - if (status & IM_TX_INT) { - /* do this before RX as it will free memory quickly */ - DBG(3, "%s: TX int\n", dev->name); - smc_tx(dev); - SMC_ACK_INT(lp, IM_TX_INT); - if (THROTTLE_TX_PKTS) - netif_wake_queue(dev); - } else if (status & IM_RCV_INT) { - DBG(3, "%s: RX irq\n", dev->name); - smc_rcv(dev); - } else if (status & IM_ALLOC_INT) { - DBG(3, "%s: Allocation irq\n", dev->name); - tasklet_hi_schedule(&lp->tx_task); - mask &= ~IM_ALLOC_INT; - } else if (status & IM_TX_EMPTY_INT) { - DBG(3, "%s: TX empty\n", dev->name); - mask &= ~IM_TX_EMPTY_INT; - - /* update stats */ - SMC_SELECT_BANK(lp, 0); - card_stats = SMC_GET_COUNTER(lp); - SMC_SELECT_BANK(lp, 2); - - /* single collisions */ - dev->stats.collisions += card_stats & 0xF; - card_stats >>= 4; - - /* multiple collisions */ - dev->stats.collisions += card_stats & 0xF; - } else if (status & IM_RX_OVRN_INT) { - DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name, - ({ int eph_st; SMC_SELECT_BANK(lp, 0); - eph_st = SMC_GET_EPH_STATUS(lp); - SMC_SELECT_BANK(lp, 2); eph_st; })); - SMC_ACK_INT(lp, IM_RX_OVRN_INT); - dev->stats.rx_errors++; - dev->stats.rx_fifo_errors++; - } else if (status & IM_EPH_INT) { - smc_eph_interrupt(dev); - } else if (status & IM_MDINT) { - SMC_ACK_INT(lp, IM_MDINT); - smc_phy_interrupt(dev); - } else if (status & IM_ERCV_INT) { - SMC_ACK_INT(lp, IM_ERCV_INT); - PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name); - } - } while (--timeout); - - /* restore register states */ - SMC_SET_PTR(lp, saved_pointer); - SMC_SET_INT_MASK(lp, mask); - spin_unlock(&lp->lock); - -#ifndef CONFIG_NET_POLL_CONTROLLER - if (timeout == MAX_IRQ_LOOPS) - PRINTK("%s: spurious interrupt (mask = 0x%02x)\n", - dev->name, mask); -#endif - DBG(3, "%s: Interrupt done (%d loops)\n", - dev->name, MAX_IRQ_LOOPS - timeout); - - /* - * We return IRQ_HANDLED unconditionally here even if there was - * nothing to do. There is a possibility that a packet might - * get enqueued into the chip right after TX_EMPTY_INT is raised - * but just before the CPU acknowledges the IRQ. - * Better take an unneeded IRQ in some occasions than complexifying - * the code for all cases. - */ - return IRQ_HANDLED; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling receive - used by netconsole and other diagnostic tools - * to allow network i/o with interrupts disabled. - */ -static void smc_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - smc_interrupt(dev->irq, dev); - enable_irq(dev->irq); -} -#endif - -/* Our watchdog timed out. 
Called by the networking layer */
-static void smc_timeout(struct net_device *dev)
-{
-	struct smc_local *lp = netdev_priv(dev);
-	void __iomem *ioaddr = lp->base;
-	int status, mask, eph_st, meminfo, fifo;
-
-	DBG(2, "%s: %s\n", dev->name, __func__);
-
-	spin_lock_irq(&lp->lock);
-	status = SMC_GET_INT(lp);
-	mask = SMC_GET_INT_MASK(lp);
-	fifo = SMC_GET_FIFO(lp);
-	SMC_SELECT_BANK(lp, 0);
-	eph_st = SMC_GET_EPH_STATUS(lp);
-	meminfo = SMC_GET_MIR(lp);
-	SMC_SELECT_BANK(lp, 2);
-	spin_unlock_irq(&lp->lock);
-	PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
-		"MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
-		dev->name, status, mask, meminfo, fifo, eph_st );
-
-	smc_reset(dev);
-	smc_enable(dev);
-
-	/*
-	 * Reconfiguring the PHY doesn't seem like a bad idea here, but
-	 * smc_phy_configure() calls msleep() which calls schedule_timeout()
-	 * which calls schedule(). Hence we use a work queue.
-	 */
-	if (lp->phy_type != 0)
-		schedule_work(&lp->phy_configure);
-
-	/* We can accept TX packets again */
-	dev->trans_start = jiffies; /* prevent tx timeout */
-	netif_wake_queue(dev);
-}
-
-/*
- * This routine will, depending on the values passed to it,
- * either make it accept multicast packets, go into
- * promiscuous mode (for TCPDUMP and cousins) or accept
- * a select set of multicast packets
- */
-static void smc_set_multicast_list(struct net_device *dev)
-{
-	struct smc_local *lp = netdev_priv(dev);
-	void __iomem *ioaddr = lp->base;
-	unsigned char multicast_table[8];
-	int update_multicast = 0;
-
-	DBG(2, "%s: %s\n", dev->name, __func__);
-
-	if (dev->flags & IFF_PROMISC) {
-		DBG(2, "%s: RCR_PRMS\n", dev->name);
-		lp->rcr_cur_mode |= RCR_PRMS;
-	}
-
-/* BUG? I never disable promiscuous mode if multicasting was turned on.
-   Now, I turn off promiscuous mode, but I don't do anything to multicasting
-   when promiscuous mode is turned on.
-*/
-
-	/*
-	 * Here, I am setting this to accept all multicast packets.
-	 * I don't need to zero the multicast table, because the flag is
-	 * checked before the table is used.
-	 */
-	else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
-		DBG(2, "%s: RCR_ALMUL\n", dev->name);
-		lp->rcr_cur_mode |= RCR_ALMUL;
-	}
-
-	/*
-	 * This sets the internal hardware table to filter out unwanted
-	 * multicast packets before they take up memory.
-	 *
-	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
-	 * the address are the offset into the table. If that bit is 1, then
-	 * the multicast packet is accepted. Otherwise, it's dropped silently.
-	 *
-	 * To use the 6 bits as an offset into the table, the high 3 bits are
-	 * the number of the 8 bit register, while the low 3 bits are the bit
-	 * within that register.
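For readers who want to check which table bit a given group address selects, here is a standalone sketch of the hash computation, with a local reflected CRC-32 standing in for the kernel's crc32_le():

#include <stdio.h>
#include <stdint.h>

/* reflected CRC-32 (poly 0xEDB88320), seeded with ~0 and not post-
 * inverted, matching what the driver gets from crc32_le(~0, ...) */
static uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *p, int len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
	}
	return crc;
}

int main(void)
{
	static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
	const uint8_t mac[6] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
	uint8_t table[8] = {0};
	int position = crc32_le_sketch(~0u, mac, 6) & 0x3f;

	/* same messy swapping as the driver */
	table[invert3[position & 7]] |= 1 << invert3[(position >> 3) & 7];
	printf("position=%d -> register %d, bit %d\n", position,
	       invert3[position & 7], invert3[(position >> 3) & 7]);
	return 0;
}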
- */
-	else if (!netdev_mc_empty(dev)) {
-		struct netdev_hw_addr *ha;
-
-		/* table for flipping the order of 3 bits */
-		static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
-
-		/* start with a table of all zeros: reject all */
-		memset(multicast_table, 0, sizeof(multicast_table));
-
-		netdev_for_each_mc_addr(ha, dev) {
-			int position;
-
-			/* only use the low order bits */
-			position = crc32_le(~0, ha->addr, 6) & 0x3f;
-
-			/* do some messy swapping to put the bit in the right spot */
-			multicast_table[invert3[position&7]] |=
-				(1<<invert3[(position>>3)&7]);
-		}
-
-		/* be sure I get rid of flags I might have set */
-		lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
-
-		/* now, the table can be loaded into the chipset */
-		update_multicast = 1;
-	} else {
-		DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name);
-		lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
-
-		/*
-		 * since I'm disabling all multicast entirely, I need to
-		 * clear the multicast list
-		 */
-		memset(multicast_table, 0, sizeof(multicast_table));
-		update_multicast = 1;
-	}
-
-	spin_lock_irq(&lp->lock);
-	SMC_SELECT_BANK(lp, 0);
-	SMC_SET_RCR(lp, lp->rcr_cur_mode);
-	if (update_multicast) {
-		SMC_SELECT_BANK(lp, 3);
-		SMC_SET_MCAST(lp, multicast_table);
-	}
-	SMC_SELECT_BANK(lp, 2);
-	spin_unlock_irq(&lp->lock);
-}
-
-
-/*
- * Open and Initialize the board
- *
- * Set up everything, reset the card, etc..
- */
-static int
-smc_open(struct net_device *dev)
-{
-	struct smc_local *lp = netdev_priv(dev);
-
-	DBG(2, "%s: %s\n", dev->name, __func__);
-
-	/*
-	 * Check that the address is valid.  If it's not, refuse
-	 * to bring the device up.  The user must specify an
-	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
-	 */
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		PRINTK("%s: no valid ethernet hw addr\n", __func__);
-		return -EINVAL;
-	}
-
-	/* Setup the default Register Modes */
-	lp->tcr_cur_mode = TCR_DEFAULT;
-	lp->rcr_cur_mode = RCR_DEFAULT;
-	lp->rpc_cur_mode = RPC_DEFAULT |
-				lp->cfg.leda << RPC_LSXA_SHFT |
-				lp->cfg.ledb << RPC_LSXB_SHFT;
-
-	/*
-	 * If we are not using a MII interface, we need to
-	 * monitor our own carrier signal to detect faults.
-	 */
-	if (lp->phy_type == 0)
-		lp->tcr_cur_mode |= TCR_MON_CSN;
-
-	/* reset the hardware */
-	smc_reset(dev);
-	smc_enable(dev);
-
-	/* Configure the PHY, initialize the link state */
-	if (lp->phy_type != 0)
-		smc_phy_configure(&lp->phy_configure);
-	else {
-		spin_lock_irq(&lp->lock);
-		smc_10bt_check_media(dev, 1);
-		spin_unlock_irq(&lp->lock);
-	}
-
-	netif_start_queue(dev);
-	return 0;
-}
-
-/*
- * smc_close
- *
- * this makes the board clean up everything that it can
- * and not talk to the outside world.
Caused by - * an 'ifconfig ethX down' - */ -static int smc_close(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - - DBG(2, "%s: %s\n", dev->name, __func__); - - netif_stop_queue(dev); - netif_carrier_off(dev); - - /* clear everything */ - smc_shutdown(dev); - tasklet_kill(&lp->tx_task); - smc_phy_powerdown(dev); - return 0; -} - -/* - * Ethtool support - */ -static int -smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct smc_local *lp = netdev_priv(dev); - int ret; - - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; - - if (lp->phy_type != 0) { - spin_lock_irq(&lp->lock); - ret = mii_ethtool_gset(&lp->mii, cmd); - spin_unlock_irq(&lp->lock); - } else { - cmd->supported = SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_TP | SUPPORTED_AUI; - - if (lp->ctl_rspeed == 10) - ethtool_cmd_speed_set(cmd, SPEED_10); - else if (lp->ctl_rspeed == 100) - ethtool_cmd_speed_set(cmd, SPEED_100); - - cmd->autoneg = AUTONEG_DISABLE; - cmd->transceiver = XCVR_INTERNAL; - cmd->port = 0; - cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF; - - ret = 0; - } - - return ret; -} - -static int -smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct smc_local *lp = netdev_priv(dev); - int ret; - - if (lp->phy_type != 0) { - spin_lock_irq(&lp->lock); - ret = mii_ethtool_sset(&lp->mii, cmd); - spin_unlock_irq(&lp->lock); - } else { - if (cmd->autoneg != AUTONEG_DISABLE || - cmd->speed != SPEED_10 || - (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || - (cmd->port != PORT_TP && cmd->port != PORT_AUI)) - return -EINVAL; - -// lp->port = cmd->port; - lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; - -// if (netif_running(dev)) -// smc_set_port(dev); - - ret = 0; - } - - return ret; -} - -static void -smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - strncpy(info->driver, CARDNAME, sizeof(info->driver)); - strncpy(info->version, version, sizeof(info->version)); - strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info)); -} - -static int smc_ethtool_nwayreset(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - int ret = -EINVAL; - - if (lp->phy_type != 0) { - spin_lock_irq(&lp->lock); - ret = mii_nway_restart(&lp->mii); - spin_unlock_irq(&lp->lock); - } - - return ret; -} - -static u32 smc_ethtool_getmsglevel(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - return lp->msg_enable; -} - -static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level) -{ - struct smc_local *lp = netdev_priv(dev); - lp->msg_enable = level; -} - -static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word) -{ - u16 ctl; - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; - - spin_lock_irq(&lp->lock); - /* load word into GP register */ - SMC_SELECT_BANK(lp, 1); - SMC_SET_GP(lp, word); - /* set the address to put the data in EEPROM */ - SMC_SELECT_BANK(lp, 2); - SMC_SET_PTR(lp, addr); - /* tell it to write */ - SMC_SELECT_BANK(lp, 1); - ctl = SMC_GET_CTL(lp); - SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE)); - /* wait for it to finish */ - do { - udelay(1); - } while (SMC_GET_CTL(lp) & CTL_STORE); - /* clean up */ - SMC_SET_CTL(lp, ctl); - SMC_SELECT_BANK(lp, 2); - spin_unlock_irq(&lp->lock); - return 0; -} - -static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word) -{ - u16 ctl; - struct smc_local *lp = netdev_priv(dev); - void __iomem *ioaddr = lp->base; 
- - spin_lock_irq(&lp->lock); - /* set the EEPROM address to get the data from */ - SMC_SELECT_BANK(lp, 2); - SMC_SET_PTR(lp, addr | PTR_READ); - /* tell it to load */ - SMC_SELECT_BANK(lp, 1); - SMC_SET_GP(lp, 0xffff); /* init to known */ - ctl = SMC_GET_CTL(lp); - SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD)); - /* wait for it to finish */ - do { - udelay(1); - } while (SMC_GET_CTL(lp) & CTL_RELOAD); - /* read word from GP register */ - *word = SMC_GET_GP(lp); - /* clean up */ - SMC_SET_CTL(lp, ctl); - SMC_SELECT_BANK(lp, 2); - spin_unlock_irq(&lp->lock); - return 0; -} - -static int smc_ethtool_geteeprom_len(struct net_device *dev) -{ - return 0x23 * 2; -} - -static int smc_ethtool_geteeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - int i; - int imax; - - DBG(1, "Reading %d bytes at %d(0x%x)\n", - eeprom->len, eeprom->offset, eeprom->offset); - imax = smc_ethtool_geteeprom_len(dev); - for (i = 0; i < eeprom->len; i += 2) { - int ret; - u16 wbuf; - int offset = i + eeprom->offset; - if (offset > imax) - break; - ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf); - if (ret != 0) - return ret; - DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1); - data[i] = (wbuf >> 8) & 0xff; - data[i+1] = wbuf & 0xff; - } - return 0; -} - -static int smc_ethtool_seteeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - int i; - int imax; - - DBG(1, "Writing %d bytes to %d(0x%x)\n", - eeprom->len, eeprom->offset, eeprom->offset); - imax = smc_ethtool_geteeprom_len(dev); - for (i = 0; i < eeprom->len; i += 2) { - int ret; - u16 wbuf; - int offset = i + eeprom->offset; - if (offset > imax) - break; - wbuf = (data[i] << 8) | data[i + 1]; - DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1); - ret = smc_write_eeprom_word(dev, offset >> 1, wbuf); - if (ret != 0) - return ret; - } - return 0; -} - - -static const struct ethtool_ops smc_ethtool_ops = { - .get_settings = smc_ethtool_getsettings, - .set_settings = smc_ethtool_setsettings, - .get_drvinfo = smc_ethtool_getdrvinfo, - - .get_msglevel = smc_ethtool_getmsglevel, - .set_msglevel = smc_ethtool_setmsglevel, - .nway_reset = smc_ethtool_nwayreset, - .get_link = ethtool_op_get_link, - .get_eeprom_len = smc_ethtool_geteeprom_len, - .get_eeprom = smc_ethtool_geteeprom, - .set_eeprom = smc_ethtool_seteeprom, -}; - -static const struct net_device_ops smc_netdev_ops = { - .ndo_open = smc_open, - .ndo_stop = smc_close, - .ndo_start_xmit = smc_hard_start_xmit, - .ndo_tx_timeout = smc_timeout, - .ndo_set_multicast_list = smc_set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = smc_poll_controller, -#endif -}; - -/* - * smc_findirq - * - * This routine has a simple purpose -- make the SMC chip generate an - * interrupt, so an auto-detect routine can detect it, and find the IRQ, - */ -/* - * does this still work? - * - * I just deleted auto_irq.c, since it was never built... - * --jgarzik - */ -static int __devinit smc_findirq(struct smc_local *lp) -{ - void __iomem *ioaddr = lp->base; - int timeout = 20; - unsigned long cookie; - - DBG(2, "%s: %s\n", CARDNAME, __func__); - - cookie = probe_irq_on(); - - /* - * What I try to do here is trigger an ALLOC_INT. This is done - * by allocating a small chunk of memory, which will give an interrupt - * when done. 
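Back on the EEPROM helpers above: get_eeprom exposes each 16-bit word big-endian, high byte first, and the set path is the exact inverse, so a read/modify/write round-trips losslessly. A standalone check of the byte packing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t wbuf = 0xbeef, back;
	uint8_t data[2];

	/* geteeprom path: EEPROM word -> high byte first */
	data[0] = (wbuf >> 8) & 0xff;
	data[1] = wbuf & 0xff;

	/* seteeprom path: byte pair -> EEPROM word (the inverse) */
	back = (data[0] << 8) | data[1];
	printf("%02x %02x -> 0x%04x\n", data[0], data[1], back);
	return 0;
}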
- */ - /* enable ALLOCation interrupts ONLY */ - SMC_SELECT_BANK(lp, 2); - SMC_SET_INT_MASK(lp, IM_ALLOC_INT); - - /* - * Allocate 512 bytes of memory. Note that the chip was just - * reset so all the memory is available - */ - SMC_SET_MMU_CMD(lp, MC_ALLOC | 1); - - /* - * Wait until positive that the interrupt has been generated - */ - do { - int int_status; - udelay(10); - int_status = SMC_GET_INT(lp); - if (int_status & IM_ALLOC_INT) - break; /* got the interrupt */ - } while (--timeout); - - /* - * there is really nothing that I can do here if timeout fails, - * as autoirq_report will return a 0 anyway, which is what I - * want in this case. Plus, the clean up is needed in both - * cases. - */ - - /* and disable all interrupts again */ - SMC_SET_INT_MASK(lp, 0); - - /* and return what I found */ - return probe_irq_off(cookie); -} - -/* - * Function: smc_probe(unsigned long ioaddr) - * - * Purpose: - * Tests to see if a given ioaddr points to an SMC91x chip. - * Returns a 0 on success - * - * Algorithm: - * (1) see if the high byte of BANK_SELECT is 0x33 - * (2) compare the ioaddr with the base register's address - * (3) see if I recognize the chip ID in the appropriate register - * - * Here I do typical initialization tasks. - * - * o Initialize the structure if needed - * o print out my vanity message if not done so already - * o print out what type of hardware is detected - * o print out the ethernet address - * o find the IRQ - * o set up my private data - * o configure the dev structure with my subroutines - * o actually GRAB the irq. - * o GRAB the region - */ -static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr, - unsigned long irq_flags) -{ - struct smc_local *lp = netdev_priv(dev); - static int version_printed = 0; - int retval; - unsigned int val, revision_register; - const char *version_string; - - DBG(2, "%s: %s\n", CARDNAME, __func__); - - /* First, see if the high byte is 0x33 */ - val = SMC_CURRENT_BANK(lp); - DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val); - if ((val & 0xFF00) != 0x3300) { - if ((val & 0xFF) == 0x33) { - printk(KERN_WARNING - "%s: Detected possible byte-swapped interface" - " at IOADDR %p\n", CARDNAME, ioaddr); - } - retval = -ENODEV; - goto err_out; - } - - /* - * The above MIGHT indicate a device, but I need to write to - * further test this. - */ - SMC_SELECT_BANK(lp, 0); - val = SMC_CURRENT_BANK(lp); - if ((val & 0xFF00) != 0x3300) { - retval = -ENODEV; - goto err_out; - } - - /* - * well, we've already written once, so hopefully another - * time won't hurt. This time, I need to switch the bank - * register to bank 1, so I can access the base address - * register - */ - SMC_SELECT_BANK(lp, 1); - val = SMC_GET_BASE(lp); - val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT; - if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) { - printk("%s: IOADDR %p doesn't match configuration (%x).\n", - CARDNAME, ioaddr, val); - } - - /* - * check if the revision register is something that I - * recognize. These might need to be added to later, - * as future revisions could be added. - */ - SMC_SELECT_BANK(lp, 3); - revision_register = SMC_GET_REV(lp); - DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register); - version_string = chip_ids[ (revision_register >> 4) & 0xF]; - if (!version_string || (revision_register & 0xff00) != 0x3300) { - /* I don't recognize this chip, so... 
*/ - printk("%s: IO %p: Unrecognized revision register 0x%04x" - ", Contact author.\n", CARDNAME, - ioaddr, revision_register); - - retval = -ENODEV; - goto err_out; - } - - /* At this point I'll assume that the chip is an SMC91x. */ - if (version_printed++ == 0) - printk("%s", version); - - /* fill in some of the fields */ - dev->base_addr = (unsigned long)ioaddr; - lp->base = ioaddr; - lp->version = revision_register & 0xff; - spin_lock_init(&lp->lock); - - /* Get the MAC address */ - SMC_SELECT_BANK(lp, 1); - SMC_GET_MAC_ADDR(lp, dev->dev_addr); - - /* now, reset the chip, and put it into a known state */ - smc_reset(dev); - - /* - * If dev->irq is 0, then the device has to be banged on to see - * what the IRQ is. - * - * This banging doesn't always detect the IRQ, for unknown reasons. - * a workaround is to reset the chip and try again. - * - * Interestingly, the DOS packet driver *SETS* the IRQ on the card to - * be what is requested on the command line. I don't do that, mostly - * because the card that I have uses a non-standard method of accessing - * the IRQs, and because this _should_ work in most configurations. - * - * Specifying an IRQ is done with the assumption that the user knows - * what (s)he is doing. No checking is done!!!! - */ - if (dev->irq < 1) { - int trials; - - trials = 3; - while (trials--) { - dev->irq = smc_findirq(lp); - if (dev->irq) - break; - /* kick the card and try again */ - smc_reset(dev); - } - } - if (dev->irq == 0) { - printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n", - dev->name); - retval = -ENODEV; - goto err_out; - } - dev->irq = irq_canonicalize(dev->irq); - - /* Fill in the fields of the device structure with ethernet values. */ - ether_setup(dev); - - dev->watchdog_timeo = msecs_to_jiffies(watchdog); - dev->netdev_ops = &smc_netdev_ops; - dev->ethtool_ops = &smc_ethtool_ops; - - tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); - INIT_WORK(&lp->phy_configure, smc_phy_configure); - lp->dev = dev; - lp->mii.phy_id_mask = 0x1f; - lp->mii.reg_num_mask = 0x1f; - lp->mii.force_media = 0; - lp->mii.full_duplex = 0; - lp->mii.dev = dev; - lp->mii.mdio_read = smc_phy_read; - lp->mii.mdio_write = smc_phy_write; - - /* - * Locate the phy, if any. - */ - if (lp->version >= (CHIP_91100 << 4)) - smc_phy_detect(dev); - - /* then shut everything down to save power */ - smc_shutdown(dev); - smc_phy_powerdown(dev); - - /* Set default parameters */ - lp->msg_enable = NETIF_MSG_LINK; - lp->ctl_rfduplx = 0; - lp->ctl_rspeed = 10; - - if (lp->version >= (CHIP_91100 << 4)) { - lp->ctl_rfduplx = 1; - lp->ctl_rspeed = 100; - } - - /* Grab the IRQ */ - retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev); - if (retval) - goto err_out; - -#ifdef CONFIG_ARCH_PXA -# ifdef SMC_USE_PXA_DMA - lp->cfg.flags |= SMC91X_USE_DMA; -# endif - if (lp->cfg.flags & SMC91X_USE_DMA) { - int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, - smc_pxa_dma_irq, NULL); - if (dma >= 0) - dev->dma = dma; - } -#endif - - retval = register_netdev(dev); - if (retval == 0) { - /* now, print out the card info, in a short format.. */ - printk("%s: %s (rev %d) at %p IRQ %d", - dev->name, version_string, revision_register & 0x0f, - lp->base, dev->irq); - - if (dev->dma != (unsigned char)-1) - printk(" DMA %d", dev->dma); - - printk("%s%s\n", - lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "", - THROTTLE_TX_PKTS ? " [throttle_tx]" : ""); - - if (!is_valid_ether_addr(dev->dev_addr)) { - printk("%s: Invalid ethernet MAC address. 
Please " - "set using ifconfig\n", dev->name); - } else { - /* Print the Ethernet address */ - printk("%s: Ethernet addr: %pM\n", - dev->name, dev->dev_addr); - } - - if (lp->phy_type == 0) { - PRINTK("%s: No PHY found\n", dev->name); - } else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) { - PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name); - } else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) { - PRINTK("%s: PHY LAN83C180\n", dev->name); - } - } - -err_out: -#ifdef CONFIG_ARCH_PXA - if (retval && dev->dma != (unsigned char)-1) - pxa_free_dma(dev->dma); -#endif - return retval; -} - -static int smc_enable_device(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct smc_local *lp = netdev_priv(ndev); - unsigned long flags; - unsigned char ecor, ecsr; - void __iomem *addr; - struct resource * res; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); - if (!res) - return 0; - - /* - * Map the attribute space. This is overkill, but clean. - */ - addr = ioremap(res->start, ATTRIB_SIZE); - if (!addr) - return -ENOMEM; - - /* - * Reset the device. We must disable IRQs around this - * since a reset causes the IRQ line become active. - */ - local_irq_save(flags); - ecor = readb(addr + (ECOR << SMC_IO_SHIFT)) & ~ECOR_RESET; - writeb(ecor | ECOR_RESET, addr + (ECOR << SMC_IO_SHIFT)); - readb(addr + (ECOR << SMC_IO_SHIFT)); - - /* - * Wait 100us for the chip to reset. - */ - udelay(100); - - /* - * The device will ignore all writes to the enable bit while - * reset is asserted, even if the reset bit is cleared in the - * same write. Must clear reset first, then enable the device. - */ - writeb(ecor, addr + (ECOR << SMC_IO_SHIFT)); - writeb(ecor | ECOR_ENABLE, addr + (ECOR << SMC_IO_SHIFT)); - - /* - * Set the appropriate byte/word mode. - */ - ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; - if (!SMC_16BIT(lp)) - ecsr |= ECSR_IOIS8; - writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); - local_irq_restore(flags); - - iounmap(addr); - - /* - * Wait for the chip to wake up. We could poll the control - * register in the main register space, but that isn't mapped - * yet. We know this is going to take 750us. 
- */ - msleep(1); - - return 0; -} - -static int smc_request_attrib(struct platform_device *pdev, - struct net_device *ndev) -{ - struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); - struct smc_local *lp __maybe_unused = netdev_priv(ndev); - - if (!res) - return 0; - - if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME)) - return -EBUSY; - - return 0; -} - -static void smc_release_attrib(struct platform_device *pdev, - struct net_device *ndev) -{ - struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); - struct smc_local *lp __maybe_unused = netdev_priv(ndev); - - if (res) - release_mem_region(res->start, ATTRIB_SIZE); -} - -static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) -{ - if (SMC_CAN_USE_DATACS) { - struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); - struct smc_local *lp = netdev_priv(ndev); - - if (!res) - return; - - if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { - printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME); - return; - } - - lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); - } -} - -static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) -{ - if (SMC_CAN_USE_DATACS) { - struct smc_local *lp = netdev_priv(ndev); - struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); - - if (lp->datacs) - iounmap(lp->datacs); - - lp->datacs = NULL; - - if (res) - release_mem_region(res->start, SMC_DATA_EXTENT); - } -} - -/* - * smc_init(void) - * Input parameters: - * dev->base_addr == 0, try to find all possible locations - * dev->base_addr > 0x1ff, this is the address to check - * dev->base_addr == , return failure code - * - * Output: - * 0 --> there is a device - * anything else, error - */ -static int __devinit smc_drv_probe(struct platform_device *pdev) -{ - struct smc91x_platdata *pd = pdev->dev.platform_data; - struct smc_local *lp; - struct net_device *ndev; - struct resource *res, *ires; - unsigned int __iomem *addr; - unsigned long irq_flags = SMC_IRQ_FLAGS; - int ret; - - ndev = alloc_etherdev(sizeof(struct smc_local)); - if (!ndev) { - printk("%s: could not allocate device.\n", CARDNAME); - ret = -ENOMEM; - goto out; - } - SET_NETDEV_DEV(ndev, &pdev->dev); - - /* get configuration from platform data, only allow use of - * bus width if both SMC_CAN_USE_xxx and SMC91X_USE_xxx are set. - */ - - lp = netdev_priv(ndev); - - if (pd) { - memcpy(&lp->cfg, pd, sizeof(lp->cfg)); - lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); - } else { - lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; - lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; - lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; - lp->cfg.flags |= (nowait) ? 
SMC91X_NOWAIT : 0; - } - - if (!lp->cfg.leda && !lp->cfg.ledb) { - lp->cfg.leda = RPC_LSA_DEFAULT; - lp->cfg.ledb = RPC_LSB_DEFAULT; - } - - ndev->dma = (unsigned char)-1; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); - if (!res) - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENODEV; - goto out_free_netdev; - } - - - if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) { - ret = -EBUSY; - goto out_free_netdev; - } - - ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!ires) { - ret = -ENODEV; - goto out_release_io; - } - - ndev->irq = ires->start; - - if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) - irq_flags = ires->flags & IRQF_TRIGGER_MASK; - - ret = smc_request_attrib(pdev, ndev); - if (ret) - goto out_release_io; -#if defined(CONFIG_SA1100_ASSABET) - NCR_0 |= NCR_ENET_OSC_EN; -#endif - platform_set_drvdata(pdev, ndev); - ret = smc_enable_device(pdev); - if (ret) - goto out_release_attrib; - - addr = ioremap(res->start, SMC_IO_EXTENT); - if (!addr) { - ret = -ENOMEM; - goto out_release_attrib; - } - -#ifdef CONFIG_ARCH_PXA - { - struct smc_local *lp = netdev_priv(ndev); - lp->device = &pdev->dev; - lp->physaddr = res->start; - } -#endif - - ret = smc_probe(ndev, addr, irq_flags); - if (ret != 0) - goto out_iounmap; - - smc_request_datacs(pdev, ndev); - - return 0; - - out_iounmap: - platform_set_drvdata(pdev, NULL); - iounmap(addr); - out_release_attrib: - smc_release_attrib(pdev, ndev); - out_release_io: - release_mem_region(res->start, SMC_IO_EXTENT); - out_free_netdev: - free_netdev(ndev); - out: - printk("%s: not found (%d).\n", CARDNAME, ret); - - return ret; -} - -static int __devexit smc_drv_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct smc_local *lp = netdev_priv(ndev); - struct resource *res; - - platform_set_drvdata(pdev, NULL); - - unregister_netdev(ndev); - - free_irq(ndev->irq, ndev); - -#ifdef CONFIG_ARCH_PXA - if (ndev->dma != (unsigned char)-1) - pxa_free_dma(ndev->dma); -#endif - iounmap(lp->base); - - smc_release_datacs(pdev,ndev); - smc_release_attrib(pdev,ndev); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); - if (!res) - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, SMC_IO_EXTENT); - - free_netdev(ndev); - - return 0; -} - -static int smc_drv_suspend(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); - - if (ndev) { - if (netif_running(ndev)) { - netif_device_detach(ndev); - smc_shutdown(ndev); - smc_phy_powerdown(ndev); - } - } - return 0; -} - -static int smc_drv_resume(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); - - if (ndev) { - struct smc_local *lp = netdev_priv(ndev); - smc_enable_device(pdev); - if (netif_running(ndev)) { - smc_reset(ndev); - smc_enable(ndev); - if (lp->phy_type != 0) - smc_phy_configure(&lp->phy_configure); - netif_device_attach(ndev); - } - } - return 0; -} - -#ifdef CONFIG_OF -static const struct of_device_id smc91x_match[] = { - { .compatible = "smsc,lan91c94", }, - { .compatible = "smsc,lan91c111", }, - {}, -}; -MODULE_DEVICE_TABLE(of, smc91x_match); -#else -#define smc91x_match NULL -#endif - -static struct dev_pm_ops smc_drv_pm_ops = { - .suspend = smc_drv_suspend, - .resume = smc_drv_resume, -}; - -static struct platform_driver smc_driver = 
{ - .probe = smc_drv_probe, - .remove = __devexit_p(smc_drv_remove), - .driver = { - .name = CARDNAME, - .owner = THIS_MODULE, - .pm = &smc_drv_pm_ops, - .of_match_table = smc91x_match, - }, -}; - -static int __init smc_init(void) -{ - return platform_driver_register(&smc_driver); -} - -static void __exit smc_cleanup(void) -{ - platform_driver_unregister(&smc_driver); -} - -module_init(smc_init); -module_exit(smc_cleanup); diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h deleted file mode 100644 index 5f53fbb..0000000 --- a/drivers/net/smc91x.h +++ /dev/null @@ -1,1180 +0,0 @@ -/*------------------------------------------------------------------------ - . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device. - . - . Copyright (C) 1996 by Erik Stahlman - . Copyright (C) 2001 Standard Microsystems Corporation - . Developed by Simple Network Magic Corporation - . Copyright (C) 2003 Monta Vista Software, Inc. - . Unified SMC91x driver by Nicolas Pitre - . - . This program is free software; you can redistribute it and/or modify - . it under the terms of the GNU General Public License as published by - . the Free Software Foundation; either version 2 of the License, or - . (at your option) any later version. - . - . This program is distributed in the hope that it will be useful, - . but WITHOUT ANY WARRANTY; without even the implied warranty of - . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - . GNU General Public License for more details. - . - . You should have received a copy of the GNU General Public License - . along with this program; if not, write to the Free Software - . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - . - . Information contained in this file was obtained from the LAN91C111 - . manual from SMC. To get a copy, if you really want one, you can find - . information under www.smsc.com. - . - . Authors - . Erik Stahlman - . Daris A Nevil - . Nicolas Pitre - . - ---------------------------------------------------------------------------*/ -#ifndef _SMC91X_H_ -#define _SMC91X_H_ - -#include - -/* - * Define your architecture specific bus configuration parameters here. 
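Each platform block below follows the same recipe: declare which access widths the bus wiring supports, map the SMC_in*/SMC_out* accessors onto the right MMIO primitives, and say where the IRQ trigger comes from. As a template, a hypothetical new platform with 16-bit-only MMIO would add something like:

/* hypothetical example platform: 16-bit-only MMIO, no address shift */
#define SMC_CAN_USE_8BIT	0
#define SMC_CAN_USE_16BIT	1
#define SMC_CAN_USE_32BIT	0
#define SMC_NOWAIT		1
#define SMC_IO_SHIFT		0

#define SMC_inw(a, r)		readw((a) + (r))
#define SMC_outw(v, a, r)	writew(v, (a) + (r))
#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)

#define SMC_IRQ_FLAGS		(-1)	/* take the trigger from the resource */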
- */ - -#if defined(CONFIG_ARCH_LUBBOCK) ||\ - defined(CONFIG_MACH_MAINSTONE) ||\ - defined(CONFIG_MACH_ZYLONITE) ||\ - defined(CONFIG_MACH_LITTLETON) ||\ - defined(CONFIG_MACH_ZYLONITE2) ||\ - defined(CONFIG_ARCH_VIPER) ||\ - defined(CONFIG_MACH_STARGATE2) - -#include - -/* Now the bus width is specified in the platform data - * pretend here to support all I/O access types - */ -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 1 -#define SMC_NOWAIT 1 - -#define SMC_IO_SHIFT (lp->io_shift) - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - -/* We actually can't write halfwords properly if not word aligned */ -static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) -{ - if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { - unsigned int v = val << 16; - v |= readl(ioaddr + (reg & ~2)) & 0xffff; - writel(v, ioaddr + (reg & ~2)); - } else { - writew(val, ioaddr + reg); - } -} - -#elif defined(CONFIG_SA1100_PLEB) -/* We can only do 16-bit reads and writes in the static memory space. */ -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#define SMC_IRQ_FLAGS (-1) - -#elif defined(CONFIG_SA1100_ASSABET) - -#include - -/* We can only do 8-bit reads and writes in the static memory space. */ -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 0 -#define SMC_CAN_USE_32BIT 0 -#define SMC_NOWAIT 1 - -/* The first two address lines aren't connected... 
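With the low two address lines unused, every register lands at byte offset reg << 2, which is all SMC_IO_SHIFT expresses. A quick standalone look at what the shift does to the register map:

#include <stdio.h>

/* where the bank-select register (offset 14) lands for each shift,
 * and the resulting I/O extent (16 << shift) */
int main(void)
{
	int shift;

	for (shift = 0; shift <= 2; shift++)
		printf("SMC_IO_SHIFT=%d: BANK_SELECT at 0x%02x, extent %d\n",
		       shift, 14 << shift, 16 << shift);
	return 0;
}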
*/ -#define SMC_IO_SHIFT 2 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) -#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - -#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \ - defined(CONFIG_MACH_NOMADIK_8815NHK) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 - -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#elif defined(CONFIG_ARCH_INNOKOM) || \ - defined(CONFIG_ARCH_PXA_IDP) || \ - defined(CONFIG_ARCH_RAMSES) || \ - defined(CONFIG_ARCH_PCM027) - -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 1 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 -#define SMC_USE_PXA_DMA 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - -/* We actually can't write halfwords properly if not word aligned */ -static inline void -SMC_outw(u16 val, void __iomem *ioaddr, int reg) -{ - if (reg & 2) { - unsigned int v = val << 16; - v |= readl(ioaddr + (reg & ~2)) & 0xffff; - writel(v, ioaddr + (reg & ~2)); - } else { - writew(val, ioaddr + reg); - } -} - -#elif defined(CONFIG_SH_SH4202_MICRODEV) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 - -#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000) -#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000) -#define SMC_inl(a, r) inl((a) + (r) - 0xa0000000) -#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000) -#define SMC_outw(v, a, r) outw(v, (a) + (r) - 0xa0000000) -#define SMC_outl(v, a, r) outl(v, (a) + (r) - 0xa0000000) -#define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l) -#define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l) -#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l) -#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l) - -#define SMC_IRQ_FLAGS (0) - -#elif defined(CONFIG_M32R) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 - -#define SMC_inb(a, r) inb(((u32)a) + (r)) -#define SMC_inw(a, r) inw(((u32)a) + (r)) -#define SMC_outb(v, a, r) outb(v, ((u32)a) + (r)) -#define SMC_outw(v, a, r) outw(v, ((u32)a) + (r)) -#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l) -#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l) - -#define SMC_IRQ_FLAGS (0) - -#define RPC_LSA_DEFAULT RPC_LED_TX_RX -#define RPC_LSB_DEFAULT RPC_LED_100_10 - -#elif defined(CONFIG_ARCH_VERSATILE) - -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 1 -#define SMC_NOWAIT 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) -#define SMC_IRQ_FLAGS (-1) /* from 
resource */ - -#elif defined(CONFIG_MN10300) - -/* - * MN10300/AM33 configuration - */ - -#include - -#elif defined(CONFIG_ARCH_MSM) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_NOWAIT 1 - -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH - -#elif defined(CONFIG_COLDFIRE) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_NOWAIT 1 - -static inline void mcf_insw(void *a, unsigned char *p, int l) -{ - u16 *wp = (u16 *) p; - while (l-- > 0) - *wp++ = readw(a); -} - -static inline void mcf_outsw(void *a, unsigned char *p, int l) -{ - u16 *wp = (u16 *) p; - while (l-- > 0) - writew(*wp++, a); -} - -#define SMC_inw(a, r) _swapw(readw((a) + (r))) -#define SMC_outw(v, a, r) writew(_swapw(v), (a) + (r)) -#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l) -#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l) - -#define SMC_IRQ_FLAGS (IRQF_DISABLED) - -#else - -/* - * Default configuration - */ - -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 1 -#define SMC_NOWAIT 1 - -#define SMC_IO_SHIFT (lp->io_shift) - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) - -#define RPC_LSA_DEFAULT RPC_LED_100_10 -#define RPC_LSB_DEFAULT RPC_LED_TX_RX - -#endif - - -/* store this information for the driver.. */ -struct smc_local { - /* - * If I have to wait until memory is available to send a - * packet, I will store the skbuff here, until I get the - * desired memory. Then, I'll send it out and free it. - */ - struct sk_buff *pending_tx_skb; - struct tasklet_struct tx_task; - - /* version/revision of the SMC91x chip */ - int version; - - /* Contains the current active transmission mode */ - int tcr_cur_mode; - - /* Contains the current active receive mode */ - int rcr_cur_mode; - - /* Contains the current active receive/phy mode */ - int rpc_cur_mode; - int ctl_rfduplx; - int ctl_rspeed; - - u32 msg_enable; - u32 phy_type; - struct mii_if_info mii; - - /* work queue */ - struct work_struct phy_configure; - struct net_device *dev; - int work_pending; - - spinlock_t lock; - -#ifdef CONFIG_ARCH_PXA - /* DMA needs the physical address of the chip */ - u_long physaddr; - struct device *device; -#endif - void __iomem *base; - void __iomem *datacs; - - /* the low address lines on some platforms aren't connected... */ - int io_shift; - - struct smc91x_platdata cfg; -}; - -#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT) -#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT) -#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT) - -#ifdef CONFIG_ARCH_PXA -/* - * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is - * always happening in irq context so no need to worry about races. TX is - * different and probably not worth it for that reason, and not as critical - * as RX which can overrun memory and lose packets. 
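One subtlety in the DMA helpers below: the PXA memory-to-memory DMA wants 8-byte alignment, so a few leading units are transferred by PIO before the DMA takes over. Counting those lead-in units in isolation (same alignment test as smc_pxa_dma_insw(); bit 0 is ignored because buffers are at least 2-byte aligned):

#include <stdio.h>
#include <stdint.h>

/* how many leading 16-bit units must go via PIO before the buffer
 * reaches 8-byte alignment */
static int pio_lead_in_words(uintptr_t buf)
{
	int n = 0;

	while (buf & 6) {
		buf += 2;
		n++;
	}
	return n;
}

int main(void)
{
	uintptr_t addr;

	for (addr = 0x1000; addr < 0x1008; addr += 2)
		printf("buf %#lx -> %d word(s) by PIO\n",
		       (unsigned long)addr, pio_lead_in_words(addr));
	return 0;
}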
- */ -#include -#include - -#ifdef SMC_insl -#undef SMC_insl -#define SMC_insl(a, r, p, l) \ - smc_pxa_dma_insl(a, lp, r, dev->dma, p, l) -static inline void -smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, - u_char *buf, int len) -{ - u_long physaddr = lp->physaddr; - dma_addr_t dmabuf; - - /* fallback if no DMA available */ - if (dma == (unsigned char)-1) { - readsl(ioaddr + reg, buf, len); - return; - } - - /* 64 bit alignment is required for memory to memory DMA */ - if ((long)buf & 4) { - *((u32 *)buf) = SMC_inl(ioaddr, reg); - buf += 4; - len--; - } - - len *= 4; - dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE); - DCSR(dma) = DCSR_NODESC; - DTADR(dma) = dmabuf; - DSADR(dma) = physaddr + reg; - DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | - DCMD_WIDTH4 | (DCMD_LENGTH & len)); - DCSR(dma) = DCSR_NODESC | DCSR_RUN; - while (!(DCSR(dma) & DCSR_STOPSTATE)) - cpu_relax(); - DCSR(dma) = 0; - dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE); -} -#endif - -#ifdef SMC_insw -#undef SMC_insw -#define SMC_insw(a, r, p, l) \ - smc_pxa_dma_insw(a, lp, r, dev->dma, p, l) -static inline void -smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, - u_char *buf, int len) -{ - u_long physaddr = lp->physaddr; - dma_addr_t dmabuf; - - /* fallback if no DMA available */ - if (dma == (unsigned char)-1) { - readsw(ioaddr + reg, buf, len); - return; - } - - /* 64 bit alignment is required for memory to memory DMA */ - while ((long)buf & 6) { - *((u16 *)buf) = SMC_inw(ioaddr, reg); - buf += 2; - len--; - } - - len *= 2; - dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE); - DCSR(dma) = DCSR_NODESC; - DTADR(dma) = dmabuf; - DSADR(dma) = physaddr + reg; - DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | - DCMD_WIDTH2 | (DCMD_LENGTH & len)); - DCSR(dma) = DCSR_NODESC | DCSR_RUN; - while (!(DCSR(dma) & DCSR_STOPSTATE)) - cpu_relax(); - DCSR(dma) = 0; - dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE); -} -#endif - -static void -smc_pxa_dma_irq(int dma, void *dummy) -{ - DCSR(dma) = 0; -} -#endif /* CONFIG_ARCH_PXA */ - - -/* - * Everything a particular hardware setup needs should have been defined - * at this point. Add stubs for the undefined cases, mainly to avoid - * compilation warnings since they'll be optimized away, or to prevent buggy - * use of them. - */ - -#if ! SMC_CAN_USE_32BIT -#define SMC_inl(ioaddr, reg) ({ BUG(); 0; }) -#define SMC_outl(x, ioaddr, reg) BUG() -#define SMC_insl(a, r, p, l) BUG() -#define SMC_outsl(a, r, p, l) BUG() -#endif - -#if !defined(SMC_insl) || !defined(SMC_outsl) -#define SMC_insl(a, r, p, l) BUG() -#define SMC_outsl(a, r, p, l) BUG() -#endif - -#if ! SMC_CAN_USE_16BIT - -/* - * Any 16-bit access is performed with two 8-bit accesses if the hardware - * can't do it directly. Most registers are 16-bit so those are mandatory. - */ -#define SMC_outw(x, ioaddr, reg) \ - do { \ - unsigned int __val16 = (x); \ - SMC_outb( __val16, ioaddr, reg ); \ - SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ - } while (0) -#define SMC_inw(ioaddr, reg) \ - ({ \ - unsigned int __val16; \ - __val16 = SMC_inb( ioaddr, reg ); \ - __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \ - __val16; \ - }) - -#define SMC_insw(a, r, p, l) BUG() -#define SMC_outsw(a, r, p, l) BUG() - -#endif - -#if !defined(SMC_insw) || !defined(SMC_outsw) -#define SMC_insw(a, r, p, l) BUG() -#define SMC_outsw(a, r, p, l) BUG() -#endif - -#if ! 
SMC_CAN_USE_8BIT -#define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) -#define SMC_outb(x, ioaddr, reg) BUG() -#define SMC_insb(a, r, p, l) BUG() -#define SMC_outsb(a, r, p, l) BUG() -#endif - -#if !defined(SMC_insb) || !defined(SMC_outsb) -#define SMC_insb(a, r, p, l) BUG() -#define SMC_outsb(a, r, p, l) BUG() -#endif - -#ifndef SMC_CAN_USE_DATACS -#define SMC_CAN_USE_DATACS 0 -#endif - -#ifndef SMC_IO_SHIFT -#define SMC_IO_SHIFT 0 -#endif - -#ifndef SMC_IRQ_FLAGS -#define SMC_IRQ_FLAGS IRQF_TRIGGER_RISING -#endif - -#ifndef SMC_INTERRUPT_PREAMBLE -#define SMC_INTERRUPT_PREAMBLE -#endif - - -/* Because of bank switching, the LAN91x uses only 16 I/O ports */ -#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT) -#define SMC_DATA_EXTENT (4) - -/* - . Bank Select Register: - . - . yyyy yyyy 0000 00xx - . xx = bank number - . yyyy yyyy = 0x33, for identification purposes. -*/ -#define BANK_SELECT (14 << SMC_IO_SHIFT) - - -// Transmit Control Register -/* BANK 0 */ -#define TCR_REG(lp) SMC_REG(lp, 0x0000, 0) -#define TCR_ENABLE 0x0001 // When 1 we can transmit -#define TCR_LOOP 0x0002 // Controls output pin LBK -#define TCR_FORCOL 0x0004 // When 1 will force a collision -#define TCR_PAD_EN 0x0080 // When 1 will pad tx frames < 64 bytes w/0 -#define TCR_NOCRC 0x0100 // When 1 will not append CRC to tx frames -#define TCR_MON_CSN 0x0400 // When 1 tx monitors carrier -#define TCR_FDUPLX 0x0800 // When 1 enables full duplex operation -#define TCR_STP_SQET 0x1000 // When 1 stops tx if Signal Quality Error -#define TCR_EPH_LOOP 0x2000 // When 1 enables EPH block loopback -#define TCR_SWFDUP 0x8000 // When 1 enables Switched Full Duplex mode - -#define TCR_CLEAR 0 /* do NOTHING */ -/* the default settings for the TCR register : */ -#define TCR_DEFAULT (TCR_ENABLE | TCR_PAD_EN) - - -// EPH Status Register -/* BANK 0 */ -#define EPH_STATUS_REG(lp) SMC_REG(lp, 0x0002, 0) -#define ES_TX_SUC 0x0001 // Last TX was successful -#define ES_SNGL_COL 0x0002 // Single collision detected for last tx -#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx -#define ES_LTX_MULT 0x0008 // Last tx was a multicast -#define ES_16COL 0x0010 // 16 Collisions Reached -#define ES_SQET 0x0020 // Signal Quality Error Test -#define ES_LTXBRD 0x0040 // Last tx was a broadcast -#define ES_TXDEFR 0x0080 // Transmit Deferred -#define ES_LATCOL 0x0200 // Late collision detected on last tx -#define ES_LOSTCARR 0x0400 // Lost Carrier Sense -#define ES_EXC_DEF 0x0800 // Excessive Deferral -#define ES_CTR_ROL 0x1000 // Counter Roll Over indication -#define ES_LINK_OK 0x4000 // Driven by inverted value of nLNK pin -#define ES_TXUNRN 0x8000 // Tx Underrun - - -// Receive Control Register -/* BANK 0 */ -#define RCR_REG(lp) SMC_REG(lp, 0x0004, 0) -#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted -#define RCR_PRMS 0x0002 // Enable promiscuous mode -#define RCR_ALMUL 0x0004 // When set accepts all multicast frames -#define RCR_RXEN 0x0100 // IFF this is set, we can receive packets -#define RCR_STRIP_CRC 0x0200 // When set strips CRC from rx packets -#define RCR_ABORT_ENB 0x0200 // When set will abort rx on collision -#define RCR_FILT_CAR 0x0400 // When set filters leading 12 bits of carrier -#define RCR_SOFTRST 0x8000 // resets the chip - -/* the normal settings for the RCR register : */ -#define RCR_DEFAULT (RCR_STRIP_CRC | RCR_RXEN) -#define RCR_CLEAR 0x0 // set it to a base state - - -// Counter Register -/* BANK 0 */ -#define COUNTER_REG(lp) SMC_REG(lp, 0x0006, 0) - - -// Memory Information Register -/* BANK 0 */ -#define 
MIR_REG(lp) SMC_REG(lp, 0x0008, 0) - - -// Receive/Phy Control Register -/* BANK 0 */ -#define RPC_REG(lp) SMC_REG(lp, 0x000A, 0) -#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode. -#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode -#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode -#define RPC_LSXA_SHFT 5 // Bits to shift LS2A,LS1A,LS0A to lsb -#define RPC_LSXB_SHFT 2 // Bits to get LS2B,LS1B,LS0B to lsb - -#ifndef RPC_LSA_DEFAULT -#define RPC_LSA_DEFAULT RPC_LED_100 -#endif -#ifndef RPC_LSB_DEFAULT -#define RPC_LSB_DEFAULT RPC_LED_FD -#endif - -#define RPC_DEFAULT (RPC_ANEG | RPC_SPEED | RPC_DPLX) - - -/* Bank 0 0x0C is reserved */ - -// Bank Select Register -/* All Banks */ -#define BSR_REG 0x000E - - -// Configuration Reg -/* BANK 1 */ -#define CONFIG_REG(lp) SMC_REG(lp, 0x0000, 1) -#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy -#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL -#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus -#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode. - -// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low -#define CONFIG_DEFAULT (CONFIG_EPH_POWER_EN) - - -// Base Address Register -/* BANK 1 */ -#define BASE_REG(lp) SMC_REG(lp, 0x0002, 1) - - -// Individual Address Registers -/* BANK 1 */ -#define ADDR0_REG(lp) SMC_REG(lp, 0x0004, 1) -#define ADDR1_REG(lp) SMC_REG(lp, 0x0006, 1) -#define ADDR2_REG(lp) SMC_REG(lp, 0x0008, 1) - - -// General Purpose Register -/* BANK 1 */ -#define GP_REG(lp) SMC_REG(lp, 0x000A, 1) - - -// Control Register -/* BANK 1 */ -#define CTL_REG(lp) SMC_REG(lp, 0x000C, 1) -#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received -#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically -#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt -#define CTL_CR_ENABLE 0x0040 // When 1 enables Counter Rollover interrupt -#define CTL_TE_ENABLE 0x0020 // When 1 enables Transmit Error interrupt -#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store -#define CTL_RELOAD 0x0002 // When set reads EEPROM into registers -#define CTL_STORE 0x0001 // When set stores registers into EEPROM - - -// MMU Command Register -/* BANK 2 */ -#define MMU_CMD_REG(lp) SMC_REG(lp, 0x0000, 2) -#define MC_BUSY 1 // When 1 the last release has not completed -#define MC_NOP (0<<5) // No Op -#define MC_ALLOC (1<<5) // OR with number of 256 byte packets -#define MC_RESET (2<<5) // Reset MMU to initial state -#define MC_REMOVE (3<<5) // Remove the current rx packet -#define MC_RELEASE (4<<5) // Remove and release the current rx packet -#define MC_FREEPKT (5<<5) // Release packet in PNR register -#define MC_ENQUEUE (6<<5) // Enqueue the packet for transmit -#define MC_RSTTXFIFO (7<<5) // Reset the TX FIFOs - - -// Packet Number Register -/* BANK 2 */ -#define PN_REG(lp) SMC_REG(lp, 0x0002, 2) - - -// Allocation Result Register -/* BANK 2 */ -#define AR_REG(lp) SMC_REG(lp, 0x0003, 2) -#define AR_FAILED 0x80 // Allocation Failed - - -// TX FIFO Ports Register -/* BANK 2 */ -#define TXFIFO_REG(lp) SMC_REG(lp, 0x0004, 2) -#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty - -// RX FIFO Ports Register -/* BANK 2 */ -#define RXFIFO_REG(lp) SMC_REG(lp, 0x0005, 2) -#define RXFIFO_REMPTY 0x80 // RX FIFO Empty - -#define FIFO_REG(lp) SMC_REG(lp, 0x0004, 2) - -// Pointer Register -/* BANK 2 */ -#define PTR_REG(lp) SMC_REG(lp, 0x0006, 2) -#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area -#define 
PTR_AUTOINC 0x4000 // Auto increment the pointer on each access -#define PTR_READ 0x2000 // When 1 the operation is a read - - -// Data Register -/* BANK 2 */ -#define DATA_REG(lp) SMC_REG(lp, 0x0008, 2) - - -// Interrupt Status/Acknowledge Register -/* BANK 2 */ -#define INT_REG(lp) SMC_REG(lp, 0x000C, 2) - - -// Interrupt Mask Register -/* BANK 2 */ -#define IM_REG(lp) SMC_REG(lp, 0x000D, 2) -#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt -#define IM_ERCV_INT 0x40 // Early Receive Interrupt -#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section -#define IM_RX_OVRN_INT 0x10 // Set by Receiver Overruns -#define IM_ALLOC_INT 0x08 // Set when allocation request is completed -#define IM_TX_EMPTY_INT 0x04 // Set if the TX FIFO goes empty -#define IM_TX_INT 0x02 // Transmit Interrupt -#define IM_RCV_INT 0x01 // Receive Interrupt - - -// Multicast Table Registers -/* BANK 3 */ -#define MCAST_REG1(lp) SMC_REG(lp, 0x0000, 3) -#define MCAST_REG2(lp) SMC_REG(lp, 0x0002, 3) -#define MCAST_REG3(lp) SMC_REG(lp, 0x0004, 3) -#define MCAST_REG4(lp) SMC_REG(lp, 0x0006, 3) - - -// Management Interface Register (MII) -/* BANK 3 */ -#define MII_REG(lp) SMC_REG(lp, 0x0008, 3) -#define MII_MSK_CRS100 0x4000 // Disables CRS100 detection during tx half dup -#define MII_MDOE 0x0008 // MII Output Enable -#define MII_MCLK 0x0004 // MII Clock, pin MDCLK -#define MII_MDI 0x0002 // MII Input, pin MDI -#define MII_MDO 0x0001 // MII Output, pin MDO - - -// Revision Register -/* BANK 3 */ -/* ( hi: chip id low: rev # ) */ -#define REV_REG(lp) SMC_REG(lp, 0x000A, 3) - - -// Early RCV Register -/* BANK 3 */ -/* this is NOT on SMC9192 */ -#define ERCV_REG(lp) SMC_REG(lp, 0x000C, 3) -#define ERCV_RCV_DISCRD 0x0080 // When 1 discards a packet being received -#define ERCV_THRESHOLD 0x001F // ERCV Threshold Mask - - -// External Register -/* BANK 7 */ -#define EXT_REG(lp) SMC_REG(lp, 0x0000, 7) - - -#define CHIP_9192 3 -#define CHIP_9194 4 -#define CHIP_9195 5 -#define CHIP_9196 6 -#define CHIP_91100 7 -#define CHIP_91100FD 8 -#define CHIP_91111FD 9 - -static const char * chip_ids[ 16 ] = { - NULL, NULL, NULL, - /* 3 */ "SMC91C90/91C92", - /* 4 */ "SMC91C94", - /* 5 */ "SMC91C95", - /* 6 */ "SMC91C96", - /* 7 */ "SMC91C100", - /* 8 */ "SMC91C100FD", - /* 9 */ "SMC91C11xFD", - NULL, NULL, NULL, - NULL, NULL, NULL}; - - -/* - . Receive status bits -*/ -#define RS_ALGNERR 0x8000 -#define RS_BRODCAST 0x4000 -#define RS_BADCRC 0x2000 -#define RS_ODDFRAME 0x1000 -#define RS_TOOLONG 0x0800 -#define RS_TOOSHORT 0x0400 -#define RS_MULTICAST 0x0001 -#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) - - -/* - * PHY IDs - * LAN83C183 == LAN91C111 Internal PHY - */ -#define PHY_LAN83C183 0x0016f840 -#define PHY_LAN83C180 0x02821c50 - -/* - * PHY Register Addresses (LAN91C111 Internal PHY) - * - * Generic PHY registers can be found in <linux/mii.h> - * - * These phy registers are specific to our on-board phy. 
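The MII_* bits just above are the whole MDIO interface this MAC offers: raw MDO/MDI/MDCLK pins that the driver bit-bangs management frames through in software. A sketch of the clocking (reg_read()/reg_write() are hypothetical stand-ins for the driver's MII register accessors; this is a model of the technique, not the driver's code):

    #include <stdint.h>

    #define MII_MDOE 0x0008 /* output enable */
    #define MII_MCLK 0x0004 /* management clock */
    #define MII_MDI  0x0002 /* data from the PHY */
    #define MII_MDO  0x0001 /* data to the PHY */

    static uint16_t mii_shadow;     /* fake MII register for the sketch */
    static uint16_t reg_read(void)  { return mii_shadow; }
    static void reg_write(uint16_t v) { mii_shadow = v; }

    /* drive one bit on MDO and pulse MCLK; the PHY samples the rising edge */
    static void mdio_out_bit(int bit)
    {
            uint16_t v = MII_MDOE | (bit ? MII_MDO : 0);

            reg_write(v);
            reg_write(v | MII_MCLK);
            reg_write(v);
    }

    /* release the pin (MDOE low) and sample MDI around a clock pulse */
    static int mdio_in_bit(void)
    {
            int bit;

            reg_write(MII_MCLK);
            bit = !!(reg_read() & MII_MDI);
            reg_write(0);
            return bit;
    }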
- */ - -// PHY Configuration Register 1 -#define PHY_CFG1_REG 0x10 -#define PHY_CFG1_LNKDIS 0x8000 // 1=Rx Link Detect Function disabled -#define PHY_CFG1_XMTDIS 0x4000 // 1=TP Transmitter Disabled -#define PHY_CFG1_XMTPDN 0x2000 // 1=TP Transmitter Powered Down -#define PHY_CFG1_BYPSCR 0x0400 // 1=Bypass scrambler/descrambler -#define PHY_CFG1_UNSCDS 0x0200 // 1=Unscramble Idle Reception Disable -#define PHY_CFG1_EQLZR 0x0100 // 1=Rx Equalizer Disabled -#define PHY_CFG1_CABLE 0x0080 // 1=STP(150ohm), 0=UTP(100ohm) -#define PHY_CFG1_RLVL0 0x0040 // 1=Rx Squelch level reduced by 4.5db -#define PHY_CFG1_TLVL_SHIFT 2 // Transmit Output Level Adjust -#define PHY_CFG1_TLVL_MASK 0x003C -#define PHY_CFG1_TRF_MASK 0x0003 // Transmitter Rise/Fall time - - -// PHY Configuration Register 2 -#define PHY_CFG2_REG 0x11 -#define PHY_CFG2_APOLDIS 0x0020 // 1=Auto Polarity Correction disabled -#define PHY_CFG2_JABDIS 0x0010 // 1=Jabber disabled -#define PHY_CFG2_MREG 0x0008 // 1=Multiple register access (MII mgt) -#define PHY_CFG2_INTMDIO 0x0004 // 1=Interrupt signaled with MDIO pulse - -// PHY Status Output (and Interrupt status) Register -#define PHY_INT_REG 0x12 // Status Output (Interrupt Status) -#define PHY_INT_INT 0x8000 // 1=bits have changed since last read -#define PHY_INT_LNKFAIL 0x4000 // 1=Link Not detected -#define PHY_INT_LOSSSYNC 0x2000 // 1=Descrambler has lost sync -#define PHY_INT_CWRD 0x1000 // 1=Invalid 4B5B code detected on rx -#define PHY_INT_SSD 0x0800 // 1=No Start Of Stream detected on rx -#define PHY_INT_ESD 0x0400 // 1=No End Of Stream detected on rx -#define PHY_INT_RPOL 0x0200 // 1=Reverse Polarity detected -#define PHY_INT_JAB 0x0100 // 1=Jabber detected -#define PHY_INT_SPDDET 0x0080 // 1=100Base-TX mode, 0=10Base-T mode -#define PHY_INT_DPLXDET 0x0040 // 1=Device in Full Duplex - -// PHY Interrupt/Status Mask Register -#define PHY_MASK_REG 0x13 // Interrupt Mask -// Uses the same bit definitions as PHY_INT_REG - - -/* - * SMC91C96 ethernet config and status registers. - * These are in the "attribute" space. - */ -#define ECOR 0x8000 -#define ECOR_RESET 0x80 -#define ECOR_LEVEL_IRQ 0x40 -#define ECOR_WR_ATTRIB 0x04 -#define ECOR_ENABLE 0x01 - -#define ECSR 0x8002 -#define ECSR_IOIS8 0x20 -#define ECSR_PWRDWN 0x04 -#define ECSR_INT 0x02 - -#define ATTRIB_SIZE ((64*1024) << SMC_IO_SHIFT) - - -/* - * Macros to abstract register access according to the data bus - * capabilities. Please use those and not the in/out primitives. - * Note: the following macros do *not* select the bank -- this must - * be done separately as needed in the main code. The SMC_REG() macro - * only uses the bank argument for debugging purposes (when enabled). - * - * Note: despite inline functions being safer, everything leading to this - * should preferably be macros to let BUG() display the line number in - * the core source code since we're interested in the top call site - * not in any inline function location. - */ - -#if SMC_DEBUG > 0 -#define SMC_REG(lp, reg, bank) \ - ({ \ - int __b = SMC_CURRENT_BANK(lp); \ - if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \ - printk( "%s: bank reg screwed (0x%04x)\n", \ - CARDNAME, __b ); \ - BUG(); \ - } \ - reg<<SMC_IO_SHIFT; \ - }) -#else -#define SMC_REG(lp, reg, bank) (reg<<SMC_IO_SHIFT) -#endif - -/* - * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not - * aligned to a 32 bit boundary. I tell you that does exist! - * Fortunately the affected register accesses can be easily worked around - * since we can write zeroes to the preceding 16 bits without adverse - * effects and use a 32-bit access. - * - * Enforce it on any 32-bit capable setup for now. - */ -#define SMC_MUST_ALIGN_WRITE(lp) SMC_32BIT(lp) - -#define SMC_GET_PN(lp) \ - (SMC_8BIT(lp) ? (SMC_inb(ioaddr, PN_REG(lp))) \ - : (SMC_inw(ioaddr, PN_REG(lp)) & 0xFF)) - -#define SMC_SET_PN(lp, x) \ - do { \ - if (SMC_MUST_ALIGN_WRITE(lp)) \ - SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 0, 2)); \ - else if (SMC_8BIT(lp)) \ - SMC_outb(x, ioaddr, PN_REG(lp)); \ - else \ - SMC_outw(x, ioaddr, PN_REG(lp)); \ - } while (0) - -#define SMC_GET_AR(lp) \ - (SMC_8BIT(lp) ? (SMC_inb(ioaddr, AR_REG(lp))) \ - : (SMC_inw(ioaddr, AR_REG(lp)) >> 8)) - -#define SMC_GET_TXFIFO(lp) \ - (SMC_8BIT(lp) ? (SMC_inb(ioaddr, TXFIFO_REG(lp))) \ - : (SMC_inw(ioaddr, TXFIFO_REG(lp)) & 0xFF)) - -#define SMC_GET_RXFIFO(lp) \ - (SMC_8BIT(lp) ? (SMC_inb(ioaddr, RXFIFO_REG(lp))) \ - : (SMC_inw(ioaddr, TXFIFO_REG(lp)) >> 8)) - -#define SMC_GET_INT(lp) \ - (SMC_8BIT(lp) ? 
(SMC_inb(ioaddr, INT_REG(lp))) \ - : (SMC_inw(ioaddr, INT_REG(lp)) & 0xFF)) - -#define SMC_ACK_INT(lp, x) \ - do { \ - if (SMC_8BIT(lp)) \ - SMC_outb(x, ioaddr, INT_REG(lp)); \ - else { \ - unsigned long __flags; \ - int __mask; \ - local_irq_save(__flags); \ - __mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff; \ - SMC_outw(__mask | (x), ioaddr, INT_REG(lp)); \ - local_irq_restore(__flags); \ - } \ - } while (0) - -#define SMC_GET_INT_MASK(lp) \ - (SMC_8BIT(lp) ? (SMC_inb(ioaddr, IM_REG(lp))) \ - : (SMC_inw(ioaddr, INT_REG(lp)) >> 8)) - -#define SMC_SET_INT_MASK(lp, x) \ - do { \ - if (SMC_8BIT(lp)) \ - SMC_outb(x, ioaddr, IM_REG(lp)); \ - else \ - SMC_outw((x) << 8, ioaddr, INT_REG(lp)); \ - } while (0) - -#define SMC_CURRENT_BANK(lp) SMC_inw(ioaddr, BANK_SELECT) - -#define SMC_SELECT_BANK(lp, x) \ - do { \ - if (SMC_MUST_ALIGN_WRITE(lp)) \ - SMC_outl((x)<<16, ioaddr, 12<> 8; \ - __v = SMC_inw(ioaddr, ADDR1_REG(lp)); \ - addr[2] = __v; addr[3] = __v >> 8; \ - __v = SMC_inw(ioaddr, ADDR2_REG(lp)); \ - addr[4] = __v; addr[5] = __v >> 8; \ - } while (0) -#endif - -#define SMC_SET_MAC_ADDR(lp, addr) \ - do { \ - SMC_outw(addr[0]|(addr[1] << 8), ioaddr, ADDR0_REG(lp)); \ - SMC_outw(addr[2]|(addr[3] << 8), ioaddr, ADDR1_REG(lp)); \ - SMC_outw(addr[4]|(addr[5] << 8), ioaddr, ADDR2_REG(lp)); \ - } while (0) - -#define SMC_SET_MCAST(lp, x) \ - do { \ - const unsigned char *mt = (x); \ - SMC_outw(mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \ - SMC_outw(mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \ - SMC_outw(mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \ - SMC_outw(mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \ - } while (0) - -#define SMC_PUT_PKT_HDR(lp, status, length) \ - do { \ - if (SMC_32BIT(lp)) \ - SMC_outl((status) | (length)<<16, ioaddr, \ - DATA_REG(lp)); \ - else { \ - SMC_outw(status, ioaddr, DATA_REG(lp)); \ - SMC_outw(length, ioaddr, DATA_REG(lp)); \ - } \ - } while (0) - -#define SMC_GET_PKT_HDR(lp, status, length) \ - do { \ - if (SMC_32BIT(lp)) { \ - unsigned int __val = SMC_inl(ioaddr, DATA_REG(lp)); \ - (status) = __val & 0xffff; \ - (length) = __val >> 16; \ - } else { \ - (status) = SMC_inw(ioaddr, DATA_REG(lp)); \ - (length) = SMC_inw(ioaddr, DATA_REG(lp)); \ - } \ - } while (0) - -#define SMC_PUSH_DATA(lp, p, l) \ - do { \ - if (SMC_32BIT(lp)) { \ - void *__ptr = (p); \ - int __len = (l); \ - void __iomem *__ioaddr = ioaddr; \ - if (__len >= 2 && (unsigned long)__ptr & 2) { \ - __len -= 2; \ - SMC_outw(*(u16 *)__ptr, ioaddr, \ - DATA_REG(lp)); \ - __ptr += 2; \ - } \ - if (SMC_CAN_USE_DATACS && lp->datacs) \ - __ioaddr = lp->datacs; \ - SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ - if (__len & 2) { \ - __ptr += (__len & ~3); \ - SMC_outw(*((u16 *)__ptr), ioaddr, \ - DATA_REG(lp)); \ - } \ - } else if (SMC_16BIT(lp)) \ - SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ - else if (SMC_8BIT(lp)) \ - SMC_outsb(ioaddr, DATA_REG(lp), p, l); \ - } while (0) - -#define SMC_PULL_DATA(lp, p, l) \ - do { \ - if (SMC_32BIT(lp)) { \ - void *__ptr = (p); \ - int __len = (l); \ - void __iomem *__ioaddr = ioaddr; \ - if ((unsigned long)__ptr & 2) { \ - /* \ - * We want 32bit alignment here. \ - * Since some buses perform a full \ - * 32bit fetch even for 16bit data \ - * we can't use SMC_inw() here. \ - * Back both source (on-chip) and \ - * destination pointers of 2 bytes. \ - * This is possible since the call to \ - * SMC_GET_PKT_HDR() already advanced \ - * the source pointer of 4 bytes, and \ - * the skb_reserve(skb, 2) advanced \ - * the destination pointer of 2 bytes. 
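The head/tail juggling in SMC_PUSH_DATA above (mirrored here in SMC_PULL_DATA) is easier to read outside macro syntax. A userspace model, with fifo_out16()/fifo_out32() as hypothetical stand-ins for the 16- and 32-bit data-register stores; the driver handles an odd final byte separately, so only even lengths appear here:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void fifo_out16(uint16_t v) { printf("outw 0x%04x\n", v); }
    static void fifo_out32(uint32_t v) { printf("outl 0x%08x\n", v); }

    /* peel a 16-bit head off a half-word-aligned buffer, burst the
     * middle as whole 32-bit words, then mop up two trailing bytes */
    static void push_data(const uint8_t *p, size_t len)
    {
            uint16_t w;
            uint32_t l;
            size_t i;

            if (len >= 2 && ((uintptr_t)p & 2)) {
                    memcpy(&w, p, 2);
                    fifo_out16(w);
                    p += 2;
                    len -= 2;
            }
            for (i = 0; i + 4 <= len; i += 4) {
                    memcpy(&l, p + i, 4);
                    fifo_out32(l);
            }
            if (len & 2) {
                    memcpy(&w, p + (len & ~(size_t)3), 2);
                    fifo_out16(w);
            }
    }

After the head is peeled, the pointer is 32-bit aligned, which is exactly why the pull side backs both pointers up by two bytes instead of falling back to 16-bit reads.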
\ - */ \ - __ptr -= 2; \ - __len += 2; \ - SMC_SET_PTR(lp, \ - 2|PTR_READ|PTR_RCV|PTR_AUTOINC); \ - } \ - if (SMC_CAN_USE_DATACS && lp->datacs) \ - __ioaddr = lp->datacs; \ - __len += 2; \ - SMC_insl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ - } else if (SMC_16BIT(lp)) \ - SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ - else if (SMC_8BIT(lp)) \ - SMC_insb(ioaddr, DATA_REG(lp), p, l); \ - } while (0) - -#endif /* _SMC91X_H_ */ diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c deleted file mode 100644 index 75c08a5..0000000 --- a/drivers/net/smsc911x.c +++ /dev/null @@ -1,2404 +0,0 @@ -/*************************************************************************** - * - * Copyright (C) 2004-2008 SMSC - * Copyright (C) 2005-2008 ARM - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - *************************************************************************** - * Rewritten, heavily based on smsc911x simple driver by SMSC. - * Partly uses io macros from smc91x.c by Nicolas Pitre - * - * Supported devices: - * LAN9115, LAN9116, LAN9117, LAN9118 - * LAN9215, LAN9216, LAN9217, LAN9218 - * LAN9210, LAN9211 - * LAN9220, LAN9221 - * - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "smsc911x.h" - -#define SMSC_CHIPNAME "smsc911x" -#define SMSC_MDIONAME "smsc911x-mdio" -#define SMSC_DRV_VERSION "2008-10-21" - -MODULE_LICENSE("GPL"); -MODULE_VERSION(SMSC_DRV_VERSION); -MODULE_ALIAS("platform:smsc911x"); - -#if USE_DEBUG > 0 -static int debug = 16; -#else -static int debug = 3; -#endif - -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -struct smsc911x_data; - -struct smsc911x_ops { - u32 (*reg_read)(struct smsc911x_data *pdata, u32 reg); - void (*reg_write)(struct smsc911x_data *pdata, u32 reg, u32 val); - void (*rx_readfifo)(struct smsc911x_data *pdata, - unsigned int *buf, unsigned int wordcount); - void (*tx_writefifo)(struct smsc911x_data *pdata, - unsigned int *buf, unsigned int wordcount); -}; - -struct smsc911x_data { - void __iomem *ioaddr; - - unsigned int idrev; - - /* used to decide which workarounds apply */ - unsigned int generation; - - /* device configuration (copied from platform_data during probe) */ - struct smsc911x_platform_config config; - - /* This needs to be acquired before calling any of below: - * smsc911x_mac_read(), smsc911x_mac_write() - */ - spinlock_t mac_lock; - - /* spinlock to ensure register accesses are serialised */ - spinlock_t dev_lock; - - struct phy_device *phy_dev; - struct mii_bus *mii_bus; - int phy_irq[PHY_MAX_ADDR]; - unsigned int using_extphy; - int last_duplex; 
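/* The last_* fields cache the most recently seen link state; smsc911x_mii_probe() primes them to -1 (matching neither a real duplex value nor a real carrier state) so the first smsc911x_phy_adjust_link() callback always programs MAC_CR and the LED GPIO configuration. */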
- int last_carrier; - - u32 msg_enable; - unsigned int gpio_setting; - unsigned int gpio_orig_setting; - struct net_device *dev; - struct napi_struct napi; - - unsigned int software_irq_signal; - -#ifdef USE_PHY_WORK_AROUND -#define MIN_PACKET_SIZE (64) - char loopback_tx_pkt[MIN_PACKET_SIZE]; - char loopback_rx_pkt[MIN_PACKET_SIZE]; - unsigned int resetcount; -#endif - - /* Members for Multicast filter workaround */ - unsigned int multicast_update_pending; - unsigned int set_bits_mask; - unsigned int clear_bits_mask; - unsigned int hashhi; - unsigned int hashlo; - - /* register access functions */ - const struct smsc911x_ops *ops; -}; - -/* Easy access to information */ -#define __smsc_shift(pdata, reg) ((reg) << ((pdata)->config.shift)) - -static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) -{ - if (pdata->config.flags & SMSC911X_USE_32BIT) - return readl(pdata->ioaddr + reg); - - if (pdata->config.flags & SMSC911X_USE_16BIT) - return ((readw(pdata->ioaddr + reg) & 0xFFFF) | - ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16)); - - BUG(); - return 0; -} - -static inline u32 -__smsc911x_reg_read_shift(struct smsc911x_data *pdata, u32 reg) -{ - if (pdata->config.flags & SMSC911X_USE_32BIT) - return readl(pdata->ioaddr + __smsc_shift(pdata, reg)); - - if (pdata->config.flags & SMSC911X_USE_16BIT) - return (readw(pdata->ioaddr + - __smsc_shift(pdata, reg)) & 0xFFFF) | - ((readw(pdata->ioaddr + - __smsc_shift(pdata, reg + 2)) & 0xFFFF) << 16); - - BUG(); - return 0; -} - -static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) -{ - u32 data; - unsigned long flags; - - spin_lock_irqsave(&pdata->dev_lock, flags); - data = pdata->ops->reg_read(pdata, reg); - spin_unlock_irqrestore(&pdata->dev_lock, flags); - - return data; -} - -static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, - u32 val) -{ - if (pdata->config.flags & SMSC911X_USE_32BIT) { - writel(val, pdata->ioaddr + reg); - return; - } - - if (pdata->config.flags & SMSC911X_USE_16BIT) { - writew(val & 0xFFFF, pdata->ioaddr + reg); - writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2); - return; - } - - BUG(); -} - -static inline void -__smsc911x_reg_write_shift(struct smsc911x_data *pdata, u32 reg, u32 val) -{ - if (pdata->config.flags & SMSC911X_USE_32BIT) { - writel(val, pdata->ioaddr + __smsc_shift(pdata, reg)); - return; - } - - if (pdata->config.flags & SMSC911X_USE_16BIT) { - writew(val & 0xFFFF, - pdata->ioaddr + __smsc_shift(pdata, reg)); - writew((val >> 16) & 0xFFFF, - pdata->ioaddr + __smsc_shift(pdata, reg + 2)); - return; - } - - BUG(); -} - -static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, - u32 val) -{ - unsigned long flags; - - spin_lock_irqsave(&pdata->dev_lock, flags); - pdata->ops->reg_write(pdata, reg, val); - spin_unlock_irqrestore(&pdata->dev_lock, flags); -} - -/* Writes a packet to the TX_DATA_FIFO */ -static inline void -smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf, - unsigned int wordcount) -{ - unsigned long flags; - - spin_lock_irqsave(&pdata->dev_lock, flags); - - if (pdata->config.flags & SMSC911X_SWAP_FIFO) { - while (wordcount--) - __smsc911x_reg_write(pdata, TX_DATA_FIFO, - swab32(*buf++)); - goto out; - } - - if (pdata->config.flags & SMSC911X_USE_32BIT) { - writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount); - goto out; - } - - if (pdata->config.flags & SMSC911X_USE_16BIT) { - while (wordcount--) - __smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++); - goto out; - } - - BUG(); -out: - 
spin_unlock_irqrestore(&pdata->dev_lock, flags); -} - -/* Writes a packet to the TX_DATA_FIFO - shifted version */ -static inline void -smsc911x_tx_writefifo_shift(struct smsc911x_data *pdata, unsigned int *buf, - unsigned int wordcount) -{ - unsigned long flags; - - spin_lock_irqsave(&pdata->dev_lock, flags); - - if (pdata->config.flags & SMSC911X_SWAP_FIFO) { - while (wordcount--) - __smsc911x_reg_write_shift(pdata, TX_DATA_FIFO, - swab32(*buf++)); - goto out; - } - - if (pdata->config.flags & SMSC911X_USE_32BIT) { - writesl(pdata->ioaddr + __smsc_shift(pdata, - TX_DATA_FIFO), buf, wordcount); - goto out; - } - - if (pdata->config.flags & SMSC911X_USE_16BIT) { - while (wordcount--) - __smsc911x_reg_write_shift(pdata, - TX_DATA_FIFO, *buf++); - goto out; - } - - BUG(); -out: - spin_unlock_irqrestore(&pdata->dev_lock, flags); -} - -/* Reads a packet out of the RX_DATA_FIFO */ -static inline void -smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf, - unsigned int wordcount) -{ - unsigned long flags; - - spin_lock_irqsave(&pdata->dev_lock, flags); - - if (pdata->config.flags & SMSC911X_SWAP_FIFO) { - while (wordcount--) - *buf++ = swab32(__smsc911x_reg_read(pdata, - RX_DATA_FIFO)); - goto out; - } - - if (pdata->config.flags & SMSC911X_USE_32BIT) { - readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount); - goto out; - } - - if (pdata->config.flags & SMSC911X_USE_16BIT) { - while (wordcount--) - *buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO); - goto out; - } - - BUG(); -out: - spin_unlock_irqrestore(&pdata->dev_lock, flags); -} - -/* Reads a packet out of the RX_DATA_FIFO - shifted version */ -static inline void -smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf, - unsigned int wordcount) -{ - unsigned long flags; - - spin_lock_irqsave(&pdata->dev_lock, flags); - - if (pdata->config.flags & SMSC911X_SWAP_FIFO) { - while (wordcount--) - *buf++ = swab32(__smsc911x_reg_read_shift(pdata, - RX_DATA_FIFO)); - goto out; - } - - if (pdata->config.flags & SMSC911X_USE_32BIT) { - readsl(pdata->ioaddr + __smsc_shift(pdata, - RX_DATA_FIFO), buf, wordcount); - goto out; - } - - if (pdata->config.flags & SMSC911X_USE_16BIT) { - while (wordcount--) - *buf++ = __smsc911x_reg_read_shift(pdata, - RX_DATA_FIFO); - goto out; - } - - BUG(); -out: - spin_unlock_irqrestore(&pdata->dev_lock, flags); -} - -/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read - * and smsc911x_mac_write, so assumes mac_lock is held */ -static int smsc911x_mac_complete(struct smsc911x_data *pdata) -{ - int i; - u32 val; - - SMSC_ASSERT_MAC_LOCK(pdata); - - for (i = 0; i < 40; i++) { - val = smsc911x_reg_read(pdata, MAC_CSR_CMD); - if (!(val & MAC_CSR_CMD_CSR_BUSY_)) - return 0; - } - SMSC_WARN(pdata, hw, "Timed out waiting for MAC not BUSY. " - "MAC_CSR_CMD: 0x%08X", val); - return -EIO; -} - -/* Fetches a MAC register value. 
Assumes mac_lock is acquired */ -static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset) -{ - unsigned int temp; - - SMSC_ASSERT_MAC_LOCK(pdata); - - temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); - if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { - SMSC_WARN(pdata, hw, "MAC busy at entry"); - return 0xFFFFFFFF; - } - - /* Send the MAC cmd */ - smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) | - MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_)); - - /* Workaround for hardware read-after-write restriction */ - temp = smsc911x_reg_read(pdata, BYTE_TEST); - - /* Wait for the read to complete */ - if (likely(smsc911x_mac_complete(pdata) == 0)) - return smsc911x_reg_read(pdata, MAC_CSR_DATA); - - SMSC_WARN(pdata, hw, "MAC busy after read"); - return 0xFFFFFFFF; -} - -/* Set a mac register, mac_lock must be acquired before calling */ -static void smsc911x_mac_write(struct smsc911x_data *pdata, - unsigned int offset, u32 val) -{ - unsigned int temp; - - SMSC_ASSERT_MAC_LOCK(pdata); - - temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); - if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { - SMSC_WARN(pdata, hw, - "smsc911x_mac_write failed, MAC busy at entry"); - return; - } - - /* Send data to write */ - smsc911x_reg_write(pdata, MAC_CSR_DATA, val); - - /* Write the actual data */ - smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) | - MAC_CSR_CMD_CSR_BUSY_)); - - /* Workaround for hardware read-after-write restriction */ - temp = smsc911x_reg_read(pdata, BYTE_TEST); - - /* Wait for the write to complete */ - if (likely(smsc911x_mac_complete(pdata) == 0)) - return; - - SMSC_WARN(pdata, hw, "smsc911x_mac_write failed, MAC busy after write"); -} - -/* Get a phy register */ -static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx) -{ - struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; - unsigned long flags; - unsigned int addr; - int i, reg; - - spin_lock_irqsave(&pdata->mac_lock, flags); - - /* Confirm MII not busy */ - if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { - SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_read???"); - reg = -EIO; - goto out; - } - - /* Set the address, index & direction (read from PHY) */ - addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6); - smsc911x_mac_write(pdata, MII_ACC, addr); - - /* Wait for read to complete w/ timeout */ - for (i = 0; i < 100; i++) - if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { - reg = smsc911x_mac_read(pdata, MII_DATA); - goto out; - } - - SMSC_WARN(pdata, hw, "Timed out waiting for MII read to finish"); - reg = -EIO; - -out: - spin_unlock_irqrestore(&pdata->mac_lock, flags); - return reg; -} - -/* Set a phy register */ -static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx, - u16 val) -{ - struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; - unsigned long flags; - unsigned int addr; - int i, reg; - - spin_lock_irqsave(&pdata->mac_lock, flags); - - /* Confirm MII not busy */ - if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { - SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_write???"); - reg = -EIO; - goto out; - } - - /* Put the data to write in the MAC */ - smsc911x_mac_write(pdata, MII_DATA, val); - - /* Set the address, index & direction (write to PHY) */ - addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | - MII_ACC_MII_WRITE_; - smsc911x_mac_write(pdata, MII_ACC, addr); - - /* Wait for write to complete w/ timeout */ - for (i = 0; i < 100; i++) - if 
(!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { - reg = 0; - goto out; - } - - SMSC_WARN(pdata, hw, "Timed out waiting for MII write to finish"); - reg = -EIO; - -out: - spin_unlock_irqrestore(&pdata->mac_lock, flags); - return reg; -} - -/* Switch to external phy. Assumes tx and rx are stopped. */ -static void smsc911x_phy_enable_external(struct smsc911x_data *pdata) -{ - unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); - - /* Disable phy clocks to the MAC */ - hwcfg &= (~HW_CFG_PHY_CLK_SEL_); - hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; - smsc911x_reg_write(pdata, HW_CFG, hwcfg); - udelay(10); /* Enough time for clocks to stop */ - - /* Switch to external phy */ - hwcfg |= HW_CFG_EXT_PHY_EN_; - smsc911x_reg_write(pdata, HW_CFG, hwcfg); - - /* Enable phy clocks to the MAC */ - hwcfg &= (~HW_CFG_PHY_CLK_SEL_); - hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; - smsc911x_reg_write(pdata, HW_CFG, hwcfg); - udelay(10); /* Enough time for clocks to restart */ - - hwcfg |= HW_CFG_SMI_SEL_; - smsc911x_reg_write(pdata, HW_CFG, hwcfg); -} - -/* Autodetects and enables external phy if present on supported chips. - * autodetection can be overridden by specifying SMSC911X_FORCE_INTERNAL_PHY - * or SMSC911X_FORCE_EXTERNAL_PHY in the platform_data flags. */ -static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata) -{ - unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); - - if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) { - SMSC_TRACE(pdata, hw, "Forcing internal PHY"); - pdata->using_extphy = 0; - } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) { - SMSC_TRACE(pdata, hw, "Forcing external PHY"); - smsc911x_phy_enable_external(pdata); - pdata->using_extphy = 1; - } else if (hwcfg & HW_CFG_EXT_PHY_DET_) { - SMSC_TRACE(pdata, hw, - "HW_CFG EXT_PHY_DET set, using external PHY"); - smsc911x_phy_enable_external(pdata); - pdata->using_extphy = 1; - } else { - SMSC_TRACE(pdata, hw, - "HW_CFG EXT_PHY_DET clear, using internal PHY"); - pdata->using_extphy = 0; - } -} - -/* Fetches a tx status out of the status fifo */ -static unsigned int smsc911x_tx_get_txstatus(struct smsc911x_data *pdata) -{ - unsigned int result = - smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TSUSED_; - - if (result != 0) - result = smsc911x_reg_read(pdata, TX_STATUS_FIFO); - - return result; -} - -/* Fetches the next rx status */ -static unsigned int smsc911x_rx_get_rxstatus(struct smsc911x_data *pdata) -{ - unsigned int result = - smsc911x_reg_read(pdata, RX_FIFO_INF) & RX_FIFO_INF_RXSUSED_; - - if (result != 0) - result = smsc911x_reg_read(pdata, RX_STATUS_FIFO); - - return result; -} - -#ifdef USE_PHY_WORK_AROUND -static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata) -{ - unsigned int tries; - u32 wrsz; - u32 rdsz; - ulong bufp; - - for (tries = 0; tries < 10; tries++) { - unsigned int txcmd_a; - unsigned int txcmd_b; - unsigned int status; - unsigned int pktlength; - unsigned int i; - - /* Zero-out rx packet memory */ - memset(pdata->loopback_rx_pkt, 0, MIN_PACKET_SIZE); - - /* Write tx packet to 118 */ - txcmd_a = (u32)((ulong)pdata->loopback_tx_pkt & 0x03) << 16; - txcmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; - txcmd_a |= MIN_PACKET_SIZE; - - txcmd_b = MIN_PACKET_SIZE << 16 | MIN_PACKET_SIZE; - - smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_a); - smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_b); - - bufp = (ulong)pdata->loopback_tx_pkt & (~0x3); - wrsz = MIN_PACKET_SIZE + 3; - wrsz += (u32)((ulong)pdata->loopback_tx_pkt & 0x3); - wrsz >>= 2; - - 
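/* MIN_PACKET_SIZE is the 64-byte Ethernet minimum; the FIFO copy below moves whole 32-bit words starting from the word-aligned base bufp, so wrsz adds the 0-3 bytes of buffer misalignment and rounds up with the +3 before the shift. */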
pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); - - /* Wait till transmit is done */ - i = 60; - do { - udelay(5); - status = smsc911x_tx_get_txstatus(pdata); - } while ((i--) && (!status)); - - if (!status) { - SMSC_WARN(pdata, hw, - "Failed to transmit during loopback test"); - continue; - } - if (status & TX_STS_ES_) { - SMSC_WARN(pdata, hw, - "Transmit encountered errors during loopback test"); - continue; - } - - /* Wait till receive is done */ - i = 60; - do { - udelay(5); - status = smsc911x_rx_get_rxstatus(pdata); - } while ((i--) && (!status)); - - if (!status) { - SMSC_WARN(pdata, hw, - "Failed to receive during loopback test"); - continue; - } - if (status & RX_STS_ES_) { - SMSC_WARN(pdata, hw, - "Receive encountered errors during loopback test"); - continue; - } - - pktlength = ((status & 0x3FFF0000UL) >> 16); - bufp = (ulong)pdata->loopback_rx_pkt; - rdsz = pktlength + 3; - rdsz += (u32)((ulong)pdata->loopback_rx_pkt & 0x3); - rdsz >>= 2; - - pdata->ops->rx_readfifo(pdata, (unsigned int *)bufp, rdsz); - - if (pktlength != (MIN_PACKET_SIZE + 4)) { - SMSC_WARN(pdata, hw, "Unexpected packet size " - "during loop back test, size=%d, will retry", - pktlength); - } else { - unsigned int j; - int mismatch = 0; - for (j = 0; j < MIN_PACKET_SIZE; j++) { - if (pdata->loopback_tx_pkt[j] - != pdata->loopback_rx_pkt[j]) { - mismatch = 1; - break; - } - } - if (!mismatch) { - SMSC_TRACE(pdata, hw, "Successfully verified " - "loopback packet"); - return 0; - } else { - SMSC_WARN(pdata, hw, "Data mismatch " - "during loop back test, will retry"); - } - } - } - - return -EIO; -} - -static int smsc911x_phy_reset(struct smsc911x_data *pdata) -{ - struct phy_device *phy_dev = pdata->phy_dev; - unsigned int temp; - unsigned int i = 100000; - - BUG_ON(!phy_dev); - BUG_ON(!phy_dev->bus); - - SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset"); - smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET); - do { - msleep(1); - temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, - MII_BMCR); - } while ((i--) && (temp & BMCR_RESET)); - - if (temp & BMCR_RESET) { - SMSC_WARN(pdata, hw, "PHY reset failed to complete"); - return -EIO; - } - /* Extra delay required because the phy may not be completed with - * its reset when BMCR_RESET is cleared. 
Specs say 256 uS is - * enough delay but using 1ms here to be safe */ - msleep(1); - - return 0; -} - -static int smsc911x_phy_loopbacktest(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - struct phy_device *phy_dev = pdata->phy_dev; - int result = -EIO; - unsigned int i, val; - unsigned long flags; - - /* Initialise tx packet using broadcast destination address */ - memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN); - - /* Use incrementing source address */ - for (i = 6; i < 12; i++) - pdata->loopback_tx_pkt[i] = (char)i; - - /* Set length type field */ - pdata->loopback_tx_pkt[12] = 0x00; - pdata->loopback_tx_pkt[13] = 0x00; - - for (i = 14; i < MIN_PACKET_SIZE; i++) - pdata->loopback_tx_pkt[i] = (char)i; - - val = smsc911x_reg_read(pdata, HW_CFG); - val &= HW_CFG_TX_FIF_SZ_; - val |= HW_CFG_SF_; - smsc911x_reg_write(pdata, HW_CFG, val); - - smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_); - smsc911x_reg_write(pdata, RX_CFG, - (u32)((ulong)pdata->loopback_rx_pkt & 0x03) << 8); - - for (i = 0; i < 10; i++) { - /* Set PHY to 10/FD, no ANEG, and loopback mode */ - smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, - BMCR_LOOPBACK | BMCR_FULLDPLX); - - /* Enable MAC tx/rx, FD */ - spin_lock_irqsave(&pdata->mac_lock, flags); - smsc911x_mac_write(pdata, MAC_CR, MAC_CR_FDPX_ - | MAC_CR_TXEN_ | MAC_CR_RXEN_); - spin_unlock_irqrestore(&pdata->mac_lock, flags); - - if (smsc911x_phy_check_loopbackpkt(pdata) == 0) { - result = 0; - break; - } - pdata->resetcount++; - - /* Disable MAC rx */ - spin_lock_irqsave(&pdata->mac_lock, flags); - smsc911x_mac_write(pdata, MAC_CR, 0); - spin_unlock_irqrestore(&pdata->mac_lock, flags); - - smsc911x_phy_reset(pdata); - } - - /* Disable MAC */ - spin_lock_irqsave(&pdata->mac_lock, flags); - smsc911x_mac_write(pdata, MAC_CR, 0); - spin_unlock_irqrestore(&pdata->mac_lock, flags); - - /* Cancel PHY loopback mode */ - smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, 0); - - smsc911x_reg_write(pdata, TX_CFG, 0); - smsc911x_reg_write(pdata, RX_CFG, 0); - - return result; -} -#endif /* USE_PHY_WORK_AROUND */ - -static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata) -{ - struct phy_device *phy_dev = pdata->phy_dev; - u32 afc = smsc911x_reg_read(pdata, AFC_CFG); - u32 flow; - unsigned long flags; - - if (phy_dev->duplex == DUPLEX_FULL) { - u16 lcladv = phy_read(phy_dev, MII_ADVERTISE); - u16 rmtadv = phy_read(phy_dev, MII_LPA); - u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); - - if (cap & FLOW_CTRL_RX) - flow = 0xFFFF0002; - else - flow = 0; - - if (cap & FLOW_CTRL_TX) - afc |= 0xF; - else - afc &= ~0xF; - - SMSC_TRACE(pdata, hw, "rx pause %s, tx pause %s", - (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), - (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); - } else { - SMSC_TRACE(pdata, hw, "half duplex"); - flow = 0; - afc |= 0xF; - } - - spin_lock_irqsave(&pdata->mac_lock, flags); - smsc911x_mac_write(pdata, FLOW, flow); - spin_unlock_irqrestore(&pdata->mac_lock, flags); - - smsc911x_reg_write(pdata, AFC_CFG, afc); -} - -/* Update link mode if anything has changed. Called periodically when the - * PHY is in polling mode, even if nothing has changed. 
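smsc911x_phy_update_flowcontrol() above leans on mii_resolve_flowctrl_fdx() to turn the local and partner advertisement words into pause capabilities. A standalone model of that standard resolution (constant values assumed from the generic MII definitions):

    #include <stdint.h>

    #define ADVERTISE_PAUSE_CAP  0x0400 /* symmetric pause */
    #define ADVERTISE_PAUSE_ASYM 0x0800 /* asymmetric pause */
    #define FLOW_CTRL_TX 0x01
    #define FLOW_CTRL_RX 0x02

    /* resolve full-duplex pause from the MII_ADVERTISE/MII_LPA words */
    static uint8_t resolve_flowctrl_fdx(uint16_t lcladv, uint16_t rmtadv)
    {
            if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP)
                    return FLOW_CTRL_TX | FLOW_CTRL_RX;
            if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
                    if (lcladv & ADVERTISE_PAUSE_CAP)
                            return FLOW_CTRL_RX;
                    if (rmtadv & ADVERTISE_PAUSE_CAP)
                            return FLOW_CTRL_TX;
            }
            return 0;
    }

Both ends advertising symmetric pause resolves to TX|RX, which is what drives the 0xFFFF0002 FLOW value and the AFC_CFG low bits above.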
*/ -static void smsc911x_phy_adjust_link(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - struct phy_device *phy_dev = pdata->phy_dev; - unsigned long flags; - int carrier; - - if (phy_dev->duplex != pdata->last_duplex) { - unsigned int mac_cr; - SMSC_TRACE(pdata, hw, "duplex state has changed"); - - spin_lock_irqsave(&pdata->mac_lock, flags); - mac_cr = smsc911x_mac_read(pdata, MAC_CR); - if (phy_dev->duplex) { - SMSC_TRACE(pdata, hw, - "configuring for full duplex mode"); - mac_cr |= MAC_CR_FDPX_; - } else { - SMSC_TRACE(pdata, hw, - "configuring for half duplex mode"); - mac_cr &= ~MAC_CR_FDPX_; - } - smsc911x_mac_write(pdata, MAC_CR, mac_cr); - spin_unlock_irqrestore(&pdata->mac_lock, flags); - - smsc911x_phy_update_flowcontrol(pdata); - pdata->last_duplex = phy_dev->duplex; - } - - carrier = netif_carrier_ok(dev); - if (carrier != pdata->last_carrier) { - SMSC_TRACE(pdata, hw, "carrier state has changed"); - if (carrier) { - SMSC_TRACE(pdata, hw, "configuring for carrier OK"); - if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) && - (!pdata->using_extphy)) { - /* Restore original GPIO configuration */ - pdata->gpio_setting = pdata->gpio_orig_setting; - smsc911x_reg_write(pdata, GPIO_CFG, - pdata->gpio_setting); - } - } else { - SMSC_TRACE(pdata, hw, "configuring for no carrier"); - /* Check global setting that LED1 - * usage is 10/100 indicator */ - pdata->gpio_setting = smsc911x_reg_read(pdata, - GPIO_CFG); - if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) && - (!pdata->using_extphy)) { - /* Force 10/100 LED off, after saving - * original GPIO configuration */ - pdata->gpio_orig_setting = pdata->gpio_setting; - - pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_; - pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_ - | GPIO_CFG_GPIODIR0_ - | GPIO_CFG_GPIOD0_); - smsc911x_reg_write(pdata, GPIO_CFG, - pdata->gpio_setting); - } - } - pdata->last_carrier = carrier; - } -} - -static int smsc911x_mii_probe(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - struct phy_device *phydev = NULL; - int ret; - - /* find the first phy */ - phydev = phy_find_first(pdata->mii_bus); - if (!phydev) { - netdev_err(dev, "no PHY found\n"); - return -ENODEV; - } - - SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X", - phydev->addr, phydev->phy_id); - - ret = phy_connect_direct(dev, phydev, - &smsc911x_phy_adjust_link, 0, - pdata->config.phy_interface); - - if (ret) { - netdev_err(dev, "Could not attach to PHY\n"); - return ret; - } - - netdev_info(dev, - "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - phydev->drv->name, dev_name(&phydev->dev), phydev->irq); - - /* mask with MAC supported features */ - phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phydev->advertising = phydev->supported; - - pdata->phy_dev = phydev; - pdata->last_duplex = -1; - pdata->last_carrier = -1; - -#ifdef USE_PHY_WORK_AROUND - if (smsc911x_phy_loopbacktest(dev) < 0) { - SMSC_WARN(pdata, hw, "Failed Loop Back Test"); - return -ENODEV; - } - SMSC_TRACE(pdata, hw, "Passed Loop Back Test"); -#endif /* USE_PHY_WORK_AROUND */ - - SMSC_TRACE(pdata, hw, "phy initialised successfully"); - return 0; -} - -static int __devinit smsc911x_mii_init(struct platform_device *pdev, - struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - int err = -ENXIO, i; - - pdata->mii_bus = mdiobus_alloc(); - if (!pdata->mii_bus) { - err = -ENOMEM; - goto err_out_1; - } - - pdata->mii_bus->name = SMSC_MDIONAME; - 
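/* Every slot in the irq table below is set to PHY_POLL: no interrupt line from the PHY is assumed, so phylib polls for link changes, which is why smsc911x_phy_adjust_link() runs periodically even when nothing has changed. */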
snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); - pdata->mii_bus->priv = pdata; - pdata->mii_bus->read = smsc911x_mii_read; - pdata->mii_bus->write = smsc911x_mii_write; - pdata->mii_bus->irq = pdata->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - pdata->mii_bus->irq[i] = PHY_POLL; - - pdata->mii_bus->parent = &pdev->dev; - - switch (pdata->idrev & 0xFFFF0000) { - case 0x01170000: - case 0x01150000: - case 0x117A0000: - case 0x115A0000: - /* External PHY supported, try to autodetect */ - smsc911x_phy_initialise_external(pdata); - break; - default: - SMSC_TRACE(pdata, hw, "External PHY is not supported, " - "using internal PHY"); - pdata->using_extphy = 0; - break; - } - - if (!pdata->using_extphy) { - /* Mask all PHYs except ID 1 (internal) */ - pdata->mii_bus->phy_mask = ~(1 << 1); - } - - if (mdiobus_register(pdata->mii_bus)) { - SMSC_WARN(pdata, probe, "Error registering mii bus"); - goto err_out_free_bus_2; - } - - if (smsc911x_mii_probe(dev) < 0) { - SMSC_WARN(pdata, probe, "Error registering mii bus"); - goto err_out_unregister_bus_3; - } - - return 0; - -err_out_unregister_bus_3: - mdiobus_unregister(pdata->mii_bus); -err_out_free_bus_2: - mdiobus_free(pdata->mii_bus); -err_out_1: - return err; -} - -/* Gets the number of tx statuses in the fifo */ -static unsigned int smsc911x_tx_get_txstatcount(struct smsc911x_data *pdata) -{ - return (smsc911x_reg_read(pdata, TX_FIFO_INF) - & TX_FIFO_INF_TSUSED_) >> 16; -} - -/* Reads tx statuses and increments counters where necessary */ -static void smsc911x_tx_update_txcounters(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - unsigned int tx_stat; - - while ((tx_stat = smsc911x_tx_get_txstatus(pdata)) != 0) { - if (unlikely(tx_stat & 0x80000000)) { - /* In this driver the packet tag is used as the packet - * length. Since a packet length can never reach the - * size of 0x8000, this bit is reserved. It is worth - * noting that the "reserved bit" in the warning above - * does not reference a hardware defined reserved bit - * but rather a driver defined one. 
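The accounting below unpacks the rest of the status word: bits 16..31 hold the tag (the length, per the comment above), the error-summary bit flags a failed transmit, and bits 3..6 carry the per-frame collision count. A compact model of the bookkeeping (bit positions assumed from this driver's TX_STS_* definitions; the excess/late-collision special cases are omitted):

    #include <stdint.h>

    #define TX_STS_ES 0x8000u   /* error summary (assumed value) */

    struct tx_stats { unsigned packets, bytes, errors, collisions; };

    static void account_tx_status(uint32_t tx_stat, struct tx_stats *s)
    {
            if (tx_stat & TX_STS_ES)
                    s->errors++;
            else {
                    s->packets++;
                    s->bytes += tx_stat >> 16;      /* tag doubles as length */
            }
            s->collisions += (tx_stat >> 3) & 0xF;  /* per-frame collisions */
    }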
- */ - SMSC_WARN(pdata, hw, "Packet tag reserved bit is high"); - } else { - if (unlikely(tx_stat & TX_STS_ES_)) { - dev->stats.tx_errors++; - } else { - dev->stats.tx_packets++; - dev->stats.tx_bytes += (tx_stat >> 16); - } - if (unlikely(tx_stat & TX_STS_EXCESS_COL_)) { - dev->stats.collisions += 16; - dev->stats.tx_aborted_errors += 1; - } else { - dev->stats.collisions += - ((tx_stat >> 3) & 0xF); - } - if (unlikely(tx_stat & TX_STS_LOST_CARRIER_)) - dev->stats.tx_carrier_errors += 1; - if (unlikely(tx_stat & TX_STS_LATE_COL_)) { - dev->stats.collisions++; - dev->stats.tx_aborted_errors++; - } - } - } -} - -/* Increments the Rx error counters */ -static void -smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat) -{ - int crc_err = 0; - - if (unlikely(rxstat & RX_STS_ES_)) { - dev->stats.rx_errors++; - if (unlikely(rxstat & RX_STS_CRC_ERR_)) { - dev->stats.rx_crc_errors++; - crc_err = 1; - } - } - if (likely(!crc_err)) { - if (unlikely((rxstat & RX_STS_FRAME_TYPE_) && - (rxstat & RX_STS_LENGTH_ERR_))) - dev->stats.rx_length_errors++; - if (rxstat & RX_STS_MCAST_) - dev->stats.multicast++; - } -} - -/* Quickly dumps bad packets */ -static void -smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) -{ - unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2; - - if (likely(pktwords >= 4)) { - unsigned int timeout = 500; - unsigned int val; - smsc911x_reg_write(pdata, RX_DP_CTRL, RX_DP_CTRL_RX_FFWD_); - do { - udelay(1); - val = smsc911x_reg_read(pdata, RX_DP_CTRL); - } while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout); - - if (unlikely(timeout == 0)) - SMSC_WARN(pdata, hw, "Timed out waiting for " - "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val); - } else { - unsigned int temp; - while (pktwords--) - temp = smsc911x_reg_read(pdata, RX_DATA_FIFO); - } -} - -/* NAPI poll function */ -static int smsc911x_poll(struct napi_struct *napi, int budget) -{ - struct smsc911x_data *pdata = - container_of(napi, struct smsc911x_data, napi); - struct net_device *dev = pdata->dev; - int npackets = 0; - - while (npackets < budget) { - unsigned int pktlength; - unsigned int pktwords; - struct sk_buff *skb; - unsigned int rxstat = smsc911x_rx_get_rxstatus(pdata); - - if (!rxstat) { - unsigned int temp; - /* We processed all packets available. Tell NAPI it can - * stop polling then re-enable rx interrupts */ - smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_); - napi_complete(napi); - temp = smsc911x_reg_read(pdata, INT_EN); - temp |= INT_EN_RSFL_EN_; - smsc911x_reg_write(pdata, INT_EN, temp); - break; - } - - /* Count packet for NAPI scheduling, even if it has an error. 
- * Error packets still require cycles to discard */ - npackets++; - - pktlength = ((rxstat & 0x3FFF0000) >> 16); - pktwords = (pktlength + NET_IP_ALIGN + 3) >> 2; - smsc911x_rx_counterrors(dev, rxstat); - - if (unlikely(rxstat & RX_STS_ES_)) { - SMSC_WARN(pdata, rx_err, - "Discarding packet with error bit set"); - /* Packet has an error, discard it and continue with - * the next */ - smsc911x_rx_fastforward(pdata, pktwords); - dev->stats.rx_dropped++; - continue; - } - - skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); - if (unlikely(!skb)) { - SMSC_WARN(pdata, rx_err, - "Unable to allocate skb for rx packet"); - /* Drop the packet and stop this polling iteration */ - smsc911x_rx_fastforward(pdata, pktwords); - dev->stats.rx_dropped++; - break; - } - - skb->data = skb->head; - skb_reset_tail_pointer(skb); - - /* Align IP on 16B boundary */ - skb_reserve(skb, NET_IP_ALIGN); - skb_put(skb, pktlength - 4); - pdata->ops->rx_readfifo(pdata, - (unsigned int *)skb->head, pktwords); - skb->protocol = eth_type_trans(skb, dev); - skb_checksum_none_assert(skb); - netif_receive_skb(skb); - - /* Update counters */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += (pktlength - 4); - } - - /* Return total received packets */ - return npackets; -} - -/* Returns hash bit number for given MAC address - * Example: - * 01 00 5E 00 00 01 -> returns bit number 31 */ -static unsigned int smsc911x_hash(char addr[ETH_ALEN]) -{ - return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f; -} - -static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata) -{ - /* Performs the multicast & mac_cr update. This is called when - * safe on the current hardware, and with the mac_lock held */ - unsigned int mac_cr; - - SMSC_ASSERT_MAC_LOCK(pdata); - - mac_cr = smsc911x_mac_read(pdata, MAC_CR); - mac_cr |= pdata->set_bits_mask; - mac_cr &= ~(pdata->clear_bits_mask); - smsc911x_mac_write(pdata, MAC_CR, mac_cr); - smsc911x_mac_write(pdata, HASHH, pdata->hashhi); - smsc911x_mac_write(pdata, HASHL, pdata->hashlo); - SMSC_TRACE(pdata, hw, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X", - mac_cr, pdata->hashhi, pdata->hashlo); -} - -static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) -{ - unsigned int mac_cr; - - /* This function is only called for older LAN911x devices - * (revA or revB), where MAC_CR, HASHH and HASHL should not - * be modified during Rx - newer devices immediately update the - * registers. 
- * - * This is called from interrupt context */ - - spin_lock(&pdata->mac_lock); - - /* Check Rx has stopped */ - if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_) - SMSC_WARN(pdata, drv, "Rx not stopped"); - - /* Perform the update - safe to do now Rx has stopped */ - smsc911x_rx_multicast_update(pdata); - - /* Re-enable Rx */ - mac_cr = smsc911x_mac_read(pdata, MAC_CR); - mac_cr |= MAC_CR_RXEN_; - smsc911x_mac_write(pdata, MAC_CR, mac_cr); - - pdata->multicast_update_pending = 0; - - spin_unlock(&pdata->mac_lock); -} - -static int smsc911x_soft_reset(struct smsc911x_data *pdata) -{ - unsigned int timeout; - unsigned int temp; - - /* Reset the LAN911x */ - smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_); - timeout = 10; - do { - udelay(10); - temp = smsc911x_reg_read(pdata, HW_CFG); - } while ((--timeout) && (temp & HW_CFG_SRST_)); - - if (unlikely(temp & HW_CFG_SRST_)) { - SMSC_WARN(pdata, drv, "Failed to complete reset"); - return -EIO; - } - return 0; -} - -/* Sets the device MAC address to dev_addr, called with mac_lock held */ -static void -smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6]) -{ - u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; - u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | - (dev_addr[1] << 8) | dev_addr[0]; - - SMSC_ASSERT_MAC_LOCK(pdata); - - smsc911x_mac_write(pdata, ADDRH, mac_high16); - smsc911x_mac_write(pdata, ADDRL, mac_low32); -} - -static int smsc911x_open(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - unsigned int timeout; - unsigned int temp; - unsigned int intcfg; - - /* if the phy is not yet registered, retry later*/ - if (!pdata->phy_dev) { - SMSC_WARN(pdata, hw, "phy_dev is NULL"); - return -EAGAIN; - } - - if (!is_valid_ether_addr(dev->dev_addr)) { - SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address"); - return -EADDRNOTAVAIL; - } - - /* Reset the LAN911x */ - if (smsc911x_soft_reset(pdata)) { - SMSC_WARN(pdata, hw, "soft reset failed"); - return -EIO; - } - - smsc911x_reg_write(pdata, HW_CFG, 0x00050000); - smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); - - /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */ - spin_lock_irq(&pdata->mac_lock); - smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q); - spin_unlock_irq(&pdata->mac_lock); - - /* Make sure EEPROM has finished loading before setting GPIO_CFG */ - timeout = 50; - while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && - --timeout) { - udelay(10); - } - - if (unlikely(timeout == 0)) - SMSC_WARN(pdata, ifup, - "Timed out waiting for EEPROM busy bit to clear"); - - smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000); - - /* The soft reset above cleared the device's MAC address, - * restore it from local copy (set in probe) */ - spin_lock_irq(&pdata->mac_lock); - smsc911x_set_hw_mac_address(pdata, dev->dev_addr); - spin_unlock_irq(&pdata->mac_lock); - - /* Initialise irqs, but leave all sources disabled */ - smsc911x_reg_write(pdata, INT_EN, 0); - smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); - - /* Set interrupt deassertion to 100uS */ - intcfg = ((10 << 24) | INT_CFG_IRQ_EN_); - - if (pdata->config.irq_polarity) { - SMSC_TRACE(pdata, ifup, "irq polarity: active high"); - intcfg |= INT_CFG_IRQ_POL_; - } else { - SMSC_TRACE(pdata, ifup, "irq polarity: active low"); - } - - if (pdata->config.irq_type) { - SMSC_TRACE(pdata, ifup, "irq type: push-pull"); - intcfg |= INT_CFG_IRQ_TYPE_; - } else { - SMSC_TRACE(pdata, ifup, "irq type: open drain"); - } - - smsc911x_reg_write(pdata, INT_CFG, 
intcfg); - - SMSC_TRACE(pdata, ifup, "Testing irq handler using IRQ %d", dev->irq); - pdata->software_irq_signal = 0; - smp_wmb(); - - temp = smsc911x_reg_read(pdata, INT_EN); - temp |= INT_EN_SW_INT_EN_; - smsc911x_reg_write(pdata, INT_EN, temp); - - timeout = 1000; - while (timeout--) { - if (pdata->software_irq_signal) - break; - msleep(1); - } - - if (!pdata->software_irq_signal) { - netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", - dev->irq); - return -ENODEV; - } - SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", - dev->irq); - - netdev_info(dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n", - (unsigned long)pdata->ioaddr, dev->irq); - - /* Reset the last known duplex and carrier */ - pdata->last_duplex = -1; - pdata->last_carrier = -1; - - /* Bring the PHY up */ - phy_start(pdata->phy_dev); - - temp = smsc911x_reg_read(pdata, HW_CFG); - /* Preserve TX FIFO size and external PHY configuration */ - temp &= (HW_CFG_TX_FIF_SZ_|0x00000FFF); - temp |= HW_CFG_SF_; - smsc911x_reg_write(pdata, HW_CFG, temp); - - temp = smsc911x_reg_read(pdata, FIFO_INT); - temp |= FIFO_INT_TX_AVAIL_LEVEL_; - temp &= ~(FIFO_INT_RX_STS_LEVEL_); - smsc911x_reg_write(pdata, FIFO_INT, temp); - - /* set RX Data offset to 2 bytes for alignment */ - smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); - - /* enable NAPI polling before enabling RX interrupts */ - napi_enable(&pdata->napi); - - temp = smsc911x_reg_read(pdata, INT_EN); - temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_ | INT_EN_RXSTOP_INT_EN_); - smsc911x_reg_write(pdata, INT_EN, temp); - - spin_lock_irq(&pdata->mac_lock); - temp = smsc911x_mac_read(pdata, MAC_CR); - temp |= (MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); - smsc911x_mac_write(pdata, MAC_CR, temp); - spin_unlock_irq(&pdata->mac_lock); - - smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_); - - netif_start_queue(dev); - return 0; -} - -/* Entry point for stopping the interface */ -static int smsc911x_stop(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - unsigned int temp; - - /* Disable all device interrupts */ - temp = smsc911x_reg_read(pdata, INT_CFG); - temp &= ~INT_CFG_IRQ_EN_; - smsc911x_reg_write(pdata, INT_CFG, temp); - - /* Stop Tx and Rx polling */ - netif_stop_queue(dev); - napi_disable(&pdata->napi); - - /* At this point all Rx and Tx activity is stopped */ - dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); - smsc911x_tx_update_txcounters(dev); - - /* Bring the PHY down */ - if (pdata->phy_dev) - phy_stop(pdata->phy_dev); - - SMSC_TRACE(pdata, ifdown, "Interface stopped"); - return 0; -} - -/* Entry point for transmitting a packet */ -static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - unsigned int freespace; - unsigned int tx_cmd_a; - unsigned int tx_cmd_b; - unsigned int temp; - u32 wrsz; - ulong bufp; - - freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_; - - if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD)) - SMSC_WARN(pdata, tx_err, - "Tx data fifo low, space available: %d", freespace); - - /* Word alignment adjustment */ - tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16; - tx_cmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; - tx_cmd_a |= (unsigned int)skb->len; - - tx_cmd_b = ((unsigned int)skb->len) << 16; - tx_cmd_b |= (unsigned int)skb->len; - - smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_a); - smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_b); - - bufp = (ulong)skb->data & (~0x3); - wrsz = (u32)skb->len + 3; 
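/* From here wrsz becomes a whole-word count measured from the word-aligned base bufp: the payload length, plus the 0-3 bytes of misalignment added next, rounded up by the +3 before the final shift; e.g. a 60-byte frame at offset 2 gives (60 + 3 + 2) >> 2 = 16 words. */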
- wrsz += (u32)((ulong)skb->data & 0x3); - wrsz >>= 2; - - pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); - freespace -= (skb->len + 32); - skb_tx_timestamp(skb); - dev_kfree_skb(skb); - - if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30)) - smsc911x_tx_update_txcounters(dev); - - if (freespace < TX_FIFO_LOW_THRESHOLD) { - netif_stop_queue(dev); - temp = smsc911x_reg_read(pdata, FIFO_INT); - temp &= 0x00FFFFFF; - temp |= 0x32000000; - smsc911x_reg_write(pdata, FIFO_INT, temp); - } - - return NETDEV_TX_OK; -} - -/* Entry point for getting status counters */ -static struct net_device_stats *smsc911x_get_stats(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - smsc911x_tx_update_txcounters(dev); - dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); - return &dev->stats; -} - -/* Entry point for setting addressing modes */ -static void smsc911x_set_multicast_list(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - unsigned long flags; - - if (dev->flags & IFF_PROMISC) { - /* Enabling promiscuous mode */ - pdata->set_bits_mask = MAC_CR_PRMS_; - pdata->clear_bits_mask = (MAC_CR_MCPAS_ | MAC_CR_HPFILT_); - pdata->hashhi = 0; - pdata->hashlo = 0; - } else if (dev->flags & IFF_ALLMULTI) { - /* Enabling all multicast mode */ - pdata->set_bits_mask = MAC_CR_MCPAS_; - pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_); - pdata->hashhi = 0; - pdata->hashlo = 0; - } else if (!netdev_mc_empty(dev)) { - /* Enabling specific multicast addresses */ - unsigned int hash_high = 0; - unsigned int hash_low = 0; - struct netdev_hw_addr *ha; - - pdata->set_bits_mask = MAC_CR_HPFILT_; - pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_); - - netdev_for_each_mc_addr(ha, dev) { - unsigned int bitnum = smsc911x_hash(ha->addr); - unsigned int mask = 0x01 << (bitnum & 0x1F); - - if (bitnum & 0x20) - hash_high |= mask; - else - hash_low |= mask; - } - - pdata->hashhi = hash_high; - pdata->hashlo = hash_low; - } else { - /* Enabling local MAC address only */ - pdata->set_bits_mask = 0; - pdata->clear_bits_mask = - (MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_); - pdata->hashhi = 0; - pdata->hashlo = 0; - } - - spin_lock_irqsave(&pdata->mac_lock, flags); - - if (pdata->generation <= 1) { - /* Older hardware revision - cannot change these flags while - * receiving data */ - if (!pdata->multicast_update_pending) { - unsigned int temp; - SMSC_TRACE(pdata, hw, "scheduling mcast update"); - pdata->multicast_update_pending = 1; - - /* Request the hardware to stop, then perform the - * update when we get an RX_STOP interrupt */ - temp = smsc911x_mac_read(pdata, MAC_CR); - temp &= ~(MAC_CR_RXEN_); - smsc911x_mac_write(pdata, MAC_CR, temp); - } else { - /* There is another update pending, this should now - * use the newer values */ - } - } else { - /* Newer hardware revision - can write immediately */ - smsc911x_rx_multicast_update(pdata); - } - - spin_unlock_irqrestore(&pdata->mac_lock, flags); -} - -static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct smsc911x_data *pdata = netdev_priv(dev); - u32 intsts = smsc911x_reg_read(pdata, INT_STS); - u32 inten = smsc911x_reg_read(pdata, INT_EN); - int serviced = IRQ_NONE; - u32 temp; - - if (unlikely(intsts & inten & INT_STS_SW_INT_)) { - temp = smsc911x_reg_read(pdata, INT_EN); - temp &= (~INT_EN_SW_INT_EN_); - smsc911x_reg_write(pdata, INT_EN, temp); - smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); - pdata->software_irq_signal = 1; 
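/* Aside (not from the original patch): a sketch of how this flag pairs
 * with the IRQ self-test in smsc911x_open() shown earlier:
 *
 *   open():                          ISR, on INT_STS_SW_INT_:
 *     software_irq_signal = 0;         software_irq_signal = 1;
 *     enable INT_EN_SW_INT_EN_;        smp_wmb();
 *     poll the flag for ~1 s (msleep)
 *
 * The smp_wmb() just below orders the flag store before any later
 * stores from the handler, so the polling opener reliably sees it.
 */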
- smp_wmb(); - serviced = IRQ_HANDLED; - } - - if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { - /* Called when there is a multicast update scheduled and - * it is now safe to complete the update */ - SMSC_TRACE(pdata, intr, "RX Stop interrupt"); - smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); - if (pdata->multicast_update_pending) - smsc911x_rx_multicast_update_workaround(pdata); - serviced = IRQ_HANDLED; - } - - if (intsts & inten & INT_STS_TDFA_) { - temp = smsc911x_reg_read(pdata, FIFO_INT); - temp |= FIFO_INT_TX_AVAIL_LEVEL_; - smsc911x_reg_write(pdata, FIFO_INT, temp); - smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); - netif_wake_queue(dev); - serviced = IRQ_HANDLED; - } - - if (unlikely(intsts & inten & INT_STS_RXE_)) { - SMSC_TRACE(pdata, intr, "RX Error interrupt"); - smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); - serviced = IRQ_HANDLED; - } - - if (likely(intsts & inten & INT_STS_RSFL_)) { - if (likely(napi_schedule_prep(&pdata->napi))) { - /* Disable Rx interrupts */ - temp = smsc911x_reg_read(pdata, INT_EN); - temp &= (~INT_EN_RSFL_EN_); - smsc911x_reg_write(pdata, INT_EN, temp); - /* Schedule a NAPI poll */ - __napi_schedule(&pdata->napi); - } else { - SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); - } - serviced = IRQ_HANDLED; - } - - return serviced; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void smsc911x_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - smsc911x_irqhandler(0, dev); - enable_irq(dev->irq); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - -static int smsc911x_set_mac_address(struct net_device *dev, void *p) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - struct sockaddr *addr = p; - - /* On older hardware revisions we cannot change the mac address - * registers while receiving data. Newer devices can safely change - * this at any time. 
*/ - if (pdata->generation <= 1 && netif_running(dev)) - return -EBUSY; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); - - spin_lock_irq(&pdata->mac_lock); - smsc911x_set_hw_mac_address(pdata, dev->dev_addr); - spin_unlock_irq(&pdata->mac_lock); - - netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr); - - return 0; -} - -/* Standard ioctls for mii-tool */ -static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - - if (!netif_running(dev) || !pdata->phy_dev) - return -EINVAL; - - return phy_mii_ioctl(pdata->phy_dev, ifr, cmd); -} - -static int -smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; - return phy_ethtool_gset(pdata->phy_dev, cmd); -} - -static int -smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - - return phy_ethtool_sset(pdata->phy_dev, cmd); -} - -static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver)); - strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, dev_name(dev->dev.parent), - sizeof(info->bus_info)); -} - -static int smsc911x_ethtool_nwayreset(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - - return phy_start_aneg(pdata->phy_dev); -} - -static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - return pdata->msg_enable; -} - -static void smsc911x_ethtool_setmsglevel(struct net_device *dev, u32 level) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - pdata->msg_enable = level; -} - -static int smsc911x_ethtool_getregslen(struct net_device *dev) -{ - return (((E2P_DATA - ID_REV) / 4 + 1) + (WUCSR - MAC_CR) + 1 + 32) * - sizeof(u32); -} - -static void -smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, - void *buf) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - struct phy_device *phy_dev = pdata->phy_dev; - unsigned long flags; - unsigned int i; - unsigned int j = 0; - u32 *data = buf; - - regs->version = pdata->idrev; - for (i = ID_REV; i <= E2P_DATA; i += (sizeof(u32))) - data[j++] = smsc911x_reg_read(pdata, i); - - for (i = MAC_CR; i <= WUCSR; i++) { - spin_lock_irqsave(&pdata->mac_lock, flags); - data[j++] = smsc911x_mac_read(pdata, i); - spin_unlock_irqrestore(&pdata->mac_lock, flags); - } - - for (i = 0; i <= 31; i++) - data[j++] = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, i); -} - -static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata) -{ - unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG); - temp &= ~GPIO_CFG_EEPR_EN_; - smsc911x_reg_write(pdata, GPIO_CFG, temp); - msleep(1); -} - -static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op) -{ - int timeout = 100; - u32 e2cmd; - - SMSC_TRACE(pdata, drv, "op 0x%08x", op); - if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) { - SMSC_WARN(pdata, drv, "Busy at start"); - return -EBUSY; - } - - e2cmd = op | E2P_CMD_EPC_BUSY_; - smsc911x_reg_write(pdata, E2P_CMD, e2cmd); - - do { - msleep(1); - e2cmd = smsc911x_reg_read(pdata, E2P_CMD); - } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); - - if (!timeout) { - SMSC_TRACE(pdata, drv, "TIMED OUT"); 
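/* Aside (not part of the original patch): this is the usual
 * write-command-then-poll idiom; with timeout = 100 and msleep(1) per
 * iteration the loop above allows roughly 100 ms for E2P_CMD_EPC_BUSY_
 * to clear before this -EAGAIN is returned.
 */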
- return -EAGAIN; - } - - if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { - SMSC_TRACE(pdata, drv, "Error occurred during eeprom operation"); - return -EINVAL; - } - - return 0; -} - -static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata, - u8 address, u8 *data) -{ - u32 op = E2P_CMD_EPC_CMD_READ_ | address; - int ret; - - SMSC_TRACE(pdata, drv, "address 0x%x", address); - ret = smsc911x_eeprom_send_cmd(pdata, op); - - if (!ret) - data[address] = smsc911x_reg_read(pdata, E2P_DATA); - - return ret; -} - -static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata, - u8 address, u8 data) -{ - u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; - u32 temp; - int ret; - - SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data); - ret = smsc911x_eeprom_send_cmd(pdata, op); - - if (!ret) { - op = E2P_CMD_EPC_CMD_WRITE_ | address; - smsc911x_reg_write(pdata, E2P_DATA, (u32)data); - - /* Workaround for hardware read-after-write restriction */ - temp = smsc911x_reg_read(pdata, BYTE_TEST); - - ret = smsc911x_eeprom_send_cmd(pdata, op); - } - - return ret; -} - -static int smsc911x_ethtool_get_eeprom_len(struct net_device *dev) -{ - return SMSC911X_EEPROM_SIZE; -} - -static int smsc911x_ethtool_get_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - u8 eeprom_data[SMSC911X_EEPROM_SIZE]; - int len; - int i; - - smsc911x_eeprom_enable_access(pdata); - - len = min(eeprom->len, SMSC911X_EEPROM_SIZE); - for (i = 0; i < len; i++) { - int ret = smsc911x_eeprom_read_location(pdata, i, eeprom_data); - if (ret < 0) { - eeprom->len = 0; - return ret; - } - } - - memcpy(data, &eeprom_data[eeprom->offset], len); - eeprom->len = len; - return 0; -} - -static int smsc911x_ethtool_set_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - int ret; - struct smsc911x_data *pdata = netdev_priv(dev); - - smsc911x_eeprom_enable_access(pdata); - smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWEN_); - ret = smsc911x_eeprom_write_location(pdata, eeprom->offset, *data); - smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWDS_); - - /* Single byte write, according to man page */ - eeprom->len = 1; - - return ret; -} - -static const struct ethtool_ops smsc911x_ethtool_ops = { - .get_settings = smsc911x_ethtool_getsettings, - .set_settings = smsc911x_ethtool_setsettings, - .get_link = ethtool_op_get_link, - .get_drvinfo = smsc911x_ethtool_getdrvinfo, - .nway_reset = smsc911x_ethtool_nwayreset, - .get_msglevel = smsc911x_ethtool_getmsglevel, - .set_msglevel = smsc911x_ethtool_setmsglevel, - .get_regs_len = smsc911x_ethtool_getregslen, - .get_regs = smsc911x_ethtool_getregs, - .get_eeprom_len = smsc911x_ethtool_get_eeprom_len, - .get_eeprom = smsc911x_ethtool_get_eeprom, - .set_eeprom = smsc911x_ethtool_set_eeprom, -}; - -static const struct net_device_ops smsc911x_netdev_ops = { - .ndo_open = smsc911x_open, - .ndo_stop = smsc911x_stop, - .ndo_start_xmit = smsc911x_hard_start_xmit, - .ndo_get_stats = smsc911x_get_stats, - .ndo_set_multicast_list = smsc911x_set_multicast_list, - .ndo_do_ioctl = smsc911x_do_ioctl, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = smsc911x_set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = smsc911x_poll_controller, -#endif -}; - -/* copies the current mac address from hardware to dev->dev_addr */ -static void __devinit smsc911x_read_mac_address(struct net_device *dev) -{ - struct smsc911x_data *pdata = 
netdev_priv(dev); - u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); - u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); - - dev->dev_addr[0] = (u8)(mac_low32); - dev->dev_addr[1] = (u8)(mac_low32 >> 8); - dev->dev_addr[2] = (u8)(mac_low32 >> 16); - dev->dev_addr[3] = (u8)(mac_low32 >> 24); - dev->dev_addr[4] = (u8)(mac_high16); - dev->dev_addr[5] = (u8)(mac_high16 >> 8); -} - -/* Initializing private device structures, only called from probe */ -static int __devinit smsc911x_init(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - unsigned int byte_test; - - SMSC_TRACE(pdata, probe, "Driver Parameters:"); - SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX", - (unsigned long)pdata->ioaddr); - SMSC_TRACE(pdata, probe, "IRQ: %d", dev->irq); - SMSC_TRACE(pdata, probe, "PHY will be autodetected."); - - spin_lock_init(&pdata->dev_lock); - spin_lock_init(&pdata->mac_lock); - - if (pdata->ioaddr == 0) { - SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000"); - return -ENODEV; - } - - /* Check byte ordering */ - byte_test = smsc911x_reg_read(pdata, BYTE_TEST); - SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test); - if (byte_test == 0x43218765) { - SMSC_TRACE(pdata, probe, "BYTE_TEST looks swapped, " - "applying WORD_SWAP"); - smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff); - - /* 1 dummy read of BYTE_TEST is needed after a write to - * WORD_SWAP before its contents are valid */ - byte_test = smsc911x_reg_read(pdata, BYTE_TEST); - - byte_test = smsc911x_reg_read(pdata, BYTE_TEST); - } - - if (byte_test != 0x87654321) { - SMSC_WARN(pdata, drv, "BYTE_TEST: 0x%08X", byte_test); - if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) { - SMSC_WARN(pdata, probe, - "top 16 bits equal to bottom 16 bits"); - SMSC_TRACE(pdata, probe, - "This may mean the chip is set " - "for 32 bit while the bus is reading 16 bit"); - } - return -ENODEV; - } - - /* Default generation to zero (all workarounds apply) */ - pdata->generation = 0; - - pdata->idrev = smsc911x_reg_read(pdata, ID_REV); - switch (pdata->idrev & 0xFFFF0000) { - case 0x01180000: - case 0x01170000: - case 0x01160000: - case 0x01150000: - /* LAN911[5678] family */ - pdata->generation = pdata->idrev & 0x0000FFFF; - break; - - case 0x118A0000: - case 0x117A0000: - case 0x116A0000: - case 0x115A0000: - /* LAN921[5678] family */ - pdata->generation = 3; - break; - - case 0x92100000: - case 0x92110000: - case 0x92200000: - case 0x92210000: - /* LAN9210/LAN9211/LAN9220/LAN9221 */ - pdata->generation = 4; - break; - - default: - SMSC_WARN(pdata, probe, "LAN911x not identified, idrev: 0x%08X", - pdata->idrev); - return -ENODEV; - } - - SMSC_TRACE(pdata, probe, - "LAN911x identified, idrev: 0x%08X, generation: %d", - pdata->idrev, pdata->generation); - - if (pdata->generation == 0) - SMSC_WARN(pdata, probe, - "This driver is not intended for this chip revision"); - - /* workaround for platforms without an eeprom, where the mac address - * is stored elsewhere and set by the bootloader. 
This saves the
- * mac address before resetting the device */
- if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
- spin_lock_irq(&pdata->mac_lock);
- smsc911x_read_mac_address(dev);
- spin_unlock_irq(&pdata->mac_lock);
- }
-
- /* Reset the LAN911x */
- if (smsc911x_soft_reset(pdata))
- return -ENODEV;
-
- /* Disable all interrupt sources until we bring the device up */
- smsc911x_reg_write(pdata, INT_EN, 0);
-
- ether_setup(dev);
- dev->flags |= IFF_MULTICAST;
- netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
- dev->netdev_ops = &smsc911x_netdev_ops;
- dev->ethtool_ops = &smsc911x_ethtool_ops;
-
- return 0;
-}
-
-static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
-{
- struct net_device *dev;
- struct smsc911x_data *pdata;
- struct resource *res;
-
- dev = platform_get_drvdata(pdev);
- BUG_ON(!dev);
- pdata = netdev_priv(dev);
- BUG_ON(!pdata);
- BUG_ON(!pdata->ioaddr);
- BUG_ON(!pdata->phy_dev);
-
- SMSC_TRACE(pdata, ifdown, "Stopping driver");
-
- phy_disconnect(pdata->phy_dev);
- pdata->phy_dev = NULL;
- mdiobus_unregister(pdata->mii_bus);
- mdiobus_free(pdata->mii_bus);
-
- platform_set_drvdata(pdev, NULL);
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "smsc911x-memory");
- if (!res)
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- release_mem_region(res->start, resource_size(res));
-
- iounmap(pdata->ioaddr);
-
- free_netdev(dev);
-
- return 0;
-}
-
-/* standard register access */
-static const struct smsc911x_ops standard_smsc911x_ops = {
- .reg_read = __smsc911x_reg_read,
- .reg_write = __smsc911x_reg_write,
- .rx_readfifo = smsc911x_rx_readfifo,
- .tx_writefifo = smsc911x_tx_writefifo,
-};
-
-/* shifted register access */
-static const struct smsc911x_ops shifted_smsc911x_ops = {
- .reg_read = __smsc911x_reg_read_shift,
- .reg_write = __smsc911x_reg_write_shift,
- .rx_readfifo = smsc911x_rx_readfifo_shift,
- .tx_writefifo = smsc911x_tx_writefifo_shift,
-};
-
-#ifdef CONFIG_OF
-static int __devinit smsc911x_probe_config_dt(
- struct smsc911x_platform_config *config,
- struct device_node *np)
-{
- const char *mac;
- u32 width = 0;
-
- if (!np)
- return -ENODEV;
-
- config->phy_interface = of_get_phy_mode(np);
-
- mac = of_get_mac_address(np);
- if (mac)
- memcpy(config->mac, mac, ETH_ALEN);
-
- of_property_read_u32(np, "reg-shift", &config->shift);
-
- of_property_read_u32(np, "reg-io-width", &width);
- if (width == 4)
- config->flags |= SMSC911X_USE_32BIT;
-
- if (of_get_property(np, "smsc,irq-active-high", NULL))
- config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
-
- if (of_get_property(np, "smsc,irq-push-pull", NULL))
- config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL;
-
- if (of_get_property(np, "smsc,force-internal-phy", NULL))
- config->flags |= SMSC911X_FORCE_INTERNAL_PHY;
-
- if (of_get_property(np, "smsc,force-external-phy", NULL))
- config->flags |= SMSC911X_FORCE_EXTERNAL_PHY;
-
- if (of_get_property(np, "smsc,save-mac-address", NULL))
- config->flags |= SMSC911X_SAVE_MAC_ADDRESS;
-
- return 0;
-}
-#else
-static inline int smsc911x_probe_config_dt(
- struct smsc911x_platform_config *config,
- struct device_node *np)
-{
- return -ENODEV;
-}
-#endif /* CONFIG_OF */
-
-static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct net_device *dev;
- struct smsc911x_data *pdata;
- struct smsc911x_platform_config *config = pdev->dev.platform_data;
- struct resource *res, *irq_res;
-
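/* Aside (assumed binding, not from the original patch): the device-tree
 * path above, smsc911x_probe_config_dt(), maps node properties onto
 * smsc911x_platform_config. A hypothetical node it would accept:
 *
 *   ethernet@38000000 {
 *           compatible = "smsc,lan9115";
 *           reg = <0x38000000 0x1000>;
 *           reg-io-width = <4>;          // sets SMSC911X_USE_32BIT
 *           smsc,irq-active-high;        // IRQ polarity active high
 *           smsc,save-mac-address;       // keep bootloader-set MAC
 *   };
 *
 * The base address and sizes here are placeholders only.
 */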
unsigned int intcfg = 0; - int res_size, irq_flags; - int retval; - - pr_info("Driver version %s\n", SMSC_DRV_VERSION); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "smsc911x-memory"); - if (!res) - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - pr_warn("Could not allocate resource\n"); - retval = -ENODEV; - goto out_0; - } - res_size = resource_size(res); - - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq_res) { - pr_warn("Could not allocate irq resource\n"); - retval = -ENODEV; - goto out_0; - } - - if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) { - retval = -EBUSY; - goto out_0; - } - - dev = alloc_etherdev(sizeof(struct smsc911x_data)); - if (!dev) { - pr_warn("Could not allocate device\n"); - retval = -ENOMEM; - goto out_release_io_1; - } - - SET_NETDEV_DEV(dev, &pdev->dev); - - pdata = netdev_priv(dev); - - dev->irq = irq_res->start; - irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; - pdata->ioaddr = ioremap_nocache(res->start, res_size); - - pdata->dev = dev; - pdata->msg_enable = ((1 << debug) - 1); - - if (pdata->ioaddr == NULL) { - SMSC_WARN(pdata, probe, "Error smsc911x base address invalid"); - retval = -ENOMEM; - goto out_free_netdev_2; - } - - retval = smsc911x_probe_config_dt(&pdata->config, np); - if (retval && config) { - /* copy config parameters across to pdata */ - memcpy(&pdata->config, config, sizeof(pdata->config)); - retval = 0; - } - - if (retval) { - SMSC_WARN(pdata, probe, "Error smsc911x config not found"); - goto out_unmap_io_3; - } - - /* assume standard, non-shifted, access to HW registers */ - pdata->ops = &standard_smsc911x_ops; - /* apply the right access if shifting is needed */ - if (pdata->config.shift) - pdata->ops = &shifted_smsc911x_ops; - - retval = smsc911x_init(dev); - if (retval < 0) - goto out_unmap_io_3; - - /* configure irq polarity and type before connecting isr */ - if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH) - intcfg |= INT_CFG_IRQ_POL_; - - if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL) - intcfg |= INT_CFG_IRQ_TYPE_; - - smsc911x_reg_write(pdata, INT_CFG, intcfg); - - /* Ensure interrupts are globally disabled before connecting ISR */ - smsc911x_reg_write(pdata, INT_EN, 0); - smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); - - retval = request_irq(dev->irq, smsc911x_irqhandler, - irq_flags | IRQF_SHARED, dev->name, dev); - if (retval) { - SMSC_WARN(pdata, probe, - "Unable to claim requested irq: %d", dev->irq); - goto out_unmap_io_3; - } - - platform_set_drvdata(pdev, dev); - - retval = register_netdev(dev); - if (retval) { - SMSC_WARN(pdata, probe, "Error %i registering device", retval); - goto out_unset_drvdata_4; - } else { - SMSC_TRACE(pdata, probe, - "Network interface: \"%s\"", dev->name); - } - - retval = smsc911x_mii_init(pdev, dev); - if (retval) { - SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); - goto out_unregister_netdev_5; - } - - spin_lock_irq(&pdata->mac_lock); - - /* Check if mac address has been specified when bringing interface up */ - if (is_valid_ether_addr(dev->dev_addr)) { - smsc911x_set_hw_mac_address(pdata, dev->dev_addr); - SMSC_TRACE(pdata, probe, - "MAC Address is specified by configuration"); - } else if (is_valid_ether_addr(pdata->config.mac)) { - memcpy(dev->dev_addr, pdata->config.mac, 6); - SMSC_TRACE(pdata, probe, - "MAC Address specified by platform data"); - } else { - /* Try reading mac address from device. 
If EEPROM is present
- * it will already have been set */
- smsc_get_mac(dev);
-
- if (is_valid_ether_addr(dev->dev_addr)) {
- /* eeprom values are valid so use them */
- SMSC_TRACE(pdata, probe,
- "Mac Address is read from LAN911x EEPROM");
- } else {
- /* eeprom values are invalid, generate random MAC */
- random_ether_addr(dev->dev_addr);
- smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
- SMSC_TRACE(pdata, probe,
- "MAC Address is set to random_ether_addr");
- }
- }
-
- spin_unlock_irq(&pdata->mac_lock);
-
- netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
-
- return 0;
-
-out_unregister_netdev_5:
- unregister_netdev(dev);
-out_unset_drvdata_4:
- platform_set_drvdata(pdev, NULL);
- free_irq(dev->irq, dev);
-out_unmap_io_3:
- iounmap(pdata->ioaddr);
-out_free_netdev_2:
- free_netdev(dev);
-out_release_io_1:
- release_mem_region(res->start, resource_size(res));
-out_0:
- return retval;
-}
-
-#ifdef CONFIG_PM
-/* This implementation assumes the device remains powered on its VDDVARIO
- * pins during suspend. */
-
-/* TODO: implement freeze/thaw callbacks for hibernation. */
-
-static int smsc911x_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct smsc911x_data *pdata = netdev_priv(ndev);
-
- /* enable wake on LAN, energy detection and the external PME
- * signal. */
- smsc911x_reg_write(pdata, PMT_CTRL,
- PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
- PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
-
- return 0;
-}
-
-static int smsc911x_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct smsc911x_data *pdata = netdev_priv(ndev);
- unsigned int to = 100;
-
- /* Note 3.11 from the datasheet:
- * "When the LAN9220 is in a power saving state, a write of any
- * data to the BYTE_TEST register will wake-up the device."
- */
- smsc911x_reg_write(pdata, BYTE_TEST, 0);
-
- /* poll the READY bit in PMT_CTRL. Any other access to the device is
- * forbidden while this bit isn't set. Try for 100ms and return -EIO
- * if it failed. */
- while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
- udelay(1000);
-
- return (to == 0) ?
-EIO : 0; -} - -static const struct dev_pm_ops smsc911x_pm_ops = { - .suspend = smsc911x_suspend, - .resume = smsc911x_resume, -}; - -#define SMSC911X_PM_OPS (&smsc911x_pm_ops) - -#else -#define SMSC911X_PM_OPS NULL -#endif - -static const struct of_device_id smsc911x_dt_ids[] = { - { .compatible = "smsc,lan9115", }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, smsc911x_dt_ids); - -static struct platform_driver smsc911x_driver = { - .probe = smsc911x_drv_probe, - .remove = __devexit_p(smsc911x_drv_remove), - .driver = { - .name = SMSC_CHIPNAME, - .owner = THIS_MODULE, - .pm = SMSC911X_PM_OPS, - .of_match_table = smsc911x_dt_ids, - }, -}; - -/* Entry point for loading the module */ -static int __init smsc911x_init_module(void) -{ - SMSC_INITIALIZE(); - return platform_driver_register(&smsc911x_driver); -} - -/* entry point for unloading the module */ -static void __exit smsc911x_cleanup_module(void) -{ - platform_driver_unregister(&smsc911x_driver); -} - -module_init(smsc911x_init_module); -module_exit(smsc911x_cleanup_module); diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h deleted file mode 100644 index 8d67aac..0000000 --- a/drivers/net/smsc911x.h +++ /dev/null @@ -1,404 +0,0 @@ -/*************************************************************************** - * - * Copyright (C) 2004-2008 SMSC - * Copyright (C) 2005-2008 ARM - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - ***************************************************************************/ -#ifndef __SMSC911X_H__ -#define __SMSC911X_H__ - -#define TX_FIFO_LOW_THRESHOLD ((u32)1600) -#define SMSC911X_EEPROM_SIZE ((u32)128) -#define USE_DEBUG 0 - -/* This is the maximum number of packets to be received every - * NAPI poll */ -#define SMSC_NAPI_WEIGHT 16 - -/* implements a PHY loopback test at initialisation time, to ensure a packet - * can be successfully looped back */ -#define USE_PHY_WORK_AROUND - -#if USE_DEBUG >= 1 -#define SMSC_WARN(pdata, nlevel, fmt, args...) \ - netif_warn(pdata, nlevel, (pdata)->dev, \ - "%s: " fmt "\n", __func__, ##args) -#else -#define SMSC_WARN(pdata, nlevel, fmt, args...) \ - no_printk(fmt "\n", ##args) -#endif - -#if USE_DEBUG >= 2 -#define SMSC_TRACE(pdata, nlevel, fmt, args...) \ - netif_info(pdata, nlevel, pdata->dev, fmt "\n", ##args) -#else -#define SMSC_TRACE(pdata, nlevel, fmt, args...) 
\ - no_printk(fmt "\n", ##args) -#endif - -#ifdef CONFIG_DEBUG_SPINLOCK -#define SMSC_ASSERT_MAC_LOCK(pdata) \ - WARN_ON(!spin_is_locked(&pdata->mac_lock)) -#else -#define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0) -#endif /* CONFIG_DEBUG_SPINLOCK */ - -/* SMSC911x registers and bitfields */ -#define RX_DATA_FIFO 0x00 - -#define TX_DATA_FIFO 0x20 -#define TX_CMD_A_ON_COMP_ 0x80000000 -#define TX_CMD_A_BUF_END_ALGN_ 0x03000000 -#define TX_CMD_A_4_BYTE_ALGN_ 0x00000000 -#define TX_CMD_A_16_BYTE_ALGN_ 0x01000000 -#define TX_CMD_A_32_BYTE_ALGN_ 0x02000000 -#define TX_CMD_A_DATA_OFFSET_ 0x001F0000 -#define TX_CMD_A_FIRST_SEG_ 0x00002000 -#define TX_CMD_A_LAST_SEG_ 0x00001000 -#define TX_CMD_A_BUF_SIZE_ 0x000007FF -#define TX_CMD_B_PKT_TAG_ 0xFFFF0000 -#define TX_CMD_B_ADD_CRC_DISABLE_ 0x00002000 -#define TX_CMD_B_DISABLE_PADDING_ 0x00001000 -#define TX_CMD_B_PKT_BYTE_LENGTH_ 0x000007FF - -#define RX_STATUS_FIFO 0x40 -#define RX_STS_ES_ 0x00008000 -#define RX_STS_LENGTH_ERR_ 0x00001000 -#define RX_STS_MCAST_ 0x00000400 -#define RX_STS_FRAME_TYPE_ 0x00000020 -#define RX_STS_CRC_ERR_ 0x00000002 - -#define RX_STATUS_FIFO_PEEK 0x44 - -#define TX_STATUS_FIFO 0x48 -#define TX_STS_ES_ 0x00008000 -#define TX_STS_LOST_CARRIER_ 0x00000800 -#define TX_STS_NO_CARRIER_ 0x00000400 -#define TX_STS_LATE_COL_ 0x00000200 -#define TX_STS_EXCESS_COL_ 0x00000100 - -#define TX_STATUS_FIFO_PEEK 0x4C - -#define ID_REV 0x50 -#define ID_REV_CHIP_ID_ 0xFFFF0000 -#define ID_REV_REV_ID_ 0x0000FFFF - -#define INT_CFG 0x54 -#define INT_CFG_INT_DEAS_ 0xFF000000 -#define INT_CFG_INT_DEAS_CLR_ 0x00004000 -#define INT_CFG_INT_DEAS_STS_ 0x00002000 -#define INT_CFG_IRQ_INT_ 0x00001000 -#define INT_CFG_IRQ_EN_ 0x00000100 -#define INT_CFG_IRQ_POL_ 0x00000010 -#define INT_CFG_IRQ_TYPE_ 0x00000001 - -#define INT_STS 0x58 -#define INT_STS_SW_INT_ 0x80000000 -#define INT_STS_TXSTOP_INT_ 0x02000000 -#define INT_STS_RXSTOP_INT_ 0x01000000 -#define INT_STS_RXDFH_INT_ 0x00800000 -#define INT_STS_RXDF_INT_ 0x00400000 -#define INT_STS_TX_IOC_ 0x00200000 -#define INT_STS_RXD_INT_ 0x00100000 -#define INT_STS_GPT_INT_ 0x00080000 -#define INT_STS_PHY_INT_ 0x00040000 -#define INT_STS_PME_INT_ 0x00020000 -#define INT_STS_TXSO_ 0x00010000 -#define INT_STS_RWT_ 0x00008000 -#define INT_STS_RXE_ 0x00004000 -#define INT_STS_TXE_ 0x00002000 -#define INT_STS_TDFU_ 0x00000800 -#define INT_STS_TDFO_ 0x00000400 -#define INT_STS_TDFA_ 0x00000200 -#define INT_STS_TSFF_ 0x00000100 -#define INT_STS_TSFL_ 0x00000080 -#define INT_STS_RXDF_ 0x00000040 -#define INT_STS_RDFL_ 0x00000020 -#define INT_STS_RSFF_ 0x00000010 -#define INT_STS_RSFL_ 0x00000008 -#define INT_STS_GPIO2_INT_ 0x00000004 -#define INT_STS_GPIO1_INT_ 0x00000002 -#define INT_STS_GPIO0_INT_ 0x00000001 - -#define INT_EN 0x5C -#define INT_EN_SW_INT_EN_ 0x80000000 -#define INT_EN_TXSTOP_INT_EN_ 0x02000000 -#define INT_EN_RXSTOP_INT_EN_ 0x01000000 -#define INT_EN_RXDFH_INT_EN_ 0x00800000 -#define INT_EN_TIOC_INT_EN_ 0x00200000 -#define INT_EN_RXD_INT_EN_ 0x00100000 -#define INT_EN_GPT_INT_EN_ 0x00080000 -#define INT_EN_PHY_INT_EN_ 0x00040000 -#define INT_EN_PME_INT_EN_ 0x00020000 -#define INT_EN_TXSO_EN_ 0x00010000 -#define INT_EN_RWT_EN_ 0x00008000 -#define INT_EN_RXE_EN_ 0x00004000 -#define INT_EN_TXE_EN_ 0x00002000 -#define INT_EN_TDFU_EN_ 0x00000800 -#define INT_EN_TDFO_EN_ 0x00000400 -#define INT_EN_TDFA_EN_ 0x00000200 -#define INT_EN_TSFF_EN_ 0x00000100 -#define INT_EN_TSFL_EN_ 0x00000080 -#define INT_EN_RXDF_EN_ 0x00000040 -#define INT_EN_RDFL_EN_ 0x00000020 -#define INT_EN_RSFF_EN_ 0x00000010 
-#define INT_EN_RSFL_EN_ 0x00000008 -#define INT_EN_GPIO2_INT_ 0x00000004 -#define INT_EN_GPIO1_INT_ 0x00000002 -#define INT_EN_GPIO0_INT_ 0x00000001 - -#define BYTE_TEST 0x64 - -#define FIFO_INT 0x68 -#define FIFO_INT_TX_AVAIL_LEVEL_ 0xFF000000 -#define FIFO_INT_TX_STS_LEVEL_ 0x00FF0000 -#define FIFO_INT_RX_AVAIL_LEVEL_ 0x0000FF00 -#define FIFO_INT_RX_STS_LEVEL_ 0x000000FF - -#define RX_CFG 0x6C -#define RX_CFG_RX_END_ALGN_ 0xC0000000 -#define RX_CFG_RX_END_ALGN4_ 0x00000000 -#define RX_CFG_RX_END_ALGN16_ 0x40000000 -#define RX_CFG_RX_END_ALGN32_ 0x80000000 -#define RX_CFG_RX_DMA_CNT_ 0x0FFF0000 -#define RX_CFG_RX_DUMP_ 0x00008000 -#define RX_CFG_RXDOFF_ 0x00001F00 - -#define TX_CFG 0x70 -#define TX_CFG_TXS_DUMP_ 0x00008000 -#define TX_CFG_TXD_DUMP_ 0x00004000 -#define TX_CFG_TXSAO_ 0x00000004 -#define TX_CFG_TX_ON_ 0x00000002 -#define TX_CFG_STOP_TX_ 0x00000001 - -#define HW_CFG 0x74 -#define HW_CFG_TTM_ 0x00200000 -#define HW_CFG_SF_ 0x00100000 -#define HW_CFG_TX_FIF_SZ_ 0x000F0000 -#define HW_CFG_TR_ 0x00003000 -#define HW_CFG_SRST_ 0x00000001 - -/* only available on 115/117 */ -#define HW_CFG_PHY_CLK_SEL_ 0x00000060 -#define HW_CFG_PHY_CLK_SEL_INT_PHY_ 0x00000000 -#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ 0x00000020 -#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ 0x00000040 -#define HW_CFG_SMI_SEL_ 0x00000010 -#define HW_CFG_EXT_PHY_DET_ 0x00000008 -#define HW_CFG_EXT_PHY_EN_ 0x00000004 -#define HW_CFG_SRST_TO_ 0x00000002 - -/* only available on 116/118 */ -#define HW_CFG_32_16_BIT_MODE_ 0x00000004 - -#define RX_DP_CTRL 0x78 -#define RX_DP_CTRL_RX_FFWD_ 0x80000000 - -#define RX_FIFO_INF 0x7C -#define RX_FIFO_INF_RXSUSED_ 0x00FF0000 -#define RX_FIFO_INF_RXDUSED_ 0x0000FFFF - -#define TX_FIFO_INF 0x80 -#define TX_FIFO_INF_TSUSED_ 0x00FF0000 -#define TX_FIFO_INF_TDFREE_ 0x0000FFFF - -#define PMT_CTRL 0x84 -#define PMT_CTRL_PM_MODE_ 0x00003000 -#define PMT_CTRL_PM_MODE_D0_ 0x00000000 -#define PMT_CTRL_PM_MODE_D1_ 0x00001000 -#define PMT_CTRL_PM_MODE_D2_ 0x00002000 -#define PMT_CTRL_PM_MODE_D3_ 0x00003000 -#define PMT_CTRL_PHY_RST_ 0x00000400 -#define PMT_CTRL_WOL_EN_ 0x00000200 -#define PMT_CTRL_ED_EN_ 0x00000100 -#define PMT_CTRL_PME_TYPE_ 0x00000040 -#define PMT_CTRL_WUPS_ 0x00000030 -#define PMT_CTRL_WUPS_NOWAKE_ 0x00000000 -#define PMT_CTRL_WUPS_ED_ 0x00000010 -#define PMT_CTRL_WUPS_WOL_ 0x00000020 -#define PMT_CTRL_WUPS_MULTI_ 0x00000030 -#define PMT_CTRL_PME_IND_ 0x00000008 -#define PMT_CTRL_PME_POL_ 0x00000004 -#define PMT_CTRL_PME_EN_ 0x00000002 -#define PMT_CTRL_READY_ 0x00000001 - -#define GPIO_CFG 0x88 -#define GPIO_CFG_LED3_EN_ 0x40000000 -#define GPIO_CFG_LED2_EN_ 0x20000000 -#define GPIO_CFG_LED1_EN_ 0x10000000 -#define GPIO_CFG_GPIO2_INT_POL_ 0x04000000 -#define GPIO_CFG_GPIO1_INT_POL_ 0x02000000 -#define GPIO_CFG_GPIO0_INT_POL_ 0x01000000 -#define GPIO_CFG_EEPR_EN_ 0x00700000 -#define GPIO_CFG_GPIOBUF2_ 0x00040000 -#define GPIO_CFG_GPIOBUF1_ 0x00020000 -#define GPIO_CFG_GPIOBUF0_ 0x00010000 -#define GPIO_CFG_GPIODIR2_ 0x00000400 -#define GPIO_CFG_GPIODIR1_ 0x00000200 -#define GPIO_CFG_GPIODIR0_ 0x00000100 -#define GPIO_CFG_GPIOD4_ 0x00000020 -#define GPIO_CFG_GPIOD3_ 0x00000010 -#define GPIO_CFG_GPIOD2_ 0x00000004 -#define GPIO_CFG_GPIOD1_ 0x00000002 -#define GPIO_CFG_GPIOD0_ 0x00000001 - -#define GPT_CFG 0x8C -#define GPT_CFG_TIMER_EN_ 0x20000000 -#define GPT_CFG_GPT_LOAD_ 0x0000FFFF - -#define GPT_CNT 0x90 -#define GPT_CNT_GPT_CNT_ 0x0000FFFF - -#define WORD_SWAP 0x98 - -#define FREE_RUN 0x9C - -#define RX_DROP 0xA0 - -#define MAC_CSR_CMD 0xA4 -#define MAC_CSR_CMD_CSR_BUSY_ 0x80000000 
-#define MAC_CSR_CMD_R_NOT_W_ 0x40000000 -#define MAC_CSR_CMD_CSR_ADDR_ 0x000000FF - -#define MAC_CSR_DATA 0xA8 - -#define AFC_CFG 0xAC -#define AFC_CFG_AFC_HI_ 0x00FF0000 -#define AFC_CFG_AFC_LO_ 0x0000FF00 -#define AFC_CFG_BACK_DUR_ 0x000000F0 -#define AFC_CFG_FCMULT_ 0x00000008 -#define AFC_CFG_FCBRD_ 0x00000004 -#define AFC_CFG_FCADD_ 0x00000002 -#define AFC_CFG_FCANY_ 0x00000001 - -#define E2P_CMD 0xB0 -#define E2P_CMD_EPC_BUSY_ 0x80000000 -#define E2P_CMD_EPC_CMD_ 0x70000000 -#define E2P_CMD_EPC_CMD_READ_ 0x00000000 -#define E2P_CMD_EPC_CMD_EWDS_ 0x10000000 -#define E2P_CMD_EPC_CMD_EWEN_ 0x20000000 -#define E2P_CMD_EPC_CMD_WRITE_ 0x30000000 -#define E2P_CMD_EPC_CMD_WRAL_ 0x40000000 -#define E2P_CMD_EPC_CMD_ERASE_ 0x50000000 -#define E2P_CMD_EPC_CMD_ERAL_ 0x60000000 -#define E2P_CMD_EPC_CMD_RELOAD_ 0x70000000 -#define E2P_CMD_EPC_TIMEOUT_ 0x00000200 -#define E2P_CMD_MAC_ADDR_LOADED_ 0x00000100 -#define E2P_CMD_EPC_ADDR_ 0x000000FF - -#define E2P_DATA 0xB4 -#define E2P_DATA_EEPROM_DATA_ 0x000000FF -#define LAN_REGISTER_EXTENT 0x00000100 - -/* - * MAC Control and Status Register (Indirect Address) - * Offset (through the MAC_CSR CMD and DATA port) - */ -#define MAC_CR 0x01 -#define MAC_CR_RXALL_ 0x80000000 -#define MAC_CR_HBDIS_ 0x10000000 -#define MAC_CR_RCVOWN_ 0x00800000 -#define MAC_CR_LOOPBK_ 0x00200000 -#define MAC_CR_FDPX_ 0x00100000 -#define MAC_CR_MCPAS_ 0x00080000 -#define MAC_CR_PRMS_ 0x00040000 -#define MAC_CR_INVFILT_ 0x00020000 -#define MAC_CR_PASSBAD_ 0x00010000 -#define MAC_CR_HFILT_ 0x00008000 -#define MAC_CR_HPFILT_ 0x00002000 -#define MAC_CR_LCOLL_ 0x00001000 -#define MAC_CR_BCAST_ 0x00000800 -#define MAC_CR_DISRTY_ 0x00000400 -#define MAC_CR_PADSTR_ 0x00000100 -#define MAC_CR_BOLMT_MASK_ 0x000000C0 -#define MAC_CR_DFCHK_ 0x00000020 -#define MAC_CR_TXEN_ 0x00000008 -#define MAC_CR_RXEN_ 0x00000004 - -#define ADDRH 0x02 - -#define ADDRL 0x03 - -#define HASHH 0x04 - -#define HASHL 0x05 - -#define MII_ACC 0x06 -#define MII_ACC_PHY_ADDR_ 0x0000F800 -#define MII_ACC_MIIRINDA_ 0x000007C0 -#define MII_ACC_MII_WRITE_ 0x00000002 -#define MII_ACC_MII_BUSY_ 0x00000001 - -#define MII_DATA 0x07 - -#define FLOW 0x08 -#define FLOW_FCPT_ 0xFFFF0000 -#define FLOW_FCPASS_ 0x00000004 -#define FLOW_FCEN_ 0x00000002 -#define FLOW_FCBSY_ 0x00000001 - -#define VLAN1 0x09 - -#define VLAN2 0x0A - -#define WUFF 0x0B - -#define WUCSR 0x0C -#define WUCSR_GUE_ 0x00000200 -#define WUCSR_WUFR_ 0x00000040 -#define WUCSR_MPR_ 0x00000020 -#define WUCSR_WAKE_EN_ 0x00000004 -#define WUCSR_MPEN_ 0x00000002 - -/* - * Phy definitions (vendor-specific) - */ -#define LAN9118_PHY_ID 0x00C0001C - -#define MII_INTSTS 0x1D - -#define MII_INTMSK 0x1E -#define PHY_INTMSK_AN_RCV_ (1 << 1) -#define PHY_INTMSK_PDFAULT_ (1 << 2) -#define PHY_INTMSK_AN_ACK_ (1 << 3) -#define PHY_INTMSK_LNKDOWN_ (1 << 4) -#define PHY_INTMSK_RFAULT_ (1 << 5) -#define PHY_INTMSK_AN_COMP_ (1 << 6) -#define PHY_INTMSK_ENERGYON_ (1 << 7) -#define PHY_INTMSK_DEFAULT_ (PHY_INTMSK_ENERGYON_ | \ - PHY_INTMSK_AN_COMP_ | \ - PHY_INTMSK_RFAULT_ | \ - PHY_INTMSK_LNKDOWN_) - -#define ADVERTISE_PAUSE_ALL (ADVERTISE_PAUSE_CAP | \ - ADVERTISE_PAUSE_ASYM) - -#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \ - LPA_PAUSE_ASYM) - -/* - * Provide hooks to let the arch add to the initialisation procedure - * and to override the source of the MAC address. 
- */ -#define SMSC_INITIALIZE() do {} while (0) -#define smsc_get_mac(dev) smsc911x_read_mac_address((dev)) - -#ifdef CONFIG_SMSC911X_ARCH_HOOKS -#include -#endif - -#endif /* __SMSC911X_H__ */ diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c deleted file mode 100644 index 459726f..0000000 --- a/drivers/net/smsc9420.c +++ /dev/null @@ -1,1763 +0,0 @@ - /*************************************************************************** - * - * Copyright (C) 2007,2008 SMSC - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - *************************************************************************** - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "smsc9420.h" - -#define DRV_NAME "smsc9420" -#define PFX DRV_NAME ": " -#define DRV_MDIONAME "smsc9420-mdio" -#define DRV_DESCRIPTION "SMSC LAN9420 driver" -#define DRV_VERSION "1.01" - -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -struct smsc9420_dma_desc { - u32 status; - u32 length; - u32 buffer1; - u32 buffer2; -}; - -struct smsc9420_ring_info { - struct sk_buff *skb; - dma_addr_t mapping; -}; - -struct smsc9420_pdata { - void __iomem *base_addr; - struct pci_dev *pdev; - struct net_device *dev; - - struct smsc9420_dma_desc *rx_ring; - struct smsc9420_dma_desc *tx_ring; - struct smsc9420_ring_info *tx_buffers; - struct smsc9420_ring_info *rx_buffers; - dma_addr_t rx_dma_addr; - dma_addr_t tx_dma_addr; - int tx_ring_head, tx_ring_tail; - int rx_ring_head, rx_ring_tail; - - spinlock_t int_lock; - spinlock_t phy_lock; - - struct napi_struct napi; - - bool software_irq_signal; - bool rx_csum; - u32 msg_enable; - - struct phy_device *phy_dev; - struct mii_bus *mii_bus; - int phy_irq[PHY_MAX_ADDR]; - int last_duplex; - int last_carrier; -}; - -static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = { - { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, }, - { 0, } -}; - -MODULE_DEVICE_TABLE(pci, smsc9420_id_table); - -#define SMSC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) - -static uint smsc_debug; -static uint debug = -1; -module_param(debug, uint, 0); -MODULE_PARM_DESC(debug, "debug level"); - -#define smsc_dbg(TYPE, f, a...) \ -do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \ - printk(KERN_DEBUG PFX f "\n", ## a); \ -} while (0) - -#define smsc_info(TYPE, f, a...) \ -do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \ - printk(KERN_INFO PFX f "\n", ## a); \ -} while (0) - -#define smsc_warn(TYPE, f, a...) 
\ -do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \ - printk(KERN_WARNING PFX f "\n", ## a); \ -} while (0) - -static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset) -{ - return ioread32(pd->base_addr + offset); -} - -static inline void -smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value) -{ - iowrite32(value, pd->base_addr + offset); -} - -static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd) -{ - /* to ensure PCI write completion, we must perform a PCI read */ - smsc9420_reg_read(pd, ID_REV); -} - -static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx) -{ - struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; - unsigned long flags; - u32 addr; - int i, reg = -EIO; - - spin_lock_irqsave(&pd->phy_lock, flags); - - /* confirm MII not busy */ - if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { - smsc_warn(DRV, "MII is busy???"); - goto out; - } - - /* set the address, index & direction (read from PHY) */ - addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | - MII_ACCESS_MII_READ_; - smsc9420_reg_write(pd, MII_ACCESS, addr); - - /* wait for read to complete with 50us timeout */ - for (i = 0; i < 5; i++) { - if (!(smsc9420_reg_read(pd, MII_ACCESS) & - MII_ACCESS_MII_BUSY_)) { - reg = (u16)smsc9420_reg_read(pd, MII_DATA); - goto out; - } - udelay(10); - } - - smsc_warn(DRV, "MII busy timeout!"); - -out: - spin_unlock_irqrestore(&pd->phy_lock, flags); - return reg; -} - -static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx, - u16 val) -{ - struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; - unsigned long flags; - u32 addr; - int i, reg = -EIO; - - spin_lock_irqsave(&pd->phy_lock, flags); - - /* confirm MII not busy */ - if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { - smsc_warn(DRV, "MII is busy???"); - goto out; - } - - /* put the data to write in the MAC */ - smsc9420_reg_write(pd, MII_DATA, (u32)val); - - /* set the address, index & direction (write to PHY) */ - addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | - MII_ACCESS_MII_WRITE_; - smsc9420_reg_write(pd, MII_ACCESS, addr); - - /* wait for write to complete with 50us timeout */ - for (i = 0; i < 5; i++) { - if (!(smsc9420_reg_read(pd, MII_ACCESS) & - MII_ACCESS_MII_BUSY_)) { - reg = 0; - goto out; - } - udelay(10); - } - - smsc_warn(DRV, "MII busy timeout!"); - -out: - spin_unlock_irqrestore(&pd->phy_lock, flags); - return reg; -} - -/* Returns hash bit number for given MAC address - * Example: - * 01 00 5E 00 00 01 -> returns bit number 31 */ -static u32 smsc9420_hash(u8 addr[ETH_ALEN]) -{ - return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f; -} - -static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd) -{ - int timeout = 100000; - - BUG_ON(!pd); - - if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { - smsc_dbg(DRV, "smsc9420_eeprom_reload: Eeprom busy"); - return -EIO; - } - - smsc9420_reg_write(pd, E2P_CMD, - (E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_RELOAD_)); - - do { - udelay(10); - if (!(smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_)) - return 0; - } while (timeout--); - - smsc_warn(DRV, "smsc9420_eeprom_reload: Eeprom timed out"); - return -EIO; -} - -/* Standard ioctls for mii-tool */ -static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - - if (!netif_running(dev) || !pd->phy_dev) - return -EINVAL; - - return phy_mii_ioctl(pd->phy_dev, ifr, cmd); -} - -static int 
smsc9420_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - - if (!pd->phy_dev) - return -ENODEV; - - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; - return phy_ethtool_gset(pd->phy_dev, cmd); -} - -static int smsc9420_ethtool_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - - if (!pd->phy_dev) - return -ENODEV; - - return phy_ethtool_sset(pd->phy_dev, cmd); -} - -static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct smsc9420_pdata *pd = netdev_priv(netdev); - - strcpy(drvinfo->driver, DRV_NAME); - strcpy(drvinfo->bus_info, pci_name(pd->pdev)); - strcpy(drvinfo->version, DRV_VERSION); -} - -static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev) -{ - struct smsc9420_pdata *pd = netdev_priv(netdev); - return pd->msg_enable; -} - -static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data) -{ - struct smsc9420_pdata *pd = netdev_priv(netdev); - pd->msg_enable = data; -} - -static int smsc9420_ethtool_nway_reset(struct net_device *netdev) -{ - struct smsc9420_pdata *pd = netdev_priv(netdev); - - if (!pd->phy_dev) - return -ENODEV; - - return phy_start_aneg(pd->phy_dev); -} - -static int smsc9420_ethtool_getregslen(struct net_device *dev) -{ - /* all smsc9420 registers plus all phy registers */ - return 0x100 + (32 * sizeof(u32)); -} - -static void -smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, - void *buf) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - struct phy_device *phy_dev = pd->phy_dev; - unsigned int i, j = 0; - u32 *data = buf; - - regs->version = smsc9420_reg_read(pd, ID_REV); - for (i = 0; i < 0x100; i += (sizeof(u32))) - data[j++] = smsc9420_reg_read(pd, i); - - // cannot read phy registers if the net device is down - if (!phy_dev) - return; - - for (i = 0; i <= 31; i++) - data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i); -} - -static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd) -{ - unsigned int temp = smsc9420_reg_read(pd, GPIO_CFG); - temp &= ~GPIO_CFG_EEPR_EN_; - smsc9420_reg_write(pd, GPIO_CFG, temp); - msleep(1); -} - -static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op) -{ - int timeout = 100; - u32 e2cmd; - - smsc_dbg(HW, "op 0x%08x", op); - if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { - smsc_warn(HW, "Busy at start"); - return -EBUSY; - } - - e2cmd = op | E2P_CMD_EPC_BUSY_; - smsc9420_reg_write(pd, E2P_CMD, e2cmd); - - do { - msleep(1); - e2cmd = smsc9420_reg_read(pd, E2P_CMD); - } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); - - if (!timeout) { - smsc_info(HW, "TIMED OUT"); - return -EAGAIN; - } - - if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { - smsc_info(HW, "Error occurred during eeprom operation"); - return -EINVAL; - } - - return 0; -} - -static int smsc9420_eeprom_read_location(struct smsc9420_pdata *pd, - u8 address, u8 *data) -{ - u32 op = E2P_CMD_EPC_CMD_READ_ | address; - int ret; - - smsc_dbg(HW, "address 0x%x", address); - ret = smsc9420_eeprom_send_cmd(pd, op); - - if (!ret) - data[address] = smsc9420_reg_read(pd, E2P_DATA); - - return ret; -} - -static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd, - u8 address, u8 data) -{ - u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; - int ret; - - smsc_dbg(HW, "address 0x%x, data 0x%x", address, data); - ret = smsc9420_eeprom_send_cmd(pd, op); - - if (!ret) { - op = E2P_CMD_EPC_CMD_WRITE_ | 
address; - smsc9420_reg_write(pd, E2P_DATA, (u32)data); - ret = smsc9420_eeprom_send_cmd(pd, op); - } - - return ret; -} - -static int smsc9420_ethtool_get_eeprom_len(struct net_device *dev) -{ - return SMSC9420_EEPROM_SIZE; -} - -static int smsc9420_ethtool_get_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - u8 eeprom_data[SMSC9420_EEPROM_SIZE]; - int len, i; - - smsc9420_eeprom_enable_access(pd); - - len = min(eeprom->len, SMSC9420_EEPROM_SIZE); - for (i = 0; i < len; i++) { - int ret = smsc9420_eeprom_read_location(pd, i, eeprom_data); - if (ret < 0) { - eeprom->len = 0; - return ret; - } - } - - memcpy(data, &eeprom_data[eeprom->offset], len); - eeprom->magic = SMSC9420_EEPROM_MAGIC; - eeprom->len = len; - return 0; -} - -static int smsc9420_ethtool_set_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - int ret; - - if (eeprom->magic != SMSC9420_EEPROM_MAGIC) - return -EINVAL; - - smsc9420_eeprom_enable_access(pd); - smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_); - ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data); - smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWDS_); - - /* Single byte write, according to man page */ - eeprom->len = 1; - - return ret; -} - -static const struct ethtool_ops smsc9420_ethtool_ops = { - .get_settings = smsc9420_ethtool_get_settings, - .set_settings = smsc9420_ethtool_set_settings, - .get_drvinfo = smsc9420_ethtool_get_drvinfo, - .get_msglevel = smsc9420_ethtool_get_msglevel, - .set_msglevel = smsc9420_ethtool_set_msglevel, - .nway_reset = smsc9420_ethtool_nway_reset, - .get_link = ethtool_op_get_link, - .get_eeprom_len = smsc9420_ethtool_get_eeprom_len, - .get_eeprom = smsc9420_ethtool_get_eeprom, - .set_eeprom = smsc9420_ethtool_set_eeprom, - .get_regs_len = smsc9420_ethtool_getregslen, - .get_regs = smsc9420_ethtool_getregs, -}; - -/* Sets the device MAC address to dev_addr */ -static void smsc9420_set_mac_address(struct net_device *dev) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - u8 *dev_addr = dev->dev_addr; - u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; - u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | - (dev_addr[1] << 8) | dev_addr[0]; - - smsc9420_reg_write(pd, ADDRH, mac_high16); - smsc9420_reg_write(pd, ADDRL, mac_low32); -} - -static void smsc9420_check_mac_address(struct net_device *dev) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - - /* Check if mac address has been specified when bringing interface up */ - if (is_valid_ether_addr(dev->dev_addr)) { - smsc9420_set_mac_address(dev); - smsc_dbg(PROBE, "MAC Address is specified by configuration"); - } else { - /* Try reading mac address from device. 
If EEPROM is present
- * it will already have been set */
- u32 mac_high16 = smsc9420_reg_read(pd, ADDRH);
- u32 mac_low32 = smsc9420_reg_read(pd, ADDRL);
- dev->dev_addr[0] = (u8)(mac_low32);
- dev->dev_addr[1] = (u8)(mac_low32 >> 8);
- dev->dev_addr[2] = (u8)(mac_low32 >> 16);
- dev->dev_addr[3] = (u8)(mac_low32 >> 24);
- dev->dev_addr[4] = (u8)(mac_high16);
- dev->dev_addr[5] = (u8)(mac_high16 >> 8);
-
- if (is_valid_ether_addr(dev->dev_addr)) {
- /* eeprom values are valid so use them */
- smsc_dbg(PROBE, "Mac Address is read from EEPROM");
- } else {
- /* eeprom values are invalid, generate random MAC */
- random_ether_addr(dev->dev_addr);
- smsc9420_set_mac_address(dev);
- smsc_dbg(PROBE,
- "MAC Address is set to random_ether_addr");
- }
- }
-}
-
-static void smsc9420_stop_tx(struct smsc9420_pdata *pd)
-{
- u32 dmac_control, mac_cr, dma_intr_ena;
- int timeout = 1000;
-
- /* disable TX DMAC */
- dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
- dmac_control &= (~DMAC_CONTROL_ST_);
- smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);
-
- /* Wait max 10ms for transmit process to stop */
- while (--timeout) {
- if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_TS_)
- break;
- udelay(10);
- }
-
- if (!timeout)
- smsc_warn(IFDOWN, "TX DMAC failed to stop");
-
- /* ACK Tx DMAC stop bit */
- smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_);
-
- /* mask TX DMAC interrupts */
- dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
- dma_intr_ena &= ~(DMAC_INTR_ENA_TX_);
- smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
- smsc9420_pci_flush_write(pd);
-
- /* stop MAC TX */
- mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_TXEN_);
- smsc9420_reg_write(pd, MAC_CR, mac_cr);
- smsc9420_pci_flush_write(pd);
-}
-
-static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd)
-{
- int i;
-
- BUG_ON(!pd->tx_ring);
-
- if (!pd->tx_buffers)
- return;
-
- for (i = 0; i < TX_RING_SIZE; i++) {
- struct sk_buff *skb = pd->tx_buffers[i].skb;
-
- if (skb) {
- BUG_ON(!pd->tx_buffers[i].mapping);
- pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_any(skb);
- }
-
- pd->tx_ring[i].status = 0;
- pd->tx_ring[i].length = 0;
- pd->tx_ring[i].buffer1 = 0;
- pd->tx_ring[i].buffer2 = 0;
- }
- wmb();
-
- kfree(pd->tx_buffers);
- pd->tx_buffers = NULL;
-
- pd->tx_ring_head = 0;
- pd->tx_ring_tail = 0;
-}
-
-static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd)
-{
- int i;
-
- BUG_ON(!pd->rx_ring);
-
- if (!pd->rx_buffers)
- return;
-
- for (i = 0; i < RX_RING_SIZE; i++) {
- if (pd->rx_buffers[i].skb)
- dev_kfree_skb_any(pd->rx_buffers[i].skb);
-
- if (pd->rx_buffers[i].mapping)
- pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-
- pd->rx_ring[i].status = 0;
- pd->rx_ring[i].length = 0;
- pd->rx_ring[i].buffer1 = 0;
- pd->rx_ring[i].buffer2 = 0;
- }
- wmb();
-
- kfree(pd->rx_buffers);
- pd->rx_buffers = NULL;
-
- pd->rx_ring_head = 0;
- pd->rx_ring_tail = 0;
-}
-
-static void smsc9420_stop_rx(struct smsc9420_pdata *pd)
-{
- int timeout = 1000;
- u32 mac_cr, dmac_control, dma_intr_ena;
-
- /* mask RX DMAC interrupts */
- dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
- dma_intr_ena &= (~DMAC_INTR_ENA_RX_);
- smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
- smsc9420_pci_flush_write(pd);
-
- /* stop RX MAC prior to stopping DMA */
- mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_RXEN_);
- smsc9420_reg_write(pd, MAC_CR, mac_cr);
- smsc9420_pci_flush_write(pd);
-
- /* stop RX DMAC */
- dmac_control =
smsc9420_reg_read(pd, DMAC_CONTROL); - dmac_control &= (~DMAC_CONTROL_SR_); - smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); - smsc9420_pci_flush_write(pd); - - /* wait up to 10ms for receive to stop */ - while (--timeout) { - if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_RS_) - break; - udelay(10); - } - - if (!timeout) - smsc_warn(IFDOWN, "RX DMAC did not stop! timeout."); - - /* ACK the Rx DMAC stop bit */ - smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_); -} - -static irqreturn_t smsc9420_isr(int irq, void *dev_id) -{ - struct smsc9420_pdata *pd = dev_id; - u32 int_cfg, int_sts, int_ctl; - irqreturn_t ret = IRQ_NONE; - ulong flags; - - BUG_ON(!pd); - BUG_ON(!pd->base_addr); - - int_cfg = smsc9420_reg_read(pd, INT_CFG); - - /* check if it's our interrupt */ - if ((int_cfg & (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) != - (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) - return IRQ_NONE; - - int_sts = smsc9420_reg_read(pd, INT_STAT); - - if (likely(INT_STAT_DMAC_INT_ & int_sts)) { - u32 status = smsc9420_reg_read(pd, DMAC_STATUS); - u32 ints_to_clear = 0; - - if (status & DMAC_STS_TX_) { - ints_to_clear |= (DMAC_STS_TX_ | DMAC_STS_NIS_); - netif_wake_queue(pd->dev); - } - - if (status & DMAC_STS_RX_) { - /* mask RX DMAC interrupts */ - u32 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); - dma_intr_ena &= (~DMAC_INTR_ENA_RX_); - smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); - smsc9420_pci_flush_write(pd); - - ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_); - napi_schedule(&pd->napi); - } - - if (ints_to_clear) - smsc9420_reg_write(pd, DMAC_STATUS, ints_to_clear); - - ret = IRQ_HANDLED; - } - - if (unlikely(INT_STAT_SW_INT_ & int_sts)) { - /* mask software interrupt */ - spin_lock_irqsave(&pd->int_lock, flags); - int_ctl = smsc9420_reg_read(pd, INT_CTL); - int_ctl &= (~INT_CTL_SW_INT_EN_); - smsc9420_reg_write(pd, INT_CTL, int_ctl); - spin_unlock_irqrestore(&pd->int_lock, flags); - - smsc9420_reg_write(pd, INT_STAT, INT_STAT_SW_INT_); - pd->software_irq_signal = true; - smp_wmb(); - - ret = IRQ_HANDLED; - } - - /* to ensure PCI write completion, we must perform a PCI read */ - smsc9420_pci_flush_write(pd); - - return ret; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void smsc9420_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - smsc9420_isr(0, dev); - enable_irq(dev->irq); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - -static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd) -{ - smsc9420_reg_write(pd, BUS_MODE, BUS_MODE_SWR_); - smsc9420_reg_read(pd, BUS_MODE); - udelay(2); - if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_) - smsc_warn(DRV, "Software reset not cleared"); -} - -static int smsc9420_stop(struct net_device *dev) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - u32 int_cfg; - ulong flags; - - BUG_ON(!pd); - BUG_ON(!pd->phy_dev); - - /* disable master interrupt */ - spin_lock_irqsave(&pd->int_lock, flags); - int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); - smsc9420_reg_write(pd, INT_CFG, int_cfg); - spin_unlock_irqrestore(&pd->int_lock, flags); - - netif_tx_disable(dev); - napi_disable(&pd->napi); - - smsc9420_stop_tx(pd); - smsc9420_free_tx_ring(pd); - - smsc9420_stop_rx(pd); - smsc9420_free_rx_ring(pd); - - free_irq(dev->irq, pd); - - smsc9420_dmac_soft_reset(pd); - - phy_stop(pd->phy_dev); - - phy_disconnect(pd->phy_dev); - pd->phy_dev = NULL; - mdiobus_unregister(pd->mii_bus); - mdiobus_free(pd->mii_bus); - - return 0; -} - -static void smsc9420_rx_count_stats(struct net_device *dev, u32 desc_status) -{ - if 
(unlikely(desc_status & RDES0_ERROR_SUMMARY_)) { - dev->stats.rx_errors++; - if (desc_status & RDES0_DESCRIPTOR_ERROR_) - dev->stats.rx_over_errors++; - else if (desc_status & (RDES0_FRAME_TOO_LONG_ | - RDES0_RUNT_FRAME_ | RDES0_COLLISION_SEEN_)) - dev->stats.rx_frame_errors++; - else if (desc_status & RDES0_CRC_ERROR_) - dev->stats.rx_crc_errors++; - } - - if (unlikely(desc_status & RDES0_LENGTH_ERROR_)) - dev->stats.rx_length_errors++; - - if (unlikely(!((desc_status & RDES0_LAST_DESCRIPTOR_) && - (desc_status & RDES0_FIRST_DESCRIPTOR_)))) - dev->stats.rx_length_errors++; - - if (desc_status & RDES0_MULTICAST_FRAME_) - dev->stats.multicast++; -} - -static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index, - const u32 status) -{ - struct net_device *dev = pd->dev; - struct sk_buff *skb; - u16 packet_length = (status & RDES0_FRAME_LENGTH_MASK_) - >> RDES0_FRAME_LENGTH_SHFT_; - - /* remove crc from packet length */ - packet_length -= 4; - - if (pd->rx_csum) - packet_length -= 2; - - dev->stats.rx_packets++; - dev->stats.rx_bytes += packet_length; - - pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping, - PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - pd->rx_buffers[index].mapping = 0; - - skb = pd->rx_buffers[index].skb; - pd->rx_buffers[index].skb = NULL; - - if (pd->rx_csum) { - u16 hw_csum = get_unaligned_le16(skb_tail_pointer(skb) + - NET_IP_ALIGN + packet_length + 4); - put_unaligned_le16(hw_csum, &skb->csum); - skb->ip_summed = CHECKSUM_COMPLETE; - } - - skb_reserve(skb, NET_IP_ALIGN); - skb_put(skb, packet_length); - - skb->protocol = eth_type_trans(skb, dev); - - netif_receive_skb(skb); -} - -static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index) -{ - struct sk_buff *skb = netdev_alloc_skb(pd->dev, PKT_BUF_SZ); - dma_addr_t mapping; - - BUG_ON(pd->rx_buffers[index].skb); - BUG_ON(pd->rx_buffers[index].mapping); - - if (unlikely(!skb)) { - smsc_warn(RX_ERR, "Failed to allocate new skb!"); - return -ENOMEM; - } - - skb->dev = pd->dev; - - mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb), - PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(pd->pdev, mapping)) { - dev_kfree_skb_any(skb); - smsc_warn(RX_ERR, "pci_map_single failed!"); - return -ENOMEM; - } - - pd->rx_buffers[index].skb = skb; - pd->rx_buffers[index].mapping = mapping; - pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN; - pd->rx_ring[index].status = RDES0_OWN_; - wmb(); - - return 0; -} - -static void smsc9420_alloc_new_rx_buffers(struct smsc9420_pdata *pd) -{ - while (pd->rx_ring_tail != pd->rx_ring_head) { - if (smsc9420_alloc_rx_buffer(pd, pd->rx_ring_tail)) - break; - - pd->rx_ring_tail = (pd->rx_ring_tail + 1) % RX_RING_SIZE; - } -} - -static int smsc9420_rx_poll(struct napi_struct *napi, int budget) -{ - struct smsc9420_pdata *pd = - container_of(napi, struct smsc9420_pdata, napi); - struct net_device *dev = pd->dev; - u32 drop_frame_cnt, dma_intr_ena, status; - int work_done; - - for (work_done = 0; work_done < budget; work_done++) { - rmb(); - status = pd->rx_ring[pd->rx_ring_head].status; - - /* stop if DMAC owns this dma descriptor */ - if (status & RDES0_OWN_) - break; - - smsc9420_rx_count_stats(dev, status); - smsc9420_rx_handoff(pd, pd->rx_ring_head, status); - pd->rx_ring_head = (pd->rx_ring_head + 1) % RX_RING_SIZE; - smsc9420_alloc_new_rx_buffers(pd); - } - - drop_frame_cnt = smsc9420_reg_read(pd, MISS_FRAME_CNTR); - dev->stats.rx_dropped += - (drop_frame_cnt & 0xFFFF) + ((drop_frame_cnt >> 17) & 0x3FF); - - /* Kick RXDMA */ - smsc9420_reg_write(pd, 
RX_POLL_DEMAND, 1); - smsc9420_pci_flush_write(pd); - - if (work_done < budget) { - napi_complete(&pd->napi); - - /* re-enable RX DMA interrupts */ - dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); - dma_intr_ena |= (DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_); - smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); - smsc9420_pci_flush_write(pd); - } - return work_done; -} - -static void -smsc9420_tx_update_stats(struct net_device *dev, u32 status, u32 length) -{ - if (unlikely(status & TDES0_ERROR_SUMMARY_)) { - dev->stats.tx_errors++; - if (status & (TDES0_EXCESSIVE_DEFERRAL_ | - TDES0_EXCESSIVE_COLLISIONS_)) - dev->stats.tx_aborted_errors++; - - if (status & (TDES0_LOSS_OF_CARRIER_ | TDES0_NO_CARRIER_)) - dev->stats.tx_carrier_errors++; - } else { - dev->stats.tx_packets++; - dev->stats.tx_bytes += (length & 0x7FF); - } - - if (unlikely(status & TDES0_EXCESSIVE_COLLISIONS_)) { - dev->stats.collisions += 16; - } else { - dev->stats.collisions += - (status & TDES0_COLLISION_COUNT_MASK_) >> - TDES0_COLLISION_COUNT_SHFT_; - } - - if (unlikely(status & TDES0_HEARTBEAT_FAIL_)) - dev->stats.tx_heartbeat_errors++; -} - -/* Check for completed dma transfers, update stats and free skbs */ -static void smsc9420_complete_tx(struct net_device *dev) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - - while (pd->tx_ring_tail != pd->tx_ring_head) { - int index = pd->tx_ring_tail; - u32 status, length; - - rmb(); - status = pd->tx_ring[index].status; - length = pd->tx_ring[index].length; - - /* Check if DMA still owns this descriptor */ - if (unlikely(TDES0_OWN_ & status)) - break; - - smsc9420_tx_update_stats(dev, status, length); - - BUG_ON(!pd->tx_buffers[index].skb); - BUG_ON(!pd->tx_buffers[index].mapping); - - pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping, - pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE); - pd->tx_buffers[index].mapping = 0; - - dev_kfree_skb_any(pd->tx_buffers[index].skb); - pd->tx_buffers[index].skb = NULL; - - pd->tx_ring[index].buffer1 = 0; - wmb(); - - pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE; - } -} - -static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - dma_addr_t mapping; - int index = pd->tx_ring_head; - u32 tmp_desc1; - bool about_to_take_last_desc = - (((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail); - - smsc9420_complete_tx(dev); - - rmb(); - BUG_ON(pd->tx_ring[index].status & TDES0_OWN_); - BUG_ON(pd->tx_buffers[index].skb); - BUG_ON(pd->tx_buffers[index].mapping); - - mapping = pci_map_single(pd->pdev, skb->data, - skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pd->pdev, mapping)) { - smsc_warn(TX_ERR, "pci_map_single failed, dropping packet"); - return NETDEV_TX_BUSY; - } - - pd->tx_buffers[index].skb = skb; - pd->tx_buffers[index].mapping = mapping; - - tmp_desc1 = (TDES1_LS_ | ((u32)skb->len & 0x7FF)); - if (unlikely(about_to_take_last_desc)) { - tmp_desc1 |= TDES1_IC_; - netif_stop_queue(pd->dev); - } - - /* check if we are at the last descriptor and need to set EOR */ - if (unlikely(index == (TX_RING_SIZE - 1))) - tmp_desc1 |= TDES1_TER_; - - pd->tx_ring[index].buffer1 = mapping; - pd->tx_ring[index].length = tmp_desc1; - wmb(); - - /* increment head */ - pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE; - - /* assign ownership to DMAC */ - pd->tx_ring[index].status = TDES0_OWN_; - wmb(); - - skb_tx_timestamp(skb); - - /* kick the DMA */ - smsc9420_reg_write(pd, TX_POLL_DEMAND, 1); - 
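/* the write to TX_POLL_DEMAND is posted; the flush read below pushes it out to the chip */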
smsc9420_pci_flush_write(pd); - - return NETDEV_TX_OK; -} - -static struct net_device_stats *smsc9420_get_stats(struct net_device *dev) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - u32 counter = smsc9420_reg_read(pd, MISS_FRAME_CNTR); - dev->stats.rx_dropped += - (counter & 0x0000FFFF) + ((counter >> 17) & 0x000003FF); - return &dev->stats; -} - -static void smsc9420_set_multicast_list(struct net_device *dev) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - u32 mac_cr = smsc9420_reg_read(pd, MAC_CR); - - if (dev->flags & IFF_PROMISC) { - smsc_dbg(HW, "Promiscuous Mode Enabled"); - mac_cr |= MAC_CR_PRMS_; - mac_cr &= (~MAC_CR_MCPAS_); - mac_cr &= (~MAC_CR_HPFILT_); - } else if (dev->flags & IFF_ALLMULTI) { - smsc_dbg(HW, "Receive all Multicast Enabled"); - mac_cr &= (~MAC_CR_PRMS_); - mac_cr |= MAC_CR_MCPAS_; - mac_cr &= (~MAC_CR_HPFILT_); - } else if (!netdev_mc_empty(dev)) { - struct netdev_hw_addr *ha; - u32 hash_lo = 0, hash_hi = 0; - - smsc_dbg(HW, "Multicast filter enabled"); - netdev_for_each_mc_addr(ha, dev) { - u32 bit_num = smsc9420_hash(ha->addr); - u32 mask = 1 << (bit_num & 0x1F); - - if (bit_num & 0x20) - hash_hi |= mask; - else - hash_lo |= mask; - - } - smsc9420_reg_write(pd, HASHH, hash_hi); - smsc9420_reg_write(pd, HASHL, hash_lo); - - mac_cr &= (~MAC_CR_PRMS_); - mac_cr &= (~MAC_CR_MCPAS_); - mac_cr |= MAC_CR_HPFILT_; - } else { - smsc_dbg(HW, "Receive own packets only."); - smsc9420_reg_write(pd, HASHH, 0); - smsc9420_reg_write(pd, HASHL, 0); - - mac_cr &= (~MAC_CR_PRMS_); - mac_cr &= (~MAC_CR_MCPAS_); - mac_cr &= (~MAC_CR_HPFILT_); - } - - smsc9420_reg_write(pd, MAC_CR, mac_cr); - smsc9420_pci_flush_write(pd); -} - -static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd) -{ - struct phy_device *phy_dev = pd->phy_dev; - u32 flow; - - if (phy_dev->duplex == DUPLEX_FULL) { - u16 lcladv = phy_read(phy_dev, MII_ADVERTISE); - u16 rmtadv = phy_read(phy_dev, MII_LPA); - u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); - - if (cap & FLOW_CTRL_RX) - flow = 0xFFFF0002; - else - flow = 0; - - smsc_info(LINK, "rx pause %s, tx pause %s", - (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), - (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); - } else { - smsc_info(LINK, "half duplex"); - flow = 0; - } - - smsc9420_reg_write(pd, FLOW, flow); -} - -/* Update link mode if anything has changed. Called periodically when the - * PHY is in polling mode, even if nothing has changed. 
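- * Only duplex and carrier transitions are acted on below; a duplex change
- * also re-resolves pause/flow-control via smsc9420_phy_update_flowcontrol().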
*/ -static void smsc9420_phy_adjust_link(struct net_device *dev) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - struct phy_device *phy_dev = pd->phy_dev; - int carrier; - - if (phy_dev->duplex != pd->last_duplex) { - u32 mac_cr = smsc9420_reg_read(pd, MAC_CR); - if (phy_dev->duplex) { - smsc_dbg(LINK, "full duplex mode"); - mac_cr |= MAC_CR_FDPX_; - } else { - smsc_dbg(LINK, "half duplex mode"); - mac_cr &= ~MAC_CR_FDPX_; - } - smsc9420_reg_write(pd, MAC_CR, mac_cr); - - smsc9420_phy_update_flowcontrol(pd); - pd->last_duplex = phy_dev->duplex; - } - - carrier = netif_carrier_ok(dev); - if (carrier != pd->last_carrier) { - if (carrier) - smsc_dbg(LINK, "carrier OK"); - else - smsc_dbg(LINK, "no carrier"); - pd->last_carrier = carrier; - } -} - -static int smsc9420_mii_probe(struct net_device *dev) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - struct phy_device *phydev = NULL; - - BUG_ON(pd->phy_dev); - - /* Device only supports internal PHY at address 1 */ - if (!pd->mii_bus->phy_map[1]) { - pr_err("%s: no PHY found at address 1\n", dev->name); - return -ENODEV; - } - - phydev = pd->mii_bus->phy_map[1]; - smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr, - phydev->phy_id); - - phydev = phy_connect(dev, dev_name(&phydev->dev), - smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); - - if (IS_ERR(phydev)) { - pr_err("%s: Could not attach to PHY\n", dev->name); - return PTR_ERR(phydev); - } - - pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq); - - /* mask with MAC supported features */ - phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phydev->advertising = phydev->supported; - - pd->phy_dev = phydev; - pd->last_duplex = -1; - pd->last_carrier = -1; - - return 0; -} - -static int smsc9420_mii_init(struct net_device *dev) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - int err = -ENXIO, i; - - pd->mii_bus = mdiobus_alloc(); - if (!pd->mii_bus) { - err = -ENOMEM; - goto err_out_1; - } - pd->mii_bus->name = DRV_MDIONAME; - snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x", - (pd->pdev->bus->number << 8) | pd->pdev->devfn); - pd->mii_bus->priv = pd; - pd->mii_bus->read = smsc9420_mii_read; - pd->mii_bus->write = smsc9420_mii_write; - pd->mii_bus->irq = pd->phy_irq; - for (i = 0; i < PHY_MAX_ADDR; ++i) - pd->mii_bus->irq[i] = PHY_POLL; - - /* Mask all PHYs except ID 1 (internal) */ - pd->mii_bus->phy_mask = ~(1 << 1); - - if (mdiobus_register(pd->mii_bus)) { - smsc_warn(PROBE, "Error registering mii bus"); - goto err_out_free_bus_2; - } - - if (smsc9420_mii_probe(dev) < 0) { - smsc_warn(PROBE, "Error probing mii bus"); - goto err_out_unregister_bus_3; - } - - return 0; - -err_out_unregister_bus_3: - mdiobus_unregister(pd->mii_bus); -err_out_free_bus_2: - mdiobus_free(pd->mii_bus); -err_out_1: - return err; -} - -static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd) -{ - int i; - - BUG_ON(!pd->tx_ring); - - pd->tx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) * - TX_RING_SIZE), GFP_KERNEL); - if (!pd->tx_buffers) { - smsc_warn(IFUP, "Failed to allocate tx_buffers"); - return -ENOMEM; - } - - /* Initialize the TX Ring */ - for (i = 0; i < TX_RING_SIZE; i++) { - pd->tx_buffers[i].skb = NULL; - pd->tx_buffers[i].mapping = 0; - pd->tx_ring[i].status = 0; - pd->tx_ring[i].length = 0; - pd->tx_ring[i].buffer1 = 0; - pd->tx_ring[i].buffer2 = 0; - } - pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_; - wmb(); - - pd->tx_ring_head = 0; 
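- /* head = next descriptor to fill, tail = next to reclaim; head == tail means the ring is empty */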
- pd->tx_ring_tail = 0; - - smsc9420_reg_write(pd, TX_BASE_ADDR, pd->tx_dma_addr); - smsc9420_pci_flush_write(pd); - - return 0; -} - -static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd) -{ - int i; - - BUG_ON(!pd->rx_ring); - - pd->rx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) * - RX_RING_SIZE), GFP_KERNEL); - if (pd->rx_buffers == NULL) { - smsc_warn(IFUP, "Failed to allocate rx_buffers"); - goto out; - } - - /* initialize the rx ring */ - for (i = 0; i < RX_RING_SIZE; i++) { - pd->rx_ring[i].status = 0; - pd->rx_ring[i].length = PKT_BUF_SZ; - pd->rx_ring[i].buffer2 = 0; - pd->rx_buffers[i].skb = NULL; - pd->rx_buffers[i].mapping = 0; - } - pd->rx_ring[RX_RING_SIZE - 1].length = (PKT_BUF_SZ | RDES1_RER_); - - /* now allocate the entire ring of skbs */ - for (i = 0; i < RX_RING_SIZE; i++) { - if (smsc9420_alloc_rx_buffer(pd, i)) { - smsc_warn(IFUP, "failed to allocate rx skb %d", i); - goto out_free_rx_skbs; - } - } - - pd->rx_ring_head = 0; - pd->rx_ring_tail = 0; - - smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q); - smsc_dbg(IFUP, "VLAN1 = 0x%08x", smsc9420_reg_read(pd, VLAN1)); - - if (pd->rx_csum) { - /* Enable RX COE */ - u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN; - smsc9420_reg_write(pd, COE_CR, coe); - smsc_dbg(IFUP, "COE_CR = 0x%08x", coe); - } - - smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr); - smsc9420_pci_flush_write(pd); - - return 0; - -out_free_rx_skbs: - smsc9420_free_rx_ring(pd); -out: - return -ENOMEM; -} - -static int smsc9420_open(struct net_device *dev) -{ - struct smsc9420_pdata *pd; - u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl; - unsigned long flags; - int result = 0, timeout; - - BUG_ON(!dev); - pd = netdev_priv(dev); - BUG_ON(!pd); - - if (!is_valid_ether_addr(dev->dev_addr)) { - smsc_warn(IFUP, "dev_addr is not a valid MAC address"); - result = -EADDRNOTAVAIL; - goto out_0; - } - - netif_carrier_off(dev); - - /* disable, mask and acknowledge all interrupts */ - spin_lock_irqsave(&pd->int_lock, flags); - int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); - smsc9420_reg_write(pd, INT_CFG, int_cfg); - smsc9420_reg_write(pd, INT_CTL, 0); - spin_unlock_irqrestore(&pd->int_lock, flags); - smsc9420_reg_write(pd, DMAC_INTR_ENA, 0); - smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); - smsc9420_pci_flush_write(pd); - - if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED, - DRV_NAME, pd)) { - smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq); - result = -ENODEV; - goto out_0; - } - - smsc9420_dmac_soft_reset(pd); - - /* make sure MAC_CR is sane */ - smsc9420_reg_write(pd, MAC_CR, 0); - - smsc9420_set_mac_address(dev); - - /* Configure GPIO pins to drive LEDs */ - smsc9420_reg_write(pd, GPIO_CFG, - (GPIO_CFG_LED_3_ | GPIO_CFG_LED_2_ | GPIO_CFG_LED_1_)); - - bus_mode = BUS_MODE_DMA_BURST_LENGTH_16; - -#ifdef __BIG_ENDIAN - bus_mode |= BUS_MODE_DBO_; -#endif - - smsc9420_reg_write(pd, BUS_MODE, bus_mode); - - smsc9420_pci_flush_write(pd); - - /* set bus master bridge arbitration priority for Rx and TX DMA */ - smsc9420_reg_write(pd, BUS_CFG, BUS_CFG_RXTXWEIGHT_4_1); - - smsc9420_reg_write(pd, DMAC_CONTROL, - (DMAC_CONTROL_SF_ | DMAC_CONTROL_OSF_)); - - smsc9420_pci_flush_write(pd); - - /* test the IRQ connection to the ISR */ - smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq); - pd->software_irq_signal = false; - - spin_lock_irqsave(&pd->int_lock, flags); - /* configure interrupt deassertion timer and enable interrupts */ - int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_; - 
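/* the deassertion interval is in 10us units (see INT_DEAS_TIME in smsc9420.h) */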
int_cfg &= ~(INT_CFG_INT_DEAS_MASK); - int_cfg |= (INT_DEAS_TIME & INT_CFG_INT_DEAS_MASK); - smsc9420_reg_write(pd, INT_CFG, int_cfg); - - /* unmask software interrupt */ - int_ctl = smsc9420_reg_read(pd, INT_CTL) | INT_CTL_SW_INT_EN_; - smsc9420_reg_write(pd, INT_CTL, int_ctl); - spin_unlock_irqrestore(&pd->int_lock, flags); - smsc9420_pci_flush_write(pd); - - timeout = 1000; - while (timeout--) { - if (pd->software_irq_signal) - break; - msleep(1); - } - - /* disable interrupts */ - spin_lock_irqsave(&pd->int_lock, flags); - int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); - smsc9420_reg_write(pd, INT_CFG, int_cfg); - spin_unlock_irqrestore(&pd->int_lock, flags); - - if (!pd->software_irq_signal) { - smsc_warn(IFUP, "ISR failed signaling test"); - result = -ENODEV; - goto out_free_irq_1; - } - - smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq); - - result = smsc9420_alloc_tx_ring(pd); - if (result) { - smsc_warn(IFUP, "Failed to Initialize tx dma ring"); - result = -ENOMEM; - goto out_free_irq_1; - } - - result = smsc9420_alloc_rx_ring(pd); - if (result) { - smsc_warn(IFUP, "Failed to Initialize rx dma ring"); - result = -ENOMEM; - goto out_free_tx_ring_2; - } - - result = smsc9420_mii_init(dev); - if (result) { - smsc_warn(IFUP, "Failed to initialize Phy"); - result = -ENODEV; - goto out_free_rx_ring_3; - } - - /* Bring the PHY up */ - phy_start(pd->phy_dev); - - napi_enable(&pd->napi); - - /* start tx and rx */ - mac_cr = smsc9420_reg_read(pd, MAC_CR) | MAC_CR_TXEN_ | MAC_CR_RXEN_; - smsc9420_reg_write(pd, MAC_CR, mac_cr); - - dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); - dmac_control |= DMAC_CONTROL_ST_ | DMAC_CONTROL_SR_; - smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); - smsc9420_pci_flush_write(pd); - - dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); - dma_intr_ena |= - (DMAC_INTR_ENA_TX_ | DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_); - smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); - smsc9420_pci_flush_write(pd); - - netif_wake_queue(dev); - - smsc9420_reg_write(pd, RX_POLL_DEMAND, 1); - - /* enable interrupts */ - spin_lock_irqsave(&pd->int_lock, flags); - int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_; - smsc9420_reg_write(pd, INT_CFG, int_cfg); - spin_unlock_irqrestore(&pd->int_lock, flags); - - return 0; - -out_free_rx_ring_3: - smsc9420_free_rx_ring(pd); -out_free_tx_ring_2: - smsc9420_free_tx_ring(pd); -out_free_irq_1: - free_irq(dev->irq, pd); -out_0: - return result; -} - -#ifdef CONFIG_PM - -static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct smsc9420_pdata *pd = netdev_priv(dev); - u32 int_cfg; - ulong flags; - - /* disable interrupts */ - spin_lock_irqsave(&pd->int_lock, flags); - int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); - smsc9420_reg_write(pd, INT_CFG, int_cfg); - spin_unlock_irqrestore(&pd->int_lock, flags); - - if (netif_running(dev)) { - netif_tx_disable(dev); - smsc9420_stop_tx(pd); - smsc9420_free_tx_ring(pd); - - napi_disable(&pd->napi); - smsc9420_stop_rx(pd); - smsc9420_free_rx_ring(pd); - - free_irq(dev->irq, pd); - - netif_device_detach(dev); - } - - pci_save_state(pdev); - pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static int smsc9420_resume(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct smsc9420_pdata *pd = netdev_priv(dev); - int err; - - 
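/* return to D0 and restore PCI config space before re-opening the device */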
pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_master(pdev); - - err = pci_enable_wake(pdev, 0, 0); - if (err) - smsc_warn(IFUP, "pci_enable_wake failed: %d", err); - - if (netif_running(dev)) { - err = smsc9420_open(dev); - netif_device_attach(dev); - } - return err; -} - -#endif /* CONFIG_PM */ - -static const struct net_device_ops smsc9420_netdev_ops = { - .ndo_open = smsc9420_open, - .ndo_stop = smsc9420_stop, - .ndo_start_xmit = smsc9420_hard_start_xmit, - .ndo_get_stats = smsc9420_get_stats, - .ndo_set_multicast_list = smsc9420_set_multicast_list, - .ndo_do_ioctl = smsc9420_do_ioctl, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = smsc9420_poll_controller, -#endif /* CONFIG_NET_POLL_CONTROLLER */ -}; - -static int __devinit -smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct net_device *dev; - struct smsc9420_pdata *pd; - void __iomem *virt_addr; - int result = 0; - u32 id_rev; - - printk(KERN_INFO DRV_DESCRIPTION " version " DRV_VERSION "\n"); - - /* First do the PCI initialisation */ - result = pci_enable_device(pdev); - if (unlikely(result)) { - printk(KERN_ERR "Cannot enable smsc9420\n"); - goto out_0; - } - - pci_set_master(pdev); - - dev = alloc_etherdev(sizeof(*pd)); - if (!dev) { - printk(KERN_ERR "ether device alloc failed\n"); - goto out_disable_pci_device_1; - } - - SET_NETDEV_DEV(dev, &pdev->dev); - - if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) { - printk(KERN_ERR "Cannot find PCI device base address\n"); - goto out_free_netdev_2; - } - - if ((pci_request_regions(pdev, DRV_NAME))) { - printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n"); - goto out_free_netdev_2; - } - - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { - printk(KERN_ERR "No usable DMA configuration, aborting.\n"); - goto out_free_regions_3; - } - - virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR), - pci_resource_len(pdev, SMSC_BAR)); - if (!virt_addr) { - printk(KERN_ERR "Cannot map device registers, aborting.\n"); - goto out_free_regions_3; - } - - /* registers are double mapped with 0 offset for LE and 0x200 for BE */ - virt_addr += LAN9420_CPSR_ENDIAN_OFFSET; - - dev->base_addr = (ulong)virt_addr; - - pd = netdev_priv(dev); - - /* pci descriptors are created in the PCI consistent area */ - pd->rx_ring = pci_alloc_consistent(pdev, - sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE + - sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE, - &pd->rx_dma_addr); - - if (!pd->rx_ring) - goto out_free_io_4; - - /* descriptors are aligned due to the nature of pci_alloc_consistent */ - pd->tx_ring = (struct smsc9420_dma_desc *) - (pd->rx_ring + RX_RING_SIZE); - pd->tx_dma_addr = pd->rx_dma_addr + - sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE; - - pd->pdev = pdev; - pd->dev = dev; - pd->base_addr = virt_addr; - pd->msg_enable = smsc_debug; - pd->rx_csum = true; - - smsc_dbg(PROBE, "lan_base=0x%08lx", (ulong)virt_addr); - - id_rev = smsc9420_reg_read(pd, ID_REV); - switch (id_rev & 0xFFFF0000) { - case 0x94200000: - smsc_info(PROBE, "LAN9420 identified, ID_REV=0x%08X", id_rev); - break; - default: - smsc_warn(PROBE, "LAN9420 NOT identified"); - smsc_warn(PROBE, "ID_REV=0x%08X", id_rev); - goto out_free_dmadesc_5; - } - - smsc9420_dmac_soft_reset(pd); - smsc9420_eeprom_reload(pd); - smsc9420_check_mac_address(dev); - - dev->netdev_ops = &smsc9420_netdev_ops; - dev->ethtool_ops = 
&smsc9420_ethtool_ops; - dev->irq = pdev->irq; - - netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT); - - result = register_netdev(dev); - if (result) { - smsc_warn(PROBE, "error %i registering device", result); - goto out_free_dmadesc_5; - } - - pci_set_drvdata(pdev, dev); - - spin_lock_init(&pd->int_lock); - spin_lock_init(&pd->phy_lock); - - dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr); - - return 0; - -out_free_dmadesc_5: - pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) * - (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); -out_free_io_4: - iounmap(virt_addr - LAN9420_CPSR_ENDIAN_OFFSET); -out_free_regions_3: - pci_release_regions(pdev); -out_free_netdev_2: - free_netdev(dev); -out_disable_pci_device_1: - pci_disable_device(pdev); -out_0: - return -ENODEV; -} - -static void __devexit smsc9420_remove(struct pci_dev *pdev) -{ - struct net_device *dev; - struct smsc9420_pdata *pd; - - dev = pci_get_drvdata(pdev); - if (!dev) - return; - - pci_set_drvdata(pdev, NULL); - - pd = netdev_priv(dev); - unregister_netdev(dev); - - /* tx_buffers and rx_buffers are freed in stop */ - BUG_ON(pd->tx_buffers); - BUG_ON(pd->rx_buffers); - - BUG_ON(!pd->tx_ring); - BUG_ON(!pd->rx_ring); - - pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) * - (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); - - iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET); - pci_release_regions(pdev); - free_netdev(dev); - pci_disable_device(pdev); -} - -static struct pci_driver smsc9420_driver = { - .name = DRV_NAME, - .id_table = smsc9420_id_table, - .probe = smsc9420_probe, - .remove = __devexit_p(smsc9420_remove), -#ifdef CONFIG_PM - .suspend = smsc9420_suspend, - .resume = smsc9420_resume, -#endif /* CONFIG_PM */ -}; - -static int __init smsc9420_init_module(void) -{ - smsc_debug = netif_msg_init(debug, SMSC_MSG_DEFAULT); - - return pci_register_driver(&smsc9420_driver); -} - -static void __exit smsc9420_exit_module(void) -{ - pci_unregister_driver(&smsc9420_driver); -} - -module_init(smsc9420_init_module); -module_exit(smsc9420_exit_module); diff --git a/drivers/net/smsc9420.h b/drivers/net/smsc9420.h deleted file mode 100644 index e441402..0000000 --- a/drivers/net/smsc9420.h +++ /dev/null @@ -1,276 +0,0 @@ - /*************************************************************************** - * - * Copyright (C) 2007,2008 SMSC - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- * - *************************************************************************** - */ - -#ifndef _SMSC9420_H -#define _SMSC9420_H - -#define TX_RING_SIZE (32) -#define RX_RING_SIZE (128) - -/* interrupt deassertion in multiples of 10us */ -#define INT_DEAS_TIME (50) - -#define NAPI_WEIGHT (64) -#define SMSC_BAR (3) - -#ifdef __BIG_ENDIAN -/* Register set is duplicated for BE at an offset of 0x200 */ -#define LAN9420_CPSR_ENDIAN_OFFSET (0x200) -#else -#define LAN9420_CPSR_ENDIAN_OFFSET (0) -#endif - -#define PCI_VENDOR_ID_9420 (0x1055) -#define PCI_DEVICE_ID_9420 (0xE420) - -#define LAN_REGISTER_EXTENT (0x400) - -#define SMSC9420_EEPROM_SIZE ((u32)11) -#define SMSC9420_EEPROM_MAGIC (0x9420) - -#define PKT_BUF_SZ (VLAN_ETH_FRAME_LEN + NET_IP_ALIGN + 4) - -/***********************************************/ -/* DMA Controller Control and Status Registers */ -/***********************************************/ -#define BUS_MODE (0x00) -#define BUS_MODE_SWR_ (BIT(0)) -#define BUS_MODE_DMA_BURST_LENGTH_1 (BIT(8)) -#define BUS_MODE_DMA_BURST_LENGTH_2 (BIT(9)) -#define BUS_MODE_DMA_BURST_LENGTH_4 (BIT(10)) -#define BUS_MODE_DMA_BURST_LENGTH_8 (BIT(11)) -#define BUS_MODE_DMA_BURST_LENGTH_16 (BIT(12)) -#define BUS_MODE_DMA_BURST_LENGTH_32 (BIT(13)) -#define BUS_MODE_DBO_ (BIT(20)) - -#define TX_POLL_DEMAND (0x04) - -#define RX_POLL_DEMAND (0x08) - -#define RX_BASE_ADDR (0x0C) - -#define TX_BASE_ADDR (0x10) - -#define DMAC_STATUS (0x14) -#define DMAC_STS_TS_ (7 << 20) -#define DMAC_STS_RS_ (7 << 17) -#define DMAC_STS_NIS_ (BIT(16)) -#define DMAC_STS_AIS_ (BIT(15)) -#define DMAC_STS_RWT_ (BIT(9)) -#define DMAC_STS_RXPS_ (BIT(8)) -#define DMAC_STS_RXBU_ (BIT(7)) -#define DMAC_STS_RX_ (BIT(6)) -#define DMAC_STS_TXUNF_ (BIT(5)) -#define DMAC_STS_TXBU_ (BIT(2)) -#define DMAC_STS_TXPS_ (BIT(1)) -#define DMAC_STS_TX_ (BIT(0)) - -#define DMAC_CONTROL (0x18) -#define DMAC_CONTROL_TTM_ (BIT(22)) -#define DMAC_CONTROL_SF_ (BIT(21)) -#define DMAC_CONTROL_ST_ (BIT(13)) -#define DMAC_CONTROL_OSF_ (BIT(2)) -#define DMAC_CONTROL_SR_ (BIT(1)) - -#define DMAC_INTR_ENA (0x1C) -#define DMAC_INTR_ENA_NIS_ (BIT(16)) -#define DMAC_INTR_ENA_AIS_ (BIT(15)) -#define DMAC_INTR_ENA_RWT_ (BIT(9)) -#define DMAC_INTR_ENA_RXPS_ (BIT(8)) -#define DMAC_INTR_ENA_RXBU_ (BIT(7)) -#define DMAC_INTR_ENA_RX_ (BIT(6)) -#define DMAC_INTR_ENA_TXBU_ (BIT(2)) -#define DMAC_INTR_ENA_TXPS_ (BIT(1)) -#define DMAC_INTR_ENA_TX_ (BIT(0)) - -#define MISS_FRAME_CNTR (0x20) - -#define TX_BUFF_ADDR (0x50) - -#define RX_BUFF_ADDR (0x54) - -/* Transmit Descriptor Bit Defs */ -#define TDES0_OWN_ (0x80000000) -#define TDES0_ERROR_SUMMARY_ (0x00008000) -#define TDES0_LOSS_OF_CARRIER_ (0x00000800) -#define TDES0_NO_CARRIER_ (0x00000400) -#define TDES0_LATE_COLLISION_ (0x00000200) -#define TDES0_EXCESSIVE_COLLISIONS_ (0x00000100) -#define TDES0_HEARTBEAT_FAIL_ (0x00000080) -#define TDES0_COLLISION_COUNT_MASK_ (0x00000078) -#define TDES0_COLLISION_COUNT_SHFT_ (3) -#define TDES0_EXCESSIVE_DEFERRAL_ (0x00000004) -#define TDES0_DEFERRED_ (0x00000001) - -#define TDES1_IC_ 0x80000000 -#define TDES1_LS_ 0x40000000 -#define TDES1_FS_ 0x20000000 -#define TDES1_TXCSEN_ 0x08000000 -#define TDES1_TER_ (BIT(25)) -#define TDES1_TCH_ 0x01000000 - -/* Receive Descriptor 0 Bit Defs */ -#define RDES0_OWN_ (0x80000000) -#define RDES0_FRAME_LENGTH_MASK_ (0x07FF0000) -#define RDES0_FRAME_LENGTH_SHFT_ (16) -#define RDES0_ERROR_SUMMARY_ (0x00008000) -#define RDES0_DESCRIPTOR_ERROR_ (0x00004000) -#define RDES0_LENGTH_ERROR_ (0x00001000) -#define RDES0_RUNT_FRAME_ (0x00000800) -#define 
RDES0_MULTICAST_FRAME_ (0x00000400) -#define RDES0_FIRST_DESCRIPTOR_ (0x00000200) -#define RDES0_LAST_DESCRIPTOR_ (0x00000100) -#define RDES0_FRAME_TOO_LONG_ (0x00000080) -#define RDES0_COLLISION_SEEN_ (0x00000040) -#define RDES0_FRAME_TYPE_ (0x00000020) -#define RDES0_WATCHDOG_TIMEOUT_ (0x00000010) -#define RDES0_MII_ERROR_ (0x00000008) -#define RDES0_DRIBBLING_BIT_ (0x00000004) -#define RDES0_CRC_ERROR_ (0x00000002) - -/* Receive Descriptor 1 Bit Defs */ -#define RDES1_RER_ (0x02000000) - -/***********************************************/ -/* MAC Control and Status Registers */ -/***********************************************/ -#define MAC_CR (0x80) -#define MAC_CR_RXALL_ (0x80000000) -#define MAC_CR_DIS_RXOWN_ (0x00800000) -#define MAC_CR_LOOPBK_ (0x00200000) -#define MAC_CR_FDPX_ (0x00100000) -#define MAC_CR_MCPAS_ (0x00080000) -#define MAC_CR_PRMS_ (0x00040000) -#define MAC_CR_INVFILT_ (0x00020000) -#define MAC_CR_PASSBAD_ (0x00010000) -#define MAC_CR_HFILT_ (0x00008000) -#define MAC_CR_HPFILT_ (0x00002000) -#define MAC_CR_LCOLL_ (0x00001000) -#define MAC_CR_DIS_BCAST_ (0x00000800) -#define MAC_CR_DIS_RTRY_ (0x00000400) -#define MAC_CR_PADSTR_ (0x00000100) -#define MAC_CR_BOLMT_MSK (0x000000C0) -#define MAC_CR_MFCHK_ (0x00000020) -#define MAC_CR_TXEN_ (0x00000008) -#define MAC_CR_RXEN_ (0x00000004) - -#define ADDRH (0x84) - -#define ADDRL (0x88) - -#define HASHH (0x8C) - -#define HASHL (0x90) - -#define MII_ACCESS (0x94) -#define MII_ACCESS_MII_BUSY_ (0x00000001) -#define MII_ACCESS_MII_WRITE_ (0x00000002) -#define MII_ACCESS_MII_READ_ (0x00000000) -#define MII_ACCESS_INDX_MSK_ (0x000007C0) -#define MII_ACCESS_PHYADDR_MSK_ (0x0000F8C0) -#define MII_ACCESS_INDX_SHFT_CNT (6) -#define MII_ACCESS_PHYADDR_SHFT_CNT (11) - -#define MII_DATA (0x98) - -#define FLOW (0x9C) - -#define VLAN1 (0xA0) - -#define VLAN2 (0xA4) - -#define WUFF (0xA8) - -#define WUCSR (0xAC) - -#define COE_CR (0xB0) -#define TX_COE_EN (0x00010000) -#define RX_COE_MODE (0x00000002) -#define RX_COE_EN (0x00000001) - -/***********************************************/ -/* System Control and Status Registers */ -/***********************************************/ -#define ID_REV (0xC0) - -#define INT_CTL (0xC4) -#define INT_CTL_SW_INT_EN_ (0x00008000) -#define INT_CTL_SBERR_INT_EN_ (1 << 12) -#define INT_CTL_MBERR_INT_EN_ (1 << 13) -#define INT_CTL_GPT_INT_EN_ (0x00000008) -#define INT_CTL_PHY_INT_EN_ (0x00000004) -#define INT_CTL_WAKE_INT_EN_ (0x00000002) - -#define INT_STAT (0xC8) -#define INT_STAT_SW_INT_ (1 << 15) -#define INT_STAT_MBERR_INT_ (1 << 13) -#define INT_STAT_SBERR_INT_ (1 << 12) -#define INT_STAT_GPT_INT_ (1 << 3) -#define INT_STAT_PHY_INT_ (0x00000004) -#define INT_STAT_WAKE_INT_ (0x00000002) -#define INT_STAT_DMAC_INT_ (0x00000001) - -#define INT_CFG (0xCC) -#define INT_CFG_IRQ_INT_ (0x00080000) -#define INT_CFG_IRQ_EN_ (0x00040000) -#define INT_CFG_INT_DEAS_CLR_ (0x00000200) -#define INT_CFG_INT_DEAS_MASK (0x000000FF) - -#define GPIO_CFG (0xD0) -#define GPIO_CFG_LED_3_ (0x40000000) -#define GPIO_CFG_LED_2_ (0x20000000) -#define GPIO_CFG_LED_1_ (0x10000000) -#define GPIO_CFG_EEPR_EN_ (0x00700000) - -#define GPT_CFG (0xD4) -#define GPT_CFG_TIMER_EN_ (0x20000000) - -#define GPT_CNT (0xD8) - -#define BUS_CFG (0xDC) -#define BUS_CFG_RXTXWEIGHT_1_1 (0 << 25) -#define BUS_CFG_RXTXWEIGHT_2_1 (1 << 25) -#define BUS_CFG_RXTXWEIGHT_3_1 (2 << 25) -#define BUS_CFG_RXTXWEIGHT_4_1 (3 << 25) - -#define PMT_CTRL (0xE0) - -#define FREE_RUN (0xF4) - -#define E2P_CMD (0xF8) -#define E2P_CMD_EPC_BUSY_ (0x80000000) -#define 
E2P_CMD_EPC_CMD_ (0x70000000) -#define E2P_CMD_EPC_CMD_READ_ (0x00000000) -#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) -#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) -#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) -#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) -#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) -#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) -#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) -#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) -#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) -#define E2P_CMD_EPC_ADDR_ (0x000000FF) - -#define E2P_DATA (0xFC) -#define E2P_DATA_EEPROM_DATA_ (0x000000FF) - -#endif /* _SMSC9420_H */ -- cgit v0.10.2 From 115978859272b958366d4a08c99a24f9625fa663 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Wed, 13 Jul 2011 15:38:08 -0700 Subject: i825xx: Move the Intel 82586/82593/82596 based drivers Move the drivers that use the i82586/i82593/i82596 chipsets into drivers/net/ethernet/i825xx/ and make the necessary Kconfig and Makefile changes. There were 4 3Com drivers which were initially moved into 3com/, which now reside in i825xx since they all used the i82586 chip. CC: Philip Blundell CC: Russell King CC: CC: Donald Becker CC: Chris Beauregard CC: Richard Procter CC: Andries Brouwer CC: "M.Hipp" CC: Richard Hirst CC: Sam Creasey CC: Thomas Bogendoerfer Signed-off-by: Jeff Kirsher diff --git a/MAINTAINERS b/MAINTAINERS index 7c0294c..5b368cc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -117,7 +117,7 @@ Maintainers List (try to look for most precise areas first) M: Philip Blundell L: netdev@vger.kernel.org S: Maintained -F: drivers/net/3c505* +F: drivers/net/ethernet/i825xx/3c505* 3C59X NETWORK DRIVER M: Steffen Klassert @@ -1014,7 +1014,7 @@ F: arch/arm/include/asm/hardware/ioc.h F: arch/arm/include/asm/hardware/iomd.h F: arch/arm/include/asm/hardware/memc.h F: arch/arm/mach-rpc/ -F: drivers/net/arm/ether* +F: drivers/net/arm/ether3* F: drivers/scsi/arm/ ARM/SHARK MACHINE SUPPORT @@ -2510,7 +2510,7 @@ ETHEREXPRESS-16 NETWORK DRIVER M: Philip Blundell L: netdev@vger.kernel.org S: Maintained -F: drivers/net/eexpress.* +F: drivers/net/ethernet/i825xx/eexpress.* ETHERNET BRIDGE M: Stephen Hemminger diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c deleted file mode 100644 index 88d766e..0000000 --- a/drivers/net/3c505.c +++ /dev/null @@ -1,1673 +0,0 @@ -/* - * Linux Ethernet device driver for the 3Com Etherlink Plus (3C505) - * By Craig Southeren, Juha Laiho and Philip Blundell - * - * 3c505.c This module implements an interface to the 3Com - * Etherlink Plus (3c505) Ethernet card. Linux device - * driver interface reverse engineered from the Linux 3C509 - * device drivers. Some 3C505 information gleaned from - * the Crynwr packet driver. Still this driver would not - * be here without 3C505 technical reference provided by - * 3Com. - * - * $Id: 3c505.c,v 1.10 1996/04/16 13:06:27 phil Exp $ - * - * Authors: Linux 3c505 device driver by - * Craig Southeren, - * Final debugging by - * Andrew Tridgell, - * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by - * Juha Laiho, - * Linux 3C509 driver by - * Donald Becker, - * (Now at ) - * Crynwr packet driver by - * Krishnan Gopalan and Gregg Stefancik, - * Clemson University Engineering Computer Operations. - * Portions of the code have been adapted from the 3c505 - * driver for NCSA Telnet by Bruce Orchard and later - * modified by Warren Van Houten and krus@diku.dk. 
- * 3C505 technical information provided by - * Terry Murphy, of 3Com Network Adapter Division - * Linux 1.3.0 changes by - * Alan Cox - * More debugging, DMA support, currently maintained by - * Philip Blundell - * Multicard/soft configurable dma channel/rev 2 hardware support - * by Christopher Collins - * Ethtool support (jgarzik), 11/17/2001 - */ - -#define DRV_NAME "3c505" -#define DRV_VERSION "1.10a" - - -/* Theory of operation: - * - * The 3c505 is quite an intelligent board. All communication with it is done - * by means of Primary Command Blocks (PCBs); these are transferred using PIO - * through the command register. The card has 256k of on-board RAM, which is - * used to buffer received packets. It might seem at first that more buffers - * are better, but in fact this isn't true. From my tests, it seems that - * more than about 10 buffers are unnecessary, and there is a noticeable - * performance hit in having more active on the card. So the majority of the - * card's memory isn't, in fact, used. Sadly, the card only has one transmit - * buffer and, short of loading our own firmware into it (which is what some - * drivers resort to) there's nothing we can do about this. - * - * We keep up to 4 "receive packet" commands active on the board at a time. - * When a packet comes in, so long as there is a receive command active, the - * board will send us a "packet received" PCB and then add the data for that - * packet to the DMA queue. If a DMA transfer is not already in progress, we - * set one up to start uploading the data. We have to maintain a list of - * backlogged receive packets, because the card may decide to tell us about - * a newly-arrived packet at any time, and we may not be able to start a DMA - * transfer immediately (ie one may already be going on). We can't NAK the - * PCB, because then it would throw the packet away. - * - * Trying to send a PCB to the card at the wrong moment seems to have bad - * effects. If we send it a transmit PCB while a receive DMA is happening, - * it will just NAK the PCB and so we will have wasted our time. Worse, it - * sometimes seems to interrupt the transfer. The majority of the low-level - * code is protected by one huge semaphore -- "busy" -- which is set whenever - * it probably isn't safe to do anything to the card. The receive routine - * must gain a lock on "busy" before it can start a DMA transfer, and the - * transmit routine must gain a lock before it sends the first PCB to the card. - * The send_pcb() routine also has an internal semaphore to protect it against - * being re-entered (which would be disastrous) -- this is needed because - * several things can happen asynchronously (re-priming the receiver and - * asking the card for statistics, for example). send_pcb() will also refuse - * to talk to the card at all if a DMA upload is happening. The higher-level - * networking code will reschedule a later retry if some part of the driver - * is blocked. In practice, this doesn't seem to happen very often. - */ - -/* This driver may now work with revision 2.x hardware, since all the read - * operations on the HCR have been removed (we now keep our own softcopy). - * But I don't have an old card to test it on. - * - * This has had the bad effect that the autoprobe routine is now a bit - * less friendly to other devices. However, it was never very good - * before, so I doubt it will hurt anybody. - */ - -/* The driver is a mess. 
I took Craig's and Juha's code, and hacked it firstly - * to make it more reliable, and secondly to add DMA mode. Many things could - * probably be done better; the concurrency protection is particularly awful. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include -#include - -#include "3c505.h" - -/********************************************************* - * - * define debug messages here as common strings to reduce space - * - *********************************************************/ - -#define filename __FILE__ - -#define timeout_msg "*** timeout at %s:%s (line %d) ***\n" -#define TIMEOUT_MSG(lineno) \ - pr_notice(timeout_msg, filename, __func__, (lineno)) - -#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n" -#define INVALID_PCB_MSG(len) \ - pr_notice(invalid_pcb_msg, (len), filename, __func__, __LINE__) - -#define search_msg "%s: Looking for 3c505 adapter at address %#x..." - -#define stilllooking_msg "still looking..." - -#define found_msg "found.\n" - -#define notfound_msg "not found (reason = %d)\n" - -#define couldnot_msg "%s: 3c505 not found\n" - -/********************************************************* - * - * various other debug stuff - * - *********************************************************/ - -#ifdef ELP_DEBUG -static int elp_debug = ELP_DEBUG; -#else -static int elp_debug; -#endif -#define debug elp_debug - -/* - * 0 = no messages (well, some) - * 1 = messages when high level commands performed - * 2 = messages when low level commands performed - * 3 = messages when interrupts received - */ - -/***************************************************************** - * - * List of I/O-addresses we try to auto-sense - * Last element MUST BE 0! - *****************************************************************/ - -static int addr_list[] __initdata = {0x300, 0x280, 0x310, 0}; - -/* Dma Memory related stuff */ - -static unsigned long dma_mem_alloc(int size) -{ - int order = get_order(size); - return __get_dma_pages(GFP_KERNEL, order); -} - - -/***************************************************************** - * - * Functions for I/O (note the inline !) 
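- * (all port offsets below are relative to dev->base_addr)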
- * - *****************************************************************/ - -static inline unsigned char inb_status(unsigned int base_addr) -{ - return inb(base_addr + PORT_STATUS); -} - -static inline int inb_command(unsigned int base_addr) -{ - return inb(base_addr + PORT_COMMAND); -} - -static inline void outb_control(unsigned char val, struct net_device *dev) -{ - outb(val, dev->base_addr + PORT_CONTROL); - ((elp_device *)(netdev_priv(dev)))->hcr_val = val; -} - -#define HCR_VAL(x) (((elp_device *)(netdev_priv(x)))->hcr_val) - -static inline void outb_command(unsigned char val, unsigned int base_addr) -{ - outb(val, base_addr + PORT_COMMAND); -} - -static inline unsigned int backlog_next(unsigned int n) -{ - return (n + 1) % BACKLOG_SIZE; -} - -/***************************************************************** - * - * useful functions for accessing the adapter - * - *****************************************************************/ - -/* - * use this routine when accessing the ASF bits as they are - * changed asynchronously by the adapter - */ - -/* get adapter PCB status */ -#define GET_ASF(addr) \ - (get_status(addr)&ASF_PCB_MASK) - -static inline int get_status(unsigned int base_addr) -{ - unsigned long timeout = jiffies + 10*HZ/100; - register int stat1; - do { - stat1 = inb_status(base_addr); - } while (stat1 != inb_status(base_addr) && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) - TIMEOUT_MSG(__LINE__); - return stat1; -} - -static inline void set_hsf(struct net_device *dev, int hsf) -{ - elp_device *adapter = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&adapter->lock, flags); - outb_control((HCR_VAL(dev) & ~HSF_PCB_MASK) | hsf, dev); - spin_unlock_irqrestore(&adapter->lock, flags); -} - -static bool start_receive(struct net_device *, pcb_struct *); - -static inline void adapter_reset(struct net_device *dev) -{ - unsigned long timeout; - elp_device *adapter = netdev_priv(dev); - unsigned char orig_hcr = adapter->hcr_val; - - outb_control(0, dev); - - if (inb_status(dev->base_addr) & ACRF) { - do { - inb_command(dev->base_addr); - timeout = jiffies + 2*HZ/100; - while (time_before_eq(jiffies, timeout) && !(inb_status(dev->base_addr) & ACRF)); - } while (inb_status(dev->base_addr) & ACRF); - set_hsf(dev, HSF_PCB_NAK); - } - outb_control(adapter->hcr_val | ATTN | DIR, dev); - mdelay(10); - outb_control(adapter->hcr_val & ~ATTN, dev); - mdelay(10); - outb_control(adapter->hcr_val | FLSH, dev); - mdelay(10); - outb_control(adapter->hcr_val & ~FLSH, dev); - mdelay(10); - - outb_control(orig_hcr, dev); - if (!start_receive(dev, &adapter->tx_pcb)) - pr_err("%s: start receive command failed\n", dev->name); -} - -/* Check to make sure that a DMA transfer hasn't timed out. This should - * never happen in theory, but seems to occur occasionally if the card gets - * prodded at the wrong time. - */ -static inline void check_3c505_dma(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) { - unsigned long flags, f; - pr_err("%s: DMA %s timed out, %d bytes left\n", dev->name, - adapter->current_dma.direction ? 
"download" : "upload", - get_dma_residue(dev->dma)); - spin_lock_irqsave(&adapter->lock, flags); - adapter->dmaing = 0; - adapter->busy = 0; - - f=claim_dma_lock(); - disable_dma(dev->dma); - release_dma_lock(f); - - if (adapter->rx_active) - adapter->rx_active--; - outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev); - spin_unlock_irqrestore(&adapter->lock, flags); - } -} - -/* Primitive functions used by send_pcb() */ -static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte) -{ - unsigned long timeout; - outb_command(byte, base_addr); - for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) { - if (inb_status(base_addr) & HCRE) - return false; - } - pr_warning("3c505: send_pcb_slow timed out\n"); - return true; -} - -static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte) -{ - unsigned int timeout; - outb_command(byte, base_addr); - for (timeout = 0; timeout < 40000; timeout++) { - if (inb_status(base_addr) & HCRE) - return false; - } - pr_warning("3c505: send_pcb_fast timed out\n"); - return true; -} - -/* Check to see if the receiver needs restarting, and kick it if so */ -static inline void prime_rx(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) { - if (!start_receive(dev, &adapter->itx_pcb)) - break; - } -} - -/***************************************************************** - * - * send_pcb - * Send a PCB to the adapter. - * - * output byte to command reg --<--+ - * wait until HCRE is non zero | - * loop until all bytes sent -->--+ - * set HSF1 and HSF2 to 1 - * output pcb length - * wait until ASF give ACK or NAK - * set HSF1 and HSF2 to 0 - * - *****************************************************************/ - -/* This can be quite slow -- the adapter is allowed to take up to 40ms - * to respond to the initial interrupt. - * - * We run initially with interrupts turned on, but with a semaphore set - * so that nobody tries to re-enter this code. Once the first byte has - * gone through, we turn interrupts off and then send the others (the - * timeout is reduced to 500us). 
- */ - -static bool send_pcb(struct net_device *dev, pcb_struct * pcb) -{ - int i; - unsigned long timeout; - elp_device *adapter = netdev_priv(dev); - unsigned long flags; - - check_3c505_dma(dev); - - if (adapter->dmaing && adapter->current_dma.direction == 0) - return false; - - /* Avoid contention */ - if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) { - if (elp_debug >= 3) { - pr_debug("%s: send_pcb entered while threaded\n", dev->name); - } - return false; - } - /* - * load each byte into the command register and - * wait for the HCRE bit to indicate the adapter - * had read the byte - */ - set_hsf(dev, 0); - - if (send_pcb_slow(dev->base_addr, pcb->command)) - goto abort; - - spin_lock_irqsave(&adapter->lock, flags); - - if (send_pcb_fast(dev->base_addr, pcb->length)) - goto sti_abort; - - for (i = 0; i < pcb->length; i++) { - if (send_pcb_fast(dev->base_addr, pcb->data.raw[i])) - goto sti_abort; - } - - outb_control(adapter->hcr_val | 3, dev); /* signal end of PCB */ - outb_command(2 + pcb->length, dev->base_addr); - - /* now wait for the acknowledgement */ - spin_unlock_irqrestore(&adapter->lock, flags); - - for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) { - switch (GET_ASF(dev->base_addr)) { - case ASF_PCB_ACK: - adapter->send_pcb_semaphore = 0; - return true; - - case ASF_PCB_NAK: -#ifdef ELP_DEBUG - pr_debug("%s: send_pcb got NAK\n", dev->name); -#endif - goto abort; - } - } - - if (elp_debug >= 1) - pr_debug("%s: timeout waiting for PCB acknowledge (status %02x)\n", - dev->name, inb_status(dev->base_addr)); - goto abort; - - sti_abort: - spin_unlock_irqrestore(&adapter->lock, flags); - abort: - adapter->send_pcb_semaphore = 0; - return false; -} - - -/***************************************************************** - * - * receive_pcb - * Read a PCB from the adapter - * - * wait for ACRF to be non-zero ---<---+ - * input a byte | - * if ASF1 and ASF2 were not both one | - * before byte was read, loop --->---+ - * set HSF1 and HSF2 for ack - * - *****************************************************************/ - -static bool receive_pcb(struct net_device *dev, pcb_struct * pcb) -{ - int i, j; - int total_length; - int stat; - unsigned long timeout; - unsigned long flags; - - elp_device *adapter = netdev_priv(dev); - - set_hsf(dev, 0); - - /* get the command code */ - timeout = jiffies + 2*HZ/100; - while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) { - TIMEOUT_MSG(__LINE__); - return false; - } - pcb->command = inb_command(dev->base_addr); - - /* read the data length */ - timeout = jiffies + 3*HZ/100; - while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) { - TIMEOUT_MSG(__LINE__); - pr_info("%s: status %02x\n", dev->name, stat); - return false; - } - pcb->length = inb_command(dev->base_addr); - - if (pcb->length > MAX_PCB_DATA) { - INVALID_PCB_MSG(pcb->length); - adapter_reset(dev); - return false; - } - /* read the data */ - spin_lock_irqsave(&adapter->lock, flags); - for (i = 0; i < MAX_PCB_DATA; i++) { - for (j = 0; j < 20000; j++) { - stat = get_status(dev->base_addr); - if (stat & ACRF) - break; - } - pcb->data.raw[i] = inb_command(dev->base_addr); - if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000) - break; - } - spin_unlock_irqrestore(&adapter->lock, flags); - if (i >= MAX_PCB_DATA) { - INVALID_PCB_MSG(i); - return false; - } - if (j >= 20000) { - TIMEOUT_MSG(__LINE__); - return 
false; - } - /* the last "data" byte was really the length! */ - total_length = pcb->data.raw[i]; - - /* safety check total length vs data length */ - if (total_length != (pcb->length + 2)) { - if (elp_debug >= 2) - pr_warning("%s: mangled PCB received\n", dev->name); - set_hsf(dev, HSF_PCB_NAK); - return false; - } - - if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) { - if (test_and_set_bit(0, (void *) &adapter->busy)) { - if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) { - set_hsf(dev, HSF_PCB_NAK); - pr_warning("%s: PCB rejected, transfer in progress and backlog full\n", dev->name); - pcb->command = 0; - return true; - } else { - pcb->command = 0xff; - } - } - } - set_hsf(dev, HSF_PCB_ACK); - return true; -} - -/****************************************************** - * - * queue a receive command on the adapter so we will get an - * interrupt when a packet is received. - * - ******************************************************/ - -static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb) -{ - bool status; - elp_device *adapter = netdev_priv(dev); - - if (elp_debug >= 3) - pr_debug("%s: restarting receiver\n", dev->name); - tx_pcb->command = CMD_RECEIVE_PACKET; - tx_pcb->length = sizeof(struct Rcv_pkt); - tx_pcb->data.rcv_pkt.buf_seg - = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */ - tx_pcb->data.rcv_pkt.buf_len = 1600; - tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */ - status = send_pcb(dev, tx_pcb); - if (status) - adapter->rx_active++; - return status; -} - -/****************************************************** - * - * extract a packet from the adapter - * this routine is only called from within the interrupt - * service routine, so no cli/sti calls are needed - * note that the length is always assumed to be even - * - ******************************************************/ - -static void receive_packet(struct net_device *dev, int len) -{ - int rlen; - elp_device *adapter = netdev_priv(dev); - void *target; - struct sk_buff *skb; - unsigned long flags; - - rlen = (len + 1) & ~1; - skb = dev_alloc_skb(rlen + 2); - - if (!skb) { - pr_warning("%s: memory squeeze, dropping packet\n", dev->name); - target = adapter->dma_buffer; - adapter->current_dma.target = NULL; - /* FIXME: stats */ - return; - } - - skb_reserve(skb, 2); - target = skb_put(skb, rlen); - if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) { - adapter->current_dma.target = target; - target = adapter->dma_buffer; - } else { - adapter->current_dma.target = NULL; - } - - /* if this happens, we die */ - if (test_and_set_bit(0, (void *) &adapter->dmaing)) - pr_err("%s: rx blocked, DMA in progress, dir %d\n", - dev->name, adapter->current_dma.direction); - - adapter->current_dma.direction = 0; - adapter->current_dma.length = rlen; - adapter->current_dma.skb = skb; - adapter->current_dma.start_time = jiffies; - - outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev); - - flags=claim_dma_lock(); - disable_dma(dev->dma); - clear_dma_ff(dev->dma); - set_dma_mode(dev->dma, 0x04); /* dma read */ - set_dma_addr(dev->dma, isa_virt_to_bus(target)); - set_dma_count(dev->dma, rlen); - enable_dma(dev->dma); - release_dma_lock(flags); - - if (elp_debug >= 3) { - pr_debug("%s: rx DMA transfer started\n", dev->name); - } - - if (adapter->rx_active) - adapter->rx_active--; - - if (!adapter->busy) - pr_warning("%s: receive_packet called, busy not set.\n", dev->name); -} - -/****************************************************** - * - * interrupt handler - * - 
******************************************************/ - -static irqreturn_t elp_interrupt(int irq, void *dev_id) -{ - int len; - int dlen; - int icount = 0; - struct net_device *dev = dev_id; - elp_device *adapter = netdev_priv(dev); - unsigned long timeout; - - spin_lock(&adapter->lock); - - do { - /* - * has a DMA transfer finished? - */ - if (inb_status(dev->base_addr) & DONE) { - if (!adapter->dmaing) - pr_warning("%s: phantom DMA completed\n", dev->name); - - if (elp_debug >= 3) - pr_debug("%s: %s DMA complete, status %02x\n", dev->name, - adapter->current_dma.direction ? "tx" : "rx", - inb_status(dev->base_addr)); - - outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev); - if (adapter->current_dma.direction) { - dev_kfree_skb_irq(adapter->current_dma.skb); - } else { - struct sk_buff *skb = adapter->current_dma.skb; - if (skb) { - if (adapter->current_dma.target) { - /* have already done the skb_put() */ - memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length); - } - skb->protocol = eth_type_trans(skb,dev); - dev->stats.rx_bytes += skb->len; - netif_rx(skb); - } - } - adapter->dmaing = 0; - if (adapter->rx_backlog.in != adapter->rx_backlog.out) { - int t = adapter->rx_backlog.length[adapter->rx_backlog.out]; - adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out); - if (elp_debug >= 2) - pr_debug("%s: receiving backlogged packet (%d)\n", dev->name, t); - receive_packet(dev, t); - } else { - adapter->busy = 0; - } - } else { - /* has one timed out? */ - check_3c505_dma(dev); - } - - /* - * receive a PCB from the adapter - */ - timeout = jiffies + 3*HZ/100; - while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) { - if (receive_pcb(dev, &adapter->irx_pcb)) { - switch (adapter->irx_pcb.command) - { - case 0: - break; - /* - * received a packet - this must be handled fast - */ - case 0xff: - case CMD_RECEIVE_PACKET_COMPLETE: - /* if the device isn't open, don't pass packets up the stack */ - if (!netif_running(dev)) - break; - len = adapter->irx_pcb.data.rcv_resp.pkt_len; - dlen = adapter->irx_pcb.data.rcv_resp.buf_len; - if (adapter->irx_pcb.data.rcv_resp.timeout != 0) { - pr_err("%s: interrupt - packet not received correctly\n", dev->name); - } else { - if (elp_debug >= 3) { - pr_debug("%s: interrupt - packet received of length %i (%i)\n", - dev->name, len, dlen); - } - if (adapter->irx_pcb.command == 0xff) { - if (elp_debug >= 2) - pr_debug("%s: adding packet to backlog (len = %d)\n", - dev->name, dlen); - adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen; - adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in); - } else { - receive_packet(dev, dlen); - } - if (elp_debug >= 3) - pr_debug("%s: packet received\n", dev->name); - } - break; - - /* - * 82586 configured correctly - */ - case CMD_CONFIGURE_82586_RESPONSE: - adapter->got[CMD_CONFIGURE_82586] = 1; - if (elp_debug >= 3) - pr_debug("%s: interrupt - configure response received\n", dev->name); - break; - - /* - * Adapter memory configuration - */ - case CMD_CONFIGURE_ADAPTER_RESPONSE: - adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1; - if (elp_debug >= 3) - pr_debug("%s: Adapter memory configuration %s.\n", dev->name, - adapter->irx_pcb.data.failed ? "failed" : "succeeded"); - break; - - /* - * Multicast list loading - */ - case CMD_LOAD_MULTICAST_RESPONSE: - adapter->got[CMD_LOAD_MULTICAST_LIST] = 1; - if (elp_debug >= 3) - pr_debug("%s: Multicast address list loading %s.\n", dev->name, - adapter->irx_pcb.data.failed ? 
"failed" : "succeeded"); - break; - - /* - * Station address setting - */ - case CMD_SET_ADDRESS_RESPONSE: - adapter->got[CMD_SET_STATION_ADDRESS] = 1; - if (elp_debug >= 3) - pr_debug("%s: Ethernet address setting %s.\n", dev->name, - adapter->irx_pcb.data.failed ? "failed" : "succeeded"); - break; - - - /* - * received board statistics - */ - case CMD_NETWORK_STATISTICS_RESPONSE: - dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv; - dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit; - dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC; - dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align; - dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun; - dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res; - adapter->got[CMD_NETWORK_STATISTICS] = 1; - if (elp_debug >= 3) - pr_debug("%s: interrupt - statistics response received\n", dev->name); - break; - - /* - * sent a packet - */ - case CMD_TRANSMIT_PACKET_COMPLETE: - if (elp_debug >= 3) - pr_debug("%s: interrupt - packet sent\n", dev->name); - if (!netif_running(dev)) - break; - switch (adapter->irx_pcb.data.xmit_resp.c_stat) { - case 0xffff: - dev->stats.tx_aborted_errors++; - pr_info("%s: transmit timed out, network cable problem?\n", dev->name); - break; - case 0xfffe: - dev->stats.tx_fifo_errors++; - pr_info("%s: transmit timed out, FIFO underrun\n", dev->name); - break; - } - netif_wake_queue(dev); - break; - - /* - * some unknown PCB - */ - default: - pr_debug("%s: unknown PCB received - %2.2x\n", - dev->name, adapter->irx_pcb.command); - break; - } - } else { - pr_warning("%s: failed to read PCB on interrupt\n", dev->name); - adapter_reset(dev); - } - } - - } while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE))); - - prime_rx(dev); - - /* - * indicate no longer in interrupt routine - */ - spin_unlock(&adapter->lock); - return IRQ_HANDLED; -} - - -/****************************************************** - * - * open the board - * - ******************************************************/ - -static int elp_open(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - int retval; - - if (elp_debug >= 3) - pr_debug("%s: request to open device\n", dev->name); - - /* - * make sure we actually found the device - */ - if (adapter == NULL) { - pr_err("%s: Opening a non-existent physical device\n", dev->name); - return -EAGAIN; - } - /* - * disable interrupts on the board - */ - outb_control(0, dev); - - /* - * clear any pending interrupts - */ - inb_command(dev->base_addr); - adapter_reset(dev); - - /* - * no receive PCBs active - */ - adapter->rx_active = 0; - - adapter->busy = 0; - adapter->send_pcb_semaphore = 0; - adapter->rx_backlog.in = 0; - adapter->rx_backlog.out = 0; - - spin_lock_init(&adapter->lock); - - /* - * install our interrupt service routine - */ - if ((retval = request_irq(dev->irq, elp_interrupt, 0, dev->name, dev))) { - pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq); - return retval; - } - if ((retval = request_dma(dev->dma, dev->name))) { - free_irq(dev->irq, dev); - pr_err("%s: could not allocate DMA%d channel\n", dev->name, dev->dma); - return retval; - } - adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE); - if (!adapter->dma_buffer) { - pr_err("%s: could not allocate DMA buffer\n", dev->name); - free_dma(dev->dma); - free_irq(dev->irq, dev); - return -ENOMEM; - } - adapter->dmaing = 0; - - /* - * enable interrupts on the board - */ - outb_control(CMDE, dev); - - /* - * 
configure adapter memory: we need 10 multicast addresses, default==0 - */ - if (elp_debug >= 3) - pr_debug("%s: sending 3c505 memory configuration command\n", dev->name); - adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY; - adapter->tx_pcb.data.memconf.cmd_q = 10; - adapter->tx_pcb.data.memconf.rcv_q = 20; - adapter->tx_pcb.data.memconf.mcast = 10; - adapter->tx_pcb.data.memconf.frame = 20; - adapter->tx_pcb.data.memconf.rcv_b = 20; - adapter->tx_pcb.data.memconf.progs = 0; - adapter->tx_pcb.length = sizeof(struct Memconf); - adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0; - if (!send_pcb(dev, &adapter->tx_pcb)) - pr_err("%s: couldn't send memory configuration command\n", dev->name); - else { - unsigned long timeout = jiffies + TIMEOUT; - while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) - TIMEOUT_MSG(__LINE__); - } - - - /* - * configure adapter to receive broadcast messages and wait for response - */ - if (elp_debug >= 3) - pr_debug("%s: sending 82586 configure command\n", dev->name); - adapter->tx_pcb.command = CMD_CONFIGURE_82586; - adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD; - adapter->tx_pcb.length = 2; - adapter->got[CMD_CONFIGURE_82586] = 0; - if (!send_pcb(dev, &adapter->tx_pcb)) - pr_err("%s: couldn't send 82586 configure command\n", dev->name); - else { - unsigned long timeout = jiffies + TIMEOUT; - while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) - TIMEOUT_MSG(__LINE__); - } - - /* enable burst-mode DMA */ - /* outb(0x1, dev->base_addr + PORT_AUXDMA); */ - - /* - * queue receive commands to provide buffering - */ - prime_rx(dev); - if (elp_debug >= 3) - pr_debug("%s: %d receive PCBs active\n", dev->name, adapter->rx_active); - - /* - * device is now officially open! - */ - - netif_start_queue(dev); - return 0; -} - - -/****************************************************** - * - * send a packet to the adapter - * - ******************************************************/ - -static netdev_tx_t send_packet(struct net_device *dev, struct sk_buff *skb) -{ - elp_device *adapter = netdev_priv(dev); - unsigned long target; - unsigned long flags; - - /* - * make sure the length is even and no shorter than 60 bytes - */ - unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1); - - if (test_and_set_bit(0, (void *) &adapter->busy)) { - if (elp_debug >= 2) - pr_debug("%s: transmit blocked\n", dev->name); - return false; - } - - dev->stats.tx_bytes += nlen; - - /* - * send the adapter a transmit packet command. 
Ignore segment and offset - * and make sure the length is even - */ - adapter->tx_pcb.command = CMD_TRANSMIT_PACKET; - adapter->tx_pcb.length = sizeof(struct Xmit_pkt); - adapter->tx_pcb.data.xmit_pkt.buf_ofs - = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */ - adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen; - - if (!send_pcb(dev, &adapter->tx_pcb)) { - adapter->busy = 0; - return false; - } - /* if this happens, we die */ - if (test_and_set_bit(0, (void *) &adapter->dmaing)) - pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction); - - adapter->current_dma.direction = 1; - adapter->current_dma.start_time = jiffies; - - if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) { - skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen); - memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len); - target = isa_virt_to_bus(adapter->dma_buffer); - } - else { - target = isa_virt_to_bus(skb->data); - } - adapter->current_dma.skb = skb; - - flags=claim_dma_lock(); - disable_dma(dev->dma); - clear_dma_ff(dev->dma); - set_dma_mode(dev->dma, 0x48); /* dma memory -> io */ - set_dma_addr(dev->dma, target); - set_dma_count(dev->dma, nlen); - outb_control(adapter->hcr_val | DMAE | TCEN, dev); - enable_dma(dev->dma); - release_dma_lock(flags); - - if (elp_debug >= 3) - pr_debug("%s: DMA transfer started\n", dev->name); - - return true; -} - -/* - * The upper layer thinks we timed out - */ - -static void elp_timeout(struct net_device *dev) -{ - int stat; - - stat = inb_status(dev->base_addr); - pr_warning("%s: transmit timed out, lost %s?\n", dev->name, - (stat & ACRF) ? "interrupt" : "command"); - if (elp_debug >= 1) - pr_debug("%s: status %#02x\n", dev->name, stat); - dev->trans_start = jiffies; /* prevent tx timeout */ - dev->stats.tx_dropped++; - netif_wake_queue(dev); -} - -/****************************************************** - * - * start the transmitter - * return 0 if sent OK, else return 1 - * - ******************************************************/ - -static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - unsigned long flags; - elp_device *adapter = netdev_priv(dev); - - spin_lock_irqsave(&adapter->lock, flags); - check_3c505_dma(dev); - - if (elp_debug >= 3) - pr_debug("%s: request to send packet of length %d\n", dev->name, (int) skb->len); - - netif_stop_queue(dev); - - /* - * send the packet at skb->data for skb->len - */ - if (!send_packet(dev, skb)) { - if (elp_debug >= 2) { - pr_debug("%s: failed to transmit packet\n", dev->name); - } - spin_unlock_irqrestore(&adapter->lock, flags); - return NETDEV_TX_BUSY; - } - if (elp_debug >= 3) - pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len); - - prime_rx(dev); - spin_unlock_irqrestore(&adapter->lock, flags); - netif_start_queue(dev); - return NETDEV_TX_OK; -} - -/****************************************************** - * - * return statistics on the board - * - ******************************************************/ - -static struct net_device_stats *elp_get_stats(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - - if (elp_debug >= 3) - pr_debug("%s: request for stats\n", dev->name); - - /* If the device is closed, just return the latest stats we have, - - we cannot ask from the adapter without interrupts */ - if (!netif_running(dev)) - return &dev->stats; - - /* send a get statistics command to the board */ - adapter->tx_pcb.command = CMD_NETWORK_STATISTICS; - adapter->tx_pcb.length = 0; - 
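/*
 * Command/response handshake used throughout this driver: the
 * interrupt handler sets adapter->got[cmd] when the matching
 * response PCB arrives, so the flag is cleared before send_pcb()
 * and then busy-waited on afterwards, bounded by TIMEOUT.
 */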
adapter->got[CMD_NETWORK_STATISTICS] = 0; - if (!send_pcb(dev, &adapter->tx_pcb)) - pr_err("%s: couldn't send get statistics command\n", dev->name); - else { - unsigned long timeout = jiffies + TIMEOUT; - while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) { - TIMEOUT_MSG(__LINE__); - return &dev->stats; - } - } - - /* statistics are now up to date */ - return &dev->stats; -} - - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); -} - -static u32 netdev_get_msglevel(struct net_device *dev) -{ - return debug; -} - -static void netdev_set_msglevel(struct net_device *dev, u32 level) -{ - debug = level; -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_msglevel = netdev_get_msglevel, - .set_msglevel = netdev_set_msglevel, -}; - -/****************************************************** - * - * close the board - * - ******************************************************/ - -static int elp_close(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - - if (elp_debug >= 3) - pr_debug("%s: request to close device\n", dev->name); - - netif_stop_queue(dev); - - /* Someone may request the device statistic information even when - * the interface is closed. The following will update the statistics - * structure in the driver, so we'll be able to give current statistics. - */ - (void) elp_get_stats(dev); - - /* - * disable interrupts on the board - */ - outb_control(0, dev); - - /* - * release the IRQ - */ - free_irq(dev->irq, dev); - - free_dma(dev->dma); - free_pages((unsigned long) adapter->dma_buffer, get_order(DMA_BUFFER_SIZE)); - - return 0; -} - - -/************************************************************ - * - * Set multicast list - * num_addrs==0: clear mc_list - * num_addrs==-1: set promiscuous mode - * num_addrs>0: set mc_list - * - ************************************************************/ - -static void elp_set_mc_list(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - struct netdev_hw_addr *ha; - int i; - unsigned long flags; - - if (elp_debug >= 3) - pr_debug("%s: request to set multicast list\n", dev->name); - - spin_lock_irqsave(&adapter->lock, flags); - - if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { - /* send a "load multicast list" command to the board, max 10 addrs/cmd */ - /* if num_addrs==0 the list will be cleared */ - adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST; - adapter->tx_pcb.length = 6 * netdev_mc_count(dev); - i = 0; - netdev_for_each_mc_addr(ha, dev) - memcpy(adapter->tx_pcb.data.multicast[i++], - ha->addr, 6); - adapter->got[CMD_LOAD_MULTICAST_LIST] = 0; - if (!send_pcb(dev, &adapter->tx_pcb)) - pr_err("%s: couldn't send set_multicast command\n", dev->name); - else { - unsigned long timeout = jiffies + TIMEOUT; - while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) { - TIMEOUT_MSG(__LINE__); - } - } - if (!netdev_mc_empty(dev)) - adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI; - else /* num_addrs == 0 */ - adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD; - } else - adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC; - /* - * configure adapter to receive messages (as specified above) - * and wait for response - */ - if 
(elp_debug >= 3) - pr_debug("%s: sending 82586 configure command\n", dev->name); - adapter->tx_pcb.command = CMD_CONFIGURE_82586; - adapter->tx_pcb.length = 2; - adapter->got[CMD_CONFIGURE_82586] = 0; - if (!send_pcb(dev, &adapter->tx_pcb)) - { - spin_unlock_irqrestore(&adapter->lock, flags); - pr_err("%s: couldn't send 82586 configure command\n", dev->name); - } - else { - unsigned long timeout = jiffies + TIMEOUT; - spin_unlock_irqrestore(&adapter->lock, flags); - while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout)); - if (time_after_eq(jiffies, timeout)) - TIMEOUT_MSG(__LINE__); - } -} - -/************************************************************ - * - * A couple of tests to see if there's 3C505 or not - * Called only by elp_autodetect - ************************************************************/ - -static int __init elp_sense(struct net_device *dev) -{ - int addr = dev->base_addr; - const char *name = dev->name; - byte orig_HSR; - - if (!request_region(addr, ELP_IO_EXTENT, "3c505")) - return -ENODEV; - - orig_HSR = inb_status(addr); - - if (elp_debug > 0) - pr_debug(search_msg, name, addr); - - if (orig_HSR == 0xff) { - if (elp_debug > 0) - pr_cont(notfound_msg, 1); - goto out; - } - - /* Wait for a while; the adapter may still be booting up */ - if (elp_debug > 0) - pr_cont(stilllooking_msg); - - if (orig_HSR & DIR) { - /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */ - outb(0, dev->base_addr + PORT_CONTROL); - msleep(300); - if (inb_status(addr) & DIR) { - if (elp_debug > 0) - pr_cont(notfound_msg, 2); - goto out; - } - } else { - /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */ - outb(DIR, dev->base_addr + PORT_CONTROL); - msleep(300); - if (!(inb_status(addr) & DIR)) { - if (elp_debug > 0) - pr_cont(notfound_msg, 3); - goto out; - } - } - /* - * It certainly looks like a 3c505. 
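 * HSR.DIR followed HCR.DIR in whichever direction the probe forced
 * it, which is exactly what the two tests above verified.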
- */ - if (elp_debug > 0) - pr_cont(found_msg); - - return 0; -out: - release_region(addr, ELP_IO_EXTENT); - return -ENODEV; -} - -/************************************************************* - * - * Search through addr_list[] and try to find a 3C505 - * Called only by eplus_probe - *************************************************************/ - -static int __init elp_autodetect(struct net_device *dev) -{ - int idx = 0; - - /* if base address set, then only check that address - otherwise, run through the table */ - if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */ - if (elp_sense(dev) == 0) - return dev->base_addr; - } else - while ((dev->base_addr = addr_list[idx++])) { - if (elp_sense(dev) == 0) - return dev->base_addr; - } - - /* could not find an adapter */ - if (elp_debug > 0) - pr_debug(couldnot_msg, dev->name); - - return 0; /* Because of this, the layer above will return -ENODEV */ -} - -static const struct net_device_ops elp_netdev_ops = { - .ndo_open = elp_open, - .ndo_stop = elp_close, - .ndo_get_stats = elp_get_stats, - .ndo_start_xmit = elp_start_xmit, - .ndo_tx_timeout = elp_timeout, - .ndo_set_multicast_list = elp_set_mc_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/****************************************************** - * - * probe for an Etherlink Plus board at the specified address - * - ******************************************************/ - -/* There are three situations we need to be able to detect here: - - * a) the card is idle - * b) the card is still booting up - * c) the card is stuck in a strange state (some DOS drivers do this) - * - * In case (a), all is well. In case (b), we wait 10 seconds to see if the - * card finishes booting, and carry on if so. In case (c), we do a hard reset, - * loop round, and hope for the best. - * - * This is all very unpleasant, but hopefully avoids the problems with the old - * probe code (which had a 15-second delay if the card was idle, and didn't - * work at all if it was in a weird state). - */ - -static int __init elplus_setup(struct net_device *dev) -{ - elp_device *adapter = netdev_priv(dev); - int i, tries, tries1, okay; - unsigned long timeout; - unsigned long cookie = 0; - int err = -ENODEV; - - /* - * setup adapter structure - */ - - dev->base_addr = elp_autodetect(dev); - if (!dev->base_addr) - return -ENODEV; - - adapter->send_pcb_semaphore = 0; - - for (tries1 = 0; tries1 < 3; tries1++) { - outb_control((adapter->hcr_val | CMDE) & ~DIR, dev); - /* First try to write just one byte, to see if the card is - * responding at all normally. - */ - timeout = jiffies + 5*HZ/100; - okay = 0; - while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE)); - if ((inb_status(dev->base_addr) & HCRE)) { - outb_command(0, dev->base_addr); /* send a spurious byte */ - timeout = jiffies + 5*HZ/100; - while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE)); - if (inb_status(dev->base_addr) & HCRE) - okay = 1; - } - if (!okay) { - /* Nope, it's ignoring the command register. This means that - * either it's still booting up, or it's died. - */ - pr_err("%s: command register wouldn't drain, ", dev->name); - if ((inb_status(dev->base_addr) & 7) == 3) { - /* If the adapter status is 3, it *could* still be booting. - * Give it the benefit of the doubt for 10 seconds. 
- */ - pr_cont("assuming 3c505 still starting\n"); - timeout = jiffies + 10*HZ; - while (time_before(jiffies, timeout) && (inb_status(dev->base_addr) & 7)); - if (inb_status(dev->base_addr) & 7) { - pr_err("%s: 3c505 failed to start\n", dev->name); - } else { - okay = 1; /* It started */ - } - } else { - /* Otherwise, it must just be in a strange - * state. We probably need to kick it. - */ - pr_cont("3c505 is sulking\n"); - } - } - for (tries = 0; tries < 5 && okay; tries++) { - - /* - * Try to set the Ethernet address, to make sure that the board - * is working. - */ - adapter->tx_pcb.command = CMD_STATION_ADDRESS; - adapter->tx_pcb.length = 0; - cookie = probe_irq_on(); - if (!send_pcb(dev, &adapter->tx_pcb)) { - pr_err("%s: could not send first PCB\n", dev->name); - probe_irq_off(cookie); - continue; - } - if (!receive_pcb(dev, &adapter->rx_pcb)) { - pr_err("%s: could not read first PCB\n", dev->name); - probe_irq_off(cookie); - continue; - } - if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) || - (adapter->rx_pcb.length != 6)) { - pr_err("%s: first PCB wrong (%d, %d)\n", dev->name, - adapter->rx_pcb.command, adapter->rx_pcb.length); - probe_irq_off(cookie); - continue; - } - goto okay; - } - /* It's broken. Do a hard reset to re-initialise the board, - * and try again. - */ - pr_info("%s: resetting adapter\n", dev->name); - outb_control(adapter->hcr_val | FLSH | ATTN, dev); - outb_control(adapter->hcr_val & ~(FLSH | ATTN), dev); - } - pr_err("%s: failed to initialise 3c505\n", dev->name); - goto out; - - okay: - if (dev->irq) { /* Is there a preset IRQ? */ - int rpt = probe_irq_off(cookie); - if (dev->irq != rpt) { - pr_warning("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt); - } - /* if dev->irq == probe_irq_off(cookie), all is well */ - } else /* No preset IRQ; just use what we can detect */ - dev->irq = probe_irq_off(cookie); - switch (dev->irq) { /* Legal, sane? */ - case 0: - pr_err("%s: IRQ probe failed: check 3c505 jumpers.\n", - dev->name); - goto out; - case 1: - case 6: - case 8: - case 13: - pr_err("%s: Impossible IRQ %d reported by probe_irq_off().\n", - dev->name, dev->irq); - goto out; - } - /* - * Now we have the IRQ number so we can disable the interrupts from - * the board until the board is opened. 
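 * (They are re-enabled in elp_open(), which sets CMDE again before
 * configuring the adapter.)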
- */ - outb_control(adapter->hcr_val & ~CMDE, dev); - - /* - * copy Ethernet address into structure - */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i]; - - /* find a DMA channel */ - if (!dev->dma) { - if (dev->mem_start) { - dev->dma = dev->mem_start & 7; - } - else { - pr_warning("%s: warning, DMA channel not specified, using default\n", dev->name); - dev->dma = ELP_DMA; - } - } - - /* - * print remainder of startup message - */ - pr_info("%s: 3c505 at %#lx, irq %d, dma %d, addr %pM, ", - dev->name, dev->base_addr, dev->irq, dev->dma, dev->dev_addr); - /* - * read more information from the adapter - */ - - adapter->tx_pcb.command = CMD_ADAPTER_INFO; - adapter->tx_pcb.length = 0; - if (!send_pcb(dev, &adapter->tx_pcb) || - !receive_pcb(dev, &adapter->rx_pcb) || - (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) || - (adapter->rx_pcb.length != 10)) { - pr_cont("not responding to second PCB\n"); - } - pr_cont("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers, - adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz); - - /* - * reconfigure the adapter memory to better suit our purposes - */ - adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY; - adapter->tx_pcb.length = 12; - adapter->tx_pcb.data.memconf.cmd_q = 8; - adapter->tx_pcb.data.memconf.rcv_q = 8; - adapter->tx_pcb.data.memconf.mcast = 10; - adapter->tx_pcb.data.memconf.frame = 10; - adapter->tx_pcb.data.memconf.rcv_b = 10; - adapter->tx_pcb.data.memconf.progs = 0; - if (!send_pcb(dev, &adapter->tx_pcb) || - !receive_pcb(dev, &adapter->rx_pcb) || - (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) || - (adapter->rx_pcb.length != 2)) { - pr_err("%s: could not configure adapter memory\n", dev->name); - } - if (adapter->rx_pcb.data.configure) { - pr_err("%s: adapter configuration failed\n", dev->name); - } - - dev->netdev_ops = &elp_netdev_ops; - dev->watchdog_timeo = 10*HZ; - dev->ethtool_ops = &netdev_ethtool_ops; /* local */ - - dev->mem_start = dev->mem_end = 0; - - err = register_netdev(dev); - if (err) - goto out; - - return 0; -out: - release_region(dev->base_addr, ELP_IO_EXTENT); - return err; -} - -#ifndef MODULE -struct net_device * __init elplus_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(elp_device)); - int err; - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = elplus_setup(dev); - if (err) { - free_netdev(dev); - return ERR_PTR(err); - } - return dev; -} - -#else -static struct net_device *dev_3c505[ELP_MAX_CARDS]; -static int io[ELP_MAX_CARDS]; -static int irq[ELP_MAX_CARDS]; -static int dma[ELP_MAX_CARDS]; -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(dma, int, NULL, 0); -MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)"); -MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)"); -MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)"); - -int __init init_module(void) -{ - int this_dev, found = 0; - - for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) { - struct net_device *dev = alloc_etherdev(sizeof(elp_device)); - if (!dev) - break; - - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - if (dma[this_dev]) { - dev->dma = dma[this_dev]; - } else { - dev->dma = ELP_DMA; - pr_warning("3c505.c: warning, using default DMA channel,\n"); - } - if (io[this_dev] == 0) { - if (this_dev) { - free_netdev(dev); - break; - } - pr_notice("3c505.c: module autoprobe not recommended, 
give io=xx.\n"); - } - if (elplus_setup(dev) != 0) { - pr_warning("3c505.c: Failed to register card at 0x%x.\n", io[this_dev]); - free_netdev(dev); - break; - } - dev_3c505[this_dev] = dev; - found++; - } - if (!found) - return -ENODEV; - return 0; -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) { - struct net_device *dev = dev_3c505[this_dev]; - if (dev) { - unregister_netdev(dev); - release_region(dev->base_addr, ELP_IO_EXTENT); - free_netdev(dev); - } - } -} - -#endif /* MODULE */ -MODULE_LICENSE("GPL"); diff --git a/drivers/net/3c505.h b/drivers/net/3c505.h deleted file mode 100644 index 04df2a9..0000000 --- a/drivers/net/3c505.h +++ /dev/null @@ -1,292 +0,0 @@ -/***************************************************************** - * - * defines for 3Com Etherlink Plus adapter - * - *****************************************************************/ - -#define ELP_DMA 6 -#define ELP_RX_PCBS 4 -#define ELP_MAX_CARDS 4 - -/* - * I/O register offsets - */ -#define PORT_COMMAND 0x00 /* read/write, 8-bit */ -#define PORT_STATUS 0x02 /* read only, 8-bit */ -#define PORT_AUXDMA 0x02 /* write only, 8-bit */ -#define PORT_DATA 0x04 /* read/write, 16-bit */ -#define PORT_CONTROL 0x06 /* read/write, 8-bit */ - -#define ELP_IO_EXTENT 0x10 /* size of used IO registers */ - -/* - * host control registers bits - */ -#define ATTN 0x80 /* attention */ -#define FLSH 0x40 /* flush data register */ -#define DMAE 0x20 /* DMA enable */ -#define DIR 0x10 /* direction */ -#define TCEN 0x08 /* terminal count interrupt enable */ -#define CMDE 0x04 /* command register interrupt enable */ -#define HSF2 0x02 /* host status flag 2 */ -#define HSF1 0x01 /* host status flag 1 */ - -/* - * combinations of HSF flags used for PCB transmission - */ -#define HSF_PCB_ACK HSF1 -#define HSF_PCB_NAK HSF2 -#define HSF_PCB_END (HSF2|HSF1) -#define HSF_PCB_MASK (HSF2|HSF1) - -/* - * host status register bits - */ -#define HRDY 0x80 /* data register ready */ -#define HCRE 0x40 /* command register empty */ -#define ACRF 0x20 /* adapter command register full */ -/* #define DIR 0x10 direction - same as in control register */ -#define DONE 0x08 /* DMA done */ -#define ASF3 0x04 /* adapter status flag 3 */ -#define ASF2 0x02 /* adapter status flag 2 */ -#define ASF1 0x01 /* adapter status flag 1 */ - -/* - * combinations of ASF flags used for PCB reception - */ -#define ASF_PCB_ACK ASF1 -#define ASF_PCB_NAK ASF2 -#define ASF_PCB_END (ASF2|ASF1) -#define ASF_PCB_MASK (ASF2|ASF1) - -/* - * host aux DMA register bits - */ -#define DMA_BRST 0x01 /* DMA burst */ - -/* - * maximum amount of data allowed in a PCB - */ -#define MAX_PCB_DATA 62 - -/***************************************************************** - * - * timeout value - * this is a rough value used for loops to stop them from - * locking up the whole machine in the case of failure or - * error conditions - * - *****************************************************************/ - -#define TIMEOUT 300 - -/***************************************************************** - * - * PCB commands - * - *****************************************************************/ - -enum { - /* - * host PCB commands - */ - CMD_CONFIGURE_ADAPTER_MEMORY = 0x01, - CMD_CONFIGURE_82586 = 0x02, - CMD_STATION_ADDRESS = 0x03, - CMD_DMA_DOWNLOAD = 0x04, - CMD_DMA_UPLOAD = 0x05, - CMD_PIO_DOWNLOAD = 0x06, - CMD_PIO_UPLOAD = 0x07, - CMD_RECEIVE_PACKET = 0x08, - CMD_TRANSMIT_PACKET = 0x09, - CMD_NETWORK_STATISTICS = 0x0a, - 
CMD_LOAD_MULTICAST_LIST = 0x0b, - CMD_CLEAR_PROGRAM = 0x0c, - CMD_DOWNLOAD_PROGRAM = 0x0d, - CMD_EXECUTE_PROGRAM = 0x0e, - CMD_SELF_TEST = 0x0f, - CMD_SET_STATION_ADDRESS = 0x10, - CMD_ADAPTER_INFO = 0x11, - NUM_TRANSMIT_CMDS, - - /* - * adapter PCB commands - */ - CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31, - CMD_CONFIGURE_82586_RESPONSE = 0x32, - CMD_ADDRESS_RESPONSE = 0x33, - CMD_DOWNLOAD_DATA_REQUEST = 0x34, - CMD_UPLOAD_DATA_REQUEST = 0x35, - CMD_RECEIVE_PACKET_COMPLETE = 0x38, - CMD_TRANSMIT_PACKET_COMPLETE = 0x39, - CMD_NETWORK_STATISTICS_RESPONSE = 0x3a, - CMD_LOAD_MULTICAST_RESPONSE = 0x3b, - CMD_CLEAR_PROGRAM_RESPONSE = 0x3c, - CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d, - CMD_EXECUTE_RESPONSE = 0x3e, - CMD_SELF_TEST_RESPONSE = 0x3f, - CMD_SET_ADDRESS_RESPONSE = 0x40, - CMD_ADAPTER_INFO_RESPONSE = 0x41 -}; - -/* Definitions for the PCB data structure */ - -/* Data units */ -typedef unsigned char byte; -typedef unsigned short int word; -typedef unsigned long int dword; - -/* Data structures */ -struct Memconf { - word cmd_q, - rcv_q, - mcast, - frame, - rcv_b, - progs; -}; - -struct Rcv_pkt { - word buf_ofs, - buf_seg, - buf_len, - timeout; -}; - -struct Xmit_pkt { - word buf_ofs, - buf_seg, - pkt_len; -}; - -struct Rcv_resp { - word buf_ofs, - buf_seg, - buf_len, - pkt_len, - timeout, - status; - dword timetag; -}; - -struct Xmit_resp { - word buf_ofs, - buf_seg, - c_stat, - status; -}; - - -struct Netstat { - dword tot_recv, - tot_xmit; - word err_CRC, - err_align, - err_res, - err_ovrrun; -}; - - -struct Selftest { - word error; - union { - word ROM_cksum; - struct { - word ofs, seg; - } RAM; - word i82586; - } failure; -}; - -struct Info { - byte minor_vers, - major_vers; - word ROM_cksum, - RAM_sz, - free_ofs, - free_seg; -}; - -struct Memdump { - word size, - off, - seg; -}; - -/* -Primary Command Block. The most important data structure. All communication -between the host and the adapter is done with these. (Except for the actual -Ethernet data, which has different packaging.) 
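A PCB is at most 64 bytes on the wire: a command byte, a length byte,
and up to MAX_PCB_DATA (62) bytes of command-specific data.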
-*/ -typedef struct { - byte command; - byte length; - union { - struct Memconf memconf; - word configure; - struct Rcv_pkt rcv_pkt; - struct Xmit_pkt xmit_pkt; - byte multicast[10][6]; - byte eth_addr[6]; - byte failed; - struct Rcv_resp rcv_resp; - struct Xmit_resp xmit_resp; - struct Netstat netstat; - struct Selftest selftest; - struct Info info; - struct Memdump memdump; - byte raw[62]; - } data; -} pcb_struct; - -/* These defines for 'configure' */ -#define RECV_STATION 0x00 -#define RECV_BROAD 0x01 -#define RECV_MULTI 0x02 -#define RECV_PROMISC 0x04 -#define NO_LOOPBACK 0x00 -#define INT_LOOPBACK 0x08 -#define EXT_LOOPBACK 0x10 - -/***************************************************************** - * - * structure to hold context information for adapter - * - *****************************************************************/ - -#define DMA_BUFFER_SIZE 1600 -#define BACKLOG_SIZE 4 - -typedef struct { - volatile short got[NUM_TRANSMIT_CMDS]; /* flags for - command completion */ - pcb_struct tx_pcb; /* PCB for foreground sending */ - pcb_struct rx_pcb; /* PCB for foreground receiving */ - pcb_struct itx_pcb; /* PCB for background sending */ - pcb_struct irx_pcb; /* PCB for background receiving */ - - void *dma_buffer; - - struct { - unsigned int length[BACKLOG_SIZE]; - unsigned int in; - unsigned int out; - } rx_backlog; - - struct { - unsigned int direction; - unsigned int length; - struct sk_buff *skb; - void *target; - unsigned long start_time; - } current_dma; - - /* flags */ - unsigned long send_pcb_semaphore; - unsigned long dmaing; - unsigned long busy; - - unsigned int rx_active; /* number of receive PCBs */ - volatile unsigned char hcr_val; /* what we think the HCR contains */ - spinlock_t lock; /* Interrupt v tx lock */ -} elp_device; diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c deleted file mode 100644 index 1e94555..0000000 --- a/drivers/net/3c507.c +++ /dev/null @@ -1,939 +0,0 @@ -/* 3c507.c: An EtherLink16 device driver for Linux. */ -/* - Written 1993,1994 by Donald Becker. - - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This software may be used and distributed according to the terms - of the GNU General Public License, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - - Thanks go to jennings@Montrouge.SMR.slb.com ( Patrick Jennings) - and jrs@world.std.com (Rick Sladkey) for testing and bugfixes. - Mark Salazar made the changes for cards with - only 16K packet buffers. - - Things remaining to do: - Verify that the tx and rx buffers don't have fencepost errors. - Move the theory of operation and memory map documentation. - The statistics need to be updated correctly. -*/ - -#define DRV_NAME "3c507" -#define DRV_VERSION "1.10a" -#define DRV_RELDATE "11/17/2001" - -static const char version[] = - DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n"; - -/* - Sources: - This driver wouldn't have been written with the availability of the - Crynwr driver source code. It provided a known-working implementation - that filled in the gaping holes of the Intel documentation. Three cheers - for Russ Nelson. - - Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough - info that the casual reader might think that it documents the i82586 :-<. 
-*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* use 0 for production, 1 for verification, 2..7 for debug */ -#ifndef NET_DEBUG -#define NET_DEBUG 1 -#endif -static unsigned int net_debug = NET_DEBUG; -#define debug net_debug - - -/* - Details of the i82586. - - You'll really need the databook to understand the details of this part, - but the outline is that the i82586 has two separate processing units. - Both are started from a list of three configuration tables, of which only - the last, the System Control Block (SCB), is used after reset-time. The SCB - has the following fields: - Status word - Command word - Tx/Command block addr. - Rx block addr. - The command word accepts the following controls for the Tx and Rx units: - */ - -#define CUC_START 0x0100 -#define CUC_RESUME 0x0200 -#define CUC_SUSPEND 0x0300 -#define RX_START 0x0010 -#define RX_RESUME 0x0020 -#define RX_SUSPEND 0x0030 - -/* The Rx unit uses a list of frame descriptors and a list of data buffer - descriptors. We use full-sized (1518 byte) data buffers, so there is - a one-to-one pairing of frame descriptors to buffer descriptors. - - The Tx ("command") unit executes a list of commands that look like: - Status word Written by the 82586 when the command is done. - Command word Command in lower 3 bits, post-command action in upper 3 - Link word The address of the next command. - Parameters (as needed). - - Some definitions related to the Command Word are: - */ -#define CMD_EOL 0x8000 /* The last command of the list, stop. */ -#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ -#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ - -enum commands { - CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, - CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7}; - -/* Information that need to be kept for each board. */ -struct net_local { - int last_restart; - ushort rx_head; - ushort rx_tail; - ushort tx_head; - ushort tx_cmd_link; - ushort tx_reap; - ushort tx_pkts_in_ring; - spinlock_t lock; - void __iomem *base; -}; - -/* - Details of the EtherLink16 Implementation - The 3c507 is a generic shared-memory i82586 implementation. - The host can map 16K, 32K, 48K, or 64K of the 64K memory into - 0x0[CD][08]0000, or all 64K into 0xF[02468]0000. - */ - -/* Offsets from the base I/O address. */ -#define SA_DATA 0 /* Station address data, or 3Com signature. */ -#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */ -#define RESET_IRQ 10 /* Reset the latched IRQ line. */ -#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */ -#define ROM_CONFIG 13 -#define MEM_CONFIG 14 -#define IRQ_CONFIG 15 -#define EL16_IO_EXTENT 16 - -/* The ID port is used at boot-time to locate the ethercard. */ -#define ID_PORT 0x100 - -/* Offsets to registers in the mailbox (SCB). */ -#define iSCB_STATUS 0x8 -#define iSCB_CMD 0xA -#define iSCB_CBL 0xC /* Command BLock offset. */ -#define iSCB_RFA 0xE /* Rx Frame Area offset. */ - -/* Since the 3c507 maps the shared memory window so that the last byte is - at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or - 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively. 
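 For example, a 16K window gives SCB_BASE = 64K - 16K = 0xC000: host
 offset 0 then corresponds to 82586 address 0xC000, and the last host
 byte falls at 0xFFFF.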
- We can account for this by setting the 'SCB Base' entry in the ISCP table
- below for all the 16 bit offset addresses, and also adding the 'SCB Base'
- value to all 24 bit physical addresses (in the SCP table and the TX and RX
- Buffer Descriptors).
- -Mark
- */
-#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start))
-
-/*
- What follows in 'init_words[]' is the "program" that is downloaded to the
- 82586 memory. It's mostly tables and command blocks, and starts at the
- reset address 0xfffff6. This is designed to be similar to the EtherExpress,
- thus the unusual location of the SCB at 0x0008.
-
- Even with the additional "don't care" values, doing it this way takes less
- program space than initializing the individual tables, and I feel it's much
- cleaner.
-
- The databook is particularly useless for the first two structures, I had
- to use the Crynwr driver as an example.
-
- The memory setup is as follows:
- */
-
-#define CONFIG_CMD 0x0018
-#define SET_SA_CMD 0x0024
-#define SA_OFFSET 0x002A
-#define IDLELOOP 0x30
-#define TDR_CMD 0x38
-#define TDR_TIME 0x3C
-#define DUMP_CMD 0x40
-#define DIAG_CMD 0x48
-#define SET_MC_CMD 0x4E
-#define DUMP_DATA 0x56 /* A 170 byte buffer for dump and Set-MC into. */
-
-#define TX_BUF_START 0x0100
-#define NUM_TX_BUFS 5
-#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */
-
-#define RX_BUF_START 0x2000
-#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
-#define RX_BUF_END (dev->mem_end - dev->mem_start)
-
-#define TX_TIMEOUT (HZ/20)
-
-/*
- That's it: only 86 bytes to set up the beast, including every extra
- command available. The 170 byte buffer at DUMP_DATA is shared between the
- Dump command (called only by the diagnostic program) and the SetMulticastList
- command.
-
- To complete the memory setup you only have to write the station address at
- SA_OFFSET and create the Tx & Rx buffer lists.
-
- The Tx command chain and buffer list is set up as follows:
- A Tx command table, with the data buffer pointing to...
- A Tx data buffer descriptor. The packet is in a single buffer, rather than
- chaining together several smaller buffers.
- A NoOp command, which initially points to itself,
- And the packet data.
-
- A transmit is done by filling in the Tx command table and data buffer,
- re-writing the NoOp command, and finally changing the offset of the last
- command to point to the current Tx command. When the Tx command is finished,
- it jumps to the NoOp, where it loops until the next Tx command changes the
- "link offset" in the NoOp. This way the 82586 never has to go through the
- slow restart sequence.
-
- The Rx buffer list is set up in the obvious ring structure. We have enough
- memory (and low enough interrupt latency) that we can avoid the complicated
- Rx buffer linked lists by always associating a full-size Rx data buffer with
- each Rx data frame.
-
- I currently use four transmit buffers starting at TX_BUF_START (0x0100), and
- use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers.
-
- */
-
-static unsigned short init_words[] = {
- /* System Configuration Pointer (SCP). */
- 0x0000, /* Set bus size to 16 bits. */
- 0,0, /* pad words. */
- 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */
-
- /* Intermediate System Configuration Pointer (ISCP). */
- 0x0001, /* Status word that's cleared when init is done. */
- 0x0008,0,0, /* SCB offset, (skip, skip) */
-
- /* System Control Block (SCB). */
- 0,0xf000|RX_START|CUC_START, /* SCB status and cmd.
*/ - CONFIG_CMD, /* Command list pointer, points to Configure. */ - RX_BUF_START, /* Rx block list. */ - 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */ - - /* 0x0018: Configure command. Change to put MAC data with packet. */ - 0, CmdConfigure, /* Status, command. */ - SET_SA_CMD, /* Next command is Set Station Addr. */ - 0x0804, /* "4" bytes of config data, 8 byte FIFO. */ - 0x2e40, /* Magic values, including MAC data location. */ - 0, /* Unused pad word. */ - - /* 0x0024: Setup station address command. */ - 0, CmdSASetup, - SET_MC_CMD, /* Next command. */ - 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */ - - /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */ - 0, CmdNOp, IDLELOOP, 0 /* pad */, - - /* 0x0038: A unused Time-Domain Reflectometer command. */ - 0, CmdTDR, IDLELOOP, 0, - - /* 0x0040: An unused Dump State command. */ - 0, CmdDump, IDLELOOP, DUMP_DATA, - - /* 0x0048: An unused Diagnose command. */ - 0, CmdDiagnose, IDLELOOP, - - /* 0x004E: An empty set-multicast-list command. */ - 0, CmdMulticastList, IDLELOOP, 0, -}; - -/* Index to functions, as function prototypes. */ - -static int el16_probe1(struct net_device *dev, int ioaddr); -static int el16_open(struct net_device *dev); -static netdev_tx_t el16_send_packet(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t el16_interrupt(int irq, void *dev_id); -static void el16_rx(struct net_device *dev); -static int el16_close(struct net_device *dev); -static void el16_tx_timeout (struct net_device *dev); - -static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad); -static void init_82586_mem(struct net_device *dev); -static const struct ethtool_ops netdev_ethtool_ops; -static void init_rx_bufs(struct net_device *); - -static int io = 0x300; -static int irq; -static int mem_start; - - -/* Check for a network adaptor of this type, and return '0' iff one exists. - If dev->base_addr == 0, probe all likely locations. - If dev->base_addr == 1, always return failure. - If dev->base_addr == 2, (detachable devices only) allocate space for the - device and return success. - */ - -struct net_device * __init el16_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); - static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0}; - const unsigned *port; - int err = -ENODEV; - - if (!dev) - return ERR_PTR(-ENODEV); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - mem_start = dev->mem_start & 15; - } - - if (io > 0x1ff) /* Check a single specified location. */ - err = el16_probe1(dev, io); - else if (io != 0) - err = -ENXIO; /* Don't probe at all. 
*/ - else { - for (port = ports; *port; port++) { - err = el16_probe1(dev, *port); - if (!err) - break; - } - } - - if (err) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - free_irq(dev->irq, dev); - iounmap(((struct net_local *)netdev_priv(dev))->base); - release_region(dev->base_addr, EL16_IO_EXTENT); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops netdev_ops = { - .ndo_open = el16_open, - .ndo_stop = el16_close, - .ndo_start_xmit = el16_send_packet, - .ndo_tx_timeout = el16_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init el16_probe1(struct net_device *dev, int ioaddr) -{ - static unsigned char init_ID_done; - int i, irq, irqval, retval; - struct net_local *lp; - - if (init_ID_done == 0) { - ushort lrs_state = 0xff; - /* Send the ID sequence to the ID_PORT to enable the board(s). */ - outb(0x00, ID_PORT); - for(i = 0; i < 255; i++) { - outb(lrs_state, ID_PORT); - lrs_state <<= 1; - if (lrs_state & 0x100) - lrs_state ^= 0xe7; - } - outb(0x00, ID_PORT); - init_ID_done = 1; - } - - if (!request_region(ioaddr, EL16_IO_EXTENT, DRV_NAME)) - return -ENODEV; - - if ((inb(ioaddr) != '*') || (inb(ioaddr + 1) != '3') || - (inb(ioaddr + 2) != 'C') || (inb(ioaddr + 3) != 'O')) { - retval = -ENODEV; - goto out; - } - - pr_info("%s: 3c507 at %#x,", dev->name, ioaddr); - - /* We should make a few more checks here, like the first three octets of - the S.A. for the manufacturer's code. */ - - irq = inb(ioaddr + IRQ_CONFIG) & 0x0f; - - irqval = request_irq(irq, el16_interrupt, 0, DRV_NAME, dev); - if (irqval) { - pr_cont("\n"); - pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval); - retval = -EAGAIN; - goto out; - } - - /* We've committed to using the board, and can start filling in *dev. */ - dev->base_addr = ioaddr; - - outb(0x01, ioaddr + MISC_CTRL); - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + i); - pr_cont(" %pM", dev->dev_addr); - - if (mem_start) - net_debug = mem_start & 7; - -#ifdef MEM_BASE - dev->mem_start = MEM_BASE; - dev->mem_end = dev->mem_start + 0x10000; -#else - { - int base; - int size; - char mem_config = inb(ioaddr + MEM_CONFIG); - if (mem_config & 0x20) { - size = 64*1024; - base = 0xf00000 + (mem_config & 0x08 ? 0x080000 - : ((mem_config & 3) << 17)); - } else { - size = ((mem_config & 3) + 1) << 14; - base = 0x0c0000 + ( (mem_config & 0x18) << 12); - } - dev->mem_start = base; - dev->mem_end = base + size; - } -#endif - - dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0; - dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f; - - pr_cont(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq, - dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1); - - if (net_debug) - pr_debug("%s", version); - - lp = netdev_priv(dev); - spin_lock_init(&lp->lock); - lp->base = ioremap(dev->mem_start, RX_BUF_END); - if (!lp->base) { - pr_err("3c507: unable to remap memory\n"); - retval = -EAGAIN; - goto out1; - } - - dev->netdev_ops = &netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - dev->ethtool_ops = &netdev_ethtool_ops; - dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */ - return 0; -out1: - free_irq(dev->irq, dev); -out: - release_region(ioaddr, EL16_IO_EXTENT); - return retval; -} - -static int el16_open(struct net_device *dev) -{ - /* Initialize the 82586 memory and start it. 
*/ - init_82586_mem(dev); - - netif_start_queue(dev); - return 0; -} - - -static void el16_tx_timeout (struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - void __iomem *shmem = lp->base; - - if (net_debug > 1) - pr_debug("%s: transmit timed out, %s? ", dev->name, - readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" : - "network cable problem"); - /* Try to restart the adaptor. */ - if (lp->last_restart == dev->stats.tx_packets) { - if (net_debug > 1) - pr_cont("Resetting board.\n"); - /* Completely reset the adaptor. */ - init_82586_mem (dev); - lp->tx_pkts_in_ring = 0; - } else { - /* Issue the channel attention signal and hope it "gets better". */ - if (net_debug > 1) - pr_cont("Kicking board.\n"); - writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD); - outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */ - lp->last_restart = dev->stats.tx_packets; - } - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue (dev); -} - - -static netdev_tx_t el16_send_packet (struct sk_buff *skb, - struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long flags; - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; - unsigned char *buf = skb->data; - - netif_stop_queue (dev); - - spin_lock_irqsave (&lp->lock, flags); - - dev->stats.tx_bytes += length; - /* Disable the 82586's input to the interrupt line. */ - outb (0x80, ioaddr + MISC_CTRL); - - hardware_send_packet (dev, buf, skb->len, length - skb->len); - - /* Enable the 82586 interrupt input. */ - outb (0x84, ioaddr + MISC_CTRL); - - spin_unlock_irqrestore (&lp->lock, flags); - - dev_kfree_skb (skb); - - /* You might need to clean up and record Tx statistics here. */ - - return NETDEV_TX_OK; -} - -/* The typical workload of the driver: - Handle the network interface interrupts. */ -static irqreturn_t el16_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct net_local *lp; - int ioaddr, status, boguscount = 0; - ushort ack_cmd = 0; - void __iomem *shmem; - - if (dev == NULL) { - pr_err("net_interrupt(): irq %d for unknown device.\n", irq); - return IRQ_NONE; - } - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - shmem = lp->base; - - spin_lock(&lp->lock); - - status = readw(shmem+iSCB_STATUS); - - if (net_debug > 4) { - pr_debug("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status); - } - - /* Disable the 82586's input to the interrupt line. */ - outb(0x80, ioaddr + MISC_CTRL); - - /* Reap the Tx packet buffers. */ - while (lp->tx_pkts_in_ring) { - unsigned short tx_status = readw(shmem+lp->tx_reap); - if (!(tx_status & 0x8000)) { - if (net_debug > 5) - pr_debug("Tx command incomplete (%#x).\n", lp->tx_reap); - break; - } - /* Tx unsuccessful or some interesting status bit set. */ - if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) { - dev->stats.tx_errors++; - if (tx_status & 0x0600) dev->stats.tx_carrier_errors++; - if (tx_status & 0x0100) dev->stats.tx_fifo_errors++; - if (!(tx_status & 0x0040)) dev->stats.tx_heartbeat_errors++; - if (tx_status & 0x0020) dev->stats.tx_aborted_errors++; - dev->stats.collisions += tx_status & 0xf; - } - dev->stats.tx_packets++; - if (net_debug > 5) - pr_debug("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status); - lp->tx_reap += TX_BUF_SIZE; - if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE) - lp->tx_reap = TX_BUF_START; - - lp->tx_pkts_in_ring--; - /* There is always more space in the Tx ring buffer now. 
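 tx_pkts_in_ring was just decremented, so at least one of the
 NUM_TX_BUFS slots is free again.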
*/ - netif_wake_queue(dev); - - if (++boguscount > 10) - break; - } - - if (status & 0x4000) { /* Packet received. */ - if (net_debug > 5) - pr_debug("Received packet, rx_head %04x.\n", lp->rx_head); - el16_rx(dev); - } - - /* Acknowledge the interrupt sources. */ - ack_cmd = status & 0xf000; - - if ((status & 0x0700) != 0x0200 && netif_running(dev)) { - if (net_debug) - pr_debug("%s: Command unit stopped, status %04x, restarting.\n", - dev->name, status); - /* If this ever occurs we should really re-write the idle loop, reset - the Tx list, and do a complete restart of the command unit. - For now we rely on the Tx timeout if the resume doesn't work. */ - ack_cmd |= CUC_RESUME; - } - - if ((status & 0x0070) != 0x0040 && netif_running(dev)) { - /* The Rx unit is not ready, it must be hung. Restart the receiver by - initializing the rx buffers, and issuing an Rx start command. */ - if (net_debug) - pr_debug("%s: Rx unit stopped, status %04x, restarting.\n", - dev->name, status); - init_rx_bufs(dev); - writew(RX_BUF_START,shmem+iSCB_RFA); - ack_cmd |= RX_START; - } - - writew(ack_cmd,shmem+iSCB_CMD); - outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */ - - /* Clear the latched interrupt. */ - outb(0, ioaddr + RESET_IRQ); - - /* Enable the 82586's interrupt input. */ - outb(0x84, ioaddr + MISC_CTRL); - spin_unlock(&lp->lock); - return IRQ_HANDLED; -} - -static int el16_close(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - void __iomem *shmem = lp->base; - - netif_stop_queue(dev); - - /* Flush the Tx and disable Rx. */ - writew(RX_SUSPEND | CUC_SUSPEND,shmem+iSCB_CMD); - outb(0, ioaddr + SIGNAL_CA); - - /* Disable the 82586's input to the interrupt line. */ - outb(0x80, ioaddr + MISC_CTRL); - - /* We always physically use the IRQ line, so we don't do free_irq(). */ - - /* Update the statistics here. */ - - return 0; -} - -/* Initialize the Rx-block list. */ -static void init_rx_bufs(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - void __iomem *write_ptr; - unsigned short SCB_base = SCB_BASE; - - int cur_rxbuf = lp->rx_head = RX_BUF_START; - - /* Initialize each Rx frame + data buffer. */ - do { /* While there is room for one more. */ - - write_ptr = lp->base + cur_rxbuf; - - writew(0x0000,write_ptr); /* Status */ - writew(0x0000,write_ptr+=2); /* Command */ - writew(cur_rxbuf + RX_BUF_SIZE,write_ptr+=2); /* Link */ - writew(cur_rxbuf + 22,write_ptr+=2); /* Buffer offset */ - writew(0x0000,write_ptr+=2); /* Pad for dest addr. */ - writew(0x0000,write_ptr+=2); - writew(0x0000,write_ptr+=2); - writew(0x0000,write_ptr+=2); /* Pad for source addr. */ - writew(0x0000,write_ptr+=2); - writew(0x0000,write_ptr+=2); - writew(0x0000,write_ptr+=2); /* Pad for protocol. */ - - writew(0x0000,write_ptr+=2); /* Buffer: Actual count */ - writew(-1,write_ptr+=2); /* Buffer: Next (none). */ - writew(cur_rxbuf + 0x20 + SCB_base,write_ptr+=2);/* Buffer: Address low */ - writew(0x0000,write_ptr+=2); - /* Finally, the number of bytes in the buffer. */ - writew(0x8000 + RX_BUF_SIZE-0x20,write_ptr+=2); - - lp->rx_tail = cur_rxbuf; - cur_rxbuf += RX_BUF_SIZE; - } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE); - - /* Terminate the list by setting the EOL bit, and wrap the pointer to make - the list a ring. */ - write_ptr = lp->base + lp->rx_tail + 2; - writew(0xC000,write_ptr); /* Command, mark as last. 
*/ - writew(lp->rx_head,write_ptr+2); /* Link */ -} - -static void init_82586_mem(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - void __iomem *shmem = lp->base; - - /* Enable loopback to protect the wire while starting up, - and hold the 586 in reset during the memory initialization. */ - outb(0x20, ioaddr + MISC_CTRL); - - /* Fix the ISCP address and base. */ - init_words[3] = SCB_BASE; - init_words[7] = SCB_BASE; - - /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */ - memcpy_toio(lp->base + RX_BUF_END - 10, init_words, 10); - - /* Write the words at 0x0000. */ - memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10); - - /* Fill in the station address. */ - memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN); - - /* The Tx-block list is written as needed. We just set up the values. */ - lp->tx_cmd_link = IDLELOOP + 4; - lp->tx_head = lp->tx_reap = TX_BUF_START; - - init_rx_bufs(dev); - - /* Start the 586 by releasing the reset line, but leave loopback. */ - outb(0xA0, ioaddr + MISC_CTRL); - - /* This was time consuming to track down: you need to give two channel - attention signals to reliably start up the i82586. */ - outb(0, ioaddr + SIGNAL_CA); - - { - int boguscnt = 50; - while (readw(shmem+iSCB_STATUS) == 0) - if (--boguscnt == 0) { - pr_warning("%s: i82586 initialization timed out with status %04x, cmd %04x.\n", - dev->name, readw(shmem+iSCB_STATUS), readw(shmem+iSCB_CMD)); - break; - } - /* Issue channel-attn -- the 82586 won't start. */ - outb(0, ioaddr + SIGNAL_CA); - } - - /* Disable loopback and enable interrupts. */ - outb(0x84, ioaddr + MISC_CTRL); - if (net_debug > 4) - pr_debug("%s: Initialized 82586, status %04x.\n", dev->name, - readw(shmem+iSCB_STATUS)); -} - -static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad) -{ - struct net_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - ushort tx_block = lp->tx_head; - void __iomem *write_ptr = lp->base + tx_block; - static char padding[ETH_ZLEN]; - - /* Set the write pointer to the Tx block, and put out the header. */ - writew(0x0000,write_ptr); /* Tx status */ - writew(CMD_INTR|CmdTx,write_ptr+=2); /* Tx command */ - writew(tx_block+16,write_ptr+=2); /* Next command is a NoOp. */ - writew(tx_block+8,write_ptr+=2); /* Data Buffer offset. */ - - /* Output the data buffer descriptor. */ - writew((pad + length) | 0x8000,write_ptr+=2); /* Byte count parameter. */ - writew(-1,write_ptr+=2); /* No next data buffer. */ - writew(tx_block+22+SCB_BASE,write_ptr+=2); /* Buffer follows the NoOp command. */ - writew(0x0000,write_ptr+=2); /* Buffer address high bits (always zero). */ - - /* Output the Loop-back NoOp command. */ - writew(0x0000,write_ptr+=2); /* Tx status */ - writew(CmdNOp,write_ptr+=2); /* Tx command */ - writew(tx_block+16,write_ptr+=2); /* Next is myself. */ - - /* Output the packet at the write pointer. */ - memcpy_toio(write_ptr+2, buf, length); - if (pad) - memcpy_toio(write_ptr+length+2, padding, pad); - - /* Set the old command link pointing to this send packet. */ - writew(tx_block,lp->base + lp->tx_cmd_link); - lp->tx_cmd_link = tx_block + 20; - - /* Set the next free tx region. 
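 It advances by TX_BUF_SIZE and wraps back to TX_BUF_START before it
 could collide with RX_BUF_START.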
*/ - lp->tx_head = tx_block + TX_BUF_SIZE; - if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE) - lp->tx_head = TX_BUF_START; - - if (net_debug > 4) { - pr_debug("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n", - dev->name, ioaddr, length, tx_block, lp->tx_head); - } - - /* Grimly block further packets if there has been insufficient reaping. */ - if (++lp->tx_pkts_in_ring < NUM_TX_BUFS) - netif_wake_queue(dev); -} - -static void el16_rx(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - void __iomem *shmem = lp->base; - ushort rx_head = lp->rx_head; - ushort rx_tail = lp->rx_tail; - ushort boguscount = 10; - short frame_status; - - while ((frame_status = readw(shmem+rx_head)) < 0) { /* Command complete */ - void __iomem *read_frame = lp->base + rx_head; - ushort rfd_cmd = readw(read_frame+2); - ushort next_rx_frame = readw(read_frame+4); - ushort data_buffer_addr = readw(read_frame+6); - void __iomem *data_frame = lp->base + data_buffer_addr; - ushort pkt_len = readw(data_frame); - - if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22 || - (pkt_len & 0xC000) != 0xC000) { - pr_err("%s: Rx frame at %#x corrupted, " - "status %04x cmd %04x next %04x " - "data-buf @%04x %04x.\n", - dev->name, rx_head, frame_status, rfd_cmd, - next_rx_frame, data_buffer_addr, pkt_len); - } else if ((frame_status & 0x2000) == 0) { - /* Frame Rxed, but with error. */ - dev->stats.rx_errors++; - if (frame_status & 0x0800) dev->stats.rx_crc_errors++; - if (frame_status & 0x0400) dev->stats.rx_frame_errors++; - if (frame_status & 0x0200) dev->stats.rx_fifo_errors++; - if (frame_status & 0x0100) dev->stats.rx_over_errors++; - if (frame_status & 0x0080) dev->stats.rx_length_errors++; - } else { - /* Malloc up new buffer. */ - struct sk_buff *skb; - - pkt_len &= 0x3fff; - skb = dev_alloc_skb(pkt_len+2); - if (skb == NULL) { - pr_err("%s: Memory squeeze, dropping packet.\n", - dev->name); - dev->stats.rx_dropped++; - break; - } - - skb_reserve(skb,2); - - /* 'skb->data' points to the start of sk_buff data area. */ - memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len); - - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - - /* Clear the status word and set End-of-List on the rx frame. */ - writew(0,read_frame); - writew(0xC000,read_frame+2); - /* Clear the end-of-list on the prev. RFD. */ - writew(0x0000,lp->base + rx_tail + 2); - - rx_tail = rx_head; - rx_head = next_rx_frame; - if (--boguscount == 0) - break; - } - - lp->rx_head = rx_head; - lp->rx_tail = rx_tail; -} - -static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_NAME); - strcpy(info->version, DRV_VERSION); - sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); -} - -static u32 netdev_get_msglevel(struct net_device *dev) -{ - return debug; -} - -static void netdev_set_msglevel(struct net_device *dev, u32 level) -{ - debug = level; -} - -static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_msglevel = netdev_get_msglevel, - .set_msglevel = netdev_set_msglevel, -}; - -#ifdef MODULE -static struct net_device *dev_3c507; -module_param(io, int, 0); -module_param(irq, int, 0); -MODULE_PARM_DESC(io, "EtherLink16 I/O base address"); -MODULE_PARM_DESC(irq, "(ignored)"); - -int __init init_module(void) -{ - if (io == 0) - pr_notice("3c507: You should not use auto-probing with insmod!\n"); - dev_3c507 = el16_probe(-1); - return IS_ERR(dev_3c507) ? 
PTR_ERR(dev_3c507) : 0;
-}
-
-void __exit
-cleanup_module(void)
-{
-	struct net_device *dev = dev_3c507;
-	unregister_netdev(dev);
-	free_irq(dev->irq, dev);
-	iounmap(((struct net_local *)netdev_priv(dev))->base);
-	release_region(dev->base_addr, EL16_IO_EXTENT);
-	free_netdev(dev);
-}
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
deleted file mode 100644
index bc0d1a1..0000000
--- a/drivers/net/3c523.c
+++ /dev/null
@@ -1,1312 +0,0 @@
-/*
-   net-3-driver for the 3c523 Etherlink/MC card (i82586 Ethernet chip)
-
-
-   This is an extension to the Linux operating system, and is covered by the
-   same GNU General Public License that covers that work.
-
-   Copyright 1995, 1996 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca)
-
-   This is basically Michael Hipp's ni52 driver, with a new probing
-   algorithm and some minor changes to the 82586 CA and reset routines.
-   Thanks a lot Michael for a really clean i82586 implementation!  Unless
-   otherwise documented in ni52.c, any bugs are mine.
-
-   Contrary to the Ethernet-HOWTO, this isn't based on the 3c507 driver in
-   any way.  The ni52 is a lot easier to modify.
-
-   sources:
-   ni52.c
-
-   Crynwr packet driver collection was a great reference for my first
-   attempt at this sucker.  The 3c507 driver also helped, until I noticed
-   that ni52.c was a lot nicer.
-
-   EtherLink/MC: Micro Channel Ethernet Adapter Technical Reference
-   Manual, courtesy of 3Com CardFacts, documents the 3c523-specific
-   stuff.  Information on CardFacts is found in the Ethernet HOWTO.
-   Also see
-
-   Microprocessor Communications Support Chips, T.J. Byers, ISBN
-   0-444-01224-9, has a section on the i82586.  It tells you just enough
-   to know that you really don't want to learn how to program the chip.
-
-   The original device probe code was stolen from ps2esdi.c
-
-   Known Problems:
-   Since most of the code was stolen from ni52.c, you'll run across the
-   same bugs in the 0.62 version of ni52.c, plus maybe a few because of
-   the 3c523 idiosyncrasies.  The 3c523 has 16K of RAM though, so there
-   shouldn't be the overrun problem that the 8K ni52 has.
-
-   This driver is for a 16K adapter.  It should work fine on the 64K
-   adapters, but it will only use one of the 4 banks of RAM.  Modifying
-   this for the 64K version would require a lot of heinous bank
-   switching, which I'm certainly not interested in doing.  If you try to
-   implement a bank switching version, you'll basically have to remember
-   what bank is enabled and do a switch every time you access a memory
-   location that's not current (a rough sketch of the idea appears just
-   below).  You'll also have to remap pointers on the driver side,
-   because it only knows about 16K of the memory.  Anyone desperate or
-   masochistic enough to try?
-
-   It seems to be stable now when multiple transmit buffers are used.  I
-   can't see any performance difference, but then I'm working on a 386SX.
-
-   Multicast doesn't work.  It doesn't even pretend to work.  Don't use
-   it.  Don't compile your kernel with multicast support.  I don't know
-   why.
-
-   Features:
-   This driver is usable as a loadable module.  If you try to specify an
-   IRQ or an IO address (via insmod 3c523.o irq=xx io=0xyyy), it will
-   search the MCA slots until it finds a 3c523 with the specified
-   parameters.
-
-   This driver does support multiple ethernet cards when used as a module
-   (up to MAX_3C523_CARDS, the default being 4)
-
-   This has been tested with both BNC and TP versions, internal and
-   external transceivers.  Haven't tested with the 64K version (that I
-   know of).
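[Editorial sketch of the bank-switching bookkeeping described above. None of
this is 3c523 code: the 16K window, the ELMC_BANK_SEL register and bank_base
are invented stand-ins for whatever a 64K card would really expose.]

#define BANK_SHIFT	14			/* hypothetical 16K window      */
#define BANK_MASK	((1 << BANK_SHIFT) - 1)
#define ELMC_BANK_SEL	6			/* hypothetical select register */

static int cur_bank = -1;			/* bank currently mapped in     */

static u8 banked_readb(void __iomem *bank_base, int ioaddr, u32 off)
{
	int bank = off >> BANK_SHIFT;

	if (bank != cur_bank) {			/* switch only when forced to   */
		outb(bank, ioaddr + ELMC_BANK_SEL);
		cur_bank = bank;
	}
	/* remap the 24-bit card offset into the single visible window */
	return readb(bank_base + (off & BANK_MASK));
}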
-
-   History:
-   Jan 1st, 1996
-   first public release
-   Feb 4th, 1996
-   update to 1.3.59, incorporated multicast diffs from ni52.c
-   Feb 15th, 1996
-   added shared irq support
-   Apr 1999
-   added support for multiple cards when used as a module
-   added option to disable multicast as it causes problems
-       Ganesh Sittampalam
-       Stuart Adamson
-   Nov 2001
-   added support for ethtool (jgarzik)
-
-   $Header: /fsys2/home/chrisb/linux-1.3.59-MCA/drivers/net/RCS/3c523.c,v 1.1 1996/02/05 01:53:46 chrisb Exp chrisb $
- */
-
-#define DRV_NAME		"3c523"
-#define DRV_VERSION		"17-Nov-2001"
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-
-#include "3c523.h"
-
-/*************************************************************************/
-#define DEBUG			/* debug on */
-#define SYSBUSVAL 0		/* 1 = 8 Bit, 0 = 16 bit - 3c523 only does 16 bit */
-#undef ELMC_MULTICAST		/* Disable multicast support as it is somewhat seriously broken at the moment */
-
-#define make32(ptr16) (p->memtop + (short) (ptr16) )
-#define make24(ptr32) ((char *) (ptr32) - p->base)
-#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop ))
-
-/*************************************************************************/
-/*
-   Tables to which we can map values in the configuration registers.
- */
-static int irq_table[] __initdata = {
-	12, 7, 3, 9
-};
-
-static int csr_table[] __initdata = {
-	0x300, 0x1300, 0x2300, 0x3300
-};
-
-static int shm_table[] __initdata = {
-	0x0c0000, 0x0c8000, 0x0d0000, 0x0d8000
-};
-
-/******************* how to calculate the buffers *****************************
-
-
-  * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
-  * --------------- in a different (more stable?) mode. Only in this mode it's
-  * possible to configure the driver with 'NO_NOPCOMMANDS'
-
-sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
-sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
-sizeof(rfd) = 24; sizeof(rbd) = 12;
-sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
-sizeof(nop_cmd) = 8;
-
-  * if you don't know the driver, better do not change these values (a
-  * worked example of the arithmetic follows the channel-attention
-  * helper below): */
-
-#define RECV_BUFF_SIZE 1524	/* slightly oversized */
-#define XMIT_BUFF_SIZE 1524	/* slightly oversized */
-#define NUM_XMIT_BUFFS 1	/* config for both, 8K and 16K shmem */
-#define NUM_RECV_BUFFS_8 4	/* config for 8K shared mem */
-#define NUM_RECV_BUFFS_16 9	/* config for 16K shared mem */
-
-#if (NUM_XMIT_BUFFS == 1)
-#define NO_NOPCOMMANDS		/* only possible with NUM_XMIT_BUFFS=1 */
-#endif
-
-/**************************************************************************/
-
-#define DELAY(x) { mdelay(32 * x); }
-
-/* a much shorter delay: */
-#define DELAY_16() { udelay(16) ; }
-
-/* wait for command with timeout: */
-#define WAIT_4_SCB_CMD() { int i; \
-	for(i=0;i<1024;i++) { \
-		if(!p->scb->cmd) break; \
-		DELAY_16(); \
-		if(i == 1023) { \
-			pr_warning("%s:%d: scb_cmd timed out ..
resetting i82586\n",\ - dev->name,__LINE__); \ - elmc_id_reset586(); } } } - -static irqreturn_t elmc_interrupt(int irq, void *dev_id); -static int elmc_open(struct net_device *dev); -static int elmc_close(struct net_device *dev); -static netdev_tx_t elmc_send_packet(struct sk_buff *, struct net_device *); -static struct net_device_stats *elmc_get_stats(struct net_device *dev); -static void elmc_timeout(struct net_device *dev); -#ifdef ELMC_MULTICAST -static void set_multicast_list(struct net_device *dev); -#endif -static const struct ethtool_ops netdev_ethtool_ops; - -/* helper-functions */ -static int init586(struct net_device *dev); -static int check586(struct net_device *dev, unsigned long where, unsigned size); -static void alloc586(struct net_device *dev); -static void startrecv586(struct net_device *dev); -static void *alloc_rfa(struct net_device *dev, void *ptr); -static void elmc_rcv_int(struct net_device *dev); -static void elmc_xmt_int(struct net_device *dev); -static void elmc_rnr_int(struct net_device *dev); - -struct priv { - unsigned long base; - char *memtop; - unsigned long mapped_start; /* Start of ioremap */ - volatile struct rfd_struct *rfd_last, *rfd_top, *rfd_first; - volatile struct scp_struct *scp; /* volatile is important */ - volatile struct iscp_struct *iscp; /* volatile is important */ - volatile struct scb_struct *scb; /* volatile is important */ - volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; -#if (NUM_XMIT_BUFFS == 1) - volatile struct transmit_cmd_struct *xmit_cmds[2]; - volatile struct nop_cmd_struct *nop_cmds[2]; -#else - volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; - volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; -#endif - volatile int nop_point, num_recv_buffs; - volatile char *xmit_cbuffs[NUM_XMIT_BUFFS]; - volatile int xmit_count, xmit_last; - volatile int slot; -}; - -#define elmc_attn586() {elmc_do_attn586(dev->base_addr,ELMC_CTRL_INTE);} -#define elmc_reset586() {elmc_do_reset586(dev->base_addr,ELMC_CTRL_INTE);} - -/* with interrupts disabled - this will clear the interrupt bit in the - 3c523 control register, and won't put it back. This effectively - disables interrupts on the card. */ -#define elmc_id_attn586() {elmc_do_attn586(dev->base_addr,0);} -#define elmc_id_reset586() {elmc_do_reset586(dev->base_addr,0);} - -/*************************************************************************/ -/* - Do a Channel Attention on the 3c523. This is extremely board dependent. - */ -static void elmc_do_attn586(int ioaddr, int ints) -{ - /* the 3c523 requires a minimum of 500 ns. The delays here might be - a little too large, and hence they may cut the performance of the - card slightly. If someone who knows a little more about Linux - timing would care to play with these, I'd appreciate it. */ - - /* this bit masking stuff is crap. I'd rather have separate - registers with strobe triggers for each of these functions. - Ya take what ya got. */ - - outb(ELMC_CTRL_RST | 0x3 | ELMC_CTRL_CA | ints, ioaddr + ELMC_CTRL); - DELAY_16(); /* > 500 ns */ - outb(ELMC_CTRL_RST | 0x3 | ints, ioaddr + ELMC_CTRL); -} - -/*************************************************************************/ -/* - Reset the 82586 on the 3c523. Also very board dependent. 
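[Worked example for the buffer-calculation comment earlier in the file,
using only the sizeof values quoted there. The three macro names are local
to this sketch; it shows why NUM_RECV_BUFFS_16 can be 9 on the 16K card.]

#define CTRL_BYTES	(12 + 8 + 16)		/* scp + iscp + scb = 36 = INIT   */
#define RX_BYTES	(1524 + 24 + 12)	/* recv buffer + rfd + rbd = 1560 */
#define TX_BYTES	(1524 + 8 + 16 + 2*8)	/* xmit buffer + tbd + cmd + nops */

/* 36 + 9*1560 + 1564 = 15640 <= 16384: nine receive buffers plus one
 * transmit buffer fit in 16K of shared memory; a tenth would not. */
typedef char fits_in_16k[(CTRL_BYTES + 9*RX_BYTES + TX_BYTES <= 0x4000) ? 1 : -1];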
- */ -static void elmc_do_reset586(int ioaddr, int ints) -{ - /* toggle the RST bit low then high */ - outb(0x3 | ELMC_CTRL_LBK, ioaddr + ELMC_CTRL); - DELAY_16(); /* > 500 ns */ - outb(ELMC_CTRL_RST | ELMC_CTRL_LBK | 0x3, ioaddr + ELMC_CTRL); - - elmc_do_attn586(ioaddr, ints); -} - -/********************************************** - * close device - */ - -static int elmc_close(struct net_device *dev) -{ - netif_stop_queue(dev); - elmc_id_reset586(); /* the hard way to stop the receiver */ - free_irq(dev->irq, dev); - return 0; -} - -/********************************************** - * open device - */ - -static int elmc_open(struct net_device *dev) -{ - int ret; - - elmc_id_attn586(); /* disable interrupts */ - - ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED, - dev->name, dev); - if (ret) { - pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq); - elmc_id_reset586(); - return ret; - } - alloc586(dev); - init586(dev); - startrecv586(dev); - netif_start_queue(dev); - return 0; /* most done by init */ -} - -/********************************************** - * Check to see if there's an 82586 out there. - */ - -static int __init check586(struct net_device *dev, unsigned long where, unsigned size) -{ - struct priv *p = netdev_priv(dev); - char *iscp_addrs[2]; - int i = 0; - - p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000; - p->memtop = isa_bus_to_virt((unsigned long)where) + size; - p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); - memset((char *) p->scp, 0, sizeof(struct scp_struct)); - p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */ - - iscp_addrs[0] = isa_bus_to_virt((unsigned long)where); - iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct); - - for (i = 0; i < 2; i++) { - p->iscp = (struct iscp_struct *) iscp_addrs[i]; - memset((char *) p->iscp, 0, sizeof(struct iscp_struct)); - - p->scp->iscp = make24(p->iscp); - p->iscp->busy = 1; - - elmc_id_reset586(); - - /* reset586 does an implicit CA */ - - /* apparently, you sometimes have to kick the 82586 twice... */ - elmc_id_attn586(); - DELAY(1); - - if (p->iscp->busy) { /* i82586 clears 'busy' after successful init */ - return 0; - } - } - return 1; -} - -/****************************************************************** - * set iscp at the right place, called by elmc_probe and open586. 
- */ - -static void alloc586(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - elmc_id_reset586(); - DELAY(2); - - p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS); - p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start); - p->iscp = (struct iscp_struct *) ((char *) p->scp - sizeof(struct iscp_struct)); - - memset((char *) p->iscp, 0, sizeof(struct iscp_struct)); - memset((char *) p->scp, 0, sizeof(struct scp_struct)); - - p->scp->iscp = make24(p->iscp); - p->scp->sysbus = SYSBUSVAL; - p->iscp->scb_offset = make16(p->scb); - - p->iscp->busy = 1; - elmc_id_reset586(); - elmc_id_attn586(); - - DELAY(2); - - if (p->iscp->busy) - pr_err("%s: Init-Problems (alloc).\n", dev->name); - - memset((char *) p->scb, 0, sizeof(struct scb_struct)); -} - -/*****************************************************************/ - -static int elmc_getinfo(char *buf, int slot, void *d) -{ - int len = 0; - struct net_device *dev = d; - - if (dev == NULL) - return len; - - len += sprintf(buf + len, "Revision: 0x%x\n", - inb(dev->base_addr + ELMC_REVISION) & 0xf); - len += sprintf(buf + len, "IRQ: %d\n", dev->irq); - len += sprintf(buf + len, "IO Address: %#lx-%#lx\n", dev->base_addr, - dev->base_addr + ELMC_IO_EXTENT); - len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start, - dev->mem_end - 1); - len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ? - "External" : "Internal"); - len += sprintf(buf + len, "Device: %s\n", dev->name); - len += sprintf(buf + len, "Hardware Address: %pM\n", - dev->dev_addr); - - return len; -} /* elmc_getinfo() */ - -static const struct net_device_ops netdev_ops = { - .ndo_open = elmc_open, - .ndo_stop = elmc_close, - .ndo_get_stats = elmc_get_stats, - .ndo_start_xmit = elmc_send_packet, - .ndo_tx_timeout = elmc_timeout, -#ifdef ELMC_MULTICAST - .ndo_set_multicast_list = set_multicast_list, -#endif - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/*****************************************************************/ - -static int __init do_elmc_probe(struct net_device *dev) -{ - static int slot; - int base_addr = dev->base_addr; - int irq = dev->irq; - u_char status = 0; - u_char revision = 0; - int i = 0; - unsigned int size = 0; - int retval; - struct priv *pr = netdev_priv(dev); - - if (MCA_bus == 0) { - return -ENODEV; - } - /* search through the slots for the 3c523. */ - slot = mca_find_adapter(ELMC_MCA_ID, 0); - while (slot != -1) { - status = mca_read_stored_pos(slot, 2); - - dev->irq=irq_table[(status & ELMC_STATUS_IRQ_SELECT) >> 6]; - dev->base_addr=csr_table[(status & ELMC_STATUS_CSR_SELECT) >> 1]; - - /* - If we're trying to match a specified irq or IO address, - we'll reject a match unless it's what we're looking for. - Also reject it if the card is already in use. - */ - - if ((irq && irq != dev->irq) || - (base_addr && base_addr != dev->base_addr)) { - slot = mca_find_adapter(ELMC_MCA_ID, slot + 1); - continue; - } - if (!request_region(dev->base_addr, ELMC_IO_EXTENT, DRV_NAME)) { - slot = mca_find_adapter(ELMC_MCA_ID, slot + 1); - continue; - } - - /* found what we're looking for... */ - break; - } - - /* we didn't find any 3c523 in the slots we checked for */ - if (slot == MCA_NOTFOUND) - return (base_addr || irq) ? 
-ENXIO : -ENODEV; - - mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC"); - mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev); - - /* if we get this far, adapter has been found - carry on */ - pr_info("%s: 3c523 adapter found in slot %d\n", dev->name, slot + 1); - - /* Now we extract configuration info from the card. - The 3c523 provides information in two of the POS registers, but - the second one is only needed if we want to tell the card what IRQ - to use. I suspect that whoever sets the thing up initially would - prefer we don't screw with those things. - - Note that we read the status info when we found the card... - - See 3c523.h for more details. - */ - - /* revision is stored in the first 4 bits of the revision register */ - revision = inb(dev->base_addr + ELMC_REVISION) & 0xf; - - /* according to docs, we read the interrupt and write it back to - the IRQ select register, since the POST might not configure the IRQ - properly. */ - switch (dev->irq) { - case 3: - mca_write_pos(slot, 3, 0x04); - break; - case 7: - mca_write_pos(slot, 3, 0x02); - break; - case 9: - mca_write_pos(slot, 3, 0x08); - break; - case 12: - mca_write_pos(slot, 3, 0x01); - break; - } - - pr->slot = slot; - - pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision, - dev->base_addr); - - /* Determine if we're using the on-board transceiver (i.e. coax) or - an external one. The information is pretty much useless, but I - guess it's worth brownie points. */ - dev->if_port = (status & ELMC_STATUS_DISABLE_THIN); - - /* The 3c523 has a 24K chunk of memory. The first 16K is the - shared memory, while the last 8K is for the EtherStart BIOS ROM. - Which we don't care much about here. We'll just tell Linux that - we're using 16K. MCA won't permit address space conflicts caused - by not mapping the other 8K. */ - dev->mem_start = shm_table[(status & ELMC_STATUS_MEMORY_SELECT) >> 3]; - - /* We're using MCA, so it's a given that the information about memory - size is correct. The Crynwr drivers do something like this. */ - - elmc_id_reset586(); /* seems like a good idea before checking it... */ - - size = 0x4000; /* check for 16K mem */ - if (!check586(dev, dev->mem_start, size)) { - pr_err("%s: memprobe, Can't find memory at 0x%lx!\n", dev->name, - dev->mem_start); - retval = -ENODEV; - goto err_out; - } - dev->mem_end = dev->mem_start + size; /* set mem_end showed by 'ifconfig' */ - - pr->memtop = isa_bus_to_virt(dev->mem_start) + size; - pr->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000; - alloc586(dev); - - elmc_id_reset586(); /* make sure it doesn't generate spurious ints */ - - /* set number of receive-buffs according to memsize */ - pr->num_recv_buffs = NUM_RECV_BUFFS_16; - - /* dump all the assorted information */ - pr_info("%s: IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->name, - dev->irq, dev->if_port ? "ex" : "in", - dev->mem_start, dev->mem_end - 1); - - /* The hardware address for the 3c523 is stored in the first six - bytes of the IO address. */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(dev->base_addr + i); - - pr_info("%s: hardware address %pM\n", - dev->name, dev->dev_addr); - - dev->netdev_ops = &netdev_ops; - dev->watchdog_timeo = HZ; - dev->ethtool_ops = &netdev_ethtool_ops; - - /* note that we haven't actually requested the IRQ from the kernel. - That gets done in elmc_open(). I'm not sure that's such a good idea, - but it works, so I'll go with it. 
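[The IRQ write-back switch above is the inverse of the irq_table[] lookup
used when reading the POS registers: entry i of the table corresponds to
POS bit 1<<i. The same mapping written as a loop, purely as an illustration.]

static void __init write_irq_pos(int slot, int irq)
{
	int i;

	/* irq_table[] = { 12, 7, 3, 9 }; POS bits 0x01, 0x02, 0x04, 0x08 */
	for (i = 0; i < 4; i++)
		if (irq_table[i] == irq)
			mca_write_pos(slot, 3, 1 << i);
}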
*/ - -#ifndef ELMC_MULTICAST - dev->flags&=~IFF_MULTICAST; /* Multicast doesn't work */ -#endif - - retval = register_netdev(dev); - if (retval) - goto err_out; - - return 0; -err_out: - mca_set_adapter_procfn(slot, NULL, NULL); - release_region(dev->base_addr, ELMC_IO_EXTENT); - return retval; -} - -#ifdef MODULE -static void cleanup_card(struct net_device *dev) -{ - mca_set_adapter_procfn(((struct priv *)netdev_priv(dev))->slot, - NULL, NULL); - release_region(dev->base_addr, ELMC_IO_EXTENT); -} -#else -struct net_device * __init elmc_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct priv)); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_elmc_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -/********************************************** - * init the chip (elmc-interrupt should be disabled?!) - * needs a correct 'allocated' memory - */ - -static int init586(struct net_device *dev) -{ - void *ptr; - unsigned long s; - int i, result = 0; - struct priv *p = netdev_priv(dev); - volatile struct configure_cmd_struct *cfg_cmd; - volatile struct iasetup_cmd_struct *ias_cmd; - volatile struct tdr_cmd_struct *tdr_cmd; - volatile struct mcsetup_cmd_struct *mc_cmd; - struct netdev_hw_addr *ha; - int num_addrs = netdev_mc_count(dev); - - ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct)); - - cfg_cmd = (struct configure_cmd_struct *) ptr; /* configure-command */ - cfg_cmd->cmd_status = 0; - cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST; - cfg_cmd->cmd_link = 0xffff; - - cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ - cfg_cmd->fifo = 0x08; /* fifo-limit (8=tx:32/rx:64) */ - cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ - cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ - cfg_cmd->priority = 0x00; - cfg_cmd->ifs = 0x60; - cfg_cmd->time_low = 0x00; - cfg_cmd->time_high = 0xf2; - cfg_cmd->promisc = 0; - if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) - cfg_cmd->promisc = 1; - cfg_cmd->carr_coll = 0x00; - - p->scb->cbl_offset = make16(cfg_cmd); - - p->scb->cmd = CUC_START; /* cmd.-unit start */ - elmc_id_attn586(); - - s = jiffies; /* warning: only active with interrupts on !! */ - while (!(cfg_cmd->cmd_status & STAT_COMPL)) { - if (time_after(jiffies, s + 30*HZ/100)) - break; - } - - if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) { - pr_warning("%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status); - return 1; - } - /* - * individual address setup - */ - ias_cmd = (struct iasetup_cmd_struct *) ptr; - - ias_cmd->cmd_status = 0; - ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST; - ias_cmd->cmd_link = 0xffff; - - memcpy((char *) &ias_cmd->iaddr, (char *) dev->dev_addr, ETH_ALEN); - - p->scb->cbl_offset = make16(ias_cmd); - - p->scb->cmd = CUC_START; /* cmd.-unit start */ - elmc_id_attn586(); - - s = jiffies; - while (!(ias_cmd->cmd_status & STAT_COMPL)) { - if (time_after(jiffies, s + 30*HZ/100)) - break; - } - - if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) { - pr_warning("%s (elmc): individual address setup command failed: %04x\n", - dev->name, ias_cmd->cmd_status); - return 1; - } - /* - * TDR, wire check .. e.g. 
no resistor e.t.c - */ - tdr_cmd = (struct tdr_cmd_struct *) ptr; - - tdr_cmd->cmd_status = 0; - tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST; - tdr_cmd->cmd_link = 0xffff; - tdr_cmd->status = 0; - - p->scb->cbl_offset = make16(tdr_cmd); - - p->scb->cmd = CUC_START; /* cmd.-unit start */ - elmc_attn586(); - - s = jiffies; - while (!(tdr_cmd->cmd_status & STAT_COMPL)) { - if (time_after(jiffies, s + 30*HZ/100)) { - pr_warning("%s: %d Problems while running the TDR.\n", dev->name, __LINE__); - result = 1; - break; - } - } - - if (!result) { - DELAY(2); /* wait for result */ - result = tdr_cmd->status; - - p->scb->cmd = p->scb->status & STAT_MASK; - elmc_id_attn586(); /* ack the interrupts */ - - if (result & TDR_LNK_OK) { - /* empty */ - } else if (result & TDR_XCVR_PRB) { - pr_warning("%s: TDR: Transceiver problem!\n", dev->name); - } else if (result & TDR_ET_OPN) { - pr_warning("%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK); - } else if (result & TDR_ET_SRT) { - if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */ - pr_warning("%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK); - } else { - pr_warning("%s: TDR: Unknown status %04x\n", dev->name, result); - } - } - /* - * ack interrupts - */ - p->scb->cmd = p->scb->status & STAT_MASK; - elmc_id_attn586(); - - /* - * alloc nop/xmit-cmds - */ -#if (NUM_XMIT_BUFFS == 1) - for (i = 0; i < 2; i++) { - p->nop_cmds[i] = (struct nop_cmd_struct *) ptr; - p->nop_cmds[i]->cmd_cmd = CMD_NOP; - p->nop_cmds[i]->cmd_status = 0; - p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); - ptr = (char *) ptr + sizeof(struct nop_cmd_struct); - } - p->xmit_cmds[0] = (struct transmit_cmd_struct *) ptr; /* transmit cmd/buff 0 */ - ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); -#else - for (i = 0; i < NUM_XMIT_BUFFS; i++) { - p->nop_cmds[i] = (struct nop_cmd_struct *) ptr; - p->nop_cmds[i]->cmd_cmd = CMD_NOP; - p->nop_cmds[i]->cmd_status = 0; - p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); - ptr = (char *) ptr + sizeof(struct nop_cmd_struct); - p->xmit_cmds[i] = (struct transmit_cmd_struct *) ptr; /*transmit cmd/buff 0 */ - ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); - } -#endif - - ptr = alloc_rfa(dev, (void *) ptr); /* init receive-frame-area */ - - /* - * Multicast setup - */ - - if (num_addrs) { - /* I don't understand this: do we really need memory after the init? 
*/ - int len = ((char *) p->iscp - (char *) ptr - 8) / 6; - if (len <= 0) { - pr_err("%s: Ooooops, no memory for MC-Setup!\n", dev->name); - } else { - if (len < num_addrs) { - num_addrs = len; - pr_warning("%s: Sorry, can only apply %d MC-Address(es).\n", - dev->name, num_addrs); - } - mc_cmd = (struct mcsetup_cmd_struct *) ptr; - mc_cmd->cmd_status = 0; - mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST; - mc_cmd->cmd_link = 0xffff; - mc_cmd->mc_cnt = num_addrs * 6; - i = 0; - netdev_for_each_mc_addr(ha, dev) - memcpy((char *) mc_cmd->mc_list[i++], - ha->addr, 6); - p->scb->cbl_offset = make16(mc_cmd); - p->scb->cmd = CUC_START; - elmc_id_attn586(); - s = jiffies; - while (!(mc_cmd->cmd_status & STAT_COMPL)) { - if (time_after(jiffies, s + 30*HZ/100)) - break; - } - if (!(mc_cmd->cmd_status & STAT_COMPL)) { - pr_warning("%s: Can't apply multicast-address-list.\n", dev->name); - } - } - } - /* - * alloc xmit-buffs / init xmit_cmds - */ - for (i = 0; i < NUM_XMIT_BUFFS; i++) { - p->xmit_cbuffs[i] = (char *) ptr; /* char-buffs */ - ptr = (char *) ptr + XMIT_BUFF_SIZE; - p->xmit_buffs[i] = (struct tbd_struct *) ptr; /* TBD */ - ptr = (char *) ptr + sizeof(struct tbd_struct); - if ((void *) ptr > (void *) p->iscp) { - pr_err("%s: not enough shared-mem for your configuration!\n", dev->name); - return 1; - } - memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct)); - memset((char *) (p->xmit_buffs[i]), 0, sizeof(struct tbd_struct)); - p->xmit_cmds[i]->cmd_status = STAT_COMPL; - p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT; - p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i])); - p->xmit_buffs[i]->next = 0xffff; - p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i])); - } - - p->xmit_count = 0; - p->xmit_last = 0; -#ifndef NO_NOPCOMMANDS - p->nop_point = 0; -#endif - - /* - * 'start transmitter' (nop-loop) - */ -#ifndef NO_NOPCOMMANDS - p->scb->cbl_offset = make16(p->nop_cmds[0]); - p->scb->cmd = CUC_START; - elmc_id_attn586(); - WAIT_4_SCB_CMD(); -#else - p->xmit_cmds[0]->cmd_link = 0xffff; - p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT; -#endif - - return 0; -} - -/****************************************************** - * This is a helper routine for elmc_rnr_int() and init586(). - * It sets up the Receive Frame Area (RFA). - */ - -static void *alloc_rfa(struct net_device *dev, void *ptr) -{ - volatile struct rfd_struct *rfd = (struct rfd_struct *) ptr; - volatile struct rbd_struct *rbd; - int i; - struct priv *p = netdev_priv(dev); - - memset((char *) rfd, 0, sizeof(struct rfd_struct) * p->num_recv_buffs); - p->rfd_first = rfd; - - for (i = 0; i < p->num_recv_buffs; i++) { - rfd[i].next = make16(rfd + (i + 1) % p->num_recv_buffs); - } - rfd[p->num_recv_buffs - 1].last = RFD_SUSP; /* RU suspend */ - - ptr = (void *) (rfd + p->num_recv_buffs); - - rbd = (struct rbd_struct *) ptr; - ptr = (void *) (rbd + p->num_recv_buffs); - - /* clr descriptors */ - memset((char *) rbd, 0, sizeof(struct rbd_struct) * p->num_recv_buffs); - - for (i = 0; i < p->num_recv_buffs; i++) { - rbd[i].next = make16((rbd + (i + 1) % p->num_recv_buffs)); - rbd[i].size = RECV_BUFF_SIZE; - rbd[i].buffer = make24(ptr); - ptr = (char *) ptr + RECV_BUFF_SIZE; - } - - p->rfd_top = p->rfd_first; - p->rfd_last = p->rfd_first + p->num_recv_buffs - 1; - - p->scb->rfa_offset = make16(p->rfd_first); - p->rfd_first->rbd_offset = make16(rbd); - - return ptr; -} - - -/************************************************** - * Interrupt Handler ... 
- */ - -static irqreturn_t -elmc_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - unsigned short stat; - struct priv *p; - - if (!netif_running(dev)) { - /* The 3c523 has this habit of generating interrupts during the - reset. I'm not sure if the ni52 has this same problem, but it's - really annoying if we haven't finished initializing it. I was - hoping all the elmc_id_* commands would disable this, but I - might have missed a few. */ - - elmc_id_attn586(); /* ack inter. and disable any more */ - return IRQ_HANDLED; - } else if (!(ELMC_CTRL_INT & inb(dev->base_addr + ELMC_CTRL))) { - /* wasn't this device */ - return IRQ_NONE; - } - /* reading ELMC_CTRL also clears the INT bit. */ - - p = netdev_priv(dev); - - while ((stat = p->scb->status & STAT_MASK)) - { - p->scb->cmd = stat; - elmc_attn586(); /* ack inter. */ - - if (stat & STAT_CX) { - /* command with I-bit set complete */ - elmc_xmt_int(dev); - } - if (stat & STAT_FR) { - /* received a frame */ - elmc_rcv_int(dev); - } -#ifndef NO_NOPCOMMANDS - if (stat & STAT_CNA) { - /* CU went 'not ready' */ - if (netif_running(dev)) { - pr_warning("%s: oops! CU has left active state. stat: %04x/%04x.\n", - dev->name, (int) stat, (int) p->scb->status); - } - } -#endif - - if (stat & STAT_RNR) { - /* RU went 'not ready' */ - - if (p->scb->status & RU_SUSPEND) { - /* special case: RU_SUSPEND */ - - WAIT_4_SCB_CMD(); - p->scb->cmd = RUC_RESUME; - elmc_attn586(); - } else { - pr_warning("%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n", - dev->name, (int) stat, (int) p->scb->status); - elmc_rnr_int(dev); - } - } - WAIT_4_SCB_CMD(); /* wait for ack. (elmc_xmt_int can be faster than ack!!) */ - if (p->scb->cmd) { /* timed out? */ - break; - } - } - return IRQ_HANDLED; -} - -/******************************************************* - * receive-interrupt - */ - -static void elmc_rcv_int(struct net_device *dev) -{ - int status; - unsigned short totlen; - struct sk_buff *skb; - struct rbd_struct *rbd; - struct priv *p = netdev_priv(dev); - - for (; (status = p->rfd_top->status) & STAT_COMPL;) { - rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); - - if (status & STAT_OK) { /* frame received without error? */ - if ((totlen = rbd->status) & RBD_LAST) { /* the first and the last buffer? */ - totlen &= RBD_MASK; /* length of this frame */ - rbd->status = 0; - skb = (struct sk_buff *) dev_alloc_skb(totlen + 2); - if (skb != NULL) { - skb_reserve(skb, 2); /* 16 byte alignment */ - skb_put(skb,totlen); - skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += totlen; - } else { - dev->stats.rx_dropped++; - } - } else { - pr_warning("%s: received oversized frame.\n", dev->name); - dev->stats.rx_dropped++; - } - } else { /* frame !(ok), only with 'save-bad-frames' */ - pr_warning("%s: oops! rfd-error-status: %04x\n", dev->name, status); - dev->stats.rx_errors++; - } - p->rfd_top->status = 0; - p->rfd_top->last = RFD_SUSP; - p->rfd_last->last = 0; /* delete RU_SUSP */ - p->rfd_last = p->rfd_top; - p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ - } -} - -/********************************************************** - * handle 'Receiver went not ready'. 
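[The last few lines of elmc_rcv_int() above move the RFD suspend mark
instead of clearing it, so exactly one descriptor always fences the receive
unit away from frames the driver still owns. The idiom distilled, with
types and macros as in this file:]

static void advance_rx_fence(struct priv *p)
{
	volatile struct rfd_struct *done = p->rfd_top;

	done->status = 0;			/* descriptor back to the card  */
	done->last = RFD_SUSP;			/* new fence: RU suspends here  */
	p->rfd_last->last = 0;			/* lift the old fence behind us */
	p->rfd_last = done;
	p->rfd_top = (struct rfd_struct *) make32(done->next);
}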
- */ - -static void elmc_rnr_int(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - dev->stats.rx_errors++; - - WAIT_4_SCB_CMD(); /* wait for the last cmd */ - p->scb->cmd = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ - elmc_attn586(); - WAIT_4_SCB_CMD(); /* wait for accept cmd. */ - - alloc_rfa(dev, (char *) p->rfd_first); - startrecv586(dev); /* restart RU */ - - pr_warning("%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->status); - -} - -/********************************************************** - * handle xmit - interrupt - */ - -static void elmc_xmt_int(struct net_device *dev) -{ - int status; - struct priv *p = netdev_priv(dev); - - status = p->xmit_cmds[p->xmit_last]->cmd_status; - if (!(status & STAT_COMPL)) { - pr_warning("%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name); - } - if (status & STAT_OK) { - dev->stats.tx_packets++; - dev->stats.collisions += (status & TCMD_MAXCOLLMASK); - } else { - dev->stats.tx_errors++; - if (status & TCMD_LATECOLL) { - pr_warning("%s: late collision detected.\n", dev->name); - dev->stats.collisions++; - } else if (status & TCMD_NOCARRIER) { - dev->stats.tx_carrier_errors++; - pr_warning("%s: no carrier detected.\n", dev->name); - } else if (status & TCMD_LOSTCTS) { - pr_warning("%s: loss of CTS detected.\n", dev->name); - } else if (status & TCMD_UNDERRUN) { - dev->stats.tx_fifo_errors++; - pr_warning("%s: DMA underrun detected.\n", dev->name); - } else if (status & TCMD_MAXCOLL) { - pr_warning("%s: Max. collisions exceeded.\n", dev->name); - dev->stats.collisions += 16; - } - } - -#if (NUM_XMIT_BUFFS != 1) - if ((++p->xmit_last) == NUM_XMIT_BUFFS) { - p->xmit_last = 0; - } -#endif - - netif_wake_queue(dev); -} - -/*********************************************************** - * (re)start the receiver - */ - -static void startrecv586(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - p->scb->rfa_offset = make16(p->rfd_first); - p->scb->cmd = RUC_START; - elmc_attn586(); /* start cmd. */ - WAIT_4_SCB_CMD(); /* wait for accept cmd. (no timeout!!) */ -} - -/****************************************************** - * timeout - */ - -static void elmc_timeout(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - /* COMMAND-UNIT active? */ - if (p->scb->status & CU_ACTIVE) { - pr_debug("%s: strange ... timeout with CU active?!?\n", dev->name); - pr_debug("%s: X0: %04x N0: %04x N1: %04x %d\n", dev->name, - (int)p->xmit_cmds[0]->cmd_status, - (int)p->nop_cmds[0]->cmd_status, - (int)p->nop_cmds[1]->cmd_status, (int)p->nop_point); - p->scb->cmd = CUC_ABORT; - elmc_attn586(); - WAIT_4_SCB_CMD(); - p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]); - p->scb->cmd = CUC_START; - elmc_attn586(); - WAIT_4_SCB_CMD(); - netif_wake_queue(dev); - } else { - pr_debug("%s: xmitter timed out, try to restart! stat: %04x\n", - dev->name, p->scb->status); - pr_debug("%s: command-stats: %04x %04x\n", dev->name, - p->xmit_cmds[0]->cmd_status, p->xmit_cmds[1]->cmd_status); - elmc_close(dev); - elmc_open(dev); - } -} - -/****************************************************** - * send frame - */ - -static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev) -{ - int len; - int i; -#ifndef NO_NOPCOMMANDS - int next_nop; -#endif - struct priv *p = netdev_priv(dev); - - netif_stop_queue(dev); - - len = (ETH_ZLEN < skb->len) ? 
skb->len : ETH_ZLEN; - - if (len != skb->len) - memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); - skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len); - -#if (NUM_XMIT_BUFFS == 1) -#ifdef NO_NOPCOMMANDS - p->xmit_buffs[0]->size = TBD_LAST | len; - for (i = 0; i < 16; i++) { - p->scb->cbl_offset = make16(p->xmit_cmds[0]); - p->scb->cmd = CUC_START; - p->xmit_cmds[0]->cmd_status = 0; - elmc_attn586(); - if (!i) { - dev_kfree_skb(skb); - } - WAIT_4_SCB_CMD(); - if ((p->scb->status & CU_ACTIVE)) { /* test it, because CU sometimes doesn't start immediately */ - break; - } - if (p->xmit_cmds[0]->cmd_status) { - break; - } - if (i == 15) { - pr_warning("%s: Can't start transmit-command.\n", dev->name); - } - } -#else - next_nop = (p->nop_point + 1) & 0x1; - p->xmit_buffs[0]->size = TBD_LAST | len; - - p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link - = make16((p->nop_cmds[next_nop])); - p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0; - - p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0])); - p->nop_point = next_nop; - dev_kfree_skb(skb); -#endif -#else - p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len; - if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) { - next_nop = 0; - } - p->xmit_cmds[p->xmit_count]->cmd_status = 0; - p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link - = make16((p->nop_cmds[next_nop])); - p->nop_cmds[next_nop]->cmd_status = 0; - p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); - p->xmit_count = next_nop; - if (p->xmit_count != p->xmit_last) - netif_wake_queue(dev); - dev_kfree_skb(skb); -#endif - return NETDEV_TX_OK; -} - -/******************************************* - * Someone wanna have the statistics - */ - -static struct net_device_stats *elmc_get_stats(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - unsigned short crc, aln, rsc, ovrn; - - crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */ - p->scb->crc_errs -= crc; - aln = p->scb->aln_errs; - p->scb->aln_errs -= aln; - rsc = p->scb->rsc_errs; - p->scb->rsc_errs -= rsc; - ovrn = p->scb->ovrn_errs; - p->scb->ovrn_errs -= ovrn; - - dev->stats.rx_crc_errors += crc; - dev->stats.rx_fifo_errors += ovrn; - dev->stats.rx_frame_errors += aln; - dev->stats.rx_dropped += rsc; - - return &dev->stats; -} - -/******************************************************** - * Set MC list .. 
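[elmc_get_stats() above harvests the on-card counters by sample-and-subtract
rather than by writing zero, so a hit that lands between the read and the
write-back is kept for the next harvest. The idiom in isolation, with the
counter standing in for scb fields such as crc_errs:]

static unsigned short harvest(volatile unsigned short *hw_counter,
			      unsigned long *sw_total)
{
	unsigned short sample = *hw_counter;

	*hw_counter -= sample;		/* not  *hw_counter = 0  */
	*sw_total += sample;
	return sample;
}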
- */
-
-#ifdef ELMC_MULTICAST
-static void set_multicast_list(struct net_device *dev)
-{
-	if (!dev->start) {
-		/* without a running interface, promiscuous doesn't work */
-		return;
-	}
-	dev->start = 0;
-	alloc586(dev);
-	init586(dev);
-	startrecv586(dev);
-	dev->start = 1;
-}
-#endif
-
-static void netdev_get_drvinfo(struct net_device *dev,
-			       struct ethtool_drvinfo *info)
-{
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
-	.get_drvinfo = netdev_get_drvinfo,
-};
-
-#ifdef MODULE
-
-/* Increase if needed ;) */
-#define MAX_3C523_CARDS 4
-
-static struct net_device *dev_elmc[MAX_3C523_CARDS];
-static int irq[MAX_3C523_CARDS];
-static int io[MAX_3C523_CARDS];
-module_param_array(irq, int, NULL, 0);
-module_param_array(io, int, NULL, 0);
-MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
-	int this_dev,found = 0;
-
-	/* Loop until we either can't find any more cards, or we have MAX_3C523_CARDS */
-	for(this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
-		struct net_device *dev = alloc_etherdev(sizeof(struct priv));
-		if (!dev)
-			break;
-		dev->irq=irq[this_dev];
-		dev->base_addr=io[this_dev];
-		if (do_elmc_probe(dev) == 0) {
-			dev_elmc[this_dev] = dev;
-			found++;
-			continue;
-		}
-		free_netdev(dev);
-		if (io[this_dev]==0)
-			break;
-		pr_warning("3c523.c: No 3c523 card found at io=%#x\n",io[this_dev]);
-	}
-
-	if(found==0) {
-		if (io[0]==0)
-			pr_notice("3c523.c: No 3c523 cards found\n");
-		return -ENXIO;
-	} else return 0;
-}
-
-void __exit cleanup_module(void)
-{
-	int this_dev;
-
-	for(this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
-		struct net_device *dev = dev_elmc[this_dev];
-		if (dev) {
-			unregister_netdev(dev);
-			cleanup_card(dev);
-			free_netdev(dev);
-		}
-	}
-}
-
-#endif				/* MODULE */
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
deleted file mode 100644
--- a/drivers/net/3c527.c
+++ /dev/null
-/* 3c527.c - 3Com EtherLink/MC32 driver for Linux
- *
- * (c) Copyright 1998 Red Hat Software Inc
- * Written by Alan Cox.
- * Further debugging by Carl Drougge.
- * Initial SMP support by Felipe W Damasio
- * Heavily modified by Richard Procter
- *
- * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
- * (for the MCA stuff) written by Wim Dumon.
- *
- * Thanks to 3Com for making this possible by providing me with the
- * documentation.
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define DRV_NAME		"3c527"
-#define DRV_VERSION		"0.7-SMP"
-#define DRV_RELDATE		"2003/09/21"
-
-static const char *version =
-DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter \n";
-
-/**
- * DOC: Traps for the unwary
- *
- * The diagram (Figure 1-1) and the POS summary disagree with the
- * "Interrupt Level" section in the manual.
- *
- * The manual contradicts itself when describing the minimum number of
- * buffers in the 'configure lists' command.
- * My card accepts a buffer config of 4/4.
- *
- * Setting the SAV BP bit does not save bad packets, but
- * only enables RX on-card stats collection.
- *
- * The documentation in places seems to miss things. In actual fact
- * I've always eventually found everything is documented, it just
- * requires careful study.
- *
- * DOC: Theory Of Operation
- *
- * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
- * amount of on board intelligence that housekeeps a somewhat dumber
- * Intel NIC. For performance we want to keep the transmit queue deep
- * as the card can transmit packets while fetching others from main
- * memory by bus master DMA. Transmission and reception are driven by
- * circular buffer queues.
- *
- * The mailboxes can be used for controlling how the card traverses
- * its buffer rings, but are used only for initial setup in this
- * implementation. The exec mailbox allows a variety of commands to
- * be executed. Each command must complete before the next is
- * executed. Primarily we use the exec mailbox for controlling the
- * multicast lists. We have to do a certain amount of interesting
- * hoop jumping as the multicast list changes can occur in interrupt
- * state when the card has an exec command pending. We defer such
- * events until the command completion interrupt.
- *
- * A copy break scheme (taken from 3c59x.c) is employed whereby
- * received frames exceeding a configurable length are passed
- * directly to the higher networking layers without incurring a copy,
- * in what amounts to a time/space trade-off (sketched below).
- *
- * The card also keeps a large amount of statistical information
- * on-board. In a perfect world, these could be used safely at no
- * cost. However, lacking information to the contrary, processing
- * them without races would involve so much extra complexity as to
- * make it unworthwhile to do so. In the end, a hybrid SW/HW
- * implementation was made necessary --- see mc32_update_stats().
- *
- * DOC: Notes
- *
- * It should be possible to use two or more cards, but at this stage
- * only by loading two copies of the same module.
- *
- * The on-board 82586 NIC has trouble receiving multiple
- * back-to-back frames and so is likely to drop packets from fast
- * senders.
-**/
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-
-#include "3c527.h"
-
-MODULE_LICENSE("GPL");
-
-/*
- * The name of the card, used for messages and in the requests for
- * io regions, irqs and dma channels.
- */
-static const char* cardname = DRV_NAME;
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 2
-#endif
-
-static unsigned int mc32_debug = NET_DEBUG;
-
-/* The number of low I/O ports used by the ethercard. */
-#define MC32_IO_EXTENT	8
-
-/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
-#define TX_RING_LEN	32	/* Typically the card supports 37 */
-#define RX_RING_LEN	8	/*        "       "        "     */
-
-/* Copy break point, see above for details.
- * Setting to > 1512 effectively disables this feature. */
-#define RX_COPYBREAK	200	/* Value from 3c59x.c */
-
-/* Issue the 82586 workaround command - this is for "busy lans", but
- * basically means for all lans nowadays - has a performance (latency)
- * cost, but best set. */
-static const int WORKAROUND_82586=1;
-
-/* Pointers to buffers and their on-card records */
-struct mc32_ring_desc
-{
-	volatile struct skb_header *p;
-	struct sk_buff *skb;
-};
-
-/* Information that needs to be kept for each board.
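[A hedged sketch of the copy-break decision described in the Theory Of
Operation above. The real receive path lies beyond this hunk, so the
function and its arguments are illustrative only; RX_COPYBREAK is the
driver's own threshold.]

static void rx_copybreak(struct net_device *dev, struct sk_buff *ring_skb,
			 int length)
{
	if (length < RX_COPYBREAK) {
		/* small frame: copy it out, the big ring skb stays in place */
		struct sk_buff *skb = dev_alloc_skb(length + 2);

		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);			/* align the IP header */
		memcpy(skb_put(skb, length), ring_skb->data, length);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
	} else {
		/* large frame: hand the ring skb itself up; the caller must
		 * then allocate a replacement for the ring slot */
		skb_put(ring_skb, length);
		ring_skb->protocol = eth_type_trans(ring_skb, dev);
		netif_rx(ring_skb);
	}
}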
*/ -struct mc32_local -{ - int slot; - - u32 base; - volatile struct mc32_mailbox *rx_box; - volatile struct mc32_mailbox *tx_box; - volatile struct mc32_mailbox *exec_box; - volatile struct mc32_stats *stats; /* Start of on-card statistics */ - u16 tx_chain; /* Transmit list start offset */ - u16 rx_chain; /* Receive list start offset */ - u16 tx_len; /* Transmit list count */ - u16 rx_len; /* Receive list count */ - - u16 xceiver_desired_state; /* HALTED or RUNNING */ - u16 cmd_nonblocking; /* Thread is uninterested in command result */ - u16 mc_reload_wait; /* A multicast load request is pending */ - u32 mc_list_valid; /* True when the mclist is set */ - - struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */ - struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */ - - atomic_t tx_count; /* buffers left */ - atomic_t tx_ring_head; /* index to tx en-queue end */ - u16 tx_ring_tail; /* index to tx de-queue end */ - - u16 rx_ring_tail; /* index to rx de-queue end */ - - struct semaphore cmd_mutex; /* Serialises issuing of execute commands */ - struct completion execution_cmd; /* Card has completed an execute command */ - struct completion xceiver_cmd; /* Card has completed a tx or rx command */ -}; - -/* The station (ethernet) address prefix, used for a sanity check. */ -#define SA_ADDR0 0x02 -#define SA_ADDR1 0x60 -#define SA_ADDR2 0xAC - -struct mca_adapters_t { - unsigned int id; - char *name; -}; - -static const struct mca_adapters_t mc32_adapters[] = { - { 0x0041, "3COM EtherLink MC/32" }, - { 0x8EF5, "IBM High Performance Lan Adapter" }, - { 0x0000, NULL } -}; - - -/* Macros for ring index manipulations */ -static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); }; -static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); }; - -static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); }; - - -/* Index to functions, as function prototypes. */ -static int mc32_probe1(struct net_device *dev, int ioaddr); -static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len); -static int mc32_open(struct net_device *dev); -static void mc32_timeout(struct net_device *dev); -static netdev_tx_t mc32_send_packet(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t mc32_interrupt(int irq, void *dev_id); -static int mc32_close(struct net_device *dev); -static struct net_device_stats *mc32_get_stats(struct net_device *dev); -static void mc32_set_multicast_list(struct net_device *dev); -static void mc32_reset_multicast_list(struct net_device *dev); -static const struct ethtool_ops netdev_ethtool_ops; - -static void cleanup_card(struct net_device *dev) -{ - struct mc32_local *lp = netdev_priv(dev); - unsigned slot = lp->slot; - mca_mark_as_unused(slot); - mca_set_adapter_name(slot, NULL); - free_irq(dev->irq, dev); - release_region(dev->base_addr, MC32_IO_EXTENT); -} - -/** - * mc32_probe - Search for supported boards - * @unit: interface number to use - * - * Because MCA bus is a real bus and we can scan for cards we could do a - * single scan for all boards here. Right now we use the passed in device - * structure and scan for only one board. This needs fixing for modules - * in particular. - */ - -struct net_device *__init mc32_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local)); - static int current_mca_slot = -1; - int i; - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) - sprintf(dev->name, "eth%d", unit); - - /* Do not check any supplied i/o locations. 
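[The ring helpers above depend on the power-of-2 lengths demanded where
TX_RING_LEN and RX_RING_LEN are defined: masking with LEN-1 wraps cheaply
in both directions. A compile-checked illustration:]

static void ring_mask_demo(void)
{
	u16 i = RX_RING_LEN - 1;			/* 7 with the default length */

	BUILD_BUG_ON(RX_RING_LEN & (RX_RING_LEN - 1));	/* must be a power of two */

	i = next_rx(i);		/* (7+1) & 7 == 0: wraps forward            */
	i = prev_rx(i);		/* (0-1) & 7 == 7: wraps back in u16 math   */
}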
- POS registers usually don't fail :) */ - - /* MCA cards have POS registers. - Autodetecting MCA cards is extremely simple. - Just search for the card. */ - - for(i = 0; (mc32_adapters[i].name != NULL); i++) { - current_mca_slot = - mca_find_unused_adapter(mc32_adapters[i].id, 0); - - if(current_mca_slot != MCA_NOTFOUND) { - if(!mc32_probe1(dev, current_mca_slot)) - { - mca_set_adapter_name(current_mca_slot, - mc32_adapters[i].name); - mca_mark_as_used(current_mca_slot); - err = register_netdev(dev); - if (err) { - cleanup_card(dev); - free_netdev(dev); - dev = ERR_PTR(err); - } - return dev; - } - - } - } - free_netdev(dev); - return ERR_PTR(-ENODEV); -} - -static const struct net_device_ops netdev_ops = { - .ndo_open = mc32_open, - .ndo_stop = mc32_close, - .ndo_start_xmit = mc32_send_packet, - .ndo_get_stats = mc32_get_stats, - .ndo_set_multicast_list = mc32_set_multicast_list, - .ndo_tx_timeout = mc32_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/** - * mc32_probe1 - Check a given slot for a board and test the card - * @dev: Device structure to fill in - * @slot: The MCA bus slot being used by this card - * - * Decode the slot data and configure the card structures. Having done this we - * can reset the card and configure it. The card does a full self test cycle - * in firmware so we have to wait for it to return and post us either a - * failure case or some addresses we use to find the board internals. - */ - -static int __init mc32_probe1(struct net_device *dev, int slot) -{ - static unsigned version_printed; - int i, err; - u8 POS; - u32 base; - struct mc32_local *lp = netdev_priv(dev); - static const u16 mca_io_bases[] = { - 0x7280,0x7290, - 0x7680,0x7690, - 0x7A80,0x7A90, - 0x7E80,0x7E90 - }; - static const u32 mca_mem_bases[] = { - 0x00C0000, - 0x00C4000, - 0x00C8000, - 0x00CC000, - 0x00D0000, - 0x00D4000, - 0x00D8000, - 0x00DC000 - }; - static const char * const failures[] = { - "Processor instruction", - "Processor data bus", - "Processor data bus", - "Processor data bus", - "Adapter bus", - "ROM checksum", - "Base RAM", - "Extended RAM", - "82586 internal loopback", - "82586 initialisation failure", - "Adapter list configuration error" - }; - - /* Time to play MCA games */ - - if (mc32_debug && version_printed++ == 0) - pr_debug("%s", version); - - pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot); - - POS = mca_read_stored_pos(slot, 2); - - if(!(POS&1)) - { - pr_cont("disabled.\n"); - return -ENODEV; - } - - /* Fill in the 'dev' fields. */ - dev->base_addr = mca_io_bases[(POS>>1)&7]; - dev->mem_start = mca_mem_bases[(POS>>4)&7]; - - POS = mca_read_stored_pos(slot, 4); - if(!(POS&1)) - { - pr_cont("memory window disabled.\n"); - return -ENODEV; - } - - POS = mca_read_stored_pos(slot, 5); - - i=(POS>>4)&3; - if(i==3) - { - pr_cont("invalid memory window.\n"); - return -ENODEV; - } - - i*=16384; - i+=16384; - - dev->mem_end=dev->mem_start + i; - - dev->irq = ((POS>>2)&3)+9; - - if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname)) - { - pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr); - return -EBUSY; - } - - pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n", - dev->base_addr, dev->irq, dev->mem_start, i/1024); - - - /* We ought to set the cache line size here.. */ - - - /* - * Go PROM browsing - */ - - /* Retrieve and print the ethernet address. 
*/ - for (i = 0; i < 6; i++) - { - mca_write_pos(slot, 6, i+12); - mca_write_pos(slot, 7, 0); - - dev->dev_addr[i] = mca_read_pos(slot,3); - } - - pr_info("%s: Address %pM ", dev->name, dev->dev_addr); - - mca_write_pos(slot, 6, 0); - mca_write_pos(slot, 7, 0); - - POS = mca_read_stored_pos(slot, 4); - - if(POS&2) - pr_cont(": BNC port selected.\n"); - else - pr_cont(": AUI port selected.\n"); - - POS=inb(dev->base_addr+HOST_CTRL); - POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET; - POS&=~HOST_CTRL_INTE; - outb(POS, dev->base_addr+HOST_CTRL); - /* Reset adapter */ - udelay(100); - /* Reset off */ - POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET); - outb(POS, dev->base_addr+HOST_CTRL); - - udelay(300); - - /* - * Grab the IRQ - */ - - err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev); - if (err) { - release_region(dev->base_addr, MC32_IO_EXTENT); - pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq); - goto err_exit_ports; - } - - memset(lp, 0, sizeof(struct mc32_local)); - lp->slot = slot; - - i=0; - - base = inb(dev->base_addr); - - while(base == 0xFF) - { - i++; - if(i == 1000) - { - pr_err("%s: failed to boot adapter.\n", dev->name); - err = -ENODEV; - goto err_exit_irq; - } - udelay(1000); - if(inb(dev->base_addr+2)&(1<<5)) - base = inb(dev->base_addr); - } - - if(base>0) - { - if(base < 0x0C) - pr_err("%s: %s%s.\n", dev->name, failures[base-1], - base<0x0A?" test failure":""); - else - pr_err("%s: unknown failure %d.\n", dev->name, base); - err = -ENODEV; - goto err_exit_irq; - } - - base=0; - for(i=0;i<4;i++) - { - int n=0; - - while(!(inb(dev->base_addr+2)&(1<<5))) - { - n++; - udelay(50); - if(n>100) - { - pr_err("%s: mailbox read fail (%d).\n", dev->name, i); - err = -ENODEV; - goto err_exit_irq; - } - } - - base|=(inb(dev->base_addr)<<(8*i)); - } - - lp->exec_box=isa_bus_to_virt(dev->mem_start+base); - - base=lp->exec_box->data[1]<<16|lp->exec_box->data[0]; - - lp->base = dev->mem_start+base; - - lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]); - lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]); - - lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]); - - /* - * Descriptor chains (card relative) - */ - - lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */ - lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */ - lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */ - lp->rx_len = lp->exec_box->data[11]; /* Receive list count */ - - sema_init(&lp->cmd_mutex, 0); - init_completion(&lp->execution_cmd); - init_completion(&lp->xceiver_cmd); - - pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n", - dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base); - - dev->netdev_ops = &netdev_ops; - dev->watchdog_timeo = HZ*5; /* Board does all the work */ - dev->ethtool_ops = &netdev_ethtool_ops; - - return 0; - -err_exit_irq: - free_irq(dev->irq, dev); -err_exit_ports: - release_region(dev->base_addr, MC32_IO_EXTENT); - return err; -} - - -/** - * mc32_ready_poll - wait until we can feed it a command - * @dev: The device to wait for - * - * Wait until the card becomes ready to accept a command via the - * command register. This tells us nothing about the completion - * status of any pending commands and takes very little time at all. 
- */ - -static inline void mc32_ready_poll(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR)); -} - - -/** - * mc32_command_nowait - send a command non blocking - * @dev: The 3c527 to issue the command to - * @cmd: The command word to write to the mailbox - * @data: A data block if the command expects one - * @len: Length of the data block - * - * Send a command from interrupt state. If there is a command - * currently being executed then we return an error of -1. It - * simply isn't viable to wait around as commands may be - * slow. This can theoretically be starved on SMP, but it's hard - * to see a realistic situation. We do not wait for the command - * to complete --- we rely on the interrupt handler to tidy up - * after us. - */ - -static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len) -{ - struct mc32_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int ret = -1; - - if (down_trylock(&lp->cmd_mutex) == 0) - { - lp->cmd_nonblocking=1; - lp->exec_box->mbox=0; - lp->exec_box->mbox=cmd; - memcpy((void *)lp->exec_box->data, data, len); - barrier(); /* the memcpy forgot the volatile so be sure */ - - /* Send the command */ - mc32_ready_poll(dev); - outb(1<<6, ioaddr+HOST_CMD); - - ret = 0; - - /* Interrupt handler will signal mutex on completion */ - } - - return ret; -} - - -/** - * mc32_command - send a command and sleep until completion - * @dev: The 3c527 card to issue the command to - * @cmd: The command word to write to the mailbox - * @data: A data block if the command expects one - * @len: Length of the data block - * - * Sends exec commands in a user context. This permits us to wait around - * for the replies and also to wait for the command buffer to complete - * from a previous command before we execute our command. After our - * command completes we will attempt any pending multicast reload - * we blocked off by hogging the exec buffer. - * - * You feed the card a command, you wait, it interrupts you get a - * reply. All well and good. The complication arises because you use - * commands for filter list changes which come in at bh level from things - * like IPV6 group stuff. - */ - -static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len) -{ - struct mc32_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int ret = 0; - - down(&lp->cmd_mutex); - - /* - * My Turn - */ - - lp->cmd_nonblocking=0; - lp->exec_box->mbox=0; - lp->exec_box->mbox=cmd; - memcpy((void *)lp->exec_box->data, data, len); - barrier(); /* the memcpy forgot the volatile so be sure */ - - mc32_ready_poll(dev); - outb(1<<6, ioaddr+HOST_CMD); - - wait_for_completion(&lp->execution_cmd); - - if(lp->exec_box->mbox&(1<<13)) - ret = -1; - - up(&lp->cmd_mutex); - - /* - * A multicast set got blocked - try it now - */ - - if(lp->mc_reload_wait) - { - mc32_reset_multicast_list(dev); - } - - return ret; -} - - -/** - * mc32_start_transceiver - tell board to restart tx/rx - * @dev: The 3c527 card to issue the command to - * - * This may be called from the interrupt state, where it is used - * to restart the rx ring if the card runs out of rx buffers. - * - * We must first check if it's ok to (re)start the transceiver. See - * mc32_close for details. 
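[A usage sketch of the two command paths above, not taken from the driver
itself: sleeping contexts call mc32_command(), interrupt state must use
mc32_command_nowait() and defer on failure, which is exactly the multicast
strategy the comments describe. Opcode 4 ("indications on") is borrowed
from mc32_open() further down.]

static void example_exec(struct net_device *dev, int in_irq_state)
{
	struct mc32_local *lp = netdev_priv(dev);
	u16 arg = 1;

	if (!in_irq_state) {
		mc32_command(dev, 4, &arg, 2);		/* sleeps for completion    */
	} else if (mc32_command_nowait(dev, 4, &arg, 2) == -1) {
		lp->mc_reload_wait = 1;			/* retried from the IRQ     */
	}
}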
-
-/**
- * mc32_start_transceiver - tell board to restart tx/rx
- * @dev: The 3c527 card to issue the command to
- *
- * This may be called from the interrupt state, where it is used
- * to restart the rx ring if the card runs out of rx buffers.
- *
- * We must first check if it's ok to (re)start the transceiver. See
- * mc32_close for details.
- */
-
-static void mc32_start_transceiver(struct net_device *dev) {
-
-        struct mc32_local *lp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-
-        /* Ignore RX overflow on device closure */
-        if (lp->xceiver_desired_state==HALTED)
-                return;
-
-        /* Give the card the offset to the post-EOL-bit RX descriptor */
-        mc32_ready_poll(dev);
-        lp->rx_box->mbox=0;
-        lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
-        outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
-
-        mc32_ready_poll(dev);
-        lp->tx_box->mbox=0;
-        outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);   /* card ignores this on RX restart */
-
-        /* We are not interrupted on start completion */
-}
-
-
-/**
- * mc32_halt_transceiver - tell board to stop tx/rx
- * @dev: The 3c527 card to issue the command to
- *
- * We issue the commands to halt the card's transceiver. In fact,
- * after some experimenting we now simply tell the card to
- * suspend. When issuing aborts occasionally odd things happened.
- *
- * We then sleep until the card has notified us that both rx and
- * tx have been suspended.
- */
-
-static void mc32_halt_transceiver(struct net_device *dev)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-
-        mc32_ready_poll(dev);
-        lp->rx_box->mbox=0;
-        outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
-        wait_for_completion(&lp->xceiver_cmd);
-
-        mc32_ready_poll(dev);
-        lp->tx_box->mbox=0;
-        outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
-        wait_for_completion(&lp->xceiver_cmd);
-}
-
-
-/**
- * mc32_load_rx_ring - load the ring of receive buffers
- * @dev: 3c527 to build the ring for
- *
- * This initialises the on-card and driver datastructures to
- * the point where mc32_start_transceiver() can be called.
- *
- * The card sets up the receive ring for us. We are required to use the
- * ring it provides, although the size of the ring is configurable.
- *
- * We allocate an sk_buff for each ring entry in turn and
- * initialise its house-keeping info. At the same time, we read
- * each 'next' pointer in our rx_ring array. This reduces slow
- * shared-memory reads and makes it easy to access predecessor
- * descriptors.
- *
- * We then set the end-of-list bit for the last entry so that the
- * card will know when it has run out of buffers.
- */
-
-static int mc32_load_rx_ring(struct net_device *dev)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        int i;
-        u16 rx_base;
-        volatile struct skb_header *p;
-
-        rx_base=lp->rx_chain;
-
-        for(i=0; i<RX_RING_LEN; i++) {
-                lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
-                if (lp->rx_ring[i].skb==NULL) {
-                        for (;i>=0;i--)
-                                kfree_skb(lp->rx_ring[i].skb);
-                        return -ENOBUFS;
-                }
-                skb_reserve(lp->rx_ring[i].skb, 18);
-
-                p=isa_bus_to_virt(lp->base+rx_base);
-
-                p->control=0;
-                p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
-                p->status=0;
-                p->length=1532;
-
-                lp->rx_ring[i].p=p;
-                rx_base=p->next;
-        }
-
-        lp->rx_ring[i-1].p->control |= CONTROL_EOL;
-
-        lp->rx_ring_tail=0;
-
-        return 0;
-}
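mc32_load_rx_ring() above walks the card's chained descriptors exactly once and caches each pointer host-side. The same one-pass walk, reduced to its data structure (a sketch; indices stand in for the card-relative offsets, and RING_LEN and the field names are illustrative):

        #include <stdint.h>
        #include <stddef.h>

        #define RING_LEN    8              /* stands in for RX_RING_LEN */
        #define CONTROL_EOL (1 << 6)

        struct desc { uint16_t next; uint8_t control; };

        /* One pass over the chain: remember every slot so the hot path
         * never re-reads slow shared memory, then mark the last slot
         * end-of-list so the card knows where to stop. */
        static void cache_ring(struct desc shm[], uint16_t first,
                               struct desc *cache[RING_LEN])
        {
                uint16_t idx = first;
                for (size_t i = 0; i < RING_LEN; i++) {
                        cache[i] = &shm[idx];
                        idx = cache[i]->next;      /* card-relative link */
                }
                cache[RING_LEN - 1]->control |= CONTROL_EOL;
        }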
-
-/**
- * mc32_flush_rx_ring - free the ring of receive buffers
- * @lp: Local data of 3c527 to flush the rx ring of
- *
- * Free the buffer for each ring slot. This may be called
- * before mc32_load_rx_ring(), eg. on error in mc32_open().
- * Requires rx skb pointers to point to a valid skb, or NULL.
- */
-
-static void mc32_flush_rx_ring(struct net_device *dev)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        int i;
-
-        for(i=0; i < RX_RING_LEN; i++)
-        {
-                if (lp->rx_ring[i].skb) {
-                        dev_kfree_skb(lp->rx_ring[i].skb);
-                        lp->rx_ring[i].skb = NULL;
-                }
-                lp->rx_ring[i].p=NULL;
-        }
-}
-
-
-/**
- * mc32_load_tx_ring - load transmit ring
- * @dev: The 3c527 card to issue the command to
- *
- * This sets up the host transmit data-structures.
- *
- * First, we obtain from the card its current position in the tx
- * ring, so that we will know where to begin transmitting
- * packets.
- *
- * Then, we read the 'next' pointers from the on-card tx ring into
- * our tx_ring array to reduce slow shared-mem reads. Finally, we
- * initialise the tx housekeeping variables.
- *
- */
-
-static void mc32_load_tx_ring(struct net_device *dev)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        volatile struct skb_header *p;
-        int i;
-        u16 tx_base;
-
-        tx_base=lp->tx_box->data[0];
-
-        for(i=0 ; i<TX_RING_LEN ; i++)
-        {
-                p=isa_bus_to_virt(lp->base+tx_base);
-                lp->tx_ring[i].p=p;
-                lp->tx_ring[i].skb=NULL;
-
-                tx_base=p->next;
-        }
-
-        /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
-        /* see mc32_tx_ring */
-
-        atomic_set(&lp->tx_count, TX_RING_LEN-1);
-        atomic_set(&lp->tx_ring_head, 0);
-        lp->tx_ring_tail=0;
-}
-
-
-/**
- * mc32_flush_tx_ring - free transmit ring
- * @lp: Local data of 3c527 to flush the tx ring of
- *
- * If the ring is non-empty, zip over it, freeing any
- * allocated skb_buffs. The tx ring house-keeping variables are
- * then reset. Requires rx skb pointers to point to a valid skb,
- * or NULL.
- */
-
-static void mc32_flush_tx_ring(struct net_device *dev)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        int i;
-
-        for (i=0; i < TX_RING_LEN; i++)
-        {
-                if (lp->tx_ring[i].skb)
-                {
-                        dev_kfree_skb(lp->tx_ring[i].skb);
-                        lp->tx_ring[i].skb = NULL;
-                }
-        }
-
-        atomic_set(&lp->tx_count, 0);
-        atomic_set(&lp->tx_ring_head, 0);
-        lp->tx_ring_tail=0;
-}
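The tx_count = TX_RING_LEN-1 initialisation above is the classic way to disambiguate a circular buffer: head == tail is reserved to mean "empty", so a ring of N slots can carry at most N-1 in-flight packets. The invariant in isolation (illustrative only, not driver code):

        #define TX_RING_LEN 16

        static unsigned head, tail;              /* head == tail  =>  empty */
        static unsigned count = TX_RING_LEN - 1; /* free slots, never N     */

        static int  ring_full(void) { return count == 0; }
        static void produce(void)   { head = (head + 1) % TX_RING_LEN; count--; }
        static void consume(void)   { tail = (tail + 1) % TX_RING_LEN; count++; }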
-
-/**
- * mc32_open - handle 'up' of card
- * @dev: device to open
- *
- * The user is trying to bring the card into ready state. This requires
- * a brief dialogue with the card. Firstly we enable interrupts and then
- * 'indications'. Without these enabled the card doesn't bother telling
- * us what it has done. This had me puzzled for a week.
- *
- * We configure the number of card descriptors, then load the network
- * address and multicast filters. Turn on the workaround mode. This
- * works around a bug in the 82586 - it asks the firmware to do
- * so. It has a performance (latency) hit but is needed on busy
- * [read most] lans. We load the ring with buffers then we kick it
- * all off.
- */
-
-static int mc32_open(struct net_device *dev)
-{
-        int ioaddr = dev->base_addr;
-        struct mc32_local *lp = netdev_priv(dev);
-        u8 one=1;
-        u8 regs;
-        u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
-
-        /*
-         * Interrupts enabled
-         */
-
-        regs=inb(ioaddr+HOST_CTRL);
-        regs|=HOST_CTRL_INTE;
-        outb(regs, ioaddr+HOST_CTRL);
-
-        /*
-         * Allow ourselves to issue commands
-         */
-
-        up(&lp->cmd_mutex);
-
-
-        /*
-         * Send the indications on command
-         */
-
-        mc32_command(dev, 4, &one, 2);
-
-        /*
-         * Poke it to make sure it's really dead.
-         */
-
-        mc32_halt_transceiver(dev);
-        mc32_flush_tx_ring(dev);
-
-        /*
-         * Ask card to set up on-card descriptors to our spec
-         */
-
-        if(mc32_command(dev, 8, descnumbuffs, 4)) {
-                pr_info("%s: %s rejected our buffer configuration!\n",
-                        dev->name, cardname);
-                mc32_close(dev);
-                return -ENOBUFS;
-        }
-
-        /* Report new configuration */
-        mc32_command(dev, 6, NULL, 0);
-
-        lp->tx_chain = lp->exec_box->data[8];   /* Transmit list start offset */
-        lp->rx_chain = lp->exec_box->data[10];  /* Receive list start offset */
-        lp->tx_len   = lp->exec_box->data[9];   /* Transmit list count */
-        lp->rx_len   = lp->exec_box->data[11];  /* Receive list count */
-
-        /* Set Network Address */
-        mc32_command(dev, 1, dev->dev_addr, 6);
-
-        /* Set the filters */
-        mc32_set_multicast_list(dev);
-
-        if (WORKAROUND_82586) {
-                u16 zero_word=0;
-                mc32_command(dev, 0x0D, &zero_word, 2);   /* 82586 bug workaround on */
-        }
-
-        mc32_load_tx_ring(dev);
-
-        if(mc32_load_rx_ring(dev))
-        {
-                mc32_close(dev);
-                return -ENOBUFS;
-        }
-
-        lp->xceiver_desired_state = RUNNING;
-
-        /* And finally, set the ball rolling... */
-        mc32_start_transceiver(dev);
-
-        netif_start_queue(dev);
-
-        return 0;
-}
-
-
-/**
- * mc32_timeout - handle a timeout from the network layer
- * @dev: 3c527 that timed out
- *
- * Handle a timeout on transmit from the 3c527. This normally means
- * bad things as the hardware handles cable timeouts and mess for
- * us.
- *
- */
-
-static void mc32_timeout(struct net_device *dev)
-{
-        pr_warning("%s: transmit timed out?\n", dev->name);
-        /* Try to restart the adaptor. */
-        netif_wake_queue(dev);
-}
-
-
-/**
- * mc32_send_packet - queue a frame for transmit
- * @skb: buffer to transmit
- * @dev: 3c527 to send it out of
- *
- * Transmit a buffer. This normally means throwing the buffer onto
- * the transmit queue as the queue is quite large. If the queue is
- * full then we set tx_busy and return. Once the interrupt handler
- * gets messages telling it to reclaim transmit queue entries, we will
- * clear tx_busy and the kernel will start calling this again.
- *
- * We do not disable interrupts or acquire any locks; this can
- * run concurrently with mc32_tx_ring(), and the function itself
- * is serialised at a higher layer. However, similarly for the
- * card itself, we must ensure that we update tx_ring_head only
- * after we've established a valid packet on the tx ring (and
- * before we let the card "see" it, to prevent it racing with the
- * irq handler).
- *
- */
-
-static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
-                                    struct net_device *dev)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        u32 head = atomic_read(&lp->tx_ring_head);
-
-        volatile struct skb_header *p, *np;
-
-        netif_stop_queue(dev);
-
-        if(atomic_read(&lp->tx_count)==0) {
-                return NETDEV_TX_BUSY;
-        }
-
-        if (skb_padto(skb, ETH_ZLEN)) {
-                netif_wake_queue(dev);
-                return NETDEV_TX_OK;
-        }
-
-        atomic_dec(&lp->tx_count);
-
-        /* P is the last sending/sent buffer as a pointer */
-        p=lp->tx_ring[head].p;
-
-        head = next_tx(head);
-
-        /* NP is the buffer we will be loading */
-        np=lp->tx_ring[head].p;
-
-        /* We will need this to flush the buffer out */
-        lp->tx_ring[head].skb=skb;
-
-        np->length  = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
-        np->data    = isa_virt_to_bus(skb->data);
-        np->status  = 0;
-        np->control = CONTROL_EOP | CONTROL_EOL;
-        wmb();
-
-        /*
-         * The new frame has been setup; we can now
-         * let the interrupt handler and card "see" it
-         */
-
-        atomic_set(&lp->tx_ring_head, head);
-        p->control &= ~CONTROL_EOL;
-
-        netif_wake_queue(dev);
-        return NETDEV_TX_OK;
-}
-
-
-/**
- * mc32_update_stats - pull off the on board statistics
- * @dev: 3c527 to service
- *
- *
- * Query and reset the on-card stats. There's the small possibility
- * of a race here, which would result in an underestimation of
- * actual errors. As such, we'd prefer to keep all our stats
- * collection in software. As a rule, we do. However it can't be
- * used for rx errors and collisions as, by default, the card discards
- * bad rx packets.
- *
- * Setting the SAV BP bit in the rx filter command supposedly
- * stops this behaviour. However, testing shows that it only seems to
- * enable the collation of on-card rx statistics --- the driver
- * never sees an RX descriptor with an error status set.
- *
- */
-
-static void mc32_update_stats(struct net_device *dev)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        volatile struct mc32_stats *st = lp->stats;
-
-        u32 rx_errors=0;
-
-        rx_errors+=dev->stats.rx_crc_errors   +=st->rx_crc_errors;
-        st->rx_crc_errors=0;
-        rx_errors+=dev->stats.rx_fifo_errors  +=st->rx_overrun_errors;
-        st->rx_overrun_errors=0;
-        rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
-        st->rx_alignment_errors=0;
-        rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
-        st->rx_tooshort_errors=0;
-        rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
-        st->rx_outofresource_errors=0;
-        dev->stats.rx_errors=rx_errors;
-
-        /* Number of packets which saw one collision */
-        dev->stats.collisions+=st->dataC[10];
-        st->dataC[10]=0;
-
-        /* Number of packets which saw 2--15 collisions */
-        dev->stats.collisions+=st->dataC[11];
-        st->dataC[11]=0;
-}
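The wmb() in mc32_send_packet() above is what makes the lockless producer safe: the descriptor payload must be globally visible before the card or the interrupt handler can see the new head. In C11 terms the same ordering is a release store; a sketch under that assumption (names are illustrative):

        #include <stdatomic.h>
        #include <stdint.h>

        struct desc { uint32_t data; uint16_t length; uint8_t status; };

        static struct desc ring[16];
        static _Atomic unsigned tx_head;

        static void publish(unsigned slot, uint32_t bus_addr, uint16_t len)
        {
                ring[slot].data   = bus_addr;   /* fill the descriptor first */
                ring[slot].length = len;
                ring[slot].status = 0;
                /* release store == wmb() + plain store: no consumer may
                 * observe the new head before the payload writes above */
                atomic_store_explicit(&tx_head, slot, memory_order_release);
        }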
-
-
-/**
- * mc32_rx_ring - process the receive ring
- * @dev: 3c527 that needs its receive ring processing
- *
- *
- * We have received one or more indications from the card that a
- * receive has completed. The buffer ring thus contains dirty
- * entries. We walk the ring by iterating over the circular rx_ring
- * array, starting at the next dirty buffer (which happens to be the
- * one we finished up at last time around).
- *
- * For each completed packet, we will either copy it and pass it up
- * the stack or, if the packet is near MTU sized, we allocate
- * another buffer and flip the old one up the stack.
- *
- * We must succeed in keeping a buffer on the ring. If necessary we
- * will toss a received packet rather than lose a ring entry. Once
- * the first uncompleted descriptor is found, we move the
- * End-Of-List bit to include the buffers just processed.
- *
- */
-
-static void mc32_rx_ring(struct net_device *dev)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        volatile struct skb_header *p;
-        u16 rx_ring_tail;
-        u16 rx_old_tail;
-        int x=0;
-
-        rx_old_tail = rx_ring_tail = lp->rx_ring_tail;
-
-        do
-        {
-                p=lp->rx_ring[rx_ring_tail].p;
-
-                if(!(p->status & (1<<7))) {   /* Not COMPLETED */
-                        break;
-                }
-                if(p->status & (1<<6))        /* COMPLETED_OK */
-                {
-
-                        u16 length=p->length;
-                        struct sk_buff *skb;
-                        struct sk_buff *newskb;
-
-                        /* Try to save time by avoiding a copy on big frames */
-
-                        if ((length > RX_COPYBREAK) &&
-                            ((newskb=dev_alloc_skb(1532)) != NULL))
-                        {
-                                skb=lp->rx_ring[rx_ring_tail].skb;
-                                skb_put(skb, length);
-
-                                skb_reserve(newskb,18);
-                                lp->rx_ring[rx_ring_tail].skb=newskb;
-                                p->data=isa_virt_to_bus(newskb->data);
-                        }
-                        else
-                        {
-                                skb=dev_alloc_skb(length+2);
-
-                                if(skb==NULL) {
-                                        dev->stats.rx_dropped++;
-                                        goto dropped;
-                                }
-
-                                skb_reserve(skb,2);
-                                memcpy(skb_put(skb, length),
-                                       lp->rx_ring[rx_ring_tail].skb->data, length);
-                        }
-
-                        skb->protocol=eth_type_trans(skb,dev);
-                        dev->stats.rx_packets++;
-                        dev->stats.rx_bytes += length;
-                        netif_rx(skb);
-                }
-
-dropped:
-                p->length = 1532;
-                p->status = 0;
-
-                rx_ring_tail=next_rx(rx_ring_tail);
-        }
-        while(x++<48);
-
-        /* If there was actually a frame to be processed, place the EOL bit */
-        /* at the descriptor prior to the one to be filled next */
-
-        if (rx_ring_tail != rx_old_tail)
-        {
-                lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL;
-                lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL;
-
-                lp->rx_ring_tail=rx_ring_tail;
-        }
-}
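The copy-versus-flip decision in mc32_rx_ring() above is a standard copybreak: small frames are copied into a right-sized buffer so the 1.5K ring buffer can be reused, while large frames donate their buffer up the stack and the ring slot gets a fresh one. The policy on its own, as a sketch (malloc stands in for skb allocation; RX_COPYBREAK is the driver's threshold, the rest is illustrative):

        #include <stdlib.h>
        #include <string.h>

        #define RX_COPYBREAK 200
        #define BUF_SZ       1532

        /* Returns the buffer to hand up the stack (NULL == drop); *ring_buf
         * is always left pointing at a valid BUF_SZ buffer for the slot,
         * mirroring "toss a packet rather than lose a ring entry". */
        static void *rx_copybreak_policy(void **ring_buf, size_t len)
        {
                if (len > RX_COPYBREAK) {           /* flip: donate big buffer */
                        void *up = *ring_buf;
                        void *fresh = malloc(BUF_SZ);
                        if (!fresh)
                                return NULL;        /* keep old buffer on ring */
                        *ring_buf = fresh;
                        return up;
                } else {                            /* copy: keep big buffer   */
                        void *up = malloc(len);
                        if (up)
                                memcpy(up, *ring_buf, len);
                        return up;
                }
        }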
-
-
-/**
- * mc32_tx_ring - process completed transmits
- * @dev: 3c527 that needs its transmit ring processing
- *
- *
- * This operates in a similar fashion to mc32_rx_ring. We iterate
- * over the transmit ring. For each descriptor which has been
- * processed by the card, we free its associated buffer and note
- * any errors. This continues until the transmit ring is emptied
- * or we reach a descriptor that hasn't yet been processed by the
- * card.
- *
- */
-
-static void mc32_tx_ring(struct net_device *dev)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        volatile struct skb_header *np;
-
-        /*
-         * We rely on head==tail to mean 'queue empty'.
-         * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
-         * tx_ring_head wrapping to tail and confusing a 'queue empty'
-         * condition with 'queue full'
-         */
-
-        while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
-        {
-                u16 t;
-
-                t=next_tx(lp->tx_ring_tail);
-                np=lp->tx_ring[t].p;
-
-                if(!(np->status & (1<<7)))
-                {
-                        /* Not COMPLETED */
-                        break;
-                }
-                dev->stats.tx_packets++;
-                if(!(np->status & (1<<6)))   /* Not COMPLETED_OK */
-                {
-                        dev->stats.tx_errors++;
-
-                        switch(np->status&0x0F)
-                        {
-                        case 1:
-                                dev->stats.tx_aborted_errors++;
-                                break;   /* Max collisions */
-                        case 2:
-                                dev->stats.tx_fifo_errors++;
-                                break;
-                        case 3:
-                                dev->stats.tx_carrier_errors++;
-                                break;
-                        case 4:
-                                dev->stats.tx_window_errors++;
-                                break;   /* CTS Lost */
-                        case 5:
-                                dev->stats.tx_aborted_errors++;
-                                break;   /* Transmit timeout */
-                        }
-                }
-                /* Packets are sent in order - this is
-                   basically a FIFO queue of buffers matching
-                   the card ring */
-                dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
-                dev_kfree_skb_irq(lp->tx_ring[t].skb);
-                lp->tx_ring[t].skb=NULL;
-                atomic_inc(&lp->tx_count);
-                netif_wake_queue(dev);
-
-                lp->tx_ring_tail=t;
-        }
-
-}
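The switch on np->status & 0x0F above is the card's completion code: the low nibble selects the error class, while bit 7 (COMPLETED) and bit 6 (COMPLETED_OK) match the masks in 3c527.h further down. A table-driven equivalent of the same decode (illustrative, not driver code):

        #include <stdint.h>

        #define COMPLETED    (1 << 7)
        #define COMPLETED_OK (1 << 6)
        #define STATUS_MASK  0x0F

        static const char *tx_fail[] = {
                [1] = "max collisions",
                [2] = "fifo underrun",
                [3] = "carrier lost",
                [4] = "CTS lost",
                [5] = "transmit timeout",
        };

        static const char *decode_tx_status(uint8_t status)
        {
                if (!(status & COMPLETED))
                        return "still owned by the card";
                if (status & COMPLETED_OK)
                        return "ok";
                uint8_t code = status & STATUS_MASK;
                return (code >= 1 && code <= 5) ? tx_fail[code] : "unknown error";
        }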
-
-
-/**
- * mc32_interrupt - handle an interrupt from a 3c527
- * @irq: Interrupt number
- * @dev_id: 3c527 that requires servicing
- * @regs: Registers (unused)
- *
- *
- * An interrupt is raised whenever the 3c527 writes to the command
- * register. This register contains the message it wishes to send us
- * packed into a single byte field. We keep reading status entries
- * until we have processed all the control items, but simply count
- * transmit and receive reports. When all reports are in we empty the
- * transceiver rings as appropriate. This saves the overhead of
- * multiple command requests.
- *
- * Because MCA is level-triggered, we shouldn't miss indications.
- * Therefore, we needn't ask the card to suspend interrupts within
- * this handler. The card receives an implicit acknowledgment of the
- * current interrupt when we read the command register.
- *
- */
-
-static irqreturn_t mc32_interrupt(int irq, void *dev_id)
-{
-        struct net_device *dev = dev_id;
-        struct mc32_local *lp;
-        int ioaddr, status, boguscount = 0;
-        int rx_event = 0;
-        int tx_event = 0;
-
-        ioaddr = dev->base_addr;
-        lp = netdev_priv(dev);
-
-        /* See what's cooking */
-
-        while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
-        {
-                status=inb(ioaddr+HOST_CMD);
-
-                pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
-                        (status&7), (status>>3)&7, (status>>6)&1,
-                        (status>>7)&1, boguscount);
-
-                switch(status&7)
-                {
-                case 0:
-                        break;
-                case 6:   /* TX fail */
-                case 2:   /* TX ok */
-                        tx_event = 1;
-                        break;
-                case 3:   /* Halt */
-                case 4:   /* Abort */
-                        complete(&lp->xceiver_cmd);
-                        break;
-                default:
-                        pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
-                }
-                status>>=3;
-                switch(status&7)
-                {
-                case 0:
-                        break;
-                case 2:   /* RX */
-                        rx_event=1;
-                        break;
-                case 3:   /* Halt */
-                case 4:   /* Abort */
-                        complete(&lp->xceiver_cmd);
-                        break;
-                case 6:
-                        /* Out of RX buffers stat */
-                        /* Must restart rx */
-                        dev->stats.rx_dropped++;
-                        mc32_rx_ring(dev);
-                        mc32_start_transceiver(dev);
-                        break;
-                default:
-                        pr_notice("%s: strange rx ack %d\n",
-                                dev->name, status&7);
-                }
-                status>>=3;
-                if(status&1)
-                {
-                        /*
-                         * No thread is waiting: we need to tidy
-                         * up ourselves.
-                         */
-
-                        if (lp->cmd_nonblocking) {
-                                up(&lp->cmd_mutex);
-                                if (lp->mc_reload_wait)
-                                        mc32_reset_multicast_list(dev);
-                        }
-                        else complete(&lp->execution_cmd);
-                }
-                if(status&2)
-                {
-                        /*
-                         * We get interrupted once per
-                         * counter that is about to overflow.
-                         */
-
-                        mc32_update_stats(dev);
-                }
-        }
-
-
-        /*
-         * Process the transmit and receive rings
-         */
-
-        if(tx_event)
-                mc32_tx_ring(dev);
-
-        if(rx_event)
-                mc32_rx_ring(dev);
-
-        return IRQ_HANDLED;
-}
-
-
-/**
- * mc32_close - user configuring the 3c527 down
- * @dev: 3c527 card to shut down
- *
- * The 3c527 is a bus mastering device. We must be careful how we
- * shut it down. It may also be running shared interrupt so we have
- * to be sure to silence it properly
- *
- * We indicate that the card is closing to the rest of the
- * driver. Otherwise, it is possible that the card may run out
- * of receive buffers and restart the transceiver while we're
- * trying to close it.
- *
- * We abort any receive and transmits going on and then wait until
- * any pending exec commands have completed in other code threads.
- * In theory we can't get here while that is true; in practice I am
- * paranoid.
- *
- * We turn off the interrupt enable for the board to be sure it can't
- * interfere with other devices.
- */
-
-static int mc32_close(struct net_device *dev)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-
-        u8 regs;
-        u16 one=1;
-
-        lp->xceiver_desired_state = HALTED;
-        netif_stop_queue(dev);
-
-        /*
-         * Send the indications on command (handy debug check)
-         */
-
-        mc32_command(dev, 4, &one, 2);
-
-        /* Shut down the transceiver */
-
-        mc32_halt_transceiver(dev);
-
-        /* Ensure we issue no more commands beyond this point */
-
-        down(&lp->cmd_mutex);
-
-        /* Ok the card is now stopping */
-
-        regs=inb(ioaddr+HOST_CTRL);
-        regs&=~HOST_CTRL_INTE;
-        outb(regs, ioaddr+HOST_CTRL);
-
-        mc32_flush_rx_ring(dev);
-        mc32_flush_tx_ring(dev);
-
-        mc32_update_stats(dev);
-
-        return 0;
-}
-
-
-/**
- * mc32_get_stats - hand back stats to network layer
- * @dev: The 3c527 card to handle
- *
- * We've collected all the stats we can in software already. Now
- * it's time to update those kept on-card and return the lot.
- *
- */
-
-static struct net_device_stats *mc32_get_stats(struct net_device *dev)
-{
-        mc32_update_stats(dev);
-        return &dev->stats;
-}
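mc32_get_stats() above refreshes from the card via mc32_update_stats(), which uses a read-and-zero discipline on the shared counters and accepts the small race its comment describes. The accumulate-then-clear step in isolation (a sketch; volatile models the card-owned memory, names are illustrative):

        #include <stdint.h>

        static void fold_counter(uint64_t *sw_field, volatile uint32_t *hw,
                                 uint64_t *rx_errors_total)
        {
                uint32_t snap = *hw;  /* an error counted between this read... */
                *hw = 0;              /* ...and this clear is simply lost; the */
                *sw_field += snap;    /* driver accepts that small under-count */
                *rx_errors_total += snap;
        }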
-
-
-/**
- * do_mc32_set_multicast_list - attempt to update multicasts
- * @dev: 3c527 device to load the list on
- * @retry: indicates this is not the first call.
- *
- *
- * Actually set or clear the multicast filter for this adaptor. The
- * locking issues are handled by this routine. We have to track
- * state as it may take multiple calls to get the command sequence
- * completed. We just keep trying to schedule the loads until we
- * manage to process them all.
- *
- * num_addrs == -1 Promiscuous mode, receive all packets
- *
- * num_addrs == 0 Normal mode, clear multicast list
- *
- * num_addrs > 0 Multicast mode, receive normal and MC packets,
- * and do best-effort filtering.
- *
- * See mc32_update_stats() regards setting the SAV BP bit.
- *
- */
-
-static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
-{
-        struct mc32_local *lp = netdev_priv(dev);
-        u16 filt = (1<<2);   /* Save Bad Packets, for stats purposes */
-
-        if ((dev->flags&IFF_PROMISC) ||
-            (dev->flags&IFF_ALLMULTI) ||
-            netdev_mc_count(dev) > 10)
-                /* Enable promiscuous mode */
-                filt |= 1;
-        else if (!netdev_mc_empty(dev))
-        {
-                unsigned char block[62];
-                unsigned char *bp;
-                struct netdev_hw_addr *ha;
-
-                if(retry==0)
-                        lp->mc_list_valid = 0;
-                if(!lp->mc_list_valid)
-                {
-                        block[1]=0;
-                        block[0]=netdev_mc_count(dev);
-                        bp=block+2;
-
-                        netdev_for_each_mc_addr(ha, dev) {
-                                memcpy(bp, ha->addr, 6);
-                                bp+=6;
-                        }
-                        if(mc32_command_nowait(dev, 2, block,
-                                               2+6*netdev_mc_count(dev))==-1)
-                        {
-                                lp->mc_reload_wait = 1;
-                                return;
-                        }
-                        lp->mc_list_valid=1;
-                }
-        }
-
-        if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
-        {
-                lp->mc_reload_wait = 1;
-        }
-        else {
-                lp->mc_reload_wait = 0;
-        }
-}
-
-
-/**
- * mc32_set_multicast_list - queue multicast list update
- * @dev: The 3c527 to use
- *
- * Commence loading the multicast list. This is called when the kernel
- * changes the lists. It will override any pending list we are trying to
- * load.
- */
-
-static void mc32_set_multicast_list(struct net_device *dev)
-{
-        do_mc32_set_multicast_list(dev,0);
-}
-
-
-/**
- * mc32_reset_multicast_list - reset multicast list
- * @dev: The 3c527 to use
- *
- * Attempt the next step in loading the multicast lists. If this attempt
- * fails to complete then it will be scheduled and this function called
- * again later from elsewhere.
- */
-
-static void mc32_reset_multicast_list(struct net_device *dev)
-{
-        do_mc32_set_multicast_list(dev,1);
-}
-
-static void netdev_get_drvinfo(struct net_device *dev,
-                               struct ethtool_drvinfo *info)
-{
-        strcpy(info->driver, DRV_NAME);
-        strcpy(info->version, DRV_VERSION);
-        sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
-        return mc32_debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
-        mc32_debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
-        .get_drvinfo = netdev_get_drvinfo,
-        .get_msglevel = netdev_get_msglevel,
-        .set_msglevel = netdev_set_msglevel,
-};
-
-#ifdef MODULE
-
-static struct net_device *this_device;
-
-/**
- * init_module - entry point
- *
- * Probe and locate a 3c527 card. This really should probe and locate
- * all the 3c527 cards in the machine not just one of them. Yes you can
- * insmod multiple modules for now but it's a hack.
- */
-
-int __init init_module(void)
-{
-        this_device = mc32_probe(-1);
-        if (IS_ERR(this_device))
-                return PTR_ERR(this_device);
-        return 0;
-}
-
-/**
- * cleanup_module - free resources for an unload
- *
- * Unloading time. We release the MCA bus resources and the interrupt
- * at which point everything is ready to unload. The card must be stopped
- * at this point or we would not have been called. When we unload we
- * leave the card stopped but not totally shut down. When the card is
- * initialized it must be rebooted or the rings reloaded before any
- * transmit operations are allowed to start scribbling into memory.
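do_mc32_set_multicast_list() above can be called where sleeping is impossible, so a busy exec box is handled by parking the request behind mc_reload_wait and letting whoever releases the box retry it. The control flow reduced to a flag (a sketch; exec_box_busy stands in for down_trylock on cmd_mutex):

        static int exec_box_busy;    /* stands in for a failed down_trylock() */
        static int mc_reload_wait;   /* a filter reload is parked             */

        static void try_load_filter(void)
        {
                if (exec_box_busy) {
                        mc_reload_wait = 1;   /* park it; no sleeping here */
                        return;
                }
                /* ... issue the filter command via the mailbox ... */
                mc_reload_wait = 0;
        }

        static void command_finished(void)    /* runs when the box frees up */
        {
                exec_box_busy = 0;
                if (mc_reload_wait)
                        try_load_filter();    /* replay the parked reload */
        }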
- */
-
-void __exit cleanup_module(void)
-{
-        unregister_netdev(this_device);
-        cleanup_card(this_device);
-        free_netdev(this_device);
-}
-
-#endif /* MODULE */
diff --git a/drivers/net/3c527.h b/drivers/net/3c527.h
deleted file mode 100644
index d693b8d..0000000
--- a/drivers/net/3c527.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * 3COM "EtherLink MC/32" Descriptions
- */
-
-/*
- * Registers
- */
-
-#define HOST_CMD                0
-#define HOST_CMD_START_RX       (1<<3)
-#define HOST_CMD_SUSPND_RX      (3<<3)
-#define HOST_CMD_RESTRT_RX      (5<<3)
-
-#define HOST_CMD_SUSPND_TX      3
-#define HOST_CMD_RESTRT_TX      5
-
-
-#define HOST_STATUS             2
-#define HOST_STATUS_CRR         (1<<6)
-#define HOST_STATUS_CWR         (1<<5)
-
-
-#define HOST_CTRL               6
-#define HOST_CTRL_ATTN          (1<<7)
-#define HOST_CTRL_RESET         (1<<6)
-#define HOST_CTRL_INTE          (1<<2)
-
-#define HOST_RAMPAGE            8
-
-#define HALTED  0
-#define RUNNING 1
-
-struct mc32_mailbox
-{
-        u16 mbox;
-        u16 data[1];
-} __packed;
-
-struct skb_header
-{
-        u8 status;
-        u8 control;
-        u16 next;       /* Do not change! */
-        u16 length;
-        u32 data;
-} __packed;
-
-struct mc32_stats
-{
-        /* RX Errors */
-        u32 rx_crc_errors;
-        u32 rx_alignment_errors;
-        u32 rx_overrun_errors;
-        u32 rx_tooshort_errors;
-        u32 rx_toolong_errors;
-        u32 rx_outofresource_errors;
-
-        u32 rx_discarded;       /* via card pattern match filter */
-
-        /* TX Errors */
-        u32 tx_max_collisions;
-        u32 tx_carrier_errors;
-        u32 tx_underrun_errors;
-        u32 tx_cts_errors;
-        u32 tx_timeout_errors;
-
-        /* various cruft */
-        u32 dataA[6];
-        u16 dataB[5];
-        u32 dataC[14];
-} __packed;
-
-#define STATUS_MASK     0x0F
-#define COMPLETED       (1<<7)
-#define COMPLETED_OK    (1<<6)
-#define BUFFER_BUSY     (1<<5)
-
-#define CONTROL_EOP     (1<<7)  /* End Of Packet */
-#define CONTROL_EOL     (1<<6)  /* End of List */
-
-#define MCA_MC32_ID     0x0041  /* Our MCA ident */
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
deleted file mode 100644
index be1f197..0000000
--- a/drivers/net/82596.c
+++ /dev/null
@@ -1,1632 +0,0 @@
-/* 82596.c: A generic 82596 ethernet driver for linux. */
-/*
-   Based on Apricot.c
-   Written 1994 by Mark Evans.
-   This driver is for the Apricot 82596 bus-master interface
-
-   Modularised 12/94 Mark Evans
-
-
-   Modified to support the 82596 ethernet chips on 680x0 VME boards.
-   by Richard Hirst
-   Renamed to be 82596.c
-
-   980825:  Changed to receive directly in to sk_buffs which are
-   allocated at open() time.  Eliminates copy on incoming frames
-   (small ones are still copied).  Shared data now held in a
-   non-cached page, so we can run on 68060 in copyback mode.
-
-   TBD:
-   * look at deferring rx frames rather than discarding (as per tulip)
-   * handle tx ring full as per tulip
-   * performance test to tune rx_copybreak
-
-   Most of my modifications relate to the braindead big-endian
-   implementation by Intel.  When the i596 is operating in
-   'big-endian' mode, it thinks a 32 bit value of 0x12345678
-   should be stored as 0x56781234.  This is a real pain, when
-   you have linked lists which are shared by the 680x0 and the
-   i596.
-
-   Driver skeleton
-   Written 1993 by Donald Becker.
-   Copyright 1993 United States Government as represented by the Director,
-   National Security Agency. This software may only be used and distributed
-   according to the terms of the GNU General Public License as modified by SRC,
-   incorporated herein by reference.
-
-   The author may be reached as becker@scyld.com, or C/O
-   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
-
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-
-static char version[] __initdata =
-        "82596.c $Revision: 1.5 $\n";
-
-#define DRV_NAME        "82596"
-
-/* DEBUG flags
- */
-
-#define DEB_INIT        0x0001
-#define DEB_PROBE       0x0002
-#define DEB_SERIOUS     0x0004
-#define DEB_ERRORS      0x0008
-#define DEB_MULTI       0x0010
-#define DEB_TDR         0x0020
-#define DEB_OPEN        0x0040
-#define DEB_RESET       0x0080
-#define DEB_ADDCMD      0x0100
-#define DEB_STATUS      0x0200
-#define DEB_STARTTX     0x0400
-#define DEB_RXADDR      0x0800
-#define DEB_TXADDR      0x1000
-#define DEB_RXFRAME     0x2000
-#define DEB_INTS        0x4000
-#define DEB_STRUCT      0x8000
-#define DEB_ANY         0xffff
-
-
-#define DEB(x,y)        if (i596_debug & (x)) y
-
-
-#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
-#define ENABLE_MVME16x_NET
-#endif
-#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
-#define ENABLE_BVME6000_NET
-#endif
-#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
-#define ENABLE_APRICOT
-#endif
-
-#ifdef ENABLE_MVME16x_NET
-#include <asm/mvme16xhw.h>
-#endif
-#ifdef ENABLE_BVME6000_NET
-#include <asm/bvme6000hw.h>
-#endif
-
-/*
- * Define various macros for Channel Attention, word swapping etc., dependent
- * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
- */
-
-#ifdef __mc68000__
-#define WSWAPrfd(x)  ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
-#define WSWAPrbd(x)  ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
-#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
-#define WSWAPscb(x)  ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
-#define WSWAPcmd(x)  ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
-#define WSWAPtbd(x)  ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
-#define WSWAPchar(x) ((char *)            (((u32)(x)<<16) | ((((u32)(x)))>>16)))
-#define ISCP_BUSY       0x00010000
-#define MACH_IS_APRICOT 0
-#else
-#define WSWAPrfd(x)     ((struct i596_rfd *)((long)x))
-#define WSWAPrbd(x)     ((struct i596_rbd *)((long)x))
-#define WSWAPiscp(x)    ((struct i596_iscp *)((long)x))
-#define WSWAPscb(x)     ((struct i596_scb *)((long)x))
-#define WSWAPcmd(x)     ((struct i596_cmd *)((long)x))
-#define WSWAPtbd(x)     ((struct i596_tbd *)((long)x))
-#define WSWAPchar(x)    ((char *)((long)x))
-#define ISCP_BUSY       0x0001
-#define MACH_IS_APRICOT 1
-#endif
-
-/*
- * The MPU_PORT command allows direct access to the 82596. With PORT access
- * the following commands are available (p5-18). The 32-bit port command
- * must be word-swapped with the most significant word written first.
- * This only applies to VME boards.
- */
-#define PORT_RESET      0x00    /* reset 82596 */
-#define PORT_SELFTEST   0x01    /* selftest */
-#define PORT_ALTSCP     0x02    /* alternate SCB address */
-#define PORT_ALTDUMP    0x03    /* Alternate DUMP address */
-
-static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
-
-MODULE_AUTHOR("Richard Hirst");
-MODULE_DESCRIPTION("i82596 driver");
-MODULE_LICENSE("GPL");
-
-module_param(i596_debug, int, 0);
-MODULE_PARM_DESC(i596_debug, "i82596 debug mask");
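The WSWAP* macros above implement the halfword swap the header comment complains about: in the i596's "big-endian" mode, 0x12345678 is stored as 0x56781234, with the 16-bit halves swapped rather than the bytes. The transform is its own inverse, which is why the same macro serves both directions; a self-contained check:

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t wswap(uint32_t x)
        {
                return (x << 16) | (x >> 16);   /* swap the 16-bit halves */
        }

        int main(void)
        {
                assert(wswap(0x12345678u) == 0x56781234u);
                assert(wswap(wswap(0x12345678u)) == 0x12345678u); /* involution */
                puts("wswap ok");
                return 0;
        }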
-
-
-/* Copy frames shorter than rx_copybreak, otherwise pass on up in
- * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
- */
-static int rx_copybreak = 100;
-
-#define PKT_BUF_SZ      1536
-#define MAX_MC_CNT      64
-
-#define I596_TOTAL_SIZE 17
-
-#define I596_NULL ((void *)0xffffffff)
-
-#define CMD_EOL         0x8000  /* The last command of the list, stop. */
-#define CMD_SUSP        0x4000  /* Suspend after doing cmd. */
-#define CMD_INTR        0x2000  /* Interrupt after doing cmd. */
-
-#define CMD_FLEX        0x0008  /* Enable flexible memory model */
-
-enum commands {
-        CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
-        CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
-};
-
-#define STAT_C          0x8000  /* Set to 0 after execution */
-#define STAT_B          0x4000  /* Command being executed */
-#define STAT_OK         0x2000  /* Command executed ok */
-#define STAT_A          0x1000  /* Command aborted */
-
-#define CUC_START       0x0100
-#define CUC_RESUME      0x0200
-#define CUC_SUSPEND     0x0300
-#define CUC_ABORT       0x0400
-#define RX_START        0x0010
-#define RX_RESUME       0x0020
-#define RX_SUSPEND      0x0030
-#define RX_ABORT        0x0040
-
-#define TX_TIMEOUT      (HZ/20)
-
-
-struct i596_reg {
-        unsigned short porthi;
-        unsigned short portlo;
-        unsigned long ca;
-};
-
-#define EOF             0x8000
-#define SIZE_MASK       0x3fff
-
-struct i596_tbd {
-        unsigned short size;
-        unsigned short pad;
-        struct i596_tbd *next;
-        char *data;
-};
-
-/* The command structure has two 'next' pointers; v_next is the address of
- * the next command as seen by the CPU, b_next is the address of the next
- * command as seen by the 82596.  The b_next pointer, as used by the 82596
- * always references the status field of the next command, rather than the
- * v_next field, because the 82596 is unaware of v_next.  It may seem more
- * logical to put v_next at the end of the structure, but we cannot do that
- * because the 82596 expects other fields to be there, depending on command
- * type.
- */
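The v_next/b_next comment above describes a list threaded twice through the same nodes: v_next is a CPU pointer, b_next a bus address aimed at the next command's status word. A sketch of linking one node under that scheme (bus_addr() is an illustrative stand-in for virt_to_bus(); the struct is simplified):

        #include <stdint.h>

        struct cmd {
                struct cmd *v_next;   /* link as the CPU walks the list   */
                uint16_t    status;
                uint16_t    command;
                uint32_t    b_next;   /* link as the 82596 walks the list */
        };

        static uint32_t bus_addr(void *p)        /* stand-in for virt_to_bus() */
        {
                return (uint32_t)(uintptr_t)p;
        }

        static void link_after(struct cmd *tail, struct cmd *new_cmd)
        {
                tail->v_next = new_cmd;
                /* the chip expects the address of the status field, not of
                 * the structure: it has no idea v_next exists */
                tail->b_next = bus_addr(&new_cmd->status);
        }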
-
-struct i596_cmd {
-        struct i596_cmd *v_next;        /* Address from CPUs viewpoint */
-        unsigned short status;
-        unsigned short command;
-        struct i596_cmd *b_next;        /* Address from i596 viewpoint */
-};
-
-struct tx_cmd {
-        struct i596_cmd cmd;
-        struct i596_tbd *tbd;
-        unsigned short size;
-        unsigned short pad;
-        struct sk_buff *skb;            /* So we can free it after tx */
-};
-
-struct tdr_cmd {
-        struct i596_cmd cmd;
-        unsigned short status;
-        unsigned short pad;
-};
-
-struct mc_cmd {
-        struct i596_cmd cmd;
-        short mc_cnt;
-        char mc_addrs[MAX_MC_CNT*6];
-};
-
-struct sa_cmd {
-        struct i596_cmd cmd;
-        char eth_addr[8];
-};
-
-struct cf_cmd {
-        struct i596_cmd cmd;
-        char i596_config[16];
-};
-
-struct i596_rfd {
-        unsigned short stat;
-        unsigned short cmd;
-        struct i596_rfd *b_next;        /* Address from i596 viewpoint */
-        struct i596_rbd *rbd;
-        unsigned short count;
-        unsigned short size;
-        struct i596_rfd *v_next;        /* Address from CPUs viewpoint */
-        struct i596_rfd *v_prev;
-};
-
-struct i596_rbd {
-        unsigned short count;
-        unsigned short zero1;
-        struct i596_rbd *b_next;
-        unsigned char *b_data;          /* Address from i596 viewpoint */
-        unsigned short size;
-        unsigned short zero2;
-        struct sk_buff *skb;
-        struct i596_rbd *v_next;
-        struct i596_rbd *b_addr;        /* This rbd addr from i596 view */
-        unsigned char *v_data;          /* Address from CPUs viewpoint */
-};
-
-#define TX_RING_SIZE 64
-#define RX_RING_SIZE 16
-
-struct i596_scb {
-        unsigned short status;
-        unsigned short command;
-        struct i596_cmd *cmd;
-        struct i596_rfd *rfd;
-        unsigned long crc_err;
-        unsigned long align_err;
-        unsigned long resource_err;
-        unsigned long over_err;
-        unsigned long rcvdt_err;
-        unsigned long short_err;
-        unsigned short t_on;
-        unsigned short t_off;
-};
-
-struct i596_iscp {
-        unsigned long stat;
-        struct i596_scb *scb;
-};
-
-struct i596_scp {
-        unsigned long sysbus;
-        unsigned long pad;
-        struct i596_iscp *iscp;
-};
-
-struct i596_private {
-        volatile struct i596_scp scp;
-        volatile struct i596_iscp iscp;
-        volatile struct i596_scb scb;
-        struct sa_cmd sa_cmd;
-        struct cf_cmd cf_cmd;
-        struct tdr_cmd tdr_cmd;
-        struct mc_cmd mc_cmd;
-        unsigned long stat;
-        int last_restart __attribute__((aligned(4)));
-        struct i596_rfd *rfd_head;
-        struct i596_rbd *rbd_head;
-        struct i596_cmd *cmd_tail;
-        struct i596_cmd *cmd_head;
-        int cmd_backlog;
-        unsigned long last_cmd;
-        struct i596_rfd rfds[RX_RING_SIZE];
-        struct i596_rbd rbds[RX_RING_SIZE];
-        struct tx_cmd tx_cmds[TX_RING_SIZE];
-        struct i596_tbd tbds[TX_RING_SIZE];
-        int next_tx_cmd;
-        spinlock_t lock;
-};
-
-static char init_setup[] =
-{
-        0x8E,   /* length, prefetch on */
-        0xC8,   /* fifo to 8, monitor off */
-#ifdef CONFIG_VME
-        0xc0,   /* don't save bad frames */
-#else
-        0x80,   /* don't save bad frames */
-#endif
-        0x2E,   /* No source address insertion, 8 byte preamble */
-        0x00,   /* priority and backoff defaults */
-        0x60,   /* interframe spacing */
-        0x00,   /* slot time LSB */
-        0xf2,   /* slot time and retries */
-        0x00,   /* promiscuous mode */
-        0x00,   /* collision detect */
-        0x40,   /* minimum frame length */
-        0xff,
-        0x00,
-        0x7f /* *multi IA */ };
-
-static int i596_open(struct net_device *dev);
-static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t i596_interrupt(int irq, void *dev_id);
-static int i596_close(struct net_device *dev);
-static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void i596_tx_timeout (struct net_device *dev);
-static void print_eth(unsigned char *buf, char *str);
-static void set_multicast_list(struct net_device *dev);
-
-static int rx_ring_size = RX_RING_SIZE;
-static int ticks_limit = 25;
-static int max_cmd_backlog = TX_RING_SIZE-1;
-
-
-static inline void CA(struct net_device *dev)
-{
-#ifdef ENABLE_MVME16x_NET
-        if (MACH_IS_MVME16x) {
-                ((struct i596_reg *) dev->base_addr)->ca = 1;
-        }
-#endif
-#ifdef ENABLE_BVME6000_NET
-        if (MACH_IS_BVME6000) {
-                volatile u32 i;
-
-                i = *(volatile u32 *) (dev->base_addr);
-        }
-#endif
-#ifdef ENABLE_APRICOT
-        if (MACH_IS_APRICOT) {
-                outw(0, (short) (dev->base_addr) + 4);
-        }
-#endif
-}
-
-
-static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
-{
-#ifdef ENABLE_MVME16x_NET
-        if (MACH_IS_MVME16x) {
-                struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
-                p->porthi = ((c) | (u32) (x)) & 0xffff;
-                p->portlo = ((c) | (u32) (x)) >> 16;
-        }
-#endif
-#ifdef ENABLE_BVME6000_NET
-        if (MACH_IS_BVME6000) {
-                u32 v = (u32) (c) | (u32) (x);
-                v = ((u32) (v) << 16) | ((u32) (v) >> 16);
-                *(volatile u32 *) dev->base_addr = v;
-                udelay(1);
-                *(volatile u32 *) dev->base_addr = v;
-        }
-#endif
-}
-
-
-static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
-{
-        while (--delcnt && lp->iscp.stat)
-                udelay(10);
-        if (!delcnt) {
-                printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
-                       dev->name, str, lp->scb.status, lp->scb.command);
-                return -1;
-        }
-        else
-                return 0;
-}
-
-
-static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
-{
-        while (--delcnt && lp->scb.command)
-                udelay(10);
-        if (!delcnt) {
-                printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
-                       dev->name, str, lp->scb.status, lp->scb.command);
-                return -1;
-        }
-        else
-                return 0;
-}
-
-
-static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
-{
-        volatile struct i596_cmd *c = cmd;
-
-        while (--delcnt && c->command)
-                udelay(10);
-        if (!delcnt) {
-                printk(KERN_ERR "%s: %s.\n", dev->name, str);
-                return -1;
-        }
-        else
-                return 0;
-}
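wait_istat(), wait_cmd() and wait_cfg() above are all the same bounded-spin idiom: poll a device-owned word with a small delay, give up after delcnt tries, and log the stuck state. The generic form, as a sketch (the delay helper is a portable stand-in for udelay(); names are illustrative):

        #include <stdint.h>
        #include <stdio.h>

        static void spin_delay_us(unsigned us) { (void)us; /* udelay() stand-in */ }

        /* Returns 0 once *reg reads zero, -1 if it never does within 'tries'. */
        static int wait_clear(volatile uint16_t *reg, int tries, const char *what)
        {
                while (--tries && *reg)
                        spin_delay_us(10);
                if (!tries) {
                        fprintf(stderr, "timeout: %s still 0x%04x\n", what, *reg);
                        return -1;
                }
                return 0;
        }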
-
-
-static void i596_display_data(struct net_device *dev)
-{
-        struct i596_private *lp = dev->ml_priv;
-        struct i596_cmd *cmd;
-        struct i596_rfd *rfd;
-        struct i596_rbd *rbd;
-
-        printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
-               &lp->scp, lp->scp.sysbus, lp->scp.iscp);
-        printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
-               &lp->iscp, lp->iscp.stat, lp->iscp.scb);
-        printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
-               " .cmd = %p, .rfd = %p\n",
-               &lp->scb, lp->scb.status, lp->scb.command,
-               lp->scb.cmd, lp->scb.rfd);
-        printk(KERN_ERR "   errors: crc %lx, align %lx, resource %lx,"
-               " over %lx, rcvdt %lx, short %lx\n",
-               lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
-               lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
-        cmd = lp->cmd_head;
-        while (cmd != I596_NULL) {
-                printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
-                       cmd, cmd->status, cmd->command, cmd->b_next);
-                cmd = cmd->v_next;
-        }
-        rfd = lp->rfd_head;
-        printk(KERN_ERR "rfd_head = %p\n", rfd);
-        do {
-                printk(KERN_ERR "   %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
-                       " count %04x\n",
-                       rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
-                       rfd->count);
-                rfd = rfd->v_next;
-        } while (rfd != lp->rfd_head);
-        rbd = lp->rbd_head;
-        printk(KERN_ERR "rbd_head = %p\n", rbd);
-        do {
-                printk(KERN_ERR "   %p .count %04x, b_next %p, b_data %p, size %04x\n",
-                       rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
-                rbd = rbd->v_next;
-        } while (rbd != lp->rbd_head);
-}
-
-
-#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
-static irqreturn_t i596_error(int irq, void *dev_id)
-{
-        struct net_device *dev = dev_id;
-#ifdef ENABLE_MVME16x_NET
-        if (MACH_IS_MVME16x) {
-                volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
-
-                pcc2[0x28] = 1;
-                pcc2[0x2b] = 0x1d;
-        }
-#endif
-#ifdef ENABLE_BVME6000_NET
-        if (MACH_IS_BVME6000) {
-                volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
-
-                *ethirq = 1;
-                *ethirq = 3;
-        }
-#endif
-        printk(KERN_ERR "%s: Error interrupt\n", dev->name);
-        i596_display_data(dev);
-        return IRQ_HANDLED;
-}
-#endif
-
-static inline void remove_rx_bufs(struct net_device *dev)
-{
-        struct i596_private *lp = dev->ml_priv;
-        struct i596_rbd *rbd;
-        int i;
-
-        for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
-                if (rbd->skb == NULL)
-                        break;
-                dev_kfree_skb(rbd->skb);
-                rbd->skb = NULL;
-        }
-}
-
-static inline int init_rx_bufs(struct net_device *dev)
-{
-        struct i596_private *lp = dev->ml_priv;
-        int i;
-        struct i596_rfd *rfd;
-        struct i596_rbd *rbd;
-
-        /* First build the Receive Buffer Descriptor List */
-
-        for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
-                struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
-
-                if (skb == NULL) {
-                        remove_rx_bufs(dev);
-                        return -ENOMEM;
-                }
-
-                skb->dev = dev;
-                rbd->v_next = rbd+1;
-                rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
-                rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
-                rbd->skb = skb;
-                rbd->v_data = skb->data;
-                rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
-                rbd->size = PKT_BUF_SZ;
-#ifdef __mc68000__
-                cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
-#endif
-        }
-        lp->rbd_head = lp->rbds;
-        rbd = lp->rbds + rx_ring_size - 1;
-        rbd->v_next = lp->rbds;
-        rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));
-
-        /* Now build the Receive Frame Descriptor List */
-
-        for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
-                rfd->rbd = I596_NULL;
-                rfd->v_next = rfd+1;
-                rfd->v_prev = rfd-1;
-                rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
-                rfd->cmd = CMD_FLEX;
-        }
-        lp->rfd_head = lp->rfds;
-        lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
-        rfd = lp->rfds;
-        rfd->rbd = lp->rbd_head;
-        rfd->v_prev = lp->rfds + rx_ring_size - 1;
-        rfd = lp->rfds + rx_ring_size - 1;
-        rfd->v_next = lp->rfds;
-        rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
-        rfd->cmd = CMD_EOL|CMD_FLEX;
-
-        return 0;
-}
-
-
-static void rebuild_rx_bufs(struct net_device *dev)
-{
-        struct i596_private *lp = dev->ml_priv;
-        int i;
-
-        /* Ensure rx frame/buffer descriptors are tidy */
-
-        for (i = 0; i < rx_ring_size; i++) {
-                lp->rfds[i].rbd = I596_NULL;
-                lp->rfds[i].cmd = CMD_FLEX;
-        }
-        lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
-        lp->rfd_head = lp->rfds;
-        lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
-        lp->rbd_head = lp->rbds;
-        lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
-}
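init_rx_bufs() above builds its descriptor rings as plain arrays and then bends the last element back to the first, in both the CPU view and the bus view. The wrap step on its own (a sketch; bus_addr() is again an illustrative stand-in for virt_to_bus()):

        #include <stdint.h>

        struct rfd {
                struct rfd *v_next;   /* CPU view of the ring   */
                uint32_t    b_next;   /* 82596 view of the ring */
        };

        static uint32_t bus_addr(void *p) { return (uint32_t)(uintptr_t)p; }

        static void link_ring(struct rfd ring[], int n)
        {
                for (int i = 0; i < n - 1; i++) {
                        ring[i].v_next = &ring[i + 1];
                        ring[i].b_next = bus_addr(&ring[i + 1]);
                }
                ring[n - 1].v_next = &ring[0];           /* bend the tail back */
                ring[n - 1].b_next = bus_addr(&ring[0]); /* ...in both views   */
        }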
-
-
-static int init_i596_mem(struct net_device *dev)
-{
-        struct i596_private *lp = dev->ml_priv;
-#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
-        short ioaddr = dev->base_addr;
-#endif
-        unsigned long flags;
-
-        MPU_PORT(dev, PORT_RESET, NULL);
-
-        udelay(100);            /* Wait 100us - seems to help */
-
-#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
-#ifdef ENABLE_MVME16x_NET
-        if (MACH_IS_MVME16x) {
-                volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
-
-                /* Disable all ints for now */
-                pcc2[0x28] = 1;
-                pcc2[0x2a] = 0x48;
-                /* Following disables snooping.  Snooping is not required
-                 * as we make appropriate use of non-cached pages for
-                 * shared data, and cache_push/cache_clear.
-                 */
-                pcc2[0x2b] = 0x08;
-        }
-#endif
-#ifdef ENABLE_BVME6000_NET
-        if (MACH_IS_BVME6000) {
-                volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
-
-                *ethirq = 1;
-        }
-#endif
-
-        /* change the scp address */
-
-        MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
-
-#elif defined(ENABLE_APRICOT)
-
-        {
-                u32 scp = virt_to_bus(&lp->scp);
-
-                /* change the scp address */
-                outw(0, ioaddr);
-                outw(0, ioaddr);
-                outb(4, ioaddr + 0xf);
-                outw(scp | 2, ioaddr);
-                outw(scp >> 16, ioaddr);
-        }
-#endif
-
-        lp->last_cmd = jiffies;
-
-#ifdef ENABLE_MVME16x_NET
-        if (MACH_IS_MVME16x)
-                lp->scp.sysbus = 0x00000054;
-#endif
-#ifdef ENABLE_BVME6000_NET
-        if (MACH_IS_BVME6000)
-                lp->scp.sysbus = 0x0000004c;
-#endif
-#ifdef ENABLE_APRICOT
-        if (MACH_IS_APRICOT)
-                lp->scp.sysbus = 0x00440000;
-#endif
-
-        lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
-        lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
-        lp->iscp.stat = ISCP_BUSY;
-        lp->cmd_backlog = 0;
-
-        lp->cmd_head = lp->scb.cmd = I596_NULL;
-
-#ifdef ENABLE_BVME6000_NET
-        if (MACH_IS_BVME6000) {
-                lp->scb.t_on  = 7 * 25;
-                lp->scb.t_off = 1 * 25;
-        }
-#endif
-
-        DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
-
-#if defined(ENABLE_APRICOT)
-        (void) inb(ioaddr + 0x10);
-        outb(4, ioaddr + 0xf);
-#endif
-        CA(dev);
-
-        if (wait_istat(dev,lp,1000,"initialization timed out"))
-                goto failed;
-        DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));
-
-        /* Ensure rx frame/buffer descriptors are tidy */
-        rebuild_rx_bufs(dev);
-        lp->scb.command = 0;
-
-#ifdef ENABLE_MVME16x_NET
-        if (MACH_IS_MVME16x) {
-                volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
-
-                /* Enable ints, etc. now */
-                pcc2[0x2a] = 0x55;      /* Edge sensitive */
-                pcc2[0x2b] = 0x15;
-        }
-#endif
-#ifdef ENABLE_BVME6000_NET
-        if (MACH_IS_BVME6000) {
-                volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
-
-                *ethirq = 3;
-        }
-#endif
-
-
-        DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
-        memcpy(lp->cf_cmd.i596_config, init_setup, 14);
-        lp->cf_cmd.cmd.command = CmdConfigure;
-        i596_add_cmd(dev, &lp->cf_cmd.cmd);
-
-        DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
-        memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
-        lp->sa_cmd.cmd.command = CmdSASetup;
-        i596_add_cmd(dev, &lp->sa_cmd.cmd);
-
-        DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
-        lp->tdr_cmd.cmd.command = CmdTDR;
-        i596_add_cmd(dev, &lp->tdr_cmd.cmd);
-
-        spin_lock_irqsave (&lp->lock, flags);
-
-        if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
-                spin_unlock_irqrestore (&lp->lock, flags);
-                goto failed;
-        }
-        DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
-        lp->scb.command = RX_START;
-        CA(dev);
-
-        spin_unlock_irqrestore (&lp->lock, flags);
-
-        if (wait_cmd(dev,lp,1000,"RX_START not processed"))
-                goto failed;
-        DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
-        return 0;
-
-failed:
-        printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
-        MPU_PORT(dev, PORT_RESET, NULL);
-        return -1;
-}
-
-static inline int i596_rx(struct net_device *dev)
-{
-        struct i596_private *lp = dev->ml_priv;
-        struct i596_rfd *rfd;
-        struct i596_rbd *rbd;
-        int frames = 0;
-
-        DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
-                        lp->rfd_head, lp->rbd_head));
-
-        rfd = lp->rfd_head;     /* Ref next frame to check */
-
-        while ((rfd->stat) & STAT_C) {  /* Loop while complete frames */
-                if (rfd->rbd == I596_NULL)
-                        rbd = I596_NULL;
-                else if (rfd->rbd == lp->rbd_head->b_addr)
-                        rbd = lp->rbd_head;
-                else {
-                        printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
-                        /* XXX Now what? */
-                        rbd = I596_NULL;
-                }
-                DEB(DEB_RXFRAME, printk(KERN_DEBUG "  rfd %p, rfd.rbd %p, rfd.stat %04x\n",
-                        rfd, rfd->rbd, rfd->stat));
-
-                if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
-                        /* a good frame */
-                        int pkt_len = rbd->count & 0x3fff;
-                        struct sk_buff *skb = rbd->skb;
-                        int rx_in_place = 0;
-
-                        DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
-                        frames++;
-
-                        /* Check if the packet is long enough to just accept
-                         * without copying to a properly sized skbuff.
-                         */
-
-                        if (pkt_len > rx_copybreak) {
-                                struct sk_buff *newskb;
-
-                                /* Get fresh skbuff to replace filled one. */
-                                newskb = dev_alloc_skb(PKT_BUF_SZ);
-                                if (newskb == NULL) {
-                                        skb = NULL;     /* drop pkt */
-                                        goto memory_squeeze;
-                                }
-                                /* Pass up the skb already on the Rx ring. */
-                                skb_put(skb, pkt_len);
-                                rx_in_place = 1;
-                                rbd->skb = newskb;
-                                newskb->dev = dev;
-                                rbd->v_data = newskb->data;
-                                rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
-#ifdef __mc68000__
-                                cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
-#endif
-                        }
-                        else
-                                skb = dev_alloc_skb(pkt_len + 2);
-memory_squeeze:
-                        if (skb == NULL) {
-                                /* XXX tulip.c can defer packets here!! */
-                                printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
-                                dev->stats.rx_dropped++;
-                        }
-                        else {
-                                if (!rx_in_place) {
-                                        /* 16 byte align the data fields */
-                                        skb_reserve(skb, 2);
-                                        memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
-                                }
-                                skb->protocol=eth_type_trans(skb,dev);
-                                skb->len = pkt_len;
-#ifdef __mc68000__
-                                cache_clear(virt_to_phys(rbd->skb->data),
-                                                pkt_len);
-#endif
-                                netif_rx(skb);
-                                dev->stats.rx_packets++;
-                                dev->stats.rx_bytes+=pkt_len;
-                        }
-                }
-                else {
-                        DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
-                                        dev->name, rfd->stat));
-                        dev->stats.rx_errors++;
-                        if ((rfd->stat) & 0x0001)
-                                dev->stats.collisions++;
-                        if ((rfd->stat) & 0x0080)
-                                dev->stats.rx_length_errors++;
-                        if ((rfd->stat) & 0x0100)
-                                dev->stats.rx_over_errors++;
-                        if ((rfd->stat) & 0x0200)
-                                dev->stats.rx_fifo_errors++;
-                        if ((rfd->stat) & 0x0400)
-                                dev->stats.rx_frame_errors++;
-                        if ((rfd->stat) & 0x0800)
-                                dev->stats.rx_crc_errors++;
-                        if ((rfd->stat) & 0x1000)
-                                dev->stats.rx_length_errors++;
-                }
-
-                /* Clear the buffer descriptor count and EOF + F flags */
-
-                if (rbd != I596_NULL && (rbd->count & 0x4000)) {
-                        rbd->count = 0;
-                        lp->rbd_head = rbd->v_next;
-                }
-
-                /* Tidy the frame descriptor, marking it as end of list */
-
-                rfd->rbd = I596_NULL;
-                rfd->stat = 0;
-                rfd->cmd = CMD_EOL|CMD_FLEX;
-                rfd->count = 0;
-
-                /* Remove end-of-list from old end descriptor */
-
-                rfd->v_prev->cmd = CMD_FLEX;
-
-                /* Update record of next frame descriptor to process */
-
-                lp->scb.rfd = rfd->b_next;
-                lp->rfd_head = rfd->v_next;
-                rfd = lp->rfd_head;
-        }
-
-        DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));
-
-        return 0;
-}
-
-
-static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
-{
-        struct i596_cmd *ptr;
-
-        while (lp->cmd_head != I596_NULL) {
-                ptr = lp->cmd_head;
-                lp->cmd_head = ptr->v_next;
-                lp->cmd_backlog--;
-
-                switch ((ptr->command) & 0x7) {
-                case CmdTx:
-                        {
-                                struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
-                                struct sk_buff *skb = tx_cmd->skb;
-
-                                dev_kfree_skb(skb);
-
-                                dev->stats.tx_errors++;
-                                dev->stats.tx_aborted_errors++;
-
-                                ptr->v_next = ptr->b_next = I596_NULL;
-                                tx_cmd->cmd.command = 0;  /* Mark as free */
-                                break;
-                        }
-                default:
-                        ptr->v_next = ptr->b_next = I596_NULL;
-                }
-        }
-
-        wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
-        lp->scb.cmd = I596_NULL;
-}
-
-static void i596_reset(struct net_device *dev, struct i596_private *lp,
-                       int ioaddr)
-{
-        unsigned long flags;
-
-        DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));
-
-        spin_lock_irqsave (&lp->lock, flags);
-
-        wait_cmd(dev,lp,100,"i596_reset timed out");
-
-        netif_stop_queue(dev);
-
-        lp->scb.command = CUC_ABORT | RX_ABORT;
-        CA(dev);
-
-        /* wait for shutdown */
-        wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
-        spin_unlock_irqrestore (&lp->lock, flags);
-
-        i596_cleanup_cmd(dev,lp);
-        i596_rx(dev);
-
-        netif_start_queue(dev);
-        init_i596_mem(dev);
-}
-
-static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
-{
-        struct i596_private *lp = dev->ml_priv;
-        int ioaddr = dev->base_addr;
-        unsigned long flags;
-
-        DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));
-
-        cmd->status = 0;
-        cmd->command |= (CMD_EOL | CMD_INTR);
-        cmd->v_next = cmd->b_next = I596_NULL;
-
-        spin_lock_irqsave (&lp->lock, flags);
-
-        if (lp->cmd_head != I596_NULL) {
-                lp->cmd_tail->v_next = cmd;
-                lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
-        } else {
-                lp->cmd_head = cmd;
-                wait_cmd(dev,lp,100,"i596_add_cmd timed out");
-                lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
-                lp->scb.command = CUC_START;
-                CA(dev);
-        }
-        lp->cmd_tail = cmd;
-        lp->cmd_backlog++;
-
-        spin_unlock_irqrestore (&lp->lock, flags);
-
-        if (lp->cmd_backlog > max_cmd_backlog) {
-                unsigned long tickssofar = jiffies - lp->last_cmd;
-
-                if (tickssofar < ticks_limit)
-                        return;
-
-                printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);
-
-                i596_reset(dev, lp, ioaddr);
-        }
-}
-
-static int i596_open(struct net_device *dev)
-{
-        int res = 0;
-
-        DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));
-
-        if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
-                printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
-                return -EAGAIN;
-        }
-#ifdef ENABLE_MVME16x_NET
-        if (MACH_IS_MVME16x) {
-                if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) {
-                        res = -EAGAIN;
-                        goto err_irq_dev;
-                }
-        }
-#endif
-        res = init_rx_bufs(dev);
-        if (res)
-                goto err_irq_56;
-
-        netif_start_queue(dev);
-
-        if (init_i596_mem(dev)) {
-                res = -EAGAIN;
-                goto err_queue;
-        }
-
-        return 0;
-
-err_queue:
-        netif_stop_queue(dev);
-        remove_rx_bufs(dev);
-err_irq_56:
-#ifdef ENABLE_MVME16x_NET
-        free_irq(0x56, dev);
-err_irq_dev:
-#endif
-        free_irq(dev->irq, dev);
-
-        return res;
-}
-
-static void i596_tx_timeout (struct net_device *dev)
-{
-        struct i596_private *lp = dev->ml_priv;
-        int ioaddr = dev->base_addr;
-
-        /* Transmitter timeout, serious problems. */
-        DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
-                        dev->name));
-
-        dev->stats.tx_errors++;
-
-        /* Try to restart the adaptor */
-        if (lp->last_restart == dev->stats.tx_packets) {
-                DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
-                /* Shutdown and restart */
-                i596_reset (dev, lp, ioaddr);
-        } else {
-                /* Issue a channel attention signal */
-                DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
-                lp->scb.command = CUC_START | RX_START;
-                CA (dev);
-                lp->last_restart = dev->stats.tx_packets;
-        }
-
-        dev->trans_start = jiffies;     /* prevent tx timeout */
-        netif_wake_queue (dev);
-}
-
-static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-        struct i596_private *lp = dev->ml_priv;
-        struct tx_cmd *tx_cmd;
-        struct i596_tbd *tbd;
-        short length = skb->len;
-
-        DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
-                        dev->name, skb->len, skb->data));
-
-        if (skb->len < ETH_ZLEN) {
-                if (skb_padto(skb, ETH_ZLEN))
-                        return NETDEV_TX_OK;
-                length = ETH_ZLEN;
-        }
-        netif_stop_queue(dev);
-
-        tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
-        tbd = lp->tbds + lp->next_tx_cmd;
-
-        if (tx_cmd->cmd.command) {
-                printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
-                       dev->name);
-                dev->stats.tx_dropped++;
-
-                dev_kfree_skb(skb);
-        } else {
-                if (++lp->next_tx_cmd == TX_RING_SIZE)
-                        lp->next_tx_cmd = 0;
-                tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
-                tbd->next = I596_NULL;
-
-                tx_cmd->cmd.command = CMD_FLEX | CmdTx;
-                tx_cmd->skb = skb;
-
-                tx_cmd->pad = 0;
-                tx_cmd->size = 0;
-                tbd->pad = 0;
-                tbd->size = EOF | length;
-
-                tbd->data = WSWAPchar(virt_to_bus(skb->data));
-
-#ifdef __mc68000__
-                cache_push(virt_to_phys(skb->data), length);
-#endif
-                DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
-                i596_add_cmd(dev, &tx_cmd->cmd);
-
-                dev->stats.tx_packets++;
-                dev->stats.tx_bytes += length;
-        }
-
-        netif_start_queue(dev);
-
-        return NETDEV_TX_OK;
-}
-
-static void print_eth(unsigned char *add, char *str)
-{
-        printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
-               add, add + 6, add, add[12], add[13], str);
-}
-
-static int io = 0x300;
-static int irq = 10;
-
-static const struct net_device_ops i596_netdev_ops = {
-        .ndo_open               = i596_open,
-        .ndo_stop               = i596_close,
-        .ndo_start_xmit         = i596_start_xmit,
-        .ndo_set_multicast_list = set_multicast_list,
-        .ndo_tx_timeout         = i596_tx_timeout,
-        .ndo_change_mtu         = eth_change_mtu,
-        .ndo_set_mac_address    = eth_mac_addr,
-        .ndo_validate_addr      = eth_validate_addr,
-};
-
-struct net_device * __init i82596_probe(int unit)
-{
-        struct net_device *dev;
-        int i;
-        struct i596_private *lp;
-        char eth_addr[8];
-        static int probed;
-        int err;
-
-        if (probed)
-                return ERR_PTR(-ENODEV);
-        probed++;
-
-        dev = alloc_etherdev(0);
-        if (!dev)
-                return ERR_PTR(-ENOMEM);
-
-        if (unit >= 0) {
-                sprintf(dev->name, "eth%d", unit);
-                netdev_boot_setup_check(dev);
-        } else {
-                dev->base_addr = io;
-                dev->irq = irq;
-        }
-
-#ifdef ENABLE_MVME16x_NET
-        if (MACH_IS_MVME16x) {
-                if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
-                        printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
-                        err = -ENODEV;
-                        goto out;
-                }
-                memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */
-                dev->base_addr = MVME_I596_BASE;
-                dev->irq = (unsigned) MVME16x_IRQ_I596;
-                goto found;
-        }
-#endif
-#ifdef ENABLE_BVME6000_NET
-        if (MACH_IS_BVME6000) {
-                volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
-                unsigned char msr = rtc[3];
-                int i;
-
-                rtc[3] |= 0x80;
-                for (i = 0; i < 6; i++)
-                        eth_addr[i] = rtc[i * 4 + 7];   /* Stored in RTC RAM at offset 1 */
-                rtc[3] = msr;
-                dev->base_addr = BVME_I596_BASE;
-                dev->irq = (unsigned) BVME_IRQ_I596;
-                goto found;
-        }
-#endif
-#ifdef ENABLE_APRICOT
-        {
-                int checksum = 0;
-                int ioaddr = 0x300;
-
-                /* this is easy: the ethernet interface can only be at 0x300 */
-                /* first check nothing is already registered here */
-
-                if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) {
-                        printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
-                        err = -EBUSY;
-                        goto out;
-                }
-
-                dev->base_addr = ioaddr;
-
-                for (i = 0; i < 8; i++) {
-                        eth_addr[i] = inb(ioaddr + 8 + i);
-                        checksum += eth_addr[i];
-                }
-
-                /* checksum is a multiple of 0x100, got this wrong first time
-                   some machines have 0x100, some 0x200.
-                   The DOS driver doesn't even bother with the checksum.
-                   Some other boards trip the checksum.. but then appear as
-                   ether address 0. Trap these - AC */
-
-                if ((checksum % 0x100) ||
-                    (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
-                        err = -ENODEV;
-                        goto out1;
-                }
-
-                dev->irq = 10;
-                goto found;
-        }
-#endif
-        err = -ENODEV;
-        goto out;
-
-found:
-        dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
-        if (!dev->mem_start) {
-                err = -ENOMEM;
-                goto out1;
-        }
-
-        DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
-
-        for (i = 0; i < 6; i++)
-                DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
-
-        DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
-
-        DEB(DEB_PROBE,printk(KERN_INFO "%s", version));
-
-        /* The 82596-specific entries in the device structure. */
*/ - dev->netdev_ops = &i596_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - - dev->ml_priv = (void *)(dev->mem_start); - - lp = dev->ml_priv; - DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), " - "lp->scb at 0x%08lx\n", - dev->name, (unsigned long)lp, - sizeof(struct i596_private), (unsigned long)&lp->scb)); - memset((void *) lp, 0, sizeof(struct i596_private)); - -#ifdef __mc68000__ - cache_push(virt_to_phys((void *)(dev->mem_start)), 4096); - cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096); - kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER); -#endif - lp->scb.command = 0; - lp->scb.cmd = I596_NULL; - lp->scb.rfd = I596_NULL; - spin_lock_init(&lp->lock); - - err = register_netdev(dev); - if (err) - goto out2; - return dev; -out2: -#ifdef __mc68000__ - /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, - * XXX which may be invalid (CONFIG_060_WRITETHROUGH) - */ - kernel_set_cachemode((void *)(dev->mem_start), 4096, - IOMAP_FULL_CACHING); -#endif - free_page ((u32)(dev->mem_start)); -out1: -#ifdef ENABLE_APRICOT - release_region(dev->base_addr, I596_TOTAL_SIZE); -#endif -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static irqreturn_t i596_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct i596_private *lp; - short ioaddr; - unsigned short status, ack_cmd = 0; - int handled = 0; - -#ifdef ENABLE_BVME6000_NET - if (MACH_IS_BVME6000) { - if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) { - i596_error(irq, dev_id); - return IRQ_HANDLED; - } - } -#endif - if (dev == NULL) { - printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq); - return IRQ_NONE; - } - - ioaddr = dev->base_addr; - lp = dev->ml_priv; - - spin_lock (&lp->lock); - - wait_cmd(dev,lp,100,"i596 interrupt, timeout"); - status = lp->scb.status; - - DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n", - dev->name, irq, status)); - - ack_cmd = status & 0xf000; - - if ((status & 0x8000) || (status & 0x2000)) { - struct i596_cmd *ptr; - - handled = 1; - if ((status & 0x8000)) - DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name)); - if ((status & 0x2000)) - DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700)); - - while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) { - ptr = lp->cmd_head; - - DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n", - lp->cmd_head->status, lp->cmd_head->command)); - lp->cmd_head = ptr->v_next; - lp->cmd_backlog--; - - switch ((ptr->command) & 0x7) { - case CmdTx: - { - struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; - struct sk_buff *skb = tx_cmd->skb; - - if ((ptr->status) & STAT_OK) { - DEB(DEB_TXADDR,print_eth(skb->data, "tx-done")); - } else { - dev->stats.tx_errors++; - if ((ptr->status) & 0x0020) - dev->stats.collisions++; - if (!((ptr->status) & 0x0040)) - dev->stats.tx_heartbeat_errors++; - if ((ptr->status) & 0x0400) - dev->stats.tx_carrier_errors++; - if ((ptr->status) & 0x0800) - dev->stats.collisions++; - if ((ptr->status) & 0x1000) - dev->stats.tx_aborted_errors++; - } - - dev_kfree_skb_irq(skb); - - tx_cmd->cmd.command = 0; /* Mark free */ - break; - } - case CmdTDR: - { - unsigned short status = ((struct tdr_cmd *)ptr)->status; - - if (status & 0x8000) { - DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name)); - } else { - if (status & 0x4000) - printk(KERN_ERR "%s: Transceiver problem.\n", dev->name); - if (status 
& 0x2000) - printk(KERN_ERR "%s: Termination problem.\n", dev->name); - if (status & 0x1000) - printk(KERN_ERR "%s: Short circuit.\n", dev->name); - - DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff)); - } - break; - } - case CmdConfigure: - case CmdMulticastList: - /* Zap command so set_multicast_list() knows it is free */ - ptr->command = 0; - break; - } - ptr->v_next = ptr->b_next = I596_NULL; - lp->last_cmd = jiffies; - } - - ptr = lp->cmd_head; - while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) { - ptr->command &= 0x1fff; - ptr = ptr->v_next; - } - - if ((lp->cmd_head != I596_NULL)) - ack_cmd |= CUC_START; - lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status)); - } - if ((status & 0x1000) || (status & 0x4000)) { - if ((status & 0x4000)) - DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name)); - i596_rx(dev); - /* Only RX_START if stopped - RGH 07-07-96 */ - if (status & 0x1000) { - if (netif_running(dev)) { - DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status)); - ack_cmd |= RX_START; - dev->stats.rx_errors++; - dev->stats.rx_fifo_errors++; - rebuild_rx_bufs(dev); - } - } - } - wait_cmd(dev,lp,100,"i596 interrupt, timeout"); - lp->scb.command = ack_cmd; - -#ifdef ENABLE_MVME16x_NET - if (MACH_IS_MVME16x) { - /* Ack the interrupt */ - - volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; - - pcc2[0x2a] |= 0x08; - } -#endif -#ifdef ENABLE_BVME6000_NET - if (MACH_IS_BVME6000) { - volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; - - *ethirq = 1; - *ethirq = 3; - } -#endif -#ifdef ENABLE_APRICOT - (void) inb(ioaddr + 0x10); - outb(4, ioaddr + 0xf); -#endif - CA(dev); - - DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); - - spin_unlock (&lp->lock); - return IRQ_RETVAL(handled); -} - -static int i596_close(struct net_device *dev) -{ - struct i596_private *lp = dev->ml_priv; - unsigned long flags; - - netif_stop_queue(dev); - - DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n", - dev->name, lp->scb.status)); - - spin_lock_irqsave(&lp->lock, flags); - - wait_cmd(dev,lp,100,"close1 timed out"); - lp->scb.command = CUC_ABORT | RX_ABORT; - CA(dev); - - wait_cmd(dev,lp,100,"close2 timed out"); - - spin_unlock_irqrestore(&lp->lock, flags); - DEB(DEB_STRUCT,i596_display_data(dev)); - i596_cleanup_cmd(dev,lp); - -#ifdef ENABLE_MVME16x_NET - if (MACH_IS_MVME16x) { - volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; - - /* Disable all ints */ - pcc2[0x28] = 1; - pcc2[0x2a] = 0x40; - pcc2[0x2b] = 0x40; /* Set snooping bits now! */ - } -#endif -#ifdef ENABLE_BVME6000_NET - if (MACH_IS_BVME6000) { - volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; - - *ethirq = 1; - } -#endif - -#ifdef ENABLE_MVME16x_NET - free_irq(0x56, dev); -#endif - free_irq(dev->irq, dev); - remove_rx_bufs(dev); - - return 0; -} - -/* - * Set or clear the multicast filter for this adaptor. - */ - -static void set_multicast_list(struct net_device *dev) -{ - struct i596_private *lp = dev->ml_priv; - int config = 0, cnt; - - DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n", - dev->name, netdev_mc_count(dev), - dev->flags & IFF_PROMISC ? "ON" : "OFF", - dev->flags & IFF_ALLMULTI ? 
"ON" : "OFF")); - - if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out")) - return; - - if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) { - lp->cf_cmd.i596_config[8] |= 0x01; - config = 1; - } - if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) { - lp->cf_cmd.i596_config[8] &= ~0x01; - config = 1; - } - if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) { - lp->cf_cmd.i596_config[11] &= ~0x20; - config = 1; - } - if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) { - lp->cf_cmd.i596_config[11] |= 0x20; - config = 1; - } - if (config) { - lp->cf_cmd.cmd.command = CmdConfigure; - i596_add_cmd(dev, &lp->cf_cmd.cmd); - } - - cnt = netdev_mc_count(dev); - if (cnt > MAX_MC_CNT) - { - cnt = MAX_MC_CNT; - printk(KERN_ERR "%s: Only %d multicast addresses supported", - dev->name, cnt); - } - - if (!netdev_mc_empty(dev)) { - struct netdev_hw_addr *ha; - unsigned char *cp; - struct mc_cmd *cmd; - - if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out")) - return; - cmd = &lp->mc_cmd; - cmd->cmd.command = CmdMulticastList; - cmd->mc_cnt = cnt * ETH_ALEN; - cp = cmd->mc_addrs; - netdev_for_each_mc_addr(ha, dev) { - if (!cnt--) - break; - memcpy(cp, ha->addr, ETH_ALEN); - if (i596_debug > 1) - DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n", - dev->name, cp)); - cp += ETH_ALEN; - } - i596_add_cmd(dev, &cmd->cmd); - } -} - -#ifdef MODULE -static struct net_device *dev_82596; - -#ifdef ENABLE_APRICOT -module_param(irq, int, 0); -MODULE_PARM_DESC(irq, "Apricot IRQ number"); -#endif - -static int debug = -1; -module_param(debug, int, 0); -MODULE_PARM_DESC(debug, "i82596 debug mask"); - -int __init init_module(void) -{ - if (debug >= 0) - i596_debug = debug; - dev_82596 = i82596_probe(-1); - if (IS_ERR(dev_82596)) - return PTR_ERR(dev_82596); - return 0; -} - -void __exit cleanup_module(void) -{ - unregister_netdev(dev_82596); -#ifdef __mc68000__ - /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, - * XXX which may be invalid (CONFIG_060_WRITETHROUGH) - */ - - kernel_set_cachemode((void *)(dev_82596->mem_start), 4096, - IOMAP_FULL_CACHING); -#endif - free_page ((u32)(dev_82596->mem_start)); -#ifdef ENABLE_APRICOT - /* If we don't do this, we can't re-insmod it later. */ - release_region(dev_82596->base_addr, I596_TOTAL_SIZE); -#endif - free_netdev(dev_82596); -} - -#endif /* MODULE */ diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 6499186..d7d0b35 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -306,47 +306,6 @@ config MACMACE say Y and read the Ethernet-HOWTO, available from . -config MVME16x_NET - tristate "MVME16x Ethernet support" - depends on MVME16x - help - This is the driver for the Ethernet interface on the Motorola - MVME162, 166, 167, 172 and 177 boards. Say Y here to include the - driver for this chip in your kernel. - To compile this driver as a module, choose M here. - -config BVME6000_NET - tristate "BVME6000 Ethernet support" - depends on BVME6000 - help - This is the driver for the Ethernet interface on BVME4000 and - BVME6000 VME boards. Say Y here to include the driver for this chip - in your kernel. - To compile this driver as a module, choose M here. - -config SUN3_82586 - bool "Sun3 on-board Intel 82586 support" - depends on SUN3 - help - This driver enables support for the on-board Intel 82586 based - Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards. 
Note - that this driver does not support 82586-based adapters on additional - VME boards. - -config LASI_82596 - tristate "Lasi ethernet" - depends on GSC - help - Say Y here to support the builtin Intel 82596 ethernet controller - found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet. - -config SNI_82596 - tristate "SNI RM ethernet" - depends on NET_ETHERNET && SNI_RM - help - Say Y here to support the on-board Intel 82596 ethernet controller - built into SNI RM machines. - config KORINA tristate "Korina (IDT RC32434) Ethernet support" depends on NET_ETHERNET && MIKROTIK_RB532 @@ -462,63 +421,6 @@ config SUNVNET help Support for virtual network devices under Sun Logical Domains. -config EL2 - tristate "3c503 \"EtherLink II\" support" - depends on ISA - select CRC32 - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called 3c503. - -config ELPLUS - tristate "3c505 \"EtherLink Plus\" support" - depends on ISA && ISA_DMA_API - ---help--- - Information about this network (Ethernet) card can be found in - . If you have a card of - this type, say Y and read the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called 3c505. - -config EL16 - tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)" - depends on ISA && EXPERIMENTAL - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called 3c507. - -config ELMC - tristate "3c523 \"EtherLink/MC\" support" - depends on MCA_LEGACY - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called 3c523. - -config ELMC_II - tristate "3c527 \"EtherLink/MC 32\" support (EXPERIMENTAL)" - depends on MCA && MCA_LEGACY - ---help--- - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called 3c527. - config BFIN_MAC tristate "Blackfin on-chip MAC support" depends on NET_ETHERNET && (BF516 || BF518 || BF526 || BF527 || BF536 || BF537) @@ -684,7 +586,7 @@ config NET_VENDOR_RACAL depends on ISA help If you have a network (Ethernet) card belonging to this class, such - as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO, + as the NI5010, say Y and read the Ethernet-HOWTO, available from . Note that the answer to this question doesn't directly affect the @@ -704,17 +606,6 @@ config NI5010 To compile this driver as a module, choose M here. The module will be called ni5010. -config NI52 - tristate "NI5210 support" - depends on NET_VENDOR_RACAL && ISA - help - If you have a network (Ethernet) card of this type, say Y and read - the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called ni52. - config DNET tristate "Dave ethernet support (DNET)" depends on NET_ETHERNET && HAS_IOMEM @@ -782,41 +673,6 @@ config EWRK3 To compile this driver as a module, choose M here. The module will be called ewrk3. 
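The Kconfig entries removed above (MVME16x_NET, BVME6000_NET, SUN3_82586, LASI_82596, SNI_82596) all cover boards built around Intel's 8258x LAN coprocessors, whose command unit is driven by the append-and-kick pattern visible in the deleted i596_add_cmd() earlier in this patch. A minimal, compilable sketch of that pattern follows; the struct cu/struct cmd types are hypothetical stand-ins (the real driver chains struct i596_cmd under lp->lock and kicks the chip with CUC_START plus a channel-attention write), and the backlog/tick limits are illustrative, not the driver's max_cmd_backlog and ticks_limit:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in types: the real code chains struct i596_cmd
 * entries and asserts channel attention (CA) instead of setting a flag. */
struct cmd {
	struct cmd *next;
};

struct cu {
	struct cmd *head, *tail;
	int backlog;		/* commands issued but not yet completed */
	int started;		/* stands in for CUC_START + CA(dev) */
	long last_cmd;		/* "jiffies" when the backlog last drained */
};

/* Append a command; only an idle command unit needs a kick.  The real
 * i596_add_cmd() does all of this under a spinlock with IRQs off. */
static void cu_add_cmd(struct cu *cu, struct cmd *c, long now)
{
	c->next = NULL;
	if (!cu->head) {
		cu->head = cu->tail = c;
		cu->started = 1;
	} else {
		cu->tail->next = c;
		cu->tail = c;
	}
	cu->backlog++;

	/* Watchdog: a deep backlog that has not drained for a while means
	 * the chip has wedged, so the driver resets it (i596_reset). */
	if (cu->backlog > 16 && now - cu->last_cmd >= 100)
		printf("command unit timed out, resetting\n");
}

int main(void)
{
	struct cu cu = { 0 };
	struct cmd c;

	cu_add_cmd(&cu, &c, 0);
	printf("backlog %d, started %d\n", cu.backlog, cu.started);
	return 0;
}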
-config EEXPRESS - tristate "EtherExpress 16 support" - depends on NET_ISA - ---help--- - If you have an EtherExpress16 network (Ethernet) card, say Y and - read the Ethernet-HOWTO, available from - . Note that the Intel - EtherExpress16 card used to be regarded as a very poor choice - because the driver was very unreliable. We now have a new driver - that should do better. - - To compile this driver as a module, choose M here. The module - will be called eexpress. - -config EEXPRESS_PRO - tristate "EtherExpressPro support/EtherExpress 10 (i82595) support" - depends on NET_ISA - ---help--- - If you have a network (Ethernet) card of this type, say Y. This - driver supports Intel i82595{FX,TX} based boards. Note however - that the EtherExpress PRO/100 Ethernet card has its own separate - driver. Please read the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called eepro. - -config LP486E - tristate "LP486E on board Ethernet" - depends on NET_ISA - help - Say Y here to support the 82596-based on-board Ethernet controller - for the Panther motherboard, which is one of the two shipped in the - Intel Professional Workstation. - config ETH16I tristate "ICL EtherTeam 16i/32 support" depends on NET_ISA @@ -828,16 +684,6 @@ config ETH16I To compile this driver as a module, choose M here. The module will be called eth16i. -config ZNET - tristate "Zenith Z-Note support (EXPERIMENTAL)" - depends on NET_ISA && EXPERIMENTAL && ISA_DMA_API - help - The Zenith Z-Note notebook computer has a built-in network - (Ethernet) card, and this is the Linux driver for it. Note that the - IBM Thinkpad 300 is compatible with the Z-Note and is also supported - by this driver. Read the Ethernet-HOWTO, available from - . - config SEEQ8005 tristate "SEEQ8005 support (EXPERIMENTAL)" depends on NET_ISA && EXPERIMENTAL @@ -915,17 +761,6 @@ config KSZ884X_PCI To compile this driver as a module, choose M here. The module will be called ksz884x. -config APRICOT - tristate "Apricot Xen-II on board Ethernet" - depends on NET_PCI && ISA - help - If you have a network (Ethernet) controller of this type, say Y and - read the Ethernet-HOWTO, available from - . - - To compile this driver as a module, choose M here. The module - will be called apricot. 
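The APRICOT option removed above corresponds to the Apricot branch of the deleted i82596_probe(), which validates the board by summing eight bytes read from ioaddr + 8 and insisting on the 00:00:49 station-address prefix. A standalone sketch of that validation, with the inb() port reads replaced by an in-memory array:

#include <stdio.h>
#include <string.h>

/* The Apricot probe reads 8 bytes from ioaddr + 8; here they are faked. */
static int apricot_addr_valid(const unsigned char eth_addr[8])
{
	int i, checksum = 0;

	for (i = 0; i < 8; i++)
		checksum += eth_addr[i];

	/* A good board sums to a multiple of 0x100 (some machines yield
	 * 0x100, some 0x200) and carries the 00:00:49 address prefix. */
	if (checksum % 0x100)
		return 0;
	return memcmp(eth_addr, "\x00\x00\x49", 3) == 0;
}

int main(void)
{
	unsigned char ok[8]  = { 0x00, 0x00, 0x49, 0x30, 0x00, 0x00, 0x00, 0x87 };
	unsigned char bad[8] = { 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x00, 0x00 };

	printf("%d %d\n", apricot_addr_valid(ok), apricot_addr_valid(bad));
	return 0;
}

As the original comment notes, boards that fail the checksum but report a station address of zero are also trapped this way.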
- config FORCEDETH tristate "nForce Ethernet support" depends on NET_PCI && PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index e74b424..49b3e87 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -130,36 +130,19 @@ obj-$(CONFIG_MACVLAN) += macvlan.o obj-$(CONFIG_MACVTAP) += macvtap.o obj-$(CONFIG_DE600) += de600.o obj-$(CONFIG_DE620) += de620.o -obj-$(CONFIG_SUN3_82586) += sun3_82586.o obj-$(CONFIG_DEFXX) += defxx.o obj-$(CONFIG_SGISEEQ) += sgiseeq.o obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o obj-$(CONFIG_AT1700) += at1700.o -obj-$(CONFIG_EL16) += 3c507.o -obj-$(CONFIG_ELMC) += 3c523.o obj-$(CONFIG_IBMLANA) += ibmlana.o -obj-$(CONFIG_ELMC_II) += 3c527.o -obj-$(CONFIG_EEXPRESS) += eexpress.o -obj-$(CONFIG_EEXPRESS_PRO) += eepro.o obj-$(CONFIG_8139CP) += 8139cp.o obj-$(CONFIG_8139TOO) += 8139too.o -obj-$(CONFIG_ZNET) += znet.o obj-$(CONFIG_CPMAC) += cpmac.o obj-$(CONFIG_EWRK3) += ewrk3.o obj-$(CONFIG_ATP) += atp.o obj-$(CONFIG_NI5010) += ni5010.o -obj-$(CONFIG_NI52) += ni52.o -obj-$(CONFIG_ELPLUS) += 3c505.o -obj-$(CONFIG_APRICOT) += 82596.o -obj-$(CONFIG_LASI_82596) += lasi_82596.o -obj-$(CONFIG_SNI_82596) += sni_82596.o -obj-$(CONFIG_MVME16x_NET) += 82596.o -obj-$(CONFIG_BVME6000_NET) += 82596.o obj-$(CONFIG_SC92031) += sc92031.o -# This is also a 82596 and should probably be merged -obj-$(CONFIG_LP486E) += lp486e.o - obj-$(CONFIG_ETH16I) += eth16i.o obj-$(CONFIG_EQUALIZER) += eql.o obj-$(CONFIG_KORINA) += korina.o diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig index 715bf2a..7848b5f 100644 --- a/drivers/net/arm/Kconfig +++ b/drivers/net/arm/Kconfig @@ -3,13 +3,6 @@ # These are for Acorn's Expansion card network interfaces # -config ARM_ETHER1 - tristate "Acorn Ether1 support" - depends on ARM && ARCH_ACORN - help - If you have an Acorn system with one of these (AKA25) network cards, - you should say Y to this option if you wish to use it with Linux. - config ARM_ETHER3 tristate "Acorn/ANT Ether3 support" depends on ARM && ARCH_ACORN diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile index f1e6150..6cca728b 100644 --- a/drivers/net/arm/Makefile +++ b/drivers/net/arm/Makefile @@ -4,7 +4,6 @@ # obj-$(CONFIG_ARM_ETHER3) += ether3.o -obj-$(CONFIG_ARM_ETHER1) += ether1.o obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c deleted file mode 100644 index b00781c..0000000 --- a/drivers/net/arm/ether1.c +++ /dev/null @@ -1,1094 +0,0 @@ -/* - * linux/drivers/acorn/net/ether1.c - * - * Copyright (C) 1996-2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Acorn ether1 driver (82586 chip) for Acorn machines - * - * We basically keep two queues in the cards memory - one for transmit - * and one for receive. Each has a head and a tail. The head is where - * we/the chip adds packets to be transmitted/received, and the tail - * is where the transmitter has got to/where the receiver will stop. - * Both of these queues are circular, and since the chip is running - * all the time, we have to be careful when we modify the pointers etc - * so that the buffer memory contents is valid all the time. - * - * Change log: - * 1.00 RMK Released - * 1.01 RMK 19/03/1996 Transfers the last odd byte onto/off of the card now. 
- * 1.02 RMK 25/05/1997 Added code to restart RU if it goes not ready - * 1.03 RMK 14/09/1997 Cleaned up the handling of a reset during the TX interrupt. - * Should prevent lockup. - * 1.04 RMK 17/09/1997 Added more info when initialsation of chip goes wrong. - * TDR now only reports failure when chip reports non-zero - * TDR time-distance. - * 1.05 RMK 31/12/1997 Removed calls to dev_tint for 2.1 - * 1.06 RMK 10/02/2000 Updated for 2.3.43 - * 1.07 RMK 13/05/2000 Updated for 2.3.99-pre8 - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#define __ETHER1_C -#include "ether1.h" - -static unsigned int net_debug = NET_DEBUG; - -#define BUFFER_SIZE 0x10000 -#define TX_AREA_START 0x00100 -#define TX_AREA_END 0x05000 -#define RX_AREA_START 0x05000 -#define RX_AREA_END 0x0fc00 - -static int ether1_open(struct net_device *dev); -static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t ether1_interrupt(int irq, void *dev_id); -static int ether1_close(struct net_device *dev); -static void ether1_setmulticastlist(struct net_device *dev); -static void ether1_timeout(struct net_device *dev); - -/* ------------------------------------------------------------------------- */ - -static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n"; - -#define BUS_16 16 -#define BUS_8 8 - -/* ------------------------------------------------------------------------- */ - -#define DISABLEIRQS 1 -#define NORMALIRQS 0 - -#define ether1_readw(dev, addr, type, offset, svflgs) ether1_inw_p (dev, addr + (int)(&((type *)0)->offset), svflgs) -#define ether1_writew(dev, val, addr, type, offset, svflgs) ether1_outw_p (dev, val, addr + (int)(&((type *)0)->offset), svflgs) - -static inline unsigned short -ether1_inw_p (struct net_device *dev, int addr, int svflgs) -{ - unsigned long flags; - unsigned short ret; - - if (svflgs) - local_irq_save (flags); - - writeb(addr >> 12, REG_PAGE); - ret = readw(ETHER1_RAM + ((addr & 4095) << 1)); - if (svflgs) - local_irq_restore (flags); - return ret; -} - -static inline void -ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs) -{ - unsigned long flags; - - if (svflgs) - local_irq_save (flags); - - writeb(addr >> 12, REG_PAGE); - writew(val, ETHER1_RAM + ((addr & 4095) << 1)); - if (svflgs) - local_irq_restore (flags); -} - -/* - * Some inline assembler to allow fast transfers on to/off of the card. - * Since this driver depends on some features presented by the ARM - * specific architecture, and that you can't configure this driver - * without specifiing ARM mode, this is not a problem. - * - * This routine is essentially an optimised memcpy from the card's - * onboard RAM to kernel memory. 
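The inline assembler below is easier to follow once the paging scheme is clear: writing addr >> 12 to REG_PAGE selects which 4 KB page of card RAM appears in the window, and each 16-bit location inside the page is reached at a doubled byte offset (ETHER1_RAM + ((addr & 4095) << 1)). A simplified plain-C model of the same walk, with the window reduced to a hypothetical write_word() over simulated card RAM; the real assembler additionally duplicates each halfword across a 32-bit store and uses a single byte store for an odd tail:

/* Simulated card RAM: 16 pages of 4 KB, each 16-bit location reached
 * through the window at a doubled byte offset. */
static unsigned short card_ram[16][2048];

/* Stand-in for the paged window: the hardware equivalent is
 * writeb(page, REG_PAGE) followed by a store into ETHER1_RAM. */
static void write_word(unsigned page, unsigned byte_off, unsigned short val)
{
	card_ram[page][byte_off >> 1] = val;
}

/* Copy 'length' bytes of 'data' to card address 'start', switching
 * pages at every 4 KB boundary, as ether1_writebuffer() does. */
static void paged_copy(const void *data, unsigned start, unsigned length)
{
	const unsigned char *src = data;
	unsigned page = start >> 12;
	unsigned offset = start & 4095;
	unsigned i;

	while (length) {
		unsigned chunk = 4096 - offset;	/* room left in this page */

		if (chunk > length)
			chunk = length;
		for (i = 0; i < chunk; i += 2) {
			unsigned short w = src[i];

			if (i + 1 < chunk)	/* odd trailing byte: the asm
						 * stores just one byte here */
				w |= src[i + 1] << 8;
			write_word(page, offset + i, w);
		}
		src += chunk;
		length -= chunk;
		offset = 0;
		page++;
	}
}

int main(void)
{
	unsigned char buf[6000] = { 0 };

	paged_copy(buf, 0x05000, sizeof(buf));	/* spans two pages */
	return 0;
}

ether1_ramtest() exercises exactly this path over the full BUFFER_SIZE of 0x10000 bytes, i.e. all 16 pages.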
- */ -static void -ether1_writebuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length) -{ - unsigned int page, thislen, offset; - void __iomem *addr; - - offset = start & 4095; - page = start >> 12; - addr = ETHER1_RAM + (offset << 1); - - if (offset + length > 4096) - thislen = 4096 - offset; - else - thislen = length; - - do { - int used; - - writeb(page, REG_PAGE); - length -= thislen; - - __asm__ __volatile__( - "subs %3, %3, #2\n\ - bmi 2f\n\ -1: ldr %0, [%1], #2\n\ - mov %0, %0, lsl #16\n\ - orr %0, %0, %0, lsr #16\n\ - str %0, [%2], #4\n\ - subs %3, %3, #2\n\ - bmi 2f\n\ - ldr %0, [%1], #2\n\ - mov %0, %0, lsl #16\n\ - orr %0, %0, %0, lsr #16\n\ - str %0, [%2], #4\n\ - subs %3, %3, #2\n\ - bmi 2f\n\ - ldr %0, [%1], #2\n\ - mov %0, %0, lsl #16\n\ - orr %0, %0, %0, lsr #16\n\ - str %0, [%2], #4\n\ - subs %3, %3, #2\n\ - bmi 2f\n\ - ldr %0, [%1], #2\n\ - mov %0, %0, lsl #16\n\ - orr %0, %0, %0, lsr #16\n\ - str %0, [%2], #4\n\ - subs %3, %3, #2\n\ - bpl 1b\n\ -2: adds %3, %3, #1\n\ - ldreqb %0, [%1]\n\ - streqb %0, [%2]" - : "=&r" (used), "=&r" (data) - : "r" (addr), "r" (thislen), "1" (data)); - - addr = ETHER1_RAM; - - thislen = length; - if (thislen > 4096) - thislen = 4096; - page++; - } while (thislen); -} - -static void -ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length) -{ - unsigned int page, thislen, offset; - void __iomem *addr; - - offset = start & 4095; - page = start >> 12; - addr = ETHER1_RAM + (offset << 1); - - if (offset + length > 4096) - thislen = 4096 - offset; - else - thislen = length; - - do { - int used; - - writeb(page, REG_PAGE); - length -= thislen; - - __asm__ __volatile__( - "subs %3, %3, #2\n\ - bmi 2f\n\ -1: ldr %0, [%2], #4\n\ - strb %0, [%1], #1\n\ - mov %0, %0, lsr #8\n\ - strb %0, [%1], #1\n\ - subs %3, %3, #2\n\ - bmi 2f\n\ - ldr %0, [%2], #4\n\ - strb %0, [%1], #1\n\ - mov %0, %0, lsr #8\n\ - strb %0, [%1], #1\n\ - subs %3, %3, #2\n\ - bmi 2f\n\ - ldr %0, [%2], #4\n\ - strb %0, [%1], #1\n\ - mov %0, %0, lsr #8\n\ - strb %0, [%1], #1\n\ - subs %3, %3, #2\n\ - bmi 2f\n\ - ldr %0, [%2], #4\n\ - strb %0, [%1], #1\n\ - mov %0, %0, lsr #8\n\ - strb %0, [%1], #1\n\ - subs %3, %3, #2\n\ - bpl 1b\n\ -2: adds %3, %3, #1\n\ - ldreqb %0, [%2]\n\ - streqb %0, [%1]" - : "=&r" (used), "=&r" (data) - : "r" (addr), "r" (thislen), "1" (data)); - - addr = ETHER1_RAM; - - thislen = length; - if (thislen > 4096) - thislen = 4096; - page++; - } while (thislen); -} - -static int __devinit -ether1_ramtest(struct net_device *dev, unsigned char byte) -{ - unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL); - int i, ret = BUFFER_SIZE; - int max_errors = 15; - int bad = -1; - int bad_start = 0; - - if (!buffer) - return 1; - - memset (buffer, byte, BUFFER_SIZE); - ether1_writebuffer (dev, buffer, 0, BUFFER_SIZE); - memset (buffer, byte ^ 0xff, BUFFER_SIZE); - ether1_readbuffer (dev, buffer, 0, BUFFER_SIZE); - - for (i = 0; i < BUFFER_SIZE; i++) { - if (buffer[i] != byte) { - if (max_errors >= 0 && bad != buffer[i]) { - if (bad != -1) - printk ("\n"); - printk (KERN_CRIT "%s: RAM failed with (%02X instead of %02X) at 0x%04X", - dev->name, buffer[i], byte, i); - ret = -ENODEV; - max_errors --; - bad = buffer[i]; - bad_start = i; - } - } else { - if (bad != -1) { - if (bad_start == i - 1) - printk ("\n"); - else - printk (" - 0x%04X\n", i - 1); - bad = -1; - } - } - } - - if (bad != -1) - printk (" - 0x%04X\n", BUFFER_SIZE); - kfree (buffer); - - return ret; -} - -static int -ether1_reset (struct 
net_device *dev) -{ - writeb(CTRL_RST|CTRL_ACK, REG_CONTROL); - return BUS_16; -} - -static int __devinit -ether1_init_2(struct net_device *dev) -{ - int i; - dev->mem_start = 0; - - i = ether1_ramtest (dev, 0x5a); - - if (i > 0) - i = ether1_ramtest (dev, 0x1e); - - if (i <= 0) - return -ENODEV; - - dev->mem_end = i; - return 0; -} - -/* - * These are the structures that are loaded into the ether RAM card to - * initialise the 82586 - */ - -/* at 0x0100 */ -#define NOP_ADDR (TX_AREA_START) -#define NOP_SIZE (0x06) -static nop_t init_nop = { - 0, - CMD_NOP, - NOP_ADDR -}; - -/* at 0x003a */ -#define TDR_ADDR (0x003a) -#define TDR_SIZE (0x08) -static tdr_t init_tdr = { - 0, - CMD_TDR | CMD_INTR, - NOP_ADDR, - 0 -}; - -/* at 0x002e */ -#define MC_ADDR (0x002e) -#define MC_SIZE (0x0c) -static mc_t init_mc = { - 0, - CMD_SETMULTICAST, - TDR_ADDR, - 0, - { { 0, } } -}; - -/* at 0x0022 */ -#define SA_ADDR (0x0022) -#define SA_SIZE (0x0c) -static sa_t init_sa = { - 0, - CMD_SETADDRESS, - MC_ADDR, - { 0, } -}; - -/* at 0x0010 */ -#define CFG_ADDR (0x0010) -#define CFG_SIZE (0x12) -static cfg_t init_cfg = { - 0, - CMD_CONFIG, - SA_ADDR, - 8, - 8, - CFG8_SRDY, - CFG9_PREAMB8 | CFG9_ADDRLENBUF | CFG9_ADDRLEN(6), - 0, - 0x60, - 0, - CFG13_RETRY(15) | CFG13_SLOTH(2), - 0, -}; - -/* at 0x0000 */ -#define SCB_ADDR (0x0000) -#define SCB_SIZE (0x10) -static scb_t init_scb = { - 0, - SCB_CMDACKRNR | SCB_CMDACKCNA | SCB_CMDACKFR | SCB_CMDACKCX, - CFG_ADDR, - RX_AREA_START, - 0, - 0, - 0, - 0 -}; - -/* at 0xffee */ -#define ISCP_ADDR (0xffee) -#define ISCP_SIZE (0x08) -static iscp_t init_iscp = { - 1, - SCB_ADDR, - 0x0000, - 0x0000 -}; - -/* at 0xfff6 */ -#define SCP_ADDR (0xfff6) -#define SCP_SIZE (0x0a) -static scp_t init_scp = { - SCP_SY_16BBUS, - { 0, 0 }, - ISCP_ADDR, - 0 -}; - -#define RFD_SIZE (0x16) -static rfd_t init_rfd = { - 0, - 0, - 0, - 0, - { 0, }, - { 0, }, - 0 -}; - -#define RBD_SIZE (0x0a) -static rbd_t init_rbd = { - 0, - 0, - 0, - 0, - ETH_FRAME_LEN + 8 -}; - -#define TX_SIZE (0x08) -#define TBD_SIZE (0x08) - -static int -ether1_init_for_open (struct net_device *dev) -{ - int i, status, addr, next, next2; - int failures = 0; - unsigned long timeout; - - writeb(CTRL_RST|CTRL_ACK, REG_CONTROL); - - for (i = 0; i < 6; i++) - init_sa.sa_addr[i] = dev->dev_addr[i]; - - /* load data structures into ether1 RAM */ - ether1_writebuffer (dev, &init_scp, SCP_ADDR, SCP_SIZE); - ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE); - ether1_writebuffer (dev, &init_scb, SCB_ADDR, SCB_SIZE); - ether1_writebuffer (dev, &init_cfg, CFG_ADDR, CFG_SIZE); - ether1_writebuffer (dev, &init_sa, SA_ADDR, SA_SIZE); - ether1_writebuffer (dev, &init_mc, MC_ADDR, MC_SIZE); - ether1_writebuffer (dev, &init_tdr, TDR_ADDR, TDR_SIZE); - ether1_writebuffer (dev, &init_nop, NOP_ADDR, NOP_SIZE); - - if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) { - printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n", - dev->name); - return 1; - } - - /* - * setup circularly linked list of { rfd, rbd, buffer }, with - * all rfds circularly linked, rbds circularly linked. - * First rfd is linked to scp, first rbd is linked to first - * rfd. Last rbd has a suspend command. 
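The loop below lays the receive area out as fixed-size { rfd, rbd, buffer } slots and decides where to wrap by checking whether the following slot would still fit (next2 is where that slot would end). A standalone sketch of just the address arithmetic, reusing the driver's constants; ETH_FRAME_LEN is 1514 in the kernel headers:

#include <stdio.h>

#define RX_AREA_START 0x05000
#define RX_AREA_END   0x0fc00
#define RFD_SIZE      0x16
#define RBD_SIZE      0x0a
#define ETH_FRAME_LEN 1514
#define SLOT_SIZE     (RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10)

int main(void)
{
	int addr = RX_AREA_START, next, next2, slots = 0;

	do {
		next  = addr + SLOT_SIZE;
		next2 = next + SLOT_SIZE;
		slots++;
		if (next2 >= RX_AREA_END)	/* tail slot: link back to start,
						 * mark it EL | SUSPEND */
			printf("tail rfd at 0x%05x, wraps to 0x%05x\n",
			       addr, RX_AREA_START);
		addr = next;
	} while (next2 < RX_AREA_END);

	printf("%d receive slots of %d bytes each\n", slots, SLOT_SIZE);
	return 0;
}

With these constants the area holds 28 slots of 1556 bytes; whatever is left at the top of the area past the last slot is simply unused.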
- */ - addr = RX_AREA_START; - do { - next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10; - next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10; - - if (next2 >= RX_AREA_END) { - next = RX_AREA_START; - init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND; - priv(dev)->rx_tail = addr; - } else - init_rfd.rfd_command = 0; - if (addr == RX_AREA_START) - init_rfd.rfd_rbdoffset = addr + RFD_SIZE; - else - init_rfd.rfd_rbdoffset = 0; - init_rfd.rfd_link = next; - init_rbd.rbd_link = next + RFD_SIZE; - init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE; - - ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE); - ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE); - addr = next; - } while (next2 < RX_AREA_END); - - priv(dev)->tx_link = NOP_ADDR; - priv(dev)->tx_head = NOP_ADDR + NOP_SIZE; - priv(dev)->tx_tail = TDR_ADDR; - priv(dev)->rx_head = RX_AREA_START; - - /* release reset & give 586 a prod */ - priv(dev)->resetting = 1; - priv(dev)->initialising = 1; - writeb(CTRL_RST, REG_CONTROL); - writeb(0, REG_CONTROL); - writeb(CTRL_CA, REG_CONTROL); - - /* 586 should now unset iscp.busy */ - timeout = jiffies + HZ/2; - while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, DISABLEIRQS) == 1) { - if (time_after(jiffies, timeout)) { - printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n", dev->name); - return 1; - } - } - - /* check status of commands that we issued */ - timeout += HZ/10; - while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS)) - & STAT_COMPLETE) == 0) { - if (time_after(jiffies, timeout)) - break; - } - - if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { - printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n", dev->name, status); - printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, - ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); - failures += 1; - } - - timeout += HZ/10; - while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS)) - & STAT_COMPLETE) == 0) { - if (time_after(jiffies, timeout)) - break; - } - - if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { - printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n", dev->name, status); - printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, - ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); - failures += 1; - } - - timeout += HZ/10; - while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS)) - & STAT_COMPLETE) == 0) { - if (time_after(jiffies, timeout)) - break; - } - - if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { - printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n", dev->name, status); - printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, - ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); - failures += 1; - } - - 
timeout += HZ; - while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS)) - & STAT_COMPLETE) == 0) { - if (time_after(jiffies, timeout)) - break; - } - - if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { - printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name); - printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, - ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), - ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); - } else { - status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS); - if (status & TDR_XCVRPROB) - printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n", dev->name); - else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) { -#ifdef FANCY - printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n", dev->name, - status & TDR_SHORT ? "short" : "open", (status & TDR_TIME) / 10, - (status & TDR_TIME) % 10); -#else - printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n", dev->name, - status & TDR_SHORT ? "short" : "open", (status & TDR_TIME)); -#endif - } - } - - if (failures) - ether1_reset (dev); - return failures ? 1 : 0; -} - -/* ------------------------------------------------------------------------- */ - -static int -ether1_txalloc (struct net_device *dev, int size) -{ - int start, tail; - - size = (size + 1) & ~1; - tail = priv(dev)->tx_tail; - - if (priv(dev)->tx_head + size > TX_AREA_END) { - if (tail > priv(dev)->tx_head) - return -1; - start = TX_AREA_START; - if (start + size > tail) - return -1; - priv(dev)->tx_head = start + size; - } else { - if (priv(dev)->tx_head < tail && (priv(dev)->tx_head + size) > tail) - return -1; - start = priv(dev)->tx_head; - priv(dev)->tx_head += size; - } - - return start; -} - -static int -ether1_open (struct net_device *dev) -{ - if (!is_valid_ether_addr(dev->dev_addr)) { - printk(KERN_WARNING "%s: invalid ethernet MAC address\n", - dev->name); - return -EINVAL; - } - - if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev)) - return -EAGAIN; - - if (ether1_init_for_open (dev)) { - free_irq (dev->irq, dev); - return -EAGAIN; - } - - netif_start_queue(dev); - - return 0; -} - -static void -ether1_timeout(struct net_device *dev) -{ - printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n", - dev->name); - printk(KERN_WARNING "%s: resetting device\n", dev->name); - - ether1_reset (dev); - - if (ether1_init_for_open (dev)) - printk (KERN_ERR "%s: unable to restart interface\n", dev->name); - - dev->stats.tx_errors++; - netif_wake_queue(dev); -} - -static int -ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) -{ - int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; - unsigned long flags; - tx_t tx; - tbd_t tbd; - nop_t nop; - - if (priv(dev)->restart) { - printk(KERN_WARNING "%s: resetting device\n", dev->name); - - ether1_reset(dev); - - if (ether1_init_for_open(dev)) - printk(KERN_ERR "%s: unable to restart interface\n", dev->name); - else - priv(dev)->restart = 0; - } - - if (skb->len < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - goto out; - } - - /* - * insert packet followed by a nop - */ - txaddr = ether1_txalloc (dev, TX_SIZE); - tbdaddr = ether1_txalloc (dev, TBD_SIZE); - dataddr = ether1_txalloc (dev, skb->len); - nopaddr = ether1_txalloc (dev, NOP_SIZE); - - tx.tx_status = 0; - tx.tx_command = CMD_TX | 
CMD_INTR; - tx.tx_link = nopaddr; - tx.tx_tbdoffset = tbdaddr; - tbd.tbd_opts = TBD_EOL | skb->len; - tbd.tbd_link = I82586_NULL; - tbd.tbd_bufl = dataddr; - tbd.tbd_bufh = 0; - nop.nop_status = 0; - nop.nop_command = CMD_NOP; - nop.nop_link = nopaddr; - - local_irq_save(flags); - ether1_writebuffer (dev, &tx, txaddr, TX_SIZE); - ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE); - ether1_writebuffer (dev, skb->data, dataddr, skb->len); - ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE); - tmp = priv(dev)->tx_link; - priv(dev)->tx_link = nopaddr; - - /* now reset the previous nop pointer */ - ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS); - - local_irq_restore(flags); - - /* handle transmit */ - - /* check to see if we have room for a full sized ether frame */ - tmp = priv(dev)->tx_head; - tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN); - priv(dev)->tx_head = tmp; - dev_kfree_skb (skb); - - if (tst == -1) - netif_stop_queue(dev); - - out: - return NETDEV_TX_OK; -} - -static void -ether1_xmit_done (struct net_device *dev) -{ - nop_t nop; - int caddr, tst; - - caddr = priv(dev)->tx_tail; - -again: - ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); - - switch (nop.nop_command & CMD_MASK) { - case CMD_TDR: - /* special case */ - if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS) - != (unsigned short)I82586_NULL) { - ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR, scb_t, - scb_command, NORMALIRQS); - writeb(CTRL_CA, REG_CONTROL); - } - priv(dev)->tx_tail = NOP_ADDR; - return; - - case CMD_NOP: - if (nop.nop_link == caddr) { - if (priv(dev)->initialising == 0) - printk (KERN_WARNING "%s: strange command complete with no tx command!\n", dev->name); - else - priv(dev)->initialising = 0; - return; - } - if (caddr == nop.nop_link) - return; - caddr = nop.nop_link; - goto again; - - case CMD_TX: - if (nop.nop_status & STAT_COMPLETE) - break; - printk (KERN_ERR "%s: strange command complete without completed command\n", dev->name); - priv(dev)->restart = 1; - return; - - default: - printk (KERN_WARNING "%s: strange command %d complete! 
(offset %04X)", dev->name, - nop.nop_command & CMD_MASK, caddr); - priv(dev)->restart = 1; - return; - } - - while (nop.nop_status & STAT_COMPLETE) { - if (nop.nop_status & STAT_OK) { - dev->stats.tx_packets++; - dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS); - } else { - dev->stats.tx_errors++; - - if (nop.nop_status & STAT_COLLAFTERTX) - dev->stats.collisions++; - if (nop.nop_status & STAT_NOCARRIER) - dev->stats.tx_carrier_errors++; - if (nop.nop_status & STAT_TXLOSTCTS) - printk (KERN_WARNING "%s: cts lost\n", dev->name); - if (nop.nop_status & STAT_TXSLOWDMA) - dev->stats.tx_fifo_errors++; - if (nop.nop_status & STAT_COLLEXCESSIVE) - dev->stats.collisions += 16; - } - - if (nop.nop_link == caddr) { - printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n", dev->name); - break; - } - - caddr = nop.nop_link; - ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); - if ((nop.nop_command & CMD_MASK) != CMD_NOP) { - printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n", dev->name); - break; - } - - if (caddr == nop.nop_link) - break; - - caddr = nop.nop_link; - ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); - if ((nop.nop_command & CMD_MASK) != CMD_TX) { - printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n", dev->name); - break; - } - } - priv(dev)->tx_tail = caddr; - - caddr = priv(dev)->tx_head; - tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN); - priv(dev)->tx_head = caddr; - if (tst != -1) - netif_wake_queue(dev); -} - -static void -ether1_recv_done (struct net_device *dev) -{ - int status; - int nexttail, rbdaddr; - rbd_t rbd; - - do { - status = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_status, NORMALIRQS); - if ((status & RFD_COMPLETE) == 0) - break; - - rbdaddr = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_rbdoffset, NORMALIRQS); - ether1_readbuffer (dev, &rbd, rbdaddr, RBD_SIZE); - - if ((rbd.rbd_status & (RBD_EOF | RBD_ACNTVALID)) == (RBD_EOF | RBD_ACNTVALID)) { - int length = rbd.rbd_status & RBD_ACNT; - struct sk_buff *skb; - - length = (length + 1) & ~1; - skb = dev_alloc_skb (length + 2); - - if (skb) { - skb_reserve (skb, 2); - - ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length); - - skb->protocol = eth_type_trans (skb, dev); - netif_rx (skb); - dev->stats.rx_packets++; - } else - dev->stats.rx_dropped++; - } else { - printk(KERN_WARNING "%s: %s\n", dev->name, - (rbd.rbd_status & RBD_EOF) ? 
"oversized packet" : "acnt not valid"); - dev->stats.rx_dropped++; - } - - nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS); - /* nexttail should be rx_head */ - if (nexttail != priv(dev)->rx_head) - printk(KERN_ERR "%s: receiver buffer chaining error (%04X != %04X)\n", - dev->name, nexttail, priv(dev)->rx_head); - ether1_writew(dev, RFD_CMDEL | RFD_CMDSUSPEND, nexttail, rfd_t, rfd_command, NORMALIRQS); - ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_command, NORMALIRQS); - ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_status, NORMALIRQS); - ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_rbdoffset, NORMALIRQS); - - priv(dev)->rx_tail = nexttail; - priv(dev)->rx_head = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_link, NORMALIRQS); - } while (1); -} - -static irqreturn_t -ether1_interrupt (int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *)dev_id; - int status; - - status = ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS); - - if (status) { - ether1_writew(dev, status & (SCB_STRNR | SCB_STCNA | SCB_STFR | SCB_STCX), - SCB_ADDR, scb_t, scb_command, NORMALIRQS); - writeb(CTRL_CA | CTRL_ACK, REG_CONTROL); - if (status & SCB_STCX) { - ether1_xmit_done (dev); - } - if (status & SCB_STCNA) { - if (priv(dev)->resetting == 0) - printk (KERN_WARNING "%s: CU went not ready ???\n", dev->name); - else - priv(dev)->resetting += 1; - if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS) - != (unsigned short)I82586_NULL) { - ether1_writew(dev, SCB_CMDCUCSTART, SCB_ADDR, scb_t, scb_command, NORMALIRQS); - writeb(CTRL_CA, REG_CONTROL); - } - if (priv(dev)->resetting == 2) - priv(dev)->resetting = 0; - } - if (status & SCB_STFR) { - ether1_recv_done (dev); - } - if (status & SCB_STRNR) { - if (ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS) & SCB_STRXSUSP) { - printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name); - ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS); - writeb(CTRL_CA, REG_CONTROL); - dev->stats.rx_dropped++; /* we suspended due to lack of buffer space */ - } else - printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name, - ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS)); - printk (KERN_WARNING "RU ptr = %04X\n", ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, - NORMALIRQS)); - } - } else - writeb(CTRL_ACK, REG_CONTROL); - - return IRQ_HANDLED; -} - -static int -ether1_close (struct net_device *dev) -{ - ether1_reset (dev); - - free_irq(dev->irq, dev); - - return 0; -} - -/* - * Set or clear the multicast filter for this adaptor. - * num_addrs == -1 Promiscuous mode, receive all packets. - * num_addrs == 0 Normal mode, clear multicast list. - * num_addrs > 0 Multicast mode, receive normal and MC packets, and do - * best-effort filtering. 
- */ -static void -ether1_setmulticastlist (struct net_device *dev) -{ -} - -/* ------------------------------------------------------------------------- */ - -static void __devinit ether1_banner(void) -{ - static unsigned int version_printed = 0; - - if (net_debug && version_printed++ == 0) - printk(KERN_INFO "%s", version); -} - -static const struct net_device_ops ether1_netdev_ops = { - .ndo_open = ether1_open, - .ndo_stop = ether1_close, - .ndo_start_xmit = ether1_sendpacket, - .ndo_set_multicast_list = ether1_setmulticastlist, - .ndo_tx_timeout = ether1_timeout, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, -}; - -static int __devinit -ether1_probe(struct expansion_card *ec, const struct ecard_id *id) -{ - struct net_device *dev; - int i, ret = 0; - - ether1_banner(); - - ret = ecard_request_resources(ec); - if (ret) - goto out; - - dev = alloc_etherdev(sizeof(struct ether1_priv)); - if (!dev) { - ret = -ENOMEM; - goto release; - } - - SET_NETDEV_DEV(dev, &ec->dev); - - dev->irq = ec->irq; - priv(dev)->base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); - if (!priv(dev)->base) { - ret = -ENOMEM; - goto free; - } - - if ((priv(dev)->bus_type = ether1_reset(dev)) == 0) { - ret = -ENODEV; - goto free; - } - - for (i = 0; i < 6; i++) - dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2)); - - if (ether1_init_2(dev)) { - ret = -ENODEV; - goto free; - } - - dev->netdev_ops = &ether1_netdev_ops; - dev->watchdog_timeo = 5 * HZ / 100; - - ret = register_netdev(dev); - if (ret) - goto free; - - printk(KERN_INFO "%s: ether1 in slot %d, %pM\n", - dev->name, ec->slot_no, dev->dev_addr); - - ecard_set_drvdata(ec, dev); - return 0; - - free: - free_netdev(dev); - release: - ecard_release_resources(ec); - out: - return ret; -} - -static void __devexit ether1_remove(struct expansion_card *ec) -{ - struct net_device *dev = ecard_get_drvdata(ec); - - ecard_set_drvdata(ec, NULL); - - unregister_netdev(dev); - free_netdev(dev); - ecard_release_resources(ec); -} - -static const struct ecard_id ether1_ids[] = { - { MANU_ACORN, PROD_ACORN_ETHER1 }, - { 0xffff, 0xffff } -}; - -static struct ecard_driver ether1_driver = { - .probe = ether1_probe, - .remove = __devexit_p(ether1_remove), - .id_table = ether1_ids, - .drv = { - .name = "ether1", - }, -}; - -static int __init ether1_init(void) -{ - return ecard_register_driver(&ether1_driver); -} - -static void __exit ether1_exit(void) -{ - ecard_remove_driver(&ether1_driver); -} - -module_init(ether1_init); -module_exit(ether1_exit); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/arm/ether1.h b/drivers/net/arm/ether1.h deleted file mode 100644 index 3a5830a..0000000 --- a/drivers/net/arm/ether1.h +++ /dev/null @@ -1,280 +0,0 @@ -/* - * linux/drivers/acorn/net/ether1.h - * - * Copyright (C) 1996 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Network driver for Acorn Ether1 cards.
- */ - -#ifndef _LINUX_ether1_H -#define _LINUX_ether1_H - -#ifdef __ETHER1_C -/* use 0 for production, 1 for verification, >2 for debug */ -#ifndef NET_DEBUG -#define NET_DEBUG 0 -#endif - -#define priv(dev) ((struct ether1_priv *)netdev_priv(dev)) - -/* Page register */ -#define REG_PAGE (priv(dev)->base + 0x0000) - -/* Control register */ -#define REG_CONTROL (priv(dev)->base + 0x0004) -#define CTRL_RST 0x01 -#define CTRL_LOOPBACK 0x02 -#define CTRL_CA 0x04 -#define CTRL_ACK 0x08 - -#define ETHER1_RAM (priv(dev)->base + 0x2000) - -/* HW address */ -#define IDPROM_ADDRESS (priv(dev)->base + 0x0024) - -struct ether1_priv { - void __iomem *base; - unsigned int tx_link; - unsigned int tx_head; - volatile unsigned int tx_tail; - volatile unsigned int rx_head; - volatile unsigned int rx_tail; - unsigned char bus_type; - unsigned char resetting; - unsigned char initialising : 1; - unsigned char restart : 1; -}; - -#define I82586_NULL (-1) - -typedef struct { /* tdr */ - unsigned short tdr_status; - unsigned short tdr_command; - unsigned short tdr_link; - unsigned short tdr_result; -#define TDR_TIME (0x7ff) -#define TDR_SHORT (1 << 12) -#define TDR_OPEN (1 << 13) -#define TDR_XCVRPROB (1 << 14) -#define TDR_LNKOK (1 << 15) -} tdr_t; - -typedef struct { /* transmit */ - unsigned short tx_status; - unsigned short tx_command; - unsigned short tx_link; - unsigned short tx_tbdoffset; -} tx_t; - -typedef struct { /* tbd */ - unsigned short tbd_opts; -#define TBD_CNT (0x3fff) -#define TBD_EOL (1 << 15) - unsigned short tbd_link; - unsigned short tbd_bufl; - unsigned short tbd_bufh; -} tbd_t; - -typedef struct { /* rfd */ - unsigned short rfd_status; -#define RFD_NOEOF (1 << 6) -#define RFD_FRAMESHORT (1 << 7) -#define RFD_DMAOVRN (1 << 8) -#define RFD_NORESOURCES (1 << 9) -#define RFD_ALIGNERROR (1 << 10) -#define RFD_CRCERROR (1 << 11) -#define RFD_OK (1 << 13) -#define RFD_FDCONSUMED (1 << 14) -#define RFD_COMPLETE (1 << 15) - unsigned short rfd_command; -#define RFD_CMDSUSPEND (1 << 14) -#define RFD_CMDEL (1 << 15) - unsigned short rfd_link; - unsigned short rfd_rbdoffset; - unsigned char rfd_dest[6]; - unsigned char rfd_src[6]; - unsigned short rfd_len; -} rfd_t; - -typedef struct { /* rbd */ - unsigned short rbd_status; -#define RBD_ACNT (0x3fff) -#define RBD_ACNTVALID (1 << 14) -#define RBD_EOF (1 << 15) - unsigned short rbd_link; - unsigned short rbd_bufl; - unsigned short rbd_bufh; - unsigned short rbd_len; -} rbd_t; - -typedef struct { /* nop */ - unsigned short nop_status; - unsigned short nop_command; - unsigned short nop_link; -} nop_t; - -typedef struct { /* set multicast */ - unsigned short mc_status; - unsigned short mc_command; - unsigned short mc_link; - unsigned short mc_cnt; - unsigned char mc_addrs[1][6]; -} mc_t; - -typedef struct { /* set address */ - unsigned short sa_status; - unsigned short sa_command; - unsigned short sa_link; - unsigned char sa_addr[6]; -} sa_t; - -typedef struct { /* config command */ - unsigned short cfg_status; - unsigned short cfg_command; - unsigned short cfg_link; - unsigned char cfg_bytecnt; /* size foll data: 4 - 12 */ - unsigned char cfg_fifolim; /* FIFO threshold */ - unsigned char cfg_byte8; -#define CFG8_SRDY (1 << 6) -#define CFG8_SAVEBADF (1 << 7) - unsigned char cfg_byte9; -#define CFG9_ADDRLEN(x) (x) -#define CFG9_ADDRLENBUF (1 << 3) -#define CFG9_PREAMB2 (0 << 4) -#define CFG9_PREAMB4 (1 << 4) -#define CFG9_PREAMB8 (2 << 4) -#define CFG9_PREAMB16 (3 << 4) -#define CFG9_ILOOPBACK (1 << 6) -#define CFG9_ELOOPBACK (1 << 7) - unsigned char 
cfg_byte10; -#define CFG10_LINPRI(x) (x) -#define CFG10_ACR(x) (x << 4) -#define CFG10_BOFMET (1 << 7) - unsigned char cfg_ifs; - unsigned char cfg_slotl; - unsigned char cfg_byte13; -#define CFG13_SLOTH(x) (x) -#define CFG13_RETRY(x) (x << 4) - unsigned char cfg_byte14; -#define CFG14_PROMISC (1 << 0) -#define CFG14_DISBRD (1 << 1) -#define CFG14_MANCH (1 << 2) -#define CFG14_TNCRS (1 << 3) -#define CFG14_NOCRC (1 << 4) -#define CFG14_CRC16 (1 << 5) -#define CFG14_BTSTF (1 << 6) -#define CFG14_FLGPAD (1 << 7) - unsigned char cfg_byte15; -#define CFG15_CSTF(x) (x) -#define CFG15_ICSS (1 << 3) -#define CFG15_CDTF(x) (x << 4) -#define CFG15_ICDS (1 << 7) - unsigned short cfg_minfrmlen; -} cfg_t; - -typedef struct { /* scb */ - unsigned short scb_status; /* status of 82586 */ -#define SCB_STRXMASK (7 << 4) /* Receive unit status */ -#define SCB_STRXIDLE (0 << 4) /* Idle */ -#define SCB_STRXSUSP (1 << 4) /* Suspended */ -#define SCB_STRXNRES (2 << 4) /* No resources */ -#define SCB_STRXRDY (4 << 4) /* Ready */ -#define SCB_STCUMASK (7 << 8) /* Command unit status */ -#define SCB_STCUIDLE (0 << 8) /* Idle */ -#define SCB_STCUSUSP (1 << 8) /* Suspended */ -#define SCB_STCUACTV (2 << 8) /* Active */ -#define SCB_STRNR (1 << 12) /* Receive unit not ready */ -#define SCB_STCNA (1 << 13) /* Command unit not ready */ -#define SCB_STFR (1 << 14) /* Frame received */ -#define SCB_STCX (1 << 15) /* Command completed */ - unsigned short scb_command; /* Next command */ -#define SCB_CMDRXSTART (1 << 4) /* Start (at rfa_offset) */ -#define SCB_CMDRXRESUME (2 << 4) /* Resume reception */ -#define SCB_CMDRXSUSPEND (3 << 4) /* Suspend reception */ -#define SCB_CMDRXABORT (4 << 4) /* Abort reception */ -#define SCB_CMDCUCSTART (1 << 8) /* Start (at cbl_offset) */ -#define SCB_CMDCUCRESUME (2 << 8) /* Resume execution */ -#define SCB_CMDCUCSUSPEND (3 << 8) /* Suspend execution */ -#define SCB_CMDCUCABORT (4 << 8) /* Abort execution */ -#define SCB_CMDACKRNR (1 << 12) /* Ack RU not ready */ -#define SCB_CMDACKCNA (1 << 13) /* Ack CU not ready */ -#define SCB_CMDACKFR (1 << 14) /* Ack Frame received */ -#define SCB_CMDACKCX (1 << 15) /* Ack Command complete */ - unsigned short scb_cbl_offset; /* Offset of first command unit */ - unsigned short scb_rfa_offset; /* Offset of first receive frame area */ - unsigned short scb_crc_errors; /* Properly aligned frame with CRC error*/ - unsigned short scb_aln_errors; /* Misaligned frames */ - unsigned short scb_rsc_errors; /* Frames lost due to no space */ - unsigned short scb_ovn_errors; /* Frames lost due to slow bus */ -} scb_t; - -typedef struct { /* iscp */ - unsigned short iscp_busy; /* set by CPU before CA */ - unsigned short iscp_offset; /* offset of SCB */ - unsigned short iscp_basel; /* base of SCB */ - unsigned short iscp_baseh; -} iscp_t; - - /* this address must be 0xfff6 */ -typedef struct { /* scp */ - unsigned short scp_sysbus; /* bus size */ -#define SCP_SY_16BBUS 0x00 -#define SCP_SY_8BBUS 0x01 - unsigned short scp_junk[2]; /* junk */ - unsigned short scp_iscpl; /* lower 16 bits of iscp */ - unsigned short scp_iscph; /* upper 16 bits of iscp */ -} scp_t; - -/* commands */ -#define CMD_NOP 0 -#define CMD_SETADDRESS 1 -#define CMD_CONFIG 2 -#define CMD_SETMULTICAST 3 -#define CMD_TX 4 -#define CMD_TDR 5 -#define CMD_DUMP 6 -#define CMD_DIAGNOSE 7 - -#define CMD_MASK 7 - -#define CMD_INTR (1 << 13) -#define CMD_SUSP (1 << 14) -#define CMD_EOL (1 << 15) - -#define STAT_COLLISIONS (15) -#define STAT_COLLEXCESSIVE (1 << 5) -#define STAT_COLLAFTERTX (1 << 6) 
-#define STAT_TXDEFERRED (1 << 7) -#define STAT_TXSLOWDMA (1 << 8) -#define STAT_TXLOSTCTS (1 << 9) -#define STAT_NOCARRIER (1 << 10) -#define STAT_FAIL (1 << 11) -#define STAT_ABORTED (1 << 12) -#define STAT_OK (1 << 13) -#define STAT_BUSY (1 << 14) -#define STAT_COMPLETE (1 << 15) -#endif -#endif - -/* - * Ether1 card definitions: - * - * FAST accesses: - * +0 Page register - * 16 pages - * +4 Control - * '1' = reset - * '2' = loopback - * '4' = CA - * '8' = int ack - * - * RAM at address + 0x2000 - * Pod. Prod id = 3 - * Words after ID block [base + 8 words] - * +0 pcb issue (0x0c and 0xf3 invalid) - * +1 - +6 eth hw address - */ diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c deleted file mode 100644 index dfeb006..0000000 --- a/drivers/net/eepro.c +++ /dev/null @@ -1,1822 +0,0 @@ -/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */ -/* - Written 1994, 1995,1996 by Bao C. Ha. - - Copyright (C) 1994, 1995,1996 by Bao C. Ha. - - This software may be used and distributed - according to the terms of the GNU General Public License, - incorporated herein by reference. - - The author may be reached at bao.ha@srs.gov - or 418 Hastings Place, Martinez, GA 30907. - - Things remaining to do: - Better record keeping of errors. - Eliminate transmit interrupt to reduce overhead. - Implement "concurrent processing". I won't be doing it! - - Bugs: - - If you have a problem of not detecting the 82595 during a - reboot (warm reset), disable the FLASH memory should fix it. - This is a compatibility hardware problem. - - Versions: - 0.13b basic ethtool support (aris, 09/13/2004) - 0.13a in memory shortage, drop packets also in board - (Michael Westermann , 07/30/2002) - 0.13 irq sharing, rewrote probe function, fixed a nasty bug in - hardware_send_packet and a major cleanup (aris, 11/08/2001) - 0.12d fixing a problem with single card detected as eight eth devices - fixing a problem with sudden drop in card performance - (chris (asdn@go2.pl), 10/29/2001) - 0.12c fixing some problems with old cards (aris, 01/08/2001) - 0.12b misc fixes (aris, 06/26/2000) - 0.12a port of version 0.12a of 2.2.x kernels to 2.3.x - (aris (aris@conectiva.com.br), 05/19/2000) - 0.11e some tweaks about multiple cards support (PdP, jul/aug 1999) - 0.11d added __initdata, __init stuff; call spin_lock_init - in eepro_probe1. Replaced "eepro" by dev->name. Augmented - the code protected by spin_lock in interrupt routine - (PdP, 12/12/1998) - 0.11c minor cleanup (PdP, RMC, 09/12/1998) - 0.11b Pascal Dupuis (dupuis@lei.ucl.ac.be): works as a module - under 2.1.xx. Debug messages are flagged as KERN_DEBUG to - avoid console flooding. Added locking at critical parts. Now - the dawn thing is SMP safe. - 0.11a Attempt to get 2.1.xx support up (RMC) - 0.11 Brian Candler added support for multiple cards. Tested as - a module, no idea if it works when compiled into kernel. - - 0.10e Rick Bressler notified me that ifconfig up;ifconfig down fails - because the irq is lost somewhere. Fixed that by moving - request_irq and free_irq to eepro_open and eepro_close respectively. - 0.10d Ugh! Now Wakeup works. Was seriously broken in my first attempt. - I'll need to find a way to specify an ioport other than - the default one in the PnP case. PnP definitively sucks. - And, yes, this is not the only reason. - 0.10c PnP Wakeup Test for 595FX. uncomment #define PnPWakeup; - to use. - 0.10b Should work now with (some) Pro/10+. At least for - me (and my two cards) it does. _No_ guarantee for - function with non-Pro/10+ cards! 
(don't have any) - (RMC, 9/11/96) - - 0.10 Added support for the Etherexpress Pro/10+. The - IRQ map was changed significantly from the old - pro/10. The new interrupt map was provided by - Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu). - (BCH, 9/3/96) - - 0.09 Fixed a race condition in the transmit algorithm, - which causes crashes under heavy load with fast - pentium computers. The performance should also - improve a bit. The size of RX buffer, and hence - TX buffer, can also be changed via lilo or insmod. - (BCH, 7/31/96) - - 0.08 Implement 32-bit I/O for the 82595TX and 82595FX - based lan cards. Disable full-duplex mode if TPE - is not used. (BCH, 4/8/96) - - 0.07a Fix a stat report which counts every packet as a - heart-beat failure. (BCH, 6/3/95) - - 0.07 Modified to support all other 82595-based lan cards. - The IRQ vector of the EtherExpress Pro will be set - according to the value saved in the EEPROM. For other - cards, I will do autoirq_request() to grab the next - available interrupt vector. (BCH, 3/17/95) - - 0.06a,b Interim released. Minor changes in the comments and - print out format. (BCH, 3/9/95 and 3/14/95) - - 0.06 First stable release that I am comfortable with. (BCH, - 3/2/95) - - 0.05 Complete testing of multicast. (BCH, 2/23/95) - - 0.04 Adding multicast support. (BCH, 2/14/95) - - 0.03 First widely alpha release for public testing. - (BCH, 2/14/95) - -*/ - -static const char version[] = - "eepro.c: v0.13b 09/13/2004 aris@cathedrallabs.org\n"; - -#include - -/* - Sources: - - This driver wouldn't have been written without the availability - of the Crynwr's Lan595 driver source code. It helps me to - familiarize with the 82595 chipset while waiting for the Intel - documentation. I also learned how to detect the 82595 using - the packet driver's technique. - - This driver is written by cutting and pasting the skeleton.c driver - provided by Donald Becker. I also borrowed the EEPROM routine from - Donald Becker's 82586 driver. - - Datasheet for the Intel 82595 (including the TX and FX version). It - provides just enough info that the casual reader might think that it - documents the i82595. - - The User Manual for the 82595. It provides a lot of the missing - information. - -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define DRV_NAME "eepro" -#define DRV_VERSION "0.13c" - -#define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) ) -/* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */ -#define SLOW_DOWN inb(0x80) -/* udelay(2) */ -#define compat_init_data __initdata -enum iftype { AUI=0, BNC=1, TPE=2 }; - -/* First, a few definitions that the brave might change. */ -/* A zero-terminated list of I/O addresses to be probed. */ -static unsigned int eepro_portlist[] compat_init_data = - { 0x300, 0x210, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0}; -/* note: 0x300 is default, the 595FX supports ALL IO Ports - from 0x000 to 0x3F0, some of which are reserved in PCs */ - -/* To try the (not-really PnP Wakeup: */ -/* -#define PnPWakeup -*/ - -/* use 0 for production, 1 for verification, >2 for debug */ -#ifndef NET_DEBUG -#define NET_DEBUG 0 -#endif -static unsigned int net_debug = NET_DEBUG; - -/* The number of low I/O ports used by the ethercard. 
*/ -#define EEPRO_IO_EXTENT 16 - -/* Different 82595 chips */ -#define LAN595 0 -#define LAN595TX 1 -#define LAN595FX 2 -#define LAN595FX_10ISA 3 - -/* Information that need to be kept for each board. */ -struct eepro_local { - unsigned rx_start; - unsigned tx_start; /* start of the transmit chain */ - int tx_last; /* pointer to last packet in the transmit chain */ - unsigned tx_end; /* end of the transmit chain (plus 1) */ - int eepro; /* 1 for the EtherExpress Pro/10, - 2 for the EtherExpress Pro/10+, - 3 for the EtherExpress 10 (blue cards), - 0 for other 82595-based lan cards. */ - int version; /* a flag to indicate if this is a TX or FX - version of the 82595 chip. */ - int stepping; - - spinlock_t lock; /* Serializing lock */ - - unsigned rcv_ram; /* pre-calculated space for rx */ - unsigned xmt_ram; /* pre-calculated space for tx */ - unsigned char xmt_bar; - unsigned char xmt_lower_limit_reg; - unsigned char xmt_upper_limit_reg; - short xmt_lower_limit; - short xmt_upper_limit; - short rcv_lower_limit; - short rcv_upper_limit; - unsigned char eeprom_reg; - unsigned short word[8]; -}; - -/* The station (ethernet) address prefix, used for IDing the board. */ -#define SA_ADDR0 0x00 /* Etherexpress Pro/10 */ -#define SA_ADDR1 0xaa -#define SA_ADDR2 0x00 - -#define GetBit(x,y) ((x & (1<<y))>>y) - -/* EEPROM Word 0: */ -#define ee_PnP 0 /* Plug 'n Play enable bit */ -#define ee_Word1 1 /* Word 1? */ -#define ee_BusWidth 2 /* 8/16 bit */ -#define ee_FlashAddr 3 /* Flash Address */ -#define ee_FlashMask 0x7 /* Mask */ -#define ee_AutoIO 6 /* */ -#define ee_reserved0 7 /* =0! */ -#define ee_Flash 8 /* Flash there? */ -#define ee_AutoNeg 9 /* Auto Negotiation enabled? */ -#define ee_IO0 10 /* IO Address LSB */ -#define ee_IO0Mask 0x /*...*/ -#define ee_IO1 15 /* IO MSB */ - -/* EEPROM Word 1: */ -#define ee_IntSel 0 /* Interrupt */ -#define ee_IntMask 0x7 -#define ee_LI 3 /* Link Integrity 0= enabled */ -#define ee_PC 4 /* Polarity Correction 0= enabled */ -#define ee_TPE_AUI 5 /* PortSelection 1=TPE */ -#define ee_Jabber 6 /* Jabber prevention 0= enabled */ -#define ee_AutoPort 7 /* Auto Port Selection 1= Disabled */ -#define ee_SMOUT 8 /* SMout Pin Control 0= Input */ -#define ee_PROM 9 /* Flash EPROM / PROM 0=Flash */ -#define ee_reserved1 10 /* .. 12 =0! */ -#define ee_AltReady 13 /* Alternate Ready, 0=normal */ -#define ee_reserved2 14 /* =0!
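[Editor's note] GetBit() and the ee_* bit positions above drive all of the driver's EEPROM parsing. A small illustrative decoder for a few word-0/word-1 fields, in the style of printEEPROMInfo() further down; the function name is hypothetical, and lp->word[] is the EEPROM cache declared in struct eepro_local:

	/* Hypothetical example: decode a few ee_* fields from the cached
	 * EEPROM words, as printEEPROMInfo() does at probe time. */
	static void eepro_show_eeprom_bits(struct eepro_local *lp)
	{
		unsigned short w0 = lp->word[0], w1 = lp->word[1];

		printk(KERN_DEBUG "PnP=%d autoneg=%d int=%d tpe=%d\n",
		       GetBit(w0, ee_PnP),	/* Plug 'n Play enabled */
		       GetBit(w0, ee_AutoNeg),	/* autonegotiation */
		       w1 & ee_IntMask,		/* 3-bit INT line select */
		       GetBit(w1, ee_TPE_AUI));	/* 1 = twisted pair */
	}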
*/ -#define ee_Duplex 15 - -/* Word2,3,4: */ -#define ee_IA5 0 /*bit start for individual Addr Byte 5 */ -#define ee_IA4 8 /*bit start for individual Addr Byte 5 */ -#define ee_IA3 0 /*bit start for individual Addr Byte 5 */ -#define ee_IA2 8 /*bit start for individual Addr Byte 5 */ -#define ee_IA1 0 /*bit start for individual Addr Byte 5 */ -#define ee_IA0 8 /*bit start for individual Addr Byte 5 */ - -/* Word 5: */ -#define ee_BNC_TPE 0 /* 0=TPE */ -#define ee_BootType 1 /* 00=None, 01=IPX, 10=ODI, 11=NDIS */ -#define ee_BootTypeMask 0x3 -#define ee_NumConn 3 /* Number of Connections 0= One or Two */ -#define ee_FlashSock 4 /* Presence of Flash Socket 0= Present */ -#define ee_PortTPE 5 -#define ee_PortBNC 6 -#define ee_PortAUI 7 -#define ee_PowerMgt 10 /* 0= disabled */ -#define ee_CP 13 /* Concurrent Processing */ -#define ee_CPMask 0x7 - -/* Word 6: */ -#define ee_Stepping 0 /* Stepping info */ -#define ee_StepMask 0x0F -#define ee_BoardID 4 /* Manucaturer Board ID, reserved */ -#define ee_BoardMask 0x0FFF - -/* Word 7: */ -#define ee_INT_TO_IRQ 0 /* int to IRQ Mapping = 0x1EB8 for Pro/10+ */ -#define ee_FX_INT2IRQ 0x1EB8 /* the _only_ mapping allowed for FX chips */ - -/*..*/ -#define ee_SIZE 0x40 /* total EEprom Size */ -#define ee_Checksum 0xBABA /* initial and final value for adding checksum */ - - -/* Card identification via EEprom: */ -#define ee_addr_vendor 0x10 /* Word offset for EISA Vendor ID */ -#define ee_addr_id 0x11 /* Word offset for Card ID */ -#define ee_addr_SN 0x12 /* Serial Number */ -#define ee_addr_CRC_8 0x14 /* CRC over last thee Bytes */ - - -#define ee_vendor_intel0 0x25 /* Vendor ID Intel */ -#define ee_vendor_intel1 0xD4 -#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */ -#define ee_id_eepro10p1 0x31 - -#define TX_TIMEOUT ((4*HZ)/10) - -/* Index to functions, as function prototypes. */ - -static int eepro_probe1(struct net_device *dev, int autoprobe); -static int eepro_open(struct net_device *dev); -static netdev_tx_t eepro_send_packet(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t eepro_interrupt(int irq, void *dev_id); -static void eepro_rx(struct net_device *dev); -static void eepro_transmit_interrupt(struct net_device *dev); -static int eepro_close(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static void eepro_tx_timeout (struct net_device *dev); - -static int read_eeprom(int ioaddr, int location, struct net_device *dev); -static int hardware_send_packet(struct net_device *dev, void *buf, short length); -static int eepro_grab_irq(struct net_device *dev); - -/* - Details of the i82595. - -You will need either the datasheet or the user manual to understand what -is going on here. The 82595 is very different from the 82586, 82593. - -The receive algorithm in eepro_rx() is just an implementation of the -RCV ring structure that the Intel 82595 imposes at the hardware level. -The receive buffer is set at 24K, and the transmit buffer is 8K. I -am assuming that the total buffer memory is 32K, which is true for the -Intel EtherExpress Pro/10. If it is less than that on a generic card, -the driver will be broken. - -The transmit algorithm in the hardware_send_packet() is similar to the -one in the eepro_rx(). The transmit buffer is a ring linked list. -I just queue the next available packet to the end of the list. In my -system, the 82595 is so fast that the list seems to always contain a -single packet. 
In other systems with faster computers and more congested -network traffics, the ring linked list should improve performance by -allowing up to 8K worth of packets to be queued. - -The sizes of the receive and transmit buffers can now be changed via lilo -or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0" -where rx-buffer is in KB unit. Modules uses the parameter mem which is -also in KB unit, for example "insmod io=io-address irq=0 mem=rx-buffer." -The receive buffer has to be more than 3K or less than 29K. Otherwise, -it is reset to the default of 24K, and, hence, 8K for the trasnmit -buffer (transmit-buffer = 32K - receive-buffer). - -*/ -#define RAM_SIZE 0x8000 - -#define RCV_HEADER 8 -#define RCV_DEFAULT_RAM 0x6000 - -#define XMT_HEADER 8 -#define XMT_DEFAULT_RAM (RAM_SIZE - RCV_DEFAULT_RAM) - -#define XMT_START_PRO RCV_DEFAULT_RAM -#define XMT_START_10 0x0000 -#define RCV_START_PRO 0x0000 -#define RCV_START_10 XMT_DEFAULT_RAM - -#define RCV_DONE 0x0008 -#define RX_OK 0x2000 -#define RX_ERROR 0x0d81 - -#define TX_DONE_BIT 0x0080 -#define TX_OK 0x2000 -#define CHAIN_BIT 0x8000 -#define XMT_STATUS 0x02 -#define XMT_CHAIN 0x04 -#define XMT_COUNT 0x06 - -#define BANK0_SELECT 0x00 -#define BANK1_SELECT 0x40 -#define BANK2_SELECT 0x80 - -/* Bank 0 registers */ -#define COMMAND_REG 0x00 /* Register 0 */ -#define MC_SETUP 0x03 -#define XMT_CMD 0x04 -#define DIAGNOSE_CMD 0x07 -#define RCV_ENABLE_CMD 0x08 -#define RCV_DISABLE_CMD 0x0a -#define STOP_RCV_CMD 0x0b -#define RESET_CMD 0x0e -#define POWER_DOWN_CMD 0x18 -#define RESUME_XMT_CMD 0x1c -#define SEL_RESET_CMD 0x1e -#define STATUS_REG 0x01 /* Register 1 */ -#define RX_INT 0x02 -#define TX_INT 0x04 -#define EXEC_STATUS 0x30 -#define ID_REG 0x02 /* Register 2 */ -#define R_ROBIN_BITS 0xc0 /* round robin counter */ -#define ID_REG_MASK 0x2c -#define ID_REG_SIG 0x24 -#define AUTO_ENABLE 0x10 -#define INT_MASK_REG 0x03 /* Register 3 */ -#define RX_STOP_MASK 0x01 -#define RX_MASK 0x02 -#define TX_MASK 0x04 -#define EXEC_MASK 0x08 -#define ALL_MASK 0x0f -#define IO_32_BIT 0x10 -#define RCV_BAR 0x04 /* The following are word (16-bit) registers */ -#define RCV_STOP 0x06 - -#define XMT_BAR_PRO 0x0a -#define XMT_BAR_10 0x0b - -#define HOST_ADDRESS_REG 0x0c -#define IO_PORT 0x0e -#define IO_PORT_32_BIT 0x0c - -/* Bank 1 registers */ -#define REG1 0x01 -#define WORD_WIDTH 0x02 -#define INT_ENABLE 0x80 -#define INT_NO_REG 0x02 -#define RCV_LOWER_LIMIT_REG 0x08 -#define RCV_UPPER_LIMIT_REG 0x09 - -#define XMT_LOWER_LIMIT_REG_PRO 0x0a -#define XMT_UPPER_LIMIT_REG_PRO 0x0b -#define XMT_LOWER_LIMIT_REG_10 0x0b -#define XMT_UPPER_LIMIT_REG_10 0x0a - -/* Bank 2 registers */ -#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */ -#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */ -#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */ -#define REG2 0x02 -#define PRMSC_Mode 0x01 -#define Multi_IA 0x20 -#define REG3 0x03 -#define TPE_BIT 0x04 -#define BNC_BIT 0x20 -#define REG13 0x0d -#define FDX 0x00 -#define A_N_ENABLE 0x02 - -#define I_ADD_REG0 0x04 -#define I_ADD_REG1 0x05 -#define I_ADD_REG2 0x06 -#define I_ADD_REG3 0x07 -#define I_ADD_REG4 0x08 -#define I_ADD_REG5 0x09 - -#define EEPROM_REG_PRO 0x0a -#define EEPROM_REG_10 0x0b - -#define EESK 0x01 -#define EECS 0x02 -#define EEDI 0x04 -#define EEDO 0x08 - -/* do a full reset */ -#define eepro_reset(ioaddr) outb(RESET_CMD, ioaddr) - -/* do a nice reset */ -#define 
eepro_sel_reset(ioaddr) { \ - outb(SEL_RESET_CMD, ioaddr); \ - SLOW_DOWN; \ - SLOW_DOWN; \ - } - -/* disable all interrupts */ -#define eepro_dis_int(ioaddr) outb(ALL_MASK, ioaddr + INT_MASK_REG) - -/* clear all interrupts */ -#define eepro_clear_int(ioaddr) outb(ALL_MASK, ioaddr + STATUS_REG) - -/* enable tx/rx */ -#define eepro_en_int(ioaddr) outb(ALL_MASK & ~(RX_MASK | TX_MASK), \ - ioaddr + INT_MASK_REG) - -/* enable exec event interrupt */ -#define eepro_en_intexec(ioaddr) outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG) - -/* enable rx */ -#define eepro_en_rx(ioaddr) outb(RCV_ENABLE_CMD, ioaddr) - -/* disable rx */ -#define eepro_dis_rx(ioaddr) outb(RCV_DISABLE_CMD, ioaddr) - -/* switch bank */ -#define eepro_sw2bank0(ioaddr) outb(BANK0_SELECT, ioaddr) -#define eepro_sw2bank1(ioaddr) outb(BANK1_SELECT, ioaddr) -#define eepro_sw2bank2(ioaddr) outb(BANK2_SELECT, ioaddr) - -/* enable interrupt line */ -#define eepro_en_intline(ioaddr) outb(inb(ioaddr + REG1) | INT_ENABLE,\ - ioaddr + REG1) - -/* disable interrupt line */ -#define eepro_dis_intline(ioaddr) outb(inb(ioaddr + REG1) & 0x7f, \ - ioaddr + REG1); - -/* set diagnose flag */ -#define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr) - -/* ack for rx int */ -#define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG) - -/* ack for tx int */ -#define eepro_ack_tx(ioaddr) outb (TX_INT, ioaddr + STATUS_REG) - -/* a complete sel reset */ -#define eepro_complete_selreset(ioaddr) { \ - dev->stats.tx_errors++;\ - eepro_sel_reset(ioaddr);\ - lp->tx_end = \ - lp->xmt_lower_limit;\ - lp->tx_start = lp->tx_end;\ - lp->tx_last = 0;\ - dev->trans_start = jiffies;\ - netif_wake_queue(dev);\ - eepro_en_rx(ioaddr);\ - } - -/* Check for a network adaptor of this type, and return '0' if one exists. - If dev->base_addr == 0, probe all likely locations. - If dev->base_addr == 1, always return failure. - If dev->base_addr == 2, allocate space for the device and return success - (detachable devices only). - */ -static int __init do_eepro_probe(struct net_device *dev) -{ - int i; - int base_addr = dev->base_addr; - int irq = dev->irq; - -#ifdef PnPWakeup - /* XXXX for multiple cards should this only be run once? */ - - /* Wakeup: */ - #define WakeupPort 0x279 - #define WakeupSeq {0x6A, 0xB5, 0xDA, 0xED, 0xF6, 0xFB, 0x7D, 0xBE,\ - 0xDF, 0x6F, 0x37, 0x1B, 0x0D, 0x86, 0xC3, 0x61,\ - 0xB0, 0x58, 0x2C, 0x16, 0x8B, 0x45, 0xA2, 0xD1,\ - 0xE8, 0x74, 0x3A, 0x9D, 0xCE, 0xE7, 0x73, 0x43} - - { - unsigned short int WS[32]=WakeupSeq; - - if (request_region(WakeupPort, 2, "eepro wakeup")) { - if (net_debug>5) - printk(KERN_DEBUG "Waking UP\n"); - - outb_p(0,WakeupPort); - outb_p(0,WakeupPort); - for (i=0; i<32; i++) { - outb_p(WS[i],WakeupPort); - if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]); - } - - release_region(WakeupPort, 2); - } else - printk(KERN_WARNING "PnP wakeup region busy!\n"); - } -#endif - - if (base_addr > 0x1ff) /* Check a single specified location. */ - return eepro_probe1(dev, 0); - - else if (base_addr != 0) /* Don't probe at all. 
*/ - return -ENXIO; - - for (i = 0; eepro_portlist[i]; i++) { - dev->base_addr = eepro_portlist[i]; - dev->irq = irq; - if (eepro_probe1(dev, 1) == 0) - return 0; - } - - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init eepro_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct eepro_local)); - int err; - - if (!dev) - return ERR_PTR(-ENODEV); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_eepro_probe(dev); - if (err) - goto out; - return dev; -out: - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -static void __init printEEPROMInfo(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned short Word; - int i,j; - - j = ee_Checksum; - for (i = 0; i < 8; i++) - j += lp->word[i]; - for ( ; i < ee_SIZE; i++) - j += read_eeprom(ioaddr, i, dev); - - printk(KERN_DEBUG "Checksum: %#x\n",j&0xffff); - - Word = lp->word[0]; - printk(KERN_DEBUG "Word0:\n"); - printk(KERN_DEBUG " Plug 'n Pray: %d\n",GetBit(Word,ee_PnP)); - printk(KERN_DEBUG " Buswidth: %d\n",(GetBit(Word,ee_BusWidth)+1)*8 ); - printk(KERN_DEBUG " AutoNegotiation: %d\n",GetBit(Word,ee_AutoNeg)); - printk(KERN_DEBUG " IO Address: %#x\n", (Word>>ee_IO0)<<4); - - if (net_debug>4) { - Word = lp->word[1]; - printk(KERN_DEBUG "Word1:\n"); - printk(KERN_DEBUG " INT: %d\n", Word & ee_IntMask); - printk(KERN_DEBUG " LI: %d\n", GetBit(Word,ee_LI)); - printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC)); - printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI)); - printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber)); - printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort)); - printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex)); - } - - Word = lp->word[5]; - printk(KERN_DEBUG "Word5:\n"); - printk(KERN_DEBUG " BNC: %d\n",GetBit(Word,ee_BNC_TPE)); - printk(KERN_DEBUG " NumConnectors: %d\n",GetBit(Word,ee_NumConn)); - printk(KERN_DEBUG " Has "); - if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE "); - if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC "); - if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI "); - printk(KERN_DEBUG "port(s)\n"); - - Word = lp->word[6]; - printk(KERN_DEBUG "Word6:\n"); - printk(KERN_DEBUG " Stepping: %d\n",Word & ee_StepMask); - printk(KERN_DEBUG " BoardID: %d\n",Word>>ee_BoardID); - - Word = lp->word[7]; - printk(KERN_DEBUG "Word7:\n"); - printk(KERN_DEBUG " INT to IRQ:\n"); - - for (i=0, j=0; i<15; i++) - if (GetBit(Word,i)) printk(KERN_DEBUG " INT%d -> IRQ %d;",j++,i); - - printk(KERN_DEBUG "\n"); -} - -/* function to recalculate the limits of buffer based on rcv_ram */ -static void eepro_recalc (struct net_device *dev) -{ - struct eepro_local * lp; - - lp = netdev_priv(dev); - lp->xmt_ram = RAM_SIZE - lp->rcv_ram; - - if (lp->eepro == LAN595FX_10ISA) { - lp->xmt_lower_limit = XMT_START_10; - lp->xmt_upper_limit = (lp->xmt_ram - 2); - lp->rcv_lower_limit = lp->xmt_ram; - lp->rcv_upper_limit = (RAM_SIZE - 2); - } - else { - lp->rcv_lower_limit = RCV_START_PRO; - lp->rcv_upper_limit = (lp->rcv_ram - 2); - lp->xmt_lower_limit = lp->rcv_ram; - lp->xmt_upper_limit = (RAM_SIZE - 2); - } -} - -/* prints boot-time info */ -static void __init eepro_print_info (struct net_device *dev) -{ - struct eepro_local * lp = netdev_priv(dev); - int i; - const char * ifmap[] = {"AUI", "10Base2", "10BaseT"}; - - i = inb(dev->base_addr + ID_REG); - printk(KERN_DEBUG " id: %#x ",i); - printk(" io: %#x ", (unsigned)dev->base_addr); - - switch (lp->eepro) { - case 
LAN595FX_10ISA: - printk("%s: Intel EtherExpress 10 ISA\n at %#x,", - dev->name, (unsigned)dev->base_addr); - break; - case LAN595FX: - printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,", - dev->name, (unsigned)dev->base_addr); - break; - case LAN595TX: - printk("%s: Intel EtherExpress Pro/10 ISA at %#x,", - dev->name, (unsigned)dev->base_addr); - break; - case LAN595: - printk("%s: Intel 82595-based lan card at %#x,", - dev->name, (unsigned)dev->base_addr); - break; - } - - printk(" %pM", dev->dev_addr); - - if (net_debug > 3) - printk(KERN_DEBUG ", %dK RCV buffer", - (int)(lp->rcv_ram)/1024); - - if (dev->irq > 2) - printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]); - else - printk(", %s.\n", ifmap[dev->if_port]); - - if (net_debug > 3) { - i = lp->word[5]; - if (i & 0x2000) /* bit 13 of EEPROM word 5 */ - printk(KERN_DEBUG "%s: Concurrent Processing is " - "enabled but not used!\n", dev->name); - } - - /* Check the station address for the manufacturer's code */ - if (net_debug>3) - printEEPROMInfo(dev); -} - -static const struct ethtool_ops eepro_ethtool_ops; - -static const struct net_device_ops eepro_netdev_ops = { - .ndo_open = eepro_open, - .ndo_stop = eepro_close, - .ndo_start_xmit = eepro_send_packet, - .ndo_set_multicast_list = set_multicast_list, - .ndo_tx_timeout = eepro_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/* This is the real probe routine. Linux has a history of friendly device - probes on the ISA bus. A good device probe avoids doing writes, and - verifies that the correct device exists and functions. */ - -static int __init eepro_probe1(struct net_device *dev, int autoprobe) -{ - unsigned short station_addr[3], id, counter; - int i; - struct eepro_local *lp; - int ioaddr = dev->base_addr; - int err; - - /* Grab the region so we can find another board if autoIRQ fails. */ - if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { - if (!autoprobe) - printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n", - ioaddr); - return -EBUSY; - } - - /* Now, we are going to check for the signature of the - ID_REG (register 2 of bank 0) */ - - id = inb(ioaddr + ID_REG); - - if ((id & ID_REG_MASK) != ID_REG_SIG) - goto exit; - - /* We seem to have the 82595 signature, let's - play with its counter (last 2 bits of - register 2 of bank 0) to be sure. */ - - counter = id & R_ROBIN_BITS; - - if ((inb(ioaddr + ID_REG) & R_ROBIN_BITS) != (counter + 0x40)) - goto exit; - - lp = netdev_priv(dev); - memset(lp, 0, sizeof(struct eepro_local)); - lp->xmt_bar = XMT_BAR_PRO; - lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_PRO; - lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_PRO; - lp->eeprom_reg = EEPROM_REG_PRO; - spin_lock_init(&lp->lock); - - /* Now, get the ethernet hardware address from - the EEPROM */ - station_addr[0] = read_eeprom(ioaddr, 2, dev); - - /* FIXME - find another way to know that we've found - * an Etherexpress 10 - */ - if (station_addr[0] == 0x0000 || station_addr[0] == 0xffff) { - lp->eepro = LAN595FX_10ISA; - lp->eeprom_reg = EEPROM_REG_10; - lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_10; - lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_10; - lp->xmt_bar = XMT_BAR_10; - station_addr[0] = read_eeprom(ioaddr, 2, dev); - } - - /* get all words at once. 
will be used here and for ethtool */ - for (i = 0; i < 8; i++) { - lp->word[i] = read_eeprom(ioaddr, i, dev); - } - station_addr[1] = lp->word[3]; - station_addr[2] = lp->word[4]; - - if (!lp->eepro) { - if (lp->word[7] == ee_FX_INT2IRQ) - lp->eepro = 2; - else if (station_addr[2] == SA_ADDR1) - lp->eepro = 1; - } - - /* Fill in the 'dev' fields. */ - for (i=0; i < 6; i++) - dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i]; - - /* RX buffer must be more than 3K and less than 29K */ - if (dev->mem_end < 3072 || dev->mem_end > 29696) - lp->rcv_ram = RCV_DEFAULT_RAM; - - /* calculate {xmt,rcv}_{lower,upper}_limit */ - eepro_recalc(dev); - - if (GetBit(lp->word[5], ee_BNC_TPE)) - dev->if_port = BNC; - else - dev->if_port = TPE; - - if (dev->irq < 2 && lp->eepro != 0) { - /* Mask off INT number */ - int count = lp->word[1] & 7; - unsigned irqMask = lp->word[7]; - - while (count--) - irqMask &= irqMask - 1; - - count = ffs(irqMask); - - if (count) - dev->irq = count - 1; - - if (dev->irq < 2) { - printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n"); - goto exit; - } else if (dev->irq == 2) { - dev->irq = 9; - } - } - - dev->netdev_ops = &eepro_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - dev->ethtool_ops = &eepro_ethtool_ops; - - /* print boot time info */ - eepro_print_info(dev); - - /* reset 82595 */ - eepro_reset(ioaddr); - - err = register_netdev(dev); - if (err) - goto err; - return 0; -exit: - err = -ENODEV; -err: - release_region(dev->base_addr, EEPRO_IO_EXTENT); - return err; -} - -/* Open/initialize the board. This is called (in the current kernel) - sometime after booting when the 'ifconfig' program is run. - - This routine should set everything up anew at each open, even - registers that "should" only need to be set once at boot, so that - there is non-reboot way to recover if something goes wrong. - */ - -static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1}; -static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1}; -static int eepro_grab_irq(struct net_device *dev) -{ - static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 }; - const int *irqp = irqlist; - int temp_reg, ioaddr = dev->base_addr; - - eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ - - /* Enable the interrupt line. */ - eepro_en_intline(ioaddr); - - /* be CAREFUL, BANK 0 now */ - eepro_sw2bank0(ioaddr); - - /* clear all interrupts */ - eepro_clear_int(ioaddr); - - /* Let EXEC event to interrupt */ - eepro_en_intexec(ioaddr); - - do { - eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ - - temp_reg = inb(ioaddr + INT_NO_REG); - outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG); - - eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ - - if (request_irq (*irqp, NULL, IRQF_SHARED, "bogus", dev) != EBUSY) { - unsigned long irq_mask; - /* Twinkle the interrupt, and check if it's seen */ - irq_mask = probe_irq_on(); - - eepro_diag(ioaddr); /* RESET the 82595 */ - mdelay(20); - - if (*irqp == probe_irq_off(irq_mask)) /* It's a good IRQ line */ - break; - - /* clear all interrupts */ - eepro_clear_int(ioaddr); - } - } while (*++irqp); - - eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */ - - /* Disable the physical interrupt line. */ - eepro_dis_intline(ioaddr); - - eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ - - /* Mask all the interrupts. 
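[Editor's note] eepro_grab_irq() relies on the kernel's classic autoprobe helpers: open a probe window with probe_irq_on(), provoke the card into interrupting, then let probe_irq_off() report which line fired. The pattern in isolation; trigger_board_irq() is a placeholder for the board-specific poke (the eepro_diag() reset used here):

	/* Sketch of the probe_irq_on()/probe_irq_off() pattern used in
	 * eepro_grab_irq(). trigger_board_irq() stands in for whatever
	 * board-specific write makes the card raise an interrupt. */
	static int autoprobe_irq(struct net_device *dev)
	{
		unsigned long mask = probe_irq_on();	/* start listening */

		trigger_board_irq(dev);			/* make the card fire */
		mdelay(20);				/* give it time to arrive */
		return probe_irq_off(mask);	/* >0 IRQ, 0 none, <0 several */
	}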
*/ - eepro_dis_int(ioaddr); - - /* clear all interrupts */ - eepro_clear_int(ioaddr); - - return dev->irq; -} - -static int eepro_open(struct net_device *dev) -{ - unsigned short temp_reg, old8, old9; - int irqMask; - int i, ioaddr = dev->base_addr; - struct eepro_local *lp = netdev_priv(dev); - - if (net_debug > 3) - printk(KERN_DEBUG "%s: entering eepro_open routine.\n", dev->name); - - irqMask = lp->word[7]; - - if (lp->eepro == LAN595FX_10ISA) { - if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 3;\n"); - } - else if (irqMask == ee_FX_INT2IRQ) /* INT to IRQ Mask */ - { - lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */ - if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 2;\n"); - } - - else if ((dev->dev_addr[0] == SA_ADDR0 && - dev->dev_addr[1] == SA_ADDR1 && - dev->dev_addr[2] == SA_ADDR2)) - { - lp->eepro = 1; - if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 1;\n"); - } /* Yes, an Intel EtherExpress Pro/10 */ - - else lp->eepro = 0; /* No, it is a generic 82585 lan card */ - - /* Get the interrupt vector for the 82595 */ - if (dev->irq < 2 && eepro_grab_irq(dev) == 0) { - printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); - return -EAGAIN; - } - - if (request_irq(dev->irq , eepro_interrupt, 0, dev->name, dev)) { - printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); - return -EAGAIN; - } - - /* Initialize the 82595. */ - - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ - temp_reg = inb(ioaddr + lp->eeprom_reg); - - lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */ - - if (net_debug > 3) - printk(KERN_DEBUG "The stepping of the 82595 is %d\n", lp->stepping); - - if (temp_reg & 0x10) /* Check the TurnOff Enable bit */ - outb(temp_reg & 0xef, ioaddr + lp->eeprom_reg); - for (i=0; i < 6; i++) - outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i); - - temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */ - outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */ - | RCV_Discard_BadFrame, ioaddr + REG1); - - temp_reg = inb(ioaddr + REG2); /* Match broadcast */ - outb(temp_reg | 0x14, ioaddr + REG2); - - temp_reg = inb(ioaddr + REG3); - outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */ - - /* Set the receiving mode */ - eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ - - /* Set the interrupt vector */ - temp_reg = inb(ioaddr + INT_NO_REG); - if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA) - outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG); - else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG); - - - temp_reg = inb(ioaddr + INT_NO_REG); - if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA) - outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG); - else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG); - - if (net_debug > 3) - printk(KERN_DEBUG "eepro_open: content of INT Reg is %x\n", temp_reg); - - - /* Initialize the RCV and XMT upper and lower limits */ - outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG); - outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG); - outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg); - outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg); - - /* Enable the interrupt line. 
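[Editor's note] The 82595 multiplexes three register banks over its 16 I/O ports, which is why eepro_open() brackets nearly every register access with eepro_sw2bank*() calls. The recurring read-modify-write pattern, as a hypothetical helper built only from the macros and registers defined above:

	/* Sketch of the bank-switched access pattern used throughout
	 * eepro_open(); the helper name is illustrative. */
	static void eepro_set_reg1_bits(int ioaddr, unsigned char bits)
	{
		unsigned char v;

		eepro_sw2bank1(ioaddr);		/* REG1 lives in bank 1 */
		v = inb(ioaddr + REG1);
		outb(v | bits, ioaddr + REG1);
		eepro_sw2bank0(ioaddr);		/* normal operation: bank 0 */
	}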
*/ - eepro_en_intline(ioaddr); - - /* Switch back to Bank 0 */ - eepro_sw2bank0(ioaddr); - - /* Let RX and TX events to interrupt */ - eepro_en_int(ioaddr); - - /* clear all interrupts */ - eepro_clear_int(ioaddr); - - /* Initialize RCV */ - outw(lp->rcv_lower_limit, ioaddr + RCV_BAR); - lp->rx_start = lp->rcv_lower_limit; - outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP); - - /* Initialize XMT */ - outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar); - lp->tx_start = lp->tx_end = lp->xmt_lower_limit; - lp->tx_last = 0; - - /* Check for the i82595TX and i82595FX */ - old8 = inb(ioaddr + 8); - outb(~old8, ioaddr + 8); - - if ((temp_reg = inb(ioaddr + 8)) == old8) { - if (net_debug > 3) - printk(KERN_DEBUG "i82595 detected!\n"); - lp->version = LAN595; - } - else { - lp->version = LAN595TX; - outb(old8, ioaddr + 8); - old9 = inb(ioaddr + 9); - - if (irqMask==ee_FX_INT2IRQ) { - if (net_debug > 3) { - printk(KERN_DEBUG "IrqMask: %#x\n",irqMask); - printk(KERN_DEBUG "i82595FX detected!\n"); - } - lp->version = LAN595FX; - outb(old9, ioaddr + 9); - if (dev->if_port != TPE) { /* Hopefully, this will fix the - problem of using Pentiums and - pro/10 w/ BNC. */ - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ - temp_reg = inb(ioaddr + REG13); - /* disable the full duplex mode since it is not - applicable with the 10Base2 cable. */ - outb(temp_reg & ~(FDX | A_N_ENABLE), REG13); - eepro_sw2bank0(ioaddr); /* be CAREFUL, BANK 0 now */ - } - } - else if (net_debug > 3) { - printk(KERN_DEBUG "temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff)); - printk(KERN_DEBUG "i82595TX detected!\n"); - } - } - - eepro_sel_reset(ioaddr); - - netif_start_queue(dev); - - if (net_debug > 3) - printk(KERN_DEBUG "%s: exiting eepro_open routine.\n", dev->name); - - /* enabling rx */ - eepro_en_rx(ioaddr); - - return 0; -} - -static void eepro_tx_timeout (struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - /* if (net_debug > 1) */ - printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name, - "network cable problem"); - /* This is not a duplicate. One message for the console, - one for the log file */ - printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name, - "network cable problem"); - eepro_complete_selreset(ioaddr); -} - - -static netdev_tx_t eepro_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - unsigned long flags; - int ioaddr = dev->base_addr; - short length = skb->len; - - if (net_debug > 5) - printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name); - - if (length < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - netif_stop_queue (dev); - - eepro_dis_int(ioaddr); - spin_lock_irqsave(&lp->lock, flags); - - { - unsigned char *buf = skb->data; - - if (hardware_send_packet(dev, buf, length)) - /* we won't wake queue here because we're out of space */ - dev->stats.tx_dropped++; - else { - dev->stats.tx_bytes+=skb->len; - netif_wake_queue(dev); - } - - } - - dev_kfree_skb (skb); - - /* You might need to clean up and record Tx statistics here. */ - /* dev->stats.tx_aborted_errors++; */ - - if (net_debug > 5) - printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name); - - eepro_en_int(ioaddr); - spin_unlock_irqrestore(&lp->lock, flags); - - return NETDEV_TX_OK; -} - - -/* The typical workload of the driver: - Handle the network interface interrupts. 
*/ - -static irqreturn_t -eepro_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct eepro_local *lp; - int ioaddr, status, boguscount = 20; - int handled = 0; - - lp = netdev_priv(dev); - - spin_lock(&lp->lock); - - if (net_debug > 5) - printk(KERN_DEBUG "%s: entering eepro_interrupt routine.\n", dev->name); - - ioaddr = dev->base_addr; - - while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT|TX_INT)) && (boguscount--)) - { - handled = 1; - if (status & RX_INT) { - if (net_debug > 4) - printk(KERN_DEBUG "%s: packet received interrupt.\n", dev->name); - - eepro_dis_int(ioaddr); - - /* Get the received packets */ - eepro_ack_rx(ioaddr); - eepro_rx(dev); - - eepro_en_int(ioaddr); - } - if (status & TX_INT) { - if (net_debug > 4) - printk(KERN_DEBUG "%s: packet transmit interrupt.\n", dev->name); - - - eepro_dis_int(ioaddr); - - /* Process the status of transmitted packets */ - eepro_ack_tx(ioaddr); - eepro_transmit_interrupt(dev); - - eepro_en_int(ioaddr); - } - } - - if (net_debug > 5) - printk(KERN_DEBUG "%s: exiting eepro_interrupt routine.\n", dev->name); - - spin_unlock(&lp->lock); - return IRQ_RETVAL(handled); -} - -static int eepro_close(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - short temp_reg; - - netif_stop_queue(dev); - - eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */ - - /* Disable the physical interrupt line. */ - temp_reg = inb(ioaddr + REG1); - outb(temp_reg & 0x7f, ioaddr + REG1); - - eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ - - /* Flush the Tx and disable Rx. */ - outb(STOP_RCV_CMD, ioaddr); - lp->tx_start = lp->tx_end = lp->xmt_lower_limit; - lp->tx_last = 0; - - /* Mask all the interrupts. */ - eepro_dis_int(ioaddr); - - /* clear all interrupts */ - eepro_clear_int(ioaddr); - - /* Reset the 82595 */ - eepro_reset(ioaddr); - - /* release the interrupt */ - free_irq(dev->irq, dev); - - /* Update the statistics here. What statistics? */ - - return 0; -} - -/* Set or clear the multicast filter for this adaptor. - */ -static void -set_multicast_list(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - unsigned short mode; - struct netdev_hw_addr *ha; - int mc_count = netdev_mc_count(dev); - - if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63) - { - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ - mode = inb(ioaddr + REG2); - outb(mode | PRMSC_Mode, ioaddr + REG2); - mode = inb(ioaddr + REG3); - outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */ - eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ - } - - else if (mc_count == 0) - { - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ - mode = inb(ioaddr + REG2); - outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */ - mode = inb(ioaddr + REG3); - outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */ - eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ - } - - else - { - unsigned short status, *eaddrs; - int i, boguscount = 0; - - /* Disable RX and TX interrupts. Necessary to avoid - corruption of the HOST_ADDRESS_REG by interrupt - service routines. */ - eepro_dis_int(ioaddr); - - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ - mode = inb(ioaddr + REG2); - outb(mode | Multi_IA, ioaddr + REG2); - mode = inb(ioaddr + REG3); - outb(mode, ioaddr + REG3); /* writing reg. 
3 to complete the update */ - eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ - outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG); - outw(MC_SETUP, ioaddr + IO_PORT); - outw(0, ioaddr + IO_PORT); - outw(0, ioaddr + IO_PORT); - outw(6 * (mc_count + 1), ioaddr + IO_PORT); - - netdev_for_each_mc_addr(ha, dev) { - eaddrs = (unsigned short *) ha->addr; - outw(*eaddrs++, ioaddr + IO_PORT); - outw(*eaddrs++, ioaddr + IO_PORT); - outw(*eaddrs++, ioaddr + IO_PORT); - } - - eaddrs = (unsigned short *) dev->dev_addr; - outw(eaddrs[0], ioaddr + IO_PORT); - outw(eaddrs[1], ioaddr + IO_PORT); - outw(eaddrs[2], ioaddr + IO_PORT); - outw(lp->tx_end, ioaddr + lp->xmt_bar); - outb(MC_SETUP, ioaddr); - - /* Update the transmit queue */ - i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1); - - if (lp->tx_start != lp->tx_end) - { - /* update the next address and the chain bit in the - last packet */ - outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG); - outw(i, ioaddr + IO_PORT); - outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG); - status = inw(ioaddr + IO_PORT); - outw(status | CHAIN_BIT, ioaddr + IO_PORT); - lp->tx_end = i ; - } - else { - lp->tx_start = lp->tx_end = i ; - } - - /* Acknowledge that the MC setup is done */ - do { /* We should be doing this in the eepro_interrupt()! */ - SLOW_DOWN; - SLOW_DOWN; - if (inb(ioaddr + STATUS_REG) & 0x08) - { - i = inb(ioaddr); - outb(0x08, ioaddr + STATUS_REG); - - if (i & 0x20) { /* command ABORTed */ - printk(KERN_NOTICE "%s: multicast setup failed.\n", - dev->name); - break; - } else if ((i & 0x0f) == 0x03) { /* MC-Done */ - printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n", - dev->name, mc_count, - mc_count > 1 ? "es":""); - break; - } - } - } while (++boguscount < 100); - - /* Re-enable RX and TX interrupts */ - eepro_en_int(ioaddr); - } - if (lp->eepro == LAN595FX_10ISA) { - eepro_complete_selreset(ioaddr); - } - else - eepro_en_rx(ioaddr); -} - -/* The horrible routine to read a word from the serial EEPROM. */ -/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */ - -/* The delay between EEPROM clock transitions. */ -#define eeprom_delay() { udelay(40); } -#define EE_READ_CMD (6 << 6) - -static int -read_eeprom(int ioaddr, int location, struct net_device *dev) -{ - int i; - unsigned short retval = 0; - struct eepro_local *lp = netdev_priv(dev); - short ee_addr = ioaddr + lp->eeprom_reg; - int read_cmd = location | EE_READ_CMD; - short ctrl_val = EECS ; - - /* XXXX - black magic */ - eepro_sw2bank1(ioaddr); - outb(0x00, ioaddr + STATUS_REG); - /* XXXX - black magic */ - - eepro_sw2bank2(ioaddr); - outb(ctrl_val, ee_addr); - - /* Shift the read command bits out. */ - for (i = 8; i >= 0; i--) { - short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI - : ctrl_val; - outb(outval, ee_addr); - outb(outval | EESK, ee_addr); /* EEPROM clock tick. */ - eeprom_delay(); - outb(outval, ee_addr); /* Finish EEPROM a clock tick. */ - eeprom_delay(); - } - outb(ctrl_val, ee_addr); - - for (i = 16; i > 0; i--) { - outb(ctrl_val | EESK, ee_addr); eeprom_delay(); - retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0); - outb(ctrl_val, ee_addr); eeprom_delay(); - } - - /* Terminate the EEPROM access. 
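[Editor's note] read_eeprom() above is a textbook bit-banged read of a 93C46-class serial EEPROM: clock a 9-bit command (start bit, opcode, address) out on EEDI, then clock 16 data bits back in on EEDO. The same protocol reduced to its skeleton; ee_out()/ee_in() are placeholders for the outb()/inb() calls on the EEPROM register, and delays are omitted:

	/* Skeleton of the serial EEPROM read protocol implemented above. */
	static unsigned short ee_bitbang_read(int location)
	{
		int cmd = location | EE_READ_CMD;	/* start + read opcode */
		unsigned short val = 0;
		int i;

		for (i = 8; i >= 0; i--) {		/* shift command out */
			int di = (cmd & (1 << i)) ? EEDI : 0;

			ee_out(EECS | di);
			ee_out(EECS | di | EESK);	/* rising clock edge */
			ee_out(EECS | di);
		}
		for (i = 16; i > 0; i--) {		/* shift data back in */
			ee_out(EECS | EESK);
			val = (val << 1) | ((ee_in() & EEDO) ? 1 : 0);
			ee_out(EECS);
		}
		ee_out(0);				/* deselect the chip */
		return val;
	}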
*/ - ctrl_val &= ~EECS; - outb(ctrl_val | EESK, ee_addr); - eeprom_delay(); - outb(ctrl_val, ee_addr); - eeprom_delay(); - eepro_sw2bank0(ioaddr); - return retval; -} - -static int -hardware_send_packet(struct net_device *dev, void *buf, short length) -{ - struct eepro_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - unsigned status, tx_available, last, end; - - if (net_debug > 5) - printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name); - - /* determine how much of the transmit buffer space is available */ - if (lp->tx_end > lp->tx_start) - tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start); - else if (lp->tx_end < lp->tx_start) - tx_available = lp->tx_start - lp->tx_end; - else tx_available = lp->xmt_ram; - - if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) { - /* No space available ??? */ - return 1; - } - - last = lp->tx_end; - end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; - - if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */ - if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) { - /* Arrrr!!!, must keep the xmt header together, - several days were lost to chase this one down. */ - last = lp->xmt_lower_limit; - end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; - } - else end = lp->xmt_lower_limit + (end - - lp->xmt_upper_limit + 2); - } - - outw(last, ioaddr + HOST_ADDRESS_REG); - outw(XMT_CMD, ioaddr + IO_PORT); - outw(0, ioaddr + IO_PORT); - outw(end, ioaddr + IO_PORT); - outw(length, ioaddr + IO_PORT); - - if (lp->version == LAN595) - outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1); - else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */ - unsigned short temp = inb(ioaddr + INT_MASK_REG); - outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG); - outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2); - outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG); - } - - /* A dummy read to flush the DRAM write pipeline */ - status = inw(ioaddr + IO_PORT); - - if (lp->tx_start == lp->tx_end) { - outw(last, ioaddr + lp->xmt_bar); - outb(XMT_CMD, ioaddr); - lp->tx_start = last; /* I don't like to change tx_start here */ - } - else { - /* update the next address and the chain bit in the - last packet */ - - if (lp->tx_end != last) { - outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG); - outw(last, ioaddr + IO_PORT); - } - - outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG); - status = inw(ioaddr + IO_PORT); - outw(status | CHAIN_BIT, ioaddr + IO_PORT); - - /* Continue the transmit command */ - outb(RESUME_XMT_CMD, ioaddr); - } - - lp->tx_last = last; - lp->tx_end = end; - - if (net_debug > 5) - printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name); - - return 0; -} - -static void -eepro_rx(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - short boguscount = 20; - short rcv_car = lp->rx_start; - unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size; - - if (net_debug > 5) - printk(KERN_DEBUG "%s: entering eepro_rx routine.\n", dev->name); - - /* Set the read pointer to the start of the RCV */ - outw(rcv_car, ioaddr + HOST_ADDRESS_REG); - - rcv_event = inw(ioaddr + IO_PORT); - - while (rcv_event == RCV_DONE) { - - rcv_status = inw(ioaddr + IO_PORT); - rcv_next_frame = inw(ioaddr + IO_PORT); - rcv_size = inw(ioaddr + IO_PORT); - - if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) { - - /* Malloc up new buffer. 
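[Editor's note] hardware_send_packet() above treats the transmit area as a ring: the free space depends on whether the in-use region has wrapped past xmt_upper_limit. Its space computation restated as a hypothetical helper:

	/* Restatement of hardware_send_packet()'s free-space computation
	 * for the circular TX area; the helper itself is illustrative. */
	static unsigned eepro_tx_space(const struct eepro_local *lp)
	{
		if (lp->tx_end > lp->tx_start)	/* in-use region contiguous */
			return lp->xmt_ram - (lp->tx_end - lp->tx_start);
		if (lp->tx_end < lp->tx_start)	/* in-use region has wrapped */
			return lp->tx_start - lp->tx_end;
		return lp->xmt_ram;		/* ring empty */
	}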
*/ - struct sk_buff *skb; - - dev->stats.rx_bytes+=rcv_size; - rcv_size &= 0x3fff; - skb = dev_alloc_skb(rcv_size+5); - if (skb == NULL) { - printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - rcv_car = lp->rx_start + RCV_HEADER + rcv_size; - lp->rx_start = rcv_next_frame; - outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); - - break; - } - skb_reserve(skb,2); - - if (lp->version == LAN595) - insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1); - else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */ - unsigned short temp = inb(ioaddr + INT_MASK_REG); - outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG); - insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size), - (rcv_size + 3) >> 2); - outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG); - } - - skb->protocol = eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - } - - else { /* Not sure will ever reach here, - I set the 595 to discard bad received frames */ - dev->stats.rx_errors++; - - if (rcv_status & 0x0100) - dev->stats.rx_over_errors++; - - else if (rcv_status & 0x0400) - dev->stats.rx_frame_errors++; - - else if (rcv_status & 0x0800) - dev->stats.rx_crc_errors++; - - printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", - dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); - } - - if (rcv_status & 0x1000) - dev->stats.rx_length_errors++; - - rcv_car = lp->rx_start + RCV_HEADER + rcv_size; - lp->rx_start = rcv_next_frame; - - if (--boguscount == 0) - break; - - outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); - rcv_event = inw(ioaddr + IO_PORT); - - } - if (rcv_car == 0) - rcv_car = lp->rcv_upper_limit | 0xff; - - outw(rcv_car - 1, ioaddr + RCV_STOP); - - if (net_debug > 5) - printk(KERN_DEBUG "%s: exiting eepro_rx routine.\n", dev->name); -} - -static void -eepro_transmit_interrupt(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - short ioaddr = dev->base_addr; - short boguscount = 25; - short xmt_status; - - while ((lp->tx_start != lp->tx_end) && boguscount--) { - - outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG); - xmt_status = inw(ioaddr+IO_PORT); - - if (!(xmt_status & TX_DONE_BIT)) - break; - - xmt_status = inw(ioaddr+IO_PORT); - lp->tx_start = inw(ioaddr+IO_PORT); - - netif_wake_queue (dev); - - if (xmt_status & TX_OK) - dev->stats.tx_packets++; - else { - dev->stats.tx_errors++; - if (xmt_status & 0x0400) { - dev->stats.tx_carrier_errors++; - printk(KERN_DEBUG "%s: carrier error\n", - dev->name); - printk(KERN_DEBUG "%s: XMT status = %#x\n", - dev->name, xmt_status); - } - else { - printk(KERN_DEBUG "%s: XMT status = %#x\n", - dev->name, xmt_status); - printk(KERN_DEBUG "%s: XMT status = %#x\n", - dev->name, xmt_status); - } - } - if (xmt_status & 0x000f) { - dev->stats.collisions += (xmt_status & 0x000f); - } - - if ((xmt_status & 0x0040) == 0x0) { - dev->stats.tx_heartbeat_errors++; - } - } -} - -static int eepro_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct eepro_local *lp = netdev_priv(dev); - - cmd->supported = SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_Autoneg; - cmd->advertising = ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_Autoneg; - - if (GetBit(lp->word[5], ee_PortTPE)) { - cmd->supported |= SUPPORTED_TP; - cmd->advertising |= ADVERTISED_TP; - } - if (GetBit(lp->word[5], ee_PortBNC)) { - cmd->supported |= SUPPORTED_BNC; - cmd->advertising |= ADVERTISED_BNC; - } - if (GetBit(lp->word[5], ee_PortAUI)) { - 
cmd->supported |= SUPPORTED_AUI; - cmd->advertising |= ADVERTISED_AUI; - } - - ethtool_cmd_speed_set(cmd, SPEED_10); - - if (dev->if_port == TPE && lp->word[1] & ee_Duplex) { - cmd->duplex = DUPLEX_FULL; - } - else { - cmd->duplex = DUPLEX_HALF; - } - - cmd->port = dev->if_port; - cmd->phy_address = dev->base_addr; - cmd->transceiver = XCVR_INTERNAL; - - if (lp->word[0] & ee_AutoNeg) { - cmd->autoneg = 1; - } - - return 0; -} - -static void eepro_ethtool_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - strcpy(drvinfo->driver, DRV_NAME); - strcpy(drvinfo->version, DRV_VERSION); - sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr); -} - -static const struct ethtool_ops eepro_ethtool_ops = { - .get_settings = eepro_ethtool_get_settings, - .get_drvinfo = eepro_ethtool_get_drvinfo, -}; - -#ifdef MODULE - -#define MAX_EEPRO 8 -static struct net_device *dev_eepro[MAX_EEPRO]; - -static int io[MAX_EEPRO] = { - [0 ... MAX_EEPRO-1] = -1 -}; -static int irq[MAX_EEPRO]; -static int mem[MAX_EEPRO] = { /* Size of the rx buffer in KB */ - [0 ... MAX_EEPRO-1] = RCV_DEFAULT_RAM/1024 -}; -static int autodetect; - -static int n_eepro; -/* For linux 2.1.xx */ - -MODULE_AUTHOR("Pascal Dupuis and others"); -MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver"); -MODULE_LICENSE("GPL"); - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -module_param_array(mem, int, NULL, 0); -module_param(autodetect, int, 0); -MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)"); -MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); -MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); -MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); - -int __init init_module(void) -{ - struct net_device *dev; - int i; - if (io[0] == -1 && autodetect == 0) { - printk(KERN_WARNING "eepro_init_module: Probe is very dangerous in ISA boards!\n"); - printk(KERN_WARNING "eepro_init_module: Please add \"autodetect=1\" to force probe\n"); - return -ENODEV; - } - else if (autodetect) { - /* if autodetect is set then we must force detection */ - for (i = 0; i < MAX_EEPRO; i++) { - io[i] = 0; - } - - printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n"); - } - - for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) { - dev = alloc_etherdev(sizeof(struct eepro_local)); - if (!dev) - break; - - dev->mem_end = mem[i]; - dev->base_addr = io[i]; - dev->irq = irq[i]; - - if (do_eepro_probe(dev) == 0) { - dev_eepro[n_eepro++] = dev; - continue; - } - free_netdev(dev); - break; - } - - if (n_eepro) - printk(KERN_INFO "%s", version); - - return n_eepro ? 0 : -ENODEV; -} - -void __exit -cleanup_module(void) -{ - int i; - - for (i=0; i<n_eepro; i++) { - struct net_device *dev = dev_eepro[i]; - unregister_netdev(dev); - release_region(dev->base_addr, EEPRO_IO_EXTENT); - free_netdev(dev); - } -} -#endif /* MODULE */ diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c deleted file mode 100644 index a192285..0000000 --- a/drivers/net/eexpress.c +++ /dev/null @@ -1,1720 +0,0 @@ -/* Intel EtherExpress 16 device driver for Linux - * - * Written by John Sullivan, 1995 - * based on original code by Donald Becker, with changes by - * Alan Cox and Pauline Middelink. - * - * Support for 8-bit mode by Zoltan Szilagyi - * - * Many modifications, and currently maintained, by - * Philip Blundell - * Added the Compaq LTE Alan Cox - * Added MCA support Adam Fritzler - * - * Note - this driver is experimental still - it has problems on faster - * machines.
Someone needs to sit down and go through it line by line with - * a databook... - */ - -/* The EtherExpress 16 is a fairly simple card, based on a shared-memory - * design using the i82586 Ethernet coprocessor. It bears no relationship, - * as far as I know, to the similarly-named "EtherExpress Pro" range. - * - * Historically, Linux support for these cards has been very bad. However, - * things seem to be getting better slowly. - */ - -/* If your card is confused about what sort of interface it has (eg it - * persistently reports "10baseT" when none is fitted), running 'SOFTSET /BART' - * or 'SOFTSET /LISA' from DOS seems to help. - */ - -/* Here's the scoop on memory mapping. - * - * There are three ways to access EtherExpress card memory: either using the - * shared-memory mapping, or using PIO through the dataport, or using PIO - * through the "shadow memory" ports. - * - * The shadow memory system works by having the card map some of its memory - * as follows: - * - * (the low five bits of the SMPTR are ignored) - * - * base+0x4000..400f memory at SMPTR+0..15 - * base+0x8000..800f memory at SMPTR+16..31 - * base+0xc000..c007 dubious stuff (memory at SMPTR+16..23 apparently) - * base+0xc008..c00f memory at 0x0008..0x000f - * - * This last set (the one at c008) is particularly handy because the SCB - * lives at 0x0008. So that set of ports gives us easy random access to data - * in the SCB without having to mess around setting up pointers and the like. - * We always use this method to access the SCB (via the scb_xx() functions). - * - * Dataport access works by aiming the appropriate (read or write) pointer - * at the first address you're interested in, and then reading or writing from - * the dataport. The pointers auto-increment after each transfer. We use - * this for data transfer. - * - * We don't use the shared-memory system because it allegedly doesn't work on - * all cards, and because it's a bit more prone to go wrong (it's one more - * thing to configure...). - */ - -/* Known bugs: - * - * - The card seems to want to give us two interrupts every time something - * happens, where just one would be better. - */ - -/* - * - * Note by Zoltan Szilagyi 10-12-96: - * - * I've succeeded in eliminating the "CU wedged" messages, and hence the - * lockups, which were only occurring with cards running in 8-bit mode ("force - * 8-bit operation" in Intel's SoftSet utility). This version of the driver - * sets the 82586 and the ASIC to 8-bit mode at startup; it also stops the - * CU before submitting a packet for transmission, and then restarts it as soon - * as the process of handing the packet is complete. This is definitely an - * unnecessary slowdown if the card is running in 16-bit mode; therefore one - * should detect 16-bit vs 8-bit mode from the EEPROM settings and act - * accordingly. In 8-bit mode with this bugfix I'm getting about 150 K/s for - * ftp's, which is significantly better than I get in DOS, so the overhead of - * stopping and restarting the CU with each transmit is not prohibitive in - * practice. - * - * Update by David Woodhouse 11/5/99: - * - * I've seen "CU wedged" messages in 16-bit mode, on the Alpha architecture. - * I assume that this is because 16-bit accesses are actually handled as two - * 8-bit accesses. 
- */ - -#ifdef __alpha__ -#define LOCKUP16 1 -#endif -#ifndef LOCKUP16 -#define LOCKUP16 0 -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifndef NET_DEBUG -#define NET_DEBUG 4 -#endif - -#include "eexpress.h" - -#define EEXP_IO_EXTENT 16 - -/* - * Private data declarations - */ - -struct net_local -{ - unsigned long last_tx; /* jiffies when last transmit started */ - unsigned long init_time; /* jiffies when eexp_hw_init586 called */ - unsigned short rx_first; /* first rx buf, same as RX_BUF_START */ - unsigned short rx_last; /* last rx buf */ - unsigned short rx_ptr; /* first rx buf to look at */ - unsigned short tx_head; /* next free tx buf */ - unsigned short tx_reap; /* first in-use tx buf */ - unsigned short tx_tail; /* previous tx buf to tx_head */ - unsigned short tx_link; /* last known-executing tx buf */ - unsigned short last_tx_restart; /* set to tx_link when we - restart the CU */ - unsigned char started; - unsigned short rx_buf_start; - unsigned short rx_buf_end; - unsigned short num_tx_bufs; - unsigned short num_rx_bufs; - unsigned char width; /* 0 for 16bit, 1 for 8bit */ - unsigned char was_promisc; - unsigned char old_mc_count; - spinlock_t lock; -}; - -/* This is the code and data that is downloaded to the EtherExpress card's - * memory at boot time. - */ - -static unsigned short start_code[] = { -/* 0x0000 */ - 0x0001, /* ISCP: busy - cleared after reset */ - 0x0008,0x0000,0x0000, /* offset,address (lo,hi) of SCB */ - - 0x0000,0x0000, /* SCB: status, commands */ - 0x0000,0x0000, /* links to first command block, - first receive descriptor */ - 0x0000,0x0000, /* CRC error, alignment error counts */ - 0x0000,0x0000, /* out of resources, overrun error counts */ - - 0x0000,0x0000, /* pad */ - 0x0000,0x0000, - -/* 0x20 -- start of 82586 CU program */ -#define CONF_LINK 0x20 - 0x0000,Cmd_Config, - 0x0032, /* link to next command */ - 0x080c, /* 12 bytes follow : fifo threshold=8 */ - 0x2e40, /* don't rx bad frames - * SRDY/ARDY => ext. sync. 
: preamble len=8 - * take addresses from data buffers - * 6 bytes/address - */ - 0x6000, /* default backoff method & priority - * interframe spacing = 0x60 */ - 0xf200, /* slot time=0x200 - * max collision retry = 0xf */ -#define CONF_PROMISC 0x2e - 0x0000, /* no HDLC : normal CRC : enable broadcast - * disable promiscuous/multicast modes */ - 0x003c, /* minimum frame length = 60 octets) */ - - 0x0000,Cmd_SetAddr, - 0x003e, /* link to next command */ -#define CONF_HWADDR 0x38 - 0x0000,0x0000,0x0000, /* hardware address placed here */ - - 0x0000,Cmd_MCast, - 0x0076, /* link to next command */ -#define CONF_NR_MULTICAST 0x44 - 0x0000, /* number of bytes in multicast address(es) */ -#define CONF_MULTICAST 0x46 - 0x0000, 0x0000, 0x0000, /* some addresses */ - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, - -#define CONF_DIAG_RESULT 0x76 - 0x0000, Cmd_Diag, - 0x007c, /* link to next command */ - - 0x0000,Cmd_TDR|Cmd_INT, - 0x0084, -#define CONF_TDR_RESULT 0x82 - 0x0000, - - 0x0000,Cmd_END|Cmd_Nop, /* end of configure sequence */ - 0x0084 /* dummy link */ -}; - -/* maps irq number to EtherExpress magic value */ -static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 }; - -#ifdef CONFIG_MCA_LEGACY -/* mapping of the first four bits of the second POS register */ -static unsigned short mca_iomap[] = { - 0x270, 0x260, 0x250, 0x240, 0x230, 0x220, 0x210, 0x200, - 0x370, 0x360, 0x350, 0x340, 0x330, 0x320, 0x310, 0x300 -}; -/* bits 5-7 of the second POS register */ -static char mca_irqmap[] = { 12, 9, 3, 4, 5, 10, 11, 15 }; -#endif - -/* - * Prototypes for Linux interface - */ - -static int eexp_open(struct net_device *dev); -static int eexp_close(struct net_device *dev); -static void eexp_timeout(struct net_device *dev); -static netdev_tx_t eexp_xmit(struct sk_buff *buf, - struct net_device *dev); - -static irqreturn_t eexp_irq(int irq, void *dev_addr); -static void eexp_set_multicast(struct net_device *dev); - -/* - * Prototypes for hardware access functions - */ - -static void eexp_hw_rx_pio(struct net_device *dev); -static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf, - unsigned short len); -static int eexp_hw_probe(struct net_device *dev,unsigned short ioaddr); -static unsigned short eexp_hw_readeeprom(unsigned short ioaddr, - unsigned char location); - -static unsigned short eexp_hw_lasttxstat(struct net_device *dev); -static void eexp_hw_txrestart(struct net_device *dev); - -static void eexp_hw_txinit (struct net_device *dev); -static void eexp_hw_rxinit (struct net_device *dev); - -static void eexp_hw_init586 (struct net_device *dev); -static void eexp_setup_filter (struct net_device *dev); - -static char *eexp_ifmap[]={"AUI", "BNC", "RJ45"}; -enum eexp_iftype {AUI=0, BNC=1, TPE=2}; - -#define STARTED_RU 2 -#define STARTED_CU 1 - -/* - * Primitive hardware access functions. 
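[Editor's note] The start_code[] image above is not static: before it is copied to card memory, the driver patches it through the CONF_* word offsets (hardware address, promiscuous bit, multicast list, diagnostics results). For instance, the station address ends up at CONF_HWADDR, roughly as below; this is a sketch of what eexp_hw_init586() does, and the helper name is hypothetical:

	/* Sketch: patch the station address into the 82586 boot image.
	 * start_code[] holds 16-bit words, so the CONF_HWADDR byte
	 * offset is halved to index it. */
	static void eexp_patch_hwaddr(struct net_device *dev)
	{
		memcpy(&start_code[CONF_HWADDR >> 1], dev->dev_addr, ETH_ALEN);
	}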
- */ - -static inline unsigned short scb_status(struct net_device *dev) -{ - return inw(dev->base_addr + 0xc008); -} - -static inline unsigned short scb_rdcmd(struct net_device *dev) -{ - return inw(dev->base_addr + 0xc00a); -} - -static inline void scb_command(struct net_device *dev, unsigned short cmd) -{ - outw(cmd, dev->base_addr + 0xc00a); -} - -static inline void scb_wrcbl(struct net_device *dev, unsigned short val) -{ - outw(val, dev->base_addr + 0xc00c); -} - -static inline void scb_wrrfa(struct net_device *dev, unsigned short val) -{ - outw(val, dev->base_addr + 0xc00e); -} - -static inline void set_loopback(struct net_device *dev) -{ - outb(inb(dev->base_addr + Config) | 2, dev->base_addr + Config); -} - -static inline void clear_loopback(struct net_device *dev) -{ - outb(inb(dev->base_addr + Config) & ~2, dev->base_addr + Config); -} - -static inline unsigned short int SHADOW(short int addr) -{ - addr &= 0x1f; - if (addr > 0xf) addr += 0x3ff0; - return addr + 0x4000; -} - -/* - * Linux interface - */ - -/* - * checks for presence of EtherExpress card - */ - -static int __init do_express_probe(struct net_device *dev) -{ - unsigned short *port; - static unsigned short ports[] = { 0x240,0x300,0x310,0x270,0x320,0x340,0 }; - unsigned short ioaddr = dev->base_addr; - int dev_irq = dev->irq; - int err; - - dev->if_port = 0xff; /* not set */ - -#ifdef CONFIG_MCA_LEGACY - if (MCA_bus) { - int slot = 0; - - /* - * Only find one card at a time. Subsequent calls - * will find others, however, proper multicard MCA - * probing and setup can't be done with the - * old-style Space.c init routines. -- ASF - */ - while (slot != MCA_NOTFOUND) { - int pos0, pos1; - - slot = mca_find_unused_adapter(0x628B, slot); - if (slot == MCA_NOTFOUND) - break; - - pos0 = mca_read_stored_pos(slot, 2); - pos1 = mca_read_stored_pos(slot, 3); - ioaddr = mca_iomap[pos1&0xf]; - - dev->irq = mca_irqmap[(pos1>>4)&0x7]; - - /* - * XXX: Transceiver selection is done - * differently on the MCA version. - * How to get it to select something - * other than external/AUI is currently - * unknown. This code is just for looks. 
-- ASF - */ - if ((pos0 & 0x7) == 0x1) - dev->if_port = AUI; - else if ((pos0 & 0x7) == 0x5) { - if (pos1 & 0x80) - dev->if_port = BNC; - else - dev->if_port = TPE; - } - - mca_set_adapter_name(slot, "Intel EtherExpress 16 MCA"); - mca_set_adapter_procfn(slot, NULL, dev); - mca_mark_as_used(slot); - - break; - } - } -#endif - if (ioaddr&0xfe00) { - if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) - return -EBUSY; - err = eexp_hw_probe(dev,ioaddr); - release_region(ioaddr, EEXP_IO_EXTENT); - return err; - } else if (ioaddr) - return -ENXIO; - - for (port=&ports[0] ; *port ; port++ ) - { - unsigned short sum = 0; - int i; - if (!request_region(*port, EEXP_IO_EXTENT, "EtherExpress")) - continue; - for ( i=0 ; i<4 ; i++ ) - { - unsigned short t; - t = inb(*port + ID_PORT); - sum |= (t>>4) << ((t & 0x03)<<2); - } - if (sum==0xbaba && !eexp_hw_probe(dev,*port)) { - release_region(*port, EEXP_IO_EXTENT); - return 0; - } - release_region(*port, EEXP_IO_EXTENT); - dev->irq = dev_irq; - } - return -ENODEV; -} - -#ifndef MODULE -struct net_device * __init express_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); - int err; - - if (!dev) - return ERR_PTR(-ENOMEM); - - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - - err = do_express_probe(dev); - if (!err) - return dev; - free_netdev(dev); - return ERR_PTR(err); -} -#endif - -/* - * open and initialize the adapter, ready for use - */ - -static int eexp_open(struct net_device *dev) -{ - int ret; - unsigned short ioaddr = dev->base_addr; - struct net_local *lp = netdev_priv(dev); - -#if NET_DEBUG > 6 - printk(KERN_DEBUG "%s: eexp_open()\n", dev->name); -#endif - - if (!dev->irq || !irqrmap[dev->irq]) - return -ENXIO; - - ret = request_irq(dev->irq, eexp_irq, 0, dev->name, dev); - if (ret) - return ret; - - if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) { - printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" - , ioaddr); - goto err_out1; - } - if (!request_region(ioaddr+0x4000, EEXP_IO_EXTENT, "EtherExpress shadow")) { - printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" - , ioaddr+0x4000); - goto err_out2; - } - if (!request_region(ioaddr+0x8000, EEXP_IO_EXTENT, "EtherExpress shadow")) { - printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" - , ioaddr+0x8000); - goto err_out3; - } - if (!request_region(ioaddr+0xc000, EEXP_IO_EXTENT, "EtherExpress shadow")) { - printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" - , ioaddr+0xc000); - goto err_out4; - } - - if (lp->width) { - printk("%s: forcing ASIC to 8-bit mode\n", dev->name); - outb(inb(dev->base_addr+Config)&~4, dev->base_addr+Config); - } - - eexp_hw_init586(dev); - netif_start_queue(dev); -#if NET_DEBUG > 6 - printk(KERN_DEBUG "%s: leaving eexp_open()\n", dev->name); -#endif - return 0; - - err_out4: - release_region(ioaddr+0x8000, EEXP_IO_EXTENT); - err_out3: - release_region(ioaddr+0x4000, EEXP_IO_EXTENT); - err_out2: - release_region(ioaddr, EEXP_IO_EXTENT); - err_out1: - free_irq(dev->irq, dev); - return -EBUSY; -} - -/* - * close and disable the interface, leaving the 586 in reset. 
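- *
- * Outline of the sequence used below (illustrative summary only):
- *
- *	outb(SIRQ_dis|irqrmap[dev->irq], ioaddr+SET_IRQ);	mask board IRQ
- *	scb_command(dev, SCB_CUsuspend|SCB_RUsuspend);		stop both units
- *	outb(0, ioaddr+SIGNAL_CA);				channel attention
- *	outb(i586_RST, ioaddr+EEPROM_Ctrl);			hold the 586 in reset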
- */ - -static int eexp_close(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - struct net_local *lp = netdev_priv(dev); - - int irq = dev->irq; - - netif_stop_queue(dev); - - outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ); - lp->started = 0; - scb_command(dev, SCB_CUsuspend|SCB_RUsuspend); - outb(0,ioaddr+SIGNAL_CA); - free_irq(irq,dev); - outb(i586_RST,ioaddr+EEPROM_Ctrl); - release_region(ioaddr, EEXP_IO_EXTENT); - release_region(ioaddr+0x4000, 16); - release_region(ioaddr+0x8000, 16); - release_region(ioaddr+0xc000, 16); - - return 0; -} - -/* - * This gets called when a higher level thinks we are broken. Check that - * nothing has become jammed in the CU. - */ - -static void unstick_cu(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short ioaddr = dev->base_addr; - - if (lp->started) - { - if (time_after(jiffies, dev_trans_start(dev) + HZ/2)) - { - if (lp->tx_link==lp->last_tx_restart) - { - unsigned short boguscount=200,rsst; - printk(KERN_WARNING "%s: Retransmit timed out, status %04x, resetting...\n", - dev->name, scb_status(dev)); - eexp_hw_txinit(dev); - lp->last_tx_restart = 0; - scb_wrcbl(dev, lp->tx_link); - scb_command(dev, SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - while (!SCB_complete(rsst=scb_status(dev))) - { - if (!--boguscount) - { - boguscount=200; - printk(KERN_WARNING "%s: Reset timed out status %04x, retrying...\n", - dev->name,rsst); - scb_wrcbl(dev, lp->tx_link); - scb_command(dev, SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - } - } - netif_wake_queue(dev); - } - else - { - unsigned short status = scb_status(dev); - if (SCB_CUdead(status)) - { - unsigned short txstatus = eexp_hw_lasttxstat(dev); - printk(KERN_WARNING "%s: Transmit timed out, CU not active status %04x %04x, restarting...\n", - dev->name, status, txstatus); - eexp_hw_txrestart(dev); - } - else - { - unsigned short txstatus = eexp_hw_lasttxstat(dev); - if (netif_queue_stopped(dev) && !txstatus) - { - printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n", - dev->name,status,txstatus); - eexp_hw_init586(dev); - netif_wake_queue(dev); - } - else - { - printk(KERN_WARNING "%s: transmit timed out\n", dev->name); - } - } - } - } - } - else - { - if (time_after(jiffies, lp->init_time + 10)) - { - unsigned short status = scb_status(dev); - printk(KERN_WARNING "%s: i82586 startup timed out, status %04x, resetting...\n", - dev->name, status); - eexp_hw_init586(dev); - netif_wake_queue(dev); - } - } -} - -static void eexp_timeout(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); -#ifdef CONFIG_SMP - unsigned long flags; -#endif - int status; - - disable_irq(dev->irq); - - /* - * Best would be to use synchronize_irq(); spin_lock() here - * lets make it work first.. - */ - -#ifdef CONFIG_SMP - spin_lock_irqsave(&lp->lock, flags); -#endif - - status = scb_status(dev); - unstick_cu(dev); - printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name, - (SCB_complete(status)?"lost interrupt": - "board on fire")); - dev->stats.tx_errors++; - lp->last_tx = jiffies; - if (!SCB_complete(status)) { - scb_command(dev, SCB_CUabort); - outb(0,dev->base_addr+SIGNAL_CA); - } - netif_wake_queue(dev); -#ifdef CONFIG_SMP - spin_unlock_irqrestore(&lp->lock, flags); -#endif -} - -/* - * Called to transmit a packet, or to allow us to right ourselves - * if the kernel thinks we've died. 
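- *
- * Because the packet is pushed into card memory by PIO, nothing on the
- * card ever refers back to host memory, so the skb can be freed as soon
- * as the copy is done.  A sketch of the fast path (illustration only):
- *
- *	eexp_hw_tx_pio(dev, (unsigned short *)buf->data, length);
- *	dev_kfree_skb(buf);
- *	return NETDEV_TX_OK;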
- */ -static netdev_tx_t eexp_xmit(struct sk_buff *buf, struct net_device *dev) -{ - short length = buf->len; -#ifdef CONFIG_SMP - struct net_local *lp = netdev_priv(dev); - unsigned long flags; -#endif - -#if NET_DEBUG > 6 - printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name); -#endif - - if (buf->len < ETH_ZLEN) { - if (skb_padto(buf, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - - disable_irq(dev->irq); - - /* - * Best would be to use synchronize_irq(); spin_lock() here - * lets make it work first.. - */ - -#ifdef CONFIG_SMP - spin_lock_irqsave(&lp->lock, flags); -#endif - - { - unsigned short *data = (unsigned short *)buf->data; - - dev->stats.tx_bytes += length; - - eexp_hw_tx_pio(dev,data,length); - } - dev_kfree_skb(buf); -#ifdef CONFIG_SMP - spin_unlock_irqrestore(&lp->lock, flags); -#endif - enable_irq(dev->irq); - return NETDEV_TX_OK; -} - -/* - * Handle an EtherExpress interrupt - * If we've finished initializing, start the RU and CU up. - * If we've already started, reap tx buffers, handle any received packets, - * check to make sure we've not become wedged. - */ - -static unsigned short eexp_start_irq(struct net_device *dev, - unsigned short status) -{ - unsigned short ack_cmd = SCB_ack(status); - struct net_local *lp = netdev_priv(dev); - unsigned short ioaddr = dev->base_addr; - if ((dev->flags & IFF_UP) && !(lp->started & STARTED_CU)) { - short diag_status, tdr_status; - while (SCB_CUstat(status)==2) - status = scb_status(dev); -#if NET_DEBUG > 4 - printk("%s: CU went non-active (status %04x)\n", - dev->name, status); -#endif - - outw(CONF_DIAG_RESULT & ~31, ioaddr + SM_PTR); - diag_status = inw(ioaddr + SHADOW(CONF_DIAG_RESULT)); - if (diag_status & 1<<11) { - printk(KERN_WARNING "%s: 82586 failed self-test\n", - dev->name); - } else if (!(diag_status & 1<<13)) { - printk(KERN_WARNING "%s: 82586 self-test failed to complete\n", dev->name); - } - - outw(CONF_TDR_RESULT & ~31, ioaddr + SM_PTR); - tdr_status = inw(ioaddr + SHADOW(CONF_TDR_RESULT)); - if (tdr_status & (TDR_SHORT|TDR_OPEN)) { - printk(KERN_WARNING "%s: TDR reports cable %s at %d tick%s\n", dev->name, (tdr_status & TDR_SHORT)?"short":"broken", tdr_status & TDR_TIME, ((tdr_status & TDR_TIME) != 1) ? 
"s" : ""); - } - else if (tdr_status & TDR_XCVRPROBLEM) { - printk(KERN_WARNING "%s: TDR reports transceiver problem\n", dev->name); - } - else if (tdr_status & TDR_LINKOK) { -#if NET_DEBUG > 4 - printk(KERN_DEBUG "%s: TDR reports link OK\n", dev->name); -#endif - } else { - printk("%s: TDR is ga-ga (status %04x)\n", dev->name, - tdr_status); - } - - lp->started |= STARTED_CU; - scb_wrcbl(dev, lp->tx_link); - /* if the RU isn't running, start it now */ - if (!(lp->started & STARTED_RU)) { - ack_cmd |= SCB_RUstart; - scb_wrrfa(dev, lp->rx_buf_start); - lp->rx_ptr = lp->rx_buf_start; - lp->started |= STARTED_RU; - } - ack_cmd |= SCB_CUstart | 0x2000; - } - - if ((dev->flags & IFF_UP) && !(lp->started & STARTED_RU) && SCB_RUstat(status)==4) - lp->started|=STARTED_RU; - - return ack_cmd; -} - -static void eexp_cmd_clear(struct net_device *dev) -{ - unsigned long int oldtime = jiffies; - while (scb_rdcmd(dev) && (time_before(jiffies, oldtime + 10))); - if (scb_rdcmd(dev)) { - printk("%s: command didn't clear\n", dev->name); - } -} - -static irqreturn_t eexp_irq(int dummy, void *dev_info) -{ - struct net_device *dev = dev_info; - struct net_local *lp; - unsigned short ioaddr,status,ack_cmd; - unsigned short old_read_ptr, old_write_ptr; - - lp = netdev_priv(dev); - ioaddr = dev->base_addr; - - spin_lock(&lp->lock); - - old_read_ptr = inw(ioaddr+READ_PTR); - old_write_ptr = inw(ioaddr+WRITE_PTR); - - outb(SIRQ_dis|irqrmap[dev->irq], ioaddr+SET_IRQ); - - status = scb_status(dev); - -#if NET_DEBUG > 4 - printk(KERN_DEBUG "%s: interrupt (status %x)\n", dev->name, status); -#endif - - if (lp->started == (STARTED_CU | STARTED_RU)) { - - do { - eexp_cmd_clear(dev); - - ack_cmd = SCB_ack(status); - scb_command(dev, ack_cmd); - outb(0,ioaddr+SIGNAL_CA); - - eexp_cmd_clear(dev); - - if (SCB_complete(status)) { - if (!eexp_hw_lasttxstat(dev)) { - printk("%s: tx interrupt but no status\n", dev->name); - } - } - - if (SCB_rxdframe(status)) - eexp_hw_rx_pio(dev); - - status = scb_status(dev); - } while (status & 0xc000); - - if (SCB_RUdead(status)) - { - printk(KERN_WARNING "%s: RU stopped: status %04x\n", - dev->name,status); -#if 0 - printk(KERN_WARNING "%s: cur_rfd=%04x, cur_rbd=%04x\n", dev->name, lp->cur_rfd, lp->cur_rbd); - outw(lp->cur_rfd, ioaddr+READ_PTR); - printk(KERN_WARNING "%s: [%04x]\n", dev->name, inw(ioaddr+DATAPORT)); - outw(lp->cur_rfd+6, ioaddr+READ_PTR); - printk(KERN_WARNING "%s: rbd is %04x\n", dev->name, rbd= inw(ioaddr+DATAPORT)); - outw(rbd, ioaddr+READ_PTR); - printk(KERN_WARNING "%s: [%04x %04x] ", dev->name, inw(ioaddr+DATAPORT), inw(ioaddr+DATAPORT)); - outw(rbd+8, ioaddr+READ_PTR); - printk("[%04x]\n", inw(ioaddr+DATAPORT)); -#endif - dev->stats.rx_errors++; -#if 1 - eexp_hw_rxinit(dev); -#else - lp->cur_rfd = lp->first_rfd; -#endif - scb_wrrfa(dev, lp->rx_buf_start); - scb_command(dev, SCB_RUstart); - outb(0,ioaddr+SIGNAL_CA); - } - } else { - if (status & 0x8000) - ack_cmd = eexp_start_irq(dev, status); - else - ack_cmd = SCB_ack(status); - scb_command(dev, ack_cmd); - outb(0,ioaddr+SIGNAL_CA); - } - - eexp_cmd_clear(dev); - - outb(SIRQ_en|irqrmap[dev->irq], ioaddr+SET_IRQ); - -#if NET_DEBUG > 6 - printk("%s: leaving eexp_irq()\n", dev->name); -#endif - outw(old_read_ptr, ioaddr+READ_PTR); - outw(old_write_ptr, ioaddr+WRITE_PTR); - - spin_unlock(&lp->lock); - return IRQ_HANDLED; -} - -/* - * Hardware access functions - */ - -/* - * Set the cable type to use. 
- */ - -static void eexp_hw_set_interface(struct net_device *dev) -{ - unsigned char oldval = inb(dev->base_addr + 0x300e); - oldval &= ~0x82; - switch (dev->if_port) { - case TPE: - oldval |= 0x2; - case BNC: - oldval |= 0x80; - break; - } - outb(oldval, dev->base_addr+0x300e); - mdelay(20); -} - -/* - * Check all the receive buffers, and hand any received packets - * to the upper levels. Basic sanity check on each frame - * descriptor, though we don't bother trying to fix broken ones. - */ - -static void eexp_hw_rx_pio(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short rx_block = lp->rx_ptr; - unsigned short boguscount = lp->num_rx_bufs; - unsigned short ioaddr = dev->base_addr; - unsigned short status; - -#if NET_DEBUG > 6 - printk(KERN_DEBUG "%s: eexp_hw_rx()\n", dev->name); -#endif - - do { - unsigned short rfd_cmd, rx_next, pbuf, pkt_len; - - outw(rx_block, ioaddr + READ_PTR); - status = inw(ioaddr + DATAPORT); - - if (FD_Done(status)) - { - rfd_cmd = inw(ioaddr + DATAPORT); - rx_next = inw(ioaddr + DATAPORT); - pbuf = inw(ioaddr + DATAPORT); - - outw(pbuf, ioaddr + READ_PTR); - pkt_len = inw(ioaddr + DATAPORT); - - if (rfd_cmd!=0x0000) - { - printk(KERN_WARNING "%s: rfd_cmd not zero:0x%04x\n", - dev->name, rfd_cmd); - continue; - } - else if (pbuf!=rx_block+0x16) - { - printk(KERN_WARNING "%s: rfd and rbd out of sync 0x%04x 0x%04x\n", - dev->name, rx_block+0x16, pbuf); - continue; - } - else if ((pkt_len & 0xc000)!=0xc000) - { - printk(KERN_WARNING "%s: EOF or F not set on received buffer (%04x)\n", - dev->name, pkt_len & 0xc000); - continue; - } - else if (!FD_OK(status)) - { - dev->stats.rx_errors++; - if (FD_CRC(status)) - dev->stats.rx_crc_errors++; - if (FD_Align(status)) - dev->stats.rx_frame_errors++; - if (FD_Resrc(status)) - dev->stats.rx_fifo_errors++; - if (FD_DMA(status)) - dev->stats.rx_over_errors++; - if (FD_Short(status)) - dev->stats.rx_length_errors++; - } - else - { - struct sk_buff *skb; - pkt_len &= 0x3fff; - skb = dev_alloc_skb(pkt_len+16); - if (skb == NULL) - { - printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name); - dev->stats.rx_dropped++; - break; - } - skb_reserve(skb, 2); - outw(pbuf+10, ioaddr+READ_PTR); - insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1); - skb->protocol = eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - outw(rx_block, ioaddr+WRITE_PTR); - outw(0, ioaddr+DATAPORT); - outw(0, ioaddr+DATAPORT); - rx_block = rx_next; - } - } while (FD_Done(status) && boguscount--); - lp->rx_ptr = rx_block; -} - -/* - * Hand a packet to the card for transmission - * If we get here, we MUST have already checked - * to make sure there is room in the transmit - * buffer region. - */ - -static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf, - unsigned short len) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short ioaddr = dev->base_addr; - - if (LOCKUP16 || lp->width) { - /* Stop the CU so that there is no chance that it - jumps off to a bogus address while we are writing the - pointer to the next transmit packet in 8-bit mode -- - this eliminates the "CU wedged" errors in 8-bit mode. 
- (Zoltan Szilagyi 10-12-96) */ - scb_command(dev, SCB_CUsuspend); - outw(0xFFFF, ioaddr+SIGNAL_CA); - } - - outw(lp->tx_head, ioaddr + WRITE_PTR); - - outw(0x0000, ioaddr + DATAPORT); - outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT); - outw(lp->tx_head+0x08, ioaddr + DATAPORT); - outw(lp->tx_head+0x0e, ioaddr + DATAPORT); - - outw(0x0000, ioaddr + DATAPORT); - outw(0x0000, ioaddr + DATAPORT); - outw(lp->tx_head+0x08, ioaddr + DATAPORT); - - outw(0x8000|len, ioaddr + DATAPORT); - outw(-1, ioaddr + DATAPORT); - outw(lp->tx_head+0x16, ioaddr + DATAPORT); - outw(0, ioaddr + DATAPORT); - - outsw(ioaddr + DATAPORT, buf, (len+1)>>1); - - outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR); - outw(lp->tx_head, ioaddr + DATAPORT); - - dev->trans_start = jiffies; - lp->tx_tail = lp->tx_head; - if (lp->tx_head==TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE)) - lp->tx_head = TX_BUF_START; - else - lp->tx_head += TX_BUF_SIZE; - if (lp->tx_head != lp->tx_reap) - netif_wake_queue(dev); - - if (LOCKUP16 || lp->width) { - /* Restart the CU so that the packet can actually - be transmitted. (Zoltan Szilagyi 10-12-96) */ - scb_command(dev, SCB_CUresume); - outw(0xFFFF, ioaddr+SIGNAL_CA); - } - - dev->stats.tx_packets++; - lp->last_tx = jiffies; -} - -static const struct net_device_ops eexp_netdev_ops = { - .ndo_open = eexp_open, - .ndo_stop = eexp_close, - .ndo_start_xmit = eexp_xmit, - .ndo_set_multicast_list = eexp_set_multicast, - .ndo_tx_timeout = eexp_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/* - * Sanity check the suspected EtherExpress card - * Read hardware address, reset card, size memory and initialize buffer - * memory pointers. These are held in netdev_priv(), in case someone has more - * than one card in a machine. - */ - -static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr) -{ - unsigned short hw_addr[3]; - unsigned char buswidth; - unsigned int memory_size; - int i; - unsigned short xsum = 0; - struct net_local *lp = netdev_priv(dev); - - printk("%s: EtherExpress 16 at %#x ",dev->name,ioaddr); - - outb(ASIC_RST, ioaddr+EEPROM_Ctrl); - outb(0, ioaddr+EEPROM_Ctrl); - udelay(500); - outb(i586_RST, ioaddr+EEPROM_Ctrl); - - hw_addr[0] = eexp_hw_readeeprom(ioaddr,2); - hw_addr[1] = eexp_hw_readeeprom(ioaddr,3); - hw_addr[2] = eexp_hw_readeeprom(ioaddr,4); - - /* Standard Address or Compaq LTE Address */ - if (!((hw_addr[2]==0x00aa && ((hw_addr[1] & 0xff00)==0x0000)) || - (hw_addr[2]==0x0080 && ((hw_addr[1] & 0xff00)==0x5F00)))) - { - printk(" rejected: invalid address %04x%04x%04x\n", - hw_addr[2],hw_addr[1],hw_addr[0]); - return -ENODEV; - } - - /* Calculate the EEPROM checksum. Carry on anyway if it's bad, - * though. - */ - for (i = 0; i < 64; i++) - xsum += eexp_hw_readeeprom(ioaddr, i); - if (xsum != 0xbaba) - printk(" (bad EEPROM xsum 0x%02x)", xsum); - - dev->base_addr = ioaddr; - for ( i=0 ; i<6 ; i++ ) - dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i]; - - { - static const char irqmap[] = { 0, 9, 3, 4, 5, 10, 11, 0 }; - unsigned short setupval = eexp_hw_readeeprom(ioaddr,0); - - /* Use the IRQ from EEPROM if none was given */ - if (!dev->irq) - dev->irq = irqmap[setupval>>13]; - - if (dev->if_port == 0xff) { - dev->if_port = !(setupval & 0x1000) ? AUI : - eexp_hw_readeeprom(ioaddr,5) & 0x1 ? 
TPE : BNC; - } - - buswidth = !((setupval & 0x400) >> 10); - } - - memset(lp, 0, sizeof(struct net_local)); - spin_lock_init(&lp->lock); - - printk("(IRQ %d, %s connector, %d-bit bus", dev->irq, - eexp_ifmap[dev->if_port], buswidth?8:16); - - if (!request_region(dev->base_addr + 0x300e, 1, "EtherExpress")) - return -EBUSY; - - eexp_hw_set_interface(dev); - - release_region(dev->base_addr + 0x300e, 1); - - /* Find out how much RAM we have on the card */ - outw(0, dev->base_addr + WRITE_PTR); - for (i = 0; i < 32768; i++) - outw(0, dev->base_addr + DATAPORT); - - for (memory_size = 0; memory_size < 64; memory_size++) - { - outw(memory_size<<10, dev->base_addr + READ_PTR); - if (inw(dev->base_addr+DATAPORT)) - break; - outw(memory_size<<10, dev->base_addr + WRITE_PTR); - outw(memory_size | 0x5000, dev->base_addr+DATAPORT); - outw(memory_size<<10, dev->base_addr + READ_PTR); - if (inw(dev->base_addr+DATAPORT) != (memory_size | 0x5000)) - break; - } - - /* Sort out the number of buffers. We may have 16, 32, 48 or 64k - * of RAM to play with. - */ - lp->num_tx_bufs = 4; - lp->rx_buf_end = 0x3ff6; - switch (memory_size) - { - case 64: - lp->rx_buf_end += 0x4000; - case 48: - lp->num_tx_bufs += 4; - lp->rx_buf_end += 0x4000; - case 32: - lp->rx_buf_end += 0x4000; - case 16: - printk(", %dk RAM)\n", memory_size); - break; - default: - printk(") bad memory size (%dk).\n", memory_size); - return -ENODEV; - break; - } - - lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE); - lp->width = buswidth; - - dev->netdev_ops = &eexp_netdev_ops; - dev->watchdog_timeo = 2*HZ; - - return register_netdev(dev); -} - -/* - * Read a word from the EtherExpress on-board serial EEPROM. - * The EEPROM contains 64 words of 16 bits. - */ -static unsigned short __init eexp_hw_readeeprom(unsigned short ioaddr, - unsigned char location) -{ - unsigned short cmd = 0x180|(location&0x7f); - unsigned short rval = 0,wval = EC_CS|i586_RST; - int i; - - outb(EC_CS|i586_RST,ioaddr+EEPROM_Ctrl); - for (i=0x100 ; i ; i>>=1 ) - { - if (cmd&i) - wval |= EC_Wr; - else - wval &= ~EC_Wr; - - outb(wval,ioaddr+EEPROM_Ctrl); - outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - outb(wval,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - } - wval &= ~EC_Wr; - outb(wval,ioaddr+EEPROM_Ctrl); - for (i=0x8000 ; i ; i>>=1 ) - { - outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - if (inb(ioaddr+EEPROM_Ctrl)&EC_Rd) - rval |= i; - outb(wval,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - } - wval &= ~EC_CS; - outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - outb(wval,ioaddr+EEPROM_Ctrl); - eeprom_delay(); - return rval; -} - -/* - * Reap tx buffers and return last transmit status. - * if ==0 then either: - * a) we're not transmitting anything, so why are we here? - * b) we've died. 
- * otherwise, Stat_Busy(return) means we've still got some packets - * to transmit, Stat_Done(return) means our buffers should be empty - * again - */ - -static unsigned short eexp_hw_lasttxstat(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short tx_block = lp->tx_reap; - unsigned short status; - - if (!netif_queue_stopped(dev) && lp->tx_head==lp->tx_reap) - return 0x0000; - - do - { - outw(tx_block & ~31, dev->base_addr + SM_PTR); - status = inw(dev->base_addr + SHADOW(tx_block)); - if (!Stat_Done(status)) - { - lp->tx_link = tx_block; - return status; - } - else - { - lp->last_tx_restart = 0; - dev->stats.collisions += Stat_NoColl(status); - if (!Stat_OK(status)) - { - char *whatsup = NULL; - dev->stats.tx_errors++; - if (Stat_Abort(status)) - dev->stats.tx_aborted_errors++; - if (Stat_TNoCar(status)) { - whatsup = "aborted, no carrier"; - dev->stats.tx_carrier_errors++; - } - if (Stat_TNoCTS(status)) { - whatsup = "aborted, lost CTS"; - dev->stats.tx_carrier_errors++; - } - if (Stat_TNoDMA(status)) { - whatsup = "FIFO underran"; - dev->stats.tx_fifo_errors++; - } - if (Stat_TXColl(status)) { - whatsup = "aborted, too many collisions"; - dev->stats.tx_aborted_errors++; - } - if (whatsup) - printk(KERN_INFO "%s: transmit %s\n", - dev->name, whatsup); - } - else - dev->stats.tx_packets++; - } - if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE)) - lp->tx_reap = tx_block = TX_BUF_START; - else - lp->tx_reap = tx_block += TX_BUF_SIZE; - netif_wake_queue(dev); - } - while (lp->tx_reap != lp->tx_head); - - lp->tx_link = lp->tx_tail + 0x08; - - return status; -} - -/* - * This should never happen. It is called when some higher routine detects - * that the CU has stopped, to try to restart it from the last packet we knew - * we were working on, or the idle loop if we had finished for the time. - */ - -static void eexp_hw_txrestart(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short ioaddr = dev->base_addr; - - lp->last_tx_restart = lp->tx_link; - scb_wrcbl(dev, lp->tx_link); - scb_command(dev, SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - - { - unsigned short boguscount=50,failcount=5; - while (!scb_status(dev)) - { - if (!--boguscount) - { - if (--failcount) - { - printk(KERN_WARNING "%s: CU start timed out, status %04x, cmd %04x\n", dev->name, scb_status(dev), scb_rdcmd(dev)); - scb_wrcbl(dev, lp->tx_link); - scb_command(dev, SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - boguscount = 100; - } - else - { - printk(KERN_WARNING "%s: Failed to restart CU, resetting board...\n",dev->name); - eexp_hw_init586(dev); - netif_wake_queue(dev); - return; - } - } - } - } -} - -/* - * Writes down the list of transmit buffers into card memory. Each - * entry consists of an 82586 transmit command, followed by a jump - * pointing to itself. When we want to transmit a packet, we write - * the data into the appropriate transmit buffer and then modify the - * preceding jump to point at the new transmit command. This means that - * the 586 command unit is continuously active. 
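- *
- * A sketch of one transmit slot as eexp_hw_txinit() lays it out
- * (offsets from the slot base; annotation added for illustration):
- *
- *	+0x00	Xmit command: status, Cmd_INT|Cmd_Xmit, link -> +0x08,
- *		TBD pointer -> +0x0e
- *	+0x08	Nop command whose link points at itself -- the self-jump
- *		the CU spins on between packets
- *	+0x0e	buffer descriptor: 0x8000|length, no chained TBD (-1),
- *		data address -> +0x16
- *	+0x16	packet data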
- */
-
-static void eexp_hw_txinit(struct net_device *dev)
-{
-	struct net_local *lp = netdev_priv(dev);
-	unsigned short tx_block = TX_BUF_START;
-	unsigned short curtbuf;
-	unsigned short ioaddr = dev->base_addr;
-
-	for ( curtbuf=0 ; curtbuf<lp->num_tx_bufs ; curtbuf++ )
-	{
-		outw(tx_block, ioaddr + WRITE_PTR);
-
-		outw(0x0000, ioaddr + DATAPORT);
-		outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
-		outw(tx_block+0x08, ioaddr + DATAPORT);
-		outw(tx_block+0x0e, ioaddr + DATAPORT);
-
-		outw(0x0000, ioaddr + DATAPORT);
-		outw(0x0000, ioaddr + DATAPORT);
-		outw(tx_block+0x08, ioaddr + DATAPORT);
-
-		outw(0x8000, ioaddr + DATAPORT);
-		outw(-1, ioaddr + DATAPORT);
-		outw(tx_block+0x16, ioaddr + DATAPORT);
-		outw(0x0000, ioaddr + DATAPORT);
-
-		tx_block += TX_BUF_SIZE;
-	}
-	lp->tx_head = TX_BUF_START;
-	lp->tx_reap = TX_BUF_START;
-	lp->tx_tail = tx_block - TX_BUF_SIZE;
-	lp->tx_link = lp->tx_tail + 0x08;
-	lp->rx_buf_start = tx_block;
-
-}
-
-/*
- * Write the circular list of receive buffer descriptors to card memory.
- * The end of the list isn't marked, which means that the 82586 receive
- * unit will loop until buffers become available (this avoids it giving us
- * "out of resources" messages).
- */
-
-static void eexp_hw_rxinit(struct net_device *dev)
-{
-	struct net_local *lp = netdev_priv(dev);
-	unsigned short rx_block = lp->rx_buf_start;
-	unsigned short ioaddr = dev->base_addr;
-
-	lp->num_rx_bufs = 0;
-	lp->rx_first = lp->rx_ptr = rx_block;
-	do
-	{
-		lp->num_rx_bufs++;
-
-		outw(rx_block, ioaddr + WRITE_PTR);
-
-		outw(0, ioaddr + DATAPORT); outw(0, ioaddr+DATAPORT);
-		outw(rx_block + RX_BUF_SIZE, ioaddr+DATAPORT);
-		outw(0xffff, ioaddr+DATAPORT);
-
-		outw(0x0000, ioaddr+DATAPORT);
-		outw(0xdead, ioaddr+DATAPORT);
-		outw(0xdead, ioaddr+DATAPORT);
-		outw(0xdead, ioaddr+DATAPORT);
-		outw(0xdead, ioaddr+DATAPORT);
-		outw(0xdead, ioaddr+DATAPORT);
-		outw(0xdead, ioaddr+DATAPORT);
-
-		outw(0x0000, ioaddr+DATAPORT);
-		outw(rx_block + RX_BUF_SIZE + 0x16, ioaddr+DATAPORT);
-		outw(rx_block + 0x20, ioaddr+DATAPORT);
-		outw(0, ioaddr+DATAPORT);
-		outw(RX_BUF_SIZE-0x20, ioaddr+DATAPORT);
-
-		lp->rx_last = rx_block;
-		rx_block += RX_BUF_SIZE;
-	} while (rx_block <= lp->rx_buf_end-RX_BUF_SIZE);
-
-
-	/* Make first Rx frame descriptor point to first Rx buffer
-	   descriptor */
-	outw(lp->rx_first + 6, ioaddr+WRITE_PTR);
-	outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
-
-	/* Close Rx frame descriptor ring */
-	outw(lp->rx_last + 4, ioaddr+WRITE_PTR);
-	outw(lp->rx_first, ioaddr+DATAPORT);
-
-	/* Close Rx buffer descriptor ring */
-	outw(lp->rx_last + 0x16 + 2, ioaddr+WRITE_PTR);
-	outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
-
-}
-
-/*
- * Un-reset the 586, and start the configuration sequence. We don't wait for
- * this to finish, but allow the interrupt handler to start the CU and RU for
- * us. We can't start the receive/transmission system up before we know that
- * the hardware is configured correctly.
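- *
- * Because the Cmd_Config block lives in card memory, runtime options are
- * patched through the shadow window before the CU starts.  For instance,
- * promiscuous mode is a single bit in the configure bytes (this mirrors
- * the code below; shown here for illustration):
- *
- *	outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
- *	i = inw(ioaddr+SHADOW(CONF_PROMISC));
- *	outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
- *	     ioaddr+SHADOW(CONF_PROMISC));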
- */ - -static void eexp_hw_init586(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned short ioaddr = dev->base_addr; - int i; - -#if NET_DEBUG > 6 - printk("%s: eexp_hw_init586()\n", dev->name); -#endif - - lp->started = 0; - - set_loopback(dev); - - outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ); - - /* Download the startup code */ - outw(lp->rx_buf_end & ~31, ioaddr + SM_PTR); - outw(lp->width?0x0001:0x0000, ioaddr + 0x8006); - outw(0x0000, ioaddr + 0x8008); - outw(0x0000, ioaddr + 0x800a); - outw(0x0000, ioaddr + 0x800c); - outw(0x0000, ioaddr + 0x800e); - - for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) { - int j; - outw(i, ioaddr + SM_PTR); - for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2) - outw(start_code[(i+j)/2], - ioaddr+0x4000+j); - for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2) - outw(start_code[(i+j+16)/2], - ioaddr+0x8000+j); - } - - /* Do we want promiscuous mode or multicast? */ - outw(CONF_PROMISC & ~31, ioaddr+SM_PTR); - i = inw(ioaddr+SHADOW(CONF_PROMISC)); - outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1), - ioaddr+SHADOW(CONF_PROMISC)); - lp->was_promisc = dev->flags & IFF_PROMISC; -#if 0 - eexp_setup_filter(dev); -#endif - - /* Write our hardware address */ - outw(CONF_HWADDR & ~31, ioaddr+SM_PTR); - outw(((unsigned short *)dev->dev_addr)[0], ioaddr+SHADOW(CONF_HWADDR)); - outw(((unsigned short *)dev->dev_addr)[1], - ioaddr+SHADOW(CONF_HWADDR+2)); - outw(((unsigned short *)dev->dev_addr)[2], - ioaddr+SHADOW(CONF_HWADDR+4)); - - eexp_hw_txinit(dev); - eexp_hw_rxinit(dev); - - outb(0,ioaddr+EEPROM_Ctrl); - mdelay(5); - - scb_command(dev, 0xf000); - outb(0,ioaddr+SIGNAL_CA); - - outw(0, ioaddr+SM_PTR); - - { - unsigned short rboguscount=50,rfailcount=5; - while (inw(ioaddr+0x4000)) - { - if (!--rboguscount) - { - printk(KERN_WARNING "%s: i82586 reset timed out, kicking...\n", - dev->name); - scb_command(dev, 0); - outb(0,ioaddr+SIGNAL_CA); - rboguscount = 100; - if (!--rfailcount) - { - printk(KERN_WARNING "%s: i82586 not responding, giving up.\n", - dev->name); - return; - } - } - } - } - - scb_wrcbl(dev, CONF_LINK); - scb_command(dev, 0xf000|SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - - { - unsigned short iboguscount=50,ifailcount=5; - while (!scb_status(dev)) - { - if (!--iboguscount) - { - if (--ifailcount) - { - printk(KERN_WARNING "%s: i82586 initialization timed out, status %04x, cmd %04x\n", - dev->name, scb_status(dev), scb_rdcmd(dev)); - scb_wrcbl(dev, CONF_LINK); - scb_command(dev, 0xf000|SCB_CUstart); - outb(0,ioaddr+SIGNAL_CA); - iboguscount = 100; - } - else - { - printk(KERN_WARNING "%s: Failed to initialize i82586, giving up.\n",dev->name); - return; - } - } - } - } - - clear_loopback(dev); - outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ); - - lp->init_time = jiffies; -#if NET_DEBUG > 6 - printk("%s: leaving eexp_hw_init586()\n", dev->name); -#endif -} - -static void eexp_setup_filter(struct net_device *dev) -{ - struct netdev_hw_addr *ha; - unsigned short ioaddr = dev->base_addr; - int count = netdev_mc_count(dev); - int i; - if (count > 8) { - printk(KERN_INFO "%s: too many multicast addresses (%d)\n", - dev->name, count); - count = 8; - } - - outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); - outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); - i = 0; - netdev_for_each_mc_addr(ha, dev) { - unsigned short *data = (unsigned short *) ha->addr; - - if (i == count) - break; - outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR); - outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i))); - 
outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR); - outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2)); - outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR); - outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4)); - i++; - } -} - -/* - * Set or clear the multicast filter for this adaptor. - */ -static void -eexp_set_multicast(struct net_device *dev) -{ - unsigned short ioaddr = dev->base_addr; - struct net_local *lp = netdev_priv(dev); - int kick = 0, i; - if ((dev->flags & IFF_PROMISC) != lp->was_promisc) { - outw(CONF_PROMISC & ~31, ioaddr+SM_PTR); - i = inw(ioaddr+SHADOW(CONF_PROMISC)); - outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1), - ioaddr+SHADOW(CONF_PROMISC)); - lp->was_promisc = dev->flags & IFF_PROMISC; - kick = 1; - } - if (!(dev->flags & IFF_PROMISC)) { - eexp_setup_filter(dev); - if (lp->old_mc_count != netdev_mc_count(dev)) { - kick = 1; - lp->old_mc_count = netdev_mc_count(dev); - } - } - if (kick) { - unsigned long oj; - scb_command(dev, SCB_CUsuspend); - outb(0, ioaddr+SIGNAL_CA); - outb(0, ioaddr+SIGNAL_CA); -#if 0 - printk("%s: waiting for CU to go suspended\n", dev->name); -#endif - oj = jiffies; - while ((SCB_CUstat(scb_status(dev)) == 2) && - (time_before(jiffies, oj + 2000))); - if (SCB_CUstat(scb_status(dev)) == 2) - printk("%s: warning, CU didn't stop\n", dev->name); - lp->started &= ~(STARTED_CU); - scb_wrcbl(dev, CONF_LINK); - scb_command(dev, SCB_CUstart); - outb(0, ioaddr+SIGNAL_CA); - } -} - - -/* - * MODULE stuff - */ - -#ifdef MODULE - -#define EEXP_MAX_CARDS 4 /* max number of cards to support */ - -static struct net_device *dev_eexp[EEXP_MAX_CARDS]; -static int irq[EEXP_MAX_CARDS]; -static int io[EEXP_MAX_CARDS]; - -module_param_array(io, int, NULL, 0); -module_param_array(irq, int, NULL, 0); -MODULE_PARM_DESC(io, "EtherExpress 16 I/O base address(es)"); -MODULE_PARM_DESC(irq, "EtherExpress 16 IRQ number(s)"); -MODULE_LICENSE("GPL"); - - -/* Ideally the user would give us io=, irq= for every card. If any parameters - * are specified, we verify and then use them. If no parameters are given, we - * autoprobe for one card only. 
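- *
- * For example (illustrative only, with made-up resource values):
- *
- *	modprobe eexpress io=0x240,0x300 irq=5,9
- *
- * sets up two cards, while a bare "modprobe eexpress" probes the default
- * address list for a single card.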
- */ -int __init init_module(void) -{ - struct net_device *dev; - int this_dev, found = 0; - - for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) { - dev = alloc_etherdev(sizeof(struct net_local)); - dev->irq = irq[this_dev]; - dev->base_addr = io[this_dev]; - if (io[this_dev] == 0) { - if (this_dev) - break; - printk(KERN_NOTICE "eexpress.c: Module autoprobe not recommended, give io=xx.\n"); - } - if (do_express_probe(dev) == 0) { - dev_eexp[this_dev] = dev; - found++; - continue; - } - printk(KERN_WARNING "eexpress.c: Failed to register card at 0x%x.\n", io[this_dev]); - free_netdev(dev); - break; - } - if (found) - return 0; - return -ENXIO; -} - -void __exit cleanup_module(void) -{ - int this_dev; - - for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) { - struct net_device *dev = dev_eexp[this_dev]; - if (dev) { - unregister_netdev(dev); - free_netdev(dev); - } - } -} -#endif - -/* - * Local Variables: - * c-file-style: "linux" - * tab-width: 8 - * End: - */ diff --git a/drivers/net/eexpress.h b/drivers/net/eexpress.h deleted file mode 100644 index dc9c6ea..0000000 --- a/drivers/net/eexpress.h +++ /dev/null @@ -1,179 +0,0 @@ -/* - * eexpress.h: Intel EtherExpress16 defines - */ - -/* - * EtherExpress card register addresses - * as offsets from the base IO region (dev->base_addr) - */ - -#define DATAPORT 0x0000 -#define WRITE_PTR 0x0002 -#define READ_PTR 0x0004 -#define SIGNAL_CA 0x0006 -#define SET_IRQ 0x0007 -#define SM_PTR 0x0008 -#define MEM_Dec 0x000a -#define MEM_Ctrl 0x000b -#define MEM_Page_Ctrl 0x000c -#define Config 0x000d -#define EEPROM_Ctrl 0x000e -#define ID_PORT 0x000f -#define MEM_ECtrl 0x000f - -/* - * card register defines - */ - -/* SET_IRQ */ -#define SIRQ_en 0x08 -#define SIRQ_dis 0x00 - -/* EEPROM_Ctrl */ -#define EC_Clk 0x01 -#define EC_CS 0x02 -#define EC_Wr 0x04 -#define EC_Rd 0x08 -#define ASIC_RST 0x40 -#define i586_RST 0x80 - -#define eeprom_delay() { udelay(40); } - -/* - * i82586 Memory Configuration - */ - -/* (System Configuration Pointer) System start up block, read after 586_RST */ -#define SCP_START 0xfff6 - -/* Intermediate System Configuration Pointer */ -#define ISCP_START 0x0000 - -/* System Command Block */ -#define SCB_START 0x0008 - -/* Start of buffer region. Everything before this is used for control - * structures and the CU configuration program. The memory layout is - * determined in eexp_hw_probe(), once we know how much memory is - * available on the card. 
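- *
- * Putting the defines together, the card memory map is roughly as
- * follows (summary added for illustration):
- *
- *	0x0000	ISCP, then the SCB at 0x0008
- *	0x0020	CU configuration program (start_code)
- *	0x0100	TX_BUF_START: transmit slots (4-8, sized by the probe)
- *	 ...	receive buffers up to lp->rx_buf_end
- *	0xfff6	SCP_START: read by the 586 after reset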
- */ - -#define TX_BUF_START 0x0100 - -#define TX_BUF_SIZE ((24+ETH_FRAME_LEN+31)&~0x1f) -#define RX_BUF_SIZE ((32+ETH_FRAME_LEN+31)&~0x1f) - -/* - * SCB defines - */ - -/* these functions take the SCB status word and test the relevant status bit */ -#define SCB_complete(s) (((s) & 0x8000) != 0) -#define SCB_rxdframe(s) (((s) & 0x4000) != 0) -#define SCB_CUdead(s) (((s) & 0x2000) != 0) -#define SCB_RUdead(s) (((s) & 0x1000) != 0) -#define SCB_ack(s) ((s) & 0xf000) - -/* Command unit status: 0=idle, 1=suspended, 2=active */ -#define SCB_CUstat(s) (((s)&0x0300)>>8) - -/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */ -#define SCB_RUstat(s) (((s)&0x0070)>>4) - -/* SCB commands */ -#define SCB_CUnop 0x0000 -#define SCB_CUstart 0x0100 -#define SCB_CUresume 0x0200 -#define SCB_CUsuspend 0x0300 -#define SCB_CUabort 0x0400 -#define SCB_resetchip 0x0080 - -#define SCB_RUnop 0x0000 -#define SCB_RUstart 0x0010 -#define SCB_RUresume 0x0020 -#define SCB_RUsuspend 0x0030 -#define SCB_RUabort 0x0040 - -/* - * Command block defines - */ - -#define Stat_Done(s) (((s) & 0x8000) != 0) -#define Stat_Busy(s) (((s) & 0x4000) != 0) -#define Stat_OK(s) (((s) & 0x2000) != 0) -#define Stat_Abort(s) (((s) & 0x1000) != 0) -#define Stat_STFail (((s) & 0x0800) != 0) -#define Stat_TNoCar(s) (((s) & 0x0400) != 0) -#define Stat_TNoCTS(s) (((s) & 0x0200) != 0) -#define Stat_TNoDMA(s) (((s) & 0x0100) != 0) -#define Stat_TDefer(s) (((s) & 0x0080) != 0) -#define Stat_TColl(s) (((s) & 0x0040) != 0) -#define Stat_TXColl(s) (((s) & 0x0020) != 0) -#define Stat_NoColl(s) ((s) & 0x000f) - -/* Cmd_END will end AFTER the command if this is the first - * command block after an SCB_CUstart, but BEFORE the command - * for all subsequent commands. Best strategy is to place - * Cmd_INT on the last command in the sequence, followed by a - * dummy Cmd_Nop with Cmd_END after this. - */ - -#define Cmd_END 0x8000 -#define Cmd_SUS 0x4000 -#define Cmd_INT 0x2000 - -#define Cmd_Nop 0x0000 -#define Cmd_SetAddr 0x0001 -#define Cmd_Config 0x0002 -#define Cmd_MCast 0x0003 -#define Cmd_Xmit 0x0004 -#define Cmd_TDR 0x0005 -#define Cmd_Dump 0x0006 -#define Cmd_Diag 0x0007 - - -/* - * Frame Descriptor (Receive block) defines - */ - -#define FD_Done(s) (((s) & 0x8000) != 0) -#define FD_Busy(s) (((s) & 0x4000) != 0) -#define FD_OK(s) (((s) & 0x2000) != 0) - -#define FD_CRC(s) (((s) & 0x0800) != 0) -#define FD_Align(s) (((s) & 0x0400) != 0) -#define FD_Resrc(s) (((s) & 0x0200) != 0) -#define FD_DMA(s) (((s) & 0x0100) != 0) -#define FD_Short(s) (((s) & 0x0080) != 0) -#define FD_NoEOF(s) (((s) & 0x0040) != 0) - -struct rfd_header { - volatile unsigned long flags; - volatile unsigned short link; - volatile unsigned short rbd_offset; - volatile unsigned short dstaddr1; - volatile unsigned short dstaddr2; - volatile unsigned short dstaddr3; - volatile unsigned short srcaddr1; - volatile unsigned short srcaddr2; - volatile unsigned short srcaddr3; - volatile unsigned short length; - - /* This is actually a Receive Buffer Descriptor. The way we - * arrange memory means that an RBD always follows the RFD that - * points to it, so they might as well be in the same structure. 
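- *
- * Concretely: the RFD header above is 22 (0x16) bytes, so for a frame
- * descriptor at offset X the matching RBD always sits at X + 0x16 --
- * the invariant the receive path checks with its "rfd and rbd out of
- * sync" test.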
- */ - volatile unsigned short actual_count; - volatile unsigned short next_rbd; - volatile unsigned short buf_addr1; - volatile unsigned short buf_addr2; - volatile unsigned short size; -}; - -/* Returned data from the Time Domain Reflectometer */ - -#define TDR_LINKOK (1<<15) -#define TDR_XCVRPROBLEM (1<<14) -#define TDR_OPEN (1<<13) -#define TDR_SHORT (1<<12) -#define TDR_TIME 0x7ff diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index ed5836c..d0a8fa8 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -17,6 +17,7 @@ source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/intel/Kconfig" +source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 983fd27..6d3276a 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -8,5 +8,6 @@ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ +obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ diff --git a/drivers/net/ethernet/i825xx/3c505.c b/drivers/net/ethernet/i825xx/3c505.c new file mode 100644 index 0000000..88d766e --- /dev/null +++ b/drivers/net/ethernet/i825xx/3c505.c @@ -0,0 +1,1673 @@ +/* + * Linux Ethernet device driver for the 3Com Etherlink Plus (3C505) + * By Craig Southeren, Juha Laiho and Philip Blundell + * + * 3c505.c This module implements an interface to the 3Com + * Etherlink Plus (3c505) Ethernet card. Linux device + * driver interface reverse engineered from the Linux 3C509 + * device drivers. Some 3C505 information gleaned from + * the Crynwr packet driver. Still this driver would not + * be here without 3C505 technical reference provided by + * 3Com. + * + * $Id: 3c505.c,v 1.10 1996/04/16 13:06:27 phil Exp $ + * + * Authors: Linux 3c505 device driver by + * Craig Southeren, + * Final debugging by + * Andrew Tridgell, + * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by + * Juha Laiho, + * Linux 3C509 driver by + * Donald Becker, + * (Now at ) + * Crynwr packet driver by + * Krishnan Gopalan and Gregg Stefancik, + * Clemson University Engineering Computer Operations. + * Portions of the code have been adapted from the 3c505 + * driver for NCSA Telnet by Bruce Orchard and later + * modified by Warren Van Houten and krus@diku.dk. + * 3C505 technical information provided by + * Terry Murphy, of 3Com Network Adapter Division + * Linux 1.3.0 changes by + * Alan Cox + * More debugging, DMA support, currently maintained by + * Philip Blundell + * Multicard/soft configurable dma channel/rev 2 hardware support + * by Christopher Collins + * Ethtool support (jgarzik), 11/17/2001 + */ + +#define DRV_NAME "3c505" +#define DRV_VERSION "1.10a" + + +/* Theory of operation: + * + * The 3c505 is quite an intelligent board. All communication with it is done + * by means of Primary Command Blocks (PCBs); these are transferred using PIO + * through the command register. The card has 256k of on-board RAM, which is + * used to buffer received packets. It might seem at first that more buffers + * are better, but in fact this isn't true. 
From my tests, it seems that + * more than about 10 buffers are unnecessary, and there is a noticeable + * performance hit in having more active on the card. So the majority of the + * card's memory isn't, in fact, used. Sadly, the card only has one transmit + * buffer and, short of loading our own firmware into it (which is what some + * drivers resort to) there's nothing we can do about this. + * + * We keep up to 4 "receive packet" commands active on the board at a time. + * When a packet comes in, so long as there is a receive command active, the + * board will send us a "packet received" PCB and then add the data for that + * packet to the DMA queue. If a DMA transfer is not already in progress, we + * set one up to start uploading the data. We have to maintain a list of + * backlogged receive packets, because the card may decide to tell us about + * a newly-arrived packet at any time, and we may not be able to start a DMA + * transfer immediately (ie one may already be going on). We can't NAK the + * PCB, because then it would throw the packet away. + * + * Trying to send a PCB to the card at the wrong moment seems to have bad + * effects. If we send it a transmit PCB while a receive DMA is happening, + * it will just NAK the PCB and so we will have wasted our time. Worse, it + * sometimes seems to interrupt the transfer. The majority of the low-level + * code is protected by one huge semaphore -- "busy" -- which is set whenever + * it probably isn't safe to do anything to the card. The receive routine + * must gain a lock on "busy" before it can start a DMA transfer, and the + * transmit routine must gain a lock before it sends the first PCB to the card. + * The send_pcb() routine also has an internal semaphore to protect it against + * being re-entered (which would be disastrous) -- this is needed because + * several things can happen asynchronously (re-priming the receiver and + * asking the card for statistics, for example). send_pcb() will also refuse + * to talk to the card at all if a DMA upload is happening. The higher-level + * networking code will reschedule a later retry if some part of the driver + * is blocked. In practice, this doesn't seem to happen very often. + */ + +/* This driver may now work with revision 2.x hardware, since all the read + * operations on the HCR have been removed (we now keep our own softcopy). + * But I don't have an old card to test it on. + * + * This has had the bad effect that the autoprobe routine is now a bit + * less friendly to other devices. However, it was never very good. + * before, so I doubt it will hurt anybody. + */ + +/* The driver is a mess. I took Craig's and Juha's code, and hacked it firstly + * to make it more reliable, and secondly to add DMA mode. Many things could + * probably be done better; the concurrency protection is particularly awful. 
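+ *
+ * In code terms (an illustrative sketch): "busy" and the send_pcb
+ * semaphore are claimed with an atomic test-and-set before anything
+ * touches the card, e.g. at the top of send_pcb():
+ *
+ *	if (test_and_set_bit(1, &adapter->send_pcb_semaphore))
+ *		return false;	someone else is mid-PCB, caller retries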
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include "3c505.h"
+
+/*********************************************************
+ *
+ * define debug messages here as common strings to reduce space
+ *
+ *********************************************************/
+
+#define filename __FILE__
+
+#define timeout_msg "*** timeout at %s:%s (line %d) ***\n"
+#define TIMEOUT_MSG(lineno) \
+	pr_notice(timeout_msg, filename, __func__, (lineno))
+
+#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n"
+#define INVALID_PCB_MSG(len) \
+	pr_notice(invalid_pcb_msg, (len), filename, __func__, __LINE__)
+
+#define search_msg "%s: Looking for 3c505 adapter at address %#x..."
+
+#define stilllooking_msg "still looking..."
+
+#define found_msg "found.\n"
+
+#define notfound_msg "not found (reason = %d)\n"
+
+#define couldnot_msg "%s: 3c505 not found\n"
+
+/*********************************************************
+ *
+ * various other debug stuff
+ *
+ *********************************************************/
+
+#ifdef ELP_DEBUG
+static int elp_debug = ELP_DEBUG;
+#else
+static int elp_debug;
+#endif
+#define debug elp_debug
+
+/*
+ *  0 = no messages (well, some)
+ *  1 = messages when high level commands performed
+ *  2 = messages when low level commands performed
+ *  3 = messages when interrupts received
+ */
+
+/*****************************************************************
+ *
+ * List of I/O-addresses we try to auto-sense
+ * Last element MUST BE 0!
+ *****************************************************************/
+
+static int addr_list[] __initdata = {0x300, 0x280, 0x310, 0};
+
+/* Dma Memory related stuff */
+
+static unsigned long dma_mem_alloc(int size)
+{
+	int order = get_order(size);
+	return __get_dma_pages(GFP_KERNEL, order);
+}
+
+
+/*****************************************************************
+ *
+ * Functions for I/O (note the inline !)
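+ *
+ * One note, added for illustration: the HCR is effectively write-only
+ * on rev 2 boards, so outb_control() below keeps a soft copy in
+ * adapter->hcr_val and every later write is derived from it, e.g.
+ *
+ *	outb_control(HCR_VAL(dev) | ATTN, dev);
+ *	outb_control(HCR_VAL(dev) & ~ATTN, dev);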
+ * + *****************************************************************/ + +static inline unsigned char inb_status(unsigned int base_addr) +{ + return inb(base_addr + PORT_STATUS); +} + +static inline int inb_command(unsigned int base_addr) +{ + return inb(base_addr + PORT_COMMAND); +} + +static inline void outb_control(unsigned char val, struct net_device *dev) +{ + outb(val, dev->base_addr + PORT_CONTROL); + ((elp_device *)(netdev_priv(dev)))->hcr_val = val; +} + +#define HCR_VAL(x) (((elp_device *)(netdev_priv(x)))->hcr_val) + +static inline void outb_command(unsigned char val, unsigned int base_addr) +{ + outb(val, base_addr + PORT_COMMAND); +} + +static inline unsigned int backlog_next(unsigned int n) +{ + return (n + 1) % BACKLOG_SIZE; +} + +/***************************************************************** + * + * useful functions for accessing the adapter + * + *****************************************************************/ + +/* + * use this routine when accessing the ASF bits as they are + * changed asynchronously by the adapter + */ + +/* get adapter PCB status */ +#define GET_ASF(addr) \ + (get_status(addr)&ASF_PCB_MASK) + +static inline int get_status(unsigned int base_addr) +{ + unsigned long timeout = jiffies + 10*HZ/100; + register int stat1; + do { + stat1 = inb_status(base_addr); + } while (stat1 != inb_status(base_addr) && time_before(jiffies, timeout)); + if (time_after_eq(jiffies, timeout)) + TIMEOUT_MSG(__LINE__); + return stat1; +} + +static inline void set_hsf(struct net_device *dev, int hsf) +{ + elp_device *adapter = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&adapter->lock, flags); + outb_control((HCR_VAL(dev) & ~HSF_PCB_MASK) | hsf, dev); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static bool start_receive(struct net_device *, pcb_struct *); + +static inline void adapter_reset(struct net_device *dev) +{ + unsigned long timeout; + elp_device *adapter = netdev_priv(dev); + unsigned char orig_hcr = adapter->hcr_val; + + outb_control(0, dev); + + if (inb_status(dev->base_addr) & ACRF) { + do { + inb_command(dev->base_addr); + timeout = jiffies + 2*HZ/100; + while (time_before_eq(jiffies, timeout) && !(inb_status(dev->base_addr) & ACRF)); + } while (inb_status(dev->base_addr) & ACRF); + set_hsf(dev, HSF_PCB_NAK); + } + outb_control(adapter->hcr_val | ATTN | DIR, dev); + mdelay(10); + outb_control(adapter->hcr_val & ~ATTN, dev); + mdelay(10); + outb_control(adapter->hcr_val | FLSH, dev); + mdelay(10); + outb_control(adapter->hcr_val & ~FLSH, dev); + mdelay(10); + + outb_control(orig_hcr, dev); + if (!start_receive(dev, &adapter->tx_pcb)) + pr_err("%s: start receive command failed\n", dev->name); +} + +/* Check to make sure that a DMA transfer hasn't timed out. This should + * never happen in theory, but seems to occur occasionally if the card gets + * prodded at the wrong time. + */ +static inline void check_3c505_dma(struct net_device *dev) +{ + elp_device *adapter = netdev_priv(dev); + if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) { + unsigned long flags, f; + pr_err("%s: DMA %s timed out, %d bytes left\n", dev->name, + adapter->current_dma.direction ? 
"download" : "upload", + get_dma_residue(dev->dma)); + spin_lock_irqsave(&adapter->lock, flags); + adapter->dmaing = 0; + adapter->busy = 0; + + f=claim_dma_lock(); + disable_dma(dev->dma); + release_dma_lock(f); + + if (adapter->rx_active) + adapter->rx_active--; + outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev); + spin_unlock_irqrestore(&adapter->lock, flags); + } +} + +/* Primitive functions used by send_pcb() */ +static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte) +{ + unsigned long timeout; + outb_command(byte, base_addr); + for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) { + if (inb_status(base_addr) & HCRE) + return false; + } + pr_warning("3c505: send_pcb_slow timed out\n"); + return true; +} + +static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte) +{ + unsigned int timeout; + outb_command(byte, base_addr); + for (timeout = 0; timeout < 40000; timeout++) { + if (inb_status(base_addr) & HCRE) + return false; + } + pr_warning("3c505: send_pcb_fast timed out\n"); + return true; +} + +/* Check to see if the receiver needs restarting, and kick it if so */ +static inline void prime_rx(struct net_device *dev) +{ + elp_device *adapter = netdev_priv(dev); + while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) { + if (!start_receive(dev, &adapter->itx_pcb)) + break; + } +} + +/***************************************************************** + * + * send_pcb + * Send a PCB to the adapter. + * + * output byte to command reg --<--+ + * wait until HCRE is non zero | + * loop until all bytes sent -->--+ + * set HSF1 and HSF2 to 1 + * output pcb length + * wait until ASF give ACK or NAK + * set HSF1 and HSF2 to 0 + * + *****************************************************************/ + +/* This can be quite slow -- the adapter is allowed to take up to 40ms + * to respond to the initial interrupt. + * + * We run initially with interrupts turned on, but with a semaphore set + * so that nobody tries to re-enter this code. Once the first byte has + * gone through, we turn interrupts off and then send the others (the + * timeout is reduced to 500us). 
+ */ + +static bool send_pcb(struct net_device *dev, pcb_struct * pcb) +{ + int i; + unsigned long timeout; + elp_device *adapter = netdev_priv(dev); + unsigned long flags; + + check_3c505_dma(dev); + + if (adapter->dmaing && adapter->current_dma.direction == 0) + return false; + + /* Avoid contention */ + if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) { + if (elp_debug >= 3) { + pr_debug("%s: send_pcb entered while threaded\n", dev->name); + } + return false; + } + /* + * load each byte into the command register and + * wait for the HCRE bit to indicate the adapter + * had read the byte + */ + set_hsf(dev, 0); + + if (send_pcb_slow(dev->base_addr, pcb->command)) + goto abort; + + spin_lock_irqsave(&adapter->lock, flags); + + if (send_pcb_fast(dev->base_addr, pcb->length)) + goto sti_abort; + + for (i = 0; i < pcb->length; i++) { + if (send_pcb_fast(dev->base_addr, pcb->data.raw[i])) + goto sti_abort; + } + + outb_control(adapter->hcr_val | 3, dev); /* signal end of PCB */ + outb_command(2 + pcb->length, dev->base_addr); + + /* now wait for the acknowledgement */ + spin_unlock_irqrestore(&adapter->lock, flags); + + for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) { + switch (GET_ASF(dev->base_addr)) { + case ASF_PCB_ACK: + adapter->send_pcb_semaphore = 0; + return true; + + case ASF_PCB_NAK: +#ifdef ELP_DEBUG + pr_debug("%s: send_pcb got NAK\n", dev->name); +#endif + goto abort; + } + } + + if (elp_debug >= 1) + pr_debug("%s: timeout waiting for PCB acknowledge (status %02x)\n", + dev->name, inb_status(dev->base_addr)); + goto abort; + + sti_abort: + spin_unlock_irqrestore(&adapter->lock, flags); + abort: + adapter->send_pcb_semaphore = 0; + return false; +} + + +/***************************************************************** + * + * receive_pcb + * Read a PCB from the adapter + * + * wait for ACRF to be non-zero ---<---+ + * input a byte | + * if ASF1 and ASF2 were not both one | + * before byte was read, loop --->---+ + * set HSF1 and HSF2 for ack + * + *****************************************************************/ + +static bool receive_pcb(struct net_device *dev, pcb_struct * pcb) +{ + int i, j; + int total_length; + int stat; + unsigned long timeout; + unsigned long flags; + + elp_device *adapter = netdev_priv(dev); + + set_hsf(dev, 0); + + /* get the command code */ + timeout = jiffies + 2*HZ/100; + while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout)); + if (time_after_eq(jiffies, timeout)) { + TIMEOUT_MSG(__LINE__); + return false; + } + pcb->command = inb_command(dev->base_addr); + + /* read the data length */ + timeout = jiffies + 3*HZ/100; + while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout)); + if (time_after_eq(jiffies, timeout)) { + TIMEOUT_MSG(__LINE__); + pr_info("%s: status %02x\n", dev->name, stat); + return false; + } + pcb->length = inb_command(dev->base_addr); + + if (pcb->length > MAX_PCB_DATA) { + INVALID_PCB_MSG(pcb->length); + adapter_reset(dev); + return false; + } + /* read the data */ + spin_lock_irqsave(&adapter->lock, flags); + for (i = 0; i < MAX_PCB_DATA; i++) { + for (j = 0; j < 20000; j++) { + stat = get_status(dev->base_addr); + if (stat & ACRF) + break; + } + pcb->data.raw[i] = inb_command(dev->base_addr); + if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000) + break; + } + spin_unlock_irqrestore(&adapter->lock, flags); + if (i >= MAX_PCB_DATA) { + INVALID_PCB_MSG(i); + return false; + } + if (j >= 20000) { + TIMEOUT_MSG(__LINE__); + return 
false; + } + /* the last "data" byte was really the length! */ + total_length = pcb->data.raw[i]; + + /* safety check total length vs data length */ + if (total_length != (pcb->length + 2)) { + if (elp_debug >= 2) + pr_warning("%s: mangled PCB received\n", dev->name); + set_hsf(dev, HSF_PCB_NAK); + return false; + } + + if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) { + if (test_and_set_bit(0, (void *) &adapter->busy)) { + if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) { + set_hsf(dev, HSF_PCB_NAK); + pr_warning("%s: PCB rejected, transfer in progress and backlog full\n", dev->name); + pcb->command = 0; + return true; + } else { + pcb->command = 0xff; + } + } + } + set_hsf(dev, HSF_PCB_ACK); + return true; +} + +/****************************************************** + * + * queue a receive command on the adapter so we will get an + * interrupt when a packet is received. + * + ******************************************************/ + +static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb) +{ + bool status; + elp_device *adapter = netdev_priv(dev); + + if (elp_debug >= 3) + pr_debug("%s: restarting receiver\n", dev->name); + tx_pcb->command = CMD_RECEIVE_PACKET; + tx_pcb->length = sizeof(struct Rcv_pkt); + tx_pcb->data.rcv_pkt.buf_seg + = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */ + tx_pcb->data.rcv_pkt.buf_len = 1600; + tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */ + status = send_pcb(dev, tx_pcb); + if (status) + adapter->rx_active++; + return status; +} + +/****************************************************** + * + * extract a packet from the adapter + * this routine is only called from within the interrupt + * service routine, so no cli/sti calls are needed + * note that the length is always assumed to be even + * + ******************************************************/ + +static void receive_packet(struct net_device *dev, int len) +{ + int rlen; + elp_device *adapter = netdev_priv(dev); + void *target; + struct sk_buff *skb; + unsigned long flags; + + rlen = (len + 1) & ~1; + skb = dev_alloc_skb(rlen + 2); + + if (!skb) { + pr_warning("%s: memory squeeze, dropping packet\n", dev->name); + target = adapter->dma_buffer; + adapter->current_dma.target = NULL; + /* FIXME: stats */ + return; + } + + skb_reserve(skb, 2); + target = skb_put(skb, rlen); + if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) { + adapter->current_dma.target = target; + target = adapter->dma_buffer; + } else { + adapter->current_dma.target = NULL; + } + + /* if this happens, we die */ + if (test_and_set_bit(0, (void *) &adapter->dmaing)) + pr_err("%s: rx blocked, DMA in progress, dir %d\n", + dev->name, adapter->current_dma.direction); + + adapter->current_dma.direction = 0; + adapter->current_dma.length = rlen; + adapter->current_dma.skb = skb; + adapter->current_dma.start_time = jiffies; + + outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev); + + flags=claim_dma_lock(); + disable_dma(dev->dma); + clear_dma_ff(dev->dma); + set_dma_mode(dev->dma, 0x04); /* dma read */ + set_dma_addr(dev->dma, isa_virt_to_bus(target)); + set_dma_count(dev->dma, rlen); + enable_dma(dev->dma); + release_dma_lock(flags); + + if (elp_debug >= 3) { + pr_debug("%s: rx DMA transfer started\n", dev->name); + } + + if (adapter->rx_active) + adapter->rx_active--; + + if (!adapter->busy) + pr_warning("%s: receive_packet called, busy not set.\n", dev->name); +} + +/****************************************************** + * + * interrupt handler + * + 
******************************************************/ + +static irqreturn_t elp_interrupt(int irq, void *dev_id) +{ + int len; + int dlen; + int icount = 0; + struct net_device *dev = dev_id; + elp_device *adapter = netdev_priv(dev); + unsigned long timeout; + + spin_lock(&adapter->lock); + + do { + /* + * has a DMA transfer finished? + */ + if (inb_status(dev->base_addr) & DONE) { + if (!adapter->dmaing) + pr_warning("%s: phantom DMA completed\n", dev->name); + + if (elp_debug >= 3) + pr_debug("%s: %s DMA complete, status %02x\n", dev->name, + adapter->current_dma.direction ? "tx" : "rx", + inb_status(dev->base_addr)); + + outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev); + if (adapter->current_dma.direction) { + dev_kfree_skb_irq(adapter->current_dma.skb); + } else { + struct sk_buff *skb = adapter->current_dma.skb; + if (skb) { + if (adapter->current_dma.target) { + /* have already done the skb_put() */ + memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length); + } + skb->protocol = eth_type_trans(skb,dev); + dev->stats.rx_bytes += skb->len; + netif_rx(skb); + } + } + adapter->dmaing = 0; + if (adapter->rx_backlog.in != adapter->rx_backlog.out) { + int t = adapter->rx_backlog.length[adapter->rx_backlog.out]; + adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out); + if (elp_debug >= 2) + pr_debug("%s: receiving backlogged packet (%d)\n", dev->name, t); + receive_packet(dev, t); + } else { + adapter->busy = 0; + } + } else { + /* has one timed out? */ + check_3c505_dma(dev); + } + + /* + * receive a PCB from the adapter + */ + timeout = jiffies + 3*HZ/100; + while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) { + if (receive_pcb(dev, &adapter->irx_pcb)) { + switch (adapter->irx_pcb.command) + { + case 0: + break; + /* + * received a packet - this must be handled fast + */ + case 0xff: + case CMD_RECEIVE_PACKET_COMPLETE: + /* if the device isn't open, don't pass packets up the stack */ + if (!netif_running(dev)) + break; + len = adapter->irx_pcb.data.rcv_resp.pkt_len; + dlen = adapter->irx_pcb.data.rcv_resp.buf_len; + if (adapter->irx_pcb.data.rcv_resp.timeout != 0) { + pr_err("%s: interrupt - packet not received correctly\n", dev->name); + } else { + if (elp_debug >= 3) { + pr_debug("%s: interrupt - packet received of length %i (%i)\n", + dev->name, len, dlen); + } + if (adapter->irx_pcb.command == 0xff) { + if (elp_debug >= 2) + pr_debug("%s: adding packet to backlog (len = %d)\n", + dev->name, dlen); + adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen; + adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in); + } else { + receive_packet(dev, dlen); + } + if (elp_debug >= 3) + pr_debug("%s: packet received\n", dev->name); + } + break; + + /* + * 82586 configured correctly + */ + case CMD_CONFIGURE_82586_RESPONSE: + adapter->got[CMD_CONFIGURE_82586] = 1; + if (elp_debug >= 3) + pr_debug("%s: interrupt - configure response received\n", dev->name); + break; + + /* + * Adapter memory configuration + */ + case CMD_CONFIGURE_ADAPTER_RESPONSE: + adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1; + if (elp_debug >= 3) + pr_debug("%s: Adapter memory configuration %s.\n", dev->name, + adapter->irx_pcb.data.failed ? "failed" : "succeeded"); + break; + + /* + * Multicast list loading + */ + case CMD_LOAD_MULTICAST_RESPONSE: + adapter->got[CMD_LOAD_MULTICAST_LIST] = 1; + if (elp_debug >= 3) + pr_debug("%s: Multicast address list loading %s.\n", dev->name, + adapter->irx_pcb.data.failed ? 
"failed" : "succeeded"); + break; + + /* + * Station address setting + */ + case CMD_SET_ADDRESS_RESPONSE: + adapter->got[CMD_SET_STATION_ADDRESS] = 1; + if (elp_debug >= 3) + pr_debug("%s: Ethernet address setting %s.\n", dev->name, + adapter->irx_pcb.data.failed ? "failed" : "succeeded"); + break; + + + /* + * received board statistics + */ + case CMD_NETWORK_STATISTICS_RESPONSE: + dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv; + dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit; + dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC; + dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align; + dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun; + dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res; + adapter->got[CMD_NETWORK_STATISTICS] = 1; + if (elp_debug >= 3) + pr_debug("%s: interrupt - statistics response received\n", dev->name); + break; + + /* + * sent a packet + */ + case CMD_TRANSMIT_PACKET_COMPLETE: + if (elp_debug >= 3) + pr_debug("%s: interrupt - packet sent\n", dev->name); + if (!netif_running(dev)) + break; + switch (adapter->irx_pcb.data.xmit_resp.c_stat) { + case 0xffff: + dev->stats.tx_aborted_errors++; + pr_info("%s: transmit timed out, network cable problem?\n", dev->name); + break; + case 0xfffe: + dev->stats.tx_fifo_errors++; + pr_info("%s: transmit timed out, FIFO underrun\n", dev->name); + break; + } + netif_wake_queue(dev); + break; + + /* + * some unknown PCB + */ + default: + pr_debug("%s: unknown PCB received - %2.2x\n", + dev->name, adapter->irx_pcb.command); + break; + } + } else { + pr_warning("%s: failed to read PCB on interrupt\n", dev->name); + adapter_reset(dev); + } + } + + } while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE))); + + prime_rx(dev); + + /* + * indicate no longer in interrupt routine + */ + spin_unlock(&adapter->lock); + return IRQ_HANDLED; +} + + +/****************************************************** + * + * open the board + * + ******************************************************/ + +static int elp_open(struct net_device *dev) +{ + elp_device *adapter = netdev_priv(dev); + int retval; + + if (elp_debug >= 3) + pr_debug("%s: request to open device\n", dev->name); + + /* + * make sure we actually found the device + */ + if (adapter == NULL) { + pr_err("%s: Opening a non-existent physical device\n", dev->name); + return -EAGAIN; + } + /* + * disable interrupts on the board + */ + outb_control(0, dev); + + /* + * clear any pending interrupts + */ + inb_command(dev->base_addr); + adapter_reset(dev); + + /* + * no receive PCBs active + */ + adapter->rx_active = 0; + + adapter->busy = 0; + adapter->send_pcb_semaphore = 0; + adapter->rx_backlog.in = 0; + adapter->rx_backlog.out = 0; + + spin_lock_init(&adapter->lock); + + /* + * install our interrupt service routine + */ + if ((retval = request_irq(dev->irq, elp_interrupt, 0, dev->name, dev))) { + pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq); + return retval; + } + if ((retval = request_dma(dev->dma, dev->name))) { + free_irq(dev->irq, dev); + pr_err("%s: could not allocate DMA%d channel\n", dev->name, dev->dma); + return retval; + } + adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE); + if (!adapter->dma_buffer) { + pr_err("%s: could not allocate DMA buffer\n", dev->name); + free_dma(dev->dma); + free_irq(dev->irq, dev); + return -ENOMEM; + } + adapter->dmaing = 0; + + /* + * enable interrupts on the board + */ + outb_control(CMDE, dev); + + /* + * 
configure adapter memory: we need 10 multicast addresses, default==0 + */ + if (elp_debug >= 3) + pr_debug("%s: sending 3c505 memory configuration command\n", dev->name); + adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY; + adapter->tx_pcb.data.memconf.cmd_q = 10; + adapter->tx_pcb.data.memconf.rcv_q = 20; + adapter->tx_pcb.data.memconf.mcast = 10; + adapter->tx_pcb.data.memconf.frame = 20; + adapter->tx_pcb.data.memconf.rcv_b = 20; + adapter->tx_pcb.data.memconf.progs = 0; + adapter->tx_pcb.length = sizeof(struct Memconf); + adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0; + if (!send_pcb(dev, &adapter->tx_pcb)) + pr_err("%s: couldn't send memory configuration command\n", dev->name); + else { + unsigned long timeout = jiffies + TIMEOUT; + while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout)); + if (time_after_eq(jiffies, timeout)) + TIMEOUT_MSG(__LINE__); + } + + + /* + * configure adapter to receive broadcast messages and wait for response + */ + if (elp_debug >= 3) + pr_debug("%s: sending 82586 configure command\n", dev->name); + adapter->tx_pcb.command = CMD_CONFIGURE_82586; + adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD; + adapter->tx_pcb.length = 2; + adapter->got[CMD_CONFIGURE_82586] = 0; + if (!send_pcb(dev, &adapter->tx_pcb)) + pr_err("%s: couldn't send 82586 configure command\n", dev->name); + else { + unsigned long timeout = jiffies + TIMEOUT; + while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout)); + if (time_after_eq(jiffies, timeout)) + TIMEOUT_MSG(__LINE__); + } + + /* enable burst-mode DMA */ + /* outb(0x1, dev->base_addr + PORT_AUXDMA); */ + + /* + * queue receive commands to provide buffering + */ + prime_rx(dev); + if (elp_debug >= 3) + pr_debug("%s: %d receive PCBs active\n", dev->name, adapter->rx_active); + + /* + * device is now officially open! + */ + + netif_start_queue(dev); + return 0; +} + + +/****************************************************** + * + * send a packet to the adapter + * + ******************************************************/ + +static netdev_tx_t send_packet(struct net_device *dev, struct sk_buff *skb) +{ + elp_device *adapter = netdev_priv(dev); + unsigned long target; + unsigned long flags; + + /* + * make sure the length is even and no shorter than 60 bytes + */ + unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1); + + if (test_and_set_bit(0, (void *) &adapter->busy)) { + if (elp_debug >= 2) + pr_debug("%s: transmit blocked\n", dev->name); + return false; + } + + dev->stats.tx_bytes += nlen; + + /* + * send the adapter a transmit packet command. 
Ignore segment and offset + * and make sure the length is even + */ + adapter->tx_pcb.command = CMD_TRANSMIT_PACKET; + adapter->tx_pcb.length = sizeof(struct Xmit_pkt); + adapter->tx_pcb.data.xmit_pkt.buf_ofs + = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */ + adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen; + + if (!send_pcb(dev, &adapter->tx_pcb)) { + adapter->busy = 0; + return false; + } + /* if this happens, we die */ + if (test_and_set_bit(0, (void *) &adapter->dmaing)) + pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction); + + adapter->current_dma.direction = 1; + adapter->current_dma.start_time = jiffies; + + if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) { + skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen); + memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len); + target = isa_virt_to_bus(adapter->dma_buffer); + } + else { + target = isa_virt_to_bus(skb->data); + } + adapter->current_dma.skb = skb; + + flags=claim_dma_lock(); + disable_dma(dev->dma); + clear_dma_ff(dev->dma); + set_dma_mode(dev->dma, 0x48); /* dma memory -> io */ + set_dma_addr(dev->dma, target); + set_dma_count(dev->dma, nlen); + outb_control(adapter->hcr_val | DMAE | TCEN, dev); + enable_dma(dev->dma); + release_dma_lock(flags); + + if (elp_debug >= 3) + pr_debug("%s: DMA transfer started\n", dev->name); + + return true; +} + +/* + * The upper layer thinks we timed out + */ + +static void elp_timeout(struct net_device *dev) +{ + int stat; + + stat = inb_status(dev->base_addr); + pr_warning("%s: transmit timed out, lost %s?\n", dev->name, + (stat & ACRF) ? "interrupt" : "command"); + if (elp_debug >= 1) + pr_debug("%s: status %#02x\n", dev->name, stat); + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_dropped++; + netif_wake_queue(dev); +} + +/****************************************************** + * + * start the transmitter + * return 0 if sent OK, else return 1 + * + ******************************************************/ + +static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned long flags; + elp_device *adapter = netdev_priv(dev); + + spin_lock_irqsave(&adapter->lock, flags); + check_3c505_dma(dev); + + if (elp_debug >= 3) + pr_debug("%s: request to send packet of length %d\n", dev->name, (int) skb->len); + + netif_stop_queue(dev); + + /* + * send the packet at skb->data for skb->len + */ + if (!send_packet(dev, skb)) { + if (elp_debug >= 2) { + pr_debug("%s: failed to transmit packet\n", dev->name); + } + spin_unlock_irqrestore(&adapter->lock, flags); + return NETDEV_TX_BUSY; + } + if (elp_debug >= 3) + pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len); + + prime_rx(dev); + spin_unlock_irqrestore(&adapter->lock, flags); + netif_start_queue(dev); + return NETDEV_TX_OK; +} + +/****************************************************** + * + * return statistics on the board + * + ******************************************************/ + +static struct net_device_stats *elp_get_stats(struct net_device *dev) +{ + elp_device *adapter = netdev_priv(dev); + + if (elp_debug >= 3) + pr_debug("%s: request for stats\n", dev->name); + + /* If the device is closed, just return the latest stats we have, + - we cannot ask from the adapter without interrupts */ + if (!netif_running(dev)) + return &dev->stats; + + /* send a get statistics command to the board */ + adapter->tx_pcb.command = CMD_NETWORK_STATISTICS; + adapter->tx_pcb.length = 0; + 
adapter->got[CMD_NETWORK_STATISTICS] = 0; + if (!send_pcb(dev, &adapter->tx_pcb)) + pr_err("%s: couldn't send get statistics command\n", dev->name); + else { + unsigned long timeout = jiffies + TIMEOUT; + while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout)); + if (time_after_eq(jiffies, timeout)) { + TIMEOUT_MSG(__LINE__); + return &dev->stats; + } + } + + /* statistics are now up to date */ + return &dev->stats; +} + + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + return debug; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 level) +{ + debug = level; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, +}; + +/****************************************************** + * + * close the board + * + ******************************************************/ + +static int elp_close(struct net_device *dev) +{ + elp_device *adapter = netdev_priv(dev); + + if (elp_debug >= 3) + pr_debug("%s: request to close device\n", dev->name); + + netif_stop_queue(dev); + + /* Someone may request the device statistic information even when + * the interface is closed. The following will update the statistics + * structure in the driver, so we'll be able to give current statistics. + */ + (void) elp_get_stats(dev); + + /* + * disable interrupts on the board + */ + outb_control(0, dev); + + /* + * release the IRQ + */ + free_irq(dev->irq, dev); + + free_dma(dev->dma); + free_pages((unsigned long) adapter->dma_buffer, get_order(DMA_BUFFER_SIZE)); + + return 0; +} + + +/************************************************************ + * + * Set multicast list + * num_addrs==0: clear mc_list + * num_addrs==-1: set promiscuous mode + * num_addrs>0: set mc_list + * + ************************************************************/ + +static void elp_set_mc_list(struct net_device *dev) +{ + elp_device *adapter = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i; + unsigned long flags; + + if (elp_debug >= 3) + pr_debug("%s: request to set multicast list\n", dev->name); + + spin_lock_irqsave(&adapter->lock, flags); + + if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { + /* send a "load multicast list" command to the board, max 10 addrs/cmd */ + /* if num_addrs==0 the list will be cleared */ + adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST; + adapter->tx_pcb.length = 6 * netdev_mc_count(dev); + i = 0; + netdev_for_each_mc_addr(ha, dev) + memcpy(adapter->tx_pcb.data.multicast[i++], + ha->addr, 6); + adapter->got[CMD_LOAD_MULTICAST_LIST] = 0; + if (!send_pcb(dev, &adapter->tx_pcb)) + pr_err("%s: couldn't send set_multicast command\n", dev->name); + else { + unsigned long timeout = jiffies + TIMEOUT; + while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && time_before(jiffies, timeout)); + if (time_after_eq(jiffies, timeout)) { + TIMEOUT_MSG(__LINE__); + } + } + if (!netdev_mc_empty(dev)) + adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI; + else /* num_addrs == 0 */ + adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD; + } else + adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC; + /* + * configure adapter to receive messages (as specified above) + * and wait for response + */ + if 
(elp_debug >= 3) + pr_debug("%s: sending 82586 configure command\n", dev->name); + adapter->tx_pcb.command = CMD_CONFIGURE_82586; + adapter->tx_pcb.length = 2; + adapter->got[CMD_CONFIGURE_82586] = 0; + if (!send_pcb(dev, &adapter->tx_pcb)) + { + spin_unlock_irqrestore(&adapter->lock, flags); + pr_err("%s: couldn't send 82586 configure command\n", dev->name); + } + else { + unsigned long timeout = jiffies + TIMEOUT; + spin_unlock_irqrestore(&adapter->lock, flags); + while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout)); + if (time_after_eq(jiffies, timeout)) + TIMEOUT_MSG(__LINE__); + } +} + +/************************************************************ + * + * A couple of tests to see if there's 3C505 or not + * Called only by elp_autodetect + ************************************************************/ + +static int __init elp_sense(struct net_device *dev) +{ + int addr = dev->base_addr; + const char *name = dev->name; + byte orig_HSR; + + if (!request_region(addr, ELP_IO_EXTENT, "3c505")) + return -ENODEV; + + orig_HSR = inb_status(addr); + + if (elp_debug > 0) + pr_debug(search_msg, name, addr); + + if (orig_HSR == 0xff) { + if (elp_debug > 0) + pr_cont(notfound_msg, 1); + goto out; + } + + /* Wait for a while; the adapter may still be booting up */ + if (elp_debug > 0) + pr_cont(stilllooking_msg); + + if (orig_HSR & DIR) { + /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */ + outb(0, dev->base_addr + PORT_CONTROL); + msleep(300); + if (inb_status(addr) & DIR) { + if (elp_debug > 0) + pr_cont(notfound_msg, 2); + goto out; + } + } else { + /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */ + outb(DIR, dev->base_addr + PORT_CONTROL); + msleep(300); + if (!(inb_status(addr) & DIR)) { + if (elp_debug > 0) + pr_cont(notfound_msg, 3); + goto out; + } + } + /* + * It certainly looks like a 3c505. 
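+	 *
+	 * (Both probe branches above rely on the same property: on a
+	 * real board HSR.DIR mirrors HCR.DIR, so we toggle the control
+	 * bit and check that the status bit follows within ~300ms.)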
+ */ + if (elp_debug > 0) + pr_cont(found_msg); + + return 0; +out: + release_region(addr, ELP_IO_EXTENT); + return -ENODEV; +} + +/************************************************************* + * + * Search through addr_list[] and try to find a 3C505 + * Called only by eplus_probe + *************************************************************/ + +static int __init elp_autodetect(struct net_device *dev) +{ + int idx = 0; + + /* if base address set, then only check that address + otherwise, run through the table */ + if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */ + if (elp_sense(dev) == 0) + return dev->base_addr; + } else + while ((dev->base_addr = addr_list[idx++])) { + if (elp_sense(dev) == 0) + return dev->base_addr; + } + + /* could not find an adapter */ + if (elp_debug > 0) + pr_debug(couldnot_msg, dev->name); + + return 0; /* Because of this, the layer above will return -ENODEV */ +} + +static const struct net_device_ops elp_netdev_ops = { + .ndo_open = elp_open, + .ndo_stop = elp_close, + .ndo_get_stats = elp_get_stats, + .ndo_start_xmit = elp_start_xmit, + .ndo_tx_timeout = elp_timeout, + .ndo_set_multicast_list = elp_set_mc_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/****************************************************** + * + * probe for an Etherlink Plus board at the specified address + * + ******************************************************/ + +/* There are three situations we need to be able to detect here: + + * a) the card is idle + * b) the card is still booting up + * c) the card is stuck in a strange state (some DOS drivers do this) + * + * In case (a), all is well. In case (b), we wait 10 seconds to see if the + * card finishes booting, and carry on if so. In case (c), we do a hard reset, + * loop round, and hope for the best. + * + * This is all very unpleasant, but hopefully avoids the problems with the old + * probe code (which had a 15-second delay if the card was idle, and didn't + * work at all if it was in a weird state). + */ + +static int __init elplus_setup(struct net_device *dev) +{ + elp_device *adapter = netdev_priv(dev); + int i, tries, tries1, okay; + unsigned long timeout; + unsigned long cookie = 0; + int err = -ENODEV; + + /* + * setup adapter structure + */ + + dev->base_addr = elp_autodetect(dev); + if (!dev->base_addr) + return -ENODEV; + + adapter->send_pcb_semaphore = 0; + + for (tries1 = 0; tries1 < 3; tries1++) { + outb_control((adapter->hcr_val | CMDE) & ~DIR, dev); + /* First try to write just one byte, to see if the card is + * responding at all normally. + */ + timeout = jiffies + 5*HZ/100; + okay = 0; + while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE)); + if ((inb_status(dev->base_addr) & HCRE)) { + outb_command(0, dev->base_addr); /* send a spurious byte */ + timeout = jiffies + 5*HZ/100; + while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE)); + if (inb_status(dev->base_addr) & HCRE) + okay = 1; + } + if (!okay) { + /* Nope, it's ignoring the command register. This means that + * either it's still booting up, or it's died. + */ + pr_err("%s: command register wouldn't drain, ", dev->name); + if ((inb_status(dev->base_addr) & 7) == 3) { + /* If the adapter status is 3, it *could* still be booting. + * Give it the benefit of the doubt for 10 seconds. 
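+				 * (The low three status bits are the ASF
+				 * flags; the wait loop below treats ASF
+				 * dropping to zero as "boot complete".)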
+ */ + pr_cont("assuming 3c505 still starting\n"); + timeout = jiffies + 10*HZ; + while (time_before(jiffies, timeout) && (inb_status(dev->base_addr) & 7)); + if (inb_status(dev->base_addr) & 7) { + pr_err("%s: 3c505 failed to start\n", dev->name); + } else { + okay = 1; /* It started */ + } + } else { + /* Otherwise, it must just be in a strange + * state. We probably need to kick it. + */ + pr_cont("3c505 is sulking\n"); + } + } + for (tries = 0; tries < 5 && okay; tries++) { + + /* + * Try to set the Ethernet address, to make sure that the board + * is working. + */ + adapter->tx_pcb.command = CMD_STATION_ADDRESS; + adapter->tx_pcb.length = 0; + cookie = probe_irq_on(); + if (!send_pcb(dev, &adapter->tx_pcb)) { + pr_err("%s: could not send first PCB\n", dev->name); + probe_irq_off(cookie); + continue; + } + if (!receive_pcb(dev, &adapter->rx_pcb)) { + pr_err("%s: could not read first PCB\n", dev->name); + probe_irq_off(cookie); + continue; + } + if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) || + (adapter->rx_pcb.length != 6)) { + pr_err("%s: first PCB wrong (%d, %d)\n", dev->name, + adapter->rx_pcb.command, adapter->rx_pcb.length); + probe_irq_off(cookie); + continue; + } + goto okay; + } + /* It's broken. Do a hard reset to re-initialise the board, + * and try again. + */ + pr_info("%s: resetting adapter\n", dev->name); + outb_control(adapter->hcr_val | FLSH | ATTN, dev); + outb_control(adapter->hcr_val & ~(FLSH | ATTN), dev); + } + pr_err("%s: failed to initialise 3c505\n", dev->name); + goto out; + + okay: + if (dev->irq) { /* Is there a preset IRQ? */ + int rpt = probe_irq_off(cookie); + if (dev->irq != rpt) { + pr_warning("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt); + } + /* if dev->irq == probe_irq_off(cookie), all is well */ + } else /* No preset IRQ; just use what we can detect */ + dev->irq = probe_irq_off(cookie); + switch (dev->irq) { /* Legal, sane? */ + case 0: + pr_err("%s: IRQ probe failed: check 3c505 jumpers.\n", + dev->name); + goto out; + case 1: + case 6: + case 8: + case 13: + pr_err("%s: Impossible IRQ %d reported by probe_irq_off().\n", + dev->name, dev->irq); + goto out; + } + /* + * Now we have the IRQ number so we can disable the interrupts from + * the board until the board is opened. 
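+	 * (Clearing CMDE masks the command-register interrupt;
+	 * elp_open() re-enables it with outb_control(CMDE, dev).)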
+ */ + outb_control(adapter->hcr_val & ~CMDE, dev); + + /* + * copy Ethernet address into structure + */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i]; + + /* find a DMA channel */ + if (!dev->dma) { + if (dev->mem_start) { + dev->dma = dev->mem_start & 7; + } + else { + pr_warning("%s: warning, DMA channel not specified, using default\n", dev->name); + dev->dma = ELP_DMA; + } + } + + /* + * print remainder of startup message + */ + pr_info("%s: 3c505 at %#lx, irq %d, dma %d, addr %pM, ", + dev->name, dev->base_addr, dev->irq, dev->dma, dev->dev_addr); + /* + * read more information from the adapter + */ + + adapter->tx_pcb.command = CMD_ADAPTER_INFO; + adapter->tx_pcb.length = 0; + if (!send_pcb(dev, &adapter->tx_pcb) || + !receive_pcb(dev, &adapter->rx_pcb) || + (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) || + (adapter->rx_pcb.length != 10)) { + pr_cont("not responding to second PCB\n"); + } + pr_cont("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers, + adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz); + + /* + * reconfigure the adapter memory to better suit our purposes + */ + adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY; + adapter->tx_pcb.length = 12; + adapter->tx_pcb.data.memconf.cmd_q = 8; + adapter->tx_pcb.data.memconf.rcv_q = 8; + adapter->tx_pcb.data.memconf.mcast = 10; + adapter->tx_pcb.data.memconf.frame = 10; + adapter->tx_pcb.data.memconf.rcv_b = 10; + adapter->tx_pcb.data.memconf.progs = 0; + if (!send_pcb(dev, &adapter->tx_pcb) || + !receive_pcb(dev, &adapter->rx_pcb) || + (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) || + (adapter->rx_pcb.length != 2)) { + pr_err("%s: could not configure adapter memory\n", dev->name); + } + if (adapter->rx_pcb.data.configure) { + pr_err("%s: adapter configuration failed\n", dev->name); + } + + dev->netdev_ops = &elp_netdev_ops; + dev->watchdog_timeo = 10*HZ; + dev->ethtool_ops = &netdev_ethtool_ops; /* local */ + + dev->mem_start = dev->mem_end = 0; + + err = register_netdev(dev); + if (err) + goto out; + + return 0; +out: + release_region(dev->base_addr, ELP_IO_EXTENT); + return err; +} + +#ifndef MODULE +struct net_device * __init elplus_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(elp_device)); + int err; + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = elplus_setup(dev); + if (err) { + free_netdev(dev); + return ERR_PTR(err); + } + return dev; +} + +#else +static struct net_device *dev_3c505[ELP_MAX_CARDS]; +static int io[ELP_MAX_CARDS]; +static int irq[ELP_MAX_CARDS]; +static int dma[ELP_MAX_CARDS]; +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(dma, int, NULL, 0); +MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)"); +MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)"); +MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)"); + +int __init init_module(void) +{ + int this_dev, found = 0; + + for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) { + struct net_device *dev = alloc_etherdev(sizeof(elp_device)); + if (!dev) + break; + + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + if (dma[this_dev]) { + dev->dma = dma[this_dev]; + } else { + dev->dma = ELP_DMA; + pr_warning("3c505.c: warning, using default DMA channel,\n"); + } + if (io[this_dev] == 0) { + if (this_dev) { + free_netdev(dev); + break; + } + pr_notice("3c505.c: module autoprobe not recommended, 
give io=xx.\n"); + } + if (elplus_setup(dev) != 0) { + pr_warning("3c505.c: Failed to register card at 0x%x.\n", io[this_dev]); + free_netdev(dev); + break; + } + dev_3c505[this_dev] = dev; + found++; + } + if (!found) + return -ENODEV; + return 0; +} + +void __exit cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) { + struct net_device *dev = dev_3c505[this_dev]; + if (dev) { + unregister_netdev(dev); + release_region(dev->base_addr, ELP_IO_EXTENT); + free_netdev(dev); + } + } +} + +#endif /* MODULE */ +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/i825xx/3c505.h b/drivers/net/ethernet/i825xx/3c505.h new file mode 100644 index 0000000..04df2a9 --- /dev/null +++ b/drivers/net/ethernet/i825xx/3c505.h @@ -0,0 +1,292 @@ +/***************************************************************** + * + * defines for 3Com Etherlink Plus adapter + * + *****************************************************************/ + +#define ELP_DMA 6 +#define ELP_RX_PCBS 4 +#define ELP_MAX_CARDS 4 + +/* + * I/O register offsets + */ +#define PORT_COMMAND 0x00 /* read/write, 8-bit */ +#define PORT_STATUS 0x02 /* read only, 8-bit */ +#define PORT_AUXDMA 0x02 /* write only, 8-bit */ +#define PORT_DATA 0x04 /* read/write, 16-bit */ +#define PORT_CONTROL 0x06 /* read/write, 8-bit */ + +#define ELP_IO_EXTENT 0x10 /* size of used IO registers */ + +/* + * host control registers bits + */ +#define ATTN 0x80 /* attention */ +#define FLSH 0x40 /* flush data register */ +#define DMAE 0x20 /* DMA enable */ +#define DIR 0x10 /* direction */ +#define TCEN 0x08 /* terminal count interrupt enable */ +#define CMDE 0x04 /* command register interrupt enable */ +#define HSF2 0x02 /* host status flag 2 */ +#define HSF1 0x01 /* host status flag 1 */ + +/* + * combinations of HSF flags used for PCB transmission + */ +#define HSF_PCB_ACK HSF1 +#define HSF_PCB_NAK HSF2 +#define HSF_PCB_END (HSF2|HSF1) +#define HSF_PCB_MASK (HSF2|HSF1) + +/* + * host status register bits + */ +#define HRDY 0x80 /* data register ready */ +#define HCRE 0x40 /* command register empty */ +#define ACRF 0x20 /* adapter command register full */ +/* #define DIR 0x10 direction - same as in control register */ +#define DONE 0x08 /* DMA done */ +#define ASF3 0x04 /* adapter status flag 3 */ +#define ASF2 0x02 /* adapter status flag 2 */ +#define ASF1 0x01 /* adapter status flag 1 */ + +/* + * combinations of ASF flags used for PCB reception + */ +#define ASF_PCB_ACK ASF1 +#define ASF_PCB_NAK ASF2 +#define ASF_PCB_END (ASF2|ASF1) +#define ASF_PCB_MASK (ASF2|ASF1) + +/* + * host aux DMA register bits + */ +#define DMA_BRST 0x01 /* DMA burst */ + +/* + * maximum amount of data allowed in a PCB + */ +#define MAX_PCB_DATA 62 + +/***************************************************************** + * + * timeout value + * this is a rough value used for loops to stop them from + * locking up the whole machine in the case of failure or + * error conditions + * + *****************************************************************/ + +#define TIMEOUT 300 + +/***************************************************************** + * + * PCB commands + * + *****************************************************************/ + +enum { + /* + * host PCB commands + */ + CMD_CONFIGURE_ADAPTER_MEMORY = 0x01, + CMD_CONFIGURE_82586 = 0x02, + CMD_STATION_ADDRESS = 0x03, + CMD_DMA_DOWNLOAD = 0x04, + CMD_DMA_UPLOAD = 0x05, + CMD_PIO_DOWNLOAD = 0x06, + CMD_PIO_UPLOAD = 0x07, + CMD_RECEIVE_PACKET = 0x08, + CMD_TRANSMIT_PACKET = 0x09, + 
CMD_NETWORK_STATISTICS = 0x0a, + CMD_LOAD_MULTICAST_LIST = 0x0b, + CMD_CLEAR_PROGRAM = 0x0c, + CMD_DOWNLOAD_PROGRAM = 0x0d, + CMD_EXECUTE_PROGRAM = 0x0e, + CMD_SELF_TEST = 0x0f, + CMD_SET_STATION_ADDRESS = 0x10, + CMD_ADAPTER_INFO = 0x11, + NUM_TRANSMIT_CMDS, + + /* + * adapter PCB commands + */ + CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31, + CMD_CONFIGURE_82586_RESPONSE = 0x32, + CMD_ADDRESS_RESPONSE = 0x33, + CMD_DOWNLOAD_DATA_REQUEST = 0x34, + CMD_UPLOAD_DATA_REQUEST = 0x35, + CMD_RECEIVE_PACKET_COMPLETE = 0x38, + CMD_TRANSMIT_PACKET_COMPLETE = 0x39, + CMD_NETWORK_STATISTICS_RESPONSE = 0x3a, + CMD_LOAD_MULTICAST_RESPONSE = 0x3b, + CMD_CLEAR_PROGRAM_RESPONSE = 0x3c, + CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d, + CMD_EXECUTE_RESPONSE = 0x3e, + CMD_SELF_TEST_RESPONSE = 0x3f, + CMD_SET_ADDRESS_RESPONSE = 0x40, + CMD_ADAPTER_INFO_RESPONSE = 0x41 +}; + +/* Definitions for the PCB data structure */ + +/* Data units */ +typedef unsigned char byte; +typedef unsigned short int word; +typedef unsigned long int dword; + +/* Data structures */ +struct Memconf { + word cmd_q, + rcv_q, + mcast, + frame, + rcv_b, + progs; +}; + +struct Rcv_pkt { + word buf_ofs, + buf_seg, + buf_len, + timeout; +}; + +struct Xmit_pkt { + word buf_ofs, + buf_seg, + pkt_len; +}; + +struct Rcv_resp { + word buf_ofs, + buf_seg, + buf_len, + pkt_len, + timeout, + status; + dword timetag; +}; + +struct Xmit_resp { + word buf_ofs, + buf_seg, + c_stat, + status; +}; + + +struct Netstat { + dword tot_recv, + tot_xmit; + word err_CRC, + err_align, + err_res, + err_ovrrun; +}; + + +struct Selftest { + word error; + union { + word ROM_cksum; + struct { + word ofs, seg; + } RAM; + word i82586; + } failure; +}; + +struct Info { + byte minor_vers, + major_vers; + word ROM_cksum, + RAM_sz, + free_ofs, + free_seg; +}; + +struct Memdump { + word size, + off, + seg; +}; + +/* +Primary Command Block. The most important data structure. All communication +between the host and the adapter is done with these. (Except for the actual +Ethernet data, which has different packaging.) 
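+
+A typical exchange fills in command, length and the matching data union
+member, then hands the block to send_pcb(); the adapter's reply arrives
+later as another PCB. Roughly what start_receive() in 3c505.c does:
+
+	pcb.command = CMD_RECEIVE_PACKET;
+	pcb.length = sizeof(struct Rcv_pkt);
+	pcb.data.rcv_pkt.buf_len = 1600;
+	pcb.data.rcv_pkt.timeout = 0;
+	send_pcb(dev, &pcb);
+
+and the eventual answer is a CMD_RECEIVE_PACKET_COMPLETE PCB carrying a
+struct Rcv_resp.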
+*/ +typedef struct { + byte command; + byte length; + union { + struct Memconf memconf; + word configure; + struct Rcv_pkt rcv_pkt; + struct Xmit_pkt xmit_pkt; + byte multicast[10][6]; + byte eth_addr[6]; + byte failed; + struct Rcv_resp rcv_resp; + struct Xmit_resp xmit_resp; + struct Netstat netstat; + struct Selftest selftest; + struct Info info; + struct Memdump memdump; + byte raw[62]; + } data; +} pcb_struct; + +/* These defines for 'configure' */ +#define RECV_STATION 0x00 +#define RECV_BROAD 0x01 +#define RECV_MULTI 0x02 +#define RECV_PROMISC 0x04 +#define NO_LOOPBACK 0x00 +#define INT_LOOPBACK 0x08 +#define EXT_LOOPBACK 0x10 + +/***************************************************************** + * + * structure to hold context information for adapter + * + *****************************************************************/ + +#define DMA_BUFFER_SIZE 1600 +#define BACKLOG_SIZE 4 + +typedef struct { + volatile short got[NUM_TRANSMIT_CMDS]; /* flags for + command completion */ + pcb_struct tx_pcb; /* PCB for foreground sending */ + pcb_struct rx_pcb; /* PCB for foreground receiving */ + pcb_struct itx_pcb; /* PCB for background sending */ + pcb_struct irx_pcb; /* PCB for background receiving */ + + void *dma_buffer; + + struct { + unsigned int length[BACKLOG_SIZE]; + unsigned int in; + unsigned int out; + } rx_backlog; + + struct { + unsigned int direction; + unsigned int length; + struct sk_buff *skb; + void *target; + unsigned long start_time; + } current_dma; + + /* flags */ + unsigned long send_pcb_semaphore; + unsigned long dmaing; + unsigned long busy; + + unsigned int rx_active; /* number of receive PCBs */ + volatile unsigned char hcr_val; /* what we think the HCR contains */ + spinlock_t lock; /* Interrupt v tx lock */ +} elp_device; diff --git a/drivers/net/ethernet/i825xx/3c507.c b/drivers/net/ethernet/i825xx/3c507.c new file mode 100644 index 0000000..1e94555 --- /dev/null +++ b/drivers/net/ethernet/i825xx/3c507.c @@ -0,0 +1,939 @@ +/* 3c507.c: An EtherLink16 device driver for Linux. */ +/* + Written 1993,1994 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + + Thanks go to jennings@Montrouge.SMR.slb.com ( Patrick Jennings) + and jrs@world.std.com (Rick Sladkey) for testing and bugfixes. + Mark Salazar made the changes for cards with + only 16K packet buffers. + + Things remaining to do: + Verify that the tx and rx buffers don't have fencepost errors. + Move the theory of operation and memory map documentation. + The statistics need to be updated correctly. +*/ + +#define DRV_NAME "3c507" +#define DRV_VERSION "1.10a" +#define DRV_RELDATE "11/17/2001" + +static const char version[] = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n"; + +/* + Sources: + This driver wouldn't have been written with the availability of the + Crynwr driver source code. It provided a known-working implementation + that filled in the gaping holes of the Intel documentation. Three cheers + for Russ Nelson. + + Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough + info that the casual reader might think that it documents the i82586 :-<. 
+*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* use 0 for production, 1 for verification, 2..7 for debug */ +#ifndef NET_DEBUG +#define NET_DEBUG 1 +#endif +static unsigned int net_debug = NET_DEBUG; +#define debug net_debug + + +/* + Details of the i82586. + + You'll really need the databook to understand the details of this part, + but the outline is that the i82586 has two separate processing units. + Both are started from a list of three configuration tables, of which only + the last, the System Control Block (SCB), is used after reset-time. The SCB + has the following fields: + Status word + Command word + Tx/Command block addr. + Rx block addr. + The command word accepts the following controls for the Tx and Rx units: + */ + +#define CUC_START 0x0100 +#define CUC_RESUME 0x0200 +#define CUC_SUSPEND 0x0300 +#define RX_START 0x0010 +#define RX_RESUME 0x0020 +#define RX_SUSPEND 0x0030 + +/* The Rx unit uses a list of frame descriptors and a list of data buffer + descriptors. We use full-sized (1518 byte) data buffers, so there is + a one-to-one pairing of frame descriptors to buffer descriptors. + + The Tx ("command") unit executes a list of commands that look like: + Status word Written by the 82586 when the command is done. + Command word Command in lower 3 bits, post-command action in upper 3 + Link word The address of the next command. + Parameters (as needed). + + Some definitions related to the Command Word are: + */ +#define CMD_EOL 0x8000 /* The last command of the list, stop. */ +#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ +#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ + +enum commands { + CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, + CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7}; + +/* Information that need to be kept for each board. */ +struct net_local { + int last_restart; + ushort rx_head; + ushort rx_tail; + ushort tx_head; + ushort tx_cmd_link; + ushort tx_reap; + ushort tx_pkts_in_ring; + spinlock_t lock; + void __iomem *base; +}; + +/* + Details of the EtherLink16 Implementation + The 3c507 is a generic shared-memory i82586 implementation. + The host can map 16K, 32K, 48K, or 64K of the 64K memory into + 0x0[CD][08]0000, or all 64K into 0xF[02468]0000. + */ + +/* Offsets from the base I/O address. */ +#define SA_DATA 0 /* Station address data, or 3Com signature. */ +#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */ +#define RESET_IRQ 10 /* Reset the latched IRQ line. */ +#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */ +#define ROM_CONFIG 13 +#define MEM_CONFIG 14 +#define IRQ_CONFIG 15 +#define EL16_IO_EXTENT 16 + +/* The ID port is used at boot-time to locate the ethercard. */ +#define ID_PORT 0x100 + +/* Offsets to registers in the mailbox (SCB). */ +#define iSCB_STATUS 0x8 +#define iSCB_CMD 0xA +#define iSCB_CBL 0xC /* Command BLock offset. */ +#define iSCB_RFA 0xE /* Rx Frame Area offset. */ + +/* Since the 3c507 maps the shared memory window so that the last byte is + at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or + 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively. 
+ We can account for this be setting the 'SBC Base' entry in the ISCP table + below for all the 16 bit offset addresses, and also adding the 'SCB Base' + value to all 24 bit physical addresses (in the SCP table and the TX and RX + Buffer Descriptors). + -Mark + */ +#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start)) + +/* + What follows in 'init_words[]' is the "program" that is downloaded to the + 82586 memory. It's mostly tables and command blocks, and starts at the + reset address 0xfffff6. This is designed to be similar to the EtherExpress, + thus the unusual location of the SCB at 0x0008. + + Even with the additional "don't care" values, doing it this way takes less + program space than initializing the individual tables, and I feel it's much + cleaner. + + The databook is particularly useless for the first two structures, I had + to use the Crynwr driver as an example. + + The memory setup is as follows: + */ + +#define CONFIG_CMD 0x0018 +#define SET_SA_CMD 0x0024 +#define SA_OFFSET 0x002A +#define IDLELOOP 0x30 +#define TDR_CMD 0x38 +#define TDR_TIME 0x3C +#define DUMP_CMD 0x40 +#define DIAG_CMD 0x48 +#define SET_MC_CMD 0x4E +#define DUMP_DATA 0x56 /* A 170 byte buffer for dump and Set-MC into. */ + +#define TX_BUF_START 0x0100 +#define NUM_TX_BUFS 5 +#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */ + +#define RX_BUF_START 0x2000 +#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */ +#define RX_BUF_END (dev->mem_end - dev->mem_start) + +#define TX_TIMEOUT (HZ/20) + +/* + That's it: only 86 bytes to set up the beast, including every extra + command available. The 170 byte buffer at DUMP_DATA is shared between the + Dump command (called only by the diagnostic program) and the SetMulticastList + command. + + To complete the memory setup you only have to write the station address at + SA_OFFSET and create the Tx & Rx buffer lists. + + The Tx command chain and buffer list is setup as follows: + A Tx command table, with the data buffer pointing to... + A Tx data buffer descriptor. The packet is in a single buffer, rather than + chaining together several smaller buffers. + A NoOp command, which initially points to itself, + And the packet data. + + A transmit is done by filling in the Tx command table and data buffer, + re-writing the NoOp command, and finally changing the offset of the last + command to point to the current Tx command. When the Tx command is finished, + it jumps to the NoOp, when it loops until the next Tx command changes the + "link offset" in the NoOp. This way the 82586 never has to go through the + slow restart sequence. + + The Rx buffer list is set up in the obvious ring structure. We have enough + memory (and low enough interrupt latency) that we can avoid the complicated + Rx buffer linked lists by alway associating a full-size Rx data buffer with + each Rx data frame. + + I current use four transmit buffers starting at TX_BUF_START (0x0100), and + use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers. + + */ + +static unsigned short init_words[] = { + /* System Configuration Pointer (SCP). */ + 0x0000, /* Set bus size to 16 bits. */ + 0,0, /* pad words. */ + 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */ + + /* Intermediate System Configuration Pointer (ISCP). */ + 0x0001, /* Status word that's cleared when init is done. */ + 0x0008,0,0, /* SCB offset, (skip, skip) */ + + /* System Control Block (SCB). */ + 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. 
*/ + CONFIG_CMD, /* Command list pointer, points to Configure. */ + RX_BUF_START, /* Rx block list. */ + 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */ + + /* 0x0018: Configure command. Change to put MAC data with packet. */ + 0, CmdConfigure, /* Status, command. */ + SET_SA_CMD, /* Next command is Set Station Addr. */ + 0x0804, /* "4" bytes of config data, 8 byte FIFO. */ + 0x2e40, /* Magic values, including MAC data location. */ + 0, /* Unused pad word. */ + + /* 0x0024: Setup station address command. */ + 0, CmdSASetup, + SET_MC_CMD, /* Next command. */ + 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */ + + /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */ + 0, CmdNOp, IDLELOOP, 0 /* pad */, + + /* 0x0038: A unused Time-Domain Reflectometer command. */ + 0, CmdTDR, IDLELOOP, 0, + + /* 0x0040: An unused Dump State command. */ + 0, CmdDump, IDLELOOP, DUMP_DATA, + + /* 0x0048: An unused Diagnose command. */ + 0, CmdDiagnose, IDLELOOP, + + /* 0x004E: An empty set-multicast-list command. */ + 0, CmdMulticastList, IDLELOOP, 0, +}; + +/* Index to functions, as function prototypes. */ + +static int el16_probe1(struct net_device *dev, int ioaddr); +static int el16_open(struct net_device *dev); +static netdev_tx_t el16_send_packet(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t el16_interrupt(int irq, void *dev_id); +static void el16_rx(struct net_device *dev); +static int el16_close(struct net_device *dev); +static void el16_tx_timeout (struct net_device *dev); + +static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad); +static void init_82586_mem(struct net_device *dev); +static const struct ethtool_ops netdev_ethtool_ops; +static void init_rx_bufs(struct net_device *); + +static int io = 0x300; +static int irq; +static int mem_start; + + +/* Check for a network adaptor of this type, and return '0' iff one exists. + If dev->base_addr == 0, probe all likely locations. + If dev->base_addr == 1, always return failure. + If dev->base_addr == 2, (detachable devices only) allocate space for the + device and return success. + */ + +struct net_device * __init el16_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); + static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0}; + const unsigned *port; + int err = -ENODEV; + + if (!dev) + return ERR_PTR(-ENODEV); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + io = dev->base_addr; + irq = dev->irq; + mem_start = dev->mem_start & 15; + } + + if (io > 0x1ff) /* Check a single specified location. */ + err = el16_probe1(dev, io); + else if (io != 0) + err = -ENXIO; /* Don't probe at all. 
*/ + else { + for (port = ports; *port; port++) { + err = el16_probe1(dev, *port); + if (!err) + break; + } + } + + if (err) + goto out; + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: + free_irq(dev->irq, dev); + iounmap(((struct net_local *)netdev_priv(dev))->base); + release_region(dev->base_addr, EL16_IO_EXTENT); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static const struct net_device_ops netdev_ops = { + .ndo_open = el16_open, + .ndo_stop = el16_close, + .ndo_start_xmit = el16_send_packet, + .ndo_tx_timeout = el16_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __init el16_probe1(struct net_device *dev, int ioaddr) +{ + static unsigned char init_ID_done; + int i, irq, irqval, retval; + struct net_local *lp; + + if (init_ID_done == 0) { + ushort lrs_state = 0xff; + /* Send the ID sequence to the ID_PORT to enable the board(s). */ + outb(0x00, ID_PORT); + for(i = 0; i < 255; i++) { + outb(lrs_state, ID_PORT); + lrs_state <<= 1; + if (lrs_state & 0x100) + lrs_state ^= 0xe7; + } + outb(0x00, ID_PORT); + init_ID_done = 1; + } + + if (!request_region(ioaddr, EL16_IO_EXTENT, DRV_NAME)) + return -ENODEV; + + if ((inb(ioaddr) != '*') || (inb(ioaddr + 1) != '3') || + (inb(ioaddr + 2) != 'C') || (inb(ioaddr + 3) != 'O')) { + retval = -ENODEV; + goto out; + } + + pr_info("%s: 3c507 at %#x,", dev->name, ioaddr); + + /* We should make a few more checks here, like the first three octets of + the S.A. for the manufacturer's code. */ + + irq = inb(ioaddr + IRQ_CONFIG) & 0x0f; + + irqval = request_irq(irq, el16_interrupt, 0, DRV_NAME, dev); + if (irqval) { + pr_cont("\n"); + pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval); + retval = -EAGAIN; + goto out; + } + + /* We've committed to using the board, and can start filling in *dev. */ + dev->base_addr = ioaddr; + + outb(0x01, ioaddr + MISC_CTRL); + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + i); + pr_cont(" %pM", dev->dev_addr); + + if (mem_start) + net_debug = mem_start & 7; + +#ifdef MEM_BASE + dev->mem_start = MEM_BASE; + dev->mem_end = dev->mem_start + 0x10000; +#else + { + int base; + int size; + char mem_config = inb(ioaddr + MEM_CONFIG); + if (mem_config & 0x20) { + size = 64*1024; + base = 0xf00000 + (mem_config & 0x08 ? 0x080000 + : ((mem_config & 3) << 17)); + } else { + size = ((mem_config & 3) + 1) << 14; + base = 0x0c0000 + ( (mem_config & 0x18) << 12); + } + dev->mem_start = base; + dev->mem_end = base + size; + } +#endif + + dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0; + dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f; + + pr_cont(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq, + dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1); + + if (net_debug) + pr_debug("%s", version); + + lp = netdev_priv(dev); + spin_lock_init(&lp->lock); + lp->base = ioremap(dev->mem_start, RX_BUF_END); + if (!lp->base) { + pr_err("3c507: unable to remap memory\n"); + retval = -EAGAIN; + goto out1; + } + + dev->netdev_ops = &netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */ + return 0; +out1: + free_irq(dev->irq, dev); +out: + release_region(ioaddr, EL16_IO_EXTENT); + return retval; +} + +static int el16_open(struct net_device *dev) +{ + /* Initialize the 82586 memory and start it. 
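+	   init_82586_mem() copies init_words[] into the shared memory
+	   window (SCP/ISCP at the very top, SCB and command blocks at
+	   offset 0), fills in the station address, and then releases the
+	   586's reset line, so the chip comes up already configured.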
*/ + init_82586_mem(dev); + + netif_start_queue(dev); + return 0; +} + + +static void el16_tx_timeout (struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + void __iomem *shmem = lp->base; + + if (net_debug > 1) + pr_debug("%s: transmit timed out, %s? ", dev->name, + readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" : + "network cable problem"); + /* Try to restart the adaptor. */ + if (lp->last_restart == dev->stats.tx_packets) { + if (net_debug > 1) + pr_cont("Resetting board.\n"); + /* Completely reset the adaptor. */ + init_82586_mem (dev); + lp->tx_pkts_in_ring = 0; + } else { + /* Issue the channel attention signal and hope it "gets better". */ + if (net_debug > 1) + pr_cont("Kicking board.\n"); + writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD); + outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */ + lp->last_restart = dev->stats.tx_packets; + } + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue (dev); +} + + +static netdev_tx_t el16_send_packet (struct sk_buff *skb, + struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + unsigned long flags; + short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + unsigned char *buf = skb->data; + + netif_stop_queue (dev); + + spin_lock_irqsave (&lp->lock, flags); + + dev->stats.tx_bytes += length; + /* Disable the 82586's input to the interrupt line. */ + outb (0x80, ioaddr + MISC_CTRL); + + hardware_send_packet (dev, buf, skb->len, length - skb->len); + + /* Enable the 82586 interrupt input. */ + outb (0x84, ioaddr + MISC_CTRL); + + spin_unlock_irqrestore (&lp->lock, flags); + + dev_kfree_skb (skb); + + /* You might need to clean up and record Tx statistics here. */ + + return NETDEV_TX_OK; +} + +/* The typical workload of the driver: + Handle the network interface interrupts. */ +static irqreturn_t el16_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct net_local *lp; + int ioaddr, status, boguscount = 0; + ushort ack_cmd = 0; + void __iomem *shmem; + + if (dev == NULL) { + pr_err("net_interrupt(): irq %d for unknown device.\n", irq); + return IRQ_NONE; + } + + ioaddr = dev->base_addr; + lp = netdev_priv(dev); + shmem = lp->base; + + spin_lock(&lp->lock); + + status = readw(shmem+iSCB_STATUS); + + if (net_debug > 4) { + pr_debug("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status); + } + + /* Disable the 82586's input to the interrupt line. */ + outb(0x80, ioaddr + MISC_CTRL); + + /* Reap the Tx packet buffers. */ + while (lp->tx_pkts_in_ring) { + unsigned short tx_status = readw(shmem+lp->tx_reap); + if (!(tx_status & 0x8000)) { + if (net_debug > 5) + pr_debug("Tx command incomplete (%#x).\n", lp->tx_reap); + break; + } + /* Tx unsuccessful or some interesting status bit set. */ + if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) { + dev->stats.tx_errors++; + if (tx_status & 0x0600) dev->stats.tx_carrier_errors++; + if (tx_status & 0x0100) dev->stats.tx_fifo_errors++; + if (!(tx_status & 0x0040)) dev->stats.tx_heartbeat_errors++; + if (tx_status & 0x0020) dev->stats.tx_aborted_errors++; + dev->stats.collisions += tx_status & 0xf; + } + dev->stats.tx_packets++; + if (net_debug > 5) + pr_debug("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status); + lp->tx_reap += TX_BUF_SIZE; + if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE) + lp->tx_reap = TX_BUF_START; + + lp->tx_pkts_in_ring--; + /* There is always more space in the Tx ring buffer now. 
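+		   (tx_pkts_in_ring just went down by one, i.e. one whole
+		   TX_BUF_SIZE slot of the Tx ring is free again.)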
*/ + netif_wake_queue(dev); + + if (++boguscount > 10) + break; + } + + if (status & 0x4000) { /* Packet received. */ + if (net_debug > 5) + pr_debug("Received packet, rx_head %04x.\n", lp->rx_head); + el16_rx(dev); + } + + /* Acknowledge the interrupt sources. */ + ack_cmd = status & 0xf000; + + if ((status & 0x0700) != 0x0200 && netif_running(dev)) { + if (net_debug) + pr_debug("%s: Command unit stopped, status %04x, restarting.\n", + dev->name, status); + /* If this ever occurs we should really re-write the idle loop, reset + the Tx list, and do a complete restart of the command unit. + For now we rely on the Tx timeout if the resume doesn't work. */ + ack_cmd |= CUC_RESUME; + } + + if ((status & 0x0070) != 0x0040 && netif_running(dev)) { + /* The Rx unit is not ready, it must be hung. Restart the receiver by + initializing the rx buffers, and issuing an Rx start command. */ + if (net_debug) + pr_debug("%s: Rx unit stopped, status %04x, restarting.\n", + dev->name, status); + init_rx_bufs(dev); + writew(RX_BUF_START,shmem+iSCB_RFA); + ack_cmd |= RX_START; + } + + writew(ack_cmd,shmem+iSCB_CMD); + outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */ + + /* Clear the latched interrupt. */ + outb(0, ioaddr + RESET_IRQ); + + /* Enable the 82586's interrupt input. */ + outb(0x84, ioaddr + MISC_CTRL); + spin_unlock(&lp->lock); + return IRQ_HANDLED; +} + +static int el16_close(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + void __iomem *shmem = lp->base; + + netif_stop_queue(dev); + + /* Flush the Tx and disable Rx. */ + writew(RX_SUSPEND | CUC_SUSPEND,shmem+iSCB_CMD); + outb(0, ioaddr + SIGNAL_CA); + + /* Disable the 82586's input to the interrupt line. */ + outb(0x80, ioaddr + MISC_CTRL); + + /* We always physically use the IRQ line, so we don't do free_irq(). */ + + /* Update the statistics here. */ + + return 0; +} + +/* Initialize the Rx-block list. */ +static void init_rx_bufs(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + void __iomem *write_ptr; + unsigned short SCB_base = SCB_BASE; + + int cur_rxbuf = lp->rx_head = RX_BUF_START; + + /* Initialize each Rx frame + data buffer. */ + do { /* While there is room for one more. */ + + write_ptr = lp->base + cur_rxbuf; + + writew(0x0000,write_ptr); /* Status */ + writew(0x0000,write_ptr+=2); /* Command */ + writew(cur_rxbuf + RX_BUF_SIZE,write_ptr+=2); /* Link */ + writew(cur_rxbuf + 22,write_ptr+=2); /* Buffer offset */ + writew(0x0000,write_ptr+=2); /* Pad for dest addr. */ + writew(0x0000,write_ptr+=2); + writew(0x0000,write_ptr+=2); + writew(0x0000,write_ptr+=2); /* Pad for source addr. */ + writew(0x0000,write_ptr+=2); + writew(0x0000,write_ptr+=2); + writew(0x0000,write_ptr+=2); /* Pad for protocol. */ + + writew(0x0000,write_ptr+=2); /* Buffer: Actual count */ + writew(-1,write_ptr+=2); /* Buffer: Next (none). */ + writew(cur_rxbuf + 0x20 + SCB_base,write_ptr+=2);/* Buffer: Address low */ + writew(0x0000,write_ptr+=2); + /* Finally, the number of bytes in the buffer. */ + writew(0x8000 + RX_BUF_SIZE-0x20,write_ptr+=2); + + lp->rx_tail = cur_rxbuf; + cur_rxbuf += RX_BUF_SIZE; + } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE); + + /* Terminate the list by setting the EOL bit, and wrap the pointer to make + the list a ring. */ + write_ptr = lp->base + lp->rx_tail + 2; + writew(0xC000,write_ptr); /* Command, mark as last. 
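+	   (0xC000 sets the EL and S bits: end the descriptor list here
+	   and suspend the receive unit once this frame is consumed.) */
+
+/* Illustrative sketch, not part of this patch: the receive frame
+ * descriptor this loop writes out word-by-word, restated as a struct.
+ * The struct and field names are assumptions derived from the offsets
+ * and comments above. */
+#if 0
+struct el16_rfd {
+	u16 status;		/* +0                             */
+	u16 command;		/* +2  EL/S bits live here        */
+	u16 link;		/* +4  offset of the next RFD     */
+	u16 buf_offset;		/* +6  points at +22 below        */
+	u16 pad[7];		/* +8  dest/source/proto padding  */
+	u16 actual_count;	/* +22 in-line buffer descriptor  */
+	u16 next_buf;		/* +24 -1: no buffer chaining     */
+	u16 buf_lo;		/* +26 buffer address, low word   */
+	u16 buf_hi;		/* +28 buffer address, high word  */
+	u16 size;		/* +30 0x8000 | usable byte count */
+};
+#endif
+
+	/* Close the ring: link the tail RFD back to the head.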
*/ + writew(lp->rx_head,write_ptr+2); /* Link */ +} + +static void init_82586_mem(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + short ioaddr = dev->base_addr; + void __iomem *shmem = lp->base; + + /* Enable loopback to protect the wire while starting up, + and hold the 586 in reset during the memory initialization. */ + outb(0x20, ioaddr + MISC_CTRL); + + /* Fix the ISCP address and base. */ + init_words[3] = SCB_BASE; + init_words[7] = SCB_BASE; + + /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */ + memcpy_toio(lp->base + RX_BUF_END - 10, init_words, 10); + + /* Write the words at 0x0000. */ + memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10); + + /* Fill in the station address. */ + memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN); + + /* The Tx-block list is written as needed. We just set up the values. */ + lp->tx_cmd_link = IDLELOOP + 4; + lp->tx_head = lp->tx_reap = TX_BUF_START; + + init_rx_bufs(dev); + + /* Start the 586 by releasing the reset line, but leave loopback. */ + outb(0xA0, ioaddr + MISC_CTRL); + + /* This was time consuming to track down: you need to give two channel + attention signals to reliably start up the i82586. */ + outb(0, ioaddr + SIGNAL_CA); + + { + int boguscnt = 50; + while (readw(shmem+iSCB_STATUS) == 0) + if (--boguscnt == 0) { + pr_warning("%s: i82586 initialization timed out with status %04x, cmd %04x.\n", + dev->name, readw(shmem+iSCB_STATUS), readw(shmem+iSCB_CMD)); + break; + } + /* Issue channel-attn -- the 82586 won't start. */ + outb(0, ioaddr + SIGNAL_CA); + } + + /* Disable loopback and enable interrupts. */ + outb(0x84, ioaddr + MISC_CTRL); + if (net_debug > 4) + pr_debug("%s: Initialized 82586, status %04x.\n", dev->name, + readw(shmem+iSCB_STATUS)); +} + +static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad) +{ + struct net_local *lp = netdev_priv(dev); + short ioaddr = dev->base_addr; + ushort tx_block = lp->tx_head; + void __iomem *write_ptr = lp->base + tx_block; + static char padding[ETH_ZLEN]; + + /* Set the write pointer to the Tx block, and put out the header. */ + writew(0x0000,write_ptr); /* Tx status */ + writew(CMD_INTR|CmdTx,write_ptr+=2); /* Tx command */ + writew(tx_block+16,write_ptr+=2); /* Next command is a NoOp. */ + writew(tx_block+8,write_ptr+=2); /* Data Buffer offset. */ + + /* Output the data buffer descriptor. */ + writew((pad + length) | 0x8000,write_ptr+=2); /* Byte count parameter. */ + writew(-1,write_ptr+=2); /* No next data buffer. */ + writew(tx_block+22+SCB_BASE,write_ptr+=2); /* Buffer follows the NoOp command. */ + writew(0x0000,write_ptr+=2); /* Buffer address high bits (always zero). */ + + /* Output the Loop-back NoOp command. */ + writew(0x0000,write_ptr+=2); /* Tx status */ + writew(CmdNOp,write_ptr+=2); /* Tx command */ + writew(tx_block+16,write_ptr+=2); /* Next is myself. */ + + /* Output the packet at the write pointer. */ + memcpy_toio(write_ptr+2, buf, length); + if (pad) + memcpy_toio(write_ptr+length+2, padding, pad); + + /* Set the old command link pointing to this send packet. */ + writew(tx_block,lp->base + lp->tx_cmd_link); + lp->tx_cmd_link = tx_block + 20; + + /* Set the next free tx region. 
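+	   (Each slot is TX_BUF_SIZE bytes: a 22-byte command header
+	   followed by the frame data itself.) */
+
+/* Illustrative sketch, not part of this patch: the transmit block that
+ * hardware_send_packet() has just assembled, restated as a struct.
+ * The struct and field names are assumptions for illustration. */
+#if 0
+struct el16_tx_block {
+	u16 tx_status;		/* +0                               */
+	u16 tx_command;		/* +2  CMD_INTR | CmdTx             */
+	u16 next;		/* +4  -> the NoOp at +16           */
+	u16 tbd_offset;		/* +6  -> the descriptor at +8      */
+	u16 byte_count;		/* +8  0x8000 | (length + pad)      */
+	u16 tbd_next;		/* +10 -1: single buffer            */
+	u16 buf_lo;		/* +12 frame data lives at +22      */
+	u16 buf_hi;		/* +14 always zero                  */
+	u16 nop_status;		/* +16 self-linked NoOp keeps the   */
+	u16 nop_command;	/* +18 command unit spinning until  */
+	u16 nop_link;		/* +20 patched to chain the next frame */
+};
+#endif
+
+	/* Advance tx_head to the next free slot, wrapping if needed.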
*/ + lp->tx_head = tx_block + TX_BUF_SIZE; + if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE) + lp->tx_head = TX_BUF_START; + + if (net_debug > 4) { + pr_debug("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n", + dev->name, ioaddr, length, tx_block, lp->tx_head); + } + + /* Grimly block further packets if there has been insufficient reaping. */ + if (++lp->tx_pkts_in_ring < NUM_TX_BUFS) + netif_wake_queue(dev); +} + +static void el16_rx(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + void __iomem *shmem = lp->base; + ushort rx_head = lp->rx_head; + ushort rx_tail = lp->rx_tail; + ushort boguscount = 10; + short frame_status; + + while ((frame_status = readw(shmem+rx_head)) < 0) { /* Command complete */ + void __iomem *read_frame = lp->base + rx_head; + ushort rfd_cmd = readw(read_frame+2); + ushort next_rx_frame = readw(read_frame+4); + ushort data_buffer_addr = readw(read_frame+6); + void __iomem *data_frame = lp->base + data_buffer_addr; + ushort pkt_len = readw(data_frame); + + if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22 || + (pkt_len & 0xC000) != 0xC000) { + pr_err("%s: Rx frame at %#x corrupted, " + "status %04x cmd %04x next %04x " + "data-buf @%04x %04x.\n", + dev->name, rx_head, frame_status, rfd_cmd, + next_rx_frame, data_buffer_addr, pkt_len); + } else if ((frame_status & 0x2000) == 0) { + /* Frame Rxed, but with error. */ + dev->stats.rx_errors++; + if (frame_status & 0x0800) dev->stats.rx_crc_errors++; + if (frame_status & 0x0400) dev->stats.rx_frame_errors++; + if (frame_status & 0x0200) dev->stats.rx_fifo_errors++; + if (frame_status & 0x0100) dev->stats.rx_over_errors++; + if (frame_status & 0x0080) dev->stats.rx_length_errors++; + } else { + /* Malloc up new buffer. */ + struct sk_buff *skb; + + pkt_len &= 0x3fff; + skb = dev_alloc_skb(pkt_len+2); + if (skb == NULL) { + pr_err("%s: Memory squeeze, dropping packet.\n", + dev->name); + dev->stats.rx_dropped++; + break; + } + + skb_reserve(skb,2); + + /* 'skb->data' points to the start of sk_buff data area. */ + memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len); + + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + + /* Clear the status word and set End-of-List on the rx frame. */ + writew(0,read_frame); + writew(0xC000,read_frame+2); + /* Clear the end-of-list on the prev. RFD. */ + writew(0x0000,lp->base + rx_tail + 2); + + rx_tail = rx_head; + rx_head = next_rx_frame; + if (--boguscount == 0) + break; + } + + lp->rx_head = rx_head; + lp->rx_tail = rx_tail; +} + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + return debug; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 level) +{ + debug = level; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, +}; + +#ifdef MODULE +static struct net_device *dev_3c507; +module_param(io, int, 0); +module_param(irq, int, 0); +MODULE_PARM_DESC(io, "EtherLink16 I/O base address"); +MODULE_PARM_DESC(irq, "(ignored)"); + +int __init init_module(void) +{ + if (io == 0) + pr_notice("3c507: You should not use auto-probing with insmod!\n"); + dev_3c507 = el16_probe(-1); + return IS_ERR(dev_3c507) ? 
PTR_ERR(dev_3c507) : 0; +} + +void __exit +cleanup_module(void) +{ + struct net_device *dev = dev_3c507; + unregister_netdev(dev); + free_irq(dev->irq, dev); + iounmap(((struct net_local *)netdev_priv(dev))->base); + release_region(dev->base_addr, EL16_IO_EXTENT); + free_netdev(dev); +} +#endif /* MODULE */ +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/i825xx/3c523.c b/drivers/net/ethernet/i825xx/3c523.c new file mode 100644 index 0000000..bc0d1a1 --- /dev/null +++ b/drivers/net/ethernet/i825xx/3c523.c @@ -0,0 +1,1312 @@ +/* + net-3-driver for the 3c523 Etherlink/MC card (i82586 Ethernet chip) + + + This is an extension to the Linux operating system, and is covered by the + same GNU General Public License that covers that work. + + Copyright 1995, 1996 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca) + + This is basically Michael Hipp's ni52 driver, with a new probing + algorithm and some minor changes to the 82586 CA and reset routines. + Thanks a lot Michael for a really clean i82586 implementation! Unless + otherwise documented in ni52.c, any bugs are mine. + + Contrary to the Ethernet-HOWTO, this isn't based on the 3c507 driver in + any way. The ni52 is a lot easier to modify. + + sources: + ni52.c + + Crynwr packet driver collection was a great reference for my first + attempt at this sucker. The 3c507 driver also helped, until I noticed + that ni52.c was a lot nicer. + + EtherLink/MC: Micro Channel Ethernet Adapter Technical Reference + Manual, courtesy of 3Com CardFacts, documents the 3c523-specific + stuff. Information on CardFacts is found in the Ethernet HOWTO. + Also see + + Microprocessor Communications Support Chips, T.J. Byers, ISBN + 0-444-01224-9, has a section on the i82586. It tells you just enough + to know that you really don't want to learn how to program the chip. + + The original device probe code was stolen from ps2esdi.c + + Known Problems: + Since most of the code was stolen from ni52.c, you'll run across the + same bugs in the 0.62 version of ni52.c, plus maybe a few because of + the 3c523 idiosynchacies. The 3c523 has 16K of RAM though, so there + shouldn't be the overrun problem that the 8K ni52 has. + + This driver is for a 16K adapter. It should work fine on the 64K + adapters, but it will only use one of the 4 banks of RAM. Modifying + this for the 64K version would require a lot of heinous bank + switching, which I'm sure not interested in doing. If you try to + implement a bank switching version, you'll basically have to remember + what bank is enabled and do a switch every time you access a memory + location that's not current. You'll also have to remap pointers on + the driver side, because it only knows about 16K of the memory. + Anyone desperate or masochistic enough to try? + + It seems to be stable now when multiple transmit buffers are used. I + can't see any performance difference, but then I'm working on a 386SX. + + Multicast doesn't work. It doesn't even pretend to work. Don't use + it. Don't compile your kernel with multicast support. I don't know + why. + + Features: + This driver is useable as a loadable module. If you try to specify an + IRQ or a IO address (via insmod 3c523.o irq=xx io=0xyyy), it will + search the MCA slots until it finds a 3c523 with the specified + parameters. + + This driver does support multiple ethernet cards when used as a module + (up to MAX_3C523_CARDS, the default being 4) + + This has been tested with both BNC and TP versions, internal and + external transceivers. 
Haven't tested with the 64K version (that I + know of). + + History: + Jan 1st, 1996 + first public release + Feb 4th, 1996 + update to 1.3.59, incorporated multicast diffs from ni52.c + Feb 15th, 1996 + added shared irq support + Apr 1999 + added support for multiple cards when used as a module + added option to disable multicast as is causes problems + Ganesh Sittampalam + Stuart Adamson + Nov 2001 + added support for ethtool (jgarzik) + + $Header: /fsys2/home/chrisb/linux-1.3.59-MCA/drivers/net/RCS/3c523.c,v 1.1 1996/02/05 01:53:46 chrisb Exp chrisb $ + */ + +#define DRV_NAME "3c523" +#define DRV_VERSION "17-Nov-2001" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "3c523.h" + +/*************************************************************************/ +#define DEBUG /* debug on */ +#define SYSBUSVAL 0 /* 1 = 8 Bit, 0 = 16 bit - 3c523 only does 16 bit */ +#undef ELMC_MULTICAST /* Disable multicast support as it is somewhat seriously broken at the moment */ + +#define make32(ptr16) (p->memtop + (short) (ptr16) ) +#define make24(ptr32) ((char *) (ptr32) - p->base) +#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop )) + +/*************************************************************************/ +/* + Tables to which we can map values in the configuration registers. + */ +static int irq_table[] __initdata = { + 12, 7, 3, 9 +}; + +static int csr_table[] __initdata = { + 0x300, 0x1300, 0x2300, 0x3300 +}; + +static int shm_table[] __initdata = { + 0x0c0000, 0x0c8000, 0x0d0000, 0x0d8000 +}; + +/******************* how to calculate the buffers ***************************** + + + * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works + * --------------- in a different (more stable?) mode. Only in this mode it's + * possible to configure the driver with 'NO_NOPCOMMANDS' + +sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8; +sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT +sizeof(rfd) = 24; sizeof(rbd) = 12; +sizeof(tbd) = 8; sizeof(transmit_cmd) = 16; +sizeof(nop_cmd) = 8; + + * if you don't know the driver, better do not change this values: */ + +#define RECV_BUFF_SIZE 1524 /* slightly oversized */ +#define XMIT_BUFF_SIZE 1524 /* slightly oversized */ +#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */ +#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */ +#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */ + +#if (NUM_XMIT_BUFFS == 1) +#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */ +#endif + +/**************************************************************************/ + +#define DELAY(x) { mdelay(32 * x); } + +/* a much shorter delay: */ +#define DELAY_16(); { udelay(16) ; } + +/* wait for command with timeout: */ +#define WAIT_4_SCB_CMD() { int i; \ + for(i=0;i<1024;i++) { \ + if(!p->scb->cmd) break; \ + DELAY_16(); \ + if(i == 1023) { \ + pr_warning("%s:%d: scb_cmd timed out .. 
resetting i82586\n",\ + dev->name,__LINE__); \ + elmc_id_reset586(); } } } + +static irqreturn_t elmc_interrupt(int irq, void *dev_id); +static int elmc_open(struct net_device *dev); +static int elmc_close(struct net_device *dev); +static netdev_tx_t elmc_send_packet(struct sk_buff *, struct net_device *); +static struct net_device_stats *elmc_get_stats(struct net_device *dev); +static void elmc_timeout(struct net_device *dev); +#ifdef ELMC_MULTICAST +static void set_multicast_list(struct net_device *dev); +#endif +static const struct ethtool_ops netdev_ethtool_ops; + +/* helper-functions */ +static int init586(struct net_device *dev); +static int check586(struct net_device *dev, unsigned long where, unsigned size); +static void alloc586(struct net_device *dev); +static void startrecv586(struct net_device *dev); +static void *alloc_rfa(struct net_device *dev, void *ptr); +static void elmc_rcv_int(struct net_device *dev); +static void elmc_xmt_int(struct net_device *dev); +static void elmc_rnr_int(struct net_device *dev); + +struct priv { + unsigned long base; + char *memtop; + unsigned long mapped_start; /* Start of ioremap */ + volatile struct rfd_struct *rfd_last, *rfd_top, *rfd_first; + volatile struct scp_struct *scp; /* volatile is important */ + volatile struct iscp_struct *iscp; /* volatile is important */ + volatile struct scb_struct *scb; /* volatile is important */ + volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; +#if (NUM_XMIT_BUFFS == 1) + volatile struct transmit_cmd_struct *xmit_cmds[2]; + volatile struct nop_cmd_struct *nop_cmds[2]; +#else + volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; + volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; +#endif + volatile int nop_point, num_recv_buffs; + volatile char *xmit_cbuffs[NUM_XMIT_BUFFS]; + volatile int xmit_count, xmit_last; + volatile int slot; +}; + +#define elmc_attn586() {elmc_do_attn586(dev->base_addr,ELMC_CTRL_INTE);} +#define elmc_reset586() {elmc_do_reset586(dev->base_addr,ELMC_CTRL_INTE);} + +/* with interrupts disabled - this will clear the interrupt bit in the + 3c523 control register, and won't put it back. This effectively + disables interrupts on the card. */ +#define elmc_id_attn586() {elmc_do_attn586(dev->base_addr,0);} +#define elmc_id_reset586() {elmc_do_reset586(dev->base_addr,0);} + +/*************************************************************************/ +/* + Do a Channel Attention on the 3c523. This is extremely board dependent. + */ +static void elmc_do_attn586(int ioaddr, int ints) +{ + /* the 3c523 requires a minimum of 500 ns. The delays here might be + a little too large, and hence they may cut the performance of the + card slightly. If someone who knows a little more about Linux + timing would care to play with these, I'd appreciate it. */ + + /* this bit masking stuff is crap. I'd rather have separate + registers with strobe triggers for each of these functions. + Ya take what ya got. */ + + outb(ELMC_CTRL_RST | 0x3 | ELMC_CTRL_CA | ints, ioaddr + ELMC_CTRL); + DELAY_16(); /* > 500 ns */ + outb(ELMC_CTRL_RST | 0x3 | ints, ioaddr + ELMC_CTRL); +} + +/*************************************************************************/ +/* + Reset the 82586 on the 3c523. Also very board dependent. 
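+   (The 3c523 technical reference, not the i82586 datasheet, governs
+   this sequencing; the chip itself only sees its RST pin move.) */
+
+/* Illustrative sketch, not part of this patch: the WAIT_4_SCB_CMD()
+ * macro above restated as a function, to make the poll-and-timeout
+ * structure explicit. The function name is an assumption. */
+#if 0
+static void elmc_wait_scb_cmd(struct net_device *dev, struct priv *p)
+{
+	int i;
+
+	for (i = 0; i < 1024; i++) {
+		if (!p->scb->cmd)
+			return;			/* command accepted */
+		udelay(16);			/* DELAY_16() */
+	}
+	pr_warning("%s: scb_cmd timed out .. resetting i82586\n",
+		   dev->name);
+	elmc_id_reset586();
+}
+#endif
+
+/*
+   Reset the 82586: toggle the RST bit low, then high, then issue a
+   channel attention.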
+ */ +static void elmc_do_reset586(int ioaddr, int ints) +{ + /* toggle the RST bit low then high */ + outb(0x3 | ELMC_CTRL_LBK, ioaddr + ELMC_CTRL); + DELAY_16(); /* > 500 ns */ + outb(ELMC_CTRL_RST | ELMC_CTRL_LBK | 0x3, ioaddr + ELMC_CTRL); + + elmc_do_attn586(ioaddr, ints); +} + +/********************************************** + * close device + */ + +static int elmc_close(struct net_device *dev) +{ + netif_stop_queue(dev); + elmc_id_reset586(); /* the hard way to stop the receiver */ + free_irq(dev->irq, dev); + return 0; +} + +/********************************************** + * open device + */ + +static int elmc_open(struct net_device *dev) +{ + int ret; + + elmc_id_attn586(); /* disable interrupts */ + + ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED, + dev->name, dev); + if (ret) { + pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq); + elmc_id_reset586(); + return ret; + } + alloc586(dev); + init586(dev); + startrecv586(dev); + netif_start_queue(dev); + return 0; /* most done by init */ +} + +/********************************************** + * Check to see if there's an 82586 out there. + */ + +static int __init check586(struct net_device *dev, unsigned long where, unsigned size) +{ + struct priv *p = netdev_priv(dev); + char *iscp_addrs[2]; + int i = 0; + + p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000; + p->memtop = isa_bus_to_virt((unsigned long)where) + size; + p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); + memset((char *) p->scp, 0, sizeof(struct scp_struct)); + p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */ + + iscp_addrs[0] = isa_bus_to_virt((unsigned long)where); + iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct); + + for (i = 0; i < 2; i++) { + p->iscp = (struct iscp_struct *) iscp_addrs[i]; + memset((char *) p->iscp, 0, sizeof(struct iscp_struct)); + + p->scp->iscp = make24(p->iscp); + p->iscp->busy = 1; + + elmc_id_reset586(); + + /* reset586 does an implicit CA */ + + /* apparently, you sometimes have to kick the 82586 twice... */ + elmc_id_attn586(); + DELAY(1); + + if (p->iscp->busy) { /* i82586 clears 'busy' after successful init */ + return 0; + } + } + return 1; +} + +/****************************************************************** + * set iscp at the right place, called by elmc_probe and open586. 
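+ * The i82586 fetches its SCP from the top of its 24-bit address
+ * space, follows it to the ISCP, and clears the ISCP 'busy' byte
+ * once the reset handshake has succeeded; that byte is exactly what
+ * check586() above polls.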
+ */ + +static void alloc586(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + elmc_id_reset586(); + DELAY(2); + + p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS); + p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start); + p->iscp = (struct iscp_struct *) ((char *) p->scp - sizeof(struct iscp_struct)); + + memset((char *) p->iscp, 0, sizeof(struct iscp_struct)); + memset((char *) p->scp, 0, sizeof(struct scp_struct)); + + p->scp->iscp = make24(p->iscp); + p->scp->sysbus = SYSBUSVAL; + p->iscp->scb_offset = make16(p->scb); + + p->iscp->busy = 1; + elmc_id_reset586(); + elmc_id_attn586(); + + DELAY(2); + + if (p->iscp->busy) + pr_err("%s: Init-Problems (alloc).\n", dev->name); + + memset((char *) p->scb, 0, sizeof(struct scb_struct)); +} + +/*****************************************************************/ + +static int elmc_getinfo(char *buf, int slot, void *d) +{ + int len = 0; + struct net_device *dev = d; + + if (dev == NULL) + return len; + + len += sprintf(buf + len, "Revision: 0x%x\n", + inb(dev->base_addr + ELMC_REVISION) & 0xf); + len += sprintf(buf + len, "IRQ: %d\n", dev->irq); + len += sprintf(buf + len, "IO Address: %#lx-%#lx\n", dev->base_addr, + dev->base_addr + ELMC_IO_EXTENT); + len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start, + dev->mem_end - 1); + len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ? + "External" : "Internal"); + len += sprintf(buf + len, "Device: %s\n", dev->name); + len += sprintf(buf + len, "Hardware Address: %pM\n", + dev->dev_addr); + + return len; +} /* elmc_getinfo() */ + +static const struct net_device_ops netdev_ops = { + .ndo_open = elmc_open, + .ndo_stop = elmc_close, + .ndo_get_stats = elmc_get_stats, + .ndo_start_xmit = elmc_send_packet, + .ndo_tx_timeout = elmc_timeout, +#ifdef ELMC_MULTICAST + .ndo_set_multicast_list = set_multicast_list, +#endif + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/*****************************************************************/ + +static int __init do_elmc_probe(struct net_device *dev) +{ + static int slot; + int base_addr = dev->base_addr; + int irq = dev->irq; + u_char status = 0; + u_char revision = 0; + int i = 0; + unsigned int size = 0; + int retval; + struct priv *pr = netdev_priv(dev); + + if (MCA_bus == 0) { + return -ENODEV; + } + /* search through the slots for the 3c523. */ + slot = mca_find_adapter(ELMC_MCA_ID, 0); + while (slot != -1) { + status = mca_read_stored_pos(slot, 2); + + dev->irq=irq_table[(status & ELMC_STATUS_IRQ_SELECT) >> 6]; + dev->base_addr=csr_table[(status & ELMC_STATUS_CSR_SELECT) >> 1]; + + /* + If we're trying to match a specified irq or IO address, + we'll reject a match unless it's what we're looking for. + Also reject it if the card is already in use. + */ + + if ((irq && irq != dev->irq) || + (base_addr && base_addr != dev->base_addr)) { + slot = mca_find_adapter(ELMC_MCA_ID, slot + 1); + continue; + } + if (!request_region(dev->base_addr, ELMC_IO_EXTENT, DRV_NAME)) { + slot = mca_find_adapter(ELMC_MCA_ID, slot + 1); + continue; + } + + /* found what we're looking for... */ + break; + } + + /* we didn't find any 3c523 in the slots we checked for */ + if (slot == MCA_NOTFOUND) + return (base_addr || irq) ? 
-ENXIO : -ENODEV; + + mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC"); + mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev); + + /* if we get this far, adapter has been found - carry on */ + pr_info("%s: 3c523 adapter found in slot %d\n", dev->name, slot + 1); + + /* Now we extract configuration info from the card. + The 3c523 provides information in two of the POS registers, but + the second one is only needed if we want to tell the card what IRQ + to use. I suspect that whoever sets the thing up initially would + prefer we don't screw with those things. + + Note that we read the status info when we found the card... + + See 3c523.h for more details. + */ + + /* revision is stored in the first 4 bits of the revision register */ + revision = inb(dev->base_addr + ELMC_REVISION) & 0xf; + + /* according to docs, we read the interrupt and write it back to + the IRQ select register, since the POST might not configure the IRQ + properly. */ + switch (dev->irq) { + case 3: + mca_write_pos(slot, 3, 0x04); + break; + case 7: + mca_write_pos(slot, 3, 0x02); + break; + case 9: + mca_write_pos(slot, 3, 0x08); + break; + case 12: + mca_write_pos(slot, 3, 0x01); + break; + } + + pr->slot = slot; + + pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision, + dev->base_addr); + + /* Determine if we're using the on-board transceiver (i.e. coax) or + an external one. The information is pretty much useless, but I + guess it's worth brownie points. */ + dev->if_port = (status & ELMC_STATUS_DISABLE_THIN); + + /* The 3c523 has a 24K chunk of memory. The first 16K is the + shared memory, while the last 8K is for the EtherStart BIOS ROM. + Which we don't care much about here. We'll just tell Linux that + we're using 16K. MCA won't permit address space conflicts caused + by not mapping the other 8K. */ + dev->mem_start = shm_table[(status & ELMC_STATUS_MEMORY_SELECT) >> 3]; + + /* We're using MCA, so it's a given that the information about memory + size is correct. The Crynwr drivers do something like this. */ + + elmc_id_reset586(); /* seems like a good idea before checking it... */ + + size = 0x4000; /* check for 16K mem */ + if (!check586(dev, dev->mem_start, size)) { + pr_err("%s: memprobe, Can't find memory at 0x%lx!\n", dev->name, + dev->mem_start); + retval = -ENODEV; + goto err_out; + } + dev->mem_end = dev->mem_start + size; /* set mem_end showed by 'ifconfig' */ + + pr->memtop = isa_bus_to_virt(dev->mem_start) + size; + pr->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000; + alloc586(dev); + + elmc_id_reset586(); /* make sure it doesn't generate spurious ints */ + + /* set number of receive-buffs according to memsize */ + pr->num_recv_buffs = NUM_RECV_BUFFS_16; + + /* dump all the assorted information */ + pr_info("%s: IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->name, + dev->irq, dev->if_port ? "ex" : "in", + dev->mem_start, dev->mem_end - 1); + + /* The hardware address for the 3c523 is stored in the first six + bytes of the IO address. */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(dev->base_addr + i); + + pr_info("%s: hardware address %pM\n", + dev->name, dev->dev_addr); + + dev->netdev_ops = &netdev_ops; + dev->watchdog_timeo = HZ; + dev->ethtool_ops = &netdev_ethtool_ops; + + /* note that we haven't actually requested the IRQ from the kernel. + That gets done in elmc_open(). I'm not sure that's such a good idea, + but it works, so I'll go with it. 
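+	   (It also suits the shared-IRQ support: the line is only held
+	   while the interface is actually up.) */
+
+/* Illustrative recap, not part of this patch: how the POS byte read
+ * near the top of do_elmc_probe() selects the configuration, using
+ * the tables defined at the top of the file. */
+#if 0
+	status = mca_read_stored_pos(slot, 2);
+	dev->irq       = irq_table[(status & ELMC_STATUS_IRQ_SELECT) >> 6];
+	dev->base_addr = csr_table[(status & ELMC_STATUS_CSR_SELECT) >> 1];
+	dev->mem_start = shm_table[(status & ELMC_STATUS_MEMORY_SELECT) >> 3];
+#endif
+
+	/* Multicast is compiled out by default; see ELMC_MULTICAST above.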
*/ + +#ifndef ELMC_MULTICAST + dev->flags&=~IFF_MULTICAST; /* Multicast doesn't work */ +#endif + + retval = register_netdev(dev); + if (retval) + goto err_out; + + return 0; +err_out: + mca_set_adapter_procfn(slot, NULL, NULL); + release_region(dev->base_addr, ELMC_IO_EXTENT); + return retval; +} + +#ifdef MODULE +static void cleanup_card(struct net_device *dev) +{ + mca_set_adapter_procfn(((struct priv *)netdev_priv(dev))->slot, + NULL, NULL); + release_region(dev->base_addr, ELMC_IO_EXTENT); +} +#else +struct net_device * __init elmc_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct priv)); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_elmc_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +/********************************************** + * init the chip (elmc-interrupt should be disabled?!) + * needs a correct 'allocated' memory + */ + +static int init586(struct net_device *dev) +{ + void *ptr; + unsigned long s; + int i, result = 0; + struct priv *p = netdev_priv(dev); + volatile struct configure_cmd_struct *cfg_cmd; + volatile struct iasetup_cmd_struct *ias_cmd; + volatile struct tdr_cmd_struct *tdr_cmd; + volatile struct mcsetup_cmd_struct *mc_cmd; + struct netdev_hw_addr *ha; + int num_addrs = netdev_mc_count(dev); + + ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct)); + + cfg_cmd = (struct configure_cmd_struct *) ptr; /* configure-command */ + cfg_cmd->cmd_status = 0; + cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST; + cfg_cmd->cmd_link = 0xffff; + + cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ + cfg_cmd->fifo = 0x08; /* fifo-limit (8=tx:32/rx:64) */ + cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ + cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ + cfg_cmd->priority = 0x00; + cfg_cmd->ifs = 0x60; + cfg_cmd->time_low = 0x00; + cfg_cmd->time_high = 0xf2; + cfg_cmd->promisc = 0; + if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) + cfg_cmd->promisc = 1; + cfg_cmd->carr_coll = 0x00; + + p->scb->cbl_offset = make16(cfg_cmd); + + p->scb->cmd = CUC_START; /* cmd.-unit start */ + elmc_id_attn586(); + + s = jiffies; /* warning: only active with interrupts on !! */ + while (!(cfg_cmd->cmd_status & STAT_COMPL)) { + if (time_after(jiffies, s + 30*HZ/100)) + break; + } + + if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) { + pr_warning("%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status); + return 1; + } + /* + * individual address setup + */ + ias_cmd = (struct iasetup_cmd_struct *) ptr; + + ias_cmd->cmd_status = 0; + ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST; + ias_cmd->cmd_link = 0xffff; + + memcpy((char *) &ias_cmd->iaddr, (char *) dev->dev_addr, ETH_ALEN); + + p->scb->cbl_offset = make16(ias_cmd); + + p->scb->cmd = CUC_START; /* cmd.-unit start */ + elmc_id_attn586(); + + s = jiffies; + while (!(ias_cmd->cmd_status & STAT_COMPL)) { + if (time_after(jiffies, s + 30*HZ/100)) + break; + } + + if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) { + pr_warning("%s (elmc): individual address setup command failed: %04x\n", + dev->name, ias_cmd->cmd_status); + return 1; + } + /* + * TDR, wire check .. e.g. 
no resistor e.t.c + */ + tdr_cmd = (struct tdr_cmd_struct *) ptr; + + tdr_cmd->cmd_status = 0; + tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST; + tdr_cmd->cmd_link = 0xffff; + tdr_cmd->status = 0; + + p->scb->cbl_offset = make16(tdr_cmd); + + p->scb->cmd = CUC_START; /* cmd.-unit start */ + elmc_attn586(); + + s = jiffies; + while (!(tdr_cmd->cmd_status & STAT_COMPL)) { + if (time_after(jiffies, s + 30*HZ/100)) { + pr_warning("%s: %d Problems while running the TDR.\n", dev->name, __LINE__); + result = 1; + break; + } + } + + if (!result) { + DELAY(2); /* wait for result */ + result = tdr_cmd->status; + + p->scb->cmd = p->scb->status & STAT_MASK; + elmc_id_attn586(); /* ack the interrupts */ + + if (result & TDR_LNK_OK) { + /* empty */ + } else if (result & TDR_XCVR_PRB) { + pr_warning("%s: TDR: Transceiver problem!\n", dev->name); + } else if (result & TDR_ET_OPN) { + pr_warning("%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK); + } else if (result & TDR_ET_SRT) { + if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */ + pr_warning("%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK); + } else { + pr_warning("%s: TDR: Unknown status %04x\n", dev->name, result); + } + } + /* + * ack interrupts + */ + p->scb->cmd = p->scb->status & STAT_MASK; + elmc_id_attn586(); + + /* + * alloc nop/xmit-cmds + */ +#if (NUM_XMIT_BUFFS == 1) + for (i = 0; i < 2; i++) { + p->nop_cmds[i] = (struct nop_cmd_struct *) ptr; + p->nop_cmds[i]->cmd_cmd = CMD_NOP; + p->nop_cmds[i]->cmd_status = 0; + p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); + ptr = (char *) ptr + sizeof(struct nop_cmd_struct); + } + p->xmit_cmds[0] = (struct transmit_cmd_struct *) ptr; /* transmit cmd/buff 0 */ + ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); +#else + for (i = 0; i < NUM_XMIT_BUFFS; i++) { + p->nop_cmds[i] = (struct nop_cmd_struct *) ptr; + p->nop_cmds[i]->cmd_cmd = CMD_NOP; + p->nop_cmds[i]->cmd_status = 0; + p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); + ptr = (char *) ptr + sizeof(struct nop_cmd_struct); + p->xmit_cmds[i] = (struct transmit_cmd_struct *) ptr; /*transmit cmd/buff 0 */ + ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); + } +#endif + + ptr = alloc_rfa(dev, (void *) ptr); /* init receive-frame-area */ + + /* + * Multicast setup + */ + + if (num_addrs) { + /* I don't understand this: do we really need memory after the init? 
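+	   (We do: the MC-setup command block is assembled in whatever
+	   shared memory remains between the last allocated structure
+	   and the ISCP, which is what the length check below measures.)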
*/ + int len = ((char *) p->iscp - (char *) ptr - 8) / 6; + if (len <= 0) { + pr_err("%s: Ooooops, no memory for MC-Setup!\n", dev->name); + } else { + if (len < num_addrs) { + num_addrs = len; + pr_warning("%s: Sorry, can only apply %d MC-Address(es).\n", + dev->name, num_addrs); + } + mc_cmd = (struct mcsetup_cmd_struct *) ptr; + mc_cmd->cmd_status = 0; + mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST; + mc_cmd->cmd_link = 0xffff; + mc_cmd->mc_cnt = num_addrs * 6; + i = 0; + netdev_for_each_mc_addr(ha, dev) + memcpy((char *) mc_cmd->mc_list[i++], + ha->addr, 6); + p->scb->cbl_offset = make16(mc_cmd); + p->scb->cmd = CUC_START; + elmc_id_attn586(); + s = jiffies; + while (!(mc_cmd->cmd_status & STAT_COMPL)) { + if (time_after(jiffies, s + 30*HZ/100)) + break; + } + if (!(mc_cmd->cmd_status & STAT_COMPL)) { + pr_warning("%s: Can't apply multicast-address-list.\n", dev->name); + } + } + } + /* + * alloc xmit-buffs / init xmit_cmds + */ + for (i = 0; i < NUM_XMIT_BUFFS; i++) { + p->xmit_cbuffs[i] = (char *) ptr; /* char-buffs */ + ptr = (char *) ptr + XMIT_BUFF_SIZE; + p->xmit_buffs[i] = (struct tbd_struct *) ptr; /* TBD */ + ptr = (char *) ptr + sizeof(struct tbd_struct); + if ((void *) ptr > (void *) p->iscp) { + pr_err("%s: not enough shared-mem for your configuration!\n", dev->name); + return 1; + } + memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct)); + memset((char *) (p->xmit_buffs[i]), 0, sizeof(struct tbd_struct)); + p->xmit_cmds[i]->cmd_status = STAT_COMPL; + p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT; + p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i])); + p->xmit_buffs[i]->next = 0xffff; + p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i])); + } + + p->xmit_count = 0; + p->xmit_last = 0; +#ifndef NO_NOPCOMMANDS + p->nop_point = 0; +#endif + + /* + * 'start transmitter' (nop-loop) + */ +#ifndef NO_NOPCOMMANDS + p->scb->cbl_offset = make16(p->nop_cmds[0]); + p->scb->cmd = CUC_START; + elmc_id_attn586(); + WAIT_4_SCB_CMD(); +#else + p->xmit_cmds[0]->cmd_link = 0xffff; + p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT; +#endif + + return 0; +} + +/****************************************************** + * This is a helper routine for elmc_rnr_int() and init586(). + * It sets up the Receive Frame Area (RFA). + */ + +static void *alloc_rfa(struct net_device *dev, void *ptr) +{ + volatile struct rfd_struct *rfd = (struct rfd_struct *) ptr; + volatile struct rbd_struct *rbd; + int i; + struct priv *p = netdev_priv(dev); + + memset((char *) rfd, 0, sizeof(struct rfd_struct) * p->num_recv_buffs); + p->rfd_first = rfd; + + for (i = 0; i < p->num_recv_buffs; i++) { + rfd[i].next = make16(rfd + (i + 1) % p->num_recv_buffs); + } + rfd[p->num_recv_buffs - 1].last = RFD_SUSP; /* RU suspend */ + + ptr = (void *) (rfd + p->num_recv_buffs); + + rbd = (struct rbd_struct *) ptr; + ptr = (void *) (rbd + p->num_recv_buffs); + + /* clr descriptors */ + memset((char *) rbd, 0, sizeof(struct rbd_struct) * p->num_recv_buffs); + + for (i = 0; i < p->num_recv_buffs; i++) { + rbd[i].next = make16((rbd + (i + 1) % p->num_recv_buffs)); + rbd[i].size = RECV_BUFF_SIZE; + rbd[i].buffer = make24(ptr); + ptr = (char *) ptr + RECV_BUFF_SIZE; + } + + p->rfd_top = p->rfd_first; + p->rfd_last = p->rfd_first + p->num_recv_buffs - 1; + + p->scb->rfa_offset = make16(p->rfd_first); + p->rfd_first->rbd_offset = make16(rbd); + + return ptr; +} + + +/************************************************** + * Interrupt Handler ... 
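+ *
+ * Acknowledge protocol: the handler writes the status bits straight
+ * back to scb->cmd and raises channel attention; STAT_CX and STAT_FR
+ * then dispatch to the transmit/receive handlers below, and STAT_RNR
+ * resumes or rebuilds the receive unit.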
+ */ + +static irqreturn_t +elmc_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + unsigned short stat; + struct priv *p; + + if (!netif_running(dev)) { + /* The 3c523 has this habit of generating interrupts during the + reset. I'm not sure if the ni52 has this same problem, but it's + really annoying if we haven't finished initializing it. I was + hoping all the elmc_id_* commands would disable this, but I + might have missed a few. */ + + elmc_id_attn586(); /* ack inter. and disable any more */ + return IRQ_HANDLED; + } else if (!(ELMC_CTRL_INT & inb(dev->base_addr + ELMC_CTRL))) { + /* wasn't this device */ + return IRQ_NONE; + } + /* reading ELMC_CTRL also clears the INT bit. */ + + p = netdev_priv(dev); + + while ((stat = p->scb->status & STAT_MASK)) + { + p->scb->cmd = stat; + elmc_attn586(); /* ack inter. */ + + if (stat & STAT_CX) { + /* command with I-bit set complete */ + elmc_xmt_int(dev); + } + if (stat & STAT_FR) { + /* received a frame */ + elmc_rcv_int(dev); + } +#ifndef NO_NOPCOMMANDS + if (stat & STAT_CNA) { + /* CU went 'not ready' */ + if (netif_running(dev)) { + pr_warning("%s: oops! CU has left active state. stat: %04x/%04x.\n", + dev->name, (int) stat, (int) p->scb->status); + } + } +#endif + + if (stat & STAT_RNR) { + /* RU went 'not ready' */ + + if (p->scb->status & RU_SUSPEND) { + /* special case: RU_SUSPEND */ + + WAIT_4_SCB_CMD(); + p->scb->cmd = RUC_RESUME; + elmc_attn586(); + } else { + pr_warning("%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n", + dev->name, (int) stat, (int) p->scb->status); + elmc_rnr_int(dev); + } + } + WAIT_4_SCB_CMD(); /* wait for ack. (elmc_xmt_int can be faster than ack!!) */ + if (p->scb->cmd) { /* timed out? */ + break; + } + } + return IRQ_HANDLED; +} + +/******************************************************* + * receive-interrupt + */ + +static void elmc_rcv_int(struct net_device *dev) +{ + int status; + unsigned short totlen; + struct sk_buff *skb; + struct rbd_struct *rbd; + struct priv *p = netdev_priv(dev); + + for (; (status = p->rfd_top->status) & STAT_COMPL;) { + rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); + + if (status & STAT_OK) { /* frame received without error? */ + if ((totlen = rbd->status) & RBD_LAST) { /* the first and the last buffer? */ + totlen &= RBD_MASK; /* length of this frame */ + rbd->status = 0; + skb = (struct sk_buff *) dev_alloc_skb(totlen + 2); + if (skb != NULL) { + skb_reserve(skb, 2); /* 16 byte alignment */ + skb_put(skb,totlen); + skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += totlen; + } else { + dev->stats.rx_dropped++; + } + } else { + pr_warning("%s: received oversized frame.\n", dev->name); + dev->stats.rx_dropped++; + } + } else { /* frame !(ok), only with 'save-bad-frames' */ + pr_warning("%s: oops! rfd-error-status: %04x\n", dev->name, status); + dev->stats.rx_errors++; + } + p->rfd_top->status = 0; + p->rfd_top->last = RFD_SUSP; + p->rfd_last->last = 0; /* delete RU_SUSP */ + p->rfd_last = p->rfd_top; + p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ + } +} + +/********************************************************** + * handle 'Receiver went not ready'. 
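+ * Usually the receive unit has simply run out of resources, so we
+ * abort it, rebuild the whole receive frame area with alloc_rfa()
+ * and start receiving again from scratch.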
+ */ + +static void elmc_rnr_int(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + dev->stats.rx_errors++; + + WAIT_4_SCB_CMD(); /* wait for the last cmd */ + p->scb->cmd = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ + elmc_attn586(); + WAIT_4_SCB_CMD(); /* wait for accept cmd. */ + + alloc_rfa(dev, (char *) p->rfd_first); + startrecv586(dev); /* restart RU */ + + pr_warning("%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->status); + +} + +/********************************************************** + * handle xmit - interrupt + */ + +static void elmc_xmt_int(struct net_device *dev) +{ + int status; + struct priv *p = netdev_priv(dev); + + status = p->xmit_cmds[p->xmit_last]->cmd_status; + if (!(status & STAT_COMPL)) { + pr_warning("%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name); + } + if (status & STAT_OK) { + dev->stats.tx_packets++; + dev->stats.collisions += (status & TCMD_MAXCOLLMASK); + } else { + dev->stats.tx_errors++; + if (status & TCMD_LATECOLL) { + pr_warning("%s: late collision detected.\n", dev->name); + dev->stats.collisions++; + } else if (status & TCMD_NOCARRIER) { + dev->stats.tx_carrier_errors++; + pr_warning("%s: no carrier detected.\n", dev->name); + } else if (status & TCMD_LOSTCTS) { + pr_warning("%s: loss of CTS detected.\n", dev->name); + } else if (status & TCMD_UNDERRUN) { + dev->stats.tx_fifo_errors++; + pr_warning("%s: DMA underrun detected.\n", dev->name); + } else if (status & TCMD_MAXCOLL) { + pr_warning("%s: Max. collisions exceeded.\n", dev->name); + dev->stats.collisions += 16; + } + } + +#if (NUM_XMIT_BUFFS != 1) + if ((++p->xmit_last) == NUM_XMIT_BUFFS) { + p->xmit_last = 0; + } +#endif + + netif_wake_queue(dev); +} + +/*********************************************************** + * (re)start the receiver + */ + +static void startrecv586(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + p->scb->rfa_offset = make16(p->rfd_first); + p->scb->cmd = RUC_START; + elmc_attn586(); /* start cmd. */ + WAIT_4_SCB_CMD(); /* wait for accept cmd. (no timeout!!) */ +} + +/****************************************************** + * timeout + */ + +static void elmc_timeout(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + /* COMMAND-UNIT active? */ + if (p->scb->status & CU_ACTIVE) { + pr_debug("%s: strange ... timeout with CU active?!?\n", dev->name); + pr_debug("%s: X0: %04x N0: %04x N1: %04x %d\n", dev->name, + (int)p->xmit_cmds[0]->cmd_status, + (int)p->nop_cmds[0]->cmd_status, + (int)p->nop_cmds[1]->cmd_status, (int)p->nop_point); + p->scb->cmd = CUC_ABORT; + elmc_attn586(); + WAIT_4_SCB_CMD(); + p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]); + p->scb->cmd = CUC_START; + elmc_attn586(); + WAIT_4_SCB_CMD(); + netif_wake_queue(dev); + } else { + pr_debug("%s: xmitter timed out, try to restart! stat: %04x\n", + dev->name, p->scb->status); + pr_debug("%s: command-stats: %04x %04x\n", dev->name, + p->xmit_cmds[0]->cmd_status, p->xmit_cmds[1]->cmd_status); + elmc_close(dev); + elmc_open(dev); + } +} + +/****************************************************** + * send frame + */ + +static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev) +{ + int len; + int i; +#ifndef NO_NOPCOMMANDS + int next_nop; +#endif + struct priv *p = netdev_priv(dev); + + netif_stop_queue(dev); + + len = (ETH_ZLEN < skb->len) ? 
skb->len : ETH_ZLEN; + + if (len != skb->len) + memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); + skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len); + +#if (NUM_XMIT_BUFFS == 1) +#ifdef NO_NOPCOMMANDS + p->xmit_buffs[0]->size = TBD_LAST | len; + for (i = 0; i < 16; i++) { + p->scb->cbl_offset = make16(p->xmit_cmds[0]); + p->scb->cmd = CUC_START; + p->xmit_cmds[0]->cmd_status = 0; + elmc_attn586(); + if (!i) { + dev_kfree_skb(skb); + } + WAIT_4_SCB_CMD(); + if ((p->scb->status & CU_ACTIVE)) { /* test it, because CU sometimes doesn't start immediately */ + break; + } + if (p->xmit_cmds[0]->cmd_status) { + break; + } + if (i == 15) { + pr_warning("%s: Can't start transmit-command.\n", dev->name); + } + } +#else + next_nop = (p->nop_point + 1) & 0x1; + p->xmit_buffs[0]->size = TBD_LAST | len; + + p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link + = make16((p->nop_cmds[next_nop])); + p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0; + + p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0])); + p->nop_point = next_nop; + dev_kfree_skb(skb); +#endif +#else + p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len; + if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) { + next_nop = 0; + } + p->xmit_cmds[p->xmit_count]->cmd_status = 0; + p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link + = make16((p->nop_cmds[next_nop])); + p->nop_cmds[next_nop]->cmd_status = 0; + p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); + p->xmit_count = next_nop; + if (p->xmit_count != p->xmit_last) + netif_wake_queue(dev); + dev_kfree_skb(skb); +#endif + return NETDEV_TX_OK; +} + +/******************************************* + * Someone wanna have the statistics + */ + +static struct net_device_stats *elmc_get_stats(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + unsigned short crc, aln, rsc, ovrn; + + crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */ + p->scb->crc_errs -= crc; + aln = p->scb->aln_errs; + p->scb->aln_errs -= aln; + rsc = p->scb->rsc_errs; + p->scb->rsc_errs -= rsc; + ovrn = p->scb->ovrn_errs; + p->scb->ovrn_errs -= ovrn; + + dev->stats.rx_crc_errors += crc; + dev->stats.rx_fifo_errors += ovrn; + dev->stats.rx_frame_errors += aln; + dev->stats.rx_dropped += rsc; + + return &dev->stats; +} + +/******************************************************** + * Set MC list .. 
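+ * The i82586's multicast filter can only be changed by re-running the
+ * whole initialisation sequence, so set_multicast_list() below just
+ * tears the interface down and brings it straight back up.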
+ */ + +#ifdef ELMC_MULTICAST +static void set_multicast_list(struct net_device *dev) +{ + if (!dev->start) { + /* without a running interface, promiscuous doesn't work */ + return; + } + dev->start = 0; + alloc586(dev); + init586(dev); + startrecv586(dev); + dev->start = 1; +} +#endif + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, +}; + +#ifdef MODULE + +/* Increase if needed ;) */ +#define MAX_3C523_CARDS 4 + +static struct net_device *dev_elmc[MAX_3C523_CARDS]; +static int irq[MAX_3C523_CARDS]; +static int io[MAX_3C523_CARDS]; +module_param_array(irq, int, NULL, 0); +module_param_array(io, int, NULL, 0); +MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)"); +MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)"); +MODULE_LICENSE("GPL"); + +int __init init_module(void) +{ + int this_dev,found = 0; + + /* Loop until we either can't find any more cards, or we have MAX_3C523_CARDS */ + for(this_dev=0; this_devirq=irq[this_dev]; + dev->base_addr=io[this_dev]; + if (do_elmc_probe(dev) == 0) { + dev_elmc[this_dev] = dev; + found++; + continue; + } + free_netdev(dev); + if (io[this_dev]==0) + break; + pr_warning("3c523.c: No 3c523 card found at io=%#x\n",io[this_dev]); + } + + if(found==0) { + if (io[0]==0) + pr_notice("3c523.c: No 3c523 cards found\n"); + return -ENXIO; + } else return 0; +} + +void __exit cleanup_module(void) +{ + int this_dev; + for (this_dev=0; this_dev + * Heavily modified by Richard Procter + * + * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c + * (for the MCA stuff) written by Wim Dumon. + * + * Thanks to 3Com for making this possible by providing me with the + * documentation. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + */ + +#define DRV_NAME "3c527" +#define DRV_VERSION "0.7-SMP" +#define DRV_RELDATE "2003/09/21" + +static const char *version = +DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter \n"; + +/** + * DOC: Traps for the unwary + * + * The diagram (Figure 1-1) and the POS summary disagree with the + * "Interrupt Level" section in the manual. + * + * The manual contradicts itself when describing the minimum number + * buffers in the 'configure lists' command. + * My card accepts a buffer config of 4/4. + * + * Setting the SAV BP bit does not save bad packets, but + * only enables RX on-card stats collection. + * + * The documentation in places seems to miss things. In actual fact + * I've always eventually found everything is documented, it just + * requires careful study. + * + * DOC: Theory Of Operation + * + * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large + * amount of on board intelligence that housekeeps a somewhat dumber + * Intel NIC. For performance we want to keep the transmit queue deep + * as the card can transmit packets while fetching others from main + * memory by bus master DMA. Transmission and reception are driven by + * circular buffer queues. + * + * The mailboxes can be used for controlling how the card traverses + * its buffer rings, but are used only for initial setup in this + * implementation. The exec mailbox allows a variety of commands to + * be executed. 
Each command must complete before the next is + * executed. Primarily we use the exec mailbox for controlling the + * multicast lists. We have to do a certain amount of interesting + * hoop jumping as the multicast list changes can occur in interrupt + * state when the card has an exec command pending. We defer such + * events until the command completion interrupt. + * + * A copy break scheme (taken from 3c59x.c) is employed whereby + * received frames exceeding a configurable length are passed + * directly to the higher networking layers without incuring a copy, + * in what amounts to a time/space trade-off. + * + * The card also keeps a large amount of statistical information + * on-board. In a perfect world, these could be used safely at no + * cost. However, lacking information to the contrary, processing + * them without races would involve so much extra complexity as to + * make it unworthwhile to do so. In the end, a hybrid SW/HW + * implementation was made necessary --- see mc32_update_stats(). + * + * DOC: Notes + * + * It should be possible to use two or more cards, but at this stage + * only by loading two copies of the same module. + * + * The on-board 82586 NIC has trouble receiving multiple + * back-to-back frames and so is likely to drop packets from fast + * senders. +**/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "3c527.h" + +MODULE_LICENSE("GPL"); + +/* + * The name of the card. Is used for messages and in the requests for + * io regions, irqs and dma channels + */ +static const char* cardname = DRV_NAME; + +/* use 0 for production, 1 for verification, >2 for debug */ +#ifndef NET_DEBUG +#define NET_DEBUG 2 +#endif + +static unsigned int mc32_debug = NET_DEBUG; + +/* The number of low I/O ports used by the ethercard. */ +#define MC32_IO_EXTENT 8 + +/* As implemented, values must be a power-of-2 -- 4/8/16/32 */ +#define TX_RING_LEN 32 /* Typically the card supports 37 */ +#define RX_RING_LEN 8 /* " " " */ + +/* Copy break point, see above for details. + * Setting to > 1512 effectively disables this feature. */ +#define RX_COPYBREAK 200 /* Value from 3c59x.c */ + +/* Issue the 82586 workaround command - this is for "busy lans", but + * basically means for all lans now days - has a performance (latency) + * cost, but best set. */ +static const int WORKAROUND_82586=1; + +/* Pointers to buffers and their on-card records */ +struct mc32_ring_desc +{ + volatile struct skb_header *p; + struct sk_buff *skb; +}; + +/* Information that needs to be kept for each board. 
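+   The cmd_mutex, the two completions and the atomic tx counters are
+   what make the "-SMP" in the version string honest. */
+
+/* Illustrative sketch, not part of this patch: the copy-break receive
+ * decision described in the DOC comment above. 'slot', 'ring_buf' and
+ * the replacement buffer size are assumptions for illustration. */
+#if 0
+static void mc32_copybreak_sketch(struct mc32_local *lp, u16 slot,
+				  void *ring_buf, int length)
+{
+	struct sk_buff *skb;
+
+	if (length <= RX_COPYBREAK) {
+		/* Small frame: copy into a fresh skb and keep the buffer. */
+		skb = dev_alloc_skb(length + 2);
+		if (skb) {
+			skb_reserve(skb, 2);	/* align the IP header */
+			memcpy(skb_put(skb, length), ring_buf, length);
+		}
+	} else {
+		/* Large frame: pass the ring skb up the stack unmodified
+		 * and give the ring slot a freshly allocated replacement. */
+		skb = lp->rx_ring[slot].skb;
+		lp->rx_ring[slot].skb = dev_alloc_skb(1532);
+	}
+}
+#endif
+
+/* Per-board private state.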
*/ +struct mc32_local +{ + int slot; + + u32 base; + volatile struct mc32_mailbox *rx_box; + volatile struct mc32_mailbox *tx_box; + volatile struct mc32_mailbox *exec_box; + volatile struct mc32_stats *stats; /* Start of on-card statistics */ + u16 tx_chain; /* Transmit list start offset */ + u16 rx_chain; /* Receive list start offset */ + u16 tx_len; /* Transmit list count */ + u16 rx_len; /* Receive list count */ + + u16 xceiver_desired_state; /* HALTED or RUNNING */ + u16 cmd_nonblocking; /* Thread is uninterested in command result */ + u16 mc_reload_wait; /* A multicast load request is pending */ + u32 mc_list_valid; /* True when the mclist is set */ + + struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */ + struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */ + + atomic_t tx_count; /* buffers left */ + atomic_t tx_ring_head; /* index to tx en-queue end */ + u16 tx_ring_tail; /* index to tx de-queue end */ + + u16 rx_ring_tail; /* index to rx de-queue end */ + + struct semaphore cmd_mutex; /* Serialises issuing of execute commands */ + struct completion execution_cmd; /* Card has completed an execute command */ + struct completion xceiver_cmd; /* Card has completed a tx or rx command */ +}; + +/* The station (ethernet) address prefix, used for a sanity check. */ +#define SA_ADDR0 0x02 +#define SA_ADDR1 0x60 +#define SA_ADDR2 0xAC + +struct mca_adapters_t { + unsigned int id; + char *name; +}; + +static const struct mca_adapters_t mc32_adapters[] = { + { 0x0041, "3COM EtherLink MC/32" }, + { 0x8EF5, "IBM High Performance Lan Adapter" }, + { 0x0000, NULL } +}; + + +/* Macros for ring index manipulations */ +static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); }; +static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); }; + +static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); }; + + +/* Index to functions, as function prototypes. */ +static int mc32_probe1(struct net_device *dev, int ioaddr); +static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len); +static int mc32_open(struct net_device *dev); +static void mc32_timeout(struct net_device *dev); +static netdev_tx_t mc32_send_packet(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t mc32_interrupt(int irq, void *dev_id); +static int mc32_close(struct net_device *dev); +static struct net_device_stats *mc32_get_stats(struct net_device *dev); +static void mc32_set_multicast_list(struct net_device *dev); +static void mc32_reset_multicast_list(struct net_device *dev); +static const struct ethtool_ops netdev_ethtool_ops; + +static void cleanup_card(struct net_device *dev) +{ + struct mc32_local *lp = netdev_priv(dev); + unsigned slot = lp->slot; + mca_mark_as_unused(slot); + mca_set_adapter_name(slot, NULL); + free_irq(dev->irq, dev); + release_region(dev->base_addr, MC32_IO_EXTENT); +} + +/** + * mc32_probe - Search for supported boards + * @unit: interface number to use + * + * Because MCA bus is a real bus and we can scan for cards we could do a + * single scan for all boards here. Right now we use the passed in device + * structure and scan for only one board. This needs fixing for modules + * in particular. + */ + +struct net_device *__init mc32_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local)); + static int current_mca_slot = -1; + int i; + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) + sprintf(dev->name, "eth%d", unit); + + /* Do not check any supplied i/o locations. 
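+   MCA cards are located purely through their POS registers, and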
+ POS registers usually don't fail :) */ + + /* MCA cards have POS registers. + Autodetecting MCA cards is extremely simple. + Just search for the card. */ + + for(i = 0; (mc32_adapters[i].name != NULL); i++) { + current_mca_slot = + mca_find_unused_adapter(mc32_adapters[i].id, 0); + + if(current_mca_slot != MCA_NOTFOUND) { + if(!mc32_probe1(dev, current_mca_slot)) + { + mca_set_adapter_name(current_mca_slot, + mc32_adapters[i].name); + mca_mark_as_used(current_mca_slot); + err = register_netdev(dev); + if (err) { + cleanup_card(dev); + free_netdev(dev); + dev = ERR_PTR(err); + } + return dev; + } + + } + } + free_netdev(dev); + return ERR_PTR(-ENODEV); +} + +static const struct net_device_ops netdev_ops = { + .ndo_open = mc32_open, + .ndo_stop = mc32_close, + .ndo_start_xmit = mc32_send_packet, + .ndo_get_stats = mc32_get_stats, + .ndo_set_multicast_list = mc32_set_multicast_list, + .ndo_tx_timeout = mc32_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/** + * mc32_probe1 - Check a given slot for a board and test the card + * @dev: Device structure to fill in + * @slot: The MCA bus slot being used by this card + * + * Decode the slot data and configure the card structures. Having done this we + * can reset the card and configure it. The card does a full self test cycle + * in firmware so we have to wait for it to return and post us either a + * failure case or some addresses we use to find the board internals. + */ + +static int __init mc32_probe1(struct net_device *dev, int slot) +{ + static unsigned version_printed; + int i, err; + u8 POS; + u32 base; + struct mc32_local *lp = netdev_priv(dev); + static const u16 mca_io_bases[] = { + 0x7280,0x7290, + 0x7680,0x7690, + 0x7A80,0x7A90, + 0x7E80,0x7E90 + }; + static const u32 mca_mem_bases[] = { + 0x00C0000, + 0x00C4000, + 0x00C8000, + 0x00CC000, + 0x00D0000, + 0x00D4000, + 0x00D8000, + 0x00DC000 + }; + static const char * const failures[] = { + "Processor instruction", + "Processor data bus", + "Processor data bus", + "Processor data bus", + "Adapter bus", + "ROM checksum", + "Base RAM", + "Extended RAM", + "82586 internal loopback", + "82586 initialisation failure", + "Adapter list configuration error" + }; + + /* Time to play MCA games */ + + if (mc32_debug && version_printed++ == 0) + pr_debug("%s", version); + + pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot); + + POS = mca_read_stored_pos(slot, 2); + + if(!(POS&1)) + { + pr_cont("disabled.\n"); + return -ENODEV; + } + + /* Fill in the 'dev' fields. */ + dev->base_addr = mca_io_bases[(POS>>1)&7]; + dev->mem_start = mca_mem_bases[(POS>>4)&7]; + + POS = mca_read_stored_pos(slot, 4); + if(!(POS&1)) + { + pr_cont("memory window disabled.\n"); + return -ENODEV; + } + + POS = mca_read_stored_pos(slot, 5); + + i=(POS>>4)&3; + if(i==3) + { + pr_cont("invalid memory window.\n"); + return -ENODEV; + } + + i*=16384; + i+=16384; + + dev->mem_end=dev->mem_start + i; + + dev->irq = ((POS>>2)&3)+9; + + if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname)) + { + pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr); + return -EBUSY; + } + + pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n", + dev->base_addr, dev->irq, dev->mem_start, i/1024); + + + /* We ought to set the cache line size here.. */ + + + /* + * Go PROM browsing + */ + + /* Retrieve and print the ethernet address. 
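+	   (Editor's note: each address byte lives in the adapter PROM;
+	   the loop below selects PROM byte 12+i by writing the index
+	   through POS registers 6 and 7, then reads the byte back via
+	   POS register 3.)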
*/ + for (i = 0; i < 6; i++) + { + mca_write_pos(slot, 6, i+12); + mca_write_pos(slot, 7, 0); + + dev->dev_addr[i] = mca_read_pos(slot,3); + } + + pr_info("%s: Address %pM ", dev->name, dev->dev_addr); + + mca_write_pos(slot, 6, 0); + mca_write_pos(slot, 7, 0); + + POS = mca_read_stored_pos(slot, 4); + + if(POS&2) + pr_cont(": BNC port selected.\n"); + else + pr_cont(": AUI port selected.\n"); + + POS=inb(dev->base_addr+HOST_CTRL); + POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET; + POS&=~HOST_CTRL_INTE; + outb(POS, dev->base_addr+HOST_CTRL); + /* Reset adapter */ + udelay(100); + /* Reset off */ + POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET); + outb(POS, dev->base_addr+HOST_CTRL); + + udelay(300); + + /* + * Grab the IRQ + */ + + err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev); + if (err) { + release_region(dev->base_addr, MC32_IO_EXTENT); + pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq); + goto err_exit_ports; + } + + memset(lp, 0, sizeof(struct mc32_local)); + lp->slot = slot; + + i=0; + + base = inb(dev->base_addr); + + while(base == 0xFF) + { + i++; + if(i == 1000) + { + pr_err("%s: failed to boot adapter.\n", dev->name); + err = -ENODEV; + goto err_exit_irq; + } + udelay(1000); + if(inb(dev->base_addr+2)&(1<<5)) + base = inb(dev->base_addr); + } + + if(base>0) + { + if(base < 0x0C) + pr_err("%s: %s%s.\n", dev->name, failures[base-1], + base<0x0A?" test failure":""); + else + pr_err("%s: unknown failure %d.\n", dev->name, base); + err = -ENODEV; + goto err_exit_irq; + } + + base=0; + for(i=0;i<4;i++) + { + int n=0; + + while(!(inb(dev->base_addr+2)&(1<<5))) + { + n++; + udelay(50); + if(n>100) + { + pr_err("%s: mailbox read fail (%d).\n", dev->name, i); + err = -ENODEV; + goto err_exit_irq; + } + } + + base|=(inb(dev->base_addr)<<(8*i)); + } + + lp->exec_box=isa_bus_to_virt(dev->mem_start+base); + + base=lp->exec_box->data[1]<<16|lp->exec_box->data[0]; + + lp->base = dev->mem_start+base; + + lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]); + lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]); + + lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]); + + /* + * Descriptor chains (card relative) + */ + + lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */ + lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */ + lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */ + lp->rx_len = lp->exec_box->data[11]; /* Receive list count */ + + sema_init(&lp->cmd_mutex, 0); + init_completion(&lp->execution_cmd); + init_completion(&lp->xceiver_cmd); + + pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n", + dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base); + + dev->netdev_ops = &netdev_ops; + dev->watchdog_timeo = HZ*5; /* Board does all the work */ + dev->ethtool_ops = &netdev_ethtool_ops; + + return 0; + +err_exit_irq: + free_irq(dev->irq, dev); +err_exit_ports: + release_region(dev->base_addr, MC32_IO_EXTENT); + return err; +} + + +/** + * mc32_ready_poll - wait until we can feed it a command + * @dev: The device to wait for + * + * Wait until the card becomes ready to accept a command via the + * command register. This tells us nothing about the completion + * status of any pending commands and takes very little time at all. 
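+ *
+ *	(Editor's note -- illustrative, not part of the original patch:
+ *	every mailbox kick in the driver follows the same two-step
+ *	pattern, e.g.
+ *
+ *		mc32_ready_poll(dev);			   <-- spin on HOST_STATUS_CRR
+ *		outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);  <-- then ring the doorbell
+ *
+ *	The busy-wait is tolerable precisely because, as noted above,
+ *	the card reasserts "command register ready" very quickly.)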
+ */ + +static inline void mc32_ready_poll(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR)); +} + + +/** + * mc32_command_nowait - send a command non blocking + * @dev: The 3c527 to issue the command to + * @cmd: The command word to write to the mailbox + * @data: A data block if the command expects one + * @len: Length of the data block + * + * Send a command from interrupt state. If there is a command + * currently being executed then we return an error of -1. It + * simply isn't viable to wait around as commands may be + * slow. This can theoretically be starved on SMP, but it's hard + * to see a realistic situation. We do not wait for the command + * to complete --- we rely on the interrupt handler to tidy up + * after us. + */ + +static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len) +{ + struct mc32_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + int ret = -1; + + if (down_trylock(&lp->cmd_mutex) == 0) + { + lp->cmd_nonblocking=1; + lp->exec_box->mbox=0; + lp->exec_box->mbox=cmd; + memcpy((void *)lp->exec_box->data, data, len); + barrier(); /* the memcpy forgot the volatile so be sure */ + + /* Send the command */ + mc32_ready_poll(dev); + outb(1<<6, ioaddr+HOST_CMD); + + ret = 0; + + /* Interrupt handler will signal mutex on completion */ + } + + return ret; +} + + +/** + * mc32_command - send a command and sleep until completion + * @dev: The 3c527 card to issue the command to + * @cmd: The command word to write to the mailbox + * @data: A data block if the command expects one + * @len: Length of the data block + * + * Sends exec commands in a user context. This permits us to wait around + * for the replies and also to wait for the command buffer to complete + * from a previous command before we execute our command. After our + * command completes we will attempt any pending multicast reload + * we blocked off by hogging the exec buffer. + * + * You feed the card a command, you wait, it interrupts you get a + * reply. All well and good. The complication arises because you use + * commands for filter list changes which come in at bh level from things + * like IPV6 group stuff. + */ + +static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len) +{ + struct mc32_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + int ret = 0; + + down(&lp->cmd_mutex); + + /* + * My Turn + */ + + lp->cmd_nonblocking=0; + lp->exec_box->mbox=0; + lp->exec_box->mbox=cmd; + memcpy((void *)lp->exec_box->data, data, len); + barrier(); /* the memcpy forgot the volatile so be sure */ + + mc32_ready_poll(dev); + outb(1<<6, ioaddr+HOST_CMD); + + wait_for_completion(&lp->execution_cmd); + + if(lp->exec_box->mbox&(1<<13)) + ret = -1; + + up(&lp->cmd_mutex); + + /* + * A multicast set got blocked - try it now + */ + + if(lp->mc_reload_wait) + { + mc32_reset_multicast_list(dev); + } + + return ret; +} + + +/** + * mc32_start_transceiver - tell board to restart tx/rx + * @dev: The 3c527 card to issue the command to + * + * This may be called from the interrupt state, where it is used + * to restart the rx ring if the card runs out of rx buffers. + * + * We must first check if it's ok to (re)start the transceiver. See + * mc32_close for details. 
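+ *
+ *	(Editor's note: unlike the exec commands above, which are kicked
+ *	through the exec doorbell outb(1<<6, ioaddr+HOST_CMD), the
+ *	transceiver is driven through the dedicated rx_box/tx_box
+ *	mailboxes using the HOST_CMD_* opcodes directly.)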
+ */
+
+static void mc32_start_transceiver(struct net_device *dev) {
+
+	struct mc32_local *lp = netdev_priv(dev);
+	int ioaddr = dev->base_addr;
+
+	/* Ignore RX overflow on device closure */
+	if (lp->xceiver_desired_state==HALTED)
+		return;
+
+	/* Give the card the offset to the post-EOL-bit RX descriptor */
+	mc32_ready_poll(dev);
+	lp->rx_box->mbox=0;
+	lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
+	outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
+
+	mc32_ready_poll(dev);
+	lp->tx_box->mbox=0;
+	outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);   /* card ignores this on RX restart */
+
+	/* We are not interrupted on start completion */
+}
+
+
+/**
+ *	mc32_halt_transceiver	-	tell board to stop tx/rx
+ *	@dev: The 3c527 card to issue the command to
+ *
+ *	We issue the commands to halt the card's transceiver. In fact,
+ *	after some experimenting we now simply tell the card to
+ *	suspend. When issuing aborts occasionally odd things happened.
+ *
+ *	We then sleep until the card has notified us that both rx and
+ *	tx have been suspended.
+ */
+
+static void mc32_halt_transceiver(struct net_device *dev)
+{
+	struct mc32_local *lp = netdev_priv(dev);
+	int ioaddr = dev->base_addr;
+
+	mc32_ready_poll(dev);
+	lp->rx_box->mbox=0;
+	outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
+	wait_for_completion(&lp->xceiver_cmd);
+
+	mc32_ready_poll(dev);
+	lp->tx_box->mbox=0;
+	outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
+	wait_for_completion(&lp->xceiver_cmd);
+}
+
+
+/**
+ *	mc32_load_rx_ring	-	load the ring of receive buffers
+ *	@dev: 3c527 to build the ring for
+ *
+ *	This initialises the on-card and driver data structures to
+ *	the point where mc32_start_transceiver() can be called.
+ *
+ *	The card sets up the receive ring for us. We are required to use the
+ *	ring it provides, although the size of the ring is configurable.
+ *
+ *	We allocate an sk_buff for each ring entry in turn and
+ *	initialise its house-keeping info. At the same time, we read
+ *	each 'next' pointer in our rx_ring array. This reduces slow
+ *	shared-memory reads and makes it easy to access predecessor
+ *	descriptors.
+ *
+ *	We then set the end-of-list bit for the last entry so that the
+ *	card will know when it has run out of buffers.
+ */
+
+static int mc32_load_rx_ring(struct net_device *dev)
+{
+	struct mc32_local *lp = netdev_priv(dev);
+	int i;
+	u16 rx_base;
+	volatile struct skb_header *p;
+
+	rx_base=lp->rx_chain;
+
+	for(i=0; i<RX_RING_LEN; i++)
+	{
+		lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
+		if (lp->rx_ring[i].skb==NULL) {
+			for (;i>=0;i--)
+				kfree_skb(lp->rx_ring[i].skb);
+			return -ENOBUFS;
+		}
+		skb_reserve(lp->rx_ring[i].skb, 18);
+
+		p=isa_bus_to_virt(lp->base+rx_base);
+
+		p->control=0;
+		p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
+		p->status=0;
+		p->length=1532;
+
+		lp->rx_ring[i].p=p;
+		rx_base=p->next;
+	}
+
+	lp->rx_ring[i-1].p->control |= CONTROL_EOL;
+
+	lp->rx_ring_tail=0;
+
+	return 0;
+}
+
+
+/**
+ *	mc32_flush_rx_ring	-	free the ring of receive buffers
+ *	@dev: The 3c527 to flush the rx ring of
+ *
+ *	Free the buffer for each ring slot. This may be called
+ *	before mc32_load_rx_ring(), e.g. on error in mc32_open().
+ *	Requires rx skb pointers to point to a valid skb, or NULL.
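+ *
+ *	(Editor's note: the load/flush pairs bracket the transceiver's
+ *	life cycle:
+ *
+ *		mc32_open():  load_tx_ring(); load_rx_ring(); start_transceiver();
+ *		mc32_close(): halt_transceiver(); flush_rx_ring(); flush_tx_ring();
+ *
+ *	so a flush only ever runs against a halted card.)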
+ */
+
+static void mc32_flush_rx_ring(struct net_device *dev)
+{
+	struct mc32_local *lp = netdev_priv(dev);
+	int i;
+
+	for(i=0; i < RX_RING_LEN; i++)
+	{
+		if (lp->rx_ring[i].skb) {
+			dev_kfree_skb(lp->rx_ring[i].skb);
+			lp->rx_ring[i].skb = NULL;
+		}
+		lp->rx_ring[i].p=NULL;
+	}
+}
+
+
+/**
+ *	mc32_load_tx_ring	-	load transmit ring
+ *	@dev: The 3c527 card to issue the command to
+ *
+ *	This sets up the host transmit data-structures.
+ *
+ *	First, we obtain from the card its current position in the tx
+ *	ring, so that we will know where to begin transmitting
+ *	packets.
+ *
+ *	Then, we read the 'next' pointers from the on-card tx ring into
+ *	our tx_ring array to reduce slow shared-mem reads. Finally, we
+ *	initialise the tx house-keeping variables.
+ *
+ */
+
+static void mc32_load_tx_ring(struct net_device *dev)
+{
+	struct mc32_local *lp = netdev_priv(dev);
+	volatile struct skb_header *p;
+	int i;
+	u16 tx_base;
+
+	tx_base=lp->tx_box->data[0];
+
+	for(i=0 ; i<TX_RING_LEN ; i++)
+	{
+		p=isa_bus_to_virt(lp->base+tx_base);
+		lp->tx_ring[i].p=p;
+		lp->tx_ring[i].skb=NULL;
+
+		tx_base=p->next;
+	}
+
+	/* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
+	/* see mc32_tx_ring */
+
+	atomic_set(&lp->tx_count, TX_RING_LEN-1);
+	atomic_set(&lp->tx_ring_head, 0);
+	lp->tx_ring_tail=0;
+}
+
+
+/**
+ *	mc32_flush_tx_ring	-	free transmit ring
+ *	@dev: The 3c527 to flush the tx ring of
+ *
+ *	If the ring is non-empty, zip over it, freeing any
+ *	allocated sk_buffs. The tx ring house-keeping variables are
+ *	then reset. Requires tx skb pointers to point to a valid skb,
+ *	or NULL.
+ */
+
+static void mc32_flush_tx_ring(struct net_device *dev)
+{
+	struct mc32_local *lp = netdev_priv(dev);
+	int i;
+
+	for (i=0; i < TX_RING_LEN; i++)
+	{
+		if (lp->tx_ring[i].skb)
+		{
+			dev_kfree_skb(lp->tx_ring[i].skb);
+			lp->tx_ring[i].skb = NULL;
+		}
+	}
+
+	atomic_set(&lp->tx_count, 0);
+	atomic_set(&lp->tx_ring_head, 0);
+	lp->tx_ring_tail=0;
+}
+
+
+/**
+ *	mc32_open	-	handle 'up' of card
+ *	@dev: device to open
+ *
+ *	The user is trying to bring the card into ready state. This requires
+ *	a brief dialogue with the card. Firstly we enable interrupts and then
+ *	'indications'. Without these enabled the card doesn't bother telling
+ *	us what it has done. This had me puzzled for a week.
+ *
+ *	We configure the number of card descriptors, then load the network
+ *	address and multicast filters. Turn on the workaround mode. This
+ *	works around a bug in the 82586 - it asks the firmware to do
+ *	so. It has a performance (latency) hit but is needed on busy
+ *	[read most] lans. We load the ring with buffers then we kick it
+ *	all off.
+ */
+
+static int mc32_open(struct net_device *dev)
+{
+	int ioaddr = dev->base_addr;
+	struct mc32_local *lp = netdev_priv(dev);
+	u8 one=1;
+	u8 regs;
+	u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
+
+	/*
+	 *	Interrupts enabled
+	 */
+
+	regs=inb(ioaddr+HOST_CTRL);
+	regs|=HOST_CTRL_INTE;
+	outb(regs, ioaddr+HOST_CTRL);
+
+	/*
+	 *	Allow ourselves to issue commands
+	 */
+
+	up(&lp->cmd_mutex);
+
+
+	/*
+	 *	Send the indications on command
+	 */
+
+	mc32_command(dev, 4, &one, 2);
+
+	/*
+	 *	Poke it to make sure it's really dead.
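+	 *	(Editor's gloss: that is, suspend the transceiver and drop
+	 *	any stale tx buffers, so the descriptor reconfiguration
+	 *	below starts from a quiet card.)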
+ */ + + mc32_halt_transceiver(dev); + mc32_flush_tx_ring(dev); + + /* + * Ask card to set up on-card descriptors to our spec + */ + + if(mc32_command(dev, 8, descnumbuffs, 4)) { + pr_info("%s: %s rejected our buffer configuration!\n", + dev->name, cardname); + mc32_close(dev); + return -ENOBUFS; + } + + /* Report new configuration */ + mc32_command(dev, 6, NULL, 0); + + lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */ + lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */ + lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */ + lp->rx_len = lp->exec_box->data[11]; /* Receive list count */ + + /* Set Network Address */ + mc32_command(dev, 1, dev->dev_addr, 6); + + /* Set the filters */ + mc32_set_multicast_list(dev); + + if (WORKAROUND_82586) { + u16 zero_word=0; + mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */ + } + + mc32_load_tx_ring(dev); + + if(mc32_load_rx_ring(dev)) + { + mc32_close(dev); + return -ENOBUFS; + } + + lp->xceiver_desired_state = RUNNING; + + /* And finally, set the ball rolling... */ + mc32_start_transceiver(dev); + + netif_start_queue(dev); + + return 0; +} + + +/** + * mc32_timeout - handle a timeout from the network layer + * @dev: 3c527 that timed out + * + * Handle a timeout on transmit from the 3c527. This normally means + * bad things as the hardware handles cable timeouts and mess for + * us. + * + */ + +static void mc32_timeout(struct net_device *dev) +{ + pr_warning("%s: transmit timed out?\n", dev->name); + /* Try to restart the adaptor. */ + netif_wake_queue(dev); +} + + +/** + * mc32_send_packet - queue a frame for transmit + * @skb: buffer to transmit + * @dev: 3c527 to send it out of + * + * Transmit a buffer. This normally means throwing the buffer onto + * the transmit queue as the queue is quite large. If the queue is + * full then we set tx_busy and return. Once the interrupt handler + * gets messages telling it to reclaim transmit queue entries, we will + * clear tx_busy and the kernel will start calling this again. + * + * We do not disable interrupts or acquire any locks; this can + * run concurrently with mc32_tx_ring(), and the function itself + * is serialised at a higher layer. However, similarly for the + * card itself, we must ensure that we update tx_ring_head only + * after we've established a valid packet on the tx ring (and + * before we let the card "see" it, to prevent it racing with the + * irq handler). + * + */ + +static netdev_tx_t mc32_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct mc32_local *lp = netdev_priv(dev); + u32 head = atomic_read(&lp->tx_ring_head); + + volatile struct skb_header *p, *np; + + netif_stop_queue(dev); + + if(atomic_read(&lp->tx_count)==0) { + return NETDEV_TX_BUSY; + } + + if (skb_padto(skb, ETH_ZLEN)) { + netif_wake_queue(dev); + return NETDEV_TX_OK; + } + + atomic_dec(&lp->tx_count); + + /* P is the last sending/sent buffer as a pointer */ + p=lp->tx_ring[head].p; + + head = next_tx(head); + + /* NP is the buffer we will be loading */ + np=lp->tx_ring[head].p; + + /* We will need this to flush the buffer out */ + lp->tx_ring[head].skb=skb; + + np->length = unlikely(skb->len < ETH_ZLEN) ? 
ETH_ZLEN : skb->len; + np->data = isa_virt_to_bus(skb->data); + np->status = 0; + np->control = CONTROL_EOP | CONTROL_EOL; + wmb(); + + /* + * The new frame has been setup; we can now + * let the interrupt handler and card "see" it + */ + + atomic_set(&lp->tx_ring_head, head); + p->control &= ~CONTROL_EOL; + + netif_wake_queue(dev); + return NETDEV_TX_OK; +} + + +/** + * mc32_update_stats - pull off the on board statistics + * @dev: 3c527 to service + * + * + * Query and reset the on-card stats. There's the small possibility + * of a race here, which would result in an underestimation of + * actual errors. As such, we'd prefer to keep all our stats + * collection in software. As a rule, we do. However it can't be + * used for rx errors and collisions as, by default, the card discards + * bad rx packets. + * + * Setting the SAV BP in the rx filter command supposedly + * stops this behaviour. However, testing shows that it only seems to + * enable the collation of on-card rx statistics --- the driver + * never sees an RX descriptor with an error status set. + * + */ + +static void mc32_update_stats(struct net_device *dev) +{ + struct mc32_local *lp = netdev_priv(dev); + volatile struct mc32_stats *st = lp->stats; + + u32 rx_errors=0; + + rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors; + st->rx_crc_errors=0; + rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors; + st->rx_overrun_errors=0; + rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors; + st->rx_alignment_errors=0; + rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors; + st->rx_tooshort_errors=0; + rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors; + st->rx_outofresource_errors=0; + dev->stats.rx_errors=rx_errors; + + /* Number of packets which saw one collision */ + dev->stats.collisions+=st->dataC[10]; + st->dataC[10]=0; + + /* Number of packets which saw 2--15 collisions */ + dev->stats.collisions+=st->dataC[11]; + st->dataC[11]=0; +} + + +/** + * mc32_rx_ring - process the receive ring + * @dev: 3c527 that needs its receive ring processing + * + * + * We have received one or more indications from the card that a + * receive has completed. The buffer ring thus contains dirty + * entries. We walk the ring by iterating over the circular rx_ring + * array, starting at the next dirty buffer (which happens to be the + * one we finished up at last time around). + * + * For each completed packet, we will either copy it and pass it up + * the stack or, if the packet is near MTU sized, we allocate + * another buffer and flip the old one up the stack. + * + * We must succeed in keeping a buffer on the ring. If necessary we + * will toss a received packet rather than lose a ring entry. Once + * the first uncompleted descriptor is found, we move the + * End-Of-List bit to include the buffers just processed. 
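+ *
+ *	(Editor's illustration: with the 8-entry ring, tail at 3 and
+ *	descriptors 3 and 4 just completed, the new tail is 5; the EOL
+ *	bit is set on descriptor 4 and cleared from descriptor 2,
+ *	handing 3 and 4 back to the card in one move.)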
+ * + */ + +static void mc32_rx_ring(struct net_device *dev) +{ + struct mc32_local *lp = netdev_priv(dev); + volatile struct skb_header *p; + u16 rx_ring_tail; + u16 rx_old_tail; + int x=0; + + rx_old_tail = rx_ring_tail = lp->rx_ring_tail; + + do + { + p=lp->rx_ring[rx_ring_tail].p; + + if(!(p->status & (1<<7))) { /* Not COMPLETED */ + break; + } + if(p->status & (1<<6)) /* COMPLETED_OK */ + { + + u16 length=p->length; + struct sk_buff *skb; + struct sk_buff *newskb; + + /* Try to save time by avoiding a copy on big frames */ + + if ((length > RX_COPYBREAK) && + ((newskb=dev_alloc_skb(1532)) != NULL)) + { + skb=lp->rx_ring[rx_ring_tail].skb; + skb_put(skb, length); + + skb_reserve(newskb,18); + lp->rx_ring[rx_ring_tail].skb=newskb; + p->data=isa_virt_to_bus(newskb->data); + } + else + { + skb=dev_alloc_skb(length+2); + + if(skb==NULL) { + dev->stats.rx_dropped++; + goto dropped; + } + + skb_reserve(skb,2); + memcpy(skb_put(skb, length), + lp->rx_ring[rx_ring_tail].skb->data, length); + } + + skb->protocol=eth_type_trans(skb,dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; + netif_rx(skb); + } + + dropped: + p->length = 1532; + p->status = 0; + + rx_ring_tail=next_rx(rx_ring_tail); + } + while(x++<48); + + /* If there was actually a frame to be processed, place the EOL bit */ + /* at the descriptor prior to the one to be filled next */ + + if (rx_ring_tail != rx_old_tail) + { + lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL; + lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL; + + lp->rx_ring_tail=rx_ring_tail; + } +} + + +/** + * mc32_tx_ring - process completed transmits + * @dev: 3c527 that needs its transmit ring processing + * + * + * This operates in a similar fashion to mc32_rx_ring. We iterate + * over the transmit ring. For each descriptor which has been + * processed by the card, we free its associated buffer and note + * any errors. This continues until the transmit ring is emptied + * or we reach a descriptor that hasn't yet been processed by the + * card. + * + */ + +static void mc32_tx_ring(struct net_device *dev) +{ + struct mc32_local *lp = netdev_priv(dev); + volatile struct skb_header *np; + + /* + * We rely on head==tail to mean 'queue empty'. 
+ * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent + * tx_ring_head wrapping to tail and confusing a 'queue empty' + * condition with 'queue full' + */ + + while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head)) + { + u16 t; + + t=next_tx(lp->tx_ring_tail); + np=lp->tx_ring[t].p; + + if(!(np->status & (1<<7))) + { + /* Not COMPLETED */ + break; + } + dev->stats.tx_packets++; + if(!(np->status & (1<<6))) /* Not COMPLETED_OK */ + { + dev->stats.tx_errors++; + + switch(np->status&0x0F) + { + case 1: + dev->stats.tx_aborted_errors++; + break; /* Max collisions */ + case 2: + dev->stats.tx_fifo_errors++; + break; + case 3: + dev->stats.tx_carrier_errors++; + break; + case 4: + dev->stats.tx_window_errors++; + break; /* CTS Lost */ + case 5: + dev->stats.tx_aborted_errors++; + break; /* Transmit timeout */ + } + } + /* Packets are sent in order - this is + basically a FIFO queue of buffers matching + the card ring */ + dev->stats.tx_bytes+=lp->tx_ring[t].skb->len; + dev_kfree_skb_irq(lp->tx_ring[t].skb); + lp->tx_ring[t].skb=NULL; + atomic_inc(&lp->tx_count); + netif_wake_queue(dev); + + lp->tx_ring_tail=t; + } + +} + + +/** + * mc32_interrupt - handle an interrupt from a 3c527 + * @irq: Interrupt number + * @dev_id: 3c527 that requires servicing + * @regs: Registers (unused) + * + * + * An interrupt is raised whenever the 3c527 writes to the command + * register. This register contains the message it wishes to send us + * packed into a single byte field. We keep reading status entries + * until we have processed all the control items, but simply count + * transmit and receive reports. When all reports are in we empty the + * transceiver rings as appropriate. This saves the overhead of + * multiple command requests. + * + * Because MCA is level-triggered, we shouldn't miss indications. + * Therefore, we needn't ask the card to suspend interrupts within + * this handler. The card receives an implicit acknowledgment of the + * current interrupt when we read the command register. + * + */ + +static irqreturn_t mc32_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct mc32_local *lp; + int ioaddr, status, boguscount = 0; + int rx_event = 0; + int tx_event = 0; + + ioaddr = dev->base_addr; + lp = netdev_priv(dev); + + /* See whats cooking */ + + while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000) + { + status=inb(ioaddr+HOST_CMD); + + pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n", + (status&7), (status>>3)&7, (status>>6)&1, + (status>>7)&1, boguscount); + + switch(status&7) + { + case 0: + break; + case 6: /* TX fail */ + case 2: /* TX ok */ + tx_event = 1; + break; + case 3: /* Halt */ + case 4: /* Abort */ + complete(&lp->xceiver_cmd); + break; + default: + pr_notice("%s: strange tx ack %d\n", dev->name, status&7); + } + status>>=3; + switch(status&7) + { + case 0: + break; + case 2: /* RX */ + rx_event=1; + break; + case 3: /* Halt */ + case 4: /* Abort */ + complete(&lp->xceiver_cmd); + break; + case 6: + /* Out of RX buffers stat */ + /* Must restart rx */ + dev->stats.rx_dropped++; + mc32_rx_ring(dev); + mc32_start_transceiver(dev); + break; + default: + pr_notice("%s: strange rx ack %d\n", + dev->name, status&7); + } + status>>=3; + if(status&1) + { + /* + * No thread is waiting: we need to tidy + * up ourself. 
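+			 *
+			 *	(Editor's note: for a mc32_command_nowait()
+			 *	caller the handler itself releases cmd_mutex
+			 *	and kicks any deferred multicast reload; a
+			 *	sleeping mc32_command() caller is instead
+			 *	woken through the execution_cmd completion.)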
+ */ + + if (lp->cmd_nonblocking) { + up(&lp->cmd_mutex); + if (lp->mc_reload_wait) + mc32_reset_multicast_list(dev); + } + else complete(&lp->execution_cmd); + } + if(status&2) + { + /* + * We get interrupted once per + * counter that is about to overflow. + */ + + mc32_update_stats(dev); + } + } + + + /* + * Process the transmit and receive rings + */ + + if(tx_event) + mc32_tx_ring(dev); + + if(rx_event) + mc32_rx_ring(dev); + + return IRQ_HANDLED; +} + + +/** + * mc32_close - user configuring the 3c527 down + * @dev: 3c527 card to shut down + * + * The 3c527 is a bus mastering device. We must be careful how we + * shut it down. It may also be running shared interrupt so we have + * to be sure to silence it properly + * + * We indicate that the card is closing to the rest of the + * driver. Otherwise, it is possible that the card may run out + * of receive buffers and restart the transceiver while we're + * trying to close it. + * + * We abort any receive and transmits going on and then wait until + * any pending exec commands have completed in other code threads. + * In theory we can't get here while that is true, in practice I am + * paranoid + * + * We turn off the interrupt enable for the board to be sure it can't + * intefere with other devices. + */ + +static int mc32_close(struct net_device *dev) +{ + struct mc32_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + u8 regs; + u16 one=1; + + lp->xceiver_desired_state = HALTED; + netif_stop_queue(dev); + + /* + * Send the indications on command (handy debug check) + */ + + mc32_command(dev, 4, &one, 2); + + /* Shut down the transceiver */ + + mc32_halt_transceiver(dev); + + /* Ensure we issue no more commands beyond this point */ + + down(&lp->cmd_mutex); + + /* Ok the card is now stopping */ + + regs=inb(ioaddr+HOST_CTRL); + regs&=~HOST_CTRL_INTE; + outb(regs, ioaddr+HOST_CTRL); + + mc32_flush_rx_ring(dev); + mc32_flush_tx_ring(dev); + + mc32_update_stats(dev); + + return 0; +} + + +/** + * mc32_get_stats - hand back stats to network layer + * @dev: The 3c527 card to handle + * + * We've collected all the stats we can in software already. Now + * it's time to update those kept on-card and return the lot. + * + */ + +static struct net_device_stats *mc32_get_stats(struct net_device *dev) +{ + mc32_update_stats(dev); + return &dev->stats; +} + + +/** + * do_mc32_set_multicast_list - attempt to update multicasts + * @dev: 3c527 device to load the list on + * @retry: indicates this is not the first call. + * + * + * Actually set or clear the multicast filter for this adaptor. The + * locking issues are handled by this routine. We have to track + * state as it may take multiple calls to get the command sequence + * completed. We just keep trying to schedule the loads until we + * manage to process them all. + * + * num_addrs == -1 Promiscuous mode, receive all packets + * + * num_addrs == 0 Normal mode, clear multicast list + * + * num_addrs > 0 Multicast mode, receive normal and MC packets, + * and do best-effort filtering. + * + * See mc32_update_stats() regards setting the SAV BP bit. 
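+ *
+ *	(Editor's sketch of the list handed to exec command 2 for a
+ *	two-address filter -- 2 + 6*count bytes in all:
+ *
+ *		block[0] = count, block[1] = 0,
+ *		block[2..7]  = first MAC address,
+ *		block[8..13] = second MAC address.)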
+ * + */ + +static void do_mc32_set_multicast_list(struct net_device *dev, int retry) +{ + struct mc32_local *lp = netdev_priv(dev); + u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */ + + if ((dev->flags&IFF_PROMISC) || + (dev->flags&IFF_ALLMULTI) || + netdev_mc_count(dev) > 10) + /* Enable promiscuous mode */ + filt |= 1; + else if (!netdev_mc_empty(dev)) + { + unsigned char block[62]; + unsigned char *bp; + struct netdev_hw_addr *ha; + + if(retry==0) + lp->mc_list_valid = 0; + if(!lp->mc_list_valid) + { + block[1]=0; + block[0]=netdev_mc_count(dev); + bp=block+2; + + netdev_for_each_mc_addr(ha, dev) { + memcpy(bp, ha->addr, 6); + bp+=6; + } + if(mc32_command_nowait(dev, 2, block, + 2+6*netdev_mc_count(dev))==-1) + { + lp->mc_reload_wait = 1; + return; + } + lp->mc_list_valid=1; + } + } + + if(mc32_command_nowait(dev, 0, &filt, 2)==-1) + { + lp->mc_reload_wait = 1; + } + else { + lp->mc_reload_wait = 0; + } +} + + +/** + * mc32_set_multicast_list - queue multicast list update + * @dev: The 3c527 to use + * + * Commence loading the multicast list. This is called when the kernel + * changes the lists. It will override any pending list we are trying to + * load. + */ + +static void mc32_set_multicast_list(struct net_device *dev) +{ + do_mc32_set_multicast_list(dev,0); +} + + +/** + * mc32_reset_multicast_list - reset multicast list + * @dev: The 3c527 to use + * + * Attempt the next step in loading the multicast lists. If this attempt + * fails to complete then it will be scheduled and this function called + * again later from elsewhere. + */ + +static void mc32_reset_multicast_list(struct net_device *dev) +{ + do_mc32_set_multicast_list(dev,1); +} + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + return mc32_debug; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 level) +{ + mc32_debug = level; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, +}; + +#ifdef MODULE + +static struct net_device *this_device; + +/** + * init_module - entry point + * + * Probe and locate a 3c527 card. This really should probe and locate + * all the 3c527 cards in the machine not just one of them. Yes you can + * insmod multiple modules for now but it's a hack. + */ + +int __init init_module(void) +{ + this_device = mc32_probe(-1); + if (IS_ERR(this_device)) + return PTR_ERR(this_device); + return 0; +} + +/** + * cleanup_module - free resources for an unload + * + * Unloading time. We release the MCA bus resources and the interrupt + * at which point everything is ready to unload. The card must be stopped + * at this point or we would not have been called. When we unload we + * leave the card stopped but not totally shut down. When the card is + * initialized it must be rebooted or the rings reloaded before any + * transmit operations are allowed to start scribbling into memory. 
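+ *
+ *	(Editor's note: the teardown order below matters --
+ *	unregister_netdev() first, so the interface is gone before
+ *	cleanup_card() hands back the IRQ, the I/O region and the MCA
+ *	slot.)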
+ */ + +void __exit cleanup_module(void) +{ + unregister_netdev(this_device); + cleanup_card(this_device); + free_netdev(this_device); +} + +#endif /* MODULE */ diff --git a/drivers/net/ethernet/i825xx/3c527.h b/drivers/net/ethernet/i825xx/3c527.h new file mode 100644 index 0000000..d693b8d --- /dev/null +++ b/drivers/net/ethernet/i825xx/3c527.h @@ -0,0 +1,81 @@ +/* + * 3COM "EtherLink MC/32" Descriptions + */ + +/* + * Registers + */ + +#define HOST_CMD 0 +#define HOST_CMD_START_RX (1<<3) +#define HOST_CMD_SUSPND_RX (3<<3) +#define HOST_CMD_RESTRT_RX (5<<3) + +#define HOST_CMD_SUSPND_TX 3 +#define HOST_CMD_RESTRT_TX 5 + + +#define HOST_STATUS 2 +#define HOST_STATUS_CRR (1<<6) +#define HOST_STATUS_CWR (1<<5) + + +#define HOST_CTRL 6 +#define HOST_CTRL_ATTN (1<<7) +#define HOST_CTRL_RESET (1<<6) +#define HOST_CTRL_INTE (1<<2) + +#define HOST_RAMPAGE 8 + +#define HALTED 0 +#define RUNNING 1 + +struct mc32_mailbox +{ + u16 mbox; + u16 data[1]; +} __packed; + +struct skb_header +{ + u8 status; + u8 control; + u16 next; /* Do not change! */ + u16 length; + u32 data; +} __packed; + +struct mc32_stats +{ + /* RX Errors */ + u32 rx_crc_errors; + u32 rx_alignment_errors; + u32 rx_overrun_errors; + u32 rx_tooshort_errors; + u32 rx_toolong_errors; + u32 rx_outofresource_errors; + + u32 rx_discarded; /* via card pattern match filter */ + + /* TX Errors */ + u32 tx_max_collisions; + u32 tx_carrier_errors; + u32 tx_underrun_errors; + u32 tx_cts_errors; + u32 tx_timeout_errors; + + /* various cruft */ + u32 dataA[6]; + u16 dataB[5]; + u32 dataC[14]; +} __packed; + +#define STATUS_MASK 0x0F +#define COMPLETED (1<<7) +#define COMPLETED_OK (1<<6) +#define BUFFER_BUSY (1<<5) + +#define CONTROL_EOP (1<<7) /* End Of Packet */ +#define CONTROL_EOL (1<<6) /* End of List */ + +#define MCA_MC32_ID 0x0041 /* Our MCA ident */ diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c new file mode 100644 index 0000000..be1f197 --- /dev/null +++ b/drivers/net/ethernet/i825xx/82596.c @@ -0,0 +1,1632 @@ +/* 82596.c: A generic 82596 ethernet driver for linux. */ +/* + Based on Apricot.c + Written 1994 by Mark Evans. + This driver is for the Apricot 82596 bus-master interface + + Modularised 12/94 Mark Evans + + + Modified to support the 82596 ethernet chips on 680x0 VME boards. + by Richard Hirst + Renamed to be 82596.c + + 980825: Changed to receive directly in to sk_buffs which are + allocated at open() time. Eliminates copy on incoming frames + (small ones are still copied). Shared data now held in a + non-cached page, so we can run on 68060 in copyback mode. + + TBD: + * look at deferring rx frames rather than discarding (as per tulip) + * handle tx ring full as per tulip + * performance test to tune rx_copybreak + + Most of my modifications relate to the braindead big-endian + implementation by Intel. When the i596 is operating in + 'big-endian' mode, it thinks a 32 bit value of 0x12345678 + should be stored as 0x56781234. This is a real pain, when + you have linked lists which are shared by the 680x0 and the + i596. + + Driver skeleton + Written 1993 by Donald Becker. + Copyright 1993 United States Government as represented by the Director, + National Security Agency. This software may only be used and distributed + according to the terms of the GNU General Public License as modified by SRC, + incorporated herein by reference. 
+ + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static char version[] __initdata = + "82596.c $Revision: 1.5 $\n"; + +#define DRV_NAME "82596" + +/* DEBUG flags + */ + +#define DEB_INIT 0x0001 +#define DEB_PROBE 0x0002 +#define DEB_SERIOUS 0x0004 +#define DEB_ERRORS 0x0008 +#define DEB_MULTI 0x0010 +#define DEB_TDR 0x0020 +#define DEB_OPEN 0x0040 +#define DEB_RESET 0x0080 +#define DEB_ADDCMD 0x0100 +#define DEB_STATUS 0x0200 +#define DEB_STARTTX 0x0400 +#define DEB_RXADDR 0x0800 +#define DEB_TXADDR 0x1000 +#define DEB_RXFRAME 0x2000 +#define DEB_INTS 0x4000 +#define DEB_STRUCT 0x8000 +#define DEB_ANY 0xffff + + +#define DEB(x,y) if (i596_debug & (x)) y + + +#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE) +#define ENABLE_MVME16x_NET +#endif +#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE) +#define ENABLE_BVME6000_NET +#endif +#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE) +#define ENABLE_APRICOT +#endif + +#ifdef ENABLE_MVME16x_NET +#include +#endif +#ifdef ENABLE_BVME6000_NET +#include +#endif + +/* + * Define various macros for Channel Attention, word swapping etc., dependent + * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel. + */ + +#ifdef __mc68000__ +#define WSWAPrfd(x) ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPrbd(x) ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPscb(x) ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPcmd(x) ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define ISCP_BUSY 0x00010000 +#define MACH_IS_APRICOT 0 +#else +#define WSWAPrfd(x) ((struct i596_rfd *)((long)x)) +#define WSWAPrbd(x) ((struct i596_rbd *)((long)x)) +#define WSWAPiscp(x) ((struct i596_iscp *)((long)x)) +#define WSWAPscb(x) ((struct i596_scb *)((long)x)) +#define WSWAPcmd(x) ((struct i596_cmd *)((long)x)) +#define WSWAPtbd(x) ((struct i596_tbd *)((long)x)) +#define WSWAPchar(x) ((char *)((long)x)) +#define ISCP_BUSY 0x0001 +#define MACH_IS_APRICOT 1 +#endif + +/* + * The MPU_PORT command allows direct access to the 82596. With PORT access + * the following commands are available (p5-18). The 32-bit port command + * must be word-swapped with the most significant word written first. + * This only applies to VME boards. + */ +#define PORT_RESET 0x00 /* reset 82596 */ +#define PORT_SELFTEST 0x01 /* selftest */ +#define PORT_ALTSCP 0x02 /* alternate SCB address */ +#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */ + +static int i596_debug = (DEB_SERIOUS|DEB_PROBE); + +MODULE_AUTHOR("Richard Hirst"); +MODULE_DESCRIPTION("i82596 driver"); +MODULE_LICENSE("GPL"); + +module_param(i596_debug, int, 0); +MODULE_PARM_DESC(i596_debug, "i82596 debug mask"); + + +/* Copy frames shorter than rx_copybreak, otherwise pass on up in + * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha). 
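+ * (Editor's note: the same time/space trade-off as the 3c527
+ * copy-break above -- i596_rx() memcpy()s frames of up to
+ * rx_copybreak bytes into a snugly sized skb and recycles the big
+ * ring buffer, while larger frames are passed up whole and replaced
+ * with a fresh PKT_BUF_SZ buffer.)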
+ */ +static int rx_copybreak = 100; + +#define PKT_BUF_SZ 1536 +#define MAX_MC_CNT 64 + +#define I596_TOTAL_SIZE 17 + +#define I596_NULL ((void *)0xffffffff) + +#define CMD_EOL 0x8000 /* The last command of the list, stop. */ +#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ +#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ + +#define CMD_FLEX 0x0008 /* Enable flexible memory model */ + +enum commands { + CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, + CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7 +}; + +#define STAT_C 0x8000 /* Set to 0 after execution */ +#define STAT_B 0x4000 /* Command being executed */ +#define STAT_OK 0x2000 /* Command executed ok */ +#define STAT_A 0x1000 /* Command aborted */ + +#define CUC_START 0x0100 +#define CUC_RESUME 0x0200 +#define CUC_SUSPEND 0x0300 +#define CUC_ABORT 0x0400 +#define RX_START 0x0010 +#define RX_RESUME 0x0020 +#define RX_SUSPEND 0x0030 +#define RX_ABORT 0x0040 + +#define TX_TIMEOUT (HZ/20) + + +struct i596_reg { + unsigned short porthi; + unsigned short portlo; + unsigned long ca; +}; + +#define EOF 0x8000 +#define SIZE_MASK 0x3fff + +struct i596_tbd { + unsigned short size; + unsigned short pad; + struct i596_tbd *next; + char *data; +}; + +/* The command structure has two 'next' pointers; v_next is the address of + * the next command as seen by the CPU, b_next is the address of the next + * command as seen by the 82596. The b_next pointer, as used by the 82596 + * always references the status field of the next command, rather than the + * v_next field, because the 82596 is unaware of v_next. It may seem more + * logical to put v_next at the end of the structure, but we cannot do that + * because the 82596 expects other fields to be there, depending on command + * type. 
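+ * (Editor's illustration: for two queued commands A and B,
+ *
+ *	CPU view:    A.v_next --> B
+ *	82596 view:  A.b_next --> &B.status   (the chip never sees v_next)
+ *
+ * which is why i596_add_cmd() links commands with
+ * WSWAPcmd(virt_to_bus(&cmd->status)).)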
+ */ + +struct i596_cmd { + struct i596_cmd *v_next; /* Address from CPUs viewpoint */ + unsigned short status; + unsigned short command; + struct i596_cmd *b_next; /* Address from i596 viewpoint */ +}; + +struct tx_cmd { + struct i596_cmd cmd; + struct i596_tbd *tbd; + unsigned short size; + unsigned short pad; + struct sk_buff *skb; /* So we can free it after tx */ +}; + +struct tdr_cmd { + struct i596_cmd cmd; + unsigned short status; + unsigned short pad; +}; + +struct mc_cmd { + struct i596_cmd cmd; + short mc_cnt; + char mc_addrs[MAX_MC_CNT*6]; +}; + +struct sa_cmd { + struct i596_cmd cmd; + char eth_addr[8]; +}; + +struct cf_cmd { + struct i596_cmd cmd; + char i596_config[16]; +}; + +struct i596_rfd { + unsigned short stat; + unsigned short cmd; + struct i596_rfd *b_next; /* Address from i596 viewpoint */ + struct i596_rbd *rbd; + unsigned short count; + unsigned short size; + struct i596_rfd *v_next; /* Address from CPUs viewpoint */ + struct i596_rfd *v_prev; +}; + +struct i596_rbd { + unsigned short count; + unsigned short zero1; + struct i596_rbd *b_next; + unsigned char *b_data; /* Address from i596 viewpoint */ + unsigned short size; + unsigned short zero2; + struct sk_buff *skb; + struct i596_rbd *v_next; + struct i596_rbd *b_addr; /* This rbd addr from i596 view */ + unsigned char *v_data; /* Address from CPUs viewpoint */ +}; + +#define TX_RING_SIZE 64 +#define RX_RING_SIZE 16 + +struct i596_scb { + unsigned short status; + unsigned short command; + struct i596_cmd *cmd; + struct i596_rfd *rfd; + unsigned long crc_err; + unsigned long align_err; + unsigned long resource_err; + unsigned long over_err; + unsigned long rcvdt_err; + unsigned long short_err; + unsigned short t_on; + unsigned short t_off; +}; + +struct i596_iscp { + unsigned long stat; + struct i596_scb *scb; +}; + +struct i596_scp { + unsigned long sysbus; + unsigned long pad; + struct i596_iscp *iscp; +}; + +struct i596_private { + volatile struct i596_scp scp; + volatile struct i596_iscp iscp; + volatile struct i596_scb scb; + struct sa_cmd sa_cmd; + struct cf_cmd cf_cmd; + struct tdr_cmd tdr_cmd; + struct mc_cmd mc_cmd; + unsigned long stat; + int last_restart __attribute__((aligned(4))); + struct i596_rfd *rfd_head; + struct i596_rbd *rbd_head; + struct i596_cmd *cmd_tail; + struct i596_cmd *cmd_head; + int cmd_backlog; + unsigned long last_cmd; + struct i596_rfd rfds[RX_RING_SIZE]; + struct i596_rbd rbds[RX_RING_SIZE]; + struct tx_cmd tx_cmds[TX_RING_SIZE]; + struct i596_tbd tbds[TX_RING_SIZE]; + int next_tx_cmd; + spinlock_t lock; +}; + +static char init_setup[] = +{ + 0x8E, /* length, prefetch on */ + 0xC8, /* fifo to 8, monitor off */ +#ifdef CONFIG_VME + 0xc0, /* don't save bad frames */ +#else + 0x80, /* don't save bad frames */ +#endif + 0x2E, /* No source address insertion, 8 byte preamble */ + 0x00, /* priority and backoff defaults */ + 0x60, /* interframe spacing */ + 0x00, /* slot time LSB */ + 0xf2, /* slot time and retries */ + 0x00, /* promiscuous mode */ + 0x00, /* collision detect */ + 0x40, /* minimum frame length */ + 0xff, + 0x00, + 0x7f /* *multi IA */ }; + +static int i596_open(struct net_device *dev); +static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t i596_interrupt(int irq, void *dev_id); +static int i596_close(struct net_device *dev); +static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); +static void i596_tx_timeout (struct net_device *dev); +static void print_eth(unsigned char *buf, char *str); +static void 
set_multicast_list(struct net_device *dev); + +static int rx_ring_size = RX_RING_SIZE; +static int ticks_limit = 25; +static int max_cmd_backlog = TX_RING_SIZE-1; + + +static inline void CA(struct net_device *dev) +{ +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + ((struct i596_reg *) dev->base_addr)->ca = 1; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile u32 i; + + i = *(volatile u32 *) (dev->base_addr); + } +#endif +#ifdef ENABLE_APRICOT + if (MACH_IS_APRICOT) { + outw(0, (short) (dev->base_addr) + 4); + } +#endif +} + + +static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x) +{ +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + struct i596_reg *p = (struct i596_reg *) (dev->base_addr); + p->porthi = ((c) | (u32) (x)) & 0xffff; + p->portlo = ((c) | (u32) (x)) >> 16; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + u32 v = (u32) (c) | (u32) (x); + v = ((u32) (v) << 16) | ((u32) (v) >> 16); + *(volatile u32 *) dev->base_addr = v; + udelay(1); + *(volatile u32 *) dev->base_addr = v; + } +#endif +} + + +static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str) +{ + while (--delcnt && lp->iscp.stat) + udelay(10); + if (!delcnt) { + printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n", + dev->name, str, lp->scb.status, lp->scb.command); + return -1; + } + else + return 0; +} + + +static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str) +{ + while (--delcnt && lp->scb.command) + udelay(10); + if (!delcnt) { + printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n", + dev->name, str, lp->scb.status, lp->scb.command); + return -1; + } + else + return 0; +} + + +static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str) +{ + volatile struct i596_cmd *c = cmd; + + while (--delcnt && c->command) + udelay(10); + if (!delcnt) { + printk(KERN_ERR "%s: %s.\n", dev->name, str); + return -1; + } + else + return 0; +} + + +static void i596_display_data(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + struct i596_cmd *cmd; + struct i596_rfd *rfd; + struct i596_rbd *rbd; + + printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n", + &lp->scp, lp->scp.sysbus, lp->scp.iscp); + printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n", + &lp->iscp, lp->iscp.stat, lp->iscp.scb); + printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x," + " .cmd = %p, .rfd = %p\n", + &lp->scb, lp->scb.status, lp->scb.command, + lp->scb.cmd, lp->scb.rfd); + printk(KERN_ERR " errors: crc %lx, align %lx, resource %lx," + " over %lx, rcvdt %lx, short %lx\n", + lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err, + lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err); + cmd = lp->cmd_head; + while (cmd != I596_NULL) { + printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n", + cmd, cmd->status, cmd->command, cmd->b_next); + cmd = cmd->v_next; + } + rfd = lp->rfd_head; + printk(KERN_ERR "rfd_head = %p\n", rfd); + do { + printk(KERN_ERR " %p .stat %04x, .cmd %04x, b_next %p, rbd %p," + " count %04x\n", + rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd, + rfd->count); + rfd = rfd->v_next; + } while (rfd != lp->rfd_head); + rbd = lp->rbd_head; + printk(KERN_ERR "rbd_head = %p\n", rbd); + do { + printk(KERN_ERR " %p .count %04x, b_next %p, b_data %p, size %04x\n", + rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size); + rbd = rbd->v_next; + } while (rbd != 
lp->rbd_head); +} + + +#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET) +static irqreturn_t i596_error(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; + + pcc2[0x28] = 1; + pcc2[0x2b] = 0x1d; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; + + *ethirq = 1; + *ethirq = 3; + } +#endif + printk(KERN_ERR "%s: Error interrupt\n", dev->name); + i596_display_data(dev); + return IRQ_HANDLED; +} +#endif + +static inline void remove_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + struct i596_rbd *rbd; + int i; + + for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { + if (rbd->skb == NULL) + break; + dev_kfree_skb(rbd->skb); + rbd->skb = NULL; + } +} + +static inline int init_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + int i; + struct i596_rfd *rfd; + struct i596_rbd *rbd; + + /* First build the Receive Buffer Descriptor List */ + + for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { + struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ); + + if (skb == NULL) { + remove_rx_bufs(dev); + return -ENOMEM; + } + + skb->dev = dev; + rbd->v_next = rbd+1; + rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1)); + rbd->b_addr = WSWAPrbd(virt_to_bus(rbd)); + rbd->skb = skb; + rbd->v_data = skb->data; + rbd->b_data = WSWAPchar(virt_to_bus(skb->data)); + rbd->size = PKT_BUF_SZ; +#ifdef __mc68000__ + cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ); +#endif + } + lp->rbd_head = lp->rbds; + rbd = lp->rbds + rx_ring_size - 1; + rbd->v_next = lp->rbds; + rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds)); + + /* Now build the Receive Frame Descriptor List */ + + for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) { + rfd->rbd = I596_NULL; + rfd->v_next = rfd+1; + rfd->v_prev = rfd-1; + rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1)); + rfd->cmd = CMD_FLEX; + } + lp->rfd_head = lp->rfds; + lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds)); + rfd = lp->rfds; + rfd->rbd = lp->rbd_head; + rfd->v_prev = lp->rfds + rx_ring_size - 1; + rfd = lp->rfds + rx_ring_size - 1; + rfd->v_next = lp->rfds; + rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds)); + rfd->cmd = CMD_EOL|CMD_FLEX; + + return 0; +} + + +static void rebuild_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + int i; + + /* Ensure rx frame/buffer descriptors are tidy */ + + for (i = 0; i < rx_ring_size; i++) { + lp->rfds[i].rbd = I596_NULL; + lp->rfds[i].cmd = CMD_FLEX; + } + lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX; + lp->rfd_head = lp->rfds; + lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds)); + lp->rbd_head = lp->rbds; + lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds)); +} + + +static int init_i596_mem(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; +#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT) + short ioaddr = dev->base_addr; +#endif + unsigned long flags; + + MPU_PORT(dev, PORT_RESET, NULL); + + udelay(100); /* Wait 100us - seems to help */ + +#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET) +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; + + /* Disable all ints for now */ + pcc2[0x28] = 1; + pcc2[0x2a] = 0x48; + /* Following disables snooping. 
Snooping is not required + * as we make appropriate use of non-cached pages for + * shared data, and cache_push/cache_clear. + */ + pcc2[0x2b] = 0x08; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; + + *ethirq = 1; + } +#endif + + /* change the scp address */ + + MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp)); + +#elif defined(ENABLE_APRICOT) + + { + u32 scp = virt_to_bus(&lp->scp); + + /* change the scp address */ + outw(0, ioaddr); + outw(0, ioaddr); + outb(4, ioaddr + 0xf); + outw(scp | 2, ioaddr); + outw(scp >> 16, ioaddr); + } +#endif + + lp->last_cmd = jiffies; + +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) + lp->scp.sysbus = 0x00000054; +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) + lp->scp.sysbus = 0x0000004c; +#endif +#ifdef ENABLE_APRICOT + if (MACH_IS_APRICOT) + lp->scp.sysbus = 0x00440000; +#endif + + lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp)); + lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb)); + lp->iscp.stat = ISCP_BUSY; + lp->cmd_backlog = 0; + + lp->cmd_head = lp->scb.cmd = I596_NULL; + +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + lp->scb.t_on = 7 * 25; + lp->scb.t_off = 1 * 25; + } +#endif + + DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name)); + +#if defined(ENABLE_APRICOT) + (void) inb(ioaddr + 0x10); + outb(4, ioaddr + 0xf); +#endif + CA(dev); + + if (wait_istat(dev,lp,1000,"initialization timed out")) + goto failed; + DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name)); + + /* Ensure rx frame/buffer descriptors are tidy */ + rebuild_rx_bufs(dev); + lp->scb.command = 0; + +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; + + /* Enable ints, etc. 
now */ + pcc2[0x2a] = 0x55; /* Edge sensitive */ + pcc2[0x2b] = 0x15; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; + + *ethirq = 3; + } +#endif + + + DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name)); + memcpy(lp->cf_cmd.i596_config, init_setup, 14); + lp->cf_cmd.cmd.command = CmdConfigure; + i596_add_cmd(dev, &lp->cf_cmd.cmd); + + DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); + memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6); + lp->sa_cmd.cmd.command = CmdSASetup; + i596_add_cmd(dev, &lp->sa_cmd.cmd); + + DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name)); + lp->tdr_cmd.cmd.command = CmdTDR; + i596_add_cmd(dev, &lp->tdr_cmd.cmd); + + spin_lock_irqsave (&lp->lock, flags); + + if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) { + spin_unlock_irqrestore (&lp->lock, flags); + goto failed; + } + DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name)); + lp->scb.command = RX_START; + CA(dev); + + spin_unlock_irqrestore (&lp->lock, flags); + + if (wait_cmd(dev,lp,1000,"RX_START not processed")) + goto failed; + DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name)); + return 0; + +failed: + printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name); + MPU_PORT(dev, PORT_RESET, NULL); + return -1; +} + +static inline int i596_rx(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + struct i596_rfd *rfd; + struct i596_rbd *rbd; + int frames = 0; + + DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n", + lp->rfd_head, lp->rbd_head)); + + rfd = lp->rfd_head; /* Ref next frame to check */ + + while ((rfd->stat) & STAT_C) { /* Loop while complete frames */ + if (rfd->rbd == I596_NULL) + rbd = I596_NULL; + else if (rfd->rbd == lp->rbd_head->b_addr) + rbd = lp->rbd_head; + else { + printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name); + /* XXX Now what? */ + rbd = I596_NULL; + } + DEB(DEB_RXFRAME, printk(KERN_DEBUG " rfd %p, rfd.rbd %p, rfd.stat %04x\n", + rfd, rfd->rbd, rfd->stat)); + + if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) { + /* a good frame */ + int pkt_len = rbd->count & 0x3fff; + struct sk_buff *skb = rbd->skb; + int rx_in_place = 0; + + DEB(DEB_RXADDR,print_eth(rbd->v_data, "received")); + frames++; + + /* Check if the packet is long enough to just accept + * without copying to a properly sized skbuff. + */ + + if (pkt_len > rx_copybreak) { + struct sk_buff *newskb; + + /* Get fresh skbuff to replace filled one. */ + newskb = dev_alloc_skb(PKT_BUF_SZ); + if (newskb == NULL) { + skb = NULL; /* drop pkt */ + goto memory_squeeze; + } + /* Pass up the skb already on the Rx ring. */ + skb_put(skb, pkt_len); + rx_in_place = 1; + rbd->skb = newskb; + newskb->dev = dev; + rbd->v_data = newskb->data; + rbd->b_data = WSWAPchar(virt_to_bus(newskb->data)); +#ifdef __mc68000__ + cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ); +#endif + } + else + skb = dev_alloc_skb(pkt_len + 2); +memory_squeeze: + if (skb == NULL) { + /* XXX tulip.c can defer packets here!! 
*/ + printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name); + dev->stats.rx_dropped++; + } + else { + if (!rx_in_place) { + /* 16 byte align the data fields */ + skb_reserve(skb, 2); + memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len); + } + skb->protocol=eth_type_trans(skb,dev); + skb->len = pkt_len; +#ifdef __mc68000__ + cache_clear(virt_to_phys(rbd->skb->data), + pkt_len); +#endif + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes+=pkt_len; + } + } + else { + DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n", + dev->name, rfd->stat)); + dev->stats.rx_errors++; + if ((rfd->stat) & 0x0001) + dev->stats.collisions++; + if ((rfd->stat) & 0x0080) + dev->stats.rx_length_errors++; + if ((rfd->stat) & 0x0100) + dev->stats.rx_over_errors++; + if ((rfd->stat) & 0x0200) + dev->stats.rx_fifo_errors++; + if ((rfd->stat) & 0x0400) + dev->stats.rx_frame_errors++; + if ((rfd->stat) & 0x0800) + dev->stats.rx_crc_errors++; + if ((rfd->stat) & 0x1000) + dev->stats.rx_length_errors++; + } + + /* Clear the buffer descriptor count and EOF + F flags */ + + if (rbd != I596_NULL && (rbd->count & 0x4000)) { + rbd->count = 0; + lp->rbd_head = rbd->v_next; + } + + /* Tidy the frame descriptor, marking it as end of list */ + + rfd->rbd = I596_NULL; + rfd->stat = 0; + rfd->cmd = CMD_EOL|CMD_FLEX; + rfd->count = 0; + + /* Remove end-of-list from old end descriptor */ + + rfd->v_prev->cmd = CMD_FLEX; + + /* Update record of next frame descriptor to process */ + + lp->scb.rfd = rfd->b_next; + lp->rfd_head = rfd->v_next; + rfd = lp->rfd_head; + } + + DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames)); + + return 0; +} + + +static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) +{ + struct i596_cmd *ptr; + + while (lp->cmd_head != I596_NULL) { + ptr = lp->cmd_head; + lp->cmd_head = ptr->v_next; + lp->cmd_backlog--; + + switch ((ptr->command) & 0x7) { + case CmdTx: + { + struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; + struct sk_buff *skb = tx_cmd->skb; + + dev_kfree_skb(skb); + + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + + ptr->v_next = ptr->b_next = I596_NULL; + tx_cmd->cmd.command = 0; /* Mark as free */ + break; + } + default: + ptr->v_next = ptr->b_next = I596_NULL; + } + } + + wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out"); + lp->scb.cmd = I596_NULL; +} + +static void i596_reset(struct net_device *dev, struct i596_private *lp, + int ioaddr) +{ + unsigned long flags; + + DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n")); + + spin_lock_irqsave (&lp->lock, flags); + + wait_cmd(dev,lp,100,"i596_reset timed out"); + + netif_stop_queue(dev); + + lp->scb.command = CUC_ABORT | RX_ABORT; + CA(dev); + + /* wait for shutdown */ + wait_cmd(dev,lp,1000,"i596_reset 2 timed out"); + spin_unlock_irqrestore (&lp->lock, flags); + + i596_cleanup_cmd(dev,lp); + i596_rx(dev); + + netif_start_queue(dev); + init_i596_mem(dev); +} + +static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) +{ + struct i596_private *lp = dev->ml_priv; + int ioaddr = dev->base_addr; + unsigned long flags; + + DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n")); + + cmd->status = 0; + cmd->command |= (CMD_EOL | CMD_INTR); + cmd->v_next = cmd->b_next = I596_NULL; + + spin_lock_irqsave (&lp->lock, flags); + + if (lp->cmd_head != I596_NULL) { + lp->cmd_tail->v_next = cmd; + lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status)); + } else { + lp->cmd_head = cmd; + wait_cmd(dev,lp,100,"i596_add_cmd timed out"); + lp->scb.cmd = 
WSWAPcmd(virt_to_bus(&cmd->status)); + lp->scb.command = CUC_START; + CA(dev); + } + lp->cmd_tail = cmd; + lp->cmd_backlog++; + + spin_unlock_irqrestore (&lp->lock, flags); + + if (lp->cmd_backlog > max_cmd_backlog) { + unsigned long tickssofar = jiffies - lp->last_cmd; + + if (tickssofar < ticks_limit) + return; + + printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name); + + i596_reset(dev, lp, ioaddr); + } +} + +static int i596_open(struct net_device *dev) +{ + int res = 0; + + DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq)); + + if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) { + printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); + return -EAGAIN; + } +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) { + res = -EAGAIN; + goto err_irq_dev; + } + } +#endif + res = init_rx_bufs(dev); + if (res) + goto err_irq_56; + + netif_start_queue(dev); + + if (init_i596_mem(dev)) { + res = -EAGAIN; + goto err_queue; + } + + return 0; + +err_queue: + netif_stop_queue(dev); + remove_rx_bufs(dev); +err_irq_56: +#ifdef ENABLE_MVME16x_NET + free_irq(0x56, dev); +err_irq_dev: +#endif + free_irq(dev->irq, dev); + + return res; +} + +static void i596_tx_timeout (struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + int ioaddr = dev->base_addr; + + /* Transmitter timeout, serious problems. */ + DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n", + dev->name)); + + dev->stats.tx_errors++; + + /* Try to restart the adaptor */ + if (lp->last_restart == dev->stats.tx_packets) { + DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n")); + /* Shutdown and restart */ + i596_reset (dev, lp, ioaddr); + } else { + /* Issue a channel attention signal */ + DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n")); + lp->scb.command = CUC_START | RX_START; + CA (dev); + lp->last_restart = dev->stats.tx_packets; + } + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue (dev); +} + +static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + struct tx_cmd *tx_cmd; + struct i596_tbd *tbd; + short length = skb->len; + + DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n", + dev->name, skb->len, skb->data)); + + if (skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + length = ETH_ZLEN; + } + netif_stop_queue(dev); + + tx_cmd = lp->tx_cmds + lp->next_tx_cmd; + tbd = lp->tbds + lp->next_tx_cmd; + + if (tx_cmd->cmd.command) { + printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n", + dev->name); + dev->stats.tx_dropped++; + + dev_kfree_skb(skb); + } else { + if (++lp->next_tx_cmd == TX_RING_SIZE) + lp->next_tx_cmd = 0; + tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd)); + tbd->next = I596_NULL; + + tx_cmd->cmd.command = CMD_FLEX | CmdTx; + tx_cmd->skb = skb; + + tx_cmd->pad = 0; + tx_cmd->size = 0; + tbd->pad = 0; + tbd->size = EOF | length; + + tbd->data = WSWAPchar(virt_to_bus(skb->data)); + +#ifdef __mc68000__ + cache_push(virt_to_phys(skb->data), length); +#endif + DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued")); + i596_add_cmd(dev, &tx_cmd->cmd); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += length; + } + + netif_start_queue(dev); + + return NETDEV_TX_OK; +} + +static void print_eth(unsigned char *add, char *str) +{ + printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n", + add, add + 6, add, 
add[12], add[13], str); +} + +static int io = 0x300; +static int irq = 10; + +static const struct net_device_ops i596_netdev_ops = { + .ndo_open = i596_open, + .ndo_stop = i596_close, + .ndo_start_xmit = i596_start_xmit, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = i596_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +struct net_device * __init i82596_probe(int unit) +{ + struct net_device *dev; + int i; + struct i596_private *lp; + char eth_addr[8]; + static int probed; + int err; + + if (probed) + return ERR_PTR(-ENODEV); + probed++; + + dev = alloc_etherdev(0); + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } else { + dev->base_addr = io; + dev->irq = irq; + } + +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) { + printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n"); + err = -ENODEV; + goto out; + } + memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */ + dev->base_addr = MVME_I596_BASE; + dev->irq = (unsigned) MVME16x_IRQ_I596; + goto found; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE; + unsigned char msr = rtc[3]; + int i; + + rtc[3] |= 0x80; + for (i = 0; i < 6; i++) + eth_addr[i] = rtc[i * 4 + 7]; /* Stored in RTC RAM at offset 1 */ + rtc[3] = msr; + dev->base_addr = BVME_I596_BASE; + dev->irq = (unsigned) BVME_IRQ_I596; + goto found; + } +#endif +#ifdef ENABLE_APRICOT + { + int checksum = 0; + int ioaddr = 0x300; + + /* this is easy the ethernet interface can only be at 0x300 */ + /* first check nothing is already registered here */ + + if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) { + printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr); + err = -EBUSY; + goto out; + } + + dev->base_addr = ioaddr; + + for (i = 0; i < 8; i++) { + eth_addr[i] = inb(ioaddr + 8 + i); + checksum += eth_addr[i]; + } + + /* checksum is a multiple of 0x100, got this wrong first time + some machines have 0x100, some 0x200. The DOS driver doesn't + even bother with the checksum. + Some other boards trip the checksum.. but then appear as + ether address 0. Trap these - AC */ + + if ((checksum % 0x100) || + (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) { + err = -ENODEV; + goto out1; + } + + dev->irq = 10; + goto found; + } +#endif + err = -ENODEV; + goto out; + +found: + dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0); + if (!dev->mem_start) { + err = -ENOMEM; + goto out1; + } + + DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr)); + + for (i = 0; i < 6; i++) + DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i])); + + DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq)); + + DEB(DEB_PROBE,printk(KERN_INFO "%s", version)); + + /* The 82596-specific entries in the device structure. 
*/ + dev->netdev_ops = &i596_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + dev->ml_priv = (void *)(dev->mem_start); + + lp = dev->ml_priv; + DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), " + "lp->scb at 0x%08lx\n", + dev->name, (unsigned long)lp, + sizeof(struct i596_private), (unsigned long)&lp->scb)); + memset((void *) lp, 0, sizeof(struct i596_private)); + +#ifdef __mc68000__ + cache_push(virt_to_phys((void *)(dev->mem_start)), 4096); + cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096); + kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER); +#endif + lp->scb.command = 0; + lp->scb.cmd = I596_NULL; + lp->scb.rfd = I596_NULL; + spin_lock_init(&lp->lock); + + err = register_netdev(dev); + if (err) + goto out2; + return dev; +out2: +#ifdef __mc68000__ + /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, + * XXX which may be invalid (CONFIG_060_WRITETHROUGH) + */ + kernel_set_cachemode((void *)(dev->mem_start), 4096, + IOMAP_FULL_CACHING); +#endif + free_page ((u32)(dev->mem_start)); +out1: +#ifdef ENABLE_APRICOT + release_region(dev->base_addr, I596_TOTAL_SIZE); +#endif +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static irqreturn_t i596_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct i596_private *lp; + short ioaddr; + unsigned short status, ack_cmd = 0; + int handled = 0; + +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) { + i596_error(irq, dev_id); + return IRQ_HANDLED; + } + } +#endif + if (dev == NULL) { + printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq); + return IRQ_NONE; + } + + ioaddr = dev->base_addr; + lp = dev->ml_priv; + + spin_lock (&lp->lock); + + wait_cmd(dev,lp,100,"i596 interrupt, timeout"); + status = lp->scb.status; + + DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n", + dev->name, irq, status)); + + ack_cmd = status & 0xf000; + + if ((status & 0x8000) || (status & 0x2000)) { + struct i596_cmd *ptr; + + handled = 1; + if ((status & 0x8000)) + DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name)); + if ((status & 0x2000)) + DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700)); + + while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) { + ptr = lp->cmd_head; + + DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n", + lp->cmd_head->status, lp->cmd_head->command)); + lp->cmd_head = ptr->v_next; + lp->cmd_backlog--; + + switch ((ptr->command) & 0x7) { + case CmdTx: + { + struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; + struct sk_buff *skb = tx_cmd->skb; + + if ((ptr->status) & STAT_OK) { + DEB(DEB_TXADDR,print_eth(skb->data, "tx-done")); + } else { + dev->stats.tx_errors++; + if ((ptr->status) & 0x0020) + dev->stats.collisions++; + if (!((ptr->status) & 0x0040)) + dev->stats.tx_heartbeat_errors++; + if ((ptr->status) & 0x0400) + dev->stats.tx_carrier_errors++; + if ((ptr->status) & 0x0800) + dev->stats.collisions++; + if ((ptr->status) & 0x1000) + dev->stats.tx_aborted_errors++; + } + + dev_kfree_skb_irq(skb); + + tx_cmd->cmd.command = 0; /* Mark free */ + break; + } + case CmdTDR: + { + unsigned short status = ((struct tdr_cmd *)ptr)->status; + + if (status & 0x8000) { + DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name)); + } else { + if (status & 0x4000) + printk(KERN_ERR "%s: Transceiver problem.\n", dev->name); + if (status 
& 0x2000) + printk(KERN_ERR "%s: Termination problem.\n", dev->name); + if (status & 0x1000) + printk(KERN_ERR "%s: Short circuit.\n", dev->name); + + DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff)); + } + break; + } + case CmdConfigure: + case CmdMulticastList: + /* Zap command so set_multicast_list() knows it is free */ + ptr->command = 0; + break; + } + ptr->v_next = ptr->b_next = I596_NULL; + lp->last_cmd = jiffies; + } + + ptr = lp->cmd_head; + while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) { + ptr->command &= 0x1fff; + ptr = ptr->v_next; + } + + if ((lp->cmd_head != I596_NULL)) + ack_cmd |= CUC_START; + lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status)); + } + if ((status & 0x1000) || (status & 0x4000)) { + if ((status & 0x4000)) + DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name)); + i596_rx(dev); + /* Only RX_START if stopped - RGH 07-07-96 */ + if (status & 0x1000) { + if (netif_running(dev)) { + DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status)); + ack_cmd |= RX_START; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + rebuild_rx_bufs(dev); + } + } + } + wait_cmd(dev,lp,100,"i596 interrupt, timeout"); + lp->scb.command = ack_cmd; + +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + /* Ack the interrupt */ + + volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; + + pcc2[0x2a] |= 0x08; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; + + *ethirq = 1; + *ethirq = 3; + } +#endif +#ifdef ENABLE_APRICOT + (void) inb(ioaddr + 0x10); + outb(4, ioaddr + 0xf); +#endif + CA(dev); + + DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); + + spin_unlock (&lp->lock); + return IRQ_RETVAL(handled); +} + +static int i596_close(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + unsigned long flags; + + netif_stop_queue(dev); + + DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n", + dev->name, lp->scb.status)); + + spin_lock_irqsave(&lp->lock, flags); + + wait_cmd(dev,lp,100,"close1 timed out"); + lp->scb.command = CUC_ABORT | RX_ABORT; + CA(dev); + + wait_cmd(dev,lp,100,"close2 timed out"); + + spin_unlock_irqrestore(&lp->lock, flags); + DEB(DEB_STRUCT,i596_display_data(dev)); + i596_cleanup_cmd(dev,lp); + +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; + + /* Disable all ints */ + pcc2[0x28] = 1; + pcc2[0x2a] = 0x40; + pcc2[0x2b] = 0x40; /* Set snooping bits now! */ + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; + + *ethirq = 1; + } +#endif + +#ifdef ENABLE_MVME16x_NET + free_irq(0x56, dev); +#endif + free_irq(dev->irq, dev); + remove_rx_bufs(dev); + + return 0; +} + +/* + * Set or clear the multicast filter for this adaptor. + */ + +static void set_multicast_list(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + int config = 0, cnt; + + DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n", + dev->name, netdev_mc_count(dev), + dev->flags & IFF_PROMISC ? "ON" : "OFF", + dev->flags & IFF_ALLMULTI ? 
"ON" : "OFF")); + + if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out")) + return; + + if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) { + lp->cf_cmd.i596_config[8] |= 0x01; + config = 1; + } + if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) { + lp->cf_cmd.i596_config[8] &= ~0x01; + config = 1; + } + if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) { + lp->cf_cmd.i596_config[11] &= ~0x20; + config = 1; + } + if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) { + lp->cf_cmd.i596_config[11] |= 0x20; + config = 1; + } + if (config) { + lp->cf_cmd.cmd.command = CmdConfigure; + i596_add_cmd(dev, &lp->cf_cmd.cmd); + } + + cnt = netdev_mc_count(dev); + if (cnt > MAX_MC_CNT) + { + cnt = MAX_MC_CNT; + printk(KERN_ERR "%s: Only %d multicast addresses supported", + dev->name, cnt); + } + + if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + unsigned char *cp; + struct mc_cmd *cmd; + + if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out")) + return; + cmd = &lp->mc_cmd; + cmd->cmd.command = CmdMulticastList; + cmd->mc_cnt = cnt * ETH_ALEN; + cp = cmd->mc_addrs; + netdev_for_each_mc_addr(ha, dev) { + if (!cnt--) + break; + memcpy(cp, ha->addr, ETH_ALEN); + if (i596_debug > 1) + DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n", + dev->name, cp)); + cp += ETH_ALEN; + } + i596_add_cmd(dev, &cmd->cmd); + } +} + +#ifdef MODULE +static struct net_device *dev_82596; + +#ifdef ENABLE_APRICOT +module_param(irq, int, 0); +MODULE_PARM_DESC(irq, "Apricot IRQ number"); +#endif + +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "i82596 debug mask"); + +int __init init_module(void) +{ + if (debug >= 0) + i596_debug = debug; + dev_82596 = i82596_probe(-1); + if (IS_ERR(dev_82596)) + return PTR_ERR(dev_82596); + return 0; +} + +void __exit cleanup_module(void) +{ + unregister_netdev(dev_82596); +#ifdef __mc68000__ + /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, + * XXX which may be invalid (CONFIG_060_WRITETHROUGH) + */ + + kernel_set_cachemode((void *)(dev_82596->mem_start), 4096, + IOMAP_FULL_CACHING); +#endif + free_page ((u32)(dev_82596->mem_start)); +#ifdef ENABLE_APRICOT + /* If we don't do this, we can't re-insmod it later. */ + release_region(dev_82596->base_addr, I596_TOTAL_SIZE); +#endif + free_netdev(dev_82596); +} + +#endif /* MODULE */ diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig new file mode 100644 index 0000000..5c30a5b --- /dev/null +++ b/drivers/net/ethernet/i825xx/Kconfig @@ -0,0 +1,182 @@ +# +# Intel 82596/82593/82596 network device configuration +# + +config NET_VENDOR_I825XX + bool "Intel (82586/82593/82596) devices" + depends on NET_VENDOR_INTEL && (ISA || ISA_DMA_API || ARM || \ + ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \ + GSC || BVME6000 || MVME16x || EXPERIMENTAL) + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question does not directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about these devices. If you say Y, you will be asked for + your specific card in the following questions. 
+ +if NET_VENDOR_I825XX + +config ELPLUS + tristate "3c505 \"EtherLink Plus\" support" + depends on ISA && ISA_DMA_API + ---help--- + Information about this network (Ethernet) card can be found in + <file:Documentation/networking/3c505.txt>. If you have a card of + this type, say Y and read the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + + To compile this driver as a module, choose M here. The module + will be called 3c505. + +config EL16 + tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)" + depends on ISA && EXPERIMENTAL + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + + To compile this driver as a module, choose M here. The module + will be called 3c507. + +config ELMC + tristate "3c523 \"EtherLink/MC\" support" + depends on MCA_LEGACY + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + + To compile this driver as a module, choose M here. The module + will be called 3c523. + +config ELMC_II + tristate "3c527 \"EtherLink/MC 32\" support (EXPERIMENTAL)" + depends on MCA && MCA_LEGACY + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + + To compile this driver as a module, choose M here. The module + will be called 3c527. + +config ARM_ETHER1 + tristate "Acorn Ether1 support" + depends on ARM && ARCH_ACORN + ---help--- + If you have an Acorn system with one of these (AKA25) network cards, + you should say Y to this option if you wish to use it with Linux. + +config APRICOT + tristate "Apricot Xen-II on board Ethernet" + depends on ISA + ---help--- + If you have a network (Ethernet) controller of this type, say Y and + read the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + + To compile this driver as a module, choose M here. The module + will be called apricot. + +config BVME6000_NET + tristate "BVME6000 Ethernet support" + depends on BVME6000 + ---help--- + This is the driver for the Ethernet interface on BVME4000 and + BVME6000 VME boards. Say Y here to include the driver for this chip + in your kernel. + To compile this driver as a module, choose M here. + +config EEXPRESS + tristate "EtherExpress 16 support" + depends on ISA + ---help--- + If you have an EtherExpress16 network (Ethernet) card, say Y and + read the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. Note that the Intel + EtherExpress16 card used to be regarded as a very poor choice + because the driver was very unreliable. We now have a new driver + that should do better. + + To compile this driver as a module, choose M here. The module + will be called eexpress. + +config EEXPRESS_PRO + tristate "EtherExpressPro support/EtherExpress 10 (i82595) support" + depends on ISA + ---help--- + If you have a network (Ethernet) card of this type, say Y. This + driver supports Intel i82595{FX,TX} based boards. Note however + that the EtherExpress PRO/100 Ethernet card has its own separate + driver. Please read the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + + To compile this driver as a module, choose M here. The module + will be called eepro. + +config LASI_82596 + tristate "Lasi ethernet" + depends on GSC + ---help--- + Say Y here to support the builtin Intel 82596 ethernet controller + found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet.
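+ +# As a usage sketch (hypothetical .config fragment, not part of this patch), +# enabling the vendor guard and two of the entries above as modules gives: +# +# CONFIG_NET_VENDOR_I825XX=y +# CONFIG_EEXPRESS=m +# CONFIG_APRICOT=m +# +# where, per the i825xx Makefile added below, EEXPRESS builds eexpress.o +# and APRICOT builds the shared 82596.o driver.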
+ +config LP486E + tristate "LP486E on board Ethernet" + depends on ISA + ---help--- + Say Y here to support the 82596-based on-board Ethernet controller + for the Panther motherboard, which is one of the two shipped in the + Intel Professional Workstation. + +config MVME16x_NET + tristate "MVME16x Ethernet support" + depends on MVME16x + ---help--- + This is the driver for the Ethernet interface on the Motorola + MVME162, 166, 167, 172 and 177 boards. Say Y here to include the + driver for this chip in your kernel. + To compile this driver as a module, choose M here. + +config NI52 + tristate "NI5210 support" + depends on ISA + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + + To compile this driver as a module, choose M here. The module + will be called ni52. + +config SNI_82596 + tristate "SNI RM ethernet" + depends on SNI_RM + ---help--- + Say Y here to support the on-board Intel 82596 ethernet controller + built into SNI RM machines. + +config SUN3_82586 + bool "Sun3 on-board Intel 82586 support" + depends on SUN3 + ---help--- + This driver enables support for the on-board Intel 82586 based + Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards. Note + that this driver does not support 82586-based adapters on additional + VME boards. + +config ZNET + tristate "Zenith Z-Note support (EXPERIMENTAL)" + depends on EXPERIMENTAL && ISA_DMA_API + ---help--- + The Zenith Z-Note notebook computer has a built-in network + (Ethernet) card, and this is the Linux driver for it. Note that the + IBM Thinkpad 300 is compatible with the Z-Note and is also supported + by this driver. Read the Ethernet-HOWTO, available from + <http://www.tldp.org/docs.html#howto>. + +endif # NET_VENDOR_I825XX diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile new file mode 100644 index 0000000..f68a369 --- /dev/null +++ b/drivers/net/ethernet/i825xx/Makefile @@ -0,0 +1,20 @@ +# +# Makefile for the Intel 82586/82593/82596 chipset device drivers. +# + +obj-$(CONFIG_ARM_ETHER1) += ether1.o +obj-$(CONFIG_EEXPRESS) += eexpress.o +obj-$(CONFIG_EEXPRESS_PRO) += eepro.o +obj-$(CONFIG_ELPLUS) += 3c505.o +obj-$(CONFIG_EL16) += 3c507.o +obj-$(CONFIG_ELMC) += 3c523.o +obj-$(CONFIG_ELMC_II) += 3c527.o +obj-$(CONFIG_LP486E) += lp486e.o +obj-$(CONFIG_NI52) += ni52.o +obj-$(CONFIG_SUN3_82586) += sun3_82586.o +obj-$(CONFIG_ZNET) += znet.o +obj-$(CONFIG_APRICOT) += 82596.o +obj-$(CONFIG_LASI_82596) += lasi_82596.o +obj-$(CONFIG_SNI_82596) += sni_82596.o +obj-$(CONFIG_MVME16x_NET) += 82596.o +obj-$(CONFIG_BVME6000_NET) += 82596.o diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c new file mode 100644 index 0000000..dfeb006 --- /dev/null +++ b/drivers/net/ethernet/i825xx/eepro.c @@ -0,0 +1,1822 @@ +/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */ +/* + Written 1994, 1995,1996 by Bao C. Ha. + + Copyright (C) 1994, 1995,1996 by Bao C. Ha. + + This software may be used and distributed + according to the terms of the GNU General Public License, + incorporated herein by reference. + + The author may be reached at bao.ha@srs.gov + or 418 Hastings Place, Martinez, GA 30907. + + Things remaining to do: + Better record keeping of errors. + Eliminate transmit interrupt to reduce overhead. + Implement "concurrent processing". I won't be doing it! + + Bugs: + + If you have a problem of not detecting the 82595 during a + reboot (warm reset), disabling the FLASH memory should fix it.
+ This is a compatibility hardware problem. + + Versions: + 0.13b basic ethtool support (aris, 09/13/2004) + 0.13a in memory shortage, drop packets also in board + (Michael Westermann , 07/30/2002) + 0.13 irq sharing, rewrote probe function, fixed a nasty bug in + hardware_send_packet and a major cleanup (aris, 11/08/2001) + 0.12d fixing a problem with single card detected as eight eth devices + fixing a problem with sudden drop in card performance + (chris (asdn@go2.pl), 10/29/2001) + 0.12c fixing some problems with old cards (aris, 01/08/2001) + 0.12b misc fixes (aris, 06/26/2000) + 0.12a port of version 0.12a of 2.2.x kernels to 2.3.x + (aris (aris@conectiva.com.br), 05/19/2000) + 0.11e some tweaks about multiple cards support (PdP, jul/aug 1999) + 0.11d added __initdata, __init stuff; call spin_lock_init + in eepro_probe1. Replaced "eepro" by dev->name. Augmented + the code protected by spin_lock in interrupt routine + (PdP, 12/12/1998) + 0.11c minor cleanup (PdP, RMC, 09/12/1998) + 0.11b Pascal Dupuis (dupuis@lei.ucl.ac.be): works as a module + under 2.1.xx. Debug messages are flagged as KERN_DEBUG to + avoid console flooding. Added locking at critical parts. Now + the dawn thing is SMP safe. + 0.11a Attempt to get 2.1.xx support up (RMC) + 0.11 Brian Candler added support for multiple cards. Tested as + a module, no idea if it works when compiled into kernel. + + 0.10e Rick Bressler notified me that ifconfig up;ifconfig down fails + because the irq is lost somewhere. Fixed that by moving + request_irq and free_irq to eepro_open and eepro_close respectively. + 0.10d Ugh! Now Wakeup works. Was seriously broken in my first attempt. + I'll need to find a way to specify an ioport other than + the default one in the PnP case. PnP definitively sucks. + And, yes, this is not the only reason. + 0.10c PnP Wakeup Test for 595FX. uncomment #define PnPWakeup; + to use. + 0.10b Should work now with (some) Pro/10+. At least for + me (and my two cards) it does. _No_ guarantee for + function with non-Pro/10+ cards! (don't have any) + (RMC, 9/11/96) + + 0.10 Added support for the Etherexpress Pro/10+. The + IRQ map was changed significantly from the old + pro/10. The new interrupt map was provided by + Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu). + (BCH, 9/3/96) + + 0.09 Fixed a race condition in the transmit algorithm, + which causes crashes under heavy load with fast + pentium computers. The performance should also + improve a bit. The size of RX buffer, and hence + TX buffer, can also be changed via lilo or insmod. + (BCH, 7/31/96) + + 0.08 Implement 32-bit I/O for the 82595TX and 82595FX + based lan cards. Disable full-duplex mode if TPE + is not used. (BCH, 4/8/96) + + 0.07a Fix a stat report which counts every packet as a + heart-beat failure. (BCH, 6/3/95) + + 0.07 Modified to support all other 82595-based lan cards. + The IRQ vector of the EtherExpress Pro will be set + according to the value saved in the EEPROM. For other + cards, I will do autoirq_request() to grab the next + available interrupt vector. (BCH, 3/17/95) + + 0.06a,b Interim released. Minor changes in the comments and + print out format. (BCH, 3/9/95 and 3/14/95) + + 0.06 First stable release that I am comfortable with. (BCH, + 3/2/95) + + 0.05 Complete testing of multicast. (BCH, 2/23/95) + + 0.04 Adding multicast support. (BCH, 2/14/95) + + 0.03 First widely alpha release for public testing. 
+ (BCH, 2/14/95) + +*/ + +static const char version[] = + "eepro.c: v0.13b 09/13/2004 aris@cathedrallabs.org\n"; + +#include <linux/module.h> + +/* + Sources: + + This driver wouldn't have been written without the availability + of the Crynwr's Lan595 driver source code. It helps me to + familiarize with the 82595 chipset while waiting for the Intel + documentation. I also learned how to detect the 82595 using + the packet driver's technique. + + This driver is written by cutting and pasting the skeleton.c driver + provided by Donald Becker. I also borrowed the EEPROM routine from + Donald Becker's 82586 driver. + + Datasheet for the Intel 82595 (including the TX and FX version). It + provides just enough info that the casual reader might think that it + documents the i82595. + + The User Manual for the 82595. It provides a lot of the missing + information. + +*/ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/in.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/bitops.h> +#include <linux/ethtool.h> + +#include <asm/system.h> +#include <asm/io.h> +#include <asm/dma.h> + +#define DRV_NAME "eepro" +#define DRV_VERSION "0.13c" + +#define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) ) +/* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */ +#define SLOW_DOWN inb(0x80) +/* udelay(2) */ +#define compat_init_data __initdata +enum iftype { AUI=0, BNC=1, TPE=2 }; + +/* First, a few definitions that the brave might change. */ +/* A zero-terminated list of I/O addresses to be probed. */ +static unsigned int eepro_portlist[] compat_init_data = + { 0x300, 0x210, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0}; +/* note: 0x300 is default, the 595FX supports ALL IO Ports + from 0x000 to 0x3F0, some of which are reserved in PCs */ + +/* To try the (not-really PnP Wakeup: */ +/* +#define PnPWakeup +*/ + +/* use 0 for production, 1 for verification, >2 for debug */ +#ifndef NET_DEBUG +#define NET_DEBUG 0 +#endif +static unsigned int net_debug = NET_DEBUG; + +/* The number of low I/O ports used by the ethercard. */ +#define EEPRO_IO_EXTENT 16 + +/* Different 82595 chips */ +#define LAN595 0 +#define LAN595TX 1 +#define LAN595FX 2 +#define LAN595FX_10ISA 3 + +/* Information that needs to be kept for each board. */ +struct eepro_local { + unsigned rx_start; + unsigned tx_start; /* start of the transmit chain */ + int tx_last; /* pointer to last packet in the transmit chain */ + unsigned tx_end; /* end of the transmit chain (plus 1) */ + int eepro; /* 1 for the EtherExpress Pro/10, + 2 for the EtherExpress Pro/10+, + 3 for the EtherExpress 10 (blue cards), + 0 for other 82595-based lan cards. */ + int version; /* a flag to indicate if this is a TX or FX + version of the 82595 chip. */ + int stepping; + + spinlock_t lock; /* Serializing lock */ + + unsigned rcv_ram; /* pre-calculated space for rx */ + unsigned xmt_ram; /* pre-calculated space for tx */ + unsigned char xmt_bar; + unsigned char xmt_lower_limit_reg; + unsigned char xmt_upper_limit_reg; + short xmt_lower_limit; + short xmt_upper_limit; + short rcv_lower_limit; + short rcv_upper_limit; + unsigned char eeprom_reg; + unsigned short word[8]; +}; + +/* The station (ethernet) address prefix, used for IDing the board. */ +#define SA_ADDR0 0x00 /* Etherexpress Pro/10 */ +#define SA_ADDR1 0xaa +#define SA_ADDR2 0x00 + +#define GetBit(x,y) ((x & (1<<y))>>y) + +/* EEPROM Word 0: */ +#define ee_PnP 0 /* Plug 'n Play enable bit */ +#define ee_Word1 1 /* Word 1?
*/ +#define ee_BusWidth 2 /* 8/16 bit */ +#define ee_FlashAddr 3 /* Flash Address */ +#define ee_FlashMask 0x7 /* Mask */ +#define ee_AutoIO 6 /* */ +#define ee_reserved0 7 /* =0! */ +#define ee_Flash 8 /* Flash there? */ +#define ee_AutoNeg 9 /* Auto Negotiation enabled? */ +#define ee_IO0 10 /* IO Address LSB */ +#define ee_IO0Mask 0x /*...*/ +#define ee_IO1 15 /* IO MSB */ + +/* EEPROM Word 1: */ +#define ee_IntSel 0 /* Interrupt */ +#define ee_IntMask 0x7 +#define ee_LI 3 /* Link Integrity 0= enabled */ +#define ee_PC 4 /* Polarity Correction 0= enabled */ +#define ee_TPE_AUI 5 /* PortSelection 1=TPE */ +#define ee_Jabber 6 /* Jabber prevention 0= enabled */ +#define ee_AutoPort 7 /* Auto Port Selection 1= Disabled */ +#define ee_SMOUT 8 /* SMout Pin Control 0= Input */ +#define ee_PROM 9 /* Flash EPROM / PROM 0=Flash */ +#define ee_reserved1 10 /* .. 12 =0! */ +#define ee_AltReady 13 /* Alternate Ready, 0=normal */ +#define ee_reserved2 14 /* =0! */ +#define ee_Duplex 15 + +/* Word2,3,4: */ +#define ee_IA5 0 /*bit start for individual Addr Byte 5 */ +#define ee_IA4 8 /*bit start for individual Addr Byte 4 */ +#define ee_IA3 0 /*bit start for individual Addr Byte 3 */ +#define ee_IA2 8 /*bit start for individual Addr Byte 2 */ +#define ee_IA1 0 /*bit start for individual Addr Byte 1 */ +#define ee_IA0 8 /*bit start for individual Addr Byte 0 */ + +/* Word 5: */ +#define ee_BNC_TPE 0 /* 0=TPE */ +#define ee_BootType 1 /* 00=None, 01=IPX, 10=ODI, 11=NDIS */ +#define ee_BootTypeMask 0x3 +#define ee_NumConn 3 /* Number of Connections 0= One or Two */ +#define ee_FlashSock 4 /* Presence of Flash Socket 0= Present */ +#define ee_PortTPE 5 +#define ee_PortBNC 6 +#define ee_PortAUI 7 +#define ee_PowerMgt 10 /* 0= disabled */ +#define ee_CP 13 /* Concurrent Processing */ +#define ee_CPMask 0x7 + +/* Word 6: */ +#define ee_Stepping 0 /* Stepping info */ +#define ee_StepMask 0x0F +#define ee_BoardID 4 /* Manufacturer Board ID, reserved */ +#define ee_BoardMask 0x0FFF + +/* Word 7: */ +#define ee_INT_TO_IRQ 0 /* int to IRQ Mapping = 0x1EB8 for Pro/10+ */ +#define ee_FX_INT2IRQ 0x1EB8 /* the _only_ mapping allowed for FX chips */ + +/*..*/ +#define ee_SIZE 0x40 /* total EEprom Size */ +#define ee_Checksum 0xBABA /* initial and final value for adding checksum */ + + +/* Card identification via EEprom: */ +#define ee_addr_vendor 0x10 /* Word offset for EISA Vendor ID */ +#define ee_addr_id 0x11 /* Word offset for Card ID */ +#define ee_addr_SN 0x12 /* Serial Number */ +#define ee_addr_CRC_8 0x14 /* CRC over last three Bytes */ + + +#define ee_vendor_intel0 0x25 /* Vendor ID Intel */ +#define ee_vendor_intel1 0xD4 +#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */ +#define ee_id_eepro10p1 0x31 + +#define TX_TIMEOUT ((4*HZ)/10) + +/* Index to functions, as function prototypes.
*/ + +static int eepro_probe1(struct net_device *dev, int autoprobe); +static int eepro_open(struct net_device *dev); +static netdev_tx_t eepro_send_packet(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t eepro_interrupt(int irq, void *dev_id); +static void eepro_rx(struct net_device *dev); +static void eepro_transmit_interrupt(struct net_device *dev); +static int eepro_close(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void eepro_tx_timeout (struct net_device *dev); + +static int read_eeprom(int ioaddr, int location, struct net_device *dev); +static int hardware_send_packet(struct net_device *dev, void *buf, short length); +static int eepro_grab_irq(struct net_device *dev); + +/* + Details of the i82595. + +You will need either the datasheet or the user manual to understand what +is going on here. The 82595 is very different from the 82586, 82593. + +The receive algorithm in eepro_rx() is just an implementation of the +RCV ring structure that the Intel 82595 imposes at the hardware level. +The receive buffer is set at 24K, and the transmit buffer is 8K. I +am assuming that the total buffer memory is 32K, which is true for the +Intel EtherExpress Pro/10. If it is less than that on a generic card, +the driver will be broken. + +The transmit algorithm in the hardware_send_packet() is similar to the +one in the eepro_rx(). The transmit buffer is a ring linked list. +I just queue the next available packet to the end of the list. In my +system, the 82595 is so fast that the list seems to always contain a +single packet. In other systems with faster computers and more congested +network traffic, the ring linked list should improve performance by +allowing up to 8K worth of packets to be queued. + +The sizes of the receive and transmit buffers can now be changed via lilo +or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0" +where rx-buffer is in KB unit. Modules use the parameter mem which is +also in KB unit, for example "insmod io=io-address irq=0 mem=rx-buffer." +The receive buffer has to be more than 3K and less than 29K. Otherwise, +it is reset to the default of 24K, and, hence, 8K for the transmit +buffer (transmit-buffer = 32K - receive-buffer).
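+ +A worked example of the sizing rule above: booting with the line +"ether=0x300,10,0,24,eth0" (a hypothetical io/irq pairing) requests a 24K +receive buffer, which is inside the valid 3K..29K window, so the transmit +buffer becomes 32K - 24K = 8K; these match the RCV_DEFAULT_RAM (0x6000) and +XMT_DEFAULT_RAM (0x2000) defaults defined below. An out-of-range request, +say 2K, is reset to the same default 24K/8K split.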
+ +*/ +#define RAM_SIZE 0x8000 + +#define RCV_HEADER 8 +#define RCV_DEFAULT_RAM 0x6000 + +#define XMT_HEADER 8 +#define XMT_DEFAULT_RAM (RAM_SIZE - RCV_DEFAULT_RAM) + +#define XMT_START_PRO RCV_DEFAULT_RAM +#define XMT_START_10 0x0000 +#define RCV_START_PRO 0x0000 +#define RCV_START_10 XMT_DEFAULT_RAM + +#define RCV_DONE 0x0008 +#define RX_OK 0x2000 +#define RX_ERROR 0x0d81 + +#define TX_DONE_BIT 0x0080 +#define TX_OK 0x2000 +#define CHAIN_BIT 0x8000 +#define XMT_STATUS 0x02 +#define XMT_CHAIN 0x04 +#define XMT_COUNT 0x06 + +#define BANK0_SELECT 0x00 +#define BANK1_SELECT 0x40 +#define BANK2_SELECT 0x80 + +/* Bank 0 registers */ +#define COMMAND_REG 0x00 /* Register 0 */ +#define MC_SETUP 0x03 +#define XMT_CMD 0x04 +#define DIAGNOSE_CMD 0x07 +#define RCV_ENABLE_CMD 0x08 +#define RCV_DISABLE_CMD 0x0a +#define STOP_RCV_CMD 0x0b +#define RESET_CMD 0x0e +#define POWER_DOWN_CMD 0x18 +#define RESUME_XMT_CMD 0x1c +#define SEL_RESET_CMD 0x1e +#define STATUS_REG 0x01 /* Register 1 */ +#define RX_INT 0x02 +#define TX_INT 0x04 +#define EXEC_STATUS 0x30 +#define ID_REG 0x02 /* Register 2 */ +#define R_ROBIN_BITS 0xc0 /* round robin counter */ +#define ID_REG_MASK 0x2c +#define ID_REG_SIG 0x24 +#define AUTO_ENABLE 0x10 +#define INT_MASK_REG 0x03 /* Register 3 */ +#define RX_STOP_MASK 0x01 +#define RX_MASK 0x02 +#define TX_MASK 0x04 +#define EXEC_MASK 0x08 +#define ALL_MASK 0x0f +#define IO_32_BIT 0x10 +#define RCV_BAR 0x04 /* The following are word (16-bit) registers */ +#define RCV_STOP 0x06 + +#define XMT_BAR_PRO 0x0a +#define XMT_BAR_10 0x0b + +#define HOST_ADDRESS_REG 0x0c +#define IO_PORT 0x0e +#define IO_PORT_32_BIT 0x0c + +/* Bank 1 registers */ +#define REG1 0x01 +#define WORD_WIDTH 0x02 +#define INT_ENABLE 0x80 +#define INT_NO_REG 0x02 +#define RCV_LOWER_LIMIT_REG 0x08 +#define RCV_UPPER_LIMIT_REG 0x09 + +#define XMT_LOWER_LIMIT_REG_PRO 0x0a +#define XMT_UPPER_LIMIT_REG_PRO 0x0b +#define XMT_LOWER_LIMIT_REG_10 0x0b +#define XMT_UPPER_LIMIT_REG_10 0x0a + +/* Bank 2 registers */ +#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */ +#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */ +#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */ +#define REG2 0x02 +#define PRMSC_Mode 0x01 +#define Multi_IA 0x20 +#define REG3 0x03 +#define TPE_BIT 0x04 +#define BNC_BIT 0x20 +#define REG13 0x0d +#define FDX 0x00 +#define A_N_ENABLE 0x02 + +#define I_ADD_REG0 0x04 +#define I_ADD_REG1 0x05 +#define I_ADD_REG2 0x06 +#define I_ADD_REG3 0x07 +#define I_ADD_REG4 0x08 +#define I_ADD_REG5 0x09 + +#define EEPROM_REG_PRO 0x0a +#define EEPROM_REG_10 0x0b + +#define EESK 0x01 +#define EECS 0x02 +#define EEDI 0x04 +#define EEDO 0x08 + +/* do a full reset */ +#define eepro_reset(ioaddr) outb(RESET_CMD, ioaddr) + +/* do a nice reset */ +#define eepro_sel_reset(ioaddr) { \ + outb(SEL_RESET_CMD, ioaddr); \ + SLOW_DOWN; \ + SLOW_DOWN; \ + } + +/* disable all interrupts */ +#define eepro_dis_int(ioaddr) outb(ALL_MASK, ioaddr + INT_MASK_REG) + +/* clear all interrupts */ +#define eepro_clear_int(ioaddr) outb(ALL_MASK, ioaddr + STATUS_REG) + +/* enable tx/rx */ +#define eepro_en_int(ioaddr) outb(ALL_MASK & ~(RX_MASK | TX_MASK), \ + ioaddr + INT_MASK_REG) + +/* enable exec event interrupt */ +#define eepro_en_intexec(ioaddr) outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG) + +/* enable rx */ +#define eepro_en_rx(ioaddr) outb(RCV_ENABLE_CMD, ioaddr) + +/* disable rx */ +#define eepro_dis_rx(ioaddr) 
outb(RCV_DISABLE_CMD, ioaddr) + +/* switch bank */ +#define eepro_sw2bank0(ioaddr) outb(BANK0_SELECT, ioaddr) +#define eepro_sw2bank1(ioaddr) outb(BANK1_SELECT, ioaddr) +#define eepro_sw2bank2(ioaddr) outb(BANK2_SELECT, ioaddr) + +/* enable interrupt line */ +#define eepro_en_intline(ioaddr) outb(inb(ioaddr + REG1) | INT_ENABLE,\ + ioaddr + REG1) + +/* disable interrupt line */ +#define eepro_dis_intline(ioaddr) outb(inb(ioaddr + REG1) & 0x7f, \ + ioaddr + REG1); + +/* set diagnose flag */ +#define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr) + +/* ack for rx int */ +#define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG) + +/* ack for tx int */ +#define eepro_ack_tx(ioaddr) outb (TX_INT, ioaddr + STATUS_REG) + +/* a complete sel reset */ +#define eepro_complete_selreset(ioaddr) { \ + dev->stats.tx_errors++;\ + eepro_sel_reset(ioaddr);\ + lp->tx_end = \ + lp->xmt_lower_limit;\ + lp->tx_start = lp->tx_end;\ + lp->tx_last = 0;\ + dev->trans_start = jiffies;\ + netif_wake_queue(dev);\ + eepro_en_rx(ioaddr);\ + } + +/* Check for a network adaptor of this type, and return '0' if one exists. + If dev->base_addr == 0, probe all likely locations. + If dev->base_addr == 1, always return failure. + If dev->base_addr == 2, allocate space for the device and return success + (detachable devices only). + */ +static int __init do_eepro_probe(struct net_device *dev) +{ + int i; + int base_addr = dev->base_addr; + int irq = dev->irq; + +#ifdef PnPWakeup + /* XXXX for multiple cards should this only be run once? */ + + /* Wakeup: */ + #define WakeupPort 0x279 + #define WakeupSeq {0x6A, 0xB5, 0xDA, 0xED, 0xF6, 0xFB, 0x7D, 0xBE,\ + 0xDF, 0x6F, 0x37, 0x1B, 0x0D, 0x86, 0xC3, 0x61,\ + 0xB0, 0x58, 0x2C, 0x16, 0x8B, 0x45, 0xA2, 0xD1,\ + 0xE8, 0x74, 0x3A, 0x9D, 0xCE, 0xE7, 0x73, 0x43} + + { + unsigned short int WS[32]=WakeupSeq; + + if (request_region(WakeupPort, 2, "eepro wakeup")) { + if (net_debug>5) + printk(KERN_DEBUG "Waking UP\n"); + + outb_p(0,WakeupPort); + outb_p(0,WakeupPort); + for (i=0; i<32; i++) { + outb_p(WS[i],WakeupPort); + if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]); + } + + release_region(WakeupPort, 2); + } else + printk(KERN_WARNING "PnP wakeup region busy!\n"); + } +#endif + + if (base_addr > 0x1ff) /* Check a single specified location. */ + return eepro_probe1(dev, 0); + + else if (base_addr != 0) /* Don't probe at all. 
*/ + return -ENXIO; + + for (i = 0; eepro_portlist[i]; i++) { + dev->base_addr = eepro_portlist[i]; + dev->irq = irq; + if (eepro_probe1(dev, 1) == 0) + return 0; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init eepro_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct eepro_local)); + int err; + + if (!dev) + return ERR_PTR(-ENODEV); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_eepro_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static void __init printEEPROMInfo(struct net_device *dev) +{ + struct eepro_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + unsigned short Word; + int i,j; + + j = ee_Checksum; + for (i = 0; i < 8; i++) + j += lp->word[i]; + for ( ; i < ee_SIZE; i++) + j += read_eeprom(ioaddr, i, dev); + + printk(KERN_DEBUG "Checksum: %#x\n",j&0xffff); + + Word = lp->word[0]; + printk(KERN_DEBUG "Word0:\n"); + printk(KERN_DEBUG " Plug 'n Pray: %d\n",GetBit(Word,ee_PnP)); + printk(KERN_DEBUG " Buswidth: %d\n",(GetBit(Word,ee_BusWidth)+1)*8 ); + printk(KERN_DEBUG " AutoNegotiation: %d\n",GetBit(Word,ee_AutoNeg)); + printk(KERN_DEBUG " IO Address: %#x\n", (Word>>ee_IO0)<<4); + + if (net_debug>4) { + Word = lp->word[1]; + printk(KERN_DEBUG "Word1:\n"); + printk(KERN_DEBUG " INT: %d\n", Word & ee_IntMask); + printk(KERN_DEBUG " LI: %d\n", GetBit(Word,ee_LI)); + printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC)); + printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI)); + printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber)); + printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort)); + printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex)); + } + + Word = lp->word[5]; + printk(KERN_DEBUG "Word5:\n"); + printk(KERN_DEBUG " BNC: %d\n",GetBit(Word,ee_BNC_TPE)); + printk(KERN_DEBUG " NumConnectors: %d\n",GetBit(Word,ee_NumConn)); + printk(KERN_DEBUG " Has "); + if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE "); + if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC "); + if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI "); + printk(KERN_DEBUG "port(s)\n"); + + Word = lp->word[6]; + printk(KERN_DEBUG "Word6:\n"); + printk(KERN_DEBUG " Stepping: %d\n",Word & ee_StepMask); + printk(KERN_DEBUG " BoardID: %d\n",Word>>ee_BoardID); + + Word = lp->word[7]; + printk(KERN_DEBUG "Word7:\n"); + printk(KERN_DEBUG " INT to IRQ:\n"); + + for (i=0, j=0; i<15; i++) + if (GetBit(Word,i)) printk(KERN_DEBUG " INT%d -> IRQ %d;",j++,i); + + printk(KERN_DEBUG "\n"); +} + +/* function to recalculate the limits of buffer based on rcv_ram */ +static void eepro_recalc (struct net_device *dev) +{ + struct eepro_local * lp; + + lp = netdev_priv(dev); + lp->xmt_ram = RAM_SIZE - lp->rcv_ram; + + if (lp->eepro == LAN595FX_10ISA) { + lp->xmt_lower_limit = XMT_START_10; + lp->xmt_upper_limit = (lp->xmt_ram - 2); + lp->rcv_lower_limit = lp->xmt_ram; + lp->rcv_upper_limit = (RAM_SIZE - 2); + } + else { + lp->rcv_lower_limit = RCV_START_PRO; + lp->rcv_upper_limit = (lp->rcv_ram - 2); + lp->xmt_lower_limit = lp->rcv_ram; + lp->xmt_upper_limit = (RAM_SIZE - 2); + } +} + +/* prints boot-time info */ +static void __init eepro_print_info (struct net_device *dev) +{ + struct eepro_local * lp = netdev_priv(dev); + int i; + const char * ifmap[] = {"AUI", "10Base2", "10BaseT"}; + + i = inb(dev->base_addr + ID_REG); + printk(KERN_DEBUG " id: %#x ",i); + printk(" io: %#x ", (unsigned)dev->base_addr); + + switch (lp->eepro) { + case 
LAN595FX_10ISA: + printk("%s: Intel EtherExpress 10 ISA\n at %#x,", + dev->name, (unsigned)dev->base_addr); + break; + case LAN595FX: + printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,", + dev->name, (unsigned)dev->base_addr); + break; + case LAN595TX: + printk("%s: Intel EtherExpress Pro/10 ISA at %#x,", + dev->name, (unsigned)dev->base_addr); + break; + case LAN595: + printk("%s: Intel 82595-based lan card at %#x,", + dev->name, (unsigned)dev->base_addr); + break; + } + + printk(" %pM", dev->dev_addr); + + if (net_debug > 3) + printk(KERN_DEBUG ", %dK RCV buffer", + (int)(lp->rcv_ram)/1024); + + if (dev->irq > 2) + printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]); + else + printk(", %s.\n", ifmap[dev->if_port]); + + if (net_debug > 3) { + i = lp->word[5]; + if (i & 0x2000) /* bit 13 of EEPROM word 5 */ + printk(KERN_DEBUG "%s: Concurrent Processing is " + "enabled but not used!\n", dev->name); + } + + /* Check the station address for the manufacturer's code */ + if (net_debug>3) + printEEPROMInfo(dev); +} + +static const struct ethtool_ops eepro_ethtool_ops; + +static const struct net_device_ops eepro_netdev_ops = { + .ndo_open = eepro_open, + .ndo_stop = eepro_close, + .ndo_start_xmit = eepro_send_packet, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = eepro_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/* This is the real probe routine. Linux has a history of friendly device + probes on the ISA bus. A good device probe avoids doing writes, and + verifies that the correct device exists and functions. */ + +static int __init eepro_probe1(struct net_device *dev, int autoprobe) +{ + unsigned short station_addr[3], id, counter; + int i; + struct eepro_local *lp; + int ioaddr = dev->base_addr; + int err; + + /* Grab the region so we can find another board if autoIRQ fails. */ + if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { + if (!autoprobe) + printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n", + ioaddr); + return -EBUSY; + } + + /* Now, we are going to check for the signature of the + ID_REG (register 2 of bank 0) */ + + id = inb(ioaddr + ID_REG); + + if ((id & ID_REG_MASK) != ID_REG_SIG) + goto exit; + + /* We seem to have the 82595 signature, let's + play with its counter (last 2 bits of + register 2 of bank 0) to be sure. */ + + counter = id & R_ROBIN_BITS; + + if ((inb(ioaddr + ID_REG) & R_ROBIN_BITS) != (counter + 0x40)) + goto exit; + + lp = netdev_priv(dev); + memset(lp, 0, sizeof(struct eepro_local)); + lp->xmt_bar = XMT_BAR_PRO; + lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_PRO; + lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_PRO; + lp->eeprom_reg = EEPROM_REG_PRO; + spin_lock_init(&lp->lock); + + /* Now, get the ethernet hardware address from + the EEPROM */ + station_addr[0] = read_eeprom(ioaddr, 2, dev); + + /* FIXME - find another way to know that we've found + * an Etherexpress 10 + */ + if (station_addr[0] == 0x0000 || station_addr[0] == 0xffff) { + lp->eepro = LAN595FX_10ISA; + lp->eeprom_reg = EEPROM_REG_10; + lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_10; + lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_10; + lp->xmt_bar = XMT_BAR_10; + station_addr[0] = read_eeprom(ioaddr, 2, dev); + } + + /* get all words at once. 
will be used here and for ethtool */ + for (i = 0; i < 8; i++) { + lp->word[i] = read_eeprom(ioaddr, i, dev); + } + station_addr[1] = lp->word[3]; + station_addr[2] = lp->word[4]; + + if (!lp->eepro) { + if (lp->word[7] == ee_FX_INT2IRQ) + lp->eepro = 2; + else if (station_addr[2] == SA_ADDR1) + lp->eepro = 1; + } + + /* Fill in the 'dev' fields. */ + for (i=0; i < 6; i++) + dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i]; + + /* RX buffer must be more than 3K and less than 29K */ + if (dev->mem_end < 3072 || dev->mem_end > 29696) + lp->rcv_ram = RCV_DEFAULT_RAM; + + /* calculate {xmt,rcv}_{lower,upper}_limit */ + eepro_recalc(dev); + + if (GetBit(lp->word[5], ee_BNC_TPE)) + dev->if_port = BNC; + else + dev->if_port = TPE; + + if (dev->irq < 2 && lp->eepro != 0) { + /* Mask off INT number */ + int count = lp->word[1] & 7; + unsigned irqMask = lp->word[7]; + + while (count--) + irqMask &= irqMask - 1; + + count = ffs(irqMask); + + if (count) + dev->irq = count - 1; + + if (dev->irq < 2) { + printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n"); + goto exit; + } else if (dev->irq == 2) { + dev->irq = 9; + } + } + + dev->netdev_ops = &eepro_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + dev->ethtool_ops = &eepro_ethtool_ops; + + /* print boot time info */ + eepro_print_info(dev); + + /* reset 82595 */ + eepro_reset(ioaddr); + + err = register_netdev(dev); + if (err) + goto err; + return 0; +exit: + err = -ENODEV; +err: + release_region(dev->base_addr, EEPRO_IO_EXTENT); + return err; +} + +/* Open/initialize the board. This is called (in the current kernel) + sometime after booting when the 'ifconfig' program is run. + + This routine should set everything up anew at each open, even + registers that "should" only need to be set once at boot, so that + there is non-reboot way to recover if something goes wrong. + */ + +static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1}; +static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1}; +static int eepro_grab_irq(struct net_device *dev) +{ + static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 }; + const int *irqp = irqlist; + int temp_reg, ioaddr = dev->base_addr; + + eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ + + /* Enable the interrupt line. */ + eepro_en_intline(ioaddr); + + /* be CAREFUL, BANK 0 now */ + eepro_sw2bank0(ioaddr); + + /* clear all interrupts */ + eepro_clear_int(ioaddr); + + /* Let EXEC event to interrupt */ + eepro_en_intexec(ioaddr); + + do { + eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ + + temp_reg = inb(ioaddr + INT_NO_REG); + outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG); + + eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ + + if (request_irq (*irqp, NULL, IRQF_SHARED, "bogus", dev) != EBUSY) { + unsigned long irq_mask; + /* Twinkle the interrupt, and check if it's seen */ + irq_mask = probe_irq_on(); + + eepro_diag(ioaddr); /* RESET the 82595 */ + mdelay(20); + + if (*irqp == probe_irq_off(irq_mask)) /* It's a good IRQ line */ + break; + + /* clear all interrupts */ + eepro_clear_int(ioaddr); + } + } while (*++irqp); + + eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */ + + /* Disable the physical interrupt line. */ + eepro_dis_intline(ioaddr); + + eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ + + /* Mask all the interrupts. 
*/ + eepro_dis_int(ioaddr); + + /* clear all interrupts */ + eepro_clear_int(ioaddr); + + return dev->irq; +} + +static int eepro_open(struct net_device *dev) +{ + unsigned short temp_reg, old8, old9; + int irqMask; + int i, ioaddr = dev->base_addr; + struct eepro_local *lp = netdev_priv(dev); + + if (net_debug > 3) + printk(KERN_DEBUG "%s: entering eepro_open routine.\n", dev->name); + + irqMask = lp->word[7]; + + if (lp->eepro == LAN595FX_10ISA) { + if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 3;\n"); + } + else if (irqMask == ee_FX_INT2IRQ) /* INT to IRQ Mask */ + { + lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */ + if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 2;\n"); + } + + else if ((dev->dev_addr[0] == SA_ADDR0 && + dev->dev_addr[1] == SA_ADDR1 && + dev->dev_addr[2] == SA_ADDR2)) + { + lp->eepro = 1; + if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 1;\n"); + } /* Yes, an Intel EtherExpress Pro/10 */ + + else lp->eepro = 0; /* No, it is a generic 82595-based lan card */ + + /* Get the interrupt vector for the 82595 */ + if (dev->irq < 2 && eepro_grab_irq(dev) == 0) { + printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); + return -EAGAIN; + } + + if (request_irq(dev->irq , eepro_interrupt, 0, dev->name, dev)) { + printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); + return -EAGAIN; + } + + /* Initialize the 82595. */ + + eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ + temp_reg = inb(ioaddr + lp->eeprom_reg); + + lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */ + + if (net_debug > 3) + printk(KERN_DEBUG "The stepping of the 82595 is %d\n", lp->stepping); + + if (temp_reg & 0x10) /* Check the TurnOff Enable bit */ + outb(temp_reg & 0xef, ioaddr + lp->eeprom_reg); + for (i=0; i < 6; i++) + outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i); + + temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */ + outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */ + | RCV_Discard_BadFrame, ioaddr + REG1); + + temp_reg = inb(ioaddr + REG2); /* Match broadcast */ + outb(temp_reg | 0x14, ioaddr + REG2); + + temp_reg = inb(ioaddr + REG3); + outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */ + + /* Set the receiving mode */ + eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ + + /* Set the interrupt vector */ + temp_reg = inb(ioaddr + INT_NO_REG); + if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA) + outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG); + else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG); + + + temp_reg = inb(ioaddr + INT_NO_REG); + if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA) + outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG); + else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG); + + if (net_debug > 3) + printk(KERN_DEBUG "eepro_open: content of INT Reg is %x\n", temp_reg); + + + /* Initialize the RCV and XMT upper and lower limits */ + outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG); + outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG); + outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg); + outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg); + + /* Enable the interrupt line.
*/ + eepro_en_intline(ioaddr); + + /* Switch back to Bank 0 */ + eepro_sw2bank0(ioaddr); + + /* Let RX and TX events to interrupt */ + eepro_en_int(ioaddr); + + /* clear all interrupts */ + eepro_clear_int(ioaddr); + + /* Initialize RCV */ + outw(lp->rcv_lower_limit, ioaddr + RCV_BAR); + lp->rx_start = lp->rcv_lower_limit; + outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP); + + /* Initialize XMT */ + outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar); + lp->tx_start = lp->tx_end = lp->xmt_lower_limit; + lp->tx_last = 0; + + /* Check for the i82595TX and i82595FX */ + old8 = inb(ioaddr + 8); + outb(~old8, ioaddr + 8); + + if ((temp_reg = inb(ioaddr + 8)) == old8) { + if (net_debug > 3) + printk(KERN_DEBUG "i82595 detected!\n"); + lp->version = LAN595; + } + else { + lp->version = LAN595TX; + outb(old8, ioaddr + 8); + old9 = inb(ioaddr + 9); + + if (irqMask==ee_FX_INT2IRQ) { + if (net_debug > 3) { + printk(KERN_DEBUG "IrqMask: %#x\n",irqMask); + printk(KERN_DEBUG "i82595FX detected!\n"); + } + lp->version = LAN595FX; + outb(old9, ioaddr + 9); + if (dev->if_port != TPE) { /* Hopefully, this will fix the + problem of using Pentiums and + pro/10 w/ BNC. */ + eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ + temp_reg = inb(ioaddr + REG13); + /* disable the full duplex mode since it is not + applicable with the 10Base2 cable. */ + outb(temp_reg & ~(FDX | A_N_ENABLE), REG13); + eepro_sw2bank0(ioaddr); /* be CAREFUL, BANK 0 now */ + } + } + else if (net_debug > 3) { + printk(KERN_DEBUG "temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff)); + printk(KERN_DEBUG "i82595TX detected!\n"); + } + } + + eepro_sel_reset(ioaddr); + + netif_start_queue(dev); + + if (net_debug > 3) + printk(KERN_DEBUG "%s: exiting eepro_open routine.\n", dev->name); + + /* enabling rx */ + eepro_en_rx(ioaddr); + + return 0; +} + +static void eepro_tx_timeout (struct net_device *dev) +{ + struct eepro_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + /* if (net_debug > 1) */ + printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name, + "network cable problem"); + /* This is not a duplicate. One message for the console, + one for the log file */ + printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name, + "network cable problem"); + eepro_complete_selreset(ioaddr); +} + + +static netdev_tx_t eepro_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct eepro_local *lp = netdev_priv(dev); + unsigned long flags; + int ioaddr = dev->base_addr; + short length = skb->len; + + if (net_debug > 5) + printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name); + + if (length < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + length = ETH_ZLEN; + } + netif_stop_queue (dev); + + eepro_dis_int(ioaddr); + spin_lock_irqsave(&lp->lock, flags); + + { + unsigned char *buf = skb->data; + + if (hardware_send_packet(dev, buf, length)) + /* we won't wake queue here because we're out of space */ + dev->stats.tx_dropped++; + else { + dev->stats.tx_bytes+=skb->len; + netif_wake_queue(dev); + } + + } + + dev_kfree_skb (skb); + + /* You might need to clean up and record Tx statistics here. */ + /* dev->stats.tx_aborted_errors++; */ + + if (net_debug > 5) + printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name); + + eepro_en_int(ioaddr); + spin_unlock_irqrestore(&lp->lock, flags); + + return NETDEV_TX_OK; +} + + +/* The typical workload of the driver: + Handle the network interface interrupts. 
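[Editorial note: the handler that follows drains receive and transmit events
in a bounded loop; the boguscount cap is the driver's guard against a wedged
status register pinning the CPU inside the ISR. The skeleton of the pattern,
condensed from eepro_interrupt() below (service_rx() and service_tx() are
stand-ins for the real eepro_rx()/eepro_transmit_interrupt() paths, and
ioaddr is dev->base_addr as in the handler):

	int boguscount = 20, handled = 0;
	unsigned char status;

	while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT | TX_INT)) &&
	       boguscount--) {
		handled = 1;
		if (status & RX_INT)
			service_rx(dev);	/* ack and drain the rx ring */
		if (status & TX_INT)
			service_tx(dev);	/* reap finished tx slots */
	}
	return IRQ_RETVAL(handled);

end of note]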
*/ + +static irqreturn_t +eepro_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct eepro_local *lp; + int ioaddr, status, boguscount = 20; + int handled = 0; + + lp = netdev_priv(dev); + + spin_lock(&lp->lock); + + if (net_debug > 5) + printk(KERN_DEBUG "%s: entering eepro_interrupt routine.\n", dev->name); + + ioaddr = dev->base_addr; + + while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT|TX_INT)) && (boguscount--)) + { + handled = 1; + if (status & RX_INT) { + if (net_debug > 4) + printk(KERN_DEBUG "%s: packet received interrupt.\n", dev->name); + + eepro_dis_int(ioaddr); + + /* Get the received packets */ + eepro_ack_rx(ioaddr); + eepro_rx(dev); + + eepro_en_int(ioaddr); + } + if (status & TX_INT) { + if (net_debug > 4) + printk(KERN_DEBUG "%s: packet transmit interrupt.\n", dev->name); + + + eepro_dis_int(ioaddr); + + /* Process the status of transmitted packets */ + eepro_ack_tx(ioaddr); + eepro_transmit_interrupt(dev); + + eepro_en_int(ioaddr); + } + } + + if (net_debug > 5) + printk(KERN_DEBUG "%s: exiting eepro_interrupt routine.\n", dev->name); + + spin_unlock(&lp->lock); + return IRQ_RETVAL(handled); +} + +static int eepro_close(struct net_device *dev) +{ + struct eepro_local *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + short temp_reg; + + netif_stop_queue(dev); + + eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */ + + /* Disable the physical interrupt line. */ + temp_reg = inb(ioaddr + REG1); + outb(temp_reg & 0x7f, ioaddr + REG1); + + eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ + + /* Flush the Tx and disable Rx. */ + outb(STOP_RCV_CMD, ioaddr); + lp->tx_start = lp->tx_end = lp->xmt_lower_limit; + lp->tx_last = 0; + + /* Mask all the interrupts. */ + eepro_dis_int(ioaddr); + + /* clear all interrupts */ + eepro_clear_int(ioaddr); + + /* Reset the 82595 */ + eepro_reset(ioaddr); + + /* release the interrupt */ + free_irq(dev->irq, dev); + + /* Update the statistics here. What statistics? */ + + return 0; +} + +/* Set or clear the multicast filter for this adaptor. + */ +static void +set_multicast_list(struct net_device *dev) +{ + struct eepro_local *lp = netdev_priv(dev); + short ioaddr = dev->base_addr; + unsigned short mode; + struct netdev_hw_addr *ha; + int mc_count = netdev_mc_count(dev); + + if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63) + { + eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ + mode = inb(ioaddr + REG2); + outb(mode | PRMSC_Mode, ioaddr + REG2); + mode = inb(ioaddr + REG3); + outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */ + eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ + } + + else if (mc_count == 0) + { + eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ + mode = inb(ioaddr + REG2); + outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */ + mode = inb(ioaddr + REG3); + outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */ + eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ + } + + else + { + unsigned short status, *eaddrs; + int i, boguscount = 0; + + /* Disable RX and TX interrupts. Necessary to avoid + corruption of the HOST_ADDRESS_REG by interrupt + service routines. */ + eepro_dis_int(ioaddr); + + eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ + mode = inb(ioaddr + REG2); + outb(mode | Multi_IA, ioaddr + REG2); + mode = inb(ioaddr + REG3); + outb(mode, ioaddr + REG3); /* writing reg. 
3 to complete the update */ + eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ + outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG); + outw(MC_SETUP, ioaddr + IO_PORT); + outw(0, ioaddr + IO_PORT); + outw(0, ioaddr + IO_PORT); + outw(6 * (mc_count + 1), ioaddr + IO_PORT); + + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (unsigned short *) ha->addr; + outw(*eaddrs++, ioaddr + IO_PORT); + outw(*eaddrs++, ioaddr + IO_PORT); + outw(*eaddrs++, ioaddr + IO_PORT); + } + + eaddrs = (unsigned short *) dev->dev_addr; + outw(eaddrs[0], ioaddr + IO_PORT); + outw(eaddrs[1], ioaddr + IO_PORT); + outw(eaddrs[2], ioaddr + IO_PORT); + outw(lp->tx_end, ioaddr + lp->xmt_bar); + outb(MC_SETUP, ioaddr); + + /* Update the transmit queue */ + i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1); + + if (lp->tx_start != lp->tx_end) + { + /* update the next address and the chain bit in the + last packet */ + outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG); + outw(i, ioaddr + IO_PORT); + outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG); + status = inw(ioaddr + IO_PORT); + outw(status | CHAIN_BIT, ioaddr + IO_PORT); + lp->tx_end = i ; + } + else { + lp->tx_start = lp->tx_end = i ; + } + + /* Acknowledge that the MC setup is done */ + do { /* We should be doing this in the eepro_interrupt()! */ + SLOW_DOWN; + SLOW_DOWN; + if (inb(ioaddr + STATUS_REG) & 0x08) + { + i = inb(ioaddr); + outb(0x08, ioaddr + STATUS_REG); + + if (i & 0x20) { /* command ABORTed */ + printk(KERN_NOTICE "%s: multicast setup failed.\n", + dev->name); + break; + } else if ((i & 0x0f) == 0x03) { /* MC-Done */ + printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n", + dev->name, mc_count, + mc_count > 1 ? "es":""); + break; + } + } + } while (++boguscount < 100); + + /* Re-enable RX and TX interrupts */ + eepro_en_int(ioaddr); + } + if (lp->eepro == LAN595FX_10ISA) { + eepro_complete_selreset(ioaddr); + } + else + eepro_en_rx(ioaddr); +} + +/* The horrible routine to read a word from the serial EEPROM. */ +/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */ + +/* The delay between EEPROM clock transitions. */ +#define eeprom_delay() { udelay(40); } +#define EE_READ_CMD (6 << 6) + +static int +read_eeprom(int ioaddr, int location, struct net_device *dev) +{ + int i; + unsigned short retval = 0; + struct eepro_local *lp = netdev_priv(dev); + short ee_addr = ioaddr + lp->eeprom_reg; + int read_cmd = location | EE_READ_CMD; + short ctrl_val = EECS ; + + /* XXXX - black magic */ + eepro_sw2bank1(ioaddr); + outb(0x00, ioaddr + STATUS_REG); + /* XXXX - black magic */ + + eepro_sw2bank2(ioaddr); + outb(ctrl_val, ee_addr); + + /* Shift the read command bits out. */ + for (i = 8; i >= 0; i--) { + short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI + : ctrl_val; + outb(outval, ee_addr); + outb(outval | EESK, ee_addr); /* EEPROM clock tick. */ + eeprom_delay(); + outb(outval, ee_addr); /* Finish EEPROM a clock tick. */ + eeprom_delay(); + } + outb(ctrl_val, ee_addr); + + for (i = 16; i > 0; i--) { + outb(ctrl_val | EESK, ee_addr); eeprom_delay(); + retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0); + outb(ctrl_val, ee_addr); eeprom_delay(); + } + + /* Terminate the EEPROM access. 
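[Editorial note: the nine bits shifted out above follow the standard 93Cxx
serial-EEPROM read sequence: a start bit, a two-bit READ opcode and a six-bit
word address, most significant bit first. A worked example for word 3:

	/* EE_READ_CMD = 6 << 6 = binary 110000000:
	 * start bit (1) and READ opcode (10), address bits zero */
	int read_cmd = 3 | EE_READ_CMD;	/* binary 110000011 */
	/* the first loop clocks bits 8..0 out on EEDI; the second
	 * clocks 16 data bits back in on EEDO, MSB first */

end of note]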
*/ + ctrl_val &= ~EECS; + outb(ctrl_val | EESK, ee_addr); + eeprom_delay(); + outb(ctrl_val, ee_addr); + eeprom_delay(); + eepro_sw2bank0(ioaddr); + return retval; +} + +static int +hardware_send_packet(struct net_device *dev, void *buf, short length) +{ + struct eepro_local *lp = netdev_priv(dev); + short ioaddr = dev->base_addr; + unsigned status, tx_available, last, end; + + if (net_debug > 5) + printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name); + + /* determine how much of the transmit buffer space is available */ + if (lp->tx_end > lp->tx_start) + tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start); + else if (lp->tx_end < lp->tx_start) + tx_available = lp->tx_start - lp->tx_end; + else tx_available = lp->xmt_ram; + + if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) { + /* No space available ??? */ + return 1; + } + + last = lp->tx_end; + end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; + + if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */ + if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) { + /* Arrrr!!!, must keep the xmt header together, + several days were lost to chase this one down. */ + last = lp->xmt_lower_limit; + end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; + } + else end = lp->xmt_lower_limit + (end - + lp->xmt_upper_limit + 2); + } + + outw(last, ioaddr + HOST_ADDRESS_REG); + outw(XMT_CMD, ioaddr + IO_PORT); + outw(0, ioaddr + IO_PORT); + outw(end, ioaddr + IO_PORT); + outw(length, ioaddr + IO_PORT); + + if (lp->version == LAN595) + outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1); + else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */ + unsigned short temp = inb(ioaddr + INT_MASK_REG); + outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG); + outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2); + outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG); + } + + /* A dummy read to flush the DRAM write pipeline */ + status = inw(ioaddr + IO_PORT); + + if (lp->tx_start == lp->tx_end) { + outw(last, ioaddr + lp->xmt_bar); + outb(XMT_CMD, ioaddr); + lp->tx_start = last; /* I don't like to change tx_start here */ + } + else { + /* update the next address and the chain bit in the + last packet */ + + if (lp->tx_end != last) { + outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG); + outw(last, ioaddr + IO_PORT); + } + + outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG); + status = inw(ioaddr + IO_PORT); + outw(status | CHAIN_BIT, ioaddr + IO_PORT); + + /* Continue the transmit command */ + outb(RESUME_XMT_CMD, ioaddr); + } + + lp->tx_last = last; + lp->tx_end = end; + + if (net_debug > 5) + printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name); + + return 0; +} + +static void +eepro_rx(struct net_device *dev) +{ + struct eepro_local *lp = netdev_priv(dev); + short ioaddr = dev->base_addr; + short boguscount = 20; + short rcv_car = lp->rx_start; + unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size; + + if (net_debug > 5) + printk(KERN_DEBUG "%s: entering eepro_rx routine.\n", dev->name); + + /* Set the read pointer to the start of the RCV */ + outw(rcv_car, ioaddr + HOST_ADDRESS_REG); + + rcv_event = inw(ioaddr + IO_PORT); + + while (rcv_event == RCV_DONE) { + + rcv_status = inw(ioaddr + IO_PORT); + rcv_next_frame = inw(ioaddr + IO_PORT); + rcv_size = inw(ioaddr + IO_PORT); + + if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) { + + /* Malloc up new buffer. 
*/ + struct sk_buff *skb; + + dev->stats.rx_bytes+=rcv_size; + rcv_size &= 0x3fff; + skb = dev_alloc_skb(rcv_size+5); + if (skb == NULL) { + printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); + dev->stats.rx_dropped++; + rcv_car = lp->rx_start + RCV_HEADER + rcv_size; + lp->rx_start = rcv_next_frame; + outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); + + break; + } + skb_reserve(skb,2); + + if (lp->version == LAN595) + insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1); + else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */ + unsigned short temp = inb(ioaddr + INT_MASK_REG); + outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG); + insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size), + (rcv_size + 3) >> 2); + outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG); + } + + skb->protocol = eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + + else { /* Not sure will ever reach here, + I set the 595 to discard bad received frames */ + dev->stats.rx_errors++; + + if (rcv_status & 0x0100) + dev->stats.rx_over_errors++; + + else if (rcv_status & 0x0400) + dev->stats.rx_frame_errors++; + + else if (rcv_status & 0x0800) + dev->stats.rx_crc_errors++; + + printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", + dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); + } + + if (rcv_status & 0x1000) + dev->stats.rx_length_errors++; + + rcv_car = lp->rx_start + RCV_HEADER + rcv_size; + lp->rx_start = rcv_next_frame; + + if (--boguscount == 0) + break; + + outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); + rcv_event = inw(ioaddr + IO_PORT); + + } + if (rcv_car == 0) + rcv_car = lp->rcv_upper_limit | 0xff; + + outw(rcv_car - 1, ioaddr + RCV_STOP); + + if (net_debug > 5) + printk(KERN_DEBUG "%s: exiting eepro_rx routine.\n", dev->name); +} + +static void +eepro_transmit_interrupt(struct net_device *dev) +{ + struct eepro_local *lp = netdev_priv(dev); + short ioaddr = dev->base_addr; + short boguscount = 25; + short xmt_status; + + while ((lp->tx_start != lp->tx_end) && boguscount--) { + + outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG); + xmt_status = inw(ioaddr+IO_PORT); + + if (!(xmt_status & TX_DONE_BIT)) + break; + + xmt_status = inw(ioaddr+IO_PORT); + lp->tx_start = inw(ioaddr+IO_PORT); + + netif_wake_queue (dev); + + if (xmt_status & TX_OK) + dev->stats.tx_packets++; + else { + dev->stats.tx_errors++; + if (xmt_status & 0x0400) { + dev->stats.tx_carrier_errors++; + printk(KERN_DEBUG "%s: carrier error\n", + dev->name); + printk(KERN_DEBUG "%s: XMT status = %#x\n", + dev->name, xmt_status); + } + else { + printk(KERN_DEBUG "%s: XMT status = %#x\n", + dev->name, xmt_status); + printk(KERN_DEBUG "%s: XMT status = %#x\n", + dev->name, xmt_status); + } + } + if (xmt_status & 0x000f) { + dev->stats.collisions += (xmt_status & 0x000f); + } + + if ((xmt_status & 0x0040) == 0x0) { + dev->stats.tx_heartbeat_errors++; + } + } +} + +static int eepro_ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct eepro_local *lp = netdev_priv(dev); + + cmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_Autoneg; + cmd->advertising = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_Autoneg; + + if (GetBit(lp->word[5], ee_PortTPE)) { + cmd->supported |= SUPPORTED_TP; + cmd->advertising |= ADVERTISED_TP; + } + if (GetBit(lp->word[5], ee_PortBNC)) { + cmd->supported |= SUPPORTED_BNC; + cmd->advertising |= ADVERTISED_BNC; + } + if (GetBit(lp->word[5], ee_PortAUI)) { + 
		cmd->supported |= SUPPORTED_AUI;
+		cmd->advertising |= ADVERTISED_AUI;
+	}
+
+	ethtool_cmd_speed_set(cmd, SPEED_10);
+
+	if (dev->if_port == TPE && lp->word[1] & ee_Duplex) {
+		cmd->duplex = DUPLEX_FULL;
+	}
+	else {
+		cmd->duplex = DUPLEX_HALF;
+	}
+
+	cmd->port = dev->if_port;
+	cmd->phy_address = dev->base_addr;
+	cmd->transceiver = XCVR_INTERNAL;
+
+	if (lp->word[0] & ee_AutoNeg) {
+		cmd->autoneg = 1;
+	}
+
+	return 0;
+}
+
+static void eepro_ethtool_get_drvinfo(struct net_device *dev,
+					struct ethtool_drvinfo *drvinfo)
+{
+	strcpy(drvinfo->driver, DRV_NAME);
+	strcpy(drvinfo->version, DRV_VERSION);
+	sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static const struct ethtool_ops eepro_ethtool_ops = {
+	.get_settings	= eepro_ethtool_get_settings,
+	.get_drvinfo	= eepro_ethtool_get_drvinfo,
+};
+
+#ifdef MODULE
+
+#define MAX_EEPRO 8
+static struct net_device *dev_eepro[MAX_EEPRO];
+
+static int io[MAX_EEPRO] = {
+  [0 ... MAX_EEPRO-1] = -1
+};
+static int irq[MAX_EEPRO];
+static int mem[MAX_EEPRO] = {	/* Size of the rx buffer in KB */
+  [0 ... MAX_EEPRO-1] = RCV_DEFAULT_RAM/1024
+};
+static int autodetect;
+
+static int n_eepro;
+/* For linux 2.1.xx */
+
+MODULE_AUTHOR("Pascal Dupuis and others");
+MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver");
+MODULE_LICENSE("GPL");
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+module_param(autodetect, int, 0);
+MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
+MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
+MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
+
+int __init init_module(void)
+{
+	struct net_device *dev;
+	int i;
+	if (io[0] == -1 && autodetect == 0) {
+		printk(KERN_WARNING "eepro_init_module: Probe is very dangerous in ISA boards!\n");
+		printk(KERN_WARNING "eepro_init_module: Please add \"autodetect=1\" to force probe\n");
+		return -ENODEV;
+	}
+	else if (autodetect) {
+		/* if autodetect is set then we must force detection */
+		for (i = 0; i < MAX_EEPRO; i++) {
+			io[i] = 0;
+		}
+
+		printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n");
+	}
+
+	for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) {
+		dev = alloc_etherdev(sizeof(struct eepro_local));
+		if (!dev)
+			break;
+
+		dev->mem_end = mem[i];
+		dev->base_addr = io[i];
+		dev->irq = irq[i];
+
+		if (do_eepro_probe(dev) == 0) {
+			dev_eepro[n_eepro++] = dev;
+			continue;
+		}
+		free_netdev(dev);
+		break;
+	}
+
+	if (n_eepro)
+		printk(KERN_INFO "%s", version);
+
+	return n_eepro ? 0 : -ENODEV;
+}
+
+void __exit
+cleanup_module(void)
+{
+	int i;
+
+	for (i=0; i<n_eepro; i++) {
+		struct net_device *dev = dev_eepro[i];
+		unregister_netdev(dev);
+		release_region(dev->base_addr, EEPRO_IO_EXTENT);
+		free_netdev(dev);
+	}
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c
new file mode 100644
index 0000000..a192285
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/eexpress.c
@@ -0,0 +1,1720 @@
+/* Intel EtherExpress 16 device driver for Linux
+ *
+ * Written by John Sullivan, 1995
+ *  based on original code by Donald Becker, with changes by
+ *  Alan Cox and Pauline Middelink.
+ *
+ * Support for 8-bit mode by Zoltan Szilagyi
+ *
+ * Many modifications, and currently maintained, by
+ *  Philip Blundell
+ * Added the Compaq LTE  Alan Cox
+ * Added MCA support Adam Fritzler
+ *
+ * Note - this driver is experimental still - it has problems on faster
+ * machines.
Someone needs to sit down and go through it line by line with + * a databook... + */ + +/* The EtherExpress 16 is a fairly simple card, based on a shared-memory + * design using the i82586 Ethernet coprocessor. It bears no relationship, + * as far as I know, to the similarly-named "EtherExpress Pro" range. + * + * Historically, Linux support for these cards has been very bad. However, + * things seem to be getting better slowly. + */ + +/* If your card is confused about what sort of interface it has (eg it + * persistently reports "10baseT" when none is fitted), running 'SOFTSET /BART' + * or 'SOFTSET /LISA' from DOS seems to help. + */ + +/* Here's the scoop on memory mapping. + * + * There are three ways to access EtherExpress card memory: either using the + * shared-memory mapping, or using PIO through the dataport, or using PIO + * through the "shadow memory" ports. + * + * The shadow memory system works by having the card map some of its memory + * as follows: + * + * (the low five bits of the SMPTR are ignored) + * + * base+0x4000..400f memory at SMPTR+0..15 + * base+0x8000..800f memory at SMPTR+16..31 + * base+0xc000..c007 dubious stuff (memory at SMPTR+16..23 apparently) + * base+0xc008..c00f memory at 0x0008..0x000f + * + * This last set (the one at c008) is particularly handy because the SCB + * lives at 0x0008. So that set of ports gives us easy random access to data + * in the SCB without having to mess around setting up pointers and the like. + * We always use this method to access the SCB (via the scb_xx() functions). + * + * Dataport access works by aiming the appropriate (read or write) pointer + * at the first address you're interested in, and then reading or writing from + * the dataport. The pointers auto-increment after each transfer. We use + * this for data transfer. + * + * We don't use the shared-memory system because it allegedly doesn't work on + * all cards, and because it's a bit more prone to go wrong (it's one more + * thing to configure...). + */ + +/* Known bugs: + * + * - The card seems to want to give us two interrupts every time something + * happens, where just one would be better. + */ + +/* + * + * Note by Zoltan Szilagyi 10-12-96: + * + * I've succeeded in eliminating the "CU wedged" messages, and hence the + * lockups, which were only occurring with cards running in 8-bit mode ("force + * 8-bit operation" in Intel's SoftSet utility). This version of the driver + * sets the 82586 and the ASIC to 8-bit mode at startup; it also stops the + * CU before submitting a packet for transmission, and then restarts it as soon + * as the process of handing the packet is complete. This is definitely an + * unnecessary slowdown if the card is running in 16-bit mode; therefore one + * should detect 16-bit vs 8-bit mode from the EEPROM settings and act + * accordingly. In 8-bit mode with this bugfix I'm getting about 150 K/s for + * ftp's, which is significantly better than I get in DOS, so the overhead of + * stopping and restarting the CU with each transmit is not prohibitive in + * practice. + * + * Update by David Woodhouse 11/5/99: + * + * I've seen "CU wedged" messages in 16-bit mode, on the Alpha architecture. + * I assume that this is because 16-bit accesses are actually handled as two + * 8-bit accesses. 
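[Editorial note: the "particularly handy" window described above is what
makes SCB access a one-liner, since card memory 0x0008..0x000f appears
directly at I/O base+0xc008..0xc00f. A sketch of the resulting address
arithmetic (the cmd value is illustrative):

	unsigned short status, cmd = SCB_CUstart;

	status = inw(dev->base_addr + 0xc008);	/* SCB status, card addr 0x0008 */
	outw(cmd, dev->base_addr + 0xc00a);	/* SCB command, card addr 0x000a */

These correspond exactly to the scb_status()/scb_command() helpers defined
further down in this file.

end of note]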
+ */
+
+#ifdef __alpha__
+#define LOCKUP16 1
+#endif
+#ifndef LOCKUP16
+#define LOCKUP16 0
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/mca-legacy.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#ifndef NET_DEBUG
+#define NET_DEBUG 4
+#endif
+
+#include "eexpress.h"
+
+#define EEXP_IO_EXTENT 16
+
+/*
+ * Private data declarations
+ */
+
+struct net_local
+{
+	unsigned long last_tx;		/* jiffies when last transmit started */
+	unsigned long init_time;	/* jiffies when eexp_hw_init586 called */
+	unsigned short rx_first;	/* first rx buf, same as RX_BUF_START */
+	unsigned short rx_last;		/* last rx buf */
+	unsigned short rx_ptr;		/* first rx buf to look at */
+	unsigned short tx_head;		/* next free tx buf */
+	unsigned short tx_reap;		/* first in-use tx buf */
+	unsigned short tx_tail;		/* previous tx buf to tx_head */
+	unsigned short tx_link;		/* last known-executing tx buf */
+	unsigned short last_tx_restart;	/* set to tx_link when we
+					   restart the CU */
+	unsigned char started;
+	unsigned short rx_buf_start;
+	unsigned short rx_buf_end;
+	unsigned short num_tx_bufs;
+	unsigned short num_rx_bufs;
+	unsigned char width;		/* 0 for 16bit, 1 for 8bit */
+	unsigned char was_promisc;
+	unsigned char old_mc_count;
+	spinlock_t lock;
+};
+
+/* This is the code and data that is downloaded to the EtherExpress card's
+ * memory at boot time.
+ */
+
+static unsigned short start_code[] = {
+/* 0x0000 */
+	0x0001,			/* ISCP: busy - cleared after reset */
+	0x0008,0x0000,0x0000,	/* offset,address (lo,hi) of SCB */
+
+	0x0000,0x0000,		/* SCB: status, commands */
+	0x0000,0x0000,		/* links to first command block,
+				   first receive descriptor */
+	0x0000,0x0000,		/* CRC error, alignment error counts */
+	0x0000,0x0000,		/* out of resources, overrun error counts */
+
+	0x0000,0x0000,		/* pad */
+	0x0000,0x0000,
+
+/* 0x20 -- start of 82586 CU program */
+#define CONF_LINK 0x20
+	0x0000,Cmd_Config,
+	0x0032,			/* link to next command */
+	0x080c,			/* 12 bytes follow : fifo threshold=8 */
+	0x2e40,			/* don't rx bad frames
+				 * SRDY/ARDY => ext. sync.
: preamble len=8 + * take addresses from data buffers + * 6 bytes/address + */ + 0x6000, /* default backoff method & priority + * interframe spacing = 0x60 */ + 0xf200, /* slot time=0x200 + * max collision retry = 0xf */ +#define CONF_PROMISC 0x2e + 0x0000, /* no HDLC : normal CRC : enable broadcast + * disable promiscuous/multicast modes */ + 0x003c, /* minimum frame length = 60 octets) */ + + 0x0000,Cmd_SetAddr, + 0x003e, /* link to next command */ +#define CONF_HWADDR 0x38 + 0x0000,0x0000,0x0000, /* hardware address placed here */ + + 0x0000,Cmd_MCast, + 0x0076, /* link to next command */ +#define CONF_NR_MULTICAST 0x44 + 0x0000, /* number of bytes in multicast address(es) */ +#define CONF_MULTICAST 0x46 + 0x0000, 0x0000, 0x0000, /* some addresses */ + 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, + +#define CONF_DIAG_RESULT 0x76 + 0x0000, Cmd_Diag, + 0x007c, /* link to next command */ + + 0x0000,Cmd_TDR|Cmd_INT, + 0x0084, +#define CONF_TDR_RESULT 0x82 + 0x0000, + + 0x0000,Cmd_END|Cmd_Nop, /* end of configure sequence */ + 0x0084 /* dummy link */ +}; + +/* maps irq number to EtherExpress magic value */ +static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 }; + +#ifdef CONFIG_MCA_LEGACY +/* mapping of the first four bits of the second POS register */ +static unsigned short mca_iomap[] = { + 0x270, 0x260, 0x250, 0x240, 0x230, 0x220, 0x210, 0x200, + 0x370, 0x360, 0x350, 0x340, 0x330, 0x320, 0x310, 0x300 +}; +/* bits 5-7 of the second POS register */ +static char mca_irqmap[] = { 12, 9, 3, 4, 5, 10, 11, 15 }; +#endif + +/* + * Prototypes for Linux interface + */ + +static int eexp_open(struct net_device *dev); +static int eexp_close(struct net_device *dev); +static void eexp_timeout(struct net_device *dev); +static netdev_tx_t eexp_xmit(struct sk_buff *buf, + struct net_device *dev); + +static irqreturn_t eexp_irq(int irq, void *dev_addr); +static void eexp_set_multicast(struct net_device *dev); + +/* + * Prototypes for hardware access functions + */ + +static void eexp_hw_rx_pio(struct net_device *dev); +static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf, + unsigned short len); +static int eexp_hw_probe(struct net_device *dev,unsigned short ioaddr); +static unsigned short eexp_hw_readeeprom(unsigned short ioaddr, + unsigned char location); + +static unsigned short eexp_hw_lasttxstat(struct net_device *dev); +static void eexp_hw_txrestart(struct net_device *dev); + +static void eexp_hw_txinit (struct net_device *dev); +static void eexp_hw_rxinit (struct net_device *dev); + +static void eexp_hw_init586 (struct net_device *dev); +static void eexp_setup_filter (struct net_device *dev); + +static char *eexp_ifmap[]={"AUI", "BNC", "RJ45"}; +enum eexp_iftype {AUI=0, BNC=1, TPE=2}; + +#define STARTED_RU 2 +#define STARTED_CU 1 + +/* + * Primitive hardware access functions. 
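[Editorial note: the CONF_* labels above are byte offsets into card memory,
while start_code[] is an array of 16-bit words, so label N lives at
start_code[N/2]; CONF_HWADDR (0x38), for instance, is start_code[28..30].
The driver later patches the same bytes in card memory through the paged
shadow window, as eexp_hw_init586() does (addr_word0..2 are stand-ins for
the three halves of the station address, which the real code takes from
dev->dev_addr):

	outw(CONF_HWADDR & ~31, ioaddr + SM_PTR);	/* select the 32-byte page */
	outw(addr_word0, ioaddr + SHADOW(CONF_HWADDR));
	outw(addr_word1, ioaddr + SHADOW(CONF_HWADDR + 2));
	outw(addr_word2, ioaddr + SHADOW(CONF_HWADDR + 4));

end of note]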
+ */ + +static inline unsigned short scb_status(struct net_device *dev) +{ + return inw(dev->base_addr + 0xc008); +} + +static inline unsigned short scb_rdcmd(struct net_device *dev) +{ + return inw(dev->base_addr + 0xc00a); +} + +static inline void scb_command(struct net_device *dev, unsigned short cmd) +{ + outw(cmd, dev->base_addr + 0xc00a); +} + +static inline void scb_wrcbl(struct net_device *dev, unsigned short val) +{ + outw(val, dev->base_addr + 0xc00c); +} + +static inline void scb_wrrfa(struct net_device *dev, unsigned short val) +{ + outw(val, dev->base_addr + 0xc00e); +} + +static inline void set_loopback(struct net_device *dev) +{ + outb(inb(dev->base_addr + Config) | 2, dev->base_addr + Config); +} + +static inline void clear_loopback(struct net_device *dev) +{ + outb(inb(dev->base_addr + Config) & ~2, dev->base_addr + Config); +} + +static inline unsigned short int SHADOW(short int addr) +{ + addr &= 0x1f; + if (addr > 0xf) addr += 0x3ff0; + return addr + 0x4000; +} + +/* + * Linux interface + */ + +/* + * checks for presence of EtherExpress card + */ + +static int __init do_express_probe(struct net_device *dev) +{ + unsigned short *port; + static unsigned short ports[] = { 0x240,0x300,0x310,0x270,0x320,0x340,0 }; + unsigned short ioaddr = dev->base_addr; + int dev_irq = dev->irq; + int err; + + dev->if_port = 0xff; /* not set */ + +#ifdef CONFIG_MCA_LEGACY + if (MCA_bus) { + int slot = 0; + + /* + * Only find one card at a time. Subsequent calls + * will find others, however, proper multicard MCA + * probing and setup can't be done with the + * old-style Space.c init routines. -- ASF + */ + while (slot != MCA_NOTFOUND) { + int pos0, pos1; + + slot = mca_find_unused_adapter(0x628B, slot); + if (slot == MCA_NOTFOUND) + break; + + pos0 = mca_read_stored_pos(slot, 2); + pos1 = mca_read_stored_pos(slot, 3); + ioaddr = mca_iomap[pos1&0xf]; + + dev->irq = mca_irqmap[(pos1>>4)&0x7]; + + /* + * XXX: Transceiver selection is done + * differently on the MCA version. + * How to get it to select something + * other than external/AUI is currently + * unknown. This code is just for looks. 
-- ASF + */ + if ((pos0 & 0x7) == 0x1) + dev->if_port = AUI; + else if ((pos0 & 0x7) == 0x5) { + if (pos1 & 0x80) + dev->if_port = BNC; + else + dev->if_port = TPE; + } + + mca_set_adapter_name(slot, "Intel EtherExpress 16 MCA"); + mca_set_adapter_procfn(slot, NULL, dev); + mca_mark_as_used(slot); + + break; + } + } +#endif + if (ioaddr&0xfe00) { + if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) + return -EBUSY; + err = eexp_hw_probe(dev,ioaddr); + release_region(ioaddr, EEXP_IO_EXTENT); + return err; + } else if (ioaddr) + return -ENXIO; + + for (port=&ports[0] ; *port ; port++ ) + { + unsigned short sum = 0; + int i; + if (!request_region(*port, EEXP_IO_EXTENT, "EtherExpress")) + continue; + for ( i=0 ; i<4 ; i++ ) + { + unsigned short t; + t = inb(*port + ID_PORT); + sum |= (t>>4) << ((t & 0x03)<<2); + } + if (sum==0xbaba && !eexp_hw_probe(dev,*port)) { + release_region(*port, EEXP_IO_EXTENT); + return 0; + } + release_region(*port, EEXP_IO_EXTENT); + dev->irq = dev_irq; + } + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init express_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_express_probe(dev); + if (!err) + return dev; + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +/* + * open and initialize the adapter, ready for use + */ + +static int eexp_open(struct net_device *dev) +{ + int ret; + unsigned short ioaddr = dev->base_addr; + struct net_local *lp = netdev_priv(dev); + +#if NET_DEBUG > 6 + printk(KERN_DEBUG "%s: eexp_open()\n", dev->name); +#endif + + if (!dev->irq || !irqrmap[dev->irq]) + return -ENXIO; + + ret = request_irq(dev->irq, eexp_irq, 0, dev->name, dev); + if (ret) + return ret; + + if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) { + printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" + , ioaddr); + goto err_out1; + } + if (!request_region(ioaddr+0x4000, EEXP_IO_EXTENT, "EtherExpress shadow")) { + printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" + , ioaddr+0x4000); + goto err_out2; + } + if (!request_region(ioaddr+0x8000, EEXP_IO_EXTENT, "EtherExpress shadow")) { + printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" + , ioaddr+0x8000); + goto err_out3; + } + if (!request_region(ioaddr+0xc000, EEXP_IO_EXTENT, "EtherExpress shadow")) { + printk(KERN_WARNING "EtherExpress io port %x, is busy.\n" + , ioaddr+0xc000); + goto err_out4; + } + + if (lp->width) { + printk("%s: forcing ASIC to 8-bit mode\n", dev->name); + outb(inb(dev->base_addr+Config)&~4, dev->base_addr+Config); + } + + eexp_hw_init586(dev); + netif_start_queue(dev); +#if NET_DEBUG > 6 + printk(KERN_DEBUG "%s: leaving eexp_open()\n", dev->name); +#endif + return 0; + + err_out4: + release_region(ioaddr+0x8000, EEXP_IO_EXTENT); + err_out3: + release_region(ioaddr+0x4000, EEXP_IO_EXTENT); + err_out2: + release_region(ioaddr, EEXP_IO_EXTENT); + err_out1: + free_irq(dev->irq, dev); + return -EBUSY; +} + +/* + * close and disable the interface, leaving the 586 in reset. 
+ */ + +static int eexp_close(struct net_device *dev) +{ + unsigned short ioaddr = dev->base_addr; + struct net_local *lp = netdev_priv(dev); + + int irq = dev->irq; + + netif_stop_queue(dev); + + outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ); + lp->started = 0; + scb_command(dev, SCB_CUsuspend|SCB_RUsuspend); + outb(0,ioaddr+SIGNAL_CA); + free_irq(irq,dev); + outb(i586_RST,ioaddr+EEPROM_Ctrl); + release_region(ioaddr, EEXP_IO_EXTENT); + release_region(ioaddr+0x4000, 16); + release_region(ioaddr+0x8000, 16); + release_region(ioaddr+0xc000, 16); + + return 0; +} + +/* + * This gets called when a higher level thinks we are broken. Check that + * nothing has become jammed in the CU. + */ + +static void unstick_cu(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned short ioaddr = dev->base_addr; + + if (lp->started) + { + if (time_after(jiffies, dev_trans_start(dev) + HZ/2)) + { + if (lp->tx_link==lp->last_tx_restart) + { + unsigned short boguscount=200,rsst; + printk(KERN_WARNING "%s: Retransmit timed out, status %04x, resetting...\n", + dev->name, scb_status(dev)); + eexp_hw_txinit(dev); + lp->last_tx_restart = 0; + scb_wrcbl(dev, lp->tx_link); + scb_command(dev, SCB_CUstart); + outb(0,ioaddr+SIGNAL_CA); + while (!SCB_complete(rsst=scb_status(dev))) + { + if (!--boguscount) + { + boguscount=200; + printk(KERN_WARNING "%s: Reset timed out status %04x, retrying...\n", + dev->name,rsst); + scb_wrcbl(dev, lp->tx_link); + scb_command(dev, SCB_CUstart); + outb(0,ioaddr+SIGNAL_CA); + } + } + netif_wake_queue(dev); + } + else + { + unsigned short status = scb_status(dev); + if (SCB_CUdead(status)) + { + unsigned short txstatus = eexp_hw_lasttxstat(dev); + printk(KERN_WARNING "%s: Transmit timed out, CU not active status %04x %04x, restarting...\n", + dev->name, status, txstatus); + eexp_hw_txrestart(dev); + } + else + { + unsigned short txstatus = eexp_hw_lasttxstat(dev); + if (netif_queue_stopped(dev) && !txstatus) + { + printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n", + dev->name,status,txstatus); + eexp_hw_init586(dev); + netif_wake_queue(dev); + } + else + { + printk(KERN_WARNING "%s: transmit timed out\n", dev->name); + } + } + } + } + } + else + { + if (time_after(jiffies, lp->init_time + 10)) + { + unsigned short status = scb_status(dev); + printk(KERN_WARNING "%s: i82586 startup timed out, status %04x, resetting...\n", + dev->name, status); + eexp_hw_init586(dev); + netif_wake_queue(dev); + } + } +} + +static void eexp_timeout(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); +#ifdef CONFIG_SMP + unsigned long flags; +#endif + int status; + + disable_irq(dev->irq); + + /* + * Best would be to use synchronize_irq(); spin_lock() here + * lets make it work first.. + */ + +#ifdef CONFIG_SMP + spin_lock_irqsave(&lp->lock, flags); +#endif + + status = scb_status(dev); + unstick_cu(dev); + printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name, + (SCB_complete(status)?"lost interrupt": + "board on fire")); + dev->stats.tx_errors++; + lp->last_tx = jiffies; + if (!SCB_complete(status)) { + scb_command(dev, SCB_CUabort); + outb(0,dev->base_addr+SIGNAL_CA); + } + netif_wake_queue(dev); +#ifdef CONFIG_SMP + spin_unlock_irqrestore(&lp->lock, flags); +#endif +} + +/* + * Called to transmit a packet, or to allow us to right ourselves + * if the kernel thinks we've died. 
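[Editorial note: eexp_xmit() below, like eepro_send_packet() earlier, opens
with the standard short-frame fixup, since Ethernet frames must be padded to
ETH_ZLEN (60) bytes before they reach the wire:

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;	/* skb_padto() freed the skb */
		length = ETH_ZLEN;
	}

skb_padto() zero-fills the tail and consumes the skb on failure, which is
why the error path returns without freeing anything.

end of note]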
+ */ +static netdev_tx_t eexp_xmit(struct sk_buff *buf, struct net_device *dev) +{ + short length = buf->len; +#ifdef CONFIG_SMP + struct net_local *lp = netdev_priv(dev); + unsigned long flags; +#endif + +#if NET_DEBUG > 6 + printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name); +#endif + + if (buf->len < ETH_ZLEN) { + if (skb_padto(buf, ETH_ZLEN)) + return NETDEV_TX_OK; + length = ETH_ZLEN; + } + + disable_irq(dev->irq); + + /* + * Best would be to use synchronize_irq(); spin_lock() here + * lets make it work first.. + */ + +#ifdef CONFIG_SMP + spin_lock_irqsave(&lp->lock, flags); +#endif + + { + unsigned short *data = (unsigned short *)buf->data; + + dev->stats.tx_bytes += length; + + eexp_hw_tx_pio(dev,data,length); + } + dev_kfree_skb(buf); +#ifdef CONFIG_SMP + spin_unlock_irqrestore(&lp->lock, flags); +#endif + enable_irq(dev->irq); + return NETDEV_TX_OK; +} + +/* + * Handle an EtherExpress interrupt + * If we've finished initializing, start the RU and CU up. + * If we've already started, reap tx buffers, handle any received packets, + * check to make sure we've not become wedged. + */ + +static unsigned short eexp_start_irq(struct net_device *dev, + unsigned short status) +{ + unsigned short ack_cmd = SCB_ack(status); + struct net_local *lp = netdev_priv(dev); + unsigned short ioaddr = dev->base_addr; + if ((dev->flags & IFF_UP) && !(lp->started & STARTED_CU)) { + short diag_status, tdr_status; + while (SCB_CUstat(status)==2) + status = scb_status(dev); +#if NET_DEBUG > 4 + printk("%s: CU went non-active (status %04x)\n", + dev->name, status); +#endif + + outw(CONF_DIAG_RESULT & ~31, ioaddr + SM_PTR); + diag_status = inw(ioaddr + SHADOW(CONF_DIAG_RESULT)); + if (diag_status & 1<<11) { + printk(KERN_WARNING "%s: 82586 failed self-test\n", + dev->name); + } else if (!(diag_status & 1<<13)) { + printk(KERN_WARNING "%s: 82586 self-test failed to complete\n", dev->name); + } + + outw(CONF_TDR_RESULT & ~31, ioaddr + SM_PTR); + tdr_status = inw(ioaddr + SHADOW(CONF_TDR_RESULT)); + if (tdr_status & (TDR_SHORT|TDR_OPEN)) { + printk(KERN_WARNING "%s: TDR reports cable %s at %d tick%s\n", dev->name, (tdr_status & TDR_SHORT)?"short":"broken", tdr_status & TDR_TIME, ((tdr_status & TDR_TIME) != 1) ? 
"s" : ""); + } + else if (tdr_status & TDR_XCVRPROBLEM) { + printk(KERN_WARNING "%s: TDR reports transceiver problem\n", dev->name); + } + else if (tdr_status & TDR_LINKOK) { +#if NET_DEBUG > 4 + printk(KERN_DEBUG "%s: TDR reports link OK\n", dev->name); +#endif + } else { + printk("%s: TDR is ga-ga (status %04x)\n", dev->name, + tdr_status); + } + + lp->started |= STARTED_CU; + scb_wrcbl(dev, lp->tx_link); + /* if the RU isn't running, start it now */ + if (!(lp->started & STARTED_RU)) { + ack_cmd |= SCB_RUstart; + scb_wrrfa(dev, lp->rx_buf_start); + lp->rx_ptr = lp->rx_buf_start; + lp->started |= STARTED_RU; + } + ack_cmd |= SCB_CUstart | 0x2000; + } + + if ((dev->flags & IFF_UP) && !(lp->started & STARTED_RU) && SCB_RUstat(status)==4) + lp->started|=STARTED_RU; + + return ack_cmd; +} + +static void eexp_cmd_clear(struct net_device *dev) +{ + unsigned long int oldtime = jiffies; + while (scb_rdcmd(dev) && (time_before(jiffies, oldtime + 10))); + if (scb_rdcmd(dev)) { + printk("%s: command didn't clear\n", dev->name); + } +} + +static irqreturn_t eexp_irq(int dummy, void *dev_info) +{ + struct net_device *dev = dev_info; + struct net_local *lp; + unsigned short ioaddr,status,ack_cmd; + unsigned short old_read_ptr, old_write_ptr; + + lp = netdev_priv(dev); + ioaddr = dev->base_addr; + + spin_lock(&lp->lock); + + old_read_ptr = inw(ioaddr+READ_PTR); + old_write_ptr = inw(ioaddr+WRITE_PTR); + + outb(SIRQ_dis|irqrmap[dev->irq], ioaddr+SET_IRQ); + + status = scb_status(dev); + +#if NET_DEBUG > 4 + printk(KERN_DEBUG "%s: interrupt (status %x)\n", dev->name, status); +#endif + + if (lp->started == (STARTED_CU | STARTED_RU)) { + + do { + eexp_cmd_clear(dev); + + ack_cmd = SCB_ack(status); + scb_command(dev, ack_cmd); + outb(0,ioaddr+SIGNAL_CA); + + eexp_cmd_clear(dev); + + if (SCB_complete(status)) { + if (!eexp_hw_lasttxstat(dev)) { + printk("%s: tx interrupt but no status\n", dev->name); + } + } + + if (SCB_rxdframe(status)) + eexp_hw_rx_pio(dev); + + status = scb_status(dev); + } while (status & 0xc000); + + if (SCB_RUdead(status)) + { + printk(KERN_WARNING "%s: RU stopped: status %04x\n", + dev->name,status); +#if 0 + printk(KERN_WARNING "%s: cur_rfd=%04x, cur_rbd=%04x\n", dev->name, lp->cur_rfd, lp->cur_rbd); + outw(lp->cur_rfd, ioaddr+READ_PTR); + printk(KERN_WARNING "%s: [%04x]\n", dev->name, inw(ioaddr+DATAPORT)); + outw(lp->cur_rfd+6, ioaddr+READ_PTR); + printk(KERN_WARNING "%s: rbd is %04x\n", dev->name, rbd= inw(ioaddr+DATAPORT)); + outw(rbd, ioaddr+READ_PTR); + printk(KERN_WARNING "%s: [%04x %04x] ", dev->name, inw(ioaddr+DATAPORT), inw(ioaddr+DATAPORT)); + outw(rbd+8, ioaddr+READ_PTR); + printk("[%04x]\n", inw(ioaddr+DATAPORT)); +#endif + dev->stats.rx_errors++; +#if 1 + eexp_hw_rxinit(dev); +#else + lp->cur_rfd = lp->first_rfd; +#endif + scb_wrrfa(dev, lp->rx_buf_start); + scb_command(dev, SCB_RUstart); + outb(0,ioaddr+SIGNAL_CA); + } + } else { + if (status & 0x8000) + ack_cmd = eexp_start_irq(dev, status); + else + ack_cmd = SCB_ack(status); + scb_command(dev, ack_cmd); + outb(0,ioaddr+SIGNAL_CA); + } + + eexp_cmd_clear(dev); + + outb(SIRQ_en|irqrmap[dev->irq], ioaddr+SET_IRQ); + +#if NET_DEBUG > 6 + printk("%s: leaving eexp_irq()\n", dev->name); +#endif + outw(old_read_ptr, ioaddr+READ_PTR); + outw(old_write_ptr, ioaddr+WRITE_PTR); + + spin_unlock(&lp->lock); + return IRQ_HANDLED; +} + +/* + * Hardware access functions + */ + +/* + * Set the cable type to use. 
+ */ + +static void eexp_hw_set_interface(struct net_device *dev) +{ + unsigned char oldval = inb(dev->base_addr + 0x300e); + oldval &= ~0x82; + switch (dev->if_port) { + case TPE: + oldval |= 0x2; + case BNC: + oldval |= 0x80; + break; + } + outb(oldval, dev->base_addr+0x300e); + mdelay(20); +} + +/* + * Check all the receive buffers, and hand any received packets + * to the upper levels. Basic sanity check on each frame + * descriptor, though we don't bother trying to fix broken ones. + */ + +static void eexp_hw_rx_pio(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned short rx_block = lp->rx_ptr; + unsigned short boguscount = lp->num_rx_bufs; + unsigned short ioaddr = dev->base_addr; + unsigned short status; + +#if NET_DEBUG > 6 + printk(KERN_DEBUG "%s: eexp_hw_rx()\n", dev->name); +#endif + + do { + unsigned short rfd_cmd, rx_next, pbuf, pkt_len; + + outw(rx_block, ioaddr + READ_PTR); + status = inw(ioaddr + DATAPORT); + + if (FD_Done(status)) + { + rfd_cmd = inw(ioaddr + DATAPORT); + rx_next = inw(ioaddr + DATAPORT); + pbuf = inw(ioaddr + DATAPORT); + + outw(pbuf, ioaddr + READ_PTR); + pkt_len = inw(ioaddr + DATAPORT); + + if (rfd_cmd!=0x0000) + { + printk(KERN_WARNING "%s: rfd_cmd not zero:0x%04x\n", + dev->name, rfd_cmd); + continue; + } + else if (pbuf!=rx_block+0x16) + { + printk(KERN_WARNING "%s: rfd and rbd out of sync 0x%04x 0x%04x\n", + dev->name, rx_block+0x16, pbuf); + continue; + } + else if ((pkt_len & 0xc000)!=0xc000) + { + printk(KERN_WARNING "%s: EOF or F not set on received buffer (%04x)\n", + dev->name, pkt_len & 0xc000); + continue; + } + else if (!FD_OK(status)) + { + dev->stats.rx_errors++; + if (FD_CRC(status)) + dev->stats.rx_crc_errors++; + if (FD_Align(status)) + dev->stats.rx_frame_errors++; + if (FD_Resrc(status)) + dev->stats.rx_fifo_errors++; + if (FD_DMA(status)) + dev->stats.rx_over_errors++; + if (FD_Short(status)) + dev->stats.rx_length_errors++; + } + else + { + struct sk_buff *skb; + pkt_len &= 0x3fff; + skb = dev_alloc_skb(pkt_len+16); + if (skb == NULL) + { + printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name); + dev->stats.rx_dropped++; + break; + } + skb_reserve(skb, 2); + outw(pbuf+10, ioaddr+READ_PTR); + insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1); + skb->protocol = eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + outw(rx_block, ioaddr+WRITE_PTR); + outw(0, ioaddr+DATAPORT); + outw(0, ioaddr+DATAPORT); + rx_block = rx_next; + } + } while (FD_Done(status) && boguscount--); + lp->rx_ptr = rx_block; +} + +/* + * Hand a packet to the card for transmission + * If we get here, we MUST have already checked + * to make sure there is room in the transmit + * buffer region. + */ + +static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf, + unsigned short len) +{ + struct net_local *lp = netdev_priv(dev); + unsigned short ioaddr = dev->base_addr; + + if (LOCKUP16 || lp->width) { + /* Stop the CU so that there is no chance that it + jumps off to a bogus address while we are writing the + pointer to the next transmit packet in 8-bit mode -- + this eliminates the "CU wedged" errors in 8-bit mode. 
+ (Zoltan Szilagyi 10-12-96) */ + scb_command(dev, SCB_CUsuspend); + outw(0xFFFF, ioaddr+SIGNAL_CA); + } + + outw(lp->tx_head, ioaddr + WRITE_PTR); + + outw(0x0000, ioaddr + DATAPORT); + outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT); + outw(lp->tx_head+0x08, ioaddr + DATAPORT); + outw(lp->tx_head+0x0e, ioaddr + DATAPORT); + + outw(0x0000, ioaddr + DATAPORT); + outw(0x0000, ioaddr + DATAPORT); + outw(lp->tx_head+0x08, ioaddr + DATAPORT); + + outw(0x8000|len, ioaddr + DATAPORT); + outw(-1, ioaddr + DATAPORT); + outw(lp->tx_head+0x16, ioaddr + DATAPORT); + outw(0, ioaddr + DATAPORT); + + outsw(ioaddr + DATAPORT, buf, (len+1)>>1); + + outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR); + outw(lp->tx_head, ioaddr + DATAPORT); + + dev->trans_start = jiffies; + lp->tx_tail = lp->tx_head; + if (lp->tx_head==TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE)) + lp->tx_head = TX_BUF_START; + else + lp->tx_head += TX_BUF_SIZE; + if (lp->tx_head != lp->tx_reap) + netif_wake_queue(dev); + + if (LOCKUP16 || lp->width) { + /* Restart the CU so that the packet can actually + be transmitted. (Zoltan Szilagyi 10-12-96) */ + scb_command(dev, SCB_CUresume); + outw(0xFFFF, ioaddr+SIGNAL_CA); + } + + dev->stats.tx_packets++; + lp->last_tx = jiffies; +} + +static const struct net_device_ops eexp_netdev_ops = { + .ndo_open = eexp_open, + .ndo_stop = eexp_close, + .ndo_start_xmit = eexp_xmit, + .ndo_set_multicast_list = eexp_set_multicast, + .ndo_tx_timeout = eexp_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/* + * Sanity check the suspected EtherExpress card + * Read hardware address, reset card, size memory and initialize buffer + * memory pointers. These are held in netdev_priv(), in case someone has more + * than one card in a machine. + */ + +static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr) +{ + unsigned short hw_addr[3]; + unsigned char buswidth; + unsigned int memory_size; + int i; + unsigned short xsum = 0; + struct net_local *lp = netdev_priv(dev); + + printk("%s: EtherExpress 16 at %#x ",dev->name,ioaddr); + + outb(ASIC_RST, ioaddr+EEPROM_Ctrl); + outb(0, ioaddr+EEPROM_Ctrl); + udelay(500); + outb(i586_RST, ioaddr+EEPROM_Ctrl); + + hw_addr[0] = eexp_hw_readeeprom(ioaddr,2); + hw_addr[1] = eexp_hw_readeeprom(ioaddr,3); + hw_addr[2] = eexp_hw_readeeprom(ioaddr,4); + + /* Standard Address or Compaq LTE Address */ + if (!((hw_addr[2]==0x00aa && ((hw_addr[1] & 0xff00)==0x0000)) || + (hw_addr[2]==0x0080 && ((hw_addr[1] & 0xff00)==0x5F00)))) + { + printk(" rejected: invalid address %04x%04x%04x\n", + hw_addr[2],hw_addr[1],hw_addr[0]); + return -ENODEV; + } + + /* Calculate the EEPROM checksum. Carry on anyway if it's bad, + * though. + */ + for (i = 0; i < 64; i++) + xsum += eexp_hw_readeeprom(ioaddr, i); + if (xsum != 0xbaba) + printk(" (bad EEPROM xsum 0x%02x)", xsum); + + dev->base_addr = ioaddr; + for ( i=0 ; i<6 ; i++ ) + dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i]; + + { + static const char irqmap[] = { 0, 9, 3, 4, 5, 10, 11, 0 }; + unsigned short setupval = eexp_hw_readeeprom(ioaddr,0); + + /* Use the IRQ from EEPROM if none was given */ + if (!dev->irq) + dev->irq = irqmap[setupval>>13]; + + if (dev->if_port == 0xff) { + dev->if_port = !(setupval & 0x1000) ? AUI : + eexp_hw_readeeprom(ioaddr,5) & 0x1 ? 
TPE : BNC; + } + + buswidth = !((setupval & 0x400) >> 10); + } + + memset(lp, 0, sizeof(struct net_local)); + spin_lock_init(&lp->lock); + + printk("(IRQ %d, %s connector, %d-bit bus", dev->irq, + eexp_ifmap[dev->if_port], buswidth?8:16); + + if (!request_region(dev->base_addr + 0x300e, 1, "EtherExpress")) + return -EBUSY; + + eexp_hw_set_interface(dev); + + release_region(dev->base_addr + 0x300e, 1); + + /* Find out how much RAM we have on the card */ + outw(0, dev->base_addr + WRITE_PTR); + for (i = 0; i < 32768; i++) + outw(0, dev->base_addr + DATAPORT); + + for (memory_size = 0; memory_size < 64; memory_size++) + { + outw(memory_size<<10, dev->base_addr + READ_PTR); + if (inw(dev->base_addr+DATAPORT)) + break; + outw(memory_size<<10, dev->base_addr + WRITE_PTR); + outw(memory_size | 0x5000, dev->base_addr+DATAPORT); + outw(memory_size<<10, dev->base_addr + READ_PTR); + if (inw(dev->base_addr+DATAPORT) != (memory_size | 0x5000)) + break; + } + + /* Sort out the number of buffers. We may have 16, 32, 48 or 64k + * of RAM to play with. + */ + lp->num_tx_bufs = 4; + lp->rx_buf_end = 0x3ff6; + switch (memory_size) + { + case 64: + lp->rx_buf_end += 0x4000; + case 48: + lp->num_tx_bufs += 4; + lp->rx_buf_end += 0x4000; + case 32: + lp->rx_buf_end += 0x4000; + case 16: + printk(", %dk RAM)\n", memory_size); + break; + default: + printk(") bad memory size (%dk).\n", memory_size); + return -ENODEV; + break; + } + + lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE); + lp->width = buswidth; + + dev->netdev_ops = &eexp_netdev_ops; + dev->watchdog_timeo = 2*HZ; + + return register_netdev(dev); +} + +/* + * Read a word from the EtherExpress on-board serial EEPROM. + * The EEPROM contains 64 words of 16 bits. + */ +static unsigned short __init eexp_hw_readeeprom(unsigned short ioaddr, + unsigned char location) +{ + unsigned short cmd = 0x180|(location&0x7f); + unsigned short rval = 0,wval = EC_CS|i586_RST; + int i; + + outb(EC_CS|i586_RST,ioaddr+EEPROM_Ctrl); + for (i=0x100 ; i ; i>>=1 ) + { + if (cmd&i) + wval |= EC_Wr; + else + wval &= ~EC_Wr; + + outb(wval,ioaddr+EEPROM_Ctrl); + outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl); + eeprom_delay(); + outb(wval,ioaddr+EEPROM_Ctrl); + eeprom_delay(); + } + wval &= ~EC_Wr; + outb(wval,ioaddr+EEPROM_Ctrl); + for (i=0x8000 ; i ; i>>=1 ) + { + outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl); + eeprom_delay(); + if (inb(ioaddr+EEPROM_Ctrl)&EC_Rd) + rval |= i; + outb(wval,ioaddr+EEPROM_Ctrl); + eeprom_delay(); + } + wval &= ~EC_CS; + outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl); + eeprom_delay(); + outb(wval,ioaddr+EEPROM_Ctrl); + eeprom_delay(); + return rval; +} + +/* + * Reap tx buffers and return last transmit status. + * if ==0 then either: + * a) we're not transmitting anything, so why are we here? + * b) we've died. 
+ * otherwise, Stat_Busy(return) means we've still got some packets + * to transmit, Stat_Done(return) means our buffers should be empty + * again + */ + +static unsigned short eexp_hw_lasttxstat(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned short tx_block = lp->tx_reap; + unsigned short status; + + if (!netif_queue_stopped(dev) && lp->tx_head==lp->tx_reap) + return 0x0000; + + do + { + outw(tx_block & ~31, dev->base_addr + SM_PTR); + status = inw(dev->base_addr + SHADOW(tx_block)); + if (!Stat_Done(status)) + { + lp->tx_link = tx_block; + return status; + } + else + { + lp->last_tx_restart = 0; + dev->stats.collisions += Stat_NoColl(status); + if (!Stat_OK(status)) + { + char *whatsup = NULL; + dev->stats.tx_errors++; + if (Stat_Abort(status)) + dev->stats.tx_aborted_errors++; + if (Stat_TNoCar(status)) { + whatsup = "aborted, no carrier"; + dev->stats.tx_carrier_errors++; + } + if (Stat_TNoCTS(status)) { + whatsup = "aborted, lost CTS"; + dev->stats.tx_carrier_errors++; + } + if (Stat_TNoDMA(status)) { + whatsup = "FIFO underran"; + dev->stats.tx_fifo_errors++; + } + if (Stat_TXColl(status)) { + whatsup = "aborted, too many collisions"; + dev->stats.tx_aborted_errors++; + } + if (whatsup) + printk(KERN_INFO "%s: transmit %s\n", + dev->name, whatsup); + } + else + dev->stats.tx_packets++; + } + if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE)) + lp->tx_reap = tx_block = TX_BUF_START; + else + lp->tx_reap = tx_block += TX_BUF_SIZE; + netif_wake_queue(dev); + } + while (lp->tx_reap != lp->tx_head); + + lp->tx_link = lp->tx_tail + 0x08; + + return status; +} + +/* + * This should never happen. It is called when some higher routine detects + * that the CU has stopped, to try to restart it from the last packet we knew + * we were working on, or the idle loop if we had finished for the time. + */ + +static void eexp_hw_txrestart(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned short ioaddr = dev->base_addr; + + lp->last_tx_restart = lp->tx_link; + scb_wrcbl(dev, lp->tx_link); + scb_command(dev, SCB_CUstart); + outb(0,ioaddr+SIGNAL_CA); + + { + unsigned short boguscount=50,failcount=5; + while (!scb_status(dev)) + { + if (!--boguscount) + { + if (--failcount) + { + printk(KERN_WARNING "%s: CU start timed out, status %04x, cmd %04x\n", dev->name, scb_status(dev), scb_rdcmd(dev)); + scb_wrcbl(dev, lp->tx_link); + scb_command(dev, SCB_CUstart); + outb(0,ioaddr+SIGNAL_CA); + boguscount = 100; + } + else + { + printk(KERN_WARNING "%s: Failed to restart CU, resetting board...\n",dev->name); + eexp_hw_init586(dev); + netif_wake_queue(dev); + return; + } + } + } + } +} + +/* + * Writes down the list of transmit buffers into card memory. Each + * entry consists of an 82586 transmit command, followed by a jump + * pointing to itself. When we want to transmit a packet, we write + * the data into the appropriate transmit buffer and then modify the + * preceding jump to point at the new transmit command. This means that + * the 586 command unit is continuously active. 
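[Editorial note: a sketch of one transmit slot as this driver lays it out
(eexp_hw_tx_pio() above, eexp_hw_txinit() below). The offsets are relative
to the slot base and the field names are editorial, inferred from the
outw() sequences:

	/* +0x00  status       (0)
	 * +0x02  command      Cmd_INT | Cmd_Xmit
	 * +0x04  link         -> +0x08 (the NOP below)
	 * +0x06  tbd pointer  -> +0x0e (buffer descriptor)
	 * +0x08  NOP status   (0)
	 * +0x0a  NOP command  (0)
	 * +0x0c  NOP link     -> +0x08, i.e. itself: the CU parks here
	 * +0x0e  buffer descriptor, then the frame data at +0x16
	 */

To queue a frame, the driver fills a fresh slot and then rewrites the
previous slot's word at +0x0c to point at the new slot, so the command unit
walks on instead of parking.

end of note]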
+ */
+
+static void eexp_hw_txinit(struct net_device *dev)
+{
+	struct net_local *lp = netdev_priv(dev);
+	unsigned short tx_block = TX_BUF_START;
+	unsigned short curtbuf;
+	unsigned short ioaddr = dev->base_addr;
+
+	for ( curtbuf=0 ; curtbuf<lp->num_tx_bufs ; curtbuf++ )
+	{
+		outw(tx_block, ioaddr + WRITE_PTR);
+
+		outw(0x0000, ioaddr + DATAPORT);
+		outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
+		outw(tx_block+0x08, ioaddr + DATAPORT);
+		outw(tx_block+0x0e, ioaddr + DATAPORT);
+
+		outw(0x0000, ioaddr + DATAPORT);
+		outw(0x0000, ioaddr + DATAPORT);
+		outw(tx_block+0x08, ioaddr + DATAPORT);
+
+		outw(0x8000, ioaddr + DATAPORT);
+		outw(-1, ioaddr + DATAPORT);
+		outw(tx_block+0x16, ioaddr + DATAPORT);
+		outw(0x0000, ioaddr + DATAPORT);
+
+		tx_block += TX_BUF_SIZE;
+	}
+	lp->tx_head = TX_BUF_START;
+	lp->tx_reap = TX_BUF_START;
+	lp->tx_tail = tx_block - TX_BUF_SIZE;
+	lp->tx_link = lp->tx_tail + 0x08;
+	lp->rx_buf_start = tx_block;
+
+}
+
+/*
+ * Write the circular list of receive buffer descriptors to card memory.
+ * The end of the list isn't marked, which means that the 82586 receive
+ * unit will loop until buffers become available (this avoids it giving us
+ * "out of resources" messages).
+ */
+
+static void eexp_hw_rxinit(struct net_device *dev)
+{
+	struct net_local *lp = netdev_priv(dev);
+	unsigned short rx_block = lp->rx_buf_start;
+	unsigned short ioaddr = dev->base_addr;
+
+	lp->num_rx_bufs = 0;
+	lp->rx_first = lp->rx_ptr = rx_block;
+	do
+	{
+		lp->num_rx_bufs++;
+
+		outw(rx_block, ioaddr + WRITE_PTR);
+
+		outw(0, ioaddr + DATAPORT);  outw(0, ioaddr+DATAPORT);
+		outw(rx_block + RX_BUF_SIZE, ioaddr+DATAPORT);
+		outw(0xffff, ioaddr+DATAPORT);
+
+		outw(0x0000, ioaddr+DATAPORT);
+		outw(0xdead, ioaddr+DATAPORT);
+		outw(0xdead, ioaddr+DATAPORT);
+		outw(0xdead, ioaddr+DATAPORT);
+		outw(0xdead, ioaddr+DATAPORT);
+		outw(0xdead, ioaddr+DATAPORT);
+		outw(0xdead, ioaddr+DATAPORT);
+
+		outw(0x0000, ioaddr+DATAPORT);
+		outw(rx_block + RX_BUF_SIZE + 0x16, ioaddr+DATAPORT);
+		outw(rx_block + 0x20, ioaddr+DATAPORT);
+		outw(0, ioaddr+DATAPORT);
+		outw(RX_BUF_SIZE-0x20, ioaddr+DATAPORT);
+
+		lp->rx_last = rx_block;
+		rx_block += RX_BUF_SIZE;
+	} while (rx_block <= lp->rx_buf_end-RX_BUF_SIZE);
+
+
+	/* Make first Rx frame descriptor point to first Rx buffer
+	   descriptor */
+	outw(lp->rx_first + 6, ioaddr+WRITE_PTR);
+	outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
+
+	/* Close Rx frame descriptor ring */
+	outw(lp->rx_last + 4, ioaddr+WRITE_PTR);
+	outw(lp->rx_first, ioaddr+DATAPORT);
+
+	/* Close Rx buffer descriptor ring */
+	outw(lp->rx_last + 0x16 + 2, ioaddr+WRITE_PTR);
+	outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
+
+}
+
+/*
+ * Un-reset the 586, and start the configuration sequence. We don't wait for
+ * this to finish, but allow the interrupt handler to start the CU and RU for
+ * us. We can't start the receive/transmission system up before we know that
+ * the hardware is configured correctly.
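[Editorial note: the receive side, built by eexp_hw_rxinit() above, uses the
same idea in circular form: the frame descriptors are linked into a ring
with no end-of-list marker, so the receive unit cycles through them
indefinitely instead of stopping with "out of resources". Conceptually (a
host-side pseudostructure, not the real card I/O):

	struct rfd { struct rfd *link; /* status, sizes, frame area */ };
	struct rfd ring[NUM_RX_BUFS];
	int i;

	for (i = 0; i < NUM_RX_BUFS; i++)
		ring[i].link = &ring[(i + 1) % NUM_RX_BUFS];	/* last -> first */

end of note]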
+ */ + +static void eexp_hw_init586(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned short ioaddr = dev->base_addr; + int i; + +#if NET_DEBUG > 6 + printk("%s: eexp_hw_init586()\n", dev->name); +#endif + + lp->started = 0; + + set_loopback(dev); + + outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ); + + /* Download the startup code */ + outw(lp->rx_buf_end & ~31, ioaddr + SM_PTR); + outw(lp->width?0x0001:0x0000, ioaddr + 0x8006); + outw(0x0000, ioaddr + 0x8008); + outw(0x0000, ioaddr + 0x800a); + outw(0x0000, ioaddr + 0x800c); + outw(0x0000, ioaddr + 0x800e); + + for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) { + int j; + outw(i, ioaddr + SM_PTR); + for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2) + outw(start_code[(i+j)/2], + ioaddr+0x4000+j); + for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2) + outw(start_code[(i+j+16)/2], + ioaddr+0x8000+j); + } + + /* Do we want promiscuous mode or multicast? */ + outw(CONF_PROMISC & ~31, ioaddr+SM_PTR); + i = inw(ioaddr+SHADOW(CONF_PROMISC)); + outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1), + ioaddr+SHADOW(CONF_PROMISC)); + lp->was_promisc = dev->flags & IFF_PROMISC; +#if 0 + eexp_setup_filter(dev); +#endif + + /* Write our hardware address */ + outw(CONF_HWADDR & ~31, ioaddr+SM_PTR); + outw(((unsigned short *)dev->dev_addr)[0], ioaddr+SHADOW(CONF_HWADDR)); + outw(((unsigned short *)dev->dev_addr)[1], + ioaddr+SHADOW(CONF_HWADDR+2)); + outw(((unsigned short *)dev->dev_addr)[2], + ioaddr+SHADOW(CONF_HWADDR+4)); + + eexp_hw_txinit(dev); + eexp_hw_rxinit(dev); + + outb(0,ioaddr+EEPROM_Ctrl); + mdelay(5); + + scb_command(dev, 0xf000); + outb(0,ioaddr+SIGNAL_CA); + + outw(0, ioaddr+SM_PTR); + + { + unsigned short rboguscount=50,rfailcount=5; + while (inw(ioaddr+0x4000)) + { + if (!--rboguscount) + { + printk(KERN_WARNING "%s: i82586 reset timed out, kicking...\n", + dev->name); + scb_command(dev, 0); + outb(0,ioaddr+SIGNAL_CA); + rboguscount = 100; + if (!--rfailcount) + { + printk(KERN_WARNING "%s: i82586 not responding, giving up.\n", + dev->name); + return; + } + } + } + } + + scb_wrcbl(dev, CONF_LINK); + scb_command(dev, 0xf000|SCB_CUstart); + outb(0,ioaddr+SIGNAL_CA); + + { + unsigned short iboguscount=50,ifailcount=5; + while (!scb_status(dev)) + { + if (!--iboguscount) + { + if (--ifailcount) + { + printk(KERN_WARNING "%s: i82586 initialization timed out, status %04x, cmd %04x\n", + dev->name, scb_status(dev), scb_rdcmd(dev)); + scb_wrcbl(dev, CONF_LINK); + scb_command(dev, 0xf000|SCB_CUstart); + outb(0,ioaddr+SIGNAL_CA); + iboguscount = 100; + } + else + { + printk(KERN_WARNING "%s: Failed to initialize i82586, giving up.\n",dev->name); + return; + } + } + } + } + + clear_loopback(dev); + outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ); + + lp->init_time = jiffies; +#if NET_DEBUG > 6 + printk("%s: leaving eexp_hw_init586()\n", dev->name); +#endif +} + +static void eexp_setup_filter(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + unsigned short ioaddr = dev->base_addr; + int count = netdev_mc_count(dev); + int i; + if (count > 8) { + printk(KERN_INFO "%s: too many multicast addresses (%d)\n", + dev->name, count); + count = 8; + } + + outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); + outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); + i = 0; + netdev_for_each_mc_addr(ha, dev) { + unsigned short *data = (unsigned short *) ha->addr; + + if (i == count) + break; + outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR); + outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i))); + 
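+		/* the remaining words of this address may lie in a
+		   different 32-byte shadow page, so re-point SM_PTR
+		   before each write */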
outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR); + outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2)); + outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR); + outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4)); + i++; + } +} + +/* + * Set or clear the multicast filter for this adaptor. + */ +static void +eexp_set_multicast(struct net_device *dev) +{ + unsigned short ioaddr = dev->base_addr; + struct net_local *lp = netdev_priv(dev); + int kick = 0, i; + if ((dev->flags & IFF_PROMISC) != lp->was_promisc) { + outw(CONF_PROMISC & ~31, ioaddr+SM_PTR); + i = inw(ioaddr+SHADOW(CONF_PROMISC)); + outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1), + ioaddr+SHADOW(CONF_PROMISC)); + lp->was_promisc = dev->flags & IFF_PROMISC; + kick = 1; + } + if (!(dev->flags & IFF_PROMISC)) { + eexp_setup_filter(dev); + if (lp->old_mc_count != netdev_mc_count(dev)) { + kick = 1; + lp->old_mc_count = netdev_mc_count(dev); + } + } + if (kick) { + unsigned long oj; + scb_command(dev, SCB_CUsuspend); + outb(0, ioaddr+SIGNAL_CA); + outb(0, ioaddr+SIGNAL_CA); +#if 0 + printk("%s: waiting for CU to go suspended\n", dev->name); +#endif + oj = jiffies; + while ((SCB_CUstat(scb_status(dev)) == 2) && + (time_before(jiffies, oj + 2000))); + if (SCB_CUstat(scb_status(dev)) == 2) + printk("%s: warning, CU didn't stop\n", dev->name); + lp->started &= ~(STARTED_CU); + scb_wrcbl(dev, CONF_LINK); + scb_command(dev, SCB_CUstart); + outb(0, ioaddr+SIGNAL_CA); + } +} + + +/* + * MODULE stuff + */ + +#ifdef MODULE + +#define EEXP_MAX_CARDS 4 /* max number of cards to support */ + +static struct net_device *dev_eexp[EEXP_MAX_CARDS]; +static int irq[EEXP_MAX_CARDS]; +static int io[EEXP_MAX_CARDS]; + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +MODULE_PARM_DESC(io, "EtherExpress 16 I/O base address(es)"); +MODULE_PARM_DESC(irq, "EtherExpress 16 IRQ number(s)"); +MODULE_LICENSE("GPL"); + + +/* Ideally the user would give us io=, irq= for every card. If any parameters + * are specified, we verify and then use them. If no parameters are given, we + * autoprobe for one card only. 
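+ *
+ * e.g. (hypothetical port/IRQ values, two cards):
+ *
+ *	modprobe eexpress io=0x300,0x320 irq=10,11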
+ */ +int __init init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) { + dev = alloc_etherdev(sizeof(struct net_local)); + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + if (io[this_dev] == 0) { + if (this_dev) + break; + printk(KERN_NOTICE "eexpress.c: Module autoprobe not recommended, give io=xx.\n"); + } + if (do_express_probe(dev) == 0) { + dev_eexp[this_dev] = dev; + found++; + continue; + } + printk(KERN_WARNING "eexpress.c: Failed to register card at 0x%x.\n", io[this_dev]); + free_netdev(dev); + break; + } + if (found) + return 0; + return -ENXIO; +} + +void __exit cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) { + struct net_device *dev = dev_eexp[this_dev]; + if (dev) { + unregister_netdev(dev); + free_netdev(dev); + } + } +} +#endif + +/* + * Local Variables: + * c-file-style: "linux" + * tab-width: 8 + * End: + */ diff --git a/drivers/net/ethernet/i825xx/eexpress.h b/drivers/net/ethernet/i825xx/eexpress.h new file mode 100644 index 0000000..dc9c6ea --- /dev/null +++ b/drivers/net/ethernet/i825xx/eexpress.h @@ -0,0 +1,179 @@ +/* + * eexpress.h: Intel EtherExpress16 defines + */ + +/* + * EtherExpress card register addresses + * as offsets from the base IO region (dev->base_addr) + */ + +#define DATAPORT 0x0000 +#define WRITE_PTR 0x0002 +#define READ_PTR 0x0004 +#define SIGNAL_CA 0x0006 +#define SET_IRQ 0x0007 +#define SM_PTR 0x0008 +#define MEM_Dec 0x000a +#define MEM_Ctrl 0x000b +#define MEM_Page_Ctrl 0x000c +#define Config 0x000d +#define EEPROM_Ctrl 0x000e +#define ID_PORT 0x000f +#define MEM_ECtrl 0x000f + +/* + * card register defines + */ + +/* SET_IRQ */ +#define SIRQ_en 0x08 +#define SIRQ_dis 0x00 + +/* EEPROM_Ctrl */ +#define EC_Clk 0x01 +#define EC_CS 0x02 +#define EC_Wr 0x04 +#define EC_Rd 0x08 +#define ASIC_RST 0x40 +#define i586_RST 0x80 + +#define eeprom_delay() { udelay(40); } + +/* + * i82586 Memory Configuration + */ + +/* (System Configuration Pointer) System start up block, read after 586_RST */ +#define SCP_START 0xfff6 + +/* Intermediate System Configuration Pointer */ +#define ISCP_START 0x0000 + +/* System Command Block */ +#define SCB_START 0x0008 + +/* Start of buffer region. Everything before this is used for control + * structures and the CU configuration program. The memory layout is + * determined in eexp_hw_probe(), once we know how much memory is + * available on the card. 
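+ *
+ * Roughly (using the defines here; rx_buf_end is found at probe time):
+ *
+ *	0x0000 ISCP, 0x0008 SCB, 0x0100 tx buffers, then rx buffers up
+ *	to rx_buf_end, with the SCP fixed at the top of memory (0xfff6).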
+ */
+
+#define TX_BUF_START 0x0100
+
+#define TX_BUF_SIZE ((24+ETH_FRAME_LEN+31)&~0x1f)
+#define RX_BUF_SIZE ((32+ETH_FRAME_LEN+31)&~0x1f)
+
+/*
+ * SCB defines
+ */
+
+/* these macros take the SCB status word and test the relevant status bit */
+#define SCB_complete(s) (((s) & 0x8000) != 0)
+#define SCB_rxdframe(s) (((s) & 0x4000) != 0)
+#define SCB_CUdead(s) (((s) & 0x2000) != 0)
+#define SCB_RUdead(s) (((s) & 0x1000) != 0)
+#define SCB_ack(s) ((s) & 0xf000)
+
+/* Command unit status: 0=idle, 1=suspended, 2=active */
+#define SCB_CUstat(s) (((s)&0x0300)>>8)
+
+/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
+#define SCB_RUstat(s) (((s)&0x0070)>>4)
+
+/* SCB commands */
+#define SCB_CUnop 0x0000
+#define SCB_CUstart 0x0100
+#define SCB_CUresume 0x0200
+#define SCB_CUsuspend 0x0300
+#define SCB_CUabort 0x0400
+#define SCB_resetchip 0x0080
+
+#define SCB_RUnop 0x0000
+#define SCB_RUstart 0x0010
+#define SCB_RUresume 0x0020
+#define SCB_RUsuspend 0x0030
+#define SCB_RUabort 0x0040
+
+/*
+ * Command block defines
+ */
+
+#define Stat_Done(s) (((s) & 0x8000) != 0)
+#define Stat_Busy(s) (((s) & 0x4000) != 0)
+#define Stat_OK(s) (((s) & 0x2000) != 0)
+#define Stat_Abort(s) (((s) & 0x1000) != 0)
+#define Stat_STFail(s) (((s) & 0x0800) != 0)
+#define Stat_TNoCar(s) (((s) & 0x0400) != 0)
+#define Stat_TNoCTS(s) (((s) & 0x0200) != 0)
+#define Stat_TNoDMA(s) (((s) & 0x0100) != 0)
+#define Stat_TDefer(s) (((s) & 0x0080) != 0)
+#define Stat_TColl(s) (((s) & 0x0040) != 0)
+#define Stat_TXColl(s) (((s) & 0x0020) != 0)
+#define Stat_NoColl(s) ((s) & 0x000f)
+
+/* Cmd_END will end AFTER the command if this is the first
+ * command block after an SCB_CUstart, but BEFORE the command
+ * for all subsequent commands. Best strategy is to place
+ * Cmd_INT on the last command in the sequence, followed by a
+ * dummy Cmd_Nop with Cmd_END after this.
+ */
+
+#define Cmd_END 0x8000
+#define Cmd_SUS 0x4000
+#define Cmd_INT 0x2000
+
+#define Cmd_Nop 0x0000
+#define Cmd_SetAddr 0x0001
+#define Cmd_Config 0x0002
+#define Cmd_MCast 0x0003
+#define Cmd_Xmit 0x0004
+#define Cmd_TDR 0x0005
+#define Cmd_Dump 0x0006
+#define Cmd_Diag 0x0007
+
+
+/*
+ * Frame Descriptor (Receive block) defines
+ */
+
+#define FD_Done(s) (((s) & 0x8000) != 0)
+#define FD_Busy(s) (((s) & 0x4000) != 0)
+#define FD_OK(s) (((s) & 0x2000) != 0)
+
+#define FD_CRC(s) (((s) & 0x0800) != 0)
+#define FD_Align(s) (((s) & 0x0400) != 0)
+#define FD_Resrc(s) (((s) & 0x0200) != 0)
+#define FD_DMA(s) (((s) & 0x0100) != 0)
+#define FD_Short(s) (((s) & 0x0080) != 0)
+#define FD_NoEOF(s) (((s) & 0x0040) != 0)
+
+struct rfd_header {
+	volatile unsigned long flags;
+	volatile unsigned short link;
+	volatile unsigned short rbd_offset;
+	volatile unsigned short dstaddr1;
+	volatile unsigned short dstaddr2;
+	volatile unsigned short dstaddr3;
+	volatile unsigned short srcaddr1;
+	volatile unsigned short srcaddr2;
+	volatile unsigned short srcaddr3;
+	volatile unsigned short length;
+
+	/* This is actually a Receive Buffer Descriptor. The way we
+	 * arrange memory means that an RBD always follows the RFD that
+	 * points to it, so they might as well be in the same structure.
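+	 *
+	 * i.e. each receive block in card memory is laid out as
+	 *
+	 *	RFD header | RBD | data buffer
+	 *
+	 * (offsets 0x00, 0x16 and 0x20 in eexp_hw_rxinit()), which is
+	 * why this structure carries both sets of fields.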
+	 */
+	volatile unsigned short actual_count;
+	volatile unsigned short next_rbd;
+	volatile unsigned short buf_addr1;
+	volatile unsigned short buf_addr2;
+	volatile unsigned short size;
+};
+
+/* Returned data from the Time Domain Reflectometer */
+
+#define TDR_LINKOK (1<<15)
+#define TDR_XCVRPROBLEM (1<<14)
+#define TDR_OPEN (1<<13)
+#define TDR_SHORT (1<<12)
+#define TDR_TIME 0x7ff
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
new file mode 100644
index 0000000..b00781c
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -0,0 +1,1094 @@
+/*
+ * linux/drivers/acorn/net/ether1.c
+ *
+ * Copyright (C) 1996-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Acorn ether1 driver (82586 chip) for Acorn machines
+ *
+ * We basically keep two queues in the card's memory - one for transmit
+ * and one for receive. Each has a head and a tail. The head is where
+ * we/the chip adds packets to be transmitted/received, and the tail
+ * is where the transmitter has got to/where the receiver will stop.
+ * Both of these queues are circular, and since the chip is running
+ * all the time, we have to be careful when we modify the pointers etc
+ * so that the buffer memory contents are valid all the time.
+ *
+ * Change log:
+ * 1.00	RMK			Released
+ * 1.01	RMK	19/03/1996	Transfers the last odd byte onto/off of the card now.
+ * 1.02	RMK	25/05/1997	Added code to restart RU if it goes not ready
+ * 1.03	RMK	14/09/1997	Cleaned up the handling of a reset during the TX interrupt.
+ *				Should prevent lockup.
+ * 1.04	RMK	17/09/1997	Added more info when initialisation of chip goes wrong.
+ *				TDR now only reports failure when chip reports non-zero
+ *				TDR time-distance.
+ * 1.05	RMK	31/12/1997	Removed calls to dev_tint for 2.1
+ * 1.06	RMK	10/02/2000	Updated for 2.3.43
+ * 1.07	RMK	13/05/2000	Updated for 2.3.99-pre8
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#define __ETHER1_C
+#include "ether1.h"
+
+static unsigned int net_debug = NET_DEBUG;
+
+#define BUFFER_SIZE	0x10000
+#define TX_AREA_START	0x00100
+#define TX_AREA_END	0x05000
+#define RX_AREA_START	0x05000
+#define RX_AREA_END	0x0fc00
+
+static int ether1_open(struct net_device *dev);
+static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t ether1_interrupt(int irq, void *dev_id);
+static int ether1_close(struct net_device *dev);
+static void ether1_setmulticastlist(struct net_device *dev);
+static void ether1_timeout(struct net_device *dev);
+
+/* ------------------------------------------------------------------------- */
+
+static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
+
+#define BUS_16 16
+#define BUS_8 8
+
+/* ------------------------------------------------------------------------- */
+
+#define DISABLEIRQS 1
+#define NORMALIRQS 0
+
+#define ether1_readw(dev, addr, type, offset, svflgs) ether1_inw_p (dev, addr + (int)(&((type *)0)->offset), svflgs)
+#define ether1_writew(dev, val, addr, type, offset, svflgs) ether1_outw_p (dev, val, addr + (int)(&((type *)0)->offset), svflgs)
+
+static inline unsigned short
+ether1_inw_p (struct net_device *dev, int addr, int svflgs)
+{
+	unsigned long flags;
+	unsigned short ret;
+
+	if (svflgs)
+		local_irq_save (flags);
+
+	writeb(addr >> 12, REG_PAGE);
+	ret = readw(ETHER1_RAM + ((addr & 4095) << 1));
+	if (svflgs)
+		local_irq_restore (flags);
+	return ret;
+}
+
+static inline void
+ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs)
+{
+	unsigned long flags;
+
+	if (svflgs)
+		local_irq_save (flags);
+
+	writeb(addr >> 12, REG_PAGE);
+	writew(val, ETHER1_RAM + ((addr & 4095) << 1));
+	if (svflgs)
+		local_irq_restore (flags);
+}
+
+/*
+ * Some inline assembler to allow fast transfers on to/off of the card.
+ * Since this driver depends on some features presented by the ARM
+ * specific architecture, and you can't configure this driver without
+ * specifying ARM mode, this is not a problem.
+ *
+ * These routines are essentially optimised memcpys between kernel
+ * memory and the card's onboard RAM.
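+ *
+ * For the write direction, the assembler below computes roughly this
+ * portable-C sketch (hypothetical; it ignores the 4K page-crossing
+ * and odd-trailing-byte handling that the real code does):
+ *
+ *	const u16 *src = data;
+ *	unsigned int i;
+ *	for (i = 0; i < length / 2; i++)
+ *		writel(src[i] | ((u32)src[i] << 16), addr + (i << 2));
+ *
+ * i.e. each 16-bit word is duplicated into both halves of one 32-bit
+ * store, because the card maps one 16-bit word per 32-bit bus word.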
+ */ +static void +ether1_writebuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length) +{ + unsigned int page, thislen, offset; + void __iomem *addr; + + offset = start & 4095; + page = start >> 12; + addr = ETHER1_RAM + (offset << 1); + + if (offset + length > 4096) + thislen = 4096 - offset; + else + thislen = length; + + do { + int used; + + writeb(page, REG_PAGE); + length -= thislen; + + __asm__ __volatile__( + "subs %3, %3, #2\n\ + bmi 2f\n\ +1: ldr %0, [%1], #2\n\ + mov %0, %0, lsl #16\n\ + orr %0, %0, %0, lsr #16\n\ + str %0, [%2], #4\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%1], #2\n\ + mov %0, %0, lsl #16\n\ + orr %0, %0, %0, lsr #16\n\ + str %0, [%2], #4\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%1], #2\n\ + mov %0, %0, lsl #16\n\ + orr %0, %0, %0, lsr #16\n\ + str %0, [%2], #4\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%1], #2\n\ + mov %0, %0, lsl #16\n\ + orr %0, %0, %0, lsr #16\n\ + str %0, [%2], #4\n\ + subs %3, %3, #2\n\ + bpl 1b\n\ +2: adds %3, %3, #1\n\ + ldreqb %0, [%1]\n\ + streqb %0, [%2]" + : "=&r" (used), "=&r" (data) + : "r" (addr), "r" (thislen), "1" (data)); + + addr = ETHER1_RAM; + + thislen = length; + if (thislen > 4096) + thislen = 4096; + page++; + } while (thislen); +} + +static void +ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length) +{ + unsigned int page, thislen, offset; + void __iomem *addr; + + offset = start & 4095; + page = start >> 12; + addr = ETHER1_RAM + (offset << 1); + + if (offset + length > 4096) + thislen = 4096 - offset; + else + thislen = length; + + do { + int used; + + writeb(page, REG_PAGE); + length -= thislen; + + __asm__ __volatile__( + "subs %3, %3, #2\n\ + bmi 2f\n\ +1: ldr %0, [%2], #4\n\ + strb %0, [%1], #1\n\ + mov %0, %0, lsr #8\n\ + strb %0, [%1], #1\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%2], #4\n\ + strb %0, [%1], #1\n\ + mov %0, %0, lsr #8\n\ + strb %0, [%1], #1\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%2], #4\n\ + strb %0, [%1], #1\n\ + mov %0, %0, lsr #8\n\ + strb %0, [%1], #1\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%2], #4\n\ + strb %0, [%1], #1\n\ + mov %0, %0, lsr #8\n\ + strb %0, [%1], #1\n\ + subs %3, %3, #2\n\ + bpl 1b\n\ +2: adds %3, %3, #1\n\ + ldreqb %0, [%2]\n\ + streqb %0, [%1]" + : "=&r" (used), "=&r" (data) + : "r" (addr), "r" (thislen), "1" (data)); + + addr = ETHER1_RAM; + + thislen = length; + if (thislen > 4096) + thislen = 4096; + page++; + } while (thislen); +} + +static int __devinit +ether1_ramtest(struct net_device *dev, unsigned char byte) +{ + unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL); + int i, ret = BUFFER_SIZE; + int max_errors = 15; + int bad = -1; + int bad_start = 0; + + if (!buffer) + return 1; + + memset (buffer, byte, BUFFER_SIZE); + ether1_writebuffer (dev, buffer, 0, BUFFER_SIZE); + memset (buffer, byte ^ 0xff, BUFFER_SIZE); + ether1_readbuffer (dev, buffer, 0, BUFFER_SIZE); + + for (i = 0; i < BUFFER_SIZE; i++) { + if (buffer[i] != byte) { + if (max_errors >= 0 && bad != buffer[i]) { + if (bad != -1) + printk ("\n"); + printk (KERN_CRIT "%s: RAM failed with (%02X instead of %02X) at 0x%04X", + dev->name, buffer[i], byte, i); + ret = -ENODEV; + max_errors --; + bad = buffer[i]; + bad_start = i; + } + } else { + if (bad != -1) { + if (bad_start == i - 1) + printk ("\n"); + else + printk (" - 0x%04X\n", i - 1); + bad = -1; + } + } + } + + if (bad != -1) + printk (" - 0x%04X\n", BUFFER_SIZE); + kfree (buffer); + + return ret; +} + +static int +ether1_reset (struct 
net_device *dev) +{ + writeb(CTRL_RST|CTRL_ACK, REG_CONTROL); + return BUS_16; +} + +static int __devinit +ether1_init_2(struct net_device *dev) +{ + int i; + dev->mem_start = 0; + + i = ether1_ramtest (dev, 0x5a); + + if (i > 0) + i = ether1_ramtest (dev, 0x1e); + + if (i <= 0) + return -ENODEV; + + dev->mem_end = i; + return 0; +} + +/* + * These are the structures that are loaded into the ether RAM card to + * initialise the 82586 + */ + +/* at 0x0100 */ +#define NOP_ADDR (TX_AREA_START) +#define NOP_SIZE (0x06) +static nop_t init_nop = { + 0, + CMD_NOP, + NOP_ADDR +}; + +/* at 0x003a */ +#define TDR_ADDR (0x003a) +#define TDR_SIZE (0x08) +static tdr_t init_tdr = { + 0, + CMD_TDR | CMD_INTR, + NOP_ADDR, + 0 +}; + +/* at 0x002e */ +#define MC_ADDR (0x002e) +#define MC_SIZE (0x0c) +static mc_t init_mc = { + 0, + CMD_SETMULTICAST, + TDR_ADDR, + 0, + { { 0, } } +}; + +/* at 0x0022 */ +#define SA_ADDR (0x0022) +#define SA_SIZE (0x0c) +static sa_t init_sa = { + 0, + CMD_SETADDRESS, + MC_ADDR, + { 0, } +}; + +/* at 0x0010 */ +#define CFG_ADDR (0x0010) +#define CFG_SIZE (0x12) +static cfg_t init_cfg = { + 0, + CMD_CONFIG, + SA_ADDR, + 8, + 8, + CFG8_SRDY, + CFG9_PREAMB8 | CFG9_ADDRLENBUF | CFG9_ADDRLEN(6), + 0, + 0x60, + 0, + CFG13_RETRY(15) | CFG13_SLOTH(2), + 0, +}; + +/* at 0x0000 */ +#define SCB_ADDR (0x0000) +#define SCB_SIZE (0x10) +static scb_t init_scb = { + 0, + SCB_CMDACKRNR | SCB_CMDACKCNA | SCB_CMDACKFR | SCB_CMDACKCX, + CFG_ADDR, + RX_AREA_START, + 0, + 0, + 0, + 0 +}; + +/* at 0xffee */ +#define ISCP_ADDR (0xffee) +#define ISCP_SIZE (0x08) +static iscp_t init_iscp = { + 1, + SCB_ADDR, + 0x0000, + 0x0000 +}; + +/* at 0xfff6 */ +#define SCP_ADDR (0xfff6) +#define SCP_SIZE (0x0a) +static scp_t init_scp = { + SCP_SY_16BBUS, + { 0, 0 }, + ISCP_ADDR, + 0 +}; + +#define RFD_SIZE (0x16) +static rfd_t init_rfd = { + 0, + 0, + 0, + 0, + { 0, }, + { 0, }, + 0 +}; + +#define RBD_SIZE (0x0a) +static rbd_t init_rbd = { + 0, + 0, + 0, + 0, + ETH_FRAME_LEN + 8 +}; + +#define TX_SIZE (0x08) +#define TBD_SIZE (0x08) + +static int +ether1_init_for_open (struct net_device *dev) +{ + int i, status, addr, next, next2; + int failures = 0; + unsigned long timeout; + + writeb(CTRL_RST|CTRL_ACK, REG_CONTROL); + + for (i = 0; i < 6; i++) + init_sa.sa_addr[i] = dev->dev_addr[i]; + + /* load data structures into ether1 RAM */ + ether1_writebuffer (dev, &init_scp, SCP_ADDR, SCP_SIZE); + ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE); + ether1_writebuffer (dev, &init_scb, SCB_ADDR, SCB_SIZE); + ether1_writebuffer (dev, &init_cfg, CFG_ADDR, CFG_SIZE); + ether1_writebuffer (dev, &init_sa, SA_ADDR, SA_SIZE); + ether1_writebuffer (dev, &init_mc, MC_ADDR, MC_SIZE); + ether1_writebuffer (dev, &init_tdr, TDR_ADDR, TDR_SIZE); + ether1_writebuffer (dev, &init_nop, NOP_ADDR, NOP_SIZE); + + if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) { + printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n", + dev->name); + return 1; + } + + /* + * setup circularly linked list of { rfd, rbd, buffer }, with + * all rfds circularly linked, rbds circularly linked. + * First rfd is linked to scp, first rbd is linked to first + * rfd. Last rbd has a suspend command. 
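+	 *
+	 * (More precisely, the scb's rfa_offset points at the first
+	 * rfd, and it is the last rfd, remembered in priv->rx_tail,
+	 * that carries the RFD_CMDEL | RFD_CMDSUSPEND bits below.)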
+ */ + addr = RX_AREA_START; + do { + next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10; + next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10; + + if (next2 >= RX_AREA_END) { + next = RX_AREA_START; + init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND; + priv(dev)->rx_tail = addr; + } else + init_rfd.rfd_command = 0; + if (addr == RX_AREA_START) + init_rfd.rfd_rbdoffset = addr + RFD_SIZE; + else + init_rfd.rfd_rbdoffset = 0; + init_rfd.rfd_link = next; + init_rbd.rbd_link = next + RFD_SIZE; + init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE; + + ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE); + ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE); + addr = next; + } while (next2 < RX_AREA_END); + + priv(dev)->tx_link = NOP_ADDR; + priv(dev)->tx_head = NOP_ADDR + NOP_SIZE; + priv(dev)->tx_tail = TDR_ADDR; + priv(dev)->rx_head = RX_AREA_START; + + /* release reset & give 586 a prod */ + priv(dev)->resetting = 1; + priv(dev)->initialising = 1; + writeb(CTRL_RST, REG_CONTROL); + writeb(0, REG_CONTROL); + writeb(CTRL_CA, REG_CONTROL); + + /* 586 should now unset iscp.busy */ + timeout = jiffies + HZ/2; + while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, DISABLEIRQS) == 1) { + if (time_after(jiffies, timeout)) { + printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n", dev->name); + return 1; + } + } + + /* check status of commands that we issued */ + timeout += HZ/10; + while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS)) + & STAT_COMPLETE) == 0) { + if (time_after(jiffies, timeout)) + break; + } + + if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { + printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n", dev->name, status); + printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, + ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); + failures += 1; + } + + timeout += HZ/10; + while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS)) + & STAT_COMPLETE) == 0) { + if (time_after(jiffies, timeout)) + break; + } + + if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { + printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n", dev->name, status); + printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, + ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); + failures += 1; + } + + timeout += HZ/10; + while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS)) + & STAT_COMPLETE) == 0) { + if (time_after(jiffies, timeout)) + break; + } + + if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { + printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n", dev->name, status); + printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, + ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); + failures += 1; + } + + 
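+	/* the TDR cable test takes longest, so allow it a full second */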
timeout += HZ; + while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS)) + & STAT_COMPLETE) == 0) { + if (time_after(jiffies, timeout)) + break; + } + + if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { + printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name); + printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, + ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); + } else { + status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS); + if (status & TDR_XCVRPROB) + printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n", dev->name); + else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) { +#ifdef FANCY + printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n", dev->name, + status & TDR_SHORT ? "short" : "open", (status & TDR_TIME) / 10, + (status & TDR_TIME) % 10); +#else + printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n", dev->name, + status & TDR_SHORT ? "short" : "open", (status & TDR_TIME)); +#endif + } + } + + if (failures) + ether1_reset (dev); + return failures ? 1 : 0; +} + +/* ------------------------------------------------------------------------- */ + +static int +ether1_txalloc (struct net_device *dev, int size) +{ + int start, tail; + + size = (size + 1) & ~1; + tail = priv(dev)->tx_tail; + + if (priv(dev)->tx_head + size > TX_AREA_END) { + if (tail > priv(dev)->tx_head) + return -1; + start = TX_AREA_START; + if (start + size > tail) + return -1; + priv(dev)->tx_head = start + size; + } else { + if (priv(dev)->tx_head < tail && (priv(dev)->tx_head + size) > tail) + return -1; + start = priv(dev)->tx_head; + priv(dev)->tx_head += size; + } + + return start; +} + +static int +ether1_open (struct net_device *dev) +{ + if (!is_valid_ether_addr(dev->dev_addr)) { + printk(KERN_WARNING "%s: invalid ethernet MAC address\n", + dev->name); + return -EINVAL; + } + + if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev)) + return -EAGAIN; + + if (ether1_init_for_open (dev)) { + free_irq (dev->irq, dev); + return -EAGAIN; + } + + netif_start_queue(dev); + + return 0; +} + +static void +ether1_timeout(struct net_device *dev) +{ + printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n", + dev->name); + printk(KERN_WARNING "%s: resetting device\n", dev->name); + + ether1_reset (dev); + + if (ether1_init_for_open (dev)) + printk (KERN_ERR "%s: unable to restart interface\n", dev->name); + + dev->stats.tx_errors++; + netif_wake_queue(dev); +} + +static int +ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) +{ + int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; + unsigned long flags; + tx_t tx; + tbd_t tbd; + nop_t nop; + + if (priv(dev)->restart) { + printk(KERN_WARNING "%s: resetting device\n", dev->name); + + ether1_reset(dev); + + if (ether1_init_for_open(dev)) + printk(KERN_ERR "%s: unable to restart interface\n", dev->name); + else + priv(dev)->restart = 0; + } + + if (skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + goto out; + } + + /* + * insert packet followed by a nop + */ + txaddr = ether1_txalloc (dev, TX_SIZE); + tbdaddr = ether1_txalloc (dev, TBD_SIZE); + dataddr = ether1_txalloc (dev, skb->len); + nopaddr = ether1_txalloc (dev, NOP_SIZE); + + tx.tx_status = 0; + tx.tx_command = CMD_TX | 
CMD_INTR; + tx.tx_link = nopaddr; + tx.tx_tbdoffset = tbdaddr; + tbd.tbd_opts = TBD_EOL | skb->len; + tbd.tbd_link = I82586_NULL; + tbd.tbd_bufl = dataddr; + tbd.tbd_bufh = 0; + nop.nop_status = 0; + nop.nop_command = CMD_NOP; + nop.nop_link = nopaddr; + + local_irq_save(flags); + ether1_writebuffer (dev, &tx, txaddr, TX_SIZE); + ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE); + ether1_writebuffer (dev, skb->data, dataddr, skb->len); + ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE); + tmp = priv(dev)->tx_link; + priv(dev)->tx_link = nopaddr; + + /* now reset the previous nop pointer */ + ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS); + + local_irq_restore(flags); + + /* handle transmit */ + + /* check to see if we have room for a full sized ether frame */ + tmp = priv(dev)->tx_head; + tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN); + priv(dev)->tx_head = tmp; + dev_kfree_skb (skb); + + if (tst == -1) + netif_stop_queue(dev); + + out: + return NETDEV_TX_OK; +} + +static void +ether1_xmit_done (struct net_device *dev) +{ + nop_t nop; + int caddr, tst; + + caddr = priv(dev)->tx_tail; + +again: + ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); + + switch (nop.nop_command & CMD_MASK) { + case CMD_TDR: + /* special case */ + if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS) + != (unsigned short)I82586_NULL) { + ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR, scb_t, + scb_command, NORMALIRQS); + writeb(CTRL_CA, REG_CONTROL); + } + priv(dev)->tx_tail = NOP_ADDR; + return; + + case CMD_NOP: + if (nop.nop_link == caddr) { + if (priv(dev)->initialising == 0) + printk (KERN_WARNING "%s: strange command complete with no tx command!\n", dev->name); + else + priv(dev)->initialising = 0; + return; + } + if (caddr == nop.nop_link) + return; + caddr = nop.nop_link; + goto again; + + case CMD_TX: + if (nop.nop_status & STAT_COMPLETE) + break; + printk (KERN_ERR "%s: strange command complete without completed command\n", dev->name); + priv(dev)->restart = 1; + return; + + default: + printk (KERN_WARNING "%s: strange command %d complete! 
(offset %04X)", dev->name, + nop.nop_command & CMD_MASK, caddr); + priv(dev)->restart = 1; + return; + } + + while (nop.nop_status & STAT_COMPLETE) { + if (nop.nop_status & STAT_OK) { + dev->stats.tx_packets++; + dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS); + } else { + dev->stats.tx_errors++; + + if (nop.nop_status & STAT_COLLAFTERTX) + dev->stats.collisions++; + if (nop.nop_status & STAT_NOCARRIER) + dev->stats.tx_carrier_errors++; + if (nop.nop_status & STAT_TXLOSTCTS) + printk (KERN_WARNING "%s: cts lost\n", dev->name); + if (nop.nop_status & STAT_TXSLOWDMA) + dev->stats.tx_fifo_errors++; + if (nop.nop_status & STAT_COLLEXCESSIVE) + dev->stats.collisions += 16; + } + + if (nop.nop_link == caddr) { + printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n", dev->name); + break; + } + + caddr = nop.nop_link; + ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); + if ((nop.nop_command & CMD_MASK) != CMD_NOP) { + printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n", dev->name); + break; + } + + if (caddr == nop.nop_link) + break; + + caddr = nop.nop_link; + ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); + if ((nop.nop_command & CMD_MASK) != CMD_TX) { + printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n", dev->name); + break; + } + } + priv(dev)->tx_tail = caddr; + + caddr = priv(dev)->tx_head; + tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN); + priv(dev)->tx_head = caddr; + if (tst != -1) + netif_wake_queue(dev); +} + +static void +ether1_recv_done (struct net_device *dev) +{ + int status; + int nexttail, rbdaddr; + rbd_t rbd; + + do { + status = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_status, NORMALIRQS); + if ((status & RFD_COMPLETE) == 0) + break; + + rbdaddr = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_rbdoffset, NORMALIRQS); + ether1_readbuffer (dev, &rbd, rbdaddr, RBD_SIZE); + + if ((rbd.rbd_status & (RBD_EOF | RBD_ACNTVALID)) == (RBD_EOF | RBD_ACNTVALID)) { + int length = rbd.rbd_status & RBD_ACNT; + struct sk_buff *skb; + + length = (length + 1) & ~1; + skb = dev_alloc_skb (length + 2); + + if (skb) { + skb_reserve (skb, 2); + + ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length); + + skb->protocol = eth_type_trans (skb, dev); + netif_rx (skb); + dev->stats.rx_packets++; + } else + dev->stats.rx_dropped++; + } else { + printk(KERN_WARNING "%s: %s\n", dev->name, + (rbd.rbd_status & RBD_EOF) ? 
"oversized packet" : "acnt not valid"); + dev->stats.rx_dropped++; + } + + nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS); + /* nexttail should be rx_head */ + if (nexttail != priv(dev)->rx_head) + printk(KERN_ERR "%s: receiver buffer chaining error (%04X != %04X)\n", + dev->name, nexttail, priv(dev)->rx_head); + ether1_writew(dev, RFD_CMDEL | RFD_CMDSUSPEND, nexttail, rfd_t, rfd_command, NORMALIRQS); + ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_command, NORMALIRQS); + ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_status, NORMALIRQS); + ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_rbdoffset, NORMALIRQS); + + priv(dev)->rx_tail = nexttail; + priv(dev)->rx_head = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_link, NORMALIRQS); + } while (1); +} + +static irqreturn_t +ether1_interrupt (int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + int status; + + status = ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS); + + if (status) { + ether1_writew(dev, status & (SCB_STRNR | SCB_STCNA | SCB_STFR | SCB_STCX), + SCB_ADDR, scb_t, scb_command, NORMALIRQS); + writeb(CTRL_CA | CTRL_ACK, REG_CONTROL); + if (status & SCB_STCX) { + ether1_xmit_done (dev); + } + if (status & SCB_STCNA) { + if (priv(dev)->resetting == 0) + printk (KERN_WARNING "%s: CU went not ready ???\n", dev->name); + else + priv(dev)->resetting += 1; + if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS) + != (unsigned short)I82586_NULL) { + ether1_writew(dev, SCB_CMDCUCSTART, SCB_ADDR, scb_t, scb_command, NORMALIRQS); + writeb(CTRL_CA, REG_CONTROL); + } + if (priv(dev)->resetting == 2) + priv(dev)->resetting = 0; + } + if (status & SCB_STFR) { + ether1_recv_done (dev); + } + if (status & SCB_STRNR) { + if (ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS) & SCB_STRXSUSP) { + printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name); + ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS); + writeb(CTRL_CA, REG_CONTROL); + dev->stats.rx_dropped++; /* we suspended due to lack of buffer space */ + } else + printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name, + ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS)); + printk (KERN_WARNING "RU ptr = %04X\n", ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, + NORMALIRQS)); + } + } else + writeb(CTRL_ACK, REG_CONTROL); + + return IRQ_HANDLED; +} + +static int +ether1_close (struct net_device *dev) +{ + ether1_reset (dev); + + free_irq(dev->irq, dev); + + return 0; +} + +/* + * Set or clear the multicast filter for this adaptor. + * num_addrs == -1 Promiscuous mode, receive all packets. + * num_addrs == 0 Normal mode, clear multicast list. + * num_addrs > 0 Multicast mode, receive normal and MC packets, and do + * best-effort filtering. 
+ */ +static void +ether1_setmulticastlist (struct net_device *dev) +{ +} + +/* ------------------------------------------------------------------------- */ + +static void __devinit ether1_banner(void) +{ + static unsigned int version_printed = 0; + + if (net_debug && version_printed++ == 0) + printk(KERN_INFO "%s", version); +} + +static const struct net_device_ops ether1_netdev_ops = { + .ndo_open = ether1_open, + .ndo_stop = ether1_close, + .ndo_start_xmit = ether1_sendpacket, + .ndo_set_multicast_list = ether1_setmulticastlist, + .ndo_tx_timeout = ether1_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int __devinit +ether1_probe(struct expansion_card *ec, const struct ecard_id *id) +{ + struct net_device *dev; + int i, ret = 0; + + ether1_banner(); + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + dev = alloc_etherdev(sizeof(struct ether1_priv)); + if (!dev) { + ret = -ENOMEM; + goto release; + } + + SET_NETDEV_DEV(dev, &ec->dev); + + dev->irq = ec->irq; + priv(dev)->base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); + if (!priv(dev)->base) { + ret = -ENOMEM; + goto free; + } + + if ((priv(dev)->bus_type = ether1_reset(dev)) == 0) { + ret = -ENODEV; + goto free; + } + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2)); + + if (ether1_init_2(dev)) { + ret = -ENODEV; + goto free; + } + + dev->netdev_ops = ðer1_netdev_ops; + dev->watchdog_timeo = 5 * HZ / 100; + + ret = register_netdev(dev); + if (ret) + goto free; + + printk(KERN_INFO "%s: ether1 in slot %d, %pM\n", + dev->name, ec->slot_no, dev->dev_addr); + + ecard_set_drvdata(ec, dev); + return 0; + + free: + free_netdev(dev); + release: + ecard_release_resources(ec); + out: + return ret; +} + +static void __devexit ether1_remove(struct expansion_card *ec) +{ + struct net_device *dev = ecard_get_drvdata(ec); + + ecard_set_drvdata(ec, NULL); + + unregister_netdev(dev); + free_netdev(dev); + ecard_release_resources(ec); +} + +static const struct ecard_id ether1_ids[] = { + { MANU_ACORN, PROD_ACORN_ETHER1 }, + { 0xffff, 0xffff } +}; + +static struct ecard_driver ether1_driver = { + .probe = ether1_probe, + .remove = __devexit_p(ether1_remove), + .id_table = ether1_ids, + .drv = { + .name = "ether1", + }, +}; + +static int __init ether1_init(void) +{ + return ecard_register_driver(ðer1_driver); +} + +static void __exit ether1_exit(void) +{ + ecard_remove_driver(ðer1_driver); +} + +module_init(ether1_init); +module_exit(ether1_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/i825xx/ether1.h b/drivers/net/ethernet/i825xx/ether1.h new file mode 100644 index 0000000..3a5830a --- /dev/null +++ b/drivers/net/ethernet/i825xx/ether1.h @@ -0,0 +1,280 @@ +/* + * linux/drivers/acorn/net/ether1.h + * + * Copyright (C) 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Network driver for Acorn Ether1 cards. 
+ */ + +#ifndef _LINUX_ether1_H +#define _LINUX_ether1_H + +#ifdef __ETHER1_C +/* use 0 for production, 1 for verification, >2 for debug */ +#ifndef NET_DEBUG +#define NET_DEBUG 0 +#endif + +#define priv(dev) ((struct ether1_priv *)netdev_priv(dev)) + +/* Page register */ +#define REG_PAGE (priv(dev)->base + 0x0000) + +/* Control register */ +#define REG_CONTROL (priv(dev)->base + 0x0004) +#define CTRL_RST 0x01 +#define CTRL_LOOPBACK 0x02 +#define CTRL_CA 0x04 +#define CTRL_ACK 0x08 + +#define ETHER1_RAM (priv(dev)->base + 0x2000) + +/* HW address */ +#define IDPROM_ADDRESS (priv(dev)->base + 0x0024) + +struct ether1_priv { + void __iomem *base; + unsigned int tx_link; + unsigned int tx_head; + volatile unsigned int tx_tail; + volatile unsigned int rx_head; + volatile unsigned int rx_tail; + unsigned char bus_type; + unsigned char resetting; + unsigned char initialising : 1; + unsigned char restart : 1; +}; + +#define I82586_NULL (-1) + +typedef struct { /* tdr */ + unsigned short tdr_status; + unsigned short tdr_command; + unsigned short tdr_link; + unsigned short tdr_result; +#define TDR_TIME (0x7ff) +#define TDR_SHORT (1 << 12) +#define TDR_OPEN (1 << 13) +#define TDR_XCVRPROB (1 << 14) +#define TDR_LNKOK (1 << 15) +} tdr_t; + +typedef struct { /* transmit */ + unsigned short tx_status; + unsigned short tx_command; + unsigned short tx_link; + unsigned short tx_tbdoffset; +} tx_t; + +typedef struct { /* tbd */ + unsigned short tbd_opts; +#define TBD_CNT (0x3fff) +#define TBD_EOL (1 << 15) + unsigned short tbd_link; + unsigned short tbd_bufl; + unsigned short tbd_bufh; +} tbd_t; + +typedef struct { /* rfd */ + unsigned short rfd_status; +#define RFD_NOEOF (1 << 6) +#define RFD_FRAMESHORT (1 << 7) +#define RFD_DMAOVRN (1 << 8) +#define RFD_NORESOURCES (1 << 9) +#define RFD_ALIGNERROR (1 << 10) +#define RFD_CRCERROR (1 << 11) +#define RFD_OK (1 << 13) +#define RFD_FDCONSUMED (1 << 14) +#define RFD_COMPLETE (1 << 15) + unsigned short rfd_command; +#define RFD_CMDSUSPEND (1 << 14) +#define RFD_CMDEL (1 << 15) + unsigned short rfd_link; + unsigned short rfd_rbdoffset; + unsigned char rfd_dest[6]; + unsigned char rfd_src[6]; + unsigned short rfd_len; +} rfd_t; + +typedef struct { /* rbd */ + unsigned short rbd_status; +#define RBD_ACNT (0x3fff) +#define RBD_ACNTVALID (1 << 14) +#define RBD_EOF (1 << 15) + unsigned short rbd_link; + unsigned short rbd_bufl; + unsigned short rbd_bufh; + unsigned short rbd_len; +} rbd_t; + +typedef struct { /* nop */ + unsigned short nop_status; + unsigned short nop_command; + unsigned short nop_link; +} nop_t; + +typedef struct { /* set multicast */ + unsigned short mc_status; + unsigned short mc_command; + unsigned short mc_link; + unsigned short mc_cnt; + unsigned char mc_addrs[1][6]; +} mc_t; + +typedef struct { /* set address */ + unsigned short sa_status; + unsigned short sa_command; + unsigned short sa_link; + unsigned char sa_addr[6]; +} sa_t; + +typedef struct { /* config command */ + unsigned short cfg_status; + unsigned short cfg_command; + unsigned short cfg_link; + unsigned char cfg_bytecnt; /* size foll data: 4 - 12 */ + unsigned char cfg_fifolim; /* FIFO threshold */ + unsigned char cfg_byte8; +#define CFG8_SRDY (1 << 6) +#define CFG8_SAVEBADF (1 << 7) + unsigned char cfg_byte9; +#define CFG9_ADDRLEN(x) (x) +#define CFG9_ADDRLENBUF (1 << 3) +#define CFG9_PREAMB2 (0 << 4) +#define CFG9_PREAMB4 (1 << 4) +#define CFG9_PREAMB8 (2 << 4) +#define CFG9_PREAMB16 (3 << 4) +#define CFG9_ILOOPBACK (1 << 6) +#define CFG9_ELOOPBACK (1 << 7) + unsigned char 
cfg_byte10; +#define CFG10_LINPRI(x) (x) +#define CFG10_ACR(x) (x << 4) +#define CFG10_BOFMET (1 << 7) + unsigned char cfg_ifs; + unsigned char cfg_slotl; + unsigned char cfg_byte13; +#define CFG13_SLOTH(x) (x) +#define CFG13_RETRY(x) (x << 4) + unsigned char cfg_byte14; +#define CFG14_PROMISC (1 << 0) +#define CFG14_DISBRD (1 << 1) +#define CFG14_MANCH (1 << 2) +#define CFG14_TNCRS (1 << 3) +#define CFG14_NOCRC (1 << 4) +#define CFG14_CRC16 (1 << 5) +#define CFG14_BTSTF (1 << 6) +#define CFG14_FLGPAD (1 << 7) + unsigned char cfg_byte15; +#define CFG15_CSTF(x) (x) +#define CFG15_ICSS (1 << 3) +#define CFG15_CDTF(x) (x << 4) +#define CFG15_ICDS (1 << 7) + unsigned short cfg_minfrmlen; +} cfg_t; + +typedef struct { /* scb */ + unsigned short scb_status; /* status of 82586 */ +#define SCB_STRXMASK (7 << 4) /* Receive unit status */ +#define SCB_STRXIDLE (0 << 4) /* Idle */ +#define SCB_STRXSUSP (1 << 4) /* Suspended */ +#define SCB_STRXNRES (2 << 4) /* No resources */ +#define SCB_STRXRDY (4 << 4) /* Ready */ +#define SCB_STCUMASK (7 << 8) /* Command unit status */ +#define SCB_STCUIDLE (0 << 8) /* Idle */ +#define SCB_STCUSUSP (1 << 8) /* Suspended */ +#define SCB_STCUACTV (2 << 8) /* Active */ +#define SCB_STRNR (1 << 12) /* Receive unit not ready */ +#define SCB_STCNA (1 << 13) /* Command unit not ready */ +#define SCB_STFR (1 << 14) /* Frame received */ +#define SCB_STCX (1 << 15) /* Command completed */ + unsigned short scb_command; /* Next command */ +#define SCB_CMDRXSTART (1 << 4) /* Start (at rfa_offset) */ +#define SCB_CMDRXRESUME (2 << 4) /* Resume reception */ +#define SCB_CMDRXSUSPEND (3 << 4) /* Suspend reception */ +#define SCB_CMDRXABORT (4 << 4) /* Abort reception */ +#define SCB_CMDCUCSTART (1 << 8) /* Start (at cbl_offset) */ +#define SCB_CMDCUCRESUME (2 << 8) /* Resume execution */ +#define SCB_CMDCUCSUSPEND (3 << 8) /* Suspend execution */ +#define SCB_CMDCUCABORT (4 << 8) /* Abort execution */ +#define SCB_CMDACKRNR (1 << 12) /* Ack RU not ready */ +#define SCB_CMDACKCNA (1 << 13) /* Ack CU not ready */ +#define SCB_CMDACKFR (1 << 14) /* Ack Frame received */ +#define SCB_CMDACKCX (1 << 15) /* Ack Command complete */ + unsigned short scb_cbl_offset; /* Offset of first command unit */ + unsigned short scb_rfa_offset; /* Offset of first receive frame area */ + unsigned short scb_crc_errors; /* Properly aligned frame with CRC error*/ + unsigned short scb_aln_errors; /* Misaligned frames */ + unsigned short scb_rsc_errors; /* Frames lost due to no space */ + unsigned short scb_ovn_errors; /* Frames lost due to slow bus */ +} scb_t; + +typedef struct { /* iscp */ + unsigned short iscp_busy; /* set by CPU before CA */ + unsigned short iscp_offset; /* offset of SCB */ + unsigned short iscp_basel; /* base of SCB */ + unsigned short iscp_baseh; +} iscp_t; + + /* this address must be 0xfff6 */ +typedef struct { /* scp */ + unsigned short scp_sysbus; /* bus size */ +#define SCP_SY_16BBUS 0x00 +#define SCP_SY_8BBUS 0x01 + unsigned short scp_junk[2]; /* junk */ + unsigned short scp_iscpl; /* lower 16 bits of iscp */ + unsigned short scp_iscph; /* upper 16 bits of iscp */ +} scp_t; + +/* commands */ +#define CMD_NOP 0 +#define CMD_SETADDRESS 1 +#define CMD_CONFIG 2 +#define CMD_SETMULTICAST 3 +#define CMD_TX 4 +#define CMD_TDR 5 +#define CMD_DUMP 6 +#define CMD_DIAGNOSE 7 + +#define CMD_MASK 7 + +#define CMD_INTR (1 << 13) +#define CMD_SUSP (1 << 14) +#define CMD_EOL (1 << 15) + +#define STAT_COLLISIONS (15) +#define STAT_COLLEXCESSIVE (1 << 5) +#define STAT_COLLAFTERTX (1 << 6) 
+#define STAT_TXDEFERRED (1 << 7) +#define STAT_TXSLOWDMA (1 << 8) +#define STAT_TXLOSTCTS (1 << 9) +#define STAT_NOCARRIER (1 << 10) +#define STAT_FAIL (1 << 11) +#define STAT_ABORTED (1 << 12) +#define STAT_OK (1 << 13) +#define STAT_BUSY (1 << 14) +#define STAT_COMPLETE (1 << 15) +#endif +#endif + +/* + * Ether1 card definitions: + * + * FAST accesses: + * +0 Page register + * 16 pages + * +4 Control + * '1' = reset + * '2' = loopback + * '4' = CA + * '8' = int ack + * + * RAM at address + 0x2000 + * Pod. Prod id = 3 + * Words after ID block [base + 8 words] + * +0 pcb issue (0x0c and 0xf3 invalid) + * +1 - +6 eth hw address + */ diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c new file mode 100644 index 0000000..6eba352 --- /dev/null +++ b/drivers/net/ethernet/i825xx/lasi_82596.c @@ -0,0 +1,238 @@ +/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as + munged into HPPA boxen . + + This driver is based upon 82596.c, original credits are below... + but there were too many hoops which HP wants jumped through to + keep this code in there in a sane manner. + + 3 primary sources of the mess -- + 1) hppa needs *lots* of cacheline flushing to keep this kind of + MMIO running. + + 2) The 82596 needs to see all of its pointers as their physical + address. Thus virt_to_bus/bus_to_virt are *everywhere*. + + 3) The implementation HP is using seems to be significantly pickier + about when and how the command and RX units are started. some + command ordering was changed. + + Examination of the mach driver leads one to believe that there + might be a saner way to pull this off... anyone who feels like a + full rewrite can be my guest. + + Split 02/13/2000 Sam Creasey (sammy@oh.verio.com) + + 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de) + 03/02/2000 changes for better/correct(?) cache-flushing (deller) +*/ + +/* 82596.c: A generic 82596 ethernet driver for linux. */ +/* + Based on Apricot.c + Written 1994 by Mark Evans. + This driver is for the Apricot 82596 bus-master interface + + Modularised 12/94 Mark Evans + + + Modified to support the 82596 ethernet chips on 680x0 VME boards. + by Richard Hirst + Renamed to be 82596.c + + 980825: Changed to receive directly in to sk_buffs which are + allocated at open() time. Eliminates copy on incoming frames + (small ones are still copied). Shared data now held in a + non-cached page, so we can run on 68060 in copyback mode. + + TBD: + * look at deferring rx frames rather than discarding (as per tulip) + * handle tx ring full as per tulip + * performance test to tune rx_copybreak + + Most of my modifications relate to the braindead big-endian + implementation by Intel. When the i596 is operating in + 'big-endian' mode, it thinks a 32 bit value of 0x12345678 + should be stored as 0x56781234. This is a real pain, when + you have linked lists which are shared by the 680x0 and the + i596. + + Driver skeleton + Written 1993 by Donald Becker. + Copyright 1993 United States Government as represented by the Director, + National Security Agency. This software may only be used and distributed + according to the terms of the GNU General Public License as modified by SRC, + incorporated herein by reference. 
+ + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30" + +#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/ +#define PA_CPU_PORT_L_ACCESS 4 +#define PA_CHANNEL_ATTENTION 8 + +#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */ + +#define DMA_ALLOC dma_alloc_noncoherent +#define DMA_FREE dma_free_noncoherent +#define DMA_WBACK(ndev, addr, len) \ + do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0) + +#define DMA_INV(ndev, addr, len) \ + do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0) + +#define DMA_WBACK_INV(ndev, addr, len) \ + do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0) + +#define SYSBUS 0x0000006c; + +/* big endian CPU, 82596 "big" endian mode */ +#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16)) +#define SWAP16(x) (x) + +#include "lib82596.c" + +MODULE_AUTHOR("Richard Hirst"); +MODULE_DESCRIPTION("i82596 driver"); +MODULE_LICENSE("GPL"); +module_param(i596_debug, int, 0); +MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask"); + +static inline void ca(struct net_device *dev) +{ + gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION); +} + + +static void mpu_port(struct net_device *dev, int c, dma_addr_t x) +{ + struct i596_private *lp = netdev_priv(dev); + + u32 v = (u32) (c) | (u32) (x); + u16 a, b; + + if (lp->options & OPT_SWAP_PORT) { + a = v >> 16; + b = v & 0xffff; + } else { + a = v & 0xffff; + b = v >> 16; + } + + gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS); + udelay(1); + gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS); +} + +#define LAN_PROM_ADDR 0xF0810000 + +static int __devinit +lan_init_chip(struct parisc_device *dev) +{ + struct net_device *netdevice; + struct i596_private *lp; + int retval; + int i; + + if (!dev->irq) { + printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", + __FILE__, (unsigned long)dev->hpa.start); + return -ENODEV; + } + + printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", + (unsigned long)dev->hpa.start, dev->irq); + + netdevice = alloc_etherdev(sizeof(struct i596_private)); + if (!netdevice) + return -ENOMEM; + SET_NETDEV_DEV(netdevice, &dev->dev); + parisc_set_drvdata (dev, netdevice); + + netdevice->base_addr = dev->hpa.start; + netdevice->irq = dev->irq; + + if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) { + for (i = 0; i < 6; i++) { + netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i); + } + printk(KERN_INFO + "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__); + } + + lp = netdev_priv(netdevice); + lp->options = dev->id.sversion == 0x72 ? 
OPT_SWAP_PORT : 0; + + retval = i82596_probe(netdevice); + if (retval) { + free_netdev(netdevice); + return -ENODEV; + } + return retval; +} + +static int __devexit lan_remove_chip (struct parisc_device *pdev) +{ + struct net_device *dev = parisc_get_drvdata(pdev); + struct i596_private *lp = netdev_priv(dev); + + unregister_netdev (dev); + DMA_FREE(&pdev->dev, sizeof(struct i596_private), + (void *)lp->dma, lp->dma_addr); + free_netdev (dev); + return 0; +} + +static struct parisc_device_id lan_tbl[] = { + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a }, + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 }, + { 0, } +}; + +MODULE_DEVICE_TABLE(parisc, lan_tbl); + +static struct parisc_driver lan_driver = { + .name = "lasi_82596", + .id_table = lan_tbl, + .probe = lan_init_chip, + .remove = __devexit_p(lan_remove_chip), +}; + +static int __devinit lasi_82596_init(void) +{ + printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n"); + return register_parisc_driver(&lan_driver); +} + +module_init(lasi_82596_init); + +static void __exit lasi_82596_exit(void) +{ + unregister_parisc_driver(&lan_driver); +} + +module_exit(lasi_82596_exit); diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c new file mode 100644 index 0000000..9e04289 --- /dev/null +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -0,0 +1,1412 @@ +/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as + munged into HPPA boxen . + + This driver is based upon 82596.c, original credits are below... + but there were too many hoops which HP wants jumped through to + keep this code in there in a sane manner. + + 3 primary sources of the mess -- + 1) hppa needs *lots* of cacheline flushing to keep this kind of + MMIO running. + + 2) The 82596 needs to see all of its pointers as their physical + address. Thus virt_to_bus/bus_to_virt are *everywhere*. + + 3) The implementation HP is using seems to be significantly pickier + about when and how the command and RX units are started. some + command ordering was changed. + + Examination of the mach driver leads one to believe that there + might be a saner way to pull this off... anyone who feels like a + full rewrite can be my guest. + + Split 02/13/2000 Sam Creasey (sammy@oh.verio.com) + + 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de) + 03/02/2000 changes for better/correct(?) cache-flushing (deller) +*/ + +/* 82596.c: A generic 82596 ethernet driver for linux. */ +/* + Based on Apricot.c + Written 1994 by Mark Evans. + This driver is for the Apricot 82596 bus-master interface + + Modularised 12/94 Mark Evans + + + Modified to support the 82596 ethernet chips on 680x0 VME boards. + by Richard Hirst + Renamed to be 82596.c + + 980825: Changed to receive directly in to sk_buffs which are + allocated at open() time. Eliminates copy on incoming frames + (small ones are still copied). Shared data now held in a + non-cached page, so we can run on 68060 in copyback mode. + + TBD: + * look at deferring rx frames rather than discarding (as per tulip) + * handle tx ring full as per tulip + * performance test to tune rx_copybreak + + Most of my modifications relate to the braindead big-endian + implementation by Intel. When the i596 is operating in + 'big-endian' mode, it thinks a 32 bit value of 0x12345678 + should be stored as 0x56781234. This is a real pain, when + you have linked lists which are shared by the 680x0 and the + i596. + + Driver skeleton + Written 1993 by Donald Becker. 
+ Copyright 1993 United States Government as represented by the Director, + National Security Agency. This software may only be used and distributed + according to the terms of the GNU General Public License as modified by SRC, + incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* DEBUG flags + */ + +#define DEB_INIT 0x0001 +#define DEB_PROBE 0x0002 +#define DEB_SERIOUS 0x0004 +#define DEB_ERRORS 0x0008 +#define DEB_MULTI 0x0010 +#define DEB_TDR 0x0020 +#define DEB_OPEN 0x0040 +#define DEB_RESET 0x0080 +#define DEB_ADDCMD 0x0100 +#define DEB_STATUS 0x0200 +#define DEB_STARTTX 0x0400 +#define DEB_RXADDR 0x0800 +#define DEB_TXADDR 0x1000 +#define DEB_RXFRAME 0x2000 +#define DEB_INTS 0x4000 +#define DEB_STRUCT 0x8000 +#define DEB_ANY 0xffff + + +#define DEB(x, y) if (i596_debug & (x)) { y; } + + +/* + * The MPU_PORT command allows direct access to the 82596. With PORT access + * the following commands are available (p5-18). The 32-bit port command + * must be word-swapped with the most significant word written first. + * This only applies to VME boards. + */ +#define PORT_RESET 0x00 /* reset 82596 */ +#define PORT_SELFTEST 0x01 /* selftest */ +#define PORT_ALTSCP 0x02 /* alternate SCB address */ +#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */ + +static int i596_debug = (DEB_SERIOUS|DEB_PROBE); + +/* Copy frames shorter than rx_copybreak, otherwise pass on up in + * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha). + */ +static int rx_copybreak = 100; + +#define PKT_BUF_SZ 1536 +#define MAX_MC_CNT 64 + +#define ISCP_BUSY 0x0001 + +#define I596_NULL ((u32)0xffffffff) + +#define CMD_EOL 0x8000 /* The last command of the list, stop. */ +#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ +#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ + +#define CMD_FLEX 0x0008 /* Enable flexible memory model */ + +enum commands { + CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, + CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7 +}; + +#define STAT_C 0x8000 /* Set to 0 after execution */ +#define STAT_B 0x4000 /* Command being executed */ +#define STAT_OK 0x2000 /* Command executed ok */ +#define STAT_A 0x1000 /* Command aborted */ + +#define CUC_START 0x0100 +#define CUC_RESUME 0x0200 +#define CUC_SUSPEND 0x0300 +#define CUC_ABORT 0x0400 +#define RX_START 0x0010 +#define RX_RESUME 0x0020 +#define RX_SUSPEND 0x0030 +#define RX_ABORT 0x0040 + +#define TX_TIMEOUT (HZ/20) + + +struct i596_reg { + unsigned short porthi; + unsigned short portlo; + u32 ca; +}; + +#define EOF 0x8000 +#define SIZE_MASK 0x3fff + +struct i596_tbd { + unsigned short size; + unsigned short pad; + u32 next; + u32 data; + u32 cache_pad[5]; /* Total 32 bytes... */ +}; + +/* The command structure has two 'next' pointers; v_next is the address of + * the next command as seen by the CPU, b_next is the address of the next + * command as seen by the 82596. The b_next pointer, as used by the 82596 + * always references the status field of the next command, rather than the + * v_next field, because the 82596 is unaware of v_next. 
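 *
 * [Editorial aside, a minimal sketch rather than code from this patch,
 *  of how the two chains get used: the CPU walks the command list
 *  through v_next, while every pointer the chip ever sees is a
 *  byte-swapped bus address reached through b_next:
 *
 *	struct i596_cmd *c;
 *	for (c = lp->cmd_head; c != NULL; c = c->v_next)
 *		pr_debug("cmd %p, chip-side next %08x\n",
 *			 c, SWAP32(c->b_next));
 *
 *  lp->cmd_head, v_next, b_next and SWAP32() are all names from this
 *  file; only the loop itself is illustrative.]
 *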
It may seem more + * logical to put v_next at the end of the structure, but we cannot do that + * because the 82596 expects other fields to be there, depending on command + * type. + */ + +struct i596_cmd { + struct i596_cmd *v_next; /* Address from CPUs viewpoint */ + unsigned short status; + unsigned short command; + u32 b_next; /* Address from i596 viewpoint */ +}; + +struct tx_cmd { + struct i596_cmd cmd; + u32 tbd; + unsigned short size; + unsigned short pad; + struct sk_buff *skb; /* So we can free it after tx */ + dma_addr_t dma_addr; +#ifdef __LP64__ + u32 cache_pad[6]; /* Total 64 bytes... */ +#else + u32 cache_pad[1]; /* Total 32 bytes... */ +#endif +}; + +struct tdr_cmd { + struct i596_cmd cmd; + unsigned short status; + unsigned short pad; +}; + +struct mc_cmd { + struct i596_cmd cmd; + short mc_cnt; + char mc_addrs[MAX_MC_CNT*6]; +}; + +struct sa_cmd { + struct i596_cmd cmd; + char eth_addr[8]; +}; + +struct cf_cmd { + struct i596_cmd cmd; + char i596_config[16]; +}; + +struct i596_rfd { + unsigned short stat; + unsigned short cmd; + u32 b_next; /* Address from i596 viewpoint */ + u32 rbd; + unsigned short count; + unsigned short size; + struct i596_rfd *v_next; /* Address from CPUs viewpoint */ + struct i596_rfd *v_prev; +#ifndef __LP64__ + u32 cache_pad[2]; /* Total 32 bytes... */ +#endif +}; + +struct i596_rbd { + /* hardware data */ + unsigned short count; + unsigned short zero1; + u32 b_next; + u32 b_data; /* Address from i596 viewpoint */ + unsigned short size; + unsigned short zero2; + /* driver data */ + struct sk_buff *skb; + struct i596_rbd *v_next; + u32 b_addr; /* This rbd addr from i596 view */ + unsigned char *v_data; /* Address from CPUs viewpoint */ + /* Total 32 bytes... */ +#ifdef __LP64__ + u32 cache_pad[4]; +#endif +}; + +/* These values as chosen so struct i596_dma fits in one page... 
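 *
 * [Editorial note, hedged: with RX_RING_SIZE 16 and TX_RING_SIZE 32 the
 *  32-byte-aligned members of struct i596_dma total comfortably under
 *  4 KiB on 32-bit builds; i82596_probe() later makes this a hard
 *  compile-time rule with BUILD_BUG_ON(sizeof(struct i596_dma) > 4096).]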
*/ + +#define TX_RING_SIZE 32 +#define RX_RING_SIZE 16 + +struct i596_scb { + unsigned short status; + unsigned short command; + u32 cmd; + u32 rfd; + u32 crc_err; + u32 align_err; + u32 resource_err; + u32 over_err; + u32 rcvdt_err; + u32 short_err; + unsigned short t_on; + unsigned short t_off; +}; + +struct i596_iscp { + u32 stat; + u32 scb; +}; + +struct i596_scp { + u32 sysbus; + u32 pad; + u32 iscp; +}; + +struct i596_dma { + struct i596_scp scp __attribute__((aligned(32))); + volatile struct i596_iscp iscp __attribute__((aligned(32))); + volatile struct i596_scb scb __attribute__((aligned(32))); + struct sa_cmd sa_cmd __attribute__((aligned(32))); + struct cf_cmd cf_cmd __attribute__((aligned(32))); + struct tdr_cmd tdr_cmd __attribute__((aligned(32))); + struct mc_cmd mc_cmd __attribute__((aligned(32))); + struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32))); + struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32))); + struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32))); + struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32))); +}; + +struct i596_private { + struct i596_dma *dma; + u32 stat; + int last_restart; + struct i596_rfd *rfd_head; + struct i596_rbd *rbd_head; + struct i596_cmd *cmd_tail; + struct i596_cmd *cmd_head; + int cmd_backlog; + u32 last_cmd; + int next_tx_cmd; + int options; + spinlock_t lock; /* serialize access to chip */ + dma_addr_t dma_addr; + void __iomem *mpu_port; + void __iomem *ca; +}; + +static const char init_setup[] = +{ + 0x8E, /* length, prefetch on */ + 0xC8, /* fifo to 8, monitor off */ + 0x80, /* don't save bad frames */ + 0x2E, /* No source address insertion, 8 byte preamble */ + 0x00, /* priority and backoff defaults */ + 0x60, /* interframe spacing */ + 0x00, /* slot time LSB */ + 0xf2, /* slot time and retries */ + 0x00, /* promiscuous mode */ + 0x00, /* collision detect */ + 0x40, /* minimum frame length */ + 0xff, + 0x00, + 0x7f /* *multi IA */ }; + +static int i596_open(struct net_device *dev); +static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t i596_interrupt(int irq, void *dev_id); +static int i596_close(struct net_device *dev); +static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); +static void i596_tx_timeout (struct net_device *dev); +static void print_eth(unsigned char *buf, char *str); +static void set_multicast_list(struct net_device *dev); +static inline void ca(struct net_device *dev); +static void mpu_port(struct net_device *dev, int c, dma_addr_t x); + +static int rx_ring_size = RX_RING_SIZE; +static int ticks_limit = 100; +static int max_cmd_backlog = TX_RING_SIZE-1; + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void i596_poll_controller(struct net_device *dev); +#endif + + +static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str) +{ + DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp)); + while (--delcnt && dma->iscp.stat) { + udelay(10); + DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp)); + } + if (!delcnt) { + printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n", + dev->name, str, SWAP16(dma->iscp.stat)); + return -1; + } else + return 0; +} + + +static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str) +{ + DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb)); + while (--delcnt && dma->scb.command) { + udelay(10); + DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb)); + } + if (!delcnt) { + printk(KERN_ERR "%s: %s, status %4.4x, cmd 
%4.4x.\n", + dev->name, str, + SWAP16(dma->scb.status), + SWAP16(dma->scb.command)); + return -1; + } else + return 0; +} + + +static void i596_display_data(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + struct i596_cmd *cmd; + struct i596_rfd *rfd; + struct i596_rbd *rbd; + + printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n", + &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp)); + printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n", + &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb)); + printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x," + " .cmd = %08x, .rfd = %08x\n", + &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command), + SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd)); + printk(KERN_DEBUG " errors: crc %x, align %x, resource %x," + " over %x, rcvdt %x, short %x\n", + SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err), + SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err), + SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err)); + cmd = lp->cmd_head; + while (cmd != NULL) { + printk(KERN_DEBUG + "cmd at %p, .status = %04x, .command = %04x," + " .b_next = %08x\n", + cmd, SWAP16(cmd->status), SWAP16(cmd->command), + SWAP32(cmd->b_next)); + cmd = cmd->v_next; + } + rfd = lp->rfd_head; + printk(KERN_DEBUG "rfd_head = %p\n", rfd); + do { + printk(KERN_DEBUG + " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x," + " count %04x\n", + rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd), + SWAP32(rfd->b_next), SWAP32(rfd->rbd), + SWAP16(rfd->count)); + rfd = rfd->v_next; + } while (rfd != lp->rfd_head); + rbd = lp->rbd_head; + printk(KERN_DEBUG "rbd_head = %p\n", rbd); + do { + printk(KERN_DEBUG + " %p .count %04x, b_next %08x, b_data %08x," + " size %04x\n", + rbd, SWAP16(rbd->count), SWAP32(rbd->b_next), + SWAP32(rbd->b_data), SWAP16(rbd->size)); + rbd = rbd->v_next; + } while (rbd != lp->rbd_head); + DMA_INV(dev, dma, sizeof(struct i596_dma)); +} + + +#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma))) + +static inline int init_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + int i; + struct i596_rfd *rfd; + struct i596_rbd *rbd; + + /* First build the Receive Buffer Descriptor List */ + + for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) { + dma_addr_t dma_addr; + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); + if (skb == NULL) + return -1; + dma_addr = dma_map_single(dev->dev.parent, skb->data, + PKT_BUF_SZ, DMA_FROM_DEVICE); + rbd->v_next = rbd+1; + rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1)); + rbd->b_addr = SWAP32(virt_to_dma(lp, rbd)); + rbd->skb = skb; + rbd->v_data = skb->data; + rbd->b_data = SWAP32(dma_addr); + rbd->size = SWAP16(PKT_BUF_SZ); + } + lp->rbd_head = dma->rbds; + rbd = dma->rbds + rx_ring_size - 1; + rbd->v_next = dma->rbds; + rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds)); + + /* Now build the Receive Frame Descriptor List */ + + for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) { + rfd->rbd = I596_NULL; + rfd->v_next = rfd+1; + rfd->v_prev = rfd-1; + rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1)); + rfd->cmd = SWAP16(CMD_FLEX); + } + lp->rfd_head = dma->rfds; + dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); + rfd = dma->rfds; + rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head)); + rfd->v_prev = dma->rfds + rx_ring_size - 1; + rfd = dma->rfds + rx_ring_size - 1; + rfd->v_next = 
dma->rfds; + rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds)); + rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX); + + DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma)); + return 0; +} + +static inline void remove_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_rbd *rbd; + int i; + + for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) { + if (rbd->skb == NULL) + break; + dma_unmap_single(dev->dev.parent, + (dma_addr_t)SWAP32(rbd->b_data), + PKT_BUF_SZ, DMA_FROM_DEVICE); + dev_kfree_skb(rbd->skb); + } +} + + +static void rebuild_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + int i; + + /* Ensure rx frame/buffer descriptors are tidy */ + + for (i = 0; i < rx_ring_size; i++) { + dma->rfds[i].rbd = I596_NULL; + dma->rfds[i].cmd = SWAP16(CMD_FLEX); + } + dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX); + lp->rfd_head = dma->rfds; + dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); + lp->rbd_head = dma->rbds; + dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds)); + + DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma)); +} + + +static int init_i596_mem(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + unsigned long flags; + + mpu_port(dev, PORT_RESET, 0); + udelay(100); /* Wait 100us - seems to help */ + + /* change the scp address */ + + lp->last_cmd = jiffies; + + dma->scp.sysbus = SYSBUS; + dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp))); + dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb))); + dma->iscp.stat = SWAP32(ISCP_BUSY); + lp->cmd_backlog = 0; + + lp->cmd_head = NULL; + dma->scb.cmd = I596_NULL; + + DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name)); + + DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp)); + DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp)); + DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); + + mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp)); + ca(dev); + if (wait_istat(dev, dma, 1000, "initialization timed out")) + goto failed; + DEB(DEB_INIT, printk(KERN_DEBUG + "%s: i82596 initialization successful\n", + dev->name)); + + if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) { + printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); + goto failed; + } + + /* Ensure rx frame/buffer descriptors are tidy */ + rebuild_rx_bufs(dev); + + dma->scb.command = 0; + DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); + + DEB(DEB_INIT, printk(KERN_DEBUG + "%s: queuing CmdConfigure\n", dev->name)); + memcpy(dma->cf_cmd.i596_config, init_setup, 14); + dma->cf_cmd.cmd.command = SWAP16(CmdConfigure); + DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd)); + i596_add_cmd(dev, &dma->cf_cmd.cmd); + + DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); + memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6); + dma->sa_cmd.cmd.command = SWAP16(CmdSASetup); + DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd)); + i596_add_cmd(dev, &dma->sa_cmd.cmd); + + DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name)); + dma->tdr_cmd.cmd.command = SWAP16(CmdTDR); + DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd)); + i596_add_cmd(dev, &dma->tdr_cmd.cmd); + + spin_lock_irqsave (&lp->lock, flags); + + if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) { + spin_unlock_irqrestore (&lp->lock, flags); + goto failed_free_irq; + } + DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name)); + 
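	/*
	 * [Editorial aside, a minimal sketch and not code from this patch:
	 *  the SCB handshake this driver repeats is "wait for the chip to
	 *  consume the previous command word, write the new one, flush it
	 *  to memory, ring channel attention". A hypothetical helper built
	 *  only from names defined in this file would look like:
	 *
	 *	static void issue_scb(struct net_device *dev,
	 *			      struct i596_dma *dma, u16 cmd)
	 *	{
	 *		wait_cmd(dev, dma, 100, "issue_scb timed out");
	 *		dma->scb.command = SWAP16(cmd);
	 *		DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));
	 *		ca(dev);
	 *	}
	 *
	 *  The RX_START sequence just below is one unrolled instance.]
	 */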
dma->scb.command = SWAP16(RX_START); + dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); + DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); + + ca(dev); + + spin_unlock_irqrestore (&lp->lock, flags); + if (wait_cmd(dev, dma, 1000, "RX_START not processed")) + goto failed_free_irq; + DEB(DEB_INIT, printk(KERN_DEBUG + "%s: Receive unit started OK\n", dev->name)); + return 0; + +failed_free_irq: + free_irq(dev->irq, dev); +failed: + printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name); + mpu_port(dev, PORT_RESET, 0); + return -1; +} + + +static inline int i596_rx(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_rfd *rfd; + struct i596_rbd *rbd; + int frames = 0; + + DEB(DEB_RXFRAME, printk(KERN_DEBUG + "i596_rx(), rfd_head %p, rbd_head %p\n", + lp->rfd_head, lp->rbd_head)); + + + rfd = lp->rfd_head; /* Ref next frame to check */ + + DMA_INV(dev, rfd, sizeof(struct i596_rfd)); + while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */ + if (rfd->rbd == I596_NULL) + rbd = NULL; + else if (rfd->rbd == lp->rbd_head->b_addr) { + rbd = lp->rbd_head; + DMA_INV(dev, rbd, sizeof(struct i596_rbd)); + } else { + printk(KERN_ERR "%s: rbd chain broken!\n", dev->name); + /* XXX Now what? */ + rbd = NULL; + } + DEB(DEB_RXFRAME, printk(KERN_DEBUG + " rfd %p, rfd.rbd %08x, rfd.stat %04x\n", + rfd, rfd->rbd, rfd->stat)); + + if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) { + /* a good frame */ + int pkt_len = SWAP16(rbd->count) & 0x3fff; + struct sk_buff *skb = rbd->skb; + int rx_in_place = 0; + + DEB(DEB_RXADDR, print_eth(rbd->v_data, "received")); + frames++; + + /* Check if the packet is long enough to just accept + * without copying to a properly sized skbuff. + */ + + if (pkt_len > rx_copybreak) { + struct sk_buff *newskb; + dma_addr_t dma_addr; + + dma_unmap_single(dev->dev.parent, + (dma_addr_t)SWAP32(rbd->b_data), + PKT_BUF_SZ, DMA_FROM_DEVICE); + /* Get fresh skbuff to replace filled one. */ + newskb = netdev_alloc_skb_ip_align(dev, + PKT_BUF_SZ); + if (newskb == NULL) { + skb = NULL; /* drop pkt */ + goto memory_squeeze; + } + + /* Pass up the skb already on the Rx ring. */ + skb_put(skb, pkt_len); + rx_in_place = 1; + rbd->skb = newskb; + dma_addr = dma_map_single(dev->dev.parent, + newskb->data, + PKT_BUF_SZ, + DMA_FROM_DEVICE); + rbd->v_data = newskb->data; + rbd->b_data = SWAP32(dma_addr); + DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); + } else + skb = netdev_alloc_skb_ip_align(dev, pkt_len); +memory_squeeze: + if (skb == NULL) { + /* XXX tulip.c can defer packets here!! 
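 *
 * [Editorial note on the copybreak policy above, hedged: frames longer
 *  than rx_copybreak are handed up in the ring's own skb and the ring
 *  slot is re-armed with a freshly mapped full-size buffer, while
 *  shorter frames are copied into a right-sized skb so the mapped ring
 *  buffer stays where it is; the threshold of 100 bytes is borrowed
 *  from tulip.c, as noted where rx_copybreak is defined.]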
*/ + printk(KERN_ERR + "%s: i596_rx Memory squeeze, dropping packet.\n", + dev->name); + dev->stats.rx_dropped++; + } else { + if (!rx_in_place) { + /* 16 byte align the data fields */ + dma_sync_single_for_cpu(dev->dev.parent, + (dma_addr_t)SWAP32(rbd->b_data), + PKT_BUF_SZ, DMA_FROM_DEVICE); + memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len); + dma_sync_single_for_device(dev->dev.parent, + (dma_addr_t)SWAP32(rbd->b_data), + PKT_BUF_SZ, DMA_FROM_DEVICE); + } + skb->len = pkt_len; + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + } else { + DEB(DEB_ERRORS, printk(KERN_DEBUG + "%s: Error, rfd.stat = 0x%04x\n", + dev->name, rfd->stat)); + dev->stats.rx_errors++; + if (rfd->stat & SWAP16(0x0100)) + dev->stats.collisions++; + if (rfd->stat & SWAP16(0x8000)) + dev->stats.rx_length_errors++; + if (rfd->stat & SWAP16(0x0001)) + dev->stats.rx_over_errors++; + if (rfd->stat & SWAP16(0x0002)) + dev->stats.rx_fifo_errors++; + if (rfd->stat & SWAP16(0x0004)) + dev->stats.rx_frame_errors++; + if (rfd->stat & SWAP16(0x0008)) + dev->stats.rx_crc_errors++; + if (rfd->stat & SWAP16(0x0010)) + dev->stats.rx_length_errors++; + } + + /* Clear the buffer descriptor count and EOF + F flags */ + + if (rbd != NULL && (rbd->count & SWAP16(0x4000))) { + rbd->count = 0; + lp->rbd_head = rbd->v_next; + DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); + } + + /* Tidy the frame descriptor, marking it as end of list */ + + rfd->rbd = I596_NULL; + rfd->stat = 0; + rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX); + rfd->count = 0; + + /* Update record of next frame descriptor to process */ + + lp->dma->scb.rfd = rfd->b_next; + lp->rfd_head = rfd->v_next; + DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd)); + + /* Remove end-of-list from old end descriptor */ + + rfd->v_prev->cmd = SWAP16(CMD_FLEX); + DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd)); + rfd = lp->rfd_head; + DMA_INV(dev, rfd, sizeof(struct i596_rfd)); + } + + DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames)); + + return 0; +} + + +static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) +{ + struct i596_cmd *ptr; + + while (lp->cmd_head != NULL) { + ptr = lp->cmd_head; + lp->cmd_head = ptr->v_next; + lp->cmd_backlog--; + + switch (SWAP16(ptr->command) & 0x7) { + case CmdTx: + { + struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; + struct sk_buff *skb = tx_cmd->skb; + dma_unmap_single(dev->dev.parent, + tx_cmd->dma_addr, + skb->len, DMA_TO_DEVICE); + + dev_kfree_skb(skb); + + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + + ptr->v_next = NULL; + ptr->b_next = I596_NULL; + tx_cmd->cmd.command = 0; /* Mark as free */ + break; + } + default: + ptr->v_next = NULL; + ptr->b_next = I596_NULL; + } + DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd)); + } + + wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out"); + lp->dma->scb.cmd = I596_NULL; + DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb)); +} + + +static inline void i596_reset(struct net_device *dev, struct i596_private *lp) +{ + unsigned long flags; + + DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n")); + + spin_lock_irqsave (&lp->lock, flags); + + wait_cmd(dev, lp->dma, 100, "i596_reset timed out"); + + netif_stop_queue(dev); + + /* FIXME: this command might cause an lpmc */ + lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT); + DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb)); + ca(dev); + + /* wait for shutdown */ + wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed 
out"); + spin_unlock_irqrestore (&lp->lock, flags); + + i596_cleanup_cmd(dev, lp); + i596_rx(dev); + + netif_start_queue(dev); + init_i596_mem(dev); +} + + +static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + unsigned long flags; + + DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n", + lp->cmd_head)); + + cmd->status = 0; + cmd->command |= SWAP16(CMD_EOL | CMD_INTR); + cmd->v_next = NULL; + cmd->b_next = I596_NULL; + DMA_WBACK(dev, cmd, sizeof(struct i596_cmd)); + + spin_lock_irqsave (&lp->lock, flags); + + if (lp->cmd_head != NULL) { + lp->cmd_tail->v_next = cmd; + lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status)); + DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd)); + } else { + lp->cmd_head = cmd; + wait_cmd(dev, dma, 100, "i596_add_cmd timed out"); + dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status)); + dma->scb.command = SWAP16(CUC_START); + DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); + ca(dev); + } + lp->cmd_tail = cmd; + lp->cmd_backlog++; + + spin_unlock_irqrestore (&lp->lock, flags); + + if (lp->cmd_backlog > max_cmd_backlog) { + unsigned long tickssofar = jiffies - lp->last_cmd; + + if (tickssofar < ticks_limit) + return; + + printk(KERN_ERR + "%s: command unit timed out, status resetting.\n", + dev->name); +#if 1 + i596_reset(dev, lp); +#endif + } +} + +static int i596_open(struct net_device *dev) +{ + DEB(DEB_OPEN, printk(KERN_DEBUG + "%s: i596_open() irq %d.\n", dev->name, dev->irq)); + + if (init_rx_bufs(dev)) { + printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name); + return -EAGAIN; + } + if (init_i596_mem(dev)) { + printk(KERN_ERR "%s: Failed to init memory\n", dev->name); + goto out_remove_rx_bufs; + } + netif_start_queue(dev); + + return 0; + +out_remove_rx_bufs: + remove_rx_bufs(dev); + return -EAGAIN; +} + +static void i596_tx_timeout (struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + + /* Transmitter timeout, serious problems. 
*/ + DEB(DEB_ERRORS, printk(KERN_DEBUG + "%s: transmit timed out, status resetting.\n", + dev->name)); + + dev->stats.tx_errors++; + + /* Try to restart the adaptor */ + if (lp->last_restart == dev->stats.tx_packets) { + DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n")); + /* Shutdown and restart */ + i596_reset (dev, lp); + } else { + /* Issue a channel attention signal */ + DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n")); + lp->dma->scb.command = SWAP16(CUC_START | RX_START); + DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb)); + ca (dev); + lp->last_restart = dev->stats.tx_packets; + } + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue (dev); +} + + +static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct tx_cmd *tx_cmd; + struct i596_tbd *tbd; + short length = skb->len; + + DEB(DEB_STARTTX, printk(KERN_DEBUG + "%s: i596_start_xmit(%x,%p) called\n", + dev->name, skb->len, skb->data)); + + if (length < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + length = ETH_ZLEN; + } + + netif_stop_queue(dev); + + tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd; + tbd = lp->dma->tbds + lp->next_tx_cmd; + + if (tx_cmd->cmd.command) { + DEB(DEB_ERRORS, printk(KERN_DEBUG + "%s: xmit ring full, dropping packet.\n", + dev->name)); + dev->stats.tx_dropped++; + + dev_kfree_skb(skb); + } else { + if (++lp->next_tx_cmd == TX_RING_SIZE) + lp->next_tx_cmd = 0; + tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd)); + tbd->next = I596_NULL; + + tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx); + tx_cmd->skb = skb; + + tx_cmd->pad = 0; + tx_cmd->size = 0; + tbd->pad = 0; + tbd->size = SWAP16(EOF | length); + + tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data, + skb->len, DMA_TO_DEVICE); + tbd->data = SWAP32(tx_cmd->dma_addr); + + DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued")); + DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd)); + DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd)); + i596_add_cmd(dev, &tx_cmd->cmd); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += length; + } + + netif_start_queue(dev); + + return NETDEV_TX_OK; +} + +static void print_eth(unsigned char *add, char *str) +{ + printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n", + add, add + 6, add, add[12], add[13], str); +} +static const struct net_device_ops i596_netdev_ops = { + .ndo_open = i596_open, + .ndo_stop = i596_close, + .ndo_start_xmit = i596_start_xmit, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = i596_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = i596_poll_controller, +#endif +}; + +static int __devinit i82596_probe(struct net_device *dev) +{ + int i; + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma; + + /* This lot is ensure things have been cache line aligned. 
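 *
 * [Editorial note: the cache_pad members pad each descriptor out to a
 *  multiple of the 32-byte cache line, so that the DMA_WBACK/DMA_INV
 *  flushes issued for one descriptor can never write back or invalidate
 *  a line shared with a neighbouring, device-owned one; the
 *  BUILD_BUG_ONs just below are the compile-time check of that layout
 *  contract.]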
*/ + BUILD_BUG_ON(sizeof(struct i596_rfd) != 32); + BUILD_BUG_ON(sizeof(struct i596_rbd) & 31); + BUILD_BUG_ON(sizeof(struct tx_cmd) & 31); + BUILD_BUG_ON(sizeof(struct i596_tbd) != 32); +#ifndef __LP64__ + BUILD_BUG_ON(sizeof(struct i596_dma) > 4096); +#endif + + if (!dev->base_addr || !dev->irq) + return -ENODEV; + + dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent, + sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL); + if (!dma) { + printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__); + return -ENOMEM; + } + + dev->netdev_ops = &i596_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + memset(dma, 0, sizeof(struct i596_dma)); + lp->dma = dma; + + dma->scb.command = 0; + dma->scb.cmd = I596_NULL; + dma->scb.rfd = I596_NULL; + spin_lock_init(&lp->lock); + + DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma)); + + i = register_netdev(dev); + if (i) { + DMA_FREE(dev->dev.parent, sizeof(struct i596_dma), + (void *)dma, lp->dma_addr); + return i; + } + + DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n", + dev->name, dev->base_addr, dev->dev_addr, + dev->irq)); + DEB(DEB_INIT, printk(KERN_INFO + "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n", + dev->name, dma, (int)sizeof(struct i596_dma), + &dma->scb)); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void i596_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + i596_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static irqreturn_t i596_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct i596_private *lp; + struct i596_dma *dma; + unsigned short status, ack_cmd = 0; + + lp = netdev_priv(dev); + dma = lp->dma; + + spin_lock (&lp->lock); + + wait_cmd(dev, dma, 100, "i596 interrupt, timeout"); + status = SWAP16(dma->scb.status); + + DEB(DEB_INTS, printk(KERN_DEBUG + "%s: i596 interrupt, IRQ %d, status %4.4x.\n", + dev->name, dev->irq, status)); + + ack_cmd = status & 0xf000; + + if (!ack_cmd) { + DEB(DEB_ERRORS, printk(KERN_DEBUG + "%s: interrupt with no events\n", + dev->name)); + spin_unlock (&lp->lock); + return IRQ_NONE; + } + + if ((status & 0x8000) || (status & 0x2000)) { + struct i596_cmd *ptr; + + if ((status & 0x8000)) + DEB(DEB_INTS, + printk(KERN_DEBUG + "%s: i596 interrupt completed command.\n", + dev->name)); + if ((status & 0x2000)) + DEB(DEB_INTS, + printk(KERN_DEBUG + "%s: i596 interrupt command unit inactive %x.\n", + dev->name, status & 0x0700)); + + while (lp->cmd_head != NULL) { + DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd)); + if (!(lp->cmd_head->status & SWAP16(STAT_C))) + break; + + ptr = lp->cmd_head; + + DEB(DEB_STATUS, + printk(KERN_DEBUG + "cmd_head->status = %04x, ->command = %04x\n", + SWAP16(lp->cmd_head->status), + SWAP16(lp->cmd_head->command))); + lp->cmd_head = ptr->v_next; + lp->cmd_backlog--; + + switch (SWAP16(ptr->command) & 0x7) { + case CmdTx: + { + struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; + struct sk_buff *skb = tx_cmd->skb; + + if (ptr->status & SWAP16(STAT_OK)) { + DEB(DEB_TXADDR, + print_eth(skb->data, "tx-done")); + } else { + dev->stats.tx_errors++; + if (ptr->status & SWAP16(0x0020)) + dev->stats.collisions++; + if (!(ptr->status & SWAP16(0x0040))) + dev->stats.tx_heartbeat_errors++; + if (ptr->status & SWAP16(0x0400)) + dev->stats.tx_carrier_errors++; + if (ptr->status & SWAP16(0x0800)) + dev->stats.collisions++; + if (ptr->status & SWAP16(0x1000)) + dev->stats.tx_aborted_errors++; + } + dma_unmap_single(dev->dev.parent, + tx_cmd->dma_addr, + skb->len, DMA_TO_DEVICE); + 
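				/* [Editorial note: this completion path
				 *  runs in hard-irq context under
				 *  lp->lock, so the skb is released with
				 *  dev_kfree_skb_irq(), which defers the
				 *  actual free to softirq; the
				 *  process-context cleanup in
				 *  i596_cleanup_cmd() uses plain
				 *  dev_kfree_skb() instead.] */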
dev_kfree_skb_irq(skb); + + tx_cmd->cmd.command = 0; /* Mark free */ + break; + } + case CmdTDR: + { + unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status); + + if (status & 0x8000) { + DEB(DEB_ANY, + printk(KERN_DEBUG "%s: link ok.\n", + dev->name)); + } else { + if (status & 0x4000) + printk(KERN_ERR + "%s: Transceiver problem.\n", + dev->name); + if (status & 0x2000) + printk(KERN_ERR + "%s: Termination problem.\n", + dev->name); + if (status & 0x1000) + printk(KERN_ERR + "%s: Short circuit.\n", + dev->name); + + DEB(DEB_TDR, + printk(KERN_DEBUG "%s: Time %d.\n", + dev->name, status & 0x07ff)); + } + break; + } + case CmdConfigure: + /* + * Zap command so set_multicast_list() know + * it is free + */ + ptr->command = 0; + break; + } + ptr->v_next = NULL; + ptr->b_next = I596_NULL; + DMA_WBACK(dev, ptr, sizeof(struct i596_cmd)); + lp->last_cmd = jiffies; + } + + /* This mess is arranging that only the last of any outstanding + * commands has the interrupt bit set. Should probably really + * only add to the cmd queue when the CU is stopped. + */ + ptr = lp->cmd_head; + while ((ptr != NULL) && (ptr != lp->cmd_tail)) { + struct i596_cmd *prev = ptr; + + ptr->command &= SWAP16(0x1fff); + ptr = ptr->v_next; + DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd)); + } + + if (lp->cmd_head != NULL) + ack_cmd |= CUC_START; + dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status)); + DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb)); + } + if ((status & 0x1000) || (status & 0x4000)) { + if ((status & 0x4000)) + DEB(DEB_INTS, + printk(KERN_DEBUG + "%s: i596 interrupt received a frame.\n", + dev->name)); + i596_rx(dev); + /* Only RX_START if stopped - RGH 07-07-96 */ + if (status & 0x1000) { + if (netif_running(dev)) { + DEB(DEB_ERRORS, + printk(KERN_DEBUG + "%s: i596 interrupt receive unit inactive, status 0x%x\n", + dev->name, status)); + ack_cmd |= RX_START; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + rebuild_rx_bufs(dev); + } + } + } + wait_cmd(dev, dma, 100, "i596 interrupt, timeout"); + dma->scb.command = SWAP16(ack_cmd); + DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb)); + + /* DANGER: I suspect that some kind of interrupt + acknowledgement aside from acking the 82596 might be needed + here... but it's running acceptably without */ + + ca(dev); + + wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout"); + DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); + + spin_unlock (&lp->lock); + return IRQ_HANDLED; +} + +static int i596_close(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + unsigned long flags; + + netif_stop_queue(dev); + + DEB(DEB_INIT, + printk(KERN_DEBUG + "%s: Shutting down ethercard, status was %4.4x.\n", + dev->name, SWAP16(lp->dma->scb.status))); + + spin_lock_irqsave(&lp->lock, flags); + + wait_cmd(dev, lp->dma, 100, "close1 timed out"); + lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT); + DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb)); + + ca(dev); + + wait_cmd(dev, lp->dma, 100, "close2 timed out"); + spin_unlock_irqrestore(&lp->lock, flags); + DEB(DEB_STRUCT, i596_display_data(dev)); + i596_cleanup_cmd(dev, lp); + + free_irq(dev->irq, dev); + remove_rx_bufs(dev); + + return 0; +} + +/* + * Set or clear the multicast filter for this adaptor. 
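 *
 *	[Editorial note on the CONFIGURE bytes toggled below: byte 8 bit
 *	0x01 is the promiscuous-mode enable, while byte 11 bit 0x20 has
 *	inverted sense; it is cleared to accept all multicast traffic
 *	under IFF_ALLMULTI and set again once that flag is dropped.]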
+ */ + +static void set_multicast_list(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + int config = 0, cnt; + + DEB(DEB_MULTI, + printk(KERN_DEBUG + "%s: set multicast list, %d entries, promisc %s, allmulti %s\n", + dev->name, netdev_mc_count(dev), + dev->flags & IFF_PROMISC ? "ON" : "OFF", + dev->flags & IFF_ALLMULTI ? "ON" : "OFF")); + + if ((dev->flags & IFF_PROMISC) && + !(dma->cf_cmd.i596_config[8] & 0x01)) { + dma->cf_cmd.i596_config[8] |= 0x01; + config = 1; + } + if (!(dev->flags & IFF_PROMISC) && + (dma->cf_cmd.i596_config[8] & 0x01)) { + dma->cf_cmd.i596_config[8] &= ~0x01; + config = 1; + } + if ((dev->flags & IFF_ALLMULTI) && + (dma->cf_cmd.i596_config[11] & 0x20)) { + dma->cf_cmd.i596_config[11] &= ~0x20; + config = 1; + } + if (!(dev->flags & IFF_ALLMULTI) && + !(dma->cf_cmd.i596_config[11] & 0x20)) { + dma->cf_cmd.i596_config[11] |= 0x20; + config = 1; + } + if (config) { + if (dma->cf_cmd.cmd.command) + printk(KERN_INFO + "%s: config change request already queued\n", + dev->name); + else { + dma->cf_cmd.cmd.command = SWAP16(CmdConfigure); + DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd)); + i596_add_cmd(dev, &dma->cf_cmd.cmd); + } + } + + cnt = netdev_mc_count(dev); + if (cnt > MAX_MC_CNT) { + cnt = MAX_MC_CNT; + printk(KERN_NOTICE "%s: Only %d multicast addresses supported", + dev->name, cnt); + } + + if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + unsigned char *cp; + struct mc_cmd *cmd; + + cmd = &dma->mc_cmd; + cmd->cmd.command = SWAP16(CmdMulticastList); + cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6); + cp = cmd->mc_addrs; + netdev_for_each_mc_addr(ha, dev) { + if (!cnt--) + break; + memcpy(cp, ha->addr, 6); + if (i596_debug > 1) + DEB(DEB_MULTI, + printk(KERN_DEBUG + "%s: Adding address %pM\n", + dev->name, cp)); + cp += 6; + } + DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); + i596_add_cmd(dev, &cmd->cmd); + } +} diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c new file mode 100644 index 0000000..385a953 --- /dev/null +++ b/drivers/net/ethernet/i825xx/lp486e.c @@ -0,0 +1,1339 @@ +/* Intel Professional Workstation/panther ethernet driver */ +/* lp486e.c: A panther 82596 ethernet driver for linux. */ +/* + History and copyrights: + + Driver skeleton + Written 1993 by Donald Becker. + Copyright 1993 United States Government as represented by the Director, + National Security Agency. This software may only be used and + distributed according to the terms of the GNU General Public License + as modified by SRC, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Apricot + Written 1994 by Mark Evans. + This driver is for the Apricot 82596 bus-master interface + + Modularised 12/94 Mark Evans + + Professional Workstation + Derived from apricot.c by Ard van Breemen + || + + Credits: + Thanks to Murphy Software BV for letting me write this in their time. + Well, actually, I get paid doing this... + (Also: see http://www.murphy.nl for murphy, and my homepage ~ard for + more information on the Professional Workstation) + + Present version + aeb@cwi.nl +*/ +/* + There are currently two motherboards that I know of in the + professional workstation. The only one that I know is the + intel panther motherboard. -- ard +*/ +/* +The pws is equipped with an intel 82596. 
This is a very intelligent controller +which runs its own micro-code. Communication with the hostprocessor is done +through linked lists of commands and buffers in the hostprocessors memory. +A complete description of the 82596 is available from intel. Search for +a file called "29021806.pdf". It is a complete description of the chip itself. +To use it for the pws some additions are needed regarding generation of +the PORT and CA signal, and the interrupt glue needed for a pc. +I/O map: +PORT SIZE ACTION MEANING +0xCB0 2 WRITE Lower 16 bits for PORT command +0xCB2 2 WRITE Upper 16 bits for PORT command, and issue of PORT command +0xCB4 1 WRITE Generation of CA signal +0xCB8 1 WRITE Clear interrupt glue +All other communication is through memory! +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DRV_NAME "lp486e" + +/* debug print flags */ +#define LOG_SRCDST 0x80000000 +#define LOG_STATINT 0x40000000 +#define LOG_STARTINT 0x20000000 + +#define i596_debug debug + +static int i596_debug = 0; + +static const char * const medianame[] = { + "10baseT", "AUI", + "10baseT-FD", "AUI-FD", +}; + +#define LP486E_TOTAL_SIZE 16 + +#define I596_NULL (0xffffffff) + +#define CMD_EOL 0x8000 /* The last command of the list, stop. */ +#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ +#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ + +#define CMD_FLEX 0x0008 /* Enable flexible memory model */ + +enum commands { + CmdNOP = 0, + CmdIASetup = 1, + CmdConfigure = 2, + CmdMulticastList = 3, + CmdTx = 4, + CmdTDR = 5, + CmdDump = 6, + CmdDiagnose = 7 +}; + +#if 0 +static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList", + "Tx", "TDR", "Dump", "Diagnose" }; +#endif + +/* Status word bits */ +#define STAT_CX 0x8000 /* The CU finished executing a command + with the Interrupt bit set */ +#define STAT_FR 0x4000 /* The RU finished receiving a frame */ +#define STAT_CNA 0x2000 /* The CU left the active state */ +#define STAT_RNR 0x1000 /* The RU left the active state */ +#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR) +#define STAT_CUS 0x0700 /* Status of CU: 0: idle, 1: suspended, + 2: active, 3-7: unused */ +#define STAT_RUS 0x00f0 /* Status of RU: 0: idle, 1: suspended, + 2: no resources, 4: ready, + 10: no resources due to no more RBDs, + 12: no more RBDs, other: unused */ +#define STAT_T 0x0008 /* Bus throttle timers loaded */ +#define STAT_ZERO 0x0807 /* Always zero */ + +#if 0 +static char *CUstates[8] = { + "idle", "suspended", "active", 0, 0, 0, 0, 0 +}; +static char *RUstates[16] = { + "idle", "suspended", "no resources", 0, "ready", 0, 0, 0, + 0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0 +}; + +static void +i596_out_status(int status) { + int bad = 0; + char *s; + + printk("status %4.4x:", status); + if (status == 0xffff) + printk(" strange..\n"); + else { + if (status & STAT_CX) + printk(" CU done"); + if (status & STAT_CNA) + printk(" CU stopped"); + if (status & STAT_FR) + printk(" got a frame"); + if (status & STAT_RNR) + printk(" RU stopped"); + if (status & STAT_T) + printk(" throttled"); + if (status & STAT_ZERO) + bad = 1; + s = CUstates[(status & STAT_CUS) >> 8]; + if (!s) + bad = 1; + else + printk(" CU(%s)", s); + s = RUstates[(status & STAT_RUS) >> 4]; + if (!s) + bad = 1; + else + printk(" RU(%s)", s); + if (bad) + printk(" bad status"); + printk("\n"); + } +} +#endif + +/* Command word bits */ +#define ACK_CX 0x8000 +#define 
ACK_FR 0x4000 +#define ACK_CNA 0x2000 +#define ACK_RNR 0x1000 + +#define CUC_START 0x0100 +#define CUC_RESUME 0x0200 +#define CUC_SUSPEND 0x0300 +#define CUC_ABORT 0x0400 + +#define RX_START 0x0010 +#define RX_RESUME 0x0020 +#define RX_SUSPEND 0x0030 +#define RX_ABORT 0x0040 + +typedef u32 phys_addr; + +static inline phys_addr +va_to_pa(void *x) { + return x ? virt_to_bus(x) : I596_NULL; +} + +static inline void * +pa_to_va(phys_addr x) { + return (x == I596_NULL) ? NULL : bus_to_virt(x); +} + +/* status bits for cmd */ +#define CMD_STAT_C 0x8000 /* CU command complete */ +#define CMD_STAT_B 0x4000 /* CU command in progress */ +#define CMD_STAT_OK 0x2000 /* CU command completed without errors */ +#define CMD_STAT_A 0x1000 /* CU command abnormally terminated */ + +struct i596_cmd { /* 8 bytes */ + unsigned short status; + unsigned short command; + phys_addr pa_next; /* va_to_pa(struct i596_cmd *next) */ +}; + +#define EOF 0x8000 +#define SIZE_MASK 0x3fff + +struct i596_tbd { + unsigned short size; + unsigned short pad; + phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */ + phys_addr pa_data; /* va_to_pa(char *data) */ + struct sk_buff *skb; +}; + +struct tx_cmd { + struct i596_cmd cmd; + phys_addr pa_tbd; /* va_to_pa(struct i596_tbd *tbd) */ + unsigned short size; + unsigned short pad; +}; + +/* status bits for rfd */ +#define RFD_STAT_C 0x8000 /* Frame reception complete */ +#define RFD_STAT_B 0x4000 /* Frame reception in progress */ +#define RFD_STAT_OK 0x2000 /* Frame received without errors */ +#define RFD_STATUS 0x1fff +#define RFD_LENGTH_ERR 0x1000 +#define RFD_CRC_ERR 0x0800 +#define RFD_ALIGN_ERR 0x0400 +#define RFD_NOBUFS_ERR 0x0200 +#define RFD_DMA_ERR 0x0100 /* DMA overrun failure to acquire system bus */ +#define RFD_SHORT_FRAME_ERR 0x0080 +#define RFD_NOEOP_ERR 0x0040 +#define RFD_TRUNC_ERR 0x0020 +#define RFD_MULTICAST 0x0002 /* 0: destination had our address + 1: destination was broadcast/multicast */ +#define RFD_COLLISION 0x0001 + +/* receive frame descriptor */ +struct i596_rfd { + unsigned short stat; + unsigned short cmd; + phys_addr pa_next; /* va_to_pa(struct i596_rfd *next) */ + phys_addr pa_rbd; /* va_to_pa(struct i596_rbd *rbd) */ + unsigned short count; + unsigned short size; + char data[1532]; +}; + +#define RBD_EL 0x8000 +#define RBD_P 0x4000 +#define RBD_SIZEMASK 0x3fff +#define RBD_EOF 0x8000 +#define RBD_F 0x4000 + +/* receive buffer descriptor */ +struct i596_rbd { + unsigned short size; + unsigned short pad; + phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */ + phys_addr pa_data; /* va_to_pa(char *data) */ + phys_addr pa_prev; /* va_to_pa(struct i596_tbd *prev) */ + + /* Driver private part */ + struct sk_buff *skb; +}; + +#define RX_RING_SIZE 64 +#define RX_SKBSIZE (ETH_FRAME_LEN+10) +#define RX_RBD_SIZE 32 + +/* System Control Block - 40 bytes */ +struct i596_scb { + u16 status; /* 0 */ + u16 command; /* 2 */ + phys_addr pa_cmd; /* 4 - va_to_pa(struct i596_cmd *cmd) */ + phys_addr pa_rfd; /* 8 - va_to_pa(struct i596_rfd *rfd) */ + u32 crc_err; /* 12 */ + u32 align_err; /* 16 */ + u32 resource_err; /* 20 */ + u32 over_err; /* 24 */ + u32 rcvdt_err; /* 28 */ + u32 short_err; /* 32 */ + u16 t_on; /* 36 */ + u16 t_off; /* 38 */ +}; + +/* Intermediate System Configuration Pointer - 8 bytes */ +struct i596_iscp { + u32 busy; /* 0 */ + phys_addr pa_scb; /* 4 - va_to_pa(struct i596_scb *scb) */ +}; + +/* System Configuration Pointer - 12 bytes */ +struct i596_scp { + u32 sysbus; /* 0 */ + u32 pad; /* 4 */ + phys_addr pa_iscp; /* 8 - va_to_pa(struct 
i596_iscp *iscp) */ +}; + +/* Selftest and dump results - needs 16-byte alignment */ +/* + * The size of the dump area is 304 bytes. When the dump is executed + * by the Port command an extra word will be appended to the dump area. + * The extra word is a copy of the Dump status word (containing the + * C, B, OK bits). [I find 0xa006, with a0 for C+OK and 6 for dump] + */ +struct i596_dump { + u16 dump[153]; /* (304 = 130h) + 2 bytes */ +}; + +struct i596_private { /* aligned to a 16-byte boundary */ + struct i596_scp scp; /* 0 - needs 16-byte alignment */ + struct i596_iscp iscp; /* 12 */ + struct i596_scb scb; /* 20 */ + u32 dummy; /* 60 */ + struct i596_dump dump; /* 64 - needs 16-byte alignment */ + + struct i596_cmd set_add; + char eth_addr[8]; /* directly follows set_add */ + + struct i596_cmd set_conf; + char i596_config[16]; /* directly follows set_conf */ + + struct i596_cmd tdr; + unsigned long tdr_stat; /* directly follows tdr */ + + int last_restart; + struct i596_rbd *rbd_list; + struct i596_rbd *rbd_tail; + struct i596_rfd *rx_tail; + struct i596_cmd *cmd_tail; + struct i596_cmd *cmd_head; + int cmd_backlog; + unsigned long last_cmd; + spinlock_t cmd_lock; +}; + +static char init_setup[14] = { + 0x8E, /* length 14 bytes, prefetch on */ + 0xC8, /* default: fifo to 8, monitor off */ + 0x40, /* default: don't save bad frames (apricot.c had 0x80) */ + 0x2E, /* (default is 0x26) + No source address insertion, 8 byte preamble */ + 0x00, /* default priority and backoff */ + 0x60, /* default interframe spacing */ + 0x00, /* default slot time LSB */ + 0xf2, /* default slot time and nr of retries */ + 0x00, /* default various bits + (0: promiscuous mode, 1: broadcast disable, + 2: encoding mode, 3: transmit on no CRS, + 4: no CRC insertion, 5: CRC type, + 6: bit stuffing, 7: padding) */ + 0x00, /* default carrier sense and collision detect */ + 0x40, /* default minimum frame length */ + 0xff, /* (default is 0xff, and that is what apricot.c has; + elp486.c has 0xfb: Enable crc append in memory.) 
*/ + 0x00, /* default: not full duplex */ + 0x7f /* (default is 0x3f) multi IA */ +}; + +static int i596_open(struct net_device *dev); +static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t i596_interrupt(int irq, void *dev_id); +static int i596_close(struct net_device *dev); +static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); +static void print_eth(char *); +static void set_multicast_list(struct net_device *dev); +static void i596_tx_timeout(struct net_device *dev); + +static int +i596_timeout(struct net_device *dev, char *msg, int ct) { + struct i596_private *lp; + int boguscnt = ct; + + lp = netdev_priv(dev); + while (lp->scb.command) { + if (--boguscnt == 0) { + printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n", + dev->name, msg, + lp->scb.status, lp->scb.command); + return 1; + } + udelay(5); + barrier(); + } + return 0; +} + +static inline int +init_rx_bufs(struct net_device *dev, int num) { + struct i596_private *lp; + struct i596_rfd *rfd; + int i; + // struct i596_rbd *rbd; + + lp = netdev_priv(dev); + lp->scb.pa_rfd = I596_NULL; + + for (i = 0; i < num; i++) { + rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL); + if (rfd == NULL) + break; + + rfd->stat = 0; + rfd->pa_rbd = I596_NULL; + rfd->count = 0; + rfd->size = 1532; + if (i == 0) { + rfd->cmd = CMD_EOL; + lp->rx_tail = rfd; + } else { + rfd->cmd = 0; + } + rfd->pa_next = lp->scb.pa_rfd; + lp->scb.pa_rfd = va_to_pa(rfd); + lp->rx_tail->pa_next = lp->scb.pa_rfd; + } + +#if 0 + for (i = 0; ipad = 0; + rbd->count = 0; + rbd->skb = dev_alloc_skb(RX_SKBSIZE); + if (!rbd->skb) { + printk("dev_alloc_skb failed"); + } + rbd->next = rfd->rbd; + if (i) { + rfd->rbd->prev = rbd; + rbd->size = RX_SKBSIZE; + } else { + rbd->size = (RX_SKBSIZE | RBD_EL); + lp->rbd_tail = rbd; + } + + rfd->rbd = rbd; + } else { + printk("Could not kmalloc rbd\n"); + } + } + lp->rbd_tail->next = rfd->rbd; +#endif + return i; +} + +static inline void +remove_rx_bufs(struct net_device *dev) { + struct i596_private *lp; + struct i596_rfd *rfd; + + lp = netdev_priv(dev); + lp->rx_tail->pa_next = I596_NULL; + + do { + rfd = pa_to_va(lp->scb.pa_rfd); + lp->scb.pa_rfd = rfd->pa_next; + kfree(rfd); + } while (rfd != lp->rx_tail); + + lp->rx_tail = NULL; + +#if 0 + for (lp->rbd_list) { + } +#endif +} + +#define PORT_RESET 0x00 /* reset 82596 */ +#define PORT_SELFTEST 0x01 /* selftest */ +#define PORT_ALTSCP 0x02 /* alternate SCB address */ +#define PORT_DUMP 0x03 /* dump */ + +#define IOADDR 0xcb0 /* real constant */ +#define IRQ 10 /* default IRQ - can be changed by ECU */ + +/* The 82596 requires two 16-bit write cycles for a port command */ +static inline void +PORT(phys_addr a, unsigned int cmd) { + if (a & 0xf) + printk("lp486e.c: PORT: address not aligned\n"); + outw(((a & 0xffff) | cmd), IOADDR); + outw(((a>>16) & 0xffff), IOADDR+2); +} + +static inline void +CA(void) { + outb(0, IOADDR+4); + udelay(8); +} + +static inline void +CLEAR_INT(void) { + outb(0, IOADDR+8); +} + +#if 0 +/* selftest or dump */ +static void +i596_port_do(struct net_device *dev, int portcmd, char *cmdname) { + struct i596_private *lp = netdev_priv(dev); + u16 *outp; + int i, m; + + memset((void *)&(lp->dump), 0, sizeof(struct i596_dump)); + outp = &(lp->dump.dump[0]); + + PORT(va_to_pa(outp), portcmd); + mdelay(30); /* random, unmotivated */ + + printk("lp486e i82596 %s result:\n", cmdname); + for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--) + ; + for (i = 0; i < m; i++) { + printk(" %04x", 
lp->dump.dump[i]); + if (i%8 == 7) + printk("\n"); + } + printk("\n"); +} +#endif + +static int +i596_scp_setup(struct net_device *dev) { + struct i596_private *lp = netdev_priv(dev); + int boguscnt; + + /* Setup SCP, ISCP, SCB */ + /* + * sysbus bits: + * only a single byte is significant - here 0x44 + * 0x80: big endian mode (details depend on stepping) + * 0x40: 1 + * 0x20: interrupt pin is active low + * 0x10: lock function disabled + * 0x08: external triggering of bus throttle timers + * 0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode + * 0x01: unused + */ + lp->scp.sysbus = 0x00440000; /* linear mode */ + lp->scp.pad = 0; /* must be zero */ + lp->scp.pa_iscp = va_to_pa(&(lp->iscp)); + + /* + * The CPU sets the ISCP to 1 before it gives the first CA() + */ + lp->iscp.busy = 0x0001; + lp->iscp.pa_scb = va_to_pa(&(lp->scb)); + + lp->scb.command = 0; + lp->scb.status = 0; + lp->scb.pa_cmd = I596_NULL; + /* lp->scb.pa_rfd has been initialised already */ + + lp->last_cmd = jiffies; + lp->cmd_backlog = 0; + lp->cmd_head = NULL; + + /* + * Reset the 82596. + * We need to wait 10 systemclock cycles, and + * 5 serial clock cycles. + */ + PORT(0, PORT_RESET); /* address part ignored */ + udelay(100); + + /* + * Before the CA signal is asserted, the default SCP address + * (0x00fffff4) can be changed to a 16-byte aligned value + */ + PORT(va_to_pa(&lp->scp), PORT_ALTSCP); /* change the scp address */ + + /* + * The initialization procedure begins when a + * Channel Attention signal is asserted after a reset. + */ + + CA(); + + /* + * The ISCP busy is cleared by the 82596 after the SCB address is read. + */ + boguscnt = 100; + while (lp->iscp.busy) { + if (--boguscnt == 0) { + /* No i82596 present? */ + printk("%s: i82596 initialization timed out\n", + dev->name); + return 1; + } + udelay(5); + barrier(); + } + /* I find here boguscnt==100, so no delay was required. 
*/ + + return 0; +} + +static int +init_i596(struct net_device *dev) { + struct i596_private *lp; + + if (i596_scp_setup(dev)) + return 1; + + lp = netdev_priv(dev); + lp->scb.command = 0; + + memcpy ((void *)lp->i596_config, init_setup, 14); + lp->set_conf.command = CmdConfigure; + i596_add_cmd(dev, (void *)&lp->set_conf); + + memcpy ((void *)lp->eth_addr, dev->dev_addr, 6); + lp->set_add.command = CmdIASetup; + i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add); + + lp->tdr.command = CmdTDR; + i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr); + + if (lp->scb.command && i596_timeout(dev, "i82596 init", 200)) + return 1; + + lp->scb.command = RX_START; + CA(); + + barrier(); + + if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100)) + return 1; + + return 0; +} + +/* Receive a single frame */ +static inline int +i596_rx_one(struct net_device *dev, struct i596_private *lp, + struct i596_rfd *rfd, int *frames) { + + if (rfd->stat & RFD_STAT_OK) { + /* a good frame */ + int pkt_len = (rfd->count & 0x3fff); + struct sk_buff *skb = dev_alloc_skb(pkt_len); + + (*frames)++; + + if (rfd->cmd & CMD_EOL) + printk("Received on EOL\n"); + + if (skb == NULL) { + printk ("%s: i596_rx Memory squeeze, " + "dropping packet.\n", dev->name); + dev->stats.rx_dropped++; + return 1; + } + + memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len); + + skb->protocol = eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + } else { +#if 0 + printk("Frame reception error status %04x\n", + rfd->stat); +#endif + dev->stats.rx_errors++; + if (rfd->stat & RFD_COLLISION) + dev->stats.collisions++; + if (rfd->stat & RFD_SHORT_FRAME_ERR) + dev->stats.rx_length_errors++; + if (rfd->stat & RFD_DMA_ERR) + dev->stats.rx_over_errors++; + if (rfd->stat & RFD_NOBUFS_ERR) + dev->stats.rx_fifo_errors++; + if (rfd->stat & RFD_ALIGN_ERR) + dev->stats.rx_frame_errors++; + if (rfd->stat & RFD_CRC_ERR) + dev->stats.rx_crc_errors++; + if (rfd->stat & RFD_LENGTH_ERR) + dev->stats.rx_length_errors++; + } + rfd->stat = rfd->count = 0; + return 0; +} + +static int +i596_rx(struct net_device *dev) { + struct i596_private *lp = netdev_priv(dev); + struct i596_rfd *rfd; + int frames = 0; + + while (1) { + rfd = pa_to_va(lp->scb.pa_rfd); + if (!rfd) { + printk(KERN_ERR "i596_rx: NULL rfd?\n"); + return 0; + } +#if 1 + if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B))) + printk("SF:%p-%04x\n", rfd, rfd->stat); +#endif + if (!(rfd->stat & RFD_STAT_C)) + break; /* next one not ready */ + if (i596_rx_one(dev, lp, rfd, &frames)) + break; /* out of memory */ + rfd->cmd = CMD_EOL; + lp->rx_tail->cmd = 0; + lp->rx_tail = rfd; + lp->scb.pa_rfd = rfd->pa_next; + barrier(); + } + + return frames; +} + +static void +i596_cleanup_cmd(struct net_device *dev) { + struct i596_private *lp; + struct i596_cmd *cmd; + + lp = netdev_priv(dev); + while (lp->cmd_head) { + cmd = (struct i596_cmd *)lp->cmd_head; + + lp->cmd_head = pa_to_va(lp->cmd_head->pa_next); + lp->cmd_backlog--; + + switch ((cmd->command) & 0x7) { + case CmdTx: { + struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd; + struct i596_tbd * tx_cmd_tbd; + tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd); + + dev_kfree_skb_any(tx_cmd_tbd->skb); + + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + + cmd->pa_next = I596_NULL; + kfree((unsigned char *)tx_cmd); + netif_wake_queue(dev); + break; + } + case CmdMulticastList: { + // unsigned short count = *((unsigned short *) (ptr + 1)); + + cmd->pa_next = I596_NULL; + kfree((unsigned char *)cmd); + break; + } + default: { + cmd->pa_next 
= I596_NULL; + break; + } + } + barrier(); + } + + if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100)) + ; + + lp->scb.pa_cmd = va_to_pa(lp->cmd_head); +} + +static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) { + + if (lp->scb.command && i596_timeout(dev, "i596_reset", 100)) + ; + + netif_stop_queue(dev); + + lp->scb.command = CUC_ABORT | RX_ABORT; + CA(); + barrier(); + + /* wait for shutdown */ + if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400)) + ; + + i596_cleanup_cmd(dev); + i596_rx(dev); + + netif_start_queue(dev); + /*dev_kfree_skb(skb, FREE_WRITE);*/ + init_i596(dev); +} + +static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) { + struct i596_private *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + unsigned long flags; + + cmd->status = 0; + cmd->command |= (CMD_EOL | CMD_INTR); + cmd->pa_next = I596_NULL; + + spin_lock_irqsave(&lp->cmd_lock, flags); + + if (lp->cmd_head) { + lp->cmd_tail->pa_next = va_to_pa(cmd); + } else { + lp->cmd_head = cmd; + if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100)) + ; + lp->scb.pa_cmd = va_to_pa(cmd); + lp->scb.command = CUC_START; + CA(); + } + lp->cmd_tail = cmd; + lp->cmd_backlog++; + + lp->cmd_head = pa_to_va(lp->scb.pa_cmd); + spin_unlock_irqrestore(&lp->cmd_lock, flags); + + if (lp->cmd_backlog > 16) { + int tickssofar = jiffies - lp->last_cmd; + if (tickssofar < HZ/4) + return; + + printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name); + i596_reset(dev, lp, ioaddr); + } +} + +static int i596_open(struct net_device *dev) +{ + int i; + + i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev); + if (i) { + printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); + return i; + } + + if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE) + printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i); + + if (i < 4) { + free_irq(dev->irq, dev); + return -EAGAIN; + } + netif_start_queue(dev); + init_i596(dev); + return 0; /* Always succeed */ +} + +static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { + struct tx_cmd *tx_cmd; + short length; + + length = skb->len; + + if (length < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + length = ETH_ZLEN; + } + + tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC); + if (tx_cmd == NULL) { + printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name); + dev->stats.tx_dropped++; + dev_kfree_skb (skb); + } else { + struct i596_tbd *tx_cmd_tbd; + tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1); + tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd); + tx_cmd_tbd->pa_next = I596_NULL; + + tx_cmd->cmd.command = (CMD_FLEX | CmdTx); + + tx_cmd->pad = 0; + tx_cmd->size = 0; + tx_cmd_tbd->pad = 0; + tx_cmd_tbd->size = (EOF | length); + + tx_cmd_tbd->pa_data = va_to_pa (skb->data); + tx_cmd_tbd->skb = skb; + + if (i596_debug & LOG_SRCDST) + print_eth (skb->data); + + i596_add_cmd (dev, (struct i596_cmd *) tx_cmd); + + dev->stats.tx_packets++; + } + + return NETDEV_TX_OK; +} + +static void +i596_tx_timeout (struct net_device *dev) { + struct i596_private *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + /* Transmitter timeout, serious problems. 
*/ + printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name); + dev->stats.tx_errors++; + + /* Try to restart the adaptor */ + if (lp->last_restart == dev->stats.tx_packets) { + printk ("Resetting board.\n"); + + /* Shutdown and restart */ + i596_reset (dev, lp, ioaddr); + } else { + /* Issue a channel attention signal */ + printk ("Kicking board.\n"); + lp->scb.command = (CUC_START | RX_START); + CA(); + lp->last_restart = dev->stats.tx_packets; + } + netif_wake_queue(dev); +} + +static void print_eth(char *add) +{ + int i; + + printk ("Dest "); + for (i = 0; i < 6; i++) + printk(" %2.2X", (unsigned char) add[i]); + printk ("\n"); + + printk ("Source"); + for (i = 0; i < 6; i++) + printk(" %2.2X", (unsigned char) add[i+6]); + printk ("\n"); + + printk ("type %2.2X%2.2X\n", + (unsigned char) add[12], (unsigned char) add[13]); +} + +static const struct net_device_ops i596_netdev_ops = { + .ndo_open = i596_open, + .ndo_stop = i596_close, + .ndo_start_xmit = i596_start_xmit, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = i596_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __init lp486e_probe(struct net_device *dev) { + struct i596_private *lp; + unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 }; + unsigned char *bios; + int i, j; + int ret = -ENOMEM; + static int probed; + + if (probed) + return -ENODEV; + probed++; + + if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) { + printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR); + return -EBUSY; + } + + lp = netdev_priv(dev); + spin_lock_init(&lp->cmd_lock); + + /* + * Do we really have this thing? + */ + if (i596_scp_setup(dev)) { + ret = -ENODEV; + goto err_out_kfree; + } + + dev->base_addr = IOADDR; + dev->irq = IRQ; + + + /* + * How do we find the ethernet address? I don't know. + * One possibility is to look at the EISA configuration area + * [0xe8000-0xe9fff]. This contains the ethernet address + * but not at a fixed address - things depend on setup options. + * + * If we find no address, or the wrong address, use + * ifconfig eth0 hw ether a1:a2:a3:a4:a5:a6 + * with the value found in the BIOS setup. + */ + bios = bus_to_virt(0xe8000); + for (j = 0; j < 0x2000; j++) { + if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) { + printk("%s: maybe address at BIOS 0x%x:", + dev->name, 0xe8000+j); + for (i = 0; i < 6; i++) { + eth_addr[i] = bios[i+j]; + printk(" %2.2X", eth_addr[i]); + } + printk("\n"); + } + } + + printk("%s: lp486e 82596 at %#3lx, IRQ %d,", + dev->name, dev->base_addr, dev->irq); + for (i = 0; i < 6; i++) + printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]); + printk("\n"); + + /* The LP486E-specific entries in the device structure. 
*/ + dev->netdev_ops = &i596_netdev_ops; + dev->watchdog_timeo = 5*HZ; + +#if 0 + /* selftest reports 0x320925ae - don't know what that means */ + i596_port_do(dev, PORT_SELFTEST, "selftest"); + i596_port_do(dev, PORT_DUMP, "dump"); +#endif + return 0; + +err_out_kfree: + release_region(IOADDR, LP486E_TOTAL_SIZE); + return ret; +} + +static inline void +i596_handle_CU_completion(struct net_device *dev, + struct i596_private *lp, + unsigned short status, + unsigned short *ack_cmdp) { + struct i596_cmd *cmd; + int frames_out = 0; + int commands_done = 0; + int cmd_val; + unsigned long flags; + + spin_lock_irqsave(&lp->cmd_lock, flags); + cmd = lp->cmd_head; + + while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) { + cmd = lp->cmd_head; + + lp->cmd_head = pa_to_va(lp->cmd_head->pa_next); + lp->cmd_backlog--; + + commands_done++; + cmd_val = cmd->command & 0x7; +#if 0 + printk("finished CU %s command (%d)\n", + CUcmdnames[cmd_val], cmd_val); +#endif + switch (cmd_val) { + case CmdTx: + { + struct tx_cmd *tx_cmd; + struct i596_tbd *tx_cmd_tbd; + + tx_cmd = (struct tx_cmd *) cmd; + tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd); + + frames_out++; + if (cmd->status & CMD_STAT_OK) { + if (i596_debug) + print_eth(pa_to_va(tx_cmd_tbd->pa_data)); + } else { + dev->stats.tx_errors++; + if (i596_debug) + printk("transmission failure:%04x\n", + cmd->status); + if (cmd->status & 0x0020) + dev->stats.collisions++; + if (!(cmd->status & 0x0040)) + dev->stats.tx_heartbeat_errors++; + if (cmd->status & 0x0400) + dev->stats.tx_carrier_errors++; + if (cmd->status & 0x0800) + dev->stats.collisions++; + if (cmd->status & 0x1000) + dev->stats.tx_aborted_errors++; + } + dev_kfree_skb_irq(tx_cmd_tbd->skb); + + cmd->pa_next = I596_NULL; + kfree((unsigned char *)tx_cmd); + netif_wake_queue(dev); + break; + } + + case CmdMulticastList: + cmd->pa_next = I596_NULL; + kfree((unsigned char *)cmd); + break; + + case CmdTDR: + { + unsigned long status = *((unsigned long *) (cmd + 1)); + if (status & 0x8000) { + if (i596_debug) + printk("%s: link ok.\n", dev->name); + } else { + if (status & 0x4000) + printk("%s: Transceiver problem.\n", + dev->name); + if (status & 0x2000) + printk("%s: Termination problem.\n", + dev->name); + if (status & 0x1000) + printk("%s: Short circuit.\n", + dev->name); + printk("%s: Time %ld.\n", + dev->name, status & 0x07ff); + } + } + default: + cmd->pa_next = I596_NULL; + lp->last_cmd = jiffies; + + } + barrier(); + } + + cmd = lp->cmd_head; + while (cmd && (cmd != lp->cmd_tail)) { + cmd->command &= 0x1fff; + cmd = pa_to_va(cmd->pa_next); + barrier(); + } + + if (lp->cmd_head) + *ack_cmdp |= CUC_START; + lp->scb.pa_cmd = va_to_pa(lp->cmd_head); + spin_unlock_irqrestore(&lp->cmd_lock, flags); +} + +static irqreturn_t +i596_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct i596_private *lp = netdev_priv(dev); + unsigned short status, ack_cmd = 0; + int frames_in = 0; + + /* + * The 82596 examines the command, performs the required action, + * and then clears the SCB command word. + */ + if (lp->scb.command && i596_timeout(dev, "interrupt", 40)) + ; + + /* + * The status word indicates the status of the 82596. + * It is modified only by the 82596. + * + * [So, we must not clear it. I find often status 0xffff, + * which is not one of the values allowed by the docs.] 
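+ * [The 0xffff case is trapped just below: we log it and return
+ * without acknowledging anything in the SCB.]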
+ */ + status = lp->scb.status; +#if 0 + if (i596_debug) { + printk("%s: i596 interrupt, ", dev->name); + i596_out_status(status); + } +#endif + /* Impossible, but it happens - perhaps when we get + a receive interrupt but scb.pa_rfd is I596_NULL. */ + if (status == 0xffff) { + printk("%s: i596_interrupt: got status 0xffff\n", dev->name); + goto out; + } + + ack_cmd = (status & STAT_ACK); + + if (status & (STAT_CX | STAT_CNA)) + i596_handle_CU_completion(dev, lp, status, &ack_cmd); + + if (status & (STAT_FR | STAT_RNR)) { + /* Restart the receive unit when it got inactive somehow */ + if ((status & STAT_RNR) && netif_running(dev)) + ack_cmd |= RX_START; + + if (status & STAT_FR) { + frames_in = i596_rx(dev); + if (!frames_in) + printk("receive frame reported, but no frames\n"); + } + } + + /* acknowledge the interrupt */ + /* + if ((lp->scb.pa_cmd != I596_NULL) && netif_running(dev)) + ack_cmd |= CUC_START; + */ + + if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100)) + ; + + lp->scb.command = ack_cmd; + + CLEAR_INT(); + CA(); + + out: + return IRQ_HANDLED; +} + +static int i596_close(struct net_device *dev) { + struct i596_private *lp = netdev_priv(dev); + + netif_stop_queue(dev); + + if (i596_debug) + printk("%s: Shutting down ethercard, status was %4.4x.\n", + dev->name, lp->scb.status); + + lp->scb.command = (CUC_ABORT | RX_ABORT); + CA(); + + i596_cleanup_cmd(dev); + + if (lp->scb.command && i596_timeout(dev, "i596_close", 200)) + ; + + free_irq(dev->irq, dev); + remove_rx_bufs(dev); + + return 0; +} + +/* +* Set or clear the multicast filter for this adaptor. +*/ + +static void set_multicast_list(struct net_device *dev) { + struct i596_private *lp = netdev_priv(dev); + struct i596_cmd *cmd; + + if (i596_debug > 1) + printk ("%s: set multicast list %d\n", + dev->name, netdev_mc_count(dev)); + + if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + char *cp; + cmd = kmalloc(sizeof(struct i596_cmd) + 2 + + netdev_mc_count(dev) * 6, GFP_ATOMIC); + if (cmd == NULL) { + printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name); + return; + } + cmd->command = CmdMulticastList; + *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6; + cp = ((char *)(cmd + 1))+2; + netdev_for_each_mc_addr(ha, dev) { + memcpy(cp, ha->addr, 6); + cp += 6; + } + if (i596_debug & LOG_SRCDST) + print_eth (((char *)(cmd + 1)) + 2); + i596_add_cmd(dev, cmd); + } else { + if (lp->set_conf.pa_next != I596_NULL) { + return; + } + if (netdev_mc_empty(dev) && + !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { + lp->i596_config[8] &= ~0x01; + } else { + lp->i596_config[8] |= 0x01; + } + + i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf); + } +} + +MODULE_AUTHOR("Ard van Breemen "); +MODULE_DESCRIPTION("Intel Panther onboard i82596 driver"); +MODULE_LICENSE("GPL"); + +static struct net_device *dev_lp486e; +static int full_duplex; +static int options; +static int io = IOADDR; +static int irq = IRQ; + +module_param(debug, int, 0); +//module_param(max_interrupt_work, int, 0); +//module_param(reverse_probe, int, 0); +//module_param(rx_copybreak, int, 0); +module_param(options, int, 0); +module_param(full_duplex, int, 0); + +static int __init lp486e_init_module(void) { + int err; + struct net_device *dev = alloc_etherdev(sizeof(struct i596_private)); + if (!dev) + return -ENOMEM; + + dev->irq = irq; + dev->base_addr = io; + err = lp486e_probe(dev); + if (err) { + free_netdev(dev); + return err; + } + err = register_netdev(dev); + if (err) { + release_region(dev->base_addr, LP486E_TOTAL_SIZE); 
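+		/* register_netdev() failed: the I/O region claimed in
+		 * lp486e_probe() was released just above, so only the
+		 * netdev itself is left to free. */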
+ free_netdev(dev); + return err; + } + dev_lp486e = dev; + full_duplex = 0; + options = 0; + return 0; +} + +static void __exit lp486e_cleanup_module(void) { + unregister_netdev(dev_lp486e); + release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE); + free_netdev(dev_lp486e); +} + +module_init(lp486e_init_module); +module_exit(lp486e_cleanup_module); diff --git a/drivers/net/ethernet/i825xx/ni52.c b/drivers/net/ethernet/i825xx/ni52.c new file mode 100644 index 0000000..d973fc6 --- /dev/null +++ b/drivers/net/ethernet/i825xx/ni52.c @@ -0,0 +1,1346 @@ +/* + * net-3-driver for the NI5210 card (i82586 Ethernet chip) + * + * This is an extension to the Linux operating system, and is covered by the + * same GNU General Public License that covers that work. + * + * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later) + * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de) + * [feel free to mail ....] + * + * when using as module: (no autoprobing!) + * run with e.g: + * insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000 + * + * CAN YOU PLEASE REPORT ME YOUR PERFORMANCE EXPERIENCES !!. + * + * If you find a bug, please report me: + * The kernel panic output and any kmsg from the ni52 driver + * the ni5210-driver-version and the linux-kernel version + * how many shared memory (memsize) on the netcard, + * bootprom: yes/no, base_addr, mem_start + * maybe the ni5210-card revision and the i82586 version + * + * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340 + * mem_start: 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000, + * 0xd8000,0xcc000,0xce000,0xda000,0xdc000 + * + * sources: + * skeleton.c from Donald Becker + * + * I have also done a look in the following sources: (mail me if you need them) + * crynwr-packet-driver by Russ Nelson + * Garret A. Wollman's (fourth) i82586-driver for BSD + * (before getting an i82596 (yes 596 not 586) manual, the existing drivers + * helped me a lot to understand this tricky chip.) + * + * Known Problems: + * The internal sysbus seems to be slow. So we often lose packets because of + * overruns while receiving from a fast remote host. + * This can slow down TCP connections. Maybe the newer ni5210 cards are + * better. My experience is, that if a machine sends with more than about + * 500-600K/s the fifo/sysbus overflows. + * + * IMPORTANT NOTE: + * On fast networks, it's a (very) good idea to have 16K shared memory. With + * 8K, we can store only 4 receive frames, so it can (easily) happen that a + * remote machine 'overruns' our system. + * + * Known i82586/card problems (I'm sure, there are many more!): + * Running the NOP-mode, the i82586 sometimes seems to forget to report + * every xmit-interrupt until we restart the CU. + * Another MAJOR bug is, that the RU sometimes seems to ignore the EL-Bit + * in the RBD-Struct which indicates an end of the RBD queue. + * Instead, the RU fetches another (randomly selected and + * usually used) RBD and begins to fill it. (Maybe, this happens only if + * the last buffer from the previous RFD fits exact into the queue and + * the next RFD can't fetch an initial RBD. Anyone knows more? ) + * + * results from ftp performance tests with Linux 1.2.5 + * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s) + * sending in NOP-mode: peak performance up to 530K/s (but better don't + * run this mode) + */ + +/* + * 29.Sept.96: virt_to_bus changes for new memory scheme + * 19.Feb.96: more Mcast changes, module support (MH) + * + * 18.Nov.95: Mcast changes (AC). 
+ * + * 23.April.95: fixed(?) receiving problems by configuring a RFD more + * than the number of RBD's. Can maybe cause other problems. + * 18.April.95: Added MODULE support (MH) + * 17.April.95: MC related changes in init586() and set_multicast_list(). + * removed use of 'jiffies' in init586() (MH) + * + * 19.Sep.94: Added Multicast support (not tested yet) (MH) + * + * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling. + * Now, every RFD has exact one RBD. (MH) + * + * 14.Sep.94: added promiscuous mode, a few cleanups (MH) + * + * 19.Aug.94: changed request_irq() parameter (MH) + * + * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH) + * + * 19.July.94: lotsa cleanups .. (MH) + * + * 17.July.94: some patches ... verified to run with 1.1.29 (MH) + * + * 4.July.94: patches for Linux 1.1.24 (MH) + * + * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH) + * + * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, + * too (MH) + * + * < 30.Sep.93: first versions + */ + +static int debuglevel; /* debug-printk 0: off 1: a few 2: more */ +static int automatic_resume; /* experimental .. better should be zero */ +static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */ +static int fifo = 0x8; /* don't change */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ni52.h" + +#define DRV_NAME "ni52" + +#define DEBUG /* debug on */ +#define SYSBUSVAL 1 /* 8 Bit */ + +#define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); } +#define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); } +#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); } +#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); } + +#define make32(ptr16) ((void __iomem *)(p->memtop + (short) (ptr16))) +#define make24(ptr32) ((char __iomem *)(ptr32)) - p->base +#define make16(ptr32) ((unsigned short) ((char __iomem *)(ptr32)\ + - p->memtop)) + +/******************* how to calculate the buffers ***************************** + + * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works + * --------------- in a different (more stable?) mode. 
Only in this mode it's + * possible to configure the driver with 'NO_NOPCOMMANDS' + +sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8; +sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT +sizeof(rfd) = 24; sizeof(rbd) = 12; +sizeof(tbd) = 8; sizeof(transmit_cmd) = 16; +sizeof(nop_cmd) = 8; + + * if you don't know the driver, better do not change these values: */ + +#define RECV_BUFF_SIZE 1524 /* slightly oversized */ +#define XMIT_BUFF_SIZE 1524 /* slightly oversized */ +#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */ +#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */ +#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */ +#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */ + +/**************************************************************************/ + + +#define NI52_TOTAL_SIZE 16 +#define NI52_ADDR0 0x02 +#define NI52_ADDR1 0x07 +#define NI52_ADDR2 0x01 + +static int ni52_probe1(struct net_device *dev, int ioaddr); +static irqreturn_t ni52_interrupt(int irq, void *dev_id); +static int ni52_open(struct net_device *dev); +static int ni52_close(struct net_device *dev); +static netdev_tx_t ni52_send_packet(struct sk_buff *, struct net_device *); +static struct net_device_stats *ni52_get_stats(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void ni52_timeout(struct net_device *dev); + +/* helper-functions */ +static int init586(struct net_device *dev); +static int check586(struct net_device *dev, unsigned size); +static void alloc586(struct net_device *dev); +static void startrecv586(struct net_device *dev); +static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr); +static void ni52_rcv_int(struct net_device *dev); +static void ni52_xmt_int(struct net_device *dev); +static void ni52_rnr_int(struct net_device *dev); + +struct priv { + char __iomem *base; + char __iomem *mapped; + char __iomem *memtop; + spinlock_t spinlock; + int reset; + struct rfd_struct __iomem *rfd_last, *rfd_top, *rfd_first; + struct scp_struct __iomem *scp; + struct iscp_struct __iomem *iscp; + struct scb_struct __iomem *scb; + struct tbd_struct __iomem *xmit_buffs[NUM_XMIT_BUFFS]; +#if (NUM_XMIT_BUFFS == 1) + struct transmit_cmd_struct __iomem *xmit_cmds[2]; + struct nop_cmd_struct __iomem *nop_cmds[2]; +#else + struct transmit_cmd_struct __iomem *xmit_cmds[NUM_XMIT_BUFFS]; + struct nop_cmd_struct __iomem *nop_cmds[NUM_XMIT_BUFFS]; +#endif + int nop_point, num_recv_buffs; + char __iomem *xmit_cbuffs[NUM_XMIT_BUFFS]; + int xmit_count, xmit_last; +}; + +/* wait for command with timeout: */ +static void wait_for_scb_cmd(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + int i; + for (i = 0; i < 16384; i++) { + if (readb(&p->scb->cmd_cuc) == 0) + break; + udelay(4); + if (i == 16383) { + printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n", + dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus)); + if (!p->reset) { + p->reset = 1; + ni_reset586(); + } + } + } +} + +static void wait_for_scb_cmd_ruc(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + int i; + for (i = 0; i < 16384; i++) { + if (readb(&p->scb->cmd_ruc) == 0) + break; + udelay(4); + if (i == 16383) { + printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. 
disabling i82586!!\n", + dev->name, readb(&p->scb->cmd_ruc), + readb(&p->scb->rus)); + if (!p->reset) { + p->reset = 1; + ni_reset586(); + } + } + } +} + +static void wait_for_stat_compl(void __iomem *p) +{ + struct nop_cmd_struct __iomem *addr = p; + int i; + for (i = 0; i < 32767; i++) { + if (readw(&((addr)->cmd_status)) & STAT_COMPL) + break; + udelay(32); + } +} + +/********************************************** + * close device + */ +static int ni52_close(struct net_device *dev) +{ + free_irq(dev->irq, dev); + ni_reset586(); /* the hard way to stop the receiver */ + netif_stop_queue(dev); + return 0; +} + +/********************************************** + * open device + */ +static int ni52_open(struct net_device *dev) +{ + int ret; + + ni_disint(); + alloc586(dev); + init586(dev); + startrecv586(dev); + ni_enaint(); + + ret = request_irq(dev->irq, ni52_interrupt, 0, dev->name, dev); + if (ret) { + ni_reset586(); + return ret; + } + netif_start_queue(dev); + return 0; /* most done by init */ +} + +static int check_iscp(struct net_device *dev, void __iomem *addr) +{ + struct iscp_struct __iomem *iscp = addr; + struct priv *p = netdev_priv(dev); + memset_io(iscp, 0, sizeof(struct iscp_struct)); + + writel(make24(iscp), &p->scp->iscp); + writeb(1, &iscp->busy); + + ni_reset586(); + ni_attn586(); + mdelay(32); /* wait a while... */ + /* i82586 clears 'busy' after successful init */ + if (readb(&iscp->busy)) + return 0; + return 1; +} + +/********************************************** + * Check to see if there's an 82586 out there. + */ +static int check586(struct net_device *dev, unsigned size) +{ + struct priv *p = netdev_priv(dev); + int i; + + p->mapped = ioremap(dev->mem_start, size); + if (!p->mapped) + return 0; + + p->base = p->mapped + size - 0x01000000; + p->memtop = p->mapped + size; + p->scp = (struct scp_struct __iomem *)(p->base + SCP_DEFAULT_ADDRESS); + p->scb = (struct scb_struct __iomem *) p->mapped; + p->iscp = (struct iscp_struct __iomem *)p->scp - 1; + memset_io(p->scp, 0, sizeof(struct scp_struct)); + for (i = 0; i < sizeof(struct scp_struct); i++) + /* memory was writeable? */ + if (readb((char __iomem *)p->scp + i)) + goto Enodev; + writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */ + if (readb(&p->scp->sysbus) != SYSBUSVAL) + goto Enodev; + + if (!check_iscp(dev, p->mapped)) + goto Enodev; + if (!check_iscp(dev, p->iscp)) + goto Enodev; + return 1; +Enodev: + iounmap(p->mapped); + return 0; +} + +/****************************************************************** + * set iscp at the right place, called by ni52_probe1 and open586. 
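+ * (in this version the open entry point is ni52_open(), which calls
+ * alloc586() before init586()).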
+ */ +static void alloc586(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + ni_reset586(); + mdelay(32); + + memset_io(p->iscp, 0, sizeof(struct iscp_struct)); + memset_io(p->scp , 0, sizeof(struct scp_struct)); + + writel(make24(p->iscp), &p->scp->iscp); + writeb(SYSBUSVAL, &p->scp->sysbus); + writew(make16(p->scb), &p->iscp->scb_offset); + + writeb(1, &p->iscp->busy); + ni_reset586(); + ni_attn586(); + + mdelay(32); + + if (readb(&p->iscp->busy)) + printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name); + + p->reset = 0; + + memset_io(p->scb, 0, sizeof(struct scb_struct)); +} + +/* set: io,irq,memstart,memend or set it when calling insmod */ +static int irq = 9; +static int io = 0x300; +static long memstart; /* e.g 0xd0000 */ +static long memend; /* e.g 0xd4000 */ + +/********************************************** + * probe the ni5210-card + */ +struct net_device * __init ni52_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct priv)); + static const int ports[] = {0x300, 0x280, 0x360, 0x320, 0x340, 0}; + const int *port; + struct priv *p; + int err = 0; + + if (!dev) + return ERR_PTR(-ENOMEM); + + p = netdev_priv(dev); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + io = dev->base_addr; + irq = dev->irq; + memstart = dev->mem_start; + memend = dev->mem_end; + } + + if (io > 0x1ff) { /* Check a single specified location. */ + err = ni52_probe1(dev, io); + } else if (io > 0) { /* Don't probe at all. */ + err = -ENXIO; + } else { + for (port = ports; *port && ni52_probe1(dev, *port) ; port++) + ; + if (*port) + goto got_it; +#ifdef FULL_IO_PROBE + for (io = 0x200; io < 0x400 && ni52_probe1(dev, io); io += 8) + ; + if (io < 0x400) + goto got_it; +#endif + err = -ENODEV; + } + if (err) + goto out; +got_it: + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: + iounmap(p->mapped); + release_region(dev->base_addr, NI52_TOTAL_SIZE); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static const struct net_device_ops ni52_netdev_ops = { + .ndo_open = ni52_open, + .ndo_stop = ni52_close, + .ndo_get_stats = ni52_get_stats, + .ndo_tx_timeout = ni52_timeout, + .ndo_start_xmit = ni52_send_packet, + .ndo_set_multicast_list = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __init ni52_probe1(struct net_device *dev, int ioaddr) +{ + int i, size, retval; + struct priv *priv = netdev_priv(dev); + + dev->base_addr = ioaddr; + dev->irq = irq; + dev->mem_start = memstart; + dev->mem_end = memend; + + spin_lock_init(&priv->spinlock); + + if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME)) + return -EBUSY; + + if (!(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) || + !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) { + retval = -ENODEV; + goto out; + } + + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = inb(dev->base_addr+i); + + if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 || + dev->dev_addr[2] != NI52_ADDR2) { + retval = -ENODEV; + goto out; + } + + printk(KERN_INFO "%s: NI5210 found at %#3lx, ", + dev->name, dev->base_addr); + + /* + * check (or search) IO-Memory, 8K and 16K + */ +#ifdef MODULE + size = dev->mem_end - dev->mem_start; + if (size != 0x2000 && size != 0x4000) { + printk("\n"); + printk(KERN_ERR "%s: Invalid memory size %d. 
Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size); + retval = -ENODEV; + goto out; + } + if (!check586(dev, size)) { + printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size); + retval = -ENODEV; + goto out; + } +#else + if (dev->mem_start != 0) { + /* no auto-mem-probe */ + size = 0x4000; /* check for 16K mem */ + if (!check586(dev, size)) { + size = 0x2000; /* check for 8K mem */ + if (!check586(dev, size)) { + printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start); + retval = -ENODEV; + goto out; + } + } + } else { + static const unsigned long memaddrs[] = { + 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000, + 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0 + }; + for (i = 0;; i++) { + if (!memaddrs[i]) { + printk(KERN_ERR "?memprobe, Can't find io-memory!\n"); + retval = -ENODEV; + goto out; + } + dev->mem_start = memaddrs[i]; + size = 0x2000; /* check for 8K mem */ + if (check586(dev, size)) + /* 8K-check */ + break; + size = 0x4000; /* check for 16K mem */ + if (check586(dev, size)) + /* 16K-check */ + break; + } + } + /* set mem_end showed by 'ifconfig' */ + dev->mem_end = dev->mem_start + size; +#endif + + alloc586(dev); + + /* set number of receive-buffs according to memsize */ + if (size == 0x2000) + priv->num_recv_buffs = NUM_RECV_BUFFS_8; + else + priv->num_recv_buffs = NUM_RECV_BUFFS_16; + + printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ", + dev->mem_start, size); + + if (dev->irq < 2) { + unsigned long irq_mask; + + irq_mask = probe_irq_on(); + ni_reset586(); + ni_attn586(); + + mdelay(20); + dev->irq = probe_irq_off(irq_mask); + if (!dev->irq) { + printk("?autoirq, Failed to detect IRQ line!\n"); + retval = -EAGAIN; + iounmap(priv->mapped); + goto out; + } + printk("IRQ %d (autodetected).\n", dev->irq); + } else { + if (dev->irq == 2) + dev->irq = 9; + printk("IRQ %d (assigned and not checked!).\n", dev->irq); + } + + dev->netdev_ops = &ni52_netdev_ops; + dev->watchdog_timeo = HZ/20; + + return 0; +out: + release_region(ioaddr, NI52_TOTAL_SIZE); + return retval; +} + +/********************************************** + * init the chip (ni52-interrupt should be disabled?!) + * needs a correct 'allocated' memory + */ + +static int init586(struct net_device *dev) +{ + void __iomem *ptr; + int i, result = 0; + struct priv *p = netdev_priv(dev); + struct configure_cmd_struct __iomem *cfg_cmd; + struct iasetup_cmd_struct __iomem *ias_cmd; + struct tdr_cmd_struct __iomem *tdr_cmd; + struct mcsetup_cmd_struct __iomem *mc_cmd; + struct netdev_hw_addr *ha; + int num_addrs = netdev_mc_count(dev); + + ptr = p->scb + 1; + + cfg_cmd = ptr; /* configure-command */ + writew(0, &cfg_cmd->cmd_status); + writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd); + writew(0xFFFF, &cfg_cmd->cmd_link); + + /* number of cfg bytes */ + writeb(0x0a, &cfg_cmd->byte_cnt); + /* fifo-limit (8=tx:32/rx:64) */ + writeb(fifo, &cfg_cmd->fifo); + /* hold or discard bad recv frames (bit 7) */ + writeb(0x40, &cfg_cmd->sav_bf); + /* addr_len |!src_insert |pre-len |loopback */ + writeb(0x2e, &cfg_cmd->adr_len); + writeb(0x00, &cfg_cmd->priority); + writeb(0x60, &cfg_cmd->ifs); + writeb(0x00, &cfg_cmd->time_low); + writeb(0xf2, &cfg_cmd->time_high); + writeb(0x00, &cfg_cmd->promisc); + if (dev->flags & IFF_ALLMULTI) { + int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6; + if (num_addrs > len) { + printk(KERN_ERR "%s: switching to promisc. 
mode\n", + dev->name); + writeb(0x01, &cfg_cmd->promisc); + } + } + if (dev->flags & IFF_PROMISC) + writeb(0x01, &cfg_cmd->promisc); + writeb(0x00, &cfg_cmd->carr_coll); + writew(make16(cfg_cmd), &p->scb->cbl_offset); + writeb(0, &p->scb->cmd_ruc); + + writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ + ni_attn586(); + + wait_for_stat_compl(cfg_cmd); + + if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != + (STAT_COMPL|STAT_OK)) { + printk(KERN_ERR "%s: configure command failed: %x\n", + dev->name, readw(&cfg_cmd->cmd_status)); + return 1; + } + + /* + * individual address setup + */ + + ias_cmd = ptr; + + writew(0, &ias_cmd->cmd_status); + writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd); + writew(0xffff, &ias_cmd->cmd_link); + + memcpy_toio(&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN); + + writew(make16(ias_cmd), &p->scb->cbl_offset); + + writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ + ni_attn586(); + + wait_for_stat_compl(ias_cmd); + + if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != + (STAT_OK|STAT_COMPL)) { + printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status)); + return 1; + } + + /* + * TDR, wire check .. e.g. no resistor e.t.c + */ + + tdr_cmd = ptr; + + writew(0, &tdr_cmd->cmd_status); + writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd); + writew(0xffff, &tdr_cmd->cmd_link); + writew(0, &tdr_cmd->status); + + writew(make16(tdr_cmd), &p->scb->cbl_offset); + writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ + ni_attn586(); + + wait_for_stat_compl(tdr_cmd); + + if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL)) + printk(KERN_ERR "%s: Problems while running the TDR.\n", + dev->name); + else { + udelay(16); + result = readw(&tdr_cmd->status); + writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc); + ni_attn586(); /* ack the interrupts */ + + if (result & TDR_LNK_OK) + ; + else if (result & TDR_XCVR_PRB) + printk(KERN_ERR "%s: TDR: Transceiver problem. 
Check the cable(s)!\n", + dev->name); + else if (result & TDR_ET_OPN) + printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n", + dev->name, result & TDR_TIMEMASK); + else if (result & TDR_ET_SRT) { + /* time == 0 -> strange :-) */ + if (result & TDR_TIMEMASK) + printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n", + dev->name, result & TDR_TIMEMASK); + } else + printk(KERN_ERR "%s: TDR: Unknown status %04x\n", + dev->name, result); + } + + /* + * Multicast setup + */ + if (num_addrs && !(dev->flags & IFF_PROMISC)) { + mc_cmd = ptr; + writew(0, &mc_cmd->cmd_status); + writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd); + writew(0xffff, &mc_cmd->cmd_link); + writew(num_addrs * 6, &mc_cmd->mc_cnt); + + i = 0; + netdev_for_each_mc_addr(ha, dev) + memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6); + + writew(make16(mc_cmd), &p->scb->cbl_offset); + writeb(CUC_START, &p->scb->cmd_cuc); + ni_attn586(); + + wait_for_stat_compl(mc_cmd); + + if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) + != (STAT_COMPL|STAT_OK)) + printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name); + } + + /* + * alloc nop/xmit-cmds + */ +#if (NUM_XMIT_BUFFS == 1) + for (i = 0; i < 2; i++) { + p->nop_cmds[i] = ptr; + writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); + writew(0, &p->nop_cmds[i]->cmd_status); + writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); + ptr = ptr + sizeof(struct nop_cmd_struct); + } +#else + for (i = 0; i < NUM_XMIT_BUFFS; i++) { + p->nop_cmds[i] = ptr; + writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); + writew(0, &p->nop_cmds[i]->cmd_status); + writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); + ptr = ptr + sizeof(struct nop_cmd_struct); + } +#endif + + ptr = alloc_rfa(dev, ptr); /* init receive-frame-area */ + + /* + * alloc xmit-buffs / init xmit_cmds + */ + for (i = 0; i < NUM_XMIT_BUFFS; i++) { + /* Transmit cmd/buff 0 */ + p->xmit_cmds[i] = ptr; + ptr = ptr + sizeof(struct transmit_cmd_struct); + p->xmit_cbuffs[i] = ptr; /* char-buffs */ + ptr = ptr + XMIT_BUFF_SIZE; + p->xmit_buffs[i] = ptr; /* TBD */ + ptr = ptr + sizeof(struct tbd_struct); + if ((void __iomem *)ptr > (void __iomem *)p->iscp) { + printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n", + dev->name); + return 1; + } + memset_io(p->xmit_cmds[i], 0, + sizeof(struct transmit_cmd_struct)); + memset_io(p->xmit_buffs[i], 0, + sizeof(struct tbd_struct)); + writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]), + &p->xmit_cmds[i]->cmd_link); + writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status); + writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd); + writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset); + writew(0xffff, &p->xmit_buffs[i]->next); + writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer); + } + + p->xmit_count = 0; + p->xmit_last = 0; +#ifndef NO_NOPCOMMANDS + p->nop_point = 0; +#endif + + /* + * 'start transmitter' + */ +#ifndef NO_NOPCOMMANDS + writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset); + writeb(CUC_START, &p->scb->cmd_cuc); + ni_attn586(); + wait_for_scb_cmd(dev); +#else + writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link); + writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd); +#endif + + /* + * ack. interrupts + */ + writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc); + ni_attn586(); + udelay(16); + + ni_enaint(); + + return 0; +} + +/****************************************************** + * This is a helper routine for ni52_rnr_int() and init586(). + * It sets up the Receive Frame Area (RFA). 
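+ * (each RFD is paired with exactly one RBD: the flexible RBD handling
+ * was removed as the EL-bug workaround noted in the header, and rfdadd
+ * only appends spare RFDs that carry no buffer).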
+ */ + +static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr) +{ + struct rfd_struct __iomem *rfd = ptr; + struct rbd_struct __iomem *rbd; + int i; + struct priv *p = netdev_priv(dev); + + memset_io(rfd, 0, + sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd)); + p->rfd_first = rfd; + + for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) { + writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)), + &rfd[i].next); + writew(0xffff, &rfd[i].rbd_offset); + } + /* RU suspend */ + writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last); + + ptr = rfd + (p->num_recv_buffs + rfdadd); + + rbd = ptr; + ptr = rbd + p->num_recv_buffs; + + /* clr descriptors */ + memset_io(rbd, 0, sizeof(struct rbd_struct) * (p->num_recv_buffs)); + + for (i = 0; i < p->num_recv_buffs; i++) { + writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next); + writew(RECV_BUFF_SIZE, &rbd[i].size); + writel(make24(ptr), &rbd[i].buffer); + ptr = ptr + RECV_BUFF_SIZE; + } + p->rfd_top = p->rfd_first; + p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); + + writew(make16(p->rfd_first), &p->scb->rfa_offset); + writew(make16(rbd), &p->rfd_first->rbd_offset); + + return ptr; +} + + +/************************************************** + * Interrupt Handler ... + */ + +static irqreturn_t ni52_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + unsigned int stat; + int cnt = 0; + struct priv *p; + + p = netdev_priv(dev); + + if (debuglevel > 1) + printk("I"); + + spin_lock(&p->spinlock); + + wait_for_scb_cmd(dev); /* wait for last command */ + + while ((stat = readb(&p->scb->cus) & STAT_MASK)) { + writeb(stat, &p->scb->cmd_cuc); + ni_attn586(); + + if (stat & STAT_FR) /* received a frame */ + ni52_rcv_int(dev); + + if (stat & STAT_RNR) { /* RU went 'not ready' */ + printk("(R)"); + if (readb(&p->scb->rus) & RU_SUSPEND) { + /* special case: RU_SUSPEND */ + wait_for_scb_cmd(dev); + writeb(RUC_RESUME, &p->scb->cmd_ruc); + ni_attn586(); + wait_for_scb_cmd_ruc(dev); + } else { + printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n", + dev->name, stat, readb(&p->scb->rus)); + ni52_rnr_int(dev); + } + } + + /* Command with I-bit set complete */ + if (stat & STAT_CX) + ni52_xmt_int(dev); + +#ifndef NO_NOPCOMMANDS + if (stat & STAT_CNA) { /* CU went 'not ready' */ + if (netif_running(dev)) + printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n", + dev->name, stat, readb(&p->scb->cus)); + } +#endif + + if (debuglevel > 1) + printk("%d", cnt++); + + /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */ + wait_for_scb_cmd(dev); + if (readb(&p->scb->cmd_cuc)) { /* timed out? */ + printk(KERN_ERR "%s: Acknowledge timed out.\n", + dev->name); + ni_disint(); + break; + } + } + spin_unlock(&p->spinlock); + + if (debuglevel > 1) + printk("i"); + return IRQ_HANDLED; +} + +/******************************************************* + * receive-interrupt + */ + +static void ni52_rcv_int(struct net_device *dev) +{ + int status, cnt = 0; + unsigned short totlen; + struct sk_buff *skb; + struct rbd_struct __iomem *rbd; + struct priv *p = netdev_priv(dev); + + if (debuglevel > 0) + printk("R"); + + for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) { + rbd = make32(readw(&p->rfd_top->rbd_offset)); + if (status & RFD_OK) { /* frame received without error? */ + totlen = readw(&rbd->status); + if (totlen & RBD_LAST) { + /* the first and the last buffer? 
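+			   (i.e. the whole frame fit into a single receive buffer)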
*/ + totlen &= RBD_MASK; /* length of this frame */ + writew(0x00, &rbd->status); + skb = (struct sk_buff *)dev_alloc_skb(totlen+2); + if (skb != NULL) { + skb_reserve(skb, 2); + skb_put(skb, totlen); + memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += totlen; + } else + dev->stats.rx_dropped++; + } else { + int rstat; + /* free all RBD's until RBD_LAST is set */ + totlen = 0; + while (!((rstat = readw(&rbd->status)) & RBD_LAST)) { + totlen += rstat & RBD_MASK; + if (!rstat) { + printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name); + break; + } + writew(0, &rbd->status); + rbd = make32(readw(&rbd->next)); + } + totlen += rstat & RBD_MASK; + writew(0, &rbd->status); + printk(KERN_ERR "%s: received oversized frame! length: %d\n", + dev->name, totlen); + dev->stats.rx_dropped++; + } + } else {/* frame !(ok), only with 'save-bad-frames' */ + printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n", + dev->name, status); + dev->stats.rx_errors++; + } + writeb(0, &p->rfd_top->stat_high); + writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */ + writew(0xffff, &p->rfd_top->rbd_offset); + writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */ + p->rfd_last = p->rfd_top; + p->rfd_top = make32(readw(&p->rfd_top->next)); /* step to next RFD */ + writew(make16(p->rfd_top), &p->scb->rfa_offset); + + if (debuglevel > 0) + printk("%d", cnt++); + } + + if (automatic_resume) { + wait_for_scb_cmd(dev); + writeb(RUC_RESUME, &p->scb->cmd_ruc); + ni_attn586(); + wait_for_scb_cmd_ruc(dev); + } + +#ifdef WAIT_4_BUSY + { + int i; + for (i = 0; i < 1024; i++) { + if (p->rfd_top->status) + break; + udelay(16); + if (i == 1023) + printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name); + } + } +#endif + if (debuglevel > 0) + printk("r"); +} + +/********************************************************** + * handle 'Receiver went not ready'. + */ + +static void ni52_rnr_int(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + dev->stats.rx_errors++; + + wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ + writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */ + ni_attn586(); + wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */ + + alloc_rfa(dev, p->rfd_first); + /* maybe add a check here, before restarting the RU */ + startrecv586(dev); /* restart RU */ + + printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n", + dev->name, readb(&p->scb->rus)); + +} + +/********************************************************** + * handle xmit - interrupt + */ + +static void ni52_xmt_int(struct net_device *dev) +{ + int status; + struct priv *p = netdev_priv(dev); + + if (debuglevel > 0) + printk("X"); + + status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status); + if (!(status & STAT_COMPL)) + printk(KERN_ERR "%s: strange .. 
xmit-int without a 'COMPLETE'\n", dev->name);
+
+	if (status & STAT_OK) {
+		dev->stats.tx_packets++;
+		dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
+	} else {
+		dev->stats.tx_errors++;
+		if (status & TCMD_LATECOLL) {
+			printk(KERN_ERR "%s: late collision detected.\n",
+				dev->name);
+			dev->stats.collisions++;
+		} else if (status & TCMD_NOCARRIER) {
+			dev->stats.tx_carrier_errors++;
+			printk(KERN_ERR "%s: no carrier detected.\n",
+				dev->name);
+		} else if (status & TCMD_LOSTCTS)
+			printk(KERN_ERR "%s: loss of CTS detected.\n",
+				dev->name);
+		else if (status & TCMD_UNDERRUN) {
+			dev->stats.tx_fifo_errors++;
+			printk(KERN_ERR "%s: DMA underrun detected.\n",
+				dev->name);
+		} else if (status & TCMD_MAXCOLL) {
+			printk(KERN_ERR "%s: Max. collisions exceeded.\n",
+				dev->name);
+			dev->stats.collisions += 16;
+		}
+	}
+#if (NUM_XMIT_BUFFS > 1)
+	if ((++p->xmit_last) == NUM_XMIT_BUFFS)
+		p->xmit_last = 0;
+#endif
+	netif_wake_queue(dev);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+static void startrecv586(struct net_device *dev)
+{
+	struct priv *p = netdev_priv(dev);
+
+	wait_for_scb_cmd(dev);
+	wait_for_scb_cmd_ruc(dev);
+	writew(make16(p->rfd_first), &p->scb->rfa_offset);
+	writeb(RUC_START, &p->scb->cmd_ruc);
+	ni_attn586();		/* start cmd. */
+	wait_for_scb_cmd_ruc(dev);
+	/* wait for accept cmd. (no timeout!!) */
+}
+
+static void ni52_timeout(struct net_device *dev)
+{
+	struct priv *p = netdev_priv(dev);
+#ifndef NO_NOPCOMMANDS
+	if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */
+		netif_wake_queue(dev);
+#ifdef DEBUG
+		printk(KERN_ERR "%s: strange ... timeout with CU active?!?\n",
+			dev->name);
+		printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n",
+			dev->name, (int)p->xmit_cmds[0]->cmd_status,
+			readw(&p->nop_cmds[0]->cmd_status),
+			readw(&p->nop_cmds[1]->cmd_status),
+			p->nop_point);
+#endif
+		writeb(CUC_ABORT, &p->scb->cmd_cuc);
+		ni_attn586();
+		wait_for_scb_cmd(dev);
+		writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset);
+		writeb(CUC_START, &p->scb->cmd_cuc);
+		ni_attn586();
+		wait_for_scb_cmd(dev);
+		dev->trans_start = jiffies; /* prevent tx timeout */
+		return;
+	}
+#endif
+	{
+#ifdef DEBUG
+		printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n",
+			dev->name, readb(&p->scb->cus));
+		printk(KERN_ERR "%s: command-stats: %04x %04x\n",
+			dev->name,
+			readw(&p->xmit_cmds[0]->cmd_status),
+			readw(&p->xmit_cmds[1]->cmd_status));
+		printk(KERN_ERR "%s: check whether you set the right interrupt number!\n",
+			dev->name);
+#endif
+		ni52_close(dev);
+		ni52_open(dev);
+	}
+	dev->trans_start = jiffies; /* prevent tx timeout */
+}
+
+/******************************************************
+ * send frame
+ */
+
+static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
+				    struct net_device *dev)
+{
+	int len, i;
+#ifndef NO_NOPCOMMANDS
+	int next_nop;
+#endif
+	struct priv *p = netdev_priv(dev);
+
+	if (skb->len > XMIT_BUFF_SIZE) {
+		printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
+		return NETDEV_TX_OK;
+	}
+
+	netif_stop_queue(dev);
+
+	memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len);
+	len = skb->len;
+	if (len < ETH_ZLEN) {
+		len = ETH_ZLEN;
+		memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
+				len - skb->len);
+	}
+
+#if (NUM_XMIT_BUFFS == 1)
+# ifdef NO_NOPCOMMANDS
+
+#ifdef DEBUG
+	if (readb(&p->scb->cus) & CU_ACTIVE) {
+		printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
+		printk(KERN_ERR "%s: stat: %04x %04x\n",
+			dev->name, readb(&p->scb->cus),
+			readw(&p->xmit_cmds[0]->cmd_status));
+	}
+#endif
+	writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
+	for (i = 0; i < 16; i++) {
+		writew(0, &p->xmit_cmds[0]->cmd_status);
+		wait_for_scb_cmd(dev);
+		if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND)
+			writeb(CUC_RESUME, &p->scb->cmd_cuc);
+		else {
+			writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset);
+			writeb(CUC_START, &p->scb->cmd_cuc);
+		}
+		ni_attn586();
+		if (!i)
+			dev_kfree_skb(skb);
+		wait_for_scb_cmd(dev);
+		/* test it, because CU sometimes doesn't start immediately */
+		if (readb(&p->scb->cus) & CU_ACTIVE)
+			break;
+		if (readw(&p->xmit_cmds[0]->cmd_status))
+			break;
+		if (i == 15)
+			printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
+	}
+# else
+	next_nop = (p->nop_point + 1) & 0x1;
+	writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
+	writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link);
+	writew(make16(p->nop_cmds[next_nop]),
+		&p->nop_cmds[next_nop]->cmd_link);
+	writew(0, &p->xmit_cmds[0]->cmd_status);
+	writew(0, &p->nop_cmds[next_nop]->cmd_status);
+
+	writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
+	p->nop_point = next_nop;
+	dev_kfree_skb(skb);
+# endif
+#else
+	writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size);
+	next_nop = p->xmit_count + 1;
+	if (next_nop == NUM_XMIT_BUFFS)
+		next_nop = 0;
+	writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status);
+	/* linkpointer of xmit-command already points to next nop cmd */
+	writew(make16(p->nop_cmds[next_nop]),
+		&p->nop_cmds[next_nop]->cmd_link);
+	writew(0, &p->nop_cmds[next_nop]->cmd_status);
+	writew(make16(p->xmit_cmds[p->xmit_count]),
+		&p->nop_cmds[p->xmit_count]->cmd_link);
+	p->xmit_count = next_nop;
+	{
+		unsigned long flags;
+		spin_lock_irqsave(&p->spinlock, flags);
+		if (p->xmit_count != p->xmit_last)
+			netif_wake_queue(dev);
+		spin_unlock_irqrestore(&p->spinlock, flags);
+	}
+	dev_kfree_skb(skb);
+#endif
+	return NETDEV_TX_OK;
+}
+
+/*******************************************
+ * Someone wanna have the statistics
+ */
+
+static struct net_device_stats *ni52_get_stats(struct net_device *dev)
+{
+	struct priv *p = netdev_priv(dev);
+	unsigned short crc, aln, rsc, ovrn;
+
+	/* Get error-statistics from the ni82586 */
+	crc = readw(&p->scb->crc_errs);
+	writew(0, &p->scb->crc_errs);
+	aln = readw(&p->scb->aln_errs);
+	writew(0, &p->scb->aln_errs);
+	rsc = readw(&p->scb->rsc_errs);
+	writew(0, &p->scb->rsc_errs);
+	ovrn = readw(&p->scb->ovrn_errs);
+	writew(0, &p->scb->ovrn_errs);
+
+	dev->stats.rx_crc_errors += crc;
+	dev->stats.rx_fifo_errors += ovrn;
+	dev->stats.rx_frame_errors += aln;
+	dev->stats.rx_dropped += rsc;
+
+	return &dev->stats;
+}
+
+/********************************************************
+ * Set MC list ..
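+ * (no incremental update: the chip is torn down and re-initialised,
+ * and init586() programs the new list via a CMD_MCSETUP block).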
+ */ + +static void set_multicast_list(struct net_device *dev) +{ + netif_stop_queue(dev); + ni_disint(); + alloc586(dev); + init586(dev); + startrecv586(dev); + ni_enaint(); + netif_wake_queue(dev); +} + +#ifdef MODULE +static struct net_device *dev_ni52; + +module_param(io, int, 0); +module_param(irq, int, 0); +module_param(memstart, long, 0); +module_param(memend, long, 0); +MODULE_PARM_DESC(io, "NI5210 I/O base address,required"); +MODULE_PARM_DESC(irq, "NI5210 IRQ number,required"); +MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); +MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); + +int __init init_module(void) +{ + if (io <= 0x0 || !memend || !memstart || irq < 2) { + printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n"); + printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); + return -ENODEV; + } + dev_ni52 = ni52_probe(-1); + if (IS_ERR(dev_ni52)) + return PTR_ERR(dev_ni52); + return 0; +} + +void __exit cleanup_module(void) +{ + struct priv *p = netdev_priv(dev_ni52); + unregister_netdev(dev_ni52); + iounmap(p->mapped); + release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE); + free_netdev(dev_ni52); +} +#endif /* MODULE */ + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/i825xx/ni52.h b/drivers/net/ethernet/i825xx/ni52.h new file mode 100644 index 0000000..0a03b28 --- /dev/null +++ b/drivers/net/ethernet/i825xx/ni52.h @@ -0,0 +1,310 @@ +/* + * Intel i82586 Ethernet definitions + * + * This is an extension to the Linux operating system, and is covered by the + * same GNU General Public License that covers that work. + * + * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de) + * + * I have done a look in the following sources: + * crynwr-packet-driver by Russ Nelson + * Garret A. 
Wollman's i82586-driver for BSD + */ + + +#define NI52_RESET 0 /* writing to this address, resets the i82586 */ +#define NI52_ATTENTION 1 /* channel attention, kick the 586 */ +#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */ +#define NI52_TDIS 2 /* Xmit disable */ +#define NI52_INTENA 5 /* Interrupt enable */ +#define NI52_INTDIS 4 /* Interrupt disable */ +#define NI52_MAGIC1 6 /* dunno exact function */ +#define NI52_MAGIC2 7 /* dunno exact function */ + +#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */ +#define NI52_MAGICVAL2 0x55 + +/* + * where to find the System Configuration Pointer (SCP) + */ +#define SCP_DEFAULT_ADDRESS 0xfffff4 + + +/* + * System Configuration Pointer Struct + */ + +struct scp_struct +{ + u16 zero_dum0; /* has to be zero */ + u8 sysbus; /* 0=16Bit,1=8Bit */ + u8 zero_dum1; /* has to be zero for 586 */ + u16 zero_dum2; + u16 zero_dum3; + u32 iscp; /* pointer to the iscp-block */ +}; + + +/* + * Intermediate System Configuration Pointer (ISCP) + */ +struct iscp_struct +{ + u8 busy; /* 586 clears after successful init */ + u8 zero_dummy; /* has to be zero */ + u16 scb_offset; /* pointeroffset to the scb_base */ + u32 scb_base; /* base-address of all 16-bit offsets */ +}; + +/* + * System Control Block (SCB) + */ +struct scb_struct +{ + u8 rus; + u8 cus; + u8 cmd_ruc; /* command word: RU part */ + u8 cmd_cuc; /* command word: CU part & ACK */ + u16 cbl_offset; /* pointeroffset, command block list */ + u16 rfa_offset; /* pointeroffset, receive frame area */ + u16 crc_errs; /* CRC-Error counter */ + u16 aln_errs; /* alignmenterror counter */ + u16 rsc_errs; /* Resourceerror counter */ + u16 ovrn_errs; /* OVerrunerror counter */ +}; + +/* + * possible command values for the command word + */ +#define RUC_MASK 0x0070 /* mask for RU commands */ +#define RUC_NOP 0x0000 /* NOP-command */ +#define RUC_START 0x0010 /* start RU */ +#define RUC_RESUME 0x0020 /* resume RU after suspend */ +#define RUC_SUSPEND 0x0030 /* suspend RU */ +#define RUC_ABORT 0x0040 /* abort receiver operation immediately */ + +#define CUC_MASK 0x07 /* mask for CU command */ +#define CUC_NOP 0x00 /* NOP-command */ +#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */ +#define CUC_RESUME 0x02 /* resume after suspend */ +#define CUC_SUSPEND 0x03 /* Suspend CU */ +#define CUC_ABORT 0x04 /* abort command operation immediately */ + +#define ACK_MASK 0xf0 /* mask for ACK command */ +#define ACK_CX 0x80 /* acknowledges STAT_CX */ +#define ACK_FR 0x40 /* ack. STAT_FR */ +#define ACK_CNA 0x20 /* ack. STAT_CNA */ +#define ACK_RNR 0x10 /* ack. 
STAT_RNR */ + +/* + * possible status values for the status word + */ +#define STAT_MASK 0xf0 /* mask for cause of interrupt */ +#define STAT_CX 0x80 /* CU finished cmd with its I bit set */ +#define STAT_FR 0x40 /* RU finished receiving a frame */ +#define STAT_CNA 0x20 /* CU left active state */ +#define STAT_RNR 0x10 /* RU left ready state */ + +#define CU_STATUS 0x7 /* CU status, 0=idle */ +#define CU_SUSPEND 0x1 /* CU is suspended */ +#define CU_ACTIVE 0x2 /* CU is active */ + +#define RU_STATUS 0x70 /* RU status, 0=idle */ +#define RU_SUSPEND 0x10 /* RU suspended */ +#define RU_NOSPACE 0x20 /* RU no resources */ +#define RU_READY 0x40 /* RU is ready */ + +/* + * Receive Frame Descriptor (RFD) + */ +struct rfd_struct +{ + u8 stat_low; /* status word */ + u8 stat_high; /* status word */ + u8 rfd_sf; /* 82596 mode only */ + u8 last; /* Bit15,Last Frame on List / Bit14,suspend */ + u16 next; /* linkoffset to next RFD */ + u16 rbd_offset; /* pointeroffset to RBD-buffer */ + u8 dest[6]; /* ethernet-address, destination */ + u8 source[6]; /* ethernet-address, source */ + u16 length; /* 802.3 frame-length */ + u16 zero_dummy; /* dummy */ +}; + +#define RFD_LAST 0x80 /* last: last rfd in the list */ +#define RFD_SUSP 0x40 /* last: suspend RU after */ +#define RFD_COMPL 0x80 +#define RFD_OK 0x20 +#define RFD_BUSY 0x40 +#define RFD_ERR_LEN 0x10 /* Length error (if enabled length-checking */ +#define RFD_ERR_CRC 0x08 /* CRC error */ +#define RFD_ERR_ALGN 0x04 /* Alignment error */ +#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */ +#define RFD_ERR_OVR 0x01 /* DMA Overrun! */ + +#define RFD_ERR_FTS 0x0080 /* Frame to short */ +#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */ +#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */ +#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */ +#define RFD_COLLDET 0x0001 /* Detected collision during reception */ + +/* + * Receive Buffer Descriptor (RBD) + */ +struct rbd_struct +{ + u16 status; /* status word,number of used bytes in buff */ + u16 next; /* pointeroffset to next RBD */ + u32 buffer; /* receive buffer address pointer */ + u16 size; /* size of this buffer */ + u16 zero_dummy; /* dummy */ +}; + +#define RBD_LAST 0x8000 /* last buffer */ +#define RBD_USED 0x4000 /* this buffer has data */ +#define RBD_MASK 0x3fff /* size-mask for length */ + +/* + * Statusvalues for Commands/RFD + */ +#define STAT_COMPL 0x8000 /* status: frame/command is complete */ +#define STAT_BUSY 0x4000 /* status: frame/command is busy */ +#define STAT_OK 0x2000 /* status: frame/command is ok */ + +/* + * Action-Commands + */ +#define CMD_NOP 0x0000 /* NOP */ +#define CMD_IASETUP 0x0001 /* initial address setup command */ +#define CMD_CONFIGURE 0x0002 /* configure command */ +#define CMD_MCSETUP 0x0003 /* MC setup command */ +#define CMD_XMIT 0x0004 /* transmit command */ +#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */ +#define CMD_DUMP 0x0006 /* dump command */ +#define CMD_DIAGNOSE 0x0007 /* diagnose command */ + +/* + * Action command bits + */ +#define CMD_LAST 0x8000 /* indicates last command in the CBL */ +#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */ +#define CMD_INT 0x2000 /* generate interrupt after execution */ + +/* + * NOP - command + */ +struct nop_cmd_struct +{ + u16 cmd_status; /* status of this command */ + u16 cmd_cmd; /* the command itself (+bits) */ + u16 cmd_link; /* offsetpointer to next command */ +}; + +/* + * IA Setup command 
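+ * (loads the station's individual MAC address; init586() issues it
+ * with dev->dev_addr)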
+ */ +struct iasetup_cmd_struct +{ + u16 cmd_status; + u16 cmd_cmd; + u16 cmd_link; + u8 iaddr[6]; +}; + +/* + * Configure command + */ +struct configure_cmd_struct +{ + u16 cmd_status; + u16 cmd_cmd; + u16 cmd_link; + u8 byte_cnt; /* size of the config-cmd */ + u8 fifo; /* fifo/recv monitor */ + u8 sav_bf; /* save bad frames (bit7=1)*/ + u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ + u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ + u8 ifs; /* inter frame spacing */ + u8 time_low; /* slot time low */ + u8 time_high; /* slot time high(0-2) and max. retries(4-7) */ + u8 promisc; /* promisc-mode(0) , et al (1-7) */ + u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */ + u8 fram_len; /* minimal frame len */ + u8 dummy; /* dummy */ +}; + +/* + * Multicast Setup command + */ +struct mcsetup_cmd_struct +{ + u16 cmd_status; + u16 cmd_cmd; + u16 cmd_link; + u16 mc_cnt; /* number of bytes in the MC-List */ + u8 mc_list[0][6]; /* pointer to 6 bytes entries */ +}; + +/* + * DUMP command + */ +struct dump_cmd_struct +{ + u16 cmd_status; + u16 cmd_cmd; + u16 cmd_link; + u16 dump_offset; /* pointeroffset to DUMP space */ +}; + +/* + * transmit command + */ +struct transmit_cmd_struct +{ + u16 cmd_status; + u16 cmd_cmd; + u16 cmd_link; + u16 tbd_offset; /* pointeroffset to TBD */ + u8 dest[6]; /* destination address of the frame */ + u16 length; /* user defined: 802.3 length / Ether type */ +}; + +#define TCMD_ERRMASK 0x0fa0 +#define TCMD_MAXCOLLMASK 0x000f +#define TCMD_MAXCOLL 0x0020 +#define TCMD_HEARTBEAT 0x0040 +#define TCMD_DEFERRED 0x0080 +#define TCMD_UNDERRUN 0x0100 +#define TCMD_LOSTCTS 0x0200 +#define TCMD_NOCARRIER 0x0400 +#define TCMD_LATECOLL 0x0800 + +struct tdr_cmd_struct +{ + u16 cmd_status; + u16 cmd_cmd; + u16 cmd_link; + u16 status; +}; + +#define TDR_LNK_OK 0x8000 /* No link problem identified */ +#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */ +#define TDR_ET_OPN 0x2000 /* open, no correct termination */ +#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */ +#define TDR_TIMEMASK 0x07ff /* mask for the time field */ + +/* + * Transmit Buffer Descriptor (TBD) + */ +struct tbd_struct +{ + u16 size; /* size + EOF-Flag(15) */ + u16 next; /* pointeroffset to next TBD */ + u32 buffer; /* pointer to buffer */ +}; + +#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */ + + + + diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c new file mode 100644 index 0000000..6b2a888 --- /dev/null +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -0,0 +1,186 @@ +/* + * sni_82596.c -- driver for intel 82596 ethernet controller, as + * used in older SNI RM machines + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SNI_82596_DRIVER_VERSION "SNI RM 82596 driver - Revision: 0.01" + +static const char sni_82596_string[] = "snirm_82596"; + +#define DMA_ALLOC dma_alloc_coherent +#define DMA_FREE dma_free_coherent +#define DMA_WBACK(priv, addr, len) do { } while (0) +#define DMA_INV(priv, addr, len) do { } while (0) +#define DMA_WBACK_INV(priv, addr, len) do { } while (0) + +#define SYSBUS 0x00004400 + +/* big endian CPU, 82596 little endian */ +#define SWAP32(x) cpu_to_le32((u32)(x)) +#define SWAP16(x) cpu_to_le16((u16)(x)) + +#define OPT_MPU_16BIT 0x01 + +#include "lib82596.c" + +MODULE_AUTHOR("Thomas Bogendoerfer"); +MODULE_DESCRIPTION("i82596 driver"); 
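+/* lib82596.c is included textually above; the DMA_*, SWAP* and
+ * SYSBUS/OPT_MPU_16BIT macros defined in this file configure that
+ * shared 82596 core for the SNI RM platform. */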
+MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:snirm_82596"); +module_param(i596_debug, int, 0); +MODULE_PARM_DESC(i596_debug, "82596 debug mask"); + +static inline void ca(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + + writel(0, lp->ca); +} + + +static void mpu_port(struct net_device *dev, int c, dma_addr_t x) +{ + struct i596_private *lp = netdev_priv(dev); + + u32 v = (u32) (c) | (u32) (x); + + if (lp->options & OPT_MPU_16BIT) { + writew(v & 0xffff, lp->mpu_port); + wmb(); /* order writes to MPU port */ + udelay(1); + writew(v >> 16, lp->mpu_port); + } else { + writel(v, lp->mpu_port); + wmb(); /* order writes to MPU port */ + udelay(1); + writel(v, lp->mpu_port); + } +} + + +static int __devinit sni_82596_probe(struct platform_device *dev) +{ + struct net_device *netdevice; + struct i596_private *lp; + struct resource *res, *ca, *idprom, *options; + int retval = -ENOMEM; + void __iomem *mpu_addr; + void __iomem *ca_addr; + u8 __iomem *eth_addr; + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + ca = platform_get_resource(dev, IORESOURCE_MEM, 1); + options = platform_get_resource(dev, 0, 0); + idprom = platform_get_resource(dev, IORESOURCE_MEM, 2); + if (!res || !ca || !options || !idprom) + return -ENODEV; + mpu_addr = ioremap_nocache(res->start, 4); + if (!mpu_addr) + return -ENOMEM; + ca_addr = ioremap_nocache(ca->start, 4); + if (!ca_addr) + goto probe_failed_free_mpu; + + printk(KERN_INFO "Found i82596 at 0x%x\n", res->start); + + netdevice = alloc_etherdev(sizeof(struct i596_private)); + if (!netdevice) + goto probe_failed_free_ca; + + SET_NETDEV_DEV(netdevice, &dev->dev); + platform_set_drvdata (dev, netdevice); + + netdevice->base_addr = res->start; + netdevice->irq = platform_get_irq(dev, 0); + + eth_addr = ioremap_nocache(idprom->start, 0x10); + if (!eth_addr) + goto probe_failed; + + /* someone seems to like messed up stuff */ + netdevice->dev_addr[0] = readb(eth_addr + 0x0b); + netdevice->dev_addr[1] = readb(eth_addr + 0x0a); + netdevice->dev_addr[2] = readb(eth_addr + 0x09); + netdevice->dev_addr[3] = readb(eth_addr + 0x08); + netdevice->dev_addr[4] = readb(eth_addr + 0x07); + netdevice->dev_addr[5] = readb(eth_addr + 0x06); + iounmap(eth_addr); + + if (!netdevice->irq) { + printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", + __FILE__, netdevice->base_addr); + goto probe_failed; + } + + lp = netdev_priv(netdevice); + lp->options = options->flags & IORESOURCE_BITS; + lp->ca = ca_addr; + lp->mpu_port = mpu_addr; + + retval = i82596_probe(netdevice); + if (retval == 0) + return 0; + +probe_failed: + free_netdev(netdevice); +probe_failed_free_ca: + iounmap(ca_addr); +probe_failed_free_mpu: + iounmap(mpu_addr); + return retval; +} + +static int __devexit sni_82596_driver_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct i596_private *lp = netdev_priv(dev); + + unregister_netdev(dev); + DMA_FREE(dev->dev.parent, sizeof(struct i596_private), + lp->dma, lp->dma_addr); + iounmap(lp->ca); + iounmap(lp->mpu_port); + free_netdev (dev); + return 0; +} + +static struct platform_driver sni_82596_driver = { + .probe = sni_82596_probe, + .remove = __devexit_p(sni_82596_driver_remove), + .driver = { + .name = sni_82596_string, + .owner = THIS_MODULE, + }, +}; + +static int __devinit sni_82596_init(void) +{ + printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n"); + return platform_driver_register(&sni_82596_driver); +} + + +static void __exit sni_82596_exit(void) +{ + 
platform_driver_unregister(&sni_82596_driver); +} + +module_init(sni_82596_init); +module_exit(sni_82596_exit); diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c new file mode 100644 index 0000000..b6ae53b --- /dev/null +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -0,0 +1,1213 @@ +/* + * Sun3 i82586 Ethernet driver + * + * Cloned from ni52.c for the Sun3 by Sam Creasey (sammy@sammy.net) + * + * Original copyright follows: + * -------------------------- + * + * net-3-driver for the NI5210 card (i82586 Ethernet chip) + * + * This is an extension to the Linux operating system, and is covered by the + * same Gnu Public License that covers that work. + * + * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later) + * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de) + * -------------------------- + * + * Consult ni52.c for further notes from the original driver. + * + * This incarnation currently supports the OBIO version of the i82586 chip + * used in certain sun3 models. It should be fairly doable to expand this + * to support VME if I should every acquire such a board. + * + */ + +static int debuglevel = 0; /* debug-printk 0: off 1: a few 2: more */ +static int automatic_resume = 0; /* experimental .. better should be zero */ +static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */ +static int fifo=0x8; /* don't change */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sun3_82586.h" + +#define DRV_NAME "sun3_82586" + +#define DEBUG /* debug on */ +#define SYSBUSVAL 0 /* 16 Bit */ +#define SUN3_82586_TOTAL_SIZE PAGE_SIZE + +#define sun3_attn586() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_ATTEN; *(volatile unsigned char *)(dev->base_addr) &= ~IEOB_ATTEN;} +#define sun3_reset586() {*(volatile unsigned char *)(dev->base_addr) = 0; udelay(100); *(volatile unsigned char *)(dev->base_addr) = IEOB_NORSET;} +#define sun3_disint() {*(volatile unsigned char *)(dev->base_addr) &= ~IEOB_IENAB;} +#define sun3_enaint() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_IENAB;} +#define sun3_active() {*(volatile unsigned char *)(dev->base_addr) |= (IEOB_IENAB|IEOB_ONAIR|IEOB_NORSET);} + +#define make32(ptr16) (p->memtop + (swab16((unsigned short) (ptr16))) ) +#define make24(ptr32) (char *)swab32(( ((unsigned long) (ptr32)) - p->base)) +#define make16(ptr32) (swab16((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop ))) + +/******************* how to calculate the buffers ***************************** + + * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works + * --------------- in a different (more stable?) mode. 
Only in this mode it's + * possible to configure the driver with 'NO_NOPCOMMANDS' + +sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8; +sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT +sizeof(rfd) = 24; sizeof(rbd) = 12; +sizeof(tbd) = 8; sizeof(transmit_cmd) = 16; +sizeof(nop_cmd) = 8; + + * if you don't know the driver, better do not change these values: */ + +#define RECV_BUFF_SIZE 1536 /* slightly oversized */ +#define XMIT_BUFF_SIZE 1536 /* slightly oversized */ +#define NUM_XMIT_BUFFS 1 /* config for 32K shmem */ +#define NUM_RECV_BUFFS_8 4 /* config for 32K shared mem */ +#define NUM_RECV_BUFFS_16 9 /* config for 32K shared mem */ +#define NUM_RECV_BUFFS_32 16 /* config for 32K shared mem */ +#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */ + +/**************************************************************************/ + +/* different DELAYs */ +#define DELAY(x) mdelay(32 * x); +#define DELAY_16(); { udelay(16); } +#define DELAY_18(); { udelay(4); } + +/* wait for command with timeout: */ +#define WAIT_4_SCB_CMD() \ +{ int i; \ + for(i=0;i<16384;i++) { \ + if(!p->scb->cmd_cuc) break; \ + DELAY_18(); \ + if(i == 16383) { \ + printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \ + if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } } + +#define WAIT_4_SCB_CMD_RUC() { int i; \ + for(i=0;i<16384;i++) { \ + if(!p->scb->cmd_ruc) break; \ + DELAY_18(); \ + if(i == 16383) { \ + printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \ + if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } } + +#define WAIT_4_STAT_COMPL(addr) { int i; \ + for(i=0;i<32767;i++) { \ + if(swab16((addr)->cmd_status) & STAT_COMPL) break; \ + DELAY_16(); DELAY_16(); } } + +static int sun3_82586_probe1(struct net_device *dev,int ioaddr); +static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); +static int sun3_82586_open(struct net_device *dev); +static int sun3_82586_close(struct net_device *dev); +static int sun3_82586_send_packet(struct sk_buff *,struct net_device *); +static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void sun3_82586_timeout(struct net_device *dev); +#if 0 +static void sun3_82586_dump(struct net_device *,void *); +#endif + +/* helper-functions */ +static int init586(struct net_device *dev); +static int check586(struct net_device *dev,char *where,unsigned size); +static void alloc586(struct net_device *dev); +static void startrecv586(struct net_device *dev); +static void *alloc_rfa(struct net_device *dev,void *ptr); +static void sun3_82586_rcv_int(struct net_device *dev); +static void sun3_82586_xmt_int(struct net_device *dev); +static void sun3_82586_rnr_int(struct net_device *dev); + +struct priv +{ + unsigned long base; + char *memtop; + long int lock; + int reseted; + volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first; + volatile struct scp_struct *scp; /* volatile is important */ + volatile struct iscp_struct *iscp; /* volatile is important */ + volatile struct scb_struct *scb; /* volatile is important */ + volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; + volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; +#if (NUM_XMIT_BUFFS == 1) + volatile struct nop_cmd_struct *nop_cmds[2]; +#else + volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; +#endif + volatile int nop_point,num_recv_buffs; + volatile char 
*xmit_cbuffs[NUM_XMIT_BUFFS];
+ volatile int xmit_count,xmit_last;
+};
+
+/**********************************************
+ * close device
+ */
+static int sun3_82586_close(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+
+ sun3_reset586(); /* the hard way to stop the receiver */
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+static int sun3_82586_open(struct net_device *dev)
+{
+ int ret;
+
+ sun3_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ sun3_enaint();
+
+ ret = request_irq(dev->irq, sun3_82586_interrupt,0,dev->name,dev);
+ if (ret)
+ {
+ sun3_reset586();
+ return ret;
+ }
+
+ netif_start_queue(dev);
+
+ return 0; /* most done by init */
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
+static int check586(struct net_device *dev,char *where,unsigned size)
+{
+ struct priv pb;
+ struct priv *p = &pb;
+ char *iscp_addr;
+ int i;
+
+ p->base = (unsigned long) dvma_btov(0);
+ p->memtop = (char *)dvma_btov((unsigned long)where);
+ p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
+ memset((char *)p->scp,0, sizeof(struct scp_struct));
+ for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */
+ if(((char *)p->scp)[i])
+ return 0;
+ p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
+ if(p->scp->sysbus != SYSBUSVAL)
+ return 0;
+
+ iscp_addr = (char *)dvma_btov((unsigned long)where);
+
+ p->iscp = (struct iscp_struct *) iscp_addr;
+ memset((char *)p->iscp,0, sizeof(struct iscp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->iscp->busy = 1;
+
+ sun3_reset586();
+ sun3_attn586();
+ DELAY(1); /* wait a while... */
+
+ if(p->iscp->busy) /* i82586 clears 'busy' after successful init */
+ return 0;
+
+ return 1;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by sun3_82586_probe1 and open586.
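check586() above relies on the make16()/make24() macros defined earlier to translate CPU pointers into the chip's two views of shared memory: a 24-bit bus address relative to base, and a 16-bit offset relative to memtop. On the Sun3 both values are additionally byte-swapped, since the CPU is big-endian while the i82586 stores its pointers little-endian. A standalone sketch of just the offset arithmetic, with invented addresses and the byte swap left out:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uintptr_t base   = 0x0f000000;  /* start of the chip's 24-bit window */
        uintptr_t memtop = 0x0f008000;  /* base for 16-bit SCB offsets */
        uintptr_t obj    = 0x0f008140;  /* some descriptor in shared memory */

        uint32_t addr24 = (uint32_t)((obj - base) & 0x00ffffff); /* make24 */
        uint16_t off16  = (uint16_t)(obj - memtop);              /* make16 */

        printf("24-bit bus address: 0x%06x\n", (unsigned)addr24);
        printf("16-bit SCB offset:  0x%04x\n", (unsigned)off16);
        return 0;
}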
+ */ +static void alloc586(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + sun3_reset586(); + DELAY(1); + + p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS); + p->iscp = (struct iscp_struct *) dvma_btov(dev->mem_start); + p->scb = (struct scb_struct *) ((char *)p->iscp + sizeof(struct iscp_struct)); + + memset((char *) p->iscp,0,sizeof(struct iscp_struct)); + memset((char *) p->scp ,0,sizeof(struct scp_struct)); + + p->scp->iscp = make24(p->iscp); + p->scp->sysbus = SYSBUSVAL; + p->iscp->scb_offset = make16(p->scb); + p->iscp->scb_base = make24(dvma_btov(dev->mem_start)); + + p->iscp->busy = 1; + sun3_reset586(); + sun3_attn586(); + + DELAY(1); + + if(p->iscp->busy) + printk("%s: Init-Problems (alloc).\n",dev->name); + + p->reseted = 0; + + memset((char *)p->scb,0,sizeof(struct scb_struct)); +} + +struct net_device * __init sun3_82586_probe(int unit) +{ + struct net_device *dev; + unsigned long ioaddr; + static int found = 0; + int err = -ENOMEM; + + /* check that this machine has an onboard 82586 */ + switch(idprom->id_machtype) { + case SM_SUN3|SM_3_160: + case SM_SUN3|SM_3_260: + /* these machines have 82586 */ + break; + + default: + return ERR_PTR(-ENODEV); + } + + if (found) + return ERR_PTR(-ENODEV); + + ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE); + if (!ioaddr) + return ERR_PTR(-ENOMEM); + found = 1; + + dev = alloc_etherdev(sizeof(struct priv)); + if (!dev) + goto out; + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + + dev->irq = IE_IRQ; + dev->base_addr = ioaddr; + err = sun3_82586_probe1(dev, ioaddr); + if (err) + goto out1; + err = register_netdev(dev); + if (err) + goto out2; + return dev; + +out2: + release_region(ioaddr, SUN3_82586_TOTAL_SIZE); +out1: + free_netdev(dev); +out: + iounmap((void __iomem *)ioaddr); + return ERR_PTR(err); +} + +static const struct net_device_ops sun3_82586_netdev_ops = { + .ndo_open = sun3_82586_open, + .ndo_stop = sun3_82586_close, + .ndo_start_xmit = sun3_82586_send_packet, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = sun3_82586_timeout, + .ndo_get_stats = sun3_82586_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) +{ + int i, size, retval; + + if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME)) + return -EBUSY; + + /* copy in the ethernet address from the prom */ + for(i = 0; i < 6 ; i++) + dev->dev_addr[i] = idprom->id_ethaddr[i]; + + printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr); + + /* + * check (or search) IO-Memory, 32K + */ + size = 0x8000; + + dev->mem_start = (unsigned long)dvma_malloc_align(0x8000, 0x1000); + dev->mem_end = dev->mem_start + size; + + if(size != 0x2000 && size != 0x4000 && size != 0x8000) { + printk("\n%s: Illegal memory size %d. 
Allowed is 0x2000 or 0x4000 or 0x8000 bytes.\n",dev->name,size); + retval = -ENODEV; + goto out; + } + if(!check586(dev,(char *) dev->mem_start,size)) { + printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size); + retval = -ENODEV; + goto out; + } + + ((struct priv *)netdev_priv(dev))->memtop = + (char *)dvma_btov(dev->mem_start); + ((struct priv *)netdev_priv(dev))->base = (unsigned long) dvma_btov(0); + alloc586(dev); + + /* set number of receive-buffs according to memsize */ + if(size == 0x2000) + ((struct priv *)netdev_priv(dev))->num_recv_buffs = + NUM_RECV_BUFFS_8; + else if(size == 0x4000) + ((struct priv *)netdev_priv(dev))->num_recv_buffs = + NUM_RECV_BUFFS_16; + else + ((struct priv *)netdev_priv(dev))->num_recv_buffs = + NUM_RECV_BUFFS_32; + + printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq); + + dev->netdev_ops = &sun3_82586_netdev_ops; + dev->watchdog_timeo = HZ/20; + + dev->if_port = 0; + return 0; +out: + release_region(ioaddr, SUN3_82586_TOTAL_SIZE); + return retval; +} + + +static int init586(struct net_device *dev) +{ + void *ptr; + int i,result=0; + struct priv *p = netdev_priv(dev); + volatile struct configure_cmd_struct *cfg_cmd; + volatile struct iasetup_cmd_struct *ias_cmd; + volatile struct tdr_cmd_struct *tdr_cmd; + volatile struct mcsetup_cmd_struct *mc_cmd; + struct netdev_hw_addr *ha; + int num_addrs=netdev_mc_count(dev); + + ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); + + cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */ + cfg_cmd->cmd_status = 0; + cfg_cmd->cmd_cmd = swab16(CMD_CONFIGURE | CMD_LAST); + cfg_cmd->cmd_link = 0xffff; + + cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ + cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */ + cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ + cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ + cfg_cmd->priority = 0x00; + cfg_cmd->ifs = 0x60; + cfg_cmd->time_low = 0x00; + cfg_cmd->time_high = 0xf2; + cfg_cmd->promisc = 0; + if(dev->flags & IFF_ALLMULTI) { + int len = ((char *) p->iscp - (char *) ptr - 8) / 6; + if(num_addrs > len) { + printk("%s: switching to promisc. mode\n",dev->name); + cfg_cmd->promisc = 1; + } + } + if(dev->flags&IFF_PROMISC) + cfg_cmd->promisc = 1; + cfg_cmd->carr_coll = 0x00; + + p->scb->cbl_offset = make16(cfg_cmd); + p->scb->cmd_ruc = 0; + + p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ + sun3_attn586(); + + WAIT_4_STAT_COMPL(cfg_cmd); + + if((swab16(cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK)) + { + printk("%s: configure command failed: %x\n",dev->name,swab16(cfg_cmd->cmd_status)); + return 1; + } + + /* + * individual address setup + */ + + ias_cmd = (struct iasetup_cmd_struct *)ptr; + + ias_cmd->cmd_status = 0; + ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST); + ias_cmd->cmd_link = 0xffff; + + memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN); + + p->scb->cbl_offset = make16(ias_cmd); + + p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ + sun3_attn586(); + + WAIT_4_STAT_COMPL(ias_cmd); + + if((swab16(ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) { + printk("%s (82586): individual address setup command failed: %04x\n",dev->name,swab16(ias_cmd->cmd_status)); + return 1; + } + + /* + * TDR, wire check .. e.g. 
no resistor etc.
+ */
+
+ tdr_cmd = (struct tdr_cmd_struct *)ptr;
+
+ tdr_cmd->cmd_status = 0;
+ tdr_cmd->cmd_cmd = swab16(CMD_TDR | CMD_LAST);
+ tdr_cmd->cmd_link = 0xffff;
+ tdr_cmd->status = 0;
+
+ p->scb->cbl_offset = make16(tdr_cmd);
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(tdr_cmd);
+
+ if(!(swab16(tdr_cmd->cmd_status) & STAT_COMPL))
+ {
+ printk("%s: Problems while running the TDR.\n",dev->name);
+ }
+ else
+ {
+ DELAY_16(); /* wait for result */
+ result = swab16(tdr_cmd->status);
+
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ sun3_attn586(); /* ack the interrupts */
+
+ if(result & TDR_LNK_OK)
+ ;
+ else if(result & TDR_XCVR_PRB)
+ printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name);
+ else if(result & TDR_ET_OPN)
+ printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ else if(result & TDR_ET_SRT)
+ {
+ if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
+ printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ }
+ else
+ printk("%s: TDR: Unknown status %04x\n",dev->name,result);
+ }
+
+ /*
+ * Multicast setup
+ */
+ if(num_addrs && !(dev->flags & IFF_PROMISC) )
+ {
+ mc_cmd = (struct mcsetup_cmd_struct *) ptr;
+ mc_cmd->cmd_status = 0;
+ mc_cmd->cmd_cmd = swab16(CMD_MCSETUP | CMD_LAST);
+ mc_cmd->cmd_link = 0xffff;
+ mc_cmd->mc_cnt = swab16(num_addrs * 6);
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ ha->addr, ETH_ALEN);
+
+ p->scb->cbl_offset = make16(mc_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(mc_cmd);
+
+ if( (swab16(mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't apply multicast-address-list.\n",dev->name);
+ }
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
+ for(i=0;i<2;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP);
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#else
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP);
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#endif
+
+ ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+ p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
+ ptr = (char *) ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
+ ptr = (char *) ptr + sizeof(struct tbd_struct);
+ if((void *)ptr > (void *)dev->mem_end)
+ {
+ printk("%s: not enough shared-mem for your configuration!\n",dev->name);
+ return 1;
+ }
+ memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct));
+ memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct));
+ p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]);
+ p->xmit_cmds[i]->cmd_status = swab16(STAT_COMPL);
+ p->xmit_cmds[i]->cmd_cmd = swab16(CMD_XMIT | CMD_INT);
+ p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
+ p->xmit_buffs[i]->next = 0xffff;
+ p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef
NO_NOPCOMMANDS + p->nop_point = 0; +#endif + + /* + * 'start transmitter' + */ +#ifndef NO_NOPCOMMANDS + p->scb->cbl_offset = make16(p->nop_cmds[0]); + p->scb->cmd_cuc = CUC_START; + sun3_attn586(); + WAIT_4_SCB_CMD(); +#else + p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]); + p->xmit_cmds[0]->cmd_cmd = swab16(CMD_XMIT | CMD_SUSPEND | CMD_INT); +#endif + + /* + * ack. interrupts + */ + p->scb->cmd_cuc = p->scb->cus & STAT_MASK; + sun3_attn586(); + DELAY_16(); + + sun3_enaint(); + sun3_active(); + + return 0; +} + +/****************************************************** + * This is a helper routine for sun3_82586_rnr_int() and init586(). + * It sets up the Receive Frame Area (RFA). + */ + +static void *alloc_rfa(struct net_device *dev,void *ptr) +{ + volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr; + volatile struct rbd_struct *rbd; + int i; + struct priv *p = netdev_priv(dev); + + memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd)); + p->rfd_first = rfd; + + for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) { + rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) ); + rfd[i].rbd_offset = 0xffff; + } + rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */ + + ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) ); + + rbd = (struct rbd_struct *) ptr; + ptr = (void *) (rbd + p->num_recv_buffs); + + /* clr descriptors */ + memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs)); + + for(i=0;inum_recv_buffs;i++) + { + rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs)); + rbd[i].size = swab16(RECV_BUFF_SIZE); + rbd[i].buffer = make24(ptr); + ptr = (char *) ptr + RECV_BUFF_SIZE; + } + + p->rfd_top = p->rfd_first; + p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); + + p->scb->rfa_offset = make16(p->rfd_first); + p->rfd_first->rbd_offset = make16(rbd); + + return ptr; +} + + +/************************************************** + * Interrupt Handler ... + */ + +static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id) +{ + struct net_device *dev = dev_id; + unsigned short stat; + int cnt=0; + struct priv *p; + + if (!dev) { + printk ("sun3_82586-interrupt: irq %d for unknown device.\n",irq); + return IRQ_NONE; + } + p = netdev_priv(dev); + + if(debuglevel > 1) + printk("I"); + + WAIT_4_SCB_CMD(); /* wait for last command */ + + while((stat=p->scb->cus & STAT_MASK)) + { + p->scb->cmd_cuc = stat; + sun3_attn586(); + + if(stat & STAT_FR) /* received a frame */ + sun3_82586_rcv_int(dev); + + if(stat & STAT_RNR) /* RU went 'not ready' */ + { + printk("(R)"); + if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */ + { + WAIT_4_SCB_CMD(); + p->scb->cmd_ruc = RUC_RESUME; + sun3_attn586(); + WAIT_4_SCB_CMD_RUC(); + } + else + { + printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus); + sun3_82586_rnr_int(dev); + } + } + + if(stat & STAT_CX) /* command with I-bit set complete */ + sun3_82586_xmt_int(dev); + +#ifndef NO_NOPCOMMANDS + if(stat & STAT_CNA) /* CU went 'not ready' */ + { + if(netif_running(dev)) + printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus); + } +#endif + + if(debuglevel > 1) + printk("%d",cnt++); + + WAIT_4_SCB_CMD(); /* wait for ack. (sun3_82586_xmt_int can be faster than ack!!) */ + if(p->scb->cmd_cuc) /* timed out? 
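The loop above performs the usual i82586 acknowledge handshake: the observed status bits are written back into the SCB command word, channel attention is raised, and the chip clears the command word once it accepts the acknowledgement. A compilable model of that handshake, assuming a memory-mapped SCB and a bounded spin like WAIT_4_SCB_CMD():

#include <stdint.h>
#include <stdio.h>

#define STAT_MASK 0xf0                  /* CX | FR | CNA | RNR */

struct scb { volatile uint8_t rus, cus, cmd_ruc, cmd_cuc; };

static void channel_attention(struct scb *scb)
{
        (void)scb;                      /* board-specific CA strobe */
}

static int ack_events(struct scb *scb)
{
        uint8_t stat = scb->cus & STAT_MASK;
        int spins = 16384;              /* bounded, like WAIT_4_SCB_CMD() */

        scb->cmd_cuc = stat;            /* ACK bits mirror the status bits */
        channel_attention(scb);
        while (scb->cmd_cuc && --spins) /* chip clears cmd when accepted */
                ;
        return spins ? 0 : -1;          /* -1: acknowledge timed out */
}

int main(void)
{
        struct scb fake = { 0, 0, 0, 0 }; /* quiescent chip: nothing to ack */

        printf("ack result: %d\n", ack_events(&fake));
        return 0;
}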
*/ + { + printk("%s: Acknowledge timed out.\n",dev->name); + sun3_disint(); + break; + } + } + + if(debuglevel > 1) + printk("i"); + return IRQ_HANDLED; +} + +/******************************************************* + * receive-interrupt + */ + +static void sun3_82586_rcv_int(struct net_device *dev) +{ + int status,cnt=0; + unsigned short totlen; + struct sk_buff *skb; + struct rbd_struct *rbd; + struct priv *p = netdev_priv(dev); + + if(debuglevel > 0) + printk("R"); + + for(;(status = p->rfd_top->stat_high) & RFD_COMPL;) + { + rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); + + if(status & RFD_OK) /* frame received without error? */ + { + if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer? */ + { + totlen &= RBD_MASK; /* length of this frame */ + rbd->status = 0; + skb = (struct sk_buff *) dev_alloc_skb(totlen+2); + if(skb != NULL) + { + skb_reserve(skb,2); + skb_put(skb,totlen); + skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen); + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + else + dev->stats.rx_dropped++; + } + else + { + int rstat; + /* free all RBD's until RBD_LAST is set */ + totlen = 0; + while(!((rstat=swab16(rbd->status)) & RBD_LAST)) + { + totlen += rstat & RBD_MASK; + if(!rstat) + { + printk("%s: Whoops .. no end mark in RBD list\n",dev->name); + break; + } + rbd->status = 0; + rbd = (struct rbd_struct *) make32(rbd->next); + } + totlen += rstat & RBD_MASK; + rbd->status = 0; + printk("%s: received oversized frame! length: %d\n",dev->name,totlen); + dev->stats.rx_dropped++; + } + } + else /* frame !(ok), only with 'save-bad-frames' */ + { + printk("%s: oops! rfd-error-status: %04x\n",dev->name,status); + dev->stats.rx_errors++; + } + p->rfd_top->stat_high = 0; + p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */ + p->rfd_top->rbd_offset = 0xffff; + p->rfd_last->last = 0; /* delete RFD_SUSP */ + p->rfd_last = p->rfd_top; + p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ + p->scb->rfa_offset = make16(p->rfd_top); + + if(debuglevel > 0) + printk("%d",cnt++); + } + + if(automatic_resume) + { + WAIT_4_SCB_CMD(); + p->scb->cmd_ruc = RUC_RESUME; + sun3_attn586(); + WAIT_4_SCB_CMD_RUC(); + } + +#ifdef WAIT_4_BUSY + { + int i; + for(i=0;i<1024;i++) + { + if(p->rfd_top->status) + break; + DELAY_16(); + if(i == 1023) + printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name); + } + } +#endif + +#if 0 + if(!at_least_one) + { + int i; + volatile struct rfd_struct *rfds=p->rfd_top; + volatile struct rbd_struct *rbds; + printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least); + for(i=0;i< (p->num_recv_buffs+4);i++) + { + rbds = (struct rbd_struct *) make32(rfds->rbd_offset); + printk("%04x:%04x ",rfds->status,rbds->status); + rfds = (struct rfd_struct *) make32(rfds->next); + } + printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status); + printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus); + } + old_at_least = at_least_one; +#endif + + if(debuglevel > 0) + printk("r"); +} + +/********************************************************** + * handle 'Receiver went not ready'. 
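sun3_82586_rcv_int() above recycles descriptors by rotating a suspend mark around the RFD ring: each consumed RFD becomes the new end of the list and the previous end is re-armed, so the receive unit never runs off the ring. A small self-contained model of that rotation, with an invented 4-entry ring:

#include <stdio.h>

#define NRFD     4
#define RFD_SUSP 0x40

struct rfd { unsigned char last; int next; };

int main(void)
{
        struct rfd ring[NRFD];
        int top = 0, last = NRFD - 1, i;

        for (i = 0; i < NRFD; i++) {
                ring[i].last = 0;
                ring[i].next = (i + 1) % NRFD;
        }
        ring[last].last = RFD_SUSP;         /* one suspend mark closes the ring */

        for (i = 0; i < 6; i++) {           /* consume six frames */
                ring[top].last = RFD_SUSP;  /* consumed RFD ends the list */
                ring[last].last = 0;        /* old end rejoins the ring */
                last = top;
                top = ring[top].next;       /* step to the next RFD */
        }
        printf("top=%d last=%d\n", top, last);  /* prints: top=2 last=1 */
        return 0;
}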
+ */ + +static void sun3_82586_rnr_int(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + dev->stats.rx_errors++; + + WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ + p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ + sun3_attn586(); + WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */ + + alloc_rfa(dev,(char *)p->rfd_first); +/* maybe add a check here, before restarting the RU */ + startrecv586(dev); /* restart RU */ + + printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus); + +} + +/********************************************************** + * handle xmit - interrupt + */ + +static void sun3_82586_xmt_int(struct net_device *dev) +{ + int status; + struct priv *p = netdev_priv(dev); + + if(debuglevel > 0) + printk("X"); + + status = swab16(p->xmit_cmds[p->xmit_last]->cmd_status); + if(!(status & STAT_COMPL)) + printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name); + + if(status & STAT_OK) + { + dev->stats.tx_packets++; + dev->stats.collisions += (status & TCMD_MAXCOLLMASK); + } + else + { + dev->stats.tx_errors++; + if(status & TCMD_LATECOLL) { + printk("%s: late collision detected.\n",dev->name); + dev->stats.collisions++; + } + else if(status & TCMD_NOCARRIER) { + dev->stats.tx_carrier_errors++; + printk("%s: no carrier detected.\n",dev->name); + } + else if(status & TCMD_LOSTCTS) + printk("%s: loss of CTS detected.\n",dev->name); + else if(status & TCMD_UNDERRUN) { + dev->stats.tx_fifo_errors++; + printk("%s: DMA underrun detected.\n",dev->name); + } + else if(status & TCMD_MAXCOLL) { + printk("%s: Max. collisions exceeded.\n",dev->name); + dev->stats.collisions += 16; + } + } + +#if (NUM_XMIT_BUFFS > 1) + if( (++p->xmit_last) == NUM_XMIT_BUFFS) + p->xmit_last = 0; +#endif + netif_wake_queue(dev); +} + +/*********************************************************** + * (re)start the receiver + */ + +static void startrecv586(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + WAIT_4_SCB_CMD(); + WAIT_4_SCB_CMD_RUC(); + p->scb->rfa_offset = make16(p->rfd_first); + p->scb->cmd_ruc = RUC_START; + sun3_attn586(); /* start cmd. */ + WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */ +} + +static void sun3_82586_timeout(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); +#ifndef NO_NOPCOMMANDS + if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */ + { + netif_wake_queue(dev); +#ifdef DEBUG + printk("%s: strange ... timeout with CU active?!?\n",dev->name); + printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)swab16(p->xmit_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[1]->cmd_status),(int)p->nop_point); +#endif + p->scb->cmd_cuc = CUC_ABORT; + sun3_attn586(); + WAIT_4_SCB_CMD(); + p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]); + p->scb->cmd_cuc = CUC_START; + sun3_attn586(); + WAIT_4_SCB_CMD(); + dev->trans_start = jiffies; /* prevent tx timeout */ + return 0; + } +#endif + { +#ifdef DEBUG + printk("%s: xmitter timed out, try to restart! 
stat: %02x\n",dev->name,p->scb->cus); + printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status)); + printk("%s: check, whether you set the right interrupt number!\n",dev->name); +#endif + sun3_82586_close(dev); + sun3_82586_open(dev); + } + dev->trans_start = jiffies; /* prevent tx timeout */ +} + +/****************************************************** + * send frame + */ + +static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) +{ + int len,i; +#ifndef NO_NOPCOMMANDS + int next_nop; +#endif + struct priv *p = netdev_priv(dev); + + if(skb->len > XMIT_BUFF_SIZE) + { + printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); + return NETDEV_TX_OK; + } + + netif_stop_queue(dev); + +#if(NUM_XMIT_BUFFS > 1) + if(test_and_set_bit(0,(void *) &p->lock)) { + printk("%s: Queue was locked\n",dev->name); + return NETDEV_TX_BUSY; + } + else +#endif + { + len = skb->len; + if (len < ETH_ZLEN) { + memset((void *)p->xmit_cbuffs[p->xmit_count], 0, + ETH_ZLEN); + len = ETH_ZLEN; + } + skb_copy_from_linear_data(skb, (void *)p->xmit_cbuffs[p->xmit_count], skb->len); + +#if (NUM_XMIT_BUFFS == 1) +# ifdef NO_NOPCOMMANDS + +#ifdef DEBUG + if(p->scb->cus & CU_ACTIVE) + { + printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name); + printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,swab16(p->xmit_cmds[0]->cmd_status)); + } +#endif + + p->xmit_buffs[0]->size = swab16(TBD_LAST | len); + for(i=0;i<16;i++) + { + p->xmit_cmds[0]->cmd_status = 0; + WAIT_4_SCB_CMD(); + if( (p->scb->cus & CU_STATUS) == CU_SUSPEND) + p->scb->cmd_cuc = CUC_RESUME; + else + { + p->scb->cbl_offset = make16(p->xmit_cmds[0]); + p->scb->cmd_cuc = CUC_START; + } + + sun3_attn586(); + if(!i) + dev_kfree_skb(skb); + WAIT_4_SCB_CMD(); + if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */ + break; + if(p->xmit_cmds[0]->cmd_status) + break; + if(i==15) + printk("%s: Can't start transmit-command.\n",dev->name); + } +# else + next_nop = (p->nop_point + 1) & 0x1; + p->xmit_buffs[0]->size = swab16(TBD_LAST | len); + + p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link + = make16((p->nop_cmds[next_nop])); + p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0; + + p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0])); + p->nop_point = next_nop; + dev_kfree_skb(skb); +# endif +#else + p->xmit_buffs[p->xmit_count]->size = swab16(TBD_LAST | len); + if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS ) + next_nop = 0; + + p->xmit_cmds[p->xmit_count]->cmd_status = 0; + /* linkpointer of xmit-command already points to next nop cmd */ + p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop])); + p->nop_cmds[next_nop]->cmd_status = 0; + + p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); + p->xmit_count = next_nop; + + { + unsigned long flags; + local_irq_save(flags); + if(p->xmit_count != p->xmit_last) + netif_wake_queue(dev); + p->lock = 0; + local_irq_restore(flags); + } + dev_kfree_skb(skb); +#endif + } + return NETDEV_TX_OK; +} + +/******************************************* + * Someone wanna have the statistics + */ + +static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + unsigned short crc,aln,rsc,ovrn; + + crc = swab16(p->scb->crc_errs); /* get error-statistic from the ni82586 */ + 
p->scb->crc_errs = 0; + aln = swab16(p->scb->aln_errs); + p->scb->aln_errs = 0; + rsc = swab16(p->scb->rsc_errs); + p->scb->rsc_errs = 0; + ovrn = swab16(p->scb->ovrn_errs); + p->scb->ovrn_errs = 0; + + dev->stats.rx_crc_errors += crc; + dev->stats.rx_fifo_errors += ovrn; + dev->stats.rx_frame_errors += aln; + dev->stats.rx_dropped += rsc; + + return &dev->stats; +} + +/******************************************************** + * Set MC list .. + */ + +static void set_multicast_list(struct net_device *dev) +{ + netif_stop_queue(dev); + sun3_disint(); + alloc586(dev); + init586(dev); + startrecv586(dev); + sun3_enaint(); + netif_wake_queue(dev); +} + +#ifdef MODULE +#error This code is not currently supported as a module +static struct net_device *dev_sun3_82586; + +int init_module(void) +{ + dev_sun3_82586 = sun3_82586_probe(-1); + if (IS_ERR(dev_sun3_82586)) + return PTR_ERR(dev_sun3_82586); + return 0; +} + +void cleanup_module(void) +{ + unsigned long ioaddr = dev_sun3_82586->base_addr; + unregister_netdev(dev_sun3_82586); + release_region(ioaddr, SUN3_82586_TOTAL_SIZE); + iounmap((void *)ioaddr); + free_netdev(dev_sun3_82586); +} +#endif /* MODULE */ + +#if 0 +/* + * DUMP .. we expect a not running CMD unit and enough space + */ +void sun3_82586_dump(struct net_device *dev,void *ptr) +{ + struct priv *p = netdev_priv(dev); + struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr; + int i; + + p->scb->cmd_cuc = CUC_ABORT; + sun3_attn586(); + WAIT_4_SCB_CMD(); + WAIT_4_SCB_CMD_RUC(); + + dump_cmd->cmd_status = 0; + dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST; + dump_cmd->dump_offset = make16((dump_cmd + 1)); + dump_cmd->cmd_link = 0xffff; + + p->scb->cbl_offset = make16(dump_cmd); + p->scb->cmd_cuc = CUC_START; + sun3_attn586(); + WAIT_4_STAT_COMPL(dump_cmd); + + if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) + printk("%s: Can't get dump information.\n",dev->name); + + for(i=0;i<170;i++) { + printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]); + if(i % 24 == 23) + printk("\n"); + } + printk("\n"); +} +#endif + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h new file mode 100644 index 0000000..93346f0 --- /dev/null +++ b/drivers/net/ethernet/i825xx/sun3_82586.h @@ -0,0 +1,318 @@ +/* + * Intel i82586 Ethernet definitions + * + * This is an extension to the Linux operating system, and is covered by the + * same Gnu Public License that covers that work. + * + * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de) + * + * I have done a look in the following sources: + * crynwr-packet-driver by Russ Nelson + * Garret A. Wollman's i82586-driver for BSD + */ + +/* + * Cloned from ni52.h, copyright as above. + * + * Modified for Sun3 OBIO i82586 by Sam Creasey (sammy@sammy.net) + */ + + +/* defines for the obio chip (not vme) */ +#define IEOB_NORSET 0x80 /* don't reset the board */ +#define IEOB_ONAIR 0x40 /* put us on the air */ +#define IEOB_ATTEN 0x20 /* attention! */ +#define IEOB_IENAB 0x10 /* interrupt enable */ +#define IEOB_XXXXX 0x08 /* free bit */ +#define IEOB_XCVRL2 0x04 /* level 2 transceiver? 
*/ +#define IEOB_BUSERR 0x02 /* bus error */ +#define IEOB_INT 0x01 /* interrupt */ + +/* where the obio one lives */ +#define IE_OBIO 0xc0000 +#define IE_IRQ 3 + +/* + * where to find the System Configuration Pointer (SCP) + */ +#define SCP_DEFAULT_ADDRESS 0xfffff4 + + +/* + * System Configuration Pointer Struct + */ + +struct scp_struct +{ + unsigned short zero_dum0; /* has to be zero */ + unsigned char sysbus; /* 0=16Bit,1=8Bit */ + unsigned char zero_dum1; /* has to be zero for 586 */ + unsigned short zero_dum2; + unsigned short zero_dum3; + char *iscp; /* pointer to the iscp-block */ +}; + + +/* + * Intermediate System Configuration Pointer (ISCP) + */ +struct iscp_struct +{ + unsigned char busy; /* 586 clears after successful init */ + unsigned char zero_dummy; /* has to be zero */ + unsigned short scb_offset; /* pointeroffset to the scb_base */ + char *scb_base; /* base-address of all 16-bit offsets */ +}; + +/* + * System Control Block (SCB) + */ +struct scb_struct +{ + unsigned char rus; + unsigned char cus; + unsigned char cmd_ruc; /* command word: RU part */ + unsigned char cmd_cuc; /* command word: CU part & ACK */ + unsigned short cbl_offset; /* pointeroffset, command block list */ + unsigned short rfa_offset; /* pointeroffset, receive frame area */ + unsigned short crc_errs; /* CRC-Error counter */ + unsigned short aln_errs; /* allignmenterror counter */ + unsigned short rsc_errs; /* Resourceerror counter */ + unsigned short ovrn_errs; /* OVerrunerror counter */ +}; + +/* + * possible command values for the command word + */ +#define RUC_MASK 0x0070 /* mask for RU commands */ +#define RUC_NOP 0x0000 /* NOP-command */ +#define RUC_START 0x0010 /* start RU */ +#define RUC_RESUME 0x0020 /* resume RU after suspend */ +#define RUC_SUSPEND 0x0030 /* suspend RU */ +#define RUC_ABORT 0x0040 /* abort receiver operation immediately */ + +#define CUC_MASK 0x07 /* mask for CU command */ +#define CUC_NOP 0x00 /* NOP-command */ +#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */ +#define CUC_RESUME 0x02 /* resume after suspend */ +#define CUC_SUSPEND 0x03 /* Suspend CU */ +#define CUC_ABORT 0x04 /* abort command operation immediately */ + +#define ACK_MASK 0xf0 /* mask for ACK command */ +#define ACK_CX 0x80 /* acknowledges STAT_CX */ +#define ACK_FR 0x40 /* ack. STAT_FR */ +#define ACK_CNA 0x20 /* ack. STAT_CNA */ +#define ACK_RNR 0x10 /* ack. 
STAT_RNR */ + +/* + * possible status values for the status word + */ +#define STAT_MASK 0xf0 /* mask for cause of interrupt */ +#define STAT_CX 0x80 /* CU finished cmd with its I bit set */ +#define STAT_FR 0x40 /* RU finished receiving a frame */ +#define STAT_CNA 0x20 /* CU left active state */ +#define STAT_RNR 0x10 /* RU left ready state */ + +#define CU_STATUS 0x7 /* CU status, 0=idle */ +#define CU_SUSPEND 0x1 /* CU is suspended */ +#define CU_ACTIVE 0x2 /* CU is active */ + +#define RU_STATUS 0x70 /* RU status, 0=idle */ +#define RU_SUSPEND 0x10 /* RU suspended */ +#define RU_NOSPACE 0x20 /* RU no resources */ +#define RU_READY 0x40 /* RU is ready */ + +/* + * Receive Frame Descriptor (RFD) + */ +struct rfd_struct +{ + unsigned char stat_low; /* status word */ + unsigned char stat_high; /* status word */ + unsigned char rfd_sf; /* 82596 mode only */ + unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */ + unsigned short next; /* linkoffset to next RFD */ + unsigned short rbd_offset; /* pointeroffset to RBD-buffer */ + unsigned char dest[6]; /* ethernet-address, destination */ + unsigned char source[6]; /* ethernet-address, source */ + unsigned short length; /* 802.3 frame-length */ + unsigned short zero_dummy; /* dummy */ +}; + +#define RFD_LAST 0x80 /* last: last rfd in the list */ +#define RFD_SUSP 0x40 /* last: suspend RU after */ +#define RFD_COMPL 0x80 +#define RFD_OK 0x20 +#define RFD_BUSY 0x40 +#define RFD_ERR_LEN 0x10 /* Length error (if enabled length-checking */ +#define RFD_ERR_CRC 0x08 /* CRC error */ +#define RFD_ERR_ALGN 0x04 /* Alignment error */ +#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */ +#define RFD_ERR_OVR 0x01 /* DMA Overrun! */ + +#define RFD_ERR_FTS 0x0080 /* Frame to short */ +#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */ +#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */ +#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */ +#define RFD_COLLDET 0x0001 /* Detected collision during reception */ + +/* + * Receive Buffer Descriptor (RBD) + */ +struct rbd_struct +{ + unsigned short status; /* status word,number of used bytes in buff */ + unsigned short next; /* pointeroffset to next RBD */ + char *buffer; /* receive buffer address pointer */ + unsigned short size; /* size of this buffer */ + unsigned short zero_dummy; /* dummy */ +}; + +#define RBD_LAST 0x8000 /* last buffer */ +#define RBD_USED 0x4000 /* this buffer has data */ +#define RBD_MASK 0x3fff /* size-mask for length */ + +/* + * Statusvalues for Commands/RFD + */ +#define STAT_COMPL 0x8000 /* status: frame/command is complete */ +#define STAT_BUSY 0x4000 /* status: frame/command is busy */ +#define STAT_OK 0x2000 /* status: frame/command is ok */ + +/* + * Action-Commands + */ +#define CMD_NOP 0x0000 /* NOP */ +#define CMD_IASETUP 0x0001 /* initial address setup command */ +#define CMD_CONFIGURE 0x0002 /* configure command */ +#define CMD_MCSETUP 0x0003 /* MC setup command */ +#define CMD_XMIT 0x0004 /* transmit command */ +#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */ +#define CMD_DUMP 0x0006 /* dump command */ +#define CMD_DIAGNOSE 0x0007 /* diagnose command */ + +/* + * Action command bits + */ +#define CMD_LAST 0x8000 /* indicates last command in the CBL */ +#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */ +#define CMD_INT 0x2000 /* generate interrupt after execution */ + +/* + * NOP - command + */ +struct nop_cmd_struct +{ + unsigned short 
cmd_status; /* status of this command */ + unsigned short cmd_cmd; /* the command itself (+bits) */ + unsigned short cmd_link; /* offsetpointer to next command */ +}; + +/* + * IA Setup command + */ +struct iasetup_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned char iaddr[6]; +}; + +/* + * Configure command + */ +struct configure_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned char byte_cnt; /* size of the config-cmd */ + unsigned char fifo; /* fifo/recv monitor */ + unsigned char sav_bf; /* save bad frames (bit7=1)*/ + unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ + unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ + unsigned char ifs; /* inter frame spacing */ + unsigned char time_low; /* slot time low */ + unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */ + unsigned char promisc; /* promisc-mode(0) , et al (1-7) */ + unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */ + unsigned char fram_len; /* minimal frame len */ + unsigned char dummy; /* dummy */ +}; + +/* + * Multicast Setup command + */ +struct mcsetup_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned short mc_cnt; /* number of bytes in the MC-List */ + unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */ +}; + +/* + * DUMP command + */ +struct dump_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned short dump_offset; /* pointeroffset to DUMP space */ +}; + +/* + * transmit command + */ +struct transmit_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned short tbd_offset; /* pointeroffset to TBD */ + unsigned char dest[6]; /* destination address of the frame */ + unsigned short length; /* user defined: 802.3 length / Ether type */ +}; + +#define TCMD_ERRMASK 0x0fa0 +#define TCMD_MAXCOLLMASK 0x000f +#define TCMD_MAXCOLL 0x0020 +#define TCMD_HEARTBEAT 0x0040 +#define TCMD_DEFERRED 0x0080 +#define TCMD_UNDERRUN 0x0100 +#define TCMD_LOSTCTS 0x0200 +#define TCMD_NOCARRIER 0x0400 +#define TCMD_LATECOLL 0x0800 + +struct tdr_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned short status; +}; + +#define TDR_LNK_OK 0x8000 /* No link problem identified */ +#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */ +#define TDR_ET_OPN 0x2000 /* open, no correct termination */ +#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */ +#define TDR_TIMEMASK 0x07ff /* mask for the time field */ + +/* + * Transmit Buffer Descriptor (TBD) + */ +struct tbd_struct +{ + unsigned short size; /* size + EOF-Flag(15) */ + unsigned short next; /* pointeroffset to next TBD */ + char *buffer; /* pointer to buffer */ +}; + +#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */ + + + + diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c new file mode 100644 index 0000000..8b88817 --- /dev/null +++ b/drivers/net/ethernet/i825xx/znet.c @@ -0,0 +1,924 @@ +/* znet.c: An Zenith Z-Note ethernet driver for linux. */ + +/* + Written by Donald Becker. + + The author may be reached as becker@scyld.com. + This driver is based on the Linux skeleton driver. 
The copyright of the + skeleton driver is held by the United States Government, as represented + by DIRNSA, and it is released under the GPL. + + Thanks to Mike Hollick for alpha testing and suggestions. + + References: + The Crynwr packet driver. + + "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992 + Intel Microcommunications Databook, Vol. 1, 1990. + As usual with Intel, the documentation is incomplete and inaccurate. + I had to read the Crynwr packet driver to figure out how to actually + use the i82593, and guess at what register bits matched the loosely + related i82586. + + Theory of Operation + + The i82593 used in the Zenith Z-Note series operates using two(!) slave + DMA channels, one interrupt, and one 8-bit I/O port. + + While there several ways to configure '593 DMA system, I chose the one + that seemed commensurate with the highest system performance in the face + of moderate interrupt latency: Both DMA channels are configured as + recirculating ring buffers, with one channel (#0) dedicated to Rx and + the other channel (#1) to Tx and configuration. (Note that this is + different than the Crynwr driver, where the Tx DMA channel is initialized + before each operation. That approach simplifies operation and Tx error + recovery, but requires additional I/O in normal operation and precludes + transmit buffer chaining.) + + Both rings are set to 8192 bytes using {TX,RX}_RING_SIZE. This provides + a reasonable ring size for Rx, while simplifying DMA buffer allocation -- + DMA buffers must not cross a 128K boundary. (In truth the size selection + was influenced by my lack of '593 documentation. I thus was constrained + to use the Crynwr '593 initialization table, which sets the Rx ring size + to 8K.) + + Despite my usual low opinion about Intel-designed parts, I must admit + that the bulk data handling of the i82593 is a good design for + an integrated system, like a laptop, where using two slave DMA channels + doesn't pose a problem. I still take issue with using only a single I/O + port. In the same controlled environment there are essentially no + limitations on I/O space, and using multiple locations would eliminate + the need for multiple operations when looking at status registers, + setting the Rx ring boundary, or switching to promiscuous mode. + + I also question Zenith's selection of the '593: one of the advertised + advantages of earlier Intel parts was that if you figured out the magic + initialization incantation you could use the same part on many different + network types. Zenith's use of the "FriendlyNet" (sic) connector rather + than an on-board transceiver leads me to believe that they were planning + to take advantage of this. But, uhmmm, the '593 omits all but ethernet + functionality from the serial subsystem. + */ + +/* 10/2002 + + o Resurected for Linux 2.5+ by Marc Zyngier : + + - Removed strange DMA snooping in znet_sent_packet, which lead to + TX buffer corruption on my laptop. + - Use init_etherdev stuff. + - Use kmalloc-ed DMA buffers. + - Use as few global variables as possible. + - Use proper resources management. + - Use wireless/i82593.h as much as possible (structure, constants) + - Compiles as module or build-in. + - Now survives unplugging/replugging cable. + + Some code was taken from wavelan_cs. + + Tested on a vintage Zenith Z-Note 433Lnp+. Probably broken on + anything else. Testers (and detailed bug reports) are welcome :-). + + o TODO : + + - Properly handle multicast + - Understand why some traffic patterns add a 1s latency... 
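The 128K rule mentioned above comes from the PC slave DMA controller: a transfer cannot carry across a 128 KiB physical page, which is what the driver's dma_page_eq() just below checks by comparing all address bits above bit 16. A quick standalone version of the same test, with made-up addresses:

#include <stdio.h>

static int same_dma_page(unsigned long a, unsigned long b)
{
        return (a >> 17) == (b >> 17);  /* equal above bit 16: same 128K page */
}

int main(void)
{
        unsigned long start = 0x1fe00;          /* hypothetical buffer start */
        unsigned long end   = start + 8192 - 1; /* 8 KiB ring */

        printf("crosses a 128K boundary: %s\n",
               same_dma_page(start, end) ? "no" : "yes");
        return 0;
}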
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n"; + +#ifndef ZNET_DEBUG +#define ZNET_DEBUG 1 +#endif +static unsigned int znet_debug = ZNET_DEBUG; +module_param (znet_debug, int, 0); +MODULE_PARM_DESC (znet_debug, "ZNet debug level"); +MODULE_LICENSE("GPL"); + +/* The DMA modes we need aren't in . */ +#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */ +#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */ +#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17) +#define RX_BUF_SIZE 8192 +#define TX_BUF_SIZE 8192 +#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */ + +#define TX_TIMEOUT (HZ/10) + +struct znet_private { + int rx_dma, tx_dma; + spinlock_t lock; + short sia_base, sia_size, io_size; + struct i82593_conf_block i593_init; + /* The starting, current, and end pointers for the packet buffers. */ + ushort *rx_start, *rx_cur, *rx_end; + ushort *tx_start, *tx_cur, *tx_end; + ushort tx_buf_len; /* Tx buffer length, in words. */ +}; + +/* Only one can be built-in;-> */ +static struct net_device *znet_dev; + +struct netidblk { + char magic[8]; /* The magic number (string) "NETIDBLK" */ + unsigned char netid[8]; /* The physical station address */ + char nettype, globalopt; + char vendor[8]; /* The machine vendor and product name. */ + char product[8]; + char irq1, irq2; /* Interrupts, only one is currently used. */ + char dma1, dma2; + short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */ + short iobase1, iosize1; + short iobase2, iosize2; /* Second iobase unused. */ + char driver_options; /* Misc. bits */ + char pad; +}; + +static int znet_open(struct net_device *dev); +static netdev_tx_t znet_send_packet(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t znet_interrupt(int irq, void *dev_id); +static void znet_rx(struct net_device *dev); +static int znet_close(struct net_device *dev); +static void hardware_init(struct net_device *dev); +static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset); +static void znet_tx_timeout (struct net_device *dev); + +/* Request needed resources */ +static int znet_request_resources (struct net_device *dev) +{ + struct znet_private *znet = netdev_priv(dev); + + if (request_irq (dev->irq, znet_interrupt, 0, "ZNet", dev)) + goto failed; + if (request_dma (znet->rx_dma, "ZNet rx")) + goto free_irq; + if (request_dma (znet->tx_dma, "ZNet tx")) + goto free_rx_dma; + if (!request_region (znet->sia_base, znet->sia_size, "ZNet SIA")) + goto free_tx_dma; + if (!request_region (dev->base_addr, znet->io_size, "ZNet I/O")) + goto free_sia; + + return 0; /* Happy ! */ + + free_sia: + release_region (znet->sia_base, znet->sia_size); + free_tx_dma: + free_dma (znet->tx_dma); + free_rx_dma: + free_dma (znet->rx_dma); + free_irq: + free_irq (dev->irq, dev); + failed: + return -1; +} + +static void znet_release_resources (struct net_device *dev) +{ + struct znet_private *znet = netdev_priv(dev); + + release_region (znet->sia_base, znet->sia_size); + release_region (dev->base_addr, znet->io_size); + free_dma (znet->tx_dma); + free_dma (znet->rx_dma); + free_irq (dev->irq, dev); +} + +/* Keep the magical SIA stuff in a single function... 
*/ +static void znet_transceiver_power (struct net_device *dev, int on) +{ + struct znet_private *znet = netdev_priv(dev); + unsigned char v; + + /* Turn on/off the 82501 SIA, using zenith-specific magic. */ + /* Select LAN control register */ + outb(0x10, znet->sia_base); + + if (on) + v = inb(znet->sia_base + 1) | 0x84; + else + v = inb(znet->sia_base + 1) & ~0x84; + + outb(v, znet->sia_base+1); /* Turn on/off LAN power (bit 2). */ +} + +/* Init the i82593, with current promisc/mcast configuration. + Also used from hardware_init. */ +static void znet_set_multicast_list (struct net_device *dev) +{ + struct znet_private *znet = netdev_priv(dev); + short ioaddr = dev->base_addr; + struct i82593_conf_block *cfblk = &znet->i593_init; + + memset(cfblk, 0x00, sizeof(struct i82593_conf_block)); + + /* The configuration block. What an undocumented nightmare. + The first set of values are those suggested (without explanation) + for ethernet in the Intel 82586 databook. The rest appear to be + completely undocumented, except for cryptic notes in the Crynwr + packet driver. This driver uses the Crynwr values verbatim. */ + + /* maz : Rewritten to take advantage of the wanvelan includes. + At least we have names, not just blind values */ + + /* Byte 0 */ + cfblk->fifo_limit = 10; /* = 16 B rx and 80 B tx fifo thresholds */ + cfblk->forgnesi = 0; /* 0=82C501, 1=AMD7992B compatibility */ + cfblk->fifo_32 = 1; + cfblk->d6mod = 0; /* Run in i82593 advanced mode */ + cfblk->throttle_enb = 1; + + /* Byte 1 */ + cfblk->throttle = 8; /* Continuous w/interrupts, 128-clock DMA. */ + cfblk->cntrxint = 0; /* enable continuous mode receive interrupts */ + cfblk->contin = 1; /* enable continuous mode */ + + /* Byte 2 */ + cfblk->addr_len = ETH_ALEN; + cfblk->acloc = 1; /* Disable source addr insertion by i82593 */ + cfblk->preamb_len = 2; /* 8 bytes preamble */ + cfblk->loopback = 0; /* Loopback off */ + + /* Byte 3 */ + cfblk->lin_prio = 0; /* Default priorities & backoff methods. */ + cfblk->tbofstop = 0; + cfblk->exp_prio = 0; + cfblk->bof_met = 0; + + /* Byte 4 */ + cfblk->ifrm_spc = 6; /* 96 bit times interframe spacing */ + + /* Byte 5 */ + cfblk->slottim_low = 0; /* 512 bit times slot time (low) */ + + /* Byte 6 */ + cfblk->slottim_hi = 2; /* 512 bit times slot time (high) */ + cfblk->max_retr = 15; /* 15 collisions retries */ + + /* Byte 7 */ + cfblk->prmisc = ((dev->flags & IFF_PROMISC) ? 
1 : 0); /* Promiscuous mode */ + cfblk->bc_dis = 0; /* Enable broadcast reception */ + cfblk->crs_1 = 0; /* Don't transmit without carrier sense */ + cfblk->nocrc_ins = 0; /* i82593 generates CRC */ + cfblk->crc_1632 = 0; /* 32-bit Autodin-II CRC */ + cfblk->crs_cdt = 0; /* CD not to be interpreted as CS */ + + /* Byte 8 */ + cfblk->cs_filter = 0; /* CS is recognized immediately */ + cfblk->crs_src = 0; /* External carrier sense */ + cfblk->cd_filter = 0; /* CD is recognized immediately */ + + /* Byte 9 */ + cfblk->min_fr_len = ETH_ZLEN >> 2; /* Minimum frame length */ + + /* Byte A */ + cfblk->lng_typ = 1; /* Type/length checks OFF */ + cfblk->lng_fld = 1; /* Disable 802.3 length field check */ + cfblk->rxcrc_xf = 1; /* Don't transfer CRC to memory */ + cfblk->artx = 1; /* Disable automatic retransmission */ + cfblk->sarec = 1; /* Disable source addr trig of CD */ + cfblk->tx_jabber = 0; /* Disable jabber jam sequence */ + cfblk->hash_1 = 1; /* Use bits 0-5 in mc address hash */ + cfblk->lbpkpol = 0; /* Loopback pin active high */ + + /* Byte B */ + cfblk->fdx = 0; /* Disable full duplex operation */ + + /* Byte C */ + cfblk->dummy_6 = 0x3f; /* all ones, Default multicast addresses & backoff. */ + cfblk->mult_ia = 0; /* No multiple individual addresses */ + cfblk->dis_bof = 0; /* Disable the backoff algorithm ?! */ + + /* Byte D */ + cfblk->dummy_1 = 1; /* set to 1 */ + cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */ + cfblk->mc_all = (!netdev_mc_empty(dev) || + (dev->flags & IFF_ALLMULTI)); /* multicast all mode */ + cfblk->rcv_mon = 0; /* Monitor mode disabled */ + cfblk->frag_acpt = 0; /* Do not accept fragments */ + cfblk->tstrttrs = 0; /* No start transmission threshold */ + + /* Byte E */ + cfblk->fretx = 1; /* FIFO automatic retransmission */ + cfblk->runt_eop = 0; /* drop "runt" packets */ + cfblk->hw_sw_pin = 0; /* ?? */ + cfblk->big_endn = 0; /* Big Endian ? no... */ + cfblk->syncrqs = 1; /* Synchronous DRQ deassertion... */ + cfblk->sttlen = 1; /* 6 byte status registers */ + cfblk->rx_eop = 0; /* Signal EOP on packet reception */ + cfblk->tx_eop = 0; /* Signal EOP on packet transmission */ + + /* Byte F */ + cfblk->rbuf_size = RX_BUF_SIZE >> 12; /* Set receive buffer size */ + cfblk->rcvstop = 1; /* Enable Receive Stop Register */ + + if (znet_debug > 2) { + int i; + unsigned char *c; + + for (i = 0, c = (char *) cfblk; i < sizeof (*cfblk); i++) + printk ("%02X ", c[i]); + printk ("\n"); + } + + *znet->tx_cur++ = sizeof(struct i82593_conf_block); + memcpy(znet->tx_cur, cfblk, sizeof(struct i82593_conf_block)); + znet->tx_cur += sizeof(struct i82593_conf_block)/2; + outb(OP0_CONFIGURE | CR0_CHNL, ioaddr); + + /* XXX FIXME maz : Add multicast addresses here, so having a + * multicast address configured isn't equal to IFF_ALLMULTI */ +} + +static const struct net_device_ops znet_netdev_ops = { + .ndo_open = znet_open, + .ndo_stop = znet_close, + .ndo_start_xmit = znet_send_packet, + .ndo_set_multicast_list = znet_set_multicast_list, + .ndo_tx_timeout = znet_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe + BIOS area. We just scan for the signature, and pull the vital parameters + out of the structure. 
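   The scan covers the 64KB BIOS window from 0xf0000 to 0xfffff; testing
   *p == 'N' first is only a cheap filter so the string compare runs on
   candidate bytes alone.  Roughly, the probe loop below is equivalent to
   this simplified sketch (base standing in for phys_to_virt(0xf0000)):

	for (p = base; p < base + 0x10000; p++)
		if (memcmp(p, "NETIDBLK", 8) == 0)
			break;	/* p now points at a struct netidblk */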
 */
+
+static int __init znet_probe (void)
+{
+	int i;
+	struct netidblk *netinfo;
+	struct znet_private *znet;
+	struct net_device *dev;
+	char *p;
+	int err = -ENOMEM;
+
+	/* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
+	for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++)
+		if (*p == 'N' && strncmp(p, "NETIDBLK", 8) == 0)
+			break;
+
+	if (p >= (char *)phys_to_virt(0x100000)) {
+		if (znet_debug > 1)
+			printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
+		return -ENODEV;
+	}
+
+	dev = alloc_etherdev(sizeof(struct znet_private));
+	if (!dev)
+		return -ENOMEM;
+
+	znet = netdev_priv(dev);
+
+	netinfo = (struct netidblk *)p;
+	dev->base_addr = netinfo->iobase1;
+	dev->irq = netinfo->irq1;
+
+	/* The station address is in the "netidblk" at 0x0f0000. */
+	for (i = 0; i < 6; i++)
+		dev->dev_addr[i] = netinfo->netid[i];
+
+	printk(KERN_INFO "%s: ZNET at %#3lx, %pM"
+	       ", using IRQ %d DMA %d and %d.\n",
+	       dev->name, dev->base_addr, dev->dev_addr,
+	       dev->irq, netinfo->dma1, netinfo->dma2);
+
+	if (znet_debug > 1) {
+		printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n",
+		       dev->name, netinfo->vendor,
+		       netinfo->irq1, netinfo->irq2,
+		       netinfo->dma1, netinfo->dma2);
+		printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n",
+		       dev->name, netinfo->iobase1, netinfo->iosize1,
+		       netinfo->iobase2, netinfo->iosize2, netinfo->nettype);
+	}
+
+	if (znet_debug > 0)
+		printk(KERN_INFO "%s", version);
+
+	znet->rx_dma = netinfo->dma1;
+	znet->tx_dma = netinfo->dma2;
+	spin_lock_init(&znet->lock);
+	znet->sia_base = 0xe6;	/* Magic address for the 82501 SIA */
+	znet->sia_size = 2;
+	/* maz: Despite the '593 being advertised above as using a
+	 * single 8bits I/O port, this driver does many 16bits
+	 * access. So set io_size accordingly */
+	znet->io_size = 2;
+
+	if (!(znet->rx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
+		goto free_dev;
+	if (!(znet->tx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
+		goto free_rx;
+
+	if (!dma_page_eq (znet->rx_start, znet->rx_start + (RX_BUF_SIZE/2-1)) ||
+	    !dma_page_eq (znet->tx_start, znet->tx_start + (TX_BUF_SIZE/2-1))) {
+		printk (KERN_WARNING "tx/rx crossing DMA frontiers, giving up\n");
+		goto free_tx;
+	}
+
+	znet->rx_end = znet->rx_start + RX_BUF_SIZE/2;
+	znet->tx_buf_len = TX_BUF_SIZE/2;
+	znet->tx_end = znet->tx_start + znet->tx_buf_len;
+
+	/* The ZNET-specific entries in the device structure. */
+	dev->netdev_ops = &znet_netdev_ops;
+	dev->watchdog_timeo = TX_TIMEOUT;
+	err = register_netdev(dev);
+	if (err)
+		goto free_tx;
+	znet_dev = dev;
+	return 0;
+
+ free_tx:
+	kfree(znet->tx_start);
+ free_rx:
+	kfree(znet->rx_start);
+ free_dev:
+	free_netdev(dev);
+	return err;
+}
+
+
+static int znet_open(struct net_device *dev)
+{
+	int ioaddr = dev->base_addr;
+
+	if (znet_debug > 2)
+		printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name);
+
+	/* These should never fail.  You can't add devices to a sealed box! */
+	if (znet_request_resources (dev)) {
+		printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
+		return -EBUSY;
+	}
+
+	znet_transceiver_power (dev, 1);
+
+	/* According to the Crynwr driver we should wait 50 msec. for the
+	   LAN clock to stabilize.  My experiments indicate that the '593 can
+	   be initialized immediately.  The delay is probably needed for the
+	   DC-to-DC converter to come up to full voltage, and for the oscillator
+	   to be spot-on at 20MHz before transmitting.
+	   Until this proves to be a problem we rely on the higher layers for the
+	   delay and save allocating a timer entry. */
+
+	/* maz : Well, I'm getting the following message every time
+	 * without the delay on a 486@33. This machine is much too
+	 * fast... :-) So maybe the Crynwr driver wasn't wrong after
+	 * all, even if the message is completely harmless on my
+	 * setup. */
+	mdelay (50);
+
+	/* This follows the packet driver's lead, and checks for success. */
+	if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00)
+		printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n",
+		       dev->name);
+
+	hardware_init(dev);
+	netif_start_queue (dev);
+
+	return 0;
+}
+
+
+static void znet_tx_timeout (struct net_device *dev)
+{
+	int ioaddr = dev->base_addr;
+	ushort event, tx_status, rx_offset, state;
+
+	outb (CR0_STATUS_0, ioaddr);
+	event = inb (ioaddr);
+	outb (CR0_STATUS_1, ioaddr);
+	tx_status = inw (ioaddr);
+	outb (CR0_STATUS_2, ioaddr);
+	rx_offset = inw (ioaddr);
+	outb (CR0_STATUS_3, ioaddr);
+	state = inb (ioaddr);
+	printk (KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x,"
+	 " resetting.\n", dev->name, event, tx_status, rx_offset, state);
+	if (tx_status == TX_LOST_CRS)
+		printk (KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n",
+			dev->name);
+	outb (OP0_RESET, ioaddr);
+	hardware_init (dev);
+	netif_wake_queue (dev);
+}
+
+static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+	int ioaddr = dev->base_addr;
+	struct znet_private *znet = netdev_priv(dev);
+	unsigned long flags;
+	short length = skb->len;
+
+	if (znet_debug > 4)
+		printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name);
+
+	if (length < ETH_ZLEN) {
+		if (skb_padto(skb, ETH_ZLEN))
+			return NETDEV_TX_OK;
+		length = ETH_ZLEN;
+	}
+
+	netif_stop_queue (dev);
+
+	/* Check that the part hasn't reset itself, probably from suspend. */
+	outb(CR0_STATUS_0, ioaddr);
+	if (inw(ioaddr) == 0x0010 &&
+	    inw(ioaddr) == 0x0000 &&
+	    inw(ioaddr) == 0x0010) {
+		if (znet_debug > 1)
+			printk (KERN_WARNING "%s : waking up\n", dev->name);
+		hardware_init(dev);
+		znet_transceiver_power (dev, 1);
+	}
+
+	if (1) {
+		unsigned char *buf = (void *)skb->data;
+		ushort *tx_link = znet->tx_cur - 1;
+		ushort rnd_len = (length + 1)>>1;
+
+		dev->stats.tx_bytes+=length;
+
+		if (znet->tx_cur >= znet->tx_end)
+			znet->tx_cur = znet->tx_start;
+		*znet->tx_cur++ = length;
+		if (znet->tx_cur + rnd_len + 1 > znet->tx_end) {
+			int semi_cnt = (znet->tx_end - znet->tx_cur)<<1; /* Cvrt to byte cnt. */
+			memcpy(znet->tx_cur, buf, semi_cnt);
+			rnd_len -= semi_cnt>>1;
+			memcpy(znet->tx_start, buf + semi_cnt, length - semi_cnt);
+			znet->tx_cur = znet->tx_start + rnd_len;
+		} else {
+			memcpy(znet->tx_cur, buf, skb->len);
+			znet->tx_cur += rnd_len;
+		}
+		*znet->tx_cur++ = 0;
+
+		spin_lock_irqsave(&znet->lock, flags);
+		{
+			*tx_link = OP0_TRANSMIT | CR0_CHNL;
+			/* Is this always safe to do? */
+			outb(OP0_TRANSMIT | CR0_CHNL, ioaddr);
+		}
+		spin_unlock_irqrestore (&znet->lock, flags);
+
+		netif_start_queue (dev);
+
+		if (znet_debug > 4)
+			printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length);
+	}
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/* The ZNET interrupt handler.
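   All of the chip's status words are read indirectly through the base
   port: a register index is written first, then the value is read back
   from the same address, as in this pattern taken from the handler and
   from znet_tx_timeout() above:

	outb(CR0_STATUS_1, ioaddr);	/* select status register 1 */
	tx_status = inw(ioaddr);	/* ...then fetch its contents */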
*/ +static irqreturn_t znet_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct znet_private *znet = netdev_priv(dev); + int ioaddr; + int boguscnt = 20; + int handled = 0; + + spin_lock (&znet->lock); + + ioaddr = dev->base_addr; + + outb(CR0_STATUS_0, ioaddr); + do { + ushort status = inb(ioaddr); + if (znet_debug > 5) { + ushort result, rx_ptr, running; + outb(CR0_STATUS_1, ioaddr); + result = inw(ioaddr); + outb(CR0_STATUS_2, ioaddr); + rx_ptr = inw(ioaddr); + outb(CR0_STATUS_3, ioaddr); + running = inb(ioaddr); + printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n", + dev->name, status, result, rx_ptr, running, boguscnt); + } + if ((status & SR0_INTERRUPT) == 0) + break; + + handled = 1; + + if ((status & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE || + (status & SR0_EVENT_MASK) == SR0_RETRANSMIT_DONE || + (status & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE) { + int tx_status; + outb(CR0_STATUS_1, ioaddr); + tx_status = inw(ioaddr); + /* It's undocumented, but tx_status seems to match the i82586. */ + if (tx_status & TX_OK) { + dev->stats.tx_packets++; + dev->stats.collisions += tx_status & TX_NCOL_MASK; + } else { + if (tx_status & (TX_LOST_CTS | TX_LOST_CRS)) + dev->stats.tx_carrier_errors++; + if (tx_status & TX_UND_RUN) + dev->stats.tx_fifo_errors++; + if (!(tx_status & TX_HRT_BEAT)) + dev->stats.tx_heartbeat_errors++; + if (tx_status & TX_MAX_COL) + dev->stats.tx_aborted_errors++; + /* ...and the catch-all. */ + if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) + dev->stats.tx_errors++; + + /* Transceiver may be stuck if cable + * was removed while emitting a + * packet. Flip it off, then on to + * reset it. This is very empirical, + * but it seems to work. */ + + znet_transceiver_power (dev, 0); + znet_transceiver_power (dev, 1); + } + netif_wake_queue (dev); + } + + if ((status & SR0_RECEPTION) || + (status & SR0_EVENT_MASK) == SR0_STOP_REG_HIT) { + znet_rx(dev); + } + /* Clear the interrupts we've handled. */ + outb(CR0_INT_ACK, ioaddr); + } while (boguscnt--); + + spin_unlock (&znet->lock); + + return IRQ_RETVAL(handled); +} + +static void znet_rx(struct net_device *dev) +{ + struct znet_private *znet = netdev_priv(dev); + int ioaddr = dev->base_addr; + int boguscount = 1; + short next_frame_end_offset = 0; /* Offset of next frame start. */ + short *cur_frame_end; + short cur_frame_end_offset; + + outb(CR0_STATUS_2, ioaddr); + cur_frame_end_offset = inw(ioaddr); + + if (cur_frame_end_offset == znet->rx_cur - znet->rx_start) { + printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n", + dev->name, cur_frame_end_offset); + return; + } + + /* Use same method as the Crynwr driver: construct a forward list in + the same area of the backwards links we now have. This allows us to + pass packets to the upper layers in the order they were received -- + important for fast-path sequential operations. */ + while (znet->rx_start + cur_frame_end_offset != znet->rx_cur && + ++boguscount < 5) { + unsigned short hi_cnt, lo_cnt, hi_status, lo_status; + int count, status; + + if (cur_frame_end_offset < 4) { + /* Oh no, we have a special case: the frame trailer wraps around + the end of the ring buffer. We've saved space at the end of + the ring buffer for just this problem. 
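   As a worked example: with cur_frame_end_offset == 2 the four-word
   trailer straddles the wrap point, so the code below copies the first
   8 bytes of the ring into the 16 spare bytes past rx_end (this is why
   DMA_BUF_SIZE is RX_BUF_SIZE + 16) and bumps the offset by RX_BUF_SIZE/2,
   after which the trailer can be read with plain linear pointer
   arithmetic.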
*/ + memcpy(znet->rx_end, znet->rx_start, 8); + cur_frame_end_offset += (RX_BUF_SIZE/2); + } + cur_frame_end = znet->rx_start + cur_frame_end_offset - 4; + + lo_status = *cur_frame_end++; + hi_status = *cur_frame_end++; + status = ((hi_status & 0xff) << 8) + (lo_status & 0xff); + lo_cnt = *cur_frame_end++; + hi_cnt = *cur_frame_end++; + count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff); + + if (znet_debug > 5) + printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x" + " count %#x status %04x.\n", + cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt, + count, status); + cur_frame_end[-4] = status; + cur_frame_end[-3] = next_frame_end_offset; + cur_frame_end[-2] = count; + next_frame_end_offset = cur_frame_end_offset; + cur_frame_end_offset -= ((count + 1)>>1) + 3; + if (cur_frame_end_offset < 0) + cur_frame_end_offset += RX_BUF_SIZE/2; + } + + /* Now step forward through the list. */ + do { + ushort *this_rfp_ptr = znet->rx_start + next_frame_end_offset; + int status = this_rfp_ptr[-4]; + int pkt_len = this_rfp_ptr[-2]; + + if (znet_debug > 5) + printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x" + " next %04x.\n", next_frame_end_offset<<1, status, pkt_len, + this_rfp_ptr[-3]<<1); + /* Once again we must assume that the i82586 docs apply. */ + if ( ! (status & RX_RCV_OK)) { /* There was an error. */ + dev->stats.rx_errors++; + if (status & RX_CRC_ERR) dev->stats.rx_crc_errors++; + if (status & RX_ALG_ERR) dev->stats.rx_frame_errors++; +#if 0 + if (status & 0x0200) dev->stats.rx_over_errors++; /* Wrong. */ + if (status & 0x0100) dev->stats.rx_fifo_errors++; +#else + /* maz : Wild guess... */ + if (status & RX_OVRRUN) dev->stats.rx_over_errors++; +#endif + if (status & RX_SRT_FRM) dev->stats.rx_length_errors++; + } else if (pkt_len > 1536) { + dev->stats.rx_length_errors++; + } else { + /* Malloc up new buffer. */ + struct sk_buff *skb; + + skb = dev_alloc_skb(pkt_len); + if (skb == NULL) { + if (znet_debug) + printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); + dev->stats.rx_dropped++; + break; + } + + if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) { + int semi_cnt = (znet->rx_end - znet->rx_cur)<<1; + memcpy(skb_put(skb,semi_cnt), znet->rx_cur, semi_cnt); + memcpy(skb_put(skb,pkt_len-semi_cnt), znet->rx_start, + pkt_len - semi_cnt); + } else { + memcpy(skb_put(skb,pkt_len), znet->rx_cur, pkt_len); + if (znet_debug > 6) { + unsigned int *packet = (unsigned int *) skb->data; + printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0], + packet[1], packet[2], packet[3]); + } + } + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + znet->rx_cur = this_rfp_ptr; + if (znet->rx_cur >= znet->rx_end) + znet->rx_cur -= RX_BUF_SIZE/2; + update_stop_hit(ioaddr, (znet->rx_cur - znet->rx_start)<<1); + next_frame_end_offset = this_rfp_ptr[-3]; + if (next_frame_end_offset == 0) /* Read all the frames? */ + break; /* Done for now */ + this_rfp_ptr = znet->rx_start + next_frame_end_offset; + } while (--boguscount); + + /* If any worth-while packets have been received, dev_rint() + has done a mark_bh(INET_BH) for us and will work on them + when we get to the bottom-half routine. */ +} + +/* The inverse routine to znet_open(). 
 */
+static int znet_close(struct net_device *dev)
+{
+	int ioaddr = dev->base_addr;
+
+	netif_stop_queue (dev);
+
+	outb(OP0_RESET, ioaddr);		/* CMD0_RESET */
+
+	if (znet_debug > 1)
+		printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+	/* Turn off transceiver power. */
+	znet_transceiver_power (dev, 0);
+
+	znet_release_resources (dev);
+
+	return 0;
+}
+
+static void show_dma(struct net_device *dev)
+{
+	short ioaddr = dev->base_addr;
+	unsigned char stat = inb (ioaddr);
+	struct znet_private *znet = netdev_priv(dev);
+	unsigned long flags;
+	short dma_port = ((znet->tx_dma&3)<<2) + IO_DMA2_BASE;
+	unsigned addr = inb(dma_port);
+	short residue;
+
+	addr |= inb(dma_port) << 8;
+	residue = get_dma_residue(znet->tx_dma);
+
+	if (znet_debug > 1) {
+		flags=claim_dma_lock();
+		printk(KERN_DEBUG "Stat:%02x Addr: %04x cnt:%3x\n",
+		       stat, addr<<1, residue);
+		release_dma_lock(flags);
+	}
+}
+
+/* Initialize the hardware.  We have to do this when the board is open()ed
+   or when we come out of suspend mode. */
+static void hardware_init(struct net_device *dev)
+{
+	unsigned long flags;
+	short ioaddr = dev->base_addr;
+	struct znet_private *znet = netdev_priv(dev);
+
+	znet->rx_cur = znet->rx_start;
+	znet->tx_cur = znet->tx_start;
+
+	/* Reset the chip, and start it up. */
+	outb(OP0_RESET, ioaddr);
+
+	flags=claim_dma_lock();
+	disable_dma(znet->rx_dma); 		/* reset by an interrupting task. */
+	clear_dma_ff(znet->rx_dma);
+	set_dma_mode(znet->rx_dma, DMA_RX_MODE);
+	set_dma_addr(znet->rx_dma, (unsigned int) znet->rx_start);
+	set_dma_count(znet->rx_dma, RX_BUF_SIZE);
+	enable_dma(znet->rx_dma);
+	/* Now set up the Tx channel. */
+	disable_dma(znet->tx_dma);
+	clear_dma_ff(znet->tx_dma);
+	set_dma_mode(znet->tx_dma, DMA_TX_MODE);
+	set_dma_addr(znet->tx_dma, (unsigned int) znet->tx_start);
+	set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
+	enable_dma(znet->tx_dma);
+	release_dma_lock(flags);
+
+	if (znet_debug > 1)
+		printk(KERN_DEBUG "%s: Initializing the i82593, rx buf %p tx buf %p\n",
+		       dev->name, znet->rx_start,znet->tx_start);
+	/* Do an empty configure command, just like the Crynwr driver.  This
+	   resets the chip to its default values.
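   Every i82593 command this driver issues has the same shape: a byte
   count written into the Tx buffer, the payload, and an OP0_* opcode
   poked at the I/O port to start the transfer.  A rough template
   (payload, payload_bytes and opcode are placeholders, not names from
   this file):

	*znet->tx_cur++ = payload_bytes;		/* length word */
	memcpy(znet->tx_cur, payload, payload_bytes);
	znet->tx_cur += payload_bytes / 2;		/* tx_cur counts words */
	outb(opcode | CR0_CHNL, ioaddr);		/* kick the command */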
*/ + *znet->tx_cur++ = 0; + *znet->tx_cur++ = 0; + show_dma(dev); + outb(OP0_CONFIGURE | CR0_CHNL, ioaddr); + + znet_set_multicast_list (dev); + + *znet->tx_cur++ = 6; + memcpy(znet->tx_cur, dev->dev_addr, 6); + znet->tx_cur += 3; + show_dma(dev); + outb(OP0_IA_SETUP | CR0_CHNL, ioaddr); + show_dma(dev); + + update_stop_hit(ioaddr, 8192); + if (znet_debug > 1) printk(KERN_DEBUG "enabling Rx.\n"); + outb(OP0_RCV_ENABLE, ioaddr); + netif_start_queue (dev); +} + +static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset) +{ + outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, ioaddr); + if (znet_debug > 5) + printk(KERN_DEBUG "Updating stop hit with value %02x.\n", + (rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE); + outb((rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE, ioaddr); + outb(OP1_SWIT_TO_PORT_0, ioaddr); +} + +static __exit void znet_cleanup (void) +{ + if (znet_dev) { + struct znet_private *znet = netdev_priv(znet_dev); + + unregister_netdev (znet_dev); + kfree (znet->rx_start); + kfree (znet->tx_start); + free_netdev (znet_dev); + } +} + +module_init (znet_probe); +module_exit (znet_cleanup); diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c deleted file mode 100644 index 6eba352..0000000 --- a/drivers/net/lasi_82596.c +++ /dev/null @@ -1,238 +0,0 @@ -/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as - munged into HPPA boxen . - - This driver is based upon 82596.c, original credits are below... - but there were too many hoops which HP wants jumped through to - keep this code in there in a sane manner. - - 3 primary sources of the mess -- - 1) hppa needs *lots* of cacheline flushing to keep this kind of - MMIO running. - - 2) The 82596 needs to see all of its pointers as their physical - address. Thus virt_to_bus/bus_to_virt are *everywhere*. - - 3) The implementation HP is using seems to be significantly pickier - about when and how the command and RX units are started. some - command ordering was changed. - - Examination of the mach driver leads one to believe that there - might be a saner way to pull this off... anyone who feels like a - full rewrite can be my guest. - - Split 02/13/2000 Sam Creasey (sammy@oh.verio.com) - - 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de) - 03/02/2000 changes for better/correct(?) cache-flushing (deller) -*/ - -/* 82596.c: A generic 82596 ethernet driver for linux. */ -/* - Based on Apricot.c - Written 1994 by Mark Evans. - This driver is for the Apricot 82596 bus-master interface - - Modularised 12/94 Mark Evans - - - Modified to support the 82596 ethernet chips on 680x0 VME boards. - by Richard Hirst - Renamed to be 82596.c - - 980825: Changed to receive directly in to sk_buffs which are - allocated at open() time. Eliminates copy on incoming frames - (small ones are still copied). Shared data now held in a - non-cached page, so we can run on 68060 in copyback mode. - - TBD: - * look at deferring rx frames rather than discarding (as per tulip) - * handle tx ring full as per tulip - * performance test to tune rx_copybreak - - Most of my modifications relate to the braindead big-endian - implementation by Intel. When the i596 is operating in - 'big-endian' mode, it thinks a 32 bit value of 0x12345678 - should be stored as 0x56781234. This is a real pain, when - you have linked lists which are shared by the 680x0 and the - i596. - - Driver skeleton - Written 1993 by Donald Becker. - Copyright 1993 United States Government as represented by the Director, - National Security Agency. 
This software may only be used and distributed - according to the terms of the GNU General Public License as modified by SRC, - incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 - - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30" - -#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/ -#define PA_CPU_PORT_L_ACCESS 4 -#define PA_CHANNEL_ATTENTION 8 - -#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */ - -#define DMA_ALLOC dma_alloc_noncoherent -#define DMA_FREE dma_free_noncoherent -#define DMA_WBACK(ndev, addr, len) \ - do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0) - -#define DMA_INV(ndev, addr, len) \ - do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0) - -#define DMA_WBACK_INV(ndev, addr, len) \ - do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0) - -#define SYSBUS 0x0000006c; - -/* big endian CPU, 82596 "big" endian mode */ -#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16)) -#define SWAP16(x) (x) - -#include "lib82596.c" - -MODULE_AUTHOR("Richard Hirst"); -MODULE_DESCRIPTION("i82596 driver"); -MODULE_LICENSE("GPL"); -module_param(i596_debug, int, 0); -MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask"); - -static inline void ca(struct net_device *dev) -{ - gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION); -} - - -static void mpu_port(struct net_device *dev, int c, dma_addr_t x) -{ - struct i596_private *lp = netdev_priv(dev); - - u32 v = (u32) (c) | (u32) (x); - u16 a, b; - - if (lp->options & OPT_SWAP_PORT) { - a = v >> 16; - b = v & 0xffff; - } else { - a = v & 0xffff; - b = v >> 16; - } - - gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS); - udelay(1); - gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS); -} - -#define LAN_PROM_ADDR 0xF0810000 - -static int __devinit -lan_init_chip(struct parisc_device *dev) -{ - struct net_device *netdevice; - struct i596_private *lp; - int retval; - int i; - - if (!dev->irq) { - printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", - __FILE__, (unsigned long)dev->hpa.start); - return -ENODEV; - } - - printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", - (unsigned long)dev->hpa.start, dev->irq); - - netdevice = alloc_etherdev(sizeof(struct i596_private)); - if (!netdevice) - return -ENOMEM; - SET_NETDEV_DEV(netdevice, &dev->dev); - parisc_set_drvdata (dev, netdevice); - - netdevice->base_addr = dev->hpa.start; - netdevice->irq = dev->irq; - - if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) { - for (i = 0; i < 6; i++) { - netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i); - } - printk(KERN_INFO - "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__); - } - - lp = netdev_priv(netdevice); - lp->options = dev->id.sversion == 0x72 ? 
OPT_SWAP_PORT : 0; - - retval = i82596_probe(netdevice); - if (retval) { - free_netdev(netdevice); - return -ENODEV; - } - return retval; -} - -static int __devexit lan_remove_chip (struct parisc_device *pdev) -{ - struct net_device *dev = parisc_get_drvdata(pdev); - struct i596_private *lp = netdev_priv(dev); - - unregister_netdev (dev); - DMA_FREE(&pdev->dev, sizeof(struct i596_private), - (void *)lp->dma, lp->dma_addr); - free_netdev (dev); - return 0; -} - -static struct parisc_device_id lan_tbl[] = { - { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a }, - { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 }, - { 0, } -}; - -MODULE_DEVICE_TABLE(parisc, lan_tbl); - -static struct parisc_driver lan_driver = { - .name = "lasi_82596", - .id_table = lan_tbl, - .probe = lan_init_chip, - .remove = __devexit_p(lan_remove_chip), -}; - -static int __devinit lasi_82596_init(void) -{ - printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n"); - return register_parisc_driver(&lan_driver); -} - -module_init(lasi_82596_init); - -static void __exit lasi_82596_exit(void) -{ - unregister_parisc_driver(&lan_driver); -} - -module_exit(lasi_82596_exit); diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c deleted file mode 100644 index 9e04289..0000000 --- a/drivers/net/lib82596.c +++ /dev/null @@ -1,1412 +0,0 @@ -/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as - munged into HPPA boxen . - - This driver is based upon 82596.c, original credits are below... - but there were too many hoops which HP wants jumped through to - keep this code in there in a sane manner. - - 3 primary sources of the mess -- - 1) hppa needs *lots* of cacheline flushing to keep this kind of - MMIO running. - - 2) The 82596 needs to see all of its pointers as their physical - address. Thus virt_to_bus/bus_to_virt are *everywhere*. - - 3) The implementation HP is using seems to be significantly pickier - about when and how the command and RX units are started. some - command ordering was changed. - - Examination of the mach driver leads one to believe that there - might be a saner way to pull this off... anyone who feels like a - full rewrite can be my guest. - - Split 02/13/2000 Sam Creasey (sammy@oh.verio.com) - - 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de) - 03/02/2000 changes for better/correct(?) cache-flushing (deller) -*/ - -/* 82596.c: A generic 82596 ethernet driver for linux. */ -/* - Based on Apricot.c - Written 1994 by Mark Evans. - This driver is for the Apricot 82596 bus-master interface - - Modularised 12/94 Mark Evans - - - Modified to support the 82596 ethernet chips on 680x0 VME boards. - by Richard Hirst - Renamed to be 82596.c - - 980825: Changed to receive directly in to sk_buffs which are - allocated at open() time. Eliminates copy on incoming frames - (small ones are still copied). Shared data now held in a - non-cached page, so we can run on 68060 in copyback mode. - - TBD: - * look at deferring rx frames rather than discarding (as per tulip) - * handle tx ring full as per tulip - * performance test to tune rx_copybreak - - Most of my modifications relate to the braindead big-endian - implementation by Intel. When the i596 is operating in - 'big-endian' mode, it thinks a 32 bit value of 0x12345678 - should be stored as 0x56781234. This is a real pain, when - you have linked lists which are shared by the 680x0 and the - i596. - - Driver skeleton - Written 1993 by Donald Becker. 
- Copyright 1993 United States Government as represented by the Director, - National Security Agency. This software may only be used and distributed - according to the terms of the GNU General Public License as modified by SRC, - incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 - - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* DEBUG flags - */ - -#define DEB_INIT 0x0001 -#define DEB_PROBE 0x0002 -#define DEB_SERIOUS 0x0004 -#define DEB_ERRORS 0x0008 -#define DEB_MULTI 0x0010 -#define DEB_TDR 0x0020 -#define DEB_OPEN 0x0040 -#define DEB_RESET 0x0080 -#define DEB_ADDCMD 0x0100 -#define DEB_STATUS 0x0200 -#define DEB_STARTTX 0x0400 -#define DEB_RXADDR 0x0800 -#define DEB_TXADDR 0x1000 -#define DEB_RXFRAME 0x2000 -#define DEB_INTS 0x4000 -#define DEB_STRUCT 0x8000 -#define DEB_ANY 0xffff - - -#define DEB(x, y) if (i596_debug & (x)) { y; } - - -/* - * The MPU_PORT command allows direct access to the 82596. With PORT access - * the following commands are available (p5-18). The 32-bit port command - * must be word-swapped with the most significant word written first. - * This only applies to VME boards. - */ -#define PORT_RESET 0x00 /* reset 82596 */ -#define PORT_SELFTEST 0x01 /* selftest */ -#define PORT_ALTSCP 0x02 /* alternate SCB address */ -#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */ - -static int i596_debug = (DEB_SERIOUS|DEB_PROBE); - -/* Copy frames shorter than rx_copybreak, otherwise pass on up in - * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha). - */ -static int rx_copybreak = 100; - -#define PKT_BUF_SZ 1536 -#define MAX_MC_CNT 64 - -#define ISCP_BUSY 0x0001 - -#define I596_NULL ((u32)0xffffffff) - -#define CMD_EOL 0x8000 /* The last command of the list, stop. */ -#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ -#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ - -#define CMD_FLEX 0x0008 /* Enable flexible memory model */ - -enum commands { - CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, - CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7 -}; - -#define STAT_C 0x8000 /* Set to 0 after execution */ -#define STAT_B 0x4000 /* Command being executed */ -#define STAT_OK 0x2000 /* Command executed ok */ -#define STAT_A 0x1000 /* Command aborted */ - -#define CUC_START 0x0100 -#define CUC_RESUME 0x0200 -#define CUC_SUSPEND 0x0300 -#define CUC_ABORT 0x0400 -#define RX_START 0x0010 -#define RX_RESUME 0x0020 -#define RX_SUSPEND 0x0030 -#define RX_ABORT 0x0040 - -#define TX_TIMEOUT (HZ/20) - - -struct i596_reg { - unsigned short porthi; - unsigned short portlo; - u32 ca; -}; - -#define EOF 0x8000 -#define SIZE_MASK 0x3fff - -struct i596_tbd { - unsigned short size; - unsigned short pad; - u32 next; - u32 data; - u32 cache_pad[5]; /* Total 32 bytes... */ -}; - -/* The command structure has two 'next' pointers; v_next is the address of - * the next command as seen by the CPU, b_next is the address of the next - * command as seen by the 82596. The b_next pointer, as used by the 82596 - * always references the status field of the next command, rather than the - * v_next field, because the 82596 is unaware of v_next. 
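   (So every queued command effectively sits on two parallel lists.  A
   rough sketch in this file's terms: the driver walks the chain as

	for (cmd = lp->cmd_head; cmd != NULL; cmd = cmd->v_next)
		;	/* CPU-side walk of the virtual pointers */

   while the chip independently chases cmd->b_next, which i596_add_cmd()
   fills with SWAP32(virt_to_dma(lp, &cmd->status)), i.e. a bus address.)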
It may seem more - * logical to put v_next at the end of the structure, but we cannot do that - * because the 82596 expects other fields to be there, depending on command - * type. - */ - -struct i596_cmd { - struct i596_cmd *v_next; /* Address from CPUs viewpoint */ - unsigned short status; - unsigned short command; - u32 b_next; /* Address from i596 viewpoint */ -}; - -struct tx_cmd { - struct i596_cmd cmd; - u32 tbd; - unsigned short size; - unsigned short pad; - struct sk_buff *skb; /* So we can free it after tx */ - dma_addr_t dma_addr; -#ifdef __LP64__ - u32 cache_pad[6]; /* Total 64 bytes... */ -#else - u32 cache_pad[1]; /* Total 32 bytes... */ -#endif -}; - -struct tdr_cmd { - struct i596_cmd cmd; - unsigned short status; - unsigned short pad; -}; - -struct mc_cmd { - struct i596_cmd cmd; - short mc_cnt; - char mc_addrs[MAX_MC_CNT*6]; -}; - -struct sa_cmd { - struct i596_cmd cmd; - char eth_addr[8]; -}; - -struct cf_cmd { - struct i596_cmd cmd; - char i596_config[16]; -}; - -struct i596_rfd { - unsigned short stat; - unsigned short cmd; - u32 b_next; /* Address from i596 viewpoint */ - u32 rbd; - unsigned short count; - unsigned short size; - struct i596_rfd *v_next; /* Address from CPUs viewpoint */ - struct i596_rfd *v_prev; -#ifndef __LP64__ - u32 cache_pad[2]; /* Total 32 bytes... */ -#endif -}; - -struct i596_rbd { - /* hardware data */ - unsigned short count; - unsigned short zero1; - u32 b_next; - u32 b_data; /* Address from i596 viewpoint */ - unsigned short size; - unsigned short zero2; - /* driver data */ - struct sk_buff *skb; - struct i596_rbd *v_next; - u32 b_addr; /* This rbd addr from i596 view */ - unsigned char *v_data; /* Address from CPUs viewpoint */ - /* Total 32 bytes... */ -#ifdef __LP64__ - u32 cache_pad[4]; -#endif -}; - -/* These values as chosen so struct i596_dma fits in one page... 
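   (On 32-bit builds that constraint is enforced at compile time further
   down with BUILD_BUG_ON(sizeof(struct i596_dma) > 4096); keeping the
   whole shared block inside one page also means a single
   DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma)) covers every
   descriptor at once.)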
*/ - -#define TX_RING_SIZE 32 -#define RX_RING_SIZE 16 - -struct i596_scb { - unsigned short status; - unsigned short command; - u32 cmd; - u32 rfd; - u32 crc_err; - u32 align_err; - u32 resource_err; - u32 over_err; - u32 rcvdt_err; - u32 short_err; - unsigned short t_on; - unsigned short t_off; -}; - -struct i596_iscp { - u32 stat; - u32 scb; -}; - -struct i596_scp { - u32 sysbus; - u32 pad; - u32 iscp; -}; - -struct i596_dma { - struct i596_scp scp __attribute__((aligned(32))); - volatile struct i596_iscp iscp __attribute__((aligned(32))); - volatile struct i596_scb scb __attribute__((aligned(32))); - struct sa_cmd sa_cmd __attribute__((aligned(32))); - struct cf_cmd cf_cmd __attribute__((aligned(32))); - struct tdr_cmd tdr_cmd __attribute__((aligned(32))); - struct mc_cmd mc_cmd __attribute__((aligned(32))); - struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32))); - struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32))); - struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32))); - struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32))); -}; - -struct i596_private { - struct i596_dma *dma; - u32 stat; - int last_restart; - struct i596_rfd *rfd_head; - struct i596_rbd *rbd_head; - struct i596_cmd *cmd_tail; - struct i596_cmd *cmd_head; - int cmd_backlog; - u32 last_cmd; - int next_tx_cmd; - int options; - spinlock_t lock; /* serialize access to chip */ - dma_addr_t dma_addr; - void __iomem *mpu_port; - void __iomem *ca; -}; - -static const char init_setup[] = -{ - 0x8E, /* length, prefetch on */ - 0xC8, /* fifo to 8, monitor off */ - 0x80, /* don't save bad frames */ - 0x2E, /* No source address insertion, 8 byte preamble */ - 0x00, /* priority and backoff defaults */ - 0x60, /* interframe spacing */ - 0x00, /* slot time LSB */ - 0xf2, /* slot time and retries */ - 0x00, /* promiscuous mode */ - 0x00, /* collision detect */ - 0x40, /* minimum frame length */ - 0xff, - 0x00, - 0x7f /* *multi IA */ }; - -static int i596_open(struct net_device *dev); -static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t i596_interrupt(int irq, void *dev_id); -static int i596_close(struct net_device *dev); -static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); -static void i596_tx_timeout (struct net_device *dev); -static void print_eth(unsigned char *buf, char *str); -static void set_multicast_list(struct net_device *dev); -static inline void ca(struct net_device *dev); -static void mpu_port(struct net_device *dev, int c, dma_addr_t x); - -static int rx_ring_size = RX_RING_SIZE; -static int ticks_limit = 100; -static int max_cmd_backlog = TX_RING_SIZE-1; - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void i596_poll_controller(struct net_device *dev); -#endif - - -static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str) -{ - DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp)); - while (--delcnt && dma->iscp.stat) { - udelay(10); - DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp)); - } - if (!delcnt) { - printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n", - dev->name, str, SWAP16(dma->iscp.stat)); - return -1; - } else - return 0; -} - - -static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str) -{ - DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb)); - while (--delcnt && dma->scb.command) { - udelay(10); - DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb)); - } - if (!delcnt) { - printk(KERN_ERR "%s: %s, status %4.4x, cmd 
%4.4x.\n", - dev->name, str, - SWAP16(dma->scb.status), - SWAP16(dma->scb.command)); - return -1; - } else - return 0; -} - - -static void i596_display_data(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - struct i596_dma *dma = lp->dma; - struct i596_cmd *cmd; - struct i596_rfd *rfd; - struct i596_rbd *rbd; - - printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n", - &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp)); - printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n", - &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb)); - printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x," - " .cmd = %08x, .rfd = %08x\n", - &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command), - SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd)); - printk(KERN_DEBUG " errors: crc %x, align %x, resource %x," - " over %x, rcvdt %x, short %x\n", - SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err), - SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err), - SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err)); - cmd = lp->cmd_head; - while (cmd != NULL) { - printk(KERN_DEBUG - "cmd at %p, .status = %04x, .command = %04x," - " .b_next = %08x\n", - cmd, SWAP16(cmd->status), SWAP16(cmd->command), - SWAP32(cmd->b_next)); - cmd = cmd->v_next; - } - rfd = lp->rfd_head; - printk(KERN_DEBUG "rfd_head = %p\n", rfd); - do { - printk(KERN_DEBUG - " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x," - " count %04x\n", - rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd), - SWAP32(rfd->b_next), SWAP32(rfd->rbd), - SWAP16(rfd->count)); - rfd = rfd->v_next; - } while (rfd != lp->rfd_head); - rbd = lp->rbd_head; - printk(KERN_DEBUG "rbd_head = %p\n", rbd); - do { - printk(KERN_DEBUG - " %p .count %04x, b_next %08x, b_data %08x," - " size %04x\n", - rbd, SWAP16(rbd->count), SWAP32(rbd->b_next), - SWAP32(rbd->b_data), SWAP16(rbd->size)); - rbd = rbd->v_next; - } while (rbd != lp->rbd_head); - DMA_INV(dev, dma, sizeof(struct i596_dma)); -} - - -#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma))) - -static inline int init_rx_bufs(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - struct i596_dma *dma = lp->dma; - int i; - struct i596_rfd *rfd; - struct i596_rbd *rbd; - - /* First build the Receive Buffer Descriptor List */ - - for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) { - dma_addr_t dma_addr; - struct sk_buff *skb; - - skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); - if (skb == NULL) - return -1; - dma_addr = dma_map_single(dev->dev.parent, skb->data, - PKT_BUF_SZ, DMA_FROM_DEVICE); - rbd->v_next = rbd+1; - rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1)); - rbd->b_addr = SWAP32(virt_to_dma(lp, rbd)); - rbd->skb = skb; - rbd->v_data = skb->data; - rbd->b_data = SWAP32(dma_addr); - rbd->size = SWAP16(PKT_BUF_SZ); - } - lp->rbd_head = dma->rbds; - rbd = dma->rbds + rx_ring_size - 1; - rbd->v_next = dma->rbds; - rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds)); - - /* Now build the Receive Frame Descriptor List */ - - for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) { - rfd->rbd = I596_NULL; - rfd->v_next = rfd+1; - rfd->v_prev = rfd-1; - rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1)); - rfd->cmd = SWAP16(CMD_FLEX); - } - lp->rfd_head = dma->rfds; - dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); - rfd = dma->rfds; - rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head)); - rfd->v_prev = dma->rfds + rx_ring_size - 1; - rfd = dma->rfds + rx_ring_size - 1; - rfd->v_next = 
dma->rfds; - rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds)); - rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX); - - DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma)); - return 0; -} - -static inline void remove_rx_bufs(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - struct i596_rbd *rbd; - int i; - - for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) { - if (rbd->skb == NULL) - break; - dma_unmap_single(dev->dev.parent, - (dma_addr_t)SWAP32(rbd->b_data), - PKT_BUF_SZ, DMA_FROM_DEVICE); - dev_kfree_skb(rbd->skb); - } -} - - -static void rebuild_rx_bufs(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - struct i596_dma *dma = lp->dma; - int i; - - /* Ensure rx frame/buffer descriptors are tidy */ - - for (i = 0; i < rx_ring_size; i++) { - dma->rfds[i].rbd = I596_NULL; - dma->rfds[i].cmd = SWAP16(CMD_FLEX); - } - dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX); - lp->rfd_head = dma->rfds; - dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); - lp->rbd_head = dma->rbds; - dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds)); - - DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma)); -} - - -static int init_i596_mem(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - struct i596_dma *dma = lp->dma; - unsigned long flags; - - mpu_port(dev, PORT_RESET, 0); - udelay(100); /* Wait 100us - seems to help */ - - /* change the scp address */ - - lp->last_cmd = jiffies; - - dma->scp.sysbus = SYSBUS; - dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp))); - dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb))); - dma->iscp.stat = SWAP32(ISCP_BUSY); - lp->cmd_backlog = 0; - - lp->cmd_head = NULL; - dma->scb.cmd = I596_NULL; - - DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name)); - - DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp)); - DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp)); - DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); - - mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp)); - ca(dev); - if (wait_istat(dev, dma, 1000, "initialization timed out")) - goto failed; - DEB(DEB_INIT, printk(KERN_DEBUG - "%s: i82596 initialization successful\n", - dev->name)); - - if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) { - printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); - goto failed; - } - - /* Ensure rx frame/buffer descriptors are tidy */ - rebuild_rx_bufs(dev); - - dma->scb.command = 0; - DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); - - DEB(DEB_INIT, printk(KERN_DEBUG - "%s: queuing CmdConfigure\n", dev->name)); - memcpy(dma->cf_cmd.i596_config, init_setup, 14); - dma->cf_cmd.cmd.command = SWAP16(CmdConfigure); - DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd)); - i596_add_cmd(dev, &dma->cf_cmd.cmd); - - DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); - memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6); - dma->sa_cmd.cmd.command = SWAP16(CmdSASetup); - DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd)); - i596_add_cmd(dev, &dma->sa_cmd.cmd); - - DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name)); - dma->tdr_cmd.cmd.command = SWAP16(CmdTDR); - DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd)); - i596_add_cmd(dev, &dma->tdr_cmd.cmd); - - spin_lock_irqsave (&lp->lock, flags); - - if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) { - spin_unlock_irqrestore (&lp->lock, flags); - goto failed_free_irq; - } - DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name)); - 
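	/* The standard SCB handshake follows: fill in the command word and
	   the RFD pointer, write the SCB back to memory, then raise channel
	   attention with ca() so the 82596 re-reads it.  wait_cmd() above
	   guaranteed the previous scb.command was already consumed. */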
dma->scb.command = SWAP16(RX_START); - dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); - DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); - - ca(dev); - - spin_unlock_irqrestore (&lp->lock, flags); - if (wait_cmd(dev, dma, 1000, "RX_START not processed")) - goto failed_free_irq; - DEB(DEB_INIT, printk(KERN_DEBUG - "%s: Receive unit started OK\n", dev->name)); - return 0; - -failed_free_irq: - free_irq(dev->irq, dev); -failed: - printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name); - mpu_port(dev, PORT_RESET, 0); - return -1; -} - - -static inline int i596_rx(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - struct i596_rfd *rfd; - struct i596_rbd *rbd; - int frames = 0; - - DEB(DEB_RXFRAME, printk(KERN_DEBUG - "i596_rx(), rfd_head %p, rbd_head %p\n", - lp->rfd_head, lp->rbd_head)); - - - rfd = lp->rfd_head; /* Ref next frame to check */ - - DMA_INV(dev, rfd, sizeof(struct i596_rfd)); - while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */ - if (rfd->rbd == I596_NULL) - rbd = NULL; - else if (rfd->rbd == lp->rbd_head->b_addr) { - rbd = lp->rbd_head; - DMA_INV(dev, rbd, sizeof(struct i596_rbd)); - } else { - printk(KERN_ERR "%s: rbd chain broken!\n", dev->name); - /* XXX Now what? */ - rbd = NULL; - } - DEB(DEB_RXFRAME, printk(KERN_DEBUG - " rfd %p, rfd.rbd %08x, rfd.stat %04x\n", - rfd, rfd->rbd, rfd->stat)); - - if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) { - /* a good frame */ - int pkt_len = SWAP16(rbd->count) & 0x3fff; - struct sk_buff *skb = rbd->skb; - int rx_in_place = 0; - - DEB(DEB_RXADDR, print_eth(rbd->v_data, "received")); - frames++; - - /* Check if the packet is long enough to just accept - * without copying to a properly sized skbuff. - */ - - if (pkt_len > rx_copybreak) { - struct sk_buff *newskb; - dma_addr_t dma_addr; - - dma_unmap_single(dev->dev.parent, - (dma_addr_t)SWAP32(rbd->b_data), - PKT_BUF_SZ, DMA_FROM_DEVICE); - /* Get fresh skbuff to replace filled one. */ - newskb = netdev_alloc_skb_ip_align(dev, - PKT_BUF_SZ); - if (newskb == NULL) { - skb = NULL; /* drop pkt */ - goto memory_squeeze; - } - - /* Pass up the skb already on the Rx ring. */ - skb_put(skb, pkt_len); - rx_in_place = 1; - rbd->skb = newskb; - dma_addr = dma_map_single(dev->dev.parent, - newskb->data, - PKT_BUF_SZ, - DMA_FROM_DEVICE); - rbd->v_data = newskb->data; - rbd->b_data = SWAP32(dma_addr); - DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); - } else - skb = netdev_alloc_skb_ip_align(dev, pkt_len); -memory_squeeze: - if (skb == NULL) { - /* XXX tulip.c can defer packets here!! 
*/ - printk(KERN_ERR - "%s: i596_rx Memory squeeze, dropping packet.\n", - dev->name); - dev->stats.rx_dropped++; - } else { - if (!rx_in_place) { - /* 16 byte align the data fields */ - dma_sync_single_for_cpu(dev->dev.parent, - (dma_addr_t)SWAP32(rbd->b_data), - PKT_BUF_SZ, DMA_FROM_DEVICE); - memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len); - dma_sync_single_for_device(dev->dev.parent, - (dma_addr_t)SWAP32(rbd->b_data), - PKT_BUF_SZ, DMA_FROM_DEVICE); - } - skb->len = pkt_len; - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - } else { - DEB(DEB_ERRORS, printk(KERN_DEBUG - "%s: Error, rfd.stat = 0x%04x\n", - dev->name, rfd->stat)); - dev->stats.rx_errors++; - if (rfd->stat & SWAP16(0x0100)) - dev->stats.collisions++; - if (rfd->stat & SWAP16(0x8000)) - dev->stats.rx_length_errors++; - if (rfd->stat & SWAP16(0x0001)) - dev->stats.rx_over_errors++; - if (rfd->stat & SWAP16(0x0002)) - dev->stats.rx_fifo_errors++; - if (rfd->stat & SWAP16(0x0004)) - dev->stats.rx_frame_errors++; - if (rfd->stat & SWAP16(0x0008)) - dev->stats.rx_crc_errors++; - if (rfd->stat & SWAP16(0x0010)) - dev->stats.rx_length_errors++; - } - - /* Clear the buffer descriptor count and EOF + F flags */ - - if (rbd != NULL && (rbd->count & SWAP16(0x4000))) { - rbd->count = 0; - lp->rbd_head = rbd->v_next; - DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); - } - - /* Tidy the frame descriptor, marking it as end of list */ - - rfd->rbd = I596_NULL; - rfd->stat = 0; - rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX); - rfd->count = 0; - - /* Update record of next frame descriptor to process */ - - lp->dma->scb.rfd = rfd->b_next; - lp->rfd_head = rfd->v_next; - DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd)); - - /* Remove end-of-list from old end descriptor */ - - rfd->v_prev->cmd = SWAP16(CMD_FLEX); - DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd)); - rfd = lp->rfd_head; - DMA_INV(dev, rfd, sizeof(struct i596_rfd)); - } - - DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames)); - - return 0; -} - - -static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) -{ - struct i596_cmd *ptr; - - while (lp->cmd_head != NULL) { - ptr = lp->cmd_head; - lp->cmd_head = ptr->v_next; - lp->cmd_backlog--; - - switch (SWAP16(ptr->command) & 0x7) { - case CmdTx: - { - struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; - struct sk_buff *skb = tx_cmd->skb; - dma_unmap_single(dev->dev.parent, - tx_cmd->dma_addr, - skb->len, DMA_TO_DEVICE); - - dev_kfree_skb(skb); - - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - - ptr->v_next = NULL; - ptr->b_next = I596_NULL; - tx_cmd->cmd.command = 0; /* Mark as free */ - break; - } - default: - ptr->v_next = NULL; - ptr->b_next = I596_NULL; - } - DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd)); - } - - wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out"); - lp->dma->scb.cmd = I596_NULL; - DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb)); -} - - -static inline void i596_reset(struct net_device *dev, struct i596_private *lp) -{ - unsigned long flags; - - DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n")); - - spin_lock_irqsave (&lp->lock, flags); - - wait_cmd(dev, lp->dma, 100, "i596_reset timed out"); - - netif_stop_queue(dev); - - /* FIXME: this command might cause an lpmc */ - lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT); - DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb)); - ca(dev); - - /* wait for shutdown */ - wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed 
out"); - spin_unlock_irqrestore (&lp->lock, flags); - - i596_cleanup_cmd(dev, lp); - i596_rx(dev); - - netif_start_queue(dev); - init_i596_mem(dev); -} - - -static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) -{ - struct i596_private *lp = netdev_priv(dev); - struct i596_dma *dma = lp->dma; - unsigned long flags; - - DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n", - lp->cmd_head)); - - cmd->status = 0; - cmd->command |= SWAP16(CMD_EOL | CMD_INTR); - cmd->v_next = NULL; - cmd->b_next = I596_NULL; - DMA_WBACK(dev, cmd, sizeof(struct i596_cmd)); - - spin_lock_irqsave (&lp->lock, flags); - - if (lp->cmd_head != NULL) { - lp->cmd_tail->v_next = cmd; - lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status)); - DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd)); - } else { - lp->cmd_head = cmd; - wait_cmd(dev, dma, 100, "i596_add_cmd timed out"); - dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status)); - dma->scb.command = SWAP16(CUC_START); - DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); - ca(dev); - } - lp->cmd_tail = cmd; - lp->cmd_backlog++; - - spin_unlock_irqrestore (&lp->lock, flags); - - if (lp->cmd_backlog > max_cmd_backlog) { - unsigned long tickssofar = jiffies - lp->last_cmd; - - if (tickssofar < ticks_limit) - return; - - printk(KERN_ERR - "%s: command unit timed out, status resetting.\n", - dev->name); -#if 1 - i596_reset(dev, lp); -#endif - } -} - -static int i596_open(struct net_device *dev) -{ - DEB(DEB_OPEN, printk(KERN_DEBUG - "%s: i596_open() irq %d.\n", dev->name, dev->irq)); - - if (init_rx_bufs(dev)) { - printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name); - return -EAGAIN; - } - if (init_i596_mem(dev)) { - printk(KERN_ERR "%s: Failed to init memory\n", dev->name); - goto out_remove_rx_bufs; - } - netif_start_queue(dev); - - return 0; - -out_remove_rx_bufs: - remove_rx_bufs(dev); - return -EAGAIN; -} - -static void i596_tx_timeout (struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - - /* Transmitter timeout, serious problems. 
*/ - DEB(DEB_ERRORS, printk(KERN_DEBUG - "%s: transmit timed out, status resetting.\n", - dev->name)); - - dev->stats.tx_errors++; - - /* Try to restart the adaptor */ - if (lp->last_restart == dev->stats.tx_packets) { - DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n")); - /* Shutdown and restart */ - i596_reset (dev, lp); - } else { - /* Issue a channel attention signal */ - DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n")); - lp->dma->scb.command = SWAP16(CUC_START | RX_START); - DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb)); - ca (dev); - lp->last_restart = dev->stats.tx_packets; - } - - dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue (dev); -} - - -static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - struct tx_cmd *tx_cmd; - struct i596_tbd *tbd; - short length = skb->len; - - DEB(DEB_STARTTX, printk(KERN_DEBUG - "%s: i596_start_xmit(%x,%p) called\n", - dev->name, skb->len, skb->data)); - - if (length < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - - netif_stop_queue(dev); - - tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd; - tbd = lp->dma->tbds + lp->next_tx_cmd; - - if (tx_cmd->cmd.command) { - DEB(DEB_ERRORS, printk(KERN_DEBUG - "%s: xmit ring full, dropping packet.\n", - dev->name)); - dev->stats.tx_dropped++; - - dev_kfree_skb(skb); - } else { - if (++lp->next_tx_cmd == TX_RING_SIZE) - lp->next_tx_cmd = 0; - tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd)); - tbd->next = I596_NULL; - - tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx); - tx_cmd->skb = skb; - - tx_cmd->pad = 0; - tx_cmd->size = 0; - tbd->pad = 0; - tbd->size = SWAP16(EOF | length); - - tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data, - skb->len, DMA_TO_DEVICE); - tbd->data = SWAP32(tx_cmd->dma_addr); - - DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued")); - DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd)); - DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd)); - i596_add_cmd(dev, &tx_cmd->cmd); - - dev->stats.tx_packets++; - dev->stats.tx_bytes += length; - } - - netif_start_queue(dev); - - return NETDEV_TX_OK; -} - -static void print_eth(unsigned char *add, char *str) -{ - printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n", - add, add + 6, add, add[12], add[13], str); -} -static const struct net_device_ops i596_netdev_ops = { - .ndo_open = i596_open, - .ndo_stop = i596_close, - .ndo_start_xmit = i596_start_xmit, - .ndo_set_multicast_list = set_multicast_list, - .ndo_tx_timeout = i596_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = i596_poll_controller, -#endif -}; - -static int __devinit i82596_probe(struct net_device *dev) -{ - int i; - struct i596_private *lp = netdev_priv(dev); - struct i596_dma *dma; - - /* This lot is ensure things have been cache line aligned. 
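   Each BUILD_BUG_ON() below turns a layout assumption into a compile-time
   failure: if padding ever made sizeof(struct i596_rfd) differ from the
   32-byte cache line, the build would break instead of the shared DMA
   descriptors silently corrupting at run time.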
*/ - BUILD_BUG_ON(sizeof(struct i596_rfd) != 32); - BUILD_BUG_ON(sizeof(struct i596_rbd) & 31); - BUILD_BUG_ON(sizeof(struct tx_cmd) & 31); - BUILD_BUG_ON(sizeof(struct i596_tbd) != 32); -#ifndef __LP64__ - BUILD_BUG_ON(sizeof(struct i596_dma) > 4096); -#endif - - if (!dev->base_addr || !dev->irq) - return -ENODEV; - - dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent, - sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL); - if (!dma) { - printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__); - return -ENOMEM; - } - - dev->netdev_ops = &i596_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - - memset(dma, 0, sizeof(struct i596_dma)); - lp->dma = dma; - - dma->scb.command = 0; - dma->scb.cmd = I596_NULL; - dma->scb.rfd = I596_NULL; - spin_lock_init(&lp->lock); - - DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma)); - - i = register_netdev(dev); - if (i) { - DMA_FREE(dev->dev.parent, sizeof(struct i596_dma), - (void *)dma, lp->dma_addr); - return i; - } - - DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n", - dev->name, dev->base_addr, dev->dev_addr, - dev->irq)); - DEB(DEB_INIT, printk(KERN_INFO - "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n", - dev->name, dma, (int)sizeof(struct i596_dma), - &dma->scb)); - - return 0; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void i596_poll_controller(struct net_device *dev) -{ - disable_irq(dev->irq); - i596_interrupt(dev->irq, dev); - enable_irq(dev->irq); -} -#endif - -static irqreturn_t i596_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct i596_private *lp; - struct i596_dma *dma; - unsigned short status, ack_cmd = 0; - - lp = netdev_priv(dev); - dma = lp->dma; - - spin_lock (&lp->lock); - - wait_cmd(dev, dma, 100, "i596 interrupt, timeout"); - status = SWAP16(dma->scb.status); - - DEB(DEB_INTS, printk(KERN_DEBUG - "%s: i596 interrupt, IRQ %d, status %4.4x.\n", - dev->name, dev->irq, status)); - - ack_cmd = status & 0xf000; - - if (!ack_cmd) { - DEB(DEB_ERRORS, printk(KERN_DEBUG - "%s: interrupt with no events\n", - dev->name)); - spin_unlock (&lp->lock); - return IRQ_NONE; - } - - if ((status & 0x8000) || (status & 0x2000)) { - struct i596_cmd *ptr; - - if ((status & 0x8000)) - DEB(DEB_INTS, - printk(KERN_DEBUG - "%s: i596 interrupt completed command.\n", - dev->name)); - if ((status & 0x2000)) - DEB(DEB_INTS, - printk(KERN_DEBUG - "%s: i596 interrupt command unit inactive %x.\n", - dev->name, status & 0x0700)); - - while (lp->cmd_head != NULL) { - DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd)); - if (!(lp->cmd_head->status & SWAP16(STAT_C))) - break; - - ptr = lp->cmd_head; - - DEB(DEB_STATUS, - printk(KERN_DEBUG - "cmd_head->status = %04x, ->command = %04x\n", - SWAP16(lp->cmd_head->status), - SWAP16(lp->cmd_head->command))); - lp->cmd_head = ptr->v_next; - lp->cmd_backlog--; - - switch (SWAP16(ptr->command) & 0x7) { - case CmdTx: - { - struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; - struct sk_buff *skb = tx_cmd->skb; - - if (ptr->status & SWAP16(STAT_OK)) { - DEB(DEB_TXADDR, - print_eth(skb->data, "tx-done")); - } else { - dev->stats.tx_errors++; - if (ptr->status & SWAP16(0x0020)) - dev->stats.collisions++; - if (!(ptr->status & SWAP16(0x0040))) - dev->stats.tx_heartbeat_errors++; - if (ptr->status & SWAP16(0x0400)) - dev->stats.tx_carrier_errors++; - if (ptr->status & SWAP16(0x0800)) - dev->stats.collisions++; - if (ptr->status & SWAP16(0x1000)) - dev->stats.tx_aborted_errors++; - } - dma_unmap_single(dev->dev.parent, - tx_cmd->dma_addr, - skb->len, DMA_TO_DEVICE); - 
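			/* Hard-IRQ context under lp->lock, so the IRQ-safe
			   variant of kfree_skb is required; it defers the
			   actual free to softirq time rather than freeing
			   inside the handler itself. */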
dev_kfree_skb_irq(skb); - - tx_cmd->cmd.command = 0; /* Mark free */ - break; - } - case CmdTDR: - { - unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status); - - if (status & 0x8000) { - DEB(DEB_ANY, - printk(KERN_DEBUG "%s: link ok.\n", - dev->name)); - } else { - if (status & 0x4000) - printk(KERN_ERR - "%s: Transceiver problem.\n", - dev->name); - if (status & 0x2000) - printk(KERN_ERR - "%s: Termination problem.\n", - dev->name); - if (status & 0x1000) - printk(KERN_ERR - "%s: Short circuit.\n", - dev->name); - - DEB(DEB_TDR, - printk(KERN_DEBUG "%s: Time %d.\n", - dev->name, status & 0x07ff)); - } - break; - } - case CmdConfigure: - /* - * Zap command so set_multicast_list() know - * it is free - */ - ptr->command = 0; - break; - } - ptr->v_next = NULL; - ptr->b_next = I596_NULL; - DMA_WBACK(dev, ptr, sizeof(struct i596_cmd)); - lp->last_cmd = jiffies; - } - - /* This mess is arranging that only the last of any outstanding - * commands has the interrupt bit set. Should probably really - * only add to the cmd queue when the CU is stopped. - */ - ptr = lp->cmd_head; - while ((ptr != NULL) && (ptr != lp->cmd_tail)) { - struct i596_cmd *prev = ptr; - - ptr->command &= SWAP16(0x1fff); - ptr = ptr->v_next; - DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd)); - } - - if (lp->cmd_head != NULL) - ack_cmd |= CUC_START; - dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status)); - DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb)); - } - if ((status & 0x1000) || (status & 0x4000)) { - if ((status & 0x4000)) - DEB(DEB_INTS, - printk(KERN_DEBUG - "%s: i596 interrupt received a frame.\n", - dev->name)); - i596_rx(dev); - /* Only RX_START if stopped - RGH 07-07-96 */ - if (status & 0x1000) { - if (netif_running(dev)) { - DEB(DEB_ERRORS, - printk(KERN_DEBUG - "%s: i596 interrupt receive unit inactive, status 0x%x\n", - dev->name, status)); - ack_cmd |= RX_START; - dev->stats.rx_errors++; - dev->stats.rx_fifo_errors++; - rebuild_rx_bufs(dev); - } - } - } - wait_cmd(dev, dma, 100, "i596 interrupt, timeout"); - dma->scb.command = SWAP16(ack_cmd); - DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb)); - - /* DANGER: I suspect that some kind of interrupt - acknowledgement aside from acking the 82596 might be needed - here... but it's running acceptably without */ - - ca(dev); - - wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout"); - DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); - - spin_unlock (&lp->lock); - return IRQ_HANDLED; -} - -static int i596_close(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - unsigned long flags; - - netif_stop_queue(dev); - - DEB(DEB_INIT, - printk(KERN_DEBUG - "%s: Shutting down ethercard, status was %4.4x.\n", - dev->name, SWAP16(lp->dma->scb.status))); - - spin_lock_irqsave(&lp->lock, flags); - - wait_cmd(dev, lp->dma, 100, "close1 timed out"); - lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT); - DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb)); - - ca(dev); - - wait_cmd(dev, lp->dma, 100, "close2 timed out"); - spin_unlock_irqrestore(&lp->lock, flags); - DEB(DEB_STRUCT, i596_display_data(dev)); - i596_cleanup_cmd(dev, lp); - - free_irq(dev->irq, dev); - remove_rx_bufs(dev); - - return 0; -} - -/* - * Set or clear the multicast filter for this adaptor. 
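The CmdTDR branch above packs the whole wire-test verdict into a single status word: bit 15 = link OK, bit 14 = transceiver problem, bit 13 = termination problem, bit 12 = short circuit, and the low 11 bits give the time to the fault. A standalone sketch of that decode (userspace printf instead of printk, purely illustrative):

	#include <stdio.h>

	static void decode_tdr(unsigned short status)
	{
		if (status & 0x8000) {
			printf("link ok\n");
			return;
		}
		if (status & 0x4000)
			printf("transceiver problem\n");
		if (status & 0x2000)
			printf("termination problem\n");
		if (status & 0x1000)
			printf("short circuit\n");
		printf("time %d\n", status & 0x07ff);
	}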
- */ - -static void set_multicast_list(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - struct i596_dma *dma = lp->dma; - int config = 0, cnt; - - DEB(DEB_MULTI, - printk(KERN_DEBUG - "%s: set multicast list, %d entries, promisc %s, allmulti %s\n", - dev->name, netdev_mc_count(dev), - dev->flags & IFF_PROMISC ? "ON" : "OFF", - dev->flags & IFF_ALLMULTI ? "ON" : "OFF")); - - if ((dev->flags & IFF_PROMISC) && - !(dma->cf_cmd.i596_config[8] & 0x01)) { - dma->cf_cmd.i596_config[8] |= 0x01; - config = 1; - } - if (!(dev->flags & IFF_PROMISC) && - (dma->cf_cmd.i596_config[8] & 0x01)) { - dma->cf_cmd.i596_config[8] &= ~0x01; - config = 1; - } - if ((dev->flags & IFF_ALLMULTI) && - (dma->cf_cmd.i596_config[11] & 0x20)) { - dma->cf_cmd.i596_config[11] &= ~0x20; - config = 1; - } - if (!(dev->flags & IFF_ALLMULTI) && - !(dma->cf_cmd.i596_config[11] & 0x20)) { - dma->cf_cmd.i596_config[11] |= 0x20; - config = 1; - } - if (config) { - if (dma->cf_cmd.cmd.command) - printk(KERN_INFO - "%s: config change request already queued\n", - dev->name); - else { - dma->cf_cmd.cmd.command = SWAP16(CmdConfigure); - DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd)); - i596_add_cmd(dev, &dma->cf_cmd.cmd); - } - } - - cnt = netdev_mc_count(dev); - if (cnt > MAX_MC_CNT) { - cnt = MAX_MC_CNT; - printk(KERN_NOTICE "%s: Only %d multicast addresses supported", - dev->name, cnt); - } - - if (!netdev_mc_empty(dev)) { - struct netdev_hw_addr *ha; - unsigned char *cp; - struct mc_cmd *cmd; - - cmd = &dma->mc_cmd; - cmd->cmd.command = SWAP16(CmdMulticastList); - cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6); - cp = cmd->mc_addrs; - netdev_for_each_mc_addr(ha, dev) { - if (!cnt--) - break; - memcpy(cp, ha->addr, 6); - if (i596_debug > 1) - DEB(DEB_MULTI, - printk(KERN_DEBUG - "%s: Adding address %pM\n", - dev->name, cp)); - cp += 6; - } - DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); - i596_add_cmd(dev, &cmd->cmd); - } -} diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c deleted file mode 100644 index 385a953..0000000 --- a/drivers/net/lp486e.c +++ /dev/null @@ -1,1339 +0,0 @@ -/* Intel Professional Workstation/panther ethernet driver */ -/* lp486e.c: A panther 82596 ethernet driver for linux. */ -/* - History and copyrights: - - Driver skeleton - Written 1993 by Donald Becker. - Copyright 1993 United States Government as represented by the Director, - National Security Agency. This software may only be used and - distributed according to the terms of the GNU General Public License - as modified by SRC, incorporated herein by reference. - - The author may be reached as becker@scyld.com, or C/O - Scyld Computing Corporation - 410 Severn Ave., Suite 210 - Annapolis MD 21403 - - Apricot - Written 1994 by Mark Evans. - This driver is for the Apricot 82596 bus-master interface - - Modularised 12/94 Mark Evans - - Professional Workstation - Derived from apricot.c by Ard van Breemen - || - - Credits: - Thanks to Murphy Software BV for letting me write this in their time. - Well, actually, I get paid doing this... - (Also: see http://www.murphy.nl for murphy, and my homepage ~ard for - more information on the Professional Workstation) - - Present version - aeb@cwi.nl -*/ -/* - There are currently two motherboards that I know of in the - professional workstation. The only one that I know is the - intel panther motherboard. -- ard -*/ -/* -The pws is equipped with an intel 82596. This is a very intelligent controller -which runs its own micro-code. 
Communication with the hostprocessor is done -through linked lists of commands and buffers in the hostprocessors memory. -A complete description of the 82596 is available from intel. Search for -a file called "29021806.pdf". It is a complete description of the chip itself. -To use it for the pws some additions are needed regarding generation of -the PORT and CA signal, and the interrupt glue needed for a pc. -I/O map: -PORT SIZE ACTION MEANING -0xCB0 2 WRITE Lower 16 bits for PORT command -0xCB2 2 WRITE Upper 16 bits for PORT command, and issue of PORT command -0xCB4 1 WRITE Generation of CA signal -0xCB8 1 WRITE Clear interrupt glue -All other communication is through memory! -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define DRV_NAME "lp486e" - -/* debug print flags */ -#define LOG_SRCDST 0x80000000 -#define LOG_STATINT 0x40000000 -#define LOG_STARTINT 0x20000000 - -#define i596_debug debug - -static int i596_debug = 0; - -static const char * const medianame[] = { - "10baseT", "AUI", - "10baseT-FD", "AUI-FD", -}; - -#define LP486E_TOTAL_SIZE 16 - -#define I596_NULL (0xffffffff) - -#define CMD_EOL 0x8000 /* The last command of the list, stop. */ -#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ -#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ - -#define CMD_FLEX 0x0008 /* Enable flexible memory model */ - -enum commands { - CmdNOP = 0, - CmdIASetup = 1, - CmdConfigure = 2, - CmdMulticastList = 3, - CmdTx = 4, - CmdTDR = 5, - CmdDump = 6, - CmdDiagnose = 7 -}; - -#if 0 -static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList", - "Tx", "TDR", "Dump", "Diagnose" }; -#endif - -/* Status word bits */ -#define STAT_CX 0x8000 /* The CU finished executing a command - with the Interrupt bit set */ -#define STAT_FR 0x4000 /* The RU finished receiving a frame */ -#define STAT_CNA 0x2000 /* The CU left the active state */ -#define STAT_RNR 0x1000 /* The RU left the active state */ -#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR) -#define STAT_CUS 0x0700 /* Status of CU: 0: idle, 1: suspended, - 2: active, 3-7: unused */ -#define STAT_RUS 0x00f0 /* Status of RU: 0: idle, 1: suspended, - 2: no resources, 4: ready, - 10: no resources due to no more RBDs, - 12: no more RBDs, other: unused */ -#define STAT_T 0x0008 /* Bus throttle timers loaded */ -#define STAT_ZERO 0x0807 /* Always zero */ - -#if 0 -static char *CUstates[8] = { - "idle", "suspended", "active", 0, 0, 0, 0, 0 -}; -static char *RUstates[16] = { - "idle", "suspended", "no resources", 0, "ready", 0, 0, 0, - 0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0 -}; - -static void -i596_out_status(int status) { - int bad = 0; - char *s; - - printk("status %4.4x:", status); - if (status == 0xffff) - printk(" strange..\n"); - else { - if (status & STAT_CX) - printk(" CU done"); - if (status & STAT_CNA) - printk(" CU stopped"); - if (status & STAT_FR) - printk(" got a frame"); - if (status & STAT_RNR) - printk(" RU stopped"); - if (status & STAT_T) - printk(" throttled"); - if (status & STAT_ZERO) - bad = 1; - s = CUstates[(status & STAT_CUS) >> 8]; - if (!s) - bad = 1; - else - printk(" CU(%s)", s); - s = RUstates[(status & STAT_RUS) >> 4]; - if (!s) - bad = 1; - else - printk(" RU(%s)", s); - if (bad) - printk(" bad status"); - printk("\n"); - } -} -#endif - -/* Command word bits */ -#define ACK_CX 0x8000 -#define ACK_FR 0x4000 -#define ACK_CNA 0x2000 -#define ACK_RNR 0x1000 - -#define 
CUC_START 0x0100 -#define CUC_RESUME 0x0200 -#define CUC_SUSPEND 0x0300 -#define CUC_ABORT 0x0400 - -#define RX_START 0x0010 -#define RX_RESUME 0x0020 -#define RX_SUSPEND 0x0030 -#define RX_ABORT 0x0040 - -typedef u32 phys_addr; - -static inline phys_addr -va_to_pa(void *x) { - return x ? virt_to_bus(x) : I596_NULL; -} - -static inline void * -pa_to_va(phys_addr x) { - return (x == I596_NULL) ? NULL : bus_to_virt(x); -} - -/* status bits for cmd */ -#define CMD_STAT_C 0x8000 /* CU command complete */ -#define CMD_STAT_B 0x4000 /* CU command in progress */ -#define CMD_STAT_OK 0x2000 /* CU command completed without errors */ -#define CMD_STAT_A 0x1000 /* CU command abnormally terminated */ - -struct i596_cmd { /* 8 bytes */ - unsigned short status; - unsigned short command; - phys_addr pa_next; /* va_to_pa(struct i596_cmd *next) */ -}; - -#define EOF 0x8000 -#define SIZE_MASK 0x3fff - -struct i596_tbd { - unsigned short size; - unsigned short pad; - phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */ - phys_addr pa_data; /* va_to_pa(char *data) */ - struct sk_buff *skb; -}; - -struct tx_cmd { - struct i596_cmd cmd; - phys_addr pa_tbd; /* va_to_pa(struct i596_tbd *tbd) */ - unsigned short size; - unsigned short pad; -}; - -/* status bits for rfd */ -#define RFD_STAT_C 0x8000 /* Frame reception complete */ -#define RFD_STAT_B 0x4000 /* Frame reception in progress */ -#define RFD_STAT_OK 0x2000 /* Frame received without errors */ -#define RFD_STATUS 0x1fff -#define RFD_LENGTH_ERR 0x1000 -#define RFD_CRC_ERR 0x0800 -#define RFD_ALIGN_ERR 0x0400 -#define RFD_NOBUFS_ERR 0x0200 -#define RFD_DMA_ERR 0x0100 /* DMA overrun failure to acquire system bus */ -#define RFD_SHORT_FRAME_ERR 0x0080 -#define RFD_NOEOP_ERR 0x0040 -#define RFD_TRUNC_ERR 0x0020 -#define RFD_MULTICAST 0x0002 /* 0: destination had our address - 1: destination was broadcast/multicast */ -#define RFD_COLLISION 0x0001 - -/* receive frame descriptor */ -struct i596_rfd { - unsigned short stat; - unsigned short cmd; - phys_addr pa_next; /* va_to_pa(struct i596_rfd *next) */ - phys_addr pa_rbd; /* va_to_pa(struct i596_rbd *rbd) */ - unsigned short count; - unsigned short size; - char data[1532]; -}; - -#define RBD_EL 0x8000 -#define RBD_P 0x4000 -#define RBD_SIZEMASK 0x3fff -#define RBD_EOF 0x8000 -#define RBD_F 0x4000 - -/* receive buffer descriptor */ -struct i596_rbd { - unsigned short size; - unsigned short pad; - phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */ - phys_addr pa_data; /* va_to_pa(char *data) */ - phys_addr pa_prev; /* va_to_pa(struct i596_tbd *prev) */ - - /* Driver private part */ - struct sk_buff *skb; -}; - -#define RX_RING_SIZE 64 -#define RX_SKBSIZE (ETH_FRAME_LEN+10) -#define RX_RBD_SIZE 32 - -/* System Control Block - 40 bytes */ -struct i596_scb { - u16 status; /* 0 */ - u16 command; /* 2 */ - phys_addr pa_cmd; /* 4 - va_to_pa(struct i596_cmd *cmd) */ - phys_addr pa_rfd; /* 8 - va_to_pa(struct i596_rfd *rfd) */ - u32 crc_err; /* 12 */ - u32 align_err; /* 16 */ - u32 resource_err; /* 20 */ - u32 over_err; /* 24 */ - u32 rcvdt_err; /* 28 */ - u32 short_err; /* 32 */ - u16 t_on; /* 36 */ - u16 t_off; /* 38 */ -}; - -/* Intermediate System Configuration Pointer - 8 bytes */ -struct i596_iscp { - u32 busy; /* 0 */ - phys_addr pa_scb; /* 4 - va_to_pa(struct i596_scb *scb) */ -}; - -/* System Configuration Pointer - 12 bytes */ -struct i596_scp { - u32 sysbus; /* 0 */ - u32 pad; /* 4 */ - phys_addr pa_iscp; /* 8 - va_to_pa(struct i596_iscp *iscp) */ -}; - -/* Selftest and dump results - needs 16-byte 
alignment */ -/* - * The size of the dump area is 304 bytes. When the dump is executed - * by the Port command an extra word will be appended to the dump area. - * The extra word is a copy of the Dump status word (containing the - * C, B, OK bits). [I find 0xa006, with a0 for C+OK and 6 for dump] - */ -struct i596_dump { - u16 dump[153]; /* (304 = 130h) + 2 bytes */ -}; - -struct i596_private { /* aligned to a 16-byte boundary */ - struct i596_scp scp; /* 0 - needs 16-byte alignment */ - struct i596_iscp iscp; /* 12 */ - struct i596_scb scb; /* 20 */ - u32 dummy; /* 60 */ - struct i596_dump dump; /* 64 - needs 16-byte alignment */ - - struct i596_cmd set_add; - char eth_addr[8]; /* directly follows set_add */ - - struct i596_cmd set_conf; - char i596_config[16]; /* directly follows set_conf */ - - struct i596_cmd tdr; - unsigned long tdr_stat; /* directly follows tdr */ - - int last_restart; - struct i596_rbd *rbd_list; - struct i596_rbd *rbd_tail; - struct i596_rfd *rx_tail; - struct i596_cmd *cmd_tail; - struct i596_cmd *cmd_head; - int cmd_backlog; - unsigned long last_cmd; - spinlock_t cmd_lock; -}; - -static char init_setup[14] = { - 0x8E, /* length 14 bytes, prefetch on */ - 0xC8, /* default: fifo to 8, monitor off */ - 0x40, /* default: don't save bad frames (apricot.c had 0x80) */ - 0x2E, /* (default is 0x26) - No source address insertion, 8 byte preamble */ - 0x00, /* default priority and backoff */ - 0x60, /* default interframe spacing */ - 0x00, /* default slot time LSB */ - 0xf2, /* default slot time and nr of retries */ - 0x00, /* default various bits - (0: promiscuous mode, 1: broadcast disable, - 2: encoding mode, 3: transmit on no CRS, - 4: no CRC insertion, 5: CRC type, - 6: bit stuffing, 7: padding) */ - 0x00, /* default carrier sense and collision detect */ - 0x40, /* default minimum frame length */ - 0xff, /* (default is 0xff, and that is what apricot.c has; - elp486.c has 0xfb: Enable crc append in memory.) 
*/ - 0x00, /* default: not full duplex */ - 0x7f /* (default is 0x3f) multi IA */ -}; - -static int i596_open(struct net_device *dev); -static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); -static irqreturn_t i596_interrupt(int irq, void *dev_id); -static int i596_close(struct net_device *dev); -static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); -static void print_eth(char *); -static void set_multicast_list(struct net_device *dev); -static void i596_tx_timeout(struct net_device *dev); - -static int -i596_timeout(struct net_device *dev, char *msg, int ct) { - struct i596_private *lp; - int boguscnt = ct; - - lp = netdev_priv(dev); - while (lp->scb.command) { - if (--boguscnt == 0) { - printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n", - dev->name, msg, - lp->scb.status, lp->scb.command); - return 1; - } - udelay(5); - barrier(); - } - return 0; -} - -static inline int -init_rx_bufs(struct net_device *dev, int num) { - struct i596_private *lp; - struct i596_rfd *rfd; - int i; - // struct i596_rbd *rbd; - - lp = netdev_priv(dev); - lp->scb.pa_rfd = I596_NULL; - - for (i = 0; i < num; i++) { - rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL); - if (rfd == NULL) - break; - - rfd->stat = 0; - rfd->pa_rbd = I596_NULL; - rfd->count = 0; - rfd->size = 1532; - if (i == 0) { - rfd->cmd = CMD_EOL; - lp->rx_tail = rfd; - } else { - rfd->cmd = 0; - } - rfd->pa_next = lp->scb.pa_rfd; - lp->scb.pa_rfd = va_to_pa(rfd); - lp->rx_tail->pa_next = lp->scb.pa_rfd; - } - -#if 0 - for (i = 0; ipad = 0; - rbd->count = 0; - rbd->skb = dev_alloc_skb(RX_SKBSIZE); - if (!rbd->skb) { - printk("dev_alloc_skb failed"); - } - rbd->next = rfd->rbd; - if (i) { - rfd->rbd->prev = rbd; - rbd->size = RX_SKBSIZE; - } else { - rbd->size = (RX_SKBSIZE | RBD_EL); - lp->rbd_tail = rbd; - } - - rfd->rbd = rbd; - } else { - printk("Could not kmalloc rbd\n"); - } - } - lp->rbd_tail->next = rfd->rbd; -#endif - return i; -} - -static inline void -remove_rx_bufs(struct net_device *dev) { - struct i596_private *lp; - struct i596_rfd *rfd; - - lp = netdev_priv(dev); - lp->rx_tail->pa_next = I596_NULL; - - do { - rfd = pa_to_va(lp->scb.pa_rfd); - lp->scb.pa_rfd = rfd->pa_next; - kfree(rfd); - } while (rfd != lp->rx_tail); - - lp->rx_tail = NULL; - -#if 0 - for (lp->rbd_list) { - } -#endif -} - -#define PORT_RESET 0x00 /* reset 82596 */ -#define PORT_SELFTEST 0x01 /* selftest */ -#define PORT_ALTSCP 0x02 /* alternate SCB address */ -#define PORT_DUMP 0x03 /* dump */ - -#define IOADDR 0xcb0 /* real constant */ -#define IRQ 10 /* default IRQ - can be changed by ECU */ - -/* The 82596 requires two 16-bit write cycles for a port command */ -static inline void -PORT(phys_addr a, unsigned int cmd) { - if (a & 0xf) - printk("lp486e.c: PORT: address not aligned\n"); - outw(((a & 0xffff) | cmd), IOADDR); - outw(((a>>16) & 0xffff), IOADDR+2); -} - -static inline void -CA(void) { - outb(0, IOADDR+4); - udelay(8); -} - -static inline void -CLEAR_INT(void) { - outb(0, IOADDR+8); -} - -#if 0 -/* selftest or dump */ -static void -i596_port_do(struct net_device *dev, int portcmd, char *cmdname) { - struct i596_private *lp = netdev_priv(dev); - u16 *outp; - int i, m; - - memset((void *)&(lp->dump), 0, sizeof(struct i596_dump)); - outp = &(lp->dump.dump[0]); - - PORT(va_to_pa(outp), portcmd); - mdelay(30); /* random, unmotivated */ - - printk("lp486e i82596 %s result:\n", cmdname); - for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--) - ; - for (i = 0; i < m; i++) { - printk(" %04x", 
lp->dump.dump[i]); - if (i%8 == 7) - printk("\n"); - } - printk("\n"); -} -#endif - -static int -i596_scp_setup(struct net_device *dev) { - struct i596_private *lp = netdev_priv(dev); - int boguscnt; - - /* Setup SCP, ISCP, SCB */ - /* - * sysbus bits: - * only a single byte is significant - here 0x44 - * 0x80: big endian mode (details depend on stepping) - * 0x40: 1 - * 0x20: interrupt pin is active low - * 0x10: lock function disabled - * 0x08: external triggering of bus throttle timers - * 0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode - * 0x01: unused - */ - lp->scp.sysbus = 0x00440000; /* linear mode */ - lp->scp.pad = 0; /* must be zero */ - lp->scp.pa_iscp = va_to_pa(&(lp->iscp)); - - /* - * The CPU sets the ISCP to 1 before it gives the first CA() - */ - lp->iscp.busy = 0x0001; - lp->iscp.pa_scb = va_to_pa(&(lp->scb)); - - lp->scb.command = 0; - lp->scb.status = 0; - lp->scb.pa_cmd = I596_NULL; - /* lp->scb.pa_rfd has been initialised already */ - - lp->last_cmd = jiffies; - lp->cmd_backlog = 0; - lp->cmd_head = NULL; - - /* - * Reset the 82596. - * We need to wait 10 systemclock cycles, and - * 5 serial clock cycles. - */ - PORT(0, PORT_RESET); /* address part ignored */ - udelay(100); - - /* - * Before the CA signal is asserted, the default SCP address - * (0x00fffff4) can be changed to a 16-byte aligned value - */ - PORT(va_to_pa(&lp->scp), PORT_ALTSCP); /* change the scp address */ - - /* - * The initialization procedure begins when a - * Channel Attention signal is asserted after a reset. - */ - - CA(); - - /* - * The ISCP busy is cleared by the 82596 after the SCB address is read. - */ - boguscnt = 100; - while (lp->iscp.busy) { - if (--boguscnt == 0) { - /* No i82596 present? */ - printk("%s: i82596 initialization timed out\n", - dev->name); - return 1; - } - udelay(5); - barrier(); - } - /* I find here boguscnt==100, so no delay was required. 
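The sysbus comment above fully determines the magic 0x00440000: the significant byte lives in bits 16-23, and 0x44 is just the mandatory 0x40 bit plus linear mode (mode field 0x06 set to binary 10). A sketch deriving it from the documented bits instead of hard-coding (macro names are made up here):

	#define SYSBUS_ALWAYS_ONE	0x40	/* must be set */
	#define SYSBUS_INT_ACTIVE_LOW	0x20
	#define SYSBUS_LOCK_DISABLED	0x10
	#define SYSBUS_EXT_THROTTLE	0x08
	#define SYSBUS_MODE_LINEAR	0x04	/* 00: 82586 compat, 01: segmented, 10: linear */

	static unsigned int make_scp_sysbus(void)
	{
		/* 0x44 shifted into bits 16..23 gives 0x00440000 */
		return (SYSBUS_ALWAYS_ONE | SYSBUS_MODE_LINEAR) << 16;
	}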
*/ - - return 0; -} - -static int -init_i596(struct net_device *dev) { - struct i596_private *lp; - - if (i596_scp_setup(dev)) - return 1; - - lp = netdev_priv(dev); - lp->scb.command = 0; - - memcpy ((void *)lp->i596_config, init_setup, 14); - lp->set_conf.command = CmdConfigure; - i596_add_cmd(dev, (void *)&lp->set_conf); - - memcpy ((void *)lp->eth_addr, dev->dev_addr, 6); - lp->set_add.command = CmdIASetup; - i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add); - - lp->tdr.command = CmdTDR; - i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr); - - if (lp->scb.command && i596_timeout(dev, "i82596 init", 200)) - return 1; - - lp->scb.command = RX_START; - CA(); - - barrier(); - - if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100)) - return 1; - - return 0; -} - -/* Receive a single frame */ -static inline int -i596_rx_one(struct net_device *dev, struct i596_private *lp, - struct i596_rfd *rfd, int *frames) { - - if (rfd->stat & RFD_STAT_OK) { - /* a good frame */ - int pkt_len = (rfd->count & 0x3fff); - struct sk_buff *skb = dev_alloc_skb(pkt_len); - - (*frames)++; - - if (rfd->cmd & CMD_EOL) - printk("Received on EOL\n"); - - if (skb == NULL) { - printk ("%s: i596_rx Memory squeeze, " - "dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - return 1; - } - - memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len); - - skb->protocol = eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - } else { -#if 0 - printk("Frame reception error status %04x\n", - rfd->stat); -#endif - dev->stats.rx_errors++; - if (rfd->stat & RFD_COLLISION) - dev->stats.collisions++; - if (rfd->stat & RFD_SHORT_FRAME_ERR) - dev->stats.rx_length_errors++; - if (rfd->stat & RFD_DMA_ERR) - dev->stats.rx_over_errors++; - if (rfd->stat & RFD_NOBUFS_ERR) - dev->stats.rx_fifo_errors++; - if (rfd->stat & RFD_ALIGN_ERR) - dev->stats.rx_frame_errors++; - if (rfd->stat & RFD_CRC_ERR) - dev->stats.rx_crc_errors++; - if (rfd->stat & RFD_LENGTH_ERR) - dev->stats.rx_length_errors++; - } - rfd->stat = rfd->count = 0; - return 0; -} - -static int -i596_rx(struct net_device *dev) { - struct i596_private *lp = netdev_priv(dev); - struct i596_rfd *rfd; - int frames = 0; - - while (1) { - rfd = pa_to_va(lp->scb.pa_rfd); - if (!rfd) { - printk(KERN_ERR "i596_rx: NULL rfd?\n"); - return 0; - } -#if 1 - if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B))) - printk("SF:%p-%04x\n", rfd, rfd->stat); -#endif - if (!(rfd->stat & RFD_STAT_C)) - break; /* next one not ready */ - if (i596_rx_one(dev, lp, rfd, &frames)) - break; /* out of memory */ - rfd->cmd = CMD_EOL; - lp->rx_tail->cmd = 0; - lp->rx_tail = rfd; - lp->scb.pa_rfd = rfd->pa_next; - barrier(); - } - - return frames; -} - -static void -i596_cleanup_cmd(struct net_device *dev) { - struct i596_private *lp; - struct i596_cmd *cmd; - - lp = netdev_priv(dev); - while (lp->cmd_head) { - cmd = (struct i596_cmd *)lp->cmd_head; - - lp->cmd_head = pa_to_va(lp->cmd_head->pa_next); - lp->cmd_backlog--; - - switch ((cmd->command) & 0x7) { - case CmdTx: { - struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd; - struct i596_tbd * tx_cmd_tbd; - tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd); - - dev_kfree_skb_any(tx_cmd_tbd->skb); - - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - - cmd->pa_next = I596_NULL; - kfree((unsigned char *)tx_cmd); - netif_wake_queue(dev); - break; - } - case CmdMulticastList: { - // unsigned short count = *((unsigned short *) (ptr + 1)); - - cmd->pa_next = I596_NULL; - kfree((unsigned char *)cmd); - break; - } - default: { - cmd->pa_next 
= I596_NULL; - break; - } - } - barrier(); - } - - if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100)) - ; - - lp->scb.pa_cmd = va_to_pa(lp->cmd_head); -} - -static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) { - - if (lp->scb.command && i596_timeout(dev, "i596_reset", 100)) - ; - - netif_stop_queue(dev); - - lp->scb.command = CUC_ABORT | RX_ABORT; - CA(); - barrier(); - - /* wait for shutdown */ - if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400)) - ; - - i596_cleanup_cmd(dev); - i596_rx(dev); - - netif_start_queue(dev); - /*dev_kfree_skb(skb, FREE_WRITE);*/ - init_i596(dev); -} - -static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) { - struct i596_private *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long flags; - - cmd->status = 0; - cmd->command |= (CMD_EOL | CMD_INTR); - cmd->pa_next = I596_NULL; - - spin_lock_irqsave(&lp->cmd_lock, flags); - - if (lp->cmd_head) { - lp->cmd_tail->pa_next = va_to_pa(cmd); - } else { - lp->cmd_head = cmd; - if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100)) - ; - lp->scb.pa_cmd = va_to_pa(cmd); - lp->scb.command = CUC_START; - CA(); - } - lp->cmd_tail = cmd; - lp->cmd_backlog++; - - lp->cmd_head = pa_to_va(lp->scb.pa_cmd); - spin_unlock_irqrestore(&lp->cmd_lock, flags); - - if (lp->cmd_backlog > 16) { - int tickssofar = jiffies - lp->last_cmd; - if (tickssofar < HZ/4) - return; - - printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name); - i596_reset(dev, lp, ioaddr); - } -} - -static int i596_open(struct net_device *dev) -{ - int i; - - i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev); - if (i) { - printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); - return i; - } - - if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE) - printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i); - - if (i < 4) { - free_irq(dev->irq, dev); - return -EAGAIN; - } - netif_start_queue(dev); - init_i596(dev); - return 0; /* Always succeed */ -} - -static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { - struct tx_cmd *tx_cmd; - short length; - - length = skb->len; - - if (length < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - - tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC); - if (tx_cmd == NULL) { - printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name); - dev->stats.tx_dropped++; - dev_kfree_skb (skb); - } else { - struct i596_tbd *tx_cmd_tbd; - tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1); - tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd); - tx_cmd_tbd->pa_next = I596_NULL; - - tx_cmd->cmd.command = (CMD_FLEX | CmdTx); - - tx_cmd->pad = 0; - tx_cmd->size = 0; - tx_cmd_tbd->pad = 0; - tx_cmd_tbd->size = (EOF | length); - - tx_cmd_tbd->pa_data = va_to_pa (skb->data); - tx_cmd_tbd->skb = skb; - - if (i596_debug & LOG_SRCDST) - print_eth (skb->data); - - i596_add_cmd (dev, (struct i596_cmd *) tx_cmd); - - dev->stats.tx_packets++; - } - - return NETDEV_TX_OK; -} - -static void -i596_tx_timeout (struct net_device *dev) { - struct i596_private *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - /* Transmitter timeout, serious problems. 
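Both tx-timeout handlers in this patch (the 82596 one at the top of this hunk and the lp486e one just below) share one heuristic: hard-reset only when dev->stats.tx_packets has not advanced since the last restart, otherwise just kick the chip with a channel attention. The decision abstracted into a sketch (illustrative callbacks, not driver code):

	static void watchdog_decide(unsigned long *last_restart,
				    unsigned long tx_packets,
				    void (*reset_board)(void),
				    void (*kick_board)(void))
	{
		if (*last_restart == tx_packets) {
			/* no packet went out since the last restart */
			reset_board();
		} else {
			/* still making progress: just issue CA */
			kick_board();
			*last_restart = tx_packets;
		}
	}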
*/ - printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name); - dev->stats.tx_errors++; - - /* Try to restart the adaptor */ - if (lp->last_restart == dev->stats.tx_packets) { - printk ("Resetting board.\n"); - - /* Shutdown and restart */ - i596_reset (dev, lp, ioaddr); - } else { - /* Issue a channel attention signal */ - printk ("Kicking board.\n"); - lp->scb.command = (CUC_START | RX_START); - CA(); - lp->last_restart = dev->stats.tx_packets; - } - netif_wake_queue(dev); -} - -static void print_eth(char *add) -{ - int i; - - printk ("Dest "); - for (i = 0; i < 6; i++) - printk(" %2.2X", (unsigned char) add[i]); - printk ("\n"); - - printk ("Source"); - for (i = 0; i < 6; i++) - printk(" %2.2X", (unsigned char) add[i+6]); - printk ("\n"); - - printk ("type %2.2X%2.2X\n", - (unsigned char) add[12], (unsigned char) add[13]); -} - -static const struct net_device_ops i596_netdev_ops = { - .ndo_open = i596_open, - .ndo_stop = i596_close, - .ndo_start_xmit = i596_start_xmit, - .ndo_set_multicast_list = set_multicast_list, - .ndo_tx_timeout = i596_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init lp486e_probe(struct net_device *dev) { - struct i596_private *lp; - unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 }; - unsigned char *bios; - int i, j; - int ret = -ENOMEM; - static int probed; - - if (probed) - return -ENODEV; - probed++; - - if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) { - printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR); - return -EBUSY; - } - - lp = netdev_priv(dev); - spin_lock_init(&lp->cmd_lock); - - /* - * Do we really have this thing? - */ - if (i596_scp_setup(dev)) { - ret = -ENODEV; - goto err_out_kfree; - } - - dev->base_addr = IOADDR; - dev->irq = IRQ; - - - /* - * How do we find the ethernet address? I don't know. - * One possibility is to look at the EISA configuration area - * [0xe8000-0xe9fff]. This contains the ethernet address - * but not at a fixed address - things depend on setup options. - * - * If we find no address, or the wrong address, use - * ifconfig eth0 hw ether a1:a2:a3:a4:a5:a6 - * with the value found in the BIOS setup. - */ - bios = bus_to_virt(0xe8000); - for (j = 0; j < 0x2000; j++) { - if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) { - printk("%s: maybe address at BIOS 0x%x:", - dev->name, 0xe8000+j); - for (i = 0; i < 6; i++) { - eth_addr[i] = bios[i+j]; - printk(" %2.2X", eth_addr[i]); - } - printk("\n"); - } - } - - printk("%s: lp486e 82596 at %#3lx, IRQ %d,", - dev->name, dev->base_addr, dev->irq); - for (i = 0; i < 6; i++) - printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]); - printk("\n"); - - /* The LP486E-specific entries in the device structure. 
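The address hunt in lp486e_probe above is a plain byte-pattern scan of the 8K EISA configuration area for the 00:aa:00 prefix. As a self-contained sketch (hypothetical helper; the real code only prints the candidates it finds):

	#include <string.h>

	/* Scan len bytes for a 00:aa:00 prefix; copy the 6-byte candidate
	 * into mac and return its offset, or -1 if none is found. */
	static int scan_for_mac(const unsigned char *area, int len,
				unsigned char mac[6])
	{
		int j;

		for (j = 0; j + 6 <= len; j++) {
			if (area[j] == 0x00 && area[j + 1] == 0xaa &&
			    area[j + 2] == 0x00) {
				memcpy(mac, area + j, 6);
				return j;
			}
		}
		return -1;
	}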
*/ - dev->netdev_ops = &i596_netdev_ops; - dev->watchdog_timeo = 5*HZ; - -#if 0 - /* selftest reports 0x320925ae - don't know what that means */ - i596_port_do(dev, PORT_SELFTEST, "selftest"); - i596_port_do(dev, PORT_DUMP, "dump"); -#endif - return 0; - -err_out_kfree: - release_region(IOADDR, LP486E_TOTAL_SIZE); - return ret; -} - -static inline void -i596_handle_CU_completion(struct net_device *dev, - struct i596_private *lp, - unsigned short status, - unsigned short *ack_cmdp) { - struct i596_cmd *cmd; - int frames_out = 0; - int commands_done = 0; - int cmd_val; - unsigned long flags; - - spin_lock_irqsave(&lp->cmd_lock, flags); - cmd = lp->cmd_head; - - while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) { - cmd = lp->cmd_head; - - lp->cmd_head = pa_to_va(lp->cmd_head->pa_next); - lp->cmd_backlog--; - - commands_done++; - cmd_val = cmd->command & 0x7; -#if 0 - printk("finished CU %s command (%d)\n", - CUcmdnames[cmd_val], cmd_val); -#endif - switch (cmd_val) { - case CmdTx: - { - struct tx_cmd *tx_cmd; - struct i596_tbd *tx_cmd_tbd; - - tx_cmd = (struct tx_cmd *) cmd; - tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd); - - frames_out++; - if (cmd->status & CMD_STAT_OK) { - if (i596_debug) - print_eth(pa_to_va(tx_cmd_tbd->pa_data)); - } else { - dev->stats.tx_errors++; - if (i596_debug) - printk("transmission failure:%04x\n", - cmd->status); - if (cmd->status & 0x0020) - dev->stats.collisions++; - if (!(cmd->status & 0x0040)) - dev->stats.tx_heartbeat_errors++; - if (cmd->status & 0x0400) - dev->stats.tx_carrier_errors++; - if (cmd->status & 0x0800) - dev->stats.collisions++; - if (cmd->status & 0x1000) - dev->stats.tx_aborted_errors++; - } - dev_kfree_skb_irq(tx_cmd_tbd->skb); - - cmd->pa_next = I596_NULL; - kfree((unsigned char *)tx_cmd); - netif_wake_queue(dev); - break; - } - - case CmdMulticastList: - cmd->pa_next = I596_NULL; - kfree((unsigned char *)cmd); - break; - - case CmdTDR: - { - unsigned long status = *((unsigned long *) (cmd + 1)); - if (status & 0x8000) { - if (i596_debug) - printk("%s: link ok.\n", dev->name); - } else { - if (status & 0x4000) - printk("%s: Transceiver problem.\n", - dev->name); - if (status & 0x2000) - printk("%s: Termination problem.\n", - dev->name); - if (status & 0x1000) - printk("%s: Short circuit.\n", - dev->name); - printk("%s: Time %ld.\n", - dev->name, status & 0x07ff); - } - } - default: - cmd->pa_next = I596_NULL; - lp->last_cmd = jiffies; - - } - barrier(); - } - - cmd = lp->cmd_head; - while (cmd && (cmd != lp->cmd_tail)) { - cmd->command &= 0x1fff; - cmd = pa_to_va(cmd->pa_next); - barrier(); - } - - if (lp->cmd_head) - *ack_cmdp |= CUC_START; - lp->scb.pa_cmd = va_to_pa(lp->cmd_head); - spin_unlock_irqrestore(&lp->cmd_lock, flags); -} - -static irqreturn_t -i596_interrupt(int irq, void *dev_instance) -{ - struct net_device *dev = dev_instance; - struct i596_private *lp = netdev_priv(dev); - unsigned short status, ack_cmd = 0; - int frames_in = 0; - - /* - * The 82596 examines the command, performs the required action, - * and then clears the SCB command word. - */ - if (lp->scb.command && i596_timeout(dev, "interrupt", 40)) - ; - - /* - * The status word indicates the status of the 82596. - * It is modified only by the 82596. - * - * [So, we must not clear it. I find often status 0xffff, - * which is not one of the values allowed by the docs.] 
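The acknowledge step in the interrupt handler that follows simply reflects the event bits back at the chip, OR-ing in unit restarts where needed; the masks are the STAT_*/CUC_START/RX_START defines from earlier in this file. A condensed sketch of that composition (constants repeated here so the fragment stands alone):

	#define STAT_CX   0x8000	/* CU finished an interrupting command */
	#define STAT_FR   0x4000	/* RU received a frame */
	#define STAT_CNA  0x2000	/* CU left the active state */
	#define STAT_RNR  0x1000	/* RU left the active state */
	#define STAT_ACK  (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR)
	#define CUC_START 0x0100
	#define RX_START  0x0010

	static unsigned short build_ack(unsigned short status,
					int cu_has_work, int restart_ru)
	{
		unsigned short ack = status & STAT_ACK;

		if (cu_has_work)
			ack |= CUC_START;
		if (restart_ru)
			ack |= RX_START;
		return ack;	/* written to scb.command, then CA() */
	}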
- */ - status = lp->scb.status; -#if 0 - if (i596_debug) { - printk("%s: i596 interrupt, ", dev->name); - i596_out_status(status); - } -#endif - /* Impossible, but it happens - perhaps when we get - a receive interrupt but scb.pa_rfd is I596_NULL. */ - if (status == 0xffff) { - printk("%s: i596_interrupt: got status 0xffff\n", dev->name); - goto out; - } - - ack_cmd = (status & STAT_ACK); - - if (status & (STAT_CX | STAT_CNA)) - i596_handle_CU_completion(dev, lp, status, &ack_cmd); - - if (status & (STAT_FR | STAT_RNR)) { - /* Restart the receive unit when it got inactive somehow */ - if ((status & STAT_RNR) && netif_running(dev)) - ack_cmd |= RX_START; - - if (status & STAT_FR) { - frames_in = i596_rx(dev); - if (!frames_in) - printk("receive frame reported, but no frames\n"); - } - } - - /* acknowledge the interrupt */ - /* - if ((lp->scb.pa_cmd != I596_NULL) && netif_running(dev)) - ack_cmd |= CUC_START; - */ - - if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100)) - ; - - lp->scb.command = ack_cmd; - - CLEAR_INT(); - CA(); - - out: - return IRQ_HANDLED; -} - -static int i596_close(struct net_device *dev) { - struct i596_private *lp = netdev_priv(dev); - - netif_stop_queue(dev); - - if (i596_debug) - printk("%s: Shutting down ethercard, status was %4.4x.\n", - dev->name, lp->scb.status); - - lp->scb.command = (CUC_ABORT | RX_ABORT); - CA(); - - i596_cleanup_cmd(dev); - - if (lp->scb.command && i596_timeout(dev, "i596_close", 200)) - ; - - free_irq(dev->irq, dev); - remove_rx_bufs(dev); - - return 0; -} - -/* -* Set or clear the multicast filter for this adaptor. -*/ - -static void set_multicast_list(struct net_device *dev) { - struct i596_private *lp = netdev_priv(dev); - struct i596_cmd *cmd; - - if (i596_debug > 1) - printk ("%s: set multicast list %d\n", - dev->name, netdev_mc_count(dev)); - - if (!netdev_mc_empty(dev)) { - struct netdev_hw_addr *ha; - char *cp; - cmd = kmalloc(sizeof(struct i596_cmd) + 2 + - netdev_mc_count(dev) * 6, GFP_ATOMIC); - if (cmd == NULL) { - printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name); - return; - } - cmd->command = CmdMulticastList; - *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6; - cp = ((char *)(cmd + 1))+2; - netdev_for_each_mc_addr(ha, dev) { - memcpy(cp, ha->addr, 6); - cp += 6; - } - if (i596_debug & LOG_SRCDST) - print_eth (((char *)(cmd + 1)) + 2); - i596_add_cmd(dev, cmd); - } else { - if (lp->set_conf.pa_next != I596_NULL) { - return; - } - if (netdev_mc_empty(dev) && - !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { - lp->i596_config[8] &= ~0x01; - } else { - lp->i596_config[8] |= 0x01; - } - - i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf); - } -} - -MODULE_AUTHOR("Ard van Breemen "); -MODULE_DESCRIPTION("Intel Panther onboard i82596 driver"); -MODULE_LICENSE("GPL"); - -static struct net_device *dev_lp486e; -static int full_duplex; -static int options; -static int io = IOADDR; -static int irq = IRQ; - -module_param(debug, int, 0); -//module_param(max_interrupt_work, int, 0); -//module_param(reverse_probe, int, 0); -//module_param(rx_copybreak, int, 0); -module_param(options, int, 0); -module_param(full_duplex, int, 0); - -static int __init lp486e_init_module(void) { - int err; - struct net_device *dev = alloc_etherdev(sizeof(struct i596_private)); - if (!dev) - return -ENOMEM; - - dev->irq = irq; - dev->base_addr = io; - err = lp486e_probe(dev); - if (err) { - free_netdev(dev); - return err; - } - err = register_netdev(dev); - if (err) { - release_region(dev->base_addr, LP486E_TOTAL_SIZE); 
- free_netdev(dev); - return err; - } - dev_lp486e = dev; - full_duplex = 0; - options = 0; - return 0; -} - -static void __exit lp486e_cleanup_module(void) { - unregister_netdev(dev_lp486e); - release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE); - free_netdev(dev_lp486e); -} - -module_init(lp486e_init_module); -module_exit(lp486e_cleanup_module); diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c deleted file mode 100644 index d973fc6..0000000 --- a/drivers/net/ni52.c +++ /dev/null @@ -1,1346 +0,0 @@ -/* - * net-3-driver for the NI5210 card (i82586 Ethernet chip) - * - * This is an extension to the Linux operating system, and is covered by the - * same GNU General Public License that covers that work. - * - * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later) - * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de) - * [feel free to mail ....] - * - * when using as module: (no autoprobing!) - * run with e.g: - * insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000 - * - * CAN YOU PLEASE REPORT ME YOUR PERFORMANCE EXPERIENCES !!. - * - * If you find a bug, please report me: - * The kernel panic output and any kmsg from the ni52 driver - * the ni5210-driver-version and the linux-kernel version - * how many shared memory (memsize) on the netcard, - * bootprom: yes/no, base_addr, mem_start - * maybe the ni5210-card revision and the i82586 version - * - * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340 - * mem_start: 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000, - * 0xd8000,0xcc000,0xce000,0xda000,0xdc000 - * - * sources: - * skeleton.c from Donald Becker - * - * I have also done a look in the following sources: (mail me if you need them) - * crynwr-packet-driver by Russ Nelson - * Garret A. Wollman's (fourth) i82586-driver for BSD - * (before getting an i82596 (yes 596 not 586) manual, the existing drivers - * helped me a lot to understand this tricky chip.) - * - * Known Problems: - * The internal sysbus seems to be slow. So we often lose packets because of - * overruns while receiving from a fast remote host. - * This can slow down TCP connections. Maybe the newer ni5210 cards are - * better. My experience is, that if a machine sends with more than about - * 500-600K/s the fifo/sysbus overflows. - * - * IMPORTANT NOTE: - * On fast networks, it's a (very) good idea to have 16K shared memory. With - * 8K, we can store only 4 receive frames, so it can (easily) happen that a - * remote machine 'overruns' our system. - * - * Known i82586/card problems (I'm sure, there are many more!): - * Running the NOP-mode, the i82586 sometimes seems to forget to report - * every xmit-interrupt until we restart the CU. - * Another MAJOR bug is, that the RU sometimes seems to ignore the EL-Bit - * in the RBD-Struct which indicates an end of the RBD queue. - * Instead, the RU fetches another (randomly selected and - * usually used) RBD and begins to fill it. (Maybe, this happens only if - * the last buffer from the previous RFD fits exact into the queue and - * the next RFD can't fetch an initial RBD. Anyone knows more? ) - * - * results from ftp performance tests with Linux 1.2.5 - * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s) - * sending in NOP-mode: peak performance up to 530K/s (but better don't - * run this mode) - */ - -/* - * 29.Sept.96: virt_to_bus changes for new memory scheme - * 19.Feb.96: more Mcast changes, module support (MH) - * - * 18.Nov.95: Mcast changes (AC). - * - * 23.April.95: fixed(?) 
receiving problems by configuring a RFD more - * than the number of RBD's. Can maybe cause other problems. - * 18.April.95: Added MODULE support (MH) - * 17.April.95: MC related changes in init586() and set_multicast_list(). - * removed use of 'jiffies' in init586() (MH) - * - * 19.Sep.94: Added Multicast support (not tested yet) (MH) - * - * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling. - * Now, every RFD has exact one RBD. (MH) - * - * 14.Sep.94: added promiscuous mode, a few cleanups (MH) - * - * 19.Aug.94: changed request_irq() parameter (MH) - * - * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH) - * - * 19.July.94: lotsa cleanups .. (MH) - * - * 17.July.94: some patches ... verified to run with 1.1.29 (MH) - * - * 4.July.94: patches for Linux 1.1.24 (MH) - * - * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH) - * - * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, - * too (MH) - * - * < 30.Sep.93: first versions - */ - -static int debuglevel; /* debug-printk 0: off 1: a few 2: more */ -static int automatic_resume; /* experimental .. better should be zero */ -static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */ -static int fifo = 0x8; /* don't change */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "ni52.h" - -#define DRV_NAME "ni52" - -#define DEBUG /* debug on */ -#define SYSBUSVAL 1 /* 8 Bit */ - -#define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); } -#define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); } -#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); } -#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); } - -#define make32(ptr16) ((void __iomem *)(p->memtop + (short) (ptr16))) -#define make24(ptr32) ((char __iomem *)(ptr32)) - p->base -#define make16(ptr32) ((unsigned short) ((char __iomem *)(ptr32)\ - - p->memtop)) - -/******************* how to calculate the buffers ***************************** - - * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works - * --------------- in a different (more stable?) mode. 
Only in this mode it's - * possible to configure the driver with 'NO_NOPCOMMANDS' - -sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8; -sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT -sizeof(rfd) = 24; sizeof(rbd) = 12; -sizeof(tbd) = 8; sizeof(transmit_cmd) = 16; -sizeof(nop_cmd) = 8; - - * if you don't know the driver, better do not change these values: */ - -#define RECV_BUFF_SIZE 1524 /* slightly oversized */ -#define XMIT_BUFF_SIZE 1524 /* slightly oversized */ -#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */ -#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */ -#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */ -#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */ - -/**************************************************************************/ - - -#define NI52_TOTAL_SIZE 16 -#define NI52_ADDR0 0x02 -#define NI52_ADDR1 0x07 -#define NI52_ADDR2 0x01 - -static int ni52_probe1(struct net_device *dev, int ioaddr); -static irqreturn_t ni52_interrupt(int irq, void *dev_id); -static int ni52_open(struct net_device *dev); -static int ni52_close(struct net_device *dev); -static netdev_tx_t ni52_send_packet(struct sk_buff *, struct net_device *); -static struct net_device_stats *ni52_get_stats(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static void ni52_timeout(struct net_device *dev); - -/* helper-functions */ -static int init586(struct net_device *dev); -static int check586(struct net_device *dev, unsigned size); -static void alloc586(struct net_device *dev); -static void startrecv586(struct net_device *dev); -static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr); -static void ni52_rcv_int(struct net_device *dev); -static void ni52_xmt_int(struct net_device *dev); -static void ni52_rnr_int(struct net_device *dev); - -struct priv { - char __iomem *base; - char __iomem *mapped; - char __iomem *memtop; - spinlock_t spinlock; - int reset; - struct rfd_struct __iomem *rfd_last, *rfd_top, *rfd_first; - struct scp_struct __iomem *scp; - struct iscp_struct __iomem *iscp; - struct scb_struct __iomem *scb; - struct tbd_struct __iomem *xmit_buffs[NUM_XMIT_BUFFS]; -#if (NUM_XMIT_BUFFS == 1) - struct transmit_cmd_struct __iomem *xmit_cmds[2]; - struct nop_cmd_struct __iomem *nop_cmds[2]; -#else - struct transmit_cmd_struct __iomem *xmit_cmds[NUM_XMIT_BUFFS]; - struct nop_cmd_struct __iomem *nop_cmds[NUM_XMIT_BUFFS]; -#endif - int nop_point, num_recv_buffs; - char __iomem *xmit_cbuffs[NUM_XMIT_BUFFS]; - int xmit_count, xmit_last; -}; - -/* wait for command with timeout: */ -static void wait_for_scb_cmd(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - int i; - for (i = 0; i < 16384; i++) { - if (readb(&p->scb->cmd_cuc) == 0) - break; - udelay(4); - if (i == 16383) { - printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n", - dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus)); - if (!p->reset) { - p->reset = 1; - ni_reset586(); - } - } - } -} - -static void wait_for_scb_cmd_ruc(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - int i; - for (i = 0; i < 16384; i++) { - if (readb(&p->scb->cmd_ruc) == 0) - break; - udelay(4); - if (i == 16383) { - printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. 
disabling i82586!!\n", - dev->name, readb(&p->scb->cmd_ruc), - readb(&p->scb->rus)); - if (!p->reset) { - p->reset = 1; - ni_reset586(); - } - } - } -} - -static void wait_for_stat_compl(void __iomem *p) -{ - struct nop_cmd_struct __iomem *addr = p; - int i; - for (i = 0; i < 32767; i++) { - if (readw(&((addr)->cmd_status)) & STAT_COMPL) - break; - udelay(32); - } -} - -/********************************************** - * close device - */ -static int ni52_close(struct net_device *dev) -{ - free_irq(dev->irq, dev); - ni_reset586(); /* the hard way to stop the receiver */ - netif_stop_queue(dev); - return 0; -} - -/********************************************** - * open device - */ -static int ni52_open(struct net_device *dev) -{ - int ret; - - ni_disint(); - alloc586(dev); - init586(dev); - startrecv586(dev); - ni_enaint(); - - ret = request_irq(dev->irq, ni52_interrupt, 0, dev->name, dev); - if (ret) { - ni_reset586(); - return ret; - } - netif_start_queue(dev); - return 0; /* most done by init */ -} - -static int check_iscp(struct net_device *dev, void __iomem *addr) -{ - struct iscp_struct __iomem *iscp = addr; - struct priv *p = netdev_priv(dev); - memset_io(iscp, 0, sizeof(struct iscp_struct)); - - writel(make24(iscp), &p->scp->iscp); - writeb(1, &iscp->busy); - - ni_reset586(); - ni_attn586(); - mdelay(32); /* wait a while... */ - /* i82586 clears 'busy' after successful init */ - if (readb(&iscp->busy)) - return 0; - return 1; -} - -/********************************************** - * Check to see if there's an 82586 out there. - */ -static int check586(struct net_device *dev, unsigned size) -{ - struct priv *p = netdev_priv(dev); - int i; - - p->mapped = ioremap(dev->mem_start, size); - if (!p->mapped) - return 0; - - p->base = p->mapped + size - 0x01000000; - p->memtop = p->mapped + size; - p->scp = (struct scp_struct __iomem *)(p->base + SCP_DEFAULT_ADDRESS); - p->scb = (struct scb_struct __iomem *) p->mapped; - p->iscp = (struct iscp_struct __iomem *)p->scp - 1; - memset_io(p->scp, 0, sizeof(struct scp_struct)); - for (i = 0; i < sizeof(struct scp_struct); i++) - /* memory was writeable? */ - if (readb((char __iomem *)p->scp + i)) - goto Enodev; - writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */ - if (readb(&p->scp->sysbus) != SYSBUSVAL) - goto Enodev; - - if (!check_iscp(dev, p->mapped)) - goto Enodev; - if (!check_iscp(dev, p->iscp)) - goto Enodev; - return 1; -Enodev: - iounmap(p->mapped); - return 0; -} - -/****************************************************************** - * set iscp at the right place, called by ni52_probe1 and open586. 
- */ -static void alloc586(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - ni_reset586(); - mdelay(32); - - memset_io(p->iscp, 0, sizeof(struct iscp_struct)); - memset_io(p->scp , 0, sizeof(struct scp_struct)); - - writel(make24(p->iscp), &p->scp->iscp); - writeb(SYSBUSVAL, &p->scp->sysbus); - writew(make16(p->scb), &p->iscp->scb_offset); - - writeb(1, &p->iscp->busy); - ni_reset586(); - ni_attn586(); - - mdelay(32); - - if (readb(&p->iscp->busy)) - printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name); - - p->reset = 0; - - memset_io(p->scb, 0, sizeof(struct scb_struct)); -} - -/* set: io,irq,memstart,memend or set it when calling insmod */ -static int irq = 9; -static int io = 0x300; -static long memstart; /* e.g 0xd0000 */ -static long memend; /* e.g 0xd4000 */ - -/********************************************** - * probe the ni5210-card - */ -struct net_device * __init ni52_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(sizeof(struct priv)); - static const int ports[] = {0x300, 0x280, 0x360, 0x320, 0x340, 0}; - const int *port; - struct priv *p; - int err = 0; - - if (!dev) - return ERR_PTR(-ENOMEM); - - p = netdev_priv(dev); - - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - io = dev->base_addr; - irq = dev->irq; - memstart = dev->mem_start; - memend = dev->mem_end; - } - - if (io > 0x1ff) { /* Check a single specified location. */ - err = ni52_probe1(dev, io); - } else if (io > 0) { /* Don't probe at all. */ - err = -ENXIO; - } else { - for (port = ports; *port && ni52_probe1(dev, *port) ; port++) - ; - if (*port) - goto got_it; -#ifdef FULL_IO_PROBE - for (io = 0x200; io < 0x400 && ni52_probe1(dev, io); io += 8) - ; - if (io < 0x400) - goto got_it; -#endif - err = -ENODEV; - } - if (err) - goto out; -got_it: - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - iounmap(p->mapped); - release_region(dev->base_addr, NI52_TOTAL_SIZE); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops ni52_netdev_ops = { - .ndo_open = ni52_open, - .ndo_stop = ni52_close, - .ndo_get_stats = ni52_get_stats, - .ndo_tx_timeout = ni52_timeout, - .ndo_start_xmit = ni52_send_packet, - .ndo_set_multicast_list = set_multicast_list, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int __init ni52_probe1(struct net_device *dev, int ioaddr) -{ - int i, size, retval; - struct priv *priv = netdev_priv(dev); - - dev->base_addr = ioaddr; - dev->irq = irq; - dev->mem_start = memstart; - dev->mem_end = memend; - - spin_lock_init(&priv->spinlock); - - if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME)) - return -EBUSY; - - if (!(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) || - !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) { - retval = -ENODEV; - goto out; - } - - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = inb(dev->base_addr+i); - - if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 || - dev->dev_addr[2] != NI52_ADDR2) { - retval = -ENODEV; - goto out; - } - - printk(KERN_INFO "%s: NI5210 found at %#3lx, ", - dev->name, dev->base_addr); - - /* - * check (or search) IO-Memory, 8K and 16K - */ -#ifdef MODULE - size = dev->mem_end - dev->mem_start; - if (size != 0x2000 && size != 0x4000) { - printk("\n"); - printk(KERN_ERR "%s: Invalid memory size %d. 
Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size); - retval = -ENODEV; - goto out; - } - if (!check586(dev, size)) { - printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size); - retval = -ENODEV; - goto out; - } -#else - if (dev->mem_start != 0) { - /* no auto-mem-probe */ - size = 0x4000; /* check for 16K mem */ - if (!check586(dev, size)) { - size = 0x2000; /* check for 8K mem */ - if (!check586(dev, size)) { - printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start); - retval = -ENODEV; - goto out; - } - } - } else { - static const unsigned long memaddrs[] = { - 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000, - 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0 - }; - for (i = 0;; i++) { - if (!memaddrs[i]) { - printk(KERN_ERR "?memprobe, Can't find io-memory!\n"); - retval = -ENODEV; - goto out; - } - dev->mem_start = memaddrs[i]; - size = 0x2000; /* check for 8K mem */ - if (check586(dev, size)) - /* 8K-check */ - break; - size = 0x4000; /* check for 16K mem */ - if (check586(dev, size)) - /* 16K-check */ - break; - } - } - /* set mem_end showed by 'ifconfig' */ - dev->mem_end = dev->mem_start + size; -#endif - - alloc586(dev); - - /* set number of receive-buffs according to memsize */ - if (size == 0x2000) - priv->num_recv_buffs = NUM_RECV_BUFFS_8; - else - priv->num_recv_buffs = NUM_RECV_BUFFS_16; - - printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ", - dev->mem_start, size); - - if (dev->irq < 2) { - unsigned long irq_mask; - - irq_mask = probe_irq_on(); - ni_reset586(); - ni_attn586(); - - mdelay(20); - dev->irq = probe_irq_off(irq_mask); - if (!dev->irq) { - printk("?autoirq, Failed to detect IRQ line!\n"); - retval = -EAGAIN; - iounmap(priv->mapped); - goto out; - } - printk("IRQ %d (autodetected).\n", dev->irq); - } else { - if (dev->irq == 2) - dev->irq = 9; - printk("IRQ %d (assigned and not checked!).\n", dev->irq); - } - - dev->netdev_ops = &ni52_netdev_ops; - dev->watchdog_timeo = HZ/20; - - return 0; -out: - release_region(ioaddr, NI52_TOTAL_SIZE); - return retval; -} - -/********************************************** - * init the chip (ni52-interrupt should be disabled?!) - * needs a correct 'allocated' memory - */ - -static int init586(struct net_device *dev) -{ - void __iomem *ptr; - int i, result = 0; - struct priv *p = netdev_priv(dev); - struct configure_cmd_struct __iomem *cfg_cmd; - struct iasetup_cmd_struct __iomem *ias_cmd; - struct tdr_cmd_struct __iomem *tdr_cmd; - struct mcsetup_cmd_struct __iomem *mc_cmd; - struct netdev_hw_addr *ha; - int num_addrs = netdev_mc_count(dev); - - ptr = p->scb + 1; - - cfg_cmd = ptr; /* configure-command */ - writew(0, &cfg_cmd->cmd_status); - writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd); - writew(0xFFFF, &cfg_cmd->cmd_link); - - /* number of cfg bytes */ - writeb(0x0a, &cfg_cmd->byte_cnt); - /* fifo-limit (8=tx:32/rx:64) */ - writeb(fifo, &cfg_cmd->fifo); - /* hold or discard bad recv frames (bit 7) */ - writeb(0x40, &cfg_cmd->sav_bf); - /* addr_len |!src_insert |pre-len |loopback */ - writeb(0x2e, &cfg_cmd->adr_len); - writeb(0x00, &cfg_cmd->priority); - writeb(0x60, &cfg_cmd->ifs); - writeb(0x00, &cfg_cmd->time_low); - writeb(0xf2, &cfg_cmd->time_high); - writeb(0x00, &cfg_cmd->promisc); - if (dev->flags & IFF_ALLMULTI) { - int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6; - if (num_addrs > len) { - printk(KERN_ERR "%s: switching to promisc. 
mode\n", - dev->name); - writeb(0x01, &cfg_cmd->promisc); - } - } - if (dev->flags & IFF_PROMISC) - writeb(0x01, &cfg_cmd->promisc); - writeb(0x00, &cfg_cmd->carr_coll); - writew(make16(cfg_cmd), &p->scb->cbl_offset); - writeb(0, &p->scb->cmd_ruc); - - writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ - ni_attn586(); - - wait_for_stat_compl(cfg_cmd); - - if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != - (STAT_COMPL|STAT_OK)) { - printk(KERN_ERR "%s: configure command failed: %x\n", - dev->name, readw(&cfg_cmd->cmd_status)); - return 1; - } - - /* - * individual address setup - */ - - ias_cmd = ptr; - - writew(0, &ias_cmd->cmd_status); - writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd); - writew(0xffff, &ias_cmd->cmd_link); - - memcpy_toio(&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN); - - writew(make16(ias_cmd), &p->scb->cbl_offset); - - writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ - ni_attn586(); - - wait_for_stat_compl(ias_cmd); - - if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != - (STAT_OK|STAT_COMPL)) { - printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status)); - return 1; - } - - /* - * TDR, wire check .. e.g. no resistor e.t.c - */ - - tdr_cmd = ptr; - - writew(0, &tdr_cmd->cmd_status); - writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd); - writew(0xffff, &tdr_cmd->cmd_link); - writew(0, &tdr_cmd->status); - - writew(make16(tdr_cmd), &p->scb->cbl_offset); - writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ - ni_attn586(); - - wait_for_stat_compl(tdr_cmd); - - if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL)) - printk(KERN_ERR "%s: Problems while running the TDR.\n", - dev->name); - else { - udelay(16); - result = readw(&tdr_cmd->status); - writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc); - ni_attn586(); /* ack the interrupts */ - - if (result & TDR_LNK_OK) - ; - else if (result & TDR_XCVR_PRB) - printk(KERN_ERR "%s: TDR: Transceiver problem. 
Check the cable(s)!\n", - dev->name); - else if (result & TDR_ET_OPN) - printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n", - dev->name, result & TDR_TIMEMASK); - else if (result & TDR_ET_SRT) { - /* time == 0 -> strange :-) */ - if (result & TDR_TIMEMASK) - printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n", - dev->name, result & TDR_TIMEMASK); - } else - printk(KERN_ERR "%s: TDR: Unknown status %04x\n", - dev->name, result); - } - - /* - * Multicast setup - */ - if (num_addrs && !(dev->flags & IFF_PROMISC)) { - mc_cmd = ptr; - writew(0, &mc_cmd->cmd_status); - writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd); - writew(0xffff, &mc_cmd->cmd_link); - writew(num_addrs * 6, &mc_cmd->mc_cnt); - - i = 0; - netdev_for_each_mc_addr(ha, dev) - memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6); - - writew(make16(mc_cmd), &p->scb->cbl_offset); - writeb(CUC_START, &p->scb->cmd_cuc); - ni_attn586(); - - wait_for_stat_compl(mc_cmd); - - if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) - != (STAT_COMPL|STAT_OK)) - printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name); - } - - /* - * alloc nop/xmit-cmds - */ -#if (NUM_XMIT_BUFFS == 1) - for (i = 0; i < 2; i++) { - p->nop_cmds[i] = ptr; - writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); - writew(0, &p->nop_cmds[i]->cmd_status); - writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); - ptr = ptr + sizeof(struct nop_cmd_struct); - } -#else - for (i = 0; i < NUM_XMIT_BUFFS; i++) { - p->nop_cmds[i] = ptr; - writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); - writew(0, &p->nop_cmds[i]->cmd_status); - writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); - ptr = ptr + sizeof(struct nop_cmd_struct); - } -#endif - - ptr = alloc_rfa(dev, ptr); /* init receive-frame-area */ - - /* - * alloc xmit-buffs / init xmit_cmds - */ - for (i = 0; i < NUM_XMIT_BUFFS; i++) { - /* Transmit cmd/buff 0 */ - p->xmit_cmds[i] = ptr; - ptr = ptr + sizeof(struct transmit_cmd_struct); - p->xmit_cbuffs[i] = ptr; /* char-buffs */ - ptr = ptr + XMIT_BUFF_SIZE; - p->xmit_buffs[i] = ptr; /* TBD */ - ptr = ptr + sizeof(struct tbd_struct); - if ((void __iomem *)ptr > (void __iomem *)p->iscp) { - printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n", - dev->name); - return 1; - } - memset_io(p->xmit_cmds[i], 0, - sizeof(struct transmit_cmd_struct)); - memset_io(p->xmit_buffs[i], 0, - sizeof(struct tbd_struct)); - writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]), - &p->xmit_cmds[i]->cmd_link); - writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status); - writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd); - writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset); - writew(0xffff, &p->xmit_buffs[i]->next); - writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer); - } - - p->xmit_count = 0; - p->xmit_last = 0; -#ifndef NO_NOPCOMMANDS - p->nop_point = 0; -#endif - - /* - * 'start transmitter' - */ -#ifndef NO_NOPCOMMANDS - writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset); - writeb(CUC_START, &p->scb->cmd_cuc); - ni_attn586(); - wait_for_scb_cmd(dev); -#else - writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link); - writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd); -#endif - - /* - * ack. interrupts - */ - writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc); - ni_attn586(); - udelay(16); - - ni_enaint(); - - return 0; -} - -/****************************************************** - * This is a helper routine for ni52_rnr_int() and init586(). - * It sets up the Receive Frame Area (RFA). 
- */ - -static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr) -{ - struct rfd_struct __iomem *rfd = ptr; - struct rbd_struct __iomem *rbd; - int i; - struct priv *p = netdev_priv(dev); - - memset_io(rfd, 0, - sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd)); - p->rfd_first = rfd; - - for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) { - writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)), - &rfd[i].next); - writew(0xffff, &rfd[i].rbd_offset); - } - /* RU suspend */ - writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last); - - ptr = rfd + (p->num_recv_buffs + rfdadd); - - rbd = ptr; - ptr = rbd + p->num_recv_buffs; - - /* clr descriptors */ - memset_io(rbd, 0, sizeof(struct rbd_struct) * (p->num_recv_buffs)); - - for (i = 0; i < p->num_recv_buffs; i++) { - writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next); - writew(RECV_BUFF_SIZE, &rbd[i].size); - writel(make24(ptr), &rbd[i].buffer); - ptr = ptr + RECV_BUFF_SIZE; - } - p->rfd_top = p->rfd_first; - p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); - - writew(make16(p->rfd_first), &p->scb->rfa_offset); - writew(make16(rbd), &p->rfd_first->rbd_offset); - - return ptr; -} - - -/************************************************** - * Interrupt Handler ... - */ - -static irqreturn_t ni52_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - unsigned int stat; - int cnt = 0; - struct priv *p; - - p = netdev_priv(dev); - - if (debuglevel > 1) - printk("I"); - - spin_lock(&p->spinlock); - - wait_for_scb_cmd(dev); /* wait for last command */ - - while ((stat = readb(&p->scb->cus) & STAT_MASK)) { - writeb(stat, &p->scb->cmd_cuc); - ni_attn586(); - - if (stat & STAT_FR) /* received a frame */ - ni52_rcv_int(dev); - - if (stat & STAT_RNR) { /* RU went 'not ready' */ - printk("(R)"); - if (readb(&p->scb->rus) & RU_SUSPEND) { - /* special case: RU_SUSPEND */ - wait_for_scb_cmd(dev); - writeb(RUC_RESUME, &p->scb->cmd_ruc); - ni_attn586(); - wait_for_scb_cmd_ruc(dev); - } else { - printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n", - dev->name, stat, readb(&p->scb->rus)); - ni52_rnr_int(dev); - } - } - - /* Command with I-bit set complete */ - if (stat & STAT_CX) - ni52_xmt_int(dev); - -#ifndef NO_NOPCOMMANDS - if (stat & STAT_CNA) { /* CU went 'not ready' */ - if (netif_running(dev)) - printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n", - dev->name, stat, readb(&p->scb->cus)); - } -#endif - - if (debuglevel > 1) - printk("%d", cnt++); - - /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */ - wait_for_scb_cmd(dev); - if (readb(&p->scb->cmd_cuc)) { /* timed out? */ - printk(KERN_ERR "%s: Acknowledge timed out.\n", - dev->name); - ni_disint(); - break; - } - } - spin_unlock(&p->spinlock); - - if (debuglevel > 1) - printk("i"); - return IRQ_HANDLED; -} - -/******************************************************* - * receive-interrupt - */ - -static void ni52_rcv_int(struct net_device *dev) -{ - int status, cnt = 0; - unsigned short totlen; - struct sk_buff *skb; - struct rbd_struct __iomem *rbd; - struct priv *p = netdev_priv(dev); - - if (debuglevel > 0) - printk("R"); - - for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) { - rbd = make32(readw(&p->rfd_top->rbd_offset)); - if (status & RFD_OK) { /* frame received without error? */ - totlen = readw(&rbd->status); - if (totlen & RBD_LAST) { - /* the first and the last buffer? 
*/ - totlen &= RBD_MASK; /* length of this frame */ - writew(0x00, &rbd->status); - skb = (struct sk_buff *)dev_alloc_skb(totlen+2); - if (skb != NULL) { - skb_reserve(skb, 2); - skb_put(skb, totlen); - memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen); - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += totlen; - } else - dev->stats.rx_dropped++; - } else { - int rstat; - /* free all RBD's until RBD_LAST is set */ - totlen = 0; - while (!((rstat = readw(&rbd->status)) & RBD_LAST)) { - totlen += rstat & RBD_MASK; - if (!rstat) { - printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name); - break; - } - writew(0, &rbd->status); - rbd = make32(readw(&rbd->next)); - } - totlen += rstat & RBD_MASK; - writew(0, &rbd->status); - printk(KERN_ERR "%s: received oversized frame! length: %d\n", - dev->name, totlen); - dev->stats.rx_dropped++; - } - } else {/* frame !(ok), only with 'save-bad-frames' */ - printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n", - dev->name, status); - dev->stats.rx_errors++; - } - writeb(0, &p->rfd_top->stat_high); - writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */ - writew(0xffff, &p->rfd_top->rbd_offset); - writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */ - p->rfd_last = p->rfd_top; - p->rfd_top = make32(readw(&p->rfd_top->next)); /* step to next RFD */ - writew(make16(p->rfd_top), &p->scb->rfa_offset); - - if (debuglevel > 0) - printk("%d", cnt++); - } - - if (automatic_resume) { - wait_for_scb_cmd(dev); - writeb(RUC_RESUME, &p->scb->cmd_ruc); - ni_attn586(); - wait_for_scb_cmd_ruc(dev); - } - -#ifdef WAIT_4_BUSY - { - int i; - for (i = 0; i < 1024; i++) { - if (p->rfd_top->status) - break; - udelay(16); - if (i == 1023) - printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name); - } - } -#endif - if (debuglevel > 0) - printk("r"); -} - -/********************************************************** - * handle 'Receiver went not ready'. - */ - -static void ni52_rnr_int(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - dev->stats.rx_errors++; - - wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ - writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */ - ni_attn586(); - wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */ - - alloc_rfa(dev, p->rfd_first); - /* maybe add a check here, before restarting the RU */ - startrecv586(dev); /* restart RU */ - - printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n", - dev->name, readb(&p->scb->rus)); - -} - -/********************************************************** - * handle xmit - interrupt - */ - -static void ni52_xmt_int(struct net_device *dev) -{ - int status; - struct priv *p = netdev_priv(dev); - - if (debuglevel > 0) - printk("X"); - - status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status); - if (!(status & STAT_COMPL)) - printk(KERN_ERR "%s: strange .. 
xmit-int without a 'COMPLETE'\n", dev->name); - - if (status & STAT_OK) { - dev->stats.tx_packets++; - dev->stats.collisions += (status & TCMD_MAXCOLLMASK); - } else { - dev->stats.tx_errors++; - if (status & TCMD_LATECOLL) { - printk(KERN_ERR "%s: late collision detected.\n", - dev->name); - dev->stats.collisions++; - } else if (status & TCMD_NOCARRIER) { - dev->stats.tx_carrier_errors++; - printk(KERN_ERR "%s: no carrier detected.\n", - dev->name); - } else if (status & TCMD_LOSTCTS) - printk(KERN_ERR "%s: loss of CTS detected.\n", - dev->name); - else if (status & TCMD_UNDERRUN) { - dev->stats.tx_fifo_errors++; - printk(KERN_ERR "%s: DMA underrun detected.\n", - dev->name); - } else if (status & TCMD_MAXCOLL) { - printk(KERN_ERR "%s: Max. collisions exceeded.\n", - dev->name); - dev->stats.collisions += 16; - } - } -#if (NUM_XMIT_BUFFS > 1) - if ((++p->xmit_last) == NUM_XMIT_BUFFS) - p->xmit_last = 0; -#endif - netif_wake_queue(dev); -} - -/*********************************************************** - * (re)start the receiver - */ - -static void startrecv586(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - wait_for_scb_cmd(dev); - wait_for_scb_cmd_ruc(dev); - writew(make16(p->rfd_first), &p->scb->rfa_offset); - writeb(RUC_START, &p->scb->cmd_ruc); - ni_attn586(); /* start cmd. */ - wait_for_scb_cmd_ruc(dev); - /* wait for accept cmd. (no timeout!!) */ -} - -static void ni52_timeout(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); -#ifndef NO_NOPCOMMANDS - if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */ - netif_wake_queue(dev); -#ifdef DEBUG - printk(KERN_ERR "%s: strange ... timeout with CU active?!?\n", - dev->name); - printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n", - dev->name, (int)p->xmit_cmds[0]->cmd_status, - readw(&p->nop_cmds[0]->cmd_status), - readw(&p->nop_cmds[1]->cmd_status), - p->nop_point); -#endif - writeb(CUC_ABORT, &p->scb->cmd_cuc); - ni_attn586(); - wait_for_scb_cmd(dev); - writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset); - writeb(CUC_START, &p->scb->cmd_cuc); - ni_attn586(); - wait_for_scb_cmd(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ - return; - } -#endif - { -#ifdef DEBUG - printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n", - dev->name, readb(&p->scb->cus)); - printk(KERN_ERR "%s: command-stats: %04x %04x\n", - dev->name, - readw(&p->xmit_cmds[0]->cmd_status), - readw(&p->xmit_cmds[1]->cmd_status)); - printk(KERN_ERR "%s: check, whether you set the right interrupt number!\n", - dev->name); -#endif - ni52_close(dev); - ni52_open(dev); - } - dev->trans_start = jiffies; /* prevent tx timeout */ -} - -/****************************************************** - * send frame - */
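/*
 * A note on the transmit scheme used by ni52_send_packet() below: with
 * NUM_XMIT_BUFFS > 1 the command unit is kept alive on a small ring of
 * NOP commands, and each transmit command is spliced in behind the NOP
 * the CU is currently spinning on. A minimal sketch of that splice,
 * using the same fields as the code that follows (an illustration
 * inferred from this driver, not text from the original file):
 *
 *	writew(0, &xmit->cmd_status);                    // mark TX "not done"
 *	writew(make16(nop[next]), &nop[next]->cmd_link); // next NOP loops to itself
 *	writew(make16(xmit), &nop[cur]->cmd_link);       // CU falls through into TX
 *
 * Once the transmit command completes, the CU lands on nop[next] and
 * spins there until the next packet is queued.
 */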
-static netdev_tx_t ni52_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - int len, i; -#ifndef NO_NOPCOMMANDS - int next_nop; -#endif - struct priv *p = netdev_priv(dev); - - if (skb->len > XMIT_BUFF_SIZE) { - printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len); - return NETDEV_TX_OK; - } - - netif_stop_queue(dev); - - memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len); - len = skb->len; - if (len < ETH_ZLEN) { - len = ETH_ZLEN; - memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0, - len - skb->len); - } - -#if (NUM_XMIT_BUFFS == 1) -# ifdef NO_NOPCOMMANDS - -#ifdef DEBUG - if (readb(&p->scb->cus) & CU_ACTIVE) { - printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name); - printk(KERN_ERR "%s: stat: %04x %04x\n", - dev->name, readb(&p->scb->cus), - readw(&p->xmit_cmds[0]->cmd_status)); - } -#endif - writew(TBD_LAST | len, &p->xmit_buffs[0]->size); - for (i = 0; i < 16; i++) { - writew(0, &p->xmit_cmds[0]->cmd_status); - wait_for_scb_cmd(dev); - if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND) - writeb(CUC_RESUME, &p->scb->cmd_cuc); - else { - writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset); - writeb(CUC_START, &p->scb->cmd_cuc); - } - ni_attn586(); - if (!i) - dev_kfree_skb(skb); - wait_for_scb_cmd(dev); - /* test it, because CU sometimes doesn't start immediately */ - if (readb(&p->scb->cus) & CU_ACTIVE) - break; - if (readw(&p->xmit_cmds[0]->cmd_status)) - break; - if (i == 15) - printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name); - } -# else - next_nop = (p->nop_point + 1) & 0x1; - writew(TBD_LAST | len, &p->xmit_buffs[0]->size); - writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link); - writew(make16(p->nop_cmds[next_nop]), - &p->nop_cmds[next_nop]->cmd_link); - writew(0, &p->xmit_cmds[0]->cmd_status); - writew(0, &p->nop_cmds[next_nop]->cmd_status); - - writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link); - p->nop_point = next_nop; - dev_kfree_skb(skb); -# endif -#else - writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size); - next_nop = p->xmit_count + 1; - if (next_nop == NUM_XMIT_BUFFS) - next_nop = 0; - writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status); - /* linkpointer of xmit-command already points to next nop cmd */ - writew(make16(p->nop_cmds[next_nop]), - &p->nop_cmds[next_nop]->cmd_link); - writew(0, &p->nop_cmds[next_nop]->cmd_status); - writew(make16(p->xmit_cmds[p->xmit_count]), - &p->nop_cmds[p->xmit_count]->cmd_link); - p->xmit_count = next_nop; - { - unsigned long flags; - spin_lock_irqsave(&p->spinlock, flags); - if (p->xmit_count != p->xmit_last) - netif_wake_queue(dev); - spin_unlock_irqrestore(&p->spinlock, flags); - } - dev_kfree_skb(skb); -#endif - return NETDEV_TX_OK; -} - -/******************************************* - * Someone wanna have the statistics - */ - -static struct net_device_stats *ni52_get_stats(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - unsigned short crc, aln, rsc, ovrn; - - /* Get error-statistics from the ni82586 */ - crc = readw(&p->scb->crc_errs); - writew(0, &p->scb->crc_errs); - aln = readw(&p->scb->aln_errs); - writew(0, &p->scb->aln_errs); - rsc = readw(&p->scb->rsc_errs); - writew(0, &p->scb->rsc_errs); - ovrn = readw(&p->scb->ovrn_errs); - writew(0, &p->scb->ovrn_errs); - - dev->stats.rx_crc_errors += crc; - dev->stats.rx_fifo_errors += ovrn; - dev->stats.rx_frame_errors += aln; - dev->stats.rx_dropped += rsc; - - return &dev->stats; -} - -/******************************************************** - * Set MC list ..
- */ - -static void set_multicast_list(struct net_device *dev) -{ - netif_stop_queue(dev); - ni_disint(); - alloc586(dev); - init586(dev); - startrecv586(dev); - ni_enaint(); - netif_wake_queue(dev); -} - -#ifdef MODULE -static struct net_device *dev_ni52; - -module_param(io, int, 0); -module_param(irq, int, 0); -module_param(memstart, long, 0); -module_param(memend, long, 0); -MODULE_PARM_DESC(io, "NI5210 I/O base address,required"); -MODULE_PARM_DESC(irq, "NI5210 IRQ number,required"); -MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); -MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); - -int __init init_module(void) -{ - if (io <= 0x0 || !memend || !memstart || irq < 2) { - printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n"); - printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); - return -ENODEV; - } - dev_ni52 = ni52_probe(-1); - if (IS_ERR(dev_ni52)) - return PTR_ERR(dev_ni52); - return 0; -} - -void __exit cleanup_module(void) -{ - struct priv *p = netdev_priv(dev_ni52); - unregister_netdev(dev_ni52); - iounmap(p->mapped); - release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE); - free_netdev(dev_ni52); -} -#endif /* MODULE */ - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ni52.h b/drivers/net/ni52.h deleted file mode 100644 index 0a03b28..0000000 --- a/drivers/net/ni52.h +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Intel i82586 Ethernet definitions - * - * This is an extension to the Linux operating system, and is covered by the - * same GNU General Public License that covers that work. - * - * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de) - * - * I have done a look in the following sources: - * crynwr-packet-driver by Russ Nelson - * Garret A. Wollman's i82586-driver for BSD - */ - - -#define NI52_RESET 0 /* writing to this address, resets the i82586 */ -#define NI52_ATTENTION 1 /* channel attention, kick the 586 */ -#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */ -#define NI52_TDIS 2 /* Xmit disable */ -#define NI52_INTENA 5 /* Interrupt enable */ -#define NI52_INTDIS 4 /* Interrupt disable */ -#define NI52_MAGIC1 6 /* dunno exact function */ -#define NI52_MAGIC2 7 /* dunno exact function */ - -#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */ -#define NI52_MAGICVAL2 0x55 - -/* - * where to find the System Configuration Pointer (SCP) - */ -#define SCP_DEFAULT_ADDRESS 0xfffff4 - - -/* - * System Configuration Pointer Struct - */ - -struct scp_struct -{ - u16 zero_dum0; /* has to be zero */ - u8 sysbus; /* 0=16Bit,1=8Bit */ - u8 zero_dum1; /* has to be zero for 586 */ - u16 zero_dum2; - u16 zero_dum3; - u32 iscp; /* pointer to the iscp-block */ -}; - - -/* - * Intermediate System Configuration Pointer (ISCP) - */ -struct iscp_struct -{ - u8 busy; /* 586 clears after successful init */ - u8 zero_dummy; /* has to be zero */ - u16 scb_offset; /* pointeroffset to the scb_base */ - u32 scb_base; /* base-address of all 16-bit offsets */ -}; - -/* - * System Control Block (SCB) - */ -struct scb_struct -{ - u8 rus; - u8 cus; - u8 cmd_ruc; /* command word: RU part */ - u8 cmd_cuc; /* command word: CU part & ACK */ - u16 cbl_offset; /* pointeroffset, command block list */ - u16 rfa_offset; /* pointeroffset, receive frame area */ - u16 crc_errs; /* CRC-Error counter */ - u16 aln_errs; /* alignmenterror counter */ - u16 rsc_errs; /* Resourceerror counter */ - u16 ovrn_errs; /* OVerrunerror counter */ -}; - -/* - * possible command values for the command word - */ -#define RUC_MASK 
0x0070 /* mask for RU commands */ -#define RUC_NOP 0x0000 /* NOP-command */ -#define RUC_START 0x0010 /* start RU */ -#define RUC_RESUME 0x0020 /* resume RU after suspend */ -#define RUC_SUSPEND 0x0030 /* suspend RU */ -#define RUC_ABORT 0x0040 /* abort receiver operation immediately */ - -#define CUC_MASK 0x07 /* mask for CU command */ -#define CUC_NOP 0x00 /* NOP-command */ -#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */ -#define CUC_RESUME 0x02 /* resume after suspend */ -#define CUC_SUSPEND 0x03 /* Suspend CU */ -#define CUC_ABORT 0x04 /* abort command operation immediately */ - -#define ACK_MASK 0xf0 /* mask for ACK command */ -#define ACK_CX 0x80 /* acknowledges STAT_CX */ -#define ACK_FR 0x40 /* ack. STAT_FR */ -#define ACK_CNA 0x20 /* ack. STAT_CNA */ -#define ACK_RNR 0x10 /* ack. STAT_RNR */ - -/* - * possible status values for the status word - */ -#define STAT_MASK 0xf0 /* mask for cause of interrupt */ -#define STAT_CX 0x80 /* CU finished cmd with its I bit set */ -#define STAT_FR 0x40 /* RU finished receiving a frame */ -#define STAT_CNA 0x20 /* CU left active state */ -#define STAT_RNR 0x10 /* RU left ready state */ - -#define CU_STATUS 0x7 /* CU status, 0=idle */ -#define CU_SUSPEND 0x1 /* CU is suspended */ -#define CU_ACTIVE 0x2 /* CU is active */ - -#define RU_STATUS 0x70 /* RU status, 0=idle */ -#define RU_SUSPEND 0x10 /* RU suspended */ -#define RU_NOSPACE 0x20 /* RU no resources */ -#define RU_READY 0x40 /* RU is ready */ - -/* - * Receive Frame Descriptor (RFD) - */ -struct rfd_struct -{ - u8 stat_low; /* status word */ - u8 stat_high; /* status word */ - u8 rfd_sf; /* 82596 mode only */ - u8 last; /* Bit15,Last Frame on List / Bit14,suspend */ - u16 next; /* linkoffset to next RFD */ - u16 rbd_offset; /* pointeroffset to RBD-buffer */ - u8 dest[6]; /* ethernet-address, destination */ - u8 source[6]; /* ethernet-address, source */ - u16 length; /* 802.3 frame-length */ - u16 zero_dummy; /* dummy */ -}; - -#define RFD_LAST 0x80 /* last: last rfd in the list */ -#define RFD_SUSP 0x40 /* last: suspend RU after */ -#define RFD_COMPL 0x80 -#define RFD_OK 0x20 -#define RFD_BUSY 0x40 -#define RFD_ERR_LEN 0x10 /* Length error (if enabled length-checking */ -#define RFD_ERR_CRC 0x08 /* CRC error */ -#define RFD_ERR_ALGN 0x04 /* Alignment error */ -#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */ -#define RFD_ERR_OVR 0x01 /* DMA Overrun! 
*/ - -#define RFD_ERR_FTS 0x0080 /* Frame to short */ -#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */ -#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */ -#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */ -#define RFD_COLLDET 0x0001 /* Detected collision during reception */ - -/* - * Receive Buffer Descriptor (RBD) - */ -struct rbd_struct -{ - u16 status; /* status word,number of used bytes in buff */ - u16 next; /* pointeroffset to next RBD */ - u32 buffer; /* receive buffer address pointer */ - u16 size; /* size of this buffer */ - u16 zero_dummy; /* dummy */ -}; - -#define RBD_LAST 0x8000 /* last buffer */ -#define RBD_USED 0x4000 /* this buffer has data */ -#define RBD_MASK 0x3fff /* size-mask for length */ - -/* - * Statusvalues for Commands/RFD - */ -#define STAT_COMPL 0x8000 /* status: frame/command is complete */ -#define STAT_BUSY 0x4000 /* status: frame/command is busy */ -#define STAT_OK 0x2000 /* status: frame/command is ok */ - -/* - * Action-Commands - */ -#define CMD_NOP 0x0000 /* NOP */ -#define CMD_IASETUP 0x0001 /* initial address setup command */ -#define CMD_CONFIGURE 0x0002 /* configure command */ -#define CMD_MCSETUP 0x0003 /* MC setup command */ -#define CMD_XMIT 0x0004 /* transmit command */ -#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */ -#define CMD_DUMP 0x0006 /* dump command */ -#define CMD_DIAGNOSE 0x0007 /* diagnose command */ - -/* - * Action command bits - */ -#define CMD_LAST 0x8000 /* indicates last command in the CBL */ -#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */ -#define CMD_INT 0x2000 /* generate interrupt after execution */ - -/* - * NOP - command - */ -struct nop_cmd_struct -{ - u16 cmd_status; /* status of this command */ - u16 cmd_cmd; /* the command itself (+bits) */ - u16 cmd_link; /* offsetpointer to next command */ -}; - -/* - * IA Setup command - */ -struct iasetup_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u8 iaddr[6]; -}; - -/* - * Configure command - */ -struct configure_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u8 byte_cnt; /* size of the config-cmd */ - u8 fifo; /* fifo/recv monitor */ - u8 sav_bf; /* save bad frames (bit7=1)*/ - u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ - u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ - u8 ifs; /* inter frame spacing */ - u8 time_low; /* slot time low */ - u8 time_high; /* slot time high(0-2) and max. 
retries(4-7) */ - u8 promisc; /* promisc-mode(0) , et al (1-7) */ - u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */ - u8 fram_len; /* minimal frame len */ - u8 dummy; /* dummy */ -}; - -/* - * Multicast Setup command - */ -struct mcsetup_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u16 mc_cnt; /* number of bytes in the MC-List */ - u8 mc_list[0][6]; /* pointer to 6 bytes entries */ -}; - -/* - * DUMP command - */ -struct dump_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u16 dump_offset; /* pointeroffset to DUMP space */ -}; - -/* - * transmit command - */ -struct transmit_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u16 tbd_offset; /* pointeroffset to TBD */ - u8 dest[6]; /* destination address of the frame */ - u16 length; /* user defined: 802.3 length / Ether type */ -}; - -#define TCMD_ERRMASK 0x0fa0 -#define TCMD_MAXCOLLMASK 0x000f -#define TCMD_MAXCOLL 0x0020 -#define TCMD_HEARTBEAT 0x0040 -#define TCMD_DEFERRED 0x0080 -#define TCMD_UNDERRUN 0x0100 -#define TCMD_LOSTCTS 0x0200 -#define TCMD_NOCARRIER 0x0400 -#define TCMD_LATECOLL 0x0800 - -struct tdr_cmd_struct -{ - u16 cmd_status; - u16 cmd_cmd; - u16 cmd_link; - u16 status; -}; - -#define TDR_LNK_OK 0x8000 /* No link problem identified */ -#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */ -#define TDR_ET_OPN 0x2000 /* open, no correct termination */ -#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */ -#define TDR_TIMEMASK 0x07ff /* mask for the time field */ - -/* - * Transmit Buffer Descriptor (TBD) - */ -struct tbd_struct -{ - u16 size; /* size + EOF-Flag(15) */ - u16 next; /* pointeroffset to next TBD */ - u32 buffer; /* pointer to buffer */ -}; - -#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */ - - - - diff --git a/drivers/net/sni_82596.c b/drivers/net/sni_82596.c deleted file mode 100644 index 6b2a888..0000000 --- a/drivers/net/sni_82596.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * sni_82596.c -- driver for intel 82596 ethernet controller, as - * used in older SNI RM machines - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define SNI_82596_DRIVER_VERSION "SNI RM 82596 driver - Revision: 0.01" - -static const char sni_82596_string[] = "snirm_82596"; - -#define DMA_ALLOC dma_alloc_coherent -#define DMA_FREE dma_free_coherent -#define DMA_WBACK(priv, addr, len) do { } while (0) -#define DMA_INV(priv, addr, len) do { } while (0) -#define DMA_WBACK_INV(priv, addr, len) do { } while (0) - -#define SYSBUS 0x00004400 - -/* big endian CPU, 82596 little endian */ -#define SWAP32(x) cpu_to_le32((u32)(x)) -#define SWAP16(x) cpu_to_le16((u16)(x)) - -#define OPT_MPU_16BIT 0x01 - -#include "lib82596.c" - -MODULE_AUTHOR("Thomas Bogendoerfer"); -MODULE_DESCRIPTION("i82596 driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:snirm_82596"); -module_param(i596_debug, int, 0); -MODULE_PARM_DESC(i596_debug, "82596 debug mask"); - -static inline void ca(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - - writel(0, lp->ca); -} - - -static void mpu_port(struct net_device *dev, int c, dma_addr_t x) -{ - struct i596_private *lp = netdev_priv(dev); - - u32 v = (u32) (c) | (u32) (x); - - if (lp->options & OPT_MPU_16BIT) { - writew(v & 0xffff, lp->mpu_port); - wmb(); /* order writes to MPU port */ - udelay(1); - writew(v >> 16, lp->mpu_port); - } else { - writel(v, lp->mpu_port); - wmb(); /* order 
writes to MPU port */ - udelay(1); - writel(v, lp->mpu_port); - } -} - - -static int __devinit sni_82596_probe(struct platform_device *dev) -{ - struct net_device *netdevice; - struct i596_private *lp; - struct resource *res, *ca, *idprom, *options; - int retval = -ENOMEM; - void __iomem *mpu_addr; - void __iomem *ca_addr; - u8 __iomem *eth_addr; - - res = platform_get_resource(dev, IORESOURCE_MEM, 0); - ca = platform_get_resource(dev, IORESOURCE_MEM, 1); - options = platform_get_resource(dev, 0, 0); - idprom = platform_get_resource(dev, IORESOURCE_MEM, 2); - if (!res || !ca || !options || !idprom) - return -ENODEV; - mpu_addr = ioremap_nocache(res->start, 4); - if (!mpu_addr) - return -ENOMEM; - ca_addr = ioremap_nocache(ca->start, 4); - if (!ca_addr) - goto probe_failed_free_mpu; - - printk(KERN_INFO "Found i82596 at 0x%x\n", res->start); - - netdevice = alloc_etherdev(sizeof(struct i596_private)); - if (!netdevice) - goto probe_failed_free_ca; - - SET_NETDEV_DEV(netdevice, &dev->dev); - platform_set_drvdata (dev, netdevice); - - netdevice->base_addr = res->start; - netdevice->irq = platform_get_irq(dev, 0); - - eth_addr = ioremap_nocache(idprom->start, 0x10); - if (!eth_addr) - goto probe_failed; - - /* someone seems to like messed up stuff */ - netdevice->dev_addr[0] = readb(eth_addr + 0x0b); - netdevice->dev_addr[1] = readb(eth_addr + 0x0a); - netdevice->dev_addr[2] = readb(eth_addr + 0x09); - netdevice->dev_addr[3] = readb(eth_addr + 0x08); - netdevice->dev_addr[4] = readb(eth_addr + 0x07); - netdevice->dev_addr[5] = readb(eth_addr + 0x06); - iounmap(eth_addr); - - if (!netdevice->irq) { - printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", - __FILE__, netdevice->base_addr); - goto probe_failed; - } - - lp = netdev_priv(netdevice); - lp->options = options->flags & IORESOURCE_BITS; - lp->ca = ca_addr; - lp->mpu_port = mpu_addr; - - retval = i82596_probe(netdevice); - if (retval == 0) - return 0; - -probe_failed: - free_netdev(netdevice); -probe_failed_free_ca: - iounmap(ca_addr); -probe_failed_free_mpu: - iounmap(mpu_addr); - return retval; -} - -static int __devexit sni_82596_driver_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - struct i596_private *lp = netdev_priv(dev); - - unregister_netdev(dev); - DMA_FREE(dev->dev.parent, sizeof(struct i596_private), - lp->dma, lp->dma_addr); - iounmap(lp->ca); - iounmap(lp->mpu_port); - free_netdev (dev); - return 0; -} - -static struct platform_driver sni_82596_driver = { - .probe = sni_82596_probe, - .remove = __devexit_p(sni_82596_driver_remove), - .driver = { - .name = sni_82596_string, - .owner = THIS_MODULE, - }, -}; - -static int __devinit sni_82596_init(void) -{ - printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n"); - return platform_driver_register(&sni_82596_driver); -} - - -static void __exit sni_82596_exit(void) -{ - platform_driver_unregister(&sni_82596_driver); -} - -module_init(sni_82596_init); -module_exit(sni_82596_exit); diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c deleted file mode 100644 index b6ae53b..0000000 --- a/drivers/net/sun3_82586.c +++ /dev/null @@ -1,1213 +0,0 @@ -/* - * Sun3 i82586 Ethernet driver - * - * Cloned from ni52.c for the Sun3 by Sam Creasey (sammy@sammy.net) - * - * Original copyright follows: - * -------------------------- - * - * net-3-driver for the NI5210 card (i82586 Ethernet chip) - * - * This is an extension to the Linux operating system, and is covered by the - * same Gnu Public License that covers that work. 
- * - * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later) - * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de) - * -------------------------- - * - * Consult ni52.c for further notes from the original driver. - * - * This incarnation currently supports the OBIO version of the i82586 chip - * used in certain sun3 models. It should be fairly doable to expand this - * to support VME if I should every acquire such a board. - * - */ - -static int debuglevel = 0; /* debug-printk 0: off 1: a few 2: more */ -static int automatic_resume = 0; /* experimental .. better should be zero */ -static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */ -static int fifo=0x8; /* don't change */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "sun3_82586.h" - -#define DRV_NAME "sun3_82586" - -#define DEBUG /* debug on */ -#define SYSBUSVAL 0 /* 16 Bit */ -#define SUN3_82586_TOTAL_SIZE PAGE_SIZE - -#define sun3_attn586() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_ATTEN; *(volatile unsigned char *)(dev->base_addr) &= ~IEOB_ATTEN;} -#define sun3_reset586() {*(volatile unsigned char *)(dev->base_addr) = 0; udelay(100); *(volatile unsigned char *)(dev->base_addr) = IEOB_NORSET;} -#define sun3_disint() {*(volatile unsigned char *)(dev->base_addr) &= ~IEOB_IENAB;} -#define sun3_enaint() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_IENAB;} -#define sun3_active() {*(volatile unsigned char *)(dev->base_addr) |= (IEOB_IENAB|IEOB_ONAIR|IEOB_NORSET);} - -#define make32(ptr16) (p->memtop + (swab16((unsigned short) (ptr16))) ) -#define make24(ptr32) (char *)swab32(( ((unsigned long) (ptr32)) - p->base)) -#define make16(ptr32) (swab16((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop ))) - -/******************* how to calculate the buffers ***************************** - - * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works - * --------------- in a different (more stable?) mode. Only in this mode it's - * possible to configure the driver with 'NO_NOPCOMMANDS' - -sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8; -sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT -sizeof(rfd) = 24; sizeof(rbd) = 12; -sizeof(tbd) = 8; sizeof(transmit_cmd) = 16; -sizeof(nop_cmd) = 8; - - * if you don't know the driver, better do not change these values: */ - -#define RECV_BUFF_SIZE 1536 /* slightly oversized */ -#define XMIT_BUFF_SIZE 1536 /* slightly oversized */ -#define NUM_XMIT_BUFFS 1 /* config for 32K shmem */ -#define NUM_RECV_BUFFS_8 4 /* config for 32K shared mem */ -#define NUM_RECV_BUFFS_16 9 /* config for 32K shared mem */ -#define NUM_RECV_BUFFS_32 16 /* config for 32K shared mem */ -#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */ - -/**************************************************************************/ - -/* different DELAYs */ -#define DELAY(x) mdelay(32 * x); -#define DELAY_16(); { udelay(16); } -#define DELAY_18(); { udelay(4); } - -/* wait for command with timeout: */ -#define WAIT_4_SCB_CMD() \ -{ int i; \ - for(i=0;i<16384;i++) { \ - if(!p->scb->cmd_cuc) break; \ - DELAY_18(); \ - if(i == 16383) { \ - printk("%s: scb_cmd timed out: %04x,%04x .. 
disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \ - if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } } - -#define WAIT_4_SCB_CMD_RUC() { int i; \ - for(i=0;i<16384;i++) { \ - if(!p->scb->cmd_ruc) break; \ - DELAY_18(); \ - if(i == 16383) { \ - printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \ - if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } } - -#define WAIT_4_STAT_COMPL(addr) { int i; \ - for(i=0;i<32767;i++) { \ - if(swab16((addr)->cmd_status) & STAT_COMPL) break; \ - DELAY_16(); DELAY_16(); } } - -static int sun3_82586_probe1(struct net_device *dev,int ioaddr); -static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); -static int sun3_82586_open(struct net_device *dev); -static int sun3_82586_close(struct net_device *dev); -static int sun3_82586_send_packet(struct sk_buff *,struct net_device *); -static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); -static void set_multicast_list(struct net_device *dev); -static void sun3_82586_timeout(struct net_device *dev); -#if 0 -static void sun3_82586_dump(struct net_device *,void *); -#endif - -/* helper-functions */ -static int init586(struct net_device *dev); -static int check586(struct net_device *dev,char *where,unsigned size); -static void alloc586(struct net_device *dev); -static void startrecv586(struct net_device *dev); -static void *alloc_rfa(struct net_device *dev,void *ptr); -static void sun3_82586_rcv_int(struct net_device *dev); -static void sun3_82586_xmt_int(struct net_device *dev); -static void sun3_82586_rnr_int(struct net_device *dev); - -struct priv -{ - unsigned long base; - char *memtop; - long int lock; - int reseted; - volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first; - volatile struct scp_struct *scp; /* volatile is important */ - volatile struct iscp_struct *iscp; /* volatile is important */ - volatile struct scb_struct *scb; /* volatile is important */ - volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; - volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; -#if (NUM_XMIT_BUFFS == 1) - volatile struct nop_cmd_struct *nop_cmds[2]; -#else - volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; -#endif - volatile int nop_point,num_recv_buffs; - volatile char *xmit_cbuffs[NUM_XMIT_BUFFS]; - volatile int xmit_count,xmit_last; -}; - -/********************************************** - * close device - */ -static int sun3_82586_close(struct net_device *dev) -{ - free_irq(dev->irq, dev); - - sun3_reset586(); /* the hard way to stop the receiver */ - - netif_stop_queue(dev); - - return 0; -} - -/********************************************** - * open device - */ -static int sun3_82586_open(struct net_device *dev) -{ - int ret; - - sun3_disint(); - alloc586(dev); - init586(dev); - startrecv586(dev); - sun3_enaint(); - - ret = request_irq(dev->irq, sun3_82586_interrupt,0,dev->name,dev); - if (ret) - { - sun3_reset586(); - return ret; - } - - netif_start_queue(dev); - - return 0; /* most done by init */ -} - -/********************************************** - * Check to see if there's an 82586 out there. 
- */ -static int check586(struct net_device *dev,char *where,unsigned size) -{ - struct priv pb; - struct priv *p = &pb; - char *iscp_addr; - int i; - - p->base = (unsigned long) dvma_btov(0); - p->memtop = (char *)dvma_btov((unsigned long)where); - p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); - memset((char *)p->scp,0, sizeof(struct scp_struct)); - for(i=0;i<sizeof(struct scp_struct);i++) - if(((char *)p->scp)[i]) - return 0; - p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */ - if(p->scp->sysbus != SYSBUSVAL) - return 0; - - iscp_addr = (char *)dvma_btov((unsigned long)where); - - p->iscp = (struct iscp_struct *) iscp_addr; - memset((char *)p->iscp,0, sizeof(struct iscp_struct)); - - p->scp->iscp = make24(p->iscp); - p->iscp->busy = 1; - - sun3_reset586(); - sun3_attn586(); - DELAY(1); /* wait a while... */ - - if(p->iscp->busy) /* i82586 clears 'busy' after successful init */ - return 0; - - return 1; -} - -/****************************************************************** - * set iscp at the right place, called by sun3_82586_probe1 and open586. - */ -static void alloc586(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - sun3_reset586(); - DELAY(1); - - p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS); - p->iscp = (struct iscp_struct *) dvma_btov(dev->mem_start); - p->scb = (struct scb_struct *) ((char *)p->iscp + sizeof(struct iscp_struct)); - - memset((char *) p->iscp,0,sizeof(struct iscp_struct)); - memset((char *) p->scp ,0,sizeof(struct scp_struct)); - - p->scp->iscp = make24(p->iscp); - p->scp->sysbus = SYSBUSVAL; - p->iscp->scb_offset = make16(p->scb); - p->iscp->scb_base = make24(dvma_btov(dev->mem_start)); - - p->iscp->busy = 1; - sun3_reset586(); - sun3_attn586(); - - DELAY(1); - - if(p->iscp->busy) - printk("%s: Init-Problems (alloc).\n",dev->name); - - p->reseted = 0; - - memset((char *)p->scb,0,sizeof(struct scb_struct)); -} - -struct net_device * __init sun3_82586_probe(int unit) -{ - struct net_device *dev; - unsigned long ioaddr; - static int found = 0; - int err = -ENOMEM; - - /* check that this machine has an onboard 82586 */ - switch(idprom->id_machtype) { - case SM_SUN3|SM_3_160: - case SM_SUN3|SM_3_260: - /* these machines have 82586 */ - break; - - default: - return ERR_PTR(-ENODEV); - } - - if (found) - return ERR_PTR(-ENODEV); - - ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE); - if (!ioaddr) - return ERR_PTR(-ENOMEM); - found = 1; - - dev = alloc_etherdev(sizeof(struct priv)); - if (!dev) - goto out; - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - - dev->irq = IE_IRQ; - dev->base_addr = ioaddr; - err = sun3_82586_probe1(dev, ioaddr); - if (err) - goto out1; - err = register_netdev(dev); - if (err) - goto out2; - return dev; - -out2: - release_region(ioaddr, SUN3_82586_TOTAL_SIZE); -out1: - free_netdev(dev); -out: - iounmap((void __iomem *)ioaddr); - return ERR_PTR(err); -} - -static const struct net_device_ops sun3_82586_netdev_ops = { - .ndo_open = sun3_82586_open, - .ndo_stop = sun3_82586_close, - .ndo_start_xmit = sun3_82586_send_packet, - .ndo_set_multicast_list = set_multicast_list, - .ndo_tx_timeout = sun3_82586_timeout, - .ndo_get_stats = sun3_82586_get_stats, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, - .ndo_change_mtu = eth_change_mtu, -}; - -static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) -{ - int i, size, retval; - - if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME)) - return -EBUSY; - - /* copy in the
ethernet address from the prom */ - for(i = 0; i < 6 ; i++) - dev->dev_addr[i] = idprom->id_ethaddr[i]; - - printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr); - - /* - * check (or search) IO-Memory, 32K - */ - size = 0x8000; - - dev->mem_start = (unsigned long)dvma_malloc_align(0x8000, 0x1000); - dev->mem_end = dev->mem_start + size; - - if(size != 0x2000 && size != 0x4000 && size != 0x8000) { - printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 or 0x8000 bytes.\n",dev->name,size); - retval = -ENODEV; - goto out; - } - if(!check586(dev,(char *) dev->mem_start,size)) { - printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size); - retval = -ENODEV; - goto out; - } - - ((struct priv *)netdev_priv(dev))->memtop = - (char *)dvma_btov(dev->mem_start); - ((struct priv *)netdev_priv(dev))->base = (unsigned long) dvma_btov(0); - alloc586(dev); - - /* set number of receive-buffs according to memsize */ - if(size == 0x2000) - ((struct priv *)netdev_priv(dev))->num_recv_buffs = - NUM_RECV_BUFFS_8; - else if(size == 0x4000) - ((struct priv *)netdev_priv(dev))->num_recv_buffs = - NUM_RECV_BUFFS_16; - else - ((struct priv *)netdev_priv(dev))->num_recv_buffs = - NUM_RECV_BUFFS_32; - - printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq); - - dev->netdev_ops = &sun3_82586_netdev_ops; - dev->watchdog_timeo = HZ/20; - - dev->if_port = 0; - return 0; -out: - release_region(ioaddr, SUN3_82586_TOTAL_SIZE); - return retval; -} - - -static int init586(struct net_device *dev) -{ - void *ptr; - int i,result=0; - struct priv *p = netdev_priv(dev); - volatile struct configure_cmd_struct *cfg_cmd; - volatile struct iasetup_cmd_struct *ias_cmd; - volatile struct tdr_cmd_struct *tdr_cmd; - volatile struct mcsetup_cmd_struct *mc_cmd; - struct netdev_hw_addr *ha; - int num_addrs=netdev_mc_count(dev); - - ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); - - cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */ - cfg_cmd->cmd_status = 0; - cfg_cmd->cmd_cmd = swab16(CMD_CONFIGURE | CMD_LAST); - cfg_cmd->cmd_link = 0xffff; - - cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ - cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */ - cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ - cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ - cfg_cmd->priority = 0x00; - cfg_cmd->ifs = 0x60; - cfg_cmd->time_low = 0x00; - cfg_cmd->time_high = 0xf2; - cfg_cmd->promisc = 0; - if(dev->flags & IFF_ALLMULTI) { - int len = ((char *) p->iscp - (char *) ptr - 8) / 6; - if(num_addrs > len) { - printk("%s: switching to promisc. 
mode\n",dev->name); - cfg_cmd->promisc = 1; - } - } - if(dev->flags&IFF_PROMISC) - cfg_cmd->promisc = 1; - cfg_cmd->carr_coll = 0x00; - - p->scb->cbl_offset = make16(cfg_cmd); - p->scb->cmd_ruc = 0; - - p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ - sun3_attn586(); - - WAIT_4_STAT_COMPL(cfg_cmd); - - if((swab16(cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK)) - { - printk("%s: configure command failed: %x\n",dev->name,swab16(cfg_cmd->cmd_status)); - return 1; - } - - /* - * individual address setup - */ - - ias_cmd = (struct iasetup_cmd_struct *)ptr; - - ias_cmd->cmd_status = 0; - ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST); - ias_cmd->cmd_link = 0xffff; - - memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN); - - p->scb->cbl_offset = make16(ias_cmd); - - p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ - sun3_attn586(); - - WAIT_4_STAT_COMPL(ias_cmd); - - if((swab16(ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) { - printk("%s (82586): individual address setup command failed: %04x\n",dev->name,swab16(ias_cmd->cmd_status)); - return 1; - } - - /* - * TDR, wire check .. e.g. no resistor e.t.c - */ - - tdr_cmd = (struct tdr_cmd_struct *)ptr; - - tdr_cmd->cmd_status = 0; - tdr_cmd->cmd_cmd = swab16(CMD_TDR | CMD_LAST); - tdr_cmd->cmd_link = 0xffff; - tdr_cmd->status = 0; - - p->scb->cbl_offset = make16(tdr_cmd); - p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ - sun3_attn586(); - - WAIT_4_STAT_COMPL(tdr_cmd); - - if(!(swab16(tdr_cmd->cmd_status) & STAT_COMPL)) - { - printk("%s: Problems while running the TDR.\n",dev->name); - } - else - { - DELAY_16(); /* wait for result */ - result = swab16(tdr_cmd->status); - - p->scb->cmd_cuc = p->scb->cus & STAT_MASK; - sun3_attn586(); /* ack the interrupts */ - - if(result & TDR_LNK_OK) - ; - else if(result & TDR_XCVR_PRB) - printk("%s: TDR: Transceiver problem. 
Check the cable(s)!\n",dev->name); - else if(result & TDR_ET_OPN) - printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK); - else if(result & TDR_ET_SRT) - { - if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */ - printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK); - } - else - printk("%s: TDR: Unknown status %04x\n",dev->name,result); - } - - /* - * Multicast setup - */ - if(num_addrs && !(dev->flags & IFF_PROMISC) ) - { - mc_cmd = (struct mcsetup_cmd_struct *) ptr; - mc_cmd->cmd_status = 0; - mc_cmd->cmd_cmd = swab16(CMD_MCSETUP | CMD_LAST); - mc_cmd->cmd_link = 0xffff; - mc_cmd->mc_cnt = swab16(num_addrs * 6); - - i = 0; - netdev_for_each_mc_addr(ha, dev) - memcpy((char *) mc_cmd->mc_list[i++], - ha->addr, ETH_ALEN); - - p->scb->cbl_offset = make16(mc_cmd); - p->scb->cmd_cuc = CUC_START; - sun3_attn586(); - - WAIT_4_STAT_COMPL(mc_cmd); - - if( (swab16(mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) - printk("%s: Can't apply multicast-address-list.\n",dev->name); - } - - /* - * alloc nop/xmit-cmds - */ -#if (NUM_XMIT_BUFFS == 1) - for(i=0;i<2;i++) - { - p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; - p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP); - p->nop_cmds[i]->cmd_status = 0; - p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); - ptr = (char *) ptr + sizeof(struct nop_cmd_struct); - } -#else - for(i=0;i<NUM_XMIT_BUFFS;i++) - { - p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; - p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP); - p->nop_cmds[i]->cmd_status = 0; - p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); - ptr = (char *) ptr + sizeof(struct nop_cmd_struct); - } -#endif - - ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */ - - /* - * alloc xmit-buffs / init xmit_cmds - */ - for(i=0;i<NUM_XMIT_BUFFS;i++) - { - p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/ - ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); - p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */ - ptr = (char *) ptr + XMIT_BUFF_SIZE; - p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */ - ptr = (char *) ptr + sizeof(struct tbd_struct); - if((void *)ptr > (void *)dev->mem_end) - { - printk("%s: not enough shared-mem for your configuration!\n",dev->name); - return 1; - } - memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct)); - memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct)); - p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]); - p->xmit_cmds[i]->cmd_status = swab16(STAT_COMPL); - p->xmit_cmds[i]->cmd_cmd = swab16(CMD_XMIT | CMD_INT); - p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i])); - p->xmit_buffs[i]->next = 0xffff; - p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i])); - } - - p->xmit_count = 0; - p->xmit_last = 0; -#ifndef NO_NOPCOMMANDS - p->nop_point = 0; -#endif - - /* - * 'start transmitter' - */ -#ifndef NO_NOPCOMMANDS - p->scb->cbl_offset = make16(p->nop_cmds[0]); - p->scb->cmd_cuc = CUC_START; - sun3_attn586(); - WAIT_4_SCB_CMD(); -#else - p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]); - p->xmit_cmds[0]->cmd_cmd = swab16(CMD_XMIT | CMD_SUSPEND | CMD_INT); -#endif - - /* - * ack. interrupts - */ - p->scb->cmd_cuc = p->scb->cus & STAT_MASK; - sun3_attn586(); - DELAY_16(); - - sun3_enaint(); - sun3_active(); - - return 0; -} - -/****************************************************** - * This is a helper routine for sun3_82586_rnr_int() and init586(). - * It sets up the Receive Frame Area (RFA). - */
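/*
 * Shared-memory picture built by alloc_rfa() below, at increasing
 * offsets from 'ptr' (a sketch inferred from the code, not text from
 * the original file):
 *
 *	rfd[0] .. rfd[num_recv_buffs+rfdadd-1]	receive frame descriptors,
 *						linked into a ring via .next
 *	rbd[0] .. rbd[num_recv_buffs-1]		receive buffer descriptors,
 *						also linked into a ring
 *	num_recv_buffs * RECV_BUFF_SIZE		the data buffers themselves
 *
 * Each rbd[i].buffer carries the 24-bit bus address of its buffer
 * (via make24()), and the last RFD is marked RFD_SUSP so the RU
 * suspends when the ring fills up instead of overwriting unread frames.
 */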
-static void *alloc_rfa(struct net_device *dev,void *ptr) -{ - volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr; - volatile struct rbd_struct *rbd; - int i; - struct priv *p = netdev_priv(dev); - - memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd)); - p->rfd_first = rfd; - - for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) { - rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) ); - rfd[i].rbd_offset = 0xffff; - } - rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */ - - ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) ); - - rbd = (struct rbd_struct *) ptr; - ptr = (void *) (rbd + p->num_recv_buffs); - - /* clr descriptors */ - memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs)); - - for(i=0;i<p->num_recv_buffs;i++) - { - rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs)); - rbd[i].size = swab16(RECV_BUFF_SIZE); - rbd[i].buffer = make24(ptr); - ptr = (char *) ptr + RECV_BUFF_SIZE; - } - - p->rfd_top = p->rfd_first; - p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); - - p->scb->rfa_offset = make16(p->rfd_first); - p->rfd_first->rbd_offset = make16(rbd); - - return ptr; -} - - -/************************************************** - * Interrupt Handler ... - */ - -static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id) -{ - struct net_device *dev = dev_id; - unsigned short stat; - int cnt=0; - struct priv *p; - - if (!dev) { - printk ("sun3_82586-interrupt: irq %d for unknown device.\n",irq); - return IRQ_NONE; - } - p = netdev_priv(dev); - - if(debuglevel > 1) - printk("I"); - - WAIT_4_SCB_CMD(); /* wait for last command */ - - while((stat=p->scb->cus & STAT_MASK)) - { - p->scb->cmd_cuc = stat; - sun3_attn586(); - - if(stat & STAT_FR) /* received a frame */ - sun3_82586_rcv_int(dev); - - if(stat & STAT_RNR) /* RU went 'not ready' */ - { - printk("(R)"); - if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */ - { - WAIT_4_SCB_CMD(); - p->scb->cmd_ruc = RUC_RESUME; - sun3_attn586(); - WAIT_4_SCB_CMD_RUC(); - } - else - { - printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus); - sun3_82586_rnr_int(dev); - } - } - - if(stat & STAT_CX) /* command with I-bit set complete */ - sun3_82586_xmt_int(dev); - -#ifndef NO_NOPCOMMANDS - if(stat & STAT_CNA) /* CU went 'not ready' */ - { - if(netif_running(dev)) - printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus); - } -#endif - - if(debuglevel > 1) - printk("%d",cnt++); - - WAIT_4_SCB_CMD(); /* wait for ack. (sun3_82586_xmt_int can be faster than ack!!) */ - if(p->scb->cmd_cuc) /* timed out? */ - { - printk("%s: Acknowledge timed out.\n",dev->name); - sun3_disint(); - break; - } - } - - if(debuglevel > 1) - printk("i"); - return IRQ_HANDLED; -} - -/******************************************************* - * receive-interrupt - */ - -static void sun3_82586_rcv_int(struct net_device *dev) -{ - int status,cnt=0; - unsigned short totlen; - struct sk_buff *skb; - struct rbd_struct *rbd; - struct priv *p = netdev_priv(dev); - - if(debuglevel > 0) - printk("R"); - - for(;(status = p->rfd_top->stat_high) & RFD_COMPL;) - { - rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); - - if(status & RFD_OK) /* frame received without error? */ - { - if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer?
*/ - { - totlen &= RBD_MASK; /* length of this frame */ - rbd->status = 0; - skb = (struct sk_buff *) dev_alloc_skb(totlen+2); - if(skb != NULL) - { - skb_reserve(skb,2); - skb_put(skb,totlen); - skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen); - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - } - else - dev->stats.rx_dropped++; - } - else - { - int rstat; - /* free all RBD's until RBD_LAST is set */ - totlen = 0; - while(!((rstat=swab16(rbd->status)) & RBD_LAST)) - { - totlen += rstat & RBD_MASK; - if(!rstat) - { - printk("%s: Whoops .. no end mark in RBD list\n",dev->name); - break; - } - rbd->status = 0; - rbd = (struct rbd_struct *) make32(rbd->next); - } - totlen += rstat & RBD_MASK; - rbd->status = 0; - printk("%s: received oversized frame! length: %d\n",dev->name,totlen); - dev->stats.rx_dropped++; - } - } - else /* frame !(ok), only with 'save-bad-frames' */ - { - printk("%s: oops! rfd-error-status: %04x\n",dev->name,status); - dev->stats.rx_errors++; - } - p->rfd_top->stat_high = 0; - p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */ - p->rfd_top->rbd_offset = 0xffff; - p->rfd_last->last = 0; /* delete RFD_SUSP */ - p->rfd_last = p->rfd_top; - p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ - p->scb->rfa_offset = make16(p->rfd_top); - - if(debuglevel > 0) - printk("%d",cnt++); - } - - if(automatic_resume) - { - WAIT_4_SCB_CMD(); - p->scb->cmd_ruc = RUC_RESUME; - sun3_attn586(); - WAIT_4_SCB_CMD_RUC(); - } - -#ifdef WAIT_4_BUSY - { - int i; - for(i=0;i<1024;i++) - { - if(p->rfd_top->status) - break; - DELAY_16(); - if(i == 1023) - printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name); - } - } -#endif - -#if 0 - if(!at_least_one) - { - int i; - volatile struct rfd_struct *rfds=p->rfd_top; - volatile struct rbd_struct *rbds; - printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least); - for(i=0;i< (p->num_recv_buffs+4);i++) - { - rbds = (struct rbd_struct *) make32(rfds->rbd_offset); - printk("%04x:%04x ",rfds->status,rbds->status); - rfds = (struct rfd_struct *) make32(rfds->next); - } - printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status); - printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus); - } - old_at_least = at_least_one; -#endif - - if(debuglevel > 0) - printk("r"); -} - -/********************************************************** - * handle 'Receiver went not ready'. - */ - -static void sun3_82586_rnr_int(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - dev->stats.rx_errors++; - - WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ - p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ - sun3_attn586(); - WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */ - - alloc_rfa(dev,(char *)p->rfd_first); -/* maybe add a check here, before restarting the RU */ - startrecv586(dev); /* restart RU */ - - printk("%s: Receive-Unit restarted. 
Status: %04x\n",dev->name,p->scb->rus); - -} - -/********************************************************** - * handle xmit - interrupt - */ - -static void sun3_82586_xmt_int(struct net_device *dev) -{ - int status; - struct priv *p = netdev_priv(dev); - - if(debuglevel > 0) - printk("X"); - - status = swab16(p->xmit_cmds[p->xmit_last]->cmd_status); - if(!(status & STAT_COMPL)) - printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name); - - if(status & STAT_OK) - { - dev->stats.tx_packets++; - dev->stats.collisions += (status & TCMD_MAXCOLLMASK); - } - else - { - dev->stats.tx_errors++; - if(status & TCMD_LATECOLL) { - printk("%s: late collision detected.\n",dev->name); - dev->stats.collisions++; - } - else if(status & TCMD_NOCARRIER) { - dev->stats.tx_carrier_errors++; - printk("%s: no carrier detected.\n",dev->name); - } - else if(status & TCMD_LOSTCTS) - printk("%s: loss of CTS detected.\n",dev->name); - else if(status & TCMD_UNDERRUN) { - dev->stats.tx_fifo_errors++; - printk("%s: DMA underrun detected.\n",dev->name); - } - else if(status & TCMD_MAXCOLL) { - printk("%s: Max. collisions exceeded.\n",dev->name); - dev->stats.collisions += 16; - } - } - -#if (NUM_XMIT_BUFFS > 1) - if( (++p->xmit_last) == NUM_XMIT_BUFFS) - p->xmit_last = 0; -#endif - netif_wake_queue(dev); -} - -/*********************************************************** - * (re)start the receiver - */ - -static void startrecv586(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - - WAIT_4_SCB_CMD(); - WAIT_4_SCB_CMD_RUC(); - p->scb->rfa_offset = make16(p->rfd_first); - p->scb->cmd_ruc = RUC_START; - sun3_attn586(); /* start cmd. */ - WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */ -} - -static void sun3_82586_timeout(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); -#ifndef NO_NOPCOMMANDS - if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */ - { - netif_wake_queue(dev); -#ifdef DEBUG - printk("%s: strange ... timeout with CU active?!?\n",dev->name); - printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)swab16(p->xmit_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[1]->cmd_status),(int)p->nop_point); -#endif - p->scb->cmd_cuc = CUC_ABORT; - sun3_attn586(); - WAIT_4_SCB_CMD(); - p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]); - p->scb->cmd_cuc = CUC_START; - sun3_attn586(); - WAIT_4_SCB_CMD(); - dev->trans_start = jiffies; /* prevent tx timeout */ - return; - } -#endif - { -#ifdef DEBUG - printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus); - printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status)); - printk("%s: check, whether you set the right interrupt number!\n",dev->name); -#endif - sun3_82586_close(dev); - sun3_82586_open(dev); - } - dev->trans_start = jiffies; /* prevent tx timeout */ -} - -/****************************************************** - * send frame - */
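/*
 * Endianness reminder for the transmit path below: the Sun3 CPU is
 * big-endian while the 82586 expects little-endian shared memory, so
 * every 16-bit value handed to the chip goes through swab16() and
 * every pointer through make16()/make24() (defined near the top of
 * this file). For example, the pattern used by the code that follows
 * (an illustration drawn from this driver, not text from the original
 * file):
 *
 *	p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
 *	p->xmit_buffs[0]->buffer = make24(p->xmit_cbuffs[0]);
 */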
The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); - return NETDEV_TX_OK; - } - - netif_stop_queue(dev); - -#if(NUM_XMIT_BUFFS > 1) - if(test_and_set_bit(0,(void *) &p->lock)) { - printk("%s: Queue was locked\n",dev->name); - return NETDEV_TX_BUSY; - } - else -#endif - { - len = skb->len; - if (len < ETH_ZLEN) { - memset((void *)p->xmit_cbuffs[p->xmit_count], 0, - ETH_ZLEN); - len = ETH_ZLEN; - } - skb_copy_from_linear_data(skb, (void *)p->xmit_cbuffs[p->xmit_count], skb->len); - -#if (NUM_XMIT_BUFFS == 1) -# ifdef NO_NOPCOMMANDS - -#ifdef DEBUG - if(p->scb->cus & CU_ACTIVE) - { - printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name); - printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,swab16(p->xmit_cmds[0]->cmd_status)); - } -#endif - - p->xmit_buffs[0]->size = swab16(TBD_LAST | len); - for(i=0;i<16;i++) - { - p->xmit_cmds[0]->cmd_status = 0; - WAIT_4_SCB_CMD(); - if( (p->scb->cus & CU_STATUS) == CU_SUSPEND) - p->scb->cmd_cuc = CUC_RESUME; - else - { - p->scb->cbl_offset = make16(p->xmit_cmds[0]); - p->scb->cmd_cuc = CUC_START; - } - - sun3_attn586(); - if(!i) - dev_kfree_skb(skb); - WAIT_4_SCB_CMD(); - if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */ - break; - if(p->xmit_cmds[0]->cmd_status) - break; - if(i==15) - printk("%s: Can't start transmit-command.\n",dev->name); - } -# else - next_nop = (p->nop_point + 1) & 0x1; - p->xmit_buffs[0]->size = swab16(TBD_LAST | len); - - p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link - = make16((p->nop_cmds[next_nop])); - p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0; - - p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0])); - p->nop_point = next_nop; - dev_kfree_skb(skb); -# endif -#else - p->xmit_buffs[p->xmit_count]->size = swab16(TBD_LAST | len); - if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS ) - next_nop = 0; - - p->xmit_cmds[p->xmit_count]->cmd_status = 0; - /* linkpointer of xmit-command already points to next nop cmd */ - p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop])); - p->nop_cmds[next_nop]->cmd_status = 0; - - p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); - p->xmit_count = next_nop; - - { - unsigned long flags; - local_irq_save(flags); - if(p->xmit_count != p->xmit_last) - netif_wake_queue(dev); - p->lock = 0; - local_irq_restore(flags); - } - dev_kfree_skb(skb); -#endif - } - return NETDEV_TX_OK; -} - -/******************************************* - * Someone wanna have the statistics - */ - -static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev) -{ - struct priv *p = netdev_priv(dev); - unsigned short crc,aln,rsc,ovrn; - - crc = swab16(p->scb->crc_errs); /* get error-statistic from the ni82586 */ - p->scb->crc_errs = 0; - aln = swab16(p->scb->aln_errs); - p->scb->aln_errs = 0; - rsc = swab16(p->scb->rsc_errs); - p->scb->rsc_errs = 0; - ovrn = swab16(p->scb->ovrn_errs); - p->scb->ovrn_errs = 0; - - dev->stats.rx_crc_errors += crc; - dev->stats.rx_fifo_errors += ovrn; - dev->stats.rx_frame_errors += aln; - dev->stats.rx_dropped += rsc; - - return &dev->stats; -} - -/******************************************************** - * Set MC list .. 
- */ - -static void set_multicast_list(struct net_device *dev) -{ - netif_stop_queue(dev); - sun3_disint(); - alloc586(dev); - init586(dev); - startrecv586(dev); - sun3_enaint(); - netif_wake_queue(dev); -} - -#ifdef MODULE -#error This code is not currently supported as a module -static struct net_device *dev_sun3_82586; - -int init_module(void) -{ - dev_sun3_82586 = sun3_82586_probe(-1); - if (IS_ERR(dev_sun3_82586)) - return PTR_ERR(dev_sun3_82586); - return 0; -} - -void cleanup_module(void) -{ - unsigned long ioaddr = dev_sun3_82586->base_addr; - unregister_netdev(dev_sun3_82586); - release_region(ioaddr, SUN3_82586_TOTAL_SIZE); - iounmap((void *)ioaddr); - free_netdev(dev_sun3_82586); -} -#endif /* MODULE */ - -#if 0 -/* - * DUMP .. we expect a not running CMD unit and enough space - */ -void sun3_82586_dump(struct net_device *dev,void *ptr) -{ - struct priv *p = netdev_priv(dev); - struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr; - int i; - - p->scb->cmd_cuc = CUC_ABORT; - sun3_attn586(); - WAIT_4_SCB_CMD(); - WAIT_4_SCB_CMD_RUC(); - - dump_cmd->cmd_status = 0; - dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST; - dump_cmd->dump_offset = make16((dump_cmd + 1)); - dump_cmd->cmd_link = 0xffff; - - p->scb->cbl_offset = make16(dump_cmd); - p->scb->cmd_cuc = CUC_START; - sun3_attn586(); - WAIT_4_STAT_COMPL(dump_cmd); - - if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) - printk("%s: Can't get dump information.\n",dev->name); - - for(i=0;i<170;i++) { - printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]); - if(i % 24 == 23) - printk("\n"); - } - printk("\n"); -} -#endif - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/sun3_82586.h b/drivers/net/sun3_82586.h deleted file mode 100644 index 93346f0..0000000 --- a/drivers/net/sun3_82586.h +++ /dev/null @@ -1,318 +0,0 @@ -/* - * Intel i82586 Ethernet definitions - * - * This is an extension to the Linux operating system, and is covered by the - * same Gnu Public License that covers that work. - * - * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de) - * - * I have done a look in the following sources: - * crynwr-packet-driver by Russ Nelson - * Garret A. Wollman's i82586-driver for BSD - */ - -/* - * Cloned from ni52.h, copyright as above. - * - * Modified for Sun3 OBIO i82586 by Sam Creasey (sammy@sammy.net) - */ - - -/* defines for the obio chip (not vme) */ -#define IEOB_NORSET 0x80 /* don't reset the board */ -#define IEOB_ONAIR 0x40 /* put us on the air */ -#define IEOB_ATTEN 0x20 /* attention! */ -#define IEOB_IENAB 0x10 /* interrupt enable */ -#define IEOB_XXXXX 0x08 /* free bit */ -#define IEOB_XCVRL2 0x04 /* level 2 transceiver? 
*/ -#define IEOB_BUSERR 0x02 /* bus error */ -#define IEOB_INT 0x01 /* interrupt */ - -/* where the obio one lives */ -#define IE_OBIO 0xc0000 -#define IE_IRQ 3 - -/* - * where to find the System Configuration Pointer (SCP) - */ -#define SCP_DEFAULT_ADDRESS 0xfffff4 - - -/* - * System Configuration Pointer Struct - */ - -struct scp_struct -{ - unsigned short zero_dum0; /* has to be zero */ - unsigned char sysbus; /* 0=16Bit,1=8Bit */ - unsigned char zero_dum1; /* has to be zero for 586 */ - unsigned short zero_dum2; - unsigned short zero_dum3; - char *iscp; /* pointer to the iscp-block */ -}; - - -/* - * Intermediate System Configuration Pointer (ISCP) - */ -struct iscp_struct -{ - unsigned char busy; /* 586 clears after successful init */ - unsigned char zero_dummy; /* has to be zero */ - unsigned short scb_offset; /* pointeroffset to the scb_base */ - char *scb_base; /* base-address of all 16-bit offsets */ -}; - -/* - * System Control Block (SCB) - */ -struct scb_struct -{ - unsigned char rus; - unsigned char cus; - unsigned char cmd_ruc; /* command word: RU part */ - unsigned char cmd_cuc; /* command word: CU part & ACK */ - unsigned short cbl_offset; /* pointeroffset, command block list */ - unsigned short rfa_offset; /* pointeroffset, receive frame area */ - unsigned short crc_errs; /* CRC-Error counter */ - unsigned short aln_errs; /* allignmenterror counter */ - unsigned short rsc_errs; /* Resourceerror counter */ - unsigned short ovrn_errs; /* OVerrunerror counter */ -}; - -/* - * possible command values for the command word - */ -#define RUC_MASK 0x0070 /* mask for RU commands */ -#define RUC_NOP 0x0000 /* NOP-command */ -#define RUC_START 0x0010 /* start RU */ -#define RUC_RESUME 0x0020 /* resume RU after suspend */ -#define RUC_SUSPEND 0x0030 /* suspend RU */ -#define RUC_ABORT 0x0040 /* abort receiver operation immediately */ - -#define CUC_MASK 0x07 /* mask for CU command */ -#define CUC_NOP 0x00 /* NOP-command */ -#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */ -#define CUC_RESUME 0x02 /* resume after suspend */ -#define CUC_SUSPEND 0x03 /* Suspend CU */ -#define CUC_ABORT 0x04 /* abort command operation immediately */ - -#define ACK_MASK 0xf0 /* mask for ACK command */ -#define ACK_CX 0x80 /* acknowledges STAT_CX */ -#define ACK_FR 0x40 /* ack. STAT_FR */ -#define ACK_CNA 0x20 /* ack. STAT_CNA */ -#define ACK_RNR 0x10 /* ack. 
STAT_RNR */ - -/* - * possible status values for the status word - */ -#define STAT_MASK 0xf0 /* mask for cause of interrupt */ -#define STAT_CX 0x80 /* CU finished cmd with its I bit set */ -#define STAT_FR 0x40 /* RU finished receiving a frame */ -#define STAT_CNA 0x20 /* CU left active state */ -#define STAT_RNR 0x10 /* RU left ready state */ - -#define CU_STATUS 0x7 /* CU status, 0=idle */ -#define CU_SUSPEND 0x1 /* CU is suspended */ -#define CU_ACTIVE 0x2 /* CU is active */ - -#define RU_STATUS 0x70 /* RU status, 0=idle */ -#define RU_SUSPEND 0x10 /* RU suspended */ -#define RU_NOSPACE 0x20 /* RU no resources */ -#define RU_READY 0x40 /* RU is ready */ - -/* - * Receive Frame Descriptor (RFD) - */ -struct rfd_struct -{ - unsigned char stat_low; /* status word */ - unsigned char stat_high; /* status word */ - unsigned char rfd_sf; /* 82596 mode only */ - unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */ - unsigned short next; /* linkoffset to next RFD */ - unsigned short rbd_offset; /* pointeroffset to RBD-buffer */ - unsigned char dest[6]; /* ethernet-address, destination */ - unsigned char source[6]; /* ethernet-address, source */ - unsigned short length; /* 802.3 frame-length */ - unsigned short zero_dummy; /* dummy */ -}; - -#define RFD_LAST 0x80 /* last: last rfd in the list */ -#define RFD_SUSP 0x40 /* last: suspend RU after */ -#define RFD_COMPL 0x80 -#define RFD_OK 0x20 -#define RFD_BUSY 0x40 -#define RFD_ERR_LEN 0x10 /* Length error (if enabled length-checking */ -#define RFD_ERR_CRC 0x08 /* CRC error */ -#define RFD_ERR_ALGN 0x04 /* Alignment error */ -#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */ -#define RFD_ERR_OVR 0x01 /* DMA Overrun! */ - -#define RFD_ERR_FTS 0x0080 /* Frame to short */ -#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */ -#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */ -#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */ -#define RFD_COLLDET 0x0001 /* Detected collision during reception */ - -/* - * Receive Buffer Descriptor (RBD) - */ -struct rbd_struct -{ - unsigned short status; /* status word,number of used bytes in buff */ - unsigned short next; /* pointeroffset to next RBD */ - char *buffer; /* receive buffer address pointer */ - unsigned short size; /* size of this buffer */ - unsigned short zero_dummy; /* dummy */ -}; - -#define RBD_LAST 0x8000 /* last buffer */ -#define RBD_USED 0x4000 /* this buffer has data */ -#define RBD_MASK 0x3fff /* size-mask for length */ - -/* - * Statusvalues for Commands/RFD - */ -#define STAT_COMPL 0x8000 /* status: frame/command is complete */ -#define STAT_BUSY 0x4000 /* status: frame/command is busy */ -#define STAT_OK 0x2000 /* status: frame/command is ok */ - -/* - * Action-Commands - */ -#define CMD_NOP 0x0000 /* NOP */ -#define CMD_IASETUP 0x0001 /* initial address setup command */ -#define CMD_CONFIGURE 0x0002 /* configure command */ -#define CMD_MCSETUP 0x0003 /* MC setup command */ -#define CMD_XMIT 0x0004 /* transmit command */ -#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */ -#define CMD_DUMP 0x0006 /* dump command */ -#define CMD_DIAGNOSE 0x0007 /* diagnose command */ - -/* - * Action command bits - */ -#define CMD_LAST 0x8000 /* indicates last command in the CBL */ -#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */ -#define CMD_INT 0x2000 /* generate interrupt after execution */ - -/* - * NOP - command - */ -struct nop_cmd_struct -{ - unsigned short 
cmd_status; /* status of this command */ - unsigned short cmd_cmd; /* the command itself (+bits) */ - unsigned short cmd_link; /* offsetpointer to next command */ -}; - -/* - * IA Setup command - */ -struct iasetup_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned char iaddr[6]; -}; - -/* - * Configure command - */ -struct configure_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned char byte_cnt; /* size of the config-cmd */ - unsigned char fifo; /* fifo/recv monitor */ - unsigned char sav_bf; /* save bad frames (bit7=1)*/ - unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ - unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ - unsigned char ifs; /* inter frame spacing */ - unsigned char time_low; /* slot time low */ - unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */ - unsigned char promisc; /* promisc-mode(0) , et al (1-7) */ - unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */ - unsigned char fram_len; /* minimal frame len */ - unsigned char dummy; /* dummy */ -}; - -/* - * Multicast Setup command - */ -struct mcsetup_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned short mc_cnt; /* number of bytes in the MC-List */ - unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */ -}; - -/* - * DUMP command - */ -struct dump_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned short dump_offset; /* pointeroffset to DUMP space */ -}; - -/* - * transmit command - */ -struct transmit_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned short tbd_offset; /* pointeroffset to TBD */ - unsigned char dest[6]; /* destination address of the frame */ - unsigned short length; /* user defined: 802.3 length / Ether type */ -}; - -#define TCMD_ERRMASK 0x0fa0 -#define TCMD_MAXCOLLMASK 0x000f -#define TCMD_MAXCOLL 0x0020 -#define TCMD_HEARTBEAT 0x0040 -#define TCMD_DEFERRED 0x0080 -#define TCMD_UNDERRUN 0x0100 -#define TCMD_LOSTCTS 0x0200 -#define TCMD_NOCARRIER 0x0400 -#define TCMD_LATECOLL 0x0800 - -struct tdr_cmd_struct -{ - unsigned short cmd_status; - unsigned short cmd_cmd; - unsigned short cmd_link; - unsigned short status; -}; - -#define TDR_LNK_OK 0x8000 /* No link problem identified */ -#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */ -#define TDR_ET_OPN 0x2000 /* open, no correct termination */ -#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */ -#define TDR_TIMEMASK 0x07ff /* mask for the time field */ - -/* - * Transmit Buffer Descriptor (TBD) - */ -struct tbd_struct -{ - unsigned short size; /* size + EOF-Flag(15) */ - unsigned short next; /* pointeroffset to next TBD */ - char *buffer; /* pointer to buffer */ -}; - -#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */ - - - - diff --git a/drivers/net/znet.c b/drivers/net/znet.c deleted file mode 100644 index 8b88817..0000000 --- a/drivers/net/znet.c +++ /dev/null @@ -1,924 +0,0 @@ -/* znet.c: An Zenith Z-Note ethernet driver for linux. */ - -/* - Written by Donald Becker. - - The author may be reached as becker@scyld.com. - This driver is based on the Linux skeleton driver. The copyright of the - skeleton driver is held by the United States Government, as represented - by DIRNSA, and it is released under the GPL. 
- - Thanks to Mike Hollick for alpha testing and suggestions. - - References: - The Crynwr packet driver. - - "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992 - Intel Microcommunications Databook, Vol. 1, 1990. - As usual with Intel, the documentation is incomplete and inaccurate. - I had to read the Crynwr packet driver to figure out how to actually - use the i82593, and guess at what register bits matched the loosely - related i82586. - - Theory of Operation - - The i82593 used in the Zenith Z-Note series operates using two(!) slave - DMA channels, one interrupt, and one 8-bit I/O port. - - While there several ways to configure '593 DMA system, I chose the one - that seemed commensurate with the highest system performance in the face - of moderate interrupt latency: Both DMA channels are configured as - recirculating ring buffers, with one channel (#0) dedicated to Rx and - the other channel (#1) to Tx and configuration. (Note that this is - different than the Crynwr driver, where the Tx DMA channel is initialized - before each operation. That approach simplifies operation and Tx error - recovery, but requires additional I/O in normal operation and precludes - transmit buffer chaining.) - - Both rings are set to 8192 bytes using {TX,RX}_RING_SIZE. This provides - a reasonable ring size for Rx, while simplifying DMA buffer allocation -- - DMA buffers must not cross a 128K boundary. (In truth the size selection - was influenced by my lack of '593 documentation. I thus was constrained - to use the Crynwr '593 initialization table, which sets the Rx ring size - to 8K.) - - Despite my usual low opinion about Intel-designed parts, I must admit - that the bulk data handling of the i82593 is a good design for - an integrated system, like a laptop, where using two slave DMA channels - doesn't pose a problem. I still take issue with using only a single I/O - port. In the same controlled environment there are essentially no - limitations on I/O space, and using multiple locations would eliminate - the need for multiple operations when looking at status registers, - setting the Rx ring boundary, or switching to promiscuous mode. - - I also question Zenith's selection of the '593: one of the advertised - advantages of earlier Intel parts was that if you figured out the magic - initialization incantation you could use the same part on many different - network types. Zenith's use of the "FriendlyNet" (sic) connector rather - than an on-board transceiver leads me to believe that they were planning - to take advantage of this. But, uhmmm, the '593 omits all but ethernet - functionality from the serial subsystem. - */ - -/* 10/2002 - - o Resurected for Linux 2.5+ by Marc Zyngier : - - - Removed strange DMA snooping in znet_sent_packet, which lead to - TX buffer corruption on my laptop. - - Use init_etherdev stuff. - - Use kmalloc-ed DMA buffers. - - Use as few global variables as possible. - - Use proper resources management. - - Use wireless/i82593.h as much as possible (structure, constants) - - Compiles as module or build-in. - - Now survives unplugging/replugging cable. - - Some code was taken from wavelan_cs. - - Tested on a vintage Zenith Z-Note 433Lnp+. Probably broken on - anything else. Testers (and detailed bug reports) are welcome :-). - - o TODO : - - - Properly handle multicast - - Understand why some traffic patterns add a 1s latency... 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n"; - -#ifndef ZNET_DEBUG -#define ZNET_DEBUG 1 -#endif -static unsigned int znet_debug = ZNET_DEBUG; -module_param (znet_debug, int, 0); -MODULE_PARM_DESC (znet_debug, "ZNet debug level"); -MODULE_LICENSE("GPL"); - -/* The DMA modes we need aren't in . */ -#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */ -#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */ -#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17) -#define RX_BUF_SIZE 8192 -#define TX_BUF_SIZE 8192 -#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */ - -#define TX_TIMEOUT (HZ/10) - -struct znet_private { - int rx_dma, tx_dma; - spinlock_t lock; - short sia_base, sia_size, io_size; - struct i82593_conf_block i593_init; - /* The starting, current, and end pointers for the packet buffers. */ - ushort *rx_start, *rx_cur, *rx_end; - ushort *tx_start, *tx_cur, *tx_end; - ushort tx_buf_len; /* Tx buffer length, in words. */ -}; - -/* Only one can be built-in;-> */ -static struct net_device *znet_dev; - -struct netidblk { - char magic[8]; /* The magic number (string) "NETIDBLK" */ - unsigned char netid[8]; /* The physical station address */ - char nettype, globalopt; - char vendor[8]; /* The machine vendor and product name. */ - char product[8]; - char irq1, irq2; /* Interrupts, only one is currently used. */ - char dma1, dma2; - short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */ - short iobase1, iosize1; - short iobase2, iosize2; /* Second iobase unused. */ - char driver_options; /* Misc. bits */ - char pad; -}; - -static int znet_open(struct net_device *dev); -static netdev_tx_t znet_send_packet(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t znet_interrupt(int irq, void *dev_id); -static void znet_rx(struct net_device *dev); -static int znet_close(struct net_device *dev); -static void hardware_init(struct net_device *dev); -static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset); -static void znet_tx_timeout (struct net_device *dev); - -/* Request needed resources */ -static int znet_request_resources (struct net_device *dev) -{ - struct znet_private *znet = netdev_priv(dev); - - if (request_irq (dev->irq, znet_interrupt, 0, "ZNet", dev)) - goto failed; - if (request_dma (znet->rx_dma, "ZNet rx")) - goto free_irq; - if (request_dma (znet->tx_dma, "ZNet tx")) - goto free_rx_dma; - if (!request_region (znet->sia_base, znet->sia_size, "ZNet SIA")) - goto free_tx_dma; - if (!request_region (dev->base_addr, znet->io_size, "ZNet I/O")) - goto free_sia; - - return 0; /* Happy ! */ - - free_sia: - release_region (znet->sia_base, znet->sia_size); - free_tx_dma: - free_dma (znet->tx_dma); - free_rx_dma: - free_dma (znet->rx_dma); - free_irq: - free_irq (dev->irq, dev); - failed: - return -1; -} - -static void znet_release_resources (struct net_device *dev) -{ - struct znet_private *znet = netdev_priv(dev); - - release_region (znet->sia_base, znet->sia_size); - release_region (dev->base_addr, znet->io_size); - free_dma (znet->tx_dma); - free_dma (znet->rx_dma); - free_irq (dev->irq, dev); -} - -/* Keep the magical SIA stuff in a single function... 
*/ -static void znet_transceiver_power (struct net_device *dev, int on) -{ - struct znet_private *znet = netdev_priv(dev); - unsigned char v; - - /* Turn on/off the 82501 SIA, using zenith-specific magic. */ - /* Select LAN control register */ - outb(0x10, znet->sia_base); - - if (on) - v = inb(znet->sia_base + 1) | 0x84; - else - v = inb(znet->sia_base + 1) & ~0x84; - - outb(v, znet->sia_base+1); /* Turn on/off LAN power (bit 2). */ -} - -/* Init the i82593, with current promisc/mcast configuration. - Also used from hardware_init. */ -static void znet_set_multicast_list (struct net_device *dev) -{ - struct znet_private *znet = netdev_priv(dev); - short ioaddr = dev->base_addr; - struct i82593_conf_block *cfblk = &znet->i593_init; - - memset(cfblk, 0x00, sizeof(struct i82593_conf_block)); - - /* The configuration block. What an undocumented nightmare. - The first set of values are those suggested (without explanation) - for ethernet in the Intel 82586 databook. The rest appear to be - completely undocumented, except for cryptic notes in the Crynwr - packet driver. This driver uses the Crynwr values verbatim. */ - - /* maz : Rewritten to take advantage of the wanvelan includes. - At least we have names, not just blind values */ - - /* Byte 0 */ - cfblk->fifo_limit = 10; /* = 16 B rx and 80 B tx fifo thresholds */ - cfblk->forgnesi = 0; /* 0=82C501, 1=AMD7992B compatibility */ - cfblk->fifo_32 = 1; - cfblk->d6mod = 0; /* Run in i82593 advanced mode */ - cfblk->throttle_enb = 1; - - /* Byte 1 */ - cfblk->throttle = 8; /* Continuous w/interrupts, 128-clock DMA. */ - cfblk->cntrxint = 0; /* enable continuous mode receive interrupts */ - cfblk->contin = 1; /* enable continuous mode */ - - /* Byte 2 */ - cfblk->addr_len = ETH_ALEN; - cfblk->acloc = 1; /* Disable source addr insertion by i82593 */ - cfblk->preamb_len = 2; /* 8 bytes preamble */ - cfblk->loopback = 0; /* Loopback off */ - - /* Byte 3 */ - cfblk->lin_prio = 0; /* Default priorities & backoff methods. */ - cfblk->tbofstop = 0; - cfblk->exp_prio = 0; - cfblk->bof_met = 0; - - /* Byte 4 */ - cfblk->ifrm_spc = 6; /* 96 bit times interframe spacing */ - - /* Byte 5 */ - cfblk->slottim_low = 0; /* 512 bit times slot time (low) */ - - /* Byte 6 */ - cfblk->slottim_hi = 2; /* 512 bit times slot time (high) */ - cfblk->max_retr = 15; /* 15 collisions retries */ - - /* Byte 7 */ - cfblk->prmisc = ((dev->flags & IFF_PROMISC) ? 
1 : 0); /* Promiscuous mode */ - cfblk->bc_dis = 0; /* Enable broadcast reception */ - cfblk->crs_1 = 0; /* Don't transmit without carrier sense */ - cfblk->nocrc_ins = 0; /* i82593 generates CRC */ - cfblk->crc_1632 = 0; /* 32-bit Autodin-II CRC */ - cfblk->crs_cdt = 0; /* CD not to be interpreted as CS */ - - /* Byte 8 */ - cfblk->cs_filter = 0; /* CS is recognized immediately */ - cfblk->crs_src = 0; /* External carrier sense */ - cfblk->cd_filter = 0; /* CD is recognized immediately */ - - /* Byte 9 */ - cfblk->min_fr_len = ETH_ZLEN >> 2; /* Minimum frame length */ - - /* Byte A */ - cfblk->lng_typ = 1; /* Type/length checks OFF */ - cfblk->lng_fld = 1; /* Disable 802.3 length field check */ - cfblk->rxcrc_xf = 1; /* Don't transfer CRC to memory */ - cfblk->artx = 1; /* Disable automatic retransmission */ - cfblk->sarec = 1; /* Disable source addr trig of CD */ - cfblk->tx_jabber = 0; /* Disable jabber jam sequence */ - cfblk->hash_1 = 1; /* Use bits 0-5 in mc address hash */ - cfblk->lbpkpol = 0; /* Loopback pin active high */ - - /* Byte B */ - cfblk->fdx = 0; /* Disable full duplex operation */ - - /* Byte C */ - cfblk->dummy_6 = 0x3f; /* all ones, Default multicast addresses & backoff. */ - cfblk->mult_ia = 0; /* No multiple individual addresses */ - cfblk->dis_bof = 0; /* Disable the backoff algorithm ?! */ - - /* Byte D */ - cfblk->dummy_1 = 1; /* set to 1 */ - cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */ - cfblk->mc_all = (!netdev_mc_empty(dev) || - (dev->flags & IFF_ALLMULTI)); /* multicast all mode */ - cfblk->rcv_mon = 0; /* Monitor mode disabled */ - cfblk->frag_acpt = 0; /* Do not accept fragments */ - cfblk->tstrttrs = 0; /* No start transmission threshold */ - - /* Byte E */ - cfblk->fretx = 1; /* FIFO automatic retransmission */ - cfblk->runt_eop = 0; /* drop "runt" packets */ - cfblk->hw_sw_pin = 0; /* ?? */ - cfblk->big_endn = 0; /* Big Endian ? no... */ - cfblk->syncrqs = 1; /* Synchronous DRQ deassertion... */ - cfblk->sttlen = 1; /* 6 byte status registers */ - cfblk->rx_eop = 0; /* Signal EOP on packet reception */ - cfblk->tx_eop = 0; /* Signal EOP on packet transmission */ - - /* Byte F */ - cfblk->rbuf_size = RX_BUF_SIZE >> 12; /* Set receive buffer size */ - cfblk->rcvstop = 1; /* Enable Receive Stop Register */ - - if (znet_debug > 2) { - int i; - unsigned char *c; - - for (i = 0, c = (char *) cfblk; i < sizeof (*cfblk); i++) - printk ("%02X ", c[i]); - printk ("\n"); - } - - *znet->tx_cur++ = sizeof(struct i82593_conf_block); - memcpy(znet->tx_cur, cfblk, sizeof(struct i82593_conf_block)); - znet->tx_cur += sizeof(struct i82593_conf_block)/2; - outb(OP0_CONFIGURE | CR0_CHNL, ioaddr); - - /* XXX FIXME maz : Add multicast addresses here, so having a - * multicast address configured isn't equal to IFF_ALLMULTI */ -} - -static const struct net_device_ops znet_netdev_ops = { - .ndo_open = znet_open, - .ndo_stop = znet_close, - .ndo_start_xmit = znet_send_packet, - .ndo_set_multicast_list = znet_set_multicast_list, - .ndo_tx_timeout = znet_tx_timeout, - .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe - BIOS area. We just scan for the signature, and pull the vital parameters - out of the structure. 
*/ - -static int __init znet_probe (void) -{ - int i; - struct netidblk *netinfo; - struct znet_private *znet; - struct net_device *dev; - char *p; - int err = -ENOMEM; - - /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */ - for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++) - if (*p == 'N' && strncmp(p, "NETIDBLK", 8) == 0) - break; - - if (p >= (char *)phys_to_virt(0x100000)) { - if (znet_debug > 1) - printk(KERN_INFO "No Z-Note ethernet adaptor found.\n"); - return -ENODEV; - } - - dev = alloc_etherdev(sizeof(struct znet_private)); - if (!dev) - return -ENOMEM; - - znet = netdev_priv(dev); - - netinfo = (struct netidblk *)p; - dev->base_addr = netinfo->iobase1; - dev->irq = netinfo->irq1; - - /* The station address is in the "netidblk" at 0x0f0000. */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = netinfo->netid[i]; - - printk(KERN_INFO "%s: ZNET at %#3lx, %pM" - ", using IRQ %d DMA %d and %d.\n", - dev->name, dev->base_addr, dev->dev_addr, - dev->irq, netinfo->dma1, netinfo->dma2); - - if (znet_debug > 1) { - printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n", - dev->name, netinfo->vendor, - netinfo->irq1, netinfo->irq2, - netinfo->dma1, netinfo->dma2); - printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n", - dev->name, netinfo->iobase1, netinfo->iosize1, - netinfo->iobase2, netinfo->iosize2, netinfo->nettype); - } - - if (znet_debug > 0) - printk(KERN_INFO "%s", version); - - znet->rx_dma = netinfo->dma1; - znet->tx_dma = netinfo->dma2; - spin_lock_init(&znet->lock); - znet->sia_base = 0xe6; /* Magic address for the 82501 SIA */ - znet->sia_size = 2; - /* maz: Despite the '593 being advertised above as using a - * single 8bits I/O port, this driver does many 16bits - * access. So set io_size accordingly */ - znet->io_size = 2; - - if (!(znet->rx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA))) - goto free_dev; - if (!(znet->tx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA))) - goto free_rx; - - if (!dma_page_eq (znet->rx_start, znet->rx_start + (RX_BUF_SIZE/2-1)) || - !dma_page_eq (znet->tx_start, znet->tx_start + (TX_BUF_SIZE/2-1))) { - printk (KERN_WARNING "tx/rx crossing DMA frontiers, giving up\n"); - goto free_tx; - } - - znet->rx_end = znet->rx_start + RX_BUF_SIZE/2; - znet->tx_buf_len = TX_BUF_SIZE/2; - znet->tx_end = znet->tx_start + znet->tx_buf_len; - - /* The ZNET-specific entries in the device structure. */ - dev->netdev_ops = &znet_netdev_ops; - dev->watchdog_timeo = TX_TIMEOUT; - err = register_netdev(dev); - if (err) - goto free_tx; - znet_dev = dev; - return 0; - - free_tx: - kfree(znet->tx_start); - free_rx: - kfree(znet->rx_start); - free_dev: - free_netdev(dev); - return err; -} - - -static int znet_open(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - if (znet_debug > 2) - printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name); - - /* These should never fail. You can't add devices to a sealed box! */ - if (znet_request_resources (dev)) { - printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name); - return -EBUSY; - } - - znet_transceiver_power (dev, 1); - - /* According to the Crynwr driver we should wait 50 msec. for the - LAN clock to stabilize. My experiments indicates that the '593 can - be initialized immediately. The delay is probably needed for the - DC-to-DC converter to come up to full voltage, and for the oscillator - to be spot-on at 20Mhz before transmitting. 
- Until this proves to be a problem we rely on the higher layers for the - delay and save allocating a timer entry. */ - - /* maz : Well, I'm getting every time the following message - * without the delay on a 486@33. This machine is much too - * fast... :-) So maybe the Crynwr driver wasn't wrong after - * all, even if the message is completly harmless on my - * setup. */ - mdelay (50); - - /* This follows the packet driver's lead, and checks for success. */ - if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00) - printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n", - dev->name); - - hardware_init(dev); - netif_start_queue (dev); - - return 0; -} - - -static void znet_tx_timeout (struct net_device *dev) -{ - int ioaddr = dev->base_addr; - ushort event, tx_status, rx_offset, state; - - outb (CR0_STATUS_0, ioaddr); - event = inb (ioaddr); - outb (CR0_STATUS_1, ioaddr); - tx_status = inw (ioaddr); - outb (CR0_STATUS_2, ioaddr); - rx_offset = inw (ioaddr); - outb (CR0_STATUS_3, ioaddr); - state = inb (ioaddr); - printk (KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x," - " resetting.\n", dev->name, event, tx_status, rx_offset, state); - if (tx_status == TX_LOST_CRS) - printk (KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n", - dev->name); - outb (OP0_RESET, ioaddr); - hardware_init (dev); - netif_wake_queue (dev); -} - -static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev) -{ - int ioaddr = dev->base_addr; - struct znet_private *znet = netdev_priv(dev); - unsigned long flags; - short length = skb->len; - - if (znet_debug > 4) - printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name); - - if (length < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - length = ETH_ZLEN; - } - - netif_stop_queue (dev); - - /* Check that the part hasn't reset itself, probably from suspend. */ - outb(CR0_STATUS_0, ioaddr); - if (inw(ioaddr) == 0x0010 && - inw(ioaddr) == 0x0000 && - inw(ioaddr) == 0x0010) { - if (znet_debug > 1) - printk (KERN_WARNING "%s : waking up\n", dev->name); - hardware_init(dev); - znet_transceiver_power (dev, 1); - } - - if (1) { - unsigned char *buf = (void *)skb->data; - ushort *tx_link = znet->tx_cur - 1; - ushort rnd_len = (length + 1)>>1; - - dev->stats.tx_bytes+=length; - - if (znet->tx_cur >= znet->tx_end) - znet->tx_cur = znet->tx_start; - *znet->tx_cur++ = length; - if (znet->tx_cur + rnd_len + 1 > znet->tx_end) { - int semi_cnt = (znet->tx_end - znet->tx_cur)<<1; /* Cvrt to byte cnt. */ - memcpy(znet->tx_cur, buf, semi_cnt); - rnd_len -= semi_cnt>>1; - memcpy(znet->tx_start, buf + semi_cnt, length - semi_cnt); - znet->tx_cur = znet->tx_start + rnd_len; - } else { - memcpy(znet->tx_cur, buf, skb->len); - znet->tx_cur += rnd_len; - } - *znet->tx_cur++ = 0; - - spin_lock_irqsave(&znet->lock, flags); - { - *tx_link = OP0_TRANSMIT | CR0_CHNL; - /* Is this always safe to do? */ - outb(OP0_TRANSMIT | CR0_CHNL, ioaddr); - } - spin_unlock_irqrestore (&znet->lock, flags); - - netif_start_queue (dev); - - if (znet_debug > 4) - printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length); - } - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -/* The ZNET interrupt handler. 
*/ -static irqreturn_t znet_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct znet_private *znet = netdev_priv(dev); - int ioaddr; - int boguscnt = 20; - int handled = 0; - - spin_lock (&znet->lock); - - ioaddr = dev->base_addr; - - outb(CR0_STATUS_0, ioaddr); - do { - ushort status = inb(ioaddr); - if (znet_debug > 5) { - ushort result, rx_ptr, running; - outb(CR0_STATUS_1, ioaddr); - result = inw(ioaddr); - outb(CR0_STATUS_2, ioaddr); - rx_ptr = inw(ioaddr); - outb(CR0_STATUS_3, ioaddr); - running = inb(ioaddr); - printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n", - dev->name, status, result, rx_ptr, running, boguscnt); - } - if ((status & SR0_INTERRUPT) == 0) - break; - - handled = 1; - - if ((status & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE || - (status & SR0_EVENT_MASK) == SR0_RETRANSMIT_DONE || - (status & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE) { - int tx_status; - outb(CR0_STATUS_1, ioaddr); - tx_status = inw(ioaddr); - /* It's undocumented, but tx_status seems to match the i82586. */ - if (tx_status & TX_OK) { - dev->stats.tx_packets++; - dev->stats.collisions += tx_status & TX_NCOL_MASK; - } else { - if (tx_status & (TX_LOST_CTS | TX_LOST_CRS)) - dev->stats.tx_carrier_errors++; - if (tx_status & TX_UND_RUN) - dev->stats.tx_fifo_errors++; - if (!(tx_status & TX_HRT_BEAT)) - dev->stats.tx_heartbeat_errors++; - if (tx_status & TX_MAX_COL) - dev->stats.tx_aborted_errors++; - /* ...and the catch-all. */ - if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) - dev->stats.tx_errors++; - - /* Transceiver may be stuck if cable - * was removed while emitting a - * packet. Flip it off, then on to - * reset it. This is very empirical, - * but it seems to work. */ - - znet_transceiver_power (dev, 0); - znet_transceiver_power (dev, 1); - } - netif_wake_queue (dev); - } - - if ((status & SR0_RECEPTION) || - (status & SR0_EVENT_MASK) == SR0_STOP_REG_HIT) { - znet_rx(dev); - } - /* Clear the interrupts we've handled. */ - outb(CR0_INT_ACK, ioaddr); - } while (boguscnt--); - - spin_unlock (&znet->lock); - - return IRQ_RETVAL(handled); -} - -static void znet_rx(struct net_device *dev) -{ - struct znet_private *znet = netdev_priv(dev); - int ioaddr = dev->base_addr; - int boguscount = 1; - short next_frame_end_offset = 0; /* Offset of next frame start. */ - short *cur_frame_end; - short cur_frame_end_offset; - - outb(CR0_STATUS_2, ioaddr); - cur_frame_end_offset = inw(ioaddr); - - if (cur_frame_end_offset == znet->rx_cur - znet->rx_start) { - printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n", - dev->name, cur_frame_end_offset); - return; - } - - /* Use same method as the Crynwr driver: construct a forward list in - the same area of the backwards links we now have. This allows us to - pass packets to the upper layers in the order they were received -- - important for fast-path sequential operations. */ - while (znet->rx_start + cur_frame_end_offset != znet->rx_cur && - ++boguscount < 5) { - unsigned short hi_cnt, lo_cnt, hi_status, lo_status; - int count, status; - - if (cur_frame_end_offset < 4) { - /* Oh no, we have a special case: the frame trailer wraps around - the end of the ring buffer. We've saved space at the end of - the ring buffer for just this problem. 
*/ - memcpy(znet->rx_end, znet->rx_start, 8); - cur_frame_end_offset += (RX_BUF_SIZE/2); - } - cur_frame_end = znet->rx_start + cur_frame_end_offset - 4; - - lo_status = *cur_frame_end++; - hi_status = *cur_frame_end++; - status = ((hi_status & 0xff) << 8) + (lo_status & 0xff); - lo_cnt = *cur_frame_end++; - hi_cnt = *cur_frame_end++; - count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff); - - if (znet_debug > 5) - printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x" - " count %#x status %04x.\n", - cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt, - count, status); - cur_frame_end[-4] = status; - cur_frame_end[-3] = next_frame_end_offset; - cur_frame_end[-2] = count; - next_frame_end_offset = cur_frame_end_offset; - cur_frame_end_offset -= ((count + 1)>>1) + 3; - if (cur_frame_end_offset < 0) - cur_frame_end_offset += RX_BUF_SIZE/2; - } - - /* Now step forward through the list. */ - do { - ushort *this_rfp_ptr = znet->rx_start + next_frame_end_offset; - int status = this_rfp_ptr[-4]; - int pkt_len = this_rfp_ptr[-2]; - - if (znet_debug > 5) - printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x" - " next %04x.\n", next_frame_end_offset<<1, status, pkt_len, - this_rfp_ptr[-3]<<1); - /* Once again we must assume that the i82586 docs apply. */ - if ( ! (status & RX_RCV_OK)) { /* There was an error. */ - dev->stats.rx_errors++; - if (status & RX_CRC_ERR) dev->stats.rx_crc_errors++; - if (status & RX_ALG_ERR) dev->stats.rx_frame_errors++; -#if 0 - if (status & 0x0200) dev->stats.rx_over_errors++; /* Wrong. */ - if (status & 0x0100) dev->stats.rx_fifo_errors++; -#else - /* maz : Wild guess... */ - if (status & RX_OVRRUN) dev->stats.rx_over_errors++; -#endif - if (status & RX_SRT_FRM) dev->stats.rx_length_errors++; - } else if (pkt_len > 1536) { - dev->stats.rx_length_errors++; - } else { - /* Malloc up new buffer. */ - struct sk_buff *skb; - - skb = dev_alloc_skb(pkt_len); - if (skb == NULL) { - if (znet_debug) - printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); - dev->stats.rx_dropped++; - break; - } - - if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) { - int semi_cnt = (znet->rx_end - znet->rx_cur)<<1; - memcpy(skb_put(skb,semi_cnt), znet->rx_cur, semi_cnt); - memcpy(skb_put(skb,pkt_len-semi_cnt), znet->rx_start, - pkt_len - semi_cnt); - } else { - memcpy(skb_put(skb,pkt_len), znet->rx_cur, pkt_len); - if (znet_debug > 6) { - unsigned int *packet = (unsigned int *) skb->data; - printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0], - packet[1], packet[2], packet[3]); - } - } - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - } - znet->rx_cur = this_rfp_ptr; - if (znet->rx_cur >= znet->rx_end) - znet->rx_cur -= RX_BUF_SIZE/2; - update_stop_hit(ioaddr, (znet->rx_cur - znet->rx_start)<<1); - next_frame_end_offset = this_rfp_ptr[-3]; - if (next_frame_end_offset == 0) /* Read all the frames? */ - break; /* Done for now */ - this_rfp_ptr = znet->rx_start + next_frame_end_offset; - } while (--boguscount); - - /* If any worth-while packets have been received, dev_rint() - has done a mark_bh(INET_BH) for us and will work on them - when we get to the bottom-half routine. */ -} - -/* The inverse routine to znet_open(). 
*/ -static int znet_close(struct net_device *dev) -{ - int ioaddr = dev->base_addr; - - netif_stop_queue (dev); - - outb(OP0_RESET, ioaddr); /* CMD0_RESET */ - - if (znet_debug > 1) - printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); - /* Turn off transceiver power. */ - znet_transceiver_power (dev, 0); - - znet_release_resources (dev); - - return 0; -} - -static void show_dma(struct net_device *dev) -{ - short ioaddr = dev->base_addr; - unsigned char stat = inb (ioaddr); - struct znet_private *znet = netdev_priv(dev); - unsigned long flags; - short dma_port = ((znet->tx_dma&3)<<2) + IO_DMA2_BASE; - unsigned addr = inb(dma_port); - short residue; - - addr |= inb(dma_port) << 8; - residue = get_dma_residue(znet->tx_dma); - - if (znet_debug > 1) { - flags=claim_dma_lock(); - printk(KERN_DEBUG "Stat:%02x Addr: %04x cnt:%3x\n", - stat, addr<<1, residue); - release_dma_lock(flags); - } -} - -/* Initialize the hardware. We have to do this when the board is open()ed - or when we come out of suspend mode. */ -static void hardware_init(struct net_device *dev) -{ - unsigned long flags; - short ioaddr = dev->base_addr; - struct znet_private *znet = netdev_priv(dev); - - znet->rx_cur = znet->rx_start; - znet->tx_cur = znet->tx_start; - - /* Reset the chip, and start it up. */ - outb(OP0_RESET, ioaddr); - - flags=claim_dma_lock(); - disable_dma(znet->rx_dma); /* reset by an interrupting task. */ - clear_dma_ff(znet->rx_dma); - set_dma_mode(znet->rx_dma, DMA_RX_MODE); - set_dma_addr(znet->rx_dma, (unsigned int) znet->rx_start); - set_dma_count(znet->rx_dma, RX_BUF_SIZE); - enable_dma(znet->rx_dma); - /* Now set up the Tx channel. */ - disable_dma(znet->tx_dma); - clear_dma_ff(znet->tx_dma); - set_dma_mode(znet->tx_dma, DMA_TX_MODE); - set_dma_addr(znet->tx_dma, (unsigned int) znet->tx_start); - set_dma_count(znet->tx_dma, znet->tx_buf_len<<1); - enable_dma(znet->tx_dma); - release_dma_lock(flags); - - if (znet_debug > 1) - printk(KERN_DEBUG "%s: Initializing the i82593, rx buf %p tx buf %p\n", - dev->name, znet->rx_start,znet->tx_start); - /* Do an empty configure command, just like the Crynwr driver. This - resets to chip to its default values. */ - *znet->tx_cur++ = 0; - *znet->tx_cur++ = 0; - show_dma(dev); - outb(OP0_CONFIGURE | CR0_CHNL, ioaddr); - - znet_set_multicast_list (dev); - - *znet->tx_cur++ = 6; - memcpy(znet->tx_cur, dev->dev_addr, 6); - znet->tx_cur += 3; - show_dma(dev); - outb(OP0_IA_SETUP | CR0_CHNL, ioaddr); - show_dma(dev); - - update_stop_hit(ioaddr, 8192); - if (znet_debug > 1) printk(KERN_DEBUG "enabling Rx.\n"); - outb(OP0_RCV_ENABLE, ioaddr); - netif_start_queue (dev); -} - -static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset) -{ - outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, ioaddr); - if (znet_debug > 5) - printk(KERN_DEBUG "Updating stop hit with value %02x.\n", - (rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE); - outb((rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE, ioaddr); - outb(OP1_SWIT_TO_PORT_0, ioaddr); -} - -static __exit void znet_cleanup (void) -{ - if (znet_dev) { - struct znet_private *znet = netdev_priv(znet_dev); - - unregister_netdev (znet_dev); - kfree (znet->rx_start); - kfree (znet->tx_start); - free_netdev (znet_dev); - } -} - -module_init (znet_probe); -module_exit (znet_cleanup); -- cgit v0.10.2
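
[Editorial sketches follow; they are not part of the patches above.]

The znet transmitter deleted above copies each frame into a recirculating DMA ring and splits the memcpy in two when the frame would run past the end of the buffer. A minimal, self-contained sketch of that wraparound copy, assuming byte-granular pointers (the driver itself advances in 16-bit words) and hypothetical names (ring_copy, struct ring, start/end/cur):

#include <string.h>

struct ring {
	unsigned char *start;	/* first byte of the ring buffer	*/
	unsigned char *end;	/* one past the last byte		*/
	unsigned char *cur;	/* next free position			*/
};

static void ring_copy(struct ring *r, const unsigned char *buf, int len)
{
	if (r->cur + len > r->end) {
		/* frame wraps: fill to the end, continue at the start */
		int first = r->end - r->cur;

		memcpy(r->cur, buf, first);
		memcpy(r->start, buf + first, len - first);
		r->cur = r->start + (len - first);
	} else {
		memcpy(r->cur, buf, len);
		r->cur += len;
	}
}

The same split-copy pattern appears on the receive side of znet_rx(), where a frame may straddle rx_end/rx_start in the other direction.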
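znet allocates both rings with kmalloc(GFP_DMA) and then verifies them with its dma_page_eq() macro, because an ISA slave-DMA transfer must stay inside one 128K physical page. The macro reduces to comparing everything above bit 16 of the two addresses (128 KiB == 1 << 17); a sketch, with same_dma_page() as a hypothetical stand-in for the driver's macro:

#include <stdint.h>

/* Two addresses lie in the same 128 KiB ISA DMA page iff they agree
 * in all address bits above bit 16. */
static int same_dma_page(const void *a, const void *b)
{
	return ((uintptr_t)a >> 17) == ((uintptr_t)b >> 17);
}

Checking the first and last word of each ring (as znet_probe does) is sufficient, since a buffer that fits in the page cannot cross the boundary anywhere in between.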
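The sun3_82586 paths deleted above follow one SCB convention throughout: wait for the command word to clear, write the new command, assert channel attention, and wait again for the 82586 to accept it. A sketch of that handshake under assumed names (scb_issue_command() and chip_attention() are illustrative stand-ins for the driver's WAIT_4_SCB_CMD()/sun3_attn586() pair; only the CU half of the command word is shown):

struct scb_sketch {
	volatile unsigned char cmd_cuc;	/* CU command, cleared by the chip on accept */
};

static void chip_attention(void)
{
	/* Stub: the real driver strobes the Sun-3 OBIO attention bit
	 * (IEOB_ATTEN) to wake the 82586. */
}

static void scb_issue_command(struct scb_sketch *scb, unsigned char cmd)
{
	int timeout;

	/* Wait until any previous command has been accepted. */
	for (timeout = 16384; scb->cmd_cuc && timeout; timeout--)
		;
	scb->cmd_cuc = cmd;
	chip_attention();			/* kick the 82586 */
	/* Wait for the chip to clear the field, signalling acceptance. */
	for (timeout = 16384; scb->cmd_cuc && timeout; timeout--)
		;
}

The unbounded WAIT_4_SCB_CMD_RUC() spin in startrecv586() is the same idea without the timeout; the bounded loop here is an assumption, not the driver's behaviour.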